author     Allan Sandfeld Jensen <allan.jensen@qt.io>  2022-09-07 13:12:05 +0200
committer  Allan Sandfeld Jensen <allan.jensen@qt.io>  2022-11-09 10:02:59 +0000
commit     33fc33aa94d4add0878ec30dc818e34e1dd3cc2a (patch)
tree       f6af110909c79b2759136554f1143d8b0572af0a /chromium/third_party/dawn
parent     7d2c5d177e9813077a621df8d18c0deda73099b3 (diff)
download   qtwebengine-chromium-33fc33aa94d4add0878ec30dc818e34e1dd3cc2a.tar.gz
BASELINE: Update Chromium to 104.0.5112.120
Change-Id: I5d2726c2ab018d75d055739b6ba64317904f05bb
Reviewed-on: https://codereview.qt-project.org/c/qt/qtwebengine-chromium/+/438935
Reviewed-by: Allan Sandfeld Jensen <allan.jensen@qt.io>
Diffstat (limited to 'chromium/third_party/dawn')
-rw-r--r--chromium/third_party/dawn/.clang-format12
-rw-r--r--chromium/third_party/dawn/.clang-tidy163
-rw-r--r--chromium/third_party/dawn/.gn17
-rw-r--r--chromium/third_party/dawn/AUTHORS.dawn6
-rw-r--r--chromium/third_party/dawn/AUTHORS.tint8
-rw-r--r--chromium/third_party/dawn/CMakeLists.txt17
-rw-r--r--chromium/third_party/dawn/DEPS37
-rw-r--r--chromium/third_party/dawn/PRESUBMIT.py26
-rw-r--r--chromium/third_party/dawn/PRESUBMIT.py.dawn38
-rwxr-xr-xchromium/third_party/dawn/PRESUBMIT.py.tint167
-rw-r--r--chromium/third_party/dawn/README.md121
-rw-r--r--chromium/third_party/dawn/README.md.dawn52
-rw-r--r--chromium/third_party/dawn/README.md.tint106
-rw-r--r--chromium/third_party/dawn/dawn.json106
-rw-r--r--chromium/third_party/dawn/dawn_wire.json20
-rw-r--r--chromium/third_party/dawn/docs/building.md90
-rw-r--r--chromium/third_party/dawn/docs/clang-tidy.md37
-rw-r--r--chromium/third_party/dawn/docs/dawn/building.md46
-rw-r--r--chromium/third_party/dawn/docs/dawn/codegen.md2
-rw-r--r--chromium/third_party/dawn/docs/dawn/fuzzing.md4
-rw-r--r--chromium/third_party/dawn/docs/dawn/infra.md4
-rw-r--r--chromium/third_party/dawn/docs/dawn/testing.md2
-rw-r--r--chromium/third_party/dawn/docs/tint/compound_statements.md4
-rw-r--r--chromium/third_party/dawn/docs/tint/origin-trial-changes.md11
-rw-r--r--chromium/third_party/dawn/docs/tint/spirv-input-output-variables.md4
-rw-r--r--chromium/third_party/dawn/docs/tint/translations.md2
-rw-r--r--chromium/third_party/dawn/generator/CMakeLists.txt2
-rw-r--r--chromium/third_party/dawn/generator/dawn_gpu_info_generator.py131
-rw-r--r--chromium/third_party/dawn/generator/dawn_version_generator.py72
-rw-r--r--chromium/third_party/dawn/generator/remove_files.py1
-rw-r--r--chromium/third_party/dawn/generator/templates/api.h3
-rw-r--r--chromium/third_party/dawn/generator/templates/api_struct_info.json2
-rw-r--r--chromium/third_party/dawn/generator/templates/dawn/common/GPUInfo.cpp106
-rw-r--r--chromium/third_party/dawn/generator/templates/dawn/common/GPUInfo.h52
-rw-r--r--chromium/third_party/dawn/generator/templates/dawn/common/Version.h4
-rw-r--r--chromium/third_party/dawn/generator/templates/dawn/native/ProcTable.cpp4
-rw-r--r--chromium/third_party/dawn/generator/templates/dawn/native/api_dawn_native_proc.cpp2
-rw-r--r--chromium/third_party/dawn/generator/templates/dawn/native/dawn_platform.h10
-rw-r--r--chromium/third_party/dawn/generator/templates/dawn/wire/WireCmd.cpp68
-rw-r--r--chromium/third_party/dawn/generator/templates/dawn/wire/WireCmd.h2
-rw-r--r--chromium/third_party/dawn/generator/templates/dawn/wire/client/ClientBase.h2
-rw-r--r--chromium/third_party/dawn/generator/templates/dawn/wire/server/ServerBase.h2
-rw-r--r--chromium/third_party/dawn/generator/templates/dawn/wire/server/ServerDoers.cpp16
-rw-r--r--chromium/third_party/dawn/generator/templates/dawn/wire/server/ServerHandlers.cpp18
-rw-r--r--chromium/third_party/dawn/generator/templates/library_api_enum_tables.js2
-rw-r--r--chromium/third_party/dawn/generator/templates/mock_api.h4
-rw-r--r--chromium/third_party/dawn/go.mod (renamed from chromium/third_party/dawn/tools/src/go.mod)30
-rw-r--r--chromium/third_party/dawn/go.sum (renamed from chromium/third_party/dawn/tools/src/go.sum)59
-rw-r--r--chromium/third_party/dawn/gpu_info.json129
-rw-r--r--chromium/third_party/dawn/include/dawn/CPPLINT.cfg1
-rw-r--r--chromium/third_party/dawn/include/dawn/EnumClassBitmasks.h231
-rw-r--r--chromium/third_party/dawn/include/dawn/dawn_wsi.h4
-rw-r--r--chromium/third_party/dawn/include/dawn/native/D3D12Backend.h151
-rw-r--r--chromium/third_party/dawn/include/dawn/native/DawnNative.h448
-rw-r--r--chromium/third_party/dawn/include/dawn/native/MetalBackend.h44
-rw-r--r--chromium/third_party/dawn/include/dawn/native/NullBackend.h6
-rw-r--r--chromium/third_party/dawn/include/dawn/native/OpenGLBackend.h45
-rw-r--r--chromium/third_party/dawn/include/dawn/native/VulkanBackend.h204
-rw-r--r--chromium/third_party/dawn/include/dawn/native/dawn_native_export.h30
-rw-r--r--chromium/third_party/dawn/include/dawn/platform/DawnPlatform.h167
-rw-r--r--chromium/third_party/dawn/include/dawn/platform/dawn_platform_export.h30
-rw-r--r--chromium/third_party/dawn/include/dawn/wire/Wire.h73
-rw-r--r--chromium/third_party/dawn/include/dawn/wire/WireClient.h282
-rw-r--r--chromium/third_party/dawn/include/dawn/wire/WireServer.h231
-rw-r--r--chromium/third_party/dawn/include/dawn/wire/dawn_wire_export.h30
-rw-r--r--chromium/third_party/dawn/include/tint/.clang-format2
-rw-r--r--chromium/third_party/dawn/include/tint/tint.h1
-rw-r--r--chromium/third_party/dawn/include/webgpu/webgpu_cpp.h2
-rw-r--r--chromium/third_party/dawn/infra/config/global/generated/commit-queue.cfg4
-rw-r--r--chromium/third_party/dawn/infra/config/global/generated/cr-buildbucket.cfg52
-rw-r--r--chromium/third_party/dawn/infra/config/global/generated/project.cfg2
-rwxr-xr-xchromium/third_party/dawn/infra/config/global/main.star10
-rwxr-xr-xchromium/third_party/dawn/infra/kokoro/linux/build.sh6
-rwxr-xr-xchromium/third_party/dawn/infra/kokoro/linux/docker.sh15
-rw-r--r--chromium/third_party/dawn/scripts/dawn_features.gni2
-rw-r--r--chromium/third_party/dawn/scripts/extract.py63
-rw-r--r--chromium/third_party/dawn/src/Placeholder.cpp (renamed from chromium/third_party/dawn/src/Dummy.cpp)0
-rw-r--r--chromium/third_party/dawn/src/dawn/CMakeLists.txt10
-rw-r--r--chromium/third_party/dawn/src/dawn/CPPLINT.cfg16
-rw-r--r--chromium/third_party/dawn/src/dawn/OWNERS2
-rw-r--r--chromium/third_party/dawn/src/dawn/common/Alloc.h4
-rw-r--r--chromium/third_party/dawn/src/dawn/common/Assert.cpp3
-rw-r--r--chromium/third_party/dawn/src/dawn/common/Assert.h42
-rw-r--r--chromium/third_party/dawn/src/dawn/common/BUILD.gn34
-rw-r--r--chromium/third_party/dawn/src/dawn/common/BitSetIterator.h33
-rw-r--r--chromium/third_party/dawn/src/dawn/common/CMakeLists.txt12
-rw-r--r--chromium/third_party/dawn/src/dawn/common/Compiler.h89
-rw-r--r--chromium/third_party/dawn/src/dawn/common/ConcurrentCache.h4
-rw-r--r--chromium/third_party/dawn/src/dawn/common/CoreFoundationRef.h12
-rw-r--r--chromium/third_party/dawn/src/dawn/common/DynamicLib.cpp42
-rw-r--r--chromium/third_party/dawn/src/dawn/common/DynamicLib.h4
-rw-r--r--chromium/third_party/dawn/src/dawn/common/GPUInfo.cpp127
-rw-r--r--chromium/third_party/dawn/src/dawn/common/GPUInfo.h53
-rw-r--r--chromium/third_party/dawn/src/dawn/common/HashUtils.h28
-rw-r--r--chromium/third_party/dawn/src/dawn/common/IOKitRef.h12
-rw-r--r--chromium/third_party/dawn/src/dawn/common/LinkedList.h56
-rw-r--r--chromium/third_party/dawn/src/dawn/common/Log.cpp173
-rw-r--r--chromium/third_party/dawn/src/dawn/common/Log.h68
-rw-r--r--chromium/third_party/dawn/src/dawn/common/Math.cpp30
-rw-r--r--chromium/third_party/dawn/src/dawn/common/Math.h4
-rw-r--r--chromium/third_party/dawn/src/dawn/common/NSRef.h26
-rw-r--r--chromium/third_party/dawn/src/dawn/common/Numeric.h53
-rw-r--r--chromium/third_party/dawn/src/dawn/common/Platform.h120
-rw-r--r--chromium/third_party/dawn/src/dawn/common/Preprocessor.h2
-rw-r--r--chromium/third_party/dawn/src/dawn/common/RefBase.h54
-rw-r--r--chromium/third_party/dawn/src/dawn/common/RefCounted.cpp38
-rw-r--r--chromium/third_party/dawn/src/dawn/common/RefCounted.h39
-rw-r--r--chromium/third_party/dawn/src/dawn/common/Result.cpp16
-rw-r--r--chromium/third_party/dawn/src/dawn/common/Result.h106
-rw-r--r--chromium/third_party/dawn/src/dawn/common/SerialMap.h4
-rw-r--r--chromium/third_party/dawn/src/dawn/common/SerialQueue.h5
-rw-r--r--chromium/third_party/dawn/src/dawn/common/SerialStorage.h24
-rw-r--r--chromium/third_party/dawn/src/dawn/common/SlabAllocator.cpp24
-rw-r--r--chromium/third_party/dawn/src/dawn/common/SlabAllocator.h16
-rw-r--r--chromium/third_party/dawn/src/dawn/common/StackContainer.h72
-rw-r--r--chromium/third_party/dawn/src/dawn/common/SystemUtils.cpp65
-rw-r--r--chromium/third_party/dawn/src/dawn/common/SystemUtils.h9
-rw-r--r--chromium/third_party/dawn/src/dawn/common/TypedInteger.h332
-rw-r--r--chromium/third_party/dawn/src/dawn/common/UnderlyingType.h42
-rw-r--r--chromium/third_party/dawn/src/dawn/common/WindowsUtils.cpp4
-rw-r--r--chromium/third_party/dawn/src/dawn/common/ityp_array.h134
-rw-r--r--chromium/third_party/dawn/src/dawn/common/ityp_bitset.h159
-rw-r--r--chromium/third_party/dawn/src/dawn/common/ityp_span.h138
-rw-r--r--chromium/third_party/dawn/src/dawn/common/ityp_stack_vec.h126
-rw-r--r--chromium/third_party/dawn/src/dawn/common/ityp_vector.h155
-rw-r--r--chromium/third_party/dawn/src/dawn/common/vulkan_platform.h220
-rw-r--r--chromium/third_party/dawn/src/dawn/common/windows_with_undefs.h4
-rw-r--r--chromium/third_party/dawn/src/dawn/common/xlib_with_undefs.h4
-rw-r--r--chromium/third_party/dawn/src/dawn/native/Adapter.cpp386
-rw-r--r--chromium/third_party/dawn/src/dawn/native/Adapter.h103
-rw-r--r--chromium/third_party/dawn/src/dawn/native/AsyncTask.cpp95
-rw-r--r--chromium/third_party/dawn/src/dawn/native/AsyncTask.h57
-rw-r--r--chromium/third_party/dawn/src/dawn/native/AttachmentState.cpp227
-rw-r--r--chromium/third_party/dawn/src/dawn/native/AttachmentState.h93
-rw-r--r--chromium/third_party/dawn/src/dawn/native/BUILD.gn54
-rw-r--r--chromium/third_party/dawn/src/dawn/native/BackendConnection.cpp25
-rw-r--r--chromium/third_party/dawn/src/dawn/native/BackendConnection.h41
-rw-r--r--chromium/third_party/dawn/src/dawn/native/BindGroup.cpp923
-rw-r--r--chromium/third_party/dawn/src/dawn/native/BindGroup.h128
-rw-r--r--chromium/third_party/dawn/src/dawn/native/BindGroupLayout.cpp1139
-rw-r--r--chromium/third_party/dawn/src/dawn/native/BindGroupLayout.h260
-rw-r--r--chromium/third_party/dawn/src/dawn/native/BindGroupTracker.h202
-rw-r--r--chromium/third_party/dawn/src/dawn/native/BindingInfo.cpp312
-rw-r--r--chromium/third_party/dawn/src/dawn/native/BindingInfo.h133
-rw-r--r--chromium/third_party/dawn/src/dawn/native/Blob.cpp80
-rw-r--r--chromium/third_party/dawn/src/dawn/native/Blob.h60
-rw-r--r--chromium/third_party/dawn/src/dawn/native/BlobCache.cpp66
-rw-r--r--chromium/third_party/dawn/src/dawn/native/BlobCache.h58
-rw-r--r--chromium/third_party/dawn/src/dawn/native/BuddyAllocator.cpp382
-rw-r--r--chromium/third_party/dawn/src/dawn/native/BuddyAllocator.h160
-rw-r--r--chromium/third_party/dawn/src/dawn/native/BuddyMemoryAllocator.cpp152
-rw-r--r--chromium/third_party/dawn/src/dawn/native/BuddyMemoryAllocator.h91
-rw-r--r--chromium/third_party/dawn/src/dawn/native/Buffer.cpp896
-rw-r--r--chromium/third_party/dawn/src/dawn/native/Buffer.h202
-rw-r--r--chromium/third_party/dawn/src/dawn/native/CMakeLists.txt24
-rw-r--r--chromium/third_party/dawn/src/dawn/native/CacheKey.cpp36
-rw-r--r--chromium/third_party/dawn/src/dawn/native/CacheKey.h324
-rw-r--r--chromium/third_party/dawn/src/dawn/native/CachedObject.cpp56
-rw-r--r--chromium/third_party/dawn/src/dawn/native/CachedObject.h61
-rw-r--r--chromium/third_party/dawn/src/dawn/native/CallbackTaskManager.cpp34
-rw-r--r--chromium/third_party/dawn/src/dawn/native/CallbackTaskManager.h39
-rw-r--r--chromium/third_party/dawn/src/dawn/native/CommandAllocator.cpp378
-rw-r--r--chromium/third_party/dawn/src/dawn/native/CommandAllocator.h465
-rw-r--r--chromium/third_party/dawn/src/dawn/native/CommandBuffer.cpp402
-rw-r--r--chromium/third_party/dawn/src/dawn/native/CommandBuffer.h59
-rw-r--r--chromium/third_party/dawn/src/dawn/native/CommandBufferStateTracker.cpp677
-rw-r--r--chromium/third_party/dawn/src/dawn/native/CommandBufferStateTracker.h122
-rw-r--r--chromium/third_party/dawn/src/dawn/native/CommandEncoder.cpp2524
-rw-r--r--chromium/third_party/dawn/src/dawn/native/CommandEncoder.h185
-rw-r--r--chromium/third_party/dawn/src/dawn/native/CommandValidation.cpp835
-rw-r--r--chromium/third_party/dawn/src/dawn/native/CommandValidation.h133
-rw-r--r--chromium/third_party/dawn/src/dawn/native/Commands.cpp625
-rw-r--r--chromium/third_party/dawn/src/dawn/native/Commands.h611
-rw-r--r--chromium/third_party/dawn/src/dawn/native/CompilationMessages.cpp304
-rw-r--r--chromium/third_party/dawn/src/dawn/native/CompilationMessages.h64
-rw-r--r--chromium/third_party/dawn/src/dawn/native/ComputePassEncoder.cpp775
-rw-r--r--chromium/third_party/dawn/src/dawn/native/ComputePassEncoder.h147
-rw-r--r--chromium/third_party/dawn/src/dawn/native/ComputePipeline.cpp127
-rw-r--r--chromium/third_party/dawn/src/dawn/native/ComputePipeline.h42
-rw-r--r--chromium/third_party/dawn/src/dawn/native/CopyTextureForBrowserHelper.cpp809
-rw-r--r--chromium/third_party/dawn/src/dawn/native/CopyTextureForBrowserHelper.h28
-rw-r--r--chromium/third_party/dawn/src/dawn/native/CreatePipelineAsyncTask.cpp332
-rw-r--r--chromium/third_party/dawn/src/dawn/native/CreatePipelineAsyncTask.h168
-rw-r--r--chromium/third_party/dawn/src/dawn/native/DawnNative.cpp466
-rw-r--r--chromium/third_party/dawn/src/dawn/native/Device.cpp3213
-rw-r--r--chromium/third_party/dawn/src/dawn/native/Device.h1029
-rw-r--r--chromium/third_party/dawn/src/dawn/native/DynamicUploader.cpp182
-rw-r--r--chromium/third_party/dawn/src/dawn/native/DynamicUploader.h62
-rw-r--r--chromium/third_party/dawn/src/dawn/native/EncodingContext.cpp363
-rw-r--r--chromium/third_party/dawn/src/dawn/native/EncodingContext.h275
-rw-r--r--chromium/third_party/dawn/src/dawn/native/EnumClassBitmasks.h28
-rw-r--r--chromium/third_party/dawn/src/dawn/native/EnumMaskIterator.h83
-rw-r--r--chromium/third_party/dawn/src/dawn/native/Error.cpp68
-rw-r--r--chromium/third_party/dawn/src/dawn/native/Error.h152
-rw-r--r--chromium/third_party/dawn/src/dawn/native/ErrorData.cpp160
-rw-r--r--chromium/third_party/dawn/src/dawn/native/ErrorData.h70
-rw-r--r--chromium/third_party/dawn/src/dawn/native/ErrorInjector.cpp74
-rw-r--r--chromium/third_party/dawn/src/dawn/native/ErrorInjector.h58
-rw-r--r--chromium/third_party/dawn/src/dawn/native/ErrorScope.cpp115
-rw-r--r--chromium/third_party/dawn/src/dawn/native/ErrorScope.h53
-rw-r--r--chromium/third_party/dawn/src/dawn/native/ExternalTexture.cpp335
-rw-r--r--chromium/third_party/dawn/src/dawn/native/ExternalTexture.h77
-rw-r--r--chromium/third_party/dawn/src/dawn/native/Features.cpp478
-rw-r--r--chromium/third_party/dawn/src/dawn/native/Features.h92
-rw-r--r--chromium/third_party/dawn/src/dawn/native/Format.cpp558
-rw-r--r--chromium/third_party/dawn/src/dawn/native/Format.h240
-rw-r--r--chromium/third_party/dawn/src/dawn/native/Forward.h77
-rw-r--r--chromium/third_party/dawn/src/dawn/native/IndirectDrawMetadata.cpp312
-rw-r--r--chromium/third_party/dawn/src/dawn/native/IndirectDrawMetadata.h197
-rw-r--r--chromium/third_party/dawn/src/dawn/native/IndirectDrawValidationEncoder.cpp624
-rw-r--r--chromium/third_party/dawn/src/dawn/native/IndirectDrawValidationEncoder.h28
-rw-r--r--chromium/third_party/dawn/src/dawn/native/Instance.cpp614
-rw-r--r--chromium/third_party/dawn/src/dawn/native/Instance.h144
-rw-r--r--chromium/third_party/dawn/src/dawn/native/IntegerTypes.h78
-rw-r--r--chromium/third_party/dawn/src/dawn/native/InternalPipelineStore.cpp21
-rw-r--r--chromium/third_party/dawn/src/dawn/native/InternalPipelineStore.h49
-rw-r--r--chromium/third_party/dawn/src/dawn/native/Limits.cpp234
-rw-r--r--chromium/third_party/dawn/src/dawn/native/Limits.h26
-rw-r--r--chromium/third_party/dawn/src/dawn/native/ObjectBase.cpp102
-rw-r--r--chromium/third_party/dawn/src/dawn/native/ObjectBase.h147
-rw-r--r--chromium/third_party/dawn/src/dawn/native/ObjectContentHasher.cpp6
-rw-r--r--chromium/third_party/dawn/src/dawn/native/ObjectContentHasher.h96
-rw-r--r--chromium/third_party/dawn/src/dawn/native/PassResourceUsage.cpp23
-rw-r--r--chromium/third_party/dawn/src/dawn/native/PassResourceUsage.h148
-rw-r--r--chromium/third_party/dawn/src/dawn/native/PassResourceUsageTracker.cpp379
-rw-r--r--chromium/third_party/dawn/src/dawn/native/PassResourceUsageTracker.h135
-rw-r--r--chromium/third_party/dawn/src/dawn/native/PerStage.cpp16
-rw-r--r--chromium/third_party/dawn/src/dawn/native/PerStage.h102
-rw-r--r--chromium/third_party/dawn/src/dawn/native/PersistentCache.cpp64
-rw-r--r--chromium/third_party/dawn/src/dawn/native/PersistentCache.h92
-rw-r--r--chromium/third_party/dawn/src/dawn/native/Pipeline.cpp396
-rw-r--r--chromium/third_party/dawn/src/dawn/native/Pipeline.h105
-rw-r--r--chromium/third_party/dawn/src/dawn/native/PipelineCache.cpp58
-rw-r--r--chromium/third_party/dawn/src/dawn/native/PipelineCache.h64
-rw-r--r--chromium/third_party/dawn/src/dawn/native/PipelineLayout.cpp661
-rw-r--r--chromium/third_party/dawn/src/dawn/native/PipelineLayout.h121
-rw-r--r--chromium/third_party/dawn/src/dawn/native/PooledResourceMemoryAllocator.cpp67
-rw-r--r--chromium/third_party/dawn/src/dawn/native/PooledResourceMemoryAllocator.h42
-rw-r--r--chromium/third_party/dawn/src/dawn/native/ProgrammableEncoder.cpp330
-rw-r--r--chromium/third_party/dawn/src/dawn/native/ProgrammableEncoder.h62
-rw-r--r--chromium/third_party/dawn/src/dawn/native/QueryHelper.cpp206
-rw-r--r--chromium/third_party/dawn/src/dawn/native/QueryHelper.h34
-rw-r--r--chromium/third_party/dawn/src/dawn/native/QuerySet.cpp274
-rw-r--r--chromium/third_party/dawn/src/dawn/native/QuerySet.h65
-rw-r--r--chromium/third_party/dawn/src/dawn/native/Queue.cpp779
-rw-r--r--chromium/third_party/dawn/src/dawn/native/Queue.h152
-rw-r--r--chromium/third_party/dawn/src/dawn/native/RefCountedWithExternalCount.cpp31
-rw-r--r--chromium/third_party/dawn/src/dawn/native/RefCountedWithExternalCount.h45
-rw-r--r--chromium/third_party/dawn/src/dawn/native/RenderBundle.cpp131
-rw-r--r--chromium/third_party/dawn/src/dawn/native/RenderBundle.h62
-rw-r--r--chromium/third_party/dawn/src/dawn/native/RenderBundleEncoder.cpp261
-rw-r--r--chromium/third_party/dawn/src/dawn/native/RenderBundleEncoder.h37
-rw-r--r--chromium/third_party/dawn/src/dawn/native/RenderEncoderBase.cpp767
-rw-r--r--chromium/third_party/dawn/src/dawn/native/RenderEncoderBase.h114
-rw-r--r--chromium/third_party/dawn/src/dawn/native/RenderPassEncoder.cpp732
-rw-r--r--chromium/third_party/dawn/src/dawn/native/RenderPassEncoder.h154
-rw-r--r--chromium/third_party/dawn/src/dawn/native/RenderPipeline.cpp1855
-rw-r--r--chromium/third_party/dawn/src/dawn/native/RenderPipeline.h228
-rw-r--r--chromium/third_party/dawn/src/dawn/native/ResourceHeap.h12
-rw-r--r--chromium/third_party/dawn/src/dawn/native/ResourceHeapAllocator.h20
-rw-r--r--chromium/third_party/dawn/src/dawn/native/ResourceMemoryAllocation.cpp64
-rw-r--r--chromium/third_party/dawn/src/dawn/native/ResourceMemoryAllocation.h111
-rw-r--r--chromium/third_party/dawn/src/dawn/native/RingBufferAllocator.cpp153
-rw-r--r--chromium/third_party/dawn/src/dawn/native/RingBufferAllocator.h72
-rw-r--r--chromium/third_party/dawn/src/dawn/native/Sampler.cpp241
-rw-r--r--chromium/third_party/dawn/src/dawn/native/Sampler.h96
-rw-r--r--chromium/third_party/dawn/src/dawn/native/ScratchBuffer.cpp45
-rw-r--r--chromium/third_party/dawn/src/dawn/native/ScratchBuffer.h58
-rw-r--r--chromium/third_party/dawn/src/dawn/native/ShaderModule.cpp2359
-rw-r--r--chromium/third_party/dawn/src/dawn/native/ShaderModule.h483
-rw-r--r--chromium/third_party/dawn/src/dawn/native/SpirvValidation.cpp90
-rw-r--r--chromium/third_party/dawn/src/dawn/native/SpirvValidation.h10
-rw-r--r--chromium/third_party/dawn/src/dawn/native/StagingBuffer.cpp15
-rw-r--r--chromium/third_party/dawn/src/dawn/native/StagingBuffer.h24
-rw-r--r--chromium/third_party/dawn/src/dawn/native/Subresource.cpp188
-rw-r--r--chromium/third_party/dawn/src/dawn/native/Subresource.h162
-rw-r--r--chromium/third_party/dawn/src/dawn/native/SubresourceStorage.h923
-rw-r--r--chromium/third_party/dawn/src/dawn/native/Surface.cpp462
-rw-r--r--chromium/third_party/dawn/src/dawn/native/Surface.h150
-rw-r--r--chromium/third_party/dawn/src/dawn/native/Surface_metal.mm10
-rw-r--r--chromium/third_party/dawn/src/dawn/native/SwapChain.cpp625
-rw-r--r--chromium/third_party/dawn/src/dawn/native/SwapChain.h278
-rw-r--r--chromium/third_party/dawn/src/dawn/native/Texture.cpp1625
-rw-r--r--chromium/third_party/dawn/src/dawn/native/Texture.h270
-rw-r--r--chromium/third_party/dawn/src/dawn/native/TintUtils.cpp61
-rw-r--r--chromium/third_party/dawn/src/dawn/native/TintUtils.h24
-rw-r--r--chromium/third_party/dawn/src/dawn/native/ToBackend.h262
-rw-r--r--chromium/third_party/dawn/src/dawn/native/Toggles.cpp610
-rw-r--r--chromium/third_party/dawn/src/dawn/native/Toggles.h141
-rw-r--r--chromium/third_party/dawn/src/dawn/native/VertexFormat.cpp86
-rw-r--r--chromium/third_party/dawn/src/dawn/native/VertexFormat.h30
-rw-r--r--chromium/third_party/dawn/src/dawn/native/XlibXcbFunctions.cpp17
-rw-r--r--chromium/third_party/dawn/src/dawn/native/XlibXcbFunctions.h32
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/AdapterD3D12.cpp731
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/AdapterD3D12.h55
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/BackendD3D12.cpp305
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/BackendD3D12.h54
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/BindGroupD3D12.cpp405
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/BindGroupD3D12.h56
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/BindGroupLayoutD3D12.cpp274
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/BindGroupLayoutD3D12.h128
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/BlobD3D12.cpp31
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/BlobD3D12.h22
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/BufferD3D12.cpp801
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/BufferD3D12.h126
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/CPUDescriptorHeapAllocationD3D12.cpp62
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/CPUDescriptorHeapAllocationD3D12.h30
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/CacheKeyD3D12.cpp139
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/CommandAllocatorManager.cpp76
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/CommandAllocatorManager.h46
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/CommandBufferD3D12.cpp2673
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/CommandBufferD3D12.h56
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/CommandRecordingContext.cpp268
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/CommandRecordingContext.h52
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/ComputePipelineD3D12.cpp142
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/ComputePipelineD3D12.h43
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/D3D11on12Util.cpp262
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/D3D11on12Util.h119
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/D3D12Backend.cpp202
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/D3D12Error.cpp83
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/D3D12Error.h14
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/D3D12Info.cpp177
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/D3D12Info.h27
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/DeviceD3D12.cpp1246
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/DeviceD3D12.h435
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/ExternalImageDXGIImpl.cpp106
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/ExternalImageDXGIImpl.h70
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/Forward.h86
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/GPUDescriptorHeapAllocationD3D12.cpp30
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/GPUDescriptorHeapAllocationD3D12.h34
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/HeapAllocatorD3D12.cpp81
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/HeapAllocatorD3D12.h43
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/HeapD3D12.cpp24
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/HeapD3D12.h22
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/IntegerTypes.h10
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/NativeSwapChainImplD3D12.cpp165
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/NativeSwapChainImplD3D12.h50
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/PageableD3D12.cpp89
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/PageableD3D12.h90
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/PipelineLayoutD3D12.cpp700
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/PipelineLayoutD3D12.h130
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/PlatformFunctions.cpp422
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/PlatformFunctions.h167
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/QuerySetD3D12.cpp86
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/QuerySetD3D12.h30
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/QueueD3D12.cpp66
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/QueueD3D12.h22
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/RenderPassBuilderD3D12.cpp437
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/RenderPassBuilderD3D12.h125
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/RenderPipelineD3D12.cpp887
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/RenderPipelineD3D12.h54
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/ResidencyManagerD3D12.cpp555
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/ResidencyManagerD3D12.h101
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/ResourceAllocatorManagerD3D12.cpp690
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/ResourceAllocatorManagerD3D12.h156
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/ResourceHeapAllocationD3D12.cpp38
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/ResourceHeapAllocationD3D12.h42
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/SamplerD3D12.cpp157
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/SamplerD3D12.h20
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/SamplerHeapCacheD3D12.cpp226
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/SamplerHeapCacheD3D12.h125
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/ShaderModuleD3D12.cpp1410
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/ShaderModuleD3D12.h71
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.cpp431
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.h110
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/StagingBufferD3D12.cpp84
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/StagingBufferD3D12.h22
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/StagingDescriptorAllocatorD3D12.cpp195
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/StagingDescriptorAllocatorD3D12.h71
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/SwapChainD3D12.cpp623
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/SwapChainD3D12.h114
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/TextureCopySplitter.cpp962
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/TextureCopySplitter.h128
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/TextureD3D12.cpp2453
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/TextureD3D12.h261
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/UtilsD3D12.cpp609
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/UtilsD3D12.h89
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/d3d12_platform.h17
-rw-r--r--chromium/third_party/dawn/src/dawn/native/dawn_platform.h77
-rw-r--r--chromium/third_party/dawn/src/dawn/native/metal/BackendMTL.h17
-rw-r--r--chromium/third_party/dawn/src/dawn/native/metal/BackendMTL.mm1034
-rw-r--r--chromium/third_party/dawn/src/dawn/native/metal/BindGroupLayoutMTL.h40
-rw-r--r--chromium/third_party/dawn/src/dawn/native/metal/BindGroupLayoutMTL.mm47
-rw-r--r--chromium/third_party/dawn/src/dawn/native/metal/BindGroupMTL.h18
-rw-r--r--chromium/third_party/dawn/src/dawn/native/metal/BindGroupMTL.mm23
-rw-r--r--chromium/third_party/dawn/src/dawn/native/metal/BufferMTL.h79
-rw-r--r--chromium/third_party/dawn/src/dawn/native/metal/BufferMTL.mm346
-rw-r--r--chromium/third_party/dawn/src/dawn/native/metal/CommandBufferMTL.h59
-rw-r--r--chromium/third_party/dawn/src/dawn/native/metal/CommandBufferMTL.mm2442
-rw-r--r--chromium/third_party/dawn/src/dawn/native/metal/CommandRecordingContext.h62
-rw-r--r--chromium/third_party/dawn/src/dawn/native/metal/CommandRecordingContext.mm186
-rw-r--r--chromium/third_party/dawn/src/dawn/native/metal/ComputePipelineMTL.h52
-rw-r--r--chromium/third_party/dawn/src/dawn/native/metal/ComputePipelineMTL.mm104
-rw-r--r--chromium/third_party/dawn/src/dawn/native/metal/DeviceMTL.h234
-rw-r--r--chromium/third_party/dawn/src/dawn/native/metal/DeviceMTL.mm830
-rw-r--r--chromium/third_party/dawn/src/dawn/native/metal/Forward.h82
-rw-r--r--chromium/third_party/dawn/src/dawn/native/metal/MetalBackend.mm43
-rw-r--r--chromium/third_party/dawn/src/dawn/native/metal/PipelineLayoutMTL.h61
-rw-r--r--chromium/third_party/dawn/src/dawn/native/metal/PipelineLayoutMTL.mm94
-rw-r--r--chromium/third_party/dawn/src/dawn/native/metal/QuerySetMTL.h53
-rw-r--r--chromium/third_party/dawn/src/dawn/native/metal/QuerySetMTL.mm201
-rw-r--r--chromium/third_party/dawn/src/dawn/native/metal/QueueMTL.h15
-rw-r--r--chromium/third_party/dawn/src/dawn/native/metal/QueueMTL.mm26
-rw-r--r--chromium/third_party/dawn/src/dawn/native/metal/RenderPipelineMTL.h62
-rw-r--r--chromium/third_party/dawn/src/dawn/native/metal/RenderPipelineMTL.mm879
-rw-r--r--chromium/third_party/dawn/src/dawn/native/metal/SamplerMTL.h24
-rw-r--r--chromium/third_party/dawn/src/dawn/native/metal/SamplerMTL.mm159
-rw-r--r--chromium/third_party/dawn/src/dawn/native/metal/ShaderModuleMTL.h77
-rw-r--r--chromium/third_party/dawn/src/dawn/native/metal/ShaderModuleMTL.mm421
-rw-r--r--chromium/third_party/dawn/src/dawn/native/metal/StagingBufferMTL.h21
-rw-r--r--chromium/third_party/dawn/src/dawn/native/metal/StagingBufferMTL.mm39
-rw-r--r--chromium/third_party/dawn/src/dawn/native/metal/SwapChainMTL.h76
-rw-r--r--chromium/third_party/dawn/src/dawn/native/metal/SwapChainMTL.mm198
-rw-r--r--chromium/third_party/dawn/src/dawn/native/metal/TextureMTL.h120
-rw-r--r--chromium/third_party/dawn/src/dawn/native/metal/TextureMTL.mm2048
-rw-r--r--chromium/third_party/dawn/src/dawn/native/metal/UtilsMetal.h122
-rw-r--r--chromium/third_party/dawn/src/dawn/native/metal/UtilsMetal.mm718
-rw-r--r--chromium/third_party/dawn/src/dawn/native/null/DeviceNull.cpp876
-rw-r--r--chromium/third_party/dawn/src/dawn/native/null/DeviceNull.h597
-rw-r--r--chromium/third_party/dawn/src/dawn/native/null/NullBackend.cpp12
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/AdapterGL.cpp260
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/AdapterGL.h45
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/BackendGL.cpp306
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/BackendGL.h20
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/BindGroupGL.cpp65
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/BindGroupGL.h20
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/BindGroupLayoutGL.cpp27
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/BindGroupLayoutGL.h26
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/BufferGL.cpp273
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/BufferGL.h48
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/CommandBufferGL.cpp2250
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/CommandBufferGL.h36
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/ComputePipelineGL.cpp45
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/ComputePipelineGL.h25
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/DeviceGL.cpp545
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/DeviceGL.h188
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/Forward.h78
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/GLFormat.cpp51
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/GLFormat.h30
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/NativeSwapChainImplGL.cpp128
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/NativeSwapChainImplGL.h46
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/OpenGLBackend.cpp72
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/OpenGLFunctions.cpp60
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/OpenGLFunctions.h25
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/OpenGLVersion.cpp87
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/OpenGLVersion.h36
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/PersistentPipelineStateGL.cpp62
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/PersistentPipelineStateGL.h38
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/PipelineGL.cpp305
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/PipelineGL.h68
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/PipelineLayoutGL.cpp117
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/PipelineLayoutGL.h40
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/QuerySetGL.cpp7
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/QuerySetGL.h14
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/QueueGL.cpp86
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/QueueGL.h34
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/RenderPipelineGL.cpp587
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/RenderPipelineGL.h52
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/SamplerGL.cpp182
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/SamplerGL.h30
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/ShaderModuleGL.cpp276
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/ShaderModuleGL.h93
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/SwapChainGL.cpp49
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/SwapChainGL.h18
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/TextureGL.cpp1108
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/TextureGL.h99
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/UtilsGL.cpp232
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/UtilsGL.h28
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/supported_extensions.json4
-rw-r--r--chromium/third_party/dawn/src/dawn/native/utils/WGPUHelpers.cpp304
-rw-r--r--chromium/third_party/dawn/src/dawn/native/utils/WGPUHelpers.h185
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/AdapterVk.cpp551
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/AdapterVk.h45
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/BackendVk.cpp668
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/BackendVk.h128
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/BindGroupLayoutVk.cpp294
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/BindGroupLayoutVk.h106
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/BindGroupVk.cpp230
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/BindGroupVk.h37
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/BufferVk.cpp654
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/BufferVk.h105
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/CacheKeyVk.cpp474
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/CacheKeyVk.h128
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/CommandBufferVk.cpp2177
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/CommandBufferVk.h40
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/CommandRecordingContext.h29
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/ComputePipelineVk.cpp179
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/ComputePipelineVk.h37
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/DescriptorSetAllocation.h12
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/DescriptorSetAllocator.cpp277
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/DescriptorSetAllocator.h70
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/DeviceVk.cpp1780
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/DeviceVk.h368
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/ExternalHandle.h26
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/FencedDeleter.cpp261
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/FencedDeleter.h78
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/Forward.h86
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/NativeSwapChainImplVk.cpp350
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/NativeSwapChainImplVk.h79
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/PipelineCacheVk.cpp91
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/PipelineCacheVk.h49
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/PipelineLayoutVk.cpp102
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/PipelineLayoutVk.h31
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/QuerySetVk.cpp157
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/QuerySetVk.h30
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/QueueVk.cpp62
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/QueueVk.h26
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/RenderPassCache.cpp474
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/RenderPassCache.h144
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/RenderPipelineVk.cpp1173
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/RenderPipelineVk.h48
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/ResourceHeapVk.cpp17
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/ResourceHeapVk.h26
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/ResourceMemoryAllocatorVk.cpp448
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/ResourceMemoryAllocatorVk.h54
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/SamplerVk.cpp197
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/SamplerVk.h29
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/ShaderModuleVk.cpp390
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/ShaderModuleVk.h89
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/StagingBufferVk.cpp79
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/StagingBufferVk.h24
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/SwapChainVk.cpp1200
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/SwapChainVk.h134
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/TextureVk.cpp2480
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/TextureVk.h320
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/UtilsVulkan.cpp489
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/UtilsVulkan.h233
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/VulkanBackend.cpp177
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/VulkanError.cpp150
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/VulkanError.h8
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/VulkanExtensions.cpp598
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/VulkanExtensions.h284
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/VulkanFunctions.cpp626
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/VulkanFunctions.h599
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/VulkanInfo.cpp506
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/VulkanInfo.h128
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/external_memory/MemoryService.h80
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/external_memory/MemoryServiceDmaBuf.cpp626
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/external_memory/MemoryServiceNull.cpp78
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/external_memory/MemoryServiceOpaqueFD.cpp264
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/external_memory/MemoryServiceZirconHandle.cpp267
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/external_semaphore/SemaphoreService.h46
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/external_semaphore/SemaphoreServiceFD.cpp189
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/external_semaphore/SemaphoreServiceNull.cpp50
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/external_semaphore/SemaphoreServiceZirconHandle.cpp213
-rw-r--r--chromium/third_party/dawn/src/dawn/native/webgpu_absl_format.cpp691
-rw-r--r--chromium/third_party/dawn/src/dawn/native/webgpu_absl_format.h210
-rw-r--r--chromium/third_party/dawn/src/dawn/node/CMakeLists.txt12
-rw-r--r--chromium/third_party/dawn/src/dawn/node/Module.cpp47
-rw-r--r--chromium/third_party/dawn/src/dawn/node/NapiSymbols.cpp2
-rw-r--r--chromium/third_party/dawn/src/dawn/node/README.md6
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/AsyncRunner.cpp67
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/AsyncRunner.h82
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/CMakeLists.txt2
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/Converter.cpp2269
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/Converter.h552
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/Errors.cpp305
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/Errors.h72
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/Flags.cpp18
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/Flags.h18
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPU.cpp231
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPU.h29
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPUAdapter.cpp366
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPUAdapter.h46
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPUBindGroup.cpp24
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPUBindGroup.h29
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPUBindGroupLayout.cpp29
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPUBindGroupLayout.h31
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPUBuffer.cpp270
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPUBuffer.h101
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPUCommandBuffer.cpp24
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPUCommandBuffer.h31
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPUCommandEncoder.cpp372
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPUCommandEncoder.h111
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPUComputePassEncoder.cpp168
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPUComputePassEncoder.h77
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPUComputePipeline.cpp44
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPUComputePipeline.h41
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPUDevice.cpp876
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPUDevice.h172
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPUPipelineLayout.cpp24
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPUPipelineLayout.h31
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPUQuerySet.cpp37
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPUQuerySet.h33
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPUQueue.cpp232
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPUQueue.h68
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPURenderBundle.cpp24
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPURenderBundle.h31
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPURenderBundleEncoder.cpp331
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPURenderBundleEncoder.h115
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPURenderPassEncoder.cpp447
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPURenderPassEncoder.h154
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPURenderPipeline.cpp44
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPURenderPipeline.h41
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPUSampler.cpp23
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPUSampler.h35
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPUShaderModule.cpp179
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPUShaderModule.h44
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPUSupportedLimits.cpp175
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPUSupportedLimits.h69
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPUTexture.cpp99
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPUTexture.h51
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPUTextureView.cpp24
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPUTextureView.h31
-rw-r--r--chromium/third_party/dawn/src/dawn/node/interop/Browser.idl2
-rw-r--r--chromium/third_party/dawn/src/dawn/node/interop/CMakeLists.txt4
-rw-r--r--chromium/third_party/dawn/src/dawn/node/interop/Core.cpp300
-rw-r--r--chromium/third_party/dawn/src/dawn/node/interop/Core.h1350
-rw-r--r--chromium/third_party/dawn/src/dawn/node/tools/go.mod9
-rw-r--r--chromium/third_party/dawn/src/dawn/node/tools/go.sum33
-rw-r--r--chromium/third_party/dawn/src/dawn/node/tools/src/cmd/idlgen/main.go4
-rw-r--r--chromium/third_party/dawn/src/dawn/node/tools/src/cmd/run-cts/main.go113
-rw-r--r--chromium/third_party/dawn/src/dawn/node/utils/Debug.h181
-rw-r--r--chromium/third_party/dawn/src/dawn/platform/BUILD.gn2
-rw-r--r--chromium/third_party/dawn/src/dawn/platform/CMakeLists.txt2
-rw-r--r--chromium/third_party/dawn/src/dawn/platform/DawnPlatform.cpp84
-rw-r--r--chromium/third_party/dawn/src/dawn/platform/WorkerThread.cpp98
-rw-r--r--chromium/third_party/dawn/src/dawn/platform/WorkerThread.h14
-rw-r--r--chromium/third_party/dawn/src/dawn/platform/tracing/EventTracer.cpp64
-rw-r--r--chromium/third_party/dawn/src/dawn/platform/tracing/EventTracer.h42
-rw-r--r--chromium/third_party/dawn/src/dawn/platform/tracing/TraceEvent.h362
-rw-r--r--chromium/third_party/dawn/src/dawn/samples/Animometer.cpp15
-rw-r--r--chromium/third_party/dawn/src/dawn/samples/BUILD.gn1
-rw-r--r--chromium/third_party/dawn/src/dawn/samples/CHelloTriangle.cpp4
-rw-r--r--chromium/third_party/dawn/src/dawn/samples/CMakeLists.txt2
-rw-r--r--chromium/third_party/dawn/src/dawn/samples/ComputeBoids.cpp17
-rw-r--r--chromium/third_party/dawn/src/dawn/samples/CppHelloTriangle.cpp8
-rw-r--r--chromium/third_party/dawn/src/dawn/samples/ManualSwapChainTest.cpp24
-rw-r--r--chromium/third_party/dawn/src/dawn/samples/SampleUtils.cpp11
-rw-r--r--chromium/third_party/dawn/src/dawn/samples/SampleUtils.h4
-rw-r--r--chromium/third_party/dawn/src/dawn/tests/BUILD.gn135
-rw-r--r--chromium/third_party/dawn/src/dawn/utils/BackendBinding.cpp87
-rw-r--r--chromium/third_party/dawn/src/dawn/utils/BackendBinding.h26
-rw-r--r--chromium/third_party/dawn/src/dawn/utils/CMakeLists.txt2
-rw-r--r--chromium/third_party/dawn/src/dawn/utils/ComboRenderBundleEncoderDescriptor.cpp10
-rw-r--r--chromium/third_party/dawn/src/dawn/utils/ComboRenderBundleEncoderDescriptor.h15
-rw-r--r--chromium/third_party/dawn/src/dawn/utils/ComboRenderPipelineDescriptor.cpp220
-rw-r--r--chromium/third_party/dawn/src/dawn/utils/ComboRenderPipelineDescriptor.h75
-rw-r--r--chromium/third_party/dawn/src/dawn/utils/D3D12Binding.cpp44
-rw-r--r--chromium/third_party/dawn/src/dawn/utils/EmptyDebugLogger.cpp16
-rw-r--r--chromium/third_party/dawn/src/dawn/utils/GLFWUtils.cpp131
-rw-r--r--chromium/third_party/dawn/src/dawn/utils/GLFWUtils.h26
-rw-r--r--chromium/third_party/dawn/src/dawn/utils/GLFWUtils_metal.mm36
-rw-r--r--chromium/third_party/dawn/src/dawn/utils/Glfw3Fuchsia.cpp11
-rw-r--r--chromium/third_party/dawn/src/dawn/utils/MetalBinding.mm174
-rw-r--r--chromium/third_party/dawn/src/dawn/utils/NullBinding.cpp39
-rw-r--r--chromium/third_party/dawn/src/dawn/utils/OSXTimer.cpp85
-rw-r--r--chromium/third_party/dawn/src/dawn/utils/ObjCUtils.h4
-rw-r--r--chromium/third_party/dawn/src/dawn/utils/ObjCUtils.mm6
-rw-r--r--chromium/third_party/dawn/src/dawn/utils/OpenGLBinding.cpp44
-rw-r--r--chromium/third_party/dawn/src/dawn/utils/PlatformDebugLogger.h10
-rw-r--r--chromium/third_party/dawn/src/dawn/utils/PosixTimer.cpp81
-rw-r--r--chromium/third_party/dawn/src/dawn/utils/ScopedAutoreleasePool.cpp17
-rw-r--r--chromium/third_party/dawn/src/dawn/utils/ScopedAutoreleasePool.h72
-rw-r--r--chromium/third_party/dawn/src/dawn/utils/ScopedAutoreleasePool.mm31
-rw-r--r--chromium/third_party/dawn/src/dawn/utils/SystemUtils.cpp28
-rw-r--r--chromium/third_party/dawn/src/dawn/utils/SystemUtils.h2
-rw-r--r--chromium/third_party/dawn/src/dawn/utils/TerribleCommandBuffer.cpp56
-rw-r--r--chromium/third_party/dawn/src/dawn/utils/TerribleCommandBuffer.h26
-rw-r--r--chromium/third_party/dawn/src/dawn/utils/TestUtils.cpp294
-rw-r--r--chromium/third_party/dawn/src/dawn/utils/TestUtils.h76
-rw-r--r--chromium/third_party/dawn/src/dawn/utils/TextureUtils.cpp1463
-rw-r--r--chromium/third_party/dawn/src/dawn/utils/TextureUtils.h449
-rw-r--r--chromium/third_party/dawn/src/dawn/utils/Timer.h35
-rw-r--r--chromium/third_party/dawn/src/dawn/utils/VulkanBinding.cpp51
-rw-r--r--chromium/third_party/dawn/src/dawn/utils/WGPUHelpers.cpp671
-rw-r--r--chromium/third_party/dawn/src/dawn/utils/WGPUHelpers.h297
-rw-r--r--chromium/third_party/dawn/src/dawn/utils/WindowsDebugLogger.cpp161
-rw-r--r--chromium/third_party/dawn/src/dawn/utils/WindowsTimer.cpp105
-rw-r--r--chromium/third_party/dawn/src/dawn/utils/WireHelper.cpp278
-rw-r--r--chromium/third_party/dawn/src/dawn/utils/WireHelper.h32
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/BUILD.gn6
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/BufferConsumer.h115
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/BufferConsumer_impl.h72
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/CMakeLists.txt8
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/ChunkedCommandHandler.cpp100
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/ChunkedCommandHandler.h75
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/ChunkedCommandSerializer.cpp29
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/ChunkedCommandSerializer.h157
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/SupportedFeatures.cpp57
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/SupportedFeatures.h4
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/Wire.cpp11
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/WireClient.cpp87
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/WireDeserializeAllocator.cpp68
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/WireDeserializeAllocator.h40
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/WireResult.h8
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/WireServer.cpp127
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/client/Adapter.cpp202
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/client/Adapter.h66
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/client/ApiObjects.h2
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/client/Buffer.cpp669
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/client/Buffer.h165
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/client/Client.cpp263
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/client/Client.h94
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/client/ClientDoers.cpp182
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/client/ClientInlineMemoryTransferService.cpp159
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/client/ClientMemoryTransferService_mock.cpp159
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/client/ClientMemoryTransferService_mock.h138
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/client/Device.cpp505
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/client/Device.h157
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/client/Instance.cpp144
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/client/Instance.h52
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/client/LimitsAndFeatures.cpp66
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/client/LimitsAndFeatures.h29
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/client/ObjectAllocator.h148
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/client/ObjectBase.cpp (renamed from chromium/third_party/dawn/src/tint/ast/uint_literal_expression_test.cc)19
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/client/ObjectBase.h42
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/client/QuerySet.cpp54
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/client/QuerySet.h44
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/client/Queue.cpp135
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/client/Queue.h56
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/client/RequestTracker.h91
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/client/ShaderModule.cpp68
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/client/ShaderModule.h40
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/client/Texture.cpp82
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/client/Texture.h54
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/server/ObjectStorage.h369
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/server/Server.cpp322
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/server/Server.h412
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/server/ServerAdapter.cpp148
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/server/ServerBuffer.cpp438
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/server/ServerDevice.cpp304
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/server/ServerInlineMemoryTransferService.cpp116
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/server/ServerInstance.cpp142
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/server/ServerMemoryTransferService_mock.cpp135
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/server/ServerMemoryTransferService_mock.h144
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/server/ServerQueue.cpp129
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/server/ServerShaderModule.cpp54
-rw-r--r--chromium/third_party/dawn/src/include/dawn/EnumClassBitmasks.h2
-rw-r--r--chromium/third_party/dawn/src/include/dawn/dawn_proc.h2
-rw-r--r--chromium/third_party/dawn/src/include/dawn/dawn_thread_dispatch_proc.h2
-rw-r--r--chromium/third_party/dawn/src/include/dawn/dawn_wsi.h2
-rw-r--r--chromium/third_party/dawn/src/include/dawn/webgpu.h2
-rw-r--r--chromium/third_party/dawn/src/include/dawn_native/D3D12Backend.h2
-rw-r--r--chromium/third_party/dawn/src/include/dawn_native/DawnNative.h2
-rw-r--r--chromium/third_party/dawn/src/include/dawn_native/MetalBackend.h2
-rw-r--r--chromium/third_party/dawn/src/include/dawn_native/NullBackend.h2
-rw-r--r--chromium/third_party/dawn/src/include/dawn_native/OpenGLBackend.h2
-rw-r--r--chromium/third_party/dawn/src/include/dawn_native/VulkanBackend.h2
-rw-r--r--chromium/third_party/dawn/src/include/dawn_native/dawn_native_export.h2
-rw-r--r--chromium/third_party/dawn/src/include/dawn_platform/DawnPlatform.h2
-rw-r--r--chromium/third_party/dawn/src/include/dawn_wire/Wire.h2
-rw-r--r--chromium/third_party/dawn/src/include/dawn_wire/WireClient.h2
-rw-r--r--chromium/third_party/dawn/src/include/dawn_wire/WireServer.h2
-rw-r--r--chromium/third_party/dawn/src/include/dawn_wire/dawn_wire_export.h2
-rw-r--r--chromium/third_party/dawn/src/include/webgpu/webgpu_cpp.h2
-rw-r--r--chromium/third_party/dawn/src/tint/.clang-format2
-rw-r--r--chromium/third_party/dawn/src/tint/BUILD.gn163
-rw-r--r--chromium/third_party/dawn/src/tint/CMakeLists.txt293
-rw-r--r--chromium/third_party/dawn/src/tint/ast/access.cc36
-rw-r--r--chromium/third_party/dawn/src/tint/ast/access.h20
-rw-r--r--chromium/third_party/dawn/src/tint/ast/alias.cc17
-rw-r--r--chromium/third_party/dawn/src/tint/ast/alias.h41
-rw-r--r--chromium/third_party/dawn/src/tint/ast/alias_test.cc8
-rw-r--r--chromium/third_party/dawn/src/tint/ast/array.cc55
-rw-r--r--chromium/third_party/dawn/src/tint/ast/array.h66
-rw-r--r--chromium/third_party/dawn/src/tint/ast/array_test.cc53
-rw-r--r--chromium/third_party/dawn/src/tint/ast/assignment_statement.cc18
-rw-r--r--chromium/third_party/dawn/src/tint/ast/assignment_statement.h53
-rw-r--r--chromium/third_party/dawn/src/tint/ast/assignment_statement_test.cc87
-rw-r--r--chromium/third_party/dawn/src/tint/ast/atomic.cc14
-rw-r--r--chromium/third_party/dawn/src/tint/ast/atomic.h44
-rw-r--r--chromium/third_party/dawn/src/tint/ast/atomic_test.cc12
-rw-r--r--chromium/third_party/dawn/src/tint/ast/attribute.h38
-rw-r--r--chromium/third_party/dawn/src/tint/ast/binary_expression.cc20
-rw-r--r--chromium/third_party/dawn/src/tint/ast/binary_expression.h362
-rw-r--r--chromium/third_party/dawn/src/tint/ast/binary_expression_test.cc87
-rw-r--r--chromium/third_party/dawn/src/tint/ast/binding_attribute.cc12
-rw-r--r--chromium/third_party/dawn/src/tint/ast/binding_attribute.h38
-rw-r--r--chromium/third_party/dawn/src/tint/ast/binding_attribute_test.cc4
-rw-r--r--chromium/third_party/dawn/src/tint/ast/bitcast_expression.cc16
-rw-r--r--chromium/third_party/dawn/src/tint/ast/bitcast_expression.h48
-rw-r--r--chromium/third_party/dawn/src/tint/ast/bitcast_expression_test.cc63
-rw-r--r--chromium/third_party/dawn/src/tint/ast/block_statement.cc20
-rw-r--r--chromium/third_party/dawn/src/tint/ast/block_statement.h52
-rw-r--r--chromium/third_party/dawn/src/tint/ast/block_statement_test.cc50
-rw-r--r--chromium/third_party/dawn/src/tint/ast/bool.cc6
-rw-r--r--chromium/third_party/dawn/src/tint/ast/bool.h36
-rw-r--r--chromium/third_party/dawn/src/tint/ast/bool_literal_expression.cc13
-rw-r--r--chromium/third_party/dawn/src/tint/ast/bool_literal_expression.h35
-rw-r--r--chromium/third_party/dawn/src/tint/ast/bool_literal_expression_test.cc12
-rw-r--r--chromium/third_party/dawn/src/tint/ast/bool_test.cc4
-rw-r--r--chromium/third_party/dawn/src/tint/ast/break_statement.cc9
-rw-r--r--chromium/third_party/dawn/src/tint/ast/break_statement.h26
-rw-r--r--chromium/third_party/dawn/src/tint/ast/break_statement_test.cc12
-rw-r--r--chromium/third_party/dawn/src/tint/ast/builtin.cc114
-rw-r--r--chromium/third_party/dawn/src/tint/ast/builtin.h34
-rw-r--r--chromium/third_party/dawn/src/tint/ast/builtin_attribute.cc8
-rw-r--r--chromium/third_party/dawn/src/tint/ast/builtin_attribute.h38
-rw-r--r--chromium/third_party/dawn/src/tint/ast/builtin_attribute_test.cc4
-rw-r--r--chromium/third_party/dawn/src/tint/ast/builtin_texture_helper_test.cc4395
-rw-r--r--chromium/third_party/dawn/src/tint/ast/builtin_texture_helper_test.h421
-rw-r--r--chromium/third_party/dawn/src/tint/ast/call_expression.cc53
-rw-r--r--chromium/third_party/dawn/src/tint/ast/call_expression.h80
-rw-r--r--chromium/third_party/dawn/src/tint/ast/call_expression_test.cc178
-rw-r--r--chromium/third_party/dawn/src/tint/ast/call_statement.cc16
-rw-r--r--chromium/third_party/dawn/src/tint/ast/call_statement.h36
-rw-r--r--chromium/third_party/dawn/src/tint/ast/call_statement_test.cc37
-rw-r--r--chromium/third_party/dawn/src/tint/ast/case_statement.cc22
-rw-r--r--chromium/third_party/dawn/src/tint/ast/case_statement.h48
-rw-r--r--chromium/third_party/dawn/src/tint/ast/case_statement_test.cc152
-rw-r--r--chromium/third_party/dawn/src/tint/ast/compound_assignment_statement.cc24
-rw-r--r--chromium/third_party/dawn/src/tint/ast/compound_assignment_statement.h63
-rw-r--r--chromium/third_party/dawn/src/tint/ast/compound_assignment_statement_test.cc101
-rw-r--r--chromium/third_party/dawn/src/tint/ast/continue_statement.cc9
-rw-r--r--chromium/third_party/dawn/src/tint/ast/continue_statement.h26
-rw-r--r--chromium/third_party/dawn/src/tint/ast/continue_statement_test.cc12
-rw-r--r--chromium/third_party/dawn/src/tint/ast/depth_multisampled_texture.cc20
-rw-r--r--chromium/third_party/dawn/src/tint/ast/depth_multisampled_texture.h43
-rw-r--r--chromium/third_party/dawn/src/tint/ast/depth_multisampled_texture_test.cc8
-rw-r--r--chromium/third_party/dawn/src/tint/ast/depth_texture.cc16
-rw-r--r--chromium/third_party/dawn/src/tint/ast/depth_texture.h38
-rw-r--r--chromium/third_party/dawn/src/tint/ast/depth_texture_test.cc16
-rw-r--r--chromium/third_party/dawn/src/tint/ast/disable_validation_attribute.cc43
-rw-r--r--chromium/third_party/dawn/src/tint/ast/disable_validation_attribute.h77
-rw-r--r--chromium/third_party/dawn/src/tint/ast/discard_statement.cc9
-rw-r--r--chromium/third_party/dawn/src/tint/ast/discard_statement.h26
-rw-r--r--chromium/third_party/dawn/src/tint/ast/discard_statement_test.cc26
-rw-r--r--chromium/third_party/dawn/src/tint/ast/else_statement.cc45
-rw-r--r--chromium/third_party/dawn/src/tint/ast/else_statement.h59
-rw-r--r--chromium/third_party/dawn/src/tint/ast/else_statement_test.cc92
-rw-r--r--chromium/third_party/dawn/src/tint/ast/enable.cc34
-rw-r--r--chromium/third_party/dawn/src/tint/ast/enable.h59
-rw-r--r--chromium/third_party/dawn/src/tint/ast/enable_test.cc34
-rw-r--r--chromium/third_party/dawn/src/tint/ast/expression.h20
-rw-r--r--chromium/third_party/dawn/src/tint/ast/extension.cc51
-rw-r--r--chromium/third_party/dawn/src/tint/ast/extension.h68
-rw-r--r--chromium/third_party/dawn/src/tint/ast/extension_test.cc36
-rw-r--r--chromium/third_party/dawn/src/tint/ast/external_texture.cc4
-rw-r--r--chromium/third_party/dawn/src/tint/ast/external_texture.h38
-rw-r--r--chromium/third_party/dawn/src/tint/ast/external_texture_test.cc20
-rw-r--r--chromium/third_party/dawn/src/tint/ast/f16.cc38
-rw-r--r--chromium/third_party/dawn/src/tint/ast/f16.h48
-rw-r--r--chromium/third_party/dawn/src/tint/ast/f16_test.cc (renamed from chromium/third_party/dawn/src/tint/ast/sint_literal_expression_test.cc)13
-rw-r--r--chromium/third_party/dawn/src/tint/ast/f32.cc6
-rw-r--r--chromium/third_party/dawn/src/tint/ast/f32.h36
-rw-r--r--chromium/third_party/dawn/src/tint/ast/f32_test.cc4
-rw-r--r--chromium/third_party/dawn/src/tint/ast/fallthrough_statement.cc12
-rw-r--r--chromium/third_party/dawn/src/tint/ast/fallthrough_statement.h29
-rw-r--r--chromium/third_party/dawn/src/tint/ast/fallthrough_statement_test.cc22
-rw-r--r--chromium/third_party/dawn/src/tint/ast/float_literal_expression.cc25
-rw-r--r--chromium/third_party/dawn/src/tint/ast/float_literal_expression.h55
-rw-r--r--chromium/third_party/dawn/src/tint/ast/float_literal_expression_test.cc35
-rw-r--r--chromium/third_party/dawn/src/tint/ast/for_loop_statement.cc34
-rw-r--r--chromium/third_party/dawn/src/tint/ast/for_loop_statement.h60
-rw-r--r--chromium/third_party/dawn/src/tint/ast/for_loop_statement_test.cc105
-rw-r--r--chromium/third_party/dawn/src/tint/ast/function.cc84
-rw-r--r--chromium/third_party/dawn/src/tint/ast/function.h144
-rw-r--r--chromium/third_party/dawn/src/tint/ast/function_test.cc237
-rw-r--r--chromium/third_party/dawn/src/tint/ast/group_attribute.cc8
-rw-r--r--chromium/third_party/dawn/src/tint/ast/group_attribute.h38
-rw-r--r--chromium/third_party/dawn/src/tint/ast/group_attribute_test.cc4
-rw-r--r--chromium/third_party/dawn/src/tint/ast/i32.cc6
-rw-r--r--chromium/third_party/dawn/src/tint/ast/i32.h36
-rw-r--r--chromium/third_party/dawn/src/tint/ast/i32_test.cc4
-rw-r--r--chromium/third_party/dawn/src/tint/ast/id_attribute.cc8
-rw-r--r--chromium/third_party/dawn/src/tint/ast/id_attribute.h38
-rw-r--r--chromium/third_party/dawn/src/tint/ast/id_attribute_test.cc4
-rw-r--r--chromium/third_party/dawn/src/tint/ast/identifier_expression.cc19
-rw-r--r--chromium/third_party/dawn/src/tint/ast/identifier_expression.h39
-rw-r--r--chromium/third_party/dawn/src/tint/ast/identifier_expression_test.cc44
-rw-r--r--chromium/third_party/dawn/src/tint/ast/if_statement.cc35
-rw-r--r--chromium/third_party/dawn/src/tint/ast/if_statement.h63
-rw-r--r--chromium/third_party/dawn/src/tint/ast/if_statement_test.cc101
-rw-r--r--chromium/third_party/dawn/src/tint/ast/increment_decrement_statement.cc16
-rw-r--r--chromium/third_party/dawn/src/tint/ast/increment_decrement_statement.h50
-rw-r--r--chromium/third_party/dawn/src/tint/ast/increment_decrement_statement_test.cc51
-rw-r--r--chromium/third_party/dawn/src/tint/ast/index_accessor_expression.cc24
-rw-r--r--chromium/third_party/dawn/src/tint/ast/index_accessor_expression.h53
-rw-r--r--chromium/third_party/dawn/src/tint/ast/index_accessor_expression_test.cc82
-rw-r--r--chromium/third_party/dawn/src/tint/ast/int_literal_expression.cc26
-rw-r--r--chromium/third_party/dawn/src/tint/ast/int_literal_expression.h58
-rw-r--r--chromium/third_party/dawn/src/tint/ast/int_literal_expression_test.cc35
-rw-r--r--chromium/third_party/dawn/src/tint/ast/internal_attribute.cc2
-rw-r--r--chromium/third_party/dawn/src/tint/ast/internal_attribute.h22
-rw-r--r--chromium/third_party/dawn/src/tint/ast/interpolate_attribute.cc75
-rw-r--r--chromium/third_party/dawn/src/tint/ast/interpolate_attribute.h47
-rw-r--r--chromium/third_party/dawn/src/tint/ast/interpolate_attribute_test.cc8
-rw-r--r--chromium/third_party/dawn/src/tint/ast/invariant_attribute.cc11
-rw-r--r--chromium/third_party/dawn/src/tint/ast/invariant_attribute.h33
-rw-r--r--chromium/third_party/dawn/src/tint/ast/literal_expression.cc3
-rw-r--r--chromium/third_party/dawn/src/tint/ast/literal_expression.h16
-rw-r--r--chromium/third_party/dawn/src/tint/ast/location_attribute.cc12
-rw-r--r--chromium/third_party/dawn/src/tint/ast/location_attribute.h38
-rw-r--r--chromium/third_party/dawn/src/tint/ast/location_attribute_test.cc4
-rw-r--r--chromium/third_party/dawn/src/tint/ast/loop_statement.cc16
-rw-r--r--chromium/third_party/dawn/src/tint/ast/loop_statement.h50
-rw-r--r--chromium/third_party/dawn/src/tint/ast/loop_statement_test.cc87
-rw-r--r--chromium/third_party/dawn/src/tint/ast/matrix.cc34
-rw-r--r--chromium/third_party/dawn/src/tint/ast/matrix.h62
-rw-r--r--chromium/third_party/dawn/src/tint/ast/matrix_test.cc21
-rw-r--r--chromium/third_party/dawn/src/tint/ast/member_accessor_expression.cc33
-rw-r--r--chromium/third_party/dawn/src/tint/ast/member_accessor_expression.h53
-rw-r--r--chromium/third_party/dawn/src/tint/ast/member_accessor_expression_test.cc79
-rw-r--r--chromium/third_party/dawn/src/tint/ast/module.cc138
-rw-r--r--chromium/third_party/dawn/src/tint/ast/module.h179
-rw-r--r--chromium/third_party/dawn/src/tint/ast/module_clone_test.cc102
-rw-r--r--chromium/third_party/dawn/src/tint/ast/module_test.cc169
-rw-r--r--chromium/third_party/dawn/src/tint/ast/multisampled_texture.cc20
-rw-r--r--chromium/third_party/dawn/src/tint/ast/multisampled_texture.h52
-rw-r--r--chromium/third_party/dawn/src/tint/ast/multisampled_texture_test.cc30
-rw-r--r--chromium/third_party/dawn/src/tint/ast/node.h41
-rw-r--r--chromium/third_party/dawn/src/tint/ast/phony_expression.cc9
-rw-r--r--chromium/third_party/dawn/src/tint/ast/phony_expression.h26
-rw-r--r--chromium/third_party/dawn/src/tint/ast/phony_expression_test.cc14
-rw-r--r--chromium/third_party/dawn/src/tint/ast/pipeline_stage.cc36
-rw-r--r--chromium/third_party/dawn/src/tint/ast/pointer.cc30
-rw-r--r--chromium/third_party/dawn/src/tint/ast/pointer.h58
-rw-r--r--chromium/third_party/dawn/src/tint/ast/pointer_test.cc24
-rw-r--r--chromium/third_party/dawn/src/tint/ast/return_statement.cc14
-rw-r--r--chromium/third_party/dawn/src/tint/ast/return_statement.h46
-rw-r--r--chromium/third_party/dawn/src/tint/ast/return_statement_test.cc42
-rw-r--r--chromium/third_party/dawn/src/tint/ast/sampled_texture.cc21
-rw-r--r--chromium/third_party/dawn/src/tint/ast/sampled_texture.h49
-rw-r--r--chromium/third_party/dawn/src/tint/ast/sampled_texture_test.cc28
-rw-r--r--chromium/third_party/dawn/src/tint/ast/sampler.cc27
-rw-r--r--chromium/third_party/dawn/src/tint/ast/sampler.h50
-rw-r--r--chromium/third_party/dawn/src/tint/ast/sampler_test.cc18
-rw-r--r--chromium/third_party/dawn/src/tint/ast/sint_literal_expression.cc41
-rw-r--r--chromium/third_party/dawn/src/tint/ast/sint_literal_expression.h50
-rw-r--r--chromium/third_party/dawn/src/tint/ast/stage_attribute.cc12
-rw-r--r--chromium/third_party/dawn/src/tint/ast/stage_attribute.h40
-rw-r--r--chromium/third_party/dawn/src/tint/ast/stage_attribute_test.cc4
-rw-r--r--chromium/third_party/dawn/src/tint/ast/statement.cc83
-rw-r--r--chromium/third_party/dawn/src/tint/ast/statement.h26
-rw-r--r--chromium/third_party/dawn/src/tint/ast/storage_class.cc50
-rw-r--r--chromium/third_party/dawn/src/tint/ast/storage_class.h22
-rw-r--r--chromium/third_party/dawn/src/tint/ast/storage_texture.cc180
-rw-r--r--chromium/third_party/dawn/src/tint/ast/storage_texture.h118
-rw-r--r--chromium/third_party/dawn/src/tint/ast/storage_texture_test.cc80
-rw-r--r--chromium/third_party/dawn/src/tint/ast/stride_attribute.cc8
-rw-r--r--chromium/third_party/dawn/src/tint/ast/stride_attribute.h38
-rw-r--r--chromium/third_party/dawn/src/tint/ast/stride_attribute_test.cc16
-rw-r--r--chromium/third_party/dawn/src/tint/ast/struct.cc34
-rw-r--r--chromium/third_party/dawn/src/tint/ast/struct.h56
-rw-r--r--chromium/third_party/dawn/src/tint/ast/struct_member.cc26
-rw-r--r--chromium/third_party/dawn/src/tint/ast/struct_member.h52
-rw-r--r--chromium/third_party/dawn/src/tint/ast/struct_member_align_attribute.cc15
-rw-r--r--chromium/third_party/dawn/src/tint/ast/struct_member_align_attribute.h41
-rw-r--r--chromium/third_party/dawn/src/tint/ast/struct_member_align_attribute_test.cc4
-rw-r--r--chromium/third_party/dawn/src/tint/ast/struct_member_offset_attribute.cc11
-rw-r--r--chromium/third_party/dawn/src/tint/ast/struct_member_offset_attribute.h43
-rw-r--r--chromium/third_party/dawn/src/tint/ast/struct_member_offset_attribute_test.cc4
-rw-r--r--chromium/third_party/dawn/src/tint/ast/struct_member_size_attribute.cc15
-rw-r--r--chromium/third_party/dawn/src/tint/ast/struct_member_size_attribute.h41
-rw-r--r--chromium/third_party/dawn/src/tint/ast/struct_member_size_attribute_test.cc4
-rw-r--r--chromium/third_party/dawn/src/tint/ast/struct_member_test.cc101
-rw-r--r--chromium/third_party/dawn/src/tint/ast/struct_test.cc137
-rw-r--r--chromium/third_party/dawn/src/tint/ast/switch_statement.cc22
-rw-r--r--chromium/third_party/dawn/src/tint/ast/switch_statement.h58
-rw-r--r--chromium/third_party/dawn/src/tint/ast/switch_statement_test.cc133
-rw-r--r--chromium/third_party/dawn/src/tint/ast/texture.cc103
-rw-r--r--chromium/third_party/dawn/src/tint/ast/texture.h50
-rw-r--r--chromium/third_party/dawn/src/tint/ast/texture_test.cc28
-rw-r--r--chromium/third_party/dawn/src/tint/ast/traverse_expressions.h207
-rw-r--r--chromium/third_party/dawn/src/tint/ast/traverse_expressions_test.cc373
-rw-r--r--chromium/third_party/dawn/src/tint/ast/type.h30
-rw-r--r--chromium/third_party/dawn/src/tint/ast/type_decl.cc5
-rw-r--r--chromium/third_party/dawn/src/tint/ast/type_decl.h26
-rw-r--r--chromium/third_party/dawn/src/tint/ast/type_name.cc11
-rw-r--r--chromium/third_party/dawn/src/tint/ast/type_name.h46
-rw-r--r--chromium/third_party/dawn/src/tint/ast/u32.cc6
-rw-r--r--chromium/third_party/dawn/src/tint/ast/u32.h36
-rw-r--r--chromium/third_party/dawn/src/tint/ast/u32_test.cc4
-rw-r--r--chromium/third_party/dawn/src/tint/ast/uint_literal_expression.cc41
-rw-r--r--chromium/third_party/dawn/src/tint/ast/uint_literal_expression.h50
-rw-r--r--chromium/third_party/dawn/src/tint/ast/unary_op.cc44
-rw-r--r--chromium/third_party/dawn/src/tint/ast/unary_op.h10
-rw-r--r--chromium/third_party/dawn/src/tint/ast/unary_op_expression.cc12
-rw-r--r--chromium/third_party/dawn/src/tint/ast/unary_op_expression.h50
-rw-r--r--chromium/third_party/dawn/src/tint/ast/unary_op_expression_test.cc51
-rw-r--r--chromium/third_party/dawn/src/tint/ast/variable.cc41
-rw-r--r--chromium/third_party/dawn/src/tint/ast/variable.h138
-rw-r--r--chromium/third_party/dawn/src/tint/ast/variable_decl_statement.cc19
-rw-r--r--chromium/third_party/dawn/src/tint/ast/variable_decl_statement.h41
-rw-r--r--chromium/third_party/dawn/src/tint/ast/variable_decl_statement_test.cc50
-rw-r--r--chromium/third_party/dawn/src/tint/ast/variable_test.cc189
-rw-r--r--chromium/third_party/dawn/src/tint/ast/vector.cc31
-rw-r--r--chromium/third_party/dawn/src/tint/ast/vector.h60
-rw-r--r--chromium/third_party/dawn/src/tint/ast/vector_test.cc14
-rw-r--r--chromium/third_party/dawn/src/tint/ast/void.cc6
-rw-r--r--chromium/third_party/dawn/src/tint/ast/void.h36
-rw-r--r--chromium/third_party/dawn/src/tint/ast/workgroup_attribute.cc14
-rw-r--r--chromium/third_party/dawn/src/tint/ast/workgroup_attribute.h59
-rw-r--r--chromium/third_party/dawn/src/tint/ast/workgroup_attribute_test.cc62
-rw-r--r--chromium/third_party/dawn/src/tint/bench/benchmark.cc130
-rw-r--r--chromium/third_party/dawn/src/tint/bench/benchmark.h42
-rw-r--r--chromium/third_party/dawn/src/tint/builtin_table.cc1169
-rw-r--r--chromium/third_party/dawn/src/tint/builtin_table.h54
-rw-r--r--chromium/third_party/dawn/src/tint/builtin_table.inl9638
-rw-r--r--chromium/third_party/dawn/src/tint/builtin_table_test.cc601
-rw-r--r--chromium/third_party/dawn/src/tint/castable.h909
-rw-r--r--chromium/third_party/dawn/src/tint/castable_bench.cc216
-rw-r--r--chromium/third_party/dawn/src/tint/castable_test.cc1224
-rw-r--r--chromium/third_party/dawn/src/tint/clone_context.cc110
-rw-r--r--chromium/third_party/dawn/src/tint/clone_context.h964
-rw-r--r--chromium/third_party/dawn/src/tint/clone_context_test.cc1542
-rw-r--r--chromium/third_party/dawn/src/tint/cmd/main.cc1779
-rw-r--r--chromium/third_party/dawn/src/tint/debug.cc16
-rw-r--r--chromium/third_party/dawn/src/tint/debug.h87
-rw-r--r--chromium/third_party/dawn/src/tint/debug_test.cc17
-rw-r--r--chromium/third_party/dawn/src/tint/demangler.cc53
-rw-r--r--chromium/third_party/dawn/src/tint/demangler.h23
-rw-r--r--chromium/third_party/dawn/src/tint/demangler_test.cc26
-rw-r--r--chromium/third_party/dawn/src/tint/diagnostic/diagnostic.cc6
-rw-r--r--chromium/third_party/dawn/src/tint/diagnostic/diagnostic.h389
-rw-r--r--chromium/third_party/dawn/src/tint/diagnostic/diagnostic_test.cc20
-rw-r--r--chromium/third_party/dawn/src/tint/diagnostic/formatter.cc406
-rw-r--r--chromium/third_party/dawn/src/tint/diagnostic/formatter.h60
-rw-r--r--chromium/third_party/dawn/src/tint/diagnostic/formatter_test.cc284
-rw-r--r--chromium/third_party/dawn/src/tint/diagnostic/printer.cc4
-rw-r--r--chromium/third_party/dawn/src/tint/diagnostic/printer.h64
-rw-r--r--chromium/third_party/dawn/src/tint/diagnostic/printer_linux.cc117
-rw-r--r--chromium/third_party/dawn/src/tint/diagnostic/printer_other.cc16
-rw-r--r--chromium/third_party/dawn/src/tint/diagnostic/printer_test.cc88
-rw-r--r--chromium/third_party/dawn/src/tint/diagnostic/printer_windows.cc133
-rw-r--r--chromium/third_party/dawn/src/tint/fuzzers/BUILD.gn3
-rw-r--r--chromium/third_party/dawn/src/tint/fuzzers/tint_ast_fuzzer/BUILD.gn6
-rw-r--r--chromium/third_party/dawn/src/tint/inspector/entry_point.cc46
-rw-r--r--chromium/third_party/dawn/src/tint/inspector/entry_point.h223
-rw-r--r--chromium/third_party/dawn/src/tint/inspector/inspector.cc1378
-rw-r--r--chromium/third_party/dawn/src/tint/inspector/inspector.h380
-rw-r--r--chromium/third_party/dawn/src/tint/inspector/inspector_test.cc4175
-rw-r--r--chromium/third_party/dawn/src/tint/inspector/resource_binding.cc159
-rw-r--r--chromium/third_party/dawn/src/tint/inspector/resource_binding.h160
-rw-r--r--chromium/third_party/dawn/src/tint/inspector/scalar.cc26
-rw-r--r--chromium/third_party/dawn/src/tint/inspector/scalar.h90
-rw-r--r--chromium/third_party/dawn/src/tint/inspector/test_inspector_builder.cc415
-rw-r--r--chromium/third_party/dawn/src/tint/inspector/test_inspector_builder.h669
-rw-r--r--chromium/third_party/dawn/src/tint/inspector/test_inspector_runner.cc21
-rw-r--r--chromium/third_party/dawn/src/tint/inspector/test_inspector_runner.h34
-rw-r--r--chromium/third_party/dawn/src/tint/intrinsics.def (renamed from chromium/third_party/dawn/src/tint/builtins.def)558
-rw-r--r--chromium/third_party/dawn/src/tint/number.cc57
-rw-r--r--chromium/third_party/dawn/src/tint/number.h415
-rw-r--r--chromium/third_party/dawn/src/tint/number_test.cc308
-rw-r--r--chromium/third_party/dawn/src/tint/program.cc106
-rw-r--r--chromium/third_party/dawn/src/tint/program.h262
-rw-r--r--chromium/third_party/dawn/src/tint/program_builder.cc106
-rw-r--r--chromium/third_party/dawn/src/tint/program_builder.h5038
-rw-r--r--chromium/third_party/dawn/src/tint/program_builder_test.cc72
-rw-r--r--chromium/third_party/dawn/src/tint/program_id.cc18
-rw-r--r--chromium/third_party/dawn/src/tint/program_id.h72
-rw-r--r--chromium/third_party/dawn/src/tint/program_test.cc104
-rw-r--r--chromium/third_party/dawn/src/tint/reader/reader.h50
-rw-r--r--chromium/third_party/dawn/src/tint/reader/spirv/construct.cc13
-rw-r--r--chromium/third_party/dawn/src/tint/reader/spirv/construct.h261
-rw-r--r--chromium/third_party/dawn/src/tint/reader/spirv/entry_point_info.h106
-rw-r--r--chromium/third_party/dawn/src/tint/reader/spirv/enum_converter.cc274
-rw-r--r--chromium/third_party/dawn/src/tint/reader/spirv/enum_converter.h76
-rw-r--r--chromium/third_party/dawn/src/tint/reader/spirv/enum_converter_test.cc490
-rw-r--r--chromium/third_party/dawn/src/tint/reader/spirv/fail_stream.h71
-rw-r--r--chromium/third_party/dawn/src/tint/reader/spirv/fail_stream_test.cc54
-rw-r--r--chromium/third_party/dawn/src/tint/reader/spirv/function.cc10539
-rw-r--r--chromium/third_party/dawn/src/tint/reader/spirv/function.h2370
-rw-r--r--chromium/third_party/dawn/src/tint/reader/spirv/function_arithmetic_test.cc1029
-rw-r--r--chromium/third_party/dawn/src/tint/reader/spirv/function_bit_test.cc940
-rw-r--r--chromium/third_party/dawn/src/tint/reader/spirv/function_call_test.cc97
-rw-r--r--chromium/third_party/dawn/src/tint/reader/spirv/function_cfg_test.cc7007
-rw-r--r--chromium/third_party/dawn/src/tint/reader/spirv/function_composite_test.cc667
-rw-r--r--chromium/third_party/dawn/src/tint/reader/spirv/function_conversion_test.cc478
-rw-r--r--chromium/third_party/dawn/src/tint/reader/spirv/function_decl_test.cc75
-rw-r--r--chromium/third_party/dawn/src/tint/reader/spirv/function_glsl_std_450_test.cc1071
-rw-r--r--chromium/third_party/dawn/src/tint/reader/spirv/function_logical_test.cc997
-rw-r--r--chromium/third_party/dawn/src/tint/reader/spirv/function_memory_test.cc689
-rw-r--r--chromium/third_party/dawn/src/tint/reader/spirv/function_misc_test.cc221
-rw-r--r--chromium/third_party/dawn/src/tint/reader/spirv/function_var_test.cc835
-rw-r--r--chromium/third_party/dawn/src/tint/reader/spirv/namer.cc271
-rw-r--r--chromium/third_party/dawn/src/tint/reader/spirv/namer.h240
-rw-r--r--chromium/third_party/dawn/src/tint/reader/spirv/namer_test.cc413
-rw-r--r--chromium/third_party/dawn/src/tint/reader/spirv/parser.cc58
-rw-r--r--chromium/third_party/dawn/src/tint/reader/spirv/parser_impl.cc4744
-rw-r--r--chromium/third_party/dawn/src/tint/reader/spirv/parser_impl.h1579
-rw-r--r--chromium/third_party/dawn/src/tint/reader/spirv/parser_impl_barrier_test.cc110
-rw-r--r--chromium/third_party/dawn/src/tint/reader/spirv/parser_impl_convert_member_decoration_test.cc175
-rw-r--r--chromium/third_party/dawn/src/tint/reader/spirv/parser_impl_convert_type_test.cc946
-rw-r--r--chromium/third_party/dawn/src/tint/reader/spirv/parser_impl_function_decl_test.cc332
-rw-r--r--chromium/third_party/dawn/src/tint/reader/spirv/parser_impl_get_decorations_test.cc231
-rw-r--r--chromium/third_party/dawn/src/tint/reader/spirv/parser_impl_handle_test.cc2504
-rw-r--r--chromium/third_party/dawn/src/tint/reader/spirv/parser_impl_import_test.cc58
-rw-r--r--chromium/third_party/dawn/src/tint/reader/spirv/parser_impl_module_var_test.cc2756
-rw-r--r--chromium/third_party/dawn/src/tint/reader/spirv/parser_impl_named_types_test.cc71
-rw-r--r--chromium/third_party/dawn/src/tint/reader/spirv/parser_impl_test.cc159
-rw-r--r--chromium/third_party/dawn/src/tint/reader/spirv/parser_impl_test_helper.cc77
-rw-r--r--chromium/third_party/dawn/src/tint/reader/spirv/parser_impl_test_helper.h462
-rw-r--r--chromium/third_party/dawn/src/tint/reader/spirv/parser_impl_user_name_test.cc160
-rw-r--r--chromium/third_party/dawn/src/tint/reader/spirv/parser_test.cc10
-rw-r--r--chromium/third_party/dawn/src/tint/reader/spirv/parser_type.cc489
-rw-r--r--chromium/third_party/dawn/src/tint/reader/spirv/parser_type.h747
-rw-r--r--chromium/third_party/dawn/src/tint/reader/spirv/parser_type_test.cc133
-rw-r--r--chromium/third_party/dawn/src/tint/reader/spirv/spirv_tools_helpers_test.cc62
-rw-r--r--chromium/third_party/dawn/src/tint/reader/spirv/usage.cc223
-rw-r--r--chromium/third_party/dawn/src/tint/reader/spirv/usage.h174
-rw-r--r--chromium/third_party/dawn/src/tint/reader/spirv/usage_test.cc454
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/lexer.cc2025
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/lexer.h129
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/lexer_test.cc1533
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/parser.cc6
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/parser_bench.cc22
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl.cc4657
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl.h1615
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_additive_expression_test.cc101
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_and_expression_test.cc73
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_argument_expression_list_test.cc106
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_assignment_stmt_test.cc272
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_body_stmt_test.cc44
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_break_stmt_test.cc12
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_bug_cases_test.cc8
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_call_stmt_test.cc120
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_case_body_test.cc68
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_const_expr_test.cc225
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_const_literal_test.cc1027
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_continue_stmt_test.cc12
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_continuing_stmt_test.cc28
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_depth_texture_test.cc95
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_depth_texture_type_test.cc95
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_detail.h36
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_elseif_stmt_test.cc66
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_enable_directive_test.cc167
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_equality_expression_test.cc118
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_error_msg_test.cc638
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_error_resync_test.cc52
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_exclusive_or_expression_test.cc73
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_external_texture_test.cc (renamed from chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_external_texture_type_test.cc)20
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_for_stmt_test.cc342
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_function_attribute_list_test.cc95
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_function_attribute_test.cc411
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_function_decl_test.cc496
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_function_header_test.cc160
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_global_constant_decl_test.cc290
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_global_decl_test.cc194
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_global_variable_decl_test.cc253
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_if_stmt_test.cc188
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_inclusive_or_expression_test.cc73
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_increment_decrement_stmt_test.cc192
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_logical_and_expression_test.cc73
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_logical_or_expression_test.cc73
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_loop_stmt_test.cc128
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_multiplicative_expression_test.cc147
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_param_list_test.cc184
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_paren_rhs_stmt_test.cc60
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_pipeline_stage_test.cc51
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_primary_expression_test.cc412
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_relational_expression_test.cc192
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_reserved_keyword_test.cc91
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_sampled_texture_test.cc83
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_sampled_texture_type_test.cc83
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_sampler_test.cc (renamed from chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_sampler_type_test.cc)48
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_shift_expression_test.cc142
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_singular_expression_test.cc323
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_statement_test.cc348
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_statements_test.cc24
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_storage_class_test.cc53
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_storage_texture_test.cc65
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_storage_texture_type_test.cc65
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_struct_attribute_decl_test.cc71
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_struct_body_decl_test.cc92
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_struct_decl_test.cc140
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_struct_member_attribute_decl_test.cc43
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_struct_member_attribute_test.cc168
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_struct_member_test.cc117
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_switch_body_test.cc378
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_switch_stmt_test.cc144
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_test.cc78
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_test_helper.h63
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_texel_format_test.cc170
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_texture_sampler_test.cc261
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_texture_sampler_types_test.cc263
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_type_alias_test.cc112
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_type_decl_test.cc860
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_unary_expression_test.cc268
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_variable_attribute_list_test.cc61
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_variable_attribute_test.cc551
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_variable_decl_test.cc126
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_variable_ident_decl_test.cc72
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_variable_qualifier_test.cc121
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_variable_stmt_test.cc266
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/parser_test.cc32
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/token.cc555
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/token.h731
-rw-r--r--chromium/third_party/dawn/src/tint/reader/wgsl/token_test.cc75
-rw-r--r--chromium/third_party/dawn/src/tint/resolver/array_accessor_test.cc398
-rw-r--r--chromium/third_party/dawn/src/tint/resolver/assignment_validation_test.cc593
-rw-r--r--chromium/third_party/dawn/src/tint/resolver/atomics_test.cc63
-rw-r--r--chromium/third_party/dawn/src/tint/resolver/atomics_validation_test.cc476
-rw-r--r--chromium/third_party/dawn/src/tint/resolver/attribute_validation_test.cc1921
-rw-r--r--chromium/third_party/dawn/src/tint/resolver/bitcast_validation_test.cc242
-rw-r--r--chromium/third_party/dawn/src/tint/resolver/builtin_test.cc2383
-rw-r--r--chromium/third_party/dawn/src/tint/resolver/builtin_validation_test.cc577
-rw-r--r--chromium/third_party/dawn/src/tint/resolver/builtins_validation_test.cc1645
-rw-r--r--chromium/third_party/dawn/src/tint/resolver/call_test.cc57
-rw-r--r--chromium/third_party/dawn/src/tint/resolver/call_validation_test.cc409
-rw-r--r--chromium/third_party/dawn/src/tint/resolver/compound_assignment_validation_test.cc337
-rw-r--r--chromium/third_party/dawn/src/tint/resolver/compound_statement_test.cc670
-rw-r--r--chromium/third_party/dawn/src/tint/resolver/const_eval.cc19
-rw-r--r--chromium/third_party/dawn/src/tint/resolver/const_eval.h37
-rw-r--r--chromium/third_party/dawn/src/tint/resolver/control_block_validation_test.cc567
-rw-r--r--chromium/third_party/dawn/src/tint/resolver/ctor_conv_intrinsic.cc70
-rw-r--r--chromium/third_party/dawn/src/tint/resolver/ctor_conv_intrinsic.cc.tmpl28
-rw-r--r--chromium/third_party/dawn/src/tint/resolver/ctor_conv_intrinsic.h100
-rw-r--r--chromium/third_party/dawn/src/tint/resolver/ctor_conv_intrinsic.h.tmpl73
-rw-r--r--chromium/third_party/dawn/src/tint/resolver/dependency_graph.cc1164
-rw-r--r--chromium/third_party/dawn/src/tint/resolver/dependency_graph.h54
-rw-r--r--chromium/third_party/dawn/src/tint/resolver/dependency_graph_test.cc1731
-rw-r--r--chromium/third_party/dawn/src/tint/resolver/entry_point_validation_test.cc1067
-rw-r--r--chromium/third_party/dawn/src/tint/resolver/function_validation_test.cc1109
-rw-r--r--chromium/third_party/dawn/src/tint/resolver/host_shareable_validation_test.cc154
-rw-r--r--chromium/third_party/dawn/src/tint/resolver/increment_decrement_validation_test.cc262
-rw-r--r--chromium/third_party/dawn/src/tint/resolver/inferred_type_test.cc121
-rw-r--r--chromium/third_party/dawn/src/tint/resolver/intrinsic_table.cc1649
-rw-r--r--chromium/third_party/dawn/src/tint/resolver/intrinsic_table.h118
-rw-r--r--chromium/third_party/dawn/src/tint/resolver/intrinsic_table.inl14214
-rw-r--r--chromium/third_party/dawn/src/tint/resolver/intrinsic_table.inl.tmpl (renamed from chromium/third_party/dawn/src/tint/builtin_table.inl.tmpl)182
-rw-r--r--chromium/third_party/dawn/src/tint/resolver/intrinsic_table_test.cc1254
-rw-r--r--chromium/third_party/dawn/src/tint/resolver/is_host_shareable_test.cc110
-rw-r--r--chromium/third_party/dawn/src/tint/resolver/is_storeable_test.cc149
-rw-r--r--chromium/third_party/dawn/src/tint/resolver/materialize_test.cc963
-rw-r--r--chromium/third_party/dawn/src/tint/resolver/pipeline_overridable_constant_test.cc103
-rw-r--r--chromium/third_party/dawn/src/tint/resolver/ptr_ref_test.cc147
-rw-r--r--chromium/third_party/dawn/src/tint/resolver/ptr_ref_validation_test.cc190
-rw-r--r--chromium/third_party/dawn/src/tint/resolver/resolver.cc4682
-rw-r--r--chromium/third_party/dawn/src/tint/resolver/resolver.h803
-rw-r--r--chromium/third_party/dawn/src/tint/resolver/resolver_behavior_test.cc668
-rw-r--r--chromium/third_party/dawn/src/tint/resolver/resolver_constants.cc323
-rw-r--r--chromium/third_party/dawn/src/tint/resolver/resolver_constants_test.cc764
-rw-r--r--chromium/third_party/dawn/src/tint/resolver/resolver_is_storeable_test.cc78
-rw-r--r--chromium/third_party/dawn/src/tint/resolver/resolver_test.cc2759
-rw-r--r--chromium/third_party/dawn/src/tint/resolver/resolver_test_helper.h779
-rw-r--r--chromium/third_party/dawn/src/tint/resolver/resolver_validation.cc2428
-rw-r--r--chromium/third_party/dawn/src/tint/resolver/sem_helper.cc39
-rw-r--r--chromium/third_party/dawn/src/tint/resolver/sem_helper.h82
-rw-r--r--chromium/third_party/dawn/src/tint/resolver/side_effects_test.cc540
-rw-r--r--chromium/third_party/dawn/src/tint/resolver/source_variable_test.cc291
-rw-r--r--chromium/third_party/dawn/src/tint/resolver/storage_class_layout_validation_test.cc778
-rw-r--r--chromium/third_party/dawn/src/tint/resolver/storage_class_validation_test.cc465
-rw-r--r--chromium/third_party/dawn/src/tint/resolver/struct_layout_test.cc670
-rw-r--r--chromium/third_party/dawn/src/tint/resolver/struct_pipeline_stage_use_test.cc181
-rw-r--r--chromium/third_party/dawn/src/tint/resolver/struct_storage_class_use_test.cc195
-rw-r--r--chromium/third_party/dawn/src/tint/resolver/type_constructor_validation_test.cc3851
-rw-r--r--chromium/third_party/dawn/src/tint/resolver/type_validation_test.cc1357
-rw-r--r--chromium/third_party/dawn/src/tint/resolver/uniformity.cc1567
-rw-r--r--chromium/third_party/dawn/src/tint/resolver/uniformity.h36
-rw-r--r--chromium/third_party/dawn/src/tint/resolver/uniformity_test.cc6422
-rw-r--r--chromium/third_party/dawn/src/tint/resolver/validation_test.cc1813
-rw-r--r--chromium/third_party/dawn/src/tint/resolver/validator.cc2277
-rw-r--r--chromium/third_party/dawn/src/tint/resolver/validator.h438
-rw-r--r--chromium/third_party/dawn/src/tint/resolver/validator_is_storeable_test.cc86
-rw-r--r--chromium/third_party/dawn/src/tint/resolver/var_let_test.cc1165
-rw-r--r--chromium/third_party/dawn/src/tint/resolver/var_let_validation_test.cc422
-rw-r--r--chromium/third_party/dawn/src/tint/scope_stack.h90
-rw-r--r--chromium/third_party/dawn/src/tint/scope_stack_test.cc78
-rw-r--r--chromium/third_party/dawn/src/tint/sem/abstract_float.cc40
-rw-r--r--chromium/third_party/dawn/src/tint/sem/abstract_float.h49
-rw-r--r--chromium/third_party/dawn/src/tint/sem/abstract_int.cc40
-rw-r--r--chromium/third_party/dawn/src/tint/sem/abstract_int.h49
-rw-r--r--chromium/third_party/dawn/src/tint/sem/abstract_numeric.cc37
-rw-r--r--chromium/third_party/dawn/src/tint/sem/abstract_numeric.h47
-rw-r--r--chromium/third_party/dawn/src/tint/sem/array.cc45
-rw-r--r--chromium/third_party/dawn/src/tint/sem/array.h157
-rw-r--r--chromium/third_party/dawn/src/tint/sem/atomic.cc (renamed from chromium/third_party/dawn/src/tint/sem/atomic_type.cc)28
-rw-r--r--chromium/third_party/dawn/src/tint/sem/atomic.h66
-rw-r--r--chromium/third_party/dawn/src/tint/sem/atomic_test.cc (renamed from chromium/third_party/dawn/src/tint/sem/atomic_type_test.cc)40
-rw-r--r--chromium/third_party/dawn/src/tint/sem/atomic_type.h66
-rw-r--r--chromium/third_party/dawn/src/tint/sem/behavior.cc30
-rw-r--r--chromium/third_party/dawn/src/tint/sem/behavior.h12
-rw-r--r--chromium/third_party/dawn/src/tint/sem/binding_point.h43
-rw-r--r--chromium/third_party/dawn/src/tint/sem/block_statement.cc19
-rw-r--r--chromium/third_party/dawn/src/tint/sem/block_statement.h140
-rw-r--r--chromium/third_party/dawn/src/tint/sem/bool.cc (renamed from chromium/third_party/dawn/src/tint/sem/bool_type.cc)14
-rw-r--r--chromium/third_party/dawn/src/tint/sem/bool.h68
-rw-r--r--chromium/third_party/dawn/src/tint/sem/bool_test.cc (renamed from chromium/third_party/dawn/src/tint/sem/bool_type_test.cc)26
-rw-r--r--chromium/third_party/dawn/src/tint/sem/bool_type.h68
-rw-r--r--chromium/third_party/dawn/src/tint/sem/builtin.cc124
-rw-r--r--chromium/third_party/dawn/src/tint/sem/builtin.h128
-rw-r--r--chromium/third_party/dawn/src/tint/sem/builtin_test.cc171
-rw-r--r--chromium/third_party/dawn/src/tint/sem/builtin_type.cc1082
-rw-r--r--chromium/third_party/dawn/src/tint/sem/builtin_type.cc.tmpl32
-rw-r--r--chromium/third_party/dawn/src/tint/sem/builtin_type.h220
-rw-r--r--chromium/third_party/dawn/src/tint/sem/builtin_type.h.tmpl8
-rw-r--r--chromium/third_party/dawn/src/tint/sem/call.cc6
-rw-r--r--chromium/third_party/dawn/src/tint/sem/call.h56
-rw-r--r--chromium/third_party/dawn/src/tint/sem/call_target.cc49
-rw-r--r--chromium/third_party/dawn/src/tint/sem/call_target.h94
-rw-r--r--chromium/third_party/dawn/src/tint/sem/constant.cc105
-rw-r--r--chromium/third_party/dawn/src/tint/sem/constant.h264
-rw-r--r--chromium/third_party/dawn/src/tint/sem/constant_test.cc304
-rw-r--r--chromium/third_party/dawn/src/tint/sem/depth_multisampled_texture.cc (renamed from chromium/third_party/dawn/src/tint/sem/depth_multisampled_texture_type.cc)29
-rw-r--r--chromium/third_party/dawn/src/tint/sem/depth_multisampled_texture.h49
-rw-r--r--chromium/third_party/dawn/src/tint/sem/depth_multisampled_texture_test.cc62
-rw-r--r--chromium/third_party/dawn/src/tint/sem/depth_multisampled_texture_type.h50
-rw-r--r--chromium/third_party/dawn/src/tint/sem/depth_multisampled_texture_type_test.cc62
-rw-r--r--chromium/third_party/dawn/src/tint/sem/depth_texture.cc (renamed from chromium/third_party/dawn/src/tint/sem/depth_texture_type.cc)26
-rw-r--r--chromium/third_party/dawn/src/tint/sem/depth_texture.h49
-rw-r--r--chromium/third_party/dawn/src/tint/sem/depth_texture_test.cc76
-rw-r--r--chromium/third_party/dawn/src/tint/sem/depth_texture_type.h49
-rw-r--r--chromium/third_party/dawn/src/tint/sem/depth_texture_type_test.cc76
-rw-r--r--chromium/third_party/dawn/src/tint/sem/expression.cc15
-rw-r--r--chromium/third_party/dawn/src/tint/sem/expression.h109
-rw-r--r--chromium/third_party/dawn/src/tint/sem/expression_test.cc39
-rw-r--r--chromium/third_party/dawn/src/tint/sem/external_texture.cc (renamed from chromium/third_party/dawn/src/tint/sem/external_texture_type.cc)8
-rw-r--r--chromium/third_party/dawn/src/tint/sem/external_texture.h49
-rw-r--r--chromium/third_party/dawn/src/tint/sem/external_texture_test.cc70
-rw-r--r--chromium/third_party/dawn/src/tint/sem/external_texture_type.h49
-rw-r--r--chromium/third_party/dawn/src/tint/sem/external_texture_type_test.cc70
-rw-r--r--chromium/third_party/dawn/src/tint/sem/f16.cc55
-rw-r--r--chromium/third_party/dawn/src/tint/sem/f16.h58
-rw-r--r--chromium/third_party/dawn/src/tint/sem/f16_test.cc48
-rw-r--r--chromium/third_party/dawn/src/tint/sem/f32.cc (renamed from chromium/third_party/dawn/src/tint/sem/f32_type.cc)14
-rw-r--r--chromium/third_party/dawn/src/tint/sem/f32.h58
-rw-r--r--chromium/third_party/dawn/src/tint/sem/f32_test.cc (renamed from chromium/third_party/dawn/src/tint/sem/f32_type_test.cc)26
-rw-r--r--chromium/third_party/dawn/src/tint/sem/f32_type.h58
-rw-r--r--chromium/third_party/dawn/src/tint/sem/for_loop_statement.cc2
-rw-r--r--chromium/third_party/dawn/src/tint/sem/for_loop_statement.h51
-rw-r--r--chromium/third_party/dawn/src/tint/sem/function.cc211
-rw-r--r--chromium/third_party/dawn/src/tint/sem/function.h452
-rw-r--r--chromium/third_party/dawn/src/tint/sem/i32.cc (renamed from chromium/third_party/dawn/src/tint/sem/i32_type.cc)14
-rw-r--r--chromium/third_party/dawn/src/tint/sem/i32.h58
-rw-r--r--chromium/third_party/dawn/src/tint/sem/i32_test.cc (renamed from chromium/third_party/dawn/src/tint/sem/i32_type_test.cc)26
-rw-r--r--chromium/third_party/dawn/src/tint/sem/i32_type.h58
-rw-r--r--chromium/third_party/dawn/src/tint/sem/if_statement.cc14
-rw-r--r--chromium/third_party/dawn/src/tint/sem/if_statement.h70
-rw-r--r--chromium/third_party/dawn/src/tint/sem/info.h161
-rw-r--r--chromium/third_party/dawn/src/tint/sem/loop_statement.cc15
-rw-r--r--chromium/third_party/dawn/src/tint/sem/loop_statement.h44
-rw-r--r--chromium/third_party/dawn/src/tint/sem/materialize.cc36
-rw-r--r--chromium/third_party/dawn/src/tint/sem/materialize.h48
-rw-r--r--chromium/third_party/dawn/src/tint/sem/matrix.cc (renamed from chromium/third_party/dawn/src/tint/sem/matrix_type.cc)39
-rw-r--r--chromium/third_party/dawn/src/tint/sem/matrix.h85
-rw-r--r--chromium/third_party/dawn/src/tint/sem/matrix_test.cc75
-rw-r--r--chromium/third_party/dawn/src/tint/sem/matrix_type.h85
-rw-r--r--chromium/third_party/dawn/src/tint/sem/matrix_type_test.cc75
-rw-r--r--chromium/third_party/dawn/src/tint/sem/member_accessor_expression.cc31
-rw-r--r--chromium/third_party/dawn/src/tint/sem/member_accessor_expression.h122
-rw-r--r--chromium/third_party/dawn/src/tint/sem/module.cc4
-rw-r--r--chromium/third_party/dawn/src/tint/sem/module.h37
-rw-r--r--chromium/third_party/dawn/src/tint/sem/multisampled_texture.cc (renamed from chromium/third_party/dawn/src/tint/sem/multisampled_texture_type.cc)28
-rw-r--r--chromium/third_party/dawn/src/tint/sem/multisampled_texture.h56
-rw-r--r--chromium/third_party/dawn/src/tint/sem/multisampled_texture_test.cc89
-rw-r--r--chromium/third_party/dawn/src/tint/sem/multisampled_texture_type.h57
-rw-r--r--chromium/third_party/dawn/src/tint/sem/multisampled_texture_type_test.cc101
-rw-r--r--chromium/third_party/dawn/src/tint/sem/node.h14
-rw-r--r--chromium/third_party/dawn/src/tint/sem/parameter_usage.cc86
-rw-r--r--chromium/third_party/dawn/src/tint/sem/parameter_usage.cc.tmpl16
-rw-r--r--chromium/third_party/dawn/src/tint/sem/parameter_usage.h43
-rw-r--r--chromium/third_party/dawn/src/tint/sem/parameter_usage.h.tmpl6
-rw-r--r--chromium/third_party/dawn/src/tint/sem/pointer.cc (renamed from chromium/third_party/dawn/src/tint/sem/pointer_type.cc)41
-rw-r--r--chromium/third_party/dawn/src/tint/sem/pointer.h68
-rw-r--r--chromium/third_party/dawn/src/tint/sem/pointer_test.cc78
-rw-r--r--chromium/third_party/dawn/src/tint/sem/pointer_type.h70
-rw-r--r--chromium/third_party/dawn/src/tint/sem/pointer_type_test.cc95
-rw-r--r--chromium/third_party/dawn/src/tint/sem/reference.cc (renamed from chromium/third_party/dawn/src/tint/sem/reference_type.cc)39
-rw-r--r--chromium/third_party/dawn/src/tint/sem/reference.h68
-rw-r--r--chromium/third_party/dawn/src/tint/sem/reference_test.cc90
-rw-r--r--chromium/third_party/dawn/src/tint/sem/reference_type.h70
-rw-r--r--chromium/third_party/dawn/src/tint/sem/reference_type_test.cc95
-rw-r--r--chromium/third_party/dawn/src/tint/sem/sampled_texture.cc (renamed from chromium/third_party/dawn/src/tint/sem/sampled_texture_type.cc)21
-rw-r--r--chromium/third_party/dawn/src/tint/sem/sampled_texture.h56
-rw-r--r--chromium/third_party/dawn/src/tint/sem/sampled_texture_test.cc93
-rw-r--r--chromium/third_party/dawn/src/tint/sem/sampled_texture_type.h56
-rw-r--r--chromium/third_party/dawn/src/tint/sem/sampled_texture_type_test.cc93
-rw-r--r--chromium/third_party/dawn/src/tint/sem/sampler.cc (renamed from chromium/third_party/dawn/src/tint/sem/sampler_type.cc)14
-rw-r--r--chromium/third_party/dawn/src/tint/sem/sampler.h59
-rw-r--r--chromium/third_party/dawn/src/tint/sem/sampler_test.cc69
-rw-r--r--chromium/third_party/dawn/src/tint/sem/sampler_texture_pair.h46
-rw-r--r--chromium/third_party/dawn/src/tint/sem/sampler_type.h61
-rw-r--r--chromium/third_party/dawn/src/tint/sem/sampler_type_test.cc69
-rw-r--r--chromium/third_party/dawn/src/tint/sem/sem_array_test.cc164
-rw-r--r--chromium/third_party/dawn/src/tint/sem/sem_struct_test.cc127
-rw-r--r--chromium/third_party/dawn/src/tint/sem/statement.cc2
-rw-r--r--chromium/third_party/dawn/src/tint/sem/statement.h219
-rw-r--r--chromium/third_party/dawn/src/tint/sem/storage_texture.cc85
-rw-r--r--chromium/third_party/dawn/src/tint/sem/storage_texture.h82
-rw-r--r--chromium/third_party/dawn/src/tint/sem/storage_texture_test.cc147
-rw-r--r--chromium/third_party/dawn/src/tint/sem/storage_texture_type.cc89
-rw-r--r--chromium/third_party/dawn/src/tint/sem/storage_texture_type.h82
-rw-r--r--chromium/third_party/dawn/src/tint/sem/storage_texture_type_test.cc154
-rw-r--r--chromium/third_party/dawn/src/tint/sem/struct.cc170
-rw-r--r--chromium/third_party/dawn/src/tint/sem/struct.h342
-rw-r--r--chromium/third_party/dawn/src/tint/sem/switch_statement.cc12
-rw-r--r--chromium/third_party/dawn/src/tint/sem/switch_statement.h85
-rw-r--r--chromium/third_party/dawn/src/tint/sem/test_helper.h34
-rw-r--r--chromium/third_party/dawn/src/tint/sem/texture.cc (renamed from chromium/third_party/dawn/src/tint/sem/texture_type.cc)2
-rw-r--r--chromium/third_party/dawn/src/tint/sem/texture.h (renamed from chromium/third_party/dawn/src/tint/sem/texture_type.h)32
-rw-r--r--chromium/third_party/dawn/src/tint/sem/texture_test.cc (renamed from chromium/third_party/dawn/src/tint/sem/texture_type_test.cc)16
-rw-r--r--chromium/third_party/dawn/src/tint/sem/type.cc195
-rw-r--r--chromium/third_party/dawn/src/tint/sem/type.h220
-rw-r--r--chromium/third_party/dawn/src/tint/sem/type_constructor.cc3
-rw-r--r--chromium/third_party/dawn/src/tint/sem/type_constructor.h14
-rw-r--r--chromium/third_party/dawn/src/tint/sem/type_conversion.cc3
-rw-r--r--chromium/third_party/dawn/src/tint/sem/type_conversion.h22
-rw-r--r--chromium/third_party/dawn/src/tint/sem/type_manager.h62
-rw-r--r--chromium/third_party/dawn/src/tint/sem/type_manager_test.cc70
-rw-r--r--chromium/third_party/dawn/src/tint/sem/type_mappings.h41
-rw-r--r--chromium/third_party/dawn/src/tint/sem/type_test.cc393
-rw-r--r--chromium/third_party/dawn/src/tint/sem/u32.cc (renamed from chromium/third_party/dawn/src/tint/sem/u32_type.cc)14
-rw-r--r--chromium/third_party/dawn/src/tint/sem/u32.h58
-rw-r--r--chromium/third_party/dawn/src/tint/sem/u32_test.cc (renamed from chromium/third_party/dawn/src/tint/sem/u32_type_test.cc)26
-rw-r--r--chromium/third_party/dawn/src/tint/sem/u32_type.h58
-rw-r--r--chromium/third_party/dawn/src/tint/sem/variable.cc13
-rw-r--r--chromium/third_party/dawn/src/tint/sem/variable.h378
-rw-r--r--chromium/third_party/dawn/src/tint/sem/vector.cc (renamed from chromium/third_party/dawn/src/tint/sem/vector_type.cc)63
-rw-r--r--chromium/third_party/dawn/src/tint/sem/vector.h79
-rw-r--r--chromium/third_party/dawn/src/tint/sem/vector_test.cc67
-rw-r--r--chromium/third_party/dawn/src/tint/sem/vector_type.h79
-rw-r--r--chromium/third_party/dawn/src/tint/sem/vector_type_test.cc67
-rw-r--r--chromium/third_party/dawn/src/tint/sem/void.cc (renamed from chromium/third_party/dawn/src/tint/sem/void_type.cc)8
-rw-r--r--chromium/third_party/dawn/src/tint/sem/void.h (renamed from chromium/third_party/dawn/src/tint/sem/void_type.h)42
-rw-r--r--chromium/third_party/dawn/src/tint/source.cc201
-rw-r--r--chromium/third_party/dawn/src/tint/source.h343
-rw-r--r--chromium/third_party/dawn/src/tint/source_test.cc86
-rw-r--r--chromium/third_party/dawn/src/tint/symbol.cc15
-rw-r--r--chromium/third_party/dawn/src/tint/symbol.h130
-rw-r--r--chromium/third_party/dawn/src/tint/symbol_table.cc66
-rw-r--r--chromium/third_party/dawn/src/tint/symbol_table.h122
-rw-r--r--chromium/third_party/dawn/src/tint/symbol_table_test.cc46
-rw-r--r--chromium/third_party/dawn/src/tint/symbol_test.cc32
-rw-r--r--chromium/third_party/dawn/src/tint/test_main.cc68
-rw-r--r--chromium/third_party/dawn/src/tint/text/unicode.cc793
-rw-r--r--chromium/third_party/dawn/src/tint/text/unicode.h56
-rw-r--r--chromium/third_party/dawn/src/tint/text/unicode_test.cc355
-rw-r--r--chromium/third_party/dawn/src/tint/traits.h62
-rw-r--r--chromium/third_party/dawn/src/tint/traits_test.cc278
-rw-r--r--chromium/third_party/dawn/src/tint/transform/add_empty_entry_point.cc27
-rw-r--r--chromium/third_party/dawn/src/tint/transform/add_empty_entry_point.h44
-rw-r--r--chromium/third_party/dawn/src/tint/transform/add_empty_entry_point_test.cc40
-rw-r--r--chromium/third_party/dawn/src/tint/transform/add_spirv_block_attribute.cc135
-rw-r--r--chromium/third_party/dawn/src/tint/transform/add_spirv_block_attribute.h66
-rw-r--r--chromium/third_party/dawn/src/tint/transform/add_spirv_block_attribute_test.cc175
-rw-r--r--chromium/third_party/dawn/src/tint/transform/array_length_from_uniform.cc244
-rw-r--r--chromium/third_party/dawn/src/tint/transform/array_length_from_uniform.h122
-rw-r--r--chromium/third_party/dawn/src/tint/transform/array_length_from_uniform_test.cc208
-rw-r--r--chromium/third_party/dawn/src/tint/transform/binding_remapper.cc206
-rw-r--r--chromium/third_party/dawn/src/tint/transform/binding_remapper.h99
-rw-r--r--chromium/third_party/dawn/src/tint/transform/binding_remapper_test.cc218
-rw-r--r--chromium/third_party/dawn/src/tint/transform/builtin_polyfill.cc1027
-rw-r--r--chromium/third_party/dawn/src/tint/transform/builtin_polyfill.h113
-rw-r--r--chromium/third_party/dawn/src/tint/transform/builtin_polyfill_test.cc390
-rw-r--r--chromium/third_party/dawn/src/tint/transform/calculate_array_length.cc341
-rw-r--r--chromium/third_party/dawn/src/tint/transform/calculate_array_length.h73
-rw-r--r--chromium/third_party/dawn/src/tint/transform/calculate_array_length_test.cc138
-rw-r--r--chromium/third_party/dawn/src/tint/transform/canonicalize_entry_point_io.cc1290
-rw-r--r--chromium/third_party/dawn/src/tint/transform/canonicalize_entry_point_io.h99
-rw-r--r--chromium/third_party/dawn/src/tint/transform/canonicalize_entry_point_io_test.cc1345
-rw-r--r--chromium/third_party/dawn/src/tint/transform/combine_samplers.cc564
-rw-r--r--chromium/third_party/dawn/src/tint/transform/combine_samplers.h84
-rw-r--r--chromium/third_party/dawn/src/tint/transform/combine_samplers_test.cc416
-rw-r--r--chromium/third_party/dawn/src/tint/transform/decompose_memory_access.cc1645
-rw-r--r--chromium/third_party/dawn/src/tint/transform/decompose_memory_access.h177
-rw-r--r--chromium/third_party/dawn/src/tint/transform/decompose_memory_access_test.cc471
-rw-r--r--chromium/third_party/dawn/src/tint/transform/decompose_strided_array.cc200
-rw-r--r--chromium/third_party/dawn/src/tint/transform/decompose_strided_array.h46
-rw-r--r--chromium/third_party/dawn/src/tint/transform/decompose_strided_array_test.cc870
-rw-r--r--chromium/third_party/dawn/src/tint/transform/decompose_strided_matrix.cc334
-rw-r--r--chromium/third_party/dawn/src/tint/transform/decompose_strided_matrix.h46
-rw-r--r--chromium/third_party/dawn/src/tint/transform/decompose_strided_matrix_test.cc823
-rw-r--r--chromium/third_party/dawn/src/tint/transform/disable_uniformity_analysis.cc40
-rw-r--r--chromium/third_party/dawn/src/tint/transform/disable_uniformity_analysis.h47
-rw-r--r--chromium/third_party/dawn/src/tint/transform/disable_uniformity_analysis_test.cc73
-rw-r--r--chromium/third_party/dawn/src/tint/transform/expand_compound_assignment.cc284
-rw-r--r--chromium/third_party/dawn/src/tint/transform/expand_compound_assignment.h40
-rw-r--r--chromium/third_party/dawn/src/tint/transform/expand_compound_assignment_test.cc202
-rw-r--r--chromium/third_party/dawn/src/tint/transform/first_index_offset.cc220
-rw-r--r--chromium/third_party/dawn/src/tint/transform/first_index_offset.h132
-rw-r--r--chromium/third_party/dawn/src/tint/transform/first_index_offset_test.cc327
-rw-r--r--chromium/third_party/dawn/src/tint/transform/fold_constants.cc97
-rw-r--r--chromium/third_party/dawn/src/tint/transform/fold_constants.h45
-rw-r--r--chromium/third_party/dawn/src/tint/transform/fold_constants_test.cc425
-rw-r--r--chromium/third_party/dawn/src/tint/transform/fold_trivial_single_use_lets.cc86
-rw-r--r--chromium/third_party/dawn/src/tint/transform/fold_trivial_single_use_lets.h35
-rw-r--r--chromium/third_party/dawn/src/tint/transform/fold_trivial_single_use_lets_test.cc72
-rw-r--r--chromium/third_party/dawn/src/tint/transform/for_loop_to_loop.cc38
-rw-r--r--chromium/third_party/dawn/src/tint/transform/for_loop_to_loop.h43
-rw-r--r--chromium/third_party/dawn/src/tint/transform/for_loop_to_loop_test.cc104
-rw-r--r--chromium/third_party/dawn/src/tint/transform/localize_struct_array_assignment.cc323
-rw-r--r--chromium/third_party/dawn/src/tint/transform/localize_struct_array_assignment.h41
-rw-r--r--chromium/third_party/dawn/src/tint/transform/localize_struct_array_assignment_test.cc183
-rw-r--r--chromium/third_party/dawn/src/tint/transform/loop_to_for_loop.cc165
-rw-r--r--chromium/third_party/dawn/src/tint/transform/loop_to_for_loop.h43
-rw-r--r--chromium/third_party/dawn/src/tint/transform/loop_to_for_loop_test.cc132
-rw-r--r--chromium/third_party/dawn/src/tint/transform/manager.cc70
-rw-r--r--chromium/third_party/dawn/src/tint/transform/manager.h54
-rw-r--r--chromium/third_party/dawn/src/tint/transform/module_scope_var_to_entry_point_param.cc640
-rw-r--r--chromium/third_party/dawn/src/tint/transform/module_scope_var_to_entry_point_param.h43
-rw-r--r--chromium/third_party/dawn/src/tint/transform/module_scope_var_to_entry_point_param_test.cc386
-rw-r--r--chromium/third_party/dawn/src/tint/transform/multiplanar_external_texture.cc763
-rw-r--r--chromium/third_party/dawn/src/tint/transform/multiplanar_external_texture.h96
-rw-r--r--chromium/third_party/dawn/src/tint/transform/multiplanar_external_texture_test.cc1218
-rw-r--r--chromium/third_party/dawn/src/tint/transform/num_workgroups_from_uniform.cc228
-rw-r--r--chromium/third_party/dawn/src/tint/transform/num_workgroups_from_uniform.h71
-rw-r--r--chromium/third_party/dawn/src/tint/transform/num_workgroups_from_uniform_test.cc434
-rw-r--r--chromium/third_party/dawn/src/tint/transform/promote_initializers_to_const_var.cc84
-rw-r--r--chromium/third_party/dawn/src/tint/transform/promote_initializers_to_const_var.h35
-rw-r--r--chromium/third_party/dawn/src/tint/transform/promote_initializers_to_const_var_test.cc178
-rw-r--r--chromium/third_party/dawn/src/tint/transform/promote_side_effects_to_decl.cc1182
-rw-r--r--chromium/third_party/dawn/src/tint/transform/promote_side_effects_to_decl.h29
-rw-r--r--chromium/third_party/dawn/src/tint/transform/promote_side_effects_to_decl_test.cc1086
-rw-r--r--chromium/third_party/dawn/src/tint/transform/remove_continue_in_switch.cc179
-rw-r--r--chromium/third_party/dawn/src/tint/transform/remove_continue_in_switch.h46
-rw-r--r--chromium/third_party/dawn/src/tint/transform/remove_continue_in_switch_test.cc86
-rw-r--r--chromium/third_party/dawn/src/tint/transform/remove_phonies.cc197
-rw-r--r--chromium/third_party/dawn/src/tint/transform/remove_phonies.h43
-rw-r--r--chromium/third_party/dawn/src/tint/transform/remove_phonies_test.cc72
-rw-r--r--chromium/third_party/dawn/src/tint/transform/remove_unreachable_statements.cc35
-rw-r--r--chromium/third_party/dawn/src/tint/transform/remove_unreachable_statements.h46
-rw-r--r--chromium/third_party/dawn/src/tint/transform/remove_unreachable_statements_test.cc172
-rw-r--r--chromium/third_party/dawn/src/tint/transform/renamer.cc189
-rw-r--r--chromium/third_party/dawn/src/tint/transform/renamer.h126
-rw-r--r--chromium/third_party/dawn/src/tint/transform/renamer_test.cc236
-rw-r--r--chromium/third_party/dawn/src/tint/transform/robustness.cc491
-rw-r--r--chromium/third_party/dawn/src/tint/transform/robustness.h82
-rw-r--r--chromium/third_party/dawn/src/tint/transform/robustness_test.cc662
-rw-r--r--chromium/third_party/dawn/src/tint/transform/simplify_pointers.cc348
-rw-r--r--chromium/third_party/dawn/src/tint/transform/simplify_pointers.h36
-rw-r--r--chromium/third_party/dawn/src/tint/transform/simplify_pointers_test.cc96
-rw-r--r--chromium/third_party/dawn/src/tint/transform/single_entry_point.cc138
-rw-r--r--chromium/third_party/dawn/src/tint/transform/single_entry_point.h58
-rw-r--r--chromium/third_party/dawn/src/tint/transform/single_entry_point_test.cc309
-rw-r--r--chromium/third_party/dawn/src/tint/transform/test_helper.h199
-rw-r--r--chromium/third_party/dawn/src/tint/transform/transform.cc200
-rw-r--r--chromium/third_party/dawn/src/tint/transform/transform.h301
-rw-r--r--chromium/third_party/dawn/src/tint/transform/transform_test.cc124
-rw-r--r--chromium/third_party/dawn/src/tint/transform/unshadow.cc110
-rw-r--r--chromium/third_party/dawn/src/tint/transform/unshadow.h36
-rw-r--r--chromium/third_party/dawn/src/tint/transform/unshadow_test.cc168
-rw-r--r--chromium/third_party/dawn/src/tint/transform/unwind_discard_functions.cc605
-rw-r--r--chromium/third_party/dawn/src/tint/transform/unwind_discard_functions.h40
-rw-r--r--chromium/third_party/dawn/src/tint/transform/unwind_discard_functions_test.cc328
-rw-r--r--chromium/third_party/dawn/src/tint/transform/utils/get_insertion_point.cc61
-rw-r--r--chromium/third_party/dawn/src/tint/transform/utils/get_insertion_point.h3
-rw-r--r--chromium/third_party/dawn/src/tint/transform/utils/get_insertion_point_test.cc102
-rw-r--r--chromium/third_party/dawn/src/tint/transform/utils/hoist_to_decl_before.cc482
-rw-r--r--chromium/third_party/dawn/src/tint/transform/utils/hoist_to_decl_before.h73
-rw-r--r--chromium/third_party/dawn/src/tint/transform/utils/hoist_to_decl_before_test.cc753
-rw-r--r--chromium/third_party/dawn/src/tint/transform/var_for_dynamic_index.cc44
-rw-r--r--chromium/third_party/dawn/src/tint/transform/var_for_dynamic_index.h32
-rw-r--r--chromium/third_party/dawn/src/tint/transform/var_for_dynamic_index_test.cc198
-rw-r--r--chromium/third_party/dawn/src/tint/transform/vectorize_scalar_matrix_constructors.cc117
-rw-r--r--chromium/third_party/dawn/src/tint/transform/vectorize_scalar_matrix_constructors.h43
-rw-r--r--chromium/third_party/dawn/src/tint/transform/vectorize_scalar_matrix_constructors_test.cc155
-rw-r--r--chromium/third_party/dawn/src/tint/transform/vertex_pulling.cc1621
-rw-r--r--chromium/third_party/dawn/src/tint/transform/vertex_pulling.h214
-rw-r--r--chromium/third_party/dawn/src/tint/transform/vertex_pulling_test.cc769
-rw-r--r--chromium/third_party/dawn/src/tint/transform/wrap_arrays_in_structs.cc213
-rw-r--r--chromium/third_party/dawn/src/tint/transform/wrap_arrays_in_structs.h83
-rw-r--r--chromium/third_party/dawn/src/tint/transform/wrap_arrays_in_structs_test.cc104
-rw-r--r--chromium/third_party/dawn/src/tint/transform/zero_init_workgroup_memory.cc749
-rw-r--r--chromium/third_party/dawn/src/tint/transform/zero_init_workgroup_memory.h52
-rw-r--r--chromium/third_party/dawn/src/tint/transform/zero_init_workgroup_memory_test.cc404
-rw-r--r--chromium/third_party/dawn/src/tint/utils/bitcast.h39
-rw-r--r--chromium/third_party/dawn/src/tint/utils/bitcast_test.cc37
-rw-r--r--chromium/third_party/dawn/src/tint/utils/block_allocator.h486
-rw-r--r--chromium/third_party/dawn/src/tint/utils/block_allocator_test.cc184
-rw-r--r--chromium/third_party/dawn/src/tint/utils/compiler_macros.h82
-rw-r--r--chromium/third_party/dawn/src/tint/utils/crc32.h93
-rw-r--r--chromium/third_party/dawn/src/tint/utils/crc32_test.cc12
-rw-r--r--chromium/third_party/dawn/src/tint/utils/debugger.cc35
-rw-r--r--chromium/third_party/dawn/src/tint/utils/defer.h33
-rw-r--r--chromium/third_party/dawn/src/tint/utils/defer_test.cc26
-rw-r--r--chromium/third_party/dawn/src/tint/utils/enum_set.h394
-rw-r--r--chromium/third_party/dawn/src/tint/utils/enum_set_test.cc256
-rw-r--r--chromium/third_party/dawn/src/tint/utils/hash.h84
-rw-r--r--chromium/third_party/dawn/src/tint/utils/hash_test.cc63
-rw-r--r--chromium/third_party/dawn/src/tint/utils/io/command.h86
-rw-r--r--chromium/third_party/dawn/src/tint/utils/io/command_other.cc10
-rw-r--r--chromium/third_party/dawn/src/tint/utils/io/command_posix.cc373
-rw-r--r--chromium/third_party/dawn/src/tint/utils/io/command_test.cc92
-rw-r--r--chromium/third_party/dawn/src/tint/utils/io/command_windows.cc358
-rw-r--r--chromium/third_party/dawn/src/tint/utils/io/tmpfile.h74
-rw-r--r--chromium/third_party/dawn/src/tint/utils/io/tmpfile_other.cc2
-rw-r--r--chromium/third_party/dawn/src/tint/utils/io/tmpfile_posix.cc56
-rw-r--r--chromium/third_party/dawn/src/tint/utils/io/tmpfile_test.cc94
-rw-r--r--chromium/third_party/dawn/src/tint/utils/io/tmpfile_windows.cc46
-rw-r--r--chromium/third_party/dawn/src/tint/utils/map.h26
-rw-r--r--chromium/third_party/dawn/src/tint/utils/map_test.cc44
-rw-r--r--chromium/third_party/dawn/src/tint/utils/math.h16
-rw-r--r--chromium/third_party/dawn/src/tint/utils/math_test.cc90
-rw-r--r--chromium/third_party/dawn/src/tint/utils/result.h103
-rw-r--r--chromium/third_party/dawn/src/tint/utils/result_test.cc55
-rw-r--r--chromium/third_party/dawn/src/tint/utils/reverse.h10
-rw-r--r--chromium/third_party/dawn/src/tint/utils/reverse_test.cc12
-rw-r--r--chromium/third_party/dawn/src/tint/utils/scoped_assignment.h50
-rw-r--r--chromium/third_party/dawn/src/tint/utils/scoped_assignment_test.cc32
-rw-r--r--chromium/third_party/dawn/src/tint/utils/string.h12
-rw-r--r--chromium/third_party/dawn/src/tint/utils/string_test.cc20
-rw-r--r--chromium/third_party/dawn/src/tint/utils/to_const_ptr_vec.h12
-rw-r--r--chromium/third_party/dawn/src/tint/utils/transform.h63
-rw-r--r--chromium/third_party/dawn/src/tint/utils/transform_test.cc217
-rw-r--r--chromium/third_party/dawn/src/tint/utils/unique_allocator.h92
-rw-r--r--chromium/third_party/dawn/src/tint/utils/unique_allocator_test.cc36
-rw-r--r--chromium/third_party/dawn/src/tint/utils/unique_vector.h129
-rw-r--r--chromium/third_party/dawn/src/tint/utils/unique_vector_test.cc211
-rw-r--r--chromium/third_party/dawn/src/tint/val/hlsl.cc257
-rw-r--r--chromium/third_party/dawn/src/tint/val/msl.cc62
-rw-r--r--chromium/third_party/dawn/src/tint/val/msl_metal.mm41
-rw-r--r--chromium/third_party/dawn/src/tint/val/val.h16
-rw-r--r--chromium/third_party/dawn/src/tint/writer/append_vector.cc250
-rw-r--r--chromium/third_party/dawn/src/tint/writer/append_vector_test.cc827
-rw-r--r--chromium/third_party/dawn/src/tint/writer/array_length_from_uniform_options.cc8
-rw-r--r--chromium/third_party/dawn/src/tint/writer/array_length_from_uniform_options.h43
-rw-r--r--chromium/third_party/dawn/src/tint/writer/flatten_bindings.cc78
-rw-r--r--chromium/third_party/dawn/src/tint/writer/flatten_bindings.h31
-rw-r--r--chromium/third_party/dawn/src/tint/writer/flatten_bindings_test.cc142
-rw-r--r--chromium/third_party/dawn/src/tint/writer/float_to_string.cc235
-rw-r--r--chromium/third_party/dawn/src/tint/writer/float_to_string_test.cc180
-rw-r--r--chromium/third_party/dawn/src/tint/writer/generate_external_texture_bindings.cc56
-rw-r--r--chromium/third_party/dawn/src/tint/writer/generate_external_texture_bindings.h4
-rw-r--r--chromium/third_party/dawn/src/tint/writer/generate_external_texture_bindings_test.cc172
-rw-r--r--chromium/third_party/dawn/src/tint/writer/glsl/generator.cc55
-rw-r--r--chromium/third_party/dawn/src/tint/writer/glsl/generator.h87
-rw-r--r--chromium/third_party/dawn/src/tint/writer/glsl/generator_bench.cc36
-rw-r--r--chromium/third_party/dawn/src/tint/writer/glsl/generator_impl.cc4960
-rw-r--r--chromium/third_party/dawn/src/tint/writer/glsl/generator_impl.h857
-rw-r--r--chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_array_accessor_test.cc16
-rw-r--r--chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_assign_test.cc16
-rw-r--r--chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_binary_test.cc572
-rw-r--r--chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_bitcast_test.cc38
-rw-r--r--chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_block_test.cc12
-rw-r--r--chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_break_test.cc12
-rw-r--r--chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_builtin_test.cc749
-rw-r--r--chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_builtin_texture_test.cc504
-rw-r--r--chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_call_test.cc84
-rw-r--r--chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_case_test.cc68
-rw-r--r--chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_cast_test.cc26
-rw-r--r--chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_constructor_test.cc211
-rw-r--r--chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_continue_test.cc13
-rw-r--r--chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_discard_test.cc12
-rw-r--r--chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_function_test.cc847
-rw-r--r--chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_identifier_test.cc14
-rw-r--r--chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_if_test.cc96
-rw-r--r--chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_import_test.cc237
-rw-r--r--chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_loop_test.cc325
-rw-r--r--chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_member_accessor_test.cc911
-rw-r--r--chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_module_constant_test.cc58
-rw-r--r--chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_return_test.cc26
-rw-r--r--chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_sanitizer_test.cc325
-rw-r--r--chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_storage_buffer_test.cc55
-rw-r--r--chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_switch_test.cc36
-rw-r--r--chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_test.cc71
-rw-r--r--chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_type_test.cc532
-rw-r--r--chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_unary_op_test.cc83
-rw-r--r--chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_uniform_buffer_test.cc22
-rw-r--r--chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_variable_decl_statement_test.cc103
-rw-r--r--chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_workgroup_var_test.cc43
-rw-r--r--chromium/third_party/dawn/src/tint/writer/glsl/test_helper.h112
-rw-r--r--chromium/third_party/dawn/src/tint/writer/glsl/version.h46
-rw-r--r--chromium/third_party/dawn/src/tint/writer/hlsl/generator.cc52
-rw-r--r--chromium/third_party/dawn/src/tint/writer/hlsl/generator.h82
-rw-r--r--chromium/third_party/dawn/src/tint/writer/hlsl/generator_bench.cc22
-rw-r--r--chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl.cc6789
-rw-r--r--chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl.h898
-rw-r--r--chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_array_accessor_test.cc16
-rw-r--r--chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_assign_test.cc190
-rw-r--r--chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_binary_test.cc840
-rw-r--r--chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_bitcast_test.cc38
-rw-r--r--chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_block_test.cc12
-rw-r--r--chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_break_test.cc12
-rw-r--r--chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_builtin_test.cc761
-rw-r--r--chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_builtin_texture_test.cc622
-rw-r--r--chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_call_test.cc84
-rw-r--r--chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_case_test.cc73
-rw-r--r--chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_cast_test.cc26
-rw-r--r--chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_constructor_test.cc238
-rw-r--r--chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_continue_test.cc14
-rw-r--r--chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_discard_test.cc12
-rw-r--r--chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_function_test.cc966
-rw-r--r--chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_identifier_test.cc14
-rw-r--r--chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_if_test.cc96
-rw-r--r--chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_import_test.cc188
-rw-r--r--chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_loop_test.cc300
-rw-r--r--chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_member_accessor_test.cc883
-rw-r--r--chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_module_constant_test.cc58
-rw-r--r--chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_return_test.cc26
-rw-r--r--chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_sanitizer_test.cc385
-rw-r--r--chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_switch_test.cc42
-rw-r--r--chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_test.cc57
-rw-r--r--chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_type_test.cc757
-rw-r--r--chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_unary_op_test.cc83
-rw-r--r--chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_variable_decl_statement_test.cc100
-rw-r--r--chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_workgroup_var_test.cc43
-rw-r--r--chromium/third_party/dawn/src/tint/writer/hlsl/test_helper.h127
-rw-r--r--chromium/third_party/dawn/src/tint/writer/msl/generator.cc45
-rw-r--r--chromium/third_party/dawn/src/tint/writer/msl/generator.h117
-rw-r--r--chromium/third_party/dawn/src/tint/writer/msl/generator_bench.cc22
-rw-r--r--chromium/third_party/dawn/src/tint/writer/msl/generator_impl.cc5196
-rw-r--r--chromium/third_party/dawn/src/tint/writer/msl/generator_impl.h690
-rw-r--r--chromium/third_party/dawn/src/tint/writer/msl/generator_impl_array_accessor_test.cc32
-rw-r--r--chromium/third_party/dawn/src/tint/writer/msl/generator_impl_assign_test.cc16
-rw-r--r--chromium/third_party/dawn/src/tint/writer/msl/generator_impl_binary_test.cc216
-rw-r--r--chromium/third_party/dawn/src/tint/writer/msl/generator_impl_bitcast_test.cc14
-rw-r--r--chromium/third_party/dawn/src/tint/writer/msl/generator_impl_block_test.cc24
-rw-r--r--chromium/third_party/dawn/src/tint/writer/msl/generator_impl_break_test.cc12
-rw-r--r--chromium/third_party/dawn/src/tint/writer/msl/generator_impl_builtin_test.cc486
-rw-r--r--chromium/third_party/dawn/src/tint/writer/msl/generator_impl_builtin_texture_test.cc524
-rw-r--r--chromium/third_party/dawn/src/tint/writer/msl/generator_impl_call_test.cc86
-rw-r--r--chromium/third_party/dawn/src/tint/writer/msl/generator_impl_case_test.cc68
-rw-r--r--chromium/third_party/dawn/src/tint/writer/msl/generator_impl_cast_test.cc38
-rw-r--r--chromium/third_party/dawn/src/tint/writer/msl/generator_impl_constructor_test.cc161
-rw-r--r--chromium/third_party/dawn/src/tint/writer/msl/generator_impl_continue_test.cc14
-rw-r--r--chromium/third_party/dawn/src/tint/writer/msl/generator_impl_discard_test.cc12
-rw-r--r--chromium/third_party/dawn/src/tint/writer/msl/generator_impl_function_test.cc639
-rw-r--r--chromium/third_party/dawn/src/tint/writer/msl/generator_impl_identifier_test.cc14
-rw-r--r--chromium/third_party/dawn/src/tint/writer/msl/generator_impl_if_test.cc62
-rw-r--r--chromium/third_party/dawn/src/tint/writer/msl/generator_impl_import_test.cc199
-rw-r--r--chromium/third_party/dawn/src/tint/writer/msl/generator_impl_loop_test.cc316
-rw-r--r--chromium/third_party/dawn/src/tint/writer/msl/generator_impl_member_accessor_test.cc44
-rw-r--r--chromium/third_party/dawn/src/tint/writer/msl/generator_impl_module_constant_test.cc43
-rw-r--r--chromium/third_party/dawn/src/tint/writer/msl/generator_impl_return_test.cc26
-rw-r--r--chromium/third_party/dawn/src/tint/writer/msl/generator_impl_sanitizer_test.cc244
-rw-r--r--chromium/third_party/dawn/src/tint/writer/msl/generator_impl_switch_test.cc34
-rw-r--r--chromium/third_party/dawn/src/tint/writer/msl/generator_impl_test.cc307
-rw-r--r--chromium/third_party/dawn/src/tint/writer/msl/generator_impl_type_test.cc1097
-rw-r--r--chromium/third_party/dawn/src/tint/writer/msl/generator_impl_unary_op_test.cc97
-rw-r--r--chromium/third_party/dawn/src/tint/writer/msl/generator_impl_variable_decl_statement_test.cc145
-rw-r--r--chromium/third_party/dawn/src/tint/writer/msl/test_helper.h106
-rw-r--r--chromium/third_party/dawn/src/tint/writer/spirv/binary_writer.cc58
-rw-r--r--chromium/third_party/dawn/src/tint/writer/spirv/binary_writer.h46
-rw-r--r--chromium/third_party/dawn/src/tint/writer/spirv/binary_writer_test.cc158
-rw-r--r--chromium/third_party/dawn/src/tint/writer/spirv/builder.cc7319
-rw-r--r--chromium/third_party/dawn/src/tint/writer/spirv/builder.h1188
-rw-r--r--chromium/third_party/dawn/src/tint/writer/spirv/builder_accessor_expression_test.cc1013
-rw-r--r--chromium/third_party/dawn/src/tint/writer/spirv/builder_assign_test.cc262
-rw-r--r--chromium/third_party/dawn/src/tint/writer/spirv/builder_binary_expression_test.cc1043
-rw-r--r--chromium/third_party/dawn/src/tint/writer/spirv/builder_bitcast_expression_test.cc34
-rw-r--r--chromium/third_party/dawn/src/tint/writer/spirv/builder_block_test.cc35
-rw-r--r--chromium/third_party/dawn/src/tint/writer/spirv/builder_builtin_test.cc2580
-rw-r--r--chromium/third_party/dawn/src/tint/writer/spirv/builder_builtin_texture_test.cc1432
-rw-r--r--chromium/third_party/dawn/src/tint/writer/spirv/builder_call_test.cc40
-rw-r--r--chromium/third_party/dawn/src/tint/writer/spirv/builder_constructor_expression_test.cc2250
-rw-r--r--chromium/third_party/dawn/src/tint/writer/spirv/builder_discard_test.cc12
-rw-r--r--chromium/third_party/dawn/src/tint/writer/spirv/builder_entry_point_test.cc183
-rw-r--r--chromium/third_party/dawn/src/tint/writer/spirv/builder_format_conversion_test.cc109
-rw-r--r--chromium/third_party/dawn/src/tint/writer/spirv/builder_function_attribute_test.cc301
-rw-r--r--chromium/third_party/dawn/src/tint/writer/spirv/builder_function_test.cc222
-rw-r--r--chromium/third_party/dawn/src/tint/writer/spirv/builder_function_variable_test.cc152
-rw-r--r--chromium/third_party/dawn/src/tint/writer/spirv/builder_global_variable_test.cc588
-rw-r--r--chromium/third_party/dawn/src/tint/writer/spirv/builder_ident_expression_test.cc122
-rw-r--r--chromium/third_party/dawn/src/tint/writer/spirv/builder_if_test.cc552
-rw-r--r--chromium/third_party/dawn/src/tint/writer/spirv/builder_literal_test.cc142
-rw-r--r--chromium/third_party/dawn/src/tint/writer/spirv/builder_loop_test.cc454
-rw-r--r--chromium/third_party/dawn/src/tint/writer/spirv/builder_return_test.cc62
-rw-r--r--chromium/third_party/dawn/src/tint/writer/spirv/builder_switch_test.cc385
-rw-r--r--chromium/third_party/dawn/src/tint/writer/spirv/builder_test.cc37
-rw-r--r--chromium/third_party/dawn/src/tint/writer/spirv/builder_type_test.cc948
-rw-r--r--chromium/third_party/dawn/src/tint/writer/spirv/builder_unary_op_expression_test.cc97
-rw-r--r--chromium/third_party/dawn/src/tint/writer/spirv/function.cc28
-rw-r--r--chromium/third_party/dawn/src/tint/writer/spirv/function.h128
-rw-r--r--chromium/third_party/dawn/src/tint/writer/spirv/generator.cc43
-rw-r--r--chromium/third_party/dawn/src/tint/writer/spirv/generator.h44
-rw-r--r--chromium/third_party/dawn/src/tint/writer/spirv/generator_bench.cc22
-rw-r--r--chromium/third_party/dawn/src/tint/writer/spirv/generator_impl.cc125
-rw-r--r--chromium/third_party/dawn/src/tint/writer/spirv/generator_impl.h38
-rw-r--r--chromium/third_party/dawn/src/tint/writer/spirv/instruction.cc10
-rw-r--r--chromium/third_party/dawn/src/tint/writer/spirv/instruction.h42
-rw-r--r--chromium/third_party/dawn/src/tint/writer/spirv/instruction_test.cc28
-rw-r--r--chromium/third_party/dawn/src/tint/writer/spirv/operand.cc48
-rw-r--r--chromium/third_party/dawn/src/tint/writer/spirv/operand.h92
-rw-r--r--chromium/third_party/dawn/src/tint/writer/spirv/operand_test.cc34
-rw-r--r--chromium/third_party/dawn/src/tint/writer/spirv/scalar_constant.h197
-rw-r--r--chromium/third_party/dawn/src/tint/writer/spirv/scalar_constant_test.cc52
-rw-r--r--chromium/third_party/dawn/src/tint/writer/spirv/spv_dump.cc89
-rw-r--r--chromium/third_party/dawn/src/tint/writer/spirv/test_helper.h173
-rw-r--r--chromium/third_party/dawn/src/tint/writer/text.h8
-rw-r--r--chromium/third_party/dawn/src/tint/writer/text_generator.cc143
-rw-r--r--chromium/third_party/dawn/src/tint/writer/text_generator.h400
-rw-r--r--chromium/third_party/dawn/src/tint/writer/text_generator_test.cc26
-rw-r--r--chromium/third_party/dawn/src/tint/writer/wgsl/generator.cc14
-rw-r--r--chromium/third_party/dawn/src/tint/writer/wgsl/generator.h24
-rw-r--r--chromium/third_party/dawn/src/tint/writer/wgsl/generator_bench.cc22
-rw-r--r--chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl.cc1918
-rw-r--r--chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl.h334
-rw-r--r--chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_alias_type_test.cc42
-rw-r--r--chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_array_accessor_test.cc32
-rw-r--r--chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_assign_test.cc16
-rw-r--r--chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_binary_test.cc82
-rw-r--r--chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_bitcast_test.cc14
-rw-r--r--chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_block_test.cc12
-rw-r--r--chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_break_test.cc12
-rw-r--r--chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_call_test.cc86
-rw-r--r--chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_case_test.cc42
-rw-r--r--chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_cast_test.cc26
-rw-r--r--chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_constructor_test.cc106
-rw-r--r--chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_continue_test.cc14
-rw-r--r--chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_discard_test.cc12
-rw-r--r--chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_enable_test.cc33
-rw-r--r--chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_fallthrough_test.cc18
-rw-r--r--chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_function_test.cc262
-rw-r--r--chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_global_decl_test.cc116
-rw-r--r--chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_identifier_test.cc14
-rw-r--r--chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_if_test.cc96
-rw-r--r--chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_literal_test.cc114
-rw-r--r--chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_loop_test.cc195
-rw-r--r--chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_member_accessor_test.cc34
-rw-r--r--chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_return_test.cc26
-rw-r--r--chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_switch_test.cc38
-rw-r--r--chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_test.cc9
-rw-r--r--chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_type_test.cc525
-rw-r--r--chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_unary_op_test.cc83
-rw-r--r--chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_variable_decl_statement_test.cc30
-rw-r--r--chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_variable_test.cc143
-rw-r--r--chromium/third_party/dawn/src/tint/writer/wgsl/test_helper.h47
-rw-r--r--chromium/third_party/dawn/src/tint/writer/writer.h26
-rw-r--r--chromium/third_party/dawn/test/tint/BUILD.gn71
-rw-r--r--chromium/third_party/dawn/third_party/CMakeLists.txt47
-rw-r--r--chromium/third_party/dawn/third_party/glfw/.appveyor.yml47
-rw-r--r--chromium/third_party/dawn/third_party/glfw/.github/CODEOWNERS10
-rw-r--r--chromium/third_party/dawn/third_party/glfw/.github/workflows/build.yml93
-rw-r--r--chromium/third_party/dawn/third_party/glfw/.mailmap10
-rw-r--r--chromium/third_party/dawn/third_party/glfw/CMake/GenerateMappings.cmake48
-rw-r--r--chromium/third_party/dawn/third_party/glfw/CMake/Info.plist.in38
-rw-r--r--chromium/third_party/dawn/third_party/glfw/CMake/cmake_uninstall.cmake.in29
-rw-r--r--chromium/third_party/dawn/third_party/glfw/CMake/glfw3.pc.in13
-rw-r--r--chromium/third_party/dawn/third_party/glfw/CMake/glfw3Config.cmake.in3
-rw-r--r--chromium/third_party/dawn/third_party/glfw/CMake/i686-w64-mingw32-clang.cmake13
-rw-r--r--chromium/third_party/dawn/third_party/glfw/CMake/i686-w64-mingw32.cmake13
-rw-r--r--chromium/third_party/dawn/third_party/glfw/CMake/modules/FindEpollShim.cmake17
-rw-r--r--chromium/third_party/dawn/third_party/glfw/CMake/modules/FindOSMesa.cmake18
-rw-r--r--chromium/third_party/dawn/third_party/glfw/CMake/x86_64-w64-mingw32-clang.cmake13
-rw-r--r--chromium/third_party/dawn/third_party/glfw/CMake/x86_64-w64-mingw32.cmake13
-rw-r--r--chromium/third_party/dawn/third_party/glfw/CMakeLists.txt179
-rw-r--r--chromium/third_party/dawn/third_party/glfw/CONTRIBUTORS.md248
-rw-r--r--chromium/third_party/dawn/third_party/glfw/LICENSE.md23
-rw-r--r--chromium/third_party/dawn/third_party/glfw/README.md361
-rw-r--r--chromium/third_party/dawn/third_party/glfw/deps/getopt.c230
-rw-r--r--chromium/third_party/dawn/third_party/glfw/deps/getopt.h57
-rw-r--r--chromium/third_party/dawn/third_party/glfw/deps/glad/gl.h5996
-rw-r--r--chromium/third_party/dawn/third_party/glfw/deps/glad/gles2.h1805
-rw-r--r--chromium/third_party/dawn/third_party/glfw/deps/glad/vulkan.h4612
-rw-r--r--chromium/third_party/dawn/third_party/glfw/deps/linmath.h606
-rw-r--r--chromium/third_party/dawn/third_party/glfw/deps/mingw/_mingw_dxhelper.h117
-rw-r--r--chromium/third_party/dawn/third_party/glfw/deps/mingw/dinput.h2467
-rw-r--r--chromium/third_party/dawn/third_party/glfw/deps/mingw/xinput.h239
-rw-r--r--chromium/third_party/dawn/third_party/glfw/deps/nuklear.h25778
-rw-r--r--chromium/third_party/dawn/third_party/glfw/deps/nuklear_glfw_gl2.h381
-rw-r--r--chromium/third_party/dawn/third_party/glfw/deps/stb_image_write.h1724
-rw-r--r--chromium/third_party/dawn/third_party/glfw/deps/tinycthread.c594
-rw-r--r--chromium/third_party/dawn/third_party/glfw/deps/tinycthread.h443
-rw-r--r--chromium/third_party/dawn/third_party/glfw/deps/vs2008/stdint.h247
-rw-r--r--chromium/third_party/dawn/third_party/glfw/docs/CMakeLists.txt46
-rw-r--r--chromium/third_party/dawn/third_party/glfw/docs/CONTRIBUTING.md391
-rw-r--r--chromium/third_party/dawn/third_party/glfw/docs/Doxyfile.in2465
-rw-r--r--chromium/third_party/dawn/third_party/glfw/docs/DoxygenLayout.xml71
-rw-r--r--chromium/third_party/dawn/third_party/glfw/docs/SUPPORT.md14
-rw-r--r--chromium/third_party/dawn/third_party/glfw/docs/build.dox338
-rw-r--r--chromium/third_party/dawn/third_party/glfw/docs/compat.dox284
-rw-r--r--chromium/third_party/dawn/third_party/glfw/docs/compile.dox394
-rw-r--r--chromium/third_party/dawn/third_party/glfw/docs/context.dox342
-rw-r--r--chromium/third_party/dawn/third_party/glfw/docs/extra.css2
-rw-r--r--chromium/third_party/dawn/third_party/glfw/docs/extra.css.map7
-rw-r--r--chromium/third_party/dawn/third_party/glfw/docs/extra.scss449
-rw-r--r--chromium/third_party/dawn/third_party/glfw/docs/footer.html7
-rw-r--r--chromium/third_party/dawn/third_party/glfw/docs/header.html34
-rw-r--r--chromium/third_party/dawn/third_party/glfw/docs/input.dox953
-rw-r--r--chromium/third_party/dawn/third_party/glfw/docs/internal.dox123
-rw-r--r--chromium/third_party/dawn/third_party/glfw/docs/intro.dox619
-rw-r--r--chromium/third_party/dawn/third_party/glfw/docs/main.dox46
-rw-r--r--chromium/third_party/dawn/third_party/glfw/docs/monitor.dox268
-rw-r--r--chromium/third_party/dawn/third_party/glfw/docs/moving.dox513
-rw-r--r--chromium/third_party/dawn/third_party/glfw/docs/news.dox240
-rw-r--r--chromium/third_party/dawn/third_party/glfw/docs/quick.dox371
-rw-r--r--chromium/third_party/dawn/third_party/glfw/docs/spaces.svg877
-rw-r--r--chromium/third_party/dawn/third_party/glfw/docs/vulkan.dox246
-rw-r--r--chromium/third_party/dawn/third_party/glfw/docs/window.dox1457
-rw-r--r--chromium/third_party/dawn/third_party/glfw/examples/CMakeLists.txt83
-rw-r--r--chromium/third_party/dawn/third_party/glfw/examples/boing.c680
-rw-r--r--chromium/third_party/dawn/third_party/glfw/examples/gears.c361
-rw-r--r--chromium/third_party/dawn/third_party/glfw/examples/glfw.icns  bin 0 -> 27988 bytes
-rw-r--r--chromium/third_party/dawn/third_party/glfw/examples/glfw.ico  bin 0 -> 21630 bytes
-rw-r--r--chromium/third_party/dawn/third_party/glfw/examples/glfw.rc3
-rw-r--r--chromium/third_party/dawn/third_party/glfw/examples/heightmap.c513
-rw-r--r--chromium/third_party/dawn/third_party/glfw/examples/offscreen.c165
-rw-r--r--chromium/third_party/dawn/third_party/glfw/examples/particles.c1074
-rw-r--r--chromium/third_party/dawn/third_party/glfw/examples/sharing.c235
-rw-r--r--chromium/third_party/dawn/third_party/glfw/examples/splitview.c547
-rw-r--r--chromium/third_party/dawn/third_party/glfw/examples/triangle-opengl.c171
-rw-r--r--chromium/third_party/dawn/third_party/glfw/examples/triangle-opengles.c170
-rw-r--r--chromium/third_party/dawn/third_party/glfw/examples/wave.c463
-rw-r--r--chromium/third_party/dawn/third_party/glfw/examples/windows.c110
-rw-r--r--chromium/third_party/dawn/third_party/glfw/include/GLFW/glfw3.h6397
-rw-r--r--chromium/third_party/dawn/third_party/glfw/include/GLFW/glfw3native.h614
-rw-r--r--chromium/third_party/dawn/third_party/glfw/src/CMakeLists.txt400
-rw-r--r--chromium/third_party/dawn/third_party/glfw/src/cocoa_init.m684
-rw-r--r--chromium/third_party/dawn/third_party/glfw/src/cocoa_joystick.h51
-rw-r--r--chromium/third_party/dawn/third_party/glfw/src/cocoa_joystick.m477
-rw-r--r--chromium/third_party/dawn/third_party/glfw/src/cocoa_monitor.m627
-rw-r--r--chromium/third_party/dawn/third_party/glfw/src/cocoa_platform.h302
-rw-r--r--chromium/third_party/dawn/third_party/glfw/src/cocoa_time.c55
-rw-r--r--chromium/third_party/dawn/third_party/glfw/src/cocoa_time.h35
-rw-r--r--chromium/third_party/dawn/third_party/glfw/src/cocoa_window.m1952
-rw-r--r--chromium/third_party/dawn/third_party/glfw/src/context.c758
-rw-r--r--chromium/third_party/dawn/third_party/glfw/src/egl_context.c868
-rw-r--r--chromium/third_party/dawn/third_party/glfw/src/glfw.rc.in30
-rw-r--r--chromium/third_party/dawn/third_party/glfw/src/glx_context.c712
-rw-r--r--chromium/third_party/dawn/third_party/glfw/src/init.c545
-rw-r--r--chromium/third_party/dawn/third_party/glfw/src/input.c1431
-rw-r--r--chromium/third_party/dawn/third_party/glfw/src/internal.h1010
-rw-r--r--chromium/third_party/dawn/third_party/glfw/src/linux_joystick.c431
-rw-r--r--chromium/third_party/dawn/third_party/glfw/src/linux_joystick.h65
-rw-r--r--chromium/third_party/dawn/third_party/glfw/src/mappings.h1001
-rw-r--r--chromium/third_party/dawn/third_party/glfw/src/mappings.h.in82
-rw-r--r--chromium/third_party/dawn/third_party/glfw/src/monitor.c543
-rw-r--r--chromium/third_party/dawn/third_party/glfw/src/nsgl_context.m376
-rw-r--r--chromium/third_party/dawn/third_party/glfw/src/null_init.c133
-rw-r--r--chromium/third_party/dawn/third_party/glfw/src/null_joystick.c58
-rw-r--r--chromium/third_party/dawn/third_party/glfw/src/null_joystick.h32
-rw-r--r--chromium/third_party/dawn/third_party/glfw/src/null_monitor.c161
-rw-r--r--chromium/third_party/dawn/third_party/glfw/src/null_platform.h149
-rw-r--r--chromium/third_party/dawn/third_party/glfw/src/null_window.c711
-rw-r--r--chromium/third_party/dawn/third_party/glfw/src/osmesa_context.c386
-rw-r--r--chromium/third_party/dawn/third_party/glfw/src/platform.c189
-rw-r--r--chromium/third_party/dawn/third_party/glfw/src/platform.h163
-rw-r--r--chromium/third_party/dawn/third_party/glfw/src/posix_module.c51
-rw-r--r--chromium/third_party/dawn/third_party/glfw/src/posix_poll.c81
-rw-r--r--chromium/third_party/dawn/third_party/glfw/src/posix_poll.h32
-rw-r--r--chromium/third_party/dawn/third_party/glfw/src/posix_thread.c105
-rw-r--r--chromium/third_party/dawn/third_party/glfw/src/posix_thread.h49
-rw-r--r--chromium/third_party/dawn/third_party/glfw/src/posix_time.c63
-rw-r--r--chromium/third_party/dawn/third_party/glfw/src/posix_time.h41
-rw-r--r--chromium/third_party/dawn/third_party/glfw/src/vulkan.c330
-rw-r--r--chromium/third_party/dawn/third_party/glfw/src/wgl_context.c790
-rw-r--r--chromium/third_party/dawn/third_party/glfw/src/win32_init.c679
-rw-r--r--chromium/third_party/dawn/third_party/glfw/src/win32_joystick.c758
-rw-r--r--chromium/third_party/dawn/third_party/glfw/src/win32_joystick.h53
-rw-r--r--chromium/third_party/dawn/third_party/glfw/src/win32_module.c49
-rw-r--r--chromium/third_party/dawn/third_party/glfw/src/win32_monitor.c547
-rw-r--r--chromium/third_party/dawn/third_party/glfw/src/win32_platform.h627
-rw-r--r--chromium/third_party/dawn/third_party/glfw/src/win32_thread.c98
-rw-r--r--chromium/third_party/dawn/third_party/glfw/src/win32_thread.h48
-rw-r--r--chromium/third_party/dawn/third_party/glfw/src/win32_time.c53
-rw-r--r--chromium/third_party/dawn/third_party/glfw/src/win32_time.h38
-rw-r--r--chromium/third_party/dawn/third_party/glfw/src/win32_window.c2497
-rw-r--r--chromium/third_party/dawn/third_party/glfw/src/window.c1113
-rw-r--r--chromium/third_party/dawn/third_party/glfw/src/wl_init.c779
-rw-r--r--chromium/third_party/dawn/third_party/glfw/src/wl_monitor.c233
-rw-r--r--chromium/third_party/dawn/third_party/glfw/src/wl_platform.h515
-rw-r--r--chromium/third_party/dawn/third_party/glfw/src/wl_window.c2709
-rw-r--r--chromium/third_party/dawn/third_party/glfw/src/x11_init.c1651
-rw-r--r--chromium/third_party/dawn/third_party/glfw/src/x11_monitor.c616
-rw-r--r--chromium/third_party/dawn/third_party/glfw/src/x11_platform.h1003
-rw-r--r--chromium/third_party/dawn/third_party/glfw/src/x11_window.c3267
-rw-r--r--chromium/third_party/dawn/third_party/glfw/src/xkb_unicode.c942
-rw-r--r--chromium/third_party/dawn/third_party/glfw/src/xkb_unicode.h30
-rw-r--r--chromium/third_party/dawn/third_party/gn/glfw/BUILD.gn117
-rw-r--r--chromium/third_party/dawn/third_party/gn/webgpu-cts/BUILD.gn2
-rw-r--r--chromium/third_party/dawn/third_party/gn/webgpu-cts/resource_files.txt2
-rw-r--r--chromium/third_party/dawn/third_party/gn/webgpu-cts/ts_sources.txt104
-rw-r--r--chromium/third_party/dawn/third_party/go.mod1
-rwxr-xr-xchromium/third_party/dawn/tools/format10
-rwxr-xr-xchromium/third_party/dawn/tools/setup-build19
-rw-r--r--chromium/third_party/dawn/tools/src/cmd/builtin-gen/parser/parser_test.go210
-rw-r--r--chromium/third_party/dawn/tools/src/cmd/check-spec-examples/main.go4
-rw-r--r--chromium/third_party/dawn/tools/src/cmd/cts/common/build.go168
-rw-r--r--chromium/third_party/dawn/tools/src/cmd/cts/common/cmds.go31
-rw-r--r--chromium/third_party/dawn/tools/src/cmd/cts/common/config.go95
-rw-r--r--chromium/third_party/dawn/tools/src/cmd/cts/common/constants.go37
-rw-r--r--chromium/third_party/dawn/tools/src/cmd/cts/common/deps.go67
-rw-r--r--chromium/third_party/dawn/tools/src/cmd/cts/common/paths.go38
-rw-r--r--chromium/third_party/dawn/tools/src/cmd/cts/common/results.go355
-rw-r--r--chromium/third_party/dawn/tools/src/cmd/cts/config.json56
-rw-r--r--chromium/third_party/dawn/tools/src/cmd/cts/export/export.go317
-rw-r--r--chromium/third_party/dawn/tools/src/cmd/cts/format/format.go55
-rw-r--r--chromium/third_party/dawn/tools/src/cmd/cts/main.go55
-rw-r--r--chromium/third_party/dawn/tools/src/cmd/cts/merge/merge.go76
-rw-r--r--chromium/third_party/dawn/tools/src/cmd/cts/results/results.go79
-rw-r--r--chromium/third_party/dawn/tools/src/cmd/cts/roll/roll.go629
-rw-r--r--chromium/third_party/dawn/tools/src/cmd/cts/roll/roll_test.go75
-rw-r--r--chromium/third_party/dawn/tools/src/cmd/cts/time/time.go148
-rw-r--r--chromium/third_party/dawn/tools/src/cmd/cts/update/update.go87
-rw-r--r--chromium/third_party/dawn/tools/src/cmd/intrinsic-gen/ast/ast.go (renamed from chromium/third_party/dawn/tools/src/cmd/builtin-gen/ast/ast.go)137
-rw-r--r--chromium/third_party/dawn/tools/src/cmd/intrinsic-gen/gen/builtin_table.go (renamed from chromium/third_party/dawn/tools/src/cmd/builtin-gen/gen/builtin_table.go)242
-rw-r--r--chromium/third_party/dawn/tools/src/cmd/intrinsic-gen/gen/generate.go (renamed from chromium/third_party/dawn/tools/src/cmd/builtin-gen/gen/generate.go)18
-rw-r--r--chromium/third_party/dawn/tools/src/cmd/intrinsic-gen/gen/permutate.go (renamed from chromium/third_party/dawn/tools/src/cmd/builtin-gen/gen/permutate.go)59
-rw-r--r--chromium/third_party/dawn/tools/src/cmd/intrinsic-gen/lexer/lexer.go (renamed from chromium/third_party/dawn/tools/src/cmd/builtin-gen/lexer/lexer.go)43
-rw-r--r--chromium/third_party/dawn/tools/src/cmd/intrinsic-gen/lexer/lexer_test.go (renamed from chromium/third_party/dawn/tools/src/cmd/builtin-gen/lexer/lexer_test.go)67
-rw-r--r--chromium/third_party/dawn/tools/src/cmd/intrinsic-gen/main.go (renamed from chromium/third_party/dawn/tools/src/cmd/builtin-gen/main.go)20
-rw-r--r--chromium/third_party/dawn/tools/src/cmd/intrinsic-gen/parser/parser.go (renamed from chromium/third_party/dawn/tools/src/cmd/builtin-gen/parser/parser.go)180
-rw-r--r--chromium/third_party/dawn/tools/src/cmd/intrinsic-gen/parser/parser_test.go705
-rw-r--r--chromium/third_party/dawn/tools/src/cmd/intrinsic-gen/resolver/resolve.go (renamed from chromium/third_party/dawn/tools/src/cmd/builtin-gen/resolver/resolve.go)202
-rw-r--r--chromium/third_party/dawn/tools/src/cmd/intrinsic-gen/resolver/resolver_test.go (renamed from chromium/third_party/dawn/tools/src/cmd/builtin-gen/resolver/resolver_test.go)233
-rw-r--r--chromium/third_party/dawn/tools/src/cmd/intrinsic-gen/sem/sem.go (renamed from chromium/third_party/dawn/tools/src/cmd/builtin-gen/sem/sem.go)85
-rw-r--r--chromium/third_party/dawn/tools/src/cmd/intrinsic-gen/tok/tok.go (renamed from chromium/third_party/dawn/tools/src/cmd/builtin-gen/tok/tok.go)32
-rw-r--r--chromium/third_party/dawn/tools/src/cmd/perfmon/main.go514
-rw-r--r--chromium/third_party/dawn/tools/src/container/set.go38
-rw-r--r--chromium/third_party/dawn/tools/src/container/set_test.go50
-rw-r--r--chromium/third_party/dawn/tools/src/cts/expectations/diagnostic.go56
-rw-r--r--chromium/third_party/dawn/tools/src/cts/expectations/expectations.go230
-rw-r--r--chromium/third_party/dawn/tools/src/cts/expectations/parse.go298
-rw-r--r--chromium/third_party/dawn/tools/src/cts/expectations/parse_test.go472
-rw-r--r--chromium/third_party/dawn/tools/src/cts/expectations/update.go610
-rw-r--r--chromium/third_party/dawn/tools/src/cts/expectations/update_test.go672
-rw-r--r--chromium/third_party/dawn/tools/src/cts/query/errors.go33
-rw-r--r--chromium/third_party/dawn/tools/src/cts/query/query.go12
-rw-r--r--chromium/third_party/dawn/tools/src/cts/query/tree.go404
-rw-r--r--chromium/third_party/dawn/tools/src/cts/query/tree_test.go934
-rw-r--r--chromium/third_party/dawn/tools/src/cts/result/mvt.go146
-rw-r--r--chromium/third_party/dawn/tools/src/cts/result/mvt_test.go117
-rw-r--r--chromium/third_party/dawn/tools/src/cts/result/result.go285
-rw-r--r--chromium/third_party/dawn/tools/src/cts/result/result_test.go499
-rw-r--r--chromium/third_party/dawn/tools/src/cts/result/status.go11
-rw-r--r--chromium/third_party/dawn/tools/src/cts/result/status_test.go54
-rw-r--r--chromium/third_party/dawn/tools/src/gerrit/gerrit.go78
-rw-r--r--chromium/third_party/dawn/tools/src/gerrit/gerrit_test.go64
-rw-r--r--chromium/third_party/dawn/tools/src/git/git.go58
-rw-r--r--chromium/third_party/dawn/tools/src/resultsdb/resultsdb.go2
-rw-r--r--chromium/third_party/dawn/tools/src/subcmd/subcmd.go131
-rw-r--r--chromium/third_party/dawn/tools/src/utils/paths.go77
-rw-r--r--chromium/third_party/dawn/tools/src/utils/paths_test.go51
-rw-r--r--chromium/third_party/dawn/webgpu-cts/expectations.txt656
-rwxr-xr-xchromium/third_party/dawn/webgpu-cts/scripts/compile_src.py76
-rwxr-xr-xchromium/third_party/dawn/webgpu-cts/scripts/gen_ts_dep_lists.py4
-rwxr-xr-xchromium/third_party/dawn/webgpu-cts/scripts/list.py8
2009 files changed, 353494 insertions, 222248 deletions
diff --git a/chromium/third_party/dawn/.clang-format b/chromium/third_party/dawn/.clang-format
index ff58eead320..d9e1846d210 100644
--- a/chromium/third_party/dawn/.clang-format
+++ b/chromium/third_party/dawn/.clang-format
@@ -1,8 +1,5 @@
# http://clang.llvm.org/docs/ClangFormatStyleOptions.html
BasedOnStyle: Chromium
-Standard: Cpp11
-
-AllowShortFunctionsOnASingleLine: false
ColumnLimit: 100
@@ -10,11 +7,4 @@ ColumnLimit: 100
IndentWidth: 4
ObjCBlockIndentWidth: 4
AccessModifierOffset: -2
-
-CompactNamespaces: true
-
-# This should result in only one indentation level with compacted namespaces
-NamespaceIndentation: All
-
-# Use this option once clang-format 6 is out.
-IndentPPDirectives: AfterHash
+InsertBraces: true
\ No newline at end of file
diff --git a/chromium/third_party/dawn/.clang-tidy b/chromium/third_party/dawn/.clang-tidy
new file mode 100644
index 00000000000..1dc5f3b8e50
--- /dev/null
+++ b/chromium/third_party/dawn/.clang-tidy
@@ -0,0 +1,163 @@
+FormatStyle: file
+Checks: "-*,\
+ abseil-*,\
+ -abseil-string-find-startswith,\
+ -abseil-string-find-str-contains,\
+ bugprone-*,\
+ -bugprone-branch-clone,\
+ -bugprone-copy-constructor-init,\
+ -bugprone-easily-swappable-parameters,\
+ -bugprone-forwarding-reference-overload,\
+ -bugprone-implicit-widening-of-multiplication-result,\
+ -bugprone-lambda-function-name,\
+ -bugprone-macro-parentheses,\
+ -bugprone-misplaced-widening-cast,\
+ -bugprone-narrowing-conversions,\
+ -bugprone-parent-virtual-call,\
+ -bugprone-reserved-identifier,\
+ -bugprone-signed-char-misuse,\
+ -bugprone-sizeof-expression,\
+ -bugprone-string-constructor,\
+ -bugprone-suspicious-include,\
+ -bugprone-too-small-loop-variable,\
+ -bugprone-unhandled-self-assignment,\
+ -bugprone-use-after-move,\
+ cert-*,\
+ -cert-dcl16-c,\
+ -cert-dcl21-cpp,\
+ -cert-dcl37-c,\
+ -cert-dcl50-cpp,\
+ -cert-dcl51-cpp,\
+ -cert-dcl54-cpp,\
+ -cert-dcl58-cpp,\
+ -cert-err33-c,\
+ -cert-msc30-c,\
+ -cert-msc32-c,\
+ -cert-msc50-cpp,\
+ -cert-msc51-cpp,\
+ -cert-oop54-cpp,\
+ -cert-str34-c,\
+ -cert-str34-c,\
+ -cert-str34-c,\
+ -cert-str34-c,\
+ -clang-analyzer-*,\
+ concurrency-*,\
+ -concurrency-mt-unsafe,\
+ cppcoreguidelines-*,\
+ -concurrency-mt-unsafe,\
+ -cppcoreguidelines-avoid-c-arrays,\
+ -cppcoreguidelines-avoid-goto,\
+ -cppcoreguidelines-avoid-magic-numbers,\
+ -cppcoreguidelines-avoid-non-const-global-variables,\
+ -cppcoreguidelines-c-copy-assignment-signature,\
+ -cppcoreguidelines-explicit-virtual-functions,\
+ -cppcoreguidelines-init-variables,\
+ -cppcoreguidelines-interfaces-global-init,\
+ -cppcoreguidelines-macro-usage,\
+ -cppcoreguidelines-narrowing-conversions,\
+ -cppcoreguidelines-no-malloc,\
+ -cppcoreguidelines-non-private-member-variables-in-classes,\
+ -cppcoreguidelines-owning-memory,\
+ -cppcoreguidelines-prefer-member-initializer,\
+ -cppcoreguidelines-pro-bounds-array-to-pointer-decay,\
+ -cppcoreguidelines-pro-bounds-constant-array-index,\
+ -cppcoreguidelines-pro-bounds-pointer-arithmetic,\
+ -cppcoreguidelines-pro-type-const-cast,\
+ -cppcoreguidelines-pro-type-member-init,\
+ -cppcoreguidelines-pro-type-reinterpret-cast,\
+ -cppcoreguidelines-pro-type-static-cast-downcast,\
+ -cppcoreguidelines-pro-type-union-access,\
+ -cppcoreguidelines-pro-type-vararg,\
+ -cppcoreguidelines-slicing,\
+ -cppcoreguidelines-special-member-functions,\
+ -cppcoreguidelines-virtual-class-destructor,\
+ google-*,\
+ -google-default-arguments,\
+ -google-explicit-constructor,\
+ -google-readability-avoid-underscore-in-googletest-name,\
+ -google-readability-braces-around-statements,\
+ -google-readability-casting,\
+ -google-readability-namespace-comments,\
+ -google-readability-todo,\
+ -google-runtime-int,\
+ -google-upgrade-googletest-case,\
+ misc-*,\
+ -misc-misplaced-const,\
+ -misc-new-delete-overloads,\
+ -misc-non-private-member-variables-in-classes,\
+ -misc-no-recursion,\
+ -misc-redundant-expression,\
+ -misc-uniqueptr-reset-release,\
+ -misc-unconventional-assign-operator,\
+ -misc-unused-parameters,\
+ -misc-unused-using-decls,\
+ modernize-*,\
+ -modernize-avoid-c-arrays,\
+ -modernize-concat-nested-namespaces,\
+ -modernize-deprecated-headers,\
+ -modernize-loop-convert,\
+ -modernize-macro-to-enum,\
+ -modernize-make-unique,\
+ -modernize-pass-by-value,\
+ -modernize-raw-string-literal,\
+ -modernize-redundant-void-arg,\
+ -modernize-return-braced-init-list,\
+ -modernize-unary-static-assert,\
+ -modernize-use-auto,\
+ -modernize-use-bool-literals,\
+ -modernize-use-default-member-init,\
+ -modernize-use-emplace,\
+ -modernize-use-equals-default,\
+ -modernize-use-equals-delete,\
+ -modernize-use-nodiscard,\
+ -modernize-use-nullptr,\
+ -modernize-use-override,\
+ -modernize-use-trailing-return-type,\
+ -modernize-use-transparent-functors,\
+ -modernize-use-using,\
+ performance-*,\
+ -performance-faster-string-find,\
+ -performance-for-range-copy,\
+ -performance-inefficient-algorithm,\
+ -performance-inefficient-string-concatenation,\
+ -performance-inefficient-vector-operation,\
+ -performance-move-const-arg,\
+ -performance-no-automatic-move,\
+ -performance-noexcept-move-constructor,\
+ -performance-no-int-to-ptr,\
+ -performance-trivially-destructible,\
+ -performance-unnecessary-copy-initialization,\
+ -performance-unnecessary-value-param,\
+ portability-*,\
+ readability-*,\
+ -readability-avoid-const-params-in-decls,\
+ -readability-braces-around-statements,\
+ -readability-const-return-type,\
+ -readability-container-data-pointer,\
+ -readability-container-size-empty,\
+ -readability-convert-member-functions-to-static,\
+ -readability-else-after-return,\
+ -readability-function-cognitive-complexity,\
+ -readability-identifier-length,\
+ -readability-implicit-bool-conversion,\
+ -readability-inconsistent-declaration-parameter-name,\
+ -readability-isolate-declaration,\
+ -readability-magic-numbers,\
+ -readability-make-member-function-const,\
+ -readability-named-parameter,\
+ -readability-non-const-parameter,\
+ -readability-qualified-auto,\
+ -readability-redundant-access-specifiers,\
+ -readability-redundant-control-flow,\
+ -readability-redundant-declaration,\
+ -readability-redundant-member-init,\
+ -readability-redundant-smartptr-get,\
+ -readability-redundant-string-cstr,\
+ -readability-redundant-string-init,\
+ -readability-simplify-boolean-expr,\
+ -readability-static-accessed-through-instance,\
+ -readability-static-definition-in-anonymous-namespace,\
+ -readability-suspicious-call-argument,\
+ -readability-uppercase-literal-suffix,\
+ -readability-use-anyofallof
+ "
diff --git a/chromium/third_party/dawn/.gn b/chromium/third_party/dawn/.gn
index 38604408470..9b9c5b82bea 100644
--- a/chromium/third_party/dawn/.gn
+++ b/chromium/third_party/dawn/.gn
@@ -20,17 +20,12 @@ script_executable = "python3"
default_args = {
clang_use_chrome_plugins = false
- # Override the mac version so standalone Dawn compiles with at least 10.11
- # which allows us to not skip the -Wunguarded-availability warning and get
- # proper warnings for use of APIs that are 10.12 and above (even if
- # Chromium is still on 10.10).
- mac_deployment_target = "10.11.0"
- mac_min_system_version = "10.11.0"
-
angle_enable_abseil = false
angle_standalone = false
angle_build_all = false
+ angle_build_tests = false
angle_has_rapidjson = false
+ angle_use_wayland = false
angle_vulkan_headers_dir = "//third_party/vulkan-deps/vulkan-headers/src"
angle_vulkan_loader_dir = "//third_party/vulkan-deps/vulkan-loader/src"
angle_vulkan_tools_dir = "//third_party/vulkan-deps/vulkan-tools/src"
@@ -39,11 +34,3 @@ default_args = {
vma_vulkan_headers_dir = "//third_party/vulkan-deps/vulkan-headers/src"
}
-
-check_targets = [
- # Everything in BUILD.gn
- "//:*",
-
- # Everything in third_party/BUILD.gn
- "//third_party/:*",
-]
diff --git a/chromium/third_party/dawn/AUTHORS.dawn b/chromium/third_party/dawn/AUTHORS.dawn
deleted file mode 100644
index 32a6c3cec68..00000000000
--- a/chromium/third_party/dawn/AUTHORS.dawn
+++ /dev/null
@@ -1,6 +0,0 @@
-# This is the list of Dawn authors for copyright purposes.
-#
-# This does not necessarily list everyone who has contributed code, since in
-# some cases, their employer may be the copyright holder. To see the full list
-# of contributors, see the revision history in source control.
-Google Inc.
diff --git a/chromium/third_party/dawn/AUTHORS.tint b/chromium/third_party/dawn/AUTHORS.tint
deleted file mode 100644
index a66d09ee44a..00000000000
--- a/chromium/third_party/dawn/AUTHORS.tint
+++ /dev/null
@@ -1,8 +0,0 @@
-# This is the list of the Tint authors for copyright purposes.
-#
-# This does not necessarily list everyone who has contributed code, since in
-# some cases, their employer may be the copyright holder. To see the full list
-# of contributors, see the revision history in source control.
-
-Google LLC
-Vasyl Teliman
diff --git a/chromium/third_party/dawn/CMakeLists.txt b/chromium/third_party/dawn/CMakeLists.txt
index 6c410a97c6c..287b903ac44 100644
--- a/chromium/third_party/dawn/CMakeLists.txt
+++ b/chromium/third_party/dawn/CMakeLists.txt
@@ -14,7 +14,7 @@
cmake_minimum_required(VERSION 3.10.2)
-# When upgrading to CMake 3.11 we can remove DAWN_DUMMY_FILE because source-less add_library
+# When upgrading to CMake 3.11 we can remove DAWN_PLACEHOLDER_FILE because source-less add_library
# becomes available.
# When upgrading to CMake 3.12 we should add CONFIGURE_DEPENDS to DawnGenerator to rerun CMake in
# case any of the generator files changes. We should also remove the CACHE "" FORCE stuff to
@@ -46,7 +46,7 @@ set(DAWN_SRC_DIR "${Dawn_SOURCE_DIR}/src")
set(DAWN_INCLUDE_DIR "${Dawn_SOURCE_DIR}/include")
set(DAWN_TEMPLATE_DIR "${DAWN_GENERATOR_DIR}/templates")
-set(DAWN_DUMMY_FILE "${DAWN_SRC_DIR}/Dummy.cpp")
+set(DAWN_PLACEHOLDER_FILE "${DAWN_SRC_DIR}/Placeholder.cpp")
################################################################################
# Configuration options
@@ -82,6 +82,7 @@ set(ENABLE_METAL OFF)
set(ENABLE_OPENGLES OFF)
set(ENABLE_DESKTOP_GL OFF)
set(ENABLE_VULKAN OFF)
+set(USE_WAYLAND OFF)
set(USE_X11 OFF)
set(BUILD_SAMPLES OFF)
if (WIN32)
@@ -124,6 +125,7 @@ option_if_not_defined(DAWN_ENABLE_DESKTOP_GL "Enable compilation of the OpenGL b
option_if_not_defined(DAWN_ENABLE_OPENGLES "Enable compilation of the OpenGL ES backend" ${ENABLE_OPENGLES})
option_if_not_defined(DAWN_ENABLE_VULKAN "Enable compilation of the Vulkan backend" ${ENABLE_VULKAN})
option_if_not_defined(DAWN_ALWAYS_ASSERT "Enable assertions on all build types" OFF)
+option_if_not_defined(DAWN_USE_WAYLAND "Enable support for Wayland surface" ${USE_WAYLAND})
option_if_not_defined(DAWN_USE_X11 "Enable support for X11 surface" ${USE_X11})
option_if_not_defined(DAWN_BUILD_SAMPLES "Enables building Dawn's samples" ${BUILD_SAMPLES})
@@ -172,6 +174,7 @@ function(common_compile_options TARGET)
target_compile_options(${TARGET} PRIVATE
-fno-exceptions
-fno-rtti
+ -fvisibility-inlines-hidden
)
if (${DAWN_ENABLE_MSAN})
@@ -231,6 +234,9 @@ endif()
if (DAWN_ENABLE_VULKAN)
target_compile_definitions(dawn_internal_config INTERFACE "DAWN_ENABLE_BACKEND_VULKAN")
endif()
+if (DAWN_USE_WAYLAND)
+ target_compile_definitions(dawn_internal_config INTERFACE "DAWN_USE_WAYLAND")
+endif()
if (DAWN_USE_X11)
target_compile_definitions(dawn_internal_config INTERFACE "DAWN_USE_X11")
endif()
@@ -564,7 +570,14 @@ endfunction()
################################################################################
add_subdirectory(third_party)
+
+# TODO(crbug.com/tint/455): Tint does not currently build with CMake when
+# BUILD_SHARED_LIBS=1, so always build it as static for now.
+set(BUILD_SHARED_LIBS_SAVED ${BUILD_SHARED_LIBS})
+set(BUILD_SHARED_LIBS 0)
add_subdirectory(src/tint)
+set(BUILD_SHARED_LIBS ${BUILD_SHARED_LIBS_SAVED})
+
add_subdirectory(generator)
add_subdirectory(src/dawn)
diff --git a/chromium/third_party/dawn/DEPS b/chromium/third_party/dawn/DEPS
index e4590e5e0c2..8fd9da0e7e7 100644
--- a/chromium/third_party/dawn/DEPS
+++ b/chromium/third_party/dawn/DEPS
@@ -26,16 +26,19 @@ vars = {
# GN variable required by //testing that will be output in the gclient_args.gni
'generate_location_tags': False,
+
+ # Fetch clang-tidy into the same bin/ directory as our clang binary.
+ 'checkout_clang_tidy': False,
}
deps = {
# Dependencies required to use GN/Clang in standalone
'build': {
- 'url': '{chromium_git}/chromium/src/build@c7876b5a44308b94074287939244bc562007de69',
+ 'url': '{chromium_git}/chromium/src/build@87b04ad66530e4a571cef36d6e71ef737d23a887',
'condition': 'dawn_standalone',
},
'buildtools': {
- 'url': '{chromium_git}/chromium/src/buildtools@e1471b21ee9c6765ee95e9db0c76fe997ccad35c',
+ 'url': '{chromium_git}/chromium/src/buildtools@f0d740e4e2f803e39dfd5d8d11f7d87bdf489514',
'condition': 'dawn_standalone',
},
'buildtools/clang_format/script': {
@@ -78,7 +81,7 @@ deps = {
},
'tools/clang': {
- 'url': '{chromium_git}/chromium/src/tools/clang@df9b14e26c163dd8e2c0ab081e2689f038ae7141',
+ 'url': '{chromium_git}/chromium/src/tools/clang@3c4a622d9f0b0ce5ec2a438189d46c695216b324',
'condition': 'dawn_standalone',
},
'tools/clang/dsymutil': {
@@ -96,7 +99,7 @@ deps = {
'condition': 'dawn_standalone',
},
'third_party/googletest': {
- 'url': '{chromium_git}/external/github.com/google/googletest@6b74da4757a549563d7c37c8fae3e704662a043b',
+ 'url': '{chromium_git}/external/github.com/google/googletest@bda85449f48f2d80a494c8c07766b6aba3170f3b',
'condition': 'dawn_standalone',
},
# This is a dependency of //testing
@@ -117,8 +120,7 @@ deps = {
# GLFW for tests and samples
'third_party/glfw': {
- 'url': '{chromium_git}/external/github.com/glfw/glfw@94773111300fee0453844a4c9407af7e880b4df8',
- 'condition': 'dawn_standalone',
+ 'url': '{chromium_git}/external/github.com/glfw/glfw@62e175ef9fae75335575964c845a302447c012c7',
},
'third_party/vulkan_memory_allocator': {
@@ -127,17 +129,17 @@ deps = {
},
'third_party/angle': {
- 'url': '{chromium_git}/angle/angle@8718783526307a3fbb35d4c1ad4e8101262a0d73',
+ 'url': '{chromium_git}/angle/angle@c11af00ae1d10d43a71a28c07f99969665c4ea2a',
'condition': 'dawn_standalone',
},
'third_party/swiftshader': {
- 'url': '{swiftshader_git}/SwiftShader@1d450ae99a0f4ade28dd55dac962f8b28d990376',
+ 'url': '{swiftshader_git}/SwiftShader@26243894edb812abe0f120ac36c400439848dacb',
'condition': 'dawn_standalone',
},
'third_party/vulkan-deps': {
- 'url': '{chromium_git}/vulkan-deps@7e9ab0686bf4d4fa9c52eeb8def33b2057624987',
+ 'url': '{chromium_git}/vulkan-deps@23b710f1a0b3c44d51035c6400a554415f95d9c6',
'condition': 'dawn_standalone',
},
@@ -153,7 +155,7 @@ deps = {
# WebGPU CTS - not used directly by Dawn, only transitively by Chromium.
'third_party/webgpu-cts': {
- 'url': '{chromium_git}/external/github.com/gpuweb/cts@9861f5e1d92559c03a8d769117124de97adc47c4',
+ 'url': '{chromium_git}/external/github.com/gpuweb/cts@f19f0c38c943499913a9ab59b0fe0c26168a2368',
'condition': 'build_with_chromium',
},
@@ -167,13 +169,9 @@ deps = {
'condition': 'dawn_node',
},
'third_party/gpuweb': {
- 'url': '{github_git}/gpuweb/gpuweb.git@881403b5fda2d9ac9ffc5daa24e34738205bf155',
+ 'url': '{github_git}/gpuweb/gpuweb.git@3c4734b09c68eb800b15da5e9ecefeca735fa7df',
'condition': 'dawn_node',
},
- 'third_party/gpuweb-cts': {
- 'url': '{chromium_git}/external/github.com/gpuweb/cts@9861f5e1d92559c03a8d769117124de97adc47c4',
- 'condition': 'dawn_standalone',
- },
'tools/golang': {
'condition': 'dawn_node',
@@ -243,6 +241,15 @@ hooks = [
'condition': 'dawn_standalone',
},
{
+ # This is also supposed to support the same set of platforms as 'clang'
+ # above. LLVM ToT support isn't provided at the moment.
+ 'name': 'clang_tidy',
+ 'pattern': '.',
+ 'condition': 'dawn_standalone and checkout_clang_tidy',
+ 'action': ['python3', 'tools/clang/scripts/update.py',
+ '--package=clang-tidy'],
+ },
+ {
# Pull rc binaries using checked-in hashes.
'name': 'rc_win',
'pattern': '.',
diff --git a/chromium/third_party/dawn/PRESUBMIT.py b/chromium/third_party/dawn/PRESUBMIT.py
index 8520b75d855..7cfb3ae3099 100644
--- a/chromium/third_party/dawn/PRESUBMIT.py
+++ b/chromium/third_party/dawn/PRESUBMIT.py
@@ -80,7 +80,9 @@ def _CheckNonInclusiveLanguage(input_api, output_api, source_file_filter=None):
matches = []
for f in input_api.AffectedFiles(include_deletes=False,
file_filter=source_file_filter):
- for line_num, line in f.ChangedContents():
+ line_num = 0
+ for line in f.NewContents():
+ line_num += 1
for reg in NONINCLUSIVE_REGEX_LIST:
match = reg.search(line)
if match:
@@ -99,11 +101,29 @@ def _CheckNonInclusiveLanguage(input_api, output_api, source_file_filter=None):
def _NonInclusiveFileFilter(file):
filter_list = [
+ "Doxyfile", # References to main pages
"PRESUBMIT.py", # Non-inclusive language check data
+ "PRESUBMIT.py.tint", # Non-inclusive language check data
+ "docs/dawn/debug_markers.md", # External URL
+ "docs/dawn/infra.md", # Infra settings
"docs/tint/spirv-input-output-variables.md", # External URL
- "test/tint/samples/compute_boids.wgsl ", # External URL
+ "infra/config/global/generated/cr-buildbucket.cfg", # Infra settings
+ "infra/config/global/main.star", # Infra settings
+ "infra/kokoro/windows/build.bat", # External URL
+ "src/dawn/common/GPUInfo.cpp", # External URL
+ "src/dawn/native/metal/BackendMTL.mm", # OSX Constant
+ "src/dawn/native/vulkan/SamplerVk.cpp", # External URL
+ "src/dawn/native/vulkan/TextureVk.cpp", # External URL
+ "src/dawn/node/tools/src/cmd/run-cts/main.go", # Terminal type name
+ "src/dawn/samples/ComputeBoids.cpp", # External URL
+ "src/dawn/tests/end2end/DepthBiasTests.cpp", # External URL
+ "test/tint/samples/compute_boids.wgsl", # External URL
+ "third_party/khronos/KHR/khrplatform.h", # Third party file
+ "tools/roll-all", # Branch name
+ "tools/src/container/key.go", # External URL
+ "go.sum", # External URL
]
- return file in filter_list
+ return file.LocalPath() not in filter_list
def _DoCommonChecks(input_api, output_api):
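For reference, the updated check above scans every line of each affected file's new contents (not just the changed lines) and skips files whose path is in the filter list. A minimal standalone sketch of that loop, using plain file I/O instead of the depot_tools `input_api` and a placeholder pattern list, might look like this:

```python
import re

# Placeholder patterns; PRESUBMIT.py defines the real NONINCLUSIVE_REGEX_LIST.
NONINCLUSIVE_REGEX_LIST = [re.compile(r"(?i)black[-_]?list"),
                           re.compile(r"(?i)white[-_]?list")]
FILTER_LIST = ["docs/tint/spirv-input-output-variables.md"]  # paths exempt from the check

def check_file(path):
    """Return one 'path (line): ...' message per non-inclusive term found in the file."""
    if path in FILTER_LIST:
        return []
    matches = []
    with open(path, encoding="utf-8") as f:
        for line_num, line in enumerate(f, start=1):
            for reg in NONINCLUSIVE_REGEX_LIST:
                match = reg.search(line)
                if match:
                    matches.append("{} ({}): found non-inclusive language: {}".format(
                        path, line_num, match.group(0)))
    return matches
```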
diff --git a/chromium/third_party/dawn/PRESUBMIT.py.dawn b/chromium/third_party/dawn/PRESUBMIT.py.dawn
deleted file mode 100644
index 899e0e282ae..00000000000
--- a/chromium/third_party/dawn/PRESUBMIT.py.dawn
+++ /dev/null
@@ -1,38 +0,0 @@
-# Copyright 2018 The Dawn Authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-import platform
-import subprocess
-
-USE_PYTHON3 = True
-
-
-def _DoCommonChecks(input_api, output_api):
- results = []
- results.extend(
- input_api.canned_checks.CheckChangedLUCIConfigs(input_api, output_api))
- results.extend(
- input_api.canned_checks.CheckPatchFormatted(input_api,
- output_api,
- check_python=True))
- return results
-
-
-def CheckChangeOnUpload(input_api, output_api):
- return _DoCommonChecks(input_api, output_api)
-
-
-def CheckChangeOnCommit(input_api, output_api):
- return _DoCommonChecks(input_api, output_api)
diff --git a/chromium/third_party/dawn/PRESUBMIT.py.tint b/chromium/third_party/dawn/PRESUBMIT.py.tint
deleted file mode 100755
index 97623c165f3..00000000000
--- a/chromium/third_party/dawn/PRESUBMIT.py.tint
+++ /dev/null
@@ -1,167 +0,0 @@
-# Copyright 2020 The Tint Authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Presubmit script for Tint.
-See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
-for more details about the presubmit API built into depot_tools.
-"""
-
-import re
-
-USE_PYTHON3 = True
-
-
-def _LicenseHeader(input_api):
- """Returns the license header regexp."""
- # Accept any year number from 2019 to the current year
- current_year = int(input_api.time.strftime('%Y'))
- allowed_years = (str(s) for s in reversed(xrange(2019, current_year + 1)))
- years_re = '(' + '|'.join(allowed_years) + ')'
- license_header = (
- r'.*? Copyright( \(c\))? %(year)s The Tint [Aa]uthors\n '
- r'.*?\n'
- r'.*? Licensed under the Apache License, Version 2.0 (the "License");\n'
- r'.*? you may not use this file except in compliance with the License.\n'
- r'.*? You may obtain a copy of the License at\n'
- r'.*?\n'
- r'.*? http://www.apache.org/licenses/LICENSE-2.0\n'
- r'.*?\n'
- r'.*? Unless required by applicable law or agreed to in writing, software\n'
- r'.*? distributed under the License is distributed on an "AS IS" BASIS,\n'
- r'.*? WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n'
- r'.*? See the License for the specific language governing permissions and\n'
- r'.*? limitations under the License.\n') % {
- 'year': years_re,
- }
- return license_header
-
-
-REGEXES = [
- r"(?i)black[-_]?list",
- r"(?i)white[-_]?list",
- r"(?i)gr[ea]y[-_]?list",
- r"(?i)(first class citizen)",
- r"(?i)black[-_]?hat",
- r"(?i)white[-_]?hat",
- r"(?i)gr[ea]y[-_]?hat",
- r"(?i)master",
- r"(?i)slave",
- r"(?i)\bhim\b",
- r"(?i)\bhis\b",
- r"(?i)\bshe\b",
- r"(?i)\bher\b",
- r"(?i)\bguys\b",
- r"(?i)\bhers\b",
- r"(?i)\bman\b",
- r"(?i)\bwoman\b",
- r"(?i)\she\s",
- r"(?i)\she$",
- r"(?i)^he\s",
- r"(?i)^he$",
- r"(?i)\she['|\u2019]d\s",
- r"(?i)\she['|\u2019]d$",
- r"(?i)^he['|\u2019]d\s",
- r"(?i)^he['|\u2019]d$",
- r"(?i)\she['|\u2019]s\s",
- r"(?i)\she['|\u2019]s$",
- r"(?i)^he['|\u2019]s\s",
- r"(?i)^he['|\u2019]s$",
- r"(?i)\she['|\u2019]ll\s",
- r"(?i)\she['|\u2019]ll$",
- r"(?i)^he['|\u2019]ll\s",
- r"(?i)^he['|\u2019]ll$",
- r"(?i)grandfather",
- r"(?i)\bmitm\b",
- r"(?i)\bcrazy\b",
- r"(?i)\binsane\b",
- r"(?i)\bblind\sto\b",
- r"(?i)\bflying\sblind\b",
- r"(?i)\bblind\seye\b",
- r"(?i)\bcripple\b",
- r"(?i)\bcrippled\b",
- r"(?i)\bdumb\b",
- r"(?i)\bdummy\b",
- r"(?i)\bparanoid\b",
- r"(?i)\bsane\b",
- r"(?i)\bsanity\b",
- r"(?i)red[-_]?line",
-]
-
-REGEX_LIST = []
-for reg in REGEXES:
- REGEX_LIST.append(re.compile(reg))
-
-def CheckNonInclusiveLanguage(input_api, output_api, source_file_filter=None):
- """Checks the files for non-inclusive language."""
-
- matches = []
- for f in input_api.AffectedFiles(include_deletes=False,
- file_filter=source_file_filter):
- for line_num, line in f.ChangedContents():
- for reg in REGEX_LIST:
- match = reg.search(line)
- if match:
- matches.append(
- "{} ({}): found non-inclusive language: {}".format(
- f.LocalPath(), line_num, match.group(0)))
-
- if len(matches):
- return [
- output_api.PresubmitPromptWarning('Non-inclusive language found:',
- items=matches)
- ]
-
- return []
-
-
-def CheckChange(input_api, output_api):
- results = []
-
- results += input_api.canned_checks.CheckChangeHasDescription(
- input_api, output_api)
- results += input_api.canned_checks.CheckPatchFormatted(input_api,
- output_api,
- check_python=True)
- results += input_api.canned_checks.CheckGNFormatted(input_api, output_api)
- results += input_api.canned_checks.CheckChangeHasNoCrAndHasOnlyOneEol(
- input_api, output_api)
- results += input_api.canned_checks.CheckChangeHasNoTabs(
- input_api, output_api)
- results += input_api.canned_checks.CheckChangeTodoHasOwner(
- input_api, output_api)
- results += input_api.canned_checks.CheckChangeHasNoStrayWhitespace(
- input_api, output_api)
- results += input_api.canned_checks.CheckDoNotSubmit(input_api, output_api)
- results += input_api.canned_checks.CheckChangeLintsClean(input_api,
- output_api,
- lint_filters="")
-
- def NonInclusiveFileFilter(file):
- filter_list = [
- "docs/tint/spirv-input-output-variables.md", # External URL
- "test/tint/samples/compute_boids.wgsl ", # External URL
- ]
- return file in filter_list
-
- results += CheckNonInclusiveLanguage(input_api, output_api,
- NonInclusiveFileFilter)
-
- return results
-
-
-def CheckChangeOnUpload(input_api, output_api):
- return CheckChange(input_api, output_api)
-
-
-def CheckChangeOnCommit(input_api, output_api):
- return CheckChange(input_api, output_api)
diff --git a/chromium/third_party/dawn/README.md b/chromium/third_party/dawn/README.md
index 51049a8b52b..56c75409d2d 100644
--- a/chromium/third_party/dawn/README.md
+++ b/chromium/third_party/dawn/README.md
@@ -3,7 +3,7 @@
# Dawn, a WebGPU implementation
Dawn is an open-source and cross-platform implementation of the work-in-progress [WebGPU](https://webgpu.dev) standard.
-More precisely it implements [`webgpu.h`](https://github.com/webgpu-native/webgpu-headers/blob/master/webgpu.h) that is a one-to-one mapping with the WebGPU IDL.
+More precisely it implements [`webgpu.h`](https://github.com/webgpu-native/webgpu-headers/blob/main/webgpu.h) that is a one-to-one mapping with the WebGPU IDL.
Dawn is meant to be integrated as part of a larger system and is the underlying implementation of WebGPU in Chromium.
Dawn provides several WebGPU building blocks:
@@ -16,26 +16,32 @@ Dawn provides several WebGPU building blocks:
- **Vulkan** on Windows, Linux, ChromeOS, Android and Fuchsia
- OpenGL as best effort where available
- **A client-server implementation of WebGPU** for applications that are in a sandbox without access to native drivers
+ - **Tint** is a compiler for the WebGPU Shader Language (WGSL).
Helpful links:
- - [Dawn's bug tracker](https://bugs.chromium.org/p/dawn/issues/entry) if you find issues with Dawn.
+ - [Dawn bug tracker](https://bugs.chromium.org/p/dawn/issues/entry) if you find issues with Dawn.
+ - [Tint bug tracker](https://bugs.chromium.org/p/tint/issues/entry) if you find issues with Tint.
- [Dawn's mailing list](https://groups.google.com/forum/#!members/dawn-graphics) for other discussions related to Dawn.
- [Dawn's source code](https://dawn.googlesource.com/dawn)
- [Dawn's Matrix chatroom](https://matrix.to/#/#webgpu-dawn:matrix.org) for live discussion around contributing or using Dawn.
- [WebGPU's Matrix chatroom](https://matrix.to/#/#WebGPU:matrix.org)
+ - [Tint mirror](https://dawn.googlesource.com/tint) for standalone usage.
## Documentation table of contents
Developer documentation:
- [Dawn overview](docs/dawn/overview.md)
- - [Building Dawn](docs/dawn/building.md)
- - [Contributing to Dawn](docs/dawn/contributing.md)
+ - [Building](docs/building.md)
+ - [Contributing](CONTRIBUTING.md)
+ - [Code of Conduct](CODE_OF_CONDUCT.md)
- [Testing Dawn](docs/dawn/testing.md)
- [Debugging Dawn](docs/dawn/debugging.md)
- [Dawn's infrastructure](docs/dawn/infra.md)
- [Dawn errors](docs/dawn/errors.md)
+ - [Tint experimental extensions](docs/tint/experimental_extensions.md)
+
User documentation: (TODO, figure out what overlaps with the webgpu.h docs)
@@ -50,110 +56,3 @@ Apache 2.0 Public License, please see [LICENSE](/LICENSE).
## Disclaimer
This is not an officially supported Google product.
-
-# Tint
-
-Tint is a compiler for the WebGPU Shader Language (WGSL).
-
-This is not an officially supported Google product.
-
-## Requirements
- * Git
- * CMake (3.10.2 or later)
- * Ninja (or other build tool)
- * Python, for fetching dependencies
- * [depot_tools] in your path
-
-## Build options
- * `TINT_BUILD_SPV_READER` : enable the SPIR-V input reader (off by default)
- * `TINT_BUILD_WGSL_READER` : enable the WGSL input reader (on by default)
- * `TINT_BUILD_SPV_WRITER` : enable the SPIR-V output writer (on by default)
- * `TINT_BUILD_WGSL_WRITER` : enable the WGSL output writer (on by default)
- * `TINT_BUILD_FUZZERS` : enable building fuzzzers (off by default)
-
-## Building
-Tint uses Chromium dependency management so you need to install [depot_tools]
-and add it to your PATH.
-
-[depot_tools]: http://commondatastorage.googleapis.com/chrome-infra-docs/flat/depot_tools/docs/html/depot_tools_tutorial.html#_setting_up
-
-### Getting source & dependencies
-
-```sh
-# Clone the repo as "tint"
-git clone https://dawn.googlesource.com/tint tint
-cd tint
-
-# Bootstrap the gclient configuration
-cp scripts/standalone.gclient .gclient
-
-# Fetch external dependencies and toolchains with gclient
-gclient sync
-```
-
-### Compiling using CMake + Ninja
-```sh
-mkdir -p out/Debug
-cd out/Debug
-cmake -GNinja ../..
-ninja # or autoninja
-```
-
-### Compiling using CMake + make
-```sh
-mkdir -p out/Debug
-cd out/Debug
-cmake ../..
-make # -j N for N-way parallel build
-```
-
-### Compiling using gn + ninja
-```sh
-mkdir -p out/Debug
-gn gen out/Debug
-autoninja -C out/Debug
-```
-
-### Fuzzers on MacOS
-If you are attempting fuzz, using `TINT_BUILD_FUZZERS=ON`, the version of llvm
-in the XCode SDK does not have the needed libfuzzer functionality included.
-
-The build error that you will see from using the XCode SDK will look something
-like this:
-```
-ld: file not found:/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/lib/clang/11.0.0/lib/darwin/libclang_rt.fuzzer_osx.a
-```
-
-The solution to this problem is to use a full version llvm, like what you would
-get via homebrew, `brew install llvm`, and use something like `CC=<path to full
-clang> cmake ..` to setup a build using that toolchain.
-
-### Checking [chromium-style] issues in CMake builds
-The gn based work flow uses the Chromium toolchain for building in anticipation
-of integration of Tint into Chromium based projects. This toolchain has
-additional plugins for checking for style issues, which are marked with
-[chromium-style] in log messages. This means that this toolchain is more strict
-then the default clang toolchain.
-
-In the future we will have a CQ that will build this work flow and flag issues
-automatically. Until that is in place, to avoid causing breakages you can run
-the [chromium-style] checks using the CMake based work flows. This requires
-setting `CC` to the version of clang checked out by `gclient sync` and setting
-the `TINT_CHECK_CHROMIUM_STYLE` to `ON`.
-
-```sh
-mkdir -p out/style
-cd out/style
-cmake ../..
-CC=../../third_party/llvm-build/Release+Asserts/bin/clang cmake -DTINT_CHECK_CHROMIUM_STYLE=ON ../../ # add -GNinja for ninja builds
-```
-
-## Issues
-Please file any issues or feature requests at
-https://bugs.chromium.org/p/tint/issues/entry
-
-## Contributing
-Please see the CONTRIBUTING and CODE_OF_CONDUCT files on how to contribute to
-Tint.
-
-Tint has a process for supporting [experimental extensions](docs/tint/experimental_extensions.md).
diff --git a/chromium/third_party/dawn/README.md.dawn b/chromium/third_party/dawn/README.md.dawn
deleted file mode 100644
index 18713889c20..00000000000
--- a/chromium/third_party/dawn/README.md.dawn
+++ /dev/null
@@ -1,52 +0,0 @@
-![Dawn's logo: a sun rising behind a stylized mountain inspired by the WebGPU logo. The text "Dawn" is written below it.](docs/imgs/dawn_logo.png "Dawn's logo")
-
-# Dawn, a WebGPU implementation
-
-Dawn is an open-source and cross-platform implementation of the work-in-progress [WebGPU](https://webgpu.dev) standard.
-More precisely it implements [`webgpu.h`](https://github.com/webgpu-native/webgpu-headers/blob/master/webgpu.h) that is a one-to-one mapping with the WebGPU IDL.
-Dawn is meant to be integrated as part of a larger system and is the underlying implementation of WebGPU in Chromium.
-
-Dawn provides several WebGPU building blocks:
- - **WebGPU C/C++ headers** that applications and other building blocks use.
- - The `webgpu.h` version that Dawn implements.
- - A C++ wrapper for the `webgpu.h`.
- - **A "native" implementation of WebGPU** using platforms' GPU APIs:
- - **D3D12** on Windows 10
- - **Metal** on macOS and iOS
- - **Vulkan** on Windows, Linux, ChromeOS, Android and Fuchsia
- - OpenGL as best effort where available
- - **A client-server implementation of WebGPU** for applications that are in a sandbox without access to native drivers
-
-Helpful links:
-
- - [Dawn's bug tracker](https://bugs.chromium.org/p/dawn/issues/entry) if you find issues with Dawn.
- - [Dawn's mailing list](https://groups.google.com/forum/#!members/dawn-graphics) for other discussions related to Dawn.
- - [Dawn's source code](https://dawn.googlesource.com/dawn)
- - [Dawn's Matrix chatroom](https://matrix.to/#/#webgpu-dawn:matrix.org) for live discussion around contributing or using Dawn.
- - [WebGPU's Matrix chatroom](https://matrix.to/#/#WebGPU:matrix.org)
-
-## Documentation table of content
-
-Developer documentation:
-
- - [Dawn overview](docs/dawn/overview.md)
- - [Building Dawn](docs/dawn/building.md)
- - [Contributing to Dawn](docs/dawn/contributing.md)
- - [Testing Dawn](docs/dawn/testing.md)
- - [Debugging Dawn](docs/dawn/debugging.md)
- - [Dawn's infrastructure](docs/dawn/infra.md)
- - [Dawn errors](docs/dawn/errors.md)
-
-User documentation: (TODO, figure out what overlaps with the webgpu.h docs)
-
-## Status
-
-(TODO)
-
-## License
-
-Apache 2.0 Public License, please see [LICENSE](/LICENSE).
-
-## Disclaimer
-
-This is not an officially supported Google product.
diff --git a/chromium/third_party/dawn/README.md.tint b/chromium/third_party/dawn/README.md.tint
deleted file mode 100644
index 4ae10215490..00000000000
--- a/chromium/third_party/dawn/README.md.tint
+++ /dev/null
@@ -1,106 +0,0 @@
-# Tint
-
-Tint is a compiler for the WebGPU Shader Language (WGSL).
-
-This is not an officially supported Google product.
-
-## Requirements
- * Git
- * CMake (3.10.2 or later)
- * Ninja (or other build tool)
- * Python, for fetching dependencies
- * [depot_tools] in your path
-
-## Build options
- * `TINT_BUILD_SPV_READER` : enable the SPIR-V input reader (off by default)
- * `TINT_BUILD_WGSL_READER` : enable the WGSL input reader (on by default)
- * `TINT_BUILD_SPV_WRITER` : enable the SPIR-V output writer (on by default)
- * `TINT_BUILD_WGSL_WRITER` : enable the WGSL output writer (on by default)
- * `TINT_BUILD_FUZZERS` : enable building fuzzzers (off by default)
-
-## Building
-Tint uses Chromium dependency management so you need to install [depot_tools]
-and add it to your PATH.
-
-[depot_tools]: http://commondatastorage.googleapis.com/chrome-infra-docs/flat/depot_tools/docs/html/depot_tools_tutorial.html#_setting_up
-
-### Getting source & dependencies
-
-```sh
-# Clone the repo as "tint"
-git clone https://dawn.googlesource.com/tint tint
-cd tint
-
-# Bootstrap the gclient configuration
-cp scripts/standalone.gclient .gclient
-
-# Fetch external dependencies and toolchains with gclient
-gclient sync
-```
-
-### Compiling using CMake + Ninja
-```sh
-mkdir -p out/Debug
-cd out/Debug
-cmake -GNinja ../..
-ninja # or autoninja
-```
-
-### Compiling using CMake + make
-```sh
-mkdir -p out/Debug
-cd out/Debug
-cmake ../..
-make # -j N for N-way parallel build
-```
-
-### Compiling using gn + ninja
-```sh
-mkdir -p out/Debug
-gn gen out/Debug
-autoninja -C out/Debug
-```
-
-### Fuzzers on MacOS
-If you are attempting fuzz, using `TINT_BUILD_FUZZERS=ON`, the version of llvm
-in the XCode SDK does not have the needed libfuzzer functionality included.
-
-The build error that you will see from using the XCode SDK will look something
-like this:
-```
-ld: file not found:/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/lib/clang/11.0.0/lib/darwin/libclang_rt.fuzzer_osx.a
-```
-
-The solution to this problem is to use a full version llvm, like what you would
-get via homebrew, `brew install llvm`, and use something like `CC=<path to full
-clang> cmake ..` to setup a build using that toolchain.
-
-### Checking [chromium-style] issues in CMake builds
-The gn based work flow uses the Chromium toolchain for building in anticipation
-of integration of Tint into Chromium based projects. This toolchain has
-additional plugins for checking for style issues, which are marked with
-[chromium-style] in log messages. This means that this toolchain is more strict
-then the default clang toolchain.
-
-In the future we will have a CQ that will build this work flow and flag issues
-automatically. Until that is in place, to avoid causing breakages you can run
-the [chromium-style] checks using the CMake based work flows. This requires
-setting `CC` to the version of clang checked out by `gclient sync` and setting
-the `TINT_CHECK_CHROMIUM_STYLE` to `ON`.
-
-```sh
-mkdir -p out/style
-cd out/style
-cmake ../..
-CC=../../third_party/llvm-build/Release+Asserts/bin/clang cmake -DTINT_CHECK_CHROMIUM_STYLE=ON ../../ # add -GNinja for ninja builds
-```
-
-## Issues
-Please file any issues or feature requests at
-https://bugs.chromium.org/p/tint/issues/entry
-
-## Contributing
-Please see the CONTRIBUTING and CODE_OF_CONDUCT files on how to contribute to
-Tint.
-
-Tint has a process for supporting [experimental extensions](docs/tint/experimental_extensions.md).
diff --git a/chromium/third_party/dawn/dawn.json b/chromium/third_party/dawn/dawn.json
index f2c5fd0a49f..fe0c7d10c14 100644
--- a/chromium/third_party/dawn/dawn.json
+++ b/chromium/third_party/dawn/dawn.json
@@ -128,6 +128,8 @@
"extensible": "out",
"members": [
{"name": "vendor ID", "type": "uint32_t"},
+ {"name": "vendor name", "type": "char", "annotation": "const*", "length": "strlen"},
+ {"name": "architecture", "type": "char", "annotation": "const*", "length": "strlen"},
{"name": "device ID", "type": "uint32_t"},
{"name": "name", "type": "char", "annotation": "const*", "length": "strlen"},
{"name": "driver description", "type": "char", "annotation": "const*", "length": "strlen"},
@@ -413,7 +415,7 @@
"returns": "void *",
"args": [
{"name": "offset", "type": "size_t", "default": 0},
- {"name": "size", "type": "size_t", "default": 0}
+ {"name": "size", "type": "size_t", "default": "WGPU_WHOLE_MAP_SIZE"}
]
},
{
@@ -421,7 +423,7 @@
"returns": "void const *",
"args": [
{"name": "offset", "type": "size_t", "default": 0},
- {"name": "size", "type": "size_t", "default": 0}
+ {"name": "size", "type": "size_t", "default": "WGPU_WHOLE_MAP_SIZE"}
]
},
{
@@ -432,6 +434,14 @@
]
},
{
+ "name": "get usage",
+ "returns": "buffer usage"
+ },
+ {
+ "name": "get size",
+ "returns": "uint64_t"
+ },
+ {
"name": "unmap"
},
{
@@ -796,6 +806,15 @@
},
{
"name": "dispatch",
+ "tags": ["deprecated"],
+ "args": [
+ {"name": "workgroupCountX", "type": "uint32_t"},
+ {"name": "workgroupCountY", "type": "uint32_t", "default": "1"},
+ {"name": "workgroupCountZ", "type": "uint32_t", "default": "1"}
+ ]
+ },
+ {
+ "name": "dispatch workgroups",
"args": [
{"name": "workgroupCountX", "type": "uint32_t"},
{"name": "workgroupCountY", "type": "uint32_t", "default": "1"},
@@ -804,6 +823,14 @@
},
{
"name": "dispatch indirect",
+ "tags": ["deprecated"],
+ "args": [
+ {"name": "indirect buffer", "type": "buffer"},
+ {"name": "indirect offset", "type": "uint64_t"}
+ ]
+ },
+ {
+ "name": "dispatch workgroups indirect",
"args": [
{"name": "indirect buffer", "type": "buffer"},
{"name": "indirect offset", "type": "uint64_t"}
@@ -1048,6 +1075,7 @@
},
{
"name": "create swap chain",
+ "tags": ["dawn"],
"returns": "swap chain",
"args": [
{"name": "surface", "type": "surface", "optional": true},
@@ -1055,6 +1083,15 @@
]
},
{
+ "name": "create swap chain",
+ "tags": ["upstream", "emscripten"],
+ "returns": "swap chain",
+ "args": [
+ {"name": "surface", "type": "surface"},
+ {"name": "descriptor", "type": "swap chain descriptor", "annotation": "const*"}
+ ]
+ },
+ {
"name": "create texture",
"returns": "texture",
"args": [
@@ -1184,6 +1221,8 @@
{"name": "depth clamping", "type": "bool", "default": "false"},
{"name": "depth24 unorm stencil8", "type": "bool", "default": "false"},
{"name": "depth32 float stencil8", "type": "bool", "default": "false"},
+ {"name": "chromium experimental dp4a", "type": "bool", "default": "false"},
+ {"name": "indirect first instance", "type": "bool", "default": "false"},
{"name": "invalid feature", "type": "bool", "default": "false"},
{"name": "dawn internal usages", "type": "bool", "default": "false"},
{"name": "dawn native", "type": "bool", "default": "false"},
@@ -1316,7 +1355,14 @@
{"name": "label", "type": "char", "annotation": "const*", "length": "strlen", "optional": true},
{"name": "plane 0", "type": "texture view"},
{"name": "plane 1", "type": "texture view", "optional": true},
- {"name": "color space", "type": "predefined color space", "default": "srgb"}
+ {"name": "yuv to rgb conversion matrix", "type": "float", "annotation": "const*",
+ "length": 12, "optional": true},
+ {"name": "src transfer function parameters", "type": "float", "annotation": "const*",
+ "length": 7},
+ {"name": "dst transfer function parameters", "type": "float", "annotation": "const*",
+ "length": 7},
+ {"name": "gamut conversion matrix", "type": "float", "annotation": "const*",
+ "length": 9}
]
},
"feature name": {
@@ -1336,7 +1382,8 @@
{"value": 1001, "name": "dawn shader float 16", "tags": ["dawn"]},
{"value": 1002, "name": "dawn internal usages", "tags": ["dawn"]},
{"value": 1003, "name": "dawn multi planar formats", "tags": ["dawn"]},
- {"value": 1004, "name": "dawn native", "tags": ["dawn", "native"]}
+ {"value": 1004, "name": "dawn native", "tags": ["dawn", "native"]},
+ {"value": 1005, "name": "chromium experimental dp4a", "tags": ["dawn"]}
]
},
"filter mode": {
@@ -1443,12 +1490,12 @@
"category": "enum",
"values": [
{"value": 0, "name": "vertex"},
- {"value": 1, "name": "instance"}
+ {"value": 1, "name": "instance"},
+ {"value": 2, "name": "vertex buffer not used"}
]
},
"load op": {
"category": "enum",
- "emscripten_no_enum_table": true,
"values": [
{"value": 0, "name": "undefined", "jsrepr": "undefined"},
{"value": 1, "name": "clear"},
@@ -1526,13 +1573,6 @@
{"value": 2, "name": "high performance"}
]
},
- "predefined color space": {
- "category": "enum",
- "values": [
- {"value": 0, "name": "undefined", "jsrepr": "undefined"},
- {"value": 1, "name": "srgb"}
- ]
- },
"present mode": {
"category": "enum",
"emscripten_no_enum_table": true,
@@ -1573,6 +1613,14 @@
]
},
{
+ "name": "get type",
+ "returns": "query type"
+ },
+ {
+ "name": "get count",
+ "returns": "uint32_t"
+ },
+ {
"name": "destroy"
}
]
@@ -2480,6 +2528,38 @@
]
},
{
+ "name": "get width",
+ "returns": "uint32_t"
+ },
+ {
+ "name": "get height",
+ "returns": "uint32_t"
+ },
+ {
+ "name": "get depth or array layers",
+ "returns": "uint32_t"
+ },
+ {
+ "name": "get mip level count",
+ "returns": "uint32_t"
+ },
+ {
+ "name": "get sample count",
+ "returns": "uint32_t"
+ },
+ {
+ "name": "get dimension",
+ "returns": "texture dimension"
+ },
+ {
+ "name": "get format",
+ "returns": "texture format"
+ },
+ {
+ "name": "get usage",
+ "returns": "texture usage"
+ },
+ {
"name": "destroy"
}
]
diff --git a/chromium/third_party/dawn/dawn_wire.json b/chromium/third_party/dawn/dawn_wire.json
index 2e2318efb1f..5da5cc46e50 100644
--- a/chromium/third_party/dawn/dawn_wire.json
+++ b/chromium/third_party/dawn/dawn_wire.json
@@ -189,6 +189,8 @@
"BufferMapAsync",
"BufferGetConstMappedRange",
"BufferGetMappedRange",
+ "BufferGetSize",
+ "BufferGetUsage",
"DeviceCreateBuffer",
"DeviceCreateComputePipelineAsync",
"DeviceCreateRenderPipelineAsync",
@@ -201,14 +203,26 @@
"DeviceSetLoggingCallback",
"InstanceRequestAdapter",
"ShaderModuleGetCompilationInfo",
+ "QuerySetGetType",
+ "QuerySetGetCount",
"QueueOnSubmittedWorkDone",
"QueueWriteBuffer",
- "QueueWriteTexture"
+ "QueueWriteTexture",
+ "TextureGetWidth",
+ "TextureGetHeight",
+ "TextureGetDepthOrArrayLayers",
+ "TextureGetMipLevelCount",
+ "TextureGetSampleCount",
+ "TextureGetDimension",
+ "TextureGetFormat",
+ "TextureGetUsage"
],
"client_handwritten_commands": [
"BufferDestroy",
"BufferUnmap",
"DeviceCreateErrorBuffer",
+ "DeviceCreateQuerySet",
+ "DeviceCreateTexture",
"DeviceGetQueue",
"DeviceInjectError"
],
@@ -217,8 +231,10 @@
"Buffer",
"Device",
"Instance",
+ "QuerySet",
"Queue",
- "ShaderModule"
+ "ShaderModule",
+ "Texture"
],
"server_custom_pre_handler_commands": [
"BufferDestroy",
diff --git a/chromium/third_party/dawn/docs/building.md b/chromium/third_party/dawn/docs/building.md
new file mode 100644
index 00000000000..dc32aad3c52
--- /dev/null
+++ b/chromium/third_party/dawn/docs/building.md
@@ -0,0 +1,90 @@
+# Building Dawn
+
+## System requirements
+
+ * Git
+ * CMake (3.10.2 or later), if building with CMake
+ * GN, if building with GN
+ * Ninja (or other build tool)
+ * Python, for fetching dependencies
+ * [depot_tools] in your path
+
+- Linux
+ - The `pkg-config` command:
+ ```sh
+ # Install pkg-config on Ubuntu
+ sudo apt-get install pkg-config
+ ```
+
+- Mac
+ - [Xcode](https://developer.apple.com/xcode/) 12.2+.
+ - The macOS 11.0 SDK. Run `xcode-select` to check whether you have it.
+ ```sh
+ ls `xcode-select -p`/Platforms/MacOSX.platform/Developer/SDKs
+ ```
+
+## Install `depot_tools`
+
+Dawn uses the Chromium build system and dependency management so you need to [install depot_tools] and add it to the PATH.
+
+[install depot_tools]: http://commondatastorage.googleapis.com/chrome-infra-docs/flat/depot_tools/docs/html/depot_tools_tutorial.html#_setting_up
+
+## Get the code
+
+```sh
+# Clone the repo as "dawn"
+git clone https://dawn.googlesource.com/dawn dawn && cd dawn
+
+# Bootstrap the gclient configuration
+cp scripts/standalone.gclient .gclient
+
+# Fetch external dependencies and toolchains with gclient
+gclient sync
+```
+
+## Build Dawn
+
+### Compiling using CMake + Ninja
+```sh
+mkdir -p out/Debug
+cd out/Debug
+cmake -GNinja ../..
+ninja # or autoninja
+```
+
+### Compiling using CMake + make
+```sh
+mkdir -p out/Debug
+cd out/Debug
+cmake ../..
+make # -j N for N-way parallel build
+```
+
+### Compiling using gn + ninja
+```sh
+mkdir -p out/Debug
+gn gen out/Debug
+autoninja -C out/Debug
+```
+
+The most common GN build option is `is_debug=true/false`; otherwise
+`gn args out/Debug --list` shows all the possible options.
+
+On macOS you'll want to add `use_system_xcode=true` in most cases
+(and if you're a Googler, please get Xcode from go/xcode).
+
+
+### Fuzzers on macOS
+If you are attempting to fuzz using `TINT_BUILD_FUZZERS=ON`, note that the version of llvm
+in the Xcode SDK does not include the needed libfuzzer functionality.
+
+The build error that you will see from using the Xcode SDK will look something
+like this:
+```
+ld: file not found:/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/lib/clang/11.0.0/lib/darwin/libclang_rt.fuzzer_osx.a
+```
+
+The solution to this problem is to use a full version of llvm, like what you would
+get via Homebrew (`brew install llvm`), and use something like `CC=<path to full
+clang> cmake ..` to set up a build using that toolchain.
+
diff --git a/chromium/third_party/dawn/docs/clang-tidy.md b/chromium/third_party/dawn/docs/clang-tidy.md
new file mode 100644
index 00000000000..e2f9738c7fd
--- /dev/null
+++ b/chromium/third_party/dawn/docs/clang-tidy.md
@@ -0,0 +1,37 @@
+# Running clang-tidy
+
+* Add `"checkout_clang_tidy": True` to `.gclient` file in the `custom_vars`.
+ ```
+ {
+ "custom_vars": {
+ "checkout_clang_tidy": True,
+ }
+ }
+ ```
+* `gclient sync`
+
+There should now be a `clang-tidy` binary at `third_party/llvm-build/Release+Asserts/bin/clang-tidy`.
+
+* `cd out`
+* `git clone https://chromium.googlesource.com/chromium/tools/build`
+
+The Chromium build folder contains the `tricium` files used to run `clang-tidy`.
+
+Running clang-tidy over all the source can be done with:
+
+```
+cd ..
+out/build/recipes/recipe_modules/tricium_clang_tidy/resources/tricium_clang_tidy_script.py \
+--base_path $PWD \
+--out_dir out/Debug \
+--findings_file all_findings.json \
+--clang_tidy_binary $PWD/third_party/llvm-build/Release+Asserts/bin/clang-tidy \
+--all
+```
+
+`--all` can be replaced with specific file paths to run on individual source
+files.
+
+
+## References
+* https://chromium.googlesource.com/chromium/src.git/+/HEAD/docs/clang_tidy.md
diff --git a/chromium/third_party/dawn/docs/dawn/building.md b/chromium/third_party/dawn/docs/dawn/building.md
deleted file mode 100644
index 230c222da8c..00000000000
--- a/chromium/third_party/dawn/docs/dawn/building.md
+++ /dev/null
@@ -1,46 +0,0 @@
-# Building Dawn
-
-## System requirements
-
-- Linux
- - The `pkg-config` command:
- ```sh
- # Install pkg-config on Ubuntu
- sudo apt-get install pkg-config
- ```
-
-- Mac
- - [Xcode](https://developer.apple.com/xcode/) 12.2+.
- - The macOS 11.0 SDK. Run `xcode-select` to check whether you have it.
- ```sh
- ls `xcode-select -p`/Platforms/MacOSX.platform/Developer/SDKs
- ```
-
-## Install `depot_tools`
-
-Dawn uses the Chromium build system and dependency management so you need to [install depot_tools] and add it to the PATH.
-
-[install depot_tools]: http://commondatastorage.googleapis.com/chrome-infra-docs/flat/depot_tools/docs/html/depot_tools_tutorial.html#_setting_up
-
-## Get the code
-
-```sh
-# Clone the repo as "dawn"
-git clone https://dawn.googlesource.com/dawn dawn && cd dawn
-
-# Bootstrap the gclient configuration
-cp scripts/standalone.gclient .gclient
-
-# Fetch external dependencies and toolchains with gclient
-gclient sync
-```
-
-## Build Dawn
-
-Then generate build files using `gn args out/Debug` or `gn args out/Release`.
-A text editor will appear asking build options, the most common option is `is_debug=true/false`; otherwise `gn args out/Release --list` shows all the possible options.
-
-On macOS you'll want to add the `use_system_xcode=true` in most cases. (and if you're a googler please get XCode from go/xcode).
-
-Then use `ninja -C out/Release` to build dawn and for example `./out/Release/dawn_end2end_tests` to run the tests.
-
diff --git a/chromium/third_party/dawn/docs/dawn/codegen.md b/chromium/third_party/dawn/docs/dawn/codegen.md
index b62c449930a..315364c07b1 100644
--- a/chromium/third_party/dawn/docs/dawn/codegen.md
+++ b/chromium/third_party/dawn/docs/dawn/codegen.md
@@ -2,7 +2,7 @@
Dawn relies on a lot of code generation to produce boilerplate code, especially webgpu.h-related code. They start by reading some JSON files (and sometimes XML too), process the data into an in-memory representation that's then used by some [Jinja2](https://jinja.palletsprojects.com/) templates to generate the code. This is similar to the model/view separation in Web development.
-Generators are based on [generator_lib.py](../generator/generator_lib.py) which provides facilities for integrating in build systems and using Jinja2. Templates can be found in [`generator/templates`](../generator/templates) and the generated files are in `out/<Debug/Release/foo>/gen/src` when building Dawn in standalone. Generated files can also be found in [Chromium's code search](https://source.chromium.org/chromium/chromium/src/+/master:out/Debug/gen/third_party/dawn/src/).
+Generators are based on [generator_lib.py](../generator/generator_lib.py) which provides facilities for integrating in build systems and using Jinja2. Templates can be found in [`generator/templates`](../generator/templates) and the generated files are in `out/<Debug/Release/foo>/gen/src` when building Dawn in standalone. Generated files can also be found in [Chromium's code search](https://source.chromium.org/chromium/chromium/src/+/main:out/Debug/gen/third_party/dawn/src/).
## Dawn "JSON API" generators
diff --git a/chromium/third_party/dawn/docs/dawn/fuzzing.md b/chromium/third_party/dawn/docs/dawn/fuzzing.md
index 85219015a30..ffa1b30c58a 100644
--- a/chromium/third_party/dawn/docs/dawn/fuzzing.md
+++ b/chromium/third_party/dawn/docs/dawn/fuzzing.md
@@ -13,6 +13,6 @@ The `dawn_wire_server_and_vulkan_backend_fuzzer` is like `dawn_wire_server_and_f
Using a seed corpus significantly improves the efficiency of fuzzing. Dawn's fuzzers use interesting testcases discovered in previous fuzzing runs to seed future runs. Fuzzing can be further improved by using Dawn tests as an example of API usage, which allows the fuzzer to quickly discover and use new API entrypoints and usage patterns.
Dawn has a CI builder [cron-linux-clang-rel-x64](https://ci.chromium.org/p/dawn/builders/ci/cron-linux-clang-rel-x64) which runs on a periodic schedule. This bot runs the `dawn_end2end_tests` and `dawn_unittests` using the wire and writes out traces of the commands. This can manually be done by running: `<test_binary> --use-wire --wire-trace-dir=tmp_dir`. The output directory will contain one trace for each test, where the traces are prepended with `0xFFFFFFFFFFFFFFFF`. The header is the callsite index at which the error injector should inject an error. If the fuzzer doesn't support error injection it will skip the header. [cron-linux-clang-rel-x64] then hashes the output files to produce unique names and uploads them to the fuzzer corpus directories.
-Please see the `dawn.py`[https://source.chromium.org/chromium/chromium/tools/build/+/master:recipes/recipes/dawn.py] recipe for specific details.
+Please see the [`dawn.py`](https://source.chromium.org/chromium/chromium/tools/build/+/main:recipes/recipes/dawn.py) recipe for specific details.
-Regenerating the seed corpus keeps it up to date when Dawn's API or wire protocol changes. \ No newline at end of file
+Regenerating the seed corpus keeps it up to date when Dawn's API or wire protocol changes.
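As a rough illustration of the final step described above (the bot hashing each wire trace to a unique name before uploading it to the corpus), the renaming could be sketched like this; the directory paths and the choice of SHA-1 are assumptions, not the actual recipe code:

```python
import hashlib
import os
import shutil

def hash_traces(trace_dir, corpus_dir):
    """Copy each wire trace into corpus_dir under a name derived from its content hash."""
    os.makedirs(corpus_dir, exist_ok=True)
    for name in os.listdir(trace_dir):
        src = os.path.join(trace_dir, name)
        with open(src, "rb") as f:
            digest = hashlib.sha1(f.read()).hexdigest()
        shutil.copyfile(src, os.path.join(corpus_dir, digest))

# Hypothetical usage: hash_traces("tmp_dir", "corpus/dawn_wire_server_and_frontend_fuzzer")
```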
diff --git a/chromium/third_party/dawn/docs/dawn/infra.md b/chromium/third_party/dawn/docs/dawn/infra.md
index 605d9cad187..aa019253441 100644
--- a/chromium/third_party/dawn/docs/dawn/infra.md
+++ b/chromium/third_party/dawn/docs/dawn/infra.md
@@ -6,7 +6,7 @@ Dawn uses Chromium's continuous integration (CI) infrastructure to continually r
- [Dawn Try Builders](https://ci.chromium.org/p/dawn/g/try/builders)
- [chromium.dawn Waterfall](https://ci.chromium.org/p/chromium/g/chromium.dawn/console)
-For additional information on GPU testing in Chromium, please see [[chromium/src]//docs/gpu/gpu_testing_bot_details.md](https://chromium.googlesource.com/chromium/src.git/+/master/docs/gpu/gpu_testing_bot_details.md).
+For additional information on GPU testing in Chromium, please see [[chromium/src]//docs/gpu/gpu_testing_bot_details.md](https://chromium.googlesource.com/chromium/src.git/+/main/docs/gpu/gpu_testing_bot_details.md).
## Dawn CI/Try Builders
Dawn builders are specified in [[dawn]//infra/config/global/cr-buildbucket.cfg](../infra/config/global/cr-buildbucket.cfg). This file contains a few mixins such as `clang`, `no_clang`, `x64`, `x86`, `debug`, `release` which are used to specify the bot dimensions and build properties (builder_mixins.recipe.properties). At the time of writing, we have the following builders:
@@ -85,7 +85,7 @@ Using the [[chromium/tools/build]//scripts/slave/recipes/chromium_trybot.py](htt
## Bot Allocation
-Bots are physically allocated based on the configuration in [[chromium/infradata/config]//configs/chromium-swarm/starlark/bots/dawn.star](https://chrome-internal.googlesource.com/infradata/config/+/refs/heads/master/configs/chromium-swarm/starlark/bots/dawn.star) (Google only).
+Bots are physically allocated based on the configuration in [[chromium/infradata/config]//configs/chromium-swarm/starlark/bots/dawn.star](https://chrome-internal.googlesource.com/infradata/config/+/refs/heads/main/configs/chromium-swarm/starlark/bots/dawn.star) (Google only).
`dawn/try` bots are using builderless configurations which means they use builderless GCEs shared with Chromium bots and don't need explicit allocation.
diff --git a/chromium/third_party/dawn/docs/dawn/testing.md b/chromium/third_party/dawn/docs/dawn/testing.md
index 749736ba261..454f9d09154 100644
--- a/chromium/third_party/dawn/docs/dawn/testing.md
+++ b/chromium/third_party/dawn/docs/dawn/testing.md
@@ -30,7 +30,7 @@ A Chromium checkout is required for the highest optimization flags. It is possib
- `recording_time`: The time to convert Dawn commands to native commands.
Metrics are reported according to the format specified at
-[[chromium]//build/scripts/slave/performance_log_processor.py](https://cs.chromium.org/chromium/build/scripts/slave/performance_log_processor.py)
+[[chromium]//build/recipes/performance_log_processor.py](https://cs.chromium.org/chromium/build/recipes/performance_log_processor.py)
### Dumping Trace Files
diff --git a/chromium/third_party/dawn/docs/tint/compound_statements.md b/chromium/third_party/dawn/docs/tint/compound_statements.md
index a113cceae10..1d75415e14e 100644
--- a/chromium/third_party/dawn/docs/tint/compound_statements.md
+++ b/chromium/third_party/dawn/docs/tint/compound_statements.md
@@ -24,13 +24,11 @@ sem::IfStatement {
sem::BlockStatement {
statement_a
}
- sem::ElseStatement {
+ sem::IfStatement {
condition_b
sem::BlockStatement {
statement_b
}
- }
- sem::ElseStatement {
sem::BlockStatement {
statement_c
}
diff --git a/chromium/third_party/dawn/docs/tint/origin-trial-changes.md b/chromium/third_party/dawn/docs/tint/origin-trial-changes.md
index 31f91ceb810..55758aeb79d 100644
--- a/chromium/third_party/dawn/docs/tint/origin-trial-changes.md
+++ b/chromium/third_party/dawn/docs/tint/origin-trial-changes.md
@@ -1,5 +1,16 @@
# Tint changes during Origin Trial
+## Changes for M103
+
+### New features
+
+* Produce warnings when calling barriers, textureSample, and derivative
+builtins in non-uniform control flow [tint:880](crbug.com/tint/880)
+* Matrix identity constructors and constructors for a single scalar value are now supported [tint:1545](crbug.com/tint/1545)
+
+### Breaking changes
+* Builtin `atomicCompareExchangeWeak` returns a struct instead of a vec2. [tint:1185](crbug.com/tint/1185)
+
## Changes for M102
### New Features
diff --git a/chromium/third_party/dawn/docs/tint/spirv-input-output-variables.md b/chromium/third_party/dawn/docs/tint/spirv-input-output-variables.md
index 0f149e072ca..0ba4ae46b9c 100644
--- a/chromium/third_party/dawn/docs/tint/spirv-input-output-variables.md
+++ b/chromium/third_party/dawn/docs/tint/spirv-input-output-variables.md
@@ -94,7 +94,7 @@ Current translation, through SPIR-V, SPIR-V reader, WGSL writer:
return;
}
- @stage(fragment)
+ @fragment
fn main() -> void {
bar_();
return;
@@ -126,7 +126,7 @@ Proposed translation, through SPIR-V, SPIR-V reader, WGSL writer:
@location(0) frag_color : vec4<f32>;
};
- @stage(fragment)
+ @fragment
fn main(
// 'in' variables are entry point parameters
diff --git a/chromium/third_party/dawn/docs/tint/translations.md b/chromium/third_party/dawn/docs/tint/translations.md
index 19bf2fa2270..83370b60e28 100644
--- a/chromium/third_party/dawn/docs/tint/translations.md
+++ b/chromium/third_party/dawn/docs/tint/translations.md
@@ -92,7 +92,7 @@ decorated with `NonWritable` or each member of the struct can be decorated with
| sign | GLSLstd450FSign | sign | sign |
| sin | GLSLstd450Sin | sin | sin |
| sinh | GLSLstd450Sinh | sinh | sinh |
-| smoothStep | GLSLstd450SmoothStep | smoothstep | smoothstep |
+| smoothstep | GLSLstd450SmoothStep | smoothstep | smoothstep |
| sqrt | GLSLstd450Sqrt | sqrt | sqrt |
| step | GLSLstd450Step | step | step |
| tan | GLSLstd450Tan | tan | tan |
diff --git a/chromium/third_party/dawn/generator/CMakeLists.txt b/chromium/third_party/dawn/generator/CMakeLists.txt
index a2d67844ffb..ea87236f824 100644
--- a/chromium/third_party/dawn/generator/CMakeLists.txt
+++ b/chromium/third_party/dawn/generator/CMakeLists.txt
@@ -26,7 +26,7 @@ if (NOT DAWN_JINJA2_DIR)
message(FATAL_ERROR "Dawn: Missing dependencies for code generation, please ensure you have python-jinja2 installed.")
endif()
else()
- message(STATUS "Dawn: Using jinja2 at ${DAWN_JINJA2_DIR}")
+ message(STATUS "Dawn: using jinja2 at ${DAWN_JINJA2_DIR}")
endif()
# Function to invoke a generator_lib.py generator.
diff --git a/chromium/third_party/dawn/generator/dawn_gpu_info_generator.py b/chromium/third_party/dawn/generator/dawn_gpu_info_generator.py
new file mode 100644
index 00000000000..31b1eb5e629
--- /dev/null
+++ b/chromium/third_party/dawn/generator/dawn_gpu_info_generator.py
@@ -0,0 +1,131 @@
+#!/usr/bin/env python3
+# Copyright 2022 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json, os, sys
+from collections import namedtuple
+
+from generator_lib import Generator, run_generator, FileRender
+
+
+class Name:
+ def __init__(self, name):
+ self.name = name
+ self.chunks = name.split(' ')
+
+ def get(self):
+ return self.name
+
+ def CamelChunk(self, chunk):
+ return chunk[0].upper() + chunk[1:]
+
+ def canonical_case(self):
+ return (' '.join(self.chunks)).lower()
+
+ def concatcase(self):
+ return ''.join(self.chunks)
+
+ def camelCase(self):
+ return self.chunks[0] + ''.join(
+ [self.CamelChunk(chunk) for chunk in self.chunks[1:]])
+
+ def CamelCase(self):
+ return ''.join([self.CamelChunk(chunk) for chunk in self.chunks])
+
+ def SNAKE_CASE(self):
+ return '_'.join([chunk.upper() for chunk in self.chunks])
+
+ def snake_case(self):
+ return '_'.join(self.chunks)
+
+ def js_enum_case(self):
+ result = self.chunks[0].lower()
+ for chunk in self.chunks[1:]:
+ if not result[-1].isdigit():
+ result += '-'
+ result += chunk.lower()
+ return result
+
+
+class Architecture:
+ def __init__(self, name, json_data):
+ self.name = Name(name)
+ self.devices = []
+ for device in json_data:
+ self.devices.append(device)
+
+
+class Vendor:
+ def __init__(self, name, json_data):
+ self.name = Name(name)
+ self.id = json_data['id']
+
+ self.deviceMask = None
+ if 'deviceMask' in json_data:
+ self.deviceMask = json_data['deviceMask']
+
+ self.architectures = []
+
+ if 'architecture' in json_data:
+ for (arch_name, arch_data) in json_data['architecture'].items():
+ # Skip any entries that start with an underscore. Used for comments.
+ if arch_name[0] == '_':
+ continue
+
+ self.architectures.append(Architecture(arch_name, arch_data))
+
+ def maskDeviceId(self):
+ if not self.deviceMask:
+ return ''
+ return ' & ' + self.deviceMask
+
+
+def parse_json(json):
+ vendors = []
+
+ for (vendor, vendor_data) in json['vendors'].items():
+ vendors.append(Vendor(vendor, vendor_data))
+
+ return {'vendors': vendors}
+
+
+class DawnGpuInfoGenerator(Generator):
+ def get_description(self):
+ return "Generates GPU Info Dawn code."
+
+ def add_commandline_arguments(self, parser):
+ parser.add_argument('--gpu-info-json',
+ required=True,
+ type=str,
+ help='The GPU Info JSON definition to use.')
+
+ def get_dependencies(self, args):
+ return [os.path.abspath(args.gpu_info_json)]
+
+ def get_file_renders(self, args):
+ with open(args.gpu_info_json) as f:
+ loaded_json = json.loads(f.read())
+
+ params = parse_json(loaded_json)
+
+ return [
+ FileRender("dawn/common/GPUInfo.h",
+ "src/dawn/common/GPUInfo_autogen.h", [params]),
+ FileRender("dawn/common/GPUInfo.cpp",
+ "src/dawn/common/GPUInfo_autogen.cpp", [params]),
+ ]
+
+
+if __name__ == "__main__":
+ sys.exit(run_generator(DawnGpuInfoGenerator()))
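To make the casing helpers in the new generator concrete, here is a small usage example; it assumes the `Name` class and `parse_json` defined above, and the vendor record is entirely made up rather than taken from the real `gpu_info.json`:

```python
name = Name("dawn internal usages")
print(name.CamelCase())     # DawnInternalUsages
print(name.camelCase())     # dawnInternalUsages
print(name.snake_case())    # dawn_internal_usages
print(name.SNAKE_CASE())    # DAWN_INTERNAL_USAGES
print(name.js_enum_case())  # dawn-internal-usages

# A made-up vendor entry in the shape parse_json() expects:
vendors = parse_json({
    "vendors": {
        "exampleco": {
            "id": "0x1234",
            "deviceMask": "0xFF00",
            "architecture": {
                "gen1": ["0x0100", "0x0110"],
                "_comment": ["entries starting with '_' are skipped"],
            },
        }
    }
})["vendors"]
print(vendors[0].name.CamelCase(), vendors[0].maskDeviceId())  # Exampleco  & 0xFF00
```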
diff --git a/chromium/third_party/dawn/generator/dawn_version_generator.py b/chromium/third_party/dawn/generator/dawn_version_generator.py
index 1907e88da48..120be495bd6 100644
--- a/chromium/third_party/dawn/generator/dawn_version_generator.py
+++ b/chromium/third_party/dawn/generator/dawn_version_generator.py
@@ -13,28 +13,35 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import os, subprocess, sys
+import os, subprocess, sys, shutil
from generator_lib import Generator, run_generator, FileRender
-
def get_git():
- return 'git.bat' if sys.platform == 'win32' else 'git'
+ # Will find git, git.exe, git.bat...
+ git_exec = shutil.which("git")
+ if not git_exec:
+ raise Exception("No git executable found")
+
+ return git_exec
def get_gitHash(dawnDir):
- result = subprocess.run([get_git(), 'rev-parse', 'HEAD'],
- stdout=subprocess.PIPE,
- cwd=dawnDir)
- if result.returncode == 0:
- return result.stdout.decode('utf-8').strip()
+ try:
+ result = subprocess.run([get_git(), "rev-parse", "HEAD"],
+ stdout=subprocess.PIPE,
+ cwd=dawnDir)
+ if result.returncode == 0:
+ return result.stdout.decode("utf-8").strip()
+ except Exception:
+ return ""
# No hash was available (possibly) because the directory was not a git checkout. Dawn should
    # explicitly handle its absence and disable features relying on the hash, i.e. caching.
- return ''
+ return ""
def get_gitHead(dawnDir):
- return os.path.join(dawnDir, '.git', 'HEAD')
+ return os.path.join(dawnDir, ".git", "HEAD")
def gitExists(dawnDir):
@@ -43,71 +50,76 @@ def gitExists(dawnDir):
def unpackGitRef(packed, resolved):
with open(packed) as fin:
- refs = fin.read().strip().split('\n')
+ refs = fin.read().strip().split("\n")
# Strip comments
- refs = [ref.split(' ') for ref in refs if ref.strip()[0] != '#']
+ refs = [ref.split(" ") for ref in refs if ref.strip()[0] != "#"]
# Parse results which are in the format [<gitHash>, <refFile>] from previous step.
refs = [gitHash for (gitHash, refFile) in refs if refFile == resolved]
if len(refs) == 1:
- with open(resolved, 'w') as fout:
- fout.write(refs[0] + '\n')
+ with open(resolved, "w") as fout:
+ fout.write(refs[0] + "\n")
return True
return False
def get_gitResolvedHead(dawnDir):
result = subprocess.run(
- [get_git(), 'rev-parse', '--symbolic-full-name', 'HEAD'],
+ [get_git(), "rev-parse", "--symbolic-full-name", "HEAD"],
stdout=subprocess.PIPE,
cwd=dawnDir)
if result.returncode != 0:
- raise Exception('Failed to execute git rev-parse to resolve git head.')
+ raise Exception("Failed to execute git rev-parse to resolve git head:", result.stdout)
- resolved = os.path.join(dawnDir, '.git',
- result.stdout.decode('utf-8').strip())
+ resolved = os.path.join(dawnDir, ".git",
+ result.stdout.decode("utf-8").strip())
# Check a packed-refs file exists. If so, we need to potentially unpack and include it as a dep.
- packed = os.path.join(dawnDir, '.git', 'packed-refs')
+ packed = os.path.join(dawnDir, ".git", "packed-refs")
if os.path.exists(packed) and unpackGitRef(packed, resolved):
return [packed, resolved]
if not os.path.exists(resolved):
- raise Exception('Unable to resolve git HEAD hash file:', path)
+ raise Exception("Unable to resolve git HEAD hash file:", resolved)
return [resolved]
def compute_params(args):
return {
- 'get_gitHash': lambda: get_gitHash(os.path.abspath(args.dawn_dir)),
+ "get_gitHash": lambda: get_gitHash(os.path.abspath(args.dawn_dir)),
}
class DawnVersionGenerator(Generator):
def get_description(self):
- return 'Generates version dependent Dawn code. Currently regenerated dependent on git hash.'
+ return "Generates version dependent Dawn code. Currently regenerated dependent on git hash."
def add_commandline_arguments(self, parser):
- parser.add_argument('--dawn-dir',
- required=True,
- type=str,
- help='The Dawn root directory path to use')
+ parser.add_argument(
+ "--dawn-dir",
+ required=True,
+ type=str,
+ help="The Dawn root directory path to use",
+ )
def get_dependencies(self, args):
dawnDir = os.path.abspath(args.dawn_dir)
if gitExists(dawnDir):
- return [get_gitHead(dawnDir)] + get_gitResolvedHead(dawnDir)
+ try:
+ return [get_gitHead(dawnDir)] + get_gitResolvedHead(dawnDir)
+ except Exception:
+ return []
return []
def get_file_renders(self, args):
params = compute_params(args)
return [
- FileRender('dawn/common/Version.h',
- 'src/dawn/common/Version_autogen.h', [params]),
+ FileRender("dawn/common/Version.h",
+ "src/dawn/common/Version_autogen.h", [params]),
]
-if __name__ == '__main__':
+if __name__ == "__main__":
sys.exit(run_generator(DawnVersionGenerator()))
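Condensing the hunks above, the hash lookup the version generator now performs amounts to the following standalone sketch (illustrative only; the real script also tracks `.git/HEAD` and packed refs as build dependencies):

```python
import os
import shutil
import subprocess

def current_dawn_hash(dawn_dir):
    """Best-effort HEAD hash of a Dawn checkout; returns '' outside a git checkout."""
    git = shutil.which("git")  # finds git, git.exe, git.bat...
    if git is None or not os.path.exists(os.path.join(dawn_dir, ".git", "HEAD")):
        return ""
    result = subprocess.run([git, "rev-parse", "HEAD"],
                            stdout=subprocess.PIPE, cwd=dawn_dir)
    return result.stdout.decode("utf-8").strip() if result.returncode == 0 else ""

# e.g. current_dawn_hash(".") -> a 40-character hash in a checkout, "" in a source tarball
```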
diff --git a/chromium/third_party/dawn/generator/remove_files.py b/chromium/third_party/dawn/generator/remove_files.py
index 6ddf463667d..21eef8410bc 100644
--- a/chromium/third_party/dawn/generator/remove_files.py
+++ b/chromium/third_party/dawn/generator/remove_files.py
@@ -78,6 +78,7 @@ def run():
for candidate in get_all_files_in_dir(stale_dir):
if not check_is_allowed(candidate, allowed_dirs):
+ print("Warning: remove_files.py removed " + candidate)
os.remove(candidate)
# Finished! Write the stamp file so ninja knows to not run this again.
diff --git a/chromium/third_party/dawn/generator/templates/api.h b/chromium/third_party/dawn/generator/templates/api.h
index db9d94a81ba..2694456010e 100644
--- a/chromium/third_party/dawn/generator/templates/api.h
+++ b/chromium/third_party/dawn/generator/templates/api.h
@@ -100,6 +100,7 @@ typedef struct {{c_prefix}}ChainedStructOut {
{% endif %}
{% for member in type.members %}
{{as_annotated_cType(member)}};
+ {%- if member.optional %} // nullable{% endif %}{{""}}
{% endfor %}
} {{as_cType(type.name)}};
@@ -143,6 +144,7 @@ extern "C" {
{{-as_cType(type.name)}} {{as_varName(type.name)}}
{%- for arg in method.arguments -%}
, {{as_annotated_cType(arg)}}
+ {%- if arg.optional %} /* nullable */{% endif %}
{%- endfor -%}
);
{% endfor %}
@@ -167,6 +169,7 @@ extern "C" {
{{-as_cType(type.name)}} {{as_varName(type.name)}}
{%- for arg in method.arguments -%}
, {{as_annotated_cType(arg)}}
+ {%- if arg.optional %} /* nullable */{% endif %}
{%- endfor -%}
);
{% endfor %}
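The two hunks above only append a trailing comment for optional members and arguments; a toy Jinja2 render shows the intended effect (the member list and the mini-template are invented for illustration, not the real `api.h` template):

```python
from jinja2 import Template

# Invented members; the real data comes from dawn.json via the generator.
members = [
    {"decl": "WGPUTextureView plane1", "optional": True},
    {"decl": "uint32_t vendorID", "optional": False},
]

tmpl = Template(
    "{% for m in members %}"
    "    {{ m.decl }};{% if m.optional %} // nullable{% endif %}\n"
    "{% endfor %}")
print(tmpl.render(members=members), end="")
#     WGPUTextureView plane1; // nullable
#     uint32_t vendorID;
```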
diff --git a/chromium/third_party/dawn/generator/templates/api_struct_info.json b/chromium/third_party/dawn/generator/templates/api_struct_info.json
index 04e56cfe95d..65726081ada 100644
--- a/chromium/third_party/dawn/generator/templates/api_struct_info.json
+++ b/chromium/third_party/dawn/generator/templates/api_struct_info.json
@@ -16,7 +16,7 @@
//* This generator is used to produce part of Emscripten's struct_info.json,
//* which is a list of struct fields that it uses to generate field offset
//* information for its own code generators.
-//* https://github.com/emscripten-core/emscripten/blob/master/src/struct_info.json
+//* https://github.com/emscripten-core/emscripten/blob/main/src/struct_info.json
//*
{
{% set api = metadata.api.lower() %}
diff --git a/chromium/third_party/dawn/generator/templates/dawn/common/GPUInfo.cpp b/chromium/third_party/dawn/generator/templates/dawn/common/GPUInfo.cpp
new file mode 100644
index 00000000000..b39369d7cda
--- /dev/null
+++ b/chromium/third_party/dawn/generator/templates/dawn/common/GPUInfo.cpp
@@ -0,0 +1,106 @@
+// Copyright 2022 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <algorithm>
+#include <array>
+#include <sstream>
+#include <iomanip>
+
+#include "dawn/common/GPUInfo_autogen.h"
+
+#include "dawn/common/Assert.h"
+
+namespace gpu_info {
+
+namespace {
+
+enum class Architecture {
+ Unknown,
+ {% for vendor in vendors %}
+ {% for architecture in vendor.architectures %}
+ {{vendor.name.CamelCase()}}_{{architecture.name.CamelCase()}},
+ {% endfor %}
+ {% endfor %}
+};
+
+Architecture GetArchitecture(PCIVendorID vendorId, PCIDeviceID deviceId) {
+ switch(vendorId) {
+ {% for vendor in vendors %}
+ {% if len(vendor.architectures) %}
+
+ case kVendorID_{{vendor.name.CamelCase()}}: {
+ switch (deviceId{{vendor.maskDeviceId()}}) {
+ {% for architecture in vendor.architectures %}
+ {% for device in architecture.devices %}
+ case {{device}}:
+ {% endfor %}
+ return Architecture::{{vendor.name.CamelCase()}}_{{architecture.name.CamelCase()}};
+ {% endfor %}
+ }
+ } break;
+ {% endif %}
+ {% endfor %}
+ }
+
+ return Architecture::Unknown;
+}
+
+} // namespace
+
+// Vendor checks
+{% for vendor in vendors %}
+ bool Is{{vendor.name.CamelCase()}}(PCIVendorID vendorId) {
+ return vendorId == kVendorID_{{vendor.name.CamelCase()}};
+ }
+{% endfor %}
+
+// Architecture checks
+
+{% for vendor in vendors %}
+ {% if len(vendor.architectures) %}
+ // {{vendor.name.get()}} architectures
+ {% for architecture in vendor.architectures %}
+ bool Is{{vendor.name.CamelCase()}}{{architecture.name.CamelCase()}}(PCIVendorID vendorId, PCIDeviceID deviceId) {
+ return GetArchitecture(vendorId, deviceId) == Architecture::{{vendor.name.CamelCase()}}_{{architecture.name.CamelCase()}};
+ }
+ {% endfor %}
+ {% endif %}
+{% endfor %}
+
+// GPUAdapterInfo fields
+std::string GetVendorName(PCIVendorID vendorId) {
+ switch(vendorId) {
+ {% for vendor in vendors %}
+ case kVendorID_{{vendor.name.CamelCase()}}: return "{{vendor.name.js_enum_case()}}";
+ {% endfor %}
+ }
+
+ return "";
+}
+
+std::string GetArchitectureName(PCIVendorID vendorId, PCIDeviceID deviceId) {
+ Architecture arch = GetArchitecture(vendorId, deviceId);
+ switch(arch) {
+ case Architecture::Unknown:
+ return "";
+ {% for vendor in vendors %}
+ {% for architecture in vendor.architectures %}
+ case Architecture::{{vendor.name.CamelCase()}}_{{architecture.name.CamelCase()}}:
+ return "{{architecture.name.js_enum_case()}}";
+ {% endfor %}
+ {% endfor %}
+ }
+}
+
+} // namespace gpu_info
diff --git a/chromium/third_party/dawn/generator/templates/dawn/common/GPUInfo.h b/chromium/third_party/dawn/generator/templates/dawn/common/GPUInfo.h
new file mode 100644
index 00000000000..f4a54e0a7ab
--- /dev/null
+++ b/chromium/third_party/dawn/generator/templates/dawn/common/GPUInfo.h
@@ -0,0 +1,52 @@
+// Copyright 2022 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef SRC_DAWN_COMMON_GPUINFO_AUTOGEN_H_
+#define SRC_DAWN_COMMON_GPUINFO_AUTOGEN_H_
+
+#include <cstdint>
+#include <string>
+
+using PCIVendorID = uint32_t;
+using PCIDeviceID = uint32_t;
+
+namespace gpu_info {
+
+// Vendor IDs
+{% for vendor in vendors %}
+ static constexpr PCIVendorID kVendorID_{{vendor.name.CamelCase()}} = {{vendor.id}};
+{% endfor %}
+
+// Vendor checks
+{% for vendor in vendors %}
+ bool Is{{vendor.name.CamelCase()}}(PCIVendorID vendorId);
+{% endfor %}
+
+// Architecture checks
+{% for vendor in vendors %}
+ {% if len(vendor.architectures) %}
+
+ // {{vendor.name.get()}} architectures
+ {% for architecture in vendor.architectures %}
+ bool Is{{vendor.name.CamelCase()}}{{architecture.name.CamelCase()}}(PCIVendorID vendorId, PCIDeviceID deviceId);
+ {% endfor %}
+ {% endif %}
+{% endfor %}
+
+// GPUAdapterInfo fields
+std::string GetVendorName(PCIVendorID vendorId);
+std::string GetArchitectureName(PCIVendorID vendorId, PCIDeviceID deviceId);
+
+} // namespace gpu_info
+#endif // SRC_DAWN_COMMON_GPUINFO_AUTOGEN_H_
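Taken together with gpu_info.json further below, these two new templates generate a small gpu_info API: vendor ID constants, per-vendor and per-architecture predicates, and name lookups. A minimal usage sketch, assuming the json "Intel" entry renders to IsIntel() and kVendorID_Intel as the template naming suggests:

    // Minimal usage sketch of the generated header; the assumed names follow the
    // CamelCase rendering of the json vendor entries.
    #include <cstdint>
    #include <iostream>

    #include "dawn/common/GPUInfo_autogen.h"

    void DumpAdapterInfo(uint32_t vendorId, uint32_t deviceId) {
        if (gpu_info::IsIntel(vendorId)) {
            std::cout << "Intel adapter, architecture: "
                      << gpu_info::GetArchitectureName(vendorId, deviceId) << "\n";
        } else {
            std::cout << "vendor: " << gpu_info::GetVendorName(vendorId) << "\n";
        }
    }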
diff --git a/chromium/third_party/dawn/generator/templates/dawn/common/Version.h b/chromium/third_party/dawn/generator/templates/dawn/common/Version.h
index f9f67e726ce..d8d5cdbb295 100644
--- a/chromium/third_party/dawn/generator/templates/dawn/common/Version.h
+++ b/chromium/third_party/dawn/generator/templates/dawn/common/Version.h
@@ -15,9 +15,11 @@
#ifndef COMMON_VERISON_AUTOGEN_H_
#define COMMON_VERISON_AUTOGEN_H_
+#include <string_view>
+
namespace dawn {
-static constexpr char kGitHash[] = "{{get_gitHash()}}";
+static constexpr std::string_view kGitHash("{{get_gitHash()}}");
} // namespace dawn
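kGitHash changes from a char array to a constexpr std::string_view, so callers get length and emptiness checks without strlen. A minimal sketch of consuming the generated header:

    // Minimal consumption sketch; kGitHash may be empty when the tree is not a
    // git checkout (see the generator fallback earlier in this patch).
    #include <iostream>

    #include "dawn/common/Version_autogen.h"

    bool HasEmbeddedGitHash() {
        return !dawn::kGitHash.empty();
    }

    int main() {
        std::cout << "Dawn git hash: " << dawn::kGitHash << "\n";
        return 0;
    }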
diff --git a/chromium/third_party/dawn/generator/templates/dawn/native/ProcTable.cpp b/chromium/third_party/dawn/generator/templates/dawn/native/ProcTable.cpp
index cd829e30310..f9980ffc619 100644
--- a/chromium/third_party/dawn/generator/templates/dawn/native/ProcTable.cpp
+++ b/chromium/third_party/dawn/generator/templates/dawn/native/ProcTable.cpp
@@ -66,7 +66,7 @@ namespace {{native_namespace}} {
{%- endfor -%}
);
{% if method.return_type.name.canonical_case() != "void" %}
- {% if method.return_type.category == "object" %}
+ {% if method.return_type.category in ["object", "enum", "bitmask"] %}
return ToAPI(result);
{% else %}
return result;
@@ -104,7 +104,7 @@ namespace {{native_namespace}} {
{%- endfor -%}
);
{% if function.return_type.name.canonical_case() != "void" %}
- {% if function.return_type.category == "object" %}
+ {% if function.return_type.category in ["object", "enum", "bitmask"] %}
return ToAPI(result);
{% else %}
return result;
diff --git a/chromium/third_party/dawn/generator/templates/dawn/native/api_dawn_native_proc.cpp b/chromium/third_party/dawn/generator/templates/dawn/native/api_dawn_native_proc.cpp
index f9147c6d407..e573c11df97 100644
--- a/chromium/third_party/dawn/generator/templates/dawn/native/api_dawn_native_proc.cpp
+++ b/chromium/third_party/dawn/generator/templates/dawn/native/api_dawn_native_proc.cpp
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-#include <dawn/{{metadata.api.lower()}}.h>
+#include "dawn/{{metadata.api.lower()}}.h"
namespace dawn::native {
diff --git a/chromium/third_party/dawn/generator/templates/dawn/native/dawn_platform.h b/chromium/third_party/dawn/generator/templates/dawn/native/dawn_platform.h
index e3f1c91a335..1bb54269911 100644
--- a/chromium/third_party/dawn/generator/templates/dawn/native/dawn_platform.h
+++ b/chromium/third_party/dawn/generator/templates/dawn/native/dawn_platform.h
@@ -77,6 +77,16 @@ namespace {{native_namespace}} {
static constexpr uint32_t value = {{len(e.values)}};
};
{% endfor %}
+
+ {% for type in by_category["enum"] + by_category["bitmask"] %}
+ inline {{as_cType(type.name)}} ToAPI({{namespace}}::{{as_cppType(type.name)}} rhs) {
+ return static_cast<{{as_cType(type.name)}}>(rhs);
+ }
+
+ inline {{namespace}}::{{as_cppType(type.name)}} FromAPI({{as_cType(type.name)}} rhs) {
+ return static_cast<{{namespace}}::{{as_cppType(type.name)}}>(rhs);
+ }
+ {% endfor %}
}
#endif // {{NATIVE_DIR}}_{{PREFIX}}_PLATFORM_AUTOGEN_H_
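These generated ToAPI/FromAPI shims for enums and bitmasks are what the ProcTable change above relies on when it routes enum and bitmask return values through ToAPI(result). For one concrete type the expansion is roughly (assuming the wgpu:: C++ namespace and the WGPUTextureFormat C enum):

    // Approximate per-type expansion of the template above; the generated code
    // emits one such pair for every enum and bitmask type.
    inline WGPUTextureFormat ToAPI(wgpu::TextureFormat rhs) {
        return static_cast<WGPUTextureFormat>(rhs);
    }

    inline wgpu::TextureFormat FromAPI(WGPUTextureFormat rhs) {
        return static_cast<wgpu::TextureFormat>(rhs);
    }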
diff --git a/chromium/third_party/dawn/generator/templates/dawn/wire/WireCmd.cpp b/chromium/third_party/dawn/generator/templates/dawn/wire/WireCmd.cpp
index c945bee871a..d0a220d7297 100644
--- a/chromium/third_party/dawn/generator/templates/dawn/wire/WireCmd.cpp
+++ b/chromium/third_party/dawn/generator/templates/dawn/wire/WireCmd.cpp
@@ -784,72 +784,4 @@ namespace dawn::wire {
{{ write_command_serialization_methods(command, True) }}
{% endfor %}
- // Implementations of serialization/deserialization of WPGUDeviceProperties.
- size_t SerializedWGPUDevicePropertiesSize(const WGPUDeviceProperties* deviceProperties) {
- return sizeof(WGPUDeviceProperties) +
- WGPUDevicePropertiesGetExtraRequiredSize(*deviceProperties);
- }
-
- void SerializeWGPUDeviceProperties(const WGPUDeviceProperties* deviceProperties,
- char* buffer) {
- SerializeBuffer serializeBuffer(buffer, SerializedWGPUDevicePropertiesSize(deviceProperties));
-
- WGPUDevicePropertiesTransfer* transfer;
-
- WireResult result = serializeBuffer.Next(&transfer);
- ASSERT(result == WireResult::Success);
-
- ErrorObjectIdProvider provider;
- result = WGPUDevicePropertiesSerialize(*deviceProperties, transfer, &serializeBuffer, provider);
- ASSERT(result == WireResult::Success);
- }
-
- bool DeserializeWGPUDeviceProperties(WGPUDeviceProperties* deviceProperties,
- const volatile char* buffer,
- size_t size) {
- const volatile WGPUDevicePropertiesTransfer* transfer;
- DeserializeBuffer deserializeBuffer(buffer, size);
- if (deserializeBuffer.Read(&transfer) != WireResult::Success) {
- return false;
- }
-
- ErrorObjectIdResolver resolver;
- return WGPUDevicePropertiesDeserialize(deviceProperties, transfer, &deserializeBuffer,
- nullptr, resolver) == WireResult::Success;
- }
-
- size_t SerializedWGPUSupportedLimitsSize(const WGPUSupportedLimits* supportedLimits) {
- return sizeof(WGPUSupportedLimits) +
- WGPUSupportedLimitsGetExtraRequiredSize(*supportedLimits);
- }
-
- void SerializeWGPUSupportedLimits(
- const WGPUSupportedLimits* supportedLimits,
- char* buffer) {
- SerializeBuffer serializeBuffer(buffer, SerializedWGPUSupportedLimitsSize(supportedLimits));
-
- WGPUSupportedLimitsTransfer* transfer;
-
- WireResult result = serializeBuffer.Next(&transfer);
- ASSERT(result == WireResult::Success);
-
- ErrorObjectIdProvider provider;
- result = WGPUSupportedLimitsSerialize(*supportedLimits, transfer, &serializeBuffer, provider);
- ASSERT(result == WireResult::Success);
- }
-
- bool DeserializeWGPUSupportedLimits(WGPUSupportedLimits* supportedLimits,
- const volatile char* buffer,
- size_t size) {
- const volatile WGPUSupportedLimitsTransfer* transfer;
- DeserializeBuffer deserializeBuffer(buffer, size);
- if (deserializeBuffer.Read(&transfer) != WireResult::Success) {
- return false;
- }
-
- ErrorObjectIdResolver resolver;
- return WGPUSupportedLimitsDeserialize(supportedLimits, transfer, &deserializeBuffer,
- nullptr, resolver) == WireResult::Success;
- }
-
} // namespace dawn::wire
diff --git a/chromium/third_party/dawn/generator/templates/dawn/wire/WireCmd.h b/chromium/third_party/dawn/generator/templates/dawn/wire/WireCmd.h
index f8c2762cf1f..23a8685e7d6 100644
--- a/chromium/third_party/dawn/generator/templates/dawn/wire/WireCmd.h
+++ b/chromium/third_party/dawn/generator/templates/dawn/wire/WireCmd.h
@@ -15,7 +15,7 @@
#ifndef DAWNWIRE_WIRECMD_AUTOGEN_H_
#define DAWNWIRE_WIRECMD_AUTOGEN_H_
-#include <dawn/webgpu.h>
+#include "dawn/webgpu.h"
#include "dawn/wire/BufferConsumer.h"
#include "dawn/wire/ObjectType_autogen.h"
diff --git a/chromium/third_party/dawn/generator/templates/dawn/wire/client/ClientBase.h b/chromium/third_party/dawn/generator/templates/dawn/wire/client/ClientBase.h
index 0f9cbfe3385..84b2889526c 100644
--- a/chromium/third_party/dawn/generator/templates/dawn/wire/client/ClientBase.h
+++ b/chromium/third_party/dawn/generator/templates/dawn/wire/client/ClientBase.h
@@ -25,7 +25,7 @@ namespace dawn::wire::client {
class ClientBase : public ChunkedCommandHandler, public ObjectIdProvider {
public:
ClientBase() = default;
- virtual ~ClientBase() = default;
+ ~ClientBase() override = default;
{% for type in by_category["object"] %}
const ObjectAllocator<{{type.name.CamelCase()}}>& {{type.name.CamelCase()}}Allocator() const {
diff --git a/chromium/third_party/dawn/generator/templates/dawn/wire/server/ServerBase.h b/chromium/third_party/dawn/generator/templates/dawn/wire/server/ServerBase.h
index 8fef34aee4c..35011324f69 100644
--- a/chromium/third_party/dawn/generator/templates/dawn/wire/server/ServerBase.h
+++ b/chromium/third_party/dawn/generator/templates/dawn/wire/server/ServerBase.h
@@ -27,7 +27,7 @@ namespace dawn::wire::server {
class ServerBase : public ChunkedCommandHandler, public ObjectIdResolver {
public:
ServerBase() = default;
- virtual ~ServerBase() = default;
+ ~ServerBase() override = default;
protected:
void DestroyAllObjects(const DawnProcTable& procs) {
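ClientBase and ServerBase each derive from a base class with a virtual destructor, so spelling the defaulted destructor as ~T() override lets the compiler verify that assumption instead of silently re-declaring a new virtual. A tiny illustration with hypothetical classes:

    // Hypothetical classes, not Dawn code: 'override' fails to compile if the
    // base destructor ever stops being virtual.
    struct Handler {
        virtual ~Handler() = default;
    };

    struct MyHandler : Handler {
        ~MyHandler() override = default;
    };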
diff --git a/chromium/third_party/dawn/generator/templates/dawn/wire/server/ServerDoers.cpp b/chromium/third_party/dawn/generator/templates/dawn/wire/server/ServerDoers.cpp
index 9c6df80132a..11776e24177 100644
--- a/chromium/third_party/dawn/generator/templates/dawn/wire/server/ServerDoers.cpp
+++ b/chromium/third_party/dawn/generator/templates/dawn/wire/server/ServerDoers.cpp
@@ -77,11 +77,6 @@ namespace dawn::wire::server {
if (data == nullptr) {
return false;
}
- if (data->deviceInfo != nullptr) {
- if (!UntrackDeviceChild(data->deviceInfo, objectType, objectId)) {
- return false;
- }
- }
if (data->state == AllocationState::Allocated) {
ASSERT(data->handle != nullptr);
{% if type.name.CamelCase() in server_reverse_lookup_objects %}
@@ -89,17 +84,6 @@ namespace dawn::wire::server {
{% endif %}
{% if type.name.get() == "device" %}
- //* TODO(crbug.com/dawn/384): This is a hack to make sure that all child objects
- //* are destroyed before their device. We should have a solution in
- //* Dawn native that makes all child objects internally null if their
- //* Device is destroyed.
- while (data->info->childObjectTypesAndIds.size() > 0) {
- auto [childObjectType, childObjectId] = UnpackObjectTypeAndId(
- *data->info->childObjectTypesAndIds.begin());
- if (!DoDestroyObject(childObjectType, childObjectId)) {
- return false;
- }
- }
if (data->handle != nullptr) {
//* Deregisters uncaptured error and device lost callbacks since
//* they should not be forwarded if the device no longer exists on the wire.
diff --git a/chromium/third_party/dawn/generator/templates/dawn/wire/server/ServerHandlers.cpp b/chromium/third_party/dawn/generator/templates/dawn/wire/server/ServerHandlers.cpp
index 5514a3370c3..9a9f05a6b5e 100644
--- a/chromium/third_party/dawn/generator/templates/dawn/wire/server/ServerHandlers.cpp
+++ b/chromium/third_party/dawn/generator/templates/dawn/wire/server/ServerHandlers.cpp
@@ -52,24 +52,6 @@ namespace dawn::wire::server {
return false;
}
{{name}}Data->generation = cmd.{{name}}.generation;
-
- //* TODO(crbug.com/dawn/384): This is a hack to make sure that all child objects
- //* are destroyed before their device. The dawn_native device needs to track all child objects so
- //* it can destroy them if the device is destroyed first.
- {% if command.derived_object %}
- {% set type = command.derived_object %}
- {% if type.name.get() == "device" %}
- {{name}}Data->deviceInfo = DeviceObjects().Get(cmd.selfId)->info.get();
- {% else %}
- auto* selfData = {{type.name.CamelCase()}}Objects().Get(cmd.selfId);
- {{name}}Data->deviceInfo = selfData->deviceInfo;
- {% endif %}
- if ({{name}}Data->deviceInfo != nullptr) {
- if (!TrackDeviceChild({{name}}Data->deviceInfo, ObjectType::{{Type}}, cmd.{{name}}.id)) {
- return false;
- }
- }
- {% endif %}
{% endfor %}
//* Do command
diff --git a/chromium/third_party/dawn/generator/templates/library_api_enum_tables.js b/chromium/third_party/dawn/generator/templates/library_api_enum_tables.js
index 2ec4eb63cbb..6732ebf73ba 100644
--- a/chromium/third_party/dawn/generator/templates/library_api_enum_tables.js
+++ b/chromium/third_party/dawn/generator/templates/library_api_enum_tables.js
@@ -15,7 +15,7 @@
//*
//* This generator is used to produce the number-to-string mappings for
//* Emscripten's library_webgpu.js.
-//* https://github.com/emscripten-core/emscripten/blob/master/src/library_webgpu.js
+//* https://github.com/emscripten-core/emscripten/blob/main/src/library_webgpu.js
//*
{% for type in by_category["enum"] if not type.json_data.get("emscripten_no_enum_table") %}
{{type.name.CamelCase()}}: {% if type.contiguousFromZero -%}
diff --git a/chromium/third_party/dawn/generator/templates/mock_api.h b/chromium/third_party/dawn/generator/templates/mock_api.h
index 1c0a880f1a6..85beb56ec41 100644
--- a/chromium/third_party/dawn/generator/templates/mock_api.h
+++ b/chromium/third_party/dawn/generator/templates/mock_api.h
@@ -19,8 +19,8 @@
{% set Prefix = metadata.proc_table_prefix %}
{% set prefix = Prefix.lower() %}
-#include <dawn/{{prefix}}_proc_table.h>
-#include <dawn/{{api}}.h>
+#include "dawn/{{prefix}}_proc_table.h"
+#include "dawn/{{api}}.h"
#include <gmock/gmock.h>
#include <memory>
diff --git a/chromium/third_party/dawn/tools/src/go.mod b/chromium/third_party/dawn/go.mod
index 6cc00c1e7e9..21d9656bfe2 100644
--- a/chromium/third_party/dawn/tools/src/go.mod
+++ b/chromium/third_party/dawn/go.mod
@@ -1,44 +1,48 @@
-module dawn.googlesource.com/dawn/tools/src
+module dawn.googlesource.com/dawn
go 1.18
require (
- github.com/andygrunwald/go-gerrit v0.0.0-20220404064545-525eecd29744
+ github.com/andygrunwald/go-gerrit v0.0.0-20220427111355-d3e91fbf2db5
+ github.com/ben-clayton/webidlparser v0.0.0-20210923100217-8ba896ded094
github.com/fatih/color v1.13.0
- github.com/go-git/go-git/v5 v5.4.2
github.com/google/go-cmp v0.5.6
+ github.com/mattn/go-colorable v0.1.9
+ github.com/mattn/go-isatty v0.0.14
github.com/sergi/go-diff v1.2.0
github.com/shirou/gopsutil v3.21.11+incompatible
+ github.com/tidwall/jsonc v0.3.2
go.chromium.org/luci v0.0.0-20220412023008-ab2409fe739a
golang.org/x/net v0.0.0-20220403103023-749bd193bc2b
+ golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8
+ google.golang.org/api v0.63.0
+ google.golang.org/protobuf v1.28.0
)
require (
cloud.google.com/go v0.99.0 // indirect
- github.com/acomagu/bufpipe v1.0.3 // indirect
- github.com/go-git/gcfg v1.5.0 // indirect
- github.com/go-git/go-billy/v5 v5.3.1 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
+ github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/mock v1.6.0 // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/google/go-querystring v1.1.0 // indirect
- github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
+ github.com/googleapis/gax-go/v2 v2.1.1 // indirect
github.com/julienschmidt/httprouter v1.3.0 // indirect
github.com/klauspost/compress v1.13.5 // indirect
- github.com/mattn/go-colorable v0.1.9 // indirect
- github.com/mattn/go-isatty v0.0.14 // indirect
+ github.com/kr/pretty v0.3.0 // indirect
+ github.com/kr/text v0.2.0 // indirect
+ github.com/maruel/subcommands v1.1.0 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
+ github.com/rogpeppe/go-internal v1.8.0 // indirect
+ github.com/texttheater/golang-levenshtein v1.0.1 // indirect
github.com/tklauser/go-sysconf v0.3.10 // indirect
github.com/tklauser/numcpus v0.4.0 // indirect
github.com/yusufpapurcu/wmi v1.2.2 // indirect
- golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect
+ go.opencensus.io v0.23.0 // indirect
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 // indirect
golang.org/x/text v0.3.7 // indirect
- google.golang.org/api v0.63.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20220216160803-4663080d8bc8 // indirect
google.golang.org/grpc v1.44.0 // indirect
- google.golang.org/protobuf v1.27.1 // indirect
- gopkg.in/warnings.v0 v0.1.2 // indirect
)
diff --git a/chromium/third_party/dawn/tools/src/go.sum b/chromium/third_party/dawn/go.sum
index 8172f47c009..0b540d3577a 100644
--- a/chromium/third_party/dawn/tools/src/go.sum
+++ b/chromium/third_party/dawn/go.sum
@@ -72,27 +72,20 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
github.com/Masterminds/squirrel v1.5.0/go.mod h1:NNaOrjSoIDfDA40n7sr2tPNZRfjzjA400rg+riTZj10=
-github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
-github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
-github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo=
github.com/VividCortex/mysqlerr v1.0.0/go.mod h1:xERx8E4tBhLvpjzdUyQiSfUxeMcATEQrflDAfXsqcAE=
-github.com/acomagu/bufpipe v1.0.3 h1:fxAGrHZTgQ9w5QqVItgzwj235/uYZYgbXitB+dLupOk=
-github.com/acomagu/bufpipe v1.0.3/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ2sYmHc4=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc=
github.com/alicebob/miniredis/v2 v2.15.1/go.mod h1:gquAfGbzn92jvtrSC69+6zZnwSODVXVpYDRaGhWaL6I=
-github.com/andygrunwald/go-gerrit v0.0.0-20220404064545-525eecd29744 h1:IMdsOeMJS4c1R8SvLeyay1qVbUywc1WUX+q2TcSBtjQ=
-github.com/andygrunwald/go-gerrit v0.0.0-20220404064545-525eecd29744/go.mod h1:aqcjwEnmLLSalFNYR0p2ttnEXOVVRctIzsUMHbEcruU=
-github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
+github.com/andygrunwald/go-gerrit v0.0.0-20220427111355-d3e91fbf2db5 h1:HBlTlvyq4siv4ZK41DebGIX11/9gFBqUF8G64AePjyQ=
+github.com/andygrunwald/go-gerrit v0.0.0-20220427111355-d3e91fbf2db5/go.mod h1:aqcjwEnmLLSalFNYR0p2ttnEXOVVRctIzsUMHbEcruU=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
-github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
github.com/aws/aws-sdk-go v1.36.29/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/aws/aws-sdk-go v1.37.0/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/aws/aws-sdk-go v1.40.42/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q=
@@ -100,6 +93,8 @@ github.com/bazelbuild/buildtools v0.0.0-20210911013817-37179d5767a1/go.mod h1:68
github.com/bazelbuild/remote-apis v0.0.0-20210718193713-0ecef08215cf/go.mod h1:ry8Y6CkQqCVcYsjPOlLXDX2iRVjOnjogdNwhvHmRcz8=
github.com/bazelbuild/remote-apis v0.0.0-20210812183132-3e816456ee28/go.mod h1:ry8Y6CkQqCVcYsjPOlLXDX2iRVjOnjogdNwhvHmRcz8=
github.com/bazelbuild/remote-apis-sdks v0.0.0-20220301013006-36f659de32ef/go.mod h1:p6PH8Kyjfm/hhbwC8ymX8SarB7CQTUiW6J0T/zbEKj8=
+github.com/ben-clayton/webidlparser v0.0.0-20210923100217-8ba896ded094 h1:CTVJdI6oUCRNucMEmoh3c2U88DesoPtefsxKhoZ1WuQ=
+github.com/ben-clayton/webidlparser v0.0.0-20210923100217-8ba896ded094/go.mod h1:bV550SPlMos7UhMprxlm14XTBTpKHSUZ8Q4Id5qQuyw=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
@@ -142,7 +137,6 @@ github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZm
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
-github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
@@ -156,20 +150,9 @@ github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5Kwzbycv
github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w=
github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
-github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
-github.com/go-git/gcfg v1.5.0 h1:Q5ViNfGF8zFgyJWPqYwA7qGFoMTEiBmdlkcfRmpIMa4=
-github.com/go-git/gcfg v1.5.0/go.mod h1:5m20vg6GwYabIxaOonVkTdrILxQMpEShl1xiMF4ua+E=
-github.com/go-git/go-billy/v5 v5.2.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0=
-github.com/go-git/go-billy/v5 v5.3.1 h1:CPiOUAzKtMRvolEKw+bG1PLRpT7D3LIs3/3ey4Aiu34=
-github.com/go-git/go-billy/v5 v5.3.1/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0=
-github.com/go-git/go-git-fixtures/v4 v4.2.1 h1:n9gGL1Ct/yIw+nfsfr8s4+sbhT+Ncu2SubfXjIWgci8=
-github.com/go-git/go-git-fixtures/v4 v4.2.1/go.mod h1:K8zd3kDUAykwTdDCr+I0per6Y6vMiRR/nnVTBtavnB0=
-github.com/go-git/go-git/v5 v5.4.2 h1:BXyZu9t0VkbiHtqrsvdq39UDhGJTl1h55VW6CSC4aY4=
-github.com/go-git/go-git/v5 v5.4.2/go.mod h1:gQ1kArt6d+n+BGd+/B/I74HwRTLhth2+zti4ihgckDc=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
@@ -191,6 +174,7 @@ github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4er
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
@@ -274,6 +258,7 @@ github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
+github.com/googleapis/gax-go/v2 v2.1.1 h1:dp3bWCh+PPO1zjRRiCSczJav13sBvG4UhNyVTa1KqdU=
github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gopherjs/gopherjs v0.0.0-20210901121439-eee08aaf2717 h1:V1j4G8AXIJeyzT3ng2Oh4IRo/VEgRWYAsyYwhOz5rko=
@@ -318,11 +303,7 @@ github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKe
github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
-github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
-github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=
-github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
-github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
@@ -335,7 +316,6 @@ github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfV
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
-github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
@@ -359,10 +339,10 @@ github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6Fm
github.com/luci/gtreap v0.0.0-20161228054646-35df89791e8f/go.mod h1:OjKOY0UvVOOH5nWXSIWTbQWESn8dDiGlaEZx6IAsWhU=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/maruel/subcommands v1.1.0 h1:5k7Y1KXDrI4U2Q7J6R7rbnpoNAiklUDTdaK2fFT998g=
github.com/maruel/subcommands v1.1.0/go.mod h1:b25AG9Eho2Rs1NUPAPAYBFy1B5y63QMxw/2WmLGO8m8=
+github.com/maruel/ut v1.0.2 h1:mQTlQk3jubTbdTcza+hwoZQWhzcvE4L6K6RTtAFlA1k=
github.com/maruel/ut v1.0.2/go.mod h1:RV8PwPD9dd2KFlnlCc/DB2JVvkXmyaalfc5xvmSrRSs=
-github.com/matryer/is v1.2.0 h1:92UTHpy8CDwaJ08GqLDzhhuixiBUUD1p3AU6PHddz4A=
-github.com/matryer/is v1.2.0/go.mod h1:2fLPjFQM9rhQ15aVEtbuwhJinnOqrmgXPNdZsdwlWXA=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
@@ -400,7 +380,6 @@ github.com/mostynb/zstdpool-syncpool v0.0.10/go.mod h1:BmhpjzZxG8KCduFi0N/Do6j9w
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo=
github.com/neelance/sourcemap v0.0.0-20200213170602-2833bce08e4c/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM=
-github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
@@ -413,7 +392,6 @@ github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/xattr v0.4.4/go.mod h1:sBD3RAqlr8Q+RC3FutZcikpT8nyDrIEEBw2J744gVWs=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
@@ -443,7 +421,6 @@ github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb
github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
-github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ=
github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=
@@ -453,7 +430,6 @@ github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJ
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
-github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/assertions v1.2.0 h1:42S6lae5dvLc7BrLu/0ugRtcFVjoJNMC/N3yZFZkDFs=
@@ -483,14 +459,16 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
+github.com/texttheater/golang-levenshtein v1.0.1 h1:+cRNoVrfiwufQPhoMzB6N0Yf/Mqajr6t1lOv8GyGE2U=
github.com/texttheater/golang-levenshtein v1.0.1/go.mod h1:PYAKrbF5sAiq9wd+H82hs7gNaen0CplQ9uvm6+enD/8=
+github.com/tidwall/jsonc v0.3.2 h1:ZTKrmejRlAJYdn0kcaFqRAKlxxFIC21pYq8vLa4p2Wc=
+github.com/tidwall/jsonc v0.3.2/go.mod h1:dw+3CIxqHi+t8eFSpzzMlcVYxKp08UP5CD8/uSFCyJE=
github.com/tklauser/go-sysconf v0.3.10 h1:IJ1AZGZRWbY8T5Vfk04D9WOA5WSejdflXxP03OUqALw=
github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk=
github.com/tklauser/numcpus v0.4.0 h1:E53Dm1HjH1/R2/aoCtXtPgzmElmn51aOkhCFSuZq//o=
github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hMwiKKqXCQ=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
-github.com/xanzy/ssh-agent v0.3.0/go.mod h1:3s9xbODqPuuhK9JV1R321M/FlMZSBvE5aY6eAcqrDh0=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
github.com/xtgo/set v1.0.0/go.mod h1:d3NHzGzSa0NmB2NhFyECA+QdRp29oEn2xbT+TpeFoM8=
@@ -514,6 +492,7 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
go.opencensus.io v0.22.6/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
+go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M=
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
go.starlark.net v0.0.0-20210223155950-e043a3d3c984/go.mod h1:t3mmBBPzAVvK0L0n1drDmrQsJ8FoIx4INCqVMTr/Zo0=
@@ -524,7 +503,6 @@ go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
@@ -532,7 +510,6 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
-golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -608,7 +585,6 @@ golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
-golang.org/x/net v0.0.0-20210326060303-6b1517762897/go.mod h1:uSPa2vr4CLtc/ILN5odXGNXS6mhrKVzTaCXzk9m6W3k=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210505214959-0714010a04ed/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
@@ -701,11 +677,9 @@ golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210502180810-71e4cd670f79/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210503080704-8803ae5d1324/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210507014357-30e306a8bba5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -964,29 +938,24 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw=
+google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
-gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
-gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME=
-gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/chromium/third_party/dawn/gpu_info.json b/chromium/third_party/dawn/gpu_info.json
new file mode 100644
index 00000000000..d9699621665
--- /dev/null
+++ b/chromium/third_party/dawn/gpu_info.json
@@ -0,0 +1,129 @@
+{
+ "_comment": [
+ "Copyright 2022 The Dawn Authors",
+ "",
+ "Licensed under the Apache License, Version 2.0 (the \"License\");",
+ "you may not use this file except in compliance with the License.",
+ "You may obtain a copy of the License at",
+ "",
+ " http://www.apache.org/licenses/LICENSE-2.0",
+ "",
+ "Unless required by applicable law or agreed to in writing, software",
+ "distributed under the License is distributed on an \"AS IS\" BASIS,",
+ "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.",
+ "See the License for the specific language governing permissions and",
+ "limitations under the License."
+ ],
+
+ "vendors": {
+
+ "AMD": {
+ "id": "0x1002",
+
+ "deviceMask": "0xFFF0",
+ "architecture": {
+ "GCN 1": ["0x6600", "0x6610", "0x6660", "0x6790", "0x6800", "0x6810", "0x6820", "0x6830"],
+ "GCN 2": ["0x1300", "0x1310", "0x6640", "0x6650", "0x67A0", "0x67B0", "0x9830", "0x9850"],
+ "GCN 3": ["0x6900", "0x6920", "0x6930", "0x7300", "0x9870", "0x98E0"],
+ "GCN 4": ["0x67C0", "0x67D0", "0x67E0", "0x67F0", "0x6980", "0x6990"],
+ "GCN 5": ["0x15D0", "0x1630", "0x1640", "0x66A0", "0x6860", "0x6870", "0x6940", "0x69A0"],
+ "RDNA 1": ["0x7310", "0x7340", "0x7360"],
+ "RDNA 2": ["0x73A0", "0x73B0", "0x73D0", "0x73E0", "0x73F0", "0x7420", "0x7430"]
+ }
+ },
+
+ "Apple": {
+ "id": "0x106b"
+ },
+
+ "ARM": {
+ "id": "0x13B5",
+
+ "deviceMask": "0xF0000000",
+ "architecture": {
+ "_comment": [
+ "The Midgard GPUs have device IDs like 0x07______ and 0x08______, but it's easiest to",
+ "mask those values out and simply check for the highest octet being zero, since that",
+ "distinguishes it from the other architectures."
+ ],
+
+ "Midgard": ["0x00000000"],
+ "Bifrost": ["0x60000000", "0x70000000"],
+ "Valhall": ["0x90000000", "0xA0000000"]
+ }
+ },
+
+ "Google": {
+ "id": "0x1AE0",
+
+ "architecture": {
+ "Swiftshader": ["0xC0DE"]
+ }
+ },
+
+ "Img Tec": {
+ "id": "0x1010"
+ },
+
+ "Intel": {
+ "id": "0x8086",
+
+ "deviceMask": "0xFF00",
+ "architecture": {
+ "Gen 7": ["0x0100", "0x0400", "0x0A00", "0x0D00", "0x0F00"],
+ "Gen 8": ["0x1600", "0x2200"],
+ "Gen 9": ["0x1900", "0x3100", "0x3E00", "0x5A00", "0x5900", "0x9B00"],
+ "Gen 11": ["0x8A00"],
+ "Xe": ["0x4600", "0x4C00", "0x4900", "0x9A00"]
+ }
+ },
+
+ "Mesa": {
+ "id": "0x10005"
+ },
+
+ "Microsoft": {
+ "id": "0x1414",
+
+ "architecture": {
+ "WARP": ["0x8c"]
+ }
+ },
+
+ "Nvidia": {
+ "id": "0x10DE",
+
+ "deviceMask": "0xFF00",
+ "architecture": {
+ "Fermi": ["0x0D00"],
+ "Kepler": ["0x0F00", "0x1000", "0x1100", "0x1200"],
+ "Maxwell": ["0x1300", "0x1400", "0x1600", "0x1700"],
+ "Pascal": ["0x1500", "0x1B00", "0x1C00", "0x1D00"],
+ "Turing": ["0x1E00", "0x1F00", "0x2100"],
+ "Ampere": ["0x2200", "0x2400", "0x2500"]
+ }
+ },
+
+ "Qualcomm": {
+ "id": "0x5143",
+
+ "deviceMask": "0xFF000000",
+ "architecture": {
+ "Adreno 4xx": ["0x04000000"],
+ "Adreno 5xx": ["0x05000000"],
+ "Adreno 6xx": ["0x06000000"],
+ "Adreno 7xx": ["0x07000000"]
+ }
+ },
+
+ "Samsung": {
+ "id": "0x144d",
+
+ "architecture": {
+ "RDNA 2": ["0x73A0"]
+ }
+ }
+
+ }
+
+}
\ No newline at end of file
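Each vendor entry may carry a deviceMask; the generated GetArchitecture switch masks the device ID before comparing it against the architecture buckets, so only the significant bits select an architecture (see the ARM Midgard comment above). A hand-written sketch of that matching for an example Intel ID:

    // Hand-written sketch of the deviceMask matching performed by the generated
    // code; 0x9A49 is only an example device ID, not taken from this patch.
    #include <cstdint>
    #include <iostream>

    int main() {
        constexpr uint32_t kIntelDeviceMask = 0xFF00;  // "deviceMask" for Intel above
        constexpr uint32_t kXeBucket = 0x9A00;         // one of the "Xe" bucket values
        uint32_t deviceId = 0x9A49;                    // hypothetical device ID

        if ((deviceId & kIntelDeviceMask) == kXeBucket) {
            std::cout << "architecture: xe\n";
        }
        return 0;
    }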
diff --git a/chromium/third_party/dawn/include/dawn/CPPLINT.cfg b/chromium/third_party/dawn/include/dawn/CPPLINT.cfg
deleted file mode 100644
index f5c9c6dfc49..00000000000
--- a/chromium/third_party/dawn/include/dawn/CPPLINT.cfg
+++ /dev/null
@@ -1 +0,0 @@
-filter=-runtime/indentation_namespace
diff --git a/chromium/third_party/dawn/include/dawn/EnumClassBitmasks.h b/chromium/third_party/dawn/include/dawn/EnumClassBitmasks.h
index 7bfe4ecc5cb..0dbe09031b9 100644
--- a/chromium/third_party/dawn/include/dawn/EnumClassBitmasks.h
+++ b/chromium/third_party/dawn/include/dawn/EnumClassBitmasks.h
@@ -31,126 +31,117 @@
namespace dawn {
- template <typename T>
- struct IsDawnBitmask {
- static constexpr bool enable = false;
- };
-
- template <typename T, typename Enable = void>
- struct LowerBitmask {
- static constexpr bool enable = false;
- };
-
- template <typename T>
- struct LowerBitmask<T, typename std::enable_if<IsDawnBitmask<T>::enable>::type> {
- static constexpr bool enable = true;
- using type = T;
- constexpr static T Lower(T t) {
- return t;
- }
- };
-
- template <typename T>
- struct BoolConvertible {
- using Integral = typename std::underlying_type<T>::type;
-
- // NOLINTNEXTLINE(runtime/explicit)
- constexpr BoolConvertible(Integral value) : value(value) {
- }
- constexpr operator bool() const {
- return value != 0;
- }
- constexpr operator T() const {
- return static_cast<T>(value);
- }
-
- Integral value;
- };
-
- template <typename T>
- struct LowerBitmask<BoolConvertible<T>> {
- static constexpr bool enable = true;
- using type = T;
- static constexpr type Lower(BoolConvertible<T> t) {
- return t;
- }
- };
-
- template <typename T1,
- typename T2,
- typename = typename std::enable_if<LowerBitmask<T1>::enable &&
- LowerBitmask<T2>::enable>::type>
- constexpr BoolConvertible<typename LowerBitmask<T1>::type> operator|(T1 left, T2 right) {
- using T = typename LowerBitmask<T1>::type;
- using Integral = typename std::underlying_type<T>::type;
- return static_cast<Integral>(LowerBitmask<T1>::Lower(left)) |
- static_cast<Integral>(LowerBitmask<T2>::Lower(right));
- }
-
- template <typename T1,
- typename T2,
- typename = typename std::enable_if<LowerBitmask<T1>::enable &&
- LowerBitmask<T2>::enable>::type>
- constexpr BoolConvertible<typename LowerBitmask<T1>::type> operator&(T1 left, T2 right) {
- using T = typename LowerBitmask<T1>::type;
- using Integral = typename std::underlying_type<T>::type;
- return static_cast<Integral>(LowerBitmask<T1>::Lower(left)) &
- static_cast<Integral>(LowerBitmask<T2>::Lower(right));
- }
-
- template <typename T1,
- typename T2,
- typename = typename std::enable_if<LowerBitmask<T1>::enable &&
- LowerBitmask<T2>::enable>::type>
- constexpr BoolConvertible<typename LowerBitmask<T1>::type> operator^(T1 left, T2 right) {
- using T = typename LowerBitmask<T1>::type;
- using Integral = typename std::underlying_type<T>::type;
- return static_cast<Integral>(LowerBitmask<T1>::Lower(left)) ^
- static_cast<Integral>(LowerBitmask<T2>::Lower(right));
- }
-
- template <typename T1>
- constexpr BoolConvertible<typename LowerBitmask<T1>::type> operator~(T1 t) {
- using T = typename LowerBitmask<T1>::type;
- using Integral = typename std::underlying_type<T>::type;
- return ~static_cast<Integral>(LowerBitmask<T1>::Lower(t));
- }
-
- template <typename T,
- typename T2,
- typename = typename std::enable_if<IsDawnBitmask<T>::enable &&
- LowerBitmask<T2>::enable>::type>
- constexpr T& operator&=(T& l, T2 right) {
- T r = LowerBitmask<T2>::Lower(right);
- l = l & r;
- return l;
- }
-
- template <typename T,
- typename T2,
- typename = typename std::enable_if<IsDawnBitmask<T>::enable &&
- LowerBitmask<T2>::enable>::type>
- constexpr T& operator|=(T& l, T2 right) {
- T r = LowerBitmask<T2>::Lower(right);
- l = l | r;
- return l;
- }
-
- template <typename T,
- typename T2,
- typename = typename std::enable_if<IsDawnBitmask<T>::enable &&
- LowerBitmask<T2>::enable>::type>
- constexpr T& operator^=(T& l, T2 right) {
- T r = LowerBitmask<T2>::Lower(right);
- l = l ^ r;
- return l;
- }
-
- template <typename T>
- constexpr bool HasZeroOrOneBits(T value) {
- using Integral = typename std::underlying_type<T>::type;
- return (static_cast<Integral>(value) & (static_cast<Integral>(value) - 1)) == 0;
- }
+template <typename T>
+struct IsDawnBitmask {
+ static constexpr bool enable = false;
+};
+
+template <typename T, typename Enable = void>
+struct LowerBitmask {
+ static constexpr bool enable = false;
+};
+
+template <typename T>
+struct LowerBitmask<T, typename std::enable_if<IsDawnBitmask<T>::enable>::type> {
+ static constexpr bool enable = true;
+ using type = T;
+ constexpr static T Lower(T t) { return t; }
+};
+
+template <typename T>
+struct BoolConvertible {
+ using Integral = typename std::underlying_type<T>::type;
+
+ // NOLINTNEXTLINE(runtime/explicit)
+ constexpr BoolConvertible(Integral value) : value(value) {}
+ constexpr operator bool() const { return value != 0; }
+ constexpr operator T() const { return static_cast<T>(value); }
+
+ Integral value;
+};
+
+template <typename T>
+struct LowerBitmask<BoolConvertible<T>> {
+ static constexpr bool enable = true;
+ using type = T;
+ static constexpr type Lower(BoolConvertible<T> t) { return t; }
+};
+
+template <
+ typename T1,
+ typename T2,
+ typename = typename std::enable_if<LowerBitmask<T1>::enable && LowerBitmask<T2>::enable>::type>
+constexpr BoolConvertible<typename LowerBitmask<T1>::type> operator|(T1 left, T2 right) {
+ using T = typename LowerBitmask<T1>::type;
+ using Integral = typename std::underlying_type<T>::type;
+ return static_cast<Integral>(LowerBitmask<T1>::Lower(left)) |
+ static_cast<Integral>(LowerBitmask<T2>::Lower(right));
+}
+
+template <
+ typename T1,
+ typename T2,
+ typename = typename std::enable_if<LowerBitmask<T1>::enable && LowerBitmask<T2>::enable>::type>
+constexpr BoolConvertible<typename LowerBitmask<T1>::type> operator&(T1 left, T2 right) {
+ using T = typename LowerBitmask<T1>::type;
+ using Integral = typename std::underlying_type<T>::type;
+ return static_cast<Integral>(LowerBitmask<T1>::Lower(left)) &
+ static_cast<Integral>(LowerBitmask<T2>::Lower(right));
+}
+
+template <
+ typename T1,
+ typename T2,
+ typename = typename std::enable_if<LowerBitmask<T1>::enable && LowerBitmask<T2>::enable>::type>
+constexpr BoolConvertible<typename LowerBitmask<T1>::type> operator^(T1 left, T2 right) {
+ using T = typename LowerBitmask<T1>::type;
+ using Integral = typename std::underlying_type<T>::type;
+ return static_cast<Integral>(LowerBitmask<T1>::Lower(left)) ^
+ static_cast<Integral>(LowerBitmask<T2>::Lower(right));
+}
+
+template <typename T1>
+constexpr BoolConvertible<typename LowerBitmask<T1>::type> operator~(T1 t) {
+ using T = typename LowerBitmask<T1>::type;
+ using Integral = typename std::underlying_type<T>::type;
+ return ~static_cast<Integral>(LowerBitmask<T1>::Lower(t));
+}
+
+template <
+ typename T,
+ typename T2,
+ typename = typename std::enable_if<IsDawnBitmask<T>::enable && LowerBitmask<T2>::enable>::type>
+constexpr T& operator&=(T& l, T2 right) {
+ T r = LowerBitmask<T2>::Lower(right);
+ l = l & r;
+ return l;
+}
+
+template <
+ typename T,
+ typename T2,
+ typename = typename std::enable_if<IsDawnBitmask<T>::enable && LowerBitmask<T2>::enable>::type>
+constexpr T& operator|=(T& l, T2 right) {
+ T r = LowerBitmask<T2>::Lower(right);
+ l = l | r;
+ return l;
+}
+
+template <
+ typename T,
+ typename T2,
+ typename = typename std::enable_if<IsDawnBitmask<T>::enable && LowerBitmask<T2>::enable>::type>
+constexpr T& operator^=(T& l, T2 right) {
+ T r = LowerBitmask<T2>::Lower(right);
+ l = l ^ r;
+ return l;
+}
+
+template <typename T>
+constexpr bool HasZeroOrOneBits(T value) {
+ using Integral = typename std::underlying_type<T>::type;
+ return (static_cast<Integral>(value) & (static_cast<Integral>(value) - 1)) == 0;
+}
} // namespace dawn
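The hunk above only re-indents the namespace contents; the mechanism itself is unchanged: an enum class gains the bitwise operators defined here once IsDawnBitmask is specialized for it. A sketch with a hypothetical enum (real Dawn/WebGPU types get their specializations from the generators, not by hand):

    // Hypothetical enum opting into the dawn bitmask operators; assumes the
    // header is on the include path.
    #include <cstdint>

    #include "dawn/EnumClassBitmasks.h"

    enum class MyUsage : uint32_t {
        None = 0,
        Read = 1,
        Write = 2,
    };

    namespace dawn {
    template <>
    struct IsDawnBitmask<MyUsage> {
        static constexpr bool enable = true;
    };
    }  // namespace dawn

    MyUsage ReadWrite() {
        return MyUsage::Read | MyUsage::Write;  // operator| is now well-formed
    }

    bool HasWrite(MyUsage usage) {
        return bool(usage & MyUsage::Write);  // operator& yields a BoolConvertible
    }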
diff --git a/chromium/third_party/dawn/include/dawn/dawn_wsi.h b/chromium/third_party/dawn/include/dawn/dawn_wsi.h
index cf30dffa883..aecb252893f 100644
--- a/chromium/third_party/dawn/include/dawn/dawn_wsi.h
+++ b/chromium/third_party/dawn/include/dawn/dawn_wsi.h
@@ -15,7 +15,7 @@
#ifndef INCLUDE_DAWN_DAWN_WSI_H_
#define INCLUDE_DAWN_DAWN_WSI_H_
-#include <dawn/webgpu.h>
+#include "dawn/webgpu.h"
// Error message (or nullptr if there was no error)
typedef const char* DawnSwapChainError;
@@ -65,7 +65,7 @@ struct DawnWSIContextD3D12 {
#endif
#if defined(DAWN_ENABLE_BACKEND_METAL) && defined(__OBJC__)
-# import <Metal/Metal.h>
+#import <Metal/Metal.h>
struct DawnWSIContextMetal {
id<MTLDevice> device = nil;
diff --git a/chromium/third_party/dawn/include/dawn/native/D3D12Backend.h b/chromium/third_party/dawn/include/dawn/native/D3D12Backend.h
index 99a6c62702c..9f00ff4ce82 100644
--- a/chromium/third_party/dawn/include/dawn/native/D3D12Backend.h
+++ b/chromium/third_party/dawn/include/dawn/native/D3D12Backend.h
@@ -15,9 +15,6 @@
#ifndef INCLUDE_DAWN_NATIVE_D3D12BACKEND_H_
#define INCLUDE_DAWN_NATIVE_D3D12BACKEND_H_
-#include <dawn/dawn_wsi.h>
-#include <dawn/native/DawnNative.h>
-
#include <DXGI1_4.h>
#include <d3d12.h>
#include <windows.h>
@@ -25,86 +22,84 @@
#include <memory>
+#include "dawn/dawn_wsi.h"
+#include "dawn/native/DawnNative.h"
+
struct ID3D12Device;
struct ID3D12Resource;
namespace dawn::native::d3d12 {
- class D3D11on12ResourceCache;
-
- DAWN_NATIVE_EXPORT Microsoft::WRL::ComPtr<ID3D12Device> GetD3D12Device(WGPUDevice device);
- DAWN_NATIVE_EXPORT DawnSwapChainImplementation CreateNativeSwapChainImpl(WGPUDevice device,
- HWND window);
- DAWN_NATIVE_EXPORT WGPUTextureFormat
- GetNativeSwapChainPreferredFormat(const DawnSwapChainImplementation* swapChain);
-
- enum MemorySegment {
- Local,
- NonLocal,
- };
-
- DAWN_NATIVE_EXPORT uint64_t SetExternalMemoryReservation(WGPUDevice device,
- uint64_t requestedReservationSize,
- MemorySegment memorySegment);
-
- struct DAWN_NATIVE_EXPORT ExternalImageDescriptorDXGISharedHandle : ExternalImageDescriptor {
- public:
- ExternalImageDescriptorDXGISharedHandle();
-
- // Note: SharedHandle must be a handle to a texture object.
- HANDLE sharedHandle;
- };
-
- // Keyed mutex acquire/release uses a fixed key of 0 to match Chromium behavior.
- constexpr UINT64 kDXGIKeyedMutexAcquireReleaseKey = 0;
-
- struct DAWN_NATIVE_EXPORT ExternalImageAccessDescriptorDXGIKeyedMutex
- : ExternalImageAccessDescriptor {
- public:
- // TODO(chromium:1241533): Remove deprecated keyed mutex params after removing associated
- // code from Chromium - we use a fixed key of 0 for acquire and release everywhere now.
- uint64_t acquireMutexKey;
- uint64_t releaseMutexKey;
- bool isSwapChainTexture = false;
- };
-
- class DAWN_NATIVE_EXPORT ExternalImageDXGI {
- public:
- ~ExternalImageDXGI();
-
- // Note: SharedHandle must be a handle to a texture object.
- static std::unique_ptr<ExternalImageDXGI> Create(
- WGPUDevice device,
- const ExternalImageDescriptorDXGISharedHandle* descriptor);
-
- WGPUTexture ProduceTexture(WGPUDevice device,
- const ExternalImageAccessDescriptorDXGIKeyedMutex* descriptor);
-
- private:
- ExternalImageDXGI(Microsoft::WRL::ComPtr<ID3D12Resource> d3d12Resource,
- const WGPUTextureDescriptor* descriptor);
-
- Microsoft::WRL::ComPtr<ID3D12Resource> mD3D12Resource;
-
- // Contents of WGPUTextureDescriptor are stored individually since the descriptor
- // could outlive this image.
- WGPUTextureUsageFlags mUsage;
- WGPUTextureUsageFlags mUsageInternal = WGPUTextureUsage_None;
- WGPUTextureDimension mDimension;
- WGPUExtent3D mSize;
- WGPUTextureFormat mFormat;
- uint32_t mMipLevelCount;
- uint32_t mSampleCount;
-
- std::unique_ptr<D3D11on12ResourceCache> mD3D11on12ResourceCache;
- };
-
- struct DAWN_NATIVE_EXPORT AdapterDiscoveryOptions : public AdapterDiscoveryOptionsBase {
- AdapterDiscoveryOptions();
- explicit AdapterDiscoveryOptions(Microsoft::WRL::ComPtr<IDXGIAdapter> adapter);
-
- Microsoft::WRL::ComPtr<IDXGIAdapter> dxgiAdapter;
- };
+class D3D11on12ResourceCache;
+class Device;
+class ExternalImageDXGIImpl;
+
+DAWN_NATIVE_EXPORT Microsoft::WRL::ComPtr<ID3D12Device> GetD3D12Device(WGPUDevice device);
+DAWN_NATIVE_EXPORT DawnSwapChainImplementation CreateNativeSwapChainImpl(WGPUDevice device,
+ HWND window);
+DAWN_NATIVE_EXPORT WGPUTextureFormat
+GetNativeSwapChainPreferredFormat(const DawnSwapChainImplementation* swapChain);
+
+enum MemorySegment {
+ Local,
+ NonLocal,
+};
+
+DAWN_NATIVE_EXPORT uint64_t SetExternalMemoryReservation(WGPUDevice device,
+ uint64_t requestedReservationSize,
+ MemorySegment memorySegment);
+
+struct DAWN_NATIVE_EXPORT ExternalImageDescriptorDXGISharedHandle : ExternalImageDescriptor {
+ public:
+ ExternalImageDescriptorDXGISharedHandle();
+
+ // Note: SharedHandle must be a handle to a texture object.
+ HANDLE sharedHandle;
+};
+
+// Keyed mutex acquire/release uses a fixed key of 0 to match Chromium behavior.
+constexpr UINT64 kDXGIKeyedMutexAcquireReleaseKey = 0;
+
+struct DAWN_NATIVE_EXPORT ExternalImageAccessDescriptorDXGIKeyedMutex
+ : ExternalImageAccessDescriptor {
+ public:
+ // TODO(chromium:1241533): Remove deprecated keyed mutex params after removing associated
+ // code from Chromium - we use a fixed key of 0 for acquire and release everywhere now.
+ uint64_t acquireMutexKey;
+ uint64_t releaseMutexKey;
+ bool isSwapChainTexture = false;
+};
+
+class DAWN_NATIVE_EXPORT ExternalImageDXGI {
+ public:
+ ~ExternalImageDXGI();
+
+ static std::unique_ptr<ExternalImageDXGI> Create(
+ WGPUDevice device,
+ const ExternalImageDescriptorDXGISharedHandle* descriptor);
+
+ // Returns true if the external image resources are still valid, otherwise ProduceTexture() is
+ // guaranteed to fail e.g. after device destruction.
+ bool IsValid() const;
+
+ // TODO(sunnyps): |device| is ignored - remove after Chromium migrates to single parameter call.
+ WGPUTexture ProduceTexture(WGPUDevice device,
+ const ExternalImageAccessDescriptorDXGIKeyedMutex* descriptor);
+
+ WGPUTexture ProduceTexture(const ExternalImageAccessDescriptorDXGIKeyedMutex* descriptor);
+
+ private:
+ explicit ExternalImageDXGI(std::unique_ptr<ExternalImageDXGIImpl> impl);
+
+ std::unique_ptr<ExternalImageDXGIImpl> mImpl;
+};
+
+struct DAWN_NATIVE_EXPORT AdapterDiscoveryOptions : public AdapterDiscoveryOptionsBase {
+ AdapterDiscoveryOptions();
+ explicit AdapterDiscoveryOptions(Microsoft::WRL::ComPtr<IDXGIAdapter> adapter);
+
+ Microsoft::WRL::ComPtr<IDXGIAdapter> dxgiAdapter;
+};
} // namespace dawn::native::d3d12
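A hedged sketch of how the reworked `ExternalImageDXGI` API above is intended to be driven, using only the types declared in this header; the wrapper function, its arguments, and the chosen usage flag are illustrative rather than Dawn code.

```cpp
#include <memory>

#include "dawn/native/D3D12Backend.h"

// |device| must be a Dawn device on the D3D12 backend; |sharedHandle| is a shared-handle
// HANDLE for a texture created with parameters matching |texDesc|. All caller assumptions.
WGPUTexture ImportSharedHandleTexture(WGPUDevice device,
                                      HANDLE sharedHandle,
                                      const WGPUTextureDescriptor* texDesc) {
    using namespace dawn::native::d3d12;

    ExternalImageDescriptorDXGISharedHandle desc;
    desc.cTextureDescriptor = texDesc;  // must match the texture's creation parameters
    desc.isInitialized = true;          // contents are valid on import
    desc.sharedHandle = sharedHandle;

    std::unique_ptr<ExternalImageDXGI> image = ExternalImageDXGI::Create(device, &desc);
    if (image == nullptr || !image->IsValid()) {
        return nullptr;  // e.g. the device was already destroyed
    }

    ExternalImageAccessDescriptorDXGIKeyedMutex access;
    access.isInitialized = true;
    access.usage = WGPUTextureUsage_TextureBinding;
    // The keyed-mutex key is fixed at kDXGIKeyedMutexAcquireReleaseKey (0); the deprecated
    // acquire/release fields are ignored, zeroed here only for tidiness.
    access.acquireMutexKey = 0;
    access.releaseMutexKey = 0;
    return image->ProduceTexture(&access);  // new single-parameter overload
}
```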
diff --git a/chromium/third_party/dawn/include/dawn/native/DawnNative.h b/chromium/third_party/dawn/include/dawn/native/DawnNative.h
index dc54dedac21..ee01ff245a0 100644
--- a/chromium/third_party/dawn/include/dawn/native/DawnNative.h
+++ b/chromium/third_party/dawn/include/dawn/native/DawnNative.h
@@ -15,244 +15,252 @@
#ifndef INCLUDE_DAWN_NATIVE_DAWNNATIVE_H_
#define INCLUDE_DAWN_NATIVE_DAWNNATIVE_H_
-#include <dawn/dawn_proc_table.h>
-#include <dawn/native/dawn_native_export.h>
-#include <dawn/webgpu.h>
-
#include <string>
#include <vector>
+#include "dawn/dawn_proc_table.h"
+#include "dawn/native/dawn_native_export.h"
+#include "dawn/webgpu.h"
+
namespace dawn::platform {
- class Platform;
+class Platform;
} // namespace dawn::platform
namespace wgpu {
- struct AdapterProperties;
- struct DeviceDescriptor;
+struct AdapterProperties;
+struct DeviceDescriptor;
} // namespace wgpu
namespace dawn::native {
- class InstanceBase;
- class AdapterBase;
-
- // An optional parameter of Adapter::CreateDevice() to send additional information when creating
- // a Device. For example, we can use it to enable a workaround, optimization or feature.
- struct DAWN_NATIVE_EXPORT DawnDeviceDescriptor {
- std::vector<const char*> requiredFeatures;
- std::vector<const char*> forceEnabledToggles;
- std::vector<const char*> forceDisabledToggles;
-
- const WGPURequiredLimits* requiredLimits = nullptr;
- };
-
- // A struct to record the information of a toggle. A toggle is a code path in Dawn device that
- // can be manually configured to run or not outside Dawn, including workarounds, special
- // features and optimizations.
- struct ToggleInfo {
- const char* name;
- const char* description;
- const char* url;
- };
-
- // A struct to record the information of a feature. A feature is a GPU feature that is not
- // required to be supported by all Dawn backends and can only be used when it is enabled on the
- // creation of device.
- using FeatureInfo = ToggleInfo;
-
- // An adapter is an object that represent on possibility of creating devices in the system.
- // Most of the time it will represent a combination of a physical GPU and an API. Not that the
- // same GPU can be represented by multiple adapters but on different APIs.
- //
- // The underlying Dawn adapter is owned by the Dawn instance so this class is not RAII but just
- // a reference to an underlying adapter.
- class DAWN_NATIVE_EXPORT Adapter {
- public:
- Adapter();
- // NOLINTNEXTLINE(runtime/explicit)
- Adapter(AdapterBase* impl);
- ~Adapter();
-
- Adapter(const Adapter& other);
- Adapter& operator=(const Adapter& other);
-
- // Essentially webgpu.h's wgpuAdapterGetProperties while we don't have WGPUAdapter in
- // dawn.json
- void GetProperties(wgpu::AdapterProperties* properties) const;
- void GetProperties(WGPUAdapterProperties* properties) const;
-
- std::vector<const char*> GetSupportedExtensions() const;
- std::vector<const char*> GetSupportedFeatures() const;
- WGPUDeviceProperties GetAdapterProperties() const;
- bool GetLimits(WGPUSupportedLimits* limits) const;
-
- void SetUseTieredLimits(bool useTieredLimits);
-
- // Check that the Adapter is able to support importing external images. This is necessary
- // to implement the swapchain and interop APIs in Chromium.
- bool SupportsExternalImages() const;
-
- explicit operator bool() const;
-
- // Create a device on this adapter. On an error, nullptr is returned.
- WGPUDevice CreateDevice(const DawnDeviceDescriptor* deviceDescriptor);
- WGPUDevice CreateDevice(const wgpu::DeviceDescriptor* deviceDescriptor);
- WGPUDevice CreateDevice(const WGPUDeviceDescriptor* deviceDescriptor = nullptr);
-
- void RequestDevice(const DawnDeviceDescriptor* descriptor,
- WGPURequestDeviceCallback callback,
- void* userdata);
- void RequestDevice(const wgpu::DeviceDescriptor* descriptor,
- WGPURequestDeviceCallback callback,
- void* userdata);
- void RequestDevice(const WGPUDeviceDescriptor* descriptor,
- WGPURequestDeviceCallback callback,
- void* userdata);
-
- // Returns the underlying WGPUAdapter object.
- WGPUAdapter Get() const;
-
- // Reset the backend device object for testing purposes.
- void ResetInternalDeviceForTesting();
-
- private:
- AdapterBase* mImpl = nullptr;
- };
-
- // Base class for options passed to Instance::DiscoverAdapters.
- struct DAWN_NATIVE_EXPORT AdapterDiscoveryOptionsBase {
- public:
- const WGPUBackendType backendType;
-
- protected:
- explicit AdapterDiscoveryOptionsBase(WGPUBackendType type);
- };
-
- enum BackendValidationLevel { Full, Partial, Disabled };
-
- // Represents a connection to dawn_native and is used for dependency injection, discovering
- // system adapters and injecting custom adapters (like a Swiftshader Vulkan adapter).
- //
- // This is an RAII class for Dawn instances and also controls the lifetime of all adapters
- // for this instance.
- class DAWN_NATIVE_EXPORT Instance {
- public:
- explicit Instance(const WGPUInstanceDescriptor* desc = nullptr);
- ~Instance();
-
- Instance(const Instance& other) = delete;
- Instance& operator=(const Instance& other) = delete;
-
- // Gather all adapters in the system that can be accessed with no special options. These
- // adapters will later be returned by GetAdapters.
- void DiscoverDefaultAdapters();
-
- // Adds adapters that can be discovered with the options provided (like a getProcAddress).
- // The backend is chosen based on the type of the options used. Returns true on success.
- bool DiscoverAdapters(const AdapterDiscoveryOptionsBase* options);
-
- // Returns all the adapters that the instance knows about.
- std::vector<Adapter> GetAdapters() const;
-
- const ToggleInfo* GetToggleInfo(const char* toggleName);
- const FeatureInfo* GetFeatureInfo(WGPUFeatureName feature);
-
- // Enables backend validation layers
- void EnableBackendValidation(bool enableBackendValidation);
- void SetBackendValidationLevel(BackendValidationLevel validationLevel);
-
- // Enable debug capture on Dawn startup
- void EnableBeginCaptureOnStartup(bool beginCaptureOnStartup);
-
- void SetPlatform(dawn::platform::Platform* platform);
-
- // Returns the underlying WGPUInstance object.
- WGPUInstance Get() const;
-
- private:
- InstanceBase* mImpl = nullptr;
- };
-
- // Backend-agnostic API for dawn_native
- DAWN_NATIVE_EXPORT const DawnProcTable& GetProcs();
-
- // Query the names of all the toggles that are enabled in device
- DAWN_NATIVE_EXPORT std::vector<const char*> GetTogglesUsed(WGPUDevice device);
-
- // Backdoor to get the number of lazy clears for testing
- DAWN_NATIVE_EXPORT size_t GetLazyClearCountForTesting(WGPUDevice device);
-
- // Backdoor to get the number of deprecation warnings for testing
- DAWN_NATIVE_EXPORT size_t GetDeprecationWarningCountForTesting(WGPUDevice device);
-
- // Query if texture has been initialized
- DAWN_NATIVE_EXPORT bool IsTextureSubresourceInitialized(
- WGPUTexture texture,
- uint32_t baseMipLevel,
- uint32_t levelCount,
- uint32_t baseArrayLayer,
- uint32_t layerCount,
- WGPUTextureAspect aspect = WGPUTextureAspect_All);
-
- // Backdoor to get the order of the ProcMap for testing
- DAWN_NATIVE_EXPORT std::vector<const char*> GetProcMapNamesForTesting();
-
- DAWN_NATIVE_EXPORT bool DeviceTick(WGPUDevice device);
-
- // ErrorInjector functions used for testing only. Defined in dawn_native/ErrorInjector.cpp
- DAWN_NATIVE_EXPORT void EnableErrorInjector();
- DAWN_NATIVE_EXPORT void DisableErrorInjector();
- DAWN_NATIVE_EXPORT void ClearErrorInjector();
- DAWN_NATIVE_EXPORT uint64_t AcquireErrorInjectorCallCount();
- DAWN_NATIVE_EXPORT void InjectErrorAt(uint64_t index);
-
- // The different types of external images
- enum ExternalImageType {
- OpaqueFD,
- DmaBuf,
- IOSurface,
- DXGISharedHandle,
- EGLImage,
- };
+class InstanceBase;
+class AdapterBase;
+
+// An optional parameter of Adapter::CreateDevice() to send additional information when creating
+// a Device. For example, we can use it to enable a workaround, optimization or feature.
+struct DAWN_NATIVE_EXPORT DawnDeviceDescriptor {
+ DawnDeviceDescriptor();
+ ~DawnDeviceDescriptor();
+
+ std::vector<const char*> requiredFeatures;
+ std::vector<const char*> forceEnabledToggles;
+ std::vector<const char*> forceDisabledToggles;
+
+ const WGPURequiredLimits* requiredLimits = nullptr;
+};
+
+// A struct to record the information of a toggle. A toggle is a code path in Dawn device that
+// can be manually configured to run or not outside Dawn, including workarounds, special
+// features and optimizations.
+struct ToggleInfo {
+ const char* name;
+ const char* description;
+ const char* url;
+};
+
+// A struct to record the information of a feature. A feature is a GPU feature that is not
+// required to be supported by all Dawn backends and can only be used when it is enabled on the
+// creation of device.
+using FeatureInfo = ToggleInfo;
+
+// An adapter is an object that represents one possibility of creating devices in the system.
+// Most of the time it will represent a combination of a physical GPU and an API. Note that the
+// same GPU can be represented by multiple adapters but on different APIs.
+//
+// The underlying Dawn adapter is owned by the Dawn instance so this class is not RAII but just
+// a reference to an underlying adapter.
+class DAWN_NATIVE_EXPORT Adapter {
+ public:
+ Adapter();
+ // NOLINTNEXTLINE(runtime/explicit)
+ Adapter(AdapterBase* impl);
+ ~Adapter();
+
+ Adapter(const Adapter& other);
+ Adapter& operator=(const Adapter& other);
+
+ // Essentially webgpu.h's wgpuAdapterGetProperties while we don't have WGPUAdapter in
+ // dawn.json
+ void GetProperties(wgpu::AdapterProperties* properties) const;
+ void GetProperties(WGPUAdapterProperties* properties) const;
+
+ std::vector<const char*> GetSupportedExtensions() const;
+ std::vector<const char*> GetSupportedFeatures() const;
+ WGPUDeviceProperties GetAdapterProperties() const;
+ bool GetLimits(WGPUSupportedLimits* limits) const;
+
+ void SetUseTieredLimits(bool useTieredLimits);
+
+ // Check that the Adapter is able to support importing external images. This is necessary
+ // to implement the swapchain and interop APIs in Chromium.
+ bool SupportsExternalImages() const;
+
+ explicit operator bool() const;
+
+ // Create a device on this adapter. On an error, nullptr is returned.
+ WGPUDevice CreateDevice(const DawnDeviceDescriptor* deviceDescriptor);
+ WGPUDevice CreateDevice(const wgpu::DeviceDescriptor* deviceDescriptor);
+ WGPUDevice CreateDevice(const WGPUDeviceDescriptor* deviceDescriptor = nullptr);
+
+ void RequestDevice(const DawnDeviceDescriptor* descriptor,
+ WGPURequestDeviceCallback callback,
+ void* userdata);
+ void RequestDevice(const wgpu::DeviceDescriptor* descriptor,
+ WGPURequestDeviceCallback callback,
+ void* userdata);
+ void RequestDevice(const WGPUDeviceDescriptor* descriptor,
+ WGPURequestDeviceCallback callback,
+ void* userdata);
+
+ // Returns the underlying WGPUAdapter object.
+ WGPUAdapter Get() const;
+
+ // Reset the backend device object for testing purposes.
+ void ResetInternalDeviceForTesting();
+
+ private:
+ AdapterBase* mImpl = nullptr;
+};
+
+// Base class for options passed to Instance::DiscoverAdapters.
+struct DAWN_NATIVE_EXPORT AdapterDiscoveryOptionsBase {
+ public:
+ const WGPUBackendType backendType;
+
+ protected:
+ explicit AdapterDiscoveryOptionsBase(WGPUBackendType type);
+};
+
+enum BackendValidationLevel { Full, Partial, Disabled };
+
+// Represents a connection to dawn_native and is used for dependency injection, discovering
+// system adapters and injecting custom adapters (like a Swiftshader Vulkan adapter).
+//
+// This is an RAII class for Dawn instances and also controls the lifetime of all adapters
+// for this instance.
+class DAWN_NATIVE_EXPORT Instance {
+ public:
+ explicit Instance(const WGPUInstanceDescriptor* desc = nullptr);
+ ~Instance();
+
+ Instance(const Instance& other) = delete;
+ Instance& operator=(const Instance& other) = delete;
+
+ // Gather all adapters in the system that can be accessed with no special options. These
+ // adapters will later be returned by GetAdapters.
+ void DiscoverDefaultAdapters();
+
+ // Adds adapters that can be discovered with the options provided (like a getProcAddress).
+ // The backend is chosen based on the type of the options used. Returns true on success.
+ bool DiscoverAdapters(const AdapterDiscoveryOptionsBase* options);
+
+ // Returns all the adapters that the instance knows about.
+ std::vector<Adapter> GetAdapters() const;
+
+ const ToggleInfo* GetToggleInfo(const char* toggleName);
+ const FeatureInfo* GetFeatureInfo(WGPUFeatureName feature);
+
+ // Enables backend validation layers
+ void EnableBackendValidation(bool enableBackendValidation);
+ void SetBackendValidationLevel(BackendValidationLevel validationLevel);
+
+ // Enable debug capture on Dawn startup
+ void EnableBeginCaptureOnStartup(bool beginCaptureOnStartup);
+
+ // TODO(dawn:1374) Deprecate this once it is passed via the descriptor.
+ void SetPlatform(dawn::platform::Platform* platform);
+
+ uint64_t GetDeviceCountForTesting() const;
+
+ // Returns the underlying WGPUInstance object.
+ WGPUInstance Get() const;
+
+ private:
+ InstanceBase* mImpl = nullptr;
+};
+
+// Backend-agnostic API for dawn_native
+DAWN_NATIVE_EXPORT const DawnProcTable& GetProcs();
+
+// Query the names of all the toggles that are enabled in device
+DAWN_NATIVE_EXPORT std::vector<const char*> GetTogglesUsed(WGPUDevice device);
+
+// Backdoor to get the number of lazy clears for testing
+DAWN_NATIVE_EXPORT size_t GetLazyClearCountForTesting(WGPUDevice device);
+
+// Backdoor to get the number of deprecation warnings for testing
+DAWN_NATIVE_EXPORT size_t GetDeprecationWarningCountForTesting(WGPUDevice device);
+
+// Query if texture has been initialized
+DAWN_NATIVE_EXPORT bool IsTextureSubresourceInitialized(
+ WGPUTexture texture,
+ uint32_t baseMipLevel,
+ uint32_t levelCount,
+ uint32_t baseArrayLayer,
+ uint32_t layerCount,
+ WGPUTextureAspect aspect = WGPUTextureAspect_All);
+
+// Backdoor to get the order of the ProcMap for testing
+DAWN_NATIVE_EXPORT std::vector<const char*> GetProcMapNamesForTesting();
+
+DAWN_NATIVE_EXPORT bool DeviceTick(WGPUDevice device);
+
+// ErrorInjector functions used for testing only. Defined in dawn_native/ErrorInjector.cpp
+DAWN_NATIVE_EXPORT void EnableErrorInjector();
+DAWN_NATIVE_EXPORT void DisableErrorInjector();
+DAWN_NATIVE_EXPORT void ClearErrorInjector();
+DAWN_NATIVE_EXPORT uint64_t AcquireErrorInjectorCallCount();
+DAWN_NATIVE_EXPORT void InjectErrorAt(uint64_t index);
+
+// The different types of external images
+enum ExternalImageType {
+ OpaqueFD,
+ DmaBuf,
+ IOSurface,
+ DXGISharedHandle,
+ EGLImage,
+};
+
+// Common properties of external images
+struct DAWN_NATIVE_EXPORT ExternalImageDescriptor {
+ public:
+ const WGPUTextureDescriptor* cTextureDescriptor; // Must match image creation params
+ bool isInitialized; // Whether the texture is initialized on import
+ ExternalImageType GetType() const;
+
+ protected:
+ explicit ExternalImageDescriptor(ExternalImageType type);
+
+ private:
+ ExternalImageType mType;
+};
+
+struct DAWN_NATIVE_EXPORT ExternalImageAccessDescriptor {
+ public:
+ bool isInitialized; // Whether the texture is initialized on import
+ WGPUTextureUsageFlags usage;
+};
- // Common properties of external images
- struct DAWN_NATIVE_EXPORT ExternalImageDescriptor {
- public:
- const WGPUTextureDescriptor* cTextureDescriptor; // Must match image creation params
- bool isInitialized; // Whether the texture is initialized on import
- ExternalImageType GetType() const;
-
- protected:
- explicit ExternalImageDescriptor(ExternalImageType type);
-
- private:
- ExternalImageType mType;
- };
-
- struct DAWN_NATIVE_EXPORT ExternalImageAccessDescriptor {
- public:
- bool isInitialized; // Whether the texture is initialized on import
- WGPUTextureUsageFlags usage;
- };
+struct DAWN_NATIVE_EXPORT ExternalImageExportInfo {
+ public:
+ bool isInitialized; // Whether the texture is initialized after export
+ ExternalImageType GetType() const;
- struct DAWN_NATIVE_EXPORT ExternalImageExportInfo {
- public:
- bool isInitialized; // Whether the texture is initialized after export
- ExternalImageType GetType() const;
+ protected:
+ explicit ExternalImageExportInfo(ExternalImageType type);
- protected:
- explicit ExternalImageExportInfo(ExternalImageType type);
+ private:
+ ExternalImageType mType;
+};
- private:
- ExternalImageType mType;
- };
+DAWN_NATIVE_EXPORT bool CheckIsErrorForTesting(void* objectHandle);
- DAWN_NATIVE_EXPORT const char* GetObjectLabelForTesting(void* objectHandle);
+DAWN_NATIVE_EXPORT const char* GetObjectLabelForTesting(void* objectHandle);
- DAWN_NATIVE_EXPORT uint64_t GetAllocatedSizeForTesting(WGPUBuffer buffer);
+DAWN_NATIVE_EXPORT uint64_t GetAllocatedSizeForTesting(WGPUBuffer buffer);
- DAWN_NATIVE_EXPORT bool BindGroupLayoutBindingsEqualForTesting(WGPUBindGroupLayout a,
- WGPUBindGroupLayout b);
+DAWN_NATIVE_EXPORT bool BindGroupLayoutBindingsEqualForTesting(WGPUBindGroupLayout a,
+ WGPUBindGroupLayout b);
} // namespace dawn::native
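For orientation, a short sketch of the typical bring-up flow through the `Instance`/`Adapter` API above; the toggle name is illustrative, and `dawn/dawn_proc.h` with `dawnProcSetProcs()` is assumed to be available alongside these headers.

```cpp
#include <vector>

#include "dawn/dawn_proc.h"  // dawnProcSetProcs(); assumed to ship alongside these headers
#include "dawn/native/DawnNative.h"

// |instance| must outlive the returned device: per the comment above, the instance owns
// every Adapter it hands out. The toggle name below is illustrative.
WGPUDevice CreateDeviceWithToggles(dawn::native::Instance* instance) {
    instance->DiscoverDefaultAdapters();

    std::vector<dawn::native::Adapter> adapters = instance->GetAdapters();
    if (adapters.empty()) {
        return nullptr;
    }

    dawn::native::DawnDeviceDescriptor desc;
    desc.forceEnabledToggles.push_back("dump_shaders");  // example toggle name

    WGPUDevice device = adapters[0].CreateDevice(&desc);  // nullptr on error
    if (device != nullptr) {
        // Route the webgpu.h entry points through dawn_native's backend-agnostic procs.
        dawnProcSetProcs(&dawn::native::GetProcs());
    }
    return device;
}
```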
diff --git a/chromium/third_party/dawn/include/dawn/native/MetalBackend.h b/chromium/third_party/dawn/include/dawn/native/MetalBackend.h
index dfe71148fc2..20c80484fd6 100644
--- a/chromium/third_party/dawn/include/dawn/native/MetalBackend.h
+++ b/chromium/third_party/dawn/include/dawn/native/MetalBackend.h
@@ -15,8 +15,8 @@
#ifndef INCLUDE_DAWN_NATIVE_METALBACKEND_H_
#define INCLUDE_DAWN_NATIVE_METALBACKEND_H_
-#include <dawn/dawn_wsi.h>
-#include <dawn/native/DawnNative.h>
+#include "dawn/dawn_wsi.h"
+#include "dawn/native/DawnNative.h"
// The specifics of the Metal backend expose types in function signatures that might not be
// available in dependent's minimum supported SDK version. Suppress all availability errors using
@@ -29,41 +29,41 @@ struct __IOSurface;
typedef __IOSurface* IOSurfaceRef;
#ifdef __OBJC__
-# import <Metal/Metal.h>
+#import <Metal/Metal.h>
#endif // __OBJC__
namespace dawn::native::metal {
- struct DAWN_NATIVE_EXPORT AdapterDiscoveryOptions : public AdapterDiscoveryOptionsBase {
- AdapterDiscoveryOptions();
- };
+struct DAWN_NATIVE_EXPORT AdapterDiscoveryOptions : public AdapterDiscoveryOptionsBase {
+ AdapterDiscoveryOptions();
+};
- struct DAWN_NATIVE_EXPORT ExternalImageDescriptorIOSurface : ExternalImageDescriptor {
- public:
- ExternalImageDescriptorIOSurface();
+struct DAWN_NATIVE_EXPORT ExternalImageDescriptorIOSurface : ExternalImageDescriptor {
+ public:
+ ExternalImageDescriptorIOSurface();
- IOSurfaceRef ioSurface;
+ IOSurfaceRef ioSurface;
- // This has been deprecated.
- uint32_t plane;
- };
+ // This has been deprecated.
+ uint32_t plane;
+};
- DAWN_NATIVE_EXPORT WGPUTexture
- WrapIOSurface(WGPUDevice device, const ExternalImageDescriptorIOSurface* descriptor);
+DAWN_NATIVE_EXPORT WGPUTexture WrapIOSurface(WGPUDevice device,
+ const ExternalImageDescriptorIOSurface* descriptor);
- // When making Metal interop with other APIs, we need to be careful that QueueSubmit doesn't
- // mean that the operations will be visible to other APIs/Metal devices right away. macOS
- // does have a global queue of graphics operations, but the command buffers are inserted there
- // when they are "scheduled". Submitting other operations before the command buffer is
- // scheduled could lead to races in who gets scheduled first and incorrect rendering.
- DAWN_NATIVE_EXPORT void WaitForCommandsToBeScheduled(WGPUDevice device);
+// When making Metal interop with other APIs, we need to be careful that QueueSubmit doesn't
+// mean that the operations will be visible to other APIs/Metal devices right away. macOS
+// does have a global queue of graphics operations, but the command buffers are inserted there
+// when they are "scheduled". Submitting other operations before the command buffer is
+// scheduled could lead to races in who gets scheduled first and incorrect rendering.
+DAWN_NATIVE_EXPORT void WaitForCommandsToBeScheduled(WGPUDevice device);
} // namespace dawn::native::metal
#ifdef __OBJC__
namespace dawn::native::metal {
- DAWN_NATIVE_EXPORT id<MTLDevice> GetMetalDevice(WGPUDevice device);
+DAWN_NATIVE_EXPORT id<MTLDevice> GetMetalDevice(WGPUDevice device);
} // namespace dawn::native::metal
#endif // __OBJC__
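A hedged sketch of IOSurface interop with the declarations above; the wrapper functions and their arguments are illustrative.

```cpp
#include "dawn/native/MetalBackend.h"

// |device| must be a Dawn device on the Metal backend; |ioSurface| and |texDesc| must
// describe the same size and pixel format. Both are caller-provided assumptions.
WGPUTexture WrapSurface(WGPUDevice device,
                        IOSurfaceRef ioSurface,
                        const WGPUTextureDescriptor* texDesc) {
    dawn::native::metal::ExternalImageDescriptorIOSurface desc;
    desc.cTextureDescriptor = texDesc;
    desc.isInitialized = true;  // treat the existing IOSurface contents as valid
    desc.ioSurface = ioSurface;
    desc.plane = 0;             // deprecated (see above), zeroed only for tidiness
    return dawn::native::metal::WrapIOSurface(device, &desc);
}

// After rendering into the wrapped texture and submitting, flush the scheduling point so
// another API (e.g. Core Animation) observes the work, per the comment above.
void FlushForInterop(WGPUDevice device) {
    dawn::native::metal::WaitForCommandsToBeScheduled(device);
}
```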
diff --git a/chromium/third_party/dawn/include/dawn/native/NullBackend.h b/chromium/third_party/dawn/include/dawn/native/NullBackend.h
index 5df866456fa..bfa8a630458 100644
--- a/chromium/third_party/dawn/include/dawn/native/NullBackend.h
+++ b/chromium/third_party/dawn/include/dawn/native/NullBackend.h
@@ -15,11 +15,11 @@
#ifndef INCLUDE_DAWN_NATIVE_NULLBACKEND_H_
#define INCLUDE_DAWN_NATIVE_NULLBACKEND_H_
-#include <dawn/dawn_wsi.h>
-#include <dawn/native/DawnNative.h>
+#include "dawn/dawn_wsi.h"
+#include "dawn/native/DawnNative.h"
namespace dawn::native::null {
- DAWN_NATIVE_EXPORT DawnSwapChainImplementation CreateNativeSwapChainImpl();
+DAWN_NATIVE_EXPORT DawnSwapChainImplementation CreateNativeSwapChainImpl();
} // namespace dawn::native::null
#endif // INCLUDE_DAWN_NATIVE_NULLBACKEND_H_
diff --git a/chromium/third_party/dawn/include/dawn/native/OpenGLBackend.h b/chromium/third_party/dawn/include/dawn/native/OpenGLBackend.h
index 5077a5209ea..bee9daeae94 100644
--- a/chromium/third_party/dawn/include/dawn/native/OpenGLBackend.h
+++ b/chromium/third_party/dawn/include/dawn/native/OpenGLBackend.h
@@ -17,38 +17,39 @@
typedef void* EGLImage;
-#include <dawn/dawn_wsi.h>
-#include <dawn/native/DawnNative.h>
+#include "dawn/dawn_wsi.h"
+#include "dawn/native/DawnNative.h"
namespace dawn::native::opengl {
- struct DAWN_NATIVE_EXPORT AdapterDiscoveryOptions : public AdapterDiscoveryOptionsBase {
- AdapterDiscoveryOptions();
+struct DAWN_NATIVE_EXPORT AdapterDiscoveryOptions : public AdapterDiscoveryOptionsBase {
+ AdapterDiscoveryOptions();
- void* (*getProc)(const char*);
- };
+ void* (*getProc)(const char*);
+};
- struct DAWN_NATIVE_EXPORT AdapterDiscoveryOptionsES : public AdapterDiscoveryOptionsBase {
- AdapterDiscoveryOptionsES();
+struct DAWN_NATIVE_EXPORT AdapterDiscoveryOptionsES : public AdapterDiscoveryOptionsBase {
+ AdapterDiscoveryOptionsES();
- void* (*getProc)(const char*);
- };
+ void* (*getProc)(const char*);
+};
- using PresentCallback = void (*)(void*);
- DAWN_NATIVE_EXPORT DawnSwapChainImplementation
- CreateNativeSwapChainImpl(WGPUDevice device, PresentCallback present, void* presentUserdata);
- DAWN_NATIVE_EXPORT WGPUTextureFormat
- GetNativeSwapChainPreferredFormat(const DawnSwapChainImplementation* swapChain);
+using PresentCallback = void (*)(void*);
+DAWN_NATIVE_EXPORT DawnSwapChainImplementation CreateNativeSwapChainImpl(WGPUDevice device,
+ PresentCallback present,
+ void* presentUserdata);
+DAWN_NATIVE_EXPORT WGPUTextureFormat
+GetNativeSwapChainPreferredFormat(const DawnSwapChainImplementation* swapChain);
- struct DAWN_NATIVE_EXPORT ExternalImageDescriptorEGLImage : ExternalImageDescriptor {
- public:
- ExternalImageDescriptorEGLImage();
+struct DAWN_NATIVE_EXPORT ExternalImageDescriptorEGLImage : ExternalImageDescriptor {
+ public:
+ ExternalImageDescriptorEGLImage();
- ::EGLImage image;
- };
+ ::EGLImage image;
+};
- DAWN_NATIVE_EXPORT WGPUTexture
- WrapExternalEGLImage(WGPUDevice device, const ExternalImageDescriptorEGLImage* descriptor);
+DAWN_NATIVE_EXPORT WGPUTexture
+WrapExternalEGLImage(WGPUDevice device, const ExternalImageDescriptorEGLImage* descriptor);
} // namespace dawn::native::opengl
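A hedged sketch of GLES adapter discovery through the `getProc` hook above, assuming an EGL loader (`eglGetProcAddress`) with a compatible context already current on the calling thread; the helper names are illustrative.

```cpp
#include <EGL/egl.h>

#include "dawn/native/DawnNative.h"
#include "dawn/native/OpenGLBackend.h"

// eglGetProcAddress does not return void*, so adapt it to the getProc signature above.
void* EGLGetProc(const char* name) {
    return reinterpret_cast<void*>(eglGetProcAddress(name));
}

// An EGL context compatible with the adapter is assumed to be current on this thread.
bool DiscoverGLESAdapter(dawn::native::Instance* instance) {
    dawn::native::opengl::AdapterDiscoveryOptionsES options;
    options.getProc = EGLGetProc;  // Dawn uses this to load the GL ES entry points
    return instance->DiscoverAdapters(&options);
}
```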
diff --git a/chromium/third_party/dawn/include/dawn/native/VulkanBackend.h b/chromium/third_party/dawn/include/dawn/native/VulkanBackend.h
index 5bbc00e35e3..9ee41258095 100644
--- a/chromium/third_party/dawn/include/dawn/native/VulkanBackend.h
+++ b/chromium/third_party/dawn/include/dawn/native/VulkanBackend.h
@@ -15,125 +15,133 @@
#ifndef INCLUDE_DAWN_NATIVE_VULKANBACKEND_H_
#define INCLUDE_DAWN_NATIVE_VULKANBACKEND_H_
-#include <dawn/dawn_wsi.h>
-#include <dawn/native/DawnNative.h>
-
#include <vulkan/vulkan.h>
+#include <array>
#include <vector>
+#include "dawn/dawn_wsi.h"
+#include "dawn/native/DawnNative.h"
+
namespace dawn::native::vulkan {
- DAWN_NATIVE_EXPORT VkInstance GetInstance(WGPUDevice device);
+DAWN_NATIVE_EXPORT VkInstance GetInstance(WGPUDevice device);
- DAWN_NATIVE_EXPORT PFN_vkVoidFunction GetInstanceProcAddr(WGPUDevice device, const char* pName);
+DAWN_NATIVE_EXPORT PFN_vkVoidFunction GetInstanceProcAddr(WGPUDevice device, const char* pName);
- DAWN_NATIVE_EXPORT DawnSwapChainImplementation
- CreateNativeSwapChainImpl(WGPUDevice device, ::VkSurfaceKHR surface);
- DAWN_NATIVE_EXPORT WGPUTextureFormat
- GetNativeSwapChainPreferredFormat(const DawnSwapChainImplementation* swapChain);
+DAWN_NATIVE_EXPORT DawnSwapChainImplementation CreateNativeSwapChainImpl(WGPUDevice device,
+ ::VkSurfaceKHR surface);
+DAWN_NATIVE_EXPORT WGPUTextureFormat
+GetNativeSwapChainPreferredFormat(const DawnSwapChainImplementation* swapChain);
- struct DAWN_NATIVE_EXPORT AdapterDiscoveryOptions : public AdapterDiscoveryOptionsBase {
- AdapterDiscoveryOptions();
+struct DAWN_NATIVE_EXPORT AdapterDiscoveryOptions : public AdapterDiscoveryOptionsBase {
+ AdapterDiscoveryOptions();
- bool forceSwiftShader = false;
- };
+ bool forceSwiftShader = false;
+};
- struct DAWN_NATIVE_EXPORT ExternalImageDescriptorVk : ExternalImageDescriptor {
- public:
- // The following members may be ignored if |ExternalImageDescriptor::isInitialized| is false
- // since the import does not need to preserve texture contents.
+struct DAWN_NATIVE_EXPORT ExternalImageDescriptorVk : ExternalImageDescriptor {
+ public:
+ // The following members may be ignored if |ExternalImageDescriptor::isInitialized| is false
+ // since the import does not need to preserve texture contents.
- // See https://www.khronos.org/registry/vulkan/specs/1.1/html/chap7.html. The acquire
- // operation old/new layouts must match exactly the layouts in the release operation. So
- // we may need to issue two barriers releasedOldLayout -> releasedNewLayout ->
- // cTextureDescriptor.usage if the new layout is not compatible with the desired usage.
- // The first barrier is the queue transfer, the second is the layout transition to our
- // desired usage.
- VkImageLayout releasedOldLayout = VK_IMAGE_LAYOUT_GENERAL;
- VkImageLayout releasedNewLayout = VK_IMAGE_LAYOUT_GENERAL;
+ // See https://www.khronos.org/registry/vulkan/specs/1.1/html/chap7.html. The acquire
+ // operation old/new layouts must match exactly the layouts in the release operation. So
+ // we may need to issue two barriers releasedOldLayout -> releasedNewLayout ->
+ // cTextureDescriptor.usage if the new layout is not compatible with the desired usage.
+ // The first barrier is the queue transfer, the second is the layout transition to our
+ // desired usage.
+ VkImageLayout releasedOldLayout = VK_IMAGE_LAYOUT_GENERAL;
+ VkImageLayout releasedNewLayout = VK_IMAGE_LAYOUT_GENERAL;
- protected:
- using ExternalImageDescriptor::ExternalImageDescriptor;
- };
+ protected:
+ using ExternalImageDescriptor::ExternalImageDescriptor;
+};
- struct ExternalImageExportInfoVk : ExternalImageExportInfo {
- public:
- // See comments in |ExternalImageDescriptorVk|
- // Contains the old/new layouts used in the queue release operation.
- VkImageLayout releasedOldLayout;
- VkImageLayout releasedNewLayout;
+struct ExternalImageExportInfoVk : ExternalImageExportInfo {
+ public:
+ // See comments in |ExternalImageDescriptorVk|
+ // Contains the old/new layouts used in the queue release operation.
+ VkImageLayout releasedOldLayout;
+ VkImageLayout releasedNewLayout;
- protected:
- using ExternalImageExportInfo::ExternalImageExportInfo;
- };
+ protected:
+ using ExternalImageExportInfo::ExternalImageExportInfo;
+};
-// Can't use DAWN_PLATFORM_LINUX since header included in both Dawn and Chrome
+// Can't use DAWN_PLATFORM_IS(LINUX) since header included in both Dawn and Chrome
#ifdef __linux__
- // Common properties of external images represented by FDs. On successful import the file
- // descriptor's ownership is transferred to the Dawn implementation and they shouldn't be
- // used outside of Dawn again. TODO(enga): Also transfer ownership in the error case so the
- // caller can assume the FD is always consumed.
- struct DAWN_NATIVE_EXPORT ExternalImageDescriptorFD : ExternalImageDescriptorVk {
- public:
- int memoryFD; // A file descriptor from an export of the memory of the image
- std::vector<int> waitFDs; // File descriptors of semaphores which will be waited on
-
- protected:
- using ExternalImageDescriptorVk::ExternalImageDescriptorVk;
- };
-
- // Descriptor for opaque file descriptor image import
- struct DAWN_NATIVE_EXPORT ExternalImageDescriptorOpaqueFD : ExternalImageDescriptorFD {
- ExternalImageDescriptorOpaqueFD();
-
- VkDeviceSize allocationSize; // Must match VkMemoryAllocateInfo from image creation
- uint32_t memoryTypeIndex; // Must match VkMemoryAllocateInfo from image creation
- };
-
- // Descriptor for dma-buf file descriptor image import
- struct DAWN_NATIVE_EXPORT ExternalImageDescriptorDmaBuf : ExternalImageDescriptorFD {
- ExternalImageDescriptorDmaBuf();
-
- uint32_t stride; // Stride of the buffer in bytes
- uint64_t drmModifier; // DRM modifier of the buffer
- };
-
- // Info struct that is written to in |ExportVulkanImage|.
- struct DAWN_NATIVE_EXPORT ExternalImageExportInfoFD : ExternalImageExportInfoVk {
- public:
- // Contains the exported semaphore handles.
- std::vector<int> semaphoreHandles;
-
- protected:
- using ExternalImageExportInfoVk::ExternalImageExportInfoVk;
- };
-
- struct DAWN_NATIVE_EXPORT ExternalImageExportInfoOpaqueFD : ExternalImageExportInfoFD {
- ExternalImageExportInfoOpaqueFD();
- };
-
- struct DAWN_NATIVE_EXPORT ExternalImageExportInfoDmaBuf : ExternalImageExportInfoFD {
- ExternalImageExportInfoDmaBuf();
- };
+// Common properties of external images represented by FDs. On successful import the file
+// descriptor's ownership is transferred to the Dawn implementation and they shouldn't be
+// used outside of Dawn again. TODO(enga): Also transfer ownership in the error case so the
+// caller can assume the FD is always consumed.
+struct DAWN_NATIVE_EXPORT ExternalImageDescriptorFD : ExternalImageDescriptorVk {
+ public:
+ int memoryFD; // A file descriptor from an export of the memory of the image
+ std::vector<int> waitFDs; // File descriptors of semaphores which will be waited on
+
+ protected:
+ using ExternalImageDescriptorVk::ExternalImageDescriptorVk;
+};
+
+// Descriptor for opaque file descriptor image import
+struct DAWN_NATIVE_EXPORT ExternalImageDescriptorOpaqueFD : ExternalImageDescriptorFD {
+ ExternalImageDescriptorOpaqueFD();
+
+ VkDeviceSize allocationSize; // Must match VkMemoryAllocateInfo from image creation
+ uint32_t memoryTypeIndex; // Must match VkMemoryAllocateInfo from image creation
+};
+
+// The plane-wise offset and stride.
+struct DAWN_NATIVE_EXPORT PlaneLayout {
+ uint64_t offset;
+ uint32_t stride;
+};
+
+// Descriptor for dma-buf file descriptor image import
+struct DAWN_NATIVE_EXPORT ExternalImageDescriptorDmaBuf : ExternalImageDescriptorFD {
+ ExternalImageDescriptorDmaBuf();
+
+ static constexpr uint32_t kMaxPlanes = 3;
+ std::array<PlaneLayout, kMaxPlanes> planeLayouts;
+ uint64_t drmModifier; // DRM modifier of the buffer
+};
+
+// Info struct that is written to in |ExportVulkanImage|.
+struct DAWN_NATIVE_EXPORT ExternalImageExportInfoFD : ExternalImageExportInfoVk {
+ public:
+ // Contains the exported semaphore handles.
+ std::vector<int> semaphoreHandles;
+
+ protected:
+ using ExternalImageExportInfoVk::ExternalImageExportInfoVk;
+};
+
+struct DAWN_NATIVE_EXPORT ExternalImageExportInfoOpaqueFD : ExternalImageExportInfoFD {
+ ExternalImageExportInfoOpaqueFD();
+};
+
+struct DAWN_NATIVE_EXPORT ExternalImageExportInfoDmaBuf : ExternalImageExportInfoFD {
+ ExternalImageExportInfoDmaBuf();
+};
#endif // __linux__
- // Imports external memory into a Vulkan image. Internally, this uses external memory /
- // semaphore extensions to import the image and wait on the provided synchronizaton
- // primitives before the texture can be used.
- // On failure, returns a nullptr.
- DAWN_NATIVE_EXPORT WGPUTexture WrapVulkanImage(WGPUDevice device,
- const ExternalImageDescriptorVk* descriptor);
-
- // Exports external memory from a Vulkan image. This must be called on wrapped textures
- // before they are destroyed. It writes the semaphore to wait on and the old/new image
- // layouts to |info|. Pass VK_IMAGE_LAYOUT_UNDEFINED as |desiredLayout| if you don't want to
- // perform a layout transition.
- DAWN_NATIVE_EXPORT bool ExportVulkanImage(WGPUTexture texture,
- VkImageLayout desiredLayout,
- ExternalImageExportInfoVk* info);
+// Imports external memory into a Vulkan image. Internally, this uses external memory /
+// semaphore extensions to import the image and wait on the provided synchronization
+// primitives before the texture can be used.
+// On failure, returns a nullptr.
+DAWN_NATIVE_EXPORT WGPUTexture WrapVulkanImage(WGPUDevice device,
+ const ExternalImageDescriptorVk* descriptor);
+
+// Exports external memory from a Vulkan image. This must be called on wrapped textures
+// before they are destroyed. It writes the semaphore to wait on and the old/new image
+// layouts to |info|. Pass VK_IMAGE_LAYOUT_UNDEFINED as |desiredLayout| if you don't want to
+// perform a layout transition.
+DAWN_NATIVE_EXPORT bool ExportVulkanImage(WGPUTexture texture,
+ VkImageLayout desiredLayout,
+ ExternalImageExportInfoVk* info);
} // namespace dawn::native::vulkan
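A hedged sketch of the Linux-only opaque-FD import/export path using the structs above; every file descriptor, size, and chosen layout is a caller assumption.

```cpp
#include <utility>
#include <vector>

#include "dawn/native/VulkanBackend.h"

// |memoryFD| and |waitFDs| come from the exporting API; on success Dawn takes ownership
// of them (see the comment above). Everything here is illustrative.
WGPUTexture ImportOpaqueFDImage(WGPUDevice device,
                                const WGPUTextureDescriptor* texDesc,
                                int memoryFD,
                                VkDeviceSize allocationSize,
                                uint32_t memoryTypeIndex,
                                std::vector<int> waitFDs) {
    dawn::native::vulkan::ExternalImageDescriptorOpaqueFD desc;
    desc.cTextureDescriptor = texDesc;
    desc.isInitialized = true;
    desc.memoryFD = memoryFD;
    desc.waitFDs = std::move(waitFDs);       // semaphores Dawn waits on before first use
    desc.allocationSize = allocationSize;    // must match VkMemoryAllocateInfo
    desc.memoryTypeIndex = memoryTypeIndex;  // must match VkMemoryAllocateInfo
    desc.releasedOldLayout = VK_IMAGE_LAYOUT_GENERAL;
    desc.releasedNewLayout = VK_IMAGE_LAYOUT_GENERAL;
    return dawn::native::vulkan::WrapVulkanImage(device, &desc);  // nullptr on failure
}

// Must be called on a wrapped texture before it is destroyed (see comment above); the
// exported semaphores and layouts are written into |info|.
bool ReleaseToExporter(WGPUTexture texture,
                       dawn::native::vulkan::ExternalImageExportInfoOpaqueFD* info) {
    // VK_IMAGE_LAYOUT_UNDEFINED: don't request an extra layout transition on export.
    return dawn::native::vulkan::ExportVulkanImage(texture, VK_IMAGE_LAYOUT_UNDEFINED, info);
}
```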
diff --git a/chromium/third_party/dawn/include/dawn/native/dawn_native_export.h b/chromium/third_party/dawn/include/dawn/native/dawn_native_export.h
index 329b1a1d8bd..c23772010b1 100644
--- a/chromium/third_party/dawn/include/dawn/native/dawn_native_export.h
+++ b/chromium/third_party/dawn/include/dawn/native/dawn_native_export.h
@@ -16,21 +16,21 @@
#define INCLUDE_DAWN_NATIVE_DAWN_NATIVE_EXPORT_H_
#if defined(DAWN_NATIVE_SHARED_LIBRARY)
-# if defined(_WIN32)
-# if defined(DAWN_NATIVE_IMPLEMENTATION)
-# define DAWN_NATIVE_EXPORT __declspec(dllexport)
-# else
-# define DAWN_NATIVE_EXPORT __declspec(dllimport)
-# endif
-# else // defined(_WIN32)
-# if defined(DAWN_NATIVE_IMPLEMENTATION)
-# define DAWN_NATIVE_EXPORT __attribute__((visibility("default")))
-# else
-# define DAWN_NATIVE_EXPORT
-# endif
-# endif // defined(_WIN32)
-#else // defined(DAWN_NATIVE_SHARED_LIBRARY)
-# define DAWN_NATIVE_EXPORT
+#if defined(_WIN32)
+#if defined(DAWN_NATIVE_IMPLEMENTATION)
+#define DAWN_NATIVE_EXPORT __declspec(dllexport)
+#else
+#define DAWN_NATIVE_EXPORT __declspec(dllimport)
+#endif
+#else // defined(_WIN32)
+#if defined(DAWN_NATIVE_IMPLEMENTATION)
+#define DAWN_NATIVE_EXPORT __attribute__((visibility("default")))
+#else
+#define DAWN_NATIVE_EXPORT
+#endif
+#endif // defined(_WIN32)
+#else // defined(DAWN_NATIVE_SHARED_LIBRARY)
+#define DAWN_NATIVE_EXPORT
#endif // defined(DAWN_NATIVE_SHARED_LIBRARY)
#endif // INCLUDE_DAWN_NATIVE_DAWN_NATIVE_EXPORT_H_
diff --git a/chromium/third_party/dawn/include/dawn/platform/DawnPlatform.h b/chromium/third_party/dawn/include/dawn/platform/DawnPlatform.h
index 1112a88b327..5c616d1bbc2 100644
--- a/chromium/third_party/dawn/include/dawn/platform/DawnPlatform.h
+++ b/chromium/third_party/dawn/include/dawn/platform/DawnPlatform.h
@@ -15,101 +15,94 @@
#ifndef INCLUDE_DAWN_PLATFORM_DAWNPLATFORM_H_
#define INCLUDE_DAWN_PLATFORM_DAWNPLATFORM_H_
-#include <dawn/webgpu.h>
-
#include <cstddef>
#include <cstdint>
#include <memory>
#include "dawn/platform/dawn_platform_export.h"
+#include "dawn/webgpu.h"
namespace dawn::platform {
- enum class TraceCategory {
- General, // General trace events
- Validation, // Dawn validation
- Recording, // Native command recording
- GPUWork, // Actual GPU work
- };
-
- class DAWN_PLATFORM_EXPORT CachingInterface {
- public:
- CachingInterface();
- virtual ~CachingInterface();
-
- // LoadData has two modes. The first mode is used to get a value which
- // corresponds to the |key|. The |valueOut| is a caller provided buffer
- // allocated to the size |valueSize| which is loaded with data of the
- // size returned. The second mode is used to query for the existence of
- // the |key| where |valueOut| is nullptr and |valueSize| must be 0.
- // The return size is non-zero if the |key| exists.
- virtual size_t LoadData(const WGPUDevice device,
- const void* key,
- size_t keySize,
- void* valueOut,
- size_t valueSize) = 0;
-
- // StoreData puts a |value| in the cache which corresponds to the |key|.
- virtual void StoreData(const WGPUDevice device,
- const void* key,
- size_t keySize,
- const void* value,
- size_t valueSize) = 0;
-
- private:
- CachingInterface(const CachingInterface&) = delete;
- CachingInterface& operator=(const CachingInterface&) = delete;
- };
-
- class DAWN_PLATFORM_EXPORT WaitableEvent {
- public:
- WaitableEvent() = default;
- virtual ~WaitableEvent() = default;
- virtual void Wait() = 0; // Wait for completion
- virtual bool IsComplete() = 0; // Non-blocking check if the event is complete
- };
-
- using PostWorkerTaskCallback = void (*)(void* userdata);
-
- class DAWN_PLATFORM_EXPORT WorkerTaskPool {
- public:
- WorkerTaskPool() = default;
- virtual ~WorkerTaskPool() = default;
- virtual std::unique_ptr<WaitableEvent> PostWorkerTask(PostWorkerTaskCallback,
- void* userdata) = 0;
- };
-
- class DAWN_PLATFORM_EXPORT Platform {
- public:
- Platform();
- virtual ~Platform();
-
- virtual const unsigned char* GetTraceCategoryEnabledFlag(TraceCategory category);
-
- virtual double MonotonicallyIncreasingTime();
-
- virtual uint64_t AddTraceEvent(char phase,
- const unsigned char* categoryGroupEnabled,
- const char* name,
- uint64_t id,
- double timestamp,
- int numArgs,
- const char** argNames,
- const unsigned char* argTypes,
- const uint64_t* argValues,
- unsigned char flags);
-
- // The |fingerprint| is provided by Dawn to inform the client to discard the Dawn caches
- // when the fingerprint changes. The returned CachingInterface is expected to outlive the
- // device which uses it to persistently cache objects.
- virtual CachingInterface* GetCachingInterface(const void* fingerprint,
- size_t fingerprintSize);
- virtual std::unique_ptr<WorkerTaskPool> CreateWorkerTaskPool();
-
- private:
- Platform(const Platform&) = delete;
- Platform& operator=(const Platform&) = delete;
- };
+enum class TraceCategory {
+ General, // General trace events
+ Validation, // Dawn validation
+ Recording, // Native command recording
+ GPUWork, // Actual GPU work
+};
+
+class DAWN_PLATFORM_EXPORT CachingInterface {
+ public:
+ CachingInterface();
+ virtual ~CachingInterface();
+
+ // LoadData has two modes. The first mode is used to get a value which
+ // corresponds to the |key|. The |valueOut| is a caller provided buffer
+ // allocated to the size |valueSize| which is loaded with data of the
+ // size returned. The second mode is used to query for the existence of
+ // the |key| where |valueOut| is nullptr and |valueSize| must be 0.
+ // The return size is non-zero if the |key| exists.
+ virtual size_t LoadData(const void* key, size_t keySize, void* valueOut, size_t valueSize) = 0;
+
+ // StoreData puts a |value| in the cache which corresponds to the |key|.
+ virtual void StoreData(const void* key,
+ size_t keySize,
+ const void* value,
+ size_t valueSize) = 0;
+
+ private:
+ CachingInterface(const CachingInterface&) = delete;
+ CachingInterface& operator=(const CachingInterface&) = delete;
+};
+
+class DAWN_PLATFORM_EXPORT WaitableEvent {
+ public:
+ WaitableEvent() = default;
+ virtual ~WaitableEvent() = default;
+ virtual void Wait() = 0; // Wait for completion
+ virtual bool IsComplete() = 0; // Non-blocking check if the event is complete
+};
+
+using PostWorkerTaskCallback = void (*)(void* userdata);
+
+class DAWN_PLATFORM_EXPORT WorkerTaskPool {
+ public:
+ WorkerTaskPool() = default;
+ virtual ~WorkerTaskPool() = default;
+ virtual std::unique_ptr<WaitableEvent> PostWorkerTask(PostWorkerTaskCallback,
+ void* userdata) = 0;
+};
+
+class DAWN_PLATFORM_EXPORT Platform {
+ public:
+ Platform();
+ virtual ~Platform();
+
+ virtual const unsigned char* GetTraceCategoryEnabledFlag(TraceCategory category);
+
+ virtual double MonotonicallyIncreasingTime();
+
+ virtual uint64_t AddTraceEvent(char phase,
+ const unsigned char* categoryGroupEnabled,
+ const char* name,
+ uint64_t id,
+ double timestamp,
+ int numArgs,
+ const char** argNames,
+ const unsigned char* argTypes,
+ const uint64_t* argValues,
+ unsigned char flags);
+
+ // The |fingerprint| is provided by Dawn to inform the client to discard the Dawn caches
+ // when the fingerprint changes. The returned CachingInterface is expected to outlive the
+ // device which uses it to persistently cache objects.
+ virtual CachingInterface* GetCachingInterface(const void* fingerprint, size_t fingerprintSize);
+ virtual std::unique_ptr<WorkerTaskPool> CreateWorkerTaskPool();
+
+ private:
+ Platform(const Platform&) = delete;
+ Platform& operator=(const Platform&) = delete;
+};
} // namespace dawn::platform
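A hedged sketch of a `CachingInterface` implementation matching the updated device-free `LoadData`/`StoreData` signatures above, including the two LoadData modes described in the comment (size query with `valueOut == nullptr`, then copy into a caller buffer); the in-memory storage is purely illustrative, and a real embedder would persist entries keyed by the platform fingerprint.

```cpp
#include <cstring>
#include <map>
#include <string>
#include <vector>

#include "dawn/platform/DawnPlatform.h"

class InMemoryCache : public dawn::platform::CachingInterface {
  public:
    size_t LoadData(const void* key, size_t keySize, void* valueOut, size_t valueSize) override {
        auto it = mEntries.find(ToKey(key, keySize));
        if (it == mEntries.end()) {
            return 0;  // unknown key
        }
        if (valueOut != nullptr && valueSize >= it->second.size()) {
            std::memcpy(valueOut, it->second.data(), it->second.size());
        }
        return it->second.size();  // non-zero signals existence in query mode
    }

    void StoreData(const void* key, size_t keySize, const void* value, size_t valueSize) override {
        const char* bytes = static_cast<const char*>(value);
        mEntries[ToKey(key, keySize)] = std::vector<char>(bytes, bytes + valueSize);
    }

  private:
    static std::string ToKey(const void* key, size_t keySize) {
        return std::string(static_cast<const char*>(key), keySize);
    }

    std::map<std::string, std::vector<char>> mEntries;
};
```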
diff --git a/chromium/third_party/dawn/include/dawn/platform/dawn_platform_export.h b/chromium/third_party/dawn/include/dawn/platform/dawn_platform_export.h
index e8d22e37aa1..fbdb33c64e0 100644
--- a/chromium/third_party/dawn/include/dawn/platform/dawn_platform_export.h
+++ b/chromium/third_party/dawn/include/dawn/platform/dawn_platform_export.h
@@ -16,21 +16,21 @@
#define INCLUDE_DAWN_PLATFORM_DAWN_PLATFORM_EXPORT_H_
#if defined(DAWN_PLATFORM_SHARED_LIBRARY)
-# if defined(_WIN32)
-# if defined(DAWN_PLATFORM_IMPLEMENTATION)
-# define DAWN_PLATFORM_EXPORT __declspec(dllexport)
-# else
-# define DAWN_PLATFORM_EXPORT __declspec(dllimport)
-# endif
-# else // defined(_WIN32)
-# if defined(DAWN_PLATFORM_IMPLEMENTATION)
-# define DAWN_PLATFORM_EXPORT __attribute__((visibility("default")))
-# else
-# define DAWN_PLATFORM_EXPORT
-# endif
-# endif // defined(_WIN32)
-#else // defined(DAWN_PLATFORM_SHARED_LIBRARY)
-# define DAWN_PLATFORM_EXPORT
+#if defined(_WIN32)
+#if defined(DAWN_PLATFORM_IMPLEMENTATION)
+#define DAWN_PLATFORM_EXPORT __declspec(dllexport)
+#else
+#define DAWN_PLATFORM_EXPORT __declspec(dllimport)
+#endif
+#else // defined(_WIN32)
+#if defined(DAWN_PLATFORM_IMPLEMENTATION)
+#define DAWN_PLATFORM_EXPORT __attribute__((visibility("default")))
+#else
+#define DAWN_PLATFORM_EXPORT
+#endif
+#endif // defined(_WIN32)
+#else // defined(DAWN_PLATFORM_SHARED_LIBRARY)
+#define DAWN_PLATFORM_EXPORT
#endif // defined(DAWN_PLATFORM_SHARED_LIBRARY)
#endif // INCLUDE_DAWN_PLATFORM_DAWN_PLATFORM_EXPORT_H_
diff --git a/chromium/third_party/dawn/include/dawn/wire/Wire.h b/chromium/third_party/dawn/include/dawn/wire/Wire.h
index 10028e05b72..e866db3dd82 100644
--- a/chromium/third_party/dawn/include/dawn/wire/Wire.h
+++ b/chromium/third_party/dawn/include/dawn/wire/Wire.h
@@ -23,53 +23,32 @@
namespace dawn::wire {
- class DAWN_WIRE_EXPORT CommandSerializer {
- public:
- CommandSerializer();
- virtual ~CommandSerializer();
- CommandSerializer(const CommandSerializer& rhs) = delete;
- CommandSerializer& operator=(const CommandSerializer& rhs) = delete;
-
- // Get space for serializing commands.
- // GetCmdSpace will never be called with a value larger than
- // what GetMaximumAllocationSize returns. Return nullptr to indicate
- // a fatal error.
- virtual void* GetCmdSpace(size_t size) = 0;
- virtual bool Flush() = 0;
- virtual size_t GetMaximumAllocationSize() const = 0;
- virtual void OnSerializeError();
- };
-
- class DAWN_WIRE_EXPORT CommandHandler {
- public:
- CommandHandler();
- virtual ~CommandHandler();
- CommandHandler(const CommandHandler& rhs) = delete;
- CommandHandler& operator=(const CommandHandler& rhs) = delete;
-
- virtual const volatile char* HandleCommands(const volatile char* commands, size_t size) = 0;
- };
-
- DAWN_WIRE_EXPORT size_t
- SerializedWGPUDevicePropertiesSize(const WGPUDeviceProperties* deviceProperties);
-
- DAWN_WIRE_EXPORT void SerializeWGPUDeviceProperties(
- const WGPUDeviceProperties* deviceProperties,
- char* serializeBuffer);
-
- DAWN_WIRE_EXPORT bool DeserializeWGPUDeviceProperties(WGPUDeviceProperties* deviceProperties,
- const volatile char* deserializeBuffer,
- size_t deserializeBufferSize);
-
- DAWN_WIRE_EXPORT size_t
- SerializedWGPUSupportedLimitsSize(const WGPUSupportedLimits* supportedLimits);
-
- DAWN_WIRE_EXPORT void SerializeWGPUSupportedLimits(const WGPUSupportedLimits* supportedLimits,
- char* serializeBuffer);
-
- DAWN_WIRE_EXPORT bool DeserializeWGPUSupportedLimits(WGPUSupportedLimits* supportedLimits,
- const volatile char* deserializeBuffer,
- size_t deserializeBufferSize);
+class DAWN_WIRE_EXPORT CommandSerializer {
+ public:
+ CommandSerializer();
+ virtual ~CommandSerializer();
+ CommandSerializer(const CommandSerializer& rhs) = delete;
+ CommandSerializer& operator=(const CommandSerializer& rhs) = delete;
+
+ // Get space for serializing commands.
+ // GetCmdSpace will never be called with a value larger than
+ // what GetMaximumAllocationSize returns. Return nullptr to indicate
+ // a fatal error.
+ virtual void* GetCmdSpace(size_t size) = 0;
+ virtual bool Flush() = 0;
+ virtual size_t GetMaximumAllocationSize() const = 0;
+ virtual void OnSerializeError();
+};
+
+class DAWN_WIRE_EXPORT CommandHandler {
+ public:
+ CommandHandler();
+ virtual ~CommandHandler();
+ CommandHandler(const CommandHandler& rhs) = delete;
+ CommandHandler& operator=(const CommandHandler& rhs) = delete;
+
+ virtual const volatile char* HandleCommands(const volatile char* commands, size_t size) = 0;
+};
} // namespace dawn::wire
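A hedged sketch of a `CommandSerializer` built on the contract above; the stdout transport is a stand-in for whatever IPC channel an embedder would actually use, and the 1 MiB cap is arbitrary.

```cpp
#include <cstddef>
#include <cstdio>
#include <vector>

#include "dawn/wire/Wire.h"

// Stand-in transport for the sketch; a real embedder would hand the bytes to its IPC channel.
bool SendToServer(const char* data, size_t size) {
    return std::fwrite(data, 1, size, stdout) == size;
}

class BufferedSerializer : public dawn::wire::CommandSerializer {
  public:
    void* GetCmdSpace(size_t size) override {
        if (size > GetMaximumAllocationSize()) {
            return nullptr;  // fatal error per the contract above (not expected to happen)
        }
        // Assumption: the wire fills the returned span before asking for more space, so
        // letting the vector reallocate on a later call is safe in this sketch.
        size_t offset = mBuffer.size();
        mBuffer.resize(offset + size);
        return mBuffer.data() + offset;
    }

    bool Flush() override {
        bool ok = SendToServer(mBuffer.data(), mBuffer.size());
        mBuffer.clear();
        return ok;
    }

    size_t GetMaximumAllocationSize() const override {
        return 1u << 20;  // arbitrary 1 MiB cap on a single command
    }

  private:
    std::vector<char> mBuffer;
};
```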
diff --git a/chromium/third_party/dawn/include/dawn/wire/WireClient.h b/chromium/third_party/dawn/include/dawn/wire/WireClient.h
index d8b50a33a81..c1adfb13372 100644
--- a/chromium/third_party/dawn/include/dawn/wire/WireClient.h
+++ b/chromium/third_party/dawn/include/dawn/wire/WireClient.h
@@ -23,160 +23,158 @@
namespace dawn::wire {
- namespace client {
- class Client;
- class MemoryTransferService;
-
- DAWN_WIRE_EXPORT const DawnProcTable& GetProcs();
- } // namespace client
-
- struct ReservedTexture {
- WGPUTexture texture;
- uint32_t id;
- uint32_t generation;
- uint32_t deviceId;
- uint32_t deviceGeneration;
- };
-
- struct ReservedSwapChain {
- WGPUSwapChain swapchain;
- uint32_t id;
- uint32_t generation;
- uint32_t deviceId;
- uint32_t deviceGeneration;
- };
-
- struct ReservedDevice {
- WGPUDevice device;
- uint32_t id;
- uint32_t generation;
- };
-
- struct ReservedInstance {
- WGPUInstance instance;
- uint32_t id;
- uint32_t generation;
- };
+namespace client {
+class Client;
+class MemoryTransferService;
+
+DAWN_WIRE_EXPORT const DawnProcTable& GetProcs();
+} // namespace client
+
+struct ReservedTexture {
+ WGPUTexture texture;
+ uint32_t id;
+ uint32_t generation;
+ uint32_t deviceId;
+ uint32_t deviceGeneration;
+};
+
+struct ReservedSwapChain {
+ WGPUSwapChain swapchain;
+ uint32_t id;
+ uint32_t generation;
+ uint32_t deviceId;
+ uint32_t deviceGeneration;
+};
+
+struct ReservedDevice {
+ WGPUDevice device;
+ uint32_t id;
+ uint32_t generation;
+};
+
+struct ReservedInstance {
+ WGPUInstance instance;
+ uint32_t id;
+ uint32_t generation;
+};
+
+struct DAWN_WIRE_EXPORT WireClientDescriptor {
+ CommandSerializer* serializer;
+ client::MemoryTransferService* memoryTransferService = nullptr;
+};
+
+class DAWN_WIRE_EXPORT WireClient : public CommandHandler {
+ public:
+ explicit WireClient(const WireClientDescriptor& descriptor);
+ ~WireClient() override;
+
+ const volatile char* HandleCommands(const volatile char* commands, size_t size) override;
+
+ ReservedTexture ReserveTexture(WGPUDevice device);
+ ReservedSwapChain ReserveSwapChain(WGPUDevice device);
+ ReservedDevice ReserveDevice();
+ ReservedInstance ReserveInstance();
+
+ void ReclaimTextureReservation(const ReservedTexture& reservation);
+ void ReclaimSwapChainReservation(const ReservedSwapChain& reservation);
+ void ReclaimDeviceReservation(const ReservedDevice& reservation);
+ void ReclaimInstanceReservation(const ReservedInstance& reservation);
+
+ // Disconnects the client.
+ // Commands allocated after this point will not be sent.
+ void Disconnect();
+
+ private:
+ std::unique_ptr<client::Client> mImpl;
+};
+
+namespace client {
+class DAWN_WIRE_EXPORT MemoryTransferService {
+ public:
+ MemoryTransferService();
+ virtual ~MemoryTransferService();
+
+ class ReadHandle;
+ class WriteHandle;
+
+ // Create a handle for reading server data.
+ // This may fail and return nullptr.
+ virtual ReadHandle* CreateReadHandle(size_t) = 0;
+
+ // Create a handle for writing server data.
+ // This may fail and return nullptr.
+ virtual WriteHandle* CreateWriteHandle(size_t) = 0;
+
+ class DAWN_WIRE_EXPORT ReadHandle {
+ public:
+ ReadHandle();
+ virtual ~ReadHandle();
+
+ // Get the required serialization size for SerializeCreate
+ virtual size_t SerializeCreateSize() = 0;
+
+ // Serialize the handle into |serializePointer| so it can be received by the server.
+ virtual void SerializeCreate(void* serializePointer) = 0;
+
+ // Simply return the base address of the allocation (without applying any offset)
+ // Returns nullptr if the allocation failed.
+ // The data must live at least until the ReadHandle is destructed
+ virtual const void* GetData() = 0;
+
+ // Gets called when a MapReadCallback resolves.
+ // Deserializes the data update and applies it to the
+ // range (offset, offset + size) of the allocation.
+ // There could be nothing to deserialize (if using shared memory).
+ // Needs to check for potential offset/size OOB and overflow.
+ virtual bool DeserializeDataUpdate(const void* deserializePointer,
+ size_t deserializeSize,
+ size_t offset,
+ size_t size) = 0;
- struct DAWN_WIRE_EXPORT WireClientDescriptor {
- CommandSerializer* serializer;
- client::MemoryTransferService* memoryTransferService = nullptr;
+ private:
+ ReadHandle(const ReadHandle&) = delete;
+ ReadHandle& operator=(const ReadHandle&) = delete;
};
- class DAWN_WIRE_EXPORT WireClient : public CommandHandler {
+ class DAWN_WIRE_EXPORT WriteHandle {
public:
- explicit WireClient(const WireClientDescriptor& descriptor);
- ~WireClient() override;
+ WriteHandle();
+ virtual ~WriteHandle();
- const volatile char* HandleCommands(const volatile char* commands, size_t size) final;
+ // Get the required serialization size for SerializeCreate
+ virtual size_t SerializeCreateSize() = 0;
- ReservedTexture ReserveTexture(WGPUDevice device);
- ReservedSwapChain ReserveSwapChain(WGPUDevice device);
- ReservedDevice ReserveDevice();
- ReservedInstance ReserveInstance();
+ // Serialize the handle into |serializePointer| so it can be received by the server.
+ virtual void SerializeCreate(void* serializePointer) = 0;
- void ReclaimTextureReservation(const ReservedTexture& reservation);
- void ReclaimSwapChainReservation(const ReservedSwapChain& reservation);
- void ReclaimDeviceReservation(const ReservedDevice& reservation);
- void ReclaimInstanceReservation(const ReservedInstance& reservation);
+ // Simply return the base address of the allocation (without applying any offset)
+ // The data returned should be zero-initialized.
+ // The data returned must live at least until the WriteHandle is destructed.
+ // On failure, the pointer returned should be null.
+ virtual void* GetData() = 0;
- // Disconnects the client.
- // Commands allocated after this point will not be sent.
- void Disconnect();
+ // Get the required serialization size for SerializeDataUpdate
+ virtual size_t SizeOfSerializeDataUpdate(size_t offset, size_t size) = 0;
+
+ // Serialize a command to send the modified contents of the subrange
+ // (offset, offset + size) of the allocation at buffer unmap time.
+ // For now this subrange is always the whole mapped region.
+ // There may be nothing to serialize (e.g. when using shared memory).
+ virtual void SerializeDataUpdate(void* serializePointer, size_t offset, size_t size) = 0;
private:
- std::unique_ptr<client::Client> mImpl;
+ WriteHandle(const WriteHandle&) = delete;
+ WriteHandle& operator=(const WriteHandle&) = delete;
};
- namespace client {
- class DAWN_WIRE_EXPORT MemoryTransferService {
- public:
- MemoryTransferService();
- virtual ~MemoryTransferService();
-
- class ReadHandle;
- class WriteHandle;
-
- // Create a handle for reading server data.
- // This may fail and return nullptr.
- virtual ReadHandle* CreateReadHandle(size_t) = 0;
-
- // Create a handle for writing server data.
- // This may fail and return nullptr.
- virtual WriteHandle* CreateWriteHandle(size_t) = 0;
-
- class DAWN_WIRE_EXPORT ReadHandle {
- public:
- ReadHandle();
- virtual ~ReadHandle();
-
- // Get the required serialization size for SerializeCreate
- virtual size_t SerializeCreateSize() = 0;
-
- // Serialize the handle into |serializePointer| so it can be received by the server.
- virtual void SerializeCreate(void* serializePointer) = 0;
-
- // Simply return the base address of the allocation (without applying any offset)
- // Returns nullptr if the allocation failed.
- // The data must live at least until the ReadHandle is destructued
- virtual const void* GetData() = 0;
-
- // Gets called when a MapReadCallback resolves.
- // deserialize the data update and apply
- // it to the range (offset, offset + size) of allocation
- // There could be nothing to be deserialized (if using shared memory)
- // Needs to check potential offset/size OOB and overflow
- virtual bool DeserializeDataUpdate(const void* deserializePointer,
- size_t deserializeSize,
- size_t offset,
- size_t size) = 0;
-
- private:
- ReadHandle(const ReadHandle&) = delete;
- ReadHandle& operator=(const ReadHandle&) = delete;
- };
-
- class DAWN_WIRE_EXPORT WriteHandle {
- public:
- WriteHandle();
- virtual ~WriteHandle();
-
- // Get the required serialization size for SerializeCreate
- virtual size_t SerializeCreateSize() = 0;
-
- // Serialize the handle into |serializePointer| so it can be received by the server.
- virtual void SerializeCreate(void* serializePointer) = 0;
-
- // Simply return the base address of the allocation (without applying any offset)
- // The data returned should be zero-initialized.
- // The data returned must live at least until the WriteHandle is destructed.
- // On failure, the pointer returned should be null.
- virtual void* GetData() = 0;
-
- // Get the required serialization size for SerializeDataUpdate
- virtual size_t SizeOfSerializeDataUpdate(size_t offset, size_t size) = 0;
-
- // Serialize a command to send the modified contents of
- // the subrange (offset, offset + size) of the allocation at buffer unmap
- // This subrange is always the whole mapped region for now
- // There could be nothing to be serialized (if using shared memory)
- virtual void SerializeDataUpdate(void* serializePointer,
- size_t offset,
- size_t size) = 0;
-
- private:
- WriteHandle(const WriteHandle&) = delete;
- WriteHandle& operator=(const WriteHandle&) = delete;
- };
-
- private:
- MemoryTransferService(const MemoryTransferService&) = delete;
- MemoryTransferService& operator=(const MemoryTransferService&) = delete;
- };
-
- // Backdoor to get the order of the ProcMap for testing
- DAWN_WIRE_EXPORT std::vector<const char*> GetProcMapNamesForTesting();
- } // namespace client
+ private:
+ MemoryTransferService(const MemoryTransferService&) = delete;
+ MemoryTransferService& operator=(const MemoryTransferService&) = delete;
+};
+
+// Backdoor to get the order of the ProcMap for testing
+DAWN_WIRE_EXPORT std::vector<const char*> GetProcMapNamesForTesting();
+} // namespace client
} // namespace dawn::wire
#endif // INCLUDE_DAWN_WIRE_WIRECLIENT_H_
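[Editor's sketch] The client-side MemoryTransferService above is the extension point for how mapped buffer data crosses the wire. As a rough illustration only (not part of this patch), a minimal ReadHandle that keeps the mapping in ordinary heap memory and receives updates inline might look like the following; the name InlineReadHandle and the std::vector storage are illustrative assumptions, and a full service would also provide a WriteHandle and the CreateReadHandle/CreateWriteHandle factories.

#include <cstring>
#include <vector>

#include "dawn/wire/WireClient.h"

// Illustrative only: a heap-backed ReadHandle whose data updates travel inline
// in the wire commands (no shared memory).
class InlineReadHandle final : public dawn::wire::client::MemoryTransferService::ReadHandle {
  public:
    explicit InlineReadHandle(size_t size) : mData(size) {}

    // Nothing beyond the built-in wire command is needed to create this handle.
    size_t SerializeCreateSize() override { return 0; }
    void SerializeCreate(void*) override {}

    const void* GetData() override { return mData.data(); }

    bool DeserializeDataUpdate(const void* deserializePointer,
                               size_t deserializeSize,
                               size_t offset,
                               size_t size) override {
        // Validate the range before touching the allocation.
        if (offset > mData.size() || size > mData.size() - offset || deserializeSize < size) {
            return false;
        }
        std::memcpy(mData.data() + offset, deserializePointer, size);
        return true;
    }

  private:
    std::vector<char> mData;
};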
diff --git a/chromium/third_party/dawn/include/dawn/wire/WireServer.h b/chromium/third_party/dawn/include/dawn/wire/WireServer.h
index 1957de03567..9fc2ab39739 100644
--- a/chromium/third_party/dawn/include/dawn/wire/WireServer.h
+++ b/chromium/third_party/dawn/include/dawn/wire/WireServer.h
@@ -23,126 +23,131 @@ struct DawnProcTable;
namespace dawn::wire {
- namespace server {
- class Server;
- class MemoryTransferService;
- } // namespace server
-
- struct DAWN_WIRE_EXPORT WireServerDescriptor {
- const DawnProcTable* procs;
- CommandSerializer* serializer;
- server::MemoryTransferService* memoryTransferService = nullptr;
+namespace server {
+class Server;
+class MemoryTransferService;
+} // namespace server
+
+struct DAWN_WIRE_EXPORT WireServerDescriptor {
+ const DawnProcTable* procs;
+ CommandSerializer* serializer;
+ server::MemoryTransferService* memoryTransferService = nullptr;
+};
+
+class DAWN_WIRE_EXPORT WireServer : public CommandHandler {
+ public:
+ explicit WireServer(const WireServerDescriptor& descriptor);
+ ~WireServer() override;
+
+ const volatile char* HandleCommands(const volatile char* commands, size_t size) override;
+
+ bool InjectTexture(WGPUTexture texture,
+ uint32_t id,
+ uint32_t generation,
+ uint32_t deviceId,
+ uint32_t deviceGeneration);
+ bool InjectSwapChain(WGPUSwapChain swapchain,
+ uint32_t id,
+ uint32_t generation,
+ uint32_t deviceId,
+ uint32_t deviceGeneration);
+
+ bool InjectDevice(WGPUDevice device, uint32_t id, uint32_t generation);
+
+ bool InjectInstance(WGPUInstance instance, uint32_t id, uint32_t generation);
+
+ // Look up a device by (id, generation) pair. Returns nullptr if the generation
+ // has expired or the id is not found.
+ // The Wire does not have destroy hooks that would let an embedder observe when an object
+ // has been destroyed, but in Chrome we need to know the list of live devices so we
+ // can call device.Tick() on all of them periodically to ensure progress is made on
+ // asynchronous work. This list can be obtained by tracking the (id, generation) of
+ // previously injected devices and observing whether GetDevice(id, generation) returns non-null.
+ WGPUDevice GetDevice(uint32_t id, uint32_t generation);
+
+ // Check if a device handle is known by the wire.
+ // In Chrome, we need to know the list of live devices so we can call device.Tick() on all of
+ // them periodically to ensure progress is made on asynchronous work.
+ bool IsDeviceKnown(WGPUDevice device) const;
+
+ private:
+ std::unique_ptr<server::Server> mImpl;
+};
+
+namespace server {
+class DAWN_WIRE_EXPORT MemoryTransferService {
+ public:
+ MemoryTransferService();
+ virtual ~MemoryTransferService();
+
+ class ReadHandle;
+ class WriteHandle;
+
+ // Deserialize data to create Read/Write handles. These handles are for the client
+ // to Read/Write data.
+ virtual bool DeserializeReadHandle(const void* deserializePointer,
+ size_t deserializeSize,
+ ReadHandle** readHandle) = 0;
+ virtual bool DeserializeWriteHandle(const void* deserializePointer,
+ size_t deserializeSize,
+ WriteHandle** writeHandle) = 0;
+
+ class DAWN_WIRE_EXPORT ReadHandle {
+ public:
+ ReadHandle();
+ virtual ~ReadHandle();
+
+ // Returns the size of the serialized command if SerializeDataUpdate
+ // is called with the same offset/size arguments.
+ virtual size_t SizeOfSerializeDataUpdate(size_t offset, size_t size) = 0;
+
+ // Gets called when a MapReadCallback resolves.
+ // Serializes the data update for the range (offset, offset + size) into
+ // |serializePointer| for the client. There may be nothing to serialize
+ // (e.g. when using shared memory).
+ virtual void SerializeDataUpdate(const void* data,
+ size_t offset,
+ size_t size,
+ void* serializePointer) = 0;
+
+ private:
+ ReadHandle(const ReadHandle&) = delete;
+ ReadHandle& operator=(const ReadHandle&) = delete;
};
- class DAWN_WIRE_EXPORT WireServer : public CommandHandler {
+ class DAWN_WIRE_EXPORT WriteHandle {
public:
- explicit WireServer(const WireServerDescriptor& descriptor);
- ~WireServer() override;
-
- const volatile char* HandleCommands(const volatile char* commands, size_t size) final;
-
- bool InjectTexture(WGPUTexture texture,
- uint32_t id,
- uint32_t generation,
- uint32_t deviceId,
- uint32_t deviceGeneration);
- bool InjectSwapChain(WGPUSwapChain swapchain,
- uint32_t id,
- uint32_t generation,
- uint32_t deviceId,
- uint32_t deviceGeneration);
-
- bool InjectDevice(WGPUDevice device, uint32_t id, uint32_t generation);
-
- bool InjectInstance(WGPUInstance instance, uint32_t id, uint32_t generation);
-
- // Look up a device by (id, generation) pair. Returns nullptr if the generation
- // has expired or the id is not found.
- // The Wire does not have destroy hooks to allow an embedder to observe when an object
- // has been destroyed, but in Chrome, we need to know the list of live devices so we
- // can call device.Tick() on all of them periodically to ensure progress on asynchronous
- // work is made. Getting this list can be done by tracking the (id, generation) of
- // previously injected devices, and observing if GetDevice(id, generation) returns non-null.
- WGPUDevice GetDevice(uint32_t id, uint32_t generation);
+ WriteHandle();
+ virtual ~WriteHandle();
+
+ // Set the target for writes from the client. DeserializeDataUpdate should copy
+ // data into the target.
+ void SetTarget(void* data);
+ // Set the staging data length for OOB checks.
+ void SetDataLength(size_t dataLength);
+
+ // This function takes in the serialized result of
+ // client::MemoryTransferService::WriteHandle::SerializeDataUpdate.
+ // Implementations must validate offset/size against OOB access and overflow.
+ virtual bool DeserializeDataUpdate(const void* deserializePointer,
+ size_t deserializeSize,
+ size_t offset,
+ size_t size) = 0;
+
+ protected:
+ void* mTargetData = nullptr;
+ size_t mDataLength = 0;
private:
- std::unique_ptr<server::Server> mImpl;
+ WriteHandle(const WriteHandle&) = delete;
+ WriteHandle& operator=(const WriteHandle&) = delete;
};
- namespace server {
- class DAWN_WIRE_EXPORT MemoryTransferService {
- public:
- MemoryTransferService();
- virtual ~MemoryTransferService();
-
- class ReadHandle;
- class WriteHandle;
-
- // Deserialize data to create Read/Write handles. These handles are for the client
- // to Read/Write data.
- virtual bool DeserializeReadHandle(const void* deserializePointer,
- size_t deserializeSize,
- ReadHandle** readHandle) = 0;
- virtual bool DeserializeWriteHandle(const void* deserializePointer,
- size_t deserializeSize,
- WriteHandle** writeHandle) = 0;
-
- class DAWN_WIRE_EXPORT ReadHandle {
- public:
- ReadHandle();
- virtual ~ReadHandle();
-
- // Return the size of the command serialized if
- // SerializeDataUpdate is called with the same offset/size args
- virtual size_t SizeOfSerializeDataUpdate(size_t offset, size_t size) = 0;
-
- // Gets called when a MapReadCallback resolves.
- // Serialize the data update for the range (offset, offset + size) into
- // |serializePointer| to the client There could be nothing to be serialized (if
- // using shared memory)
- virtual void SerializeDataUpdate(const void* data,
- size_t offset,
- size_t size,
- void* serializePointer) = 0;
-
- private:
- ReadHandle(const ReadHandle&) = delete;
- ReadHandle& operator=(const ReadHandle&) = delete;
- };
-
- class DAWN_WIRE_EXPORT WriteHandle {
- public:
- WriteHandle();
- virtual ~WriteHandle();
-
- // Set the target for writes from the client. DeserializeFlush should copy data
- // into the target.
- void SetTarget(void* data);
- // Set Staging data length for OOB check
- void SetDataLength(size_t dataLength);
-
- // This function takes in the serialized result of
- // client::MemoryTransferService::WriteHandle::SerializeDataUpdate.
- // Needs to check potential offset/size OOB and overflow
- virtual bool DeserializeDataUpdate(const void* deserializePointer,
- size_t deserializeSize,
- size_t offset,
- size_t size) = 0;
-
- protected:
- void* mTargetData = nullptr;
- size_t mDataLength = 0;
-
- private:
- WriteHandle(const WriteHandle&) = delete;
- WriteHandle& operator=(const WriteHandle&) = delete;
- };
-
- private:
- MemoryTransferService(const MemoryTransferService&) = delete;
- MemoryTransferService& operator=(const MemoryTransferService&) = delete;
- };
- } // namespace server
+ private:
+ MemoryTransferService(const MemoryTransferService&) = delete;
+ MemoryTransferService& operator=(const MemoryTransferService&) = delete;
+};
+} // namespace server
} // namespace dawn::wire
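[Editor's sketch] The GetDevice()/IsDeviceKnown() comments above describe how an embedder is expected to track live devices so it can tick them periodically. A minimal sketch of that bookkeeping, assuming the embedder supplies its own tick callback (for example one forwarding to the device Tick entry of its proc table); LiveDeviceTracker is an illustrative name, not a Dawn class.

#include <cstdint>
#include <functional>
#include <utility>
#include <vector>

#include "dawn/wire/WireServer.h"

// Illustrative only: remembers the (id, generation) of injected devices and
// forgets them once WireServer::GetDevice() stops returning them.
class LiveDeviceTracker {
  public:
    void OnDeviceInjected(uint32_t id, uint32_t generation) {
        mDevices.emplace_back(id, generation);
    }

    // Call periodically (e.g. once per frame) so asynchronous work keeps progressing.
    void TickAll(dawn::wire::WireServer* server, const std::function<void(WGPUDevice)>& tick) {
        for (size_t i = 0; i < mDevices.size();) {
            WGPUDevice device = server->GetDevice(mDevices[i].first, mDevices[i].second);
            if (device == nullptr) {
                // Generation expired or id unknown: drop the stale entry.
                mDevices[i] = mDevices.back();
                mDevices.pop_back();
            } else {
                tick(device);
                ++i;
            }
        }
    }

  private:
    std::vector<std::pair<uint32_t, uint32_t>> mDevices;
};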
diff --git a/chromium/third_party/dawn/include/dawn/wire/dawn_wire_export.h b/chromium/third_party/dawn/include/dawn/wire/dawn_wire_export.h
index 285d5db0029..e5b211387ce 100644
--- a/chromium/third_party/dawn/include/dawn/wire/dawn_wire_export.h
+++ b/chromium/third_party/dawn/include/dawn/wire/dawn_wire_export.h
@@ -16,21 +16,21 @@
#define INCLUDE_DAWN_WIRE_DAWN_WIRE_EXPORT_H_
#if defined(DAWN_WIRE_SHARED_LIBRARY)
-# if defined(_WIN32)
-# if defined(DAWN_WIRE_IMPLEMENTATION)
-# define DAWN_WIRE_EXPORT __declspec(dllexport)
-# else
-# define DAWN_WIRE_EXPORT __declspec(dllimport)
-# endif
-# else // defined(_WIN32)
-# if defined(DAWN_WIRE_IMPLEMENTATION)
-# define DAWN_WIRE_EXPORT __attribute__((visibility("default")))
-# else
-# define DAWN_WIRE_EXPORT
-# endif
-# endif // defined(_WIN32)
-#else // defined(DAWN_WIRE_SHARED_LIBRARY)
-# define DAWN_WIRE_EXPORT
+#if defined(_WIN32)
+#if defined(DAWN_WIRE_IMPLEMENTATION)
+#define DAWN_WIRE_EXPORT __declspec(dllexport)
+#else
+#define DAWN_WIRE_EXPORT __declspec(dllimport)
+#endif
+#else // defined(_WIN32)
+#if defined(DAWN_WIRE_IMPLEMENTATION)
+#define DAWN_WIRE_EXPORT __attribute__((visibility("default")))
+#else
+#define DAWN_WIRE_EXPORT
+#endif
+#endif // defined(_WIN32)
+#else // defined(DAWN_WIRE_SHARED_LIBRARY)
+#define DAWN_WIRE_EXPORT
#endif // defined(DAWN_WIRE_SHARED_LIBRARY)
#endif // INCLUDE_DAWN_WIRE_DAWN_WIRE_EXPORT_H_
diff --git a/chromium/third_party/dawn/include/tint/.clang-format b/chromium/third_party/dawn/include/tint/.clang-format
deleted file mode 100644
index 2fb833a5df1..00000000000
--- a/chromium/third_party/dawn/include/tint/.clang-format
+++ /dev/null
@@ -1,2 +0,0 @@
-# http://clang.llvm.org/docs/ClangFormatStyleOptions.html
-BasedOnStyle: Chromium
diff --git a/chromium/third_party/dawn/include/tint/tint.h b/chromium/third_party/dawn/include/tint/tint.h
index 2b8430e01d8..c397cdcba83 100644
--- a/chromium/third_party/dawn/include/tint/tint.h
+++ b/chromium/third_party/dawn/include/tint/tint.h
@@ -33,6 +33,7 @@
#include "src/tint/transform/robustness.h"
#include "src/tint/transform/single_entry_point.h"
#include "src/tint/transform/vertex_pulling.h"
+#include "src/tint/writer/flatten_bindings.h"
#include "src/tint/writer/writer.h"
#if TINT_BUILD_SPV_READER
diff --git a/chromium/third_party/dawn/include/webgpu/webgpu_cpp.h b/chromium/third_party/dawn/include/webgpu/webgpu_cpp.h
index c8928cc7b35..f1a633356f3 100644
--- a/chromium/third_party/dawn/include/webgpu/webgpu_cpp.h
+++ b/chromium/third_party/dawn/include/webgpu/webgpu_cpp.h
@@ -15,6 +15,6 @@
#ifndef INCLUDE_WEBGPU_WEBGPU_CPP_H_
#define INCLUDE_WEBGPU_WEBGPU_CPP_H_
-#include <dawn/webgpu_cpp.h>
+#include "dawn/webgpu_cpp.h"
#endif // INCLUDE_WEBGPU_WEBGPU_CPP_H_
diff --git a/chromium/third_party/dawn/infra/config/global/generated/commit-queue.cfg b/chromium/third_party/dawn/infra/config/global/generated/commit-queue.cfg
index ac774d17864..864c2a6e303 100644
--- a/chromium/third_party/dawn/infra/config/global/generated/commit-queue.cfg
+++ b/chromium/third_party/dawn/infra/config/global/generated/commit-queue.cfg
@@ -27,6 +27,10 @@ config_groups {
}
tryjob {
builders {
+ name: "chromium/try/dawn-try-win10-x86-rel"
+ includable_only: true
+ }
+ builders {
name: "chromium/try/linux-dawn-rel"
}
builders {
diff --git a/chromium/third_party/dawn/infra/config/global/generated/cr-buildbucket.cfg b/chromium/third_party/dawn/infra/config/global/generated/cr-buildbucket.cfg
index 732cf74342c..9c58b0180f8 100644
--- a/chromium/third_party/dawn/infra/config/global/generated/cr-buildbucket.cfg
+++ b/chromium/third_party/dawn/infra/config/global/generated/cr-buildbucket.cfg
@@ -19,7 +19,7 @@ buckets {
recipe {
name: "dawn"
cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build"
- cipd_version: "refs/heads/master"
+ cipd_version: "refs/heads/main"
properties_j: "$build/goma:{\"enable_ats\":true,\"rpc_extra_params\":\"?prod\",\"server_host\":\"goma.chromium.org\"}"
properties_j: "clang:true"
properties_j: "debug:false"
@@ -41,7 +41,7 @@ buckets {
recipe {
name: "dawn"
cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build"
- cipd_version: "refs/heads/master"
+ cipd_version: "refs/heads/main"
properties_j: "$build/goma:{\"enable_ats\":true,\"rpc_extra_params\":\"?prod\",\"server_host\":\"goma.chromium.org\"}"
properties_j: "clang:true"
properties_j: "debug:true"
@@ -62,7 +62,7 @@ buckets {
recipe {
name: "dawn"
cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build"
- cipd_version: "refs/heads/master"
+ cipd_version: "refs/heads/main"
properties_j: "$build/goma:{\"enable_ats\":true,\"rpc_extra_params\":\"?prod\",\"server_host\":\"goma.chromium.org\"}"
properties_j: "clang:true"
properties_j: "debug:true"
@@ -83,7 +83,7 @@ buckets {
recipe {
name: "dawn"
cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build"
- cipd_version: "refs/heads/master"
+ cipd_version: "refs/heads/main"
properties_j: "$build/goma:{\"enable_ats\":true,\"rpc_extra_params\":\"?prod\",\"server_host\":\"goma.chromium.org\"}"
properties_j: "clang:true"
properties_j: "debug:false"
@@ -104,7 +104,7 @@ buckets {
recipe {
name: "dawn"
cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build"
- cipd_version: "refs/heads/master"
+ cipd_version: "refs/heads/main"
properties_j: "$build/goma:{\"enable_ats\":true,\"rpc_extra_params\":\"?prod\",\"server_host\":\"goma.chromium.org\"}"
properties_j: "clang:true"
properties_j: "debug:false"
@@ -125,7 +125,7 @@ buckets {
recipe {
name: "dawn"
cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build"
- cipd_version: "refs/heads/master"
+ cipd_version: "refs/heads/main"
properties_j: "$build/goma:{\"rpc_extra_params\":\"?prod\",\"server_host\":\"goma.chromium.org\"}"
properties_j: "clang:true"
properties_j: "debug:true"
@@ -150,7 +150,7 @@ buckets {
recipe {
name: "dawn"
cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build"
- cipd_version: "refs/heads/master"
+ cipd_version: "refs/heads/main"
properties_j: "$build/goma:{\"rpc_extra_params\":\"?prod\",\"server_host\":\"goma.chromium.org\"}"
properties_j: "clang:true"
properties_j: "debug:false"
@@ -175,7 +175,7 @@ buckets {
recipe {
name: "dawn"
cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build"
- cipd_version: "refs/heads/master"
+ cipd_version: "refs/heads/main"
properties_j: "$build/goma:{\"enable_ats\":true,\"rpc_extra_params\":\"?prod\",\"server_host\":\"goma.chromium.org\"}"
properties_j: "clang:true"
properties_j: "debug:true"
@@ -200,7 +200,7 @@ buckets {
recipe {
name: "dawn"
cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build"
- cipd_version: "refs/heads/master"
+ cipd_version: "refs/heads/main"
properties_j: "$build/goma:{\"enable_ats\":true,\"rpc_extra_params\":\"?prod\",\"server_host\":\"goma.chromium.org\"}"
properties_j: "clang:true"
properties_j: "debug:true"
@@ -225,7 +225,7 @@ buckets {
recipe {
name: "dawn"
cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build"
- cipd_version: "refs/heads/master"
+ cipd_version: "refs/heads/main"
properties_j: "$build/goma:{\"enable_ats\":true,\"rpc_extra_params\":\"?prod\",\"server_host\":\"goma.chromium.org\"}"
properties_j: "clang:true"
properties_j: "debug:false"
@@ -250,7 +250,7 @@ buckets {
recipe {
name: "dawn"
cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build"
- cipd_version: "refs/heads/master"
+ cipd_version: "refs/heads/main"
properties_j: "$build/goma:{\"enable_ats\":true,\"rpc_extra_params\":\"?prod\",\"server_host\":\"goma.chromium.org\"}"
properties_j: "clang:true"
properties_j: "debug:false"
@@ -275,7 +275,7 @@ buckets {
recipe {
name: "dawn"
cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build"
- cipd_version: "refs/heads/master"
+ cipd_version: "refs/heads/main"
properties_j: "clang:false"
properties_j: "debug:true"
properties_j: "target_cpu:\"x64\""
@@ -295,7 +295,7 @@ buckets {
recipe {
name: "dawn"
cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build"
- cipd_version: "refs/heads/master"
+ cipd_version: "refs/heads/main"
properties_j: "clang:false"
properties_j: "debug:false"
properties_j: "target_cpu:\"x64\""
@@ -331,7 +331,7 @@ buckets {
recipe {
name: "dawn"
cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build"
- cipd_version: "refs/heads/master"
+ cipd_version: "refs/heads/main"
properties_j: "$build/goma:{\"enable_ats\":true,\"rpc_extra_params\":\"?prod\",\"server_host\":\"goma.chromium.org\"}"
properties_j: "$depot_tools/bot_update:{\"apply_patch_on_gclient\":true}"
properties_j: "clang:true"
@@ -353,7 +353,7 @@ buckets {
recipe {
name: "dawn"
cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build"
- cipd_version: "refs/heads/master"
+ cipd_version: "refs/heads/main"
properties_j: "$build/goma:{\"enable_ats\":true,\"rpc_extra_params\":\"?prod\",\"server_host\":\"goma.chromium.org\"}"
properties_j: "$depot_tools/bot_update:{\"apply_patch_on_gclient\":true}"
properties_j: "clang:true"
@@ -375,7 +375,7 @@ buckets {
recipe {
name: "dawn"
cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build"
- cipd_version: "refs/heads/master"
+ cipd_version: "refs/heads/main"
properties_j: "$build/goma:{\"enable_ats\":true,\"rpc_extra_params\":\"?prod\",\"server_host\":\"goma.chromium.org\"}"
properties_j: "$depot_tools/bot_update:{\"apply_patch_on_gclient\":true}"
properties_j: "clang:true"
@@ -397,7 +397,7 @@ buckets {
recipe {
name: "dawn"
cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build"
- cipd_version: "refs/heads/master"
+ cipd_version: "refs/heads/main"
properties_j: "$build/goma:{\"enable_ats\":true,\"rpc_extra_params\":\"?prod\",\"server_host\":\"goma.chromium.org\"}"
properties_j: "$depot_tools/bot_update:{\"apply_patch_on_gclient\":true}"
properties_j: "clang:true"
@@ -419,7 +419,7 @@ buckets {
recipe {
name: "dawn"
cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build"
- cipd_version: "refs/heads/master"
+ cipd_version: "refs/heads/main"
properties_j: "$build/goma:{\"rpc_extra_params\":\"?prod\",\"server_host\":\"goma.chromium.org\"}"
properties_j: "$depot_tools/bot_update:{\"apply_patch_on_gclient\":true}"
properties_j: "clang:true"
@@ -445,7 +445,7 @@ buckets {
recipe {
name: "dawn"
cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build"
- cipd_version: "refs/heads/master"
+ cipd_version: "refs/heads/main"
properties_j: "$build/goma:{\"rpc_extra_params\":\"?prod\",\"server_host\":\"goma.chromium.org\"}"
properties_j: "$depot_tools/bot_update:{\"apply_patch_on_gclient\":true}"
properties_j: "clang:true"
@@ -471,7 +471,7 @@ buckets {
recipe {
name: "run_presubmit"
cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build"
- cipd_version: "refs/heads/master"
+ cipd_version: "refs/heads/main"
properties_j: "$depot_tools/bot_update:{\"apply_patch_on_gclient\":true}"
properties_j: "repo_name:\"dawn\""
properties_j: "runhooks:true"
@@ -491,7 +491,7 @@ buckets {
recipe {
name: "dawn"
cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build"
- cipd_version: "refs/heads/master"
+ cipd_version: "refs/heads/main"
properties_j: "$build/goma:{\"enable_ats\":true,\"rpc_extra_params\":\"?prod\",\"server_host\":\"goma.chromium.org\"}"
properties_j: "$depot_tools/bot_update:{\"apply_patch_on_gclient\":true}"
properties_j: "clang:true"
@@ -517,7 +517,7 @@ buckets {
recipe {
name: "dawn"
cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build"
- cipd_version: "refs/heads/master"
+ cipd_version: "refs/heads/main"
properties_j: "$build/goma:{\"enable_ats\":true,\"rpc_extra_params\":\"?prod\",\"server_host\":\"goma.chromium.org\"}"
properties_j: "$depot_tools/bot_update:{\"apply_patch_on_gclient\":true}"
properties_j: "clang:true"
@@ -543,7 +543,7 @@ buckets {
recipe {
name: "dawn"
cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build"
- cipd_version: "refs/heads/master"
+ cipd_version: "refs/heads/main"
properties_j: "$build/goma:{\"enable_ats\":true,\"rpc_extra_params\":\"?prod\",\"server_host\":\"goma.chromium.org\"}"
properties_j: "$depot_tools/bot_update:{\"apply_patch_on_gclient\":true}"
properties_j: "clang:true"
@@ -569,7 +569,7 @@ buckets {
recipe {
name: "dawn"
cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build"
- cipd_version: "refs/heads/master"
+ cipd_version: "refs/heads/main"
properties_j: "$build/goma:{\"enable_ats\":true,\"rpc_extra_params\":\"?prod\",\"server_host\":\"goma.chromium.org\"}"
properties_j: "$depot_tools/bot_update:{\"apply_patch_on_gclient\":true}"
properties_j: "clang:true"
@@ -595,7 +595,7 @@ buckets {
recipe {
name: "dawn"
cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build"
- cipd_version: "refs/heads/master"
+ cipd_version: "refs/heads/main"
properties_j: "$depot_tools/bot_update:{\"apply_patch_on_gclient\":true}"
properties_j: "clang:false"
properties_j: "debug:true"
@@ -616,7 +616,7 @@ buckets {
recipe {
name: "dawn"
cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build"
- cipd_version: "refs/heads/master"
+ cipd_version: "refs/heads/main"
properties_j: "$depot_tools/bot_update:{\"apply_patch_on_gclient\":true}"
properties_j: "clang:false"
properties_j: "debug:false"
diff --git a/chromium/third_party/dawn/infra/config/global/generated/project.cfg b/chromium/third_party/dawn/infra/config/global/generated/project.cfg
index 06a91726219..19abc318fd7 100644
--- a/chromium/third_party/dawn/infra/config/global/generated/project.cfg
+++ b/chromium/third_party/dawn/infra/config/global/generated/project.cfg
@@ -7,7 +7,7 @@
name: "dawn"
access: "group:all"
lucicfg {
- version: "1.30.9"
+ version: "1.30.11"
package_dir: ".."
config_dir: "generated"
entry_point: "main.star"
diff --git a/chromium/third_party/dawn/infra/config/global/main.star b/chromium/third_party/dawn/infra/config/global/main.star
index 7331e9a502f..051e75799af 100755
--- a/chromium/third_party/dawn/infra/config/global/main.star
+++ b/chromium/third_party/dawn/infra/config/global/main.star
@@ -128,7 +128,7 @@ def get_builder_executable():
return luci.recipe(
name = "dawn",
cipd_package = "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build",
- cipd_version = "refs/heads/master",
+ cipd_version = "refs/heads/main",
)
def get_presubmit_executable():
@@ -140,7 +140,7 @@ def get_presubmit_executable():
return luci.recipe(
name = "run_presubmit",
cipd_package = "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build",
- cipd_version = "refs/heads/master",
+ cipd_version = "refs/heads/main",
)
def get_os_from_arg(arg):
@@ -411,6 +411,12 @@ chromium_dawn_tryjob("linux")
chromium_dawn_tryjob("mac")
chromium_dawn_tryjob("win")
+luci.cq_tryjob_verifier(
+ cq_group = "Dawn-CQ",
+ builder = "chromium:try/dawn-try-win10-x86-rel",
+ includable_only = True,
+)
+
# Views
luci.milo(
diff --git a/chromium/third_party/dawn/infra/kokoro/linux/build.sh b/chromium/third_party/dawn/infra/kokoro/linux/build.sh
index c8f8ff5f4e8..6d05eaacb0b 100755
--- a/chromium/third_party/dawn/infra/kokoro/linux/build.sh
+++ b/chromium/third_party/dawn/infra/kokoro/linux/build.sh
@@ -30,6 +30,12 @@ else
TMP_DIR=/tmp
fi
+echo "*****************************************************************"
+echo "* build.sh"
+echo "*"
+echo "* df:"
+df
+echo "*****************************************************************"
# --privileged is required for some sanitizer builds, as they seem to require PTRACE privileges
docker run --rm -i \
diff --git a/chromium/third_party/dawn/infra/kokoro/linux/docker.sh b/chromium/third_party/dawn/infra/kokoro/linux/docker.sh
index 56f19c2a20f..e07cd9cabfe 100755
--- a/chromium/third_party/dawn/infra/kokoro/linux/docker.sh
+++ b/chromium/third_party/dawn/infra/kokoro/linux/docker.sh
@@ -50,6 +50,9 @@ function status {
echo ""
echo "*****************************************************************"
echo "* $@"
+ echo "*"
+ echo "* df:"
+ df
echo "*****************************************************************"
echo ""
task_begin $@
@@ -90,7 +93,7 @@ status "Checking for CRLF"
./tools/check-no-crlf
status "Fetching dependencies"
-cp scripts/standalone.gclient .gclient
+cp scripts/standalone-with-node.gclient .gclient
with_retry gclient sync
status "Linting"
@@ -139,9 +142,7 @@ if [ "$BUILD_SYSTEM" == "cmake" ]; then
status "Running go tool unittests"
show_cmds
- pushd tools/src
- go test ./...
- popd
+ go test ./...
hide_cmds
cd ${BUILD_DIR}
@@ -163,6 +164,12 @@ if [ "$BUILD_SYSTEM" == "cmake" ]; then
cmake --build . -- --jobs=$(nproc)
hide_cmds
+ status "Re-building dawn in '${BUILD_DIR}' with dawn/node enabled"
+ show_cmds
+ cmake ${SRC_DIR} ${CMAKE_FLAGS} ${COMMON_CMAKE_FLAGS} -DDAWN_BUILD_NODE_BINDINGS=1 -DDAWN_ENABLE_PIC=1 -DDAWN_USE_X11=OFF
+ cmake --build . -- --jobs=$(nproc)
+ hide_cmds
+
status "Running tint_unittests"
show_cmds
./tint_unittests
diff --git a/chromium/third_party/dawn/scripts/dawn_features.gni b/chromium/third_party/dawn/scripts/dawn_features.gni
index 234791cf0d3..57227dea12e 100644
--- a/chromium/third_party/dawn/scripts/dawn_features.gni
+++ b/chromium/third_party/dawn/scripts/dawn_features.gni
@@ -19,10 +19,12 @@ if (build_with_chromium) {
import("//build/config/sanitizers/sanitizers.gni")
dawn_use_x11 = ozone_platform_x11
+ dawn_use_wayland = false
} else {
declare_args() {
# Whether Dawn should enable X11 support.
dawn_use_x11 = is_linux && !is_chromeos
+ dawn_use_wayland = false
}
}
diff --git a/chromium/third_party/dawn/scripts/extract.py b/chromium/third_party/dawn/scripts/extract.py
index ed263f49212..ef040365a5b 100644
--- a/chromium/third_party/dawn/scripts/extract.py
+++ b/chromium/third_party/dawn/scripts/extract.py
@@ -25,12 +25,11 @@ import zipfile
def CheckedJoin(output, path):
"""
- CheckedJoin returns os.path.join(output, path). It does sanity checks to
- ensure the resulting path is under output, but shouldn't be used on untrusted
- input.
- """
+ CheckedJoin returns os.path.join(output, path). It checks that the resulting
+ path is under output, but shouldn't be used on untrusted input.
+ """
path = os.path.normpath(path)
- if os.path.isabs(path) or path.startswith('.'):
+ if os.path.isabs(path) or path.startswith("."):
raise ValueError(path)
return os.path.join(output, path)
@@ -51,22 +50,22 @@ class SymlinkEntry(object):
def IterateZip(path):
"""
- IterateZip opens the zip file at path and returns a generator of entry objects
- for each file in it.
- """
- with zipfile.ZipFile(path, 'r') as zip_file:
+ IterateZip opens the zip file at path and returns a generator of entry objects
+ for each file in it.
+ """
+ with zipfile.ZipFile(path, "r") as zip_file:
for info in zip_file.infolist():
- if info.filename.endswith('/'):
+ if info.filename.endswith("/"):
continue
yield FileEntry(info.filename, None, zip_file.open(info))
def IterateTar(path, compression):
"""
- IterateTar opens the tar.gz or tar.bz2 file at path and returns a generator of
- entry objects for each file in it.
- """
- with tarfile.open(path, 'r:' + compression) as tar_file:
+ IterateTar opens the tar.gz or tar.bz2 file at path and returns a generator of
+ entry objects for each file in it.
+ """
+ with tarfile.open(path, "r:" + compression) as tar_file:
for info in tar_file:
if info.isdir():
pass
@@ -80,11 +79,13 @@ def IterateTar(path, compression):
def main(args):
- parser = optparse.OptionParser(usage='Usage: %prog ARCHIVE OUTPUT')
- parser.add_option('--no-prefix',
- dest='no_prefix',
- action='store_true',
- help='Do not remove a prefix from paths in the archive.')
+ parser = optparse.OptionParser(usage="Usage: %prog ARCHIVE OUTPUT")
+ parser.add_option(
+ "--no-prefix",
+ dest="no_prefix",
+ action="store_true",
+ help="Do not remove a prefix from paths in the archive.",
+ )
options, args = parser.parse_args(args)
if len(args) != 2:
@@ -97,7 +98,7 @@ def main(args):
# Skip archives that weren't downloaded.
return 0
- with open(archive, 'rb') as f:
+ with open(archive, "rb") as f:
sha256 = hashlib.sha256()
while True:
chunk = f.read(1024 * 1024)
@@ -113,12 +114,12 @@ def main(args):
print("Already up-to-date.")
return 0
- if archive.endswith('.zip'):
+ if archive.endswith(".zip"):
entries = IterateZip(archive)
- elif archive.endswith('.tar.gz'):
- entries = IterateTar(archive, 'gz')
- elif archive.endswith('.tar.bz2'):
- entries = IterateTar(archive, 'bz2')
+ elif archive.endswith(".tar.gz"):
+ entries = IterateTar(archive, "gz")
+ elif archive.endswith(".tar.bz2"):
+ entries = IterateTar(archive, "bz2")
else:
raise ValueError(archive)
@@ -132,11 +133,11 @@ def main(args):
num_extracted = 0
for entry in entries:
# Even on Windows, zip files must always use forward slashes.
- if '\\' in entry.path or entry.path.startswith('/'):
+ if "\\" in entry.path or entry.path.startswith("/"):
raise ValueError(entry.path)
if not options.no_prefix:
- new_prefix, rest = entry.path.split('/', 1)
+ new_prefix, rest = entry.path.split("/", 1)
# Ensure the archive is consistent.
if prefix is None:
@@ -151,12 +152,12 @@ def main(args):
if not os.path.isdir(os.path.dirname(fixed_path)):
os.makedirs(os.path.dirname(fixed_path))
if isinstance(entry, FileEntry):
- with open(fixed_path, 'wb') as out:
+ with open(fixed_path, "wb") as out:
shutil.copyfileobj(entry.fileobj, out)
elif isinstance(entry, SymlinkEntry):
os.symlink(entry.target, fixed_path)
else:
- raise TypeError('unknown entry type')
+ raise TypeError("unknown entry type")
# Fix up permissions if need be.
# TODO(davidben): To be extra tidy, this should only track the execute bit
@@ -171,12 +172,12 @@ def main(args):
finally:
entries.close()
- with open(stamp_path, 'w') as f:
+ with open(stamp_path, "w") as f:
f.write(digest)
print("Done. Extracted %d files." % (num_extracted, ))
return 0
-if __name__ == '__main__':
+if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
diff --git a/chromium/third_party/dawn/src/Dummy.cpp b/chromium/third_party/dawn/src/Placeholder.cpp
index 5959a87bb60..5959a87bb60 100644
--- a/chromium/third_party/dawn/src/Dummy.cpp
+++ b/chromium/third_party/dawn/src/Placeholder.cpp
diff --git a/chromium/third_party/dawn/src/dawn/CMakeLists.txt b/chromium/third_party/dawn/src/dawn/CMakeLists.txt
index b2a3cbcaa3f..f1f834eea11 100644
--- a/chromium/third_party/dawn/src/dawn/CMakeLists.txt
+++ b/chromium/third_party/dawn/src/dawn/CMakeLists.txt
@@ -63,13 +63,13 @@ DawnJSONGenerator(
# Headers only INTERFACE library with generated headers don't work in CMake
# because the GENERATED property is local to a directory. Instead we make a
-# STATIC library with a Dummy cpp file.
+# STATIC library with a placeholder cpp file.
#
# INTERFACE libraries can only have INTERFACE sources so the sources get added
# to the dependent's list of sources. If these dependents are in another
# directory, they don't see the GENERATED property and fail to configure
# because the file doesn't exist on disk.
-add_library(dawn_headers STATIC ${DAWN_DUMMY_FILE})
+add_library(dawn_headers STATIC ${DAWN_PLACEHOLDER_FILE})
common_compile_options(dawn_headers)
target_sources(dawn_headers PRIVATE
"${DAWN_INCLUDE_DIR}/dawn/dawn_wsi.h"
@@ -89,7 +89,7 @@ DawnJSONGenerator(
# This headers only library needs to be a STATIC library, see comment for
# dawn_headers above.
-add_library(dawncpp_headers STATIC ${DAWN_DUMMY_FILE})
+add_library(dawncpp_headers STATIC ${DAWN_PLACEHOLDER_FILE})
common_compile_options(dawncpp_headers)
target_sources(dawncpp_headers PRIVATE
"${DAWN_INCLUDE_DIR}/dawn/EnumClassBitmasks.h"
@@ -107,7 +107,7 @@ DawnJSONGenerator(
RESULT_VARIABLE "DAWNCPP_GEN_SOURCES"
)
-add_library(dawncpp STATIC ${DAWN_DUMMY_FILE})
+add_library(dawncpp STATIC ${DAWN_PLACEHOLDER_FILE})
common_compile_options(dawncpp)
target_sources(dawncpp PRIVATE ${DAWNCPP_GEN_SOURCES})
target_link_libraries(dawncpp PUBLIC dawncpp_headers)
@@ -122,7 +122,7 @@ DawnJSONGenerator(
RESULT_VARIABLE "DAWNPROC_GEN_SOURCES"
)
-add_library(dawn_proc ${DAWN_DUMMY_FILE})
+add_library(dawn_proc ${DAWN_PLACEHOLDER_FILE})
common_compile_options(dawn_proc)
target_compile_definitions(dawn_proc PRIVATE "WGPU_IMPLEMENTATION")
if(BUILD_SHARED_LIBS)
diff --git a/chromium/third_party/dawn/src/dawn/CPPLINT.cfg b/chromium/third_party/dawn/src/dawn/CPPLINT.cfg
deleted file mode 100644
index 4f810e8f2a6..00000000000
--- a/chromium/third_party/dawn/src/dawn/CPPLINT.cfg
+++ /dev/null
@@ -1,16 +0,0 @@
-filter=-build/include_order
-filter=-build/include_what_you_use
-filter=-build/namespaces
-filter=-readability/casting
-filter=-readability/check
-filter=-readability/namespace
-filter=-readability/todo
-filter=-runtime/arrays
-filter=-runtime/explicit
-filter=-runtime/indentation_namespace
-filter=-runtime/int
-filter=-runtime/printf
-filter=-runtime/threadsafe_fn
-filter=-whitespace/blank_line
-filter=-whitespace/comments
-filter=-whitespace/todo
diff --git a/chromium/third_party/dawn/src/dawn/OWNERS b/chromium/third_party/dawn/src/dawn/OWNERS
index fdc046d6a6b..65efb86493e 100644
--- a/chromium/third_party/dawn/src/dawn/OWNERS
+++ b/chromium/third_party/dawn/src/dawn/OWNERS
@@ -3,4 +3,4 @@ cwallez@chromium.org
enga@chromium.org
kainino@chromium.org
jiawei.shao@intel.com
-lokokun@google.com
+lokokung@google.com
diff --git a/chromium/third_party/dawn/src/dawn/common/Alloc.h b/chromium/third_party/dawn/src/dawn/common/Alloc.h
index c13b33b24a9..23bc92d9341 100644
--- a/chromium/third_party/dawn/src/dawn/common/Alloc.h
+++ b/chromium/third_party/dawn/src/dawn/common/Alloc.h
@@ -20,9 +20,9 @@
template <typename T>
T* AllocNoThrow(size_t count) {
-#if defined(ADDRESS_SANITIZER)
+#if defined(ADDRESS_SANITIZER) || defined(MEMORY_SANITIZER)
if (count * sizeof(T) >= 0x70000000) {
- // std::nothrow isn't implemented on ASAN and it has a 2GB allocation limit.
+ // std::nothrow isn't implemented in sanitizers and they often have a 2GB allocation limit.
// Catch large allocations and error out so fuzzers make progress.
return nullptr;
}
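[Editor's sketch] The sanitizer guard above only helps if callers treat a null return as a recoverable out-of-memory condition. A hedged usage sketch follows, assuming (as the std::nothrow comment suggests) that AllocNoThrow performs an array new, so delete[] is the matching cleanup; TryMakeScratchBuffer is an illustrative name.

#include <cstdint>
#include <memory>

#include "dawn/common/Alloc.h"

// Illustrative only: allocate a scratch buffer without risking std::bad_alloc.
bool TryMakeScratchBuffer(size_t elementCount, std::unique_ptr<uint32_t[]>* out) {
    uint32_t* data = AllocNoThrow<uint32_t>(elementCount);  // may return nullptr
    if (data == nullptr) {
        return false;  // surface an out-of-memory error instead of crashing
    }
    out->reset(data);  // unique_ptr<T[]> releases with delete[], matching the assumed array new
    return true;
}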
diff --git a/chromium/third_party/dawn/src/dawn/common/Assert.cpp b/chromium/third_party/dawn/src/dawn/common/Assert.cpp
index 95d2efd30b7..2599e12417f 100644
--- a/chromium/third_party/dawn/src/dawn/common/Assert.cpp
+++ b/chromium/third_party/dawn/src/dawn/common/Assert.cpp
@@ -13,10 +13,11 @@
// limitations under the License.
#include "dawn/common/Assert.h"
-#include "dawn/common/Log.h"
#include <cstdlib>
+#include "dawn/common/Log.h"
+
void HandleAssertionFailure(const char* file,
const char* function,
int line,
diff --git a/chromium/third_party/dawn/src/dawn/common/Assert.h b/chromium/third_party/dawn/src/dawn/common/Assert.h
index ee9eeb477ba..244cd41e951 100644
--- a/chromium/third_party/dawn/src/dawn/common/Assert.h
+++ b/chromium/third_party/dawn/src/dawn/common/Assert.h
@@ -31,33 +31,33 @@
// MSVC triggers a warning in /W4 for do {} while(0). SDL worked around this by using (0,0) and
// points out that it looks like an owl face.
-#if defined(DAWN_COMPILER_MSVC)
-# define DAWN_ASSERT_LOOP_CONDITION (0, 0)
+#if DAWN_COMPILER_IS(MSVC)
+#define DAWN_ASSERT_LOOP_CONDITION (0, 0)
#else
-# define DAWN_ASSERT_LOOP_CONDITION (0)
+#define DAWN_ASSERT_LOOP_CONDITION (0)
#endif
// DAWN_ASSERT_CALLSITE_HELPER generates the actual assert code. In Debug it does what you would
// expect of an assert and in release it tries to give hints to make the compiler generate better
// code.
#if defined(DAWN_ENABLE_ASSERTS)
-# define DAWN_ASSERT_CALLSITE_HELPER(file, func, line, condition) \
- do { \
- if (!(condition)) { \
- HandleAssertionFailure(file, func, line, #condition); \
- } \
- } while (DAWN_ASSERT_LOOP_CONDITION)
+#define DAWN_ASSERT_CALLSITE_HELPER(file, func, line, condition) \
+ do { \
+ if (!(condition)) { \
+ HandleAssertionFailure(file, func, line, #condition); \
+ } \
+ } while (DAWN_ASSERT_LOOP_CONDITION)
+#else
+#if DAWN_COMPILER_IS(MSVC)
+#define DAWN_ASSERT_CALLSITE_HELPER(file, func, line, condition) __assume(condition)
+#elif DAWN_COMPILER_IS(CLANG) && defined(__builtin_assume)
+#define DAWN_ASSERT_CALLSITE_HELPER(file, func, line, condition) __builtin_assume(condition)
#else
-# if defined(DAWN_COMPILER_MSVC)
-# define DAWN_ASSERT_CALLSITE_HELPER(file, func, line, condition) __assume(condition)
-# elif defined(DAWN_COMPILER_CLANG) && defined(__builtin_assume)
-# define DAWN_ASSERT_CALLSITE_HELPER(file, func, line, condition) __builtin_assume(condition)
-# else
-# define DAWN_ASSERT_CALLSITE_HELPER(file, func, line, condition) \
- do { \
- DAWN_UNUSED(sizeof(condition)); \
- } while (DAWN_ASSERT_LOOP_CONDITION)
-# endif
+#define DAWN_ASSERT_CALLSITE_HELPER(file, func, line, condition) \
+ do { \
+ DAWN_UNUSED(sizeof(condition)); \
+ } while (DAWN_ASSERT_LOOP_CONDITION)
+#endif
#endif
#define DAWN_ASSERT(condition) DAWN_ASSERT_CALLSITE_HELPER(__FILE__, __func__, __LINE__, condition)
@@ -68,8 +68,8 @@
} while (DAWN_ASSERT_LOOP_CONDITION)
#if !defined(DAWN_SKIP_ASSERT_SHORTHANDS)
-# define ASSERT DAWN_ASSERT
-# define UNREACHABLE DAWN_UNREACHABLE
+#define ASSERT DAWN_ASSERT
+#define UNREACHABLE DAWN_UNREACHABLE
#endif
void HandleAssertionFailure(const char* file,
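[Editor's sketch] For context on the macros reflowed above: with DAWN_ENABLE_ASSERTS a failing condition reports through HandleAssertionFailure, while in release builds the condition only becomes a compiler hint, so asserted expressions must stay side-effect free. A small usage sketch with an illustrative function:

#include "dawn/common/Assert.h"

// Illustrative only: the assert documents an invariant; it is not input validation,
// because in release builds the check compiles down to a hint (or nothing).
int CheckedDivide(int numerator, int denominator) {
    ASSERT(denominator != 0);
    return numerator / denominator;
}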
diff --git a/chromium/third_party/dawn/src/dawn/common/BUILD.gn b/chromium/third_party/dawn/src/dawn/common/BUILD.gn
index d0b008672a6..a9ecded0c81 100644
--- a/chromium/third_party/dawn/src/dawn/common/BUILD.gn
+++ b/chromium/third_party/dawn/src/dawn/common/BUILD.gn
@@ -49,8 +49,13 @@ config("internal_config") {
}
if (use_fuzzing_engine) {
- # Does a hard abort when an assertion fails so that fuzzers catch and parse the failure.
- defines += [ "DAWN_ABORT_ON_ASSERT" ]
+ defines += [
+ # Does a hard abort when an assertion fails so that fuzzers catch and parse the failure.
+ "DAWN_ABORT_ON_ASSERT",
+
+ # Disable logging to make fuzzing more efficient.
+ "DAWN_DISABLE_LOGGING",
+ ]
}
if (dawn_enable_d3d12) {
@@ -75,6 +80,9 @@ config("internal_config") {
defines += [ "DAWN_ENABLE_BACKEND_VULKAN" ]
}
+ if (dawn_use_wayland) {
+ defines += [ "DAWN_USE_WAYLAND" ]
+ }
if (dawn_use_x11) {
defines += [ "DAWN_USE_X11" ]
}
@@ -118,9 +126,12 @@ config("internal_config") {
"-Wredundant-move",
"-Wshadow-field",
"-Wstrict-prototypes",
+ "-Wsuggest-destructor-override",
+ "-Wsuggest-override",
"-Wtautological-unsigned-zero-compare",
"-Wunreachable-code-aggressive",
"-Wunused-but-set-variable",
+ "-Wunused-macros",
]
if (is_win) {
@@ -177,6 +188,18 @@ dawn_generator("dawn_version_gen") {
outputs = [ "src/dawn/common/Version_autogen.h" ]
}
+dawn_generator("dawn_gpu_info_gen") {
+ script = "${dawn_root}/generator/dawn_gpu_info_generator.py"
+ args = [
+ "--gpu-info-json",
+ rebase_path("${dawn_root}/gpu_info.json", root_build_dir),
+ ]
+ outputs = [
+ "src/dawn/common/GPUInfo_autogen.h",
+ "src/dawn/common/GPUInfo_autogen.cpp",
+ ]
+}
+
# This GN file is discovered by all Chromium builds, but common doesn't support
# all of Chromium's OSes so we explicitly make the target visible only on
# systems we know Dawn is able to compile on.
@@ -204,6 +227,7 @@ if (is_win || is_linux || is_chromeos || is_mac || is_fuchsia || is_android) {
"Math.h",
"NSRef.h",
"NonCopyable.h",
+ "Numeric.h",
"PlacementAllocated.h",
"Platform.h",
"Preprocessor.h",
@@ -232,8 +256,12 @@ if (is_win || is_linux || is_chromeos || is_mac || is_fuchsia || is_android) {
"vulkan_platform.h",
"xlib_with_undefs.h",
]
+ sources += get_target_outputs(":dawn_gpu_info_gen")
- public_deps = [ ":dawn_version_gen" ]
+ public_deps = [
+ ":dawn_gpu_info_gen",
+ ":dawn_version_gen",
+ ]
if (is_mac) {
sources += [ "SystemUtils_mac.mm" ]
diff --git a/chromium/third_party/dawn/src/dawn/common/BitSetIterator.h b/chromium/third_party/dawn/src/dawn/common/BitSetIterator.h
index 05f2916af82..a0112497148 100644
--- a/chromium/third_party/dawn/src/dawn/common/BitSetIterator.h
+++ b/chromium/third_party/dawn/src/dawn/common/BitSetIterator.h
@@ -15,14 +15,15 @@
#ifndef SRC_DAWN_COMMON_BITSETITERATOR_H_
#define SRC_DAWN_COMMON_BITSETITERATOR_H_
+#include <bitset>
+#include <limits>
+
#include "dawn/common/Assert.h"
#include "dawn/common/Math.h"
#include "dawn/common/UnderlyingType.h"
-#include <bitset>
-#include <limits>
-
-// This is ANGLE's BitSetIterator class with a customizable return type
+// This is ANGLE's BitSetIterator class with a customizable return type.
+// Types have been updated to be more specific.
// TODO(crbug.com/dawn/306): it could be optimized, in particular when N <= 64
template <typename T>
@@ -53,32 +54,26 @@ class BitSetIterator final {
}
private:
- unsigned long getNextBit();
+ uint32_t getNextBit();
static constexpr size_t kBitsPerWord = sizeof(uint32_t) * 8;
std::bitset<N> mBits;
- unsigned long mCurrentBit;
- unsigned long mOffset;
+ uint32_t mCurrentBit;
+ uint32_t mOffset;
};
- Iterator begin() const {
- return Iterator(mBits);
- }
- Iterator end() const {
- return Iterator(std::bitset<N>(0));
- }
+ Iterator begin() const { return Iterator(mBits); }
+ Iterator end() const { return Iterator(std::bitset<N>(0)); }
private:
const std::bitset<N> mBits;
};
template <size_t N, typename T>
-BitSetIterator<N, T>::BitSetIterator(const std::bitset<N>& bitset) : mBits(bitset) {
-}
+BitSetIterator<N, T>::BitSetIterator(const std::bitset<N>& bitset) : mBits(bitset) {}
template <size_t N, typename T>
-BitSetIterator<N, T>::BitSetIterator(const BitSetIterator& other) : mBits(other.mBits) {
-}
+BitSetIterator<N, T>::BitSetIterator(const BitSetIterator& other) : mBits(other.mBits) {}
template <size_t N, typename T>
BitSetIterator<N, T>& BitSetIterator<N, T>::operator=(const BitSetIterator& other) {
@@ -92,7 +87,7 @@ BitSetIterator<N, T>::Iterator::Iterator(const std::bitset<N>& bits)
if (bits.any()) {
mCurrentBit = getNextBit();
} else {
- mOffset = static_cast<unsigned long>(roundUp(N, kBitsPerWord));
+ mOffset = static_cast<uint32_t>(roundUp(N, kBitsPerWord));
}
}
@@ -115,7 +110,7 @@ bool BitSetIterator<N, T>::Iterator::operator!=(const Iterator& other) const {
}
template <size_t N, typename T>
-unsigned long BitSetIterator<N, T>::Iterator::getNextBit() {
+uint32_t BitSetIterator<N, T>::Iterator::getNextBit() {
static std::bitset<N> wordMask(std::numeric_limits<uint32_t>::max());
while (mOffset < N) {
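[Editor's sketch] The BitSetIterator changes above swap unsigned long for uint32_t but keep the iteration contract: walk only the set bits of a std::bitset and yield each index as the requested type T. A hedged usage sketch, assuming the class is reachable unqualified as declared in this header and that T may be a plain integer type such as uint32_t:

#include <bitset>
#include <cstdint>

#include "dawn/common/BitSetIterator.h"

// Illustrative only: sum the indices of the set bits, typed as uint32_t.
uint32_t SumOfSetBitIndices(const std::bitset<32>& mask) {
    uint32_t sum = 0;
    for (uint32_t bitIndex : BitSetIterator<32, uint32_t>(mask)) {
        sum += bitIndex;  // the iterator visits only indices whose bit is set
    }
    return sum;
}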
diff --git a/chromium/third_party/dawn/src/dawn/common/CMakeLists.txt b/chromium/third_party/dawn/src/dawn/common/CMakeLists.txt
index 770d0840ba6..7e1c373146d 100644
--- a/chromium/third_party/dawn/src/dawn/common/CMakeLists.txt
+++ b/chromium/third_party/dawn/src/dawn/common/CMakeLists.txt
@@ -20,10 +20,19 @@ DawnGenerator(
RESULT_VARIABLE "DAWN_VERSION_AUTOGEN_SOURCES"
)
-add_library(dawn_common STATIC ${DAWN_DUMMY_FILE})
+DawnGenerator(
+ SCRIPT "${Dawn_SOURCE_DIR}/generator/dawn_gpu_info_generator.py"
+ PRINT_NAME "Dawn GPU info utilities"
+ ARGS "--gpu-info-json"
+ "${Dawn_SOURCE_DIR}/gpu_info.json"
+ RESULT_VARIABLE "DAWN_GPU_INFO_AUTOGEN_SOURCES"
+)
+
+add_library(dawn_common STATIC ${DAWN_PLACEHOLDER_FILE})
common_compile_options(dawn_common)
target_sources(dawn_common PRIVATE
${DAWN_VERSION_AUTOGEN_SOURCES}
+ ${DAWN_GPU_INFO_AUTOGEN_SOURCES}
"Alloc.h"
"Assert.cpp"
"Assert.h"
@@ -45,6 +54,7 @@ target_sources(dawn_common PRIVATE
"Math.h"
"NSRef.h"
"NonCopyable.h"
+ "Numeric.h"
"PlacementAllocated.h"
"Platform.h"
"Preprocessor.h"
diff --git a/chromium/third_party/dawn/src/dawn/common/Compiler.h b/chromium/third_party/dawn/src/dawn/common/Compiler.h
index db759333cd6..eb7e6239f15 100644
--- a/chromium/third_party/dawn/src/dawn/common/Compiler.h
+++ b/chromium/third_party/dawn/src/dawn/common/Compiler.h
@@ -16,7 +16,7 @@
#define SRC_DAWN_COMMON_COMPILER_H_
// Defines macros for compiler-specific functionality
-// - DAWN_COMPILER_[CLANG|GCC|MSVC]: Compiler detection
+// - DAWN_COMPILER_IS(CLANG|GCC|MSVC): Compiler detection
// - DAWN_BREAKPOINT(): Raises an exception and breaks in the debugger
// - DAWN_BUILTIN_UNREACHABLE(): Hints the compiler that a code path is unreachable
// - DAWN_(UN)?LIKELY(EXPR): Where available, hints the compiler that the expression will be true
@@ -29,51 +29,68 @@
// Clang and GCC, check for __clang__ too to catch clang-cl masquerading as MSVC
#if defined(__GNUC__) || defined(__clang__)
-# if defined(__clang__)
-# define DAWN_COMPILER_CLANG
-# else
-# define DAWN_COMPILER_GCC
-# endif
-
-# if defined(__i386__) || defined(__x86_64__)
-# define DAWN_BREAKPOINT() __asm__ __volatile__("int $3\n\t")
-# else
+#if defined(__clang__)
+#define DAWN_COMPILER_IS_CLANG 1
+#else
+#define DAWN_COMPILER_IS_GCC 1
+#endif
+
+#if defined(__i386__) || defined(__x86_64__)
+#define DAWN_BREAKPOINT() __asm__ __volatile__("int $3\n\t")
+#else
// TODO(cwallez@chromium.org): Implement breakpoint on all supported architectures
-# define DAWN_BREAKPOINT()
-# endif
+#define DAWN_BREAKPOINT()
+#endif
-# define DAWN_BUILTIN_UNREACHABLE() __builtin_unreachable()
-# define DAWN_LIKELY(x) __builtin_expect(!!(x), 1)
-# define DAWN_UNLIKELY(x) __builtin_expect(!!(x), 0)
+#define DAWN_BUILTIN_UNREACHABLE() __builtin_unreachable()
+#define DAWN_LIKELY(x) __builtin_expect(!!(x), 1)
+#define DAWN_UNLIKELY(x) __builtin_expect(!!(x), 0)
-# if !defined(__has_cpp_attribute)
-# define __has_cpp_attribute(name) 0
-# endif
+#if !defined(__has_cpp_attribute)
+#define __has_cpp_attribute(name) 0
+#endif
-# define DAWN_DECLARE_UNUSED __attribute__((unused))
-# if defined(NDEBUG)
-# define DAWN_FORCE_INLINE inline __attribute__((always_inline))
-# endif
-# define DAWN_NOINLINE __attribute__((noinline))
+#define DAWN_DECLARE_UNUSED __attribute__((unused))
+#if defined(NDEBUG)
+#define DAWN_FORCE_INLINE inline __attribute__((always_inline))
+#endif
+#define DAWN_NOINLINE __attribute__((noinline))
// MSVC
#elif defined(_MSC_VER)
-# define DAWN_COMPILER_MSVC
+#define DAWN_COMPILER_IS_MSVC 1
extern void __cdecl __debugbreak(void);
-# define DAWN_BREAKPOINT() __debugbreak()
+#define DAWN_BREAKPOINT() __debugbreak()
-# define DAWN_BUILTIN_UNREACHABLE() __assume(false)
+#define DAWN_BUILTIN_UNREACHABLE() __assume(false)
-# define DAWN_DECLARE_UNUSED
-# if defined(NDEBUG)
-# define DAWN_FORCE_INLINE __forceinline
-# endif
-# define DAWN_NOINLINE __declspec(noinline)
+#define DAWN_DECLARE_UNUSED
+#if defined(NDEBUG)
+#define DAWN_FORCE_INLINE __forceinline
+#endif
+#define DAWN_NOINLINE __declspec(noinline)
#else
-# error "Unsupported compiler"
+#error "Unsupported compiler"
+#endif
+
+// This section defines the remaining compiler macros to 0 to avoid errors from undefined macro usage.
+#if !defined(DAWN_COMPILER_IS_CLANG)
+#define DAWN_COMPILER_IS_CLANG 0
#endif
+#if !defined(DAWN_COMPILER_IS_GCC)
+#define DAWN_COMPILER_IS_GCC 0
+#endif
+#if !defined(DAWN_COMPILER_IS_MSVC)
+#define DAWN_COMPILER_IS_MSVC 0
+#endif
+
+// Use #if DAWN_COMPILER_IS(XXX) for compiler-specific code.
+// Do not use #ifdef or the naked macro DAWN_COMPILER_IS_XXX.
+// This helps avoid common mistakes such as not including "Compiler.h" and silently falling into
+// the wrong code block, because using the undefined function-like macro is rejected by the
+// compiler.
+#define DAWN_COMPILER_IS(X) (1 == DAWN_COMPILER_IS_##X)
// It seems that (void) EXPR works on all compilers to silence the unused variable warning.
#define DAWN_UNUSED(EXPR) (void)EXPR
@@ -82,16 +99,16 @@ extern void __cdecl __debugbreak(void);
// Add noop replacements for macros for features that aren't supported by the compiler.
#if !defined(DAWN_LIKELY)
-# define DAWN_LIKELY(X) X
+#define DAWN_LIKELY(X) X
#endif
#if !defined(DAWN_UNLIKELY)
-# define DAWN_UNLIKELY(X) X
+#define DAWN_UNLIKELY(X) X
#endif
#if !defined(DAWN_FORCE_INLINE)
-# define DAWN_FORCE_INLINE inline
+#define DAWN_FORCE_INLINE inline
#endif
#if !defined(DAWN_NOINLINE)
-# define DAWN_NOINLINE
+#define DAWN_NOINLINE
#endif
#endif // SRC_DAWN_COMMON_COMPILER_H_
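[Editor's sketch] A short sketch of the pattern the new comment prescribes: because every DAWN_COMPILER_IS_XXX macro is now defined to 0 or 1, forgetting to include "Compiler.h" (or misspelling a compiler name) leaves the function-like macro undefined and the #if fails to preprocess instead of silently taking the wrong branch. The MY_FORCE_INLINE macro below is illustrative only:

#include "dawn/common/Compiler.h"

// Illustrative only: select a compiler-specific attribute with the checked macro.
#if DAWN_COMPILER_IS(MSVC)
#define MY_FORCE_INLINE __forceinline
#elif DAWN_COMPILER_IS(CLANG) || DAWN_COMPILER_IS(GCC)
#define MY_FORCE_INLINE inline __attribute__((always_inline))
#else
#define MY_FORCE_INLINE inline
#endif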
diff --git a/chromium/third_party/dawn/src/dawn/common/ConcurrentCache.h b/chromium/third_party/dawn/src/dawn/common/ConcurrentCache.h
index 87543ef8fe5..99c41ac6b4c 100644
--- a/chromium/third_party/dawn/src/dawn/common/ConcurrentCache.h
+++ b/chromium/third_party/dawn/src/dawn/common/ConcurrentCache.h
@@ -15,12 +15,12 @@
#ifndef SRC_DAWN_COMMON_CONCURRENTCACHE_H_
#define SRC_DAWN_COMMON_CONCURRENTCACHE_H_
-#include "dawn/common/NonCopyable.h"
-
#include <mutex>
#include <unordered_set>
#include <utility>
+#include "dawn/common/NonCopyable.h"
+
template <typename T>
class ConcurrentCache : public NonMovable {
public:
diff --git a/chromium/third_party/dawn/src/dawn/common/CoreFoundationRef.h b/chromium/third_party/dawn/src/dawn/common/CoreFoundationRef.h
index f77e8e08c51..3a4724e6ec7 100644
--- a/chromium/third_party/dawn/src/dawn/common/CoreFoundationRef.h
+++ b/chromium/third_party/dawn/src/dawn/common/CoreFoundationRef.h
@@ -15,19 +15,15 @@
#ifndef SRC_DAWN_COMMON_COREFOUNDATIONREF_H_
#define SRC_DAWN_COMMON_COREFOUNDATIONREF_H_
-#include "dawn/common/RefBase.h"
-
#include <CoreFoundation/CoreFoundation.h>
+#include "dawn/common/RefBase.h"
+
template <typename T>
struct CoreFoundationRefTraits {
static constexpr T kNullValue = nullptr;
- static void Reference(T value) {
- CFRetain(value);
- }
- static void Release(T value) {
- CFRelease(value);
- }
+ static void Reference(T value) { CFRetain(value); }
+ static void Release(T value) { CFRelease(value); }
};
template <typename T>
diff --git a/chromium/third_party/dawn/src/dawn/common/DynamicLib.cpp b/chromium/third_party/dawn/src/dawn/common/DynamicLib.cpp
index ab4f2d795dd..479ca653c65 100644
--- a/chromium/third_party/dawn/src/dawn/common/DynamicLib.cpp
+++ b/chromium/third_party/dawn/src/dawn/common/DynamicLib.cpp
@@ -14,17 +14,19 @@
#include "dawn/common/DynamicLib.h"
+#include <utility>
+
#include "dawn/common/Platform.h"
-#if DAWN_PLATFORM_WINDOWS
-# include "dawn/common/windows_with_undefs.h"
-# if DAWN_PLATFORM_WINUWP
-# include "dawn/common/WindowsUtils.h"
-# endif
-#elif DAWN_PLATFORM_POSIX
-# include <dlfcn.h>
+#if DAWN_PLATFORM_IS(WINDOWS)
+#include "dawn/common/windows_with_undefs.h"
+#if DAWN_PLATFORM_IS(WINUWP)
+#include "dawn/common/WindowsUtils.h"
+#endif
+#elif DAWN_PLATFORM_IS(POSIX)
+#include <dlfcn.h>
#else
-# error "Unsupported platform for DynamicLib"
+#error "Unsupported platform for DynamicLib"
#endif
DynamicLib::~DynamicLib() {
@@ -45,23 +47,23 @@ bool DynamicLib::Valid() const {
}
bool DynamicLib::Open(const std::string& filename, std::string* error) {
-#if DAWN_PLATFORM_WINDOWS
-# if DAWN_PLATFORM_WINUWP
+#if DAWN_PLATFORM_IS(WINDOWS)
+#if DAWN_PLATFORM_IS(WINUWP)
mHandle = LoadPackagedLibrary(UTF8ToWStr(filename.c_str()).c_str(), 0);
-# else
+#else
mHandle = LoadLibraryA(filename.c_str());
-# endif
+#endif
if (mHandle == nullptr && error != nullptr) {
*error = "Windows Error: " + std::to_string(GetLastError());
}
-#elif DAWN_PLATFORM_POSIX
+#elif DAWN_PLATFORM_IS(POSIX)
mHandle = dlopen(filename.c_str(), RTLD_NOW);
if (mHandle == nullptr && error != nullptr) {
*error = dlerror();
}
#else
-# error "Unsupported platform for DynamicLib"
+#error "Unsupported platform for DynamicLib"
#endif
return mHandle != nullptr;
@@ -72,12 +74,12 @@ void DynamicLib::Close() {
return;
}
-#if DAWN_PLATFORM_WINDOWS
+#if DAWN_PLATFORM_IS(WINDOWS)
FreeLibrary(static_cast<HMODULE>(mHandle));
-#elif DAWN_PLATFORM_POSIX
+#elif DAWN_PLATFORM_IS(POSIX)
dlclose(mHandle);
#else
-# error "Unsupported platform for DynamicLib"
+#error "Unsupported platform for DynamicLib"
#endif
mHandle = nullptr;
@@ -86,20 +88,20 @@ void DynamicLib::Close() {
void* DynamicLib::GetProc(const std::string& procName, std::string* error) const {
void* proc = nullptr;
-#if DAWN_PLATFORM_WINDOWS
+#if DAWN_PLATFORM_IS(WINDOWS)
proc = reinterpret_cast<void*>(GetProcAddress(static_cast<HMODULE>(mHandle), procName.c_str()));
if (proc == nullptr && error != nullptr) {
*error = "Windows Error: " + std::to_string(GetLastError());
}
-#elif DAWN_PLATFORM_POSIX
+#elif DAWN_PLATFORM_IS(POSIX)
proc = reinterpret_cast<void*>(dlsym(mHandle, procName.c_str()));
if (proc == nullptr && error != nullptr) {
*error = dlerror();
}
#else
-# error "Unsupported platform for DynamicLib"
+#error "Unsupported platform for DynamicLib"
#endif
return proc;
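
A usage sketch based on the Open/GetProc signatures visible above; the library and symbol names are placeholders, and error text comes back through the optional std::string* out-parameter:

#include <string>

#include "dawn/common/DynamicLib.h"

bool LoadEntryPoint() {
    DynamicLib lib;
    std::string error;
    // Open() wraps LoadLibraryA/LoadPackagedLibrary on Windows and dlopen(RTLD_NOW) on POSIX.
    if (!lib.Open("libplaceholder.so", &error)) {
        return false;  // |error| holds the GetLastError()/dlerror() text.
    }
    // GetProc() wraps GetProcAddress/dlsym and returns nullptr on failure.
    void* proc = lib.GetProc("placeholder_function", &error);
    return proc != nullptr;
}
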
diff --git a/chromium/third_party/dawn/src/dawn/common/DynamicLib.h b/chromium/third_party/dawn/src/dawn/common/DynamicLib.h
index 00f3960194f..a5980f5fc27 100644
--- a/chromium/third_party/dawn/src/dawn/common/DynamicLib.h
+++ b/chromium/third_party/dawn/src/dawn/common/DynamicLib.h
@@ -15,11 +15,11 @@
#ifndef SRC_DAWN_COMMON_DYNAMICLIB_H_
#define SRC_DAWN_COMMON_DYNAMICLIB_H_
-#include "dawn/common/Assert.h"
-
#include <string>
#include <type_traits>
+#include "dawn/common/Assert.h"
+
class DynamicLib {
public:
DynamicLib() = default;
diff --git a/chromium/third_party/dawn/src/dawn/common/GPUInfo.cpp b/chromium/third_party/dawn/src/dawn/common/GPUInfo.cpp
index ddd8459703f..8a6f75d6b8b 100644
--- a/chromium/third_party/dawn/src/dawn/common/GPUInfo.cpp
+++ b/chromium/third_party/dawn/src/dawn/common/GPUInfo.cpp
@@ -14,95 +14,48 @@
#include "dawn/common/GPUInfo.h"
-#include "dawn/common/Assert.h"
-
#include <algorithm>
-#include <array>
-
-namespace gpu_info {
- namespace {
- // Intel
- // Referenced from the following Mesa source code:
- // https://github.com/mesa3d/mesa/blob/master/include/pci_ids/i965_pci_ids.h
- // gen9
- const std::array<uint32_t, 25> Skylake = {
- {0x1902, 0x1906, 0x190A, 0x190B, 0x190E, 0x1912, 0x1913, 0x1915, 0x1916,
- 0x1917, 0x191A, 0x191B, 0x191D, 0x191E, 0x1921, 0x1923, 0x1926, 0x1927,
- 0x192A, 0x192B, 0x192D, 0x1932, 0x193A, 0x193B, 0x193D}};
- // gen9p5
- const std::array<uint32_t, 20> Kabylake = {
- {0x5916, 0x5913, 0x5906, 0x5926, 0x5921, 0x5915, 0x590E, 0x591E, 0x5912, 0x5917,
- 0x5902, 0x591B, 0x593B, 0x590B, 0x591A, 0x590A, 0x591D, 0x5908, 0x5923, 0x5927}};
- const std::array<uint32_t, 17> Coffeelake = {
- {0x87CA, 0x3E90, 0x3E93, 0x3E99, 0x3E9C, 0x3E91, 0x3E92, 0x3E96, 0x3E98, 0x3E9A, 0x3E9B,
- 0x3E94, 0x3EA9, 0x3EA5, 0x3EA6, 0x3EA7, 0x3EA8}};
- const std::array<uint32_t, 5> Whiskylake = {{0x3EA1, 0x3EA4, 0x3EA0, 0x3EA3, 0x3EA2}};
- const std::array<uint32_t, 21> Cometlake = {
- {0x9B21, 0x9BA0, 0x9BA2, 0x9BA4, 0x9BA5, 0x9BA8, 0x9BAA, 0x9BAB, 0x9BAC, 0x9B41, 0x9BC0,
- 0x9BC2, 0x9BC4, 0x9BC5, 0x9BC6, 0x9BC8, 0x9BCA, 0x9BCB, 0x9BCC, 0x9BE6, 0x9BF6}};
-
- // According to Intel graphics driver version schema, build number is generated from the
- // last two fields.
- // See https://www.intel.com/content/www/us/en/support/articles/000005654/graphics.html for
- // more details.
- uint32_t GetIntelD3DDriverBuildNumber(const D3DDriverVersion& driverVersion) {
- return driverVersion[2] * 10000 + driverVersion[3];
- }
-
- } // anonymous namespace
-
- bool IsAMD(PCIVendorID vendorId) {
- return vendorId == kVendorID_AMD;
- }
- bool IsARM(PCIVendorID vendorId) {
- return vendorId == kVendorID_ARM;
- }
- bool IsImgTec(PCIVendorID vendorId) {
- return vendorId == kVendorID_ImgTec;
- }
- bool IsIntel(PCIVendorID vendorId) {
- return vendorId == kVendorID_Intel;
- }
- bool IsMesa(PCIVendorID vendorId) {
- return vendorId == kVendorID_Mesa;
- }
- bool IsNvidia(PCIVendorID vendorId) {
- return vendorId == kVendorID_Nvidia;
- }
- bool IsQualcomm(PCIVendorID vendorId) {
- return vendorId == kVendorID_Qualcomm;
- }
- bool IsSwiftshader(PCIVendorID vendorId, PCIDeviceID deviceId) {
- return vendorId == kVendorID_Google && deviceId == kDeviceID_Swiftshader;
- }
- bool IsWARP(PCIVendorID vendorId, PCIDeviceID deviceId) {
- return vendorId == kVendorID_Microsoft && deviceId == kDeviceID_WARP;
- }
- int CompareD3DDriverVersion(PCIVendorID vendorId,
- const D3DDriverVersion& version1,
- const D3DDriverVersion& version2) {
- if (IsIntel(vendorId)) {
- uint32_t buildNumber1 = GetIntelD3DDriverBuildNumber(version1);
- uint32_t buildNumber2 = GetIntelD3DDriverBuildNumber(version2);
- return buildNumber1 < buildNumber2 ? -1 : (buildNumber1 == buildNumber2 ? 0 : 1);
- }
+#include "dawn/common/Assert.h"
- // TODO(crbug.com/dawn/823): support other GPU vendors
- UNREACHABLE();
- return 0;
- }
+namespace gpu_info {
+namespace {
+// Intel
+// Referenced from the following Mesa source code:
+// https://github.com/mesa3d/mesa/blob/main/include/pci_ids/iris_pci_ids.h
+// gen9
+const std::array<uint32_t, 25> Skylake = {{0x1902, 0x1906, 0x190A, 0x190B, 0x190E, 0x1912, 0x1913,
+ 0x1915, 0x1916, 0x1917, 0x191A, 0x191B, 0x191D, 0x191E,
+ 0x1921, 0x1923, 0x1926, 0x1927, 0x192A, 0x192B, 0x192D,
+ 0x1932, 0x193A, 0x193B, 0x193D}};
+
+// According to Intel graphics driver version schema, build number is generated from the
+// last two fields.
+// See https://www.intel.com/content/www/us/en/support/articles/000005654/graphics.html for
+// more details.
+uint32_t GetIntelD3DDriverBuildNumber(const D3DDriverVersion& driverVersion) {
+ return driverVersion[2] * 10000 + driverVersion[3];
+}
+
+} // anonymous namespace
+
+int CompareD3DDriverVersion(PCIVendorID vendorId,
+ const D3DDriverVersion& version1,
+ const D3DDriverVersion& version2) {
+ if (IsIntel(vendorId)) {
+ uint32_t buildNumber1 = GetIntelD3DDriverBuildNumber(version1);
+ uint32_t buildNumber2 = GetIntelD3DDriverBuildNumber(version2);
+ return buildNumber1 < buildNumber2 ? -1 : (buildNumber1 == buildNumber2 ? 0 : 1);
+ }
+
+ // TODO(crbug.com/dawn/823): support other GPU vendors
+ UNREACHABLE();
+ return 0;
+}
+
+// Intel GPUs
+bool IsSkylake(PCIDeviceID deviceId) {
+ return std::find(Skylake.cbegin(), Skylake.cend(), deviceId) != Skylake.cend();
+}
- // Intel GPUs
- bool IsSkylake(PCIDeviceID deviceId) {
- return std::find(Skylake.cbegin(), Skylake.cend(), deviceId) != Skylake.cend();
- }
- bool IsKabylake(PCIDeviceID deviceId) {
- return std::find(Kabylake.cbegin(), Kabylake.cend(), deviceId) != Kabylake.cend();
- }
- bool IsCoffeelake(PCIDeviceID deviceId) {
- return (std::find(Coffeelake.cbegin(), Coffeelake.cend(), deviceId) != Coffeelake.cend()) ||
- (std::find(Whiskylake.cbegin(), Whiskylake.cend(), deviceId) != Whiskylake.cend()) ||
- (std::find(Cometlake.cbegin(), Cometlake.cend(), deviceId) != Cometlake.cend());
- }
} // namespace gpu_info
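
A worked example of the comparison above, assuming Intel D3D driver versions of the form a.b.build_hi.build_lo and that kVendorID_Intel is still provided (now via the autogenerated GPUInfo_autogen.h):

#include "dawn/common/GPUInfo.h"

int CompareExample() {
    // Hypothetical driver versions; the build number is field3 * 10000 + field4.
    gpu_info::D3DDriverVersion older = {27, 20, 100, 8935};  // build 1008935
    gpu_info::D3DDriverVersion newer = {30, 0, 101, 1660};   // build 1011660
    // Returns 1 because the first version argument has the larger build number.
    return gpu_info::CompareD3DDriverVersion(gpu_info::kVendorID_Intel, newer, older);
}
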
diff --git a/chromium/third_party/dawn/src/dawn/common/GPUInfo.h b/chromium/third_party/dawn/src/dawn/common/GPUInfo.h
index 9a036771859..a96b9072514 100644
--- a/chromium/third_party/dawn/src/dawn/common/GPUInfo.h
+++ b/chromium/third_party/dawn/src/dawn/common/GPUInfo.h
@@ -15,52 +15,25 @@
#ifndef SRC_DAWN_COMMON_GPUINFO_H_
#define SRC_DAWN_COMMON_GPUINFO_H_
-#include <array>
-#include <cstdint>
+#include "dawn/common/GPUInfo_autogen.h"
-using PCIVendorID = uint32_t;
-using PCIDeviceID = uint32_t;
+#include <array>
namespace gpu_info {
- static constexpr PCIVendorID kVendorID_AMD = 0x1002;
- static constexpr PCIVendorID kVendorID_ARM = 0x13B5;
- static constexpr PCIVendorID kVendorID_ImgTec = 0x1010;
- static constexpr PCIVendorID kVendorID_Intel = 0x8086;
- static constexpr PCIVendorID kVendorID_Mesa = 0x10005;
- static constexpr PCIVendorID kVendorID_Nvidia = 0x10DE;
- static constexpr PCIVendorID kVendorID_Qualcomm = 0x5143;
- static constexpr PCIVendorID kVendorID_Google = 0x1AE0;
- static constexpr PCIVendorID kVendorID_Microsoft = 0x1414;
-
- static constexpr PCIDeviceID kDeviceID_Swiftshader = 0xC0DE;
- static constexpr PCIDeviceID kDeviceID_WARP = 0x8c;
-
- bool IsAMD(PCIVendorID vendorId);
- bool IsARM(PCIVendorID vendorId);
- bool IsImgTec(PCIVendorID vendorId);
- bool IsIntel(PCIVendorID vendorId);
- bool IsMesa(PCIVendorID vendorId);
- bool IsNvidia(PCIVendorID vendorId);
- bool IsQualcomm(PCIVendorID vendorId);
- bool IsSwiftshader(PCIVendorID vendorId, PCIDeviceID deviceId);
- bool IsWARP(PCIVendorID vendorId, PCIDeviceID deviceId);
-
- using D3DDriverVersion = std::array<uint16_t, 4>;
+using D3DDriverVersion = std::array<uint16_t, 4>;
- // Do comparison between two driver versions. Currently we only support the comparison between
- // Intel D3D driver versions.
- // - Return -1 if build number of version1 is smaller
- // - Return 1 if build number of version1 is bigger
- // - Return 0 if version1 and version2 represent same driver version
- int CompareD3DDriverVersion(PCIVendorID vendorId,
- const D3DDriverVersion& version1,
- const D3DDriverVersion& version2);
+// Do comparison between two driver versions. Currently we only support the comparison between
+// Intel D3D driver versions.
+// - Return -1 if build number of version1 is smaller
+// - Return 1 if build number of version1 is bigger
+// - Return 0 if version1 and version2 represent same driver version
+int CompareD3DDriverVersion(PCIVendorID vendorId,
+ const D3DDriverVersion& version1,
+ const D3DDriverVersion& version2);
- // Intel architectures
- bool IsSkylake(PCIDeviceID deviceId);
- bool IsKabylake(PCIDeviceID deviceId);
- bool IsCoffeelake(PCIDeviceID deviceId);
+// Intel architectures
+bool IsSkylake(PCIDeviceID deviceId);
} // namespace gpu_info
#endif // SRC_DAWN_COMMON_GPUINFO_H_
diff --git a/chromium/third_party/dawn/src/dawn/common/HashUtils.h b/chromium/third_party/dawn/src/dawn/common/HashUtils.h
index 3aaa960fd37..e8d17821f46 100644
--- a/chromium/third_party/dawn/src/dawn/common/HashUtils.h
+++ b/chromium/third_party/dawn/src/dawn/common/HashUtils.h
@@ -15,13 +15,13 @@
#ifndef SRC_DAWN_COMMON_HASHUTILS_H_
#define SRC_DAWN_COMMON_HASHUTILS_H_
+#include <bitset>
+#include <functional>
+
#include "dawn/common/Platform.h"
#include "dawn/common/TypedInteger.h"
#include "dawn/common/ityp_bitset.h"
-#include <bitset>
-#include <functional>
-
// Wrapper around std::hash to make it a templated function instead of a functor. It is marginally
// nicer, and avoids adding to the std namespace to add hashing of other types.
template <typename T>
@@ -45,12 +45,12 @@ size_t Hash(const TypedInteger<Tag, T>& value) {
// return hash;
template <typename T>
void HashCombine(size_t* hash, const T& value) {
-#if defined(DAWN_PLATFORM_64_BIT)
+#if DAWN_PLATFORM_IS(64_BIT)
const size_t offset = 0x9e3779b97f4a7c16;
-#elif defined(DAWN_PLATFORM_32_BIT)
+#elif DAWN_PLATFORM_IS(32_BIT)
const size_t offset = 0x9e3779b9;
#else
-# error "Unsupported platform"
+#error "Unsupported platform"
#endif
*hash ^= Hash(value) + offset + (*hash << 6) + (*hash >> 2);
}
@@ -75,7 +75,7 @@ void HashCombine(size_t* hash, const T& value, const Args&... args) {
#if defined(_GLIBCXX_DEBUG)
template <size_t N>
size_t Hash(const std::bitset<N>& value) {
- constexpr size_t kWindowSize = sizeof(unsigned long long);
+ constexpr size_t kWindowSize = sizeof(uint64_t);
std::bitset<N> bits = value;
size_t hash = 0;
@@ -89,13 +89,13 @@ size_t Hash(const std::bitset<N>& value) {
#endif
namespace std {
- template <typename Index, size_t N>
- struct hash<ityp::bitset<Index, N>> {
- public:
- size_t operator()(const ityp::bitset<Index, N>& value) const {
- return Hash(static_cast<const std::bitset<N>&>(value));
- }
- };
+template <typename Index, size_t N>
+struct hash<ityp::bitset<Index, N>> {
+ public:
+ size_t operator()(const ityp::bitset<Index, N>& value) const {
+ return Hash(static_cast<const std::bitset<N>&>(value));
+ }
+};
} // namespace std
#endif // SRC_DAWN_COMMON_HASHUTILS_H_
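
A sketch of how Hash and HashCombine compose when hashing an aggregate key; MyKey and its fields are hypothetical:

#include <cstddef>
#include <cstdint>

#include "dawn/common/HashUtils.h"

struct MyKey {
    uint32_t width;
    uint32_t height;
    bool mipmapped;
};

size_t HashMyKey(const MyKey& key) {
    size_t hash = 0;
    // The variadic overload folds each member in, mixing with the golden-ratio offset above.
    HashCombine(&hash, key.width, key.height, key.mipmapped);
    return hash;
}
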
diff --git a/chromium/third_party/dawn/src/dawn/common/IOKitRef.h b/chromium/third_party/dawn/src/dawn/common/IOKitRef.h
index 43119a46190..33367b1e4c8 100644
--- a/chromium/third_party/dawn/src/dawn/common/IOKitRef.h
+++ b/chromium/third_party/dawn/src/dawn/common/IOKitRef.h
@@ -15,19 +15,15 @@
#ifndef SRC_DAWN_COMMON_IOKITREF_H_
#define SRC_DAWN_COMMON_IOKITREF_H_
-#include "dawn/common/RefBase.h"
-
#include <IOKit/IOKitLib.h>
+#include "dawn/common/RefBase.h"
+
template <typename T>
struct IOKitRefTraits {
static constexpr T kNullValue = IO_OBJECT_NULL;
- static void Reference(T value) {
- IOObjectRetain(value);
- }
- static void Release(T value) {
- IOObjectRelease(value);
- }
+ static void Reference(T value) { IOObjectRetain(value); }
+ static void Release(T value) { IOObjectRelease(value); }
};
template <typename T>
diff --git a/chromium/third_party/dawn/src/dawn/common/LinkedList.h b/chromium/third_party/dawn/src/dawn/common/LinkedList.h
index 5227041ca27..b9503eee182 100644
--- a/chromium/third_party/dawn/src/dawn/common/LinkedList.h
+++ b/chromium/third_party/dawn/src/dawn/common/LinkedList.h
@@ -99,10 +99,8 @@ class LinkedList;
template <typename T>
class LinkNode {
public:
- LinkNode() : previous_(nullptr), next_(nullptr) {
- }
- LinkNode(LinkNode<T>* previous, LinkNode<T>* next) : previous_(previous), next_(next) {
- }
+ LinkNode() : previous_(nullptr), next_(nullptr) {}
+ LinkNode(LinkNode<T>* previous, LinkNode<T>* next) : previous_(previous), next_(next) {}
LinkNode(LinkNode<T>&& rhs) {
next_ = rhs.next_;
@@ -154,22 +152,14 @@ class LinkNode {
return true;
}
- LinkNode<T>* previous() const {
- return previous_;
- }
+ LinkNode<T>* previous() const { return previous_; }
- LinkNode<T>* next() const {
- return next_;
- }
+ LinkNode<T>* next() const { return next_; }
// Cast from the node-type to the value type.
- const T* value() const {
- return static_cast<const T*>(this);
- }
+ const T* value() const { return static_cast<const T*>(this); }
- T* value() {
- return static_cast<T*>(this);
- }
+ T* value() { return static_cast<T*>(this); }
private:
friend class LinkedList<T>;
@@ -183,8 +173,7 @@ class LinkedList {
// The "root" node is self-referential, and forms the basis of a circular
// list (root_.next() will point back to the start of the list,
// and root_->previous() wraps around to the end of the list).
- LinkedList() : root_(&root_, &root_) {
- }
+ LinkedList() : root_(&root_, &root_) {}
~LinkedList() {
// If any LinkNodes still exist in the LinkedList, there will be outstanding references to
@@ -194,9 +183,7 @@ class LinkedList {
}
// Appends |e| to the end of the linked list.
- void Append(LinkNode<T>* e) {
- e->InsertBefore(&root_);
- }
+ void Append(LinkNode<T>* e) { e->InsertBefore(&root_); }
// Moves all elements (in order) of the list and appends them into |l| leaving the list empty.
void MoveInto(LinkedList<T>* l) {
@@ -212,21 +199,13 @@ class LinkedList {
root_.previous_ = &root_;
}
- LinkNode<T>* head() const {
- return root_.next();
- }
+ LinkNode<T>* head() const { return root_.next(); }
- LinkNode<T>* tail() const {
- return root_.previous();
- }
+ LinkNode<T>* tail() const { return root_.previous(); }
- const LinkNode<T>* end() const {
- return &root_;
- }
+ const LinkNode<T>* end() const { return &root_; }
- bool empty() const {
- return head() == end();
- }
+ bool empty() const { return head() == end(); }
private:
LinkNode<T> root_;
@@ -235,8 +214,7 @@ class LinkedList {
template <typename T>
class LinkedListIterator {
public:
- explicit LinkedListIterator(LinkNode<T>* node) : current_(node), next_(node->next()) {
- }
+ explicit LinkedListIterator(LinkNode<T>* node) : current_(node), next_(node->next()) {}
// We keep an early reference to the next node in the list so that even if the current element
// is modified or removed from the list, we have a valid next node.
@@ -246,13 +224,9 @@ class LinkedListIterator {
return *this;
}
- bool operator!=(const LinkedListIterator<T>& other) const {
- return current_ != other.current_;
- }
+ bool operator!=(const LinkedListIterator<T>& other) const { return current_ != other.current_; }
- LinkNode<T>* operator*() const {
- return current_;
- }
+ LinkNode<T>* operator*() const { return current_; }
private:
LinkNode<T>* current_;
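
The list is intrusive: elements derive from LinkNode<T>, and root_ forms the circular sentinel described in the comments above. A minimal sketch (MyItem is hypothetical, and RemoveFromList() is assumed from the Chromium-style LinkNode API):

#include "dawn/common/LinkedList.h"

struct MyItem : LinkNode<MyItem> {
    int payload = 0;
};

int SumItems() {
    LinkedList<MyItem> list;
    MyItem a, b;
    a.payload = 1;
    b.payload = 2;
    list.Append(&a);  // InsertBefore(&root_) under the hood
    list.Append(&b);

    int sum = 0;
    // head() is the first real node; end() is the sentinel root.
    for (LinkNode<MyItem>* node = list.head(); node != list.end(); node = node->next()) {
        sum += node->value()->payload;
    }

    // Detach the stack-allocated nodes before they go out of scope.
    a.RemoveFromList();
    b.RemoveFromList();
    return sum;
}
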
diff --git a/chromium/third_party/dawn/src/dawn/common/Log.cpp b/chromium/third_party/dawn/src/dawn/common/Log.cpp
index b85094b76fc..5edc40e1319 100644
--- a/chromium/third_party/dawn/src/dawn/common/Log.cpp
+++ b/chromium/third_party/dawn/src/dawn/common/Log.cpp
@@ -14,103 +14,112 @@
#include "dawn/common/Log.h"
+#include <cstdio>
+#include <string>
+
#include "dawn/common/Assert.h"
#include "dawn/common/Platform.h"
-#include <cstdio>
-
-#if defined(DAWN_PLATFORM_ANDROID)
-# include <android/log.h>
+#if DAWN_PLATFORM_IS(ANDROID)
+#include <android/log.h>
#endif
namespace dawn {
- namespace {
-
- const char* SeverityName(LogSeverity severity) {
- switch (severity) {
- case LogSeverity::Debug:
- return "Debug";
- case LogSeverity::Info:
- return "Info";
- case LogSeverity::Warning:
- return "Warning";
- case LogSeverity::Error:
- return "Error";
- default:
- UNREACHABLE();
- return "";
- }
- }
-
-#if defined(DAWN_PLATFORM_ANDROID)
- android_LogPriority AndroidLogPriority(LogSeverity severity) {
- switch (severity) {
- case LogSeverity::Debug:
- return ANDROID_LOG_INFO;
- case LogSeverity::Info:
- return ANDROID_LOG_INFO;
- case LogSeverity::Warning:
- return ANDROID_LOG_WARN;
- case LogSeverity::Error:
- return ANDROID_LOG_ERROR;
- default:
- UNREACHABLE();
- return ANDROID_LOG_ERROR;
- }
- }
-#endif // defined(DAWN_PLATFORM_ANDROID)
-
- } // anonymous namespace
-
- LogMessage::LogMessage(LogSeverity severity) : mSeverity(severity) {
+namespace {
+
+const char* SeverityName(LogSeverity severity) {
+ switch (severity) {
+ case LogSeverity::Debug:
+ return "Debug";
+ case LogSeverity::Info:
+ return "Info";
+ case LogSeverity::Warning:
+ return "Warning";
+ case LogSeverity::Error:
+ return "Error";
+ default:
+ UNREACHABLE();
+ return "";
}
-
- LogMessage::~LogMessage() {
- std::string fullMessage = mStream.str();
-
- // If this message has been moved, its stream is empty.
- if (fullMessage.empty()) {
- return;
- }
-
- const char* severityName = SeverityName(mSeverity);
-
-#if defined(DAWN_PLATFORM_ANDROID)
- android_LogPriority androidPriority = AndroidLogPriority(mSeverity);
- __android_log_print(androidPriority, "Dawn", "%s: %s\n", severityName, fullMessage.c_str());
-#else // defined(DAWN_PLATFORM_ANDROID)
- FILE* outputStream = stdout;
- if (mSeverity == LogSeverity::Warning || mSeverity == LogSeverity::Error) {
- outputStream = stderr;
- }
-
- // Note: we use fprintf because <iostream> includes static initializers.
- fprintf(outputStream, "%s: %s\n", severityName, fullMessage.c_str());
- fflush(outputStream);
-#endif // defined(DAWN_PLATFORM_ANDROID)
+}
+
+#if DAWN_PLATFORM_IS(ANDROID)
+android_LogPriority AndroidLogPriority(LogSeverity severity) {
+ switch (severity) {
+ case LogSeverity::Debug:
+ return ANDROID_LOG_INFO;
+ case LogSeverity::Info:
+ return ANDROID_LOG_INFO;
+ case LogSeverity::Warning:
+ return ANDROID_LOG_WARN;
+ case LogSeverity::Error:
+ return ANDROID_LOG_ERROR;
+ default:
+ UNREACHABLE();
+ return ANDROID_LOG_ERROR;
}
+}
+#endif // DAWN_PLATFORM_IS(ANDROID)
- LogMessage DebugLog() {
- return {LogSeverity::Debug};
- }
+} // anonymous namespace
- LogMessage InfoLog() {
- return {LogSeverity::Info};
- }
+LogMessage::LogMessage(LogSeverity severity) : mSeverity(severity) {}
- LogMessage WarningLog() {
- return {LogSeverity::Warning};
- }
+LogMessage::LogMessage(LogMessage&& other) = default;
+
+LogMessage& LogMessage::operator=(LogMessage&& other) = default;
+
+LogMessage::~LogMessage() {
+#if defined(DAWN_DISABLE_LOGGING)
+ // Don't print logs to make fuzzing more efficient. Implemented as
+ // an early return to avoid warnings about unused member variables.
+ return;
+#endif
+ std::string fullMessage = mStream.str();
- LogMessage ErrorLog() {
- return {LogSeverity::Error};
+ // If this message has been moved, its stream is empty.
+ if (fullMessage.empty()) {
+ return;
}
- LogMessage DebugLog(const char* file, const char* function, int line) {
- LogMessage message = DebugLog();
- message << file << ":" << line << "(" << function << ")";
- return message;
+ const char* severityName = SeverityName(mSeverity);
+
+#if DAWN_PLATFORM_IS(ANDROID)
+ android_LogPriority androidPriority = AndroidLogPriority(mSeverity);
+ __android_log_print(androidPriority, "Dawn", "%s: %s\n", severityName, fullMessage.c_str());
+#else // DAWN_PLATFORM_IS(ANDROID)
+ FILE* outputStream = stdout;
+ if (mSeverity == LogSeverity::Warning || mSeverity == LogSeverity::Error) {
+ outputStream = stderr;
}
+ // Note: we use fprintf because <iostream> includes static initializers.
+ fprintf(outputStream, "%s: %s\n", severityName, fullMessage.c_str());
+ fflush(outputStream);
+#endif // DAWN_PLATFORM_IS(ANDROID)
+}
+
+LogMessage DebugLog() {
+ return LogMessage(LogSeverity::Debug);
+}
+
+LogMessage InfoLog() {
+ return LogMessage(LogSeverity::Info);
+}
+
+LogMessage WarningLog() {
+ return LogMessage(LogSeverity::Warning);
+}
+
+LogMessage ErrorLog() {
+ return LogMessage(LogSeverity::Error);
+}
+
+LogMessage DebugLog(const char* file, const char* function, int line) {
+ LogMessage message = DebugLog();
+ message << file << ":" << line << "(" << function << ")";
+ return message;
+}
+
} // namespace dawn
diff --git a/chromium/third_party/dawn/src/dawn/common/Log.h b/chromium/third_party/dawn/src/dawn/common/Log.h
index d2b4fdbcc86..2f56ce53ae9 100644
--- a/chromium/third_party/dawn/src/dawn/common/Log.h
+++ b/chromium/third_party/dawn/src/dawn/common/Log.h
@@ -47,47 +47,47 @@
namespace dawn {
- // Log levels mostly used to signal intent where the log message is produced and used to route
- // the message to the correct output.
- enum class LogSeverity {
- Debug,
- Info,
- Warning,
- Error,
- };
+// Log levels mostly used to signal intent where the log message is produced and used to route
+// the message to the correct output.
+enum class LogSeverity {
+ Debug,
+ Info,
+ Warning,
+ Error,
+};
- // Essentially an ostringstream that will print itself in its destructor.
- class LogMessage {
- public:
- LogMessage(LogSeverity severity);
- ~LogMessage();
+// Essentially an ostringstream that will print itself in its destructor.
+class LogMessage {
+ public:
+ explicit LogMessage(LogSeverity severity);
+ ~LogMessage();
- LogMessage(LogMessage&& other) = default;
- LogMessage& operator=(LogMessage&& other) = default;
+ LogMessage(LogMessage&& other);
+ LogMessage& operator=(LogMessage&& other);
- template <typename T>
- LogMessage& operator<<(T&& value) {
- mStream << value;
- return *this;
- }
+ template <typename T>
+ LogMessage& operator<<(T&& value) {
+ mStream << value;
+ return *this;
+ }
- private:
- LogMessage(const LogMessage& other) = delete;
- LogMessage& operator=(const LogMessage& other) = delete;
+ private:
+ LogMessage(const LogMessage& other) = delete;
+ LogMessage& operator=(const LogMessage& other) = delete;
- LogSeverity mSeverity;
- std::ostringstream mStream;
- };
+ LogSeverity mSeverity;
+ std::ostringstream mStream;
+};
- // Short-hands to create a LogMessage with the respective severity.
- LogMessage DebugLog();
- LogMessage InfoLog();
- LogMessage WarningLog();
- LogMessage ErrorLog();
+// Short-hands to create a LogMessage with the respective severity.
+LogMessage DebugLog();
+LogMessage InfoLog();
+LogMessage WarningLog();
+LogMessage ErrorLog();
- // DAWN_DEBUG is a helper macro that creates a DebugLog and outputs file/line/function
- // information
- LogMessage DebugLog(const char* file, const char* function, int line);
+// DAWN_DEBUG is a helper macro that creates a DebugLog and outputs file/line/function
+// information
+LogMessage DebugLog(const char* file, const char* function, int line);
#define DAWN_DEBUG() ::dawn::DebugLog(__FILE__, __func__, __LINE__)
} // namespace dawn
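
Usage is what the short-hands above suggest: each LogMessage buffers into its ostringstream and prints once, in its destructor (stderr for Warning/Error, logcat on Android, nothing at all when DAWN_DISABLE_LOGGING is defined). A small sketch:

#include "dawn/common/Log.h"

void ReportAdapters(int adapterCount) {
    // Printed as "Info: Found N adapters" when the temporary LogMessage is destroyed.
    dawn::InfoLog() << "Found " << adapterCount << " adapters";

    if (adapterCount == 0) {
        dawn::ErrorLog() << "No usable adapter";
    }

    // DAWN_DEBUG() prefixes the message with file, line and function.
    DAWN_DEBUG() << " enumeration done";
}
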
diff --git a/chromium/third_party/dawn/src/dawn/common/Math.cpp b/chromium/third_party/dawn/src/dawn/common/Math.cpp
index bd936a8f71c..a3794e25021 100644
--- a/chromium/third_party/dawn/src/dawn/common/Math.cpp
+++ b/chromium/third_party/dawn/src/dawn/common/Math.cpp
@@ -14,20 +14,21 @@
#include "dawn/common/Math.h"
-#include "dawn/common/Assert.h"
-#include "dawn/common/Platform.h"
-
#include <algorithm>
#include <cmath>
#include <limits>
-#if defined(DAWN_COMPILER_MSVC)
-# include <intrin.h>
+#include "dawn/common/Assert.h"
+#include "dawn/common/Platform.h"
+
+#if DAWN_COMPILER_IS(MSVC)
+#include <intrin.h>
#endif
uint32_t ScanForward(uint32_t bits) {
ASSERT(bits != 0);
-#if defined(DAWN_COMPILER_MSVC)
+#if DAWN_COMPILER_IS(MSVC)
+ // NOLINTNEXTLINE(runtime/int)
unsigned long firstBitIndex = 0ul;
unsigned char ret = _BitScanForward(&firstBitIndex, bits);
ASSERT(ret != 0);
@@ -39,7 +40,8 @@ uint32_t ScanForward(uint32_t bits) {
uint32_t Log2(uint32_t value) {
ASSERT(value != 0);
-#if defined(DAWN_COMPILER_MSVC)
+#if DAWN_COMPILER_IS(MSVC)
+ // NOLINTNEXTLINE(runtime/int)
unsigned long firstBitIndex = 0ul;
unsigned char ret = _BitScanReverse(&firstBitIndex, value);
ASSERT(ret != 0);
@@ -51,13 +53,15 @@ uint32_t Log2(uint32_t value) {
uint32_t Log2(uint64_t value) {
ASSERT(value != 0);
-#if defined(DAWN_COMPILER_MSVC)
-# if defined(DAWN_PLATFORM_64_BIT)
+#if DAWN_COMPILER_IS(MSVC)
+#if DAWN_PLATFORM_IS(64_BIT)
+ // NOLINTNEXTLINE(runtime/int)
unsigned long firstBitIndex = 0ul;
unsigned char ret = _BitScanReverse64(&firstBitIndex, value);
ASSERT(ret != 0);
return firstBitIndex;
-# else // defined(DAWN_PLATFORM_64_BIT)
+#else // DAWN_PLATFORM_IS(64_BIT)
+ // NOLINTNEXTLINE(runtime/int)
unsigned long firstBitIndex = 0ul;
if (_BitScanReverse(&firstBitIndex, value >> 32)) {
return firstBitIndex + 32;
@@ -65,10 +69,10 @@ uint32_t Log2(uint64_t value) {
unsigned char ret = _BitScanReverse(&firstBitIndex, value & 0xFFFFFFFF);
ASSERT(ret != 0);
return firstBitIndex;
-# endif // defined(DAWN_PLATFORM_64_BIT)
-#else // defined(DAWN_COMPILER_MSVC)
+#endif // DAWN_PLATFORM_IS(64_BIT)
+#else // DAWN_COMPILER_IS(MSVC)
return 63 - static_cast<uint32_t>(__builtin_clzll(value));
-#endif // defined(DAWN_COMPILER_MSVC)
+#endif // DAWN_COMPILER_IS(MSVC)
}
uint64_t NextPowerOfTwo(uint64_t n) {
diff --git a/chromium/third_party/dawn/src/dawn/common/Math.h b/chromium/third_party/dawn/src/dawn/common/Math.h
index 59384430fff..9984c4b0b00 100644
--- a/chromium/third_party/dawn/src/dawn/common/Math.h
+++ b/chromium/third_party/dawn/src/dawn/common/Math.h
@@ -15,8 +15,6 @@
#ifndef SRC_DAWN_COMMON_MATH_H_
#define SRC_DAWN_COMMON_MATH_H_
-#include "dawn/common/Assert.h"
-
#include <cstddef>
#include <cstdint>
#include <cstring>
@@ -24,6 +22,8 @@
#include <limits>
#include <type_traits>
+#include "dawn/common/Assert.h"
+
// The following are not valid for 0
uint32_t ScanForward(uint32_t bits);
uint32_t Log2(uint32_t value);
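
For reference, a few worked values for the bit-scan helpers declared here, matching the _BitScan*/__builtin_clzll implementations in the Math.cpp hunk above (both functions are undefined for 0, as the comment says):

#include <cstdint>

#include "dawn/common/Math.h"

void BitScanExamples() {
    uint32_t low = ScanForward(0xA0);      // lowest set bit of 0b1010'0000 -> 5
    uint32_t a = Log2(uint32_t(1));        // highest set bit -> 0
    uint32_t b = Log2(uint32_t(1000));     // 512 <= 1000 < 1024 -> 9
    uint32_t c = Log2(uint64_t(1) << 40);  // 64-bit overload -> 40
    (void)low; (void)a; (void)b; (void)c;
}
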
diff --git a/chromium/third_party/dawn/src/dawn/common/NSRef.h b/chromium/third_party/dawn/src/dawn/common/NSRef.h
index ddec95e3f8e..4afb5e03905 100644
--- a/chromium/third_party/dawn/src/dawn/common/NSRef.h
+++ b/chromium/third_party/dawn/src/dawn/common/NSRef.h
@@ -20,7 +20,7 @@
#import <Foundation/NSObject.h>
#if !defined(__OBJC__)
-# error "NSRef can only be used in Objective C/C++ code."
+#error "NSRef can only be used in Objective C/C++ code."
#endif
// This file contains smart pointers that automatically reference and release Objective C objects
@@ -67,12 +67,8 @@
template <typename T>
struct NSRefTraits {
static constexpr T kNullValue = nullptr;
- static void Reference(T value) {
- [value retain];
- }
- static void Release(T value) {
- [value release];
- }
+ static void Reference(T value) { [value retain]; }
+ static void Release(T value) { [value release]; }
};
template <typename T>
@@ -80,13 +76,9 @@ class NSRef : public RefBase<T*, NSRefTraits<T*>> {
public:
using RefBase<T*, NSRefTraits<T*>>::RefBase;
- const T* operator*() const {
- return this->Get();
- }
+ const T* operator*() const { return this->Get(); }
- T* operator*() {
- return this->Get();
- }
+ T* operator*() { return this->Get(); }
};
template <typename T>
@@ -104,13 +96,9 @@ class NSPRef : public RefBase<T, NSRefTraits<T>> {
public:
using RefBase<T, NSRefTraits<T>>::RefBase;
- const T operator*() const {
- return this->Get();
- }
+ const T operator*() const { return this->Get(); }
- T operator*() {
- return this->Get();
- }
+ T operator*() { return this->Get(); }
};
template <typename T>
diff --git a/chromium/third_party/dawn/src/dawn/common/Numeric.h b/chromium/third_party/dawn/src/dawn/common/Numeric.h
new file mode 100644
index 00000000000..50f6d40ba79
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/common/Numeric.h
@@ -0,0 +1,53 @@
+// Copyright 2022 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef SRC_DAWN_COMMON_NUMERIC_H_
+#define SRC_DAWN_COMMON_NUMERIC_H_
+
+#include <limits>
+#include <type_traits>
+
+#include "dawn/common/Assert.h"
+
+namespace detail {
+
+template <typename T>
+inline constexpr uint32_t u32_sizeof() {
+ static_assert(sizeof(T) <= std::numeric_limits<uint32_t>::max());
+ return uint32_t(sizeof(T));
+}
+
+template <typename T>
+inline constexpr uint32_t u32_alignof() {
+ static_assert(alignof(T) <= std::numeric_limits<uint32_t>::max());
+ return uint32_t(alignof(T));
+}
+
+} // namespace detail
+
+template <typename T>
+inline constexpr uint32_t u32_sizeof = detail::u32_sizeof<T>();
+
+template <typename T>
+inline constexpr uint32_t u32_alignof = detail::u32_alignof<T>();
+
+// Only defined for unsigned integers because that is all that is
+// needed at the time of writing.
+template <typename Dst, typename Src, typename = std::enable_if_t<std::is_unsigned_v<Src>>>
+inline Dst checked_cast(const Src& value) {
+ ASSERT(value <= std::numeric_limits<Dst>::max());
+ return static_cast<Dst>(value);
+}
+
+#endif // SRC_DAWN_COMMON_NUMERIC_H_
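
A sketch of the new helpers in use: u32_sizeof/u32_alignof are compile-time uint32_t versions of sizeof/alignof (used by SlabAllocator further down), and checked_cast narrows an unsigned value with a runtime ASSERT that it fits:

#include <cstddef>
#include <cstdint>

#include "dawn/common/Numeric.h"

struct Block {
    uint64_t data[4];
};

uint32_t NumericExamples(size_t byteLength) {
    constexpr uint32_t size = u32_sizeof<Block>;        // 32 on typical targets
    constexpr uint32_t alignment = u32_alignof<Block>;  // 8 on typical targets
    (void)size;
    (void)alignment;

    // ASSERTs that |byteLength| fits in 32 bits; only defined for unsigned sources.
    return checked_cast<uint32_t>(byteLength);
}
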
diff --git a/chromium/third_party/dawn/src/dawn/common/Platform.h b/chromium/third_party/dawn/src/dawn/common/Platform.h
index 5e4f9d7ff49..2a8643489f7 100644
--- a/chromium/third_party/dawn/src/dawn/common/Platform.h
+++ b/chromium/third_party/dawn/src/dawn/common/Platform.h
@@ -16,67 +16,119 @@
#define SRC_DAWN_COMMON_PLATFORM_H_
#if defined(_WIN32) || defined(_WIN64)
-# include <winapifamily.h>
-# define DAWN_PLATFORM_WINDOWS 1
-# if WINAPI_FAMILY == WINAPI_FAMILY_DESKTOP_APP
-# define DAWN_PLATFORM_WIN32 1
-# elif WINAPI_FAMILY == WINAPI_FAMILY_PC_APP
-# define DAWN_PLATFORM_WINUWP 1
-# else
-# error "Unsupported Windows platform."
-# endif
+#include <winapifamily.h>
+#define DAWN_PLATFORM_IS_WINDOWS 1
+#if WINAPI_FAMILY == WINAPI_FAMILY_DESKTOP_APP
+#define DAWN_PLATFORM_IS_WIN32 1
+#elif WINAPI_FAMILY == WINAPI_FAMILY_PC_APP
+#define DAWN_PLATFORM_IS_WINUWP 1
+#else
+#error "Unsupported Windows platform."
+#endif
#elif defined(__linux__)
-# define DAWN_PLATFORM_LINUX 1
-# define DAWN_PLATFORM_POSIX 1
-# if defined(__ANDROID__)
-# define DAWN_PLATFORM_ANDROID 1
-# endif
+#define DAWN_PLATFORM_IS_LINUX 1
+#define DAWN_PLATFORM_IS_POSIX 1
+#if defined(__ANDROID__)
+#define DAWN_PLATFORM_IS_ANDROID 1
+#endif
#elif defined(__APPLE__)
-# define DAWN_PLATFORM_APPLE 1
-# define DAWN_PLATFORM_POSIX 1
-# include <TargetConditionals.h>
-# if TARGET_OS_IPHONE
-# define DAWN_PLATFORM_IOS
-# elif TARGET_OS_MAC
-# define DAWN_PLATFORM_MACOS
-# else
-# error "Unsupported Apple platform."
-# endif
+#define DAWN_PLATFORM_IS_APPLE 1
+#define DAWN_PLATFORM_IS_POSIX 1
+#include <TargetConditionals.h>
+#if TARGET_OS_IPHONE
+#define DAWN_PLATFORM_IS_IOS 1
+#elif TARGET_OS_MAC
+#define DAWN_PLATFORM_IS_MACOS 1
+#else
+#error "Unsupported Apple platform."
+#endif
#elif defined(__Fuchsia__)
-# define DAWN_PLATFORM_FUCHSIA 1
-# define DAWN_PLATFORM_POSIX 1
+#define DAWN_PLATFORM_IS_FUCHSIA 1
+#define DAWN_PLATFORM_IS_POSIX 1
#elif defined(__EMSCRIPTEN__)
-# define DAWN_PLATFORM_EMSCRIPTEN 1
-# define DAWN_PLATFORM_POSIX 1
+#define DAWN_PLATFORM_IS_EMSCRIPTEN 1
+#define DAWN_PLATFORM_IS_POSIX 1
#else
-# error "Unsupported platform."
+#error "Unsupported platform."
#endif
// Distinguish mips32.
#if defined(__mips__) && (_MIPS_SIM == _ABIO32) && !defined(__mips32__)
-# define __mips32__
+#define __mips32__
#endif
// Distinguish mips64.
#if defined(__mips__) && (_MIPS_SIM == _ABI64) && !defined(__mips64__)
-# define __mips64__
+#define __mips64__
#endif
#if defined(_WIN64) || defined(__aarch64__) || defined(__x86_64__) || defined(__mips64__) || \
defined(__s390x__) || defined(__PPC64__)
-# define DAWN_PLATFORM_64_BIT 1
+#define DAWN_PLATFORM_IS_64_BIT 1
static_assert(sizeof(sizeof(char)) == 8, "Expect sizeof(size_t) == 8");
#elif defined(_WIN32) || defined(__arm__) || defined(__i386__) || defined(__mips32__) || \
defined(__s390__) || defined(__EMSCRIPTEN__)
-# define DAWN_PLATFORM_32_BIT 1
+#define DAWN_PLATFORM_IS_32_BIT 1
static_assert(sizeof(sizeof(char)) == 4, "Expect sizeof(size_t) == 4");
#else
-# error "Unsupported platform"
+#error "Unsupported platform"
+#endif
+
+// This section defines the remaining platform macros as 0 to avoid undefined-macro usage errors.
+#if !defined(DAWN_PLATFORM_IS_WINDOWS)
+#define DAWN_PLATFORM_IS_WINDOWS 0
+#endif
+#if !defined(DAWN_PLATFORM_IS_WIN32)
+#define DAWN_PLATFORM_IS_WIN32 0
+#endif
+#if !defined(DAWN_PLATFORM_IS_WINUWP)
+#define DAWN_PLATFORM_IS_WINUWP 0
+#endif
+
+#if !defined(DAWN_PLATFORM_IS_POSIX)
+#define DAWN_PLATFORM_IS_POSIX 0
+#endif
+
+#if !defined(DAWN_PLATFORM_IS_LINUX)
+#define DAWN_PLATFORM_IS_LINUX 0
#endif
+#if !defined(DAWN_PLATFORM_IS_ANDROID)
+#define DAWN_PLATFORM_IS_ANDROID 0
+#endif
+
+#if !defined(DAWN_PLATFORM_IS_APPLE)
+#define DAWN_PLATFORM_IS_APPLE 0
+#endif
+#if !defined(DAWN_PLATFORM_IS_IOS)
+#define DAWN_PLATFORM_IS_IOS 0
+#endif
+#if !defined(DAWN_PLATFORM_IS_MACOS)
+#define DAWN_PLATFORM_IS_MACOS 0
+#endif
+
+#if !defined(DAWN_PLATFORM_IS_FUCHSIA)
+#define DAWN_PLATFORM_IS_FUCHSIA 0
+#endif
+#if !defined(DAWN_PLATFORM_IS_EMSCRIPTEN)
+#define DAWN_PLATFORM_IS_EMSCRIPTEN 0
+#endif
+
+#if !defined(DAWN_PLATFORM_IS_64_BIT)
+#define DAWN_PLATFORM_IS_64_BIT 0
+#endif
+#if !defined(DAWN_PLATFORM_IS_32_BIT)
+#define DAWN_PLATFORM_IS_32_BIT 0
+#endif
+
+// Use #if DAWN_PLATFORM_IS(XXX) for platform specific code.
+// Do not use #ifdef or the naked macro DAWN_PLATFORM_IS_XXX.
+// This helps avoid common mistakes such as not including "Platform.h" and silently falling into
+// the wrong code block, because the compiler rejects the "call" of the then-undefined macro.
+#define DAWN_PLATFORM_IS(X) (1 == DAWN_PLATFORM_IS_##X)
#endif // SRC_DAWN_COMMON_PLATFORM_H_
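
A usage sketch of the function-style macro; because DAWN_PLATFORM_IS(X) expands to a comparison, forgetting to include Platform.h (or compiling with warnings on undefined macros) turns a mistake into a build error instead of a silently false #ifdef:

#include "dawn/common/Platform.h"

const char* PlatformName() {
#if DAWN_PLATFORM_IS(WINDOWS)
    return "Windows";
#elif DAWN_PLATFORM_IS(ANDROID)
    return "Android";
#elif DAWN_PLATFORM_IS(POSIX)
    return "POSIX (non-Android)";
#else
#error "Unsupported platform"
#endif
}
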
diff --git a/chromium/third_party/dawn/src/dawn/common/Preprocessor.h b/chromium/third_party/dawn/src/dawn/common/Preprocessor.h
index 458ccb3ba98..88606052580 100644
--- a/chromium/third_party/dawn/src/dawn/common/Preprocessor.h
+++ b/chromium/third_party/dawn/src/dawn/common/Preprocessor.h
@@ -18,7 +18,7 @@
// DAWN_PP_GET_HEAD: get the first element of a __VA_ARGS__ without triggering empty
// __VA_ARGS__ warnings.
#define DAWN_INTERNAL_PP_GET_HEAD(firstParam, ...) firstParam
-#define DAWN_PP_GET_HEAD(...) DAWN_INTERNAL_PP_GET_HEAD(__VA_ARGS__, dummyArg)
+#define DAWN_PP_GET_HEAD(...) DAWN_INTERNAL_PP_GET_HEAD(__VA_ARGS__, placeholderArg)
// DAWN_PP_CONCATENATE: Concatenate tokens, first expanding the arguments passed in.
#define DAWN_PP_CONCATENATE(arg1, arg2) DAWN_PP_CONCATENATE_1(arg1, arg2)
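
A worked expansion of the macro touched here, restated standalone for illustration; the trailing placeholderArg only exists so the inner macro always receives something after firstParam, avoiding empty-__VA_ARGS__ warnings:

// Restated from the hunk above, for a standalone illustration.
#define DAWN_INTERNAL_PP_GET_HEAD(firstParam, ...) firstParam
#define DAWN_PP_GET_HEAD(...) DAWN_INTERNAL_PP_GET_HEAD(__VA_ARGS__, placeholderArg)

// DAWN_PP_GET_HEAD(int, float, double)
//   -> DAWN_INTERNAL_PP_GET_HEAD(int, float, double, placeholderArg)
//   -> int
// DAWN_PP_GET_HEAD(int)
//   -> DAWN_INTERNAL_PP_GET_HEAD(int, placeholderArg)
//   -> int
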
diff --git a/chromium/third_party/dawn/src/dawn/common/RefBase.h b/chromium/third_party/dawn/src/dawn/common/RefBase.h
index fca12d600d6..8f06f19f159 100644
--- a/chromium/third_party/dawn/src/dawn/common/RefBase.h
+++ b/chromium/third_party/dawn/src/dawn/common/RefBase.h
@@ -15,12 +15,12 @@
#ifndef SRC_DAWN_COMMON_REFBASE_H_
#define SRC_DAWN_COMMON_REFBASE_H_
-#include "dawn/common/Assert.h"
-#include "dawn/common/Compiler.h"
-
#include <type_traits>
#include <utility>
+#include "dawn/common/Assert.h"
+#include "dawn/common/Compiler.h"
+
// A common class for various smart-pointers acting on referenceable/releasable pointer-like
// objects. Logic for each specialization can be customized using a Traits type that looks
// like the following:
@@ -36,16 +36,13 @@ template <typename T, typename Traits>
class RefBase {
public:
// Default constructor and destructor.
- RefBase() : mValue(Traits::kNullValue) {
- }
+ RefBase() : mValue(Traits::kNullValue) {}
- ~RefBase() {
- Release(mValue);
- }
+ ~RefBase() { Release(mValue); }
// Constructors from nullptr.
- constexpr RefBase(std::nullptr_t) : RefBase() {
- }
+ // NOLINTNEXTLINE(runtime/explicit)
+ constexpr RefBase(std::nullptr_t) : RefBase() {}
RefBase<T, Traits>& operator=(std::nullptr_t) {
Set(Traits::kNullValue);
@@ -53,9 +50,8 @@ class RefBase {
}
// Constructors from a value T.
- RefBase(T value) : mValue(value) {
- Reference(value);
- }
+ // NOLINTNEXTLINE(runtime/explicit)
+ RefBase(T value) : mValue(value) { Reference(value); }
RefBase<T, Traits>& operator=(const T& value) {
Set(value);
@@ -63,18 +59,14 @@ class RefBase {
}
// Constructors from a RefBase<T>
- RefBase(const RefBase<T, Traits>& other) : mValue(other.mValue) {
- Reference(other.mValue);
- }
+ RefBase(const RefBase<T, Traits>& other) : mValue(other.mValue) { Reference(other.mValue); }
RefBase<T, Traits>& operator=(const RefBase<T, Traits>& other) {
Set(other.mValue);
return *this;
}
- RefBase(RefBase<T, Traits>&& other) {
- mValue = other.Detach();
- }
+ RefBase(RefBase<T, Traits>&& other) { mValue = other.Detach(); }
RefBase<T, Traits>& operator=(RefBase<T, Traits>&& other) {
if (&other != this) {
@@ -111,28 +103,16 @@ class RefBase {
}
// Comparison operators.
- bool operator==(const T& other) const {
- return mValue == other;
- }
+ bool operator==(const T& other) const { return mValue == other; }
- bool operator!=(const T& other) const {
- return mValue != other;
- }
+ bool operator!=(const T& other) const { return mValue != other; }
- const T operator->() const {
- return mValue;
- }
- T operator->() {
- return mValue;
- }
+ const T operator->() const { return mValue; }
+ T operator->() { return mValue; }
// Smart pointer methods.
- const T& Get() const {
- return mValue;
- }
- T& Get() {
- return mValue;
- }
+ const T& Get() const { return mValue; }
+ T& Get() { return mValue; }
[[nodiscard]] T Detach() {
T value{std::move(mValue)};
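
The Traits customization described in the comment above is exactly the pattern followed by the CoreFoundationRef, IOKitRef and NSRef headers in this change. A self-contained sketch with a purely hypothetical C-style handle type:

#include "dawn/common/RefBase.h"

// Hypothetical refcounted C handle, used only for illustration.
struct FakeHandleObject {
    int refs = 0;
};
using FakeHandle = FakeHandleObject*;

inline void FakeHandleRetain(FakeHandle h) { ++h->refs; }
inline void FakeHandleRelease(FakeHandle h) {
    if (--h->refs == 0) {
        delete h;
    }
}

struct FakeHandleTraits {
    static constexpr FakeHandle kNullValue = nullptr;
    static void Reference(FakeHandle value) { FakeHandleRetain(value); }
    static void Release(FakeHandle value) { FakeHandleRelease(value); }
};

// Copying a FakeRef calls Reference(); destruction and reassignment call Release().
using FakeRef = RefBase<FakeHandle, FakeHandleTraits>;
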
diff --git a/chromium/third_party/dawn/src/dawn/common/RefCounted.cpp b/chromium/third_party/dawn/src/dawn/common/RefCounted.cpp
index 6950d134503..14bd0b1df77 100644
--- a/chromium/third_party/dawn/src/dawn/common/RefCounted.cpp
+++ b/chromium/third_party/dawn/src/dawn/common/RefCounted.cpp
@@ -14,23 +14,23 @@
#include "dawn/common/RefCounted.h"
-#include "dawn/common/Assert.h"
-
#include <cstddef>
+#include "dawn/common/Assert.h"
+
static constexpr size_t kPayloadBits = 1;
static constexpr uint64_t kPayloadMask = (uint64_t(1) << kPayloadBits) - 1;
static constexpr uint64_t kRefCountIncrement = (uint64_t(1) << kPayloadBits);
-RefCounted::RefCounted(uint64_t payload) : mRefCount(kRefCountIncrement + payload) {
+RefCount::RefCount(uint64_t payload) : mRefCount(kRefCountIncrement + payload) {
ASSERT((payload & kPayloadMask) == payload);
}
-uint64_t RefCounted::GetRefCountForTesting() const {
+uint64_t RefCount::GetValueForTesting() const {
return mRefCount >> kPayloadBits;
}
-uint64_t RefCounted::GetRefCountPayload() const {
+uint64_t RefCount::GetPayload() const {
// We only care about the payload bits of the refcount. These never change after
// initialization so we can use the relaxed memory order. The order doesn't guarantee
// anything except the atomicity of the load, which is enough since any past values of the
@@ -38,7 +38,7 @@ uint64_t RefCounted::GetRefCountPayload() const {
return kPayloadMask & mRefCount.load(std::memory_order_relaxed);
}
-void RefCounted::Reference() {
+void RefCount::Increment() {
ASSERT((mRefCount & ~kPayloadMask) != 0);
// The relaxed ordering guarantees only the atomicity of the update, which is enough here
@@ -49,7 +49,7 @@ void RefCounted::Reference() {
mRefCount.fetch_add(kRefCountIncrement, std::memory_order_relaxed);
}
-void RefCounted::Release() {
+bool RefCount::Decrement() {
ASSERT((mRefCount & ~kPayloadMask) != 0);
// The release fence here is to make sure all accesses to the object on a thread A
@@ -69,16 +69,30 @@ void RefCounted::Release() {
// memory barrier, when an acquire load on mRefCount (using the `ldar` instruction)
// should be enough and could end up being faster.
std::atomic_thread_fence(std::memory_order_acquire);
- DeleteThis();
+ return true;
}
+ return false;
+}
+
+RefCounted::RefCounted(uint64_t payload) : mRefCount(payload) {}
+RefCounted::~RefCounted() = default;
+
+uint64_t RefCounted::GetRefCountForTesting() const {
+ return mRefCount.GetValueForTesting();
+}
+
+uint64_t RefCounted::GetRefCountPayload() const {
+ return mRefCount.GetPayload();
}
-void RefCounted::APIReference() {
- Reference();
+void RefCounted::Reference() {
+ mRefCount.Increment();
}
-void RefCounted::APIRelease() {
- Release();
+void RefCounted::Release() {
+ if (mRefCount.Decrement()) {
+ DeleteThis();
+ }
}
void RefCounted::DeleteThis() {
diff --git a/chromium/third_party/dawn/src/dawn/common/RefCounted.h b/chromium/third_party/dawn/src/dawn/common/RefCounted.h
index ef70d4df403..b5d74f70ea9 100644
--- a/chromium/third_party/dawn/src/dawn/common/RefCounted.h
+++ b/chromium/third_party/dawn/src/dawn/common/RefCounted.h
@@ -15,11 +15,29 @@
#ifndef SRC_DAWN_COMMON_REFCOUNTED_H_
#define SRC_DAWN_COMMON_REFCOUNTED_H_
-#include "dawn/common/RefBase.h"
-
#include <atomic>
#include <cstdint>
+#include "dawn/common/RefBase.h"
+
+class RefCount {
+ public:
+ // Create a refcount with a payload. The refcount starts initially at one.
+ explicit RefCount(uint64_t payload = 0);
+
+ uint64_t GetValueForTesting() const;
+ uint64_t GetPayload() const;
+
+ // Add a reference.
+ void Increment();
+
+ // Remove a reference. Returns true if this was the last reference.
+ bool Decrement();
+
+ private:
+ std::atomic<uint64_t> mRefCount;
+};
+
class RefCounted {
public:
explicit RefCounted(uint64_t payload = 0);
@@ -30,27 +48,24 @@ class RefCounted {
void Reference();
void Release();
- void APIReference();
- void APIRelease();
+ void APIReference() { Reference(); }
+ void APIRelease() { Release(); }
protected:
- virtual ~RefCounted() = default;
+ virtual ~RefCounted();
+
// A Derived class may override this if they require a custom deleter.
virtual void DeleteThis();
private:
- std::atomic<uint64_t> mRefCount;
+ RefCount mRefCount;
};
template <typename T>
struct RefCountedTraits {
static constexpr T* kNullValue = nullptr;
- static void Reference(T* value) {
- value->Reference();
- }
- static void Release(T* value) {
- value->Release();
- }
+ static void Reference(T* value) { value->Reference(); }
+ static void Release(T* value) { value->Release(); }
};
template <typename T>
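
The refactor above splits the atomic counter into a reusable RefCount (Increment/Decrement, with the low kPayloadBits kept as a caller-supplied payload) while RefCounted keeps the Reference/Release/DeleteThis surface. A hedged sketch of the intended use, with the payload bit repurposed as an error flag the way Dawn's object classes do:

#include <cstdint>

#include "dawn/common/RefCounted.h"

class MyObject : public RefCounted {
  public:
    static constexpr uint64_t kErrorPayload = 1;  // must fit in kPayloadBits

    explicit MyObject(bool isError) : RefCounted(isError ? kErrorPayload : 0) {}

    bool IsError() const { return GetRefCountPayload() == kErrorPayload; }
};

void RefCountedExample() {
    MyObject* object = new MyObject(false);  // refcount starts at 1
    object->Reference();                     // refcount 2
    object->Release();                       // refcount 1
    object->Release();                       // refcount 0 -> DeleteThis() frees the object
}
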
diff --git a/chromium/third_party/dawn/src/dawn/common/Result.cpp b/chromium/third_party/dawn/src/dawn/common/Result.cpp
index 2101e47d098..c009df682d5 100644
--- a/chromium/third_party/dawn/src/dawn/common/Result.cpp
+++ b/chromium/third_party/dawn/src/dawn/common/Result.cpp
@@ -17,14 +17,14 @@
// Implementation details of the tagged pointer Results
namespace detail {
- intptr_t MakePayload(const void* pointer, PayloadType type) {
- intptr_t payload = reinterpret_cast<intptr_t>(pointer);
- ASSERT((payload & 3) == 0);
- return payload | type;
- }
+intptr_t MakePayload(const void* pointer, PayloadType type) {
+ intptr_t payload = reinterpret_cast<intptr_t>(pointer);
+ ASSERT((payload & 3) == 0);
+ return payload | type;
+}
- PayloadType GetPayloadType(intptr_t payload) {
- return static_cast<PayloadType>(payload & 3);
- }
+PayloadType GetPayloadType(intptr_t payload) {
+ return static_cast<PayloadType>(payload & 3);
+}
} // namespace detail
diff --git a/chromium/third_party/dawn/src/dawn/common/Result.h b/chromium/third_party/dawn/src/dawn/common/Result.h
index d3ccbf1910a..849cd30170e 100644
--- a/chromium/third_party/dawn/src/dawn/common/Result.h
+++ b/chromium/third_party/dawn/src/dawn/common/Result.h
@@ -15,15 +15,15 @@
#ifndef SRC_DAWN_COMMON_RESULT_H_
#define SRC_DAWN_COMMON_RESULT_H_
-#include "dawn/common/Assert.h"
-#include "dawn/common/Compiler.h"
-
#include <cstddef>
#include <cstdint>
#include <memory>
#include <type_traits>
#include <utility>
+#include "dawn/common/Assert.h"
+#include "dawn/common/Compiler.h"
+
// Result<T, E> is the following sum type (Haskell notation):
//
// data Result T E = Success T | Error E | Empty
@@ -63,7 +63,7 @@ class [[nodiscard]] Result<void, E> {
Result();
Result(std::unique_ptr<E> error);
- Result(Result<void, E> && other);
+ Result(Result<void, E>&& other);
Result<void, E>& operator=(Result<void, E>&& other);
~Result();
@@ -89,23 +89,23 @@ constexpr size_t alignof_if_defined_else_default<T, Default, decltype(alignof(T)
// tagged pointer. The tag for Success is 0 so that returning the value is fastest.
namespace detail {
- // Utility functions to manipulate the tagged pointer. Some of them don't need to be templated
- // but we really want them inlined so we keep them in the headers
- enum PayloadType {
- Success = 0,
- Error = 1,
- Empty = 2,
- };
+// Utility functions to manipulate the tagged pointer. Some of them don't need to be templated
+// but we really want them inlined so we keep them in the headers
+enum PayloadType {
+ Success = 0,
+ Error = 1,
+ Empty = 2,
+};
- intptr_t MakePayload(const void* pointer, PayloadType type);
- PayloadType GetPayloadType(intptr_t payload);
+intptr_t MakePayload(const void* pointer, PayloadType type);
+PayloadType GetPayloadType(intptr_t payload);
- template <typename T>
- static T* GetSuccessFromPayload(intptr_t payload);
- template <typename E>
- static E* GetErrorFromPayload(intptr_t payload);
+template <typename T>
+static T* GetSuccessFromPayload(intptr_t payload);
+template <typename E>
+static E* GetErrorFromPayload(intptr_t payload);
- constexpr static intptr_t kEmptyPayload = Empty;
+constexpr static intptr_t kEmptyPayload = Empty;
} // namespace detail
template <typename T, typename E>
@@ -116,12 +116,12 @@ class [[nodiscard]] Result<T*, E> {
static_assert(alignof_if_defined_else_default<E, 4> >= 4,
"Result<T*, E*> reserves two bits for tagging pointers");
- Result(T * success);
+ Result(T* success);
Result(std::unique_ptr<E> error);
// Support returning a Result<T*, E*> from a Result<TChild*, E*>
template <typename TChild>
- Result(Result<TChild*, E> && other);
+ Result(Result<TChild*, E>&& other);
template <typename TChild>
Result<T*, E>& operator=(Result<TChild*, E>&& other);
@@ -151,7 +151,7 @@ class [[nodiscard]] Result<const T*, E> {
Result(const T* success);
Result(std::unique_ptr<E> error);
- Result(Result<const T*, E> && other);
+ Result(Result<const T*, E>&& other);
Result<const T*, E>& operator=(Result<const T*, E>&& other);
~Result();
@@ -178,13 +178,13 @@ class [[nodiscard]] Result<Ref<T>, E> {
"Result<Ref<T>, E> reserves two bits for tagging pointers");
template <typename U>
- Result(Ref<U> && success);
+ Result(Ref<U>&& success);
template <typename U>
Result(const Ref<U>& success);
Result(std::unique_ptr<E> error);
template <typename U>
- Result(Result<Ref<U>, E> && other);
+ Result(Result<Ref<U>, E>&& other);
template <typename U>
Result<Ref<U>, E>& operator=(Result<Ref<U>, E>&& other);
@@ -209,10 +209,10 @@ class [[nodiscard]] Result<Ref<T>, E> {
template <typename T, typename E>
class [[nodiscard]] Result {
public:
- Result(T && success);
+ Result(T&& success);
Result(std::unique_ptr<E> error);
- Result(Result<T, E> && other);
+ Result(Result<T, E>&& other);
Result<T, E>& operator=(Result<T, E>&& other);
~Result();
@@ -237,16 +237,13 @@ class [[nodiscard]] Result {
// Implementation of Result<void, E>
template <typename E>
-Result<void, E>::Result() {
-}
+Result<void, E>::Result() {}
template <typename E>
-Result<void, E>::Result(std::unique_ptr<E> error) : mError(std::move(error)) {
-}
+Result<void, E>::Result(std::unique_ptr<E> error) : mError(std::move(error)) {}
template <typename E>
-Result<void, E>::Result(Result<void, E>&& other) : mError(std::move(other.mError)) {
-}
+Result<void, E>::Result(Result<void, E>&& other) : mError(std::move(other.mError)) {}
template <typename E>
Result<void, E>& Result<void, E>::operator=(Result<void, E>&& other) {
@@ -271,8 +268,7 @@ bool Result<void, E>::IsSuccess() const {
}
template <typename E>
-void Result<void, E>::AcquireSuccess() {
-}
+void Result<void, E>::AcquireSuccess() {}
template <typename E>
std::unique_ptr<E> Result<void, E>::AcquireError() {
@@ -282,29 +278,27 @@ std::unique_ptr<E> Result<void, E>::AcquireError() {
// Implementation details of the tagged pointer Results
namespace detail {
- template <typename T>
- T* GetSuccessFromPayload(intptr_t payload) {
- ASSERT(GetPayloadType(payload) == Success);
- return reinterpret_cast<T*>(payload);
- }
+template <typename T>
+T* GetSuccessFromPayload(intptr_t payload) {
+ ASSERT(GetPayloadType(payload) == Success);
+ return reinterpret_cast<T*>(payload);
+}
- template <typename E>
- E* GetErrorFromPayload(intptr_t payload) {
- ASSERT(GetPayloadType(payload) == Error);
- return reinterpret_cast<E*>(payload ^ 1);
- }
+template <typename E>
+E* GetErrorFromPayload(intptr_t payload) {
+ ASSERT(GetPayloadType(payload) == Error);
+ return reinterpret_cast<E*>(payload ^ 1);
+}
} // namespace detail
// Implementation of Result<T*, E>
template <typename T, typename E>
-Result<T*, E>::Result(T* success) : mPayload(detail::MakePayload(success, detail::Success)) {
-}
+Result<T*, E>::Result(T* success) : mPayload(detail::MakePayload(success, detail::Success)) {}
template <typename T, typename E>
Result<T*, E>::Result(std::unique_ptr<E> error)
- : mPayload(detail::MakePayload(error.release(), detail::Error)) {
-}
+ : mPayload(detail::MakePayload(error.release(), detail::Error)) {}
template <typename T, typename E>
template <typename TChild>
@@ -355,13 +349,11 @@ std::unique_ptr<E> Result<T*, E>::AcquireError() {
// Implementation of Result<const T*, E*>
template <typename T, typename E>
Result<const T*, E>::Result(const T* success)
- : mPayload(detail::MakePayload(success, detail::Success)) {
-}
+ : mPayload(detail::MakePayload(success, detail::Success)) {}
template <typename T, typename E>
Result<const T*, E>::Result(std::unique_ptr<E> error)
- : mPayload(detail::MakePayload(error.release(), detail::Error)) {
-}
+ : mPayload(detail::MakePayload(error.release(), detail::Error)) {}
template <typename T, typename E>
Result<const T*, E>::Result(Result<const T*, E>&& other) : mPayload(other.mPayload) {
@@ -415,13 +407,11 @@ Result<Ref<T>, E>::Result(Ref<U>&& success)
template <typename T, typename E>
template <typename U>
-Result<Ref<T>, E>::Result(const Ref<U>& success) : Result(Ref<U>(success)) {
-}
+Result<Ref<T>, E>::Result(const Ref<U>& success) : Result(Ref<U>(success)) {}
template <typename T, typename E>
Result<Ref<T>, E>::Result(std::unique_ptr<E> error)
- : mPayload(detail::MakePayload(error.release(), detail::Error)) {
-}
+ : mPayload(detail::MakePayload(error.release(), detail::Error)) {}
template <typename T, typename E>
template <typename U>
@@ -473,12 +463,10 @@ std::unique_ptr<E> Result<Ref<T>, E>::AcquireError() {
// Implementation of Result<T, E>
template <typename T, typename E>
-Result<T, E>::Result(T&& success) : mType(Success), mSuccess(std::move(success)) {
-}
+Result<T, E>::Result(T&& success) : mType(Success), mSuccess(std::move(success)) {}
template <typename T, typename E>
-Result<T, E>::Result(std::unique_ptr<E> error) : mType(Error), mError(std::move(error)) {
-}
+Result<T, E>::Result(std::unique_ptr<E> error) : mType(Error), mError(std::move(error)) {}
template <typename T, typename E>
Result<T, E>::~Result() {
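
The tagged-pointer representation described above packs the success or error pointer and a two-bit PayloadType into one intptr_t, which is why both sides must be at least 4-byte aligned. A sketch of typical call-site usage; Thing, ErrorInfo and MakeThing are hypothetical:

#include <memory>
#include <string>

#include "dawn/common/Result.h"

struct Thing {
    int id;  // alignof(Thing) == 4, satisfying the static_assert above
};

struct ErrorInfo {
    std::string message;  // alignof(ErrorInfo) >= 4 as well
};

Result<Thing*, ErrorInfo> MakeThing(bool fail) {
    if (fail) {
        // Stored with the Error tag (1).
        return {std::make_unique<ErrorInfo>(ErrorInfo{"out of memory"})};
    }
    // Stored with the Success tag (0), so the success path stays fast.
    return {new Thing{42}};
}

void UseThing() {
    Result<Thing*, ErrorInfo> result = MakeThing(false);
    if (result.IsSuccess()) {
        std::unique_ptr<Thing> thing(result.AcquireSuccess());
    } else {
        std::unique_ptr<ErrorInfo> error = result.AcquireError();
    }
}
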
diff --git a/chromium/third_party/dawn/src/dawn/common/SerialMap.h b/chromium/third_party/dawn/src/dawn/common/SerialMap.h
index 74a4657f6aa..cd26099c5af 100644
--- a/chromium/third_party/dawn/src/dawn/common/SerialMap.h
+++ b/chromium/third_party/dawn/src/dawn/common/SerialMap.h
@@ -15,11 +15,11 @@
#ifndef SRC_DAWN_COMMON_SERIALMAP_H_
#define SRC_DAWN_COMMON_SERIALMAP_H_
-#include "dawn/common/SerialStorage.h"
-
#include <map>
#include <vector>
+#include "dawn/common/SerialStorage.h"
+
template <typename Serial, typename Value>
class SerialMap;
diff --git a/chromium/third_party/dawn/src/dawn/common/SerialQueue.h b/chromium/third_party/dawn/src/dawn/common/SerialQueue.h
index 0091ecaf30a..d3d4f4feb5b 100644
--- a/chromium/third_party/dawn/src/dawn/common/SerialQueue.h
+++ b/chromium/third_party/dawn/src/dawn/common/SerialQueue.h
@@ -15,10 +15,11 @@
#ifndef SRC_DAWN_COMMON_SERIALQUEUE_H_
#define SRC_DAWN_COMMON_SERIALQUEUE_H_
-#include "dawn/common/SerialStorage.h"
-
+#include <utility>
#include <vector>
+#include "dawn/common/SerialStorage.h"
+
template <typename Serial, typename Value>
class SerialQueue;
diff --git a/chromium/third_party/dawn/src/dawn/common/SerialStorage.h b/chromium/third_party/dawn/src/dawn/common/SerialStorage.h
index 98216590813..0d4c8b5d6ad 100644
--- a/chromium/third_party/dawn/src/dawn/common/SerialStorage.h
+++ b/chromium/third_party/dawn/src/dawn/common/SerialStorage.h
@@ -15,11 +15,11 @@
#ifndef SRC_DAWN_COMMON_SERIALSTORAGE_H_
#define SRC_DAWN_COMMON_SERIALSTORAGE_H_
-#include "dawn/common/Assert.h"
-
#include <cstdint>
#include <utility>
+#include "dawn/common/Assert.h"
+
template <typename T>
struct SerialStorageTraits {};
@@ -35,7 +35,7 @@ class SerialStorage {
public:
class Iterator {
public:
- Iterator(StorageIterator start);
+ explicit Iterator(StorageIterator start);
Iterator& operator++();
bool operator==(const Iterator& other) const;
@@ -52,7 +52,7 @@ class SerialStorage {
class ConstIterator {
public:
- ConstIterator(ConstStorageIterator start);
+ explicit ConstIterator(ConstStorageIterator start);
ConstIterator& operator++();
bool operator==(const ConstIterator& other) const;
@@ -193,25 +193,23 @@ typename SerialStorage<Derived>::StorageIterator SerialStorage<Derived>::FindUpT
template <typename Derived>
SerialStorage<Derived>::BeginEnd::BeginEnd(typename SerialStorage<Derived>::StorageIterator start,
typename SerialStorage<Derived>::StorageIterator end)
- : mStartIt(start), mEndIt(end) {
-}
+ : mStartIt(start), mEndIt(end) {}
template <typename Derived>
typename SerialStorage<Derived>::Iterator SerialStorage<Derived>::BeginEnd::begin() const {
- return {mStartIt};
+ return SerialStorage::Iterator(mStartIt);
}
template <typename Derived>
typename SerialStorage<Derived>::Iterator SerialStorage<Derived>::BeginEnd::end() const {
- return {mEndIt};
+ return SerialStorage::Iterator(mEndIt);
}
// SerialStorage::Iterator
template <typename Derived>
SerialStorage<Derived>::Iterator::Iterator(typename SerialStorage<Derived>::StorageIterator start)
- : mStorageIterator(start), mSerialIterator(nullptr) {
-}
+ : mStorageIterator(start), mSerialIterator(nullptr) {}
template <typename Derived>
typename SerialStorage<Derived>::Iterator& SerialStorage<Derived>::Iterator::operator++() {
@@ -257,8 +255,7 @@ template <typename Derived>
SerialStorage<Derived>::ConstBeginEnd::ConstBeginEnd(
typename SerialStorage<Derived>::ConstStorageIterator start,
typename SerialStorage<Derived>::ConstStorageIterator end)
- : mStartIt(start), mEndIt(end) {
-}
+ : mStartIt(start), mEndIt(end) {}
template <typename Derived>
typename SerialStorage<Derived>::ConstIterator SerialStorage<Derived>::ConstBeginEnd::begin()
@@ -276,8 +273,7 @@ typename SerialStorage<Derived>::ConstIterator SerialStorage<Derived>::ConstBegi
template <typename Derived>
SerialStorage<Derived>::ConstIterator::ConstIterator(
typename SerialStorage<Derived>::ConstStorageIterator start)
- : mStorageIterator(start), mSerialIterator(nullptr) {
-}
+ : mStorageIterator(start), mSerialIterator(nullptr) {}
template <typename Derived>
typename SerialStorage<Derived>::ConstIterator&
diff --git a/chromium/third_party/dawn/src/dawn/common/SlabAllocator.cpp b/chromium/third_party/dawn/src/dawn/common/SlabAllocator.cpp
index d680ee36c12..b4d18275c2c 100644
--- a/chromium/third_party/dawn/src/dawn/common/SlabAllocator.cpp
+++ b/chromium/third_party/dawn/src/dawn/common/SlabAllocator.cpp
@@ -14,30 +14,27 @@
#include "dawn/common/SlabAllocator.h"
-#include "dawn/common/Assert.h"
-#include "dawn/common/Math.h"
-
#include <algorithm>
#include <cstdlib>
#include <limits>
#include <new>
+#include "dawn/common/Assert.h"
+#include "dawn/common/Math.h"
+
// IndexLinkNode
SlabAllocatorImpl::IndexLinkNode::IndexLinkNode(Index index, Index nextIndex)
- : index(index), nextIndex(nextIndex) {
-}
+ : index(index), nextIndex(nextIndex) {}
// Slab
SlabAllocatorImpl::Slab::Slab(char allocation[], IndexLinkNode* head)
- : allocation(allocation), freeList(head), prev(nullptr), next(nullptr), blocksInUse(0) {
-}
+ : allocation(allocation), freeList(head), prev(nullptr), next(nullptr), blocksInUse(0) {}
SlabAllocatorImpl::Slab::Slab(Slab&& rhs) = default;
-SlabAllocatorImpl::SentinelSlab::SentinelSlab() : Slab(nullptr, nullptr) {
-}
+SlabAllocatorImpl::SentinelSlab::SentinelSlab() : Slab(nullptr, nullptr) {}
SlabAllocatorImpl::SentinelSlab::SentinelSlab(SentinelSlab&& rhs) = default;
@@ -60,10 +57,10 @@ SlabAllocatorImpl::Index SlabAllocatorImpl::kInvalidIndex =
SlabAllocatorImpl::SlabAllocatorImpl(Index blocksPerSlab,
uint32_t objectSize,
uint32_t objectAlignment)
- : mAllocationAlignment(std::max(static_cast<uint32_t>(alignof(Slab)), objectAlignment)),
- mSlabBlocksOffset(Align(sizeof(Slab), objectAlignment)),
+ : mAllocationAlignment(std::max(u32_alignof<Slab>, objectAlignment)),
+ mSlabBlocksOffset(Align(u32_sizeof<Slab>, objectAlignment)),
mIndexLinkNodeOffset(Align(objectSize, alignof(IndexLinkNode))),
- mBlockStride(Align(mIndexLinkNodeOffset + sizeof(IndexLinkNode), objectAlignment)),
+ mBlockStride(Align(mIndexLinkNodeOffset + u32_sizeof<IndexLinkNode>, objectAlignment)),
mBlocksPerSlab(blocksPerSlab),
mTotalAllocationSize(
// required allocation size
@@ -83,8 +80,7 @@ SlabAllocatorImpl::SlabAllocatorImpl(SlabAllocatorImpl&& rhs)
mTotalAllocationSize(rhs.mTotalAllocationSize),
mAvailableSlabs(std::move(rhs.mAvailableSlabs)),
mFullSlabs(std::move(rhs.mFullSlabs)),
- mRecycledSlabs(std::move(rhs.mRecycledSlabs)) {
-}
+ mRecycledSlabs(std::move(rhs.mRecycledSlabs)) {}
SlabAllocatorImpl::~SlabAllocatorImpl() = default;
diff --git a/chromium/third_party/dawn/src/dawn/common/SlabAllocator.h b/chromium/third_party/dawn/src/dawn/common/SlabAllocator.h
index 3a6f3485d22..e828dea40ca 100644
--- a/chromium/third_party/dawn/src/dawn/common/SlabAllocator.h
+++ b/chromium/third_party/dawn/src/dawn/common/SlabAllocator.h
@@ -15,12 +15,13 @@
#ifndef SRC_DAWN_COMMON_SLABALLOCATOR_H_
#define SRC_DAWN_COMMON_SLABALLOCATOR_H_
-#include "dawn/common/PlacementAllocated.h"
-
#include <cstdint>
#include <type_traits>
#include <utility>
+#include "dawn/common/Numeric.h"
+#include "dawn/common/PlacementAllocated.h"
+
// The SlabAllocator allocates objects out of one or more fixed-size contiguous "slabs" of memory.
// This makes it very quick to allocate and deallocate fixed-size objects because the allocator only
// needs to index an offset into pre-allocated memory. It is similar to a pool-allocator that
@@ -165,10 +166,9 @@ template <typename T>
class SlabAllocator : public SlabAllocatorImpl {
public:
SlabAllocator(size_t totalObjectBytes,
- uint32_t objectSize = sizeof(T),
- uint32_t objectAlignment = alignof(T))
- : SlabAllocatorImpl(totalObjectBytes / objectSize, objectSize, objectAlignment) {
- }
+ uint32_t objectSize = u32_sizeof<T>,
+ uint32_t objectAlignment = u32_alignof<T>)
+ : SlabAllocatorImpl(totalObjectBytes / objectSize, objectSize, objectAlignment) {}
template <typename... Args>
T* Allocate(Args&&... args) {
@@ -176,9 +176,7 @@ class SlabAllocator : public SlabAllocatorImpl {
return new (ptr) T(std::forward<Args>(args)...);
}
- void Deallocate(T* object) {
- SlabAllocatorImpl::Deallocate(object);
- }
+ void Deallocate(T* object) { SlabAllocatorImpl::Deallocate(object); }
};
#endif // SRC_DAWN_COMMON_SLABALLOCATOR_H_
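
A rough usage sketch of the SlabAllocator<T> interface above, assuming a build inside the Dawn tree; Widget, its PlacementAllocated base, and the byte budget are illustrative assumptions rather than anything taken from this change.

#include "dawn/common/PlacementAllocated.h"
#include "dawn/common/SlabAllocator.h"

// Hypothetical slab-allocated object; deriving from PlacementAllocated is an
// assumption about the contract expected of objects placed into slabs.
struct Widget : public PlacementAllocated {
    explicit Widget(int v) : value(v) {}
    int value;
};

void SlabAllocatorSketch() {
    // Roughly 4 KiB worth of Widgets per slab; objectSize and objectAlignment
    // default to u32_sizeof<Widget> and u32_alignof<Widget> as declared above.
    SlabAllocator<Widget> allocator(4096);

    Widget* w = allocator.Allocate(42);  // placement-constructs a Widget in a slab block
    // ... use w ...
    allocator.Deallocate(w);             // hands the block back to the slab's free list
}
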
diff --git a/chromium/third_party/dawn/src/dawn/common/StackContainer.h b/chromium/third_party/dawn/src/dawn/common/StackContainer.h
index 1efcc054c7e..1d1d7ca401a 100644
--- a/chromium/third_party/dawn/src/dawn/common/StackContainer.h
+++ b/chromium/third_party/dawn/src/dawn/common/StackContainer.h
@@ -7,11 +7,12 @@
#ifndef SRC_DAWN_COMMON_STACKCONTAINER_H_
#define SRC_DAWN_COMMON_STACKCONTAINER_H_
-#include "dawn/common/Compiler.h"
-
#include <cstddef>
+#include <memory>
#include <vector>
+#include "dawn/common/Compiler.h"
+
// This allocator can be used with STL containers to provide a stack buffer
// from which to allocate memory and overflows onto the heap. This stack buffer
// would be allocated on the stack and allows us to avoid heap operations in
@@ -33,29 +34,24 @@
template <typename T, size_t stack_capacity>
class StackAllocator : public std::allocator<T> {
public:
- typedef typename std::allocator<T>::pointer pointer;
- typedef typename std::allocator<T>::size_type size_type;
+ typedef typename std::allocator_traits<std::allocator<T>>::pointer pointer;
+ typedef typename std::allocator_traits<std::allocator<T>>::size_type size_type;
// Backing store for the allocator. The container owner is responsible for
// maintaining this for as long as any containers using this allocator are
// live.
struct Source {
- Source() : used_stack_buffer_(false) {
- }
+ Source() : used_stack_buffer_(false) {}
// Casts the buffer in its right type.
- T* stack_buffer() {
- return reinterpret_cast<T*>(stack_buffer_);
- }
- const T* stack_buffer() const {
- return reinterpret_cast<const T*>(&stack_buffer_);
- }
+ T* stack_buffer() { return reinterpret_cast<T*>(stack_buffer_); }
+ const T* stack_buffer() const { return reinterpret_cast<const T*>(&stack_buffer_); }
// The buffer itself. It is not of type T because we don't want the
// constructors and destructors to be automatically called. Define a POD
// buffer of the right size instead.
alignas(T) char stack_buffer_[sizeof(T[stack_capacity])];
-#if defined(DAWN_COMPILER_GCC) && !defined(__x86_64__) && !defined(__i386__)
+#if DAWN_COMPILER_IS(GCC) && !defined(__x86_64__) && !defined(__i386__)
static_assert(alignof(T) <= 16, "http://crbug.com/115612");
#endif
@@ -72,8 +68,7 @@ class StackAllocator : public std::allocator<T> {
// For the straight up copy c-tor, we can share storage.
StackAllocator(const StackAllocator<T, stack_capacity>& rhs)
- : std::allocator<T>(), source_(rhs.source_) {
- }
+ : std::allocator<T>(), source_(rhs.source_) {}
// ISO C++ requires the following constructor to be defined,
// and std::vector in VC++2008SP1 Release fails with an error
@@ -82,21 +77,16 @@ class StackAllocator : public std::allocator<T> {
// For this constructor, we cannot share storage; there's
// no guarantee that the Source buffer of Ts is large enough
// for Us.
- // TODO: If we were fancy pants, perhaps we could share storage
- // iff sizeof(T) == sizeof(U).
template <typename U, size_t other_capacity>
- StackAllocator(const StackAllocator<U, other_capacity>& other) : source_(nullptr) {
- }
+ StackAllocator(const StackAllocator<U, other_capacity>& other) : source_(nullptr) {}
// This constructor must exist. It creates a default allocator that doesn't
// actually have a stack buffer. glibc's std::string() will compare the
// current allocator against the default-constructed allocator, so this
// should be fast.
- StackAllocator() : source_(nullptr) {
- }
+ StackAllocator() : source_(nullptr) {}
- explicit StackAllocator(Source* source) : source_(source) {
- }
+ explicit StackAllocator(Source* source) : source_(source) {}
// Actually do the allocation. Use the stack buffer if nobody has used it yet
// and the size requested fits. Otherwise, fall through to the standard
@@ -113,10 +103,11 @@ class StackAllocator : public std::allocator<T> {
// Free: when trying to free the stack buffer, just mark it as free. For
// non-stack-buffer pointers, just fall though to the standard allocator.
void deallocate(pointer p, size_type n) {
- if (source_ && p == source_->stack_buffer())
+ if (source_ && p == source_->stack_buffer()) {
source_->used_stack_buffer_ = false;
- else
+ } else {
std::allocator<T>::deallocate(p, n);
+ }
}
private:
@@ -155,28 +146,18 @@ class StackContainer {
// shorter lifetimes than the source. The copy will share the same allocator
// and therefore the same stack buffer as the original. Use std::copy to
// copy into a "real" container for longer-lived objects.
- ContainerType& container() {
- return container_;
- }
- const ContainerType& container() const {
- return container_;
- }
+ ContainerType& container() { return container_; }
+ const ContainerType& container() const { return container_; }
// Support operator-> to get to the container. This allows nicer syntax like:
// StackContainer<...> foo;
// std::sort(foo->begin(), foo->end());
- ContainerType* operator->() {
- return &container_;
- }
- const ContainerType* operator->() const {
- return &container_;
- }
+ ContainerType* operator->() { return &container_; }
+ const ContainerType* operator->() const { return &container_; }
// Retrieves the stack source so that that unit tests can verify that the
    // Retrieves the stack source so that unit tests can verify that the
// buffer is being used properly.
- const typename Allocator::Source& stack_data() const {
- return stack_data_;
- }
+ const typename Allocator::Source& stack_data() const { return stack_data_; }
protected:
typename Allocator::Source stack_data_;
@@ -226,8 +207,7 @@ class StackVector
: public StackContainer<std::vector<T, StackAllocator<T, stack_capacity>>, stack_capacity> {
public:
StackVector()
- : StackContainer<std::vector<T, StackAllocator<T, stack_capacity>>, stack_capacity>() {
- }
+ : StackContainer<std::vector<T, StackAllocator<T, stack_capacity>>, stack_capacity>() {}
// We need to put this in STL containers sometimes, which requires a copy
// constructor. We can't call the regular copy constructor because that will
@@ -245,12 +225,8 @@ class StackVector
// Vectors are commonly indexed, which isn't very convenient even with
// operator-> (using "->at()" does exception stuff we don't want).
- T& operator[](size_t i) {
- return this->container().operator[](i);
- }
- const T& operator[](size_t i) const {
- return this->container().operator[](i);
- }
+ T& operator[](size_t i) { return this->container().operator[](i); }
+ const T& operator[](size_t i) const { return this->container().operator[](i); }
private:
// StackVector(const StackVector& rhs) = delete;
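
A minimal sketch of the StackVector usage pattern described in the comments above; the element type and capacity are arbitrary. The inline buffer serves the first stack_capacity elements and anything beyond that falls through to the heap allocator.

#include <algorithm>

#include "dawn/common/StackContainer.h"

void StackVectorSketch() {
    StackVector<int, 8> values;  // room for 8 ints in the inline stack buffer
    values->push_back(3);        // operator-> exposes the underlying std::vector
    values->push_back(1);
    values->push_back(2);
    std::sort(values->begin(), values->end());
    int smallest = values[0];    // operator[] forwards to the container
    (void)smallest;
}
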
diff --git a/chromium/third_party/dawn/src/dawn/common/SystemUtils.cpp b/chromium/third_party/dawn/src/dawn/common/SystemUtils.cpp
index a5ce0f15402..cad35955d6e 100644
--- a/chromium/third_party/dawn/src/dawn/common/SystemUtils.cpp
+++ b/chromium/third_party/dawn/src/dawn/common/SystemUtils.cpp
@@ -17,23 +17,23 @@
#include "dawn/common/Assert.h"
#include "dawn/common/Log.h"
-#if defined(DAWN_PLATFORM_WINDOWS)
-# include <Windows.h>
-# include <vector>
-#elif defined(DAWN_PLATFORM_LINUX)
-# include <dlfcn.h>
-# include <limits.h>
-# include <unistd.h>
-# include <cstdlib>
-#elif defined(DAWN_PLATFORM_MACOS) || defined(DAWN_PLATFORM_IOS)
-# include <dlfcn.h>
-# include <mach-o/dyld.h>
-# include <vector>
+#if DAWN_PLATFORM_IS(WINDOWS)
+#include <Windows.h>
+#include <vector>
+#elif DAWN_PLATFORM_IS(LINUX)
+#include <dlfcn.h>
+#include <limits.h>
+#include <unistd.h>
+#include <cstdlib>
+#elif DAWN_PLATFORM_IS(MACOS) || DAWN_PLATFORM_IS(IOS)
+#include <dlfcn.h>
+#include <mach-o/dyld.h>
+#include <vector>
#endif
#include <array>
-#if defined(DAWN_PLATFORM_WINDOWS)
+#if DAWN_PLATFORM_IS(WINDOWS)
const char* GetPathSeparator() {
return "\\";
}
@@ -66,7 +66,7 @@ std::pair<std::string, bool> GetEnvironmentVar(const char* variableName) {
bool SetEnvironmentVar(const char* variableName, const char* value) {
return SetEnvironmentVariableA(variableName, value) == TRUE;
}
-#elif defined(DAWN_PLATFORM_POSIX)
+#elif DAWN_PLATFORM_IS(POSIX)
const char* GetPathSeparator() {
return "/";
}
@@ -84,10 +84,10 @@ bool SetEnvironmentVar(const char* variableName, const char* value) {
return setenv(variableName, value, 1) == 0;
}
#else
-# error "Implement Get/SetEnvironmentVar for your platform."
+#error "Implement Get/SetEnvironmentVar for your platform."
#endif
-#if defined(DAWN_PLATFORM_WINDOWS)
+#if DAWN_PLATFORM_IS(WINDOWS)
std::optional<std::string> GetHModulePath(HMODULE module) {
std::array<char, MAX_PATH> executableFileBuf;
DWORD executablePathLen = GetModuleFileNameA(nullptr, executableFileBuf.data(),
@@ -100,7 +100,7 @@ std::optional<std::string> GetHModulePath(HMODULE module) {
std::optional<std::string> GetExecutablePath() {
return GetHModulePath(nullptr);
}
-#elif defined(DAWN_PLATFORM_LINUX)
+#elif DAWN_PLATFORM_IS(LINUX)
std::optional<std::string> GetExecutablePath() {
std::array<char, PATH_MAX> path;
ssize_t result = readlink("/proc/self/exe", path.data(), PATH_MAX - 1);
@@ -111,7 +111,7 @@ std::optional<std::string> GetExecutablePath() {
path[result] = '\0';
return path.data();
}
-#elif defined(DAWN_PLATFORM_MACOS) || defined(DAWN_PLATFORM_IOS)
+#elif DAWN_PLATFORM_IS(MACOS) || DAWN_PLATFORM_IS(IOS)
std::optional<std::string> GetExecutablePath() {
uint32_t size = 0;
_NSGetExecutablePath(nullptr, &size);
@@ -124,17 +124,17 @@ std::optional<std::string> GetExecutablePath() {
buffer[size] = '\0';
return buffer.data();
}
-#elif defined(DAWN_PLATFORM_FUCHSIA)
+#elif DAWN_PLATFORM_IS(FUCHSIA)
std::optional<std::string> GetExecutablePath() {
- // TODO: Implement on Fuchsia
+ // UNIMPLEMENTED
return {};
}
-#elif defined(DAWN_PLATFORM_EMSCRIPTEN)
+#elif DAWN_PLATFORM_IS(EMSCRIPTEN)
std::optional<std::string> GetExecutablePath() {
return {};
}
#else
-# error "Implement GetExecutablePath for your platform."
+#error "Implement GetExecutablePath for your platform."
#endif
std::optional<std::string> GetExecutableDirectory() {
@@ -149,7 +149,7 @@ std::optional<std::string> GetExecutableDirectory() {
return exePath->substr(0, lastPathSepLoc + 1);
}
-#if defined(DAWN_PLATFORM_LINUX) || defined(DAWN_PLATFORM_MACOS) || defined(DAWN_PLATFORM_IOS)
+#if DAWN_PLATFORM_IS(LINUX) || DAWN_PLATFORM_IS(MACOS) || DAWN_PLATFORM_IS(IOS)
std::optional<std::string> GetModulePath() {
static int placeholderSymbol = 0;
Dl_info dlInfo;
@@ -163,32 +163,32 @@ std::optional<std::string> GetModulePath() {
}
return absolutePath.data();
}
-#elif defined(DAWN_PLATFORM_WINDOWS)
+#elif DAWN_PLATFORM_IS(WINDOWS)
std::optional<std::string> GetModulePath() {
static int placeholderSymbol = 0;
HMODULE module = nullptr;
// GetModuleHandleEx is unavailable on UWP
-# if defined(DAWN_IS_WINUWP)
+#if defined(DAWN_IS_WINUWP)
return {};
-# else
+#else
if (!GetModuleHandleExA(
GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS | GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT,
reinterpret_cast<LPCSTR>(&placeholderSymbol), &module)) {
return {};
}
-# endif
+#endif
return GetHModulePath(module);
}
-#elif defined(DAWN_PLATFORM_FUCHSIA)
+#elif DAWN_PLATFORM_IS(FUCHSIA)
std::optional<std::string> GetModulePath() {
return {};
}
-#elif defined(DAWN_PLATFORM_EMSCRIPTEN)
+#elif DAWN_PLATFORM_IS(EMSCRIPTEN)
std::optional<std::string> GetModulePath() {
return {};
}
#else
-# error "Implement GetModulePath for your platform."
+#error "Implement GetModulePath for your platform."
#endif
std::optional<std::string> GetModuleDirectory() {
@@ -205,11 +205,12 @@ std::optional<std::string> GetModuleDirectory() {
// ScopedEnvironmentVar
+ScopedEnvironmentVar::ScopedEnvironmentVar() = default;
+
ScopedEnvironmentVar::ScopedEnvironmentVar(const char* variableName, const char* value)
: mName(variableName),
mOriginalValue(GetEnvironmentVar(variableName)),
- mIsSet(SetEnvironmentVar(variableName, value)) {
-}
+ mIsSet(SetEnvironmentVar(variableName, value)) {}
ScopedEnvironmentVar::~ScopedEnvironmentVar() {
if (mIsSet) {
diff --git a/chromium/third_party/dawn/src/dawn/common/SystemUtils.h b/chromium/third_party/dawn/src/dawn/common/SystemUtils.h
index 31b39023e6c..ed37085348c 100644
--- a/chromium/third_party/dawn/src/dawn/common/SystemUtils.h
+++ b/chromium/third_party/dawn/src/dawn/common/SystemUtils.h
@@ -15,10 +15,11 @@
#ifndef SRC_DAWN_COMMON_SYSTEMUTILS_H_
#define SRC_DAWN_COMMON_SYSTEMUTILS_H_
-#include "dawn/common/Platform.h"
-
#include <optional>
#include <string>
+#include <utility>
+
+#include "dawn/common/Platform.h"
const char* GetPathSeparator();
// Returns a pair of the environment variable's value, and a boolean indicating whether the variable
@@ -32,14 +33,14 @@ bool SetEnvironmentVar(const char* variableName, const char* value);
std::optional<std::string> GetExecutableDirectory();
std::optional<std::string> GetModuleDirectory();
-#ifdef DAWN_PLATFORM_MACOS
+#if DAWN_PLATFORM_IS(MACOS)
void GetMacOSVersion(int32_t* majorVersion, int32_t* minorVersion = nullptr);
bool IsMacOSVersionAtLeast(uint32_t majorVersion, uint32_t minorVersion = 0);
#endif
class ScopedEnvironmentVar {
public:
- ScopedEnvironmentVar() = default;
+ ScopedEnvironmentVar();
ScopedEnvironmentVar(const char* variableName, const char* value);
~ScopedEnvironmentVar();
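
A small sketch of the ScopedEnvironmentVar and GetEnvironmentVar API declared above; the variable name is made up, and the restore-on-destruction behavior is inferred from the constructor and destructor shown in SystemUtils.cpp.

#include <string>
#include <utility>

#include "dawn/common/SystemUtils.h"

void EnvVarSketch() {
    {
        // Sets DAWN_EXAMPLE_VAR (hypothetical name) for the lifetime of |var|.
        ScopedEnvironmentVar var("DAWN_EXAMPLE_VAR", "1");

        // GetEnvironmentVar returns {value, wasFound}.
        std::pair<std::string, bool> value = GetEnvironmentVar("DAWN_EXAMPLE_VAR");
        (void)value;
    }
    // The previous value (or absence) of DAWN_EXAMPLE_VAR is restored here.
}
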
diff --git a/chromium/third_party/dawn/src/dawn/common/TypedInteger.h b/chromium/third_party/dawn/src/dawn/common/TypedInteger.h
index d00160c350b..b9d43a4468b 100644
--- a/chromium/third_party/dawn/src/dawn/common/TypedInteger.h
+++ b/chromium/third_party/dawn/src/dawn/common/TypedInteger.h
@@ -15,12 +15,12 @@
#ifndef SRC_DAWN_COMMON_TYPEDINTEGER_H_
#define SRC_DAWN_COMMON_TYPEDINTEGER_H_
-#include "dawn/common/Assert.h"
-#include "dawn/common/UnderlyingType.h"
-
#include <limits>
#include <type_traits>
+#include "dawn/common/Assert.h"
+#include "dawn/common/UnderlyingType.h"
+
// TypedInteger is a helper class that provides additional type safety in Debug.
// - Integers of different (Tag, BaseIntegerType) may not be used interoperably
// - Allows casts only to the underlying type.
@@ -50,8 +50,8 @@
// uint32_t aValue = static_cast<uint32_t>(a);
//
namespace detail {
- template <typename Tag, typename T>
- class TypedIntegerImpl;
+template <typename Tag, typename T>
+class TypedIntegerImpl;
} // namespace detail
template <typename Tag, typename T, typename = std::enable_if_t<std::is_integral<T>::value>>
@@ -62,200 +62,198 @@ using TypedInteger = T;
#endif
namespace detail {
- template <typename Tag, typename T>
- class alignas(T) TypedIntegerImpl {
- static_assert(std::is_integral<T>::value, "TypedInteger must be integral");
- T mValue;
-
- public:
- constexpr TypedIntegerImpl() : mValue(0) {
- static_assert(alignof(TypedIntegerImpl) == alignof(T));
- static_assert(sizeof(TypedIntegerImpl) == sizeof(T));
- }
+template <typename Tag, typename T>
+class alignas(T) TypedIntegerImpl {
+ static_assert(std::is_integral<T>::value, "TypedInteger must be integral");
+ T mValue;
+
+ public:
+ constexpr TypedIntegerImpl() : mValue(0) {
+ static_assert(alignof(TypedIntegerImpl) == alignof(T));
+ static_assert(sizeof(TypedIntegerImpl) == sizeof(T));
+ }
- // Construction from non-narrowing integral types.
- template <typename I,
- typename = std::enable_if_t<
- std::is_integral<I>::value &&
- std::numeric_limits<I>::max() <= std::numeric_limits<T>::max() &&
- std::numeric_limits<I>::min() >= std::numeric_limits<T>::min()>>
- explicit constexpr TypedIntegerImpl(I rhs) : mValue(static_cast<T>(rhs)) {
- }
+ // Construction from non-narrowing integral types.
+ template <typename I,
+ typename =
+ std::enable_if_t<std::is_integral<I>::value &&
+ std::numeric_limits<I>::max() <= std::numeric_limits<T>::max() &&
+ std::numeric_limits<I>::min() >= std::numeric_limits<T>::min()>>
+ explicit constexpr TypedIntegerImpl(I rhs) : mValue(static_cast<T>(rhs)) {}
- // Allow explicit casts only to the underlying type. If you're casting out of an
- // TypedInteger, you should know what what you're doing, and exactly what type you
- // expect.
- explicit constexpr operator T() const {
- return static_cast<T>(this->mValue);
- }
+    // Allow explicit casts only to the underlying type. If you're casting out of a
+    // TypedInteger, you should know what you're doing, and exactly what type you
+ // expect.
+ explicit constexpr operator T() const { return static_cast<T>(this->mValue); }
// Same-tag TypedInteger comparison operators
-#define TYPED_COMPARISON(op) \
- constexpr bool operator op(const TypedIntegerImpl& rhs) const { \
- return mValue op rhs.mValue; \
- }
- TYPED_COMPARISON(<)
- TYPED_COMPARISON(<=)
- TYPED_COMPARISON(>)
- TYPED_COMPARISON(>=)
- TYPED_COMPARISON(==)
- TYPED_COMPARISON(!=)
+#define TYPED_COMPARISON(op) \
+ constexpr bool operator op(const TypedIntegerImpl& rhs) const { return mValue op rhs.mValue; }
+ TYPED_COMPARISON(<)
+ TYPED_COMPARISON(<=)
+ TYPED_COMPARISON(>)
+ TYPED_COMPARISON(>=)
+ TYPED_COMPARISON(==)
+ TYPED_COMPARISON(!=)
#undef TYPED_COMPARISON
- // Increment / decrement operators for for-loop iteration
- constexpr TypedIntegerImpl& operator++() {
- ASSERT(this->mValue < std::numeric_limits<T>::max());
- ++this->mValue;
- return *this;
- }
+ // Increment / decrement operators for for-loop iteration
+ constexpr TypedIntegerImpl& operator++() {
+ ASSERT(this->mValue < std::numeric_limits<T>::max());
+ ++this->mValue;
+ return *this;
+ }
- constexpr TypedIntegerImpl operator++(int) {
- TypedIntegerImpl ret = *this;
+ constexpr TypedIntegerImpl operator++(int) {
+ TypedIntegerImpl ret = *this;
- ASSERT(this->mValue < std::numeric_limits<T>::max());
- ++this->mValue;
- return ret;
- }
+ ASSERT(this->mValue < std::numeric_limits<T>::max());
+ ++this->mValue;
+ return ret;
+ }
- constexpr TypedIntegerImpl& operator--() {
- assert(this->mValue > std::numeric_limits<T>::min());
- --this->mValue;
- return *this;
- }
+ constexpr TypedIntegerImpl& operator--() {
+ ASSERT(this->mValue > std::numeric_limits<T>::min());
+ --this->mValue;
+ return *this;
+ }
- constexpr TypedIntegerImpl operator--(int) {
- TypedIntegerImpl ret = *this;
+ constexpr TypedIntegerImpl operator--(int) {
+ TypedIntegerImpl ret = *this;
- ASSERT(this->mValue > std::numeric_limits<T>::min());
- --this->mValue;
- return ret;
- }
+ ASSERT(this->mValue > std::numeric_limits<T>::min());
+ --this->mValue;
+ return ret;
+ }
- template <typename T2 = T>
- static constexpr std::enable_if_t<std::is_unsigned<T2>::value, decltype(T(0) + T2(0))>
- AddImpl(TypedIntegerImpl<Tag, T> lhs, TypedIntegerImpl<Tag, T2> rhs) {
- static_assert(std::is_same<T, T2>::value);
+ template <typename T2 = T>
+ static constexpr std::enable_if_t<std::is_unsigned<T2>::value, decltype(T(0) + T2(0))> AddImpl(
+ TypedIntegerImpl<Tag, T> lhs,
+ TypedIntegerImpl<Tag, T2> rhs) {
+ static_assert(std::is_same<T, T2>::value);
- // Overflow would wrap around
- ASSERT(lhs.mValue + rhs.mValue >= lhs.mValue);
- return lhs.mValue + rhs.mValue;
- }
+ // Overflow would wrap around
+ ASSERT(lhs.mValue + rhs.mValue >= lhs.mValue);
+ return lhs.mValue + rhs.mValue;
+ }
- template <typename T2 = T>
- static constexpr std::enable_if_t<std::is_signed<T2>::value, decltype(T(0) + T2(0))>
- AddImpl(TypedIntegerImpl<Tag, T> lhs, TypedIntegerImpl<Tag, T2> rhs) {
- static_assert(std::is_same<T, T2>::value);
-
- if (lhs.mValue > 0) {
- // rhs is positive: |rhs| is at most the distance between max and |lhs|.
- // rhs is negative: (positive + negative) won't overflow
- ASSERT(rhs.mValue <= std::numeric_limits<T>::max() - lhs.mValue);
- } else {
- // rhs is postive: (negative + positive) won't underflow
- // rhs is negative: |rhs| isn't less than the (negative) distance between min
- // and |lhs|
- ASSERT(rhs.mValue >= std::numeric_limits<T>::min() - lhs.mValue);
- }
- return lhs.mValue + rhs.mValue;
+ template <typename T2 = T>
+ static constexpr std::enable_if_t<std::is_signed<T2>::value, decltype(T(0) + T2(0))> AddImpl(
+ TypedIntegerImpl<Tag, T> lhs,
+ TypedIntegerImpl<Tag, T2> rhs) {
+ static_assert(std::is_same<T, T2>::value);
+
+ if (lhs.mValue > 0) {
+ // rhs is positive: |rhs| is at most the distance between max and |lhs|.
+ // rhs is negative: (positive + negative) won't overflow
+ ASSERT(rhs.mValue <= std::numeric_limits<T>::max() - lhs.mValue);
+ } else {
+            // rhs is positive: (negative + positive) won't underflow
+ // rhs is negative: |rhs| isn't less than the (negative) distance between min
+ // and |lhs|
+ ASSERT(rhs.mValue >= std::numeric_limits<T>::min() - lhs.mValue);
}
+ return lhs.mValue + rhs.mValue;
+ }
- template <typename T2 = T>
- static constexpr std::enable_if_t<std::is_unsigned<T>::value, decltype(T(0) - T2(0))>
- SubImpl(TypedIntegerImpl<Tag, T> lhs, TypedIntegerImpl<Tag, T2> rhs) {
- static_assert(std::is_same<T, T2>::value);
+ template <typename T2 = T>
+ static constexpr std::enable_if_t<std::is_unsigned<T>::value, decltype(T(0) - T2(0))> SubImpl(
+ TypedIntegerImpl<Tag, T> lhs,
+ TypedIntegerImpl<Tag, T2> rhs) {
+ static_assert(std::is_same<T, T2>::value);
- // Overflow would wrap around
- ASSERT(lhs.mValue - rhs.mValue <= lhs.mValue);
- return lhs.mValue - rhs.mValue;
- }
+ // Overflow would wrap around
+ ASSERT(lhs.mValue - rhs.mValue <= lhs.mValue);
+ return lhs.mValue - rhs.mValue;
+ }
- template <typename T2 = T>
- static constexpr std::enable_if_t<std::is_signed<T>::value, decltype(T(0) - T2(0))> SubImpl(
- TypedIntegerImpl<Tag, T> lhs,
- TypedIntegerImpl<Tag, T2> rhs) {
- static_assert(std::is_same<T, T2>::value);
-
- if (lhs.mValue > 0) {
- // rhs is positive: positive minus positive won't overflow
- // rhs is negative: |rhs| isn't less than the (negative) distance between |lhs|
- // and max.
- ASSERT(rhs.mValue >= lhs.mValue - std::numeric_limits<T>::max());
- } else {
- // rhs is positive: |rhs| is at most the distance between min and |lhs|
- // rhs is negative: negative minus negative won't overflow
- ASSERT(rhs.mValue <= lhs.mValue - std::numeric_limits<T>::min());
- }
- return lhs.mValue - rhs.mValue;
+ template <typename T2 = T>
+ static constexpr std::enable_if_t<std::is_signed<T>::value, decltype(T(0) - T2(0))> SubImpl(
+ TypedIntegerImpl<Tag, T> lhs,
+ TypedIntegerImpl<Tag, T2> rhs) {
+ static_assert(std::is_same<T, T2>::value);
+
+ if (lhs.mValue > 0) {
+ // rhs is positive: positive minus positive won't overflow
+ // rhs is negative: |rhs| isn't less than the (negative) distance between |lhs|
+ // and max.
+ ASSERT(rhs.mValue >= lhs.mValue - std::numeric_limits<T>::max());
+ } else {
+ // rhs is positive: |rhs| is at most the distance between min and |lhs|
+ // rhs is negative: negative minus negative won't overflow
+ ASSERT(rhs.mValue <= lhs.mValue - std::numeric_limits<T>::min());
}
+ return lhs.mValue - rhs.mValue;
+ }
- template <typename T2 = T>
- constexpr std::enable_if_t<std::is_signed<T2>::value, TypedIntegerImpl> operator-() const {
- static_assert(std::is_same<T, T2>::value);
- // The negation of the most negative value cannot be represented.
- ASSERT(this->mValue != std::numeric_limits<T>::min());
- return TypedIntegerImpl(-this->mValue);
- }
+ template <typename T2 = T>
+ constexpr std::enable_if_t<std::is_signed<T2>::value, TypedIntegerImpl> operator-() const {
+ static_assert(std::is_same<T, T2>::value);
+ // The negation of the most negative value cannot be represented.
+ ASSERT(this->mValue != std::numeric_limits<T>::min());
+ return TypedIntegerImpl(-this->mValue);
+ }
- constexpr TypedIntegerImpl operator+(TypedIntegerImpl rhs) const {
- auto result = AddImpl(*this, rhs);
- static_assert(std::is_same<T, decltype(result)>::value, "Use ityp::Add instead.");
- return TypedIntegerImpl(result);
- }
+ constexpr TypedIntegerImpl operator+(TypedIntegerImpl rhs) const {
+ auto result = AddImpl(*this, rhs);
+ static_assert(std::is_same<T, decltype(result)>::value, "Use ityp::Add instead.");
+ return TypedIntegerImpl(result);
+ }
- constexpr TypedIntegerImpl operator-(TypedIntegerImpl rhs) const {
- auto result = SubImpl(*this, rhs);
- static_assert(std::is_same<T, decltype(result)>::value, "Use ityp::Sub instead.");
- return TypedIntegerImpl(result);
- }
- };
+ constexpr TypedIntegerImpl operator-(TypedIntegerImpl rhs) const {
+ auto result = SubImpl(*this, rhs);
+ static_assert(std::is_same<T, decltype(result)>::value, "Use ityp::Sub instead.");
+ return TypedIntegerImpl(result);
+ }
+};
} // namespace detail
namespace std {
- template <typename Tag, typename T>
- class numeric_limits<detail::TypedIntegerImpl<Tag, T>> : public numeric_limits<T> {
- public:
- static detail::TypedIntegerImpl<Tag, T> max() noexcept {
- return detail::TypedIntegerImpl<Tag, T>(std::numeric_limits<T>::max());
- }
- static detail::TypedIntegerImpl<Tag, T> min() noexcept {
- return detail::TypedIntegerImpl<Tag, T>(std::numeric_limits<T>::min());
- }
- };
+template <typename Tag, typename T>
+class numeric_limits<detail::TypedIntegerImpl<Tag, T>> : public numeric_limits<T> {
+ public:
+ static detail::TypedIntegerImpl<Tag, T> max() noexcept {
+ return detail::TypedIntegerImpl<Tag, T>(std::numeric_limits<T>::max());
+ }
+ static detail::TypedIntegerImpl<Tag, T> min() noexcept {
+ return detail::TypedIntegerImpl<Tag, T>(std::numeric_limits<T>::min());
+ }
+};
} // namespace std
namespace ityp {
- // These helpers below are provided since the default arithmetic operators for small integer
- // types like uint8_t and uint16_t return integers, not their same type. To avoid lots of
- // casting or conditional code between Release/Debug. Callsites should use ityp::Add(a, b) and
- // ityp::Sub(a, b) instead.
-
- template <typename Tag, typename T>
- constexpr ::detail::TypedIntegerImpl<Tag, T> Add(::detail::TypedIntegerImpl<Tag, T> lhs,
- ::detail::TypedIntegerImpl<Tag, T> rhs) {
- return ::detail::TypedIntegerImpl<Tag, T>(
- static_cast<T>(::detail::TypedIntegerImpl<Tag, T>::AddImpl(lhs, rhs)));
- }
-
- template <typename Tag, typename T>
- constexpr ::detail::TypedIntegerImpl<Tag, T> Sub(::detail::TypedIntegerImpl<Tag, T> lhs,
- ::detail::TypedIntegerImpl<Tag, T> rhs) {
- return ::detail::TypedIntegerImpl<Tag, T>(
- static_cast<T>(::detail::TypedIntegerImpl<Tag, T>::SubImpl(lhs, rhs)));
- }
-
- template <typename T>
- constexpr std::enable_if_t<std::is_integral<T>::value, T> Add(T lhs, T rhs) {
- return static_cast<T>(lhs + rhs);
- }
-
- template <typename T>
- constexpr std::enable_if_t<std::is_integral<T>::value, T> Sub(T lhs, T rhs) {
- return static_cast<T>(lhs - rhs);
- }
+// These helpers are provided because the default arithmetic operators for small integer
+// types like uint8_t and uint16_t return int, not the same type. To avoid lots of casting
+// or conditional code between Release/Debug builds, callsites should use ityp::Add(a, b)
+// and ityp::Sub(a, b) instead.
+
+template <typename Tag, typename T>
+constexpr ::detail::TypedIntegerImpl<Tag, T> Add(::detail::TypedIntegerImpl<Tag, T> lhs,
+ ::detail::TypedIntegerImpl<Tag, T> rhs) {
+ return ::detail::TypedIntegerImpl<Tag, T>(
+ static_cast<T>(::detail::TypedIntegerImpl<Tag, T>::AddImpl(lhs, rhs)));
+}
+
+template <typename Tag, typename T>
+constexpr ::detail::TypedIntegerImpl<Tag, T> Sub(::detail::TypedIntegerImpl<Tag, T> lhs,
+ ::detail::TypedIntegerImpl<Tag, T> rhs) {
+ return ::detail::TypedIntegerImpl<Tag, T>(
+ static_cast<T>(::detail::TypedIntegerImpl<Tag, T>::SubImpl(lhs, rhs)));
+}
+
+template <typename T>
+constexpr std::enable_if_t<std::is_integral<T>::value, T> Add(T lhs, T rhs) {
+ return static_cast<T>(lhs + rhs);
+}
+
+template <typename T>
+constexpr std::enable_if_t<std::is_integral<T>::value, T> Sub(T lhs, T rhs) {
+ return static_cast<T>(lhs - rhs);
+}
} // namespace ityp
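
A short sketch of the TypedInteger and ityp::Add usage the comments above describe; the BindingIndex and AttachmentIndex aliases are hypothetical stand-ins.

#include <cstdint>

#include "dawn/common/TypedInteger.h"

using BindingIndex = TypedInteger<struct BindingIndexTag, uint32_t>;
using AttachmentIndex = TypedInteger<struct AttachmentIndexTag, uint8_t>;

void TypedIntegerSketch() {
    BindingIndex a(3);
    BindingIndex b(4);
    BindingIndex c = a + b;                   // same-tag arithmetic
    uint32_t raw = static_cast<uint32_t>(c);  // only explicit casts to the underlying type
    (void)raw;

    // For narrow types such as uint8_t, the built-in operators promote to int,
    // which trips the static_assert in operator+ when asserts are enabled, so
    // the ityp helpers are used instead.
    AttachmentIndex x(static_cast<uint8_t>(1));
    AttachmentIndex y(static_cast<uint8_t>(2));
    AttachmentIndex z = ityp::Add(x, y);
    (void)z;
    // Mixing tags (e.g. a + x) is rejected at compile time in assert-enabled builds.
}
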
diff --git a/chromium/third_party/dawn/src/dawn/common/UnderlyingType.h b/chromium/third_party/dawn/src/dawn/common/UnderlyingType.h
index 5b499da4cae..b4ff8ea6040 100644
--- a/chromium/third_party/dawn/src/dawn/common/UnderlyingType.h
+++ b/chromium/third_party/dawn/src/dawn/common/UnderlyingType.h
@@ -22,27 +22,27 @@
// template parameter. It includes a specialization for detail::TypedIntegerImpl which yields
// the wrapped integer type.
namespace detail {
- template <typename T, typename Enable = void>
- struct UnderlyingTypeImpl;
-
- template <typename I>
- struct UnderlyingTypeImpl<I, typename std::enable_if_t<std::is_integral<I>::value>> {
- using type = I;
- };
-
- template <typename E>
- struct UnderlyingTypeImpl<E, typename std::enable_if_t<std::is_enum<E>::value>> {
- using type = std::underlying_type_t<E>;
- };
-
- // Forward declare the TypedInteger impl.
- template <typename Tag, typename T>
- class TypedIntegerImpl;
-
- template <typename Tag, typename I>
- struct UnderlyingTypeImpl<TypedIntegerImpl<Tag, I>> {
- using type = typename UnderlyingTypeImpl<I>::type;
- };
+template <typename T, typename Enable = void>
+struct UnderlyingTypeImpl;
+
+template <typename I>
+struct UnderlyingTypeImpl<I, typename std::enable_if_t<std::is_integral<I>::value>> {
+ using type = I;
+};
+
+template <typename E>
+struct UnderlyingTypeImpl<E, typename std::enable_if_t<std::is_enum<E>::value>> {
+ using type = std::underlying_type_t<E>;
+};
+
+// Forward declare the TypedInteger impl.
+template <typename Tag, typename T>
+class TypedIntegerImpl;
+
+template <typename Tag, typename I>
+struct UnderlyingTypeImpl<TypedIntegerImpl<Tag, I>> {
+ using type = typename UnderlyingTypeImpl<I>::type;
+};
} // namespace detail
template <typename T>
diff --git a/chromium/third_party/dawn/src/dawn/common/WindowsUtils.cpp b/chromium/third_party/dawn/src/dawn/common/WindowsUtils.cpp
index fd924f4ae21..6a5b143d17a 100644
--- a/chromium/third_party/dawn/src/dawn/common/WindowsUtils.cpp
+++ b/chromium/third_party/dawn/src/dawn/common/WindowsUtils.cpp
@@ -14,10 +14,10 @@
#include "dawn/common/WindowsUtils.h"
-#include "dawn/common/windows_with_undefs.h"
-
#include <memory>
+#include "dawn/common/windows_with_undefs.h"
+
std::string WCharToUTF8(const wchar_t* input) {
// The -1 argument asks WideCharToMultiByte to use the null terminator to know the size of
// input. It will return a size that includes the null terminator.
diff --git a/chromium/third_party/dawn/src/dawn/common/ityp_array.h b/chromium/third_party/dawn/src/dawn/common/ityp_array.h
index cb0d650e933..a4103027f92 100644
--- a/chromium/third_party/dawn/src/dawn/common/ityp_array.h
+++ b/chromium/third_party/dawn/src/dawn/common/ityp_array.h
@@ -15,83 +15,75 @@
#ifndef SRC_DAWN_COMMON_ITYP_ARRAY_H_
#define SRC_DAWN_COMMON_ITYP_ARRAY_H_
-#include "dawn/common/TypedInteger.h"
-#include "dawn/common/UnderlyingType.h"
-
#include <array>
#include <cstddef>
+#include <limits>
#include <type_traits>
+#include <utility>
+
+#include "dawn/common/TypedInteger.h"
+#include "dawn/common/UnderlyingType.h"
namespace ityp {
- // ityp::array is a helper class that wraps std::array with the restriction that
- // indices must be a particular type |Index|. Dawn uses multiple flat maps of
- // index-->data, and this class helps ensure an indices cannot be passed interchangably
- // to a flat map of a different type.
- template <typename Index, typename Value, size_t Size>
- class array : private std::array<Value, Size> {
- using I = UnderlyingType<Index>;
- using Base = std::array<Value, Size>;
-
- static_assert(Size <= std::numeric_limits<I>::max());
-
- public:
- constexpr array() = default;
-
- template <typename... Values>
- constexpr array(Values&&... values) : Base{std::forward<Values>(values)...} {
- }
-
- Value& operator[](Index i) {
- I index = static_cast<I>(i);
- ASSERT(index >= 0 && index < I(Size));
- return Base::operator[](index);
- }
-
- constexpr const Value& operator[](Index i) const {
- I index = static_cast<I>(i);
- ASSERT(index >= 0 && index < I(Size));
- return Base::operator[](index);
- }
-
- Value& at(Index i) {
- I index = static_cast<I>(i);
- ASSERT(index >= 0 && index < I(Size));
- return Base::at(index);
- }
-
- constexpr const Value& at(Index i) const {
- I index = static_cast<I>(i);
- ASSERT(index >= 0 && index < I(Size));
- return Base::at(index);
- }
-
- typename Base::iterator begin() noexcept {
- return Base::begin();
- }
-
- typename Base::const_iterator begin() const noexcept {
- return Base::begin();
- }
-
- typename Base::iterator end() noexcept {
- return Base::end();
- }
-
- typename Base::const_iterator end() const noexcept {
- return Base::end();
- }
-
- constexpr Index size() const {
- return Index(I(Size));
- }
-
- using Base::back;
- using Base::data;
- using Base::empty;
- using Base::fill;
- using Base::front;
- };
+// ityp::array is a helper class that wraps std::array with the restriction that
+// indices must be a particular type |Index|. Dawn uses multiple flat maps of
+// index-->data, and this class helps ensure indices cannot be passed interchangeably
+// to a flat map of a different type.
+template <typename Index, typename Value, size_t Size>
+class array : private std::array<Value, Size> {
+ using I = UnderlyingType<Index>;
+ using Base = std::array<Value, Size>;
+
+ static_assert(Size <= std::numeric_limits<I>::max());
+
+ public:
+ constexpr array() = default;
+
+ template <typename... Values>
+ // NOLINTNEXTLINE(runtime/explicit)
+ constexpr array(Values&&... values) : Base{std::forward<Values>(values)...} {}
+
+ Value& operator[](Index i) {
+ I index = static_cast<I>(i);
+ ASSERT(index >= 0 && index < I(Size));
+ return Base::operator[](index);
+ }
+
+ constexpr const Value& operator[](Index i) const {
+ I index = static_cast<I>(i);
+ ASSERT(index >= 0 && index < I(Size));
+ return Base::operator[](index);
+ }
+
+ Value& at(Index i) {
+ I index = static_cast<I>(i);
+ ASSERT(index >= 0 && index < I(Size));
+ return Base::at(index);
+ }
+
+ constexpr const Value& at(Index i) const {
+ I index = static_cast<I>(i);
+ ASSERT(index >= 0 && index < I(Size));
+ return Base::at(index);
+ }
+
+ typename Base::iterator begin() noexcept { return Base::begin(); }
+
+ typename Base::const_iterator begin() const noexcept { return Base::begin(); }
+
+ typename Base::iterator end() noexcept { return Base::end(); }
+
+ typename Base::const_iterator end() const noexcept { return Base::end(); }
+
+ constexpr Index size() const { return Index(I(Size)); }
+
+ using Base::back;
+ using Base::data;
+ using Base::empty;
+ using Base::fill;
+ using Base::front;
+};
} // namespace ityp
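
A sketch of the typed indexing this header provides; ColorAttachmentIndex and the array contents are illustrative assumptions.

#include <cstdint>

#include "dawn/common/TypedInteger.h"
#include "dawn/common/ityp_array.h"

using ColorAttachmentIndex = TypedInteger<struct ColorAttachmentIndexTag, uint8_t>;

void ItypArraySketch() {
    ityp::array<ColorAttachmentIndex, float, 8> clearValues{};

    ColorAttachmentIndex first(static_cast<uint8_t>(0));
    clearValues[first] = 1.0f;  // indexing requires the declared Index type
    // clearValues[0] = 1.0f;   // rejected in assert-enabled builds: raw integer index

    ColorAttachmentIndex count = clearValues.size();  // size() is typed too
    (void)count;
}
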
diff --git a/chromium/third_party/dawn/src/dawn/common/ityp_bitset.h b/chromium/third_party/dawn/src/dawn/common/ityp_bitset.h
index 7c5909dda6f..0be94a7e726 100644
--- a/chromium/third_party/dawn/src/dawn/common/ityp_bitset.h
+++ b/chromium/third_party/dawn/src/dawn/common/ityp_bitset.h
@@ -16,120 +16,101 @@
#define SRC_DAWN_COMMON_ITYP_BITSET_H_
#include "dawn/common/BitSetIterator.h"
+#include "dawn/common/Platform.h"
#include "dawn/common/TypedInteger.h"
#include "dawn/common/UnderlyingType.h"
namespace ityp {
- // ityp::bitset is a helper class that wraps std::bitset with the restriction that
- // indices must be a particular type |Index|.
- template <typename Index, size_t N>
- class bitset : private std::bitset<N> {
- using I = UnderlyingType<Index>;
- using Base = std::bitset<N>;
+// ityp::bitset is a helper class that wraps std::bitset with the restriction that
+// indices must be a particular type |Index|.
+template <typename Index, size_t N>
+class bitset : private std::bitset<N> {
+ using I = UnderlyingType<Index>;
+ using Base = std::bitset<N>;
- static_assert(sizeof(I) <= sizeof(size_t));
+ static_assert(sizeof(I) <= sizeof(size_t));
- explicit constexpr bitset(const Base& rhs) : Base(rhs) {
- }
+ explicit constexpr bitset(const Base& rhs) : Base(rhs) {}
- public:
- using reference = typename Base::reference;
+ public:
+ using reference = typename Base::reference;
- constexpr bitset() noexcept : Base() {
- }
+ constexpr bitset() noexcept : Base() {}
- constexpr bitset(unsigned long long value) noexcept : Base(value) {
- }
+ // NOLINTNEXTLINE(runtime/explicit)
+ constexpr bitset(uint64_t value) noexcept : Base(value) {}
- constexpr bool operator[](Index i) const {
- return Base::operator[](static_cast<I>(i));
- }
+ constexpr bool operator[](Index i) const { return Base::operator[](static_cast<I>(i)); }
- typename Base::reference operator[](Index i) {
- return Base::operator[](static_cast<I>(i));
- }
+ typename Base::reference operator[](Index i) { return Base::operator[](static_cast<I>(i)); }
- bool test(Index i) const {
- return Base::test(static_cast<I>(i));
- }
+ bool test(Index i) const { return Base::test(static_cast<I>(i)); }
- using Base::all;
- using Base::any;
- using Base::count;
- using Base::none;
- using Base::size;
+ using Base::all;
+ using Base::any;
+ using Base::count;
+ using Base::none;
+ using Base::size;
- bool operator==(const bitset& other) const noexcept {
- return Base::operator==(static_cast<const Base&>(other));
- }
+ bool operator==(const bitset& other) const noexcept {
+ return Base::operator==(static_cast<const Base&>(other));
+ }
- bool operator!=(const bitset& other) const noexcept {
- return Base::operator!=(static_cast<const Base&>(other));
- }
+ bool operator!=(const bitset& other) const noexcept {
+ return Base::operator!=(static_cast<const Base&>(other));
+ }
- bitset& operator&=(const bitset& other) noexcept {
- return static_cast<bitset&>(Base::operator&=(static_cast<const Base&>(other)));
- }
+ bitset& operator&=(const bitset& other) noexcept {
+ return static_cast<bitset&>(Base::operator&=(static_cast<const Base&>(other)));
+ }
- bitset& operator|=(const bitset& other) noexcept {
- return static_cast<bitset&>(Base::operator|=(static_cast<const Base&>(other)));
- }
+ bitset& operator|=(const bitset& other) noexcept {
+ return static_cast<bitset&>(Base::operator|=(static_cast<const Base&>(other)));
+ }
- bitset& operator^=(const bitset& other) noexcept {
- return static_cast<bitset&>(Base::operator^=(static_cast<const Base&>(other)));
- }
+ bitset& operator^=(const bitset& other) noexcept {
+ return static_cast<bitset&>(Base::operator^=(static_cast<const Base&>(other)));
+ }
- bitset operator~() const noexcept {
- return bitset(*this).flip();
- }
+ bitset operator~() const noexcept { return bitset(*this).flip(); }
- bitset& set() noexcept {
- return static_cast<bitset&>(Base::set());
- }
+ bitset& set() noexcept { return static_cast<bitset&>(Base::set()); }
- bitset& set(Index i, bool value = true) {
- return static_cast<bitset&>(Base::set(static_cast<I>(i), value));
- }
+ bitset& set(Index i, bool value = true) {
+ return static_cast<bitset&>(Base::set(static_cast<I>(i), value));
+ }
- bitset& reset() noexcept {
- return static_cast<bitset&>(Base::reset());
- }
+ bitset& reset() noexcept { return static_cast<bitset&>(Base::reset()); }
- bitset& reset(Index i) {
- return static_cast<bitset&>(Base::reset(static_cast<I>(i)));
- }
+ bitset& reset(Index i) { return static_cast<bitset&>(Base::reset(static_cast<I>(i))); }
- bitset& flip() noexcept {
- return static_cast<bitset&>(Base::flip());
- }
+ bitset& flip() noexcept { return static_cast<bitset&>(Base::flip()); }
- bitset& flip(Index i) {
- return static_cast<bitset&>(Base::flip(static_cast<I>(i)));
- }
+ bitset& flip(Index i) { return static_cast<bitset&>(Base::flip(static_cast<I>(i))); }
- using Base::to_string;
- using Base::to_ullong;
- using Base::to_ulong;
+ using Base::to_string;
+ using Base::to_ullong;
+ using Base::to_ulong;
- friend bitset operator&(const bitset& lhs, const bitset& rhs) noexcept {
- return bitset(static_cast<const Base&>(lhs) & static_cast<const Base&>(rhs));
- }
+ friend bitset operator&(const bitset& lhs, const bitset& rhs) noexcept {
+ return bitset(static_cast<const Base&>(lhs) & static_cast<const Base&>(rhs));
+ }
- friend bitset operator|(const bitset& lhs, const bitset& rhs) noexcept {
- return bitset(static_cast<const Base&>(lhs) | static_cast<const Base&>(rhs));
- }
+ friend bitset operator|(const bitset& lhs, const bitset& rhs) noexcept {
+ return bitset(static_cast<const Base&>(lhs) | static_cast<const Base&>(rhs));
+ }
- friend bitset operator^(const bitset& lhs, const bitset& rhs) noexcept {
- return bitset(static_cast<const Base&>(lhs) ^ static_cast<const Base&>(rhs));
- }
+ friend bitset operator^(const bitset& lhs, const bitset& rhs) noexcept {
+ return bitset(static_cast<const Base&>(lhs) ^ static_cast<const Base&>(rhs));
+ }
- friend BitSetIterator<N, Index> IterateBitSet(const bitset& bitset) {
- return BitSetIterator<N, Index>(static_cast<const Base&>(bitset));
- }
+ friend BitSetIterator<N, Index> IterateBitSet(const bitset& bitset) {
+ return BitSetIterator<N, Index>(static_cast<const Base&>(bitset));
+ }
- friend struct std::hash<bitset>;
- };
+ friend struct std::hash<bitset>;
+};
} // namespace ityp
@@ -144,16 +125,17 @@ namespace ityp {
template <typename Index, size_t N>
Index GetHighestBitIndexPlusOne(const ityp::bitset<Index, N>& bitset) {
using I = UnderlyingType<Index>;
-#if defined(DAWN_COMPILER_MSVC)
+#if DAWN_COMPILER_IS(MSVC)
if constexpr (N > 32) {
-# if defined(DAWN_PLATFORM_64_BIT)
+#if DAWN_PLATFORM_IS(64_BIT)
+ // NOLINTNEXTLINE(runtime/int)
unsigned long firstBitIndex = 0ul;
unsigned char ret = _BitScanReverse64(&firstBitIndex, bitset.to_ullong());
if (ret == 0) {
return Index(static_cast<I>(0));
}
return Index(static_cast<I>(firstBitIndex + 1));
-# else // defined(DAWN_PLATFORM_64_BIT)
+#else // DAWN_PLATFORM_IS(64_BIT)
if (bitset.none()) {
return Index(static_cast<I>(0));
}
@@ -163,8 +145,9 @@ Index GetHighestBitIndexPlusOne(const ityp::bitset<Index, N>& bitset) {
}
}
UNREACHABLE();
-# endif // defined(DAWN_PLATFORM_64_BIT)
+#endif // DAWN_PLATFORM_IS(64_BIT)
} else {
+ // NOLINTNEXTLINE(runtime/int)
unsigned long firstBitIndex = 0ul;
unsigned char ret = _BitScanReverse(&firstBitIndex, bitset.to_ulong());
if (ret == 0) {
@@ -172,7 +155,7 @@ Index GetHighestBitIndexPlusOne(const ityp::bitset<Index, N>& bitset) {
}
return Index(static_cast<I>(firstBitIndex + 1));
}
-#else // defined(DAWN_COMPILER_MSVC)
+#else // DAWN_COMPILER_IS(MSVC)
if (bitset.none()) {
return Index(static_cast<I>(0));
}
@@ -182,7 +165,7 @@ Index GetHighestBitIndexPlusOne(const ityp::bitset<Index, N>& bitset) {
} else {
return Index(static_cast<I>(32 - static_cast<uint32_t>(__builtin_clz(bitset.to_ulong()))));
}
-#endif // defined(DAWN_COMPILER_MSVC)
+#endif // DAWN_COMPILER_IS(MSVC)
}
#endif // SRC_DAWN_COMMON_ITYP_BITSET_H_
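
A sketch of ityp::bitset together with the IterateBitSet helper exposed above; BindGroupIndex and the bit count are illustrative assumptions.

#include <cstdint>

#include "dawn/common/TypedInteger.h"
#include "dawn/common/ityp_bitset.h"

using BindGroupIndex = TypedInteger<struct BindGroupIndexTag, uint32_t>;

void ItypBitsetSketch() {
    ityp::bitset<BindGroupIndex, 4> dirtyGroups;
    dirtyGroups.set(BindGroupIndex(2u));  // set/test/reset all take the typed index

    if (dirtyGroups.any()) {
        // IterateBitSet visits only the bits that are set, yielding typed indices.
        for (BindGroupIndex group : IterateBitSet(dirtyGroups)) {
            (void)group;
        }
    }
    dirtyGroups.reset();
}
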
diff --git a/chromium/third_party/dawn/src/dawn/common/ityp_span.h b/chromium/third_party/dawn/src/dawn/common/ityp_span.h
index 893982cf56d..4f76b57bd2d 100644
--- a/chromium/third_party/dawn/src/dawn/common/ityp_span.h
+++ b/chromium/third_party/dawn/src/dawn/common/ityp_span.h
@@ -15,88 +15,72 @@
#ifndef SRC_DAWN_COMMON_ITYP_SPAN_H_
#define SRC_DAWN_COMMON_ITYP_SPAN_H_
+#include <type_traits>
+
#include "dawn/common/TypedInteger.h"
#include "dawn/common/UnderlyingType.h"
-#include <type_traits>
-
namespace ityp {
- // ityp::span is a helper class that wraps an unowned packed array of type |Value|.
- // It stores the size and pointer to first element. It has the restriction that
- // indices must be a particular type |Index|. This provides a type-safe way to index
- // raw pointers.
- template <typename Index, typename Value>
- class span {
- using I = UnderlyingType<Index>;
-
- public:
- constexpr span() : mData(nullptr), mSize(0) {
- }
- constexpr span(Value* data, Index size) : mData(data), mSize(size) {
- }
-
- constexpr Value& operator[](Index i) const {
- ASSERT(i < mSize);
- return mData[static_cast<I>(i)];
- }
-
- Value* data() noexcept {
- return mData;
- }
-
- const Value* data() const noexcept {
- return mData;
- }
-
- Value* begin() noexcept {
- return mData;
- }
-
- const Value* begin() const noexcept {
- return mData;
- }
-
- Value* end() noexcept {
- return mData + static_cast<I>(mSize);
- }
-
- const Value* end() const noexcept {
- return mData + static_cast<I>(mSize);
- }
-
- Value& front() {
- ASSERT(mData != nullptr);
- ASSERT(static_cast<I>(mSize) >= 0);
- return *mData;
- }
-
- const Value& front() const {
- ASSERT(mData != nullptr);
- ASSERT(static_cast<I>(mSize) >= 0);
- return *mData;
- }
-
- Value& back() {
- ASSERT(mData != nullptr);
- ASSERT(static_cast<I>(mSize) >= 0);
- return *(mData + static_cast<I>(mSize) - 1);
- }
-
- const Value& back() const {
- ASSERT(mData != nullptr);
- ASSERT(static_cast<I>(mSize) >= 0);
- return *(mData + static_cast<I>(mSize) - 1);
- }
-
- Index size() const {
- return mSize;
- }
-
- private:
- Value* mData;
- Index mSize;
- };
+// ityp::span is a helper class that wraps an unowned packed array of type |Value|.
+// It stores the size and pointer to first element. It has the restriction that
+// indices must be a particular type |Index|. This provides a type-safe way to index
+// raw pointers.
+template <typename Index, typename Value>
+class span {
+ using I = UnderlyingType<Index>;
+
+ public:
+ constexpr span() : mData(nullptr), mSize(0) {}
+ constexpr span(Value* data, Index size) : mData(data), mSize(size) {}
+
+ constexpr Value& operator[](Index i) const {
+ ASSERT(i < mSize);
+ return mData[static_cast<I>(i)];
+ }
+
+ Value* data() noexcept { return mData; }
+
+ const Value* data() const noexcept { return mData; }
+
+ Value* begin() noexcept { return mData; }
+
+ const Value* begin() const noexcept { return mData; }
+
+ Value* end() noexcept { return mData + static_cast<I>(mSize); }
+
+ const Value* end() const noexcept { return mData + static_cast<I>(mSize); }
+
+ Value& front() {
+ ASSERT(mData != nullptr);
+ ASSERT(static_cast<I>(mSize) >= 0);
+ return *mData;
+ }
+
+ const Value& front() const {
+ ASSERT(mData != nullptr);
+ ASSERT(static_cast<I>(mSize) >= 0);
+ return *mData;
+ }
+
+ Value& back() {
+ ASSERT(mData != nullptr);
+ ASSERT(static_cast<I>(mSize) >= 0);
+ return *(mData + static_cast<I>(mSize) - 1);
+ }
+
+ const Value& back() const {
+ ASSERT(mData != nullptr);
+ ASSERT(static_cast<I>(mSize) >= 0);
+ return *(mData + static_cast<I>(mSize) - 1);
+ }
+
+ Index size() const { return mSize; }
+
+ private:
+ Value* mData;
+ Index mSize;
+};
} // namespace ityp
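
A sketch of ityp::span viewing an unowned buffer through typed indices; the BindingIndex alias and the zero-fill loop are illustrative assumptions.

#include <cstdint>

#include "dawn/common/TypedInteger.h"
#include "dawn/common/ityp_span.h"

using BindingIndex = TypedInteger<struct BindingIndexTag, uint32_t>;

void ItypSpanSketch(uint64_t* bindingData, uint32_t bindingCount) {
    // The span does not own bindingData; it just pairs the pointer with a typed size.
    ityp::span<BindingIndex, uint64_t> bindings(bindingData, BindingIndex(bindingCount));

    for (BindingIndex i(0u); i < bindings.size(); ++i) {
        bindings[i] = 0;
    }
}
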
diff --git a/chromium/third_party/dawn/src/dawn/common/ityp_stack_vec.h b/chromium/third_party/dawn/src/dawn/common/ityp_stack_vec.h
index f1f260111ce..d35adf6d98b 100644
--- a/chromium/third_party/dawn/src/dawn/common/ityp_stack_vec.h
+++ b/chromium/third_party/dawn/src/dawn/common/ityp_stack_vec.h
@@ -15,88 +15,62 @@
#ifndef SRC_DAWN_COMMON_ITYP_STACK_VEC_H_
#define SRC_DAWN_COMMON_ITYP_STACK_VEC_H_
+#include <limits>
+#include <vector>
+
#include "dawn/common/Assert.h"
#include "dawn/common/StackContainer.h"
#include "dawn/common/UnderlyingType.h"
namespace ityp {
- template <typename Index, typename Value, size_t StaticCapacity>
- class stack_vec : private StackVector<Value, StaticCapacity> {
- using I = UnderlyingType<Index>;
- using Base = StackVector<Value, StaticCapacity>;
- using VectorBase = std::vector<Value, StackAllocator<Value, StaticCapacity>>;
- static_assert(StaticCapacity <= std::numeric_limits<I>::max());
-
- public:
- stack_vec() : Base() {
- }
- explicit stack_vec(Index size) : Base() {
- this->container().resize(static_cast<I>(size));
- }
-
- Value& operator[](Index i) {
- ASSERT(i < size());
- return Base::operator[](static_cast<I>(i));
- }
-
- constexpr const Value& operator[](Index i) const {
- ASSERT(i < size());
- return Base::operator[](static_cast<I>(i));
- }
-
- void resize(Index size) {
- this->container().resize(static_cast<I>(size));
- }
-
- void reserve(Index size) {
- this->container().reserve(static_cast<I>(size));
- }
-
- Value* data() {
- return this->container().data();
- }
-
- const Value* data() const {
- return this->container().data();
- }
-
- typename VectorBase::iterator begin() noexcept {
- return this->container().begin();
- }
-
- typename VectorBase::const_iterator begin() const noexcept {
- return this->container().begin();
- }
-
- typename VectorBase::iterator end() noexcept {
- return this->container().end();
- }
-
- typename VectorBase::const_iterator end() const noexcept {
- return this->container().end();
- }
-
- typename VectorBase::reference front() {
- return this->container().front();
- }
-
- typename VectorBase::const_reference front() const {
- return this->container().front();
- }
-
- typename VectorBase::reference back() {
- return this->container().back();
- }
-
- typename VectorBase::const_reference back() const {
- return this->container().back();
- }
-
- Index size() const {
- return Index(static_cast<I>(this->container().size()));
- }
- };
+template <typename Index, typename Value, size_t StaticCapacity>
+class stack_vec : private StackVector<Value, StaticCapacity> {
+ using I = UnderlyingType<Index>;
+ using Base = StackVector<Value, StaticCapacity>;
+ using VectorBase = std::vector<Value, StackAllocator<Value, StaticCapacity>>;
+ static_assert(StaticCapacity <= std::numeric_limits<I>::max());
+
+ public:
+ stack_vec() : Base() {}
+ explicit stack_vec(Index size) : Base() { this->container().resize(static_cast<I>(size)); }
+
+ Value& operator[](Index i) {
+ ASSERT(i < size());
+ return Base::operator[](static_cast<I>(i));
+ }
+
+ constexpr const Value& operator[](Index i) const {
+ ASSERT(i < size());
+ return Base::operator[](static_cast<I>(i));
+ }
+
+ void resize(Index size) { this->container().resize(static_cast<I>(size)); }
+
+ void reserve(Index size) { this->container().reserve(static_cast<I>(size)); }
+
+ Value* data() { return this->container().data(); }
+
+ const Value* data() const { return this->container().data(); }
+
+ typename VectorBase::iterator begin() noexcept { return this->container().begin(); }
+
+ typename VectorBase::const_iterator begin() const noexcept { return this->container().begin(); }
+
+ typename VectorBase::iterator end() noexcept { return this->container().end(); }
+
+ typename VectorBase::const_iterator end() const noexcept { return this->container().end(); }
+
+ typename VectorBase::reference front() { return this->container().front(); }
+
+ typename VectorBase::const_reference front() const { return this->container().front(); }
+
+ typename VectorBase::reference back() { return this->container().back(); }
+
+ typename VectorBase::const_reference back() const { return this->container().back(); }
+
+ Index size() const { return Index(static_cast<I>(this->container().size())); }
+};
} // namespace ityp
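
A sketch combining typed indexing with StackVector's inline storage, as this header does; the alias, element type, and static capacity are illustrative assumptions.

#include <cstdint>

#include "dawn/common/TypedInteger.h"
#include "dawn/common/ityp_stack_vec.h"

using BindGroupIndex = TypedInteger<struct BindGroupIndexTag, uint32_t>;

void ItypStackVecSketch() {
    // Up to 4 entries live in the inline buffer; larger sizes spill to the heap.
    ityp::stack_vec<BindGroupIndex, uint32_t, 4> dynamicOffsetCounts(BindGroupIndex(4u));

    for (BindGroupIndex i(0u); i < dynamicOffsetCounts.size(); ++i) {
        dynamicOffsetCounts[i] = 0u;
    }
}
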
diff --git a/chromium/third_party/dawn/src/dawn/common/ityp_vector.h b/chromium/third_party/dawn/src/dawn/common/ityp_vector.h
index dc4f4c97bec..3d402cfdbbf 100644
--- a/chromium/third_party/dawn/src/dawn/common/ityp_vector.h
+++ b/chromium/third_party/dawn/src/dawn/common/ityp_vector.h
@@ -15,93 +15,84 @@
#ifndef SRC_DAWN_COMMON_ITYP_VECTOR_H_
#define SRC_DAWN_COMMON_ITYP_VECTOR_H_
-#include "dawn/common/TypedInteger.h"
-#include "dawn/common/UnderlyingType.h"
-
+#include <limits>
#include <type_traits>
#include <vector>
+#include "dawn/common/TypedInteger.h"
+#include "dawn/common/UnderlyingType.h"
+
namespace ityp {
- // ityp::vector is a helper class that wraps std::vector with the restriction that
- // indices must be a particular type |Index|.
- template <typename Index, typename Value>
- class vector : public std::vector<Value> {
- using I = UnderlyingType<Index>;
- using Base = std::vector<Value>;
-
- private:
- // Disallow access to base constructors and untyped index/size-related operators.
- using Base::Base;
- using Base::operator=;
- using Base::operator[];
- using Base::at;
- using Base::reserve;
- using Base::resize;
- using Base::size;
-
- public:
- vector() : Base() {
- }
-
- explicit vector(Index size) : Base(static_cast<I>(size)) {
- }
-
- vector(Index size, const Value& init) : Base(static_cast<I>(size), init) {
- }
-
- vector(const vector& rhs) : Base(static_cast<const Base&>(rhs)) {
- }
-
- vector(vector&& rhs) : Base(static_cast<Base&&>(rhs)) {
- }
-
- vector(std::initializer_list<Value> init) : Base(init) {
- }
-
- vector& operator=(const vector& rhs) {
- Base::operator=(static_cast<const Base&>(rhs));
- return *this;
- }
-
- vector& operator=(vector&& rhs) noexcept {
- Base::operator=(static_cast<Base&&>(rhs));
- return *this;
- }
-
- Value& operator[](Index i) {
- ASSERT(i >= Index(0) && i < size());
- return Base::operator[](static_cast<I>(i));
- }
-
- constexpr const Value& operator[](Index i) const {
- ASSERT(i >= Index(0) && i < size());
- return Base::operator[](static_cast<I>(i));
- }
-
- Value& at(Index i) {
- ASSERT(i >= Index(0) && i < size());
- return Base::at(static_cast<I>(i));
- }
-
- constexpr const Value& at(Index i) const {
- ASSERT(i >= Index(0) && i < size());
- return Base::at(static_cast<I>(i));
- }
-
- constexpr Index size() const {
- ASSERT(std::numeric_limits<I>::max() >= Base::size());
- return Index(static_cast<I>(Base::size()));
- }
-
- void resize(Index size) {
- Base::resize(static_cast<I>(size));
- }
-
- void reserve(Index size) {
- Base::reserve(static_cast<I>(size));
- }
- };
+// ityp::vector is a helper class that wraps std::vector with the restriction that
+// indices must be a particular type |Index|.
+template <typename Index, typename Value>
+class vector : public std::vector<Value> {
+ using I = UnderlyingType<Index>;
+ using Base = std::vector<Value>;
+
+ private:
+ // Disallow access to base constructors and untyped index/size-related operators.
+ using Base::Base;
+ using Base::operator=;
+ using Base::operator[];
+ using Base::at;
+ using Base::reserve;
+ using Base::resize;
+ using Base::size;
+
+ public:
+ vector() : Base() {}
+
+ explicit vector(Index size) : Base(static_cast<I>(size)) {}
+
+ vector(Index size, const Value& init) : Base(static_cast<I>(size), init) {}
+
+ vector(const vector& rhs) : Base(static_cast<const Base&>(rhs)) {}
+
+ vector(vector&& rhs) : Base(static_cast<Base&&>(rhs)) {}
+
+ vector(std::initializer_list<Value> init) : Base(init) {}
+
+ vector& operator=(const vector& rhs) {
+ Base::operator=(static_cast<const Base&>(rhs));
+ return *this;
+ }
+
+ vector& operator=(vector&& rhs) noexcept {
+ Base::operator=(static_cast<Base&&>(rhs));
+ return *this;
+ }
+
+ Value& operator[](Index i) {
+ ASSERT(i >= Index(0) && i < size());
+ return Base::operator[](static_cast<I>(i));
+ }
+
+ constexpr const Value& operator[](Index i) const {
+ ASSERT(i >= Index(0) && i < size());
+ return Base::operator[](static_cast<I>(i));
+ }
+
+ Value& at(Index i) {
+ ASSERT(i >= Index(0) && i < size());
+ return Base::at(static_cast<I>(i));
+ }
+
+ constexpr const Value& at(Index i) const {
+ ASSERT(i >= Index(0) && i < size());
+ return Base::at(static_cast<I>(i));
+ }
+
+ constexpr Index size() const {
+ ASSERT(std::numeric_limits<I>::max() >= Base::size());
+ return Index(static_cast<I>(Base::size()));
+ }
+
+ void resize(Index size) { Base::resize(static_cast<I>(size)); }
+
+ void reserve(Index size) { Base::reserve(static_cast<I>(size)); }
+};
} // namespace ityp
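
For context, a minimal self-contained sketch of the typed-index idea behind ityp::vector, using a plain enum class and assert instead of Dawn's TypedInteger/UnderlyingType/ASSERT machinery; the names (BindGroupIndex, TypedVector) are illustrative only:

#include <cassert>
#include <cstdint>
#include <type_traits>
#include <vector>

enum class BindGroupIndex : uint32_t {};  // hypothetical strongly typed index

template <typename Index, typename Value>
class TypedVector : private std::vector<Value> {
    using Base = std::vector<Value>;
    using I = std::underlying_type_t<Index>;

  public:
    TypedVector() = default;
    explicit TypedVector(Index size) : Base(static_cast<I>(size)) {}

    Value& operator[](Index i) {
        assert(static_cast<I>(i) < Base::size());
        return Base::operator[](static_cast<I>(i));
    }
    Index size() const { return Index(static_cast<I>(Base::size())); }
};

int main() {
    TypedVector<BindGroupIndex, int> groups(BindGroupIndex{4});
    groups[BindGroupIndex{2}] = 7;  // OK: the index has the expected type
    // groups[2] = 7;               // would not compile: bare integers are rejected
    return 0;
}
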
diff --git a/chromium/third_party/dawn/src/dawn/common/vulkan_platform.h b/chromium/third_party/dawn/src/dawn/common/vulkan_platform.h
index 097e4cb890a..d38d4ca9a99 100644
--- a/chromium/third_party/dawn/src/dawn/common/vulkan_platform.h
+++ b/chromium/third_party/dawn/src/dawn/common/vulkan_platform.h
@@ -16,17 +16,17 @@
#define SRC_DAWN_COMMON_VULKAN_PLATFORM_H_
#if !defined(DAWN_ENABLE_BACKEND_VULKAN)
-# error "vulkan_platform.h included without the Vulkan backend enabled"
+#error "vulkan_platform.h included without the Vulkan backend enabled"
#endif
#if defined(VULKAN_CORE_H_)
-# error "vulkan.h included before vulkan_platform.h"
+#error "vulkan.h included before vulkan_platform.h"
#endif
-#include "dawn/common/Platform.h"
-
#include <cstddef>
#include <cstdint>
+#include "dawn/common/Platform.h"
+
// vulkan.h defines non-dispatchable handles to be opaque pointers on 64-bit architectures and uint64_t
// on 32-bit architectures. This causes a problem on 32-bit, where the handles cannot be used to
// distinguish between overloads of the same function.
@@ -35,25 +35,25 @@
// redefined to be nullptr). This keeps the type-safety of having the handles be different types
// (like vulkan.h on 64 bit) but makes sure the types are different on 32 bit architectures.
-#if defined(DAWN_PLATFORM_64_BIT)
-# define DAWN_DEFINE_NATIVE_NON_DISPATCHABLE_HANDLE(object) using object = struct object##_T*;
+#if DAWN_PLATFORM_IS(64_BIT)
+#define DAWN_DEFINE_NATIVE_NON_DISPATCHABLE_HANDLE(object) using object = struct object##_T*;
// This function is needed because MSVC doesn't accept reinterpret_cast from uint64_t to uint64_t
// TODO(cwallez@chromium.org): Remove this once we rework vulkan_platform.h
template <typename T>
T NativeNonDispatachableHandleFromU64(uint64_t u64) {
return reinterpret_cast<T>(u64);
}
-#elif defined(DAWN_PLATFORM_32_BIT)
-# define DAWN_DEFINE_NATIVE_NON_DISPATCHABLE_HANDLE(object) using object = uint64_t;
+#elif DAWN_PLATFORM_IS(32_BIT)
+#define DAWN_DEFINE_NATIVE_NON_DISPATCHABLE_HANDLE(object) using object = uint64_t;
template <typename T>
T NativeNonDispatachableHandleFromU64(uint64_t u64) {
return u64;
}
#else
-# error "Unsupported platform"
+#error "Unsupported platform"
#endif
-// Define a dummy Vulkan handle for use before we include vulkan.h
+// Define a placeholder Vulkan handle for use before we include vulkan.h
DAWN_DEFINE_NATIVE_NON_DISPATCHABLE_HANDLE(VkSomeHandle)
// Find out the alignment of native handles. Logically we would use alignof(VkSomeHandleNative) so
@@ -67,127 +67,115 @@ DAWN_DEFINE_NATIVE_NON_DISPATCHABLE_HANDLE(VkSomeHandle)
namespace dawn::native::vulkan {
- namespace detail {
- template <typename T>
- struct WrapperStruct {
- T member;
- };
-
- template <typename T>
- static constexpr size_t AlignOfInStruct = alignof(WrapperStruct<T>);
-
- static constexpr size_t kNativeVkHandleAlignment = AlignOfInStruct<VkSomeHandle>;
- static constexpr size_t kUint64Alignment = AlignOfInStruct<uint64_t>;
-
- // Simple handle types that supports "nullptr_t" as a 0 value.
- template <typename Tag, typename HandleType>
- class alignas(detail::kNativeVkHandleAlignment) VkHandle {
- public:
- // Default constructor and assigning of VK_NULL_HANDLE
- VkHandle() = default;
- VkHandle(std::nullptr_t) {
- }
-
- // Use default copy constructor/assignment
- VkHandle(const VkHandle<Tag, HandleType>& other) = default;
- VkHandle& operator=(const VkHandle<Tag, HandleType>&) = default;
-
- // Comparisons between handles
- bool operator==(VkHandle<Tag, HandleType> other) const {
- return mHandle == other.mHandle;
- }
- bool operator!=(VkHandle<Tag, HandleType> other) const {
- return mHandle != other.mHandle;
- }
-
- // Comparisons between handles and VK_NULL_HANDLE
- bool operator==(std::nullptr_t) const {
- return mHandle == 0;
- }
- bool operator!=(std::nullptr_t) const {
- return mHandle != 0;
- }
-
- // Implicit conversion to real Vulkan types.
- operator HandleType() const {
- return GetHandle();
- }
-
- HandleType GetHandle() const {
- return mHandle;
- }
-
- HandleType& operator*() {
- return mHandle;
- }
-
- static VkHandle<Tag, HandleType> CreateFromHandle(HandleType handle) {
- return VkHandle{handle};
- }
-
- private:
- explicit VkHandle(HandleType handle) : mHandle(handle) {
- }
-
- HandleType mHandle = 0;
- };
- } // namespace detail
-
- static constexpr std::nullptr_t VK_NULL_HANDLE = nullptr;
-
- template <typename Tag, typename HandleType>
- HandleType* AsVkArray(detail::VkHandle<Tag, HandleType>* handle) {
- return reinterpret_cast<HandleType*>(handle);
+namespace detail {
+template <typename T>
+struct WrapperStruct {
+ T member;
+};
+
+template <typename T>
+static constexpr size_t AlignOfInStruct = alignof(WrapperStruct<T>);
+
+static constexpr size_t kNativeVkHandleAlignment = AlignOfInStruct<VkSomeHandle>;
+static constexpr size_t kUint64Alignment = AlignOfInStruct<uint64_t>;
+
+// Simple handle types that support "nullptr_t" as a 0 value.
+template <typename Tag, typename HandleType>
+class alignas(detail::kNativeVkHandleAlignment) VkHandle {
+ public:
+ // Default constructor and assigning of VK_NULL_HANDLE
+ VkHandle() = default;
+ VkHandle(std::nullptr_t) {}
+
+ // Use default copy constructor/assignment
+ VkHandle(const VkHandle<Tag, HandleType>& other) = default;
+ VkHandle& operator=(const VkHandle<Tag, HandleType>&) = default;
+
+ // Comparisons between handles
+ bool operator==(VkHandle<Tag, HandleType> other) const { return mHandle == other.mHandle; }
+ bool operator!=(VkHandle<Tag, HandleType> other) const { return mHandle != other.mHandle; }
+
+ // Comparisons between handles and VK_NULL_HANDLE
+ bool operator==(std::nullptr_t) const { return mHandle == 0; }
+ bool operator!=(std::nullptr_t) const { return mHandle != 0; }
+
+ // Implicit conversion to real Vulkan types.
+ operator HandleType() const { return GetHandle(); }
+
+ HandleType GetHandle() const { return mHandle; }
+
+ HandleType& operator*() { return mHandle; }
+
+ static VkHandle<Tag, HandleType> CreateFromHandle(HandleType handle) {
+ return VkHandle{handle};
}
+ private:
+ explicit VkHandle(HandleType handle) : mHandle(handle) {}
+
+ HandleType mHandle = 0;
+};
+} // namespace detail
+
+template <typename Tag, typename HandleType>
+HandleType* AsVkArray(detail::VkHandle<Tag, HandleType>* handle) {
+ return reinterpret_cast<HandleType*>(handle);
+}
+
} // namespace dawn::native::vulkan
-#define VK_DEFINE_NON_DISPATCHABLE_HANDLE(object) \
- DAWN_DEFINE_NATIVE_NON_DISPATCHABLE_HANDLE(object) \
- namespace dawn::native::vulkan { \
- using object = detail::VkHandle<struct VkTag##object, ::object>; \
- static_assert(sizeof(object) == sizeof(uint64_t)); \
- static_assert(alignof(object) == detail::kUint64Alignment); \
- static_assert(sizeof(object) == sizeof(::object)); \
- static_assert(alignof(object) == detail::kNativeVkHandleAlignment); \
+#define VK_DEFINE_NON_DISPATCHABLE_HANDLE(object) \
+ DAWN_DEFINE_NATIVE_NON_DISPATCHABLE_HANDLE(object) \
+ namespace dawn::native::vulkan { \
+ using object = detail::VkHandle<struct VkTag##object, ::object>; \
+ static_assert(sizeof(object) == sizeof(uint64_t)); \
+ static_assert(alignof(object) == detail::kUint64Alignment); \
+ static_assert(sizeof(object) == sizeof(::object)); \
+ static_assert(alignof(object) == detail::kNativeVkHandleAlignment); \
} // namespace dawn::native::vulkan
// Import additional parts of Vulkan that are supported on our architecture and preemptively include
// headers that vulkan.h includes that we have "undefs" for. Note that some of the VK_USE_PLATFORM_*
// defines are defined already in the Vulkan-Header BUILD.gn, but are needed when building with
// CMake, hence they cannot be removed at the moment.
-#if defined(DAWN_PLATFORM_WINDOWS)
-# ifndef VK_USE_PLATFORM_WIN32_KHR
-# define VK_USE_PLATFORM_WIN32_KHR
-# endif
-# include "dawn/common/windows_with_undefs.h"
-#endif // DAWN_PLATFORM_WINDOWS
+#if DAWN_PLATFORM_IS(WINDOWS)
+#ifndef VK_USE_PLATFORM_WIN32_KHR
+#define VK_USE_PLATFORM_WIN32_KHR
+#endif
+#include "dawn/common/windows_with_undefs.h"
+#endif // DAWN_PLATFORM_IS(WINDOWS)
#if defined(DAWN_USE_X11)
-# define VK_USE_PLATFORM_XLIB_KHR
-# ifndef VK_USE_PLATFORM_XCB_KHR
-# define VK_USE_PLATFORM_XCB_KHR
-# endif
-# include "dawn/common/xlib_with_undefs.h"
+#define VK_USE_PLATFORM_XLIB_KHR
+#ifndef VK_USE_PLATFORM_XCB_KHR
+#define VK_USE_PLATFORM_XCB_KHR
+#endif
+#include "dawn/common/xlib_with_undefs.h"
#endif // defined(DAWN_USE_X11)
+#if defined(DAWN_USE_WAYLAND)
+#ifndef VK_USE_PLATFORM_WAYLAND_KHR
+#define VK_USE_PLATFORM_WAYLAND_KHR
+#endif
+#endif // defined(DAWN_USE_WAYLAND)
+
#if defined(DAWN_ENABLE_BACKEND_METAL)
-# ifndef VK_USE_PLATFORM_METAL_EXT
-# define VK_USE_PLATFORM_METAL_EXT
-# endif
+#ifndef VK_USE_PLATFORM_METAL_EXT
+#define VK_USE_PLATFORM_METAL_EXT
+#endif
#endif // defined(DAWN_ENABLE_BACKEND_METAL)
-#if defined(DAWN_PLATFORM_ANDROID)
-# ifndef VK_USE_PLATFORM_ANDROID_KHR
-# define VK_USE_PLATFORM_ANDROID_KHR
-# endif
-#endif // defined(DAWN_PLATFORM_ANDROID)
+#if DAWN_PLATFORM_IS(ANDROID)
+#ifndef VK_USE_PLATFORM_ANDROID_KHR
+#define VK_USE_PLATFORM_ANDROID_KHR
+#endif
+#endif // DAWN_PLATFORM_IS(ANDROID)
-#if defined(DAWN_PLATFORM_FUCHSIA)
-# ifndef VK_USE_PLATFORM_FUCHSIA
-# define VK_USE_PLATFORM_FUCHSIA
-# endif
-#endif // defined(DAWN_PLATFORM_FUCHSIA)
+#if DAWN_PLATFORM_IS(FUCHSIA)
+#ifndef VK_USE_PLATFORM_FUCHSIA
+#define VK_USE_PLATFORM_FUCHSIA
+#endif
+#endif // DAWN_PLATFORM_IS(FUCHSIA)
// The actual inclusion of vulkan.h!
#define VK_NO_PROTOTYPES
@@ -195,12 +183,6 @@ namespace dawn::native::vulkan {
// Redefine VK_NULL_HANDLE for better type safety where possible.
#undef VK_NULL_HANDLE
-#if defined(DAWN_PLATFORM_64_BIT)
static constexpr std::nullptr_t VK_NULL_HANDLE = nullptr;
-#elif defined(DAWN_PLATFORM_32_BIT)
-static constexpr uint64_t VK_NULL_HANDLE = 0;
-#else
-# error "Unsupported platform"
-#endif
#endif // SRC_DAWN_COMMON_VULKAN_PLATFORM_H_
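
As a rough, self-contained illustration of the typed-handle trick described above (hypothetical names, no real vulkan.h involved): wrapping each 64-bit handle value in a distinct tag type keeps overloads distinguishable even when the underlying representation is always uint64_t.

#include <cstddef>
#include <cstdint>

template <typename Tag>
class Handle {
  public:
    Handle() = default;
    Handle(std::nullptr_t) {}

    static Handle CreateFromHandle(uint64_t raw) { return Handle(raw); }
    uint64_t GetHandle() const { return mHandle; }
    bool operator==(std::nullptr_t) const { return mHandle == 0; }

  private:
    explicit Handle(uint64_t raw) : mHandle(raw) {}
    uint64_t mHandle = 0;
};

// Two distinct handle types that would both be plain uint64_t on a 32-bit build.
using FakeBuffer = Handle<struct FakeBufferTag>;
using FakeImage = Handle<struct FakeImageTag>;

// These overloads would collide if the handles were bare uint64_t.
void Destroy(FakeBuffer) {}
void Destroy(FakeImage) {}

int main() {
    FakeBuffer buffer = FakeBuffer::CreateFromHandle(0x1234);
    Destroy(buffer);       // resolves to the FakeBuffer overload
    Destroy(FakeImage{});  // a default-constructed ("null") handle
    return buffer == nullptr ? 1 : 0;
}
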
diff --git a/chromium/third_party/dawn/src/dawn/common/windows_with_undefs.h b/chromium/third_party/dawn/src/dawn/common/windows_with_undefs.h
index 337ed605abf..858234fa0b3 100644
--- a/chromium/third_party/dawn/src/dawn/common/windows_with_undefs.h
+++ b/chromium/third_party/dawn/src/dawn/common/windows_with_undefs.h
@@ -17,8 +17,8 @@
#include "dawn/common/Platform.h"
-#if !defined(DAWN_PLATFORM_WINDOWS)
-# error "windows_with_undefs.h included on non-Windows"
+#if !DAWN_PLATFORM_IS(WINDOWS)
+#error "windows_with_undefs.h included on non-Windows"
#endif
// This header includes <windows.h> but removes all the extra defines that conflict with identifiers
diff --git a/chromium/third_party/dawn/src/dawn/common/xlib_with_undefs.h b/chromium/third_party/dawn/src/dawn/common/xlib_with_undefs.h
index 8073aa2c940..48355134635 100644
--- a/chromium/third_party/dawn/src/dawn/common/xlib_with_undefs.h
+++ b/chromium/third_party/dawn/src/dawn/common/xlib_with_undefs.h
@@ -17,8 +17,8 @@
#include "dawn/common/Platform.h"
-#if !defined(DAWN_PLATFORM_LINUX)
-# error "xlib_with_undefs.h included on non-Linux"
+#if !DAWN_PLATFORM_IS(LINUX)
+#error "xlib_with_undefs.h included on non-Linux"
#endif
// This header includes <X11/Xlib.h> but removes all the extra defines that conflict with
diff --git a/chromium/third_party/dawn/src/dawn/native/Adapter.cpp b/chromium/third_party/dawn/src/dawn/native/Adapter.cpp
index 4c000ac1bfe..53580aa6e64 100644
--- a/chromium/third_party/dawn/src/dawn/native/Adapter.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/Adapter.cpp
@@ -14,214 +14,222 @@
#include "dawn/native/Adapter.h"
+#include <algorithm>
+#include <memory>
+
#include "dawn/common/Constants.h"
+#include "dawn/common/GPUInfo.h"
#include "dawn/native/Device.h"
#include "dawn/native/Instance.h"
#include "dawn/native/ValidationUtils_autogen.h"
namespace dawn::native {
- AdapterBase::AdapterBase(InstanceBase* instance, wgpu::BackendType backend)
- : mInstance(instance), mBackend(backend) {
- mSupportedFeatures.EnableFeature(Feature::DawnNative);
- mSupportedFeatures.EnableFeature(Feature::DawnInternalUsages);
- }
-
- MaybeError AdapterBase::Initialize() {
- DAWN_TRY_CONTEXT(InitializeImpl(), "initializing adapter (backend=%s)", mBackend);
- DAWN_TRY_CONTEXT(
- InitializeSupportedFeaturesImpl(),
- "gathering supported features for \"%s\" - \"%s\" (vendorId=%#06x deviceId=%#06x "
- "backend=%s type=%s)",
- mName, mDriverDescription, mVendorId, mDeviceId, mBackend, mAdapterType);
- DAWN_TRY_CONTEXT(
- InitializeSupportedLimitsImpl(&mLimits),
- "gathering supported limits for \"%s\" - \"%s\" (vendorId=%#06x deviceId=%#06x "
- "backend=%s type=%s)",
- mName, mDriverDescription, mVendorId, mDeviceId, mBackend, mAdapterType);
-
- // Enforce internal Dawn constants.
- mLimits.v1.maxVertexBufferArrayStride =
- std::min(mLimits.v1.maxVertexBufferArrayStride, kMaxVertexBufferArrayStride);
- mLimits.v1.maxBindGroups = std::min(mLimits.v1.maxBindGroups, kMaxBindGroups);
- mLimits.v1.maxVertexAttributes =
- std::min(mLimits.v1.maxVertexAttributes, uint32_t(kMaxVertexAttributes));
- mLimits.v1.maxVertexBuffers =
- std::min(mLimits.v1.maxVertexBuffers, uint32_t(kMaxVertexBuffers));
- mLimits.v1.maxInterStageShaderComponents =
- std::min(mLimits.v1.maxInterStageShaderComponents, kMaxInterStageShaderComponents);
- mLimits.v1.maxSampledTexturesPerShaderStage = std::min(
- mLimits.v1.maxSampledTexturesPerShaderStage, kMaxSampledTexturesPerShaderStage);
- mLimits.v1.maxSamplersPerShaderStage =
- std::min(mLimits.v1.maxSamplersPerShaderStage, kMaxSamplersPerShaderStage);
- mLimits.v1.maxStorageBuffersPerShaderStage =
- std::min(mLimits.v1.maxStorageBuffersPerShaderStage, kMaxStorageBuffersPerShaderStage);
- mLimits.v1.maxStorageTexturesPerShaderStage = std::min(
- mLimits.v1.maxStorageTexturesPerShaderStage, kMaxStorageTexturesPerShaderStage);
- mLimits.v1.maxUniformBuffersPerShaderStage =
- std::min(mLimits.v1.maxUniformBuffersPerShaderStage, kMaxUniformBuffersPerShaderStage);
- mLimits.v1.maxDynamicUniformBuffersPerPipelineLayout =
- std::min(mLimits.v1.maxDynamicUniformBuffersPerPipelineLayout,
- kMaxDynamicUniformBuffersPerPipelineLayout);
- mLimits.v1.maxDynamicStorageBuffersPerPipelineLayout =
- std::min(mLimits.v1.maxDynamicStorageBuffersPerPipelineLayout,
- kMaxDynamicStorageBuffersPerPipelineLayout);
-
- return {};
- }
-
- bool AdapterBase::APIGetLimits(SupportedLimits* limits) const {
- return GetLimits(limits);
- }
-
- void AdapterBase::APIGetProperties(AdapterProperties* properties) const {
- properties->vendorID = mVendorId;
- properties->deviceID = mDeviceId;
- properties->name = mName.c_str();
- properties->driverDescription = mDriverDescription.c_str();
- properties->adapterType = mAdapterType;
- properties->backendType = mBackend;
- }
-
- bool AdapterBase::APIHasFeature(wgpu::FeatureName feature) const {
- return mSupportedFeatures.IsEnabled(feature);
- }
-
- size_t AdapterBase::APIEnumerateFeatures(wgpu::FeatureName* features) const {
- return mSupportedFeatures.EnumerateFeatures(features);
- }
-
- DeviceBase* AdapterBase::APICreateDevice(const DeviceDescriptor* descriptor) {
- DeviceDescriptor defaultDesc = {};
- if (descriptor == nullptr) {
- descriptor = &defaultDesc;
- }
- auto result = CreateDeviceInternal(descriptor);
- if (result.IsError()) {
- mInstance->ConsumedError(result.AcquireError());
- return nullptr;
- }
- return result.AcquireSuccess().Detach();
- }
-
- void AdapterBase::APIRequestDevice(const DeviceDescriptor* descriptor,
- WGPURequestDeviceCallback callback,
- void* userdata) {
- static constexpr DeviceDescriptor kDefaultDescriptor = {};
- if (descriptor == nullptr) {
- descriptor = &kDefaultDescriptor;
- }
- auto result = CreateDeviceInternal(descriptor);
-
- if (result.IsError()) {
- std::unique_ptr<ErrorData> errorData = result.AcquireError();
- // TODO(crbug.com/dawn/1122): Call callbacks only on wgpuInstanceProcessEvents
- callback(WGPURequestDeviceStatus_Error, nullptr,
- errorData->GetFormattedMessage().c_str(), userdata);
- return;
- }
-
- Ref<DeviceBase> device = result.AcquireSuccess();
-
- WGPURequestDeviceStatus status =
- device == nullptr ? WGPURequestDeviceStatus_Unknown : WGPURequestDeviceStatus_Success;
+AdapterBase::AdapterBase(InstanceBase* instance, wgpu::BackendType backend)
+ : mInstance(instance), mBackend(backend) {
+ mSupportedFeatures.EnableFeature(Feature::DawnNative);
+ mSupportedFeatures.EnableFeature(Feature::DawnInternalUsages);
+}
+
+MaybeError AdapterBase::Initialize() {
+ DAWN_TRY_CONTEXT(InitializeImpl(), "initializing adapter (backend=%s)", mBackend);
+ DAWN_TRY_CONTEXT(
+ InitializeSupportedFeaturesImpl(),
+ "gathering supported features for \"%s\" - \"%s\" (vendorId=%#06x deviceId=%#06x "
+ "backend=%s type=%s)",
+ mName, mDriverDescription, mVendorId, mDeviceId, mBackend, mAdapterType);
+ DAWN_TRY_CONTEXT(
+ InitializeSupportedLimitsImpl(&mLimits),
+ "gathering supported limits for \"%s\" - \"%s\" (vendorId=%#06x deviceId=%#06x "
+ "backend=%s type=%s)",
+ mName, mDriverDescription, mVendorId, mDeviceId, mBackend, mAdapterType);
+
+ mVendorName = gpu_info::GetVendorName(mVendorId);
+ mArchitectureName = gpu_info::GetArchitectureName(mVendorId, mDeviceId);
+
+ // Enforce internal Dawn constants.
+ mLimits.v1.maxVertexBufferArrayStride =
+ std::min(mLimits.v1.maxVertexBufferArrayStride, kMaxVertexBufferArrayStride);
+ mLimits.v1.maxBindGroups = std::min(mLimits.v1.maxBindGroups, kMaxBindGroups);
+ mLimits.v1.maxVertexAttributes =
+ std::min(mLimits.v1.maxVertexAttributes, uint32_t(kMaxVertexAttributes));
+ mLimits.v1.maxVertexBuffers =
+ std::min(mLimits.v1.maxVertexBuffers, uint32_t(kMaxVertexBuffers));
+ mLimits.v1.maxInterStageShaderComponents =
+ std::min(mLimits.v1.maxInterStageShaderComponents, kMaxInterStageShaderComponents);
+ mLimits.v1.maxSampledTexturesPerShaderStage =
+ std::min(mLimits.v1.maxSampledTexturesPerShaderStage, kMaxSampledTexturesPerShaderStage);
+ mLimits.v1.maxSamplersPerShaderStage =
+ std::min(mLimits.v1.maxSamplersPerShaderStage, kMaxSamplersPerShaderStage);
+ mLimits.v1.maxStorageBuffersPerShaderStage =
+ std::min(mLimits.v1.maxStorageBuffersPerShaderStage, kMaxStorageBuffersPerShaderStage);
+ mLimits.v1.maxStorageTexturesPerShaderStage =
+ std::min(mLimits.v1.maxStorageTexturesPerShaderStage, kMaxStorageTexturesPerShaderStage);
+ mLimits.v1.maxUniformBuffersPerShaderStage =
+ std::min(mLimits.v1.maxUniformBuffersPerShaderStage, kMaxUniformBuffersPerShaderStage);
+ mLimits.v1.maxDynamicUniformBuffersPerPipelineLayout =
+ std::min(mLimits.v1.maxDynamicUniformBuffersPerPipelineLayout,
+ kMaxDynamicUniformBuffersPerPipelineLayout);
+ mLimits.v1.maxDynamicStorageBuffersPerPipelineLayout =
+ std::min(mLimits.v1.maxDynamicStorageBuffersPerPipelineLayout,
+ kMaxDynamicStorageBuffersPerPipelineLayout);
+
+ return {};
+}
+
+bool AdapterBase::APIGetLimits(SupportedLimits* limits) const {
+ return GetLimits(limits);
+}
+
+void AdapterBase::APIGetProperties(AdapterProperties* properties) const {
+ properties->vendorID = mVendorId;
+ properties->vendorName = mVendorName.c_str();
+ properties->architecture = mArchitectureName.c_str();
+ properties->deviceID = mDeviceId;
+ properties->name = mName.c_str();
+ properties->driverDescription = mDriverDescription.c_str();
+ properties->adapterType = mAdapterType;
+ properties->backendType = mBackend;
+}
+
+bool AdapterBase::APIHasFeature(wgpu::FeatureName feature) const {
+ return mSupportedFeatures.IsEnabled(feature);
+}
+
+size_t AdapterBase::APIEnumerateFeatures(wgpu::FeatureName* features) const {
+ return mSupportedFeatures.EnumerateFeatures(features);
+}
+
+DeviceBase* AdapterBase::APICreateDevice(const DeviceDescriptor* descriptor) {
+ DeviceDescriptor defaultDesc = {};
+ if (descriptor == nullptr) {
+ descriptor = &defaultDesc;
+ }
+ auto result = CreateDeviceInternal(descriptor);
+ if (result.IsError()) {
+ mInstance->ConsumedError(result.AcquireError());
+ return nullptr;
+ }
+ return result.AcquireSuccess().Detach();
+}
+
+void AdapterBase::APIRequestDevice(const DeviceDescriptor* descriptor,
+ WGPURequestDeviceCallback callback,
+ void* userdata) {
+ static constexpr DeviceDescriptor kDefaultDescriptor = {};
+ if (descriptor == nullptr) {
+ descriptor = &kDefaultDescriptor;
+ }
+ auto result = CreateDeviceInternal(descriptor);
+
+ if (result.IsError()) {
+ std::unique_ptr<ErrorData> errorData = result.AcquireError();
// TODO(crbug.com/dawn/1122): Call callbacks only on wgpuInstanceProcessEvents
- callback(status, ToAPI(device.Detach()), nullptr, userdata);
+ callback(WGPURequestDeviceStatus_Error, nullptr, errorData->GetFormattedMessage().c_str(),
+ userdata);
+ return;
}
- uint32_t AdapterBase::GetVendorId() const {
- return mVendorId;
- }
+ Ref<DeviceBase> device = result.AcquireSuccess();
- uint32_t AdapterBase::GetDeviceId() const {
- return mDeviceId;
- }
+ WGPURequestDeviceStatus status =
+ device == nullptr ? WGPURequestDeviceStatus_Unknown : WGPURequestDeviceStatus_Success;
+ // TODO(crbug.com/dawn/1122): Call callbacks only on wgpuInstanceProcessEvents
+ callback(status, ToAPI(device.Detach()), nullptr, userdata);
+}
- wgpu::BackendType AdapterBase::GetBackendType() const {
- return mBackend;
- }
+uint32_t AdapterBase::GetVendorId() const {
+ return mVendorId;
+}
- InstanceBase* AdapterBase::GetInstance() const {
- return mInstance;
- }
+uint32_t AdapterBase::GetDeviceId() const {
+ return mDeviceId;
+}
- FeaturesSet AdapterBase::GetSupportedFeatures() const {
- return mSupportedFeatures;
- }
+wgpu::BackendType AdapterBase::GetBackendType() const {
+ return mBackend;
+}
- bool AdapterBase::SupportsAllRequiredFeatures(
- const ityp::span<size_t, const wgpu::FeatureName>& features) const {
- for (wgpu::FeatureName f : features) {
- if (!mSupportedFeatures.IsEnabled(f)) {
- return false;
- }
- }
- return true;
- }
+InstanceBase* AdapterBase::GetInstance() const {
+ return mInstance;
+}
- WGPUDeviceProperties AdapterBase::GetAdapterProperties() const {
- WGPUDeviceProperties adapterProperties = {};
- adapterProperties.deviceID = mDeviceId;
- adapterProperties.vendorID = mVendorId;
- adapterProperties.adapterType = static_cast<WGPUAdapterType>(mAdapterType);
-
- mSupportedFeatures.InitializeDeviceProperties(&adapterProperties);
- // This is OK for now because there are no limit feature structs.
- // If we add additional structs, the caller will need to provide memory
- // to store them (ex. by calling GetLimits directly instead). Currently,
- // we keep this function as it's only used internally in Chromium to
- // send the adapter properties across the wire.
- GetLimits(FromAPI(&adapterProperties.limits));
- return adapterProperties;
- }
+FeaturesSet AdapterBase::GetSupportedFeatures() const {
+ return mSupportedFeatures;
+}
- bool AdapterBase::GetLimits(SupportedLimits* limits) const {
- ASSERT(limits != nullptr);
- if (limits->nextInChain != nullptr) {
+bool AdapterBase::SupportsAllRequiredFeatures(
+ const ityp::span<size_t, const wgpu::FeatureName>& features) const {
+ for (wgpu::FeatureName f : features) {
+ if (!mSupportedFeatures.IsEnabled(f)) {
return false;
}
- if (mUseTieredLimits) {
- limits->limits = ApplyLimitTiers(mLimits.v1);
- } else {
- limits->limits = mLimits.v1;
- }
- return true;
- }
-
- ResultOrError<Ref<DeviceBase>> AdapterBase::CreateDeviceInternal(
- const DeviceDescriptor* descriptor) {
- ASSERT(descriptor != nullptr);
-
- for (uint32_t i = 0; i < descriptor->requiredFeaturesCount; ++i) {
- wgpu::FeatureName f = descriptor->requiredFeatures[i];
- DAWN_TRY(ValidateFeatureName(f));
- DAWN_INVALID_IF(!mSupportedFeatures.IsEnabled(f),
- "Requested feature %s is not supported.", f);
- }
-
- if (descriptor->requiredLimits != nullptr) {
- DAWN_TRY_CONTEXT(
- ValidateLimits(mUseTieredLimits ? ApplyLimitTiers(mLimits.v1) : mLimits.v1,
- descriptor->requiredLimits->limits),
- "validating required limits");
-
- DAWN_INVALID_IF(descriptor->requiredLimits->nextInChain != nullptr,
- "nextInChain is not nullptr.");
- }
- return CreateDeviceImpl(descriptor);
- }
-
- void AdapterBase::SetUseTieredLimits(bool useTieredLimits) {
- mUseTieredLimits = useTieredLimits;
- }
-
- void AdapterBase::ResetInternalDeviceForTesting() {
- mInstance->ConsumedError(ResetInternalDeviceForTestingImpl());
- }
-
- MaybeError AdapterBase::ResetInternalDeviceForTestingImpl() {
- return DAWN_INTERNAL_ERROR(
- "ResetInternalDeviceForTesting should only be used with the D3D12 backend.");
}
+ return true;
+}
+
+WGPUDeviceProperties AdapterBase::GetAdapterProperties() const {
+ WGPUDeviceProperties adapterProperties = {};
+ adapterProperties.deviceID = mDeviceId;
+ adapterProperties.vendorID = mVendorId;
+ adapterProperties.adapterType = static_cast<WGPUAdapterType>(mAdapterType);
+
+ mSupportedFeatures.InitializeDeviceProperties(&adapterProperties);
+ // This is OK for now because there are no limit feature structs.
+ // If we add additional structs, the caller will need to provide memory
+ // to store them (ex. by calling GetLimits directly instead). Currently,
+ // we keep this function as it's only used internally in Chromium to
+ // send the adapter properties across the wire.
+ GetLimits(FromAPI(&adapterProperties.limits));
+ return adapterProperties;
+}
+
+bool AdapterBase::GetLimits(SupportedLimits* limits) const {
+ ASSERT(limits != nullptr);
+ if (limits->nextInChain != nullptr) {
+ return false;
+ }
+ if (mUseTieredLimits) {
+ limits->limits = ApplyLimitTiers(mLimits.v1);
+ } else {
+ limits->limits = mLimits.v1;
+ }
+ return true;
+}
+
+ResultOrError<Ref<DeviceBase>> AdapterBase::CreateDeviceInternal(
+ const DeviceDescriptor* descriptor) {
+ ASSERT(descriptor != nullptr);
+
+ for (uint32_t i = 0; i < descriptor->requiredFeaturesCount; ++i) {
+ wgpu::FeatureName f = descriptor->requiredFeatures[i];
+ DAWN_TRY(ValidateFeatureName(f));
+ DAWN_INVALID_IF(!mSupportedFeatures.IsEnabled(f), "Requested feature %s is not supported.",
+ f);
+ }
+
+ if (descriptor->requiredLimits != nullptr) {
+ DAWN_TRY_CONTEXT(ValidateLimits(mUseTieredLimits ? ApplyLimitTiers(mLimits.v1) : mLimits.v1,
+ descriptor->requiredLimits->limits),
+ "validating required limits");
+
+ DAWN_INVALID_IF(descriptor->requiredLimits->nextInChain != nullptr,
+ "nextInChain is not nullptr.");
+ }
+ return CreateDeviceImpl(descriptor);
+}
+
+void AdapterBase::SetUseTieredLimits(bool useTieredLimits) {
+ mUseTieredLimits = useTieredLimits;
+}
+
+void AdapterBase::ResetInternalDeviceForTesting() {
+ mInstance->ConsumedError(ResetInternalDeviceForTestingImpl());
+}
+
+MaybeError AdapterBase::ResetInternalDeviceForTestingImpl() {
+ return DAWN_INTERNAL_ERROR(
+ "ResetInternalDeviceForTesting should only be used with the D3D12 backend.");
+}
} // namespace dawn::native
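
The "Enforce internal Dawn constants" block above boils down to one pattern: cap every backend-reported limit at Dawn's own constant with std::min. A stripped-down sketch follows; the struct and constant values are illustrative stand-ins, not Dawn's real definitions:

#include <algorithm>
#include <cstdint>
#include <iostream>

struct Limits {
    uint32_t maxBindGroups = 0;
    uint32_t maxVertexBuffers = 0;
};

// Stand-ins for Dawn's internal constants.
constexpr uint32_t kMaxBindGroups = 4;
constexpr uint32_t kMaxVertexBuffers = 8;

Limits ClampToInternalConstants(Limits reported) {
    reported.maxBindGroups = std::min(reported.maxBindGroups, kMaxBindGroups);
    reported.maxVertexBuffers = std::min(reported.maxVertexBuffers, kMaxVertexBuffers);
    return reported;
}

int main() {
    Limits fromDriver{32, 16};  // pretend the backend reports generous limits
    Limits clamped = ClampToInternalConstants(fromDriver);
    std::cout << clamped.maxBindGroups << " " << clamped.maxVertexBuffers << "\n";  // prints "4 8"
    return 0;
}
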
diff --git a/chromium/third_party/dawn/src/dawn/native/Adapter.h b/chromium/third_party/dawn/src/dawn/native/Adapter.h
index 99b68e73186..a02e77c253e 100644
--- a/chromium/third_party/dawn/src/dawn/native/Adapter.h
+++ b/chromium/third_party/dawn/src/dawn/native/Adapter.h
@@ -15,6 +15,8 @@
#ifndef SRC_DAWN_NATIVE_ADAPTER_H_
#define SRC_DAWN_NATIVE_ADAPTER_H_
+#include <string>
+
#include "dawn/native/DawnNative.h"
#include "dawn/common/RefCounted.h"
@@ -24,75 +26,74 @@
#include "dawn/native/Limits.h"
#include "dawn/native/dawn_platform.h"
-#include <string>
-
namespace dawn::native {
- class DeviceBase;
+class DeviceBase;
- class AdapterBase : public RefCounted {
- public:
- AdapterBase(InstanceBase* instance, wgpu::BackendType backend);
- virtual ~AdapterBase() = default;
+class AdapterBase : public RefCounted {
+ public:
+ AdapterBase(InstanceBase* instance, wgpu::BackendType backend);
+ ~AdapterBase() override = default;
- MaybeError Initialize();
+ MaybeError Initialize();
- // WebGPU API
- bool APIGetLimits(SupportedLimits* limits) const;
- void APIGetProperties(AdapterProperties* properties) const;
- bool APIHasFeature(wgpu::FeatureName feature) const;
- size_t APIEnumerateFeatures(wgpu::FeatureName* features) const;
- void APIRequestDevice(const DeviceDescriptor* descriptor,
- WGPURequestDeviceCallback callback,
- void* userdata);
- DeviceBase* APICreateDevice(const DeviceDescriptor* descriptor = nullptr);
+ // WebGPU API
+ bool APIGetLimits(SupportedLimits* limits) const;
+ void APIGetProperties(AdapterProperties* properties) const;
+ bool APIHasFeature(wgpu::FeatureName feature) const;
+ size_t APIEnumerateFeatures(wgpu::FeatureName* features) const;
+ void APIRequestDevice(const DeviceDescriptor* descriptor,
+ WGPURequestDeviceCallback callback,
+ void* userdata);
+ DeviceBase* APICreateDevice(const DeviceDescriptor* descriptor = nullptr);
- uint32_t GetVendorId() const;
- uint32_t GetDeviceId() const;
- wgpu::BackendType GetBackendType() const;
- InstanceBase* GetInstance() const;
+ uint32_t GetVendorId() const;
+ uint32_t GetDeviceId() const;
+ wgpu::BackendType GetBackendType() const;
+ InstanceBase* GetInstance() const;
- void ResetInternalDeviceForTesting();
+ void ResetInternalDeviceForTesting();
- FeaturesSet GetSupportedFeatures() const;
- bool SupportsAllRequiredFeatures(
- const ityp::span<size_t, const wgpu::FeatureName>& features) const;
- WGPUDeviceProperties GetAdapterProperties() const;
+ FeaturesSet GetSupportedFeatures() const;
+ bool SupportsAllRequiredFeatures(
+ const ityp::span<size_t, const wgpu::FeatureName>& features) const;
+ WGPUDeviceProperties GetAdapterProperties() const;
- bool GetLimits(SupportedLimits* limits) const;
+ bool GetLimits(SupportedLimits* limits) const;
- void SetUseTieredLimits(bool useTieredLimits);
+ void SetUseTieredLimits(bool useTieredLimits);
- virtual bool SupportsExternalImages() const = 0;
+ virtual bool SupportsExternalImages() const = 0;
- protected:
- uint32_t mVendorId = 0xFFFFFFFF;
- uint32_t mDeviceId = 0xFFFFFFFF;
- std::string mName;
- wgpu::AdapterType mAdapterType = wgpu::AdapterType::Unknown;
- std::string mDriverDescription;
- FeaturesSet mSupportedFeatures;
+ protected:
+ uint32_t mVendorId = 0xFFFFFFFF;
+ std::string mVendorName;
+ std::string mArchitectureName;
+ uint32_t mDeviceId = 0xFFFFFFFF;
+ std::string mName;
+ wgpu::AdapterType mAdapterType = wgpu::AdapterType::Unknown;
+ std::string mDriverDescription;
+ FeaturesSet mSupportedFeatures;
- private:
- virtual ResultOrError<Ref<DeviceBase>> CreateDeviceImpl(
- const DeviceDescriptor* descriptor) = 0;
+ private:
+ virtual ResultOrError<Ref<DeviceBase>> CreateDeviceImpl(const DeviceDescriptor* descriptor) = 0;
- virtual MaybeError InitializeImpl() = 0;
+ virtual MaybeError InitializeImpl() = 0;
- // Check base WebGPU features and discover supported featurees.
- virtual MaybeError InitializeSupportedFeaturesImpl() = 0;
+    // Check base WebGPU features and discover supported features.
+ virtual MaybeError InitializeSupportedFeaturesImpl() = 0;
- // Check base WebGPU limits and populate supported limits.
- virtual MaybeError InitializeSupportedLimitsImpl(CombinedLimits* limits) = 0;
+ // Check base WebGPU limits and populate supported limits.
+ virtual MaybeError InitializeSupportedLimitsImpl(CombinedLimits* limits) = 0;
- ResultOrError<Ref<DeviceBase>> CreateDeviceInternal(const DeviceDescriptor* descriptor);
+ ResultOrError<Ref<DeviceBase>> CreateDeviceInternal(const DeviceDescriptor* descriptor);
- virtual MaybeError ResetInternalDeviceForTestingImpl();
- InstanceBase* mInstance = nullptr;
- wgpu::BackendType mBackend;
- CombinedLimits mLimits;
- bool mUseTieredLimits = false;
- };
+ virtual MaybeError ResetInternalDeviceForTestingImpl();
+ InstanceBase* mInstance = nullptr;
+ wgpu::BackendType mBackend;
+ CombinedLimits mLimits;
+ bool mUseTieredLimits = false;
+};
} // namespace dawn::native
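
A small sketch of the layout AdapterBase uses: the base class drives initialization and owns the shared state, while each backend supplies only the pure-virtual *Impl() hooks. All names below are illustrative, not Dawn's real backend classes:

#include <iostream>
#include <string>

class FakeAdapterBase {
  public:
    virtual ~FakeAdapterBase() = default;

    bool Initialize() {
        if (!InitializeImpl()) {
            return false;
        }
        // Shared post-processing (limit clamping, feature checks, ...) would go here.
        return true;
    }

    const std::string& GetName() const { return mName; }

  protected:
    std::string mName;  // backends fill this in from InitializeImpl()

  private:
    virtual bool InitializeImpl() = 0;
};

class FakeVulkanAdapter final : public FakeAdapterBase {
  private:
    bool InitializeImpl() override {
        mName = "Some Vulkan GPU";  // in Dawn this would come from the driver
        return true;
    }
};

int main() {
    FakeVulkanAdapter adapter;
    if (adapter.Initialize()) {
        std::cout << adapter.GetName() << "\n";
    }
    return 0;
}
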
diff --git a/chromium/third_party/dawn/src/dawn/native/AsyncTask.cpp b/chromium/third_party/dawn/src/dawn/native/AsyncTask.cpp
index 2f764ef556f..2f0facef3ed 100644
--- a/chromium/third_party/dawn/src/dawn/native/AsyncTask.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/AsyncTask.cpp
@@ -14,66 +14,71 @@
#include "dawn/native/AsyncTask.h"
+#include <utility>
+
#include "dawn/platform/DawnPlatform.h"
namespace dawn::native {
- AsyncTaskManager::AsyncTaskManager(dawn::platform::WorkerTaskPool* workerTaskPool)
- : mWorkerTaskPool(workerTaskPool) {
- }
+AsyncTaskManager::AsyncTaskManager(dawn::platform::WorkerTaskPool* workerTaskPool)
+ : mWorkerTaskPool(workerTaskPool) {}
- void AsyncTaskManager::PostTask(AsyncTask asyncTask) {
- // If these allocations becomes expensive, we can slab-allocate tasks.
- Ref<WaitableTask> waitableTask = AcquireRef(new WaitableTask());
- waitableTask->taskManager = this;
- waitableTask->asyncTask = std::move(asyncTask);
-
- {
- // We insert new waitableTask objects into mPendingTasks in main thread (PostTask()),
- // and we may remove waitableTask objects from mPendingTasks in either main thread
- // (WaitAllPendingTasks()) or sub-thread (TaskCompleted), so mPendingTasks should be
- // protected by a mutex.
- std::lock_guard<std::mutex> lock(mPendingTasksMutex);
- mPendingTasks.emplace(waitableTask.Get(), waitableTask);
- }
-
- // Ref the task since it is accessed inside the worker function.
- // The worker function will acquire and release the task upon completion.
- waitableTask->Reference();
- waitableTask->waitableEvent =
- mWorkerTaskPool->PostWorkerTask(DoWaitableTask, waitableTask.Get());
- }
+void AsyncTaskManager::PostTask(AsyncTask asyncTask) {
+    // If these allocations become expensive, we can slab-allocate tasks.
+ Ref<WaitableTask> waitableTask = AcquireRef(new WaitableTask());
+ waitableTask->taskManager = this;
+ waitableTask->asyncTask = std::move(asyncTask);
- void AsyncTaskManager::HandleTaskCompletion(WaitableTask* task) {
+ {
+        // We insert new waitableTask objects into mPendingTasks on the main thread
+        // (PostTask()), and we may remove them on either the main thread (WaitAllPendingTasks())
+        // or a worker thread (HandleTaskCompletion()), so mPendingTasks must be protected by a
+        // mutex.
std::lock_guard<std::mutex> lock(mPendingTasksMutex);
- auto iter = mPendingTasks.find(task);
- if (iter != mPendingTasks.end()) {
- mPendingTasks.erase(iter);
- }
+ mPendingTasks.emplace(waitableTask.Get(), waitableTask);
}
- void AsyncTaskManager::WaitAllPendingTasks() {
- std::unordered_map<WaitableTask*, Ref<WaitableTask>> allPendingTasks;
+ // Ref the task since it is accessed inside the worker function.
+ // The worker function will acquire and release the task upon completion.
+ waitableTask->Reference();
+ waitableTask->waitableEvent =
+ mWorkerTaskPool->PostWorkerTask(DoWaitableTask, waitableTask.Get());
+}
- {
- std::lock_guard<std::mutex> lock(mPendingTasksMutex);
- allPendingTasks.swap(mPendingTasks);
- }
-
- for (auto& [_, task] : allPendingTasks) {
- task->waitableEvent->Wait();
- }
+void AsyncTaskManager::HandleTaskCompletion(WaitableTask* task) {
+ std::lock_guard<std::mutex> lock(mPendingTasksMutex);
+ auto iter = mPendingTasks.find(task);
+ if (iter != mPendingTasks.end()) {
+ mPendingTasks.erase(iter);
}
+}
+
+void AsyncTaskManager::WaitAllPendingTasks() {
+ std::unordered_map<WaitableTask*, Ref<WaitableTask>> allPendingTasks;
- bool AsyncTaskManager::HasPendingTasks() {
+ {
std::lock_guard<std::mutex> lock(mPendingTasksMutex);
- return !mPendingTasks.empty();
+ allPendingTasks.swap(mPendingTasks);
}
- void AsyncTaskManager::DoWaitableTask(void* task) {
- Ref<WaitableTask> waitableTask = AcquireRef(static_cast<WaitableTask*>(task));
- waitableTask->asyncTask();
- waitableTask->taskManager->HandleTaskCompletion(waitableTask.Get());
+ for (auto& [_, task] : allPendingTasks) {
+ task->waitableEvent->Wait();
}
+}
+
+bool AsyncTaskManager::HasPendingTasks() {
+ std::lock_guard<std::mutex> lock(mPendingTasksMutex);
+ return !mPendingTasks.empty();
+}
+
+void AsyncTaskManager::DoWaitableTask(void* task) {
+ Ref<WaitableTask> waitableTask = AcquireRef(static_cast<WaitableTask*>(task));
+ waitableTask->asyncTask();
+ waitableTask->taskManager->HandleTaskCompletion(waitableTask.Get());
+}
+
+AsyncTaskManager::WaitableTask::WaitableTask() = default;
+
+AsyncTaskManager::WaitableTask::~WaitableTask() = default;
} // namespace dawn::native
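
A rough, self-contained analogue of the flow above, with std::async standing in for Dawn's WorkerTaskPool (so this is a sketch of the pattern, not Dawn's actual class): pending work lives in a mutex-protected map, and WaitAllPendingTasks() swaps that map out under the lock before blocking on the individual tasks.

#include <cstdint>
#include <functional>
#include <future>
#include <mutex>
#include <unordered_map>
#include <utility>

class TinyTaskManager {
  public:
    void PostTask(std::function<void()> task) {
        std::lock_guard<std::mutex> lock(mMutex);
        mPending.emplace(mNextId++, std::async(std::launch::async, std::move(task)));
    }

    void WaitAllPendingTasks() {
        std::unordered_map<uint64_t, std::future<void>> pending;
        {
            std::lock_guard<std::mutex> lock(mMutex);
            pending.swap(mPending);  // drop the lock before blocking on the tasks
        }
        for (auto& entry : pending) {
            entry.second.wait();
        }
    }

  private:
    std::mutex mMutex;
    uint64_t mNextId = 0;
    std::unordered_map<uint64_t, std::future<void>> mPending;
};

int main() {
    TinyTaskManager manager;
    manager.PostTask([] { /* e.g. compile a shader off the main thread */ });
    manager.PostTask([] { /* another independent piece of work */ });
    manager.WaitAllPendingTasks();  // returns once both lambdas have finished
    return 0;
}
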
diff --git a/chromium/third_party/dawn/src/dawn/native/AsyncTask.h b/chromium/third_party/dawn/src/dawn/native/AsyncTask.h
index b71c80e96b2..78fc8af1f3b 100644
--- a/chromium/third_party/dawn/src/dawn/native/AsyncTask.h
+++ b/chromium/third_party/dawn/src/dawn/native/AsyncTask.h
@@ -23,43 +23,46 @@
#include "dawn/common/RefCounted.h"
namespace dawn::platform {
- class WaitableEvent;
- class WorkerTaskPool;
+class WaitableEvent;
+class WorkerTaskPool;
} // namespace dawn::platform
namespace dawn::native {
- // TODO(crbug.com/dawn/826): we'll add additional things to AsyncTask in the future, like
- // Cancel() and RunNow(). Cancelling helps avoid running the task's body when we are just
- // shutting down the device. RunNow() could be used for more advanced scenarios, for example
- // always doing ShaderModule initial compilation asynchronously, but being able to steal the
- // task if we need it for synchronous pipeline compilation.
- using AsyncTask = std::function<void()>;
+// TODO(crbug.com/dawn/826): we'll add additional things to AsyncTask in the future, like
+// Cancel() and RunNow(). Cancelling helps avoid running the task's body when we are just
+// shutting down the device. RunNow() could be used for more advanced scenarios, for example
+// always doing ShaderModule initial compilation asynchronously, but being able to steal the
+// task if we need it for synchronous pipeline compilation.
+using AsyncTask = std::function<void()>;
- class AsyncTaskManager {
- public:
- explicit AsyncTaskManager(dawn::platform::WorkerTaskPool* workerTaskPool);
-
- void PostTask(AsyncTask asyncTask);
- void WaitAllPendingTasks();
- bool HasPendingTasks();
+class AsyncTaskManager {
+ public:
+ explicit AsyncTaskManager(dawn::platform::WorkerTaskPool* workerTaskPool);
- private:
- class WaitableTask : public RefCounted {
- public:
- AsyncTask asyncTask;
- AsyncTaskManager* taskManager;
- std::unique_ptr<dawn::platform::WaitableEvent> waitableEvent;
- };
+ void PostTask(AsyncTask asyncTask);
+ void WaitAllPendingTasks();
+ bool HasPendingTasks();
- static void DoWaitableTask(void* task);
- void HandleTaskCompletion(WaitableTask* task);
+ private:
+ class WaitableTask : public RefCounted {
+ public:
+ WaitableTask();
+ ~WaitableTask() override;
- std::mutex mPendingTasksMutex;
- std::unordered_map<WaitableTask*, Ref<WaitableTask>> mPendingTasks;
- dawn::platform::WorkerTaskPool* mWorkerTaskPool;
+ AsyncTask asyncTask;
+ AsyncTaskManager* taskManager;
+ std::unique_ptr<dawn::platform::WaitableEvent> waitableEvent;
};
+ static void DoWaitableTask(void* task);
+ void HandleTaskCompletion(WaitableTask* task);
+
+ std::mutex mPendingTasksMutex;
+ std::unordered_map<WaitableTask*, Ref<WaitableTask>> mPendingTasks;
+ dawn::platform::WorkerTaskPool* mWorkerTaskPool;
+};
+
} // namespace dawn::native
#endif // SRC_DAWN_NATIVE_ASYNCTASK_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/AttachmentState.cpp b/chromium/third_party/dawn/src/dawn/native/AttachmentState.cpp
index 1e38d9d24e5..bbb8ecd9e2a 100644
--- a/chromium/third_party/dawn/src/dawn/native/AttachmentState.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/AttachmentState.cpp
@@ -21,155 +21,148 @@
namespace dawn::native {
- AttachmentStateBlueprint::AttachmentStateBlueprint(
- const RenderBundleEncoderDescriptor* descriptor)
- : mSampleCount(descriptor->sampleCount) {
- ASSERT(descriptor->colorFormatsCount <= kMaxColorAttachments);
+AttachmentStateBlueprint::AttachmentStateBlueprint(const RenderBundleEncoderDescriptor* descriptor)
+ : mSampleCount(descriptor->sampleCount) {
+ ASSERT(descriptor->colorFormatsCount <= kMaxColorAttachments);
+ for (ColorAttachmentIndex i(uint8_t(0));
+ i < ColorAttachmentIndex(static_cast<uint8_t>(descriptor->colorFormatsCount)); ++i) {
+ wgpu::TextureFormat format = descriptor->colorFormats[static_cast<uint8_t>(i)];
+ if (format != wgpu::TextureFormat::Undefined) {
+ mColorAttachmentsSet.set(i);
+ mColorFormats[i] = format;
+ }
+ }
+ mDepthStencilFormat = descriptor->depthStencilFormat;
+}
+
+AttachmentStateBlueprint::AttachmentStateBlueprint(const RenderPipelineDescriptor* descriptor)
+ : mSampleCount(descriptor->multisample.count) {
+ if (descriptor->fragment != nullptr) {
+ ASSERT(descriptor->fragment->targetCount <= kMaxColorAttachments);
for (ColorAttachmentIndex i(uint8_t(0));
- i < ColorAttachmentIndex(static_cast<uint8_t>(descriptor->colorFormatsCount)); ++i) {
- wgpu::TextureFormat format = descriptor->colorFormats[static_cast<uint8_t>(i)];
+ i < ColorAttachmentIndex(static_cast<uint8_t>(descriptor->fragment->targetCount));
+ ++i) {
+ wgpu::TextureFormat format =
+ descriptor->fragment->targets[static_cast<uint8_t>(i)].format;
if (format != wgpu::TextureFormat::Undefined) {
mColorAttachmentsSet.set(i);
mColorFormats[i] = format;
}
}
- mDepthStencilFormat = descriptor->depthStencilFormat;
}
-
- AttachmentStateBlueprint::AttachmentStateBlueprint(const RenderPipelineDescriptor* descriptor)
- : mSampleCount(descriptor->multisample.count) {
- if (descriptor->fragment != nullptr) {
- ASSERT(descriptor->fragment->targetCount <= kMaxColorAttachments);
- for (ColorAttachmentIndex i(uint8_t(0));
- i < ColorAttachmentIndex(static_cast<uint8_t>(descriptor->fragment->targetCount));
- ++i) {
- wgpu::TextureFormat format =
- descriptor->fragment->targets[static_cast<uint8_t>(i)].format;
- if (format != wgpu::TextureFormat::Undefined) {
- mColorAttachmentsSet.set(i);
- mColorFormats[i] = format;
- }
- }
- }
- if (descriptor->depthStencil != nullptr) {
- mDepthStencilFormat = descriptor->depthStencil->format;
- }
+ if (descriptor->depthStencil != nullptr) {
+ mDepthStencilFormat = descriptor->depthStencil->format;
}
-
- AttachmentStateBlueprint::AttachmentStateBlueprint(const RenderPassDescriptor* descriptor) {
- for (ColorAttachmentIndex i(uint8_t(0));
- i < ColorAttachmentIndex(static_cast<uint8_t>(descriptor->colorAttachmentCount));
- ++i) {
- TextureViewBase* attachment =
- descriptor->colorAttachments[static_cast<uint8_t>(i)].view;
- if (attachment == nullptr) {
- continue;
- }
- mColorAttachmentsSet.set(i);
- mColorFormats[i] = attachment->GetFormat().format;
- if (mSampleCount == 0) {
- mSampleCount = attachment->GetTexture()->GetSampleCount();
- } else {
- ASSERT(mSampleCount == attachment->GetTexture()->GetSampleCount());
- }
+}
+
+AttachmentStateBlueprint::AttachmentStateBlueprint(const RenderPassDescriptor* descriptor) {
+ for (ColorAttachmentIndex i(uint8_t(0));
+ i < ColorAttachmentIndex(static_cast<uint8_t>(descriptor->colorAttachmentCount)); ++i) {
+ TextureViewBase* attachment = descriptor->colorAttachments[static_cast<uint8_t>(i)].view;
+ if (attachment == nullptr) {
+ continue;
}
- if (descriptor->depthStencilAttachment != nullptr) {
- TextureViewBase* attachment = descriptor->depthStencilAttachment->view;
- mDepthStencilFormat = attachment->GetFormat().format;
- if (mSampleCount == 0) {
- mSampleCount = attachment->GetTexture()->GetSampleCount();
- } else {
- ASSERT(mSampleCount == attachment->GetTexture()->GetSampleCount());
- }
+ mColorAttachmentsSet.set(i);
+ mColorFormats[i] = attachment->GetFormat().format;
+ if (mSampleCount == 0) {
+ mSampleCount = attachment->GetTexture()->GetSampleCount();
+ } else {
+ ASSERT(mSampleCount == attachment->GetTexture()->GetSampleCount());
}
- ASSERT(mSampleCount > 0);
}
-
- AttachmentStateBlueprint::AttachmentStateBlueprint(const AttachmentStateBlueprint& rhs) =
- default;
-
- size_t AttachmentStateBlueprint::HashFunc::operator()(
- const AttachmentStateBlueprint* attachmentState) const {
- size_t hash = 0;
-
- // Hash color formats
- HashCombine(&hash, attachmentState->mColorAttachmentsSet);
- for (ColorAttachmentIndex i : IterateBitSet(attachmentState->mColorAttachmentsSet)) {
- HashCombine(&hash, attachmentState->mColorFormats[i]);
+ if (descriptor->depthStencilAttachment != nullptr) {
+ TextureViewBase* attachment = descriptor->depthStencilAttachment->view;
+ mDepthStencilFormat = attachment->GetFormat().format;
+ if (mSampleCount == 0) {
+ mSampleCount = attachment->GetTexture()->GetSampleCount();
+ } else {
+ ASSERT(mSampleCount == attachment->GetTexture()->GetSampleCount());
}
+ }
+ ASSERT(mSampleCount > 0);
+}
- // Hash depth stencil attachment
- HashCombine(&hash, attachmentState->mDepthStencilFormat);
+AttachmentStateBlueprint::AttachmentStateBlueprint(const AttachmentStateBlueprint& rhs) = default;
- // Hash sample count
- HashCombine(&hash, attachmentState->mSampleCount);
+size_t AttachmentStateBlueprint::HashFunc::operator()(
+ const AttachmentStateBlueprint* attachmentState) const {
+ size_t hash = 0;
- return hash;
+ // Hash color formats
+ HashCombine(&hash, attachmentState->mColorAttachmentsSet);
+ for (ColorAttachmentIndex i : IterateBitSet(attachmentState->mColorAttachmentsSet)) {
+ HashCombine(&hash, attachmentState->mColorFormats[i]);
}
- bool AttachmentStateBlueprint::EqualityFunc::operator()(
- const AttachmentStateBlueprint* a,
- const AttachmentStateBlueprint* b) const {
- // Check set attachments
- if (a->mColorAttachmentsSet != b->mColorAttachmentsSet) {
- return false;
- }
+ // Hash depth stencil attachment
+ HashCombine(&hash, attachmentState->mDepthStencilFormat);
- // Check color formats
- for (ColorAttachmentIndex i : IterateBitSet(a->mColorAttachmentsSet)) {
- if (a->mColorFormats[i] != b->mColorFormats[i]) {
- return false;
- }
- }
+ // Hash sample count
+ HashCombine(&hash, attachmentState->mSampleCount);
- // Check depth stencil format
- if (a->mDepthStencilFormat != b->mDepthStencilFormat) {
- return false;
- }
+ return hash;
+}
- // Check sample count
- if (a->mSampleCount != b->mSampleCount) {
+bool AttachmentStateBlueprint::EqualityFunc::operator()(const AttachmentStateBlueprint* a,
+ const AttachmentStateBlueprint* b) const {
+ // Check set attachments
+ if (a->mColorAttachmentsSet != b->mColorAttachmentsSet) {
+ return false;
+ }
+
+ // Check color formats
+ for (ColorAttachmentIndex i : IterateBitSet(a->mColorAttachmentsSet)) {
+ if (a->mColorFormats[i] != b->mColorFormats[i]) {
return false;
}
-
- return true;
}
- AttachmentState::AttachmentState(DeviceBase* device, const AttachmentStateBlueprint& blueprint)
- : AttachmentStateBlueprint(blueprint), ObjectBase(device) {
+ // Check depth stencil format
+ if (a->mDepthStencilFormat != b->mDepthStencilFormat) {
+ return false;
}
- AttachmentState::~AttachmentState() {
- GetDevice()->UncacheAttachmentState(this);
+ // Check sample count
+ if (a->mSampleCount != b->mSampleCount) {
+ return false;
}
- size_t AttachmentState::ComputeContentHash() {
- // TODO(dawn:549): skip this traversal and reuse the blueprint.
- return AttachmentStateBlueprint::HashFunc()(this);
- }
+ return true;
+}
- ityp::bitset<ColorAttachmentIndex, kMaxColorAttachments>
- AttachmentState::GetColorAttachmentsMask() const {
- return mColorAttachmentsSet;
- }
+AttachmentState::AttachmentState(DeviceBase* device, const AttachmentStateBlueprint& blueprint)
+ : AttachmentStateBlueprint(blueprint), ObjectBase(device) {}
- wgpu::TextureFormat AttachmentState::GetColorAttachmentFormat(
- ColorAttachmentIndex index) const {
- ASSERT(mColorAttachmentsSet[index]);
- return mColorFormats[index];
- }
+AttachmentState::~AttachmentState() {
+ GetDevice()->UncacheAttachmentState(this);
+}
- bool AttachmentState::HasDepthStencilAttachment() const {
- return mDepthStencilFormat != wgpu::TextureFormat::Undefined;
- }
+size_t AttachmentState::ComputeContentHash() {
+ // TODO(dawn:549): skip this traversal and reuse the blueprint.
+ return AttachmentStateBlueprint::HashFunc()(this);
+}
- wgpu::TextureFormat AttachmentState::GetDepthStencilFormat() const {
- ASSERT(HasDepthStencilAttachment());
- return mDepthStencilFormat;
- }
+ityp::bitset<ColorAttachmentIndex, kMaxColorAttachments> AttachmentState::GetColorAttachmentsMask()
+ const {
+ return mColorAttachmentsSet;
+}
- uint32_t AttachmentState::GetSampleCount() const {
- return mSampleCount;
- }
+wgpu::TextureFormat AttachmentState::GetColorAttachmentFormat(ColorAttachmentIndex index) const {
+ ASSERT(mColorAttachmentsSet[index]);
+ return mColorFormats[index];
+}
+
+bool AttachmentState::HasDepthStencilAttachment() const {
+ return mDepthStencilFormat != wgpu::TextureFormat::Undefined;
+}
+
+wgpu::TextureFormat AttachmentState::GetDepthStencilFormat() const {
+ ASSERT(HasDepthStencilAttachment());
+ return mDepthStencilFormat;
+}
+
+uint32_t AttachmentState::GetSampleCount() const {
+ return mSampleCount;
+}
} // namespace dawn::native
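
The HashFunc/EqualityFunc pair above exists so attachment states can be deduplicated in an unordered_set keyed by pointer but compared by contents. A minimal sketch of that caching pattern follows; the Blueprint struct and HashCombine are simplified stand-ins for Dawn's real types and helper:

#include <cstddef>
#include <unordered_set>

struct Blueprint {
    unsigned sampleCount = 1;
    unsigned colorFormat = 0;  // stand-in for wgpu::TextureFormat
};

// Simplified stand-in for Dawn's HashCombine helper.
inline void HashCombine(std::size_t* hash, std::size_t value) {
    *hash ^= value + 0x9e3779b9 + (*hash << 6) + (*hash >> 2);
}

struct BlueprintHash {
    std::size_t operator()(const Blueprint* bp) const {
        std::size_t hash = 0;
        HashCombine(&hash, bp->sampleCount);
        HashCombine(&hash, bp->colorFormat);
        return hash;
    }
};

struct BlueprintEqual {
    bool operator()(const Blueprint* a, const Blueprint* b) const {
        return a->sampleCount == b->sampleCount && a->colorFormat == b->colorFormat;
    }
};

int main() {
    std::unordered_set<const Blueprint*, BlueprintHash, BlueprintEqual> cache;
    Blueprint a{4, 27};
    Blueprint b{4, 27};  // structurally identical to a
    cache.insert(&a);
    bool inserted = cache.insert(&b).second;
    // inserted is false: the cache already holds an equivalent blueprint (&a),
    // which is how structurally identical attachment states end up shared.
    return inserted ? 1 : 0;
}
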
diff --git a/chromium/third_party/dawn/src/dawn/native/AttachmentState.h b/chromium/third_party/dawn/src/dawn/native/AttachmentState.h
index 63c5defb17b..815ce29cf61 100644
--- a/chromium/third_party/dawn/src/dawn/native/AttachmentState.h
+++ b/chromium/third_party/dawn/src/dawn/native/AttachmentState.h
@@ -15,6 +15,9 @@
#ifndef SRC_DAWN_NATIVE_ATTACHMENTSTATE_H_
#define SRC_DAWN_NATIVE_ATTACHMENTSTATE_H_
+#include <array>
+#include <bitset>
+
#include "dawn/common/Constants.h"
#include "dawn/common/ityp_array.h"
#include "dawn/common/ityp_bitset.h"
@@ -24,59 +27,55 @@
#include "dawn/native/dawn_platform.h"
-#include <array>
-#include <bitset>
-
namespace dawn::native {
- class DeviceBase;
-
- // AttachmentStateBlueprint and AttachmentState are separated so the AttachmentState
- // can be constructed by copying the blueprint state instead of traversing descriptors.
- // Also, AttachmentStateBlueprint does not need a refcount like AttachmentState.
- class AttachmentStateBlueprint {
- public:
- // Note: Descriptors must be validated before the AttachmentState is constructed.
- explicit AttachmentStateBlueprint(const RenderBundleEncoderDescriptor* descriptor);
- explicit AttachmentStateBlueprint(const RenderPipelineDescriptor* descriptor);
- explicit AttachmentStateBlueprint(const RenderPassDescriptor* descriptor);
-
- AttachmentStateBlueprint(const AttachmentStateBlueprint& rhs);
-
- // Functors necessary for the unordered_set<AttachmentState*>-based cache.
- struct HashFunc {
- size_t operator()(const AttachmentStateBlueprint* attachmentState) const;
- };
- struct EqualityFunc {
- bool operator()(const AttachmentStateBlueprint* a,
- const AttachmentStateBlueprint* b) const;
- };
-
- protected:
- ityp::bitset<ColorAttachmentIndex, kMaxColorAttachments> mColorAttachmentsSet;
- ityp::array<ColorAttachmentIndex, wgpu::TextureFormat, kMaxColorAttachments> mColorFormats;
- // Default (texture format Undefined) indicates there is no depth stencil attachment.
- wgpu::TextureFormat mDepthStencilFormat = wgpu::TextureFormat::Undefined;
- uint32_t mSampleCount = 0;
- };
-
- class AttachmentState final : public AttachmentStateBlueprint,
- public ObjectBase,
- public CachedObject {
- public:
- AttachmentState(DeviceBase* device, const AttachmentStateBlueprint& blueprint);
+class DeviceBase;
- ityp::bitset<ColorAttachmentIndex, kMaxColorAttachments> GetColorAttachmentsMask() const;
- wgpu::TextureFormat GetColorAttachmentFormat(ColorAttachmentIndex index) const;
- bool HasDepthStencilAttachment() const;
- wgpu::TextureFormat GetDepthStencilFormat() const;
- uint32_t GetSampleCount() const;
+// AttachmentStateBlueprint and AttachmentState are separated so the AttachmentState
+// can be constructed by copying the blueprint state instead of traversing descriptors.
+// Also, AttachmentStateBlueprint does not need a refcount like AttachmentState.
+class AttachmentStateBlueprint {
+ public:
+ // Note: Descriptors must be validated before the AttachmentState is constructed.
+ explicit AttachmentStateBlueprint(const RenderBundleEncoderDescriptor* descriptor);
+ explicit AttachmentStateBlueprint(const RenderPipelineDescriptor* descriptor);
+ explicit AttachmentStateBlueprint(const RenderPassDescriptor* descriptor);
- size_t ComputeContentHash() override;
+ AttachmentStateBlueprint(const AttachmentStateBlueprint& rhs);
- private:
- ~AttachmentState() override;
+ // Functors necessary for the unordered_set<AttachmentState*>-based cache.
+ struct HashFunc {
+ size_t operator()(const AttachmentStateBlueprint* attachmentState) const;
};
+ struct EqualityFunc {
+ bool operator()(const AttachmentStateBlueprint* a, const AttachmentStateBlueprint* b) const;
+ };
+
+ protected:
+ ityp::bitset<ColorAttachmentIndex, kMaxColorAttachments> mColorAttachmentsSet;
+ ityp::array<ColorAttachmentIndex, wgpu::TextureFormat, kMaxColorAttachments> mColorFormats;
+ // Default (texture format Undefined) indicates there is no depth stencil attachment.
+ wgpu::TextureFormat mDepthStencilFormat = wgpu::TextureFormat::Undefined;
+ uint32_t mSampleCount = 0;
+};
+
+class AttachmentState final : public AttachmentStateBlueprint,
+ public ObjectBase,
+ public CachedObject {
+ public:
+ AttachmentState(DeviceBase* device, const AttachmentStateBlueprint& blueprint);
+
+ ityp::bitset<ColorAttachmentIndex, kMaxColorAttachments> GetColorAttachmentsMask() const;
+ wgpu::TextureFormat GetColorAttachmentFormat(ColorAttachmentIndex index) const;
+ bool HasDepthStencilAttachment() const;
+ wgpu::TextureFormat GetDepthStencilFormat() const;
+ uint32_t GetSampleCount() const;
+
+ size_t ComputeContentHash() override;
+
+ private:
+ ~AttachmentState() override;
+};
} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/BUILD.gn b/chromium/third_party/dawn/src/dawn/native/BUILD.gn
index 7fa3fb417ab..b9cef200014 100644
--- a/chromium/third_party/dawn/src/dawn/native/BUILD.gn
+++ b/chromium/third_party/dawn/src/dawn/native/BUILD.gn
@@ -19,15 +19,6 @@ import("${dawn_root}/generator/dawn_generator.gni")
import("${dawn_root}/scripts/dawn_component.gni")
import("${dawn_root}/scripts/dawn_features.gni")
-# Import mac_deployment_target
-if (is_mac) {
- if (dawn_has_build) {
- import("//build/config/mac/mac_sdk.gni")
- } else {
- mac_deployment_target = "10.11.0"
- }
-}
-
# The VVLs are an optional dependency, only use it if the path has been set.
enable_vulkan_validation_layers = dawn_enable_vulkan_validation_layers &&
dawn_vulkan_validation_layers_dir != ""
@@ -60,26 +51,15 @@ group("abseil") {
if (build_with_chromium) {
public_deps = [ "$dawn_abseil_dir:absl" ]
} else {
- public_deps = [ "${dawn_root}/third_party/gn/abseil-cpp:str_format" ]
+ public_deps = [
+ "${dawn_root}/third_party/gn/abseil-cpp:str_format",
+ "${dawn_root}/third_party/gn/abseil-cpp:strings",
+ ]
}
}
config("internal") {
configs = [ "${dawn_root}/src/dawn/common:internal_config" ]
-
- # Suppress warnings that Metal isn't in the deployment target of Chrome:
- # initialization of the Metal backend is behind a IsMetalSupported check so
- # Dawn won't call Metal functions on macOS 10.10.
- # At the time this is written Chromium supports 10.10.0 and above, so if we
- # aren't on 10.11 it means we are on 10.11 and above, and Metal is available.
- # Skipping this check on 10.11 and above is important as it allows getting
- # proper compilation warning when using 10.12 and above feature for example.
- # TODO(crbug.com/1004024): Consider using API_AVAILABLE annotations on all
- # metal code in dawn once crbug.com/1004024 is sorted out if Chromium still
- # supports 10.10 then.
- if (is_mac && mac_deployment_target == "10.10.0") {
- cflags_objcc = [ "-Wno-unguarded-availability" ]
- }
}
config("weak_framework") {
@@ -177,6 +157,12 @@ source_set("sources") {
configs += [ ":internal" ]
+ # Enable -Wglobal-constructors here only, instead of in internal_config,
+ # because gtest and some other targets don't build with it.
+ if (is_clang) {
+ cflags = [ "-Wglobal-constructors" ]
+ }
+
# Dependencies that are needed to compile dawn native entry points in
# FooBackend.cpp need to be public deps so they are propagated to the
# dawn native target
@@ -202,6 +188,10 @@ source_set("sources") {
"BindGroupTracker.h",
"BindingInfo.cpp",
"BindingInfo.h",
+ "Blob.cpp",
+ "Blob.h",
+ "BlobCache.cpp",
+ "BlobCache.h",
"BuddyAllocator.cpp",
"BuddyAllocator.h",
"BuddyMemoryAllocator.cpp",
@@ -274,15 +264,16 @@ source_set("sources") {
"ObjectBase.h",
"ObjectContentHasher.cpp",
"ObjectContentHasher.h",
+ "PassResourceUsage.cpp",
"PassResourceUsage.h",
"PassResourceUsageTracker.cpp",
"PassResourceUsageTracker.h",
"PerStage.cpp",
"PerStage.h",
- "PersistentCache.cpp",
- "PersistentCache.h",
"Pipeline.cpp",
"Pipeline.h",
+ "PipelineCache.cpp",
+ "PipelineCache.h",
"PipelineLayout.cpp",
"PipelineLayout.h",
"PooledResourceMemoryAllocator.cpp",
@@ -295,6 +286,8 @@ source_set("sources") {
"QuerySet.h",
"Queue.cpp",
"Queue.h",
+ "RefCountedWithExternalCount.cpp",
+ "RefCountedWithExternalCount.h",
"RenderBundle.cpp",
"RenderBundle.h",
"RenderBundleEncoder.cpp",
@@ -383,10 +376,13 @@ source_set("sources") {
"d3d12/BindGroupD3D12.h",
"d3d12/BindGroupLayoutD3D12.cpp",
"d3d12/BindGroupLayoutD3D12.h",
+ "d3d12/BlobD3D12.cpp",
+ "d3d12/BlobD3D12.h",
"d3d12/BufferD3D12.cpp",
"d3d12/BufferD3D12.h",
"d3d12/CPUDescriptorHeapAllocationD3D12.cpp",
"d3d12/CPUDescriptorHeapAllocationD3D12.h",
+ "d3d12/CacheKeyD3D12.cpp",
"d3d12/CommandAllocatorManager.cpp",
"d3d12/CommandAllocatorManager.h",
"d3d12/CommandBufferD3D12.cpp",
@@ -403,6 +399,8 @@ source_set("sources") {
"d3d12/D3D12Info.h",
"d3d12/DeviceD3D12.cpp",
"d3d12/DeviceD3D12.h",
+ "d3d12/ExternalImageDXGIImpl.cpp",
+ "d3d12/ExternalImageDXGIImpl.h",
"d3d12/Forward.h",
"d3d12/GPUDescriptorHeapAllocationD3D12.cpp",
"d3d12/GPUDescriptorHeapAllocationD3D12.h",
@@ -527,6 +525,8 @@ source_set("sources") {
]
sources += get_target_outputs(":opengl_loader_gen")
sources += [
+ "opengl/AdapterGL.cpp",
+ "opengl/AdapterGL.h",
"opengl/BackendGL.cpp",
"opengl/BackendGL.h",
"opengl/BindGroupGL.cpp",
@@ -609,6 +609,8 @@ source_set("sources") {
"vulkan/Forward.h",
"vulkan/NativeSwapChainImplVk.cpp",
"vulkan/NativeSwapChainImplVk.h",
+ "vulkan/PipelineCacheVk.cpp",
+ "vulkan/PipelineCacheVk.h",
"vulkan/PipelineLayoutVk.cpp",
"vulkan/PipelineLayoutVk.h",
"vulkan/QuerySetVk.cpp",
diff --git a/chromium/third_party/dawn/src/dawn/native/BackendConnection.cpp b/chromium/third_party/dawn/src/dawn/native/BackendConnection.cpp
index abcc2714530..0c54731d323 100644
--- a/chromium/third_party/dawn/src/dawn/native/BackendConnection.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/BackendConnection.cpp
@@ -16,21 +16,20 @@
namespace dawn::native {
- BackendConnection::BackendConnection(InstanceBase* instance, wgpu::BackendType type)
- : mInstance(instance), mType(type) {
- }
+BackendConnection::BackendConnection(InstanceBase* instance, wgpu::BackendType type)
+ : mInstance(instance), mType(type) {}
- wgpu::BackendType BackendConnection::GetType() const {
- return mType;
- }
+wgpu::BackendType BackendConnection::GetType() const {
+ return mType;
+}
- InstanceBase* BackendConnection::GetInstance() const {
- return mInstance;
- }
+InstanceBase* BackendConnection::GetInstance() const {
+ return mInstance;
+}
- ResultOrError<std::vector<Ref<AdapterBase>>> BackendConnection::DiscoverAdapters(
- const AdapterDiscoveryOptionsBase* options) {
- return DAWN_FORMAT_VALIDATION_ERROR("DiscoverAdapters not implemented for this backend.");
- }
+ResultOrError<std::vector<Ref<AdapterBase>>> BackendConnection::DiscoverAdapters(
+ const AdapterDiscoveryOptionsBase* options) {
+ return DAWN_FORMAT_VALIDATION_ERROR("DiscoverAdapters not implemented for this backend.");
+}
} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/BackendConnection.h b/chromium/third_party/dawn/src/dawn/native/BackendConnection.h
index 45b0709fb9b..04fe35bec61 100644
--- a/chromium/third_party/dawn/src/dawn/native/BackendConnection.h
+++ b/chromium/third_party/dawn/src/dawn/native/BackendConnection.h
@@ -15,35 +15,36 @@
#ifndef SRC_DAWN_NATIVE_BACKENDCONNECTION_H_
#define SRC_DAWN_NATIVE_BACKENDCONNECTION_H_
+#include <memory>
+#include <vector>
+
#include "dawn/native/Adapter.h"
#include "dawn/native/DawnNative.h"
-#include <memory>
-
namespace dawn::native {
- // An common interface for all backends. Mostly used to create adapters for a particular
- // backend.
- class BackendConnection {
- public:
- BackendConnection(InstanceBase* instance, wgpu::BackendType type);
- virtual ~BackendConnection() = default;
+// A common interface for all backends. Mostly used to create adapters for a particular
+// backend.
+class BackendConnection {
+ public:
+ BackendConnection(InstanceBase* instance, wgpu::BackendType type);
+ virtual ~BackendConnection() = default;
- wgpu::BackendType GetType() const;
- InstanceBase* GetInstance() const;
+ wgpu::BackendType GetType() const;
+ InstanceBase* GetInstance() const;
- // Returns all the adapters for the system that can be created by the backend, without extra
- // options (such as debug adapters, custom driver libraries, etc.)
- virtual std::vector<Ref<AdapterBase>> DiscoverDefaultAdapters() = 0;
+ // Returns all the adapters for the system that can be created by the backend, without extra
+ // options (such as debug adapters, custom driver libraries, etc.)
+ virtual std::vector<Ref<AdapterBase>> DiscoverDefaultAdapters() = 0;
- // Returns new adapters created with the backend-specific options.
- virtual ResultOrError<std::vector<Ref<AdapterBase>>> DiscoverAdapters(
- const AdapterDiscoveryOptionsBase* options);
+ // Returns new adapters created with the backend-specific options.
+ virtual ResultOrError<std::vector<Ref<AdapterBase>>> DiscoverAdapters(
+ const AdapterDiscoveryOptionsBase* options);
- private:
- InstanceBase* mInstance = nullptr;
- wgpu::BackendType mType;
- };
+ private:
+ InstanceBase* mInstance = nullptr;
+ wgpu::BackendType mType;
+};
} // namespace dawn::native
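
BackendConnection.h above defines the small interface each backend implements: it must enumerate its default adapters, and may override DiscoverAdapters for backend-specific discovery options (the base implementation shown earlier simply returns a validation error). A minimal sketch of how a backend would plug into such an interface, using simplified stand-in types rather than Dawn's InstanceBase/AdapterBase/ResultOrError machinery:

// Illustrative sketch only, not Dawn code.
#include <string>
#include <vector>

struct FakeAdapter {
    std::string name;
};

class FakeBackendConnection {
  public:
    virtual ~FakeBackendConnection() = default;
    // Every backend must be able to enumerate its default adapters.
    virtual std::vector<FakeAdapter> DiscoverDefaultAdapters() = 0;
};

class NullBackend final : public FakeBackendConnection {
  public:
    std::vector<FakeAdapter> DiscoverDefaultAdapters() override {
        return {FakeAdapter{"null-adapter"}};
    }
};

A backend that supports custom discovery options would additionally override the optional hook; the others simply inherit the error-returning default.
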
diff --git a/chromium/third_party/dawn/src/dawn/native/BindGroup.cpp b/chromium/third_party/dawn/src/dawn/native/BindGroup.cpp
index 503e6137e4c..0d3af775b3c 100644
--- a/chromium/third_party/dawn/src/dawn/native/BindGroup.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/BindGroup.cpp
@@ -29,517 +29,504 @@
namespace dawn::native {
- namespace {
-
- // Helper functions to perform binding-type specific validation
-
- MaybeError ValidateBufferBinding(const DeviceBase* device,
- const BindGroupEntry& entry,
- const BindingInfo& bindingInfo) {
- DAWN_INVALID_IF(entry.buffer == nullptr, "Binding entry buffer not set.");
-
- DAWN_INVALID_IF(entry.sampler != nullptr || entry.textureView != nullptr,
- "Expected only buffer to be set for binding entry.");
-
- DAWN_INVALID_IF(entry.nextInChain != nullptr, "nextInChain must be nullptr.");
-
- DAWN_TRY(device->ValidateObject(entry.buffer));
-
- ASSERT(bindingInfo.bindingType == BindingInfoType::Buffer);
-
- wgpu::BufferUsage requiredUsage;
- uint64_t maxBindingSize;
- uint64_t requiredBindingAlignment;
- switch (bindingInfo.buffer.type) {
- case wgpu::BufferBindingType::Uniform:
- requiredUsage = wgpu::BufferUsage::Uniform;
- maxBindingSize = device->GetLimits().v1.maxUniformBufferBindingSize;
- requiredBindingAlignment =
- device->GetLimits().v1.minUniformBufferOffsetAlignment;
- break;
- case wgpu::BufferBindingType::Storage:
- case wgpu::BufferBindingType::ReadOnlyStorage:
- requiredUsage = wgpu::BufferUsage::Storage;
- maxBindingSize = device->GetLimits().v1.maxStorageBufferBindingSize;
- requiredBindingAlignment =
- device->GetLimits().v1.minStorageBufferOffsetAlignment;
- break;
- case kInternalStorageBufferBinding:
- requiredUsage = kInternalStorageBuffer;
- maxBindingSize = device->GetLimits().v1.maxStorageBufferBindingSize;
- requiredBindingAlignment =
- device->GetLimits().v1.minStorageBufferOffsetAlignment;
- break;
- case wgpu::BufferBindingType::Undefined:
- UNREACHABLE();
- }
-
- uint64_t bufferSize = entry.buffer->GetSize();
-
- // Handle wgpu::WholeSize, avoiding overflows.
- DAWN_INVALID_IF(entry.offset > bufferSize,
- "Binding offset (%u) is larger than the size (%u) of %s.", entry.offset,
- bufferSize, entry.buffer);
-
- uint64_t bindingSize =
- (entry.size == wgpu::kWholeSize) ? bufferSize - entry.offset : entry.size;
-
- DAWN_INVALID_IF(bindingSize > bufferSize,
- "Binding size (%u) is larger than the size (%u) of %s.", bindingSize,
- bufferSize, entry.buffer);
-
- DAWN_INVALID_IF(bindingSize == 0, "Binding size is zero");
-
- // Note that no overflow can happen because we already checked that
- // bufferSize >= bindingSize
- DAWN_INVALID_IF(
- entry.offset > bufferSize - bindingSize,
- "Binding range (offset: %u, size: %u) doesn't fit in the size (%u) of %s.",
- entry.offset, bufferSize, bindingSize, entry.buffer);
+namespace {
+
+// Helper functions to perform binding-type specific validation
+
+MaybeError ValidateBufferBinding(const DeviceBase* device,
+ const BindGroupEntry& entry,
+ const BindingInfo& bindingInfo) {
+ DAWN_INVALID_IF(entry.buffer == nullptr, "Binding entry buffer not set.");
+
+ DAWN_INVALID_IF(entry.sampler != nullptr || entry.textureView != nullptr,
+ "Expected only buffer to be set for binding entry.");
+
+ DAWN_INVALID_IF(entry.nextInChain != nullptr, "nextInChain must be nullptr.");
+
+ DAWN_TRY(device->ValidateObject(entry.buffer));
+
+ ASSERT(bindingInfo.bindingType == BindingInfoType::Buffer);
+
+ wgpu::BufferUsage requiredUsage;
+ uint64_t maxBindingSize;
+ uint64_t requiredBindingAlignment;
+ switch (bindingInfo.buffer.type) {
+ case wgpu::BufferBindingType::Uniform:
+ requiredUsage = wgpu::BufferUsage::Uniform;
+ maxBindingSize = device->GetLimits().v1.maxUniformBufferBindingSize;
+ requiredBindingAlignment = device->GetLimits().v1.minUniformBufferOffsetAlignment;
+ break;
+ case wgpu::BufferBindingType::Storage:
+ case wgpu::BufferBindingType::ReadOnlyStorage:
+ requiredUsage = wgpu::BufferUsage::Storage;
+ maxBindingSize = device->GetLimits().v1.maxStorageBufferBindingSize;
+ requiredBindingAlignment = device->GetLimits().v1.minStorageBufferOffsetAlignment;
+ break;
+ case kInternalStorageBufferBinding:
+ requiredUsage = kInternalStorageBuffer;
+ maxBindingSize = device->GetLimits().v1.maxStorageBufferBindingSize;
+ requiredBindingAlignment = device->GetLimits().v1.minStorageBufferOffsetAlignment;
+ break;
+ case wgpu::BufferBindingType::Undefined:
+ UNREACHABLE();
+ }
- DAWN_INVALID_IF(!IsAligned(entry.offset, requiredBindingAlignment),
- "Offset (%u) does not satisfy the minimum %s alignment (%u).",
- entry.offset, bindingInfo.buffer.type, requiredBindingAlignment);
+ uint64_t bufferSize = entry.buffer->GetSize();
- DAWN_INVALID_IF(!(entry.buffer->GetUsage() & requiredUsage),
- "Binding usage (%s) of %s doesn't match expected usage (%s).",
- entry.buffer->GetUsage(), entry.buffer, requiredUsage);
+ // Handle wgpu::WholeSize, avoiding overflows.
+ DAWN_INVALID_IF(entry.offset > bufferSize,
+ "Binding offset (%u) is larger than the size (%u) of %s.", entry.offset,
+ bufferSize, entry.buffer);
- DAWN_INVALID_IF(bindingSize < bindingInfo.buffer.minBindingSize,
- "Binding size (%u) is smaller than the minimum binding size (%u).",
- bindingSize, bindingInfo.buffer.minBindingSize);
+ uint64_t bindingSize =
+ (entry.size == wgpu::kWholeSize) ? bufferSize - entry.offset : entry.size;
- DAWN_INVALID_IF(bindingSize > maxBindingSize,
- "Binding size (%u) is larger than the maximum binding size (%u).",
- bindingSize, maxBindingSize);
+ DAWN_INVALID_IF(bindingSize > bufferSize,
+ "Binding size (%u) is larger than the size (%u) of %s.", bindingSize,
+ bufferSize, entry.buffer);
- return {};
- }
+ DAWN_INVALID_IF(bindingSize == 0, "Binding size is zero");
- MaybeError ValidateTextureBinding(DeviceBase* device,
- const BindGroupEntry& entry,
- const BindingInfo& bindingInfo) {
- DAWN_INVALID_IF(entry.textureView == nullptr, "Binding entry textureView not set.");
-
- DAWN_INVALID_IF(entry.sampler != nullptr || entry.buffer != nullptr,
- "Expected only textureView to be set for binding entry.");
-
- DAWN_INVALID_IF(entry.nextInChain != nullptr, "nextInChain must be nullptr.");
-
- DAWN_TRY(device->ValidateObject(entry.textureView));
-
- TextureViewBase* view = entry.textureView;
-
- Aspect aspect = view->GetAspects();
- DAWN_INVALID_IF(!HasOneBit(aspect), "Multiple aspects (%s) selected in %s.", aspect,
- view);
-
- TextureBase* texture = view->GetTexture();
- switch (bindingInfo.bindingType) {
- case BindingInfoType::Texture: {
- SampleTypeBit supportedTypes =
- texture->GetFormat().GetAspectInfo(aspect).supportedSampleTypes;
- SampleTypeBit requiredType =
- SampleTypeToSampleTypeBit(bindingInfo.texture.sampleType);
-
- DAWN_INVALID_IF(
- !(texture->GetUsage() & wgpu::TextureUsage::TextureBinding),
- "Usage (%s) of %s doesn't include TextureUsage::TextureBinding.",
- texture->GetUsage(), texture);
-
- DAWN_INVALID_IF(
- texture->IsMultisampledTexture() != bindingInfo.texture.multisampled,
- "Sample count (%u) of %s doesn't match expectation (multisampled: %d).",
- texture->GetSampleCount(), texture, bindingInfo.texture.multisampled);
-
- DAWN_INVALID_IF(
- (supportedTypes & requiredType) == 0,
- "None of the supported sample types (%s) of %s match the expected sample "
- "types (%s).",
- supportedTypes, texture, requiredType);
-
- DAWN_INVALID_IF(
- entry.textureView->GetDimension() != bindingInfo.texture.viewDimension,
- "Dimension (%s) of %s doesn't match the expected dimension (%s).",
- entry.textureView->GetDimension(), entry.textureView,
- bindingInfo.texture.viewDimension);
- break;
- }
- case BindingInfoType::StorageTexture: {
- DAWN_INVALID_IF(
- !(texture->GetUsage() & wgpu::TextureUsage::StorageBinding),
- "Usage (%s) of %s doesn't include TextureUsage::StorageBinding.",
- texture->GetUsage(), texture);
-
- ASSERT(!texture->IsMultisampledTexture());
-
- DAWN_INVALID_IF(
- texture->GetFormat().format != bindingInfo.storageTexture.format,
- "Format (%s) of %s expected to be (%s).", texture->GetFormat().format,
- texture, bindingInfo.storageTexture.format);
-
- DAWN_INVALID_IF(
- entry.textureView->GetDimension() !=
- bindingInfo.storageTexture.viewDimension,
- "Dimension (%s) of %s doesn't match the expected dimension (%s).",
- entry.textureView->GetDimension(), entry.textureView,
- bindingInfo.storageTexture.viewDimension);
-
- DAWN_INVALID_IF(entry.textureView->GetLevelCount() != 1,
- "mipLevelCount (%u) of %s expected to be 1.",
- entry.textureView->GetLevelCount(), entry.textureView);
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
-
- return {};
- }
+ // Note that no overflow can happen because we already checked that
+ // bufferSize >= bindingSize
+ DAWN_INVALID_IF(entry.offset > bufferSize - bindingSize,
+ "Binding range (offset: %u, size: %u) doesn't fit in the size (%u) of %s.",
+                    entry.offset, bindingSize, bufferSize, entry.buffer);
- MaybeError ValidateSamplerBinding(const DeviceBase* device,
- const BindGroupEntry& entry,
- const BindingInfo& bindingInfo) {
- DAWN_INVALID_IF(entry.sampler == nullptr, "Binding entry sampler not set.");
-
- DAWN_INVALID_IF(entry.textureView != nullptr || entry.buffer != nullptr,
- "Expected only sampler to be set for binding entry.");
-
- DAWN_INVALID_IF(entry.nextInChain != nullptr, "nextInChain must be nullptr.");
-
- DAWN_TRY(device->ValidateObject(entry.sampler));
-
- ASSERT(bindingInfo.bindingType == BindingInfoType::Sampler);
-
- switch (bindingInfo.sampler.type) {
- case wgpu::SamplerBindingType::NonFiltering:
- DAWN_INVALID_IF(
- entry.sampler->IsFiltering(),
- "Filtering sampler %s is incompatible with non-filtering sampler "
- "binding.",
- entry.sampler);
- [[fallthrough]];
- case wgpu::SamplerBindingType::Filtering:
- DAWN_INVALID_IF(
- entry.sampler->IsComparison(),
- "Comparison sampler %s is incompatible with non-comparison sampler "
- "binding.",
- entry.sampler);
- break;
- case wgpu::SamplerBindingType::Comparison:
- DAWN_INVALID_IF(
- !entry.sampler->IsComparison(),
- "Non-comparison sampler %s is imcompatible with comparison sampler "
- "binding.",
- entry.sampler);
- break;
- default:
- UNREACHABLE();
- break;
- }
-
- return {};
- }
+ DAWN_INVALID_IF(!IsAligned(entry.offset, requiredBindingAlignment),
+ "Offset (%u) does not satisfy the minimum %s alignment (%u).", entry.offset,
+ bindingInfo.buffer.type, requiredBindingAlignment);
- MaybeError ValidateExternalTextureBinding(
- const DeviceBase* device,
- const BindGroupEntry& entry,
- const ExternalTextureBindingEntry* externalTextureBindingEntry,
- const ExternalTextureBindingExpansionMap& expansions) {
- DAWN_INVALID_IF(externalTextureBindingEntry == nullptr,
- "Binding entry external texture not set.");
+ DAWN_INVALID_IF(!(entry.buffer->GetUsage() & requiredUsage),
+ "Binding usage (%s) of %s doesn't match expected usage (%s).",
+ entry.buffer->GetUsageExternalOnly(), entry.buffer, requiredUsage);
- DAWN_INVALID_IF(
- entry.sampler != nullptr || entry.textureView != nullptr || entry.buffer != nullptr,
- "Expected only external texture to be set for binding entry.");
+ DAWN_INVALID_IF(bindingSize < bindingInfo.buffer.minBindingSize,
+ "Binding size (%u) is smaller than the minimum binding size (%u).", bindingSize,
+ bindingInfo.buffer.minBindingSize);
- DAWN_INVALID_IF(
- expansions.find(BindingNumber(entry.binding)) == expansions.end(),
- "External texture binding entry %u is not present in the bind group layout.",
- entry.binding);
+ DAWN_INVALID_IF(bindingSize > maxBindingSize,
+ "Binding size (%u) is larger than the maximum binding size (%u).", bindingSize,
+ maxBindingSize);
- DAWN_TRY(ValidateSingleSType(externalTextureBindingEntry->nextInChain,
- wgpu::SType::ExternalTextureBindingEntry));
+ return {};
+}
- DAWN_TRY(device->ValidateObject(externalTextureBindingEntry->externalTexture));
+MaybeError ValidateTextureBinding(DeviceBase* device,
+ const BindGroupEntry& entry,
+ const BindingInfo& bindingInfo) {
+ DAWN_INVALID_IF(entry.textureView == nullptr, "Binding entry textureView not set.");
- return {};
- }
+ DAWN_INVALID_IF(entry.sampler != nullptr || entry.buffer != nullptr,
+ "Expected only textureView to be set for binding entry.");
- } // anonymous namespace
-
- MaybeError ValidateBindGroupDescriptor(DeviceBase* device,
- const BindGroupDescriptor* descriptor) {
- DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr.");
-
- DAWN_TRY(device->ValidateObject(descriptor->layout));
-
- DAWN_INVALID_IF(
- descriptor->entryCount != descriptor->layout->GetUnexpandedBindingCount(),
- "Number of entries (%u) did not match the number of entries (%u) specified in %s."
- "\nExpected layout: %s",
- descriptor->entryCount, static_cast<uint32_t>(descriptor->layout->GetBindingCount()),
- descriptor->layout, descriptor->layout->EntriesToString());
-
- const BindGroupLayoutBase::BindingMap& bindingMap = descriptor->layout->GetBindingMap();
- ASSERT(bindingMap.size() <= kMaxBindingsPerPipelineLayout);
-
- ityp::bitset<BindingIndex, kMaxBindingsPerPipelineLayout> bindingsSet;
- for (uint32_t i = 0; i < descriptor->entryCount; ++i) {
- const BindGroupEntry& entry = descriptor->entries[i];
-
- const auto& it = bindingMap.find(BindingNumber(entry.binding));
- DAWN_INVALID_IF(it == bindingMap.end(),
- "In entries[%u], binding index %u not present in the bind group layout."
- "\nExpected layout: %s",
- i, entry.binding, descriptor->layout->EntriesToString());
-
- BindingIndex bindingIndex = it->second;
- ASSERT(bindingIndex < descriptor->layout->GetBindingCount());
-
- DAWN_INVALID_IF(bindingsSet[bindingIndex],
- "In entries[%u], binding index %u already used by a previous entry", i,
- entry.binding);
-
- bindingsSet.set(bindingIndex);
-
- // Below this block we validate entries based on the bind group layout, in which
- // external textures have been expanded into their underlying contents. For this reason
- // we must identify external texture binding entries by checking the bind group entry
- // itself.
- // TODO:(dawn:1293): Store external textures in
- // BindGroupLayoutBase::BindingDataPointers::bindings so checking external textures can
- // be moved in the switch below.
- const ExternalTextureBindingEntry* externalTextureBindingEntry = nullptr;
- FindInChain(entry.nextInChain, &externalTextureBindingEntry);
- if (externalTextureBindingEntry != nullptr) {
- DAWN_TRY(ValidateExternalTextureBinding(
- device, entry, externalTextureBindingEntry,
- descriptor->layout->GetExternalTextureBindingExpansionMap()));
- continue;
- }
-
- const BindingInfo& bindingInfo = descriptor->layout->GetBindingInfo(bindingIndex);
-
- // Perform binding-type specific validation.
- switch (bindingInfo.bindingType) {
- case BindingInfoType::Buffer:
- DAWN_TRY_CONTEXT(ValidateBufferBinding(device, entry, bindingInfo),
- "validating entries[%u] as a Buffer."
- "\nExpected entry layout: %s",
- i, bindingInfo);
- break;
- case BindingInfoType::Texture:
- case BindingInfoType::StorageTexture:
- DAWN_TRY_CONTEXT(ValidateTextureBinding(device, entry, bindingInfo),
- "validating entries[%u] as a Texture."
- "\nExpected entry layout: %s",
- i, bindingInfo);
- break;
- case BindingInfoType::Sampler:
- DAWN_TRY_CONTEXT(ValidateSamplerBinding(device, entry, bindingInfo),
- "validating entries[%u] as a Sampler."
- "\nExpected entry layout: %s",
- i, bindingInfo);
- break;
- case BindingInfoType::ExternalTexture:
- UNREACHABLE();
- break;
- }
- }
+ DAWN_INVALID_IF(entry.nextInChain != nullptr, "nextInChain must be nullptr.");
- // This should always be true because
- // - numBindings has to match between the bind group and its layout.
- // - Each binding must be set at most once
- //
- // We don't validate the equality because it wouldn't be possible to cover it with a test.
- ASSERT(bindingsSet.count() == descriptor->layout->GetUnexpandedBindingCount());
+ DAWN_TRY(device->ValidateObject(entry.textureView));
- return {};
- } // anonymous namespace
+ TextureViewBase* view = entry.textureView;
- // BindGroup
+ Aspect aspect = view->GetAspects();
+ DAWN_INVALID_IF(!HasOneBit(aspect), "Multiple aspects (%s) selected in %s.", aspect, view);
- BindGroupBase::BindGroupBase(DeviceBase* device,
- const BindGroupDescriptor* descriptor,
- void* bindingDataStart)
- : ApiObjectBase(device, descriptor->label),
- mLayout(descriptor->layout),
- mBindingData(mLayout->ComputeBindingDataPointers(bindingDataStart)) {
- for (BindingIndex i{0}; i < mLayout->GetBindingCount(); ++i) {
- // TODO(enga): Shouldn't be needed when bindings are tightly packed.
- // This is to fill Ref<ObjectBase> holes with nullptrs.
- new (&mBindingData.bindings[i]) Ref<ObjectBase>();
- }
+ TextureBase* texture = view->GetTexture();
+ switch (bindingInfo.bindingType) {
+ case BindingInfoType::Texture: {
+ SampleTypeBit supportedTypes =
+ texture->GetFormat().GetAspectInfo(aspect).supportedSampleTypes;
+ SampleTypeBit requiredType = SampleTypeToSampleTypeBit(bindingInfo.texture.sampleType);
- for (uint32_t i = 0; i < descriptor->entryCount; ++i) {
- const BindGroupEntry& entry = descriptor->entries[i];
-
- BindingIndex bindingIndex =
- descriptor->layout->GetBindingIndex(BindingNumber(entry.binding));
- ASSERT(bindingIndex < mLayout->GetBindingCount());
-
- // Only a single binding type should be set, so once we found it we can skip to the
- // next loop iteration.
-
- if (entry.buffer != nullptr) {
- ASSERT(mBindingData.bindings[bindingIndex] == nullptr);
- mBindingData.bindings[bindingIndex] = entry.buffer;
- mBindingData.bufferData[bindingIndex].offset = entry.offset;
- uint64_t bufferSize = (entry.size == wgpu::kWholeSize)
- ? entry.buffer->GetSize() - entry.offset
- : entry.size;
- mBindingData.bufferData[bindingIndex].size = bufferSize;
- continue;
- }
-
- if (entry.textureView != nullptr) {
- ASSERT(mBindingData.bindings[bindingIndex] == nullptr);
- mBindingData.bindings[bindingIndex] = entry.textureView;
- continue;
- }
-
- if (entry.sampler != nullptr) {
- ASSERT(mBindingData.bindings[bindingIndex] == nullptr);
- mBindingData.bindings[bindingIndex] = entry.sampler;
- continue;
- }
-
- // Here we unpack external texture bindings into multiple additional bindings for the
- // external texture's contents. New binding locations previously determined in the bind
- // group layout are created in this bind group and filled with the external texture's
- // underlying resources.
- const ExternalTextureBindingEntry* externalTextureBindingEntry = nullptr;
- FindInChain(entry.nextInChain, &externalTextureBindingEntry);
- if (externalTextureBindingEntry != nullptr) {
- mBoundExternalTextures.push_back(externalTextureBindingEntry->externalTexture);
-
- ExternalTextureBindingExpansionMap expansions =
- mLayout->GetExternalTextureBindingExpansionMap();
- ExternalTextureBindingExpansionMap::iterator it =
- expansions.find(BindingNumber(entry.binding));
-
- ASSERT(it != expansions.end());
-
- BindingIndex plane0BindingIndex =
- descriptor->layout->GetBindingIndex(it->second.plane0);
- BindingIndex plane1BindingIndex =
- descriptor->layout->GetBindingIndex(it->second.plane1);
- BindingIndex paramsBindingIndex =
- descriptor->layout->GetBindingIndex(it->second.params);
-
- ASSERT(mBindingData.bindings[plane0BindingIndex] == nullptr);
-
- mBindingData.bindings[plane0BindingIndex] =
- externalTextureBindingEntry->externalTexture->GetTextureViews()[0];
-
- ASSERT(mBindingData.bindings[plane1BindingIndex] == nullptr);
- mBindingData.bindings[plane1BindingIndex] =
- externalTextureBindingEntry->externalTexture->GetTextureViews()[1];
-
- ASSERT(mBindingData.bindings[paramsBindingIndex] == nullptr);
- mBindingData.bindings[paramsBindingIndex] =
- externalTextureBindingEntry->externalTexture->GetParamsBuffer();
- mBindingData.bufferData[paramsBindingIndex].offset = 0;
- mBindingData.bufferData[paramsBindingIndex].size =
- sizeof(dawn_native::ExternalTextureParams);
-
- continue;
- }
- }
+ DAWN_INVALID_IF(!(texture->GetUsage() & wgpu::TextureUsage::TextureBinding),
+ "Usage (%s) of %s doesn't include TextureUsage::TextureBinding.",
+ texture->GetUsage(), texture);
- uint32_t packedIdx = 0;
- for (BindingIndex bindingIndex{0}; bindingIndex < descriptor->layout->GetBufferCount();
- ++bindingIndex) {
- if (descriptor->layout->GetBindingInfo(bindingIndex).buffer.minBindingSize == 0) {
- mBindingData.unverifiedBufferSizes[packedIdx] =
- mBindingData.bufferData[bindingIndex].size;
- ++packedIdx;
- }
+ DAWN_INVALID_IF(texture->IsMultisampledTexture() != bindingInfo.texture.multisampled,
+ "Sample count (%u) of %s doesn't match expectation (multisampled: %d).",
+ texture->GetSampleCount(), texture, bindingInfo.texture.multisampled);
+
+ DAWN_INVALID_IF(
+ (supportedTypes & requiredType) == 0,
+ "None of the supported sample types (%s) of %s match the expected sample "
+ "types (%s).",
+ supportedTypes, texture, requiredType);
+
+ DAWN_INVALID_IF(entry.textureView->GetDimension() != bindingInfo.texture.viewDimension,
+ "Dimension (%s) of %s doesn't match the expected dimension (%s).",
+ entry.textureView->GetDimension(), entry.textureView,
+ bindingInfo.texture.viewDimension);
+ break;
}
+ case BindingInfoType::StorageTexture: {
+ DAWN_INVALID_IF(!(texture->GetUsage() & wgpu::TextureUsage::StorageBinding),
+ "Usage (%s) of %s doesn't include TextureUsage::StorageBinding.",
+ texture->GetUsage(), texture);
+
+ ASSERT(!texture->IsMultisampledTexture());
- TrackInDevice();
+ DAWN_INVALID_IF(texture->GetFormat().format != bindingInfo.storageTexture.format,
+ "Format (%s) of %s expected to be (%s).", texture->GetFormat().format,
+ texture, bindingInfo.storageTexture.format);
+
+ DAWN_INVALID_IF(
+ entry.textureView->GetDimension() != bindingInfo.storageTexture.viewDimension,
+ "Dimension (%s) of %s doesn't match the expected dimension (%s).",
+ entry.textureView->GetDimension(), entry.textureView,
+ bindingInfo.storageTexture.viewDimension);
+
+ DAWN_INVALID_IF(entry.textureView->GetLevelCount() != 1,
+ "mipLevelCount (%u) of %s expected to be 1.",
+ entry.textureView->GetLevelCount(), entry.textureView);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
}
- BindGroupBase::BindGroupBase(DeviceBase* device) : ApiObjectBase(device, kLabelNotImplemented) {
- TrackInDevice();
+ return {};
+}
+
+MaybeError ValidateSamplerBinding(const DeviceBase* device,
+ const BindGroupEntry& entry,
+ const BindingInfo& bindingInfo) {
+ DAWN_INVALID_IF(entry.sampler == nullptr, "Binding entry sampler not set.");
+
+ DAWN_INVALID_IF(entry.textureView != nullptr || entry.buffer != nullptr,
+ "Expected only sampler to be set for binding entry.");
+
+ DAWN_INVALID_IF(entry.nextInChain != nullptr, "nextInChain must be nullptr.");
+
+ DAWN_TRY(device->ValidateObject(entry.sampler));
+
+ ASSERT(bindingInfo.bindingType == BindingInfoType::Sampler);
+
+ switch (bindingInfo.sampler.type) {
+ case wgpu::SamplerBindingType::NonFiltering:
+ DAWN_INVALID_IF(entry.sampler->IsFiltering(),
+ "Filtering sampler %s is incompatible with non-filtering sampler "
+ "binding.",
+ entry.sampler);
+ [[fallthrough]];
+ case wgpu::SamplerBindingType::Filtering:
+ DAWN_INVALID_IF(entry.sampler->IsComparison(),
+ "Comparison sampler %s is incompatible with non-comparison sampler "
+ "binding.",
+ entry.sampler);
+ break;
+ case wgpu::SamplerBindingType::Comparison:
+ DAWN_INVALID_IF(!entry.sampler->IsComparison(),
+ "Non-comparison sampler %s is imcompatible with comparison sampler "
+                            "Non-comparison sampler %s is incompatible with comparison sampler "
+ entry.sampler);
+ break;
+ default:
+ UNREACHABLE();
+ break;
}
- BindGroupBase::~BindGroupBase() = default;
+ return {};
+}
+
+MaybeError ValidateExternalTextureBinding(
+ const DeviceBase* device,
+ const BindGroupEntry& entry,
+ const ExternalTextureBindingEntry* externalTextureBindingEntry,
+ const ExternalTextureBindingExpansionMap& expansions) {
+ DAWN_INVALID_IF(externalTextureBindingEntry == nullptr,
+ "Binding entry external texture not set.");
+
+ DAWN_INVALID_IF(
+ entry.sampler != nullptr || entry.textureView != nullptr || entry.buffer != nullptr,
+ "Expected only external texture to be set for binding entry.");
+
+ DAWN_INVALID_IF(expansions.find(BindingNumber(entry.binding)) == expansions.end(),
+ "External texture binding entry %u is not present in the bind group layout.",
+ entry.binding);
+
+ DAWN_TRY(ValidateSingleSType(externalTextureBindingEntry->nextInChain,
+ wgpu::SType::ExternalTextureBindingEntry));
+
+ DAWN_TRY(device->ValidateObject(externalTextureBindingEntry->externalTexture));
+
+ return {};
+}
+
+} // anonymous namespace
+
+MaybeError ValidateBindGroupDescriptor(DeviceBase* device, const BindGroupDescriptor* descriptor) {
+ DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr.");
+
+ DAWN_TRY(device->ValidateObject(descriptor->layout));
+
+ DAWN_INVALID_IF(
+ descriptor->entryCount != descriptor->layout->GetUnexpandedBindingCount(),
+ "Number of entries (%u) did not match the number of entries (%u) specified in %s."
+ "\nExpected layout: %s",
+ descriptor->entryCount, static_cast<uint32_t>(descriptor->layout->GetBindingCount()),
+ descriptor->layout, descriptor->layout->EntriesToString());
+
+ const BindGroupLayoutBase::BindingMap& bindingMap = descriptor->layout->GetBindingMap();
+ ASSERT(bindingMap.size() <= kMaxBindingsPerPipelineLayout);
+
+ ityp::bitset<BindingIndex, kMaxBindingsPerPipelineLayout> bindingsSet;
+ for (uint32_t i = 0; i < descriptor->entryCount; ++i) {
+ const BindGroupEntry& entry = descriptor->entries[i];
+
+ const auto& it = bindingMap.find(BindingNumber(entry.binding));
+ DAWN_INVALID_IF(it == bindingMap.end(),
+ "In entries[%u], binding index %u not present in the bind group layout."
+ "\nExpected layout: %s",
+ i, entry.binding, descriptor->layout->EntriesToString());
+
+ BindingIndex bindingIndex = it->second;
+ ASSERT(bindingIndex < descriptor->layout->GetBindingCount());
+
+ DAWN_INVALID_IF(bindingsSet[bindingIndex],
+ "In entries[%u], binding index %u already used by a previous entry", i,
+ entry.binding);
+
+ bindingsSet.set(bindingIndex);
+
+ // Below this block we validate entries based on the bind group layout, in which
+ // external textures have been expanded into their underlying contents. For this reason
+ // we must identify external texture binding entries by checking the bind group entry
+ // itself.
+ // TODO(dawn:1293): Store external textures in
+ // BindGroupLayoutBase::BindingDataPointers::bindings so checking external textures can
+        // be moved into the switch below.
+ const ExternalTextureBindingEntry* externalTextureBindingEntry = nullptr;
+ FindInChain(entry.nextInChain, &externalTextureBindingEntry);
+ if (externalTextureBindingEntry != nullptr) {
+ DAWN_TRY(ValidateExternalTextureBinding(
+ device, entry, externalTextureBindingEntry,
+ descriptor->layout->GetExternalTextureBindingExpansionMap()));
+ continue;
+ } else {
+ DAWN_INVALID_IF(descriptor->layout->GetExternalTextureBindingExpansionMap().count(
+ BindingNumber(entry.binding)),
+ "entries[%u] is not an ExternalTexture when the layout contains an "
+ "ExternalTexture entry.",
+ i);
+ }
- void BindGroupBase::DestroyImpl() {
- if (mLayout != nullptr) {
- ASSERT(!IsError());
- for (BindingIndex i{0}; i < mLayout->GetBindingCount(); ++i) {
- mBindingData.bindings[i].~Ref<ObjectBase>();
- }
+ const BindingInfo& bindingInfo = descriptor->layout->GetBindingInfo(bindingIndex);
+
+ // Perform binding-type specific validation.
+ switch (bindingInfo.bindingType) {
+ case BindingInfoType::Buffer:
+ DAWN_TRY_CONTEXT(ValidateBufferBinding(device, entry, bindingInfo),
+ "validating entries[%u] as a Buffer."
+ "\nExpected entry layout: %s",
+ i, bindingInfo);
+ break;
+ case BindingInfoType::Texture:
+ case BindingInfoType::StorageTexture:
+ DAWN_TRY_CONTEXT(ValidateTextureBinding(device, entry, bindingInfo),
+ "validating entries[%u] as a Texture."
+ "\nExpected entry layout: %s",
+ i, bindingInfo);
+ break;
+ case BindingInfoType::Sampler:
+ DAWN_TRY_CONTEXT(ValidateSamplerBinding(device, entry, bindingInfo),
+ "validating entries[%u] as a Sampler."
+ "\nExpected entry layout: %s",
+ i, bindingInfo);
+ break;
+ case BindingInfoType::ExternalTexture:
+ UNREACHABLE();
+ break;
}
}
- void BindGroupBase::DeleteThis() {
- // Add another ref to the layout so that if this is the last ref, the layout
- // is destroyed after the bind group. The bind group is slab-allocated inside
- // memory owned by the layout (except for the null backend).
- Ref<BindGroupLayoutBase> layout = mLayout;
- ApiObjectBase::DeleteThis();
+ // This should always be true because
+ // - numBindings has to match between the bind group and its layout.
+ // - Each binding must be set at most once
+ //
+ // We don't validate the equality because it wouldn't be possible to cover it with a test.
+ ASSERT(bindingsSet.count() == descriptor->layout->GetUnexpandedBindingCount());
+
+ return {};
+}
+
+// BindGroup
+
+BindGroupBase::BindGroupBase(DeviceBase* device,
+ const BindGroupDescriptor* descriptor,
+ void* bindingDataStart)
+ : ApiObjectBase(device, descriptor->label),
+ mLayout(descriptor->layout),
+ mBindingData(mLayout->ComputeBindingDataPointers(bindingDataStart)) {
+ for (BindingIndex i{0}; i < mLayout->GetBindingCount(); ++i) {
+ // TODO(enga): Shouldn't be needed when bindings are tightly packed.
+ // This is to fill Ref<ObjectBase> holes with nullptrs.
+ new (&mBindingData.bindings[i]) Ref<ObjectBase>();
}
- BindGroupBase::BindGroupBase(DeviceBase* device, ObjectBase::ErrorTag tag)
- : ApiObjectBase(device, tag), mBindingData() {
- }
+ for (uint32_t i = 0; i < descriptor->entryCount; ++i) {
+ const BindGroupEntry& entry = descriptor->entries[i];
- // static
- BindGroupBase* BindGroupBase::MakeError(DeviceBase* device) {
- return new BindGroupBase(device, ObjectBase::kError);
- }
+ BindingIndex bindingIndex =
+ descriptor->layout->GetBindingIndex(BindingNumber(entry.binding));
+ ASSERT(bindingIndex < mLayout->GetBindingCount());
- ObjectType BindGroupBase::GetType() const {
- return ObjectType::BindGroup;
- }
+ // Only a single binding type should be set, so once we found it we can skip to the
+ // next loop iteration.
+
+ if (entry.buffer != nullptr) {
+ ASSERT(mBindingData.bindings[bindingIndex] == nullptr);
+ mBindingData.bindings[bindingIndex] = entry.buffer;
+ mBindingData.bufferData[bindingIndex].offset = entry.offset;
+ uint64_t bufferSize = (entry.size == wgpu::kWholeSize)
+ ? entry.buffer->GetSize() - entry.offset
+ : entry.size;
+ mBindingData.bufferData[bindingIndex].size = bufferSize;
+ continue;
+ }
- BindGroupLayoutBase* BindGroupBase::GetLayout() {
- ASSERT(!IsError());
- return mLayout.Get();
- }
+ if (entry.textureView != nullptr) {
+ ASSERT(mBindingData.bindings[bindingIndex] == nullptr);
+ mBindingData.bindings[bindingIndex] = entry.textureView;
+ continue;
+ }
- const BindGroupLayoutBase* BindGroupBase::GetLayout() const {
- ASSERT(!IsError());
- return mLayout.Get();
- }
+ if (entry.sampler != nullptr) {
+ ASSERT(mBindingData.bindings[bindingIndex] == nullptr);
+ mBindingData.bindings[bindingIndex] = entry.sampler;
+ continue;
+ }
- const ityp::span<uint32_t, uint64_t>& BindGroupBase::GetUnverifiedBufferSizes() const {
- ASSERT(!IsError());
- return mBindingData.unverifiedBufferSizes;
+ // Here we unpack external texture bindings into multiple additional bindings for the
+ // external texture's contents. New binding locations previously determined in the bind
+ // group layout are created in this bind group and filled with the external texture's
+ // underlying resources.
+ const ExternalTextureBindingEntry* externalTextureBindingEntry = nullptr;
+ FindInChain(entry.nextInChain, &externalTextureBindingEntry);
+ if (externalTextureBindingEntry != nullptr) {
+ mBoundExternalTextures.push_back(externalTextureBindingEntry->externalTexture);
+
+ ExternalTextureBindingExpansionMap expansions =
+ mLayout->GetExternalTextureBindingExpansionMap();
+ ExternalTextureBindingExpansionMap::iterator it =
+ expansions.find(BindingNumber(entry.binding));
+
+ ASSERT(it != expansions.end());
+
+ BindingIndex plane0BindingIndex =
+ descriptor->layout->GetBindingIndex(it->second.plane0);
+ BindingIndex plane1BindingIndex =
+ descriptor->layout->GetBindingIndex(it->second.plane1);
+ BindingIndex paramsBindingIndex =
+ descriptor->layout->GetBindingIndex(it->second.params);
+
+ ASSERT(mBindingData.bindings[plane0BindingIndex] == nullptr);
+
+ mBindingData.bindings[plane0BindingIndex] =
+ externalTextureBindingEntry->externalTexture->GetTextureViews()[0];
+
+ ASSERT(mBindingData.bindings[plane1BindingIndex] == nullptr);
+ mBindingData.bindings[plane1BindingIndex] =
+ externalTextureBindingEntry->externalTexture->GetTextureViews()[1];
+
+ ASSERT(mBindingData.bindings[paramsBindingIndex] == nullptr);
+ mBindingData.bindings[paramsBindingIndex] =
+ externalTextureBindingEntry->externalTexture->GetParamsBuffer();
+ mBindingData.bufferData[paramsBindingIndex].offset = 0;
+ mBindingData.bufferData[paramsBindingIndex].size =
+ sizeof(dawn_native::ExternalTextureParams);
+
+ continue;
+ }
}
- BufferBinding BindGroupBase::GetBindingAsBufferBinding(BindingIndex bindingIndex) {
- ASSERT(!IsError());
- ASSERT(bindingIndex < mLayout->GetBindingCount());
- ASSERT(mLayout->GetBindingInfo(bindingIndex).bindingType == BindingInfoType::Buffer);
- BufferBase* buffer = static_cast<BufferBase*>(mBindingData.bindings[bindingIndex].Get());
- return {buffer, mBindingData.bufferData[bindingIndex].offset,
- mBindingData.bufferData[bindingIndex].size};
+ uint32_t packedIdx = 0;
+ for (BindingIndex bindingIndex{0}; bindingIndex < descriptor->layout->GetBufferCount();
+ ++bindingIndex) {
+ if (descriptor->layout->GetBindingInfo(bindingIndex).buffer.minBindingSize == 0) {
+ mBindingData.unverifiedBufferSizes[packedIdx] =
+ mBindingData.bufferData[bindingIndex].size;
+ ++packedIdx;
+ }
}
- SamplerBase* BindGroupBase::GetBindingAsSampler(BindingIndex bindingIndex) const {
- ASSERT(!IsError());
- ASSERT(bindingIndex < mLayout->GetBindingCount());
- ASSERT(mLayout->GetBindingInfo(bindingIndex).bindingType == BindingInfoType::Sampler);
- return static_cast<SamplerBase*>(mBindingData.bindings[bindingIndex].Get());
- }
+ TrackInDevice();
+}
- TextureViewBase* BindGroupBase::GetBindingAsTextureView(BindingIndex bindingIndex) {
- ASSERT(!IsError());
- ASSERT(bindingIndex < mLayout->GetBindingCount());
- ASSERT(mLayout->GetBindingInfo(bindingIndex).bindingType == BindingInfoType::Texture ||
- mLayout->GetBindingInfo(bindingIndex).bindingType ==
- BindingInfoType::StorageTexture);
- return static_cast<TextureViewBase*>(mBindingData.bindings[bindingIndex].Get());
- }
+BindGroupBase::BindGroupBase(DeviceBase* device) : ApiObjectBase(device, kLabelNotImplemented) {
+ TrackInDevice();
+}
+
+BindGroupBase::~BindGroupBase() = default;
- const std::vector<Ref<ExternalTextureBase>>& BindGroupBase::GetBoundExternalTextures() const {
- return mBoundExternalTextures;
+void BindGroupBase::DestroyImpl() {
+ if (mLayout != nullptr) {
+ ASSERT(!IsError());
+ for (BindingIndex i{0}; i < mLayout->GetBindingCount(); ++i) {
+ mBindingData.bindings[i].~Ref<ObjectBase>();
+ }
}
+}
+
+void BindGroupBase::DeleteThis() {
+ // Add another ref to the layout so that if this is the last ref, the layout
+ // is destroyed after the bind group. The bind group is slab-allocated inside
+ // memory owned by the layout (except for the null backend).
+ Ref<BindGroupLayoutBase> layout = mLayout;
+ ApiObjectBase::DeleteThis();
+}
+
+BindGroupBase::BindGroupBase(DeviceBase* device, ObjectBase::ErrorTag tag)
+ : ApiObjectBase(device, tag), mBindingData() {}
+
+// static
+BindGroupBase* BindGroupBase::MakeError(DeviceBase* device) {
+ return new BindGroupBase(device, ObjectBase::kError);
+}
+
+ObjectType BindGroupBase::GetType() const {
+ return ObjectType::BindGroup;
+}
+
+BindGroupLayoutBase* BindGroupBase::GetLayout() {
+ ASSERT(!IsError());
+ return mLayout.Get();
+}
+
+const BindGroupLayoutBase* BindGroupBase::GetLayout() const {
+ ASSERT(!IsError());
+ return mLayout.Get();
+}
+
+const ityp::span<uint32_t, uint64_t>& BindGroupBase::GetUnverifiedBufferSizes() const {
+ ASSERT(!IsError());
+ return mBindingData.unverifiedBufferSizes;
+}
+
+BufferBinding BindGroupBase::GetBindingAsBufferBinding(BindingIndex bindingIndex) {
+ ASSERT(!IsError());
+ ASSERT(bindingIndex < mLayout->GetBindingCount());
+ ASSERT(mLayout->GetBindingInfo(bindingIndex).bindingType == BindingInfoType::Buffer);
+ BufferBase* buffer = static_cast<BufferBase*>(mBindingData.bindings[bindingIndex].Get());
+ return {buffer, mBindingData.bufferData[bindingIndex].offset,
+ mBindingData.bufferData[bindingIndex].size};
+}
+
+SamplerBase* BindGroupBase::GetBindingAsSampler(BindingIndex bindingIndex) const {
+ ASSERT(!IsError());
+ ASSERT(bindingIndex < mLayout->GetBindingCount());
+ ASSERT(mLayout->GetBindingInfo(bindingIndex).bindingType == BindingInfoType::Sampler);
+ return static_cast<SamplerBase*>(mBindingData.bindings[bindingIndex].Get());
+}
+
+TextureViewBase* BindGroupBase::GetBindingAsTextureView(BindingIndex bindingIndex) {
+ ASSERT(!IsError());
+ ASSERT(bindingIndex < mLayout->GetBindingCount());
+ ASSERT(mLayout->GetBindingInfo(bindingIndex).bindingType == BindingInfoType::Texture ||
+ mLayout->GetBindingInfo(bindingIndex).bindingType == BindingInfoType::StorageTexture);
+ return static_cast<TextureViewBase*>(mBindingData.bindings[bindingIndex].Get());
+}
+
+const std::vector<Ref<ExternalTextureBase>>& BindGroupBase::GetBoundExternalTextures() const {
+ return mBoundExternalTextures;
+}
} // namespace dawn::native
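
Almost every function in BindGroup.cpp above follows the same shape: a MaybeError-returning helper performs a sequence of DAWN_INVALID_IF checks and returns an empty success value at the end, while callers propagate the first failure with DAWN_TRY. A minimal sketch of that early-return validation shape, using std::optional<std::string> as a stand-in for Dawn's error type (the real macros also build formatted, contextual messages):

// Illustrative sketch only, not Dawn code.
#include <cstdint>
#include <optional>
#include <string>

using MaybeErr = std::optional<std::string>;  // nullopt == success

MaybeErr ValidateBufferRange(uint64_t offset, uint64_t size, uint64_t bufferSize) {
    if (offset > bufferSize) {
        return "Binding offset is larger than the buffer size.";
    }
    if (size == 0) {
        return "Binding size is zero.";
    }
    // Check size first so the unsigned subtraction below cannot wrap around.
    if (size > bufferSize || offset > bufferSize - size) {
        return "Binding range doesn't fit in the buffer.";
    }
    return std::nullopt;  // valid
}

MaybeErr ValidateEntry(uint64_t offset, uint64_t size, uint64_t bufferSize) {
    // The DAWN_TRY-style pattern: propagate the first error and stop validating.
    if (auto err = ValidateBufferRange(offset, size, bufferSize)) {
        return err;
    }
    return std::nullopt;
}

The benefit of this shape is that each check reads as a single precondition, and adding context (as DAWN_TRY_CONTEXT does above) only has to happen at the call site.
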
diff --git a/chromium/third_party/dawn/src/dawn/native/BindGroup.h b/chromium/third_party/dawn/src/dawn/native/BindGroup.h
index 6ec3c7a6337..236e4fb9774 100644
--- a/chromium/third_party/dawn/src/dawn/native/BindGroup.h
+++ b/chromium/third_party/dawn/src/dawn/native/BindGroup.h
@@ -15,6 +15,9 @@
#ifndef SRC_DAWN_NATIVE_BINDGROUP_H_
#define SRC_DAWN_NATIVE_BINDGROUP_H_
+#include <array>
+#include <vector>
+
#include "dawn/common/Constants.h"
#include "dawn/common/Math.h"
#include "dawn/native/BindGroupLayout.h"
@@ -24,72 +27,69 @@
#include "dawn/native/dawn_platform.h"
-#include <array>
-
namespace dawn::native {
- class DeviceBase;
-
- MaybeError ValidateBindGroupDescriptor(DeviceBase* device,
- const BindGroupDescriptor* descriptor);
-
- struct BufferBinding {
- BufferBase* buffer;
- uint64_t offset;
- uint64_t size;
- };
-
- class BindGroupBase : public ApiObjectBase {
- public:
- static BindGroupBase* MakeError(DeviceBase* device);
-
- ObjectType GetType() const override;
-
- BindGroupLayoutBase* GetLayout();
- const BindGroupLayoutBase* GetLayout() const;
- BufferBinding GetBindingAsBufferBinding(BindingIndex bindingIndex);
- SamplerBase* GetBindingAsSampler(BindingIndex bindingIndex) const;
- TextureViewBase* GetBindingAsTextureView(BindingIndex bindingIndex);
- const ityp::span<uint32_t, uint64_t>& GetUnverifiedBufferSizes() const;
- const std::vector<Ref<ExternalTextureBase>>& GetBoundExternalTextures() const;
-
- protected:
- // To save memory, the size of a bind group is dynamically determined and the bind group is
- // placement-allocated into memory big enough to hold the bind group with its
- // dynamically-sized bindings after it. The pointer of the memory of the beginning of the
- // binding data should be passed as |bindingDataStart|.
- BindGroupBase(DeviceBase* device,
- const BindGroupDescriptor* descriptor,
- void* bindingDataStart);
-
- // Helper to instantiate BindGroupBase. We pass in |derived| because BindGroupBase may not
- // be first in the allocation. The binding data is stored after the Derived class.
- template <typename Derived>
- BindGroupBase(Derived* derived, DeviceBase* device, const BindGroupDescriptor* descriptor)
- : BindGroupBase(device,
- descriptor,
- AlignPtr(reinterpret_cast<char*>(derived) + sizeof(Derived),
- descriptor->layout->GetBindingDataAlignment())) {
- static_assert(std::is_base_of<BindGroupBase, Derived>::value);
- }
-
- // Constructor used only for mocking and testing.
- explicit BindGroupBase(DeviceBase* device);
- void DestroyImpl() override;
-
- ~BindGroupBase() override;
-
- private:
- BindGroupBase(DeviceBase* device, ObjectBase::ErrorTag tag);
- void DeleteThis() override;
-
- Ref<BindGroupLayoutBase> mLayout;
- BindGroupLayoutBase::BindingDataPointers mBindingData;
-
- // TODO:(dawn:1293): Store external textures in
- // BindGroupLayoutBase::BindingDataPointers::bindings
- std::vector<Ref<ExternalTextureBase>> mBoundExternalTextures;
- };
+class DeviceBase;
+
+MaybeError ValidateBindGroupDescriptor(DeviceBase* device, const BindGroupDescriptor* descriptor);
+
+struct BufferBinding {
+ BufferBase* buffer;
+ uint64_t offset;
+ uint64_t size;
+};
+
+class BindGroupBase : public ApiObjectBase {
+ public:
+ static BindGroupBase* MakeError(DeviceBase* device);
+
+ ObjectType GetType() const override;
+
+ BindGroupLayoutBase* GetLayout();
+ const BindGroupLayoutBase* GetLayout() const;
+ BufferBinding GetBindingAsBufferBinding(BindingIndex bindingIndex);
+ SamplerBase* GetBindingAsSampler(BindingIndex bindingIndex) const;
+ TextureViewBase* GetBindingAsTextureView(BindingIndex bindingIndex);
+ const ityp::span<uint32_t, uint64_t>& GetUnverifiedBufferSizes() const;
+ const std::vector<Ref<ExternalTextureBase>>& GetBoundExternalTextures() const;
+
+ protected:
+ // To save memory, the size of a bind group is dynamically determined and the bind group is
+ // placement-allocated into memory big enough to hold the bind group with its
+    // dynamically-sized bindings after it. A pointer to the beginning of the binding data
+    // memory should be passed as |bindingDataStart|.
+ BindGroupBase(DeviceBase* device,
+ const BindGroupDescriptor* descriptor,
+ void* bindingDataStart);
+
+ // Helper to instantiate BindGroupBase. We pass in |derived| because BindGroupBase may not
+ // be first in the allocation. The binding data is stored after the Derived class.
+ template <typename Derived>
+ BindGroupBase(Derived* derived, DeviceBase* device, const BindGroupDescriptor* descriptor)
+ : BindGroupBase(device,
+ descriptor,
+ AlignPtr(reinterpret_cast<char*>(derived) + sizeof(Derived),
+ descriptor->layout->GetBindingDataAlignment())) {
+ static_assert(std::is_base_of<BindGroupBase, Derived>::value);
+ }
+
+ // Constructor used only for mocking and testing.
+ explicit BindGroupBase(DeviceBase* device);
+ void DestroyImpl() override;
+
+ ~BindGroupBase() override;
+
+ private:
+ BindGroupBase(DeviceBase* device, ObjectBase::ErrorTag tag);
+ void DeleteThis() override;
+
+ Ref<BindGroupLayoutBase> mLayout;
+ BindGroupLayoutBase::BindingDataPointers mBindingData;
+
+ // TODO(dawn:1293): Store external textures in
+ // BindGroupLayoutBase::BindingDataPointers::bindings
+ std::vector<Ref<ExternalTextureBase>> mBoundExternalTextures;
+};
} // namespace dawn::native
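
The protected BindGroupBase constructor above relies on the bind group being placement-allocated into a block that also holds its dynamically sized binding data, with AlignPtr locating the data just past the derived object. A self-contained sketch of that layout trick, with hypothetical names and a plain uint64_t array standing in for the real binding data:

// Illustrative sketch only, not Dawn code.
#include <cstddef>
#include <cstdint>
#include <new>

inline char* AlignPtrUp(char* ptr, size_t alignment) {
    // alignment must be a power of two.
    auto value = reinterpret_cast<uintptr_t>(ptr);
    uintptr_t aligned = (value + alignment - 1) & ~(alignment - 1);
    return reinterpret_cast<char*>(aligned);
}

struct FakeBindGroup {
    explicit FakeBindGroup(void* bindingDataStart)
        : bindings(static_cast<uint64_t*>(bindingDataStart)) {}
    uint64_t* bindings;  // points just past the object, inside the same allocation
};

int main() {
    constexpr size_t kBindingCount = 4;
    const size_t dataAlignment = alignof(uint64_t);

    // Over-allocate: object + worst-case padding + binding data.
    const size_t totalSize =
        sizeof(FakeBindGroup) + dataAlignment + kBindingCount * sizeof(uint64_t);
    char* memory = new char[totalSize];

    // Binding data starts at the first suitably aligned address past the object.
    char* dataStart = AlignPtrUp(memory + sizeof(FakeBindGroup), dataAlignment);
    FakeBindGroup* group = new (memory) FakeBindGroup(dataStart);

    for (size_t i = 0; i < kBindingCount; ++i) {
        group->bindings[i] = i;  // binding data lives in the same block as the object
    }

    group->~FakeBindGroup();
    delete[] memory;
    return 0;
}

Keeping the object and its trailing data in one allocation avoids a second heap allocation per bind group and keeps the data cache-adjacent to the object that owns it.
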
diff --git a/chromium/third_party/dawn/src/dawn/native/BindGroupLayout.cpp b/chromium/third_party/dawn/src/dawn/native/BindGroupLayout.cpp
index 201aecc1188..b57cd69e981 100644
--- a/chromium/third_party/dawn/src/dawn/native/BindGroupLayout.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/BindGroupLayout.cpp
@@ -14,8 +14,13 @@
#include "dawn/native/BindGroupLayout.h"
-#include "dawn/common/BitSetIterator.h"
+#include <algorithm>
+#include <functional>
+#include <limits>
+#include <set>
+#include <vector>
+#include "dawn/common/BitSetIterator.h"
#include "dawn/native/ChainUtils_autogen.h"
#include "dawn/native/Device.h"
#include "dawn/native/ObjectBase.h"
@@ -24,653 +29,641 @@
#include "dawn/native/PerStage.h"
#include "dawn/native/ValidationUtils_autogen.h"
-#include <algorithm>
-#include <functional>
-#include <set>
-
namespace dawn::native {
- namespace {
- MaybeError ValidateStorageTextureFormat(DeviceBase* device,
- wgpu::TextureFormat storageTextureFormat) {
- const Format* format = nullptr;
- DAWN_TRY_ASSIGN(format, device->GetInternalFormat(storageTextureFormat));
-
- ASSERT(format != nullptr);
- DAWN_INVALID_IF(!format->supportsStorageUsage,
- "Texture format (%s) does not support storage textures.",
- storageTextureFormat);
-
+namespace {
+MaybeError ValidateStorageTextureFormat(DeviceBase* device,
+ wgpu::TextureFormat storageTextureFormat) {
+ const Format* format = nullptr;
+ DAWN_TRY_ASSIGN(format, device->GetInternalFormat(storageTextureFormat));
+
+ ASSERT(format != nullptr);
+ DAWN_INVALID_IF(!format->supportsStorageUsage,
+ "Texture format (%s) does not support storage textures.", storageTextureFormat);
+
+ return {};
+}
+
+MaybeError ValidateStorageTextureViewDimension(wgpu::TextureViewDimension dimension) {
+ switch (dimension) {
+ case wgpu::TextureViewDimension::Cube:
+ case wgpu::TextureViewDimension::CubeArray:
+ return DAWN_FORMAT_VALIDATION_ERROR(
+ "%s texture views cannot be used as storage textures.", dimension);
+
+ case wgpu::TextureViewDimension::e1D:
+ case wgpu::TextureViewDimension::e2D:
+ case wgpu::TextureViewDimension::e2DArray:
+ case wgpu::TextureViewDimension::e3D:
return {};
- }
- MaybeError ValidateStorageTextureViewDimension(wgpu::TextureViewDimension dimension) {
- switch (dimension) {
- case wgpu::TextureViewDimension::Cube:
- case wgpu::TextureViewDimension::CubeArray:
- return DAWN_FORMAT_VALIDATION_ERROR(
- "%s texture views cannot be used as storage textures.", dimension);
-
- case wgpu::TextureViewDimension::e1D:
- case wgpu::TextureViewDimension::e2D:
- case wgpu::TextureViewDimension::e2DArray:
- case wgpu::TextureViewDimension::e3D:
- return {};
-
- case wgpu::TextureViewDimension::Undefined:
- break;
- }
- UNREACHABLE();
+ case wgpu::TextureViewDimension::Undefined:
+ break;
+ }
+ UNREACHABLE();
+}
+
+MaybeError ValidateBindGroupLayoutEntry(DeviceBase* device,
+ const BindGroupLayoutEntry& entry,
+ bool allowInternalBinding) {
+ DAWN_TRY(ValidateShaderStage(entry.visibility));
+
+ int bindingMemberCount = 0;
+ BindingInfoType bindingType;
+ wgpu::ShaderStage allowedStages = kAllStages;
+
+ if (entry.buffer.type != wgpu::BufferBindingType::Undefined) {
+ bindingMemberCount++;
+ bindingType = BindingInfoType::Buffer;
+ const BufferBindingLayout& buffer = entry.buffer;
+
+ // The kInternalStorageBufferBinding is used internally and not a value
+ // in wgpu::BufferBindingType.
+ if (buffer.type == kInternalStorageBufferBinding) {
+ DAWN_INVALID_IF(!allowInternalBinding, "Internal binding types are disallowed");
+ } else {
+ DAWN_TRY(ValidateBufferBindingType(buffer.type));
}
- MaybeError ValidateBindGroupLayoutEntry(DeviceBase* device,
- const BindGroupLayoutEntry& entry,
- bool allowInternalBinding) {
- DAWN_TRY(ValidateShaderStage(entry.visibility));
-
- int bindingMemberCount = 0;
- BindingInfoType bindingType;
- wgpu::ShaderStage allowedStages = kAllStages;
-
- if (entry.buffer.type != wgpu::BufferBindingType::Undefined) {
- bindingMemberCount++;
- bindingType = BindingInfoType::Buffer;
- const BufferBindingLayout& buffer = entry.buffer;
-
- // The kInternalStorageBufferBinding is used internally and not a value
- // in wgpu::BufferBindingType.
- if (buffer.type == kInternalStorageBufferBinding) {
- DAWN_INVALID_IF(!allowInternalBinding, "Internal binding types are disallowed");
- } else {
- DAWN_TRY(ValidateBufferBindingType(buffer.type));
- }
-
- if (buffer.type == wgpu::BufferBindingType::Storage ||
- buffer.type == kInternalStorageBufferBinding) {
- allowedStages &= ~wgpu::ShaderStage::Vertex;
- }
- }
-
- if (entry.sampler.type != wgpu::SamplerBindingType::Undefined) {
- bindingMemberCount++;
- bindingType = BindingInfoType::Sampler;
- DAWN_TRY(ValidateSamplerBindingType(entry.sampler.type));
- }
-
- if (entry.texture.sampleType != wgpu::TextureSampleType::Undefined) {
- bindingMemberCount++;
- bindingType = BindingInfoType::Texture;
- const TextureBindingLayout& texture = entry.texture;
- DAWN_TRY(ValidateTextureSampleType(texture.sampleType));
-
- // viewDimension defaults to 2D if left undefined, needs validation otherwise.
- wgpu::TextureViewDimension viewDimension = wgpu::TextureViewDimension::e2D;
- if (texture.viewDimension != wgpu::TextureViewDimension::Undefined) {
- DAWN_TRY(ValidateTextureViewDimension(texture.viewDimension));
- viewDimension = texture.viewDimension;
- }
-
- DAWN_INVALID_IF(
- texture.multisampled && viewDimension != wgpu::TextureViewDimension::e2D,
- "View dimension (%s) for a multisampled texture bindings was not %s.",
- viewDimension, wgpu::TextureViewDimension::e2D);
- }
-
- if (entry.storageTexture.access != wgpu::StorageTextureAccess::Undefined) {
- bindingMemberCount++;
- bindingType = BindingInfoType::StorageTexture;
- const StorageTextureBindingLayout& storageTexture = entry.storageTexture;
- DAWN_TRY(ValidateStorageTextureAccess(storageTexture.access));
- DAWN_TRY(ValidateStorageTextureFormat(device, storageTexture.format));
-
- // viewDimension defaults to 2D if left undefined, needs validation otherwise.
- if (storageTexture.viewDimension != wgpu::TextureViewDimension::Undefined) {
- DAWN_TRY(ValidateTextureViewDimension(storageTexture.viewDimension));
- DAWN_TRY(ValidateStorageTextureViewDimension(storageTexture.viewDimension));
- }
-
- if (storageTexture.access == wgpu::StorageTextureAccess::WriteOnly) {
- allowedStages &= ~wgpu::ShaderStage::Vertex;
- }
- }
-
- const ExternalTextureBindingLayout* externalTextureBindingLayout = nullptr;
- FindInChain(entry.nextInChain, &externalTextureBindingLayout);
- if (externalTextureBindingLayout != nullptr) {
- bindingMemberCount++;
- bindingType = BindingInfoType::ExternalTexture;
- }
-
- DAWN_INVALID_IF(bindingMemberCount == 0,
- "BindGroupLayoutEntry had none of buffer, sampler, texture, "
- "storageTexture, or externalTexture set");
-
- DAWN_INVALID_IF(bindingMemberCount != 1,
- "BindGroupLayoutEntry had more than one of buffer, sampler, texture, "
- "storageTexture, or externalTexture set");
-
- DAWN_INVALID_IF(
- !IsSubset(entry.visibility, allowedStages),
- "%s bindings cannot be used with a visibility of %s. Only %s are allowed.",
- bindingType, entry.visibility, allowedStages);
-
- return {};
+ if (buffer.type == wgpu::BufferBindingType::Storage ||
+ buffer.type == kInternalStorageBufferBinding) {
+ allowedStages &= ~wgpu::ShaderStage::Vertex;
}
+ }
- BindGroupLayoutEntry CreateSampledTextureBindingForExternalTexture(
- uint32_t binding,
- wgpu::ShaderStage visibility) {
- BindGroupLayoutEntry entry;
- entry.binding = binding;
- entry.visibility = visibility;
- entry.texture.viewDimension = wgpu::TextureViewDimension::e2D;
- entry.texture.multisampled = false;
- entry.texture.sampleType = wgpu::TextureSampleType::Float;
- return entry;
- }
+ if (entry.sampler.type != wgpu::SamplerBindingType::Undefined) {
+ bindingMemberCount++;
+ bindingType = BindingInfoType::Sampler;
+ DAWN_TRY(ValidateSamplerBindingType(entry.sampler.type));
+ }
- BindGroupLayoutEntry CreateUniformBindingForExternalTexture(uint32_t binding,
- wgpu::ShaderStage visibility) {
- BindGroupLayoutEntry entry;
- entry.binding = binding;
- entry.visibility = visibility;
- entry.buffer.hasDynamicOffset = false;
- entry.buffer.type = wgpu::BufferBindingType::Uniform;
- return entry;
+ if (entry.texture.sampleType != wgpu::TextureSampleType::Undefined) {
+ bindingMemberCount++;
+ bindingType = BindingInfoType::Texture;
+ const TextureBindingLayout& texture = entry.texture;
+ DAWN_TRY(ValidateTextureSampleType(texture.sampleType));
+
+ // viewDimension defaults to 2D if left undefined, needs validation otherwise.
+ wgpu::TextureViewDimension viewDimension = wgpu::TextureViewDimension::e2D;
+ if (texture.viewDimension != wgpu::TextureViewDimension::Undefined) {
+ DAWN_TRY(ValidateTextureViewDimension(texture.viewDimension));
+ viewDimension = texture.viewDimension;
}
- std::vector<BindGroupLayoutEntry> ExtractAndExpandBglEntries(
- const BindGroupLayoutDescriptor* descriptor,
- BindingCounts* bindingCounts,
- ExternalTextureBindingExpansionMap* externalTextureBindingExpansions) {
- std::vector<BindGroupLayoutEntry> expandedOutput;
-
- // When new bgl entries are created, we use binding numbers larger than
- // kMaxBindingNumber to ensure there are no collisions.
- uint32_t nextOpenBindingNumberForNewEntry = kMaxBindingNumber + 1;
- for (uint32_t i = 0; i < descriptor->entryCount; i++) {
- const BindGroupLayoutEntry& entry = descriptor->entries[i];
- const ExternalTextureBindingLayout* externalTextureBindingLayout = nullptr;
- FindInChain(entry.nextInChain, &externalTextureBindingLayout);
- // External textures are expanded from a texture_external into two sampled texture
- // bindings and one uniform buffer binding. The original binding number is used
- // for the first sampled texture.
- if (externalTextureBindingLayout != nullptr) {
- for (SingleShaderStage stage : IterateStages(entry.visibility)) {
- // External textures are not fully implemented, which means that expanding
- // the external texture at this time will not occupy the same number of
- // binding slots as defined in the WebGPU specification. Here we prematurely
- // increment the binding counts for an additional sampled textures and a
- // sampler so that an external texture will occupy the correct number of
- // slots for correct validation of shader binding limits.
- // TODO:(dawn:1082): Consider removing this and instead making a change to
- // the validation.
- constexpr uint32_t kUnimplementedSampledTexturesPerExternalTexture = 2;
- constexpr uint32_t kUnimplementedSamplersPerExternalTexture = 1;
- bindingCounts->perStage[stage].sampledTextureCount +=
- kUnimplementedSampledTexturesPerExternalTexture;
- bindingCounts->perStage[stage].samplerCount +=
- kUnimplementedSamplersPerExternalTexture;
- }
-
- dawn_native::ExternalTextureBindingExpansion bindingExpansion;
-
- BindGroupLayoutEntry plane0Entry =
- CreateSampledTextureBindingForExternalTexture(entry.binding,
- entry.visibility);
- bindingExpansion.plane0 = BindingNumber(plane0Entry.binding);
- expandedOutput.push_back(plane0Entry);
-
- BindGroupLayoutEntry plane1Entry =
- CreateSampledTextureBindingForExternalTexture(
- nextOpenBindingNumberForNewEntry++, entry.visibility);
- bindingExpansion.plane1 = BindingNumber(plane1Entry.binding);
- expandedOutput.push_back(plane1Entry);
-
- BindGroupLayoutEntry paramsEntry = CreateUniformBindingForExternalTexture(
- nextOpenBindingNumberForNewEntry++, entry.visibility);
- bindingExpansion.params = BindingNumber(paramsEntry.binding);
- expandedOutput.push_back(paramsEntry);
-
- externalTextureBindingExpansions->insert(
- {BindingNumber(entry.binding), bindingExpansion});
- } else {
- expandedOutput.push_back(entry);
- }
- }
+ DAWN_INVALID_IF(texture.multisampled && viewDimension != wgpu::TextureViewDimension::e2D,
+                    "View dimension (%s) for a multisampled texture binding was not %s.",
+ viewDimension, wgpu::TextureViewDimension::e2D);
+ }
- return expandedOutput;
+ if (entry.storageTexture.access != wgpu::StorageTextureAccess::Undefined) {
+ bindingMemberCount++;
+ bindingType = BindingInfoType::StorageTexture;
+ const StorageTextureBindingLayout& storageTexture = entry.storageTexture;
+ DAWN_TRY(ValidateStorageTextureAccess(storageTexture.access));
+ DAWN_TRY(ValidateStorageTextureFormat(device, storageTexture.format));
+
+ // viewDimension defaults to 2D if left undefined, needs validation otherwise.
+ if (storageTexture.viewDimension != wgpu::TextureViewDimension::Undefined) {
+ DAWN_TRY(ValidateTextureViewDimension(storageTexture.viewDimension));
+ DAWN_TRY(ValidateStorageTextureViewDimension(storageTexture.viewDimension));
}
- } // anonymous namespace
-
- MaybeError ValidateBindGroupLayoutDescriptor(DeviceBase* device,
- const BindGroupLayoutDescriptor* descriptor,
- bool allowInternalBinding) {
- DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr");
-
- std::set<BindingNumber> bindingsSet;
- BindingCounts bindingCounts = {};
-
- for (uint32_t i = 0; i < descriptor->entryCount; ++i) {
- const BindGroupLayoutEntry& entry = descriptor->entries[i];
- BindingNumber bindingNumber = BindingNumber(entry.binding);
-
- DAWN_INVALID_IF(bindingNumber > kMaxBindingNumberTyped,
- "Binding number (%u) exceeds the maximum binding number (%u).",
- uint32_t(bindingNumber), uint32_t(kMaxBindingNumberTyped));
- DAWN_INVALID_IF(bindingsSet.count(bindingNumber) != 0,
- "On entries[%u]: binding index (%u) was specified by a previous entry.",
- i, entry.binding);
- DAWN_TRY_CONTEXT(ValidateBindGroupLayoutEntry(device, entry, allowInternalBinding),
- "validating entries[%u]", i);
-
- IncrementBindingCounts(&bindingCounts, entry);
-
- bindingsSet.insert(bindingNumber);
+ if (storageTexture.access == wgpu::StorageTextureAccess::WriteOnly) {
+ allowedStages &= ~wgpu::ShaderStage::Vertex;
}
-
- DAWN_TRY_CONTEXT(ValidateBindingCounts(bindingCounts), "validating binding counts");
-
- return {};
}
- namespace {
+ const ExternalTextureBindingLayout* externalTextureBindingLayout = nullptr;
+ FindInChain(entry.nextInChain, &externalTextureBindingLayout);
+ if (externalTextureBindingLayout != nullptr) {
+ bindingMemberCount++;
+ bindingType = BindingInfoType::ExternalTexture;
+ }
- bool operator!=(const BindingInfo& a, const BindingInfo& b) {
- if (a.visibility != b.visibility || a.bindingType != b.bindingType) {
- return true;
+ DAWN_INVALID_IF(bindingMemberCount == 0,
+ "BindGroupLayoutEntry had none of buffer, sampler, texture, "
+ "storageTexture, or externalTexture set");
+
+ DAWN_INVALID_IF(bindingMemberCount != 1,
+ "BindGroupLayoutEntry had more than one of buffer, sampler, texture, "
+ "storageTexture, or externalTexture set");
+
+ DAWN_INVALID_IF(!IsSubset(entry.visibility, allowedStages),
+ "%s bindings cannot be used with a visibility of %s. Only %s are allowed.",
+ bindingType, entry.visibility, allowedStages);
+
+ return {};
+}
+
+BindGroupLayoutEntry CreateSampledTextureBindingForExternalTexture(uint32_t binding,
+ wgpu::ShaderStage visibility) {
+ BindGroupLayoutEntry entry;
+ entry.binding = binding;
+ entry.visibility = visibility;
+ entry.texture.viewDimension = wgpu::TextureViewDimension::e2D;
+ entry.texture.multisampled = false;
+ entry.texture.sampleType = wgpu::TextureSampleType::Float;
+ return entry;
+}
+
+BindGroupLayoutEntry CreateUniformBindingForExternalTexture(uint32_t binding,
+ wgpu::ShaderStage visibility) {
+ BindGroupLayoutEntry entry;
+ entry.binding = binding;
+ entry.visibility = visibility;
+ entry.buffer.hasDynamicOffset = false;
+ entry.buffer.type = wgpu::BufferBindingType::Uniform;
+ return entry;
+}
+
+std::vector<BindGroupLayoutEntry> ExtractAndExpandBglEntries(
+ const BindGroupLayoutDescriptor* descriptor,
+ BindingCounts* bindingCounts,
+ ExternalTextureBindingExpansionMap* externalTextureBindingExpansions) {
+ std::vector<BindGroupLayoutEntry> expandedOutput;
+
+ // When new bgl entries are created, we use binding numbers larger than
+ // kMaxBindingNumber to ensure there are no collisions.
+ uint32_t nextOpenBindingNumberForNewEntry = kMaxBindingNumber + 1;
+ for (uint32_t i = 0; i < descriptor->entryCount; i++) {
+ const BindGroupLayoutEntry& entry = descriptor->entries[i];
+ const ExternalTextureBindingLayout* externalTextureBindingLayout = nullptr;
+ FindInChain(entry.nextInChain, &externalTextureBindingLayout);
+ // External textures are expanded from a texture_external into two sampled texture
+ // bindings and one uniform buffer binding. The original binding number is used
+ // for the first sampled texture.
+ if (externalTextureBindingLayout != nullptr) {
+ for (SingleShaderStage stage : IterateStages(entry.visibility)) {
+ // External textures are not fully implemented, which means that expanding
+ // the external texture at this time will not occupy the same number of
+ // binding slots as defined in the WebGPU specification. Here we prematurely
+                // increment the binding counts for additional sampled textures and a
+ // sampler so that an external texture will occupy the correct number of
+ // slots for correct validation of shader binding limits.
+ // TODO(dawn:1082): Consider removing this and instead making a change to
+ // the validation.
+ constexpr uint32_t kUnimplementedSampledTexturesPerExternalTexture = 2;
+ constexpr uint32_t kUnimplementedSamplersPerExternalTexture = 1;
+ bindingCounts->perStage[stage].sampledTextureCount +=
+ kUnimplementedSampledTexturesPerExternalTexture;
+ bindingCounts->perStage[stage].samplerCount +=
+ kUnimplementedSamplersPerExternalTexture;
}
- switch (a.bindingType) {
- case BindingInfoType::Buffer:
- return a.buffer.type != b.buffer.type ||
- a.buffer.hasDynamicOffset != b.buffer.hasDynamicOffset ||
- a.buffer.minBindingSize != b.buffer.minBindingSize;
- case BindingInfoType::Sampler:
- return a.sampler.type != b.sampler.type;
- case BindingInfoType::Texture:
- return a.texture.sampleType != b.texture.sampleType ||
- a.texture.viewDimension != b.texture.viewDimension ||
- a.texture.multisampled != b.texture.multisampled;
- case BindingInfoType::StorageTexture:
- return a.storageTexture.access != b.storageTexture.access ||
- a.storageTexture.viewDimension != b.storageTexture.viewDimension ||
- a.storageTexture.format != b.storageTexture.format;
- case BindingInfoType::ExternalTexture:
- return false;
- }
- UNREACHABLE();
- }
+ dawn_native::ExternalTextureBindingExpansion bindingExpansion;
- bool IsBufferBinding(const BindGroupLayoutEntry& binding) {
- return binding.buffer.type != wgpu::BufferBindingType::Undefined;
- }
+ BindGroupLayoutEntry plane0Entry =
+ CreateSampledTextureBindingForExternalTexture(entry.binding, entry.visibility);
+ bindingExpansion.plane0 = BindingNumber(plane0Entry.binding);
+ expandedOutput.push_back(plane0Entry);
- bool BindingHasDynamicOffset(const BindGroupLayoutEntry& binding) {
- if (binding.buffer.type != wgpu::BufferBindingType::Undefined) {
- return binding.buffer.hasDynamicOffset;
- }
- return false;
- }
+ BindGroupLayoutEntry plane1Entry = CreateSampledTextureBindingForExternalTexture(
+ nextOpenBindingNumberForNewEntry++, entry.visibility);
+ bindingExpansion.plane1 = BindingNumber(plane1Entry.binding);
+ expandedOutput.push_back(plane1Entry);
- BindingInfo CreateBindGroupLayoutInfo(const BindGroupLayoutEntry& binding) {
- BindingInfo bindingInfo;
- bindingInfo.binding = BindingNumber(binding.binding);
- bindingInfo.visibility = binding.visibility;
-
- if (binding.buffer.type != wgpu::BufferBindingType::Undefined) {
- bindingInfo.bindingType = BindingInfoType::Buffer;
- bindingInfo.buffer = binding.buffer;
- } else if (binding.sampler.type != wgpu::SamplerBindingType::Undefined) {
- bindingInfo.bindingType = BindingInfoType::Sampler;
- bindingInfo.sampler = binding.sampler;
- } else if (binding.texture.sampleType != wgpu::TextureSampleType::Undefined) {
- bindingInfo.bindingType = BindingInfoType::Texture;
- bindingInfo.texture = binding.texture;
-
- if (binding.texture.viewDimension == wgpu::TextureViewDimension::Undefined) {
- bindingInfo.texture.viewDimension = wgpu::TextureViewDimension::e2D;
- }
- } else if (binding.storageTexture.access != wgpu::StorageTextureAccess::Undefined) {
- bindingInfo.bindingType = BindingInfoType::StorageTexture;
- bindingInfo.storageTexture = binding.storageTexture;
-
- if (binding.storageTexture.viewDimension == wgpu::TextureViewDimension::Undefined) {
- bindingInfo.storageTexture.viewDimension = wgpu::TextureViewDimension::e2D;
- }
- } else {
- const ExternalTextureBindingLayout* externalTextureBindingLayout = nullptr;
- FindInChain(binding.nextInChain, &externalTextureBindingLayout);
- if (externalTextureBindingLayout != nullptr) {
- bindingInfo.bindingType = BindingInfoType::ExternalTexture;
- }
- }
+ BindGroupLayoutEntry paramsEntry = CreateUniformBindingForExternalTexture(
+ nextOpenBindingNumberForNewEntry++, entry.visibility);
+ bindingExpansion.params = BindingNumber(paramsEntry.binding);
+ expandedOutput.push_back(paramsEntry);
- return bindingInfo;
+ externalTextureBindingExpansions->insert(
+ {BindingNumber(entry.binding), bindingExpansion});
+ } else {
+ expandedOutput.push_back(entry);
}
+ }
- bool SortBindingsCompare(const BindGroupLayoutEntry& a, const BindGroupLayoutEntry& b) {
- const bool aIsBuffer = IsBufferBinding(a);
- const bool bIsBuffer = IsBufferBinding(b);
- if (aIsBuffer != bIsBuffer) {
- // Always place buffers first.
- return aIsBuffer;
- }
-
- if (aIsBuffer) {
- bool aHasDynamicOffset = BindingHasDynamicOffset(a);
- bool bHasDynamicOffset = BindingHasDynamicOffset(b);
- ASSERT(bIsBuffer);
- if (aHasDynamicOffset != bHasDynamicOffset) {
- // Buffers with dynamic offsets should come before those without.
- // This makes it easy to iterate over the dynamic buffer bindings
- // [0, dynamicBufferCount) during validation.
- return aHasDynamicOffset;
- }
- if (aHasDynamicOffset) {
- ASSERT(bHasDynamicOffset);
- ASSERT(a.binding != b.binding);
- // Above, we ensured that dynamic buffers are first. Now, ensure that
- // dynamic buffer bindings are in increasing order. This is because dynamic
- // buffer offsets are applied in increasing order of binding number.
- return a.binding < b.binding;
- }
- }
-
- // This applies some defaults and gives us a single value to check for the binding type.
- BindingInfo aInfo = CreateBindGroupLayoutInfo(a);
- BindingInfo bInfo = CreateBindGroupLayoutInfo(b);
-
- // Sort by type.
- if (aInfo.bindingType != bInfo.bindingType) {
- return aInfo.bindingType < bInfo.bindingType;
- }
-
- if (a.visibility != b.visibility) {
- return a.visibility < b.visibility;
- }
+ return expandedOutput;
+}
+} // anonymous namespace
- switch (aInfo.bindingType) {
- case BindingInfoType::Buffer:
- if (aInfo.buffer.minBindingSize != bInfo.buffer.minBindingSize) {
- return aInfo.buffer.minBindingSize < bInfo.buffer.minBindingSize;
- }
- break;
- case BindingInfoType::Sampler:
- if (aInfo.sampler.type != bInfo.sampler.type) {
- return aInfo.sampler.type < bInfo.sampler.type;
- }
- break;
- case BindingInfoType::Texture:
- if (aInfo.texture.multisampled != bInfo.texture.multisampled) {
- return aInfo.texture.multisampled < bInfo.texture.multisampled;
- }
- if (aInfo.texture.viewDimension != bInfo.texture.viewDimension) {
- return aInfo.texture.viewDimension < bInfo.texture.viewDimension;
- }
- if (aInfo.texture.sampleType != bInfo.texture.sampleType) {
- return aInfo.texture.sampleType < bInfo.texture.sampleType;
- }
- break;
- case BindingInfoType::StorageTexture:
- if (aInfo.storageTexture.access != bInfo.storageTexture.access) {
- return aInfo.storageTexture.access < bInfo.storageTexture.access;
- }
- if (aInfo.storageTexture.viewDimension != bInfo.storageTexture.viewDimension) {
- return aInfo.storageTexture.viewDimension <
- bInfo.storageTexture.viewDimension;
- }
- if (aInfo.storageTexture.format != bInfo.storageTexture.format) {
- return aInfo.storageTexture.format < bInfo.storageTexture.format;
- }
- break;
- case BindingInfoType::ExternalTexture:
- break;
- }
- return a.binding < b.binding;
- }
+MaybeError ValidateBindGroupLayoutDescriptor(DeviceBase* device,
+ const BindGroupLayoutDescriptor* descriptor,
+ bool allowInternalBinding) {
+ DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr");
- // This is a utility function to help ASSERT that the BGL-binding comparator places buffers
- // first.
- bool CheckBufferBindingsFirst(ityp::span<BindingIndex, const BindingInfo> bindings) {
- BindingIndex lastBufferIndex{0};
- BindingIndex firstNonBufferIndex = std::numeric_limits<BindingIndex>::max();
- for (BindingIndex i{0}; i < bindings.size(); ++i) {
- if (bindings[i].bindingType == BindingInfoType::Buffer) {
- lastBufferIndex = std::max(i, lastBufferIndex);
- } else {
- firstNonBufferIndex = std::min(i, firstNonBufferIndex);
- }
- }
+ std::set<BindingNumber> bindingsSet;
+ BindingCounts bindingCounts = {};
- // If there are no buffers, then |lastBufferIndex| is initialized to 0 and
- // |firstNonBufferIndex| gets set to 0.
- return firstNonBufferIndex >= lastBufferIndex;
- }
+ for (uint32_t i = 0; i < descriptor->entryCount; ++i) {
+ const BindGroupLayoutEntry& entry = descriptor->entries[i];
+ BindingNumber bindingNumber = BindingNumber(entry.binding);
- } // namespace
+ DAWN_INVALID_IF(bindingNumber > kMaxBindingNumberTyped,
+ "Binding number (%u) exceeds the maximum binding number (%u).",
+ uint32_t(bindingNumber), uint32_t(kMaxBindingNumberTyped));
+ DAWN_INVALID_IF(bindingsSet.count(bindingNumber) != 0,
+ "On entries[%u]: binding index (%u) was specified by a previous entry.", i,
+ entry.binding);
- // BindGroupLayoutBase
+ DAWN_TRY_CONTEXT(ValidateBindGroupLayoutEntry(device, entry, allowInternalBinding),
+ "validating entries[%u]", i);
- BindGroupLayoutBase::BindGroupLayoutBase(DeviceBase* device,
- const BindGroupLayoutDescriptor* descriptor,
- PipelineCompatibilityToken pipelineCompatibilityToken,
- ApiObjectBase::UntrackedByDeviceTag tag)
- : ApiObjectBase(device, descriptor->label),
- mPipelineCompatibilityToken(pipelineCompatibilityToken),
- mUnexpandedBindingCount(descriptor->entryCount) {
- std::vector<BindGroupLayoutEntry> sortedBindings = ExtractAndExpandBglEntries(
- descriptor, &mBindingCounts, &mExternalTextureBindingExpansionMap);
+ IncrementBindingCounts(&bindingCounts, entry);
- std::sort(sortedBindings.begin(), sortedBindings.end(), SortBindingsCompare);
+ bindingsSet.insert(bindingNumber);
+ }
- for (uint32_t i = 0; i < sortedBindings.size(); ++i) {
- const BindGroupLayoutEntry& binding = sortedBindings[static_cast<uint32_t>(i)];
+ DAWN_TRY_CONTEXT(ValidateBindingCounts(bindingCounts), "validating binding counts");
- mBindingInfo.push_back(CreateBindGroupLayoutInfo(binding));
+ return {};
+}
- if (IsBufferBinding(binding)) {
- // Buffers must be contiguously packed at the start of the binding info.
- ASSERT(GetBufferCount() == BindingIndex(i));
- }
- IncrementBindingCounts(&mBindingCounts, binding);
+namespace {
- const auto& [_, inserted] = mBindingMap.emplace(BindingNumber(binding.binding), i);
- ASSERT(inserted);
- }
- ASSERT(CheckBufferBindingsFirst({mBindingInfo.data(), GetBindingCount()}));
- ASSERT(mBindingInfo.size() <= kMaxBindingsPerPipelineLayoutTyped);
+bool operator!=(const BindingInfo& a, const BindingInfo& b) {
+ if (a.visibility != b.visibility || a.bindingType != b.bindingType) {
+ return true;
}
- BindGroupLayoutBase::BindGroupLayoutBase(DeviceBase* device,
- const BindGroupLayoutDescriptor* descriptor,
- PipelineCompatibilityToken pipelineCompatibilityToken)
- : BindGroupLayoutBase(device, descriptor, pipelineCompatibilityToken, kUntrackedByDevice) {
- TrackInDevice();
+ switch (a.bindingType) {
+ case BindingInfoType::Buffer:
+ return a.buffer.type != b.buffer.type ||
+ a.buffer.hasDynamicOffset != b.buffer.hasDynamicOffset ||
+ a.buffer.minBindingSize != b.buffer.minBindingSize;
+ case BindingInfoType::Sampler:
+ return a.sampler.type != b.sampler.type;
+ case BindingInfoType::Texture:
+ return a.texture.sampleType != b.texture.sampleType ||
+ a.texture.viewDimension != b.texture.viewDimension ||
+ a.texture.multisampled != b.texture.multisampled;
+ case BindingInfoType::StorageTexture:
+ return a.storageTexture.access != b.storageTexture.access ||
+ a.storageTexture.viewDimension != b.storageTexture.viewDimension ||
+ a.storageTexture.format != b.storageTexture.format;
+ case BindingInfoType::ExternalTexture:
+ return false;
}
+ UNREACHABLE();
+}
- BindGroupLayoutBase::BindGroupLayoutBase(DeviceBase* device, ObjectBase::ErrorTag tag)
- : ApiObjectBase(device, tag) {
- }
+bool IsBufferBinding(const BindGroupLayoutEntry& binding) {
+ return binding.buffer.type != wgpu::BufferBindingType::Undefined;
+}
- BindGroupLayoutBase::BindGroupLayoutBase(DeviceBase* device)
- : ApiObjectBase(device, kLabelNotImplemented) {
- TrackInDevice();
+bool BindingHasDynamicOffset(const BindGroupLayoutEntry& binding) {
+ if (binding.buffer.type != wgpu::BufferBindingType::Undefined) {
+ return binding.buffer.hasDynamicOffset;
}
+ return false;
+}
+
+BindingInfo CreateBindGroupLayoutInfo(const BindGroupLayoutEntry& binding) {
+ BindingInfo bindingInfo;
+ bindingInfo.binding = BindingNumber(binding.binding);
+ bindingInfo.visibility = binding.visibility;
+
+ if (binding.buffer.type != wgpu::BufferBindingType::Undefined) {
+ bindingInfo.bindingType = BindingInfoType::Buffer;
+ bindingInfo.buffer = binding.buffer;
+ } else if (binding.sampler.type != wgpu::SamplerBindingType::Undefined) {
+ bindingInfo.bindingType = BindingInfoType::Sampler;
+ bindingInfo.sampler = binding.sampler;
+ } else if (binding.texture.sampleType != wgpu::TextureSampleType::Undefined) {
+ bindingInfo.bindingType = BindingInfoType::Texture;
+ bindingInfo.texture = binding.texture;
+
+ if (binding.texture.viewDimension == wgpu::TextureViewDimension::Undefined) {
+ bindingInfo.texture.viewDimension = wgpu::TextureViewDimension::e2D;
+ }
+ } else if (binding.storageTexture.access != wgpu::StorageTextureAccess::Undefined) {
+ bindingInfo.bindingType = BindingInfoType::StorageTexture;
+ bindingInfo.storageTexture = binding.storageTexture;
- BindGroupLayoutBase::~BindGroupLayoutBase() = default;
-
- void BindGroupLayoutBase::DestroyImpl() {
- if (IsCachedReference()) {
- // Do not uncache the actual cached object if we are a blueprint.
- GetDevice()->UncacheBindGroupLayout(this);
+ if (binding.storageTexture.viewDimension == wgpu::TextureViewDimension::Undefined) {
+ bindingInfo.storageTexture.viewDimension = wgpu::TextureViewDimension::e2D;
+ }
+ } else {
+ const ExternalTextureBindingLayout* externalTextureBindingLayout = nullptr;
+ FindInChain(binding.nextInChain, &externalTextureBindingLayout);
+ if (externalTextureBindingLayout != nullptr) {
+ bindingInfo.bindingType = BindingInfoType::ExternalTexture;
}
}
- // static
- BindGroupLayoutBase* BindGroupLayoutBase::MakeError(DeviceBase* device) {
- return new BindGroupLayoutBase(device, ObjectBase::kError);
- }
+ return bindingInfo;
+}
- ObjectType BindGroupLayoutBase::GetType() const {
- return ObjectType::BindGroupLayout;
+bool SortBindingsCompare(const BindGroupLayoutEntry& a, const BindGroupLayoutEntry& b) {
+ const bool aIsBuffer = IsBufferBinding(a);
+ const bool bIsBuffer = IsBufferBinding(b);
+ if (aIsBuffer != bIsBuffer) {
+ // Always place buffers first.
+ return aIsBuffer;
}
- const BindGroupLayoutBase::BindingMap& BindGroupLayoutBase::GetBindingMap() const {
- ASSERT(!IsError());
- return mBindingMap;
+ if (aIsBuffer) {
+ bool aHasDynamicOffset = BindingHasDynamicOffset(a);
+ bool bHasDynamicOffset = BindingHasDynamicOffset(b);
+ ASSERT(bIsBuffer);
+ if (aHasDynamicOffset != bHasDynamicOffset) {
+ // Buffers with dynamic offsets should come before those without.
+ // This makes it easy to iterate over the dynamic buffer bindings
+ // [0, dynamicBufferCount) during validation.
+ return aHasDynamicOffset;
+ }
+ if (aHasDynamicOffset) {
+ ASSERT(bHasDynamicOffset);
+ ASSERT(a.binding != b.binding);
+ // Above, we ensured that dynamic buffers are first. Now, ensure that
+ // dynamic buffer bindings are in increasing order. This is because dynamic
+ // buffer offsets are applied in increasing order of binding number.
+ return a.binding < b.binding;
+ }
}
- bool BindGroupLayoutBase::HasBinding(BindingNumber bindingNumber) const {
- return mBindingMap.count(bindingNumber) != 0;
- }
+ // This applies some defaults and gives us a single value to check for the binding type.
+ BindingInfo aInfo = CreateBindGroupLayoutInfo(a);
+ BindingInfo bInfo = CreateBindGroupLayoutInfo(b);
- BindingIndex BindGroupLayoutBase::GetBindingIndex(BindingNumber bindingNumber) const {
- ASSERT(!IsError());
- const auto& it = mBindingMap.find(bindingNumber);
- ASSERT(it != mBindingMap.end());
- return it->second;
+ // Sort by type.
+ if (aInfo.bindingType != bInfo.bindingType) {
+ return aInfo.bindingType < bInfo.bindingType;
}
- size_t BindGroupLayoutBase::ComputeContentHash() {
- ObjectContentHasher recorder;
- recorder.Record(mPipelineCompatibilityToken);
-
- // std::map is sorted by key, so two BGLs constructed in different orders
- // will still record the same.
- for (const auto [id, index] : mBindingMap) {
- recorder.Record(id, index);
-
- const BindingInfo& info = mBindingInfo[index];
- recorder.Record(info.buffer.hasDynamicOffset, info.visibility, info.bindingType,
- info.buffer.type, info.buffer.minBindingSize, info.sampler.type,
- info.texture.sampleType, info.texture.viewDimension,
- info.texture.multisampled, info.storageTexture.access,
- info.storageTexture.format, info.storageTexture.viewDimension);
- }
-
- return recorder.GetContentHash();
+ if (a.visibility != b.visibility) {
+ return a.visibility < b.visibility;
}
- bool BindGroupLayoutBase::EqualityFunc::operator()(const BindGroupLayoutBase* a,
- const BindGroupLayoutBase* b) const {
- return a->IsLayoutEqual(b);
+ switch (aInfo.bindingType) {
+ case BindingInfoType::Buffer:
+ if (aInfo.buffer.minBindingSize != bInfo.buffer.minBindingSize) {
+ return aInfo.buffer.minBindingSize < bInfo.buffer.minBindingSize;
+ }
+ break;
+ case BindingInfoType::Sampler:
+ if (aInfo.sampler.type != bInfo.sampler.type) {
+ return aInfo.sampler.type < bInfo.sampler.type;
+ }
+ break;
+ case BindingInfoType::Texture:
+ if (aInfo.texture.multisampled != bInfo.texture.multisampled) {
+ return aInfo.texture.multisampled < bInfo.texture.multisampled;
+ }
+ if (aInfo.texture.viewDimension != bInfo.texture.viewDimension) {
+ return aInfo.texture.viewDimension < bInfo.texture.viewDimension;
+ }
+ if (aInfo.texture.sampleType != bInfo.texture.sampleType) {
+ return aInfo.texture.sampleType < bInfo.texture.sampleType;
+ }
+ break;
+ case BindingInfoType::StorageTexture:
+ if (aInfo.storageTexture.access != bInfo.storageTexture.access) {
+ return aInfo.storageTexture.access < bInfo.storageTexture.access;
+ }
+ if (aInfo.storageTexture.viewDimension != bInfo.storageTexture.viewDimension) {
+ return aInfo.storageTexture.viewDimension < bInfo.storageTexture.viewDimension;
+ }
+ if (aInfo.storageTexture.format != bInfo.storageTexture.format) {
+ return aInfo.storageTexture.format < bInfo.storageTexture.format;
+ }
+ break;
+ case BindingInfoType::ExternalTexture:
+ break;
}
-
- BindingIndex BindGroupLayoutBase::GetBindingCount() const {
- return mBindingInfo.size();
+ return a.binding < b.binding;
+}
+
+// This is a utility function to help ASSERT that the BGL-binding comparator places buffers
+// first.
+bool CheckBufferBindingsFirst(ityp::span<BindingIndex, const BindingInfo> bindings) {
+ BindingIndex lastBufferIndex{0};
+ BindingIndex firstNonBufferIndex = std::numeric_limits<BindingIndex>::max();
+ for (BindingIndex i{0}; i < bindings.size(); ++i) {
+ if (bindings[i].bindingType == BindingInfoType::Buffer) {
+ lastBufferIndex = std::max(i, lastBufferIndex);
+ } else {
+ firstNonBufferIndex = std::min(i, firstNonBufferIndex);
+ }
}
- BindingIndex BindGroupLayoutBase::GetBufferCount() const {
- return BindingIndex(mBindingCounts.bufferCount);
- }
+ // If there are no buffers, then |lastBufferIndex| is initialized to 0 and
+ // |firstNonBufferIndex| gets set to 0.
+ return firstNonBufferIndex >= lastBufferIndex;
+}
- BindingIndex BindGroupLayoutBase::GetDynamicBufferCount() const {
- // This is a binding index because dynamic buffers are packed at the front of the binding
- // info.
- return static_cast<BindingIndex>(mBindingCounts.dynamicStorageBufferCount +
- mBindingCounts.dynamicUniformBufferCount);
- }
+} // namespace
- uint32_t BindGroupLayoutBase::GetUnverifiedBufferCount() const {
- return mBindingCounts.unverifiedBufferCount;
- }
+// BindGroupLayoutBase
- uint32_t BindGroupLayoutBase::GetExternalTextureBindingCount() const {
- return mExternalTextureBindingExpansionMap.size();
- }
+BindGroupLayoutBase::BindGroupLayoutBase(DeviceBase* device,
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken,
+ ApiObjectBase::UntrackedByDeviceTag tag)
+ : ApiObjectBase(device, descriptor->label),
+ mPipelineCompatibilityToken(pipelineCompatibilityToken),
+ mUnexpandedBindingCount(descriptor->entryCount) {
+ std::vector<BindGroupLayoutEntry> sortedBindings = ExtractAndExpandBglEntries(
+ descriptor, &mBindingCounts, &mExternalTextureBindingExpansionMap);
- const BindingCounts& BindGroupLayoutBase::GetBindingCountInfo() const {
- return mBindingCounts;
- }
+ std::sort(sortedBindings.begin(), sortedBindings.end(), SortBindingsCompare);
- const ExternalTextureBindingExpansionMap&
- BindGroupLayoutBase::GetExternalTextureBindingExpansionMap() const {
- return mExternalTextureBindingExpansionMap;
- }
+ for (uint32_t i = 0; i < sortedBindings.size(); ++i) {
+ const BindGroupLayoutEntry& binding = sortedBindings[static_cast<uint32_t>(i)];
- uint32_t BindGroupLayoutBase::GetUnexpandedBindingCount() const {
- return mUnexpandedBindingCount;
- }
+ mBindingInfo.push_back(CreateBindGroupLayoutInfo(binding));
- bool BindGroupLayoutBase::IsLayoutEqual(const BindGroupLayoutBase* other,
- bool excludePipelineCompatibiltyToken) const {
- if (!excludePipelineCompatibiltyToken &&
- GetPipelineCompatibilityToken() != other->GetPipelineCompatibilityToken()) {
- return false;
- }
- if (GetBindingCount() != other->GetBindingCount()) {
- return false;
+ if (IsBufferBinding(binding)) {
+ // Buffers must be contiguously packed at the start of the binding info.
+ ASSERT(GetBufferCount() == BindingIndex(i));
}
- for (BindingIndex i{0}; i < GetBindingCount(); ++i) {
- if (mBindingInfo[i] != other->mBindingInfo[i]) {
- return false;
- }
- }
- return mBindingMap == other->mBindingMap;
- }
+ IncrementBindingCounts(&mBindingCounts, binding);
- PipelineCompatibilityToken BindGroupLayoutBase::GetPipelineCompatibilityToken() const {
- return mPipelineCompatibilityToken;
+ const auto& [_, inserted] = mBindingMap.emplace(BindingNumber(binding.binding), i);
+ ASSERT(inserted);
}
-
- size_t BindGroupLayoutBase::GetBindingDataSize() const {
- // | ------ buffer-specific ----------| ------------ object pointers -------------|
- // | --- offsets + sizes -------------| --------------- Ref<ObjectBase> ----------|
- // Followed by:
- // |---------buffer size array--------|
- // |-uint64_t[mUnverifiedBufferCount]-|
- size_t objectPointerStart = mBindingCounts.bufferCount * sizeof(BufferBindingData);
- ASSERT(IsAligned(objectPointerStart, alignof(Ref<ObjectBase>)));
- size_t bufferSizeArrayStart =
- Align(objectPointerStart + mBindingCounts.totalCount * sizeof(Ref<ObjectBase>),
- sizeof(uint64_t));
- ASSERT(IsAligned(bufferSizeArrayStart, alignof(uint64_t)));
- return bufferSizeArrayStart + mBindingCounts.unverifiedBufferCount * sizeof(uint64_t);
+ ASSERT(CheckBufferBindingsFirst({mBindingInfo.data(), GetBindingCount()}));
+ ASSERT(mBindingInfo.size() <= kMaxBindingsPerPipelineLayoutTyped);
+}
+
+BindGroupLayoutBase::BindGroupLayoutBase(DeviceBase* device,
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken)
+ : BindGroupLayoutBase(device, descriptor, pipelineCompatibilityToken, kUntrackedByDevice) {
+ TrackInDevice();
+}
+
+BindGroupLayoutBase::BindGroupLayoutBase(DeviceBase* device, ObjectBase::ErrorTag tag)
+ : ApiObjectBase(device, tag) {}
+
+BindGroupLayoutBase::BindGroupLayoutBase(DeviceBase* device)
+ : ApiObjectBase(device, kLabelNotImplemented) {
+ TrackInDevice();
+}
+
+BindGroupLayoutBase::~BindGroupLayoutBase() = default;
+
+void BindGroupLayoutBase::DestroyImpl() {
+ if (IsCachedReference()) {
+ // Do not uncache the actual cached object if we are a blueprint.
+ GetDevice()->UncacheBindGroupLayout(this);
}
-
- BindGroupLayoutBase::BindingDataPointers BindGroupLayoutBase::ComputeBindingDataPointers(
- void* dataStart) const {
- BufferBindingData* bufferData = reinterpret_cast<BufferBindingData*>(dataStart);
- auto bindings = reinterpret_cast<Ref<ObjectBase>*>(bufferData + mBindingCounts.bufferCount);
- uint64_t* unverifiedBufferSizes = AlignPtr(
- reinterpret_cast<uint64_t*>(bindings + mBindingCounts.totalCount), sizeof(uint64_t));
-
- ASSERT(IsPtrAligned(bufferData, alignof(BufferBindingData)));
- ASSERT(IsPtrAligned(bindings, alignof(Ref<ObjectBase>)));
- ASSERT(IsPtrAligned(unverifiedBufferSizes, alignof(uint64_t)));
-
- return {{bufferData, GetBufferCount()},
- {bindings, GetBindingCount()},
- {unverifiedBufferSizes, mBindingCounts.unverifiedBufferCount}};
+}
+
+// static
+BindGroupLayoutBase* BindGroupLayoutBase::MakeError(DeviceBase* device) {
+ return new BindGroupLayoutBase(device, ObjectBase::kError);
+}
+
+ObjectType BindGroupLayoutBase::GetType() const {
+ return ObjectType::BindGroupLayout;
+}
+
+const BindGroupLayoutBase::BindingMap& BindGroupLayoutBase::GetBindingMap() const {
+ ASSERT(!IsError());
+ return mBindingMap;
+}
+
+bool BindGroupLayoutBase::HasBinding(BindingNumber bindingNumber) const {
+ return mBindingMap.count(bindingNumber) != 0;
+}
+
+BindingIndex BindGroupLayoutBase::GetBindingIndex(BindingNumber bindingNumber) const {
+ ASSERT(!IsError());
+ const auto& it = mBindingMap.find(bindingNumber);
+ ASSERT(it != mBindingMap.end());
+ return it->second;
+}
+
+size_t BindGroupLayoutBase::ComputeContentHash() {
+ ObjectContentHasher recorder;
+ recorder.Record(mPipelineCompatibilityToken);
+
+ // std::map is sorted by key, so two BGLs constructed in different orders
+ // will still record the same.
+ for (const auto [id, index] : mBindingMap) {
+ recorder.Record(id, index);
+
+ const BindingInfo& info = mBindingInfo[index];
+ recorder.Record(info.buffer.hasDynamicOffset, info.visibility, info.bindingType,
+ info.buffer.type, info.buffer.minBindingSize, info.sampler.type,
+ info.texture.sampleType, info.texture.viewDimension,
+ info.texture.multisampled, info.storageTexture.access,
+ info.storageTexture.format, info.storageTexture.viewDimension);
}
- bool BindGroupLayoutBase::IsStorageBufferBinding(BindingIndex bindingIndex) const {
- ASSERT(bindingIndex < GetBufferCount());
- switch (GetBindingInfo(bindingIndex).buffer.type) {
- case wgpu::BufferBindingType::Uniform:
- return false;
- case kInternalStorageBufferBinding:
- case wgpu::BufferBindingType::Storage:
- case wgpu::BufferBindingType::ReadOnlyStorage:
- return true;
- case wgpu::BufferBindingType::Undefined:
- UNREACHABLE();
- }
+ return recorder.GetContentHash();
+}
+
+bool BindGroupLayoutBase::EqualityFunc::operator()(const BindGroupLayoutBase* a,
+ const BindGroupLayoutBase* b) const {
+ return a->IsLayoutEqual(b);
+}
+
+BindingIndex BindGroupLayoutBase::GetBindingCount() const {
+ return mBindingInfo.size();
+}
+
+BindingIndex BindGroupLayoutBase::GetBufferCount() const {
+ return BindingIndex(mBindingCounts.bufferCount);
+}
+
+BindingIndex BindGroupLayoutBase::GetDynamicBufferCount() const {
+ // This is a binding index because dynamic buffers are packed at the front of the binding
+ // info.
+ return static_cast<BindingIndex>(mBindingCounts.dynamicStorageBufferCount +
+ mBindingCounts.dynamicUniformBufferCount);
+}
+
+uint32_t BindGroupLayoutBase::GetUnverifiedBufferCount() const {
+ return mBindingCounts.unverifiedBufferCount;
+}
+
+uint32_t BindGroupLayoutBase::GetExternalTextureBindingCount() const {
+ return mExternalTextureBindingExpansionMap.size();
+}
+
+const BindingCounts& BindGroupLayoutBase::GetBindingCountInfo() const {
+ return mBindingCounts;
+}
+
+const ExternalTextureBindingExpansionMap&
+BindGroupLayoutBase::GetExternalTextureBindingExpansionMap() const {
+ return mExternalTextureBindingExpansionMap;
+}
+
+uint32_t BindGroupLayoutBase::GetUnexpandedBindingCount() const {
+ return mUnexpandedBindingCount;
+}
+
+bool BindGroupLayoutBase::IsLayoutEqual(const BindGroupLayoutBase* other,
+ bool excludePipelineCompatibiltyToken) const {
+ if (!excludePipelineCompatibiltyToken &&
+ GetPipelineCompatibilityToken() != other->GetPipelineCompatibilityToken()) {
+ return false;
}
-
- std::string BindGroupLayoutBase::EntriesToString() const {
- std::string entries = "[";
- std::string sep = "";
- const BindGroupLayoutBase::BindingMap& bindingMap = GetBindingMap();
- for (const auto [bindingNumber, bindingIndex] : bindingMap) {
- const BindingInfo& bindingInfo = GetBindingInfo(bindingIndex);
- entries += absl::StrFormat("%s%s", sep, bindingInfo);
- sep = ", ";
+ if (GetBindingCount() != other->GetBindingCount()) {
+ return false;
+ }
+ for (BindingIndex i{0}; i < GetBindingCount(); ++i) {
+ if (mBindingInfo[i] != other->mBindingInfo[i]) {
+ return false;
}
- entries += "]";
- return entries;
}
+ return mBindingMap == other->mBindingMap;
+}
+
+PipelineCompatibilityToken BindGroupLayoutBase::GetPipelineCompatibilityToken() const {
+ return mPipelineCompatibilityToken;
+}
+
+size_t BindGroupLayoutBase::GetBindingDataSize() const {
+ // | ------ buffer-specific ----------| ------------ object pointers -------------|
+ // | --- offsets + sizes -------------| --------------- Ref<ObjectBase> ----------|
+ // Followed by:
+ // |---------buffer size array--------|
+ // |-uint64_t[mUnverifiedBufferCount]-|
+ size_t objectPointerStart = mBindingCounts.bufferCount * sizeof(BufferBindingData);
+ ASSERT(IsAligned(objectPointerStart, alignof(Ref<ObjectBase>)));
+ size_t bufferSizeArrayStart = Align(
+ objectPointerStart + mBindingCounts.totalCount * sizeof(Ref<ObjectBase>), sizeof(uint64_t));
+ ASSERT(IsAligned(bufferSizeArrayStart, alignof(uint64_t)));
+ return bufferSizeArrayStart + mBindingCounts.unverifiedBufferCount * sizeof(uint64_t);
+}
+
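// Worked example of the layout computed above, assuming 2 buffer bindings, 3 total
// bindings, 1 unverified buffer, and a 64-bit build where Ref<ObjectBase> is
// pointer-sized (8 bytes) and BufferBindingData is 16 bytes:
//   objectPointerStart   = 2 * sizeof(BufferBindingData)              = 32
//   bufferSizeArrayStart = Align(32 + 3 * sizeof(Ref<ObjectBase>), 8) = 56
//   GetBindingDataSize   = 56 + 1 * sizeof(uint64_t)                  = 64 bytes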
+BindGroupLayoutBase::BindingDataPointers BindGroupLayoutBase::ComputeBindingDataPointers(
+ void* dataStart) const {
+ BufferBindingData* bufferData = reinterpret_cast<BufferBindingData*>(dataStart);
+ auto bindings = reinterpret_cast<Ref<ObjectBase>*>(bufferData + mBindingCounts.bufferCount);
+ uint64_t* unverifiedBufferSizes = AlignPtr(
+ reinterpret_cast<uint64_t*>(bindings + mBindingCounts.totalCount), sizeof(uint64_t));
+
+ ASSERT(IsPtrAligned(bufferData, alignof(BufferBindingData)));
+ ASSERT(IsPtrAligned(bindings, alignof(Ref<ObjectBase>)));
+ ASSERT(IsPtrAligned(unverifiedBufferSizes, alignof(uint64_t)));
+
+ return {{bufferData, GetBufferCount()},
+ {bindings, GetBindingCount()},
+ {unverifiedBufferSizes, mBindingCounts.unverifiedBufferCount}};
+}
+
+bool BindGroupLayoutBase::IsStorageBufferBinding(BindingIndex bindingIndex) const {
+ ASSERT(bindingIndex < GetBufferCount());
+ switch (GetBindingInfo(bindingIndex).buffer.type) {
+ case wgpu::BufferBindingType::Uniform:
+ return false;
+ case kInternalStorageBufferBinding:
+ case wgpu::BufferBindingType::Storage:
+ case wgpu::BufferBindingType::ReadOnlyStorage:
+ return true;
+ case wgpu::BufferBindingType::Undefined:
+ break;
+ }
+ UNREACHABLE();
+}
+
+std::string BindGroupLayoutBase::EntriesToString() const {
+ std::string entries = "[";
+ std::string sep = "";
+ const BindGroupLayoutBase::BindingMap& bindingMap = GetBindingMap();
+ for (const auto [bindingNumber, bindingIndex] : bindingMap) {
+ const BindingInfo& bindingInfo = GetBindingInfo(bindingIndex);
+ entries += absl::StrFormat("%s%s", sep, bindingInfo);
+ sep = ", ";
+ }
+ entries += "]";
+ return entries;
+}
} // namespace dawn::native
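As a usage-level illustration of the entry validation above, the following sketch builds a descriptor that satisfies the rules enforced by ValidateBindGroupLayoutEntry: each entry sets exactly one of buffer, sampler, texture, storageTexture, or externalTexture; binding numbers are unique; and the writable storage buffer is kept out of the vertex stage. It assumes the public webgpu_cpp.h API and an already-created wgpu::Device; MakeExampleLayout is an illustrative name, and the header include path can differ between Dawn and other WebGPU distributions.

#include <array>
#include <cstdint>
#include <webgpu/webgpu_cpp.h>  // include path may vary per packaging

wgpu::BindGroupLayout MakeExampleLayout(const wgpu::Device& device) {
    std::array<wgpu::BindGroupLayoutEntry, 2> entries = {};

    // Uniform buffer: visible to both stages, only the `buffer` member is set.
    entries[0].binding = 0;
    entries[0].visibility = wgpu::ShaderStage::Vertex | wgpu::ShaderStage::Fragment;
    entries[0].buffer.type = wgpu::BufferBindingType::Uniform;

    // Writable storage buffer: Storage bindings are stripped from the allowed
    // vertex stages during validation, so restrict visibility to Fragment.
    entries[1].binding = 1;
    entries[1].visibility = wgpu::ShaderStage::Fragment;
    entries[1].buffer.type = wgpu::BufferBindingType::Storage;

    wgpu::BindGroupLayoutDescriptor desc = {};
    desc.entryCount = static_cast<uint32_t>(entries.size());
    desc.entries = entries.data();
    return device.CreateBindGroupLayout(&desc);
}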
diff --git a/chromium/third_party/dawn/src/dawn/native/BindGroupLayout.h b/chromium/third_party/dawn/src/dawn/native/BindGroupLayout.h
index c8cb27c111a..a218877498b 100644
--- a/chromium/third_party/dawn/src/dawn/native/BindGroupLayout.h
+++ b/chromium/third_party/dawn/src/dawn/native/BindGroupLayout.h
@@ -15,6 +15,11 @@
#ifndef SRC_DAWN_NATIVE_BINDGROUPLAYOUT_H_
#define SRC_DAWN_NATIVE_BINDGROUPLAYOUT_H_
+#include <algorithm>
+#include <bitset>
+#include <map>
+#include <string>
+
#include "dawn/common/Constants.h"
#include "dawn/common/Math.h"
#include "dawn/common/SlabAllocator.h"
@@ -28,143 +33,138 @@
#include "dawn/native/dawn_platform.h"
-#include <bitset>
-#include <map>
-
namespace dawn::native {
- // TODO(dawn:1082): Minor optimization to use BindingIndex instead of BindingNumber
- struct ExternalTextureBindingExpansion {
- BindingNumber plane0;
- BindingNumber plane1;
- BindingNumber params;
+// TODO(dawn:1082): Minor optimization to use BindingIndex instead of BindingNumber
+struct ExternalTextureBindingExpansion {
+ BindingNumber plane0;
+ BindingNumber plane1;
+ BindingNumber params;
+};
+
+using ExternalTextureBindingExpansionMap = std::map<BindingNumber, ExternalTextureBindingExpansion>;
+
+MaybeError ValidateBindGroupLayoutDescriptor(DeviceBase* device,
+ const BindGroupLayoutDescriptor* descriptor,
+ bool allowInternalBinding = false);
+
+// Bindings are specified as a |BindingNumber| in the BindGroupLayoutDescriptor.
+// These numbers may be arbitrary and sparse. Internally, Dawn packs these numbers
+// into a packed range of |BindingIndex| integers.
+class BindGroupLayoutBase : public ApiObjectBase, public CachedObject {
+ public:
+ BindGroupLayoutBase(DeviceBase* device,
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken,
+ ApiObjectBase::UntrackedByDeviceTag tag);
+ BindGroupLayoutBase(DeviceBase* device,
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken);
+ ~BindGroupLayoutBase() override;
+
+ static BindGroupLayoutBase* MakeError(DeviceBase* device);
+
+ ObjectType GetType() const override;
+
+ // A map from the BindingNumber to its packed BindingIndex.
+ using BindingMap = std::map<BindingNumber, BindingIndex>;
+
+ const BindingInfo& GetBindingInfo(BindingIndex bindingIndex) const {
+ ASSERT(!IsError());
+ ASSERT(bindingIndex < mBindingInfo.size());
+ return mBindingInfo[bindingIndex];
+ }
+ const BindingMap& GetBindingMap() const;
+ bool HasBinding(BindingNumber bindingNumber) const;
+ BindingIndex GetBindingIndex(BindingNumber bindingNumber) const;
+
+ // Functions necessary for the unordered_set<BGLBase*>-based cache.
+ size_t ComputeContentHash() override;
+
+ struct EqualityFunc {
+ bool operator()(const BindGroupLayoutBase* a, const BindGroupLayoutBase* b) const;
};
- using ExternalTextureBindingExpansionMap =
- std::map<BindingNumber, ExternalTextureBindingExpansion>;
-
- MaybeError ValidateBindGroupLayoutDescriptor(DeviceBase* device,
- const BindGroupLayoutDescriptor* descriptor,
- bool allowInternalBinding = false);
-
- // Bindings are specified as a |BindingNumber| in the BindGroupLayoutDescriptor.
- // These numbers may be arbitrary and sparse. Internally, Dawn packs these numbers
- // into a packed range of |BindingIndex| integers.
- class BindGroupLayoutBase : public ApiObjectBase, public CachedObject {
- public:
- BindGroupLayoutBase(DeviceBase* device,
- const BindGroupLayoutDescriptor* descriptor,
- PipelineCompatibilityToken pipelineCompatibilityToken,
- ApiObjectBase::UntrackedByDeviceTag tag);
- BindGroupLayoutBase(DeviceBase* device,
- const BindGroupLayoutDescriptor* descriptor,
- PipelineCompatibilityToken pipelineCompatibilityToken);
- ~BindGroupLayoutBase() override;
-
- static BindGroupLayoutBase* MakeError(DeviceBase* device);
-
- ObjectType GetType() const override;
-
- // A map from the BindingNumber to its packed BindingIndex.
- using BindingMap = std::map<BindingNumber, BindingIndex>;
-
- const BindingInfo& GetBindingInfo(BindingIndex bindingIndex) const {
- ASSERT(!IsError());
- ASSERT(bindingIndex < mBindingInfo.size());
- return mBindingInfo[bindingIndex];
- }
- const BindingMap& GetBindingMap() const;
- bool HasBinding(BindingNumber bindingNumber) const;
- BindingIndex GetBindingIndex(BindingNumber bindingNumber) const;
-
- // Functions necessary for the unordered_set<BGLBase*>-based cache.
- size_t ComputeContentHash() override;
-
- struct EqualityFunc {
- bool operator()(const BindGroupLayoutBase* a, const BindGroupLayoutBase* b) const;
- };
-
- BindingIndex GetBindingCount() const;
- // Returns |BindingIndex| because buffers are packed at the front.
- BindingIndex GetBufferCount() const;
- // Returns |BindingIndex| because dynamic buffers are packed at the front.
- BindingIndex GetDynamicBufferCount() const;
- uint32_t GetUnverifiedBufferCount() const;
-
- // Used to get counts and validate them in pipeline layout creation. Other getters
- // should be used to get typed integer counts.
- const BindingCounts& GetBindingCountInfo() const;
-
- uint32_t GetExternalTextureBindingCount() const;
-
- // Used to specify unpacked external texture binding slots when transforming shader modules.
- const ExternalTextureBindingExpansionMap& GetExternalTextureBindingExpansionMap() const;
-
- uint32_t GetUnexpandedBindingCount() const;
-
- // Tests that the BindingInfo of two bind groups are equal,
- // ignoring their compatibility groups.
- bool IsLayoutEqual(const BindGroupLayoutBase* other,
- bool excludePipelineCompatibiltyToken = false) const;
- PipelineCompatibilityToken GetPipelineCompatibilityToken() const;
-
- struct BufferBindingData {
- uint64_t offset;
- uint64_t size;
- };
-
- struct BindingDataPointers {
- ityp::span<BindingIndex, BufferBindingData> const bufferData = {};
- ityp::span<BindingIndex, Ref<ObjectBase>> const bindings = {};
- ityp::span<uint32_t, uint64_t> const unverifiedBufferSizes = {};
- };
-
- // Compute the amount of space / alignment required to store bindings for a bind group of
- // this layout.
- size_t GetBindingDataSize() const;
- static constexpr size_t GetBindingDataAlignment() {
- static_assert(alignof(Ref<ObjectBase>) <= alignof(BufferBindingData));
- return alignof(BufferBindingData);
- }
-
- BindingDataPointers ComputeBindingDataPointers(void* dataStart) const;
-
- bool IsStorageBufferBinding(BindingIndex bindingIndex) const;
-
- // Returns a detailed string representation of the layout entries for use in error messages.
- std::string EntriesToString() const;
-
- protected:
- // Constructor used only for mocking and testing.
- explicit BindGroupLayoutBase(DeviceBase* device);
- void DestroyImpl() override;
-
- template <typename BindGroup>
- SlabAllocator<BindGroup> MakeFrontendBindGroupAllocator(size_t size) {
- return SlabAllocator<BindGroup>(
- size, // bytes
- Align(sizeof(BindGroup), GetBindingDataAlignment()) + GetBindingDataSize(), // size
- std::max(alignof(BindGroup), GetBindingDataAlignment()) // alignment
- );
- }
-
- private:
- BindGroupLayoutBase(DeviceBase* device, ObjectBase::ErrorTag tag);
-
- BindingCounts mBindingCounts = {};
- ityp::vector<BindingIndex, BindingInfo> mBindingInfo;
-
- // Map from BindGroupLayoutEntry.binding to packed indices.
- BindingMap mBindingMap;
-
- ExternalTextureBindingExpansionMap mExternalTextureBindingExpansionMap;
+ BindingIndex GetBindingCount() const;
+ // Returns |BindingIndex| because buffers are packed at the front.
+ BindingIndex GetBufferCount() const;
+ // Returns |BindingIndex| because dynamic buffers are packed at the front.
+ BindingIndex GetDynamicBufferCount() const;
+ uint32_t GetUnverifiedBufferCount() const;
- // Non-0 if this BindGroupLayout was created as part of a default PipelineLayout.
- const PipelineCompatibilityToken mPipelineCompatibilityToken =
- PipelineCompatibilityToken(0);
+ // Used to get counts and validate them in pipeline layout creation. Other getters
+ // should be used to get typed integer counts.
+ const BindingCounts& GetBindingCountInfo() const;
+
+ uint32_t GetExternalTextureBindingCount() const;
+
+ // Used to specify unpacked external texture binding slots when transforming shader modules.
+ const ExternalTextureBindingExpansionMap& GetExternalTextureBindingExpansionMap() const;
+
+ uint32_t GetUnexpandedBindingCount() const;
+
+ // Tests that the BindingInfo of two bind groups are equal,
+ // ignoring their compatibility groups.
+ bool IsLayoutEqual(const BindGroupLayoutBase* other,
+ bool excludePipelineCompatibiltyToken = false) const;
+ PipelineCompatibilityToken GetPipelineCompatibilityToken() const;
+
+ struct BufferBindingData {
+ uint64_t offset;
+ uint64_t size;
+ };
- uint32_t mUnexpandedBindingCount;
+ struct BindingDataPointers {
+ ityp::span<BindingIndex, BufferBindingData> const bufferData = {};
+ ityp::span<BindingIndex, Ref<ObjectBase>> const bindings = {};
+ ityp::span<uint32_t, uint64_t> const unverifiedBufferSizes = {};
};
+ // Compute the amount of space / alignment required to store bindings for a bind group of
+ // this layout.
+ size_t GetBindingDataSize() const;
+ static constexpr size_t GetBindingDataAlignment() {
+ static_assert(alignof(Ref<ObjectBase>) <= alignof(BufferBindingData));
+ return alignof(BufferBindingData);
+ }
+
+ BindingDataPointers ComputeBindingDataPointers(void* dataStart) const;
+
+ bool IsStorageBufferBinding(BindingIndex bindingIndex) const;
+
+ // Returns a detailed string representation of the layout entries for use in error messages.
+ std::string EntriesToString() const;
+
+ protected:
+ // Constructor used only for mocking and testing.
+ explicit BindGroupLayoutBase(DeviceBase* device);
+ void DestroyImpl() override;
+
+ template <typename BindGroup>
+ SlabAllocator<BindGroup> MakeFrontendBindGroupAllocator(size_t size) {
+ return SlabAllocator<BindGroup>(
+ size, // bytes
+ Align(sizeof(BindGroup), GetBindingDataAlignment()) + GetBindingDataSize(), // size
+ std::max(alignof(BindGroup), GetBindingDataAlignment()) // alignment
+ );
+ }
+
+ private:
+ BindGroupLayoutBase(DeviceBase* device, ObjectBase::ErrorTag tag);
+
+ BindingCounts mBindingCounts = {};
+ ityp::vector<BindingIndex, BindingInfo> mBindingInfo;
+
+ // Map from BindGroupLayoutEntry.binding to packed indices.
+ BindingMap mBindingMap;
+
+ ExternalTextureBindingExpansionMap mExternalTextureBindingExpansionMap;
+
+ // Non-0 if this BindGroupLayout was created as part of a default PipelineLayout.
+ const PipelineCompatibilityToken mPipelineCompatibilityToken = PipelineCompatibilityToken(0);
+
+ uint32_t mUnexpandedBindingCount;
+};
+
} // namespace dawn::native
#endif // SRC_DAWN_NATIVE_BINDGROUPLAYOUT_H_
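The BindingNumber/BindingIndex distinction documented in the header above can be summarized with a small sketch, using plain uint32_t in place of Dawn's typed wrappers and assigning indices in declaration order (the real implementation additionally sorts buffer bindings to the front before packing):

#include <cstdint>
#include <map>
#include <vector>

// Maps sparse, user-chosen binding numbers onto a dense 0..N-1 index range,
// mirroring the BindingMap declared above.
std::map<uint32_t, uint32_t> PackBindingNumbers(const std::vector<uint32_t>& bindingNumbers) {
    std::map<uint32_t, uint32_t> bindingMap;  // BindingNumber -> packed BindingIndex
    uint32_t nextIndex = 0;
    for (uint32_t bindingNumber : bindingNumbers) {
        bindingMap.emplace(bindingNumber, nextIndex++);
    }
    return bindingMap;
}

// PackBindingNumbers({7, 42, 1000}) yields {7 -> 0, 42 -> 1, 1000 -> 2}.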
diff --git a/chromium/third_party/dawn/src/dawn/native/BindGroupTracker.h b/chromium/third_party/dawn/src/dawn/native/BindGroupTracker.h
index dd2f056c955..cd8254c98af 100644
--- a/chromium/third_party/dawn/src/dawn/native/BindGroupTracker.h
+++ b/chromium/third_party/dawn/src/dawn/native/BindGroupTracker.h
@@ -15,127 +15,127 @@
#ifndef SRC_DAWN_NATIVE_BINDGROUPTRACKER_H_
#define SRC_DAWN_NATIVE_BINDGROUPTRACKER_H_
+#include <array>
+#include <bitset>
+
#include "dawn/common/Constants.h"
#include "dawn/native/BindGroupLayout.h"
#include "dawn/native/Pipeline.h"
#include "dawn/native/PipelineLayout.h"
-#include <array>
-#include <bitset>
-
namespace dawn::native {
- // Keeps track of the dirty bind groups so they can be lazily applied when we know the
- // pipeline state or it changes.
- // |DynamicOffset| is a template parameter because offsets in Vulkan are uint32_t but uint64_t
- // in other backends.
- template <bool CanInheritBindGroups, typename DynamicOffset>
- class BindGroupTrackerBase {
- public:
- void OnSetBindGroup(BindGroupIndex index,
- BindGroupBase* bindGroup,
- uint32_t dynamicOffsetCount,
- uint32_t* dynamicOffsets) {
- ASSERT(index < kMaxBindGroupsTyped);
-
- if (mBindGroupLayoutsMask[index]) {
- // It is okay to only dirty bind groups that are used by the current pipeline
- // layout. If the pipeline layout changes, then the bind groups it uses will
- // become dirty.
-
- if (mBindGroups[index] != bindGroup) {
- mDirtyBindGroups.set(index);
- mDirtyBindGroupsObjectChangedOrIsDynamic.set(index);
- }
-
- if (dynamicOffsetCount > 0) {
- mDirtyBindGroupsObjectChangedOrIsDynamic.set(index);
- }
+// Keeps track of the dirty bind groups so they can be lazily applied when we know the
+// pipeline state or it changes.
+// |DynamicOffset| is a template parameter because offsets in Vulkan are uint32_t but uint64_t
+// in other backends.
+template <bool CanInheritBindGroups, typename DynamicOffset>
+class BindGroupTrackerBase {
+ public:
+ void OnSetBindGroup(BindGroupIndex index,
+ BindGroupBase* bindGroup,
+ uint32_t dynamicOffsetCount,
+ uint32_t* dynamicOffsets) {
+ ASSERT(index < kMaxBindGroupsTyped);
+
+ if (mBindGroupLayoutsMask[index]) {
+ // It is okay to only dirty bind groups that are used by the current pipeline
+ // layout. If the pipeline layout changes, then the bind groups it uses will
+ // become dirty.
+
+ if (mBindGroups[index] != bindGroup) {
+ mDirtyBindGroups.set(index);
+ mDirtyBindGroupsObjectChangedOrIsDynamic.set(index);
}
- mBindGroups[index] = bindGroup;
- mDynamicOffsetCounts[index] = dynamicOffsetCount;
- SetDynamicOffsets(mDynamicOffsets[index].data(), dynamicOffsetCount, dynamicOffsets);
+ if (dynamicOffsetCount > 0) {
+ mDirtyBindGroupsObjectChangedOrIsDynamic.set(index);
+ }
}
- void OnSetPipeline(PipelineBase* pipeline) {
- mPipelineLayout = pipeline->GetLayout();
- }
+ mBindGroups[index] = bindGroup;
+ mDynamicOffsetCounts[index] = dynamicOffsetCount;
+ SetDynamicOffsets(mDynamicOffsets[index].data(), dynamicOffsetCount, dynamicOffsets);
+ }
- protected:
- // The Derived class should call this before it applies bind groups.
- void BeforeApply() {
- if (mLastAppliedPipelineLayout == mPipelineLayout) {
- return;
- }
+ void OnSetPipeline(PipelineBase* pipeline) { mPipelineLayout = pipeline->GetLayout(); }
- // Use the bind group layout mask to avoid marking unused bind groups as dirty.
- mBindGroupLayoutsMask = mPipelineLayout->GetBindGroupLayoutsMask();
-
- // Changing the pipeline layout sets bind groups as dirty. If CanInheritBindGroups,
- // the first |k| matching bind groups may be inherited.
- if (CanInheritBindGroups && mLastAppliedPipelineLayout != nullptr) {
- // Dirty bind groups that cannot be inherited.
- BindGroupLayoutMask dirtiedGroups =
- ~mPipelineLayout->InheritedGroupsMask(mLastAppliedPipelineLayout);
-
- mDirtyBindGroups |= dirtiedGroups;
- mDirtyBindGroupsObjectChangedOrIsDynamic |= dirtiedGroups;
-
- // Clear any bind groups not in the mask.
- mDirtyBindGroups &= mBindGroupLayoutsMask;
- mDirtyBindGroupsObjectChangedOrIsDynamic &= mBindGroupLayoutsMask;
- } else {
- mDirtyBindGroups = mBindGroupLayoutsMask;
- mDirtyBindGroupsObjectChangedOrIsDynamic = mBindGroupLayoutsMask;
- }
+ protected:
+ // The Derived class should call this before it applies bind groups.
+ void BeforeApply() {
+ if (mLastAppliedPipelineLayout == mPipelineLayout) {
+ return;
}
- // The Derived class should call this after it applies bind groups.
- void AfterApply() {
- // Reset all dirty bind groups. Dirty bind groups not in the bind group layout mask
- // will be dirtied again by the next pipeline change.
- mDirtyBindGroups.reset();
- mDirtyBindGroupsObjectChangedOrIsDynamic.reset();
- // Keep track of the last applied pipeline layout. This allows us to avoid computing
- // the intersection of the dirty bind groups and bind group layout mask in next Draw
- // or Dispatch (which is very hot code) until the layout is changed again.
- mLastAppliedPipelineLayout = mPipelineLayout;
+ // Use the bind group layout mask to avoid marking unused bind groups as dirty.
+ mBindGroupLayoutsMask = mPipelineLayout->GetBindGroupLayoutsMask();
+
+ // Changing the pipeline layout sets bind groups as dirty. If CanInheritBindGroups,
+ // the first |k| matching bind groups may be inherited.
+ if (CanInheritBindGroups && mLastAppliedPipelineLayout != nullptr) {
+ // Dirty bind groups that cannot be inherited.
+ BindGroupLayoutMask dirtiedGroups =
+ ~mPipelineLayout->InheritedGroupsMask(mLastAppliedPipelineLayout);
+
+ mDirtyBindGroups |= dirtiedGroups;
+ mDirtyBindGroupsObjectChangedOrIsDynamic |= dirtiedGroups;
+
+ // Clear any bind groups not in the mask.
+ mDirtyBindGroups &= mBindGroupLayoutsMask;
+ mDirtyBindGroupsObjectChangedOrIsDynamic &= mBindGroupLayoutsMask;
+ } else {
+ mDirtyBindGroups = mBindGroupLayoutsMask;
+ mDirtyBindGroupsObjectChangedOrIsDynamic = mBindGroupLayoutsMask;
}
-
- BindGroupLayoutMask mDirtyBindGroups = 0;
- BindGroupLayoutMask mDirtyBindGroupsObjectChangedOrIsDynamic = 0;
- BindGroupLayoutMask mBindGroupLayoutsMask = 0;
- ityp::array<BindGroupIndex, BindGroupBase*, kMaxBindGroups> mBindGroups = {};
- ityp::array<BindGroupIndex, uint32_t, kMaxBindGroups> mDynamicOffsetCounts = {};
- ityp::array<BindGroupIndex,
- std::array<DynamicOffset, kMaxDynamicBuffersPerPipelineLayout>,
- kMaxBindGroups>
- mDynamicOffsets = {};
-
- // |mPipelineLayout| is the current pipeline layout set on the command buffer.
- // |mLastAppliedPipelineLayout| is the last pipeline layout for which we applied changes
- // to the bind group bindings.
- PipelineLayoutBase* mPipelineLayout = nullptr;
- PipelineLayoutBase* mLastAppliedPipelineLayout = nullptr;
-
- private:
- // We have two overloads here because offsets in Vulkan are uint32_t but uint64_t
- // in other backends.
- static void SetDynamicOffsets(uint64_t* data,
- uint32_t dynamicOffsetCount,
- uint32_t* dynamicOffsets) {
- for (uint32_t i = 0; i < dynamicOffsetCount; ++i) {
- data[i] = static_cast<uint64_t>(dynamicOffsets[i]);
- }
+ }
+
+ // The Derived class should call this after it applies bind groups.
+ void AfterApply() {
+ // Reset all dirty bind groups. Dirty bind groups not in the bind group layout mask
+ // will be dirtied again by the next pipeline change.
+ mDirtyBindGroups.reset();
+ mDirtyBindGroupsObjectChangedOrIsDynamic.reset();
+ // Keep track of the last applied pipeline layout. This allows us to avoid computing
+ // the intersection of the dirty bind groups and bind group layout mask in next Draw
+ // or Dispatch (which is very hot code) until the layout is changed again.
+ mLastAppliedPipelineLayout = mPipelineLayout;
+ }
+
+ BindGroupLayoutMask mDirtyBindGroups = 0;
+ BindGroupLayoutMask mDirtyBindGroupsObjectChangedOrIsDynamic = 0;
+ BindGroupLayoutMask mBindGroupLayoutsMask = 0;
+ ityp::array<BindGroupIndex, BindGroupBase*, kMaxBindGroups> mBindGroups = {};
+ ityp::array<BindGroupIndex, uint32_t, kMaxBindGroups> mDynamicOffsetCounts = {};
+ ityp::array<BindGroupIndex,
+ std::array<DynamicOffset, kMaxDynamicBuffersPerPipelineLayout>,
+ kMaxBindGroups>
+ mDynamicOffsets = {};
+
+ // |mPipelineLayout| is the current pipeline layout set on the command buffer.
+ // |mLastAppliedPipelineLayout| is the last pipeline layout for which we applied changes
+ // to the bind group bindings.
+ PipelineLayoutBase* mPipelineLayout = nullptr;
+ PipelineLayoutBase* mLastAppliedPipelineLayout = nullptr;
+
+ private:
+ // We have two overloads here because offsets in Vulkan are uint32_t but uint64_t
+ // in other backends.
+ static void SetDynamicOffsets(uint64_t* data,
+ uint32_t dynamicOffsetCount,
+ uint32_t* dynamicOffsets) {
+ for (uint32_t i = 0; i < dynamicOffsetCount; ++i) {
+ data[i] = static_cast<uint64_t>(dynamicOffsets[i]);
}
+ }
- static void SetDynamicOffsets(uint32_t* data,
- uint32_t dynamicOffsetCount,
- uint32_t* dynamicOffsets) {
+ static void SetDynamicOffsets(uint32_t* data,
+ uint32_t dynamicOffsetCount,
+ uint32_t* dynamicOffsets) {
+ if (dynamicOffsetCount > 0) {
memcpy(data, dynamicOffsets, sizeof(uint32_t) * dynamicOffsetCount);
}
- };
+ }
+};
} // namespace dawn::native
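
As a rough standalone illustration of the dirty-mask bookkeeping that BeforeApply()/AfterApply() perform above (illustrative types only, not Dawn's; the real InheritedGroupsMask compares the first matching bind group layouts rather than taking a plain intersection):

#include <bitset>
#include <cstddef>
#include <iostream>

constexpr std::size_t kMaxGroups = 4;
using GroupMask = std::bitset<kMaxGroups>;

struct Layout {
    GroupMask usedGroups;  // Which bind group slots this layout uses.

    // Simplified stand-in: treat every group present in both layouts as inheritable.
    GroupMask InheritedGroupsMask(const Layout& last) const { return usedGroups & last.usedGroups; }
};

int main() {
    Layout last{GroupMask("0011")};  // Previously applied layout uses groups 0 and 1.
    Layout next{GroupMask("0111")};  // New layout uses groups 0, 1 and 2.

    // Dirty only the groups that cannot be inherited, then drop unused slots,
    // mirroring the masking done when the pipeline layout changes above.
    GroupMask dirty = ~next.InheritedGroupsMask(last);
    dirty &= next.usedGroups;

    std::cout << "groups to re-apply: " << dirty << "\n";  // Prints 0100 (group 2 only).
}
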
diff --git a/chromium/third_party/dawn/src/dawn/native/BindingInfo.cpp b/chromium/third_party/dawn/src/dawn/native/BindingInfo.cpp
index 009735c8d21..1d4b60d3960 100644
--- a/chromium/third_party/dawn/src/dawn/native/BindingInfo.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/BindingInfo.cpp
@@ -18,178 +18,172 @@
namespace dawn::native {
- void IncrementBindingCounts(BindingCounts* bindingCounts, const BindGroupLayoutEntry& entry) {
- bindingCounts->totalCount += 1;
-
- uint32_t PerStageBindingCounts::*perStageBindingCountMember = nullptr;
-
- if (entry.buffer.type != wgpu::BufferBindingType::Undefined) {
- ++bindingCounts->bufferCount;
- const BufferBindingLayout& buffer = entry.buffer;
-
- if (buffer.minBindingSize == 0) {
- ++bindingCounts->unverifiedBufferCount;
- }
-
- switch (buffer.type) {
- case wgpu::BufferBindingType::Uniform:
- if (buffer.hasDynamicOffset) {
- ++bindingCounts->dynamicUniformBufferCount;
- }
- perStageBindingCountMember = &PerStageBindingCounts::uniformBufferCount;
- break;
-
- case wgpu::BufferBindingType::Storage:
- case kInternalStorageBufferBinding:
- case wgpu::BufferBindingType::ReadOnlyStorage:
- if (buffer.hasDynamicOffset) {
- ++bindingCounts->dynamicStorageBufferCount;
- }
- perStageBindingCountMember = &PerStageBindingCounts::storageBufferCount;
- break;
-
- case wgpu::BufferBindingType::Undefined:
- // Can't get here due to the enclosing if statement.
- UNREACHABLE();
- break;
- }
- } else if (entry.sampler.type != wgpu::SamplerBindingType::Undefined) {
- perStageBindingCountMember = &PerStageBindingCounts::samplerCount;
- } else if (entry.texture.sampleType != wgpu::TextureSampleType::Undefined) {
- perStageBindingCountMember = &PerStageBindingCounts::sampledTextureCount;
- } else if (entry.storageTexture.access != wgpu::StorageTextureAccess::Undefined) {
- perStageBindingCountMember = &PerStageBindingCounts::storageTextureCount;
- } else {
- const ExternalTextureBindingLayout* externalTextureBindingLayout;
- FindInChain(entry.nextInChain, &externalTextureBindingLayout);
- if (externalTextureBindingLayout != nullptr) {
- perStageBindingCountMember = &PerStageBindingCounts::externalTextureCount;
- }
+void IncrementBindingCounts(BindingCounts* bindingCounts, const BindGroupLayoutEntry& entry) {
+ bindingCounts->totalCount += 1;
+
+ uint32_t PerStageBindingCounts::*perStageBindingCountMember = nullptr;
+
+ if (entry.buffer.type != wgpu::BufferBindingType::Undefined) {
+ ++bindingCounts->bufferCount;
+ const BufferBindingLayout& buffer = entry.buffer;
+
+ if (buffer.minBindingSize == 0) {
+ ++bindingCounts->unverifiedBufferCount;
}
- ASSERT(perStageBindingCountMember != nullptr);
- for (SingleShaderStage stage : IterateStages(entry.visibility)) {
- ++(bindingCounts->perStage[stage].*perStageBindingCountMember);
+ switch (buffer.type) {
+ case wgpu::BufferBindingType::Uniform:
+ if (buffer.hasDynamicOffset) {
+ ++bindingCounts->dynamicUniformBufferCount;
+ }
+ perStageBindingCountMember = &PerStageBindingCounts::uniformBufferCount;
+ break;
+
+ case wgpu::BufferBindingType::Storage:
+ case kInternalStorageBufferBinding:
+ case wgpu::BufferBindingType::ReadOnlyStorage:
+ if (buffer.hasDynamicOffset) {
+ ++bindingCounts->dynamicStorageBufferCount;
+ }
+ perStageBindingCountMember = &PerStageBindingCounts::storageBufferCount;
+ break;
+
+ case wgpu::BufferBindingType::Undefined:
+ // Can't get here due to the enclosing if statement.
+ UNREACHABLE();
+ break;
+ }
+ } else if (entry.sampler.type != wgpu::SamplerBindingType::Undefined) {
+ perStageBindingCountMember = &PerStageBindingCounts::samplerCount;
+ } else if (entry.texture.sampleType != wgpu::TextureSampleType::Undefined) {
+ perStageBindingCountMember = &PerStageBindingCounts::sampledTextureCount;
+ } else if (entry.storageTexture.access != wgpu::StorageTextureAccess::Undefined) {
+ perStageBindingCountMember = &PerStageBindingCounts::storageTextureCount;
+ } else {
+ const ExternalTextureBindingLayout* externalTextureBindingLayout;
+ FindInChain(entry.nextInChain, &externalTextureBindingLayout);
+ if (externalTextureBindingLayout != nullptr) {
+ perStageBindingCountMember = &PerStageBindingCounts::externalTextureCount;
}
}
- void AccumulateBindingCounts(BindingCounts* bindingCounts, const BindingCounts& rhs) {
- bindingCounts->totalCount += rhs.totalCount;
- bindingCounts->bufferCount += rhs.bufferCount;
- bindingCounts->unverifiedBufferCount += rhs.unverifiedBufferCount;
- bindingCounts->dynamicUniformBufferCount += rhs.dynamicUniformBufferCount;
- bindingCounts->dynamicStorageBufferCount += rhs.dynamicStorageBufferCount;
-
- for (SingleShaderStage stage : IterateStages(kAllStages)) {
- bindingCounts->perStage[stage].sampledTextureCount +=
- rhs.perStage[stage].sampledTextureCount;
- bindingCounts->perStage[stage].samplerCount += rhs.perStage[stage].samplerCount;
- bindingCounts->perStage[stage].storageBufferCount +=
- rhs.perStage[stage].storageBufferCount;
- bindingCounts->perStage[stage].storageTextureCount +=
- rhs.perStage[stage].storageTextureCount;
- bindingCounts->perStage[stage].uniformBufferCount +=
- rhs.perStage[stage].uniformBufferCount;
- bindingCounts->perStage[stage].externalTextureCount +=
- rhs.perStage[stage].externalTextureCount;
- }
+ ASSERT(perStageBindingCountMember != nullptr);
+ for (SingleShaderStage stage : IterateStages(entry.visibility)) {
+ ++(bindingCounts->perStage[stage].*perStageBindingCountMember);
}
+}
+
+void AccumulateBindingCounts(BindingCounts* bindingCounts, const BindingCounts& rhs) {
+ bindingCounts->totalCount += rhs.totalCount;
+ bindingCounts->bufferCount += rhs.bufferCount;
+ bindingCounts->unverifiedBufferCount += rhs.unverifiedBufferCount;
+ bindingCounts->dynamicUniformBufferCount += rhs.dynamicUniformBufferCount;
+ bindingCounts->dynamicStorageBufferCount += rhs.dynamicStorageBufferCount;
+
+ for (SingleShaderStage stage : IterateStages(kAllStages)) {
+ bindingCounts->perStage[stage].sampledTextureCount +=
+ rhs.perStage[stage].sampledTextureCount;
+ bindingCounts->perStage[stage].samplerCount += rhs.perStage[stage].samplerCount;
+ bindingCounts->perStage[stage].storageBufferCount += rhs.perStage[stage].storageBufferCount;
+ bindingCounts->perStage[stage].storageTextureCount +=
+ rhs.perStage[stage].storageTextureCount;
+ bindingCounts->perStage[stage].uniformBufferCount += rhs.perStage[stage].uniformBufferCount;
+ bindingCounts->perStage[stage].externalTextureCount +=
+ rhs.perStage[stage].externalTextureCount;
+ }
+}
+
+MaybeError ValidateBindingCounts(const BindingCounts& bindingCounts) {
+ DAWN_INVALID_IF(
+ bindingCounts.dynamicUniformBufferCount > kMaxDynamicUniformBuffersPerPipelineLayout,
+ "The number of dynamic uniform buffers (%u) exceeds the maximum per-pipeline-layout "
+ "limit (%u).",
+ bindingCounts.dynamicUniformBufferCount, kMaxDynamicUniformBuffersPerPipelineLayout);
+
+ DAWN_INVALID_IF(
+ bindingCounts.dynamicStorageBufferCount > kMaxDynamicStorageBuffersPerPipelineLayout,
+ "The number of dynamic storage buffers (%u) exceeds the maximum per-pipeline-layout "
+ "limit (%u).",
+ bindingCounts.dynamicStorageBufferCount, kMaxDynamicStorageBuffersPerPipelineLayout);
+
+ for (SingleShaderStage stage : IterateStages(kAllStages)) {
+ DAWN_INVALID_IF(
+ bindingCounts.perStage[stage].sampledTextureCount > kMaxSampledTexturesPerShaderStage,
+ "The number of sampled textures (%u) in the %s stage exceeds the maximum "
+ "per-stage limit (%u).",
+ bindingCounts.perStage[stage].sampledTextureCount, stage,
+ kMaxSampledTexturesPerShaderStage);
+
+ // The per-stage number of external textures is bound by the maximum sampled textures
+ // per stage.
+ DAWN_INVALID_IF(bindingCounts.perStage[stage].externalTextureCount >
+ kMaxSampledTexturesPerShaderStage / kSampledTexturesPerExternalTexture,
+ "The number of external textures (%u) in the %s stage exceeds the maximum "
+ "per-stage limit (%u).",
+ bindingCounts.perStage[stage].externalTextureCount, stage,
+ kMaxSampledTexturesPerShaderStage / kSampledTexturesPerExternalTexture);
- MaybeError ValidateBindingCounts(const BindingCounts& bindingCounts) {
DAWN_INVALID_IF(
- bindingCounts.dynamicUniformBufferCount > kMaxDynamicUniformBuffersPerPipelineLayout,
- "The number of dynamic uniform buffers (%u) exceeds the maximum per-pipeline-layout "
+ bindingCounts.perStage[stage].sampledTextureCount +
+ (bindingCounts.perStage[stage].externalTextureCount *
+ kSampledTexturesPerExternalTexture) >
+ kMaxSampledTexturesPerShaderStage,
+ "The combination of sampled textures (%u) and external textures (%u) in the %s "
+ "stage exceeds the maximum per-stage limit (%u).",
+ bindingCounts.perStage[stage].sampledTextureCount,
+ bindingCounts.perStage[stage].externalTextureCount, stage,
+ kMaxSampledTexturesPerShaderStage);
+
+ DAWN_INVALID_IF(
+ bindingCounts.perStage[stage].samplerCount > kMaxSamplersPerShaderStage,
+ "The number of samplers (%u) in the %s stage exceeds the maximum per-stage limit "
+ "(%u).",
+ bindingCounts.perStage[stage].samplerCount, stage, kMaxSamplersPerShaderStage);
+
+ DAWN_INVALID_IF(
+ bindingCounts.perStage[stage].samplerCount +
+ (bindingCounts.perStage[stage].externalTextureCount *
+ kSamplersPerExternalTexture) >
+ kMaxSamplersPerShaderStage,
+ "The combination of samplers (%u) and external textures (%u) in the %s stage "
+ "exceeds the maximum per-stage limit (%u).",
+ bindingCounts.perStage[stage].samplerCount,
+ bindingCounts.perStage[stage].externalTextureCount, stage, kMaxSamplersPerShaderStage);
+
+ DAWN_INVALID_IF(
+ bindingCounts.perStage[stage].storageBufferCount > kMaxStorageBuffersPerShaderStage,
+ "The number of storage buffers (%u) in the %s stage exceeds the maximum per-stage "
"limit (%u).",
- bindingCounts.dynamicUniformBufferCount, kMaxDynamicUniformBuffersPerPipelineLayout);
+ bindingCounts.perStage[stage].storageBufferCount, stage,
+ kMaxStorageBuffersPerShaderStage);
DAWN_INVALID_IF(
- bindingCounts.dynamicStorageBufferCount > kMaxDynamicStorageBuffersPerPipelineLayout,
- "The number of dynamic storage buffers (%u) exceeds the maximum per-pipeline-layout "
+ bindingCounts.perStage[stage].storageTextureCount > kMaxStorageTexturesPerShaderStage,
+ "The number of storage textures (%u) in the %s stage exceeds the maximum per-stage "
"limit (%u).",
- bindingCounts.dynamicStorageBufferCount, kMaxDynamicStorageBuffersPerPipelineLayout);
-
- for (SingleShaderStage stage : IterateStages(kAllStages)) {
- DAWN_INVALID_IF(
- bindingCounts.perStage[stage].sampledTextureCount >
- kMaxSampledTexturesPerShaderStage,
- "The number of sampled textures (%u) in the %s stage exceeds the maximum "
- "per-stage limit (%u).",
- bindingCounts.perStage[stage].sampledTextureCount, stage,
- kMaxSampledTexturesPerShaderStage);
-
- // The per-stage number of external textures is bound by the maximum sampled textures
- // per stage.
- DAWN_INVALID_IF(
- bindingCounts.perStage[stage].externalTextureCount >
- kMaxSampledTexturesPerShaderStage / kSampledTexturesPerExternalTexture,
- "The number of external textures (%u) in the %s stage exceeds the maximum "
- "per-stage limit (%u).",
- bindingCounts.perStage[stage].externalTextureCount, stage,
- kMaxSampledTexturesPerShaderStage / kSampledTexturesPerExternalTexture);
-
- DAWN_INVALID_IF(
- bindingCounts.perStage[stage].sampledTextureCount +
- (bindingCounts.perStage[stage].externalTextureCount *
- kSampledTexturesPerExternalTexture) >
- kMaxSampledTexturesPerShaderStage,
- "The combination of sampled textures (%u) and external textures (%u) in the %s "
- "stage exceeds the maximum per-stage limit (%u).",
- bindingCounts.perStage[stage].sampledTextureCount,
- bindingCounts.perStage[stage].externalTextureCount, stage,
- kMaxSampledTexturesPerShaderStage);
-
- DAWN_INVALID_IF(
- bindingCounts.perStage[stage].samplerCount > kMaxSamplersPerShaderStage,
- "The number of samplers (%u) in the %s stage exceeds the maximum per-stage limit "
- "(%u).",
- bindingCounts.perStage[stage].samplerCount, stage, kMaxSamplersPerShaderStage);
-
- DAWN_INVALID_IF(
- bindingCounts.perStage[stage].samplerCount +
- (bindingCounts.perStage[stage].externalTextureCount *
- kSamplersPerExternalTexture) >
- kMaxSamplersPerShaderStage,
- "The combination of samplers (%u) and external textures (%u) in the %s stage "
- "exceeds the maximum per-stage limit (%u).",
- bindingCounts.perStage[stage].samplerCount,
- bindingCounts.perStage[stage].externalTextureCount, stage,
- kMaxSamplersPerShaderStage);
-
- DAWN_INVALID_IF(
- bindingCounts.perStage[stage].storageBufferCount > kMaxStorageBuffersPerShaderStage,
- "The number of storage buffers (%u) in the %s stage exceeds the maximum per-stage "
- "limit (%u).",
- bindingCounts.perStage[stage].storageBufferCount, stage,
- kMaxStorageBuffersPerShaderStage);
-
- DAWN_INVALID_IF(
- bindingCounts.perStage[stage].storageTextureCount >
- kMaxStorageTexturesPerShaderStage,
- "The number of storage textures (%u) in the %s stage exceeds the maximum per-stage "
- "limit (%u).",
- bindingCounts.perStage[stage].storageTextureCount, stage,
- kMaxStorageTexturesPerShaderStage);
-
- DAWN_INVALID_IF(
- bindingCounts.perStage[stage].uniformBufferCount > kMaxUniformBuffersPerShaderStage,
- "The number of uniform buffers (%u) in the %s stage exceeds the maximum per-stage "
- "limit (%u).",
- bindingCounts.perStage[stage].uniformBufferCount, stage,
- kMaxUniformBuffersPerShaderStage);
-
- DAWN_INVALID_IF(
- bindingCounts.perStage[stage].uniformBufferCount +
- (bindingCounts.perStage[stage].externalTextureCount *
- kUniformsPerExternalTexture) >
- kMaxUniformBuffersPerShaderStage,
- "The combination of uniform buffers (%u) and external textures (%u) in the %s "
- "stage exceeds the maximum per-stage limit (%u).",
- bindingCounts.perStage[stage].uniformBufferCount,
- bindingCounts.perStage[stage].externalTextureCount, stage,
- kMaxUniformBuffersPerShaderStage);
- }
+ bindingCounts.perStage[stage].storageTextureCount, stage,
+ kMaxStorageTexturesPerShaderStage);
- return {};
+ DAWN_INVALID_IF(
+ bindingCounts.perStage[stage].uniformBufferCount > kMaxUniformBuffersPerShaderStage,
+ "The number of uniform buffers (%u) in the %s stage exceeds the maximum per-stage "
+ "limit (%u).",
+ bindingCounts.perStage[stage].uniformBufferCount, stage,
+ kMaxUniformBuffersPerShaderStage);
+
+ DAWN_INVALID_IF(
+ bindingCounts.perStage[stage].uniformBufferCount +
+ (bindingCounts.perStage[stage].externalTextureCount *
+ kUniformsPerExternalTexture) >
+ kMaxUniformBuffersPerShaderStage,
+ "The combination of uniform buffers (%u) and external textures (%u) in the %s "
+ "stage exceeds the maximum per-stage limit (%u).",
+ bindingCounts.perStage[stage].uniformBufferCount,
+ bindingCounts.perStage[stage].externalTextureCount, stage,
+ kMaxUniformBuffersPerShaderStage);
}
+ return {};
+}
+
} // namespace dawn::native
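
The pointer-to-data-member dispatch used by IncrementBindingCounts above can be shown with a minimal standalone sketch (illustrative struct and enum, not Dawn's types):

#include <cstdint>
#include <iostream>

struct PerStageCounts {
    uint32_t samplerCount = 0;
    uint32_t sampledTextureCount = 0;
};

enum class Kind { Sampler, SampledTexture };

// Decide once which counter a binding contributes to, then bump it uniformly.
void Increment(PerStageCounts& counts, Kind kind) {
    uint32_t PerStageCounts::*member = nullptr;
    switch (kind) {
        case Kind::Sampler:
            member = &PerStageCounts::samplerCount;
            break;
        case Kind::SampledTexture:
            member = &PerStageCounts::sampledTextureCount;
            break;
    }
    ++(counts.*member);  // Same shape as ++(bindingCounts->perStage[stage].*member) above.
}

int main() {
    PerStageCounts counts;
    Increment(counts, Kind::Sampler);
    Increment(counts, Kind::SampledTexture);
    Increment(counts, Kind::SampledTexture);
    std::cout << counts.samplerCount << " " << counts.sampledTextureCount << "\n";  // Prints 1 2.
}
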
diff --git a/chromium/third_party/dawn/src/dawn/native/BindingInfo.h b/chromium/third_party/dawn/src/dawn/native/BindingInfo.h
index e04d014afef..9d32b05e08a 100644
--- a/chromium/third_party/dawn/src/dawn/native/BindingInfo.h
+++ b/chromium/third_party/dawn/src/dawn/native/BindingInfo.h
@@ -15,6 +15,9 @@
#ifndef SRC_DAWN_NATIVE_BINDINGINFO_H_
#define SRC_DAWN_NATIVE_BINDINGINFO_H_
+#include <cstdint>
+#include <vector>
+
#include "dawn/common/Constants.h"
#include "dawn/common/ityp_array.h"
#include "dawn/native/Error.h"
@@ -24,74 +27,72 @@
#include "dawn/native/dawn_platform.h"
-#include <cstdint>
-
namespace dawn::native {
- // Not a real WebGPU limit, but the sum of the two limits is useful for internal optimizations.
- static constexpr uint32_t kMaxDynamicBuffersPerPipelineLayout =
- kMaxDynamicUniformBuffersPerPipelineLayout + kMaxDynamicStorageBuffersPerPipelineLayout;
-
- static constexpr BindingIndex kMaxDynamicBuffersPerPipelineLayoutTyped =
- BindingIndex(kMaxDynamicBuffersPerPipelineLayout);
-
- // Not a real WebGPU limit, but used to optimize parts of Dawn which expect valid usage of the
- // API. There should never be more bindings than the max per stage, for each stage.
- static constexpr uint32_t kMaxBindingsPerPipelineLayout =
- 3 * (kMaxSampledTexturesPerShaderStage + kMaxSamplersPerShaderStage +
- kMaxStorageBuffersPerShaderStage + kMaxStorageTexturesPerShaderStage +
- kMaxUniformBuffersPerShaderStage);
-
- static constexpr BindingIndex kMaxBindingsPerPipelineLayoutTyped =
- BindingIndex(kMaxBindingsPerPipelineLayout);
-
- // TODO(enga): Figure out a good number for this.
- static constexpr uint32_t kMaxOptimalBindingsPerGroup = 32;
-
- enum class BindingInfoType { Buffer, Sampler, Texture, StorageTexture, ExternalTexture };
-
- struct BindingInfo {
- BindingNumber binding;
- wgpu::ShaderStage visibility;
-
- BindingInfoType bindingType;
-
- // TODO(dawn:527): These four values could be made into a union.
- BufferBindingLayout buffer;
- SamplerBindingLayout sampler;
- TextureBindingLayout texture;
- StorageTextureBindingLayout storageTexture;
- };
-
- struct BindingSlot {
- BindGroupIndex group;
- BindingNumber binding;
- };
-
- struct PerStageBindingCounts {
- uint32_t sampledTextureCount;
- uint32_t samplerCount;
- uint32_t storageBufferCount;
- uint32_t storageTextureCount;
- uint32_t uniformBufferCount;
- uint32_t externalTextureCount;
- };
-
- struct BindingCounts {
- uint32_t totalCount;
- uint32_t bufferCount;
- uint32_t unverifiedBufferCount; // Buffers with minimum buffer size unspecified
- uint32_t dynamicUniformBufferCount;
- uint32_t dynamicStorageBufferCount;
- PerStage<PerStageBindingCounts> perStage;
- };
-
- void IncrementBindingCounts(BindingCounts* bindingCounts, const BindGroupLayoutEntry& entry);
- void AccumulateBindingCounts(BindingCounts* bindingCounts, const BindingCounts& rhs);
- MaybeError ValidateBindingCounts(const BindingCounts& bindingCounts);
-
- // For buffer size validation
- using RequiredBufferSizes = ityp::array<BindGroupIndex, std::vector<uint64_t>, kMaxBindGroups>;
+// Not a real WebGPU limit, but the sum of the two limits is useful for internal optimizations.
+static constexpr uint32_t kMaxDynamicBuffersPerPipelineLayout =
+ kMaxDynamicUniformBuffersPerPipelineLayout + kMaxDynamicStorageBuffersPerPipelineLayout;
+
+static constexpr BindingIndex kMaxDynamicBuffersPerPipelineLayoutTyped =
+ BindingIndex(kMaxDynamicBuffersPerPipelineLayout);
+
+// Not a real WebGPU limit, but used to optimize parts of Dawn which expect valid usage of the
+// API. There should never be more bindings than the max per stage, for each stage.
+static constexpr uint32_t kMaxBindingsPerPipelineLayout =
+ 3 * (kMaxSampledTexturesPerShaderStage + kMaxSamplersPerShaderStage +
+ kMaxStorageBuffersPerShaderStage + kMaxStorageTexturesPerShaderStage +
+ kMaxUniformBuffersPerShaderStage);
+
+static constexpr BindingIndex kMaxBindingsPerPipelineLayoutTyped =
+ BindingIndex(kMaxBindingsPerPipelineLayout);
+
+// TODO(enga): Figure out a good number for this.
+static constexpr uint32_t kMaxOptimalBindingsPerGroup = 32;
+
+enum class BindingInfoType { Buffer, Sampler, Texture, StorageTexture, ExternalTexture };
+
+struct BindingInfo {
+ BindingNumber binding;
+ wgpu::ShaderStage visibility;
+
+ BindingInfoType bindingType;
+
+ // TODO(dawn:527): These four values could be made into a union.
+ BufferBindingLayout buffer;
+ SamplerBindingLayout sampler;
+ TextureBindingLayout texture;
+ StorageTextureBindingLayout storageTexture;
+};
+
+struct BindingSlot {
+ BindGroupIndex group;
+ BindingNumber binding;
+};
+
+struct PerStageBindingCounts {
+ uint32_t sampledTextureCount;
+ uint32_t samplerCount;
+ uint32_t storageBufferCount;
+ uint32_t storageTextureCount;
+ uint32_t uniformBufferCount;
+ uint32_t externalTextureCount;
+};
+
+struct BindingCounts {
+ uint32_t totalCount;
+ uint32_t bufferCount;
+ uint32_t unverifiedBufferCount; // Buffers with minimum buffer size unspecified
+ uint32_t dynamicUniformBufferCount;
+ uint32_t dynamicStorageBufferCount;
+ PerStage<PerStageBindingCounts> perStage;
+};
+
+void IncrementBindingCounts(BindingCounts* bindingCounts, const BindGroupLayoutEntry& entry);
+void AccumulateBindingCounts(BindingCounts* bindingCounts, const BindingCounts& rhs);
+MaybeError ValidateBindingCounts(const BindingCounts& bindingCounts);
+
+// For buffer size validation
+using RequiredBufferSizes = ityp::array<BindGroupIndex, std::vector<uint64_t>, kMaxBindGroups>;
} // namespace dawn::native
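
ValidateBindingCounts (declared above) also enforces combined limits, because each external texture consumes several sampled-texture, sampler and uniform-buffer slots. A minimal standalone sketch of that arithmetic, using illustrative constants rather than Dawn's actual limit values:

#include <cstdint>
#include <iostream>

// Illustrative constants only; Dawn's real limits live in dawn/common/Constants.h.
constexpr uint32_t kMaxSampledTexturesPerStage = 16;
constexpr uint32_t kSampledTexturesPerExternalTextureSketch = 3;

bool FitsSampledTextureLimit(uint32_t sampledTextures, uint32_t externalTextures) {
    // External textures expand into several sampled textures, so the two counts
    // are validated together, like the combined DAWN_INVALID_IF check earlier.
    return sampledTextures + externalTextures * kSampledTexturesPerExternalTextureSketch <=
           kMaxSampledTexturesPerStage;
}

int main() {
    std::cout << std::boolalpha << FitsSampledTextureLimit(10, 2) << "\n"  // true: 10 + 6 <= 16
              << FitsSampledTextureLimit(12, 2) << "\n";                   // false: 12 + 6 > 16
}
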
diff --git a/chromium/third_party/dawn/src/dawn/native/Blob.cpp b/chromium/third_party/dawn/src/dawn/native/Blob.cpp
new file mode 100644
index 00000000000..a3ac2b28efd
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/Blob.cpp
@@ -0,0 +1,80 @@
+// Copyright 2022 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <utility>
+
+#include "dawn/common/Assert.h"
+#include "dawn/native/Blob.h"
+
+namespace dawn::native {
+
+Blob CreateBlob(size_t size) {
+ if (size > 0) {
+ uint8_t* data = new uint8_t[size];
+ return Blob::UnsafeCreateWithDeleter(data, size, [=]() { delete[] data; });
+ } else {
+ return Blob();
+ }
+}
+
+// static
+Blob Blob::UnsafeCreateWithDeleter(uint8_t* data, size_t size, std::function<void()> deleter) {
+ return Blob(data, size, deleter);
+}
+
+Blob::Blob() : mData(nullptr), mSize(0), mDeleter({}) {}
+
+Blob::Blob(uint8_t* data, size_t size, std::function<void()> deleter)
+ : mData(data), mSize(size), mDeleter(std::move(deleter)) {
+ // It is invalid to make a blob that has null data unless its size is also zero.
+ ASSERT(data != nullptr || size == 0);
+}
+
+Blob::Blob(Blob&& rhs) : mData(rhs.mData), mSize(rhs.mSize) {
+ mDeleter = std::move(rhs.mDeleter);
+}
+
+Blob& Blob::operator=(Blob&& rhs) {
+ mData = rhs.mData;
+ mSize = rhs.mSize;
+ if (mDeleter) {
+ mDeleter();
+ }
+ mDeleter = std::move(rhs.mDeleter);
+ return *this;
+}
+
+Blob::~Blob() {
+ if (mDeleter) {
+ mDeleter();
+ }
+}
+
+bool Blob::Empty() const {
+ return mSize == 0;
+}
+
+const uint8_t* Blob::Data() const {
+ return mData;
+}
+
+uint8_t* Blob::Data() {
+ return mData;
+}
+
+size_t Blob::Size() const {
+ return mSize;
+}
+
+} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/Blob.h b/chromium/third_party/dawn/src/dawn/native/Blob.h
new file mode 100644
index 00000000000..4bdcef7be3f
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/Blob.h
@@ -0,0 +1,60 @@
+// Copyright 2022 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef SRC_DAWN_NATIVE_BLOB_H_
+#define SRC_DAWN_NATIVE_BLOB_H_
+
+#include <functional>
+#include <memory>
+
+namespace dawn::native {
+
+// Blob represents a block of bytes. It may be constructed from
+// various other container types and uses type erasure to take
+// ownership of the container and release its memory on destruction.
+class Blob {
+ public:
+    // This function is used to create a Blob with actual data.
+    // Make sure the creation and the deleter handle data ownership and lifetime correctly.
+ static Blob UnsafeCreateWithDeleter(uint8_t* data, size_t size, std::function<void()> deleter);
+
+ Blob();
+ ~Blob();
+
+ Blob(const Blob&) = delete;
+ Blob& operator=(const Blob&) = delete;
+
+ Blob(Blob&&);
+ Blob& operator=(Blob&&);
+
+ bool Empty() const;
+ const uint8_t* Data() const;
+ uint8_t* Data();
+ size_t Size() const;
+
+ private:
+    // The constructor is responsible for taking ownership of |data| and for releasing it by
+    // calling |deleter|. The deleter function is called at ~Blob() and during move assignment.
+ explicit Blob(uint8_t* data, size_t size, std::function<void()> deleter);
+
+ uint8_t* mData;
+ size_t mSize;
+ std::function<void()> mDeleter;
+};
+
+Blob CreateBlob(size_t size);
+
+} // namespace dawn::native
+
+#endif // SRC_DAWN_NATIVE_BLOB_H_
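
A minimal standalone sketch of the type-erased ownership pattern Blob uses, where the deleter captures whatever actually owns the bytes (MiniBlob and WrapVector are illustrative names, not Dawn API):

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <functional>
#include <utility>
#include <vector>

class MiniBlob {
  public:
    MiniBlob(uint8_t* data, size_t size, std::function<void()> deleter)
        : mData(data), mSize(size), mDeleter(std::move(deleter)) {}
    MiniBlob(MiniBlob&& rhs)
        : mData(rhs.mData), mSize(rhs.mSize), mDeleter(std::move(rhs.mDeleter)) {
        rhs.mDeleter = nullptr;  // Make sure only one instance ever runs the deleter.
    }
    ~MiniBlob() {
        if (mDeleter) {
            mDeleter();
        }
    }
    MiniBlob(const MiniBlob&) = delete;
    MiniBlob& operator=(const MiniBlob&) = delete;

    const uint8_t* Data() const { return mData; }
    size_t Size() const { return mSize; }

  private:
    uint8_t* mData;
    size_t mSize;
    std::function<void()> mDeleter;
};

// Wrap an arbitrary container: the lambda captures the heap-allocated vector
// and frees it when the blob is destroyed.
MiniBlob WrapVector(std::vector<uint8_t> bytes) {
    auto* owned = new std::vector<uint8_t>(std::move(bytes));
    return MiniBlob(owned->data(), owned->size(), [owned]() { delete owned; });
}

int main() {
    MiniBlob blob = WrapVector({1, 2, 3});
    std::printf("%zu bytes, first byte = %u\n", blob.Size(),
                static_cast<unsigned>(blob.Data()[0]));
}
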
diff --git a/chromium/third_party/dawn/src/dawn/native/BlobCache.cpp b/chromium/third_party/dawn/src/dawn/native/BlobCache.cpp
new file mode 100644
index 00000000000..435f12dd493
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/BlobCache.cpp
@@ -0,0 +1,66 @@
+// Copyright 2022 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/BlobCache.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/native/CacheKey.h"
+#include "dawn/native/Instance.h"
+#include "dawn/platform/DawnPlatform.h"
+
+namespace dawn::native {
+
+BlobCache::BlobCache(dawn::platform::CachingInterface* cachingInterface)
+ : mCache(cachingInterface) {}
+
+Blob BlobCache::Load(const CacheKey& key) {
+ std::lock_guard<std::mutex> lock(mMutex);
+ return LoadInternal(key);
+}
+
+void BlobCache::Store(const CacheKey& key, size_t valueSize, const void* value) {
+ std::lock_guard<std::mutex> lock(mMutex);
+ StoreInternal(key, valueSize, value);
+}
+
+void BlobCache::Store(const CacheKey& key, const Blob& value) {
+ Store(key, value.Size(), value.Data());
+}
+
+Blob BlobCache::LoadInternal(const CacheKey& key) {
+ if (mCache == nullptr) {
+ return Blob();
+ }
+ const size_t expectedSize = mCache->LoadData(key.data(), key.size(), nullptr, 0);
+ if (expectedSize > 0) {
+ // Need to put this inside to trigger copy elision.
+ Blob result = CreateBlob(expectedSize);
+ const size_t actualSize =
+ mCache->LoadData(key.data(), key.size(), result.Data(), expectedSize);
+ ASSERT(expectedSize == actualSize);
+ return result;
+ }
+ return Blob();
+}
+
+void BlobCache::StoreInternal(const CacheKey& key, size_t valueSize, const void* value) {
+ ASSERT(value != nullptr);
+ ASSERT(valueSize > 0);
+ if (mCache == nullptr) {
+ return;
+ }
+ mCache->StoreData(key.data(), key.size(), value, valueSize);
+}
+
+} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/BlobCache.h b/chromium/third_party/dawn/src/dawn/native/BlobCache.h
new file mode 100644
index 00000000000..615af2f877c
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/BlobCache.h
@@ -0,0 +1,58 @@
+// Copyright 2022 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef SRC_DAWN_NATIVE_BLOBCACHE_H_
+#define SRC_DAWN_NATIVE_BLOBCACHE_H_
+
+#include <mutex>
+
+#include "dawn/common/Platform.h"
+#include "dawn/native/Blob.h"
+
+namespace dawn::platform {
+class CachingInterface;
+}
+
+namespace dawn::native {
+
+class CacheKey;
+class InstanceBase;
+
+// This class should always be thread-safe because it may be called asynchronously. Its purpose
+// is to wrap the CachingInterface provided via a platform.
+class BlobCache {
+ public:
+ explicit BlobCache(dawn::platform::CachingInterface* cachingInterface = nullptr);
+
+ // Returns empty blob if the key is not found in the cache.
+ Blob Load(const CacheKey& key);
+
+ // Value to store must be non-empty/non-null.
+ void Store(const CacheKey& key, size_t valueSize, const void* value);
+ void Store(const CacheKey& key, const Blob& value);
+
+ private:
+    // Non-thread-safe internal implementations of load and store. Exposed functions that use
+    // these helpers must ensure that they are called with `mMutex` held.
+ Blob LoadInternal(const CacheKey& key);
+ void StoreInternal(const CacheKey& key, size_t valueSize, const void* value);
+
+ // Protects thread safety of access to mCache.
+ std::mutex mMutex;
+ dawn::platform::CachingInterface* mCache;
+};
+
+} // namespace dawn::native
+
+#endif // SRC_DAWN_NATIVE_BLOBCACHE_H_
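
A minimal standalone sketch of the locking pattern BlobCache follows, where the public entry points take the mutex once and then call non-thread-safe *Internal helpers (TinyCache and the std::unordered_map stand in for the platform CachingInterface; names are illustrative):

#include <iostream>
#include <mutex>
#include <string>
#include <unordered_map>

class TinyCache {
  public:
    std::string Load(const std::string& key) {
        std::lock_guard<std::mutex> lock(mMutex);
        return LoadInternal(key);
    }
    void Store(const std::string& key, const std::string& value) {
        std::lock_guard<std::mutex> lock(mMutex);
        StoreInternal(key, value);
    }

  private:
    // Must only be called with mMutex held.
    std::string LoadInternal(const std::string& key) {
        auto it = mEntries.find(key);
        return it == mEntries.end() ? std::string() : it->second;
    }
    void StoreInternal(const std::string& key, const std::string& value) { mEntries[key] = value; }

    std::mutex mMutex;
    std::unordered_map<std::string, std::string> mEntries;
};

int main() {
    TinyCache cache;
    cache.Store("pipeline-key", "compiled-bytes");
    std::cout << cache.Load("pipeline-key") << "\n";  // Prints compiled-bytes.
}
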
diff --git a/chromium/third_party/dawn/src/dawn/native/BuddyAllocator.cpp b/chromium/third_party/dawn/src/dawn/native/BuddyAllocator.cpp
index 76d7a657a26..2d7de752b71 100644
--- a/chromium/third_party/dawn/src/dawn/native/BuddyAllocator.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/BuddyAllocator.cpp
@@ -19,246 +19,246 @@
namespace dawn::native {
- BuddyAllocator::BuddyAllocator(uint64_t maxSize) : mMaxBlockSize(maxSize) {
- ASSERT(IsPowerOfTwo(maxSize));
+BuddyAllocator::BuddyAllocator(uint64_t maxSize) : mMaxBlockSize(maxSize) {
+ ASSERT(IsPowerOfTwo(maxSize));
- mFreeLists.resize(Log2(mMaxBlockSize) + 1);
+ mFreeLists.resize(Log2(mMaxBlockSize) + 1);
- // Insert the level0 free block.
- mRoot = new BuddyBlock(maxSize, /*offset*/ 0);
- mFreeLists[0] = {mRoot};
- }
+ // Insert the level0 free block.
+ mRoot = new BuddyBlock(maxSize, /*offset*/ 0);
+ mFreeLists[0] = {mRoot};
+}
- BuddyAllocator::~BuddyAllocator() {
- if (mRoot) {
- DeleteBlock(mRoot);
- }
+BuddyAllocator::~BuddyAllocator() {
+ if (mRoot) {
+ DeleteBlock(mRoot);
}
-
- uint64_t BuddyAllocator::ComputeTotalNumOfFreeBlocksForTesting() const {
- return ComputeNumOfFreeBlocks(mRoot);
+}
+
+uint64_t BuddyAllocator::ComputeTotalNumOfFreeBlocksForTesting() const {
+ return ComputeNumOfFreeBlocks(mRoot);
+}
+
+uint64_t BuddyAllocator::ComputeNumOfFreeBlocks(BuddyBlock* block) const {
+ if (block->mState == BlockState::Free) {
+ return 1;
+ } else if (block->mState == BlockState::Split) {
+ return ComputeNumOfFreeBlocks(block->split.pLeft) +
+ ComputeNumOfFreeBlocks(block->split.pLeft->pBuddy);
}
-
- uint64_t BuddyAllocator::ComputeNumOfFreeBlocks(BuddyBlock* block) const {
- if (block->mState == BlockState::Free) {
- return 1;
- } else if (block->mState == BlockState::Split) {
- return ComputeNumOfFreeBlocks(block->split.pLeft) +
- ComputeNumOfFreeBlocks(block->split.pLeft->pBuddy);
+ return 0;
+}
+
+uint32_t BuddyAllocator::ComputeLevelFromBlockSize(uint64_t blockSize) const {
+ // Every level in the buddy system can be indexed by order-n where n = log2(blockSize).
+    // However, mFreeList is zero-indexed by level.
+ // For example, blockSize=4 is Level1 if MAX_BLOCK is 8.
+ return Log2(mMaxBlockSize) - Log2(blockSize);
+}
+
+uint64_t BuddyAllocator::GetNextFreeAlignedBlock(size_t allocationBlockLevel,
+ uint64_t alignment) const {
+ ASSERT(IsPowerOfTwo(alignment));
+ // The current level is the level that corresponds to the allocation size. The free list may
+ // not contain a block at that level until a larger one gets allocated (and splits).
+ // Continue to go up the tree until such a larger block exists.
+ //
+    // Even if the block exists at the level, it cannot be used if its offset is unaligned.
+ // When the alignment is also a power-of-two, we simply use the next free block whose size
+ // is greater than or equal to the alignment value.
+ //
+ // After one 8-byte allocation:
+ //
+ // Level --------------------------------
+ // 0 32 | S |
+ // --------------------------------
+ // 1 16 | S | F2 | S - split
+ // -------------------------------- F - free
+ // 2 8 | Aa | F1 | | A - allocated
+ // --------------------------------
+ //
+ // Allocate(size=8, alignment=8) will be satisfied by using F1.
+    // Allocate(size=8, alignment=4) will be satisfied by using F1.
+    // Allocate(size=8, alignment=16) will be satisfied by using F2.
+ //
+ for (size_t ii = 0; ii <= allocationBlockLevel; ++ii) {
+ size_t currLevel = allocationBlockLevel - ii;
+ BuddyBlock* freeBlock = mFreeLists[currLevel].head;
+ if (freeBlock && (freeBlock->mOffset % alignment == 0)) {
+ return currLevel;
}
- return 0;
}
-
- uint32_t BuddyAllocator::ComputeLevelFromBlockSize(uint64_t blockSize) const {
- // Every level in the buddy system can be indexed by order-n where n = log2(blockSize).
- // However, mFreeList zero-indexed by level.
- // For example, blockSize=4 is Level1 if MAX_BLOCK is 8.
- return Log2(mMaxBlockSize) - Log2(blockSize);
+ return kInvalidOffset; // No free block exists at any level.
+}
+
+// Inserts existing free block into the free-list.
+// Called by allocate upon splitting to insert a child block into a free-list.
+// Note: Always insert into the head of the free-list, since when a larger free block at a lower
+// level was split, there were no smaller free blocks at a higher level to allocate.
+void BuddyAllocator::InsertFreeBlock(BuddyBlock* block, size_t level) {
+ ASSERT(block->mState == BlockState::Free);
+
+ // Inserted block is now the front (no prev).
+ block->free.pPrev = nullptr;
+
+ // Old head is now the inserted block's next.
+ block->free.pNext = mFreeLists[level].head;
+
+ // Block already in HEAD position (ex. right child was inserted first).
+ if (mFreeLists[level].head != nullptr) {
+ // Old head's previous is the inserted block.
+ mFreeLists[level].head->free.pPrev = block;
}
- uint64_t BuddyAllocator::GetNextFreeAlignedBlock(size_t allocationBlockLevel,
- uint64_t alignment) const {
- ASSERT(IsPowerOfTwo(alignment));
- // The current level is the level that corresponds to the allocation size. The free list may
- // not contain a block at that level until a larger one gets allocated (and splits).
- // Continue to go up the tree until such a larger block exists.
- //
- // Even if the block exists at the level, it cannot be used if it's offset is unaligned.
- // When the alignment is also a power-of-two, we simply use the next free block whose size
- // is greater than or equal to the alignment value.
- //
- // After one 8-byte allocation:
- //
- // Level --------------------------------
- // 0 32 | S |
- // --------------------------------
- // 1 16 | S | F2 | S - split
- // -------------------------------- F - free
- // 2 8 | Aa | F1 | | A - allocated
- // --------------------------------
- //
- // Allocate(size=8, alignment=8) will be satisfied by using F1.
- // Allocate(size=8, alignment=4) will be satified by using F1.
- // Allocate(size=8, alignment=16) will be satisified by using F2.
- //
- for (size_t ii = 0; ii <= allocationBlockLevel; ++ii) {
- size_t currLevel = allocationBlockLevel - ii;
- BuddyBlock* freeBlock = mFreeLists[currLevel].head;
- if (freeBlock && (freeBlock->mOffset % alignment == 0)) {
- return currLevel;
- }
- }
- return kInvalidOffset; // No free block exists at any level.
- }
+ mFreeLists[level].head = block;
+}
- // Inserts existing free block into the free-list.
- // Called by allocate upon splitting to insert a child block into a free-list.
- // Note: Always insert into the head of the free-list. As when a larger free block at a lower
- // level was split, there were no smaller free blocks at a higher level to allocate.
- void BuddyAllocator::InsertFreeBlock(BuddyBlock* block, size_t level) {
- ASSERT(block->mState == BlockState::Free);
+void BuddyAllocator::RemoveFreeBlock(BuddyBlock* block, size_t level) {
+ ASSERT(block->mState == BlockState::Free);
- // Inserted block is now the front (no prev).
- block->free.pPrev = nullptr;
+ if (mFreeLists[level].head == block) {
+ // Block is in HEAD position.
+ mFreeLists[level].head = mFreeLists[level].head->free.pNext;
+ } else {
+ // Block is after HEAD position.
+ BuddyBlock* pPrev = block->free.pPrev;
+ BuddyBlock* pNext = block->free.pNext;
- // Old head is now the inserted block's next.
- block->free.pNext = mFreeLists[level].head;
+ ASSERT(pPrev != nullptr);
+ ASSERT(pPrev->mState == BlockState::Free);
- // Block already in HEAD position (ex. right child was inserted first).
- if (mFreeLists[level].head != nullptr) {
- // Old head's previous is the inserted block.
- mFreeLists[level].head->free.pPrev = block;
- }
+ pPrev->free.pNext = pNext;
- mFreeLists[level].head = block;
+ if (pNext != nullptr) {
+ ASSERT(pNext->mState == BlockState::Free);
+ pNext->free.pPrev = pPrev;
+ }
}
+}
- void BuddyAllocator::RemoveFreeBlock(BuddyBlock* block, size_t level) {
- ASSERT(block->mState == BlockState::Free);
+uint64_t BuddyAllocator::Allocate(uint64_t allocationSize, uint64_t alignment) {
+ if (allocationSize == 0 || allocationSize > mMaxBlockSize) {
+ return kInvalidOffset;
+ }
- if (mFreeLists[level].head == block) {
- // Block is in HEAD position.
- mFreeLists[level].head = mFreeLists[level].head->free.pNext;
- } else {
- // Block is after HEAD position.
- BuddyBlock* pPrev = block->free.pPrev;
- BuddyBlock* pNext = block->free.pNext;
+ // Compute the level
+ const uint32_t allocationSizeToLevel = ComputeLevelFromBlockSize(allocationSize);
- ASSERT(pPrev != nullptr);
- ASSERT(pPrev->mState == BlockState::Free);
+ ASSERT(allocationSizeToLevel < mFreeLists.size());
- pPrev->free.pNext = pNext;
+ uint64_t currBlockLevel = GetNextFreeAlignedBlock(allocationSizeToLevel, alignment);
- if (pNext != nullptr) {
- ASSERT(pNext->mState == BlockState::Free);
- pNext->free.pPrev = pPrev;
- }
- }
+ // Error when no free blocks exist (allocator is full)
+ if (currBlockLevel == kInvalidOffset) {
+ return kInvalidOffset;
}
- uint64_t BuddyAllocator::Allocate(uint64_t allocationSize, uint64_t alignment) {
- if (allocationSize == 0 || allocationSize > mMaxBlockSize) {
- return kInvalidOffset;
- }
+ // Split free blocks level-by-level.
+ // Terminate when the current block level is equal to the computed level of the requested
+ // allocation.
+ BuddyBlock* currBlock = mFreeLists[currBlockLevel].head;
- // Compute the level
- const uint32_t allocationSizeToLevel = ComputeLevelFromBlockSize(allocationSize);
+ for (; currBlockLevel < allocationSizeToLevel; currBlockLevel++) {
+ ASSERT(currBlock->mState == BlockState::Free);
- ASSERT(allocationSizeToLevel < mFreeLists.size());
+ // Remove curr block (about to be split).
+ RemoveFreeBlock(currBlock, currBlockLevel);
- uint64_t currBlockLevel = GetNextFreeAlignedBlock(allocationSizeToLevel, alignment);
+ // Create two free child blocks (the buddies).
+ const uint64_t nextLevelSize = currBlock->mSize / 2;
+ BuddyBlock* leftChildBlock = new BuddyBlock(nextLevelSize, currBlock->mOffset);
+ BuddyBlock* rightChildBlock =
+ new BuddyBlock(nextLevelSize, currBlock->mOffset + nextLevelSize);
- // Error when no free blocks exist (allocator is full)
- if (currBlockLevel == kInvalidOffset) {
- return kInvalidOffset;
- }
+ // Remember the parent to merge these back upon de-allocation.
+ rightChildBlock->pParent = currBlock;
+ leftChildBlock->pParent = currBlock;
- // Split free blocks level-by-level.
- // Terminate when the current block level is equal to the computed level of the requested
- // allocation.
- BuddyBlock* currBlock = mFreeLists[currBlockLevel].head;
+ // Make them buddies.
+ leftChildBlock->pBuddy = rightChildBlock;
+ rightChildBlock->pBuddy = leftChildBlock;
- for (; currBlockLevel < allocationSizeToLevel; currBlockLevel++) {
- ASSERT(currBlock->mState == BlockState::Free);
+ // Insert the children back into the free list into the next level.
+ // The free list does not require a specific order. However, an order is specified as
+ // it's ideal to allocate lower addresses first by having the leftmost child in HEAD.
+ InsertFreeBlock(rightChildBlock, currBlockLevel + 1);
+ InsertFreeBlock(leftChildBlock, currBlockLevel + 1);
- // Remove curr block (about to be split).
- RemoveFreeBlock(currBlock, currBlockLevel);
+ // Curr block is now split.
+ currBlock->mState = BlockState::Split;
+ currBlock->split.pLeft = leftChildBlock;
- // Create two free child blocks (the buddies).
- const uint64_t nextLevelSize = currBlock->mSize / 2;
- BuddyBlock* leftChildBlock = new BuddyBlock(nextLevelSize, currBlock->mOffset);
- BuddyBlock* rightChildBlock =
- new BuddyBlock(nextLevelSize, currBlock->mOffset + nextLevelSize);
+        // Descend into the next level.
+ currBlock = leftChildBlock;
+ }
- // Remember the parent to merge these back upon de-allocation.
- rightChildBlock->pParent = currBlock;
- leftChildBlock->pParent = currBlock;
+ // Remove curr block from free-list (now allocated).
+ RemoveFreeBlock(currBlock, currBlockLevel);
+ currBlock->mState = BlockState::Allocated;
- // Make them buddies.
- leftChildBlock->pBuddy = rightChildBlock;
- rightChildBlock->pBuddy = leftChildBlock;
+ return currBlock->mOffset;
+}
- // Insert the children back into the free list into the next level.
- // The free list does not require a specific order. However, an order is specified as
- // it's ideal to allocate lower addresses first by having the leftmost child in HEAD.
- InsertFreeBlock(rightChildBlock, currBlockLevel + 1);
- InsertFreeBlock(leftChildBlock, currBlockLevel + 1);
+void BuddyAllocator::Deallocate(uint64_t offset) {
+ BuddyBlock* curr = mRoot;
- // Curr block is now split.
- currBlock->mState = BlockState::Split;
- currBlock->split.pLeft = leftChildBlock;
+ // TODO(crbug.com/dawn/827): Optimize de-allocation.
+ // Passing allocationSize directly will avoid the following level-by-level search;
+ // however, it requires the size information to be stored outside the allocator.
- // Decend down into the next level.
- currBlock = leftChildBlock;
+ // Search for the free block node that corresponds to the block offset.
+ size_t currBlockLevel = 0;
+ while (curr->mState == BlockState::Split) {
+ if (offset < curr->split.pLeft->pBuddy->mOffset) {
+ curr = curr->split.pLeft;
+ } else {
+ curr = curr->split.pLeft->pBuddy;
}
- // Remove curr block from free-list (now allocated).
- RemoveFreeBlock(currBlock, currBlockLevel);
- currBlock->mState = BlockState::Allocated;
-
- return currBlock->mOffset;
+ currBlockLevel++;
}
- void BuddyAllocator::Deallocate(uint64_t offset) {
- BuddyBlock* curr = mRoot;
-
- // TODO(crbug.com/dawn/827): Optimize de-allocation.
- // Passing allocationSize directly will avoid the following level-by-level search;
- // however, it requires the size information to be stored outside the allocator.
-
- // Search for the free block node that corresponds to the block offset.
- size_t currBlockLevel = 0;
- while (curr->mState == BlockState::Split) {
- if (offset < curr->split.pLeft->pBuddy->mOffset) {
- curr = curr->split.pLeft;
- } else {
- curr = curr->split.pLeft->pBuddy;
- }
+ ASSERT(curr->mState == BlockState::Allocated);
- currBlockLevel++;
- }
-
- ASSERT(curr->mState == BlockState::Allocated);
+ // Ensure the block is at the correct level
+ ASSERT(currBlockLevel == ComputeLevelFromBlockSize(curr->mSize));
- // Ensure the block is at the correct level
- ASSERT(currBlockLevel == ComputeLevelFromBlockSize(curr->mSize));
+ // Mark curr free so we can merge.
+ curr->mState = BlockState::Free;
- // Mark curr free so we can merge.
- curr->mState = BlockState::Free;
+ // Merge the buddies (LevelN-to-Level0).
+ while (currBlockLevel > 0 && curr->pBuddy->mState == BlockState::Free) {
+ // Remove the buddy.
+ RemoveFreeBlock(curr->pBuddy, currBlockLevel);
- // Merge the buddies (LevelN-to-Level0).
- while (currBlockLevel > 0 && curr->pBuddy->mState == BlockState::Free) {
- // Remove the buddy.
- RemoveFreeBlock(curr->pBuddy, currBlockLevel);
+ BuddyBlock* parent = curr->pParent;
- BuddyBlock* parent = curr->pParent;
+ // The buddies were inserted in a specific order but
+ // could be deleted in any order.
+ DeleteBlock(curr->pBuddy);
+ DeleteBlock(curr);
- // The buddies were inserted in a specific order but
- // could be deleted in any order.
- DeleteBlock(curr->pBuddy);
- DeleteBlock(curr);
+ // Parent is now free.
+ parent->mState = BlockState::Free;
- // Parent is now free.
- parent->mState = BlockState::Free;
-
- // Ascend up to the next level (parent block).
- curr = parent;
- currBlockLevel--;
- }
-
- InsertFreeBlock(curr, currBlockLevel);
+ // Ascend up to the next level (parent block).
+ curr = parent;
+ currBlockLevel--;
}
- // Helper which deletes a block in the tree recursively (post-order).
- void BuddyAllocator::DeleteBlock(BuddyBlock* block) {
- ASSERT(block != nullptr);
+ InsertFreeBlock(curr, currBlockLevel);
+}
- if (block->mState == BlockState::Split) {
- // Delete the pair in same order we inserted.
- DeleteBlock(block->split.pLeft->pBuddy);
- DeleteBlock(block->split.pLeft);
- }
- delete block;
+// Helper which deletes a block in the tree recursively (post-order).
+void BuddyAllocator::DeleteBlock(BuddyBlock* block) {
+ ASSERT(block != nullptr);
+
+ if (block->mState == BlockState::Split) {
+ // Delete the pair in same order we inserted.
+ DeleteBlock(block->split.pLeft->pBuddy);
+ DeleteBlock(block->split.pLeft);
}
+ delete block;
+}
} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/BuddyAllocator.h b/chromium/third_party/dawn/src/dawn/native/BuddyAllocator.h
index d22bd588120..e0c478b656b 100644
--- a/chromium/third_party/dawn/src/dawn/native/BuddyAllocator.h
+++ b/chromium/third_party/dawn/src/dawn/native/BuddyAllocator.h
@@ -22,96 +22,96 @@
namespace dawn::native {
- // Buddy allocator uses the buddy memory allocation technique to satisfy an allocation request.
- // Memory is split into halves until just large enough to fit to the request. This
- // requires the allocation size to be a power-of-two value. The allocator "allocates" a block by
- // returning the starting offset whose size is guaranteed to be greater than or equal to the
- // allocation size. To deallocate, the same offset is used to find the corresponding block.
- //
- // Internally, it manages a free list to track free blocks in a full binary tree.
- // Every index in the free list corresponds to a level in the tree. That level also determines
- // the size of the block to be used to satisfy the request. The first level (index=0) represents
- // the root whose size is also called the max block size.
- //
- class BuddyAllocator {
- public:
- explicit BuddyAllocator(uint64_t maxSize);
- ~BuddyAllocator();
-
- // Required methods.
- uint64_t Allocate(uint64_t allocationSize, uint64_t alignment = 1);
- void Deallocate(uint64_t offset);
-
- // For testing purposes only.
- uint64_t ComputeTotalNumOfFreeBlocksForTesting() const;
-
- static constexpr uint64_t kInvalidOffset = std::numeric_limits<uint64_t>::max();
-
- private:
- uint32_t ComputeLevelFromBlockSize(uint64_t blockSize) const;
- uint64_t GetNextFreeAlignedBlock(size_t allocationBlockLevel, uint64_t alignment) const;
-
- enum class BlockState { Free, Split, Allocated };
-
- struct BuddyBlock {
- BuddyBlock(uint64_t size, uint64_t offset)
- : mOffset(offset), mSize(size), mState(BlockState::Free) {
- free.pPrev = nullptr;
- free.pNext = nullptr;
- }
-
- uint64_t mOffset;
- uint64_t mSize;
-
- // Pointer to this block's buddy, iff parent is split.
- // Used to quickly merge buddy blocks upon de-allocate.
- BuddyBlock* pBuddy = nullptr;
- BuddyBlock* pParent = nullptr;
-
- // Track whether this block has been split or not.
- BlockState mState;
-
- struct FreeLinks {
- BuddyBlock* pPrev;
- BuddyBlock* pNext;
- };
-
- struct SplitLink {
- BuddyBlock* pLeft;
- };
-
- union {
- // Used upon allocation.
- // Avoids searching for the next free block.
- FreeLinks free;
-
- // Used upon de-allocation.
- // Had this block split upon allocation, it and it's buddy is to be deleted.
- SplitLink split;
- };
+// Buddy allocator uses the buddy memory allocation technique to satisfy an allocation request.
+// Memory is split into halves until just large enough to fit the request. This
+// requires the allocation size to be a power-of-two value. The allocator "allocates" a block by
+// returning the starting offset whose size is guaranteed to be greater than or equal to the
+// allocation size. To deallocate, the same offset is used to find the corresponding block.
+//
+// Internally, it manages a free list to track free blocks in a full binary tree.
+// Every index in the free list corresponds to a level in the tree. That level also determines
+// the size of the block to be used to satisfy the request. The first level (index=0) represents
+// the root whose size is also called the max block size.
+//
+class BuddyAllocator {
+ public:
+ explicit BuddyAllocator(uint64_t maxSize);
+ ~BuddyAllocator();
+
+ // Required methods.
+ uint64_t Allocate(uint64_t allocationSize, uint64_t alignment = 1);
+ void Deallocate(uint64_t offset);
+
+ // For testing purposes only.
+ uint64_t ComputeTotalNumOfFreeBlocksForTesting() const;
+
+ static constexpr uint64_t kInvalidOffset = std::numeric_limits<uint64_t>::max();
+
+ private:
+ uint32_t ComputeLevelFromBlockSize(uint64_t blockSize) const;
+ uint64_t GetNextFreeAlignedBlock(size_t allocationBlockLevel, uint64_t alignment) const;
+
+ enum class BlockState { Free, Split, Allocated };
+
+ struct BuddyBlock {
+ BuddyBlock(uint64_t size, uint64_t offset)
+ : mOffset(offset), mSize(size), mState(BlockState::Free) {
+ free.pPrev = nullptr;
+ free.pNext = nullptr;
+ }
+
+ uint64_t mOffset;
+ uint64_t mSize;
+
+ // Pointer to this block's buddy, iff parent is split.
+ // Used to quickly merge buddy blocks upon de-allocate.
+ BuddyBlock* pBuddy = nullptr;
+ BuddyBlock* pParent = nullptr;
+
+ // Track whether this block has been split or not.
+ BlockState mState;
+
+ struct FreeLinks {
+ BuddyBlock* pPrev;
+ BuddyBlock* pNext;
};
- void InsertFreeBlock(BuddyBlock* block, size_t level);
- void RemoveFreeBlock(BuddyBlock* block, size_t level);
- void DeleteBlock(BuddyBlock* block);
+ struct SplitLink {
+ BuddyBlock* pLeft;
+ };
- uint64_t ComputeNumOfFreeBlocks(BuddyBlock* block) const;
+ union {
+ // Used upon allocation.
+ // Avoids searching for the next free block.
+ FreeLinks free;
- // Keep track the head and tail (for faster insertion/removal).
- struct BlockList {
- BuddyBlock* head = nullptr; // First free block in level.
- // TODO(crbug.com/dawn/827): Track the tail.
+ // Used upon de-allocation.
+            // Had this block been split upon allocation, it and its buddy are to be deleted.
+ SplitLink split;
};
+ };
- BuddyBlock* mRoot = nullptr; // Used to deallocate non-free blocks.
+ void InsertFreeBlock(BuddyBlock* block, size_t level);
+ void RemoveFreeBlock(BuddyBlock* block, size_t level);
+ void DeleteBlock(BuddyBlock* block);
- uint64_t mMaxBlockSize = 0;
+ uint64_t ComputeNumOfFreeBlocks(BuddyBlock* block) const;
- // List of linked-lists of free blocks where the index is a level that
- // corresponds to a power-of-two sized block.
- std::vector<BlockList> mFreeLists;
+    // Keep track of the head and tail (for faster insertion/removal).
+ struct BlockList {
+ BuddyBlock* head = nullptr; // First free block in level.
+ // TODO(crbug.com/dawn/827): Track the tail.
};
+ BuddyBlock* mRoot = nullptr; // Used to deallocate non-free blocks.
+
+ uint64_t mMaxBlockSize = 0;
+
+ // List of linked-lists of free blocks where the index is a level that
+ // corresponds to a power-of-two sized block.
+ std::vector<BlockList> mFreeLists;
+};
+
} // namespace dawn::native
#endif // SRC_DAWN_NATIVE_BUDDYALLOCATOR_H_
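
The index arithmetic behind the allocator above, as a standalone sketch: a block's level is log2(maxBlockSize) - log2(blockSize), and the buddy of a block at |offset| with size |blockSize| sits at offset ^ blockSize. Dawn tracks buddies with explicit pBuddy pointers rather than recomputing this, but the identity is the math behind the pairing; the values below are illustrative.

#include <cstdint>
#include <iostream>

uint32_t Log2(uint64_t v) {
    uint32_t result = 0;
    while (v >>= 1) {
        ++result;
    }
    return result;
}

int main() {
    const uint64_t maxBlockSize = 32;  // Matches the ASCII diagram in BuddyAllocator.cpp above.
    const uint64_t blockSize = 8;
    const uint64_t offset = 8;

    const uint32_t level = Log2(maxBlockSize) - Log2(blockSize);  // 2, the diagram's bottom row.
    const uint64_t buddyOffset = offset ^ blockSize;              // 0: the buddy of [8, 16) is [0, 8).

    std::cout << "level = " << level << ", buddy offset = " << buddyOffset << "\n";
}
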
diff --git a/chromium/third_party/dawn/src/dawn/native/BuddyMemoryAllocator.cpp b/chromium/third_party/dawn/src/dawn/native/BuddyMemoryAllocator.cpp
index faee03e2da8..f744440f3a7 100644
--- a/chromium/third_party/dawn/src/dawn/native/BuddyMemoryAllocator.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/BuddyMemoryAllocator.cpp
@@ -14,107 +14,111 @@
#include "dawn/native/BuddyMemoryAllocator.h"
+#include <utility>
+
#include "dawn/common/Math.h"
#include "dawn/native/ResourceHeapAllocator.h"
namespace dawn::native {
- BuddyMemoryAllocator::BuddyMemoryAllocator(uint64_t maxSystemSize,
- uint64_t memoryBlockSize,
- ResourceHeapAllocator* heapAllocator)
- : mMemoryBlockSize(memoryBlockSize),
- mBuddyBlockAllocator(maxSystemSize),
- mHeapAllocator(heapAllocator) {
- ASSERT(memoryBlockSize <= maxSystemSize);
- ASSERT(IsPowerOfTwo(mMemoryBlockSize));
- ASSERT(maxSystemSize % mMemoryBlockSize == 0);
-
- mTrackedSubAllocations.resize(maxSystemSize / mMemoryBlockSize);
- }
+BuddyMemoryAllocator::BuddyMemoryAllocator(uint64_t maxSystemSize,
+ uint64_t memoryBlockSize,
+ ResourceHeapAllocator* heapAllocator)
+ : mMemoryBlockSize(memoryBlockSize),
+ mBuddyBlockAllocator(maxSystemSize),
+ mHeapAllocator(heapAllocator) {
+ ASSERT(memoryBlockSize <= maxSystemSize);
+ ASSERT(IsPowerOfTwo(mMemoryBlockSize));
+ ASSERT(maxSystemSize % mMemoryBlockSize == 0);
- uint64_t BuddyMemoryAllocator::GetMemoryIndex(uint64_t offset) const {
- ASSERT(offset != BuddyAllocator::kInvalidOffset);
- return offset / mMemoryBlockSize;
- }
+ mTrackedSubAllocations.resize(maxSystemSize / mMemoryBlockSize);
+}
- ResultOrError<ResourceMemoryAllocation> BuddyMemoryAllocator::Allocate(uint64_t allocationSize,
- uint64_t alignment) {
- ResourceMemoryAllocation invalidAllocation = ResourceMemoryAllocation{};
+BuddyMemoryAllocator::~BuddyMemoryAllocator() = default;
- if (allocationSize == 0) {
- return std::move(invalidAllocation);
- }
+uint64_t BuddyMemoryAllocator::GetMemoryIndex(uint64_t offset) const {
+ ASSERT(offset != BuddyAllocator::kInvalidOffset);
+ return offset / mMemoryBlockSize;
+}
- // Check the unaligned size to avoid overflowing NextPowerOfTwo.
- if (allocationSize > mMemoryBlockSize) {
- return std::move(invalidAllocation);
- }
+ResultOrError<ResourceMemoryAllocation> BuddyMemoryAllocator::Allocate(uint64_t allocationSize,
+ uint64_t alignment) {
+ ResourceMemoryAllocation invalidAllocation = ResourceMemoryAllocation{};
- // Round allocation size to nearest power-of-two.
- allocationSize = NextPowerOfTwo(allocationSize);
+ if (allocationSize == 0) {
+ return std::move(invalidAllocation);
+ }
- // Allocation cannot exceed the memory size.
- if (allocationSize > mMemoryBlockSize) {
- return std::move(invalidAllocation);
- }
+ // Check the unaligned size to avoid overflowing NextPowerOfTwo.
+ if (allocationSize > mMemoryBlockSize) {
+ return std::move(invalidAllocation);
+ }
- // Attempt to sub-allocate a block of the requested size.
- const uint64_t blockOffset = mBuddyBlockAllocator.Allocate(allocationSize, alignment);
- if (blockOffset == BuddyAllocator::kInvalidOffset) {
- return std::move(invalidAllocation);
- }
+ // Round the allocation size up to the next power-of-two.
+ allocationSize = NextPowerOfTwo(allocationSize);
- const uint64_t memoryIndex = GetMemoryIndex(blockOffset);
- if (mTrackedSubAllocations[memoryIndex].refcount == 0) {
- // Transfer ownership to this allocator
- std::unique_ptr<ResourceHeapBase> memory;
- DAWN_TRY_ASSIGN(memory, mHeapAllocator->AllocateResourceHeap(mMemoryBlockSize));
- mTrackedSubAllocations[memoryIndex] = {/*refcount*/ 0, std::move(memory)};
- }
+ // Allocation cannot exceed the memory size.
+ if (allocationSize > mMemoryBlockSize) {
+ return std::move(invalidAllocation);
+ }
- mTrackedSubAllocations[memoryIndex].refcount++;
+ // Attempt to sub-allocate a block of the requested size.
+ const uint64_t blockOffset = mBuddyBlockAllocator.Allocate(allocationSize, alignment);
+ if (blockOffset == BuddyAllocator::kInvalidOffset) {
+ return std::move(invalidAllocation);
+ }
- AllocationInfo info;
- info.mBlockOffset = blockOffset;
- info.mMethod = AllocationMethod::kSubAllocated;
+ const uint64_t memoryIndex = GetMemoryIndex(blockOffset);
+ if (mTrackedSubAllocations[memoryIndex].refcount == 0) {
+ // Transfer ownership to this allocator
+ std::unique_ptr<ResourceHeapBase> memory;
+ DAWN_TRY_ASSIGN(memory, mHeapAllocator->AllocateResourceHeap(mMemoryBlockSize));
+ mTrackedSubAllocations[memoryIndex] = {/*refcount*/ 0, std::move(memory)};
+ }
- // Allocation offset is always local to the memory.
- const uint64_t memoryOffset = blockOffset % mMemoryBlockSize;
+ mTrackedSubAllocations[memoryIndex].refcount++;
- return ResourceMemoryAllocation{
- info, memoryOffset, mTrackedSubAllocations[memoryIndex].mMemoryAllocation.get()};
- }
+ AllocationInfo info;
+ info.mBlockOffset = blockOffset;
+ info.mMethod = AllocationMethod::kSubAllocated;
- void BuddyMemoryAllocator::Deallocate(const ResourceMemoryAllocation& allocation) {
- const AllocationInfo info = allocation.GetInfo();
+ // Allocation offset is always local to the memory.
+ const uint64_t memoryOffset = blockOffset % mMemoryBlockSize;
- ASSERT(info.mMethod == AllocationMethod::kSubAllocated);
+ return ResourceMemoryAllocation{info, memoryOffset,
+ mTrackedSubAllocations[memoryIndex].mMemoryAllocation.get()};
+}
- const uint64_t memoryIndex = GetMemoryIndex(info.mBlockOffset);
+void BuddyMemoryAllocator::Deallocate(const ResourceMemoryAllocation& allocation) {
+ const AllocationInfo info = allocation.GetInfo();
- ASSERT(mTrackedSubAllocations[memoryIndex].refcount > 0);
- mTrackedSubAllocations[memoryIndex].refcount--;
+ ASSERT(info.mMethod == AllocationMethod::kSubAllocated);
- if (mTrackedSubAllocations[memoryIndex].refcount == 0) {
- mHeapAllocator->DeallocateResourceHeap(
- std::move(mTrackedSubAllocations[memoryIndex].mMemoryAllocation));
- }
+ const uint64_t memoryIndex = GetMemoryIndex(info.mBlockOffset);
- mBuddyBlockAllocator.Deallocate(info.mBlockOffset);
- }
+ ASSERT(mTrackedSubAllocations[memoryIndex].refcount > 0);
+ mTrackedSubAllocations[memoryIndex].refcount--;
- uint64_t BuddyMemoryAllocator::GetMemoryBlockSize() const {
- return mMemoryBlockSize;
+ if (mTrackedSubAllocations[memoryIndex].refcount == 0) {
+ mHeapAllocator->DeallocateResourceHeap(
+ std::move(mTrackedSubAllocations[memoryIndex].mMemoryAllocation));
}
- uint64_t BuddyMemoryAllocator::ComputeTotalNumOfHeapsForTesting() const {
- uint64_t count = 0;
- for (const TrackedSubAllocations& allocation : mTrackedSubAllocations) {
- if (allocation.refcount > 0) {
- count++;
- }
+ mBuddyBlockAllocator.Deallocate(info.mBlockOffset);
+}
+
+uint64_t BuddyMemoryAllocator::GetMemoryBlockSize() const {
+ return mMemoryBlockSize;
+}
+
+uint64_t BuddyMemoryAllocator::ComputeTotalNumOfHeapsForTesting() const {
+ uint64_t count = 0;
+ for (const TrackedSubAllocations& allocation : mTrackedSubAllocations) {
+ if (allocation.refcount > 0) {
+ count++;
}
- return count;
}
+ return count;
+}
} // namespace dawn::native
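
Allocate() above maps the buddy allocator's global block offset onto a backing heap in two steps: an integer division picks the heap (memoryIndex) and the remainder is the offset inside it, with a refcount per heap so that shared heaps are released only when their last sub-allocation is gone. A minimal standalone sketch of that arithmetic, with made-up sizes:

// Minimal sketch of the mapping performed above, with made-up sizes: the heap
// index is blockOffset / memoryBlockSize and the offset inside that heap is
// blockOffset % memoryBlockSize.
#include <cassert>
#include <cstdint>

int main() {
    const uint64_t memoryBlockSize = 4ull * 1024 * 1024;  // 4 MiB backing heaps.
    const uint64_t blockOffset = 9ull * 1024 * 1024;      // Offset from the buddy allocator.

    const uint64_t memoryIndex = blockOffset / memoryBlockSize;   // Heap index 2.
    const uint64_t memoryOffset = blockOffset % memoryBlockSize;  // 1 MiB into that heap.

    assert(memoryIndex == 2);
    assert(memoryOffset == 1ull * 1024 * 1024);

    // Two sub-allocations that land on the same memoryIndex share one heap; the
    // heap is only released when its refcount drops back to zero.
    return 0;
}
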
diff --git a/chromium/third_party/dawn/src/dawn/native/BuddyMemoryAllocator.h b/chromium/third_party/dawn/src/dawn/native/BuddyMemoryAllocator.h
index e4d4f10b4cd..adbcc960742 100644
--- a/chromium/third_party/dawn/src/dawn/native/BuddyMemoryAllocator.h
+++ b/chromium/third_party/dawn/src/dawn/native/BuddyMemoryAllocator.h
@@ -15,60 +15,59 @@
#ifndef SRC_DAWN_NATIVE_BUDDYMEMORYALLOCATOR_H_
#define SRC_DAWN_NATIVE_BUDDYMEMORYALLOCATOR_H_
+#include <memory>
+#include <vector>
+
#include "dawn/native/BuddyAllocator.h"
#include "dawn/native/Error.h"
#include "dawn/native/ResourceMemoryAllocation.h"
-#include <memory>
-#include <vector>
-
namespace dawn::native {
- class ResourceHeapAllocator;
-
- // BuddyMemoryAllocator uses the buddy allocator to sub-allocate blocks of device
- // memory created by MemoryAllocator clients. It creates a very large buddy system
- // where backing device memory blocks equal a specified level in the system.
- //
- // Upon sub-allocating, the offset gets mapped to device memory by computing the corresponding
- // memory index and should the memory not exist, it is created. If two sub-allocations share the
- // same memory index, the memory refcount is incremented to ensure de-allocating one doesn't
- // release the other prematurely.
- //
- // The MemoryAllocator should return ResourceHeaps that are all compatible with each other.
- // It should also outlive all the resources that are in the buddy allocator.
- class BuddyMemoryAllocator {
- public:
- BuddyMemoryAllocator(uint64_t maxSystemSize,
- uint64_t memoryBlockSize,
- ResourceHeapAllocator* heapAllocator);
- ~BuddyMemoryAllocator() = default;
-
- ResultOrError<ResourceMemoryAllocation> Allocate(uint64_t allocationSize,
- uint64_t alignment);
- void Deallocate(const ResourceMemoryAllocation& allocation);
-
- uint64_t GetMemoryBlockSize() const;
-
- // For testing purposes.
- uint64_t ComputeTotalNumOfHeapsForTesting() const;
-
- private:
- uint64_t GetMemoryIndex(uint64_t offset) const;
-
- uint64_t mMemoryBlockSize = 0;
-
- BuddyAllocator mBuddyBlockAllocator;
- ResourceHeapAllocator* mHeapAllocator;
-
- struct TrackedSubAllocations {
- size_t refcount = 0;
- std::unique_ptr<ResourceHeapBase> mMemoryAllocation;
- };
-
- std::vector<TrackedSubAllocations> mTrackedSubAllocations;
+class ResourceHeapAllocator;
+
+// BuddyMemoryAllocator uses the buddy allocator to sub-allocate blocks of device
+// memory created by MemoryAllocator clients. It creates a very large buddy system
+// where backing device memory blocks equal a specified level in the system.
+//
+// Upon sub-allocating, the offset gets mapped to device memory by computing the corresponding
+// memory index and should the memory not exist, it is created. If two sub-allocations share the
+// same memory index, the memory refcount is incremented to ensure de-allocating one doesn't
+// release the other prematurely.
+//
+// The MemoryAllocator should return ResourceHeaps that are all compatible with each other.
+// It should also outlive all the resources that are in the buddy allocator.
+class BuddyMemoryAllocator {
+ public:
+ BuddyMemoryAllocator(uint64_t maxSystemSize,
+ uint64_t memoryBlockSize,
+ ResourceHeapAllocator* heapAllocator);
+ ~BuddyMemoryAllocator();
+
+ ResultOrError<ResourceMemoryAllocation> Allocate(uint64_t allocationSize, uint64_t alignment);
+ void Deallocate(const ResourceMemoryAllocation& allocation);
+
+ uint64_t GetMemoryBlockSize() const;
+
+ // For testing purposes.
+ uint64_t ComputeTotalNumOfHeapsForTesting() const;
+
+ private:
+ uint64_t GetMemoryIndex(uint64_t offset) const;
+
+ uint64_t mMemoryBlockSize = 0;
+
+ BuddyAllocator mBuddyBlockAllocator;
+ ResourceHeapAllocator* mHeapAllocator;
+
+ struct TrackedSubAllocations {
+ size_t refcount = 0;
+ std::unique_ptr<ResourceHeapBase> mMemoryAllocation;
};
+ std::vector<TrackedSubAllocations> mTrackedSubAllocations;
+};
+
} // namespace dawn::native
#endif // SRC_DAWN_NATIVE_BUDDYMEMORYALLOCATOR_H_
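
The constructor asserts shown in the .cpp diff above pin down the relationship between the two sizes: the backing memory block size must be a power of two and must divide the maximum system size evenly, which also fixes how many tracked sub-allocation slots exist. A small self-contained sketch of those invariants, using assumed example values:

// Self-contained sketch of the constructor invariants, with assumed example
// values: the block size is a power of two and divides the system size, so the
// number of tracked sub-allocation slots is exact.
#include <cassert>
#include <cstdint>

static bool IsPowerOfTwo(uint64_t v) {
    return v != 0 && (v & (v - 1)) == 0;
}

int main() {
    const uint64_t maxSystemSize = 64ull * 1024 * 1024;   // Whole buddy system.
    const uint64_t memoryBlockSize = 4ull * 1024 * 1024;  // One backing heap per block.

    assert(memoryBlockSize <= maxSystemSize);
    assert(IsPowerOfTwo(memoryBlockSize));
    assert(maxSystemSize % memoryBlockSize == 0);

    // One tracked sub-allocation slot per possible backing heap.
    const uint64_t heapCount = maxSystemSize / memoryBlockSize;
    assert(heapCount == 16);
    return 0;
}
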
diff --git a/chromium/third_party/dawn/src/dawn/native/Buffer.cpp b/chromium/third_party/dawn/src/dawn/native/Buffer.cpp
index f324597401b..b9b2e3dbb0f 100644
--- a/chromium/third_party/dawn/src/dawn/native/Buffer.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/Buffer.cpp
@@ -14,6 +14,11 @@
#include "dawn/native/Buffer.h"
+#include <cstdio>
+#include <cstring>
+#include <limits>
+#include <utility>
+
#include "dawn/common/Alloc.h"
#include "dawn/common/Assert.h"
#include "dawn/native/Commands.h"
@@ -23,540 +28,543 @@
#include "dawn/native/ObjectType_autogen.h"
#include "dawn/native/Queue.h"
#include "dawn/native/ValidationUtils_autogen.h"
-
-#include <cstdio>
-#include <cstring>
-#include <utility>
+#include "dawn/platform/DawnPlatform.h"
+#include "dawn/platform/tracing/TraceEvent.h"
namespace dawn::native {
- namespace {
- struct MapRequestTask : QueueBase::TaskInFlight {
- MapRequestTask(Ref<BufferBase> buffer, MapRequestID id)
- : buffer(std::move(buffer)), id(id) {
- }
- void Finish() override {
- buffer->OnMapRequestCompleted(id, WGPUBufferMapAsyncStatus_Success);
- }
- void HandleDeviceLoss() override {
- buffer->OnMapRequestCompleted(id, WGPUBufferMapAsyncStatus_DeviceLost);
- }
- ~MapRequestTask() override = default;
-
- private:
- Ref<BufferBase> buffer;
- MapRequestID id;
- };
-
- class ErrorBuffer final : public BufferBase {
- public:
- ErrorBuffer(DeviceBase* device, const BufferDescriptor* descriptor)
- : BufferBase(device, descriptor, ObjectBase::kError) {
- if (descriptor->mappedAtCreation) {
- // Check that the size can be used to allocate an mFakeMappedData. A malloc(0)
- // is invalid, and on 32bit systems we should avoid a narrowing conversion that
- // would make size = 1 << 32 + 1 allocate one byte.
- bool isValidSize =
- descriptor->size != 0 &&
- descriptor->size < uint64_t(std::numeric_limits<size_t>::max());
-
- if (isValidSize) {
- mFakeMappedData =
- std::unique_ptr<uint8_t[]>(AllocNoThrow<uint8_t>(descriptor->size));
- }
- // Since error buffers in this case may allocate memory, we need to track them
- // for destruction on the device.
- TrackInDevice();
- }
- }
+namespace {
+struct MapRequestTask : QueueBase::TaskInFlight {
+ MapRequestTask(Ref<BufferBase> buffer, MapRequestID id) : buffer(std::move(buffer)), id(id) {}
+ void Finish(dawn::platform::Platform* platform, ExecutionSerial serial) override {
+ TRACE_EVENT1(platform, General, "Buffer::TaskInFlight::Finished", "serial",
+ uint64_t(serial));
+ buffer->OnMapRequestCompleted(id, WGPUBufferMapAsyncStatus_Success);
+ }
+ void HandleDeviceLoss() override {
+ buffer->OnMapRequestCompleted(id, WGPUBufferMapAsyncStatus_DeviceLost);
+ }
+ ~MapRequestTask() override = default;
- private:
- bool IsCPUWritableAtCreation() const override {
- UNREACHABLE();
- }
+ private:
+ Ref<BufferBase> buffer;
+ MapRequestID id;
+};
- MaybeError MapAtCreationImpl() override {
- UNREACHABLE();
+class ErrorBuffer final : public BufferBase {
+ public:
+ ErrorBuffer(DeviceBase* device, const BufferDescriptor* descriptor)
+ : BufferBase(device, descriptor, ObjectBase::kError) {
+ if (descriptor->mappedAtCreation) {
+ // Check that the size can be used to allocate an mFakeMappedData. A malloc(0)
+ // is invalid, and on 32bit systems we should avoid a narrowing conversion that
+ // would make size = 1 << 32 + 1 allocate one byte.
+ bool isValidSize = descriptor->size != 0 &&
+ descriptor->size < uint64_t(std::numeric_limits<size_t>::max());
+
+ if (isValidSize) {
+ mFakeMappedData =
+ std::unique_ptr<uint8_t[]>(AllocNoThrow<uint8_t>(descriptor->size));
}
+ // Since error buffers in this case may allocate memory, we need to track them
+ // for destruction on the device.
+ TrackInDevice();
+ }
+ }
- MaybeError MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) override {
- UNREACHABLE();
- }
+ private:
+ bool IsCPUWritableAtCreation() const override { UNREACHABLE(); }
- void* GetMappedPointerImpl() override {
- return mFakeMappedData.get();
- }
+ MaybeError MapAtCreationImpl() override { UNREACHABLE(); }
- void UnmapImpl() override {
- mFakeMappedData.reset();
- }
+ MaybeError MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) override {
+ UNREACHABLE();
+ }
- std::unique_ptr<uint8_t[]> mFakeMappedData;
- };
+ void* GetMappedPointerImpl() override { return mFakeMappedData.get(); }
- } // anonymous namespace
+ void UnmapImpl() override { mFakeMappedData.reset(); }
- MaybeError ValidateBufferDescriptor(DeviceBase*, const BufferDescriptor* descriptor) {
- DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr");
- DAWN_TRY(ValidateBufferUsage(descriptor->usage));
+ std::unique_ptr<uint8_t[]> mFakeMappedData;
+};
- wgpu::BufferUsage usage = descriptor->usage;
+} // anonymous namespace
- DAWN_INVALID_IF(usage == wgpu::BufferUsage::None, "Buffer usages must not be 0.");
+MaybeError ValidateBufferDescriptor(DeviceBase*, const BufferDescriptor* descriptor) {
+ DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr");
+ DAWN_TRY(ValidateBufferUsage(descriptor->usage));
- const wgpu::BufferUsage kMapWriteAllowedUsages =
- wgpu::BufferUsage::MapWrite | wgpu::BufferUsage::CopySrc;
- DAWN_INVALID_IF(
- usage & wgpu::BufferUsage::MapWrite && !IsSubset(usage, kMapWriteAllowedUsages),
- "Buffer usages (%s) is invalid. If a buffer usage contains %s the only other allowed "
- "usage is %s.",
- usage, wgpu::BufferUsage::MapWrite, wgpu::BufferUsage::CopySrc);
+ wgpu::BufferUsage usage = descriptor->usage;
- const wgpu::BufferUsage kMapReadAllowedUsages =
- wgpu::BufferUsage::MapRead | wgpu::BufferUsage::CopyDst;
- DAWN_INVALID_IF(
- usage & wgpu::BufferUsage::MapRead && !IsSubset(usage, kMapReadAllowedUsages),
- "Buffer usages (%s) is invalid. If a buffer usage contains %s the only other allowed "
- "usage is %s.",
- usage, wgpu::BufferUsage::MapRead, wgpu::BufferUsage::CopyDst);
+ DAWN_INVALID_IF(usage == wgpu::BufferUsage::None, "Buffer usages must not be 0.");
- DAWN_INVALID_IF(descriptor->mappedAtCreation && descriptor->size % 4 != 0,
- "Buffer is mapped at creation but its size (%u) is not a multiple of 4.",
- descriptor->size);
+ const wgpu::BufferUsage kMapWriteAllowedUsages =
+ wgpu::BufferUsage::MapWrite | wgpu::BufferUsage::CopySrc;
+ DAWN_INVALID_IF(
+ usage & wgpu::BufferUsage::MapWrite && !IsSubset(usage, kMapWriteAllowedUsages),
+ "Buffer usages (%s) is invalid. If a buffer usage contains %s the only other allowed "
+ "usage is %s.",
+ usage, wgpu::BufferUsage::MapWrite, wgpu::BufferUsage::CopySrc);
- return {};
- }
+ const wgpu::BufferUsage kMapReadAllowedUsages =
+ wgpu::BufferUsage::MapRead | wgpu::BufferUsage::CopyDst;
+ DAWN_INVALID_IF(
+ usage & wgpu::BufferUsage::MapRead && !IsSubset(usage, kMapReadAllowedUsages),
+ "Buffer usages (%s) is invalid. If a buffer usage contains %s the only other allowed "
+ "usage is %s.",
+ usage, wgpu::BufferUsage::MapRead, wgpu::BufferUsage::CopyDst);
- // Buffer
-
- BufferBase::BufferBase(DeviceBase* device, const BufferDescriptor* descriptor)
- : ApiObjectBase(device, descriptor->label),
- mSize(descriptor->size),
- mUsage(descriptor->usage),
- mState(BufferState::Unmapped) {
- // Add readonly storage usage if the buffer has a storage usage. The validation rules in
- // ValidateSyncScopeResourceUsage will make sure we don't use both at the same time.
- if (mUsage & wgpu::BufferUsage::Storage) {
- mUsage |= kReadOnlyStorageBuffer;
- }
+ DAWN_INVALID_IF(descriptor->mappedAtCreation && descriptor->size % 4 != 0,
+ "Buffer is mapped at creation but its size (%u) is not a multiple of 4.",
+ descriptor->size);
- // The query resolve buffer need to be used as a storage buffer in the internal compute
- // pipeline which does timestamp uint conversion for timestamp query, it requires the buffer
- // has Storage usage in the binding group. Implicitly add an InternalStorage usage which is
- // only compatible with InternalStorageBuffer binding type in BGL. It shouldn't be
- // compatible with StorageBuffer binding type and the query resolve buffer cannot be bound
- // as storage buffer if it's created without Storage usage.
- if (mUsage & wgpu::BufferUsage::QueryResolve) {
- mUsage |= kInternalStorageBuffer;
- }
+ return {};
+}
- // We also add internal storage usage for Indirect buffers for some transformations before
- // DispatchIndirect calls on the backend (e.g. validations, support of [[num_workgroups]] on
- // D3D12), since these transformations involve binding them as storage buffers for use in a
- // compute pass.
- if (mUsage & wgpu::BufferUsage::Indirect) {
- mUsage |= kInternalStorageBuffer;
- }
+// Buffer
- TrackInDevice();
+BufferBase::BufferBase(DeviceBase* device, const BufferDescriptor* descriptor)
+ : ApiObjectBase(device, descriptor->label),
+ mSize(descriptor->size),
+ mUsage(descriptor->usage),
+ mState(BufferState::Unmapped) {
+ // Add readonly storage usage if the buffer has a storage usage. The validation rules in
+ // ValidateSyncScopeResourceUsage will make sure we don't use both at the same time.
+ if (mUsage & wgpu::BufferUsage::Storage) {
+ mUsage |= kReadOnlyStorageBuffer;
}
- BufferBase::BufferBase(DeviceBase* device,
- const BufferDescriptor* descriptor,
- ObjectBase::ErrorTag tag)
- : ApiObjectBase(device, tag), mSize(descriptor->size), mState(BufferState::Unmapped) {
- if (descriptor->mappedAtCreation) {
- mState = BufferState::MappedAtCreation;
- mMapOffset = 0;
- mMapSize = mSize;
- }
+ // The query resolve buffer needs to be used as a storage buffer in the internal compute
+ // pipeline that performs the timestamp uint conversion for timestamp queries, so the buffer
+ // must have Storage usage in the bind group. Implicitly add an InternalStorage usage, which
+ // is only compatible with the InternalStorageBuffer binding type in the BGL. It shouldn't be
+ // compatible with the StorageBuffer binding type, and the query resolve buffer cannot be
+ // bound as a storage buffer if it's created without Storage usage.
+ if (mUsage & wgpu::BufferUsage::QueryResolve) {
+ mUsage |= kInternalStorageBuffer;
}
- BufferBase::BufferBase(DeviceBase* device, BufferState state)
- : ApiObjectBase(device, kLabelNotImplemented), mState(state) {
- TrackInDevice();
+ // We also add internal storage usage for Indirect buffers for some transformations before
+ // DispatchIndirect calls on the backend (e.g. validations, support of [[num_workgroups]] on
+ // D3D12), since these transformations involve binding them as storage buffers for use in a
+ // compute pass.
+ if (mUsage & wgpu::BufferUsage::Indirect) {
+ mUsage |= kInternalStorageBuffer;
}
- BufferBase::~BufferBase() {
- ASSERT(mState == BufferState::Unmapped || mState == BufferState::Destroyed);
+ TrackInDevice();
+}
+
+BufferBase::BufferBase(DeviceBase* device,
+ const BufferDescriptor* descriptor,
+ ObjectBase::ErrorTag tag)
+ : ApiObjectBase(device, tag),
+ mSize(descriptor->size),
+ mUsage(descriptor->usage),
+ mState(BufferState::Unmapped) {
+ if (descriptor->mappedAtCreation) {
+ mState = BufferState::MappedAtCreation;
+ mMapOffset = 0;
+ mMapSize = mSize;
}
+}
+
+BufferBase::BufferBase(DeviceBase* device, BufferState state)
+ : ApiObjectBase(device, kLabelNotImplemented), mState(state) {
+ TrackInDevice();
+}
+
+BufferBase::~BufferBase() {
+ ASSERT(mState == BufferState::Unmapped || mState == BufferState::Destroyed);
+}
- void BufferBase::DestroyImpl() {
- if (mState == BufferState::Mapped) {
+void BufferBase::DestroyImpl() {
+ if (mState == BufferState::Mapped) {
+ UnmapInternal(WGPUBufferMapAsyncStatus_DestroyedBeforeCallback);
+ } else if (mState == BufferState::MappedAtCreation) {
+ if (mStagingBuffer != nullptr) {
+ mStagingBuffer.reset();
+ } else if (mSize != 0) {
UnmapInternal(WGPUBufferMapAsyncStatus_DestroyedBeforeCallback);
- } else if (mState == BufferState::MappedAtCreation) {
- if (mStagingBuffer != nullptr) {
- mStagingBuffer.reset();
- } else if (mSize != 0) {
- UnmapInternal(WGPUBufferMapAsyncStatus_DestroyedBeforeCallback);
- }
}
- mState = BufferState::Destroyed;
- }
-
- // static
- BufferBase* BufferBase::MakeError(DeviceBase* device, const BufferDescriptor* descriptor) {
- return new ErrorBuffer(device, descriptor);
}
-
- ObjectType BufferBase::GetType() const {
- return ObjectType::Buffer;
+ mState = BufferState::Destroyed;
+}
+
+// static
+BufferBase* BufferBase::MakeError(DeviceBase* device, const BufferDescriptor* descriptor) {
+ return new ErrorBuffer(device, descriptor);
+}
+
+ObjectType BufferBase::GetType() const {
+ return ObjectType::Buffer;
+}
+
+uint64_t BufferBase::GetSize() const {
+ ASSERT(!IsError());
+ return mSize;
+}
+
+uint64_t BufferBase::GetAllocatedSize() const {
+ ASSERT(!IsError());
+ // The backend must initialize this value.
+ ASSERT(mAllocatedSize != 0);
+ return mAllocatedSize;
+}
+
+wgpu::BufferUsage BufferBase::GetUsage() const {
+ ASSERT(!IsError());
+ return mUsage;
+}
+
+wgpu::BufferUsage BufferBase::GetUsageExternalOnly() const {
+ ASSERT(!IsError());
+ return GetUsage() & ~kAllInternalBufferUsages;
+}
+
+wgpu::BufferUsage BufferBase::APIGetUsage() const {
+ return mUsage & ~kAllInternalBufferUsages;
+}
+
+MaybeError BufferBase::MapAtCreation() {
+ DAWN_TRY(MapAtCreationInternal());
+
+ void* ptr;
+ size_t size;
+ if (mSize == 0) {
+ return {};
+ } else if (mStagingBuffer) {
+ // If there is a staging buffer for initialization, clear its contents directly.
+ // It should be exactly as large as the buffer allocation.
+ ptr = mStagingBuffer->GetMappedPointer();
+ size = mStagingBuffer->GetSize();
+ ASSERT(size == GetAllocatedSize());
+ } else {
+ // Otherwise, the buffer is directly mappable on the CPU.
+ ptr = GetMappedPointerImpl();
+ size = GetAllocatedSize();
+ }
+
+ DeviceBase* device = GetDevice();
+ if (device->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse)) {
+ memset(ptr, uint8_t(0u), size);
+ SetIsDataInitialized();
+ device->IncrementLazyClearCountForTesting();
+ } else if (device->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting)) {
+ memset(ptr, uint8_t(1u), size);
+ }
+
+ return {};
+}
+
+MaybeError BufferBase::MapAtCreationInternal() {
+ ASSERT(!IsError());
+ mMapOffset = 0;
+ mMapSize = mSize;
+
+ // 0-sized buffers are not supposed to be written to. Return back any non-null pointer.
+ // Skip handling 0-sized buffers so we don't try to map them in the backend.
+ if (mSize != 0) {
+ // Mappable buffers don't use a staging buffer and are just as if mapped through
+ // MapAsync.
+ if (IsCPUWritableAtCreation()) {
+ DAWN_TRY(MapAtCreationImpl());
+ } else {
+ // If any of these fail, the buffer will be deleted and replaced with an error
+ // buffer. The staging buffer is used to return mappable data to initialize the
+ // buffer contents. Allocate one as large as the real buffer size so that every byte
+ // is initialized.
+ // TODO(crbug.com/dawn/828): Suballocate and reuse memory from a larger staging
+ // buffer so we don't create many small buffers.
+ DAWN_TRY_ASSIGN(mStagingBuffer, GetDevice()->CreateStagingBuffer(GetAllocatedSize()));
+ }
}
- uint64_t BufferBase::GetSize() const {
- ASSERT(!IsError());
- return mSize;
- }
+ // Only set the state to mapped at creation if we did not fail at any point in this helper.
+ // Otherwise, if we override the default unmapped state before succeeding to create a
+ // staging buffer, we will have issues when we try to destroy the buffer.
+ mState = BufferState::MappedAtCreation;
+ return {};
+}
- uint64_t BufferBase::GetAllocatedSize() const {
- ASSERT(!IsError());
- // The backend must initialize this value.
- ASSERT(mAllocatedSize != 0);
- return mAllocatedSize;
- }
+MaybeError BufferBase::ValidateCanUseOnQueueNow() const {
+ ASSERT(!IsError());
- wgpu::BufferUsage BufferBase::GetUsage() const {
- ASSERT(!IsError());
- return mUsage;
+ switch (mState) {
+ case BufferState::Destroyed:
+ return DAWN_FORMAT_VALIDATION_ERROR("%s used in submit while destroyed.", this);
+ case BufferState::Mapped:
+ case BufferState::MappedAtCreation:
+ return DAWN_FORMAT_VALIDATION_ERROR("%s used in submit while mapped.", this);
+ case BufferState::Unmapped:
+ return {};
}
+ UNREACHABLE();
+}
- MaybeError BufferBase::MapAtCreation() {
- DAWN_TRY(MapAtCreationInternal());
+void BufferBase::CallMapCallback(MapRequestID mapID, WGPUBufferMapAsyncStatus status) {
+ ASSERT(!IsError());
+ if (mMapCallback != nullptr && mapID == mLastMapID) {
+ // Tag the callback as fired before firing it, otherwise it could fire a second time if
+ // for example buffer.Unmap() is called inside the application-provided callback.
+ WGPUBufferMapCallback callback = mMapCallback;
+ mMapCallback = nullptr;
- void* ptr;
- size_t size;
- if (mSize == 0) {
- return {};
- } else if (mStagingBuffer) {
- // If there is a staging buffer for initialization, clear its contents directly.
- // It should be exactly as large as the buffer allocation.
- ptr = mStagingBuffer->GetMappedPointer();
- size = mStagingBuffer->GetSize();
- ASSERT(size == GetAllocatedSize());
+ if (GetDevice()->IsLost()) {
+ callback(WGPUBufferMapAsyncStatus_DeviceLost, mMapUserdata);
} else {
- // Otherwise, the buffer is directly mappable on the CPU.
- ptr = GetMappedPointerImpl();
- size = GetAllocatedSize();
+ callback(status, mMapUserdata);
}
-
- DeviceBase* device = GetDevice();
- if (device->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse)) {
- memset(ptr, uint8_t(0u), size);
- SetIsDataInitialized();
- device->IncrementLazyClearCountForTesting();
- } else if (device->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting)) {
- memset(ptr, uint8_t(1u), size);
+ }
+}
+
+void BufferBase::APIMapAsync(wgpu::MapMode mode,
+ size_t offset,
+ size_t size,
+ WGPUBufferMapCallback callback,
+ void* userdata) {
+ // Handle the defaulting of size required by WebGPU, even if in webgpu_cpp.h it is not
+ // possible to default the function argument (because there is the callback later in the
+ // argument list)
+ if ((size == wgpu::kWholeMapSize) && (offset <= mSize)) {
+ size = mSize - offset;
+ }
+
+ WGPUBufferMapAsyncStatus status;
+ if (GetDevice()->ConsumedError(ValidateMapAsync(mode, offset, size, &status),
+ "calling %s.MapAsync(%s, %u, %u, ...).", this, mode, offset,
+ size)) {
+ if (callback) {
+ callback(status, userdata);
}
+ return;
+ }
+ ASSERT(!IsError());
- return {};
+ mLastMapID++;
+ mMapMode = mode;
+ mMapOffset = offset;
+ mMapSize = size;
+ mMapCallback = callback;
+ mMapUserdata = userdata;
+ mState = BufferState::Mapped;
+
+ if (GetDevice()->ConsumedError(MapAsyncImpl(mode, offset, size))) {
+ CallMapCallback(mLastMapID, WGPUBufferMapAsyncStatus_DeviceLost);
+ return;
}
+ std::unique_ptr<MapRequestTask> request = std::make_unique<MapRequestTask>(this, mLastMapID);
+ TRACE_EVENT1(GetDevice()->GetPlatform(), General, "Buffer::APIMapAsync", "serial",
+ uint64_t(GetDevice()->GetPendingCommandSerial()));
+ GetDevice()->GetQueue()->TrackTask(std::move(request), GetDevice()->GetPendingCommandSerial());
+}
- MaybeError BufferBase::MapAtCreationInternal() {
- ASSERT(!IsError());
- mMapOffset = 0;
- mMapSize = mSize;
+void* BufferBase::APIGetMappedRange(size_t offset, size_t size) {
+ return GetMappedRange(offset, size, true);
+}
- // 0-sized buffers are not supposed to be written to. Return back any non-null pointer.
- // Skip handling 0-sized buffers so we don't try to map them in the backend.
- if (mSize != 0) {
- // Mappable buffers don't use a staging buffer and are just as if mapped through
- // MapAsync.
- if (IsCPUWritableAtCreation()) {
- DAWN_TRY(MapAtCreationImpl());
- } else {
- // If any of these fail, the buffer will be deleted and replaced with an error
- // buffer. The staging buffer is used to return mappable data to inititalize the
- // buffer contents. Allocate one as large as the real buffer size so that every byte
- // is initialized.
- // TODO(crbug.com/dawn/828): Suballocate and reuse memory from a larger staging
- // buffer so we don't create many small buffers.
- DAWN_TRY_ASSIGN(mStagingBuffer,
- GetDevice()->CreateStagingBuffer(GetAllocatedSize()));
- }
- }
+const void* BufferBase::APIGetConstMappedRange(size_t offset, size_t size) {
+ return GetMappedRange(offset, size, false);
+}
- // Only set the state to mapped at creation if we did no fail any point in this helper.
- // Otherwise, if we override the default unmapped state before succeeding to create a
- // staging buffer, we will have issues when we try to destroy the buffer.
- mState = BufferState::MappedAtCreation;
- return {};
+void* BufferBase::GetMappedRange(size_t offset, size_t size, bool writable) {
+ if (!CanGetMappedRange(writable, offset, size)) {
+ return nullptr;
}
- MaybeError BufferBase::ValidateCanUseOnQueueNow() const {
- ASSERT(!IsError());
-
- switch (mState) {
- case BufferState::Destroyed:
- return DAWN_FORMAT_VALIDATION_ERROR("%s used in submit while destroyed.", this);
- case BufferState::Mapped:
- case BufferState::MappedAtCreation:
- return DAWN_FORMAT_VALIDATION_ERROR("%s used in submit while mapped.", this);
- case BufferState::Unmapped:
- return {};
- }
- UNREACHABLE();
+ if (mStagingBuffer != nullptr) {
+ return static_cast<uint8_t*>(mStagingBuffer->GetMappedPointer()) + offset;
}
-
- void BufferBase::CallMapCallback(MapRequestID mapID, WGPUBufferMapAsyncStatus status) {
- ASSERT(!IsError());
- if (mMapCallback != nullptr && mapID == mLastMapID) {
- // Tag the callback as fired before firing it, otherwise it could fire a second time if
- // for example buffer.Unmap() is called inside the application-provided callback.
- WGPUBufferMapCallback callback = mMapCallback;
- mMapCallback = nullptr;
-
- if (GetDevice()->IsLost()) {
- callback(WGPUBufferMapAsyncStatus_DeviceLost, mMapUserdata);
- } else {
- callback(status, mMapUserdata);
- }
- }
+ if (mSize == 0) {
+ return reinterpret_cast<uint8_t*>(intptr_t(0xCAFED00D));
}
+ uint8_t* start = static_cast<uint8_t*>(GetMappedPointerImpl());
+ return start == nullptr ? nullptr : start + offset;
+}
- void BufferBase::APIMapAsync(wgpu::MapMode mode,
- size_t offset,
- size_t size,
- WGPUBufferMapCallback callback,
- void* userdata) {
- // Handle the defaulting of size required by WebGPU, even if in webgpu_cpp.h it is not
- // possible to default the function argument (because there is the callback later in the
- // argument list)
- if ((size == wgpu::kWholeMapSize) && (offset <= mSize)) {
- size = mSize - offset;
- }
+void BufferBase::APIDestroy() {
+ Destroy();
+}
- WGPUBufferMapAsyncStatus status;
- if (GetDevice()->ConsumedError(ValidateMapAsync(mode, offset, size, &status),
- "calling %s.MapAsync(%s, %u, %u, ...).", this, mode, offset,
- size)) {
- if (callback) {
- callback(status, userdata);
- }
- return;
- }
- ASSERT(!IsError());
-
- mLastMapID++;
- mMapMode = mode;
- mMapOffset = offset;
- mMapSize = size;
- mMapCallback = callback;
- mMapUserdata = userdata;
- mState = BufferState::Mapped;
-
- if (GetDevice()->ConsumedError(MapAsyncImpl(mode, offset, size))) {
- CallMapCallback(mLastMapID, WGPUBufferMapAsyncStatus_DeviceLost);
- return;
- }
- std::unique_ptr<MapRequestTask> request =
- std::make_unique<MapRequestTask>(this, mLastMapID);
- GetDevice()->GetQueue()->TrackTask(std::move(request),
- GetDevice()->GetPendingCommandSerial());
- }
+uint64_t BufferBase::APIGetSize() const {
+ return mSize;
+}
- void* BufferBase::APIGetMappedRange(size_t offset, size_t size) {
- return GetMappedRange(offset, size, true);
+MaybeError BufferBase::CopyFromStagingBuffer() {
+ ASSERT(mStagingBuffer);
+ if (mSize == 0) {
+ // Staging buffer is not created if zero size.
+ ASSERT(mStagingBuffer == nullptr);
+ return {};
}
- const void* BufferBase::APIGetConstMappedRange(size_t offset, size_t size) {
- return GetMappedRange(offset, size, false);
+ DAWN_TRY(
+ GetDevice()->CopyFromStagingToBuffer(mStagingBuffer.get(), 0, this, 0, GetAllocatedSize()));
+
+ DynamicUploader* uploader = GetDevice()->GetDynamicUploader();
+ uploader->ReleaseStagingBuffer(std::move(mStagingBuffer));
+
+ return {};
+}
+
+void BufferBase::APIUnmap() {
+ if (GetDevice()->ConsumedError(ValidateUnmap(), "calling %s.Unmap().", this)) {
+ return;
}
+ Unmap();
+}
- void* BufferBase::GetMappedRange(size_t offset, size_t size, bool writable) {
- if (!CanGetMappedRange(writable, offset, size)) {
- return nullptr;
- }
+void BufferBase::Unmap() {
+ UnmapInternal(WGPUBufferMapAsyncStatus_UnmappedBeforeCallback);
+}
+
+void BufferBase::UnmapInternal(WGPUBufferMapAsyncStatus callbackStatus) {
+ if (mState == BufferState::Mapped) {
+ // A map request can only be called once, so this will fire only if the request wasn't
+ // completed before the Unmap.
+ // Callbacks are not fired if there is no callback registered, so this is correct for
+ // mappedAtCreation = true.
+ CallMapCallback(mLastMapID, callbackStatus);
+ UnmapImpl();
+ mMapCallback = nullptr;
+ mMapUserdata = 0;
+ } else if (mState == BufferState::MappedAtCreation) {
if (mStagingBuffer != nullptr) {
- return static_cast<uint8_t*>(mStagingBuffer->GetMappedPointer()) + offset;
- }
- if (mSize == 0) {
- return reinterpret_cast<uint8_t*>(intptr_t(0xCAFED00D));
+ GetDevice()->ConsumedError(CopyFromStagingBuffer());
+ } else if (mSize != 0) {
+ UnmapImpl();
}
- uint8_t* start = static_cast<uint8_t*>(GetMappedPointerImpl());
- return start == nullptr ? nullptr : start + offset;
}
- void BufferBase::APIDestroy() {
- Destroy();
- }
+ mState = BufferState::Unmapped;
+}
- MaybeError BufferBase::CopyFromStagingBuffer() {
- ASSERT(mStagingBuffer);
- if (mSize == 0) {
- // Staging buffer is not created if zero size.
- ASSERT(mStagingBuffer == nullptr);
- return {};
- }
+MaybeError BufferBase::ValidateMapAsync(wgpu::MapMode mode,
+ size_t offset,
+ size_t size,
+ WGPUBufferMapAsyncStatus* status) const {
+ *status = WGPUBufferMapAsyncStatus_DeviceLost;
+ DAWN_TRY(GetDevice()->ValidateIsAlive());
- DAWN_TRY(GetDevice()->CopyFromStagingToBuffer(mStagingBuffer.get(), 0, this, 0,
- GetAllocatedSize()));
+ *status = WGPUBufferMapAsyncStatus_Error;
+ DAWN_TRY(GetDevice()->ValidateObject(this));
- DynamicUploader* uploader = GetDevice()->GetDynamicUploader();
- uploader->ReleaseStagingBuffer(std::move(mStagingBuffer));
+ DAWN_INVALID_IF(uint64_t(offset) > mSize,
+ "Mapping offset (%u) is larger than the size (%u) of %s.", offset, mSize, this);
- return {};
- }
+ DAWN_INVALID_IF(offset % 8 != 0, "Offset (%u) must be a multiple of 8.", offset);
+ DAWN_INVALID_IF(size % 4 != 0, "Size (%u) must be a multiple of 4.", size);
- void BufferBase::APIUnmap() {
- if (GetDevice()->ConsumedError(ValidateUnmap(), "calling %s.Unmap().", this)) {
- return;
- }
- Unmap();
- }
+ DAWN_INVALID_IF(uint64_t(size) > mSize - uint64_t(offset),
+ "Mapping range (offset:%u, size: %u) doesn't fit in the size (%u) of %s.",
+ offset, size, mSize, this);
- void BufferBase::Unmap() {
- UnmapInternal(WGPUBufferMapAsyncStatus_UnmappedBeforeCallback);
+ switch (mState) {
+ case BufferState::Mapped:
+ case BufferState::MappedAtCreation:
+ return DAWN_FORMAT_VALIDATION_ERROR("%s is already mapped.", this);
+ case BufferState::Destroyed:
+ return DAWN_FORMAT_VALIDATION_ERROR("%s is destroyed.", this);
+ case BufferState::Unmapped:
+ break;
}
- void BufferBase::UnmapInternal(WGPUBufferMapAsyncStatus callbackStatus) {
- if (mState == BufferState::Mapped) {
- // A map request can only be called once, so this will fire only if the request wasn't
- // completed before the Unmap.
- // Callbacks are not fired if there is no callback registered, so this is correct for
- // mappedAtCreation = true.
- CallMapCallback(mLastMapID, callbackStatus);
- UnmapImpl();
-
- mMapCallback = nullptr;
- mMapUserdata = 0;
- } else if (mState == BufferState::MappedAtCreation) {
- if (mStagingBuffer != nullptr) {
- GetDevice()->ConsumedError(CopyFromStagingBuffer());
- } else if (mSize != 0) {
- UnmapImpl();
- }
- }
+ bool isReadMode = mode & wgpu::MapMode::Read;
+ bool isWriteMode = mode & wgpu::MapMode::Write;
+ DAWN_INVALID_IF(!(isReadMode ^ isWriteMode), "Map mode (%s) is not one of %s or %s.", mode,
+ wgpu::MapMode::Write, wgpu::MapMode::Read);
- mState = BufferState::Unmapped;
+ if (mode & wgpu::MapMode::Read) {
+ DAWN_INVALID_IF(!(mUsage & wgpu::BufferUsage::MapRead),
+ "The buffer usages (%s) do not contain %s.", mUsage,
+ wgpu::BufferUsage::MapRead);
+ } else {
+ ASSERT(mode & wgpu::MapMode::Write);
+ DAWN_INVALID_IF(!(mUsage & wgpu::BufferUsage::MapWrite),
+ "The buffer usages (%s) do not contain %s.", mUsage,
+ wgpu::BufferUsage::MapWrite);
}
- MaybeError BufferBase::ValidateMapAsync(wgpu::MapMode mode,
- size_t offset,
- size_t size,
- WGPUBufferMapAsyncStatus* status) const {
- *status = WGPUBufferMapAsyncStatus_DeviceLost;
- DAWN_TRY(GetDevice()->ValidateIsAlive());
-
- *status = WGPUBufferMapAsyncStatus_Error;
- DAWN_TRY(GetDevice()->ValidateObject(this));
-
- DAWN_INVALID_IF(uint64_t(offset) > mSize,
- "Mapping offset (%u) is larger than the size (%u) of %s.", offset, mSize,
- this);
-
- DAWN_INVALID_IF(offset % 8 != 0, "Offset (%u) must be a multiple of 8.", offset);
- DAWN_INVALID_IF(size % 4 != 0, "Size (%u) must be a multiple of 4.", size);
-
- DAWN_INVALID_IF(uint64_t(size) > mSize - uint64_t(offset),
- "Mapping range (offset:%u, size: %u) doesn't fit in the size (%u) of %s.",
- offset, size, mSize, this);
-
- switch (mState) {
- case BufferState::Mapped:
- case BufferState::MappedAtCreation:
- return DAWN_FORMAT_VALIDATION_ERROR("%s is already mapped.", this);
- case BufferState::Destroyed:
- return DAWN_FORMAT_VALIDATION_ERROR("%s is destroyed.", this);
- case BufferState::Unmapped:
- break;
- }
+ *status = WGPUBufferMapAsyncStatus_Success;
+ return {};
+}
- bool isReadMode = mode & wgpu::MapMode::Read;
- bool isWriteMode = mode & wgpu::MapMode::Write;
- DAWN_INVALID_IF(!(isReadMode ^ isWriteMode), "Map mode (%s) is not one of %s or %s.", mode,
- wgpu::MapMode::Write, wgpu::MapMode::Read);
+bool BufferBase::CanGetMappedRange(bool writable, size_t offset, size_t size) const {
+ if (offset % 8 != 0 || offset < mMapOffset || offset > mSize) {
+ return false;
+ }
- if (mode & wgpu::MapMode::Read) {
- DAWN_INVALID_IF(!(mUsage & wgpu::BufferUsage::MapRead),
- "The buffer usages (%s) do not contain %s.", mUsage,
- wgpu::BufferUsage::MapRead);
- } else {
- ASSERT(mode & wgpu::MapMode::Write);
- DAWN_INVALID_IF(!(mUsage & wgpu::BufferUsage::MapWrite),
- "The buffer usages (%s) do not contain %s.", mUsage,
- wgpu::BufferUsage::MapWrite);
- }
+ size_t rangeSize = size == WGPU_WHOLE_MAP_SIZE ? mSize - offset : size;
- *status = WGPUBufferMapAsyncStatus_Success;
- return {};
+ if (rangeSize % 4 != 0 || rangeSize > mMapSize) {
+ return false;
}
- bool BufferBase::CanGetMappedRange(bool writable, size_t offset, size_t size) const {
- if (offset % 8 != 0 || size % 4 != 0) {
- return false;
- }
+ size_t offsetInMappedRange = offset - mMapOffset;
+ if (offsetInMappedRange > mMapSize - rangeSize) {
+ return false;
+ }
- if (size > mMapSize || offset < mMapOffset) {
- return false;
- }
+ // Note that:
+ //
+ // - We don't check that the device is alive because the application can ask for the
+ //   mapped pointer before it, or even Dawn, knows that the device was lost, and the
+ //   call still needs to work properly.
+ // - We don't check that the object is alive because we need to return mapped pointers
+ // for error buffers too.
- size_t offsetInMappedRange = offset - mMapOffset;
- if (offsetInMappedRange > mMapSize - size) {
- return false;
- }
+ switch (mState) {
+ // Writeable Buffer::GetMappedRange is always allowed when mapped at creation.
+ case BufferState::MappedAtCreation:
+ return true;
- // Note that:
- //
- // - We don't check that the device is alive because the application can ask for the
- // mapped pointer before it knows, and even Dawn knows, that the device was lost, and
- // still needs to work properly.
- // - We don't check that the object is alive because we need to return mapped pointers
- // for error buffers too.
-
- switch (mState) {
- // Writeable Buffer::GetMappedRange is always allowed when mapped at creation.
- case BufferState::MappedAtCreation:
- return true;
-
- case BufferState::Mapped:
- ASSERT(bool(mMapMode & wgpu::MapMode::Read) ^
- bool(mMapMode & wgpu::MapMode::Write));
- return !writable || (mMapMode & wgpu::MapMode::Write);
-
- case BufferState::Unmapped:
- case BufferState::Destroyed:
- return false;
- }
- UNREACHABLE();
- }
+ case BufferState::Mapped:
+ ASSERT(bool{mMapMode & wgpu::MapMode::Read} ^ bool{mMapMode & wgpu::MapMode::Write});
+ return !writable || (mMapMode & wgpu::MapMode::Write);
- MaybeError BufferBase::ValidateUnmap() const {
- DAWN_TRY(GetDevice()->ValidateIsAlive());
-
- switch (mState) {
- case BufferState::Mapped:
- case BufferState::MappedAtCreation:
- // A buffer may be in the Mapped state if it was created with mappedAtCreation
- // even if it did not have a mappable usage.
- return {};
- case BufferState::Unmapped:
- return DAWN_FORMAT_VALIDATION_ERROR("%s is unmapped.", this);
- case BufferState::Destroyed:
- return DAWN_FORMAT_VALIDATION_ERROR("%s is destroyed.", this);
- }
- UNREACHABLE();
+ case BufferState::Unmapped:
+ case BufferState::Destroyed:
+ return false;
}
+ UNREACHABLE();
+}
- void BufferBase::OnMapRequestCompleted(MapRequestID mapID, WGPUBufferMapAsyncStatus status) {
- CallMapCallback(mapID, status);
- }
+MaybeError BufferBase::ValidateUnmap() const {
+ DAWN_TRY(GetDevice()->ValidateIsAlive());
- bool BufferBase::NeedsInitialization() const {
- return !mIsDataInitialized &&
- GetDevice()->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse);
+ switch (mState) {
+ case BufferState::Mapped:
+ case BufferState::MappedAtCreation:
+ // A buffer may be in the Mapped state if it was created with mappedAtCreation
+ // even if it did not have a mappable usage.
+ return {};
+ case BufferState::Unmapped:
+ return DAWN_FORMAT_VALIDATION_ERROR("%s is unmapped.", this);
+ case BufferState::Destroyed:
+ return DAWN_FORMAT_VALIDATION_ERROR("%s is destroyed.", this);
}
+ UNREACHABLE();
+}
- bool BufferBase::IsDataInitialized() const {
- return mIsDataInitialized;
- }
+void BufferBase::OnMapRequestCompleted(MapRequestID mapID, WGPUBufferMapAsyncStatus status) {
+ CallMapCallback(mapID, status);
+}
- void BufferBase::SetIsDataInitialized() {
- mIsDataInitialized = true;
- }
+bool BufferBase::NeedsInitialization() const {
+ return !mIsDataInitialized && GetDevice()->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse);
+}
- bool BufferBase::IsFullBufferRange(uint64_t offset, uint64_t size) const {
- return offset == 0 && size == GetSize();
- }
+bool BufferBase::IsDataInitialized() const {
+ return mIsDataInitialized;
+}
+
+void BufferBase::SetIsDataInitialized() {
+ mIsDataInitialized = true;
+}
+
+bool BufferBase::IsFullBufferRange(uint64_t offset, uint64_t size) const {
+ return offset == 0 && size == GetSize();
+}
} // namespace dawn::native
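
ValidateMapAsync above enforces a handful of rules before a map request is accepted: the offset must be a multiple of 8, the size a multiple of 4, the range must fit inside the buffer, and exactly one of Read or Write must be requested (plus the usage checks). The following standalone sketch, which is not Dawn's code and uses invented enum values, condenses the range and mode checks:

// Standalone condensation of the range and mode checks (invented enum values;
// not Dawn's code): offset must be 8-byte aligned, size 4-byte aligned, the
// range must fit in the buffer, and exactly one of Read or Write is required.
#include <cassert>
#include <cstdint>

enum MapMode : uint32_t { kMapRead = 1, kMapWrite = 2 };

static bool IsMapRequestValid(uint64_t bufferSize, uint64_t offset, uint64_t size, uint32_t mode) {
    if (offset % 8 != 0 || size % 4 != 0) {
        return false;  // Alignment rules.
    }
    if (offset > bufferSize || size > bufferSize - offset) {
        return false;  // Range must fit.
    }
    const bool read = (mode & kMapRead) != 0;
    const bool write = (mode & kMapWrite) != 0;
    return read != write;  // Exactly one mode.
}

int main() {
    assert(IsMapRequestValid(256, 0, 256, kMapRead));
    assert(!IsMapRequestValid(256, 4, 16, kMapWrite));             // Offset not a multiple of 8.
    assert(!IsMapRequestValid(256, 0, 6, kMapRead));               // Size not a multiple of 4.
    assert(!IsMapRequestValid(256, 0, 16, kMapRead | kMapWrite));  // Both modes requested.
    return 0;
}
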
diff --git a/chromium/third_party/dawn/src/dawn/native/Buffer.h b/chromium/third_party/dawn/src/dawn/native/Buffer.h
index 60a41bceee6..6bb15033406 100644
--- a/chromium/third_party/dawn/src/dawn/native/Buffer.h
+++ b/chromium/third_party/dawn/src/dawn/native/Buffer.h
@@ -15,6 +15,8 @@
#ifndef SRC_DAWN_NATIVE_BUFFER_H_
#define SRC_DAWN_NATIVE_BUFFER_H_
+#include <memory>
+
#include "dawn/native/Error.h"
#include "dawn/native/Forward.h"
#include "dawn/native/IntegerTypes.h"
@@ -22,113 +24,115 @@
#include "dawn/native/dawn_platform.h"
-#include <memory>
-
namespace dawn::native {
- struct CopyTextureToBufferCmd;
-
- enum class MapType : uint32_t;
-
- MaybeError ValidateBufferDescriptor(DeviceBase* device, const BufferDescriptor* descriptor);
-
- static constexpr wgpu::BufferUsage kReadOnlyBufferUsages =
- wgpu::BufferUsage::MapRead | wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::Index |
- wgpu::BufferUsage::Vertex | wgpu::BufferUsage::Uniform | kReadOnlyStorageBuffer |
- wgpu::BufferUsage::Indirect;
-
- static constexpr wgpu::BufferUsage kMappableBufferUsages =
- wgpu::BufferUsage::MapRead | wgpu::BufferUsage::MapWrite;
-
- class BufferBase : public ApiObjectBase {
- public:
- enum class BufferState {
- Unmapped,
- Mapped,
- MappedAtCreation,
- Destroyed,
- };
- BufferBase(DeviceBase* device, const BufferDescriptor* descriptor);
-
- static BufferBase* MakeError(DeviceBase* device, const BufferDescriptor* descriptor);
-
- ObjectType GetType() const override;
-
- uint64_t GetSize() const;
- uint64_t GetAllocatedSize() const;
- wgpu::BufferUsage GetUsage() const;
-
- MaybeError MapAtCreation();
- void OnMapRequestCompleted(MapRequestID mapID, WGPUBufferMapAsyncStatus status);
-
- MaybeError ValidateCanUseOnQueueNow() const;
-
- bool IsFullBufferRange(uint64_t offset, uint64_t size) const;
- bool NeedsInitialization() const;
- bool IsDataInitialized() const;
- void SetIsDataInitialized();
-
- void* GetMappedRange(size_t offset, size_t size, bool writable = true);
- void Unmap();
-
- // Dawn API
- void APIMapAsync(wgpu::MapMode mode,
- size_t offset,
- size_t size,
- WGPUBufferMapCallback callback,
- void* userdata);
- void* APIGetMappedRange(size_t offset, size_t size);
- const void* APIGetConstMappedRange(size_t offset, size_t size);
- void APIUnmap();
- void APIDestroy();
-
- protected:
- BufferBase(DeviceBase* device,
- const BufferDescriptor* descriptor,
- ObjectBase::ErrorTag tag);
-
- // Constructor used only for mocking and testing.
- BufferBase(DeviceBase* device, BufferState state);
- void DestroyImpl() override;
-
- ~BufferBase() override;
-
- MaybeError MapAtCreationInternal();
-
- uint64_t mAllocatedSize = 0;
-
- private:
- virtual MaybeError MapAtCreationImpl() = 0;
- virtual MaybeError MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) = 0;
- virtual void UnmapImpl() = 0;
- virtual void* GetMappedPointerImpl() = 0;
+struct CopyTextureToBufferCmd;
- virtual bool IsCPUWritableAtCreation() const = 0;
- MaybeError CopyFromStagingBuffer();
- void CallMapCallback(MapRequestID mapID, WGPUBufferMapAsyncStatus status);
+enum class MapType : uint32_t;
- MaybeError ValidateMapAsync(wgpu::MapMode mode,
- size_t offset,
- size_t size,
- WGPUBufferMapAsyncStatus* status) const;
- MaybeError ValidateUnmap() const;
- bool CanGetMappedRange(bool writable, size_t offset, size_t size) const;
- void UnmapInternal(WGPUBufferMapAsyncStatus callbackStatus);
+MaybeError ValidateBufferDescriptor(DeviceBase* device, const BufferDescriptor* descriptor);
- uint64_t mSize = 0;
- wgpu::BufferUsage mUsage = wgpu::BufferUsage::None;
- BufferState mState;
- bool mIsDataInitialized = false;
+static constexpr wgpu::BufferUsage kReadOnlyBufferUsages =
+ wgpu::BufferUsage::MapRead | wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::Index |
+ wgpu::BufferUsage::Vertex | wgpu::BufferUsage::Uniform | kReadOnlyStorageBuffer |
+ wgpu::BufferUsage::Indirect;
- std::unique_ptr<StagingBufferBase> mStagingBuffer;
+static constexpr wgpu::BufferUsage kMappableBufferUsages =
+ wgpu::BufferUsage::MapRead | wgpu::BufferUsage::MapWrite;
- WGPUBufferMapCallback mMapCallback = nullptr;
- void* mMapUserdata = 0;
- MapRequestID mLastMapID = MapRequestID(0);
- wgpu::MapMode mMapMode = wgpu::MapMode::None;
- size_t mMapOffset = 0;
- size_t mMapSize = 0;
+class BufferBase : public ApiObjectBase {
+ public:
+ enum class BufferState {
+ Unmapped,
+ Mapped,
+ MappedAtCreation,
+ Destroyed,
};
+ static BufferBase* MakeError(DeviceBase* device, const BufferDescriptor* descriptor);
+
+ ObjectType GetType() const override;
+
+ uint64_t GetSize() const;
+ uint64_t GetAllocatedSize() const;
+
+ // |GetUsageExternalOnly| returns the usage with which the buffer was created using the
+ // base WebGPU API. Additional usages may be added for internal state tracking. |GetUsage|
+ // returns the union of base usage and the usages added internally.
+ wgpu::BufferUsage GetUsage() const;
+ wgpu::BufferUsage GetUsageExternalOnly() const;
+
+ MaybeError MapAtCreation();
+ void OnMapRequestCompleted(MapRequestID mapID, WGPUBufferMapAsyncStatus status);
+
+ MaybeError ValidateCanUseOnQueueNow() const;
+
+ bool IsFullBufferRange(uint64_t offset, uint64_t size) const;
+ bool NeedsInitialization() const;
+ bool IsDataInitialized() const;
+ void SetIsDataInitialized();
+
+ void* GetMappedRange(size_t offset, size_t size, bool writable = true);
+ void Unmap();
+
+ // Dawn API
+ void APIMapAsync(wgpu::MapMode mode,
+ size_t offset,
+ size_t size,
+ WGPUBufferMapCallback callback,
+ void* userdata);
+ void* APIGetMappedRange(size_t offset, size_t size);
+ const void* APIGetConstMappedRange(size_t offset, size_t size);
+ void APIUnmap();
+ void APIDestroy();
+ wgpu::BufferUsage APIGetUsage() const;
+ uint64_t APIGetSize() const;
+
+ protected:
+ BufferBase(DeviceBase* device, const BufferDescriptor* descriptor);
+ BufferBase(DeviceBase* device, const BufferDescriptor* descriptor, ObjectBase::ErrorTag tag);
+ // Constructor used only for mocking and testing.
+ BufferBase(DeviceBase* device, BufferState state);
+
+ void DestroyImpl() override;
+
+ ~BufferBase() override;
+
+ MaybeError MapAtCreationInternal();
+
+ uint64_t mAllocatedSize = 0;
+
+ private:
+ virtual MaybeError MapAtCreationImpl() = 0;
+ virtual MaybeError MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) = 0;
+ virtual void UnmapImpl() = 0;
+ virtual void* GetMappedPointerImpl() = 0;
+
+ virtual bool IsCPUWritableAtCreation() const = 0;
+ MaybeError CopyFromStagingBuffer();
+ void CallMapCallback(MapRequestID mapID, WGPUBufferMapAsyncStatus status);
+
+ MaybeError ValidateMapAsync(wgpu::MapMode mode,
+ size_t offset,
+ size_t size,
+ WGPUBufferMapAsyncStatus* status) const;
+ MaybeError ValidateUnmap() const;
+ bool CanGetMappedRange(bool writable, size_t offset, size_t size) const;
+ void UnmapInternal(WGPUBufferMapAsyncStatus callbackStatus);
+
+ uint64_t mSize = 0;
+ wgpu::BufferUsage mUsage = wgpu::BufferUsage::None;
+ BufferState mState;
+ bool mIsDataInitialized = false;
+
+ std::unique_ptr<StagingBufferBase> mStagingBuffer;
+
+ WGPUBufferMapCallback mMapCallback = nullptr;
+ void* mMapUserdata = 0;
+ MapRequestID mLastMapID = MapRequestID(0);
+ wgpu::MapMode mMapMode = wgpu::MapMode::None;
+ size_t mMapOffset = 0;
+ size_t mMapSize = 0;
+};
} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/CMakeLists.txt b/chromium/third_party/dawn/src/dawn/native/CMakeLists.txt
index 0acb7ba7b55..f704ccbb016 100644
--- a/chromium/third_party/dawn/src/dawn/native/CMakeLists.txt
+++ b/chromium/third_party/dawn/src/dawn/native/CMakeLists.txt
@@ -18,7 +18,7 @@ DawnJSONGenerator(
RESULT_VARIABLE "DAWN_NATIVE_UTILS_GEN_SOURCES"
)
-add_library(dawn_native ${DAWN_DUMMY_FILE})
+add_library(dawn_native ${DAWN_PLACEHOLDER_FILE})
common_compile_options(dawn_native)
target_compile_definitions(dawn_native PRIVATE "DAWN_NATIVE_IMPLEMENTATION")
@@ -45,6 +45,10 @@ target_sources(dawn_native PRIVATE
"BindGroupTracker.h"
"BindingInfo.cpp"
"BindingInfo.h"
+ "Blob.cpp"
+ "Blob.h"
+ "BlobCache.cpp"
+ "BlobCache.h"
"BuddyAllocator.cpp"
"BuddyAllocator.h"
"BuddyMemoryAllocator.cpp"
@@ -117,15 +121,16 @@ target_sources(dawn_native PRIVATE
"Limits.h"
"ObjectBase.cpp"
"ObjectBase.h"
+ "PassResourceUsage.cpp"
"PassResourceUsage.h"
"PassResourceUsageTracker.cpp"
"PassResourceUsageTracker.h"
- "PersistentCache.cpp"
- "PersistentCache.h"
"PerStage.cpp"
"PerStage.h"
"Pipeline.cpp"
"Pipeline.h"
+ "PipelineCache.cpp"
+ "PipelineCache.h"
"PipelineLayout.cpp"
"PipelineLayout.h"
"PooledResourceMemoryAllocator.cpp"
@@ -138,6 +143,8 @@ target_sources(dawn_native PRIVATE
"QuerySet.h"
"Queue.cpp"
"Queue.h"
+ "RefCountedWithExternalCount.cpp"
+ "RefCountedWithExternalCount.h"
"RenderBundle.cpp"
"RenderBundle.h"
"RenderBundleEncoder.cpp"
@@ -236,12 +243,15 @@ if (DAWN_ENABLE_D3D12)
"d3d12/BindGroupD3D12.h"
"d3d12/BindGroupLayoutD3D12.cpp"
"d3d12/BindGroupLayoutD3D12.h"
+ "d3d12/BlobD3D12.cpp"
+ "d3d12/BlobD3D12.h"
"d3d12/BufferD3D12.cpp"
"d3d12/BufferD3D12.h"
"d3d12/CPUDescriptorHeapAllocationD3D12.cpp"
"d3d12/CPUDescriptorHeapAllocationD3D12.h"
"d3d12/CommandAllocatorManager.cpp"
"d3d12/CommandAllocatorManager.h"
+ "d3d12/CacheKeyD3D12.cpp"
"d3d12/CommandBufferD3D12.cpp"
"d3d12/CommandBufferD3D12.h"
"d3d12/CommandRecordingContext.cpp"
@@ -256,6 +266,8 @@ if (DAWN_ENABLE_D3D12)
"d3d12/D3D12Info.h"
"d3d12/DeviceD3D12.cpp"
"d3d12/DeviceD3D12.h"
+ "d3d12/ExternalImageDXGIImpl.cpp"
+ "d3d12/ExternalImageDXGIImpl.h"
"d3d12/Forward.h"
"d3d12/GPUDescriptorHeapAllocationD3D12.cpp"
"d3d12/GPUDescriptorHeapAllocationD3D12.h"
@@ -391,6 +403,8 @@ if (DAWN_ENABLE_OPENGL)
target_sources(dawn_native PRIVATE
"${DAWN_INCLUDE_DIR}/dawn/native/OpenGLBackend.h"
${DAWN_NATIVE_OPENGL_AUTOGEN_SOURCES}
+ "opengl/AdapterGL.cpp"
+ "opengl/AdapterGL.h"
"opengl/BackendGL.cpp"
"opengl/BackendGL.h"
"opengl/BindGroupGL.cpp"
@@ -477,6 +491,8 @@ if (DAWN_ENABLE_VULKAN)
"vulkan/Forward.h"
"vulkan/NativeSwapChainImplVk.cpp"
"vulkan/NativeSwapChainImplVk.h"
+ "vulkan/PipelineCacheVk.cpp"
+ "vulkan/PipelineCacheVk.h"
"vulkan/PipelineLayoutVk.cpp"
"vulkan/PipelineLayoutVk.h"
"vulkan/QuerySetVk.cpp"
@@ -555,7 +571,7 @@ DawnJSONGenerator(
RESULT_VARIABLE "WEBGPU_DAWN_NATIVE_PROC_GEN"
)
-add_library(webgpu_dawn ${DAWN_DUMMY_FILE})
+add_library(webgpu_dawn ${DAWN_PLACEHOLDER_FILE})
common_compile_options(webgpu_dawn)
target_link_libraries(webgpu_dawn PRIVATE dawn_native)
target_compile_definitions(webgpu_dawn PRIVATE "WGPU_IMPLEMENTATION")
diff --git a/chromium/third_party/dawn/src/dawn/native/CacheKey.cpp b/chromium/third_party/dawn/src/dawn/native/CacheKey.cpp
index dea67f848f3..495b013ed98 100644
--- a/chromium/third_party/dawn/src/dawn/native/CacheKey.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/CacheKey.cpp
@@ -18,26 +18,26 @@
namespace dawn::native {
- std::ostream& operator<<(std::ostream& os, const CacheKey& key) {
- os << std::hex;
- for (const int b : key) {
- os << std::setfill('0') << std::setw(2) << b << " ";
- }
- os << std::dec;
- return os;
+std::ostream& operator<<(std::ostream& os, const CacheKey& key) {
+ os << std::hex;
+ for (const int b : key) {
+ os << std::setfill('0') << std::setw(2) << b << " ";
}
+ os << std::dec;
+ return os;
+}
- template <>
- void CacheKeySerializer<std::string>::Serialize(CacheKey* key, const std::string& t) {
- key->Record(static_cast<size_t>(t.length()));
- key->insert(key->end(), t.begin(), t.end());
- }
+template <>
+void CacheKeySerializer<std::string>::Serialize(CacheKey* key, const std::string& t) {
+ key->Record(static_cast<size_t>(t.length()));
+ key->insert(key->end(), t.begin(), t.end());
+}
- template <>
- void CacheKeySerializer<CacheKey>::Serialize(CacheKey* key, const CacheKey& t) {
- // For nested cache keys, we do not record the length, and just copy the key so that it
- // appears we just flatten the keys into a single key.
- key->insert(key->end(), t.begin(), t.end());
- }
+template <>
+void CacheKeySerializer<CacheKey>::Serialize(CacheKey* key, const CacheKey& t) {
+ // For nested cache keys, we do not record the length, and just copy the key so that it
+ // appears we just flatten the keys into a single key.
+ key->insert(key->end(), t.begin(), t.end());
+}
} // namespace dawn::native
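
The two specializations above encode different policies: strings are length-prefixed so their boundaries survive inside the key, while nested CacheKeys are appended verbatim so they flatten into the parent. A small standalone sketch of that byte layout (helper names are invented; not Dawn's code):

// Small standalone sketch of the byte layout: strings get a length prefix,
// nested keys are appended verbatim so they flatten into the parent key.
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <string>
#include <vector>

using Key = std::vector<uint8_t>;

static void RecordSizeT(Key* key, size_t value) {
    const uint8_t* bytes = reinterpret_cast<const uint8_t*>(&value);
    key->insert(key->end(), bytes, bytes + sizeof(value));
}

static void RecordString(Key* key, const std::string& s) {
    RecordSizeT(key, s.length());                 // Length prefix.
    key->insert(key->end(), s.begin(), s.end());  // Raw bytes.
}

static void RecordNestedKey(Key* key, const Key& nested) {
    key->insert(key->end(), nested.begin(), nested.end());  // No prefix: flattened.
}

int main() {
    Key nested;
    RecordString(&nested, "vs");  // sizeof(size_t) + 2 bytes.

    Key key;
    RecordString(&key, "shader");  // sizeof(size_t) + 6 bytes.
    RecordNestedKey(&key, nested);

    assert(key.size() == sizeof(size_t) + 6 + nested.size());
    return 0;
}
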
diff --git a/chromium/third_party/dawn/src/dawn/native/CacheKey.h b/chromium/third_party/dawn/src/dawn/native/CacheKey.h
index 2d58d76cb6b..357ce4b325b 100644
--- a/chromium/third_party/dawn/src/dawn/native/CacheKey.h
+++ b/chromium/third_party/dawn/src/dawn/native/CacheKey.h
@@ -15,9 +15,6 @@
#ifndef SRC_DAWN_NATIVE_CACHEKEY_H_
#define SRC_DAWN_NATIVE_CACHEKEY_H_
-#include "dawn/common/TypedInteger.h"
-#include "dawn/common/ityp_array.h"
-
#include <bitset>
#include <iostream>
#include <limits>
@@ -25,179 +22,180 @@
#include <type_traits>
#include <vector>
+#include "dawn/common/TypedInteger.h"
+#include "dawn/common/ityp_array.h"
+
namespace dawn::native {
- // Forward declare classes because of co-dependency.
- class CacheKey;
- class CachedObject;
-
- // Stream operator for CacheKey for debugging.
- std::ostream& operator<<(std::ostream& os, const CacheKey& key);
-
- // Overridable serializer struct that should be implemented for cache key serializable
- // types/classes.
- template <typename T, typename SFINAE = void>
- class CacheKeySerializer {
- public:
- static void Serialize(CacheKey* key, const T& t);
- };
-
- class CacheKey : public std::vector<uint8_t> {
- public:
- using std::vector<uint8_t>::vector;
-
- template <typename T>
- CacheKey& Record(const T& t) {
- CacheKeySerializer<T>::Serialize(this, t);
- return *this;
- }
- template <typename T, typename... Args>
- CacheKey& Record(const T& t, const Args&... args) {
- CacheKeySerializer<T>::Serialize(this, t);
- return Record(args...);
- }
+// Forward declare classes because of co-dependency.
+class CacheKey;
+class CachedObject;
- // Records iterables by prepending the number of elements. Some common iterables are have a
- // CacheKeySerializer implemented to avoid needing to split them out when recording, i.e.
- // strings and CacheKeys, but they fundamentally do the same as this function.
- template <typename IterableT>
- CacheKey& RecordIterable(const IterableT& iterable) {
- // Always record the size of generic iterables as a size_t for now.
- Record(static_cast<size_t>(iterable.size()));
- for (auto it = iterable.begin(); it != iterable.end(); ++it) {
- Record(*it);
- }
- return *this;
- }
- template <typename Index, typename Value, size_t Size>
- CacheKey& RecordIterable(const ityp::array<Index, Value, Size>& iterable) {
- Record(static_cast<Index>(iterable.size()));
- for (auto it = iterable.begin(); it != iterable.end(); ++it) {
- Record(*it);
- }
- return *this;
- }
- template <typename Ptr>
- CacheKey& RecordIterable(const Ptr* ptr, size_t n) {
- Record(n);
- for (size_t i = 0; i < n; ++i) {
- Record(ptr[i]);
- }
- return *this;
- }
- };
+// Stream operator for CacheKey for debugging.
+std::ostream& operator<<(std::ostream& os, const CacheKey& key);
- // Specialized overload for fundamental types.
- template <typename T>
- class CacheKeySerializer<T, std::enable_if_t<std::is_fundamental_v<T>>> {
- public:
- static void Serialize(CacheKey* key, const T t) {
- const char* it = reinterpret_cast<const char*>(&t);
- key->insert(key->end(), it, (it + sizeof(T)));
- }
- };
-
- // Specialized overload for bitsets that are smaller than 64.
- template <size_t N>
- class CacheKeySerializer<std::bitset<N>, std::enable_if_t<(N <= 64)>> {
- public:
- static void Serialize(CacheKey* key, const std::bitset<N>& t) {
- key->Record(t.to_ullong());
- }
- };
-
- // Specialized overload for bitsets since using the built-in to_ullong have a size limit.
- template <size_t N>
- class CacheKeySerializer<std::bitset<N>, std::enable_if_t<(N > 64)>> {
- public:
- static void Serialize(CacheKey* key, const std::bitset<N>& t) {
- // Serializes the bitset into series of uint8_t, along with recording the size.
- static_assert(N > 0);
- key->Record(static_cast<size_t>(N));
- uint8_t value = 0;
- for (size_t i = 0; i < N; i++) {
- value <<= 1;
- // Explicitly convert to numeric since MSVC doesn't like mixing of bools.
- value |= t[i] ? 1 : 0;
- if (i % 8 == 7) {
- // Whenever we fill an 8 bit value, record it and zero it out.
- key->Record(value);
- value = 0;
- }
- }
- // Serialize the last value if we are not a multiple of 8.
- if (N % 8 != 0) {
- key->Record(value);
- }
- }
- };
+// Overridable serializer struct that should be implemented for cache key serializable
+// types/classes.
+template <typename T, typename SFINAE = void>
+class CacheKeySerializer {
+ public:
+ static void Serialize(CacheKey* key, const T& t);
+};
+
+class CacheKey : public std::vector<uint8_t> {
+ public:
+ using std::vector<uint8_t>::vector;
+
+ enum class Type { ComputePipeline, RenderPipeline, Shader };
- // Specialized overload for enums.
template <typename T>
- class CacheKeySerializer<T, std::enable_if_t<std::is_enum_v<T>>> {
- public:
- static void Serialize(CacheKey* key, const T t) {
- CacheKeySerializer<std::underlying_type_t<T>>::Serialize(
- key, static_cast<std::underlying_type_t<T>>(t));
+ CacheKey& Record(const T& t) {
+ CacheKeySerializer<T>::Serialize(this, t);
+ return *this;
+ }
+ template <typename T, typename... Args>
+ CacheKey& Record(const T& t, const Args&... args) {
+ CacheKeySerializer<T>::Serialize(this, t);
+ return Record(args...);
+ }
+
+    // Records iterables by prepending the number of elements. Some common iterables have a
+    // CacheKeySerializer implemented to avoid needing to split them out when recording, e.g.
+    // strings and CacheKeys, but they fundamentally do the same as this function.
+ template <typename IterableT>
+ CacheKey& RecordIterable(const IterableT& iterable) {
+ // Always record the size of generic iterables as a size_t for now.
+ Record(static_cast<size_t>(iterable.size()));
+ for (auto it = iterable.begin(); it != iterable.end(); ++it) {
+ Record(*it);
}
- };
-
- // Specialized overload for TypedInteger.
- template <typename Tag, typename Integer>
- class CacheKeySerializer<::detail::TypedIntegerImpl<Tag, Integer>> {
- public:
- static void Serialize(CacheKey* key, const ::detail::TypedIntegerImpl<Tag, Integer> t) {
- CacheKeySerializer<Integer>::Serialize(key, static_cast<Integer>(t));
+ return *this;
+ }
+ template <typename Index, typename Value, size_t Size>
+ CacheKey& RecordIterable(const ityp::array<Index, Value, Size>& iterable) {
+ Record(static_cast<Index>(iterable.size()));
+ for (auto it = iterable.begin(); it != iterable.end(); ++it) {
+ Record(*it);
}
- };
-
- // Specialized overload for pointers. Since we are serializing for a cache key, we always
- // serialize via value, not by pointer. To handle nullptr scenarios, we always serialize whether
- // the pointer was nullptr followed by the contents if applicable.
- template <typename T>
- class CacheKeySerializer<T, std::enable_if_t<std::is_pointer_v<T>>> {
- public:
- static void Serialize(CacheKey* key, const T t) {
- key->Record(t == nullptr);
- if (t != nullptr) {
- CacheKeySerializer<std::remove_cv_t<std::remove_pointer_t<T>>>::Serialize(key, *t);
+ return *this;
+ }
+ template <typename Ptr>
+ CacheKey& RecordIterable(const Ptr* ptr, size_t n) {
+ Record(n);
+ for (size_t i = 0; i < n; ++i) {
+ Record(ptr[i]);
+ }
+ return *this;
+ }
+};
+
+// Specialized overload for fundamental types.
+template <typename T>
+class CacheKeySerializer<T, std::enable_if_t<std::is_fundamental_v<T>>> {
+ public:
+ static void Serialize(CacheKey* key, const T t) {
+ const char* it = reinterpret_cast<const char*>(&t);
+ key->insert(key->end(), it, (it + sizeof(T)));
+ }
+};
+
+// Specialized overload for bitsets that fit in 64 bits or fewer.
+template <size_t N>
+class CacheKeySerializer<std::bitset<N>, std::enable_if_t<(N <= 64)>> {
+ public:
+ static void Serialize(CacheKey* key, const std::bitset<N>& t) { key->Record(t.to_ullong()); }
+};
+
+// Specialized overload for larger bitsets, since the built-in to_ullong has a size limit.
+template <size_t N>
+class CacheKeySerializer<std::bitset<N>, std::enable_if_t<(N > 64)>> {
+ public:
+ static void Serialize(CacheKey* key, const std::bitset<N>& t) {
+        // Serializes the bitset into a series of uint8_t, along with recording the size.
+ static_assert(N > 0);
+ key->Record(static_cast<size_t>(N));
+ uint8_t value = 0;
+ for (size_t i = 0; i < N; i++) {
+ value <<= 1;
+ // Explicitly convert to numeric since MSVC doesn't like mixing of bools.
+ value |= t[i] ? 1 : 0;
+ if (i % 8 == 7) {
+ // Whenever we fill an 8 bit value, record it and zero it out.
+ key->Record(value);
+ value = 0;
}
}
- };
-
- // Specialized overload for fixed arrays of primitives.
- template <typename T, size_t N>
- class CacheKeySerializer<T[N], std::enable_if_t<std::is_fundamental_v<T>>> {
- public:
- static void Serialize(CacheKey* key, const T (&t)[N]) {
- static_assert(N > 0);
- key->Record(static_cast<size_t>(N));
- const char* it = reinterpret_cast<const char*>(t);
- key->insert(key->end(), it, it + sizeof(t));
+ // Serialize the last value if we are not a multiple of 8.
+ if (N % 8 != 0) {
+ key->Record(value);
}
- };
-
- // Specialized overload for fixed arrays of non-primitives.
- template <typename T, size_t N>
- class CacheKeySerializer<T[N], std::enable_if_t<!std::is_fundamental_v<T>>> {
- public:
- static void Serialize(CacheKey* key, const T (&t)[N]) {
- static_assert(N > 0);
- key->Record(static_cast<size_t>(N));
- for (size_t i = 0; i < N; i++) {
- key->Record(t[i]);
- }
+ }
+};
+
+// Specialized overload for enums.
+template <typename T>
+class CacheKeySerializer<T, std::enable_if_t<std::is_enum_v<T>>> {
+ public:
+ static void Serialize(CacheKey* key, const T t) {
+ CacheKeySerializer<std::underlying_type_t<T>>::Serialize(
+ key, static_cast<std::underlying_type_t<T>>(t));
+ }
+};
+
+// Specialized overload for TypedInteger.
+template <typename Tag, typename Integer>
+class CacheKeySerializer<::detail::TypedIntegerImpl<Tag, Integer>> {
+ public:
+ static void Serialize(CacheKey* key, const ::detail::TypedIntegerImpl<Tag, Integer> t) {
+ CacheKeySerializer<Integer>::Serialize(key, static_cast<Integer>(t));
+ }
+};
+
+// Specialized overload for pointers. Since we are serializing for a cache key, we always
+// serialize by value, not by pointer. To handle nullptr, we always serialize whether the
+// pointer was nullptr, followed by the contents if applicable.
+template <typename T>
+class CacheKeySerializer<T, std::enable_if_t<std::is_pointer_v<T>>> {
+ public:
+ static void Serialize(CacheKey* key, const T t) {
+ key->Record(t == nullptr);
+ if (t != nullptr) {
+ CacheKeySerializer<std::remove_cv_t<std::remove_pointer_t<T>>>::Serialize(key, *t);
}
- };
-
- // Specialized overload for CachedObjects.
- template <typename T>
- class CacheKeySerializer<T, std::enable_if_t<std::is_base_of_v<CachedObject, T>>> {
- public:
- static void Serialize(CacheKey* key, const T& t) {
- key->Record(t.GetCacheKey());
+ }
+};
+
+// Specialized overload for fixed arrays of primitives.
+template <typename T, size_t N>
+class CacheKeySerializer<T[N], std::enable_if_t<std::is_fundamental_v<T>>> {
+ public:
+ static void Serialize(CacheKey* key, const T (&t)[N]) {
+ static_assert(N > 0);
+ key->Record(static_cast<size_t>(N));
+ const char* it = reinterpret_cast<const char*>(t);
+ key->insert(key->end(), it, it + sizeof(t));
+ }
+};
+
+// Specialized overload for fixed arrays of non-primitives.
+template <typename T, size_t N>
+class CacheKeySerializer<T[N], std::enable_if_t<!std::is_fundamental_v<T>>> {
+ public:
+ static void Serialize(CacheKey* key, const T (&t)[N]) {
+ static_assert(N > 0);
+ key->Record(static_cast<size_t>(N));
+ for (size_t i = 0; i < N; i++) {
+ key->Record(t[i]);
}
- };
+ }
+};
+
+// Specialized overload for CachedObjects.
+template <typename T>
+class CacheKeySerializer<T, std::enable_if_t<std::is_base_of_v<CachedObject, T>>> {
+ public:
+ static void Serialize(CacheKey* key, const T& t) { key->Record(t.GetCacheKey()); }
+};
} // namespace dawn::native
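
For reference, a minimal sketch (not part of the patch) of a CacheKeySerializer specialization for a user-defined type, following the pattern of the overloads above; MyOptions is a hypothetical struct, not a Dawn type:

    // Sketch only: full specialization for a small struct of fundamental members.
    #include <cstdint>

    #include "dawn/native/CacheKey.h"

    struct MyOptions {
        uint32_t sampleCount;
        bool depthEnabled;
    };

    namespace dawn::native {
    template <>
    class CacheKeySerializer<MyOptions> {
      public:
        static void Serialize(CacheKey* key, const MyOptions& t) {
            // Both members are fundamental types, so the variadic Record overload applies.
            key->Record(t.sampleCount, t.depthEnabled);
        }
    };
    }  // namespace dawn::native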
diff --git a/chromium/third_party/dawn/src/dawn/native/CachedObject.cpp b/chromium/third_party/dawn/src/dawn/native/CachedObject.cpp
index e7e7cd84d55..5fa6a0affd9 100644
--- a/chromium/third_party/dawn/src/dawn/native/CachedObject.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/CachedObject.cpp
@@ -19,35 +19,31 @@
namespace dawn::native {
- bool CachedObject::IsCachedReference() const {
- return mIsCachedReference;
- }
-
- void CachedObject::SetIsCachedReference() {
- mIsCachedReference = true;
- }
-
- size_t CachedObject::HashFunc::operator()(const CachedObject* obj) const {
- return obj->GetContentHash();
- }
-
- size_t CachedObject::GetContentHash() const {
- ASSERT(mIsContentHashInitialized);
- return mContentHash;
- }
-
- void CachedObject::SetContentHash(size_t contentHash) {
- ASSERT(!mIsContentHashInitialized);
- mContentHash = contentHash;
- mIsContentHashInitialized = true;
- }
-
- const CacheKey& CachedObject::GetCacheKey() const {
- return mCacheKey;
- }
-
- CacheKey* CachedObject::GetCacheKey() {
- return &mCacheKey;
- }
+bool CachedObject::IsCachedReference() const {
+ return mIsCachedReference;
+}
+
+void CachedObject::SetIsCachedReference() {
+ mIsCachedReference = true;
+}
+
+size_t CachedObject::HashFunc::operator()(const CachedObject* obj) const {
+ return obj->GetContentHash();
+}
+
+size_t CachedObject::GetContentHash() const {
+ ASSERT(mIsContentHashInitialized);
+ return mContentHash;
+}
+
+void CachedObject::SetContentHash(size_t contentHash) {
+ ASSERT(!mIsContentHashInitialized);
+ mContentHash = contentHash;
+ mIsContentHashInitialized = true;
+}
+
+const CacheKey& CachedObject::GetCacheKey() const {
+ return mCacheKey;
+}
} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/CachedObject.h b/chromium/third_party/dawn/src/dawn/native/CachedObject.h
index 5e485602be3..3fbba631157 100644
--- a/chromium/third_party/dawn/src/dawn/native/CachedObject.h
+++ b/chromium/third_party/dawn/src/dawn/native/CachedObject.h
@@ -15,50 +15,49 @@
#ifndef SRC_DAWN_NATIVE_CACHEDOBJECT_H_
#define SRC_DAWN_NATIVE_CACHEDOBJECT_H_
-#include "dawn/native/CacheKey.h"
-#include "dawn/native/Forward.h"
-
#include <cstddef>
#include <string>
+#include "dawn/native/CacheKey.h"
+#include "dawn/native/Forward.h"
+
namespace dawn::native {
- // Some objects are cached so that instead of creating new duplicate objects,
- // we increase the refcount of an existing object.
- // When an object is successfully created, the device should call
- // SetIsCachedReference() and insert the object into the cache.
- class CachedObject {
- public:
- bool IsCachedReference() const;
+// Some objects are cached so that instead of creating new duplicate objects,
+// we increase the refcount of an existing object.
+// When an object is successfully created, the device should call
+// SetIsCachedReference() and insert the object into the cache.
+class CachedObject {
+ public:
+ bool IsCachedReference() const;
- // Functor necessary for the unordered_set<CachedObject*>-based cache.
- struct HashFunc {
- size_t operator()(const CachedObject* obj) const;
- };
+ // Functor necessary for the unordered_set<CachedObject*>-based cache.
+ struct HashFunc {
+ size_t operator()(const CachedObject* obj) const;
+ };
- size_t GetContentHash() const;
- void SetContentHash(size_t contentHash);
+ size_t GetContentHash() const;
+ void SetContentHash(size_t contentHash);
- // Returns the cache key for the object only, i.e. without device/adapter information.
- const CacheKey& GetCacheKey() const;
+ // Returns the cache key for the object only, i.e. without device/adapter information.
+ const CacheKey& GetCacheKey() const;
- protected:
- // Protected accessor for derived classes to access and modify the key.
- CacheKey* GetCacheKey();
+ protected:
+ // Cache key member is protected so that derived classes can modify it.
+ CacheKey mCacheKey;
- private:
- friend class DeviceBase;
- void SetIsCachedReference();
+ private:
+ friend class DeviceBase;
+ void SetIsCachedReference();
- bool mIsCachedReference = false;
+ bool mIsCachedReference = false;
- // Called by ObjectContentHasher upon creation to record the object.
- virtual size_t ComputeContentHash() = 0;
+ // Called by ObjectContentHasher upon creation to record the object.
+ virtual size_t ComputeContentHash() = 0;
- size_t mContentHash = 0;
- bool mIsContentHashInitialized = false;
- CacheKey mCacheKey;
- };
+ size_t mContentHash = 0;
+ bool mIsContentHashInitialized = false;
+};
} // namespace dawn::native
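
For reference, a minimal sketch (not part of the patch) of how a cache of CachedObjects could be keyed with the HashFunc declared above. The EqualByContentHash functor and ObjectCache alias are hypothetical, and comparing content hashes alone is a simplification; a real cache would also compare the full object contents:

    // Sketch only: an unordered_set keyed by content hash.
    #include <unordered_set>

    #include "dawn/native/CachedObject.h"

    struct EqualByContentHash {
        bool operator()(const dawn::native::CachedObject* a,
                        const dawn::native::CachedObject* b) const {
            return a->GetContentHash() == b->GetContentHash();
        }
    };

    using ObjectCache = std::unordered_set<dawn::native::CachedObject*,
                                           dawn::native::CachedObject::HashFunc,
                                           EqualByContentHash>;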
diff --git a/chromium/third_party/dawn/src/dawn/native/CallbackTaskManager.cpp b/chromium/third_party/dawn/src/dawn/native/CallbackTaskManager.cpp
index a8be5cc744f..51a9b86b614 100644
--- a/chromium/third_party/dawn/src/dawn/native/CallbackTaskManager.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/CallbackTaskManager.cpp
@@ -14,24 +14,30 @@
#include "dawn/native/CallbackTaskManager.h"
+#include <utility>
+
namespace dawn::native {
- bool CallbackTaskManager::IsEmpty() {
- std::lock_guard<std::mutex> lock(mCallbackTaskQueueMutex);
- return mCallbackTaskQueue.empty();
- }
+CallbackTaskManager::CallbackTaskManager() = default;
+
+CallbackTaskManager::~CallbackTaskManager() = default;
+
+bool CallbackTaskManager::IsEmpty() {
+ std::lock_guard<std::mutex> lock(mCallbackTaskQueueMutex);
+ return mCallbackTaskQueue.empty();
+}
- std::vector<std::unique_ptr<CallbackTask>> CallbackTaskManager::AcquireCallbackTasks() {
- std::lock_guard<std::mutex> lock(mCallbackTaskQueueMutex);
+std::vector<std::unique_ptr<CallbackTask>> CallbackTaskManager::AcquireCallbackTasks() {
+ std::lock_guard<std::mutex> lock(mCallbackTaskQueueMutex);
- std::vector<std::unique_ptr<CallbackTask>> allTasks;
- allTasks.swap(mCallbackTaskQueue);
- return allTasks;
- }
+ std::vector<std::unique_ptr<CallbackTask>> allTasks;
+ allTasks.swap(mCallbackTaskQueue);
+ return allTasks;
+}
- void CallbackTaskManager::AddCallbackTask(std::unique_ptr<CallbackTask> callbackTask) {
- std::lock_guard<std::mutex> lock(mCallbackTaskQueueMutex);
- mCallbackTaskQueue.push_back(std::move(callbackTask));
- }
+void CallbackTaskManager::AddCallbackTask(std::unique_ptr<CallbackTask> callbackTask) {
+ std::lock_guard<std::mutex> lock(mCallbackTaskQueueMutex);
+ mCallbackTaskQueue.push_back(std::move(callbackTask));
+}
} // namespace dawn::native
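
For reference, a minimal sketch (not part of the patch) of draining the queue once work completes; the FlushCallbackTasks helper is hypothetical, while AcquireCallbackTasks and Finish come from the code above:

    // Sketch only: AcquireCallbackTasks swaps the queue out under the mutex,
    // so Finish runs without the lock held.
    #include "dawn/native/CallbackTaskManager.h"

    void FlushCallbackTasks(dawn::native::CallbackTaskManager* manager) {
        for (auto& task : manager->AcquireCallbackTasks()) {
            task->Finish();
        }
    }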
diff --git a/chromium/third_party/dawn/src/dawn/native/CallbackTaskManager.h b/chromium/third_party/dawn/src/dawn/native/CallbackTaskManager.h
index 0a4253ac442..aceea0a49e7 100644
--- a/chromium/third_party/dawn/src/dawn/native/CallbackTaskManager.h
+++ b/chromium/third_party/dawn/src/dawn/native/CallbackTaskManager.h
@@ -21,24 +21,27 @@
namespace dawn::native {
- struct CallbackTask {
- public:
- virtual ~CallbackTask() = default;
- virtual void Finish() = 0;
- virtual void HandleShutDown() = 0;
- virtual void HandleDeviceLoss() = 0;
- };
-
- class CallbackTaskManager {
- public:
- void AddCallbackTask(std::unique_ptr<CallbackTask> callbackTask);
- bool IsEmpty();
- std::vector<std::unique_ptr<CallbackTask>> AcquireCallbackTasks();
-
- private:
- std::mutex mCallbackTaskQueueMutex;
- std::vector<std::unique_ptr<CallbackTask>> mCallbackTaskQueue;
- };
+struct CallbackTask {
+ public:
+ virtual ~CallbackTask() = default;
+ virtual void Finish() = 0;
+ virtual void HandleShutDown() = 0;
+ virtual void HandleDeviceLoss() = 0;
+};
+
+class CallbackTaskManager {
+ public:
+ CallbackTaskManager();
+ ~CallbackTaskManager();
+
+ void AddCallbackTask(std::unique_ptr<CallbackTask> callbackTask);
+ bool IsEmpty();
+ std::vector<std::unique_ptr<CallbackTask>> AcquireCallbackTasks();
+
+ private:
+ std::mutex mCallbackTaskQueueMutex;
+ std::vector<std::unique_ptr<CallbackTask>> mCallbackTaskQueue;
+};
} // namespace dawn::native
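
For reference, a minimal sketch (not part of the patch) of a CallbackTask subclass and how it might be queued; PrintOnFinishTask and EnqueueExample are hypothetical names:

    // Sketch only: implements the three pure-virtual hooks and enqueues one task.
    #include <cstdio>
    #include <memory>

    #include "dawn/native/CallbackTaskManager.h"

    class PrintOnFinishTask : public dawn::native::CallbackTask {
      public:
        void Finish() override { std::printf("work finished\n"); }
        void HandleShutDown() override { std::printf("shut down before finishing\n"); }
        void HandleDeviceLoss() override { std::printf("device lost before finishing\n"); }
    };

    void EnqueueExample(dawn::native::CallbackTaskManager* manager) {
        manager->AddCallbackTask(std::make_unique<PrintOnFinishTask>());
    }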
diff --git a/chromium/third_party/dawn/src/dawn/native/CommandAllocator.cpp b/chromium/third_party/dawn/src/dawn/native/CommandAllocator.cpp
index 5d36aad0b41..7f1c0223db9 100644
--- a/chromium/third_party/dawn/src/dawn/native/CommandAllocator.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/CommandAllocator.cpp
@@ -14,215 +14,213 @@
#include "dawn/native/CommandAllocator.h"
-#include "dawn/common/Assert.h"
-#include "dawn/common/Math.h"
-
#include <algorithm>
#include <climits>
#include <cstdlib>
#include <utility>
+#include "dawn/common/Assert.h"
+#include "dawn/common/Math.h"
+
namespace dawn::native {
- // TODO(cwallez@chromium.org): figure out a way to have more type safety for the iterator
+// TODO(cwallez@chromium.org): figure out a way to have more type safety for the iterator
- CommandIterator::CommandIterator() {
- Reset();
- }
+CommandIterator::CommandIterator() {
+ Reset();
+}
- CommandIterator::~CommandIterator() {
- ASSERT(IsEmpty());
- }
+CommandIterator::~CommandIterator() {
+ ASSERT(IsEmpty());
+}
- CommandIterator::CommandIterator(CommandIterator&& other) {
- if (!other.IsEmpty()) {
- mBlocks = std::move(other.mBlocks);
- other.Reset();
- }
- Reset();
- }
-
- CommandIterator& CommandIterator::operator=(CommandIterator&& other) {
- ASSERT(IsEmpty());
- if (!other.IsEmpty()) {
- mBlocks = std::move(other.mBlocks);
- other.Reset();
- }
- Reset();
- return *this;
+CommandIterator::CommandIterator(CommandIterator&& other) {
+ if (!other.IsEmpty()) {
+ mBlocks = std::move(other.mBlocks);
+ other.Reset();
}
+ Reset();
+}
- CommandIterator::CommandIterator(CommandAllocator allocator)
- : mBlocks(allocator.AcquireBlocks()) {
- Reset();
+CommandIterator& CommandIterator::operator=(CommandIterator&& other) {
+ ASSERT(IsEmpty());
+ if (!other.IsEmpty()) {
+ mBlocks = std::move(other.mBlocks);
+ other.Reset();
}
-
- void CommandIterator::AcquireCommandBlocks(std::vector<CommandAllocator> allocators) {
- ASSERT(IsEmpty());
- mBlocks.clear();
- for (CommandAllocator& allocator : allocators) {
- CommandBlocks blocks = allocator.AcquireBlocks();
- if (!blocks.empty()) {
- mBlocks.reserve(mBlocks.size() + blocks.size());
- for (BlockDef& block : blocks) {
- mBlocks.push_back(std::move(block));
- }
+ Reset();
+ return *this;
+}
+
+CommandIterator::CommandIterator(CommandAllocator allocator) : mBlocks(allocator.AcquireBlocks()) {
+ Reset();
+}
+
+void CommandIterator::AcquireCommandBlocks(std::vector<CommandAllocator> allocators) {
+ ASSERT(IsEmpty());
+ mBlocks.clear();
+ for (CommandAllocator& allocator : allocators) {
+ CommandBlocks blocks = allocator.AcquireBlocks();
+ if (!blocks.empty()) {
+ mBlocks.reserve(mBlocks.size() + blocks.size());
+ for (BlockDef& block : blocks) {
+ mBlocks.push_back(std::move(block));
}
}
- Reset();
- }
-
- bool CommandIterator::NextCommandIdInNewBlock(uint32_t* commandId) {
- mCurrentBlock++;
- if (mCurrentBlock >= mBlocks.size()) {
- Reset();
- *commandId = detail::kEndOfBlock;
- return false;
- }
- mCurrentPtr = AlignPtr(mBlocks[mCurrentBlock].block, alignof(uint32_t));
- return NextCommandId(commandId);
- }
-
- void CommandIterator::Reset() {
- mCurrentBlock = 0;
-
- if (mBlocks.empty()) {
- // This will case the first NextCommandId call to try to move to the next block and stop
- // the iteration immediately, without special casing the initialization.
- mCurrentPtr = reinterpret_cast<uint8_t*>(&mEndOfBlock);
- mBlocks.emplace_back();
- mBlocks[0].size = sizeof(mEndOfBlock);
- mBlocks[0].block = mCurrentPtr;
- } else {
- mCurrentPtr = AlignPtr(mBlocks[0].block, alignof(uint32_t));
- }
- }
-
- void CommandIterator::MakeEmptyAsDataWasDestroyed() {
- if (IsEmpty()) {
- return;
- }
-
- for (BlockDef& block : mBlocks) {
- free(block.block);
- }
- mBlocks.clear();
- Reset();
- ASSERT(IsEmpty());
- }
-
- bool CommandIterator::IsEmpty() const {
- return mBlocks[0].block == reinterpret_cast<const uint8_t*>(&mEndOfBlock);
- }
-
- // Potential TODO(crbug.com/dawn/835):
- // - Host the size and pointer to next block in the block itself to avoid having an allocation
- // in the vector
- // - Assume T's alignof is, say 64bits, static assert it, and make commandAlignment a constant
- // in Allocate
- // - Be able to optimize allocation to one block, for command buffers expected to live long to
- // avoid cache misses
- // - Better block allocation, maybe have Dawn API to say command buffer is going to have size
- // close to another
-
- CommandAllocator::CommandAllocator() {
- ResetPointers();
- }
-
- CommandAllocator::~CommandAllocator() {
- Reset();
- }
-
- CommandAllocator::CommandAllocator(CommandAllocator&& other)
- : mBlocks(std::move(other.mBlocks)), mLastAllocationSize(other.mLastAllocationSize) {
- other.mBlocks.clear();
- if (!other.IsEmpty()) {
- mCurrentPtr = other.mCurrentPtr;
- mEndPtr = other.mEndPtr;
- } else {
- ResetPointers();
- }
- other.Reset();
}
+ Reset();
+}
- CommandAllocator& CommandAllocator::operator=(CommandAllocator&& other) {
+bool CommandIterator::NextCommandIdInNewBlock(uint32_t* commandId) {
+ mCurrentBlock++;
+ if (mCurrentBlock >= mBlocks.size()) {
Reset();
- if (!other.IsEmpty()) {
- std::swap(mBlocks, other.mBlocks);
- mLastAllocationSize = other.mLastAllocationSize;
- mCurrentPtr = other.mCurrentPtr;
- mEndPtr = other.mEndPtr;
- }
- other.Reset();
- return *this;
- }
-
- void CommandAllocator::Reset() {
- for (BlockDef& block : mBlocks) {
- free(block.block);
- }
- mBlocks.clear();
- mLastAllocationSize = kDefaultBaseAllocationSize;
+ *commandId = detail::kEndOfBlock;
+ return false;
+ }
+ mCurrentPtr = AlignPtr(mBlocks[mCurrentBlock].block, alignof(uint32_t));
+ return NextCommandId(commandId);
+}
+
+void CommandIterator::Reset() {
+ mCurrentBlock = 0;
+
+ if (mBlocks.empty()) {
+        // This will cause the first NextCommandId call to try to move to the next block and stop
+        // the iteration immediately, without special casing the initialization.
+ mCurrentPtr = reinterpret_cast<uint8_t*>(&mEndOfBlock);
+ mBlocks.emplace_back();
+ mBlocks[0].size = sizeof(mEndOfBlock);
+ mBlocks[0].block = mCurrentPtr;
+ } else {
+ mCurrentPtr = AlignPtr(mBlocks[0].block, alignof(uint32_t));
+ }
+}
+
+void CommandIterator::MakeEmptyAsDataWasDestroyed() {
+ if (IsEmpty()) {
+ return;
+ }
+
+ for (BlockDef& block : mBlocks) {
+ free(block.block);
+ }
+ mBlocks.clear();
+ Reset();
+ ASSERT(IsEmpty());
+}
+
+bool CommandIterator::IsEmpty() const {
+ return mBlocks[0].block == reinterpret_cast<const uint8_t*>(&mEndOfBlock);
+}
+
+// Potential TODO(crbug.com/dawn/835):
+// - Host the size and pointer to next block in the block itself to avoid having an allocation
+// in the vector
+// - Assume T's alignof is, say, 64 bits, static_assert it, and make commandAlignment a constant
+// in Allocate
+// - Be able to optimize allocation to one block, for command buffers expected to live long to
+// avoid cache misses
+// - Better block allocation, maybe have Dawn API to say command buffer is going to have size
+// close to another
+
+CommandAllocator::CommandAllocator() {
+ ResetPointers();
+}
+
+CommandAllocator::~CommandAllocator() {
+ Reset();
+}
+
+CommandAllocator::CommandAllocator(CommandAllocator&& other)
+ : mBlocks(std::move(other.mBlocks)), mLastAllocationSize(other.mLastAllocationSize) {
+ other.mBlocks.clear();
+ if (!other.IsEmpty()) {
+ mCurrentPtr = other.mCurrentPtr;
+ mEndPtr = other.mEndPtr;
+ } else {
ResetPointers();
}
-
- bool CommandAllocator::IsEmpty() const {
- return mCurrentPtr == reinterpret_cast<const uint8_t*>(&mDummyEnum[0]);
- }
-
- CommandBlocks&& CommandAllocator::AcquireBlocks() {
- ASSERT(mCurrentPtr != nullptr && mEndPtr != nullptr);
- ASSERT(IsPtrAligned(mCurrentPtr, alignof(uint32_t)));
- ASSERT(mCurrentPtr + sizeof(uint32_t) <= mEndPtr);
- *reinterpret_cast<uint32_t*>(mCurrentPtr) = detail::kEndOfBlock;
-
- mCurrentPtr = nullptr;
- mEndPtr = nullptr;
- return std::move(mBlocks);
- }
-
- uint8_t* CommandAllocator::AllocateInNewBlock(uint32_t commandId,
- size_t commandSize,
- size_t commandAlignment) {
- // When there is not enough space, we signal the kEndOfBlock, so that the iterator knows
- // to move to the next one. kEndOfBlock on the last block means the end of the commands.
- uint32_t* idAlloc = reinterpret_cast<uint32_t*>(mCurrentPtr);
- *idAlloc = detail::kEndOfBlock;
-
- // We'll request a block that can contain at least the command ID, the command and an
- // additional ID to contain the kEndOfBlock tag.
- size_t requestedBlockSize = commandSize + kWorstCaseAdditionalSize;
-
- // The computation of the request could overflow.
- if (DAWN_UNLIKELY(requestedBlockSize <= commandSize)) {
- return nullptr;
- }
-
- if (DAWN_UNLIKELY(!GetNewBlock(requestedBlockSize))) {
- return nullptr;
- }
- return Allocate(commandId, commandSize, commandAlignment);
- }
-
- bool CommandAllocator::GetNewBlock(size_t minimumSize) {
- // Allocate blocks doubling sizes each time, to a maximum of 16k (or at least minimumSize).
- mLastAllocationSize =
- std::max(minimumSize, std::min(mLastAllocationSize * 2, size_t(16384)));
-
- uint8_t* block = static_cast<uint8_t*>(malloc(mLastAllocationSize));
- if (DAWN_UNLIKELY(block == nullptr)) {
- return false;
- }
-
- mBlocks.push_back({mLastAllocationSize, block});
- mCurrentPtr = AlignPtr(block, alignof(uint32_t));
- mEndPtr = block + mLastAllocationSize;
- return true;
- }
-
- void CommandAllocator::ResetPointers() {
- mCurrentPtr = reinterpret_cast<uint8_t*>(&mDummyEnum[0]);
- mEndPtr = reinterpret_cast<uint8_t*>(&mDummyEnum[1]);
- }
+ other.Reset();
+}
+
+CommandAllocator& CommandAllocator::operator=(CommandAllocator&& other) {
+ Reset();
+ if (!other.IsEmpty()) {
+ std::swap(mBlocks, other.mBlocks);
+ mLastAllocationSize = other.mLastAllocationSize;
+ mCurrentPtr = other.mCurrentPtr;
+ mEndPtr = other.mEndPtr;
+ }
+ other.Reset();
+ return *this;
+}
+
+void CommandAllocator::Reset() {
+ for (BlockDef& block : mBlocks) {
+ free(block.block);
+ }
+ mBlocks.clear();
+ mLastAllocationSize = kDefaultBaseAllocationSize;
+ ResetPointers();
+}
+
+bool CommandAllocator::IsEmpty() const {
+ return mCurrentPtr == reinterpret_cast<const uint8_t*>(&mPlaceholderEnum[0]);
+}
+
+CommandBlocks&& CommandAllocator::AcquireBlocks() {
+ ASSERT(mCurrentPtr != nullptr && mEndPtr != nullptr);
+ ASSERT(IsPtrAligned(mCurrentPtr, alignof(uint32_t)));
+ ASSERT(mCurrentPtr + sizeof(uint32_t) <= mEndPtr);
+ *reinterpret_cast<uint32_t*>(mCurrentPtr) = detail::kEndOfBlock;
+
+ mCurrentPtr = nullptr;
+ mEndPtr = nullptr;
+ return std::move(mBlocks);
+}
+
+uint8_t* CommandAllocator::AllocateInNewBlock(uint32_t commandId,
+ size_t commandSize,
+ size_t commandAlignment) {
+ // When there is not enough space, we signal the kEndOfBlock, so that the iterator knows
+ // to move to the next one. kEndOfBlock on the last block means the end of the commands.
+ uint32_t* idAlloc = reinterpret_cast<uint32_t*>(mCurrentPtr);
+ *idAlloc = detail::kEndOfBlock;
+
+ // We'll request a block that can contain at least the command ID, the command and an
+ // additional ID to contain the kEndOfBlock tag.
+ size_t requestedBlockSize = commandSize + kWorstCaseAdditionalSize;
+
+ // The computation of the request could overflow.
+ if (DAWN_UNLIKELY(requestedBlockSize <= commandSize)) {
+ return nullptr;
+ }
+
+ if (DAWN_UNLIKELY(!GetNewBlock(requestedBlockSize))) {
+ return nullptr;
+ }
+ return Allocate(commandId, commandSize, commandAlignment);
+}
+
+bool CommandAllocator::GetNewBlock(size_t minimumSize) {
+ // Allocate blocks doubling sizes each time, to a maximum of 16k (or at least minimumSize).
+ mLastAllocationSize = std::max(minimumSize, std::min(mLastAllocationSize * 2, size_t(16384)));
+
+ uint8_t* block = static_cast<uint8_t*>(malloc(mLastAllocationSize));
+ if (DAWN_UNLIKELY(block == nullptr)) {
+ return false;
+ }
+
+ mBlocks.push_back({mLastAllocationSize, block});
+ mCurrentPtr = AlignPtr(block, alignof(uint32_t));
+ mEndPtr = block + mLastAllocationSize;
+ return true;
+}
+
+void CommandAllocator::ResetPointers() {
+ mCurrentPtr = reinterpret_cast<uint8_t*>(&mPlaceholderEnum[0]);
+ mEndPtr = reinterpret_cast<uint8_t*>(&mPlaceholderEnum[1]);
+}
} // namespace dawn::native
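
For reference, a minimal sketch (not part of the patch) of the block growth rule used by GetNewBlock above, reproduced as a standalone function to make the doubling-up-to-16k behavior visible; NextBlockSize is a hypothetical helper:

    // Sketch only: block sizes double each allocation, capped at 16384 bytes
    // unless a larger minimum is requested.
    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    size_t NextBlockSize(size_t lastAllocationSize, size_t minimumSize) {
        return std::max(minimumSize, std::min(lastAllocationSize * 2, size_t(16384)));
    }

    int main() {
        size_t size = 2048;  // matches kDefaultBaseAllocationSize in CommandAllocator.h
        for (int i = 0; i < 4; i++) {
            size = NextBlockSize(size, /*minimumSize=*/1);
            std::printf("%zu\n", size);  // prints 4096, 8192, 16384, 16384
        }
        return 0;
    }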
diff --git a/chromium/third_party/dawn/src/dawn/native/CommandAllocator.h b/chromium/third_party/dawn/src/dawn/native/CommandAllocator.h
index 4b7ea99a564..c3e999e4e66 100644
--- a/chromium/third_party/dawn/src/dawn/native/CommandAllocator.h
+++ b/chromium/third_party/dawn/src/dawn/native/CommandAllocator.h
@@ -15,258 +15,257 @@
#ifndef SRC_DAWN_NATIVE_COMMANDALLOCATOR_H_
#define SRC_DAWN_NATIVE_COMMANDALLOCATOR_H_
-#include "dawn/common/Assert.h"
-#include "dawn/common/Math.h"
-#include "dawn/common/NonCopyable.h"
-
#include <cstddef>
#include <cstdint>
+#include <limits>
#include <vector>
-namespace dawn::native {
-
- // Allocation for command buffers should be fast. To avoid doing an allocation per command
- // or to avoid copying commands when reallocing, we use a linear allocator in a growing set
- // of large memory blocks. We also use this to have the format to be (u32 commandId, command),
- // so that iteration over the commands is easy.
-
- // Usage of the allocator and iterator:
- // CommandAllocator allocator;
- // DrawCommand* cmd = allocator.Allocate<DrawCommand>(CommandType::Draw);
- // // Fill command
- // // Repeat allocation and filling commands
- //
- // CommandIterator commands(allocator);
- // CommandType type;
- // while(commands.NextCommandId(&type)) {
- // switch(type) {
- // case CommandType::Draw:
- // DrawCommand* draw = commands.NextCommand<DrawCommand>();
- // // Do the draw
- // break;
- // // other cases
- // }
- // }
-
- // Note that you need to extract the commands from the CommandAllocator before destroying it
- // and must tell the CommandIterator when the allocated commands have been processed for
- // deletion.
-
- // These are the lists of blocks, should not be used directly, only through CommandAllocator
- // and CommandIterator
- struct BlockDef {
- size_t size;
- uint8_t* block;
- };
- using CommandBlocks = std::vector<BlockDef>;
-
- namespace detail {
- constexpr uint32_t kEndOfBlock = std::numeric_limits<uint32_t>::max();
- constexpr uint32_t kAdditionalData = std::numeric_limits<uint32_t>::max() - 1;
- } // namespace detail
-
- class CommandAllocator;
-
- class CommandIterator : public NonCopyable {
- public:
- CommandIterator();
- ~CommandIterator();
-
- CommandIterator(CommandIterator&& other);
- CommandIterator& operator=(CommandIterator&& other);
-
- // Shorthand constructor for acquiring CommandBlocks from a single CommandAllocator.
- explicit CommandIterator(CommandAllocator allocator);
-
- void AcquireCommandBlocks(std::vector<CommandAllocator> allocators);
-
- template <typename E>
- bool NextCommandId(E* commandId) {
- return NextCommandId(reinterpret_cast<uint32_t*>(commandId));
- }
- template <typename T>
- T* NextCommand() {
- return static_cast<T*>(NextCommand(sizeof(T), alignof(T)));
- }
- template <typename T>
- T* NextData(size_t count) {
- return static_cast<T*>(NextData(sizeof(T) * count, alignof(T)));
- }
-
- // Sets iterator to the beginning of the commands without emptying the list. This method can
- // be used if iteration was stopped early and the iterator needs to be restarted.
- void Reset();
-
- // This method must to be called after commands have been deleted. This indicates that the
- // commands have been submitted and they are no longer valid.
- void MakeEmptyAsDataWasDestroyed();
-
- private:
- bool IsEmpty() const;
-
- DAWN_FORCE_INLINE bool NextCommandId(uint32_t* commandId) {
- uint8_t* idPtr = AlignPtr(mCurrentPtr, alignof(uint32_t));
- ASSERT(idPtr + sizeof(uint32_t) <=
- mBlocks[mCurrentBlock].block + mBlocks[mCurrentBlock].size);
-
- uint32_t id = *reinterpret_cast<uint32_t*>(idPtr);
-
- if (id != detail::kEndOfBlock) {
- mCurrentPtr = idPtr + sizeof(uint32_t);
- *commandId = id;
- return true;
- }
- return NextCommandIdInNewBlock(commandId);
- }
+#include "dawn/common/Assert.h"
+#include "dawn/common/Math.h"
+#include "dawn/common/NonCopyable.h"
- bool NextCommandIdInNewBlock(uint32_t* commandId);
+namespace dawn::native {
- DAWN_FORCE_INLINE void* NextCommand(size_t commandSize, size_t commandAlignment) {
- uint8_t* commandPtr = AlignPtr(mCurrentPtr, commandAlignment);
- ASSERT(commandPtr + sizeof(commandSize) <=
- mBlocks[mCurrentBlock].block + mBlocks[mCurrentBlock].size);
+// Allocation for command buffers should be fast. To avoid doing an allocation per command,
+// and to avoid copying commands when reallocating, we use a linear allocator over a growing
+// set of large memory blocks. We also use this to lay out commands as (u32 commandId, command),
+// so that iterating over the commands is easy.
- mCurrentPtr = commandPtr + commandSize;
- return commandPtr;
+// Usage of the allocator and iterator:
+// CommandAllocator allocator;
+// DrawCommand* cmd = allocator.Allocate<DrawCommand>(CommandType::Draw);
+// // Fill command
+// // Repeat allocation and filling commands
+//
+// CommandIterator commands(allocator);
+// CommandType type;
+// while(commands.NextCommandId(&type)) {
+// switch(type) {
+// case CommandType::Draw:
+// DrawCommand* draw = commands.NextCommand<DrawCommand>();
+// // Do the draw
+// break;
+// // other cases
+// }
+// }
+
+// Note that you need to extract the commands from the CommandAllocator before destroying it
+// and must tell the CommandIterator when the allocated commands have been processed for
+// deletion.
+
+// These are the lists of blocks; they should not be used directly, only through
+// CommandAllocator and CommandIterator.
+struct BlockDef {
+ size_t size;
+ uint8_t* block;
+};
+using CommandBlocks = std::vector<BlockDef>;
+
+namespace detail {
+constexpr uint32_t kEndOfBlock = std::numeric_limits<uint32_t>::max();
+constexpr uint32_t kAdditionalData = std::numeric_limits<uint32_t>::max() - 1;
+} // namespace detail
+
+class CommandAllocator;
+
+class CommandIterator : public NonCopyable {
+ public:
+ CommandIterator();
+ ~CommandIterator();
+
+ CommandIterator(CommandIterator&& other);
+ CommandIterator& operator=(CommandIterator&& other);
+
+ // Shorthand constructor for acquiring CommandBlocks from a single CommandAllocator.
+ explicit CommandIterator(CommandAllocator allocator);
+
+ void AcquireCommandBlocks(std::vector<CommandAllocator> allocators);
+
+ template <typename E>
+ bool NextCommandId(E* commandId) {
+ return NextCommandId(reinterpret_cast<uint32_t*>(commandId));
+ }
+ template <typename T>
+ T* NextCommand() {
+ return static_cast<T*>(NextCommand(sizeof(T), alignof(T)));
+ }
+ template <typename T>
+ T* NextData(size_t count) {
+ return static_cast<T*>(NextData(sizeof(T) * count, alignof(T)));
+ }
+
+ // Sets iterator to the beginning of the commands without emptying the list. This method can
+ // be used if iteration was stopped early and the iterator needs to be restarted.
+ void Reset();
+
+    // This method must be called after commands have been deleted. This indicates that the
+    // commands have been submitted and they are no longer valid.
+ void MakeEmptyAsDataWasDestroyed();
+
+ private:
+ bool IsEmpty() const;
+
+ DAWN_FORCE_INLINE bool NextCommandId(uint32_t* commandId) {
+ uint8_t* idPtr = AlignPtr(mCurrentPtr, alignof(uint32_t));
+ ASSERT(idPtr + sizeof(uint32_t) <=
+ mBlocks[mCurrentBlock].block + mBlocks[mCurrentBlock].size);
+
+ uint32_t id = *reinterpret_cast<uint32_t*>(idPtr);
+
+ if (id != detail::kEndOfBlock) {
+ mCurrentPtr = idPtr + sizeof(uint32_t);
+ *commandId = id;
+ return true;
}
-
- DAWN_FORCE_INLINE void* NextData(size_t dataSize, size_t dataAlignment) {
- uint32_t id;
- bool hasId = NextCommandId(&id);
- ASSERT(hasId);
- ASSERT(id == detail::kAdditionalData);
-
- return NextCommand(dataSize, dataAlignment);
+ return NextCommandIdInNewBlock(commandId);
+ }
+
+ bool NextCommandIdInNewBlock(uint32_t* commandId);
+
+ DAWN_FORCE_INLINE void* NextCommand(size_t commandSize, size_t commandAlignment) {
+ uint8_t* commandPtr = AlignPtr(mCurrentPtr, commandAlignment);
+ ASSERT(commandPtr + sizeof(commandSize) <=
+ mBlocks[mCurrentBlock].block + mBlocks[mCurrentBlock].size);
+
+ mCurrentPtr = commandPtr + commandSize;
+ return commandPtr;
+ }
+
+ DAWN_FORCE_INLINE void* NextData(size_t dataSize, size_t dataAlignment) {
+ uint32_t id;
+ bool hasId = NextCommandId(&id);
+ ASSERT(hasId);
+ ASSERT(id == detail::kAdditionalData);
+
+ return NextCommand(dataSize, dataAlignment);
+ }
+
+ CommandBlocks mBlocks;
+ uint8_t* mCurrentPtr = nullptr;
+ size_t mCurrentBlock = 0;
+ // Used to avoid a special case for empty iterators.
+ uint32_t mEndOfBlock = detail::kEndOfBlock;
+};
+
+class CommandAllocator : public NonCopyable {
+ public:
+ CommandAllocator();
+ ~CommandAllocator();
+
+ // NOTE: A moved-from CommandAllocator is reset to its initial empty state.
+ CommandAllocator(CommandAllocator&&);
+ CommandAllocator& operator=(CommandAllocator&&);
+
+ // Frees all blocks held by the allocator and restores it to its initial empty state.
+ void Reset();
+
+ bool IsEmpty() const;
+
+ template <typename T, typename E>
+ T* Allocate(E commandId) {
+ static_assert(sizeof(E) == sizeof(uint32_t));
+ static_assert(alignof(E) == alignof(uint32_t));
+ static_assert(alignof(T) <= kMaxSupportedAlignment);
+ T* result =
+ reinterpret_cast<T*>(Allocate(static_cast<uint32_t>(commandId), sizeof(T), alignof(T)));
+ if (!result) {
+ return nullptr;
}
-
- CommandBlocks mBlocks;
- uint8_t* mCurrentPtr = nullptr;
- size_t mCurrentBlock = 0;
- // Used to avoid a special case for empty iterators.
- uint32_t mEndOfBlock = detail::kEndOfBlock;
- };
-
- class CommandAllocator : public NonCopyable {
- public:
- CommandAllocator();
- ~CommandAllocator();
-
- // NOTE: A moved-from CommandAllocator is reset to its initial empty state.
- CommandAllocator(CommandAllocator&&);
- CommandAllocator& operator=(CommandAllocator&&);
-
- // Frees all blocks held by the allocator and restores it to its initial empty state.
- void Reset();
-
- bool IsEmpty() const;
-
- template <typename T, typename E>
- T* Allocate(E commandId) {
- static_assert(sizeof(E) == sizeof(uint32_t));
- static_assert(alignof(E) == alignof(uint32_t));
- static_assert(alignof(T) <= kMaxSupportedAlignment);
- T* result = reinterpret_cast<T*>(
- Allocate(static_cast<uint32_t>(commandId), sizeof(T), alignof(T)));
- if (!result) {
- return nullptr;
- }
- new (result) T;
- return result;
+ new (result) T;
+ return result;
+ }
+
+ template <typename T>
+ T* AllocateData(size_t count) {
+ static_assert(alignof(T) <= kMaxSupportedAlignment);
+ T* result = reinterpret_cast<T*>(AllocateData(sizeof(T) * count, alignof(T)));
+ if (!result) {
+ return nullptr;
}
-
- template <typename T>
- T* AllocateData(size_t count) {
- static_assert(alignof(T) <= kMaxSupportedAlignment);
- T* result = reinterpret_cast<T*>(AllocateData(sizeof(T) * count, alignof(T)));
- if (!result) {
- return nullptr;
- }
- for (size_t i = 0; i < count; i++) {
- new (result + i) T;
- }
- return result;
+ for (size_t i = 0; i < count; i++) {
+ new (result + i) T;
}
-
- private:
- // This is used for some internal computations and can be any power of two as long as code
- // using the CommandAllocator passes the static_asserts.
- static constexpr size_t kMaxSupportedAlignment = 8;
-
- // To avoid checking for overflows at every step of the computations we compute an upper
- // bound of the space that will be needed in addition to the command data.
- static constexpr size_t kWorstCaseAdditionalSize =
- sizeof(uint32_t) + kMaxSupportedAlignment + alignof(uint32_t) + sizeof(uint32_t);
-
- // The default value of mLastAllocationSize.
- static constexpr size_t kDefaultBaseAllocationSize = 2048;
-
- friend CommandIterator;
- CommandBlocks&& AcquireBlocks();
-
- DAWN_FORCE_INLINE uint8_t* Allocate(uint32_t commandId,
- size_t commandSize,
- size_t commandAlignment) {
- ASSERT(mCurrentPtr != nullptr);
- ASSERT(mEndPtr != nullptr);
- ASSERT(commandId != detail::kEndOfBlock);
-
- // It should always be possible to allocate one id, for kEndOfBlock tagging,
- ASSERT(IsPtrAligned(mCurrentPtr, alignof(uint32_t)));
- ASSERT(mEndPtr >= mCurrentPtr);
- ASSERT(static_cast<size_t>(mEndPtr - mCurrentPtr) >= sizeof(uint32_t));
-
- // The memory after the ID will contain the following:
- // - the current ID
- // - padding to align the command, maximum kMaxSupportedAlignment
- // - the command of size commandSize
- // - padding to align the next ID, maximum alignof(uint32_t)
- // - the next ID of size sizeof(uint32_t)
-
- // This can't overflow because by construction mCurrentPtr always has space for the next
- // ID.
- size_t remainingSize = static_cast<size_t>(mEndPtr - mCurrentPtr);
-
- // The good case were we have enough space for the command data and upper bound of the
- // extra required space.
- if ((remainingSize >= kWorstCaseAdditionalSize) &&
- (remainingSize - kWorstCaseAdditionalSize >= commandSize)) {
- uint32_t* idAlloc = reinterpret_cast<uint32_t*>(mCurrentPtr);
- *idAlloc = commandId;
-
- uint8_t* commandAlloc = AlignPtr(mCurrentPtr + sizeof(uint32_t), commandAlignment);
- mCurrentPtr = AlignPtr(commandAlloc + commandSize, alignof(uint32_t));
-
- return commandAlloc;
- }
- return AllocateInNewBlock(commandId, commandSize, commandAlignment);
+ return result;
+ }
+
+ private:
+ // This is used for some internal computations and can be any power of two as long as code
+ // using the CommandAllocator passes the static_asserts.
+ static constexpr size_t kMaxSupportedAlignment = 8;
+
+ // To avoid checking for overflows at every step of the computations we compute an upper
+ // bound of the space that will be needed in addition to the command data.
+ static constexpr size_t kWorstCaseAdditionalSize =
+ sizeof(uint32_t) + kMaxSupportedAlignment + alignof(uint32_t) + sizeof(uint32_t);
+
+ // The default value of mLastAllocationSize.
+ static constexpr size_t kDefaultBaseAllocationSize = 2048;
+
+ friend CommandIterator;
+ CommandBlocks&& AcquireBlocks();
+
+ DAWN_FORCE_INLINE uint8_t* Allocate(uint32_t commandId,
+ size_t commandSize,
+ size_t commandAlignment) {
+ ASSERT(mCurrentPtr != nullptr);
+ ASSERT(mEndPtr != nullptr);
+ ASSERT(commandId != detail::kEndOfBlock);
+
+        // It should always be possible to allocate one id, for kEndOfBlock tagging.
+ ASSERT(IsPtrAligned(mCurrentPtr, alignof(uint32_t)));
+ ASSERT(mEndPtr >= mCurrentPtr);
+ ASSERT(static_cast<size_t>(mEndPtr - mCurrentPtr) >= sizeof(uint32_t));
+
+ // The memory after the ID will contain the following:
+ // - the current ID
+ // - padding to align the command, maximum kMaxSupportedAlignment
+ // - the command of size commandSize
+ // - padding to align the next ID, maximum alignof(uint32_t)
+ // - the next ID of size sizeof(uint32_t)
+
+ // This can't overflow because by construction mCurrentPtr always has space for the next
+ // ID.
+ size_t remainingSize = static_cast<size_t>(mEndPtr - mCurrentPtr);
+
+        // The good case where we have enough space for the command data and the upper bound
+        // of the extra required space.
+ if ((remainingSize >= kWorstCaseAdditionalSize) &&
+ (remainingSize - kWorstCaseAdditionalSize >= commandSize)) {
+ uint32_t* idAlloc = reinterpret_cast<uint32_t*>(mCurrentPtr);
+ *idAlloc = commandId;
+
+ uint8_t* commandAlloc = AlignPtr(mCurrentPtr + sizeof(uint32_t), commandAlignment);
+ mCurrentPtr = AlignPtr(commandAlloc + commandSize, alignof(uint32_t));
+
+ return commandAlloc;
}
+ return AllocateInNewBlock(commandId, commandSize, commandAlignment);
+ }
- uint8_t* AllocateInNewBlock(uint32_t commandId,
- size_t commandSize,
- size_t commandAlignment);
+ uint8_t* AllocateInNewBlock(uint32_t commandId, size_t commandSize, size_t commandAlignment);
- DAWN_FORCE_INLINE uint8_t* AllocateData(size_t commandSize, size_t commandAlignment) {
- return Allocate(detail::kAdditionalData, commandSize, commandAlignment);
- }
+ DAWN_FORCE_INLINE uint8_t* AllocateData(size_t commandSize, size_t commandAlignment) {
+ return Allocate(detail::kAdditionalData, commandSize, commandAlignment);
+ }
- bool GetNewBlock(size_t minimumSize);
+ bool GetNewBlock(size_t minimumSize);
- void ResetPointers();
+ void ResetPointers();
- CommandBlocks mBlocks;
- size_t mLastAllocationSize = kDefaultBaseAllocationSize;
+ CommandBlocks mBlocks;
+ size_t mLastAllocationSize = kDefaultBaseAllocationSize;
- // Data used for the block range at initialization so that the first call to Allocate sees
- // there is not enough space and calls GetNewBlock. This avoids having to special case the
- // initialization in Allocate.
- uint32_t mDummyEnum[1] = {0};
+ // Data used for the block range at initialization so that the first call to Allocate sees
+ // there is not enough space and calls GetNewBlock. This avoids having to special case the
+ // initialization in Allocate.
+ uint32_t mPlaceholderEnum[1] = {0};
- // Pointers to the current range of allocation in the block. Guaranteed to allow for at
- // least one uint32_t if not nullptr, so that the special kEndOfBlock command id can always
- // be written. Nullptr iff the blocks were moved out.
- uint8_t* mCurrentPtr = nullptr;
- uint8_t* mEndPtr = nullptr;
- };
+ // Pointers to the current range of allocation in the block. Guaranteed to allow for at
+ // least one uint32_t if not nullptr, so that the special kEndOfBlock command id can always
+ // be written. Nullptr iff the blocks were moved out.
+ uint8_t* mCurrentPtr = nullptr;
+ uint8_t* mEndPtr = nullptr;
+};
} // namespace dawn::native
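
For reference, a minimal sketch (not part of the patch) of a self-contained round trip through the allocator and iterator, following the usage comment above; Cmd, PingCmd, and RoundTrip are hypothetical, while the Dawn types come from this header:

    // Sketch only: allocate one tagged command, then iterate it back.
    #include <cstdint>
    #include <utility>

    #include "dawn/native/CommandAllocator.h"

    enum class Cmd : uint32_t { Ping };
    struct PingCmd {
        uint32_t value;
    };

    void RoundTrip() {
        dawn::native::CommandAllocator allocator;
        PingCmd* cmd = allocator.Allocate<PingCmd>(Cmd::Ping);
        cmd->value = 42;

        dawn::native::CommandIterator commands(std::move(allocator));
        Cmd type;
        while (commands.NextCommandId(&type)) {
            if (type == Cmd::Ping) {
                PingCmd* ping = commands.NextCommand<PingCmd>();
                (void)ping->value;  // consume the command
            }
        }
        // PingCmd is trivially destructible, so the blocks can be released directly.
        commands.MakeEmptyAsDataWasDestroyed();
    }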
diff --git a/chromium/third_party/dawn/src/dawn/native/CommandBuffer.cpp b/chromium/third_party/dawn/src/dawn/native/CommandBuffer.cpp
index f8c7836b40f..ab22dea037a 100644
--- a/chromium/third_party/dawn/src/dawn/native/CommandBuffer.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/CommandBuffer.cpp
@@ -25,221 +25,221 @@
namespace dawn::native {
- CommandBufferBase::CommandBufferBase(CommandEncoder* encoder,
- const CommandBufferDescriptor* descriptor)
- : ApiObjectBase(encoder->GetDevice(), descriptor->label),
- mCommands(encoder->AcquireCommands()),
- mResourceUsages(encoder->AcquireResourceUsages()) {
- TrackInDevice();
- }
-
- CommandBufferBase::CommandBufferBase(DeviceBase* device)
- : ApiObjectBase(device, kLabelNotImplemented) {
- TrackInDevice();
- }
-
- CommandBufferBase::CommandBufferBase(DeviceBase* device, ObjectBase::ErrorTag tag)
- : ApiObjectBase(device, tag) {
- }
-
- // static
- CommandBufferBase* CommandBufferBase::MakeError(DeviceBase* device) {
- return new CommandBufferBase(device, ObjectBase::kError);
- }
-
- ObjectType CommandBufferBase::GetType() const {
- return ObjectType::CommandBuffer;
- }
-
- MaybeError CommandBufferBase::ValidateCanUseInSubmitNow() const {
- ASSERT(!IsError());
-
- DAWN_INVALID_IF(!IsAlive(), "%s cannot be submitted more than once.", this);
- return {};
- }
-
- void CommandBufferBase::DestroyImpl() {
- FreeCommands(&mCommands);
- mResourceUsages = {};
- }
-
- const CommandBufferResourceUsage& CommandBufferBase::GetResourceUsages() const {
- return mResourceUsages;
- }
-
- CommandIterator* CommandBufferBase::GetCommandIteratorForTesting() {
- return &mCommands;
- }
-
- bool IsCompleteSubresourceCopiedTo(const TextureBase* texture,
- const Extent3D copySize,
- const uint32_t mipLevel) {
- Extent3D extent = texture->GetMipLevelPhysicalSize(mipLevel);
-
- switch (texture->GetDimension()) {
- case wgpu::TextureDimension::e1D:
- return extent.width == copySize.width;
- case wgpu::TextureDimension::e2D:
- return extent.width == copySize.width && extent.height == copySize.height;
- case wgpu::TextureDimension::e3D:
- return extent.width == copySize.width && extent.height == copySize.height &&
- extent.depthOrArrayLayers == copySize.depthOrArrayLayers;
+CommandBufferBase::CommandBufferBase(CommandEncoder* encoder,
+ const CommandBufferDescriptor* descriptor)
+ : ApiObjectBase(encoder->GetDevice(), descriptor->label),
+ mCommands(encoder->AcquireCommands()),
+ mResourceUsages(encoder->AcquireResourceUsages()) {
+ TrackInDevice();
+}
+
+CommandBufferBase::CommandBufferBase(DeviceBase* device)
+ : ApiObjectBase(device, kLabelNotImplemented) {
+ TrackInDevice();
+}
+
+CommandBufferBase::CommandBufferBase(DeviceBase* device, ObjectBase::ErrorTag tag)
+ : ApiObjectBase(device, tag) {}
+
+// static
+CommandBufferBase* CommandBufferBase::MakeError(DeviceBase* device) {
+ return new CommandBufferBase(device, ObjectBase::kError);
+}
+
+ObjectType CommandBufferBase::GetType() const {
+ return ObjectType::CommandBuffer;
+}
+
+MaybeError CommandBufferBase::ValidateCanUseInSubmitNow() const {
+ ASSERT(!IsError());
+
+ DAWN_INVALID_IF(!IsAlive(), "%s cannot be submitted more than once.", this);
+ return {};
+}
+
+void CommandBufferBase::DestroyImpl() {
+ FreeCommands(&mCommands);
+ mResourceUsages = {};
+}
+
+const CommandBufferResourceUsage& CommandBufferBase::GetResourceUsages() const {
+ return mResourceUsages;
+}
+
+CommandIterator* CommandBufferBase::GetCommandIteratorForTesting() {
+ return &mCommands;
+}
+
+bool IsCompleteSubresourceCopiedTo(const TextureBase* texture,
+ const Extent3D copySize,
+ const uint32_t mipLevel) {
+ Extent3D extent = texture->GetMipLevelSingleSubresourcePhysicalSize(mipLevel);
+
+ switch (texture->GetDimension()) {
+ case wgpu::TextureDimension::e1D:
+ return extent.width == copySize.width;
+ case wgpu::TextureDimension::e2D:
+ return extent.width == copySize.width && extent.height == copySize.height;
+ case wgpu::TextureDimension::e3D:
+ return extent.width == copySize.width && extent.height == copySize.height &&
+ extent.depthOrArrayLayers == copySize.depthOrArrayLayers;
+ }
+
+ UNREACHABLE();
+}
+
+SubresourceRange GetSubresourcesAffectedByCopy(const TextureCopy& copy, const Extent3D& copySize) {
+ switch (copy.texture->GetDimension()) {
+ case wgpu::TextureDimension::e1D:
+ ASSERT(copy.origin.z == 0 && copySize.depthOrArrayLayers == 1);
+ ASSERT(copy.mipLevel == 0);
+ return {copy.aspect, {0, 1}, {0, 1}};
+ case wgpu::TextureDimension::e2D:
+ return {copy.aspect, {copy.origin.z, copySize.depthOrArrayLayers}, {copy.mipLevel, 1}};
+ case wgpu::TextureDimension::e3D:
+ return {copy.aspect, {0, 1}, {copy.mipLevel, 1}};
+ }
+
+ UNREACHABLE();
+}
+
+void LazyClearRenderPassAttachments(BeginRenderPassCmd* renderPass) {
+ for (ColorAttachmentIndex i :
+ IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
+ auto& attachmentInfo = renderPass->colorAttachments[i];
+ TextureViewBase* view = attachmentInfo.view.Get();
+ bool hasResolveTarget = attachmentInfo.resolveTarget != nullptr;
+
+ ASSERT(view->GetLayerCount() == 1);
+ ASSERT(view->GetLevelCount() == 1);
+ SubresourceRange range = view->GetSubresourceRange();
+
+ // If the loadOp is Load, but the subresource is not initialized, use Clear instead.
+ if (attachmentInfo.loadOp == wgpu::LoadOp::Load &&
+ !view->GetTexture()->IsSubresourceContentInitialized(range)) {
+ attachmentInfo.loadOp = wgpu::LoadOp::Clear;
+ attachmentInfo.clearColor = {0.f, 0.f, 0.f, 0.f};
}
- }
- SubresourceRange GetSubresourcesAffectedByCopy(const TextureCopy& copy,
- const Extent3D& copySize) {
- switch (copy.texture->GetDimension()) {
- case wgpu::TextureDimension::e1D:
- ASSERT(copy.origin.z == 0 && copySize.depthOrArrayLayers == 1);
- ASSERT(copy.mipLevel == 0);
- return {copy.aspect, {0, 1}, {0, 1}};
- case wgpu::TextureDimension::e2D:
- return {
- copy.aspect, {copy.origin.z, copySize.depthOrArrayLayers}, {copy.mipLevel, 1}};
- case wgpu::TextureDimension::e3D:
- return {copy.aspect, {0, 1}, {copy.mipLevel, 1}};
+ if (hasResolveTarget) {
+ // We need to set the resolve target to initialized so that it does not get
+ // cleared later in the pipeline. The texture will be resolved from the
+ // source color attachment, which will be correctly initialized.
+ TextureViewBase* resolveView = attachmentInfo.resolveTarget.Get();
+ ASSERT(resolveView->GetLayerCount() == 1);
+ ASSERT(resolveView->GetLevelCount() == 1);
+ resolveView->GetTexture()->SetIsSubresourceContentInitialized(
+ true, resolveView->GetSubresourceRange());
}
- }
- void LazyClearRenderPassAttachments(BeginRenderPassCmd* renderPass) {
- for (ColorAttachmentIndex i :
- IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
- auto& attachmentInfo = renderPass->colorAttachments[i];
- TextureViewBase* view = attachmentInfo.view.Get();
- bool hasResolveTarget = attachmentInfo.resolveTarget != nullptr;
-
- ASSERT(view->GetLayerCount() == 1);
- ASSERT(view->GetLevelCount() == 1);
- SubresourceRange range = view->GetSubresourceRange();
-
- // If the loadOp is Load, but the subresource is not initialized, use Clear instead.
- if (attachmentInfo.loadOp == wgpu::LoadOp::Load &&
- !view->GetTexture()->IsSubresourceContentInitialized(range)) {
- attachmentInfo.loadOp = wgpu::LoadOp::Clear;
- attachmentInfo.clearColor = {0.f, 0.f, 0.f, 0.f};
- }
-
- if (hasResolveTarget) {
- // We need to set the resolve target to initialized so that it does not get
- // cleared later in the pipeline. The texture will be resolved from the
- // source color attachment, which will be correctly initialized.
- TextureViewBase* resolveView = attachmentInfo.resolveTarget.Get();
- ASSERT(resolveView->GetLayerCount() == 1);
- ASSERT(resolveView->GetLevelCount() == 1);
- resolveView->GetTexture()->SetIsSubresourceContentInitialized(
- true, resolveView->GetSubresourceRange());
- }
-
- switch (attachmentInfo.storeOp) {
- case wgpu::StoreOp::Store:
- view->GetTexture()->SetIsSubresourceContentInitialized(true, range);
- break;
-
- case wgpu::StoreOp::Discard:
- view->GetTexture()->SetIsSubresourceContentInitialized(false, range);
- break;
-
- case wgpu::StoreOp::Undefined:
- UNREACHABLE();
- break;
- }
- }
+ switch (attachmentInfo.storeOp) {
+ case wgpu::StoreOp::Store:
+ view->GetTexture()->SetIsSubresourceContentInitialized(true, range);
+ break;
- if (renderPass->attachmentState->HasDepthStencilAttachment()) {
- auto& attachmentInfo = renderPass->depthStencilAttachment;
- TextureViewBase* view = attachmentInfo.view.Get();
- ASSERT(view->GetLayerCount() == 1);
- ASSERT(view->GetLevelCount() == 1);
- SubresourceRange range = view->GetSubresourceRange();
-
- SubresourceRange depthRange = range;
- depthRange.aspects = range.aspects & Aspect::Depth;
-
- SubresourceRange stencilRange = range;
- stencilRange.aspects = range.aspects & Aspect::Stencil;
-
- // If the depth stencil texture has not been initialized, we want to use loadop
- // clear to init the contents to 0's
- if (!view->GetTexture()->IsSubresourceContentInitialized(depthRange) &&
- attachmentInfo.depthLoadOp == wgpu::LoadOp::Load) {
- attachmentInfo.clearDepth = 0.0f;
- attachmentInfo.depthLoadOp = wgpu::LoadOp::Clear;
- }
-
- if (!view->GetTexture()->IsSubresourceContentInitialized(stencilRange) &&
- attachmentInfo.stencilLoadOp == wgpu::LoadOp::Load) {
- attachmentInfo.clearStencil = 0u;
- attachmentInfo.stencilLoadOp = wgpu::LoadOp::Clear;
- }
-
- view->GetTexture()->SetIsSubresourceContentInitialized(
- attachmentInfo.depthStoreOp == wgpu::StoreOp::Store, depthRange);
-
- view->GetTexture()->SetIsSubresourceContentInitialized(
- attachmentInfo.stencilStoreOp == wgpu::StoreOp::Store, stencilRange);
+ case wgpu::StoreOp::Discard:
+ view->GetTexture()->SetIsSubresourceContentInitialized(false, range);
+ break;
+
+ case wgpu::StoreOp::Undefined:
+ UNREACHABLE();
+ break;
}
}
- bool IsFullBufferOverwrittenInTextureToBufferCopy(const CopyTextureToBufferCmd* copy) {
- ASSERT(copy != nullptr);
+ if (renderPass->attachmentState->HasDepthStencilAttachment()) {
+ auto& attachmentInfo = renderPass->depthStencilAttachment;
+ TextureViewBase* view = attachmentInfo.view.Get();
+ ASSERT(view->GetLayerCount() == 1);
+ ASSERT(view->GetLevelCount() == 1);
+ SubresourceRange range = view->GetSubresourceRange();
- if (copy->destination.offset > 0) {
- // The copy doesn't touch the start of the buffer.
- return false;
- }
+ SubresourceRange depthRange = range;
+ depthRange.aspects = range.aspects & Aspect::Depth;
- const TextureBase* texture = copy->source.texture.Get();
- const TexelBlockInfo& blockInfo =
- texture->GetFormat().GetAspectInfo(copy->source.aspect).block;
- const uint64_t widthInBlocks = copy->copySize.width / blockInfo.width;
- const uint64_t heightInBlocks = copy->copySize.height / blockInfo.height;
- const bool multiSlice = copy->copySize.depthOrArrayLayers > 1;
- const bool multiRow = multiSlice || heightInBlocks > 1;
-
- if (multiSlice && copy->destination.rowsPerImage > heightInBlocks) {
- // There are gaps between slices that aren't overwritten
- return false;
- }
+ SubresourceRange stencilRange = range;
+ stencilRange.aspects = range.aspects & Aspect::Stencil;
- const uint64_t copyTextureDataSizePerRow = widthInBlocks * blockInfo.byteSize;
- if (multiRow && copy->destination.bytesPerRow > copyTextureDataSizePerRow) {
- // There are gaps between rows that aren't overwritten
- return false;
+        // If the depth stencil texture has not been initialized, promote a Load
+        // loadOp to Clear so its contents are initialized to 0.
+ if (!view->GetTexture()->IsSubresourceContentInitialized(depthRange) &&
+ attachmentInfo.depthLoadOp == wgpu::LoadOp::Load) {
+ attachmentInfo.clearDepth = 0.0f;
+ attachmentInfo.depthLoadOp = wgpu::LoadOp::Clear;
}
- // After the above checks, we're sure the copy has no gaps.
- // Now, compute the total number of bytes written.
- const uint64_t writtenBytes =
- ComputeRequiredBytesInCopy(blockInfo, copy->copySize, copy->destination.bytesPerRow,
- copy->destination.rowsPerImage)
- .AcquireSuccess();
- if (!copy->destination.buffer->IsFullBufferRange(copy->destination.offset, writtenBytes)) {
- // The written bytes don't cover the whole buffer.
- return false;
+ if (!view->GetTexture()->IsSubresourceContentInitialized(stencilRange) &&
+ attachmentInfo.stencilLoadOp == wgpu::LoadOp::Load) {
+ attachmentInfo.clearStencil = 0u;
+ attachmentInfo.stencilLoadOp = wgpu::LoadOp::Clear;
}
- return true;
- }
-
- std::array<float, 4> ConvertToFloatColor(dawn::native::Color color) {
- const std::array<float, 4> outputValue = {
- static_cast<float>(color.r), static_cast<float>(color.g), static_cast<float>(color.b),
- static_cast<float>(color.a)};
- return outputValue;
- }
- std::array<int32_t, 4> ConvertToSignedIntegerColor(dawn::native::Color color) {
- const std::array<int32_t, 4> outputValue = {
- static_cast<int32_t>(color.r), static_cast<int32_t>(color.g),
- static_cast<int32_t>(color.b), static_cast<int32_t>(color.a)};
- return outputValue;
- }
-
- std::array<uint32_t, 4> ConvertToUnsignedIntegerColor(dawn::native::Color color) {
- const std::array<uint32_t, 4> outputValue = {
- static_cast<uint32_t>(color.r), static_cast<uint32_t>(color.g),
- static_cast<uint32_t>(color.b), static_cast<uint32_t>(color.a)};
- return outputValue;
- }
+ view->GetTexture()->SetIsSubresourceContentInitialized(
+ attachmentInfo.depthStoreOp == wgpu::StoreOp::Store, depthRange);
+
+ view->GetTexture()->SetIsSubresourceContentInitialized(
+ attachmentInfo.stencilStoreOp == wgpu::StoreOp::Store, stencilRange);
+ }
+}
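The function above is the core of the lazy-clear strategy for render pass attachments: a Load of an uninitialized subresource is silently promoted to a Clear with a zero value, and the initialization flag is updated from the store op afterwards. A minimal standalone sketch of that pattern, using simplified hypothetical types rather than the Dawn classes:

    // Hypothetical, simplified stand-ins for the types used above.
    enum class LoadOp { Load, Clear };
    enum class StoreOp { Store, Discard };

    struct Attachment {
        LoadOp loadOp = LoadOp::Load;
        StoreOp storeOp = StoreOp::Store;
        double clearValue[4] = {0, 0, 0, 0};
        bool initialized = false;  // tracked per subresource range in the real code
    };

    void LazyClearAttachment(Attachment& a) {
        // Loading memory that was never written would expose garbage, so the
        // load is promoted to a clear-to-zero instead.
        if (a.loadOp == LoadOp::Load && !a.initialized) {
            a.loadOp = LoadOp::Clear;
            a.clearValue[0] = a.clearValue[1] = a.clearValue[2] = a.clearValue[3] = 0.0;
        }
        // After the pass, the subresource counts as initialized only if it was stored.
        a.initialized = (a.storeOp == StoreOp::Store);
    }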
+
+bool IsFullBufferOverwrittenInTextureToBufferCopy(const CopyTextureToBufferCmd* copy) {
+ ASSERT(copy != nullptr);
+
+ if (copy->destination.offset > 0) {
+ // The copy doesn't touch the start of the buffer.
+ return false;
+ }
+
+ const TextureBase* texture = copy->source.texture.Get();
+ const TexelBlockInfo& blockInfo = texture->GetFormat().GetAspectInfo(copy->source.aspect).block;
+ const uint64_t widthInBlocks = copy->copySize.width / blockInfo.width;
+ const uint64_t heightInBlocks = copy->copySize.height / blockInfo.height;
+ const bool multiSlice = copy->copySize.depthOrArrayLayers > 1;
+ const bool multiRow = multiSlice || heightInBlocks > 1;
+
+ if (multiSlice && copy->destination.rowsPerImage > heightInBlocks) {
+ // There are gaps between slices that aren't overwritten
+ return false;
+ }
+
+ const uint64_t copyTextureDataSizePerRow = widthInBlocks * blockInfo.byteSize;
+ if (multiRow && copy->destination.bytesPerRow > copyTextureDataSizePerRow) {
+ // There are gaps between rows that aren't overwritten
+ return false;
+ }
+
+ // After the above checks, we're sure the copy has no gaps.
+ // Now, compute the total number of bytes written.
+ const uint64_t writtenBytes =
+ ComputeRequiredBytesInCopy(blockInfo, copy->copySize, copy->destination.bytesPerRow,
+ copy->destination.rowsPerImage)
+ .AcquireSuccess();
+ if (!copy->destination.buffer->IsFullBufferRange(copy->destination.offset, writtenBytes)) {
+ // The written bytes don't cover the whole buffer.
+ return false;
+ }
+
+ return true;
+}
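IsFullBufferOverwrittenInTextureToBufferCopy answers whether the copy writes every byte of the destination buffer (so, presumably, any lazy zero-initialization of the destination can be skipped). The checks amount to: the copy starts at offset 0, there is no padding between rows or between slices when more than one is copied, and the total written size covers the buffer. A small standalone sketch of the same arithmetic, with hypothetical field names:

    #include <cstdint>

    // Hypothetical, simplified description of a texture-to-buffer copy layout.
    struct CopyLayout {
        uint64_t offset;             // byte offset into the destination buffer
        uint64_t bytesPerRow;        // stride between rows in the buffer
        uint64_t rowsPerImage;       // rows between consecutive slices
        uint64_t widthInBlocks;
        uint64_t heightInBlocks;
        uint64_t depthOrArrayLayers;
        uint64_t bytesPerBlock;
    };

    bool CopiesFullBuffer(const CopyLayout& c, uint64_t bufferSize) {
        if (c.offset > 0) {
            return false;  // the start of the buffer is not touched
        }
        if (c.widthInBlocks == 0 || c.heightInBlocks == 0 || c.depthOrArrayLayers == 0) {
            return bufferSize == 0;  // an empty copy only covers an empty buffer
        }
        const bool multiSlice = c.depthOrArrayLayers > 1;
        const bool multiRow = multiSlice || c.heightInBlocks > 1;
        if (multiSlice && c.rowsPerImage > c.heightInBlocks) {
            return false;  // padding between slices is not overwritten
        }
        const uint64_t tightRowSize = c.widthInBlocks * c.bytesPerBlock;
        if (multiRow && c.bytesPerRow > tightRowSize) {
            return false;  // padding between rows is not overwritten
        }
        // No gaps: full strides for all but the last row of the last slice.
        const uint64_t written =
            c.bytesPerRow * c.rowsPerImage * (c.depthOrArrayLayers - 1) +
            c.bytesPerRow * (c.heightInBlocks - 1) + tightRowSize;
        return written >= bufferSize;
    }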
+
+std::array<float, 4> ConvertToFloatColor(dawn::native::Color color) {
+ const std::array<float, 4> outputValue = {
+ static_cast<float>(color.r), static_cast<float>(color.g), static_cast<float>(color.b),
+ static_cast<float>(color.a)};
+ return outputValue;
+}
+std::array<int32_t, 4> ConvertToSignedIntegerColor(dawn::native::Color color) {
+ const std::array<int32_t, 4> outputValue = {
+ static_cast<int32_t>(color.r), static_cast<int32_t>(color.g), static_cast<int32_t>(color.b),
+ static_cast<int32_t>(color.a)};
+ return outputValue;
+}
+
+std::array<uint32_t, 4> ConvertToUnsignedIntegerColor(dawn::native::Color color) {
+ const std::array<uint32_t, 4> outputValue = {
+ static_cast<uint32_t>(color.r), static_cast<uint32_t>(color.g),
+ static_cast<uint32_t>(color.b), static_cast<uint32_t>(color.a)};
+ return outputValue;
+}
} // namespace dawn::native
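The three Convert*Color helpers above narrow the double-precision clear color into the array type matching the attachment's sample type (float, sint, or uint); the integer variants truncate toward zero. A self-contained illustration of the same casts on made-up values, independent of the Dawn types:

    #include <array>
    #include <cstdint>
    #include <cstdio>

    // Stand-in for the double-based color struct used above.
    struct Color { double r, g, b, a; };

    int main() {
        const Color clear = {0.25, 1.0, 2.0, -1.0};  // arbitrary example values
        const std::array<float, 4> asFloat = {
            static_cast<float>(clear.r), static_cast<float>(clear.g),
            static_cast<float>(clear.b), static_cast<float>(clear.a)};
        const std::array<int32_t, 4> asSint = {
            static_cast<int32_t>(clear.r), static_cast<int32_t>(clear.g),
            static_cast<int32_t>(clear.b), static_cast<int32_t>(clear.a)};
        // The integer casts truncate toward zero: 0.25 -> 0, 2.0 -> 2, -1.0 -> -1.
        std::printf("float: %f %f %f %f\n", asFloat[0], asFloat[1], asFloat[2], asFloat[3]);
        std::printf("sint:  %d %d %d %d\n", asSint[0], asSint[1], asSint[2], asSint[3]);
        return 0;
    }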
diff --git a/chromium/third_party/dawn/src/dawn/native/CommandBuffer.h b/chromium/third_party/dawn/src/dawn/native/CommandBuffer.h
index 455a8d9c026..19d9f68f977 100644
--- a/chromium/third_party/dawn/src/dawn/native/CommandBuffer.h
+++ b/chromium/third_party/dawn/src/dawn/native/CommandBuffer.h
@@ -26,50 +26,49 @@
namespace dawn::native {
- struct BeginRenderPassCmd;
- struct CopyTextureToBufferCmd;
- struct TextureCopy;
+struct BeginRenderPassCmd;
+struct CopyTextureToBufferCmd;
+struct TextureCopy;
- class CommandBufferBase : public ApiObjectBase {
- public:
- CommandBufferBase(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor);
+class CommandBufferBase : public ApiObjectBase {
+ public:
+ CommandBufferBase(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor);
- static CommandBufferBase* MakeError(DeviceBase* device);
+ static CommandBufferBase* MakeError(DeviceBase* device);
- ObjectType GetType() const override;
+ ObjectType GetType() const override;
- MaybeError ValidateCanUseInSubmitNow() const;
+ MaybeError ValidateCanUseInSubmitNow() const;
- const CommandBufferResourceUsage& GetResourceUsages() const;
+ const CommandBufferResourceUsage& GetResourceUsages() const;
- CommandIterator* GetCommandIteratorForTesting();
+ CommandIterator* GetCommandIteratorForTesting();
- protected:
- // Constructor used only for mocking and testing.
- explicit CommandBufferBase(DeviceBase* device);
- void DestroyImpl() override;
+ protected:
+ // Constructor used only for mocking and testing.
+ explicit CommandBufferBase(DeviceBase* device);
+ void DestroyImpl() override;
- CommandIterator mCommands;
+ CommandIterator mCommands;
- private:
- CommandBufferBase(DeviceBase* device, ObjectBase::ErrorTag tag);
+ private:
+ CommandBufferBase(DeviceBase* device, ObjectBase::ErrorTag tag);
- CommandBufferResourceUsage mResourceUsages;
- };
+ CommandBufferResourceUsage mResourceUsages;
+};
- bool IsCompleteSubresourceCopiedTo(const TextureBase* texture,
- const Extent3D copySize,
- const uint32_t mipLevel);
- SubresourceRange GetSubresourcesAffectedByCopy(const TextureCopy& copy,
- const Extent3D& copySize);
+bool IsCompleteSubresourceCopiedTo(const TextureBase* texture,
+ const Extent3D copySize,
+ const uint32_t mipLevel);
+SubresourceRange GetSubresourcesAffectedByCopy(const TextureCopy& copy, const Extent3D& copySize);
- void LazyClearRenderPassAttachments(BeginRenderPassCmd* renderPass);
+void LazyClearRenderPassAttachments(BeginRenderPassCmd* renderPass);
- bool IsFullBufferOverwrittenInTextureToBufferCopy(const CopyTextureToBufferCmd* copy);
+bool IsFullBufferOverwrittenInTextureToBufferCopy(const CopyTextureToBufferCmd* copy);
- std::array<float, 4> ConvertToFloatColor(dawn::native::Color color);
- std::array<int32_t, 4> ConvertToSignedIntegerColor(dawn::native::Color color);
- std::array<uint32_t, 4> ConvertToUnsignedIntegerColor(dawn::native::Color color);
+std::array<float, 4> ConvertToFloatColor(dawn::native::Color color);
+std::array<int32_t, 4> ConvertToSignedIntegerColor(dawn::native::Color color);
+std::array<uint32_t, 4> ConvertToUnsignedIntegerColor(dawn::native::Color color);
} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/CommandBufferStateTracker.cpp b/chromium/third_party/dawn/src/dawn/native/CommandBufferStateTracker.cpp
index ee164c7b291..3a213fe48b2 100644
--- a/chromium/third_party/dawn/src/dawn/native/CommandBufferStateTracker.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/CommandBufferStateTracker.cpp
@@ -30,392 +30,409 @@
namespace dawn::native {
- namespace {
- bool BufferSizesAtLeastAsBig(const ityp::span<uint32_t, uint64_t> unverifiedBufferSizes,
- const std::vector<uint64_t>& pipelineMinBufferSizes) {
- ASSERT(unverifiedBufferSizes.size() == pipelineMinBufferSizes.size());
-
- for (uint32_t i = 0; i < unverifiedBufferSizes.size(); ++i) {
- if (unverifiedBufferSizes[i] < pipelineMinBufferSizes[i]) {
- return false;
- }
- }
-
- return true;
+namespace {
+bool BufferSizesAtLeastAsBig(const ityp::span<uint32_t, uint64_t> unverifiedBufferSizes,
+ const std::vector<uint64_t>& pipelineMinBufferSizes) {
+ ASSERT(unverifiedBufferSizes.size() == pipelineMinBufferSizes.size());
+
+ for (uint32_t i = 0; i < unverifiedBufferSizes.size(); ++i) {
+ if (unverifiedBufferSizes[i] < pipelineMinBufferSizes[i]) {
+ return false;
}
- } // namespace
+ }
- enum ValidationAspect {
- VALIDATION_ASPECT_PIPELINE,
- VALIDATION_ASPECT_BIND_GROUPS,
- VALIDATION_ASPECT_VERTEX_BUFFERS,
- VALIDATION_ASPECT_INDEX_BUFFER,
+ return true;
+}
+} // namespace
- VALIDATION_ASPECT_COUNT
- };
- static_assert(VALIDATION_ASPECT_COUNT == CommandBufferStateTracker::kNumAspects);
+enum ValidationAspect {
+ VALIDATION_ASPECT_PIPELINE,
+ VALIDATION_ASPECT_BIND_GROUPS,
+ VALIDATION_ASPECT_VERTEX_BUFFERS,
+ VALIDATION_ASPECT_INDEX_BUFFER,
- static constexpr CommandBufferStateTracker::ValidationAspects kDispatchAspects =
- 1 << VALIDATION_ASPECT_PIPELINE | 1 << VALIDATION_ASPECT_BIND_GROUPS;
+ VALIDATION_ASPECT_COUNT
+};
+static_assert(VALIDATION_ASPECT_COUNT == CommandBufferStateTracker::kNumAspects);
- static constexpr CommandBufferStateTracker::ValidationAspects kDrawAspects =
- 1 << VALIDATION_ASPECT_PIPELINE | 1 << VALIDATION_ASPECT_BIND_GROUPS |
- 1 << VALIDATION_ASPECT_VERTEX_BUFFERS;
+static constexpr CommandBufferStateTracker::ValidationAspects kDispatchAspects =
+ 1 << VALIDATION_ASPECT_PIPELINE | 1 << VALIDATION_ASPECT_BIND_GROUPS;
- static constexpr CommandBufferStateTracker::ValidationAspects kDrawIndexedAspects =
- 1 << VALIDATION_ASPECT_PIPELINE | 1 << VALIDATION_ASPECT_BIND_GROUPS |
- 1 << VALIDATION_ASPECT_VERTEX_BUFFERS | 1 << VALIDATION_ASPECT_INDEX_BUFFER;
+static constexpr CommandBufferStateTracker::ValidationAspects kDrawAspects =
+ 1 << VALIDATION_ASPECT_PIPELINE | 1 << VALIDATION_ASPECT_BIND_GROUPS |
+ 1 << VALIDATION_ASPECT_VERTEX_BUFFERS;
- static constexpr CommandBufferStateTracker::ValidationAspects kLazyAspects =
- 1 << VALIDATION_ASPECT_BIND_GROUPS | 1 << VALIDATION_ASPECT_VERTEX_BUFFERS |
- 1 << VALIDATION_ASPECT_INDEX_BUFFER;
+static constexpr CommandBufferStateTracker::ValidationAspects kDrawIndexedAspects =
+ 1 << VALIDATION_ASPECT_PIPELINE | 1 << VALIDATION_ASPECT_BIND_GROUPS |
+ 1 << VALIDATION_ASPECT_VERTEX_BUFFERS | 1 << VALIDATION_ASPECT_INDEX_BUFFER;
- MaybeError CommandBufferStateTracker::ValidateCanDispatch() {
- return ValidateOperation(kDispatchAspects);
- }
+static constexpr CommandBufferStateTracker::ValidationAspects kLazyAspects =
+ 1 << VALIDATION_ASPECT_BIND_GROUPS | 1 << VALIDATION_ASPECT_VERTEX_BUFFERS |
+ 1 << VALIDATION_ASPECT_INDEX_BUFFER;
- MaybeError CommandBufferStateTracker::ValidateCanDraw() {
- return ValidateOperation(kDrawAspects);
- }
+CommandBufferStateTracker::CommandBufferStateTracker() = default;
- MaybeError CommandBufferStateTracker::ValidateCanDrawIndexed() {
- return ValidateOperation(kDrawIndexedAspects);
- }
+CommandBufferStateTracker::CommandBufferStateTracker(const CommandBufferStateTracker&) = default;
- MaybeError CommandBufferStateTracker::ValidateBufferInRangeForVertexBuffer(
- uint32_t vertexCount,
- uint32_t firstVertex) {
- RenderPipelineBase* lastRenderPipeline = GetRenderPipeline();
+CommandBufferStateTracker::CommandBufferStateTracker(CommandBufferStateTracker&&) = default;
- const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>&
- vertexBufferSlotsUsedAsVertexBuffer =
- lastRenderPipeline->GetVertexBufferSlotsUsedAsVertexBuffer();
-
- for (auto usedSlotVertex : IterateBitSet(vertexBufferSlotsUsedAsVertexBuffer)) {
- const VertexBufferInfo& vertexBuffer =
- lastRenderPipeline->GetVertexBuffer(usedSlotVertex);
- uint64_t arrayStride = vertexBuffer.arrayStride;
- uint64_t bufferSize = mVertexBufferSizes[usedSlotVertex];
-
- if (arrayStride == 0) {
- DAWN_INVALID_IF(vertexBuffer.usedBytesInStride > bufferSize,
- "Bound vertex buffer size (%u) at slot %u with an arrayStride of 0 "
- "is smaller than the required size for all attributes (%u)",
- bufferSize, static_cast<uint8_t>(usedSlotVertex),
- vertexBuffer.usedBytesInStride);
- } else {
- uint64_t strideCount = static_cast<uint64_t>(firstVertex) + vertexCount;
- if (strideCount != 0u) {
- uint64_t requiredSize =
- (strideCount - 1u) * arrayStride + vertexBuffer.lastStride;
- // firstVertex and vertexCount are in uint32_t,
- // arrayStride must not be larger than kMaxVertexBufferArrayStride, which is
- // currently 2048, and vertexBuffer.lastStride = max(attribute.offset +
- // sizeof(attribute.format)) with attribute.offset being no larger than
- // kMaxVertexBufferArrayStride, so by doing checks in uint64_t we avoid
- // overflows.
- DAWN_INVALID_IF(
- requiredSize > bufferSize,
- "Vertex range (first: %u, count: %u) requires a larger buffer (%u) than "
- "the "
- "bound buffer size (%u) of the vertex buffer at slot %u with stride %u.",
- firstVertex, vertexCount, requiredSize, bufferSize,
- static_cast<uint8_t>(usedSlotVertex), arrayStride);
- }
- }
- }
+CommandBufferStateTracker::~CommandBufferStateTracker() = default;
- return {};
- }
+CommandBufferStateTracker& CommandBufferStateTracker::operator=(const CommandBufferStateTracker&) =
+ default;
- MaybeError CommandBufferStateTracker::ValidateBufferInRangeForInstanceBuffer(
- uint32_t instanceCount,
- uint32_t firstInstance) {
- RenderPipelineBase* lastRenderPipeline = GetRenderPipeline();
+CommandBufferStateTracker& CommandBufferStateTracker::operator=(CommandBufferStateTracker&&) =
+ default;
- const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>&
- vertexBufferSlotsUsedAsInstanceBuffer =
- lastRenderPipeline->GetVertexBufferSlotsUsedAsInstanceBuffer();
-
- for (auto usedSlotInstance : IterateBitSet(vertexBufferSlotsUsedAsInstanceBuffer)) {
- const VertexBufferInfo& vertexBuffer =
- lastRenderPipeline->GetVertexBuffer(usedSlotInstance);
- uint64_t arrayStride = vertexBuffer.arrayStride;
- uint64_t bufferSize = mVertexBufferSizes[usedSlotInstance];
- if (arrayStride == 0) {
- DAWN_INVALID_IF(vertexBuffer.usedBytesInStride > bufferSize,
- "Bound vertex buffer size (%u) at slot %u with an arrayStride of 0 "
- "is smaller than the required size for all attributes (%u)",
- bufferSize, static_cast<uint8_t>(usedSlotInstance),
- vertexBuffer.usedBytesInStride);
- } else {
- uint64_t strideCount = static_cast<uint64_t>(firstInstance) + instanceCount;
- if (strideCount != 0u) {
- uint64_t requiredSize =
- (strideCount - 1u) * arrayStride + vertexBuffer.lastStride;
- // firstInstance and instanceCount are in uint32_t,
- // arrayStride must not be larger than kMaxVertexBufferArrayStride, which is
- // currently 2048, and vertexBuffer.lastStride = max(attribute.offset +
- // sizeof(attribute.format)) with attribute.offset being no larger than
- // kMaxVertexBufferArrayStride, so by doing checks in uint64_t we avoid
- // overflows.
- DAWN_INVALID_IF(
- requiredSize > bufferSize,
- "Instance range (first: %u, count: %u) requires a larger buffer (%u) than "
- "the "
- "bound buffer size (%u) of the vertex buffer at slot %u with stride %u.",
- firstInstance, instanceCount, requiredSize, bufferSize,
- static_cast<uint8_t>(usedSlotInstance), arrayStride);
- }
- }
- }
+MaybeError CommandBufferStateTracker::ValidateCanDispatch() {
+ return ValidateOperation(kDispatchAspects);
+}
- return {};
- }
+MaybeError CommandBufferStateTracker::ValidateCanDraw() {
+ return ValidateOperation(kDrawAspects);
+}
- MaybeError CommandBufferStateTracker::ValidateIndexBufferInRange(uint32_t indexCount,
- uint32_t firstIndex) {
- // Validate the range of index buffer
- // firstIndex and indexCount are in uint32_t, while IndexFormatSize is 2 (for
- // wgpu::IndexFormat::Uint16) or 4 (for wgpu::IndexFormat::Uint32), so by doing checks in
- // uint64_t we avoid overflows.
- DAWN_INVALID_IF(
- (static_cast<uint64_t>(firstIndex) + indexCount) * IndexFormatSize(mIndexFormat) >
- mIndexBufferSize,
- "Index range (first: %u, count: %u, format: %s) does not fit in index buffer size "
- "(%u).",
- firstIndex, indexCount, mIndexFormat, mIndexBufferSize);
+MaybeError CommandBufferStateTracker::ValidateCanDrawIndexed() {
+ return ValidateOperation(kDrawIndexedAspects);
+}
+
+MaybeError CommandBufferStateTracker::ValidateBufferInRangeForVertexBuffer(uint32_t vertexCount,
+ uint32_t firstVertex) {
+ uint64_t strideCount = static_cast<uint64_t>(firstVertex) + vertexCount;
+
+ if (strideCount == 0) {
+ // All vertex step mode buffers are always in range if stride count is zero
return {};
}
- MaybeError CommandBufferStateTracker::ValidateOperation(ValidationAspects requiredAspects) {
- // Fast return-true path if everything is good
- ValidationAspects missingAspects = requiredAspects & ~mAspects;
- if (missingAspects.none()) {
- return {};
+ RenderPipelineBase* lastRenderPipeline = GetRenderPipeline();
+
+ const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>& vertexBufferSlotsUsedAsVertexBuffer =
+ lastRenderPipeline->GetVertexBufferSlotsUsedAsVertexBuffer();
+
+ for (auto usedSlotVertex : IterateBitSet(vertexBufferSlotsUsedAsVertexBuffer)) {
+ const VertexBufferInfo& vertexBuffer = lastRenderPipeline->GetVertexBuffer(usedSlotVertex);
+ uint64_t arrayStride = vertexBuffer.arrayStride;
+ uint64_t bufferSize = mVertexBufferSizes[usedSlotVertex];
+
+ if (arrayStride == 0) {
+ DAWN_INVALID_IF(vertexBuffer.usedBytesInStride > bufferSize,
+ "Bound vertex buffer size (%u) at slot %u with an arrayStride of 0 "
+ "is smaller than the required size for all attributes (%u)",
+ bufferSize, static_cast<uint8_t>(usedSlotVertex),
+ vertexBuffer.usedBytesInStride);
+ } else {
+ DAWN_ASSERT(strideCount != 0u);
+ uint64_t requiredSize = (strideCount - 1u) * arrayStride + vertexBuffer.lastStride;
+ // firstVertex and vertexCount are in uint32_t,
+ // arrayStride must not be larger than kMaxVertexBufferArrayStride, which is
+ // currently 2048, and vertexBuffer.lastStride = max(attribute.offset +
+ // sizeof(attribute.format)) with attribute.offset being no larger than
+ // kMaxVertexBufferArrayStride, so by doing checks in uint64_t we avoid
+ // overflows.
+ DAWN_INVALID_IF(
+ requiredSize > bufferSize,
+ "Vertex range (first: %u, count: %u) requires a larger buffer (%u) than "
+ "the "
+ "bound buffer size (%u) of the vertex buffer at slot %u with stride %u.",
+ firstVertex, vertexCount, requiredSize, bufferSize,
+ static_cast<uint8_t>(usedSlotVertex), arrayStride);
}
+ }
- // Generate an error immediately if a non-lazy aspect is missing as computing lazy aspects
- // requires the pipeline to be set.
- DAWN_TRY(CheckMissingAspects(missingAspects & ~kLazyAspects));
-
- RecomputeLazyAspects(missingAspects);
+ return {};
+}
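The required-size formula used above is worth spelling out with numbers: a draw touching strideCount vertices only needs the attributes of the last vertex, not a full stride, so the buffer must hold (strideCount - 1) * arrayStride + lastStride bytes, computed in uint64_t so the 32-bit counts cannot overflow. A short numeric check of that formula with hypothetical values:

    #include <cassert>
    #include <cstdint>

    // Same arithmetic as above; lastStride stands for the end of the furthest attribute.
    uint64_t RequiredVertexBufferSize(uint32_t firstVertex,
                                      uint32_t vertexCount,
                                      uint64_t arrayStride,
                                      uint64_t lastStride) {
        const uint64_t strideCount = static_cast<uint64_t>(firstVertex) + vertexCount;
        assert(strideCount != 0);
        return (strideCount - 1u) * arrayStride + lastStride;
    }

    int main() {
        // Example: firstVertex = 2, vertexCount = 3 -> vertices 2, 3, 4 are read.
        // With a 16-byte stride and an 8-byte final attribute span, the last byte
        // read is at 4 * 16 + 8 = 72, so a 72-byte buffer is exactly enough.
        assert(RequiredVertexBufferSize(2, 3, 16, 8) == 72);
        // One more vertex pushes the requirement to 5 * 16 + 8 = 88 bytes.
        assert(RequiredVertexBufferSize(2, 4, 16, 8) == 88);
        return 0;
    }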
- DAWN_TRY(CheckMissingAspects(requiredAspects & ~mAspects));
+MaybeError CommandBufferStateTracker::ValidateBufferInRangeForInstanceBuffer(
+ uint32_t instanceCount,
+ uint32_t firstInstance) {
+ uint64_t strideCount = static_cast<uint64_t>(firstInstance) + instanceCount;
+ if (strideCount == 0) {
+ // All instance step mode buffers are always in range if stride count is zero
return {};
}
- void CommandBufferStateTracker::RecomputeLazyAspects(ValidationAspects aspects) {
- ASSERT(mAspects[VALIDATION_ASPECT_PIPELINE]);
- ASSERT((aspects & ~kLazyAspects).none());
-
- if (aspects[VALIDATION_ASPECT_BIND_GROUPS]) {
- bool matches = true;
-
- for (BindGroupIndex i : IterateBitSet(mLastPipelineLayout->GetBindGroupLayoutsMask())) {
- if (mBindgroups[i] == nullptr ||
- mLastPipelineLayout->GetBindGroupLayout(i) != mBindgroups[i]->GetLayout() ||
- !BufferSizesAtLeastAsBig(mBindgroups[i]->GetUnverifiedBufferSizes(),
- (*mMinBufferSizes)[i])) {
- matches = false;
- break;
- }
- }
-
- if (matches) {
- mAspects.set(VALIDATION_ASPECT_BIND_GROUPS);
- }
- }
-
- if (aspects[VALIDATION_ASPECT_VERTEX_BUFFERS]) {
- RenderPipelineBase* lastRenderPipeline = GetRenderPipeline();
-
- const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>& requiredVertexBuffers =
- lastRenderPipeline->GetVertexBufferSlotsUsed();
- if (IsSubset(requiredVertexBuffers, mVertexBufferSlotsUsed)) {
- mAspects.set(VALIDATION_ASPECT_VERTEX_BUFFERS);
- }
- }
-
- if (aspects[VALIDATION_ASPECT_INDEX_BUFFER] && mIndexBufferSet) {
- RenderPipelineBase* lastRenderPipeline = GetRenderPipeline();
- if (!IsStripPrimitiveTopology(lastRenderPipeline->GetPrimitiveTopology()) ||
- mIndexFormat == lastRenderPipeline->GetStripIndexFormat()) {
- mAspects.set(VALIDATION_ASPECT_INDEX_BUFFER);
- }
+ RenderPipelineBase* lastRenderPipeline = GetRenderPipeline();
+
+ const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>& vertexBufferSlotsUsedAsInstanceBuffer =
+ lastRenderPipeline->GetVertexBufferSlotsUsedAsInstanceBuffer();
+
+ for (auto usedSlotInstance : IterateBitSet(vertexBufferSlotsUsedAsInstanceBuffer)) {
+ const VertexBufferInfo& vertexBuffer =
+ lastRenderPipeline->GetVertexBuffer(usedSlotInstance);
+ uint64_t arrayStride = vertexBuffer.arrayStride;
+ uint64_t bufferSize = mVertexBufferSizes[usedSlotInstance];
+ if (arrayStride == 0) {
+ DAWN_INVALID_IF(vertexBuffer.usedBytesInStride > bufferSize,
+ "Bound vertex buffer size (%u) at slot %u with an arrayStride of 0 "
+ "is smaller than the required size for all attributes (%u)",
+ bufferSize, static_cast<uint8_t>(usedSlotInstance),
+ vertexBuffer.usedBytesInStride);
+ } else {
+ DAWN_ASSERT(strideCount != 0u);
+ uint64_t requiredSize = (strideCount - 1u) * arrayStride + vertexBuffer.lastStride;
+ // firstInstance and instanceCount are in uint32_t,
+ // arrayStride must not be larger than kMaxVertexBufferArrayStride, which is
+ // currently 2048, and vertexBuffer.lastStride = max(attribute.offset +
+ // sizeof(attribute.format)) with attribute.offset being no larger than
+ // kMaxVertexBufferArrayStride, so by doing checks in uint64_t we avoid
+ // overflows.
+ DAWN_INVALID_IF(
+ requiredSize > bufferSize,
+ "Instance range (first: %u, count: %u) requires a larger buffer (%u) than "
+ "the "
+ "bound buffer size (%u) of the vertex buffer at slot %u with stride %u.",
+ firstInstance, instanceCount, requiredSize, bufferSize,
+ static_cast<uint8_t>(usedSlotInstance), arrayStride);
}
}
- MaybeError CommandBufferStateTracker::CheckMissingAspects(ValidationAspects aspects) {
- if (!aspects.any()) {
- return {};
- }
+ return {};
+}
+
+MaybeError CommandBufferStateTracker::ValidateIndexBufferInRange(uint32_t indexCount,
+ uint32_t firstIndex) {
+ // Validate the range of index buffer
+ // firstIndex and indexCount are in uint32_t, while IndexFormatSize is 2 (for
+ // wgpu::IndexFormat::Uint16) or 4 (for wgpu::IndexFormat::Uint32), so by doing checks in
+ // uint64_t we avoid overflows.
+ DAWN_INVALID_IF(
+ (static_cast<uint64_t>(firstIndex) + indexCount) * IndexFormatSize(mIndexFormat) >
+ mIndexBufferSize,
+ "Index range (first: %u, count: %u, format: %s) does not fit in index buffer size "
+ "(%u).",
+ firstIndex, indexCount, mIndexFormat, mIndexBufferSize);
+ return {};
+}
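The comment above explains why this check is done in uint64_t: firstIndex and indexCount are both uint32_t, and their sum times the index size can exceed 2^32 even though each operand fits. A small demonstration of the difference under those assumptions:

    #include <cstdint>
    #include <cstdio>

    int main() {
        const uint32_t firstIndex = 0xFFFFFFF0u;
        const uint32_t indexCount = 0x20u;
        const uint32_t indexSize = 4;  // a 32-bit index format

        // Doing the arithmetic in 32 bits wraps around and would wrongly accept
        // a tiny index buffer.
        const uint32_t wrapped = (firstIndex + indexCount) * indexSize;
        // Promoting to 64 bits first gives the real byte requirement.
        const uint64_t required =
            (static_cast<uint64_t>(firstIndex) + indexCount) * indexSize;

        std::printf("32-bit: %u bytes, 64-bit: %llu bytes\n", wrapped,
                    static_cast<unsigned long long>(required));
        return 0;
    }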
+
+MaybeError CommandBufferStateTracker::ValidateOperation(ValidationAspects requiredAspects) {
+ // Fast return-true path if everything is good
+ ValidationAspects missingAspects = requiredAspects & ~mAspects;
+ if (missingAspects.none()) {
+ return {};
+ }
- DAWN_INVALID_IF(aspects[VALIDATION_ASPECT_PIPELINE], "No pipeline set.");
+ // Generate an error immediately if a non-lazy aspect is missing as computing lazy aspects
+ // requires the pipeline to be set.
+ DAWN_TRY(CheckMissingAspects(missingAspects & ~kLazyAspects));
- if (DAWN_UNLIKELY(aspects[VALIDATION_ASPECT_INDEX_BUFFER])) {
- DAWN_INVALID_IF(!mIndexBufferSet, "Index buffer was not set.");
+ RecomputeLazyAspects(missingAspects);
- RenderPipelineBase* lastRenderPipeline = GetRenderPipeline();
- wgpu::IndexFormat pipelineIndexFormat = lastRenderPipeline->GetStripIndexFormat();
+ DAWN_TRY(CheckMissingAspects(requiredAspects & ~mAspects));
- if (IsStripPrimitiveTopology(lastRenderPipeline->GetPrimitiveTopology())) {
- DAWN_INVALID_IF(
- pipelineIndexFormat == wgpu::IndexFormat::Undefined,
- "%s has a strip primitive topology (%s) but a strip index format of %s, which "
- "prevents it for being used for indexed draw calls.",
- lastRenderPipeline, lastRenderPipeline->GetPrimitiveTopology(),
- pipelineIndexFormat);
+ return {};
+}
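ValidateOperation shows the state tracker's two-tier scheme: the aspects are a small std::bitset, every draw or dispatch first checks cheaply whether all required bits are already set, and only on a miss are the "lazy" aspects (bind groups, vertex buffers, index buffer) recomputed before a detailed error is produced. A compact standalone sketch of that pattern with hypothetical aspect names and a stubbed recomputation:

    #include <bitset>
    #include <cassert>

    enum Aspect { kPipeline, kBindGroups, kVertexBuffers, kIndexBuffer, kCount };
    using Aspects = std::bitset<kCount>;

    const Aspects kDrawAspects{(1u << kPipeline) | (1u << kBindGroups) | (1u << kVertexBuffers)};
    const Aspects kLazy{(1u << kBindGroups) | (1u << kVertexBuffers) | (1u << kIndexBuffer)};

    struct Tracker {
        Aspects aspects;

        // Stubbed recomputation: the real code re-checks bind group layouts and
        // bound vertex buffers against the current pipeline.
        void RecomputeLazy(Aspects missing) { aspects |= (missing & kLazy); }

        bool ValidateOperation(Aspects required) {
            Aspects missing = required & ~aspects;
            if (missing.none()) {
                return true;                      // fast path: everything already valid
            }
            if ((missing & ~kLazy).any()) {
                return false;                     // e.g. no pipeline set: fail immediately
            }
            RecomputeLazy(missing);               // try to satisfy the lazy aspects
            return (required & ~aspects).none();  // report whatever is still missing
        }
    };

    int main() {
        Tracker t;
        assert(!t.ValidateOperation(kDrawAspects));  // nothing set yet
        t.aspects.set(kPipeline);
        assert(t.ValidateOperation(kDrawAspects));   // lazy aspects recomputed as satisfied
        return 0;
    }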
- DAWN_INVALID_IF(
- mIndexFormat != pipelineIndexFormat,
- "Strip index format (%s) of %s does not match index buffer format (%s).",
- pipelineIndexFormat, lastRenderPipeline, mIndexFormat);
- }
+void CommandBufferStateTracker::RecomputeLazyAspects(ValidationAspects aspects) {
+ ASSERT(mAspects[VALIDATION_ASPECT_PIPELINE]);
+ ASSERT((aspects & ~kLazyAspects).none());
- // The chunk of code above should be similar to the one in |RecomputeLazyAspects|.
- // It returns the first invalid state found. We shouldn't be able to reach this line
- // because to have invalid aspects one of the above conditions must have failed earlier.
- // If this is reached, make sure lazy aspects and the error checks above are consistent.
- UNREACHABLE();
- return DAWN_FORMAT_VALIDATION_ERROR("Index buffer is invalid.");
- }
+ if (aspects[VALIDATION_ASPECT_BIND_GROUPS]) {
+ bool matches = true;
- // TODO(dawn:563): Indicate which slots were not set.
- DAWN_INVALID_IF(aspects[VALIDATION_ASPECT_VERTEX_BUFFERS],
- "Vertex buffer slots required by %s were not set.", GetRenderPipeline());
-
- if (DAWN_UNLIKELY(aspects[VALIDATION_ASPECT_BIND_GROUPS])) {
- for (BindGroupIndex i : IterateBitSet(mLastPipelineLayout->GetBindGroupLayoutsMask())) {
- ASSERT(HasPipeline());
-
- DAWN_INVALID_IF(mBindgroups[i] == nullptr, "No bind group set at index %u.",
- static_cast<uint32_t>(i));
-
- BindGroupLayoutBase* requiredBGL = mLastPipelineLayout->GetBindGroupLayout(i);
- BindGroupLayoutBase* currentBGL = mBindgroups[i]->GetLayout();
-
- DAWN_INVALID_IF(
- requiredBGL->GetPipelineCompatibilityToken() != PipelineCompatibilityToken(0) &&
- currentBGL->GetPipelineCompatibilityToken() !=
- requiredBGL->GetPipelineCompatibilityToken(),
- "The current pipeline (%s) was created with a default layout, and is not "
- "compatible with the %s at index %u which uses a %s that was not created by "
- "the pipeline. Either use the bind group layout returned by calling "
- "getBindGroupLayout(%u) on the pipeline when creating the bind group, or "
- "provide an explicit pipeline layout when creating the pipeline.",
- mLastPipeline, mBindgroups[i], static_cast<uint32_t>(i), currentBGL,
- static_cast<uint32_t>(i));
-
- DAWN_INVALID_IF(
- requiredBGL->GetPipelineCompatibilityToken() == PipelineCompatibilityToken(0) &&
- currentBGL->GetPipelineCompatibilityToken() !=
- PipelineCompatibilityToken(0),
- "%s at index %u uses a %s which was created as part of the default layout for "
- "a different pipeline than the current one (%s), and as a result is not "
- "compatible. Use an explicit bind group layout when creating bind groups and "
- "an explicit pipeline layout when creating pipelines to share bind groups "
- "between pipelines.",
- mBindgroups[i], static_cast<uint32_t>(i), currentBGL, mLastPipeline);
-
- DAWN_INVALID_IF(
- mLastPipelineLayout->GetBindGroupLayout(i) != mBindgroups[i]->GetLayout(),
- "Bind group layout %s of pipeline layout %s does not match layout %s of bind "
- "group %s at index %u.",
- requiredBGL, mLastPipelineLayout, currentBGL, mBindgroups[i],
- static_cast<uint32_t>(i));
-
- // TODO(dawn:563): Report the binding sizes and which ones are failing.
- DAWN_INVALID_IF(!BufferSizesAtLeastAsBig(mBindgroups[i]->GetUnverifiedBufferSizes(),
- (*mMinBufferSizes)[i]),
- "Binding sizes are too small for bind group %s at index %u",
- mBindgroups[i], static_cast<uint32_t>(i));
+ for (BindGroupIndex i : IterateBitSet(mLastPipelineLayout->GetBindGroupLayoutsMask())) {
+ if (mBindgroups[i] == nullptr ||
+ mLastPipelineLayout->GetBindGroupLayout(i) != mBindgroups[i]->GetLayout() ||
+ !BufferSizesAtLeastAsBig(mBindgroups[i]->GetUnverifiedBufferSizes(),
+ (*mMinBufferSizes)[i])) {
+ matches = false;
+ break;
}
-
- // The chunk of code above should be similar to the one in |RecomputeLazyAspects|.
- // It returns the first invalid state found. We shouldn't be able to reach this line
- // because to have invalid aspects one of the above conditions must have failed earlier.
- // If this is reached, make sure lazy aspects and the error checks above are consistent.
- UNREACHABLE();
- return DAWN_FORMAT_VALIDATION_ERROR("Bind groups are invalid.");
}
- UNREACHABLE();
- }
-
- void CommandBufferStateTracker::SetComputePipeline(ComputePipelineBase* pipeline) {
- SetPipelineCommon(pipeline);
- }
-
- void CommandBufferStateTracker::SetRenderPipeline(RenderPipelineBase* pipeline) {
- SetPipelineCommon(pipeline);
- }
-
- void CommandBufferStateTracker::SetBindGroup(BindGroupIndex index,
- BindGroupBase* bindgroup,
- uint32_t dynamicOffsetCount,
- const uint32_t* dynamicOffsets) {
- mBindgroups[index] = bindgroup;
- mDynamicOffsets[index].assign(dynamicOffsets, dynamicOffsets + dynamicOffsetCount);
- mAspects.reset(VALIDATION_ASPECT_BIND_GROUPS);
+ if (matches) {
+ mAspects.set(VALIDATION_ASPECT_BIND_GROUPS);
+ }
}
- void CommandBufferStateTracker::SetIndexBuffer(wgpu::IndexFormat format, uint64_t size) {
- mIndexBufferSet = true;
- mIndexFormat = format;
- mIndexBufferSize = size;
- }
+ if (aspects[VALIDATION_ASPECT_VERTEX_BUFFERS]) {
+ RenderPipelineBase* lastRenderPipeline = GetRenderPipeline();
- void CommandBufferStateTracker::SetVertexBuffer(VertexBufferSlot slot, uint64_t size) {
- mVertexBufferSlotsUsed.set(slot);
- mVertexBufferSizes[slot] = size;
+ const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>& requiredVertexBuffers =
+ lastRenderPipeline->GetVertexBufferSlotsUsed();
+ if (IsSubset(requiredVertexBuffers, mVertexBufferSlotsUsed)) {
+ mAspects.set(VALIDATION_ASPECT_VERTEX_BUFFERS);
+ }
}
- void CommandBufferStateTracker::SetPipelineCommon(PipelineBase* pipeline) {
- mLastPipeline = pipeline;
- mLastPipelineLayout = pipeline != nullptr ? pipeline->GetLayout() : nullptr;
- mMinBufferSizes = pipeline != nullptr ? &pipeline->GetMinBufferSizes() : nullptr;
-
- mAspects.set(VALIDATION_ASPECT_PIPELINE);
-
- // Reset lazy aspects so they get recomputed on the next operation.
- mAspects &= ~kLazyAspects;
+ if (aspects[VALIDATION_ASPECT_INDEX_BUFFER] && mIndexBufferSet) {
+ RenderPipelineBase* lastRenderPipeline = GetRenderPipeline();
+ if (!IsStripPrimitiveTopology(lastRenderPipeline->GetPrimitiveTopology()) ||
+ mIndexFormat == lastRenderPipeline->GetStripIndexFormat()) {
+ mAspects.set(VALIDATION_ASPECT_INDEX_BUFFER);
+ }
}
+}
- BindGroupBase* CommandBufferStateTracker::GetBindGroup(BindGroupIndex index) const {
- return mBindgroups[index];
+MaybeError CommandBufferStateTracker::CheckMissingAspects(ValidationAspects aspects) {
+ if (!aspects.any()) {
+ return {};
}
- const std::vector<uint32_t>& CommandBufferStateTracker::GetDynamicOffsets(
- BindGroupIndex index) const {
- return mDynamicOffsets[index];
- }
+ DAWN_INVALID_IF(aspects[VALIDATION_ASPECT_PIPELINE], "No pipeline set.");
- bool CommandBufferStateTracker::HasPipeline() const {
- return mLastPipeline != nullptr;
- }
+ if (DAWN_UNLIKELY(aspects[VALIDATION_ASPECT_INDEX_BUFFER])) {
+ DAWN_INVALID_IF(!mIndexBufferSet, "Index buffer was not set.");
- RenderPipelineBase* CommandBufferStateTracker::GetRenderPipeline() const {
- ASSERT(HasPipeline() && mLastPipeline->GetType() == ObjectType::RenderPipeline);
- return static_cast<RenderPipelineBase*>(mLastPipeline);
- }
+ RenderPipelineBase* lastRenderPipeline = GetRenderPipeline();
+ wgpu::IndexFormat pipelineIndexFormat = lastRenderPipeline->GetStripIndexFormat();
+
+ if (IsStripPrimitiveTopology(lastRenderPipeline->GetPrimitiveTopology())) {
+ DAWN_INVALID_IF(
+ pipelineIndexFormat == wgpu::IndexFormat::Undefined,
+ "%s has a strip primitive topology (%s) but a strip index format of %s, which "
+ "prevents it for being used for indexed draw calls.",
+ lastRenderPipeline, lastRenderPipeline->GetPrimitiveTopology(),
+ pipelineIndexFormat);
+
+ DAWN_INVALID_IF(
+ mIndexFormat != pipelineIndexFormat,
+ "Strip index format (%s) of %s does not match index buffer format (%s).",
+ pipelineIndexFormat, lastRenderPipeline, mIndexFormat);
+ }
- ComputePipelineBase* CommandBufferStateTracker::GetComputePipeline() const {
- ASSERT(HasPipeline() && mLastPipeline->GetType() == ObjectType::ComputePipeline);
- return static_cast<ComputePipelineBase*>(mLastPipeline);
+ // The chunk of code above should be similar to the one in |RecomputeLazyAspects|.
+ // It returns the first invalid state found. We shouldn't be able to reach this line
+ // because to have invalid aspects one of the above conditions must have failed earlier.
+ // If this is reached, make sure lazy aspects and the error checks above are consistent.
+ UNREACHABLE();
+ return DAWN_FORMAT_VALIDATION_ERROR("Index buffer is invalid.");
}
- PipelineLayoutBase* CommandBufferStateTracker::GetPipelineLayout() const {
- return mLastPipelineLayout;
- }
+ // TODO(dawn:563): Indicate which slots were not set.
+ DAWN_INVALID_IF(aspects[VALIDATION_ASPECT_VERTEX_BUFFERS],
+ "Vertex buffer slots required by %s were not set.", GetRenderPipeline());
+
+ if (DAWN_UNLIKELY(aspects[VALIDATION_ASPECT_BIND_GROUPS])) {
+ for (BindGroupIndex i : IterateBitSet(mLastPipelineLayout->GetBindGroupLayoutsMask())) {
+ ASSERT(HasPipeline());
+
+ DAWN_INVALID_IF(mBindgroups[i] == nullptr, "No bind group set at index %u.",
+ static_cast<uint32_t>(i));
+
+ BindGroupLayoutBase* requiredBGL = mLastPipelineLayout->GetBindGroupLayout(i);
+ BindGroupLayoutBase* currentBGL = mBindgroups[i]->GetLayout();
+
+ DAWN_INVALID_IF(
+ requiredBGL->GetPipelineCompatibilityToken() != PipelineCompatibilityToken(0) &&
+ currentBGL->GetPipelineCompatibilityToken() !=
+ requiredBGL->GetPipelineCompatibilityToken(),
+ "The current pipeline (%s) was created with a default layout, and is not "
+ "compatible with the %s at index %u which uses a %s that was not created by "
+ "the pipeline. Either use the bind group layout returned by calling "
+ "getBindGroupLayout(%u) on the pipeline when creating the bind group, or "
+ "provide an explicit pipeline layout when creating the pipeline.",
+ mLastPipeline, mBindgroups[i], static_cast<uint32_t>(i), currentBGL,
+ static_cast<uint32_t>(i));
+
+ DAWN_INVALID_IF(
+ requiredBGL->GetPipelineCompatibilityToken() == PipelineCompatibilityToken(0) &&
+ currentBGL->GetPipelineCompatibilityToken() != PipelineCompatibilityToken(0),
+ "%s at index %u uses a %s which was created as part of the default layout for "
+ "a different pipeline than the current one (%s), and as a result is not "
+ "compatible. Use an explicit bind group layout when creating bind groups and "
+ "an explicit pipeline layout when creating pipelines to share bind groups "
+ "between pipelines.",
+ mBindgroups[i], static_cast<uint32_t>(i), currentBGL, mLastPipeline);
+
+ DAWN_INVALID_IF(
+ mLastPipelineLayout->GetBindGroupLayout(i) != mBindgroups[i]->GetLayout(),
+ "Bind group layout %s of pipeline layout %s does not match layout %s of bind "
+ "group %s at index %u.",
+ requiredBGL, mLastPipelineLayout, currentBGL, mBindgroups[i],
+ static_cast<uint32_t>(i));
+
+ // TODO(dawn:563): Report the binding sizes and which ones are failing.
+ DAWN_INVALID_IF(!BufferSizesAtLeastAsBig(mBindgroups[i]->GetUnverifiedBufferSizes(),
+ (*mMinBufferSizes)[i]),
+ "Binding sizes are too small for bind group %s at index %u",
+ mBindgroups[i], static_cast<uint32_t>(i));
+ }
- wgpu::IndexFormat CommandBufferStateTracker::GetIndexFormat() const {
- return mIndexFormat;
+ // The chunk of code above should be similar to the one in |RecomputeLazyAspects|.
+ // It returns the first invalid state found. We shouldn't be able to reach this line
+ // because to have invalid aspects one of the above conditions must have failed earlier.
+ // If this is reached, make sure lazy aspects and the error checks above are consistent.
+ UNREACHABLE();
+ return DAWN_FORMAT_VALIDATION_ERROR("Bind groups are invalid.");
}
- uint64_t CommandBufferStateTracker::GetIndexBufferSize() const {
- return mIndexBufferSize;
- }
+ UNREACHABLE();
+}
+
+void CommandBufferStateTracker::SetComputePipeline(ComputePipelineBase* pipeline) {
+ SetPipelineCommon(pipeline);
+}
+
+void CommandBufferStateTracker::SetRenderPipeline(RenderPipelineBase* pipeline) {
+ SetPipelineCommon(pipeline);
+}
+
+void CommandBufferStateTracker::SetBindGroup(BindGroupIndex index,
+ BindGroupBase* bindgroup,
+ uint32_t dynamicOffsetCount,
+ const uint32_t* dynamicOffsets) {
+ mBindgroups[index] = bindgroup;
+ mDynamicOffsets[index].assign(dynamicOffsets, dynamicOffsets + dynamicOffsetCount);
+ mAspects.reset(VALIDATION_ASPECT_BIND_GROUPS);
+}
+
+void CommandBufferStateTracker::SetIndexBuffer(wgpu::IndexFormat format, uint64_t size) {
+ mIndexBufferSet = true;
+ mIndexFormat = format;
+ mIndexBufferSize = size;
+}
+
+void CommandBufferStateTracker::SetVertexBuffer(VertexBufferSlot slot, uint64_t size) {
+ mVertexBufferSlotsUsed.set(slot);
+ mVertexBufferSizes[slot] = size;
+}
+
+void CommandBufferStateTracker::SetPipelineCommon(PipelineBase* pipeline) {
+ mLastPipeline = pipeline;
+ mLastPipelineLayout = pipeline != nullptr ? pipeline->GetLayout() : nullptr;
+ mMinBufferSizes = pipeline != nullptr ? &pipeline->GetMinBufferSizes() : nullptr;
+
+ mAspects.set(VALIDATION_ASPECT_PIPELINE);
+
+ // Reset lazy aspects so they get recomputed on the next operation.
+ mAspects &= ~kLazyAspects;
+}
+
+BindGroupBase* CommandBufferStateTracker::GetBindGroup(BindGroupIndex index) const {
+ return mBindgroups[index];
+}
+
+const std::vector<uint32_t>& CommandBufferStateTracker::GetDynamicOffsets(
+ BindGroupIndex index) const {
+ return mDynamicOffsets[index];
+}
+
+bool CommandBufferStateTracker::HasPipeline() const {
+ return mLastPipeline != nullptr;
+}
+
+RenderPipelineBase* CommandBufferStateTracker::GetRenderPipeline() const {
+ ASSERT(HasPipeline() && mLastPipeline->GetType() == ObjectType::RenderPipeline);
+ return static_cast<RenderPipelineBase*>(mLastPipeline);
+}
+
+ComputePipelineBase* CommandBufferStateTracker::GetComputePipeline() const {
+ ASSERT(HasPipeline() && mLastPipeline->GetType() == ObjectType::ComputePipeline);
+ return static_cast<ComputePipelineBase*>(mLastPipeline);
+}
+
+PipelineLayoutBase* CommandBufferStateTracker::GetPipelineLayout() const {
+ return mLastPipelineLayout;
+}
+
+wgpu::IndexFormat CommandBufferStateTracker::GetIndexFormat() const {
+ return mIndexFormat;
+}
+
+uint64_t CommandBufferStateTracker::GetIndexBufferSize() const {
+ return mIndexBufferSize;
+}
} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/CommandBufferStateTracker.h b/chromium/third_party/dawn/src/dawn/native/CommandBufferStateTracker.h
index d212876f641..43ac23834b1 100644
--- a/chromium/third_party/dawn/src/dawn/native/CommandBufferStateTracker.h
+++ b/chromium/third_party/dawn/src/dawn/native/CommandBufferStateTracker.h
@@ -15,6 +15,8 @@
#ifndef SRC_DAWN_NATIVE_COMMANDBUFFERSTATETRACKER_H_
#define SRC_DAWN_NATIVE_COMMANDBUFFERSTATETRACKER_H_
+#include <vector>
+
#include "dawn/common/Constants.h"
#include "dawn/common/ityp_array.h"
#include "dawn/common/ityp_bitset.h"
@@ -24,62 +26,70 @@
namespace dawn::native {
- class CommandBufferStateTracker {
- public:
- // Non-state-modifying validation functions
- MaybeError ValidateCanDispatch();
- MaybeError ValidateCanDraw();
- MaybeError ValidateCanDrawIndexed();
- MaybeError ValidateBufferInRangeForVertexBuffer(uint32_t vertexCount, uint32_t firstVertex);
- MaybeError ValidateBufferInRangeForInstanceBuffer(uint32_t instanceCount,
- uint32_t firstInstance);
- MaybeError ValidateIndexBufferInRange(uint32_t indexCount, uint32_t firstIndex);
-
- // State-modifying methods
- void SetComputePipeline(ComputePipelineBase* pipeline);
- void SetRenderPipeline(RenderPipelineBase* pipeline);
- void SetBindGroup(BindGroupIndex index,
- BindGroupBase* bindgroup,
- uint32_t dynamicOffsetCount,
- const uint32_t* dynamicOffsets);
- void SetIndexBuffer(wgpu::IndexFormat format, uint64_t size);
- void SetVertexBuffer(VertexBufferSlot slot, uint64_t size);
-
- static constexpr size_t kNumAspects = 4;
- using ValidationAspects = std::bitset<kNumAspects>;
-
- BindGroupBase* GetBindGroup(BindGroupIndex index) const;
- const std::vector<uint32_t>& GetDynamicOffsets(BindGroupIndex index) const;
- bool HasPipeline() const;
- RenderPipelineBase* GetRenderPipeline() const;
- ComputePipelineBase* GetComputePipeline() const;
- PipelineLayoutBase* GetPipelineLayout() const;
- wgpu::IndexFormat GetIndexFormat() const;
- uint64_t GetIndexBufferSize() const;
-
- private:
- MaybeError ValidateOperation(ValidationAspects requiredAspects);
- void RecomputeLazyAspects(ValidationAspects aspects);
- MaybeError CheckMissingAspects(ValidationAspects aspects);
-
- void SetPipelineCommon(PipelineBase* pipeline);
-
- ValidationAspects mAspects;
-
- ityp::array<BindGroupIndex, BindGroupBase*, kMaxBindGroups> mBindgroups = {};
- ityp::array<BindGroupIndex, std::vector<uint32_t>, kMaxBindGroups> mDynamicOffsets = {};
- ityp::bitset<VertexBufferSlot, kMaxVertexBuffers> mVertexBufferSlotsUsed;
- bool mIndexBufferSet = false;
- wgpu::IndexFormat mIndexFormat;
- uint64_t mIndexBufferSize = 0;
-
- ityp::array<VertexBufferSlot, uint64_t, kMaxVertexBuffers> mVertexBufferSizes = {};
-
- PipelineLayoutBase* mLastPipelineLayout = nullptr;
- PipelineBase* mLastPipeline = nullptr;
-
- const RequiredBufferSizes* mMinBufferSizes = nullptr;
- };
+class CommandBufferStateTracker {
+ public:
+ CommandBufferStateTracker();
+ CommandBufferStateTracker(const CommandBufferStateTracker&);
+ CommandBufferStateTracker(CommandBufferStateTracker&&);
+ ~CommandBufferStateTracker();
+
+ CommandBufferStateTracker& operator=(const CommandBufferStateTracker&);
+ CommandBufferStateTracker& operator=(CommandBufferStateTracker&&);
+
+ // Non-state-modifying validation functions
+ MaybeError ValidateCanDispatch();
+ MaybeError ValidateCanDraw();
+ MaybeError ValidateCanDrawIndexed();
+ MaybeError ValidateBufferInRangeForVertexBuffer(uint32_t vertexCount, uint32_t firstVertex);
+ MaybeError ValidateBufferInRangeForInstanceBuffer(uint32_t instanceCount,
+ uint32_t firstInstance);
+ MaybeError ValidateIndexBufferInRange(uint32_t indexCount, uint32_t firstIndex);
+
+ // State-modifying methods
+ void SetComputePipeline(ComputePipelineBase* pipeline);
+ void SetRenderPipeline(RenderPipelineBase* pipeline);
+ void SetBindGroup(BindGroupIndex index,
+ BindGroupBase* bindgroup,
+ uint32_t dynamicOffsetCount,
+ const uint32_t* dynamicOffsets);
+ void SetIndexBuffer(wgpu::IndexFormat format, uint64_t size);
+ void SetVertexBuffer(VertexBufferSlot slot, uint64_t size);
+
+ static constexpr size_t kNumAspects = 4;
+ using ValidationAspects = std::bitset<kNumAspects>;
+
+ BindGroupBase* GetBindGroup(BindGroupIndex index) const;
+ const std::vector<uint32_t>& GetDynamicOffsets(BindGroupIndex index) const;
+ bool HasPipeline() const;
+ RenderPipelineBase* GetRenderPipeline() const;
+ ComputePipelineBase* GetComputePipeline() const;
+ PipelineLayoutBase* GetPipelineLayout() const;
+ wgpu::IndexFormat GetIndexFormat() const;
+ uint64_t GetIndexBufferSize() const;
+
+ private:
+ MaybeError ValidateOperation(ValidationAspects requiredAspects);
+ void RecomputeLazyAspects(ValidationAspects aspects);
+ MaybeError CheckMissingAspects(ValidationAspects aspects);
+
+ void SetPipelineCommon(PipelineBase* pipeline);
+
+ ValidationAspects mAspects;
+
+ ityp::array<BindGroupIndex, BindGroupBase*, kMaxBindGroups> mBindgroups = {};
+ ityp::array<BindGroupIndex, std::vector<uint32_t>, kMaxBindGroups> mDynamicOffsets = {};
+ ityp::bitset<VertexBufferSlot, kMaxVertexBuffers> mVertexBufferSlotsUsed;
+ bool mIndexBufferSet = false;
+ wgpu::IndexFormat mIndexFormat;
+ uint64_t mIndexBufferSize = 0;
+
+ ityp::array<VertexBufferSlot, uint64_t, kMaxVertexBuffers> mVertexBufferSizes = {};
+
+ PipelineLayoutBase* mLastPipelineLayout = nullptr;
+ PipelineBase* mLastPipeline = nullptr;
+
+ const RequiredBufferSizes* mMinBufferSizes = nullptr;
+};
} // namespace dawn::native
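The class now declares its full set of special member functions, with the definitions defaulted out of line in CommandBufferStateTracker.cpp (the `= default` definitions added earlier in this diff). A minimal sketch of the same pattern on a hypothetical Widget class; the motivation in the comments is a common reason for this split, not something stated by the change itself:

    // widget.h -- declare the special members; default them in the .cpp.
    #include <vector>

    class Widget {
      public:
        Widget();
        Widget(const Widget&);
        Widget(Widget&&);
        ~Widget();

        Widget& operator=(const Widget&);
        Widget& operator=(Widget&&);

      private:
        std::vector<int> mData;
    };

    // widget.cpp -- the out-of-line "= default" definitions keep the
    // compiler-generated copy/move/destroy code in one translation unit
    // instead of being emitted inline at every call site.
    Widget::Widget() = default;
    Widget::Widget(const Widget&) = default;
    Widget::Widget(Widget&&) = default;
    Widget::~Widget() = default;
    Widget& Widget::operator=(const Widget&) = default;
    Widget& Widget::operator=(Widget&&) = default;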
diff --git a/chromium/third_party/dawn/src/dawn/native/CommandEncoder.cpp b/chromium/third_party/dawn/src/dawn/native/CommandEncoder.cpp
index 7f516ab84e0..197e0c0c1a0 100644
--- a/chromium/third_party/dawn/src/dawn/native/CommandEncoder.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/CommandEncoder.cpp
@@ -14,6 +14,10 @@
#include "dawn/native/CommandEncoder.h"
+#include <unordered_set>
+#include <utility>
+#include <vector>
+
#include "dawn/common/BitSetIterator.h"
#include "dawn/common/Math.h"
#include "dawn/native/BindGroup.h"
@@ -38,1385 +42,1413 @@
namespace dawn::native {
- namespace {
-
- bool HasDeprecatedColor(const RenderPassColorAttachment& attachment) {
- return !std::isnan(attachment.clearColor.r) || !std::isnan(attachment.clearColor.g) ||
- !std::isnan(attachment.clearColor.b) || !std::isnan(attachment.clearColor.a);
- }
-
- MaybeError ValidateB2BCopyAlignment(uint64_t dataSize,
- uint64_t srcOffset,
- uint64_t dstOffset) {
- // Copy size must be a multiple of 4 bytes on macOS.
- DAWN_INVALID_IF(dataSize % 4 != 0, "Copy size (%u) is not a multiple of 4.", dataSize);
-
- // SourceOffset and destinationOffset must be multiples of 4 bytes on macOS.
- DAWN_INVALID_IF(
- srcOffset % 4 != 0 || dstOffset % 4 != 0,
- "Source offset (%u) or destination offset (%u) is not a multiple of 4 bytes,",
- srcOffset, dstOffset);
-
- return {};
- }
-
- MaybeError ValidateTextureSampleCountInBufferCopyCommands(const TextureBase* texture) {
- DAWN_INVALID_IF(texture->GetSampleCount() > 1,
- "%s sample count (%u) is not 1 when copying to or from a buffer.",
- texture, texture->GetSampleCount());
-
- return {};
- }
-
- MaybeError ValidateLinearTextureCopyOffset(const TextureDataLayout& layout,
- const TexelBlockInfo& blockInfo,
- const bool hasDepthOrStencil) {
- if (hasDepthOrStencil) {
- // For depth-stencil texture, buffer offset must be a multiple of 4.
- DAWN_INVALID_IF(layout.offset % 4 != 0,
- "Offset (%u) is not a multiple of 4 for depth/stencil texture.",
- layout.offset);
- } else {
- DAWN_INVALID_IF(layout.offset % blockInfo.byteSize != 0,
- "Offset (%u) is not a multiple of the texel block byte size (%u).",
- layout.offset, blockInfo.byteSize);
- }
- return {};
+namespace {
+
+bool HasDeprecatedColor(const RenderPassColorAttachment& attachment) {
+ return !std::isnan(attachment.clearColor.r) || !std::isnan(attachment.clearColor.g) ||
+ !std::isnan(attachment.clearColor.b) || !std::isnan(attachment.clearColor.a);
+}
+
+MaybeError ValidateB2BCopyAlignment(uint64_t dataSize, uint64_t srcOffset, uint64_t dstOffset) {
+ // Copy size must be a multiple of 4 bytes on macOS.
+ DAWN_INVALID_IF(dataSize % 4 != 0, "Copy size (%u) is not a multiple of 4.", dataSize);
+
+ // SourceOffset and destinationOffset must be multiples of 4 bytes on macOS.
+ DAWN_INVALID_IF(srcOffset % 4 != 0 || dstOffset % 4 != 0,
+ "Source offset (%u) or destination offset (%u) is not a multiple of 4 bytes,",
+ srcOffset, dstOffset);
+
+ return {};
+}
+
+MaybeError ValidateTextureSampleCountInBufferCopyCommands(const TextureBase* texture) {
+ DAWN_INVALID_IF(texture->GetSampleCount() > 1,
+ "%s sample count (%u) is not 1 when copying to or from a buffer.", texture,
+ texture->GetSampleCount());
+
+ return {};
+}
+
+MaybeError ValidateLinearTextureCopyOffset(const TextureDataLayout& layout,
+ const TexelBlockInfo& blockInfo,
+ const bool hasDepthOrStencil) {
+ if (hasDepthOrStencil) {
+ // For depth-stencil texture, buffer offset must be a multiple of 4.
+ DAWN_INVALID_IF(layout.offset % 4 != 0,
+ "Offset (%u) is not a multiple of 4 for depth/stencil texture.",
+ layout.offset);
+ } else {
+ DAWN_INVALID_IF(layout.offset % blockInfo.byteSize != 0,
+ "Offset (%u) is not a multiple of the texel block byte size (%u).",
+ layout.offset, blockInfo.byteSize);
+ }
+ return {};
+}
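ValidateLinearTextureCopyOffset enforces two different alignment rules: for depth/stencil copies the buffer offset only needs 4-byte alignment, while for other formats it must be aligned to the texel block byte size. A tiny standalone check of the same rule with hypothetical numbers:

    #include <cassert>
    #include <cstdint>

    // Returns true when `offset` satisfies the rule described above.
    // `blockByteSize` would come from the format's texel block info.
    bool OffsetIsValid(uint64_t offset, uint32_t blockByteSize, bool hasDepthOrStencil) {
        // Depth/stencil copies only require 4-byte alignment; other copies must
        // be aligned to the texel block size (e.g. 8 bytes for RG32Float).
        return hasDepthOrStencil ? offset % 4 == 0 : offset % blockByteSize == 0;
    }

    int main() {
        assert(OffsetIsValid(24, 8, /*hasDepthOrStencil=*/false));   // 24 % 8 == 0
        assert(!OffsetIsValid(20, 8, /*hasDepthOrStencil=*/false));  // 20 % 8 != 0
        assert(OffsetIsValid(20, 8, /*hasDepthOrStencil=*/true));    // only needs % 4
        return 0;
    }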
+
+MaybeError ValidateTextureDepthStencilToBufferCopyRestrictions(const ImageCopyTexture& src) {
+ Aspect aspectUsed;
+ DAWN_TRY_ASSIGN(aspectUsed, SingleAspectUsedByImageCopyTexture(src));
+ if (aspectUsed == Aspect::Depth) {
+ switch (src.texture->GetFormat().format) {
+ case wgpu::TextureFormat::Depth24Plus:
+ case wgpu::TextureFormat::Depth24PlusStencil8:
+ case wgpu::TextureFormat::Depth24UnormStencil8:
+ return DAWN_FORMAT_VALIDATION_ERROR(
+ "The depth aspect of %s format %s cannot be selected in a texture to "
+ "buffer copy.",
+ src.texture, src.texture->GetFormat().format);
+ case wgpu::TextureFormat::Depth32Float:
+ case wgpu::TextureFormat::Depth16Unorm:
+ case wgpu::TextureFormat::Depth32FloatStencil8:
+ break;
+
+ default:
+ UNREACHABLE();
}
+ }
- MaybeError ValidateTextureDepthStencilToBufferCopyRestrictions(
- const ImageCopyTexture& src) {
- Aspect aspectUsed;
- DAWN_TRY_ASSIGN(aspectUsed, SingleAspectUsedByImageCopyTexture(src));
- if (aspectUsed == Aspect::Depth) {
- switch (src.texture->GetFormat().format) {
- case wgpu::TextureFormat::Depth24Plus:
- case wgpu::TextureFormat::Depth24PlusStencil8:
- case wgpu::TextureFormat::Depth24UnormStencil8:
- return DAWN_FORMAT_VALIDATION_ERROR(
- "The depth aspect of %s format %s cannot be selected in a texture to "
- "buffer copy.",
- src.texture, src.texture->GetFormat().format);
- case wgpu::TextureFormat::Depth32Float:
- case wgpu::TextureFormat::Depth16Unorm:
- case wgpu::TextureFormat::Depth32FloatStencil8:
- break;
-
- default:
- UNREACHABLE();
- }
- }
-
- return {};
- }
+ return {};
+}
+
+MaybeError ValidateAttachmentArrayLayersAndLevelCount(const TextureViewBase* attachment) {
+ // Currently we do not support layered rendering.
+ DAWN_INVALID_IF(attachment->GetLayerCount() > 1,
+ "The layer count (%u) of %s used as attachment is greater than 1.",
+ attachment->GetLayerCount(), attachment);
+
+ DAWN_INVALID_IF(attachment->GetLevelCount() > 1,
+ "The mip level count (%u) of %s used as attachment is greater than 1.",
+ attachment->GetLevelCount(), attachment);
+
+ return {};
+}
+
+MaybeError ValidateOrSetAttachmentSize(const TextureViewBase* attachment,
+ uint32_t* width,
+ uint32_t* height) {
+ const Extent3D& attachmentSize =
+ attachment->GetTexture()->GetMipLevelSingleSubresourceVirtualSize(
+ attachment->GetBaseMipLevel());
+
+ if (*width == 0) {
+ DAWN_ASSERT(*height == 0);
+ *width = attachmentSize.width;
+ *height = attachmentSize.height;
+ DAWN_ASSERT(*width != 0 && *height != 0);
+ } else {
+ DAWN_INVALID_IF(*width != attachmentSize.width || *height != attachmentSize.height,
+ "Attachment %s size (width: %u, height: %u) does not match the size of the "
+ "other attachments (width: %u, height: %u).",
+ attachment, attachmentSize.width, attachmentSize.height, *width, *height);
+ }
- MaybeError ValidateAttachmentArrayLayersAndLevelCount(const TextureViewBase* attachment) {
- // Currently we do not support layered rendering.
- DAWN_INVALID_IF(attachment->GetLayerCount() > 1,
- "The layer count (%u) of %s used as attachment is greater than 1.",
- attachment->GetLayerCount(), attachment);
+ return {};
+}
- DAWN_INVALID_IF(attachment->GetLevelCount() > 1,
- "The mip level count (%u) of %s used as attachment is greater than 1.",
- attachment->GetLevelCount(), attachment);
+MaybeError ValidateOrSetColorAttachmentSampleCount(const TextureViewBase* colorAttachment,
+ uint32_t* sampleCount) {
+ if (*sampleCount == 0) {
+ *sampleCount = colorAttachment->GetTexture()->GetSampleCount();
+ DAWN_ASSERT(*sampleCount != 0);
+ } else {
+ DAWN_INVALID_IF(
+ *sampleCount != colorAttachment->GetTexture()->GetSampleCount(),
+ "Color attachment %s sample count (%u) does not match the sample count of the "
+ "other attachments (%u).",
+ colorAttachment, colorAttachment->GetTexture()->GetSampleCount(), *sampleCount);
+ }
- return {};
- }
+ return {};
+}
- MaybeError ValidateOrSetAttachmentSize(const TextureViewBase* attachment,
- uint32_t* width,
- uint32_t* height) {
- const Extent3D& attachmentSize =
- attachment->GetTexture()->GetMipLevelVirtualSize(attachment->GetBaseMipLevel());
-
- if (*width == 0) {
- DAWN_ASSERT(*height == 0);
- *width = attachmentSize.width;
- *height = attachmentSize.height;
- DAWN_ASSERT(*width != 0 && *height != 0);
- } else {
- DAWN_INVALID_IF(
- *width != attachmentSize.width || *height != attachmentSize.height,
- "Attachment %s size (width: %u, height: %u) does not match the size of the "
- "other attachments (width: %u, height: %u).",
- attachment, attachmentSize.width, attachmentSize.height, *width, *height);
- }
+MaybeError ValidateResolveTarget(const DeviceBase* device,
+ const RenderPassColorAttachment& colorAttachment,
+ UsageValidationMode usageValidationMode) {
+ if (colorAttachment.resolveTarget == nullptr) {
+ return {};
+ }
- return {};
- }
+ const TextureViewBase* resolveTarget = colorAttachment.resolveTarget;
+ const TextureViewBase* attachment = colorAttachment.view;
+ DAWN_TRY(device->ValidateObject(colorAttachment.resolveTarget));
+ DAWN_TRY(ValidateCanUseAs(colorAttachment.resolveTarget->GetTexture(),
+ wgpu::TextureUsage::RenderAttachment, usageValidationMode));
+
+ DAWN_INVALID_IF(!attachment->GetTexture()->IsMultisampledTexture(),
+ "Cannot set %s as a resolve target when the color attachment %s has a sample "
+ "count of 1.",
+ resolveTarget, attachment);
+
+ DAWN_INVALID_IF(resolveTarget->GetTexture()->IsMultisampledTexture(),
+ "Cannot use %s as resolve target. Sample count (%u) is greater than 1.",
+ resolveTarget, resolveTarget->GetTexture()->GetSampleCount());
+
+ DAWN_INVALID_IF(resolveTarget->GetLayerCount() > 1,
+ "The resolve target %s array layer count (%u) is not 1.", resolveTarget,
+ resolveTarget->GetLayerCount());
+
+ DAWN_INVALID_IF(resolveTarget->GetLevelCount() > 1,
+ "The resolve target %s mip level count (%u) is not 1.", resolveTarget,
+ resolveTarget->GetLevelCount());
+
+ const Extent3D& colorTextureSize =
+ attachment->GetTexture()->GetMipLevelSingleSubresourceVirtualSize(
+ attachment->GetBaseMipLevel());
+ const Extent3D& resolveTextureSize =
+ resolveTarget->GetTexture()->GetMipLevelSingleSubresourceVirtualSize(
+ resolveTarget->GetBaseMipLevel());
+ DAWN_INVALID_IF(colorTextureSize.width != resolveTextureSize.width ||
+ colorTextureSize.height != resolveTextureSize.height,
+                    "The resolve target %s size (width: %u, height: %u) does not match the color "
+ "attachment %s size (width: %u, height: %u).",
+ resolveTarget, resolveTextureSize.width, resolveTextureSize.height, attachment,
+ colorTextureSize.width, colorTextureSize.height);
+
+ wgpu::TextureFormat resolveTargetFormat = resolveTarget->GetFormat().format;
+ DAWN_INVALID_IF(
+ resolveTargetFormat != attachment->GetFormat().format,
+ "The resolve target %s format (%s) does not match the color attachment %s format "
+ "(%s).",
+ resolveTarget, resolveTargetFormat, attachment, attachment->GetFormat().format);
+ DAWN_INVALID_IF(
+ !resolveTarget->GetFormat().supportsResolveTarget,
+        "The resolve target %s format (%s) does not support being used as a resolve target.",
+ resolveTarget, resolveTargetFormat);
+
+ return {};
+}
+
+MaybeError ValidateRenderPassColorAttachment(DeviceBase* device,
+ const RenderPassColorAttachment& colorAttachment,
+ uint32_t* width,
+ uint32_t* height,
+ uint32_t* sampleCount,
+ UsageValidationMode usageValidationMode) {
+ TextureViewBase* attachment = colorAttachment.view;
+ if (attachment == nullptr) {
+ return {};
+ }
+ DAWN_TRY(device->ValidateObject(attachment));
+ DAWN_TRY(ValidateCanUseAs(attachment->GetTexture(), wgpu::TextureUsage::RenderAttachment,
+ usageValidationMode));
+
+ DAWN_INVALID_IF(
+ !(attachment->GetAspects() & Aspect::Color) || !attachment->GetFormat().isRenderable,
+ "The color attachment %s format (%s) is not color renderable.", attachment,
+ attachment->GetFormat().format);
+
+ DAWN_TRY(ValidateLoadOp(colorAttachment.loadOp));
+ DAWN_TRY(ValidateStoreOp(colorAttachment.storeOp));
+ DAWN_INVALID_IF(colorAttachment.loadOp == wgpu::LoadOp::Undefined, "loadOp must be set.");
+ DAWN_INVALID_IF(colorAttachment.storeOp == wgpu::StoreOp::Undefined, "storeOp must be set.");
+
+ // TODO(dawn:1269): Remove after the deprecation period.
+ bool useClearColor = HasDeprecatedColor(colorAttachment);
+ const dawn::native::Color& clearValue =
+ useClearColor ? colorAttachment.clearColor : colorAttachment.clearValue;
+ if (useClearColor) {
+ device->EmitDeprecationWarning(
+ "clearColor is deprecated, prefer using clearValue instead.");
+ }
- MaybeError ValidateOrSetColorAttachmentSampleCount(const TextureViewBase* colorAttachment,
- uint32_t* sampleCount) {
- if (*sampleCount == 0) {
- *sampleCount = colorAttachment->GetTexture()->GetSampleCount();
- DAWN_ASSERT(*sampleCount != 0);
- } else {
- DAWN_INVALID_IF(
- *sampleCount != colorAttachment->GetTexture()->GetSampleCount(),
- "Color attachment %s sample count (%u) does not match the sample count of the "
- "other attachments (%u).",
- colorAttachment, colorAttachment->GetTexture()->GetSampleCount(), *sampleCount);
- }
+ if (colorAttachment.loadOp == wgpu::LoadOp::Clear) {
+ DAWN_INVALID_IF(std::isnan(clearValue.r) || std::isnan(clearValue.g) ||
+ std::isnan(clearValue.b) || std::isnan(clearValue.a),
+                        "Color clear value (%s) contains a NaN.", &clearValue);
+ }
- return {};
+ DAWN_TRY(ValidateOrSetColorAttachmentSampleCount(attachment, sampleCount));
+
+ DAWN_TRY(ValidateResolveTarget(device, colorAttachment, usageValidationMode));
+
+ DAWN_TRY(ValidateAttachmentArrayLayersAndLevelCount(attachment));
+ DAWN_TRY(ValidateOrSetAttachmentSize(attachment, width, height));
+
+ return {};
+}
+
+MaybeError ValidateRenderPassDepthStencilAttachment(
+ DeviceBase* device,
+ const RenderPassDepthStencilAttachment* depthStencilAttachment,
+ uint32_t* width,
+ uint32_t* height,
+ uint32_t* sampleCount,
+ UsageValidationMode usageValidationMode) {
+ DAWN_ASSERT(depthStencilAttachment != nullptr);
+
+ TextureViewBase* attachment = depthStencilAttachment->view;
+ DAWN_TRY(device->ValidateObject(attachment));
+ DAWN_TRY(ValidateCanUseAs(attachment->GetTexture(), wgpu::TextureUsage::RenderAttachment,
+ usageValidationMode));
+
+ // DS attachments must encompass all aspects of the texture, so we first check that this is
+ // true, which means that in the rest of the function we can assume that the view's format is
+ // the same as the texture's format.
+ const Format& format = attachment->GetTexture()->GetFormat();
+ DAWN_INVALID_IF(
+ attachment->GetAspects() != format.aspects,
+        "The depth stencil attachment %s must encompass all aspects of its texture's format (%s).",
+ attachment, format.format);
+ ASSERT(attachment->GetFormat().format == format.format);
+
+ DAWN_INVALID_IF(!format.HasDepthOrStencil(),
+ "The depth stencil attachment %s format (%s) is not a depth stencil format.",
+ attachment, format.format);
+
+ DAWN_INVALID_IF(!format.isRenderable,
+ "The depth stencil attachment %s format (%s) is not renderable.", attachment,
+ format.format);
+
+ DAWN_INVALID_IF(
+ attachment->GetAspects() == (Aspect::Depth | Aspect::Stencil) &&
+ depthStencilAttachment->depthReadOnly != depthStencilAttachment->stencilReadOnly,
+ "depthReadOnly (%u) and stencilReadOnly (%u) must be the same when texture aspect "
+ "is 'all'.",
+ depthStencilAttachment->depthReadOnly, depthStencilAttachment->stencilReadOnly);
+
+ // Read only, or depth doesn't exist.
+ if (depthStencilAttachment->depthReadOnly ||
+ !IsSubset(Aspect::Depth, attachment->GetAspects())) {
+ if (depthStencilAttachment->depthLoadOp == wgpu::LoadOp::Load &&
+ depthStencilAttachment->depthStoreOp == wgpu::StoreOp::Store) {
+ // TODO(dawn:1269): Remove this branch after the deprecation period.
+ device->EmitDeprecationWarning(
+ "Setting depthLoadOp and depthStoreOp when "
+ "the attachment has no depth aspect or depthReadOnly is true is "
+ "deprecated.");
+ } else {
+ DAWN_INVALID_IF(depthStencilAttachment->depthLoadOp != wgpu::LoadOp::Undefined,
+ "depthLoadOp (%s) must not be set if the attachment (%s) has "
+ "no depth aspect or depthReadOnly (%u) is true.",
+ depthStencilAttachment->depthLoadOp, attachment,
+ depthStencilAttachment->depthReadOnly);
+ DAWN_INVALID_IF(depthStencilAttachment->depthStoreOp != wgpu::StoreOp::Undefined,
+ "depthStoreOp (%s) must not be set if the attachment (%s) has no depth "
+ "aspect or depthReadOnly (%u) is true.",
+ depthStencilAttachment->depthStoreOp, attachment,
+ depthStencilAttachment->depthReadOnly);
}
+ } else {
+ DAWN_TRY(ValidateLoadOp(depthStencilAttachment->depthLoadOp));
+ DAWN_INVALID_IF(depthStencilAttachment->depthLoadOp == wgpu::LoadOp::Undefined,
+ "depthLoadOp must be set if the attachment (%s) has a depth aspect "
+ "and depthReadOnly (%u) is false.",
+ attachment, depthStencilAttachment->depthReadOnly);
+ DAWN_TRY(ValidateStoreOp(depthStencilAttachment->depthStoreOp));
+ DAWN_INVALID_IF(depthStencilAttachment->depthStoreOp == wgpu::StoreOp::Undefined,
+ "depthStoreOp must be set if the attachment (%s) has a depth "
+ "aspect and depthReadOnly (%u) is false.",
+ attachment, depthStencilAttachment->depthReadOnly);
+ }
- MaybeError ValidateResolveTarget(const DeviceBase* device,
- const RenderPassColorAttachment& colorAttachment,
- UsageValidationMode usageValidationMode) {
- if (colorAttachment.resolveTarget == nullptr) {
- return {};
- }
-
- const TextureViewBase* resolveTarget = colorAttachment.resolveTarget;
- const TextureViewBase* attachment = colorAttachment.view;
- DAWN_TRY(device->ValidateObject(colorAttachment.resolveTarget));
- DAWN_TRY(ValidateCanUseAs(colorAttachment.resolveTarget->GetTexture(),
- wgpu::TextureUsage::RenderAttachment, usageValidationMode));
-
- DAWN_INVALID_IF(
- !attachment->GetTexture()->IsMultisampledTexture(),
- "Cannot set %s as a resolve target when the color attachment %s has a sample "
- "count of 1.",
- resolveTarget, attachment);
-
- DAWN_INVALID_IF(resolveTarget->GetTexture()->IsMultisampledTexture(),
- "Cannot use %s as resolve target. Sample count (%u) is greater than 1.",
- resolveTarget, resolveTarget->GetTexture()->GetSampleCount());
-
- DAWN_INVALID_IF(resolveTarget->GetLayerCount() > 1,
- "The resolve target %s array layer count (%u) is not 1.", resolveTarget,
- resolveTarget->GetLayerCount());
-
- DAWN_INVALID_IF(resolveTarget->GetLevelCount() > 1,
- "The resolve target %s mip level count (%u) is not 1.", resolveTarget,
- resolveTarget->GetLevelCount());
-
- const Extent3D& colorTextureSize =
- attachment->GetTexture()->GetMipLevelVirtualSize(attachment->GetBaseMipLevel());
- const Extent3D& resolveTextureSize =
- resolveTarget->GetTexture()->GetMipLevelVirtualSize(
- resolveTarget->GetBaseMipLevel());
- DAWN_INVALID_IF(
- colorTextureSize.width != resolveTextureSize.width ||
- colorTextureSize.height != resolveTextureSize.height,
- "The Resolve target %s size (width: %u, height: %u) does not match the color "
- "attachment %s size (width: %u, height: %u).",
- resolveTarget, resolveTextureSize.width, resolveTextureSize.height, attachment,
- colorTextureSize.width, colorTextureSize.height);
-
- wgpu::TextureFormat resolveTargetFormat = resolveTarget->GetFormat().format;
+ // Read only, or stencil doesn't exist.
+ if (depthStencilAttachment->stencilReadOnly ||
+ !IsSubset(Aspect::Stencil, attachment->GetAspects())) {
+ if (depthStencilAttachment->stencilLoadOp == wgpu::LoadOp::Load &&
+ depthStencilAttachment->stencilStoreOp == wgpu::StoreOp::Store) {
+ // TODO(dawn:1269): Remove this branch after the deprecation period.
+ device->EmitDeprecationWarning(
+ "Setting stencilLoadOp and stencilStoreOp when "
+ "the attachment has no stencil aspect or stencilReadOnly is true is "
+ "deprecated.");
+ } else {
DAWN_INVALID_IF(
- resolveTargetFormat != attachment->GetFormat().format,
- "The resolve target %s format (%s) does not match the color attachment %s format "
- "(%s).",
- resolveTarget, resolveTargetFormat, attachment, attachment->GetFormat().format);
+ depthStencilAttachment->stencilLoadOp != wgpu::LoadOp::Undefined,
+ "stencilLoadOp (%s) must not be set if the attachment (%s) has no stencil "
+ "aspect or stencilReadOnly (%u) is true.",
+ depthStencilAttachment->stencilLoadOp, attachment,
+ depthStencilAttachment->stencilReadOnly);
DAWN_INVALID_IF(
- !resolveTarget->GetFormat().supportsResolveTarget,
- "The resolve target %s format (%s) does not support being used as resolve target.",
- resolveTarget, resolveTargetFormat);
-
- return {};
+ depthStencilAttachment->stencilStoreOp != wgpu::StoreOp::Undefined,
+ "stencilStoreOp (%s) must not be set if the attachment (%s) has no stencil "
+ "aspect or stencilReadOnly (%u) is true.",
+ depthStencilAttachment->stencilStoreOp, attachment,
+ depthStencilAttachment->stencilReadOnly);
}
+ } else {
+ DAWN_TRY(ValidateLoadOp(depthStencilAttachment->stencilLoadOp));
+ DAWN_INVALID_IF(depthStencilAttachment->stencilLoadOp == wgpu::LoadOp::Undefined,
+ "stencilLoadOp (%s) must be set if the attachment (%s) has a stencil "
+ "aspect and stencilReadOnly (%u) is false.",
+ depthStencilAttachment->stencilLoadOp, attachment,
+ depthStencilAttachment->stencilReadOnly);
+ DAWN_TRY(ValidateStoreOp(depthStencilAttachment->stencilStoreOp));
+ DAWN_INVALID_IF(depthStencilAttachment->stencilStoreOp == wgpu::StoreOp::Undefined,
+ "stencilStoreOp (%s) must be set if the attachment (%s) has a stencil "
+ "aspect and stencilReadOnly (%u) is false.",
+ depthStencilAttachment->stencilStoreOp, attachment,
+ depthStencilAttachment->stencilReadOnly);
+ }
- MaybeError ValidateRenderPassColorAttachment(
- DeviceBase* device,
- const RenderPassColorAttachment& colorAttachment,
- uint32_t* width,
- uint32_t* height,
- uint32_t* sampleCount,
- UsageValidationMode usageValidationMode) {
- TextureViewBase* attachment = colorAttachment.view;
- if (attachment == nullptr) {
- return {};
- }
- DAWN_TRY(device->ValidateObject(attachment));
- DAWN_TRY(ValidateCanUseAs(attachment->GetTexture(),
- wgpu::TextureUsage::RenderAttachment, usageValidationMode));
-
- DAWN_INVALID_IF(!(attachment->GetAspects() & Aspect::Color) ||
- !attachment->GetFormat().isRenderable,
- "The color attachment %s format (%s) is not color renderable.",
- attachment, attachment->GetFormat().format);
-
- DAWN_TRY(ValidateLoadOp(colorAttachment.loadOp));
- DAWN_TRY(ValidateStoreOp(colorAttachment.storeOp));
- DAWN_INVALID_IF(colorAttachment.loadOp == wgpu::LoadOp::Undefined,
- "loadOp must be set.");
- DAWN_INVALID_IF(colorAttachment.storeOp == wgpu::StoreOp::Undefined,
- "storeOp must be set.");
-
- // TODO(dawn:1269): Remove after the deprecation period.
- bool useClearColor = HasDeprecatedColor(colorAttachment);
- const dawn::native::Color& clearValue =
- useClearColor ? colorAttachment.clearColor : colorAttachment.clearValue;
- if (useClearColor) {
- device->EmitDeprecationWarning(
- "clearColor is deprecated, prefer using clearValue instead.");
- }
-
- if (colorAttachment.loadOp == wgpu::LoadOp::Clear) {
- DAWN_INVALID_IF(std::isnan(clearValue.r) || std::isnan(clearValue.g) ||
- std::isnan(clearValue.b) || std::isnan(clearValue.a),
- "Color clear value (%s) contain a NaN.", &clearValue);
- }
-
- DAWN_TRY(ValidateOrSetColorAttachmentSampleCount(attachment, sampleCount));
+ if (!std::isnan(depthStencilAttachment->clearDepth)) {
+ // TODO(dawn:1269): Remove this branch after the deprecation period.
+ device->EmitDeprecationWarning("clearDepth is deprecated, prefer depthClearValue instead.");
+ } else {
+ DAWN_INVALID_IF(depthStencilAttachment->depthLoadOp == wgpu::LoadOp::Clear &&
+ std::isnan(depthStencilAttachment->depthClearValue),
+ "depthClearValue is NaN.");
+ }
- DAWN_TRY(ValidateResolveTarget(device, colorAttachment, usageValidationMode));
+ // TODO(dawn:1269): Remove after the deprecation period.
+ if (depthStencilAttachment->stencilClearValue == 0 &&
+ depthStencilAttachment->clearStencil != 0) {
+ device->EmitDeprecationWarning(
+ "clearStencil is deprecated, prefer stencilClearValue instead.");
+ }
- DAWN_TRY(ValidateAttachmentArrayLayersAndLevelCount(attachment));
- DAWN_TRY(ValidateOrSetAttachmentSize(attachment, width, height));
+ // *sampleCount == 0 must only happen when there is no color attachment. In that case we
+ // do not need to validate the sample count of the depth stencil attachment.
+ const uint32_t depthStencilSampleCount = attachment->GetTexture()->GetSampleCount();
+ if (*sampleCount != 0) {
+ DAWN_INVALID_IF(
+ depthStencilSampleCount != *sampleCount,
+ "The depth stencil attachment %s sample count (%u) does not match the sample "
+ "count of the other attachments (%u).",
+ attachment, depthStencilSampleCount, *sampleCount);
+ } else {
+ *sampleCount = depthStencilSampleCount;
+ }
- return {};
+ DAWN_TRY(ValidateAttachmentArrayLayersAndLevelCount(attachment));
+ DAWN_TRY(ValidateOrSetAttachmentSize(attachment, width, height));
+
+ return {};
+}
+
+MaybeError ValidateTimestampLocationOnRenderPass(
+ wgpu::RenderPassTimestampLocation location,
+ const std::unordered_set<wgpu::RenderPassTimestampLocation>& writtenLocations) {
+ DAWN_TRY(ValidateRenderPassTimestampLocation(location));
+
+ DAWN_INVALID_IF(writtenLocations.find(location) != writtenLocations.end(),
+                    "RenderPassTimestampLocation %u is used more than once in a render pass.",
+ location);
+
+ return {};
+}
+
+MaybeError ValidateTimestampLocationOnComputePass(
+ wgpu::ComputePassTimestampLocation location,
+ const std::unordered_set<wgpu::ComputePassTimestampLocation>& writtenLocations) {
+ DAWN_TRY(ValidateComputePassTimestampLocation(location));
+
+ DAWN_INVALID_IF(writtenLocations.find(location) != writtenLocations.end(),
+                    "ComputePassTimestampLocation %u is used more than once in a compute pass.",
+ location);
+
+ return {};
+}
+
+MaybeError ValidateRenderPassDescriptor(DeviceBase* device,
+ const RenderPassDescriptor* descriptor,
+ uint32_t* width,
+ uint32_t* height,
+ uint32_t* sampleCount,
+ UsageValidationMode usageValidationMode) {
+ DAWN_INVALID_IF(
+ descriptor->colorAttachmentCount > kMaxColorAttachments,
+ "Color attachment count (%u) exceeds the maximum number of color attachments (%u).",
+ descriptor->colorAttachmentCount, kMaxColorAttachments);
+
+ bool isAllColorAttachmentNull = true;
+ for (uint32_t i = 0; i < descriptor->colorAttachmentCount; ++i) {
+ DAWN_TRY_CONTEXT(
+ ValidateRenderPassColorAttachment(device, descriptor->colorAttachments[i], width,
+ height, sampleCount, usageValidationMode),
+ "validating colorAttachments[%u].", i);
+ if (descriptor->colorAttachments[i].view) {
+ isAllColorAttachmentNull = false;
}
+ }
- MaybeError ValidateRenderPassDepthStencilAttachment(
- DeviceBase* device,
- const RenderPassDepthStencilAttachment* depthStencilAttachment,
- uint32_t* width,
- uint32_t* height,
- uint32_t* sampleCount,
- UsageValidationMode usageValidationMode) {
- DAWN_ASSERT(depthStencilAttachment != nullptr);
-
- TextureViewBase* attachment = depthStencilAttachment->view;
- DAWN_TRY(device->ValidateObject(attachment));
- DAWN_TRY(ValidateCanUseAs(attachment->GetTexture(),
- wgpu::TextureUsage::RenderAttachment, usageValidationMode));
-
- const Format& format = attachment->GetFormat();
- DAWN_INVALID_IF(
- !format.HasDepthOrStencil(),
- "The depth stencil attachment %s format (%s) is not a depth stencil format.",
- attachment, format.format);
-
- DAWN_INVALID_IF(!format.isRenderable,
- "The depth stencil attachment %s format (%s) is not renderable.",
- attachment, format.format);
-
- DAWN_INVALID_IF(attachment->GetAspects() != format.aspects,
- "The depth stencil attachment %s must encompass all aspects.",
- attachment);
+ if (descriptor->depthStencilAttachment != nullptr) {
+ DAWN_TRY_CONTEXT(ValidateRenderPassDepthStencilAttachment(
+ device, descriptor->depthStencilAttachment, width, height, sampleCount,
+ usageValidationMode),
+ "validating depthStencilAttachment.");
+ } else {
+ DAWN_INVALID_IF(
+ isAllColorAttachmentNull,
+ "No color or depthStencil attachments specified. At least one is required.");
+ }
- DAWN_INVALID_IF(
- attachment->GetAspects() == (Aspect::Depth | Aspect::Stencil) &&
- depthStencilAttachment->depthReadOnly !=
- depthStencilAttachment->stencilReadOnly,
- "depthReadOnly (%u) and stencilReadOnly (%u) must be the same when texture aspect "
- "is 'all'.",
- depthStencilAttachment->depthReadOnly, depthStencilAttachment->stencilReadOnly);
-
- // Read only, or depth doesn't exist.
- if (depthStencilAttachment->depthReadOnly ||
- !IsSubset(Aspect::Depth, attachment->GetAspects())) {
- if (depthStencilAttachment->depthLoadOp == wgpu::LoadOp::Load &&
- depthStencilAttachment->depthStoreOp == wgpu::StoreOp::Store) {
- // TODO(dawn:1269): Remove this branch after the deprecation period.
- device->EmitDeprecationWarning(
- "Setting depthLoadOp and depthStoreOp when "
- "the attachment has no depth aspect or depthReadOnly is true is "
- "deprecated.");
- } else {
- DAWN_INVALID_IF(depthStencilAttachment->depthLoadOp != wgpu::LoadOp::Undefined,
- "depthLoadOp (%s) must not be set if the attachment (%s) has "
- "no depth aspect or depthReadOnly (%u) is true.",
- depthStencilAttachment->depthLoadOp, attachment,
- depthStencilAttachment->depthReadOnly);
- DAWN_INVALID_IF(
- depthStencilAttachment->depthStoreOp != wgpu::StoreOp::Undefined,
- "depthStoreOp (%s) must not be set if the attachment (%s) has no depth "
- "aspect or depthReadOnly (%u) is true.",
- depthStencilAttachment->depthStoreOp, attachment,
- depthStencilAttachment->depthReadOnly);
- }
- } else {
- DAWN_TRY(ValidateLoadOp(depthStencilAttachment->depthLoadOp));
- DAWN_INVALID_IF(depthStencilAttachment->depthLoadOp == wgpu::LoadOp::Undefined,
- "depthLoadOp must be set if the attachment (%s) has a depth aspect "
- "and depthReadOnly (%u) is false.",
- attachment, depthStencilAttachment->depthReadOnly);
- DAWN_TRY(ValidateStoreOp(depthStencilAttachment->depthStoreOp));
- DAWN_INVALID_IF(depthStencilAttachment->depthStoreOp == wgpu::StoreOp::Undefined,
- "depthStoreOp must be set if the attachment (%s) has a depth "
- "aspect and depthReadOnly (%u) is false.",
- attachment, depthStencilAttachment->depthReadOnly);
- }
+ if (descriptor->occlusionQuerySet != nullptr) {
+ DAWN_TRY(device->ValidateObject(descriptor->occlusionQuerySet));
- // Read only, or stencil doesn't exist.
- if (depthStencilAttachment->stencilReadOnly ||
- !IsSubset(Aspect::Stencil, attachment->GetAspects())) {
- if (depthStencilAttachment->stencilLoadOp == wgpu::LoadOp::Load &&
- depthStencilAttachment->stencilStoreOp == wgpu::StoreOp::Store) {
- // TODO(dawn:1269): Remove this branch after the deprecation period.
- device->EmitDeprecationWarning(
- "Setting stencilLoadOp and stencilStoreOp when "
- "the attachment has no stencil aspect or stencilReadOnly is true is "
- "deprecated.");
- } else {
- DAWN_INVALID_IF(
- depthStencilAttachment->stencilLoadOp != wgpu::LoadOp::Undefined,
- "stencilLoadOp (%s) must not be set if the attachment (%s) has no stencil "
- "aspect or stencilReadOnly (%u) is true.",
- depthStencilAttachment->stencilLoadOp, attachment,
- depthStencilAttachment->stencilReadOnly);
- DAWN_INVALID_IF(
- depthStencilAttachment->stencilStoreOp != wgpu::StoreOp::Undefined,
- "stencilStoreOp (%s) must not be set if the attachment (%s) has no stencil "
- "aspect or stencilReadOnly (%u) is true.",
- depthStencilAttachment->stencilStoreOp, attachment,
- depthStencilAttachment->stencilReadOnly);
- }
- } else {
- DAWN_TRY(ValidateLoadOp(depthStencilAttachment->stencilLoadOp));
- DAWN_INVALID_IF(
- depthStencilAttachment->stencilLoadOp == wgpu::LoadOp::Undefined,
- "stencilLoadOp (%s) must be set if the attachment (%s) has a stencil "
- "aspect and stencilReadOnly (%u) is false.",
- depthStencilAttachment->stencilLoadOp, attachment,
- depthStencilAttachment->stencilReadOnly);
- DAWN_TRY(ValidateStoreOp(depthStencilAttachment->stencilStoreOp));
- DAWN_INVALID_IF(
- depthStencilAttachment->stencilStoreOp == wgpu::StoreOp::Undefined,
- "stencilStoreOp (%s) must be set if the attachment (%s) has a stencil "
- "aspect and stencilReadOnly (%u) is false.",
- depthStencilAttachment->stencilStoreOp, attachment,
- depthStencilAttachment->stencilReadOnly);
- }
+ DAWN_INVALID_IF(descriptor->occlusionQuerySet->GetQueryType() != wgpu::QueryType::Occlusion,
+ "The occlusionQuerySet %s type (%s) is not %s.",
+ descriptor->occlusionQuerySet,
+ descriptor->occlusionQuerySet->GetQueryType(), wgpu::QueryType::Occlusion);
+ }
- if (!std::isnan(depthStencilAttachment->clearDepth)) {
- // TODO(dawn:1269): Remove this branch after the deprecation period.
- device->EmitDeprecationWarning(
- "clearDepth is deprecated, prefer depthClearValue instead.");
- } else {
- DAWN_INVALID_IF(depthStencilAttachment->depthLoadOp == wgpu::LoadOp::Clear &&
- std::isnan(depthStencilAttachment->depthClearValue),
- "depthClearValue is NaN.");
- }
+ if (descriptor->timestampWriteCount > 0) {
+ DAWN_ASSERT(descriptor->timestampWrites != nullptr);
+
+        // Record the query set and query index used on the render pass to validate that no
+        // query index is written more than once. RenderPassResourceUsageTracker's
+        // TrackQueryAvailability is not used here because the timestampWrites are not
+        // validated and encoded one by one; they are encoded together after validation passes.
+ QueryAvailabilityMap usedQueries;
+ // TODO(https://crbug.com/dawn/1452):
+ // 1. Add an enum that's TimestampLocationMask and has bit values.
+ // 2. Add a function with a switch that converts from one to the other.
+ // 3. type alias the ityp::bitset for that to call it TimestampLocationSet.
+ // 4. Use it here.
+ std::unordered_set<wgpu::RenderPassTimestampLocation> writtenLocations;
+ for (uint32_t i = 0; i < descriptor->timestampWriteCount; ++i) {
+ QuerySetBase* querySet = descriptor->timestampWrites[i].querySet;
+ DAWN_ASSERT(querySet != nullptr);
+ uint32_t queryIndex = descriptor->timestampWrites[i].queryIndex;
+ DAWN_TRY_CONTEXT(ValidateTimestampQuery(device, querySet, queryIndex),
+ "validating querySet and queryIndex of timestampWrites[%u].", i);
+ DAWN_TRY_CONTEXT(ValidateTimestampLocationOnRenderPass(
+ descriptor->timestampWrites[i].location, writtenLocations),
+ "validating location of timestampWrites[%u].", i);
+ writtenLocations.insert(descriptor->timestampWrites[i].location);
+
+ auto checkIt = usedQueries.find(querySet);
+ DAWN_INVALID_IF(checkIt != usedQueries.end() && checkIt->second[queryIndex],
+ "Query index %u of %s is written to twice in a render pass.",
+ queryIndex, querySet);
+
+            // Get the iterator for that querySet, or create a new vector of bools
+            // initialized to false if the querySet wasn't registered yet.
+ auto addIt = usedQueries.emplace(querySet, querySet->GetQueryCount()).first;
+ addIt->second[queryIndex] = true;
+ }
+ }
- // TODO(dawn:1269): Remove after the deprecation period.
- if (depthStencilAttachment->stencilClearValue == 0 &&
- depthStencilAttachment->clearStencil != 0) {
- device->EmitDeprecationWarning(
- "clearStencil is deprecated, prefer stencilClearValue instead.");
- }
+ DAWN_INVALID_IF(
+ descriptor->colorAttachmentCount == 0 && descriptor->depthStencilAttachment == nullptr,
+ "Render pass has no attachments.");
- // *sampleCount == 0 must only happen when there is no color attachment. In that case we
- // do not need to validate the sample count of the depth stencil attachment.
- const uint32_t depthStencilSampleCount = attachment->GetTexture()->GetSampleCount();
- if (*sampleCount != 0) {
- DAWN_INVALID_IF(
- depthStencilSampleCount != *sampleCount,
- "The depth stencil attachment %s sample count (%u) does not match the sample "
- "count of the other attachments (%u).",
- attachment, depthStencilSampleCount, *sampleCount);
- } else {
- *sampleCount = depthStencilSampleCount;
- }
+ return {};
+}
- DAWN_TRY(ValidateAttachmentArrayLayersAndLevelCount(attachment));
- DAWN_TRY(ValidateOrSetAttachmentSize(attachment, width, height));
+MaybeError ValidateComputePassDescriptor(const DeviceBase* device,
+ const ComputePassDescriptor* descriptor) {
+ if (descriptor == nullptr) {
+ return {};
+ }
- return {};
+ if (descriptor->timestampWriteCount > 0) {
+ DAWN_ASSERT(descriptor->timestampWrites != nullptr);
+
+ // TODO(https://crbug.com/dawn/1452):
+ // 1. Add an enum that's TimestampLocationMask and has bit values.
+ // 2. Add a function with a switch that converts from one to the other.
+ // 3. type alias the ityp::bitset for that to call it TimestampLocationSet.
+ // 4. Use it here.
+ std::unordered_set<wgpu::ComputePassTimestampLocation> writtenLocations;
+ for (uint32_t i = 0; i < descriptor->timestampWriteCount; ++i) {
+ DAWN_ASSERT(descriptor->timestampWrites[i].querySet != nullptr);
+ DAWN_TRY_CONTEXT(ValidateTimestampQuery(device, descriptor->timestampWrites[i].querySet,
+ descriptor->timestampWrites[i].queryIndex),
+ "validating querySet and queryIndex of timestampWrites[%u].", i);
+ DAWN_TRY_CONTEXT(ValidateTimestampLocationOnComputePass(
+ descriptor->timestampWrites[i].location, writtenLocations),
+ "validating location of timestampWrites[%u].", i);
+ writtenLocations.insert(descriptor->timestampWrites[i].location);
}
+ }
- MaybeError ValidateRenderPassDescriptor(DeviceBase* device,
- const RenderPassDescriptor* descriptor,
- uint32_t* width,
- uint32_t* height,
- uint32_t* sampleCount,
- UsageValidationMode usageValidationMode) {
- DAWN_INVALID_IF(
- descriptor->colorAttachmentCount > kMaxColorAttachments,
- "Color attachment count (%u) exceeds the maximum number of color attachments (%u).",
- descriptor->colorAttachmentCount, kMaxColorAttachments);
-
- bool isAllColorAttachmentNull = true;
- for (uint32_t i = 0; i < descriptor->colorAttachmentCount; ++i) {
- DAWN_TRY_CONTEXT(ValidateRenderPassColorAttachment(
- device, descriptor->colorAttachments[i], width, height,
- sampleCount, usageValidationMode),
- "validating colorAttachments[%u].", i);
- if (descriptor->colorAttachments[i].view) {
- isAllColorAttachmentNull = false;
- }
- }
+ return {};
+}
+
+MaybeError ValidateQuerySetResolve(const QuerySetBase* querySet,
+ uint32_t firstQuery,
+ uint32_t queryCount,
+ const BufferBase* destination,
+ uint64_t destinationOffset) {
+ DAWN_INVALID_IF(firstQuery >= querySet->GetQueryCount(),
+ "First query (%u) exceeds the number of queries (%u) in %s.", firstQuery,
+ querySet->GetQueryCount(), querySet);
+
+ DAWN_INVALID_IF(
+ queryCount > querySet->GetQueryCount() - firstQuery,
+ "The query range (firstQuery: %u, queryCount: %u) exceeds the number of queries "
+ "(%u) in %s.",
+ firstQuery, queryCount, querySet->GetQueryCount(), querySet);
+
+ DAWN_INVALID_IF(destinationOffset % 256 != 0,
+ "The destination buffer %s offset (%u) is not a multiple of 256.", destination,
+ destinationOffset);
+
+ uint64_t bufferSize = destination->GetSize();
+    // The destination buffer must have enough storage, starting at destinationOffset, to
+    // contain the results of the resolved queries.
+ bool fitsInBuffer =
+ destinationOffset <= bufferSize &&
+ (static_cast<uint64_t>(queryCount) * sizeof(uint64_t) <= (bufferSize - destinationOffset));
+ DAWN_INVALID_IF(
+ !fitsInBuffer,
+ "The resolved %s data size (%u) would not fit in %s with size %u at the offset %u.",
+ querySet, static_cast<uint64_t>(queryCount) * sizeof(uint64_t), destination, bufferSize,
+ destinationOffset);
+
+ return {};
+}
+
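ValidateQuerySetResolve above requires the destination offset to be a multiple of 256 bytes and the resolved data (queryCount * sizeof(uint64_t) bytes, one 64-bit value per query) to fit between that offset and the end of the buffer; checking destinationOffset <= bufferSize before computing bufferSize - destinationOffset keeps the unsigned subtraction from wrapping. A small self-contained sketch of just that arithmetic, with a hypothetical ResolvedQueriesFitInBuffer helper that is not part of this patch:

#include <cstdint>

bool ResolvedQueriesFitInBuffer(uint32_t queryCount,
                                uint64_t bufferSize,
                                uint64_t destinationOffset) {
    if (destinationOffset % 256 != 0) {
        return false;  // The offset must be a multiple of 256 bytes.
    }
    const uint64_t dataSize = static_cast<uint64_t>(queryCount) * sizeof(uint64_t);
    // Check the offset against the buffer size first so the unsigned subtraction
    // below cannot wrap around.
    return destinationOffset <= bufferSize && dataSize <= bufferSize - destinationOffset;
}

int main() {
    // 8 queries resolve to 64 bytes: they fit at offset 256 of a 512-byte buffer,
    // but not at offset 512 of the same buffer (0 bytes left).
    bool fits = ResolvedQueriesFitInBuffer(8, 512, 256);
    bool overflows = ResolvedQueriesFitInBuffer(8, 512, 512);
    return (fits && !overflows) ? 0 : 1;
}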
+MaybeError EncodeTimestampsToNanosecondsConversion(CommandEncoder* encoder,
+ QuerySetBase* querySet,
+ uint32_t firstQuery,
+ uint32_t queryCount,
+ BufferBase* destination,
+ uint64_t destinationOffset) {
+ DeviceBase* device = encoder->GetDevice();
+
+    // The availability obtained from the query set is a reference to a vector<bool>; it
+    // must be converted to uint32_t because a user input to a pipeline must not contain
+    // a bool type in WGSL.
+ std::vector<uint32_t> availability{querySet->GetQueryAvailability().begin(),
+ querySet->GetQueryAvailability().end()};
+
+ // Timestamp availability storage buffer
+ BufferDescriptor availabilityDesc = {};
+ availabilityDesc.usage = wgpu::BufferUsage::Storage | wgpu::BufferUsage::CopyDst;
+ availabilityDesc.size = querySet->GetQueryCount() * sizeof(uint32_t);
+ Ref<BufferBase> availabilityBuffer;
+ DAWN_TRY_ASSIGN(availabilityBuffer, device->CreateBuffer(&availabilityDesc));
+
+ DAWN_TRY(device->GetQueue()->WriteBuffer(availabilityBuffer.Get(), 0, availability.data(),
+ availability.size() * sizeof(uint32_t)));
+
+ // Timestamp params uniform buffer
+ TimestampParams params(firstQuery, queryCount, static_cast<uint32_t>(destinationOffset),
+ device->GetTimestampPeriodInNS());
+
+ BufferDescriptor parmsDesc = {};
+ parmsDesc.usage = wgpu::BufferUsage::Uniform | wgpu::BufferUsage::CopyDst;
+ parmsDesc.size = sizeof(params);
+ Ref<BufferBase> paramsBuffer;
+ DAWN_TRY_ASSIGN(paramsBuffer, device->CreateBuffer(&parmsDesc));
+
+ DAWN_TRY(device->GetQueue()->WriteBuffer(paramsBuffer.Get(), 0, &params, sizeof(params)));
+
+ return EncodeConvertTimestampsToNanoseconds(encoder, destination, availabilityBuffer.Get(),
+ paramsBuffer.Get());
+}
+
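The availability conversion in EncodeTimestampsToNanosecondsConversion widens the query set's std::vector<bool> into uint32_t values before uploading it, because, as the comment in that function notes, a user input to a pipeline must not contain a bool type in WGSL. A minimal sketch of that widening step, using a hypothetical WidenAvailability helper independent of Dawn's buffer setup:

#include <cstdint>
#include <vector>

std::vector<uint32_t> WidenAvailability(const std::vector<bool>& availability) {
    // std::vector<bool> stores packed bits; constructing from its iterators widens
    // each flag to a full uint32_t (0 or 1) suitable for a storage buffer.
    return std::vector<uint32_t>(availability.begin(), availability.end());
}

int main() {
    std::vector<bool> availability = {true, false, true};
    std::vector<uint32_t> widened = WidenAvailability(availability);  // {1, 0, 1}
    return widened.size() == availability.size() ? 0 : 1;
}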
+bool IsReadOnlyDepthStencilAttachment(
+ const RenderPassDepthStencilAttachment* depthStencilAttachment) {
+ DAWN_ASSERT(depthStencilAttachment != nullptr);
+ Aspect aspects = depthStencilAttachment->view->GetAspects();
+ DAWN_ASSERT(IsSubset(aspects, Aspect::Depth | Aspect::Stencil));
+
+ if ((aspects & Aspect::Depth) && !depthStencilAttachment->depthReadOnly) {
+ return false;
+ }
+ if (aspects & Aspect::Stencil && !depthStencilAttachment->stencilReadOnly) {
+ return false;
+ }
+ return true;
+}
+
+} // namespace
+
+MaybeError ValidateCommandEncoderDescriptor(const DeviceBase* device,
+ const CommandEncoderDescriptor* descriptor) {
+ DAWN_TRY(ValidateSingleSType(descriptor->nextInChain,
+ wgpu::SType::DawnEncoderInternalUsageDescriptor));
+
+ const DawnEncoderInternalUsageDescriptor* internalUsageDesc = nullptr;
+ FindInChain(descriptor->nextInChain, &internalUsageDesc);
+
+ DAWN_INVALID_IF(internalUsageDesc != nullptr &&
+ !device->APIHasFeature(wgpu::FeatureName::DawnInternalUsages),
+ "%s is not available.", wgpu::FeatureName::DawnInternalUsages);
+ return {};
+}
+
+// static
+Ref<CommandEncoder> CommandEncoder::Create(DeviceBase* device,
+ const CommandEncoderDescriptor* descriptor) {
+ return AcquireRef(new CommandEncoder(device, descriptor));
+}
+
+// static
+CommandEncoder* CommandEncoder::MakeError(DeviceBase* device) {
+ return new CommandEncoder(device, ObjectBase::kError);
+}
+
+CommandEncoder::CommandEncoder(DeviceBase* device, const CommandEncoderDescriptor* descriptor)
+ : ApiObjectBase(device, descriptor->label), mEncodingContext(device, this) {
+ TrackInDevice();
+
+ const DawnEncoderInternalUsageDescriptor* internalUsageDesc = nullptr;
+ FindInChain(descriptor->nextInChain, &internalUsageDesc);
+
+ if (internalUsageDesc != nullptr && internalUsageDesc->useInternalUsages) {
+ mUsageValidationMode = UsageValidationMode::Internal;
+ } else {
+ mUsageValidationMode = UsageValidationMode::Default;
+ }
+}
+
+CommandEncoder::CommandEncoder(DeviceBase* device, ObjectBase::ErrorTag tag)
+ : ApiObjectBase(device, tag),
+ mEncodingContext(device, this),
+ mUsageValidationMode(UsageValidationMode::Default) {
+ mEncodingContext.HandleError(DAWN_FORMAT_VALIDATION_ERROR("%s is invalid.", this));
+}
+
+ObjectType CommandEncoder::GetType() const {
+ return ObjectType::CommandEncoder;
+}
+
+void CommandEncoder::DestroyImpl() {
+ mEncodingContext.Destroy();
+}
+
+CommandBufferResourceUsage CommandEncoder::AcquireResourceUsages() {
+ return CommandBufferResourceUsage{
+ mEncodingContext.AcquireRenderPassUsages(), mEncodingContext.AcquireComputePassUsages(),
+ std::move(mTopLevelBuffers), std::move(mTopLevelTextures), std::move(mUsedQuerySets)};
+}
+
+CommandIterator CommandEncoder::AcquireCommands() {
+ return mEncodingContext.AcquireCommands();
+}
+
+void CommandEncoder::TrackUsedQuerySet(QuerySetBase* querySet) {
+ mUsedQuerySets.insert(querySet);
+}
+
+void CommandEncoder::TrackQueryAvailability(QuerySetBase* querySet, uint32_t queryIndex) {
+ DAWN_ASSERT(querySet != nullptr);
+
+ if (GetDevice()->IsValidationEnabled()) {
+ TrackUsedQuerySet(querySet);
+ }
- if (descriptor->depthStencilAttachment != nullptr) {
- DAWN_TRY_CONTEXT(ValidateRenderPassDepthStencilAttachment(
- device, descriptor->depthStencilAttachment, width, height,
- sampleCount, usageValidationMode),
- "validating depthStencilAttachment.");
- } else {
- DAWN_INVALID_IF(
- isAllColorAttachmentNull,
- "No color or depthStencil attachments specified. At least one is required.");
- }
+ // Set the query at queryIndex to available for resolving in query set.
+ querySet->SetQueryAvailability(queryIndex, true);
+}
- if (descriptor->occlusionQuerySet != nullptr) {
- DAWN_TRY(device->ValidateObject(descriptor->occlusionQuerySet));
+// Implementation of the API's command recording methods
- DAWN_INVALID_IF(
- descriptor->occlusionQuerySet->GetQueryType() != wgpu::QueryType::Occlusion,
- "The occlusionQuerySet %s type (%s) is not %s.", descriptor->occlusionQuerySet,
- descriptor->occlusionQuerySet->GetQueryType(), wgpu::QueryType::Occlusion);
- }
+ComputePassEncoder* CommandEncoder::APIBeginComputePass(const ComputePassDescriptor* descriptor) {
+ return BeginComputePass(descriptor).Detach();
+}
- if (descriptor->timestampWriteCount > 0) {
- DAWN_ASSERT(descriptor->timestampWrites != nullptr);
-
- // Record the query set and query index used on render passes for validating query
- // index overwrite. The TrackQueryAvailability of
- // RenderPassResourceUsageTracker is not used here because the timestampWrites are
- // not validated and encoded one by one, but encoded together after passing the
- // validation.
- QueryAvailabilityMap usedQueries;
- for (uint32_t i = 0; i < descriptor->timestampWriteCount; ++i) {
- QuerySetBase* querySet = descriptor->timestampWrites[i].querySet;
- DAWN_ASSERT(querySet != nullptr);
- uint32_t queryIndex = descriptor->timestampWrites[i].queryIndex;
- DAWN_TRY_CONTEXT(ValidateTimestampQuery(device, querySet, queryIndex),
- "validating querySet and queryIndex of timestampWrites[%u].",
- i);
- DAWN_TRY_CONTEXT(ValidateRenderPassTimestampLocation(
- descriptor->timestampWrites[i].location),
- "validating location of timestampWrites[%u].", i);
-
- auto checkIt = usedQueries.find(querySet);
- DAWN_INVALID_IF(checkIt != usedQueries.end() && checkIt->second[queryIndex],
- "Query index %u of %s is written to twice in a render pass.",
- queryIndex, querySet);
-
- // Gets the iterator for that querySet or create a new vector of bool set to
- // false if the querySet wasn't registered.
- auto addIt = usedQueries.emplace(querySet, querySet->GetQueryCount()).first;
- addIt->second[queryIndex] = true;
- }
- }
+Ref<ComputePassEncoder> CommandEncoder::BeginComputePass(const ComputePassDescriptor* descriptor) {
+ DeviceBase* device = GetDevice();
- DAWN_INVALID_IF(descriptor->colorAttachmentCount == 0 &&
- descriptor->depthStencilAttachment == nullptr,
- "Render pass has no attachments.");
+ std::vector<TimestampWrite> timestampWritesAtBeginning;
+ std::vector<TimestampWrite> timestampWritesAtEnd;
+ bool success = mEncodingContext.TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ DAWN_TRY(ValidateComputePassDescriptor(device, descriptor));
- return {};
- }
+ BeginComputePassCmd* cmd =
+ allocator->Allocate<BeginComputePassCmd>(Command::BeginComputePass);
- MaybeError ValidateComputePassDescriptor(const DeviceBase* device,
- const ComputePassDescriptor* descriptor) {
if (descriptor == nullptr) {
return {};
}
- if (descriptor->timestampWriteCount > 0) {
- DAWN_ASSERT(descriptor->timestampWrites != nullptr);
-
- for (uint32_t i = 0; i < descriptor->timestampWriteCount; ++i) {
- DAWN_ASSERT(descriptor->timestampWrites[i].querySet != nullptr);
- DAWN_TRY_CONTEXT(
- ValidateTimestampQuery(device, descriptor->timestampWrites[i].querySet,
- descriptor->timestampWrites[i].queryIndex),
- "validating querySet and queryIndex of timestampWrites[%u].", i);
- DAWN_TRY_CONTEXT(ValidateComputePassTimestampLocation(
- descriptor->timestampWrites[i].location),
- "validating location of timestampWrites[%u].", i);
- }
- }
+ // Split the timestampWrites used in BeginComputePassCmd and EndComputePassCmd
+ for (uint32_t i = 0; i < descriptor->timestampWriteCount; i++) {
+ QuerySetBase* querySet = descriptor->timestampWrites[i].querySet;
+ uint32_t queryIndex = descriptor->timestampWrites[i].queryIndex;
- return {};
- }
+ switch (descriptor->timestampWrites[i].location) {
+ case wgpu::ComputePassTimestampLocation::Beginning:
+ timestampWritesAtBeginning.push_back({querySet, queryIndex});
+ break;
+ case wgpu::ComputePassTimestampLocation::End:
+ timestampWritesAtEnd.push_back({querySet, queryIndex});
+ break;
+ default:
+ break;
+ }
- MaybeError ValidateQuerySetResolve(const QuerySetBase* querySet,
- uint32_t firstQuery,
- uint32_t queryCount,
- const BufferBase* destination,
- uint64_t destinationOffset) {
- DAWN_INVALID_IF(firstQuery >= querySet->GetQueryCount(),
- "First query (%u) exceeds the number of queries (%u) in %s.",
- firstQuery, querySet->GetQueryCount(), querySet);
+ TrackQueryAvailability(querySet, queryIndex);
+ }
- DAWN_INVALID_IF(
- queryCount > querySet->GetQueryCount() - firstQuery,
- "The query range (firstQuery: %u, queryCount: %u) exceeds the number of queries "
- "(%u) in %s.",
- firstQuery, queryCount, querySet->GetQueryCount(), querySet);
-
- DAWN_INVALID_IF(destinationOffset % 256 != 0,
- "The destination buffer %s offset (%u) is not a multiple of 256.",
- destination, destinationOffset);
-
- uint64_t bufferSize = destination->GetSize();
- // The destination buffer must have enough storage, from destination offset, to contain
- // the result of resolved queries
- bool fitsInBuffer = destinationOffset <= bufferSize &&
- (static_cast<uint64_t>(queryCount) * sizeof(uint64_t) <=
- (bufferSize - destinationOffset));
- DAWN_INVALID_IF(
- !fitsInBuffer,
- "The resolved %s data size (%u) would not fit in %s with size %u at the offset %u.",
- querySet, static_cast<uint64_t>(queryCount) * sizeof(uint64_t), destination,
- bufferSize, destinationOffset);
+ cmd->timestampWrites = std::move(timestampWritesAtBeginning);
return {};
- }
+ },
+ "encoding %s.BeginComputePass(%s).", this, descriptor);
- MaybeError EncodeTimestampsToNanosecondsConversion(CommandEncoder* encoder,
- QuerySetBase* querySet,
- uint32_t firstQuery,
- uint32_t queryCount,
- BufferBase* destination,
- uint64_t destinationOffset) {
- DeviceBase* device = encoder->GetDevice();
-
- // The availability got from query set is a reference to vector<bool>, need to covert
- // bool to uint32_t due to a user input in pipeline must not contain a bool type in
- // WGSL.
- std::vector<uint32_t> availability{querySet->GetQueryAvailability().begin(),
- querySet->GetQueryAvailability().end()};
-
- // Timestamp availability storage buffer
- BufferDescriptor availabilityDesc = {};
- availabilityDesc.usage = wgpu::BufferUsage::Storage | wgpu::BufferUsage::CopyDst;
- availabilityDesc.size = querySet->GetQueryCount() * sizeof(uint32_t);
- Ref<BufferBase> availabilityBuffer;
- DAWN_TRY_ASSIGN(availabilityBuffer, device->CreateBuffer(&availabilityDesc));
-
- DAWN_TRY(device->GetQueue()->WriteBuffer(availabilityBuffer.Get(), 0,
- availability.data(),
- availability.size() * sizeof(uint32_t)));
-
- // Timestamp params uniform buffer
- TimestampParams params(firstQuery, queryCount, static_cast<uint32_t>(destinationOffset),
- device->GetTimestampPeriodInNS());
-
- BufferDescriptor parmsDesc = {};
- parmsDesc.usage = wgpu::BufferUsage::Uniform | wgpu::BufferUsage::CopyDst;
- parmsDesc.size = sizeof(params);
- Ref<BufferBase> paramsBuffer;
- DAWN_TRY_ASSIGN(paramsBuffer, device->CreateBuffer(&parmsDesc));
-
- DAWN_TRY(
- device->GetQueue()->WriteBuffer(paramsBuffer.Get(), 0, &params, sizeof(params)));
-
- return EncodeConvertTimestampsToNanoseconds(
- encoder, destination, availabilityBuffer.Get(), paramsBuffer.Get());
- }
-
- bool IsReadOnlyDepthStencilAttachment(
- const RenderPassDepthStencilAttachment* depthStencilAttachment) {
- DAWN_ASSERT(depthStencilAttachment != nullptr);
- Aspect aspects = depthStencilAttachment->view->GetAspects();
- DAWN_ASSERT(IsSubset(aspects, Aspect::Depth | Aspect::Stencil));
-
- if ((aspects & Aspect::Depth) && !depthStencilAttachment->depthReadOnly) {
- return false;
- }
- if (aspects & Aspect::Stencil && !depthStencilAttachment->stencilReadOnly) {
- return false;
- }
- return true;
+ if (success) {
+ const ComputePassDescriptor defaultDescriptor = {};
+ if (descriptor == nullptr) {
+ descriptor = &defaultDescriptor;
}
- } // namespace
-
- MaybeError ValidateCommandEncoderDescriptor(const DeviceBase* device,
- const CommandEncoderDescriptor* descriptor) {
- DAWN_TRY(ValidateSingleSType(descriptor->nextInChain,
- wgpu::SType::DawnEncoderInternalUsageDescriptor));
-
- const DawnEncoderInternalUsageDescriptor* internalUsageDesc = nullptr;
- FindInChain(descriptor->nextInChain, &internalUsageDesc);
-
- DAWN_INVALID_IF(internalUsageDesc != nullptr &&
- !device->APIHasFeature(wgpu::FeatureName::DawnInternalUsages),
- "%s is not available.", wgpu::FeatureName::DawnInternalUsages);
- return {};
+ Ref<ComputePassEncoder> passEncoder = ComputePassEncoder::Create(
+ device, descriptor, this, &mEncodingContext, std::move(timestampWritesAtEnd));
+ mEncodingContext.EnterPass(passEncoder.Get());
+ return passEncoder;
}
- // static
- Ref<CommandEncoder> CommandEncoder::Create(DeviceBase* device,
- const CommandEncoderDescriptor* descriptor) {
- return AcquireRef(new CommandEncoder(device, descriptor));
- }
-
- // static
- CommandEncoder* CommandEncoder::MakeError(DeviceBase* device) {
- return new CommandEncoder(device, ObjectBase::kError);
- }
-
- CommandEncoder::CommandEncoder(DeviceBase* device, const CommandEncoderDescriptor* descriptor)
- : ApiObjectBase(device, descriptor->label), mEncodingContext(device, this) {
- TrackInDevice();
-
- const DawnEncoderInternalUsageDescriptor* internalUsageDesc = nullptr;
- FindInChain(descriptor->nextInChain, &internalUsageDesc);
-
- if (internalUsageDesc != nullptr && internalUsageDesc->useInternalUsages) {
- mUsageValidationMode = UsageValidationMode::Internal;
- } else {
- mUsageValidationMode = UsageValidationMode::Default;
- }
- }
+ return ComputePassEncoder::MakeError(device, this, &mEncodingContext);
+}
- CommandEncoder::CommandEncoder(DeviceBase* device, ObjectBase::ErrorTag tag)
- : ApiObjectBase(device, tag),
- mEncodingContext(device, this),
- mUsageValidationMode(UsageValidationMode::Default) {
- mEncodingContext.HandleError(DAWN_FORMAT_VALIDATION_ERROR("%s is invalid.", this));
- }
+RenderPassEncoder* CommandEncoder::APIBeginRenderPass(const RenderPassDescriptor* descriptor) {
+ return BeginRenderPass(descriptor).Detach();
+}
- ObjectType CommandEncoder::GetType() const {
- return ObjectType::CommandEncoder;
- }
+Ref<RenderPassEncoder> CommandEncoder::BeginRenderPass(const RenderPassDescriptor* descriptor) {
+ DeviceBase* device = GetDevice();
- void CommandEncoder::DestroyImpl() {
- mEncodingContext.Destroy();
- }
+ RenderPassResourceUsageTracker usageTracker;
- CommandBufferResourceUsage CommandEncoder::AcquireResourceUsages() {
- return CommandBufferResourceUsage{
- mEncodingContext.AcquireRenderPassUsages(), mEncodingContext.AcquireComputePassUsages(),
- std::move(mTopLevelBuffers), std::move(mTopLevelTextures), std::move(mUsedQuerySets)};
- }
+ uint32_t width = 0;
+ uint32_t height = 0;
+ bool depthReadOnly = false;
+ bool stencilReadOnly = false;
+ Ref<AttachmentState> attachmentState;
+ std::vector<TimestampWrite> timestampWritesAtBeginning;
+ std::vector<TimestampWrite> timestampWritesAtEnd;
+ bool success = mEncodingContext.TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ uint32_t sampleCount = 0;
- CommandIterator CommandEncoder::AcquireCommands() {
- return mEncodingContext.AcquireCommands();
- }
+ DAWN_TRY(ValidateRenderPassDescriptor(device, descriptor, &width, &height, &sampleCount,
+ mUsageValidationMode));
- void CommandEncoder::TrackUsedQuerySet(QuerySetBase* querySet) {
- mUsedQuerySets.insert(querySet);
- }
+ ASSERT(width > 0 && height > 0 && sampleCount > 0);
- void CommandEncoder::TrackQueryAvailability(QuerySetBase* querySet, uint32_t queryIndex) {
- DAWN_ASSERT(querySet != nullptr);
+ mEncodingContext.WillBeginRenderPass();
+ BeginRenderPassCmd* cmd =
+ allocator->Allocate<BeginRenderPassCmd>(Command::BeginRenderPass);
- if (GetDevice()->IsValidationEnabled()) {
- TrackUsedQuerySet(querySet);
- }
+ cmd->attachmentState = device->GetOrCreateAttachmentState(descriptor);
+ attachmentState = cmd->attachmentState;
- // Set the query at queryIndex to available for resolving in query set.
- querySet->SetQueryAvailability(queryIndex, true);
- }
+ // Split the timestampWrites used in BeginRenderPassCmd and EndRenderPassCmd
+ for (uint32_t i = 0; i < descriptor->timestampWriteCount; i++) {
+ QuerySetBase* querySet = descriptor->timestampWrites[i].querySet;
+ uint32_t queryIndex = descriptor->timestampWrites[i].queryIndex;
- // Implementation of the API's command recording methods
+ switch (descriptor->timestampWrites[i].location) {
+ case wgpu::RenderPassTimestampLocation::Beginning:
+ timestampWritesAtBeginning.push_back({querySet, queryIndex});
+ break;
+ case wgpu::RenderPassTimestampLocation::End:
+ timestampWritesAtEnd.push_back({querySet, queryIndex});
+ break;
+ default:
+ break;
+ }
- ComputePassEncoder* CommandEncoder::APIBeginComputePass(
- const ComputePassDescriptor* descriptor) {
- return BeginComputePass(descriptor).Detach();
- }
+ TrackQueryAvailability(querySet, queryIndex);
+                // Track the query availability as true on the render pass as well, for
+                // overwrite validation and query reset on Vulkan.
+ usageTracker.TrackQueryAvailability(querySet, queryIndex);
+ }
- Ref<ComputePassEncoder> CommandEncoder::BeginComputePass(
- const ComputePassDescriptor* descriptor) {
- DeviceBase* device = GetDevice();
+ for (ColorAttachmentIndex index :
+ IterateBitSet(cmd->attachmentState->GetColorAttachmentsMask())) {
+ uint8_t i = static_cast<uint8_t>(index);
+ TextureViewBase* view = descriptor->colorAttachments[i].view;
+ TextureViewBase* resolveTarget = descriptor->colorAttachments[i].resolveTarget;
- std::vector<TimestampWrite> timestampWritesAtBeginning;
- std::vector<TimestampWrite> timestampWritesAtEnd;
- bool success = mEncodingContext.TryEncode(
- this,
- [&](CommandAllocator* allocator) -> MaybeError {
- DAWN_TRY(ValidateComputePassDescriptor(device, descriptor));
+ cmd->colorAttachments[index].view = view;
+ cmd->colorAttachments[index].resolveTarget = resolveTarget;
+ cmd->colorAttachments[index].loadOp = descriptor->colorAttachments[i].loadOp;
+ cmd->colorAttachments[index].storeOp = descriptor->colorAttachments[i].storeOp;
- BeginComputePassCmd* cmd =
- allocator->Allocate<BeginComputePassCmd>(Command::BeginComputePass);
+ cmd->colorAttachments[index].clearColor =
+ HasDeprecatedColor(descriptor->colorAttachments[i])
+ ? descriptor->colorAttachments[i].clearColor
+ : descriptor->colorAttachments[i].clearValue;
- if (descriptor == nullptr) {
- return {};
- }
+ usageTracker.TextureViewUsedAs(view, wgpu::TextureUsage::RenderAttachment);
- // Split the timestampWrites used in BeginComputePassCmd and EndComputePassCmd
- for (uint32_t i = 0; i < descriptor->timestampWriteCount; i++) {
- QuerySetBase* querySet = descriptor->timestampWrites[i].querySet;
- uint32_t queryIndex = descriptor->timestampWrites[i].queryIndex;
-
- switch (descriptor->timestampWrites[i].location) {
- case wgpu::ComputePassTimestampLocation::Beginning:
- timestampWritesAtBeginning.push_back({querySet, queryIndex});
- break;
- case wgpu::ComputePassTimestampLocation::End:
- timestampWritesAtEnd.push_back({querySet, queryIndex});
- break;
- default:
- break;
- }
-
- TrackQueryAvailability(querySet, queryIndex);
+ if (resolveTarget != nullptr) {
+ usageTracker.TextureViewUsedAs(resolveTarget,
+ wgpu::TextureUsage::RenderAttachment);
}
-
- cmd->timestampWrites = std::move(timestampWritesAtBeginning);
-
- return {};
- },
- "encoding %s.BeginComputePass(%s).", this, descriptor);
-
- if (success) {
- const ComputePassDescriptor defaultDescriptor = {};
- if (descriptor == nullptr) {
- descriptor = &defaultDescriptor;
}
- Ref<ComputePassEncoder> passEncoder = ComputePassEncoder::Create(
- device, descriptor, this, &mEncodingContext, std::move(timestampWritesAtEnd));
- mEncodingContext.EnterPass(passEncoder.Get());
- return passEncoder;
- }
+ if (cmd->attachmentState->HasDepthStencilAttachment()) {
+ TextureViewBase* view = descriptor->depthStencilAttachment->view;
- return ComputePassEncoder::MakeError(device, this, &mEncodingContext);
- }
+ cmd->depthStencilAttachment.view = view;
- RenderPassEncoder* CommandEncoder::APIBeginRenderPass(const RenderPassDescriptor* descriptor) {
- return BeginRenderPass(descriptor).Detach();
- }
-
- Ref<RenderPassEncoder> CommandEncoder::BeginRenderPass(const RenderPassDescriptor* descriptor) {
- DeviceBase* device = GetDevice();
-
- RenderPassResourceUsageTracker usageTracker;
-
- uint32_t width = 0;
- uint32_t height = 0;
- bool depthReadOnly = false;
- bool stencilReadOnly = false;
- Ref<AttachmentState> attachmentState;
- std::vector<TimestampWrite> timestampWritesAtBeginning;
- std::vector<TimestampWrite> timestampWritesAtEnd;
- bool success = mEncodingContext.TryEncode(
- this,
- [&](CommandAllocator* allocator) -> MaybeError {
- uint32_t sampleCount = 0;
-
- DAWN_TRY(ValidateRenderPassDescriptor(device, descriptor, &width, &height,
- &sampleCount, mUsageValidationMode));
-
- ASSERT(width > 0 && height > 0 && sampleCount > 0);
-
- mEncodingContext.WillBeginRenderPass();
- BeginRenderPassCmd* cmd =
- allocator->Allocate<BeginRenderPassCmd>(Command::BeginRenderPass);
-
- cmd->attachmentState = device->GetOrCreateAttachmentState(descriptor);
- attachmentState = cmd->attachmentState;
-
- // Split the timestampWrites used in BeginRenderPassCmd and EndRenderPassCmd
- for (uint32_t i = 0; i < descriptor->timestampWriteCount; i++) {
- QuerySetBase* querySet = descriptor->timestampWrites[i].querySet;
- uint32_t queryIndex = descriptor->timestampWrites[i].queryIndex;
-
- switch (descriptor->timestampWrites[i].location) {
- case wgpu::RenderPassTimestampLocation::Beginning:
- timestampWritesAtBeginning.push_back({querySet, queryIndex});
- break;
- case wgpu::RenderPassTimestampLocation::End:
- timestampWritesAtEnd.push_back({querySet, queryIndex});
- break;
- default:
- break;
- }
-
- TrackQueryAvailability(querySet, queryIndex);
- // Track the query availability with true on render pass again for rewrite
- // validation and query reset on Vulkan
- usageTracker.TrackQueryAvailability(querySet, queryIndex);
+ if (!std::isnan(descriptor->depthStencilAttachment->clearDepth)) {
+ // TODO(dawn:1269): Remove this branch after the deprecation period.
+ cmd->depthStencilAttachment.clearDepth =
+ descriptor->depthStencilAttachment->clearDepth;
+ } else {
+ cmd->depthStencilAttachment.clearDepth =
+ descriptor->depthStencilAttachment->depthClearValue;
}
- for (ColorAttachmentIndex index :
- IterateBitSet(cmd->attachmentState->GetColorAttachmentsMask())) {
- uint8_t i = static_cast<uint8_t>(index);
- TextureViewBase* view = descriptor->colorAttachments[i].view;
- TextureViewBase* resolveTarget = descriptor->colorAttachments[i].resolveTarget;
-
- cmd->colorAttachments[index].view = view;
- cmd->colorAttachments[index].resolveTarget = resolveTarget;
- cmd->colorAttachments[index].loadOp = descriptor->colorAttachments[i].loadOp;
- cmd->colorAttachments[index].storeOp = descriptor->colorAttachments[i].storeOp;
-
- cmd->colorAttachments[index].clearColor =
- HasDeprecatedColor(descriptor->colorAttachments[i])
- ? descriptor->colorAttachments[i].clearColor
- : descriptor->colorAttachments[i].clearValue;
-
- usageTracker.TextureViewUsedAs(view, wgpu::TextureUsage::RenderAttachment);
-
- if (resolveTarget != nullptr) {
- usageTracker.TextureViewUsedAs(resolveTarget,
- wgpu::TextureUsage::RenderAttachment);
- }
+ if (descriptor->depthStencilAttachment->stencilClearValue == 0 &&
+ descriptor->depthStencilAttachment->clearStencil != 0) {
+ // TODO(dawn:1269): Remove this branch after the deprecation period.
+ cmd->depthStencilAttachment.clearStencil =
+ descriptor->depthStencilAttachment->clearStencil;
+ } else {
+ cmd->depthStencilAttachment.clearStencil =
+ descriptor->depthStencilAttachment->stencilClearValue;
}
-
- if (cmd->attachmentState->HasDepthStencilAttachment()) {
- TextureViewBase* view = descriptor->depthStencilAttachment->view;
-
- cmd->depthStencilAttachment.view = view;
-
- if (!std::isnan(descriptor->depthStencilAttachment->clearDepth)) {
- // TODO(dawn:1269): Remove this branch after the deprecation period.
- cmd->depthStencilAttachment.clearDepth =
- descriptor->depthStencilAttachment->clearDepth;
- } else {
- cmd->depthStencilAttachment.clearDepth =
- descriptor->depthStencilAttachment->depthClearValue;
- }
-
- if (descriptor->depthStencilAttachment->stencilClearValue == 0 &&
- descriptor->depthStencilAttachment->clearStencil != 0) {
- // TODO(dawn:1269): Remove this branch after the deprecation period.
- cmd->depthStencilAttachment.clearStencil =
- descriptor->depthStencilAttachment->clearStencil;
- } else {
- cmd->depthStencilAttachment.clearStencil =
- descriptor->depthStencilAttachment->stencilClearValue;
- }
-
- cmd->depthStencilAttachment.depthReadOnly =
- descriptor->depthStencilAttachment->depthReadOnly;
- cmd->depthStencilAttachment.stencilReadOnly =
- descriptor->depthStencilAttachment->stencilReadOnly;
-
- if (descriptor->depthStencilAttachment->depthReadOnly ||
- !IsSubset(Aspect::Depth,
- descriptor->depthStencilAttachment->view->GetAspects())) {
- cmd->depthStencilAttachment.depthLoadOp = wgpu::LoadOp::Load;
- cmd->depthStencilAttachment.depthStoreOp = wgpu::StoreOp::Store;
- } else {
- cmd->depthStencilAttachment.depthLoadOp =
- descriptor->depthStencilAttachment->depthLoadOp;
- cmd->depthStencilAttachment.depthStoreOp =
- descriptor->depthStencilAttachment->depthStoreOp;
- }
-
- if (descriptor->depthStencilAttachment->stencilReadOnly ||
- !IsSubset(Aspect::Stencil,
- descriptor->depthStencilAttachment->view->GetAspects())) {
- cmd->depthStencilAttachment.stencilLoadOp = wgpu::LoadOp::Load;
- cmd->depthStencilAttachment.stencilStoreOp = wgpu::StoreOp::Store;
- } else {
- cmd->depthStencilAttachment.stencilLoadOp =
- descriptor->depthStencilAttachment->stencilLoadOp;
- cmd->depthStencilAttachment.stencilStoreOp =
- descriptor->depthStencilAttachment->stencilStoreOp;
- }
-
- if (IsReadOnlyDepthStencilAttachment(descriptor->depthStencilAttachment)) {
- usageTracker.TextureViewUsedAs(view, kReadOnlyRenderAttachment);
- } else {
- usageTracker.TextureViewUsedAs(view, wgpu::TextureUsage::RenderAttachment);
- }
-
- depthReadOnly = descriptor->depthStencilAttachment->depthReadOnly;
- stencilReadOnly = descriptor->depthStencilAttachment->stencilReadOnly;
+ if (view->GetFormat().HasStencil()) {
+ // GPURenderPassDepthStencilAttachment.stencilClearValue will be converted to
+ // the type of the stencil aspect of view by taking the same number of LSBs as
+ // the number of bits in the stencil aspect of one texel block of view.
+ ASSERT(view->GetFormat()
+ .GetAspectInfo(dawn::native::Aspect::Stencil)
+ .block.byteSize == 1u);
+ cmd->depthStencilAttachment.clearStencil &= 0xFF;
}
- cmd->width = width;
- cmd->height = height;
-
- cmd->occlusionQuerySet = descriptor->occlusionQuerySet;
-
- cmd->timestampWrites = std::move(timestampWritesAtBeginning);
+ cmd->depthStencilAttachment.depthReadOnly =
+ descriptor->depthStencilAttachment->depthReadOnly;
+ cmd->depthStencilAttachment.stencilReadOnly =
+ descriptor->depthStencilAttachment->stencilReadOnly;
- return {};
- },
- "encoding %s.BeginRenderPass(%s).", this, descriptor);
-
- if (success) {
- Ref<RenderPassEncoder> passEncoder = RenderPassEncoder::Create(
- device, descriptor, this, &mEncodingContext, std::move(usageTracker),
- std::move(attachmentState), std::move(timestampWritesAtEnd), width, height,
- depthReadOnly, stencilReadOnly);
- mEncodingContext.EnterPass(passEncoder.Get());
- return passEncoder;
- }
+ if (descriptor->depthStencilAttachment->depthReadOnly ||
+ !IsSubset(Aspect::Depth,
+ descriptor->depthStencilAttachment->view->GetAspects())) {
+ cmd->depthStencilAttachment.depthLoadOp = wgpu::LoadOp::Load;
+ cmd->depthStencilAttachment.depthStoreOp = wgpu::StoreOp::Store;
+ } else {
+ cmd->depthStencilAttachment.depthLoadOp =
+ descriptor->depthStencilAttachment->depthLoadOp;
+ cmd->depthStencilAttachment.depthStoreOp =
+ descriptor->depthStencilAttachment->depthStoreOp;
+ }
- return RenderPassEncoder::MakeError(device, this, &mEncodingContext);
- }
+ if (descriptor->depthStencilAttachment->stencilReadOnly ||
+ !IsSubset(Aspect::Stencil,
+ descriptor->depthStencilAttachment->view->GetAspects())) {
+ cmd->depthStencilAttachment.stencilLoadOp = wgpu::LoadOp::Load;
+ cmd->depthStencilAttachment.stencilStoreOp = wgpu::StoreOp::Store;
+ } else {
+ cmd->depthStencilAttachment.stencilLoadOp =
+ descriptor->depthStencilAttachment->stencilLoadOp;
+ cmd->depthStencilAttachment.stencilStoreOp =
+ descriptor->depthStencilAttachment->stencilStoreOp;
+ }
- void CommandEncoder::APICopyBufferToBuffer(BufferBase* source,
- uint64_t sourceOffset,
- BufferBase* destination,
- uint64_t destinationOffset,
- uint64_t size) {
- mEncodingContext.TryEncode(
- this,
- [&](CommandAllocator* allocator) -> MaybeError {
- if (GetDevice()->IsValidationEnabled()) {
- DAWN_TRY(GetDevice()->ValidateObject(source));
- DAWN_TRY(GetDevice()->ValidateObject(destination));
-
- DAWN_INVALID_IF(source == destination,
- "Source and destination are the same buffer (%s).", source);
-
- DAWN_TRY_CONTEXT(ValidateCopySizeFitsInBuffer(source, sourceOffset, size),
- "validating source %s copy size.", source);
- DAWN_TRY_CONTEXT(
- ValidateCopySizeFitsInBuffer(destination, destinationOffset, size),
- "validating destination %s copy size.", destination);
- DAWN_TRY(ValidateB2BCopyAlignment(size, sourceOffset, destinationOffset));
-
- DAWN_TRY_CONTEXT(ValidateCanUseAs(source, wgpu::BufferUsage::CopySrc),
- "validating source %s usage.", source);
- DAWN_TRY_CONTEXT(ValidateCanUseAs(destination, wgpu::BufferUsage::CopyDst),
- "validating destination %s usage.", destination);
-
- mTopLevelBuffers.insert(source);
- mTopLevelBuffers.insert(destination);
+ if (IsReadOnlyDepthStencilAttachment(descriptor->depthStencilAttachment)) {
+ usageTracker.TextureViewUsedAs(view, kReadOnlyRenderAttachment);
+ } else {
+ usageTracker.TextureViewUsedAs(view, wgpu::TextureUsage::RenderAttachment);
}
- CopyBufferToBufferCmd* copy =
- allocator->Allocate<CopyBufferToBufferCmd>(Command::CopyBufferToBuffer);
- copy->source = source;
- copy->sourceOffset = sourceOffset;
- copy->destination = destination;
- copy->destinationOffset = destinationOffset;
- copy->size = size;
+ depthReadOnly = descriptor->depthStencilAttachment->depthReadOnly;
+ stencilReadOnly = descriptor->depthStencilAttachment->stencilReadOnly;
+ }
- return {};
- },
- "encoding %s.CopyBufferToBuffer(%s, %u, %s, %u, %u).", this, source, sourceOffset,
- destination, destinationOffset, size);
- }
+ cmd->width = width;
+ cmd->height = height;
- void CommandEncoder::APICopyBufferToTexture(const ImageCopyBuffer* source,
- const ImageCopyTexture* destination,
- const Extent3D* copySize) {
- mEncodingContext.TryEncode(
- this,
- [&](CommandAllocator* allocator) -> MaybeError {
- if (GetDevice()->IsValidationEnabled()) {
- DAWN_TRY(ValidateImageCopyBuffer(GetDevice(), *source));
- DAWN_TRY_CONTEXT(ValidateCanUseAs(source->buffer, wgpu::BufferUsage::CopySrc),
- "validating source %s usage.", source->buffer);
-
- DAWN_TRY(ValidateImageCopyTexture(GetDevice(), *destination, *copySize));
- DAWN_TRY_CONTEXT(
- ValidateCanUseAs(destination->texture, wgpu::TextureUsage::CopyDst,
- mUsageValidationMode),
- "validating destination %s usage.", destination->texture);
- DAWN_TRY(ValidateTextureSampleCountInBufferCopyCommands(destination->texture));
-
- DAWN_TRY(ValidateLinearToDepthStencilCopyRestrictions(*destination));
- // We validate texture copy range before validating linear texture data,
- // because in the latter we divide copyExtent.width by blockWidth and
- // copyExtent.height by blockHeight while the divisibility conditions are
- // checked in validating texture copy range.
- DAWN_TRY(ValidateTextureCopyRange(GetDevice(), *destination, *copySize));
- }
- const TexelBlockInfo& blockInfo =
- destination->texture->GetFormat().GetAspectInfo(destination->aspect).block;
- if (GetDevice()->IsValidationEnabled()) {
- DAWN_TRY(ValidateLinearTextureCopyOffset(
- source->layout, blockInfo,
- destination->texture->GetFormat().HasDepthOrStencil()));
- DAWN_TRY(ValidateLinearTextureData(source->layout, source->buffer->GetSize(),
- blockInfo, *copySize));
-
- mTopLevelBuffers.insert(source->buffer);
- mTopLevelTextures.insert(destination->texture);
- }
+ cmd->occlusionQuerySet = descriptor->occlusionQuerySet;
- TextureDataLayout srcLayout = source->layout;
- ApplyDefaultTextureDataLayoutOptions(&srcLayout, blockInfo, *copySize);
-
- CopyBufferToTextureCmd* copy =
- allocator->Allocate<CopyBufferToTextureCmd>(Command::CopyBufferToTexture);
- copy->source.buffer = source->buffer;
- copy->source.offset = srcLayout.offset;
- copy->source.bytesPerRow = srcLayout.bytesPerRow;
- copy->source.rowsPerImage = srcLayout.rowsPerImage;
- copy->destination.texture = destination->texture;
- copy->destination.origin = destination->origin;
- copy->destination.mipLevel = destination->mipLevel;
- copy->destination.aspect =
- ConvertAspect(destination->texture->GetFormat(), destination->aspect);
- copy->copySize = *copySize;
+ cmd->timestampWrites = std::move(timestampWritesAtBeginning);
- return {};
- },
- "encoding %s.CopyBufferToTexture(%s, %s, %s).", this, source->buffer,
- destination->texture, copySize);
+ return {};
+ },
+ "encoding %s.BeginRenderPass(%s).", this, descriptor);
+
+ if (success) {
+ Ref<RenderPassEncoder> passEncoder = RenderPassEncoder::Create(
+ device, descriptor, this, &mEncodingContext, std::move(usageTracker),
+ std::move(attachmentState), std::move(timestampWritesAtEnd), width, height,
+ depthReadOnly, stencilReadOnly);
+ mEncodingContext.EnterPass(passEncoder.Get());
+ return passEncoder;
}
- void CommandEncoder::APICopyTextureToBuffer(const ImageCopyTexture* source,
- const ImageCopyBuffer* destination,
- const Extent3D* copySize) {
- mEncodingContext.TryEncode(
- this,
- [&](CommandAllocator* allocator) -> MaybeError {
- if (GetDevice()->IsValidationEnabled()) {
- DAWN_TRY(ValidateImageCopyTexture(GetDevice(), *source, *copySize));
- DAWN_TRY_CONTEXT(ValidateCanUseAs(source->texture, wgpu::TextureUsage::CopySrc,
- mUsageValidationMode),
- "validating source %s usage.", source->texture);
- DAWN_TRY(ValidateTextureSampleCountInBufferCopyCommands(source->texture));
- DAWN_TRY(ValidateTextureDepthStencilToBufferCopyRestrictions(*source));
-
- DAWN_TRY(ValidateImageCopyBuffer(GetDevice(), *destination));
- DAWN_TRY_CONTEXT(
- ValidateCanUseAs(destination->buffer, wgpu::BufferUsage::CopyDst),
- "validating destination %s usage.", destination->buffer);
-
- // We validate texture copy range before validating linear texture data,
- // because in the latter we divide copyExtent.width by blockWidth and
- // copyExtent.height by blockHeight while the divisibility conditions are
- // checked in validating texture copy range.
- DAWN_TRY(ValidateTextureCopyRange(GetDevice(), *source, *copySize));
- }
- const TexelBlockInfo& blockInfo =
- source->texture->GetFormat().GetAspectInfo(source->aspect).block;
- if (GetDevice()->IsValidationEnabled()) {
- DAWN_TRY(ValidateLinearTextureCopyOffset(
- destination->layout, blockInfo,
- source->texture->GetFormat().HasDepthOrStencil()));
- DAWN_TRY(ValidateLinearTextureData(
- destination->layout, destination->buffer->GetSize(), blockInfo, *copySize));
-
- mTopLevelTextures.insert(source->texture);
- mTopLevelBuffers.insert(destination->buffer);
- }
+ return RenderPassEncoder::MakeError(device, this, &mEncodingContext);
+}
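
The stencil-clear comment in BeginRenderPass above (keep only as many low-order bits as the stencil aspect stores) reduces to a simple mask. A standalone sketch, assuming an 8-bit stencil aspect as the ASSERT in the hunk does; the function name is illustrative, not Dawn API:

    #include <cstdint>

    // Keep only the low-order bits that fit in the stencil aspect, mirroring
    // the "take the LSBs" conversion described in the comment above.
    uint32_t TruncateStencilClearValue(uint32_t clearValue, uint32_t stencilBits) {
        return stencilBits >= 32 ? clearValue : (clearValue & ((1u << stencilBits) - 1u));
    }

    // Example: with an 8-bit stencil aspect, 0x1FF is stored as 0xFF.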
+
+void CommandEncoder::APICopyBufferToBuffer(BufferBase* source,
+ uint64_t sourceOffset,
+ BufferBase* destination,
+ uint64_t destinationOffset,
+ uint64_t size) {
+ mEncodingContext.TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (GetDevice()->IsValidationEnabled()) {
+ DAWN_TRY(GetDevice()->ValidateObject(source));
+ DAWN_TRY(GetDevice()->ValidateObject(destination));
+
+ DAWN_INVALID_IF(source == destination,
+ "Source and destination are the same buffer (%s).", source);
+
+ DAWN_TRY_CONTEXT(ValidateCopySizeFitsInBuffer(source, sourceOffset, size),
+ "validating source %s copy size.", source);
+ DAWN_TRY_CONTEXT(ValidateCopySizeFitsInBuffer(destination, destinationOffset, size),
+ "validating destination %s copy size.", destination);
+ DAWN_TRY(ValidateB2BCopyAlignment(size, sourceOffset, destinationOffset));
+
+ DAWN_TRY_CONTEXT(ValidateCanUseAs(source, wgpu::BufferUsage::CopySrc),
+ "validating source %s usage.", source);
+ DAWN_TRY_CONTEXT(ValidateCanUseAs(destination, wgpu::BufferUsage::CopyDst),
+ "validating destination %s usage.", destination);
+
+ mTopLevelBuffers.insert(source);
+ mTopLevelBuffers.insert(destination);
+ }
- TextureDataLayout dstLayout = destination->layout;
- ApplyDefaultTextureDataLayoutOptions(&dstLayout, blockInfo, *copySize);
-
- CopyTextureToBufferCmd* copy =
- allocator->Allocate<CopyTextureToBufferCmd>(Command::CopyTextureToBuffer);
- copy->source.texture = source->texture;
- copy->source.origin = source->origin;
- copy->source.mipLevel = source->mipLevel;
- copy->source.aspect = ConvertAspect(source->texture->GetFormat(), source->aspect);
- copy->destination.buffer = destination->buffer;
- copy->destination.offset = dstLayout.offset;
- copy->destination.bytesPerRow = dstLayout.bytesPerRow;
- copy->destination.rowsPerImage = dstLayout.rowsPerImage;
- copy->copySize = *copySize;
+ CopyBufferToBufferCmd* copy =
+ allocator->Allocate<CopyBufferToBufferCmd>(Command::CopyBufferToBuffer);
+ copy->source = source;
+ copy->sourceOffset = sourceOffset;
+ copy->destination = destination;
+ copy->destinationOffset = destinationOffset;
+ copy->size = size;
- return {};
- },
- "encoding %s.CopyTextureToBuffer(%s, %s, %s).", this, source->texture,
- destination->buffer, copySize);
- }
+ return {};
+ },
+ "encoding %s.CopyBufferToBuffer(%s, %u, %s, %u, %u).", this, source, sourceOffset,
+ destination, destinationOffset, size);
+}
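
The validation above comes down to two range checks and a 4-byte alignment rule on the size and both offsets. A hedged standalone sketch of that arithmetic, with plain bools instead of Dawn's MaybeError plumbing:

    #include <cstdint>

    // True when [offset, offset + size) lies inside a buffer of bufferSize bytes,
    // written so the addition cannot overflow.
    bool CopyFitsInBuffer(uint64_t bufferSize, uint64_t offset, uint64_t size) {
        return offset <= bufferSize && size <= bufferSize - offset;
    }

    // Mirrors the B2B alignment rule: size and both offsets must be multiples of 4.
    bool HasValidB2BCopyAlignment(uint64_t size, uint64_t srcOffset, uint64_t dstOffset) {
        return size % 4 == 0 && srcOffset % 4 == 0 && dstOffset % 4 == 0;
    }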
+
+void CommandEncoder::APICopyBufferToTexture(const ImageCopyBuffer* source,
+ const ImageCopyTexture* destination,
+ const Extent3D* copySize) {
+ mEncodingContext.TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (GetDevice()->IsValidationEnabled()) {
+ DAWN_TRY(ValidateImageCopyBuffer(GetDevice(), *source));
+ DAWN_TRY_CONTEXT(ValidateCanUseAs(source->buffer, wgpu::BufferUsage::CopySrc),
+ "validating source %s usage.", source->buffer);
+
+ DAWN_TRY(ValidateImageCopyTexture(GetDevice(), *destination, *copySize));
+ DAWN_TRY_CONTEXT(ValidateCanUseAs(destination->texture, wgpu::TextureUsage::CopyDst,
+ mUsageValidationMode),
+ "validating destination %s usage.", destination->texture);
+ DAWN_TRY(ValidateTextureSampleCountInBufferCopyCommands(destination->texture));
+
+ DAWN_TRY(ValidateLinearToDepthStencilCopyRestrictions(*destination));
+ // We validate texture copy range before validating linear texture data,
+ // because in the latter we divide copyExtent.width by blockWidth and
+ // copyExtent.height by blockHeight while the divisibility conditions are
+ // checked in validating texture copy range.
+ DAWN_TRY(ValidateTextureCopyRange(GetDevice(), *destination, *copySize));
+ }
+ const TexelBlockInfo& blockInfo =
+ destination->texture->GetFormat().GetAspectInfo(destination->aspect).block;
+ if (GetDevice()->IsValidationEnabled()) {
+ DAWN_TRY(ValidateLinearTextureCopyOffset(
+ source->layout, blockInfo,
+ destination->texture->GetFormat().HasDepthOrStencil()));
+ DAWN_TRY(ValidateLinearTextureData(source->layout, source->buffer->GetSize(),
+ blockInfo, *copySize));
+
+ mTopLevelBuffers.insert(source->buffer);
+ mTopLevelTextures.insert(destination->texture);
+ }
- void CommandEncoder::APICopyTextureToTexture(const ImageCopyTexture* source,
- const ImageCopyTexture* destination,
- const Extent3D* copySize) {
- APICopyTextureToTextureHelper<false>(source, destination, copySize);
- }
+ TextureDataLayout srcLayout = source->layout;
+ ApplyDefaultTextureDataLayoutOptions(&srcLayout, blockInfo, *copySize);
+
+ CopyBufferToTextureCmd* copy =
+ allocator->Allocate<CopyBufferToTextureCmd>(Command::CopyBufferToTexture);
+ copy->source.buffer = source->buffer;
+ copy->source.offset = srcLayout.offset;
+ copy->source.bytesPerRow = srcLayout.bytesPerRow;
+ copy->source.rowsPerImage = srcLayout.rowsPerImage;
+ copy->destination.texture = destination->texture;
+ copy->destination.origin = destination->origin;
+ copy->destination.mipLevel = destination->mipLevel;
+ copy->destination.aspect =
+ ConvertAspect(destination->texture->GetFormat(), destination->aspect);
+ copy->copySize = *copySize;
- void CommandEncoder::APICopyTextureToTextureInternal(const ImageCopyTexture* source,
- const ImageCopyTexture* destination,
- const Extent3D* copySize) {
- APICopyTextureToTextureHelper<true>(source, destination, copySize);
- }
+ return {};
+ },
+ "encoding %s.CopyBufferToTexture(%s, %s, %s).", this, source->buffer, destination->texture,
+ copySize);
+}
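
The ordering comment above matters because the linear-data check divides the copy extent by the texel block dimensions, so the copy-range validation has to establish divisibility first. A small sketch of that conversion, using a 4x4 block purely as an example:

    #include <cassert>
    #include <cstdint>

    struct BlockExtent {
        uint32_t widthInBlocks;
        uint32_t heightInBlocks;
    };

    // Convert a copy extent in texels to blocks; the asserts stand in for the
    // divisibility guarantees established by the copy-range validation.
    BlockExtent ToBlocks(uint32_t width, uint32_t height, uint32_t blockW, uint32_t blockH) {
        assert(width % blockW == 0 && height % blockH == 0);
        return {width / blockW, height / blockH};
    }

    // Example: a 64x32 copy of a format with 4x4 blocks spans 16x8 blocks.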
+
+void CommandEncoder::APICopyTextureToBuffer(const ImageCopyTexture* source,
+ const ImageCopyBuffer* destination,
+ const Extent3D* copySize) {
+ mEncodingContext.TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (GetDevice()->IsValidationEnabled()) {
+ DAWN_TRY(ValidateImageCopyTexture(GetDevice(), *source, *copySize));
+ DAWN_TRY_CONTEXT(ValidateCanUseAs(source->texture, wgpu::TextureUsage::CopySrc,
+ mUsageValidationMode),
+ "validating source %s usage.", source->texture);
+ DAWN_TRY(ValidateTextureSampleCountInBufferCopyCommands(source->texture));
+ DAWN_TRY(ValidateTextureDepthStencilToBufferCopyRestrictions(*source));
+
+ DAWN_TRY(ValidateImageCopyBuffer(GetDevice(), *destination));
+ DAWN_TRY_CONTEXT(ValidateCanUseAs(destination->buffer, wgpu::BufferUsage::CopyDst),
+ "validating destination %s usage.", destination->buffer);
+
+ // We validate texture copy range before validating linear texture data,
+ // because in the latter we divide copyExtent.width by blockWidth and
+ // copyExtent.height by blockHeight while the divisibility conditions are
+ // checked in validating texture copy range.
+ DAWN_TRY(ValidateTextureCopyRange(GetDevice(), *source, *copySize));
+ }
+ const TexelBlockInfo& blockInfo =
+ source->texture->GetFormat().GetAspectInfo(source->aspect).block;
+ if (GetDevice()->IsValidationEnabled()) {
+ DAWN_TRY(ValidateLinearTextureCopyOffset(
+ destination->layout, blockInfo,
+ source->texture->GetFormat().HasDepthOrStencil()));
+ DAWN_TRY(ValidateLinearTextureData(
+ destination->layout, destination->buffer->GetSize(), blockInfo, *copySize));
+
+ mTopLevelTextures.insert(source->texture);
+ mTopLevelBuffers.insert(destination->buffer);
+ }
- template <bool Internal>
- void CommandEncoder::APICopyTextureToTextureHelper(const ImageCopyTexture* source,
- const ImageCopyTexture* destination,
- const Extent3D* copySize) {
- mEncodingContext.TryEncode(
- this,
- [&](CommandAllocator* allocator) -> MaybeError {
- if (GetDevice()->IsValidationEnabled()) {
- DAWN_TRY(GetDevice()->ValidateObject(source->texture));
- DAWN_TRY(GetDevice()->ValidateObject(destination->texture));
-
- DAWN_TRY_CONTEXT(ValidateImageCopyTexture(GetDevice(), *source, *copySize),
- "validating source %s.", source->texture);
- DAWN_TRY_CONTEXT(ValidateImageCopyTexture(GetDevice(), *destination, *copySize),
- "validating destination %s.", destination->texture);
-
- DAWN_TRY(
- ValidateTextureToTextureCopyRestrictions(*source, *destination, *copySize));
-
- DAWN_TRY_CONTEXT(ValidateTextureCopyRange(GetDevice(), *source, *copySize),
- "validating source %s copy range.", source->texture);
- DAWN_TRY_CONTEXT(ValidateTextureCopyRange(GetDevice(), *destination, *copySize),
- "validating source %s copy range.", destination->texture);
-
- // For internal usages (CopyToCopyInternal) we don't care if the user has added
- // CopySrc as a usage for this texture, but we will always add it internally.
- if (Internal) {
- DAWN_TRY(ValidateCanUseAs(source->texture, wgpu::TextureUsage::CopySrc,
- UsageValidationMode::Internal));
- DAWN_TRY(ValidateCanUseAs(destination->texture, wgpu::TextureUsage::CopyDst,
- UsageValidationMode::Internal));
- } else {
- DAWN_TRY(ValidateCanUseAs(source->texture, wgpu::TextureUsage::CopySrc,
- mUsageValidationMode));
- DAWN_TRY(ValidateCanUseAs(destination->texture, wgpu::TextureUsage::CopyDst,
- mUsageValidationMode));
- }
+ TextureDataLayout dstLayout = destination->layout;
+ ApplyDefaultTextureDataLayoutOptions(&dstLayout, blockInfo, *copySize);
+
+ CopyTextureToBufferCmd* copy =
+ allocator->Allocate<CopyTextureToBufferCmd>(Command::CopyTextureToBuffer);
+ copy->source.texture = source->texture;
+ copy->source.origin = source->origin;
+ copy->source.mipLevel = source->mipLevel;
+ copy->source.aspect = ConvertAspect(source->texture->GetFormat(), source->aspect);
+ copy->destination.buffer = destination->buffer;
+ copy->destination.offset = dstLayout.offset;
+ copy->destination.bytesPerRow = dstLayout.bytesPerRow;
+ copy->destination.rowsPerImage = dstLayout.rowsPerImage;
+ copy->copySize = *copySize;
- mTopLevelTextures.insert(source->texture);
- mTopLevelTextures.insert(destination->texture);
+ return {};
+ },
+ "encoding %s.CopyTextureToBuffer(%s, %s, %s).", this, source->texture, destination->buffer,
+ copySize);
+}
+
+void CommandEncoder::APICopyTextureToTexture(const ImageCopyTexture* source,
+ const ImageCopyTexture* destination,
+ const Extent3D* copySize) {
+ APICopyTextureToTextureHelper<false>(source, destination, copySize);
+}
+
+void CommandEncoder::APICopyTextureToTextureInternal(const ImageCopyTexture* source,
+ const ImageCopyTexture* destination,
+ const Extent3D* copySize) {
+ APICopyTextureToTextureHelper<true>(source, destination, copySize);
+}
+
+template <bool Internal>
+void CommandEncoder::APICopyTextureToTextureHelper(const ImageCopyTexture* source,
+ const ImageCopyTexture* destination,
+ const Extent3D* copySize) {
+ mEncodingContext.TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (GetDevice()->IsValidationEnabled()) {
+ DAWN_TRY(GetDevice()->ValidateObject(source->texture));
+ DAWN_TRY(GetDevice()->ValidateObject(destination->texture));
+
+ DAWN_TRY_CONTEXT(ValidateImageCopyTexture(GetDevice(), *source, *copySize),
+ "validating source %s.", source->texture);
+ DAWN_TRY_CONTEXT(ValidateImageCopyTexture(GetDevice(), *destination, *copySize),
+ "validating destination %s.", destination->texture);
+
+ DAWN_TRY(
+ ValidateTextureToTextureCopyRestrictions(*source, *destination, *copySize));
+
+ DAWN_TRY_CONTEXT(ValidateTextureCopyRange(GetDevice(), *source, *copySize),
+ "validating source %s copy range.", source->texture);
+ DAWN_TRY_CONTEXT(ValidateTextureCopyRange(GetDevice(), *destination, *copySize),
+                             "validating destination %s copy range.", destination->texture);
+
+ // For internal usages (CopyToCopyInternal) we don't care if the user has added
+ // CopySrc as a usage for this texture, but we will always add it internally.
+ if (Internal) {
+ DAWN_TRY(ValidateCanUseAs(source->texture, wgpu::TextureUsage::CopySrc,
+ UsageValidationMode::Internal));
+ DAWN_TRY(ValidateCanUseAs(destination->texture, wgpu::TextureUsage::CopyDst,
+ UsageValidationMode::Internal));
+ } else {
+ DAWN_TRY(ValidateCanUseAs(source->texture, wgpu::TextureUsage::CopySrc,
+ mUsageValidationMode));
+ DAWN_TRY(ValidateCanUseAs(destination->texture, wgpu::TextureUsage::CopyDst,
+ mUsageValidationMode));
}
- CopyTextureToTextureCmd* copy =
- allocator->Allocate<CopyTextureToTextureCmd>(Command::CopyTextureToTexture);
- copy->source.texture = source->texture;
- copy->source.origin = source->origin;
- copy->source.mipLevel = source->mipLevel;
- copy->source.aspect = ConvertAspect(source->texture->GetFormat(), source->aspect);
- copy->destination.texture = destination->texture;
- copy->destination.origin = destination->origin;
- copy->destination.mipLevel = destination->mipLevel;
- copy->destination.aspect =
- ConvertAspect(destination->texture->GetFormat(), destination->aspect);
- copy->copySize = *copySize;
+ mTopLevelTextures.insert(source->texture);
+ mTopLevelTextures.insert(destination->texture);
+ }
- return {};
- },
- "encoding %s.CopyTextureToTexture(%s, %s, %s).", this, source->texture,
- destination->texture, copySize);
- }
+ CopyTextureToTextureCmd* copy =
+ allocator->Allocate<CopyTextureToTextureCmd>(Command::CopyTextureToTexture);
+ copy->source.texture = source->texture;
+ copy->source.origin = source->origin;
+ copy->source.mipLevel = source->mipLevel;
+ copy->source.aspect = ConvertAspect(source->texture->GetFormat(), source->aspect);
+ copy->destination.texture = destination->texture;
+ copy->destination.origin = destination->origin;
+ copy->destination.mipLevel = destination->mipLevel;
+ copy->destination.aspect =
+ ConvertAspect(destination->texture->GetFormat(), destination->aspect);
+ copy->copySize = *copySize;
- void CommandEncoder::APIClearBuffer(BufferBase* buffer, uint64_t offset, uint64_t size) {
- mEncodingContext.TryEncode(
- this,
- [&](CommandAllocator* allocator) -> MaybeError {
- if (GetDevice()->IsValidationEnabled()) {
- DAWN_TRY(GetDevice()->ValidateObject(buffer));
-
- uint64_t bufferSize = buffer->GetSize();
- DAWN_INVALID_IF(offset > bufferSize,
- "Buffer offset (%u) is larger than the size (%u) of %s.",
- offset, bufferSize, buffer);
-
- uint64_t remainingSize = bufferSize - offset;
- if (size == wgpu::kWholeSize) {
- size = remainingSize;
- } else {
- DAWN_INVALID_IF(size > remainingSize,
- "Buffer range (offset: %u, size: %u) doesn't fit in "
- "the size (%u) of %s.",
- offset, size, bufferSize, buffer);
- }
-
- DAWN_TRY_CONTEXT(ValidateCanUseAs(buffer, wgpu::BufferUsage::CopyDst),
- "validating buffer %s usage.", buffer);
-
- // Size must be a multiple of 4 bytes on macOS.
- DAWN_INVALID_IF(size % 4 != 0, "Fill size (%u) is not a multiple of 4 bytes.",
- size);
-
- // Offset must be multiples of 4 bytes on macOS.
- DAWN_INVALID_IF(offset % 4 != 0, "Offset (%u) is not a multiple of 4 bytes,",
- offset);
-
- mTopLevelBuffers.insert(buffer);
+ return {};
+ },
+ "encoding %s.CopyTextureToTexture(%s, %s, %s).", this, source->texture,
+ destination->texture, copySize);
+}
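
The helper above shares one encoding path between the public and internal copy entry points; the compile-time flag only selects which usage-validation mode is applied. A reduced sketch of the pattern (the enum and checker below are placeholders, not Dawn types):

    enum class Mode { Default, Internal };

    // Placeholder check: the internal mode accepts copies the public API would
    // reject because the user never declared the CopySrc/CopyDst usage.
    bool UsageAllowed(Mode mode, bool userDeclaredUsage) {
        return mode == Mode::Internal || userDeclaredUsage;
    }

    template <bool Internal>
    bool CopyHelper(bool userDeclaredUsage) {
        // The only difference between the two entry points is the mode used here.
        return UsageAllowed(Internal ? Mode::Internal : Mode::Default, userDeclaredUsage);
    }

    bool Copy(bool u) { return CopyHelper<false>(u); }         // public entry point
    bool CopyInternal(bool u) { return CopyHelper<true>(u); }  // internal entry point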
+
+void CommandEncoder::APIClearBuffer(BufferBase* buffer, uint64_t offset, uint64_t size) {
+ mEncodingContext.TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (GetDevice()->IsValidationEnabled()) {
+ DAWN_TRY(GetDevice()->ValidateObject(buffer));
+
+ uint64_t bufferSize = buffer->GetSize();
+ DAWN_INVALID_IF(offset > bufferSize,
+ "Buffer offset (%u) is larger than the size (%u) of %s.", offset,
+ bufferSize, buffer);
+
+ uint64_t remainingSize = bufferSize - offset;
+ if (size == wgpu::kWholeSize) {
+ size = remainingSize;
} else {
- if (size == wgpu::kWholeSize) {
- DAWN_ASSERT(buffer->GetSize() >= offset);
- size = buffer->GetSize() - offset;
- }
+ DAWN_INVALID_IF(size > remainingSize,
+ "Buffer range (offset: %u, size: %u) doesn't fit in "
+ "the size (%u) of %s.",
+ offset, size, bufferSize, buffer);
}
- ClearBufferCmd* cmd = allocator->Allocate<ClearBufferCmd>(Command::ClearBuffer);
- cmd->buffer = buffer;
- cmd->offset = offset;
- cmd->size = size;
-
- return {};
- },
- "encoding %s.ClearBuffer(%s, %u, %u).", this, buffer, offset, size);
- }
+ DAWN_TRY_CONTEXT(ValidateCanUseAs(buffer, wgpu::BufferUsage::CopyDst),
+ "validating buffer %s usage.", buffer);
- void CommandEncoder::APIInjectValidationError(const char* message) {
- if (mEncodingContext.CheckCurrentEncoder(this)) {
- mEncodingContext.HandleError(DAWN_VALIDATION_ERROR(message));
- }
- }
+ // Size must be a multiple of 4 bytes on macOS.
+ DAWN_INVALID_IF(size % 4 != 0, "Fill size (%u) is not a multiple of 4 bytes.",
+ size);
- void CommandEncoder::APIInsertDebugMarker(const char* groupLabel) {
- mEncodingContext.TryEncode(
- this,
- [&](CommandAllocator* allocator) -> MaybeError {
- InsertDebugMarkerCmd* cmd =
- allocator->Allocate<InsertDebugMarkerCmd>(Command::InsertDebugMarker);
- cmd->length = strlen(groupLabel);
+                // Offset must be a multiple of 4 bytes on macOS.
+                DAWN_INVALID_IF(offset % 4 != 0, "Offset (%u) is not a multiple of 4 bytes.",
+ offset);
- char* label = allocator->AllocateData<char>(cmd->length + 1);
- memcpy(label, groupLabel, cmd->length + 1);
+ mTopLevelBuffers.insert(buffer);
+ } else {
+ if (size == wgpu::kWholeSize) {
+ DAWN_ASSERT(buffer->GetSize() >= offset);
+ size = buffer->GetSize() - offset;
+ }
+ }
- return {};
- },
- "encoding %s.InsertDebugMarker(\"%s\").", this, groupLabel);
- }
+ ClearBufferCmd* cmd = allocator->Allocate<ClearBufferCmd>(Command::ClearBuffer);
+ cmd->buffer = buffer;
+ cmd->offset = offset;
+ cmd->size = size;
- void CommandEncoder::APIPopDebugGroup() {
- mEncodingContext.TryEncode(
- this,
- [&](CommandAllocator* allocator) -> MaybeError {
- if (GetDevice()->IsValidationEnabled()) {
- DAWN_INVALID_IF(
- mDebugGroupStackSize == 0,
- "PopDebugGroup called when no debug groups are currently pushed.");
- }
- allocator->Allocate<PopDebugGroupCmd>(Command::PopDebugGroup);
- mDebugGroupStackSize--;
- mEncodingContext.PopDebugGroupLabel();
+ return {};
+ },
+ "encoding %s.ClearBuffer(%s, %u, %u).", this, buffer, offset, size);
+}
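
ClearBuffer resolves the wgpu::kWholeSize sentinel to "everything after offset" before recording the command. A standalone sketch of that defaulting; the sentinel constant is written out here as an assumption rather than pulled from the WebGPU headers:

    #include <cstdint>

    constexpr uint64_t kWholeSizeSentinel = UINT64_MAX;  // stands in for wgpu::kWholeSize

    // Resolve a clear size of kWholeSize to the bytes remaining after offset.
    // Callers are assumed to have already validated offset <= bufferSize.
    uint64_t ResolveClearSize(uint64_t bufferSize, uint64_t offset, uint64_t size) {
        return size == kWholeSizeSentinel ? bufferSize - offset : size;
    }

    // Example: ResolveClearSize(256, 64, kWholeSizeSentinel) == 192.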
- return {};
- },
- "encoding %s.PopDebugGroup().", this);
+void CommandEncoder::APIInjectValidationError(const char* message) {
+ if (mEncodingContext.CheckCurrentEncoder(this)) {
+ mEncodingContext.HandleError(DAWN_VALIDATION_ERROR(message));
}
+}
- void CommandEncoder::APIPushDebugGroup(const char* groupLabel) {
- mEncodingContext.TryEncode(
- this,
- [&](CommandAllocator* allocator) -> MaybeError {
- PushDebugGroupCmd* cmd =
- allocator->Allocate<PushDebugGroupCmd>(Command::PushDebugGroup);
- cmd->length = strlen(groupLabel);
+void CommandEncoder::APIInsertDebugMarker(const char* groupLabel) {
+ mEncodingContext.TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ InsertDebugMarkerCmd* cmd =
+ allocator->Allocate<InsertDebugMarkerCmd>(Command::InsertDebugMarker);
+ cmd->length = strlen(groupLabel);
- char* label = allocator->AllocateData<char>(cmd->length + 1);
- memcpy(label, groupLabel, cmd->length + 1);
+ char* label = allocator->AllocateData<char>(cmd->length + 1);
+ memcpy(label, groupLabel, cmd->length + 1);
- mDebugGroupStackSize++;
- mEncodingContext.PushDebugGroupLabel(groupLabel);
+ return {};
+ },
+ "encoding %s.InsertDebugMarker(\"%s\").", this, groupLabel);
+}
+
+void CommandEncoder::APIPopDebugGroup() {
+ mEncodingContext.TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (GetDevice()->IsValidationEnabled()) {
+ DAWN_INVALID_IF(mDebugGroupStackSize == 0,
+ "PopDebugGroup called when no debug groups are currently pushed.");
+ }
+ allocator->Allocate<PopDebugGroupCmd>(Command::PopDebugGroup);
+ mDebugGroupStackSize--;
+ mEncodingContext.PopDebugGroupLabel();
- return {};
- },
- "encoding %s.PushDebugGroup(\"%s\").", this, groupLabel);
- }
+ return {};
+ },
+ "encoding %s.PopDebugGroup().", this);
+}
- void CommandEncoder::APIResolveQuerySet(QuerySetBase* querySet,
- uint32_t firstQuery,
- uint32_t queryCount,
- BufferBase* destination,
- uint64_t destinationOffset) {
- mEncodingContext.TryEncode(
- this,
- [&](CommandAllocator* allocator) -> MaybeError {
- if (GetDevice()->IsValidationEnabled()) {
- DAWN_TRY(GetDevice()->ValidateObject(querySet));
- DAWN_TRY(GetDevice()->ValidateObject(destination));
-
- DAWN_TRY(ValidateQuerySetResolve(querySet, firstQuery, queryCount, destination,
- destinationOffset));
-
- DAWN_TRY(ValidateCanUseAs(destination, wgpu::BufferUsage::QueryResolve));
-
- TrackUsedQuerySet(querySet);
- mTopLevelBuffers.insert(destination);
- }
+void CommandEncoder::APIPushDebugGroup(const char* groupLabel) {
+ mEncodingContext.TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ PushDebugGroupCmd* cmd =
+ allocator->Allocate<PushDebugGroupCmd>(Command::PushDebugGroup);
+ cmd->length = strlen(groupLabel);
- ResolveQuerySetCmd* cmd =
- allocator->Allocate<ResolveQuerySetCmd>(Command::ResolveQuerySet);
- cmd->querySet = querySet;
- cmd->firstQuery = firstQuery;
- cmd->queryCount = queryCount;
- cmd->destination = destination;
- cmd->destinationOffset = destinationOffset;
-
- // Encode internal compute pipeline for timestamp query
- if (querySet->GetQueryType() == wgpu::QueryType::Timestamp &&
- !GetDevice()->IsToggleEnabled(Toggle::DisableTimestampQueryConversion)) {
- DAWN_TRY(EncodeTimestampsToNanosecondsConversion(
- this, querySet, firstQuery, queryCount, destination, destinationOffset));
- }
+ char* label = allocator->AllocateData<char>(cmd->length + 1);
+ memcpy(label, groupLabel, cmd->length + 1);
- return {};
- },
- "encoding %s.ResolveQuerySet(%s, %u, %u, %s, %u).", this, querySet, firstQuery,
- queryCount, destination, destinationOffset);
- }
+ mDebugGroupStackSize++;
+ mEncodingContext.PushDebugGroupLabel(groupLabel);
- void CommandEncoder::APIWriteBuffer(BufferBase* buffer,
- uint64_t bufferOffset,
- const uint8_t* data,
- uint64_t size) {
- mEncodingContext.TryEncode(
- this,
- [&](CommandAllocator* allocator) -> MaybeError {
- if (GetDevice()->IsValidationEnabled()) {
- DAWN_TRY(ValidateWriteBuffer(GetDevice(), buffer, bufferOffset, size));
- }
+ return {};
+ },
+ "encoding %s.PushDebugGroup(\"%s\").", this, groupLabel);
+}
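
Push/PopDebugGroup validation is nothing more than a depth counter: a pop at depth zero fails immediately, and ValidateFinish later rejects any leftover depth. A minimal sketch of that bookkeeping, independent of the encoder types in this file:

    #include <cstdint>

    class DebugGroupTracker {
      public:
        void Push() { mDepth++; }
        // Returns false for an unbalanced pop, like the check in APIPopDebugGroup.
        bool Pop() {
            if (mDepth == 0) {
                return false;
            }
            mDepth--;
            return true;
        }
        // Mirrors the Finish-time check that every push was popped.
        bool IsBalanced() const { return mDepth == 0; }

      private:
        uint64_t mDepth = 0;
    };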
+
+void CommandEncoder::APIResolveQuerySet(QuerySetBase* querySet,
+ uint32_t firstQuery,
+ uint32_t queryCount,
+ BufferBase* destination,
+ uint64_t destinationOffset) {
+ mEncodingContext.TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (GetDevice()->IsValidationEnabled()) {
+ DAWN_TRY(GetDevice()->ValidateObject(querySet));
+ DAWN_TRY(GetDevice()->ValidateObject(destination));
+
+ DAWN_TRY(ValidateQuerySetResolve(querySet, firstQuery, queryCount, destination,
+ destinationOffset));
+
+ DAWN_TRY(ValidateCanUseAs(destination, wgpu::BufferUsage::QueryResolve));
+
+ TrackUsedQuerySet(querySet);
+ mTopLevelBuffers.insert(destination);
+ }
- WriteBufferCmd* cmd = allocator->Allocate<WriteBufferCmd>(Command::WriteBuffer);
- cmd->buffer = buffer;
- cmd->offset = bufferOffset;
- cmd->size = size;
+ ResolveQuerySetCmd* cmd =
+ allocator->Allocate<ResolveQuerySetCmd>(Command::ResolveQuerySet);
+ cmd->querySet = querySet;
+ cmd->firstQuery = firstQuery;
+ cmd->queryCount = queryCount;
+ cmd->destination = destination;
+ cmd->destinationOffset = destinationOffset;
+
+ // Encode internal compute pipeline for timestamp query
+ if (querySet->GetQueryType() == wgpu::QueryType::Timestamp &&
+ !GetDevice()->IsToggleEnabled(Toggle::DisableTimestampQueryConversion)) {
+ DAWN_TRY(EncodeTimestampsToNanosecondsConversion(
+ this, querySet, firstQuery, queryCount, destination, destinationOffset));
+ }
- uint8_t* inlinedData = allocator->AllocateData<uint8_t>(size);
- memcpy(inlinedData, data, size);
+ return {};
+ },
+ "encoding %s.ResolveQuerySet(%s, %u, %u, %s, %u).", this, querySet, firstQuery, queryCount,
+ destination, destinationOffset);
+}
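
ValidateQuerySetResolve (defined elsewhere) is essentially a range and alignment check on the destination buffer. A rough sketch of the arithmetic, assuming the WebGPU rules of 8 bytes per query result and a 256-byte aligned destination offset; both constants are assumptions here, not taken from this patch:

    #include <cstdint>

    constexpr uint64_t kBytesPerQueryResult = 8;       // assumed: results are uint64_t
    constexpr uint64_t kResolveOffsetAlignment = 256;  // assumed WebGPU alignment

    bool CanResolveQuerySet(uint32_t firstQuery, uint32_t queryCount, uint32_t querySetCount,
                            uint64_t destinationOffset, uint64_t destinationSize) {
        if (uint64_t(firstQuery) + queryCount > querySetCount) {
            return false;  // resolved range exceeds the query set
        }
        if (destinationOffset % kResolveOffsetAlignment != 0) {
            return false;  // misaligned destination offset
        }
        uint64_t required = uint64_t(queryCount) * kBytesPerQueryResult;
        return destinationOffset <= destinationSize &&
               required <= destinationSize - destinationOffset;
    }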
+
+void CommandEncoder::APIWriteBuffer(BufferBase* buffer,
+ uint64_t bufferOffset,
+ const uint8_t* data,
+ uint64_t size) {
+ mEncodingContext.TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (GetDevice()->IsValidationEnabled()) {
+ DAWN_TRY(ValidateWriteBuffer(GetDevice(), buffer, bufferOffset, size));
+ }
- mTopLevelBuffers.insert(buffer);
+ WriteBufferCmd* cmd = allocator->Allocate<WriteBufferCmd>(Command::WriteBuffer);
+ cmd->buffer = buffer;
+ cmd->offset = bufferOffset;
+ cmd->size = size;
- return {};
- },
- "encoding %s.WriteBuffer(%s, %u, ..., %u).", this, buffer, bufferOffset, size);
- }
+ uint8_t* inlinedData = allocator->AllocateData<uint8_t>(size);
+ memcpy(inlinedData, data, size);
- void CommandEncoder::APIWriteTimestamp(QuerySetBase* querySet, uint32_t queryIndex) {
- mEncodingContext.TryEncode(
- this,
- [&](CommandAllocator* allocator) -> MaybeError {
- if (GetDevice()->IsValidationEnabled()) {
- DAWN_TRY(ValidateTimestampQuery(GetDevice(), querySet, queryIndex));
- }
+ mTopLevelBuffers.insert(buffer);
- TrackQueryAvailability(querySet, queryIndex);
+ return {};
+ },
+ "encoding %s.WriteBuffer(%s, %u, ..., %u).", this, buffer, bufferOffset, size);
+}
+
+void CommandEncoder::APIWriteTimestamp(QuerySetBase* querySet, uint32_t queryIndex) {
+ mEncodingContext.TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (GetDevice()->IsValidationEnabled()) {
+ DAWN_TRY(ValidateTimestampQuery(GetDevice(), querySet, queryIndex));
+ }
- WriteTimestampCmd* cmd =
- allocator->Allocate<WriteTimestampCmd>(Command::WriteTimestamp);
- cmd->querySet = querySet;
- cmd->queryIndex = queryIndex;
+ TrackQueryAvailability(querySet, queryIndex);
- return {};
- },
- "encoding %s.WriteTimestamp(%s, %u).", this, querySet, queryIndex);
- }
+ WriteTimestampCmd* cmd =
+ allocator->Allocate<WriteTimestampCmd>(Command::WriteTimestamp);
+ cmd->querySet = querySet;
+ cmd->queryIndex = queryIndex;
- CommandBufferBase* CommandEncoder::APIFinish(const CommandBufferDescriptor* descriptor) {
- Ref<CommandBufferBase> commandBuffer;
- if (GetDevice()->ConsumedError(Finish(descriptor), &commandBuffer)) {
- return CommandBufferBase::MakeError(GetDevice());
- }
- ASSERT(!IsError());
- return commandBuffer.Detach();
+ return {};
+ },
+ "encoding %s.WriteTimestamp(%s, %u).", this, querySet, queryIndex);
+}
+
+CommandBufferBase* CommandEncoder::APIFinish(const CommandBufferDescriptor* descriptor) {
+ Ref<CommandBufferBase> commandBuffer;
+ if (GetDevice()->ConsumedError(Finish(descriptor), &commandBuffer)) {
+ return CommandBufferBase::MakeError(GetDevice());
+ }
+ ASSERT(!IsError());
+ return commandBuffer.Detach();
+}
+
+ResultOrError<Ref<CommandBufferBase>> CommandEncoder::Finish(
+ const CommandBufferDescriptor* descriptor) {
+ DeviceBase* device = GetDevice();
+
+ // Even if mEncodingContext.Finish() validation fails, calling it will mutate the internal
+ // state of the encoding context. The internal state is set to finished, and subsequent
+ // calls to encode commands will generate errors.
+ DAWN_TRY(mEncodingContext.Finish());
+ DAWN_TRY(device->ValidateIsAlive());
+
+ if (device->IsValidationEnabled()) {
+ DAWN_TRY(ValidateFinish());
}
- ResultOrError<Ref<CommandBufferBase>> CommandEncoder::Finish(
- const CommandBufferDescriptor* descriptor) {
- DeviceBase* device = GetDevice();
+ const CommandBufferDescriptor defaultDescriptor = {};
+ if (descriptor == nullptr) {
+ descriptor = &defaultDescriptor;
+ }
- // Even if mEncodingContext.Finish() validation fails, calling it will mutate the internal
- // state of the encoding context. The internal state is set to finished, and subsequent
- // calls to encode commands will generate errors.
- DAWN_TRY(mEncodingContext.Finish());
- DAWN_TRY(device->ValidateIsAlive());
+ return device->CreateCommandBuffer(this, descriptor);
+}
- if (device->IsValidationEnabled()) {
- DAWN_TRY(ValidateFinish());
- }
-
- const CommandBufferDescriptor defaultDescriptor = {};
- if (descriptor == nullptr) {
- descriptor = &defaultDescriptor;
- }
+// Implementation of the command buffer validation that can be precomputed before submit
+MaybeError CommandEncoder::ValidateFinish() const {
+ TRACE_EVENT0(GetDevice()->GetPlatform(), Validation, "CommandEncoder::ValidateFinish");
+ DAWN_TRY(GetDevice()->ValidateObject(this));
- return device->CreateCommandBuffer(this, descriptor);
+ for (const RenderPassResourceUsage& passUsage : mEncodingContext.GetRenderPassUsages()) {
+ DAWN_TRY_CONTEXT(ValidateSyncScopeResourceUsage(passUsage),
+ "validating render pass usage.");
}
- // Implementation of the command buffer validation that can be precomputed before submit
- MaybeError CommandEncoder::ValidateFinish() const {
- TRACE_EVENT0(GetDevice()->GetPlatform(), Validation, "CommandEncoder::ValidateFinish");
- DAWN_TRY(GetDevice()->ValidateObject(this));
-
- for (const RenderPassResourceUsage& passUsage : mEncodingContext.GetRenderPassUsages()) {
- DAWN_TRY_CONTEXT(ValidateSyncScopeResourceUsage(passUsage),
- "validating render pass usage.");
- }
-
- for (const ComputePassResourceUsage& passUsage : mEncodingContext.GetComputePassUsages()) {
- for (const SyncScopeResourceUsage& scope : passUsage.dispatchUsages) {
- DAWN_TRY_CONTEXT(ValidateSyncScopeResourceUsage(scope),
- "validating compute pass usage.");
- }
+ for (const ComputePassResourceUsage& passUsage : mEncodingContext.GetComputePassUsages()) {
+ for (const SyncScopeResourceUsage& scope : passUsage.dispatchUsages) {
+ DAWN_TRY_CONTEXT(ValidateSyncScopeResourceUsage(scope),
+ "validating compute pass usage.");
}
+ }
- DAWN_INVALID_IF(
- mDebugGroupStackSize != 0,
- "PushDebugGroup called %u time(s) without a corresponding PopDebugGroup prior to "
- "calling Finish.",
- mDebugGroupStackSize);
+ DAWN_INVALID_IF(
+ mDebugGroupStackSize != 0,
+ "PushDebugGroup called %u time(s) without a corresponding PopDebugGroup prior to "
+ "calling Finish.",
+ mDebugGroupStackSize);
- return {};
- }
+ return {};
+}
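
Finish and ValidateFinish lean entirely on the MaybeError/DAWN_TRY pattern used throughout this file: each step either returns an error, which is propagated immediately, or falls through to the next check. A very reduced sketch of that control flow, using std::optional<std::string> in place of Dawn's error types (this is not Dawn's actual macro):

    #include <optional>
    #include <string>

    using MaybeErr = std::optional<std::string>;  // nullopt means success

    // Simplified stand-in for DAWN_TRY: bail out early if a step failed.
    #define TRY_STEP(expr)                \
        do {                              \
            if (MaybeErr err_ = (expr)) { \
                return err_;              \
            }                             \
        } while (0)

    MaybeErr CheckDeviceAlive(bool alive) {
        if (!alive) return std::string("device lost");
        return std::nullopt;
    }

    MaybeErr CheckBalancedDebugGroups(unsigned depth) {
        if (depth != 0) return std::string("unbalanced debug groups");
        return std::nullopt;
    }

    MaybeErr ValidateFinishSketch(bool alive, unsigned depth) {
        TRY_STEP(CheckDeviceAlive(alive));
        TRY_STEP(CheckBalancedDebugGroups(depth));
        return std::nullopt;
    }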
} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/CommandEncoder.h b/chromium/third_party/dawn/src/dawn/native/CommandEncoder.h
index 81263c05391..79e6e96f348 100644
--- a/chromium/third_party/dawn/src/dawn/native/CommandEncoder.h
+++ b/chromium/third_party/dawn/src/dawn/native/CommandEncoder.h
@@ -15,6 +15,9 @@
#ifndef SRC_DAWN_NATIVE_COMMANDENCODER_H_
#define SRC_DAWN_NATIVE_COMMANDENCODER_H_
+#include <set>
+#include <string>
+
#include "dawn/native/dawn_platform.h"
#include "dawn/native/EncodingContext.h"
@@ -22,100 +25,98 @@
#include "dawn/native/ObjectBase.h"
#include "dawn/native/PassResourceUsage.h"
-#include <string>
-
namespace dawn::native {
- enum class UsageValidationMode;
-
- MaybeError ValidateCommandEncoderDescriptor(const DeviceBase* device,
- const CommandEncoderDescriptor* descriptor);
-
- class CommandEncoder final : public ApiObjectBase {
- public:
- static Ref<CommandEncoder> Create(DeviceBase* device,
- const CommandEncoderDescriptor* descriptor);
- static CommandEncoder* MakeError(DeviceBase* device);
-
- ObjectType GetType() const override;
-
- CommandIterator AcquireCommands();
- CommandBufferResourceUsage AcquireResourceUsages();
-
- void TrackUsedQuerySet(QuerySetBase* querySet);
- void TrackQueryAvailability(QuerySetBase* querySet, uint32_t queryIndex);
-
- // Dawn API
- ComputePassEncoder* APIBeginComputePass(const ComputePassDescriptor* descriptor);
- RenderPassEncoder* APIBeginRenderPass(const RenderPassDescriptor* descriptor);
-
- void APICopyBufferToBuffer(BufferBase* source,
- uint64_t sourceOffset,
- BufferBase* destination,
- uint64_t destinationOffset,
- uint64_t size);
- void APICopyBufferToTexture(const ImageCopyBuffer* source,
- const ImageCopyTexture* destination,
- const Extent3D* copySize);
- void APICopyTextureToBuffer(const ImageCopyTexture* source,
- const ImageCopyBuffer* destination,
- const Extent3D* copySize);
- void APICopyTextureToTexture(const ImageCopyTexture* source,
- const ImageCopyTexture* destination,
- const Extent3D* copySize);
- void APICopyTextureToTextureInternal(const ImageCopyTexture* source,
- const ImageCopyTexture* destination,
- const Extent3D* copySize);
- void APIClearBuffer(BufferBase* destination, uint64_t destinationOffset, uint64_t size);
-
- void APIInjectValidationError(const char* message);
- void APIInsertDebugMarker(const char* groupLabel);
- void APIPopDebugGroup();
- void APIPushDebugGroup(const char* groupLabel);
-
- void APIResolveQuerySet(QuerySetBase* querySet,
- uint32_t firstQuery,
- uint32_t queryCount,
- BufferBase* destination,
- uint64_t destinationOffset);
- void APIWriteBuffer(BufferBase* buffer,
- uint64_t bufferOffset,
- const uint8_t* data,
- uint64_t size);
- void APIWriteTimestamp(QuerySetBase* querySet, uint32_t queryIndex);
-
- CommandBufferBase* APIFinish(const CommandBufferDescriptor* descriptor = nullptr);
-
- Ref<ComputePassEncoder> BeginComputePass(const ComputePassDescriptor* descriptor = nullptr);
- Ref<RenderPassEncoder> BeginRenderPass(const RenderPassDescriptor* descriptor);
- ResultOrError<Ref<CommandBufferBase>> Finish(
- const CommandBufferDescriptor* descriptor = nullptr);
-
- private:
- CommandEncoder(DeviceBase* device, const CommandEncoderDescriptor* descriptor);
- CommandEncoder(DeviceBase* device, ObjectBase::ErrorTag tag);
-
- void DestroyImpl() override;
-
- // Helper to be able to implement both APICopyTextureToTexture and
- // APICopyTextureToTextureInternal. The only difference between both
- // copies, is that the Internal one will also check internal usage.
- template <bool Internal>
- void APICopyTextureToTextureHelper(const ImageCopyTexture* source,
- const ImageCopyTexture* destination,
- const Extent3D* copySize);
-
- MaybeError ValidateFinish() const;
-
- EncodingContext mEncodingContext;
- std::set<BufferBase*> mTopLevelBuffers;
- std::set<TextureBase*> mTopLevelTextures;
- std::set<QuerySetBase*> mUsedQuerySets;
-
- uint64_t mDebugGroupStackSize = 0;
-
- UsageValidationMode mUsageValidationMode;
- };
+enum class UsageValidationMode;
+
+MaybeError ValidateCommandEncoderDescriptor(const DeviceBase* device,
+ const CommandEncoderDescriptor* descriptor);
+
+class CommandEncoder final : public ApiObjectBase {
+ public:
+ static Ref<CommandEncoder> Create(DeviceBase* device,
+ const CommandEncoderDescriptor* descriptor);
+ static CommandEncoder* MakeError(DeviceBase* device);
+
+ ObjectType GetType() const override;
+
+ CommandIterator AcquireCommands();
+ CommandBufferResourceUsage AcquireResourceUsages();
+
+ void TrackUsedQuerySet(QuerySetBase* querySet);
+ void TrackQueryAvailability(QuerySetBase* querySet, uint32_t queryIndex);
+
+ // Dawn API
+ ComputePassEncoder* APIBeginComputePass(const ComputePassDescriptor* descriptor);
+ RenderPassEncoder* APIBeginRenderPass(const RenderPassDescriptor* descriptor);
+
+ void APICopyBufferToBuffer(BufferBase* source,
+ uint64_t sourceOffset,
+ BufferBase* destination,
+ uint64_t destinationOffset,
+ uint64_t size);
+ void APICopyBufferToTexture(const ImageCopyBuffer* source,
+ const ImageCopyTexture* destination,
+ const Extent3D* copySize);
+ void APICopyTextureToBuffer(const ImageCopyTexture* source,
+ const ImageCopyBuffer* destination,
+ const Extent3D* copySize);
+ void APICopyTextureToTexture(const ImageCopyTexture* source,
+ const ImageCopyTexture* destination,
+ const Extent3D* copySize);
+ void APICopyTextureToTextureInternal(const ImageCopyTexture* source,
+ const ImageCopyTexture* destination,
+ const Extent3D* copySize);
+ void APIClearBuffer(BufferBase* destination, uint64_t destinationOffset, uint64_t size);
+
+ void APIInjectValidationError(const char* message);
+ void APIInsertDebugMarker(const char* groupLabel);
+ void APIPopDebugGroup();
+ void APIPushDebugGroup(const char* groupLabel);
+
+ void APIResolveQuerySet(QuerySetBase* querySet,
+ uint32_t firstQuery,
+ uint32_t queryCount,
+ BufferBase* destination,
+ uint64_t destinationOffset);
+ void APIWriteBuffer(BufferBase* buffer,
+ uint64_t bufferOffset,
+ const uint8_t* data,
+ uint64_t size);
+ void APIWriteTimestamp(QuerySetBase* querySet, uint32_t queryIndex);
+
+ CommandBufferBase* APIFinish(const CommandBufferDescriptor* descriptor = nullptr);
+
+ Ref<ComputePassEncoder> BeginComputePass(const ComputePassDescriptor* descriptor = nullptr);
+ Ref<RenderPassEncoder> BeginRenderPass(const RenderPassDescriptor* descriptor);
+ ResultOrError<Ref<CommandBufferBase>> Finish(
+ const CommandBufferDescriptor* descriptor = nullptr);
+
+ private:
+ CommandEncoder(DeviceBase* device, const CommandEncoderDescriptor* descriptor);
+ CommandEncoder(DeviceBase* device, ObjectBase::ErrorTag tag);
+
+ void DestroyImpl() override;
+
+ // Helper to be able to implement both APICopyTextureToTexture and
+ // APICopyTextureToTextureInternal. The only difference between both
+    // copies is that the Internal one will also check internal usage.
+ template <bool Internal>
+ void APICopyTextureToTextureHelper(const ImageCopyTexture* source,
+ const ImageCopyTexture* destination,
+ const Extent3D* copySize);
+
+ MaybeError ValidateFinish() const;
+
+ EncodingContext mEncodingContext;
+ std::set<BufferBase*> mTopLevelBuffers;
+ std::set<TextureBase*> mTopLevelTextures;
+ std::set<QuerySetBase*> mUsedQuerySets;
+
+ uint64_t mDebugGroupStackSize = 0;
+
+ UsageValidationMode mUsageValidationMode;
+};
} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/CommandValidation.cpp b/chromium/third_party/dawn/src/dawn/native/CommandValidation.cpp
index 44fbdf85824..723f658d1fa 100644
--- a/chromium/third_party/dawn/src/dawn/native/CommandValidation.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/CommandValidation.cpp
@@ -14,6 +14,10 @@
#include "dawn/native/CommandValidation.h"
+#include <algorithm>
+#include <limits>
+#include <utility>
+
#include "dawn/common/BitSetIterator.h"
#include "dawn/native/BindGroup.h"
#include "dawn/native/Buffer.h"
@@ -28,469 +32,454 @@
namespace dawn::native {
- // Performs validation of the "synchronization scope" rules of WebGPU.
- MaybeError ValidateSyncScopeResourceUsage(const SyncScopeResourceUsage& scope) {
- // Buffers can only be used as single-write or multiple read.
- for (size_t i = 0; i < scope.bufferUsages.size(); ++i) {
- const wgpu::BufferUsage usage = scope.bufferUsages[i];
- bool readOnly = IsSubset(usage, kReadOnlyBufferUsages);
- bool singleUse = wgpu::HasZeroOrOneBits(usage);
+// Performs validation of the "synchronization scope" rules of WebGPU.
+MaybeError ValidateSyncScopeResourceUsage(const SyncScopeResourceUsage& scope) {
+ // Buffers can only be used as single-write or multiple read.
+ for (size_t i = 0; i < scope.bufferUsages.size(); ++i) {
+ const wgpu::BufferUsage usage = scope.bufferUsages[i];
+ bool readOnly = IsSubset(usage, kReadOnlyBufferUsages);
+ bool singleUse = wgpu::HasZeroOrOneBits(usage);
- DAWN_INVALID_IF(!readOnly && !singleUse,
- "%s usage (%s) includes writable usage and another usage in the same "
- "synchronization scope.",
- scope.buffers[i], usage);
- }
-
- // Check that every single subresource is used as either a single-write usage or a
- // combination of readonly usages.
- for (size_t i = 0; i < scope.textureUsages.size(); ++i) {
- const TextureSubresourceUsage& textureUsage = scope.textureUsages[i];
- MaybeError error = {};
- textureUsage.Iterate([&](const SubresourceRange&, const wgpu::TextureUsage& usage) {
- bool readOnly = IsSubset(usage, kReadOnlyTextureUsages);
- bool singleUse = wgpu::HasZeroOrOneBits(usage);
- if (!readOnly && !singleUse && !error.IsError()) {
- error = DAWN_FORMAT_VALIDATION_ERROR(
+ DAWN_INVALID_IF(!readOnly && !singleUse,
"%s usage (%s) includes writable usage and another usage in the same "
"synchronization scope.",
- scope.textures[i], usage);
- }
- });
- DAWN_TRY(std::move(error));
- }
- return {};
+ scope.buffers[i], usage);
}
- MaybeError ValidateTimestampQuery(const DeviceBase* device,
- const QuerySetBase* querySet,
- uint32_t queryIndex) {
- DAWN_TRY(device->ValidateObject(querySet));
-
- DAWN_INVALID_IF(querySet->GetQueryType() != wgpu::QueryType::Timestamp,
- "The type of %s is not %s.", querySet, wgpu::QueryType::Timestamp);
-
- DAWN_INVALID_IF(queryIndex >= querySet->GetQueryCount(),
- "Query index (%u) exceeds the number of queries (%u) in %s.", queryIndex,
- querySet->GetQueryCount(), querySet);
-
- return {};
+ // Check that every single subresource is used as either a single-write usage or a
+ // combination of readonly usages.
+ for (size_t i = 0; i < scope.textureUsages.size(); ++i) {
+ const TextureSubresourceUsage& textureUsage = scope.textureUsages[i];
+ MaybeError error = {};
+ textureUsage.Iterate([&](const SubresourceRange&, const wgpu::TextureUsage& usage) {
+ bool readOnly = IsSubset(usage, kReadOnlyTextureUsages);
+ bool singleUse = wgpu::HasZeroOrOneBits(usage);
+ if (!readOnly && !singleUse && !error.IsError()) {
+ error = DAWN_FORMAT_VALIDATION_ERROR(
+ "%s usage (%s) includes writable usage and another usage in the same "
+ "synchronization scope.",
+ scope.textures[i], usage);
+ }
+ });
+ DAWN_TRY(std::move(error));
}
-
- MaybeError ValidateWriteBuffer(const DeviceBase* device,
- const BufferBase* buffer,
- uint64_t bufferOffset,
- uint64_t size) {
- DAWN_TRY(device->ValidateObject(buffer));
-
- DAWN_INVALID_IF(bufferOffset % 4 != 0, "BufferOffset (%u) is not a multiple of 4.",
- bufferOffset);
-
- DAWN_INVALID_IF(size % 4 != 0, "Size (%u) is not a multiple of 4.", size);
-
- uint64_t bufferSize = buffer->GetSize();
- DAWN_INVALID_IF(bufferOffset > bufferSize || size > (bufferSize - bufferOffset),
- "Write range (bufferOffset: %u, size: %u) does not fit in %s size (%u).",
- bufferOffset, size, buffer, bufferSize);
-
- DAWN_INVALID_IF(!(buffer->GetUsage() & wgpu::BufferUsage::CopyDst),
- "%s usage (%s) does not include %s.", buffer, buffer->GetUsage(),
- wgpu::BufferUsage::CopyDst);
-
- return {};
+ return {};
+}
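
The synchronization-scope rule above reduces to two bit tests on the usage mask: either every set bit is a read-only usage, or at most one bit is set. A standalone version on a plain integer mask; the bit values and the read-only set are illustrative assumptions, not the wgpu enum:

    #include <cstdint>

    constexpr uint32_t kMapRead = 0x1, kCopySrc = 0x4, kCopyDst = 0x8, kStorage = 0x80;
    constexpr uint32_t kReadOnlyMask = kMapRead | kCopySrc;  // assumed read-only set

    bool IsSubsetOf(uint32_t usage, uint32_t mask) { return (usage & ~mask) == 0; }
    bool HasZeroOrOneBits(uint32_t usage) { return (usage & (usage - 1)) == 0; }

    // Allowed in one scope if the usage is all read-only, or a single usage.
    bool AllowedInSyncScope(uint32_t usage) {
        return IsSubsetOf(usage, kReadOnlyMask) || HasZeroOrOneBits(usage);
    }

    // Example: kCopySrc | kMapRead is allowed; kCopyDst | kStorage is not.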
+
+MaybeError ValidateTimestampQuery(const DeviceBase* device,
+ const QuerySetBase* querySet,
+ uint32_t queryIndex) {
+ DAWN_TRY(device->ValidateObject(querySet));
+
+ DAWN_INVALID_IF(querySet->GetQueryType() != wgpu::QueryType::Timestamp,
+ "The type of %s is not %s.", querySet, wgpu::QueryType::Timestamp);
+
+ DAWN_INVALID_IF(queryIndex >= querySet->GetQueryCount(),
+ "Query index (%u) exceeds the number of queries (%u) in %s.", queryIndex,
+ querySet->GetQueryCount(), querySet);
+
+ return {};
+}
+
+MaybeError ValidateWriteBuffer(const DeviceBase* device,
+ const BufferBase* buffer,
+ uint64_t bufferOffset,
+ uint64_t size) {
+ DAWN_TRY(device->ValidateObject(buffer));
+
+ DAWN_INVALID_IF(bufferOffset % 4 != 0, "BufferOffset (%u) is not a multiple of 4.",
+ bufferOffset);
+
+ DAWN_INVALID_IF(size % 4 != 0, "Size (%u) is not a multiple of 4.", size);
+
+ uint64_t bufferSize = buffer->GetSize();
+ DAWN_INVALID_IF(bufferOffset > bufferSize || size > (bufferSize - bufferOffset),
+ "Write range (bufferOffset: %u, size: %u) does not fit in %s size (%u).",
+ bufferOffset, size, buffer, bufferSize);
+
+ DAWN_TRY(ValidateCanUseAs(buffer, wgpu::BufferUsage::CopyDst));
+
+ return {};
+}
+
+bool IsRangeOverlapped(uint32_t startA, uint32_t startB, uint32_t length) {
+ uint32_t maxStart = std::max(startA, startB);
+ uint32_t minStart = std::min(startA, startB);
+ return static_cast<uint64_t>(minStart) + static_cast<uint64_t>(length) >
+ static_cast<uint64_t>(maxStart);
+}
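
IsRangeOverlapped widens to 64 bits before adding so that start + length cannot wrap around in uint32_t. The same check with a few concrete cases, purely for illustration:

    #include <algorithm>
    #include <cstdint>

    // Two ranges of equal length overlap iff the later one starts before the
    // earlier one ends; summing in 64 bits avoids uint32_t overflow.
    bool RangesOverlap(uint32_t startA, uint32_t startB, uint32_t length) {
        uint32_t minStart = std::min(startA, startB);
        uint32_t maxStart = std::max(startA, startB);
        return uint64_t(minStart) + uint64_t(length) > uint64_t(maxStart);
    }

    // RangesOverlap(0, 4, 8)  -> true   (0..8 and 4..12 share 4..8)
    // RangesOverlap(0, 8, 8)  -> false  (the ranges only touch at 8)
    // RangesOverlap(0xFFFFFFF0u, 0xFFFFFFF8u, 0x20) -> true; a 32-bit sum would wrap and say false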
+
+ResultOrError<uint64_t> ComputeRequiredBytesInCopy(const TexelBlockInfo& blockInfo,
+ const Extent3D& copySize,
+ uint32_t bytesPerRow,
+ uint32_t rowsPerImage) {
+ ASSERT(copySize.width % blockInfo.width == 0);
+ ASSERT(copySize.height % blockInfo.height == 0);
+ uint32_t widthInBlocks = copySize.width / blockInfo.width;
+ uint32_t heightInBlocks = copySize.height / blockInfo.height;
+ uint64_t bytesInLastRow = Safe32x32(widthInBlocks, blockInfo.byteSize);
+
+ if (copySize.depthOrArrayLayers == 0) {
+ return 0;
}
- bool IsRangeOverlapped(uint32_t startA, uint32_t startB, uint32_t length) {
- uint32_t maxStart = std::max(startA, startB);
- uint32_t minStart = std::min(startA, startB);
- return static_cast<uint64_t>(minStart) + static_cast<uint64_t>(length) >
- static_cast<uint64_t>(maxStart);
+ // Check for potential overflows for the rest of the computations. We have the following
+ // inequalities:
+ //
+ // bytesInLastRow <= bytesPerRow
+ // heightInBlocks <= rowsPerImage
+ //
+ // So:
+ //
+ // bytesInLastImage = bytesPerRow * (heightInBlocks - 1) + bytesInLastRow
+ // <= bytesPerRow * heightInBlocks
+ // <= bytesPerRow * rowsPerImage
+ // <= bytesPerImage
+ //
+ // This means that if the computation of depth * bytesPerImage doesn't overflow, none of the
+ // computations for requiredBytesInCopy will. (and it's not a very pessimizing check)
+ ASSERT(copySize.depthOrArrayLayers <= 1 || (bytesPerRow != wgpu::kCopyStrideUndefined &&
+ rowsPerImage != wgpu::kCopyStrideUndefined));
+ uint64_t bytesPerImage = Safe32x32(bytesPerRow, rowsPerImage);
+ DAWN_INVALID_IF(
+ bytesPerImage > std::numeric_limits<uint64_t>::max() / copySize.depthOrArrayLayers,
+ "The number of bytes per image (%u) exceeds the maximum (%u) when copying %u images.",
+ bytesPerImage, std::numeric_limits<uint64_t>::max() / copySize.depthOrArrayLayers,
+ copySize.depthOrArrayLayers);
+
+ uint64_t requiredBytesInCopy = bytesPerImage * (copySize.depthOrArrayLayers - 1);
+ if (heightInBlocks > 0) {
+ ASSERT(heightInBlocks <= 1 || bytesPerRow != wgpu::kCopyStrideUndefined);
+ uint64_t bytesInLastImage = Safe32x32(bytesPerRow, heightInBlocks - 1) + bytesInLastRow;
+ requiredBytesInCopy += bytesInLastImage;
}
+ return requiredBytesInCopy;
+}
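
The overflow argument in the comment above hinges on the footprint formula requiredBytesInCopy = bytesPerImage * (depth - 1) + bytesPerRow * (heightInBlocks - 1) + bytesInLastRow. A self-contained version with a worked example; the concrete numbers are illustrative only:

    #include <cstdint>

    // Footprint of a linear copy: full images for all but the last layer, full
    // rows for all but the last row, and a tightly packed last row.
    uint64_t RequiredBytesInCopy(uint64_t bytesPerImage, uint32_t depth, uint64_t bytesPerRow,
                                 uint32_t heightInBlocks, uint64_t bytesInLastRow) {
        if (depth == 0) {
            return 0;
        }
        uint64_t required = bytesPerImage * (depth - 1);
        if (heightInBlocks > 0) {
            required += bytesPerRow * (heightInBlocks - 1) + bytesInLastRow;
        }
        return required;
    }

    // Example: a 16x16 RGBA8 copy (64 bytes in the last row) with bytesPerRow = 256,
    // rowsPerImage = 16 and depth = 2:
    //   bytesPerImage = 256 * 16 = 4096
    //   required      = 4096 * 1 + 256 * 15 + 64 = 8000 bytes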
+
+MaybeError ValidateCopySizeFitsInBuffer(const Ref<BufferBase>& buffer,
+ uint64_t offset,
+ uint64_t size) {
+ uint64_t bufferSize = buffer->GetSize();
+ bool fitsInBuffer = offset <= bufferSize && (size <= (bufferSize - offset));
+ DAWN_INVALID_IF(!fitsInBuffer,
+ "Copy range (offset: %u, size: %u) does not fit in %s size (%u).", offset, size,
+ buffer.Get(), bufferSize);
+
+ return {};
+}
+
+// Replace wgpu::kCopyStrideUndefined with real values, so backends don't have to think about
+// it.
+void ApplyDefaultTextureDataLayoutOptions(TextureDataLayout* layout,
+ const TexelBlockInfo& blockInfo,
+ const Extent3D& copyExtent) {
+ ASSERT(layout != nullptr);
+ ASSERT(copyExtent.height % blockInfo.height == 0);
+ uint32_t heightInBlocks = copyExtent.height / blockInfo.height;
+
+ if (layout->bytesPerRow == wgpu::kCopyStrideUndefined) {
+ ASSERT(copyExtent.width % blockInfo.width == 0);
+ uint32_t widthInBlocks = copyExtent.width / blockInfo.width;
+ uint32_t bytesInLastRow = widthInBlocks * blockInfo.byteSize;
- template <typename A, typename B>
- DAWN_FORCE_INLINE uint64_t Safe32x32(A a, B b) {
- static_assert(std::is_same<A, uint32_t>::value, "'a' must be uint32_t");
- static_assert(std::is_same<B, uint32_t>::value, "'b' must be uint32_t");
- return uint64_t(a) * uint64_t(b);
+ ASSERT(heightInBlocks <= 1 && copyExtent.depthOrArrayLayers <= 1);
+ layout->bytesPerRow = Align(bytesInLastRow, kTextureBytesPerRowAlignment);
}
-
- ResultOrError<uint64_t> ComputeRequiredBytesInCopy(const TexelBlockInfo& blockInfo,
- const Extent3D& copySize,
- uint32_t bytesPerRow,
- uint32_t rowsPerImage) {
- ASSERT(copySize.width % blockInfo.width == 0);
- ASSERT(copySize.height % blockInfo.height == 0);
- uint32_t widthInBlocks = copySize.width / blockInfo.width;
- uint32_t heightInBlocks = copySize.height / blockInfo.height;
- uint64_t bytesInLastRow = Safe32x32(widthInBlocks, blockInfo.byteSize);
-
- if (copySize.depthOrArrayLayers == 0) {
- return 0;
- }
-
- // Check for potential overflows for the rest of the computations. We have the following
- // inequalities:
- //
- // bytesInLastRow <= bytesPerRow
- // heightInBlocks <= rowsPerImage
- //
- // So:
- //
- // bytesInLastImage = bytesPerRow * (heightInBlocks - 1) + bytesInLastRow
- // <= bytesPerRow * heightInBlocks
- // <= bytesPerRow * rowsPerImage
- // <= bytesPerImage
- //
- // This means that if the computation of depth * bytesPerImage doesn't overflow, none of the
- // computations for requiredBytesInCopy will. (and it's not a very pessimizing check)
- ASSERT(copySize.depthOrArrayLayers <= 1 || (bytesPerRow != wgpu::kCopyStrideUndefined &&
- rowsPerImage != wgpu::kCopyStrideUndefined));
- uint64_t bytesPerImage = Safe32x32(bytesPerRow, rowsPerImage);
- DAWN_INVALID_IF(
- bytesPerImage > std::numeric_limits<uint64_t>::max() / copySize.depthOrArrayLayers,
- "The number of bytes per image (%u) exceeds the maximum (%u) when copying %u images.",
- bytesPerImage, std::numeric_limits<uint64_t>::max() / copySize.depthOrArrayLayers,
- copySize.depthOrArrayLayers);
-
- uint64_t requiredBytesInCopy = bytesPerImage * (copySize.depthOrArrayLayers - 1);
- if (heightInBlocks > 0) {
- ASSERT(heightInBlocks <= 1 || bytesPerRow != wgpu::kCopyStrideUndefined);
- uint64_t bytesInLastImage = Safe32x32(bytesPerRow, heightInBlocks - 1) + bytesInLastRow;
- requiredBytesInCopy += bytesInLastImage;
- }
- return requiredBytesInCopy;
+ if (layout->rowsPerImage == wgpu::kCopyStrideUndefined) {
+ ASSERT(copyExtent.depthOrArrayLayers <= 1);
+ layout->rowsPerImage = heightInBlocks;
+ }
+}
+
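To illustrate the defaulting above for an uncompressed format (1x1 texel blocks), here is a small standalone sketch. The sentinel value and the 256-byte row alignment are assumptions standing in for kCopyStrideUndefined and kTextureBytesPerRowAlignment, and all names are illustrative:

    #include <cstdint>

    constexpr uint32_t kUndefinedStride = 0xFFFFFFFFu;  // stand-in for kCopyStrideUndefined
    constexpr uint32_t kRowAlignment = 256;             // assumed bytes-per-row alignment

    // Fill in bytesPerRow/rowsPerImage only when the caller left them undefined,
    // which is only legal for copies of at most one row / one image.
    void ApplyLayoutDefaultsSketch(uint32_t* bytesPerRow, uint32_t* rowsPerImage,
                                   uint32_t widthTexels, uint32_t heightTexels,
                                   uint32_t bytesPerTexel) {
        if (*bytesPerRow == kUndefinedStride) {
            uint32_t bytesInLastRow = widthTexels * bytesPerTexel;
            *bytesPerRow = (bytesInLastRow + kRowAlignment - 1) / kRowAlignment * kRowAlignment;
        }
        if (*rowsPerImage == kUndefinedStride) {
            *rowsPerImage = heightTexels;
        }
    }
    // E.g. a 200x1 RGBA8 copy: bytesPerRow defaults to Align(800, 256) = 1024 and
    // rowsPerImage defaults to 1.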
+MaybeError ValidateLinearTextureData(const TextureDataLayout& layout,
+ uint64_t byteSize,
+ const TexelBlockInfo& blockInfo,
+ const Extent3D& copyExtent) {
+ ASSERT(copyExtent.height % blockInfo.height == 0);
+ uint32_t heightInBlocks = copyExtent.height / blockInfo.height;
+
+ // TODO(dawn:563): Right now kCopyStrideUndefined will be formatted as a large value in the
+ // validation message. Investigate ways to make it print as a more readable symbol.
+ DAWN_INVALID_IF(
+ copyExtent.depthOrArrayLayers > 1 && (layout.bytesPerRow == wgpu::kCopyStrideUndefined ||
+ layout.rowsPerImage == wgpu::kCopyStrideUndefined),
+ "Copy depth (%u) is > 1, but bytesPerRow (%u) or rowsPerImage (%u) are not specified.",
+ copyExtent.depthOrArrayLayers, layout.bytesPerRow, layout.rowsPerImage);
+
+ DAWN_INVALID_IF(heightInBlocks > 1 && layout.bytesPerRow == wgpu::kCopyStrideUndefined,
+ "HeightInBlocks (%u) is > 1, but bytesPerRow is not specified.",
+ heightInBlocks);
+
+ // Validation for other members in layout:
+ ASSERT(copyExtent.width % blockInfo.width == 0);
+ uint32_t widthInBlocks = copyExtent.width / blockInfo.width;
+ ASSERT(Safe32x32(widthInBlocks, blockInfo.byteSize) <= std::numeric_limits<uint32_t>::max());
+ uint32_t bytesInLastRow = widthInBlocks * blockInfo.byteSize;
+
+ // These != wgpu::kCopyStrideUndefined checks are technically redundant with the > checks,
+ // but they should get optimized out.
+ DAWN_INVALID_IF(
+ layout.bytesPerRow != wgpu::kCopyStrideUndefined && bytesInLastRow > layout.bytesPerRow,
+ "The byte size of each row (%u) is > bytesPerRow (%u).", bytesInLastRow,
+ layout.bytesPerRow);
+
+ DAWN_INVALID_IF(
+ layout.rowsPerImage != wgpu::kCopyStrideUndefined && heightInBlocks > layout.rowsPerImage,
+ "The height of each image in blocks (%u) is > rowsPerImage (%u).", heightInBlocks,
+ layout.rowsPerImage);
+
+    // We compute the required bytes in the copy only after validating the texel block
+    // alignments, because the divisibility conditions are necessary for the algorithm to be
+    // valid, and the bytesPerRow bound is necessary to avoid overflows.
+ uint64_t requiredBytesInCopy;
+ DAWN_TRY_ASSIGN(
+ requiredBytesInCopy,
+ ComputeRequiredBytesInCopy(blockInfo, copyExtent, layout.bytesPerRow, layout.rowsPerImage));
+
+ bool fitsInData =
+ layout.offset <= byteSize && (requiredBytesInCopy <= (byteSize - layout.offset));
+ DAWN_INVALID_IF(
+ !fitsInData,
+ "Required size for texture data layout (%u) exceeds the linear data size (%u) with "
+ "offset (%u).",
+ requiredBytesInCopy, byteSize, layout.offset);
+
+ return {};
+}
+
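A worked example of the layout validation above, assuming an RGBA8 format (4 bytes per 1x1 block) and arbitrarily chosen layout values:

    #include <cassert>
    #include <cstdint>

    int main() {
        // Copy of 100x4x2 texels with bytesPerRow = 512, rowsPerImage = 4, offset = 16.
        uint64_t bytesPerRow = 512, rowsPerImage = 4, offset = 16;
        uint64_t widthInBlocks = 100, heightInBlocks = 4, depth = 2, blockByteSize = 4;

        uint64_t bytesPerImage = bytesPerRow * rowsPerImage;       // 2048
        uint64_t bytesInLastRow = widthInBlocks * blockByteSize;   // 400
        uint64_t required = bytesPerImage * (depth - 1)            // 2048
                            + bytesPerRow * (heightInBlocks - 1)   // + 1536
                            + bytesInLastRow;                      // + 400
        assert(required == 3984);
        // The linear data must therefore hold at least offset + required = 4000 bytes,
        // which is exactly the fitsInData condition checked above.
        return 0;
    }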
+MaybeError ValidateImageCopyBuffer(DeviceBase const* device,
+ const ImageCopyBuffer& imageCopyBuffer) {
+ DAWN_TRY(device->ValidateObject(imageCopyBuffer.buffer));
+ if (imageCopyBuffer.layout.bytesPerRow != wgpu::kCopyStrideUndefined) {
+ DAWN_INVALID_IF(imageCopyBuffer.layout.bytesPerRow % kTextureBytesPerRowAlignment != 0,
+ "bytesPerRow (%u) is not a multiple of %u.",
+ imageCopyBuffer.layout.bytesPerRow, kTextureBytesPerRowAlignment);
}
- MaybeError ValidateCopySizeFitsInBuffer(const Ref<BufferBase>& buffer,
- uint64_t offset,
- uint64_t size) {
- uint64_t bufferSize = buffer->GetSize();
- bool fitsInBuffer = offset <= bufferSize && (size <= (bufferSize - offset));
- DAWN_INVALID_IF(!fitsInBuffer,
- "Copy range (offset: %u, size: %u) does not fit in %s size (%u).", offset,
- size, buffer.Get(), bufferSize);
+ return {};
+}
- return {};
- }
+MaybeError ValidateImageCopyTexture(DeviceBase const* device,
+ const ImageCopyTexture& textureCopy,
+ const Extent3D& copySize) {
+ const TextureBase* texture = textureCopy.texture;
+ DAWN_TRY(device->ValidateObject(texture));
- // Replace wgpu::kCopyStrideUndefined with real values, so backends don't have to think about
- // it.
- void ApplyDefaultTextureDataLayoutOptions(TextureDataLayout* layout,
- const TexelBlockInfo& blockInfo,
- const Extent3D& copyExtent) {
- ASSERT(layout != nullptr);
- ASSERT(copyExtent.height % blockInfo.height == 0);
- uint32_t heightInBlocks = copyExtent.height / blockInfo.height;
-
- if (layout->bytesPerRow == wgpu::kCopyStrideUndefined) {
- ASSERT(copyExtent.width % blockInfo.width == 0);
- uint32_t widthInBlocks = copyExtent.width / blockInfo.width;
- uint32_t bytesInLastRow = widthInBlocks * blockInfo.byteSize;
-
- ASSERT(heightInBlocks <= 1 && copyExtent.depthOrArrayLayers <= 1);
- layout->bytesPerRow = Align(bytesInLastRow, kTextureBytesPerRowAlignment);
- }
- if (layout->rowsPerImage == wgpu::kCopyStrideUndefined) {
- ASSERT(copyExtent.depthOrArrayLayers <= 1);
- layout->rowsPerImage = heightInBlocks;
- }
- }
+ DAWN_INVALID_IF(textureCopy.mipLevel >= texture->GetNumMipLevels(),
+ "MipLevel (%u) is greater than the number of mip levels (%u) in %s.",
+ textureCopy.mipLevel, texture->GetNumMipLevels(), texture);
- MaybeError ValidateLinearTextureData(const TextureDataLayout& layout,
- uint64_t byteSize,
- const TexelBlockInfo& blockInfo,
- const Extent3D& copyExtent) {
- ASSERT(copyExtent.height % blockInfo.height == 0);
- uint32_t heightInBlocks = copyExtent.height / blockInfo.height;
+ DAWN_TRY(ValidateTextureAspect(textureCopy.aspect));
+ DAWN_INVALID_IF(SelectFormatAspects(texture->GetFormat(), textureCopy.aspect) == Aspect::None,
+ "%s format (%s) does not have the selected aspect (%s).", texture,
+ texture->GetFormat().format, textureCopy.aspect);
- // TODO(dawn:563): Right now kCopyStrideUndefined will be formatted as a large value in the
- // validation message. Investigate ways to make it print as a more readable symbol.
+ if (texture->GetSampleCount() > 1 || texture->GetFormat().HasDepthOrStencil()) {
+ Extent3D subresourceSize =
+ texture->GetMipLevelSingleSubresourcePhysicalSize(textureCopy.mipLevel);
+ ASSERT(texture->GetDimension() == wgpu::TextureDimension::e2D);
DAWN_INVALID_IF(
- copyExtent.depthOrArrayLayers > 1 &&
- (layout.bytesPerRow == wgpu::kCopyStrideUndefined ||
- layout.rowsPerImage == wgpu::kCopyStrideUndefined),
- "Copy depth (%u) is > 1, but bytesPerRow (%u) or rowsPerImage (%u) are not specified.",
- copyExtent.depthOrArrayLayers, layout.bytesPerRow, layout.rowsPerImage);
+ textureCopy.origin.x != 0 || textureCopy.origin.y != 0 ||
+ subresourceSize.width != copySize.width ||
+ subresourceSize.height != copySize.height,
+            "Copy origin (%s) and size (%s) do not cover the entire subresource (origin: "
+ "[x: 0, y: 0], size: %s) of %s. The entire subresource must be copied when the "
+ "format (%s) is a depth/stencil format or the sample count (%u) is > 1.",
+ &textureCopy.origin, &copySize, &subresourceSize, texture, texture->GetFormat().format,
+ texture->GetSampleCount());
+ }
- DAWN_INVALID_IF(heightInBlocks > 1 && layout.bytesPerRow == wgpu::kCopyStrideUndefined,
- "HeightInBlocks (%u) is > 1, but bytesPerRow is not specified.",
- heightInBlocks);
+ return {};
+}
- // Validation for other members in layout:
- ASSERT(copyExtent.width % blockInfo.width == 0);
- uint32_t widthInBlocks = copyExtent.width / blockInfo.width;
- ASSERT(Safe32x32(widthInBlocks, blockInfo.byteSize) <=
- std::numeric_limits<uint32_t>::max());
- uint32_t bytesInLastRow = widthInBlocks * blockInfo.byteSize;
+MaybeError ValidateTextureCopyRange(DeviceBase const* device,
+ const ImageCopyTexture& textureCopy,
+ const Extent3D& copySize) {
+ const TextureBase* texture = textureCopy.texture;
- // These != wgpu::kCopyStrideUndefined checks are technically redundant with the > checks,
- // but they should get optimized out.
+ // Validation for the copy being in-bounds:
+ Extent3D mipSize = texture->GetMipLevelSingleSubresourcePhysicalSize(textureCopy.mipLevel);
+ // For 1D/2D textures, include the array layer as depth so it can be checked with other
+ // dimensions.
+ if (texture->GetDimension() != wgpu::TextureDimension::e3D) {
+ mipSize.depthOrArrayLayers = texture->GetArrayLayers();
+ }
+ // All texture dimensions are in uint32_t so by doing checks in uint64_t we avoid
+ // overflows.
+ DAWN_INVALID_IF(
+ static_cast<uint64_t>(textureCopy.origin.x) + static_cast<uint64_t>(copySize.width) >
+ static_cast<uint64_t>(mipSize.width) ||
+ static_cast<uint64_t>(textureCopy.origin.y) + static_cast<uint64_t>(copySize.height) >
+ static_cast<uint64_t>(mipSize.height) ||
+ static_cast<uint64_t>(textureCopy.origin.z) +
+ static_cast<uint64_t>(copySize.depthOrArrayLayers) >
+ static_cast<uint64_t>(mipSize.depthOrArrayLayers),
+ "Texture copy range (origin: %s, copySize: %s) touches outside of %s mip level %u "
+ "size (%s).",
+ &textureCopy.origin, &copySize, texture, textureCopy.mipLevel, &mipSize);
+
+ // Validation for the texel block alignments:
+ const Format& format = textureCopy.texture->GetFormat();
+ if (format.isCompressed) {
+ const TexelBlockInfo& blockInfo = format.GetAspectInfo(textureCopy.aspect).block;
DAWN_INVALID_IF(
- layout.bytesPerRow != wgpu::kCopyStrideUndefined && bytesInLastRow > layout.bytesPerRow,
- "The byte size of each row (%u) is > bytesPerRow (%u).", bytesInLastRow,
- layout.bytesPerRow);
-
- DAWN_INVALID_IF(layout.rowsPerImage != wgpu::kCopyStrideUndefined &&
- heightInBlocks > layout.rowsPerImage,
- "The height of each image in blocks (%u) is > rowsPerImage (%u).",
- heightInBlocks, layout.rowsPerImage);
-
- // We compute required bytes in copy after validating texel block alignments
- // because the divisibility conditions are necessary for the algorithm to be valid,
- // also the bytesPerRow bound is necessary to avoid overflows.
- uint64_t requiredBytesInCopy;
- DAWN_TRY_ASSIGN(requiredBytesInCopy,
- ComputeRequiredBytesInCopy(blockInfo, copyExtent, layout.bytesPerRow,
- layout.rowsPerImage));
-
- bool fitsInData =
- layout.offset <= byteSize && (requiredBytesInCopy <= (byteSize - layout.offset));
+ textureCopy.origin.x % blockInfo.width != 0,
+ "Texture copy origin.x (%u) is not a multiple of compressed texture format block "
+ "width (%u).",
+ textureCopy.origin.x, blockInfo.width);
DAWN_INVALID_IF(
- !fitsInData,
- "Required size for texture data layout (%u) exceeds the linear data size (%u) with "
- "offset (%u).",
- requiredBytesInCopy, byteSize, layout.offset);
-
- return {};
- }
-
- MaybeError ValidateImageCopyBuffer(DeviceBase const* device,
- const ImageCopyBuffer& imageCopyBuffer) {
- DAWN_TRY(device->ValidateObject(imageCopyBuffer.buffer));
- if (imageCopyBuffer.layout.bytesPerRow != wgpu::kCopyStrideUndefined) {
- DAWN_INVALID_IF(imageCopyBuffer.layout.bytesPerRow % kTextureBytesPerRowAlignment != 0,
- "bytesPerRow (%u) is not a multiple of %u.",
- imageCopyBuffer.layout.bytesPerRow, kTextureBytesPerRowAlignment);
- }
-
- return {};
+ textureCopy.origin.y % blockInfo.height != 0,
+ "Texture copy origin.y (%u) is not a multiple of compressed texture format block "
+ "height (%u).",
+ textureCopy.origin.y, blockInfo.height);
+ DAWN_INVALID_IF(
+ copySize.width % blockInfo.width != 0,
+ "copySize.width (%u) is not a multiple of compressed texture format block width "
+ "(%u).",
+ copySize.width, blockInfo.width);
+ DAWN_INVALID_IF(copySize.height % blockInfo.height != 0,
+ "copySize.height (%u) is not a multiple of compressed texture format block "
+ "height (%u).",
+ copySize.height, blockInfo.height);
}
- MaybeError ValidateImageCopyTexture(DeviceBase const* device,
- const ImageCopyTexture& textureCopy,
- const Extent3D& copySize) {
- const TextureBase* texture = textureCopy.texture;
- DAWN_TRY(device->ValidateObject(texture));
-
- DAWN_INVALID_IF(textureCopy.mipLevel >= texture->GetNumMipLevels(),
- "MipLevel (%u) is greater than the number of mip levels (%u) in %s.",
- textureCopy.mipLevel, texture->GetNumMipLevels(), texture);
+ return {};
+}
- DAWN_TRY(ValidateTextureAspect(textureCopy.aspect));
- DAWN_INVALID_IF(
- SelectFormatAspects(texture->GetFormat(), textureCopy.aspect) == Aspect::None,
- "%s format (%s) does not have the selected aspect (%s).", texture,
- texture->GetFormat().format, textureCopy.aspect);
-
- if (texture->GetSampleCount() > 1 || texture->GetFormat().HasDepthOrStencil()) {
- Extent3D subresourceSize = texture->GetMipLevelPhysicalSize(textureCopy.mipLevel);
- ASSERT(texture->GetDimension() == wgpu::TextureDimension::e2D);
+// Always returns a single aspect (color, stencil, depth, or ith plane for multi-planar
+// formats).
+ResultOrError<Aspect> SingleAspectUsedByImageCopyTexture(const ImageCopyTexture& view) {
+ const Format& format = view.texture->GetFormat();
+ switch (view.aspect) {
+ case wgpu::TextureAspect::All: {
DAWN_INVALID_IF(
- textureCopy.origin.x != 0 || textureCopy.origin.y != 0 ||
- subresourceSize.width != copySize.width ||
- subresourceSize.height != copySize.height,
- "Copy origin (%s) and size (%s) does not cover the entire subresource (origin: "
- "[x: 0, y: 0], size: %s) of %s. The entire subresource must be copied when the "
- "format (%s) is a depth/stencil format or the sample count (%u) is > 1.",
- &textureCopy.origin, &copySize, &subresourceSize, texture,
- texture->GetFormat().format, texture->GetSampleCount());
- }
+ !HasOneBit(format.aspects),
+ "More than a single aspect (%s) is selected for multi-planar format (%s) in "
+ "%s <-> linear data copy.",
+ view.aspect, format.format, view.texture);
- return {};
- }
-
- MaybeError ValidateTextureCopyRange(DeviceBase const* device,
- const ImageCopyTexture& textureCopy,
- const Extent3D& copySize) {
- const TextureBase* texture = textureCopy.texture;
-
- // Validation for the copy being in-bounds:
- Extent3D mipSize = texture->GetMipLevelPhysicalSize(textureCopy.mipLevel);
- // For 1D/2D textures, include the array layer as depth so it can be checked with other
- // dimensions.
- if (texture->GetDimension() != wgpu::TextureDimension::e3D) {
- mipSize.depthOrArrayLayers = texture->GetArrayLayers();
- }
- // All texture dimensions are in uint32_t so by doing checks in uint64_t we avoid
- // overflows.
- DAWN_INVALID_IF(
- static_cast<uint64_t>(textureCopy.origin.x) + static_cast<uint64_t>(copySize.width) >
- static_cast<uint64_t>(mipSize.width) ||
- static_cast<uint64_t>(textureCopy.origin.y) +
- static_cast<uint64_t>(copySize.height) >
- static_cast<uint64_t>(mipSize.height) ||
- static_cast<uint64_t>(textureCopy.origin.z) +
- static_cast<uint64_t>(copySize.depthOrArrayLayers) >
- static_cast<uint64_t>(mipSize.depthOrArrayLayers),
- "Texture copy range (origin: %s, copySize: %s) touches outside of %s mip level %u "
- "size (%s).",
- &textureCopy.origin, &copySize, texture, textureCopy.mipLevel, &mipSize);
-
- // Validation for the texel block alignments:
- const Format& format = textureCopy.texture->GetFormat();
- if (format.isCompressed) {
- const TexelBlockInfo& blockInfo = format.GetAspectInfo(textureCopy.aspect).block;
- DAWN_INVALID_IF(
- textureCopy.origin.x % blockInfo.width != 0,
- "Texture copy origin.x (%u) is not a multiple of compressed texture format block "
- "width (%u).",
- textureCopy.origin.x, blockInfo.width);
- DAWN_INVALID_IF(
- textureCopy.origin.y % blockInfo.height != 0,
- "Texture copy origin.y (%u) is not a multiple of compressed texture format block "
- "height (%u).",
- textureCopy.origin.y, blockInfo.height);
- DAWN_INVALID_IF(
- copySize.width % blockInfo.width != 0,
- "copySize.width (%u) is not a multiple of compressed texture format block width "
- "(%u).",
- copySize.width, blockInfo.width);
- DAWN_INVALID_IF(
- copySize.height % blockInfo.height != 0,
- "copySize.height (%u) is not a multiple of compressed texture format block "
- "height (%u).",
- copySize.height, blockInfo.height);
+ Aspect single = format.aspects;
+ return single;
}
-
- return {};
+ case wgpu::TextureAspect::DepthOnly:
+ ASSERT(format.aspects & Aspect::Depth);
+ return Aspect::Depth;
+ case wgpu::TextureAspect::StencilOnly:
+ ASSERT(format.aspects & Aspect::Stencil);
+ return Aspect::Stencil;
+ case wgpu::TextureAspect::Plane0Only:
+ case wgpu::TextureAspect::Plane1Only:
+ break;
+ }
+ UNREACHABLE();
+}
+
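The TextureAspect::All case above only succeeds when the format exposes exactly one aspect. A minimal sketch of that single-bit test, with plain bitmask constants standing in for the Aspect flags:

    #include <cstdint>

    constexpr uint32_t kDepthBit = 0x1, kStencilBit = 0x2;  // illustrative aspect bits

    // Mirrors the HasOneBit(format.aspects) requirement: the mask must be nonzero
    // and a power of two.
    bool HasExactlyOneAspect(uint32_t aspectMask) {
        return aspectMask != 0 && (aspectMask & (aspectMask - 1)) == 0;
    }
    // (kDepthBit | kStencilBit) fails the check, so a combined depth/stencil format
    // must be addressed with DepthOnly or StencilOnly rather than All for linear copies.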
+MaybeError ValidateLinearToDepthStencilCopyRestrictions(const ImageCopyTexture& dst) {
+ Aspect aspectUsed;
+ DAWN_TRY_ASSIGN(aspectUsed, SingleAspectUsedByImageCopyTexture(dst));
+
+ const Format& format = dst.texture->GetFormat();
+ switch (format.format) {
+ case wgpu::TextureFormat::Depth16Unorm:
+ return {};
+ default:
+ DAWN_INVALID_IF(aspectUsed == Aspect::Depth,
+ "Cannot copy into the depth aspect of %s with format %s.", dst.texture,
+ format.format);
+ break;
}
- // Always returns a single aspect (color, stencil, depth, or ith plane for multi-planar
- // formats).
- ResultOrError<Aspect> SingleAspectUsedByImageCopyTexture(const ImageCopyTexture& view) {
- const Format& format = view.texture->GetFormat();
- switch (view.aspect) {
- case wgpu::TextureAspect::All: {
+ return {};
+}
+
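A compact restatement of the rule enforced above, using a stand-in enum rather than the real wgpu::TextureFormat list: linear data may only be written into a depth aspect when the format is Depth16Unorm.

    enum class DepthFormatSketch { Depth16Unorm, Depth24PlusStencil8, Depth32Float };

    // Writes from linear data into the depth aspect are rejected for every depth
    // format except Depth16Unorm; stencil-aspect writes are unaffected.
    bool CanWriteLinearDataToDepthAspect(DepthFormatSketch format) {
        return format == DepthFormatSketch::Depth16Unorm;
    }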
+MaybeError ValidateTextureToTextureCopyCommonRestrictions(const ImageCopyTexture& src,
+ const ImageCopyTexture& dst,
+ const Extent3D& copySize) {
+ const uint32_t srcSamples = src.texture->GetSampleCount();
+ const uint32_t dstSamples = dst.texture->GetSampleCount();
+
+ DAWN_INVALID_IF(
+ srcSamples != dstSamples,
+        "Source %s sample count (%u) and destination %s sample count (%u) do not match.",
+ src.texture, srcSamples, dst.texture, dstSamples);
+
+ // Metal cannot select a single aspect for texture-to-texture copies.
+ const Format& format = src.texture->GetFormat();
+ DAWN_INVALID_IF(
+ SelectFormatAspects(format, src.aspect) != format.aspects,
+ "Source %s aspect (%s) doesn't select all the aspects of the source format (%s).",
+ src.texture, src.aspect, format.format);
+
+ DAWN_INVALID_IF(
+ SelectFormatAspects(format, dst.aspect) != format.aspects,
+ "Destination %s aspect (%s) doesn't select all the aspects of the destination format "
+ "(%s).",
+ dst.texture, dst.aspect, format.format);
+
+ if (src.texture == dst.texture) {
+ switch (src.texture->GetDimension()) {
+ case wgpu::TextureDimension::e1D:
+ ASSERT(src.mipLevel == 0 && src.origin.z == 0 && dst.origin.z == 0);
+ return DAWN_FORMAT_VALIDATION_ERROR("Copy is from %s to itself.", src.texture);
+
+ case wgpu::TextureDimension::e2D:
DAWN_INVALID_IF(
- !HasOneBit(format.aspects),
- "More than a single aspect (%s) is selected for multi-planar format (%s) in "
- "%s <-> linear data copy.",
- view.aspect, format.format, view.texture);
-
- Aspect single = format.aspects;
- return single;
- }
- case wgpu::TextureAspect::DepthOnly:
- ASSERT(format.aspects & Aspect::Depth);
- return Aspect::Depth;
- case wgpu::TextureAspect::StencilOnly:
- ASSERT(format.aspects & Aspect::Stencil);
- return Aspect::Stencil;
- case wgpu::TextureAspect::Plane0Only:
- case wgpu::TextureAspect::Plane1Only:
+ src.mipLevel == dst.mipLevel &&
+ IsRangeOverlapped(src.origin.z, dst.origin.z, copySize.depthOrArrayLayers),
+ "Copy source and destination are overlapping layer ranges "
+ "([%u, %u) and [%u, %u)) of %s mip level %u",
+ src.origin.z, src.origin.z + copySize.depthOrArrayLayers, dst.origin.z,
+ dst.origin.z + copySize.depthOrArrayLayers, src.texture, src.mipLevel);
break;
- }
- UNREACHABLE();
- }
- MaybeError ValidateLinearToDepthStencilCopyRestrictions(const ImageCopyTexture& dst) {
- Aspect aspectUsed;
- DAWN_TRY_ASSIGN(aspectUsed, SingleAspectUsedByImageCopyTexture(dst));
-
- const Format& format = dst.texture->GetFormat();
- switch (format.format) {
- case wgpu::TextureFormat::Depth16Unorm:
- return {};
- default:
- DAWN_INVALID_IF(aspectUsed == Aspect::Depth,
- "Cannot copy into the depth aspect of %s with format %s.",
- dst.texture, format.format);
+ case wgpu::TextureDimension::e3D:
+ DAWN_INVALID_IF(src.mipLevel == dst.mipLevel,
+ "Copy is from %s mip level %u to itself.", src.texture,
+ src.mipLevel);
break;
}
-
- return {};
}
- MaybeError ValidateTextureToTextureCopyCommonRestrictions(const ImageCopyTexture& src,
- const ImageCopyTexture& dst,
- const Extent3D& copySize) {
- const uint32_t srcSamples = src.texture->GetSampleCount();
- const uint32_t dstSamples = dst.texture->GetSampleCount();
-
- DAWN_INVALID_IF(
- srcSamples != dstSamples,
- "Source %s sample count (%u) and destination %s sample count (%u) does not match.",
- src.texture, srcSamples, dst.texture, dstSamples);
-
- // Metal cannot select a single aspect for texture-to-texture copies.
- const Format& format = src.texture->GetFormat();
- DAWN_INVALID_IF(
- SelectFormatAspects(format, src.aspect) != format.aspects,
- "Source %s aspect (%s) doesn't select all the aspects of the source format (%s).",
- src.texture, src.aspect, format.format);
-
- DAWN_INVALID_IF(
- SelectFormatAspects(format, dst.aspect) != format.aspects,
- "Destination %s aspect (%s) doesn't select all the aspects of the destination format "
- "(%s).",
- dst.texture, dst.aspect, format.format);
-
- if (src.texture == dst.texture) {
- switch (src.texture->GetDimension()) {
- case wgpu::TextureDimension::e1D:
- ASSERT(src.mipLevel == 0 && src.origin.z == 0 && dst.origin.z == 0);
- return DAWN_FORMAT_VALIDATION_ERROR("Copy is from %s to itself.", src.texture);
-
- case wgpu::TextureDimension::e2D:
- DAWN_INVALID_IF(src.mipLevel == dst.mipLevel &&
- IsRangeOverlapped(src.origin.z, dst.origin.z,
- copySize.depthOrArrayLayers),
- "Copy source and destination are overlapping layer ranges "
- "([%u, %u) and [%u, %u)) of %s mip level %u",
- src.origin.z, src.origin.z + copySize.depthOrArrayLayers,
- dst.origin.z, dst.origin.z + copySize.depthOrArrayLayers,
- src.texture, src.mipLevel);
- break;
-
- case wgpu::TextureDimension::e3D:
- DAWN_INVALID_IF(src.mipLevel == dst.mipLevel,
- "Copy is from %s mip level %u to itself.", src.texture,
- src.mipLevel);
- break;
- }
- }
-
- return {};
+ return {};
+}
+
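IsRangeOverlapped is declared in CommandValidation.h but its definition is not part of this hunk. As a hedged sketch, a helper with that shape typically checks whether two equal-length, half-open layer ranges intersect:

    #include <cstdint>

    // Assumed semantics: [startA, startA + length) and [startB, startB + length)
    // overlap exactly when the distance between the two starts is less than length.
    bool IsRangeOverlappedSketch(uint32_t startA, uint32_t startB, uint32_t length) {
        if (length == 0) {
            return false;
        }
        uint32_t lo = startA < startB ? startA : startB;
        uint32_t hi = startA < startB ? startB : startA;
        return hi - lo < length;
    }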
+MaybeError ValidateTextureToTextureCopyRestrictions(const ImageCopyTexture& src,
+ const ImageCopyTexture& dst,
+ const Extent3D& copySize) {
+    // Metal requires that texture-to-texture copies happen between texture formats that are
+    // equal to each other or differ only in srgb-ness.
+ DAWN_INVALID_IF(!src.texture->GetFormat().CopyCompatibleWith(dst.texture->GetFormat()),
+ "Source %s format (%s) and destination %s format (%s) are not copy compatible.",
+ src.texture, src.texture->GetFormat().format, dst.texture,
+ dst.texture->GetFormat().format);
+
+ return ValidateTextureToTextureCopyCommonRestrictions(src, dst, copySize);
+}
+
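The CopyCompatibleWith check referenced above treats formats as compatible when they are identical or differ only in srgb-ness. A rough standalone sketch using format-name strings purely for illustration (the real check compares format metadata, not strings):

    #include <string>

    std::string StripSrgbSuffix(std::string name) {
        const std::string suffix = "-srgb";
        if (name.size() >= suffix.size() &&
            name.compare(name.size() - suffix.size(), suffix.size(), suffix) == 0) {
            name.erase(name.size() - suffix.size());
        }
        return name;
    }

    // E.g. "bgra8unorm" and "bgra8unorm-srgb" are copy compatible; "bgra8unorm"
    // and "rgba8unorm" are not.
    bool CopyCompatibleSketch(const std::string& a, const std::string& b) {
        return StripSrgbSuffix(a) == StripSrgbSuffix(b);
    }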
+MaybeError ValidateCanUseAs(const TextureBase* texture,
+ wgpu::TextureUsage usage,
+ UsageValidationMode mode) {
+ ASSERT(wgpu::HasZeroOrOneBits(usage));
+ switch (mode) {
+ case UsageValidationMode::Default:
+ DAWN_INVALID_IF(!(texture->GetUsage() & usage), "%s usage (%s) doesn't include %s.",
+ texture, texture->GetUsage(), usage);
+ break;
+ case UsageValidationMode::Internal:
+ DAWN_INVALID_IF(!(texture->GetInternalUsage() & usage),
+ "%s internal usage (%s) doesn't include %s.", texture,
+ texture->GetInternalUsage(), usage);
+ break;
}
- MaybeError ValidateTextureToTextureCopyRestrictions(const ImageCopyTexture& src,
- const ImageCopyTexture& dst,
- const Extent3D& copySize) {
- // Metal requires texture-to-texture copies happens between texture formats that equal to
- // each other or only have diff on srgb-ness.
- DAWN_INVALID_IF(
- !src.texture->GetFormat().CopyCompatibleWith(dst.texture->GetFormat()),
- "Source %s format (%s) and destination %s format (%s) are not copy compatible.",
- src.texture, src.texture->GetFormat().format, dst.texture,
- dst.texture->GetFormat().format);
+ return {};
+}
- return ValidateTextureToTextureCopyCommonRestrictions(src, dst, copySize);
- }
-
- MaybeError ValidateCanUseAs(const TextureBase* texture,
- wgpu::TextureUsage usage,
- UsageValidationMode mode) {
- ASSERT(wgpu::HasZeroOrOneBits(usage));
- switch (mode) {
- case UsageValidationMode::Default:
- DAWN_INVALID_IF(!(texture->GetUsage() & usage), "%s usage (%s) doesn't include %s.",
- texture, texture->GetUsage(), usage);
- break;
- case UsageValidationMode::Internal:
- DAWN_INVALID_IF(!(texture->GetInternalUsage() & usage),
- "%s internal usage (%s) doesn't include %s.", texture,
- texture->GetInternalUsage(), usage);
- break;
- }
-
- return {};
- }
-
- MaybeError ValidateCanUseAs(const BufferBase* buffer, wgpu::BufferUsage usage) {
- ASSERT(wgpu::HasZeroOrOneBits(usage));
- DAWN_INVALID_IF(!(buffer->GetUsage() & usage), "%s usage (%s) doesn't include %s.", buffer,
- buffer->GetUsage(), usage);
- return {};
- }
+MaybeError ValidateCanUseAs(const BufferBase* buffer, wgpu::BufferUsage usage) {
+ ASSERT(wgpu::HasZeroOrOneBits(usage));
+ DAWN_INVALID_IF(!(buffer->GetUsageExternalOnly() & usage), "%s usage (%s) doesn't include %s.",
+ buffer, buffer->GetUsageExternalOnly(), usage);
+ return {};
+}
} // namespace dawn::native
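The usage checks above follow a simple bitmask pattern: the caller asks about a single usage bit and validation verifies that bit is present in the granted usages. A minimal sketch with plain constants standing in for the wgpu usage enums:

    #include <cstdint>

    constexpr uint32_t kCopySrc = 0x1, kCopyDst = 0x2, kTextureBinding = 0x4;

    // requiredUsage carries zero or one bits, mirroring ASSERT(HasZeroOrOneBits(usage)).
    bool HasRequiredUsage(uint32_t grantedUsages, uint32_t requiredUsage) {
        return (grantedUsages & requiredUsage) != 0;
    }
    // E.g. a resource created with (kCopyDst | kTextureBinding) fails a kCopySrc check.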
diff --git a/chromium/third_party/dawn/src/dawn/native/CommandValidation.h b/chromium/third_party/dawn/src/dawn/native/CommandValidation.h
index c65e9837c66..ede6b310b4a 100644
--- a/chromium/third_party/dawn/src/dawn/native/CommandValidation.h
+++ b/chromium/third_party/dawn/src/dawn/native/CommandValidation.h
@@ -15,75 +15,82 @@
#ifndef SRC_DAWN_NATIVE_COMMANDVALIDATION_H_
#define SRC_DAWN_NATIVE_COMMANDVALIDATION_H_
+#include <vector>
+
#include "dawn/native/CommandAllocator.h"
#include "dawn/native/Error.h"
#include "dawn/native/Texture.h"
-#include <vector>
-
namespace dawn::native {
- class QuerySetBase;
- struct SyncScopeResourceUsage;
- struct TexelBlockInfo;
-
- MaybeError ValidateSyncScopeResourceUsage(const SyncScopeResourceUsage& usage);
-
- MaybeError ValidateTimestampQuery(const DeviceBase* device,
- const QuerySetBase* querySet,
- uint32_t queryIndex);
-
- MaybeError ValidateWriteBuffer(const DeviceBase* device,
- const BufferBase* buffer,
- uint64_t bufferOffset,
- uint64_t size);
-
- ResultOrError<uint64_t> ComputeRequiredBytesInCopy(const TexelBlockInfo& blockInfo,
- const Extent3D& copySize,
- uint32_t bytesPerRow,
- uint32_t rowsPerImage);
-
- void ApplyDefaultTextureDataLayoutOptions(TextureDataLayout* layout,
- const TexelBlockInfo& blockInfo,
- const Extent3D& copyExtent);
- MaybeError ValidateLinearTextureData(const TextureDataLayout& layout,
- uint64_t byteSize,
- const TexelBlockInfo& blockInfo,
- const Extent3D& copyExtent);
- MaybeError ValidateTextureCopyRange(DeviceBase const* device,
- const ImageCopyTexture& imageCopyTexture,
- const Extent3D& copySize);
- ResultOrError<Aspect> SingleAspectUsedByImageCopyTexture(const ImageCopyTexture& view);
- MaybeError ValidateLinearToDepthStencilCopyRestrictions(const ImageCopyTexture& dst);
-
- MaybeError ValidateImageCopyBuffer(DeviceBase const* device,
- const ImageCopyBuffer& imageCopyBuffer);
- MaybeError ValidateImageCopyTexture(DeviceBase const* device,
- const ImageCopyTexture& imageCopyTexture,
- const Extent3D& copySize);
-
- MaybeError ValidateCopySizeFitsInBuffer(const Ref<BufferBase>& buffer,
- uint64_t offset,
- uint64_t size);
-
- bool IsRangeOverlapped(uint32_t startA, uint32_t startB, uint32_t length);
-
- MaybeError ValidateTextureToTextureCopyCommonRestrictions(const ImageCopyTexture& src,
- const ImageCopyTexture& dst,
- const Extent3D& copySize);
- MaybeError ValidateTextureToTextureCopyRestrictions(const ImageCopyTexture& src,
- const ImageCopyTexture& dst,
- const Extent3D& copySize);
-
- enum class UsageValidationMode {
- Default,
- Internal,
- };
-
- MaybeError ValidateCanUseAs(const TextureBase* texture,
- wgpu::TextureUsage usage,
- UsageValidationMode mode);
- MaybeError ValidateCanUseAs(const BufferBase* buffer, wgpu::BufferUsage usage);
+class QuerySetBase;
+struct SyncScopeResourceUsage;
+struct TexelBlockInfo;
+
+MaybeError ValidateSyncScopeResourceUsage(const SyncScopeResourceUsage& usage);
+
+MaybeError ValidateTimestampQuery(const DeviceBase* device,
+ const QuerySetBase* querySet,
+ uint32_t queryIndex);
+
+MaybeError ValidateWriteBuffer(const DeviceBase* device,
+ const BufferBase* buffer,
+ uint64_t bufferOffset,
+ uint64_t size);
+
+template <typename A, typename B>
+DAWN_FORCE_INLINE uint64_t Safe32x32(A a, B b) {
+ static_assert(std::is_same<A, uint32_t>::value, "'a' must be uint32_t");
+ static_assert(std::is_same<B, uint32_t>::value, "'b' must be uint32_t");
+ return uint64_t(a) * uint64_t(b);
+}
+
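A quick compile-time check of the property Safe32x32 relies on: widening both 32-bit operands before multiplying keeps even the worst-case product representable in 64 bits.

    #include <cstdint>

    static_assert(uint64_t(0xFFFFFFFFu) * uint64_t(0xFFFFFFFFu) == 0xFFFFFFFE00000001ull,
                  "a 32x32 -> 64 bit product never overflows uint64_t");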
+ResultOrError<uint64_t> ComputeRequiredBytesInCopy(const TexelBlockInfo& blockInfo,
+ const Extent3D& copySize,
+ uint32_t bytesPerRow,
+ uint32_t rowsPerImage);
+
+void ApplyDefaultTextureDataLayoutOptions(TextureDataLayout* layout,
+ const TexelBlockInfo& blockInfo,
+ const Extent3D& copyExtent);
+MaybeError ValidateLinearTextureData(const TextureDataLayout& layout,
+ uint64_t byteSize,
+ const TexelBlockInfo& blockInfo,
+ const Extent3D& copyExtent);
+MaybeError ValidateTextureCopyRange(DeviceBase const* device,
+ const ImageCopyTexture& imageCopyTexture,
+ const Extent3D& copySize);
+ResultOrError<Aspect> SingleAspectUsedByImageCopyTexture(const ImageCopyTexture& view);
+MaybeError ValidateLinearToDepthStencilCopyRestrictions(const ImageCopyTexture& dst);
+
+MaybeError ValidateImageCopyBuffer(DeviceBase const* device,
+ const ImageCopyBuffer& imageCopyBuffer);
+MaybeError ValidateImageCopyTexture(DeviceBase const* device,
+ const ImageCopyTexture& imageCopyTexture,
+ const Extent3D& copySize);
+
+MaybeError ValidateCopySizeFitsInBuffer(const Ref<BufferBase>& buffer,
+ uint64_t offset,
+ uint64_t size);
+
+bool IsRangeOverlapped(uint32_t startA, uint32_t startB, uint32_t length);
+
+MaybeError ValidateTextureToTextureCopyCommonRestrictions(const ImageCopyTexture& src,
+ const ImageCopyTexture& dst,
+ const Extent3D& copySize);
+MaybeError ValidateTextureToTextureCopyRestrictions(const ImageCopyTexture& src,
+ const ImageCopyTexture& dst,
+ const Extent3D& copySize);
+
+enum class UsageValidationMode {
+ Default,
+ Internal,
+};
+
+MaybeError ValidateCanUseAs(const TextureBase* texture,
+ wgpu::TextureUsage usage,
+ UsageValidationMode mode);
+MaybeError ValidateCanUseAs(const BufferBase* buffer, wgpu::BufferUsage usage);
} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/Commands.cpp b/chromium/third_party/dawn/src/dawn/native/Commands.cpp
index 3337cbd4cf8..6f3d6df4674 100644
--- a/chromium/third_party/dawn/src/dawn/native/Commands.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/Commands.cpp
@@ -25,341 +25,412 @@
namespace dawn::native {
- void FreeCommands(CommandIterator* commands) {
- commands->Reset();
-
- Command type;
- while (commands->NextCommandId(&type)) {
- switch (type) {
- case Command::BeginComputePass: {
- BeginComputePassCmd* begin = commands->NextCommand<BeginComputePassCmd>();
- begin->~BeginComputePassCmd();
- break;
- }
- case Command::BeginOcclusionQuery: {
- BeginOcclusionQueryCmd* begin = commands->NextCommand<BeginOcclusionQueryCmd>();
- begin->~BeginOcclusionQueryCmd();
- break;
- }
- case Command::BeginRenderPass: {
- BeginRenderPassCmd* begin = commands->NextCommand<BeginRenderPassCmd>();
- begin->~BeginRenderPassCmd();
- break;
- }
- case Command::CopyBufferToBuffer: {
- CopyBufferToBufferCmd* copy = commands->NextCommand<CopyBufferToBufferCmd>();
- copy->~CopyBufferToBufferCmd();
- break;
- }
- case Command::CopyBufferToTexture: {
- CopyBufferToTextureCmd* copy = commands->NextCommand<CopyBufferToTextureCmd>();
- copy->~CopyBufferToTextureCmd();
- break;
- }
- case Command::CopyTextureToBuffer: {
- CopyTextureToBufferCmd* copy = commands->NextCommand<CopyTextureToBufferCmd>();
- copy->~CopyTextureToBufferCmd();
- break;
- }
- case Command::CopyTextureToTexture: {
- CopyTextureToTextureCmd* copy =
- commands->NextCommand<CopyTextureToTextureCmd>();
- copy->~CopyTextureToTextureCmd();
- break;
- }
- case Command::Dispatch: {
- DispatchCmd* dispatch = commands->NextCommand<DispatchCmd>();
- dispatch->~DispatchCmd();
- break;
- }
- case Command::DispatchIndirect: {
- DispatchIndirectCmd* dispatch = commands->NextCommand<DispatchIndirectCmd>();
- dispatch->~DispatchIndirectCmd();
- break;
- }
- case Command::Draw: {
- DrawCmd* draw = commands->NextCommand<DrawCmd>();
- draw->~DrawCmd();
- break;
- }
- case Command::DrawIndexed: {
- DrawIndexedCmd* draw = commands->NextCommand<DrawIndexedCmd>();
- draw->~DrawIndexedCmd();
- break;
- }
- case Command::DrawIndirect: {
- DrawIndirectCmd* draw = commands->NextCommand<DrawIndirectCmd>();
- draw->~DrawIndirectCmd();
- break;
- }
- case Command::DrawIndexedIndirect: {
- DrawIndexedIndirectCmd* draw = commands->NextCommand<DrawIndexedIndirectCmd>();
- draw->~DrawIndexedIndirectCmd();
- break;
- }
- case Command::EndComputePass: {
- EndComputePassCmd* cmd = commands->NextCommand<EndComputePassCmd>();
- cmd->~EndComputePassCmd();
- break;
- }
- case Command::EndOcclusionQuery: {
- EndOcclusionQueryCmd* cmd = commands->NextCommand<EndOcclusionQueryCmd>();
- cmd->~EndOcclusionQueryCmd();
- break;
- }
- case Command::EndRenderPass: {
- EndRenderPassCmd* cmd = commands->NextCommand<EndRenderPassCmd>();
- cmd->~EndRenderPassCmd();
- break;
- }
- case Command::ExecuteBundles: {
- ExecuteBundlesCmd* cmd = commands->NextCommand<ExecuteBundlesCmd>();
- auto bundles = commands->NextData<Ref<RenderBundleBase>>(cmd->count);
- for (size_t i = 0; i < cmd->count; ++i) {
- (&bundles[i])->~Ref<RenderBundleBase>();
- }
- cmd->~ExecuteBundlesCmd();
- break;
- }
- case Command::ClearBuffer: {
- ClearBufferCmd* cmd = commands->NextCommand<ClearBufferCmd>();
- cmd->~ClearBufferCmd();
- break;
- }
- case Command::InsertDebugMarker: {
- InsertDebugMarkerCmd* cmd = commands->NextCommand<InsertDebugMarkerCmd>();
- commands->NextData<char>(cmd->length + 1);
- cmd->~InsertDebugMarkerCmd();
- break;
- }
- case Command::PopDebugGroup: {
- PopDebugGroupCmd* cmd = commands->NextCommand<PopDebugGroupCmd>();
- cmd->~PopDebugGroupCmd();
- break;
- }
- case Command::PushDebugGroup: {
- PushDebugGroupCmd* cmd = commands->NextCommand<PushDebugGroupCmd>();
- commands->NextData<char>(cmd->length + 1);
- cmd->~PushDebugGroupCmd();
- break;
- }
- case Command::ResolveQuerySet: {
- ResolveQuerySetCmd* cmd = commands->NextCommand<ResolveQuerySetCmd>();
- cmd->~ResolveQuerySetCmd();
- break;
- }
- case Command::SetComputePipeline: {
- SetComputePipelineCmd* cmd = commands->NextCommand<SetComputePipelineCmd>();
- cmd->~SetComputePipelineCmd();
- break;
- }
- case Command::SetRenderPipeline: {
- SetRenderPipelineCmd* cmd = commands->NextCommand<SetRenderPipelineCmd>();
- cmd->~SetRenderPipelineCmd();
- break;
- }
- case Command::SetStencilReference: {
- SetStencilReferenceCmd* cmd = commands->NextCommand<SetStencilReferenceCmd>();
- cmd->~SetStencilReferenceCmd();
- break;
- }
- case Command::SetViewport: {
- SetViewportCmd* cmd = commands->NextCommand<SetViewportCmd>();
- cmd->~SetViewportCmd();
- break;
- }
- case Command::SetScissorRect: {
- SetScissorRectCmd* cmd = commands->NextCommand<SetScissorRectCmd>();
- cmd->~SetScissorRectCmd();
- break;
- }
- case Command::SetBlendConstant: {
- SetBlendConstantCmd* cmd = commands->NextCommand<SetBlendConstantCmd>();
- cmd->~SetBlendConstantCmd();
- break;
- }
- case Command::SetBindGroup: {
- SetBindGroupCmd* cmd = commands->NextCommand<SetBindGroupCmd>();
- if (cmd->dynamicOffsetCount > 0) {
- commands->NextData<uint32_t>(cmd->dynamicOffsetCount);
- }
- cmd->~SetBindGroupCmd();
- break;
- }
- case Command::SetIndexBuffer: {
- SetIndexBufferCmd* cmd = commands->NextCommand<SetIndexBufferCmd>();
- cmd->~SetIndexBufferCmd();
- break;
- }
- case Command::SetVertexBuffer: {
- SetVertexBufferCmd* cmd = commands->NextCommand<SetVertexBufferCmd>();
- cmd->~SetVertexBufferCmd();
- break;
- }
- case Command::WriteBuffer: {
- WriteBufferCmd* write = commands->NextCommand<WriteBufferCmd>();
- commands->NextData<uint8_t>(write->size);
- write->~WriteBufferCmd();
- break;
- }
- case Command::WriteTimestamp: {
- WriteTimestampCmd* cmd = commands->NextCommand<WriteTimestampCmd>();
- cmd->~WriteTimestampCmd();
- break;
- }
- }
- }
-
- commands->MakeEmptyAsDataWasDestroyed();
- }
+void FreeCommands(CommandIterator* commands) {
+ commands->Reset();
- void SkipCommand(CommandIterator* commands, Command type) {
+ Command type;
+ while (commands->NextCommandId(&type)) {
switch (type) {
- case Command::BeginComputePass:
- commands->NextCommand<BeginComputePassCmd>();
+ case Command::BeginComputePass: {
+ BeginComputePassCmd* begin = commands->NextCommand<BeginComputePassCmd>();
+ begin->~BeginComputePassCmd();
break;
-
- case Command::BeginOcclusionQuery:
- commands->NextCommand<BeginOcclusionQueryCmd>();
+ }
+ case Command::BeginOcclusionQuery: {
+ BeginOcclusionQueryCmd* begin = commands->NextCommand<BeginOcclusionQueryCmd>();
+ begin->~BeginOcclusionQueryCmd();
break;
-
- case Command::BeginRenderPass:
- commands->NextCommand<BeginRenderPassCmd>();
+ }
+ case Command::BeginRenderPass: {
+ BeginRenderPassCmd* begin = commands->NextCommand<BeginRenderPassCmd>();
+ begin->~BeginRenderPassCmd();
break;
-
- case Command::CopyBufferToBuffer:
- commands->NextCommand<CopyBufferToBufferCmd>();
+ }
+ case Command::CopyBufferToBuffer: {
+ CopyBufferToBufferCmd* copy = commands->NextCommand<CopyBufferToBufferCmd>();
+ copy->~CopyBufferToBufferCmd();
break;
-
- case Command::CopyBufferToTexture:
- commands->NextCommand<CopyBufferToTextureCmd>();
+ }
+ case Command::CopyBufferToTexture: {
+ CopyBufferToTextureCmd* copy = commands->NextCommand<CopyBufferToTextureCmd>();
+ copy->~CopyBufferToTextureCmd();
break;
-
- case Command::CopyTextureToBuffer:
- commands->NextCommand<CopyTextureToBufferCmd>();
+ }
+ case Command::CopyTextureToBuffer: {
+ CopyTextureToBufferCmd* copy = commands->NextCommand<CopyTextureToBufferCmd>();
+ copy->~CopyTextureToBufferCmd();
break;
-
- case Command::CopyTextureToTexture:
- commands->NextCommand<CopyTextureToTextureCmd>();
+ }
+ case Command::CopyTextureToTexture: {
+ CopyTextureToTextureCmd* copy = commands->NextCommand<CopyTextureToTextureCmd>();
+ copy->~CopyTextureToTextureCmd();
break;
-
- case Command::Dispatch:
- commands->NextCommand<DispatchCmd>();
+ }
+ case Command::Dispatch: {
+ DispatchCmd* dispatch = commands->NextCommand<DispatchCmd>();
+ dispatch->~DispatchCmd();
break;
-
- case Command::DispatchIndirect:
- commands->NextCommand<DispatchIndirectCmd>();
+ }
+ case Command::DispatchIndirect: {
+ DispatchIndirectCmd* dispatch = commands->NextCommand<DispatchIndirectCmd>();
+ dispatch->~DispatchIndirectCmd();
break;
-
- case Command::Draw:
- commands->NextCommand<DrawCmd>();
+ }
+ case Command::Draw: {
+ DrawCmd* draw = commands->NextCommand<DrawCmd>();
+ draw->~DrawCmd();
break;
-
- case Command::DrawIndexed:
- commands->NextCommand<DrawIndexedCmd>();
+ }
+ case Command::DrawIndexed: {
+ DrawIndexedCmd* draw = commands->NextCommand<DrawIndexedCmd>();
+ draw->~DrawIndexedCmd();
break;
-
- case Command::DrawIndirect:
- commands->NextCommand<DrawIndirectCmd>();
+ }
+ case Command::DrawIndirect: {
+ DrawIndirectCmd* draw = commands->NextCommand<DrawIndirectCmd>();
+ draw->~DrawIndirectCmd();
break;
-
- case Command::DrawIndexedIndirect:
- commands->NextCommand<DrawIndexedIndirectCmd>();
+ }
+ case Command::DrawIndexedIndirect: {
+ DrawIndexedIndirectCmd* draw = commands->NextCommand<DrawIndexedIndirectCmd>();
+ draw->~DrawIndexedIndirectCmd();
break;
-
- case Command::EndComputePass:
- commands->NextCommand<EndComputePassCmd>();
+ }
+ case Command::EndComputePass: {
+ EndComputePassCmd* cmd = commands->NextCommand<EndComputePassCmd>();
+ cmd->~EndComputePassCmd();
break;
-
- case Command::EndOcclusionQuery:
- commands->NextCommand<EndOcclusionQueryCmd>();
+ }
+ case Command::EndOcclusionQuery: {
+ EndOcclusionQueryCmd* cmd = commands->NextCommand<EndOcclusionQueryCmd>();
+ cmd->~EndOcclusionQueryCmd();
break;
-
- case Command::EndRenderPass:
- commands->NextCommand<EndRenderPassCmd>();
+ }
+ case Command::EndRenderPass: {
+ EndRenderPassCmd* cmd = commands->NextCommand<EndRenderPassCmd>();
+ cmd->~EndRenderPassCmd();
break;
-
+ }
case Command::ExecuteBundles: {
- auto* cmd = commands->NextCommand<ExecuteBundlesCmd>();
- commands->NextData<Ref<RenderBundleBase>>(cmd->count);
+ ExecuteBundlesCmd* cmd = commands->NextCommand<ExecuteBundlesCmd>();
+ auto bundles = commands->NextData<Ref<RenderBundleBase>>(cmd->count);
+ for (size_t i = 0; i < cmd->count; ++i) {
+ (&bundles[i])->~Ref<RenderBundleBase>();
+ }
+ cmd->~ExecuteBundlesCmd();
break;
}
-
- case Command::ClearBuffer:
- commands->NextCommand<ClearBufferCmd>();
+ case Command::ClearBuffer: {
+ ClearBufferCmd* cmd = commands->NextCommand<ClearBufferCmd>();
+ cmd->~ClearBufferCmd();
break;
-
+ }
case Command::InsertDebugMarker: {
InsertDebugMarkerCmd* cmd = commands->NextCommand<InsertDebugMarkerCmd>();
commands->NextData<char>(cmd->length + 1);
+ cmd->~InsertDebugMarkerCmd();
break;
}
-
- case Command::PopDebugGroup:
- commands->NextCommand<PopDebugGroupCmd>();
+ case Command::PopDebugGroup: {
+ PopDebugGroupCmd* cmd = commands->NextCommand<PopDebugGroupCmd>();
+ cmd->~PopDebugGroupCmd();
break;
-
+ }
case Command::PushDebugGroup: {
PushDebugGroupCmd* cmd = commands->NextCommand<PushDebugGroupCmd>();
commands->NextData<char>(cmd->length + 1);
+ cmd->~PushDebugGroupCmd();
break;
}
-
case Command::ResolveQuerySet: {
- commands->NextCommand<ResolveQuerySetCmd>();
+ ResolveQuerySetCmd* cmd = commands->NextCommand<ResolveQuerySetCmd>();
+ cmd->~ResolveQuerySetCmd();
break;
}
-
- case Command::SetComputePipeline:
- commands->NextCommand<SetComputePipelineCmd>();
+ case Command::SetComputePipeline: {
+ SetComputePipelineCmd* cmd = commands->NextCommand<SetComputePipelineCmd>();
+ cmd->~SetComputePipelineCmd();
break;
-
- case Command::SetRenderPipeline:
- commands->NextCommand<SetRenderPipelineCmd>();
+ }
+ case Command::SetRenderPipeline: {
+ SetRenderPipelineCmd* cmd = commands->NextCommand<SetRenderPipelineCmd>();
+ cmd->~SetRenderPipelineCmd();
break;
-
- case Command::SetStencilReference:
- commands->NextCommand<SetStencilReferenceCmd>();
+ }
+ case Command::SetStencilReference: {
+ SetStencilReferenceCmd* cmd = commands->NextCommand<SetStencilReferenceCmd>();
+ cmd->~SetStencilReferenceCmd();
break;
-
- case Command::SetViewport:
- commands->NextCommand<SetViewportCmd>();
+ }
+ case Command::SetViewport: {
+ SetViewportCmd* cmd = commands->NextCommand<SetViewportCmd>();
+ cmd->~SetViewportCmd();
break;
-
- case Command::SetScissorRect:
- commands->NextCommand<SetScissorRectCmd>();
+ }
+ case Command::SetScissorRect: {
+ SetScissorRectCmd* cmd = commands->NextCommand<SetScissorRectCmd>();
+ cmd->~SetScissorRectCmd();
break;
-
- case Command::SetBlendConstant:
- commands->NextCommand<SetBlendConstantCmd>();
+ }
+ case Command::SetBlendConstant: {
+ SetBlendConstantCmd* cmd = commands->NextCommand<SetBlendConstantCmd>();
+ cmd->~SetBlendConstantCmd();
break;
-
+ }
case Command::SetBindGroup: {
SetBindGroupCmd* cmd = commands->NextCommand<SetBindGroupCmd>();
if (cmd->dynamicOffsetCount > 0) {
commands->NextData<uint32_t>(cmd->dynamicOffsetCount);
}
+ cmd->~SetBindGroupCmd();
break;
}
-
- case Command::SetIndexBuffer:
- commands->NextCommand<SetIndexBufferCmd>();
+ case Command::SetIndexBuffer: {
+ SetIndexBufferCmd* cmd = commands->NextCommand<SetIndexBufferCmd>();
+ cmd->~SetIndexBufferCmd();
break;
-
+ }
case Command::SetVertexBuffer: {
- commands->NextCommand<SetVertexBufferCmd>();
+ SetVertexBufferCmd* cmd = commands->NextCommand<SetVertexBufferCmd>();
+ cmd->~SetVertexBufferCmd();
break;
}
-
- case Command::WriteBuffer:
- commands->NextCommand<WriteBufferCmd>();
+ case Command::WriteBuffer: {
+ WriteBufferCmd* write = commands->NextCommand<WriteBufferCmd>();
+ commands->NextData<uint8_t>(write->size);
+ write->~WriteBufferCmd();
break;
-
+ }
case Command::WriteTimestamp: {
- commands->NextCommand<WriteTimestampCmd>();
+ WriteTimestampCmd* cmd = commands->NextCommand<WriteTimestampCmd>();
+ cmd->~WriteTimestampCmd();
break;
}
}
}
+ commands->MakeEmptyAsDataWasDestroyed();
+}
+
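FreeCommands runs each command's destructor by hand because the commands live in raw allocator storage rather than in owning containers, so their Ref<> members would never be released otherwise. A minimal standalone sketch of that placement-new / manual-destruction pattern, with made-up names:

    #include <new>
    #include <string>

    struct FakeCmd {
        std::string label;  // stands in for Ref<> members that must be released
    };

    int main() {
        alignas(FakeCmd) unsigned char storage[sizeof(FakeCmd)];
        FakeCmd* cmd = new (storage) FakeCmd{"debug-marker"};  // placement new into raw storage
        // ... iterate over / execute the command ...
        cmd->~FakeCmd();  // must be explicit; nothing else will ever run it
        return 0;
    }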
+void SkipCommand(CommandIterator* commands, Command type) {
+ switch (type) {
+ case Command::BeginComputePass:
+ commands->NextCommand<BeginComputePassCmd>();
+ break;
+
+ case Command::BeginOcclusionQuery:
+ commands->NextCommand<BeginOcclusionQueryCmd>();
+ break;
+
+ case Command::BeginRenderPass:
+ commands->NextCommand<BeginRenderPassCmd>();
+ break;
+
+ case Command::CopyBufferToBuffer:
+ commands->NextCommand<CopyBufferToBufferCmd>();
+ break;
+
+ case Command::CopyBufferToTexture:
+ commands->NextCommand<CopyBufferToTextureCmd>();
+ break;
+
+ case Command::CopyTextureToBuffer:
+ commands->NextCommand<CopyTextureToBufferCmd>();
+ break;
+
+ case Command::CopyTextureToTexture:
+ commands->NextCommand<CopyTextureToTextureCmd>();
+ break;
+
+ case Command::Dispatch:
+ commands->NextCommand<DispatchCmd>();
+ break;
+
+ case Command::DispatchIndirect:
+ commands->NextCommand<DispatchIndirectCmd>();
+ break;
+
+ case Command::Draw:
+ commands->NextCommand<DrawCmd>();
+ break;
+
+ case Command::DrawIndexed:
+ commands->NextCommand<DrawIndexedCmd>();
+ break;
+
+ case Command::DrawIndirect:
+ commands->NextCommand<DrawIndirectCmd>();
+ break;
+
+ case Command::DrawIndexedIndirect:
+ commands->NextCommand<DrawIndexedIndirectCmd>();
+ break;
+
+ case Command::EndComputePass:
+ commands->NextCommand<EndComputePassCmd>();
+ break;
+
+ case Command::EndOcclusionQuery:
+ commands->NextCommand<EndOcclusionQueryCmd>();
+ break;
+
+ case Command::EndRenderPass:
+ commands->NextCommand<EndRenderPassCmd>();
+ break;
+
+ case Command::ExecuteBundles: {
+ auto* cmd = commands->NextCommand<ExecuteBundlesCmd>();
+ commands->NextData<Ref<RenderBundleBase>>(cmd->count);
+ break;
+ }
+
+ case Command::ClearBuffer:
+ commands->NextCommand<ClearBufferCmd>();
+ break;
+
+ case Command::InsertDebugMarker: {
+ InsertDebugMarkerCmd* cmd = commands->NextCommand<InsertDebugMarkerCmd>();
+ commands->NextData<char>(cmd->length + 1);
+ break;
+ }
+
+ case Command::PopDebugGroup:
+ commands->NextCommand<PopDebugGroupCmd>();
+ break;
+
+ case Command::PushDebugGroup: {
+ PushDebugGroupCmd* cmd = commands->NextCommand<PushDebugGroupCmd>();
+ commands->NextData<char>(cmd->length + 1);
+ break;
+ }
+
+ case Command::ResolveQuerySet: {
+ commands->NextCommand<ResolveQuerySetCmd>();
+ break;
+ }
+
+ case Command::SetComputePipeline:
+ commands->NextCommand<SetComputePipelineCmd>();
+ break;
+
+ case Command::SetRenderPipeline:
+ commands->NextCommand<SetRenderPipelineCmd>();
+ break;
+
+ case Command::SetStencilReference:
+ commands->NextCommand<SetStencilReferenceCmd>();
+ break;
+
+ case Command::SetViewport:
+ commands->NextCommand<SetViewportCmd>();
+ break;
+
+ case Command::SetScissorRect:
+ commands->NextCommand<SetScissorRectCmd>();
+ break;
+
+ case Command::SetBlendConstant:
+ commands->NextCommand<SetBlendConstantCmd>();
+ break;
+
+ case Command::SetBindGroup: {
+ SetBindGroupCmd* cmd = commands->NextCommand<SetBindGroupCmd>();
+ if (cmd->dynamicOffsetCount > 0) {
+ commands->NextData<uint32_t>(cmd->dynamicOffsetCount);
+ }
+ break;
+ }
+
+ case Command::SetIndexBuffer:
+ commands->NextCommand<SetIndexBufferCmd>();
+ break;
+
+ case Command::SetVertexBuffer: {
+ commands->NextCommand<SetVertexBufferCmd>();
+ break;
+ }
+
+ case Command::WriteBuffer:
+ commands->NextCommand<WriteBufferCmd>();
+ break;
+
+ case Command::WriteTimestamp: {
+ commands->NextCommand<WriteTimestampCmd>();
+ break;
+ }
+ }
+}
+
+TimestampWrite::TimestampWrite(const Ref<QuerySetBase>& set, uint32_t idx)
+ : querySet(set), queryIndex(idx) {}
+TimestampWrite::TimestampWrite(TimestampWrite&&) = default;
+TimestampWrite::~TimestampWrite() = default;
+
+BeginComputePassCmd::BeginComputePassCmd() = default;
+BeginComputePassCmd::~BeginComputePassCmd() = default;
+
+BeginOcclusionQueryCmd::BeginOcclusionQueryCmd() = default;
+BeginOcclusionQueryCmd::~BeginOcclusionQueryCmd() = default;
+
+RenderPassColorAttachmentInfo::RenderPassColorAttachmentInfo() = default;
+RenderPassColorAttachmentInfo::~RenderPassColorAttachmentInfo() = default;
+
+RenderPassDepthStencilAttachmentInfo::RenderPassDepthStencilAttachmentInfo() = default;
+RenderPassDepthStencilAttachmentInfo::~RenderPassDepthStencilAttachmentInfo() = default;
+
+BeginRenderPassCmd::BeginRenderPassCmd() = default;
+BeginRenderPassCmd::~BeginRenderPassCmd() = default;
+
+BufferCopy::BufferCopy() = default;
+BufferCopy::~BufferCopy() = default;
+
+TextureCopy::TextureCopy() = default;
+TextureCopy::TextureCopy(const TextureCopy&) = default;
+TextureCopy::~TextureCopy() = default;
+
+CopyBufferToBufferCmd::CopyBufferToBufferCmd() = default;
+CopyBufferToBufferCmd::~CopyBufferToBufferCmd() = default;
+
+DispatchIndirectCmd::DispatchIndirectCmd() = default;
+DispatchIndirectCmd::~DispatchIndirectCmd() = default;
+
+DrawIndirectCmd::DrawIndirectCmd() = default;
+DrawIndirectCmd::~DrawIndirectCmd() = default;
+
+EndComputePassCmd::EndComputePassCmd() = default;
+EndComputePassCmd::~EndComputePassCmd() = default;
+
+EndOcclusionQueryCmd::EndOcclusionQueryCmd() = default;
+EndOcclusionQueryCmd::~EndOcclusionQueryCmd() = default;
+
+EndRenderPassCmd::EndRenderPassCmd() = default;
+EndRenderPassCmd::~EndRenderPassCmd() = default;
+
+ClearBufferCmd::ClearBufferCmd() = default;
+ClearBufferCmd::~ClearBufferCmd() = default;
+
+ResolveQuerySetCmd::ResolveQuerySetCmd() = default;
+ResolveQuerySetCmd::~ResolveQuerySetCmd() = default;
+
+SetComputePipelineCmd::SetComputePipelineCmd() = default;
+SetComputePipelineCmd::~SetComputePipelineCmd() = default;
+
+SetRenderPipelineCmd::SetRenderPipelineCmd() = default;
+SetRenderPipelineCmd::~SetRenderPipelineCmd() = default;
+
+SetBindGroupCmd::SetBindGroupCmd() = default;
+SetBindGroupCmd::~SetBindGroupCmd() = default;
+
+SetIndexBufferCmd::SetIndexBufferCmd() = default;
+SetIndexBufferCmd::~SetIndexBufferCmd() = default;
+
+SetVertexBufferCmd::SetVertexBufferCmd() = default;
+SetVertexBufferCmd::~SetVertexBufferCmd() = default;
+
+WriteBufferCmd::WriteBufferCmd() = default;
+WriteBufferCmd::~WriteBufferCmd() = default;
+
+WriteTimestampCmd::WriteTimestampCmd() = default;
+WriteTimestampCmd::~WriteTimestampCmd() = default;
+
} // namespace dawn::native
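The long run of '= default' definitions above pairs with the declarations added in Commands.h. A common motivation for this declare-in-the-header, default-in-the-.cpp pattern is keeping the (potentially heavyweight) member constructors and destructors out of every including translation unit. A generic sketch with illustrative names:

    // widget.h
    #include <vector>

    struct Widget {
        Widget();   // declared here ...
        ~Widget();  // ... so the std::vector destructor is only instantiated in widget.cpp
        std::vector<int> data;
    };

    // widget.cpp
    Widget::Widget() = default;
    Widget::~Widget() = default;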
diff --git a/chromium/third_party/dawn/src/dawn/native/Commands.h b/chromium/third_party/dawn/src/dawn/native/Commands.h
index be465fcf2ce..c7bfa042113 100644
--- a/chromium/third_party/dawn/src/dawn/native/Commands.h
+++ b/chromium/third_party/dawn/src/dawn/native/Commands.h
@@ -15,6 +15,10 @@
#ifndef SRC_DAWN_NATIVE_COMMANDS_H_
#define SRC_DAWN_NATIVE_COMMANDS_H_
+#include <array>
+#include <bitset>
+#include <vector>
+
#include "dawn/common/Constants.h"
#include "dawn/native/AttachmentState.h"
@@ -23,279 +27,344 @@
#include "dawn/native/dawn_platform.h"
-#include <array>
-#include <bitset>
-
namespace dawn::native {
- // Definition of the commands that are present in the CommandIterator given by the
- // CommandBufferBuilder. There are not defined in CommandBuffer.h to break some header
- // dependencies: Ref<Object> needs Object to be defined.
-
- enum class Command {
- BeginComputePass,
- BeginOcclusionQuery,
- BeginRenderPass,
- ClearBuffer,
- CopyBufferToBuffer,
- CopyBufferToTexture,
- CopyTextureToBuffer,
- CopyTextureToTexture,
- Dispatch,
- DispatchIndirect,
- Draw,
- DrawIndexed,
- DrawIndirect,
- DrawIndexedIndirect,
- EndComputePass,
- EndOcclusionQuery,
- EndRenderPass,
- ExecuteBundles,
- InsertDebugMarker,
- PopDebugGroup,
- PushDebugGroup,
- ResolveQuerySet,
- SetComputePipeline,
- SetRenderPipeline,
- SetStencilReference,
- SetViewport,
- SetScissorRect,
- SetBlendConstant,
- SetBindGroup,
- SetIndexBuffer,
- SetVertexBuffer,
- WriteBuffer,
- WriteTimestamp,
- };
-
- struct TimestampWrite {
- Ref<QuerySetBase> querySet;
- uint32_t queryIndex;
- };
-
- struct BeginComputePassCmd {
- std::vector<TimestampWrite> timestampWrites;
- };
-
- struct BeginOcclusionQueryCmd {
- Ref<QuerySetBase> querySet;
- uint32_t queryIndex;
- };
-
- struct RenderPassColorAttachmentInfo {
- Ref<TextureViewBase> view;
- Ref<TextureViewBase> resolveTarget;
- wgpu::LoadOp loadOp;
- wgpu::StoreOp storeOp;
- dawn::native::Color clearColor;
- };
-
- struct RenderPassDepthStencilAttachmentInfo {
- Ref<TextureViewBase> view;
- wgpu::LoadOp depthLoadOp;
- wgpu::StoreOp depthStoreOp;
- wgpu::LoadOp stencilLoadOp;
- wgpu::StoreOp stencilStoreOp;
- float clearDepth;
- uint32_t clearStencil;
- bool depthReadOnly;
- bool stencilReadOnly;
- };
-
- struct BeginRenderPassCmd {
- Ref<AttachmentState> attachmentState;
- ityp::array<ColorAttachmentIndex, RenderPassColorAttachmentInfo, kMaxColorAttachments>
- colorAttachments;
- RenderPassDepthStencilAttachmentInfo depthStencilAttachment;
-
- // Cache the width and height of all attachments for convenience
- uint32_t width;
- uint32_t height;
-
- Ref<QuerySetBase> occlusionQuerySet;
- std::vector<TimestampWrite> timestampWrites;
- };
-
- struct BufferCopy {
- Ref<BufferBase> buffer;
- uint64_t offset;
- uint32_t bytesPerRow;
- uint32_t rowsPerImage;
- };
-
- struct TextureCopy {
- Ref<TextureBase> texture;
- uint32_t mipLevel;
- Origin3D origin; // Texels / array layer
- Aspect aspect;
- };
-
- struct CopyBufferToBufferCmd {
- Ref<BufferBase> source;
- uint64_t sourceOffset;
- Ref<BufferBase> destination;
- uint64_t destinationOffset;
- uint64_t size;
- };
-
- struct CopyBufferToTextureCmd {
- BufferCopy source;
- TextureCopy destination;
- Extent3D copySize; // Texels
- };
-
- struct CopyTextureToBufferCmd {
- TextureCopy source;
- BufferCopy destination;
- Extent3D copySize; // Texels
- };
-
- struct CopyTextureToTextureCmd {
- TextureCopy source;
- TextureCopy destination;
- Extent3D copySize; // Texels
- };
-
- struct DispatchCmd {
- uint32_t x;
- uint32_t y;
- uint32_t z;
- };
-
- struct DispatchIndirectCmd {
- Ref<BufferBase> indirectBuffer;
- uint64_t indirectOffset;
- };
-
- struct DrawCmd {
- uint32_t vertexCount;
- uint32_t instanceCount;
- uint32_t firstVertex;
- uint32_t firstInstance;
- };
-
- struct DrawIndexedCmd {
- uint32_t indexCount;
- uint32_t instanceCount;
- uint32_t firstIndex;
- int32_t baseVertex;
- uint32_t firstInstance;
- };
-
- struct DrawIndirectCmd {
- Ref<BufferBase> indirectBuffer;
- uint64_t indirectOffset;
- };
-
- struct DrawIndexedIndirectCmd {
- Ref<BufferBase> indirectBuffer;
- uint64_t indirectOffset;
- };
-
- struct EndComputePassCmd {
- std::vector<TimestampWrite> timestampWrites;
- };
-
- struct EndOcclusionQueryCmd {
- Ref<QuerySetBase> querySet;
- uint32_t queryIndex;
- };
-
- struct EndRenderPassCmd {
- std::vector<TimestampWrite> timestampWrites;
- };
-
- struct ExecuteBundlesCmd {
- uint32_t count;
- };
-
- struct ClearBufferCmd {
- Ref<BufferBase> buffer;
- uint64_t offset;
- uint64_t size;
- };
-
- struct InsertDebugMarkerCmd {
- uint32_t length;
- };
-
- struct PopDebugGroupCmd {};
-
- struct PushDebugGroupCmd {
- uint32_t length;
- };
-
- struct ResolveQuerySetCmd {
- Ref<QuerySetBase> querySet;
- uint32_t firstQuery;
- uint32_t queryCount;
- Ref<BufferBase> destination;
- uint64_t destinationOffset;
- };
-
- struct SetComputePipelineCmd {
- Ref<ComputePipelineBase> pipeline;
- };
-
- struct SetRenderPipelineCmd {
- Ref<RenderPipelineBase> pipeline;
- };
-
- struct SetStencilReferenceCmd {
- uint32_t reference;
- };
-
- struct SetViewportCmd {
- float x, y, width, height, minDepth, maxDepth;
- };
-
- struct SetScissorRectCmd {
- uint32_t x, y, width, height;
- };
-
- struct SetBlendConstantCmd {
- Color color;
- };
-
- struct SetBindGroupCmd {
- BindGroupIndex index;
- Ref<BindGroupBase> group;
- uint32_t dynamicOffsetCount;
- };
-
- struct SetIndexBufferCmd {
- Ref<BufferBase> buffer;
- wgpu::IndexFormat format;
- uint64_t offset;
- uint64_t size;
- };
-
- struct SetVertexBufferCmd {
- VertexBufferSlot slot;
- Ref<BufferBase> buffer;
- uint64_t offset;
- uint64_t size;
- };
-
- struct WriteBufferCmd {
- Ref<BufferBase> buffer;
- uint64_t offset;
- uint64_t size;
- };
-
- struct WriteTimestampCmd {
- Ref<QuerySetBase> querySet;
- uint32_t queryIndex;
- };
-
- // This needs to be called before the CommandIterator is freed so that the Ref<> present in
- // the commands have a chance to run their destructor and remove internal references.
- class CommandIterator;
- void FreeCommands(CommandIterator* commands);
-
- // Helper function to allow skipping over a command when it is unimplemented, while still
- // consuming the correct amount of data from the command iterator.
- void SkipCommand(CommandIterator* commands, Command type);
+// Definition of the commands that are present in the CommandIterator given by the
+// CommandBufferBuilder. They are not defined in CommandBuffer.h to break some header
+// dependencies: Ref<Object> needs Object to be defined.
+
+enum class Command {
+ BeginComputePass,
+ BeginOcclusionQuery,
+ BeginRenderPass,
+ ClearBuffer,
+ CopyBufferToBuffer,
+ CopyBufferToTexture,
+ CopyTextureToBuffer,
+ CopyTextureToTexture,
+ Dispatch,
+ DispatchIndirect,
+ Draw,
+ DrawIndexed,
+ DrawIndirect,
+ DrawIndexedIndirect,
+ EndComputePass,
+ EndOcclusionQuery,
+ EndRenderPass,
+ ExecuteBundles,
+ InsertDebugMarker,
+ PopDebugGroup,
+ PushDebugGroup,
+ ResolveQuerySet,
+ SetComputePipeline,
+ SetRenderPipeline,
+ SetStencilReference,
+ SetViewport,
+ SetScissorRect,
+ SetBlendConstant,
+ SetBindGroup,
+ SetIndexBuffer,
+ SetVertexBuffer,
+ WriteBuffer,
+ WriteTimestamp,
+};
+
+struct TimestampWrite {
+ TimestampWrite(const Ref<QuerySetBase>& set, uint32_t idx);
+ TimestampWrite(TimestampWrite&&);
+ ~TimestampWrite();
+
+ Ref<QuerySetBase> querySet;
+ uint32_t queryIndex;
+};
+
+struct BeginComputePassCmd {
+ BeginComputePassCmd();
+ ~BeginComputePassCmd();
+
+ std::vector<TimestampWrite> timestampWrites;
+};
+
+struct BeginOcclusionQueryCmd {
+ BeginOcclusionQueryCmd();
+ ~BeginOcclusionQueryCmd();
+
+ Ref<QuerySetBase> querySet;
+ uint32_t queryIndex;
+};
+
+struct RenderPassColorAttachmentInfo {
+ RenderPassColorAttachmentInfo();
+ ~RenderPassColorAttachmentInfo();
+
+ Ref<TextureViewBase> view;
+ Ref<TextureViewBase> resolveTarget;
+ wgpu::LoadOp loadOp;
+ wgpu::StoreOp storeOp;
+ dawn::native::Color clearColor;
+};
+
+struct RenderPassDepthStencilAttachmentInfo {
+ RenderPassDepthStencilAttachmentInfo();
+ ~RenderPassDepthStencilAttachmentInfo();
+
+ Ref<TextureViewBase> view;
+ wgpu::LoadOp depthLoadOp;
+ wgpu::StoreOp depthStoreOp;
+ wgpu::LoadOp stencilLoadOp;
+ wgpu::StoreOp stencilStoreOp;
+ float clearDepth;
+ uint32_t clearStencil;
+ bool depthReadOnly;
+ bool stencilReadOnly;
+};
+
+struct BeginRenderPassCmd {
+ BeginRenderPassCmd();
+ ~BeginRenderPassCmd();
+
+ Ref<AttachmentState> attachmentState;
+ ityp::array<ColorAttachmentIndex, RenderPassColorAttachmentInfo, kMaxColorAttachments>
+ colorAttachments;
+ RenderPassDepthStencilAttachmentInfo depthStencilAttachment;
+
+ // Cache the width and height of all attachments for convenience
+ uint32_t width;
+ uint32_t height;
+
+ Ref<QuerySetBase> occlusionQuerySet;
+ std::vector<TimestampWrite> timestampWrites;
+};
+
+struct BufferCopy {
+ BufferCopy();
+ ~BufferCopy();
+
+ Ref<BufferBase> buffer;
+ uint64_t offset;
+ uint32_t bytesPerRow;
+ uint32_t rowsPerImage;
+};
+
+struct TextureCopy {
+ TextureCopy();
+ TextureCopy(const TextureCopy&);
+ ~TextureCopy();
+
+ Ref<TextureBase> texture;
+ uint32_t mipLevel;
+ Origin3D origin; // Texels / array layer
+ Aspect aspect;
+};
+
+struct CopyBufferToBufferCmd {
+ CopyBufferToBufferCmd();
+ ~CopyBufferToBufferCmd();
+
+ Ref<BufferBase> source;
+ uint64_t sourceOffset;
+ Ref<BufferBase> destination;
+ uint64_t destinationOffset;
+ uint64_t size;
+};
+
+struct CopyBufferToTextureCmd {
+ BufferCopy source;
+ TextureCopy destination;
+ Extent3D copySize; // Texels
+};
+
+struct CopyTextureToBufferCmd {
+ TextureCopy source;
+ BufferCopy destination;
+ Extent3D copySize; // Texels
+};
+
+struct CopyTextureToTextureCmd {
+ TextureCopy source;
+ TextureCopy destination;
+ Extent3D copySize; // Texels
+};
+
+struct DispatchCmd {
+ uint32_t x;
+ uint32_t y;
+ uint32_t z;
+};
+
+struct DispatchIndirectCmd {
+ DispatchIndirectCmd();
+ ~DispatchIndirectCmd();
+
+ Ref<BufferBase> indirectBuffer;
+ uint64_t indirectOffset;
+};
+
+struct DrawCmd {
+ uint32_t vertexCount;
+ uint32_t instanceCount;
+ uint32_t firstVertex;
+ uint32_t firstInstance;
+};
+
+struct DrawIndexedCmd {
+ uint32_t indexCount;
+ uint32_t instanceCount;
+ uint32_t firstIndex;
+ int32_t baseVertex;
+ uint32_t firstInstance;
+};
+
+struct DrawIndirectCmd {
+ DrawIndirectCmd();
+ ~DrawIndirectCmd();
+
+ Ref<BufferBase> indirectBuffer;
+ uint64_t indirectOffset;
+};
+
+struct DrawIndexedIndirectCmd : DrawIndirectCmd {};
+
+struct EndComputePassCmd {
+ EndComputePassCmd();
+ ~EndComputePassCmd();
+
+ std::vector<TimestampWrite> timestampWrites;
+};
+
+struct EndOcclusionQueryCmd {
+ EndOcclusionQueryCmd();
+ ~EndOcclusionQueryCmd();
+
+ Ref<QuerySetBase> querySet;
+ uint32_t queryIndex;
+};
+
+struct EndRenderPassCmd {
+ EndRenderPassCmd();
+ ~EndRenderPassCmd();
+
+ std::vector<TimestampWrite> timestampWrites;
+};
+
+struct ExecuteBundlesCmd {
+ uint32_t count;
+};
+
+struct ClearBufferCmd {
+ ClearBufferCmd();
+ ~ClearBufferCmd();
+
+ Ref<BufferBase> buffer;
+ uint64_t offset;
+ uint64_t size;
+};
+
+struct InsertDebugMarkerCmd {
+ uint32_t length;
+};
+
+struct PopDebugGroupCmd {};
+
+struct PushDebugGroupCmd {
+ uint32_t length;
+};
+
+struct ResolveQuerySetCmd {
+ ResolveQuerySetCmd();
+ ~ResolveQuerySetCmd();
+
+ Ref<QuerySetBase> querySet;
+ uint32_t firstQuery;
+ uint32_t queryCount;
+ Ref<BufferBase> destination;
+ uint64_t destinationOffset;
+};
+
+struct SetComputePipelineCmd {
+ SetComputePipelineCmd();
+ ~SetComputePipelineCmd();
+
+ Ref<ComputePipelineBase> pipeline;
+};
+
+struct SetRenderPipelineCmd {
+ SetRenderPipelineCmd();
+ ~SetRenderPipelineCmd();
+
+ Ref<RenderPipelineBase> pipeline;
+};
+
+struct SetStencilReferenceCmd {
+ uint32_t reference;
+};
+
+struct SetViewportCmd {
+ float x, y, width, height, minDepth, maxDepth;
+};
+
+struct SetScissorRectCmd {
+ uint32_t x, y, width, height;
+};
+
+struct SetBlendConstantCmd {
+ Color color;
+};
+
+struct SetBindGroupCmd {
+ SetBindGroupCmd();
+ ~SetBindGroupCmd();
+
+ BindGroupIndex index;
+ Ref<BindGroupBase> group;
+ uint32_t dynamicOffsetCount;
+};
+
+struct SetIndexBufferCmd {
+ SetIndexBufferCmd();
+ ~SetIndexBufferCmd();
+
+ Ref<BufferBase> buffer;
+ wgpu::IndexFormat format;
+ uint64_t offset;
+ uint64_t size;
+};
+
+struct SetVertexBufferCmd {
+ SetVertexBufferCmd();
+ ~SetVertexBufferCmd();
+
+ VertexBufferSlot slot;
+ Ref<BufferBase> buffer;
+ uint64_t offset;
+ uint64_t size;
+};
+
+struct WriteBufferCmd {
+ WriteBufferCmd();
+ ~WriteBufferCmd();
+
+ Ref<BufferBase> buffer;
+ uint64_t offset;
+ uint64_t size;
+};
+
+struct WriteTimestampCmd {
+ WriteTimestampCmd();
+ ~WriteTimestampCmd();
+
+ Ref<QuerySetBase> querySet;
+ uint32_t queryIndex;
+};
+
+// This needs to be called before the CommandIterator is freed so that the Ref<>s present in
+// the commands have a chance to run their destructors and remove internal references.
+class CommandIterator;
+void FreeCommands(CommandIterator* commands);
+
+// Helper function to allow skipping over a command when it is unimplemented, while still
+// consuming the correct amount of data from the command iterator.
+void SkipCommand(CommandIterator* commands, Command type);
} // namespace dawn::native
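
The command structs above are stored by value in a raw command arena and walked through a CommandIterator, so their destructors (and the Ref<> releases they perform) only run if FreeCommands() visits each command explicitly before the arena goes away. A minimal standalone sketch of that constraint follows; FakeCmd and its shared_ptr are illustrative stand-ins for a command struct and its Ref<> member, not Dawn code.

    // Sketch: why destructors must be invoked explicitly for commands living in raw storage.
    #include <cstddef>
    #include <iostream>
    #include <memory>
    #include <new>
    #include <string>

    struct FakeCmd {
        std::shared_ptr<std::string> resource;  // stand-in for Ref<BufferBase> etc.
    };

    int main() {
        alignas(FakeCmd) std::byte storage[sizeof(FakeCmd)];

        // Commands are placement-new'd into raw storage owned by the allocator.
        FakeCmd* cmd = new (storage) FakeCmd{std::make_shared<std::string>("buffer")};
        std::cout << "use_count while encoded: " << cmd->resource.use_count() << "\n";

        // Releasing the storage alone never runs ~FakeCmd(), so the reference would leak;
        // this explicit destructor call is the role FreeCommands() plays for every command.
        cmd->~FakeCmd();
        return 0;
    }
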
diff --git a/chromium/third_party/dawn/src/dawn/native/CompilationMessages.cpp b/chromium/third_party/dawn/src/dawn/native/CompilationMessages.cpp
index 47c3d0be329..a605b299f8a 100644
--- a/chromium/third_party/dawn/src/dawn/native/CompilationMessages.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/CompilationMessages.cpp
@@ -17,185 +17,191 @@
#include "dawn/common/Assert.h"
#include "dawn/native/dawn_platform.h"
-#include <tint/tint.h>
+#include "tint/tint.h"
namespace dawn::native {
- namespace {
+namespace {
- WGPUCompilationMessageType tintSeverityToMessageType(tint::diag::Severity severity) {
- switch (severity) {
- case tint::diag::Severity::Note:
- return WGPUCompilationMessageType_Info;
- case tint::diag::Severity::Warning:
- return WGPUCompilationMessageType_Warning;
- default:
- return WGPUCompilationMessageType_Error;
- }
- }
-
- } // anonymous namespace
-
- OwnedCompilationMessages::OwnedCompilationMessages() {
- mCompilationInfo.nextInChain = 0;
- mCompilationInfo.messageCount = 0;
- mCompilationInfo.messages = nullptr;
+WGPUCompilationMessageType tintSeverityToMessageType(tint::diag::Severity severity) {
+ switch (severity) {
+ case tint::diag::Severity::Note:
+ return WGPUCompilationMessageType_Info;
+ case tint::diag::Severity::Warning:
+ return WGPUCompilationMessageType_Warning;
+ default:
+ return WGPUCompilationMessageType_Error;
}
+}
+
+} // anonymous namespace
+
+OwnedCompilationMessages::OwnedCompilationMessages() {
+ mCompilationInfo.nextInChain = 0;
+ mCompilationInfo.messageCount = 0;
+ mCompilationInfo.messages = nullptr;
+}
+
+OwnedCompilationMessages::~OwnedCompilationMessages() = default;
+
+void OwnedCompilationMessages::AddMessageForTesting(std::string message,
+ wgpu::CompilationMessageType type,
+ uint64_t lineNum,
+ uint64_t linePos,
+ uint64_t offset,
+ uint64_t length) {
+ // Cannot add messages after GetCompilationInfo has been called.
+ ASSERT(mCompilationInfo.messages == nullptr);
+
+ mMessageStrings.push_back(message);
+ mMessages.push_back({nullptr, nullptr, static_cast<WGPUCompilationMessageType>(type), lineNum,
+ linePos, offset, length});
+}
+
+void OwnedCompilationMessages::AddMessage(const tint::diag::Diagnostic& diagnostic) {
+ // Cannot add messages after GetCompilationInfo has been called.
+ ASSERT(mCompilationInfo.messages == nullptr);
+
+ // Tint line and column values are 1-based.
+ uint64_t lineNum = diagnostic.source.range.begin.line;
+ uint64_t linePos = diagnostic.source.range.begin.column;
+ // The offset is 0-based.
+ uint64_t offset = 0;
+ uint64_t length = 0;
+
+ if (lineNum && linePos && diagnostic.source.file) {
+ const auto& lines = diagnostic.source.file->content.lines;
+ size_t i = 0;
+ // To find the offset of the message position, loop through each of the first lineNum-1
+        // lines and add its length (+1 to account for the line break) to the offset.
+ for (; i < lineNum - 1; ++i) {
+ offset += lines[i].length() + 1;
+ }
- void OwnedCompilationMessages::AddMessageForTesting(std::string message,
- wgpu::CompilationMessageType type,
- uint64_t lineNum,
- uint64_t linePos,
- uint64_t offset,
- uint64_t length) {
- // Cannot add messages after GetCompilationInfo has been called.
- ASSERT(mCompilationInfo.messages == nullptr);
-
- mMessageStrings.push_back(message);
- mMessages.push_back({nullptr, nullptr, static_cast<WGPUCompilationMessageType>(type),
- lineNum, linePos, offset, length});
- }
-
- void OwnedCompilationMessages::AddMessage(const tint::diag::Diagnostic& diagnostic) {
- // Cannot add messages after GetCompilationInfo has been called.
- ASSERT(mCompilationInfo.messages == nullptr);
-
- // Tint line and column values are 1-based.
- uint64_t lineNum = diagnostic.source.range.begin.line;
- uint64_t linePos = diagnostic.source.range.begin.column;
- // The offset is 0-based.
- uint64_t offset = 0;
- uint64_t length = 0;
-
- if (lineNum && linePos && diagnostic.source.file) {
- const auto& lines = diagnostic.source.file->content.lines;
- size_t i = 0;
- // To find the offset of the message position, loop through each of the first lineNum-1
- // lines and add it's length (+1 to account for the line break) to the offset.
- for (; i < lineNum - 1; ++i) {
- offset += lines[i].length() + 1;
- }
-
- // If the end line is on a different line from the beginning line, add the length of the
- // lines in between to the ending offset.
- uint64_t endLineNum = diagnostic.source.range.end.line;
- uint64_t endLinePos = diagnostic.source.range.end.column;
-
- // If the range has a valid start but the end it not specified, clamp it to the start.
- if (endLineNum == 0 || endLinePos == 0) {
- endLineNum = lineNum;
- endLinePos = linePos;
- }
+ // If the end line is on a different line from the beginning line, add the length of the
+ // lines in between to the ending offset.
+ uint64_t endLineNum = diagnostic.source.range.end.line;
+ uint64_t endLinePos = diagnostic.source.range.end.column;
- // Negative ranges aren't allowed
- ASSERT(endLineNum >= lineNum);
+        // If the range has a valid start but the end is not specified, clamp it to the start.
+ if (endLineNum == 0 || endLinePos == 0) {
+ endLineNum = lineNum;
+ endLinePos = linePos;
+ }
- uint64_t endOffset = offset;
- for (; i < endLineNum - 1; ++i) {
- endOffset += lines[i].length() + 1;
- }
+ // Negative ranges aren't allowed
+ ASSERT(endLineNum >= lineNum);
- // Add the line positions to the offset and endOffset to get their final positions
- // within the code string.
- offset += linePos - 1;
- endOffset += endLinePos - 1;
+ uint64_t endOffset = offset;
+ for (; i < endLineNum - 1; ++i) {
+ endOffset += lines[i].length() + 1;
+ }
- // Negative ranges aren't allowed
- ASSERT(endOffset >= offset);
+ // Add the line positions to the offset and endOffset to get their final positions
+ // within the code string.
+ offset += linePos - 1;
+ endOffset += endLinePos - 1;
- // The length of the message is the difference between the starting offset and the
- // ending offset.
- length = endOffset - offset;
- }
+ // Negative ranges aren't allowed
+ ASSERT(endOffset >= offset);
- if (diagnostic.code) {
- mMessageStrings.push_back(std::string(diagnostic.code) + ": " + diagnostic.message);
- } else {
- mMessageStrings.push_back(diagnostic.message);
- }
+ // The length of the message is the difference between the starting offset and the
+ // ending offset.
+ length = endOffset - offset;
+ }
- mMessages.push_back({nullptr, nullptr, tintSeverityToMessageType(diagnostic.severity),
- lineNum, linePos, offset, length});
+ if (diagnostic.code) {
+ mMessageStrings.push_back(std::string(diagnostic.code) + ": " + diagnostic.message);
+ } else {
+ mMessageStrings.push_back(diagnostic.message);
}
- void OwnedCompilationMessages::AddMessages(const tint::diag::List& diagnostics) {
- // Cannot add messages after GetCompilationInfo has been called.
- ASSERT(mCompilationInfo.messages == nullptr);
+ mMessages.push_back({nullptr, nullptr, tintSeverityToMessageType(diagnostic.severity), lineNum,
+ linePos, offset, length});
+}
- for (const auto& diag : diagnostics) {
- AddMessage(diag);
- }
+void OwnedCompilationMessages::AddMessages(const tint::diag::List& diagnostics) {
+ // Cannot add messages after GetCompilationInfo has been called.
+ ASSERT(mCompilationInfo.messages == nullptr);
- AddFormattedTintMessages(diagnostics);
+ for (const auto& diag : diagnostics) {
+ AddMessage(diag);
}
- void OwnedCompilationMessages::ClearMessages() {
- // Cannot clear messages after GetCompilationInfo has been called.
- ASSERT(mCompilationInfo.messages == nullptr);
+ AddFormattedTintMessages(diagnostics);
+}
- mMessageStrings.clear();
- mMessages.clear();
- }
-
- const WGPUCompilationInfo* OwnedCompilationMessages::GetCompilationInfo() {
- mCompilationInfo.messageCount = mMessages.size();
- mCompilationInfo.messages = mMessages.data();
+void OwnedCompilationMessages::ClearMessages() {
+ // Cannot clear messages after GetCompilationInfo has been called.
+ ASSERT(mCompilationInfo.messages == nullptr);
- // Ensure every message points at the correct message string. Cannot do this earlier, since
- // vector reallocations may move the pointers around.
- for (size_t i = 0; i < mCompilationInfo.messageCount; ++i) {
- WGPUCompilationMessage& message = mMessages[i];
- std::string& messageString = mMessageStrings[i];
- message.message = messageString.c_str();
- }
+ mMessageStrings.clear();
+ mMessages.clear();
+}
- return &mCompilationInfo;
- }
+const WGPUCompilationInfo* OwnedCompilationMessages::GetCompilationInfo() {
+ mCompilationInfo.messageCount = mMessages.size();
+ mCompilationInfo.messages = mMessages.data();
- const std::vector<std::string>& OwnedCompilationMessages::GetFormattedTintMessages() {
- return mFormattedTintMessages;
+ // Ensure every message points at the correct message string. Cannot do this earlier, since
+ // vector reallocations may move the pointers around.
+ for (size_t i = 0; i < mCompilationInfo.messageCount; ++i) {
+ WGPUCompilationMessage& message = mMessages[i];
+ std::string& messageString = mMessageStrings[i];
+ message.message = messageString.c_str();
}
- void OwnedCompilationMessages::AddFormattedTintMessages(const tint::diag::List& diagnostics) {
- tint::diag::List messageList;
- size_t warningCount = 0;
- size_t errorCount = 0;
- for (auto& diag : diagnostics) {
- switch (diag.severity) {
- case (tint::diag::Severity::Fatal):
- case (tint::diag::Severity::Error):
- case (tint::diag::Severity::InternalCompilerError): {
- errorCount++;
- messageList.add(tint::diag::Diagnostic(diag));
- break;
- }
- case (tint::diag::Severity::Warning): {
- warningCount++;
- messageList.add(tint::diag::Diagnostic(diag));
- break;
- }
- default:
- break;
+ return &mCompilationInfo;
+}
+
+const std::vector<std::string>& OwnedCompilationMessages::GetFormattedTintMessages() {
+ return mFormattedTintMessages;
+}
+
+void OwnedCompilationMessages::AddFormattedTintMessages(const tint::diag::List& diagnostics) {
+ tint::diag::List messageList;
+ size_t warningCount = 0;
+ size_t errorCount = 0;
+ for (auto& diag : diagnostics) {
+ switch (diag.severity) {
+ case (tint::diag::Severity::Fatal):
+ case (tint::diag::Severity::Error):
+ case (tint::diag::Severity::InternalCompilerError): {
+ errorCount++;
+ messageList.add(tint::diag::Diagnostic(diag));
+ break;
}
- }
- if (errorCount == 0 && warningCount == 0) {
- return;
- }
- tint::diag::Formatter::Style style;
- style.print_newline_at_end = false;
- std::ostringstream t;
- if (errorCount > 0) {
- t << errorCount << " error(s) ";
- if (warningCount > 0) {
- t << "and ";
+ case (tint::diag::Severity::Warning): {
+ warningCount++;
+ messageList.add(tint::diag::Diagnostic(diag));
+ break;
+ }
+ case (tint::diag::Severity::Note): {
+ messageList.add(tint::diag::Diagnostic(diag));
+ break;
}
+ default:
+ break;
}
+ }
+ if (errorCount == 0 && warningCount == 0) {
+ return;
+ }
+ tint::diag::Formatter::Style style;
+ style.print_newline_at_end = false;
+ std::ostringstream t;
+ if (errorCount > 0) {
+ t << errorCount << " error(s) ";
if (warningCount > 0) {
- t << warningCount << " warning(s) ";
+ t << "and ";
}
- t << "generated while compiling the shader:" << std::endl
- << tint::diag::Formatter{style}.format(messageList);
- mFormattedTintMessages.push_back(t.str());
}
+ if (warningCount > 0) {
+ t << warningCount << " warning(s) ";
+ }
+ t << "generated while compiling the shader:" << std::endl
+ << tint::diag::Formatter{style}.format(messageList);
+ mFormattedTintMessages.push_back(t.str());
+}
} // namespace dawn::native
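
The arithmetic in AddMessage() above turns Tint's 1-based (line, column) positions into the 0-based offset and length reported in a WGPUCompilationMessage. Below is a standalone sketch of the same computation, assuming '\n' line breaks (which is what the +1 per line accounts for); it is illustrative code, not the Dawn API.

    #include <cstdint>
    #include <iostream>
    #include <string>
    #include <vector>

    // Map a 1-based (line, column) position to a 0-based byte offset into the source.
    uint64_t ToOffset(const std::vector<std::string>& lines, uint64_t lineNum, uint64_t linePos) {
        uint64_t offset = 0;
        for (uint64_t i = 0; i + 1 < lineNum; ++i) {
            offset += lines[i].length() + 1;  // +1 for the line break
        }
        return offset + (linePos - 1);  // columns are 1-based
    }

    int main() {
        std::vector<std::string> lines = {"fn main() {", "  let x = 1;", "}"};
        uint64_t offset = ToOffset(lines, 2, 7);            // start of "x = 1"
        uint64_t length = ToOffset(lines, 2, 12) - offset;  // range [2:7, 2:12)
        std::cout << offset << " " << length << "\n";       // prints "18 5"
        return 0;
    }
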
diff --git a/chromium/third_party/dawn/src/dawn/native/CompilationMessages.h b/chromium/third_party/dawn/src/dawn/native/CompilationMessages.h
index 4697045ddcb..13d30b2ddc1 100644
--- a/chromium/third_party/dawn/src/dawn/native/CompilationMessages.h
+++ b/chromium/third_party/dawn/src/dawn/native/CompilationMessages.h
@@ -15,47 +15,47 @@
#ifndef SRC_DAWN_NATIVE_COMPILATIONMESSAGES_H_
#define SRC_DAWN_NATIVE_COMPILATIONMESSAGES_H_
+#include <string>
+#include <vector>
+
#include "dawn/native/dawn_platform.h"
#include "dawn/common/NonCopyable.h"
-#include <string>
-#include <vector>
-
namespace tint::diag {
- class Diagnostic;
- class List;
+class Diagnostic;
+class List;
} // namespace tint::diag
namespace dawn::native {
- class OwnedCompilationMessages : public NonCopyable {
- public:
- OwnedCompilationMessages();
- ~OwnedCompilationMessages() = default;
-
- void AddMessageForTesting(
- std::string message,
- wgpu::CompilationMessageType type = wgpu::CompilationMessageType::Info,
- uint64_t lineNum = 0,
- uint64_t linePos = 0,
- uint64_t offset = 0,
- uint64_t length = 0);
- void AddMessages(const tint::diag::List& diagnostics);
- void ClearMessages();
-
- const WGPUCompilationInfo* GetCompilationInfo();
- const std::vector<std::string>& GetFormattedTintMessages();
-
- private:
- void AddMessage(const tint::diag::Diagnostic& diagnostic);
- void AddFormattedTintMessages(const tint::diag::List& diagnostics);
-
- WGPUCompilationInfo mCompilationInfo;
- std::vector<std::string> mMessageStrings;
- std::vector<WGPUCompilationMessage> mMessages;
- std::vector<std::string> mFormattedTintMessages;
- };
+class OwnedCompilationMessages : public NonCopyable {
+ public:
+ OwnedCompilationMessages();
+ ~OwnedCompilationMessages();
+
+ void AddMessageForTesting(
+ std::string message,
+ wgpu::CompilationMessageType type = wgpu::CompilationMessageType::Info,
+ uint64_t lineNum = 0,
+ uint64_t linePos = 0,
+ uint64_t offset = 0,
+ uint64_t length = 0);
+ void AddMessages(const tint::diag::List& diagnostics);
+ void ClearMessages();
+
+ const WGPUCompilationInfo* GetCompilationInfo();
+ const std::vector<std::string>& GetFormattedTintMessages();
+
+ private:
+ void AddMessage(const tint::diag::Diagnostic& diagnostic);
+ void AddFormattedTintMessages(const tint::diag::List& diagnostics);
+
+ WGPUCompilationInfo mCompilationInfo;
+ std::vector<std::string> mMessageStrings;
+ std::vector<WGPUCompilationMessage> mMessages;
+ std::vector<std::string> mFormattedTintMessages;
+};
} // namespace dawn::native
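
The header keeps the owned strings (mMessageStrings) separate from the WGPUCompilationMessage structs, and as the .cpp above shows, the const char* message pointers are only filled in inside GetCompilationInfo(), because growing either vector can relocate its elements. A small standalone sketch of that design choice follows; the names are illustrative, not Dawn's.

    #include <iostream>
    #include <string>
    #include <vector>

    struct MessageView {
        const char* text = nullptr;  // stand-in for WGPUCompilationMessage::message
    };

    int main() {
        std::vector<std::string> owned;  // owns the bytes
        std::vector<MessageView> views;  // exposes raw pointers to callers

        owned.push_back("error: something went wrong");
        owned.push_back("warning: something looks off");

        // Wire up the pointers only once the owning vector has stopped growing, so no
        // later push_back can relocate the strings the views point at.
        views.resize(owned.size());
        for (size_t i = 0; i < owned.size(); ++i) {
            views[i].text = owned[i].c_str();
        }

        for (const MessageView& v : views) {
            std::cout << v.text << "\n";
        }
        return 0;
    }
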
diff --git a/chromium/third_party/dawn/src/dawn/native/ComputePassEncoder.cpp b/chromium/third_party/dawn/src/dawn/native/ComputePassEncoder.cpp
index e825ef26bec..e70aea31935 100644
--- a/chromium/third_party/dawn/src/dawn/native/ComputePassEncoder.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/ComputePassEncoder.cpp
@@ -30,21 +30,21 @@
namespace dawn::native {
- namespace {
+namespace {
- ResultOrError<ComputePipelineBase*> GetOrCreateIndirectDispatchValidationPipeline(
- DeviceBase* device) {
- InternalPipelineStore* store = device->GetInternalPipelineStore();
+ResultOrError<ComputePipelineBase*> GetOrCreateIndirectDispatchValidationPipeline(
+ DeviceBase* device) {
+ InternalPipelineStore* store = device->GetInternalPipelineStore();
- if (store->dispatchIndirectValidationPipeline != nullptr) {
- return store->dispatchIndirectValidationPipeline.Get();
- }
+ if (store->dispatchIndirectValidationPipeline != nullptr) {
+ return store->dispatchIndirectValidationPipeline.Get();
+ }
- // TODO(https://crbug.com/dawn/1108): Propagate validation feedback from this
- // shader in various failure modes.
- // Type 'bool' cannot be used in storage class 'uniform' as it is non-host-shareable.
- Ref<ShaderModuleBase> shaderModule;
- DAWN_TRY_ASSIGN(shaderModule, utils::CreateShaderModule(device, R"(
+ // TODO(https://crbug.com/dawn/1108): Propagate validation feedback from this
+ // shader in various failure modes.
+ // Type 'bool' cannot be used in storage class 'uniform' as it is non-host-shareable.
+ Ref<ShaderModuleBase> shaderModule;
+ DAWN_TRY_ASSIGN(shaderModule, utils::CreateShaderModule(device, R"(
struct UniformParams {
maxComputeWorkgroupsPerDimension: u32;
clientOffsetInU32: u32;
@@ -64,7 +64,7 @@ namespace dawn::native {
@group(0) @binding(1) var<storage, read_write> clientParams: IndirectParams;
@group(0) @binding(2) var<storage, write> validatedParams: ValidatedParams;
- @stage(compute) @workgroup_size(1, 1, 1)
+ @compute @workgroup_size(1, 1, 1)
fn main() {
for (var i = 0u; i < 3u; i = i + 1u) {
var numWorkgroups = clientParams.data[uniformParams.clientOffsetInU32 + i];
@@ -81,405 +81,412 @@ namespace dawn::native {
}
)"));
- Ref<BindGroupLayoutBase> bindGroupLayout;
- DAWN_TRY_ASSIGN(
- bindGroupLayout,
- utils::MakeBindGroupLayout(
- device,
- {
- {0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Uniform},
- {1, wgpu::ShaderStage::Compute, kInternalStorageBufferBinding},
- {2, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage},
- },
- /* allowInternalBinding */ true));
-
- Ref<PipelineLayoutBase> pipelineLayout;
- DAWN_TRY_ASSIGN(pipelineLayout,
- utils::MakeBasicPipelineLayout(device, bindGroupLayout));
-
- ComputePipelineDescriptor computePipelineDescriptor = {};
- computePipelineDescriptor.layout = pipelineLayout.Get();
- computePipelineDescriptor.compute.module = shaderModule.Get();
- computePipelineDescriptor.compute.entryPoint = "main";
-
- DAWN_TRY_ASSIGN(store->dispatchIndirectValidationPipeline,
- device->CreateComputePipeline(&computePipelineDescriptor));
-
- return store->dispatchIndirectValidationPipeline.Get();
- }
-
- } // namespace
-
- ComputePassEncoder::ComputePassEncoder(DeviceBase* device,
- const ComputePassDescriptor* descriptor,
- CommandEncoder* commandEncoder,
- EncodingContext* encodingContext,
- std::vector<TimestampWrite> timestampWritesAtEnd)
- : ProgrammableEncoder(device, descriptor->label, encodingContext),
- mCommandEncoder(commandEncoder),
- mTimestampWritesAtEnd(std::move(timestampWritesAtEnd)) {
- TrackInDevice();
- }
-
- // static
- Ref<ComputePassEncoder> ComputePassEncoder::Create(
- DeviceBase* device,
- const ComputePassDescriptor* descriptor,
- CommandEncoder* commandEncoder,
- EncodingContext* encodingContext,
- std::vector<TimestampWrite> timestampWritesAtEnd) {
- return AcquireRef(new ComputePassEncoder(device, descriptor, commandEncoder,
- encodingContext, std::move(timestampWritesAtEnd)));
- }
-
- ComputePassEncoder::ComputePassEncoder(DeviceBase* device,
- CommandEncoder* commandEncoder,
- EncodingContext* encodingContext,
- ErrorTag errorTag)
- : ProgrammableEncoder(device, encodingContext, errorTag), mCommandEncoder(commandEncoder) {
- }
-
- // static
- Ref<ComputePassEncoder> ComputePassEncoder::MakeError(DeviceBase* device,
- CommandEncoder* commandEncoder,
- EncodingContext* encodingContext) {
- return AcquireRef(
- new ComputePassEncoder(device, commandEncoder, encodingContext, ObjectBase::kError));
- }
-
- void ComputePassEncoder::DestroyImpl() {
- // Ensure that the pass has exited. This is done for passes only since validation requires
- // they exit before destruction while bundles do not.
- mEncodingContext->EnsurePassExited(this);
- }
-
- ObjectType ComputePassEncoder::GetType() const {
- return ObjectType::ComputePassEncoder;
- }
-
- void ComputePassEncoder::APIEnd() {
- if (mEncodingContext->TryEncode(
- this,
- [&](CommandAllocator* allocator) -> MaybeError {
- if (IsValidationEnabled()) {
- DAWN_TRY(ValidateProgrammableEncoderEnd());
- }
-
- EndComputePassCmd* cmd =
- allocator->Allocate<EndComputePassCmd>(Command::EndComputePass);
- // The query availability has already been updated at the beginning of compute
- // pass, and no need to do update here.
- cmd->timestampWrites = std::move(mTimestampWritesAtEnd);
-
- return {};
- },
- "encoding %s.End().", this)) {
- mEncodingContext->ExitComputePass(this, mUsageTracker.AcquireResourceUsage());
- }
- }
-
- void ComputePassEncoder::APIEndPass() {
- GetDevice()->EmitDeprecationWarning("endPass() has been deprecated. Use end() instead.");
- APIEnd();
- }
-
- void ComputePassEncoder::APIDispatch(uint32_t workgroupCountX,
- uint32_t workgroupCountY,
- uint32_t workgroupCountZ) {
- mEncodingContext->TryEncode(
+ Ref<BindGroupLayoutBase> bindGroupLayout;
+ DAWN_TRY_ASSIGN(bindGroupLayout,
+ utils::MakeBindGroupLayout(
+ device,
+ {
+ {0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Uniform},
+ {1, wgpu::ShaderStage::Compute, kInternalStorageBufferBinding},
+ {2, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage},
+ },
+ /* allowInternalBinding */ true));
+
+ Ref<PipelineLayoutBase> pipelineLayout;
+ DAWN_TRY_ASSIGN(pipelineLayout, utils::MakeBasicPipelineLayout(device, bindGroupLayout));
+
+ ComputePipelineDescriptor computePipelineDescriptor = {};
+ computePipelineDescriptor.layout = pipelineLayout.Get();
+ computePipelineDescriptor.compute.module = shaderModule.Get();
+ computePipelineDescriptor.compute.entryPoint = "main";
+
+ DAWN_TRY_ASSIGN(store->dispatchIndirectValidationPipeline,
+ device->CreateComputePipeline(&computePipelineDescriptor));
+
+ return store->dispatchIndirectValidationPipeline.Get();
+}
+
+} // namespace
+
+ComputePassEncoder::ComputePassEncoder(DeviceBase* device,
+ const ComputePassDescriptor* descriptor,
+ CommandEncoder* commandEncoder,
+ EncodingContext* encodingContext,
+ std::vector<TimestampWrite> timestampWritesAtEnd)
+ : ProgrammableEncoder(device, descriptor->label, encodingContext),
+ mCommandEncoder(commandEncoder),
+ mTimestampWritesAtEnd(std::move(timestampWritesAtEnd)) {
+ TrackInDevice();
+}
+
+// static
+Ref<ComputePassEncoder> ComputePassEncoder::Create(
+ DeviceBase* device,
+ const ComputePassDescriptor* descriptor,
+ CommandEncoder* commandEncoder,
+ EncodingContext* encodingContext,
+ std::vector<TimestampWrite> timestampWritesAtEnd) {
+ return AcquireRef(new ComputePassEncoder(device, descriptor, commandEncoder, encodingContext,
+ std::move(timestampWritesAtEnd)));
+}
+
+ComputePassEncoder::ComputePassEncoder(DeviceBase* device,
+ CommandEncoder* commandEncoder,
+ EncodingContext* encodingContext,
+ ErrorTag errorTag)
+ : ProgrammableEncoder(device, encodingContext, errorTag), mCommandEncoder(commandEncoder) {}
+
+// static
+Ref<ComputePassEncoder> ComputePassEncoder::MakeError(DeviceBase* device,
+ CommandEncoder* commandEncoder,
+ EncodingContext* encodingContext) {
+ return AcquireRef(
+ new ComputePassEncoder(device, commandEncoder, encodingContext, ObjectBase::kError));
+}
+
+void ComputePassEncoder::DestroyImpl() {
+ // Ensure that the pass has exited. This is done for passes only since validation requires
+ // they exit before destruction while bundles do not.
+ mEncodingContext->EnsurePassExited(this);
+}
+
+ObjectType ComputePassEncoder::GetType() const {
+ return ObjectType::ComputePassEncoder;
+}
+
+void ComputePassEncoder::APIEnd() {
+ if (mEncodingContext->TryEncode(
this,
[&](CommandAllocator* allocator) -> MaybeError {
if (IsValidationEnabled()) {
- DAWN_TRY(mCommandBufferState.ValidateCanDispatch());
-
- uint32_t workgroupsPerDimension =
- GetDevice()->GetLimits().v1.maxComputeWorkgroupsPerDimension;
-
- DAWN_INVALID_IF(workgroupCountX > workgroupsPerDimension,
- "Dispatch workgroup count X (%u) exceeds max compute "
- "workgroups per dimension (%u).",
- workgroupCountX, workgroupsPerDimension);
-
- DAWN_INVALID_IF(workgroupCountY > workgroupsPerDimension,
- "Dispatch workgroup count Y (%u) exceeds max compute "
- "workgroups per dimension (%u).",
- workgroupCountY, workgroupsPerDimension);
-
- DAWN_INVALID_IF(workgroupCountZ > workgroupsPerDimension,
- "Dispatch workgroup count Z (%u) exceeds max compute "
- "workgroups per dimension (%u).",
- workgroupCountZ, workgroupsPerDimension);
+ DAWN_TRY(ValidateProgrammableEncoderEnd());
}
- // Record the synchronization scope for Dispatch, which is just the current
- // bindgroups.
- AddDispatchSyncScope();
-
- DispatchCmd* dispatch = allocator->Allocate<DispatchCmd>(Command::Dispatch);
- dispatch->x = workgroupCountX;
- dispatch->y = workgroupCountY;
- dispatch->z = workgroupCountZ;
+ EndComputePassCmd* cmd =
+ allocator->Allocate<EndComputePassCmd>(Command::EndComputePass);
+                // The query availability has already been updated at the beginning of the
+                // compute pass, so there is no need to update it here.
+ cmd->timestampWrites = std::move(mTimestampWritesAtEnd);
return {};
},
- "encoding %s.Dispatch(%u, %u, %u).", this, workgroupCountX, workgroupCountY,
- workgroupCountZ);
+ "encoding %s.End().", this)) {
+ mEncodingContext->ExitComputePass(this, mUsageTracker.AcquireResourceUsage());
}
+}
+
+void ComputePassEncoder::APIEndPass() {
+ GetDevice()->EmitDeprecationWarning("endPass() has been deprecated. Use end() instead.");
+ APIEnd();
+}
+
+void ComputePassEncoder::APIDispatch(uint32_t workgroupCountX,
+ uint32_t workgroupCountY,
+ uint32_t workgroupCountZ) {
+ GetDevice()->EmitDeprecationWarning(
+ "dispatch() has been deprecated. Use dispatchWorkgroups() instead.");
+ APIDispatchWorkgroups(workgroupCountX, workgroupCountY, workgroupCountZ);
+}
+
+void ComputePassEncoder::APIDispatchWorkgroups(uint32_t workgroupCountX,
+ uint32_t workgroupCountY,
+ uint32_t workgroupCountZ) {
+ mEncodingContext->TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (IsValidationEnabled()) {
+ DAWN_TRY(mCommandBufferState.ValidateCanDispatch());
+
+ uint32_t workgroupsPerDimension =
+ GetDevice()->GetLimits().v1.maxComputeWorkgroupsPerDimension;
+
+ DAWN_INVALID_IF(workgroupCountX > workgroupsPerDimension,
+ "Dispatch workgroup count X (%u) exceeds max compute "
+ "workgroups per dimension (%u).",
+ workgroupCountX, workgroupsPerDimension);
+
+ DAWN_INVALID_IF(workgroupCountY > workgroupsPerDimension,
+ "Dispatch workgroup count Y (%u) exceeds max compute "
+ "workgroups per dimension (%u).",
+ workgroupCountY, workgroupsPerDimension);
+
+ DAWN_INVALID_IF(workgroupCountZ > workgroupsPerDimension,
+ "Dispatch workgroup count Z (%u) exceeds max compute "
+ "workgroups per dimension (%u).",
+ workgroupCountZ, workgroupsPerDimension);
+ }
- ResultOrError<std::pair<Ref<BufferBase>, uint64_t>>
- ComputePassEncoder::TransformIndirectDispatchBuffer(Ref<BufferBase> indirectBuffer,
- uint64_t indirectOffset) {
- DeviceBase* device = GetDevice();
-
- const bool shouldDuplicateNumWorkgroups =
- device->ShouldDuplicateNumWorkgroupsForDispatchIndirect(
- mCommandBufferState.GetComputePipeline());
- if (!IsValidationEnabled() && !shouldDuplicateNumWorkgroups) {
- return std::make_pair(indirectBuffer, indirectOffset);
- }
-
- // Save the previous command buffer state so it can be restored after the
- // validation inserts additional commands.
- CommandBufferStateTracker previousState = mCommandBufferState;
-
- auto* const store = device->GetInternalPipelineStore();
-
- Ref<ComputePipelineBase> validationPipeline;
- DAWN_TRY_ASSIGN(validationPipeline, GetOrCreateIndirectDispatchValidationPipeline(device));
-
- Ref<BindGroupLayoutBase> layout;
- DAWN_TRY_ASSIGN(layout, validationPipeline->GetBindGroupLayout(0));
-
- uint32_t storageBufferOffsetAlignment =
- device->GetLimits().v1.minStorageBufferOffsetAlignment;
-
- // Let the offset be the indirectOffset, aligned down to |storageBufferOffsetAlignment|.
- const uint32_t clientOffsetFromAlignedBoundary =
- indirectOffset % storageBufferOffsetAlignment;
- const uint64_t clientOffsetAlignedDown = indirectOffset - clientOffsetFromAlignedBoundary;
- const uint64_t clientIndirectBindingOffset = clientOffsetAlignedDown;
-
- // Let the size of the binding be the additional offset, plus the size.
- const uint64_t clientIndirectBindingSize =
- kDispatchIndirectSize + clientOffsetFromAlignedBoundary;
-
- // Neither 'enableValidation' nor 'duplicateNumWorkgroups' can be declared as 'bool' as
- // currently in WGSL type 'bool' cannot be used in storage class 'uniform' as 'it is
- // non-host-shareable'.
- struct UniformParams {
- uint32_t maxComputeWorkgroupsPerDimension;
- uint32_t clientOffsetInU32;
- uint32_t enableValidation;
- uint32_t duplicateNumWorkgroups;
- };
-
- // Create a uniform buffer to hold parameters for the shader.
- Ref<BufferBase> uniformBuffer;
- {
- UniformParams params;
- params.maxComputeWorkgroupsPerDimension =
- device->GetLimits().v1.maxComputeWorkgroupsPerDimension;
- params.clientOffsetInU32 = clientOffsetFromAlignedBoundary / sizeof(uint32_t);
- params.enableValidation = static_cast<uint32_t>(IsValidationEnabled());
- params.duplicateNumWorkgroups = static_cast<uint32_t>(shouldDuplicateNumWorkgroups);
-
- DAWN_TRY_ASSIGN(uniformBuffer, utils::CreateBufferFromData(
- device, wgpu::BufferUsage::Uniform, {params}));
- }
-
- // Reserve space in the scratch buffer to hold the validated indirect params.
- ScratchBuffer& scratchBuffer = store->scratchIndirectStorage;
- const uint64_t scratchBufferSize =
- shouldDuplicateNumWorkgroups ? 2 * kDispatchIndirectSize : kDispatchIndirectSize;
- DAWN_TRY(scratchBuffer.EnsureCapacity(scratchBufferSize));
- Ref<BufferBase> validatedIndirectBuffer = scratchBuffer.GetBuffer();
-
- Ref<BindGroupBase> validationBindGroup;
- ASSERT(indirectBuffer->GetUsage() & kInternalStorageBuffer);
- DAWN_TRY_ASSIGN(validationBindGroup,
- utils::MakeBindGroup(device, layout,
- {
- {0, uniformBuffer},
- {1, indirectBuffer, clientIndirectBindingOffset,
- clientIndirectBindingSize},
- {2, validatedIndirectBuffer, 0, scratchBufferSize},
- }));
-
- // Issue commands to validate the indirect buffer.
- APISetPipeline(validationPipeline.Get());
- APISetBindGroup(0, validationBindGroup.Get());
- APIDispatch(1);
-
- // Restore the state.
- RestoreCommandBufferState(std::move(previousState));
-
- // Return the new indirect buffer and indirect buffer offset.
- return std::make_pair(std::move(validatedIndirectBuffer), uint64_t(0));
+ // Record the synchronization scope for Dispatch, which is just the current
+ // bindgroups.
+ AddDispatchSyncScope();
+
+ DispatchCmd* dispatch = allocator->Allocate<DispatchCmd>(Command::Dispatch);
+ dispatch->x = workgroupCountX;
+ dispatch->y = workgroupCountY;
+ dispatch->z = workgroupCountZ;
+
+ return {};
+ },
+ "encoding %s.DispatchWorkgroups(%u, %u, %u).", this, workgroupCountX, workgroupCountY,
+ workgroupCountZ);
+}
+
+ResultOrError<std::pair<Ref<BufferBase>, uint64_t>>
+ComputePassEncoder::TransformIndirectDispatchBuffer(Ref<BufferBase> indirectBuffer,
+ uint64_t indirectOffset) {
+ DeviceBase* device = GetDevice();
+
+ const bool shouldDuplicateNumWorkgroups =
+ device->ShouldDuplicateNumWorkgroupsForDispatchIndirect(
+ mCommandBufferState.GetComputePipeline());
+ if (!IsValidationEnabled() && !shouldDuplicateNumWorkgroups) {
+ return std::make_pair(indirectBuffer, indirectOffset);
}
- void ComputePassEncoder::APIDispatchIndirect(BufferBase* indirectBuffer,
- uint64_t indirectOffset) {
- mEncodingContext->TryEncode(
- this,
- [&](CommandAllocator* allocator) -> MaybeError {
- if (IsValidationEnabled()) {
- DAWN_TRY(GetDevice()->ValidateObject(indirectBuffer));
- DAWN_TRY(ValidateCanUseAs(indirectBuffer, wgpu::BufferUsage::Indirect));
- DAWN_TRY(mCommandBufferState.ValidateCanDispatch());
-
- DAWN_INVALID_IF(indirectOffset % 4 != 0,
- "Indirect offset (%u) is not a multiple of 4.", indirectOffset);
-
- DAWN_INVALID_IF(
- indirectOffset >= indirectBuffer->GetSize() ||
- indirectOffset + kDispatchIndirectSize > indirectBuffer->GetSize(),
- "Indirect offset (%u) and dispatch size (%u) exceeds the indirect buffer "
- "size (%u).",
- indirectOffset, kDispatchIndirectSize, indirectBuffer->GetSize());
- }
-
- SyncScopeUsageTracker scope;
- scope.BufferUsedAs(indirectBuffer, wgpu::BufferUsage::Indirect);
- mUsageTracker.AddReferencedBuffer(indirectBuffer);
- // TODO(crbug.com/dawn/1166): If validation is enabled, adding |indirectBuffer|
- // is needed for correct usage validation even though it will only be bound for
- // storage. This will unecessarily transition the |indirectBuffer| in
- // the backend.
-
- Ref<BufferBase> indirectBufferRef = indirectBuffer;
-
- // Get applied indirect buffer with necessary changes on the original indirect
- // buffer. For example,
- // - Validate each indirect dispatch with a single dispatch to copy the indirect
- // buffer params into a scratch buffer if they're valid, and otherwise zero them
- // out.
- // - Duplicate all the indirect dispatch parameters to support @num_workgroups on
- // D3D12.
- // - Directly return the original indirect dispatch buffer if we don't need any
- // transformations on it.
- // We could consider moving the validation earlier in the pass after the last
- // last point the indirect buffer was used with writable usage, as well as batch
- // validation for multiple dispatches into one, but inserting commands at
- // arbitrary points in the past is not possible right now.
- DAWN_TRY_ASSIGN(std::tie(indirectBufferRef, indirectOffset),
- TransformIndirectDispatchBuffer(indirectBufferRef, indirectOffset));
-
- // If we have created a new scratch dispatch indirect buffer in
- // TransformIndirectDispatchBuffer(), we need to track it in mUsageTracker.
- if (indirectBufferRef.Get() != indirectBuffer) {
- // |indirectBufferRef| was replaced with a scratch buffer. Add it to the
- // synchronization scope.
- scope.BufferUsedAs(indirectBufferRef.Get(), wgpu::BufferUsage::Indirect);
- mUsageTracker.AddReferencedBuffer(indirectBufferRef.Get());
- }
-
- AddDispatchSyncScope(std::move(scope));
-
- DispatchIndirectCmd* dispatch =
- allocator->Allocate<DispatchIndirectCmd>(Command::DispatchIndirect);
- dispatch->indirectBuffer = std::move(indirectBufferRef);
- dispatch->indirectOffset = indirectOffset;
- return {};
- },
- "encoding %s.DispatchIndirect(%s, %u).", this, indirectBuffer, indirectOffset);
+ // Save the previous command buffer state so it can be restored after the
+ // validation inserts additional commands.
+ CommandBufferStateTracker previousState = mCommandBufferState;
+
+ auto* const store = device->GetInternalPipelineStore();
+
+ Ref<ComputePipelineBase> validationPipeline;
+ DAWN_TRY_ASSIGN(validationPipeline, GetOrCreateIndirectDispatchValidationPipeline(device));
+
+ Ref<BindGroupLayoutBase> layout;
+ DAWN_TRY_ASSIGN(layout, validationPipeline->GetBindGroupLayout(0));
+
+ uint32_t storageBufferOffsetAlignment = device->GetLimits().v1.minStorageBufferOffsetAlignment;
+
+ // Let the offset be the indirectOffset, aligned down to |storageBufferOffsetAlignment|.
+ const uint32_t clientOffsetFromAlignedBoundary = indirectOffset % storageBufferOffsetAlignment;
+ const uint64_t clientOffsetAlignedDown = indirectOffset - clientOffsetFromAlignedBoundary;
+ const uint64_t clientIndirectBindingOffset = clientOffsetAlignedDown;
+
+ // Let the size of the binding be the additional offset, plus the size.
+ const uint64_t clientIndirectBindingSize =
+ kDispatchIndirectSize + clientOffsetFromAlignedBoundary;
+
+ // Neither 'enableValidation' nor 'duplicateNumWorkgroups' can be declared as 'bool' as
+ // currently in WGSL type 'bool' cannot be used in storage class 'uniform' as 'it is
+ // non-host-shareable'.
+ struct UniformParams {
+ uint32_t maxComputeWorkgroupsPerDimension;
+ uint32_t clientOffsetInU32;
+ uint32_t enableValidation;
+ uint32_t duplicateNumWorkgroups;
+ };
+
+ // Create a uniform buffer to hold parameters for the shader.
+ Ref<BufferBase> uniformBuffer;
+ {
+ UniformParams params;
+ params.maxComputeWorkgroupsPerDimension =
+ device->GetLimits().v1.maxComputeWorkgroupsPerDimension;
+ params.clientOffsetInU32 = clientOffsetFromAlignedBoundary / sizeof(uint32_t);
+ params.enableValidation = static_cast<uint32_t>(IsValidationEnabled());
+ params.duplicateNumWorkgroups = static_cast<uint32_t>(shouldDuplicateNumWorkgroups);
+
+ DAWN_TRY_ASSIGN(uniformBuffer,
+ utils::CreateBufferFromData(device, wgpu::BufferUsage::Uniform, {params}));
}
- void ComputePassEncoder::APISetPipeline(ComputePipelineBase* pipeline) {
- mEncodingContext->TryEncode(
- this,
- [&](CommandAllocator* allocator) -> MaybeError {
- if (IsValidationEnabled()) {
- DAWN_TRY(GetDevice()->ValidateObject(pipeline));
- }
-
- mCommandBufferState.SetComputePipeline(pipeline);
-
- SetComputePipelineCmd* cmd =
- allocator->Allocate<SetComputePipelineCmd>(Command::SetComputePipeline);
- cmd->pipeline = pipeline;
-
- return {};
- },
- "encoding %s.SetPipeline(%s).", this, pipeline);
- }
+ // Reserve space in the scratch buffer to hold the validated indirect params.
+ ScratchBuffer& scratchBuffer = store->scratchIndirectStorage;
+ const uint64_t scratchBufferSize =
+ shouldDuplicateNumWorkgroups ? 2 * kDispatchIndirectSize : kDispatchIndirectSize;
+ DAWN_TRY(scratchBuffer.EnsureCapacity(scratchBufferSize));
+ Ref<BufferBase> validatedIndirectBuffer = scratchBuffer.GetBuffer();
+
+ Ref<BindGroupBase> validationBindGroup;
+ ASSERT(indirectBuffer->GetUsage() & kInternalStorageBuffer);
+ DAWN_TRY_ASSIGN(validationBindGroup,
+ utils::MakeBindGroup(device, layout,
+ {
+ {0, uniformBuffer},
+ {1, indirectBuffer, clientIndirectBindingOffset,
+ clientIndirectBindingSize},
+ {2, validatedIndirectBuffer, 0, scratchBufferSize},
+ }));
+
+ // Issue commands to validate the indirect buffer.
+ APISetPipeline(validationPipeline.Get());
+ APISetBindGroup(0, validationBindGroup.Get());
+ APIDispatchWorkgroups(1);
+
+ // Restore the state.
+ RestoreCommandBufferState(std::move(previousState));
+
+ // Return the new indirect buffer and indirect buffer offset.
+ return std::make_pair(std::move(validatedIndirectBuffer), uint64_t(0));
+}
+
+void ComputePassEncoder::APIDispatchIndirect(BufferBase* indirectBuffer, uint64_t indirectOffset) {
+ GetDevice()->EmitDeprecationWarning(
+ "dispatchIndirect() has been deprecated. Use dispatchWorkgroupsIndirect() instead.");
+ APIDispatchWorkgroupsIndirect(indirectBuffer, indirectOffset);
+}
+
+void ComputePassEncoder::APIDispatchWorkgroupsIndirect(BufferBase* indirectBuffer,
+ uint64_t indirectOffset) {
+ mEncodingContext->TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (IsValidationEnabled()) {
+ DAWN_TRY(GetDevice()->ValidateObject(indirectBuffer));
+ DAWN_TRY(ValidateCanUseAs(indirectBuffer, wgpu::BufferUsage::Indirect));
+ DAWN_TRY(mCommandBufferState.ValidateCanDispatch());
+
+ DAWN_INVALID_IF(indirectOffset % 4 != 0,
+ "Indirect offset (%u) is not a multiple of 4.", indirectOffset);
+
+ DAWN_INVALID_IF(
+ indirectOffset >= indirectBuffer->GetSize() ||
+ indirectOffset + kDispatchIndirectSize > indirectBuffer->GetSize(),
+ "Indirect offset (%u) and dispatch size (%u) exceeds the indirect buffer "
+ "size (%u).",
+ indirectOffset, kDispatchIndirectSize, indirectBuffer->GetSize());
+ }
- void ComputePassEncoder::APISetBindGroup(uint32_t groupIndexIn,
- BindGroupBase* group,
- uint32_t dynamicOffsetCount,
- const uint32_t* dynamicOffsets) {
- mEncodingContext->TryEncode(
- this,
- [&](CommandAllocator* allocator) -> MaybeError {
- BindGroupIndex groupIndex(groupIndexIn);
+ SyncScopeUsageTracker scope;
+ scope.BufferUsedAs(indirectBuffer, wgpu::BufferUsage::Indirect);
+ mUsageTracker.AddReferencedBuffer(indirectBuffer);
+ // TODO(crbug.com/dawn/1166): If validation is enabled, adding |indirectBuffer|
+ // is needed for correct usage validation even though it will only be bound for
+            // storage. This will unnecessarily transition the |indirectBuffer| in
+ // the backend.
+
+ Ref<BufferBase> indirectBufferRef = indirectBuffer;
+
+            // Get the indirect buffer to use, with any necessary changes applied to the
+            // original indirect buffer. For example:
+ // - Validate each indirect dispatch with a single dispatch to copy the indirect
+ // buffer params into a scratch buffer if they're valid, and otherwise zero them
+ // out.
+ // - Duplicate all the indirect dispatch parameters to support @num_workgroups on
+ // D3D12.
+ // - Directly return the original indirect dispatch buffer if we don't need any
+ // transformations on it.
+            // We could consider moving the validation earlier in the pass, after the last
+            // point the indirect buffer was used with writable usage, as well as batching
+ // validation for multiple dispatches into one, but inserting commands at
+ // arbitrary points in the past is not possible right now.
+ DAWN_TRY_ASSIGN(std::tie(indirectBufferRef, indirectOffset),
+ TransformIndirectDispatchBuffer(indirectBufferRef, indirectOffset));
+
+ // If we have created a new scratch dispatch indirect buffer in
+ // TransformIndirectDispatchBuffer(), we need to track it in mUsageTracker.
+ if (indirectBufferRef.Get() != indirectBuffer) {
+ // |indirectBufferRef| was replaced with a scratch buffer. Add it to the
+ // synchronization scope.
+ scope.BufferUsedAs(indirectBufferRef.Get(), wgpu::BufferUsage::Indirect);
+ mUsageTracker.AddReferencedBuffer(indirectBufferRef.Get());
+ }
- if (IsValidationEnabled()) {
- DAWN_TRY(ValidateSetBindGroup(groupIndex, group, dynamicOffsetCount,
- dynamicOffsets));
- }
+ AddDispatchSyncScope(std::move(scope));
+
+ DispatchIndirectCmd* dispatch =
+ allocator->Allocate<DispatchIndirectCmd>(Command::DispatchIndirect);
+ dispatch->indirectBuffer = std::move(indirectBufferRef);
+ dispatch->indirectOffset = indirectOffset;
+ return {};
+ },
+ "encoding %s.DispatchWorkgroupsIndirect(%s, %u).", this, indirectBuffer, indirectOffset);
+}
+
+void ComputePassEncoder::APISetPipeline(ComputePipelineBase* pipeline) {
+ mEncodingContext->TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (IsValidationEnabled()) {
+ DAWN_TRY(GetDevice()->ValidateObject(pipeline));
+ }
- mUsageTracker.AddResourcesReferencedByBindGroup(group);
- RecordSetBindGroup(allocator, groupIndex, group, dynamicOffsetCount,
- dynamicOffsets);
- mCommandBufferState.SetBindGroup(groupIndex, group, dynamicOffsetCount,
- dynamicOffsets);
+ mCommandBufferState.SetComputePipeline(pipeline);
+
+ SetComputePipelineCmd* cmd =
+ allocator->Allocate<SetComputePipelineCmd>(Command::SetComputePipeline);
+ cmd->pipeline = pipeline;
+
+ return {};
+ },
+ "encoding %s.SetPipeline(%s).", this, pipeline);
+}
+
+void ComputePassEncoder::APISetBindGroup(uint32_t groupIndexIn,
+ BindGroupBase* group,
+ uint32_t dynamicOffsetCount,
+ const uint32_t* dynamicOffsets) {
+ mEncodingContext->TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ BindGroupIndex groupIndex(groupIndexIn);
+
+ if (IsValidationEnabled()) {
+ DAWN_TRY(
+ ValidateSetBindGroup(groupIndex, group, dynamicOffsetCount, dynamicOffsets));
+ }
- return {};
- },
- "encoding %s.SetBindGroup(%u, %s, %u, ...).", this, groupIndexIn, group,
- dynamicOffsetCount);
- }
+ mUsageTracker.AddResourcesReferencedByBindGroup(group);
+ RecordSetBindGroup(allocator, groupIndex, group, dynamicOffsetCount, dynamicOffsets);
+ mCommandBufferState.SetBindGroup(groupIndex, group, dynamicOffsetCount, dynamicOffsets);
+
+ return {};
+ },
+ "encoding %s.SetBindGroup(%u, %s, %u, ...).", this, groupIndexIn, group,
+ dynamicOffsetCount);
+}
+
+void ComputePassEncoder::APIWriteTimestamp(QuerySetBase* querySet, uint32_t queryIndex) {
+ mEncodingContext->TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (IsValidationEnabled()) {
+ DAWN_TRY(ValidateTimestampQuery(GetDevice(), querySet, queryIndex));
+ }
- void ComputePassEncoder::APIWriteTimestamp(QuerySetBase* querySet, uint32_t queryIndex) {
- mEncodingContext->TryEncode(
- this,
- [&](CommandAllocator* allocator) -> MaybeError {
- if (IsValidationEnabled()) {
- DAWN_TRY(ValidateTimestampQuery(GetDevice(), querySet, queryIndex));
- }
+ mCommandEncoder->TrackQueryAvailability(querySet, queryIndex);
- mCommandEncoder->TrackQueryAvailability(querySet, queryIndex);
+ WriteTimestampCmd* cmd =
+ allocator->Allocate<WriteTimestampCmd>(Command::WriteTimestamp);
+ cmd->querySet = querySet;
+ cmd->queryIndex = queryIndex;
- WriteTimestampCmd* cmd =
- allocator->Allocate<WriteTimestampCmd>(Command::WriteTimestamp);
- cmd->querySet = querySet;
- cmd->queryIndex = queryIndex;
+ return {};
+ },
+ "encoding %s.WriteTimestamp(%s, %u).", this, querySet, queryIndex);
+}
- return {};
- },
- "encoding %s.WriteTimestamp(%s, %u).", this, querySet, queryIndex);
+void ComputePassEncoder::AddDispatchSyncScope(SyncScopeUsageTracker scope) {
+ PipelineLayoutBase* layout = mCommandBufferState.GetPipelineLayout();
+ for (BindGroupIndex i : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
+ scope.AddBindGroup(mCommandBufferState.GetBindGroup(i));
}
+ mUsageTracker.AddDispatch(scope.AcquireSyncScopeUsage());
+}
- void ComputePassEncoder::AddDispatchSyncScope(SyncScopeUsageTracker scope) {
- PipelineLayoutBase* layout = mCommandBufferState.GetPipelineLayout();
- for (BindGroupIndex i : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
- scope.AddBindGroup(mCommandBufferState.GetBindGroup(i));
- }
- mUsageTracker.AddDispatch(scope.AcquireSyncScopeUsage());
+void ComputePassEncoder::RestoreCommandBufferState(CommandBufferStateTracker state) {
+ // Encode commands for the backend to restore the pipeline and bind groups.
+ if (state.HasPipeline()) {
+ APISetPipeline(state.GetComputePipeline());
}
-
- void ComputePassEncoder::RestoreCommandBufferState(CommandBufferStateTracker state) {
- // Encode commands for the backend to restore the pipeline and bind groups.
- if (state.HasPipeline()) {
- APISetPipeline(state.GetComputePipeline());
- }
- for (BindGroupIndex i(0); i < kMaxBindGroupsTyped; ++i) {
- BindGroupBase* bg = state.GetBindGroup(i);
- if (bg != nullptr) {
- const std::vector<uint32_t>& offsets = state.GetDynamicOffsets(i);
- if (offsets.empty()) {
- APISetBindGroup(static_cast<uint32_t>(i), bg);
- } else {
- APISetBindGroup(static_cast<uint32_t>(i), bg, offsets.size(), offsets.data());
- }
+ for (BindGroupIndex i(0); i < kMaxBindGroupsTyped; ++i) {
+ BindGroupBase* bg = state.GetBindGroup(i);
+ if (bg != nullptr) {
+ const std::vector<uint32_t>& offsets = state.GetDynamicOffsets(i);
+ if (offsets.empty()) {
+ APISetBindGroup(static_cast<uint32_t>(i), bg);
+ } else {
+ APISetBindGroup(static_cast<uint32_t>(i), bg, offsets.size(), offsets.data());
}
}
-
- // Restore the frontend state tracking information.
- mCommandBufferState = std::move(state);
}
- CommandBufferStateTracker* ComputePassEncoder::GetCommandBufferStateTrackerForTesting() {
- return &mCommandBufferState;
- }
+ // Restore the frontend state tracking information.
+ mCommandBufferState = std::move(state);
+}
+
+CommandBufferStateTracker* ComputePassEncoder::GetCommandBufferStateTrackerForTesting() {
+ return &mCommandBufferState;
+}
} // namespace dawn::native
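
TransformIndirectDispatchBuffer() above binds a window of the client's indirect buffer that starts at indirectOffset aligned down to minStorageBufferOffsetAlignment and covers the leftover bytes plus the dispatch arguments, passing the leftover to the validation shader as clientOffsetInU32. A standalone worked example of that arithmetic follows; the values are illustrative, and kDispatchIndirectSize is assumed here to be the three u32 workgroup counts (12 bytes) of an indirect dispatch.

    #include <cstdint>
    #include <iostream>

    int main() {
        const uint64_t kDispatchIndirectSize = 3 * sizeof(uint32_t);  // x, y, z workgroup counts
        const uint64_t storageBufferOffsetAlignment = 256;            // typical device minimum
        const uint64_t indirectOffset = 260;                          // client-provided offset

        // Align the binding down and keep the remainder so the shader can index past it.
        const uint64_t fromAlignedBoundary = indirectOffset % storageBufferOffsetAlignment;  // 4
        const uint64_t bindingOffset = indirectOffset - fromAlignedBoundary;                 // 256
        const uint64_t bindingSize = kDispatchIndirectSize + fromAlignedBoundary;            // 16
        const uint64_t clientOffsetInU32 = fromAlignedBoundary / sizeof(uint32_t);           // 1

        std::cout << bindingOffset << " " << bindingSize << " " << clientOffsetInU32 << "\n";
        return 0;
    }
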
diff --git a/chromium/third_party/dawn/src/dawn/native/ComputePassEncoder.h b/chromium/third_party/dawn/src/dawn/native/ComputePassEncoder.h
index a2bb08ff129..ad950964b2b 100644
--- a/chromium/third_party/dawn/src/dawn/native/ComputePassEncoder.h
+++ b/chromium/third_party/dawn/src/dawn/native/ComputePassEncoder.h
@@ -15,6 +15,9 @@
#ifndef SRC_DAWN_NATIVE_COMPUTEPASSENCODER_H_
#define SRC_DAWN_NATIVE_COMPUTEPASSENCODER_H_
+#include <utility>
+#include <vector>
+
#include "dawn/native/CommandBufferStateTracker.h"
#include "dawn/native/Error.h"
#include "dawn/native/Forward.h"
@@ -23,75 +26,81 @@
namespace dawn::native {
- class SyncScopeUsageTracker;
-
- class ComputePassEncoder final : public ProgrammableEncoder {
- public:
- static Ref<ComputePassEncoder> Create(DeviceBase* device,
- const ComputePassDescriptor* descriptor,
- CommandEncoder* commandEncoder,
- EncodingContext* encodingContext,
- std::vector<TimestampWrite> timestampWritesAtEnd);
- static Ref<ComputePassEncoder> MakeError(DeviceBase* device,
- CommandEncoder* commandEncoder,
- EncodingContext* encodingContext);
-
- ObjectType GetType() const override;
-
- void APIEnd();
- void APIEndPass(); // TODO(dawn:1286): Remove after deprecation period.
-
- void APIDispatch(uint32_t workgroupCountX,
- uint32_t workgroupCountY = 1,
- uint32_t workgroupCountZ = 1);
- void APIDispatchIndirect(BufferBase* indirectBuffer, uint64_t indirectOffset);
- void APISetPipeline(ComputePipelineBase* pipeline);
-
- void APISetBindGroup(uint32_t groupIndex,
- BindGroupBase* group,
- uint32_t dynamicOffsetCount = 0,
- const uint32_t* dynamicOffsets = nullptr);
-
- void APIWriteTimestamp(QuerySetBase* querySet, uint32_t queryIndex);
-
- CommandBufferStateTracker* GetCommandBufferStateTrackerForTesting();
- void RestoreCommandBufferStateForTesting(CommandBufferStateTracker state) {
- RestoreCommandBufferState(std::move(state));
- }
-
- protected:
- ComputePassEncoder(DeviceBase* device,
- const ComputePassDescriptor* descriptor,
- CommandEncoder* commandEncoder,
- EncodingContext* encodingContext,
- std::vector<TimestampWrite> timestampWritesAtEnd);
- ComputePassEncoder(DeviceBase* device,
- CommandEncoder* commandEncoder,
- EncodingContext* encodingContext,
- ErrorTag errorTag);
-
- private:
- void DestroyImpl() override;
-
- ResultOrError<std::pair<Ref<BufferBase>, uint64_t>> TransformIndirectDispatchBuffer(
- Ref<BufferBase> indirectBuffer,
- uint64_t indirectOffset);
-
- void RestoreCommandBufferState(CommandBufferStateTracker state);
-
- CommandBufferStateTracker mCommandBufferState;
-
- // Adds the bindgroups used for the current dispatch to the SyncScopeResourceUsage and
- // records it in mUsageTracker.
- void AddDispatchSyncScope(SyncScopeUsageTracker scope = {});
- ComputePassResourceUsageTracker mUsageTracker;
-
- // For render and compute passes, the encoding context is borrowed from the command encoder.
- // Keep a reference to the encoder to make sure the context isn't freed.
- Ref<CommandEncoder> mCommandEncoder;
-
- std::vector<TimestampWrite> mTimestampWritesAtEnd;
- };
+class SyncScopeUsageTracker;
+
+class ComputePassEncoder final : public ProgrammableEncoder {
+ public:
+ static Ref<ComputePassEncoder> Create(DeviceBase* device,
+ const ComputePassDescriptor* descriptor,
+ CommandEncoder* commandEncoder,
+ EncodingContext* encodingContext,
+ std::vector<TimestampWrite> timestampWritesAtEnd);
+ static Ref<ComputePassEncoder> MakeError(DeviceBase* device,
+ CommandEncoder* commandEncoder,
+ EncodingContext* encodingContext);
+
+ ObjectType GetType() const override;
+
+ void APIEnd();
+ void APIEndPass(); // TODO(dawn:1286): Remove after deprecation period.
+
+ void APIDispatchWorkgroups(uint32_t workgroupCountX,
+ uint32_t workgroupCountY = 1,
+ uint32_t workgroupCountZ = 1);
+ void APIDispatchWorkgroupsIndirect(BufferBase* indirectBuffer, uint64_t indirectOffset);
+ void APISetPipeline(ComputePipelineBase* pipeline);
+
+ void APISetBindGroup(uint32_t groupIndex,
+ BindGroupBase* group,
+ uint32_t dynamicOffsetCount = 0,
+ const uint32_t* dynamicOffsets = nullptr);
+
+ void APIWriteTimestamp(QuerySetBase* querySet, uint32_t queryIndex);
+
+ CommandBufferStateTracker* GetCommandBufferStateTrackerForTesting();
+ void RestoreCommandBufferStateForTesting(CommandBufferStateTracker state) {
+ RestoreCommandBufferState(std::move(state));
+ }
+
+ // Deprecated
+ void APIDispatch(uint32_t workgroupCountX,
+ uint32_t workgroupCountY = 1,
+ uint32_t workgroupCountZ = 1);
+ void APIDispatchIndirect(BufferBase* indirectBuffer, uint64_t indirectOffset);
+
+ protected:
+ ComputePassEncoder(DeviceBase* device,
+ const ComputePassDescriptor* descriptor,
+ CommandEncoder* commandEncoder,
+ EncodingContext* encodingContext,
+ std::vector<TimestampWrite> timestampWritesAtEnd);
+ ComputePassEncoder(DeviceBase* device,
+ CommandEncoder* commandEncoder,
+ EncodingContext* encodingContext,
+ ErrorTag errorTag);
+
+ private:
+ void DestroyImpl() override;
+
+ ResultOrError<std::pair<Ref<BufferBase>, uint64_t>> TransformIndirectDispatchBuffer(
+ Ref<BufferBase> indirectBuffer,
+ uint64_t indirectOffset);
+
+ void RestoreCommandBufferState(CommandBufferStateTracker state);
+
+ CommandBufferStateTracker mCommandBufferState;
+
+ // Adds the bindgroups used for the current dispatch to the SyncScopeResourceUsage and
+ // records it in mUsageTracker.
+ void AddDispatchSyncScope(SyncScopeUsageTracker scope = {});
+ ComputePassResourceUsageTracker mUsageTracker;
+
+ // For render and compute passes, the encoding context is borrowed from the command encoder.
+ // Keep a reference to the encoder to make sure the context isn't freed.
+ Ref<CommandEncoder> mCommandEncoder;
+
+ std::vector<TimestampWrite> mTimestampWritesAtEnd;
+};
} // namespace dawn::native
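
[Editor's note] The header above renames the compute dispatch entry points: APIDispatch/APIDispatchIndirect become APIDispatchWorkgroups/APIDispatchWorkgroupsIndirect, and the old names are kept only as deprecated aliases. The arguments count workgroups, not shader invocations, which the new names make explicit. Below is a minimal standalone C++ sketch of that distinction; the workgroup size and counts are hypothetical values, not taken from the diff.

    #include <cstdint>
    #include <cstdio>

    int main() {
        // Hypothetical dispatch: 8 x 4 x 1 workgroups, @workgroup_size(64) in the shader.
        uint32_t workgroupCountX = 8, workgroupCountY = 4, workgroupCountZ = 1;
        uint32_t invocationsPerWorkgroup = 64;

        // DispatchWorkgroups(x, y, z) launches x*y*z workgroups; the total number of
        // shader invocations is that product times the workgroup size.
        uint64_t totalInvocations = uint64_t(workgroupCountX) * workgroupCountY *
                                    workgroupCountZ * invocationsPerWorkgroup;
        std::printf("workgroups: %u, invocations: %llu\n",
                    workgroupCountX * workgroupCountY * workgroupCountZ,
                    static_cast<unsigned long long>(totalInvocations));
        return 0;
    }
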
diff --git a/chromium/third_party/dawn/src/dawn/native/ComputePipeline.cpp b/chromium/third_party/dawn/src/dawn/native/ComputePipeline.cpp
index ecd3d799ca8..a1dcf15a481 100644
--- a/chromium/third_party/dawn/src/dawn/native/ComputePipeline.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/ComputePipeline.cpp
@@ -20,77 +20,78 @@
namespace dawn::native {
- MaybeError ValidateComputePipelineDescriptor(DeviceBase* device,
- const ComputePipelineDescriptor* descriptor) {
- if (descriptor->nextInChain != nullptr) {
- return DAWN_FORMAT_VALIDATION_ERROR("nextInChain must be nullptr.");
- }
-
- if (descriptor->layout != nullptr) {
- DAWN_TRY(device->ValidateObject(descriptor->layout));
- }
-
- return ValidateProgrammableStage(
- device, descriptor->compute.module, descriptor->compute.entryPoint,
- descriptor->compute.constantCount, descriptor->compute.constants, descriptor->layout,
- SingleShaderStage::Compute);
- }
-
- // ComputePipelineBase
-
- ComputePipelineBase::ComputePipelineBase(DeviceBase* device,
- const ComputePipelineDescriptor* descriptor)
- : PipelineBase(device,
- descriptor->layout,
- descriptor->label,
- {{SingleShaderStage::Compute, descriptor->compute.module,
- descriptor->compute.entryPoint, descriptor->compute.constantCount,
- descriptor->compute.constants}}) {
- SetContentHash(ComputeContentHash());
- TrackInDevice();
+MaybeError ValidateComputePipelineDescriptor(DeviceBase* device,
+ const ComputePipelineDescriptor* descriptor) {
+ if (descriptor->nextInChain != nullptr) {
+ return DAWN_FORMAT_VALIDATION_ERROR("nextInChain must be nullptr.");
}
- ComputePipelineBase::ComputePipelineBase(DeviceBase* device) : PipelineBase(device) {
- TrackInDevice();
+ if (descriptor->layout != nullptr) {
+ DAWN_TRY(device->ValidateObject(descriptor->layout));
}
- ComputePipelineBase::ComputePipelineBase(DeviceBase* device, ObjectBase::ErrorTag tag)
- : PipelineBase(device, tag) {
+ return ValidateProgrammableStage(
+ device, descriptor->compute.module, descriptor->compute.entryPoint,
+ descriptor->compute.constantCount, descriptor->compute.constants, descriptor->layout,
+ SingleShaderStage::Compute);
+}
+
+// ComputePipelineBase
+
+ComputePipelineBase::ComputePipelineBase(DeviceBase* device,
+ const ComputePipelineDescriptor* descriptor)
+ : PipelineBase(
+ device,
+ descriptor->layout,
+ descriptor->label,
+ {{SingleShaderStage::Compute, descriptor->compute.module, descriptor->compute.entryPoint,
+ descriptor->compute.constantCount, descriptor->compute.constants}}) {
+ SetContentHash(ComputeContentHash());
+ TrackInDevice();
+
+ // Initialize the cache key to include the cache type and device information.
+ mCacheKey.Record(CacheKey::Type::ComputePipeline, device->GetCacheKey());
+}
+
+ComputePipelineBase::ComputePipelineBase(DeviceBase* device) : PipelineBase(device) {
+ TrackInDevice();
+}
+
+ComputePipelineBase::ComputePipelineBase(DeviceBase* device, ObjectBase::ErrorTag tag)
+ : PipelineBase(device, tag) {}
+
+ComputePipelineBase::~ComputePipelineBase() = default;
+
+void ComputePipelineBase::DestroyImpl() {
+ if (IsCachedReference()) {
+ // Do not uncache the actual cached object if we are a blueprint.
+ GetDevice()->UncacheComputePipeline(this);
}
-
- ComputePipelineBase::~ComputePipelineBase() = default;
-
- void ComputePipelineBase::DestroyImpl() {
- if (IsCachedReference()) {
- // Do not uncache the actual cached object if we are a blueprint.
- GetDevice()->UncacheComputePipeline(this);
+}
+
+// static
+ComputePipelineBase* ComputePipelineBase::MakeError(DeviceBase* device) {
+ class ErrorComputePipeline final : public ComputePipelineBase {
+ public:
+ explicit ErrorComputePipeline(DeviceBase* device)
+ : ComputePipelineBase(device, ObjectBase::kError) {}
+
+ MaybeError Initialize() override {
+ UNREACHABLE();
+ return {};
}
- }
+ };
- // static
- ComputePipelineBase* ComputePipelineBase::MakeError(DeviceBase* device) {
- class ErrorComputePipeline final : public ComputePipelineBase {
- public:
- explicit ErrorComputePipeline(DeviceBase* device)
- : ComputePipelineBase(device, ObjectBase::kError) {
- }
-
- MaybeError Initialize() override {
- UNREACHABLE();
- return {};
- }
- };
-
- return new ErrorComputePipeline(device);
- }
+ return new ErrorComputePipeline(device);
+}
- ObjectType ComputePipelineBase::GetType() const {
- return ObjectType::ComputePipeline;
- }
+ObjectType ComputePipelineBase::GetType() const {
+ return ObjectType::ComputePipeline;
+}
- bool ComputePipelineBase::EqualityFunc::operator()(const ComputePipelineBase* a,
- const ComputePipelineBase* b) const {
- return PipelineBase::EqualForCache(a, b);
- }
+bool ComputePipelineBase::EqualityFunc::operator()(const ComputePipelineBase* a,
+ const ComputePipelineBase* b) const {
+ return PipelineBase::EqualForCache(a, b);
+}
} // namespace dawn::native
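
[Editor's note] ComputePipelineBase::MakeError() above returns a local final subclass whose Initialize() is unreachable, so callers can be handed a tagged error object instead of a null pointer when validation fails. The standalone sketch below shows that error-object pattern with made-up names and no Dawn dependencies; it is illustrative only, not the actual Dawn implementation.

    #include <cassert>
    #include <cstdio>

    class PipelineSketch {
      public:
        struct ErrorTag {};
        static constexpr ErrorTag kError{};

        PipelineSketch() = default;
        explicit PipelineSketch(ErrorTag) : mIsError(true) {}
        virtual ~PipelineSketch() = default;

        bool IsError() const { return mIsError; }
        virtual bool Initialize() { return true; }

        // Factory mirroring MakeError(): the returned object is tagged as an
        // error and must never be initialized.
        static PipelineSketch* MakeError() {
            class ErrorPipeline final : public PipelineSketch {
              public:
                ErrorPipeline() : PipelineSketch(kError) {}
                bool Initialize() override {
                    assert(false && "error objects are never initialized");
                    return false;
                }
            };
            return new ErrorPipeline();
        }

      private:
        bool mIsError = false;
    };

    int main() {
        PipelineSketch* pipeline = PipelineSketch::MakeError();
        std::printf("is error object: %s\n", pipeline->IsError() ? "yes" : "no");
        delete pipeline;
        return 0;
    }
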
diff --git a/chromium/third_party/dawn/src/dawn/native/ComputePipeline.h b/chromium/third_party/dawn/src/dawn/native/ComputePipeline.h
index 257bd85af05..36bb34fffeb 100644
--- a/chromium/third_party/dawn/src/dawn/native/ComputePipeline.h
+++ b/chromium/third_party/dawn/src/dawn/native/ComputePipeline.h
@@ -21,34 +21,34 @@
namespace dawn::native {
- class DeviceBase;
- struct EntryPointMetadata;
+class DeviceBase;
+struct EntryPointMetadata;
- MaybeError ValidateComputePipelineDescriptor(DeviceBase* device,
- const ComputePipelineDescriptor* descriptor);
+MaybeError ValidateComputePipelineDescriptor(DeviceBase* device,
+ const ComputePipelineDescriptor* descriptor);
- class ComputePipelineBase : public PipelineBase {
- public:
- ComputePipelineBase(DeviceBase* device, const ComputePipelineDescriptor* descriptor);
- ~ComputePipelineBase() override;
+class ComputePipelineBase : public PipelineBase {
+ public:
+ ComputePipelineBase(DeviceBase* device, const ComputePipelineDescriptor* descriptor);
+ ~ComputePipelineBase() override;
- static ComputePipelineBase* MakeError(DeviceBase* device);
+ static ComputePipelineBase* MakeError(DeviceBase* device);
- ObjectType GetType() const override;
+ ObjectType GetType() const override;
- // Functors necessary for the unordered_set<ComputePipelineBase*>-based cache.
- struct EqualityFunc {
- bool operator()(const ComputePipelineBase* a, const ComputePipelineBase* b) const;
- };
+ // Functors necessary for the unordered_set<ComputePipelineBase*>-based cache.
+ struct EqualityFunc {
+ bool operator()(const ComputePipelineBase* a, const ComputePipelineBase* b) const;
+ };
- protected:
- // Constructor used only for mocking and testing.
- explicit ComputePipelineBase(DeviceBase* device);
- void DestroyImpl() override;
+ protected:
+ // Constructor used only for mocking and testing.
+ explicit ComputePipelineBase(DeviceBase* device);
+ void DestroyImpl() override;
- private:
- ComputePipelineBase(DeviceBase* device, ObjectBase::ErrorTag tag);
- };
+ private:
+ ComputePipelineBase(DeviceBase* device, ObjectBase::ErrorTag tag);
+};
} // namespace dawn::native
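
[Editor's note] The EqualityFunc declared above exists so ComputePipelineBase objects can be deduplicated in an unordered_set keyed by content rather than by pointer identity. The standalone sketch below shows that container pattern with a custom hash and equality functor; the types and fields are illustrative stand-ins, not Dawn's.

    #include <cstddef>
    #include <cstdio>
    #include <string>
    #include <unordered_set>

    struct PipelineSketch {
        std::string entryPoint;
        size_t contentHash = 0;
    };

    struct HashFunc {
        size_t operator()(const PipelineSketch* p) const { return p->contentHash; }
    };
    struct EqualityFunc {
        bool operator()(const PipelineSketch* a, const PipelineSketch* b) const {
            return a->entryPoint == b->entryPoint;
        }
    };

    int main() {
        std::unordered_set<PipelineSketch*, HashFunc, EqualityFunc> cache;

        PipelineSketch a{"main", std::hash<std::string>{}("main")};
        PipelineSketch b{"main", std::hash<std::string>{}("main")};  // same content, different object

        cache.insert(&a);
        auto [it, inserted] = cache.insert(&b);  // deduplicated against &a
        (void)it;
        std::printf("second insert deduplicated: %s\n", inserted ? "no" : "yes");
        return 0;
    }
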
diff --git a/chromium/third_party/dawn/src/dawn/native/CopyTextureForBrowserHelper.cpp b/chromium/third_party/dawn/src/dawn/native/CopyTextureForBrowserHelper.cpp
index a72cedce141..48cf6e66551 100644
--- a/chromium/third_party/dawn/src/dawn/native/CopyTextureForBrowserHelper.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/CopyTextureForBrowserHelper.cpp
@@ -14,6 +14,9 @@
#include "dawn/native/CopyTextureForBrowserHelper.h"
+#include <unordered_set>
+#include <utility>
+
#include "dawn/common/Log.h"
#include "dawn/native/BindGroup.h"
#include "dawn/native/BindGroupLayout.h"
@@ -31,39 +34,37 @@
#include "dawn/native/ValidationUtils_autogen.h"
#include "dawn/native/utils/WGPUHelpers.h"
-#include <unordered_set>
-
namespace dawn::native {
- namespace {
+namespace {
- static const char sCopyTextureForBrowserShader[] = R"(
+static const char sCopyTextureForBrowserShader[] = R"(
struct GammaTransferParams {
- G: f32;
- A: f32;
- B: f32;
- C: f32;
- D: f32;
- E: f32;
- F: f32;
- padding: u32;
+ G: f32,
+ A: f32,
+ B: f32,
+ C: f32,
+ D: f32,
+ E: f32,
+ F: f32,
+ padding: u32,
};
struct Uniforms { // offset align size
- scale: vec2<f32>; // 0 8 8
- offset: vec2<f32>; // 8 8 8
- steps_mask: u32; // 16 4 4
+ scale: vec2<f32>, // 0 8 8
+ offset: vec2<f32>, // 8 8 8
+ steps_mask: u32, // 16 4 4
// implicit padding; // 20 12
- conversion_matrix: mat3x3<f32>; // 32 16 48
- gamma_decoding_params: GammaTransferParams; // 80 4 32
- gamma_encoding_params: GammaTransferParams; // 112 4 32
- gamma_decoding_for_dst_srgb_params: GammaTransferParams; // 144 4 32
+ conversion_matrix: mat3x3<f32>, // 32 16 48
+ gamma_decoding_params: GammaTransferParams, // 80 4 32
+ gamma_encoding_params: GammaTransferParams, // 112 4 32
+ gamma_decoding_for_dst_srgb_params: GammaTransferParams, // 144 4 32
};
@binding(0) @group(0) var<uniform> uniforms : Uniforms;
struct VertexOutputs {
- @location(0) texcoords : vec2<f32>;
- @builtin(position) position : vec4<f32>;
+ @location(0) texcoords : vec2<f32>,
+ @builtin(position) position : vec4<f32>,
};
// Chromium uses unified equation to construct gamma decoding function
@@ -84,7 +85,7 @@ namespace dawn::native {
return sign(v) * (pow(params.A * abs(v) + params.B, params.G) + params.E);
}
- @stage(vertex)
+ @vertex
fn vs_main(
@builtin(vertex_index) VertexIndex : u32
) -> VertexOutputs {
@@ -122,21 +123,22 @@ namespace dawn::native {
@binding(1) @group(0) var mySampler: sampler;
@binding(2) @group(0) var myTexture: texture_2d<f32>;
- @stage(fragment)
+ @fragment
fn fs_main(
@location(0) texcoord : vec2<f32>
) -> @location(0) vec4<f32> {
// Clamp the texcoord and discard the out-of-bound pixels.
var clampedTexcoord =
clamp(texcoord, vec2<f32>(0.0, 0.0), vec2<f32>(1.0, 1.0));
- if (!all(clampedTexcoord == texcoord)) {
- discard;
- }
// Swizzling of texture formats when sampling / rendering is handled by the
// hardware so we don't need special logic in this shader. This is covered by tests.
var color = textureSample(myTexture, mySampler, texcoord);
+ if (!all(clampedTexcoord == texcoord)) {
+ discard;
+ }
+
let kUnpremultiplyStep = 0x01u;
let kDecodeToLinearStep = 0x02u;
let kConvertToDstGamutStep = 0x04u;
@@ -194,411 +196,406 @@ namespace dawn::native {
}
)";
- // Follow the same order of skcms_TransferFunction
- // https://source.chromium.org/chromium/chromium/src/+/main:third_party/skia/include/third_party/skcms/skcms.h;l=46;
- struct GammaTransferParams {
- float G = 0.0;
- float A = 0.0;
- float B = 0.0;
- float C = 0.0;
- float D = 0.0;
- float E = 0.0;
- float F = 0.0;
- uint32_t padding = 0;
- };
-
- struct Uniform {
- float scaleX;
- float scaleY;
- float offsetX;
- float offsetY;
- uint32_t stepsMask = 0;
- const std::array<uint32_t, 3> padding = {}; // 12 bytes padding
- std::array<float, 12> conversionMatrix = {};
- GammaTransferParams gammaDecodingParams = {};
- GammaTransferParams gammaEncodingParams = {};
- GammaTransferParams gammaDecodingForDstSrgbParams = {};
- };
- static_assert(sizeof(Uniform) == 176);
-
- // TODO(crbug.com/dawn/856): Expand copyTextureForBrowser to support any
- // non-depth, non-stencil, non-compressed texture format pair copy.
- MaybeError ValidateCopyTextureFormatConversion(const wgpu::TextureFormat srcFormat,
- const wgpu::TextureFormat dstFormat) {
- switch (srcFormat) {
- case wgpu::TextureFormat::BGRA8Unorm:
- case wgpu::TextureFormat::RGBA8Unorm:
- break;
- default:
- return DAWN_FORMAT_VALIDATION_ERROR(
- "Source texture format (%s) is not supported.", srcFormat);
- }
+// Follow the same order of skcms_TransferFunction
+// https://source.chromium.org/chromium/chromium/src/+/main:third_party/skia/include/third_party/skcms/skcms.h;l=46;
+struct GammaTransferParams {
+ float G = 0.0;
+ float A = 0.0;
+ float B = 0.0;
+ float C = 0.0;
+ float D = 0.0;
+ float E = 0.0;
+ float F = 0.0;
+ uint32_t padding = 0;
+};
+
+struct Uniform {
+ float scaleX;
+ float scaleY;
+ float offsetX;
+ float offsetY;
+ uint32_t stepsMask = 0;
+ const std::array<uint32_t, 3> padding = {}; // 12 bytes padding
+ std::array<float, 12> conversionMatrix = {};
+ GammaTransferParams gammaDecodingParams = {};
+ GammaTransferParams gammaEncodingParams = {};
+ GammaTransferParams gammaDecodingForDstSrgbParams = {};
+};
+static_assert(sizeof(Uniform) == 176);
+
+// TODO(crbug.com/dawn/856): Expand copyTextureForBrowser to support any
+// non-depth, non-stencil, non-compressed texture format pair copy.
+MaybeError ValidateCopyTextureFormatConversion(const wgpu::TextureFormat srcFormat,
+ const wgpu::TextureFormat dstFormat) {
+ switch (srcFormat) {
+ case wgpu::TextureFormat::BGRA8Unorm:
+ case wgpu::TextureFormat::RGBA8Unorm:
+ break;
+ default:
+ return DAWN_FORMAT_VALIDATION_ERROR("Source texture format (%s) is not supported.",
+ srcFormat);
+ }
- switch (dstFormat) {
- case wgpu::TextureFormat::R8Unorm:
- case wgpu::TextureFormat::R16Float:
- case wgpu::TextureFormat::R32Float:
- case wgpu::TextureFormat::RG8Unorm:
- case wgpu::TextureFormat::RG16Float:
- case wgpu::TextureFormat::RG32Float:
- case wgpu::TextureFormat::RGBA8Unorm:
- case wgpu::TextureFormat::RGBA8UnormSrgb:
- case wgpu::TextureFormat::BGRA8Unorm:
- case wgpu::TextureFormat::BGRA8UnormSrgb:
- case wgpu::TextureFormat::RGB10A2Unorm:
- case wgpu::TextureFormat::RGBA16Float:
- case wgpu::TextureFormat::RGBA32Float:
- break;
- default:
- return DAWN_FORMAT_VALIDATION_ERROR(
- "Destination texture format (%s) is not supported.", dstFormat);
- }
+ switch (dstFormat) {
+ case wgpu::TextureFormat::R8Unorm:
+ case wgpu::TextureFormat::R16Float:
+ case wgpu::TextureFormat::R32Float:
+ case wgpu::TextureFormat::RG8Unorm:
+ case wgpu::TextureFormat::RG16Float:
+ case wgpu::TextureFormat::RG32Float:
+ case wgpu::TextureFormat::RGBA8Unorm:
+ case wgpu::TextureFormat::RGBA8UnormSrgb:
+ case wgpu::TextureFormat::BGRA8Unorm:
+ case wgpu::TextureFormat::BGRA8UnormSrgb:
+ case wgpu::TextureFormat::RGB10A2Unorm:
+ case wgpu::TextureFormat::RGBA16Float:
+ case wgpu::TextureFormat::RGBA32Float:
+ break;
+ default:
+ return DAWN_FORMAT_VALIDATION_ERROR("Destination texture format (%s) is not supported.",
+ dstFormat);
+ }
- return {};
- }
+ return {};
+}
- RenderPipelineBase* GetCachedPipeline(InternalPipelineStore* store,
- wgpu::TextureFormat dstFormat) {
- auto pipeline = store->copyTextureForBrowserPipelines.find(dstFormat);
- if (pipeline != store->copyTextureForBrowserPipelines.end()) {
- return pipeline->second.Get();
- }
- return nullptr;
+RenderPipelineBase* GetCachedPipeline(InternalPipelineStore* store, wgpu::TextureFormat dstFormat) {
+ auto pipeline = store->copyTextureForBrowserPipelines.find(dstFormat);
+ if (pipeline != store->copyTextureForBrowserPipelines.end()) {
+ return pipeline->second.Get();
+ }
+ return nullptr;
+}
+
+ResultOrError<RenderPipelineBase*> GetOrCreateCopyTextureForBrowserPipeline(
+ DeviceBase* device,
+ wgpu::TextureFormat dstFormat) {
+ InternalPipelineStore* store = device->GetInternalPipelineStore();
+
+ if (GetCachedPipeline(store, dstFormat) == nullptr) {
+ // Create vertex shader module if not cached before.
+ if (store->copyTextureForBrowser == nullptr) {
+ DAWN_TRY_ASSIGN(store->copyTextureForBrowser,
+ utils::CreateShaderModule(device, sCopyTextureForBrowserShader));
}
- ResultOrError<RenderPipelineBase*> GetOrCreateCopyTextureForBrowserPipeline(
- DeviceBase* device,
- wgpu::TextureFormat dstFormat) {
- InternalPipelineStore* store = device->GetInternalPipelineStore();
-
- if (GetCachedPipeline(store, dstFormat) == nullptr) {
- // Create vertex shader module if not cached before.
- if (store->copyTextureForBrowser == nullptr) {
- DAWN_TRY_ASSIGN(
- store->copyTextureForBrowser,
- utils::CreateShaderModule(device, sCopyTextureForBrowserShader));
- }
+ ShaderModuleBase* shaderModule = store->copyTextureForBrowser.Get();
- ShaderModuleBase* shaderModule = store->copyTextureForBrowser.Get();
+ // Prepare vertex stage.
+ VertexState vertex = {};
+ vertex.module = shaderModule;
+ vertex.entryPoint = "vs_main";
- // Prepare vertex stage.
- VertexState vertex = {};
- vertex.module = shaderModule;
- vertex.entryPoint = "vs_main";
+        // Prepare fragment stage.
+ FragmentState fragment = {};
+ fragment.module = shaderModule;
+ fragment.entryPoint = "fs_main";
- // Prepare frgament stage.
- FragmentState fragment = {};
- fragment.module = shaderModule;
- fragment.entryPoint = "fs_main";
+ // Prepare color state.
+ ColorTargetState target = {};
+ target.format = dstFormat;
- // Prepare color state.
- ColorTargetState target = {};
- target.format = dstFormat;
+ // Create RenderPipeline.
+ RenderPipelineDescriptor renderPipelineDesc = {};
- // Create RenderPipeline.
- RenderPipelineDescriptor renderPipelineDesc = {};
+ // Generate the layout based on shader modules.
+ renderPipelineDesc.layout = nullptr;
- // Generate the layout based on shader modules.
- renderPipelineDesc.layout = nullptr;
+ renderPipelineDesc.vertex = vertex;
+ renderPipelineDesc.fragment = &fragment;
- renderPipelineDesc.vertex = vertex;
- renderPipelineDesc.fragment = &fragment;
+ renderPipelineDesc.primitive.topology = wgpu::PrimitiveTopology::TriangleList;
- renderPipelineDesc.primitive.topology = wgpu::PrimitiveTopology::TriangleList;
+ fragment.targetCount = 1;
+ fragment.targets = &target;
- fragment.targetCount = 1;
- fragment.targets = &target;
-
- Ref<RenderPipelineBase> pipeline;
- DAWN_TRY_ASSIGN(pipeline, device->CreateRenderPipeline(&renderPipelineDesc));
- store->copyTextureForBrowserPipelines.insert({dstFormat, std::move(pipeline)});
- }
+ Ref<RenderPipelineBase> pipeline;
+ DAWN_TRY_ASSIGN(pipeline, device->CreateRenderPipeline(&renderPipelineDesc));
+ store->copyTextureForBrowserPipelines.insert({dstFormat, std::move(pipeline)});
+ }
- return GetCachedPipeline(store, dstFormat);
- }
- } // anonymous namespace
-
- MaybeError ValidateCopyTextureForBrowser(DeviceBase* device,
- const ImageCopyTexture* source,
- const ImageCopyTexture* destination,
- const Extent3D* copySize,
- const CopyTextureForBrowserOptions* options) {
- DAWN_TRY(device->ValidateObject(source->texture));
- DAWN_TRY(device->ValidateObject(destination->texture));
-
- DAWN_INVALID_IF(source->texture->GetTextureState() == TextureBase::TextureState::Destroyed,
- "Source texture %s is destroyed.", source->texture);
-
- DAWN_INVALID_IF(
- destination->texture->GetTextureState() == TextureBase::TextureState::Destroyed,
- "Destination texture %s is destroyed.", destination->texture);
-
- DAWN_TRY_CONTEXT(ValidateImageCopyTexture(device, *source, *copySize),
- "validating the ImageCopyTexture for the source");
- DAWN_TRY_CONTEXT(ValidateImageCopyTexture(device, *destination, *copySize),
- "validating the ImageCopyTexture for the destination");
-
- DAWN_TRY_CONTEXT(ValidateTextureCopyRange(device, *source, *copySize),
- "validating that the copy fits in the source");
- DAWN_TRY_CONTEXT(ValidateTextureCopyRange(device, *destination, *copySize),
- "validating that the copy fits in the destination");
-
- DAWN_TRY(ValidateTextureToTextureCopyCommonRestrictions(*source, *destination, *copySize));
-
- DAWN_INVALID_IF(source->origin.z > 0, "Source has a non-zero z origin (%u).",
- source->origin.z);
- DAWN_INVALID_IF(copySize->depthOrArrayLayers > 1,
- "Copy is for more than one array layer (%u)", copySize->depthOrArrayLayers);
-
- DAWN_INVALID_IF(
- source->texture->GetSampleCount() > 1 || destination->texture->GetSampleCount() > 1,
- "The source texture sample count (%u) or the destination texture sample count (%u) is "
- "not 1.",
- source->texture->GetSampleCount(), destination->texture->GetSampleCount());
-
- DAWN_TRY(ValidateCanUseAs(source->texture, wgpu::TextureUsage::CopySrc,
- UsageValidationMode::Default));
- DAWN_TRY(ValidateCanUseAs(source->texture, wgpu::TextureUsage::TextureBinding,
- UsageValidationMode::Default));
-
- DAWN_TRY(ValidateCanUseAs(destination->texture, wgpu::TextureUsage::CopyDst,
- UsageValidationMode::Default));
- DAWN_TRY(ValidateCanUseAs(destination->texture, wgpu::TextureUsage::RenderAttachment,
- UsageValidationMode::Default));
-
- DAWN_TRY(ValidateCopyTextureFormatConversion(source->texture->GetFormat().format,
- destination->texture->GetFormat().format));
-
- DAWN_INVALID_IF(options->nextInChain != nullptr, "nextInChain must be nullptr");
-
- DAWN_TRY(ValidateAlphaMode(options->srcAlphaMode));
- DAWN_TRY(ValidateAlphaMode(options->dstAlphaMode));
-
- if (options->needsColorSpaceConversion) {
- DAWN_INVALID_IF(options->srcTransferFunctionParameters == nullptr,
- "srcTransferFunctionParameters is nullptr when doing color conversion");
- DAWN_INVALID_IF(options->conversionMatrix == nullptr,
- "conversionMatrix is nullptr when doing color conversion");
- DAWN_INVALID_IF(options->dstTransferFunctionParameters == nullptr,
- "dstTransferFunctionParameters is nullptr when doing color conversion");
- }
+ return GetCachedPipeline(store, dstFormat);
+}
+} // anonymous namespace
+
+MaybeError ValidateCopyTextureForBrowser(DeviceBase* device,
+ const ImageCopyTexture* source,
+ const ImageCopyTexture* destination,
+ const Extent3D* copySize,
+ const CopyTextureForBrowserOptions* options) {
+ DAWN_TRY(device->ValidateObject(source->texture));
+ DAWN_TRY(device->ValidateObject(destination->texture));
+
+ DAWN_INVALID_IF(source->texture->GetTextureState() == TextureBase::TextureState::Destroyed,
+ "Source texture %s is destroyed.", source->texture);
+
+ DAWN_INVALID_IF(destination->texture->GetTextureState() == TextureBase::TextureState::Destroyed,
+ "Destination texture %s is destroyed.", destination->texture);
+
+ DAWN_TRY_CONTEXT(ValidateImageCopyTexture(device, *source, *copySize),
+ "validating the ImageCopyTexture for the source");
+ DAWN_TRY_CONTEXT(ValidateImageCopyTexture(device, *destination, *copySize),
+ "validating the ImageCopyTexture for the destination");
+
+ DAWN_TRY_CONTEXT(ValidateTextureCopyRange(device, *source, *copySize),
+ "validating that the copy fits in the source");
+ DAWN_TRY_CONTEXT(ValidateTextureCopyRange(device, *destination, *copySize),
+ "validating that the copy fits in the destination");
+
+ DAWN_TRY(ValidateTextureToTextureCopyCommonRestrictions(*source, *destination, *copySize));
+
+ DAWN_INVALID_IF(source->origin.z > 0, "Source has a non-zero z origin (%u).", source->origin.z);
+ DAWN_INVALID_IF(copySize->depthOrArrayLayers > 1, "Copy is for more than one array layer (%u)",
+ copySize->depthOrArrayLayers);
+
+ DAWN_INVALID_IF(
+ source->texture->GetSampleCount() > 1 || destination->texture->GetSampleCount() > 1,
+ "The source texture sample count (%u) or the destination texture sample count (%u) is "
+ "not 1.",
+ source->texture->GetSampleCount(), destination->texture->GetSampleCount());
+
+ DAWN_TRY(ValidateCanUseAs(source->texture, wgpu::TextureUsage::CopySrc,
+ UsageValidationMode::Default));
+ DAWN_TRY(ValidateCanUseAs(source->texture, wgpu::TextureUsage::TextureBinding,
+ UsageValidationMode::Default));
+
+ DAWN_TRY(ValidateCanUseAs(destination->texture, wgpu::TextureUsage::CopyDst,
+ UsageValidationMode::Default));
+ DAWN_TRY(ValidateCanUseAs(destination->texture, wgpu::TextureUsage::RenderAttachment,
+ UsageValidationMode::Default));
+
+ DAWN_TRY(ValidateCopyTextureFormatConversion(source->texture->GetFormat().format,
+ destination->texture->GetFormat().format));
+
+ DAWN_INVALID_IF(options->nextInChain != nullptr, "nextInChain must be nullptr");
+
+ DAWN_TRY(ValidateAlphaMode(options->srcAlphaMode));
+ DAWN_TRY(ValidateAlphaMode(options->dstAlphaMode));
+
+ if (options->needsColorSpaceConversion) {
+ DAWN_INVALID_IF(options->srcTransferFunctionParameters == nullptr,
+ "srcTransferFunctionParameters is nullptr when doing color conversion");
+ DAWN_INVALID_IF(options->conversionMatrix == nullptr,
+ "conversionMatrix is nullptr when doing color conversion");
+ DAWN_INVALID_IF(options->dstTransferFunctionParameters == nullptr,
+ "dstTransferFunctionParameters is nullptr when doing color conversion");
+ }
+ return {};
+}
+
+// Returns whether the dst texture format of CopyTextureForBrowser() is an *-srgb format.
+bool IsSrgbDstFormat(wgpu::TextureFormat format) {
+ switch (format) {
+ case wgpu::TextureFormat::RGBA8UnormSrgb:
+ case wgpu::TextureFormat::BGRA8UnormSrgb:
+ return true;
+ default:
+ return false;
+ }
+}
+
+MaybeError DoCopyTextureForBrowser(DeviceBase* device,
+ const ImageCopyTexture* source,
+ const ImageCopyTexture* destination,
+ const Extent3D* copySize,
+ const CopyTextureForBrowserOptions* options) {
+    // TODO(crbug.com/dawn/856): In D3D12 and Vulkan, compatible texture formats can be
+    // copied directly to each other. This could be a potential fast path.
+
+ // Noop copy
+ if (copySize->width == 0 || copySize->height == 0 || copySize->depthOrArrayLayers == 0) {
return {};
}
- // Whether the format of dst texture of CopyTextureForBrowser() is srgb or non-srgb.
- bool IsSrgbDstFormat(wgpu::TextureFormat format) {
- switch (format) {
- case wgpu::TextureFormat::RGBA8UnormSrgb:
- case wgpu::TextureFormat::BGRA8UnormSrgb:
- return true;
- default:
- return false;
- }
+ bool isSrgbDstFormat = IsSrgbDstFormat(destination->texture->GetFormat().format);
+ RenderPipelineBase* pipeline;
+ DAWN_TRY_ASSIGN(pipeline, GetOrCreateCopyTextureForBrowserPipeline(
+ device, destination->texture->GetFormat().format));
+
+ // Prepare bind group layout.
+ Ref<BindGroupLayoutBase> layout;
+ DAWN_TRY_ASSIGN(layout, pipeline->GetBindGroupLayout(0));
+
+ Extent3D srcTextureSize = source->texture->GetSize();
+
+ // Prepare binding 0 resource: uniform buffer.
+ Uniform uniformData = {
+ copySize->width / static_cast<float>(srcTextureSize.width),
+ copySize->height / static_cast<float>(srcTextureSize.height), // scale
+ source->origin.x / static_cast<float>(srcTextureSize.width),
+ source->origin.y / static_cast<float>(srcTextureSize.height) // offset
+ };
+
+    // Handle flipY. FlipY here means we flip the source texture first and then
+    // do the copy. This helps in the case where the source texture is flipped and
+    // the copy needs to undo the flip.
+ if (options->flipY) {
+ uniformData.scaleY *= -1.0;
+ uniformData.offsetY += copySize->height / static_cast<float>(srcTextureSize.height);
}
- MaybeError DoCopyTextureForBrowser(DeviceBase* device,
- const ImageCopyTexture* source,
- const ImageCopyTexture* destination,
- const Extent3D* copySize,
- const CopyTextureForBrowserOptions* options) {
- // TODO(crbug.com/dawn/856): In D3D12 and Vulkan, compatible texture format can directly
- // copy to each other. This can be a potential fast path.
-
- // Noop copy
- if (copySize->width == 0 || copySize->height == 0 || copySize->depthOrArrayLayers == 0) {
- return {};
- }
-
- bool isSrgbDstFormat = IsSrgbDstFormat(destination->texture->GetFormat().format);
- RenderPipelineBase* pipeline;
- DAWN_TRY_ASSIGN(pipeline, GetOrCreateCopyTextureForBrowserPipeline(
- device, destination->texture->GetFormat().format));
-
- // Prepare bind group layout.
- Ref<BindGroupLayoutBase> layout;
- DAWN_TRY_ASSIGN(layout, pipeline->GetBindGroupLayout(0));
-
- Extent3D srcTextureSize = source->texture->GetSize();
-
- // Prepare binding 0 resource: uniform buffer.
- Uniform uniformData = {
- copySize->width / static_cast<float>(srcTextureSize.width),
- copySize->height / static_cast<float>(srcTextureSize.height), // scale
- source->origin.x / static_cast<float>(srcTextureSize.width),
- source->origin.y / static_cast<float>(srcTextureSize.height) // offset
- };
-
- // Handle flipY. FlipY here means we flip the source texture firstly and then
- // do copy. This helps on the case which source texture is flipped and the copy
- // need to unpack the flip.
- if (options->flipY) {
- uniformData.scaleY *= -1.0;
- uniformData.offsetY += copySize->height / static_cast<float>(srcTextureSize.height);
- }
-
- uint32_t stepsMask = 0u;
-
- // Steps to do color space conversion
- // From https://skia.org/docs/user/color/
- // - unpremultiply if the source color is premultiplied; Alpha is not involved in color
- // management, and we need to divide it out if it’s multiplied in.
- // - linearize the source color using the source color space’s transfer function
- // - convert those unpremultiplied, linear source colors to XYZ D50 gamut by multiplying by
- // a 3x3 matrix.
- // - convert those XYZ D50 colors to the destination gamut by multiplying by a 3x3 matrix.
- // - encode that color using the inverse of the destination color space’s transfer function.
- // - premultiply by alpha if the destination is premultiplied.
- // The reason to choose XYZ D50 as intermediate color space:
- // From http://www.brucelindbloom.com/index.html?WorkingSpaceInfo.html
- // "Since the Lab TIFF specification, the ICC profile specification and
- // Adobe Photoshop all use a D50"
- constexpr uint32_t kUnpremultiplyStep = 0x01;
- constexpr uint32_t kDecodeToLinearStep = 0x02;
- constexpr uint32_t kConvertToDstGamutStep = 0x04;
- constexpr uint32_t kEncodeToGammaStep = 0x08;
- constexpr uint32_t kPremultiplyStep = 0x10;
- constexpr uint32_t kDecodeForSrgbDstFormat = 0x20;
-
- if (options->srcAlphaMode == wgpu::AlphaMode::Premultiplied) {
- if (options->needsColorSpaceConversion ||
- options->srcAlphaMode != options->dstAlphaMode) {
- stepsMask |= kUnpremultiplyStep;
- }
- }
-
- if (options->needsColorSpaceConversion) {
- stepsMask |= kDecodeToLinearStep;
- const float* decodingParams = options->srcTransferFunctionParameters;
-
- uniformData.gammaDecodingParams = {
- decodingParams[0], decodingParams[1], decodingParams[2], decodingParams[3],
- decodingParams[4], decodingParams[5], decodingParams[6]};
-
- stepsMask |= kConvertToDstGamutStep;
- const float* matrix = options->conversionMatrix;
- uniformData.conversionMatrix = {{
- matrix[0],
- matrix[1],
- matrix[2],
- 0.0,
- matrix[3],
- matrix[4],
- matrix[5],
- 0.0,
- matrix[6],
- matrix[7],
- matrix[8],
- 0.0,
- }};
-
- stepsMask |= kEncodeToGammaStep;
- const float* encodingParams = options->dstTransferFunctionParameters;
-
- uniformData.gammaEncodingParams = {
- encodingParams[0], encodingParams[1], encodingParams[2], encodingParams[3],
- encodingParams[4], encodingParams[5], encodingParams[6]};
+ uint32_t stepsMask = 0u;
+
+ // Steps to do color space conversion
+ // From https://skia.org/docs/user/color/
+ // - unpremultiply if the source color is premultiplied; Alpha is not involved in color
+ // management, and we need to divide it out if it’s multiplied in.
+ // - linearize the source color using the source color space’s transfer function
+ // - convert those unpremultiplied, linear source colors to XYZ D50 gamut by multiplying by
+ // a 3x3 matrix.
+ // - convert those XYZ D50 colors to the destination gamut by multiplying by a 3x3 matrix.
+ // - encode that color using the inverse of the destination color space’s transfer function.
+ // - premultiply by alpha if the destination is premultiplied.
+ // The reason to choose XYZ D50 as intermediate color space:
+ // From http://www.brucelindbloom.com/index.html?WorkingSpaceInfo.html
+ // "Since the Lab TIFF specification, the ICC profile specification and
+ // Adobe Photoshop all use a D50"
+ constexpr uint32_t kUnpremultiplyStep = 0x01;
+ constexpr uint32_t kDecodeToLinearStep = 0x02;
+ constexpr uint32_t kConvertToDstGamutStep = 0x04;
+ constexpr uint32_t kEncodeToGammaStep = 0x08;
+ constexpr uint32_t kPremultiplyStep = 0x10;
+ constexpr uint32_t kDecodeForSrgbDstFormat = 0x20;
+
+ if (options->srcAlphaMode == wgpu::AlphaMode::Premultiplied) {
+ if (options->needsColorSpaceConversion || options->srcAlphaMode != options->dstAlphaMode) {
+ stepsMask |= kUnpremultiplyStep;
}
+ }
- if (options->dstAlphaMode == wgpu::AlphaMode::Premultiplied) {
- if (options->needsColorSpaceConversion ||
- options->srcAlphaMode != options->dstAlphaMode) {
- stepsMask |= kPremultiplyStep;
- }
- }
+ if (options->needsColorSpaceConversion) {
+ stepsMask |= kDecodeToLinearStep;
+ const float* decodingParams = options->srcTransferFunctionParameters;
+
+ uniformData.gammaDecodingParams = {decodingParams[0], decodingParams[1], decodingParams[2],
+ decodingParams[3], decodingParams[4], decodingParams[5],
+ decodingParams[6]};
+
+ stepsMask |= kConvertToDstGamutStep;
+ const float* matrix = options->conversionMatrix;
+ uniformData.conversionMatrix = {{
+ matrix[0],
+ matrix[1],
+ matrix[2],
+ 0.0,
+ matrix[3],
+ matrix[4],
+ matrix[5],
+ 0.0,
+ matrix[6],
+ matrix[7],
+ matrix[8],
+ 0.0,
+ }};
+
+ stepsMask |= kEncodeToGammaStep;
+ const float* encodingParams = options->dstTransferFunctionParameters;
+
+ uniformData.gammaEncodingParams = {encodingParams[0], encodingParams[1], encodingParams[2],
+ encodingParams[3], encodingParams[4], encodingParams[5],
+ encodingParams[6]};
+ }
- // Copy to *-srgb texture should keep the bytes exactly the same as copy
- // to non-srgb texture. Add an extra decode-to-linear step so that after the
- // sampler of *-srgb format texture applying encoding, the bytes keeps the same
- // as non-srgb format texture.
- // NOTE: CopyTextureForBrowser() doesn't need to accept *-srgb format texture as
- // source input. But above operation also valid for *-srgb format texture input and
- // non-srgb format dst texture.
- // TODO(crbug.com/dawn/1195): Reinterpret to non-srgb texture view on *-srgb texture
- // and use it as render attachment when possible.
- // TODO(crbug.com/dawn/1195): Opt the condition for this extra step. It is possible to
- // bypass this extra step in some cases.
- if (isSrgbDstFormat) {
- stepsMask |= kDecodeForSrgbDstFormat;
- // Get gamma-linear conversion params from https://en.wikipedia.org/wiki/SRGB with some
- // mathematics. Order: {G, A, B, C, D, E, F, }
- uniformData.gammaDecodingForDstSrgbParams = {
- 2.4, 1.0 / 1.055, 0.055 / 1.055, 1.0 / 12.92, 4.045e-02, 0.0, 0.0};
+ if (options->dstAlphaMode == wgpu::AlphaMode::Premultiplied) {
+ if (options->needsColorSpaceConversion || options->srcAlphaMode != options->dstAlphaMode) {
+ stepsMask |= kPremultiplyStep;
}
+ }
- uniformData.stepsMask = stepsMask;
-
- Ref<BufferBase> uniformBuffer;
- DAWN_TRY_ASSIGN(
- uniformBuffer,
- utils::CreateBufferFromData(
- device, wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::Uniform, {uniformData}));
-
- // Prepare binding 1 resource: sampler
- // Use default configuration, filterMode set to Nearest for min and mag.
- SamplerDescriptor samplerDesc = {};
- Ref<SamplerBase> sampler;
- DAWN_TRY_ASSIGN(sampler, device->CreateSampler(&samplerDesc));
-
- // Prepare binding 2 resource: sampled texture
- TextureViewDescriptor srcTextureViewDesc = {};
- srcTextureViewDesc.baseMipLevel = source->mipLevel;
- srcTextureViewDesc.mipLevelCount = 1;
- srcTextureViewDesc.arrayLayerCount = 1;
- Ref<TextureViewBase> srcTextureView;
- DAWN_TRY_ASSIGN(srcTextureView,
- device->CreateTextureView(source->texture, &srcTextureViewDesc));
-
- // Create bind group after all binding entries are set.
- Ref<BindGroupBase> bindGroup;
- DAWN_TRY_ASSIGN(bindGroup, utils::MakeBindGroup(
- device, layout,
- {{0, uniformBuffer}, {1, sampler}, {2, srcTextureView}}));
-
- // Create command encoder.
- Ref<CommandEncoder> encoder;
- DAWN_TRY_ASSIGN(encoder, device->CreateCommandEncoder());
-
- // Prepare dst texture view as color Attachment.
- TextureViewDescriptor dstTextureViewDesc;
- dstTextureViewDesc.baseMipLevel = destination->mipLevel;
- dstTextureViewDesc.mipLevelCount = 1;
- dstTextureViewDesc.baseArrayLayer = destination->origin.z;
- dstTextureViewDesc.arrayLayerCount = 1;
- Ref<TextureViewBase> dstView;
-
- DAWN_TRY_ASSIGN(dstView,
- device->CreateTextureView(destination->texture, &dstTextureViewDesc));
- // Prepare render pass color attachment descriptor.
- RenderPassColorAttachment colorAttachmentDesc;
-
- colorAttachmentDesc.view = dstView.Get();
- colorAttachmentDesc.loadOp = wgpu::LoadOp::Load;
- colorAttachmentDesc.storeOp = wgpu::StoreOp::Store;
- colorAttachmentDesc.clearValue = {0.0, 0.0, 0.0, 1.0};
-
- // Create render pass.
- RenderPassDescriptor renderPassDesc;
- renderPassDesc.colorAttachmentCount = 1;
- renderPassDesc.colorAttachments = &colorAttachmentDesc;
- Ref<RenderPassEncoder> passEncoder = encoder->BeginRenderPass(&renderPassDesc);
-
- // Start pipeline and encode commands to complete
- // the copy from src texture to dst texture with transformation.
- passEncoder->APISetPipeline(pipeline);
- passEncoder->APISetBindGroup(0, bindGroup.Get());
- passEncoder->APISetViewport(destination->origin.x, destination->origin.y, copySize->width,
- copySize->height, 0.0, 1.0);
- passEncoder->APIDraw(3);
- passEncoder->APIEnd();
-
- // Finsh encoding.
- Ref<CommandBufferBase> commandBuffer;
- DAWN_TRY_ASSIGN(commandBuffer, encoder->Finish());
- CommandBufferBase* submitCommandBuffer = commandBuffer.Get();
-
- // Submit command buffer.
- device->GetQueue()->APISubmit(1, &submitCommandBuffer);
- return {};
+    // A copy to a *-srgb texture should keep the bytes exactly the same as a copy
+    // to a non-srgb texture. Add an extra decode-to-linear step so that, after the
+    // *-srgb format texture applies its encoding, the bytes stay the same as they
+    // would for a non-srgb format texture.
+    // NOTE: CopyTextureForBrowser() doesn't need to accept *-srgb format textures as
+    // source input, but the operation above is also valid for *-srgb source textures
+    // and non-srgb format dst textures.
+    // TODO(crbug.com/dawn/1195): Reinterpret the *-srgb texture as a non-srgb texture
+    // view and use it as the render attachment when possible.
+    // TODO(crbug.com/dawn/1195): Optimize the condition for this extra step; it can be
+    // bypassed in some cases.
+ if (isSrgbDstFormat) {
+ stepsMask |= kDecodeForSrgbDstFormat;
+        // The gamma-to-linear conversion params are derived from
+        // https://en.wikipedia.org/wiki/SRGB with some algebra. Order: {G, A, B, C, D, E, F}
+ uniformData.gammaDecodingForDstSrgbParams = {
+ 2.4, 1.0 / 1.055, 0.055 / 1.055, 1.0 / 12.92, 4.045e-02, 0.0, 0.0};
}
+ uniformData.stepsMask = stepsMask;
+
+ Ref<BufferBase> uniformBuffer;
+ DAWN_TRY_ASSIGN(
+ uniformBuffer,
+ utils::CreateBufferFromData(device, wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::Uniform,
+ {uniformData}));
+
+ // Prepare binding 1 resource: sampler
+ // Use default configuration, filterMode set to Nearest for min and mag.
+ SamplerDescriptor samplerDesc = {};
+ Ref<SamplerBase> sampler;
+ DAWN_TRY_ASSIGN(sampler, device->CreateSampler(&samplerDesc));
+
+ // Prepare binding 2 resource: sampled texture
+ TextureViewDescriptor srcTextureViewDesc = {};
+ srcTextureViewDesc.dimension = wgpu::TextureViewDimension::e2D;
+ srcTextureViewDesc.baseMipLevel = source->mipLevel;
+ srcTextureViewDesc.mipLevelCount = 1;
+ srcTextureViewDesc.arrayLayerCount = 1;
+ Ref<TextureViewBase> srcTextureView;
+ DAWN_TRY_ASSIGN(srcTextureView,
+ device->CreateTextureView(source->texture, &srcTextureViewDesc));
+
+ // Create bind group after all binding entries are set.
+ Ref<BindGroupBase> bindGroup;
+ DAWN_TRY_ASSIGN(bindGroup,
+ utils::MakeBindGroup(device, layout,
+ {{0, uniformBuffer}, {1, sampler}, {2, srcTextureView}}));
+
+ // Create command encoder.
+ Ref<CommandEncoder> encoder;
+ DAWN_TRY_ASSIGN(encoder, device->CreateCommandEncoder());
+
+    // Prepare the dst texture view as the color attachment.
+ TextureViewDescriptor dstTextureViewDesc;
+ dstTextureViewDesc.dimension = wgpu::TextureViewDimension::e2D;
+ dstTextureViewDesc.baseMipLevel = destination->mipLevel;
+ dstTextureViewDesc.mipLevelCount = 1;
+ dstTextureViewDesc.baseArrayLayer = destination->origin.z;
+ dstTextureViewDesc.arrayLayerCount = 1;
+ Ref<TextureViewBase> dstView;
+
+ DAWN_TRY_ASSIGN(dstView, device->CreateTextureView(destination->texture, &dstTextureViewDesc));
+ // Prepare render pass color attachment descriptor.
+ RenderPassColorAttachment colorAttachmentDesc;
+
+ colorAttachmentDesc.view = dstView.Get();
+ colorAttachmentDesc.loadOp = wgpu::LoadOp::Load;
+ colorAttachmentDesc.storeOp = wgpu::StoreOp::Store;
+ colorAttachmentDesc.clearValue = {0.0, 0.0, 0.0, 1.0};
+
+ // Create render pass.
+ RenderPassDescriptor renderPassDesc;
+ renderPassDesc.colorAttachmentCount = 1;
+ renderPassDesc.colorAttachments = &colorAttachmentDesc;
+ Ref<RenderPassEncoder> passEncoder = encoder->BeginRenderPass(&renderPassDesc);
+
+ // Start pipeline and encode commands to complete
+ // the copy from src texture to dst texture with transformation.
+ passEncoder->APISetPipeline(pipeline);
+ passEncoder->APISetBindGroup(0, bindGroup.Get());
+ passEncoder->APISetViewport(destination->origin.x, destination->origin.y, copySize->width,
+ copySize->height, 0.0, 1.0);
+ passEncoder->APIDraw(3);
+ passEncoder->APIEnd();
+
+    // Finish encoding.
+ Ref<CommandBufferBase> commandBuffer;
+ DAWN_TRY_ASSIGN(commandBuffer, encoder->Finish());
+ CommandBufferBase* submitCommandBuffer = commandBuffer.Get();
+
+ // Submit command buffer.
+ device->GetQueue()->APISubmit(1, &submitCommandBuffer);
+ return {};
+}
+
} // namespace dawn::native
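
[Editor's note] DoCopyTextureForBrowser() above encodes the per-copy color pipeline (unpremultiply, decode to linear, gamut conversion, gamma encode, premultiply, srgb-destination decode) as a bitmask passed to the shader through Uniform::stepsMask. The standalone sketch below reproduces just that mask computation with the same flag values shown in the diff; the Options and AlphaMode types are simplified stand-ins for the wgpu descriptors, not Dawn's API.

    #include <cstdint>
    #include <cstdio>

    enum class AlphaMode { Opaque, Premultiplied, Unpremultiplied };

    struct Options {
        bool needsColorSpaceConversion = false;
        bool srgbDstFormat = false;
        AlphaMode srcAlphaMode = AlphaMode::Unpremultiplied;
        AlphaMode dstAlphaMode = AlphaMode::Unpremultiplied;
    };

    uint32_t ComputeStepsMask(const Options& o) {
        // Same flag values as in the file above.
        constexpr uint32_t kUnpremultiplyStep = 0x01;
        constexpr uint32_t kDecodeToLinearStep = 0x02;
        constexpr uint32_t kConvertToDstGamutStep = 0x04;
        constexpr uint32_t kEncodeToGammaStep = 0x08;
        constexpr uint32_t kPremultiplyStep = 0x10;
        constexpr uint32_t kDecodeForSrgbDstFormat = 0x20;

        uint32_t mask = 0;
        bool alphaModeChanges = o.srcAlphaMode != o.dstAlphaMode;
        if (o.srcAlphaMode == AlphaMode::Premultiplied &&
            (o.needsColorSpaceConversion || alphaModeChanges)) {
            mask |= kUnpremultiplyStep;
        }
        if (o.needsColorSpaceConversion) {
            mask |= kDecodeToLinearStep | kConvertToDstGamutStep | kEncodeToGammaStep;
        }
        if (o.dstAlphaMode == AlphaMode::Premultiplied &&
            (o.needsColorSpaceConversion || alphaModeChanges)) {
            mask |= kPremultiplyStep;
        }
        if (o.srgbDstFormat) {
            mask |= kDecodeForSrgbDstFormat;
        }
        return mask;
    }

    int main() {
        Options o;
        o.needsColorSpaceConversion = true;
        o.srcAlphaMode = AlphaMode::Premultiplied;
        o.dstAlphaMode = AlphaMode::Premultiplied;
        std::printf("stepsMask = 0x%02x\n", ComputeStepsMask(o));  // prints 0x1f
        return 0;
    }
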
diff --git a/chromium/third_party/dawn/src/dawn/native/CopyTextureForBrowserHelper.h b/chromium/third_party/dawn/src/dawn/native/CopyTextureForBrowserHelper.h
index 86e3135a698..0e427ba8e98 100644
--- a/chromium/third_party/dawn/src/dawn/native/CopyTextureForBrowserHelper.h
+++ b/chromium/third_party/dawn/src/dawn/native/CopyTextureForBrowserHelper.h
@@ -19,22 +19,22 @@
#include "dawn/native/ObjectBase.h"
namespace dawn::native {
- class DeviceBase;
- struct Extent3D;
- struct ImageCopyTexture;
- struct CopyTextureForBrowserOptions;
+class DeviceBase;
+struct Extent3D;
+struct ImageCopyTexture;
+struct CopyTextureForBrowserOptions;
- MaybeError ValidateCopyTextureForBrowser(DeviceBase* device,
- const ImageCopyTexture* source,
- const ImageCopyTexture* destination,
- const Extent3D* copySize,
- const CopyTextureForBrowserOptions* options);
+MaybeError ValidateCopyTextureForBrowser(DeviceBase* device,
+ const ImageCopyTexture* source,
+ const ImageCopyTexture* destination,
+ const Extent3D* copySize,
+ const CopyTextureForBrowserOptions* options);
- MaybeError DoCopyTextureForBrowser(DeviceBase* device,
- const ImageCopyTexture* source,
- const ImageCopyTexture* destination,
- const Extent3D* copySize,
- const CopyTextureForBrowserOptions* options);
+MaybeError DoCopyTextureForBrowser(DeviceBase* device,
+ const ImageCopyTexture* source,
+ const ImageCopyTexture* destination,
+ const Extent3D* copySize,
+ const CopyTextureForBrowserOptions* options);
} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/CreatePipelineAsyncTask.cpp b/chromium/third_party/dawn/src/dawn/native/CreatePipelineAsyncTask.cpp
index 92a3bdf951c..e7319abc270 100644
--- a/chromium/third_party/dawn/src/dawn/native/CreatePipelineAsyncTask.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/CreatePipelineAsyncTask.cpp
@@ -14,6 +14,8 @@
#include "dawn/native/CreatePipelineAsyncTask.h"
+#include <utility>
+
#include "dawn/native/AsyncTask.h"
#include "dawn/native/ComputePipeline.h"
#include "dawn/native/Device.h"
@@ -24,183 +26,185 @@
namespace dawn::native {
- CreatePipelineAsyncCallbackTaskBase::CreatePipelineAsyncCallbackTaskBase(
- std::string errorMessage,
- void* userdata)
- : mErrorMessage(errorMessage), mUserData(userdata) {
- }
+CreatePipelineAsyncCallbackTaskBase::CreatePipelineAsyncCallbackTaskBase(std::string errorMessage,
+ void* userdata)
+ : mErrorMessage(errorMessage), mUserData(userdata) {}
- CreateComputePipelineAsyncCallbackTask::CreateComputePipelineAsyncCallbackTask(
- Ref<ComputePipelineBase> pipeline,
- std::string errorMessage,
- WGPUCreateComputePipelineAsyncCallback callback,
- void* userdata)
- : CreatePipelineAsyncCallbackTaskBase(errorMessage, userdata),
- mPipeline(std::move(pipeline)),
- mCreateComputePipelineAsyncCallback(callback) {
- }
+CreatePipelineAsyncCallbackTaskBase::~CreatePipelineAsyncCallbackTaskBase() = default;
- void CreateComputePipelineAsyncCallbackTask::Finish() {
- ASSERT(mCreateComputePipelineAsyncCallback != nullptr);
+CreateComputePipelineAsyncCallbackTask::CreateComputePipelineAsyncCallbackTask(
+ Ref<ComputePipelineBase> pipeline,
+ std::string errorMessage,
+ WGPUCreateComputePipelineAsyncCallback callback,
+ void* userdata)
+ : CreatePipelineAsyncCallbackTaskBase(errorMessage, userdata),
+ mPipeline(std::move(pipeline)),
+ mCreateComputePipelineAsyncCallback(callback) {}
- if (mPipeline.Get() != nullptr) {
- mCreateComputePipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_Success,
- ToAPI(mPipeline.Detach()), "", mUserData);
- } else {
- mCreateComputePipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_Error, nullptr,
- mErrorMessage.c_str(), mUserData);
- }
- }
-
- void CreateComputePipelineAsyncCallbackTask::HandleShutDown() {
- ASSERT(mCreateComputePipelineAsyncCallback != nullptr);
-
- mCreateComputePipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_DeviceDestroyed, nullptr,
- "Device destroyed before callback", mUserData);
- }
+CreateComputePipelineAsyncCallbackTask::~CreateComputePipelineAsyncCallbackTask() = default;
- void CreateComputePipelineAsyncCallbackTask::HandleDeviceLoss() {
- ASSERT(mCreateComputePipelineAsyncCallback != nullptr);
+void CreateComputePipelineAsyncCallbackTask::Finish() {
+ ASSERT(mCreateComputePipelineAsyncCallback != nullptr);
- mCreateComputePipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_DeviceLost, nullptr,
- "Device lost before callback", mUserData);
+ if (mPipeline.Get() != nullptr) {
+ mCreateComputePipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_Success,
+ ToAPI(mPipeline.Detach()), "", mUserData);
+ } else {
+ mCreateComputePipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_Error, nullptr,
+ mErrorMessage.c_str(), mUserData);
}
-
- CreateRenderPipelineAsyncCallbackTask::CreateRenderPipelineAsyncCallbackTask(
- Ref<RenderPipelineBase> pipeline,
- std::string errorMessage,
- WGPUCreateRenderPipelineAsyncCallback callback,
- void* userdata)
- : CreatePipelineAsyncCallbackTaskBase(errorMessage, userdata),
- mPipeline(std::move(pipeline)),
- mCreateRenderPipelineAsyncCallback(callback) {
+}
+
+void CreateComputePipelineAsyncCallbackTask::HandleShutDown() {
+ ASSERT(mCreateComputePipelineAsyncCallback != nullptr);
+
+ mCreateComputePipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_DeviceDestroyed, nullptr,
+ "Device destroyed before callback", mUserData);
+}
+
+void CreateComputePipelineAsyncCallbackTask::HandleDeviceLoss() {
+ ASSERT(mCreateComputePipelineAsyncCallback != nullptr);
+
+ mCreateComputePipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_DeviceLost, nullptr,
+ "Device lost before callback", mUserData);
+}
+
+CreateRenderPipelineAsyncCallbackTask::CreateRenderPipelineAsyncCallbackTask(
+ Ref<RenderPipelineBase> pipeline,
+ std::string errorMessage,
+ WGPUCreateRenderPipelineAsyncCallback callback,
+ void* userdata)
+ : CreatePipelineAsyncCallbackTaskBase(errorMessage, userdata),
+ mPipeline(std::move(pipeline)),
+ mCreateRenderPipelineAsyncCallback(callback) {}
+
+CreateRenderPipelineAsyncCallbackTask::~CreateRenderPipelineAsyncCallbackTask() = default;
+
+void CreateRenderPipelineAsyncCallbackTask::Finish() {
+ ASSERT(mCreateRenderPipelineAsyncCallback != nullptr);
+
+ if (mPipeline.Get() != nullptr) {
+ mCreateRenderPipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_Success,
+ ToAPI(mPipeline.Detach()), "", mUserData);
+ } else {
+ mCreateRenderPipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_Error, nullptr,
+ mErrorMessage.c_str(), mUserData);
}
-
- void CreateRenderPipelineAsyncCallbackTask::Finish() {
- ASSERT(mCreateRenderPipelineAsyncCallback != nullptr);
-
- if (mPipeline.Get() != nullptr) {
- mCreateRenderPipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_Success,
- ToAPI(mPipeline.Detach()), "", mUserData);
- } else {
- mCreateRenderPipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_Error, nullptr,
- mErrorMessage.c_str(), mUserData);
- }
+}
+
+void CreateRenderPipelineAsyncCallbackTask::HandleShutDown() {
+ ASSERT(mCreateRenderPipelineAsyncCallback != nullptr);
+
+ mCreateRenderPipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_DeviceDestroyed, nullptr,
+ "Device destroyed before callback", mUserData);
+}
+
+void CreateRenderPipelineAsyncCallbackTask::HandleDeviceLoss() {
+ ASSERT(mCreateRenderPipelineAsyncCallback != nullptr);
+
+ mCreateRenderPipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_DeviceLost, nullptr,
+ "Device lost before callback", mUserData);
+}
+
+CreateComputePipelineAsyncTask::CreateComputePipelineAsyncTask(
+ Ref<ComputePipelineBase> nonInitializedComputePipeline,
+ WGPUCreateComputePipelineAsyncCallback callback,
+ void* userdata)
+ : mComputePipeline(std::move(nonInitializedComputePipeline)),
+ mCallback(callback),
+ mUserdata(userdata) {
+ ASSERT(mComputePipeline != nullptr);
+}
+
+CreateComputePipelineAsyncTask::~CreateComputePipelineAsyncTask() = default;
+
+void CreateComputePipelineAsyncTask::Run() {
+ const char* eventLabel = utils::GetLabelForTrace(mComputePipeline->GetLabel().c_str());
+
+ DeviceBase* device = mComputePipeline->GetDevice();
+ TRACE_EVENT_FLOW_END1(device->GetPlatform(), General,
+ "CreateComputePipelineAsyncTask::RunAsync", this, "label", eventLabel);
+ TRACE_EVENT1(device->GetPlatform(), General, "CreateComputePipelineAsyncTask::Run", "label",
+ eventLabel);
+
+ MaybeError maybeError = mComputePipeline->Initialize();
+ std::string errorMessage;
+ if (maybeError.IsError()) {
+ mComputePipeline = nullptr;
+ errorMessage = maybeError.AcquireError()->GetMessage();
}
- void CreateRenderPipelineAsyncCallbackTask::HandleShutDown() {
- ASSERT(mCreateRenderPipelineAsyncCallback != nullptr);
-
- mCreateRenderPipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_DeviceDestroyed, nullptr,
- "Device destroyed before callback", mUserData);
+ device->AddComputePipelineAsyncCallbackTask(mComputePipeline, errorMessage, mCallback,
+ mUserdata);
+}
+
+void CreateComputePipelineAsyncTask::RunAsync(
+ std::unique_ptr<CreateComputePipelineAsyncTask> task) {
+ DeviceBase* device = task->mComputePipeline->GetDevice();
+
+ const char* eventLabel = utils::GetLabelForTrace(task->mComputePipeline->GetLabel().c_str());
+
+    // Capturing with "taskPtr = std::move(task)" causes a compilation error here even
+    // though init-captures have been supported since C++14:
+    // https://docs.microsoft.com/en-us/cpp/cpp/lambda-expressions-in-cpp?view=msvc-160
+ auto asyncTask = [taskPtr = task.release()] {
+        std::unique_ptr<CreateComputePipelineAsyncTask> innerTaskPtr(taskPtr);
+        innerTaskPtr->Run();
+ };
+
+ TRACE_EVENT_FLOW_BEGIN1(device->GetPlatform(), General,
+ "CreateComputePipelineAsyncTask::RunAsync", task.get(), "label",
+ eventLabel);
+ device->GetAsyncTaskManager()->PostTask(std::move(asyncTask));
+}
+
+CreateRenderPipelineAsyncTask::CreateRenderPipelineAsyncTask(
+ Ref<RenderPipelineBase> nonInitializedRenderPipeline,
+ WGPUCreateRenderPipelineAsyncCallback callback,
+ void* userdata)
+ : mRenderPipeline(std::move(nonInitializedRenderPipeline)),
+ mCallback(callback),
+ mUserdata(userdata) {
+ ASSERT(mRenderPipeline != nullptr);
+}
+
+CreateRenderPipelineAsyncTask::~CreateRenderPipelineAsyncTask() = default;
+
+void CreateRenderPipelineAsyncTask::Run() {
+ const char* eventLabel = utils::GetLabelForTrace(mRenderPipeline->GetLabel().c_str());
+
+ DeviceBase* device = mRenderPipeline->GetDevice();
+ TRACE_EVENT_FLOW_END1(device->GetPlatform(), General, "CreateRenderPipelineAsyncTask::RunAsync",
+ this, "label", eventLabel);
+ TRACE_EVENT1(device->GetPlatform(), General, "CreateRenderPipelineAsyncTask::Run", "label",
+ eventLabel);
+
+ MaybeError maybeError = mRenderPipeline->Initialize();
+ std::string errorMessage;
+ if (maybeError.IsError()) {
+ mRenderPipeline = nullptr;
+ errorMessage = maybeError.AcquireError()->GetMessage();
}
- void CreateRenderPipelineAsyncCallbackTask::HandleDeviceLoss() {
- ASSERT(mCreateRenderPipelineAsyncCallback != nullptr);
+ device->AddRenderPipelineAsyncCallbackTask(mRenderPipeline, errorMessage, mCallback, mUserdata);
+}
- mCreateRenderPipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_DeviceLost, nullptr,
- "Device lost before callback", mUserData);
- }
+void CreateRenderPipelineAsyncTask::RunAsync(std::unique_ptr<CreateRenderPipelineAsyncTask> task) {
+ DeviceBase* device = task->mRenderPipeline->GetDevice();
- CreateComputePipelineAsyncTask::CreateComputePipelineAsyncTask(
- Ref<ComputePipelineBase> nonInitializedComputePipeline,
- WGPUCreateComputePipelineAsyncCallback callback,
- void* userdata)
- : mComputePipeline(std::move(nonInitializedComputePipeline)),
- mCallback(callback),
- mUserdata(userdata) {
- ASSERT(mComputePipeline != nullptr);
- }
+ const char* eventLabel = utils::GetLabelForTrace(task->mRenderPipeline->GetLabel().c_str());
- void CreateComputePipelineAsyncTask::Run() {
- const char* eventLabel = utils::GetLabelForTrace(mComputePipeline->GetLabel().c_str());
-
- DeviceBase* device = mComputePipeline->GetDevice();
- TRACE_EVENT_FLOW_END1(device->GetPlatform(), General,
- "CreateComputePipelineAsyncTask::RunAsync", this, "label",
- eventLabel);
- TRACE_EVENT1(device->GetPlatform(), General, "CreateComputePipelineAsyncTask::Run", "label",
- eventLabel);
-
- MaybeError maybeError = mComputePipeline->Initialize();
- std::string errorMessage;
- if (maybeError.IsError()) {
- mComputePipeline = nullptr;
- errorMessage = maybeError.AcquireError()->GetMessage();
- }
-
- device->AddComputePipelineAsyncCallbackTask(mComputePipeline, errorMessage, mCallback,
- mUserdata);
- }
+    // Capturing with "taskPtr = std::move(task)" causes a compilation error here even
+    // though init-captures have been supported since C++14:
+    // https://docs.microsoft.com/en-us/cpp/cpp/lambda-expressions-in-cpp?view=msvc-160
+ auto asyncTask = [taskPtr = task.release()] {
+ std::unique_ptr<CreateRenderPipelineAsyncTask> innerTaskPtr(taskPtr);
+ innerTaskPtr->Run();
+ };
- void CreateComputePipelineAsyncTask::RunAsync(
- std::unique_ptr<CreateComputePipelineAsyncTask> task) {
- DeviceBase* device = task->mComputePipeline->GetDevice();
-
- const char* eventLabel =
- utils::GetLabelForTrace(task->mComputePipeline->GetLabel().c_str());
-
- // Using "taskPtr = std::move(task)" causes compilation error while it should be supported
- // since C++14:
- // https://docs.microsoft.com/en-us/cpp/cpp/lambda-expressions-in-cpp?view=msvc-160
- auto asyncTask = [taskPtr = task.release()] {
- std::unique_ptr<CreateComputePipelineAsyncTask> innnerTaskPtr(taskPtr);
- innnerTaskPtr->Run();
- };
-
- TRACE_EVENT_FLOW_BEGIN1(device->GetPlatform(), General,
- "CreateComputePipelineAsyncTask::RunAsync", task.get(), "label",
- eventLabel);
- device->GetAsyncTaskManager()->PostTask(std::move(asyncTask));
- }
-
- CreateRenderPipelineAsyncTask::CreateRenderPipelineAsyncTask(
- Ref<RenderPipelineBase> nonInitializedRenderPipeline,
- WGPUCreateRenderPipelineAsyncCallback callback,
- void* userdata)
- : mRenderPipeline(std::move(nonInitializedRenderPipeline)),
- mCallback(callback),
- mUserdata(userdata) {
- ASSERT(mRenderPipeline != nullptr);
- }
-
- void CreateRenderPipelineAsyncTask::Run() {
- const char* eventLabel = utils::GetLabelForTrace(mRenderPipeline->GetLabel().c_str());
-
- DeviceBase* device = mRenderPipeline->GetDevice();
- TRACE_EVENT_FLOW_END1(device->GetPlatform(), General,
- "CreateRenderPipelineAsyncTask::RunAsync", this, "label", eventLabel);
- TRACE_EVENT1(device->GetPlatform(), General, "CreateRenderPipelineAsyncTask::Run", "label",
- eventLabel);
-
- MaybeError maybeError = mRenderPipeline->Initialize();
- std::string errorMessage;
- if (maybeError.IsError()) {
- mRenderPipeline = nullptr;
- errorMessage = maybeError.AcquireError()->GetMessage();
- }
-
- device->AddRenderPipelineAsyncCallbackTask(mRenderPipeline, errorMessage, mCallback,
- mUserdata);
- }
-
- void CreateRenderPipelineAsyncTask::RunAsync(
- std::unique_ptr<CreateRenderPipelineAsyncTask> task) {
- DeviceBase* device = task->mRenderPipeline->GetDevice();
-
- const char* eventLabel = utils::GetLabelForTrace(task->mRenderPipeline->GetLabel().c_str());
-
- // Using "taskPtr = std::move(task)" causes compilation error while it should be supported
- // since C++14:
- // https://docs.microsoft.com/en-us/cpp/cpp/lambda-expressions-in-cpp?view=msvc-160
- auto asyncTask = [taskPtr = task.release()] {
- std::unique_ptr<CreateRenderPipelineAsyncTask> innerTaskPtr(taskPtr);
- innerTaskPtr->Run();
- };
-
- TRACE_EVENT_FLOW_BEGIN1(device->GetPlatform(), General,
- "CreateRenderPipelineAsyncTask::RunAsync", task.get(), "label",
- eventLabel);
- device->GetAsyncTaskManager()->PostTask(std::move(asyncTask));
- }
+ TRACE_EVENT_FLOW_BEGIN1(device->GetPlatform(), General,
+ "CreateRenderPipelineAsyncTask::RunAsync", task.get(), "label",
+ eventLabel);
+ device->GetAsyncTaskManager()->PostTask(std::move(asyncTask));
+}
} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/CreatePipelineAsyncTask.h b/chromium/third_party/dawn/src/dawn/native/CreatePipelineAsyncTask.h
index 28497264c0b..f461c1d1806 100644
--- a/chromium/third_party/dawn/src/dawn/native/CreatePipelineAsyncTask.h
+++ b/chromium/third_party/dawn/src/dawn/native/CreatePipelineAsyncTask.h
@@ -15,6 +15,9 @@
#ifndef SRC_DAWN_NATIVE_CREATEPIPELINEASYNCTASK_H_
#define SRC_DAWN_NATIVE_CREATEPIPELINEASYNCTASK_H_
+#include <memory>
+#include <string>
+
#include "dawn/common/RefCounted.h"
#include "dawn/native/CallbackTaskManager.h"
#include "dawn/native/Error.h"
@@ -22,86 +25,91 @@
namespace dawn::native {
- class ComputePipelineBase;
- class DeviceBase;
- class PipelineLayoutBase;
- class RenderPipelineBase;
- class ShaderModuleBase;
- struct FlatComputePipelineDescriptor;
-
- struct CreatePipelineAsyncCallbackTaskBase : CallbackTask {
- CreatePipelineAsyncCallbackTaskBase(std::string errorMessage, void* userData);
-
- protected:
- std::string mErrorMessage;
- void* mUserData;
- };
-
- struct CreateComputePipelineAsyncCallbackTask : CreatePipelineAsyncCallbackTaskBase {
- CreateComputePipelineAsyncCallbackTask(Ref<ComputePipelineBase> pipeline,
- std::string errorMessage,
- WGPUCreateComputePipelineAsyncCallback callback,
- void* userdata);
-
- void Finish() override;
- void HandleShutDown() final;
- void HandleDeviceLoss() final;
-
- protected:
- Ref<ComputePipelineBase> mPipeline;
- WGPUCreateComputePipelineAsyncCallback mCreateComputePipelineAsyncCallback;
- };
-
- struct CreateRenderPipelineAsyncCallbackTask : CreatePipelineAsyncCallbackTaskBase {
- CreateRenderPipelineAsyncCallbackTask(Ref<RenderPipelineBase> pipeline,
- std::string errorMessage,
- WGPUCreateRenderPipelineAsyncCallback callback,
- void* userdata);
-
- void Finish() override;
- void HandleShutDown() final;
- void HandleDeviceLoss() final;
-
- protected:
- Ref<RenderPipelineBase> mPipeline;
- WGPUCreateRenderPipelineAsyncCallback mCreateRenderPipelineAsyncCallback;
- };
-
- // CreateComputePipelineAsyncTask defines all the inputs and outputs of
- // CreateComputePipelineAsync() tasks, which are the same among all the backends.
- class CreateComputePipelineAsyncTask {
- public:
- CreateComputePipelineAsyncTask(Ref<ComputePipelineBase> nonInitializedComputePipeline,
- WGPUCreateComputePipelineAsyncCallback callback,
- void* userdata);
-
- void Run();
-
- static void RunAsync(std::unique_ptr<CreateComputePipelineAsyncTask> task);
-
- private:
- Ref<ComputePipelineBase> mComputePipeline;
- WGPUCreateComputePipelineAsyncCallback mCallback;
- void* mUserdata;
- };
-
- // CreateRenderPipelineAsyncTask defines all the inputs and outputs of
- // CreateRenderPipelineAsync() tasks, which are the same among all the backends.
- class CreateRenderPipelineAsyncTask {
- public:
- CreateRenderPipelineAsyncTask(Ref<RenderPipelineBase> nonInitializedRenderPipeline,
- WGPUCreateRenderPipelineAsyncCallback callback,
- void* userdata);
-
- void Run();
-
- static void RunAsync(std::unique_ptr<CreateRenderPipelineAsyncTask> task);
-
- private:
- Ref<RenderPipelineBase> mRenderPipeline;
- WGPUCreateRenderPipelineAsyncCallback mCallback;
- void* mUserdata;
- };
+class ComputePipelineBase;
+class DeviceBase;
+class PipelineLayoutBase;
+class RenderPipelineBase;
+class ShaderModuleBase;
+struct FlatComputePipelineDescriptor;
+
+struct CreatePipelineAsyncCallbackTaskBase : CallbackTask {
+ CreatePipelineAsyncCallbackTaskBase(std::string errorMessage, void* userData);
+ ~CreatePipelineAsyncCallbackTaskBase() override;
+
+ protected:
+ std::string mErrorMessage;
+ void* mUserData;
+};
+
+struct CreateComputePipelineAsyncCallbackTask : CreatePipelineAsyncCallbackTaskBase {
+ CreateComputePipelineAsyncCallbackTask(Ref<ComputePipelineBase> pipeline,
+ std::string errorMessage,
+ WGPUCreateComputePipelineAsyncCallback callback,
+ void* userdata);
+ ~CreateComputePipelineAsyncCallbackTask() override;
+
+ void Finish() override;
+ void HandleShutDown() final;
+ void HandleDeviceLoss() final;
+
+ protected:
+ Ref<ComputePipelineBase> mPipeline;
+ WGPUCreateComputePipelineAsyncCallback mCreateComputePipelineAsyncCallback;
+};
+
+struct CreateRenderPipelineAsyncCallbackTask : CreatePipelineAsyncCallbackTaskBase {
+ CreateRenderPipelineAsyncCallbackTask(Ref<RenderPipelineBase> pipeline,
+ std::string errorMessage,
+ WGPUCreateRenderPipelineAsyncCallback callback,
+ void* userdata);
+ ~CreateRenderPipelineAsyncCallbackTask() override;
+
+ void Finish() override;
+ void HandleShutDown() final;
+ void HandleDeviceLoss() final;
+
+ protected:
+ Ref<RenderPipelineBase> mPipeline;
+ WGPUCreateRenderPipelineAsyncCallback mCreateRenderPipelineAsyncCallback;
+};
+
+// CreateComputePipelineAsyncTask defines all the inputs and outputs of
+// CreateComputePipelineAsync() tasks, which are the same among all the backends.
+class CreateComputePipelineAsyncTask {
+ public:
+ CreateComputePipelineAsyncTask(Ref<ComputePipelineBase> nonInitializedComputePipeline,
+ WGPUCreateComputePipelineAsyncCallback callback,
+ void* userdata);
+ ~CreateComputePipelineAsyncTask();
+
+ void Run();
+
+ static void RunAsync(std::unique_ptr<CreateComputePipelineAsyncTask> task);
+
+ private:
+ Ref<ComputePipelineBase> mComputePipeline;
+ WGPUCreateComputePipelineAsyncCallback mCallback;
+ void* mUserdata;
+};
+
+// CreateRenderPipelineAsyncTask defines all the inputs and outputs of
+// CreateRenderPipelineAsync() tasks, which are the same among all the backends.
+class CreateRenderPipelineAsyncTask {
+ public:
+ CreateRenderPipelineAsyncTask(Ref<RenderPipelineBase> nonInitializedRenderPipeline,
+ WGPUCreateRenderPipelineAsyncCallback callback,
+ void* userdata);
+ ~CreateRenderPipelineAsyncTask();
+
+ void Run();
+
+ static void RunAsync(std::unique_ptr<CreateRenderPipelineAsyncTask> task);
+
+ private:
+ Ref<RenderPipelineBase> mRenderPipeline;
+ WGPUCreateRenderPipelineAsyncCallback mCallback;
+ void* mUserdata;
+};
} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/DawnNative.cpp b/chromium/third_party/dawn/src/dawn/native/DawnNative.cpp
index 136685c0f19..7eaf0c72ccc 100644
--- a/chromium/third_party/dawn/src/dawn/native/DawnNative.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/DawnNative.cpp
@@ -14,6 +14,8 @@
#include "dawn/native/DawnNative.h"
+#include <vector>
+
#include "dawn/common/Log.h"
#include "dawn/native/BindGroupLayout.h"
#include "dawn/native/Buffer.h"
@@ -26,288 +28,294 @@
namespace dawn::native {
- namespace {
- struct ComboDeprecatedDawnDeviceDescriptor : DeviceDescriptor {
- explicit ComboDeprecatedDawnDeviceDescriptor(
- const DawnDeviceDescriptor* deviceDescriptor) {
- dawn::WarningLog() << "DawnDeviceDescriptor is deprecated. Please use "
- "WGPUDeviceDescriptor instead.";
-
- DeviceDescriptor* desc = this;
-
- if (deviceDescriptor != nullptr) {
- desc->nextInChain = &mTogglesDesc;
- mTogglesDesc.forceEnabledToggles = deviceDescriptor->forceEnabledToggles.data();
- mTogglesDesc.forceEnabledTogglesCount =
- deviceDescriptor->forceEnabledToggles.size();
- mTogglesDesc.forceDisabledToggles =
- deviceDescriptor->forceDisabledToggles.data();
- mTogglesDesc.forceDisabledTogglesCount =
- deviceDescriptor->forceDisabledToggles.size();
-
- desc->requiredLimits =
- reinterpret_cast<const RequiredLimits*>(deviceDescriptor->requiredLimits);
-
- FeaturesInfo featuresInfo;
- for (const char* featureStr : deviceDescriptor->requiredFeatures) {
- mRequiredFeatures.push_back(featuresInfo.FeatureNameToAPIEnum(featureStr));
- }
- desc->requiredFeatures = mRequiredFeatures.data();
- desc->requiredFeaturesCount = mRequiredFeatures.size();
- }
- }
-
- DawnTogglesDeviceDescriptor mTogglesDesc = {};
- std::vector<wgpu::FeatureName> mRequiredFeatures = {};
- };
- } // namespace
-
- const DawnProcTable& GetProcsAutogen();
-
- const DawnProcTable& GetProcs() {
- return GetProcsAutogen();
- }
-
- std::vector<const char*> GetTogglesUsed(WGPUDevice device) {
- return FromAPI(device)->GetTogglesUsed();
- }
-
- // Adapter
+namespace {
+struct ComboDeprecatedDawnDeviceDescriptor : DeviceDescriptor {
+ explicit ComboDeprecatedDawnDeviceDescriptor(const DawnDeviceDescriptor* deviceDescriptor) {
+ dawn::WarningLog() << "DawnDeviceDescriptor is deprecated. Please use "
+ "WGPUDeviceDescriptor instead.";
- Adapter::Adapter() = default;
+ DeviceDescriptor* desc = this;
- Adapter::Adapter(AdapterBase* impl) : mImpl(impl) {
- if (mImpl != nullptr) {
- mImpl->Reference();
- }
- }
-
- Adapter::~Adapter() {
- if (mImpl != nullptr) {
- mImpl->Release();
- }
- mImpl = nullptr;
- }
+ if (deviceDescriptor != nullptr) {
+ desc->nextInChain = &mTogglesDesc;
+ mTogglesDesc.forceEnabledToggles = deviceDescriptor->forceEnabledToggles.data();
+ mTogglesDesc.forceEnabledTogglesCount = deviceDescriptor->forceEnabledToggles.size();
+ mTogglesDesc.forceDisabledToggles = deviceDescriptor->forceDisabledToggles.data();
+ mTogglesDesc.forceDisabledTogglesCount = deviceDescriptor->forceDisabledToggles.size();
- Adapter::Adapter(const Adapter& other) : Adapter(other.mImpl) {
- }
+ desc->requiredLimits =
+ reinterpret_cast<const RequiredLimits*>(deviceDescriptor->requiredLimits);
- Adapter& Adapter::operator=(const Adapter& other) {
- if (this != &other) {
- if (mImpl) {
- mImpl->Release();
- }
- mImpl = other.mImpl;
- if (mImpl) {
- mImpl->Reference();
+ FeaturesInfo featuresInfo;
+ for (const char* featureStr : deviceDescriptor->requiredFeatures) {
+ mRequiredFeatures.push_back(featuresInfo.FeatureNameToAPIEnum(featureStr));
}
+ desc->requiredFeatures = mRequiredFeatures.data();
+ desc->requiredFeaturesCount = mRequiredFeatures.size();
}
- return *this;
}
- void Adapter::GetProperties(wgpu::AdapterProperties* properties) const {
- GetProperties(reinterpret_cast<WGPUAdapterProperties*>(properties));
- }
+ DawnTogglesDeviceDescriptor mTogglesDesc = {};
+ std::vector<wgpu::FeatureName> mRequiredFeatures = {};
+};
+} // namespace
- void Adapter::GetProperties(WGPUAdapterProperties* properties) const {
- mImpl->APIGetProperties(FromAPI(properties));
- }
+const DawnProcTable& GetProcsAutogen();
- WGPUAdapter Adapter::Get() const {
- return ToAPI(mImpl);
- }
+const DawnProcTable& GetProcs() {
+ return GetProcsAutogen();
+}
- std::vector<const char*> Adapter::GetSupportedFeatures() const {
- FeaturesSet supportedFeaturesSet = mImpl->GetSupportedFeatures();
- return supportedFeaturesSet.GetEnabledFeatureNames();
- }
+std::vector<const char*> GetTogglesUsed(WGPUDevice device) {
+ return FromAPI(device)->GetTogglesUsed();
+}
- WGPUDeviceProperties Adapter::GetAdapterProperties() const {
- return mImpl->GetAdapterProperties();
- }
+// DawnDeviceDescriptor
- bool Adapter::GetLimits(WGPUSupportedLimits* limits) const {
- return mImpl->GetLimits(FromAPI(limits));
- }
+DawnDeviceDescriptor::DawnDeviceDescriptor() = default;
- void Adapter::SetUseTieredLimits(bool useTieredLimits) {
- mImpl->SetUseTieredLimits(useTieredLimits);
- }
+DawnDeviceDescriptor::~DawnDeviceDescriptor() = default;
- bool Adapter::SupportsExternalImages() const {
- return mImpl->SupportsExternalImages();
- }
-
- Adapter::operator bool() const {
- return mImpl != nullptr;
- }
-
- WGPUDevice Adapter::CreateDevice(const DawnDeviceDescriptor* deviceDescriptor) {
- ComboDeprecatedDawnDeviceDescriptor desc(deviceDescriptor);
- return ToAPI(mImpl->APICreateDevice(&desc));
- }
-
- WGPUDevice Adapter::CreateDevice(const wgpu::DeviceDescriptor* deviceDescriptor) {
- return CreateDevice(reinterpret_cast<const WGPUDeviceDescriptor*>(deviceDescriptor));
- }
+// Adapter
- WGPUDevice Adapter::CreateDevice(const WGPUDeviceDescriptor* deviceDescriptor) {
- return ToAPI(mImpl->APICreateDevice(FromAPI(deviceDescriptor)));
- }
+Adapter::Adapter() = default;
- void Adapter::RequestDevice(const DawnDeviceDescriptor* descriptor,
- WGPURequestDeviceCallback callback,
- void* userdata) {
- ComboDeprecatedDawnDeviceDescriptor desc(descriptor);
- mImpl->APIRequestDevice(&desc, callback, userdata);
+Adapter::Adapter(AdapterBase* impl) : mImpl(impl) {
+ if (mImpl != nullptr) {
+ mImpl->Reference();
}
+}
- void Adapter::RequestDevice(const wgpu::DeviceDescriptor* descriptor,
- WGPURequestDeviceCallback callback,
- void* userdata) {
- mImpl->APIRequestDevice(reinterpret_cast<const DeviceDescriptor*>(descriptor), callback,
- userdata);
+Adapter::~Adapter() {
+ if (mImpl != nullptr) {
+ mImpl->Release();
}
+ mImpl = nullptr;
+}
- void Adapter::RequestDevice(const WGPUDeviceDescriptor* descriptor,
- WGPURequestDeviceCallback callback,
- void* userdata) {
- mImpl->APIRequestDevice(reinterpret_cast<const DeviceDescriptor*>(descriptor), callback,
- userdata);
- }
+Adapter::Adapter(const Adapter& other) : Adapter(other.mImpl) {}
- void Adapter::ResetInternalDeviceForTesting() {
- mImpl->ResetInternalDeviceForTesting();
+Adapter& Adapter::operator=(const Adapter& other) {
+ if (this != &other) {
+ if (mImpl) {
+ mImpl->Release();
+ }
+ mImpl = other.mImpl;
+ if (mImpl) {
+ mImpl->Reference();
+ }
}
-
- // AdapterDiscoverOptionsBase
-
- AdapterDiscoveryOptionsBase::AdapterDiscoveryOptionsBase(WGPUBackendType type)
- : backendType(type) {
+ return *this;
+}
+
+void Adapter::GetProperties(wgpu::AdapterProperties* properties) const {
+ GetProperties(reinterpret_cast<WGPUAdapterProperties*>(properties));
+}
+
+void Adapter::GetProperties(WGPUAdapterProperties* properties) const {
+ mImpl->APIGetProperties(FromAPI(properties));
+}
+
+WGPUAdapter Adapter::Get() const {
+ return ToAPI(mImpl);
+}
+
+std::vector<const char*> Adapter::GetSupportedFeatures() const {
+ FeaturesSet supportedFeaturesSet = mImpl->GetSupportedFeatures();
+ return supportedFeaturesSet.GetEnabledFeatureNames();
+}
+
+WGPUDeviceProperties Adapter::GetAdapterProperties() const {
+ return mImpl->GetAdapterProperties();
+}
+
+bool Adapter::GetLimits(WGPUSupportedLimits* limits) const {
+ return mImpl->GetLimits(FromAPI(limits));
+}
+
+void Adapter::SetUseTieredLimits(bool useTieredLimits) {
+ mImpl->SetUseTieredLimits(useTieredLimits);
+}
+
+bool Adapter::SupportsExternalImages() const {
+ return mImpl->SupportsExternalImages();
+}
+
+Adapter::operator bool() const {
+ return mImpl != nullptr;
+}
+
+WGPUDevice Adapter::CreateDevice(const DawnDeviceDescriptor* deviceDescriptor) {
+ ComboDeprecatedDawnDeviceDescriptor desc(deviceDescriptor);
+ return ToAPI(mImpl->APICreateDevice(&desc));
+}
+
+WGPUDevice Adapter::CreateDevice(const wgpu::DeviceDescriptor* deviceDescriptor) {
+ return CreateDevice(reinterpret_cast<const WGPUDeviceDescriptor*>(deviceDescriptor));
+}
+
+WGPUDevice Adapter::CreateDevice(const WGPUDeviceDescriptor* deviceDescriptor) {
+ return ToAPI(mImpl->APICreateDevice(FromAPI(deviceDescriptor)));
+}
+
+void Adapter::RequestDevice(const DawnDeviceDescriptor* descriptor,
+ WGPURequestDeviceCallback callback,
+ void* userdata) {
+ ComboDeprecatedDawnDeviceDescriptor desc(descriptor);
+ mImpl->APIRequestDevice(&desc, callback, userdata);
+}
+
+void Adapter::RequestDevice(const wgpu::DeviceDescriptor* descriptor,
+ WGPURequestDeviceCallback callback,
+ void* userdata) {
+ mImpl->APIRequestDevice(reinterpret_cast<const DeviceDescriptor*>(descriptor), callback,
+ userdata);
+}
+
+void Adapter::RequestDevice(const WGPUDeviceDescriptor* descriptor,
+ WGPURequestDeviceCallback callback,
+ void* userdata) {
+ mImpl->APIRequestDevice(reinterpret_cast<const DeviceDescriptor*>(descriptor), callback,
+ userdata);
+}
+
+void Adapter::ResetInternalDeviceForTesting() {
+ mImpl->ResetInternalDeviceForTesting();
+}
+
+// AdapterDiscoverOptionsBase
+
+AdapterDiscoveryOptionsBase::AdapterDiscoveryOptionsBase(WGPUBackendType type)
+ : backendType(type) {}
+
+// Instance
+
+Instance::Instance(const WGPUInstanceDescriptor* desc)
+ : mImpl(APICreateInstance(reinterpret_cast<const InstanceDescriptor*>(desc))) {}
+
+Instance::~Instance() {
+ if (mImpl != nullptr) {
+ mImpl->Release();
+ mImpl = nullptr;
}
+}
- // Instance
+void Instance::DiscoverDefaultAdapters() {
+ mImpl->DiscoverDefaultAdapters();
+}
- Instance::Instance(const WGPUInstanceDescriptor* desc)
- : mImpl(APICreateInstance(reinterpret_cast<const InstanceDescriptor*>(desc))) {
- }
+bool Instance::DiscoverAdapters(const AdapterDiscoveryOptionsBase* options) {
+ return mImpl->DiscoverAdapters(options);
+}
- Instance::~Instance() {
- if (mImpl != nullptr) {
- mImpl->Release();
- mImpl = nullptr;
- }
+std::vector<Adapter> Instance::GetAdapters() const {
+    // Adapters are owned by mImpl so it is safe to return non-RAII pointers to them
+ std::vector<Adapter> adapters;
+ for (const Ref<AdapterBase>& adapter : mImpl->GetAdapters()) {
+ adapters.push_back(Adapter(adapter.Get()));
}
+ return adapters;
+}
- void Instance::DiscoverDefaultAdapters() {
- mImpl->DiscoverDefaultAdapters();
- }
+const ToggleInfo* Instance::GetToggleInfo(const char* toggleName) {
+ return mImpl->GetToggleInfo(toggleName);
+}
- bool Instance::DiscoverAdapters(const AdapterDiscoveryOptionsBase* options) {
- return mImpl->DiscoverAdapters(options);
- }
+const FeatureInfo* Instance::GetFeatureInfo(WGPUFeatureName feature) {
+ return mImpl->GetFeatureInfo(static_cast<wgpu::FeatureName>(feature));
+}
- std::vector<Adapter> Instance::GetAdapters() const {
- // Adapters are owned by mImpl so it is safe to return non RAII pointers to them
- std::vector<Adapter> adapters;
- for (const Ref<AdapterBase>& adapter : mImpl->GetAdapters()) {
- adapters.push_back(Adapter(adapter.Get()));
- }
- return adapters;
+void Instance::EnableBackendValidation(bool enableBackendValidation) {
+ if (enableBackendValidation) {
+ mImpl->SetBackendValidationLevel(BackendValidationLevel::Full);
}
+}
- const ToggleInfo* Instance::GetToggleInfo(const char* toggleName) {
- return mImpl->GetToggleInfo(toggleName);
- }
+void Instance::SetBackendValidationLevel(BackendValidationLevel level) {
+ mImpl->SetBackendValidationLevel(level);
+}
- const FeatureInfo* Instance::GetFeatureInfo(WGPUFeatureName feature) {
- return mImpl->GetFeatureInfo(static_cast<wgpu::FeatureName>(feature));
- }
+void Instance::EnableBeginCaptureOnStartup(bool beginCaptureOnStartup) {
+ mImpl->EnableBeginCaptureOnStartup(beginCaptureOnStartup);
+}
- void Instance::EnableBackendValidation(bool enableBackendValidation) {
- if (enableBackendValidation) {
- mImpl->SetBackendValidationLevel(BackendValidationLevel::Full);
- }
- }
+// TODO(dawn:1374) Deprecate this once it is passed via the descriptor.
+void Instance::SetPlatform(dawn::platform::Platform* platform) {
+ mImpl->SetPlatform(platform);
+}
- void Instance::SetBackendValidationLevel(BackendValidationLevel level) {
- mImpl->SetBackendValidationLevel(level);
- }
+uint64_t Instance::GetDeviceCountForTesting() const {
+ return mImpl->GetDeviceCountForTesting();
+}
- void Instance::EnableBeginCaptureOnStartup(bool beginCaptureOnStartup) {
- mImpl->EnableBeginCaptureOnStartup(beginCaptureOnStartup);
- }
+WGPUInstance Instance::Get() const {
+ return ToAPI(mImpl);
+}
- void Instance::SetPlatform(dawn::platform::Platform* platform) {
- mImpl->SetPlatform(platform);
- }
+size_t GetLazyClearCountForTesting(WGPUDevice device) {
+ return FromAPI(device)->GetLazyClearCountForTesting();
+}
- WGPUInstance Instance::Get() const {
- return ToAPI(mImpl);
- }
+size_t GetDeprecationWarningCountForTesting(WGPUDevice device) {
+ return FromAPI(device)->GetDeprecationWarningCountForTesting();
+}
- size_t GetLazyClearCountForTesting(WGPUDevice device) {
- return FromAPI(device)->GetLazyClearCountForTesting();
- }
+bool IsTextureSubresourceInitialized(WGPUTexture texture,
+ uint32_t baseMipLevel,
+ uint32_t levelCount,
+ uint32_t baseArrayLayer,
+ uint32_t layerCount,
+ WGPUTextureAspect cAspect) {
+ TextureBase* textureBase = FromAPI(texture);
- size_t GetDeprecationWarningCountForTesting(WGPUDevice device) {
- return FromAPI(device)->GetDeprecationWarningCountForTesting();
- }
+ Aspect aspect =
+ ConvertAspect(textureBase->GetFormat(), static_cast<wgpu::TextureAspect>(cAspect));
+ SubresourceRange range(aspect, {baseArrayLayer, layerCount}, {baseMipLevel, levelCount});
+ return textureBase->IsSubresourceContentInitialized(range);
+}
- bool IsTextureSubresourceInitialized(WGPUTexture texture,
- uint32_t baseMipLevel,
- uint32_t levelCount,
- uint32_t baseArrayLayer,
- uint32_t layerCount,
- WGPUTextureAspect cAspect) {
- TextureBase* textureBase = FromAPI(texture);
-
- Aspect aspect =
- ConvertAspect(textureBase->GetFormat(), static_cast<wgpu::TextureAspect>(cAspect));
- SubresourceRange range(aspect, {baseArrayLayer, layerCount}, {baseMipLevel, levelCount});
- return textureBase->IsSubresourceContentInitialized(range);
- }
+std::vector<const char*> GetProcMapNamesForTestingInternal();
- std::vector<const char*> GetProcMapNamesForTestingInternal();
+std::vector<const char*> GetProcMapNamesForTesting() {
+ return GetProcMapNamesForTestingInternal();
+}
- std::vector<const char*> GetProcMapNamesForTesting() {
- return GetProcMapNamesForTestingInternal();
- }
+DAWN_NATIVE_EXPORT bool DeviceTick(WGPUDevice device) {
+ return FromAPI(device)->APITick();
+}
- DAWN_NATIVE_EXPORT bool DeviceTick(WGPUDevice device) {
- return FromAPI(device)->APITick();
- }
+// ExternalImageDescriptor
- // ExternalImageDescriptor
+ExternalImageDescriptor::ExternalImageDescriptor(ExternalImageType type) : mType(type) {}
- ExternalImageDescriptor::ExternalImageDescriptor(ExternalImageType type) : mType(type) {
- }
+ExternalImageType ExternalImageDescriptor::GetType() const {
+ return mType;
+}
- ExternalImageType ExternalImageDescriptor::GetType() const {
- return mType;
- }
+// ExternalImageExportInfo
- // ExternalImageExportInfo
+ExternalImageExportInfo::ExternalImageExportInfo(ExternalImageType type) : mType(type) {}
- ExternalImageExportInfo::ExternalImageExportInfo(ExternalImageType type) : mType(type) {
- }
+ExternalImageType ExternalImageExportInfo::GetType() const {
+ return mType;
+}
- ExternalImageType ExternalImageExportInfo::GetType() const {
- return mType;
- }
+bool CheckIsErrorForTesting(void* objectHandle) {
+ return reinterpret_cast<ErrorMonad*>(objectHandle)->IsError();
+}
- const char* GetObjectLabelForTesting(void* objectHandle) {
- ApiObjectBase* object = reinterpret_cast<ApiObjectBase*>(objectHandle);
- return object->GetLabel().c_str();
- }
+const char* GetObjectLabelForTesting(void* objectHandle) {
+ ApiObjectBase* object = reinterpret_cast<ApiObjectBase*>(objectHandle);
+ return object->GetLabel().c_str();
+}
- uint64_t GetAllocatedSizeForTesting(WGPUBuffer buffer) {
- return FromAPI(buffer)->GetAllocatedSize();
- }
+uint64_t GetAllocatedSizeForTesting(WGPUBuffer buffer) {
+ return FromAPI(buffer)->GetAllocatedSize();
+}
- bool BindGroupLayoutBindingsEqualForTesting(WGPUBindGroupLayout a, WGPUBindGroupLayout b) {
- bool excludePipelineCompatibiltyToken = true;
- return FromAPI(a)->IsLayoutEqual(FromAPI(b), excludePipelineCompatibiltyToken);
- }
+bool BindGroupLayoutBindingsEqualForTesting(WGPUBindGroupLayout a, WGPUBindGroupLayout b) {
+ bool excludePipelineCompatibiltyToken = true;
+ return FromAPI(a)->IsLayoutEqual(FromAPI(b), excludePipelineCompatibiltyToken);
+}
} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/Device.cpp b/chromium/third_party/dawn/src/dawn/native/Device.cpp
index 45c20fed798..55435d810d2 100644
--- a/chromium/third_party/dawn/src/dawn/native/Device.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/Device.cpp
@@ -14,12 +14,18 @@
#include "dawn/native/Device.h"
+#include <algorithm>
+#include <array>
+#include <mutex>
+#include <unordered_set>
+
#include "dawn/common/Log.h"
#include "dawn/native/Adapter.h"
#include "dawn/native/AsyncTask.h"
#include "dawn/native/AttachmentState.h"
#include "dawn/native/BindGroup.h"
#include "dawn/native/BindGroupLayout.h"
+#include "dawn/native/BlobCache.h"
#include "dawn/native/Buffer.h"
#include "dawn/native/ChainUtils_autogen.h"
#include "dawn/native/CommandBuffer.h"
@@ -34,7 +40,7 @@
#include "dawn/native/Instance.h"
#include "dawn/native/InternalPipelineStore.h"
#include "dawn/native/ObjectType_autogen.h"
-#include "dawn/native/PersistentCache.h"
+#include "dawn/native/PipelineCache.h"
#include "dawn/native/QuerySet.h"
#include "dawn/native/Queue.h"
#include "dawn/native/RenderBundleEncoder.h"
@@ -48,256 +54,290 @@
#include "dawn/platform/DawnPlatform.h"
#include "dawn/platform/tracing/TraceEvent.h"
-#include <array>
-#include <mutex>
-#include <unordered_set>
-
namespace dawn::native {
- // DeviceBase sub-structures
-
- // The caches are unordered_sets of pointers with special hash and compare functions
- // to compare the value of the objects, instead of the pointers.
- template <typename Object>
- using ContentLessObjectCache =
- std::unordered_set<Object*, typename Object::HashFunc, typename Object::EqualityFunc>;
-
- struct DeviceBase::Caches {
- ~Caches() {
- ASSERT(attachmentStates.empty());
- ASSERT(bindGroupLayouts.empty());
- ASSERT(computePipelines.empty());
- ASSERT(pipelineLayouts.empty());
- ASSERT(renderPipelines.empty());
- ASSERT(samplers.empty());
- ASSERT(shaderModules.empty());
- }
-
- ContentLessObjectCache<AttachmentStateBlueprint> attachmentStates;
- ContentLessObjectCache<BindGroupLayoutBase> bindGroupLayouts;
- ContentLessObjectCache<ComputePipelineBase> computePipelines;
- ContentLessObjectCache<PipelineLayoutBase> pipelineLayouts;
- ContentLessObjectCache<RenderPipelineBase> renderPipelines;
- ContentLessObjectCache<SamplerBase> samplers;
- ContentLessObjectCache<ShaderModuleBase> shaderModules;
- };
+// DeviceBase sub-structures
+
+// The caches are unordered_sets of pointers with special hash and compare functions
+// to compare the value of the objects, instead of the pointers.
+template <typename Object>
+using ContentLessObjectCache =
+ std::unordered_set<Object*, typename Object::HashFunc, typename Object::EqualityFunc>;
+
+struct DeviceBase::Caches {
+ ~Caches() {
+ ASSERT(attachmentStates.empty());
+ ASSERT(bindGroupLayouts.empty());
+ ASSERT(computePipelines.empty());
+ ASSERT(pipelineLayouts.empty());
+ ASSERT(renderPipelines.empty());
+ ASSERT(samplers.empty());
+ ASSERT(shaderModules.empty());
+ }
+
+ ContentLessObjectCache<AttachmentStateBlueprint> attachmentStates;
+ ContentLessObjectCache<BindGroupLayoutBase> bindGroupLayouts;
+ ContentLessObjectCache<ComputePipelineBase> computePipelines;
+ ContentLessObjectCache<PipelineLayoutBase> pipelineLayouts;
+ ContentLessObjectCache<RenderPipelineBase> renderPipelines;
+ ContentLessObjectCache<SamplerBase> samplers;
+ ContentLessObjectCache<ShaderModuleBase> shaderModules;
+};
+
+struct DeviceBase::DeprecationWarnings {
+ std::unordered_set<std::string> emitted;
+ size_t count = 0;
+};
+
+namespace {
+struct LoggingCallbackTask : CallbackTask {
+ public:
+ LoggingCallbackTask() = delete;
+ LoggingCallbackTask(wgpu::LoggingCallback loggingCallback,
+ WGPULoggingType loggingType,
+ const char* message,
+ void* userdata)
+ : mCallback(loggingCallback),
+ mLoggingType(loggingType),
+ mMessage(message),
+ mUserdata(userdata) {
+        // Since Finish() will be called at an uncertain time in the future, when the message
+        // may already be disposed, we must keep a local copy in the CallbackTask.
+ }
+
+ void Finish() override { mCallback(mLoggingType, mMessage.c_str(), mUserdata); }
+
+ void HandleShutDown() override {
+ // Do the logging anyway
+ mCallback(mLoggingType, mMessage.c_str(), mUserdata);
+ }
+
+ void HandleDeviceLoss() override { mCallback(mLoggingType, mMessage.c_str(), mUserdata); }
+
+ private:
+    // As all deferred callback tasks will be triggered before modifying the registered
+    // callback or shutting down, we are ensured that the callback function and userdata
+    // pointer stored in tasks are valid when triggered.
+ wgpu::LoggingCallback mCallback;
+ WGPULoggingType mLoggingType;
+ std::string mMessage;
+ void* mUserdata;
+};
+
+ResultOrError<Ref<PipelineLayoutBase>> ValidateLayoutAndGetComputePipelineDescriptorWithDefaults(
+ DeviceBase* device,
+ const ComputePipelineDescriptor& descriptor,
+ ComputePipelineDescriptor* outDescriptor) {
+ Ref<PipelineLayoutBase> layoutRef;
+ *outDescriptor = descriptor;
+
+ if (outDescriptor->layout == nullptr) {
+ DAWN_TRY_ASSIGN(layoutRef, PipelineLayoutBase::CreateDefault(
+ device, {{
+ SingleShaderStage::Compute,
+ outDescriptor->compute.module,
+ outDescriptor->compute.entryPoint,
+ outDescriptor->compute.constantCount,
+ outDescriptor->compute.constants,
+ }}));
+ outDescriptor->layout = layoutRef.Get();
+ }
+
+ return layoutRef;
+}
+
+ResultOrError<Ref<PipelineLayoutBase>> ValidateLayoutAndGetRenderPipelineDescriptorWithDefaults(
+ DeviceBase* device,
+ const RenderPipelineDescriptor& descriptor,
+ RenderPipelineDescriptor* outDescriptor) {
+ Ref<PipelineLayoutBase> layoutRef;
+ *outDescriptor = descriptor;
+
+ if (descriptor.layout == nullptr) {
+ // Ref will keep the pipeline layout alive until the end of the function where
+ // the pipeline will take another reference.
+ DAWN_TRY_ASSIGN(layoutRef,
+ PipelineLayoutBase::CreateDefault(
+ device, GetRenderStagesAndSetPlaceholderShader(device, &descriptor)));
+ outDescriptor->layout = layoutRef.Get();
+ }
- struct DeviceBase::DeprecationWarnings {
- std::unordered_set<std::string> emitted;
- size_t count = 0;
- };
+ return layoutRef;
+}
- namespace {
- struct LoggingCallbackTask : CallbackTask {
- public:
- LoggingCallbackTask() = delete;
- LoggingCallbackTask(wgpu::LoggingCallback loggingCallback,
- WGPULoggingType loggingType,
- const char* message,
- void* userdata)
- : mCallback(loggingCallback),
- mLoggingType(loggingType),
- mMessage(message),
- mUserdata(userdata) {
- // Since the Finish() will be called in uncertain future in which time the message
- // may already disposed, we must keep a local copy in the CallbackTask.
- }
+} // anonymous namespace
- void Finish() override {
- mCallback(mLoggingType, mMessage.c_str(), mUserdata);
- }
+// DeviceBase
- void HandleShutDown() override {
- // Do the logging anyway
- mCallback(mLoggingType, mMessage.c_str(), mUserdata);
- }
+DeviceBase::DeviceBase(AdapterBase* adapter, const DeviceDescriptor* descriptor)
+ : mInstance(adapter->GetInstance()), mAdapter(adapter), mNextPipelineCompatibilityToken(1) {
+ mInstance->IncrementDeviceCountForTesting();
+ ASSERT(descriptor != nullptr);
- void HandleDeviceLoss() override {
- mCallback(mLoggingType, mMessage.c_str(), mUserdata);
- }
+ AdapterProperties adapterProperties;
+ adapter->APIGetProperties(&adapterProperties);
- private:
- // As all deferred callback tasks will be triggered before modifying the registered
- // callback or shutting down, we are ensured that callback function and userdata pointer
- // stored in tasks is valid when triggered.
- wgpu::LoggingCallback mCallback;
- WGPULoggingType mLoggingType;
- std::string mMessage;
- void* mUserdata;
- };
+ const DawnTogglesDeviceDescriptor* togglesDesc = nullptr;
+ FindInChain(descriptor->nextInChain, &togglesDesc);
+ if (togglesDesc != nullptr) {
+ ApplyToggleOverrides(togglesDesc);
+ }
- ResultOrError<Ref<PipelineLayoutBase>>
- ValidateLayoutAndGetComputePipelineDescriptorWithDefaults(
- DeviceBase* device,
- const ComputePipelineDescriptor& descriptor,
- ComputePipelineDescriptor* outDescriptor) {
- Ref<PipelineLayoutBase> layoutRef;
- *outDescriptor = descriptor;
-
- if (outDescriptor->layout == nullptr) {
- DAWN_TRY_ASSIGN(layoutRef, PipelineLayoutBase::CreateDefault(
- device, {{
- SingleShaderStage::Compute,
- outDescriptor->compute.module,
- outDescriptor->compute.entryPoint,
- outDescriptor->compute.constantCount,
- outDescriptor->compute.constants,
- }}));
- outDescriptor->layout = layoutRef.Get();
- }
+ SetDefaultToggles();
+ ApplyFeatures(descriptor);
- return layoutRef;
- }
+ DawnCacheDeviceDescriptor defaultCacheDesc = {};
+ const DawnCacheDeviceDescriptor* cacheDesc = nullptr;
+ FindInChain(descriptor->nextInChain, &cacheDesc);
+ if (cacheDesc == nullptr) {
+ cacheDesc = &defaultCacheDesc;
+ }
- ResultOrError<Ref<PipelineLayoutBase>>
- ValidateLayoutAndGetRenderPipelineDescriptorWithDefaults(
- DeviceBase* device,
- const RenderPipelineDescriptor& descriptor,
- RenderPipelineDescriptor* outDescriptor) {
- Ref<PipelineLayoutBase> layoutRef;
- *outDescriptor = descriptor;
-
- if (descriptor.layout == nullptr) {
- // Ref will keep the pipeline layout alive until the end of the function where
- // the pipeline will take another reference.
- DAWN_TRY_ASSIGN(layoutRef,
- PipelineLayoutBase::CreateDefault(
- device, GetRenderStagesAndSetDummyShader(device, &descriptor)));
- outDescriptor->layout = layoutRef.Get();
- }
+ if (descriptor->requiredLimits != nullptr) {
+ mLimits.v1 = ReifyDefaultLimits(descriptor->requiredLimits->limits);
+ } else {
+ GetDefaultLimits(&mLimits.v1);
+ }
- return layoutRef;
- }
+ mFormatTable = BuildFormatTable(this);
- } // anonymous namespace
+ if (descriptor->label != nullptr && strlen(descriptor->label) != 0) {
+ mLabel = descriptor->label;
+ }
- // DeviceBase
+ // Record the cache key from the properties. Note that currently, if a new extension
+ // descriptor is added (and probably handled here), the cache key recording needs to be
+ // updated.
+ mDeviceCacheKey.Record(adapterProperties, mEnabledFeatures.featuresBitSet,
+ mEnabledToggles.toggleBitset, cacheDesc);
+}
- DeviceBase::DeviceBase(AdapterBase* adapter, const DeviceDescriptor* descriptor)
- : mInstance(adapter->GetInstance()), mAdapter(adapter), mNextPipelineCompatibilityToken(1) {
- ASSERT(descriptor != nullptr);
+DeviceBase::DeviceBase() : mState(State::Alive) {
+ mCaches = std::make_unique<DeviceBase::Caches>();
+}
- AdapterProperties adapterProperties;
- adapter->APIGetProperties(&adapterProperties);
+DeviceBase::~DeviceBase() {
+ // We need to explicitly release the Queue before we complete the destructor so that the
+ // Queue does not get destroyed after the Device.
+ mQueue = nullptr;
+ // mInstance is not set for mock test devices.
+ if (mInstance != nullptr) {
+ mInstance->DecrementDeviceCountForTesting();
+ }
+}
- const DawnTogglesDeviceDescriptor* togglesDesc = nullptr;
- FindInChain(descriptor->nextInChain, &togglesDesc);
- if (togglesDesc != nullptr) {
- ApplyToggleOverrides(togglesDesc);
- }
- ApplyFeatures(descriptor);
+MaybeError DeviceBase::Initialize(Ref<QueueBase> defaultQueue) {
+ SetWGSLExtensionAllowList();
- DawnCacheDeviceDescriptor defaultCacheDesc = {};
- const DawnCacheDeviceDescriptor* cacheDesc = nullptr;
- FindInChain(descriptor->nextInChain, &cacheDesc);
- if (cacheDesc == nullptr) {
- cacheDesc = &defaultCacheDesc;
- }
+ mQueue = std::move(defaultQueue);
- if (descriptor->requiredLimits != nullptr) {
- mLimits.v1 = ReifyDefaultLimits(descriptor->requiredLimits->limits);
- } else {
- GetDefaultLimits(&mLimits.v1);
+#if defined(DAWN_ENABLE_ASSERTS)
+ mUncapturedErrorCallback = [](WGPUErrorType, char const*, void*) {
+ static bool calledOnce = false;
+ if (!calledOnce) {
+ calledOnce = true;
+ dawn::WarningLog() << "No Dawn device uncaptured error callback was set. This is "
+ "probably not intended. If you really want to ignore errors "
+ "and suppress this message, set the callback to null.";
}
+ };
- mFormatTable = BuildFormatTable(this);
- SetDefaultToggles();
-
- if (descriptor->label != nullptr && strlen(descriptor->label) != 0) {
- mLabel = descriptor->label;
+ mDeviceLostCallback = [](WGPUDeviceLostReason, char const*, void*) {
+ static bool calledOnce = false;
+ if (!calledOnce) {
+ calledOnce = true;
+ dawn::WarningLog() << "No Dawn device lost callback was set. This is probably not "
+ "intended. If you really want to ignore device lost "
+ "and suppress this message, set the callback to null.";
}
+ };
+#endif // DAWN_ENABLE_ASSERTS
- // Record the cache key from the properties. Note that currently, if a new extension
- // descriptor is added (and probably handled here), the cache key recording needs to be
- // updated.
- mDeviceCacheKey.Record(adapterProperties, mEnabledFeatures.featuresBitSet,
- mEnabledToggles.toggleBitset, cacheDesc);
- }
-
- DeviceBase::DeviceBase() : mState(State::Alive) {
- mCaches = std::make_unique<DeviceBase::Caches>();
- }
+ mCaches = std::make_unique<DeviceBase::Caches>();
+ mErrorScopeStack = std::make_unique<ErrorScopeStack>();
+ mDynamicUploader = std::make_unique<DynamicUploader>(this);
+ mCallbackTaskManager = std::make_unique<CallbackTaskManager>();
+ mDeprecationWarnings = std::make_unique<DeprecationWarnings>();
+ mInternalPipelineStore = std::make_unique<InternalPipelineStore>(this);
- DeviceBase::~DeviceBase() {
- // We need to explicitly release the Queue before we complete the destructor so that the
- // Queue does not get destroyed after the Device.
- mQueue = nullptr;
- }
+ ASSERT(GetPlatform() != nullptr);
+ mWorkerTaskPool = GetPlatform()->CreateWorkerTaskPool();
+ mAsyncTaskManager = std::make_unique<AsyncTaskManager>(mWorkerTaskPool.get());
- MaybeError DeviceBase::Initialize(Ref<QueueBase> defaultQueue) {
- mQueue = std::move(defaultQueue);
+ // Starting from now the backend can start doing reentrant calls so the device is marked as
+ // alive.
+ mState = State::Alive;
-#if defined(DAWN_ENABLE_ASSERTS)
- mUncapturedErrorCallback = [](WGPUErrorType, char const*, void*) {
- static bool calledOnce = false;
- if (!calledOnce) {
- calledOnce = true;
- dawn::WarningLog() << "No Dawn device uncaptured error callback was set. This is "
- "probably not intended. If you really want to ignore errors "
- "and suppress this message, set the callback to null.";
- }
- };
-
- mDeviceLostCallback = [](WGPUDeviceLostReason, char const*, void*) {
- static bool calledOnce = false;
- if (!calledOnce) {
- calledOnce = true;
- dawn::WarningLog() << "No Dawn device lost callback was set. This is probably not "
- "intended. If you really want to ignore device lost "
- "and suppress this message, set the callback to null.";
- }
- };
-#endif // DAWN_ENABLE_ASSERTS
+ DAWN_TRY_ASSIGN(mEmptyBindGroupLayout, CreateEmptyBindGroupLayout());
- mCaches = std::make_unique<DeviceBase::Caches>();
- mErrorScopeStack = std::make_unique<ErrorScopeStack>();
- mDynamicUploader = std::make_unique<DynamicUploader>(this);
- mCallbackTaskManager = std::make_unique<CallbackTaskManager>();
- mDeprecationWarnings = std::make_unique<DeprecationWarnings>();
- mInternalPipelineStore = std::make_unique<InternalPipelineStore>(this);
- mPersistentCache = std::make_unique<PersistentCache>(this);
-
- ASSERT(GetPlatform() != nullptr);
- mWorkerTaskPool = GetPlatform()->CreateWorkerTaskPool();
- mAsyncTaskManager = std::make_unique<AsyncTaskManager>(mWorkerTaskPool.get());
-
- // Starting from now the backend can start doing reentrant calls so the device is marked as
- // alive.
- mState = State::Alive;
-
- DAWN_TRY_ASSIGN(mEmptyBindGroupLayout, CreateEmptyBindGroupLayout());
-
- // If dummy fragment shader module is needed, initialize it
- if (IsToggleEnabled(Toggle::UseDummyFragmentInVertexOnlyPipeline)) {
- // The empty fragment shader, used as a work around for vertex-only render pipeline
- constexpr char kEmptyFragmentShader[] = R"(
- @stage(fragment) fn fs_empty_main() {}
+ // If placeholder fragment shader module is needed, initialize it
+ if (IsToggleEnabled(Toggle::UsePlaceholderFragmentInVertexOnlyPipeline)) {
+        // The empty fragment shader, used as a workaround for vertex-only render pipelines.
+ constexpr char kEmptyFragmentShader[] = R"(
+ @fragment fn fs_empty_main() {}
)";
- ShaderModuleDescriptor descriptor;
- ShaderModuleWGSLDescriptor wgslDesc;
- wgslDesc.source = kEmptyFragmentShader;
- descriptor.nextInChain = &wgslDesc;
-
- DAWN_TRY_ASSIGN(mInternalPipelineStore->dummyFragmentShader,
- CreateShaderModule(&descriptor));
- }
-
- return {};
- }
-
- void DeviceBase::DestroyObjects() {
- // List of object types in reverse "dependency" order so we can iterate and delete the
- // objects safely. We define dependent here such that if B has a ref to A, then B depends on
- // A. We therefore try to destroy B before destroying A. Note that this only considers the
- // immediate frontend dependencies, while backend objects could add complications and extra
- // dependencies.
- //
- // Note that AttachmentState is not an ApiObject so it cannot be eagerly destroyed. However,
- // since AttachmentStates are cached by the device, objects that hold references to
- // AttachmentStates should make sure to un-ref them in their Destroy operation so that we
- // can destroy the frontend cache.
+ ShaderModuleDescriptor descriptor;
+ ShaderModuleWGSLDescriptor wgslDesc;
+ wgslDesc.source = kEmptyFragmentShader;
+ descriptor.nextInChain = &wgslDesc;
+
+ DAWN_TRY_ASSIGN(mInternalPipelineStore->placeholderFragmentShader,
+ CreateShaderModule(&descriptor));
+ }
+
+ return {};
+}
+
+void DeviceBase::WillDropLastExternalRef() {
+ // DeviceBase uses RefCountedWithExternalCount to break refcycles.
+ //
+ // DeviceBase holds multiple Refs to various API objects (pipelines, buffers, etc.) which are
+ // used to implement various device-level facilities. These objects are cached on the device,
+ // so we want to keep them around instead of making transient allocations. However, many of
+ // the objects also hold a Ref<Device> back to their parent device.
+ //
+ // In order to break this cycle and prevent leaks, when the application drops the last external
+ // ref and WillDropLastExternalRef is called, the device clears out any member refs to API
+ // objects that hold back-refs to the device - thus breaking any reference cycles.
+ //
+ // Currently, this is done by calling Destroy on the device to cease all in-flight work and
+ // drop references to internal objects. We may want to lift this in the future, but it would
+ // make things more complex because there might be pending tasks which hold a ref back to the
+ // device - either directly or indirectly. We would need to ensure those tasks don't create new
+ // reference cycles, and we would need to continuously try draining the pending tasks to clear
+ // out all remaining refs.
+ Destroy();
+
+    // Drop the device's reference to the queue. Because the application dropped the last
+    // external reference, it can no longer get the queue from APIGetQueue().
+ mQueue = nullptr;
+
+    // Reset the callbacks since, after dropping the last external reference, the
+    // application may have freed any device-scope memory needed to run the callback.
+ mUncapturedErrorCallback = [](WGPUErrorType, char const* message, void*) {
+ dawn::WarningLog() << "Uncaptured error after last external device reference dropped.\n"
+ << message;
+ };
- // clang-format off
+ mDeviceLostCallback = [](WGPUDeviceLostReason, char const* message, void*) {
+ dawn::WarningLog() << "Device lost after last external device reference dropped.\n"
+ << message;
+ };
+}
+
+void DeviceBase::DestroyObjects() {
+ // List of object types in reverse "dependency" order so we can iterate and delete the
+ // objects safely. We define dependent here such that if B has a ref to A, then B depends on
+ // A. We therefore try to destroy B before destroying A. Note that this only considers the
+ // immediate frontend dependencies, while backend objects could add complications and extra
+ // dependencies.
+ //
+ // Note that AttachmentState is not an ApiObject so it cannot be eagerly destroyed. However,
+ // since AttachmentStates are cached by the device, objects that hold references to
+ // AttachmentStates should make sure to un-ref them in their Destroy operation so that we
+ // can destroy the frontend cache.
+
+ // clang-format off
static constexpr std::array<ObjectType, 19> kObjectTypeDependencyOrder = {
ObjectType::ComputePassEncoder,
ObjectType::RenderPassEncoder,
@@ -319,1506 +359,1559 @@ namespace dawn::native {
ObjectType::Sampler,
ObjectType::Buffer,
};
- // clang-format on
-
- // We first move all objects out from the tracking list into a separate list so that we can
- // avoid locking the same mutex twice. We can then iterate across the separate list to call
- // the actual destroy function.
- LinkedList<ApiObjectBase> objects;
- for (ObjectType type : kObjectTypeDependencyOrder) {
- ApiObjectList& objList = mObjectLists[type];
- const std::lock_guard<std::mutex> lock(objList.mutex);
- objList.objects.MoveInto(&objects);
- }
- while (!objects.empty()) {
- // The destroy call should also remove the object from the list.
- objects.head()->value()->Destroy();
- }
- }
-
- void DeviceBase::Destroy() {
- // Skip if we are already destroyed.
- if (mState == State::Destroyed) {
- return;
- }
-
- // Skip handling device facilities if they haven't even been created (or failed doing so)
- if (mState != State::BeingCreated) {
- // The device is being destroyed so it will be lost, call the application callback.
- if (mDeviceLostCallback != nullptr) {
- mDeviceLostCallback(WGPUDeviceLostReason_Destroyed, "Device was destroyed.",
- mDeviceLostUserdata);
- mDeviceLostCallback = nullptr;
- }
-
- // Call all the callbacks immediately as the device is about to shut down.
- // TODO(crbug.com/dawn/826): Cancel the tasks that are in flight if possible.
- mAsyncTaskManager->WaitAllPendingTasks();
- auto callbackTasks = mCallbackTaskManager->AcquireCallbackTasks();
- for (std::unique_ptr<CallbackTask>& callbackTask : callbackTasks) {
- callbackTask->HandleShutDown();
- }
- }
-
- // Disconnect the device, depending on which state we are currently in.
- switch (mState) {
- case State::BeingCreated:
- // The GPU timeline was never started so we don't have to wait.
- break;
-
- case State::Alive:
- // Alive is the only state which can have GPU work happening. Wait for all of it to
- // complete before proceeding with destruction.
- // Ignore errors so that we can continue with destruction
- IgnoreErrors(WaitForIdleForDestruction());
- AssumeCommandsComplete();
- break;
-
- case State::BeingDisconnected:
- // Getting disconnected is a transient state happening in a single API call so there
- // is always an external reference keeping the Device alive, which means the
- // destructor cannot run while BeingDisconnected.
- UNREACHABLE();
- break;
-
- case State::Disconnected:
- break;
-
- case State::Destroyed:
- // If we are already destroyed we should've skipped this work entirely.
- UNREACHABLE();
- break;
- }
- ASSERT(mCompletedSerial == mLastSubmittedSerial);
- ASSERT(mFutureSerial <= mCompletedSerial);
-
- if (mState != State::BeingCreated) {
- // The GPU timeline is finished.
- // Finish destroying all objects owned by the device and tick the queue-related tasks
- // since they should be complete. This must be done before DestroyImpl() it may
- // relinquish resources that will be freed by backends in the DestroyImpl() call.
- DestroyObjects();
- mQueue->Tick(GetCompletedCommandSerial());
- // Call TickImpl once last time to clean up resources
- // Ignore errors so that we can continue with destruction
- IgnoreErrors(TickImpl());
- }
-
- // At this point GPU operations are always finished, so we are in the disconnected state.
- // Note that currently this state change is required because some of the backend
- // implementations of DestroyImpl checks that we are disconnected before doing work.
- mState = State::Disconnected;
-
- mDynamicUploader = nullptr;
- mCallbackTaskManager = nullptr;
- mAsyncTaskManager = nullptr;
- mPersistentCache = nullptr;
- mEmptyBindGroupLayout = nullptr;
- mInternalPipelineStore = nullptr;
- mExternalTextureDummyView = nullptr;
-
- AssumeCommandsComplete();
-
- // Now that the GPU timeline is empty, destroy the backend device.
- DestroyImpl();
-
- mCaches = nullptr;
- mState = State::Destroyed;
- }
-
- void DeviceBase::APIDestroy() {
- Destroy();
- }
-
- void DeviceBase::HandleError(InternalErrorType type, const char* message) {
- if (type == InternalErrorType::DeviceLost) {
- mState = State::Disconnected;
-
- // If the ErrorInjector is enabled, then the device loss might be fake and the device
- // still be executing commands. Force a wait for idle in this case, with State being
- // Disconnected so we can detect this case in WaitForIdleForDestruction.
- if (ErrorInjectorEnabled()) {
- IgnoreErrors(WaitForIdleForDestruction());
- }
-
- // A real device lost happened. Set the state to disconnected as the device cannot be
- // used. Also tags all commands as completed since the device stopped running.
- AssumeCommandsComplete();
- } else if (type == InternalErrorType::Internal) {
- // If we receive an internal error, assume the backend can't recover and proceed with
- // device destruction. We first wait for all previous commands to be completed so that
- // backend objects can be freed immediately, before handling the loss.
-
- // Move away from the Alive state so that the application cannot use this device
- // anymore.
- // TODO(crbug.com/dawn/831): Do we need atomics for this to become visible to other
- // threads in a multithreaded scenario?
- mState = State::BeingDisconnected;
-
+ // clang-format on
+
+ // We first move all objects out from the tracking list into a separate list so that we can
+ // avoid locking the same mutex twice. We can then iterate across the separate list to call
+ // the actual destroy function.
+ LinkedList<ApiObjectBase> objects;
+ for (ObjectType type : kObjectTypeDependencyOrder) {
+ ApiObjectList& objList = mObjectLists[type];
+ const std::lock_guard<std::mutex> lock(objList.mutex);
+ objList.objects.MoveInto(&objects);
+ }
+ while (!objects.empty()) {
+ // The destroy call should also remove the object from the list.
+ objects.head()->value()->Destroy();
+ }
+}
+
+void DeviceBase::Destroy() {
+ // Skip if we are already destroyed.
+ if (mState == State::Destroyed) {
+ return;
+ }
+
+ // This function may be called re-entrantly inside APITick(). Tick triggers callbacks
+ // inside which the application may destroy the device. Thus, we should be careful not
+ // to delete objects that are needed inside Tick after callbacks have been called.
+ // - mCallbackTaskManager is not deleted since we flush the callback queue at the end
+    //  of Tick(). Note: that flush should always be empty since all callbacks are drained
+    //  inside Destroy() so there should be no outstanding tasks holding objects alive.
+    //  - Similarly, mAsyncTaskManager is not deleted since we use it to return a status
+    //  from Tick() indicating whether or not there is any more pending work.
+
+ // Skip handling device facilities if they haven't even been created (or failed doing so)
+ if (mState != State::BeingCreated) {
+ // The device is being destroyed so it will be lost, call the application callback.
+ if (mDeviceLostCallback != nullptr) {
+ mDeviceLostCallback(WGPUDeviceLostReason_Destroyed, "Device was destroyed.",
+ mDeviceLostUserdata);
+ mDeviceLostCallback = nullptr;
+ }
+
+ // Call all the callbacks immediately as the device is about to shut down.
+ // TODO(crbug.com/dawn/826): Cancel the tasks that are in flight if possible.
+ mAsyncTaskManager->WaitAllPendingTasks();
+ auto callbackTasks = mCallbackTaskManager->AcquireCallbackTasks();
+ for (std::unique_ptr<CallbackTask>& callbackTask : callbackTasks) {
+ callbackTask->HandleShutDown();
+ }
+ }
+
+ // Disconnect the device, depending on which state we are currently in.
+ switch (mState) {
+ case State::BeingCreated:
+ // The GPU timeline was never started so we don't have to wait.
+ break;
+
+ case State::Alive:
+ // Alive is the only state which can have GPU work happening. Wait for all of it to
+ // complete before proceeding with destruction.
// Ignore errors so that we can continue with destruction
- // Assume all commands are complete after WaitForIdleForDestruction (because they were)
IgnoreErrors(WaitForIdleForDestruction());
- IgnoreErrors(TickImpl());
AssumeCommandsComplete();
- ASSERT(mFutureSerial <= mCompletedSerial);
- mState = State::Disconnected;
-
- // Now everything is as if the device was lost.
- type = InternalErrorType::DeviceLost;
- }
-
- if (type == InternalErrorType::DeviceLost) {
- // The device was lost, call the application callback.
- if (mDeviceLostCallback != nullptr) {
- mDeviceLostCallback(WGPUDeviceLostReason_Undefined, message, mDeviceLostUserdata);
- mDeviceLostCallback = nullptr;
- }
-
- mQueue->HandleDeviceLoss();
-
- // TODO(crbug.com/dawn/826): Cancel the tasks that are in flight if possible.
- mAsyncTaskManager->WaitAllPendingTasks();
- auto callbackTasks = mCallbackTaskManager->AcquireCallbackTasks();
- for (std::unique_ptr<CallbackTask>& callbackTask : callbackTasks) {
- callbackTask->HandleDeviceLoss();
- }
+ break;
+
+ case State::BeingDisconnected:
+ // Getting disconnected is a transient state happening in a single API call so there
+ // is always an external reference keeping the Device alive, which means the
+ // destructor cannot run while BeingDisconnected.
+ UNREACHABLE();
+ break;
+
+ case State::Disconnected:
+ break;
+
+ case State::Destroyed:
+ // If we are already destroyed we should've skipped this work entirely.
+ UNREACHABLE();
+ break;
+ }
+ ASSERT(mCompletedSerial == mLastSubmittedSerial);
+ ASSERT(mFutureSerial <= mCompletedSerial);
+
+ if (mState != State::BeingCreated) {
+ // The GPU timeline is finished.
+ // Finish destroying all objects owned by the device and tick the queue-related tasks
+        // since they should be complete. This must be done before DestroyImpl() as it may
+ // relinquish resources that will be freed by backends in the DestroyImpl() call.
+ DestroyObjects();
+ mQueue->Tick(GetCompletedCommandSerial());
+ // Call TickImpl once last time to clean up resources
+ // Ignore errors so that we can continue with destruction
+ IgnoreErrors(TickImpl());
+ }
+
+ // At this point GPU operations are always finished, so we are in the disconnected state.
+ // Note that currently this state change is required because some of the backend
+    // implementations of DestroyImpl check that we are disconnected before doing work.
+ mState = State::Disconnected;
+
+ // Note: mQueue is not released here since the application may still get it after calling
+ // Destroy() via APIGetQueue.
+ mDynamicUploader = nullptr;
+ mEmptyBindGroupLayout = nullptr;
+ mInternalPipelineStore = nullptr;
+ mExternalTexturePlaceholderView = nullptr;
+
+ AssumeCommandsComplete();
+
+ // Now that the GPU timeline is empty, destroy the backend device.
+ DestroyImpl();
+
+ mCaches = nullptr;
+ mState = State::Destroyed;
+}
+
+void DeviceBase::APIDestroy() {
+ Destroy();
+}
+
+void DeviceBase::HandleError(InternalErrorType type, const char* message) {
+ if (type == InternalErrorType::DeviceLost) {
+ mState = State::Disconnected;
- // Still forward device loss errors to the error scopes so they all reject.
- mErrorScopeStack->HandleError(ToWGPUErrorType(type), message);
- } else {
- // Pass the error to the error scope stack and call the uncaptured error callback
- // if it isn't handled. DeviceLost is not handled here because it should be
- // handled by the lost callback.
- bool captured = mErrorScopeStack->HandleError(ToWGPUErrorType(type), message);
- if (!captured && mUncapturedErrorCallback != nullptr) {
- mUncapturedErrorCallback(static_cast<WGPUErrorType>(ToWGPUErrorType(type)), message,
- mUncapturedErrorUserdata);
- }
+ // If the ErrorInjector is enabled, then the device loss might be fake and the device
+ // still be executing commands. Force a wait for idle in this case, with State being
+ // Disconnected so we can detect this case in WaitForIdleForDestruction.
+ if (ErrorInjectorEnabled()) {
+ IgnoreErrors(WaitForIdleForDestruction());
}
- }
- void DeviceBase::ConsumeError(std::unique_ptr<ErrorData> error) {
- ASSERT(error != nullptr);
- HandleError(error->GetType(), error->GetFormattedMessage().c_str());
- }
-
- void DeviceBase::APISetLoggingCallback(wgpu::LoggingCallback callback, void* userdata) {
- // The registered callback function and userdata pointer are stored and used by deferred
- // callback tasks, and after setting a different callback (especially in the case of
- // resetting) the resources pointed by such pointer may be freed. Flush all deferred
- // callback tasks to guarantee we are never going to use the previous callback after
- // this call.
- if (IsLost()) {
- return;
- }
- FlushCallbackTaskQueue();
- mLoggingCallback = callback;
- mLoggingUserdata = userdata;
- }
-
- void DeviceBase::APISetUncapturedErrorCallback(wgpu::ErrorCallback callback, void* userdata) {
- // The registered callback function and userdata pointer are stored and used by deferred
- // callback tasks, and after setting a different callback (especially in the case of
- // resetting) the resources pointed by such pointer may be freed. Flush all deferred
- // callback tasks to guarantee we are never going to use the previous callback after
- // this call.
- if (IsLost()) {
- return;
- }
- FlushCallbackTaskQueue();
- mUncapturedErrorCallback = callback;
- mUncapturedErrorUserdata = userdata;
- }
-
- void DeviceBase::APISetDeviceLostCallback(wgpu::DeviceLostCallback callback, void* userdata) {
- // The registered callback function and userdata pointer are stored and used by deferred
- // callback tasks, and after setting a different callback (especially in the case of
- // resetting) the resources pointed by such pointer may be freed. Flush all deferred
- // callback tasks to guarantee we are never going to use the previous callback after
- // this call.
- if (IsLost()) {
- return;
- }
- FlushCallbackTaskQueue();
- mDeviceLostCallback = callback;
- mDeviceLostUserdata = userdata;
- }
-
- void DeviceBase::APIPushErrorScope(wgpu::ErrorFilter filter) {
- if (ConsumedError(ValidateErrorFilter(filter))) {
- return;
- }
- mErrorScopeStack->Push(filter);
- }
+ // A real device loss happened. Set the state to disconnected as the device cannot be
+ // used. Also tag all commands as completed since the device stopped running.
+ AssumeCommandsComplete();
+ } else if (type == InternalErrorType::Internal) {
+ // If we receive an internal error, assume the backend can't recover and proceed with
+ // device destruction. We first wait for all previous commands to be completed so that
+ // backend objects can be freed immediately, before handling the loss.
+
+ // Move away from the Alive state so that the application cannot use this device
+ // anymore.
+ // TODO(crbug.com/dawn/831): Do we need atomics for this to become visible to other
+ // threads in a multithreaded scenario?
+ mState = State::BeingDisconnected;
+
+ // Ignore errors so that we can continue with destruction
+ // Assume all commands are complete after WaitForIdleForDestruction (because they were)
+ IgnoreErrors(WaitForIdleForDestruction());
+ IgnoreErrors(TickImpl());
+ AssumeCommandsComplete();
+ ASSERT(mFutureSerial <= mCompletedSerial);
+ mState = State::Disconnected;
- bool DeviceBase::APIPopErrorScope(wgpu::ErrorCallback callback, void* userdata) {
- // TODO(crbug.com/dawn/1324) Remove return and make function void when users are updated.
- bool returnValue = true;
- if (callback == nullptr) {
- static wgpu::ErrorCallback defaultCallback = [](WGPUErrorType, char const*, void*) {};
- callback = defaultCallback;
- }
- // TODO(crbug.com/dawn/1122): Call callbacks only on wgpuInstanceProcessEvents
- if (IsLost()) {
- callback(WGPUErrorType_DeviceLost, "GPU device disconnected", userdata);
- return returnValue;
- }
- if (mErrorScopeStack->Empty()) {
- callback(WGPUErrorType_Unknown, "No error scopes to pop", userdata);
- return returnValue;
- }
- ErrorScope scope = mErrorScopeStack->Pop();
- callback(static_cast<WGPUErrorType>(scope.GetErrorType()), scope.GetErrorMessage(),
- userdata);
+ // Now everything is as if the device was lost.
+ type = InternalErrorType::DeviceLost;
+ }
+
+ if (type == InternalErrorType::DeviceLost) {
+ // The device was lost, call the application callback.
+ if (mDeviceLostCallback != nullptr) {
+ mDeviceLostCallback(WGPUDeviceLostReason_Undefined, message, mDeviceLostUserdata);
+ mDeviceLostCallback = nullptr;
+ }
+
+ mQueue->HandleDeviceLoss();
+
+ // TODO(crbug.com/dawn/826): Cancel the tasks that are in flight if possible.
+ mAsyncTaskManager->WaitAllPendingTasks();
+ auto callbackTasks = mCallbackTaskManager->AcquireCallbackTasks();
+ for (std::unique_ptr<CallbackTask>& callbackTask : callbackTasks) {
+ callbackTask->HandleDeviceLoss();
+ }
+
+ // Still forward device loss errors to the error scopes so they all reject.
+ mErrorScopeStack->HandleError(ToWGPUErrorType(type), message);
+ } else {
+ // Pass the error to the error scope stack and call the uncaptured error callback
+ // if it isn't handled. DeviceLost is not handled here because it should be
+ // handled by the lost callback.
+ bool captured = mErrorScopeStack->HandleError(ToWGPUErrorType(type), message);
+ if (!captured && mUncapturedErrorCallback != nullptr) {
+ mUncapturedErrorCallback(static_cast<WGPUErrorType>(ToWGPUErrorType(type)), message,
+ mUncapturedErrorUserdata);
+ }
+ }
+}
+
+void DeviceBase::ConsumeError(std::unique_ptr<ErrorData> error) {
+ ASSERT(error != nullptr);
+ AppendDebugLayerMessages(error.get());
+ HandleError(error->GetType(), error->GetFormattedMessage().c_str());
+}
+
+void DeviceBase::APISetLoggingCallback(wgpu::LoggingCallback callback, void* userdata) {
+ // The registered callback function and userdata pointer are stored and used by deferred
+ // callback tasks, and after setting a different callback (especially in the case of
+ // resetting) the resources pointed to by these pointers may be freed. Flush all deferred
+ // callback tasks to guarantee we are never going to use the previous callback after
+ // this call.
+ if (IsLost()) {
+ return;
+ }
+ FlushCallbackTaskQueue();
+ mLoggingCallback = callback;
+ mLoggingUserdata = userdata;
+}
+
+void DeviceBase::APISetUncapturedErrorCallback(wgpu::ErrorCallback callback, void* userdata) {
+ // The registered callback function and userdata pointer are stored and used by deferred
+ // callback tasks, and after setting a different callback (especially in the case of
+ // resetting) the resources pointed to by these pointers may be freed. Flush all deferred
+ // callback tasks to guarantee we are never going to use the previous callback after
+ // this call.
+ if (IsLost()) {
+ return;
+ }
+ FlushCallbackTaskQueue();
+ mUncapturedErrorCallback = callback;
+ mUncapturedErrorUserdata = userdata;
+}
+
+void DeviceBase::APISetDeviceLostCallback(wgpu::DeviceLostCallback callback, void* userdata) {
+ // The registered callback function and userdata pointer are stored and used by deferred
+ // callback tasks, and after setting a different callback (especially in the case of
+ // resetting) the resources pointed to by these pointers may be freed. Flush all deferred
+ // callback tasks to guarantee we are never going to use the previous callback after
+ // this call.
+ if (IsLost()) {
+ return;
+ }
+ FlushCallbackTaskQueue();
+ mDeviceLostCallback = callback;
+ mDeviceLostUserdata = userdata;
+}
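The three setters above share the same contract: flush the deferred callback queue first so a previously registered callback can never fire after it has been replaced. A minimal usage sketch against the webgpu.h C API generated from dawn.json follows; RegisterDeviceCallbacks and the log strings are illustrative, not part of Dawn.

#include <cstdio>

#include <webgpu/webgpu.h>

// Illustrative helper (not part of Dawn): register both callbacks on an existing device.
void RegisterDeviceCallbacks(WGPUDevice device) {
    wgpuDeviceSetUncapturedErrorCallback(
        device,
        [](WGPUErrorType type, char const* message, void* /*userdata*/) {
            std::fprintf(stderr, "uncaptured error %d: %s\n", static_cast<int>(type), message);
        },
        nullptr);
    wgpuDeviceSetDeviceLostCallback(
        device,
        [](WGPUDeviceLostReason reason, char const* message, void* /*userdata*/) {
            std::fprintf(stderr, "device lost %d: %s\n", static_cast<int>(reason), message);
        },
        nullptr);
}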
+
+void DeviceBase::APIPushErrorScope(wgpu::ErrorFilter filter) {
+ if (ConsumedError(ValidateErrorFilter(filter))) {
+ return;
+ }
+ mErrorScopeStack->Push(filter);
+}
+
+bool DeviceBase::APIPopErrorScope(wgpu::ErrorCallback callback, void* userdata) {
+ // TODO(crbug.com/dawn/1324) Remove return and make function void when users are updated.
+ bool returnValue = true;
+ if (callback == nullptr) {
+ static wgpu::ErrorCallback defaultCallback = [](WGPUErrorType, char const*, void*) {};
+ callback = defaultCallback;
+ }
+ // TODO(crbug.com/dawn/1122): Call callbacks only on wgpuInstanceProcessEvents
+ if (IsLost()) {
+ callback(WGPUErrorType_DeviceLost, "GPU device disconnected", userdata);
return returnValue;
}
-
- PersistentCache* DeviceBase::GetPersistentCache() {
- ASSERT(mPersistentCache.get() != nullptr);
- return mPersistentCache.get();
- }
-
- MaybeError DeviceBase::ValidateObject(const ApiObjectBase* object) const {
- ASSERT(object != nullptr);
- DAWN_INVALID_IF(object->GetDevice() != this,
- "%s is associated with %s, and cannot be used with %s.", object,
- object->GetDevice(), this);
-
- // TODO(dawn:563): Preserve labels for error objects.
- DAWN_INVALID_IF(object->IsError(), "%s is invalid.", object);
-
- return {};
- }
-
- MaybeError DeviceBase::ValidateIsAlive() const {
- DAWN_INVALID_IF(mState != State::Alive, "%s is lost.", this);
- return {};
- }
-
- void DeviceBase::APILoseForTesting() {
- if (mState != State::Alive) {
- return;
- }
-
- HandleError(InternalErrorType::Internal, "Device lost for testing");
- }
-
- DeviceBase::State DeviceBase::GetState() const {
- return mState;
- }
-
- bool DeviceBase::IsLost() const {
- ASSERT(mState != State::BeingCreated);
- return mState != State::Alive;
- }
-
- void DeviceBase::TrackObject(ApiObjectBase* object) {
- ApiObjectList& objectList = mObjectLists[object->GetType()];
- std::lock_guard<std::mutex> lock(objectList.mutex);
- object->InsertBefore(objectList.objects.head());
- }
-
- std::mutex* DeviceBase::GetObjectListMutex(ObjectType type) {
- return &mObjectLists[type].mutex;
- }
-
- AdapterBase* DeviceBase::GetAdapter() const {
- return mAdapter;
- }
-
- dawn::platform::Platform* DeviceBase::GetPlatform() const {
- return GetAdapter()->GetInstance()->GetPlatform();
- }
-
- ExecutionSerial DeviceBase::GetCompletedCommandSerial() const {
- return mCompletedSerial;
- }
-
- ExecutionSerial DeviceBase::GetLastSubmittedCommandSerial() const {
- return mLastSubmittedSerial;
- }
-
- ExecutionSerial DeviceBase::GetFutureSerial() const {
- return mFutureSerial;
- }
-
- InternalPipelineStore* DeviceBase::GetInternalPipelineStore() {
- return mInternalPipelineStore.get();
+ if (mErrorScopeStack->Empty()) {
+ callback(WGPUErrorType_Unknown, "No error scopes to pop", userdata);
+ return returnValue;
}
+ ErrorScope scope = mErrorScopeStack->Pop();
+ callback(static_cast<WGPUErrorType>(scope.GetErrorType()), scope.GetErrorMessage(), userdata);
+ return returnValue;
+}
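APIPushErrorScope and APIPopErrorScope back the WebGPU error-scope API: errors matching the pushed filter are captured by the scope instead of reaching the uncaptured-error callback. A rough sketch of that usage through the C API, assuming a valid device and buffer descriptor; CreateBufferWithErrorScope is an illustrative helper.

#include <cstdio>

#include <webgpu/webgpu.h>

// Illustrative helper (not part of Dawn): capture validation errors from one buffer creation.
WGPUBuffer CreateBufferWithErrorScope(WGPUDevice device, WGPUBufferDescriptor const* desc) {
    wgpuDevicePushErrorScope(device, WGPUErrorFilter_Validation);
    WGPUBuffer buffer = wgpuDeviceCreateBuffer(device, desc);
    wgpuDevicePopErrorScope(
        device,
        [](WGPUErrorType type, char const* message, void* /*userdata*/) {
            if (type != WGPUErrorType_NoError) {
                std::fprintf(stderr, "buffer creation failed: %s\n", message);
            }
        },
        nullptr);
    return buffer;  // An error object if validation failed; still safe to release.
}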
- void DeviceBase::IncrementLastSubmittedCommandSerial() {
- mLastSubmittedSerial++;
+BlobCache* DeviceBase::GetBlobCache() {
+ if (IsToggleEnabled(Toggle::EnableBlobCache)) {
+ return mInstance->GetBlobCache();
}
+ return nullptr;
+}
- void DeviceBase::AssumeCommandsComplete() {
- ExecutionSerial maxSerial =
- ExecutionSerial(std::max(mLastSubmittedSerial + ExecutionSerial(1), mFutureSerial));
- mLastSubmittedSerial = maxSerial;
- mCompletedSerial = maxSerial;
+Blob DeviceBase::LoadCachedBlob(const CacheKey& key) {
+ BlobCache* blobCache = GetBlobCache();
+ if (!blobCache) {
+ return Blob();
}
+ return blobCache->Load(key);
+}
- bool DeviceBase::IsDeviceIdle() {
- if (mAsyncTaskManager->HasPendingTasks()) {
- return false;
- }
-
- ExecutionSerial maxSerial = std::max(mLastSubmittedSerial, mFutureSerial);
- if (mCompletedSerial == maxSerial) {
- return true;
+void DeviceBase::StoreCachedBlob(const CacheKey& key, const Blob& blob) {
+ if (!blob.Empty()) {
+ BlobCache* blobCache = GetBlobCache();
+ if (blobCache) {
+ blobCache->Store(key, blob);
}
- return false;
- }
-
- ExecutionSerial DeviceBase::GetPendingCommandSerial() const {
- return mLastSubmittedSerial + ExecutionSerial(1);
}
+}
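GetBlobCache, LoadCachedBlob and StoreCachedBlob give backends a load-or-compute-then-store path for serialized artifacts such as pipeline caches. The sketch below shows that shape with standard containers; SimpleBlobCache and GetOrBuild are simplified stand-ins, not Dawn's CacheKey/Blob/BlobCache types.

#include <cstdint>
#include <functional>
#include <string>
#include <unordered_map>
#include <vector>

// Simplified stand-ins for Dawn's CacheKey and Blob.
using Key = std::string;
using Blob = std::vector<uint8_t>;

class SimpleBlobCache {
  public:
    Blob Load(const Key& key) const {
        auto it = mEntries.find(key);
        return it != mEntries.end() ? it->second : Blob();
    }
    void Store(const Key& key, const Blob& blob) {
        if (!blob.empty()) {
            mEntries[key] = blob;
        }
    }

  private:
    std::unordered_map<Key, Blob> mEntries;
};

// Load-or-compute-then-store: the same shape backend pipeline-cache paths follow.
Blob GetOrBuild(SimpleBlobCache& cache, const Key& key, const std::function<Blob()>& build) {
    Blob blob = cache.Load(key);
    if (blob.empty()) {
        blob = build();  // The expensive compilation only happens on a cache miss.
        cache.Store(key, blob);
    }
    return blob;
}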
- void DeviceBase::AddFutureSerial(ExecutionSerial serial) {
- if (serial > mFutureSerial) {
- mFutureSerial = serial;
- }
- }
+MaybeError DeviceBase::ValidateObject(const ApiObjectBase* object) const {
+ ASSERT(object != nullptr);
+ DAWN_INVALID_IF(object->GetDevice() != this,
+ "%s is associated with %s, and cannot be used with %s.", object,
+ object->GetDevice(), this);
- MaybeError DeviceBase::CheckPassedSerials() {
- ExecutionSerial completedSerial;
- DAWN_TRY_ASSIGN(completedSerial, CheckAndUpdateCompletedSerials());
+ // TODO(dawn:563): Preserve labels for error objects.
+ DAWN_INVALID_IF(object->IsError(), "%s is invalid.", object);
- ASSERT(completedSerial <= mLastSubmittedSerial);
- // completedSerial should not be less than mCompletedSerial unless it is 0.
- // It can be 0 when there's no fences to check.
- ASSERT(completedSerial >= mCompletedSerial || completedSerial == ExecutionSerial(0));
+ return {};
+}
- if (completedSerial > mCompletedSerial) {
- mCompletedSerial = completedSerial;
- }
+MaybeError DeviceBase::ValidateIsAlive() const {
+ DAWN_INVALID_IF(mState != State::Alive, "%s is lost.", this);
+ return {};
+}
- return {};
+void DeviceBase::APILoseForTesting() {
+ if (mState != State::Alive) {
+ return;
}
- ResultOrError<const Format*> DeviceBase::GetInternalFormat(wgpu::TextureFormat format) const {
- FormatIndex index = ComputeFormatIndex(format);
- DAWN_INVALID_IF(index >= mFormatTable.size(), "Unknown texture format %s.", format);
+ HandleError(InternalErrorType::Internal, "Device lost for testing");
+}
- const Format* internalFormat = &mFormatTable[index];
- DAWN_INVALID_IF(!internalFormat->isSupported, "Unsupported texture format %s.", format);
+DeviceBase::State DeviceBase::GetState() const {
+ return mState;
+}
- return internalFormat;
- }
+bool DeviceBase::IsLost() const {
+ ASSERT(mState != State::BeingCreated);
+ return mState != State::Alive;
+}
- const Format& DeviceBase::GetValidInternalFormat(wgpu::TextureFormat format) const {
- FormatIndex index = ComputeFormatIndex(format);
- ASSERT(index < mFormatTable.size());
- ASSERT(mFormatTable[index].isSupported);
- return mFormatTable[index];
- }
+void DeviceBase::TrackObject(ApiObjectBase* object) {
+ ApiObjectList& objectList = mObjectLists[object->GetType()];
+ std::lock_guard<std::mutex> lock(objectList.mutex);
+ object->InsertBefore(objectList.objects.head());
+}
- const Format& DeviceBase::GetValidInternalFormat(FormatIndex index) const {
- ASSERT(index < mFormatTable.size());
- ASSERT(mFormatTable[index].isSupported);
- return mFormatTable[index];
- }
+std::mutex* DeviceBase::GetObjectListMutex(ObjectType type) {
+ return &mObjectLists[type].mutex;
+}
- ResultOrError<Ref<BindGroupLayoutBase>> DeviceBase::GetOrCreateBindGroupLayout(
- const BindGroupLayoutDescriptor* descriptor,
- PipelineCompatibilityToken pipelineCompatibilityToken) {
- BindGroupLayoutBase blueprint(this, descriptor, pipelineCompatibilityToken,
- ApiObjectBase::kUntrackedByDevice);
+AdapterBase* DeviceBase::GetAdapter() const {
+ return mAdapter;
+}
- const size_t blueprintHash = blueprint.ComputeContentHash();
- blueprint.SetContentHash(blueprintHash);
+dawn::platform::Platform* DeviceBase::GetPlatform() const {
+ return GetAdapter()->GetInstance()->GetPlatform();
+}
- Ref<BindGroupLayoutBase> result;
- auto iter = mCaches->bindGroupLayouts.find(&blueprint);
- if (iter != mCaches->bindGroupLayouts.end()) {
- result = *iter;
- } else {
- DAWN_TRY_ASSIGN(result,
- CreateBindGroupLayoutImpl(descriptor, pipelineCompatibilityToken));
- result->SetIsCachedReference();
- result->SetContentHash(blueprintHash);
- mCaches->bindGroupLayouts.insert(result.Get());
- }
+ExecutionSerial DeviceBase::GetCompletedCommandSerial() const {
+ return mCompletedSerial;
+}
- return std::move(result);
- }
+ExecutionSerial DeviceBase::GetLastSubmittedCommandSerial() const {
+ return mLastSubmittedSerial;
+}
- void DeviceBase::UncacheBindGroupLayout(BindGroupLayoutBase* obj) {
- ASSERT(obj->IsCachedReference());
- size_t removedCount = mCaches->bindGroupLayouts.erase(obj);
- ASSERT(removedCount == 1);
- }
-
- // Private function used at initialization
- ResultOrError<Ref<BindGroupLayoutBase>> DeviceBase::CreateEmptyBindGroupLayout() {
- BindGroupLayoutDescriptor desc = {};
- desc.entryCount = 0;
- desc.entries = nullptr;
-
- return GetOrCreateBindGroupLayout(&desc);
- }
+ExecutionSerial DeviceBase::GetFutureSerial() const {
+ return mFutureSerial;
+}
- BindGroupLayoutBase* DeviceBase::GetEmptyBindGroupLayout() {
- ASSERT(mEmptyBindGroupLayout != nullptr);
- return mEmptyBindGroupLayout.Get();
- }
+InternalPipelineStore* DeviceBase::GetInternalPipelineStore() {
+ return mInternalPipelineStore.get();
+}
- Ref<ComputePipelineBase> DeviceBase::GetCachedComputePipeline(
- ComputePipelineBase* uninitializedComputePipeline) {
- Ref<ComputePipelineBase> cachedPipeline;
- auto iter = mCaches->computePipelines.find(uninitializedComputePipeline);
- if (iter != mCaches->computePipelines.end()) {
- cachedPipeline = *iter;
- }
-
- return cachedPipeline;
- }
-
- Ref<RenderPipelineBase> DeviceBase::GetCachedRenderPipeline(
- RenderPipelineBase* uninitializedRenderPipeline) {
- Ref<RenderPipelineBase> cachedPipeline;
- auto iter = mCaches->renderPipelines.find(uninitializedRenderPipeline);
- if (iter != mCaches->renderPipelines.end()) {
- cachedPipeline = *iter;
- }
- return cachedPipeline;
- }
+void DeviceBase::IncrementLastSubmittedCommandSerial() {
+ mLastSubmittedSerial++;
+}
- Ref<ComputePipelineBase> DeviceBase::AddOrGetCachedComputePipeline(
- Ref<ComputePipelineBase> computePipeline) {
- auto [cachedPipeline, inserted] = mCaches->computePipelines.insert(computePipeline.Get());
- if (inserted) {
- computePipeline->SetIsCachedReference();
- return computePipeline;
- } else {
- return *cachedPipeline;
- }
- }
+void DeviceBase::AssumeCommandsComplete() {
+ ExecutionSerial maxSerial =
+ ExecutionSerial(std::max(mLastSubmittedSerial + ExecutionSerial(1), mFutureSerial));
+ mLastSubmittedSerial = maxSerial;
+ mCompletedSerial = maxSerial;
+}
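The device tracks three serials: mCompletedSerial (GPU work known to be finished), mLastSubmittedSerial (work handed to the GPU), and mFutureSerial (work promised by trackers). AssumeCommandsComplete jumps the first two past everything outstanding so that every waiter sees its serial as done. A simplified model of that invariant; SerialTracker is an illustrative stand-in, not Dawn code.

#include <algorithm>
#include <cassert>
#include <cstdint>

// Simplified model of the three execution serials DeviceBase tracks.
struct SerialTracker {
    uint64_t completed = 0;      // mCompletedSerial
    uint64_t lastSubmitted = 0;  // mLastSubmittedSerial
    uint64_t future = 0;         // mFutureSerial

    uint64_t Pending() const { return lastSubmitted + 1; }

    void AssumeCommandsComplete() {
        // Jump past anything submitted or promised so waiters never block again.
        uint64_t maxSerial = std::max(lastSubmitted + 1, future);
        lastSubmitted = maxSerial;
        completed = maxSerial;
        assert(completed == lastSubmitted && future <= completed);
    }
};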
- Ref<RenderPipelineBase> DeviceBase::AddOrGetCachedRenderPipeline(
- Ref<RenderPipelineBase> renderPipeline) {
- auto [cachedPipeline, inserted] = mCaches->renderPipelines.insert(renderPipeline.Get());
- if (inserted) {
- renderPipeline->SetIsCachedReference();
- return renderPipeline;
- } else {
- return *cachedPipeline;
- }
+bool DeviceBase::IsDeviceIdle() {
+ if (mAsyncTaskManager->HasPendingTasks()) {
+ return false;
}
- void DeviceBase::UncacheComputePipeline(ComputePipelineBase* obj) {
- ASSERT(obj->IsCachedReference());
- size_t removedCount = mCaches->computePipelines.erase(obj);
- ASSERT(removedCount == 1);
- }
-
- ResultOrError<Ref<TextureViewBase>>
- DeviceBase::GetOrCreateDummyTextureViewForExternalTexture() {
- if (!mExternalTextureDummyView.Get()) {
- Ref<TextureBase> externalTextureDummy;
- TextureDescriptor textureDesc;
- textureDesc.dimension = wgpu::TextureDimension::e2D;
- textureDesc.format = wgpu::TextureFormat::RGBA8Unorm;
- textureDesc.label = "Dawn_External_Texture_Dummy_Texture";
- textureDesc.size = {1, 1, 1};
- textureDesc.usage = wgpu::TextureUsage::TextureBinding;
-
- DAWN_TRY_ASSIGN(externalTextureDummy, CreateTexture(&textureDesc));
-
- TextureViewDescriptor textureViewDesc;
- textureViewDesc.arrayLayerCount = 1;
- textureViewDesc.aspect = wgpu::TextureAspect::All;
- textureViewDesc.baseArrayLayer = 0;
- textureViewDesc.dimension = wgpu::TextureViewDimension::e2D;
- textureViewDesc.format = wgpu::TextureFormat::RGBA8Unorm;
- textureViewDesc.label = "Dawn_External_Texture_Dummy_Texture_View";
- textureViewDesc.mipLevelCount = 1;
-
- DAWN_TRY_ASSIGN(mExternalTextureDummyView,
- CreateTextureView(externalTextureDummy.Get(), &textureViewDesc));
- }
-
- return mExternalTextureDummyView;
+ ExecutionSerial maxSerial = std::max(mLastSubmittedSerial, mFutureSerial);
+ if (mCompletedSerial == maxSerial) {
+ return true;
}
+ return false;
+}
- ResultOrError<Ref<PipelineLayoutBase>> DeviceBase::GetOrCreatePipelineLayout(
- const PipelineLayoutDescriptor* descriptor) {
- PipelineLayoutBase blueprint(this, descriptor, ApiObjectBase::kUntrackedByDevice);
+ExecutionSerial DeviceBase::GetPendingCommandSerial() const {
+ return mLastSubmittedSerial + ExecutionSerial(1);
+}
- const size_t blueprintHash = blueprint.ComputeContentHash();
- blueprint.SetContentHash(blueprintHash);
+void DeviceBase::AddFutureSerial(ExecutionSerial serial) {
+ if (serial > mFutureSerial) {
+ mFutureSerial = serial;
+ }
+}
- Ref<PipelineLayoutBase> result;
- auto iter = mCaches->pipelineLayouts.find(&blueprint);
- if (iter != mCaches->pipelineLayouts.end()) {
- result = *iter;
- } else {
- DAWN_TRY_ASSIGN(result, CreatePipelineLayoutImpl(descriptor));
- result->SetIsCachedReference();
- result->SetContentHash(blueprintHash);
- mCaches->pipelineLayouts.insert(result.Get());
- }
+MaybeError DeviceBase::CheckPassedSerials() {
+ ExecutionSerial completedSerial;
+ DAWN_TRY_ASSIGN(completedSerial, CheckAndUpdateCompletedSerials());
- return std::move(result);
- }
-
- void DeviceBase::UncachePipelineLayout(PipelineLayoutBase* obj) {
- ASSERT(obj->IsCachedReference());
- size_t removedCount = mCaches->pipelineLayouts.erase(obj);
- ASSERT(removedCount == 1);
+ ASSERT(completedSerial <= mLastSubmittedSerial);
+ // completedSerial should not be less than mCompletedSerial unless it is 0.
+ // It can be 0 when there are no fences to check.
+ ASSERT(completedSerial >= mCompletedSerial || completedSerial == ExecutionSerial(0));
+
+ if (completedSerial > mCompletedSerial) {
+ mCompletedSerial = completedSerial;
+ }
+
+ return {};
+}
+
+ResultOrError<const Format*> DeviceBase::GetInternalFormat(wgpu::TextureFormat format) const {
+ FormatIndex index = ComputeFormatIndex(format);
+ DAWN_INVALID_IF(index >= mFormatTable.size(), "Unknown texture format %s.", format);
+
+ const Format* internalFormat = &mFormatTable[index];
+ DAWN_INVALID_IF(!internalFormat->isSupported, "Unsupported texture format %s.", format);
+
+ return internalFormat;
+}
+
+const Format& DeviceBase::GetValidInternalFormat(wgpu::TextureFormat format) const {
+ FormatIndex index = ComputeFormatIndex(format);
+ ASSERT(index < mFormatTable.size());
+ ASSERT(mFormatTable[index].isSupported);
+ return mFormatTable[index];
+}
+
+const Format& DeviceBase::GetValidInternalFormat(FormatIndex index) const {
+ ASSERT(index < mFormatTable.size());
+ ASSERT(mFormatTable[index].isSupported);
+ return mFormatTable[index];
+}
+
+ResultOrError<Ref<BindGroupLayoutBase>> DeviceBase::GetOrCreateBindGroupLayout(
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken) {
+ BindGroupLayoutBase blueprint(this, descriptor, pipelineCompatibilityToken,
+ ApiObjectBase::kUntrackedByDevice);
+
+ const size_t blueprintHash = blueprint.ComputeContentHash();
+ blueprint.SetContentHash(blueprintHash);
+
+ Ref<BindGroupLayoutBase> result;
+ auto iter = mCaches->bindGroupLayouts.find(&blueprint);
+ if (iter != mCaches->bindGroupLayouts.end()) {
+ result = *iter;
+ } else {
+ DAWN_TRY_ASSIGN(result, CreateBindGroupLayoutImpl(descriptor, pipelineCompatibilityToken));
+ result->SetIsCachedReference();
+ result->SetContentHash(blueprintHash);
+ mCaches->bindGroupLayouts.insert(result.Get());
+ }
+
+ return std::move(result);
+}
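GetOrCreateBindGroupLayout, like the sampler, pipeline-layout and shader-module variants below, follows one blueprint pattern: compute a content hash for a temporary blueprint object, look it up in a per-device cache, and only create the backend object on a miss. A condensed sketch of that pattern; ObjectCache and CachedObject are illustrative, and unlike Dawn's set-based caches this version ignores hash collisions for brevity.

#include <cstddef>
#include <functional>
#include <memory>
#include <string>
#include <unordered_map>

// Illustrative object with a content hash, standing in for BindGroupLayoutBase and friends.
struct CachedObject {
    std::string contents;
    size_t ContentHash() const { return std::hash<std::string>{}(contents); }
};

class ObjectCache {
  public:
    std::shared_ptr<CachedObject> GetOrCreate(const CachedObject& blueprint) {
        size_t hash = blueprint.ContentHash();
        auto it = mCache.find(hash);
        if (it != mCache.end()) {
            return it->second;  // Cache hit: share the previously created object.
        }
        auto created = std::make_shared<CachedObject>(blueprint);
        mCache.emplace(hash, created);  // Cache miss: create and remember it.
        return created;
    }

  private:
    std::unordered_map<size_t, std::shared_ptr<CachedObject>> mCache;
};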
+
+void DeviceBase::UncacheBindGroupLayout(BindGroupLayoutBase* obj) {
+ ASSERT(obj->IsCachedReference());
+ size_t removedCount = mCaches->bindGroupLayouts.erase(obj);
+ ASSERT(removedCount == 1);
+}
+
+// Private function used at initialization
+ResultOrError<Ref<BindGroupLayoutBase>> DeviceBase::CreateEmptyBindGroupLayout() {
+ BindGroupLayoutDescriptor desc = {};
+ desc.entryCount = 0;
+ desc.entries = nullptr;
+
+ return GetOrCreateBindGroupLayout(&desc);
+}
+
+BindGroupLayoutBase* DeviceBase::GetEmptyBindGroupLayout() {
+ ASSERT(mEmptyBindGroupLayout != nullptr);
+ return mEmptyBindGroupLayout.Get();
+}
+
+Ref<ComputePipelineBase> DeviceBase::GetCachedComputePipeline(
+ ComputePipelineBase* uninitializedComputePipeline) {
+ Ref<ComputePipelineBase> cachedPipeline;
+ auto iter = mCaches->computePipelines.find(uninitializedComputePipeline);
+ if (iter != mCaches->computePipelines.end()) {
+ cachedPipeline = *iter;
+ }
+
+ return cachedPipeline;
+}
+
+Ref<RenderPipelineBase> DeviceBase::GetCachedRenderPipeline(
+ RenderPipelineBase* uninitializedRenderPipeline) {
+ Ref<RenderPipelineBase> cachedPipeline;
+ auto iter = mCaches->renderPipelines.find(uninitializedRenderPipeline);
+ if (iter != mCaches->renderPipelines.end()) {
+ cachedPipeline = *iter;
+ }
+ return cachedPipeline;
+}
+
+Ref<ComputePipelineBase> DeviceBase::AddOrGetCachedComputePipeline(
+ Ref<ComputePipelineBase> computePipeline) {
+ auto [cachedPipeline, inserted] = mCaches->computePipelines.insert(computePipeline.Get());
+ if (inserted) {
+ computePipeline->SetIsCachedReference();
+ return computePipeline;
+ } else {
+ return *cachedPipeline;
+ }
+}
+
+Ref<RenderPipelineBase> DeviceBase::AddOrGetCachedRenderPipeline(
+ Ref<RenderPipelineBase> renderPipeline) {
+ auto [cachedPipeline, inserted] = mCaches->renderPipelines.insert(renderPipeline.Get());
+ if (inserted) {
+ renderPipeline->SetIsCachedReference();
+ return renderPipeline;
+ } else {
+ return *cachedPipeline;
+ }
+}
+
+void DeviceBase::UncacheComputePipeline(ComputePipelineBase* obj) {
+ ASSERT(obj->IsCachedReference());
+ size_t removedCount = mCaches->computePipelines.erase(obj);
+ ASSERT(removedCount == 1);
+}
+
+ResultOrError<Ref<TextureViewBase>>
+DeviceBase::GetOrCreatePlaceholderTextureViewForExternalTexture() {
+ if (!mExternalTexturePlaceholderView.Get()) {
+ Ref<TextureBase> externalTexturePlaceholder;
+ TextureDescriptor textureDesc;
+ textureDesc.dimension = wgpu::TextureDimension::e2D;
+ textureDesc.format = wgpu::TextureFormat::RGBA8Unorm;
+ textureDesc.label = "Dawn_External_Texture_Placeholder_Texture";
+ textureDesc.size = {1, 1, 1};
+ textureDesc.usage = wgpu::TextureUsage::TextureBinding;
+
+ DAWN_TRY_ASSIGN(externalTexturePlaceholder, CreateTexture(&textureDesc));
+
+ TextureViewDescriptor textureViewDesc;
+ textureViewDesc.arrayLayerCount = 1;
+ textureViewDesc.aspect = wgpu::TextureAspect::All;
+ textureViewDesc.baseArrayLayer = 0;
+ textureViewDesc.dimension = wgpu::TextureViewDimension::e2D;
+ textureViewDesc.format = wgpu::TextureFormat::RGBA8Unorm;
+ textureViewDesc.label = "Dawn_External_Texture_Placeholder_Texture_View";
+ textureViewDesc.mipLevelCount = 1;
+
+ DAWN_TRY_ASSIGN(mExternalTexturePlaceholderView,
+ CreateTextureView(externalTexturePlaceholder.Get(), &textureViewDesc));
+ }
+
+ return mExternalTexturePlaceholderView;
+}
+
+ResultOrError<Ref<PipelineLayoutBase>> DeviceBase::GetOrCreatePipelineLayout(
+ const PipelineLayoutDescriptor* descriptor) {
+ PipelineLayoutBase blueprint(this, descriptor, ApiObjectBase::kUntrackedByDevice);
+
+ const size_t blueprintHash = blueprint.ComputeContentHash();
+ blueprint.SetContentHash(blueprintHash);
+
+ Ref<PipelineLayoutBase> result;
+ auto iter = mCaches->pipelineLayouts.find(&blueprint);
+ if (iter != mCaches->pipelineLayouts.end()) {
+ result = *iter;
+ } else {
+ DAWN_TRY_ASSIGN(result, CreatePipelineLayoutImpl(descriptor));
+ result->SetIsCachedReference();
+ result->SetContentHash(blueprintHash);
+ mCaches->pipelineLayouts.insert(result.Get());
+ }
+
+ return std::move(result);
+}
+
+void DeviceBase::UncachePipelineLayout(PipelineLayoutBase* obj) {
+ ASSERT(obj->IsCachedReference());
+ size_t removedCount = mCaches->pipelineLayouts.erase(obj);
+ ASSERT(removedCount == 1);
+}
+
+void DeviceBase::UncacheRenderPipeline(RenderPipelineBase* obj) {
+ ASSERT(obj->IsCachedReference());
+ size_t removedCount = mCaches->renderPipelines.erase(obj);
+ ASSERT(removedCount == 1);
+}
+
+ResultOrError<Ref<SamplerBase>> DeviceBase::GetOrCreateSampler(
+ const SamplerDescriptor* descriptor) {
+ SamplerBase blueprint(this, descriptor, ApiObjectBase::kUntrackedByDevice);
+
+ const size_t blueprintHash = blueprint.ComputeContentHash();
+ blueprint.SetContentHash(blueprintHash);
+
+ Ref<SamplerBase> result;
+ auto iter = mCaches->samplers.find(&blueprint);
+ if (iter != mCaches->samplers.end()) {
+ result = *iter;
+ } else {
+ DAWN_TRY_ASSIGN(result, CreateSamplerImpl(descriptor));
+ result->SetIsCachedReference();
+ result->SetContentHash(blueprintHash);
+ mCaches->samplers.insert(result.Get());
+ }
+
+ return std::move(result);
+}
+
+void DeviceBase::UncacheSampler(SamplerBase* obj) {
+ ASSERT(obj->IsCachedReference());
+ size_t removedCount = mCaches->samplers.erase(obj);
+ ASSERT(removedCount == 1);
+}
+
+ResultOrError<Ref<ShaderModuleBase>> DeviceBase::GetOrCreateShaderModule(
+ const ShaderModuleDescriptor* descriptor,
+ ShaderModuleParseResult* parseResult,
+ OwnedCompilationMessages* compilationMessages) {
+ ASSERT(parseResult != nullptr);
+
+ ShaderModuleBase blueprint(this, descriptor, ApiObjectBase::kUntrackedByDevice);
+
+ const size_t blueprintHash = blueprint.ComputeContentHash();
+ blueprint.SetContentHash(blueprintHash);
+
+ Ref<ShaderModuleBase> result;
+ auto iter = mCaches->shaderModules.find(&blueprint);
+ if (iter != mCaches->shaderModules.end()) {
+ result = *iter;
+ } else {
+ if (!parseResult->HasParsedShader()) {
+ // We skip the parse on creation if validation isn't enabled, which lets us quickly
+ // look up in the cache without validating and parsing. We need the parsed module
+ // now.
+ ASSERT(!IsValidationEnabled());
+ DAWN_TRY(
+ ValidateAndParseShaderModule(this, descriptor, parseResult, compilationMessages));
+ }
+ DAWN_TRY_ASSIGN(result,
+ CreateShaderModuleImpl(descriptor, parseResult, compilationMessages));
+ result->SetIsCachedReference();
+ result->SetContentHash(blueprintHash);
+ mCaches->shaderModules.insert(result.Get());
+ }
+
+ return std::move(result);
+}
+
+void DeviceBase::UncacheShaderModule(ShaderModuleBase* obj) {
+ ASSERT(obj->IsCachedReference());
+ size_t removedCount = mCaches->shaderModules.erase(obj);
+ ASSERT(removedCount == 1);
+}
+
+Ref<AttachmentState> DeviceBase::GetOrCreateAttachmentState(AttachmentStateBlueprint* blueprint) {
+ auto iter = mCaches->attachmentStates.find(blueprint);
+ if (iter != mCaches->attachmentStates.end()) {
+ return static_cast<AttachmentState*>(*iter);
+ }
+
+ Ref<AttachmentState> attachmentState = AcquireRef(new AttachmentState(this, *blueprint));
+ attachmentState->SetIsCachedReference();
+ attachmentState->SetContentHash(attachmentState->ComputeContentHash());
+ mCaches->attachmentStates.insert(attachmentState.Get());
+ return attachmentState;
+}
+
+Ref<AttachmentState> DeviceBase::GetOrCreateAttachmentState(
+ const RenderBundleEncoderDescriptor* descriptor) {
+ AttachmentStateBlueprint blueprint(descriptor);
+ return GetOrCreateAttachmentState(&blueprint);
+}
+
+Ref<AttachmentState> DeviceBase::GetOrCreateAttachmentState(
+ const RenderPipelineDescriptor* descriptor) {
+ AttachmentStateBlueprint blueprint(descriptor);
+ return GetOrCreateAttachmentState(&blueprint);
+}
+
+Ref<AttachmentState> DeviceBase::GetOrCreateAttachmentState(
+ const RenderPassDescriptor* descriptor) {
+ AttachmentStateBlueprint blueprint(descriptor);
+ return GetOrCreateAttachmentState(&blueprint);
+}
+
+void DeviceBase::UncacheAttachmentState(AttachmentState* obj) {
+ ASSERT(obj->IsCachedReference());
+ size_t removedCount = mCaches->attachmentStates.erase(obj);
+ ASSERT(removedCount == 1);
+}
+
+Ref<PipelineCacheBase> DeviceBase::GetOrCreatePipelineCache(const CacheKey& key) {
+ return GetOrCreatePipelineCacheImpl(key);
+}
+
+// Object creation API methods
+
+BindGroupBase* DeviceBase::APICreateBindGroup(const BindGroupDescriptor* descriptor) {
+ Ref<BindGroupBase> result;
+ if (ConsumedError(CreateBindGroup(descriptor), &result, "calling %s.CreateBindGroup(%s).", this,
+ descriptor)) {
+ return BindGroupBase::MakeError(this);
+ }
+ return result.Detach();
+}
+BindGroupLayoutBase* DeviceBase::APICreateBindGroupLayout(
+ const BindGroupLayoutDescriptor* descriptor) {
+ Ref<BindGroupLayoutBase> result;
+ if (ConsumedError(CreateBindGroupLayout(descriptor), &result,
+ "calling %s.CreateBindGroupLayout(%s).", this, descriptor)) {
+ return BindGroupLayoutBase::MakeError(this);
+ }
+ return result.Detach();
+}
+BufferBase* DeviceBase::APICreateBuffer(const BufferDescriptor* descriptor) {
+ Ref<BufferBase> result = nullptr;
+ if (ConsumedError(CreateBuffer(descriptor), &result, "calling %s.CreateBuffer(%s).", this,
+ descriptor)) {
+ ASSERT(result == nullptr);
+ return BufferBase::MakeError(this, descriptor);
+ }
+ return result.Detach();
+}
+CommandEncoder* DeviceBase::APICreateCommandEncoder(const CommandEncoderDescriptor* descriptor) {
+ Ref<CommandEncoder> result;
+ if (ConsumedError(CreateCommandEncoder(descriptor), &result,
+ "calling %s.CreateCommandEncoder(%s).", this, descriptor)) {
+ return CommandEncoder::MakeError(this);
+ }
+ return result.Detach();
+}
+ComputePipelineBase* DeviceBase::APICreateComputePipeline(
+ const ComputePipelineDescriptor* descriptor) {
+ TRACE_EVENT1(GetPlatform(), General, "DeviceBase::APICreateComputePipeline", "label",
+ utils::GetLabelForTrace(descriptor->label));
+
+ Ref<ComputePipelineBase> result;
+ if (ConsumedError(CreateComputePipeline(descriptor), &result,
+ "calling %s.CreateComputePipeline(%s).", this, descriptor)) {
+ return ComputePipelineBase::MakeError(this);
+ }
+ return result.Detach();
+}
+void DeviceBase::APICreateComputePipelineAsync(const ComputePipelineDescriptor* descriptor,
+ WGPUCreateComputePipelineAsyncCallback callback,
+ void* userdata) {
+ TRACE_EVENT1(GetPlatform(), General, "DeviceBase::APICreateComputePipelineAsync", "label",
+ utils::GetLabelForTrace(descriptor->label));
+
+ MaybeError maybeResult = CreateComputePipelineAsync(descriptor, callback, userdata);
+
+ // Call the callback directly when a validation error has been found in the front-end
+ // validations. If there is no error, then CreateComputePipelineAsync will call the
+ // callback.
+ if (maybeResult.IsError()) {
+ std::unique_ptr<ErrorData> error = maybeResult.AcquireError();
+ // TODO(crbug.com/dawn/1122): Call callbacks only on wgpuInstanceProcessEvents
+ callback(WGPUCreatePipelineAsyncStatus_Error, nullptr, error->GetMessage().c_str(),
+ userdata);
}
-
- void DeviceBase::UncacheRenderPipeline(RenderPipelineBase* obj) {
- ASSERT(obj->IsCachedReference());
- size_t removedCount = mCaches->renderPipelines.erase(obj);
- ASSERT(removedCount == 1);
+}
+PipelineLayoutBase* DeviceBase::APICreatePipelineLayout(
+ const PipelineLayoutDescriptor* descriptor) {
+ Ref<PipelineLayoutBase> result;
+ if (ConsumedError(CreatePipelineLayout(descriptor), &result,
+ "calling %s.CreatePipelineLayout(%s).", this, descriptor)) {
+ return PipelineLayoutBase::MakeError(this);
+ }
+ return result.Detach();
+}
+QuerySetBase* DeviceBase::APICreateQuerySet(const QuerySetDescriptor* descriptor) {
+ Ref<QuerySetBase> result;
+ if (ConsumedError(CreateQuerySet(descriptor), &result, "calling %s.CreateQuerySet(%s).", this,
+ descriptor)) {
+ return QuerySetBase::MakeError(this, descriptor);
+ }
+ return result.Detach();
+}
+SamplerBase* DeviceBase::APICreateSampler(const SamplerDescriptor* descriptor) {
+ Ref<SamplerBase> result;
+ if (ConsumedError(CreateSampler(descriptor), &result, "calling %s.CreateSampler(%s).", this,
+ descriptor)) {
+ return SamplerBase::MakeError(this);
+ }
+ return result.Detach();
+}
+void DeviceBase::APICreateRenderPipelineAsync(const RenderPipelineDescriptor* descriptor,
+ WGPUCreateRenderPipelineAsyncCallback callback,
+ void* userdata) {
+ TRACE_EVENT1(GetPlatform(), General, "DeviceBase::APICreateRenderPipelineAsync", "label",
+ utils::GetLabelForTrace(descriptor->label));
+ // TODO(dawn:563): Add validation error context.
+ MaybeError maybeResult = CreateRenderPipelineAsync(descriptor, callback, userdata);
+
+ // Call the callback directly when a validation error has been found in the front-end
+ // validations. If there is no error, then CreateRenderPipelineAsync will call the
+ // callback.
+ if (maybeResult.IsError()) {
+ std::unique_ptr<ErrorData> error = maybeResult.AcquireError();
+ // TODO(crbug.com/dawn/1122): Call callbacks only on wgpuInstanceProcessEvents
+ callback(WGPUCreatePipelineAsyncStatus_Error, nullptr, error->GetMessage().c_str(),
+ userdata);
}
-
- ResultOrError<Ref<SamplerBase>> DeviceBase::GetOrCreateSampler(
- const SamplerDescriptor* descriptor) {
- SamplerBase blueprint(this, descriptor, ApiObjectBase::kUntrackedByDevice);
-
- const size_t blueprintHash = blueprint.ComputeContentHash();
- blueprint.SetContentHash(blueprintHash);
-
- Ref<SamplerBase> result;
- auto iter = mCaches->samplers.find(&blueprint);
- if (iter != mCaches->samplers.end()) {
- result = *iter;
- } else {
- DAWN_TRY_ASSIGN(result, CreateSamplerImpl(descriptor));
- result->SetIsCachedReference();
- result->SetContentHash(blueprintHash);
- mCaches->samplers.insert(result.Get());
- }
-
- return std::move(result);
+}
+RenderBundleEncoder* DeviceBase::APICreateRenderBundleEncoder(
+ const RenderBundleEncoderDescriptor* descriptor) {
+ Ref<RenderBundleEncoder> result;
+ if (ConsumedError(CreateRenderBundleEncoder(descriptor), &result,
+ "calling %s.CreateRenderBundleEncoder(%s).", this, descriptor)) {
+ return RenderBundleEncoder::MakeError(this);
+ }
+ return result.Detach();
+}
+RenderPipelineBase* DeviceBase::APICreateRenderPipeline(
+ const RenderPipelineDescriptor* descriptor) {
+ TRACE_EVENT1(GetPlatform(), General, "DeviceBase::APICreateRenderPipeline", "label",
+ utils::GetLabelForTrace(descriptor->label));
+
+ Ref<RenderPipelineBase> result;
+ if (ConsumedError(CreateRenderPipeline(descriptor), &result,
+ "calling %s.CreateRenderPipeline(%s).", this, descriptor)) {
+ return RenderPipelineBase::MakeError(this);
+ }
+ return result.Detach();
+}
+ShaderModuleBase* DeviceBase::APICreateShaderModule(const ShaderModuleDescriptor* descriptor) {
+ TRACE_EVENT1(GetPlatform(), General, "DeviceBase::APICreateShaderModule", "label",
+ utils::GetLabelForTrace(descriptor->label));
+
+ Ref<ShaderModuleBase> result;
+ std::unique_ptr<OwnedCompilationMessages> compilationMessages(
+ std::make_unique<OwnedCompilationMessages>());
+ if (ConsumedError(CreateShaderModule(descriptor, compilationMessages.get()), &result,
+ "calling %s.CreateShaderModule(%s).", this, descriptor)) {
+ DAWN_ASSERT(result == nullptr);
+ result = ShaderModuleBase::MakeError(this);
+ }
+ // Move compilation messages into ShaderModuleBase and emit tint errors and warnings
+ // after all other operations are finished, even if any of them failed and the result
+ // is an error shader module.
+ result->InjectCompilationMessages(std::move(compilationMessages));
+
+ return result.Detach();
+}
+SwapChainBase* DeviceBase::APICreateSwapChain(Surface* surface,
+ const SwapChainDescriptor* descriptor) {
+ Ref<SwapChainBase> result;
+ if (ConsumedError(CreateSwapChain(surface, descriptor), &result,
+ "calling %s.CreateSwapChain(%s).", this, descriptor)) {
+ return SwapChainBase::MakeError(this);
+ }
+ return result.Detach();
+}
+TextureBase* DeviceBase::APICreateTexture(const TextureDescriptor* descriptor) {
+ Ref<TextureBase> result;
+ if (ConsumedError(CreateTexture(descriptor), &result, "calling %s.CreateTexture(%s).", this,
+ descriptor)) {
+ return TextureBase::MakeError(this, descriptor);
+ }
+ return result.Detach();
+}
+
+// For Dawn Wire
+
+BufferBase* DeviceBase::APICreateErrorBuffer() {
+ BufferDescriptor desc = {};
+ return BufferBase::MakeError(this, &desc);
+}
+
+// Other Device API methods
+
+// Returns true if future ticking is needed.
+bool DeviceBase::APITick() {
+ // Tick may trigger callbacks which drop a ref to the device itself. Hold a Ref to ourselves
+ // to avoid deleting |this| in the middle of this function call.
+ Ref<DeviceBase> self(this);
+ if (IsLost() || ConsumedError(Tick())) {
+ return false;
}
- void DeviceBase::UncacheSampler(SamplerBase* obj) {
- ASSERT(obj->IsCachedReference());
- size_t removedCount = mCaches->samplers.erase(obj);
- ASSERT(removedCount == 1);
- }
+ TRACE_EVENT1(GetPlatform(), General, "DeviceBase::APITick::IsDeviceIdle", "isDeviceIdle",
+ IsDeviceIdle());
- ResultOrError<Ref<ShaderModuleBase>> DeviceBase::GetOrCreateShaderModule(
- const ShaderModuleDescriptor* descriptor,
- ShaderModuleParseResult* parseResult,
- OwnedCompilationMessages* compilationMessages) {
- ASSERT(parseResult != nullptr);
+ return !IsDeviceIdle();
+}
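APITick now holds a self-reference so the device cannot be deleted from inside a callback, and it reports whether further ticking is needed. Applications typically poll it until their own completion signal flips; a sketch assuming Dawn's wgpuDeviceTick extension to webgpu.h and a caller-owned done flag.

#include <webgpu/webgpu.h>

// Illustrative polling loop: pump deferred callbacks until a caller-owned flag flips.
// A real application should yield or sleep between iterations instead of spinning.
void WaitForCallback(WGPUDevice device, const bool& done) {
    while (!done) {
        wgpuDeviceTick(device);  // Checks fences, ticks the queue, flushes callback tasks.
    }
}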
- ShaderModuleBase blueprint(this, descriptor, ApiObjectBase::kUntrackedByDevice);
+MaybeError DeviceBase::Tick() {
+ DAWN_TRY(ValidateIsAlive());
- const size_t blueprintHash = blueprint.ComputeContentHash();
- blueprint.SetContentHash(blueprintHash);
+ // To avoid ticking too often, we only want to tick when:
+ // 1. the last submitted serial has moved beyond the completed serial, or
+ // 2. the completed serial has not reached the future serial set by the trackers.
+ if (mLastSubmittedSerial > mCompletedSerial || mCompletedSerial < mFutureSerial) {
+ DAWN_TRY(CheckPassedSerials());
+ DAWN_TRY(TickImpl());
- Ref<ShaderModuleBase> result;
- auto iter = mCaches->shaderModules.find(&blueprint);
- if (iter != mCaches->shaderModules.end()) {
- result = *iter;
- } else {
- if (!parseResult->HasParsedShader()) {
- // We skip the parse on creation if validation isn't enabled which let's us quickly
- // lookup in the cache without validating and parsing. We need the parsed module
- // now, so call validate. Most of |ValidateShaderModuleDescriptor| is parsing, but
- // we can consider splitting it if additional validation is added.
- ASSERT(!IsValidationEnabled());
- DAWN_TRY(ValidateShaderModuleDescriptor(this, descriptor, parseResult,
- compilationMessages));
- }
- DAWN_TRY_ASSIGN(result, CreateShaderModuleImpl(descriptor, parseResult));
- result->SetIsCachedReference();
- result->SetContentHash(blueprintHash);
- mCaches->shaderModules.insert(result.Get());
+ // There is no GPU work in flight, so we need to move the serials forward so that
+ // CPU operations waiting on GPU completion know they don't have to wait.
+ // AssumeCommandsComplete will assign the max serial we must tick to in order to
+ // fire the awaiting callbacks.
+ if (mCompletedSerial == mLastSubmittedSerial) {
+ AssumeCommandsComplete();
}
- return std::move(result);
+ // TODO(crbug.com/dawn/833): decouple TickImpl from updating the serial so that we can
+ // tick the dynamic uploader before the backend resource allocators. This would allow
+ // reclaiming resources one tick earlier.
+ mDynamicUploader->Deallocate(mCompletedSerial);
+ mQueue->Tick(mCompletedSerial);
}
- void DeviceBase::UncacheShaderModule(ShaderModuleBase* obj) {
- ASSERT(obj->IsCachedReference());
- size_t removedCount = mCaches->shaderModules.erase(obj);
- ASSERT(removedCount == 1);
- }
+ // We have to check callback tasks in every Tick because they are not related to any
+ // global serials.
+ FlushCallbackTaskQueue();
- Ref<AttachmentState> DeviceBase::GetOrCreateAttachmentState(
- AttachmentStateBlueprint* blueprint) {
- auto iter = mCaches->attachmentStates.find(blueprint);
- if (iter != mCaches->attachmentStates.end()) {
- return static_cast<AttachmentState*>(*iter);
- }
+ return {};
+}
- Ref<AttachmentState> attachmentState = AcquireRef(new AttachmentState(this, *blueprint));
- attachmentState->SetIsCachedReference();
- attachmentState->SetContentHash(attachmentState->ComputeContentHash());
- mCaches->attachmentStates.insert(attachmentState.Get());
- return attachmentState;
- }
+QueueBase* DeviceBase::APIGetQueue() {
+ // The backend provided the primary queue during initialization.
+ ASSERT(mQueue != nullptr);
- Ref<AttachmentState> DeviceBase::GetOrCreateAttachmentState(
- const RenderBundleEncoderDescriptor* descriptor) {
- AttachmentStateBlueprint blueprint(descriptor);
- return GetOrCreateAttachmentState(&blueprint);
- }
+ // Returns a new reference to the queue.
+ mQueue->Reference();
+ return mQueue.Get();
+}
- Ref<AttachmentState> DeviceBase::GetOrCreateAttachmentState(
- const RenderPipelineDescriptor* descriptor) {
- AttachmentStateBlueprint blueprint(descriptor);
- return GetOrCreateAttachmentState(&blueprint);
+ExternalTextureBase* DeviceBase::APICreateExternalTexture(
+ const ExternalTextureDescriptor* descriptor) {
+ Ref<ExternalTextureBase> result = nullptr;
+ if (ConsumedError(CreateExternalTextureImpl(descriptor), &result,
+ "calling %s.CreateExternalTexture(%s).", this, descriptor)) {
+ return ExternalTextureBase::MakeError(this);
}
- Ref<AttachmentState> DeviceBase::GetOrCreateAttachmentState(
- const RenderPassDescriptor* descriptor) {
- AttachmentStateBlueprint blueprint(descriptor);
- return GetOrCreateAttachmentState(&blueprint);
- }
+ return result.Detach();
+}
- void DeviceBase::UncacheAttachmentState(AttachmentState* obj) {
- ASSERT(obj->IsCachedReference());
- size_t removedCount = mCaches->attachmentStates.erase(obj);
- ASSERT(removedCount == 1);
- }
-
- // Object creation API methods
+void DeviceBase::ApplyFeatures(const DeviceDescriptor* deviceDescriptor) {
+ ASSERT(deviceDescriptor);
+ ASSERT(GetAdapter()->SupportsAllRequiredFeatures(
+ {deviceDescriptor->requiredFeatures, deviceDescriptor->requiredFeaturesCount}));
- BindGroupBase* DeviceBase::APICreateBindGroup(const BindGroupDescriptor* descriptor) {
- Ref<BindGroupBase> result;
- if (ConsumedError(CreateBindGroup(descriptor), &result, "calling %s.CreateBindGroup(%s).",
- this, descriptor)) {
- return BindGroupBase::MakeError(this);
- }
- return result.Detach();
- }
- BindGroupLayoutBase* DeviceBase::APICreateBindGroupLayout(
- const BindGroupLayoutDescriptor* descriptor) {
- Ref<BindGroupLayoutBase> result;
- if (ConsumedError(CreateBindGroupLayout(descriptor), &result,
- "calling %s.CreateBindGroupLayout(%s).", this, descriptor)) {
- return BindGroupLayoutBase::MakeError(this);
- }
- return result.Detach();
- }
- BufferBase* DeviceBase::APICreateBuffer(const BufferDescriptor* descriptor) {
- Ref<BufferBase> result = nullptr;
- if (ConsumedError(CreateBuffer(descriptor), &result, "calling %s.CreateBuffer(%s).", this,
- descriptor)) {
- ASSERT(result == nullptr);
- return BufferBase::MakeError(this, descriptor);
- }
- return result.Detach();
- }
- CommandEncoder* DeviceBase::APICreateCommandEncoder(
- const CommandEncoderDescriptor* descriptor) {
- Ref<CommandEncoder> result;
- if (ConsumedError(CreateCommandEncoder(descriptor), &result,
- "calling %s.CreateCommandEncoder(%s).", this, descriptor)) {
- return CommandEncoder::MakeError(this);
- }
- return result.Detach();
+ for (uint32_t i = 0; i < deviceDescriptor->requiredFeaturesCount; ++i) {
+ mEnabledFeatures.EnableFeature(deviceDescriptor->requiredFeatures[i]);
}
- ComputePipelineBase* DeviceBase::APICreateComputePipeline(
- const ComputePipelineDescriptor* descriptor) {
- TRACE_EVENT1(GetPlatform(), General, "DeviceBase::APICreateComputePipeline", "label",
- utils::GetLabelForTrace(descriptor->label));
+}
- Ref<ComputePipelineBase> result;
- if (ConsumedError(CreateComputePipeline(descriptor), &result,
- "calling %s.CreateComputePipeline(%s).", this, descriptor)) {
- return ComputePipelineBase::MakeError(this);
- }
- return result.Detach();
- }
- void DeviceBase::APICreateComputePipelineAsync(const ComputePipelineDescriptor* descriptor,
- WGPUCreateComputePipelineAsyncCallback callback,
- void* userdata) {
- TRACE_EVENT1(GetPlatform(), General, "DeviceBase::APICreateComputePipelineAsync", "label",
- utils::GetLabelForTrace(descriptor->label));
-
- MaybeError maybeResult = CreateComputePipelineAsync(descriptor, callback, userdata);
-
- // Call the callback directly when a validation error has been found in the front-end
- // validations. If there is no error, then CreateComputePipelineAsync will call the
- // callback.
- if (maybeResult.IsError()) {
- std::unique_ptr<ErrorData> error = maybeResult.AcquireError();
- // TODO(crbug.com/dawn/1122): Call callbacks only on wgpuInstanceProcessEvents
- callback(WGPUCreatePipelineAsyncStatus_Error, nullptr, error->GetMessage().c_str(),
- userdata);
- }
- }
- PipelineLayoutBase* DeviceBase::APICreatePipelineLayout(
- const PipelineLayoutDescriptor* descriptor) {
- Ref<PipelineLayoutBase> result;
- if (ConsumedError(CreatePipelineLayout(descriptor), &result,
- "calling %s.CreatePipelineLayout(%s).", this, descriptor)) {
- return PipelineLayoutBase::MakeError(this);
- }
- return result.Detach();
- }
- QuerySetBase* DeviceBase::APICreateQuerySet(const QuerySetDescriptor* descriptor) {
- Ref<QuerySetBase> result;
- if (ConsumedError(CreateQuerySet(descriptor), &result, "calling %s.CreateQuerySet(%s).",
- this, descriptor)) {
- return QuerySetBase::MakeError(this);
- }
- return result.Detach();
- }
- SamplerBase* DeviceBase::APICreateSampler(const SamplerDescriptor* descriptor) {
- Ref<SamplerBase> result;
- if (ConsumedError(CreateSampler(descriptor), &result, "calling %s.CreateSampler(%s).", this,
- descriptor)) {
- return SamplerBase::MakeError(this);
- }
- return result.Detach();
- }
- void DeviceBase::APICreateRenderPipelineAsync(const RenderPipelineDescriptor* descriptor,
- WGPUCreateRenderPipelineAsyncCallback callback,
- void* userdata) {
- TRACE_EVENT1(GetPlatform(), General, "DeviceBase::APICreateRenderPipelineAsync", "label",
- utils::GetLabelForTrace(descriptor->label));
- // TODO(dawn:563): Add validation error context.
- MaybeError maybeResult = CreateRenderPipelineAsync(descriptor, callback, userdata);
-
- // Call the callback directly when a validation error has been found in the front-end
- // validations. If there is no error, then CreateRenderPipelineAsync will call the
- // callback.
- if (maybeResult.IsError()) {
- std::unique_ptr<ErrorData> error = maybeResult.AcquireError();
- // TODO(crbug.com/dawn/1122): Call callbacks only on wgpuInstanceProcessEvents
- callback(WGPUCreatePipelineAsyncStatus_Error, nullptr, error->GetMessage().c_str(),
- userdata);
- }
- }
- RenderBundleEncoder* DeviceBase::APICreateRenderBundleEncoder(
- const RenderBundleEncoderDescriptor* descriptor) {
- Ref<RenderBundleEncoder> result;
- if (ConsumedError(CreateRenderBundleEncoder(descriptor), &result,
- "calling %s.CreateRenderBundleEncoder(%s).", this, descriptor)) {
- return RenderBundleEncoder::MakeError(this);
- }
- return result.Detach();
- }
- RenderPipelineBase* DeviceBase::APICreateRenderPipeline(
- const RenderPipelineDescriptor* descriptor) {
- TRACE_EVENT1(GetPlatform(), General, "DeviceBase::APICreateRenderPipeline", "label",
- utils::GetLabelForTrace(descriptor->label));
+bool DeviceBase::IsFeatureEnabled(Feature feature) const {
+ return mEnabledFeatures.IsEnabled(feature);
+}
- Ref<RenderPipelineBase> result;
- if (ConsumedError(CreateRenderPipeline(descriptor), &result,
- "calling %s.CreateRenderPipeline(%s).", this, descriptor)) {
- return RenderPipelineBase::MakeError(this);
- }
- return result.Detach();
- }
- ShaderModuleBase* DeviceBase::APICreateShaderModule(const ShaderModuleDescriptor* descriptor) {
- TRACE_EVENT1(GetPlatform(), General, "DeviceBase::APICreateShaderModule", "label",
- utils::GetLabelForTrace(descriptor->label));
-
- Ref<ShaderModuleBase> result;
- std::unique_ptr<OwnedCompilationMessages> compilationMessages(
- std::make_unique<OwnedCompilationMessages>());
- if (ConsumedError(CreateShaderModule(descriptor, compilationMessages.get()), &result,
- "calling %s.CreateShaderModule(%s).", this, descriptor)) {
- DAWN_ASSERT(result == nullptr);
- result = ShaderModuleBase::MakeError(this);
- }
- // Move compilation messages into ShaderModuleBase and emit tint errors and warnings
- // after all other operations are finished successfully.
- result->InjectCompilationMessages(std::move(compilationMessages));
-
- return result.Detach();
- }
- SwapChainBase* DeviceBase::APICreateSwapChain(Surface* surface,
- const SwapChainDescriptor* descriptor) {
- Ref<SwapChainBase> result;
- if (ConsumedError(CreateSwapChain(surface, descriptor), &result,
- "calling %s.CreateSwapChain(%s).", this, descriptor)) {
- return SwapChainBase::MakeError(this);
- }
- return result.Detach();
- }
- TextureBase* DeviceBase::APICreateTexture(const TextureDescriptor* descriptor) {
- Ref<TextureBase> result;
- if (ConsumedError(CreateTexture(descriptor), &result, "calling %s.CreateTexture(%s).", this,
- descriptor)) {
- return TextureBase::MakeError(this);
- }
- return result.Detach();
+void DeviceBase::SetWGSLExtensionAllowList() {
+ // Set the WGSL extensions allow list based on the device's enabled features and other
+ // properties. For example:
+ // mWGSLExtensionAllowList.insert("InternalExtensionForTesting");
+ if (IsFeatureEnabled(Feature::ChromiumExperimentalDp4a)) {
+ mWGSLExtensionAllowList.insert("chromium_experimental_dp4a");
}
+}
- // For Dawn Wire
+WGSLExtensionSet DeviceBase::GetWGSLExtensionAllowList() const {
+ return mWGSLExtensionAllowList;
+}
- BufferBase* DeviceBase::APICreateErrorBuffer() {
- BufferDescriptor desc = {};
- return BufferBase::MakeError(this, &desc);
- }
+bool DeviceBase::IsValidationEnabled() const {
+ return !IsToggleEnabled(Toggle::SkipValidation);
+}
- // Other Device API methods
+bool DeviceBase::IsRobustnessEnabled() const {
+ return !IsToggleEnabled(Toggle::DisableRobustness);
+}
- // Returns true if future ticking is needed.
- bool DeviceBase::APITick() {
- if (IsLost() || ConsumedError(Tick())) {
- return false;
- }
- return !IsDeviceIdle();
- }
-
- MaybeError DeviceBase::Tick() {
- DAWN_TRY(ValidateIsAlive());
-
- // to avoid overly ticking, we only want to tick when:
- // 1. the last submitted serial has moved beyond the completed serial
- // 2. or the completed serial has not reached the future serial set by the trackers
- if (mLastSubmittedSerial > mCompletedSerial || mCompletedSerial < mFutureSerial) {
- DAWN_TRY(CheckPassedSerials());
- DAWN_TRY(TickImpl());
-
- // There is no GPU work in flight, we need to move the serials forward so that
- // so that CPU operations waiting on GPU completion can know they don't have to wait.
- // AssumeCommandsComplete will assign the max serial we must tick to in order to
- // fire the awaiting callbacks.
- if (mCompletedSerial == mLastSubmittedSerial) {
- AssumeCommandsComplete();
- }
+size_t DeviceBase::GetLazyClearCountForTesting() {
+ return mLazyClearCountForTesting;
+}
- // TODO(crbug.com/dawn/833): decouple TickImpl from updating the serial so that we can
- // tick the dynamic uploader before the backend resource allocators. This would allow
- // reclaiming resources one tick earlier.
- mDynamicUploader->Deallocate(mCompletedSerial);
- mQueue->Tick(mCompletedSerial);
- }
+void DeviceBase::IncrementLazyClearCountForTesting() {
+ ++mLazyClearCountForTesting;
+}
- // We have to check callback tasks in every Tick because it is not related to any global
- // serials.
- FlushCallbackTaskQueue();
+size_t DeviceBase::GetDeprecationWarningCountForTesting() {
+ return mDeprecationWarnings->count;
+}
- return {};
+void DeviceBase::EmitDeprecationWarning(const char* warning) {
+ mDeprecationWarnings->count++;
+ if (mDeprecationWarnings->emitted.insert(warning).second) {
+ dawn::WarningLog() << warning;
}
+}
- QueueBase* DeviceBase::APIGetQueue() {
- // Backends gave the primary queue during initialization.
- ASSERT(mQueue != nullptr);
+void DeviceBase::EmitLog(const char* message) {
+ this->EmitLog(WGPULoggingType_Info, message);
+}
- // Returns a new reference to the queue.
- mQueue->Reference();
- return mQueue.Get();
+void DeviceBase::EmitLog(WGPULoggingType loggingType, const char* message) {
+ if (mLoggingCallback != nullptr) {
+ // Use the thread-safe CallbackTaskManager routine
+ std::unique_ptr<LoggingCallbackTask> callbackTask = std::make_unique<LoggingCallbackTask>(
+ mLoggingCallback, loggingType, message, mLoggingUserdata);
+ mCallbackTaskManager->AddCallbackTask(std::move(callbackTask));
}
+}
- ExternalTextureBase* DeviceBase::APICreateExternalTexture(
- const ExternalTextureDescriptor* descriptor) {
- Ref<ExternalTextureBase> result = nullptr;
- if (ConsumedError(CreateExternalTextureImpl(descriptor), &result,
- "calling %s.CreateExternalTexture(%s).", this, descriptor)) {
- return ExternalTextureBase::MakeError(this);
- }
-
- return result.Detach();
+bool DeviceBase::APIGetLimits(SupportedLimits* limits) const {
+ ASSERT(limits != nullptr);
+ if (limits->nextInChain != nullptr) {
+ return false;
}
+ limits->limits = mLimits.v1;
+ return true;
+}
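APIGetLimits only fills the base WGPULimits and rejects any chained extension struct, so callers should leave nextInChain null. A small sketch of querying one limit through the C API; PrintMaxBindGroups is an illustrative helper.

#include <cstdio>

#include <webgpu/webgpu.h>

// Illustrative query of a single limit through the C API.
void PrintMaxBindGroups(WGPUDevice device) {
    WGPUSupportedLimits supported = {};  // nextInChain stays nullptr, as APIGetLimits requires.
    if (wgpuDeviceGetLimits(device, &supported)) {
        std::printf("maxBindGroups = %u\n", supported.limits.maxBindGroups);
    }
}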
- void DeviceBase::ApplyFeatures(const DeviceDescriptor* deviceDescriptor) {
- ASSERT(deviceDescriptor);
- ASSERT(GetAdapter()->SupportsAllRequiredFeatures(
- {deviceDescriptor->requiredFeatures, deviceDescriptor->requiredFeaturesCount}));
+bool DeviceBase::APIHasFeature(wgpu::FeatureName feature) const {
+ return mEnabledFeatures.IsEnabled(feature);
+}
- for (uint32_t i = 0; i < deviceDescriptor->requiredFeaturesCount; ++i) {
- mEnabledFeatures.EnableFeature(deviceDescriptor->requiredFeatures[i]);
- }
- }
+size_t DeviceBase::APIEnumerateFeatures(wgpu::FeatureName* features) const {
+ return mEnabledFeatures.EnumerateFeatures(features);
+}
- bool DeviceBase::IsFeatureEnabled(Feature feature) const {
- return mEnabledFeatures.IsEnabled(feature);
+void DeviceBase::APIInjectError(wgpu::ErrorType type, const char* message) {
+ if (ConsumedError(ValidateErrorType(type))) {
+ return;
}
- bool DeviceBase::IsValidationEnabled() const {
- return !IsToggleEnabled(Toggle::SkipValidation);
+    // This method should only be used to make an error scope reject. For DeviceLost, the
+    // LoseForTesting function can be used instead.
+ if (type != wgpu::ErrorType::Validation && type != wgpu::ErrorType::OutOfMemory) {
+ HandleError(InternalErrorType::Validation,
+ "Invalid injected error, must be Validation or OutOfMemory");
+ return;
}
- bool DeviceBase::IsRobustnessEnabled() const {
- return !IsToggleEnabled(Toggle::DisableRobustness);
- }
+ HandleError(FromWGPUErrorType(type), message);
+}
- size_t DeviceBase::GetLazyClearCountForTesting() {
- return mLazyClearCountForTesting;
- }
+QueueBase* DeviceBase::GetQueue() const {
+ ASSERT(mQueue != nullptr);
+ return mQueue.Get();
+}
- void DeviceBase::IncrementLazyClearCountForTesting() {
- ++mLazyClearCountForTesting;
- }
+// Implementation details of object creation
- size_t DeviceBase::GetDeprecationWarningCountForTesting() {
- return mDeprecationWarnings->count;
+ResultOrError<Ref<BindGroupBase>> DeviceBase::CreateBindGroup(
+ const BindGroupDescriptor* descriptor) {
+ DAWN_TRY(ValidateIsAlive());
+ if (IsValidationEnabled()) {
+ DAWN_TRY_CONTEXT(ValidateBindGroupDescriptor(this, descriptor), "validating %s against %s",
+ descriptor, descriptor->layout);
}
+ return CreateBindGroupImpl(descriptor);
+}
- void DeviceBase::EmitDeprecationWarning(const char* warning) {
- mDeprecationWarnings->count++;
- if (mDeprecationWarnings->emitted.insert(warning).second) {
- dawn::WarningLog() << warning;
- }
+ResultOrError<Ref<BindGroupLayoutBase>> DeviceBase::CreateBindGroupLayout(
+ const BindGroupLayoutDescriptor* descriptor,
+ bool allowInternalBinding) {
+ DAWN_TRY(ValidateIsAlive());
+ if (IsValidationEnabled()) {
+ DAWN_TRY_CONTEXT(ValidateBindGroupLayoutDescriptor(this, descriptor, allowInternalBinding),
+ "validating %s", descriptor);
}
+ return GetOrCreateBindGroupLayout(descriptor);
+}
- void DeviceBase::EmitLog(const char* message) {
- this->EmitLog(WGPULoggingType_Info, message);
+ResultOrError<Ref<BufferBase>> DeviceBase::CreateBuffer(const BufferDescriptor* descriptor) {
+ DAWN_TRY(ValidateIsAlive());
+ if (IsValidationEnabled()) {
+ DAWN_TRY_CONTEXT(ValidateBufferDescriptor(this, descriptor), "validating %s", descriptor);
}
- void DeviceBase::EmitLog(WGPULoggingType loggingType, const char* message) {
- if (mLoggingCallback != nullptr) {
- // Use the thread-safe CallbackTaskManager routine
- std::unique_ptr<LoggingCallbackTask> callbackTask =
- std::make_unique<LoggingCallbackTask>(mLoggingCallback, loggingType, message,
- mLoggingUserdata);
- mCallbackTaskManager->AddCallbackTask(std::move(callbackTask));
- }
- }
+ Ref<BufferBase> buffer;
+ DAWN_TRY_ASSIGN(buffer, CreateBufferImpl(descriptor));
- bool DeviceBase::APIGetLimits(SupportedLimits* limits) const {
- ASSERT(limits != nullptr);
- if (limits->nextInChain != nullptr) {
- return false;
- }
- limits->limits = mLimits.v1;
- return true;
+ if (descriptor->mappedAtCreation) {
+ DAWN_TRY(buffer->MapAtCreation());
}
- bool DeviceBase::APIHasFeature(wgpu::FeatureName feature) const {
- return mEnabledFeatures.IsEnabled(feature);
- }
+ return std::move(buffer);
+}
- size_t DeviceBase::APIEnumerateFeatures(wgpu::FeatureName* features) const {
- return mEnabledFeatures.EnumerateFeatures(features);
+ResultOrError<Ref<ComputePipelineBase>> DeviceBase::CreateComputePipeline(
+ const ComputePipelineDescriptor* descriptor) {
+ DAWN_TRY(ValidateIsAlive());
+ if (IsValidationEnabled()) {
+ DAWN_TRY(ValidateComputePipelineDescriptor(this, descriptor));
}
- void DeviceBase::APIInjectError(wgpu::ErrorType type, const char* message) {
- if (ConsumedError(ValidateErrorType(type))) {
- return;
- }
+ // Ref will keep the pipeline layout alive until the end of the function where
+ // the pipeline will take another reference.
+ Ref<PipelineLayoutBase> layoutRef;
+ ComputePipelineDescriptor appliedDescriptor;
+ DAWN_TRY_ASSIGN(layoutRef, ValidateLayoutAndGetComputePipelineDescriptorWithDefaults(
+ this, *descriptor, &appliedDescriptor));
- // This method should only be used to make error scope reject. For DeviceLost there is the
- // LoseForTesting function that can be used instead.
- if (type != wgpu::ErrorType::Validation && type != wgpu::ErrorType::OutOfMemory) {
- HandleError(InternalErrorType::Validation,
- "Invalid injected error, must be Validation or OutOfMemory");
- return;
- }
-
- HandleError(FromWGPUErrorType(type), message);
- }
-
- QueueBase* DeviceBase::GetQueue() const {
- return mQueue.Get();
+ Ref<ComputePipelineBase> uninitializedComputePipeline =
+ CreateUninitializedComputePipelineImpl(&appliedDescriptor);
+ Ref<ComputePipelineBase> cachedComputePipeline =
+ GetCachedComputePipeline(uninitializedComputePipeline.Get());
+ if (cachedComputePipeline.Get() != nullptr) {
+ return cachedComputePipeline;
}
- // Implementation details of object creation
+ DAWN_TRY(uninitializedComputePipeline->Initialize());
+ return AddOrGetCachedComputePipeline(std::move(uninitializedComputePipeline));
+}
- ResultOrError<Ref<BindGroupBase>> DeviceBase::CreateBindGroup(
- const BindGroupDescriptor* descriptor) {
- DAWN_TRY(ValidateIsAlive());
- if (IsValidationEnabled()) {
- DAWN_TRY_CONTEXT(ValidateBindGroupDescriptor(this, descriptor),
- "validating %s against %s", descriptor, descriptor->layout);
- }
- return CreateBindGroupImpl(descriptor);
+ResultOrError<Ref<CommandEncoder>> DeviceBase::CreateCommandEncoder(
+ const CommandEncoderDescriptor* descriptor) {
+ const CommandEncoderDescriptor defaultDescriptor = {};
+ if (descriptor == nullptr) {
+ descriptor = &defaultDescriptor;
}
- ResultOrError<Ref<BindGroupLayoutBase>> DeviceBase::CreateBindGroupLayout(
- const BindGroupLayoutDescriptor* descriptor,
- bool allowInternalBinding) {
- DAWN_TRY(ValidateIsAlive());
- if (IsValidationEnabled()) {
- DAWN_TRY_CONTEXT(
- ValidateBindGroupLayoutDescriptor(this, descriptor, allowInternalBinding),
- "validating %s", descriptor);
- }
- return GetOrCreateBindGroupLayout(descriptor);
+ DAWN_TRY(ValidateIsAlive());
+ if (IsValidationEnabled()) {
+ DAWN_TRY(ValidateCommandEncoderDescriptor(this, descriptor));
}
+ return CommandEncoder::Create(this, descriptor);
+}
- ResultOrError<Ref<BufferBase>> DeviceBase::CreateBuffer(const BufferDescriptor* descriptor) {
- DAWN_TRY(ValidateIsAlive());
- if (IsValidationEnabled()) {
- DAWN_TRY_CONTEXT(ValidateBufferDescriptor(this, descriptor), "validating %s",
- descriptor);
- }
-
- Ref<BufferBase> buffer;
- DAWN_TRY_ASSIGN(buffer, CreateBufferImpl(descriptor));
+// Overwritten on the backends to return pipeline caches if supported.
+Ref<PipelineCacheBase> DeviceBase::GetOrCreatePipelineCacheImpl(const CacheKey& key) {
+ UNREACHABLE();
+}
- if (descriptor->mappedAtCreation) {
- DAWN_TRY(buffer->MapAtCreation());
- }
-
- return std::move(buffer);
+MaybeError DeviceBase::CreateComputePipelineAsync(const ComputePipelineDescriptor* descriptor,
+ WGPUCreateComputePipelineAsyncCallback callback,
+ void* userdata) {
+ DAWN_TRY(ValidateIsAlive());
+ if (IsValidationEnabled()) {
+ DAWN_TRY(ValidateComputePipelineDescriptor(this, descriptor));
}
- ResultOrError<Ref<ComputePipelineBase>> DeviceBase::CreateComputePipeline(
- const ComputePipelineDescriptor* descriptor) {
- DAWN_TRY(ValidateIsAlive());
- if (IsValidationEnabled()) {
- DAWN_TRY(ValidateComputePipelineDescriptor(this, descriptor));
- }
+ Ref<PipelineLayoutBase> layoutRef;
+ ComputePipelineDescriptor appliedDescriptor;
+ DAWN_TRY_ASSIGN(layoutRef, ValidateLayoutAndGetComputePipelineDescriptorWithDefaults(
+ this, *descriptor, &appliedDescriptor));
- // Ref will keep the pipeline layout alive until the end of the function where
- // the pipeline will take another reference.
- Ref<PipelineLayoutBase> layoutRef;
- ComputePipelineDescriptor appliedDescriptor;
- DAWN_TRY_ASSIGN(layoutRef, ValidateLayoutAndGetComputePipelineDescriptorWithDefaults(
- this, *descriptor, &appliedDescriptor));
-
- Ref<ComputePipelineBase> uninitializedComputePipeline =
- CreateUninitializedComputePipelineImpl(&appliedDescriptor);
- Ref<ComputePipelineBase> cachedComputePipeline =
- GetCachedComputePipeline(uninitializedComputePipeline.Get());
- if (cachedComputePipeline.Get() != nullptr) {
- return cachedComputePipeline;
- }
+ Ref<ComputePipelineBase> uninitializedComputePipeline =
+ CreateUninitializedComputePipelineImpl(&appliedDescriptor);
- DAWN_TRY(uninitializedComputePipeline->Initialize());
- return AddOrGetCachedComputePipeline(std::move(uninitializedComputePipeline));
+ // Call the callback directly when we can get a cached compute pipeline object.
+ Ref<ComputePipelineBase> cachedComputePipeline =
+ GetCachedComputePipeline(uninitializedComputePipeline.Get());
+ if (cachedComputePipeline.Get() != nullptr) {
+ // TODO(crbug.com/dawn/1122): Call callbacks only on wgpuInstanceProcessEvents
+ callback(WGPUCreatePipelineAsyncStatus_Success, ToAPI(cachedComputePipeline.Detach()), "",
+ userdata);
+ } else {
+ // Otherwise we will create the pipeline object in InitializeComputePipelineAsyncImpl(),
+ // where the pipeline object may be initialized asynchronously and the result will be
+ // saved to mCreatePipelineAsyncTracker.
+ InitializeComputePipelineAsyncImpl(std::move(uninitializedComputePipeline), callback,
+ userdata);
+ }
+
+ return {};
+}
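
Editor's note: CreateComputePipelineAsync above either fires the callback immediately on a pipeline-cache hit or hands the uninitialized pipeline to the backend's asynchronous path. As a hedged illustration of the caller side, here is a minimal sketch of a callback compatible with the (status, pipeline, message, userdata) invocation shown above; the PipelineRequest struct, the OnComputePipelineCreated name, and the include path are purely illustrative assumptions, not part of Dawn or webgpu.h.

#include <string>

#include "dawn/webgpu.h"  // assumed include path for the WGPU* C types used below

// Hypothetical per-request context passed through `userdata`.
struct PipelineRequest {
    WGPUComputePipeline pipeline = nullptr;
    std::string error;
    bool done = false;
};

// Matches the argument order used by the cache-hit call above:
// (status, pipeline, message, userdata).
void OnComputePipelineCreated(WGPUCreatePipelineAsyncStatus status,
                              WGPUComputePipeline pipeline,
                              const char* message,
                              void* userdata) {
    PipelineRequest* request = static_cast<PipelineRequest*>(userdata);
    if (status == WGPUCreatePipelineAsyncStatus_Success) {
        request->pipeline = pipeline;  // The caller now owns a reference to the pipeline.
    } else {
        request->error = (message != nullptr) ? message : "";
    }
    request->done = true;
}
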
+
+// This function is overridden with the async version on the backends that support
+// initializing compute pipelines asynchronously.
+void DeviceBase::InitializeComputePipelineAsyncImpl(Ref<ComputePipelineBase> computePipeline,
+ WGPUCreateComputePipelineAsyncCallback callback,
+ void* userdata) {
+ Ref<ComputePipelineBase> result;
+ std::string errorMessage;
+
+ MaybeError maybeError = computePipeline->Initialize();
+ if (maybeError.IsError()) {
+ std::unique_ptr<ErrorData> error = maybeError.AcquireError();
+ errorMessage = error->GetMessage();
+ } else {
+ result = AddOrGetCachedComputePipeline(std::move(computePipeline));
+ }
+
+ std::unique_ptr<CreateComputePipelineAsyncCallbackTask> callbackTask =
+ std::make_unique<CreateComputePipelineAsyncCallbackTask>(std::move(result), errorMessage,
+ callback, userdata);
+ mCallbackTaskManager->AddCallbackTask(std::move(callbackTask));
+}
+
+// This function is overridden with the async version on the backends that support
+// initializing render pipelines asynchronously.
+void DeviceBase::InitializeRenderPipelineAsyncImpl(Ref<RenderPipelineBase> renderPipeline,
+ WGPUCreateRenderPipelineAsyncCallback callback,
+ void* userdata) {
+ Ref<RenderPipelineBase> result;
+ std::string errorMessage;
+
+ MaybeError maybeError = renderPipeline->Initialize();
+ if (maybeError.IsError()) {
+ std::unique_ptr<ErrorData> error = maybeError.AcquireError();
+ errorMessage = error->GetMessage();
+ } else {
+ result = AddOrGetCachedRenderPipeline(std::move(renderPipeline));
+ }
+
+ std::unique_ptr<CreateRenderPipelineAsyncCallbackTask> callbackTask =
+ std::make_unique<CreateRenderPipelineAsyncCallbackTask>(std::move(result), errorMessage,
+ callback, userdata);
+ mCallbackTaskManager->AddCallbackTask(std::move(callbackTask));
+}
+
+ResultOrError<Ref<PipelineLayoutBase>> DeviceBase::CreatePipelineLayout(
+ const PipelineLayoutDescriptor* descriptor) {
+ DAWN_TRY(ValidateIsAlive());
+ if (IsValidationEnabled()) {
+ DAWN_TRY(ValidatePipelineLayoutDescriptor(this, descriptor));
+ }
+ return GetOrCreatePipelineLayout(descriptor);
+}
+
+ResultOrError<Ref<ExternalTextureBase>> DeviceBase::CreateExternalTextureImpl(
+ const ExternalTextureDescriptor* descriptor) {
+ DAWN_TRY(ValidateIsAlive());
+ if (IsValidationEnabled()) {
+ DAWN_TRY_CONTEXT(ValidateExternalTextureDescriptor(this, descriptor), "validating %s",
+ descriptor);
+ }
+
+ return ExternalTextureBase::Create(this, descriptor);
+}
+
+ResultOrError<Ref<QuerySetBase>> DeviceBase::CreateQuerySet(const QuerySetDescriptor* descriptor) {
+ DAWN_TRY(ValidateIsAlive());
+ if (IsValidationEnabled()) {
+ DAWN_TRY_CONTEXT(ValidateQuerySetDescriptor(this, descriptor), "validating %s", descriptor);
+ }
+ return CreateQuerySetImpl(descriptor);
+}
+
+ResultOrError<Ref<RenderBundleEncoder>> DeviceBase::CreateRenderBundleEncoder(
+ const RenderBundleEncoderDescriptor* descriptor) {
+ DAWN_TRY(ValidateIsAlive());
+ if (IsValidationEnabled()) {
+ DAWN_TRY(ValidateRenderBundleEncoderDescriptor(this, descriptor));
+ }
+ return RenderBundleEncoder::Create(this, descriptor);
+}
+
+ResultOrError<Ref<RenderPipelineBase>> DeviceBase::CreateRenderPipeline(
+ const RenderPipelineDescriptor* descriptor) {
+ DAWN_TRY(ValidateIsAlive());
+ if (IsValidationEnabled()) {
+ DAWN_TRY(ValidateRenderPipelineDescriptor(this, descriptor));
+ }
+
+ // Ref will keep the pipeline layout alive until the end of the function where
+ // the pipeline will take another reference.
+ Ref<PipelineLayoutBase> layoutRef;
+ RenderPipelineDescriptor appliedDescriptor;
+ DAWN_TRY_ASSIGN(layoutRef, ValidateLayoutAndGetRenderPipelineDescriptorWithDefaults(
+ this, *descriptor, &appliedDescriptor));
+
+ Ref<RenderPipelineBase> uninitializedRenderPipeline =
+ CreateUninitializedRenderPipelineImpl(&appliedDescriptor);
+
+ Ref<RenderPipelineBase> cachedRenderPipeline =
+ GetCachedRenderPipeline(uninitializedRenderPipeline.Get());
+ if (cachedRenderPipeline != nullptr) {
+ return cachedRenderPipeline;
+ }
+
+ DAWN_TRY(uninitializedRenderPipeline->Initialize());
+ return AddOrGetCachedRenderPipeline(std::move(uninitializedRenderPipeline));
+}
+
+MaybeError DeviceBase::CreateRenderPipelineAsync(const RenderPipelineDescriptor* descriptor,
+ WGPUCreateRenderPipelineAsyncCallback callback,
+ void* userdata) {
+ DAWN_TRY(ValidateIsAlive());
+ if (IsValidationEnabled()) {
+ DAWN_TRY(ValidateRenderPipelineDescriptor(this, descriptor));
+ }
+
+ // Ref will keep the pipeline layout alive until the end of the function where
+ // the pipeline will take another reference.
+ Ref<PipelineLayoutBase> layoutRef;
+ RenderPipelineDescriptor appliedDescriptor;
+ DAWN_TRY_ASSIGN(layoutRef, ValidateLayoutAndGetRenderPipelineDescriptorWithDefaults(
+ this, *descriptor, &appliedDescriptor));
+
+ Ref<RenderPipelineBase> uninitializedRenderPipeline =
+ CreateUninitializedRenderPipelineImpl(&appliedDescriptor);
+
+ // Call the callback directly when we can get a cached render pipeline object.
+ Ref<RenderPipelineBase> cachedRenderPipeline =
+ GetCachedRenderPipeline(uninitializedRenderPipeline.Get());
+ if (cachedRenderPipeline != nullptr) {
+ // TODO(crbug.com/dawn/1122): Call callbacks only on wgpuInstanceProcessEvents
+ callback(WGPUCreatePipelineAsyncStatus_Success, ToAPI(cachedRenderPipeline.Detach()), "",
+ userdata);
+ } else {
+ // Otherwise we will create the pipeline object in InitializeRenderPipelineAsyncImpl(),
+ // where the pipeline object may be initialized asynchronously and the result will be
+ // saved to mCreatePipelineAsyncTracker.
+ InitializeRenderPipelineAsyncImpl(std::move(uninitializedRenderPipeline), callback,
+ userdata);
}
- ResultOrError<Ref<CommandEncoder>> DeviceBase::CreateCommandEncoder(
- const CommandEncoderDescriptor* descriptor) {
- const CommandEncoderDescriptor defaultDescriptor = {};
- if (descriptor == nullptr) {
- descriptor = &defaultDescriptor;
- }
+ return {};
+}
- DAWN_TRY(ValidateIsAlive());
- if (IsValidationEnabled()) {
- DAWN_TRY(ValidateCommandEncoderDescriptor(this, descriptor));
- }
- return CommandEncoder::Create(this, descriptor);
+ResultOrError<Ref<SamplerBase>> DeviceBase::CreateSampler(const SamplerDescriptor* descriptor) {
+ const SamplerDescriptor defaultDescriptor = {};
+ DAWN_TRY(ValidateIsAlive());
+ descriptor = descriptor != nullptr ? descriptor : &defaultDescriptor;
+ if (IsValidationEnabled()) {
+ DAWN_TRY_CONTEXT(ValidateSamplerDescriptor(this, descriptor), "validating %s", descriptor);
}
+ return GetOrCreateSampler(descriptor);
+}
- MaybeError DeviceBase::CreateComputePipelineAsync(
- const ComputePipelineDescriptor* descriptor,
- WGPUCreateComputePipelineAsyncCallback callback,
- void* userdata) {
- DAWN_TRY(ValidateIsAlive());
- if (IsValidationEnabled()) {
- DAWN_TRY(ValidateComputePipelineDescriptor(this, descriptor));
- }
+ResultOrError<Ref<ShaderModuleBase>> DeviceBase::CreateShaderModule(
+ const ShaderModuleDescriptor* descriptor,
+ OwnedCompilationMessages* compilationMessages) {
+ DAWN_TRY(ValidateIsAlive());
- Ref<PipelineLayoutBase> layoutRef;
- ComputePipelineDescriptor appliedDescriptor;
- DAWN_TRY_ASSIGN(layoutRef, ValidateLayoutAndGetComputePipelineDescriptorWithDefaults(
- this, *descriptor, &appliedDescriptor));
-
- Ref<ComputePipelineBase> uninitializedComputePipeline =
- CreateUninitializedComputePipelineImpl(&appliedDescriptor);
-
- // Call the callback directly when we can get a cached compute pipeline object.
- Ref<ComputePipelineBase> cachedComputePipeline =
- GetCachedComputePipeline(uninitializedComputePipeline.Get());
- if (cachedComputePipeline.Get() != nullptr) {
- // TODO(crbug.com/dawn/1122): Call callbacks only on wgpuInstanceProcessEvents
- callback(WGPUCreatePipelineAsyncStatus_Success, ToAPI(cachedComputePipeline.Detach()),
- "", userdata);
- } else {
- // Otherwise we will create the pipeline object in InitializeComputePipelineAsyncImpl(),
- // where the pipeline object may be initialized asynchronously and the result will be
- // saved to mCreatePipelineAsyncTracker.
- InitializeComputePipelineAsyncImpl(std::move(uninitializedComputePipeline), callback,
- userdata);
- }
+    // CreateShaderModule can be called from inside dawn_native. In that case, handle the error
+    // directly in Dawn and do not store compilationMessages in the shader module. This is fine
+    // as long as dawn_native does not use the compilationMessages of these internal modules.
+ ShaderModuleParseResult parseResult;
- return {};
+ if (IsValidationEnabled()) {
+ DAWN_TRY_CONTEXT(
+ ValidateAndParseShaderModule(this, descriptor, &parseResult, compilationMessages),
+ "validating %s", descriptor);
}
- // This function is overwritten with the async version on the backends that supports
- // initializing compute pipelines asynchronously.
- void DeviceBase::InitializeComputePipelineAsyncImpl(
- Ref<ComputePipelineBase> computePipeline,
- WGPUCreateComputePipelineAsyncCallback callback,
- void* userdata) {
- Ref<ComputePipelineBase> result;
- std::string errorMessage;
-
- MaybeError maybeError = computePipeline->Initialize();
- if (maybeError.IsError()) {
- std::unique_ptr<ErrorData> error = maybeError.AcquireError();
- errorMessage = error->GetMessage();
- } else {
- result = AddOrGetCachedComputePipeline(std::move(computePipeline));
- }
+ return GetOrCreateShaderModule(descriptor, &parseResult, compilationMessages);
+}
- std::unique_ptr<CreateComputePipelineAsyncCallbackTask> callbackTask =
- std::make_unique<CreateComputePipelineAsyncCallbackTask>(
- std::move(result), errorMessage, callback, userdata);
- mCallbackTaskManager->AddCallbackTask(std::move(callbackTask));
+ResultOrError<Ref<SwapChainBase>> DeviceBase::CreateSwapChain(
+ Surface* surface,
+ const SwapChainDescriptor* descriptor) {
+ DAWN_TRY(ValidateIsAlive());
+ if (IsValidationEnabled()) {
+ DAWN_TRY_CONTEXT(ValidateSwapChainDescriptor(this, surface, descriptor), "validating %s",
+ descriptor);
}
- // This function is overwritten with the async version on the backends
- // that supports initializing render pipeline asynchronously
- void DeviceBase::InitializeRenderPipelineAsyncImpl(
- Ref<RenderPipelineBase> renderPipeline,
- WGPUCreateRenderPipelineAsyncCallback callback,
- void* userdata) {
- Ref<RenderPipelineBase> result;
- std::string errorMessage;
-
- MaybeError maybeError = renderPipeline->Initialize();
- if (maybeError.IsError()) {
- std::unique_ptr<ErrorData> error = maybeError.AcquireError();
- errorMessage = error->GetMessage();
- } else {
- result = AddOrGetCachedRenderPipeline(std::move(renderPipeline));
- }
+ // TODO(dawn:269): Remove this code path once implementation-based swapchains are removed.
+ if (surface == nullptr) {
+ return CreateSwapChainImpl(descriptor);
+ } else {
+ ASSERT(descriptor->implementation == 0);
- std::unique_ptr<CreateRenderPipelineAsyncCallbackTask> callbackTask =
- std::make_unique<CreateRenderPipelineAsyncCallbackTask>(std::move(result), errorMessage,
- callback, userdata);
- mCallbackTaskManager->AddCallbackTask(std::move(callbackTask));
- }
+ NewSwapChainBase* previousSwapChain = surface->GetAttachedSwapChain();
+ ResultOrError<Ref<NewSwapChainBase>> maybeNewSwapChain =
+ CreateSwapChainImpl(surface, previousSwapChain, descriptor);
- ResultOrError<Ref<PipelineLayoutBase>> DeviceBase::CreatePipelineLayout(
- const PipelineLayoutDescriptor* descriptor) {
- DAWN_TRY(ValidateIsAlive());
- if (IsValidationEnabled()) {
- DAWN_TRY(ValidatePipelineLayoutDescriptor(this, descriptor));
+ if (previousSwapChain != nullptr) {
+ previousSwapChain->DetachFromSurface();
}
- return GetOrCreatePipelineLayout(descriptor);
- }
- ResultOrError<Ref<ExternalTextureBase>> DeviceBase::CreateExternalTextureImpl(
- const ExternalTextureDescriptor* descriptor) {
- if (IsValidationEnabled()) {
- DAWN_TRY_CONTEXT(ValidateExternalTextureDescriptor(this, descriptor), "validating %s",
- descriptor);
- }
+ Ref<NewSwapChainBase> newSwapChain;
+ DAWN_TRY_ASSIGN(newSwapChain, std::move(maybeNewSwapChain));
- return ExternalTextureBase::Create(this, descriptor);
+ newSwapChain->SetIsAttached();
+ surface->SetAttachedSwapChain(newSwapChain.Get());
+ return newSwapChain;
}
+}
- ResultOrError<Ref<QuerySetBase>> DeviceBase::CreateQuerySet(
- const QuerySetDescriptor* descriptor) {
- DAWN_TRY(ValidateIsAlive());
- if (IsValidationEnabled()) {
- DAWN_TRY_CONTEXT(ValidateQuerySetDescriptor(this, descriptor), "validating %s",
- descriptor);
- }
- return CreateQuerySetImpl(descriptor);
+ResultOrError<Ref<TextureBase>> DeviceBase::CreateTexture(const TextureDescriptor* descriptor) {
+ DAWN_TRY(ValidateIsAlive());
+ if (IsValidationEnabled()) {
+ DAWN_TRY_CONTEXT(ValidateTextureDescriptor(this, descriptor), "validating %s.", descriptor);
}
+ return CreateTextureImpl(descriptor);
+}
- ResultOrError<Ref<RenderBundleEncoder>> DeviceBase::CreateRenderBundleEncoder(
- const RenderBundleEncoderDescriptor* descriptor) {
- DAWN_TRY(ValidateIsAlive());
- if (IsValidationEnabled()) {
- DAWN_TRY(ValidateRenderBundleEncoderDescriptor(this, descriptor));
- }
- return RenderBundleEncoder::Create(this, descriptor);
- }
-
- ResultOrError<Ref<RenderPipelineBase>> DeviceBase::CreateRenderPipeline(
- const RenderPipelineDescriptor* descriptor) {
- DAWN_TRY(ValidateIsAlive());
- if (IsValidationEnabled()) {
- DAWN_TRY(ValidateRenderPipelineDescriptor(this, descriptor));
- }
+ResultOrError<Ref<TextureViewBase>> DeviceBase::CreateTextureView(
+ TextureBase* texture,
+ const TextureViewDescriptor* descriptor) {
+ DAWN_TRY(ValidateIsAlive());
+ DAWN_TRY(ValidateObject(texture));
- // Ref will keep the pipeline layout alive until the end of the function where
- // the pipeline will take another reference.
- Ref<PipelineLayoutBase> layoutRef;
- RenderPipelineDescriptor appliedDescriptor;
- DAWN_TRY_ASSIGN(layoutRef, ValidateLayoutAndGetRenderPipelineDescriptorWithDefaults(
- this, *descriptor, &appliedDescriptor));
-
- Ref<RenderPipelineBase> uninitializedRenderPipeline =
- CreateUninitializedRenderPipelineImpl(&appliedDescriptor);
-
- Ref<RenderPipelineBase> cachedRenderPipeline =
- GetCachedRenderPipeline(uninitializedRenderPipeline.Get());
- if (cachedRenderPipeline != nullptr) {
- return cachedRenderPipeline;
- }
+ TextureViewDescriptor desc;
+ DAWN_TRY_ASSIGN(desc, GetTextureViewDescriptorWithDefaults(texture, descriptor));
- DAWN_TRY(uninitializedRenderPipeline->Initialize());
- return AddOrGetCachedRenderPipeline(std::move(uninitializedRenderPipeline));
+ if (IsValidationEnabled()) {
+ DAWN_TRY_CONTEXT(ValidateTextureViewDescriptor(this, texture, &desc),
+ "validating %s against %s.", &desc, texture);
}
+ return CreateTextureViewImpl(texture, &desc);
+}
- MaybeError DeviceBase::CreateRenderPipelineAsync(const RenderPipelineDescriptor* descriptor,
- WGPUCreateRenderPipelineAsyncCallback callback,
- void* userdata) {
- DAWN_TRY(ValidateIsAlive());
- if (IsValidationEnabled()) {
- DAWN_TRY(ValidateRenderPipelineDescriptor(this, descriptor));
- }
-
- // Ref will keep the pipeline layout alive until the end of the function where
- // the pipeline will take another reference.
- Ref<PipelineLayoutBase> layoutRef;
- RenderPipelineDescriptor appliedDescriptor;
- DAWN_TRY_ASSIGN(layoutRef, ValidateLayoutAndGetRenderPipelineDescriptorWithDefaults(
- this, *descriptor, &appliedDescriptor));
-
- Ref<RenderPipelineBase> uninitializedRenderPipeline =
- CreateUninitializedRenderPipelineImpl(&appliedDescriptor);
-
- // Call the callback directly when we can get a cached render pipeline object.
- Ref<RenderPipelineBase> cachedRenderPipeline =
- GetCachedRenderPipeline(uninitializedRenderPipeline.Get());
- if (cachedRenderPipeline != nullptr) {
- // TODO(crbug.com/dawn/1122): Call callbacks only on wgpuInstanceProcessEvents
- callback(WGPUCreatePipelineAsyncStatus_Success, ToAPI(cachedRenderPipeline.Detach()),
- "", userdata);
- } else {
- // Otherwise we will create the pipeline object in InitializeRenderPipelineAsyncImpl(),
- // where the pipeline object may be initialized asynchronously and the result will be
- // saved to mCreatePipelineAsyncTracker.
- InitializeRenderPipelineAsyncImpl(std::move(uninitializedRenderPipeline), callback,
- userdata);
- }
-
- return {};
- }
+// Other implementation details
- ResultOrError<Ref<SamplerBase>> DeviceBase::CreateSampler(const SamplerDescriptor* descriptor) {
- const SamplerDescriptor defaultDescriptor = {};
- DAWN_TRY(ValidateIsAlive());
- descriptor = descriptor != nullptr ? descriptor : &defaultDescriptor;
- if (IsValidationEnabled()) {
- DAWN_TRY_CONTEXT(ValidateSamplerDescriptor(this, descriptor), "validating %s",
- descriptor);
- }
- return GetOrCreateSampler(descriptor);
- }
+DynamicUploader* DeviceBase::GetDynamicUploader() const {
+ return mDynamicUploader.get();
+}
- ResultOrError<Ref<ShaderModuleBase>> DeviceBase::CreateShaderModule(
- const ShaderModuleDescriptor* descriptor,
- OwnedCompilationMessages* compilationMessages) {
- DAWN_TRY(ValidateIsAlive());
+// The Toggle device facility
- // CreateShaderModule can be called from inside dawn_native. If that's the case handle the
- // error directly in Dawn and no compilationMessages held in the shader module. It is ok as
- // long as dawn_native don't use the compilationMessages of these internal shader modules.
- ShaderModuleParseResult parseResult;
+std::vector<const char*> DeviceBase::GetTogglesUsed() const {
+ return mEnabledToggles.GetContainedToggleNames();
+}
- if (IsValidationEnabled()) {
- DAWN_TRY_CONTEXT(
- ValidateShaderModuleDescriptor(this, descriptor, &parseResult, compilationMessages),
- "validating %s", descriptor);
- }
+bool DeviceBase::IsToggleEnabled(Toggle toggle) const {
+ return mEnabledToggles.Has(toggle);
+}
- return GetOrCreateShaderModule(descriptor, &parseResult, compilationMessages);
+void DeviceBase::SetToggle(Toggle toggle, bool isEnabled) {
+ if (!mOverridenToggles.Has(toggle)) {
+ mEnabledToggles.Set(toggle, isEnabled);
}
-
- ResultOrError<Ref<SwapChainBase>> DeviceBase::CreateSwapChain(
- Surface* surface,
- const SwapChainDescriptor* descriptor) {
- DAWN_TRY(ValidateIsAlive());
- if (IsValidationEnabled()) {
- DAWN_TRY_CONTEXT(ValidateSwapChainDescriptor(this, surface, descriptor),
- "validating %s", descriptor);
- }
-
- // TODO(dawn:269): Remove this code path once implementation-based swapchains are removed.
- if (surface == nullptr) {
- return CreateSwapChainImpl(descriptor);
- } else {
- ASSERT(descriptor->implementation == 0);
-
- NewSwapChainBase* previousSwapChain = surface->GetAttachedSwapChain();
- ResultOrError<Ref<NewSwapChainBase>> maybeNewSwapChain =
- CreateSwapChainImpl(surface, previousSwapChain, descriptor);
-
- if (previousSwapChain != nullptr) {
- previousSwapChain->DetachFromSurface();
+}
+
+void DeviceBase::ForceSetToggle(Toggle toggle, bool isEnabled) {
+ if (mOverridenToggles.Has(toggle) && mEnabledToggles.Has(toggle) != isEnabled) {
+        dawn::WarningLog() << "Forcing toggle \"" << ToggleEnumToName(toggle) << "\" to "
+                           << isEnabled << " when it was overridden to be " << !isEnabled;
+ }
+ mEnabledToggles.Set(toggle, isEnabled);
+}
+
+void DeviceBase::SetDefaultToggles() {
+ SetToggle(Toggle::LazyClearResourceOnFirstUse, true);
+ SetToggle(Toggle::DisallowUnsafeAPIs, true);
+}
+
+void DeviceBase::ApplyToggleOverrides(const DawnTogglesDeviceDescriptor* togglesDescriptor) {
+ ASSERT(togglesDescriptor != nullptr);
+
+ for (uint32_t i = 0; i < togglesDescriptor->forceEnabledTogglesCount; ++i) {
+ Toggle toggle = GetAdapter()->GetInstance()->ToggleNameToEnum(
+ togglesDescriptor->forceEnabledToggles[i]);
+ if (toggle != Toggle::InvalidEnum) {
+ mEnabledToggles.Set(toggle, true);
+ mOverridenToggles.Set(toggle, true);
+ }
+ }
+ for (uint32_t i = 0; i < togglesDescriptor->forceDisabledTogglesCount; ++i) {
+ Toggle toggle = GetAdapter()->GetInstance()->ToggleNameToEnum(
+ togglesDescriptor->forceDisabledToggles[i]);
+ if (toggle != Toggle::InvalidEnum) {
+ mEnabledToggles.Set(toggle, false);
+ mOverridenToggles.Set(toggle, true);
+ }
+ }
+}
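
Editor's note: the toggle machinery above gives user-forced toggles precedence over defaults: ApplyToggleOverrides records which toggles were overridden, and SetDefaultToggles goes through SetToggle, which skips them. Below is a minimal standalone sketch of that precedence rule; ToggleState, ApplyUserOverride, and SetDefault are illustrative names, not Dawn's.

#include <set>

enum class Toggle { LazyClearResourceOnFirstUse, DisallowUnsafeAPIs, SkipValidation };

class ToggleState {
  public:
    // User-requested force-enable/disable: applied unconditionally and remembered so that
    // later defaults cannot change it.
    void ApplyUserOverride(Toggle toggle, bool enabled) {
        mOverridden.insert(toggle);
        Apply(toggle, enabled);
    }

    // Defaults and heuristics: ignored for any toggle the user already overrode.
    void SetDefault(Toggle toggle, bool enabled) {
        if (mOverridden.count(toggle) == 0) {
            Apply(toggle, enabled);
        }
    }

    bool IsEnabled(Toggle toggle) const { return mEnabled.count(toggle) != 0; }

  private:
    void Apply(Toggle toggle, bool enabled) {
        if (enabled) {
            mEnabled.insert(toggle);
        } else {
            mEnabled.erase(toggle);
        }
    }

    std::set<Toggle> mEnabled;
    std::set<Toggle> mOverridden;
};

// Usage: a toggle the user forced off stays off even if the defaults would enable it.
// ToggleState toggles;
// toggles.ApplyUserOverride(Toggle::LazyClearResourceOnFirstUse, false);
// toggles.SetDefault(Toggle::LazyClearResourceOnFirstUse, true);
// assert(!toggles.IsEnabled(Toggle::LazyClearResourceOnFirstUse));
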
+
+void DeviceBase::FlushCallbackTaskQueue() {
+ if (!mCallbackTaskManager->IsEmpty()) {
+        // If a user calls Queue::Submit inside the callback, then the device will be ticked,
+        // which in turn ticks the tracker, causing reentrance and a deadlock here. To prevent
+        // such reentrant calls, we first remove all the callback tasks from
+        // mCallbackTaskManager and only then call the callbacks.
+ auto callbackTasks = mCallbackTaskManager->AcquireCallbackTasks();
+ for (std::unique_ptr<CallbackTask>& callbackTask : callbackTasks) {
+ callbackTask->Finish();
+ }
+ }
+}
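
Editor's note: FlushCallbackTaskQueue above avoids reentrant deadlocks by taking all pending tasks out of the manager before running any of them. Here is a minimal standalone sketch of the same acquire-then-run pattern using only the standard library; the TaskQueue class is an illustrative stand-in, not Dawn's CallbackTaskManager.

#include <functional>
#include <mutex>
#include <utility>
#include <vector>

class TaskQueue {
  public:
    void Add(std::function<void()> task) {
        std::lock_guard<std::mutex> lock(mMutex);
        mTasks.push_back(std::move(task));
    }

    // Swap the pending tasks out while holding the lock, then run them without it, so a task
    // that re-enters Add() or Flush() (e.g. a callback that submits more work) cannot deadlock.
    void Flush() {
        std::vector<std::function<void()>> tasks;
        {
            std::lock_guard<std::mutex> lock(mMutex);
            tasks.swap(mTasks);
        }
        for (std::function<void()>& task : tasks) {
            task();
        }
    }

  private:
    std::mutex mMutex;
    std::vector<std::function<void()>> mTasks;
};
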
+
+const CombinedLimits& DeviceBase::GetLimits() const {
+ return mLimits;
+}
+
+AsyncTaskManager* DeviceBase::GetAsyncTaskManager() const {
+ return mAsyncTaskManager.get();
+}
+
+CallbackTaskManager* DeviceBase::GetCallbackTaskManager() const {
+ return mCallbackTaskManager.get();
+}
+
+dawn::platform::WorkerTaskPool* DeviceBase::GetWorkerTaskPool() const {
+ return mWorkerTaskPool.get();
+}
+
+void DeviceBase::AddComputePipelineAsyncCallbackTask(
+ Ref<ComputePipelineBase> pipeline,
+ std::string errorMessage,
+ WGPUCreateComputePipelineAsyncCallback callback,
+ void* userdata) {
+ // CreateComputePipelineAsyncWaitableCallbackTask is declared as an internal class as it
+ // needs to call the private member function DeviceBase::AddOrGetCachedComputePipeline().
+ struct CreateComputePipelineAsyncWaitableCallbackTask final
+ : CreateComputePipelineAsyncCallbackTask {
+ using CreateComputePipelineAsyncCallbackTask::CreateComputePipelineAsyncCallbackTask;
+ void Finish() final {
+ // TODO(dawn:529): call AddOrGetCachedComputePipeline() asynchronously in
+ // CreateComputePipelineAsyncTaskImpl::Run() when the front-end pipeline cache is
+ // thread-safe.
+ if (mPipeline.Get() != nullptr) {
+ mPipeline = mPipeline->GetDevice()->AddOrGetCachedComputePipeline(mPipeline);
}
- Ref<NewSwapChainBase> newSwapChain;
- DAWN_TRY_ASSIGN(newSwapChain, std::move(maybeNewSwapChain));
-
- newSwapChain->SetIsAttached();
- surface->SetAttachedSwapChain(newSwapChain.Get());
- return newSwapChain;
+ CreateComputePipelineAsyncCallbackTask::Finish();
}
- }
-
- ResultOrError<Ref<TextureBase>> DeviceBase::CreateTexture(const TextureDescriptor* descriptor) {
- DAWN_TRY(ValidateIsAlive());
- if (IsValidationEnabled()) {
- DAWN_TRY_CONTEXT(ValidateTextureDescriptor(this, descriptor), "validating %s.",
- descriptor);
- }
- return CreateTextureImpl(descriptor);
- }
-
- ResultOrError<Ref<TextureViewBase>> DeviceBase::CreateTextureView(
- TextureBase* texture,
- const TextureViewDescriptor* descriptor) {
- DAWN_TRY(ValidateIsAlive());
- DAWN_TRY(ValidateObject(texture));
-
- TextureViewDescriptor desc;
- DAWN_TRY_ASSIGN(desc, GetTextureViewDescriptorWithDefaults(texture, descriptor));
-
- if (IsValidationEnabled()) {
- DAWN_TRY_CONTEXT(ValidateTextureViewDescriptor(this, texture, &desc),
- "validating %s against %s.", &desc, texture);
- }
- return CreateTextureViewImpl(texture, &desc);
- }
-
- // Other implementation details
-
- DynamicUploader* DeviceBase::GetDynamicUploader() const {
- return mDynamicUploader.get();
- }
-
- // The Toggle device facility
-
- std::vector<const char*> DeviceBase::GetTogglesUsed() const {
- return mEnabledToggles.GetContainedToggleNames();
- }
-
- bool DeviceBase::IsToggleEnabled(Toggle toggle) const {
- return mEnabledToggles.Has(toggle);
- }
-
- void DeviceBase::SetToggle(Toggle toggle, bool isEnabled) {
- if (!mOverridenToggles.Has(toggle)) {
- mEnabledToggles.Set(toggle, isEnabled);
- }
- }
-
- void DeviceBase::ForceSetToggle(Toggle toggle, bool isEnabled) {
- if (mOverridenToggles.Has(toggle) && mEnabledToggles.Has(toggle) != isEnabled) {
- dawn::WarningLog() << "Forcing toggle \"" << ToggleEnumToName(toggle) << "\" to "
- << isEnabled << " when it was overriden to be " << !isEnabled;
- }
- mEnabledToggles.Set(toggle, isEnabled);
- }
-
- void DeviceBase::SetDefaultToggles() {
- SetToggle(Toggle::LazyClearResourceOnFirstUse, true);
- SetToggle(Toggle::DisallowUnsafeAPIs, true);
- }
-
- void DeviceBase::ApplyToggleOverrides(const DawnTogglesDeviceDescriptor* togglesDescriptor) {
- ASSERT(togglesDescriptor != nullptr);
+ };
- for (uint32_t i = 0; i < togglesDescriptor->forceEnabledTogglesCount; ++i) {
- Toggle toggle = GetAdapter()->GetInstance()->ToggleNameToEnum(
- togglesDescriptor->forceEnabledToggles[i]);
- if (toggle != Toggle::InvalidEnum) {
- mEnabledToggles.Set(toggle, true);
- mOverridenToggles.Set(toggle, true);
+ mCallbackTaskManager->AddCallbackTask(
+ std::make_unique<CreateComputePipelineAsyncWaitableCallbackTask>(
+ std::move(pipeline), errorMessage, callback, userdata));
+}
+
+void DeviceBase::AddRenderPipelineAsyncCallbackTask(Ref<RenderPipelineBase> pipeline,
+ std::string errorMessage,
+ WGPUCreateRenderPipelineAsyncCallback callback,
+ void* userdata) {
+ // CreateRenderPipelineAsyncWaitableCallbackTask is declared as an internal class as it
+ // needs to call the private member function DeviceBase::AddOrGetCachedRenderPipeline().
+ struct CreateRenderPipelineAsyncWaitableCallbackTask final
+ : CreateRenderPipelineAsyncCallbackTask {
+ using CreateRenderPipelineAsyncCallbackTask::CreateRenderPipelineAsyncCallbackTask;
+
+ void Finish() final {
+ // TODO(dawn:529): call AddOrGetCachedRenderPipeline() asynchronously in
+ // CreateRenderPipelineAsyncTaskImpl::Run() when the front-end pipeline cache is
+ // thread-safe.
+ if (mPipeline.Get() != nullptr) {
+ mPipeline = mPipeline->GetDevice()->AddOrGetCachedRenderPipeline(mPipeline);
}
- }
- for (uint32_t i = 0; i < togglesDescriptor->forceDisabledTogglesCount; ++i) {
- Toggle toggle = GetAdapter()->GetInstance()->ToggleNameToEnum(
- togglesDescriptor->forceDisabledToggles[i]);
- if (toggle != Toggle::InvalidEnum) {
- mEnabledToggles.Set(toggle, false);
- mOverridenToggles.Set(toggle, true);
- }
- }
- }
- void DeviceBase::FlushCallbackTaskQueue() {
- if (!mCallbackTaskManager->IsEmpty()) {
- // If a user calls Queue::Submit inside the callback, then the device will be ticked,
- // which in turns ticks the tracker, causing reentrance and dead lock here. To prevent
- // such reentrant call, we remove all the callback tasks from mCallbackTaskManager,
- // update mCallbackTaskManager, then call all the callbacks.
- auto callbackTasks = mCallbackTaskManager->AcquireCallbackTasks();
- for (std::unique_ptr<CallbackTask>& callbackTask : callbackTasks) {
- callbackTask->Finish();
- }
+ CreateRenderPipelineAsyncCallbackTask::Finish();
}
- }
-
- const CombinedLimits& DeviceBase::GetLimits() const {
- return mLimits;
- }
-
- AsyncTaskManager* DeviceBase::GetAsyncTaskManager() const {
- return mAsyncTaskManager.get();
- }
-
- CallbackTaskManager* DeviceBase::GetCallbackTaskManager() const {
- return mCallbackTaskManager.get();
- }
-
- dawn::platform::WorkerTaskPool* DeviceBase::GetWorkerTaskPool() const {
- return mWorkerTaskPool.get();
- }
-
- void DeviceBase::AddComputePipelineAsyncCallbackTask(
- Ref<ComputePipelineBase> pipeline,
- std::string errorMessage,
- WGPUCreateComputePipelineAsyncCallback callback,
- void* userdata) {
- // CreateComputePipelineAsyncWaitableCallbackTask is declared as an internal class as it
- // needs to call the private member function DeviceBase::AddOrGetCachedComputePipeline().
- struct CreateComputePipelineAsyncWaitableCallbackTask final
- : CreateComputePipelineAsyncCallbackTask {
- using CreateComputePipelineAsyncCallbackTask::CreateComputePipelineAsyncCallbackTask;
- void Finish() final {
- // TODO(dawn:529): call AddOrGetCachedComputePipeline() asynchronously in
- // CreateComputePipelineAsyncTaskImpl::Run() when the front-end pipeline cache is
- // thread-safe.
- if (mPipeline.Get() != nullptr) {
- mPipeline = mPipeline->GetDevice()->AddOrGetCachedComputePipeline(mPipeline);
- }
+ };
- CreateComputePipelineAsyncCallbackTask::Finish();
- }
- };
+ mCallbackTaskManager->AddCallbackTask(
+ std::make_unique<CreateRenderPipelineAsyncWaitableCallbackTask>(
+ std::move(pipeline), errorMessage, callback, userdata));
+}
- mCallbackTaskManager->AddCallbackTask(
- std::make_unique<CreateComputePipelineAsyncWaitableCallbackTask>(
- std::move(pipeline), errorMessage, callback, userdata));
- }
-
- void DeviceBase::AddRenderPipelineAsyncCallbackTask(
- Ref<RenderPipelineBase> pipeline,
- std::string errorMessage,
- WGPUCreateRenderPipelineAsyncCallback callback,
- void* userdata) {
- // CreateRenderPipelineAsyncWaitableCallbackTask is declared as an internal class as it
- // needs to call the private member function DeviceBase::AddOrGetCachedRenderPipeline().
- struct CreateRenderPipelineAsyncWaitableCallbackTask final
- : CreateRenderPipelineAsyncCallbackTask {
- using CreateRenderPipelineAsyncCallbackTask::CreateRenderPipelineAsyncCallbackTask;
-
- void Finish() final {
- // TODO(dawn:529): call AddOrGetCachedRenderPipeline() asynchronously in
- // CreateRenderPipelineAsyncTaskImpl::Run() when the front-end pipeline cache is
- // thread-safe.
- if (mPipeline.Get() != nullptr) {
- mPipeline = mPipeline->GetDevice()->AddOrGetCachedRenderPipeline(mPipeline);
- }
-
- CreateRenderPipelineAsyncCallbackTask::Finish();
- }
- };
+PipelineCompatibilityToken DeviceBase::GetNextPipelineCompatibilityToken() {
+ return PipelineCompatibilityToken(mNextPipelineCompatibilityToken++);
+}
- mCallbackTaskManager->AddCallbackTask(
- std::make_unique<CreateRenderPipelineAsyncWaitableCallbackTask>(
- std::move(pipeline), errorMessage, callback, userdata));
- }
+const CacheKey& DeviceBase::GetCacheKey() const {
+ return mDeviceCacheKey;
+}
- PipelineCompatibilityToken DeviceBase::GetNextPipelineCompatibilityToken() {
- return PipelineCompatibilityToken(mNextPipelineCompatibilityToken++);
- }
+const std::string& DeviceBase::GetLabel() const {
+ return mLabel;
+}
- const CacheKey& DeviceBase::GetCacheKey() const {
- return mDeviceCacheKey;
- }
+void DeviceBase::APISetLabel(const char* label) {
+ mLabel = label;
+ SetLabelImpl();
+}
- const std::string& DeviceBase::GetLabel() const {
- return mLabel;
- }
+void DeviceBase::SetLabelImpl() {}
- void DeviceBase::APISetLabel(const char* label) {
- mLabel = label;
- SetLabelImpl();
- }
+bool DeviceBase::ShouldDuplicateNumWorkgroupsForDispatchIndirect(
+ ComputePipelineBase* computePipeline) const {
+ return false;
+}
- void DeviceBase::SetLabelImpl() {
- }
+bool DeviceBase::MayRequireDuplicationOfIndirectParameters() const {
+ return false;
+}
- bool DeviceBase::ShouldDuplicateNumWorkgroupsForDispatchIndirect(
- ComputePipelineBase* computePipeline) const {
- return false;
- }
+bool DeviceBase::ShouldDuplicateParametersForDrawIndirect(
+ const RenderPipelineBase* renderPipelineBase) const {
+ return false;
+}
} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/Device.h b/chromium/third_party/dawn/src/dawn/native/Device.h
index bbffcc9b523..dc14e152577 100644
--- a/chromium/third_party/dawn/src/dawn/native/Device.h
+++ b/chromium/third_party/dawn/src/dawn/native/Device.h
@@ -15,6 +15,13 @@
#ifndef SRC_DAWN_NATIVE_DEVICE_H_
#define SRC_DAWN_NATIVE_DEVICE_H_
+#include <memory>
+#include <mutex>
+#include <string>
+#include <unordered_set>
+#include <utility>
+#include <vector>
+
#include "dawn/native/CacheKey.h"
#include "dawn/native/Commands.h"
#include "dawn/native/ComputePipeline.h"
@@ -25,531 +32,545 @@
#include "dawn/native/Limits.h"
#include "dawn/native/ObjectBase.h"
#include "dawn/native/ObjectType_autogen.h"
+#include "dawn/native/RefCountedWithExternalCount.h"
#include "dawn/native/StagingBuffer.h"
#include "dawn/native/Toggles.h"
#include "dawn/native/DawnNative.h"
#include "dawn/native/dawn_platform.h"
-#include <mutex>
-#include <utility>
-
namespace dawn::platform {
- class WorkerTaskPool;
+class WorkerTaskPool;
} // namespace dawn::platform
namespace dawn::native {
- class AdapterBase;
- class AsyncTaskManager;
- class AttachmentState;
- class AttachmentStateBlueprint;
- class BindGroupLayoutBase;
- class CallbackTaskManager;
- class DynamicUploader;
- class ErrorScopeStack;
- class ExternalTextureBase;
- class OwnedCompilationMessages;
- class PersistentCache;
- class StagingBufferBase;
- struct CallbackTask;
- struct InternalPipelineStore;
- struct ShaderModuleParseResult;
-
- class DeviceBase : public RefCounted {
- public:
- DeviceBase(AdapterBase* adapter, const DeviceDescriptor* descriptor);
- virtual ~DeviceBase();
-
- void HandleError(InternalErrorType type, const char* message);
-
- bool ConsumedError(MaybeError maybeError) {
- if (DAWN_UNLIKELY(maybeError.IsError())) {
- ConsumeError(maybeError.AcquireError());
- return true;
- }
- return false;
+class AsyncTaskManager;
+class AttachmentState;
+class AttachmentStateBlueprint;
+class Blob;
+class BlobCache;
+class CallbackTaskManager;
+class DynamicUploader;
+class ErrorScopeStack;
+class OwnedCompilationMessages;
+struct CallbackTask;
+struct InternalPipelineStore;
+struct ShaderModuleParseResult;
+
+using WGSLExtensionSet = std::unordered_set<std::string>;
+
+class DeviceBase : public RefCountedWithExternalCount {
+ public:
+ DeviceBase(AdapterBase* adapter, const DeviceDescriptor* descriptor);
+ ~DeviceBase() override;
+
+ void HandleError(InternalErrorType type, const char* message);
+
+ bool ConsumedError(MaybeError maybeError) {
+ if (DAWN_UNLIKELY(maybeError.IsError())) {
+ ConsumeError(maybeError.AcquireError());
+ return true;
}
-
- template <typename T>
- bool ConsumedError(ResultOrError<T> resultOrError, T* result) {
- if (DAWN_UNLIKELY(resultOrError.IsError())) {
- ConsumeError(resultOrError.AcquireError());
- return true;
- }
- *result = resultOrError.AcquireSuccess();
- return false;
+ return false;
+ }
+
+ template <typename T>
+ bool ConsumedError(ResultOrError<T> resultOrError, T* result) {
+ if (DAWN_UNLIKELY(resultOrError.IsError())) {
+ ConsumeError(resultOrError.AcquireError());
+ return true;
}
-
- template <typename... Args>
- bool ConsumedError(MaybeError maybeError, const char* formatStr, const Args&... args) {
- if (DAWN_UNLIKELY(maybeError.IsError())) {
- std::unique_ptr<ErrorData> error = maybeError.AcquireError();
- if (error->GetType() == InternalErrorType::Validation) {
- std::string out;
- absl::UntypedFormatSpec format(formatStr);
- if (absl::FormatUntyped(&out, format, {absl::FormatArg(args)...})) {
- error->AppendContext(std::move(out));
- } else {
- error->AppendContext(
- absl::StrFormat("[Failed to format error: \"%s\"]", formatStr));
- }
+ *result = resultOrError.AcquireSuccess();
+ return false;
+ }
+
+ template <typename... Args>
+ bool ConsumedError(MaybeError maybeError, const char* formatStr, const Args&... args) {
+ if (DAWN_UNLIKELY(maybeError.IsError())) {
+ std::unique_ptr<ErrorData> error = maybeError.AcquireError();
+ if (error->GetType() == InternalErrorType::Validation) {
+ std::string out;
+ absl::UntypedFormatSpec format(formatStr);
+ if (absl::FormatUntyped(&out, format, {absl::FormatArg(args)...})) {
+ error->AppendContext(std::move(out));
+ } else {
+ error->AppendContext(
+ absl::StrFormat("[Failed to format error: \"%s\"]", formatStr));
}
- ConsumeError(std::move(error));
- return true;
}
- return false;
+ ConsumeError(std::move(error));
+ return true;
}
-
- template <typename T, typename... Args>
- bool ConsumedError(ResultOrError<T> resultOrError,
- T* result,
- const char* formatStr,
- const Args&... args) {
- if (DAWN_UNLIKELY(resultOrError.IsError())) {
- std::unique_ptr<ErrorData> error = resultOrError.AcquireError();
- if (error->GetType() == InternalErrorType::Validation) {
- std::string out;
- absl::UntypedFormatSpec format(formatStr);
- if (absl::FormatUntyped(&out, format, {absl::FormatArg(args)...})) {
- error->AppendContext(std::move(out));
- } else {
- error->AppendContext(
- absl::StrFormat("[Failed to format error: \"%s\"]", formatStr));
- }
+ return false;
+ }
+
+ template <typename T, typename... Args>
+ bool ConsumedError(ResultOrError<T> resultOrError,
+ T* result,
+ const char* formatStr,
+ const Args&... args) {
+ if (DAWN_UNLIKELY(resultOrError.IsError())) {
+ std::unique_ptr<ErrorData> error = resultOrError.AcquireError();
+ if (error->GetType() == InternalErrorType::Validation) {
+ std::string out;
+ absl::UntypedFormatSpec format(formatStr);
+ if (absl::FormatUntyped(&out, format, {absl::FormatArg(args)...})) {
+ error->AppendContext(std::move(out));
+ } else {
+ error->AppendContext(
+ absl::StrFormat("[Failed to format error: \"%s\"]", formatStr));
}
- ConsumeError(std::move(error));
- return true;
}
- *result = resultOrError.AcquireSuccess();
- return false;
+ ConsumeError(std::move(error));
+ return true;
}
+ *result = resultOrError.AcquireSuccess();
+ return false;
+ }
+
+ MaybeError ValidateObject(const ApiObjectBase* object) const;
+
+ AdapterBase* GetAdapter() const;
+ dawn::platform::Platform* GetPlatform() const;
+
+ // Returns the Format corresponding to the wgpu::TextureFormat or an error if the format
+ // isn't a valid wgpu::TextureFormat or isn't supported by this device.
+ // The pointer returned has the same lifetime as the device.
+ ResultOrError<const Format*> GetInternalFormat(wgpu::TextureFormat format) const;
+
+ // Returns the Format corresponding to the wgpu::TextureFormat and assumes the format is
+ // valid and supported.
+ // The reference returned has the same lifetime as the device.
+ const Format& GetValidInternalFormat(wgpu::TextureFormat format) const;
+ const Format& GetValidInternalFormat(FormatIndex formatIndex) const;
+
+ virtual ResultOrError<Ref<CommandBufferBase>> CreateCommandBuffer(
+ CommandEncoder* encoder,
+ const CommandBufferDescriptor* descriptor) = 0;
+
+ ExecutionSerial GetCompletedCommandSerial() const;
+ ExecutionSerial GetLastSubmittedCommandSerial() const;
+ ExecutionSerial GetFutureSerial() const;
+ ExecutionSerial GetPendingCommandSerial() const;
+
+    // Many Dawn objects are completely immutable once created, which means that if two
+    // creations are given the same arguments, they can return the same object. Reusing
+    // objects makes it possible to compare them with a single pointer comparison.
+    //
+    // Technically no object is immutable since each one has a reference count, and an
+    // application with reference-counting issues could "see" that objects are reused.
+    // This is solved by automatic reference counting, and also by the fact that when using
+    // the client-server wire every creation gets a different proxy object, with a
+    // different reference count.
+    //
+    // When trying to create an object, we give both the descriptor and an example of what
+    // the created object will be, the "blueprint". The blueprint is just a FooBase object
+    // instead of a backend Foo object. If the blueprint doesn't match an object in the
+    // cache, then the descriptor is used to make a new object. (A minimal illustrative
+    // sketch of this pattern follows the first GetOrCreate declaration below.)
+ ResultOrError<Ref<BindGroupLayoutBase>> GetOrCreateBindGroupLayout(
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken = PipelineCompatibilityToken(0));
+ void UncacheBindGroupLayout(BindGroupLayoutBase* obj);
+
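
Editor's note: as referenced in the comment above, here is a minimal sketch of the blueprint-style cache that the GetOrCreate*/Uncache* declarations rely on. FooBase, FooHash, FooEqual, and GetOrCreateFoo are illustrative stand-ins under the assumption that objects are keyed by their creation arguments; Dawn's real caches hash and compare the actual descriptor contents.

#include <cstddef>
#include <functional>
#include <string>
#include <unordered_set>
#include <utility>

// Hypothetical immutable object keyed by its creation arguments.
struct FooBase {
    std::string descriptor;  // stands in for the real descriptor contents
    explicit FooBase(std::string desc) : descriptor(std::move(desc)) {}
};

struct FooHash {
    std::size_t operator()(const FooBase* foo) const {
        return std::hash<std::string>()(foo->descriptor);
    }
};
struct FooEqual {
    bool operator()(const FooBase* a, const FooBase* b) const {
        return a->descriptor == b->descriptor;
    }
};

using FooCache = std::unordered_set<FooBase*, FooHash, FooEqual>;

// Build a front-end "blueprint" from the descriptor, look it up, and only create (and cache)
// a new object when no equivalent one exists. Reused objects then compare equal by pointer.
FooBase* GetOrCreateFoo(FooCache* cache, const std::string& descriptor) {
    FooBase blueprint(descriptor);
    auto iter = cache->find(&blueprint);
    if (iter != cache->end()) {
        return *iter;
    }
    FooBase* foo = new FooBase(descriptor);  // a real backend would create its Foo object here
    cache->insert(foo);
    return foo;
}
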
+ BindGroupLayoutBase* GetEmptyBindGroupLayout();
+
+ void UncacheComputePipeline(ComputePipelineBase* obj);
+
+ ResultOrError<Ref<TextureViewBase>> GetOrCreatePlaceholderTextureViewForExternalTexture();
+
+ ResultOrError<Ref<PipelineLayoutBase>> GetOrCreatePipelineLayout(
+ const PipelineLayoutDescriptor* descriptor);
+ void UncachePipelineLayout(PipelineLayoutBase* obj);
+
+ void UncacheRenderPipeline(RenderPipelineBase* obj);
+
+ ResultOrError<Ref<SamplerBase>> GetOrCreateSampler(const SamplerDescriptor* descriptor);
+ void UncacheSampler(SamplerBase* obj);
+
+ ResultOrError<Ref<ShaderModuleBase>> GetOrCreateShaderModule(
+ const ShaderModuleDescriptor* descriptor,
+ ShaderModuleParseResult* parseResult,
+ OwnedCompilationMessages* compilationMessages);
+ void UncacheShaderModule(ShaderModuleBase* obj);
+
+ Ref<AttachmentState> GetOrCreateAttachmentState(AttachmentStateBlueprint* blueprint);
+ Ref<AttachmentState> GetOrCreateAttachmentState(
+ const RenderBundleEncoderDescriptor* descriptor);
+ Ref<AttachmentState> GetOrCreateAttachmentState(const RenderPipelineDescriptor* descriptor);
+ Ref<AttachmentState> GetOrCreateAttachmentState(const RenderPassDescriptor* descriptor);
+ void UncacheAttachmentState(AttachmentState* obj);
+
+ Ref<PipelineCacheBase> GetOrCreatePipelineCache(const CacheKey& key);
+
+    // Object creation methods that can be used in a reentrant manner.
+ ResultOrError<Ref<BindGroupBase>> CreateBindGroup(const BindGroupDescriptor* descriptor);
+ ResultOrError<Ref<BindGroupLayoutBase>> CreateBindGroupLayout(
+ const BindGroupLayoutDescriptor* descriptor,
+ bool allowInternalBinding = false);
+ ResultOrError<Ref<BufferBase>> CreateBuffer(const BufferDescriptor* descriptor);
+ ResultOrError<Ref<CommandEncoder>> CreateCommandEncoder(
+ const CommandEncoderDescriptor* descriptor = nullptr);
+ ResultOrError<Ref<ComputePipelineBase>> CreateComputePipeline(
+ const ComputePipelineDescriptor* descriptor);
+ MaybeError CreateComputePipelineAsync(const ComputePipelineDescriptor* descriptor,
+ WGPUCreateComputePipelineAsyncCallback callback,
+ void* userdata);
- MaybeError ValidateObject(const ApiObjectBase* object) const;
-
- AdapterBase* GetAdapter() const;
- dawn::platform::Platform* GetPlatform() const;
-
- // Returns the Format corresponding to the wgpu::TextureFormat or an error if the format
- // isn't a valid wgpu::TextureFormat or isn't supported by this device.
- // The pointer returned has the same lifetime as the device.
- ResultOrError<const Format*> GetInternalFormat(wgpu::TextureFormat format) const;
-
- // Returns the Format corresponding to the wgpu::TextureFormat and assumes the format is
- // valid and supported.
- // The reference returned has the same lifetime as the device.
- const Format& GetValidInternalFormat(wgpu::TextureFormat format) const;
- const Format& GetValidInternalFormat(FormatIndex formatIndex) const;
-
- virtual ResultOrError<Ref<CommandBufferBase>> CreateCommandBuffer(
- CommandEncoder* encoder,
- const CommandBufferDescriptor* descriptor) = 0;
-
- ExecutionSerial GetCompletedCommandSerial() const;
- ExecutionSerial GetLastSubmittedCommandSerial() const;
- ExecutionSerial GetFutureSerial() const;
- ExecutionSerial GetPendingCommandSerial() const;
-
- // Many Dawn objects are completely immutable once created which means that if two
- // creations are given the same arguments, they can return the same object. Reusing
- // objects will help make comparisons between objects by a single pointer comparison.
- //
- // Technically no object is immutable as they have a reference count, and an
- // application with reference-counting issues could "see" that objects are reused.
- // This is solved by automatic-reference counting, and also the fact that when using
- // the client-server wire every creation will get a different proxy object, with a
- // different reference count.
- //
- // When trying to create an object, we give both the descriptor and an example of what
- // the created object will be, the "blueprint". The blueprint is just a FooBase object
- // instead of a backend Foo object. If the blueprint doesn't match an object in the
- // cache, then the descriptor is used to make a new object.
- ResultOrError<Ref<BindGroupLayoutBase>> GetOrCreateBindGroupLayout(
- const BindGroupLayoutDescriptor* descriptor,
- PipelineCompatibilityToken pipelineCompatibilityToken = PipelineCompatibilityToken(0));
- void UncacheBindGroupLayout(BindGroupLayoutBase* obj);
-
- BindGroupLayoutBase* GetEmptyBindGroupLayout();
-
- void UncacheComputePipeline(ComputePipelineBase* obj);
-
- ResultOrError<Ref<TextureViewBase>> GetOrCreateDummyTextureViewForExternalTexture();
-
- ResultOrError<Ref<PipelineLayoutBase>> GetOrCreatePipelineLayout(
- const PipelineLayoutDescriptor* descriptor);
- void UncachePipelineLayout(PipelineLayoutBase* obj);
-
- void UncacheRenderPipeline(RenderPipelineBase* obj);
-
- ResultOrError<Ref<SamplerBase>> GetOrCreateSampler(const SamplerDescriptor* descriptor);
- void UncacheSampler(SamplerBase* obj);
-
- ResultOrError<Ref<ShaderModuleBase>> GetOrCreateShaderModule(
- const ShaderModuleDescriptor* descriptor,
- ShaderModuleParseResult* parseResult,
- OwnedCompilationMessages* compilationMessages);
- void UncacheShaderModule(ShaderModuleBase* obj);
-
- Ref<AttachmentState> GetOrCreateAttachmentState(AttachmentStateBlueprint* blueprint);
- Ref<AttachmentState> GetOrCreateAttachmentState(
- const RenderBundleEncoderDescriptor* descriptor);
- Ref<AttachmentState> GetOrCreateAttachmentState(const RenderPipelineDescriptor* descriptor);
- Ref<AttachmentState> GetOrCreateAttachmentState(const RenderPassDescriptor* descriptor);
- void UncacheAttachmentState(AttachmentState* obj);
-
- // Object creation methods that be used in a reentrant manner.
- ResultOrError<Ref<BindGroupBase>> CreateBindGroup(const BindGroupDescriptor* descriptor);
- ResultOrError<Ref<BindGroupLayoutBase>> CreateBindGroupLayout(
- const BindGroupLayoutDescriptor* descriptor,
- bool allowInternalBinding = false);
- ResultOrError<Ref<BufferBase>> CreateBuffer(const BufferDescriptor* descriptor);
- ResultOrError<Ref<CommandEncoder>> CreateCommandEncoder(
- const CommandEncoderDescriptor* descriptor = nullptr);
- ResultOrError<Ref<ComputePipelineBase>> CreateComputePipeline(
- const ComputePipelineDescriptor* descriptor);
- MaybeError CreateComputePipelineAsync(const ComputePipelineDescriptor* descriptor,
- WGPUCreateComputePipelineAsyncCallback callback,
- void* userdata);
-
- ResultOrError<Ref<PipelineLayoutBase>> CreatePipelineLayout(
- const PipelineLayoutDescriptor* descriptor);
- ResultOrError<Ref<QuerySetBase>> CreateQuerySet(const QuerySetDescriptor* descriptor);
- ResultOrError<Ref<RenderBundleEncoder>> CreateRenderBundleEncoder(
- const RenderBundleEncoderDescriptor* descriptor);
- ResultOrError<Ref<RenderPipelineBase>> CreateRenderPipeline(
- const RenderPipelineDescriptor* descriptor);
- MaybeError CreateRenderPipelineAsync(const RenderPipelineDescriptor* descriptor,
- WGPUCreateRenderPipelineAsyncCallback callback,
+ ResultOrError<Ref<PipelineLayoutBase>> CreatePipelineLayout(
+ const PipelineLayoutDescriptor* descriptor);
+ ResultOrError<Ref<QuerySetBase>> CreateQuerySet(const QuerySetDescriptor* descriptor);
+ ResultOrError<Ref<RenderBundleEncoder>> CreateRenderBundleEncoder(
+ const RenderBundleEncoderDescriptor* descriptor);
+ ResultOrError<Ref<RenderPipelineBase>> CreateRenderPipeline(
+ const RenderPipelineDescriptor* descriptor);
+ MaybeError CreateRenderPipelineAsync(const RenderPipelineDescriptor* descriptor,
+ WGPUCreateRenderPipelineAsyncCallback callback,
+ void* userdata);
+ ResultOrError<Ref<SamplerBase>> CreateSampler(const SamplerDescriptor* descriptor = nullptr);
+ ResultOrError<Ref<ShaderModuleBase>> CreateShaderModule(
+ const ShaderModuleDescriptor* descriptor,
+ OwnedCompilationMessages* compilationMessages = nullptr);
+ ResultOrError<Ref<SwapChainBase>> CreateSwapChain(Surface* surface,
+ const SwapChainDescriptor* descriptor);
+ ResultOrError<Ref<TextureBase>> CreateTexture(const TextureDescriptor* descriptor);
+ ResultOrError<Ref<TextureViewBase>> CreateTextureView(TextureBase* texture,
+ const TextureViewDescriptor* descriptor);
+
+ // Implementation of API object creation methods. DO NOT use them in a reentrant manner.
+ BindGroupBase* APICreateBindGroup(const BindGroupDescriptor* descriptor);
+ BindGroupLayoutBase* APICreateBindGroupLayout(const BindGroupLayoutDescriptor* descriptor);
+ BufferBase* APICreateBuffer(const BufferDescriptor* descriptor);
+ CommandEncoder* APICreateCommandEncoder(const CommandEncoderDescriptor* descriptor);
+ ComputePipelineBase* APICreateComputePipeline(const ComputePipelineDescriptor* descriptor);
+ PipelineLayoutBase* APICreatePipelineLayout(const PipelineLayoutDescriptor* descriptor);
+ QuerySetBase* APICreateQuerySet(const QuerySetDescriptor* descriptor);
+ void APICreateComputePipelineAsync(const ComputePipelineDescriptor* descriptor,
+ WGPUCreateComputePipelineAsyncCallback callback,
+ void* userdata);
+ void APICreateRenderPipelineAsync(const RenderPipelineDescriptor* descriptor,
+ WGPUCreateRenderPipelineAsyncCallback callback,
+ void* userdata);
+ RenderBundleEncoder* APICreateRenderBundleEncoder(
+ const RenderBundleEncoderDescriptor* descriptor);
+ RenderPipelineBase* APICreateRenderPipeline(const RenderPipelineDescriptor* descriptor);
+ ExternalTextureBase* APICreateExternalTexture(const ExternalTextureDescriptor* descriptor);
+ SamplerBase* APICreateSampler(const SamplerDescriptor* descriptor);
+ ShaderModuleBase* APICreateShaderModule(const ShaderModuleDescriptor* descriptor);
+ SwapChainBase* APICreateSwapChain(Surface* surface, const SwapChainDescriptor* descriptor);
+ TextureBase* APICreateTexture(const TextureDescriptor* descriptor);
+
+ InternalPipelineStore* GetInternalPipelineStore();
+
+ // For Dawn Wire
+ BufferBase* APICreateErrorBuffer();
+
+ QueueBase* APIGetQueue();
+
+ bool APIGetLimits(SupportedLimits* limits) const;
+    // Note that this function should not be used to query features that can only be enabled
+    // behind toggles (use IsFeatureEnabled() instead).
+ bool APIHasFeature(wgpu::FeatureName feature) const;
+    // Note that this function should not be used to query features that can only be enabled
+    // behind toggles (use IsFeatureEnabled() instead).
+ size_t APIEnumerateFeatures(wgpu::FeatureName* features) const;
+ void APIInjectError(wgpu::ErrorType type, const char* message);
+ bool APITick();
+
+ void APISetDeviceLostCallback(wgpu::DeviceLostCallback callback, void* userdata);
+ void APISetUncapturedErrorCallback(wgpu::ErrorCallback callback, void* userdata);
+ void APISetLoggingCallback(wgpu::LoggingCallback callback, void* userdata);
+ void APIPushErrorScope(wgpu::ErrorFilter filter);
+ bool APIPopErrorScope(wgpu::ErrorCallback callback, void* userdata);
+
+ MaybeError ValidateIsAlive() const;
+
+ BlobCache* GetBlobCache();
+ Blob LoadCachedBlob(const CacheKey& key);
+ void StoreCachedBlob(const CacheKey& key, const Blob& blob);
+
+ virtual ResultOrError<std::unique_ptr<StagingBufferBase>> CreateStagingBuffer(size_t size) = 0;
+ virtual MaybeError CopyFromStagingToBuffer(StagingBufferBase* source,
+ uint64_t sourceOffset,
+ BufferBase* destination,
+ uint64_t destinationOffset,
+ uint64_t size) = 0;
+ virtual MaybeError CopyFromStagingToTexture(const StagingBufferBase* source,
+ const TextureDataLayout& src,
+ TextureCopy* dst,
+ const Extent3D& copySizePixels) = 0;
+
+ DynamicUploader* GetDynamicUploader() const;
+
+ // The device state which is a combination of creation state and loss state.
+ //
+    // - BeingCreated: the device hasn't finished creation yet and the frontend cannot be used
+    //   (both for the application calling WebGPU and for re-entrant calls). No work exists on
+    //   the GPU timeline.
+    // - Alive: the device is usable and might have work happening on the GPU timeline.
+    // - BeingDisconnected: the device is no longer usable because we are waiting for all
+    //   work on the GPU timeline to finish. (This is so validation prevents the
+    //   application from adding more work during the transition from Alive to
+    //   Disconnected.)
+ // - Disconnected: there is no longer work happening on the GPU timeline and the CPU data
+ // structures can be safely destroyed without additional synchronization.
+ // - Destroyed: the device is disconnected and resources have been reclaimed.
+ enum class State {
+ BeingCreated,
+ Alive,
+ BeingDisconnected,
+ Disconnected,
+ Destroyed,
+ };
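+    // Illustrative lifecycle (not an exhaustive list of transitions): a device that initializes
+    // successfully moves from BeingCreated to Alive, and later goes through BeingDisconnected and
+    // Disconnected to Destroyed when it is destroyed or lost.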
+ State GetState() const;
+ bool IsLost() const;
+ void TrackObject(ApiObjectBase* object);
+ std::mutex* GetObjectListMutex(ObjectType type);
+
+ std::vector<const char*> GetTogglesUsed() const;
+ WGSLExtensionSet GetWGSLExtensionAllowList() const;
+ bool IsToggleEnabled(Toggle toggle) const;
+ bool IsValidationEnabled() const;
+ bool IsRobustnessEnabled() const;
+ size_t GetLazyClearCountForTesting();
+ void IncrementLazyClearCountForTesting();
+ size_t GetDeprecationWarningCountForTesting();
+ void EmitDeprecationWarning(const char* warning);
+ void EmitLog(const char* message);
+ void EmitLog(WGPULoggingType loggingType, const char* message);
+ void APILoseForTesting();
+ QueueBase* GetQueue() const;
+
+    // AddFutureSerial updates mFutureSerial with the max serial that needs to be ticked in order
+    // to clean up all pending callback work or to execute asynchronous resource writes. It should
+    // be given the serial that a callback is tracked with, so that once that serial is completed,
+    // the callback can be resolved and cleaned up. This is so that when there is no GPU work (the
+    // last submitted serial has not moved beyond the completed serial), Tick can still check
+    // whether we have pending work to take care of, rather than hanging and never reaching the
+    // serial the work will be executed on.
+ void AddFutureSerial(ExecutionSerial serial);
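+    // For example, a caller that tracks a callback against serial S would pass S here so that
+    // Tick() keeps making progress until S completes, even if no further GPU work is submitted.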
+ // Check for passed fences and set the new completed serial
+ MaybeError CheckPassedSerials();
+
+ MaybeError Tick();
+
+ // TODO(crbug.com/dawn/839): Organize the below backend-specific parameters into the struct
+ // BackendMetadata that we can query from the device.
+ virtual uint32_t GetOptimalBytesPerRowAlignment() const = 0;
+ virtual uint64_t GetOptimalBufferToTextureCopyOffsetAlignment() const = 0;
+
+ virtual float GetTimestampPeriodInNS() const = 0;
+
+ virtual bool ShouldDuplicateNumWorkgroupsForDispatchIndirect(
+ ComputePipelineBase* computePipeline) const;
+
+ virtual bool MayRequireDuplicationOfIndirectParameters() const;
+
+ virtual bool ShouldDuplicateParametersForDrawIndirect(
+ const RenderPipelineBase* renderPipelineBase) const;
+
+ // TODO(crbug.com/dawn/1434): Make this function non-overridable when we support requesting
+ // Adapter with toggles.
+ virtual bool IsFeatureEnabled(Feature feature) const;
+
+ const CombinedLimits& GetLimits() const;
+
+ AsyncTaskManager* GetAsyncTaskManager() const;
+ CallbackTaskManager* GetCallbackTaskManager() const;
+ dawn::platform::WorkerTaskPool* GetWorkerTaskPool() const;
+
+ void AddComputePipelineAsyncCallbackTask(Ref<ComputePipelineBase> pipeline,
+ std::string errorMessage,
+ WGPUCreateComputePipelineAsyncCallback callback,
void* userdata);
- ResultOrError<Ref<SamplerBase>> CreateSampler(
- const SamplerDescriptor* descriptor = nullptr);
- ResultOrError<Ref<ShaderModuleBase>> CreateShaderModule(
- const ShaderModuleDescriptor* descriptor,
- OwnedCompilationMessages* compilationMessages = nullptr);
- ResultOrError<Ref<SwapChainBase>> CreateSwapChain(Surface* surface,
- const SwapChainDescriptor* descriptor);
- ResultOrError<Ref<TextureBase>> CreateTexture(const TextureDescriptor* descriptor);
- ResultOrError<Ref<TextureViewBase>> CreateTextureView(
- TextureBase* texture,
- const TextureViewDescriptor* descriptor);
-
- // Implementation of API object creation methods. DO NOT use them in a reentrant manner.
- BindGroupBase* APICreateBindGroup(const BindGroupDescriptor* descriptor);
- BindGroupLayoutBase* APICreateBindGroupLayout(const BindGroupLayoutDescriptor* descriptor);
- BufferBase* APICreateBuffer(const BufferDescriptor* descriptor);
- CommandEncoder* APICreateCommandEncoder(const CommandEncoderDescriptor* descriptor);
- ComputePipelineBase* APICreateComputePipeline(const ComputePipelineDescriptor* descriptor);
- PipelineLayoutBase* APICreatePipelineLayout(const PipelineLayoutDescriptor* descriptor);
- QuerySetBase* APICreateQuerySet(const QuerySetDescriptor* descriptor);
- void APICreateComputePipelineAsync(const ComputePipelineDescriptor* descriptor,
- WGPUCreateComputePipelineAsyncCallback callback,
- void* userdata);
- void APICreateRenderPipelineAsync(const RenderPipelineDescriptor* descriptor,
- WGPUCreateRenderPipelineAsyncCallback callback,
- void* userdata);
- RenderBundleEncoder* APICreateRenderBundleEncoder(
- const RenderBundleEncoderDescriptor* descriptor);
- RenderPipelineBase* APICreateRenderPipeline(const RenderPipelineDescriptor* descriptor);
- ExternalTextureBase* APICreateExternalTexture(const ExternalTextureDescriptor* descriptor);
- SamplerBase* APICreateSampler(const SamplerDescriptor* descriptor);
- ShaderModuleBase* APICreateShaderModule(const ShaderModuleDescriptor* descriptor);
- SwapChainBase* APICreateSwapChain(Surface* surface, const SwapChainDescriptor* descriptor);
- TextureBase* APICreateTexture(const TextureDescriptor* descriptor);
-
- InternalPipelineStore* GetInternalPipelineStore();
-
- // For Dawn Wire
- BufferBase* APICreateErrorBuffer();
-
- QueueBase* APIGetQueue();
-
- bool APIGetLimits(SupportedLimits* limits) const;
- bool APIHasFeature(wgpu::FeatureName feature) const;
- size_t APIEnumerateFeatures(wgpu::FeatureName* features) const;
- void APIInjectError(wgpu::ErrorType type, const char* message);
- bool APITick();
-
- void APISetDeviceLostCallback(wgpu::DeviceLostCallback callback, void* userdata);
- void APISetUncapturedErrorCallback(wgpu::ErrorCallback callback, void* userdata);
- void APISetLoggingCallback(wgpu::LoggingCallback callback, void* userdata);
- void APIPushErrorScope(wgpu::ErrorFilter filter);
- bool APIPopErrorScope(wgpu::ErrorCallback callback, void* userdata);
-
- MaybeError ValidateIsAlive() const;
-
- PersistentCache* GetPersistentCache();
-
- virtual ResultOrError<std::unique_ptr<StagingBufferBase>> CreateStagingBuffer(
- size_t size) = 0;
- virtual MaybeError CopyFromStagingToBuffer(StagingBufferBase* source,
- uint64_t sourceOffset,
- BufferBase* destination,
- uint64_t destinationOffset,
- uint64_t size) = 0;
- virtual MaybeError CopyFromStagingToTexture(const StagingBufferBase* source,
- const TextureDataLayout& src,
- TextureCopy* dst,
- const Extent3D& copySizePixels) = 0;
-
- DynamicUploader* GetDynamicUploader() const;
-
- // The device state which is a combination of creation state and loss state.
- //
-        // - BeingCreated: the device hasn't finished creation yet and the frontend cannot be used
-        //   (both for the application calling WebGPU and for re-entrant calls). No work exists on
-        //   the GPU timeline.
-        // - Alive: the device is usable and might have work happening on the GPU timeline.
-        // - BeingDisconnected: the device is no longer usable because we are waiting for all
-        //   work on the GPU timeline to finish. (This is so validation prevents the
-        //   application from adding more work during the transition from Alive to
-        //   Disconnected.)
- // - Disconnected: there is no longer work happening on the GPU timeline and the CPU data
- // structures can be safely destroyed without additional synchronization.
- // - Destroyed: the device is disconnected and resources have been reclaimed.
- enum class State {
- BeingCreated,
- Alive,
- BeingDisconnected,
- Disconnected,
- Destroyed,
- };
- State GetState() const;
- bool IsLost() const;
- void TrackObject(ApiObjectBase* object);
- std::mutex* GetObjectListMutex(ObjectType type);
-
- std::vector<const char*> GetTogglesUsed() const;
- bool IsFeatureEnabled(Feature feature) const;
- bool IsToggleEnabled(Toggle toggle) const;
- bool IsValidationEnabled() const;
- bool IsRobustnessEnabled() const;
- size_t GetLazyClearCountForTesting();
- void IncrementLazyClearCountForTesting();
- size_t GetDeprecationWarningCountForTesting();
- void EmitDeprecationWarning(const char* warning);
- void EmitLog(const char* message);
- void EmitLog(WGPULoggingType loggingType, const char* message);
- void APILoseForTesting();
- QueueBase* GetQueue() const;
-
-        // AddFutureSerial updates mFutureSerial with the max serial that needs to be ticked in
-        // order to clean up all pending callback work or to execute asynchronous resource writes.
-        // It should be given the serial that a callback is tracked with, so that once that serial
-        // is completed, the callback can be resolved and cleaned up. This is so that when there is
-        // no GPU work (the last submitted serial has not moved beyond the completed serial), Tick
-        // can still check whether we have pending work to take care of, rather than hanging and
-        // never reaching the serial the work will be executed on.
- void AddFutureSerial(ExecutionSerial serial);
- // Check for passed fences and set the new completed serial
- MaybeError CheckPassedSerials();
-
- MaybeError Tick();
-
- // TODO(crbug.com/dawn/839): Organize the below backend-specific parameters into the struct
- // BackendMetadata that we can query from the device.
- virtual uint32_t GetOptimalBytesPerRowAlignment() const = 0;
- virtual uint64_t GetOptimalBufferToTextureCopyOffsetAlignment() const = 0;
-
- virtual float GetTimestampPeriodInNS() const = 0;
-
- virtual bool ShouldDuplicateNumWorkgroupsForDispatchIndirect(
- ComputePipelineBase* computePipeline) const;
-
- const CombinedLimits& GetLimits() const;
-
- AsyncTaskManager* GetAsyncTaskManager() const;
- CallbackTaskManager* GetCallbackTaskManager() const;
- dawn::platform::WorkerTaskPool* GetWorkerTaskPool() const;
-
- void AddComputePipelineAsyncCallbackTask(Ref<ComputePipelineBase> pipeline,
- std::string errorMessage,
- WGPUCreateComputePipelineAsyncCallback callback,
- void* userdata);
- void AddRenderPipelineAsyncCallbackTask(Ref<RenderPipelineBase> pipeline,
- std::string errorMessage,
- WGPUCreateRenderPipelineAsyncCallback callback,
- void* userdata);
-
- PipelineCompatibilityToken GetNextPipelineCompatibilityToken();
-
- const CacheKey& GetCacheKey() const;
- const std::string& GetLabel() const;
- void APISetLabel(const char* label);
- void APIDestroy();
-
- protected:
- // Constructor used only for mocking and testing.
- DeviceBase();
-
- void SetToggle(Toggle toggle, bool isEnabled);
- void ForceSetToggle(Toggle toggle, bool isEnabled);
-
- MaybeError Initialize(Ref<QueueBase> defaultQueue);
- void DestroyObjects();
- void Destroy();
-
-        // Increment mLastSubmittedSerial when we submit the next serial
- void IncrementLastSubmittedCommandSerial();
-
- private:
- virtual ResultOrError<Ref<BindGroupBase>> CreateBindGroupImpl(
- const BindGroupDescriptor* descriptor) = 0;
- virtual ResultOrError<Ref<BindGroupLayoutBase>> CreateBindGroupLayoutImpl(
- const BindGroupLayoutDescriptor* descriptor,
- PipelineCompatibilityToken pipelineCompatibilityToken) = 0;
- virtual ResultOrError<Ref<BufferBase>> CreateBufferImpl(
- const BufferDescriptor* descriptor) = 0;
- virtual ResultOrError<Ref<ExternalTextureBase>> CreateExternalTextureImpl(
- const ExternalTextureDescriptor* descriptor);
- virtual ResultOrError<Ref<PipelineLayoutBase>> CreatePipelineLayoutImpl(
- const PipelineLayoutDescriptor* descriptor) = 0;
- virtual ResultOrError<Ref<QuerySetBase>> CreateQuerySetImpl(
- const QuerySetDescriptor* descriptor) = 0;
- virtual ResultOrError<Ref<SamplerBase>> CreateSamplerImpl(
- const SamplerDescriptor* descriptor) = 0;
- virtual ResultOrError<Ref<ShaderModuleBase>> CreateShaderModuleImpl(
- const ShaderModuleDescriptor* descriptor,
- ShaderModuleParseResult* parseResult) = 0;
- virtual ResultOrError<Ref<SwapChainBase>> CreateSwapChainImpl(
- const SwapChainDescriptor* descriptor) = 0;
- // Note that previousSwapChain may be nullptr, or come from a different backend.
- virtual ResultOrError<Ref<NewSwapChainBase>> CreateSwapChainImpl(
- Surface* surface,
- NewSwapChainBase* previousSwapChain,
- const SwapChainDescriptor* descriptor) = 0;
- virtual ResultOrError<Ref<TextureBase>> CreateTextureImpl(
- const TextureDescriptor* descriptor) = 0;
- virtual ResultOrError<Ref<TextureViewBase>> CreateTextureViewImpl(
- TextureBase* texture,
- const TextureViewDescriptor* descriptor) = 0;
- virtual Ref<ComputePipelineBase> CreateUninitializedComputePipelineImpl(
- const ComputePipelineDescriptor* descriptor) = 0;
- virtual Ref<RenderPipelineBase> CreateUninitializedRenderPipelineImpl(
- const RenderPipelineDescriptor* descriptor) = 0;
- virtual void SetLabelImpl();
-
- virtual MaybeError TickImpl() = 0;
- void FlushCallbackTaskQueue();
-
- ResultOrError<Ref<BindGroupLayoutBase>> CreateEmptyBindGroupLayout();
-
- Ref<ComputePipelineBase> GetCachedComputePipeline(
- ComputePipelineBase* uninitializedComputePipeline);
- Ref<RenderPipelineBase> GetCachedRenderPipeline(
- RenderPipelineBase* uninitializedRenderPipeline);
- Ref<ComputePipelineBase> AddOrGetCachedComputePipeline(
- Ref<ComputePipelineBase> computePipeline);
- Ref<RenderPipelineBase> AddOrGetCachedRenderPipeline(
- Ref<RenderPipelineBase> renderPipeline);
- virtual void InitializeComputePipelineAsyncImpl(
- Ref<ComputePipelineBase> computePipeline,
- WGPUCreateComputePipelineAsyncCallback callback,
- void* userdata);
- virtual void InitializeRenderPipelineAsyncImpl(
- Ref<RenderPipelineBase> renderPipeline,
- WGPUCreateRenderPipelineAsyncCallback callback,
- void* userdata);
-
- void ApplyToggleOverrides(const DawnTogglesDeviceDescriptor* togglesDescriptor);
- void ApplyFeatures(const DeviceDescriptor* deviceDescriptor);
-
- void SetDefaultToggles();
-
- void ConsumeError(std::unique_ptr<ErrorData> error);
-
-        // Each backend should implement this to check its passed fences, if there are any, and
-        // return a completed serial. Returning 0 indicates that there are no fences to check.
- virtual ResultOrError<ExecutionSerial> CheckAndUpdateCompletedSerials() = 0;
- // During shut down of device, some operations might have been started since the last submit
- // and waiting on a serial that doesn't have a corresponding fence enqueued. Fake serials to
- // make all commands look completed.
- void AssumeCommandsComplete();
- bool IsDeviceIdle();
-
- // mCompletedSerial tracks the last completed command serial that the fence has returned.
- // mLastSubmittedSerial tracks the last submitted command serial.
- // During device removal, the serials could be artificially incremented
-        // to make it appear as if commands have been completed. They can also be artificially
-        // incremented when no work is being done on the GPU so CPU operations don't have to wait on
-        // stale serials.
-        // mFutureSerial tracks the largest serial we need to tick to for asynchronous commands or
-        // callbacks to fire.
- ExecutionSerial mCompletedSerial = ExecutionSerial(0);
- ExecutionSerial mLastSubmittedSerial = ExecutionSerial(0);
- ExecutionSerial mFutureSerial = ExecutionSerial(0);
-
-        // DestroyImpl is used to clean up and release resources used by the device; it does not
-        // wait for the GPU or check errors.
- virtual void DestroyImpl() = 0;
-
- // WaitForIdleForDestruction waits for GPU to finish, checks errors and gets ready for
- // destruction. This is only used when properly destructing the device. For a real
- // device loss, this function doesn't need to be called since the driver already closed all
- // resources.
- virtual MaybeError WaitForIdleForDestruction() = 0;
-
- wgpu::ErrorCallback mUncapturedErrorCallback = nullptr;
- void* mUncapturedErrorUserdata = nullptr;
-
- wgpu::LoggingCallback mLoggingCallback = nullptr;
- void* mLoggingUserdata = nullptr;
-
- wgpu::DeviceLostCallback mDeviceLostCallback = nullptr;
- void* mDeviceLostUserdata = nullptr;
-
- std::unique_ptr<ErrorScopeStack> mErrorScopeStack;
-
- // The Device keeps a ref to the Instance so that any live Device keeps the Instance alive.
- // The Instance shouldn't need to ref child objects so this shouldn't introduce ref cycles.
- // The Device keeps a simple pointer to the Adapter because the Adapter is owned by the
- // Instance.
- Ref<InstanceBase> mInstance;
- AdapterBase* mAdapter = nullptr;
-
- // The object caches aren't exposed in the header as they would require a lot of
- // additional includes.
- struct Caches;
- std::unique_ptr<Caches> mCaches;
-
- Ref<BindGroupLayoutBase> mEmptyBindGroupLayout;
-
- Ref<TextureViewBase> mExternalTextureDummyView;
-
- std::unique_ptr<DynamicUploader> mDynamicUploader;
- std::unique_ptr<AsyncTaskManager> mAsyncTaskManager;
- Ref<QueueBase> mQueue;
-
- struct DeprecationWarnings;
- std::unique_ptr<DeprecationWarnings> mDeprecationWarnings;
-
- State mState = State::BeingCreated;
-
- // Encompasses the mutex and the actual list that contains all live objects "owned" by the
- // device.
- struct ApiObjectList {
- std::mutex mutex;
- LinkedList<ApiObjectBase> objects;
- };
- PerObjectType<ApiObjectList> mObjectLists;
-
- FormatTable mFormatTable;
-
- TogglesSet mEnabledToggles;
- TogglesSet mOverridenToggles;
- size_t mLazyClearCountForTesting = 0;
- std::atomic_uint64_t mNextPipelineCompatibilityToken;
-
- CombinedLimits mLimits;
- FeaturesSet mEnabledFeatures;
-
- std::unique_ptr<InternalPipelineStore> mInternalPipelineStore;
-
- std::unique_ptr<PersistentCache> mPersistentCache;
-
- std::unique_ptr<CallbackTaskManager> mCallbackTaskManager;
- std::unique_ptr<dawn::platform::WorkerTaskPool> mWorkerTaskPool;
- std::string mLabel;
- CacheKey mDeviceCacheKey;
+ void AddRenderPipelineAsyncCallbackTask(Ref<RenderPipelineBase> pipeline,
+ std::string errorMessage,
+ WGPUCreateRenderPipelineAsyncCallback callback,
+ void* userdata);
+
+ PipelineCompatibilityToken GetNextPipelineCompatibilityToken();
+
+ const CacheKey& GetCacheKey() const;
+ const std::string& GetLabel() const;
+ void APISetLabel(const char* label);
+ void APIDestroy();
+
+ virtual void AppendDebugLayerMessages(ErrorData* error) {}
+
+ protected:
+ // Constructor used only for mocking and testing.
+ DeviceBase();
+
+ void SetToggle(Toggle toggle, bool isEnabled);
+ void ForceSetToggle(Toggle toggle, bool isEnabled);
+
+ MaybeError Initialize(Ref<QueueBase> defaultQueue);
+ void DestroyObjects();
+ void Destroy();
+
+    // Increment mLastSubmittedSerial when we submit the next serial
+ void IncrementLastSubmittedCommandSerial();
+
+ private:
+ void WillDropLastExternalRef() override;
+
+ virtual ResultOrError<Ref<BindGroupBase>> CreateBindGroupImpl(
+ const BindGroupDescriptor* descriptor) = 0;
+ virtual ResultOrError<Ref<BindGroupLayoutBase>> CreateBindGroupLayoutImpl(
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken) = 0;
+ virtual ResultOrError<Ref<BufferBase>> CreateBufferImpl(const BufferDescriptor* descriptor) = 0;
+ virtual ResultOrError<Ref<ExternalTextureBase>> CreateExternalTextureImpl(
+ const ExternalTextureDescriptor* descriptor);
+ virtual ResultOrError<Ref<PipelineLayoutBase>> CreatePipelineLayoutImpl(
+ const PipelineLayoutDescriptor* descriptor) = 0;
+ virtual ResultOrError<Ref<QuerySetBase>> CreateQuerySetImpl(
+ const QuerySetDescriptor* descriptor) = 0;
+ virtual ResultOrError<Ref<SamplerBase>> CreateSamplerImpl(
+ const SamplerDescriptor* descriptor) = 0;
+ virtual ResultOrError<Ref<ShaderModuleBase>> CreateShaderModuleImpl(
+ const ShaderModuleDescriptor* descriptor,
+ ShaderModuleParseResult* parseResult,
+ OwnedCompilationMessages* compilationMessages) = 0;
+ virtual ResultOrError<Ref<SwapChainBase>> CreateSwapChainImpl(
+ const SwapChainDescriptor* descriptor) = 0;
+ // Note that previousSwapChain may be nullptr, or come from a different backend.
+ virtual ResultOrError<Ref<NewSwapChainBase>> CreateSwapChainImpl(
+ Surface* surface,
+ NewSwapChainBase* previousSwapChain,
+ const SwapChainDescriptor* descriptor) = 0;
+ virtual ResultOrError<Ref<TextureBase>> CreateTextureImpl(
+ const TextureDescriptor* descriptor) = 0;
+ virtual ResultOrError<Ref<TextureViewBase>> CreateTextureViewImpl(
+ TextureBase* texture,
+ const TextureViewDescriptor* descriptor) = 0;
+ virtual Ref<ComputePipelineBase> CreateUninitializedComputePipelineImpl(
+ const ComputePipelineDescriptor* descriptor) = 0;
+ virtual Ref<RenderPipelineBase> CreateUninitializedRenderPipelineImpl(
+ const RenderPipelineDescriptor* descriptor) = 0;
+ virtual void SetLabelImpl();
+
+ virtual MaybeError TickImpl() = 0;
+ void FlushCallbackTaskQueue();
+
+ ResultOrError<Ref<BindGroupLayoutBase>> CreateEmptyBindGroupLayout();
+
+ Ref<ComputePipelineBase> GetCachedComputePipeline(
+ ComputePipelineBase* uninitializedComputePipeline);
+ Ref<RenderPipelineBase> GetCachedRenderPipeline(
+ RenderPipelineBase* uninitializedRenderPipeline);
+ Ref<ComputePipelineBase> AddOrGetCachedComputePipeline(
+ Ref<ComputePipelineBase> computePipeline);
+ Ref<RenderPipelineBase> AddOrGetCachedRenderPipeline(Ref<RenderPipelineBase> renderPipeline);
+ virtual Ref<PipelineCacheBase> GetOrCreatePipelineCacheImpl(const CacheKey& key);
+ virtual void InitializeComputePipelineAsyncImpl(Ref<ComputePipelineBase> computePipeline,
+ WGPUCreateComputePipelineAsyncCallback callback,
+ void* userdata);
+ virtual void InitializeRenderPipelineAsyncImpl(Ref<RenderPipelineBase> renderPipeline,
+ WGPUCreateRenderPipelineAsyncCallback callback,
+ void* userdata);
+
+ void ApplyToggleOverrides(const DawnTogglesDeviceDescriptor* togglesDescriptor);
+ void ApplyFeatures(const DeviceDescriptor* deviceDescriptor);
+
+ void SetDefaultToggles();
+
+ void SetWGSLExtensionAllowList();
+
+ void ConsumeError(std::unique_ptr<ErrorData> error);
+
+    // Each backend should implement this to check its passed fences, if there are any, and
+    // return a completed serial. Returning 0 indicates that there are no fences to check.
+ virtual ResultOrError<ExecutionSerial> CheckAndUpdateCompletedSerials() = 0;
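+    // A minimal backend implementation might look like the following sketch (the fence queue and
+    // FenceIsSignaled() are hypothetical placeholders, not part of this interface):
+    //   ExecutionSerial completed(0);
+    //   while (!mFencesInFlight.empty() && FenceIsSignaled(mFencesInFlight.front().fence)) {
+    //       completed = mFencesInFlight.front().serial;
+    //       mFencesInFlight.pop();
+    //   }
+    //   return completed;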
+ // During shut down of device, some operations might have been started since the last submit
+ // and waiting on a serial that doesn't have a corresponding fence enqueued. Fake serials to
+ // make all commands look completed.
+ void AssumeCommandsComplete();
+ bool IsDeviceIdle();
+
+ // mCompletedSerial tracks the last completed command serial that the fence has returned.
+ // mLastSubmittedSerial tracks the last submitted command serial.
+ // During device removal, the serials could be artificially incremented
+    // to make it appear as if commands have been completed. They can also be artificially
+    // incremented when no work is being done on the GPU so CPU operations don't have to wait on
+    // stale serials.
+    // mFutureSerial tracks the largest serial we need to tick to for asynchronous commands or
+    // callbacks to fire.
+ ExecutionSerial mCompletedSerial = ExecutionSerial(0);
+ ExecutionSerial mLastSubmittedSerial = ExecutionSerial(0);
+ ExecutionSerial mFutureSerial = ExecutionSerial(0);
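+    // Typically mCompletedSerial <= mLastSubmittedSerial, while mFutureSerial may run ahead of
+    // mLastSubmittedSerial when callbacks are tracked against serials that have no GPU work
+    // submitted yet (see AddFutureSerial).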
+
+    // DestroyImpl is used to clean up and release resources used by the device; it does not
+    // wait for the GPU or check errors.
+ virtual void DestroyImpl() = 0;
+
+ // WaitForIdleForDestruction waits for GPU to finish, checks errors and gets ready for
+ // destruction. This is only used when properly destructing the device. For a real
+ // device loss, this function doesn't need to be called since the driver already closed all
+ // resources.
+ virtual MaybeError WaitForIdleForDestruction() = 0;
+
+ wgpu::ErrorCallback mUncapturedErrorCallback = nullptr;
+ void* mUncapturedErrorUserdata = nullptr;
+
+ wgpu::LoggingCallback mLoggingCallback = nullptr;
+ void* mLoggingUserdata = nullptr;
+
+ wgpu::DeviceLostCallback mDeviceLostCallback = nullptr;
+ void* mDeviceLostUserdata = nullptr;
+
+ std::unique_ptr<ErrorScopeStack> mErrorScopeStack;
+
+ // The Device keeps a ref to the Instance so that any live Device keeps the Instance alive.
+ // The Instance shouldn't need to ref child objects so this shouldn't introduce ref cycles.
+ // The Device keeps a simple pointer to the Adapter because the Adapter is owned by the
+ // Instance.
+ Ref<InstanceBase> mInstance;
+ AdapterBase* mAdapter = nullptr;
+
+ // The object caches aren't exposed in the header as they would require a lot of
+ // additional includes.
+ struct Caches;
+ std::unique_ptr<Caches> mCaches;
+
+ Ref<BindGroupLayoutBase> mEmptyBindGroupLayout;
+
+ Ref<TextureViewBase> mExternalTexturePlaceholderView;
+
+ std::unique_ptr<DynamicUploader> mDynamicUploader;
+ std::unique_ptr<AsyncTaskManager> mAsyncTaskManager;
+ Ref<QueueBase> mQueue;
+
+ struct DeprecationWarnings;
+ std::unique_ptr<DeprecationWarnings> mDeprecationWarnings;
+
+ State mState = State::BeingCreated;
+
+ // Encompasses the mutex and the actual list that contains all live objects "owned" by the
+ // device.
+ struct ApiObjectList {
+ std::mutex mutex;
+ LinkedList<ApiObjectBase> objects;
};
+ PerObjectType<ApiObjectList> mObjectLists;
+
+ FormatTable mFormatTable;
+
+ TogglesSet mEnabledToggles;
+ TogglesSet mOverridenToggles;
+ size_t mLazyClearCountForTesting = 0;
+ std::atomic_uint64_t mNextPipelineCompatibilityToken;
+
+ CombinedLimits mLimits;
+ FeaturesSet mEnabledFeatures;
+ WGSLExtensionSet mWGSLExtensionAllowList;
+
+ std::unique_ptr<InternalPipelineStore> mInternalPipelineStore;
+
+ std::unique_ptr<CallbackTaskManager> mCallbackTaskManager;
+ std::unique_ptr<dawn::platform::WorkerTaskPool> mWorkerTaskPool;
+ std::string mLabel;
+ CacheKey mDeviceCacheKey;
+};
} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/DynamicUploader.cpp b/chromium/third_party/dawn/src/dawn/native/DynamicUploader.cpp
index 262c07d7185..bae374f2cb8 100644
--- a/chromium/third_party/dawn/src/dawn/native/DynamicUploader.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/DynamicUploader.cpp
@@ -13,117 +13,117 @@
// limitations under the License.
#include "dawn/native/DynamicUploader.h"
+
+#include <utility>
+
#include "dawn/common/Math.h"
#include "dawn/native/Device.h"
namespace dawn::native {
- DynamicUploader::DynamicUploader(DeviceBase* device) : mDevice(device) {
- mRingBuffers.emplace_back(
- std::unique_ptr<RingBuffer>(new RingBuffer{nullptr, {kRingBufferSize}}));
- }
+DynamicUploader::DynamicUploader(DeviceBase* device) : mDevice(device) {
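+    // The backing staging buffer starts out as nullptr; it is created lazily on the first
+    // allocation (see AllocateInternal).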
+ mRingBuffers.emplace_back(
+ std::unique_ptr<RingBuffer>(new RingBuffer{nullptr, RingBufferAllocator(kRingBufferSize)}));
+}
- void DynamicUploader::ReleaseStagingBuffer(std::unique_ptr<StagingBufferBase> stagingBuffer) {
- mReleasedStagingBuffers.Enqueue(std::move(stagingBuffer),
- mDevice->GetPendingCommandSerial());
- }
+void DynamicUploader::ReleaseStagingBuffer(std::unique_ptr<StagingBufferBase> stagingBuffer) {
+ mReleasedStagingBuffers.Enqueue(std::move(stagingBuffer), mDevice->GetPendingCommandSerial());
+}
- ResultOrError<UploadHandle> DynamicUploader::AllocateInternal(uint64_t allocationSize,
- ExecutionSerial serial) {
- // Disable further sub-allocation should the request be too large.
- if (allocationSize > kRingBufferSize) {
- std::unique_ptr<StagingBufferBase> stagingBuffer;
- DAWN_TRY_ASSIGN(stagingBuffer, mDevice->CreateStagingBuffer(allocationSize));
+ResultOrError<UploadHandle> DynamicUploader::AllocateInternal(uint64_t allocationSize,
+ ExecutionSerial serial) {
+ // Disable further sub-allocation should the request be too large.
+ if (allocationSize > kRingBufferSize) {
+ std::unique_ptr<StagingBufferBase> stagingBuffer;
+ DAWN_TRY_ASSIGN(stagingBuffer, mDevice->CreateStagingBuffer(allocationSize));
- UploadHandle uploadHandle;
- uploadHandle.mappedBuffer = static_cast<uint8_t*>(stagingBuffer->GetMappedPointer());
- uploadHandle.stagingBuffer = stagingBuffer.get();
+ UploadHandle uploadHandle;
+ uploadHandle.mappedBuffer = static_cast<uint8_t*>(stagingBuffer->GetMappedPointer());
+ uploadHandle.stagingBuffer = stagingBuffer.get();
- ReleaseStagingBuffer(std::move(stagingBuffer));
- return uploadHandle;
- }
+ ReleaseStagingBuffer(std::move(stagingBuffer));
+ return uploadHandle;
+ }
- // Note: Validation ensures size is already aligned.
- // First-fit: find next smallest buffer large enough to satisfy the allocation request.
- RingBuffer* targetRingBuffer = mRingBuffers.back().get();
- for (auto& ringBuffer : mRingBuffers) {
- const RingBufferAllocator& ringBufferAllocator = ringBuffer->mAllocator;
- // Prevent overflow.
- ASSERT(ringBufferAllocator.GetSize() >= ringBufferAllocator.GetUsedSize());
- const uint64_t remainingSize =
- ringBufferAllocator.GetSize() - ringBufferAllocator.GetUsedSize();
- if (allocationSize <= remainingSize) {
- targetRingBuffer = ringBuffer.get();
- break;
- }
+ // Note: Validation ensures size is already aligned.
+ // First-fit: find next smallest buffer large enough to satisfy the allocation request.
+ RingBuffer* targetRingBuffer = mRingBuffers.back().get();
+ for (auto& ringBuffer : mRingBuffers) {
+ const RingBufferAllocator& ringBufferAllocator = ringBuffer->mAllocator;
+ // Prevent overflow.
+ ASSERT(ringBufferAllocator.GetSize() >= ringBufferAllocator.GetUsedSize());
+ const uint64_t remainingSize =
+ ringBufferAllocator.GetSize() - ringBufferAllocator.GetUsedSize();
+ if (allocationSize <= remainingSize) {
+ targetRingBuffer = ringBuffer.get();
+ break;
}
+ }
- uint64_t startOffset = RingBufferAllocator::kInvalidOffset;
- if (targetRingBuffer != nullptr) {
- startOffset = targetRingBuffer->mAllocator.Allocate(allocationSize, serial);
- }
+ uint64_t startOffset = RingBufferAllocator::kInvalidOffset;
+ if (targetRingBuffer != nullptr) {
+ startOffset = targetRingBuffer->mAllocator.Allocate(allocationSize, serial);
+ }
- // Upon failure, append a newly created ring buffer to fulfill the
- // request.
- if (startOffset == RingBufferAllocator::kInvalidOffset) {
- mRingBuffers.emplace_back(
- std::unique_ptr<RingBuffer>(new RingBuffer{nullptr, {kRingBufferSize}}));
+ // Upon failure, append a newly created ring buffer to fulfill the
+ // request.
+ if (startOffset == RingBufferAllocator::kInvalidOffset) {
+ mRingBuffers.emplace_back(std::unique_ptr<RingBuffer>(
+ new RingBuffer{nullptr, RingBufferAllocator(kRingBufferSize)}));
- targetRingBuffer = mRingBuffers.back().get();
- startOffset = targetRingBuffer->mAllocator.Allocate(allocationSize, serial);
- }
+ targetRingBuffer = mRingBuffers.back().get();
+ startOffset = targetRingBuffer->mAllocator.Allocate(allocationSize, serial);
+ }
- ASSERT(startOffset != RingBufferAllocator::kInvalidOffset);
+ ASSERT(startOffset != RingBufferAllocator::kInvalidOffset);
- // Allocate the staging buffer backing the ringbuffer.
- // Note: the first ringbuffer will be lazily created.
- if (targetRingBuffer->mStagingBuffer == nullptr) {
- std::unique_ptr<StagingBufferBase> stagingBuffer;
- DAWN_TRY_ASSIGN(stagingBuffer,
- mDevice->CreateStagingBuffer(targetRingBuffer->mAllocator.GetSize()));
- targetRingBuffer->mStagingBuffer = std::move(stagingBuffer);
- }
+ // Allocate the staging buffer backing the ringbuffer.
+ // Note: the first ringbuffer will be lazily created.
+ if (targetRingBuffer->mStagingBuffer == nullptr) {
+ std::unique_ptr<StagingBufferBase> stagingBuffer;
+ DAWN_TRY_ASSIGN(stagingBuffer,
+ mDevice->CreateStagingBuffer(targetRingBuffer->mAllocator.GetSize()));
+ targetRingBuffer->mStagingBuffer = std::move(stagingBuffer);
+ }
- ASSERT(targetRingBuffer->mStagingBuffer != nullptr);
+ ASSERT(targetRingBuffer->mStagingBuffer != nullptr);
- UploadHandle uploadHandle;
- uploadHandle.stagingBuffer = targetRingBuffer->mStagingBuffer.get();
- uploadHandle.mappedBuffer =
- static_cast<uint8_t*>(uploadHandle.stagingBuffer->GetMappedPointer()) + startOffset;
- uploadHandle.startOffset = startOffset;
+ UploadHandle uploadHandle;
+ uploadHandle.stagingBuffer = targetRingBuffer->mStagingBuffer.get();
+ uploadHandle.mappedBuffer =
+ static_cast<uint8_t*>(uploadHandle.stagingBuffer->GetMappedPointer()) + startOffset;
+ uploadHandle.startOffset = startOffset;
- return uploadHandle;
- }
+ return uploadHandle;
+}
- void DynamicUploader::Deallocate(ExecutionSerial lastCompletedSerial) {
- // Reclaim memory within the ring buffers by ticking (or removing requests no longer
- // in-flight).
- for (size_t i = 0; i < mRingBuffers.size(); ++i) {
- mRingBuffers[i]->mAllocator.Deallocate(lastCompletedSerial);
-
-            // Never erase the last buffer, so as to prevent re-creating smaller buffers
- // again. The last buffer is the largest.
- if (mRingBuffers[i]->mAllocator.Empty() && i < mRingBuffers.size() - 1) {
- mRingBuffers.erase(mRingBuffers.begin() + i);
- }
- }
- mReleasedStagingBuffers.ClearUpTo(lastCompletedSerial);
- }
+void DynamicUploader::Deallocate(ExecutionSerial lastCompletedSerial) {
+ // Reclaim memory within the ring buffers by ticking (or removing requests no longer
+ // in-flight).
+ for (size_t i = 0; i < mRingBuffers.size(); ++i) {
+ mRingBuffers[i]->mAllocator.Deallocate(lastCompletedSerial);
- // TODO(dawn:512): Optimize this function so that it doesn't allocate additional memory
- // when it's not necessary.
- ResultOrError<UploadHandle> DynamicUploader::Allocate(uint64_t allocationSize,
- ExecutionSerial serial,
- uint64_t offsetAlignment) {
- ASSERT(offsetAlignment > 0);
- UploadHandle uploadHandle;
- DAWN_TRY_ASSIGN(uploadHandle,
- AllocateInternal(allocationSize + offsetAlignment - 1, serial));
- uint64_t additionalOffset =
- Align(uploadHandle.startOffset, offsetAlignment) - uploadHandle.startOffset;
- uploadHandle.mappedBuffer =
- static_cast<uint8_t*>(uploadHandle.mappedBuffer) + additionalOffset;
- uploadHandle.startOffset += additionalOffset;
- return uploadHandle;
+        // Never erase the last buffer, so as to prevent re-creating smaller buffers
+ // again. The last buffer is the largest.
+ if (mRingBuffers[i]->mAllocator.Empty() && i < mRingBuffers.size() - 1) {
+ mRingBuffers.erase(mRingBuffers.begin() + i);
+ }
}
+ mReleasedStagingBuffers.ClearUpTo(lastCompletedSerial);
+}
+
+// TODO(dawn:512): Optimize this function so that it doesn't allocate additional memory
+// when it's not necessary.
+ResultOrError<UploadHandle> DynamicUploader::Allocate(uint64_t allocationSize,
+ ExecutionSerial serial,
+ uint64_t offsetAlignment) {
+ ASSERT(offsetAlignment > 0);
+ UploadHandle uploadHandle;
+ DAWN_TRY_ASSIGN(uploadHandle, AllocateInternal(allocationSize + offsetAlignment - 1, serial));
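+    // Over-allocating by (offsetAlignment - 1) bytes guarantees that an aligned offset exists
+    // inside the allocation. For example, with offsetAlignment = 256 and a raw startOffset of
+    // 300, Align(300, 256) = 512, so additionalOffset = 212 and the aligned startOffset is 512.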
+ uint64_t additionalOffset =
+ Align(uploadHandle.startOffset, offsetAlignment) - uploadHandle.startOffset;
+ uploadHandle.mappedBuffer = static_cast<uint8_t*>(uploadHandle.mappedBuffer) + additionalOffset;
+ uploadHandle.startOffset += additionalOffset;
+ return uploadHandle;
+}
} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/DynamicUploader.h b/chromium/third_party/dawn/src/dawn/native/DynamicUploader.h
index c7b24a8c416..0317e8de5e3 100644
--- a/chromium/third_party/dawn/src/dawn/native/DynamicUploader.h
+++ b/chromium/third_party/dawn/src/dawn/native/DynamicUploader.h
@@ -15,6 +15,9 @@
#ifndef SRC_DAWN_NATIVE_DYNAMICUPLOADER_H_
#define SRC_DAWN_NATIVE_DYNAMICUPLOADER_H_
+#include <memory>
+#include <vector>
+
#include "dawn/native/Forward.h"
#include "dawn/native/IntegerTypes.h"
#include "dawn/native/RingBufferAllocator.h"
@@ -24,43 +27,42 @@
// usage.
namespace dawn::native {
- struct UploadHandle {
- uint8_t* mappedBuffer = nullptr;
- uint64_t startOffset = 0;
- StagingBufferBase* stagingBuffer = nullptr;
- };
+struct UploadHandle {
+ uint8_t* mappedBuffer = nullptr;
+ uint64_t startOffset = 0;
+ StagingBufferBase* stagingBuffer = nullptr;
+};
- class DynamicUploader {
- public:
- explicit DynamicUploader(DeviceBase* device);
- ~DynamicUploader() = default;
+class DynamicUploader {
+ public:
+ explicit DynamicUploader(DeviceBase* device);
+ ~DynamicUploader() = default;
- // We add functions to Release StagingBuffers to the DynamicUploader as there's
- // currently no place to track the allocated staging buffers such that they're freed after
- // pending commands are finished. This should be changed when better resource allocation is
- // implemented.
- void ReleaseStagingBuffer(std::unique_ptr<StagingBufferBase> stagingBuffer);
+ // We add functions to Release StagingBuffers to the DynamicUploader as there's
+ // currently no place to track the allocated staging buffers such that they're freed after
+ // pending commands are finished. This should be changed when better resource allocation is
+ // implemented.
+ void ReleaseStagingBuffer(std::unique_ptr<StagingBufferBase> stagingBuffer);
- ResultOrError<UploadHandle> Allocate(uint64_t allocationSize,
- ExecutionSerial serial,
- uint64_t offsetAlignment);
- void Deallocate(ExecutionSerial lastCompletedSerial);
+ ResultOrError<UploadHandle> Allocate(uint64_t allocationSize,
+ ExecutionSerial serial,
+ uint64_t offsetAlignment);
+ void Deallocate(ExecutionSerial lastCompletedSerial);
- private:
- static constexpr uint64_t kRingBufferSize = 4 * 1024 * 1024;
+ private:
+ static constexpr uint64_t kRingBufferSize = 4 * 1024 * 1024;
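+    // Requests larger than kRingBufferSize bypass sub-allocation and get a dedicated staging
+    // buffer instead (see AllocateInternal).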
- struct RingBuffer {
- std::unique_ptr<StagingBufferBase> mStagingBuffer;
- RingBufferAllocator mAllocator;
- };
+ struct RingBuffer {
+ std::unique_ptr<StagingBufferBase> mStagingBuffer;
+ RingBufferAllocator mAllocator;
+ };
- ResultOrError<UploadHandle> AllocateInternal(uint64_t allocationSize,
- ExecutionSerial serial);
+ ResultOrError<UploadHandle> AllocateInternal(uint64_t allocationSize, ExecutionSerial serial);
- std::vector<std::unique_ptr<RingBuffer>> mRingBuffers;
- SerialQueue<ExecutionSerial, std::unique_ptr<StagingBufferBase>> mReleasedStagingBuffers;
- DeviceBase* mDevice;
- };
+ std::vector<std::unique_ptr<RingBuffer>> mRingBuffers;
+ SerialQueue<ExecutionSerial, std::unique_ptr<StagingBufferBase>> mReleasedStagingBuffers;
+ DeviceBase* mDevice;
+};
} // namespace dawn::native
#endif // SRC_DAWN_NATIVE_DYNAMICUPLOADER_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/EncodingContext.cpp b/chromium/third_party/dawn/src/dawn/native/EncodingContext.cpp
index b9ba5298636..d22292716ea 100644
--- a/chromium/third_party/dawn/src/dawn/native/EncodingContext.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/EncodingContext.cpp
@@ -24,194 +24,197 @@
namespace dawn::native {
- EncodingContext::EncodingContext(DeviceBase* device, const ApiObjectBase* initialEncoder)
- : mDevice(device), mTopLevelEncoder(initialEncoder), mCurrentEncoder(initialEncoder) {
- }
-
- EncodingContext::~EncodingContext() {
- Destroy();
- }
-
- void EncodingContext::Destroy() {
- if (mDestroyed) {
- return;
+EncodingContext::EncodingContext(DeviceBase* device, const ApiObjectBase* initialEncoder)
+ : mDevice(device),
+ mTopLevelEncoder(initialEncoder),
+ mCurrentEncoder(initialEncoder),
+ mDestroyed(device->IsLost()) {}
+
+EncodingContext::~EncodingContext() {
+ Destroy();
+}
+
+void EncodingContext::Destroy() {
+ if (mDestroyed) {
+ return;
+ }
+ if (!mWereCommandsAcquired) {
+ FreeCommands(GetIterator());
+ }
+ // If we weren't already finished, then we want to handle an error here so that any calls
+ // to Finish after Destroy will return a meaningful error.
+ if (!IsFinished()) {
+ HandleError(DAWN_FORMAT_VALIDATION_ERROR("Destroyed encoder cannot be finished."));
+ }
+ mDestroyed = true;
+ mCurrentEncoder = nullptr;
+}
+
+CommandIterator EncodingContext::AcquireCommands() {
+ MoveToIterator();
+ ASSERT(!mWereCommandsAcquired);
+ mWereCommandsAcquired = true;
+ return std::move(mIterator);
+}
+
+CommandIterator* EncodingContext::GetIterator() {
+ MoveToIterator();
+ ASSERT(!mWereCommandsAcquired);
+ return &mIterator;
+}
+
+void EncodingContext::MoveToIterator() {
+ CommitCommands(std::move(mPendingCommands));
+ if (!mWasMovedToIterator) {
+ mIterator.AcquireCommandBlocks(std::move(mAllocators));
+ mWasMovedToIterator = true;
+ }
+}
+
+void EncodingContext::HandleError(std::unique_ptr<ErrorData> error) {
+ // Append in reverse so that the most recently set debug group is printed first, like a
+ // call stack.
+ for (auto iter = mDebugGroupLabels.rbegin(); iter != mDebugGroupLabels.rend(); ++iter) {
+ error->AppendDebugGroup(*iter);
+ }
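+    // For example, if the debug groups "Frame" and then "Shadow pass" were pushed, the error
+    // lists "Shadow pass" before "Frame".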
+
+ if (!IsFinished()) {
+ // Encoding should only generate validation errors.
+ ASSERT(error->GetType() == InternalErrorType::Validation);
+ // If the encoding context is not finished, errors are deferred until
+ // Finish() is called.
+ if (mError == nullptr) {
+ mError = std::move(error);
}
- if (!mWereCommandsAcquired) {
- FreeCommands(GetIterator());
- }
- // If we weren't already finished, then we want to handle an error here so that any calls
- // to Finish after Destroy will return a meaningful error.
- if (!IsFinished()) {
- HandleError(DAWN_FORMAT_VALIDATION_ERROR("Destroyed encoder cannot be finished."));
- }
- mDestroyed = true;
- mCurrentEncoder = nullptr;
- }
-
- CommandIterator EncodingContext::AcquireCommands() {
- MoveToIterator();
- ASSERT(!mWereCommandsAcquired);
- mWereCommandsAcquired = true;
- return std::move(mIterator);
- }
-
- CommandIterator* EncodingContext::GetIterator() {
- MoveToIterator();
- ASSERT(!mWereCommandsAcquired);
- return &mIterator;
- }
-
- void EncodingContext::MoveToIterator() {
+ } else {
+ mDevice->HandleError(error->GetType(), error->GetFormattedMessage().c_str());
+ }
+}
+
+void EncodingContext::WillBeginRenderPass() {
+ ASSERT(mCurrentEncoder == mTopLevelEncoder);
+ if (mDevice->IsValidationEnabled() || mDevice->MayRequireDuplicationOfIndirectParameters()) {
+ // When validation is enabled or indirect parameters require duplication, we are going
+ // to want to capture all commands encoded between and including BeginRenderPassCmd and
+        // EndRenderPassCmd, and defer their sequencing until after we have a chance to insert
+ // any necessary validation or duplication commands. To support this we commit any
+ // current commands now, so that the impending BeginRenderPassCmd starts in a fresh
+ // CommandAllocator.
CommitCommands(std::move(mPendingCommands));
- if (!mWasMovedToIterator) {
- mIterator.AcquireCommandBlocks(std::move(mAllocators));
- mWasMovedToIterator = true;
- }
}
-
- void EncodingContext::HandleError(std::unique_ptr<ErrorData> error) {
- // Append in reverse so that the most recently set debug group is printed first, like a
- // call stack.
- for (auto iter = mDebugGroupLabels.rbegin(); iter != mDebugGroupLabels.rend(); ++iter) {
- error->AppendDebugGroup(*iter);
- }
-
- if (!IsFinished()) {
- // Encoding should only generate validation errors.
- ASSERT(error->GetType() == InternalErrorType::Validation);
- // If the encoding context is not finished, errors are deferred until
- // Finish() is called.
- if (mError == nullptr) {
- mError = std::move(error);
- }
- } else {
- mDevice->HandleError(error->GetType(), error->GetFormattedMessage().c_str());
- }
- }
-
- void EncodingContext::WillBeginRenderPass() {
- ASSERT(mCurrentEncoder == mTopLevelEncoder);
- if (mDevice->IsValidationEnabled()) {
- // When validation is enabled, we are going to want to capture all commands encoded
- // between and including BeginRenderPassCmd and EndRenderPassCmd, and defer their
-            // sequencing until after we have a chance to insert any necessary validation
- // commands. To support this we commit any current commands now, so that the
- // impending BeginRenderPassCmd starts in a fresh CommandAllocator.
- CommitCommands(std::move(mPendingCommands));
- }
- }
-
- void EncodingContext::EnterPass(const ApiObjectBase* passEncoder) {
- // Assert we're at the top level.
- ASSERT(mCurrentEncoder == mTopLevelEncoder);
- ASSERT(passEncoder != nullptr);
-
- mCurrentEncoder = passEncoder;
+}
+
+void EncodingContext::EnterPass(const ApiObjectBase* passEncoder) {
+ // Assert we're at the top level.
+ ASSERT(mCurrentEncoder == mTopLevelEncoder);
+ ASSERT(passEncoder != nullptr);
+
+ mCurrentEncoder = passEncoder;
+}
+
+MaybeError EncodingContext::ExitRenderPass(const ApiObjectBase* passEncoder,
+ RenderPassResourceUsageTracker usageTracker,
+ CommandEncoder* commandEncoder,
+ IndirectDrawMetadata indirectDrawMetadata) {
+ ASSERT(mCurrentEncoder != mTopLevelEncoder);
+ ASSERT(mCurrentEncoder == passEncoder);
+
+ mCurrentEncoder = mTopLevelEncoder;
+
+ if (mDevice->IsValidationEnabled() || mDevice->MayRequireDuplicationOfIndirectParameters()) {
+        // When validation or indirect parameter duplication is enabled, commands were committed
+        // just before BeginRenderPassCmd was encoded by our RenderPassEncoder (see
+        // WillBeginRenderPass above). This means
+ // mPendingCommands contains only the commands from BeginRenderPassCmd to
+ // EndRenderPassCmd, inclusive. Now we swap out this allocator with a fresh one to give
+ // the validation encoder a chance to insert its commands first.
+ CommandAllocator renderCommands = std::move(mPendingCommands);
+ DAWN_TRY(EncodeIndirectDrawValidationCommands(mDevice, commandEncoder, &usageTracker,
+ &indirectDrawMetadata));
+ CommitCommands(std::move(mPendingCommands));
+ CommitCommands(std::move(renderCommands));
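+        // The resulting command stream is [validation / duplication commands][render pass
+        // commands], so the generated indirect parameters are ready before the pass that reads
+        // them.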
}
- MaybeError EncodingContext::ExitRenderPass(const ApiObjectBase* passEncoder,
- RenderPassResourceUsageTracker usageTracker,
- CommandEncoder* commandEncoder,
- IndirectDrawMetadata indirectDrawMetadata) {
- ASSERT(mCurrentEncoder != mTopLevelEncoder);
- ASSERT(mCurrentEncoder == passEncoder);
-
- mCurrentEncoder = mTopLevelEncoder;
+ mRenderPassUsages.push_back(usageTracker.AcquireResourceUsage());
+ return {};
+}
- if (mDevice->IsValidationEnabled()) {
- // With validation enabled, commands were committed just before BeginRenderPassCmd was
- // encoded by our RenderPassEncoder (see WillBeginRenderPass above). This means
- // mPendingCommands contains only the commands from BeginRenderPassCmd to
- // EndRenderPassCmd, inclusive. Now we swap out this allocator with a fresh one to give
- // the validation encoder a chance to insert its commands first.
- CommandAllocator renderCommands = std::move(mPendingCommands);
- DAWN_TRY(EncodeIndirectDrawValidationCommands(mDevice, commandEncoder, &usageTracker,
- &indirectDrawMetadata));
- CommitCommands(std::move(mPendingCommands));
- CommitCommands(std::move(renderCommands));
- }
-
- mRenderPassUsages.push_back(usageTracker.AcquireResourceUsage());
- return {};
- }
+void EncodingContext::ExitComputePass(const ApiObjectBase* passEncoder,
+ ComputePassResourceUsage usages) {
+ ASSERT(mCurrentEncoder != mTopLevelEncoder);
+ ASSERT(mCurrentEncoder == passEncoder);
- void EncodingContext::ExitComputePass(const ApiObjectBase* passEncoder,
- ComputePassResourceUsage usages) {
- ASSERT(mCurrentEncoder != mTopLevelEncoder);
- ASSERT(mCurrentEncoder == passEncoder);
+ mCurrentEncoder = mTopLevelEncoder;
+ mComputePassUsages.push_back(std::move(usages));
+}
+void EncodingContext::EnsurePassExited(const ApiObjectBase* passEncoder) {
+ if (mCurrentEncoder != mTopLevelEncoder && mCurrentEncoder == passEncoder) {
+ // The current pass encoder is being deleted. Implicitly end the pass with an error.
mCurrentEncoder = mTopLevelEncoder;
- mComputePassUsages.push_back(std::move(usages));
- }
-
- void EncodingContext::EnsurePassExited(const ApiObjectBase* passEncoder) {
- if (mCurrentEncoder != mTopLevelEncoder && mCurrentEncoder == passEncoder) {
- // The current pass encoder is being deleted. Implicitly end the pass with an error.
- mCurrentEncoder = mTopLevelEncoder;
- HandleError(DAWN_FORMAT_VALIDATION_ERROR(
- "Command buffer recording ended before %s was ended.", passEncoder));
- }
- }
-
- const RenderPassUsages& EncodingContext::GetRenderPassUsages() const {
- ASSERT(!mWereRenderPassUsagesAcquired);
- return mRenderPassUsages;
- }
-
- RenderPassUsages EncodingContext::AcquireRenderPassUsages() {
- ASSERT(!mWereRenderPassUsagesAcquired);
- mWereRenderPassUsagesAcquired = true;
- return std::move(mRenderPassUsages);
- }
-
- const ComputePassUsages& EncodingContext::GetComputePassUsages() const {
- ASSERT(!mWereComputePassUsagesAcquired);
- return mComputePassUsages;
- }
-
- ComputePassUsages EncodingContext::AcquireComputePassUsages() {
- ASSERT(!mWereComputePassUsagesAcquired);
- mWereComputePassUsagesAcquired = true;
- return std::move(mComputePassUsages);
- }
-
- void EncodingContext::PushDebugGroupLabel(const char* groupLabel) {
- mDebugGroupLabels.emplace_back(groupLabel);
- }
-
- void EncodingContext::PopDebugGroupLabel() {
- mDebugGroupLabels.pop_back();
- }
-
- MaybeError EncodingContext::Finish() {
- DAWN_INVALID_IF(IsFinished(), "Command encoding already finished.");
-
- const ApiObjectBase* currentEncoder = mCurrentEncoder;
- const ApiObjectBase* topLevelEncoder = mTopLevelEncoder;
-
- // Even if finish validation fails, it is now invalid to call any encoding commands,
- // so we clear the encoders. Note: mTopLevelEncoder == nullptr is used as a flag for
- // if Finish() has been called.
- mCurrentEncoder = nullptr;
- mTopLevelEncoder = nullptr;
- CommitCommands(std::move(mPendingCommands));
-
- if (mError != nullptr) {
- return std::move(mError);
- }
- DAWN_INVALID_IF(currentEncoder != topLevelEncoder,
- "Command buffer recording ended before %s was ended.", currentEncoder);
- return {};
- }
-
- void EncodingContext::CommitCommands(CommandAllocator allocator) {
- if (!allocator.IsEmpty()) {
- mAllocators.push_back(std::move(allocator));
- }
- }
-
- bool EncodingContext::IsFinished() const {
- return mTopLevelEncoder == nullptr;
- }
+ HandleError(DAWN_FORMAT_VALIDATION_ERROR(
+ "Command buffer recording ended before %s was ended.", passEncoder));
+ }
+}
+
+const RenderPassUsages& EncodingContext::GetRenderPassUsages() const {
+ ASSERT(!mWereRenderPassUsagesAcquired);
+ return mRenderPassUsages;
+}
+
+RenderPassUsages EncodingContext::AcquireRenderPassUsages() {
+ ASSERT(!mWereRenderPassUsagesAcquired);
+ mWereRenderPassUsagesAcquired = true;
+ return std::move(mRenderPassUsages);
+}
+
+const ComputePassUsages& EncodingContext::GetComputePassUsages() const {
+ ASSERT(!mWereComputePassUsagesAcquired);
+ return mComputePassUsages;
+}
+
+ComputePassUsages EncodingContext::AcquireComputePassUsages() {
+ ASSERT(!mWereComputePassUsagesAcquired);
+ mWereComputePassUsagesAcquired = true;
+ return std::move(mComputePassUsages);
+}
+
+void EncodingContext::PushDebugGroupLabel(const char* groupLabel) {
+ mDebugGroupLabels.emplace_back(groupLabel);
+}
+
+void EncodingContext::PopDebugGroupLabel() {
+ mDebugGroupLabels.pop_back();
+}
+
+MaybeError EncodingContext::Finish() {
+ DAWN_INVALID_IF(IsFinished(), "Command encoding already finished.");
+
+ const ApiObjectBase* currentEncoder = mCurrentEncoder;
+ const ApiObjectBase* topLevelEncoder = mTopLevelEncoder;
+
+ // Even if finish validation fails, it is now invalid to call any encoding commands,
+ // so we clear the encoders. Note: mTopLevelEncoder == nullptr is used as a flag for
+ // if Finish() has been called.
+ mCurrentEncoder = nullptr;
+ mTopLevelEncoder = nullptr;
+ CommitCommands(std::move(mPendingCommands));
+
+ if (mError != nullptr) {
+ return std::move(mError);
+ }
+ DAWN_INVALID_IF(currentEncoder != topLevelEncoder,
+ "Command buffer recording ended before %s was ended.", currentEncoder);
+ return {};
+}
+
+void EncodingContext::CommitCommands(CommandAllocator allocator) {
+ if (!allocator.IsEmpty()) {
+ mAllocators.push_back(std::move(allocator));
+ }
+}
+
+bool EncodingContext::IsFinished() const {
+ return mTopLevelEncoder == nullptr;
+}
} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/EncodingContext.h b/chromium/third_party/dawn/src/dawn/native/EncodingContext.h
index 341cc105090..020132e5273 100644
--- a/chromium/third_party/dawn/src/dawn/native/EncodingContext.h
+++ b/chromium/third_party/dawn/src/dawn/native/EncodingContext.h
@@ -15,6 +15,11 @@
#ifndef SRC_DAWN_NATIVE_ENCODINGCONTEXT_H_
#define SRC_DAWN_NATIVE_ENCODINGCONTEXT_H_
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
#include "dawn/native/CommandAllocator.h"
#include "dawn/native/Error.h"
#include "dawn/native/ErrorData.h"
@@ -22,160 +27,158 @@
#include "dawn/native/PassResourceUsageTracker.h"
#include "dawn/native/dawn_platform.h"
-#include <string>
-
namespace dawn::native {
- class CommandEncoder;
- class DeviceBase;
- class ApiObjectBase;
+class CommandEncoder;
+class DeviceBase;
+class ApiObjectBase;
- // Base class for allocating/iterating commands.
- // It performs error tracking as well as encoding state for render/compute passes.
- class EncodingContext {
- public:
- EncodingContext(DeviceBase* device, const ApiObjectBase* initialEncoder);
- ~EncodingContext();
+// Base class for allocating/iterating commands.
+// It performs error tracking as well as encoding state for render/compute passes.
+class EncodingContext {
+ public:
+ EncodingContext(DeviceBase* device, const ApiObjectBase* initialEncoder);
+ ~EncodingContext();
- // Marks the encoding context as destroyed so that any future encodes will fail, and all
- // encoded commands are released.
- void Destroy();
+ // Marks the encoding context as destroyed so that any future encodes will fail, and all
+ // encoded commands are released.
+ void Destroy();
- CommandIterator AcquireCommands();
- CommandIterator* GetIterator();
+ CommandIterator AcquireCommands();
+ CommandIterator* GetIterator();
- // Functions to handle encoder errors
- void HandleError(std::unique_ptr<ErrorData> error);
+ // Functions to handle encoder errors
+ void HandleError(std::unique_ptr<ErrorData> error);
- inline bool ConsumedError(MaybeError maybeError) {
- if (DAWN_UNLIKELY(maybeError.IsError())) {
- HandleError(maybeError.AcquireError());
- return true;
- }
- return false;
- }
-
- template <typename... Args>
- inline bool ConsumedError(MaybeError maybeError,
- const char* formatStr,
- const Args&... args) {
- if (DAWN_UNLIKELY(maybeError.IsError())) {
- std::unique_ptr<ErrorData> error = maybeError.AcquireError();
- if (error->GetType() == InternalErrorType::Validation) {
- std::string out;
- absl::UntypedFormatSpec format(formatStr);
- if (absl::FormatUntyped(&out, format, {absl::FormatArg(args)...})) {
- error->AppendContext(std::move(out));
- } else {
- error->AppendContext(absl::StrFormat(
- "[Failed to format error message: \"%s\"].", formatStr));
- }
- }
- HandleError(std::move(error));
- return true;
- }
- return false;
+ inline bool ConsumedError(MaybeError maybeError) {
+ if (DAWN_UNLIKELY(maybeError.IsError())) {
+ HandleError(maybeError.AcquireError());
+ return true;
}
-
- inline bool CheckCurrentEncoder(const ApiObjectBase* encoder) {
- if (DAWN_UNLIKELY(encoder != mCurrentEncoder)) {
- if (mDestroyed) {
- HandleError(
- DAWN_FORMAT_VALIDATION_ERROR("Recording in a destroyed %s.", encoder));
- } else if (mCurrentEncoder != mTopLevelEncoder) {
- // The top level encoder was used when a pass encoder was current.
- HandleError(DAWN_FORMAT_VALIDATION_ERROR(
- "Command cannot be recorded while %s is active.", mCurrentEncoder));
+ return false;
+ }
+
+ template <typename... Args>
+ inline bool ConsumedError(MaybeError maybeError, const char* formatStr, const Args&... args) {
+ if (DAWN_UNLIKELY(maybeError.IsError())) {
+ std::unique_ptr<ErrorData> error = maybeError.AcquireError();
+ if (error->GetType() == InternalErrorType::Validation) {
+ std::string out;
+ absl::UntypedFormatSpec format(formatStr);
+ if (absl::FormatUntyped(&out, format, {absl::FormatArg(args)...})) {
+ error->AppendContext(std::move(out));
} else {
- HandleError(DAWN_FORMAT_VALIDATION_ERROR(
- "Recording in an error or already ended %s.", encoder));
+ error->AppendContext(
+ absl::StrFormat("[Failed to format error message: \"%s\"].", formatStr));
}
- return false;
}
+ HandleError(std::move(error));
return true;
}
+ return false;
+ }
- template <typename EncodeFunction>
- inline bool TryEncode(const ApiObjectBase* encoder, EncodeFunction&& encodeFunction) {
- if (!CheckCurrentEncoder(encoder)) {
- return false;
- }
- ASSERT(!mWasMovedToIterator);
- return !ConsumedError(encodeFunction(&mPendingCommands));
+ inline bool CheckCurrentEncoder(const ApiObjectBase* encoder) {
+ if (mDestroyed) {
+ HandleError(
+ DAWN_FORMAT_VALIDATION_ERROR("Recording in a destroyed %s.", mCurrentEncoder));
+ return false;
}
-
- template <typename EncodeFunction, typename... Args>
- inline bool TryEncode(const ApiObjectBase* encoder,
- EncodeFunction&& encodeFunction,
- const char* formatStr,
- const Args&... args) {
- if (!CheckCurrentEncoder(encoder)) {
- return false;
+ if (DAWN_UNLIKELY(encoder != mCurrentEncoder)) {
+ if (mCurrentEncoder != mTopLevelEncoder) {
+ // The top level encoder was used when a pass encoder was current.
+ HandleError(DAWN_FORMAT_VALIDATION_ERROR(
+ "Command cannot be recorded while %s is active.", mCurrentEncoder));
+ } else {
+ HandleError(DAWN_FORMAT_VALIDATION_ERROR(
+ "Recording in an error or already ended %s.", encoder));
}
- ASSERT(!mWasMovedToIterator);
- return !ConsumedError(encodeFunction(&mPendingCommands), formatStr, args...);
+ return false;
}
+ return true;
+ }
- // Must be called prior to encoding a BeginRenderPassCmd. Note that it's OK to call this
- // and then not actually call EnterPass+ExitRenderPass, for example if some other pass setup
- // failed validation before the BeginRenderPassCmd could be encoded.
- void WillBeginRenderPass();
-
- // Functions to set current encoder state
- void EnterPass(const ApiObjectBase* passEncoder);
- MaybeError ExitRenderPass(const ApiObjectBase* passEncoder,
- RenderPassResourceUsageTracker usageTracker,
- CommandEncoder* commandEncoder,
- IndirectDrawMetadata indirectDrawMetadata);
- void ExitComputePass(const ApiObjectBase* passEncoder, ComputePassResourceUsage usages);
- MaybeError Finish();
-
- // Called when a pass encoder is deleted. Provides an opportunity to clean up if it's the
- // mCurrentEncoder.
- void EnsurePassExited(const ApiObjectBase* passEncoder);
-
- const RenderPassUsages& GetRenderPassUsages() const;
- const ComputePassUsages& GetComputePassUsages() const;
- RenderPassUsages AcquireRenderPassUsages();
- ComputePassUsages AcquireComputePassUsages();
-
- void PushDebugGroupLabel(const char* groupLabel);
- void PopDebugGroupLabel();
-
- private:
- void CommitCommands(CommandAllocator allocator);
-
- bool IsFinished() const;
- void MoveToIterator();
-
- DeviceBase* mDevice;
-
- // There can only be two levels of encoders. Top-level and render/compute pass.
- // The top level encoder is the encoder the EncodingContext is created with.
- // It doubles as flag to check if encoding has been Finished.
- const ApiObjectBase* mTopLevelEncoder;
- // The current encoder must be the same as the encoder provided to TryEncode,
- // otherwise an error is produced. It may be nullptr if the EncodingContext is an error.
- // The current encoder changes with Enter/ExitPass which should be called by
- // CommandEncoder::Begin/EndPass.
- const ApiObjectBase* mCurrentEncoder;
-
- RenderPassUsages mRenderPassUsages;
- bool mWereRenderPassUsagesAcquired = false;
- ComputePassUsages mComputePassUsages;
- bool mWereComputePassUsagesAcquired = false;
-
- CommandAllocator mPendingCommands;
-
- std::vector<CommandAllocator> mAllocators;
- CommandIterator mIterator;
- bool mWasMovedToIterator = false;
- bool mWereCommandsAcquired = false;
- bool mDestroyed = false;
-
- std::unique_ptr<ErrorData> mError;
- std::vector<std::string> mDebugGroupLabels;
- };
+ template <typename EncodeFunction>
+ inline bool TryEncode(const ApiObjectBase* encoder, EncodeFunction&& encodeFunction) {
+ if (!CheckCurrentEncoder(encoder)) {
+ return false;
+ }
+ ASSERT(!mWasMovedToIterator);
+ return !ConsumedError(encodeFunction(&mPendingCommands));
+ }
+
+ template <typename EncodeFunction, typename... Args>
+ inline bool TryEncode(const ApiObjectBase* encoder,
+ EncodeFunction&& encodeFunction,
+ const char* formatStr,
+ const Args&... args) {
+ if (!CheckCurrentEncoder(encoder)) {
+ return false;
+ }
+ ASSERT(!mWasMovedToIterator);
+ return !ConsumedError(encodeFunction(&mPendingCommands), formatStr, args...);
+ }
+
+ // Must be called prior to encoding a BeginRenderPassCmd. Note that it's OK to call this
+ // and then not actually call EnterPass+ExitRenderPass, for example if some other pass setup
+ // failed validation before the BeginRenderPassCmd could be encoded.
+ void WillBeginRenderPass();
+
+ // Functions to set current encoder state
+ void EnterPass(const ApiObjectBase* passEncoder);
+ MaybeError ExitRenderPass(const ApiObjectBase* passEncoder,
+ RenderPassResourceUsageTracker usageTracker,
+ CommandEncoder* commandEncoder,
+ IndirectDrawMetadata indirectDrawMetadata);
+ void ExitComputePass(const ApiObjectBase* passEncoder, ComputePassResourceUsage usages);
+ MaybeError Finish();
+
+ // Called when a pass encoder is deleted. Provides an opportunity to clean up if it's the
+ // mCurrentEncoder.
+ void EnsurePassExited(const ApiObjectBase* passEncoder);
+
+ const RenderPassUsages& GetRenderPassUsages() const;
+ const ComputePassUsages& GetComputePassUsages() const;
+ RenderPassUsages AcquireRenderPassUsages();
+ ComputePassUsages AcquireComputePassUsages();
+
+ void PushDebugGroupLabel(const char* groupLabel);
+ void PopDebugGroupLabel();
+
+ private:
+ void CommitCommands(CommandAllocator allocator);
+
+ bool IsFinished() const;
+ void MoveToIterator();
+
+ DeviceBase* mDevice;
+
+ // There can only be two levels of encoders. Top-level and render/compute pass.
+ // The top level encoder is the encoder the EncodingContext is created with.
+ // It doubles as flag to check if encoding has been Finished.
+ const ApiObjectBase* mTopLevelEncoder;
+ // The current encoder must be the same as the encoder provided to TryEncode,
+ // otherwise an error is produced. It may be nullptr if the EncodingContext is an error.
+ // The current encoder changes with Enter/ExitPass which should be called by
+ // CommandEncoder::Begin/EndPass.
+ const ApiObjectBase* mCurrentEncoder;
+
+ RenderPassUsages mRenderPassUsages;
+ bool mWereRenderPassUsagesAcquired = false;
+ ComputePassUsages mComputePassUsages;
+ bool mWereComputePassUsagesAcquired = false;
+
+ CommandAllocator mPendingCommands;
+
+ std::vector<CommandAllocator> mAllocators;
+ CommandIterator mIterator;
+ bool mWasMovedToIterator = false;
+ bool mWereCommandsAcquired = false;
+ bool mDestroyed = false;
+
+ std::unique_ptr<ErrorData> mError;
+ std::vector<std::string> mDebugGroupLabels;
+};
} // namespace dawn::native
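
For reference, a minimal sketch of the TryEncode pattern the header above is built around, as encoder methods typically use it. FooCmd, Command::Foo and the Allocate<T>(Command) helper are illustrative stand-ins rather than this patch's exact API; the TryEncode/ConsumedError signatures are the ones declared above.

    // Hedged sketch: record one command through the EncodingContext, with a
    // formatted validation-error context on failure.
    void EncodeFoo(EncodingContext* encodingContext, const ApiObjectBase* encoder, uint32_t value) {
        encodingContext->TryEncode(
            encoder,
            [&](CommandAllocator* allocator) -> MaybeError {
                // Hypothetical command struct and allocation helper.
                FooCmd* cmd = allocator->Allocate<FooCmd>(Command::Foo);
                cmd->value = value;
                return {};
            },
            "encoding %s.Foo(%u).", encoder, value);
    }
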
diff --git a/chromium/third_party/dawn/src/dawn/native/EnumClassBitmasks.h b/chromium/third_party/dawn/src/dawn/native/EnumClassBitmasks.h
index 51453cd5943..14101558151 100644
--- a/chromium/third_party/dawn/src/dawn/native/EnumClassBitmasks.h
+++ b/chromium/third_party/dawn/src/dawn/native/EnumClassBitmasks.h
@@ -19,20 +19,20 @@
namespace dawn::native {
- // EnumClassBitmmasks is a helper in the dawn:: namespace.
- // Re-export it in the dawn_native namespace.
- DAWN_IMPORT_BITMASK_OPERATORS
-
- // Specify this for usage with EnumMaskIterator
- template <typename T>
- struct EnumBitmaskSize {
- static constexpr unsigned value = 0;
- };
-
- template <typename T>
- constexpr bool HasOneBit(T value) {
- return HasZeroOrOneBits(value) && value != T(0);
- }
+// EnumClassBitmasks is a helper in the dawn:: namespace.
+// Re-export it in the dawn_native namespace.
+DAWN_IMPORT_BITMASK_OPERATORS
+
+// Specify this for usage with EnumMaskIterator
+template <typename T>
+struct EnumBitmaskSize {
+ static constexpr unsigned value = 0;
+};
+
+template <typename T>
+constexpr bool HasOneBit(T value) {
+ return HasZeroOrOneBits(value) && value != T(0);
+}
} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/EnumMaskIterator.h b/chromium/third_party/dawn/src/dawn/native/EnumMaskIterator.h
index ec2328561c5..1b0e89ab2f7 100644
--- a/chromium/third_party/dawn/src/dawn/native/EnumMaskIterator.h
+++ b/chromium/third_party/dawn/src/dawn/native/EnumMaskIterator.h
@@ -20,63 +20,54 @@
namespace dawn::native {
- template <typename T>
- class EnumMaskIterator final {
- static constexpr size_t N = EnumBitmaskSize<T>::value;
- static_assert(N > 0);
-
- using U = std::underlying_type_t<T>;
+template <typename T>
+class EnumMaskIterator final {
+ static constexpr size_t N = EnumBitmaskSize<T>::value;
+ static_assert(N > 0);
+
+ using U = std::underlying_type_t<T>;
+
+ public:
+ explicit EnumMaskIterator(const T& mask)
+ : mBitSetIterator(std::bitset<N>(static_cast<U>(mask))) {
+ // If you hit this ASSERT it means that you forgot to update EnumBitmaskSize<T>::value;
+ ASSERT(U(mask) == 0 || Log2(uint64_t(U(mask))) < N);
+ }
+ class Iterator final {
public:
- explicit EnumMaskIterator(const T& mask)
- : mBitSetIterator(std::bitset<N>(static_cast<U>(mask))) {
- // If you hit this ASSERT it means that you forgot to update EnumBitmaskSize<T>::value;
- ASSERT(U(mask) == 0 || Log2(uint64_t(U(mask))) < N);
- }
-
- class Iterator final {
- public:
- explicit Iterator(const typename BitSetIterator<N, U>::Iterator& iter) : mIter(iter) {
- }
-
- Iterator& operator++() {
- ++mIter;
- return *this;
- }
-
- bool operator==(const Iterator& other) const {
- return mIter == other.mIter;
- }
-
- bool operator!=(const Iterator& other) const {
- return mIter != other.mIter;
- }
+ explicit Iterator(const typename BitSetIterator<N, U>::Iterator& iter) : mIter(iter) {}
- T operator*() const {
- U value = *mIter;
- return static_cast<T>(U(1) << value);
- }
+ Iterator& operator++() {
+ ++mIter;
+ return *this;
+ }
- private:
- typename BitSetIterator<N, U>::Iterator mIter;
- };
+ bool operator==(const Iterator& other) const { return mIter == other.mIter; }
- Iterator begin() const {
- return Iterator(mBitSetIterator.begin());
- }
+ bool operator!=(const Iterator& other) const { return mIter != other.mIter; }
- Iterator end() const {
- return Iterator(mBitSetIterator.end());
+ T operator*() const {
+ U value = *mIter;
+ return static_cast<T>(U(1) << value);
}
private:
- BitSetIterator<N, U> mBitSetIterator;
+ typename BitSetIterator<N, U>::Iterator mIter;
};
- template <typename T>
- EnumMaskIterator<T> IterateEnumMask(const T& mask) {
- return EnumMaskIterator<T>(mask);
- }
+ Iterator begin() const { return Iterator(mBitSetIterator.begin()); }
+
+ Iterator end() const { return Iterator(mBitSetIterator.end()); }
+
+ private:
+ BitSetIterator<N, U> mBitSetIterator;
+};
+
+template <typename T>
+EnumMaskIterator<T> IterateEnumMask(const T& mask) {
+ return EnumMaskIterator<T>(mask);
+}
} // namespace dawn::native
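
A hedged sketch of how IterateEnumMask is typically used: the bitmask enum needs a matching EnumBitmaskSize specialization, and the iterator then yields each set bit as a single-bit enum value. SampleAspect below is a hypothetical enum invented for illustration; real users live elsewhere in Dawn.

    namespace dawn::native {

    // Hypothetical bitmask enum, underlying type uint8_t.
    enum class SampleAspect : uint8_t {
        Color = 1 << 0,
        Depth = 1 << 1,
        Stencil = 1 << 2,
    };

    // Required so EnumMaskIterator knows how many bits the mask can hold.
    template <>
    struct EnumBitmaskSize<SampleAspect> {
        static constexpr unsigned value = 3;  // highest bit index + 1
    };

    uint32_t CountAspects(SampleAspect mask) {
        uint32_t count = 0;
        for (SampleAspect aspect : IterateEnumMask(mask)) {
            // Each iteration sees exactly one set bit of the mask, e.g. Depth then
            // Stencil for a mask with both of those bits set.
            (void)aspect;
            count++;
        }
        return count;
    }

    }  // namespace dawn::native
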
diff --git a/chromium/third_party/dawn/src/dawn/native/Error.cpp b/chromium/third_party/dawn/src/dawn/native/Error.cpp
index d524a327660..2d06da24b66 100644
--- a/chromium/third_party/dawn/src/dawn/native/Error.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/Error.cpp
@@ -19,46 +19,46 @@
namespace dawn::native {
- void IgnoreErrors(MaybeError maybeError) {
- if (maybeError.IsError()) {
- std::unique_ptr<ErrorData> errorData = maybeError.AcquireError();
- // During shutdown and destruction, device lost errors can be ignored.
- // We can also ignore other unexpected internal errors on shut down and treat it as
- // device lost so that we can continue with destruction.
- ASSERT(errorData->GetType() == InternalErrorType::DeviceLost ||
- errorData->GetType() == InternalErrorType::Internal);
- }
+void IgnoreErrors(MaybeError maybeError) {
+ if (maybeError.IsError()) {
+ std::unique_ptr<ErrorData> errorData = maybeError.AcquireError();
+ // During shutdown and destruction, device lost errors can be ignored.
+ // We can also ignore other unexpected internal errors on shut down and treat it as
+ // device lost so that we can continue with destruction.
+ ASSERT(errorData->GetType() == InternalErrorType::DeviceLost ||
+ errorData->GetType() == InternalErrorType::Internal);
}
+}
- wgpu::ErrorType ToWGPUErrorType(InternalErrorType type) {
- switch (type) {
- case InternalErrorType::Validation:
- return wgpu::ErrorType::Validation;
- case InternalErrorType::OutOfMemory:
- return wgpu::ErrorType::OutOfMemory;
+wgpu::ErrorType ToWGPUErrorType(InternalErrorType type) {
+ switch (type) {
+ case InternalErrorType::Validation:
+ return wgpu::ErrorType::Validation;
+ case InternalErrorType::OutOfMemory:
+ return wgpu::ErrorType::OutOfMemory;
- // There is no equivalent of Internal errors in the WebGPU API. Internal errors cause
- // the device at the API level to be lost, so treat it like a DeviceLost error.
- case InternalErrorType::Internal:
- case InternalErrorType::DeviceLost:
- return wgpu::ErrorType::DeviceLost;
+ // There is no equivalent of Internal errors in the WebGPU API. Internal errors cause
+ // the device at the API level to be lost, so treat it like a DeviceLost error.
+ case InternalErrorType::Internal:
+ case InternalErrorType::DeviceLost:
+ return wgpu::ErrorType::DeviceLost;
- default:
- return wgpu::ErrorType::Unknown;
- }
+ default:
+ return wgpu::ErrorType::Unknown;
}
+}
- InternalErrorType FromWGPUErrorType(wgpu::ErrorType type) {
- switch (type) {
- case wgpu::ErrorType::Validation:
- return InternalErrorType::Validation;
- case wgpu::ErrorType::OutOfMemory:
- return InternalErrorType::OutOfMemory;
- case wgpu::ErrorType::DeviceLost:
- return InternalErrorType::DeviceLost;
- default:
- return InternalErrorType::Internal;
- }
+InternalErrorType FromWGPUErrorType(wgpu::ErrorType type) {
+ switch (type) {
+ case wgpu::ErrorType::Validation:
+ return InternalErrorType::Validation;
+ case wgpu::ErrorType::OutOfMemory:
+ return InternalErrorType::OutOfMemory;
+ case wgpu::ErrorType::DeviceLost:
+ return InternalErrorType::DeviceLost;
+ default:
+ return InternalErrorType::Internal;
}
+}
} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/Error.h b/chromium/third_party/dawn/src/dawn/native/Error.h
index 1a5ac313f8e..0b71644f270 100644
--- a/chromium/third_party/dawn/src/dawn/native/Error.h
+++ b/chromium/third_party/dawn/src/dawn/native/Error.h
@@ -15,56 +15,58 @@
#ifndef SRC_DAWN_NATIVE_ERROR_H_
#define SRC_DAWN_NATIVE_ERROR_H_
+#include <memory>
+#include <string>
+#include <utility>
+
#include "absl/strings/str_format.h"
#include "dawn/common/Result.h"
#include "dawn/native/ErrorData.h"
#include "dawn/native/webgpu_absl_format.h"
-#include <string>
-
namespace dawn::native {
- enum class InternalErrorType : uint32_t { Validation, DeviceLost, Internal, OutOfMemory };
-
- // MaybeError and ResultOrError are meant to be used as return value for function that are not
- // expected to, but might fail. The handling of error is potentially much slower than successes.
- using MaybeError = Result<void, ErrorData>;
-
- template <typename T>
- using ResultOrError = Result<T, ErrorData>;
-
- // Returning a success is done like so:
- // return {}; // for Error
- // return SomethingOfTypeT; // for ResultOrError<T>
- //
- // Returning an error is done via:
- // return DAWN_MAKE_ERROR(errorType, "My error message");
- //
- // but shorthand version for specific error types are preferred:
- // return DAWN_VALIDATION_ERROR("My error message");
- //
- // There are different types of errors that should be used for different purpose:
- //
- // - Validation: these are errors that show the user did something bad, which causes the
- // whole call to be a no-op. It's most commonly found in the frontend but there can be some
- // backend specific validation in non-conformant backends too.
- //
- // - Out of memory: creation of a Buffer or Texture failed because there isn't enough memory.
- // This is similar to validation errors in that the call becomes a no-op and returns an
- // error object, but is reported separated from validation to the user.
- //
- // - Device loss: the backend driver reported that the GPU has been lost, which means all
- // previous commands magically disappeared and the only thing left to do is clean up.
- // Note: Device loss should be used rarely and in most case you want to use Internal
- // instead.
- //
- // - Internal: something happened that the backend didn't expect, and it doesn't know
- // how to recover from that situation. This causes the device to be lost, but is separate
- // from device loss, because the GPU execution is still happening so we need to clean up
- // more gracefully.
- //
- // - Unimplemented: same as Internal except it puts "unimplemented" in the error message for
- // more clarity.
+enum class InternalErrorType : uint32_t { Validation, DeviceLost, Internal, OutOfMemory };
+
+// MaybeError and ResultOrError are meant to be used as return values for functions that are not
+// expected to fail, but might. Handling an error is potentially much slower than a success.
+using MaybeError = Result<void, ErrorData>;
+
+template <typename T>
+using ResultOrError = Result<T, ErrorData>;
+
+// Returning a success is done like so:
+// return {}; // for Error
+// return SomethingOfTypeT; // for ResultOrError<T>
+//
+// Returning an error is done via:
+// return DAWN_MAKE_ERROR(errorType, "My error message");
+//
+// but shorthand versions for specific error types are preferred:
+// return DAWN_VALIDATION_ERROR("My error message");
+//
+// There are different types of errors that should be used for different purpose:
+//
+// - Validation: these are errors that show the user did something bad, which causes the
+// whole call to be a no-op. It's most commonly found in the frontend but there can be some
+// backend specific validation in non-conformant backends too.
+//
+// - Out of memory: creation of a Buffer or Texture failed because there isn't enough memory.
+// This is similar to validation errors in that the call becomes a no-op and returns an
+//    error object, but is reported separately from validation to the user.
+//
+// - Device loss: the backend driver reported that the GPU has been lost, which means all
+// previous commands magically disappeared and the only thing left to do is clean up.
+//    Note: Device loss should be used rarely and in most cases you want to use Internal
+// instead.
+//
+// - Internal: something happened that the backend didn't expect, and it doesn't know
+// how to recover from that situation. This causes the device to be lost, but is separate
+// from device loss, because the GPU execution is still happening so we need to clean up
+// more gracefully.
+//
+// - Unimplemented: same as Internal except it puts "unimplemented" in the error message for
+// more clarity.
#define DAWN_MAKE_ERROR(TYPE, MESSAGE) \
::dawn::native::ErrorData::Create(TYPE, MESSAGE, __FILE__, __func__, __LINE__)
@@ -106,9 +108,9 @@ namespace dawn::native {
#define DAWN_CONCAT2(x, y) DAWN_CONCAT1(x, y)
#define DAWN_LOCAL_VAR DAWN_CONCAT2(_localVar, __LINE__)
- // When Errors aren't handled explicitly, calls to functions returning errors should be
- // wrapped in an DAWN_TRY. It will return the error if any, otherwise keep executing
- // the current function.
+// When Errors aren't handled explicitly, calls to functions returning errors should be
+// wrapped in a DAWN_TRY. It will return the error, if any, and otherwise keep executing
+// the current function.
#define DAWN_TRY(EXPR) DAWN_TRY_WITH_CLEANUP(EXPR, {})
#define DAWN_TRY_CONTEXT(EXPR, ...) \
@@ -127,39 +129,39 @@ namespace dawn::native {
for (;;) \
break
- // DAWN_TRY_ASSIGN is the same as DAWN_TRY for ResultOrError and assigns the success value, if
- // any, to VAR.
+// DAWN_TRY_ASSIGN is the same as DAWN_TRY for ResultOrError and assigns the success value, if
+// any, to VAR.
#define DAWN_TRY_ASSIGN(VAR, EXPR) DAWN_TRY_ASSIGN_WITH_CLEANUP(VAR, EXPR, {})
#define DAWN_TRY_ASSIGN_CONTEXT(VAR, EXPR, ...) \
DAWN_TRY_ASSIGN_WITH_CLEANUP(VAR, EXPR, { error->AppendContext(absl::StrFormat(__VA_ARGS__)); })
- // Argument helpers are used to determine which macro implementations should be called when
- // overloading with different number of variables.
+// Argument helpers are used to determine which macro implementations should be called when
+// overloading with different number of variables.
#define DAWN_ERROR_UNIMPLEMENTED_MACRO_(...) UNREACHABLE()
#define DAWN_ERROR_GET_5TH_ARG_HELPER_(_1, _2, _3, _4, NAME, ...) NAME
#define DAWN_ERROR_GET_5TH_ARG_(args) DAWN_ERROR_GET_5TH_ARG_HELPER_ args
- // DAWN_TRY_ASSIGN_WITH_CLEANUP is overloaded with 2 version so that users can override the
- // return value of the macro when necessary. This is particularly useful if the function
- // calling the macro may want to return void instead of the error, i.e. in a test where we may
- // just want to assert and fail if the assign cannot go through. In both the cleanup and return
- // clauses, users can use the `error` variable to access the pointer to the acquired error.
- //
- // Example usages:
- // 3 Argument Case:
- // Result res;
- // DAWN_TRY_ASSIGN_WITH_CLEANUP(
- // res, GetResultOrErrorFunction(), { AddAdditionalErrorInformation(error.get()); }
- // );
- //
- // 4 Argument Case:
- // bool FunctionThatReturnsBool() {
- // DAWN_TRY_ASSIGN_WITH_CLEANUP(
- // res, GetResultOrErrorFunction(),
- // { AddAdditionalErrorInformation(error.get()); },
- // false
- // );
- // }
+// DAWN_TRY_ASSIGN_WITH_CLEANUP is overloaded with 2 versions so that users can override the
+// return value of the macro when necessary. This is particularly useful if the function
+// calling the macro may want to return void instead of the error, i.e. in a test where we may
+// just want to assert and fail if the assign cannot go through. In both the cleanup and return
+// clauses, users can use the `error` variable to access the pointer to the acquired error.
+//
+// Example usages:
+// 3 Argument Case:
+// Result res;
+// DAWN_TRY_ASSIGN_WITH_CLEANUP(
+// res, GetResultOrErrorFunction(), { AddAdditionalErrorInformation(error.get()); }
+// );
+//
+// 4 Argument Case:
+// bool FunctionThatReturnsBool() {
+// DAWN_TRY_ASSIGN_WITH_CLEANUP(
+// res, GetResultOrErrorFunction(),
+// { AddAdditionalErrorInformation(error.get()); },
+// false
+// );
+// }
#define DAWN_TRY_ASSIGN_WITH_CLEANUP(...) \
DAWN_ERROR_GET_5TH_ARG_((__VA_ARGS__, DAWN_TRY_ASSIGN_WITH_CLEANUP_IMPL_4_, \
DAWN_TRY_ASSIGN_WITH_CLEANUP_IMPL_3_, \
@@ -183,11 +185,11 @@ namespace dawn::native {
for (;;) \
break
- // Assert that errors are device loss so that we can continue with destruction
- void IgnoreErrors(MaybeError maybeError);
+// Assert that errors are device loss so that we can continue with destruction
+void IgnoreErrors(MaybeError maybeError);
- wgpu::ErrorType ToWGPUErrorType(InternalErrorType type);
- InternalErrorType FromWGPUErrorType(wgpu::ErrorType type);
+wgpu::ErrorType ToWGPUErrorType(InternalErrorType type);
+InternalErrorType FromWGPUErrorType(wgpu::ErrorType type);
} // namespace dawn::native
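
For reference, a minimal sketch of the usage pattern the comments above describe. FooBase, FooDescriptor, ValidateFooDescriptor and CreateFooImpl are hypothetical; MaybeError, ResultOrError, DAWN_INVALID_IF, DAWN_TRY and DAWN_TRY_ASSIGN are the ones declared in this header.

    // Validation helper: return a formatted validation error, or success.
    MaybeError ValidateFooDescriptor(const FooDescriptor* descriptor) {
        DAWN_INVALID_IF(descriptor->count == 0, "Count (%u) must not be zero.", descriptor->count);
        return {};
    }

    ResultOrError<Ref<FooBase>> CreateFoo(DeviceBase* device, const FooDescriptor* descriptor) {
        // DAWN_TRY early-returns the error from this function if validation fails.
        DAWN_TRY(ValidateFooDescriptor(descriptor));

        // DAWN_TRY_ASSIGN assigns the success value, or returns the error.
        Ref<FooBase> foo;
        DAWN_TRY_ASSIGN(foo, CreateFooImpl(device, descriptor));
        return std::move(foo);
    }
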
diff --git a/chromium/third_party/dawn/src/dawn/native/ErrorData.cpp b/chromium/third_party/dawn/src/dawn/native/ErrorData.cpp
index 863d20ffc4d..ee962403b49 100644
--- a/chromium/third_party/dawn/src/dawn/native/ErrorData.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/ErrorData.cpp
@@ -14,90 +14,108 @@
#include "dawn/native/ErrorData.h"
+#include <utility>
+
#include "dawn/native/Error.h"
#include "dawn/native/ObjectBase.h"
#include "dawn/native/dawn_platform.h"
namespace dawn::native {
- std::unique_ptr<ErrorData> ErrorData::Create(InternalErrorType type,
- std::string message,
- const char* file,
- const char* function,
- int line) {
- std::unique_ptr<ErrorData> error = std::make_unique<ErrorData>(type, message);
- error->AppendBacktrace(file, function, line);
- return error;
- }
-
- ErrorData::ErrorData(InternalErrorType type, std::string message)
- : mType(type), mMessage(std::move(message)) {
- }
-
- void ErrorData::AppendBacktrace(const char* file, const char* function, int line) {
- BacktraceRecord record;
- record.file = file;
- record.function = function;
- record.line = line;
-
- mBacktrace.push_back(std::move(record));
- }
-
- void ErrorData::AppendContext(std::string context) {
- mContexts.push_back(std::move(context));
- }
-
- void ErrorData::AppendDebugGroup(std::string label) {
- mDebugGroups.push_back(std::move(label));
- }
-
- InternalErrorType ErrorData::GetType() const {
- return mType;
- }
-
- const std::string& ErrorData::GetMessage() const {
- return mMessage;
- }
-
- const std::vector<ErrorData::BacktraceRecord>& ErrorData::GetBacktrace() const {
- return mBacktrace;
- }
-
- const std::vector<std::string>& ErrorData::GetContexts() const {
- return mContexts;
- }
-
- const std::vector<std::string>& ErrorData::GetDebugGroups() const {
- return mDebugGroups;
+std::unique_ptr<ErrorData> ErrorData::Create(InternalErrorType type,
+ std::string message,
+ const char* file,
+ const char* function,
+ int line) {
+ std::unique_ptr<ErrorData> error = std::make_unique<ErrorData>(type, message);
+ error->AppendBacktrace(file, function, line);
+ return error;
+}
+
+ErrorData::ErrorData(InternalErrorType type, std::string message)
+ : mType(type), mMessage(std::move(message)) {}
+
+ErrorData::~ErrorData() = default;
+
+void ErrorData::AppendBacktrace(const char* file, const char* function, int line) {
+ BacktraceRecord record;
+ record.file = file;
+ record.function = function;
+ record.line = line;
+
+ mBacktrace.push_back(std::move(record));
+}
+
+void ErrorData::AppendContext(std::string context) {
+ mContexts.push_back(std::move(context));
+}
+
+void ErrorData::AppendDebugGroup(std::string label) {
+ mDebugGroups.push_back(std::move(label));
+}
+
+void ErrorData::AppendBackendMessage(std::string message) {
+ mBackendMessages.push_back(std::move(message));
+}
+
+InternalErrorType ErrorData::GetType() const {
+ return mType;
+}
+
+const std::string& ErrorData::GetMessage() const {
+ return mMessage;
+}
+
+const std::vector<ErrorData::BacktraceRecord>& ErrorData::GetBacktrace() const {
+ return mBacktrace;
+}
+
+const std::vector<std::string>& ErrorData::GetContexts() const {
+ return mContexts;
+}
+
+const std::vector<std::string>& ErrorData::GetDebugGroups() const {
+ return mDebugGroups;
+}
+
+const std::vector<std::string>& ErrorData::GetBackendMessages() const {
+ return mBackendMessages;
+}
+
+std::string ErrorData::GetFormattedMessage() const {
+ std::ostringstream ss;
+ ss << mMessage << "\n";
+
+ if (!mContexts.empty()) {
+ for (auto context : mContexts) {
+ ss << " - While " << context << "\n";
+ }
}
- std::string ErrorData::GetFormattedMessage() const {
- std::ostringstream ss;
- ss << mMessage << "\n";
-
- if (!mContexts.empty()) {
- for (auto context : mContexts) {
- ss << " - While " << context << "\n";
- }
+    // For non-validation errors, or errors that lack a context, include the
+ // stack trace for debugging purposes.
+ if (mContexts.empty() || mType != InternalErrorType::Validation) {
+ for (const auto& callsite : mBacktrace) {
+ ss << " at " << callsite.function << " (" << callsite.file << ":" << callsite.line
+ << ")\n";
}
+ }
- // For non-validation errors, or erros that lack a context include the
- // stack trace for debugging purposes.
- if (mContexts.empty() || mType != InternalErrorType::Validation) {
- for (const auto& callsite : mBacktrace) {
- ss << " at " << callsite.function << " (" << callsite.file << ":"
- << callsite.line << ")\n";
- }
+ if (!mDebugGroups.empty()) {
+ ss << "\nDebug group stack:\n";
+ for (auto label : mDebugGroups) {
+ ss << " > \"" << label << "\"\n";
}
+ }
- if (!mDebugGroups.empty()) {
- ss << "\nDebug group stack:\n";
- for (auto label : mDebugGroups) {
- ss << " > \"" << label << "\"\n";
- }
+ if (!mBackendMessages.empty()) {
+ ss << "\nBackend messages:\n";
+ for (auto message : mBackendMessages) {
+ ss << " * " << message << "\n";
}
-
- return ss.str();
}
+ return ss.str();
+}
+
} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/ErrorData.h b/chromium/third_party/dawn/src/dawn/native/ErrorData.h
index 6ad2e13576d..936252f827f 100644
--- a/chromium/third_party/dawn/src/dawn/native/ErrorData.h
+++ b/chromium/third_party/dawn/src/dawn/native/ErrorData.h
@@ -15,55 +15,61 @@
#ifndef SRC_DAWN_NATIVE_ERRORDATA_H_
#define SRC_DAWN_NATIVE_ERRORDATA_H_
-#include "dawn/common/Compiler.h"
-
#include <cstdint>
#include <memory>
#include <string>
#include <vector>
+#include "dawn/common/Compiler.h"
+
namespace wgpu {
- enum class ErrorType : uint32_t;
+enum class ErrorType : uint32_t;
}
namespace dawn {
- using ErrorType = wgpu::ErrorType;
+using ErrorType = wgpu::ErrorType;
}
namespace dawn::native {
- enum class InternalErrorType : uint32_t;
+enum class InternalErrorType : uint32_t;
- class [[nodiscard]] ErrorData {
- public:
- [[nodiscard]] static std::unique_ptr<ErrorData> Create(
- InternalErrorType type, std::string message, const char* file, const char* function,
- int line);
- ErrorData(InternalErrorType type, std::string message);
+class [[nodiscard]] ErrorData {
+ public:
+ [[nodiscard]] static std::unique_ptr<ErrorData> Create(InternalErrorType type,
+ std::string message,
+ const char* file,
+ const char* function,
+ int line);
+ ErrorData(InternalErrorType type, std::string message);
+ ~ErrorData();
- struct BacktraceRecord {
- const char* file;
- const char* function;
- int line;
- };
- void AppendBacktrace(const char* file, const char* function, int line);
- void AppendContext(std::string context);
- void AppendDebugGroup(std::string label);
+ struct BacktraceRecord {
+ const char* file;
+ const char* function;
+ int line;
+ };
+ void AppendBacktrace(const char* file, const char* function, int line);
+ void AppendContext(std::string context);
+ void AppendDebugGroup(std::string label);
+ void AppendBackendMessage(std::string message);
- InternalErrorType GetType() const;
- const std::string& GetMessage() const;
- const std::vector<BacktraceRecord>& GetBacktrace() const;
- const std::vector<std::string>& GetContexts() const;
- const std::vector<std::string>& GetDebugGroups() const;
+ InternalErrorType GetType() const;
+ const std::string& GetMessage() const;
+ const std::vector<BacktraceRecord>& GetBacktrace() const;
+ const std::vector<std::string>& GetContexts() const;
+ const std::vector<std::string>& GetDebugGroups() const;
+ const std::vector<std::string>& GetBackendMessages() const;
- std::string GetFormattedMessage() const;
+ std::string GetFormattedMessage() const;
- private:
- InternalErrorType mType;
- std::string mMessage;
- std::vector<BacktraceRecord> mBacktrace;
- std::vector<std::string> mContexts;
- std::vector<std::string> mDebugGroups;
- };
+ private:
+ InternalErrorType mType;
+ std::string mMessage;
+ std::vector<BacktraceRecord> mBacktrace;
+ std::vector<std::string> mContexts;
+ std::vector<std::string> mDebugGroups;
+ std::vector<std::string> mBackendMessages;
+};
} // namespace dawn::native
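
A hedged sketch of how an ErrorData instance accumulates information as an error propagates; the strings are illustrative, and DAWN_MAKE_ERROR is the macro from Error.h that forwards to ErrorData::Create().

    std::unique_ptr<ErrorData> error =
        DAWN_MAKE_ERROR(InternalErrorType::Validation, "Binding size is too small.");
    error->AppendContext("validating the bind group layout.");
    error->AppendDebugGroup("Frame 42");
    error->AppendBackendMessage("VK_ERROR_OUT_OF_DEVICE_MEMORY");  // backend-specific detail

    // GetFormattedMessage() renders the base message, then " - While ..." lines for
    // each context, the debug group stack, and finally any backend messages.
    std::string formatted = error->GetFormattedMessage();
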
diff --git a/chromium/third_party/dawn/src/dawn/native/ErrorInjector.cpp b/chromium/third_party/dawn/src/dawn/native/ErrorInjector.cpp
index af87498e371..5942f30378b 100644
--- a/chromium/third_party/dawn/src/dawn/native/ErrorInjector.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/ErrorInjector.cpp
@@ -19,52 +19,52 @@
namespace dawn::native {
- namespace {
+namespace {
- bool sIsEnabled = false;
- uint64_t sNextIndex = 0;
- uint64_t sInjectedFailureIndex = 0;
- bool sHasPendingInjectedError = false;
+bool sIsEnabled = false;
+uint64_t sNextIndex = 0;
+uint64_t sInjectedFailureIndex = 0;
+bool sHasPendingInjectedError = false;
- } // anonymous namespace
+} // anonymous namespace
- void EnableErrorInjector() {
- sIsEnabled = true;
- }
+void EnableErrorInjector() {
+ sIsEnabled = true;
+}
- void DisableErrorInjector() {
- sIsEnabled = false;
- }
+void DisableErrorInjector() {
+ sIsEnabled = false;
+}
- void ClearErrorInjector() {
- sNextIndex = 0;
- sHasPendingInjectedError = false;
- }
+void ClearErrorInjector() {
+ sNextIndex = 0;
+ sHasPendingInjectedError = false;
+}
- bool ErrorInjectorEnabled() {
- return sIsEnabled;
- }
+bool ErrorInjectorEnabled() {
+ return sIsEnabled;
+}
- uint64_t AcquireErrorInjectorCallCount() {
- uint64_t count = sNextIndex;
- ClearErrorInjector();
- return count;
- }
+uint64_t AcquireErrorInjectorCallCount() {
+ uint64_t count = sNextIndex;
+ ClearErrorInjector();
+ return count;
+}
- bool ShouldInjectError() {
- uint64_t index = sNextIndex++;
- if (sHasPendingInjectedError && index == sInjectedFailureIndex) {
- sHasPendingInjectedError = false;
- return true;
- }
- return false;
+bool ShouldInjectError() {
+ uint64_t index = sNextIndex++;
+ if (sHasPendingInjectedError && index == sInjectedFailureIndex) {
+ sHasPendingInjectedError = false;
+ return true;
}
+ return false;
+}
- void InjectErrorAt(uint64_t index) {
- // Only one error can be injected at a time.
- ASSERT(!sHasPendingInjectedError);
- sInjectedFailureIndex = index;
- sHasPendingInjectedError = true;
- }
+void InjectErrorAt(uint64_t index) {
+ // Only one error can be injected at a time.
+ ASSERT(!sHasPendingInjectedError);
+ sInjectedFailureIndex = index;
+ sHasPendingInjectedError = true;
+}
} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/ErrorInjector.h b/chromium/third_party/dawn/src/dawn/native/ErrorInjector.h
index 02cbea26adc..a65d80b5cfa 100644
--- a/chromium/third_party/dawn/src/dawn/native/ErrorInjector.h
+++ b/chromium/third_party/dawn/src/dawn/native/ErrorInjector.h
@@ -20,48 +20,48 @@
namespace dawn::native {
- template <typename ErrorType>
- struct InjectedErrorResult {
- ErrorType error;
- bool injected;
- };
+template <typename ErrorType>
+struct InjectedErrorResult {
+ ErrorType error;
+ bool injected;
+};
- bool ErrorInjectorEnabled();
+bool ErrorInjectorEnabled();
- bool ShouldInjectError();
+bool ShouldInjectError();
- template <typename ErrorType>
- InjectedErrorResult<ErrorType> MaybeInjectError(ErrorType errorType) {
- return InjectedErrorResult<ErrorType>{errorType, ShouldInjectError()};
- }
+template <typename ErrorType>
+InjectedErrorResult<ErrorType> MaybeInjectError(ErrorType errorType) {
+ return InjectedErrorResult<ErrorType>{errorType, ShouldInjectError()};
+}
- template <typename ErrorType, typename... ErrorTypes>
- InjectedErrorResult<ErrorType> MaybeInjectError(ErrorType errorType, ErrorTypes... errorTypes) {
- if (ShouldInjectError()) {
- return InjectedErrorResult<ErrorType>{errorType, true};
- }
- return MaybeInjectError(errorTypes...);
+template <typename ErrorType, typename... ErrorTypes>
+InjectedErrorResult<ErrorType> MaybeInjectError(ErrorType errorType, ErrorTypes... errorTypes) {
+ if (ShouldInjectError()) {
+ return InjectedErrorResult<ErrorType>{errorType, true};
}
+ return MaybeInjectError(errorTypes...);
+}
} // namespace dawn::native
#if defined(DAWN_ENABLE_ERROR_INJECTION)
-# define INJECT_ERROR_OR_RUN(stmt, ...) \
- [&]() { \
- if (DAWN_UNLIKELY(::dawn::native::ErrorInjectorEnabled())) { \
- /* Only used for testing and fuzzing, so it's okay if this is deoptimized */ \
- auto injectedError = ::dawn::native::MaybeInjectError(__VA_ARGS__); \
- if (injectedError.injected) { \
- return injectedError.error; \
- } \
- } \
- return (stmt); \
- }()
+#define INJECT_ERROR_OR_RUN(stmt, ...) \
+ [&]() { \
+ if (DAWN_UNLIKELY(::dawn::native::ErrorInjectorEnabled())) { \
+ /* Only used for testing and fuzzing, so it's okay if this is deoptimized */ \
+ auto injectedError = ::dawn::native::MaybeInjectError(__VA_ARGS__); \
+ if (injectedError.injected) { \
+ return injectedError.error; \
+ } \
+ } \
+ return (stmt); \
+ }()
#else
-# define INJECT_ERROR_OR_RUN(stmt, ...) stmt
+#define INJECT_ERROR_OR_RUN(stmt, ...) stmt
#endif
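
A hedged sketch of the injection flow this machinery is intended for in tests and fuzzers. RunWorkload() and the wrapped backend call are hypothetical, and the injector controls (Enable/Disable/Clear/InjectErrorAt/AcquireErrorInjectorCallCount) are the ones defined in ErrorInjector.cpp.

    // INJECT_ERROR_OR_RUN wraps a fallible call and substitutes one of the listed
    // error values when an injection fires, e.g. (illustrative only):
    //   VkResult result = INJECT_ERROR_OR_RUN(fn.CreateFence(...), VK_ERROR_OUT_OF_HOST_MEMORY);
    void ExerciseInjectedFailures() {
        EnableErrorInjector();

        // First pass: count how many injectable call sites the workload reaches.
        RunWorkload();
        uint64_t callCount = AcquireErrorInjectorCallCount();

        // Then fail each call site in turn and check the error surfaces cleanly.
        for (uint64_t i = 0; i < callCount; ++i) {
            InjectErrorAt(i);
            RunWorkload();
            ClearErrorInjector();
        }

        DisableErrorInjector();
    }
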
diff --git a/chromium/third_party/dawn/src/dawn/native/ErrorScope.cpp b/chromium/third_party/dawn/src/dawn/native/ErrorScope.cpp
index 06b7a95472b..b90fb13b6d8 100644
--- a/chromium/third_party/dawn/src/dawn/native/ErrorScope.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/ErrorScope.cpp
@@ -14,79 +14,84 @@
#include "dawn/native/ErrorScope.h"
+#include <utility>
+
#include "dawn/common/Assert.h"
namespace dawn::native {
- namespace {
+namespace {
- wgpu::ErrorType ErrorFilterToErrorType(wgpu::ErrorFilter filter) {
- switch (filter) {
- case wgpu::ErrorFilter::Validation:
- return wgpu::ErrorType::Validation;
- case wgpu::ErrorFilter::OutOfMemory:
- return wgpu::ErrorType::OutOfMemory;
- }
- UNREACHABLE();
- }
+wgpu::ErrorType ErrorFilterToErrorType(wgpu::ErrorFilter filter) {
+ switch (filter) {
+ case wgpu::ErrorFilter::Validation:
+ return wgpu::ErrorType::Validation;
+ case wgpu::ErrorFilter::OutOfMemory:
+ return wgpu::ErrorType::OutOfMemory;
+ }
+ UNREACHABLE();
+}
- } // namespace
+} // namespace
- ErrorScope::ErrorScope(wgpu::ErrorFilter errorFilter)
- : mMatchedErrorType(ErrorFilterToErrorType(errorFilter)) {
- }
+ErrorScope::ErrorScope(wgpu::ErrorFilter errorFilter)
+ : mMatchedErrorType(ErrorFilterToErrorType(errorFilter)) {}
- wgpu::ErrorType ErrorScope::GetErrorType() const {
- return mCapturedError;
- }
+wgpu::ErrorType ErrorScope::GetErrorType() const {
+ return mCapturedError;
+}
- const char* ErrorScope::GetErrorMessage() const {
- return mErrorMessage.c_str();
- }
+const char* ErrorScope::GetErrorMessage() const {
+ return mErrorMessage.c_str();
+}
- void ErrorScopeStack::Push(wgpu::ErrorFilter filter) {
- mScopes.push_back(ErrorScope(filter));
- }
+ErrorScopeStack::ErrorScopeStack() = default;
- ErrorScope ErrorScopeStack::Pop() {
- ASSERT(!mScopes.empty());
- ErrorScope scope = std::move(mScopes.back());
- mScopes.pop_back();
- return scope;
- }
+ErrorScopeStack::~ErrorScopeStack() = default;
- bool ErrorScopeStack::Empty() const {
- return mScopes.empty();
- }
+void ErrorScopeStack::Push(wgpu::ErrorFilter filter) {
+ mScopes.push_back(ErrorScope(filter));
+}
- bool ErrorScopeStack::HandleError(wgpu::ErrorType type, const char* message) {
- for (auto it = mScopes.rbegin(); it != mScopes.rend(); ++it) {
- if (it->mMatchedErrorType != type) {
- // Error filter does not match. Move on to the next scope.
- continue;
- }
+ErrorScope ErrorScopeStack::Pop() {
+ ASSERT(!mScopes.empty());
+ ErrorScope scope = std::move(mScopes.back());
+ mScopes.pop_back();
+ return scope;
+}
+
+bool ErrorScopeStack::Empty() const {
+ return mScopes.empty();
+}
+
+bool ErrorScopeStack::HandleError(wgpu::ErrorType type, const char* message) {
+ for (auto it = mScopes.rbegin(); it != mScopes.rend(); ++it) {
+ if (it->mMatchedErrorType != type) {
+ // Error filter does not match. Move on to the next scope.
+ continue;
+ }
+
+ // Filter matches.
+ // Record the error if the scope doesn't have one yet.
+ if (it->mCapturedError == wgpu::ErrorType::NoError) {
+ it->mCapturedError = type;
+ it->mErrorMessage = message;
+ }
- // Filter matches.
- // Record the error if the scope doesn't have one yet.
- if (it->mCapturedError == wgpu::ErrorType::NoError) {
+ if (type == wgpu::ErrorType::DeviceLost) {
+ if (it->mCapturedError != wgpu::ErrorType::DeviceLost) {
+ // DeviceLost overrides any other error that is not a DeviceLost.
it->mCapturedError = type;
it->mErrorMessage = message;
}
-
- if (type == wgpu::ErrorType::DeviceLost) {
- if (it->mCapturedError != wgpu::ErrorType::DeviceLost) {
- // DeviceLost overrides any other error that is not a DeviceLost.
- it->mCapturedError = type;
- it->mErrorMessage = message;
- }
- } else {
- // Errors that are not device lost are captured and stop propogating.
- return true;
- }
+ } else {
+            // Errors that are not device lost are captured and stop propagating.
+ return true;
}
-
- // The error was not captured.
- return false;
}
+ // The error was not captured.
+ return false;
+}
+
} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/ErrorScope.h b/chromium/third_party/dawn/src/dawn/native/ErrorScope.h
index 68a0dde1c22..7901d1883bb 100644
--- a/chromium/third_party/dawn/src/dawn/native/ErrorScope.h
+++ b/chromium/third_party/dawn/src/dawn/native/ErrorScope.h
@@ -15,42 +15,45 @@
#ifndef SRC_DAWN_NATIVE_ERRORSCOPE_H_
#define SRC_DAWN_NATIVE_ERRORSCOPE_H_
-#include "dawn/native/dawn_platform.h"
-
#include <string>
#include <vector>
+#include "dawn/native/dawn_platform.h"
+
namespace dawn::native {
- class ErrorScope {
- public:
- wgpu::ErrorType GetErrorType() const;
- const char* GetErrorMessage() const;
+class ErrorScope {
+ public:
+ wgpu::ErrorType GetErrorType() const;
+ const char* GetErrorMessage() const;
+
+ private:
+ friend class ErrorScopeStack;
+ explicit ErrorScope(wgpu::ErrorFilter errorFilter);
- private:
- friend class ErrorScopeStack;
- explicit ErrorScope(wgpu::ErrorFilter errorFilter);
+ wgpu::ErrorType mMatchedErrorType;
+ wgpu::ErrorType mCapturedError = wgpu::ErrorType::NoError;
+ std::string mErrorMessage = "";
+};
- wgpu::ErrorType mMatchedErrorType;
- wgpu::ErrorType mCapturedError = wgpu::ErrorType::NoError;
- std::string mErrorMessage = "";
- };
+class ErrorScopeStack {
+ public:
+ ErrorScopeStack();
+ ~ErrorScopeStack();
- class ErrorScopeStack {
- public:
- void Push(wgpu::ErrorFilter errorFilter);
- ErrorScope Pop();
+ void Push(wgpu::ErrorFilter errorFilter);
+ ErrorScope Pop();
- bool Empty() const;
+ bool Empty() const;
- // Pass an error to the scopes in the stack. Returns true if one of the scopes
- // captured the error. Returns false if the error should be forwarded to the
- // uncaptured error callback.
- bool HandleError(wgpu::ErrorType type, const char* message);
+ // Pass an error to the scopes in the stack. Returns true if one of the scopes
+ // captured the error. Returns false if the error should be forwarded to the
+ // uncaptured error callback.
+ bool HandleError(wgpu::ErrorType type, const char* message);
- private:
- std::vector<ErrorScope> mScopes;
- };
+ private:
+ std::vector<ErrorScope> mScopes;
+};
} // namespace dawn::native
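
A hedged sketch of how a device-level error path drives ErrorScopeStack; the surrounding device plumbing (push/pop entry points, callbacks) is omitted, and the error message is illustrative.

    ErrorScopeStack scopes;
    scopes.Push(wgpu::ErrorFilter::Validation);  // e.g. from a PushErrorScope entry point

    // A validation error occurs while the scope is open: the innermost scope whose
    // filter matches captures it, so it will not reach the uncaptured-error callback.
    bool captured = scopes.HandleError(wgpu::ErrorType::Validation, "Binding size too small.");
    // captured == true

    // Popping the scope (e.g. from a PopErrorScope entry point) hands the error back.
    ErrorScope scope = scopes.Pop();
    // scope.GetErrorType()    == wgpu::ErrorType::Validation
    // scope.GetErrorMessage() == "Binding size too small."
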
diff --git a/chromium/third_party/dawn/src/dawn/native/ExternalTexture.cpp b/chromium/third_party/dawn/src/dawn/native/ExternalTexture.cpp
index 1570825489d..0dfa963c151 100644
--- a/chromium/third_party/dawn/src/dawn/native/ExternalTexture.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/ExternalTexture.cpp
@@ -14,6 +14,9 @@
#include "dawn/native/ExternalTexture.h"
+#include <algorithm>
+#include <utility>
+
#include "dawn/native/Buffer.h"
#include "dawn/native/Device.h"
#include "dawn/native/ObjectType_autogen.h"
@@ -24,189 +27,201 @@
namespace dawn::native {
- MaybeError ValidateExternalTexturePlane(const TextureViewBase* textureView) {
- DAWN_INVALID_IF(
- (textureView->GetTexture()->GetUsage() & wgpu::TextureUsage::TextureBinding) == 0,
- "The external texture plane (%s) usage (%s) doesn't include the required usage (%s)",
- textureView, textureView->GetTexture()->GetUsage(), wgpu::TextureUsage::TextureBinding);
-
- DAWN_INVALID_IF(textureView->GetDimension() != wgpu::TextureViewDimension::e2D,
- "The external texture plane (%s) dimension (%s) is not 2D.", textureView,
- textureView->GetDimension());
+MaybeError ValidateExternalTexturePlane(const TextureViewBase* textureView) {
+ DAWN_INVALID_IF(
+ (textureView->GetTexture()->GetUsage() & wgpu::TextureUsage::TextureBinding) == 0,
+ "The external texture plane (%s) usage (%s) doesn't include the required usage (%s)",
+ textureView, textureView->GetTexture()->GetUsage(), wgpu::TextureUsage::TextureBinding);
- DAWN_INVALID_IF(textureView->GetLevelCount() > 1,
- "The external texture plane (%s) mip level count (%u) is not 1.",
- textureView, textureView->GetLevelCount());
+ DAWN_INVALID_IF(textureView->GetDimension() != wgpu::TextureViewDimension::e2D,
+ "The external texture plane (%s) dimension (%s) is not 2D.", textureView,
+ textureView->GetDimension());
- DAWN_INVALID_IF(textureView->GetTexture()->GetSampleCount() != 1,
- "The external texture plane (%s) sample count (%u) is not one.",
- textureView, textureView->GetTexture()->GetSampleCount());
+ DAWN_INVALID_IF(textureView->GetLevelCount() > 1,
+ "The external texture plane (%s) mip level count (%u) is not 1.", textureView,
+ textureView->GetLevelCount());
- return {};
- }
+ DAWN_INVALID_IF(textureView->GetTexture()->GetSampleCount() != 1,
+ "The external texture plane (%s) sample count (%u) is not one.", textureView,
+ textureView->GetTexture()->GetSampleCount());
- MaybeError ValidateExternalTextureDescriptor(const DeviceBase* device,
- const ExternalTextureDescriptor* descriptor) {
- ASSERT(descriptor);
- ASSERT(descriptor->plane0);
-
- DAWN_TRY(device->ValidateObject(descriptor->plane0));
-
- wgpu::TextureFormat plane0Format = descriptor->plane0->GetFormat().format;
-
- if (descriptor->plane1) {
- DAWN_INVALID_IF(
- device->IsToggleEnabled(Toggle::DisallowUnsafeAPIs),
- "Bi-planar external textures are disabled until the implementation is completed.");
-
- DAWN_INVALID_IF(descriptor->colorSpace != wgpu::PredefinedColorSpace::Srgb,
- "The specified color space (%s) is not %s.", descriptor->colorSpace,
- wgpu::PredefinedColorSpace::Srgb);
-
- DAWN_TRY(device->ValidateObject(descriptor->plane1));
- wgpu::TextureFormat plane1Format = descriptor->plane1->GetFormat().format;
-
- DAWN_INVALID_IF(plane0Format != wgpu::TextureFormat::R8Unorm,
- "The bi-planar external texture plane (%s) format (%s) is not %s.",
- descriptor->plane0, plane0Format, wgpu::TextureFormat::R8Unorm);
- DAWN_INVALID_IF(plane1Format != wgpu::TextureFormat::RG8Unorm,
- "The bi-planar external texture plane (%s) format (%s) is not %s.",
- descriptor->plane1, plane1Format, wgpu::TextureFormat::RG8Unorm);
-
- DAWN_TRY(ValidateExternalTexturePlane(descriptor->plane0));
- DAWN_TRY(ValidateExternalTexturePlane(descriptor->plane1));
- } else {
- switch (plane0Format) {
- case wgpu::TextureFormat::RGBA8Unorm:
- case wgpu::TextureFormat::BGRA8Unorm:
- case wgpu::TextureFormat::RGBA16Float:
- DAWN_TRY(ValidateExternalTexturePlane(descriptor->plane0));
- break;
- default:
- return DAWN_FORMAT_VALIDATION_ERROR(
- "The external texture plane (%s) format (%s) is not a supported format "
- "(%s, %s, %s).",
- descriptor->plane0, plane0Format, wgpu::TextureFormat::RGBA8Unorm,
- wgpu::TextureFormat::BGRA8Unorm, wgpu::TextureFormat::RGBA16Float);
- }
- }
+ return {};
+}
- return {};
- }
+MaybeError ValidateExternalTextureDescriptor(const DeviceBase* device,
+ const ExternalTextureDescriptor* descriptor) {
+ ASSERT(descriptor);
+ ASSERT(descriptor->plane0);
- // static
- ResultOrError<Ref<ExternalTextureBase>> ExternalTextureBase::Create(
- DeviceBase* device,
- const ExternalTextureDescriptor* descriptor) {
- Ref<ExternalTextureBase> externalTexture =
- AcquireRef(new ExternalTextureBase(device, descriptor));
- DAWN_TRY(externalTexture->Initialize(device, descriptor));
- return std::move(externalTexture);
- }
-
- ExternalTextureBase::ExternalTextureBase(DeviceBase* device,
- const ExternalTextureDescriptor* descriptor)
- : ApiObjectBase(device, descriptor->label), mState(ExternalTextureState::Alive) {
- TrackInDevice();
- }
-
- ExternalTextureBase::ExternalTextureBase(DeviceBase* device)
- : ApiObjectBase(device, kLabelNotImplemented), mState(ExternalTextureState::Alive) {
- TrackInDevice();
- }
+ DAWN_TRY(device->ValidateObject(descriptor->plane0));
- ExternalTextureBase::ExternalTextureBase(DeviceBase* device, ObjectBase::ErrorTag tag)
- : ApiObjectBase(device, tag) {
- }
+ wgpu::TextureFormat plane0Format = descriptor->plane0->GetFormat().format;
- ExternalTextureBase::~ExternalTextureBase() = default;
+ DAWN_INVALID_IF(!descriptor->gamutConversionMatrix,
+ "The gamut conversion matrix must be non-null.");
- MaybeError ExternalTextureBase::Initialize(DeviceBase* device,
- const ExternalTextureDescriptor* descriptor) {
- // Store any passed in TextureViews associated with individual planes.
- mTextureViews[0] = descriptor->plane0;
+ DAWN_INVALID_IF(!descriptor->srcTransferFunctionParameters,
+ "The source transfer function parameters must be non-null.");
- if (descriptor->plane1) {
- mTextureViews[1] = descriptor->plane1;
- } else {
- DAWN_TRY_ASSIGN(mTextureViews[1],
- device->GetOrCreateDummyTextureViewForExternalTexture());
- }
+ DAWN_INVALID_IF(!descriptor->dstTransferFunctionParameters,
+ "The destination transfer function parameters must be non-null.");
- // We must create a buffer to store parameters needed by a shader that operates on this
- // external texture.
- BufferDescriptor bufferDesc;
- bufferDesc.size = sizeof(ExternalTextureParams);
- bufferDesc.usage = wgpu::BufferUsage::Uniform | wgpu::BufferUsage::CopyDst;
- bufferDesc.label = "Dawn_External_Texture_Params_Buffer";
-
- DAWN_TRY_ASSIGN(mParamsBuffer, device->CreateBuffer(&bufferDesc));
-
- // Dawn & Tint's YUV to RGB conversion implementation was inspired by the conversions found
- // in libYUV. If this implementation needs expanded to support more colorspaces, this file
- // is an excellent reference: chromium/src/third_party/libyuv/source/row_common.cc.
- //
- // The conversion from YUV to RGB looks like this:
- // r = Y * 1.164 + V * vr
- // g = Y * 1.164 - U * ug - V * vg
- // b = Y * 1.164 + U * ub
- //
- // By changing the values of vr, vg, ub, and ug we can change the destination color space.
- ExternalTextureParams params;
- params.numPlanes = descriptor->plane1 == nullptr ? 1 : 2;
-
- switch (descriptor->colorSpace) {
- case wgpu::PredefinedColorSpace::Srgb:
- // Numbers derived from ITU-R recommendation for limited range BT.709
- params.vr = 1.793;
- params.vg = 0.392;
- params.ub = 0.813;
- params.ug = 2.017;
- break;
- case wgpu::PredefinedColorSpace::Undefined:
+ if (descriptor->plane1) {
+ DAWN_INVALID_IF(
+ !descriptor->yuvToRgbConversionMatrix,
+ "When more than one plane is set, the YUV-to-RGB conversion matrix must be non-null.");
+
+ DAWN_TRY(device->ValidateObject(descriptor->plane1));
+ wgpu::TextureFormat plane1Format = descriptor->plane1->GetFormat().format;
+
+ DAWN_INVALID_IF(plane0Format != wgpu::TextureFormat::R8Unorm,
+ "The bi-planar external texture plane (%s) format (%s) is not %s.",
+ descriptor->plane0, plane0Format, wgpu::TextureFormat::R8Unorm);
+ DAWN_INVALID_IF(plane1Format != wgpu::TextureFormat::RG8Unorm,
+ "The bi-planar external texture plane (%s) format (%s) is not %s.",
+ descriptor->plane1, plane1Format, wgpu::TextureFormat::RG8Unorm);
+
+ DAWN_TRY(ValidateExternalTexturePlane(descriptor->plane0));
+ DAWN_TRY(ValidateExternalTexturePlane(descriptor->plane1));
+ } else {
+ switch (plane0Format) {
+ case wgpu::TextureFormat::RGBA8Unorm:
+ case wgpu::TextureFormat::BGRA8Unorm:
+ case wgpu::TextureFormat::RGBA16Float:
+ DAWN_TRY(ValidateExternalTexturePlane(descriptor->plane0));
break;
+ default:
+ return DAWN_FORMAT_VALIDATION_ERROR(
+ "The external texture plane (%s) format (%s) is not a supported format "
+ "(%s, %s, %s).",
+ descriptor->plane0, plane0Format, wgpu::TextureFormat::RGBA8Unorm,
+ wgpu::TextureFormat::BGRA8Unorm, wgpu::TextureFormat::RGBA16Float);
}
-
- DAWN_TRY(device->GetQueue()->WriteBuffer(mParamsBuffer.Get(), 0, &params,
- sizeof(ExternalTextureParams)));
-
- return {};
}
- const std::array<Ref<TextureViewBase>, kMaxPlanesPerFormat>&
- ExternalTextureBase::GetTextureViews() const {
- return mTextureViews;
+ return {};
+}
+
+// static
+ResultOrError<Ref<ExternalTextureBase>> ExternalTextureBase::Create(
+ DeviceBase* device,
+ const ExternalTextureDescriptor* descriptor) {
+ Ref<ExternalTextureBase> externalTexture =
+ AcquireRef(new ExternalTextureBase(device, descriptor));
+ DAWN_TRY(externalTexture->Initialize(device, descriptor));
+ return std::move(externalTexture);
+}
+
+ExternalTextureBase::ExternalTextureBase(DeviceBase* device,
+ const ExternalTextureDescriptor* descriptor)
+ : ApiObjectBase(device, descriptor->label), mState(ExternalTextureState::Alive) {
+ TrackInDevice();
+}
+
+ExternalTextureBase::ExternalTextureBase(DeviceBase* device)
+ : ApiObjectBase(device, kLabelNotImplemented), mState(ExternalTextureState::Alive) {
+ TrackInDevice();
+}
+
+ExternalTextureBase::ExternalTextureBase(DeviceBase* device, ObjectBase::ErrorTag tag)
+ : ApiObjectBase(device, tag) {}
+
+ExternalTextureBase::~ExternalTextureBase() = default;
+
+MaybeError ExternalTextureBase::Initialize(DeviceBase* device,
+ const ExternalTextureDescriptor* descriptor) {
+ // Store any passed in TextureViews associated with individual planes.
+ mTextureViews[0] = descriptor->plane0;
+
+ if (descriptor->plane1) {
+ mTextureViews[1] = descriptor->plane1;
+ } else {
+ DAWN_TRY_ASSIGN(mTextureViews[1],
+ device->GetOrCreatePlaceholderTextureViewForExternalTexture());
}
- MaybeError ExternalTextureBase::ValidateCanUseInSubmitNow() const {
- ASSERT(!IsError());
- DAWN_INVALID_IF(mState == ExternalTextureState::Destroyed,
- "Destroyed external texture %s is used in a submit.", this);
- return {};
+ // We must create a buffer to store parameters needed by a shader that operates on this
+ // external texture.
+ BufferDescriptor bufferDesc;
+ bufferDesc.size = sizeof(ExternalTextureParams);
+ bufferDesc.usage = wgpu::BufferUsage::Uniform | wgpu::BufferUsage::CopyDst;
+ bufferDesc.label = "Dawn_External_Texture_Params_Buffer";
+
+ DAWN_TRY_ASSIGN(mParamsBuffer, device->CreateBuffer(&bufferDesc));
+
+ ExternalTextureParams params;
+ params.numPlanes = descriptor->plane1 == nullptr ? 1 : 2;
+
+ // YUV-to-RGB conversion is performed by multiplying the source YUV values with a 4x3 matrix
+ // passed from Chromium. The matrix was originally sourced from /skia/src/core/SkYUVMath.cpp.
+ // This matrix is only used in multiplanar scenarios.
+ if (params.numPlanes == 2) {
+ ASSERT(descriptor->yuvToRgbConversionMatrix);
+ const float* yMat = descriptor->yuvToRgbConversionMatrix;
+ std::copy(yMat, yMat + 12, params.yuvToRgbConversionMatrix.begin());
}
- void ExternalTextureBase::APIDestroy() {
- if (GetDevice()->ConsumedError(GetDevice()->ValidateObject(this))) {
- return;
- }
- Destroy();
+    // Gamut correction is performed by multiplying by a 3x3 matrix passed from Chromium. The
+ // matrix was computed by multiplying the appropriate source and destination gamut
+ // matrices sourced from ui/gfx/color_space.cc.
+ const float* gMat = descriptor->gamutConversionMatrix;
+ params.gamutConversionMatrix = {gMat[0], gMat[1], gMat[2], 0.0f, //
+ gMat[3], gMat[4], gMat[5], 0.0f, //
+ gMat[6], gMat[7], gMat[8], 0.0f};
+
+ // Gamma decode/encode is performed by the logic:
+ // if (abs(v) < params.D) {
+ // return sign(v) * (params.C * abs(v) + params.F);
+ // }
+ // return pow(A * x + B, G) + E
+ //
+ // Constants are passed from Chromium and originally sourced from ui/gfx/color_space.cc
+ const float* srcFn = descriptor->srcTransferFunctionParameters;
+ std::copy(srcFn, srcFn + 7, params.gammaDecodingParams.begin());
+
+ const float* dstFn = descriptor->dstTransferFunctionParameters;
+ std::copy(dstFn, dstFn + 7, params.gammaEncodingParams.begin());
+
+ DAWN_TRY(device->GetQueue()->WriteBuffer(mParamsBuffer.Get(), 0, &params,
+ sizeof(ExternalTextureParams)));
+
+ return {};
+}
+
+const std::array<Ref<TextureViewBase>, kMaxPlanesPerFormat>& ExternalTextureBase::GetTextureViews()
+ const {
+ return mTextureViews;
+}
+
+MaybeError ExternalTextureBase::ValidateCanUseInSubmitNow() const {
+ ASSERT(!IsError());
+ DAWN_INVALID_IF(mState == ExternalTextureState::Destroyed,
+ "Destroyed external texture %s is used in a submit.", this);
+ return {};
+}
+
+void ExternalTextureBase::APIDestroy() {
+ if (GetDevice()->ConsumedError(GetDevice()->ValidateObject(this))) {
+ return;
}
+ Destroy();
+}
- void ExternalTextureBase::DestroyImpl() {
- mState = ExternalTextureState::Destroyed;
- }
+void ExternalTextureBase::DestroyImpl() {
+ mState = ExternalTextureState::Destroyed;
+}
- // static
- ExternalTextureBase* ExternalTextureBase::MakeError(DeviceBase* device) {
- return new ExternalTextureBase(device, ObjectBase::kError);
- }
+// static
+ExternalTextureBase* ExternalTextureBase::MakeError(DeviceBase* device) {
+ return new ExternalTextureBase(device, ObjectBase::kError);
+}
- BufferBase* ExternalTextureBase::GetParamsBuffer() const {
- return mParamsBuffer.Get();
- }
+BufferBase* ExternalTextureBase::GetParamsBuffer() const {
+ return mParamsBuffer.Get();
+}
- ObjectType ExternalTextureBase::GetType() const {
- return ObjectType::ExternalTexture;
- }
+ObjectType ExternalTextureBase::GetType() const {
+ return ObjectType::ExternalTexture;
+}
} // namespace dawn::native
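Note on the gamma decode/encode comment in Initialize() above: it describes a 7-parameter piecewise transfer function (G, A, B, C, D, E, F) whose constants are passed from Chromium. The sketch below shows one way such a function could be evaluated on the CPU; the struct and function names are illustrative only, and applying sign()/abs() on the power branch is an assumption carried over from Chromium-style transfer-function handling rather than something stated in this patch.

// Illustrative sketch, not Dawn code: evaluating the 7-parameter piecewise
// transfer function referenced by gammaDecodingParams / gammaEncodingParams.
#include <cmath>

struct TransferFnSketch {
    float G, A, B, C, D, E, F;
};

float EvalTransferFn(const TransferFnSketch& fn, float v) {
    const float s = v < 0.0f ? -1.0f : 1.0f;
    const float av = std::abs(v);
    if (av < fn.D) {
        // Linear segment near zero.
        return s * (fn.C * av + fn.F);
    }
    // Power segment; the sign handling here is an assumption, see the note above.
    return s * (std::pow(fn.A * av + fn.B, fn.G) + fn.E);
}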
diff --git a/chromium/third_party/dawn/src/dawn/native/ExternalTexture.h b/chromium/third_party/dawn/src/dawn/native/ExternalTexture.h
index 771659b5c61..50f3b89885f 100644
--- a/chromium/third_party/dawn/src/dawn/native/ExternalTexture.h
+++ b/chromium/third_party/dawn/src/dawn/native/ExternalTexture.h
@@ -15,63 +15,64 @@
#ifndef SRC_DAWN_NATIVE_EXTERNALTEXTURE_H_
#define SRC_DAWN_NATIVE_EXTERNALTEXTURE_H_
+#include <array>
+
#include "dawn/native/Error.h"
#include "dawn/native/Forward.h"
#include "dawn/native/ObjectBase.h"
#include "dawn/native/Subresource.h"
-#include <array>
-
namespace dawn::native {
- class TextureViewBase;
+class TextureViewBase;
- struct ExternalTextureParams {
- uint32_t numPlanes;
- float vr;
- float vg;
- float ub;
- float ug;
- };
+struct ExternalTextureParams {
+ uint32_t numPlanes;
+ std::array<uint32_t, 3> padding;
+ std::array<float, 12> yuvToRgbConversionMatrix;
+ std::array<float, 8> gammaDecodingParams = {};
+ std::array<float, 8> gammaEncodingParams = {};
+ std::array<float, 12> gamutConversionMatrix = {};
+};
- MaybeError ValidateExternalTextureDescriptor(const DeviceBase* device,
- const ExternalTextureDescriptor* descriptor);
+MaybeError ValidateExternalTextureDescriptor(const DeviceBase* device,
+ const ExternalTextureDescriptor* descriptor);
- class ExternalTextureBase : public ApiObjectBase {
- public:
- static ResultOrError<Ref<ExternalTextureBase>> Create(
- DeviceBase* device,
- const ExternalTextureDescriptor* descriptor);
+class ExternalTextureBase : public ApiObjectBase {
+ public:
+ static ResultOrError<Ref<ExternalTextureBase>> Create(
+ DeviceBase* device,
+ const ExternalTextureDescriptor* descriptor);
- BufferBase* GetParamsBuffer() const;
- const std::array<Ref<TextureViewBase>, kMaxPlanesPerFormat>& GetTextureViews() const;
- ObjectType GetType() const override;
+ BufferBase* GetParamsBuffer() const;
+ const std::array<Ref<TextureViewBase>, kMaxPlanesPerFormat>& GetTextureViews() const;
+ ObjectType GetType() const override;
- MaybeError ValidateCanUseInSubmitNow() const;
- static ExternalTextureBase* MakeError(DeviceBase* device);
+ MaybeError ValidateCanUseInSubmitNow() const;
+ static ExternalTextureBase* MakeError(DeviceBase* device);
- void APIDestroy();
+ void APIDestroy();
- protected:
- // Constructor used only for mocking and testing.
- explicit ExternalTextureBase(DeviceBase* device);
- void DestroyImpl() override;
+ protected:
+ // Constructor used only for mocking and testing.
+ explicit ExternalTextureBase(DeviceBase* device);
+ void DestroyImpl() override;
- ~ExternalTextureBase() override;
+ ~ExternalTextureBase() override;
- private:
- ExternalTextureBase(DeviceBase* device, const ExternalTextureDescriptor* descriptor);
+ private:
+ ExternalTextureBase(DeviceBase* device, const ExternalTextureDescriptor* descriptor);
- enum class ExternalTextureState { Alive, Destroyed };
- ExternalTextureBase(DeviceBase* device, ObjectBase::ErrorTag tag);
- MaybeError Initialize(DeviceBase* device, const ExternalTextureDescriptor* descriptor);
+ enum class ExternalTextureState { Alive, Destroyed };
+ ExternalTextureBase(DeviceBase* device, ObjectBase::ErrorTag tag);
+ MaybeError Initialize(DeviceBase* device, const ExternalTextureDescriptor* descriptor);
- Ref<TextureBase> mDummyTexture;
- Ref<BufferBase> mParamsBuffer;
- std::array<Ref<TextureViewBase>, kMaxPlanesPerFormat> mTextureViews;
+ Ref<TextureBase> mPlaceholderTexture;
+ Ref<BufferBase> mParamsBuffer;
+ std::array<Ref<TextureViewBase>, kMaxPlanesPerFormat> mTextureViews;
- ExternalTextureState mState;
- };
+ ExternalTextureState mState;
+};
} // namespace dawn::native
#endif // SRC_DAWN_NATIVE_EXTERNALTEXTURE_H_
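The member sizes in the new ExternalTextureParams above (three padding words after numPlanes, 8-float gamma parameter arrays, 12-float 3x4 matrices) line up with the 16-byte alignment rules of WGSL/std140-style uniform buffers, since the struct is written into a wgpu::BufferUsage::Uniform buffer in Initialize(). A compile-time sanity check along these lines would be one way to document that; the struct here is a local copy for illustration, not the Dawn type.

// Sketch only: mirrors the layout of dawn::native::ExternalTextureParams to
// check the 16-byte alignment assumptions at compile time.
#include <array>
#include <cstddef>
#include <cstdint>

struct ExternalTextureParamsSketch {
    uint32_t numPlanes;
    std::array<uint32_t, 3> padding;                 // pads numPlanes out to 16 bytes
    std::array<float, 12> yuvToRgbConversionMatrix;  // 4x3 matrix, three vec4 rows
    std::array<float, 8> gammaDecodingParams;        // 7 parameters plus 1 float of padding
    std::array<float, 8> gammaEncodingParams;
    std::array<float, 12> gamutConversionMatrix;     // 3x3 matrix stored as three vec4 rows
};

static_assert(offsetof(ExternalTextureParamsSketch, yuvToRgbConversionMatrix) % 16 == 0, "");
static_assert(offsetof(ExternalTextureParamsSketch, gammaDecodingParams) % 16 == 0, "");
static_assert(offsetof(ExternalTextureParamsSketch, gamutConversionMatrix) % 16 == 0, "");
static_assert(sizeof(ExternalTextureParamsSketch) % 16 == 0, "");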
diff --git a/chromium/third_party/dawn/src/dawn/native/Features.cpp b/chromium/third_party/dawn/src/dawn/native/Features.cpp
index 56a532c1298..a813c6871dc 100644
--- a/chromium/third_party/dawn/src/dawn/native/Features.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/Features.cpp
@@ -12,266 +12,284 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+#include "dawn/native/Features.h"
+
#include <array>
+#include <utility>
#include "dawn/common/Assert.h"
#include "dawn/common/BitSetIterator.h"
-#include "dawn/native/Features.h"
namespace dawn::native {
- namespace {
-
- struct FeatureEnumAndInfo {
- Feature feature;
- FeatureInfo info;
- bool WGPUDeviceProperties::*memberInWGPUDeviceProperties;
- };
-
- using FeatureEnumAndInfoList =
- std::array<FeatureEnumAndInfo, static_cast<size_t>(Feature::EnumCount)>;
-
- static constexpr FeatureEnumAndInfoList kFeatureNameAndInfoList = {
- {{Feature::TextureCompressionBC,
- {"texture-compression-bc", "Support Block Compressed (BC) texture formats",
- "https://bugs.chromium.org/p/dawn/issues/detail?id=42"},
- &WGPUDeviceProperties::textureCompressionBC},
- {Feature::TextureCompressionETC2,
- {"texture-compression-etc2",
- "Support Ericsson Texture Compressed (ETC2/EAC) texture "
- "formats",
- "https://bugs.chromium.org/p/dawn/issues/detail?id=955"},
- &WGPUDeviceProperties::textureCompressionETC2},
- {Feature::TextureCompressionASTC,
- {"texture-compression-astc",
- "Support Adaptable Scalable Texture Compressed (ASTC) "
- "texture formats",
- "https://bugs.chromium.org/p/dawn/issues/detail?id=955"},
- &WGPUDeviceProperties::textureCompressionASTC},
- {Feature::ShaderFloat16,
- {"shader-float16",
- "Support 16bit float arithmetic and declarations in uniform and storage buffers",
- "https://bugs.chromium.org/p/dawn/issues/detail?id=426"},
- &WGPUDeviceProperties::shaderFloat16},
- {Feature::PipelineStatisticsQuery,
- {"pipeline-statistics-query", "Support Pipeline Statistics Query",
- "https://bugs.chromium.org/p/dawn/issues/detail?id=434"},
- &WGPUDeviceProperties::pipelineStatisticsQuery},
- {Feature::TimestampQuery,
- {"timestamp-query", "Support Timestamp Query",
- "https://bugs.chromium.org/p/dawn/issues/detail?id=434"},
- &WGPUDeviceProperties::timestampQuery},
- {Feature::DepthClamping,
- {"depth-clamping", "Clamp depth to [0, 1] in NDC space instead of clipping",
- "https://bugs.chromium.org/p/dawn/issues/detail?id=716"},
- &WGPUDeviceProperties::depthClamping},
- {Feature::Depth24UnormStencil8,
- {"depth24unorm-stencil8", "Support depth24unorm-stencil8 texture format",
- "https://bugs.chromium.org/p/dawn/issues/detail?id=690"},
- &WGPUDeviceProperties::depth24UnormStencil8},
- {Feature::Depth32FloatStencil8,
- {"depth32float-stencil8", "Support depth32float-stencil8 texture format",
- "https://bugs.chromium.org/p/dawn/issues/detail?id=690"},
- &WGPUDeviceProperties::depth32FloatStencil8},
- {Feature::DawnInternalUsages,
- {"dawn-internal-usages",
- "Add internal usages to resources to affect how the texture is allocated, but not "
- "frontend validation. Other internal commands may access this usage.",
- "https://dawn.googlesource.com/dawn/+/refs/heads/main/docs/dawn/features/"
- "dawn_internal_usages.md"},
- &WGPUDeviceProperties::dawnInternalUsages},
- {Feature::MultiPlanarFormats,
- {"multiplanar-formats",
- "Import and use multi-planar texture formats with per plane views",
- "https://bugs.chromium.org/p/dawn/issues/detail?id=551"},
- &WGPUDeviceProperties::multiPlanarFormats},
- {Feature::DawnNative,
- {"dawn-native", "WebGPU is running on top of dawn_native.",
- "https://dawn.googlesource.com/dawn/+/refs/heads/main/docs/dawn/features/"
- "dawn_native.md"},
- &WGPUDeviceProperties::dawnNative}}};
-
- Feature FromAPIFeature(wgpu::FeatureName feature) {
- switch (feature) {
- case wgpu::FeatureName::Undefined:
- return Feature::InvalidEnum;
-
- case wgpu::FeatureName::TimestampQuery:
- return Feature::TimestampQuery;
- case wgpu::FeatureName::PipelineStatisticsQuery:
- return Feature::PipelineStatisticsQuery;
- case wgpu::FeatureName::TextureCompressionBC:
- return Feature::TextureCompressionBC;
- case wgpu::FeatureName::TextureCompressionETC2:
- return Feature::TextureCompressionETC2;
- case wgpu::FeatureName::TextureCompressionASTC:
- return Feature::TextureCompressionASTC;
- case wgpu::FeatureName::DepthClamping:
- return Feature::DepthClamping;
- case wgpu::FeatureName::Depth24UnormStencil8:
- return Feature::Depth24UnormStencil8;
- case wgpu::FeatureName::Depth32FloatStencil8:
- return Feature::Depth32FloatStencil8;
- case wgpu::FeatureName::DawnShaderFloat16:
- return Feature::ShaderFloat16;
- case wgpu::FeatureName::DawnInternalUsages:
- return Feature::DawnInternalUsages;
- case wgpu::FeatureName::DawnMultiPlanarFormats:
- return Feature::MultiPlanarFormats;
- case wgpu::FeatureName::DawnNative:
- return Feature::DawnNative;
-
- case wgpu::FeatureName::IndirectFirstInstance:
- return Feature::InvalidEnum;
- }
+namespace {
+
+struct FeatureEnumAndInfo {
+ Feature feature;
+ FeatureInfo info;
+ bool WGPUDeviceProperties::*memberInWGPUDeviceProperties;
+};
+
+using FeatureEnumAndInfoList =
+ std::array<FeatureEnumAndInfo, static_cast<size_t>(Feature::EnumCount)>;
+
+static constexpr FeatureEnumAndInfoList kFeatureNameAndInfoList = {{
+ {Feature::TextureCompressionBC,
+ {"texture-compression-bc", "Support Block Compressed (BC) texture formats",
+ "https://bugs.chromium.org/p/dawn/issues/detail?id=42"},
+ &WGPUDeviceProperties::textureCompressionBC},
+ {Feature::TextureCompressionETC2,
+ {"texture-compression-etc2",
+ "Support Ericsson Texture Compressed (ETC2/EAC) texture "
+ "formats",
+ "https://bugs.chromium.org/p/dawn/issues/detail?id=955"},
+ &WGPUDeviceProperties::textureCompressionETC2},
+ {Feature::TextureCompressionASTC,
+ {"texture-compression-astc",
+ "Support Adaptable Scalable Texture Compressed (ASTC) "
+ "texture formats",
+ "https://bugs.chromium.org/p/dawn/issues/detail?id=955"},
+ &WGPUDeviceProperties::textureCompressionASTC},
+ {Feature::ShaderFloat16,
+ {"shader-float16",
+ "Support 16bit float arithmetic and declarations in uniform and storage buffers",
+ "https://bugs.chromium.org/p/dawn/issues/detail?id=426"},
+ &WGPUDeviceProperties::shaderFloat16},
+ {Feature::PipelineStatisticsQuery,
+ {"pipeline-statistics-query", "Support Pipeline Statistics Query",
+ "https://bugs.chromium.org/p/dawn/issues/detail?id=434"},
+ &WGPUDeviceProperties::pipelineStatisticsQuery},
+ {Feature::TimestampQuery,
+ {"timestamp-query", "Support Timestamp Query",
+ "https://bugs.chromium.org/p/dawn/issues/detail?id=434"},
+ &WGPUDeviceProperties::timestampQuery},
+ {Feature::DepthClamping,
+ {"depth-clamping", "Clamp depth to [0, 1] in NDC space instead of clipping",
+ "https://bugs.chromium.org/p/dawn/issues/detail?id=716"},
+ &WGPUDeviceProperties::depthClamping},
+ {Feature::Depth24UnormStencil8,
+ {"depth24unorm-stencil8", "Support depth24unorm-stencil8 texture format",
+ "https://bugs.chromium.org/p/dawn/issues/detail?id=690"},
+ &WGPUDeviceProperties::depth24UnormStencil8},
+ {Feature::Depth32FloatStencil8,
+ {"depth32float-stencil8", "Support depth32float-stencil8 texture format",
+ "https://bugs.chromium.org/p/dawn/issues/detail?id=690"},
+ &WGPUDeviceProperties::depth32FloatStencil8},
+ {Feature::ChromiumExperimentalDp4a,
+ {"chromium-experimental-dp4a", "Support experimental DP4a instructions in WGSL",
+ "https://bugs.chromium.org/p/tint/issues/detail?id=1497"},
+ &WGPUDeviceProperties::chromiumExperimentalDp4a},
+ {Feature::IndirectFirstInstance,
+ {"indirect-first-instance", "Support non-zero first instance values on indirect draw calls",
+ "https://bugs.chromium.org/p/dawn/issues/detail?id=1197"},
+ &WGPUDeviceProperties::indirectFirstInstance},
+ {Feature::DawnInternalUsages,
+ {"dawn-internal-usages",
+ "Add internal usages to resources to affect how the texture is allocated, but not "
+ "frontend validation. Other internal commands may access this usage.",
+ "https://dawn.googlesource.com/dawn/+/refs/heads/main/docs/dawn/features/"
+ "dawn_internal_usages.md"},
+ &WGPUDeviceProperties::dawnInternalUsages},
+ {Feature::MultiPlanarFormats,
+ {"multiplanar-formats", "Import and use multi-planar texture formats with per plane views",
+ "https://bugs.chromium.org/p/dawn/issues/detail?id=551"},
+ &WGPUDeviceProperties::multiPlanarFormats},
+ {Feature::DawnNative,
+ {"dawn-native", "WebGPU is running on top of dawn_native.",
+ "https://dawn.googlesource.com/dawn/+/refs/heads/main/docs/dawn/features/"
+ "dawn_native.md"},
+ &WGPUDeviceProperties::dawnNative},
+}};
+
+Feature FromAPIFeature(wgpu::FeatureName feature) {
+ switch (feature) {
+ case wgpu::FeatureName::Undefined:
return Feature::InvalidEnum;
- }
-
- wgpu::FeatureName ToAPIFeature(Feature feature) {
- switch (feature) {
- case Feature::TextureCompressionBC:
- return wgpu::FeatureName::TextureCompressionBC;
- case Feature::TextureCompressionETC2:
- return wgpu::FeatureName::TextureCompressionETC2;
- case Feature::TextureCompressionASTC:
- return wgpu::FeatureName::TextureCompressionASTC;
- case Feature::PipelineStatisticsQuery:
- return wgpu::FeatureName::PipelineStatisticsQuery;
- case Feature::TimestampQuery:
- return wgpu::FeatureName::TimestampQuery;
- case Feature::DepthClamping:
- return wgpu::FeatureName::DepthClamping;
- case Feature::Depth24UnormStencil8:
- return wgpu::FeatureName::Depth24UnormStencil8;
- case Feature::Depth32FloatStencil8:
- return wgpu::FeatureName::Depth32FloatStencil8;
- case Feature::ShaderFloat16:
- return wgpu::FeatureName::DawnShaderFloat16;
- case Feature::DawnInternalUsages:
- return wgpu::FeatureName::DawnInternalUsages;
- case Feature::MultiPlanarFormats:
- return wgpu::FeatureName::DawnMultiPlanarFormats;
- case Feature::DawnNative:
- return wgpu::FeatureName::DawnNative;
-
- case Feature::EnumCount:
- UNREACHABLE();
- }
- }
-
- } // anonymous namespace
- void FeaturesSet::EnableFeature(Feature feature) {
- ASSERT(feature != Feature::InvalidEnum);
- const size_t featureIndex = static_cast<size_t>(feature);
- featuresBitSet.set(featureIndex);
- }
-
- void FeaturesSet::EnableFeature(wgpu::FeatureName feature) {
- EnableFeature(FromAPIFeature(feature));
+ case wgpu::FeatureName::TimestampQuery:
+ return Feature::TimestampQuery;
+ case wgpu::FeatureName::PipelineStatisticsQuery:
+ return Feature::PipelineStatisticsQuery;
+ case wgpu::FeatureName::TextureCompressionBC:
+ return Feature::TextureCompressionBC;
+ case wgpu::FeatureName::TextureCompressionETC2:
+ return Feature::TextureCompressionETC2;
+ case wgpu::FeatureName::TextureCompressionASTC:
+ return Feature::TextureCompressionASTC;
+ case wgpu::FeatureName::DepthClamping:
+ return Feature::DepthClamping;
+ case wgpu::FeatureName::Depth24UnormStencil8:
+ return Feature::Depth24UnormStencil8;
+ case wgpu::FeatureName::Depth32FloatStencil8:
+ return Feature::Depth32FloatStencil8;
+ case wgpu::FeatureName::IndirectFirstInstance:
+ return Feature::IndirectFirstInstance;
+ case wgpu::FeatureName::DawnShaderFloat16:
+ return Feature::ShaderFloat16;
+ case wgpu::FeatureName::DawnInternalUsages:
+ return Feature::DawnInternalUsages;
+ case wgpu::FeatureName::DawnMultiPlanarFormats:
+ return Feature::MultiPlanarFormats;
+ case wgpu::FeatureName::DawnNative:
+ return Feature::DawnNative;
+ case wgpu::FeatureName::ChromiumExperimentalDp4a:
+ return Feature::ChromiumExperimentalDp4a;
}
-
- bool FeaturesSet::IsEnabled(Feature feature) const {
- ASSERT(feature != Feature::InvalidEnum);
- const size_t featureIndex = static_cast<size_t>(feature);
- return featuresBitSet[featureIndex];
- }
-
- bool FeaturesSet::IsEnabled(wgpu::FeatureName feature) const {
- Feature f = FromAPIFeature(feature);
- return f != Feature::InvalidEnum && IsEnabled(f);
+ return Feature::InvalidEnum;
+}
+
+wgpu::FeatureName ToAPIFeature(Feature feature) {
+ switch (feature) {
+ case Feature::TextureCompressionBC:
+ return wgpu::FeatureName::TextureCompressionBC;
+ case Feature::TextureCompressionETC2:
+ return wgpu::FeatureName::TextureCompressionETC2;
+ case Feature::TextureCompressionASTC:
+ return wgpu::FeatureName::TextureCompressionASTC;
+ case Feature::PipelineStatisticsQuery:
+ return wgpu::FeatureName::PipelineStatisticsQuery;
+ case Feature::TimestampQuery:
+ return wgpu::FeatureName::TimestampQuery;
+ case Feature::DepthClamping:
+ return wgpu::FeatureName::DepthClamping;
+ case Feature::Depth24UnormStencil8:
+ return wgpu::FeatureName::Depth24UnormStencil8;
+ case Feature::Depth32FloatStencil8:
+ return wgpu::FeatureName::Depth32FloatStencil8;
+ case Feature::IndirectFirstInstance:
+ return wgpu::FeatureName::IndirectFirstInstance;
+ case Feature::ShaderFloat16:
+ return wgpu::FeatureName::DawnShaderFloat16;
+ case Feature::DawnInternalUsages:
+ return wgpu::FeatureName::DawnInternalUsages;
+ case Feature::MultiPlanarFormats:
+ return wgpu::FeatureName::DawnMultiPlanarFormats;
+ case Feature::DawnNative:
+ return wgpu::FeatureName::DawnNative;
+ case Feature::ChromiumExperimentalDp4a:
+ return wgpu::FeatureName::ChromiumExperimentalDp4a;
+
+ case Feature::EnumCount:
+ break;
}
-
- size_t FeaturesSet::EnumerateFeatures(wgpu::FeatureName* features) const {
- for (uint32_t i : IterateBitSet(featuresBitSet)) {
- wgpu::FeatureName feature = ToAPIFeature(static_cast<Feature>(i));
- if (features != nullptr) {
- *features = feature;
- features += 1;
- }
+ UNREACHABLE();
+}
+
+} // anonymous namespace
+
+void FeaturesSet::EnableFeature(Feature feature) {
+ ASSERT(feature != Feature::InvalidEnum);
+ const size_t featureIndex = static_cast<size_t>(feature);
+ featuresBitSet.set(featureIndex);
+}
+
+void FeaturesSet::EnableFeature(wgpu::FeatureName feature) {
+ EnableFeature(FromAPIFeature(feature));
+}
+
+bool FeaturesSet::IsEnabled(Feature feature) const {
+ ASSERT(feature != Feature::InvalidEnum);
+ const size_t featureIndex = static_cast<size_t>(feature);
+ return featuresBitSet[featureIndex];
+}
+
+bool FeaturesSet::IsEnabled(wgpu::FeatureName feature) const {
+ Feature f = FromAPIFeature(feature);
+ return f != Feature::InvalidEnum && IsEnabled(f);
+}
+
+size_t FeaturesSet::EnumerateFeatures(wgpu::FeatureName* features) const {
+ for (uint32_t i : IterateBitSet(featuresBitSet)) {
+ wgpu::FeatureName feature = ToAPIFeature(static_cast<Feature>(i));
+ if (features != nullptr) {
+ *features = feature;
+ features += 1;
}
- return featuresBitSet.count();
}
+ return featuresBitSet.count();
+}
- std::vector<const char*> FeaturesSet::GetEnabledFeatureNames() const {
- std::vector<const char*> enabledFeatureNames(featuresBitSet.count());
+std::vector<const char*> FeaturesSet::GetEnabledFeatureNames() const {
+ std::vector<const char*> enabledFeatureNames(featuresBitSet.count());
- uint32_t index = 0;
- for (uint32_t i : IterateBitSet(featuresBitSet)) {
- Feature feature = static_cast<Feature>(i);
- ASSERT(feature != Feature::InvalidEnum);
+ uint32_t index = 0;
+ for (uint32_t i : IterateBitSet(featuresBitSet)) {
+ Feature feature = static_cast<Feature>(i);
+ ASSERT(feature != Feature::InvalidEnum);
- const FeatureEnumAndInfo& featureNameAndInfo = kFeatureNameAndInfoList[i];
- ASSERT(featureNameAndInfo.feature == feature);
+ const FeatureEnumAndInfo& featureNameAndInfo = kFeatureNameAndInfoList[i];
+ ASSERT(featureNameAndInfo.feature == feature);
- enabledFeatureNames[index] = featureNameAndInfo.info.name;
- ++index;
- }
- return enabledFeatureNames;
+ enabledFeatureNames[index] = featureNameAndInfo.info.name;
+ ++index;
}
+ return enabledFeatureNames;
+}
- void FeaturesSet::InitializeDeviceProperties(WGPUDeviceProperties* properties) const {
- ASSERT(properties != nullptr);
+void FeaturesSet::InitializeDeviceProperties(WGPUDeviceProperties* properties) const {
+ ASSERT(properties != nullptr);
- for (uint32_t i : IterateBitSet(featuresBitSet)) {
- properties->*(kFeatureNameAndInfoList[i].memberInWGPUDeviceProperties) = true;
- }
+ for (uint32_t i : IterateBitSet(featuresBitSet)) {
+ properties->*(kFeatureNameAndInfoList[i].memberInWGPUDeviceProperties) = true;
}
-
- wgpu::FeatureName FeatureEnumToAPIFeature(Feature feature) {
- ASSERT(feature != Feature::InvalidEnum);
- return ToAPIFeature(feature);
+}
+
+wgpu::FeatureName FeatureEnumToAPIFeature(Feature feature) {
+ ASSERT(feature != Feature::InvalidEnum);
+ return ToAPIFeature(feature);
+}
+
+FeaturesInfo::FeaturesInfo() {
+ for (size_t index = 0; index < kFeatureNameAndInfoList.size(); ++index) {
+ const FeatureEnumAndInfo& featureNameAndInfo = kFeatureNameAndInfoList[index];
+ ASSERT(index == static_cast<size_t>(featureNameAndInfo.feature));
+ mFeatureNameToEnumMap[featureNameAndInfo.info.name] = featureNameAndInfo.feature;
}
+}
- FeaturesInfo::FeaturesInfo() {
- for (size_t index = 0; index < kFeatureNameAndInfoList.size(); ++index) {
- const FeatureEnumAndInfo& featureNameAndInfo = kFeatureNameAndInfoList[index];
- ASSERT(index == static_cast<size_t>(featureNameAndInfo.feature));
- mFeatureNameToEnumMap[featureNameAndInfo.info.name] = featureNameAndInfo.feature;
- }
- }
+FeaturesInfo::~FeaturesInfo() = default;
- const FeatureInfo* FeaturesInfo::GetFeatureInfo(wgpu::FeatureName feature) const {
- Feature f = FromAPIFeature(feature);
- if (f == Feature::InvalidEnum) {
- return nullptr;
- }
- return &kFeatureNameAndInfoList[static_cast<size_t>(f)].info;
+const FeatureInfo* FeaturesInfo::GetFeatureInfo(wgpu::FeatureName feature) const {
+ Feature f = FromAPIFeature(feature);
+ if (f == Feature::InvalidEnum) {
+ return nullptr;
}
+ return &kFeatureNameAndInfoList[static_cast<size_t>(f)].info;
+}
- Feature FeaturesInfo::FeatureNameToEnum(const char* featureName) const {
- ASSERT(featureName);
+Feature FeaturesInfo::FeatureNameToEnum(const char* featureName) const {
+ ASSERT(featureName);
- const auto& iter = mFeatureNameToEnumMap.find(featureName);
- if (iter != mFeatureNameToEnumMap.cend()) {
- return kFeatureNameAndInfoList[static_cast<size_t>(iter->second)].feature;
- }
+ const auto& iter = mFeatureNameToEnumMap.find(featureName);
+ if (iter != mFeatureNameToEnumMap.cend()) {
+ return kFeatureNameAndInfoList[static_cast<size_t>(iter->second)].feature;
+ }
- // TODO(dawn:550): Remove this fallback logic when Chromium is updated.
- constexpr std::array<std::pair<const char*, const char*>, 6>
- kReplacementsForDeprecatedNames = {{
- {"texture_compression_bc", "texture-compression-bc"},
- {"depth_clamping", "depth-clamping"},
- {"pipeline_statistics_query", "pipeline-statistics-query"},
- {"shader_float16", "shader-float16"},
- {"timestamp_query", "timestamp-query"},
- {"multiplanar_formats", "multiplanar-formats"},
- }};
- for (const auto& [name, replacement] : kReplacementsForDeprecatedNames) {
- if (strcmp(featureName, name) == 0) {
- return FeatureNameToEnum(replacement);
- }
+ // TODO(dawn:550): Remove this fallback logic when Chromium is updated.
+ constexpr std::array<std::pair<const char*, const char*>, 6> kReplacementsForDeprecatedNames = {
+ {
+ {"texture_compression_bc", "texture-compression-bc"},
+ {"depth_clamping", "depth-clamping"},
+ {"pipeline_statistics_query", "pipeline-statistics-query"},
+ {"shader_float16", "shader-float16"},
+ {"timestamp_query", "timestamp-query"},
+ {"multiplanar_formats", "multiplanar-formats"},
+ }};
+ for (const auto& [name, replacement] : kReplacementsForDeprecatedNames) {
+ if (strcmp(featureName, name) == 0) {
+ return FeatureNameToEnum(replacement);
}
-
- return Feature::InvalidEnum;
}
- wgpu::FeatureName FeaturesInfo::FeatureNameToAPIEnum(const char* featureName) const {
- Feature f = FeatureNameToEnum(featureName);
- if (f != Feature::InvalidEnum) {
- return ToAPIFeature(f);
- }
- // Pass something invalid.
- return static_cast<wgpu::FeatureName>(-1);
+ return Feature::InvalidEnum;
+}
+
+wgpu::FeatureName FeaturesInfo::FeatureNameToAPIEnum(const char* featureName) const {
+ Feature f = FeatureNameToEnum(featureName);
+ if (f != Feature::InvalidEnum) {
+ return ToAPIFeature(f);
}
+ // Pass something invalid.
+ return static_cast<wgpu::FeatureName>(-1);
+}
} // namespace dawn::native
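The FeatureNameToEnum() fallback above keeps accepting the deprecated underscore-separated feature names until Chromium migrates to the hyphenated ones. A small usage sketch of that behaviour, assuming only the public FeaturesInfo API shown in this patch:

// Usage sketch, not part of the patch.
#include <cassert>
#include "dawn/native/Features.h"

void FeatureNameLookupExample() {
    dawn::native::FeaturesInfo info;
    // Deprecated and current spellings resolve to the same enum while the fallback exists.
    assert(info.FeatureNameToEnum("texture_compression_bc") ==
           info.FeatureNameToEnum("texture-compression-bc"));
    // Unknown names map to Feature::InvalidEnum.
    assert(info.FeatureNameToEnum("not-a-real-feature") == dawn::native::Feature::InvalidEnum);
}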
diff --git a/chromium/third_party/dawn/src/dawn/native/Features.h b/chromium/third_party/dawn/src/dawn/native/Features.h
index 6a110a20ee4..ebf804e5ca9 100644
--- a/chromium/third_party/dawn/src/dawn/native/Features.h
+++ b/chromium/third_party/dawn/src/dawn/native/Features.h
@@ -16,6 +16,7 @@
#define SRC_DAWN_NATIVE_FEATURES_H_
#include <bitset>
+#include <string>
#include <unordered_map>
#include <vector>
@@ -25,58 +26,61 @@
namespace dawn::native {
- enum class Feature {
- TextureCompressionBC,
- TextureCompressionETC2,
- TextureCompressionASTC,
- ShaderFloat16,
- PipelineStatisticsQuery,
- TimestampQuery,
- DepthClamping,
- Depth24UnormStencil8,
- Depth32FloatStencil8,
+enum class Feature {
+ TextureCompressionBC,
+ TextureCompressionETC2,
+ TextureCompressionASTC,
+ ShaderFloat16,
+ PipelineStatisticsQuery,
+ TimestampQuery,
+ DepthClamping,
+ Depth24UnormStencil8,
+ Depth32FloatStencil8,
+ ChromiumExperimentalDp4a,
+ IndirectFirstInstance,
- // Dawn-specific
- DawnInternalUsages,
- MultiPlanarFormats,
- DawnNative,
+ // Dawn-specific
+ DawnInternalUsages,
+ MultiPlanarFormats,
+ DawnNative,
- EnumCount,
- InvalidEnum = EnumCount,
- FeatureMin = TextureCompressionBC,
- };
+ EnumCount,
+ InvalidEnum = EnumCount,
+ FeatureMin = TextureCompressionBC,
+};
- // A wrapper of the bitset to store if an feature is enabled or not. This wrapper provides the
- // convenience to convert the enums of enum class Feature to the indices of a bitset.
- struct FeaturesSet {
- std::bitset<static_cast<size_t>(Feature::EnumCount)> featuresBitSet;
+// A wrapper around the bitset that stores whether a feature is enabled or not. This wrapper
+// provides the convenience of converting enum class Feature values to the indices of a bitset.
+struct FeaturesSet {
+ std::bitset<static_cast<size_t>(Feature::EnumCount)> featuresBitSet;
- void EnableFeature(Feature feature);
- void EnableFeature(wgpu::FeatureName feature);
- bool IsEnabled(Feature feature) const;
- bool IsEnabled(wgpu::FeatureName feature) const;
- // Returns |count|, the number of features. Writes out all |count| values if |features| is
- // non-null.
- size_t EnumerateFeatures(wgpu::FeatureName* features) const;
- std::vector<const char*> GetEnabledFeatureNames() const;
- void InitializeDeviceProperties(WGPUDeviceProperties* properties) const;
- };
+ void EnableFeature(Feature feature);
+ void EnableFeature(wgpu::FeatureName feature);
+ bool IsEnabled(Feature feature) const;
+ bool IsEnabled(wgpu::FeatureName feature) const;
+ // Returns |count|, the number of features. Writes out all |count| values if |features| is
+ // non-null.
+ size_t EnumerateFeatures(wgpu::FeatureName* features) const;
+ std::vector<const char*> GetEnabledFeatureNames() const;
+ void InitializeDeviceProperties(WGPUDeviceProperties* properties) const;
+};
- wgpu::FeatureName FeatureEnumToAPIFeature(Feature feature);
+wgpu::FeatureName FeatureEnumToAPIFeature(Feature feature);
- class FeaturesInfo {
- public:
- FeaturesInfo();
+class FeaturesInfo {
+ public:
+ FeaturesInfo();
+ ~FeaturesInfo();
- // Used to query the details of an feature. Return nullptr if featureName is not a valid
- // name of an feature supported in Dawn
- const FeatureInfo* GetFeatureInfo(wgpu::FeatureName feature) const;
- Feature FeatureNameToEnum(const char* featureName) const;
- wgpu::FeatureName FeatureNameToAPIEnum(const char* featureName) const;
+    // Used to query the details of a feature. Returns nullptr if the given feature is not a
+    // valid feature supported in Dawn.
+ const FeatureInfo* GetFeatureInfo(wgpu::FeatureName feature) const;
+ Feature FeatureNameToEnum(const char* featureName) const;
+ wgpu::FeatureName FeatureNameToAPIEnum(const char* featureName) const;
- private:
- std::unordered_map<std::string, Feature> mFeatureNameToEnumMap;
- };
+ private:
+ std::unordered_map<std::string, Feature> mFeatureNameToEnumMap;
+};
} // namespace dawn::native
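FeaturesSet::EnumerateFeatures() above follows the usual two-call enumeration pattern: it always returns the count, and only writes values when the output pointer is non-null. A usage sketch, assuming wgpu::FeatureName is visible through the headers Features.h already includes:

// Usage sketch, not part of the patch.
#include <cstddef>
#include <vector>
#include "dawn/native/Features.h"

std::vector<wgpu::FeatureName> CollectEnabledFeatures(const dawn::native::FeaturesSet& set) {
    const std::size_t count = set.EnumerateFeatures(nullptr);  // first call: query the count
    std::vector<wgpu::FeatureName> features(count);
    if (count != 0) {
        set.EnumerateFeatures(features.data());                // second call: fill the buffer
    }
    return features;
}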
diff --git a/chromium/third_party/dawn/src/dawn/native/Format.cpp b/chromium/third_party/dawn/src/dawn/native/Format.cpp
index 946baf5a606..f1b4cb87ce0 100644
--- a/chromium/third_party/dawn/src/dawn/native/Format.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/Format.cpp
@@ -14,313 +14,311 @@
#include "dawn/native/Format.h"
+#include <bitset>
+
#include "dawn/native/Device.h"
#include "dawn/native/EnumMaskIterator.h"
#include "dawn/native/Features.h"
#include "dawn/native/Texture.h"
-#include <bitset>
-
namespace dawn::native {
- // Format
-
- // TODO(dawn:527): Remove when unused.
- SampleTypeBit ToSampleTypeBit(wgpu::TextureComponentType type) {
- switch (type) {
- case wgpu::TextureComponentType::Float:
- return SampleTypeBit::Float;
- case wgpu::TextureComponentType::Sint:
- return SampleTypeBit::Sint;
- case wgpu::TextureComponentType::Uint:
- return SampleTypeBit::Uint;
- case wgpu::TextureComponentType::DepthComparison:
- return SampleTypeBit::Depth;
- }
- UNREACHABLE();
- }
-
- SampleTypeBit SampleTypeToSampleTypeBit(wgpu::TextureSampleType sampleType) {
- switch (sampleType) {
- case wgpu::TextureSampleType::Float:
- case wgpu::TextureSampleType::UnfilterableFloat:
- case wgpu::TextureSampleType::Sint:
- case wgpu::TextureSampleType::Uint:
- case wgpu::TextureSampleType::Depth:
- case wgpu::TextureSampleType::Undefined:
- // When the compiler complains that you need to add a case statement here, please
- // also add a corresponding static assert below!
- break;
- }
-
- static_assert(static_cast<uint32_t>(wgpu::TextureSampleType::Undefined) == 0);
- if (sampleType == wgpu::TextureSampleType::Undefined) {
- return SampleTypeBit::None;
- }
-
- // Check that SampleTypeBit bits are in the same position / order as the respective
- // wgpu::TextureSampleType value.
- static_assert(SampleTypeBit::Float ==
- static_cast<SampleTypeBit>(
- 1 << (static_cast<uint32_t>(wgpu::TextureSampleType::Float) - 1)));
- static_assert(
- SampleTypeBit::UnfilterableFloat ==
- static_cast<SampleTypeBit>(
- 1 << (static_cast<uint32_t>(wgpu::TextureSampleType::UnfilterableFloat) - 1)));
- static_assert(SampleTypeBit::Uint ==
- static_cast<SampleTypeBit>(
- 1 << (static_cast<uint32_t>(wgpu::TextureSampleType::Uint) - 1)));
- static_assert(SampleTypeBit::Sint ==
- static_cast<SampleTypeBit>(
- 1 << (static_cast<uint32_t>(wgpu::TextureSampleType::Sint) - 1)));
- static_assert(SampleTypeBit::Depth ==
- static_cast<SampleTypeBit>(
- 1 << (static_cast<uint32_t>(wgpu::TextureSampleType::Depth) - 1)));
- return static_cast<SampleTypeBit>(1 << (static_cast<uint32_t>(sampleType) - 1));
- }
-
- bool Format::IsColor() const {
- return aspects == Aspect::Color;
- }
-
- bool Format::HasDepth() const {
- return (aspects & Aspect::Depth) != 0;
- }
-
- bool Format::HasStencil() const {
- return (aspects & Aspect::Stencil) != 0;
- }
-
- bool Format::HasDepthOrStencil() const {
- return (aspects & (Aspect::Depth | Aspect::Stencil)) != 0;
- }
-
- bool Format::IsMultiPlanar() const {
- return (aspects & (Aspect::Plane0 | Aspect::Plane1)) != 0;
- }
-
- bool Format::CopyCompatibleWith(const Format& format) const {
- // TODO(crbug.com/dawn/1332): Add a Format compatibility matrix.
- return baseFormat == format.baseFormat;
+// Format
+
+// TODO(dawn:527): Remove when unused.
+SampleTypeBit ToSampleTypeBit(wgpu::TextureComponentType type) {
+ switch (type) {
+ case wgpu::TextureComponentType::Float:
+ return SampleTypeBit::Float;
+ case wgpu::TextureComponentType::Sint:
+ return SampleTypeBit::Sint;
+ case wgpu::TextureComponentType::Uint:
+ return SampleTypeBit::Uint;
+ case wgpu::TextureComponentType::DepthComparison:
+ return SampleTypeBit::Depth;
}
-
- bool Format::ViewCompatibleWith(const Format& format) const {
- // TODO(crbug.com/dawn/1332): Add a Format compatibility matrix.
- return baseFormat == format.baseFormat;
+ UNREACHABLE();
+}
+
+SampleTypeBit SampleTypeToSampleTypeBit(wgpu::TextureSampleType sampleType) {
+ switch (sampleType) {
+ case wgpu::TextureSampleType::Float:
+ case wgpu::TextureSampleType::UnfilterableFloat:
+ case wgpu::TextureSampleType::Sint:
+ case wgpu::TextureSampleType::Uint:
+ case wgpu::TextureSampleType::Depth:
+ case wgpu::TextureSampleType::Undefined:
+ // When the compiler complains that you need to add a case statement here, please
+ // also add a corresponding static assert below!
+ break;
}
- const AspectInfo& Format::GetAspectInfo(wgpu::TextureAspect aspect) const {
- return GetAspectInfo(SelectFormatAspects(*this, aspect));
- }
-
- const AspectInfo& Format::GetAspectInfo(Aspect aspect) const {
- ASSERT(HasOneBit(aspect));
- ASSERT(aspects & aspect);
- const size_t aspectIndex = GetAspectIndex(aspect);
- ASSERT(aspectIndex < GetAspectCount(aspects));
- return aspectInfo[aspectIndex];
- }
-
- FormatIndex Format::GetIndex() const {
- return ComputeFormatIndex(format);
- }
-
- // FormatSet implementation
-
- bool FormatSet::operator[](const Format& format) const {
- return Base::operator[](format.GetIndex());
+ static_assert(static_cast<uint32_t>(wgpu::TextureSampleType::Undefined) == 0);
+ if (sampleType == wgpu::TextureSampleType::Undefined) {
+ return SampleTypeBit::None;
}
- typename std::bitset<kKnownFormatCount>::reference FormatSet::operator[](const Format& format) {
- return Base::operator[](format.GetIndex());
- }
-
- // Implementation details of the format table of the DeviceBase
-
- // For the enum for formats are packed but this might change when we have a broader feature
- // mechanism for webgpu.h. Formats start at 1 because 0 is the undefined format.
- FormatIndex ComputeFormatIndex(wgpu::TextureFormat format) {
- // This takes advantage of overflows to make the index of TextureFormat::Undefined outside
- // of the range of the FormatTable.
- static_assert(static_cast<uint32_t>(wgpu::TextureFormat::Undefined) - 1 >
- kKnownFormatCount);
- return static_cast<FormatIndex>(static_cast<uint32_t>(format) - 1);
- }
-
- FormatTable BuildFormatTable(const DeviceBase* device) {
- FormatTable table;
- FormatSet formatsSet;
-
- static constexpr SampleTypeBit kAnyFloat =
- SampleTypeBit::Float | SampleTypeBit::UnfilterableFloat;
-
- auto AddFormat = [&table, &formatsSet](Format format) {
- FormatIndex index = ComputeFormatIndex(format.format);
- ASSERT(index < table.size());
-
- // This checks that each format is set at most once, the first part of checking that all
- // formats are set exactly once.
- ASSERT(!formatsSet[index]);
-
- // Vulkan describes bytesPerRow in units of texels. If there's any format for which this
- // ASSERT isn't true, then additional validation on bytesPerRow must be added.
- const bool hasMultipleAspects = !HasOneBit(format.aspects);
- ASSERT(hasMultipleAspects ||
- (kTextureBytesPerRowAlignment % format.aspectInfo[0].block.byteSize) == 0);
-
- table[index] = format;
- formatsSet.set(index);
- };
-
- auto AddColorFormat =
- [&AddFormat](wgpu::TextureFormat format, bool renderable, bool supportsStorageUsage,
- bool supportsMultisample, bool supportsResolveTarget, uint32_t byteSize,
- SampleTypeBit sampleTypes, uint8_t componentCount,
- wgpu::TextureFormat baseFormat = wgpu::TextureFormat::Undefined) {
- Format internalFormat;
- internalFormat.format = format;
- internalFormat.isRenderable = renderable;
- internalFormat.isCompressed = false;
- internalFormat.isSupported = true;
- internalFormat.supportsStorageUsage = supportsStorageUsage;
-
- if (supportsMultisample) {
- ASSERT(renderable);
- }
- internalFormat.supportsMultisample = supportsMultisample;
- internalFormat.supportsResolveTarget = supportsResolveTarget;
- internalFormat.aspects = Aspect::Color;
- internalFormat.componentCount = componentCount;
-
- // Default baseFormat of each color formats should be themselves.
- if (baseFormat == wgpu::TextureFormat::Undefined) {
- internalFormat.baseFormat = format;
- } else {
- internalFormat.baseFormat = baseFormat;
- }
-
- AspectInfo* firstAspect = internalFormat.aspectInfo.data();
- firstAspect->block.byteSize = byteSize;
- firstAspect->block.width = 1;
- firstAspect->block.height = 1;
- if (HasOneBit(sampleTypes)) {
- switch (sampleTypes) {
- case SampleTypeBit::Float:
- case SampleTypeBit::UnfilterableFloat:
- firstAspect->baseType = wgpu::TextureComponentType::Float;
- break;
- case SampleTypeBit::Sint:
- firstAspect->baseType = wgpu::TextureComponentType::Sint;
- break;
- case SampleTypeBit::Uint:
- firstAspect->baseType = wgpu::TextureComponentType::Uint;
- break;
- default:
- UNREACHABLE();
- }
- } else {
- ASSERT((sampleTypes & SampleTypeBit::Float) != 0);
- firstAspect->baseType = wgpu::TextureComponentType::Float;
- }
- firstAspect->supportedSampleTypes = sampleTypes;
- firstAspect->format = format;
- AddFormat(internalFormat);
- };
-
- auto AddDepthFormat = [&AddFormat](wgpu::TextureFormat format, uint32_t byteSize,
- bool isSupported) {
+ // Check that SampleTypeBit bits are in the same position / order as the respective
+ // wgpu::TextureSampleType value.
+ static_assert(SampleTypeBit::Float ==
+ static_cast<SampleTypeBit>(
+ 1 << (static_cast<uint32_t>(wgpu::TextureSampleType::Float) - 1)));
+ static_assert(
+ SampleTypeBit::UnfilterableFloat ==
+ static_cast<SampleTypeBit>(
+ 1 << (static_cast<uint32_t>(wgpu::TextureSampleType::UnfilterableFloat) - 1)));
+ static_assert(SampleTypeBit::Uint ==
+ static_cast<SampleTypeBit>(
+ 1 << (static_cast<uint32_t>(wgpu::TextureSampleType::Uint) - 1)));
+ static_assert(SampleTypeBit::Sint ==
+ static_cast<SampleTypeBit>(
+ 1 << (static_cast<uint32_t>(wgpu::TextureSampleType::Sint) - 1)));
+ static_assert(SampleTypeBit::Depth ==
+ static_cast<SampleTypeBit>(
+ 1 << (static_cast<uint32_t>(wgpu::TextureSampleType::Depth) - 1)));
+ return static_cast<SampleTypeBit>(1 << (static_cast<uint32_t>(sampleType) - 1));
+}
+
+bool Format::IsColor() const {
+ return aspects == Aspect::Color;
+}
+
+bool Format::HasDepth() const {
+ return (aspects & Aspect::Depth) != 0;
+}
+
+bool Format::HasStencil() const {
+ return (aspects & Aspect::Stencil) != 0;
+}
+
+bool Format::HasDepthOrStencil() const {
+ return (aspects & (Aspect::Depth | Aspect::Stencil)) != 0;
+}
+
+bool Format::IsMultiPlanar() const {
+ return (aspects & (Aspect::Plane0 | Aspect::Plane1)) != 0;
+}
+
+bool Format::CopyCompatibleWith(const Format& format) const {
+ // TODO(crbug.com/dawn/1332): Add a Format compatibility matrix.
+ return baseFormat == format.baseFormat;
+}
+
+bool Format::ViewCompatibleWith(const Format& format) const {
+ // TODO(crbug.com/dawn/1332): Add a Format compatibility matrix.
+ return baseFormat == format.baseFormat;
+}
+
+const AspectInfo& Format::GetAspectInfo(wgpu::TextureAspect aspect) const {
+ return GetAspectInfo(SelectFormatAspects(*this, aspect));
+}
+
+const AspectInfo& Format::GetAspectInfo(Aspect aspect) const {
+ ASSERT(HasOneBit(aspect));
+ ASSERT(aspects & aspect);
+ const size_t aspectIndex = GetAspectIndex(aspect);
+ ASSERT(aspectIndex < GetAspectCount(aspects));
+ return aspectInfo[aspectIndex];
+}
+
+FormatIndex Format::GetIndex() const {
+ return ComputeFormatIndex(format);
+}
+
+// FormatSet implementation
+
+bool FormatSet::operator[](const Format& format) const {
+ return Base::operator[](format.GetIndex());
+}
+
+typename std::bitset<kKnownFormatCount>::reference FormatSet::operator[](const Format& format) {
+ return Base::operator[](format.GetIndex());
+}
+
+// Implementation details of the format table of the DeviceBase
+
+// For now the enum values for formats are packed, but this might change when we have a broader
+// feature mechanism for webgpu.h. Formats start at 1 because 0 is the undefined format.
+FormatIndex ComputeFormatIndex(wgpu::TextureFormat format) {
+ // This takes advantage of overflows to make the index of TextureFormat::Undefined outside
+ // of the range of the FormatTable.
+ static_assert(static_cast<uint32_t>(wgpu::TextureFormat::Undefined) - 1 > kKnownFormatCount);
+ return static_cast<FormatIndex>(static_cast<uint32_t>(format) - 1);
+}
+
+FormatTable BuildFormatTable(const DeviceBase* device) {
+ FormatTable table;
+ FormatSet formatsSet;
+
+ static constexpr SampleTypeBit kAnyFloat =
+ SampleTypeBit::Float | SampleTypeBit::UnfilterableFloat;
+
+ auto AddFormat = [&table, &formatsSet](Format format) {
+ FormatIndex index = ComputeFormatIndex(format.format);
+ ASSERT(index < table.size());
+
+ // This checks that each format is set at most once, the first part of checking that all
+ // formats are set exactly once.
+ ASSERT(!formatsSet[index]);
+
+ // Vulkan describes bytesPerRow in units of texels. If there's any format for which this
+ // ASSERT isn't true, then additional validation on bytesPerRow must be added.
+ const bool hasMultipleAspects = !HasOneBit(format.aspects);
+ ASSERT(hasMultipleAspects ||
+ (kTextureBytesPerRowAlignment % format.aspectInfo[0].block.byteSize) == 0);
+
+ table[index] = format;
+ formatsSet.set(index);
+ };
+
+ auto AddColorFormat =
+ [&AddFormat](wgpu::TextureFormat format, bool renderable, bool supportsStorageUsage,
+ bool supportsMultisample, bool supportsResolveTarget, uint32_t byteSize,
+ SampleTypeBit sampleTypes, uint8_t componentCount,
+ wgpu::TextureFormat baseFormat = wgpu::TextureFormat::Undefined) {
Format internalFormat;
internalFormat.format = format;
- internalFormat.baseFormat = format;
- internalFormat.isRenderable = true;
+ internalFormat.isRenderable = renderable;
internalFormat.isCompressed = false;
- internalFormat.isSupported = isSupported;
- internalFormat.supportsStorageUsage = false;
- internalFormat.supportsMultisample = true;
- internalFormat.supportsResolveTarget = false;
- internalFormat.aspects = Aspect::Depth;
- internalFormat.componentCount = 1;
+ internalFormat.isSupported = true;
+ internalFormat.supportsStorageUsage = supportsStorageUsage;
+
+ if (supportsMultisample) {
+ ASSERT(renderable);
+ }
+ internalFormat.supportsMultisample = supportsMultisample;
+ internalFormat.supportsResolveTarget = supportsResolveTarget;
+ internalFormat.aspects = Aspect::Color;
+ internalFormat.componentCount = componentCount;
+
+            // The default baseFormat of each color format should be the format itself.
+ if (baseFormat == wgpu::TextureFormat::Undefined) {
+ internalFormat.baseFormat = format;
+ } else {
+ internalFormat.baseFormat = baseFormat;
+ }
AspectInfo* firstAspect = internalFormat.aspectInfo.data();
firstAspect->block.byteSize = byteSize;
firstAspect->block.width = 1;
firstAspect->block.height = 1;
- firstAspect->baseType = wgpu::TextureComponentType::Float;
- firstAspect->supportedSampleTypes = SampleTypeBit::Depth;
+ if (HasOneBit(sampleTypes)) {
+ switch (sampleTypes) {
+ case SampleTypeBit::Float:
+ case SampleTypeBit::UnfilterableFloat:
+ firstAspect->baseType = wgpu::TextureComponentType::Float;
+ break;
+ case SampleTypeBit::Sint:
+ firstAspect->baseType = wgpu::TextureComponentType::Sint;
+ break;
+ case SampleTypeBit::Uint:
+ firstAspect->baseType = wgpu::TextureComponentType::Uint;
+ break;
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ ASSERT((sampleTypes & SampleTypeBit::Float) != 0);
+ firstAspect->baseType = wgpu::TextureComponentType::Float;
+ }
+ firstAspect->supportedSampleTypes = sampleTypes;
firstAspect->format = format;
AddFormat(internalFormat);
};
- auto AddStencilFormat = [&AddFormat](wgpu::TextureFormat format, bool isSupported) {
+ auto AddDepthFormat = [&AddFormat](wgpu::TextureFormat format, uint32_t byteSize,
+ bool isSupported) {
+ Format internalFormat;
+ internalFormat.format = format;
+ internalFormat.baseFormat = format;
+ internalFormat.isRenderable = true;
+ internalFormat.isCompressed = false;
+ internalFormat.isSupported = isSupported;
+ internalFormat.supportsStorageUsage = false;
+ internalFormat.supportsMultisample = true;
+ internalFormat.supportsResolveTarget = false;
+ internalFormat.aspects = Aspect::Depth;
+ internalFormat.componentCount = 1;
+
+ AspectInfo* firstAspect = internalFormat.aspectInfo.data();
+ firstAspect->block.byteSize = byteSize;
+ firstAspect->block.width = 1;
+ firstAspect->block.height = 1;
+ firstAspect->baseType = wgpu::TextureComponentType::Float;
+ firstAspect->supportedSampleTypes = SampleTypeBit::Depth;
+ firstAspect->format = format;
+ AddFormat(internalFormat);
+ };
+
+ auto AddStencilFormat = [&AddFormat](wgpu::TextureFormat format, bool isSupported) {
+ Format internalFormat;
+ internalFormat.format = format;
+ internalFormat.baseFormat = format;
+ internalFormat.isRenderable = true;
+ internalFormat.isCompressed = false;
+ internalFormat.isSupported = isSupported;
+ internalFormat.supportsStorageUsage = false;
+ internalFormat.supportsMultisample = true;
+ internalFormat.supportsResolveTarget = false;
+ internalFormat.aspects = Aspect::Stencil;
+ internalFormat.componentCount = 1;
+
+ // Duplicate the data for the stencil aspect in both the first and second aspect info.
+ // - aspectInfo[0] is used by AddMultiAspectFormat to copy the info for the whole
+ // stencil8 aspect of depth-stencil8 formats.
+ // - aspectInfo[1] is the actual info used in the rest of Dawn since
+ // GetAspectIndex(Aspect::Stencil) is 1.
+ ASSERT(GetAspectIndex(Aspect::Stencil) == 1);
+
+ internalFormat.aspectInfo[0].block.byteSize = 1;
+ internalFormat.aspectInfo[0].block.width = 1;
+ internalFormat.aspectInfo[0].block.height = 1;
+ internalFormat.aspectInfo[0].baseType = wgpu::TextureComponentType::Uint;
+ internalFormat.aspectInfo[0].supportedSampleTypes = SampleTypeBit::Uint;
+ internalFormat.aspectInfo[0].format = format;
+
+ internalFormat.aspectInfo[1] = internalFormat.aspectInfo[0];
+
+ AddFormat(internalFormat);
+ };
+
+ auto AddCompressedFormat =
+ [&AddFormat](wgpu::TextureFormat format, uint32_t byteSize, uint32_t width, uint32_t height,
+ bool isSupported, uint8_t componentCount,
+ wgpu::TextureFormat baseFormat = wgpu::TextureFormat::Undefined) {
Format internalFormat;
internalFormat.format = format;
- internalFormat.baseFormat = format;
- internalFormat.isRenderable = true;
- internalFormat.isCompressed = false;
+ internalFormat.isRenderable = false;
+ internalFormat.isCompressed = true;
internalFormat.isSupported = isSupported;
internalFormat.supportsStorageUsage = false;
- internalFormat.supportsMultisample = true;
+ internalFormat.supportsMultisample = false;
internalFormat.supportsResolveTarget = false;
- internalFormat.aspects = Aspect::Stencil;
- internalFormat.componentCount = 1;
-
- // Duplicate the data for the stencil aspect in both the first and second aspect info.
- // - aspectInfo[0] is used by AddMultiAspectFormat to copy the info for the whole
- // stencil8 aspect of depth-stencil8 formats.
- // - aspectInfo[1] is the actual info used in the rest of Dawn since
- // GetAspectIndex(Aspect::Stencil) is 1.
- ASSERT(GetAspectIndex(Aspect::Stencil) == 1);
-
- internalFormat.aspectInfo[0].block.byteSize = 1;
- internalFormat.aspectInfo[0].block.width = 1;
- internalFormat.aspectInfo[0].block.height = 1;
- internalFormat.aspectInfo[0].baseType = wgpu::TextureComponentType::Uint;
- internalFormat.aspectInfo[0].supportedSampleTypes = SampleTypeBit::Uint;
- internalFormat.aspectInfo[0].format = format;
+ internalFormat.aspects = Aspect::Color;
+ internalFormat.componentCount = componentCount;
- internalFormat.aspectInfo[1] = internalFormat.aspectInfo[0];
+            // The default baseFormat of each compressed format should be the format itself.
+ if (baseFormat == wgpu::TextureFormat::Undefined) {
+ internalFormat.baseFormat = format;
+ } else {
+ internalFormat.baseFormat = baseFormat;
+ }
+ AspectInfo* firstAspect = internalFormat.aspectInfo.data();
+ firstAspect->block.byteSize = byteSize;
+ firstAspect->block.width = width;
+ firstAspect->block.height = height;
+ firstAspect->baseType = wgpu::TextureComponentType::Float;
+ firstAspect->supportedSampleTypes = kAnyFloat;
+ firstAspect->format = format;
AddFormat(internalFormat);
};
- auto AddCompressedFormat =
- [&AddFormat](wgpu::TextureFormat format, uint32_t byteSize, uint32_t width,
- uint32_t height, bool isSupported, uint8_t componentCount,
- wgpu::TextureFormat baseFormat = wgpu::TextureFormat::Undefined) {
- Format internalFormat;
- internalFormat.format = format;
- internalFormat.isRenderable = false;
- internalFormat.isCompressed = true;
- internalFormat.isSupported = isSupported;
- internalFormat.supportsStorageUsage = false;
- internalFormat.supportsMultisample = false;
- internalFormat.supportsResolveTarget = false;
- internalFormat.aspects = Aspect::Color;
- internalFormat.componentCount = componentCount;
-
- // Default baseFormat of each compressed formats should be themselves.
- if (baseFormat == wgpu::TextureFormat::Undefined) {
- internalFormat.baseFormat = format;
- } else {
- internalFormat.baseFormat = baseFormat;
- }
-
- AspectInfo* firstAspect = internalFormat.aspectInfo.data();
- firstAspect->block.byteSize = byteSize;
- firstAspect->block.width = width;
- firstAspect->block.height = height;
- firstAspect->baseType = wgpu::TextureComponentType::Float;
- firstAspect->supportedSampleTypes = kAnyFloat;
- firstAspect->format = format;
- AddFormat(internalFormat);
- };
-
- auto AddMultiAspectFormat = [&AddFormat, &table](wgpu::TextureFormat format, Aspect aspects,
- wgpu::TextureFormat firstFormat,
- wgpu::TextureFormat secondFormat,
- bool isRenderable, bool isSupported,
- bool supportsMultisample,
- uint8_t componentCount) {
+ auto AddMultiAspectFormat =
+ [&AddFormat, &table](wgpu::TextureFormat format, Aspect aspects,
+ wgpu::TextureFormat firstFormat, wgpu::TextureFormat secondFormat,
+ bool isRenderable, bool isSupported, bool supportsMultisample,
+ uint8_t componentCount) {
Format internalFormat;
internalFormat.format = format;
internalFormat.baseFormat = format;
@@ -348,7 +346,7 @@ namespace dawn::native {
AddFormat(internalFormat);
};
- // clang-format off
+ // clang-format off
// 1 byte color formats
AddColorFormat(wgpu::TextureFormat::R8Unorm, true, false, true, true, 1, kAnyFloat, 1);
AddColorFormat(wgpu::TextureFormat::R8Snorm, false, false, false, false, 1, kAnyFloat, 1);
@@ -479,13 +477,13 @@ namespace dawn::native {
AddMultiAspectFormat(wgpu::TextureFormat::R8BG8Biplanar420Unorm, Aspect::Plane0 | Aspect::Plane1,
wgpu::TextureFormat::R8Unorm, wgpu::TextureFormat::RG8Unorm, false, isMultiPlanarFormatSupported, false, 3);
- // clang-format on
+ // clang-format on
- // This checks that each format is set at least once, the second part of checking that all
- // formats are checked exactly once.
- ASSERT(formatsSet.all());
+ // This checks that each format is set at least once, the second part of checking that all
+    // formats are set exactly once.
+ ASSERT(formatsSet.all());
- return table;
- }
+ return table;
+}
} // namespace dawn::native
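Two small tricks in Format.cpp above are easy to miss in the reformatting: SampleTypeToSampleTypeBit() maps each wgpu::TextureSampleType value to the bit 1 << (value - 1), which the block of static_asserts pins down, and ComputeFormatIndex() relies on unsigned wraparound so that the undefined format (0) lands far outside the table. A self-contained sketch with illustrative local enums (not the wgpu types):

// Illustrative sketch, not Dawn code.
#include <cstdint>

enum class SampleTypeSketch : uint32_t { Undefined = 0, Float, UnfilterableFloat, Depth, Sint, Uint };
enum class SampleTypeBitSketch : uint8_t {
    None = 0x0, Float = 0x1, UnfilterableFloat = 0x2, Depth = 0x4, Sint = 0x8, Uint = 0x10
};

constexpr SampleTypeBitSketch ToBit(SampleTypeSketch s) {
    // Undefined (0) is handled separately in the real code; every other value lands on its
    // bitmask because the two enums are declared in the same order.
    return static_cast<SampleTypeBitSketch>(1u << (static_cast<uint32_t>(s) - 1u));
}
static_assert(ToBit(SampleTypeSketch::Depth) == SampleTypeBitSketch::Depth, "");

constexpr uint32_t kKnownFormatCountSketch = 96;
constexpr uint32_t FormatIndexSketch(uint32_t apiFormat) {
    // The undefined format is 0, so 0 - 1 wraps to 0xFFFFFFFF, always >= kKnownFormatCount.
    return apiFormat - 1u;
}
static_assert(FormatIndexSketch(0) >= kKnownFormatCountSketch, "");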
diff --git a/chromium/third_party/dawn/src/dawn/native/Format.h b/chromium/third_party/dawn/src/dawn/native/Format.h
index 509e64bdc1e..8f750da25b1 100644
--- a/chromium/third_party/dawn/src/dawn/native/Format.h
+++ b/chromium/third_party/dawn/src/dawn/native/Format.h
@@ -15,6 +15,8 @@
#ifndef SRC_DAWN_NATIVE_FORMAT_H_
#define SRC_DAWN_NATIVE_FORMAT_H_
+#include <array>
+
#include "dawn/native/dawn_platform.h"
#include "dawn/common/TypedInteger.h"
@@ -24,8 +26,6 @@
#include "dawn/native/Error.h"
#include "dawn/native/Subresource.h"
-#include <array>
-
// About multi-planar formats.
//
// Dawn supports additional multi-planar formats when the multiplanar-formats extension is enabled.
@@ -44,129 +44,129 @@
namespace dawn::native {
- enum class Aspect : uint8_t;
- class DeviceBase;
-
- // This mirrors wgpu::TextureSampleType as a bitmask instead.
- enum class SampleTypeBit : uint8_t {
- None = 0x0,
- Float = 0x1,
- UnfilterableFloat = 0x2,
- Depth = 0x4,
- Sint = 0x8,
- Uint = 0x10,
- };
-
- // Converts an wgpu::TextureComponentType to its bitmask representation.
- SampleTypeBit ToSampleTypeBit(wgpu::TextureComponentType type);
- // Converts an wgpu::TextureSampleType to its bitmask representation.
- SampleTypeBit SampleTypeToSampleTypeBit(wgpu::TextureSampleType sampleType);
-
- struct TexelBlockInfo {
- uint32_t byteSize;
- uint32_t width;
- uint32_t height;
- };
-
- struct AspectInfo {
- TexelBlockInfo block;
- // TODO(crbug.com/dawn/367): Replace TextureComponentType with TextureSampleType, or make it
- // an internal Dawn enum.
- wgpu::TextureComponentType baseType;
- SampleTypeBit supportedSampleTypes;
- wgpu::TextureFormat format = wgpu::TextureFormat::Undefined;
- };
-
- // The number of formats Dawn knows about. Asserts in BuildFormatTable ensure that this is the
- // exact number of known format.
- static constexpr uint32_t kKnownFormatCount = 96;
-
- using FormatIndex = TypedInteger<struct FormatIndexT, uint32_t>;
-
- struct Format;
- using FormatTable = ityp::array<FormatIndex, Format, kKnownFormatCount>;
-
- // A wgpu::TextureFormat along with all the information about it necessary for validation.
- struct Format {
- wgpu::TextureFormat format;
-
- // TODO(crbug.com/dawn/1332): These members could be stored in a Format capability matrix.
- bool isRenderable;
- bool isCompressed;
- // A format can be known but not supported because it is part of a disabled extension.
- bool isSupported;
- bool supportsStorageUsage;
- bool supportsMultisample;
- bool supportsResolveTarget;
- Aspect aspects;
- // Only used for renderable color formats, number of color channels.
- uint8_t componentCount;
-
- bool IsColor() const;
- bool HasDepth() const;
- bool HasStencil() const;
- bool HasDepthOrStencil() const;
-
- // IsMultiPlanar() returns true if the format allows selecting a plane index. This is only
- // allowed by multi-planar formats (ex. NV12).
- bool IsMultiPlanar() const;
-
- const AspectInfo& GetAspectInfo(wgpu::TextureAspect aspect) const;
- const AspectInfo& GetAspectInfo(Aspect aspect) const;
-
- // The index of the format in the list of all known formats: a unique number for each format
- // in [0, kKnownFormatCount)
- FormatIndex GetIndex() const;
-
- // baseFormat represents the memory layout of the format.
- // If two formats has the same baseFormat, they could copy to and be viewed as the other
- // format. Currently two formats have the same baseFormat if they differ only in sRGB-ness.
- wgpu::TextureFormat baseFormat;
-
- // Returns true if the formats are copy compatible.
- // Currently means they differ only in sRGB-ness.
- bool CopyCompatibleWith(const Format& format) const;
-
- // Returns true if the formats are texture view format compatible.
- // Currently means they differ only in sRGB-ness.
- bool ViewCompatibleWith(const Format& format) const;
-
- private:
- // Used to store the aspectInfo for one or more planes. For single plane "color" formats,
- // only the first aspect info or aspectInfo[0] is valid. For depth-stencil, the first aspect
- // info is depth and the second aspect info is stencil. For multi-planar formats,
- // aspectInfo[i] is the ith plane.
- std::array<AspectInfo, kMaxPlanesPerFormat> aspectInfo;
-
- friend FormatTable BuildFormatTable(const DeviceBase* device);
- };
-
- class FormatSet : public ityp::bitset<FormatIndex, kKnownFormatCount> {
- using Base = ityp::bitset<FormatIndex, kKnownFormatCount>;
-
- public:
- using Base::Base;
- using Base::operator[];
-
- bool operator[](const Format& format) const;
- typename Base::reference operator[](const Format& format);
- };
-
- // Implementation details of the format table in the device.
-
- // Returns the index of a format in the FormatTable.
- FormatIndex ComputeFormatIndex(wgpu::TextureFormat format);
- // Builds the format table with the extensions enabled on the device.
- FormatTable BuildFormatTable(const DeviceBase* device);
+enum class Aspect : uint8_t;
+class DeviceBase;
+
+// This mirrors wgpu::TextureSampleType as a bitmask instead.
+enum class SampleTypeBit : uint8_t {
+ None = 0x0,
+ Float = 0x1,
+ UnfilterableFloat = 0x2,
+ Depth = 0x4,
+ Sint = 0x8,
+ Uint = 0x10,
+};
+
+// Converts a wgpu::TextureComponentType to its bitmask representation.
+SampleTypeBit ToSampleTypeBit(wgpu::TextureComponentType type);
+// Converts a wgpu::TextureSampleType to its bitmask representation.
+SampleTypeBit SampleTypeToSampleTypeBit(wgpu::TextureSampleType sampleType);
+
+struct TexelBlockInfo {
+ uint32_t byteSize;
+ uint32_t width;
+ uint32_t height;
+};
+
+struct AspectInfo {
+ TexelBlockInfo block;
+ // TODO(crbug.com/dawn/367): Replace TextureComponentType with TextureSampleType, or make it
+ // an internal Dawn enum.
+ wgpu::TextureComponentType baseType{};
+ SampleTypeBit supportedSampleTypes{};
+ wgpu::TextureFormat format = wgpu::TextureFormat::Undefined;
+};
+
+// The number of formats Dawn knows about. Asserts in BuildFormatTable ensure that this is the
+// exact number of known formats.
+static constexpr uint32_t kKnownFormatCount = 96;
+
+using FormatIndex = TypedInteger<struct FormatIndexT, uint32_t>;
+
+struct Format;
+using FormatTable = ityp::array<FormatIndex, Format, kKnownFormatCount>;
+
+// A wgpu::TextureFormat along with all the information about it necessary for validation.
+struct Format {
+ wgpu::TextureFormat format = wgpu::TextureFormat::Undefined;
+
+ // TODO(crbug.com/dawn/1332): These members could be stored in a Format capability matrix.
+ bool isRenderable = false;
+ bool isCompressed = false;
+ // A format can be known but not supported because it is part of a disabled extension.
+ bool isSupported = false;
+ bool supportsStorageUsage = false;
+ bool supportsMultisample = false;
+ bool supportsResolveTarget = false;
+ Aspect aspects{};
+    // The number of color channels. Only used for renderable color formats.
+ uint8_t componentCount = 0;
+
+ bool IsColor() const;
+ bool HasDepth() const;
+ bool HasStencil() const;
+ bool HasDepthOrStencil() const;
+
+    // IsMultiPlanar() returns true if the format allows selecting a plane index. This is only
+    // allowed by multi-planar formats (e.g. NV12).
+ bool IsMultiPlanar() const;
+
+ const AspectInfo& GetAspectInfo(wgpu::TextureAspect aspect) const;
+ const AspectInfo& GetAspectInfo(Aspect aspect) const;
+
+ // The index of the format in the list of all known formats: a unique number for each format
+ // in [0, kKnownFormatCount)
+ FormatIndex GetIndex() const;
+
+ // baseFormat represents the memory layout of the format.
+    // If two formats have the same baseFormat, they can be copied to and viewed as the other
+ // format. Currently two formats have the same baseFormat if they differ only in sRGB-ness.
+ wgpu::TextureFormat baseFormat = wgpu::TextureFormat::Undefined;
+
+ // Returns true if the formats are copy compatible.
+ // Currently means they differ only in sRGB-ness.
+ bool CopyCompatibleWith(const Format& format) const;
+
+ // Returns true if the formats are texture view format compatible.
+ // Currently means they differ only in sRGB-ness.
+ bool ViewCompatibleWith(const Format& format) const;
+
+ private:
+ // Used to store the aspectInfo for one or more planes. For single plane "color" formats,
+ // only the first aspect info or aspectInfo[0] is valid. For depth-stencil, the first aspect
+ // info is depth and the second aspect info is stencil. For multi-planar formats,
+ // aspectInfo[i] is the ith plane.
+ std::array<AspectInfo, kMaxPlanesPerFormat> aspectInfo{};
+
+ friend FormatTable BuildFormatTable(const DeviceBase* device);
+};
+
+class FormatSet : public ityp::bitset<FormatIndex, kKnownFormatCount> {
+ using Base = ityp::bitset<FormatIndex, kKnownFormatCount>;
+
+ public:
+ using Base::Base;
+ using Base::operator[];
+
+ bool operator[](const Format& format) const;
+ typename Base::reference operator[](const Format& format);
+};
+
+// Implementation details of the format table in the device.
+
+// Returns the index of a format in the FormatTable.
+FormatIndex ComputeFormatIndex(wgpu::TextureFormat format);
+// Builds the format table with the extensions enabled on the device.
+FormatTable BuildFormatTable(const DeviceBase* device);
} // namespace dawn::native
namespace dawn {
- template <>
- struct IsDawnBitmask<dawn::native::SampleTypeBit> {
- static constexpr bool enable = true;
- };
+template <>
+struct IsDawnBitmask<dawn::native::SampleTypeBit> {
+ static constexpr bool enable = true;
+};
} // namespace dawn
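
For context, here is a minimal standalone sketch (not Dawn code) of how a bitmask mirror like SampleTypeBit can be combined and queried, for example to check whether a format's supported sample types include a requested one. The enum values are copied from the header above; the helper operator and HasSampleType are illustrative stand-ins for the operators that the IsDawnBitmask specialization enables in Dawn itself.

    #include <cstdint>
    #include <cstdio>

    // Mirror of the SampleTypeBit values declared in Formats.h above.
    enum class SampleTypeBit : uint8_t {
        None = 0x0,
        Float = 0x1,
        UnfilterableFloat = 0x2,
        Depth = 0x4,
        Sint = 0x8,
        Uint = 0x10,
    };

    // Illustrative helpers; Dawn gets equivalent operators through IsDawnBitmask.
    constexpr SampleTypeBit operator|(SampleTypeBit a, SampleTypeBit b) {
        return static_cast<SampleTypeBit>(static_cast<uint8_t>(a) | static_cast<uint8_t>(b));
    }
    constexpr bool HasSampleType(SampleTypeBit supported, SampleTypeBit requested) {
        return (static_cast<uint8_t>(supported) & static_cast<uint8_t>(requested)) != 0;
    }

    int main() {
        // A float-renderable color format typically advertises both filterable and
        // unfilterable float sampling; depth sampling stays unset.
        SampleTypeBit supported = SampleTypeBit::Float | SampleTypeBit::UnfilterableFloat;
        std::printf("float: %d depth: %d\n",
                    HasSampleType(supported, SampleTypeBit::Float),
                    HasSampleType(supported, SampleTypeBit::Depth));  // prints "float: 1 depth: 0"
        return 0;
    }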
diff --git a/chromium/third_party/dawn/src/dawn/native/Forward.h b/chromium/third_party/dawn/src/dawn/native/Forward.h
index 80125a4dcd5..541cb9c80a7 100644
--- a/chromium/third_party/dawn/src/dawn/native/Forward.h
+++ b/chromium/third_party/dawn/src/dawn/native/Forward.h
@@ -22,49 +22,50 @@ class Ref;
namespace dawn::native {
- enum class ObjectType : uint32_t;
+enum class ObjectType : uint32_t;
- class AdapterBase;
- class BindGroupBase;
- class BindGroupLayoutBase;
- class BufferBase;
- class ComputePipelineBase;
- class CommandBufferBase;
- class CommandEncoder;
- class ComputePassEncoder;
- class ExternalTextureBase;
- class InstanceBase;
- class PipelineBase;
- class PipelineLayoutBase;
- class QuerySetBase;
- class QueueBase;
- class RenderBundleBase;
- class RenderBundleEncoder;
- class RenderPassEncoder;
- class RenderPipelineBase;
- class ResourceHeapBase;
- class SamplerBase;
- class Surface;
- class ShaderModuleBase;
- class StagingBufferBase;
- class SwapChainBase;
- class NewSwapChainBase;
- class TextureBase;
- class TextureViewBase;
+class AdapterBase;
+class BindGroupBase;
+class BindGroupLayoutBase;
+class BufferBase;
+class ComputePipelineBase;
+class CommandBufferBase;
+class CommandEncoder;
+class ComputePassEncoder;
+class ExternalTextureBase;
+class InstanceBase;
+class PipelineBase;
+class PipelineCacheBase;
+class PipelineLayoutBase;
+class QuerySetBase;
+class QueueBase;
+class RenderBundleBase;
+class RenderBundleEncoder;
+class RenderPassEncoder;
+class RenderPipelineBase;
+class ResourceHeapBase;
+class SamplerBase;
+class Surface;
+class ShaderModuleBase;
+class StagingBufferBase;
+class SwapChainBase;
+class NewSwapChainBase;
+class TextureBase;
+class TextureViewBase;
- class DeviceBase;
+class DeviceBase;
- template <typename T>
- class PerStage;
+template <typename T>
+class PerStage;
- struct Format;
+struct Format;
- // Aliases for frontend-only types.
- using CommandEncoderBase = CommandEncoder;
- using ComputePassEncoderBase = ComputePassEncoder;
- using RenderBundleEncoderBase = RenderBundleEncoder;
- using RenderPassEncoderBase = RenderPassEncoder;
- using SurfaceBase = Surface;
+// Aliases for frontend-only types.
+using CommandEncoderBase = CommandEncoder;
+using ComputePassEncoderBase = ComputePassEncoder;
+using RenderBundleEncoderBase = RenderBundleEncoder;
+using RenderPassEncoderBase = RenderPassEncoder;
+using SurfaceBase = Surface;
} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/IndirectDrawMetadata.cpp b/chromium/third_party/dawn/src/dawn/native/IndirectDrawMetadata.cpp
index ebe0e7fb909..466b67735f9 100644
--- a/chromium/third_party/dawn/src/dawn/native/IndirectDrawMetadata.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/IndirectDrawMetadata.cpp
@@ -14,180 +14,212 @@
#include "dawn/native/IndirectDrawMetadata.h"
+#include <algorithm>
+#include <utility>
+
#include "dawn/common/Constants.h"
#include "dawn/common/RefCounted.h"
#include "dawn/native/IndirectDrawValidationEncoder.h"
#include "dawn/native/Limits.h"
#include "dawn/native/RenderBundle.h"
-#include <algorithm>
-#include <utility>
-
namespace dawn::native {
- uint32_t ComputeMaxIndirectValidationBatchOffsetRange(const CombinedLimits& limits) {
- return limits.v1.maxStorageBufferBindingSize - limits.v1.minStorageBufferOffsetAlignment -
- kDrawIndexedIndirectSize;
- }
-
- IndirectDrawMetadata::IndexedIndirectBufferValidationInfo::IndexedIndirectBufferValidationInfo(
- BufferBase* indirectBuffer)
- : mIndirectBuffer(indirectBuffer) {
- }
-
- void IndirectDrawMetadata::IndexedIndirectBufferValidationInfo::AddIndexedIndirectDraw(
- uint32_t maxDrawCallsPerIndirectValidationBatch,
- uint32_t maxBatchOffsetRange,
- IndexedIndirectDraw draw) {
- const uint64_t newOffset = draw.clientBufferOffset;
- auto it = mBatches.begin();
- while (it != mBatches.end()) {
- IndexedIndirectValidationBatch& batch = *it;
- if (batch.draws.size() >= maxDrawCallsPerIndirectValidationBatch) {
- // This batch is full. If its minOffset is to the right of the new offset, we can
- // just insert a new batch here.
- if (newOffset < batch.minOffset) {
- break;
- }
-
- // Otherwise keep looking.
- ++it;
- continue;
- }
-
- if (newOffset >= batch.minOffset && newOffset <= batch.maxOffset) {
- batch.draws.push_back(std::move(draw));
- return;
- }
-
- if (newOffset < batch.minOffset && batch.maxOffset - newOffset <= maxBatchOffsetRange) {
- // We can extend this batch to the left in order to fit the new offset.
- batch.minOffset = newOffset;
- batch.draws.push_back(std::move(draw));
- return;
- }
-
- if (newOffset > batch.maxOffset && newOffset - batch.minOffset <= maxBatchOffsetRange) {
- // We can extend this batch to the right in order to fit the new offset.
- batch.maxOffset = newOffset;
- batch.draws.push_back(std::move(draw));
- return;
- }
-
+uint64_t ComputeMaxIndirectValidationBatchOffsetRange(const CombinedLimits& limits) {
+ return limits.v1.maxStorageBufferBindingSize - limits.v1.minStorageBufferOffsetAlignment -
+ kDrawIndexedIndirectSize;
+}
+
+IndirectDrawMetadata::IndexedIndirectBufferValidationInfo::IndexedIndirectBufferValidationInfo(
+ BufferBase* indirectBuffer)
+ : mIndirectBuffer(indirectBuffer) {}
+
+void IndirectDrawMetadata::IndexedIndirectBufferValidationInfo::AddIndirectDraw(
+ uint32_t maxDrawCallsPerIndirectValidationBatch,
+ uint64_t maxBatchOffsetRange,
+ IndirectDraw draw) {
+ const uint64_t newOffset = draw.inputBufferOffset;
+ auto it = mBatches.begin();
+ while (it != mBatches.end()) {
+ IndirectValidationBatch& batch = *it;
+ if (batch.draws.size() >= maxDrawCallsPerIndirectValidationBatch) {
+ // This batch is full. If its minOffset is to the right of the new offset, we can
+ // just insert a new batch here.
if (newOffset < batch.minOffset) {
- // We want to insert a new batch just before this one.
break;
}
+ // Otherwise keep looking.
++it;
+ continue;
}
- IndexedIndirectValidationBatch newBatch;
- newBatch.minOffset = newOffset;
- newBatch.maxOffset = newOffset;
- newBatch.draws.push_back(std::move(draw));
-
- mBatches.insert(it, std::move(newBatch));
- }
+ if (newOffset >= batch.minOffset && newOffset <= batch.maxOffset) {
+ batch.draws.push_back(std::move(draw));
+ return;
+ }
- void IndirectDrawMetadata::IndexedIndirectBufferValidationInfo::AddBatch(
- uint32_t maxDrawCallsPerIndirectValidationBatch,
- uint32_t maxBatchOffsetRange,
- const IndexedIndirectValidationBatch& newBatch) {
- auto it = mBatches.begin();
- while (it != mBatches.end()) {
- IndexedIndirectValidationBatch& batch = *it;
- uint64_t min = std::min(newBatch.minOffset, batch.minOffset);
- uint64_t max = std::max(newBatch.maxOffset, batch.maxOffset);
- if (max - min <= maxBatchOffsetRange && batch.draws.size() + newBatch.draws.size() <=
- maxDrawCallsPerIndirectValidationBatch) {
- // This batch fits within the limits of an existing batch. Merge it.
- batch.minOffset = min;
- batch.maxOffset = max;
- batch.draws.insert(batch.draws.end(), newBatch.draws.begin(), newBatch.draws.end());
- return;
- }
+ if (newOffset < batch.minOffset && batch.maxOffset - newOffset <= maxBatchOffsetRange) {
+ // We can extend this batch to the left in order to fit the new offset.
+ batch.minOffset = newOffset;
+ batch.draws.push_back(std::move(draw));
+ return;
+ }
- if (newBatch.minOffset < batch.minOffset) {
- break;
- }
+ if (newOffset > batch.maxOffset && newOffset - batch.minOffset <= maxBatchOffsetRange) {
+ // We can extend this batch to the right in order to fit the new offset.
+ batch.maxOffset = newOffset;
+ batch.draws.push_back(std::move(draw));
+ return;
+ }
- ++it;
+ if (newOffset < batch.minOffset) {
+ // We want to insert a new batch just before this one.
+ break;
}
- mBatches.push_back(newBatch);
- }
- const std::vector<IndirectDrawMetadata::IndexedIndirectValidationBatch>&
- IndirectDrawMetadata::IndexedIndirectBufferValidationInfo::GetBatches() const {
- return mBatches;
+ ++it;
}
- IndirectDrawMetadata::IndirectDrawMetadata(const CombinedLimits& limits)
- : mMaxDrawCallsPerBatch(ComputeMaxDrawCallsPerIndirectValidationBatch(limits)),
- mMaxBatchOffsetRange(ComputeMaxIndirectValidationBatchOffsetRange(limits)) {
+ IndirectValidationBatch newBatch;
+ newBatch.minOffset = newOffset;
+ newBatch.maxOffset = newOffset;
+ newBatch.draws.push_back(std::move(draw));
+
+ mBatches.insert(it, std::move(newBatch));
+}
+
+void IndirectDrawMetadata::IndexedIndirectBufferValidationInfo::AddBatch(
+ uint32_t maxDrawCallsPerIndirectValidationBatch,
+ uint64_t maxBatchOffsetRange,
+ const IndirectValidationBatch& newBatch) {
+ auto it = mBatches.begin();
+ while (it != mBatches.end()) {
+ IndirectValidationBatch& batch = *it;
+ uint64_t min = std::min(newBatch.minOffset, batch.minOffset);
+ uint64_t max = std::max(newBatch.maxOffset, batch.maxOffset);
+ if (max - min <= maxBatchOffsetRange &&
+ batch.draws.size() + newBatch.draws.size() <= maxDrawCallsPerIndirectValidationBatch) {
+ // This batch fits within the limits of an existing batch. Merge it.
+ batch.minOffset = min;
+ batch.maxOffset = max;
+ batch.draws.insert(batch.draws.end(), newBatch.draws.begin(), newBatch.draws.end());
+ return;
+ }
+
+ if (newBatch.minOffset < batch.minOffset) {
+ break;
+ }
+
+ ++it;
}
+ mBatches.push_back(newBatch);
+}
- IndirectDrawMetadata::~IndirectDrawMetadata() = default;
+const std::vector<IndirectDrawMetadata::IndirectValidationBatch>&
+IndirectDrawMetadata::IndexedIndirectBufferValidationInfo::GetBatches() const {
+ return mBatches;
+}
- IndirectDrawMetadata::IndirectDrawMetadata(IndirectDrawMetadata&&) = default;
+IndirectDrawMetadata::IndirectDrawMetadata(const CombinedLimits& limits)
+ : mMaxBatchOffsetRange(ComputeMaxIndirectValidationBatchOffsetRange(limits)),
+ mMaxDrawCallsPerBatch(ComputeMaxDrawCallsPerIndirectValidationBatch(limits)) {}
- IndirectDrawMetadata& IndirectDrawMetadata::operator=(IndirectDrawMetadata&&) = default;
+IndirectDrawMetadata::~IndirectDrawMetadata() = default;
- IndirectDrawMetadata::IndexedIndirectBufferValidationInfoMap*
- IndirectDrawMetadata::GetIndexedIndirectBufferValidationInfo() {
- return &mIndexedIndirectBufferValidationInfo;
- }
+IndirectDrawMetadata::IndirectDrawMetadata(IndirectDrawMetadata&&) = default;
- void IndirectDrawMetadata::AddBundle(RenderBundleBase* bundle) {
- auto [_, inserted] = mAddedBundles.insert(bundle);
- if (!inserted) {
- return;
- }
+IndirectDrawMetadata& IndirectDrawMetadata::operator=(IndirectDrawMetadata&&) = default;
- for (const auto& [config, validationInfo] :
- bundle->GetIndirectDrawMetadata().mIndexedIndirectBufferValidationInfo) {
- auto it = mIndexedIndirectBufferValidationInfo.lower_bound(config);
- if (it != mIndexedIndirectBufferValidationInfo.end() && it->first == config) {
- // We already have batches for the same config. Merge the new ones in.
- for (const IndexedIndirectValidationBatch& batch : validationInfo.GetBatches()) {
- it->second.AddBatch(mMaxDrawCallsPerBatch, mMaxBatchOffsetRange, batch);
- }
- } else {
- mIndexedIndirectBufferValidationInfo.emplace_hint(it, config, validationInfo);
- }
- }
+IndirectDrawMetadata::IndexedIndirectBufferValidationInfoMap*
+IndirectDrawMetadata::GetIndexedIndirectBufferValidationInfo() {
+ return &mIndexedIndirectBufferValidationInfo;
+}
+
+void IndirectDrawMetadata::AddBundle(RenderBundleBase* bundle) {
+ auto [_, inserted] = mAddedBundles.insert(bundle);
+ if (!inserted) {
+ return;
}
- void IndirectDrawMetadata::AddIndexedIndirectDraw(wgpu::IndexFormat indexFormat,
- uint64_t indexBufferSize,
- BufferBase* indirectBuffer,
- uint64_t indirectOffset,
- DrawIndexedIndirectCmd* cmd) {
- uint64_t numIndexBufferElements;
- switch (indexFormat) {
- case wgpu::IndexFormat::Uint16:
- numIndexBufferElements = indexBufferSize / 2;
- break;
- case wgpu::IndexFormat::Uint32:
- numIndexBufferElements = indexBufferSize / 4;
- break;
- case wgpu::IndexFormat::Undefined:
- UNREACHABLE();
+ for (const auto& [config, validationInfo] :
+ bundle->GetIndirectDrawMetadata().mIndexedIndirectBufferValidationInfo) {
+ auto it = mIndexedIndirectBufferValidationInfo.lower_bound(config);
+ if (it != mIndexedIndirectBufferValidationInfo.end() && it->first == config) {
+ // We already have batches for the same config. Merge the new ones in.
+ for (const IndirectValidationBatch& batch : validationInfo.GetBatches()) {
+ it->second.AddBatch(mMaxDrawCallsPerBatch, mMaxBatchOffsetRange, batch);
+ }
+ } else {
+ mIndexedIndirectBufferValidationInfo.emplace_hint(it, config, validationInfo);
}
+ }
+}
+
+void IndirectDrawMetadata::AddIndexedIndirectDraw(wgpu::IndexFormat indexFormat,
+ uint64_t indexBufferSize,
+ BufferBase* indirectBuffer,
+ uint64_t indirectOffset,
+ bool duplicateBaseVertexInstance,
+ DrawIndexedIndirectCmd* cmd) {
+ uint64_t numIndexBufferElements;
+ switch (indexFormat) {
+ case wgpu::IndexFormat::Uint16:
+ numIndexBufferElements = indexBufferSize / 2;
+ break;
+ case wgpu::IndexFormat::Uint32:
+ numIndexBufferElements = indexBufferSize / 4;
+ break;
+ case wgpu::IndexFormat::Undefined:
+ UNREACHABLE();
+ }
- const IndexedIndirectConfig config(indirectBuffer, numIndexBufferElements);
- auto it = mIndexedIndirectBufferValidationInfo.find(config);
- if (it == mIndexedIndirectBufferValidationInfo.end()) {
- auto result = mIndexedIndirectBufferValidationInfo.emplace(
- config, IndexedIndirectBufferValidationInfo(indirectBuffer));
- it = result.first;
- }
+ const IndexedIndirectConfig config = {indirectBuffer, numIndexBufferElements,
+ duplicateBaseVertexInstance, DrawType::Indexed};
+ auto it = mIndexedIndirectBufferValidationInfo.find(config);
+ if (it == mIndexedIndirectBufferValidationInfo.end()) {
+ auto result = mIndexedIndirectBufferValidationInfo.emplace(
+ config, IndexedIndirectBufferValidationInfo(indirectBuffer));
+ it = result.first;
+ }
- IndexedIndirectDraw draw;
- draw.clientBufferOffset = indirectOffset;
- draw.cmd = cmd;
- it->second.AddIndexedIndirectDraw(mMaxDrawCallsPerBatch, mMaxBatchOffsetRange,
- std::move(draw));
+ IndirectDraw draw{};
+ draw.inputBufferOffset = indirectOffset;
+ draw.cmd = cmd;
+ it->second.AddIndirectDraw(mMaxDrawCallsPerBatch, mMaxBatchOffsetRange, draw);
+}
+
+void IndirectDrawMetadata::AddIndirectDraw(BufferBase* indirectBuffer,
+ uint64_t indirectOffset,
+ bool duplicateBaseVertexInstance,
+ DrawIndirectCmd* cmd) {
+ const IndexedIndirectConfig config = {indirectBuffer, 0, duplicateBaseVertexInstance,
+ DrawType::NonIndexed};
+ auto it = mIndexedIndirectBufferValidationInfo.find(config);
+ if (it == mIndexedIndirectBufferValidationInfo.end()) {
+ auto result = mIndexedIndirectBufferValidationInfo.emplace(
+ config, IndexedIndirectBufferValidationInfo(indirectBuffer));
+ it = result.first;
}
+ IndirectDraw draw{};
+ draw.inputBufferOffset = indirectOffset;
+ draw.cmd = cmd;
+ it->second.AddIndirectDraw(mMaxDrawCallsPerBatch, mMaxBatchOffsetRange, draw);
+}
+
+bool IndirectDrawMetadata::IndexedIndirectConfig::operator<(
+ const IndexedIndirectConfig& other) const {
+ return std::tie(inputIndirectBuffer, numIndexBufferElements, duplicateBaseVertexInstance,
+ drawType) < std::tie(other.inputIndirectBuffer, other.numIndexBufferElements,
+ other.duplicateBaseVertexInstance, other.drawType);
+}
+
+bool IndirectDrawMetadata::IndexedIndirectConfig::operator==(
+ const IndexedIndirectConfig& other) const {
+ return std::tie(inputIndirectBuffer, numIndexBufferElements, duplicateBaseVertexInstance,
+ drawType) == std::tie(other.inputIndirectBuffer, other.numIndexBufferElements,
+ other.duplicateBaseVertexInstance, other.drawType);
+}
+
} // namespace dawn::native
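
To make the batching policy in AddIndirectDraw easier to follow, here is a simplified standalone sketch of the same insertion logic, assuming only the offset range and draw count matter; Batch, AddOffset and the literal limits below are made-up stand-ins rather than Dawn types.

    #include <cstdint>
    #include <vector>

    // Simplified stand-in for IndirectValidationBatch: an offset range plus a draw count.
    struct Batch {
        uint64_t minOffset;
        uint64_t maxOffset;
        uint32_t numDraws;
    };

    // Mirrors the insertion policy above: batches stay sorted by minOffset, a new offset joins
    // an existing batch when it fits (possibly widening the range up to maxRange), otherwise a
    // fresh single-draw batch is inserted at its sorted position.
    void AddOffset(std::vector<Batch>& batches, uint64_t offset, uint32_t maxDraws,
                   uint64_t maxRange) {
        auto it = batches.begin();
        while (it != batches.end()) {
            Batch& b = *it;
            if (b.numDraws >= maxDraws) {
                if (offset < b.minOffset) break;  // full batch; a new one goes before it
                ++it;
                continue;
            }
            if (offset >= b.minOffset && offset <= b.maxOffset) {
                b.numDraws++;  // already covered by this batch's range
                return;
            }
            if (offset < b.minOffset && b.maxOffset - offset <= maxRange) {
                b.minOffset = offset;  // extend the batch to the left
                b.numDraws++;
                return;
            }
            if (offset > b.maxOffset && offset - b.minOffset <= maxRange) {
                b.maxOffset = offset;  // extend the batch to the right
                b.numDraws++;
                return;
            }
            if (offset < b.minOffset) break;  // insert a new batch just before this one
            ++it;
        }
        batches.insert(it, Batch{offset, offset, 1});
    }

    int main() {
        std::vector<Batch> batches;
        AddOffset(batches, 256, /*maxDraws=*/3, /*maxRange=*/1024);  // new batch [256, 256]
        AddOffset(batches, 512, 3, 1024);                            // widened to [256, 512]
        AddOffset(batches, 4096, 3, 1024);                           // too far away: second batch
        return batches.size() == 2 ? 0 : 1;
    }

Offsets that land within maxRange of an existing batch simply widen it, which is why a typical render pass ends up with a single batch per indirect buffer.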
diff --git a/chromium/third_party/dawn/src/dawn/native/IndirectDrawMetadata.h b/chromium/third_party/dawn/src/dawn/native/IndirectDrawMetadata.h
index 76f61db4999..87c03bad6db 100644
--- a/chromium/third_party/dawn/src/dawn/native/IndirectDrawMetadata.h
+++ b/chromium/third_party/dawn/src/dawn/native/IndirectDrawMetadata.h
@@ -15,111 +15,128 @@
#ifndef SRC_DAWN_NATIVE_INDIRECTDRAWMETADATA_H_
#define SRC_DAWN_NATIVE_INDIRECTDRAWMETADATA_H_
-#include "dawn/common/NonCopyable.h"
-#include "dawn/common/RefCounted.h"
-#include "dawn/native/Buffer.h"
-#include "dawn/native/CommandBufferStateTracker.h"
-#include "dawn/native/Commands.h"
-
#include <cstdint>
#include <map>
#include <set>
#include <utility>
#include <vector>
+#include "dawn/common/NonCopyable.h"
+#include "dawn/common/RefCounted.h"
+#include "dawn/native/Buffer.h"
+#include "dawn/native/CommandBufferStateTracker.h"
+#include "dawn/native/Commands.h"
+
namespace dawn::native {
- class RenderBundleBase;
- struct CombinedLimits;
+class RenderBundleBase;
+struct CombinedLimits;
+
+// In the unlikely scenario that the indirect offsets used for a single buffer span more than
+// this length of the buffer, the validation work is split into multiple batches.
+uint64_t ComputeMaxIndirectValidationBatchOffsetRange(const CombinedLimits& limits);
+
+// Metadata corresponding to the validation requirements of a single render pass. This metadata
+// is accumulated while its corresponding render pass is encoded, and is later used to encode
+// validation commands to be inserted into the command buffer just before the render pass's own
+// commands.
+class IndirectDrawMetadata : public NonCopyable {
+ public:
+ struct IndirectDraw {
+ uint64_t inputBufferOffset;
+ // This is a pointer to the command that should be populated with the validated
+ // indirect scratch buffer. It is only valid up until the encoded command buffer
+ // is submitted.
+ DrawIndirectCmd* cmd;
+ };
- // In the unlikely scenario that indirect offsets used over a single buffer span more than
- // this length of the buffer, we split the validation work into multiple batches.
- uint32_t ComputeMaxIndirectValidationBatchOffsetRange(const CombinedLimits& limits);
+ struct IndirectValidationBatch {
+ uint64_t minOffset;
+ uint64_t maxOffset;
+ std::vector<IndirectDraw> draws;
+ };
- // Metadata corresponding to the validation requirements of a single render pass. This metadata
- // is accumulated while its corresponding render pass is encoded, and is later used to encode
- // validation commands to be inserted into the command buffer just before the render pass's own
- // commands.
- class IndirectDrawMetadata : public NonCopyable {
+ // Tracks information about every draw call in this render pass which uses the same indirect
+ // buffer and the same-sized index buffer. Calls are grouped by indirect offset ranges so
+ // that validation work can be chunked efficiently if necessary.
+ class IndexedIndirectBufferValidationInfo {
public:
- struct IndexedIndirectDraw {
- uint64_t clientBufferOffset;
- // This is a pointer to the command that should be populated with the validated
- // indirect scratch buffer. It is only valid up until the encoded command buffer
- // is submitted.
- DrawIndexedIndirectCmd* cmd;
- };
-
- struct IndexedIndirectValidationBatch {
- uint64_t minOffset;
- uint64_t maxOffset;
- std::vector<IndexedIndirectDraw> draws;
- };
-
- // Tracks information about every draw call in this render pass which uses the same indirect
- // buffer and the same-sized index buffer. Calls are grouped by indirect offset ranges so
- // that validation work can be chunked efficiently if necessary.
- class IndexedIndirectBufferValidationInfo {
- public:
- explicit IndexedIndirectBufferValidationInfo(BufferBase* indirectBuffer);
-
- // Logs a new drawIndexedIndirect call for the render pass. `cmd` is updated with an
- // assigned (and deferred) buffer ref and relative offset before returning.
- void AddIndexedIndirectDraw(uint32_t maxDrawCallsPerIndirectValidationBatch,
- uint32_t maxBatchOffsetRange,
- IndexedIndirectDraw draw);
-
- // Adds draw calls from an already-computed batch, e.g. from a previously encoded
- // RenderBundle. The added batch is merged into an existing batch if possible, otherwise
- // it's added to mBatch.
- void AddBatch(uint32_t maxDrawCallsPerIndirectValidationBatch,
- uint32_t maxBatchOffsetRange,
- const IndexedIndirectValidationBatch& batch);
-
- const std::vector<IndexedIndirectValidationBatch>& GetBatches() const;
-
- private:
- Ref<BufferBase> mIndirectBuffer;
-
- // A list of information about validation batches that will need to be executed for the
- // corresponding indirect buffer prior to a single render pass. These are kept sorted by
- // minOffset and may overlap iff the number of offsets in one batch would otherwise
- // exceed some large upper bound (roughly ~33M draw calls).
- //
- // Since the most common expected cases will overwhelmingly require only a single
- // validation pass per render pass, this is optimized for efficient updates to a single
- // batch rather than for efficient manipulation of a large number of batches.
- std::vector<IndexedIndirectValidationBatch> mBatches;
- };
-
- // Combination of an indirect buffer reference, and the number of addressable index buffer
- // elements at the time of a draw call.
- using IndexedIndirectConfig = std::pair<BufferBase*, uint64_t>;
- using IndexedIndirectBufferValidationInfoMap =
- std::map<IndexedIndirectConfig, IndexedIndirectBufferValidationInfo>;
-
- explicit IndirectDrawMetadata(const CombinedLimits& limits);
- ~IndirectDrawMetadata();
-
- IndirectDrawMetadata(IndirectDrawMetadata&&);
- IndirectDrawMetadata& operator=(IndirectDrawMetadata&&);
-
- IndexedIndirectBufferValidationInfoMap* GetIndexedIndirectBufferValidationInfo();
-
- void AddBundle(RenderBundleBase* bundle);
- void AddIndexedIndirectDraw(wgpu::IndexFormat indexFormat,
- uint64_t indexBufferSize,
- BufferBase* indirectBuffer,
- uint64_t indirectOffset,
- DrawIndexedIndirectCmd* cmd);
+ explicit IndexedIndirectBufferValidationInfo(BufferBase* indirectBuffer);
+
+        // Logs a new drawIndirect or drawIndexedIndirect call for the render pass. `cmd` is
+        // updated with an assigned (and deferred) buffer ref and relative offset before returning.
+ void AddIndirectDraw(uint32_t maxDrawCallsPerIndirectValidationBatch,
+ uint64_t maxBatchOffsetRange,
+ IndirectDraw draw);
+
+ // Adds draw calls from an already-computed batch, e.g. from a previously encoded
+ // RenderBundle. The added batch is merged into an existing batch if possible, otherwise
+        // it's added to mBatches.
+ void AddBatch(uint32_t maxDrawCallsPerIndirectValidationBatch,
+ uint64_t maxBatchOffsetRange,
+ const IndirectValidationBatch& batch);
+
+ const std::vector<IndirectValidationBatch>& GetBatches() const;
private:
- IndexedIndirectBufferValidationInfoMap mIndexedIndirectBufferValidationInfo;
- std::set<RenderBundleBase*> mAddedBundles;
+ Ref<BufferBase> mIndirectBuffer;
+
+ // A list of information about validation batches that will need to be executed for the
+ // corresponding indirect buffer prior to a single render pass. These are kept sorted by
+ // minOffset and may overlap iff the number of offsets in one batch would otherwise
+ // exceed some large upper bound (roughly ~33M draw calls).
+ //
+ // Since the most common expected cases will overwhelmingly require only a single
+ // validation pass per render pass, this is optimized for efficient updates to a single
+ // batch rather than for efficient manipulation of a large number of batches.
+ std::vector<IndirectValidationBatch> mBatches;
+ };
- uint32_t mMaxDrawCallsPerBatch;
- uint32_t mMaxBatchOffsetRange;
+ enum class DrawType {
+ NonIndexed,
+ Indexed,
};
+ struct IndexedIndirectConfig {
+ BufferBase* inputIndirectBuffer;
+ uint64_t numIndexBufferElements;
+ bool duplicateBaseVertexInstance;
+ DrawType drawType;
+
+ bool operator<(const IndexedIndirectConfig& other) const;
+ bool operator==(const IndexedIndirectConfig& other) const;
+ };
+
+ using IndexedIndirectBufferValidationInfoMap =
+ std::map<IndexedIndirectConfig, IndexedIndirectBufferValidationInfo>;
+
+ explicit IndirectDrawMetadata(const CombinedLimits& limits);
+ ~IndirectDrawMetadata();
+
+ IndirectDrawMetadata(IndirectDrawMetadata&&);
+ IndirectDrawMetadata& operator=(IndirectDrawMetadata&&);
+
+ IndexedIndirectBufferValidationInfoMap* GetIndexedIndirectBufferValidationInfo();
+
+ void AddBundle(RenderBundleBase* bundle);
+ void AddIndexedIndirectDraw(wgpu::IndexFormat indexFormat,
+ uint64_t indexBufferSize,
+ BufferBase* indirectBuffer,
+ uint64_t indirectOffset,
+ bool duplicateBaseVertexInstance,
+ DrawIndexedIndirectCmd* cmd);
+
+ void AddIndirectDraw(BufferBase* indirectBuffer,
+ uint64_t indirectOffset,
+ bool duplicateBaseVertexInstance,
+ DrawIndirectCmd* cmd);
+
+ private:
+ IndexedIndirectBufferValidationInfoMap mIndexedIndirectBufferValidationInfo;
+ std::set<RenderBundleBase*> mAddedBundles;
+
+ uint64_t mMaxBatchOffsetRange;
+ uint32_t mMaxDrawCallsPerBatch;
+};
} // namespace dawn::native
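
Since IndexedIndirectConfig is used as the key of a std::map, its operator< has to provide a strict weak ordering over all four fields; the std::tie comparison in the .cpp earlier in this patch does exactly that. A minimal standalone illustration follows (field types and values here are made up):

    #include <cassert>
    #include <cstdint>
    #include <tuple>

    struct Config {
        const void* buffer;  // stand-in for BufferBase*
        uint64_t numIndexBufferElements;
        bool duplicateBaseVertexInstance;
        int drawType;  // stand-in for DrawType

        bool operator<(const Config& o) const {
            // std::tie compares field by field, giving the lexicographic ordering std::map needs.
            return std::tie(buffer, numIndexBufferElements, duplicateBaseVertexInstance,
                            drawType) < std::tie(o.buffer, o.numIndexBufferElements,
                                                 o.duplicateBaseVertexInstance, o.drawType);
        }
    };

    int main() {
        int dummy = 0;
        Config a{&dummy, 100, false, 0};
        Config b{&dummy, 200, false, 0};
        assert(a < b && !(b < a));
        return 0;
    }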
diff --git a/chromium/third_party/dawn/src/dawn/native/IndirectDrawValidationEncoder.cpp b/chromium/third_party/dawn/src/dawn/native/IndirectDrawValidationEncoder.cpp
index 6567b3efa0a..10a8164026c 100644
--- a/chromium/third_party/dawn/src/dawn/native/IndirectDrawValidationEncoder.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/IndirectDrawValidationEncoder.cpp
@@ -14,6 +14,13 @@
#include "dawn/native/IndirectDrawValidationEncoder.h"
+#include <algorithm>
+#include <cstdlib>
+#include <limits>
+#include <memory>
+#include <utility>
+#include <vector>
+
#include "dawn/common/Constants.h"
#include "dawn/common/Math.h"
#include "dawn/native/BindGroup.h"
@@ -26,83 +33,126 @@
#include "dawn/native/Queue.h"
#include "dawn/native/utils/WGPUHelpers.h"
-#include <cstdlib>
-#include <limits>
-
namespace dawn::native {
- namespace {
- // NOTE: This must match the workgroup_size attribute on the compute entry point below.
- constexpr uint64_t kWorkgroupSize = 64;
+namespace {
+// NOTE: This must match the workgroup_size attribute on the compute entry point below.
+constexpr uint64_t kWorkgroupSize = 64;
+
+// Bitmasks for BatchInfo::flags
+constexpr uint32_t kDuplicateBaseVertexInstance = 1;
+constexpr uint32_t kIndexedDraw = 2;
+constexpr uint32_t kValidationEnabled = 4;
+constexpr uint32_t kIndirectFirstInstanceEnabled = 8;
+
+// Equivalent to the BatchInfo struct defined in the shader below.
+struct BatchInfo {
+ uint64_t numIndexBufferElements;
+ uint32_t numDraws;
+ uint32_t flags;
+};
- // Equivalent to the BatchInfo struct defined in the shader below.
- struct BatchInfo {
- uint64_t numIndexBufferElements;
- uint32_t numDraws;
- uint32_t padding;
- };
+// TODO(https://crbug.com/dawn/1108): Propagate validation feedback from this shader in
+// various failure modes.
+static const char sRenderValidationShaderSource[] = R"(
- // TODO(https://crbug.com/dawn/1108): Propagate validation feedback from this shader in
- // various failure modes.
- static const char sRenderValidationShaderSource[] = R"(
- let kNumIndirectParamsPerDrawCall = 5u;
+ let kNumDrawIndirectParams = 4u;
let kIndexCountEntry = 0u;
- let kInstanceCountEntry = 1u;
let kFirstIndexEntry = 2u;
- let kBaseVertexEntry = 3u;
- let kFirstInstanceEntry = 4u;
+
+ // Bitmasks for BatchInfo::flags
+ let kDuplicateBaseVertexInstance = 1u;
+ let kIndexedDraw = 2u;
+ let kValidationEnabled = 4u;
+ let kIndirectFirstInstanceEnabled = 8u;
struct BatchInfo {
- numIndexBufferElementsLow: u32;
- numIndexBufferElementsHigh: u32;
- numDraws: u32;
- padding: u32;
- indirectOffsets: array<u32>;
- };
+ numIndexBufferElementsLow: u32,
+ numIndexBufferElementsHigh: u32,
+ numDraws: u32,
+ flags: u32,
+ indirectOffsets: array<u32>,
+ }
struct IndirectParams {
- data: array<u32>;
- };
+ data: array<u32>,
+ }
@group(0) @binding(0) var<storage, read> batch: BatchInfo;
- @group(0) @binding(1) var<storage, read_write> clientParams: IndirectParams;
- @group(0) @binding(2) var<storage, write> validatedParams: IndirectParams;
+ @group(0) @binding(1) var<storage, read_write> inputParams: IndirectParams;
+ @group(0) @binding(2) var<storage, write> outputParams: IndirectParams;
+
+ fn numIndirectParamsPerDrawCallInput() -> u32 {
+ var numParams = kNumDrawIndirectParams;
+ // Indexed Draw has an extra parameter (firstIndex)
+ if (bool(batch.flags & kIndexedDraw)) {
+ numParams = numParams + 1u;
+ }
+ return numParams;
+ }
+
+ fn numIndirectParamsPerDrawCallOutput() -> u32 {
+ var numParams = numIndirectParamsPerDrawCallInput();
+        // 2 extra parameters for the duplicated first/baseVertex and firstInstance
+ if (bool(batch.flags & kDuplicateBaseVertexInstance)) {
+ numParams = numParams + 2u;
+ }
+ return numParams;
+ }
fn fail(drawIndex: u32) {
- let index = drawIndex * kNumIndirectParamsPerDrawCall;
- validatedParams.data[index + kIndexCountEntry] = 0u;
- validatedParams.data[index + kInstanceCountEntry] = 0u;
- validatedParams.data[index + kFirstIndexEntry] = 0u;
- validatedParams.data[index + kBaseVertexEntry] = 0u;
- validatedParams.data[index + kFirstInstanceEntry] = 0u;
+ let numParams = numIndirectParamsPerDrawCallOutput();
+ let index = drawIndex * numParams;
+ for(var i = 0u; i < numParams; i = i + 1u) {
+ outputParams.data[index + i] = 0u;
+ }
}
fn pass(drawIndex: u32) {
- let vIndex = drawIndex * kNumIndirectParamsPerDrawCall;
- let cIndex = batch.indirectOffsets[drawIndex];
- validatedParams.data[vIndex + kIndexCountEntry] =
- clientParams.data[cIndex + kIndexCountEntry];
- validatedParams.data[vIndex + kInstanceCountEntry] =
- clientParams.data[cIndex + kInstanceCountEntry];
- validatedParams.data[vIndex + kFirstIndexEntry] =
- clientParams.data[cIndex + kFirstIndexEntry];
- validatedParams.data[vIndex + kBaseVertexEntry] =
- clientParams.data[cIndex + kBaseVertexEntry];
- validatedParams.data[vIndex + kFirstInstanceEntry] =
- clientParams.data[cIndex + kFirstInstanceEntry];
+ let numInputParams = numIndirectParamsPerDrawCallInput();
+ var outIndex = drawIndex * numIndirectParamsPerDrawCallOutput();
+ let inIndex = batch.indirectOffsets[drawIndex];
+
+        // The first 2 parameters are reserved for the duplicated first/baseVertex and firstInstance
+
+ if (bool(batch.flags & kDuplicateBaseVertexInstance)) {
+            // first/baseVertex and firstInstance are always the last two input parameters
+ let dupIndex = inIndex + numInputParams - 2u;
+ outputParams.data[outIndex] = inputParams.data[dupIndex];
+ outputParams.data[outIndex + 1u] = inputParams.data[dupIndex + 1u];
+
+ outIndex = outIndex + 2u;
+ }
+
+ for(var i = 0u; i < numInputParams; i = i + 1u) {
+ outputParams.data[outIndex + i] = inputParams.data[inIndex + i];
+ }
}
- @stage(compute) @workgroup_size(64, 1, 1)
+ @compute @workgroup_size(64, 1, 1)
fn main(@builtin(global_invocation_id) id : vec3<u32>) {
if (id.x >= batch.numDraws) {
return;
}
- let clientIndex = batch.indirectOffsets[id.x];
- let firstInstance = clientParams.data[clientIndex + kFirstInstanceEntry];
- if (firstInstance != 0u) {
- fail(id.x);
+ if(!bool(batch.flags & kValidationEnabled)) {
+ pass(id.x);
+ return;
+ }
+
+ let inputIndex = batch.indirectOffsets[id.x];
+ if(!bool(batch.flags & kIndirectFirstInstanceEnabled)) {
+ // firstInstance is always the last parameter
+ let firstInstance = inputParams.data[inputIndex + numIndirectParamsPerDrawCallInput() - 1u];
+ if (firstInstance != 0u) {
+ fail(id.x);
+ return;
+ }
+ }
+
+ if (!bool(batch.flags & kIndexedDraw)) {
+ pass(id.x);
return;
}
@@ -113,7 +163,7 @@ namespace dawn::native {
return;
}
- let firstIndex = clientParams.data[clientIndex + kFirstIndexEntry];
+ let firstIndex = inputParams.data[inputIndex + kFirstIndexEntry];
if (batch.numIndexBufferElementsHigh == 0u &&
batch.numIndexBufferElementsLow < firstIndex) {
fail(id.x);
@@ -123,7 +173,7 @@ namespace dawn::native {
// Note that this subtraction may underflow, but only when
// numIndexBufferElementsHigh is 1u. The result is still correct in that case.
let maxIndexCount = batch.numIndexBufferElementsLow - firstIndex;
- let indexCount = clientParams.data[clientIndex + kIndexCountEntry];
+ let indexCount = inputParams.data[inputIndex + kIndexCountEntry];
if (indexCount > maxIndexCount) {
fail(id.x);
return;
@@ -132,251 +182,271 @@ namespace dawn::native {
}
)";
- ResultOrError<ComputePipelineBase*> GetOrCreateRenderValidationPipeline(
- DeviceBase* device) {
- InternalPipelineStore* store = device->GetInternalPipelineStore();
-
- if (store->renderValidationPipeline == nullptr) {
- // Create compute shader module if not cached before.
- if (store->renderValidationShader == nullptr) {
- DAWN_TRY_ASSIGN(
- store->renderValidationShader,
- utils::CreateShaderModule(device, sRenderValidationShaderSource));
- }
-
- Ref<BindGroupLayoutBase> bindGroupLayout;
- DAWN_TRY_ASSIGN(
- bindGroupLayout,
- utils::MakeBindGroupLayout(
- device,
- {
- {0, wgpu::ShaderStage::Compute,
- wgpu::BufferBindingType::ReadOnlyStorage},
- {1, wgpu::ShaderStage::Compute, kInternalStorageBufferBinding},
- {2, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage},
- },
- /* allowInternalBinding */ true));
-
- Ref<PipelineLayoutBase> pipelineLayout;
- DAWN_TRY_ASSIGN(pipelineLayout,
- utils::MakeBasicPipelineLayout(device, bindGroupLayout));
-
- ComputePipelineDescriptor computePipelineDescriptor = {};
- computePipelineDescriptor.layout = pipelineLayout.Get();
- computePipelineDescriptor.compute.module = store->renderValidationShader.Get();
- computePipelineDescriptor.compute.entryPoint = "main";
-
- DAWN_TRY_ASSIGN(store->renderValidationPipeline,
- device->CreateComputePipeline(&computePipelineDescriptor));
- }
-
- return store->renderValidationPipeline.Get();
- }
+ResultOrError<ComputePipelineBase*> GetOrCreateRenderValidationPipeline(DeviceBase* device) {
+ InternalPipelineStore* store = device->GetInternalPipelineStore();
- size_t GetBatchDataSize(uint32_t numDraws) {
- return sizeof(BatchInfo) + numDraws * sizeof(uint32_t);
+ if (store->renderValidationPipeline == nullptr) {
+ // Create compute shader module if not cached before.
+ if (store->renderValidationShader == nullptr) {
+ DAWN_TRY_ASSIGN(store->renderValidationShader,
+ utils::CreateShaderModule(device, sRenderValidationShaderSource));
}
- } // namespace
+ Ref<BindGroupLayoutBase> bindGroupLayout;
+ DAWN_TRY_ASSIGN(
+ bindGroupLayout,
+ utils::MakeBindGroupLayout(
+ device,
+ {
+ {0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::ReadOnlyStorage},
+ {1, wgpu::ShaderStage::Compute, kInternalStorageBufferBinding},
+ {2, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage},
+ },
+ /* allowInternalBinding */ true));
+
+ Ref<PipelineLayoutBase> pipelineLayout;
+ DAWN_TRY_ASSIGN(pipelineLayout, utils::MakeBasicPipelineLayout(device, bindGroupLayout));
+
+ ComputePipelineDescriptor computePipelineDescriptor = {};
+ computePipelineDescriptor.layout = pipelineLayout.Get();
+ computePipelineDescriptor.compute.module = store->renderValidationShader.Get();
+ computePipelineDescriptor.compute.entryPoint = "main";
+
+ DAWN_TRY_ASSIGN(store->renderValidationPipeline,
+ device->CreateComputePipeline(&computePipelineDescriptor));
+ }
- uint32_t ComputeMaxDrawCallsPerIndirectValidationBatch(const CombinedLimits& limits) {
- const uint64_t batchDrawCallLimitByDispatchSize =
- static_cast<uint64_t>(limits.v1.maxComputeWorkgroupsPerDimension) * kWorkgroupSize;
- const uint64_t batchDrawCallLimitByStorageBindingSize =
- (limits.v1.maxStorageBufferBindingSize - sizeof(BatchInfo)) / sizeof(uint32_t);
- return static_cast<uint32_t>(
- std::min({batchDrawCallLimitByDispatchSize, batchDrawCallLimitByStorageBindingSize,
- uint64_t(std::numeric_limits<uint32_t>::max())}));
+ return store->renderValidationPipeline.Get();
+}
+
+size_t GetBatchDataSize(uint32_t numDraws) {
+ return sizeof(BatchInfo) + numDraws * sizeof(uint32_t);
+}
+
+} // namespace
+
+uint32_t ComputeMaxDrawCallsPerIndirectValidationBatch(const CombinedLimits& limits) {
+ const uint64_t batchDrawCallLimitByDispatchSize =
+ static_cast<uint64_t>(limits.v1.maxComputeWorkgroupsPerDimension) * kWorkgroupSize;
+ const uint64_t batchDrawCallLimitByStorageBindingSize =
+ (limits.v1.maxStorageBufferBindingSize - sizeof(BatchInfo)) / sizeof(uint32_t);
+ return static_cast<uint32_t>(
+ std::min({batchDrawCallLimitByDispatchSize, batchDrawCallLimitByStorageBindingSize,
+ uint64_t(std::numeric_limits<uint32_t>::max())}));
+}
+
+MaybeError EncodeIndirectDrawValidationCommands(DeviceBase* device,
+ CommandEncoder* commandEncoder,
+ RenderPassResourceUsageTracker* usageTracker,
+ IndirectDrawMetadata* indirectDrawMetadata) {
+ struct Batch {
+ const IndirectDrawMetadata::IndirectValidationBatch* metadata;
+ uint64_t numIndexBufferElements;
+ uint64_t dataBufferOffset;
+ uint64_t dataSize;
+ uint64_t inputIndirectOffset;
+ uint64_t inputIndirectSize;
+ uint64_t outputParamsOffset;
+ uint64_t outputParamsSize;
+ BatchInfo* batchInfo;
+ };
+
+ struct Pass {
+ uint32_t flags;
+ BufferBase* inputIndirectBuffer;
+ uint64_t outputParamsSize = 0;
+ uint64_t batchDataSize = 0;
+ std::unique_ptr<void, void (*)(void*)> batchData{nullptr, std::free};
+ std::vector<Batch> batches;
+ };
+
+ // First stage is grouping all batches into passes. We try to pack as many batches into a
+ // single pass as possible. Batches can be grouped together as long as they're validating
+ // data from the same indirect buffer, but they may still be split into multiple passes if
+ // the number of draw calls in a pass would exceed some (very high) upper bound.
+ uint64_t outputParamsSize = 0;
+ std::vector<Pass> passes;
+ IndirectDrawMetadata::IndexedIndirectBufferValidationInfoMap& bufferInfoMap =
+ *indirectDrawMetadata->GetIndexedIndirectBufferValidationInfo();
+ if (bufferInfoMap.empty()) {
+ return {};
}
- MaybeError EncodeIndirectDrawValidationCommands(DeviceBase* device,
- CommandEncoder* commandEncoder,
- RenderPassResourceUsageTracker* usageTracker,
- IndirectDrawMetadata* indirectDrawMetadata) {
- struct Batch {
- const IndirectDrawMetadata::IndexedIndirectValidationBatch* metadata;
- uint64_t numIndexBufferElements;
- uint64_t dataBufferOffset;
- uint64_t dataSize;
- uint64_t clientIndirectOffset;
- uint64_t clientIndirectSize;
- uint64_t validatedParamsOffset;
- uint64_t validatedParamsSize;
- BatchInfo* batchInfo;
- };
-
- struct Pass {
- BufferBase* clientIndirectBuffer;
- uint64_t validatedParamsSize = 0;
- uint64_t batchDataSize = 0;
- std::unique_ptr<void, void (*)(void*)> batchData{nullptr, std::free};
- std::vector<Batch> batches;
- };
-
- // First stage is grouping all batches into passes. We try to pack as many batches into a
- // single pass as possible. Batches can be grouped together as long as they're validating
- // data from the same indirect buffer, but they may still be split into multiple passes if
- // the number of draw calls in a pass would exceed some (very high) upper bound.
- size_t validatedParamsSize = 0;
- std::vector<Pass> passes;
- IndirectDrawMetadata::IndexedIndirectBufferValidationInfoMap& bufferInfoMap =
- *indirectDrawMetadata->GetIndexedIndirectBufferValidationInfo();
- if (bufferInfoMap.empty()) {
- return {};
+ const uint64_t maxStorageBufferBindingSize = device->GetLimits().v1.maxStorageBufferBindingSize;
+ const uint32_t minStorageBufferOffsetAlignment =
+ device->GetLimits().v1.minStorageBufferOffsetAlignment;
+
+ for (auto& [config, validationInfo] : bufferInfoMap) {
+ const uint64_t indirectDrawCommandSize =
+ config.drawType == IndirectDrawMetadata::DrawType::Indexed ? kDrawIndexedIndirectSize
+ : kDrawIndirectSize;
+
+ uint64_t outputIndirectSize = indirectDrawCommandSize;
+ if (config.duplicateBaseVertexInstance) {
+ outputIndirectSize += 2 * sizeof(uint32_t);
}
- const uint32_t maxStorageBufferBindingSize =
- device->GetLimits().v1.maxStorageBufferBindingSize;
- const uint32_t minStorageBufferOffsetAlignment =
- device->GetLimits().v1.minStorageBufferOffsetAlignment;
-
- for (auto& [config, validationInfo] : bufferInfoMap) {
- BufferBase* clientIndirectBuffer = config.first;
- for (const IndirectDrawMetadata::IndexedIndirectValidationBatch& batch :
- validationInfo.GetBatches()) {
- const uint64_t minOffsetFromAlignedBoundary =
- batch.minOffset % minStorageBufferOffsetAlignment;
- const uint64_t minOffsetAlignedDown =
- batch.minOffset - minOffsetFromAlignedBoundary;
-
- Batch newBatch;
- newBatch.metadata = &batch;
- newBatch.numIndexBufferElements = config.second;
- newBatch.dataSize = GetBatchDataSize(batch.draws.size());
- newBatch.clientIndirectOffset = minOffsetAlignedDown;
- newBatch.clientIndirectSize =
- batch.maxOffset + kDrawIndexedIndirectSize - minOffsetAlignedDown;
-
- newBatch.validatedParamsSize = batch.draws.size() * kDrawIndexedIndirectSize;
- newBatch.validatedParamsOffset =
- Align(validatedParamsSize, minStorageBufferOffsetAlignment);
- validatedParamsSize = newBatch.validatedParamsOffset + newBatch.validatedParamsSize;
- if (validatedParamsSize > maxStorageBufferBindingSize) {
- return DAWN_INTERNAL_ERROR("Too many drawIndexedIndirect calls to validate");
- }
+ for (const IndirectDrawMetadata::IndirectValidationBatch& batch :
+ validationInfo.GetBatches()) {
+ const uint64_t minOffsetFromAlignedBoundary =
+ batch.minOffset % minStorageBufferOffsetAlignment;
+ const uint64_t minOffsetAlignedDown = batch.minOffset - minOffsetFromAlignedBoundary;
+
+ Batch newBatch;
+ newBatch.metadata = &batch;
+ newBatch.numIndexBufferElements = config.numIndexBufferElements;
+ newBatch.dataSize = GetBatchDataSize(batch.draws.size());
+ newBatch.inputIndirectOffset = minOffsetAlignedDown;
+ newBatch.inputIndirectSize =
+ batch.maxOffset + indirectDrawCommandSize - minOffsetAlignedDown;
+
+ newBatch.outputParamsSize = batch.draws.size() * outputIndirectSize;
+ newBatch.outputParamsOffset = Align(outputParamsSize, minStorageBufferOffsetAlignment);
+ outputParamsSize = newBatch.outputParamsOffset + newBatch.outputParamsSize;
+ if (outputParamsSize > maxStorageBufferBindingSize) {
+ return DAWN_INTERNAL_ERROR("Too many drawIndexedIndirect calls to validate");
+ }
- Pass* currentPass = passes.empty() ? nullptr : &passes.back();
- if (currentPass && currentPass->clientIndirectBuffer == clientIndirectBuffer) {
- uint64_t nextBatchDataOffset =
- Align(currentPass->batchDataSize, minStorageBufferOffsetAlignment);
- uint64_t newPassBatchDataSize = nextBatchDataOffset + newBatch.dataSize;
- if (newPassBatchDataSize <= maxStorageBufferBindingSize) {
- // We can fit this batch in the current pass.
- newBatch.dataBufferOffset = nextBatchDataOffset;
- currentPass->batchDataSize = newPassBatchDataSize;
- currentPass->batches.push_back(newBatch);
- continue;
- }
+ Pass* currentPass = passes.empty() ? nullptr : &passes.back();
+ if (currentPass && currentPass->inputIndirectBuffer == config.inputIndirectBuffer) {
+ uint64_t nextBatchDataOffset =
+ Align(currentPass->batchDataSize, minStorageBufferOffsetAlignment);
+ uint64_t newPassBatchDataSize = nextBatchDataOffset + newBatch.dataSize;
+ if (newPassBatchDataSize <= maxStorageBufferBindingSize) {
+ // We can fit this batch in the current pass.
+ newBatch.dataBufferOffset = nextBatchDataOffset;
+ currentPass->batchDataSize = newPassBatchDataSize;
+ currentPass->batches.push_back(newBatch);
+ continue;
}
+ }
- // We need to start a new pass for this batch.
- newBatch.dataBufferOffset = 0;
+ // We need to start a new pass for this batch.
+ newBatch.dataBufferOffset = 0;
- Pass newPass;
- newPass.clientIndirectBuffer = clientIndirectBuffer;
- newPass.batchDataSize = newBatch.dataSize;
- newPass.batches.push_back(newBatch);
- passes.push_back(std::move(newPass));
+ Pass newPass{};
+ newPass.inputIndirectBuffer = config.inputIndirectBuffer;
+ newPass.batchDataSize = newBatch.dataSize;
+ newPass.batches.push_back(newBatch);
+ newPass.flags = 0;
+ if (config.duplicateBaseVertexInstance) {
+ newPass.flags |= kDuplicateBaseVertexInstance;
+ }
+ if (config.drawType == IndirectDrawMetadata::DrawType::Indexed) {
+ newPass.flags |= kIndexedDraw;
}
+ if (device->IsValidationEnabled()) {
+ newPass.flags |= kValidationEnabled;
+ }
+ if (device->IsFeatureEnabled(Feature::IndirectFirstInstance)) {
+ newPass.flags |= kIndirectFirstInstanceEnabled;
+ }
+ passes.push_back(std::move(newPass));
}
+ }
- auto* const store = device->GetInternalPipelineStore();
- ScratchBuffer& validatedParamsBuffer = store->scratchIndirectStorage;
- ScratchBuffer& batchDataBuffer = store->scratchStorage;
+ auto* const store = device->GetInternalPipelineStore();
+ ScratchBuffer& outputParamsBuffer = store->scratchIndirectStorage;
+ ScratchBuffer& batchDataBuffer = store->scratchStorage;
- uint64_t requiredBatchDataBufferSize = 0;
- for (const Pass& pass : passes) {
- requiredBatchDataBufferSize = std::max(requiredBatchDataBufferSize, pass.batchDataSize);
- }
- DAWN_TRY(batchDataBuffer.EnsureCapacity(requiredBatchDataBufferSize));
- usageTracker->BufferUsedAs(batchDataBuffer.GetBuffer(), wgpu::BufferUsage::Storage);
-
- DAWN_TRY(validatedParamsBuffer.EnsureCapacity(validatedParamsSize));
- usageTracker->BufferUsedAs(validatedParamsBuffer.GetBuffer(), wgpu::BufferUsage::Indirect);
-
- // Now we allocate and populate host-side batch data to be copied to the GPU.
- for (Pass& pass : passes) {
- // We use std::malloc here because it guarantees maximal scalar alignment.
- pass.batchData = {std::malloc(pass.batchDataSize), std::free};
- memset(pass.batchData.get(), 0, pass.batchDataSize);
- uint8_t* batchData = static_cast<uint8_t*>(pass.batchData.get());
- for (Batch& batch : pass.batches) {
- batch.batchInfo = new (&batchData[batch.dataBufferOffset]) BatchInfo();
- batch.batchInfo->numIndexBufferElements = batch.numIndexBufferElements;
- batch.batchInfo->numDraws = static_cast<uint32_t>(batch.metadata->draws.size());
-
- uint32_t* indirectOffsets = reinterpret_cast<uint32_t*>(batch.batchInfo + 1);
- uint64_t validatedParamsOffset = batch.validatedParamsOffset;
- for (auto& draw : batch.metadata->draws) {
- // The shader uses this to index an array of u32, hence the division by 4 bytes.
- *indirectOffsets++ = static_cast<uint32_t>(
- (draw.clientBufferOffset - batch.clientIndirectOffset) / 4);
-
- draw.cmd->indirectBuffer = validatedParamsBuffer.GetBuffer();
- draw.cmd->indirectOffset = validatedParamsOffset;
-
- validatedParamsOffset += kDrawIndexedIndirectSize;
+ uint64_t requiredBatchDataBufferSize = 0;
+ for (const Pass& pass : passes) {
+ requiredBatchDataBufferSize = std::max(requiredBatchDataBufferSize, pass.batchDataSize);
+ }
+ DAWN_TRY(batchDataBuffer.EnsureCapacity(requiredBatchDataBufferSize));
+ usageTracker->BufferUsedAs(batchDataBuffer.GetBuffer(), wgpu::BufferUsage::Storage);
+
+ DAWN_TRY(outputParamsBuffer.EnsureCapacity(outputParamsSize));
+ usageTracker->BufferUsedAs(outputParamsBuffer.GetBuffer(), wgpu::BufferUsage::Indirect);
+
+ // Now we allocate and populate host-side batch data to be copied to the GPU.
+ for (Pass& pass : passes) {
+ // We use std::malloc here because it guarantees maximal scalar alignment.
+ pass.batchData = {std::malloc(pass.batchDataSize), std::free};
+ memset(pass.batchData.get(), 0, pass.batchDataSize);
+ uint8_t* batchData = static_cast<uint8_t*>(pass.batchData.get());
+ for (Batch& batch : pass.batches) {
+ batch.batchInfo = new (&batchData[batch.dataBufferOffset]) BatchInfo();
+ batch.batchInfo->numIndexBufferElements = batch.numIndexBufferElements;
+ batch.batchInfo->numDraws = static_cast<uint32_t>(batch.metadata->draws.size());
+ batch.batchInfo->flags = pass.flags;
+
+ uint32_t* indirectOffsets = reinterpret_cast<uint32_t*>(batch.batchInfo + 1);
+ uint64_t outputParamsOffset = batch.outputParamsOffset;
+ for (auto& draw : batch.metadata->draws) {
+ // The shader uses this to index an array of u32, hence the division by 4 bytes.
+ *indirectOffsets++ =
+ static_cast<uint32_t>((draw.inputBufferOffset - batch.inputIndirectOffset) / 4);
+
+ draw.cmd->indirectBuffer = outputParamsBuffer.GetBuffer();
+ draw.cmd->indirectOffset = outputParamsOffset;
+ if (pass.flags & kIndexedDraw) {
+ outputParamsOffset += kDrawIndexedIndirectSize;
+ } else {
+ outputParamsOffset += kDrawIndirectSize;
}
}
}
+ }
- ComputePipelineBase* pipeline;
- DAWN_TRY_ASSIGN(pipeline, GetOrCreateRenderValidationPipeline(device));
-
- Ref<BindGroupLayoutBase> layout;
- DAWN_TRY_ASSIGN(layout, pipeline->GetBindGroupLayout(0));
-
- BindGroupEntry bindings[3];
- BindGroupEntry& bufferDataBinding = bindings[0];
- bufferDataBinding.binding = 0;
- bufferDataBinding.buffer = batchDataBuffer.GetBuffer();
-
- BindGroupEntry& clientIndirectBinding = bindings[1];
- clientIndirectBinding.binding = 1;
-
- BindGroupEntry& validatedParamsBinding = bindings[2];
- validatedParamsBinding.binding = 2;
- validatedParamsBinding.buffer = validatedParamsBuffer.GetBuffer();
-
- BindGroupDescriptor bindGroupDescriptor = {};
- bindGroupDescriptor.layout = layout.Get();
- bindGroupDescriptor.entryCount = 3;
- bindGroupDescriptor.entries = bindings;
-
- // Finally, we can now encode our validation passes. Each pass first does a single
- // WriteBuffer to get batch data over to the GPU, followed by a single compute pass. The
- // compute pass encodes a separate SetBindGroup and Dispatch command for each batch.
- for (const Pass& pass : passes) {
- commandEncoder->APIWriteBuffer(batchDataBuffer.GetBuffer(), 0,
- static_cast<const uint8_t*>(pass.batchData.get()),
- pass.batchDataSize);
-
- Ref<ComputePassEncoder> passEncoder = commandEncoder->BeginComputePass();
- passEncoder->APISetPipeline(pipeline);
-
- clientIndirectBinding.buffer = pass.clientIndirectBuffer;
-
- for (const Batch& batch : pass.batches) {
- bufferDataBinding.offset = batch.dataBufferOffset;
- bufferDataBinding.size = batch.dataSize;
- clientIndirectBinding.offset = batch.clientIndirectOffset;
- clientIndirectBinding.size = batch.clientIndirectSize;
- validatedParamsBinding.offset = batch.validatedParamsOffset;
- validatedParamsBinding.size = batch.validatedParamsSize;
-
- Ref<BindGroupBase> bindGroup;
- DAWN_TRY_ASSIGN(bindGroup, device->CreateBindGroup(&bindGroupDescriptor));
-
- const uint32_t numDrawsRoundedUp =
- (batch.batchInfo->numDraws + kWorkgroupSize - 1) / kWorkgroupSize;
- passEncoder->APISetBindGroup(0, bindGroup.Get());
- passEncoder->APIDispatch(numDrawsRoundedUp);
- }
-
- passEncoder->APIEnd();
+ ComputePipelineBase* pipeline;
+ DAWN_TRY_ASSIGN(pipeline, GetOrCreateRenderValidationPipeline(device));
+
+ Ref<BindGroupLayoutBase> layout;
+ DAWN_TRY_ASSIGN(layout, pipeline->GetBindGroupLayout(0));
+
+ BindGroupEntry bindings[3];
+ BindGroupEntry& bufferDataBinding = bindings[0];
+ bufferDataBinding.binding = 0;
+ bufferDataBinding.buffer = batchDataBuffer.GetBuffer();
+
+ BindGroupEntry& inputIndirectBinding = bindings[1];
+ inputIndirectBinding.binding = 1;
+
+ BindGroupEntry& outputParamsBinding = bindings[2];
+ outputParamsBinding.binding = 2;
+ outputParamsBinding.buffer = outputParamsBuffer.GetBuffer();
+
+ BindGroupDescriptor bindGroupDescriptor = {};
+ bindGroupDescriptor.layout = layout.Get();
+ bindGroupDescriptor.entryCount = 3;
+ bindGroupDescriptor.entries = bindings;
+
+    // Finally, we can now encode our validation and duplication passes. Each pass first does a
+    // single WriteBuffer to get its batch data (including the pass flags) over to the GPU,
+    // followed by a single compute pass. The compute pass encodes a separate SetBindGroup and
+    // Dispatch command for each batch.
+ for (const Pass& pass : passes) {
+ commandEncoder->APIWriteBuffer(batchDataBuffer.GetBuffer(), 0,
+ static_cast<const uint8_t*>(pass.batchData.get()),
+ pass.batchDataSize);
+
+ Ref<ComputePassEncoder> passEncoder = commandEncoder->BeginComputePass();
+ passEncoder->APISetPipeline(pipeline);
+
+ inputIndirectBinding.buffer = pass.inputIndirectBuffer;
+
+ for (const Batch& batch : pass.batches) {
+ bufferDataBinding.offset = batch.dataBufferOffset;
+ bufferDataBinding.size = batch.dataSize;
+ inputIndirectBinding.offset = batch.inputIndirectOffset;
+ inputIndirectBinding.size = batch.inputIndirectSize;
+ outputParamsBinding.offset = batch.outputParamsOffset;
+ outputParamsBinding.size = batch.outputParamsSize;
+
+ Ref<BindGroupBase> bindGroup;
+ DAWN_TRY_ASSIGN(bindGroup, device->CreateBindGroup(&bindGroupDescriptor));
+
+ const uint32_t numDrawsRoundedUp =
+ (batch.batchInfo->numDraws + kWorkgroupSize - 1) / kWorkgroupSize;
+ passEncoder->APISetBindGroup(0, bindGroup.Get());
+ passEncoder->APIDispatchWorkgroups(numDrawsRoundedUp);
}
- return {};
+ passEncoder->APIEnd();
}
+ return {};
+}
+
} // namespace dawn::native
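
As a quick cross-check of the flag plumbing and the dispatch math in the encoder, here is a standalone sketch (illustrative values only) that composes the same BatchInfo flag bits a pass carries and rounds a batch's draw count up to whole workgroups the way the encoder does before calling APIDispatchWorkgroups.

    #include <cstdint>
    #include <cstdio>

    // Same bit values as the constants in the anonymous namespace above.
    constexpr uint32_t kDuplicateBaseVertexInstance = 1;
    constexpr uint32_t kIndexedDraw = 2;
    constexpr uint32_t kValidationEnabled = 4;
    constexpr uint32_t kIndirectFirstInstanceEnabled = 8;
    constexpr uint32_t kWorkgroupSize = 64;  // must match the shader's workgroup_size

    int main() {
        // A hypothetical pass: indexed draws with validation on, no base vertex/instance
        // duplication, indirect-first-instance feature disabled.
        uint32_t flags = kIndexedDraw | kValidationEnabled;
        (void)kDuplicateBaseVertexInstance;
        (void)kIndirectFirstInstanceEnabled;

        // Round the draws in a batch up to whole workgroups, as done for APIDispatchWorkgroups.
        uint32_t numDraws = 1000;
        uint32_t numWorkgroups = (numDraws + kWorkgroupSize - 1) / kWorkgroupSize;

        std::printf("flags=0x%x workgroups=%u\n", flags, numWorkgroups);  // flags=0x6 workgroups=16
        return 0;
    }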
diff --git a/chromium/third_party/dawn/src/dawn/native/IndirectDrawValidationEncoder.h b/chromium/third_party/dawn/src/dawn/native/IndirectDrawValidationEncoder.h
index de246c25c7f..21946de51cc 100644
--- a/chromium/third_party/dawn/src/dawn/native/IndirectDrawValidationEncoder.h
+++ b/chromium/third_party/dawn/src/dawn/native/IndirectDrawValidationEncoder.h
@@ -20,20 +20,20 @@
namespace dawn::native {
- class CommandEncoder;
- struct CombinedLimits;
- class DeviceBase;
- class RenderPassResourceUsageTracker;
-
- // The maximum number of draws call we can fit into a single validation batch. This is
- // essentially limited by the number of indirect parameter blocks that can fit into the maximum
- // allowed storage binding size (with the base limits, it is about 6.7M).
- uint32_t ComputeMaxDrawCallsPerIndirectValidationBatch(const CombinedLimits& limits);
-
- MaybeError EncodeIndirectDrawValidationCommands(DeviceBase* device,
- CommandEncoder* commandEncoder,
- RenderPassResourceUsageTracker* usageTracker,
- IndirectDrawMetadata* indirectDrawMetadata);
+class CommandEncoder;
+struct CombinedLimits;
+class DeviceBase;
+class RenderPassResourceUsageTracker;
+
+// The maximum number of draw calls we can fit into a single validation batch. This is
+// essentially limited by the number of indirect parameter blocks that can fit into the maximum
+// allowed storage binding size (with the base limits, it is about 6.7M).
+uint32_t ComputeMaxDrawCallsPerIndirectValidationBatch(const CombinedLimits& limits);
+
+MaybeError EncodeIndirectDrawValidationCommands(DeviceBase* device,
+ CommandEncoder* commandEncoder,
+ RenderPassResourceUsageTracker* usageTracker,
+ IndirectDrawMetadata* indirectDrawMetadata);
} // namespace dawn::native
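The "about 6.7M" figure in the comment above can be reproduced with a back-of-the-envelope calculation. The standalone sketch below assumes a 20-byte indirect parameter block (five 32-bit values, the drawIndexedIndirect layout) and the base maxStorageBufferBindingSize of 128 MiB, so the numbers are illustrative rather than taken from the implementation:

```cpp
// Illustrative estimate of the maximum draw calls per validation batch.
#include <cstdint>
#include <iostream>

int main() {
    const uint64_t maxStorageBufferBindingSize = 134217728;       // base limit, 128 MiB
    const uint64_t indirectParamBlockSize = 5 * sizeof(uint32_t);  // assumed 20 bytes
    std::cout << maxStorageBufferBindingSize / indirectParamBlockSize
              << " draw calls per batch (approx.)\n";  // prints 6710886
    return 0;
}
```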
diff --git a/chromium/third_party/dawn/src/dawn/native/Instance.cpp b/chromium/third_party/dawn/src/dawn/native/Instance.cpp
index 92842f519db..3d8cce91e9a 100644
--- a/chromium/third_party/dawn/src/dawn/native/Instance.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/Instance.cpp
@@ -14,10 +14,13 @@
#include "dawn/native/Instance.h"
+#include <utility>
+
#include "dawn/common/Assert.h"
#include "dawn/common/GPUInfo.h"
#include "dawn/common/Log.h"
#include "dawn/common/SystemUtils.h"
+#include "dawn/common/Version_autogen.h"
#include "dawn/native/ChainUtils_autogen.h"
#include "dawn/native/ErrorData.h"
#include "dawn/native/Surface.h"
@@ -26,414 +29,447 @@
// For SwiftShader fallback
#if defined(DAWN_ENABLE_BACKEND_VULKAN)
-# include "dawn/native/VulkanBackend.h"
+#include "dawn/native/VulkanBackend.h"
#endif // defined(DAWN_ENABLE_BACKEND_VULKAN)
#if defined(DAWN_USE_X11)
-# include "dawn/native/XlibXcbFunctions.h"
+#include "dawn/native/XlibXcbFunctions.h"
#endif // defined(DAWN_USE_X11)
#include <optional>
namespace dawn::native {
- // Forward definitions of each backend's "Connect" function that creates new BackendConnection.
- // Conditionally compiled declarations are used to avoid using static constructors instead.
+// Forward declarations of each backend's "Connect" function that creates a new BackendConnection.
+// Conditionally compiled declarations are used instead of static constructors.
#if defined(DAWN_ENABLE_BACKEND_D3D12)
- namespace d3d12 {
- BackendConnection* Connect(InstanceBase* instance);
- }
+namespace d3d12 {
+BackendConnection* Connect(InstanceBase* instance);
+}
#endif // defined(DAWN_ENABLE_BACKEND_D3D12)
#if defined(DAWN_ENABLE_BACKEND_METAL)
- namespace metal {
- BackendConnection* Connect(InstanceBase* instance);
- }
+namespace metal {
+BackendConnection* Connect(InstanceBase* instance);
+}
#endif // defined(DAWN_ENABLE_BACKEND_METAL)
#if defined(DAWN_ENABLE_BACKEND_NULL)
- namespace null {
- BackendConnection* Connect(InstanceBase* instance);
- }
+namespace null {
+BackendConnection* Connect(InstanceBase* instance);
+}
#endif // defined(DAWN_ENABLE_BACKEND_NULL)
#if defined(DAWN_ENABLE_BACKEND_OPENGL)
- namespace opengl {
- BackendConnection* Connect(InstanceBase* instance, wgpu::BackendType backendType);
- }
+namespace opengl {
+BackendConnection* Connect(InstanceBase* instance, wgpu::BackendType backendType);
+}
#endif // defined(DAWN_ENABLE_BACKEND_OPENGL)
#if defined(DAWN_ENABLE_BACKEND_VULKAN)
- namespace vulkan {
- BackendConnection* Connect(InstanceBase* instance);
- }
+namespace vulkan {
+BackendConnection* Connect(InstanceBase* instance);
+}
#endif // defined(DAWN_ENABLE_BACKEND_VULKAN)
- namespace {
+namespace {
- BackendsBitset GetEnabledBackends() {
- BackendsBitset enabledBackends;
+BackendsBitset GetEnabledBackends() {
+ BackendsBitset enabledBackends;
#if defined(DAWN_ENABLE_BACKEND_NULL)
- enabledBackends.set(wgpu::BackendType::Null);
+ enabledBackends.set(wgpu::BackendType::Null);
#endif // defined(DAWN_ENABLE_BACKEND_NULL)
#if defined(DAWN_ENABLE_BACKEND_D3D12)
- enabledBackends.set(wgpu::BackendType::D3D12);
+ enabledBackends.set(wgpu::BackendType::D3D12);
#endif // defined(DAWN_ENABLE_BACKEND_D3D12)
#if defined(DAWN_ENABLE_BACKEND_METAL)
- enabledBackends.set(wgpu::BackendType::Metal);
+ enabledBackends.set(wgpu::BackendType::Metal);
#endif // defined(DAWN_ENABLE_BACKEND_METAL)
#if defined(DAWN_ENABLE_BACKEND_VULKAN)
- enabledBackends.set(wgpu::BackendType::Vulkan);
+ enabledBackends.set(wgpu::BackendType::Vulkan);
#endif // defined(DAWN_ENABLE_BACKEND_VULKAN)
#if defined(DAWN_ENABLE_BACKEND_DESKTOP_GL)
- enabledBackends.set(wgpu::BackendType::OpenGL);
+ enabledBackends.set(wgpu::BackendType::OpenGL);
#endif // defined(DAWN_ENABLE_BACKEND_DESKTOP_GL)
#if defined(DAWN_ENABLE_BACKEND_OPENGLES)
- enabledBackends.set(wgpu::BackendType::OpenGLES);
+ enabledBackends.set(wgpu::BackendType::OpenGLES);
#endif // defined(DAWN_ENABLE_BACKEND_OPENGLES)
- return enabledBackends;
- }
+ return enabledBackends;
+}
- } // anonymous namespace
-
- InstanceBase* APICreateInstance(const InstanceDescriptor* descriptor) {
- return InstanceBase::Create().Detach();
+dawn::platform::CachingInterface* GetCachingInterface(dawn::platform::Platform* platform) {
+ if (platform != nullptr && dawn::kGitHash.size() > 0) {
+ return platform->GetCachingInterface(dawn::kGitHash.data(), dawn::kGitHash.size());
}
+ return nullptr;
+}
- // InstanceBase
+} // anonymous namespace
- // static
- Ref<InstanceBase> InstanceBase::Create(const InstanceDescriptor* descriptor) {
- Ref<InstanceBase> instance = AcquireRef(new InstanceBase);
- static constexpr InstanceDescriptor kDefaultDesc = {};
- if (descriptor == nullptr) {
- descriptor = &kDefaultDesc;
- }
- if (instance->ConsumedError(instance->Initialize(descriptor))) {
- return nullptr;
- }
- return instance;
+InstanceBase* APICreateInstance(const InstanceDescriptor* descriptor) {
+ return InstanceBase::Create().Detach();
+}
+
+// InstanceBase
+
+// static
+Ref<InstanceBase> InstanceBase::Create(const InstanceDescriptor* descriptor) {
+ Ref<InstanceBase> instance = AcquireRef(new InstanceBase);
+ static constexpr InstanceDescriptor kDefaultDesc = {};
+ if (descriptor == nullptr) {
+ descriptor = &kDefaultDesc;
+ }
+ if (instance->ConsumedError(instance->Initialize(descriptor))) {
+ return nullptr;
}
+ return instance;
+}
- // TODO(crbug.com/dawn/832): make the platform an initialization parameter of the instance.
- MaybeError InstanceBase::Initialize(const InstanceDescriptor* descriptor) {
- DAWN_TRY(ValidateSingleSType(descriptor->nextInChain, wgpu::SType::DawnInstanceDescriptor));
- const DawnInstanceDescriptor* dawnDesc = nullptr;
- FindInChain(descriptor->nextInChain, &dawnDesc);
- if (dawnDesc != nullptr) {
- for (uint32_t i = 0; i < dawnDesc->additionalRuntimeSearchPathsCount; ++i) {
- mRuntimeSearchPaths.push_back(dawnDesc->additionalRuntimeSearchPaths[i]);
- }
- }
- // Default paths to search are next to the shared library, next to the executable, and
- // no path (just libvulkan.so).
- if (auto p = GetModuleDirectory()) {
- mRuntimeSearchPaths.push_back(std::move(*p));
- }
- if (auto p = GetExecutableDirectory()) {
- mRuntimeSearchPaths.push_back(std::move(*p));
+InstanceBase::InstanceBase() = default;
+
+InstanceBase::~InstanceBase() = default;
+
+// TODO(crbug.com/dawn/832): make the platform an initialization parameter of the instance.
+MaybeError InstanceBase::Initialize(const InstanceDescriptor* descriptor) {
+ DAWN_TRY(ValidateSingleSType(descriptor->nextInChain, wgpu::SType::DawnInstanceDescriptor));
+ const DawnInstanceDescriptor* dawnDesc = nullptr;
+ FindInChain(descriptor->nextInChain, &dawnDesc);
+ if (dawnDesc != nullptr) {
+ for (uint32_t i = 0; i < dawnDesc->additionalRuntimeSearchPathsCount; ++i) {
+ mRuntimeSearchPaths.push_back(dawnDesc->additionalRuntimeSearchPaths[i]);
}
- mRuntimeSearchPaths.push_back("");
- return {};
}
+ // Default paths to search are next to the shared library, next to the executable, and
+ // no path (just libvulkan.so).
+ if (auto p = GetModuleDirectory()) {
+ mRuntimeSearchPaths.push_back(std::move(*p));
+ }
+ if (auto p = GetExecutableDirectory()) {
+ mRuntimeSearchPaths.push_back(std::move(*p));
+ }
+ mRuntimeSearchPaths.push_back("");
- void InstanceBase::APIRequestAdapter(const RequestAdapterOptions* options,
- WGPURequestAdapterCallback callback,
- void* userdata) {
- static constexpr RequestAdapterOptions kDefaultOptions = {};
- if (options == nullptr) {
- options = &kDefaultOptions;
- }
- auto result = RequestAdapterInternal(options);
- if (result.IsError()) {
- auto err = result.AcquireError();
- std::string msg = err->GetFormattedMessage();
- // TODO(crbug.com/dawn/1122): Call callbacks only on wgpuInstanceProcessEvents
- callback(WGPURequestAdapterStatus_Error, nullptr, msg.c_str(), userdata);
- } else {
- Ref<AdapterBase> adapter = result.AcquireSuccess();
- // TODO(crbug.com/dawn/1122): Call callbacks only on wgpuInstanceProcessEvents
- callback(WGPURequestAdapterStatus_Success, ToAPI(adapter.Detach()), nullptr, userdata);
- }
+ // Initialize the platform to the default for now.
+ mDefaultPlatform = std::make_unique<dawn::platform::Platform>();
+ SetPlatform(mDefaultPlatform.get());
+
+ return {};
+}
+
+void InstanceBase::APIRequestAdapter(const RequestAdapterOptions* options,
+ WGPURequestAdapterCallback callback,
+ void* userdata) {
+ static constexpr RequestAdapterOptions kDefaultOptions = {};
+ if (options == nullptr) {
+ options = &kDefaultOptions;
+ }
+ auto result = RequestAdapterInternal(options);
+ if (result.IsError()) {
+ auto err = result.AcquireError();
+ std::string msg = err->GetFormattedMessage();
+ // TODO(crbug.com/dawn/1122): Call callbacks only on wgpuInstanceProcessEvents
+ callback(WGPURequestAdapterStatus_Error, nullptr, msg.c_str(), userdata);
+ } else {
+ Ref<AdapterBase> adapter = result.AcquireSuccess();
+ // TODO(crbug.com/dawn/1122): Call callbacks only on wgpuInstanceProcessEvents
+ callback(WGPURequestAdapterStatus_Success, ToAPI(adapter.Detach()), nullptr, userdata);
}
+}
- ResultOrError<Ref<AdapterBase>> InstanceBase::RequestAdapterInternal(
- const RequestAdapterOptions* options) {
- ASSERT(options != nullptr);
- if (options->forceFallbackAdapter) {
+ResultOrError<Ref<AdapterBase>> InstanceBase::RequestAdapterInternal(
+ const RequestAdapterOptions* options) {
+ ASSERT(options != nullptr);
+ if (options->forceFallbackAdapter) {
#if defined(DAWN_ENABLE_BACKEND_VULKAN)
- if (GetEnabledBackends()[wgpu::BackendType::Vulkan]) {
- dawn_native::vulkan::AdapterDiscoveryOptions vulkanOptions;
- vulkanOptions.forceSwiftShader = true;
- DAWN_TRY(DiscoverAdaptersInternal(&vulkanOptions));
- }
+ if (GetEnabledBackends()[wgpu::BackendType::Vulkan]) {
+ dawn_native::vulkan::AdapterDiscoveryOptions vulkanOptions;
+ vulkanOptions.forceSwiftShader = true;
+ DAWN_TRY(DiscoverAdaptersInternal(&vulkanOptions));
+ }
#else
- return Ref<AdapterBase>(nullptr);
+ return Ref<AdapterBase>(nullptr);
#endif // defined(DAWN_ENABLE_BACKEND_VULKAN)
- } else {
- DiscoverDefaultAdapters();
- }
+ } else {
+ DiscoverDefaultAdapters();
+ }
- wgpu::AdapterType preferredType;
- switch (options->powerPreference) {
- case wgpu::PowerPreference::LowPower:
- preferredType = wgpu::AdapterType::IntegratedGPU;
- break;
- case wgpu::PowerPreference::Undefined:
- case wgpu::PowerPreference::HighPerformance:
- preferredType = wgpu::AdapterType::DiscreteGPU;
- break;
- }
+ wgpu::AdapterType preferredType;
+ switch (options->powerPreference) {
+ case wgpu::PowerPreference::LowPower:
+ preferredType = wgpu::AdapterType::IntegratedGPU;
+ break;
+ case wgpu::PowerPreference::Undefined:
+ case wgpu::PowerPreference::HighPerformance:
+ preferredType = wgpu::AdapterType::DiscreteGPU;
+ break;
+ }
- std::optional<size_t> discreteGPUAdapterIndex;
- std::optional<size_t> integratedGPUAdapterIndex;
- std::optional<size_t> cpuAdapterIndex;
- std::optional<size_t> unknownAdapterIndex;
+ std::optional<size_t> discreteGPUAdapterIndex;
+ std::optional<size_t> integratedGPUAdapterIndex;
+ std::optional<size_t> cpuAdapterIndex;
+ std::optional<size_t> unknownAdapterIndex;
- for (size_t i = 0; i < mAdapters.size(); ++i) {
- AdapterProperties properties;
- mAdapters[i]->APIGetProperties(&properties);
+ for (size_t i = 0; i < mAdapters.size(); ++i) {
+ AdapterProperties properties;
+ mAdapters[i]->APIGetProperties(&properties);
- if (options->forceFallbackAdapter) {
- if (!gpu_info::IsSwiftshader(properties.vendorID, properties.deviceID)) {
- continue;
- }
- return mAdapters[i];
- }
- if (properties.adapterType == preferredType) {
- return mAdapters[i];
- }
- switch (properties.adapterType) {
- case wgpu::AdapterType::DiscreteGPU:
- discreteGPUAdapterIndex = i;
- break;
- case wgpu::AdapterType::IntegratedGPU:
- integratedGPUAdapterIndex = i;
- break;
- case wgpu::AdapterType::CPU:
- cpuAdapterIndex = i;
- break;
- case wgpu::AdapterType::Unknown:
- unknownAdapterIndex = i;
- break;
+ if (options->forceFallbackAdapter) {
+ if (!gpu_info::IsGoogleSwiftshader(properties.vendorID, properties.deviceID)) {
+ continue;
}
+ return mAdapters[i];
}
-
- // For now, we always prefer the discrete GPU
- if (discreteGPUAdapterIndex) {
- return mAdapters[*discreteGPUAdapterIndex];
- }
- if (integratedGPUAdapterIndex) {
- return mAdapters[*integratedGPUAdapterIndex];
+ if (properties.adapterType == preferredType) {
+ return mAdapters[i];
}
- if (cpuAdapterIndex) {
- return mAdapters[*cpuAdapterIndex];
- }
- if (unknownAdapterIndex) {
- return mAdapters[*unknownAdapterIndex];
+ switch (properties.adapterType) {
+ case wgpu::AdapterType::DiscreteGPU:
+ discreteGPUAdapterIndex = i;
+ break;
+ case wgpu::AdapterType::IntegratedGPU:
+ integratedGPUAdapterIndex = i;
+ break;
+ case wgpu::AdapterType::CPU:
+ cpuAdapterIndex = i;
+ break;
+ case wgpu::AdapterType::Unknown:
+ unknownAdapterIndex = i;
+ break;
}
+ }
- return Ref<AdapterBase>(nullptr);
+ // For now, we always prefer the discrete GPU
+ if (discreteGPUAdapterIndex) {
+ return mAdapters[*discreteGPUAdapterIndex];
+ }
+ if (integratedGPUAdapterIndex) {
+ return mAdapters[*integratedGPUAdapterIndex];
+ }
+ if (cpuAdapterIndex) {
+ return mAdapters[*cpuAdapterIndex];
+ }
+ if (unknownAdapterIndex) {
+ return mAdapters[*unknownAdapterIndex];
}
- void InstanceBase::DiscoverDefaultAdapters() {
- for (wgpu::BackendType b : IterateBitSet(GetEnabledBackends())) {
- EnsureBackendConnection(b);
- }
+ return Ref<AdapterBase>(nullptr);
+}
- if (mDiscoveredDefaultAdapters) {
- return;
- }
+void InstanceBase::DiscoverDefaultAdapters() {
+ for (wgpu::BackendType b : IterateBitSet(GetEnabledBackends())) {
+ EnsureBackendConnection(b);
+ }
- // Query and merge all default adapters for all backends
- for (std::unique_ptr<BackendConnection>& backend : mBackends) {
- std::vector<Ref<AdapterBase>> backendAdapters = backend->DiscoverDefaultAdapters();
+ if (mDiscoveredDefaultAdapters) {
+ return;
+ }
- for (Ref<AdapterBase>& adapter : backendAdapters) {
- ASSERT(adapter->GetBackendType() == backend->GetType());
- ASSERT(adapter->GetInstance() == this);
- mAdapters.push_back(std::move(adapter));
- }
- }
+ // Query and merge all default adapters for all backends
+ for (std::unique_ptr<BackendConnection>& backend : mBackends) {
+ std::vector<Ref<AdapterBase>> backendAdapters = backend->DiscoverDefaultAdapters();
- mDiscoveredDefaultAdapters = true;
+ for (Ref<AdapterBase>& adapter : backendAdapters) {
+ ASSERT(adapter->GetBackendType() == backend->GetType());
+ ASSERT(adapter->GetInstance() == this);
+ mAdapters.push_back(std::move(adapter));
+ }
}
- // This is just a wrapper around the real logic that uses Error.h error handling.
- bool InstanceBase::DiscoverAdapters(const AdapterDiscoveryOptionsBase* options) {
- return !ConsumedError(DiscoverAdaptersInternal(options));
- }
+ mDiscoveredDefaultAdapters = true;
+}
- const ToggleInfo* InstanceBase::GetToggleInfo(const char* toggleName) {
- return mTogglesInfo.GetToggleInfo(toggleName);
- }
+// This is just a wrapper around the real logic that uses Error.h error handling.
+bool InstanceBase::DiscoverAdapters(const AdapterDiscoveryOptionsBase* options) {
+ return !ConsumedError(DiscoverAdaptersInternal(options));
+}
- Toggle InstanceBase::ToggleNameToEnum(const char* toggleName) {
- return mTogglesInfo.ToggleNameToEnum(toggleName);
- }
+const ToggleInfo* InstanceBase::GetToggleInfo(const char* toggleName) {
+ return mTogglesInfo.GetToggleInfo(toggleName);
+}
- const FeatureInfo* InstanceBase::GetFeatureInfo(wgpu::FeatureName feature) {
- return mFeaturesInfo.GetFeatureInfo(feature);
- }
+Toggle InstanceBase::ToggleNameToEnum(const char* toggleName) {
+ return mTogglesInfo.ToggleNameToEnum(toggleName);
+}
- const std::vector<Ref<AdapterBase>>& InstanceBase::GetAdapters() const {
- return mAdapters;
+const FeatureInfo* InstanceBase::GetFeatureInfo(wgpu::FeatureName feature) {
+ return mFeaturesInfo.GetFeatureInfo(feature);
+}
+
+const std::vector<Ref<AdapterBase>>& InstanceBase::GetAdapters() const {
+ return mAdapters;
+}
+
+void InstanceBase::EnsureBackendConnection(wgpu::BackendType backendType) {
+ if (mBackendsConnected[backendType]) {
+ return;
}
- void InstanceBase::EnsureBackendConnection(wgpu::BackendType backendType) {
- if (mBackendsConnected[backendType]) {
- return;
+ auto Register = [this](BackendConnection* connection, wgpu::BackendType expectedType) {
+ if (connection != nullptr) {
+ ASSERT(connection->GetType() == expectedType);
+ ASSERT(connection->GetInstance() == this);
+ mBackends.push_back(std::unique_ptr<BackendConnection>(connection));
}
+ };
- auto Register = [this](BackendConnection* connection, wgpu::BackendType expectedType) {
- if (connection != nullptr) {
- ASSERT(connection->GetType() == expectedType);
- ASSERT(connection->GetInstance() == this);
- mBackends.push_back(std::unique_ptr<BackendConnection>(connection));
- }
- };
-
- switch (backendType) {
+ switch (backendType) {
#if defined(DAWN_ENABLE_BACKEND_NULL)
- case wgpu::BackendType::Null:
- Register(null::Connect(this), wgpu::BackendType::Null);
- break;
+ case wgpu::BackendType::Null:
+ Register(null::Connect(this), wgpu::BackendType::Null);
+ break;
#endif // defined(DAWN_ENABLE_BACKEND_NULL)
#if defined(DAWN_ENABLE_BACKEND_D3D12)
- case wgpu::BackendType::D3D12:
- Register(d3d12::Connect(this), wgpu::BackendType::D3D12);
- break;
+ case wgpu::BackendType::D3D12:
+ Register(d3d12::Connect(this), wgpu::BackendType::D3D12);
+ break;
#endif // defined(DAWN_ENABLE_BACKEND_D3D12)
#if defined(DAWN_ENABLE_BACKEND_METAL)
- case wgpu::BackendType::Metal:
- Register(metal::Connect(this), wgpu::BackendType::Metal);
- break;
+ case wgpu::BackendType::Metal:
+ Register(metal::Connect(this), wgpu::BackendType::Metal);
+ break;
#endif // defined(DAWN_ENABLE_BACKEND_METAL)
#if defined(DAWN_ENABLE_BACKEND_VULKAN)
- case wgpu::BackendType::Vulkan:
- Register(vulkan::Connect(this), wgpu::BackendType::Vulkan);
- break;
+ case wgpu::BackendType::Vulkan:
+ Register(vulkan::Connect(this), wgpu::BackendType::Vulkan);
+ break;
#endif // defined(DAWN_ENABLE_BACKEND_VULKAN)
#if defined(DAWN_ENABLE_BACKEND_DESKTOP_GL)
- case wgpu::BackendType::OpenGL:
- Register(opengl::Connect(this, wgpu::BackendType::OpenGL),
- wgpu::BackendType::OpenGL);
- break;
+ case wgpu::BackendType::OpenGL:
+ Register(opengl::Connect(this, wgpu::BackendType::OpenGL), wgpu::BackendType::OpenGL);
+ break;
#endif // defined(DAWN_ENABLE_BACKEND_DESKTOP_GL)
#if defined(DAWN_ENABLE_BACKEND_OPENGLES)
- case wgpu::BackendType::OpenGLES:
- Register(opengl::Connect(this, wgpu::BackendType::OpenGLES),
- wgpu::BackendType::OpenGLES);
- break;
+ case wgpu::BackendType::OpenGLES:
+ Register(opengl::Connect(this, wgpu::BackendType::OpenGLES),
+ wgpu::BackendType::OpenGLES);
+ break;
#endif // defined(DAWN_ENABLE_BACKEND_OPENGLES)
- default:
- UNREACHABLE();
- }
-
- mBackendsConnected.set(backendType);
+ default:
+ UNREACHABLE();
}
- MaybeError InstanceBase::DiscoverAdaptersInternal(const AdapterDiscoveryOptionsBase* options) {
- wgpu::BackendType backendType = static_cast<wgpu::BackendType>(options->backendType);
- DAWN_TRY(ValidateBackendType(backendType));
-
- if (!GetEnabledBackends()[backendType]) {
- return DAWN_FORMAT_VALIDATION_ERROR("%s not supported.", backendType);
- }
+ mBackendsConnected.set(backendType);
+}
- EnsureBackendConnection(backendType);
+MaybeError InstanceBase::DiscoverAdaptersInternal(const AdapterDiscoveryOptionsBase* options) {
+ wgpu::BackendType backendType = static_cast<wgpu::BackendType>(options->backendType);
+ DAWN_TRY(ValidateBackendType(backendType));
- bool foundBackend = false;
- for (std::unique_ptr<BackendConnection>& backend : mBackends) {
- if (backend->GetType() != backendType) {
- continue;
- }
- foundBackend = true;
+ if (!GetEnabledBackends()[backendType]) {
+ return DAWN_FORMAT_VALIDATION_ERROR("%s not supported.", backendType);
+ }
- std::vector<Ref<AdapterBase>> newAdapters;
- DAWN_TRY_ASSIGN(newAdapters, backend->DiscoverAdapters(options));
+ EnsureBackendConnection(backendType);
- for (Ref<AdapterBase>& adapter : newAdapters) {
- ASSERT(adapter->GetBackendType() == backend->GetType());
- ASSERT(adapter->GetInstance() == this);
- mAdapters.push_back(std::move(adapter));
- }
+ bool foundBackend = false;
+ for (std::unique_ptr<BackendConnection>& backend : mBackends) {
+ if (backend->GetType() != backendType) {
+ continue;
}
+ foundBackend = true;
- DAWN_INVALID_IF(!foundBackend, "%s not available.", backendType);
- return {};
- }
-
- bool InstanceBase::ConsumedError(MaybeError maybeError) {
- if (maybeError.IsError()) {
- std::unique_ptr<ErrorData> error = maybeError.AcquireError();
+ std::vector<Ref<AdapterBase>> newAdapters;
+ DAWN_TRY_ASSIGN(newAdapters, backend->DiscoverAdapters(options));
- ASSERT(error != nullptr);
- dawn::ErrorLog() << error->GetFormattedMessage();
- return true;
+ for (Ref<AdapterBase>& adapter : newAdapters) {
+ ASSERT(adapter->GetBackendType() == backend->GetType());
+ ASSERT(adapter->GetInstance() == this);
+ mAdapters.push_back(std::move(adapter));
}
- return false;
}
- bool InstanceBase::IsBackendValidationEnabled() const {
- return mBackendValidationLevel != BackendValidationLevel::Disabled;
- }
+ DAWN_INVALID_IF(!foundBackend, "%s not available.", backendType);
+ return {};
+}
- void InstanceBase::SetBackendValidationLevel(BackendValidationLevel level) {
- mBackendValidationLevel = level;
- }
+bool InstanceBase::ConsumedError(MaybeError maybeError) {
+ if (maybeError.IsError()) {
+ std::unique_ptr<ErrorData> error = maybeError.AcquireError();
- BackendValidationLevel InstanceBase::GetBackendValidationLevel() const {
- return mBackendValidationLevel;
+ ASSERT(error != nullptr);
+ dawn::ErrorLog() << error->GetFormattedMessage();
+ return true;
}
+ return false;
+}
- void InstanceBase::EnableBeginCaptureOnStartup(bool beginCaptureOnStartup) {
- mBeginCaptureOnStartup = beginCaptureOnStartup;
- }
+bool InstanceBase::IsBackendValidationEnabled() const {
+ return mBackendValidationLevel != BackendValidationLevel::Disabled;
+}
- bool InstanceBase::IsBeginCaptureOnStartupEnabled() const {
- return mBeginCaptureOnStartup;
- }
+void InstanceBase::SetBackendValidationLevel(BackendValidationLevel level) {
+ mBackendValidationLevel = level;
+}
+
+BackendValidationLevel InstanceBase::GetBackendValidationLevel() const {
+ return mBackendValidationLevel;
+}
+
+void InstanceBase::EnableBeginCaptureOnStartup(bool beginCaptureOnStartup) {
+ mBeginCaptureOnStartup = beginCaptureOnStartup;
+}
- void InstanceBase::SetPlatform(dawn::platform::Platform* platform) {
+bool InstanceBase::IsBeginCaptureOnStartupEnabled() const {
+ return mBeginCaptureOnStartup;
+}
+
+void InstanceBase::SetPlatform(dawn::platform::Platform* platform) {
+ if (platform == nullptr) {
+ mPlatform = mDefaultPlatform.get();
+ } else {
mPlatform = platform;
}
+ mBlobCache = std::make_unique<BlobCache>(GetCachingInterface(platform));
+}
- dawn::platform::Platform* InstanceBase::GetPlatform() {
- if (mPlatform != nullptr) {
- return mPlatform;
- }
+void InstanceBase::SetPlatformForTesting(dawn::platform::Platform* platform) {
+ SetPlatform(platform);
+}
- if (mDefaultPlatform == nullptr) {
- mDefaultPlatform = std::make_unique<dawn::platform::Platform>();
- }
- return mDefaultPlatform.get();
- }
+dawn::platform::Platform* InstanceBase::GetPlatform() {
+ return mPlatform;
+}
- const std::vector<std::string>& InstanceBase::GetRuntimeSearchPaths() const {
- return mRuntimeSearchPaths;
- }
+BlobCache* InstanceBase::GetBlobCache() {
+ return mBlobCache.get();
+}
+
+uint64_t InstanceBase::GetDeviceCountForTesting() const {
+ return mDeviceCountForTesting.load();
+}
+
+void InstanceBase::IncrementDeviceCountForTesting() {
+ mDeviceCountForTesting++;
+}
+
+void InstanceBase::DecrementDeviceCountForTesting() {
+ mDeviceCountForTesting--;
+}
+
+const std::vector<std::string>& InstanceBase::GetRuntimeSearchPaths() const {
+ return mRuntimeSearchPaths;
+}
- const XlibXcbFunctions* InstanceBase::GetOrCreateXlibXcbFunctions() {
+const XlibXcbFunctions* InstanceBase::GetOrCreateXlibXcbFunctions() {
#if defined(DAWN_USE_X11)
- if (mXlibXcbFunctions == nullptr) {
- mXlibXcbFunctions = std::make_unique<XlibXcbFunctions>();
- }
- return mXlibXcbFunctions.get();
+ if (mXlibXcbFunctions == nullptr) {
+ mXlibXcbFunctions = std::make_unique<XlibXcbFunctions>();
+ }
+ return mXlibXcbFunctions.get();
#else
- UNREACHABLE();
+ UNREACHABLE();
#endif // defined(DAWN_USE_X11)
- }
+}
- Surface* InstanceBase::APICreateSurface(const SurfaceDescriptor* descriptor) {
- if (ConsumedError(ValidateSurfaceDescriptor(this, descriptor))) {
- return nullptr;
- }
-
- return new Surface(this, descriptor);
+Surface* InstanceBase::APICreateSurface(const SurfaceDescriptor* descriptor) {
+ if (ConsumedError(ValidateSurfaceDescriptor(this, descriptor))) {
+ return Surface::MakeError(this);
}
+ return new Surface(this, descriptor);
+}
+
} // namespace dawn::native
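The adapter selection in RequestAdapterInternal above boils down to a fixed preference order. The following standalone sketch (not Dawn code; FakeAdapter and PickAdapter are invented for illustration) shows the same ordering: an exact match for the preferred type wins immediately, otherwise discrete > integrated > CPU > unknown:

```cpp
#include <cstddef>
#include <iostream>
#include <optional>
#include <string>
#include <vector>

enum class AdapterType { DiscreteGPU, IntegratedGPU, CPU, Unknown };

struct FakeAdapter {
    std::string name;
    AdapterType type;
};

std::optional<size_t> PickAdapter(const std::vector<FakeAdapter>& adapters,
                                  AdapterType preferred) {
    std::optional<size_t> discrete, integrated, cpu, unknown;
    for (size_t i = 0; i < adapters.size(); ++i) {
        if (adapters[i].type == preferred) {
            return i;  // exact match for the power preference wins immediately
        }
        switch (adapters[i].type) {
            case AdapterType::DiscreteGPU:   discrete = i;   break;
            case AdapterType::IntegratedGPU: integrated = i; break;
            case AdapterType::CPU:           cpu = i;        break;
            case AdapterType::Unknown:       unknown = i;    break;
        }
    }
    // Fallback order mirrors the code above: discrete, integrated, CPU, unknown.
    if (discrete)   return discrete;
    if (integrated) return integrated;
    if (cpu)        return cpu;
    return unknown;
}

int main() {
    std::vector<FakeAdapter> adapters = {{"llvmpipe", AdapterType::CPU},
                                         {"iGPU", AdapterType::IntegratedGPU}};
    // LowPower maps to IntegratedGPU in the code above.
    auto index = PickAdapter(adapters, AdapterType::IntegratedGPU);
    std::cout << "picked: " << (index ? adapters[*index].name : "none") << "\n";
    return 0;
}
```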
diff --git a/chromium/third_party/dawn/src/dawn/native/Instance.h b/chromium/third_party/dawn/src/dawn/native/Instance.h
index b574f6335e5..581cb8519b8 100644
--- a/chromium/third_party/dawn/src/dawn/native/Instance.h
+++ b/chromium/third_party/dawn/src/dawn/native/Instance.h
@@ -15,116 +15,128 @@
#ifndef SRC_DAWN_NATIVE_INSTANCE_H_
#define SRC_DAWN_NATIVE_INSTANCE_H_
+#include <array>
+#include <memory>
+#include <string>
+#include <unordered_map>
+#include <vector>
+
#include "dawn/common/RefCounted.h"
#include "dawn/common/ityp_bitset.h"
#include "dawn/native/Adapter.h"
#include "dawn/native/BackendConnection.h"
+#include "dawn/native/BlobCache.h"
#include "dawn/native/Features.h"
#include "dawn/native/Toggles.h"
#include "dawn/native/dawn_platform.h"
-#include <array>
-#include <memory>
-#include <unordered_map>
-#include <vector>
-
namespace dawn::platform {
- class Platform;
+class Platform;
} // namespace dawn::platform
namespace dawn::native {
- class Surface;
- class XlibXcbFunctions;
+class Surface;
+class XlibXcbFunctions;
+
+using BackendsBitset = ityp::bitset<wgpu::BackendType, kEnumCount<wgpu::BackendType>>;
- using BackendsBitset = ityp::bitset<wgpu::BackendType, kEnumCount<wgpu::BackendType>>;
+InstanceBase* APICreateInstance(const InstanceDescriptor* descriptor);
- InstanceBase* APICreateInstance(const InstanceDescriptor* descriptor);
+// This is called InstanceBase for consistency across the frontend, even if the backends don't
+// specialize this class.
+class InstanceBase final : public RefCounted {
+ public:
+ static Ref<InstanceBase> Create(const InstanceDescriptor* descriptor = nullptr);
- // This is called InstanceBase for consistency across the frontend, even if the backends don't
- // specialize this class.
- class InstanceBase final : public RefCounted {
- public:
- static Ref<InstanceBase> Create(const InstanceDescriptor* descriptor = nullptr);
+ void APIRequestAdapter(const RequestAdapterOptions* options,
+ WGPURequestAdapterCallback callback,
+ void* userdata);
- void APIRequestAdapter(const RequestAdapterOptions* options,
- WGPURequestAdapterCallback callback,
- void* userdata);
+ void DiscoverDefaultAdapters();
+ bool DiscoverAdapters(const AdapterDiscoveryOptionsBase* options);
- void DiscoverDefaultAdapters();
- bool DiscoverAdapters(const AdapterDiscoveryOptionsBase* options);
+ const std::vector<Ref<AdapterBase>>& GetAdapters() const;
- const std::vector<Ref<AdapterBase>>& GetAdapters() const;
+    // Used to handle errors that happen up to device creation.
+ bool ConsumedError(MaybeError maybeError);
- // Used to handle error that happen up to device creation.
- bool ConsumedError(MaybeError maybeError);
+ // Used to query the details of a toggle. Return nullptr if toggleName is not a valid name
+ // of a toggle supported in Dawn.
+ const ToggleInfo* GetToggleInfo(const char* toggleName);
+ Toggle ToggleNameToEnum(const char* toggleName);
- // Used to query the details of a toggle. Return nullptr if toggleName is not a valid name
- // of a toggle supported in Dawn.
- const ToggleInfo* GetToggleInfo(const char* toggleName);
- Toggle ToggleNameToEnum(const char* toggleName);
+    // Used to query the details of a feature. Return nullptr if featureName is not a valid
+    // name of a feature supported in Dawn.
+ const FeatureInfo* GetFeatureInfo(wgpu::FeatureName feature);
- // Used to query the details of an feature. Return nullptr if featureName is not a valid
- // name of an feature supported in Dawn.
- const FeatureInfo* GetFeatureInfo(wgpu::FeatureName feature);
+ bool IsBackendValidationEnabled() const;
+ void SetBackendValidationLevel(BackendValidationLevel level);
+ BackendValidationLevel GetBackendValidationLevel() const;
- bool IsBackendValidationEnabled() const;
- void SetBackendValidationLevel(BackendValidationLevel level);
- BackendValidationLevel GetBackendValidationLevel() const;
+ void EnableBeginCaptureOnStartup(bool beginCaptureOnStartup);
+ bool IsBeginCaptureOnStartupEnabled() const;
- void EnableBeginCaptureOnStartup(bool beginCaptureOnStartup);
- bool IsBeginCaptureOnStartupEnabled() const;
+ // TODO(dawn:1374): SetPlatform should become a private helper, and SetPlatformForTesting
+ // will become the NOT thread-safe testing version exposed for special testing cases.
+ void SetPlatform(dawn::platform::Platform* platform);
+ void SetPlatformForTesting(dawn::platform::Platform* platform);
+ dawn::platform::Platform* GetPlatform();
+ BlobCache* GetBlobCache();
- void SetPlatform(dawn::platform::Platform* platform);
- dawn::platform::Platform* GetPlatform();
+ uint64_t GetDeviceCountForTesting() const;
+ void IncrementDeviceCountForTesting();
+ void DecrementDeviceCountForTesting();
- const std::vector<std::string>& GetRuntimeSearchPaths() const;
+ const std::vector<std::string>& GetRuntimeSearchPaths() const;
- // Get backend-independent libraries that need to be loaded dynamically.
- const XlibXcbFunctions* GetOrCreateXlibXcbFunctions();
+ // Get backend-independent libraries that need to be loaded dynamically.
+ const XlibXcbFunctions* GetOrCreateXlibXcbFunctions();
- // Dawn API
- Surface* APICreateSurface(const SurfaceDescriptor* descriptor);
+ // Dawn API
+ Surface* APICreateSurface(const SurfaceDescriptor* descriptor);
- private:
- InstanceBase() = default;
- ~InstanceBase() = default;
+ private:
+ InstanceBase();
+ ~InstanceBase() override;
- InstanceBase(const InstanceBase& other) = delete;
- InstanceBase& operator=(const InstanceBase& other) = delete;
+ InstanceBase(const InstanceBase& other) = delete;
+ InstanceBase& operator=(const InstanceBase& other) = delete;
- MaybeError Initialize(const InstanceDescriptor* descriptor);
+ MaybeError Initialize(const InstanceDescriptor* descriptor);
- // Lazily creates connections to all backends that have been compiled.
- void EnsureBackendConnection(wgpu::BackendType backendType);
+ // Lazily creates connections to all backends that have been compiled.
+ void EnsureBackendConnection(wgpu::BackendType backendType);
- MaybeError DiscoverAdaptersInternal(const AdapterDiscoveryOptionsBase* options);
+ MaybeError DiscoverAdaptersInternal(const AdapterDiscoveryOptionsBase* options);
- ResultOrError<Ref<AdapterBase>> RequestAdapterInternal(
- const RequestAdapterOptions* options);
+ ResultOrError<Ref<AdapterBase>> RequestAdapterInternal(const RequestAdapterOptions* options);
- std::vector<std::string> mRuntimeSearchPaths;
+ std::vector<std::string> mRuntimeSearchPaths;
- BackendsBitset mBackendsConnected;
+ BackendsBitset mBackendsConnected;
- bool mDiscoveredDefaultAdapters = false;
+ bool mDiscoveredDefaultAdapters = false;
- bool mBeginCaptureOnStartup = false;
- BackendValidationLevel mBackendValidationLevel = BackendValidationLevel::Disabled;
+ bool mBeginCaptureOnStartup = false;
+ BackendValidationLevel mBackendValidationLevel = BackendValidationLevel::Disabled;
- dawn::platform::Platform* mPlatform = nullptr;
- std::unique_ptr<dawn::platform::Platform> mDefaultPlatform;
+ dawn::platform::Platform* mPlatform = nullptr;
+ std::unique_ptr<dawn::platform::Platform> mDefaultPlatform;
+ std::unique_ptr<BlobCache> mBlobCache;
- std::vector<std::unique_ptr<BackendConnection>> mBackends;
- std::vector<Ref<AdapterBase>> mAdapters;
+ std::vector<std::unique_ptr<BackendConnection>> mBackends;
+ std::vector<Ref<AdapterBase>> mAdapters;
- FeaturesInfo mFeaturesInfo;
- TogglesInfo mTogglesInfo;
+ FeaturesInfo mFeaturesInfo;
+ TogglesInfo mTogglesInfo;
#if defined(DAWN_USE_X11)
- std::unique_ptr<XlibXcbFunctions> mXlibXcbFunctions;
+ std::unique_ptr<XlibXcbFunctions> mXlibXcbFunctions;
#endif // defined(DAWN_USE_X11)
- };
+
+ std::atomic_uint64_t mDeviceCountForTesting{0};
+};
} // namespace dawn::native
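InstanceBase above follows a common Dawn construction pattern: a private constructor, a static Create() that returns a ref-counted handle, and a fallible Initialize() step whose failure becomes a null return. A minimal standalone sketch of that shape, using std::shared_ptr as a stand-in for Dawn's Ref<T>:

```cpp
#include <iostream>
#include <memory>

class Instance {
  public:
    static std::shared_ptr<Instance> Create(bool simulateFailure = false) {
        // std::shared_ptr stands in for Dawn's Ref<T>/AcquireRef here.
        std::shared_ptr<Instance> instance(new Instance());
        if (!instance->Initialize(simulateFailure)) {
            return nullptr;  // consumed error -> the caller sees a null instance
        }
        return instance;
    }

  private:
    Instance() = default;
    bool Initialize(bool simulateFailure) { return !simulateFailure; }
};

int main() {
    auto ok = Instance::Create();
    auto bad = Instance::Create(/*simulateFailure=*/true);
    std::cout << "ok: " << (ok != nullptr) << ", bad: " << (bad != nullptr) << "\n";
    return 0;
}
```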
diff --git a/chromium/third_party/dawn/src/dawn/native/IntegerTypes.h b/chromium/third_party/dawn/src/dawn/native/IntegerTypes.h
index 48658e28828..0d5c7f89ed0 100644
--- a/chromium/third_party/dawn/src/dawn/native/IntegerTypes.h
+++ b/chromium/third_party/dawn/src/dawn/native/IntegerTypes.h
@@ -15,61 +15,61 @@
#ifndef SRC_DAWN_NATIVE_INTEGERTYPES_H_
#define SRC_DAWN_NATIVE_INTEGERTYPES_H_
+#include <cstdint>
+
#include "dawn/common/Constants.h"
#include "dawn/common/TypedInteger.h"
-#include <cstdint>
-
namespace dawn::native {
- // Binding numbers in the shader and BindGroup/BindGroupLayoutDescriptors
- using BindingNumber = TypedInteger<struct BindingNumberT, uint32_t>;
- constexpr BindingNumber kMaxBindingNumberTyped = BindingNumber(kMaxBindingNumber);
+// Binding numbers in the shader and BindGroup/BindGroupLayoutDescriptors
+using BindingNumber = TypedInteger<struct BindingNumberT, uint32_t>;
+constexpr BindingNumber kMaxBindingNumberTyped = BindingNumber(kMaxBindingNumber);
- // Binding numbers get mapped to a packed range of indices
- using BindingIndex = TypedInteger<struct BindingIndexT, uint32_t>;
+// Binding numbers get mapped to a packed range of indices
+using BindingIndex = TypedInteger<struct BindingIndexT, uint32_t>;
- using BindGroupIndex = TypedInteger<struct BindGroupIndexT, uint32_t>;
+using BindGroupIndex = TypedInteger<struct BindGroupIndexT, uint32_t>;
- constexpr BindGroupIndex kMaxBindGroupsTyped = BindGroupIndex(kMaxBindGroups);
+constexpr BindGroupIndex kMaxBindGroupsTyped = BindGroupIndex(kMaxBindGroups);
- using ColorAttachmentIndex = TypedInteger<struct ColorAttachmentIndexT, uint8_t>;
+using ColorAttachmentIndex = TypedInteger<struct ColorAttachmentIndexT, uint8_t>;
- constexpr ColorAttachmentIndex kMaxColorAttachmentsTyped =
- ColorAttachmentIndex(kMaxColorAttachments);
+constexpr ColorAttachmentIndex kMaxColorAttachmentsTyped =
+ ColorAttachmentIndex(kMaxColorAttachments);
- using VertexBufferSlot = TypedInteger<struct VertexBufferSlotT, uint8_t>;
- using VertexAttributeLocation = TypedInteger<struct VertexAttributeLocationT, uint8_t>;
+using VertexBufferSlot = TypedInteger<struct VertexBufferSlotT, uint8_t>;
+using VertexAttributeLocation = TypedInteger<struct VertexAttributeLocationT, uint8_t>;
- constexpr VertexBufferSlot kMaxVertexBuffersTyped = VertexBufferSlot(kMaxVertexBuffers);
- constexpr VertexAttributeLocation kMaxVertexAttributesTyped =
- VertexAttributeLocation(kMaxVertexAttributes);
+constexpr VertexBufferSlot kMaxVertexBuffersTyped = VertexBufferSlot(kMaxVertexBuffers);
+constexpr VertexAttributeLocation kMaxVertexAttributesTyped =
+ VertexAttributeLocation(kMaxVertexAttributes);
- // Serials are 64bit integers that are incremented by one each time to produce unique values.
- // Some serials (like queue serials) are compared numerically to know which one is before
- // another, while some serials are only checked for equality. We call serials only checked
- // for equality IDs.
+// Serials are 64bit integers that are incremented by one each time to produce unique values.
+// Some serials (like queue serials) are compared numerically to know which one is before
+// another, while some serials are only checked for equality. We call serials only checked
+// for equality IDs.
- // Buffer mapping requests are stored outside of the buffer while they are being processed and
- // cannot be invalidated. Instead they are associated with an ID, and when a map request is
- // finished, the mapping callback is fired only if its ID matches the ID if the last request
- // that was sent.
- using MapRequestID = TypedInteger<struct MapRequestIDT, uint64_t>;
+// Buffer mapping requests are stored outside of the buffer while they are being processed and
+// cannot be invalidated. Instead they are associated with an ID, and when a map request is
+// finished, the mapping callback is fired only if its ID matches the ID of the last request
+// that was sent.
+using MapRequestID = TypedInteger<struct MapRequestIDT, uint64_t>;
- // The type for the WebGPU API fence serial values.
- using FenceAPISerial = TypedInteger<struct FenceAPISerialT, uint64_t>;
+// The type for the WebGPU API fence serial values.
+using FenceAPISerial = TypedInteger<struct FenceAPISerialT, uint64_t>;
- // A serial used to watch the progression of GPU execution on a queue, each time operations
- // that need to be followed individually are scheduled for execution on a queue, the serial
- // is incremented by one. This way to know if something is done executing, we just need to
- // compare its serial with the currently completed serial.
- using ExecutionSerial = TypedInteger<struct QueueSerialT, uint64_t>;
- constexpr ExecutionSerial kMaxExecutionSerial = ExecutionSerial(~uint64_t(0));
+// A serial used to watch the progression of GPU execution on a queue. Each time operations
+// that need to be followed individually are scheduled for execution on a queue, the serial
+// is incremented by one. This way, to know if something is done executing, we just need to
+// compare its serial with the currently completed serial.
+using ExecutionSerial = TypedInteger<struct QueueSerialT, uint64_t>;
+constexpr ExecutionSerial kMaxExecutionSerial = ExecutionSerial(~uint64_t(0));
- // An identifier that indicates which Pipeline a BindGroupLayout is compatible with. Pipelines
- // created with a default layout will produce BindGroupLayouts with a non-zero compatibility
- // token, which prevents them (and any BindGroups created with them) from being used with any
- // other pipelines.
- using PipelineCompatibilityToken = TypedInteger<struct PipelineCompatibilityTokenT, uint64_t>;
+// An identifier that indicates which Pipeline a BindGroupLayout is compatible with. Pipelines
+// created with a default layout will produce BindGroupLayouts with a non-zero compatibility
+// token, which prevents them (and any BindGroups created with them) from being used with any
+// other pipelines.
+using PipelineCompatibilityToken = TypedInteger<struct PipelineCompatibilityTokenT, uint64_t>;
} // namespace dawn::native
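The aliases above exist so that semantically different integers cannot be mixed by accident. A standalone sketch of the idea (this is not Dawn's TypedInteger, which also adds arithmetic operators and overflow checks; the wrapper here is deliberately minimal):

```cpp
#include <cstdint>
#include <iostream>

template <typename Tag, typename T>
class TypedInt {
  public:
    constexpr explicit TypedInt(T value) : mValue(value) {}
    constexpr T value() const { return mValue; }
    constexpr bool operator==(TypedInt rhs) const { return mValue == rhs.mValue; }

  private:
    T mValue;
};

// Distinct tag types make these two aliases incompatible at compile time,
// even though both wrap a uint32_t.
using BindingNumber = TypedInt<struct BindingNumberT, uint32_t>;
using BindingIndex = TypedInt<struct BindingIndexT, uint32_t>;

int main() {
    BindingNumber number(3);
    BindingIndex index(3);
    // number == index;  // would not compile: the tag types differ
    std::cout << "binding number " << number.value() << ", packed index "
              << index.value() << "\n";
    return 0;
}
```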
diff --git a/chromium/third_party/dawn/src/dawn/native/InternalPipelineStore.cpp b/chromium/third_party/dawn/src/dawn/native/InternalPipelineStore.cpp
index a2532aa8a68..bc28cb725e0 100644
--- a/chromium/third_party/dawn/src/dawn/native/InternalPipelineStore.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/InternalPipelineStore.cpp
@@ -14,25 +14,24 @@
#include "dawn/native/InternalPipelineStore.h"
+#include <unordered_map>
+
#include "dawn/native/ComputePipeline.h"
#include "dawn/native/Device.h"
#include "dawn/native/RenderPipeline.h"
#include "dawn/native/ShaderModule.h"
-#include <unordered_map>
-
namespace dawn::native {
- class RenderPipelineBase;
- class ShaderModuleBase;
+class RenderPipelineBase;
+class ShaderModuleBase;
- InternalPipelineStore::InternalPipelineStore(DeviceBase* device)
- : scratchStorage(device, wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::Storage),
- scratchIndirectStorage(device,
- wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::Indirect |
- wgpu::BufferUsage::Storage) {
- }
+InternalPipelineStore::InternalPipelineStore(DeviceBase* device)
+ : scratchStorage(device, wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::Storage),
+ scratchIndirectStorage(
+ device,
+ wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::Indirect | wgpu::BufferUsage::Storage) {}
- InternalPipelineStore::~InternalPipelineStore() = default;
+InternalPipelineStore::~InternalPipelineStore() = default;
} // namespace dawn::native
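The constructor above creates its scratch buffers with several wgpu::BufferUsage bits OR'd together so one buffer can serve as copy destination, storage binding, and (for the second buffer) indirect buffer. A standalone sketch of that flag-composition pattern (the enum values below are invented for illustration and do not match the real wgpu::BufferUsage values):

```cpp
#include <cstdint>
#include <iostream>

enum class BufferUsage : uint32_t {
    CopyDst = 0x1,
    Indirect = 0x2,
    Storage = 0x4,
};

constexpr BufferUsage operator|(BufferUsage a, BufferUsage b) {
    return static_cast<BufferUsage>(static_cast<uint32_t>(a) | static_cast<uint32_t>(b));
}

constexpr bool HasUsage(BufferUsage usages, BufferUsage usage) {
    return (static_cast<uint32_t>(usages) & static_cast<uint32_t>(usage)) != 0;
}

int main() {
    constexpr BufferUsage scratchIndirectUsages =
        BufferUsage::CopyDst | BufferUsage::Indirect | BufferUsage::Storage;
    std::cout << "usable as indirect buffer: "
              << HasUsage(scratchIndirectUsages, BufferUsage::Indirect) << "\n";
    return 0;
}
```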
diff --git a/chromium/third_party/dawn/src/dawn/native/InternalPipelineStore.h b/chromium/third_party/dawn/src/dawn/native/InternalPipelineStore.h
index 4f80d9db4b9..3defe672604 100644
--- a/chromium/third_party/dawn/src/dawn/native/InternalPipelineStore.h
+++ b/chromium/third_party/dawn/src/dawn/native/InternalPipelineStore.h
@@ -15,45 +15,44 @@
#ifndef SRC_DAWN_NATIVE_INTERNALPIPELINESTORE_H_
#define SRC_DAWN_NATIVE_INTERNALPIPELINESTORE_H_
+#include <unordered_map>
+
#include "dawn/native/ObjectBase.h"
#include "dawn/native/ScratchBuffer.h"
#include "dawn/native/dawn_platform.h"
-#include <unordered_map>
-
namespace dawn::native {
- class DeviceBase;
- class RenderPipelineBase;
- class ShaderModuleBase;
+class DeviceBase;
+class RenderPipelineBase;
+class ShaderModuleBase;
- // Every DeviceBase owns an InternalPipelineStore. This is a general-purpose cache for
- // long-lived objects scoped to a device and used to support arbitrary pipeline operations.
- struct InternalPipelineStore {
- explicit InternalPipelineStore(DeviceBase* device);
- ~InternalPipelineStore();
+// Every DeviceBase owns an InternalPipelineStore. This is a general-purpose cache for
+// long-lived objects scoped to a device and used to support arbitrary pipeline operations.
+struct InternalPipelineStore {
+ explicit InternalPipelineStore(DeviceBase* device);
+ ~InternalPipelineStore();
- std::unordered_map<wgpu::TextureFormat, Ref<RenderPipelineBase>>
- copyTextureForBrowserPipelines;
+ std::unordered_map<wgpu::TextureFormat, Ref<RenderPipelineBase>> copyTextureForBrowserPipelines;
- Ref<ShaderModuleBase> copyTextureForBrowser;
+ Ref<ShaderModuleBase> copyTextureForBrowser;
- Ref<ComputePipelineBase> timestampComputePipeline;
- Ref<ShaderModuleBase> timestampCS;
+ Ref<ComputePipelineBase> timestampComputePipeline;
+ Ref<ShaderModuleBase> timestampCS;
- Ref<ShaderModuleBase> dummyFragmentShader;
+ Ref<ShaderModuleBase> placeholderFragmentShader;
- // A scratch buffer suitable for use as a copy destination and storage binding.
- ScratchBuffer scratchStorage;
+ // A scratch buffer suitable for use as a copy destination and storage binding.
+ ScratchBuffer scratchStorage;
- // A scratch buffer suitable for use as a copy destination, storage binding, and indirect
- // buffer for indirect dispatch or draw calls.
- ScratchBuffer scratchIndirectStorage;
+ // A scratch buffer suitable for use as a copy destination, storage binding, and indirect
+ // buffer for indirect dispatch or draw calls.
+ ScratchBuffer scratchIndirectStorage;
- Ref<ComputePipelineBase> renderValidationPipeline;
- Ref<ShaderModuleBase> renderValidationShader;
- Ref<ComputePipelineBase> dispatchIndirectValidationPipeline;
- };
+ Ref<ComputePipelineBase> renderValidationPipeline;
+ Ref<ShaderModuleBase> renderValidationShader;
+ Ref<ComputePipelineBase> dispatchIndirectValidationPipeline;
+};
} // namespace dawn::native
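The maps and Ref<> members above act as a lazily populated, device-scoped cache for long-lived pipeline objects. A standalone sketch of the get-or-create lookup such a cache enables (FakePipeline and GetOrCreatePipeline are invented for illustration):

```cpp
#include <iostream>
#include <memory>
#include <unordered_map>

enum class TextureFormat { RGBA8Unorm, BGRA8Unorm };

struct FakePipeline {
    TextureFormat format;
};

using PipelineCache = std::unordered_map<TextureFormat, std::shared_ptr<FakePipeline>>;

std::shared_ptr<FakePipeline> GetOrCreatePipeline(PipelineCache& cache,
                                                  TextureFormat format) {
    auto it = cache.find(format);
    if (it != cache.end()) {
        return it->second;  // cache hit: reuse the long-lived pipeline
    }
    auto pipeline = std::make_shared<FakePipeline>(FakePipeline{format});
    cache.emplace(format, pipeline);
    return pipeline;
}

int main() {
    PipelineCache cache;
    auto a = GetOrCreatePipeline(cache, TextureFormat::RGBA8Unorm);
    auto b = GetOrCreatePipeline(cache, TextureFormat::RGBA8Unorm);
    std::cout << "same object: " << (a == b) << "\n";
    return 0;
}
```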
diff --git a/chromium/third_party/dawn/src/dawn/native/Limits.cpp b/chromium/third_party/dawn/src/dawn/native/Limits.cpp
index 3b20a1bc997..65af8dfaaaa 100644
--- a/chromium/third_party/dawn/src/dawn/native/Limits.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/Limits.cpp
@@ -14,16 +14,16 @@
#include "dawn/native/Limits.h"
+#include <array>
+
#include "dawn/common/Assert.h"
#include "dawn/common/Math.h"
-#include <array>
-
// clang-format off
// TODO(crbug.com/dawn/685):
// For now, only expose these tiers until metrics can determine better ones.
#define LIMITS_WORKGROUP_STORAGE_SIZE(X) \
- X(Maximum, maxComputeWorkgroupStorageSize, 16352, 32768, 49152, 65536)
+ X(Maximum, maxComputeWorkgroupStorageSize, 16384, 32768, 49152, 65536)
#define LIMITS_STORAGE_BUFFER_BINDING_SIZE(X) \
X(Maximum, maxStorageBufferBindingSize, 134217728, 1073741824, 2147483647, 4294967295)
@@ -69,115 +69,115 @@
LIMITS_OTHER(X)
namespace dawn::native {
- namespace {
- template <uint32_t A, uint32_t B>
- constexpr void StaticAssertSame() {
- static_assert(A == B, "Mismatching tier count in limit group.");
- }
-
- template <uint32_t I, uint32_t... Is>
- constexpr uint32_t ReduceSameValue(std::integer_sequence<uint32_t, I, Is...>) {
- int unused[] = {0, (StaticAssertSame<I, Is>(), 0)...};
- DAWN_UNUSED(unused);
- return I;
- }
-
- enum class LimitClass {
- Alignment,
- Maximum,
- };
-
- template <LimitClass C>
- struct CheckLimit;
-
- template <>
- struct CheckLimit<LimitClass::Alignment> {
- template <typename T>
- static bool IsBetter(T lhs, T rhs) {
- return lhs < rhs;
- }
-
- template <typename T>
- static MaybeError Validate(T supported, T required) {
- DAWN_INVALID_IF(IsBetter(required, supported),
- "Required limit (%u) is lower than the supported limit (%u).",
- required, supported);
- DAWN_INVALID_IF(!IsPowerOfTwo(required),
- "Required limit (%u) is not a power of two.", required);
- return {};
- }
- };
-
- template <>
- struct CheckLimit<LimitClass::Maximum> {
- template <typename T>
- static bool IsBetter(T lhs, T rhs) {
- return lhs > rhs;
- }
-
- template <typename T>
- static MaybeError Validate(T supported, T required) {
- DAWN_INVALID_IF(IsBetter(required, supported),
- "Required limit (%u) is greater than the supported limit (%u).",
- required, supported);
- return {};
- }
- };
-
- template <typename T>
- bool IsLimitUndefined(T value) {
- static_assert(sizeof(T) != sizeof(T), "IsLimitUndefined not implemented for this type");
- return false;
- }
-
- template <>
- bool IsLimitUndefined<uint32_t>(uint32_t value) {
- return value == wgpu::kLimitU32Undefined;
- }
-
- template <>
- bool IsLimitUndefined<uint64_t>(uint64_t value) {
- return value == wgpu::kLimitU64Undefined;
- }
-
- } // namespace
-
- void GetDefaultLimits(Limits* limits) {
- ASSERT(limits != nullptr);
-#define X(Better, limitName, base, ...) limits->limitName = base;
- LIMITS(X)
-#undef X
+namespace {
+template <uint32_t A, uint32_t B>
+constexpr void StaticAssertSame() {
+ static_assert(A == B, "Mismatching tier count in limit group.");
+}
+
+template <uint32_t I, uint32_t... Is>
+constexpr uint32_t ReduceSameValue(std::integer_sequence<uint32_t, I, Is...>) {
+ int unused[] = {0, (StaticAssertSame<I, Is>(), 0)...};
+ DAWN_UNUSED(unused);
+ return I;
+}
+
+enum class LimitClass {
+ Alignment,
+ Maximum,
+};
+
+template <LimitClass C>
+struct CheckLimit;
+
+template <>
+struct CheckLimit<LimitClass::Alignment> {
+ template <typename T>
+ static bool IsBetter(T lhs, T rhs) {
+ return lhs < rhs;
}
- Limits ReifyDefaultLimits(const Limits& limits) {
- Limits out;
-#define X(Class, limitName, base, ...) \
- if (IsLimitUndefined(limits.limitName) || \
- CheckLimit<LimitClass::Class>::IsBetter( \
- static_cast<decltype(limits.limitName)>(base), limits.limitName)) { \
- /* If the limit is undefined or the default is better, use the default */ \
- out.limitName = base; \
- } else { \
- out.limitName = limits.limitName; \
+ template <typename T>
+ static MaybeError Validate(T supported, T required) {
+ DAWN_INVALID_IF(IsBetter(required, supported),
+ "Required limit (%u) is lower than the supported limit (%u).", required,
+ supported);
+ DAWN_INVALID_IF(!IsPowerOfTwo(required), "Required limit (%u) is not a power of two.",
+ required);
+ return {};
}
- LIMITS(X)
-#undef X
- return out;
+};
+
+template <>
+struct CheckLimit<LimitClass::Maximum> {
+ template <typename T>
+ static bool IsBetter(T lhs, T rhs) {
+ return lhs > rhs;
}
- MaybeError ValidateLimits(const Limits& supportedLimits, const Limits& requiredLimits) {
-#define X(Class, limitName, ...) \
- if (!IsLimitUndefined(requiredLimits.limitName)) { \
- DAWN_TRY_CONTEXT(CheckLimit<LimitClass::Class>::Validate( \
- supportedLimits.limitName, requiredLimits.limitName), \
- "validating " #limitName); \
+ template <typename T>
+ static MaybeError Validate(T supported, T required) {
+ DAWN_INVALID_IF(IsBetter(required, supported),
+ "Required limit (%u) is greater than the supported limit (%u).", required,
+ supported);
+ return {};
}
- LIMITS(X)
+};
+
+template <typename T>
+bool IsLimitUndefined(T value) {
+ static_assert(sizeof(T) != sizeof(T), "IsLimitUndefined not implemented for this type");
+ return false;
+}
+
+template <>
+bool IsLimitUndefined<uint32_t>(uint32_t value) {
+ return value == wgpu::kLimitU32Undefined;
+}
+
+template <>
+bool IsLimitUndefined<uint64_t>(uint64_t value) {
+ return value == wgpu::kLimitU64Undefined;
+}
+
+} // namespace
+
+void GetDefaultLimits(Limits* limits) {
+ ASSERT(limits != nullptr);
+#define X(Better, limitName, base, ...) limits->limitName = base;
+ LIMITS(X)
#undef X
- return {};
+}
+
+Limits ReifyDefaultLimits(const Limits& limits) {
+ Limits out;
+#define X(Class, limitName, base, ...) \
+ if (IsLimitUndefined(limits.limitName) || \
+ CheckLimit<LimitClass::Class>::IsBetter(static_cast<decltype(limits.limitName)>(base), \
+ limits.limitName)) { \
+ /* If the limit is undefined or the default is better, use the default */ \
+ out.limitName = base; \
+ } else { \
+ out.limitName = limits.limitName; \
+ }
+ LIMITS(X)
+#undef X
+ return out;
+}
+
+MaybeError ValidateLimits(const Limits& supportedLimits, const Limits& requiredLimits) {
+#define X(Class, limitName, ...) \
+ if (!IsLimitUndefined(requiredLimits.limitName)) { \
+ DAWN_TRY_CONTEXT(CheckLimit<LimitClass::Class>::Validate(supportedLimits.limitName, \
+ requiredLimits.limitName), \
+ "validating " #limitName); \
}
+ LIMITS(X)
+#undef X
+ return {};
+}
- Limits ApplyLimitTiers(Limits limits) {
+Limits ApplyLimitTiers(Limits limits) {
#define X_TIER_COUNT(Better, limitName, ...) , std::integer_sequence<uint64_t, __VA_ARGS__>{}.size()
#define GET_TIER_COUNT(LIMIT_GROUP) \
ReduceSameValue(std::integer_sequence<uint32_t LIMIT_GROUP(X_TIER_COUNT)>{})
@@ -192,25 +192,25 @@ namespace dawn::native {
} \
}
-#define X_CHECK_BETTER_AND_CLAMP(Class, limitName, ...) \
- { \
- constexpr std::array<decltype(Limits::limitName), kTierCount> tiers{__VA_ARGS__}; \
- decltype(Limits::limitName) tierValue = tiers[i - 1]; \
- if (CheckLimit<LimitClass::Class>::IsBetter(tierValue, limits.limitName)) { \
- /* The tier is better. Go to the next tier. */ \
- continue; \
- } else if (tierValue != limits.limitName) { \
- /* Better than the tier. Degrade |limits| to the tier. */ \
- limits.limitName = tiers[i - 1]; \
- } \
+#define X_CHECK_BETTER_AND_CLAMP(Class, limitName, ...) \
+ { \
+ constexpr std::array<decltype(Limits::limitName), kTierCount> tiers{__VA_ARGS__}; \
+ decltype(Limits::limitName) tierValue = tiers[i - 1]; \
+ if (CheckLimit<LimitClass::Class>::IsBetter(tierValue, limits.limitName)) { \
+ /* The tier is better. Go to the next tier. */ \
+ continue; \
+ } else if (tierValue != limits.limitName) { \
+ /* Better than the tier. Degrade |limits| to the tier. */ \
+ limits.limitName = tiers[i - 1]; \
+ } \
}
- LIMITS_EACH_GROUP(X_EACH_GROUP)
+ LIMITS_EACH_GROUP(X_EACH_GROUP)
#undef X_CHECK_BETTER
#undef X_EACH_GROUP
#undef GET_TIER_COUNT
#undef X_TIER_COUNT
- return limits;
- }
+ return limits;
+}
} // namespace dawn::native
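ApplyLimitTiers above degrades reported limits to predefined tiers. A simplified standalone sketch for a single "Maximum"-class limit (the real code clamps whole limit groups to a common tier; the tier values below are the maxComputeWorkgroupStorageSize tiers from this file):

```cpp
#include <array>
#include <cstdint>
#include <iostream>

constexpr std::array<uint32_t, 4> kWorkgroupStorageTiers = {16384, 32768, 49152, 65536};

// Report the largest predefined tier that the supported value fully covers.
uint32_t ApplyTier(uint32_t supported) {
    uint32_t best = 0;
    for (uint32_t tier : kWorkgroupStorageTiers) {
        if (tier <= supported) {
            best = tier;  // this tier is fully supported; try the next one
        }
    }
    return best;
}

int main() {
    std::cout << ApplyTier(40000) << "\n";  // prints 32768
    std::cout << ApplyTier(70000) << "\n";  // prints 65536
    return 0;
}
```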
diff --git a/chromium/third_party/dawn/src/dawn/native/Limits.h b/chromium/third_party/dawn/src/dawn/native/Limits.h
index c8724e48b2a..cc817420534 100644
--- a/chromium/third_party/dawn/src/dawn/native/Limits.h
+++ b/chromium/third_party/dawn/src/dawn/native/Limits.h
@@ -20,23 +20,23 @@
namespace dawn::native {
- struct CombinedLimits {
- Limits v1;
- };
+struct CombinedLimits {
+ Limits v1;
+};
- // Populate |limits| with the default limits.
- void GetDefaultLimits(Limits* limits);
+// Populate |limits| with the default limits.
+void GetDefaultLimits(Limits* limits);
- // Returns a copy of |limits| where all undefined values are replaced
- // with their defaults. Also clamps to the defaults if the provided limits
- // are worse.
- Limits ReifyDefaultLimits(const Limits& limits);
+// Returns a copy of |limits| where all undefined values are replaced
+// with their defaults. Also clamps to the defaults if the provided limits
+// are worse.
+Limits ReifyDefaultLimits(const Limits& limits);
- // Validate that |requiredLimits| are no better than |supportedLimits|.
- MaybeError ValidateLimits(const Limits& supportedLimits, const Limits& requiredLimits);
+// Validate that |requiredLimits| are no better than |supportedLimits|.
+MaybeError ValidateLimits(const Limits& supportedLimits, const Limits& requiredLimits);
- // Returns a copy of |limits| where limit tiers are applied.
- Limits ApplyLimitTiers(Limits limits);
+// Returns a copy of |limits| where limit tiers are applied.
+Limits ApplyLimitTiers(Limits limits);
} // namespace dawn::native
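The validation declared here is directional: a "maximum" limit may not be required above the supported value, while an "alignment" limit may not be required finer than the supported value and must be a power of two. A standalone sketch of those two rules (not the Dawn implementation, which is macro-generated per limit):

```cpp
#include <cstdint>
#include <iostream>

bool IsPowerOfTwo(uint64_t v) {
    return v != 0 && (v & (v - 1)) == 0;
}

bool ValidateMaximum(uint64_t supported, uint64_t required) {
    return required <= supported;
}

bool ValidateAlignment(uint64_t supported, uint64_t required) {
    return required >= supported && IsPowerOfTwo(required);
}

int main() {
    std::cout << ValidateMaximum(134217728, 268435456) << "\n";  // 0: asks for too much
    std::cout << ValidateAlignment(256, 512) << "\n";            // 1: coarser alignment is fine
    std::cout << ValidateAlignment(256, 96) << "\n";             // 0: finer, and not a power of two
    return 0;
}
```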
diff --git a/chromium/third_party/dawn/src/dawn/native/ObjectBase.cpp b/chromium/third_party/dawn/src/dawn/native/ObjectBase.cpp
index 3cafdb79bb5..763129bf101 100644
--- a/chromium/third_party/dawn/src/dawn/native/ObjectBase.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/ObjectBase.cpp
@@ -12,79 +12,75 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-#include "dawn/native/ObjectBase.h"
-#include "dawn/native/Device.h"
-
#include <mutex>
+#include "dawn/native/Device.h"
+#include "dawn/native/ObjectBase.h"
+
namespace dawn::native {
- static constexpr uint64_t kErrorPayload = 0;
- static constexpr uint64_t kNotErrorPayload = 1;
+static constexpr uint64_t kErrorPayload = 0;
+static constexpr uint64_t kNotErrorPayload = 1;
- ObjectBase::ObjectBase(DeviceBase* device) : RefCounted(kNotErrorPayload), mDevice(device) {
- }
+ErrorMonad::ErrorMonad() : RefCounted(kNotErrorPayload) {}
+ErrorMonad::ErrorMonad(ErrorTag) : RefCounted(kErrorPayload) {}
- ObjectBase::ObjectBase(DeviceBase* device, ErrorTag)
- : RefCounted(kErrorPayload), mDevice(device) {
- }
+bool ErrorMonad::IsError() const {
+ return GetRefCountPayload() == kErrorPayload;
+}
- DeviceBase* ObjectBase::GetDevice() const {
- return mDevice;
- }
+ObjectBase::ObjectBase(DeviceBase* device) : ErrorMonad(), mDevice(device) {}
- bool ObjectBase::IsError() const {
- return GetRefCountPayload() == kErrorPayload;
- }
+ObjectBase::ObjectBase(DeviceBase* device, ErrorTag) : ErrorMonad(kError), mDevice(device) {}
- ApiObjectBase::ApiObjectBase(DeviceBase* device, const char* label) : ObjectBase(device) {
- if (label) {
- mLabel = label;
- }
- }
+DeviceBase* ObjectBase::GetDevice() const {
+ return mDevice.Get();
+}
- ApiObjectBase::ApiObjectBase(DeviceBase* device, ErrorTag tag) : ObjectBase(device, tag) {
+ApiObjectBase::ApiObjectBase(DeviceBase* device, const char* label) : ObjectBase(device) {
+ if (label) {
+ mLabel = label;
}
+}
- ApiObjectBase::ApiObjectBase(DeviceBase* device, LabelNotImplementedTag tag)
- : ObjectBase(device) {
- }
+ApiObjectBase::ApiObjectBase(DeviceBase* device, ErrorTag tag) : ObjectBase(device, tag) {}
- ApiObjectBase::~ApiObjectBase() {
- ASSERT(!IsAlive());
- }
+ApiObjectBase::ApiObjectBase(DeviceBase* device, LabelNotImplementedTag tag) : ObjectBase(device) {}
- void ApiObjectBase::APISetLabel(const char* label) {
- mLabel = label;
- SetLabelImpl();
- }
+ApiObjectBase::~ApiObjectBase() {
+ ASSERT(!IsAlive());
+}
- const std::string& ApiObjectBase::GetLabel() const {
- return mLabel;
- }
+void ApiObjectBase::APISetLabel(const char* label) {
+ mLabel = label;
+ SetLabelImpl();
+}
- void ApiObjectBase::SetLabelImpl() {
- }
+const std::string& ApiObjectBase::GetLabel() const {
+ return mLabel;
+}
- bool ApiObjectBase::IsAlive() const {
- return IsInList();
- }
+void ApiObjectBase::SetLabelImpl() {}
- void ApiObjectBase::DeleteThis() {
- Destroy();
- RefCounted::DeleteThis();
- }
+bool ApiObjectBase::IsAlive() const {
+ return IsInList();
+}
- void ApiObjectBase::TrackInDevice() {
- ASSERT(GetDevice() != nullptr);
- GetDevice()->TrackObject(this);
- }
+void ApiObjectBase::DeleteThis() {
+ Destroy();
+ RefCounted::DeleteThis();
+}
+
+void ApiObjectBase::TrackInDevice() {
+ ASSERT(GetDevice() != nullptr);
+ GetDevice()->TrackObject(this);
+}
- void ApiObjectBase::Destroy() {
- const std::lock_guard<std::mutex> lock(*GetDevice()->GetObjectListMutex(GetType()));
- if (RemoveFromList()) {
- DestroyImpl();
- }
+void ApiObjectBase::Destroy() {
+ const std::lock_guard<std::mutex> lock(*GetDevice()->GetObjectListMutex(GetType()));
+ if (RemoveFromList()) {
+ DestroyImpl();
}
+}
} // namespace dawn::native
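The kErrorPayload/kNotErrorPayload constants above reuse the RefCounted payload bit so that error placeholders can flow through the same Ref<> machinery as valid objects and be detected later with IsError(). A sketch of how a subclass might expose an error factory; FooBase and MakeError are hypothetical, and AcquireRef() is assumed to be the helper from dawn/common/RefCounted.h that adopts the initial reference:

    class FooBase : public ObjectBase {
      public:
        explicit FooBase(DeviceBase* device) : ObjectBase(device) {}
        FooBase(DeviceBase* device, ErrorTag tag) : ObjectBase(device, tag) {}

        static Ref<FooBase> MakeError(DeviceBase* device) {
            // The returned object answers IsError() == true but is otherwise a normal Ref.
            return AcquireRef(new FooBase(device, ObjectBase::kError));
        }
    };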
diff --git a/chromium/third_party/dawn/src/dawn/native/ObjectBase.h b/chromium/third_party/dawn/src/dawn/native/ObjectBase.h
index 4ea489c0d52..5ebaad0b908 100644
--- a/chromium/third_party/dawn/src/dawn/native/ObjectBase.h
+++ b/chromium/third_party/dawn/src/dawn/native/ObjectBase.h
@@ -15,82 +15,89 @@
#ifndef SRC_DAWN_NATIVE_OBJECTBASE_H_
#define SRC_DAWN_NATIVE_OBJECTBASE_H_
+#include <string>
+
#include "dawn/common/LinkedList.h"
#include "dawn/common/RefCounted.h"
#include "dawn/native/Forward.h"
-#include <string>
-
namespace dawn::native {
- class DeviceBase;
-
- class ObjectBase : public RefCounted {
- public:
- struct ErrorTag {};
- static constexpr ErrorTag kError = {};
-
- explicit ObjectBase(DeviceBase* device);
- ObjectBase(DeviceBase* device, ErrorTag tag);
-
- DeviceBase* GetDevice() const;
- bool IsError() const;
-
- private:
- // Pointer to owning device.
- DeviceBase* mDevice;
- };
-
- class ApiObjectBase : public ObjectBase, public LinkNode<ApiObjectBase> {
- public:
- struct LabelNotImplementedTag {};
- static constexpr LabelNotImplementedTag kLabelNotImplemented = {};
- struct UntrackedByDeviceTag {};
- static constexpr UntrackedByDeviceTag kUntrackedByDevice = {};
-
- ApiObjectBase(DeviceBase* device, LabelNotImplementedTag tag);
- ApiObjectBase(DeviceBase* device, const char* label);
- ApiObjectBase(DeviceBase* device, ErrorTag tag);
- ~ApiObjectBase() override;
-
- virtual ObjectType GetType() const = 0;
- const std::string& GetLabel() const;
-
- // The ApiObjectBase is considered alive if it is tracked in a respective linked list owned
- // by the owning device.
- bool IsAlive() const;
-
- // This needs to be public because it can be called from the device owning the object.
- void Destroy();
-
- // Dawn API
- void APISetLabel(const char* label);
-
- protected:
- // Overriding of the RefCounted's DeleteThis function ensures that instances of objects
- // always call their derived class implementation of Destroy prior to the derived
- // class being destroyed. This guarantees that when ApiObjects' reference counts drop to 0,
- // then the underlying backend's Destroy calls are executed. We cannot naively put the call
- // to Destroy in the destructor of this class because it calls DestroyImpl
- // which is a virtual function often implemented in the Derived class which would already
- // have been destroyed by the time ApiObject's destructor is called by C++'s destruction
- // order. Note that some classes like BindGroup may override the DeleteThis function again,
- // and they should ensure that their overriding versions call this underlying version
- // somewhere.
- void DeleteThis() override;
- void TrackInDevice();
-
- // Sub-classes may override this function multiple times. Whenever overriding this function,
- // however, users should be sure to call their parent's version in the new override to make
- // sure that all destroy functionality is kept. This function is guaranteed to only be
- // called once through the exposed Destroy function.
- virtual void DestroyImpl() = 0;
-
- private:
- virtual void SetLabelImpl();
-
- std::string mLabel;
- };
+class DeviceBase;
+
+class ErrorMonad : public RefCounted {
+ public:
+ struct ErrorTag {};
+ static constexpr ErrorTag kError = {};
+
+ ErrorMonad();
+ explicit ErrorMonad(ErrorTag tag);
+
+ bool IsError() const;
+};
+
+class ObjectBase : public ErrorMonad {
+ public:
+ explicit ObjectBase(DeviceBase* device);
+ ObjectBase(DeviceBase* device, ErrorTag tag);
+
+ DeviceBase* GetDevice() const;
+
+ private:
+ // Ref to owning device.
+ Ref<DeviceBase> mDevice;
+};
+
+class ApiObjectBase : public ObjectBase, public LinkNode<ApiObjectBase> {
+ public:
+ struct LabelNotImplementedTag {};
+ static constexpr LabelNotImplementedTag kLabelNotImplemented = {};
+ struct UntrackedByDeviceTag {};
+ static constexpr UntrackedByDeviceTag kUntrackedByDevice = {};
+
+ ApiObjectBase(DeviceBase* device, LabelNotImplementedTag tag);
+ ApiObjectBase(DeviceBase* device, const char* label);
+ ApiObjectBase(DeviceBase* device, ErrorTag tag);
+ ~ApiObjectBase() override;
+
+ virtual ObjectType GetType() const = 0;
+ const std::string& GetLabel() const;
+
+    // The ApiObjectBase is considered alive if it is tracked in the respective linked list owned
+    // by the owning device.
+ bool IsAlive() const;
+
+ // This needs to be public because it can be called from the device owning the object.
+ void Destroy();
+
+ // Dawn API
+ void APISetLabel(const char* label);
+
+ protected:
+ // Overriding of the RefCounted's DeleteThis function ensures that instances of objects
+ // always call their derived class implementation of Destroy prior to the derived
+ // class being destroyed. This guarantees that when ApiObjects' reference counts drop to 0,
+ // then the underlying backend's Destroy calls are executed. We cannot naively put the call
+ // to Destroy in the destructor of this class because it calls DestroyImpl
+ // which is a virtual function often implemented in the Derived class which would already
+ // have been destroyed by the time ApiObject's destructor is called by C++'s destruction
+ // order. Note that some classes like BindGroup may override the DeleteThis function again,
+ // and they should ensure that their overriding versions call this underlying version
+ // somewhere.
+ void DeleteThis() override;
+ void TrackInDevice();
+
+ // Sub-classes may override this function multiple times. Whenever overriding this function,
+ // however, users should be sure to call their parent's version in the new override to make
+ // sure that all destroy functionality is kept. This function is guaranteed to only be
+ // called once through the exposed Destroy function.
+ virtual void DestroyImpl() = 0;
+
+ private:
+ virtual void SetLabelImpl();
+
+ std::string mLabel;
+};
} // namespace dawn::native
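The comments above describe a contract that is easiest to see from a derived class: track the object on construction, implement GetType() and DestroyImpl(), and let Destroy()/DeleteThis() drive teardown. A sketch under those assumptions; the class is hypothetical and ObjectType::Buffer is assumed to be a valid enumerator of the generated ObjectType enum:

    class FakeBufferBase : public ApiObjectBase {
      public:
        FakeBufferBase(DeviceBase* device, const char* label) : ApiObjectBase(device, label) {
            // Register with the device so IsAlive() is true until Destroy() runs.
            TrackInDevice();
        }

        ObjectType GetType() const override { return ObjectType::Buffer; }

      private:
        // Reached at most once, through Destroy() (called directly or via DeleteThis()).
        void DestroyImpl() override {
            // Release backend resources here. Deeper subclasses that override this again
            // should call FakeBufferBase::DestroyImpl() so the whole chain still runs.
        }
    };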
diff --git a/chromium/third_party/dawn/src/dawn/native/ObjectContentHasher.cpp b/chromium/third_party/dawn/src/dawn/native/ObjectContentHasher.cpp
index 58c892e9275..caea3929832 100644
--- a/chromium/third_party/dawn/src/dawn/native/ObjectContentHasher.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/ObjectContentHasher.cpp
@@ -16,7 +16,7 @@
namespace dawn::native {
- size_t ObjectContentHasher::GetContentHash() const {
- return mContentHash;
- }
+size_t ObjectContentHasher::GetContentHash() const {
+ return mContentHash;
+}
} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/ObjectContentHasher.h b/chromium/third_party/dawn/src/dawn/native/ObjectContentHasher.h
index 8a065a834ff..4211fb35142 100644
--- a/chromium/third_party/dawn/src/dawn/native/ObjectContentHasher.h
+++ b/chromium/third_party/dawn/src/dawn/native/ObjectContentHasher.h
@@ -15,68 +15,68 @@
#ifndef SRC_DAWN_NATIVE_OBJECTCONTENTHASHER_H_
#define SRC_DAWN_NATIVE_OBJECTCONTENTHASHER_H_
-#include "dawn/common/HashUtils.h"
-
#include <string>
#include <vector>
-namespace dawn::native {
-
- // ObjectContentHasher records a hash that can be used as a key to lookup a cached object in a
- // cache.
- class ObjectContentHasher {
- public:
- // Record calls the appropriate record function based on the type.
- template <typename T, typename... Args>
- void Record(const T& value, const Args&... args) {
- RecordImpl<T, Args...>::Call(this, value, args...);
- }
-
- size_t GetContentHash() const;
+#include "dawn/common/HashUtils.h"
- private:
- template <typename T, typename... Args>
- struct RecordImpl {
- static constexpr void Call(ObjectContentHasher* recorder,
- const T& value,
- const Args&... args) {
- HashCombine(&recorder->mContentHash, value, args...);
- }
- };
+namespace dawn::native {
- template <typename T>
- struct RecordImpl<T*> {
- static constexpr void Call(ObjectContentHasher* recorder, T* obj) {
- // Calling Record(objPtr) is not allowed. This check exists to only prevent such
- // mistakes.
- static_assert(obj == nullptr);
- }
- };
+// ObjectContentHasher records a hash that can be used as a key to look up a cached object in a
+// cache.
+class ObjectContentHasher {
+ public:
+ // Record calls the appropriate record function based on the type.
+ template <typename T, typename... Args>
+ void Record(const T& value, const Args&... args) {
+ RecordImpl<T, Args...>::Call(this, value, args...);
+ }
- template <typename T>
- struct RecordImpl<std::vector<T>> {
- static constexpr void Call(ObjectContentHasher* recorder, const std::vector<T>& vec) {
- recorder->RecordIterable<std::vector<T>>(vec);
- }
- };
+ size_t GetContentHash() const;
- template <typename IteratorT>
- constexpr void RecordIterable(const IteratorT& iterable) {
- for (auto it = iterable.begin(); it != iterable.end(); ++it) {
- Record(*it);
- }
+ private:
+ template <typename T, typename... Args>
+ struct RecordImpl {
+ static constexpr void Call(ObjectContentHasher* recorder,
+ const T& value,
+ const Args&... args) {
+ HashCombine(&recorder->mContentHash, value, args...);
}
+ };
- size_t mContentHash = 0;
+ template <typename T>
+ struct RecordImpl<T*> {
+ static constexpr void Call(ObjectContentHasher* recorder, T* obj) {
+            // Calling Record(objPtr) is not allowed. This check exists only to prevent such
+            // mistakes.
+ static_assert(obj == nullptr);
+ }
};
- template <>
- struct ObjectContentHasher::RecordImpl<std::string> {
- static constexpr void Call(ObjectContentHasher* recorder, const std::string& str) {
- recorder->RecordIterable<std::string>(str);
+ template <typename T>
+ struct RecordImpl<std::vector<T>> {
+ static constexpr void Call(ObjectContentHasher* recorder, const std::vector<T>& vec) {
+ recorder->RecordIterable<std::vector<T>>(vec);
}
};
+ template <typename IteratorT>
+ constexpr void RecordIterable(const IteratorT& iterable) {
+ for (auto it = iterable.begin(); it != iterable.end(); ++it) {
+ Record(*it);
+ }
+ }
+
+ size_t mContentHash = 0;
+};
+
+template <>
+struct ObjectContentHasher::RecordImpl<std::string> {
+ static constexpr void Call(ObjectContentHasher* recorder, const std::string& str) {
+ recorder->RecordIterable<std::string>(str);
+ }
+};
+
} // namespace dawn::native
#endif // SRC_DAWN_NATIVE_OBJECTCONTENTHASHER_H_
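In practice the hasher is fed a sequence of descriptor fields and the resulting value is used as the key of a content cache. A small sketch with made-up field names; Record() hash-combines scalar values, and the std::string specialization above hashes the label character by character:

    size_t HashSamplerLikeDescriptor(uint32_t addressModeU,
                                     uint32_t addressModeV,
                                     const std::string& label) {
        ObjectContentHasher hasher;
        hasher.Record(addressModeU, addressModeV);  // variadic overload combines both values
        hasher.Record(label);                       // uses the std::string specialization
        return hasher.GetContentHash();
    }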
diff --git a/chromium/third_party/dawn/src/dawn/native/PassResourceUsage.cpp b/chromium/third_party/dawn/src/dawn/native/PassResourceUsage.cpp
new file mode 100644
index 00000000000..fc9ed33de85
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/PassResourceUsage.cpp
@@ -0,0 +1,23 @@
+// Copyright 2022 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/PassResourceUsage.h"
+
+namespace dawn::native {
+
+ComputePassResourceUsage::ComputePassResourceUsage() = default;
+
+ComputePassResourceUsage::ComputePassResourceUsage(ComputePassResourceUsage&&) = default;
+
+} // namespace dawn::native
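Declaring the special members in the header and defaulting them out of line in this new .cpp is the pattern the MSVC comment in PassResourceUsage.h is working around, and it also keeps the definitions in one translation unit. The shape of the pattern, with hypothetical names:

    // widget.h
    #include <vector>
    struct Widget {
        Widget();
        Widget(Widget&&);  // declaring a move constructor suppresses the copy constructor
        std::vector<int> data;
    };

    // widget.cpp
    #include "widget.h"
    Widget::Widget() = default;
    Widget::Widget(Widget&&) = default;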
diff --git a/chromium/third_party/dawn/src/dawn/native/PassResourceUsage.h b/chromium/third_party/dawn/src/dawn/native/PassResourceUsage.h
index 307a7266f04..c22498fb327 100644
--- a/chromium/third_party/dawn/src/dawn/native/PassResourceUsage.h
+++ b/chromium/third_party/dawn/src/dawn/native/PassResourceUsage.h
@@ -15,85 +15,85 @@
#ifndef SRC_DAWN_NATIVE_PASSRESOURCEUSAGE_H_
#define SRC_DAWN_NATIVE_PASSRESOURCEUSAGE_H_
-#include "dawn/native/SubresourceStorage.h"
-#include "dawn/native/dawn_platform.h"
-
#include <set>
#include <vector>
+#include "dawn/native/SubresourceStorage.h"
+#include "dawn/native/dawn_platform.h"
+
namespace dawn::native {
- // This file declares various "ResourceUsage" structures. They are produced by the frontend
- // while recording commands to be used for later validation and also some operations in the
- // backends. The are produced by the "Encoder" objects that finalize them on "EndPass" or
- // "Finish". Internally the "Encoder" may use the "StateTracker" to create them.
-
- class BufferBase;
- class QuerySetBase;
- class TextureBase;
-
- // The texture usage inside passes must be tracked per-subresource.
- using TextureSubresourceUsage = SubresourceStorage<wgpu::TextureUsage>;
-
- // Which resources are used by a synchronization scope and how they are used. The command
- // buffer validation pre-computes this information so that backends with explicit barriers
- // don't have to re-compute it.
- struct SyncScopeResourceUsage {
- std::vector<BufferBase*> buffers;
- std::vector<wgpu::BufferUsage> bufferUsages;
-
- std::vector<TextureBase*> textures;
- std::vector<TextureSubresourceUsage> textureUsages;
-
- std::vector<ExternalTextureBase*> externalTextures;
- };
-
- // Contains all the resource usage data for a compute pass.
- //
- // Essentially a list of SyncScopeResourceUsage, one per Dispatch as required by the WebGPU
- // specification. ComputePassResourceUsage also stores nline the set of all buffers and
- // textures used, because some unused BindGroups may not be used at all in synchronization
- // scope but their resources still need to be validated on Queue::Submit.
- struct ComputePassResourceUsage {
- // Somehow without this defaulted constructor, MSVC or its STDlib have an issue where they
- // use the copy constructor (that's deleted) when doing operations on a
- // vector<ComputePassResourceUsage>
- ComputePassResourceUsage(ComputePassResourceUsage&&) = default;
- ComputePassResourceUsage() = default;
-
- std::vector<SyncScopeResourceUsage> dispatchUsages;
-
- // All the resources referenced by this compute pass for validation in Queue::Submit.
- std::set<BufferBase*> referencedBuffers;
- std::set<TextureBase*> referencedTextures;
- std::set<ExternalTextureBase*> referencedExternalTextures;
- };
-
- // Contains all the resource usage data for a render pass.
- //
- // In the WebGPU specification render passes are synchronization scopes but we also need to
- // track additional data. It is stored for render passes used by a CommandBuffer, but also in
- // RenderBundle so they can be merged into the render passes' usage on ExecuteBundles().
- struct RenderPassResourceUsage : public SyncScopeResourceUsage {
- // Storage to track the occlusion queries used during the pass.
- std::vector<QuerySetBase*> querySets;
- std::vector<std::vector<bool>> queryAvailabilities;
- };
-
- using RenderPassUsages = std::vector<RenderPassResourceUsage>;
- using ComputePassUsages = std::vector<ComputePassResourceUsage>;
-
- // Contains a hierarchy of "ResourceUsage" that mirrors the hierarchy of the CommandBuffer and
- // is used for validation and to produce barriers and lazy clears in the backends.
- struct CommandBufferResourceUsage {
- RenderPassUsages renderPasses;
- ComputePassUsages computePasses;
-
- // Resources used in commands that aren't in a pass.
- std::set<BufferBase*> topLevelBuffers;
- std::set<TextureBase*> topLevelTextures;
- std::set<QuerySetBase*> usedQuerySets;
- };
+// This file declares various "ResourceUsage" structures. They are produced by the frontend
+// while recording commands to be used for later validation and also some operations in the
+// backends. They are produced by the "Encoder" objects that finalize them on "EndPass" or
+// "Finish". Internally the "Encoder" may use the "StateTracker" to create them.
+
+class BufferBase;
+class QuerySetBase;
+class TextureBase;
+
+// The texture usage inside passes must be tracked per-subresource.
+using TextureSubresourceUsage = SubresourceStorage<wgpu::TextureUsage>;
+
+// Which resources are used by a synchronization scope and how they are used. The command
+// buffer validation pre-computes this information so that backends with explicit barriers
+// don't have to re-compute it.
+struct SyncScopeResourceUsage {
+ std::vector<BufferBase*> buffers;
+ std::vector<wgpu::BufferUsage> bufferUsages;
+
+ std::vector<TextureBase*> textures;
+ std::vector<TextureSubresourceUsage> textureUsages;
+
+ std::vector<ExternalTextureBase*> externalTextures;
+};
+
+// Contains all the resource usage data for a compute pass.
+//
+// Essentially a list of SyncScopeResourceUsage, one per Dispatch as required by the WebGPU
+// specification. ComputePassResourceUsage also stores inline the set of all buffers and
+// textures used, because resources in unused BindGroups may not appear in any synchronization
+// scope but still need to be validated on Queue::Submit.
+struct ComputePassResourceUsage {
+    // Somehow, without these defaulted constructors, MSVC or its STL has an issue where it
+    // uses the (deleted) copy constructor when doing operations on a
+    // vector<ComputePassResourceUsage>.
+ ComputePassResourceUsage(ComputePassResourceUsage&&);
+ ComputePassResourceUsage();
+
+ std::vector<SyncScopeResourceUsage> dispatchUsages;
+
+ // All the resources referenced by this compute pass for validation in Queue::Submit.
+ std::set<BufferBase*> referencedBuffers;
+ std::set<TextureBase*> referencedTextures;
+ std::set<ExternalTextureBase*> referencedExternalTextures;
+};
+
+// Contains all the resource usage data for a render pass.
+//
+// In the WebGPU specification render passes are synchronization scopes but we also need to
+// track additional data. It is stored for render passes used by a CommandBuffer, but also in
+// RenderBundle so they can be merged into the render passes' usage on ExecuteBundles().
+struct RenderPassResourceUsage : public SyncScopeResourceUsage {
+ // Storage to track the occlusion queries used during the pass.
+ std::vector<QuerySetBase*> querySets;
+ std::vector<std::vector<bool>> queryAvailabilities;
+};
+
+using RenderPassUsages = std::vector<RenderPassResourceUsage>;
+using ComputePassUsages = std::vector<ComputePassResourceUsage>;
+
+// Contains a hierarchy of "ResourceUsage" that mirrors the hierarchy of the CommandBuffer and
+// is used for validation and to produce barriers and lazy clears in the backends.
+struct CommandBufferResourceUsage {
+ RenderPassUsages renderPasses;
+ ComputePassUsages computePasses;
+
+ // Resources used in commands that aren't in a pass.
+ std::set<BufferBase*> topLevelBuffers;
+ std::set<TextureBase*> topLevelTextures;
+ std::set<QuerySetBase*> usedQuerySets;
+};
} // namespace dawn::native
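The buffers/bufferUsages (and textures/textureUsages) members above are parallel arrays, so backends with explicit barriers walk them in lockstep. A minimal sketch of that consumption; the function itself is hypothetical:

    void EmitBufferBarriers(const SyncScopeResourceUsage& scope) {
        ASSERT(scope.buffers.size() == scope.bufferUsages.size());
        for (size_t i = 0; i < scope.buffers.size(); ++i) {
            BufferBase* buffer = scope.buffers[i];
            wgpu::BufferUsage usage = scope.bufferUsages[i];
            // A real backend would record whatever barrier or state transition
            // `usage` requires for `buffer` before the synchronization scope executes.
            (void)buffer;
            (void)usage;
        }
    }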
diff --git a/chromium/third_party/dawn/src/dawn/native/PassResourceUsageTracker.cpp b/chromium/third_party/dawn/src/dawn/native/PassResourceUsageTracker.cpp
index b4814cfa626..e1f1ae6b32c 100644
--- a/chromium/third_party/dawn/src/dawn/native/PassResourceUsageTracker.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/PassResourceUsageTracker.cpp
@@ -14,6 +14,8 @@
#include "dawn/native/PassResourceUsageTracker.h"
+#include <utility>
+
#include "dawn/native/BindGroup.h"
#include "dawn/native/Buffer.h"
#include "dawn/native/EnumMaskIterator.h"
@@ -22,222 +24,241 @@
#include "dawn/native/QuerySet.h"
#include "dawn/native/Texture.h"
-#include <utility>
-
namespace dawn::native {
- void SyncScopeUsageTracker::BufferUsedAs(BufferBase* buffer, wgpu::BufferUsage usage) {
- // std::map's operator[] will create the key and return 0 if the key didn't exist
- // before.
- mBufferUsages[buffer] |= usage;
- }
-
- void SyncScopeUsageTracker::TextureViewUsedAs(TextureViewBase* view, wgpu::TextureUsage usage) {
- TextureBase* texture = view->GetTexture();
- const SubresourceRange& range = view->GetSubresourceRange();
-
- // Get or create a new TextureSubresourceUsage for that texture (initially filled with
- // wgpu::TextureUsage::None)
- auto it = mTextureUsages.emplace(
- std::piecewise_construct, std::forward_as_tuple(texture),
- std::forward_as_tuple(texture->GetFormat().aspects, texture->GetArrayLayers(),
- texture->GetNumMipLevels(), wgpu::TextureUsage::None));
- TextureSubresourceUsage& textureUsage = it.first->second;
-
- textureUsage.Update(range,
- [usage](const SubresourceRange&, wgpu::TextureUsage* storedUsage) {
- // TODO(crbug.com/dawn/1001): Consider optimizing to have fewer
- // branches.
- if ((*storedUsage & wgpu::TextureUsage::RenderAttachment) != 0 &&
- (usage & wgpu::TextureUsage::RenderAttachment) != 0) {
- // Using the same subresource as an attachment for two different
- // render attachments is a write-write hazard. Add this internal
- // usage so we will fail the check that a subresource with
- // writable usage is the single usage.
- *storedUsage |= kAgainAsRenderAttachment;
- }
- *storedUsage |= usage;
+SyncScopeUsageTracker::SyncScopeUsageTracker() = default;
+
+SyncScopeUsageTracker::SyncScopeUsageTracker(SyncScopeUsageTracker&&) = default;
+
+SyncScopeUsageTracker::~SyncScopeUsageTracker() = default;
+
+SyncScopeUsageTracker& SyncScopeUsageTracker::operator=(SyncScopeUsageTracker&&) = default;
+
+void SyncScopeUsageTracker::BufferUsedAs(BufferBase* buffer, wgpu::BufferUsage usage) {
+ // std::map's operator[] will create the key and return 0 if the key didn't exist
+ // before.
+ mBufferUsages[buffer] |= usage;
+}
+
+void SyncScopeUsageTracker::TextureViewUsedAs(TextureViewBase* view, wgpu::TextureUsage usage) {
+ TextureBase* texture = view->GetTexture();
+ const SubresourceRange& range = view->GetSubresourceRange();
+
+ // Get or create a new TextureSubresourceUsage for that texture (initially filled with
+ // wgpu::TextureUsage::None)
+ auto it = mTextureUsages.emplace(
+ std::piecewise_construct, std::forward_as_tuple(texture),
+ std::forward_as_tuple(texture->GetFormat().aspects, texture->GetArrayLayers(),
+ texture->GetNumMipLevels(), wgpu::TextureUsage::None));
+ TextureSubresourceUsage& textureUsage = it.first->second;
+
+ textureUsage.Update(range, [usage](const SubresourceRange&, wgpu::TextureUsage* storedUsage) {
+ // TODO(crbug.com/dawn/1001): Consider optimizing to have fewer
+ // branches.
+ if ((*storedUsage & wgpu::TextureUsage::RenderAttachment) != 0 &&
+ (usage & wgpu::TextureUsage::RenderAttachment) != 0) {
+ // Using the same subresource as an attachment for two different
+ // render attachments is a write-write hazard. Add this internal
+ // usage so we will fail the check that a subresource with
+ // writable usage is the single usage.
+ *storedUsage |= kAgainAsRenderAttachment;
+ }
+ *storedUsage |= usage;
+ });
+}
+
+void SyncScopeUsageTracker::AddRenderBundleTextureUsage(
+ TextureBase* texture,
+ const TextureSubresourceUsage& textureUsage) {
+ // Get or create a new TextureSubresourceUsage for that texture (initially filled with
+ // wgpu::TextureUsage::None)
+ auto it = mTextureUsages.emplace(
+ std::piecewise_construct, std::forward_as_tuple(texture),
+ std::forward_as_tuple(texture->GetFormat().aspects, texture->GetArrayLayers(),
+ texture->GetNumMipLevels(), wgpu::TextureUsage::None));
+ TextureSubresourceUsage* passTextureUsage = &it.first->second;
+
+ passTextureUsage->Merge(textureUsage,
+ [](const SubresourceRange&, wgpu::TextureUsage* storedUsage,
+ const wgpu::TextureUsage& addedUsage) {
+ ASSERT((addedUsage & wgpu::TextureUsage::RenderAttachment) == 0);
+ *storedUsage |= addedUsage;
});
- }
-
- void SyncScopeUsageTracker::AddRenderBundleTextureUsage(
- TextureBase* texture,
- const TextureSubresourceUsage& textureUsage) {
- // Get or create a new TextureSubresourceUsage for that texture (initially filled with
- // wgpu::TextureUsage::None)
- auto it = mTextureUsages.emplace(
- std::piecewise_construct, std::forward_as_tuple(texture),
- std::forward_as_tuple(texture->GetFormat().aspects, texture->GetArrayLayers(),
- texture->GetNumMipLevels(), wgpu::TextureUsage::None));
- TextureSubresourceUsage* passTextureUsage = &it.first->second;
-
- passTextureUsage->Merge(
- textureUsage, [](const SubresourceRange&, wgpu::TextureUsage* storedUsage,
- const wgpu::TextureUsage& addedUsage) {
- ASSERT((addedUsage & wgpu::TextureUsage::RenderAttachment) == 0);
- *storedUsage |= addedUsage;
- });
- }
-
- void SyncScopeUsageTracker::AddBindGroup(BindGroupBase* group) {
- for (BindingIndex bindingIndex{0}; bindingIndex < group->GetLayout()->GetBindingCount();
- ++bindingIndex) {
- const BindingInfo& bindingInfo = group->GetLayout()->GetBindingInfo(bindingIndex);
-
- switch (bindingInfo.bindingType) {
- case BindingInfoType::Buffer: {
- BufferBase* buffer = group->GetBindingAsBufferBinding(bindingIndex).buffer;
- switch (bindingInfo.buffer.type) {
- case wgpu::BufferBindingType::Uniform:
- BufferUsedAs(buffer, wgpu::BufferUsage::Uniform);
- break;
- case wgpu::BufferBindingType::Storage:
- BufferUsedAs(buffer, wgpu::BufferUsage::Storage);
- break;
- case kInternalStorageBufferBinding:
- BufferUsedAs(buffer, kInternalStorageBuffer);
- break;
- case wgpu::BufferBindingType::ReadOnlyStorage:
- BufferUsedAs(buffer, kReadOnlyStorageBuffer);
- break;
- case wgpu::BufferBindingType::Undefined:
- UNREACHABLE();
- }
- break;
+}
+
+void SyncScopeUsageTracker::AddBindGroup(BindGroupBase* group) {
+ for (BindingIndex bindingIndex{0}; bindingIndex < group->GetLayout()->GetBindingCount();
+ ++bindingIndex) {
+ const BindingInfo& bindingInfo = group->GetLayout()->GetBindingInfo(bindingIndex);
+
+ switch (bindingInfo.bindingType) {
+ case BindingInfoType::Buffer: {
+ BufferBase* buffer = group->GetBindingAsBufferBinding(bindingIndex).buffer;
+ switch (bindingInfo.buffer.type) {
+ case wgpu::BufferBindingType::Uniform:
+ BufferUsedAs(buffer, wgpu::BufferUsage::Uniform);
+ break;
+ case wgpu::BufferBindingType::Storage:
+ BufferUsedAs(buffer, wgpu::BufferUsage::Storage);
+ break;
+ case kInternalStorageBufferBinding:
+ BufferUsedAs(buffer, kInternalStorageBuffer);
+ break;
+ case wgpu::BufferBindingType::ReadOnlyStorage:
+ BufferUsedAs(buffer, kReadOnlyStorageBuffer);
+ break;
+ case wgpu::BufferBindingType::Undefined:
+ UNREACHABLE();
}
+ break;
+ }
- case BindingInfoType::Texture: {
- TextureViewBase* view = group->GetBindingAsTextureView(bindingIndex);
- TextureViewUsedAs(view, wgpu::TextureUsage::TextureBinding);
- break;
- }
+ case BindingInfoType::Texture: {
+ TextureViewBase* view = group->GetBindingAsTextureView(bindingIndex);
+ TextureViewUsedAs(view, wgpu::TextureUsage::TextureBinding);
+ break;
+ }
- case BindingInfoType::StorageTexture: {
- TextureViewBase* view = group->GetBindingAsTextureView(bindingIndex);
- switch (bindingInfo.storageTexture.access) {
- case wgpu::StorageTextureAccess::WriteOnly:
- TextureViewUsedAs(view, wgpu::TextureUsage::StorageBinding);
- break;
- case wgpu::StorageTextureAccess::Undefined:
- UNREACHABLE();
- }
- break;
+ case BindingInfoType::StorageTexture: {
+ TextureViewBase* view = group->GetBindingAsTextureView(bindingIndex);
+ switch (bindingInfo.storageTexture.access) {
+ case wgpu::StorageTextureAccess::WriteOnly:
+ TextureViewUsedAs(view, wgpu::TextureUsage::StorageBinding);
+ break;
+ case wgpu::StorageTextureAccess::Undefined:
+ UNREACHABLE();
}
-
- case BindingInfoType::ExternalTexture:
- UNREACHABLE();
- break;
-
- case BindingInfoType::Sampler:
- break;
+ break;
}
- }
- for (const Ref<ExternalTextureBase>& externalTexture : group->GetBoundExternalTextures()) {
- mExternalTextureUsages.insert(externalTexture.Get());
+ case BindingInfoType::ExternalTexture:
+ UNREACHABLE();
+ break;
+
+ case BindingInfoType::Sampler:
+ break;
}
}
- SyncScopeResourceUsage SyncScopeUsageTracker::AcquireSyncScopeUsage() {
- SyncScopeResourceUsage result;
- result.buffers.reserve(mBufferUsages.size());
- result.bufferUsages.reserve(mBufferUsages.size());
- result.textures.reserve(mTextureUsages.size());
- result.textureUsages.reserve(mTextureUsages.size());
+ for (const Ref<ExternalTextureBase>& externalTexture : group->GetBoundExternalTextures()) {
+ mExternalTextureUsages.insert(externalTexture.Get());
+ }
+}
+
+SyncScopeResourceUsage SyncScopeUsageTracker::AcquireSyncScopeUsage() {
+ SyncScopeResourceUsage result;
+ result.buffers.reserve(mBufferUsages.size());
+ result.bufferUsages.reserve(mBufferUsages.size());
+ result.textures.reserve(mTextureUsages.size());
+ result.textureUsages.reserve(mTextureUsages.size());
+
+ for (auto& [buffer, usage] : mBufferUsages) {
+ result.buffers.push_back(buffer);
+ result.bufferUsages.push_back(usage);
+ }
- for (auto& [buffer, usage] : mBufferUsages) {
- result.buffers.push_back(buffer);
- result.bufferUsages.push_back(usage);
- }
+ for (auto& [texture, usage] : mTextureUsages) {
+ result.textures.push_back(texture);
+ result.textureUsages.push_back(std::move(usage));
+ }
- for (auto& [texture, usage] : mTextureUsages) {
- result.textures.push_back(texture);
- result.textureUsages.push_back(std::move(usage));
- }
+ for (auto* const it : mExternalTextureUsages) {
+ result.externalTextures.push_back(it);
+ }
- for (auto& it : mExternalTextureUsages) {
- result.externalTextures.push_back(it);
- }
+ mBufferUsages.clear();
+ mTextureUsages.clear();
+ mExternalTextureUsages.clear();
- mBufferUsages.clear();
- mTextureUsages.clear();
- mExternalTextureUsages.clear();
+ return result;
+}
- return result;
- }
+ComputePassResourceUsageTracker::ComputePassResourceUsageTracker() = default;
- void ComputePassResourceUsageTracker::AddDispatch(SyncScopeResourceUsage scope) {
- mUsage.dispatchUsages.push_back(std::move(scope));
- }
+ComputePassResourceUsageTracker::~ComputePassResourceUsageTracker() = default;
- void ComputePassResourceUsageTracker::AddReferencedBuffer(BufferBase* buffer) {
- mUsage.referencedBuffers.insert(buffer);
- }
+void ComputePassResourceUsageTracker::AddDispatch(SyncScopeResourceUsage scope) {
+ mUsage.dispatchUsages.push_back(std::move(scope));
+}
- void ComputePassResourceUsageTracker::AddResourcesReferencedByBindGroup(BindGroupBase* group) {
- for (BindingIndex index{0}; index < group->GetLayout()->GetBindingCount(); ++index) {
- const BindingInfo& bindingInfo = group->GetLayout()->GetBindingInfo(index);
+void ComputePassResourceUsageTracker::AddReferencedBuffer(BufferBase* buffer) {
+ mUsage.referencedBuffers.insert(buffer);
+}
- switch (bindingInfo.bindingType) {
- case BindingInfoType::Buffer: {
- mUsage.referencedBuffers.insert(group->GetBindingAsBufferBinding(index).buffer);
- break;
- }
+void ComputePassResourceUsageTracker::AddResourcesReferencedByBindGroup(BindGroupBase* group) {
+ for (BindingIndex index{0}; index < group->GetLayout()->GetBindingCount(); ++index) {
+ const BindingInfo& bindingInfo = group->GetLayout()->GetBindingInfo(index);
- case BindingInfoType::Texture: {
- mUsage.referencedTextures.insert(
- group->GetBindingAsTextureView(index)->GetTexture());
- break;
- }
+ switch (bindingInfo.bindingType) {
+ case BindingInfoType::Buffer: {
+ mUsage.referencedBuffers.insert(group->GetBindingAsBufferBinding(index).buffer);
+ break;
+ }
- case BindingInfoType::ExternalTexture:
- UNREACHABLE();
- case BindingInfoType::StorageTexture:
- case BindingInfoType::Sampler:
- break;
+ case BindingInfoType::Texture: {
+ mUsage.referencedTextures.insert(
+ group->GetBindingAsTextureView(index)->GetTexture());
+ break;
}
- }
- for (const Ref<ExternalTextureBase>& externalTexture : group->GetBoundExternalTextures()) {
- mUsage.referencedExternalTextures.insert(externalTexture.Get());
+ case BindingInfoType::ExternalTexture:
+ UNREACHABLE();
+ case BindingInfoType::StorageTexture:
+ case BindingInfoType::Sampler:
+ break;
}
}
- ComputePassResourceUsage ComputePassResourceUsageTracker::AcquireResourceUsage() {
- return std::move(mUsage);
+ for (const Ref<ExternalTextureBase>& externalTexture : group->GetBoundExternalTextures()) {
+ mUsage.referencedExternalTextures.insert(externalTexture.Get());
}
+}
- RenderPassResourceUsage RenderPassResourceUsageTracker::AcquireResourceUsage() {
- RenderPassResourceUsage result;
- *static_cast<SyncScopeResourceUsage*>(&result) = AcquireSyncScopeUsage();
+ComputePassResourceUsage ComputePassResourceUsageTracker::AcquireResourceUsage() {
+ return std::move(mUsage);
+}
- result.querySets.reserve(mQueryAvailabilities.size());
- result.queryAvailabilities.reserve(mQueryAvailabilities.size());
+RenderPassResourceUsageTracker::RenderPassResourceUsageTracker() = default;
- for (auto& it : mQueryAvailabilities) {
- result.querySets.push_back(it.first);
- result.queryAvailabilities.push_back(std::move(it.second));
- }
+RenderPassResourceUsageTracker::RenderPassResourceUsageTracker(RenderPassResourceUsageTracker&&) =
+ default;
- mQueryAvailabilities.clear();
+RenderPassResourceUsageTracker::~RenderPassResourceUsageTracker() = default;
- return result;
- }
+RenderPassResourceUsageTracker& RenderPassResourceUsageTracker::operator=(
+ RenderPassResourceUsageTracker&&) = default;
- void RenderPassResourceUsageTracker::TrackQueryAvailability(QuerySetBase* querySet,
- uint32_t queryIndex) {
- // The query availability only needs to be tracked again on render passes for checking
- // query overwrite on render pass and resetting query sets on the Vulkan backend.
- DAWN_ASSERT(querySet != nullptr);
+RenderPassResourceUsage RenderPassResourceUsageTracker::AcquireResourceUsage() {
+ RenderPassResourceUsage result;
+ *static_cast<SyncScopeResourceUsage*>(&result) = AcquireSyncScopeUsage();
- // Gets the iterator for that querySet or create a new vector of bool set to false
- // if the querySet wasn't registered.
- auto it = mQueryAvailabilities.emplace(querySet, querySet->GetQueryCount()).first;
- it->second[queryIndex] = true;
- }
+ result.querySets.reserve(mQueryAvailabilities.size());
+ result.queryAvailabilities.reserve(mQueryAvailabilities.size());
- const QueryAvailabilityMap& RenderPassResourceUsageTracker::GetQueryAvailabilityMap() const {
- return mQueryAvailabilities;
+ for (auto& it : mQueryAvailabilities) {
+ result.querySets.push_back(it.first);
+ result.queryAvailabilities.push_back(std::move(it.second));
}
+ mQueryAvailabilities.clear();
+
+ return result;
+}
+
+void RenderPassResourceUsageTracker::TrackQueryAvailability(QuerySetBase* querySet,
+ uint32_t queryIndex) {
+    // Query availability only needs to be tracked again on render passes, both to check for
+    // query overwrites within a render pass and to reset query sets on the Vulkan backend.
+ DAWN_ASSERT(querySet != nullptr);
+
+    // Get the iterator for that querySet, or create a new vector of bools (all false)
+    // if the querySet wasn't registered yet.
+ auto it = mQueryAvailabilities.emplace(querySet, querySet->GetQueryCount()).first;
+ it->second[queryIndex] = true;
+}
+
+const QueryAvailabilityMap& RenderPassResourceUsageTracker::GetQueryAvailabilityMap() const {
+ return mQueryAvailabilities;
+}
+
} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/PassResourceUsageTracker.h b/chromium/third_party/dawn/src/dawn/native/PassResourceUsageTracker.h
index e6626f5a1e1..c18d52efba0 100644
--- a/chromium/third_party/dawn/src/dawn/native/PassResourceUsageTracker.h
+++ b/chromium/third_party/dawn/src/dawn/native/PassResourceUsageTracker.h
@@ -15,71 +15,88 @@
#ifndef SRC_DAWN_NATIVE_PASSRESOURCEUSAGETRACKER_H_
#define SRC_DAWN_NATIVE_PASSRESOURCEUSAGETRACKER_H_
+#include <map>
+#include <set>
+#include <vector>
+
#include "dawn/native/PassResourceUsage.h"
#include "dawn/native/dawn_platform.h"
-#include <map>
-
namespace dawn::native {
- class BindGroupBase;
- class BufferBase;
- class ExternalTextureBase;
- class QuerySetBase;
- class TextureBase;
-
- using QueryAvailabilityMap = std::map<QuerySetBase*, std::vector<bool>>;
-
- // Helper class to build SyncScopeResourceUsages
- class SyncScopeUsageTracker {
- public:
- void BufferUsedAs(BufferBase* buffer, wgpu::BufferUsage usage);
- void TextureViewUsedAs(TextureViewBase* texture, wgpu::TextureUsage usage);
- void AddRenderBundleTextureUsage(TextureBase* texture,
- const TextureSubresourceUsage& textureUsage);
-
- // Walks the bind groups and tracks all its resources.
- void AddBindGroup(BindGroupBase* group);
-
- // Returns the per-pass usage for use by backends for APIs with explicit barriers.
- SyncScopeResourceUsage AcquireSyncScopeUsage();
-
- private:
- std::map<BufferBase*, wgpu::BufferUsage> mBufferUsages;
- std::map<TextureBase*, TextureSubresourceUsage> mTextureUsages;
- std::set<ExternalTextureBase*> mExternalTextureUsages;
- };
-
- // Helper class to build ComputePassResourceUsages
- class ComputePassResourceUsageTracker {
- public:
- void AddDispatch(SyncScopeResourceUsage scope);
- void AddReferencedBuffer(BufferBase* buffer);
- void AddResourcesReferencedByBindGroup(BindGroupBase* group);
-
- ComputePassResourceUsage AcquireResourceUsage();
-
- private:
- ComputePassResourceUsage mUsage;
- };
-
- // Helper class to build RenderPassResourceUsages
- class RenderPassResourceUsageTracker : public SyncScopeUsageTracker {
- public:
- void TrackQueryAvailability(QuerySetBase* querySet, uint32_t queryIndex);
- const QueryAvailabilityMap& GetQueryAvailabilityMap() const;
-
- RenderPassResourceUsage AcquireResourceUsage();
-
- private:
- // Hide AcquireSyncScopeUsage since users of this class should use AcquireResourceUsage
- // instead.
- using SyncScopeUsageTracker::AcquireSyncScopeUsage;
-
- // Tracks queries used in the render pass to validate that they aren't written twice.
- QueryAvailabilityMap mQueryAvailabilities;
- };
+class BindGroupBase;
+class BufferBase;
+class ExternalTextureBase;
+class QuerySetBase;
+class TextureBase;
+
+using QueryAvailabilityMap = std::map<QuerySetBase*, std::vector<bool>>;
+
+// Helper class to build SyncScopeResourceUsages
+class SyncScopeUsageTracker {
+ public:
+ SyncScopeUsageTracker();
+ SyncScopeUsageTracker(SyncScopeUsageTracker&&);
+ ~SyncScopeUsageTracker();
+
+ SyncScopeUsageTracker& operator=(SyncScopeUsageTracker&&);
+
+ void BufferUsedAs(BufferBase* buffer, wgpu::BufferUsage usage);
+ void TextureViewUsedAs(TextureViewBase* texture, wgpu::TextureUsage usage);
+ void AddRenderBundleTextureUsage(TextureBase* texture,
+ const TextureSubresourceUsage& textureUsage);
+
+    // Walks the bind group and tracks all of its resources.
+ void AddBindGroup(BindGroupBase* group);
+
+ // Returns the per-pass usage for use by backends for APIs with explicit barriers.
+ SyncScopeResourceUsage AcquireSyncScopeUsage();
+
+ private:
+ std::map<BufferBase*, wgpu::BufferUsage> mBufferUsages;
+ std::map<TextureBase*, TextureSubresourceUsage> mTextureUsages;
+ std::set<ExternalTextureBase*> mExternalTextureUsages;
+};
+
+// Helper class to build ComputePassResourceUsages
+class ComputePassResourceUsageTracker {
+ public:
+ ComputePassResourceUsageTracker();
+ ~ComputePassResourceUsageTracker();
+
+ void AddDispatch(SyncScopeResourceUsage scope);
+ void AddReferencedBuffer(BufferBase* buffer);
+ void AddResourcesReferencedByBindGroup(BindGroupBase* group);
+
+ ComputePassResourceUsage AcquireResourceUsage();
+
+ private:
+ ComputePassResourceUsage mUsage;
+};
+
+// Helper class to build RenderPassResourceUsages
+class RenderPassResourceUsageTracker : public SyncScopeUsageTracker {
+ public:
+ RenderPassResourceUsageTracker();
+ RenderPassResourceUsageTracker(RenderPassResourceUsageTracker&&);
+ ~RenderPassResourceUsageTracker();
+
+ RenderPassResourceUsageTracker& operator=(RenderPassResourceUsageTracker&&);
+
+ void TrackQueryAvailability(QuerySetBase* querySet, uint32_t queryIndex);
+ const QueryAvailabilityMap& GetQueryAvailabilityMap() const;
+
+ RenderPassResourceUsage AcquireResourceUsage();
+
+ private:
+ // Hide AcquireSyncScopeUsage since users of this class should use AcquireResourceUsage
+ // instead.
+ using SyncScopeUsageTracker::AcquireSyncScopeUsage;
+
+ // Tracks queries used in the render pass to validate that they aren't written twice.
+ QueryAvailabilityMap mQueryAvailabilities;
+};
} // namespace dawn::native
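Roughly how an encoder drives these trackers: each dispatch gets its own SyncScopeUsageTracker whose result is appended with AddDispatch(), while referenced resources are also recorded for Queue::Submit validation. A sketch with placeholder arguments (the function and the Indirect usage choice are illustrative):

    ComputePassResourceUsage RecordOneDispatch(BindGroupBase* bindGroup,
                                               BufferBase* indirectBuffer) {
        ComputePassResourceUsageTracker passTracker;

        // Each dispatch is its own synchronization scope.
        SyncScopeUsageTracker scope;
        scope.AddBindGroup(bindGroup);
        scope.BufferUsedAs(indirectBuffer, wgpu::BufferUsage::Indirect);
        passTracker.AddDispatch(scope.AcquireSyncScopeUsage());

        // Resources must also be referenced for validation at Queue::Submit time.
        passTracker.AddResourcesReferencedByBindGroup(bindGroup);
        passTracker.AddReferencedBuffer(indirectBuffer);

        return passTracker.AcquireResourceUsage();
    }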
diff --git a/chromium/third_party/dawn/src/dawn/native/PerStage.cpp b/chromium/third_party/dawn/src/dawn/native/PerStage.cpp
index f3d5dc5935b..a8246897f09 100644
--- a/chromium/third_party/dawn/src/dawn/native/PerStage.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/PerStage.cpp
@@ -16,14 +16,14 @@
namespace dawn::native {
- BitSetIterator<kNumStages, SingleShaderStage> IterateStages(wgpu::ShaderStage stages) {
- std::bitset<kNumStages> bits(static_cast<uint32_t>(stages));
- return BitSetIterator<kNumStages, SingleShaderStage>(bits);
- }
+BitSetIterator<kNumStages, SingleShaderStage> IterateStages(wgpu::ShaderStage stages) {
+ std::bitset<kNumStages> bits(static_cast<uint32_t>(stages));
+ return BitSetIterator<kNumStages, SingleShaderStage>(bits);
+}
- wgpu::ShaderStage StageBit(SingleShaderStage stage) {
- ASSERT(static_cast<uint32_t>(stage) < kNumStages);
- return static_cast<wgpu::ShaderStage>(1 << static_cast<uint32_t>(stage));
- }
+wgpu::ShaderStage StageBit(SingleShaderStage stage) {
+ ASSERT(static_cast<uint32_t>(stage) < kNumStages);
+ return static_cast<wgpu::ShaderStage>(1 << static_cast<uint32_t>(stage));
+}
} // namespace dawn::native
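IterateStages() expands a wgpu::ShaderStage bitmask into the individual SingleShaderStage values, and StageBit() maps each one back to its single-bit mask. A small sketch (the explicit casts avoid assuming flag operators on wgpu::ShaderStage):

    void VisitStages() {
        wgpu::ShaderStage visibility = static_cast<wgpu::ShaderStage>(
            static_cast<uint32_t>(wgpu::ShaderStage::Vertex) |
            static_cast<uint32_t>(wgpu::ShaderStage::Fragment));
        for (SingleShaderStage stage : IterateStages(visibility)) {
            // Yields SingleShaderStage::Vertex, then SingleShaderStage::Fragment.
            (void)StageBit(stage);  // maps back to the corresponding single-bit mask
        }
    }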
diff --git a/chromium/third_party/dawn/src/dawn/native/PerStage.h b/chromium/third_party/dawn/src/dawn/native/PerStage.h
index 43fbb743384..3983b11b8ee 100644
--- a/chromium/third_party/dawn/src/dawn/native/PerStage.h
+++ b/chromium/third_party/dawn/src/dawn/native/PerStage.h
@@ -15,6 +15,8 @@
#ifndef SRC_DAWN_NATIVE_PERSTAGE_H_
#define SRC_DAWN_NATIVE_PERSTAGE_H_
+#include <array>
+
#include "dawn/common/Assert.h"
#include "dawn/common/BitSetIterator.h"
#include "dawn/common/Constants.h"
@@ -22,60 +24,56 @@
#include "dawn/native/dawn_platform.h"
-#include <array>
-
namespace dawn::native {
- enum class SingleShaderStage { Vertex, Fragment, Compute };
-
- static_assert(static_cast<uint32_t>(SingleShaderStage::Vertex) < kNumStages);
- static_assert(static_cast<uint32_t>(SingleShaderStage::Fragment) < kNumStages);
- static_assert(static_cast<uint32_t>(SingleShaderStage::Compute) < kNumStages);
-
- static_assert(static_cast<uint32_t>(wgpu::ShaderStage::Vertex) ==
- (1 << static_cast<uint32_t>(SingleShaderStage::Vertex)));
- static_assert(static_cast<uint32_t>(wgpu::ShaderStage::Fragment) ==
- (1 << static_cast<uint32_t>(SingleShaderStage::Fragment)));
- static_assert(static_cast<uint32_t>(wgpu::ShaderStage::Compute) ==
- (1 << static_cast<uint32_t>(SingleShaderStage::Compute)));
-
- BitSetIterator<kNumStages, SingleShaderStage> IterateStages(wgpu::ShaderStage stages);
- wgpu::ShaderStage StageBit(SingleShaderStage stage);
-
- static constexpr wgpu::ShaderStage kAllStages =
- static_cast<wgpu::ShaderStage>((1 << kNumStages) - 1);
-
- template <typename T>
- class PerStage {
- public:
- PerStage() = default;
- explicit PerStage(const T& initialValue) {
- mData.fill(initialValue);
- }
-
- T& operator[](SingleShaderStage stage) {
- DAWN_ASSERT(static_cast<uint32_t>(stage) < kNumStages);
- return mData[static_cast<uint32_t>(stage)];
- }
- const T& operator[](SingleShaderStage stage) const {
- DAWN_ASSERT(static_cast<uint32_t>(stage) < kNumStages);
- return mData[static_cast<uint32_t>(stage)];
- }
-
- T& operator[](wgpu::ShaderStage stageBit) {
- uint32_t bit = static_cast<uint32_t>(stageBit);
- DAWN_ASSERT(bit != 0 && IsPowerOfTwo(bit) && bit <= (1 << kNumStages));
- return mData[Log2(bit)];
- }
- const T& operator[](wgpu::ShaderStage stageBit) const {
- uint32_t bit = static_cast<uint32_t>(stageBit);
- DAWN_ASSERT(bit != 0 && IsPowerOfTwo(bit) && bit <= (1 << kNumStages));
- return mData[Log2(bit)];
- }
-
- private:
- std::array<T, kNumStages> mData;
- };
+enum class SingleShaderStage { Vertex, Fragment, Compute };
+
+static_assert(static_cast<uint32_t>(SingleShaderStage::Vertex) < kNumStages);
+static_assert(static_cast<uint32_t>(SingleShaderStage::Fragment) < kNumStages);
+static_assert(static_cast<uint32_t>(SingleShaderStage::Compute) < kNumStages);
+
+static_assert(static_cast<uint32_t>(wgpu::ShaderStage::Vertex) ==
+ (1 << static_cast<uint32_t>(SingleShaderStage::Vertex)));
+static_assert(static_cast<uint32_t>(wgpu::ShaderStage::Fragment) ==
+ (1 << static_cast<uint32_t>(SingleShaderStage::Fragment)));
+static_assert(static_cast<uint32_t>(wgpu::ShaderStage::Compute) ==
+ (1 << static_cast<uint32_t>(SingleShaderStage::Compute)));
+
+BitSetIterator<kNumStages, SingleShaderStage> IterateStages(wgpu::ShaderStage stages);
+wgpu::ShaderStage StageBit(SingleShaderStage stage);
+
+static constexpr wgpu::ShaderStage kAllStages =
+ static_cast<wgpu::ShaderStage>((1 << kNumStages) - 1);
+
+template <typename T>
+class PerStage {
+ public:
+ PerStage() = default;
+ explicit PerStage(const T& initialValue) { mData.fill(initialValue); }
+
+ T& operator[](SingleShaderStage stage) {
+ DAWN_ASSERT(static_cast<uint32_t>(stage) < kNumStages);
+ return mData[static_cast<uint32_t>(stage)];
+ }
+ const T& operator[](SingleShaderStage stage) const {
+ DAWN_ASSERT(static_cast<uint32_t>(stage) < kNumStages);
+ return mData[static_cast<uint32_t>(stage)];
+ }
+
+ T& operator[](wgpu::ShaderStage stageBit) {
+ uint32_t bit = static_cast<uint32_t>(stageBit);
+ DAWN_ASSERT(bit != 0 && IsPowerOfTwo(bit) && bit <= (1 << kNumStages));
+ return mData[Log2(bit)];
+ }
+ const T& operator[](wgpu::ShaderStage stageBit) const {
+ uint32_t bit = static_cast<uint32_t>(stageBit);
+ DAWN_ASSERT(bit != 0 && IsPowerOfTwo(bit) && bit <= (1 << kNumStages));
+ return mData[Log2(bit)];
+ }
+
+ private:
+ std::array<T, kNumStages> mData;
+};
} // namespace dawn::native
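PerStage<T> is a fixed-size array indexed by shader stage, usable with either a SingleShaderStage value or a single-bit wgpu::ShaderStage. A short usage sketch (the counter itself is made up):

    void CountBindingsPerStage() {
        PerStage<uint32_t> bindingCounts(0);  // every stage starts at 0
        bindingCounts[SingleShaderStage::Vertex] += 2;
        bindingCounts[wgpu::ShaderStage::Fragment] += 1;  // single-bit mask index form
        ASSERT(bindingCounts[SingleShaderStage::Fragment] == 1);
    }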
diff --git a/chromium/third_party/dawn/src/dawn/native/PersistentCache.cpp b/chromium/third_party/dawn/src/dawn/native/PersistentCache.cpp
deleted file mode 100644
index ce3ab492320..00000000000
--- a/chromium/third_party/dawn/src/dawn/native/PersistentCache.cpp
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn/native/PersistentCache.h"
-
-#include "dawn/common/Assert.h"
-#include "dawn/native/Device.h"
-#include "dawn/platform/DawnPlatform.h"
-
-namespace dawn::native {
-
- PersistentCache::PersistentCache(DeviceBase* device)
- : mDevice(device), mCache(GetPlatformCache()) {
- }
-
- ScopedCachedBlob PersistentCache::LoadData(const PersistentCacheKey& key) {
- ScopedCachedBlob blob = {};
- if (mCache == nullptr) {
- return blob;
- }
- std::lock_guard<std::mutex> lock(mMutex);
- blob.bufferSize = mCache->LoadData(ToAPI(mDevice), key.data(), key.size(), nullptr, 0);
- if (blob.bufferSize > 0) {
- blob.buffer.reset(new uint8_t[blob.bufferSize]);
- const size_t bufferSize = mCache->LoadData(ToAPI(mDevice), key.data(), key.size(),
- blob.buffer.get(), blob.bufferSize);
- ASSERT(bufferSize == blob.bufferSize);
- return blob;
- }
- return blob;
- }
-
- void PersistentCache::StoreData(const PersistentCacheKey& key, const void* value, size_t size) {
- if (mCache == nullptr) {
- return;
- }
- ASSERT(value != nullptr);
- ASSERT(size > 0);
- std::lock_guard<std::mutex> lock(mMutex);
- mCache->StoreData(ToAPI(mDevice), key.data(), key.size(), value, size);
- }
-
- dawn::platform::CachingInterface* PersistentCache::GetPlatformCache() {
- // TODO(dawn:549): Create a fingerprint of concatenated version strings (ex. Tint commit
- // hash, Dawn commit hash). This will be used by the client so it may know when to discard
- // previously cached Dawn objects should this fingerprint change.
- dawn::platform::Platform* platform = mDevice->GetPlatform();
- if (platform != nullptr) {
- return platform->GetCachingInterface(/*fingerprint*/ nullptr, /*fingerprintSize*/ 0);
- }
- return nullptr;
- }
-} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/PersistentCache.h b/chromium/third_party/dawn/src/dawn/native/PersistentCache.h
deleted file mode 100644
index 24aa9d0ac5f..00000000000
--- a/chromium/third_party/dawn/src/dawn/native/PersistentCache.h
+++ /dev/null
@@ -1,92 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef SRC_DAWN_NATIVE_PERSISTENTCACHE_H_
-#define SRC_DAWN_NATIVE_PERSISTENTCACHE_H_
-
-#include "dawn/native/Error.h"
-
-#include <mutex>
-#include <vector>
-
-namespace dawn::platform {
- class CachingInterface;
-}
-
-namespace dawn::native {
-
- using PersistentCacheKey = std::vector<uint8_t>;
-
- struct ScopedCachedBlob {
- std::unique_ptr<uint8_t[]> buffer;
- size_t bufferSize = 0;
- };
-
- class DeviceBase;
-
- enum class PersistentKeyType { Shader };
-
- // This class should always be thread-safe as it is used in Create*PipelineAsync() where it is
- // called asynchronously.
- // The thread-safety of any access to mCache (the function LoadData() and StoreData()) is
- // protected by mMutex.
- class PersistentCache {
- public:
- explicit PersistentCache(DeviceBase* device);
-
- // Combines load/store operations into a single call.
- // If the load was successful, a non-empty blob is returned to the caller.
- // Else, the creation callback |createFn| gets invoked with a callback
- // |doCache| to store the newly created blob back in the cache.
- //
- // Example usage:
- //
- // ScopedCachedBlob cachedBlob = {};
- // DAWN_TRY_ASSIGN(cachedBlob, GetOrCreate(key, [&](auto doCache)) {
- // // Create a new blob to be stored
- // doCache(newBlobPtr, newBlobSize); // store
- // }));
- //
- template <typename CreateFn>
- ResultOrError<ScopedCachedBlob> GetOrCreate(const PersistentCacheKey& key,
- CreateFn&& createFn) {
- // Attempt to load an existing blob from the cache.
- ScopedCachedBlob blob = LoadData(key);
- if (blob.bufferSize > 0) {
- return std::move(blob);
- }
-
- // Allow the caller to create a new blob to be stored for the given key.
- DAWN_TRY(createFn([this, key](const void* value, size_t size) {
- this->StoreData(key, value, size);
- }));
-
- return std::move(blob);
- }
-
- private:
- // PersistentCache impl
- ScopedCachedBlob LoadData(const PersistentCacheKey& key);
- void StoreData(const PersistentCacheKey& key, const void* value, size_t size);
-
- dawn::platform::CachingInterface* GetPlatformCache();
-
- DeviceBase* mDevice = nullptr;
-
- std::mutex mMutex;
- dawn::platform::CachingInterface* mCache = nullptr;
- };
-} // namespace dawn::native
-
-#endif // SRC_DAWN_NATIVE_PERSISTENTCACHE_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/Pipeline.cpp b/chromium/third_party/dawn/src/dawn/native/Pipeline.cpp
index 344d948abac..6bee1eb5a5e 100644
--- a/chromium/third_party/dawn/src/dawn/native/Pipeline.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/Pipeline.cpp
@@ -14,6 +14,10 @@
#include "dawn/native/Pipeline.h"
+#include <algorithm>
+#include <unordered_set>
+#include <utility>
+
#include "dawn/native/BindGroupLayout.h"
#include "dawn/native/Device.h"
#include "dawn/native/ObjectBase.h"
@@ -22,238 +26,234 @@
#include "dawn/native/ShaderModule.h"
namespace dawn::native {
- MaybeError ValidateProgrammableStage(DeviceBase* device,
- const ShaderModuleBase* module,
- const std::string& entryPoint,
- uint32_t constantCount,
- const ConstantEntry* constants,
- const PipelineLayoutBase* layout,
- SingleShaderStage stage) {
- DAWN_TRY(device->ValidateObject(module));
-
- DAWN_INVALID_IF(!module->HasEntryPoint(entryPoint),
- "Entry point \"%s\" doesn't exist in the shader module %s.", entryPoint,
- module);
-
- const EntryPointMetadata& metadata = module->GetEntryPoint(entryPoint);
-
- if (!metadata.infringedLimitErrors.empty()) {
- std::ostringstream out;
- out << "Entry point \"" << entryPoint << "\" infringes limits:\n";
- for (const std::string& limit : metadata.infringedLimitErrors) {
- out << " - " << limit << "\n";
- }
- return DAWN_VALIDATION_ERROR(out.str());
+MaybeError ValidateProgrammableStage(DeviceBase* device,
+ const ShaderModuleBase* module,
+ const std::string& entryPoint,
+ uint32_t constantCount,
+ const ConstantEntry* constants,
+ const PipelineLayoutBase* layout,
+ SingleShaderStage stage) {
+ DAWN_TRY(device->ValidateObject(module));
+
+ DAWN_INVALID_IF(!module->HasEntryPoint(entryPoint),
+ "Entry point \"%s\" doesn't exist in the shader module %s.", entryPoint,
+ module);
+
+ const EntryPointMetadata& metadata = module->GetEntryPoint(entryPoint);
+
+ if (!metadata.infringedLimitErrors.empty()) {
+ std::ostringstream out;
+ out << "Entry point \"" << entryPoint << "\" infringes limits:\n";
+ for (const std::string& limit : metadata.infringedLimitErrors) {
+ out << " - " << limit << "\n";
}
+ return DAWN_VALIDATION_ERROR(out.str());
+ }
- DAWN_INVALID_IF(metadata.stage != stage,
- "The stage (%s) of the entry point \"%s\" isn't the expected one (%s).",
- metadata.stage, entryPoint, stage);
+ DAWN_INVALID_IF(metadata.stage != stage,
+ "The stage (%s) of the entry point \"%s\" isn't the expected one (%s).",
+ metadata.stage, entryPoint, stage);
- if (layout != nullptr) {
- DAWN_TRY(ValidateCompatibilityWithPipelineLayout(device, metadata, layout));
- }
-
- if (constantCount > 0u && device->IsToggleEnabled(Toggle::DisallowUnsafeAPIs)) {
- return DAWN_VALIDATION_ERROR(
- "Pipeline overridable constants are disallowed because they are partially "
- "implemented.");
- }
+ if (layout != nullptr) {
+ DAWN_TRY(ValidateCompatibilityWithPipelineLayout(device, metadata, layout));
+ }
- // Validate if overridable constants exist in shader module
- // pipelineBase is not yet constructed at this moment so iterate constants from descriptor
- size_t numUninitializedConstants = metadata.uninitializedOverridableConstants.size();
- // Keep an initialized constants sets to handle duplicate initialization cases
- std::unordered_set<std::string> stageInitializedConstantIdentifiers;
- for (uint32_t i = 0; i < constantCount; i++) {
- DAWN_INVALID_IF(metadata.overridableConstants.count(constants[i].key) == 0,
- "Pipeline overridable constant \"%s\" not found in %s.",
- constants[i].key, module);
-
- if (stageInitializedConstantIdentifiers.count(constants[i].key) == 0) {
- if (metadata.uninitializedOverridableConstants.count(constants[i].key) > 0) {
- numUninitializedConstants--;
- }
- stageInitializedConstantIdentifiers.insert(constants[i].key);
- } else {
- // There are duplicate initializations
- return DAWN_FORMAT_VALIDATION_ERROR(
- "Pipeline overridable constants \"%s\" is set more than once in %s",
- constants[i].key, module);
- }
- }
+ if (constantCount > 0u && device->IsToggleEnabled(Toggle::DisallowUnsafeAPIs)) {
+ return DAWN_VALIDATION_ERROR(
+ "Pipeline overridable constants are disallowed because they are partially "
+ "implemented.");
+ }
- // Validate if any overridable constant is left uninitialized
- if (DAWN_UNLIKELY(numUninitializedConstants > 0)) {
- std::string uninitializedConstantsArray;
- bool isFirst = true;
- for (std::string identifier : metadata.uninitializedOverridableConstants) {
- if (stageInitializedConstantIdentifiers.count(identifier) > 0) {
- continue;
- }
+    // Validate that the overridable constants exist in the shader module. pipelineBase is not
+    // constructed yet at this point, so iterate over the constants from the descriptor instead.
+ size_t numUninitializedConstants = metadata.uninitializedOverridableConstants.size();
+    // Keep a set of initialized constants to handle duplicate initialization cases
+ std::unordered_set<std::string> stageInitializedConstantIdentifiers;
+ for (uint32_t i = 0; i < constantCount; i++) {
+ DAWN_INVALID_IF(metadata.overridableConstants.count(constants[i].key) == 0,
+ "Pipeline overridable constant \"%s\" not found in %s.", constants[i].key,
+ module);
- if (isFirst) {
- isFirst = false;
- } else {
- uninitializedConstantsArray.append(", ");
- }
- uninitializedConstantsArray.append(identifier);
+ if (stageInitializedConstantIdentifiers.count(constants[i].key) == 0) {
+ if (metadata.uninitializedOverridableConstants.count(constants[i].key) > 0) {
+ numUninitializedConstants--;
}
-
+ stageInitializedConstantIdentifiers.insert(constants[i].key);
+ } else {
+ // There are duplicate initializations
return DAWN_FORMAT_VALIDATION_ERROR(
- "There are uninitialized pipeline overridable constants in shader module %s, their "
- "identifiers:[%s]",
- module, uninitializedConstantsArray);
+                "Pipeline overridable constant \"%s\" is set more than once in %s",
+ constants[i].key, module);
}
-
- return {};
}
- // PipelineBase
-
- PipelineBase::PipelineBase(DeviceBase* device,
- PipelineLayoutBase* layout,
- const char* label,
- std::vector<StageAndDescriptor> stages)
- : ApiObjectBase(device, label), mLayout(layout) {
- ASSERT(!stages.empty());
-
- for (const StageAndDescriptor& stage : stages) {
- // Extract argument for this stage.
- SingleShaderStage shaderStage = stage.shaderStage;
- ShaderModuleBase* module = stage.module;
- const char* entryPointName = stage.entryPoint.c_str();
-
- const EntryPointMetadata& metadata = module->GetEntryPoint(entryPointName);
- ASSERT(metadata.stage == shaderStage);
-
- // Record them internally.
- bool isFirstStage = mStageMask == wgpu::ShaderStage::None;
- mStageMask |= StageBit(shaderStage);
- mStages[shaderStage] = {module, entryPointName, &metadata, {}};
- auto& constants = mStages[shaderStage].constants;
- for (uint32_t i = 0; i < stage.constantCount; i++) {
- constants.emplace(stage.constants[i].key, stage.constants[i].value);
+ // Validate if any overridable constant is left uninitialized
+ if (DAWN_UNLIKELY(numUninitializedConstants > 0)) {
+ std::string uninitializedConstantsArray;
+ bool isFirst = true;
+ for (std::string identifier : metadata.uninitializedOverridableConstants) {
+ if (stageInitializedConstantIdentifiers.count(identifier) > 0) {
+ continue;
}
- // Compute the max() of all minBufferSizes across all stages.
- RequiredBufferSizes stageMinBufferSizes =
- ComputeRequiredBufferSizesForLayout(metadata, layout);
-
- if (isFirstStage) {
- mMinBufferSizes = std::move(stageMinBufferSizes);
+ if (isFirst) {
+ isFirst = false;
} else {
- for (BindGroupIndex group(0); group < mMinBufferSizes.size(); ++group) {
- ASSERT(stageMinBufferSizes[group].size() == mMinBufferSizes[group].size());
-
- for (size_t i = 0; i < stageMinBufferSizes[group].size(); ++i) {
- mMinBufferSizes[group][i] =
- std::max(mMinBufferSizes[group][i], stageMinBufferSizes[group][i]);
- }
- }
+ uninitializedConstantsArray.append(", ");
}
+ uninitializedConstantsArray.append(identifier);
}
- }
-
- PipelineBase::PipelineBase(DeviceBase* device) : ApiObjectBase(device, kLabelNotImplemented) {
- }
-
- PipelineBase::PipelineBase(DeviceBase* device, ObjectBase::ErrorTag tag)
- : ApiObjectBase(device, tag) {
- }
-
- PipelineBase::~PipelineBase() = default;
- PipelineLayoutBase* PipelineBase::GetLayout() {
- ASSERT(!IsError());
- return mLayout.Get();
+ return DAWN_FORMAT_VALIDATION_ERROR(
+            "There are uninitialized pipeline overridable constants in shader module %s; their "
+            "identifiers are: [%s]",
+ module, uninitializedConstantsArray);
}
- const PipelineLayoutBase* PipelineBase::GetLayout() const {
- ASSERT(!IsError());
- return mLayout.Get();
- }
+ return {};
+}
+
+// PipelineBase
+
+PipelineBase::PipelineBase(DeviceBase* device,
+ PipelineLayoutBase* layout,
+ const char* label,
+ std::vector<StageAndDescriptor> stages)
+ : ApiObjectBase(device, label), mLayout(layout) {
+ ASSERT(!stages.empty());
+
+ for (const StageAndDescriptor& stage : stages) {
+ // Extract argument for this stage.
+ SingleShaderStage shaderStage = stage.shaderStage;
+ ShaderModuleBase* module = stage.module;
+ const char* entryPointName = stage.entryPoint.c_str();
+
+ const EntryPointMetadata& metadata = module->GetEntryPoint(entryPointName);
+ ASSERT(metadata.stage == shaderStage);
+
+ // Record them internally.
+ bool isFirstStage = mStageMask == wgpu::ShaderStage::None;
+ mStageMask |= StageBit(shaderStage);
+ mStages[shaderStage] = {module, entryPointName, &metadata, {}};
+ auto& constants = mStages[shaderStage].constants;
+ for (uint32_t i = 0; i < stage.constantCount; i++) {
+ constants.emplace(stage.constants[i].key, stage.constants[i].value);
+ }
- const RequiredBufferSizes& PipelineBase::GetMinBufferSizes() const {
- ASSERT(!IsError());
- return mMinBufferSizes;
- }
+ // Compute the max() of all minBufferSizes across all stages.
+ RequiredBufferSizes stageMinBufferSizes =
+ ComputeRequiredBufferSizesForLayout(metadata, layout);
- const ProgrammableStage& PipelineBase::GetStage(SingleShaderStage stage) const {
- ASSERT(!IsError());
- return mStages[stage];
- }
+ if (isFirstStage) {
+ mMinBufferSizes = std::move(stageMinBufferSizes);
+ } else {
+ for (BindGroupIndex group(0); group < mMinBufferSizes.size(); ++group) {
+ ASSERT(stageMinBufferSizes[group].size() == mMinBufferSizes[group].size());
- const PerStage<ProgrammableStage>& PipelineBase::GetAllStages() const {
- return mStages;
+ for (size_t i = 0; i < stageMinBufferSizes[group].size(); ++i) {
+ mMinBufferSizes[group][i] =
+ std::max(mMinBufferSizes[group][i], stageMinBufferSizes[group][i]);
+ }
+ }
+ }
}
-
- wgpu::ShaderStage PipelineBase::GetStageMask() const {
- return mStageMask;
+}
+
+PipelineBase::PipelineBase(DeviceBase* device) : ApiObjectBase(device, kLabelNotImplemented) {}
+
+PipelineBase::PipelineBase(DeviceBase* device, ObjectBase::ErrorTag tag)
+ : ApiObjectBase(device, tag) {}
+
+PipelineBase::~PipelineBase() = default;
+
+PipelineLayoutBase* PipelineBase::GetLayout() {
+ ASSERT(!IsError());
+ return mLayout.Get();
+}
+
+const PipelineLayoutBase* PipelineBase::GetLayout() const {
+ ASSERT(!IsError());
+ return mLayout.Get();
+}
+
+const RequiredBufferSizes& PipelineBase::GetMinBufferSizes() const {
+ ASSERT(!IsError());
+ return mMinBufferSizes;
+}
+
+const ProgrammableStage& PipelineBase::GetStage(SingleShaderStage stage) const {
+ ASSERT(!IsError());
+ return mStages[stage];
+}
+
+const PerStage<ProgrammableStage>& PipelineBase::GetAllStages() const {
+ return mStages;
+}
+
+wgpu::ShaderStage PipelineBase::GetStageMask() const {
+ return mStageMask;
+}
+
+MaybeError PipelineBase::ValidateGetBindGroupLayout(uint32_t groupIndex) {
+ DAWN_TRY(GetDevice()->ValidateIsAlive());
+ DAWN_TRY(GetDevice()->ValidateObject(this));
+ DAWN_TRY(GetDevice()->ValidateObject(mLayout.Get()));
+ DAWN_INVALID_IF(groupIndex >= kMaxBindGroups,
+ "Bind group layout index (%u) exceeds the maximum number of bind groups (%u).",
+ groupIndex, kMaxBindGroups);
+ return {};
+}
+
+ResultOrError<Ref<BindGroupLayoutBase>> PipelineBase::GetBindGroupLayout(uint32_t groupIndexIn) {
+ DAWN_TRY(ValidateGetBindGroupLayout(groupIndexIn));
+
+ BindGroupIndex groupIndex(groupIndexIn);
+ if (!mLayout->GetBindGroupLayoutsMask()[groupIndex]) {
+ return Ref<BindGroupLayoutBase>(GetDevice()->GetEmptyBindGroupLayout());
+ } else {
+ return Ref<BindGroupLayoutBase>(mLayout->GetBindGroupLayout(groupIndex));
}
-
- MaybeError PipelineBase::ValidateGetBindGroupLayout(uint32_t groupIndex) {
- DAWN_TRY(GetDevice()->ValidateIsAlive());
- DAWN_TRY(GetDevice()->ValidateObject(this));
- DAWN_TRY(GetDevice()->ValidateObject(mLayout.Get()));
- DAWN_INVALID_IF(
- groupIndex >= kMaxBindGroups,
- "Bind group layout index (%u) exceeds the maximum number of bind groups (%u).",
- groupIndex, kMaxBindGroups);
- return {};
+}
+
+BindGroupLayoutBase* PipelineBase::APIGetBindGroupLayout(uint32_t groupIndexIn) {
+ Ref<BindGroupLayoutBase> result;
+ if (GetDevice()->ConsumedError(GetBindGroupLayout(groupIndexIn), &result,
+ "Validating GetBindGroupLayout (%u) on %s", groupIndexIn,
+ this)) {
+ return BindGroupLayoutBase::MakeError(GetDevice());
}
+ return result.Detach();
+}
- ResultOrError<Ref<BindGroupLayoutBase>> PipelineBase::GetBindGroupLayout(
- uint32_t groupIndexIn) {
- DAWN_TRY(ValidateGetBindGroupLayout(groupIndexIn));
+size_t PipelineBase::ComputeContentHash() {
+ ObjectContentHasher recorder;
+ recorder.Record(mLayout->GetContentHash());
- BindGroupIndex groupIndex(groupIndexIn);
- if (!mLayout->GetBindGroupLayoutsMask()[groupIndex]) {
- return Ref<BindGroupLayoutBase>(GetDevice()->GetEmptyBindGroupLayout());
- } else {
- return Ref<BindGroupLayoutBase>(mLayout->GetBindGroupLayout(groupIndex));
- }
+ recorder.Record(mStageMask);
+ for (SingleShaderStage stage : IterateStages(mStageMask)) {
+ recorder.Record(mStages[stage].module->GetContentHash());
+ recorder.Record(mStages[stage].entryPoint);
}
- BindGroupLayoutBase* PipelineBase::APIGetBindGroupLayout(uint32_t groupIndexIn) {
- Ref<BindGroupLayoutBase> result;
- if (GetDevice()->ConsumedError(GetBindGroupLayout(groupIndexIn), &result,
- "Validating GetBindGroupLayout (%u) on %s", groupIndexIn,
- this)) {
- return BindGroupLayoutBase::MakeError(GetDevice());
- }
- return result.Detach();
- }
-
- size_t PipelineBase::ComputeContentHash() {
- ObjectContentHasher recorder;
- recorder.Record(mLayout->GetContentHash());
+ return recorder.GetContentHash();
+}
- recorder.Record(mStageMask);
- for (SingleShaderStage stage : IterateStages(mStageMask)) {
- recorder.Record(mStages[stage].module->GetContentHash());
- recorder.Record(mStages[stage].entryPoint);
- }
-
- return recorder.GetContentHash();
+// static
+bool PipelineBase::EqualForCache(const PipelineBase* a, const PipelineBase* b) {
+ // The layout is deduplicated so it can be compared by pointer.
+ if (a->mLayout.Get() != b->mLayout.Get() || a->mStageMask != b->mStageMask) {
+ return false;
}
- // static
- bool PipelineBase::EqualForCache(const PipelineBase* a, const PipelineBase* b) {
- // The layout is deduplicated so it can be compared by pointer.
- if (a->mLayout.Get() != b->mLayout.Get() || a->mStageMask != b->mStageMask) {
+ for (SingleShaderStage stage : IterateStages(a->mStageMask)) {
+ // The module is deduplicated so it can be compared by pointer.
+ if (a->mStages[stage].module.Get() != b->mStages[stage].module.Get() ||
+ a->mStages[stage].entryPoint != b->mStages[stage].entryPoint) {
return false;
}
-
- for (SingleShaderStage stage : IterateStages(a->mStageMask)) {
- // The module is deduplicated so it can be compared by pointer.
- if (a->mStages[stage].module.Get() != b->mStages[stage].module.Get() ||
- a->mStages[stage].entryPoint != b->mStages[stage].entryPoint) {
- return false;
- }
- }
-
- return true;
}
+ return true;
+}
+
} // namespace dawn::native
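
The overridable-constant checks that ValidateProgrammableStage performs above (unknown keys, duplicate initialization, and leftover uninitialized constants) reduce to straightforward set bookkeeping. The following is a minimal, self-contained sketch of that logic using only standard-library types; the function name, the bool return, and the stderr messages are illustrative stand-ins, not Dawn's error-handling machinery.

    #include <cstddef>
    #include <iostream>
    #include <set>
    #include <string>
    #include <unordered_set>
    #include <utility>
    #include <vector>

    // Descriptor-side (key, value) constant entries, as a simplified stand-in.
    using ConstantEntryList = std::vector<std::pair<std::string, double>>;

    // Returns true when the entries are valid against the reflected shader metadata:
    // every key exists, no key is set twice, and no uninitialized constant is left over.
    bool ValidateOverridableConstants(const std::set<std::string>& declared,
                                      const std::set<std::string>& uninitialized,
                                      const ConstantEntryList& entries) {
        std::size_t numUninitialized = uninitialized.size();
        std::unordered_set<std::string> seen;  // catches duplicate initialization
        for (const auto& kv : entries) {
            if (declared.count(kv.first) == 0) {
                std::cerr << "constant \"" << kv.first << "\" not found in the module\n";
                return false;
            }
            if (!seen.insert(kv.first).second) {
                std::cerr << "constant \"" << kv.first << "\" is set more than once\n";
                return false;
            }
            if (uninitialized.count(kv.first) > 0) {
                numUninitialized--;  // this entry supplies the missing default
            }
        }
        if (numUninitialized > 0) {
            std::cerr << numUninitialized << " overridable constant(s) left uninitialized\n";
            return false;
        }
        return true;
    }

    int main() {
        std::set<std::string> declared = {"scale", "bias"};
        std::set<std::string> uninitialized = {"bias"};  // declared without a default in the shader
        // Valid: "bias" gets a value, "scale" keeps its shader-side default.
        std::cout << ValidateOverridableConstants(declared, uninitialized, {{"bias", 2.0}}) << "\n";
        // Invalid: "bias" never receives a value.
        std::cout << ValidateOverridableConstants(declared, uninitialized, {{"scale", 1.5}}) << "\n";
    }
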
diff --git a/chromium/third_party/dawn/src/dawn/native/Pipeline.h b/chromium/third_party/dawn/src/dawn/native/Pipeline.h
index 4fffac53763..2d5b6dfc65e 100644
--- a/chromium/third_party/dawn/src/dawn/native/Pipeline.h
+++ b/chromium/third_party/dawn/src/dawn/native/Pipeline.h
@@ -15,6 +15,12 @@
#ifndef SRC_DAWN_NATIVE_PIPELINE_H_
#define SRC_DAWN_NATIVE_PIPELINE_H_
+#include <array>
+#include <bitset>
+#include <map>
+#include <string>
+#include <vector>
+
#include "dawn/native/CachedObject.h"
#include "dawn/native/Forward.h"
#include "dawn/native/ObjectBase.h"
@@ -24,74 +30,71 @@
#include "dawn/native/dawn_platform.h"
-#include <array>
-#include <bitset>
-
namespace dawn::native {
- MaybeError ValidateProgrammableStage(DeviceBase* device,
- const ShaderModuleBase* module,
- const std::string& entryPoint,
- uint32_t constantCount,
- const ConstantEntry* constants,
- const PipelineLayoutBase* layout,
- SingleShaderStage stage);
+MaybeError ValidateProgrammableStage(DeviceBase* device,
+ const ShaderModuleBase* module,
+ const std::string& entryPoint,
+ uint32_t constantCount,
+ const ConstantEntry* constants,
+ const PipelineLayoutBase* layout,
+ SingleShaderStage stage);
- // Use map to make sure constant keys are sorted for creating shader cache keys
- using PipelineConstantEntries = std::map<std::string, double>;
+// Use map to make sure constant keys are sorted for creating shader cache keys
+using PipelineConstantEntries = std::map<std::string, double>;
- struct ProgrammableStage {
- Ref<ShaderModuleBase> module;
- std::string entryPoint;
+struct ProgrammableStage {
+ Ref<ShaderModuleBase> module;
+ std::string entryPoint;
- // The metadata lives as long as module, that's ref-ed in the same structure.
- const EntryPointMetadata* metadata = nullptr;
+ // The metadata lives as long as module, that's ref-ed in the same structure.
+ const EntryPointMetadata* metadata = nullptr;
- PipelineConstantEntries constants;
- };
+ PipelineConstantEntries constants;
+};
- class PipelineBase : public ApiObjectBase, public CachedObject {
- public:
- ~PipelineBase() override;
+class PipelineBase : public ApiObjectBase, public CachedObject {
+ public:
+ ~PipelineBase() override;
- PipelineLayoutBase* GetLayout();
- const PipelineLayoutBase* GetLayout() const;
- const RequiredBufferSizes& GetMinBufferSizes() const;
- const ProgrammableStage& GetStage(SingleShaderStage stage) const;
- const PerStage<ProgrammableStage>& GetAllStages() const;
- wgpu::ShaderStage GetStageMask() const;
+ PipelineLayoutBase* GetLayout();
+ const PipelineLayoutBase* GetLayout() const;
+ const RequiredBufferSizes& GetMinBufferSizes() const;
+ const ProgrammableStage& GetStage(SingleShaderStage stage) const;
+ const PerStage<ProgrammableStage>& GetAllStages() const;
+ wgpu::ShaderStage GetStageMask() const;
- ResultOrError<Ref<BindGroupLayoutBase>> GetBindGroupLayout(uint32_t groupIndex);
+ ResultOrError<Ref<BindGroupLayoutBase>> GetBindGroupLayout(uint32_t groupIndex);
- // Helper functions for std::unordered_map-based pipeline caches.
- size_t ComputeContentHash() override;
- static bool EqualForCache(const PipelineBase* a, const PipelineBase* b);
+ // Helper functions for std::unordered_map-based pipeline caches.
+ size_t ComputeContentHash() override;
+ static bool EqualForCache(const PipelineBase* a, const PipelineBase* b);
- // Implementation of the API entrypoint. Do not use in a reentrant manner.
- BindGroupLayoutBase* APIGetBindGroupLayout(uint32_t groupIndex);
+ // Implementation of the API entrypoint. Do not use in a reentrant manner.
+ BindGroupLayoutBase* APIGetBindGroupLayout(uint32_t groupIndex);
- // Initialize() should only be called once by the frontend.
- virtual MaybeError Initialize() = 0;
+ // Initialize() should only be called once by the frontend.
+ virtual MaybeError Initialize() = 0;
- protected:
- PipelineBase(DeviceBase* device,
- PipelineLayoutBase* layout,
- const char* label,
- std::vector<StageAndDescriptor> stages);
- PipelineBase(DeviceBase* device, ObjectBase::ErrorTag tag);
+ protected:
+ PipelineBase(DeviceBase* device,
+ PipelineLayoutBase* layout,
+ const char* label,
+ std::vector<StageAndDescriptor> stages);
+ PipelineBase(DeviceBase* device, ObjectBase::ErrorTag tag);
- // Constructor used only for mocking and testing.
- explicit PipelineBase(DeviceBase* device);
+ // Constructor used only for mocking and testing.
+ explicit PipelineBase(DeviceBase* device);
- private:
- MaybeError ValidateGetBindGroupLayout(uint32_t group);
+ private:
+ MaybeError ValidateGetBindGroupLayout(uint32_t group);
- wgpu::ShaderStage mStageMask = wgpu::ShaderStage::None;
- PerStage<ProgrammableStage> mStages;
+ wgpu::ShaderStage mStageMask = wgpu::ShaderStage::None;
+ PerStage<ProgrammableStage> mStages;
- Ref<PipelineLayoutBase> mLayout;
- RequiredBufferSizes mMinBufferSizes;
- };
+ Ref<PipelineLayoutBase> mLayout;
+ RequiredBufferSizes mMinBufferSizes;
+};
} // namespace dawn::native
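
The header above defines PipelineConstantEntries as a std::map keyed by constant name so that iteration order is deterministic when the constants are folded into a shader cache key. A self-contained sketch of why that matters, with a plain string standing in for the real cache-key serialization:

    #include <iostream>
    #include <map>
    #include <sstream>
    #include <string>

    // Mirrors the PipelineConstantEntries alias above: std::map keeps keys sorted,
    // so two pipelines with the same constants always serialize identically.
    using PipelineConstantEntries = std::map<std::string, double>;

    std::string SerializeConstantsForCacheKey(const PipelineConstantEntries& constants) {
        std::ostringstream key;
        for (const auto& [name, value] : constants) {  // iteration is in sorted key order
            key << name << '=' << value << ';';
        }
        return key.str();
    }

    int main() {
        PipelineConstantEntries a;
        a.emplace("scale", 2.0);
        a.emplace("bias", 0.5);

        PipelineConstantEntries b;
        b.emplace("bias", 0.5);   // inserted in a different order...
        b.emplace("scale", 2.0);

        // ...but the serialized form is identical, so cache lookups match.
        std::cout << SerializeConstantsForCacheKey(a) << "\n";
        std::cout << SerializeConstantsForCacheKey(b) << "\n";
    }
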
diff --git a/chromium/third_party/dawn/src/dawn/native/PipelineCache.cpp b/chromium/third_party/dawn/src/dawn/native/PipelineCache.cpp
new file mode 100644
index 00000000000..762e08526de
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/PipelineCache.cpp
@@ -0,0 +1,58 @@
+// Copyright 2022 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/PipelineCache.h"
+
+namespace dawn::native {
+
+PipelineCacheBase::PipelineCacheBase(BlobCache* cache, const CacheKey& key)
+ : mCache(cache), mKey(key) {}
+
+Blob PipelineCacheBase::Initialize() {
+ ASSERT(!mInitialized);
+ Blob blob = mCache != nullptr ? mCache->Load(mKey) : Blob();
+ mCacheHit = !blob.Empty();
+ mInitialized = true;
+ return blob;
+}
+
+bool PipelineCacheBase::CacheHit() const {
+ ASSERT(mInitialized);
+ return mCacheHit;
+}
+
+MaybeError PipelineCacheBase::Flush() {
+ if (mCache == nullptr) {
+ return {};
+ }
+ // Try to write the data out to the persistent cache.
+ Blob blob;
+ DAWN_TRY(SerializeToBlobImpl(&blob));
+ if (blob.Size() > 0) {
+ // Using a simple heuristic to decide whether to write out the blob right now. May need
+ // smarter tracking when we are dealing with monolithic caches.
+ mCache->Store(mKey, blob);
+ }
+ return {};
+}
+
+MaybeError PipelineCacheBase::FlushIfNeeded() {
+ ASSERT(mInitialized);
+ if (!CacheHit()) {
+ return Flush();
+ }
+ return {};
+}
+
+} // namespace dawn::native
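
The control flow of the new PipelineCacheBase is: Initialize() loads whatever blob the backing cache holds for the key and records whether that was a hit, and FlushIfNeeded() serializes the backend object back out only on a miss, skipping empty blobs. The sketch below reproduces that flow with a std::map standing in for the blob cache and a byte vector standing in for Blob; PipelineCacheLike and SerializeBackendState are hypothetical names, not Dawn's interfaces.

    #include <cassert>
    #include <iostream>
    #include <map>
    #include <string>
    #include <utility>
    #include <vector>

    using Blob = std::vector<unsigned char>;        // stand-in for the Blob type
    using BlobCache = std::map<std::string, Blob>;  // stand-in for the adapter-owned blob cache

    class PipelineCacheLike {
      public:
        PipelineCacheLike(BlobCache* cache, std::string key) : mCache(cache), mKey(std::move(key)) {}

        // Load the cached blob (if any) and remember whether it was a hit.
        Blob Initialize() {
            assert(!mInitialized);
            Blob blob;
            if (mCache != nullptr) {
                auto it = mCache->find(mKey);
                if (it != mCache->end()) {
                    blob = it->second;
                }
            }
            mCacheHit = !blob.empty();
            mInitialized = true;
            return blob;
        }

        bool CacheHit() const {
            assert(mInitialized);
            return mCacheHit;
        }

        // Write the current backend state back out, but only if the initial load missed.
        void FlushIfNeeded() {
            assert(mInitialized);
            if (mCache == nullptr || CacheHit()) {
                return;
            }
            Blob blob = SerializeBackendState();
            if (!blob.empty()) {
                (*mCache)[mKey] = blob;
            }
        }

      private:
        // Stand-in for the backend's SerializeToBlobImpl (e.g. querying driver cache data).
        Blob SerializeBackendState() const { return {0xCA, 0xFE}; }

        BlobCache* mCache;
        std::string mKey;
        bool mInitialized = false;
        bool mCacheHit = false;
    };

    int main() {
        BlobCache cache;
        PipelineCacheLike first(&cache, "pipeline-key");
        first.Initialize();     // miss: nothing stored yet
        first.FlushIfNeeded();  // writes the serialized state
        PipelineCacheLike second(&cache, "pipeline-key");
        Blob blob = second.Initialize();
        std::cout << "second run hit: " << second.CacheHit()
                  << ", blob size: " << blob.size() << "\n";  // hit, size 2
    }
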
diff --git a/chromium/third_party/dawn/src/dawn/native/PipelineCache.h b/chromium/third_party/dawn/src/dawn/native/PipelineCache.h
new file mode 100644
index 00000000000..5b44a60138a
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/PipelineCache.h
@@ -0,0 +1,64 @@
+// Copyright 2022 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef SRC_DAWN_NATIVE_PIPELINECACHE_H_
+#define SRC_DAWN_NATIVE_PIPELINECACHE_H_
+
+#include "dawn/common/RefCounted.h"
+#include "dawn/native/BlobCache.h"
+#include "dawn/native/CacheKey.h"
+#include "dawn/native/Error.h"
+
+namespace dawn::native {
+
+// Abstraction layer for backend dependent pipeline caching.
+class PipelineCacheBase : public RefCounted {
+ public:
+ // Returns whether or not we got a cache hit when initializing.
+ bool CacheHit() const;
+
+ // Serializes and writes the current contents of the backend cache object into the backing
+    // blob cache, potentially overwriting what is already there. Useful when we are working
+    // with more monolithic caches where overwriting is sometimes expected.
+ MaybeError Flush();
+
+ // Serializes and writes the current contents of the backend cache object into the backing
+ // blob cache iff the initial read from the backend cache did not result in a hit.
+ MaybeError FlushIfNeeded();
+
+ protected:
+ PipelineCacheBase(BlobCache* cache, const CacheKey& key);
+
+ // Initializes and returns the cached blob given the cache and keys. Used by backend
+ // implementations to get the cache and set the cache hit state. Should only be called once.
+ Blob Initialize();
+
+ private:
+ // Backend implementation of serialization of the cache into a blob.
+    // Note: because no local cached blob should be destructed and copy elision has strict
+    // requirements, the cached blob is passed in as a pointer to be assigned.
+ virtual MaybeError SerializeToBlobImpl(Blob* blob) = 0;
+
+ // The blob cache is owned by the Adapter and pipeline caches are owned/created by devices
+ // or adapters. Since the device owns a reference to the Instance which owns the Adapter,
+ // the blob cache is guaranteed to be valid throughout the lifetime of the object.
+ BlobCache* mCache;
+ CacheKey mKey;
+ bool mInitialized = false;
+ bool mCacheHit = false;
+};
+
+} // namespace dawn::native
+
+#endif // SRC_DAWN_NATIVE_PIPELINECACHE_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/PipelineLayout.cpp b/chromium/third_party/dawn/src/dawn/native/PipelineLayout.cpp
index 56ab1004221..8365f6db3aa 100644
--- a/chromium/third_party/dawn/src/dawn/native/PipelineLayout.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/PipelineLayout.cpp
@@ -14,6 +14,10 @@
#include "dawn/native/PipelineLayout.h"
+#include <algorithm>
+#include <map>
+#include <utility>
+
#include "dawn/common/Assert.h"
#include "dawn/common/BitSetIterator.h"
#include "dawn/common/ityp_stack_vec.h"
@@ -25,385 +29,378 @@
namespace dawn::native {
- MaybeError ValidatePipelineLayoutDescriptor(
- DeviceBase* device,
- const PipelineLayoutDescriptor* descriptor,
- PipelineCompatibilityToken pipelineCompatibilityToken) {
- if (descriptor->nextInChain != nullptr) {
- return DAWN_VALIDATION_ERROR("nextInChain must be nullptr");
- }
-
- if (descriptor->bindGroupLayoutCount > kMaxBindGroups) {
- return DAWN_VALIDATION_ERROR("too many bind group layouts");
- }
-
- BindingCounts bindingCounts = {};
- for (uint32_t i = 0; i < descriptor->bindGroupLayoutCount; ++i) {
- DAWN_TRY(device->ValidateObject(descriptor->bindGroupLayouts[i]));
- if (descriptor->bindGroupLayouts[i]->GetPipelineCompatibilityToken() !=
- pipelineCompatibilityToken) {
- return DAWN_VALIDATION_ERROR(
- "cannot create a pipeline layout using a bind group layout that was created as "
- "part of a pipeline's default layout");
- }
- AccumulateBindingCounts(&bindingCounts,
- descriptor->bindGroupLayouts[i]->GetBindingCountInfo());
- }
+MaybeError ValidatePipelineLayoutDescriptor(DeviceBase* device,
+ const PipelineLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken) {
+ if (descriptor->nextInChain != nullptr) {
+ return DAWN_VALIDATION_ERROR("nextInChain must be nullptr");
+ }
- DAWN_TRY(ValidateBindingCounts(bindingCounts));
- return {};
+ if (descriptor->bindGroupLayoutCount > kMaxBindGroups) {
+ return DAWN_VALIDATION_ERROR("too many bind group layouts");
}
- // PipelineLayoutBase
-
- PipelineLayoutBase::PipelineLayoutBase(DeviceBase* device,
- const PipelineLayoutDescriptor* descriptor,
- ApiObjectBase::UntrackedByDeviceTag tag)
- : ApiObjectBase(device, descriptor->label) {
- ASSERT(descriptor->bindGroupLayoutCount <= kMaxBindGroups);
- for (BindGroupIndex group(0); group < BindGroupIndex(descriptor->bindGroupLayoutCount);
- ++group) {
- mBindGroupLayouts[group] = descriptor->bindGroupLayouts[static_cast<uint32_t>(group)];
- mMask.set(group);
+ BindingCounts bindingCounts = {};
+ for (uint32_t i = 0; i < descriptor->bindGroupLayoutCount; ++i) {
+ DAWN_TRY(device->ValidateObject(descriptor->bindGroupLayouts[i]));
+ if (descriptor->bindGroupLayouts[i]->GetPipelineCompatibilityToken() !=
+ pipelineCompatibilityToken) {
+ return DAWN_VALIDATION_ERROR(
+ "cannot create a pipeline layout using a bind group layout that was created as "
+ "part of a pipeline's default layout");
}
+ AccumulateBindingCounts(&bindingCounts,
+ descriptor->bindGroupLayouts[i]->GetBindingCountInfo());
}
- PipelineLayoutBase::PipelineLayoutBase(DeviceBase* device,
- const PipelineLayoutDescriptor* descriptor)
- : PipelineLayoutBase(device, descriptor, kUntrackedByDevice) {
- TrackInDevice();
+ DAWN_TRY(ValidateBindingCounts(bindingCounts));
+ return {};
+}
+
+// PipelineLayoutBase
+
+PipelineLayoutBase::PipelineLayoutBase(DeviceBase* device,
+ const PipelineLayoutDescriptor* descriptor,
+ ApiObjectBase::UntrackedByDeviceTag tag)
+ : ApiObjectBase(device, descriptor->label) {
+ ASSERT(descriptor->bindGroupLayoutCount <= kMaxBindGroups);
+ for (BindGroupIndex group(0); group < BindGroupIndex(descriptor->bindGroupLayoutCount);
+ ++group) {
+ mBindGroupLayouts[group] = descriptor->bindGroupLayouts[static_cast<uint32_t>(group)];
+ mMask.set(group);
}
+}
- PipelineLayoutBase::PipelineLayoutBase(DeviceBase* device)
- : ApiObjectBase(device, kLabelNotImplemented) {
- TrackInDevice();
- }
+PipelineLayoutBase::PipelineLayoutBase(DeviceBase* device,
+ const PipelineLayoutDescriptor* descriptor)
+ : PipelineLayoutBase(device, descriptor, kUntrackedByDevice) {
+ TrackInDevice();
+}
- PipelineLayoutBase::PipelineLayoutBase(DeviceBase* device, ObjectBase::ErrorTag tag)
- : ApiObjectBase(device, tag) {
- }
+PipelineLayoutBase::PipelineLayoutBase(DeviceBase* device)
+ : ApiObjectBase(device, kLabelNotImplemented) {
+ TrackInDevice();
+}
- PipelineLayoutBase::~PipelineLayoutBase() = default;
+PipelineLayoutBase::PipelineLayoutBase(DeviceBase* device, ObjectBase::ErrorTag tag)
+ : ApiObjectBase(device, tag) {}
- void PipelineLayoutBase::DestroyImpl() {
- if (IsCachedReference()) {
- // Do not uncache the actual cached object if we are a blueprint.
- GetDevice()->UncachePipelineLayout(this);
- }
- }
+PipelineLayoutBase::~PipelineLayoutBase() = default;
- // static
- PipelineLayoutBase* PipelineLayoutBase::MakeError(DeviceBase* device) {
- return new PipelineLayoutBase(device, ObjectBase::kError);
+void PipelineLayoutBase::DestroyImpl() {
+ if (IsCachedReference()) {
+ // Do not uncache the actual cached object if we are a blueprint.
+ GetDevice()->UncachePipelineLayout(this);
}
+}
+
+// static
+PipelineLayoutBase* PipelineLayoutBase::MakeError(DeviceBase* device) {
+ return new PipelineLayoutBase(device, ObjectBase::kError);
+}
+
+// static
+ResultOrError<Ref<PipelineLayoutBase>> PipelineLayoutBase::CreateDefault(
+ DeviceBase* device,
+ std::vector<StageAndDescriptor> stages) {
+ using EntryMap = std::map<BindingNumber, BindGroupLayoutEntry>;
+
+ // Merges two entries at the same location, if they are allowed to be merged.
+ auto MergeEntries = [](BindGroupLayoutEntry* modifiedEntry,
+ const BindGroupLayoutEntry& mergedEntry) -> MaybeError {
+ // Visibility is excluded because we take the OR across stages.
+ bool compatible =
+ modifiedEntry->binding == mergedEntry.binding &&
+ modifiedEntry->buffer.type == mergedEntry.buffer.type &&
+ modifiedEntry->sampler.type == mergedEntry.sampler.type &&
+ // Compatibility between these sample types is checked below.
+ (modifiedEntry->texture.sampleType != wgpu::TextureSampleType::Undefined) ==
+ (mergedEntry.texture.sampleType != wgpu::TextureSampleType::Undefined) &&
+ modifiedEntry->storageTexture.access == mergedEntry.storageTexture.access;
+
+ // Minimum buffer binding size excluded because we take the maximum seen across stages.
+ if (modifiedEntry->buffer.type != wgpu::BufferBindingType::Undefined) {
+ compatible = compatible && modifiedEntry->buffer.hasDynamicOffset ==
+ mergedEntry.buffer.hasDynamicOffset;
+ }
- // static
- ResultOrError<Ref<PipelineLayoutBase>> PipelineLayoutBase::CreateDefault(
- DeviceBase* device,
- std::vector<StageAndDescriptor> stages) {
- using EntryMap = std::map<BindingNumber, BindGroupLayoutEntry>;
-
- // Merges two entries at the same location, if they are allowed to be merged.
- auto MergeEntries = [](BindGroupLayoutEntry* modifiedEntry,
- const BindGroupLayoutEntry& mergedEntry) -> MaybeError {
- // Visibility is excluded because we take the OR across stages.
- bool compatible =
- modifiedEntry->binding == mergedEntry.binding &&
- modifiedEntry->buffer.type == mergedEntry.buffer.type &&
- modifiedEntry->sampler.type == mergedEntry.sampler.type &&
- // Compatibility between these sample types is checked below.
- (modifiedEntry->texture.sampleType != wgpu::TextureSampleType::Undefined) ==
- (mergedEntry.texture.sampleType != wgpu::TextureSampleType::Undefined) &&
- modifiedEntry->storageTexture.access == mergedEntry.storageTexture.access;
-
- // Minimum buffer binding size excluded because we take the maximum seen across stages.
- if (modifiedEntry->buffer.type != wgpu::BufferBindingType::Undefined) {
- compatible = compatible && modifiedEntry->buffer.hasDynamicOffset ==
- mergedEntry.buffer.hasDynamicOffset;
- }
-
- if (modifiedEntry->texture.sampleType != wgpu::TextureSampleType::Undefined) {
- // Sample types are compatible if they are exactly equal,
- // or if the |modifiedEntry| is Float and the |mergedEntry| is UnfilterableFloat.
- // Note that the |mergedEntry| never has type Float. Texture bindings all start
- // as UnfilterableFloat and are promoted to Float if they are statically used with
- // a sampler.
- ASSERT(mergedEntry.texture.sampleType != wgpu::TextureSampleType::Float);
- bool compatibleSampleTypes =
- modifiedEntry->texture.sampleType == mergedEntry.texture.sampleType ||
- (modifiedEntry->texture.sampleType == wgpu::TextureSampleType::Float &&
- mergedEntry.texture.sampleType == wgpu::TextureSampleType::UnfilterableFloat);
- compatible =
- compatible && compatibleSampleTypes &&
- modifiedEntry->texture.viewDimension == mergedEntry.texture.viewDimension &&
- modifiedEntry->texture.multisampled == mergedEntry.texture.multisampled;
- }
+ if (modifiedEntry->texture.sampleType != wgpu::TextureSampleType::Undefined) {
+ // Sample types are compatible if they are exactly equal,
+ // or if the |modifiedEntry| is Float and the |mergedEntry| is UnfilterableFloat.
+ // Note that the |mergedEntry| never has type Float. Texture bindings all start
+ // as UnfilterableFloat and are promoted to Float if they are statically used with
+ // a sampler.
+ ASSERT(mergedEntry.texture.sampleType != wgpu::TextureSampleType::Float);
+ bool compatibleSampleTypes =
+ modifiedEntry->texture.sampleType == mergedEntry.texture.sampleType ||
+ (modifiedEntry->texture.sampleType == wgpu::TextureSampleType::Float &&
+ mergedEntry.texture.sampleType == wgpu::TextureSampleType::UnfilterableFloat);
+ compatible =
+ compatible && compatibleSampleTypes &&
+ modifiedEntry->texture.viewDimension == mergedEntry.texture.viewDimension &&
+ modifiedEntry->texture.multisampled == mergedEntry.texture.multisampled;
+ }
- if (modifiedEntry->storageTexture.access != wgpu::StorageTextureAccess::Undefined) {
- compatible =
- compatible &&
- modifiedEntry->storageTexture.format == mergedEntry.storageTexture.format &&
- modifiedEntry->storageTexture.viewDimension ==
- mergedEntry.storageTexture.viewDimension;
- }
+ if (modifiedEntry->storageTexture.access != wgpu::StorageTextureAccess::Undefined) {
+ compatible =
+ compatible &&
+ modifiedEntry->storageTexture.format == mergedEntry.storageTexture.format &&
+ modifiedEntry->storageTexture.viewDimension ==
+ mergedEntry.storageTexture.viewDimension;
+ }
- // Check if any properties are incompatible with existing entry
- // If compatible, we will merge some properties
- if (!compatible) {
- return DAWN_VALIDATION_ERROR(
- "Duplicate binding in default pipeline layout initialization "
- "not compatible with previous declaration");
- }
+ // Check if any properties are incompatible with existing entry
+ // If compatible, we will merge some properties
+ if (!compatible) {
+ return DAWN_VALIDATION_ERROR(
+ "Duplicate binding in default pipeline layout initialization "
+ "not compatible with previous declaration");
+ }
- // Use the max |minBufferBindingSize| we find.
- modifiedEntry->buffer.minBindingSize =
- std::max(modifiedEntry->buffer.minBindingSize, mergedEntry.buffer.minBindingSize);
-
- // Use the OR of all the stages at which we find this binding.
- modifiedEntry->visibility |= mergedEntry.visibility;
-
- return {};
- };
-
- // Does the trivial conversions from a ShaderBindingInfo to a BindGroupLayoutEntry
- auto ConvertMetadataToEntry =
- [](const ShaderBindingInfo& shaderBinding,
- const ExternalTextureBindingLayout* externalTextureBindingEntry)
- -> BindGroupLayoutEntry {
- BindGroupLayoutEntry entry = {};
- switch (shaderBinding.bindingType) {
- case BindingInfoType::Buffer:
- entry.buffer.type = shaderBinding.buffer.type;
- entry.buffer.hasDynamicOffset = shaderBinding.buffer.hasDynamicOffset;
- entry.buffer.minBindingSize = shaderBinding.buffer.minBindingSize;
- break;
- case BindingInfoType::Sampler:
- if (shaderBinding.sampler.isComparison) {
- entry.sampler.type = wgpu::SamplerBindingType::Comparison;
- } else {
- entry.sampler.type = wgpu::SamplerBindingType::Filtering;
- }
- break;
- case BindingInfoType::Texture:
- switch (shaderBinding.texture.compatibleSampleTypes) {
- case SampleTypeBit::Depth:
- entry.texture.sampleType = wgpu::TextureSampleType::Depth;
- break;
- case SampleTypeBit::Sint:
- entry.texture.sampleType = wgpu::TextureSampleType::Sint;
- break;
- case SampleTypeBit::Uint:
- entry.texture.sampleType = wgpu::TextureSampleType::Uint;
- break;
- case SampleTypeBit::Float:
- case SampleTypeBit::UnfilterableFloat:
- case SampleTypeBit::None:
- UNREACHABLE();
- break;
- default:
- if (shaderBinding.texture.compatibleSampleTypes ==
- (SampleTypeBit::Float | SampleTypeBit::UnfilterableFloat)) {
- // Default to UnfilterableFloat. It will be promoted to Float if it
- // is used with a sampler.
- entry.texture.sampleType =
- wgpu::TextureSampleType::UnfilterableFloat;
- } else {
- UNREACHABLE();
- }
- }
- entry.texture.viewDimension = shaderBinding.texture.viewDimension;
- entry.texture.multisampled = shaderBinding.texture.multisampled;
- break;
- case BindingInfoType::StorageTexture:
- entry.storageTexture.access = shaderBinding.storageTexture.access;
- entry.storageTexture.format = shaderBinding.storageTexture.format;
- entry.storageTexture.viewDimension = shaderBinding.storageTexture.viewDimension;
- break;
- case BindingInfoType::ExternalTexture:
- entry.nextInChain = externalTextureBindingEntry;
- break;
- }
- return entry;
- };
-
- PipelineCompatibilityToken pipelineCompatibilityToken =
- device->GetNextPipelineCompatibilityToken();
-
- // Creates the BGL from the entries for a stage, checking it is valid.
- auto CreateBGL = [](DeviceBase* device, const EntryMap& entries,
- PipelineCompatibilityToken pipelineCompatibilityToken)
- -> ResultOrError<Ref<BindGroupLayoutBase>> {
- std::vector<BindGroupLayoutEntry> entryVec;
- entryVec.reserve(entries.size());
- for (auto& [_, entry] : entries) {
- entryVec.push_back(entry);
- }
+ // Use the max |minBufferBindingSize| we find.
+ modifiedEntry->buffer.minBindingSize =
+ std::max(modifiedEntry->buffer.minBindingSize, mergedEntry.buffer.minBindingSize);
- BindGroupLayoutDescriptor desc = {};
- desc.entries = entryVec.data();
- desc.entryCount = entryVec.size();
+ // Use the OR of all the stages at which we find this binding.
+ modifiedEntry->visibility |= mergedEntry.visibility;
- if (device->IsValidationEnabled()) {
- DAWN_TRY_CONTEXT(ValidateBindGroupLayoutDescriptor(device, &desc), "validating %s",
- &desc);
- }
- return device->GetOrCreateBindGroupLayout(&desc, pipelineCompatibilityToken);
- };
-
- ASSERT(!stages.empty());
-
- // Data which BindGroupLayoutDescriptor will point to for creation
- ityp::array<BindGroupIndex, std::map<BindingNumber, BindGroupLayoutEntry>, kMaxBindGroups>
- entryData = {};
-
- // External texture binding layouts are chained structs that are set as a pointer within
- // the bind group layout entry. We declare an entry here so that it can be used when needed
- // in each BindGroupLayoutEntry and so it can stay alive until the call to
- // GetOrCreateBindGroupLayout. Because ExternalTextureBindingLayout is an empty struct,
- // there's no issue with using the same struct multiple times.
- ExternalTextureBindingLayout externalTextureBindingLayout;
-
- // Loops over all the reflected BindGroupLayoutEntries from shaders.
- for (const StageAndDescriptor& stage : stages) {
- const EntryPointMetadata& metadata = stage.module->GetEntryPoint(stage.entryPoint);
-
- for (BindGroupIndex group(0); group < metadata.bindings.size(); ++group) {
- for (const auto& [bindingNumber, shaderBinding] : metadata.bindings[group]) {
- // Create the BindGroupLayoutEntry
- BindGroupLayoutEntry entry =
- ConvertMetadataToEntry(shaderBinding, &externalTextureBindingLayout);
- entry.binding = static_cast<uint32_t>(bindingNumber);
- entry.visibility = StageBit(stage.shaderStage);
-
- // Add it to our map of all entries, if there is an existing entry, then we
- // need to merge, if we can.
- const auto& [existingEntry, inserted] =
- entryData[group].insert({bindingNumber, entry});
- if (!inserted) {
- DAWN_TRY(MergeEntries(&existingEntry->second, entry));
- }
+ return {};
+ };
+
+ // Does the trivial conversions from a ShaderBindingInfo to a BindGroupLayoutEntry
+ auto ConvertMetadataToEntry =
+ [](const ShaderBindingInfo& shaderBinding,
+ const ExternalTextureBindingLayout* externalTextureBindingEntry)
+ -> BindGroupLayoutEntry {
+ BindGroupLayoutEntry entry = {};
+ switch (shaderBinding.bindingType) {
+ case BindingInfoType::Buffer:
+ entry.buffer.type = shaderBinding.buffer.type;
+ entry.buffer.hasDynamicOffset = shaderBinding.buffer.hasDynamicOffset;
+ entry.buffer.minBindingSize = shaderBinding.buffer.minBindingSize;
+ break;
+ case BindingInfoType::Sampler:
+ if (shaderBinding.sampler.isComparison) {
+ entry.sampler.type = wgpu::SamplerBindingType::Comparison;
+ } else {
+ entry.sampler.type = wgpu::SamplerBindingType::Filtering;
}
- }
-
- // Promote any Unfilterable textures used with a sampler to Filtering.
- for (const EntryPointMetadata::SamplerTexturePair& pair :
- metadata.samplerTexturePairs) {
- BindGroupLayoutEntry* entry = &entryData[pair.texture.group][pair.texture.binding];
- if (entry->texture.sampleType == wgpu::TextureSampleType::UnfilterableFloat) {
- entry->texture.sampleType = wgpu::TextureSampleType::Float;
+ break;
+ case BindingInfoType::Texture:
+ switch (shaderBinding.texture.compatibleSampleTypes) {
+ case SampleTypeBit::Depth:
+ entry.texture.sampleType = wgpu::TextureSampleType::Depth;
+ break;
+ case SampleTypeBit::Sint:
+ entry.texture.sampleType = wgpu::TextureSampleType::Sint;
+ break;
+ case SampleTypeBit::Uint:
+ entry.texture.sampleType = wgpu::TextureSampleType::Uint;
+ break;
+ case SampleTypeBit::Float:
+ case SampleTypeBit::UnfilterableFloat:
+ case SampleTypeBit::None:
+ UNREACHABLE();
+ break;
+ default:
+ if (shaderBinding.texture.compatibleSampleTypes ==
+ (SampleTypeBit::Float | SampleTypeBit::UnfilterableFloat)) {
+ // Default to UnfilterableFloat. It will be promoted to Float if it
+ // is used with a sampler.
+ entry.texture.sampleType = wgpu::TextureSampleType::UnfilterableFloat;
+ } else {
+ UNREACHABLE();
+ }
}
- }
+ entry.texture.viewDimension = shaderBinding.texture.viewDimension;
+ entry.texture.multisampled = shaderBinding.texture.multisampled;
+ break;
+ case BindingInfoType::StorageTexture:
+ entry.storageTexture.access = shaderBinding.storageTexture.access;
+ entry.storageTexture.format = shaderBinding.storageTexture.format;
+ entry.storageTexture.viewDimension = shaderBinding.storageTexture.viewDimension;
+ break;
+ case BindingInfoType::ExternalTexture:
+ entry.nextInChain = externalTextureBindingEntry;
+ break;
}
+ return entry;
+ };
+
+ PipelineCompatibilityToken pipelineCompatibilityToken =
+ device->GetNextPipelineCompatibilityToken();
+
+ // Creates the BGL from the entries for a stage, checking it is valid.
+ auto CreateBGL = [](DeviceBase* device, const EntryMap& entries,
+ PipelineCompatibilityToken pipelineCompatibilityToken)
+ -> ResultOrError<Ref<BindGroupLayoutBase>> {
+ std::vector<BindGroupLayoutEntry> entryVec;
+ entryVec.reserve(entries.size());
+ for (auto& [_, entry] : entries) {
+ entryVec.push_back(entry);
+ }
+
+ BindGroupLayoutDescriptor desc = {};
+ desc.entries = entryVec.data();
+ desc.entryCount = entryVec.size();
- // Create the bind group layouts. We need to keep track of the last non-empty BGL because
- // Dawn doesn't yet know that an empty BGL and a null BGL are the same thing.
- // TODO(cwallez@chromium.org): remove this when Dawn knows that empty and null BGL are the
- // same.
- BindGroupIndex pipelineBGLCount = BindGroupIndex(0);
- ityp::array<BindGroupIndex, Ref<BindGroupLayoutBase>, kMaxBindGroups> bindGroupLayouts = {};
- for (BindGroupIndex group(0); group < kMaxBindGroupsTyped; ++group) {
- DAWN_TRY_ASSIGN(bindGroupLayouts[group],
- CreateBGL(device, entryData[group], pipelineCompatibilityToken));
- if (entryData[group].size() != 0) {
- pipelineBGLCount = group + BindGroupIndex(1);
+ if (device->IsValidationEnabled()) {
+ DAWN_TRY_CONTEXT(ValidateBindGroupLayoutDescriptor(device, &desc), "validating %s",
+ &desc);
+ }
+ return device->GetOrCreateBindGroupLayout(&desc, pipelineCompatibilityToken);
+ };
+
+ ASSERT(!stages.empty());
+
+ // Data which BindGroupLayoutDescriptor will point to for creation
+ ityp::array<BindGroupIndex, std::map<BindingNumber, BindGroupLayoutEntry>, kMaxBindGroups>
+ entryData = {};
+
+ // External texture binding layouts are chained structs that are set as a pointer within
+ // the bind group layout entry. We declare an entry here so that it can be used when needed
+ // in each BindGroupLayoutEntry and so it can stay alive until the call to
+ // GetOrCreateBindGroupLayout. Because ExternalTextureBindingLayout is an empty struct,
+ // there's no issue with using the same struct multiple times.
+ ExternalTextureBindingLayout externalTextureBindingLayout;
+
+ // Loops over all the reflected BindGroupLayoutEntries from shaders.
+ for (const StageAndDescriptor& stage : stages) {
+ const EntryPointMetadata& metadata = stage.module->GetEntryPoint(stage.entryPoint);
+
+ for (BindGroupIndex group(0); group < metadata.bindings.size(); ++group) {
+ for (const auto& [bindingNumber, shaderBinding] : metadata.bindings[group]) {
+ // Create the BindGroupLayoutEntry
+ BindGroupLayoutEntry entry =
+ ConvertMetadataToEntry(shaderBinding, &externalTextureBindingLayout);
+ entry.binding = static_cast<uint32_t>(bindingNumber);
+ entry.visibility = StageBit(stage.shaderStage);
+
+                // Add it to our map of all entries. If there is an existing entry, then we
+                // need to merge, if we can.
+ const auto& [existingEntry, inserted] =
+ entryData[group].insert({bindingNumber, entry});
+ if (!inserted) {
+ DAWN_TRY(MergeEntries(&existingEntry->second, entry));
+ }
}
}
- // Create the deduced pipeline layout, validating if it is valid.
- ityp::array<BindGroupIndex, BindGroupLayoutBase*, kMaxBindGroups> bgls = {};
- for (BindGroupIndex group(0); group < pipelineBGLCount; ++group) {
- bgls[group] = bindGroupLayouts[group].Get();
+ // Promote any Unfilterable textures used with a sampler to Filtering.
+ for (const EntryPointMetadata::SamplerTexturePair& pair : metadata.samplerTexturePairs) {
+ BindGroupLayoutEntry* entry = &entryData[pair.texture.group][pair.texture.binding];
+ if (entry->texture.sampleType == wgpu::TextureSampleType::UnfilterableFloat) {
+ entry->texture.sampleType = wgpu::TextureSampleType::Float;
+ }
}
+ }
- PipelineLayoutDescriptor desc = {};
- desc.bindGroupLayouts = bgls.data();
- desc.bindGroupLayoutCount = static_cast<uint32_t>(pipelineBGLCount);
-
- DAWN_TRY(ValidatePipelineLayoutDescriptor(device, &desc, pipelineCompatibilityToken));
-
- Ref<PipelineLayoutBase> result;
- DAWN_TRY_ASSIGN(result, device->GetOrCreatePipelineLayout(&desc));
- ASSERT(!result->IsError());
-
- // Sanity check in debug that the pipeline layout is compatible with the current
- // pipeline.
- for (const StageAndDescriptor& stage : stages) {
- const EntryPointMetadata& metadata = stage.module->GetEntryPoint(stage.entryPoint);
- ASSERT(ValidateCompatibilityWithPipelineLayout(device, metadata, result.Get())
- .IsSuccess());
+ // Create the bind group layouts. We need to keep track of the last non-empty BGL because
+ // Dawn doesn't yet know that an empty BGL and a null BGL are the same thing.
+ // TODO(cwallez@chromium.org): remove this when Dawn knows that empty and null BGL are the
+ // same.
+ BindGroupIndex pipelineBGLCount = BindGroupIndex(0);
+ ityp::array<BindGroupIndex, Ref<BindGroupLayoutBase>, kMaxBindGroups> bindGroupLayouts = {};
+ for (BindGroupIndex group(0); group < kMaxBindGroupsTyped; ++group) {
+ DAWN_TRY_ASSIGN(bindGroupLayouts[group],
+ CreateBGL(device, entryData[group], pipelineCompatibilityToken));
+ if (entryData[group].size() != 0) {
+ pipelineBGLCount = group + BindGroupIndex(1);
}
-
- return std::move(result);
}
- ObjectType PipelineLayoutBase::GetType() const {
- return ObjectType::PipelineLayout;
+ // Create the deduced pipeline layout, validating if it is valid.
+ ityp::array<BindGroupIndex, BindGroupLayoutBase*, kMaxBindGroups> bgls = {};
+ for (BindGroupIndex group(0); group < pipelineBGLCount; ++group) {
+ bgls[group] = bindGroupLayouts[group].Get();
}
- const BindGroupLayoutBase* PipelineLayoutBase::GetBindGroupLayout(BindGroupIndex group) const {
- ASSERT(!IsError());
- ASSERT(group < kMaxBindGroupsTyped);
- ASSERT(mMask[group]);
- const BindGroupLayoutBase* bgl = mBindGroupLayouts[group].Get();
- ASSERT(bgl != nullptr);
- return bgl;
- }
+ PipelineLayoutDescriptor desc = {};
+ desc.bindGroupLayouts = bgls.data();
+ desc.bindGroupLayoutCount = static_cast<uint32_t>(pipelineBGLCount);
- BindGroupLayoutBase* PipelineLayoutBase::GetBindGroupLayout(BindGroupIndex group) {
- ASSERT(!IsError());
- ASSERT(group < kMaxBindGroupsTyped);
- ASSERT(mMask[group]);
- BindGroupLayoutBase* bgl = mBindGroupLayouts[group].Get();
- ASSERT(bgl != nullptr);
- return bgl;
- }
+ DAWN_TRY(ValidatePipelineLayoutDescriptor(device, &desc, pipelineCompatibilityToken));
- const BindGroupLayoutMask& PipelineLayoutBase::GetBindGroupLayoutsMask() const {
- ASSERT(!IsError());
- return mMask;
- }
+ Ref<PipelineLayoutBase> result;
+ DAWN_TRY_ASSIGN(result, device->GetOrCreatePipelineLayout(&desc));
+ ASSERT(!result->IsError());
- BindGroupLayoutMask PipelineLayoutBase::InheritedGroupsMask(
- const PipelineLayoutBase* other) const {
- ASSERT(!IsError());
- return {(1 << static_cast<uint32_t>(GroupsInheritUpTo(other))) - 1u};
+ // Check in debug that the pipeline layout is compatible with the current pipeline.
+ for (const StageAndDescriptor& stage : stages) {
+ const EntryPointMetadata& metadata = stage.module->GetEntryPoint(stage.entryPoint);
+ ASSERT(ValidateCompatibilityWithPipelineLayout(device, metadata, result.Get()).IsSuccess());
}
- BindGroupIndex PipelineLayoutBase::GroupsInheritUpTo(const PipelineLayoutBase* other) const {
- ASSERT(!IsError());
-
- for (BindGroupIndex i(0); i < kMaxBindGroupsTyped; ++i) {
- if (!mMask[i] || mBindGroupLayouts[i].Get() != other->mBindGroupLayouts[i].Get()) {
- return i;
- }
+ return std::move(result);
+}
+
+ObjectType PipelineLayoutBase::GetType() const {
+ return ObjectType::PipelineLayout;
+}
+
+const BindGroupLayoutBase* PipelineLayoutBase::GetBindGroupLayout(BindGroupIndex group) const {
+ ASSERT(!IsError());
+ ASSERT(group < kMaxBindGroupsTyped);
+ ASSERT(mMask[group]);
+ const BindGroupLayoutBase* bgl = mBindGroupLayouts[group].Get();
+ ASSERT(bgl != nullptr);
+ return bgl;
+}
+
+BindGroupLayoutBase* PipelineLayoutBase::GetBindGroupLayout(BindGroupIndex group) {
+ ASSERT(!IsError());
+ ASSERT(group < kMaxBindGroupsTyped);
+ ASSERT(mMask[group]);
+ BindGroupLayoutBase* bgl = mBindGroupLayouts[group].Get();
+ ASSERT(bgl != nullptr);
+ return bgl;
+}
+
+const BindGroupLayoutMask& PipelineLayoutBase::GetBindGroupLayoutsMask() const {
+ ASSERT(!IsError());
+ return mMask;
+}
+
+BindGroupLayoutMask PipelineLayoutBase::InheritedGroupsMask(const PipelineLayoutBase* other) const {
+ ASSERT(!IsError());
+ return {(1 << static_cast<uint32_t>(GroupsInheritUpTo(other))) - 1u};
+}
+
+BindGroupIndex PipelineLayoutBase::GroupsInheritUpTo(const PipelineLayoutBase* other) const {
+ ASSERT(!IsError());
+
+ for (BindGroupIndex i(0); i < kMaxBindGroupsTyped; ++i) {
+ if (!mMask[i] || mBindGroupLayouts[i].Get() != other->mBindGroupLayouts[i].Get()) {
+ return i;
}
- return kMaxBindGroupsTyped;
}
+ return kMaxBindGroupsTyped;
+}
- size_t PipelineLayoutBase::ComputeContentHash() {
- ObjectContentHasher recorder;
- recorder.Record(mMask);
+size_t PipelineLayoutBase::ComputeContentHash() {
+ ObjectContentHasher recorder;
+ recorder.Record(mMask);
- for (BindGroupIndex group : IterateBitSet(mMask)) {
- recorder.Record(GetBindGroupLayout(group)->GetContentHash());
- }
+ for (BindGroupIndex group : IterateBitSet(mMask)) {
+ recorder.Record(GetBindGroupLayout(group)->GetContentHash());
+ }
- return recorder.GetContentHash();
+ return recorder.GetContentHash();
+}
+
+bool PipelineLayoutBase::EqualityFunc::operator()(const PipelineLayoutBase* a,
+ const PipelineLayoutBase* b) const {
+ if (a->mMask != b->mMask) {
+ return false;
}
- bool PipelineLayoutBase::EqualityFunc::operator()(const PipelineLayoutBase* a,
- const PipelineLayoutBase* b) const {
- if (a->mMask != b->mMask) {
+ for (BindGroupIndex group : IterateBitSet(a->mMask)) {
+ if (a->GetBindGroupLayout(group) != b->GetBindGroupLayout(group)) {
return false;
}
-
- for (BindGroupIndex group : IterateBitSet(a->mMask)) {
- if (a->GetBindGroupLayout(group) != b->GetBindGroupLayout(group)) {
- return false;
- }
- }
-
- return true;
}
+ return true;
+}
+
} // namespace dawn::native
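
When CreateDefault above folds the reflected bindings of every shader stage into one set of bind group layout entries, MergeEntries requires duplicate declarations of the same binding to be compatible, then ORs the stage visibilities together and keeps the largest minimum buffer binding size. Below is a reduced, self-contained version of that merge for buffer bindings only; BufferEntry and the enums are simplified stand-ins for Dawn's BindGroupLayoutEntry, not its API.

    #include <algorithm>
    #include <cstdint>
    #include <iostream>
    #include <optional>

    enum class BufferBindingType { Undefined, Uniform, Storage, ReadOnlyStorage };
    enum StageBits : uint32_t { kVertex = 1, kFragment = 2, kCompute = 4 };

    struct BufferEntry {
        uint32_t binding = 0;
        uint32_t visibility = 0;  // OR of StageBits
        BufferBindingType type = BufferBindingType::Undefined;
        bool hasDynamicOffset = false;
        uint64_t minBindingSize = 0;
    };

    // Returns std::nullopt when the two declarations cannot describe the same binding.
    std::optional<BufferEntry> MergeEntries(BufferEntry existing, const BufferEntry& merged) {
        bool compatible = existing.binding == merged.binding &&
                          existing.type == merged.type &&
                          existing.hasDynamicOffset == merged.hasDynamicOffset;
        if (!compatible) {
            return std::nullopt;
        }
        // Take the max of the minimum binding sizes and the OR of the stage visibilities.
        existing.minBindingSize = std::max(existing.minBindingSize, merged.minBindingSize);
        existing.visibility |= merged.visibility;
        return existing;
    }

    int main() {
        BufferEntry vertexSide{/*binding=*/0, kVertex, BufferBindingType::Uniform, false, 64};
        BufferEntry fragmentSide{/*binding=*/0, kFragment, BufferBindingType::Uniform, false, 128};
        if (auto merged = MergeEntries(vertexSide, fragmentSide)) {
            std::cout << "visibility mask: " << merged->visibility        // 3 (vertex | fragment)
                      << ", minBindingSize: " << merged->minBindingSize   // 128
                      << "\n";
        }
    }
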
diff --git a/chromium/third_party/dawn/src/dawn/native/PipelineLayout.h b/chromium/third_party/dawn/src/dawn/native/PipelineLayout.h
index 634c082aa18..c2536c220c4 100644
--- a/chromium/third_party/dawn/src/dawn/native/PipelineLayout.h
+++ b/chromium/third_party/dawn/src/dawn/native/PipelineLayout.h
@@ -15,6 +15,11 @@
#ifndef SRC_DAWN_NATIVE_PIPELINELAYOUT_H_
#define SRC_DAWN_NATIVE_PIPELINELAYOUT_H_
+#include <array>
+#include <bitset>
+#include <string>
+#include <vector>
+
#include "dawn/common/Constants.h"
#include "dawn/common/ityp_array.h"
#include "dawn/common/ityp_bitset.h"
@@ -26,71 +31,67 @@
#include "dawn/native/dawn_platform.h"
-#include <array>
-#include <bitset>
-
namespace dawn::native {
- MaybeError ValidatePipelineLayoutDescriptor(
- DeviceBase*,
- const PipelineLayoutDescriptor* descriptor,
- PipelineCompatibilityToken pipelineCompatibilityToken = PipelineCompatibilityToken(0));
-
- using BindGroupLayoutArray =
- ityp::array<BindGroupIndex, Ref<BindGroupLayoutBase>, kMaxBindGroups>;
- using BindGroupLayoutMask = ityp::bitset<BindGroupIndex, kMaxBindGroups>;
-
- struct StageAndDescriptor {
- SingleShaderStage shaderStage;
- ShaderModuleBase* module;
- std::string entryPoint;
- uint32_t constantCount = 0u;
- ConstantEntry const* constants = nullptr;
+MaybeError ValidatePipelineLayoutDescriptor(
+ DeviceBase*,
+ const PipelineLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken = PipelineCompatibilityToken(0));
+
+using BindGroupLayoutArray = ityp::array<BindGroupIndex, Ref<BindGroupLayoutBase>, kMaxBindGroups>;
+using BindGroupLayoutMask = ityp::bitset<BindGroupIndex, kMaxBindGroups>;
+
+struct StageAndDescriptor {
+ SingleShaderStage shaderStage;
+ ShaderModuleBase* module;
+ std::string entryPoint;
+ uint32_t constantCount = 0u;
+ ConstantEntry const* constants = nullptr;
+};
+
+class PipelineLayoutBase : public ApiObjectBase, public CachedObject {
+ public:
+ PipelineLayoutBase(DeviceBase* device,
+ const PipelineLayoutDescriptor* descriptor,
+ ApiObjectBase::UntrackedByDeviceTag tag);
+ PipelineLayoutBase(DeviceBase* device, const PipelineLayoutDescriptor* descriptor);
+ ~PipelineLayoutBase() override;
+
+ static PipelineLayoutBase* MakeError(DeviceBase* device);
+ static ResultOrError<Ref<PipelineLayoutBase>> CreateDefault(
+ DeviceBase* device,
+ std::vector<StageAndDescriptor> stages);
+
+ ObjectType GetType() const override;
+
+ const BindGroupLayoutBase* GetBindGroupLayout(BindGroupIndex group) const;
+ BindGroupLayoutBase* GetBindGroupLayout(BindGroupIndex group);
+ const BindGroupLayoutMask& GetBindGroupLayoutsMask() const;
+
+ // Utility functions to compute inherited bind groups.
+ // Returns the inherited bind groups as a mask.
+ BindGroupLayoutMask InheritedGroupsMask(const PipelineLayoutBase* other) const;
+
+ // Returns the index of the first incompatible bind group in the range
+ // [0, kMaxBindGroups]
+ BindGroupIndex GroupsInheritUpTo(const PipelineLayoutBase* other) const;
+
+ // Functions necessary for the unordered_set<PipelineLayoutBase*>-based cache.
+ size_t ComputeContentHash() override;
+
+ struct EqualityFunc {
+ bool operator()(const PipelineLayoutBase* a, const PipelineLayoutBase* b) const;
};
- class PipelineLayoutBase : public ApiObjectBase, public CachedObject {
- public:
- PipelineLayoutBase(DeviceBase* device,
- const PipelineLayoutDescriptor* descriptor,
- ApiObjectBase::UntrackedByDeviceTag tag);
- PipelineLayoutBase(DeviceBase* device, const PipelineLayoutDescriptor* descriptor);
- ~PipelineLayoutBase() override;
-
- static PipelineLayoutBase* MakeError(DeviceBase* device);
- static ResultOrError<Ref<PipelineLayoutBase>> CreateDefault(
- DeviceBase* device,
- std::vector<StageAndDescriptor> stages);
-
- ObjectType GetType() const override;
+ protected:
+ // Constructor used only for mocking and testing.
+ explicit PipelineLayoutBase(DeviceBase* device);
+ PipelineLayoutBase(DeviceBase* device, ObjectBase::ErrorTag tag);
+ void DestroyImpl() override;
- const BindGroupLayoutBase* GetBindGroupLayout(BindGroupIndex group) const;
- BindGroupLayoutBase* GetBindGroupLayout(BindGroupIndex group);
- const BindGroupLayoutMask& GetBindGroupLayoutsMask() const;
-
- // Utility functions to compute inherited bind groups.
- // Returns the inherited bind groups as a mask.
- BindGroupLayoutMask InheritedGroupsMask(const PipelineLayoutBase* other) const;
-
- // Returns the index of the first incompatible bind group in the range
- // [0, kMaxBindGroups]
- BindGroupIndex GroupsInheritUpTo(const PipelineLayoutBase* other) const;
-
- // Functions necessary for the unordered_set<PipelineLayoutBase*>-based cache.
- size_t ComputeContentHash() override;
-
- struct EqualityFunc {
- bool operator()(const PipelineLayoutBase* a, const PipelineLayoutBase* b) const;
- };
-
- protected:
- // Constructor used only for mocking and testing.
- explicit PipelineLayoutBase(DeviceBase* device);
- PipelineLayoutBase(DeviceBase* device, ObjectBase::ErrorTag tag);
- void DestroyImpl() override;
-
- BindGroupLayoutArray mBindGroupLayouts;
- BindGroupLayoutMask mMask;
- };
+ BindGroupLayoutArray mBindGroupLayouts;
+ BindGroupLayoutMask mMask;
+};
} // namespace dawn::native
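
GroupsInheritUpTo and InheritedGroupsMask declared above report how many leading bind groups stay compatible when switching from one pipeline layout to another: everything before the first mismatch can be inherited. A small self-contained sketch of the same computation, comparing layouts by integer identifiers instead of deduplicated BGL pointers (both layouts are assumed to have kMaxBindGroups slots):

    #include <bitset>
    #include <cstddef>
    #include <iostream>
    #include <vector>

    constexpr std::size_t kMaxBindGroups = 4;

    // A pipeline layout reduced to the identity of each bind group layout slot
    // (0 meaning "no layout in this slot"); the real code compares deduplicated pointers.
    using LayoutIds = std::vector<int>;

    // Index of the first bind group that differs, in [0, kMaxBindGroups].
    std::size_t GroupsInheritUpTo(const LayoutIds& a, const LayoutIds& b) {
        for (std::size_t i = 0; i < kMaxBindGroups; ++i) {
            if (a[i] == 0 || a[i] != b[i]) {
                return i;
            }
        }
        return kMaxBindGroups;
    }

    // Mask with one bit per inherited group: (1 << n) - 1, as in InheritedGroupsMask.
    std::bitset<kMaxBindGroups> InheritedGroupsMask(const LayoutIds& a, const LayoutIds& b) {
        return std::bitset<kMaxBindGroups>((1u << GroupsInheritUpTo(a, b)) - 1u);
    }

    int main() {
        LayoutIds current = {7, 9, 3, 0};
        LayoutIds next = {7, 9, 5, 0};
        std::cout << "groups inherited up to: " << GroupsInheritUpTo(current, next) << "\n";  // 2
        std::cout << "inherited mask: " << InheritedGroupsMask(current, next) << "\n";        // 0011
    }
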
diff --git a/chromium/third_party/dawn/src/dawn/native/PooledResourceMemoryAllocator.cpp b/chromium/third_party/dawn/src/dawn/native/PooledResourceMemoryAllocator.cpp
index 0a01a99ab53..96b10c6981e 100644
--- a/chromium/third_party/dawn/src/dawn/native/PooledResourceMemoryAllocator.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/PooledResourceMemoryAllocator.cpp
@@ -13,48 +13,51 @@
// limitations under the License.
#include "dawn/native/PooledResourceMemoryAllocator.h"
+
+#include <utility>
+
#include "dawn/native/Device.h"
namespace dawn::native {
- PooledResourceMemoryAllocator::PooledResourceMemoryAllocator(
- ResourceHeapAllocator* heapAllocator)
- : mHeapAllocator(heapAllocator) {
- }
+PooledResourceMemoryAllocator::PooledResourceMemoryAllocator(ResourceHeapAllocator* heapAllocator)
+ : mHeapAllocator(heapAllocator) {}
- void PooledResourceMemoryAllocator::DestroyPool() {
- for (auto& resourceHeap : mPool) {
- ASSERT(resourceHeap != nullptr);
- mHeapAllocator->DeallocateResourceHeap(std::move(resourceHeap));
- }
+PooledResourceMemoryAllocator::~PooledResourceMemoryAllocator() = default;
- mPool.clear();
+void PooledResourceMemoryAllocator::DestroyPool() {
+ for (auto& resourceHeap : mPool) {
+ ASSERT(resourceHeap != nullptr);
+ mHeapAllocator->DeallocateResourceHeap(std::move(resourceHeap));
}
- ResultOrError<std::unique_ptr<ResourceHeapBase>>
- PooledResourceMemoryAllocator::AllocateResourceHeap(uint64_t size) {
- // Pooled memory is LIFO because memory can be evicted by LRU. However, this means
- // pooling is disabled in-frame when the memory is still pending. For high in-frame
- // memory users, FIFO might be preferable when memory consumption is a higher priority.
- std::unique_ptr<ResourceHeapBase> memory;
- if (!mPool.empty()) {
- memory = std::move(mPool.front());
- mPool.pop_front();
- }
-
- if (memory == nullptr) {
- DAWN_TRY_ASSIGN(memory, mHeapAllocator->AllocateResourceHeap(size));
- }
-
- return std::move(memory);
+ mPool.clear();
+}
+
+ResultOrError<std::unique_ptr<ResourceHeapBase>>
+PooledResourceMemoryAllocator::AllocateResourceHeap(uint64_t size) {
+ // Pooled memory is LIFO because memory can be evicted by LRU. However, this means
+ // pooling is disabled in-frame when the memory is still pending. For high in-frame
+ // memory users, FIFO might be preferable when memory consumption is a higher priority.
+ std::unique_ptr<ResourceHeapBase> memory;
+ if (!mPool.empty()) {
+ memory = std::move(mPool.front());
+ mPool.pop_front();
}
- void PooledResourceMemoryAllocator::DeallocateResourceHeap(
- std::unique_ptr<ResourceHeapBase> allocation) {
- mPool.push_front(std::move(allocation));
+ if (memory == nullptr) {
+ DAWN_TRY_ASSIGN(memory, mHeapAllocator->AllocateResourceHeap(size));
}
- uint64_t PooledResourceMemoryAllocator::GetPoolSizeForTesting() const {
- return mPool.size();
- }
+ return std::move(memory);
+}
+
+void PooledResourceMemoryAllocator::DeallocateResourceHeap(
+ std::unique_ptr<ResourceHeapBase> allocation) {
+ mPool.push_front(std::move(allocation));
+}
+
+uint64_t PooledResourceMemoryAllocator::GetPoolSizeForTesting() const {
+ return mPool.size();
+}
} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/PooledResourceMemoryAllocator.h b/chromium/third_party/dawn/src/dawn/native/PooledResourceMemoryAllocator.h
index 04d41ba433c..073ea265458 100644
--- a/chromium/third_party/dawn/src/dawn/native/PooledResourceMemoryAllocator.h
+++ b/chromium/third_party/dawn/src/dawn/native/PooledResourceMemoryAllocator.h
@@ -15,38 +15,38 @@
#ifndef SRC_DAWN_NATIVE_POOLEDRESOURCEMEMORYALLOCATOR_H_
#define SRC_DAWN_NATIVE_POOLEDRESOURCEMEMORYALLOCATOR_H_
+#include <deque>
+#include <memory>
+
#include "dawn/common/SerialQueue.h"
#include "dawn/native/ResourceHeapAllocator.h"
-#include <deque>
-
namespace dawn::native {
- class DeviceBase;
+class DeviceBase;
- // |PooledResourceMemoryAllocator| allocates a fixed-size resource memory from a resource memory
- // pool. Internally, it manages a list of heaps using LIFO (newest heaps are recycled first).
- // The heap is in one of two states: AVAILABLE or not. Upon de-allocate, the heap is returned
- // the pool and made AVAILABLE.
- class PooledResourceMemoryAllocator : public ResourceHeapAllocator {
- public:
- explicit PooledResourceMemoryAllocator(ResourceHeapAllocator* heapAllocator);
- ~PooledResourceMemoryAllocator() override = default;
+// |PooledResourceMemoryAllocator| allocates a fixed-size resource memory from a resource memory
+// pool. Internally, it manages a list of heaps using LIFO (newest heaps are recycled first).
+// The heap is in one of two states: AVAILABLE or not. Upon deallocation, the heap is returned
+// to the pool and made AVAILABLE.
+class PooledResourceMemoryAllocator : public ResourceHeapAllocator {
+ public:
+ explicit PooledResourceMemoryAllocator(ResourceHeapAllocator* heapAllocator);
+ ~PooledResourceMemoryAllocator() override;
- ResultOrError<std::unique_ptr<ResourceHeapBase>> AllocateResourceHeap(
- uint64_t size) override;
- void DeallocateResourceHeap(std::unique_ptr<ResourceHeapBase> allocation) override;
+ ResultOrError<std::unique_ptr<ResourceHeapBase>> AllocateResourceHeap(uint64_t size) override;
+ void DeallocateResourceHeap(std::unique_ptr<ResourceHeapBase> allocation) override;
- void DestroyPool();
+ void DestroyPool();
- // For testing purposes.
- uint64_t GetPoolSizeForTesting() const;
+ // For testing purposes.
+ uint64_t GetPoolSizeForTesting() const;
- private:
- ResourceHeapAllocator* mHeapAllocator = nullptr;
+ private:
+ ResourceHeapAllocator* mHeapAllocator = nullptr;
- std::deque<std::unique_ptr<ResourceHeapBase>> mPool;
- };
+ std::deque<std::unique_ptr<ResourceHeapBase>> mPool;
+};
} // namespace dawn::native
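As the comments in this file describe, the pool is a LIFO free list: allocation pops the most recently returned heap and only falls back to the underlying allocator when the pool is empty, while deallocation pushes the heap back onto the front. Below is a stripped-down sketch of that recycling pattern under those assumptions; the Heap type is a hypothetical stand-in for ResourceHeapBase and there is no error handling.

#include <deque>
#include <memory>
#include <utility>

struct Heap {};  // stand-in for ResourceHeapBase

class HeapPool {
  public:
    // Reuse the most recently returned heap (LIFO) or create a new one.
    std::unique_ptr<Heap> Allocate() {
        if (!mPool.empty()) {
            std::unique_ptr<Heap> heap = std::move(mPool.front());
            mPool.pop_front();
            return heap;
        }
        return std::make_unique<Heap>();
    }

    // Return a heap to the front of the pool so it is the next one recycled.
    void Deallocate(std::unique_ptr<Heap> heap) { mPool.push_front(std::move(heap)); }

    void DestroyPool() { mPool.clear(); }

  private:
    std::deque<std::unique_ptr<Heap>> mPool;
};

Recycling from the front keeps recently used heaps hot, while the least recently used heaps accumulate at the back of the deque, where the LRU eviction mentioned in the comment above can trim them.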
diff --git a/chromium/third_party/dawn/src/dawn/native/ProgrammableEncoder.cpp b/chromium/third_party/dawn/src/dawn/native/ProgrammableEncoder.cpp
index 8bdc08b0c06..29a0f03c547 100644
--- a/chromium/third_party/dawn/src/dawn/native/ProgrammableEncoder.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/ProgrammableEncoder.cpp
@@ -14,6 +14,8 @@
#include "dawn/native/ProgrammableEncoder.h"
+#include <cstring>
+
#include "dawn/common/BitSetIterator.h"
#include "dawn/common/ityp_array.h"
#include "dawn/native/BindGroup.h"
@@ -24,180 +26,174 @@
#include "dawn/native/ObjectType_autogen.h"
#include "dawn/native/ValidationUtils_autogen.h"
-#include <cstring>
-
namespace dawn::native {
- ProgrammableEncoder::ProgrammableEncoder(DeviceBase* device,
- const char* label,
- EncodingContext* encodingContext)
- : ApiObjectBase(device, label),
- mEncodingContext(encodingContext),
- mValidationEnabled(device->IsValidationEnabled()) {
- }
-
- ProgrammableEncoder::ProgrammableEncoder(DeviceBase* device,
- EncodingContext* encodingContext,
- ErrorTag errorTag)
- : ApiObjectBase(device, errorTag),
- mEncodingContext(encodingContext),
- mValidationEnabled(device->IsValidationEnabled()) {
- }
-
- bool ProgrammableEncoder::IsValidationEnabled() const {
- return mValidationEnabled;
- }
-
- MaybeError ProgrammableEncoder::ValidateProgrammableEncoderEnd() const {
- DAWN_INVALID_IF(mDebugGroupStackSize != 0,
- "PushDebugGroup called %u time(s) without a corresponding PopDebugGroup.",
- mDebugGroupStackSize);
- return {};
- }
-
- void ProgrammableEncoder::APIInsertDebugMarker(const char* groupLabel) {
- mEncodingContext->TryEncode(
- this,
- [&](CommandAllocator* allocator) -> MaybeError {
- InsertDebugMarkerCmd* cmd =
- allocator->Allocate<InsertDebugMarkerCmd>(Command::InsertDebugMarker);
- cmd->length = strlen(groupLabel);
-
- char* label = allocator->AllocateData<char>(cmd->length + 1);
- memcpy(label, groupLabel, cmd->length + 1);
-
- return {};
- },
- "encoding %s.InsertDebugMarker(\"%s\").", this, groupLabel);
- }
-
- void ProgrammableEncoder::APIPopDebugGroup() {
- mEncodingContext->TryEncode(
- this,
- [&](CommandAllocator* allocator) -> MaybeError {
- if (IsValidationEnabled()) {
- DAWN_INVALID_IF(
- mDebugGroupStackSize == 0,
- "PopDebugGroup called when no debug groups are currently pushed.");
- }
- allocator->Allocate<PopDebugGroupCmd>(Command::PopDebugGroup);
- mDebugGroupStackSize--;
- mEncodingContext->PopDebugGroupLabel();
-
- return {};
- },
- "encoding %s.PopDebugGroup().", this);
- }
-
- void ProgrammableEncoder::APIPushDebugGroup(const char* groupLabel) {
- mEncodingContext->TryEncode(
- this,
- [&](CommandAllocator* allocator) -> MaybeError {
- PushDebugGroupCmd* cmd =
- allocator->Allocate<PushDebugGroupCmd>(Command::PushDebugGroup);
- cmd->length = strlen(groupLabel);
-
- char* label = allocator->AllocateData<char>(cmd->length + 1);
- memcpy(label, groupLabel, cmd->length + 1);
-
- mDebugGroupStackSize++;
- mEncodingContext->PushDebugGroupLabel(groupLabel);
-
- return {};
- },
- "encoding %s.PushDebugGroup(\"%s\").", this, groupLabel);
- }
-
- MaybeError ProgrammableEncoder::ValidateSetBindGroup(BindGroupIndex index,
- BindGroupBase* group,
- uint32_t dynamicOffsetCountIn,
- const uint32_t* dynamicOffsetsIn) const {
- DAWN_TRY(GetDevice()->ValidateObject(group));
-
- DAWN_INVALID_IF(index >= kMaxBindGroupsTyped,
- "Bind group index (%u) exceeds the maximum (%u).",
- static_cast<uint32_t>(index), kMaxBindGroups);
-
- ityp::span<BindingIndex, const uint32_t> dynamicOffsets(dynamicOffsetsIn,
- BindingIndex(dynamicOffsetCountIn));
-
- // Dynamic offsets count must match the number required by the layout perfectly.
- const BindGroupLayoutBase* layout = group->GetLayout();
- DAWN_INVALID_IF(
- layout->GetDynamicBufferCount() != dynamicOffsets.size(),
- "The number of dynamic offsets (%u) does not match the number of dynamic buffers (%u) "
- "in %s.",
- static_cast<uint32_t>(dynamicOffsets.size()),
- static_cast<uint32_t>(layout->GetDynamicBufferCount()), layout);
-
- for (BindingIndex i{0}; i < dynamicOffsets.size(); ++i) {
- const BindingInfo& bindingInfo = layout->GetBindingInfo(i);
-
- // BGL creation sorts bindings such that the dynamic buffer bindings are first.
- // ASSERT that this true.
- ASSERT(bindingInfo.bindingType == BindingInfoType::Buffer);
- ASSERT(bindingInfo.buffer.hasDynamicOffset);
-
- uint64_t requiredAlignment;
- switch (bindingInfo.buffer.type) {
- case wgpu::BufferBindingType::Uniform:
- requiredAlignment = GetDevice()->GetLimits().v1.minUniformBufferOffsetAlignment;
- break;
- case wgpu::BufferBindingType::Storage:
- case wgpu::BufferBindingType::ReadOnlyStorage:
- case kInternalStorageBufferBinding:
- requiredAlignment = GetDevice()->GetLimits().v1.minStorageBufferOffsetAlignment;
- break;
- case wgpu::BufferBindingType::Undefined:
- UNREACHABLE();
- }
-
- DAWN_INVALID_IF(!IsAligned(dynamicOffsets[i], requiredAlignment),
- "Dynamic Offset[%u] (%u) is not %u byte aligned.",
- static_cast<uint32_t>(i), dynamicOffsets[i], requiredAlignment);
-
- BufferBinding bufferBinding = group->GetBindingAsBufferBinding(i);
-
- // During BindGroup creation, validation ensures binding offset + binding size
- // <= buffer size.
- ASSERT(bufferBinding.buffer->GetSize() >= bufferBinding.size);
- ASSERT(bufferBinding.buffer->GetSize() - bufferBinding.size >= bufferBinding.offset);
-
- if ((dynamicOffsets[i] >
- bufferBinding.buffer->GetSize() - bufferBinding.offset - bufferBinding.size)) {
- DAWN_INVALID_IF(
- (bufferBinding.buffer->GetSize() - bufferBinding.offset) == bufferBinding.size,
- "Dynamic Offset[%u] (%u) is out of bounds of %s with a size of %u and a bound "
- "range of (offset: %u, size: %u). The binding goes to the end of the buffer "
- "even with a dynamic offset of 0. Did you forget to specify "
- "the binding's size?",
- static_cast<uint32_t>(i), dynamicOffsets[i], bufferBinding.buffer,
- bufferBinding.buffer->GetSize(), bufferBinding.offset, bufferBinding.size);
-
- return DAWN_FORMAT_VALIDATION_ERROR(
- "Dynamic Offset[%u] (%u) is out of bounds of "
- "%s with a size of %u and a bound range of (offset: %u, size: %u).",
- static_cast<uint32_t>(i), dynamicOffsets[i], bufferBinding.buffer,
- bufferBinding.buffer->GetSize(), bufferBinding.offset, bufferBinding.size);
+ProgrammableEncoder::ProgrammableEncoder(DeviceBase* device,
+ const char* label,
+ EncodingContext* encodingContext)
+ : ApiObjectBase(device, label),
+ mEncodingContext(encodingContext),
+ mValidationEnabled(device->IsValidationEnabled()) {}
+
+ProgrammableEncoder::ProgrammableEncoder(DeviceBase* device,
+ EncodingContext* encodingContext,
+ ErrorTag errorTag)
+ : ApiObjectBase(device, errorTag),
+ mEncodingContext(encodingContext),
+ mValidationEnabled(device->IsValidationEnabled()) {}
+
+bool ProgrammableEncoder::IsValidationEnabled() const {
+ return mValidationEnabled;
+}
+
+MaybeError ProgrammableEncoder::ValidateProgrammableEncoderEnd() const {
+ DAWN_INVALID_IF(mDebugGroupStackSize != 0,
+ "PushDebugGroup called %u time(s) without a corresponding PopDebugGroup.",
+ mDebugGroupStackSize);
+ return {};
+}
+
+void ProgrammableEncoder::APIInsertDebugMarker(const char* groupLabel) {
+ mEncodingContext->TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ InsertDebugMarkerCmd* cmd =
+ allocator->Allocate<InsertDebugMarkerCmd>(Command::InsertDebugMarker);
+ cmd->length = strlen(groupLabel);
+
+ char* label = allocator->AllocateData<char>(cmd->length + 1);
+ memcpy(label, groupLabel, cmd->length + 1);
+
+ return {};
+ },
+ "encoding %s.InsertDebugMarker(\"%s\").", this, groupLabel);
+}
+
+void ProgrammableEncoder::APIPopDebugGroup() {
+ mEncodingContext->TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (IsValidationEnabled()) {
+ DAWN_INVALID_IF(mDebugGroupStackSize == 0,
+ "PopDebugGroup called when no debug groups are currently pushed.");
}
+ allocator->Allocate<PopDebugGroupCmd>(Command::PopDebugGroup);
+ mDebugGroupStackSize--;
+ mEncodingContext->PopDebugGroupLabel();
+
+ return {};
+ },
+ "encoding %s.PopDebugGroup().", this);
+}
+
+void ProgrammableEncoder::APIPushDebugGroup(const char* groupLabel) {
+ mEncodingContext->TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ PushDebugGroupCmd* cmd =
+ allocator->Allocate<PushDebugGroupCmd>(Command::PushDebugGroup);
+ cmd->length = strlen(groupLabel);
+
+ char* label = allocator->AllocateData<char>(cmd->length + 1);
+ memcpy(label, groupLabel, cmd->length + 1);
+
+ mDebugGroupStackSize++;
+ mEncodingContext->PushDebugGroupLabel(groupLabel);
+
+ return {};
+ },
+ "encoding %s.PushDebugGroup(\"%s\").", this, groupLabel);
+}
+
+MaybeError ProgrammableEncoder::ValidateSetBindGroup(BindGroupIndex index,
+ BindGroupBase* group,
+ uint32_t dynamicOffsetCountIn,
+ const uint32_t* dynamicOffsetsIn) const {
+ DAWN_TRY(GetDevice()->ValidateObject(group));
+
+ DAWN_INVALID_IF(index >= kMaxBindGroupsTyped, "Bind group index (%u) exceeds the maximum (%u).",
+ static_cast<uint32_t>(index), kMaxBindGroups);
+
+ ityp::span<BindingIndex, const uint32_t> dynamicOffsets(dynamicOffsetsIn,
+ BindingIndex(dynamicOffsetCountIn));
+
+ // Dynamic offsets count must match the number required by the layout perfectly.
+ const BindGroupLayoutBase* layout = group->GetLayout();
+ DAWN_INVALID_IF(
+ layout->GetDynamicBufferCount() != dynamicOffsets.size(),
+ "The number of dynamic offsets (%u) does not match the number of dynamic buffers (%u) "
+ "in %s.",
+ static_cast<uint32_t>(dynamicOffsets.size()),
+ static_cast<uint32_t>(layout->GetDynamicBufferCount()), layout);
+
+ for (BindingIndex i{0}; i < dynamicOffsets.size(); ++i) {
+ const BindingInfo& bindingInfo = layout->GetBindingInfo(i);
+
+ // BGL creation sorts bindings such that the dynamic buffer bindings are first.
+        // ASSERT that this is true.
+ ASSERT(bindingInfo.bindingType == BindingInfoType::Buffer);
+ ASSERT(bindingInfo.buffer.hasDynamicOffset);
+
+ uint64_t requiredAlignment;
+ switch (bindingInfo.buffer.type) {
+ case wgpu::BufferBindingType::Uniform:
+ requiredAlignment = GetDevice()->GetLimits().v1.minUniformBufferOffsetAlignment;
+ break;
+ case wgpu::BufferBindingType::Storage:
+ case wgpu::BufferBindingType::ReadOnlyStorage:
+ case kInternalStorageBufferBinding:
+ requiredAlignment = GetDevice()->GetLimits().v1.minStorageBufferOffsetAlignment;
+ break;
+ case wgpu::BufferBindingType::Undefined:
+ UNREACHABLE();
}
- return {};
+ DAWN_INVALID_IF(!IsAligned(dynamicOffsets[i], requiredAlignment),
+ "Dynamic Offset[%u] (%u) is not %u byte aligned.", static_cast<uint32_t>(i),
+ dynamicOffsets[i], requiredAlignment);
+
+ BufferBinding bufferBinding = group->GetBindingAsBufferBinding(i);
+
+ // During BindGroup creation, validation ensures binding offset + binding size
+ // <= buffer size.
+ ASSERT(bufferBinding.buffer->GetSize() >= bufferBinding.size);
+ ASSERT(bufferBinding.buffer->GetSize() - bufferBinding.size >= bufferBinding.offset);
+
+ if ((dynamicOffsets[i] >
+ bufferBinding.buffer->GetSize() - bufferBinding.offset - bufferBinding.size)) {
+ DAWN_INVALID_IF(
+ (bufferBinding.buffer->GetSize() - bufferBinding.offset) == bufferBinding.size,
+ "Dynamic Offset[%u] (%u) is out of bounds of %s with a size of %u and a bound "
+ "range of (offset: %u, size: %u). The binding goes to the end of the buffer "
+ "even with a dynamic offset of 0. Did you forget to specify "
+ "the binding's size?",
+ static_cast<uint32_t>(i), dynamicOffsets[i], bufferBinding.buffer,
+ bufferBinding.buffer->GetSize(), bufferBinding.offset, bufferBinding.size);
+
+ return DAWN_FORMAT_VALIDATION_ERROR(
+ "Dynamic Offset[%u] (%u) is out of bounds of "
+ "%s with a size of %u and a bound range of (offset: %u, size: %u).",
+ static_cast<uint32_t>(i), dynamicOffsets[i], bufferBinding.buffer,
+ bufferBinding.buffer->GetSize(), bufferBinding.offset, bufferBinding.size);
+ }
}
- void ProgrammableEncoder::RecordSetBindGroup(CommandAllocator* allocator,
- BindGroupIndex index,
- BindGroupBase* group,
- uint32_t dynamicOffsetCount,
- const uint32_t* dynamicOffsets) const {
- SetBindGroupCmd* cmd = allocator->Allocate<SetBindGroupCmd>(Command::SetBindGroup);
- cmd->index = index;
- cmd->group = group;
- cmd->dynamicOffsetCount = dynamicOffsetCount;
- if (dynamicOffsetCount > 0) {
- uint32_t* offsets = allocator->AllocateData<uint32_t>(cmd->dynamicOffsetCount);
- memcpy(offsets, dynamicOffsets, dynamicOffsetCount * sizeof(uint32_t));
- }
+ return {};
+}
+
+void ProgrammableEncoder::RecordSetBindGroup(CommandAllocator* allocator,
+ BindGroupIndex index,
+ BindGroupBase* group,
+ uint32_t dynamicOffsetCount,
+ const uint32_t* dynamicOffsets) const {
+ SetBindGroupCmd* cmd = allocator->Allocate<SetBindGroupCmd>(Command::SetBindGroup);
+ cmd->index = index;
+ cmd->group = group;
+ cmd->dynamicOffsetCount = dynamicOffsetCount;
+ if (dynamicOffsetCount > 0) {
+ uint32_t* offsets = allocator->AllocateData<uint32_t>(cmd->dynamicOffsetCount);
+ memcpy(offsets, dynamicOffsets, dynamicOffsetCount * sizeof(uint32_t));
}
+}
} // namespace dawn::native
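ValidateSetBindGroup above applies two arithmetic rules to each dynamic offset: the offset must be a multiple of the device's minUniformBufferOffsetAlignment or minStorageBufferOffsetAlignment (depending on the buffer binding type), and the shifted binding range must stay inside the bound buffer. The following is a simplified standalone check of just that arithmetic; the function name and parameters are illustrative, not Dawn API.

#include <cstdint>

// Simplified version of the per-offset checks in ValidateSetBindGroup.
// 'alignment' is assumed to be a power of two (e.g. minUniformBufferOffsetAlignment).
bool IsDynamicOffsetValid(uint64_t dynamicOffset,
                          uint64_t alignment,
                          uint64_t bufferSize,
                          uint64_t bindingOffset,
                          uint64_t bindingSize) {
    // Alignment check: the offset must be a multiple of the required alignment.
    if (dynamicOffset & (alignment - 1)) {
        return false;
    }
    // Bounds check: the shifted binding range must fit in the buffer.
    // (bindingOffset + bindingSize <= bufferSize is guaranteed at bind group creation.)
    return dynamicOffset <= bufferSize - bindingOffset - bindingSize;
}

For example, with a 1024-byte buffer, a binding at offset 0 with size 256 and a 256-byte alignment, offsets 0, 256, 512 and 768 pass, 128 fails the alignment check, and 1024 fails the bounds check.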
diff --git a/chromium/third_party/dawn/src/dawn/native/ProgrammableEncoder.h b/chromium/third_party/dawn/src/dawn/native/ProgrammableEncoder.h
index 6a7918f6ea6..0aa53d1f244 100644
--- a/chromium/third_party/dawn/src/dawn/native/ProgrammableEncoder.h
+++ b/chromium/third_party/dawn/src/dawn/native/ProgrammableEncoder.h
@@ -25,47 +25,43 @@
namespace dawn::native {
- class DeviceBase;
+class DeviceBase;
- // Base class for shared functionality between programmable encoders.
- class ProgrammableEncoder : public ApiObjectBase {
- public:
- ProgrammableEncoder(DeviceBase* device,
- const char* label,
- EncodingContext* encodingContext);
+// Base class for shared functionality between programmable encoders.
+class ProgrammableEncoder : public ApiObjectBase {
+ public:
+ ProgrammableEncoder(DeviceBase* device, const char* label, EncodingContext* encodingContext);
- void APIInsertDebugMarker(const char* groupLabel);
- void APIPopDebugGroup();
- void APIPushDebugGroup(const char* groupLabel);
+ void APIInsertDebugMarker(const char* groupLabel);
+ void APIPopDebugGroup();
+ void APIPushDebugGroup(const char* groupLabel);
- protected:
- bool IsValidationEnabled() const;
- MaybeError ValidateProgrammableEncoderEnd() const;
+ protected:
+ bool IsValidationEnabled() const;
+ MaybeError ValidateProgrammableEncoderEnd() const;
- // Compute and render passes do different things on SetBindGroup. These are helper functions
- // for the logic they have in common.
- MaybeError ValidateSetBindGroup(BindGroupIndex index,
- BindGroupBase* group,
- uint32_t dynamicOffsetCountIn,
- const uint32_t* dynamicOffsetsIn) const;
- void RecordSetBindGroup(CommandAllocator* allocator,
- BindGroupIndex index,
- BindGroupBase* group,
- uint32_t dynamicOffsetCount,
- const uint32_t* dynamicOffsets) const;
+ // Compute and render passes do different things on SetBindGroup. These are helper functions
+ // for the logic they have in common.
+ MaybeError ValidateSetBindGroup(BindGroupIndex index,
+ BindGroupBase* group,
+ uint32_t dynamicOffsetCountIn,
+ const uint32_t* dynamicOffsetsIn) const;
+ void RecordSetBindGroup(CommandAllocator* allocator,
+ BindGroupIndex index,
+ BindGroupBase* group,
+ uint32_t dynamicOffsetCount,
+ const uint32_t* dynamicOffsets) const;
- // Construct an "error" programmable pass encoder.
- ProgrammableEncoder(DeviceBase* device,
- EncodingContext* encodingContext,
- ErrorTag errorTag);
+ // Construct an "error" programmable pass encoder.
+ ProgrammableEncoder(DeviceBase* device, EncodingContext* encodingContext, ErrorTag errorTag);
- EncodingContext* mEncodingContext = nullptr;
+ EncodingContext* mEncodingContext = nullptr;
- uint64_t mDebugGroupStackSize = 0;
+ uint64_t mDebugGroupStackSize = 0;
- private:
- const bool mValidationEnabled;
- };
+ private:
+ const bool mValidationEnabled;
+};
} // namespace dawn::native
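APIPushDebugGroup and APIPopDebugGroup in the matching .cpp above maintain a single counter, mDebugGroupStackSize, that must return to zero by the time the pass ends; popping an empty stack or ending with unbalanced pushes is reported as a validation error. A minimal sketch of that bookkeeping, independent of Dawn's encoder and error types (the class and method names below are made up for illustration):

#include <cstdint>
#include <string>

// Mirrors the mDebugGroupStackSize bookkeeping in ProgrammableEncoder.
class DebugGroupTracker {
  public:
    void Push() { mDepth++; }

    // Returns an error message on underflow, or an empty string on success.
    std::string Pop() {
        if (mDepth == 0) {
            return "PopDebugGroup called when no debug groups are currently pushed.";
        }
        mDepth--;
        return {};
    }

    // Called when the pass ends; every pushed group must have been popped.
    std::string End() const {
        if (mDepth != 0) {
            return "PushDebugGroup called without a corresponding PopDebugGroup.";
        }
        return {};
    }

  private:
    uint64_t mDepth = 0;
};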
diff --git a/chromium/third_party/dawn/src/dawn/native/QueryHelper.cpp b/chromium/third_party/dawn/src/dawn/native/QueryHelper.cpp
index c6d7541ed62..e72dfed7a12 100644
--- a/chromium/third_party/dawn/src/dawn/native/QueryHelper.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/QueryHelper.cpp
@@ -14,6 +14,9 @@
#include "dawn/native/QueryHelper.h"
+#include <algorithm>
+#include <cmath>
+
#include "dawn/native/BindGroup.h"
#include "dawn/native/BindGroupLayout.h"
#include "dawn/native/Buffer.h"
@@ -24,20 +27,18 @@
#include "dawn/native/InternalPipelineStore.h"
#include "dawn/native/utils/WGPUHelpers.h"
-#include <cmath>
-
namespace dawn::native {
- namespace {
+namespace {
- // Assert the offsets in dawn::native::TimestampParams are same with the ones in the shader
- static_assert(offsetof(dawn::native::TimestampParams, first) == 0);
- static_assert(offsetof(dawn::native::TimestampParams, count) == 4);
- static_assert(offsetof(dawn::native::TimestampParams, offset) == 8);
- static_assert(offsetof(dawn::native::TimestampParams, multiplier) == 12);
- static_assert(offsetof(dawn::native::TimestampParams, rightShift) == 16);
+// Assert that the offsets in dawn::native::TimestampParams match the ones in the shader
+static_assert(offsetof(dawn::native::TimestampParams, first) == 0);
+static_assert(offsetof(dawn::native::TimestampParams, count) == 4);
+static_assert(offsetof(dawn::native::TimestampParams, offset) == 8);
+static_assert(offsetof(dawn::native::TimestampParams, multiplier) == 12);
+static_assert(offsetof(dawn::native::TimestampParams, rightShift) == 16);
- static const char sConvertTimestampsToNanoseconds[] = R"(
+static const char sConvertTimestampsToNanoseconds[] = R"(
struct Timestamp {
low : u32;
high : u32;
@@ -65,7 +66,7 @@ namespace dawn::native {
let sizeofTimestamp : u32 = 8u;
- @stage(compute) @workgroup_size(8, 1, 1)
+ @compute @workgroup_size(8, 1, 1)
fn main(@builtin(global_invocation_id) GlobalInvocationID : vec3<u32>) {
if (GlobalInvocationID.x >= params.count) { return; }
@@ -115,103 +116,100 @@ namespace dawn::native {
}
)";
- ResultOrError<ComputePipelineBase*> GetOrCreateTimestampComputePipeline(
- DeviceBase* device) {
- InternalPipelineStore* store = device->GetInternalPipelineStore();
+ResultOrError<ComputePipelineBase*> GetOrCreateTimestampComputePipeline(DeviceBase* device) {
+ InternalPipelineStore* store = device->GetInternalPipelineStore();
- if (store->timestampComputePipeline == nullptr) {
- // Create compute shader module if not cached before.
- if (store->timestampCS == nullptr) {
- DAWN_TRY_ASSIGN(
- store->timestampCS,
- utils::CreateShaderModule(device, sConvertTimestampsToNanoseconds));
- }
-
- // Create binding group layout
- Ref<BindGroupLayoutBase> bgl;
- DAWN_TRY_ASSIGN(
- bgl, utils::MakeBindGroupLayout(
- device,
- {
- {0, wgpu::ShaderStage::Compute, kInternalStorageBufferBinding},
- {1, wgpu::ShaderStage::Compute,
- wgpu::BufferBindingType::ReadOnlyStorage},
- {2, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Uniform},
- },
- /* allowInternalBinding */ true));
-
- // Create pipeline layout
- Ref<PipelineLayoutBase> layout;
- DAWN_TRY_ASSIGN(layout, utils::MakeBasicPipelineLayout(device, bgl));
-
- // Create ComputePipeline.
- ComputePipelineDescriptor computePipelineDesc = {};
- // Generate the layout based on shader module.
- computePipelineDesc.layout = layout.Get();
- computePipelineDesc.compute.module = store->timestampCS.Get();
- computePipelineDesc.compute.entryPoint = "main";
-
- DAWN_TRY_ASSIGN(store->timestampComputePipeline,
- device->CreateComputePipeline(&computePipelineDesc));
- }
-
- return store->timestampComputePipeline.Get();
+ if (store->timestampComputePipeline == nullptr) {
+ // Create compute shader module if not cached before.
+ if (store->timestampCS == nullptr) {
+ DAWN_TRY_ASSIGN(store->timestampCS,
+ utils::CreateShaderModule(device, sConvertTimestampsToNanoseconds));
}
- } // anonymous namespace
-
- TimestampParams::TimestampParams(uint32_t first, uint32_t count, uint32_t offset, float period)
- : first(first), count(count), offset(offset) {
- // The overall conversion happening, if p is the period, m the multiplier, s the shift, is::
- //
- // m = round(p * 2^s)
- //
- // Then in the shader we compute:
- //
- // m / 2^s = round(p * 2^s) / 2*s ~= p
- //
- // The goal is to find the best shift to keep the precision of computations. The
- // conversion shader uses chunks of 16 bits to compute the multiplication with the perios,
- // so we need to keep the multiplier under 2^16. At the same time, the larger the
- // multiplier, the better the precision, so we maximize the value of the right shift while
- // keeping the multiplier under 2 ^ 16
- uint32_t upperLog2 = ceil(log2(period));
-
- // Clamp the shift to 16 because we're doing computations in 16bit chunks. The
- // multiplication by the period will overflow the chunks, but timestamps are mostly
- // informational so that's ok.
- rightShift = 16u - std::min(upperLog2, 16u);
- multiplier = uint32_t(period * (1 << rightShift));
+ // Create binding group layout
+ Ref<BindGroupLayoutBase> bgl;
+ DAWN_TRY_ASSIGN(
+ bgl, utils::MakeBindGroupLayout(
+ device,
+ {
+ {0, wgpu::ShaderStage::Compute, kInternalStorageBufferBinding},
+ {1, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::ReadOnlyStorage},
+ {2, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Uniform},
+ },
+ /* allowInternalBinding */ true));
+
+ // Create pipeline layout
+ Ref<PipelineLayoutBase> layout;
+ DAWN_TRY_ASSIGN(layout, utils::MakeBasicPipelineLayout(device, bgl));
+
+ // Create ComputePipeline.
+ ComputePipelineDescriptor computePipelineDesc = {};
+ // Generate the layout based on shader module.
+ computePipelineDesc.layout = layout.Get();
+ computePipelineDesc.compute.module = store->timestampCS.Get();
+ computePipelineDesc.compute.entryPoint = "main";
+
+ DAWN_TRY_ASSIGN(store->timestampComputePipeline,
+ device->CreateComputePipeline(&computePipelineDesc));
}
- MaybeError EncodeConvertTimestampsToNanoseconds(CommandEncoder* encoder,
- BufferBase* timestamps,
- BufferBase* availability,
- BufferBase* params) {
- DeviceBase* device = encoder->GetDevice();
-
- ComputePipelineBase* pipeline;
- DAWN_TRY_ASSIGN(pipeline, GetOrCreateTimestampComputePipeline(device));
-
- // Prepare bind group layout.
- Ref<BindGroupLayoutBase> layout;
- DAWN_TRY_ASSIGN(layout, pipeline->GetBindGroupLayout(0));
-
- // Create bind group after all binding entries are set.
- Ref<BindGroupBase> bindGroup;
- DAWN_TRY_ASSIGN(bindGroup,
- utils::MakeBindGroup(device, layout,
- {{0, timestamps}, {1, availability}, {2, params}}));
-
- // Create compute encoder and issue dispatch.
- Ref<ComputePassEncoder> pass = encoder->BeginComputePass();
- pass->APISetPipeline(pipeline);
- pass->APISetBindGroup(0, bindGroup.Get());
- pass->APIDispatch(
- static_cast<uint32_t>((timestamps->GetSize() / sizeof(uint64_t) + 7) / 8));
- pass->APIEnd();
-
- return {};
- }
+ return store->timestampComputePipeline.Get();
+}
+
+} // anonymous namespace
+
+TimestampParams::TimestampParams(uint32_t first, uint32_t count, uint32_t offset, float period)
+ : first(first), count(count), offset(offset) {
+    // The overall conversion happening, if p is the period, m the multiplier, s the shift, is:
+ //
+ // m = round(p * 2^s)
+ //
+ // Then in the shader we compute:
+ //
+    //    m / 2^s = round(p * 2^s) / 2^s ~= p
+ //
+ // The goal is to find the best shift to keep the precision of computations. The
+    // conversion shader uses chunks of 16 bits to compute the multiplication with the period,
+ // so we need to keep the multiplier under 2^16. At the same time, the larger the
+ // multiplier, the better the precision, so we maximize the value of the right shift while
+    // keeping the multiplier under 2^16.
+ uint32_t upperLog2 = ceil(log2(period));
+
+    // Clamp the shift to 16 because we're doing computations in 16-bit chunks. The
+ // multiplication by the period will overflow the chunks, but timestamps are mostly
+ // informational so that's ok.
+ rightShift = 16u - std::min(upperLog2, 16u);
+ multiplier = uint32_t(period * (1 << rightShift));
+}
+
+MaybeError EncodeConvertTimestampsToNanoseconds(CommandEncoder* encoder,
+ BufferBase* timestamps,
+ BufferBase* availability,
+ BufferBase* params) {
+ DeviceBase* device = encoder->GetDevice();
+
+ ComputePipelineBase* pipeline;
+ DAWN_TRY_ASSIGN(pipeline, GetOrCreateTimestampComputePipeline(device));
+
+ // Prepare bind group layout.
+ Ref<BindGroupLayoutBase> layout;
+ DAWN_TRY_ASSIGN(layout, pipeline->GetBindGroupLayout(0));
+
+ // Create bind group after all binding entries are set.
+ Ref<BindGroupBase> bindGroup;
+ DAWN_TRY_ASSIGN(
+ bindGroup,
+ utils::MakeBindGroup(device, layout, {{0, timestamps}, {1, availability}, {2, params}}));
+
+ // Create compute encoder and issue dispatch.
+ Ref<ComputePassEncoder> pass = encoder->BeginComputePass();
+ pass->APISetPipeline(pipeline);
+ pass->APISetBindGroup(0, bindGroup.Get());
+ pass->APIDispatchWorkgroups(
+ static_cast<uint32_t>((timestamps->GetSize() / sizeof(uint64_t) + 7) / 8));
+ pass->APIEnd();
+
+ return {};
+}
} // namespace dawn::native
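The TimestampParams constructor above encodes the floating-point timestamp period p as a fixed-point pair (multiplier, rightShift), with multiplier ~= p * 2^rightShift truncated to an integer and the shift chosen as large as possible while keeping the multiplier below 2^16, so the shader's 16-bit-chunk multiplication retains precision. The sketch below walks through that arithmetic; the 83.333 ns period is a made-up example value, not one taken from the code.

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdio>

int main() {
    float period = 83.333f;  // hypothetical GPU timestamp period in ns per tick

    // Same arithmetic as the constructor above: maximize the shift while
    // keeping the multiplier below 2^16.
    uint32_t upperLog2 = static_cast<uint32_t>(std::ceil(std::log2(period)));       // 7
    uint32_t rightShift = 16u - std::min(upperLog2, 16u);                           // 9
    uint32_t multiplier = static_cast<uint32_t>(period * (1 << rightShift));        // 42666

    // The shader conceptually computes ticks * multiplier / 2^rightShift.
    double recovered = static_cast<double>(multiplier) / (1 << rightShift);
    printf("multiplier=%u shift=%u recovered period=%f\n", multiplier, rightShift, recovered);
    // recovered ~= 83.332, close to the original 83.333
    return 0;
}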
diff --git a/chromium/third_party/dawn/src/dawn/native/QueryHelper.h b/chromium/third_party/dawn/src/dawn/native/QueryHelper.h
index 744e612a414..11aadd02c49 100644
--- a/chromium/third_party/dawn/src/dawn/native/QueryHelper.h
+++ b/chromium/third_party/dawn/src/dawn/native/QueryHelper.h
@@ -20,23 +20,23 @@
namespace dawn::native {
- class BufferBase;
- class CommandEncoder;
-
- struct TimestampParams {
- TimestampParams(uint32_t first, uint32_t count, uint32_t offset, float period);
-
- uint32_t first;
- uint32_t count;
- uint32_t offset;
- uint32_t multiplier;
- uint32_t rightShift;
- };
-
- MaybeError EncodeConvertTimestampsToNanoseconds(CommandEncoder* encoder,
- BufferBase* timestamps,
- BufferBase* availability,
- BufferBase* params);
+class BufferBase;
+class CommandEncoder;
+
+struct TimestampParams {
+ TimestampParams(uint32_t first, uint32_t count, uint32_t offset, float period);
+
+ uint32_t first;
+ uint32_t count;
+ uint32_t offset;
+ uint32_t multiplier;
+ uint32_t rightShift;
+};
+
+MaybeError EncodeConvertTimestampsToNanoseconds(CommandEncoder* encoder,
+ BufferBase* timestamps,
+ BufferBase* availability,
+ BufferBase* params);
} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/QuerySet.cpp b/chromium/third_party/dawn/src/dawn/native/QuerySet.cpp
index 0e5f90eb617..a5031659c66 100644
--- a/chromium/third_party/dawn/src/dawn/native/QuerySet.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/QuerySet.cpp
@@ -14,167 +14,173 @@
#include "dawn/native/QuerySet.h"
+#include <set>
+
#include "dawn/native/Device.h"
#include "dawn/native/Features.h"
#include "dawn/native/ObjectType_autogen.h"
#include "dawn/native/ValidationUtils_autogen.h"
-#include <set>
-
namespace dawn::native {
- namespace {
+namespace {
- class ErrorQuerySet final : public QuerySetBase {
- public:
- explicit ErrorQuerySet(DeviceBase* device) : QuerySetBase(device, ObjectBase::kError) {
- }
+class ErrorQuerySet final : public QuerySetBase {
+ public:
+ explicit ErrorQuerySet(DeviceBase* device, const QuerySetDescriptor* descriptor)
+ : QuerySetBase(device, descriptor, ObjectBase::kError) {}
+
+ private:
+ void DestroyImpl() override { UNREACHABLE(); }
+};
+
+} // anonymous namespace
+
+MaybeError ValidateQuerySetDescriptor(DeviceBase* device, const QuerySetDescriptor* descriptor) {
+ DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr");
+
+ DAWN_TRY(ValidateQueryType(descriptor->type));
- private:
- void DestroyImpl() override {
- UNREACHABLE();
+ DAWN_INVALID_IF(descriptor->count > kMaxQueryCount,
+ "Query count (%u) exceeds the maximum query count (%u).", descriptor->count,
+ kMaxQueryCount);
+
+ switch (descriptor->type) {
+ case wgpu::QueryType::Occlusion:
+ DAWN_INVALID_IF(descriptor->pipelineStatisticsCount != 0,
+ "Pipeline statistics specified for a query of type %s.",
+ descriptor->type);
+ break;
+
+ case wgpu::QueryType::PipelineStatistics: {
+ // TODO(crbug.com/1177506): Pipeline statistics query is not fully implemented.
+            // Disallow it as unsafe until the implementation is completed.
+ DAWN_INVALID_IF(device->IsToggleEnabled(Toggle::DisallowUnsafeAPIs),
+ "Pipeline statistics queries are disallowed because they are not "
+ "fully implemented");
+
+ DAWN_INVALID_IF(
+ !device->IsFeatureEnabled(Feature::PipelineStatisticsQuery),
+ "Pipeline statistics query set created without the feature being enabled.");
+
+ DAWN_INVALID_IF(descriptor->pipelineStatisticsCount == 0,
+ "Pipeline statistics query set created with 0 statistics.");
+
+ std::set<wgpu::PipelineStatisticName> pipelineStatisticsSet;
+ for (uint32_t i = 0; i < descriptor->pipelineStatisticsCount; i++) {
+ DAWN_TRY(ValidatePipelineStatisticName(descriptor->pipelineStatistics[i]));
+
+ auto [_, inserted] =
+ pipelineStatisticsSet.insert((descriptor->pipelineStatistics[i]));
+ DAWN_INVALID_IF(!inserted, "Statistic %s is specified more than once.",
+ descriptor->pipelineStatistics[i]);
}
- };
-
- } // anonymous namespace
-
- MaybeError ValidateQuerySetDescriptor(DeviceBase* device,
- const QuerySetDescriptor* descriptor) {
- DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr");
-
- DAWN_TRY(ValidateQueryType(descriptor->type));
-
- DAWN_INVALID_IF(descriptor->count > kMaxQueryCount,
- "Query count (%u) exceeds the maximum query count (%u).", descriptor->count,
- kMaxQueryCount);
-
- switch (descriptor->type) {
- case wgpu::QueryType::Occlusion:
- DAWN_INVALID_IF(descriptor->pipelineStatisticsCount != 0,
- "Pipeline statistics specified for a query of type %s.",
- descriptor->type);
- break;
-
- case wgpu::QueryType::PipelineStatistics: {
- // TODO(crbug.com/1177506): Pipeline statistics query is not fully implemented.
- // Disallow it as unsafe until the implementaion is completed.
- DAWN_INVALID_IF(device->IsToggleEnabled(Toggle::DisallowUnsafeAPIs),
- "Pipeline statistics queries are disallowed because they are not "
- "fully implemented");
-
- DAWN_INVALID_IF(
- !device->IsFeatureEnabled(Feature::PipelineStatisticsQuery),
- "Pipeline statistics query set created without the feature being enabled.");
-
- DAWN_INVALID_IF(descriptor->pipelineStatisticsCount == 0,
- "Pipeline statistics query set created with 0 statistics.");
-
- std::set<wgpu::PipelineStatisticName> pipelineStatisticsSet;
- for (uint32_t i = 0; i < descriptor->pipelineStatisticsCount; i++) {
- DAWN_TRY(ValidatePipelineStatisticName(descriptor->pipelineStatistics[i]));
-
- auto [_, inserted] =
- pipelineStatisticsSet.insert((descriptor->pipelineStatistics[i]));
- DAWN_INVALID_IF(!inserted, "Statistic %s is specified more than once.",
- descriptor->pipelineStatistics[i]);
- }
- } break;
-
- case wgpu::QueryType::Timestamp:
- DAWN_INVALID_IF(device->IsToggleEnabled(Toggle::DisallowUnsafeAPIs),
- "Timestamp queries are disallowed because they may expose precise "
- "timing information.");
-
- DAWN_INVALID_IF(!device->IsFeatureEnabled(Feature::TimestampQuery),
- "Timestamp query set created without the feature being enabled.");
-
- DAWN_INVALID_IF(descriptor->pipelineStatisticsCount != 0,
- "Pipeline statistics specified for a query of type %s.",
- descriptor->type);
- break;
-
- default:
- break;
- }
-
- return {};
- }
+ } break;
- QuerySetBase::QuerySetBase(DeviceBase* device, const QuerySetDescriptor* descriptor)
- : ApiObjectBase(device, descriptor->label),
- mQueryType(descriptor->type),
- mQueryCount(descriptor->count),
- mState(QuerySetState::Available) {
- for (uint32_t i = 0; i < descriptor->pipelineStatisticsCount; i++) {
- mPipelineStatistics.push_back(descriptor->pipelineStatistics[i]);
- }
-
- mQueryAvailability.resize(descriptor->count);
- TrackInDevice();
- }
+ case wgpu::QueryType::Timestamp:
+ DAWN_INVALID_IF(device->IsToggleEnabled(Toggle::DisallowUnsafeAPIs),
+ "Timestamp queries are disallowed because they may expose precise "
+ "timing information.");
- QuerySetBase::QuerySetBase(DeviceBase* device) : ApiObjectBase(device, kLabelNotImplemented) {
- TrackInDevice();
- }
+ DAWN_INVALID_IF(!device->IsFeatureEnabled(Feature::TimestampQuery),
+ "Timestamp query set created without the feature being enabled.");
- QuerySetBase::QuerySetBase(DeviceBase* device, ObjectBase::ErrorTag tag)
- : ApiObjectBase(device, tag) {
- }
+ DAWN_INVALID_IF(descriptor->pipelineStatisticsCount != 0,
+ "Pipeline statistics specified for a query of type %s.",
+ descriptor->type);
+ break;
- QuerySetBase::~QuerySetBase() {
- // Uninitialized or already destroyed
- ASSERT(mState == QuerySetState::Unavailable || mState == QuerySetState::Destroyed);
+ default:
+ break;
}
- void QuerySetBase::DestroyImpl() {
- mState = QuerySetState::Destroyed;
- }
+ return {};
+}
- // static
- QuerySetBase* QuerySetBase::MakeError(DeviceBase* device) {
- return new ErrorQuerySet(device);
+QuerySetBase::QuerySetBase(DeviceBase* device, const QuerySetDescriptor* descriptor)
+ : ApiObjectBase(device, descriptor->label),
+ mQueryType(descriptor->type),
+ mQueryCount(descriptor->count),
+ mState(QuerySetState::Available) {
+ for (uint32_t i = 0; i < descriptor->pipelineStatisticsCount; i++) {
+ mPipelineStatistics.push_back(descriptor->pipelineStatistics[i]);
}
- ObjectType QuerySetBase::GetType() const {
- return ObjectType::QuerySet;
- }
+ mQueryAvailability.resize(descriptor->count);
+ TrackInDevice();
+}
- wgpu::QueryType QuerySetBase::GetQueryType() const {
- return mQueryType;
- }
+QuerySetBase::QuerySetBase(DeviceBase* device) : ApiObjectBase(device, kLabelNotImplemented) {
+ TrackInDevice();
+}
- uint32_t QuerySetBase::GetQueryCount() const {
- return mQueryCount;
- }
+QuerySetBase::QuerySetBase(DeviceBase* device,
+ const QuerySetDescriptor* descriptor,
+ ObjectBase::ErrorTag tag)
+ : ApiObjectBase(device, tag), mQueryType(descriptor->type), mQueryCount(descriptor->count) {}
- const std::vector<wgpu::PipelineStatisticName>& QuerySetBase::GetPipelineStatistics() const {
- return mPipelineStatistics;
- }
+QuerySetBase::~QuerySetBase() {
+ // Uninitialized or already destroyed
+ ASSERT(mState == QuerySetState::Unavailable || mState == QuerySetState::Destroyed);
+}
- const std::vector<bool>& QuerySetBase::GetQueryAvailability() const {
- return mQueryAvailability;
- }
+void QuerySetBase::DestroyImpl() {
+ mState = QuerySetState::Destroyed;
+}
- void QuerySetBase::SetQueryAvailability(uint32_t index, bool available) {
- mQueryAvailability[index] = available;
- }
+// static
+QuerySetBase* QuerySetBase::MakeError(DeviceBase* device, const QuerySetDescriptor* descriptor) {
+ return new ErrorQuerySet(device, descriptor);
+}
- MaybeError QuerySetBase::ValidateCanUseInSubmitNow() const {
- ASSERT(!IsError());
- DAWN_INVALID_IF(mState == QuerySetState::Destroyed, "%s used while destroyed.", this);
- return {};
- }
+ObjectType QuerySetBase::GetType() const {
+ return ObjectType::QuerySet;
+}
- void QuerySetBase::APIDestroy() {
- if (GetDevice()->ConsumedError(ValidateDestroy())) {
- return;
- }
- Destroy();
- }
+wgpu::QueryType QuerySetBase::GetQueryType() const {
+ return mQueryType;
+}
+
+uint32_t QuerySetBase::GetQueryCount() const {
+ return mQueryCount;
+}
+
+const std::vector<wgpu::PipelineStatisticName>& QuerySetBase::GetPipelineStatistics() const {
+ return mPipelineStatistics;
+}
+
+const std::vector<bool>& QuerySetBase::GetQueryAvailability() const {
+ return mQueryAvailability;
+}
- MaybeError QuerySetBase::ValidateDestroy() const {
- DAWN_TRY(GetDevice()->ValidateObject(this));
- return {};
+void QuerySetBase::SetQueryAvailability(uint32_t index, bool available) {
+ mQueryAvailability[index] = available;
+}
+
+MaybeError QuerySetBase::ValidateCanUseInSubmitNow() const {
+ ASSERT(!IsError());
+ DAWN_INVALID_IF(mState == QuerySetState::Destroyed, "%s used while destroyed.", this);
+ return {};
+}
+
+void QuerySetBase::APIDestroy() {
+ if (GetDevice()->ConsumedError(ValidateDestroy())) {
+ return;
}
+ Destroy();
+}
+
+wgpu::QueryType QuerySetBase::APIGetType() const {
+ return mQueryType;
+}
+
+uint32_t QuerySetBase::APIGetCount() const {
+ return mQueryCount;
+}
+
+MaybeError QuerySetBase::ValidateDestroy() const {
+ DAWN_TRY(GetDevice()->ValidateObject(this));
+ return {};
+}
} // namespace dawn::native
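ValidateQuerySetDescriptor above rejects duplicate pipeline statistics by relying on std::set::insert, whose returned pair carries a bool that is false when the element was already present. The same pattern in isolation, over plain integers instead of wgpu::PipelineStatisticName; HasDuplicates is an illustrative helper, not part of Dawn.

#include <set>
#include <vector>

// Returns true if 'values' contains any repeated element, using the same
// insert-and-check pattern as the statistics validation above.
bool HasDuplicates(const std::vector<int>& values) {
    std::set<int> seen;
    for (int v : values) {
        auto [it, inserted] = seen.insert(v);
        (void)it;
        if (!inserted) {
            return true;  // v was specified more than once
        }
    }
    return false;
}

HasDuplicates({1, 2, 2}) returns true because the second insert of 2 reports inserted == false; HasDuplicates({1, 2, 3}) returns false.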
diff --git a/chromium/third_party/dawn/src/dawn/native/QuerySet.h b/chromium/third_party/dawn/src/dawn/native/QuerySet.h
index f65972df022..fa288a02487 100644
--- a/chromium/third_party/dawn/src/dawn/native/QuerySet.h
+++ b/chromium/third_party/dawn/src/dawn/native/QuerySet.h
@@ -15,6 +15,8 @@
#ifndef SRC_DAWN_NATIVE_QUERYSET_H_
#define SRC_DAWN_NATIVE_QUERYSET_H_
+#include <vector>
+
#include "dawn/native/Error.h"
#include "dawn/native/Forward.h"
#include "dawn/native/ObjectBase.h"
@@ -23,49 +25,52 @@
namespace dawn::native {
- MaybeError ValidateQuerySetDescriptor(DeviceBase* device, const QuerySetDescriptor* descriptor);
-
- class QuerySetBase : public ApiObjectBase {
- public:
- QuerySetBase(DeviceBase* device, const QuerySetDescriptor* descriptor);
+MaybeError ValidateQuerySetDescriptor(DeviceBase* device, const QuerySetDescriptor* descriptor);
- static QuerySetBase* MakeError(DeviceBase* device);
+class QuerySetBase : public ApiObjectBase {
+ public:
+ static QuerySetBase* MakeError(DeviceBase* device, const QuerySetDescriptor* descriptor);
- ObjectType GetType() const override;
+ ObjectType GetType() const override;
- wgpu::QueryType GetQueryType() const;
- uint32_t GetQueryCount() const;
- const std::vector<wgpu::PipelineStatisticName>& GetPipelineStatistics() const;
+ wgpu::QueryType GetQueryType() const;
+ uint32_t GetQueryCount() const;
+ const std::vector<wgpu::PipelineStatisticName>& GetPipelineStatistics() const;
- const std::vector<bool>& GetQueryAvailability() const;
- void SetQueryAvailability(uint32_t index, bool available);
+ const std::vector<bool>& GetQueryAvailability() const;
+ void SetQueryAvailability(uint32_t index, bool available);
- MaybeError ValidateCanUseInSubmitNow() const;
+ MaybeError ValidateCanUseInSubmitNow() const;
- void APIDestroy();
+ void APIDestroy();
+ wgpu::QueryType APIGetType() const;
+ uint32_t APIGetCount() const;
- protected:
- QuerySetBase(DeviceBase* device, ObjectBase::ErrorTag tag);
+ protected:
+ QuerySetBase(DeviceBase* device, const QuerySetDescriptor* descriptor);
+ QuerySetBase(DeviceBase* device,
+ const QuerySetDescriptor* descriptor,
+ ObjectBase::ErrorTag tag);
- // Constructor used only for mocking and testing.
- explicit QuerySetBase(DeviceBase* device);
- void DestroyImpl() override;
+ // Constructor used only for mocking and testing.
+ explicit QuerySetBase(DeviceBase* device);
+ void DestroyImpl() override;
- ~QuerySetBase() override;
+ ~QuerySetBase() override;
- private:
- MaybeError ValidateDestroy() const;
+ private:
+ MaybeError ValidateDestroy() const;
- wgpu::QueryType mQueryType;
- uint32_t mQueryCount;
- std::vector<wgpu::PipelineStatisticName> mPipelineStatistics;
+ wgpu::QueryType mQueryType;
+ uint32_t mQueryCount;
+ std::vector<wgpu::PipelineStatisticName> mPipelineStatistics;
- enum class QuerySetState { Unavailable, Available, Destroyed };
- QuerySetState mState = QuerySetState::Unavailable;
+ enum class QuerySetState { Unavailable, Available, Destroyed };
+ QuerySetState mState = QuerySetState::Unavailable;
- // Indicates the available queries on the query set for resolving
- std::vector<bool> mQueryAvailability;
- };
+ // Indicates the available queries on the query set for resolving
+ std::vector<bool> mQueryAvailability;
+};
} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/Queue.cpp b/chromium/third_party/dawn/src/dawn/native/Queue.cpp
index 919f554c9cb..c17ed8f4d9b 100644
--- a/chromium/third_party/dawn/src/dawn/native/Queue.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/Queue.cpp
@@ -14,6 +14,11 @@
#include "dawn/native/Queue.h"
+#include <algorithm>
+#include <cstring>
+#include <utility>
+#include <vector>
+
#include "dawn/common/Constants.h"
#include "dawn/native/Buffer.h"
#include "dawn/native/CommandBuffer.h"
@@ -32,482 +37,472 @@
#include "dawn/platform/DawnPlatform.h"
#include "dawn/platform/tracing/TraceEvent.h"
-#include <cstring>
-
namespace dawn::native {
- namespace {
-
- void CopyTextureData(uint8_t* dstPointer,
- const uint8_t* srcPointer,
- uint32_t depth,
- uint32_t rowsPerImage,
- uint64_t imageAdditionalStride,
- uint32_t actualBytesPerRow,
- uint32_t dstBytesPerRow,
- uint32_t srcBytesPerRow) {
- bool copyWholeLayer =
- actualBytesPerRow == dstBytesPerRow && dstBytesPerRow == srcBytesPerRow;
- bool copyWholeData = copyWholeLayer && imageAdditionalStride == 0;
-
- if (!copyWholeLayer) { // copy row by row
- for (uint32_t d = 0; d < depth; ++d) {
- for (uint32_t h = 0; h < rowsPerImage; ++h) {
- memcpy(dstPointer, srcPointer, actualBytesPerRow);
- dstPointer += dstBytesPerRow;
- srcPointer += srcBytesPerRow;
- }
- srcPointer += imageAdditionalStride;
- }
- } else {
- uint64_t layerSize = uint64_t(rowsPerImage) * actualBytesPerRow;
- if (!copyWholeData) { // copy layer by layer
- for (uint32_t d = 0; d < depth; ++d) {
- memcpy(dstPointer, srcPointer, layerSize);
- dstPointer += layerSize;
- srcPointer += layerSize + imageAdditionalStride;
- }
- } else { // do a single copy
- memcpy(dstPointer, srcPointer, layerSize * depth);
- }
+namespace {
+
+void CopyTextureData(uint8_t* dstPointer,
+ const uint8_t* srcPointer,
+ uint32_t depth,
+ uint32_t rowsPerImage,
+ uint64_t imageAdditionalStride,
+ uint32_t actualBytesPerRow,
+ uint32_t dstBytesPerRow,
+ uint32_t srcBytesPerRow) {
+ bool copyWholeLayer = actualBytesPerRow == dstBytesPerRow && dstBytesPerRow == srcBytesPerRow;
+ bool copyWholeData = copyWholeLayer && imageAdditionalStride == 0;
+
+ if (!copyWholeLayer) { // copy row by row
+ for (uint32_t d = 0; d < depth; ++d) {
+ for (uint32_t h = 0; h < rowsPerImage; ++h) {
+ memcpy(dstPointer, srcPointer, actualBytesPerRow);
+ dstPointer += dstBytesPerRow;
+ srcPointer += srcBytesPerRow;
}
+ srcPointer += imageAdditionalStride;
}
-
- ResultOrError<UploadHandle> UploadTextureDataAligningBytesPerRowAndOffset(
- DeviceBase* device,
- const void* data,
- uint32_t alignedBytesPerRow,
- uint32_t optimallyAlignedBytesPerRow,
- uint32_t alignedRowsPerImage,
- const TextureDataLayout& dataLayout,
- bool hasDepthOrStencil,
- const TexelBlockInfo& blockInfo,
- const Extent3D& writeSizePixel) {
- uint64_t newDataSizeBytes;
- DAWN_TRY_ASSIGN(
- newDataSizeBytes,
- ComputeRequiredBytesInCopy(blockInfo, writeSizePixel, optimallyAlignedBytesPerRow,
- alignedRowsPerImage));
-
- uint64_t optimalOffsetAlignment =
- device->GetOptimalBufferToTextureCopyOffsetAlignment();
- ASSERT(IsPowerOfTwo(optimalOffsetAlignment));
- ASSERT(IsPowerOfTwo(blockInfo.byteSize));
- // We need the offset to be aligned to both optimalOffsetAlignment and blockByteSize,
- // since both of them are powers of two, we only need to align to the max value.
- uint64_t offsetAlignment =
- std::max(optimalOffsetAlignment, uint64_t(blockInfo.byteSize));
-
- // For depth-stencil texture, buffer offset must be a multiple of 4, which is required
- // by WebGPU and Vulkan SPEC.
- if (hasDepthOrStencil) {
- constexpr uint64_t kOffsetAlignmentForDepthStencil = 4;
- offsetAlignment = std::max(offsetAlignment, kOffsetAlignmentForDepthStencil);
- }
-
- UploadHandle uploadHandle;
- DAWN_TRY_ASSIGN(uploadHandle, device->GetDynamicUploader()->Allocate(
- newDataSizeBytes, device->GetPendingCommandSerial(),
- offsetAlignment));
- ASSERT(uploadHandle.mappedBuffer != nullptr);
-
- uint8_t* dstPointer = static_cast<uint8_t*>(uploadHandle.mappedBuffer);
- const uint8_t* srcPointer = static_cast<const uint8_t*>(data);
- srcPointer += dataLayout.offset;
-
- uint32_t dataRowsPerImage = dataLayout.rowsPerImage;
- if (dataRowsPerImage == 0) {
- dataRowsPerImage = writeSizePixel.height / blockInfo.height;
+ } else {
+ uint64_t layerSize = uint64_t(rowsPerImage) * actualBytesPerRow;
+ if (!copyWholeData) { // copy layer by layer
+ for (uint32_t d = 0; d < depth; ++d) {
+ memcpy(dstPointer, srcPointer, layerSize);
+ dstPointer += layerSize;
+ srcPointer += layerSize + imageAdditionalStride;
}
-
- ASSERT(dataRowsPerImage >= alignedRowsPerImage);
- uint64_t imageAdditionalStride =
- dataLayout.bytesPerRow * (dataRowsPerImage - alignedRowsPerImage);
-
- CopyTextureData(dstPointer, srcPointer, writeSizePixel.depthOrArrayLayers,
- alignedRowsPerImage, imageAdditionalStride, alignedBytesPerRow,
- optimallyAlignedBytesPerRow, dataLayout.bytesPerRow);
-
- return uploadHandle;
+ } else { // do a single copy
+ memcpy(dstPointer, srcPointer, layerSize * depth);
}
+ }
+}
+
+ResultOrError<UploadHandle> UploadTextureDataAligningBytesPerRowAndOffset(
+ DeviceBase* device,
+ const void* data,
+ uint32_t alignedBytesPerRow,
+ uint32_t optimallyAlignedBytesPerRow,
+ uint32_t alignedRowsPerImage,
+ const TextureDataLayout& dataLayout,
+ bool hasDepthOrStencil,
+ const TexelBlockInfo& blockInfo,
+ const Extent3D& writeSizePixel) {
+ uint64_t newDataSizeBytes;
+ DAWN_TRY_ASSIGN(newDataSizeBytes,
+ ComputeRequiredBytesInCopy(blockInfo, writeSizePixel,
+ optimallyAlignedBytesPerRow, alignedRowsPerImage));
+
+ uint64_t optimalOffsetAlignment = device->GetOptimalBufferToTextureCopyOffsetAlignment();
+ ASSERT(IsPowerOfTwo(optimalOffsetAlignment));
+ ASSERT(IsPowerOfTwo(blockInfo.byteSize));
+    // We need the offset to be aligned to both optimalOffsetAlignment and blockByteSize;
+    // since both of them are powers of two, we only need to align to the larger value.
+ uint64_t offsetAlignment = std::max(optimalOffsetAlignment, uint64_t(blockInfo.byteSize));
+
+    // For depth-stencil textures, the buffer offset must be a multiple of 4, as required
+    // by the WebGPU and Vulkan specs.
+ if (hasDepthOrStencil) {
+ constexpr uint64_t kOffsetAlignmentForDepthStencil = 4;
+ offsetAlignment = std::max(offsetAlignment, kOffsetAlignmentForDepthStencil);
+ }
- struct SubmittedWorkDone : QueueBase::TaskInFlight {
- SubmittedWorkDone(WGPUQueueWorkDoneCallback callback, void* userdata)
- : mCallback(callback), mUserdata(userdata) {
- }
- void Finish() override {
- ASSERT(mCallback != nullptr);
- mCallback(WGPUQueueWorkDoneStatus_Success, mUserdata);
- mCallback = nullptr;
- }
- void HandleDeviceLoss() override {
- ASSERT(mCallback != nullptr);
- mCallback(WGPUQueueWorkDoneStatus_DeviceLost, mUserdata);
- mCallback = nullptr;
- }
- ~SubmittedWorkDone() override = default;
-
- private:
- WGPUQueueWorkDoneCallback mCallback = nullptr;
- void* mUserdata;
- };
-
- class ErrorQueue : public QueueBase {
- public:
- explicit ErrorQueue(DeviceBase* device) : QueueBase(device, ObjectBase::kError) {
- }
-
- private:
- MaybeError SubmitImpl(uint32_t commandCount,
- CommandBufferBase* const* commands) override {
- UNREACHABLE();
- }
- };
- } // namespace
+ UploadHandle uploadHandle;
+ DAWN_TRY_ASSIGN(uploadHandle,
+ device->GetDynamicUploader()->Allocate(
+ newDataSizeBytes, device->GetPendingCommandSerial(), offsetAlignment));
+ ASSERT(uploadHandle.mappedBuffer != nullptr);
- // QueueBase
+ uint8_t* dstPointer = static_cast<uint8_t*>(uploadHandle.mappedBuffer);
+ const uint8_t* srcPointer = static_cast<const uint8_t*>(data);
+ srcPointer += dataLayout.offset;
- QueueBase::TaskInFlight::~TaskInFlight() {
+ uint32_t dataRowsPerImage = dataLayout.rowsPerImage;
+ if (dataRowsPerImage == 0) {
+ dataRowsPerImage = writeSizePixel.height / blockInfo.height;
}
- QueueBase::QueueBase(DeviceBase* device, const QueueDescriptor* descriptor)
- : ApiObjectBase(device, descriptor->label) {
+ ASSERT(dataRowsPerImage >= alignedRowsPerImage);
+ uint64_t imageAdditionalStride =
+ dataLayout.bytesPerRow * (dataRowsPerImage - alignedRowsPerImage);
+
+ CopyTextureData(dstPointer, srcPointer, writeSizePixel.depthOrArrayLayers, alignedRowsPerImage,
+ imageAdditionalStride, alignedBytesPerRow, optimallyAlignedBytesPerRow,
+ dataLayout.bytesPerRow);
+
+ return uploadHandle;
+}
+
+struct SubmittedWorkDone : QueueBase::TaskInFlight {
+ SubmittedWorkDone(WGPUQueueWorkDoneCallback callback, void* userdata)
+ : mCallback(callback), mUserdata(userdata) {}
+ void Finish(dawn::platform::Platform* platform, ExecutionSerial serial) override {
+ ASSERT(mCallback != nullptr);
+ TRACE_EVENT1(platform, General, "Queue::SubmittedWorkDone::Finished", "serial",
+ uint64_t(serial));
+ mCallback(WGPUQueueWorkDoneStatus_Success, mUserdata);
+ mCallback = nullptr;
}
-
- QueueBase::QueueBase(DeviceBase* device, ObjectBase::ErrorTag tag)
- : ApiObjectBase(device, tag) {
+ void HandleDeviceLoss() override {
+ ASSERT(mCallback != nullptr);
+ mCallback(WGPUQueueWorkDoneStatus_DeviceLost, mUserdata);
+ mCallback = nullptr;
}
+ ~SubmittedWorkDone() override = default;
- QueueBase::~QueueBase() {
- ASSERT(mTasksInFlight.Empty());
- }
+ private:
+ WGPUQueueWorkDoneCallback mCallback = nullptr;
+ void* mUserdata;
+};
- void QueueBase::DestroyImpl() {
- }
+class ErrorQueue : public QueueBase {
+ public:
+ explicit ErrorQueue(DeviceBase* device) : QueueBase(device, ObjectBase::kError) {}
- // static
- QueueBase* QueueBase::MakeError(DeviceBase* device) {
- return new ErrorQueue(device);
+ private:
+ MaybeError SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) override {
+ UNREACHABLE();
}
+};
+} // namespace
- ObjectType QueueBase::GetType() const {
- return ObjectType::Queue;
- }
+// QueueBase
- void QueueBase::APISubmit(uint32_t commandCount, CommandBufferBase* const* commands) {
- SubmitInternal(commandCount, commands);
+QueueBase::TaskInFlight::~TaskInFlight() {}
- for (uint32_t i = 0; i < commandCount; ++i) {
- commands[i]->Destroy();
- }
- }
+QueueBase::QueueBase(DeviceBase* device, const QueueDescriptor* descriptor)
+ : ApiObjectBase(device, descriptor->label) {}
- void QueueBase::APIOnSubmittedWorkDone(uint64_t signalValue,
- WGPUQueueWorkDoneCallback callback,
- void* userdata) {
- // The error status depends on the type of error so we let the validation function choose it
- WGPUQueueWorkDoneStatus status;
- if (GetDevice()->ConsumedError(ValidateOnSubmittedWorkDone(signalValue, &status))) {
- callback(status, userdata);
- return;
- }
+QueueBase::QueueBase(DeviceBase* device, ObjectBase::ErrorTag tag) : ApiObjectBase(device, tag) {}
- std::unique_ptr<SubmittedWorkDone> task =
- std::make_unique<SubmittedWorkDone>(callback, userdata);
+QueueBase::~QueueBase() {
+ ASSERT(mTasksInFlight.Empty());
+}
- // Technically we only need to wait for previously submitted work but OnSubmittedWorkDone is
- // also used to make sure ALL queue work is finished in tests, so we also wait for pending
- // commands (this is non-observable outside of tests so it's ok to do deviate a bit from the
- // spec).
- TrackTask(std::move(task), GetDevice()->GetPendingCommandSerial());
- }
+void QueueBase::DestroyImpl() {}
- void QueueBase::TrackTask(std::unique_ptr<TaskInFlight> task, ExecutionSerial serial) {
- mTasksInFlight.Enqueue(std::move(task), serial);
- GetDevice()->AddFutureSerial(serial);
- }
+// static
+QueueBase* QueueBase::MakeError(DeviceBase* device) {
+ return new ErrorQueue(device);
+}
- void QueueBase::Tick(ExecutionSerial finishedSerial) {
- // If a user calls Queue::Submit inside a task, for example in a Buffer::MapAsync callback,
- // then the device will be ticked, which in turns ticks the queue, causing reentrance here.
- // To prevent the reentrant call from invalidating mTasksInFlight while in use by the first
- // call, we remove the tasks to finish from the queue, update mTasksInFlight, then run the
- // callbacks.
- std::vector<std::unique_ptr<TaskInFlight>> tasks;
- for (auto& task : mTasksInFlight.IterateUpTo(finishedSerial)) {
- tasks.push_back(std::move(task));
- }
- mTasksInFlight.ClearUpTo(finishedSerial);
+ObjectType QueueBase::GetType() const {
+ return ObjectType::Queue;
+}
- for (auto& task : tasks) {
- task->Finish();
- }
+void QueueBase::APISubmit(uint32_t commandCount, CommandBufferBase* const* commands) {
+ SubmitInternal(commandCount, commands);
+
+ for (uint32_t i = 0; i < commandCount; ++i) {
+ commands[i]->Destroy();
+ }
+}
+
+void QueueBase::APIOnSubmittedWorkDone(uint64_t signalValue,
+ WGPUQueueWorkDoneCallback callback,
+ void* userdata) {
+ // The error status depends on the type of error so we let the validation function choose it
+ WGPUQueueWorkDoneStatus status;
+ if (GetDevice()->ConsumedError(ValidateOnSubmittedWorkDone(signalValue, &status))) {
+ callback(status, userdata);
+ return;
}
- void QueueBase::HandleDeviceLoss() {
- for (auto& task : mTasksInFlight.IterateAll()) {
- task->HandleDeviceLoss();
- }
- mTasksInFlight.Clear();
+ std::unique_ptr<SubmittedWorkDone> task =
+ std::make_unique<SubmittedWorkDone>(callback, userdata);
+
+ // Technically we only need to wait for previously submitted work but OnSubmittedWorkDone is
+ // also used to make sure ALL queue work is finished in tests, so we also wait for pending
+    // commands (this is non-observable outside of tests so it's ok to deviate a bit from the
+ // spec).
+ TrackTask(std::move(task), GetDevice()->GetPendingCommandSerial());
+
+ TRACE_EVENT1(GetDevice()->GetPlatform(), General, "Queue::APIOnSubmittedWorkDone", "serial",
+ uint64_t(GetDevice()->GetPendingCommandSerial()));
+}
+
+void QueueBase::TrackTask(std::unique_ptr<TaskInFlight> task, ExecutionSerial serial) {
+ mTasksInFlight.Enqueue(std::move(task), serial);
+ GetDevice()->AddFutureSerial(serial);
+}
+
+void QueueBase::Tick(ExecutionSerial finishedSerial) {
+ // If a user calls Queue::Submit inside a task, for example in a Buffer::MapAsync callback,
+    // then the device will be ticked, which in turn ticks the queue, causing reentrance here.
+ // To prevent the reentrant call from invalidating mTasksInFlight while in use by the first
+ // call, we remove the tasks to finish from the queue, update mTasksInFlight, then run the
+ // callbacks.
+ TRACE_EVENT1(GetDevice()->GetPlatform(), General, "Queue::Tick", "finishedSerial",
+ uint64_t(finishedSerial));
+
+ std::vector<std::unique_ptr<TaskInFlight>> tasks;
+ for (auto& task : mTasksInFlight.IterateUpTo(finishedSerial)) {
+ tasks.push_back(std::move(task));
}
+ mTasksInFlight.ClearUpTo(finishedSerial);
- void QueueBase::APIWriteBuffer(BufferBase* buffer,
- uint64_t bufferOffset,
- const void* data,
- size_t size) {
- GetDevice()->ConsumedError(WriteBuffer(buffer, bufferOffset, data, size));
+ for (auto& task : tasks) {
+ task->Finish(GetDevice()->GetPlatform(), finishedSerial);
}
+}
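The re-entrancy guard described in the comment above (detach the finished tasks before running their callbacks) is a general pattern; a minimal standalone sketch of the same idea, independent of Dawn's SerialQueue, might look like this:

#include <functional>
#include <utility>
#include <vector>

struct CallbackQueue {
    std::vector<std::function<void()>> pending;

    // Callbacks may re-entrantly enqueue new work, so detach the current batch
    // first; `pending` is never iterated while it is being mutated.
    void Flush() {
        std::vector<std::function<void()>> batch = std::move(pending);
        pending.clear();
        for (auto& callback : batch) {
            callback();
        }
    }
};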
- MaybeError QueueBase::WriteBuffer(BufferBase* buffer,
+void QueueBase::HandleDeviceLoss() {
+ for (auto& task : mTasksInFlight.IterateAll()) {
+ task->HandleDeviceLoss();
+ }
+ mTasksInFlight.Clear();
+}
+
+void QueueBase::APIWriteBuffer(BufferBase* buffer,
+ uint64_t bufferOffset,
+ const void* data,
+ size_t size) {
+ GetDevice()->ConsumedError(WriteBuffer(buffer, bufferOffset, data, size));
+}
+
+MaybeError QueueBase::WriteBuffer(BufferBase* buffer,
+ uint64_t bufferOffset,
+ const void* data,
+ size_t size) {
+ DAWN_TRY(GetDevice()->ValidateIsAlive());
+ DAWN_TRY(GetDevice()->ValidateObject(this));
+ DAWN_TRY(ValidateWriteBuffer(GetDevice(), buffer, bufferOffset, size));
+ DAWN_TRY(buffer->ValidateCanUseOnQueueNow());
+ return WriteBufferImpl(buffer, bufferOffset, data, size);
+}
+
+MaybeError QueueBase::WriteBufferImpl(BufferBase* buffer,
uint64_t bufferOffset,
const void* data,
size_t size) {
- DAWN_TRY(GetDevice()->ValidateIsAlive());
- DAWN_TRY(GetDevice()->ValidateObject(this));
- DAWN_TRY(ValidateWriteBuffer(GetDevice(), buffer, bufferOffset, size));
- DAWN_TRY(buffer->ValidateCanUseOnQueueNow());
- return WriteBufferImpl(buffer, bufferOffset, data, size);
+ if (size == 0) {
+ return {};
}
- MaybeError QueueBase::WriteBufferImpl(BufferBase* buffer,
- uint64_t bufferOffset,
- const void* data,
- size_t size) {
- if (size == 0) {
- return {};
- }
-
- DeviceBase* device = GetDevice();
+ DeviceBase* device = GetDevice();
- UploadHandle uploadHandle;
- DAWN_TRY_ASSIGN(uploadHandle, device->GetDynamicUploader()->Allocate(
- size, device->GetPendingCommandSerial(),
- kCopyBufferToBufferOffsetAlignment));
- ASSERT(uploadHandle.mappedBuffer != nullptr);
-
- memcpy(uploadHandle.mappedBuffer, data, size);
-
- device->AddFutureSerial(device->GetPendingCommandSerial());
-
- return device->CopyFromStagingToBuffer(uploadHandle.stagingBuffer, uploadHandle.startOffset,
- buffer, bufferOffset, size);
- }
+ UploadHandle uploadHandle;
+ DAWN_TRY_ASSIGN(uploadHandle,
+ device->GetDynamicUploader()->Allocate(size, device->GetPendingCommandSerial(),
+ kCopyBufferToBufferOffsetAlignment));
+ ASSERT(uploadHandle.mappedBuffer != nullptr);
- void QueueBase::APIWriteTexture(const ImageCopyTexture* destination,
- const void* data,
- size_t dataSize,
- const TextureDataLayout* dataLayout,
- const Extent3D* writeSize) {
- GetDevice()->ConsumedError(
- WriteTextureInternal(destination, data, dataSize, *dataLayout, writeSize));
- }
+ memcpy(uploadHandle.mappedBuffer, data, size);
- MaybeError QueueBase::WriteTextureInternal(const ImageCopyTexture* destination,
- const void* data,
- size_t dataSize,
- const TextureDataLayout& dataLayout,
- const Extent3D* writeSize) {
- DAWN_TRY(ValidateWriteTexture(destination, dataSize, dataLayout, writeSize));
+ device->AddFutureSerial(device->GetPendingCommandSerial());
- if (writeSize->width == 0 || writeSize->height == 0 || writeSize->depthOrArrayLayers == 0) {
- return {};
- }
+ return device->CopyFromStagingToBuffer(uploadHandle.stagingBuffer, uploadHandle.startOffset,
+ buffer, bufferOffset, size);
+}
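At the API surface this path corresponds to wgpuQueueWriteBuffer; a small, hypothetical call site (the queue and buffer handles are assumptions) that matches the copy-through-staging behaviour above:

#include <webgpu/webgpu.h>
#include <cstdint>

static void UploadConstants(WGPUQueue queue, WGPUBuffer buffer) {
    const uint32_t kData[4] = {1, 2, 3, 4};
    // The bytes are copied into a staging allocation before the call returns,
    // so stack-local data is fine; a zero-sized write is a no-op.
    wgpuQueueWriteBuffer(queue, buffer, /*bufferOffset=*/0, kData, sizeof(kData));
}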
- const TexelBlockInfo& blockInfo =
- destination->texture->GetFormat().GetAspectInfo(destination->aspect).block;
- TextureDataLayout layout = dataLayout;
- ApplyDefaultTextureDataLayoutOptions(&layout, blockInfo, *writeSize);
- return WriteTextureImpl(*destination, data, layout, *writeSize);
- }
+void QueueBase::APIWriteTexture(const ImageCopyTexture* destination,
+ const void* data,
+ size_t dataSize,
+ const TextureDataLayout* dataLayout,
+ const Extent3D* writeSize) {
+ GetDevice()->ConsumedError(
+ WriteTextureInternal(destination, data, dataSize, *dataLayout, writeSize));
+}
- MaybeError QueueBase::WriteTextureImpl(const ImageCopyTexture& destination,
+MaybeError QueueBase::WriteTextureInternal(const ImageCopyTexture* destination,
const void* data,
+ size_t dataSize,
const TextureDataLayout& dataLayout,
- const Extent3D& writeSizePixel) {
- const Format& format = destination.texture->GetFormat();
- const TexelBlockInfo& blockInfo = format.GetAspectInfo(destination.aspect).block;
-
- // We are only copying the part of the data that will appear in the texture.
- // Note that validating texture copy range ensures that writeSizePixel->width and
- // writeSizePixel->height are multiples of blockWidth and blockHeight respectively.
- ASSERT(writeSizePixel.width % blockInfo.width == 0);
- ASSERT(writeSizePixel.height % blockInfo.height == 0);
- uint32_t alignedBytesPerRow = writeSizePixel.width / blockInfo.width * blockInfo.byteSize;
- uint32_t alignedRowsPerImage = writeSizePixel.height / blockInfo.height;
-
- uint32_t optimalBytesPerRowAlignment = GetDevice()->GetOptimalBytesPerRowAlignment();
- uint32_t optimallyAlignedBytesPerRow =
- Align(alignedBytesPerRow, optimalBytesPerRowAlignment);
-
- UploadHandle uploadHandle;
- DAWN_TRY_ASSIGN(uploadHandle,
- UploadTextureDataAligningBytesPerRowAndOffset(
- GetDevice(), data, alignedBytesPerRow, optimallyAlignedBytesPerRow,
- alignedRowsPerImage, dataLayout, format.HasDepthOrStencil(), blockInfo,
- writeSizePixel));
-
- TextureDataLayout passDataLayout = dataLayout;
- passDataLayout.offset = uploadHandle.startOffset;
- passDataLayout.bytesPerRow = optimallyAlignedBytesPerRow;
- passDataLayout.rowsPerImage = alignedRowsPerImage;
-
- TextureCopy textureCopy;
- textureCopy.texture = destination.texture;
- textureCopy.mipLevel = destination.mipLevel;
- textureCopy.origin = destination.origin;
- textureCopy.aspect = ConvertAspect(format, destination.aspect);
-
- DeviceBase* device = GetDevice();
-
- device->AddFutureSerial(device->GetPendingCommandSerial());
-
- return device->CopyFromStagingToTexture(uploadHandle.stagingBuffer, passDataLayout,
- &textureCopy, writeSizePixel);
- }
+ const Extent3D* writeSize) {
+ DAWN_TRY(ValidateWriteTexture(destination, dataSize, dataLayout, writeSize));
- void QueueBase::APICopyTextureForBrowser(const ImageCopyTexture* source,
- const ImageCopyTexture* destination,
- const Extent3D* copySize,
- const CopyTextureForBrowserOptions* options) {
- GetDevice()->ConsumedError(
- CopyTextureForBrowserInternal(source, destination, copySize, options));
+ if (writeSize->width == 0 || writeSize->height == 0 || writeSize->depthOrArrayLayers == 0) {
+ return {};
}
- MaybeError QueueBase::CopyTextureForBrowserInternal(
- const ImageCopyTexture* source,
- const ImageCopyTexture* destination,
- const Extent3D* copySize,
- const CopyTextureForBrowserOptions* options) {
- if (GetDevice()->IsValidationEnabled()) {
- DAWN_TRY_CONTEXT(
- ValidateCopyTextureForBrowser(GetDevice(), source, destination, copySize, options),
- "validating CopyTextureForBrowser from %s to %s", source->texture,
- destination->texture);
- }
-
- return DoCopyTextureForBrowser(GetDevice(), source, destination, copySize, options);
+ const TexelBlockInfo& blockInfo =
+ destination->texture->GetFormat().GetAspectInfo(destination->aspect).block;
+ TextureDataLayout layout = dataLayout;
+ ApplyDefaultTextureDataLayoutOptions(&layout, blockInfo, *writeSize);
+ return WriteTextureImpl(*destination, data, layout, *writeSize);
+}
+
+MaybeError QueueBase::WriteTextureImpl(const ImageCopyTexture& destination,
+ const void* data,
+ const TextureDataLayout& dataLayout,
+ const Extent3D& writeSizePixel) {
+ const Format& format = destination.texture->GetFormat();
+ const TexelBlockInfo& blockInfo = format.GetAspectInfo(destination.aspect).block;
+
+ // We are only copying the part of the data that will appear in the texture.
+    // Note that validating the texture copy range ensures that writeSizePixel.width and
+    // writeSizePixel.height are multiples of blockWidth and blockHeight respectively.
+ ASSERT(writeSizePixel.width % blockInfo.width == 0);
+ ASSERT(writeSizePixel.height % blockInfo.height == 0);
+ uint32_t alignedBytesPerRow = writeSizePixel.width / blockInfo.width * blockInfo.byteSize;
+ uint32_t alignedRowsPerImage = writeSizePixel.height / blockInfo.height;
+
+ uint32_t optimalBytesPerRowAlignment = GetDevice()->GetOptimalBytesPerRowAlignment();
+ uint32_t optimallyAlignedBytesPerRow = Align(alignedBytesPerRow, optimalBytesPerRowAlignment);
+
+ UploadHandle uploadHandle;
+ DAWN_TRY_ASSIGN(uploadHandle, UploadTextureDataAligningBytesPerRowAndOffset(
+ GetDevice(), data, alignedBytesPerRow,
+ optimallyAlignedBytesPerRow, alignedRowsPerImage, dataLayout,
+ format.HasDepthOrStencil(), blockInfo, writeSizePixel));
+
+ TextureDataLayout passDataLayout = dataLayout;
+ passDataLayout.offset = uploadHandle.startOffset;
+ passDataLayout.bytesPerRow = optimallyAlignedBytesPerRow;
+ passDataLayout.rowsPerImage = alignedRowsPerImage;
+
+ TextureCopy textureCopy;
+ textureCopy.texture = destination.texture;
+ textureCopy.mipLevel = destination.mipLevel;
+ textureCopy.origin = destination.origin;
+ textureCopy.aspect = ConvertAspect(format, destination.aspect);
+
+ DeviceBase* device = GetDevice();
+
+ device->AddFutureSerial(device->GetPendingCommandSerial());
+
+ return device->CopyFromStagingToTexture(uploadHandle.stagingBuffer, passDataLayout,
+ &textureCopy, writeSizePixel);
+}
+
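The bytes-per-row arithmetic above can be illustrated with concrete numbers. This is a standalone sketch: the 256-byte value only stands in for whatever GetOptimalBytesPerRowAlignment() returns on a given backend, and AlignUp mirrors the Align helper used in the function.

#include <cstdint>

// Round `value` up to the next multiple of `alignment` (a power of two).
constexpr uint32_t AlignUp(uint32_t value, uint32_t alignment) {
    return (value + alignment - 1) & ~(alignment - 1);
}

// Example: a 300-texel-wide row of a BC1-style format (4x4 blocks, 8 bytes per block).
constexpr uint32_t kWidth = 300;
constexpr uint32_t kBlockWidth = 4;
constexpr uint32_t kBlockByteSize = 8;
constexpr uint32_t kTightBytesPerRow = kWidth / kBlockWidth * kBlockByteSize;  // 600 bytes
constexpr uint32_t kAlignedBytesPerRow = AlignUp(kTightBytesPerRow, 256);      // padded to 768
static_assert(kAlignedBytesPerRow == 768, "rows are padded up to the optimal alignment");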
+void QueueBase::APICopyTextureForBrowser(const ImageCopyTexture* source,
+ const ImageCopyTexture* destination,
+ const Extent3D* copySize,
+ const CopyTextureForBrowserOptions* options) {
+ GetDevice()->ConsumedError(
+ CopyTextureForBrowserInternal(source, destination, copySize, options));
+}
+
+MaybeError QueueBase::CopyTextureForBrowserInternal(const ImageCopyTexture* source,
+ const ImageCopyTexture* destination,
+ const Extent3D* copySize,
+ const CopyTextureForBrowserOptions* options) {
+ if (GetDevice()->IsValidationEnabled()) {
+ DAWN_TRY_CONTEXT(
+ ValidateCopyTextureForBrowser(GetDevice(), source, destination, copySize, options),
+ "validating CopyTextureForBrowser from %s to %s", source->texture,
+ destination->texture);
}
- MaybeError QueueBase::ValidateSubmit(uint32_t commandCount,
- CommandBufferBase* const* commands) const {
- TRACE_EVENT0(GetDevice()->GetPlatform(), Validation, "Queue::ValidateSubmit");
- DAWN_TRY(GetDevice()->ValidateObject(this));
+ return DoCopyTextureForBrowser(GetDevice(), source, destination, copySize, options);
+}
- for (uint32_t i = 0; i < commandCount; ++i) {
- DAWN_TRY(GetDevice()->ValidateObject(commands[i]));
- DAWN_TRY(commands[i]->ValidateCanUseInSubmitNow());
+MaybeError QueueBase::ValidateSubmit(uint32_t commandCount,
+ CommandBufferBase* const* commands) const {
+ TRACE_EVENT0(GetDevice()->GetPlatform(), Validation, "Queue::ValidateSubmit");
+ DAWN_TRY(GetDevice()->ValidateObject(this));
- const CommandBufferResourceUsage& usages = commands[i]->GetResourceUsages();
+ for (uint32_t i = 0; i < commandCount; ++i) {
+ DAWN_TRY(GetDevice()->ValidateObject(commands[i]));
+ DAWN_TRY(commands[i]->ValidateCanUseInSubmitNow());
- for (const SyncScopeResourceUsage& scope : usages.renderPasses) {
- for (const BufferBase* buffer : scope.buffers) {
- DAWN_TRY(buffer->ValidateCanUseOnQueueNow());
- }
+ const CommandBufferResourceUsage& usages = commands[i]->GetResourceUsages();
- for (const TextureBase* texture : scope.textures) {
- DAWN_TRY(texture->ValidateCanUseInSubmitNow());
- }
+ for (const SyncScopeResourceUsage& scope : usages.renderPasses) {
+ for (const BufferBase* buffer : scope.buffers) {
+ DAWN_TRY(buffer->ValidateCanUseOnQueueNow());
+ }
- for (const ExternalTextureBase* externalTexture : scope.externalTextures) {
- DAWN_TRY(externalTexture->ValidateCanUseInSubmitNow());
- }
+ for (const TextureBase* texture : scope.textures) {
+ DAWN_TRY(texture->ValidateCanUseInSubmitNow());
}
- for (const ComputePassResourceUsage& pass : usages.computePasses) {
- for (const BufferBase* buffer : pass.referencedBuffers) {
- DAWN_TRY(buffer->ValidateCanUseOnQueueNow());
- }
- for (const TextureBase* texture : pass.referencedTextures) {
- DAWN_TRY(texture->ValidateCanUseInSubmitNow());
- }
- for (const ExternalTextureBase* externalTexture : pass.referencedExternalTextures) {
- DAWN_TRY(externalTexture->ValidateCanUseInSubmitNow());
- }
+ for (const ExternalTextureBase* externalTexture : scope.externalTextures) {
+ DAWN_TRY(externalTexture->ValidateCanUseInSubmitNow());
}
+ }
- for (const BufferBase* buffer : usages.topLevelBuffers) {
+ for (const ComputePassResourceUsage& pass : usages.computePasses) {
+ for (const BufferBase* buffer : pass.referencedBuffers) {
DAWN_TRY(buffer->ValidateCanUseOnQueueNow());
}
- for (const TextureBase* texture : usages.topLevelTextures) {
+ for (const TextureBase* texture : pass.referencedTextures) {
DAWN_TRY(texture->ValidateCanUseInSubmitNow());
}
- for (const QuerySetBase* querySet : usages.usedQuerySets) {
- DAWN_TRY(querySet->ValidateCanUseInSubmitNow());
+ for (const ExternalTextureBase* externalTexture : pass.referencedExternalTextures) {
+ DAWN_TRY(externalTexture->ValidateCanUseInSubmitNow());
}
}
- return {};
+ for (const BufferBase* buffer : usages.topLevelBuffers) {
+ DAWN_TRY(buffer->ValidateCanUseOnQueueNow());
+ }
+ for (const TextureBase* texture : usages.topLevelTextures) {
+ DAWN_TRY(texture->ValidateCanUseInSubmitNow());
+ }
+ for (const QuerySetBase* querySet : usages.usedQuerySets) {
+ DAWN_TRY(querySet->ValidateCanUseInSubmitNow());
+ }
}
- MaybeError QueueBase::ValidateOnSubmittedWorkDone(uint64_t signalValue,
- WGPUQueueWorkDoneStatus* status) const {
- *status = WGPUQueueWorkDoneStatus_DeviceLost;
- DAWN_TRY(GetDevice()->ValidateIsAlive());
+ return {};
+}
- *status = WGPUQueueWorkDoneStatus_Error;
- DAWN_TRY(GetDevice()->ValidateObject(this));
+MaybeError QueueBase::ValidateOnSubmittedWorkDone(uint64_t signalValue,
+ WGPUQueueWorkDoneStatus* status) const {
+ *status = WGPUQueueWorkDoneStatus_DeviceLost;
+ DAWN_TRY(GetDevice()->ValidateIsAlive());
- DAWN_INVALID_IF(signalValue != 0, "SignalValue (%u) is not 0.", signalValue);
+ *status = WGPUQueueWorkDoneStatus_Error;
+ DAWN_TRY(GetDevice()->ValidateObject(this));
- return {};
- }
+ DAWN_INVALID_IF(signalValue != 0, "SignalValue (%u) is not 0.", signalValue);
- MaybeError QueueBase::ValidateWriteTexture(const ImageCopyTexture* destination,
- size_t dataSize,
- const TextureDataLayout& dataLayout,
- const Extent3D* writeSize) const {
- DAWN_TRY(GetDevice()->ValidateIsAlive());
- DAWN_TRY(GetDevice()->ValidateObject(this));
- DAWN_TRY(GetDevice()->ValidateObject(destination->texture));
+ return {};
+}
- DAWN_TRY(ValidateImageCopyTexture(GetDevice(), *destination, *writeSize));
+MaybeError QueueBase::ValidateWriteTexture(const ImageCopyTexture* destination,
+ size_t dataSize,
+ const TextureDataLayout& dataLayout,
+ const Extent3D* writeSize) const {
+ DAWN_TRY(GetDevice()->ValidateIsAlive());
+ DAWN_TRY(GetDevice()->ValidateObject(this));
+ DAWN_TRY(GetDevice()->ValidateObject(destination->texture));
- DAWN_INVALID_IF(dataLayout.offset > dataSize,
- "Data offset (%u) is greater than the data size (%u).", dataLayout.offset,
- dataSize);
+ DAWN_TRY(ValidateImageCopyTexture(GetDevice(), *destination, *writeSize));
- DAWN_INVALID_IF(!(destination->texture->GetUsage() & wgpu::TextureUsage::CopyDst),
- "Usage (%s) of %s does not include %s.", destination->texture->GetUsage(),
- destination->texture, wgpu::TextureUsage::CopyDst);
+ DAWN_INVALID_IF(dataLayout.offset > dataSize,
+ "Data offset (%u) is greater than the data size (%u).", dataLayout.offset,
+ dataSize);
- DAWN_INVALID_IF(destination->texture->GetSampleCount() > 1,
- "Sample count (%u) of %s is not 1", destination->texture->GetSampleCount(),
- destination->texture);
+ DAWN_INVALID_IF(!(destination->texture->GetUsage() & wgpu::TextureUsage::CopyDst),
+ "Usage (%s) of %s does not include %s.", destination->texture->GetUsage(),
+ destination->texture, wgpu::TextureUsage::CopyDst);
- DAWN_TRY(ValidateLinearToDepthStencilCopyRestrictions(*destination));
- // We validate texture copy range before validating linear texture data,
- // because in the latter we divide copyExtent.width by blockWidth and
- // copyExtent.height by blockHeight while the divisibility conditions are
- // checked in validating texture copy range.
- DAWN_TRY(ValidateTextureCopyRange(GetDevice(), *destination, *writeSize));
+ DAWN_INVALID_IF(destination->texture->GetSampleCount() > 1, "Sample count (%u) of %s is not 1",
+ destination->texture->GetSampleCount(), destination->texture);
- const TexelBlockInfo& blockInfo =
- destination->texture->GetFormat().GetAspectInfo(destination->aspect).block;
+ DAWN_TRY(ValidateLinearToDepthStencilCopyRestrictions(*destination));
+ // We validate texture copy range before validating linear texture data,
+ // because in the latter we divide copyExtent.width by blockWidth and
+ // copyExtent.height by blockHeight while the divisibility conditions are
+ // checked in validating texture copy range.
+ DAWN_TRY(ValidateTextureCopyRange(GetDevice(), *destination, *writeSize));
- DAWN_TRY(ValidateLinearTextureData(dataLayout, dataSize, blockInfo, *writeSize));
+ const TexelBlockInfo& blockInfo =
+ destination->texture->GetFormat().GetAspectInfo(destination->aspect).block;
- DAWN_TRY(destination->texture->ValidateCanUseInSubmitNow());
+ DAWN_TRY(ValidateLinearTextureData(dataLayout, dataSize, blockInfo, *writeSize));
- return {};
- }
+ DAWN_TRY(destination->texture->ValidateCanUseInSubmitNow());
- void QueueBase::SubmitInternal(uint32_t commandCount, CommandBufferBase* const* commands) {
- DeviceBase* device = GetDevice();
- if (device->ConsumedError(device->ValidateIsAlive())) {
- // If device is lost, don't let any commands be submitted
- return;
- }
+ return {};
+}
- TRACE_EVENT0(device->GetPlatform(), General, "Queue::Submit");
- if (device->IsValidationEnabled() &&
- device->ConsumedError(ValidateSubmit(commandCount, commands))) {
- return;
- }
- ASSERT(!IsError());
+void QueueBase::SubmitInternal(uint32_t commandCount, CommandBufferBase* const* commands) {
+ DeviceBase* device = GetDevice();
+ if (device->ConsumedError(device->ValidateIsAlive())) {
+ // If device is lost, don't let any commands be submitted
+ return;
+ }
- if (device->ConsumedError(SubmitImpl(commandCount, commands))) {
- return;
- }
+ TRACE_EVENT0(device->GetPlatform(), General, "Queue::Submit");
+ if (device->IsValidationEnabled() &&
+ device->ConsumedError(ValidateSubmit(commandCount, commands))) {
+ return;
+ }
+ ASSERT(!IsError());
+
+ if (device->ConsumedError(SubmitImpl(commandCount, commands))) {
+ return;
}
+}
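For reference, the public entry point that lands in SubmitInternal is wgpuQueueSubmit; a minimal hypothetical call site (handles assumed) is sketched below. Because APISubmit destroys the command buffers after submitting, each command buffer can only be submitted once.

#include <webgpu/webgpu.h>

static void SubmitOne(WGPUQueue queue, WGPUCommandBuffer commands) {
    // Command buffers are consumed by the submit; re-submitting `commands`
    // later would fail validation.
    wgpuQueueSubmit(queue, /*commandCount=*/1, &commands);
}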
} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/Queue.h b/chromium/third_party/dawn/src/dawn/native/Queue.h
index cb2f0183d63..5bfd9a0c923 100644
--- a/chromium/third_party/dawn/src/dawn/native/Queue.h
+++ b/chromium/third_party/dawn/src/dawn/native/Queue.h
@@ -15,96 +15,96 @@
#ifndef SRC_DAWN_NATIVE_QUEUE_H_
#define SRC_DAWN_NATIVE_QUEUE_H_
+#include <memory>
+
#include "dawn/common/SerialQueue.h"
#include "dawn/native/Error.h"
#include "dawn/native/Forward.h"
#include "dawn/native/IntegerTypes.h"
#include "dawn/native/ObjectBase.h"
+#include "dawn/native/DawnNative.h"
#include "dawn/native/dawn_platform.h"
+#include "dawn/platform/DawnPlatform.h"
namespace dawn::native {
- class QueueBase : public ApiObjectBase {
- public:
- struct TaskInFlight {
- virtual ~TaskInFlight();
- virtual void Finish() = 0;
- virtual void HandleDeviceLoss() = 0;
- };
-
- ~QueueBase() override;
-
- static QueueBase* MakeError(DeviceBase* device);
-
- ObjectType GetType() const override;
-
- // Dawn API
- void APISubmit(uint32_t commandCount, CommandBufferBase* const* commands);
- void APIOnSubmittedWorkDone(uint64_t signalValue,
- WGPUQueueWorkDoneCallback callback,
- void* userdata);
- void APIWriteBuffer(BufferBase* buffer,
- uint64_t bufferOffset,
- const void* data,
- size_t size);
- void APIWriteTexture(const ImageCopyTexture* destination,
- const void* data,
- size_t dataSize,
- const TextureDataLayout* dataLayout,
- const Extent3D* writeSize);
- void APICopyTextureForBrowser(const ImageCopyTexture* source,
- const ImageCopyTexture* destination,
- const Extent3D* copySize,
- const CopyTextureForBrowserOptions* options);
-
- MaybeError WriteBuffer(BufferBase* buffer,
- uint64_t bufferOffset,
- const void* data,
- size_t size);
- void TrackTask(std::unique_ptr<TaskInFlight> task, ExecutionSerial serial);
- void Tick(ExecutionSerial finishedSerial);
- void HandleDeviceLoss();
-
- protected:
- QueueBase(DeviceBase* device, const QueueDescriptor* descriptor);
- QueueBase(DeviceBase* device, ObjectBase::ErrorTag tag);
- void DestroyImpl() override;
-
- private:
- MaybeError WriteTextureInternal(const ImageCopyTexture* destination,
+class QueueBase : public ApiObjectBase {
+ public:
+ struct TaskInFlight {
+ virtual ~TaskInFlight();
+ virtual void Finish(dawn::platform::Platform* platform, ExecutionSerial serial) = 0;
+ virtual void HandleDeviceLoss() = 0;
+ };
+
+ ~QueueBase() override;
+
+ static QueueBase* MakeError(DeviceBase* device);
+
+ ObjectType GetType() const override;
+
+ // Dawn API
+ void APISubmit(uint32_t commandCount, CommandBufferBase* const* commands);
+ void APIOnSubmittedWorkDone(uint64_t signalValue,
+ WGPUQueueWorkDoneCallback callback,
+ void* userdata);
+ void APIWriteBuffer(BufferBase* buffer, uint64_t bufferOffset, const void* data, size_t size);
+ void APIWriteTexture(const ImageCopyTexture* destination,
+ const void* data,
+ size_t dataSize,
+ const TextureDataLayout* dataLayout,
+ const Extent3D* writeSize);
+ void APICopyTextureForBrowser(const ImageCopyTexture* source,
+ const ImageCopyTexture* destination,
+ const Extent3D* copySize,
+ const CopyTextureForBrowserOptions* options);
+
+ MaybeError WriteBuffer(BufferBase* buffer,
+ uint64_t bufferOffset,
+ const void* data,
+ size_t size);
+ void TrackTask(std::unique_ptr<TaskInFlight> task, ExecutionSerial serial);
+ void Tick(ExecutionSerial finishedSerial);
+ void HandleDeviceLoss();
+
+ protected:
+ QueueBase(DeviceBase* device, const QueueDescriptor* descriptor);
+ QueueBase(DeviceBase* device, ObjectBase::ErrorTag tag);
+ void DestroyImpl() override;
+
+ private:
+ MaybeError WriteTextureInternal(const ImageCopyTexture* destination,
+ const void* data,
+ size_t dataSize,
+ const TextureDataLayout& dataLayout,
+ const Extent3D* writeSize);
+ MaybeError CopyTextureForBrowserInternal(const ImageCopyTexture* source,
+ const ImageCopyTexture* destination,
+ const Extent3D* copySize,
+ const CopyTextureForBrowserOptions* options);
+
+ virtual MaybeError SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) = 0;
+ virtual MaybeError WriteBufferImpl(BufferBase* buffer,
+ uint64_t bufferOffset,
+ const void* data,
+ size_t size);
+ virtual MaybeError WriteTextureImpl(const ImageCopyTexture& destination,
const void* data,
- size_t dataSize,
const TextureDataLayout& dataLayout,
- const Extent3D* writeSize);
- MaybeError CopyTextureForBrowserInternal(const ImageCopyTexture* source,
- const ImageCopyTexture* destination,
- const Extent3D* copySize,
- const CopyTextureForBrowserOptions* options);
-
- virtual MaybeError SubmitImpl(uint32_t commandCount,
- CommandBufferBase* const* commands) = 0;
- virtual MaybeError WriteBufferImpl(BufferBase* buffer,
- uint64_t bufferOffset,
- const void* data,
- size_t size);
- virtual MaybeError WriteTextureImpl(const ImageCopyTexture& destination,
- const void* data,
- const TextureDataLayout& dataLayout,
- const Extent3D& writeSize);
-
- MaybeError ValidateSubmit(uint32_t commandCount, CommandBufferBase* const* commands) const;
- MaybeError ValidateOnSubmittedWorkDone(uint64_t signalValue,
- WGPUQueueWorkDoneStatus* status) const;
- MaybeError ValidateWriteTexture(const ImageCopyTexture* destination,
- size_t dataSize,
- const TextureDataLayout& dataLayout,
- const Extent3D* writeSize) const;
+ const Extent3D& writeSize);
- void SubmitInternal(uint32_t commandCount, CommandBufferBase* const* commands);
+ MaybeError ValidateSubmit(uint32_t commandCount, CommandBufferBase* const* commands) const;
+ MaybeError ValidateOnSubmittedWorkDone(uint64_t signalValue,
+ WGPUQueueWorkDoneStatus* status) const;
+ MaybeError ValidateWriteTexture(const ImageCopyTexture* destination,
+ size_t dataSize,
+ const TextureDataLayout& dataLayout,
+ const Extent3D* writeSize) const;
- SerialQueue<ExecutionSerial, std::unique_ptr<TaskInFlight>> mTasksInFlight;
- };
+ void SubmitInternal(uint32_t commandCount, CommandBufferBase* const* commands);
+
+ SerialQueue<ExecutionSerial, std::unique_ptr<TaskInFlight>> mTasksInFlight;
+};
} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/RefCountedWithExternalCount.cpp b/chromium/third_party/dawn/src/dawn/native/RefCountedWithExternalCount.cpp
new file mode 100644
index 00000000000..523b3a5d799
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/RefCountedWithExternalCount.cpp
@@ -0,0 +1,31 @@
+// Copyright 2022 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/RefCountedWithExternalCount.h"
+
+namespace dawn::native {
+
+void RefCountedWithExternalCount::APIReference() {
+ mExternalRefCount.Increment();
+ RefCounted::APIReference();
+}
+
+void RefCountedWithExternalCount::APIRelease() {
+ if (mExternalRefCount.Decrement()) {
+ WillDropLastExternalRef();
+ }
+ RefCounted::APIRelease();
+}
+
+} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/RefCountedWithExternalCount.h b/chromium/third_party/dawn/src/dawn/native/RefCountedWithExternalCount.h
new file mode 100644
index 00000000000..d38c7e3e152
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/RefCountedWithExternalCount.h
@@ -0,0 +1,45 @@
+// Copyright 2022 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef SRC_DAWN_NATIVE_REFCOUNTEDWITHEXTERNALCOUNT_H_
+#define SRC_DAWN_NATIVE_REFCOUNTEDWITHEXTERNALCOUNT_H_
+
+#include "dawn/common/RefCounted.h"
+
+namespace dawn::native {
+
+// RefCountedWithExternalCount is a version of RefCounted which tracks a separate
+// refcount for calls to APIReference/APIRelease (refs added/removed by the application).
+// The external refcount starts at 1, and the total refcount starts at 1, i.e. the first
+// ref is the external ref.
+// Then, when the external refcount drops to zero, WillDropLastExternalRef is called.
+// The derived class should override the behavior of WillDropLastExternalRef.
+class RefCountedWithExternalCount : private RefCounted {
+ public:
+ using RefCounted::RefCounted;
+ using RefCounted::Reference;
+ using RefCounted::Release;
+
+ void APIReference();
+ void APIRelease();
+
+ private:
+ virtual void WillDropLastExternalRef() = 0;
+
+ RefCount mExternalRefCount;
+};
+
+} // namespace dawn::native
+
+#endif // SRC_DAWN_NATIVE_REFCOUNTEDWITHEXTERNALCOUNT_H_
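The split between external (API) references and the total reference count can be illustrated with a self-contained toy class. This is only a sketch of the pattern described in the comment above, not Dawn's implementation (which builds on RefCounted and RefCount):

#include <cstdint>

// Toy, heap-allocated object: one counter for application-held refs and one
// for total refs; dropping the last application ref triggers a hook even
// though internal refs may still keep the object alive.
class Widget {
  public:
    void ApiReference() { ++mExternal; ++mTotal; }
    void ApiRelease() {
        if (--mExternal == 0) {
            OnLastExternalRefDropped();
        }
        InternalRelease();
    }
    void InternalReference() { ++mTotal; }
    void InternalRelease() {
        if (--mTotal == 0) {
            delete this;
        }
    }

  private:
    void OnLastExternalRefDropped() { /* e.g. eagerly release expensive resources */ }

    uint64_t mExternal = 1;  // the first ref is the external ref
    uint64_t mTotal = 1;     // total count also starts at 1
};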
diff --git a/chromium/third_party/dawn/src/dawn/native/RenderBundle.cpp b/chromium/third_party/dawn/src/dawn/native/RenderBundle.cpp
index da1018823ba..2781983d823 100644
--- a/chromium/third_party/dawn/src/dawn/native/RenderBundle.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/RenderBundle.cpp
@@ -14,6 +14,8 @@
#include "dawn/native/RenderBundle.h"
+#include <utility>
+
#include "dawn/common/BitSetIterator.h"
#include "dawn/native/Commands.h"
#include "dawn/native/Device.h"
@@ -22,70 +24,69 @@
namespace dawn::native {
- RenderBundleBase::RenderBundleBase(RenderBundleEncoder* encoder,
- const RenderBundleDescriptor* descriptor,
- Ref<AttachmentState> attachmentState,
- bool depthReadOnly,
- bool stencilReadOnly,
- RenderPassResourceUsage resourceUsage,
- IndirectDrawMetadata indirectDrawMetadata)
- : ApiObjectBase(encoder->GetDevice(), kLabelNotImplemented),
- mCommands(encoder->AcquireCommands()),
- mIndirectDrawMetadata(std::move(indirectDrawMetadata)),
- mAttachmentState(std::move(attachmentState)),
- mDepthReadOnly(depthReadOnly),
- mStencilReadOnly(stencilReadOnly),
- mResourceUsage(std::move(resourceUsage)) {
- TrackInDevice();
- }
-
- void RenderBundleBase::DestroyImpl() {
- FreeCommands(&mCommands);
-
- // Remove reference to the attachment state so that we don't have lingering references to
- // it preventing it from being uncached in the device.
- mAttachmentState = nullptr;
- }
-
- // static
- RenderBundleBase* RenderBundleBase::MakeError(DeviceBase* device) {
- return new RenderBundleBase(device, ObjectBase::kError);
- }
-
- RenderBundleBase::RenderBundleBase(DeviceBase* device, ErrorTag errorTag)
- : ApiObjectBase(device, errorTag), mIndirectDrawMetadata(device->GetLimits()) {
- }
-
- ObjectType RenderBundleBase::GetType() const {
- return ObjectType::RenderBundle;
- }
-
- CommandIterator* RenderBundleBase::GetCommands() {
- return &mCommands;
- }
-
- const AttachmentState* RenderBundleBase::GetAttachmentState() const {
- ASSERT(!IsError());
- return mAttachmentState.Get();
- }
-
- bool RenderBundleBase::IsDepthReadOnly() const {
- ASSERT(!IsError());
- return mDepthReadOnly;
- }
-
- bool RenderBundleBase::IsStencilReadOnly() const {
- ASSERT(!IsError());
- return mStencilReadOnly;
- }
-
- const RenderPassResourceUsage& RenderBundleBase::GetResourceUsage() const {
- ASSERT(!IsError());
- return mResourceUsage;
- }
-
- const IndirectDrawMetadata& RenderBundleBase::GetIndirectDrawMetadata() {
- return mIndirectDrawMetadata;
- }
+RenderBundleBase::RenderBundleBase(RenderBundleEncoder* encoder,
+ const RenderBundleDescriptor* descriptor,
+ Ref<AttachmentState> attachmentState,
+ bool depthReadOnly,
+ bool stencilReadOnly,
+ RenderPassResourceUsage resourceUsage,
+ IndirectDrawMetadata indirectDrawMetadata)
+ : ApiObjectBase(encoder->GetDevice(), kLabelNotImplemented),
+ mCommands(encoder->AcquireCommands()),
+ mIndirectDrawMetadata(std::move(indirectDrawMetadata)),
+ mAttachmentState(std::move(attachmentState)),
+ mDepthReadOnly(depthReadOnly),
+ mStencilReadOnly(stencilReadOnly),
+ mResourceUsage(std::move(resourceUsage)) {
+ TrackInDevice();
+}
+
+void RenderBundleBase::DestroyImpl() {
+ FreeCommands(&mCommands);
+
+ // Remove reference to the attachment state so that we don't have lingering references to
+ // it preventing it from being uncached in the device.
+ mAttachmentState = nullptr;
+}
+
+// static
+RenderBundleBase* RenderBundleBase::MakeError(DeviceBase* device) {
+ return new RenderBundleBase(device, ObjectBase::kError);
+}
+
+RenderBundleBase::RenderBundleBase(DeviceBase* device, ErrorTag errorTag)
+ : ApiObjectBase(device, errorTag), mIndirectDrawMetadata(device->GetLimits()) {}
+
+ObjectType RenderBundleBase::GetType() const {
+ return ObjectType::RenderBundle;
+}
+
+CommandIterator* RenderBundleBase::GetCommands() {
+ return &mCommands;
+}
+
+const AttachmentState* RenderBundleBase::GetAttachmentState() const {
+ ASSERT(!IsError());
+ return mAttachmentState.Get();
+}
+
+bool RenderBundleBase::IsDepthReadOnly() const {
+ ASSERT(!IsError());
+ return mDepthReadOnly;
+}
+
+bool RenderBundleBase::IsStencilReadOnly() const {
+ ASSERT(!IsError());
+ return mStencilReadOnly;
+}
+
+const RenderPassResourceUsage& RenderBundleBase::GetResourceUsage() const {
+ ASSERT(!IsError());
+ return mResourceUsage;
+}
+
+const IndirectDrawMetadata& RenderBundleBase::GetIndirectDrawMetadata() {
+ return mIndirectDrawMetadata;
+}
} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/RenderBundle.h b/chromium/third_party/dawn/src/dawn/native/RenderBundle.h
index 0fa9ec58081..9297e015276 100644
--- a/chromium/third_party/dawn/src/dawn/native/RenderBundle.h
+++ b/chromium/third_party/dawn/src/dawn/native/RenderBundle.h
@@ -15,6 +15,8 @@
#ifndef SRC_DAWN_NATIVE_RENDERBUNDLE_H_
#define SRC_DAWN_NATIVE_RENDERBUNDLE_H_
+#include <bitset>
+
#include "dawn/common/Constants.h"
#include "dawn/native/AttachmentState.h"
#include "dawn/native/CommandAllocator.h"
@@ -26,47 +28,45 @@
#include "dawn/native/dawn_platform.h"
-#include <bitset>
-
namespace dawn::native {
- struct RenderBundleDescriptor;
- class RenderBundleEncoder;
+struct RenderBundleDescriptor;
+class RenderBundleEncoder;
- class RenderBundleBase final : public ApiObjectBase {
- public:
- RenderBundleBase(RenderBundleEncoder* encoder,
- const RenderBundleDescriptor* descriptor,
- Ref<AttachmentState> attachmentState,
- bool depthReadOnly,
- bool stencilReadOnly,
- RenderPassResourceUsage resourceUsage,
- IndirectDrawMetadata indirectDrawMetadata);
+class RenderBundleBase final : public ApiObjectBase {
+ public:
+ RenderBundleBase(RenderBundleEncoder* encoder,
+ const RenderBundleDescriptor* descriptor,
+ Ref<AttachmentState> attachmentState,
+ bool depthReadOnly,
+ bool stencilReadOnly,
+ RenderPassResourceUsage resourceUsage,
+ IndirectDrawMetadata indirectDrawMetadata);
- static RenderBundleBase* MakeError(DeviceBase* device);
+ static RenderBundleBase* MakeError(DeviceBase* device);
- ObjectType GetType() const override;
+ ObjectType GetType() const override;
- CommandIterator* GetCommands();
+ CommandIterator* GetCommands();
- const AttachmentState* GetAttachmentState() const;
- bool IsDepthReadOnly() const;
- bool IsStencilReadOnly() const;
- const RenderPassResourceUsage& GetResourceUsage() const;
- const IndirectDrawMetadata& GetIndirectDrawMetadata();
+ const AttachmentState* GetAttachmentState() const;
+ bool IsDepthReadOnly() const;
+ bool IsStencilReadOnly() const;
+ const RenderPassResourceUsage& GetResourceUsage() const;
+ const IndirectDrawMetadata& GetIndirectDrawMetadata();
- private:
- RenderBundleBase(DeviceBase* device, ErrorTag errorTag);
+ private:
+ RenderBundleBase(DeviceBase* device, ErrorTag errorTag);
- void DestroyImpl() override;
+ void DestroyImpl() override;
- CommandIterator mCommands;
- IndirectDrawMetadata mIndirectDrawMetadata;
- Ref<AttachmentState> mAttachmentState;
- bool mDepthReadOnly;
- bool mStencilReadOnly;
- RenderPassResourceUsage mResourceUsage;
- };
+ CommandIterator mCommands;
+ IndirectDrawMetadata mIndirectDrawMetadata;
+ Ref<AttachmentState> mAttachmentState;
+ bool mDepthReadOnly;
+ bool mStencilReadOnly;
+ RenderPassResourceUsage mResourceUsage;
+};
} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/RenderBundleEncoder.cpp b/chromium/third_party/dawn/src/dawn/native/RenderBundleEncoder.cpp
index 6d7a2db41a8..56a9d666ff7 100644
--- a/chromium/third_party/dawn/src/dawn/native/RenderBundleEncoder.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/RenderBundleEncoder.cpp
@@ -14,6 +14,8 @@
#include "dawn/native/RenderBundleEncoder.h"
+#include <utility>
+
#include "dawn/native/CommandValidation.h"
#include "dawn/native/Commands.h"
#include "dawn/native/Device.h"
@@ -26,147 +28,144 @@
namespace dawn::native {
- MaybeError ValidateColorAttachmentFormat(const DeviceBase* device,
- wgpu::TextureFormat textureFormat) {
- DAWN_TRY(ValidateTextureFormat(textureFormat));
- const Format* format = nullptr;
- DAWN_TRY_ASSIGN(format, device->GetInternalFormat(textureFormat));
- DAWN_INVALID_IF(!format->IsColor() || !format->isRenderable,
- "Texture format %s is not color renderable.", textureFormat);
- return {};
- }
-
- MaybeError ValidateDepthStencilAttachmentFormat(const DeviceBase* device,
- wgpu::TextureFormat textureFormat,
- bool depthReadOnly,
- bool stencilReadOnly) {
- DAWN_TRY(ValidateTextureFormat(textureFormat));
- const Format* format = nullptr;
- DAWN_TRY_ASSIGN(format, device->GetInternalFormat(textureFormat));
- DAWN_INVALID_IF(!format->HasDepthOrStencil() || !format->isRenderable,
- "Texture format %s is not depth/stencil renderable.", textureFormat);
-
- DAWN_INVALID_IF(
- format->HasDepth() && format->HasStencil() && depthReadOnly != stencilReadOnly,
- "depthReadOnly (%u) and stencilReadOnly (%u) must be the same when format %s has "
- "both depth and stencil aspects.",
- depthReadOnly, stencilReadOnly, textureFormat);
-
- return {};
- }
-
- MaybeError ValidateRenderBundleEncoderDescriptor(
- const DeviceBase* device,
- const RenderBundleEncoderDescriptor* descriptor) {
- DAWN_INVALID_IF(!IsValidSampleCount(descriptor->sampleCount),
- "Sample count (%u) is not supported.", descriptor->sampleCount);
-
- DAWN_INVALID_IF(
- descriptor->colorFormatsCount > kMaxColorAttachments,
- "Color formats count (%u) exceeds maximum number of color attachements (%u).",
- descriptor->colorFormatsCount, kMaxColorAttachments);
-
- bool allColorFormatsUndefined = true;
- for (uint32_t i = 0; i < descriptor->colorFormatsCount; ++i) {
- wgpu::TextureFormat format = descriptor->colorFormats[i];
- if (format != wgpu::TextureFormat::Undefined) {
- DAWN_TRY_CONTEXT(ValidateColorAttachmentFormat(device, format),
- "validating colorFormats[%u]", i);
- allColorFormatsUndefined = false;
- }
- }
-
- if (descriptor->depthStencilFormat != wgpu::TextureFormat::Undefined) {
- DAWN_TRY_CONTEXT(ValidateDepthStencilAttachmentFormat(
- device, descriptor->depthStencilFormat, descriptor->depthReadOnly,
- descriptor->stencilReadOnly),
- "validating depthStencilFormat");
- } else {
- DAWN_INVALID_IF(
- allColorFormatsUndefined,
- "No color or depthStencil attachments specified. At least one is required.");
+MaybeError ValidateColorAttachmentFormat(const DeviceBase* device,
+ wgpu::TextureFormat textureFormat) {
+ DAWN_TRY(ValidateTextureFormat(textureFormat));
+ const Format* format = nullptr;
+ DAWN_TRY_ASSIGN(format, device->GetInternalFormat(textureFormat));
+ DAWN_INVALID_IF(!format->IsColor() || !format->isRenderable,
+ "Texture format %s is not color renderable.", textureFormat);
+ return {};
+}
+
+MaybeError ValidateDepthStencilAttachmentFormat(const DeviceBase* device,
+ wgpu::TextureFormat textureFormat,
+ bool depthReadOnly,
+ bool stencilReadOnly) {
+ DAWN_TRY(ValidateTextureFormat(textureFormat));
+ const Format* format = nullptr;
+ DAWN_TRY_ASSIGN(format, device->GetInternalFormat(textureFormat));
+ DAWN_INVALID_IF(!format->HasDepthOrStencil() || !format->isRenderable,
+ "Texture format %s is not depth/stencil renderable.", textureFormat);
+
+ DAWN_INVALID_IF(
+ format->HasDepth() && format->HasStencil() && depthReadOnly != stencilReadOnly,
+ "depthReadOnly (%u) and stencilReadOnly (%u) must be the same when format %s has "
+ "both depth and stencil aspects.",
+ depthReadOnly, stencilReadOnly, textureFormat);
+
+ return {};
+}
+
+MaybeError ValidateRenderBundleEncoderDescriptor(const DeviceBase* device,
+ const RenderBundleEncoderDescriptor* descriptor) {
+ DAWN_INVALID_IF(!IsValidSampleCount(descriptor->sampleCount),
+ "Sample count (%u) is not supported.", descriptor->sampleCount);
+
+ DAWN_INVALID_IF(descriptor->colorFormatsCount > kMaxColorAttachments,
+                    "Color formats count (%u) exceeds maximum number of color attachments (%u).",
+ descriptor->colorFormatsCount, kMaxColorAttachments);
+
+ bool allColorFormatsUndefined = true;
+ for (uint32_t i = 0; i < descriptor->colorFormatsCount; ++i) {
+ wgpu::TextureFormat format = descriptor->colorFormats[i];
+ if (format != wgpu::TextureFormat::Undefined) {
+ DAWN_TRY_CONTEXT(ValidateColorAttachmentFormat(device, format),
+ "validating colorFormats[%u]", i);
+ allColorFormatsUndefined = false;
}
-
- return {};
- }
-
- RenderBundleEncoder::RenderBundleEncoder(DeviceBase* device,
- const RenderBundleEncoderDescriptor* descriptor)
- : RenderEncoderBase(device,
- descriptor->label,
- &mBundleEncodingContext,
- device->GetOrCreateAttachmentState(descriptor),
- descriptor->depthReadOnly,
- descriptor->stencilReadOnly),
- mBundleEncodingContext(device, this) {
- TrackInDevice();
- }
-
- RenderBundleEncoder::RenderBundleEncoder(DeviceBase* device, ErrorTag errorTag)
- : RenderEncoderBase(device, &mBundleEncodingContext, errorTag),
- mBundleEncodingContext(device, this) {
- }
-
- void RenderBundleEncoder::DestroyImpl() {
- RenderEncoderBase::DestroyImpl();
- mBundleEncodingContext.Destroy();
- }
-
- // static
- Ref<RenderBundleEncoder> RenderBundleEncoder::Create(
- DeviceBase* device,
- const RenderBundleEncoderDescriptor* descriptor) {
- return AcquireRef(new RenderBundleEncoder(device, descriptor));
- }
-
- // static
- RenderBundleEncoder* RenderBundleEncoder::MakeError(DeviceBase* device) {
- return new RenderBundleEncoder(device, ObjectBase::kError);
- }
-
- ObjectType RenderBundleEncoder::GetType() const {
- return ObjectType::RenderBundleEncoder;
}
- CommandIterator RenderBundleEncoder::AcquireCommands() {
- return mBundleEncodingContext.AcquireCommands();
+ if (descriptor->depthStencilFormat != wgpu::TextureFormat::Undefined) {
+ DAWN_TRY_CONTEXT(ValidateDepthStencilAttachmentFormat(
+ device, descriptor->depthStencilFormat, descriptor->depthReadOnly,
+ descriptor->stencilReadOnly),
+ "validating depthStencilFormat");
+ } else {
+ DAWN_INVALID_IF(
+ allColorFormatsUndefined,
+ "No color or depthStencil attachments specified. At least one is required.");
}
- RenderBundleBase* RenderBundleEncoder::APIFinish(const RenderBundleDescriptor* descriptor) {
- RenderBundleBase* result = nullptr;
-
- if (GetDevice()->ConsumedError(FinishImpl(descriptor), &result, "calling %s.Finish(%s).",
- this, descriptor)) {
- return RenderBundleBase::MakeError(GetDevice());
- }
-
- return result;
+ return {};
+}
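As a usage illustration of these rules (a valid sampleCount, at most kMaxColorAttachments color formats, and at least one color or depth/stencil format), a hypothetical descriptor filled through the C API might look like the sketch below; the field names match the descriptor members validated above, while the BGRA8Unorm format choice and the wgpu* entry points are assumptions for the example.

#include <webgpu/webgpu.h>

static WGPURenderBundleEncoder MakeBundleEncoder(WGPUDevice device) {
    WGPUTextureFormat colorFormat = WGPUTextureFormat_BGRA8Unorm;

    WGPURenderBundleEncoderDescriptor desc = {};
    desc.colorFormatsCount = 1;        // at most kMaxColorAttachments
    desc.colorFormats = &colorFormat;  // must be color-renderable
    desc.sampleCount = 1;              // must pass IsValidSampleCount()
    return wgpuDeviceCreateRenderBundleEncoder(device, &desc);
}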
+
+RenderBundleEncoder::RenderBundleEncoder(DeviceBase* device,
+ const RenderBundleEncoderDescriptor* descriptor)
+ : RenderEncoderBase(device,
+ descriptor->label,
+ &mBundleEncodingContext,
+ device->GetOrCreateAttachmentState(descriptor),
+ descriptor->depthReadOnly,
+ descriptor->stencilReadOnly),
+ mBundleEncodingContext(device, this) {
+ TrackInDevice();
+}
+
+RenderBundleEncoder::RenderBundleEncoder(DeviceBase* device, ErrorTag errorTag)
+ : RenderEncoderBase(device, &mBundleEncodingContext, errorTag),
+ mBundleEncodingContext(device, this) {}
+
+void RenderBundleEncoder::DestroyImpl() {
+ RenderEncoderBase::DestroyImpl();
+ mBundleEncodingContext.Destroy();
+}
+
+// static
+Ref<RenderBundleEncoder> RenderBundleEncoder::Create(
+ DeviceBase* device,
+ const RenderBundleEncoderDescriptor* descriptor) {
+ return AcquireRef(new RenderBundleEncoder(device, descriptor));
+}
+
+// static
+RenderBundleEncoder* RenderBundleEncoder::MakeError(DeviceBase* device) {
+ return new RenderBundleEncoder(device, ObjectBase::kError);
+}
+
+ObjectType RenderBundleEncoder::GetType() const {
+ return ObjectType::RenderBundleEncoder;
+}
+
+CommandIterator RenderBundleEncoder::AcquireCommands() {
+ return mBundleEncodingContext.AcquireCommands();
+}
+
+RenderBundleBase* RenderBundleEncoder::APIFinish(const RenderBundleDescriptor* descriptor) {
+ RenderBundleBase* result = nullptr;
+
+ if (GetDevice()->ConsumedError(FinishImpl(descriptor), &result, "calling %s.Finish(%s).", this,
+ descriptor)) {
+ return RenderBundleBase::MakeError(GetDevice());
}
- ResultOrError<RenderBundleBase*> RenderBundleEncoder::FinishImpl(
- const RenderBundleDescriptor* descriptor) {
- // Even if mBundleEncodingContext.Finish() validation fails, calling it will mutate the
- // internal state of the encoding context. Subsequent calls to encode commands will generate
- // errors.
- DAWN_TRY(mBundleEncodingContext.Finish());
-
- RenderPassResourceUsage usages = mUsageTracker.AcquireResourceUsage();
- if (IsValidationEnabled()) {
- DAWN_TRY(GetDevice()->ValidateObject(this));
- DAWN_TRY(ValidateProgrammableEncoderEnd());
- DAWN_TRY(ValidateFinish(usages));
- }
+ return result;
+}
- return new RenderBundleBase(this, descriptor, AcquireAttachmentState(), IsDepthReadOnly(),
- IsStencilReadOnly(), std::move(usages),
- std::move(mIndirectDrawMetadata));
- }
+ResultOrError<RenderBundleBase*> RenderBundleEncoder::FinishImpl(
+ const RenderBundleDescriptor* descriptor) {
+ // Even if mBundleEncodingContext.Finish() validation fails, calling it will mutate the
+ // internal state of the encoding context. Subsequent calls to encode commands will generate
+ // errors.
+ DAWN_TRY(mBundleEncodingContext.Finish());
- MaybeError RenderBundleEncoder::ValidateFinish(const RenderPassResourceUsage& usages) const {
- TRACE_EVENT0(GetDevice()->GetPlatform(), Validation, "RenderBundleEncoder::ValidateFinish");
+ RenderPassResourceUsage usages = mUsageTracker.AcquireResourceUsage();
+ if (IsValidationEnabled()) {
DAWN_TRY(GetDevice()->ValidateObject(this));
- DAWN_TRY(ValidateSyncScopeResourceUsage(usages));
- return {};
+ DAWN_TRY(ValidateProgrammableEncoderEnd());
+ DAWN_TRY(ValidateFinish(usages));
}
+ return new RenderBundleBase(this, descriptor, AcquireAttachmentState(), IsDepthReadOnly(),
+ IsStencilReadOnly(), std::move(usages),
+ std::move(mIndirectDrawMetadata));
+}
+
+MaybeError RenderBundleEncoder::ValidateFinish(const RenderPassResourceUsage& usages) const {
+ TRACE_EVENT0(GetDevice()->GetPlatform(), Validation, "RenderBundleEncoder::ValidateFinish");
+ DAWN_TRY(GetDevice()->ValidateObject(this));
+ DAWN_TRY(ValidateSyncScopeResourceUsage(usages));
+ return {};
+}
+
} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/RenderBundleEncoder.h b/chromium/third_party/dawn/src/dawn/native/RenderBundleEncoder.h
index 53ab119b943..24ee19ef010 100644
--- a/chromium/third_party/dawn/src/dawn/native/RenderBundleEncoder.h
+++ b/chromium/third_party/dawn/src/dawn/native/RenderBundleEncoder.h
@@ -23,33 +23,32 @@
namespace dawn::native {
- MaybeError ValidateRenderBundleEncoderDescriptor(
- const DeviceBase* device,
- const RenderBundleEncoderDescriptor* descriptor);
+MaybeError ValidateRenderBundleEncoderDescriptor(const DeviceBase* device,
+ const RenderBundleEncoderDescriptor* descriptor);
- class RenderBundleEncoder final : public RenderEncoderBase {
- public:
- static Ref<RenderBundleEncoder> Create(DeviceBase* device,
- const RenderBundleEncoderDescriptor* descriptor);
- static RenderBundleEncoder* MakeError(DeviceBase* device);
+class RenderBundleEncoder final : public RenderEncoderBase {
+ public:
+ static Ref<RenderBundleEncoder> Create(DeviceBase* device,
+ const RenderBundleEncoderDescriptor* descriptor);
+ static RenderBundleEncoder* MakeError(DeviceBase* device);
- ObjectType GetType() const override;
+ ObjectType GetType() const override;
- RenderBundleBase* APIFinish(const RenderBundleDescriptor* descriptor);
+ RenderBundleBase* APIFinish(const RenderBundleDescriptor* descriptor);
- CommandIterator AcquireCommands();
+ CommandIterator AcquireCommands();
- private:
- RenderBundleEncoder(DeviceBase* device, const RenderBundleEncoderDescriptor* descriptor);
- RenderBundleEncoder(DeviceBase* device, ErrorTag errorTag);
+ private:
+ RenderBundleEncoder(DeviceBase* device, const RenderBundleEncoderDescriptor* descriptor);
+ RenderBundleEncoder(DeviceBase* device, ErrorTag errorTag);
- void DestroyImpl() override;
+ void DestroyImpl() override;
- ResultOrError<RenderBundleBase*> FinishImpl(const RenderBundleDescriptor* descriptor);
- MaybeError ValidateFinish(const RenderPassResourceUsage& usages) const;
+ ResultOrError<RenderBundleBase*> FinishImpl(const RenderBundleDescriptor* descriptor);
+ MaybeError ValidateFinish(const RenderPassResourceUsage& usages) const;
- EncodingContext mBundleEncodingContext;
- };
+ EncodingContext mBundleEncodingContext;
+};
} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/RenderEncoderBase.cpp b/chromium/third_party/dawn/src/dawn/native/RenderEncoderBase.cpp
index f186c162dcf..242e47be839 100644
--- a/chromium/third_party/dawn/src/dawn/native/RenderEncoderBase.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/RenderEncoderBase.cpp
@@ -14,6 +14,10 @@
#include "dawn/native/RenderEncoderBase.h"
+#include <math.h>
+#include <cstring>
+#include <utility>
+
#include "dawn/common/Constants.h"
#include "dawn/common/Log.h"
#include "dawn/native/Buffer.h"
@@ -24,391 +28,402 @@
#include "dawn/native/RenderPipeline.h"
#include "dawn/native/ValidationUtils_autogen.h"
-#include <math.h>
-#include <cstring>
-
namespace dawn::native {
- RenderEncoderBase::RenderEncoderBase(DeviceBase* device,
- const char* label,
- EncodingContext* encodingContext,
- Ref<AttachmentState> attachmentState,
- bool depthReadOnly,
- bool stencilReadOnly)
- : ProgrammableEncoder(device, label, encodingContext),
- mIndirectDrawMetadata(device->GetLimits()),
- mAttachmentState(std::move(attachmentState)),
- mDisableBaseVertex(device->IsToggleEnabled(Toggle::DisableBaseVertex)),
- mDisableBaseInstance(device->IsToggleEnabled(Toggle::DisableBaseInstance)) {
- mDepthReadOnly = depthReadOnly;
- mStencilReadOnly = stencilReadOnly;
- }
-
- RenderEncoderBase::RenderEncoderBase(DeviceBase* device,
- EncodingContext* encodingContext,
- ErrorTag errorTag)
- : ProgrammableEncoder(device, encodingContext, errorTag),
- mIndirectDrawMetadata(device->GetLimits()),
- mDisableBaseVertex(device->IsToggleEnabled(Toggle::DisableBaseVertex)),
- mDisableBaseInstance(device->IsToggleEnabled(Toggle::DisableBaseInstance)) {
- }
-
- void RenderEncoderBase::DestroyImpl() {
- // Remove reference to the attachment state so that we don't have lingering references to
- // it preventing it from being uncached in the device.
- mAttachmentState = nullptr;
- }
-
- const AttachmentState* RenderEncoderBase::GetAttachmentState() const {
- ASSERT(!IsError());
- ASSERT(mAttachmentState != nullptr);
- return mAttachmentState.Get();
- }
-
- bool RenderEncoderBase::IsDepthReadOnly() const {
- ASSERT(!IsError());
- return mDepthReadOnly;
- }
-
- bool RenderEncoderBase::IsStencilReadOnly() const {
- ASSERT(!IsError());
- return mStencilReadOnly;
- }
-
- Ref<AttachmentState> RenderEncoderBase::AcquireAttachmentState() {
- return std::move(mAttachmentState);
- }
-
- void RenderEncoderBase::APIDraw(uint32_t vertexCount,
- uint32_t instanceCount,
- uint32_t firstVertex,
- uint32_t firstInstance) {
- mEncodingContext->TryEncode(
- this,
- [&](CommandAllocator* allocator) -> MaybeError {
- if (IsValidationEnabled()) {
- DAWN_TRY(mCommandBufferState.ValidateCanDraw());
-
- DAWN_INVALID_IF(mDisableBaseInstance && firstInstance != 0,
- "First instance (%u) must be zero.", firstInstance);
-
- DAWN_TRY(mCommandBufferState.ValidateBufferInRangeForVertexBuffer(vertexCount,
- firstVertex));
- DAWN_TRY(mCommandBufferState.ValidateBufferInRangeForInstanceBuffer(
- instanceCount, firstInstance));
- }
-
- DrawCmd* draw = allocator->Allocate<DrawCmd>(Command::Draw);
- draw->vertexCount = vertexCount;
- draw->instanceCount = instanceCount;
- draw->firstVertex = firstVertex;
- draw->firstInstance = firstInstance;
-
- return {};
- },
- "encoding %s.Draw(%u, %u, %u, %u).", this, vertexCount, instanceCount, firstVertex,
- firstInstance);
- }
-
- void RenderEncoderBase::APIDrawIndexed(uint32_t indexCount,
- uint32_t instanceCount,
- uint32_t firstIndex,
- int32_t baseVertex,
- uint32_t firstInstance) {
- mEncodingContext->TryEncode(
- this,
- [&](CommandAllocator* allocator) -> MaybeError {
- if (IsValidationEnabled()) {
- DAWN_TRY(mCommandBufferState.ValidateCanDrawIndexed());
-
- DAWN_INVALID_IF(mDisableBaseInstance && firstInstance != 0,
- "First instance (%u) must be zero.", firstInstance);
-
- DAWN_INVALID_IF(mDisableBaseVertex && baseVertex != 0,
- "Base vertex (%u) must be zero.", baseVertex);
-
- DAWN_TRY(
- mCommandBufferState.ValidateIndexBufferInRange(indexCount, firstIndex));
-
- // Although we don't know actual vertex access range in CPU, we still call the
- // ValidateBufferInRangeForVertexBuffer in order to deal with those vertex step
- // mode vertex buffer with an array stride of zero.
- DAWN_TRY(mCommandBufferState.ValidateBufferInRangeForVertexBuffer(0, 0));
- DAWN_TRY(mCommandBufferState.ValidateBufferInRangeForInstanceBuffer(
- instanceCount, firstInstance));
- }
-
- DrawIndexedCmd* draw = allocator->Allocate<DrawIndexedCmd>(Command::DrawIndexed);
- draw->indexCount = indexCount;
- draw->instanceCount = instanceCount;
- draw->firstIndex = firstIndex;
- draw->baseVertex = baseVertex;
- draw->firstInstance = firstInstance;
-
- return {};
- },
- "encoding %s.DrawIndexed(%u, %u, %u, %i, %u).", this, indexCount, instanceCount,
- firstIndex, baseVertex, firstInstance);
- }
-
- void RenderEncoderBase::APIDrawIndirect(BufferBase* indirectBuffer, uint64_t indirectOffset) {
- mEncodingContext->TryEncode(
- this,
- [&](CommandAllocator* allocator) -> MaybeError {
- if (IsValidationEnabled()) {
- DAWN_TRY(GetDevice()->ValidateObject(indirectBuffer));
- DAWN_TRY(ValidateCanUseAs(indirectBuffer, wgpu::BufferUsage::Indirect));
- DAWN_TRY(mCommandBufferState.ValidateCanDraw());
-
- DAWN_INVALID_IF(indirectOffset % 4 != 0,
- "Indirect offset (%u) is not a multiple of 4.", indirectOffset);
-
- DAWN_INVALID_IF(
- indirectOffset >= indirectBuffer->GetSize() ||
- kDrawIndirectSize > indirectBuffer->GetSize() - indirectOffset,
- "Indirect offset (%u) is out of bounds of indirect buffer %s size (%u).",
- indirectOffset, indirectBuffer, indirectBuffer->GetSize());
- }
-
- DrawIndirectCmd* cmd = allocator->Allocate<DrawIndirectCmd>(Command::DrawIndirect);
+RenderEncoderBase::RenderEncoderBase(DeviceBase* device,
+ const char* label,
+ EncodingContext* encodingContext,
+ Ref<AttachmentState> attachmentState,
+ bool depthReadOnly,
+ bool stencilReadOnly)
+ : ProgrammableEncoder(device, label, encodingContext),
+ mIndirectDrawMetadata(device->GetLimits()),
+ mAttachmentState(std::move(attachmentState)),
+ mDisableBaseVertex(device->IsToggleEnabled(Toggle::DisableBaseVertex)),
+ mDisableBaseInstance(device->IsToggleEnabled(Toggle::DisableBaseInstance)) {
+ mDepthReadOnly = depthReadOnly;
+ mStencilReadOnly = stencilReadOnly;
+}
+
+RenderEncoderBase::RenderEncoderBase(DeviceBase* device,
+ EncodingContext* encodingContext,
+ ErrorTag errorTag)
+ : ProgrammableEncoder(device, encodingContext, errorTag),
+ mIndirectDrawMetadata(device->GetLimits()),
+ mDisableBaseVertex(device->IsToggleEnabled(Toggle::DisableBaseVertex)),
+ mDisableBaseInstance(device->IsToggleEnabled(Toggle::DisableBaseInstance)) {}
+
+void RenderEncoderBase::DestroyImpl() {
+ // Remove reference to the attachment state so that we don't have lingering references to
+ // it preventing it from being uncached in the device.
+ mAttachmentState = nullptr;
+}
+
+const AttachmentState* RenderEncoderBase::GetAttachmentState() const {
+ ASSERT(!IsError());
+ ASSERT(mAttachmentState != nullptr);
+ return mAttachmentState.Get();
+}
+
+bool RenderEncoderBase::IsDepthReadOnly() const {
+ ASSERT(!IsError());
+ return mDepthReadOnly;
+}
+
+bool RenderEncoderBase::IsStencilReadOnly() const {
+ ASSERT(!IsError());
+ return mStencilReadOnly;
+}
+
+Ref<AttachmentState> RenderEncoderBase::AcquireAttachmentState() {
+ return std::move(mAttachmentState);
+}
+
+void RenderEncoderBase::APIDraw(uint32_t vertexCount,
+ uint32_t instanceCount,
+ uint32_t firstVertex,
+ uint32_t firstInstance) {
+ mEncodingContext->TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (IsValidationEnabled()) {
+ DAWN_TRY(mCommandBufferState.ValidateCanDraw());
+
+ DAWN_INVALID_IF(mDisableBaseInstance && firstInstance != 0,
+ "First instance (%u) must be zero.", firstInstance);
+
+ DAWN_TRY(mCommandBufferState.ValidateBufferInRangeForVertexBuffer(vertexCount,
+ firstVertex));
+ DAWN_TRY(mCommandBufferState.ValidateBufferInRangeForInstanceBuffer(instanceCount,
+ firstInstance));
+ }
+
+ DrawCmd* draw = allocator->Allocate<DrawCmd>(Command::Draw);
+ draw->vertexCount = vertexCount;
+ draw->instanceCount = instanceCount;
+ draw->firstVertex = firstVertex;
+ draw->firstInstance = firstInstance;
+
+ return {};
+ },
+ "encoding %s.Draw(%u, %u, %u, %u).", this, vertexCount, instanceCount, firstVertex,
+ firstInstance);
+}
+
+void RenderEncoderBase::APIDrawIndexed(uint32_t indexCount,
+ uint32_t instanceCount,
+ uint32_t firstIndex,
+ int32_t baseVertex,
+ uint32_t firstInstance) {
+ mEncodingContext->TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (IsValidationEnabled()) {
+ DAWN_TRY(mCommandBufferState.ValidateCanDrawIndexed());
+
+ DAWN_INVALID_IF(mDisableBaseInstance && firstInstance != 0,
+ "First instance (%u) must be zero.", firstInstance);
+
+ DAWN_INVALID_IF(mDisableBaseVertex && baseVertex != 0,
+ "Base vertex (%u) must be zero.", baseVertex);
+
+ DAWN_TRY(mCommandBufferState.ValidateIndexBufferInRange(indexCount, firstIndex));
+
+                // DrawIndexed only validates the instance step mode vertex buffer.
+ DAWN_TRY(mCommandBufferState.ValidateBufferInRangeForInstanceBuffer(instanceCount,
+ firstInstance));
+ }
+
+ DrawIndexedCmd* draw = allocator->Allocate<DrawIndexedCmd>(Command::DrawIndexed);
+ draw->indexCount = indexCount;
+ draw->instanceCount = instanceCount;
+ draw->firstIndex = firstIndex;
+ draw->baseVertex = baseVertex;
+ draw->firstInstance = firstInstance;
+
+ return {};
+ },
+ "encoding %s.DrawIndexed(%u, %u, %u, %i, %u).", this, indexCount, instanceCount, firstIndex,
+ baseVertex, firstInstance);
+}
+
+void RenderEncoderBase::APIDrawIndirect(BufferBase* indirectBuffer, uint64_t indirectOffset) {
+ mEncodingContext->TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (IsValidationEnabled()) {
+ DAWN_TRY(GetDevice()->ValidateObject(indirectBuffer));
+ DAWN_TRY(ValidateCanUseAs(indirectBuffer, wgpu::BufferUsage::Indirect));
+ DAWN_TRY(mCommandBufferState.ValidateCanDraw());
+
+ DAWN_INVALID_IF(indirectOffset % 4 != 0,
+ "Indirect offset (%u) is not a multiple of 4.", indirectOffset);
+
+ DAWN_INVALID_IF(
+ indirectOffset >= indirectBuffer->GetSize() ||
+ kDrawIndirectSize > indirectBuffer->GetSize() - indirectOffset,
+ "Indirect offset (%u) is out of bounds of indirect buffer %s size (%u).",
+ indirectOffset, indirectBuffer, indirectBuffer->GetSize());
+ }
+
+ DrawIndirectCmd* cmd = allocator->Allocate<DrawIndirectCmd>(Command::DrawIndirect);
+
+ bool duplicateBaseVertexInstance =
+ GetDevice()->ShouldDuplicateParametersForDrawIndirect(
+ mCommandBufferState.GetRenderPipeline());
+ if (IsValidationEnabled() || duplicateBaseVertexInstance) {
+ // Later, EncodeIndirectDrawValidationCommands will allocate a scratch storage
+ // buffer which will store the validated or duplicated indirect data. The buffer
+ // and offset will be updated to point to it.
+ // |EncodeIndirectDrawValidationCommands| is called at the end of encoding the
+ // render pass, while the |cmd| pointer is still valid.
+ cmd->indirectBuffer = nullptr;
+
+ mIndirectDrawMetadata.AddIndirectDraw(indirectBuffer, indirectOffset,
+ duplicateBaseVertexInstance, cmd);
+ } else {
cmd->indirectBuffer = indirectBuffer;
cmd->indirectOffset = indirectOffset;
-
- mUsageTracker.BufferUsedAs(indirectBuffer, wgpu::BufferUsage::Indirect);
-
- return {};
- },
- "encoding %s.DrawIndirect(%s, %u).", this, indirectBuffer, indirectOffset);
- }
-
- void RenderEncoderBase::APIDrawIndexedIndirect(BufferBase* indirectBuffer,
- uint64_t indirectOffset) {
- mEncodingContext->TryEncode(
- this,
- [&](CommandAllocator* allocator) -> MaybeError {
- if (IsValidationEnabled()) {
- DAWN_TRY(GetDevice()->ValidateObject(indirectBuffer));
- DAWN_TRY(ValidateCanUseAs(indirectBuffer, wgpu::BufferUsage::Indirect));
- DAWN_TRY(mCommandBufferState.ValidateCanDrawIndexed());
-
- DAWN_INVALID_IF(indirectOffset % 4 != 0,
- "Indirect offset (%u) is not a multiple of 4.", indirectOffset);
-
- DAWN_INVALID_IF(
- (indirectOffset >= indirectBuffer->GetSize() ||
- kDrawIndexedIndirectSize > indirectBuffer->GetSize() - indirectOffset),
- "Indirect offset (%u) is out of bounds of indirect buffer %s size (%u).",
- indirectOffset, indirectBuffer, indirectBuffer->GetSize());
- }
-
- DrawIndexedIndirectCmd* cmd =
- allocator->Allocate<DrawIndexedIndirectCmd>(Command::DrawIndexedIndirect);
- if (IsValidationEnabled()) {
- // Later, EncodeIndirectDrawValidationCommands will allocate a scratch storage
- // buffer which will store the validated indirect data. The buffer and offset
- // will be updated to point to it.
- // |EncodeIndirectDrawValidationCommands| is called at the end of encoding the
- // render pass, while the |cmd| pointer is still valid.
- cmd->indirectBuffer = nullptr;
-
- mIndirectDrawMetadata.AddIndexedIndirectDraw(
- mCommandBufferState.GetIndexFormat(),
- mCommandBufferState.GetIndexBufferSize(), indirectBuffer, indirectOffset,
- cmd);
+ }
+
+ // TODO(crbug.com/dawn/1166): Adding the indirectBuffer is needed for correct usage
+ // validation, but it will unnecessarily transition to indirectBuffer usage in the
+ // backend.
+ mUsageTracker.BufferUsedAs(indirectBuffer, wgpu::BufferUsage::Indirect);
+
+ return {};
+ },
+ "encoding %s.DrawIndirect(%s, %u).", this, indirectBuffer, indirectOffset);
+}
+
+void RenderEncoderBase::APIDrawIndexedIndirect(BufferBase* indirectBuffer,
+ uint64_t indirectOffset) {
+ mEncodingContext->TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (IsValidationEnabled()) {
+ DAWN_TRY(GetDevice()->ValidateObject(indirectBuffer));
+ DAWN_TRY(ValidateCanUseAs(indirectBuffer, wgpu::BufferUsage::Indirect));
+ DAWN_TRY(mCommandBufferState.ValidateCanDrawIndexed());
+
+ DAWN_INVALID_IF(indirectOffset % 4 != 0,
+ "Indirect offset (%u) is not a multiple of 4.", indirectOffset);
+
+ DAWN_INVALID_IF(
+ (indirectOffset >= indirectBuffer->GetSize() ||
+ kDrawIndexedIndirectSize > indirectBuffer->GetSize() - indirectOffset),
+ "Indirect offset (%u) is out of bounds of indirect buffer %s size (%u).",
+ indirectOffset, indirectBuffer, indirectBuffer->GetSize());
+ }
+
+ DrawIndexedIndirectCmd* cmd =
+ allocator->Allocate<DrawIndexedIndirectCmd>(Command::DrawIndexedIndirect);
+
+ bool duplicateBaseVertexInstance =
+ GetDevice()->ShouldDuplicateParametersForDrawIndirect(
+ mCommandBufferState.GetRenderPipeline());
+ if (IsValidationEnabled() || duplicateBaseVertexInstance) {
+ // Later, EncodeIndirectDrawValidationCommands will allocate a scratch storage
+ // buffer which will store the validated or duplicated indirect data. The buffer
+ // and offset will be updated to point to it.
+ // |EncodeIndirectDrawValidationCommands| is called at the end of encoding the
+ // render pass, while the |cmd| pointer is still valid.
+ cmd->indirectBuffer = nullptr;
+
+ mIndirectDrawMetadata.AddIndexedIndirectDraw(
+ mCommandBufferState.GetIndexFormat(), mCommandBufferState.GetIndexBufferSize(),
+ indirectBuffer, indirectOffset, duplicateBaseVertexInstance, cmd);
+ } else {
+ cmd->indirectBuffer = indirectBuffer;
+ cmd->indirectOffset = indirectOffset;
+ }
+
+ // TODO(crbug.com/dawn/1166): Adding the indirectBuffer is needed for correct usage
+            // validation, but it will unnecessarily transition to indirectBuffer usage in the
+ // backend.
+ mUsageTracker.BufferUsedAs(indirectBuffer, wgpu::BufferUsage::Indirect);
+
+ return {};
+ },
+ "encoding %s.DrawIndexedIndirect(%s, %u).", this, indirectBuffer, indirectOffset);
+}
+
+void RenderEncoderBase::APISetPipeline(RenderPipelineBase* pipeline) {
+ mEncodingContext->TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (IsValidationEnabled()) {
+ DAWN_TRY(GetDevice()->ValidateObject(pipeline));
+
+ DAWN_INVALID_IF(pipeline->GetAttachmentState() != mAttachmentState.Get(),
+ "Attachment state of %s is not compatible with %s.\n"
+ "%s expects an attachment state of %s.\n"
+ "%s has an attachment state of %s.",
+ pipeline, this, this, mAttachmentState.Get(), pipeline,
+ pipeline->GetAttachmentState());
+
+ DAWN_INVALID_IF(pipeline->WritesDepth() && mDepthReadOnly,
+ "%s writes depth while %s's depthReadOnly is true", pipeline, this);
+
+ DAWN_INVALID_IF(pipeline->WritesStencil() && mStencilReadOnly,
+ "%s writes stencil while %s's stencilReadOnly is true", pipeline,
+ this);
+ }
+
+ mCommandBufferState.SetRenderPipeline(pipeline);
+
+ SetRenderPipelineCmd* cmd =
+ allocator->Allocate<SetRenderPipelineCmd>(Command::SetRenderPipeline);
+ cmd->pipeline = pipeline;
+
+ return {};
+ },
+ "encoding %s.SetPipeline(%s).", this, pipeline);
+}
+
+void RenderEncoderBase::APISetIndexBuffer(BufferBase* buffer,
+ wgpu::IndexFormat format,
+ uint64_t offset,
+ uint64_t size) {
+ mEncodingContext->TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (IsValidationEnabled()) {
+ DAWN_TRY(GetDevice()->ValidateObject(buffer));
+ DAWN_TRY(ValidateCanUseAs(buffer, wgpu::BufferUsage::Index));
+
+ DAWN_TRY(ValidateIndexFormat(format));
+
+ DAWN_INVALID_IF(format == wgpu::IndexFormat::Undefined,
+ "Index format must be specified");
+
+ DAWN_INVALID_IF(offset % uint64_t(IndexFormatSize(format)) != 0,
+ "Index buffer offset (%u) is not a multiple of the size (%u) "
+ "of %s.",
+ offset, IndexFormatSize(format), format);
+
+ uint64_t bufferSize = buffer->GetSize();
+ DAWN_INVALID_IF(offset > bufferSize,
+ "Index buffer offset (%u) is larger than the size (%u) of %s.",
+ offset, bufferSize, buffer);
+
+ uint64_t remainingSize = bufferSize - offset;
+
+ if (size == wgpu::kWholeSize) {
+ size = remainingSize;
} else {
- cmd->indirectBuffer = indirectBuffer;
- cmd->indirectOffset = indirectOffset;
- }
-
- // TODO(crbug.com/dawn/1166): Adding the indirectBuffer is needed for correct usage
- // validation, but it will unecessarily transition to indirectBuffer usage in the
- // backend.
- mUsageTracker.BufferUsedAs(indirectBuffer, wgpu::BufferUsage::Indirect);
-
- return {};
- },
- "encoding %s.DrawIndexedIndirect(%s, %u).", this, indirectBuffer, indirectOffset);
- }
-
- void RenderEncoderBase::APISetPipeline(RenderPipelineBase* pipeline) {
- mEncodingContext->TryEncode(
- this,
- [&](CommandAllocator* allocator) -> MaybeError {
- if (IsValidationEnabled()) {
- DAWN_TRY(GetDevice()->ValidateObject(pipeline));
-
- DAWN_INVALID_IF(pipeline->GetAttachmentState() != mAttachmentState.Get(),
- "Attachment state of %s is not compatible with %s.\n"
- "%s expects an attachment state of %s.\n"
- "%s has an attachment state of %s.",
- pipeline, this, this, mAttachmentState.Get(), pipeline,
- pipeline->GetAttachmentState());
-
- DAWN_INVALID_IF(pipeline->WritesDepth() && mDepthReadOnly,
- "%s writes depth while %s's depthReadOnly is true", pipeline,
- this);
-
- DAWN_INVALID_IF(pipeline->WritesStencil() && mStencilReadOnly,
- "%s writes stencil while %s's stencilReadOnly is true",
- pipeline, this);
+ DAWN_INVALID_IF(size > remainingSize,
+ "Index buffer range (offset: %u, size: %u) doesn't fit in "
+ "the size (%u) of "
+ "%s.",
+ offset, size, bufferSize, buffer);
}
-
- mCommandBufferState.SetRenderPipeline(pipeline);
-
- SetRenderPipelineCmd* cmd =
- allocator->Allocate<SetRenderPipelineCmd>(Command::SetRenderPipeline);
- cmd->pipeline = pipeline;
-
- return {};
- },
- "encoding %s.SetPipeline(%s).", this, pipeline);
- }
-
- void RenderEncoderBase::APISetIndexBuffer(BufferBase* buffer,
- wgpu::IndexFormat format,
- uint64_t offset,
- uint64_t size) {
- mEncodingContext->TryEncode(
- this,
- [&](CommandAllocator* allocator) -> MaybeError {
- if (IsValidationEnabled()) {
- DAWN_TRY(GetDevice()->ValidateObject(buffer));
- DAWN_TRY(ValidateCanUseAs(buffer, wgpu::BufferUsage::Index));
-
- DAWN_TRY(ValidateIndexFormat(format));
-
- DAWN_INVALID_IF(format == wgpu::IndexFormat::Undefined,
- "Index format must be specified");
-
- DAWN_INVALID_IF(offset % uint64_t(IndexFormatSize(format)) != 0,
- "Index buffer offset (%u) is not a multiple of the size (%u) "
- "of %s.",
- offset, IndexFormatSize(format), format);
-
- uint64_t bufferSize = buffer->GetSize();
- DAWN_INVALID_IF(offset > bufferSize,
- "Index buffer offset (%u) is larger than the size (%u) of %s.",
- offset, bufferSize, buffer);
-
- uint64_t remainingSize = bufferSize - offset;
-
- if (size == wgpu::kWholeSize) {
- size = remainingSize;
- } else {
- DAWN_INVALID_IF(size > remainingSize,
- "Index buffer range (offset: %u, size: %u) doesn't fit in "
- "the size (%u) of "
- "%s.",
- offset, size, bufferSize, buffer);
- }
- } else {
- if (size == wgpu::kWholeSize) {
- DAWN_ASSERT(buffer->GetSize() >= offset);
- size = buffer->GetSize() - offset;
- }
+ } else {
+ if (size == wgpu::kWholeSize) {
+ DAWN_ASSERT(buffer->GetSize() >= offset);
+ size = buffer->GetSize() - offset;
}
-
- mCommandBufferState.SetIndexBuffer(format, size);
-
- SetIndexBufferCmd* cmd =
- allocator->Allocate<SetIndexBufferCmd>(Command::SetIndexBuffer);
- cmd->buffer = buffer;
- cmd->format = format;
- cmd->offset = offset;
- cmd->size = size;
-
- mUsageTracker.BufferUsedAs(buffer, wgpu::BufferUsage::Index);
-
- return {};
- },
- "encoding %s.SetIndexBuffer(%s, %s, %u, %u).", this, buffer, format, offset, size);
- }
-
- void RenderEncoderBase::APISetVertexBuffer(uint32_t slot,
- BufferBase* buffer,
- uint64_t offset,
- uint64_t size) {
- mEncodingContext->TryEncode(
- this,
- [&](CommandAllocator* allocator) -> MaybeError {
- if (IsValidationEnabled()) {
- DAWN_TRY(GetDevice()->ValidateObject(buffer));
- DAWN_TRY(ValidateCanUseAs(buffer, wgpu::BufferUsage::Vertex));
-
- DAWN_INVALID_IF(slot >= kMaxVertexBuffers,
- "Vertex buffer slot (%u) is larger the maximum (%u)", slot,
- kMaxVertexBuffers - 1);
-
- DAWN_INVALID_IF(offset % 4 != 0,
- "Vertex buffer offset (%u) is not a multiple of 4", offset);
-
- uint64_t bufferSize = buffer->GetSize();
- DAWN_INVALID_IF(offset > bufferSize,
- "Vertex buffer offset (%u) is larger than the size (%u) of %s.",
- offset, bufferSize, buffer);
-
- uint64_t remainingSize = bufferSize - offset;
-
- if (size == wgpu::kWholeSize) {
- size = remainingSize;
- } else {
- DAWN_INVALID_IF(size > remainingSize,
- "Vertex buffer range (offset: %u, size: %u) doesn't fit in "
- "the size (%u) "
- "of %s.",
- offset, size, bufferSize, buffer);
- }
+ }
+
+ mCommandBufferState.SetIndexBuffer(format, size);
+
+ SetIndexBufferCmd* cmd =
+ allocator->Allocate<SetIndexBufferCmd>(Command::SetIndexBuffer);
+ cmd->buffer = buffer;
+ cmd->format = format;
+ cmd->offset = offset;
+ cmd->size = size;
+
+ mUsageTracker.BufferUsedAs(buffer, wgpu::BufferUsage::Index);
+
+ return {};
+ },
+ "encoding %s.SetIndexBuffer(%s, %s, %u, %u).", this, buffer, format, offset, size);
+}
+
+void RenderEncoderBase::APISetVertexBuffer(uint32_t slot,
+ BufferBase* buffer,
+ uint64_t offset,
+ uint64_t size) {
+ mEncodingContext->TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (IsValidationEnabled()) {
+ DAWN_TRY(GetDevice()->ValidateObject(buffer));
+ DAWN_TRY(ValidateCanUseAs(buffer, wgpu::BufferUsage::Vertex));
+
+ DAWN_INVALID_IF(slot >= kMaxVertexBuffers,
+ "Vertex buffer slot (%u) is larger the maximum (%u)", slot,
+ kMaxVertexBuffers - 1);
+
+ DAWN_INVALID_IF(offset % 4 != 0, "Vertex buffer offset (%u) is not a multiple of 4",
+ offset);
+
+ uint64_t bufferSize = buffer->GetSize();
+ DAWN_INVALID_IF(offset > bufferSize,
+ "Vertex buffer offset (%u) is larger than the size (%u) of %s.",
+ offset, bufferSize, buffer);
+
+ uint64_t remainingSize = bufferSize - offset;
+
+ if (size == wgpu::kWholeSize) {
+ size = remainingSize;
} else {
- if (size == wgpu::kWholeSize) {
- DAWN_ASSERT(buffer->GetSize() >= offset);
- size = buffer->GetSize() - offset;
- }
+ DAWN_INVALID_IF(size > remainingSize,
+ "Vertex buffer range (offset: %u, size: %u) doesn't fit in "
+ "the size (%u) "
+ "of %s.",
+ offset, size, bufferSize, buffer);
}
-
- mCommandBufferState.SetVertexBuffer(VertexBufferSlot(uint8_t(slot)), size);
-
- SetVertexBufferCmd* cmd =
- allocator->Allocate<SetVertexBufferCmd>(Command::SetVertexBuffer);
- cmd->slot = VertexBufferSlot(static_cast<uint8_t>(slot));
- cmd->buffer = buffer;
- cmd->offset = offset;
- cmd->size = size;
-
- mUsageTracker.BufferUsedAs(buffer, wgpu::BufferUsage::Vertex);
-
- return {};
- },
- "encoding %s.SetVertexBuffer(%u, %s, %u, %u).", this, slot, buffer, offset, size);
- }
-
- void RenderEncoderBase::APISetBindGroup(uint32_t groupIndexIn,
- BindGroupBase* group,
- uint32_t dynamicOffsetCount,
- const uint32_t* dynamicOffsets) {
- mEncodingContext->TryEncode(
- this,
- [&](CommandAllocator* allocator) -> MaybeError {
- BindGroupIndex groupIndex(groupIndexIn);
-
- if (IsValidationEnabled()) {
- DAWN_TRY(ValidateSetBindGroup(groupIndex, group, dynamicOffsetCount,
- dynamicOffsets));
+ } else {
+ if (size == wgpu::kWholeSize) {
+ DAWN_ASSERT(buffer->GetSize() >= offset);
+ size = buffer->GetSize() - offset;
}
-
- RecordSetBindGroup(allocator, groupIndex, group, dynamicOffsetCount,
- dynamicOffsets);
- mCommandBufferState.SetBindGroup(groupIndex, group, dynamicOffsetCount,
- dynamicOffsets);
- mUsageTracker.AddBindGroup(group);
-
- return {};
- },
- // TODO(dawn:1190): For unknown reasons formatting this message fails if `group` is used
- // as a string value in the message. This despite the exact same code working as
- // intended in ComputePassEncoder::APISetBindGroup. Replacing with a static [BindGroup]
- // until the reason for the failure can be determined.
- "encoding %s.SetBindGroup(%u, [BindGroup], %u, ...).", this, groupIndexIn,
- dynamicOffsetCount);
- }
+ }
+
+ mCommandBufferState.SetVertexBuffer(VertexBufferSlot(uint8_t(slot)), size);
+
+ SetVertexBufferCmd* cmd =
+ allocator->Allocate<SetVertexBufferCmd>(Command::SetVertexBuffer);
+ cmd->slot = VertexBufferSlot(static_cast<uint8_t>(slot));
+ cmd->buffer = buffer;
+ cmd->offset = offset;
+ cmd->size = size;
+
+ mUsageTracker.BufferUsedAs(buffer, wgpu::BufferUsage::Vertex);
+
+ return {};
+ },
+ "encoding %s.SetVertexBuffer(%u, %s, %u, %u).", this, slot, buffer, offset, size);
+}
+
+void RenderEncoderBase::APISetBindGroup(uint32_t groupIndexIn,
+ BindGroupBase* group,
+ uint32_t dynamicOffsetCount,
+ const uint32_t* dynamicOffsets) {
+ mEncodingContext->TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ BindGroupIndex groupIndex(groupIndexIn);
+
+ if (IsValidationEnabled()) {
+ DAWN_TRY(
+ ValidateSetBindGroup(groupIndex, group, dynamicOffsetCount, dynamicOffsets));
+ }
+
+ RecordSetBindGroup(allocator, groupIndex, group, dynamicOffsetCount, dynamicOffsets);
+ mCommandBufferState.SetBindGroup(groupIndex, group, dynamicOffsetCount, dynamicOffsets);
+ mUsageTracker.AddBindGroup(group);
+
+ return {};
+ },
+ // TODO(dawn:1190): For unknown reasons formatting this message fails if `group` is used
+ // as a string value in the message. This despite the exact same code working as
+ // intended in ComputePassEncoder::APISetBindGroup. Replacing with a static [BindGroup]
+ // until the reason for the failure can be determined.
+ "encoding %s.SetBindGroup(%u, [BindGroup], %u, ...).", this, groupIndexIn,
+ dynamicOffsetCount);
+}
} // namespace dawn::native
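
The APIDrawIndirect and APIDrawIndexedIndirect validation above rejects offsets that are not 4-byte aligned and checks the remaining buffer space with a subtraction rather than an addition, so the bounds test cannot overflow for large offsets. The standalone C++ sketch below illustrates the same check, assuming the usual WebGPU packing of indirect arguments into four (non-indexed) or five (indexed) consecutive 32-bit values; the struct and function names are illustrative and are not Dawn's own types.

#include <cstdint>
#include <cstdio>

// Illustrative layouts for WebGPU indirect draw arguments (assumed packing:
// four uint32 values for Draw, five for DrawIndexed); not Dawn's own types.
struct DrawIndirectArgs {
    uint32_t vertexCount;
    uint32_t instanceCount;
    uint32_t firstVertex;
    uint32_t firstInstance;
};

struct DrawIndexedIndirectArgs {
    uint32_t indexCount;
    uint32_t instanceCount;
    uint32_t firstIndex;
    int32_t baseVertex;
    uint32_t firstInstance;
};

// Mirrors the overflow-safe range check used above: comparing against
// bufferSize - indirectOffset avoids computing indirectOffset + argsSize,
// which could wrap for very large offsets.
bool IndirectArgsInBounds(uint64_t bufferSize, uint64_t indirectOffset, uint64_t argsSize) {
    if (indirectOffset % 4 != 0) {
        return false;  // the offset must be a multiple of 4
    }
    return indirectOffset < bufferSize && argsSize <= bufferSize - indirectOffset;
}

int main() {
    const uint64_t bufferSize = 256;
    std::printf("draw args at 240:         %d\n",
                IndirectArgsInBounds(bufferSize, 240, sizeof(DrawIndirectArgs)));         // 1
    std::printf("indexed draw args at 240: %d\n",
                IndirectArgsInBounds(bufferSize, 240, sizeof(DrawIndexedIndirectArgs)));  // 0
    std::printf("misaligned offset 6:      %d\n",
                IndirectArgsInBounds(bufferSize, 6, sizeof(DrawIndirectArgs)));           // 0
    return 0;
}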
diff --git a/chromium/third_party/dawn/src/dawn/native/RenderEncoderBase.h b/chromium/third_party/dawn/src/dawn/native/RenderEncoderBase.h
index 18ac91a2995..0bdcc4d16ab 100644
--- a/chromium/third_party/dawn/src/dawn/native/RenderEncoderBase.h
+++ b/chromium/third_party/dawn/src/dawn/native/RenderEncoderBase.h
@@ -24,63 +24,63 @@
namespace dawn::native {
- class RenderEncoderBase : public ProgrammableEncoder {
- public:
- RenderEncoderBase(DeviceBase* device,
- const char* label,
- EncodingContext* encodingContext,
- Ref<AttachmentState> attachmentState,
- bool depthReadOnly,
- bool stencilReadOnly);
-
- void APIDraw(uint32_t vertexCount,
- uint32_t instanceCount = 1,
- uint32_t firstVertex = 0,
- uint32_t firstInstance = 0);
- void APIDrawIndexed(uint32_t vertexCount,
- uint32_t instanceCount,
- uint32_t firstIndex,
- int32_t baseVertex,
- uint32_t firstInstance);
-
- void APIDrawIndirect(BufferBase* indirectBuffer, uint64_t indirectOffset);
- void APIDrawIndexedIndirect(BufferBase* indirectBuffer, uint64_t indirectOffset);
-
- void APISetPipeline(RenderPipelineBase* pipeline);
-
- void APISetVertexBuffer(uint32_t slot, BufferBase* buffer, uint64_t offset, uint64_t size);
- void APISetIndexBuffer(BufferBase* buffer,
- wgpu::IndexFormat format,
- uint64_t offset,
- uint64_t size);
-
- void APISetBindGroup(uint32_t groupIndex,
- BindGroupBase* group,
- uint32_t dynamicOffsetCount = 0,
- const uint32_t* dynamicOffsets = nullptr);
-
- const AttachmentState* GetAttachmentState() const;
- bool IsDepthReadOnly() const;
- bool IsStencilReadOnly() const;
- Ref<AttachmentState> AcquireAttachmentState();
-
- protected:
- // Construct an "error" render encoder base.
- RenderEncoderBase(DeviceBase* device, EncodingContext* encodingContext, ErrorTag errorTag);
-
- void DestroyImpl() override;
-
- CommandBufferStateTracker mCommandBufferState;
- RenderPassResourceUsageTracker mUsageTracker;
- IndirectDrawMetadata mIndirectDrawMetadata;
-
- private:
- Ref<AttachmentState> mAttachmentState;
- const bool mDisableBaseVertex;
- const bool mDisableBaseInstance;
- bool mDepthReadOnly = false;
- bool mStencilReadOnly = false;
- };
+class RenderEncoderBase : public ProgrammableEncoder {
+ public:
+ RenderEncoderBase(DeviceBase* device,
+ const char* label,
+ EncodingContext* encodingContext,
+ Ref<AttachmentState> attachmentState,
+ bool depthReadOnly,
+ bool stencilReadOnly);
+
+ void APIDraw(uint32_t vertexCount,
+ uint32_t instanceCount = 1,
+ uint32_t firstVertex = 0,
+ uint32_t firstInstance = 0);
+ void APIDrawIndexed(uint32_t vertexCount,
+ uint32_t instanceCount,
+ uint32_t firstIndex,
+ int32_t baseVertex,
+ uint32_t firstInstance);
+
+ void APIDrawIndirect(BufferBase* indirectBuffer, uint64_t indirectOffset);
+ void APIDrawIndexedIndirect(BufferBase* indirectBuffer, uint64_t indirectOffset);
+
+ void APISetPipeline(RenderPipelineBase* pipeline);
+
+ void APISetVertexBuffer(uint32_t slot, BufferBase* buffer, uint64_t offset, uint64_t size);
+ void APISetIndexBuffer(BufferBase* buffer,
+ wgpu::IndexFormat format,
+ uint64_t offset,
+ uint64_t size);
+
+ void APISetBindGroup(uint32_t groupIndex,
+ BindGroupBase* group,
+ uint32_t dynamicOffsetCount = 0,
+ const uint32_t* dynamicOffsets = nullptr);
+
+ const AttachmentState* GetAttachmentState() const;
+ bool IsDepthReadOnly() const;
+ bool IsStencilReadOnly() const;
+ Ref<AttachmentState> AcquireAttachmentState();
+
+ protected:
+ // Construct an "error" render encoder base.
+ RenderEncoderBase(DeviceBase* device, EncodingContext* encodingContext, ErrorTag errorTag);
+
+ void DestroyImpl() override;
+
+ CommandBufferStateTracker mCommandBufferState;
+ RenderPassResourceUsageTracker mUsageTracker;
+ IndirectDrawMetadata mIndirectDrawMetadata;
+
+ private:
+ Ref<AttachmentState> mAttachmentState;
+ const bool mDisableBaseVertex;
+ const bool mDisableBaseInstance;
+ bool mDepthReadOnly = false;
+ bool mStencilReadOnly = false;
+};
} // namespace dawn::native
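
For reference, the public surface declared here is typically driven through Dawn's webgpu_cpp.h wrapper, whose methods forward to the API* entry points above. The sketch below records one indexed draw; it is a minimal example that assumes the pass, pipeline, and buffers were created elsewhere with the appropriate usages, and the helper name is illustrative.

#include <webgpu/webgpu_cpp.h>

// Records one indexed draw into an already-begun render pass.
void RecordIndexedDraw(const wgpu::RenderPassEncoder& pass,
                       const wgpu::RenderPipeline& pipeline,
                       const wgpu::Buffer& vertexBuffer,
                       const wgpu::Buffer& indexBuffer,
                       uint32_t indexCount) {
    pass.SetPipeline(pipeline);
    // wgpu::kWholeSize binds the rest of the buffer past the offset, matching
    // the size == kWholeSize handling in APISetVertexBuffer/APISetIndexBuffer.
    pass.SetVertexBuffer(0, vertexBuffer, 0, wgpu::kWholeSize);
    pass.SetIndexBuffer(indexBuffer, wgpu::IndexFormat::Uint16, 0, wgpu::kWholeSize);
    // Keeping baseVertex and firstInstance at zero also satisfies the checks
    // made when the DisableBaseVertex/DisableBaseInstance toggles are active.
    pass.DrawIndexed(indexCount, /*instanceCount=*/1, /*firstIndex=*/0,
                     /*baseVertex=*/0, /*firstInstance=*/0);
}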
diff --git a/chromium/third_party/dawn/src/dawn/native/RenderPassEncoder.cpp b/chromium/third_party/dawn/src/dawn/native/RenderPassEncoder.cpp
index 31e740d6890..716ce97bad4 100644
--- a/chromium/third_party/dawn/src/dawn/native/RenderPassEncoder.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/RenderPassEncoder.cpp
@@ -14,6 +14,10 @@
#include "dawn/native/RenderPassEncoder.h"
+#include <math.h>
+#include <cstring>
+#include <utility>
+
#include "dawn/common/Constants.h"
#include "dawn/native/Buffer.h"
#include "dawn/native/CommandEncoder.h"
@@ -25,400 +29,388 @@
#include "dawn/native/RenderBundle.h"
#include "dawn/native/RenderPipeline.h"
-#include <math.h>
-#include <cstring>
-
namespace dawn::native {
- namespace {
-
- // Check the query at queryIndex is unavailable, otherwise it cannot be written.
- MaybeError ValidateQueryIndexOverwrite(QuerySetBase* querySet,
- uint32_t queryIndex,
- const QueryAvailabilityMap& queryAvailabilityMap) {
- auto it = queryAvailabilityMap.find(querySet);
- DAWN_INVALID_IF(it != queryAvailabilityMap.end() && it->second[queryIndex],
- "Query index %u of %s is written to twice in a render pass.",
- queryIndex, querySet);
-
+namespace {
+
+// Check the query at queryIndex is unavailable, otherwise it cannot be written.
+MaybeError ValidateQueryIndexOverwrite(QuerySetBase* querySet,
+ uint32_t queryIndex,
+ const QueryAvailabilityMap& queryAvailabilityMap) {
+ auto it = queryAvailabilityMap.find(querySet);
+ DAWN_INVALID_IF(it != queryAvailabilityMap.end() && it->second[queryIndex],
+ "Query index %u of %s is written to twice in a render pass.", queryIndex,
+ querySet);
+
+ return {};
+}
+
+} // namespace
+
+// The usage tracker is passed in here, because it is prepopulated with usages from the
+// BeginRenderPassCmd. If we had RenderPassEncoder responsible for recording the
+// command, then this wouldn't be necessary.
+RenderPassEncoder::RenderPassEncoder(DeviceBase* device,
+ const RenderPassDescriptor* descriptor,
+ CommandEncoder* commandEncoder,
+ EncodingContext* encodingContext,
+ RenderPassResourceUsageTracker usageTracker,
+ Ref<AttachmentState> attachmentState,
+ std::vector<TimestampWrite> timestampWritesAtEnd,
+ uint32_t renderTargetWidth,
+ uint32_t renderTargetHeight,
+ bool depthReadOnly,
+ bool stencilReadOnly)
+ : RenderEncoderBase(device,
+ descriptor->label,
+ encodingContext,
+ std::move(attachmentState),
+ depthReadOnly,
+ stencilReadOnly),
+ mCommandEncoder(commandEncoder),
+ mRenderTargetWidth(renderTargetWidth),
+ mRenderTargetHeight(renderTargetHeight),
+ mOcclusionQuerySet(descriptor->occlusionQuerySet),
+ mTimestampWritesAtEnd(std::move(timestampWritesAtEnd)) {
+ mUsageTracker = std::move(usageTracker);
+ TrackInDevice();
+}
+
+// static
+Ref<RenderPassEncoder> RenderPassEncoder::Create(DeviceBase* device,
+ const RenderPassDescriptor* descriptor,
+ CommandEncoder* commandEncoder,
+ EncodingContext* encodingContext,
+ RenderPassResourceUsageTracker usageTracker,
+ Ref<AttachmentState> attachmentState,
+ std::vector<TimestampWrite> timestampWritesAtEnd,
+ uint32_t renderTargetWidth,
+ uint32_t renderTargetHeight,
+ bool depthReadOnly,
+ bool stencilReadOnly) {
+ return AcquireRef(new RenderPassEncoder(device, descriptor, commandEncoder, encodingContext,
+ std::move(usageTracker), std::move(attachmentState),
+ std::move(timestampWritesAtEnd), renderTargetWidth,
+ renderTargetHeight, depthReadOnly, stencilReadOnly));
+}
+
+RenderPassEncoder::RenderPassEncoder(DeviceBase* device,
+ CommandEncoder* commandEncoder,
+ EncodingContext* encodingContext,
+ ErrorTag errorTag)
+ : RenderEncoderBase(device, encodingContext, errorTag), mCommandEncoder(commandEncoder) {}
+
+// static
+Ref<RenderPassEncoder> RenderPassEncoder::MakeError(DeviceBase* device,
+ CommandEncoder* commandEncoder,
+ EncodingContext* encodingContext) {
+ return AcquireRef(
+ new RenderPassEncoder(device, commandEncoder, encodingContext, ObjectBase::kError));
+}
+
+void RenderPassEncoder::DestroyImpl() {
+ RenderEncoderBase::DestroyImpl();
+ // Ensure that the pass has exited. This is done for passes only since validation requires
+ // they exit before destruction while bundles do not.
+ mEncodingContext->EnsurePassExited(this);
+}
+
+ObjectType RenderPassEncoder::GetType() const {
+ return ObjectType::RenderPassEncoder;
+}
+
+void RenderPassEncoder::TrackQueryAvailability(QuerySetBase* querySet, uint32_t queryIndex) {
+ DAWN_ASSERT(querySet != nullptr);
+
+    // Track the query availability as true on the render pass, both for rewrite validation and
+    // for query reset on the render pass on Vulkan.
+ mUsageTracker.TrackQueryAvailability(querySet, queryIndex);
+
+ // Track it again on command encoder for zero-initializing when resolving unused queries.
+ mCommandEncoder->TrackQueryAvailability(querySet, queryIndex);
+}
+
+void RenderPassEncoder::APIEnd() {
+ mEncodingContext->TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (IsValidationEnabled()) {
+ DAWN_TRY(ValidateProgrammableEncoderEnd());
+
+ DAWN_INVALID_IF(
+ mOcclusionQueryActive,
+ "Render pass %s ended with incomplete occlusion query index %u of %s.", this,
+ mCurrentOcclusionQueryIndex, mOcclusionQuerySet.Get());
+ }
+
+ EndRenderPassCmd* cmd = allocator->Allocate<EndRenderPassCmd>(Command::EndRenderPass);
+            // The query availability has already been updated at the beginning of the render
+            // pass, so there is no need to update it here.
+ cmd->timestampWrites = std::move(mTimestampWritesAtEnd);
+
+ DAWN_TRY(mEncodingContext->ExitRenderPass(this, std::move(mUsageTracker),
+ mCommandEncoder.Get(),
+ std::move(mIndirectDrawMetadata)));
return {};
- }
-
- } // namespace
-
- // The usage tracker is passed in here, because it is prepopulated with usages from the
- // BeginRenderPassCmd. If we had RenderPassEncoder responsible for recording the
- // command, then this wouldn't be necessary.
- RenderPassEncoder::RenderPassEncoder(DeviceBase* device,
- const RenderPassDescriptor* descriptor,
- CommandEncoder* commandEncoder,
- EncodingContext* encodingContext,
- RenderPassResourceUsageTracker usageTracker,
- Ref<AttachmentState> attachmentState,
- std::vector<TimestampWrite> timestampWritesAtEnd,
- uint32_t renderTargetWidth,
- uint32_t renderTargetHeight,
- bool depthReadOnly,
- bool stencilReadOnly)
- : RenderEncoderBase(device,
- descriptor->label,
- encodingContext,
- std::move(attachmentState),
- depthReadOnly,
- stencilReadOnly),
- mCommandEncoder(commandEncoder),
- mRenderTargetWidth(renderTargetWidth),
- mRenderTargetHeight(renderTargetHeight),
- mOcclusionQuerySet(descriptor->occlusionQuerySet),
- mTimestampWritesAtEnd(std::move(timestampWritesAtEnd)) {
- mUsageTracker = std::move(usageTracker);
- TrackInDevice();
- }
-
- // static
- Ref<RenderPassEncoder> RenderPassEncoder::Create(
- DeviceBase* device,
- const RenderPassDescriptor* descriptor,
- CommandEncoder* commandEncoder,
- EncodingContext* encodingContext,
- RenderPassResourceUsageTracker usageTracker,
- Ref<AttachmentState> attachmentState,
- std::vector<TimestampWrite> timestampWritesAtEnd,
- uint32_t renderTargetWidth,
- uint32_t renderTargetHeight,
- bool depthReadOnly,
- bool stencilReadOnly) {
- return AcquireRef(new RenderPassEncoder(
- device, descriptor, commandEncoder, encodingContext, std::move(usageTracker),
- std::move(attachmentState), std::move(timestampWritesAtEnd), renderTargetWidth,
- renderTargetHeight, depthReadOnly, stencilReadOnly));
- }
-
- RenderPassEncoder::RenderPassEncoder(DeviceBase* device,
- CommandEncoder* commandEncoder,
- EncodingContext* encodingContext,
- ErrorTag errorTag)
- : RenderEncoderBase(device, encodingContext, errorTag), mCommandEncoder(commandEncoder) {
- }
-
- // static
- Ref<RenderPassEncoder> RenderPassEncoder::MakeError(DeviceBase* device,
- CommandEncoder* commandEncoder,
- EncodingContext* encodingContext) {
- return AcquireRef(
- new RenderPassEncoder(device, commandEncoder, encodingContext, ObjectBase::kError));
- }
-
- void RenderPassEncoder::DestroyImpl() {
- RenderEncoderBase::DestroyImpl();
- // Ensure that the pass has exited. This is done for passes only since validation requires
- // they exit before destruction while bundles do not.
- mEncodingContext->EnsurePassExited(this);
- }
-
- ObjectType RenderPassEncoder::GetType() const {
- return ObjectType::RenderPassEncoder;
- }
-
- void RenderPassEncoder::TrackQueryAvailability(QuerySetBase* querySet, uint32_t queryIndex) {
- DAWN_ASSERT(querySet != nullptr);
-
- // Track the query availability with true on render pass for rewrite validation and query
- // reset on render pass on Vulkan
- mUsageTracker.TrackQueryAvailability(querySet, queryIndex);
-
- // Track it again on command encoder for zero-initializing when resolving unused queries.
- mCommandEncoder->TrackQueryAvailability(querySet, queryIndex);
- }
-
- void RenderPassEncoder::APIEnd() {
- mEncodingContext->TryEncode(
- this,
- [&](CommandAllocator* allocator) -> MaybeError {
- if (IsValidationEnabled()) {
- DAWN_TRY(ValidateProgrammableEncoderEnd());
+ },
+ "encoding %s.End().", this);
+}
+
+void RenderPassEncoder::APIEndPass() {
+ GetDevice()->EmitDeprecationWarning("endPass() has been deprecated. Use end() instead.");
+ APIEnd();
+}
+
+void RenderPassEncoder::APISetStencilReference(uint32_t reference) {
+ mEncodingContext->TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ SetStencilReferenceCmd* cmd =
+ allocator->Allocate<SetStencilReferenceCmd>(Command::SetStencilReference);
+ cmd->reference = reference;
- DAWN_INVALID_IF(
- mOcclusionQueryActive,
- "Render pass %s ended with incomplete occlusion query index %u of %s.",
- this, mCurrentOcclusionQueryIndex, mOcclusionQuerySet.Get());
- }
-
- EndRenderPassCmd* cmd =
- allocator->Allocate<EndRenderPassCmd>(Command::EndRenderPass);
- // The query availability has already been updated at the beginning of render
- // pass, and no need to do update here.
- cmd->timestampWrites = std::move(mTimestampWritesAtEnd);
-
- DAWN_TRY(mEncodingContext->ExitRenderPass(this, std::move(mUsageTracker),
- mCommandEncoder.Get(),
- std::move(mIndirectDrawMetadata)));
- return {};
- },
- "encoding %s.End().", this);
- }
-
- void RenderPassEncoder::APIEndPass() {
- GetDevice()->EmitDeprecationWarning("endPass() has been deprecated. Use end() instead.");
- APIEnd();
- }
-
- void RenderPassEncoder::APISetStencilReference(uint32_t reference) {
- mEncodingContext->TryEncode(
- this,
- [&](CommandAllocator* allocator) -> MaybeError {
- SetStencilReferenceCmd* cmd =
- allocator->Allocate<SetStencilReferenceCmd>(Command::SetStencilReference);
- cmd->reference = reference;
-
- return {};
- },
- "encoding %s.SetStencilReference(%u).", this, reference);
- }
-
- void RenderPassEncoder::APISetBlendConstant(const Color* color) {
- mEncodingContext->TryEncode(
- this,
- [&](CommandAllocator* allocator) -> MaybeError {
- SetBlendConstantCmd* cmd =
- allocator->Allocate<SetBlendConstantCmd>(Command::SetBlendConstant);
- cmd->color = *color;
-
- return {};
- },
- "encoding %s.SetBlendConstant(%s).", this, color);
- }
-
- void RenderPassEncoder::APISetViewport(float x,
- float y,
- float width,
- float height,
- float minDepth,
- float maxDepth) {
- mEncodingContext->TryEncode(
- this,
- [&](CommandAllocator* allocator) -> MaybeError {
- if (IsValidationEnabled()) {
- DAWN_INVALID_IF(
- (isnan(x) || isnan(y) || isnan(width) || isnan(height) || isnan(minDepth) ||
- isnan(maxDepth)),
- "A parameter of the viewport (x: %f, y: %f, width: %f, height: %f, "
- "minDepth: %f, maxDepth: %f) is NaN.",
- x, y, width, height, minDepth, maxDepth);
-
- DAWN_INVALID_IF(
- x < 0 || y < 0 || width < 0 || height < 0,
- "Viewport bounds (x: %f, y: %f, width: %f, height: %f) contains a negative "
- "value.",
- x, y, width, height);
-
- DAWN_INVALID_IF(
- x + width > mRenderTargetWidth || y + height > mRenderTargetHeight,
- "Viewport bounds (x: %f, y: %f, width: %f, height: %f) are not contained "
- "in "
- "the render target dimensions (%u x %u).",
- x, y, width, height, mRenderTargetWidth, mRenderTargetHeight);
-
- // Check for depths being in [0, 1] and min <= max in 3 checks instead of 5.
- DAWN_INVALID_IF(minDepth < 0 || minDepth > maxDepth || maxDepth > 1,
- "Viewport minDepth (%f) and maxDepth (%f) are not in [0, 1] or "
- "minDepth was "
- "greater than maxDepth.",
- minDepth, maxDepth);
- }
-
- SetViewportCmd* cmd = allocator->Allocate<SetViewportCmd>(Command::SetViewport);
- cmd->x = x;
- cmd->y = y;
- cmd->width = width;
- cmd->height = height;
- cmd->minDepth = minDepth;
- cmd->maxDepth = maxDepth;
-
- return {};
- },
- "encoding %s.SetViewport(%f, %f, %f, %f, %f, %f).", this, x, y, width, height, minDepth,
- maxDepth);
- }
-
- void RenderPassEncoder::APISetScissorRect(uint32_t x,
- uint32_t y,
- uint32_t width,
- uint32_t height) {
- mEncodingContext->TryEncode(
- this,
- [&](CommandAllocator* allocator) -> MaybeError {
- if (IsValidationEnabled()) {
- DAWN_INVALID_IF(
- width > mRenderTargetWidth || height > mRenderTargetHeight ||
- x > mRenderTargetWidth - width || y > mRenderTargetHeight - height,
- "Scissor rect (x: %u, y: %u, width: %u, height: %u) is not contained in "
- "the render target dimensions (%u x %u).",
- x, y, width, height, mRenderTargetWidth, mRenderTargetHeight);
- }
-
- SetScissorRectCmd* cmd =
- allocator->Allocate<SetScissorRectCmd>(Command::SetScissorRect);
- cmd->x = x;
- cmd->y = y;
- cmd->width = width;
- cmd->height = height;
-
- return {};
- },
- "encoding %s.SetScissorRect(%u, %u, %u, %u).", this, x, y, width, height);
- }
-
- void RenderPassEncoder::APIExecuteBundles(uint32_t count,
- RenderBundleBase* const* renderBundles) {
- mEncodingContext->TryEncode(
- this,
- [&](CommandAllocator* allocator) -> MaybeError {
- if (IsValidationEnabled()) {
- const AttachmentState* attachmentState = GetAttachmentState();
- bool depthReadOnlyInPass = IsDepthReadOnly();
- bool stencilReadOnlyInPass = IsStencilReadOnly();
- for (uint32_t i = 0; i < count; ++i) {
- DAWN_TRY(GetDevice()->ValidateObject(renderBundles[i]));
-
- DAWN_INVALID_IF(attachmentState != renderBundles[i]->GetAttachmentState(),
- "Attachment state of renderBundles[%i] (%s) is not "
- "compatible with %s.\n"
- "%s expects an attachment state of %s.\n"
- "renderBundles[%i] (%s) has an attachment state of %s.",
- i, renderBundles[i], this, this, attachmentState, i,
- renderBundles[i], renderBundles[i]->GetAttachmentState());
-
- bool depthReadOnlyInBundle = renderBundles[i]->IsDepthReadOnly();
- DAWN_INVALID_IF(
- depthReadOnlyInPass && !depthReadOnlyInBundle,
- "DepthReadOnly (%u) of renderBundle[%i] (%s) is not compatible "
- "with DepthReadOnly (%u) of %s.",
- depthReadOnlyInBundle, i, renderBundles[i], depthReadOnlyInPass, this);
-
- bool stencilReadOnlyInBundle = renderBundles[i]->IsStencilReadOnly();
- DAWN_INVALID_IF(stencilReadOnlyInPass && !stencilReadOnlyInBundle,
- "StencilReadOnly (%u) of renderBundle[%i] (%s) is not "
- "compatible with StencilReadOnly (%u) of %s.",
- stencilReadOnlyInBundle, i, renderBundles[i],
- stencilReadOnlyInPass, this);
- }
- }
+ return {};
+ },
+ "encoding %s.SetStencilReference(%u).", this, reference);
+}
+
+void RenderPassEncoder::APISetBlendConstant(const Color* color) {
+ mEncodingContext->TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ SetBlendConstantCmd* cmd =
+ allocator->Allocate<SetBlendConstantCmd>(Command::SetBlendConstant);
+ cmd->color = *color;
- mCommandBufferState = CommandBufferStateTracker{};
+ return {};
+ },
+ "encoding %s.SetBlendConstant(%s).", this, color);
+}
+
+void RenderPassEncoder::APISetViewport(float x,
+ float y,
+ float width,
+ float height,
+ float minDepth,
+ float maxDepth) {
+ mEncodingContext->TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (IsValidationEnabled()) {
+ DAWN_INVALID_IF((isnan(x) || isnan(y) || isnan(width) || isnan(height) ||
+ isnan(minDepth) || isnan(maxDepth)),
+ "A parameter of the viewport (x: %f, y: %f, width: %f, height: %f, "
+ "minDepth: %f, maxDepth: %f) is NaN.",
+ x, y, width, height, minDepth, maxDepth);
+
+ DAWN_INVALID_IF(
+ x < 0 || y < 0 || width < 0 || height < 0,
+ "Viewport bounds (x: %f, y: %f, width: %f, height: %f) contains a negative "
+ "value.",
+ x, y, width, height);
+
+ DAWN_INVALID_IF(
+ x + width > mRenderTargetWidth || y + height > mRenderTargetHeight,
+ "Viewport bounds (x: %f, y: %f, width: %f, height: %f) are not contained "
+ "in "
+ "the render target dimensions (%u x %u).",
+ x, y, width, height, mRenderTargetWidth, mRenderTargetHeight);
+
+ // Check for depths being in [0, 1] and min <= max in 3 checks instead of 5.
+ DAWN_INVALID_IF(minDepth < 0 || minDepth > maxDepth || maxDepth > 1,
+ "Viewport minDepth (%f) and maxDepth (%f) are not in [0, 1] or "
+ "minDepth was "
+ "greater than maxDepth.",
+ minDepth, maxDepth);
+ }
+
+ SetViewportCmd* cmd = allocator->Allocate<SetViewportCmd>(Command::SetViewport);
+ cmd->x = x;
+ cmd->y = y;
+ cmd->width = width;
+ cmd->height = height;
+ cmd->minDepth = minDepth;
+ cmd->maxDepth = maxDepth;
- ExecuteBundlesCmd* cmd =
- allocator->Allocate<ExecuteBundlesCmd>(Command::ExecuteBundles);
- cmd->count = count;
+ return {};
+ },
+ "encoding %s.SetViewport(%f, %f, %f, %f, %f, %f).", this, x, y, width, height, minDepth,
+ maxDepth);
+}
+
+void RenderPassEncoder::APISetScissorRect(uint32_t x, uint32_t y, uint32_t width, uint32_t height) {
+ mEncodingContext->TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (IsValidationEnabled()) {
+ DAWN_INVALID_IF(
+ width > mRenderTargetWidth || height > mRenderTargetHeight ||
+ x > mRenderTargetWidth - width || y > mRenderTargetHeight - height,
+ "Scissor rect (x: %u, y: %u, width: %u, height: %u) is not contained in "
+ "the render target dimensions (%u x %u).",
+ x, y, width, height, mRenderTargetWidth, mRenderTargetHeight);
+ }
+
+ SetScissorRectCmd* cmd =
+ allocator->Allocate<SetScissorRectCmd>(Command::SetScissorRect);
+ cmd->x = x;
+ cmd->y = y;
+ cmd->width = width;
+ cmd->height = height;
- Ref<RenderBundleBase>* bundles =
- allocator->AllocateData<Ref<RenderBundleBase>>(count);
+ return {};
+ },
+ "encoding %s.SetScissorRect(%u, %u, %u, %u).", this, x, y, width, height);
+}
+
+void RenderPassEncoder::APIExecuteBundles(uint32_t count, RenderBundleBase* const* renderBundles) {
+ mEncodingContext->TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (IsValidationEnabled()) {
+ const AttachmentState* attachmentState = GetAttachmentState();
+ bool depthReadOnlyInPass = IsDepthReadOnly();
+ bool stencilReadOnlyInPass = IsStencilReadOnly();
for (uint32_t i = 0; i < count; ++i) {
- bundles[i] = renderBundles[i];
-
- const RenderPassResourceUsage& usages = bundles[i]->GetResourceUsage();
- for (uint32_t i = 0; i < usages.buffers.size(); ++i) {
- mUsageTracker.BufferUsedAs(usages.buffers[i], usages.bufferUsages[i]);
- }
-
- for (uint32_t i = 0; i < usages.textures.size(); ++i) {
- mUsageTracker.AddRenderBundleTextureUsage(usages.textures[i],
- usages.textureUsages[i]);
- }
-
- if (IsValidationEnabled()) {
- mIndirectDrawMetadata.AddBundle(renderBundles[i]);
- }
+ DAWN_TRY(GetDevice()->ValidateObject(renderBundles[i]));
+
+ DAWN_INVALID_IF(attachmentState != renderBundles[i]->GetAttachmentState(),
+ "Attachment state of renderBundles[%i] (%s) is not "
+ "compatible with %s.\n"
+ "%s expects an attachment state of %s.\n"
+ "renderBundles[%i] (%s) has an attachment state of %s.",
+ i, renderBundles[i], this, this, attachmentState, i,
+ renderBundles[i], renderBundles[i]->GetAttachmentState());
+
+ bool depthReadOnlyInBundle = renderBundles[i]->IsDepthReadOnly();
+ DAWN_INVALID_IF(depthReadOnlyInPass && !depthReadOnlyInBundle,
+ "DepthReadOnly (%u) of renderBundle[%i] (%s) is not compatible "
+ "with DepthReadOnly (%u) of %s.",
+ depthReadOnlyInBundle, i, renderBundles[i], depthReadOnlyInPass,
+ this);
+
+ bool stencilReadOnlyInBundle = renderBundles[i]->IsStencilReadOnly();
+ DAWN_INVALID_IF(stencilReadOnlyInPass && !stencilReadOnlyInBundle,
+ "StencilReadOnly (%u) of renderBundle[%i] (%s) is not "
+ "compatible with StencilReadOnly (%u) of %s.",
+ stencilReadOnlyInBundle, i, renderBundles[i],
+ stencilReadOnlyInPass, this);
}
+ }
- return {};
- },
- "encoding %s.ExecuteBundles(%u, ...).", this, count);
- }
-
- void RenderPassEncoder::APIBeginOcclusionQuery(uint32_t queryIndex) {
- mEncodingContext->TryEncode(
- this,
- [&](CommandAllocator* allocator) -> MaybeError {
- if (IsValidationEnabled()) {
- DAWN_INVALID_IF(mOcclusionQuerySet.Get() == nullptr,
- "The occlusionQuerySet in RenderPassDescriptor is not set.");
-
- // The type of querySet has been validated by ValidateRenderPassDescriptor
+ mCommandBufferState = CommandBufferStateTracker{};
- DAWN_INVALID_IF(queryIndex >= mOcclusionQuerySet->GetQueryCount(),
- "Query index (%u) exceeds the number of queries (%u) in %s.",
- queryIndex, mOcclusionQuerySet->GetQueryCount(),
- mOcclusionQuerySet.Get());
+ ExecuteBundlesCmd* cmd =
+ allocator->Allocate<ExecuteBundlesCmd>(Command::ExecuteBundles);
+ cmd->count = count;
- DAWN_INVALID_IF(mOcclusionQueryActive,
- "An occlusion query (%u) in %s is already active.",
- mCurrentOcclusionQueryIndex, mOcclusionQuerySet.Get());
+ Ref<RenderBundleBase>* bundles = allocator->AllocateData<Ref<RenderBundleBase>>(count);
+ for (uint32_t i = 0; i < count; ++i) {
+ bundles[i] = renderBundles[i];
- DAWN_TRY_CONTEXT(
- ValidateQueryIndexOverwrite(mOcclusionQuerySet.Get(), queryIndex,
- mUsageTracker.GetQueryAvailabilityMap()),
- "validating the occlusion query index (%u) in %s", queryIndex,
- mOcclusionQuerySet.Get());
+ const RenderPassResourceUsage& usages = bundles[i]->GetResourceUsage();
+ for (uint32_t i = 0; i < usages.buffers.size(); ++i) {
+ mUsageTracker.BufferUsedAs(usages.buffers[i], usages.bufferUsages[i]);
}
- // Record the current query index for endOcclusionQuery.
- mCurrentOcclusionQueryIndex = queryIndex;
- mOcclusionQueryActive = true;
-
- BeginOcclusionQueryCmd* cmd =
- allocator->Allocate<BeginOcclusionQueryCmd>(Command::BeginOcclusionQuery);
- cmd->querySet = mOcclusionQuerySet.Get();
- cmd->queryIndex = queryIndex;
-
- return {};
- },
- "encoding %s.BeginOcclusionQuery(%u).", this, queryIndex);
- }
+ for (uint32_t i = 0; i < usages.textures.size(); ++i) {
+ mUsageTracker.AddRenderBundleTextureUsage(usages.textures[i],
+ usages.textureUsages[i]);
+ }
- void RenderPassEncoder::APIEndOcclusionQuery() {
- mEncodingContext->TryEncode(
- this,
- [&](CommandAllocator* allocator) -> MaybeError {
if (IsValidationEnabled()) {
- DAWN_INVALID_IF(!mOcclusionQueryActive, "No occlusion queries are active.");
+ mIndirectDrawMetadata.AddBundle(renderBundles[i]);
}
+ }
- TrackQueryAvailability(mOcclusionQuerySet.Get(), mCurrentOcclusionQueryIndex);
+ return {};
+ },
+ "encoding %s.ExecuteBundles(%u, ...).", this, count);
+}
+
+void RenderPassEncoder::APIBeginOcclusionQuery(uint32_t queryIndex) {
+ mEncodingContext->TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (IsValidationEnabled()) {
+ DAWN_INVALID_IF(mOcclusionQuerySet.Get() == nullptr,
+ "The occlusionQuerySet in RenderPassDescriptor is not set.");
+
+ // The type of querySet has been validated by ValidateRenderPassDescriptor
+
+ DAWN_INVALID_IF(queryIndex >= mOcclusionQuerySet->GetQueryCount(),
+ "Query index (%u) exceeds the number of queries (%u) in %s.",
+ queryIndex, mOcclusionQuerySet->GetQueryCount(),
+ mOcclusionQuerySet.Get());
+
+ DAWN_INVALID_IF(mOcclusionQueryActive,
+ "An occlusion query (%u) in %s is already active.",
+ mCurrentOcclusionQueryIndex, mOcclusionQuerySet.Get());
+
+ DAWN_TRY_CONTEXT(
+ ValidateQueryIndexOverwrite(mOcclusionQuerySet.Get(), queryIndex,
+ mUsageTracker.GetQueryAvailabilityMap()),
+ "validating the occlusion query index (%u) in %s", queryIndex,
+ mOcclusionQuerySet.Get());
+ }
+
+ // Record the current query index for endOcclusionQuery.
+ mCurrentOcclusionQueryIndex = queryIndex;
+ mOcclusionQueryActive = true;
+
+ BeginOcclusionQueryCmd* cmd =
+ allocator->Allocate<BeginOcclusionQueryCmd>(Command::BeginOcclusionQuery);
+ cmd->querySet = mOcclusionQuerySet.Get();
+ cmd->queryIndex = queryIndex;
- mOcclusionQueryActive = false;
+ return {};
+ },
+ "encoding %s.BeginOcclusionQuery(%u).", this, queryIndex);
+}
- EndOcclusionQueryCmd* cmd =
- allocator->Allocate<EndOcclusionQueryCmd>(Command::EndOcclusionQuery);
- cmd->querySet = mOcclusionQuerySet.Get();
- cmd->queryIndex = mCurrentOcclusionQueryIndex;
+void RenderPassEncoder::APIEndOcclusionQuery() {
+ mEncodingContext->TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (IsValidationEnabled()) {
+ DAWN_INVALID_IF(!mOcclusionQueryActive, "No occlusion queries are active.");
+ }
- return {};
- },
- "encoding %s.EndOcclusionQuery().", this);
- }
+ TrackQueryAvailability(mOcclusionQuerySet.Get(), mCurrentOcclusionQueryIndex);
- void RenderPassEncoder::APIWriteTimestamp(QuerySetBase* querySet, uint32_t queryIndex) {
- mEncodingContext->TryEncode(
- this,
- [&](CommandAllocator* allocator) -> MaybeError {
- if (IsValidationEnabled()) {
- DAWN_TRY(ValidateTimestampQuery(GetDevice(), querySet, queryIndex));
- DAWN_TRY_CONTEXT(
- ValidateQueryIndexOverwrite(querySet, queryIndex,
- mUsageTracker.GetQueryAvailabilityMap()),
- "validating the timestamp query index (%u) of %s", queryIndex, querySet);
- }
+ mOcclusionQueryActive = false;
- TrackQueryAvailability(querySet, queryIndex);
+ EndOcclusionQueryCmd* cmd =
+ allocator->Allocate<EndOcclusionQueryCmd>(Command::EndOcclusionQuery);
+ cmd->querySet = mOcclusionQuerySet.Get();
+ cmd->queryIndex = mCurrentOcclusionQueryIndex;
- WriteTimestampCmd* cmd =
- allocator->Allocate<WriteTimestampCmd>(Command::WriteTimestamp);
- cmd->querySet = querySet;
- cmd->queryIndex = queryIndex;
+ return {};
+ },
+ "encoding %s.EndOcclusionQuery().", this);
+}
+
+void RenderPassEncoder::APIWriteTimestamp(QuerySetBase* querySet, uint32_t queryIndex) {
+ mEncodingContext->TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (IsValidationEnabled()) {
+ DAWN_TRY(ValidateTimestampQuery(GetDevice(), querySet, queryIndex));
+ DAWN_TRY_CONTEXT(ValidateQueryIndexOverwrite(
+ querySet, queryIndex, mUsageTracker.GetQueryAvailabilityMap()),
+ "validating the timestamp query index (%u) of %s", queryIndex,
+ querySet);
+ }
+
+ TrackQueryAvailability(querySet, queryIndex);
+
+ WriteTimestampCmd* cmd =
+ allocator->Allocate<WriteTimestampCmd>(Command::WriteTimestamp);
+ cmd->querySet = querySet;
+ cmd->queryIndex = queryIndex;
- return {};
- },
- "encoding %s.WriteTimestamp(%s, %u).", this, querySet, queryIndex);
- }
+ return {};
+ },
+ "encoding %s.WriteTimestamp(%s, %u).", this, querySet, queryIndex);
+}
} // namespace dawn::native
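
The occlusion query path above (APIBeginOcclusionQuery, APIEndOcclusionQuery, and the availability tracking) is exercised from the API roughly as in the following sketch, written against the webgpu_cpp.h wrapper. It assumes the caller created the resolve buffer with QueryResolve usage and records its own draws where indicated; the helper names are illustrative and error handling is omitted.

#include <webgpu/webgpu_cpp.h>

// Creates an occlusion query set with the requested number of queries.
wgpu::QuerySet MakeOcclusionQuerySet(const wgpu::Device& device, uint32_t queryCount) {
    wgpu::QuerySetDescriptor desc;
    desc.type = wgpu::QueryType::Occlusion;
    desc.count = queryCount;
    return device.CreateQuerySet(&desc);
}

// Wraps the draws of one pass in occlusion query 0 and resolves the result.
void RecordQueriedPass(const wgpu::CommandEncoder& encoder,
                       wgpu::RenderPassDescriptor passDesc,
                       const wgpu::QuerySet& querySet,
                       const wgpu::Buffer& resolveBuffer) {
    // The query set must be attached to the pass descriptor, otherwise
    // BeginOcclusionQuery fails validation (see APIBeginOcclusionQuery).
    passDesc.occlusionQuerySet = querySet;
    wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&passDesc);
    pass.BeginOcclusionQuery(0);
    // ... SetPipeline / SetVertexBuffer / Draw calls go here ...
    pass.EndOcclusionQuery();
    pass.End();
    // Copies the 64-bit sample count of query 0 into the resolve buffer.
    encoder.ResolveQuerySet(querySet, 0, 1, resolveBuffer, 0);
}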
diff --git a/chromium/third_party/dawn/src/dawn/native/RenderPassEncoder.h b/chromium/third_party/dawn/src/dawn/native/RenderPassEncoder.h
index 757b46b6589..ad4c1300aa5 100644
--- a/chromium/third_party/dawn/src/dawn/native/RenderPassEncoder.h
+++ b/chromium/third_party/dawn/src/dawn/native/RenderPassEncoder.h
@@ -15,88 +15,90 @@
#ifndef SRC_DAWN_NATIVE_RENDERPASSENCODER_H_
#define SRC_DAWN_NATIVE_RENDERPASSENCODER_H_
+#include <vector>
+
#include "dawn/native/Error.h"
#include "dawn/native/Forward.h"
#include "dawn/native/RenderEncoderBase.h"
namespace dawn::native {
- class RenderBundleBase;
-
- class RenderPassEncoder final : public RenderEncoderBase {
- public:
- static Ref<RenderPassEncoder> Create(DeviceBase* device,
- const RenderPassDescriptor* descriptor,
- CommandEncoder* commandEncoder,
- EncodingContext* encodingContext,
- RenderPassResourceUsageTracker usageTracker,
- Ref<AttachmentState> attachmentState,
- std::vector<TimestampWrite> timestampWritesAtEnd,
- uint32_t renderTargetWidth,
- uint32_t renderTargetHeight,
- bool depthReadOnly,
- bool stencilReadOnly);
- static Ref<RenderPassEncoder> MakeError(DeviceBase* device,
- CommandEncoder* commandEncoder,
- EncodingContext* encodingContext);
-
- ObjectType GetType() const override;
-
- void APIEnd();
- void APIEndPass(); // TODO(dawn:1286): Remove after deprecation period.
-
- void APISetStencilReference(uint32_t reference);
- void APISetBlendConstant(const Color* color);
- void APISetViewport(float x,
- float y,
- float width,
- float height,
- float minDepth,
- float maxDepth);
- void APISetScissorRect(uint32_t x, uint32_t y, uint32_t width, uint32_t height);
- void APIExecuteBundles(uint32_t count, RenderBundleBase* const* renderBundles);
-
- void APIBeginOcclusionQuery(uint32_t queryIndex);
- void APIEndOcclusionQuery();
-
- void APIWriteTimestamp(QuerySetBase* querySet, uint32_t queryIndex);
-
- protected:
- RenderPassEncoder(DeviceBase* device,
- const RenderPassDescriptor* descriptor,
- CommandEncoder* commandEncoder,
- EncodingContext* encodingContext,
- RenderPassResourceUsageTracker usageTracker,
- Ref<AttachmentState> attachmentState,
- std::vector<TimestampWrite> timestampWritesAtEnd,
- uint32_t renderTargetWidth,
- uint32_t renderTargetHeight,
- bool depthReadOnly,
- bool stencilReadOnly);
- RenderPassEncoder(DeviceBase* device,
- CommandEncoder* commandEncoder,
- EncodingContext* encodingContext,
- ErrorTag errorTag);
-
- private:
- void DestroyImpl() override;
-
- void TrackQueryAvailability(QuerySetBase* querySet, uint32_t queryIndex);
-
- // For render and compute passes, the encoding context is borrowed from the command encoder.
- // Keep a reference to the encoder to make sure the context isn't freed.
- Ref<CommandEncoder> mCommandEncoder;
-
- uint32_t mRenderTargetWidth;
- uint32_t mRenderTargetHeight;
-
- // The resources for occlusion query
- Ref<QuerySetBase> mOcclusionQuerySet;
- uint32_t mCurrentOcclusionQueryIndex = 0;
- bool mOcclusionQueryActive = false;
-
- std::vector<TimestampWrite> mTimestampWritesAtEnd;
- };
+class RenderBundleBase;
+
+class RenderPassEncoder final : public RenderEncoderBase {
+ public:
+ static Ref<RenderPassEncoder> Create(DeviceBase* device,
+ const RenderPassDescriptor* descriptor,
+ CommandEncoder* commandEncoder,
+ EncodingContext* encodingContext,
+ RenderPassResourceUsageTracker usageTracker,
+ Ref<AttachmentState> attachmentState,
+ std::vector<TimestampWrite> timestampWritesAtEnd,
+ uint32_t renderTargetWidth,
+ uint32_t renderTargetHeight,
+ bool depthReadOnly,
+ bool stencilReadOnly);
+ static Ref<RenderPassEncoder> MakeError(DeviceBase* device,
+ CommandEncoder* commandEncoder,
+ EncodingContext* encodingContext);
+
+ ObjectType GetType() const override;
+
+ void APIEnd();
+ void APIEndPass(); // TODO(dawn:1286): Remove after deprecation period.
+
+ void APISetStencilReference(uint32_t reference);
+ void APISetBlendConstant(const Color* color);
+ void APISetViewport(float x,
+ float y,
+ float width,
+ float height,
+ float minDepth,
+ float maxDepth);
+ void APISetScissorRect(uint32_t x, uint32_t y, uint32_t width, uint32_t height);
+ void APIExecuteBundles(uint32_t count, RenderBundleBase* const* renderBundles);
+
+ void APIBeginOcclusionQuery(uint32_t queryIndex);
+ void APIEndOcclusionQuery();
+
+ void APIWriteTimestamp(QuerySetBase* querySet, uint32_t queryIndex);
+
+ protected:
+ RenderPassEncoder(DeviceBase* device,
+ const RenderPassDescriptor* descriptor,
+ CommandEncoder* commandEncoder,
+ EncodingContext* encodingContext,
+ RenderPassResourceUsageTracker usageTracker,
+ Ref<AttachmentState> attachmentState,
+ std::vector<TimestampWrite> timestampWritesAtEnd,
+ uint32_t renderTargetWidth,
+ uint32_t renderTargetHeight,
+ bool depthReadOnly,
+ bool stencilReadOnly);
+ RenderPassEncoder(DeviceBase* device,
+ CommandEncoder* commandEncoder,
+ EncodingContext* encodingContext,
+ ErrorTag errorTag);
+
+ private:
+ void DestroyImpl() override;
+
+ void TrackQueryAvailability(QuerySetBase* querySet, uint32_t queryIndex);
+
+ // For render and compute passes, the encoding context is borrowed from the command encoder.
+ // Keep a reference to the encoder to make sure the context isn't freed.
+ Ref<CommandEncoder> mCommandEncoder;
+
+ uint32_t mRenderTargetWidth;
+ uint32_t mRenderTargetHeight;
+
+ // The resources for occlusion query
+ Ref<QuerySetBase> mOcclusionQuerySet;
+ uint32_t mCurrentOcclusionQueryIndex = 0;
+ bool mOcclusionQueryActive = false;
+
+ std::vector<TimestampWrite> mTimestampWritesAtEnd;
+};
} // namespace dawn::native
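
The pass-only methods declared here (viewport, scissor, bundle execution) map onto the validation shown in RenderPassEncoder.cpp: viewport and scissor rectangles must stay inside the render target, viewport depths must satisfy 0 <= minDepth <= maxDepth <= 1, and executed bundles must have an attachment state compatible with the pass. Below is a minimal usage sketch against the webgpu_cpp.h wrapper, assuming the pass and bundles are recorded elsewhere; the helper name is illustrative.

#include <cstdint>
#include <vector>

#include <webgpu/webgpu_cpp.h>

// Sets full-target viewport/scissor state and replays pre-recorded bundles.
void SetPassStateAndReplay(const wgpu::RenderPassEncoder& pass,
                           uint32_t targetWidth,
                           uint32_t targetHeight,
                           const std::vector<wgpu::RenderBundle>& bundles) {
    pass.SetViewport(0.0f, 0.0f, static_cast<float>(targetWidth),
                     static_cast<float>(targetHeight), /*minDepth=*/0.0f, /*maxDepth=*/1.0f);
    pass.SetScissorRect(0, 0, targetWidth, targetHeight);
    // Bundles that write depth/stencil are rejected if the pass is
    // depth/stencil read-only (see APIExecuteBundles).
    pass.ExecuteBundles(static_cast<uint32_t>(bundles.size()), bundles.data());
}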
diff --git a/chromium/third_party/dawn/src/dawn/native/RenderPipeline.cpp b/chromium/third_party/dawn/src/dawn/native/RenderPipeline.cpp
index c3410047b51..3ec6bdcd3ec 100644
--- a/chromium/third_party/dawn/src/dawn/native/RenderPipeline.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/RenderPipeline.cpp
@@ -14,6 +14,10 @@
#include "dawn/native/RenderPipeline.h"
+#include <algorithm>
+#include <cmath>
+#include <sstream>
+
#include "dawn/common/BitSetIterator.h"
#include "dawn/native/ChainUtils_autogen.h"
#include "dawn/native/Commands.h"
@@ -24,991 +28,968 @@
#include "dawn/native/ValidationUtils_autogen.h"
#include "dawn/native/VertexFormat.h"
-#include <cmath>
-#include <sstream>
-
namespace dawn::native {
- // Helper functions
- namespace {
- MaybeError ValidateVertexAttribute(
- DeviceBase* device,
- const VertexAttribute* attribute,
- const EntryPointMetadata& metadata,
- uint64_t vertexBufferStride,
- ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes>* attributesSetMask) {
- DAWN_TRY(ValidateVertexFormat(attribute->format));
- const VertexFormatInfo& formatInfo = GetVertexFormatInfo(attribute->format);
-
- DAWN_INVALID_IF(
- attribute->shaderLocation >= kMaxVertexAttributes,
- "Attribute shader location (%u) exceeds the maximum number of vertex attributes "
- "(%u).",
- attribute->shaderLocation, kMaxVertexAttributes);
-
- VertexAttributeLocation location(static_cast<uint8_t>(attribute->shaderLocation));
-
- // No underflow is possible because the max vertex format size is smaller than
- // kMaxVertexBufferArrayStride.
- ASSERT(kMaxVertexBufferArrayStride >= formatInfo.byteSize);
- DAWN_INVALID_IF(
- attribute->offset > kMaxVertexBufferArrayStride - formatInfo.byteSize,
- "Attribute offset (%u) with format %s (size: %u) doesn't fit in the maximum vertex "
- "buffer stride (%u).",
- attribute->offset, attribute->format, formatInfo.byteSize,
- kMaxVertexBufferArrayStride);
-
- // No overflow is possible because the offset is already validated to be less
- // than kMaxVertexBufferArrayStride.
- ASSERT(attribute->offset < kMaxVertexBufferArrayStride);
- DAWN_INVALID_IF(
- vertexBufferStride > 0 &&
- attribute->offset + formatInfo.byteSize > vertexBufferStride,
- "Attribute offset (%u) with format %s (size: %u) doesn't fit in the vertex buffer "
- "stride (%u).",
- attribute->offset, attribute->format, formatInfo.byteSize, vertexBufferStride);
-
- DAWN_INVALID_IF(attribute->offset % std::min(4u, formatInfo.byteSize) != 0,
- "Attribute offset (%u) in not a multiple of %u.", attribute->offset,
- std::min(4u, formatInfo.byteSize));
-
- DAWN_INVALID_IF(metadata.usedVertexInputs[location] &&
- formatInfo.baseType != metadata.vertexInputBaseTypes[location],
- "Attribute base type (%s) does not match the "
- "shader's base type (%s) in location (%u).",
- formatInfo.baseType, metadata.vertexInputBaseTypes[location],
- attribute->shaderLocation);
-
- DAWN_INVALID_IF((*attributesSetMask)[location],
- "Attribute shader location (%u) is used more than once.",
- attribute->shaderLocation);
-
- attributesSetMask->set(location);
- return {};
- }
-
- MaybeError ValidateVertexBufferLayout(
- DeviceBase* device,
- const VertexBufferLayout* buffer,
- const EntryPointMetadata& metadata,
- ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes>* attributesSetMask) {
- DAWN_TRY(ValidateVertexStepMode(buffer->stepMode));
- DAWN_INVALID_IF(
- buffer->arrayStride > kMaxVertexBufferArrayStride,
- "Vertex buffer arrayStride (%u) is larger than the maximum array stride (%u).",
- buffer->arrayStride, kMaxVertexBufferArrayStride);
-
- DAWN_INVALID_IF(buffer->arrayStride % 4 != 0,
- "Vertex buffer arrayStride (%u) is not a multiple of 4.",
- buffer->arrayStride);
-
- for (uint32_t i = 0; i < buffer->attributeCount; ++i) {
- DAWN_TRY_CONTEXT(ValidateVertexAttribute(device, &buffer->attributes[i], metadata,
- buffer->arrayStride, attributesSetMask),
- "validating attributes[%u].", i);
+// Helper functions
+namespace {
+MaybeError ValidateVertexAttribute(
+ DeviceBase* device,
+ const VertexAttribute* attribute,
+ const EntryPointMetadata& metadata,
+ uint64_t vertexBufferStride,
+ ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes>* attributesSetMask) {
+ DAWN_TRY(ValidateVertexFormat(attribute->format));
+ const VertexFormatInfo& formatInfo = GetVertexFormatInfo(attribute->format);
+
+ DAWN_INVALID_IF(
+ attribute->shaderLocation >= kMaxVertexAttributes,
+ "Attribute shader location (%u) exceeds the maximum number of vertex attributes "
+ "(%u).",
+ attribute->shaderLocation, kMaxVertexAttributes);
+
+ VertexAttributeLocation location(static_cast<uint8_t>(attribute->shaderLocation));
+
+ // No underflow is possible because the max vertex format size is smaller than
+ // kMaxVertexBufferArrayStride.
+ ASSERT(kMaxVertexBufferArrayStride >= formatInfo.byteSize);
+ DAWN_INVALID_IF(
+ attribute->offset > kMaxVertexBufferArrayStride - formatInfo.byteSize,
+ "Attribute offset (%u) with format %s (size: %u) doesn't fit in the maximum vertex "
+ "buffer stride (%u).",
+ attribute->offset, attribute->format, formatInfo.byteSize, kMaxVertexBufferArrayStride);
+
+ // No overflow is possible because the offset is already validated to be less
+ // than kMaxVertexBufferArrayStride.
+ ASSERT(attribute->offset < kMaxVertexBufferArrayStride);
+ DAWN_INVALID_IF(
+ vertexBufferStride > 0 && attribute->offset + formatInfo.byteSize > vertexBufferStride,
+ "Attribute offset (%u) with format %s (size: %u) doesn't fit in the vertex buffer "
+ "stride (%u).",
+ attribute->offset, attribute->format, formatInfo.byteSize, vertexBufferStride);
+
+ DAWN_INVALID_IF(attribute->offset % std::min(4u, formatInfo.byteSize) != 0,
+                    "Attribute offset (%u) is not a multiple of %u.", attribute->offset,
+ std::min(4u, formatInfo.byteSize));
+
+ DAWN_INVALID_IF(metadata.usedVertexInputs[location] &&
+ formatInfo.baseType != metadata.vertexInputBaseTypes[location],
+ "Attribute base type (%s) does not match the "
+ "shader's base type (%s) in location (%u).",
+ formatInfo.baseType, metadata.vertexInputBaseTypes[location],
+ attribute->shaderLocation);
+
+ DAWN_INVALID_IF((*attributesSetMask)[location],
+ "Attribute shader location (%u) is used more than once.",
+ attribute->shaderLocation);
+
+ attributesSetMask->set(location);
+ return {};
+}
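
ValidateVertexAttribute checks attribute->offset against kMaxVertexBufferArrayStride - formatInfo.byteSize instead of adding offset and byteSize, so the comparison stays within range: the preceding ASSERT guarantees the subtraction cannot underflow, while a naive addition could wrap around for a very large offset. Below is a minimal standalone sketch of that idiom; kMaxStride and FitsInStride are illustrative names, not Dawn's real constants or helpers.

    #include <cassert>
    #include <cstdint>
    #include <iostream>

    // Hypothetical limit for illustration only; not Dawn's real constant.
    constexpr uint16_t kMaxStride = 2048;

    // Returns true if [offset, offset + size) fits inside one stride of kMaxStride
    // bytes. Written as a subtraction so the check is safe even for a huge offset:
    // offset + size could wrap around, while kMaxStride - size cannot underflow
    // because size <= kMaxStride (asserted below).
    bool FitsInStride(uint64_t offset, uint16_t size) {
        assert(size <= kMaxStride);
        return offset <= uint64_t(kMaxStride) - size;
    }

    int main() {
        std::cout << std::boolalpha;
        std::cout << FitsInStride(2044, 4) << "\n";        // true: ends exactly at 2048
        std::cout << FitsInStride(2045, 4) << "\n";        // false: would overrun
        std::cout << FitsInStride(UINT64_MAX, 4) << "\n";  // false: no wraparound
        return 0;
    }
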
+
+MaybeError ValidateVertexBufferLayout(
+ DeviceBase* device,
+ const VertexBufferLayout* buffer,
+ const EntryPointMetadata& metadata,
+ ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes>* attributesSetMask) {
+ DAWN_TRY(ValidateVertexStepMode(buffer->stepMode));
+ DAWN_INVALID_IF(buffer->arrayStride > kMaxVertexBufferArrayStride,
+ "Vertex buffer arrayStride (%u) is larger than the maximum array stride (%u).",
+ buffer->arrayStride, kMaxVertexBufferArrayStride);
+
+ DAWN_INVALID_IF(buffer->arrayStride % 4 != 0,
+ "Vertex buffer arrayStride (%u) is not a multiple of 4.", buffer->arrayStride);
+
+ for (uint32_t i = 0; i < buffer->attributeCount; ++i) {
+ DAWN_TRY_CONTEXT(ValidateVertexAttribute(device, &buffer->attributes[i], metadata,
+ buffer->arrayStride, attributesSetMask),
+ "validating attributes[%u].", i);
+ }
+
+ return {};
+}
+
+MaybeError ValidateVertexState(DeviceBase* device,
+ const VertexState* descriptor,
+ const PipelineLayoutBase* layout) {
+ DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr.");
+
+ DAWN_INVALID_IF(descriptor->bufferCount > kMaxVertexBuffers,
+ "Vertex buffer count (%u) exceeds the maximum number of vertex buffers (%u).",
+ descriptor->bufferCount, kMaxVertexBuffers);
+
+ DAWN_TRY_CONTEXT(ValidateProgrammableStage(device, descriptor->module, descriptor->entryPoint,
+ descriptor->constantCount, descriptor->constants,
+ layout, SingleShaderStage::Vertex),
+ "validating vertex stage (module: %s, entryPoint: %s).", descriptor->module,
+ descriptor->entryPoint);
+ const EntryPointMetadata& vertexMetadata =
+ descriptor->module->GetEntryPoint(descriptor->entryPoint);
+
+ ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes> attributesSetMask;
+ uint32_t totalAttributesNum = 0;
+ for (uint32_t i = 0; i < descriptor->bufferCount; ++i) {
+ DAWN_TRY_CONTEXT(ValidateVertexBufferLayout(device, &descriptor->buffers[i], vertexMetadata,
+ &attributesSetMask),
+ "validating buffers[%u].", i);
+ totalAttributesNum += descriptor->buffers[i].attributeCount;
+ }
+
+    // Every vertex attribute has a shaderLocation member with the following requirements:
+    // 1) it is >= 0, 2) it is unique across attributes, and 3) it cannot exceed
+    // kMaxVertexAttributes. Together these guarantee that the total number of attributes
+    // never exceeds kMaxVertexAttributes.
+ ASSERT(totalAttributesNum <= kMaxVertexAttributes);
+
+ // TODO(dawn:563): Specify which inputs were not used in error message.
+ DAWN_INVALID_IF(!IsSubset(vertexMetadata.usedVertexInputs, attributesSetMask),
+ "Pipeline vertex stage uses vertex buffers not in the vertex state");
+
+ return {};
+}
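
ValidateVertexState accumulates every declared attribute location into a bitset and then requires the shader's used vertex inputs to be a subset of that mask. The subset test is plain bit arithmetic; the sketch below shows it with std::bitset and a hypothetical IsSubset helper (Dawn's ityp::bitset and IsSubset utilities are not reproduced here).

    #include <bitset>
    #include <cstddef>
    #include <iostream>

    constexpr std::size_t kMaxVertexAttributes = 16;
    using AttributeMask = std::bitset<kMaxVertexAttributes>;

    // 'sub' is a subset of 'super' iff no bit of 'sub' falls outside 'super'.
    bool IsSubset(const AttributeMask& sub, const AttributeMask& super) {
        return (sub & ~super).none();
    }

    int main() {
        AttributeMask providedByPipeline;  // Locations declared across the vertex buffers.
        providedByPipeline.set(0);
        providedByPipeline.set(1);
        providedByPipeline.set(3);

        AttributeMask usedByShader;  // Locations the vertex shader actually reads.
        usedByShader.set(0);
        usedByShader.set(3);

        std::cout << std::boolalpha;
        std::cout << IsSubset(usedByShader, providedByPipeline) << "\n";  // true

        usedByShader.set(5);  // The shader now reads a location the pipeline never provides.
        std::cout << IsSubset(usedByShader, providedByPipeline) << "\n";  // false
        return 0;
    }
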
+
+MaybeError ValidatePrimitiveState(const DeviceBase* device, const PrimitiveState* descriptor) {
+ DAWN_TRY(
+ ValidateSingleSType(descriptor->nextInChain, wgpu::SType::PrimitiveDepthClampingState));
+ const PrimitiveDepthClampingState* clampInfo = nullptr;
+ FindInChain(descriptor->nextInChain, &clampInfo);
+ if (clampInfo && !device->IsFeatureEnabled(Feature::DepthClamping)) {
+ return DAWN_VALIDATION_ERROR("The depth clamping feature is not supported");
+ }
+ DAWN_TRY(ValidatePrimitiveTopology(descriptor->topology));
+ DAWN_TRY(ValidateIndexFormat(descriptor->stripIndexFormat));
+ DAWN_TRY(ValidateFrontFace(descriptor->frontFace));
+ DAWN_TRY(ValidateCullMode(descriptor->cullMode));
+
+ // Pipeline descriptors must have stripIndexFormat == undefined if they are using
+ // non-strip topologies.
+ if (!IsStripPrimitiveTopology(descriptor->topology)) {
+ DAWN_INVALID_IF(descriptor->stripIndexFormat != wgpu::IndexFormat::Undefined,
+ "StripIndexFormat (%s) is not undefined when using a non-strip primitive "
+ "topology (%s).",
+ descriptor->stripIndexFormat, descriptor->topology);
+ }
+
+ return {};
+}
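
ValidatePrimitiveState looks up the optional PrimitiveDepthClampingState extension through the nextInChain pointer before validating the rest of the primitive state. The sketch below illustrates the chained-struct extension pattern in isolation; the SType enum, the descriptor types, and the FindInChain helper here are simplified stand-ins, not the real WebGPU/Dawn definitions.

    #include <iostream>

    // Hypothetical chained-struct sketch, loosely modeled on the nextInChain
    // pattern used by the descriptors above; these types are not Dawn's real ones.
    enum class SType { Invalid, DepthClampingState };

    struct ChainedStruct {
        const ChainedStruct* nextInChain = nullptr;
        SType sType = SType::Invalid;
    };

    struct PrimitiveStateDesc {
        const ChainedStruct* nextInChain = nullptr;
        // ... topology, cull mode, etc. would live here ...
    };

    struct DepthClampingStateDesc : ChainedStruct {
        DepthClampingStateDesc() { sType = SType::DepthClampingState; }
        bool clampDepth = false;
    };

    // Walks the chain and returns the first extension with the requested sType.
    const ChainedStruct* FindInChain(const ChainedStruct* chain, SType wanted) {
        for (; chain != nullptr; chain = chain->nextInChain) {
            if (chain->sType == wanted) {
                return chain;
            }
        }
        return nullptr;
    }

    int main() {
        DepthClampingStateDesc clamp;
        clamp.clampDepth = true;

        PrimitiveStateDesc primitive;
        primitive.nextInChain = &clamp;

        const auto* found = static_cast<const DepthClampingStateDesc*>(
            FindInChain(primitive.nextInChain, SType::DepthClampingState));
        std::cout << (found != nullptr && found->clampDepth ? "clampDepth enabled"
                                                            : "no clamping extension")
                  << "\n";
        return 0;
    }
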
+
+MaybeError ValidateDepthStencilState(const DeviceBase* device,
+ const DepthStencilState* descriptor) {
+ if (descriptor->nextInChain != nullptr) {
+ return DAWN_VALIDATION_ERROR("nextInChain must be nullptr");
+ }
+
+ DAWN_TRY(ValidateCompareFunction(descriptor->depthCompare));
+ DAWN_TRY(ValidateCompareFunction(descriptor->stencilFront.compare));
+ DAWN_TRY(ValidateStencilOperation(descriptor->stencilFront.failOp));
+ DAWN_TRY(ValidateStencilOperation(descriptor->stencilFront.depthFailOp));
+ DAWN_TRY(ValidateStencilOperation(descriptor->stencilFront.passOp));
+ DAWN_TRY(ValidateCompareFunction(descriptor->stencilBack.compare));
+ DAWN_TRY(ValidateStencilOperation(descriptor->stencilBack.failOp));
+ DAWN_TRY(ValidateStencilOperation(descriptor->stencilBack.depthFailOp));
+ DAWN_TRY(ValidateStencilOperation(descriptor->stencilBack.passOp));
+
+ const Format* format;
+ DAWN_TRY_ASSIGN(format, device->GetInternalFormat(descriptor->format));
+ DAWN_INVALID_IF(!format->HasDepthOrStencil() || !format->isRenderable,
+ "Depth stencil format (%s) is not depth-stencil renderable.",
+ descriptor->format);
+
+ DAWN_INVALID_IF(
+ std::isnan(descriptor->depthBiasSlopeScale) || std::isnan(descriptor->depthBiasClamp),
+ "Either depthBiasSlopeScale (%f) or depthBiasClamp (%f) is NaN.",
+ descriptor->depthBiasSlopeScale, descriptor->depthBiasClamp);
+
+ DAWN_INVALID_IF(
+ !format->HasDepth() && (descriptor->depthCompare != wgpu::CompareFunction::Always ||
+ descriptor->depthWriteEnabled),
+ "Depth stencil format (%s) doesn't have depth aspect while depthCompare (%s) is "
+ "not %s or depthWriteEnabled (%u) is true.",
+ descriptor->format, descriptor->depthCompare, wgpu::CompareFunction::Always,
+ descriptor->depthWriteEnabled);
+
+ DAWN_INVALID_IF(!format->HasStencil() && StencilTestEnabled(descriptor),
+ "Depth stencil format (%s) doesn't have stencil aspect while stencil "
+ "test or stencil write is enabled.",
+ descriptor->format);
+
+ return {};
+}
+
+MaybeError ValidateMultisampleState(const MultisampleState* descriptor) {
+ DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr.");
+
+ DAWN_INVALID_IF(!IsValidSampleCount(descriptor->count),
+ "Multisample count (%u) is not supported.", descriptor->count);
+
+ DAWN_INVALID_IF(descriptor->alphaToCoverageEnabled && descriptor->count <= 1,
+ "Multisample count (%u) must be > 1 when alphaToCoverage is enabled.",
+ descriptor->count);
+
+ return {};
+}
+
+MaybeError ValidateBlendComponent(BlendComponent blendComponent) {
+ if (blendComponent.operation == wgpu::BlendOperation::Min ||
+ blendComponent.operation == wgpu::BlendOperation::Max) {
+ DAWN_INVALID_IF(blendComponent.srcFactor != wgpu::BlendFactor::One ||
+ blendComponent.dstFactor != wgpu::BlendFactor::One,
+ "Blend factor is not %s when blend operation is %s.",
+ wgpu::BlendFactor::One, blendComponent.operation);
+ }
+
+ return {};
+}
+
+MaybeError ValidateBlendState(DeviceBase* device, const BlendState* descriptor) {
+ DAWN_TRY(ValidateBlendOperation(descriptor->alpha.operation));
+ DAWN_TRY(ValidateBlendFactor(descriptor->alpha.srcFactor));
+ DAWN_TRY(ValidateBlendFactor(descriptor->alpha.dstFactor));
+ DAWN_TRY(ValidateBlendOperation(descriptor->color.operation));
+ DAWN_TRY(ValidateBlendFactor(descriptor->color.srcFactor));
+ DAWN_TRY(ValidateBlendFactor(descriptor->color.dstFactor));
+ DAWN_TRY(ValidateBlendComponent(descriptor->alpha));
+ DAWN_TRY(ValidateBlendComponent(descriptor->color));
+
+ return {};
+}
+
+bool BlendFactorContainsSrcAlpha(const wgpu::BlendFactor& blendFactor) {
+ return blendFactor == wgpu::BlendFactor::SrcAlpha ||
+ blendFactor == wgpu::BlendFactor::OneMinusSrcAlpha ||
+ blendFactor == wgpu::BlendFactor::SrcAlphaSaturated;
+}
+
+MaybeError ValidateColorTargetState(
+ DeviceBase* device,
+ const ColorTargetState* descriptor,
+ bool fragmentWritten,
+ const EntryPointMetadata::FragmentOutputVariableInfo& fragmentOutputVariable) {
+ DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr.");
+
+ if (descriptor->blend) {
+ DAWN_TRY_CONTEXT(ValidateBlendState(device, descriptor->blend), "validating blend state.");
+ }
+
+ DAWN_TRY(ValidateColorWriteMask(descriptor->writeMask));
+
+ const Format* format;
+ DAWN_TRY_ASSIGN(format, device->GetInternalFormat(descriptor->format));
+ DAWN_INVALID_IF(!format->IsColor() || !format->isRenderable,
+ "Color format (%s) is not color renderable.", descriptor->format);
+
+ DAWN_INVALID_IF(
+ descriptor->blend &&
+ !(format->GetAspectInfo(Aspect::Color).supportedSampleTypes & SampleTypeBit::Float),
+ "Blending is enabled but color format (%s) is not blendable.", descriptor->format);
+
+ if (fragmentWritten) {
+ DAWN_INVALID_IF(
+ fragmentOutputVariable.baseType != format->GetAspectInfo(Aspect::Color).baseType,
+ "Color format (%s) base type (%s) doesn't match the fragment "
+ "module output type (%s).",
+ descriptor->format, format->GetAspectInfo(Aspect::Color).baseType,
+ fragmentOutputVariable.baseType);
+
+ DAWN_INVALID_IF(fragmentOutputVariable.componentCount < format->componentCount,
+ "The fragment stage has fewer output components (%u) than the color format "
+ "(%s) component count (%u).",
+ fragmentOutputVariable.componentCount, descriptor->format,
+ format->componentCount);
+
+ if (descriptor->blend) {
+ if (fragmentOutputVariable.componentCount < 4u) {
+ // No alpha channel output
+ // Make sure there's no alpha involved in the blending operation
+ DAWN_INVALID_IF(BlendFactorContainsSrcAlpha(descriptor->blend->color.srcFactor) ||
+ BlendFactorContainsSrcAlpha(descriptor->blend->color.dstFactor),
+                                "Color blending srcFactor (%s) or dstFactor (%s) is reading alpha "
+ "but it is missing from fragment output.",
+ descriptor->blend->color.srcFactor,
+ descriptor->blend->color.dstFactor);
}
-
- return {};
}
-
- MaybeError ValidateVertexState(DeviceBase* device,
- const VertexState* descriptor,
- const PipelineLayoutBase* layout) {
- DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr.");
-
- DAWN_INVALID_IF(
- descriptor->bufferCount > kMaxVertexBuffers,
- "Vertex buffer count (%u) exceeds the maximum number of vertex buffers (%u).",
- descriptor->bufferCount, kMaxVertexBuffers);
-
+ } else {
+ DAWN_INVALID_IF(
+ descriptor->writeMask != wgpu::ColorWriteMask::None,
+ "Color target has no corresponding fragment stage output but writeMask (%s) is "
+ "not zero.",
+ descriptor->writeMask);
+ }
+
+ return {};
+}
+
+MaybeError ValidateFragmentState(DeviceBase* device,
+ const FragmentState* descriptor,
+ const PipelineLayoutBase* layout) {
+ DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr.");
+
+ DAWN_TRY_CONTEXT(ValidateProgrammableStage(device, descriptor->module, descriptor->entryPoint,
+ descriptor->constantCount, descriptor->constants,
+ layout, SingleShaderStage::Fragment),
+ "validating fragment stage (module: %s, entryPoint: %s).", descriptor->module,
+ descriptor->entryPoint);
+
+ DAWN_INVALID_IF(descriptor->targetCount > kMaxColorAttachments,
+ "Number of targets (%u) exceeds the maximum (%u).", descriptor->targetCount,
+ kMaxColorAttachments);
+
+ const EntryPointMetadata& fragmentMetadata =
+ descriptor->module->GetEntryPoint(descriptor->entryPoint);
+ for (ColorAttachmentIndex i(uint8_t(0));
+ i < ColorAttachmentIndex(static_cast<uint8_t>(descriptor->targetCount)); ++i) {
+ const ColorTargetState* target = &descriptor->targets[static_cast<uint8_t>(i)];
+ if (target->format != wgpu::TextureFormat::Undefined) {
DAWN_TRY_CONTEXT(
- ValidateProgrammableStage(device, descriptor->module, descriptor->entryPoint,
- descriptor->constantCount, descriptor->constants, layout,
- SingleShaderStage::Vertex),
- "validating vertex stage (module: %s, entryPoint: %s).", descriptor->module,
- descriptor->entryPoint);
- const EntryPointMetadata& vertexMetadata =
- descriptor->module->GetEntryPoint(descriptor->entryPoint);
-
- ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes> attributesSetMask;
- uint32_t totalAttributesNum = 0;
- for (uint32_t i = 0; i < descriptor->bufferCount; ++i) {
- DAWN_TRY_CONTEXT(ValidateVertexBufferLayout(device, &descriptor->buffers[i],
- vertexMetadata, &attributesSetMask),
- "validating buffers[%u].", i);
- totalAttributesNum += descriptor->buffers[i].attributeCount;
- }
-
- // Every vertex attribute has a member called shaderLocation, and there are some
- // requirements for shaderLocation: 1) >=0, 2) values are different across different
- // attributes, 3) can't exceed kMaxVertexAttributes. So it can ensure that total
- // attribute number never exceed kMaxVertexAttributes.
- ASSERT(totalAttributesNum <= kMaxVertexAttributes);
-
- // TODO(dawn:563): Specify which inputs were not used in error message.
- DAWN_INVALID_IF(!IsSubset(vertexMetadata.usedVertexInputs, attributesSetMask),
- "Pipeline vertex stage uses vertex buffers not in the vertex state");
-
- return {};
+ ValidateColorTargetState(device, target, fragmentMetadata.fragmentOutputsWritten[i],
+ fragmentMetadata.fragmentOutputVariables[i]),
+ "validating targets[%u].", static_cast<uint8_t>(i));
+ } else {
+ DAWN_INVALID_IF(target->blend,
+ "Color target[%u] blend state is set when the format is undefined.",
+ static_cast<uint8_t>(i));
}
+ }
- MaybeError ValidatePrimitiveState(const DeviceBase* device,
- const PrimitiveState* descriptor) {
- DAWN_TRY(ValidateSingleSType(descriptor->nextInChain,
- wgpu::SType::PrimitiveDepthClampingState));
- const PrimitiveDepthClampingState* clampInfo = nullptr;
- FindInChain(descriptor->nextInChain, &clampInfo);
- if (clampInfo && !device->IsFeatureEnabled(Feature::DepthClamping)) {
- return DAWN_VALIDATION_ERROR("The depth clamping feature is not supported");
- }
- DAWN_TRY(ValidatePrimitiveTopology(descriptor->topology));
- DAWN_TRY(ValidateIndexFormat(descriptor->stripIndexFormat));
- DAWN_TRY(ValidateFrontFace(descriptor->frontFace));
- DAWN_TRY(ValidateCullMode(descriptor->cullMode));
-
- // Pipeline descriptors must have stripIndexFormat == undefined if they are using
- // non-strip topologies.
- if (!IsStripPrimitiveTopology(descriptor->topology)) {
- DAWN_INVALID_IF(
- descriptor->stripIndexFormat != wgpu::IndexFormat::Undefined,
- "StripIndexFormat (%s) is not undefined when using a non-strip primitive "
- "topology (%s).",
- descriptor->stripIndexFormat, descriptor->topology);
- }
-
- return {};
- }
+ return {};
+}
- MaybeError ValidateDepthStencilState(const DeviceBase* device,
- const DepthStencilState* descriptor) {
- if (descriptor->nextInChain != nullptr) {
- return DAWN_VALIDATION_ERROR("nextInChain must be nullptr");
+MaybeError ValidateInterStageMatching(DeviceBase* device,
+ const VertexState& vertexState,
+ const FragmentState& fragmentState) {
+ const EntryPointMetadata& vertexMetadata =
+ vertexState.module->GetEntryPoint(vertexState.entryPoint);
+ const EntryPointMetadata& fragmentMetadata =
+ fragmentState.module->GetEntryPoint(fragmentState.entryPoint);
+
+ // TODO(dawn:563): Can this message give more details?
+ DAWN_INVALID_IF(
+ vertexMetadata.usedInterStageVariables != fragmentMetadata.usedInterStageVariables,
+ "One or more fragment inputs and vertex outputs are not one-to-one matching");
+
+    // TODO(dawn:802): Validate interpolation types and interpolation sampling types
+ for (size_t i : IterateBitSet(vertexMetadata.usedInterStageVariables)) {
+ const auto& vertexOutputInfo = vertexMetadata.interStageVariables[i];
+ const auto& fragmentInputInfo = fragmentMetadata.interStageVariables[i];
+ DAWN_INVALID_IF(
+ vertexOutputInfo.baseType != fragmentInputInfo.baseType,
+ "The base type (%s) of the vertex output at location %u is different from the "
+ "base type (%s) of the fragment input at location %u.",
+ vertexOutputInfo.baseType, i, fragmentInputInfo.baseType, i);
+
+ DAWN_INVALID_IF(vertexOutputInfo.componentCount != fragmentInputInfo.componentCount,
+ "The component count (%u) of the vertex output at location %u is different "
+ "from the component count (%u) of the fragment input at location %u.",
+ vertexOutputInfo.componentCount, i, fragmentInputInfo.componentCount, i);
+
+ DAWN_INVALID_IF(
+ vertexOutputInfo.interpolationType != fragmentInputInfo.interpolationType,
+ "The interpolation type (%s) of the vertex output at location %u is different "
+ "from the interpolation type (%s) of the fragment input at location %u.",
+ vertexOutputInfo.interpolationType, i, fragmentInputInfo.interpolationType, i);
+
+ DAWN_INVALID_IF(
+ vertexOutputInfo.interpolationSampling != fragmentInputInfo.interpolationSampling,
+ "The interpolation sampling (%s) of the vertex output at location %u is "
+ "different from the interpolation sampling (%s) of the fragment input at "
+ "location %u.",
+ vertexOutputInfo.interpolationSampling, i, fragmentInputInfo.interpolationSampling, i);
+ }
+
+ return {};
+}
+} // anonymous namespace
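
ValidateInterStageMatching above requires vertex outputs and fragment inputs to agree, location by location, on base type, component count, interpolation type, and interpolation sampling. The standalone sketch below reproduces the shape of that comparison over a simplified variable description; InterStageVariable and kMaxInterStageLocations are illustrative, not Dawn's metadata types.

    #include <array>
    #include <iostream>
    #include <optional>
    #include <string>

    constexpr std::size_t kMaxInterStageLocations = 16;

    // Simplified stand-in for the per-variable metadata compared by the validation.
    struct InterStageVariable {
        std::string baseType;       // e.g. "f32", "i32", "u32"
        int componentCount = 0;     // 1..4
        std::string interpolation;  // e.g. "perspective", "linear", "flat"
    };

    using VariableMap =
        std::array<std::optional<InterStageVariable>, kMaxInterStageLocations>;

    // Returns an error message for the first mismatch, or an empty string on success.
    std::string CheckInterStageMatching(const VariableMap& vertexOutputs,
                                        const VariableMap& fragmentInputs) {
        for (std::size_t loc = 0; loc < kMaxInterStageLocations; ++loc) {
            const auto& out = vertexOutputs[loc];
            const auto& in = fragmentInputs[loc];
            if (out.has_value() != in.has_value()) {
                return "Location " + std::to_string(loc) + " is not matched one-to-one.";
            }
            if (!out.has_value()) {
                continue;
            }
            if (out->baseType != in->baseType ||
                out->componentCount != in->componentCount ||
                out->interpolation != in->interpolation) {
                return "Location " + std::to_string(loc) + " has mismatched declarations.";
            }
        }
        return {};
    }

    int main() {
        VariableMap vertexOutputs;
        vertexOutputs[0] = InterStageVariable{"f32", 4, "perspective"};
        VariableMap fragmentInputs;
        fragmentInputs[0] = InterStageVariable{"f32", 3, "perspective"};  // Component count differs.

        std::string error = CheckInterStageMatching(vertexOutputs, fragmentInputs);
        std::cout << (error.empty() ? "OK" : error) << "\n";
        return 0;
    }
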
+
+// Helper functions
+size_t IndexFormatSize(wgpu::IndexFormat format) {
+ switch (format) {
+ case wgpu::IndexFormat::Uint16:
+ return sizeof(uint16_t);
+ case wgpu::IndexFormat::Uint32:
+ return sizeof(uint32_t);
+ case wgpu::IndexFormat::Undefined:
+ break;
+ }
+ UNREACHABLE();
+}
+
+bool IsStripPrimitiveTopology(wgpu::PrimitiveTopology primitiveTopology) {
+ return primitiveTopology == wgpu::PrimitiveTopology::LineStrip ||
+ primitiveTopology == wgpu::PrimitiveTopology::TriangleStrip;
+}
+
+MaybeError ValidateRenderPipelineDescriptor(DeviceBase* device,
+ const RenderPipelineDescriptor* descriptor) {
+ DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr.");
+
+ if (descriptor->layout != nullptr) {
+ DAWN_TRY(device->ValidateObject(descriptor->layout));
+ }
+
+ DAWN_TRY_CONTEXT(ValidateVertexState(device, &descriptor->vertex, descriptor->layout),
+ "validating vertex state.");
+
+ DAWN_TRY_CONTEXT(ValidatePrimitiveState(device, &descriptor->primitive),
+ "validating primitive state.");
+
+ if (descriptor->depthStencil) {
+ DAWN_TRY_CONTEXT(ValidateDepthStencilState(device, descriptor->depthStencil),
+ "validating depthStencil state.");
+ }
+
+ DAWN_TRY_CONTEXT(ValidateMultisampleState(&descriptor->multisample),
+ "validating multisample state.");
+
+ if (descriptor->fragment != nullptr) {
+ DAWN_TRY_CONTEXT(ValidateFragmentState(device, descriptor->fragment, descriptor->layout),
+ "validating fragment state.");
+
+ DAWN_INVALID_IF(descriptor->fragment->targetCount == 0 && !descriptor->depthStencil,
+ "Must have at least one color or depthStencil target.");
+
+ DAWN_TRY(ValidateInterStageMatching(device, descriptor->vertex, *(descriptor->fragment)));
+ }
+
+ return {};
+}
+
+std::vector<StageAndDescriptor> GetRenderStagesAndSetPlaceholderShader(
+ DeviceBase* device,
+ const RenderPipelineDescriptor* descriptor) {
+ std::vector<StageAndDescriptor> stages;
+ stages.push_back({SingleShaderStage::Vertex, descriptor->vertex.module,
+ descriptor->vertex.entryPoint, descriptor->vertex.constantCount,
+ descriptor->vertex.constants});
+ if (descriptor->fragment != nullptr) {
+ stages.push_back({SingleShaderStage::Fragment, descriptor->fragment->module,
+ descriptor->fragment->entryPoint, descriptor->fragment->constantCount,
+ descriptor->fragment->constants});
+ } else if (device->IsToggleEnabled(Toggle::UsePlaceholderFragmentInVertexOnlyPipeline)) {
+ InternalPipelineStore* store = device->GetInternalPipelineStore();
+ // The placeholder fragment shader module should already be initialized
+ DAWN_ASSERT(store->placeholderFragmentShader != nullptr);
+ ShaderModuleBase* placeholderFragmentShader = store->placeholderFragmentShader.Get();
+ stages.push_back(
+ {SingleShaderStage::Fragment, placeholderFragmentShader, "fs_empty_main", 0, nullptr});
+ }
+ return stages;
+}
+
+bool StencilTestEnabled(const DepthStencilState* depthStencil) {
+ return depthStencil->stencilBack.compare != wgpu::CompareFunction::Always ||
+ depthStencil->stencilBack.failOp != wgpu::StencilOperation::Keep ||
+ depthStencil->stencilBack.depthFailOp != wgpu::StencilOperation::Keep ||
+ depthStencil->stencilBack.passOp != wgpu::StencilOperation::Keep ||
+ depthStencil->stencilFront.compare != wgpu::CompareFunction::Always ||
+ depthStencil->stencilFront.failOp != wgpu::StencilOperation::Keep ||
+ depthStencil->stencilFront.depthFailOp != wgpu::StencilOperation::Keep ||
+ depthStencil->stencilFront.passOp != wgpu::StencilOperation::Keep;
+}
+
+// RenderPipelineBase
+
+RenderPipelineBase::RenderPipelineBase(DeviceBase* device,
+ const RenderPipelineDescriptor* descriptor)
+ : PipelineBase(device,
+ descriptor->layout,
+ descriptor->label,
+ GetRenderStagesAndSetPlaceholderShader(device, descriptor)),
+ mAttachmentState(device->GetOrCreateAttachmentState(descriptor)) {
+ mVertexBufferCount = descriptor->vertex.bufferCount;
+ const VertexBufferLayout* buffers = descriptor->vertex.buffers;
+ for (uint8_t slot = 0; slot < mVertexBufferCount; ++slot) {
+ // Skip unused slots
+ if (buffers[slot].stepMode == wgpu::VertexStepMode::VertexBufferNotUsed) {
+ continue;
+ }
+
+ VertexBufferSlot typedSlot(slot);
+
+ mVertexBufferSlotsUsed.set(typedSlot);
+ mVertexBufferInfos[typedSlot].arrayStride = buffers[slot].arrayStride;
+ mVertexBufferInfos[typedSlot].stepMode = buffers[slot].stepMode;
+ mVertexBufferInfos[typedSlot].usedBytesInStride = 0;
+ mVertexBufferInfos[typedSlot].lastStride = 0;
+ switch (buffers[slot].stepMode) {
+ case wgpu::VertexStepMode::Vertex:
+ mVertexBufferSlotsUsedAsVertexBuffer.set(typedSlot);
+ break;
+ case wgpu::VertexStepMode::Instance:
+ mVertexBufferSlotsUsedAsInstanceBuffer.set(typedSlot);
+ break;
+ default:
+ DAWN_UNREACHABLE();
+ }
+
+ for (uint32_t i = 0; i < buffers[slot].attributeCount; ++i) {
+ VertexAttributeLocation location = VertexAttributeLocation(
+ static_cast<uint8_t>(buffers[slot].attributes[i].shaderLocation));
+ mAttributeLocationsUsed.set(location);
+ mAttributeInfos[location].shaderLocation = location;
+ mAttributeInfos[location].vertexBufferSlot = typedSlot;
+ mAttributeInfos[location].offset = buffers[slot].attributes[i].offset;
+ mAttributeInfos[location].format = buffers[slot].attributes[i].format;
+            // Compute the access boundary of this attribute by adding the attribute format
+            // size to the attribute offset. Although the offset is a uint64_t, the sum can
+            // be no larger than maxVertexBufferArrayStride (2048), which is guaranteed by
+            // the GPUVertexBufferLayout validation at render pipeline creation. Therefore,
+            // computing the sum in uint16_t cannot overflow.
+ uint32_t formatByteSize =
+ GetVertexFormatInfo(buffers[slot].attributes[i].format).byteSize;
+ DAWN_ASSERT(buffers[slot].attributes[i].offset <= 2048);
+ uint16_t accessBoundary =
+ uint16_t(buffers[slot].attributes[i].offset) + uint16_t(formatByteSize);
+ mVertexBufferInfos[typedSlot].usedBytesInStride =
+ std::max(mVertexBufferInfos[typedSlot].usedBytesInStride, accessBoundary);
+ mVertexBufferInfos[typedSlot].lastStride =
+ std::max(mVertexBufferInfos[typedSlot].lastStride,
+ mAttributeInfos[location].offset + formatByteSize);
+ }
+ }
+
+ mPrimitive = descriptor->primitive;
+ const PrimitiveDepthClampingState* clampInfo = nullptr;
+ FindInChain(mPrimitive.nextInChain, &clampInfo);
+ if (clampInfo) {
+ mClampDepth = clampInfo->clampDepth;
+ }
+ mMultisample = descriptor->multisample;
+
+ if (mAttachmentState->HasDepthStencilAttachment()) {
+ mDepthStencil = *descriptor->depthStencil;
+ mWritesDepth = mDepthStencil.depthWriteEnabled;
+ if (mDepthStencil.stencilWriteMask) {
+ if ((mPrimitive.cullMode != wgpu::CullMode::Front &&
+ (mDepthStencil.stencilFront.failOp != wgpu::StencilOperation::Keep ||
+ mDepthStencil.stencilFront.depthFailOp != wgpu::StencilOperation::Keep ||
+ mDepthStencil.stencilFront.passOp != wgpu::StencilOperation::Keep)) ||
+ (mPrimitive.cullMode != wgpu::CullMode::Back &&
+ (mDepthStencil.stencilBack.failOp != wgpu::StencilOperation::Keep ||
+ mDepthStencil.stencilBack.depthFailOp != wgpu::StencilOperation::Keep ||
+ mDepthStencil.stencilBack.passOp != wgpu::StencilOperation::Keep))) {
+ mWritesStencil = true;
}
-
- DAWN_TRY(ValidateCompareFunction(descriptor->depthCompare));
- DAWN_TRY(ValidateCompareFunction(descriptor->stencilFront.compare));
- DAWN_TRY(ValidateStencilOperation(descriptor->stencilFront.failOp));
- DAWN_TRY(ValidateStencilOperation(descriptor->stencilFront.depthFailOp));
- DAWN_TRY(ValidateStencilOperation(descriptor->stencilFront.passOp));
- DAWN_TRY(ValidateCompareFunction(descriptor->stencilBack.compare));
- DAWN_TRY(ValidateStencilOperation(descriptor->stencilBack.failOp));
- DAWN_TRY(ValidateStencilOperation(descriptor->stencilBack.depthFailOp));
- DAWN_TRY(ValidateStencilOperation(descriptor->stencilBack.passOp));
-
- const Format* format;
- DAWN_TRY_ASSIGN(format, device->GetInternalFormat(descriptor->format));
- DAWN_INVALID_IF(!format->HasDepthOrStencil() || !format->isRenderable,
- "Depth stencil format (%s) is not depth-stencil renderable.",
- descriptor->format);
-
- DAWN_INVALID_IF(std::isnan(descriptor->depthBiasSlopeScale) ||
- std::isnan(descriptor->depthBiasClamp),
- "Either depthBiasSlopeScale (%f) or depthBiasClamp (%f) is NaN.",
- descriptor->depthBiasSlopeScale, descriptor->depthBiasClamp);
-
- DAWN_INVALID_IF(
- !format->HasDepth() && (descriptor->depthCompare != wgpu::CompareFunction::Always ||
- descriptor->depthWriteEnabled),
- "Depth stencil format (%s) doesn't have depth aspect while depthCompare (%s) is "
- "not %s or depthWriteEnabled (%u) is true.",
- descriptor->format, descriptor->depthCompare, wgpu::CompareFunction::Always,
- descriptor->depthWriteEnabled);
-
- DAWN_INVALID_IF(!format->HasStencil() && StencilTestEnabled(descriptor),
- "Depth stencil format (%s) doesn't have stencil aspect while stencil "
- "test or stencil write is enabled.",
- descriptor->format);
-
- return {};
}
-
- MaybeError ValidateMultisampleState(const MultisampleState* descriptor) {
- DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr.");
-
- DAWN_INVALID_IF(!IsValidSampleCount(descriptor->count),
- "Multisample count (%u) is not supported.", descriptor->count);
-
- DAWN_INVALID_IF(descriptor->alphaToCoverageEnabled && descriptor->count <= 1,
- "Multisample count (%u) must be > 1 when alphaToCoverage is enabled.",
- descriptor->count);
-
+ } else {
+        // These defaults are used when backends fill out their own depth-stencil
+        // states/descriptors from mDepthStencil; they indicate that depth and stencil
+        // testing are disabled.
+ mDepthStencil.format = wgpu::TextureFormat::Undefined;
+ mDepthStencil.depthWriteEnabled = false;
+ mDepthStencil.depthCompare = wgpu::CompareFunction::Always;
+ mDepthStencil.stencilBack.compare = wgpu::CompareFunction::Always;
+ mDepthStencil.stencilBack.failOp = wgpu::StencilOperation::Keep;
+ mDepthStencil.stencilBack.depthFailOp = wgpu::StencilOperation::Keep;
+ mDepthStencil.stencilBack.passOp = wgpu::StencilOperation::Keep;
+ mDepthStencil.stencilFront.compare = wgpu::CompareFunction::Always;
+ mDepthStencil.stencilFront.failOp = wgpu::StencilOperation::Keep;
+ mDepthStencil.stencilFront.depthFailOp = wgpu::StencilOperation::Keep;
+ mDepthStencil.stencilFront.passOp = wgpu::StencilOperation::Keep;
+ mDepthStencil.stencilReadMask = 0xff;
+ mDepthStencil.stencilWriteMask = 0xff;
+ mDepthStencil.depthBias = 0;
+ mDepthStencil.depthBiasSlopeScale = 0.0f;
+ mDepthStencil.depthBiasClamp = 0.0f;
+ }
+
+ for (ColorAttachmentIndex i : IterateBitSet(mAttachmentState->GetColorAttachmentsMask())) {
+        // Vertex-only render pipelines have no color attachments. For a render pipeline
+        // with color attachments, there must be a valid FragmentState.
+ ASSERT(descriptor->fragment != nullptr);
+ const ColorTargetState* target = &descriptor->fragment->targets[static_cast<uint8_t>(i)];
+ mTargets[i] = *target;
+
+ if (target->blend != nullptr) {
+ mTargetBlend[i] = *target->blend;
+ mTargets[i].blend = &mTargetBlend[i];
+ }
+ }
+
+ SetContentHash(ComputeContentHash());
+ TrackInDevice();
+
+ // Initialize the cache key to include the cache type and device information.
+ mCacheKey.Record(CacheKey::Type::RenderPipeline, device->GetCacheKey());
+}
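
Inside the constructor above, each used vertex buffer slot records the furthest byte any of its attributes can touch (usedBytesInStride) as the maximum of offset plus format size over that buffer's attributes. A standalone sketch of that accumulation follows; the Attribute struct and ComputeUsedBytesInStride are illustrative names rather than Dawn's bookkeeping.

    #include <algorithm>
    #include <cstdint>
    #include <iostream>
    #include <vector>

    // Hypothetical attribute description: only the fields needed for the computation.
    struct Attribute {
        uint64_t offset;    // Byte offset within one stride of the vertex buffer.
        uint32_t byteSize;  // Size of the attribute's format.
    };

    // Returns the furthest byte boundary any attribute reaches within one stride.
    // Validation elsewhere guarantees offset + byteSize <= 2048, so the uint16_t
    // arithmetic used by the real code cannot overflow; uint32_t is used here for
    // simplicity.
    uint32_t ComputeUsedBytesInStride(const std::vector<Attribute>& attributes) {
        uint32_t usedBytes = 0;
        for (const Attribute& attribute : attributes) {
            uint32_t accessBoundary = uint32_t(attribute.offset) + attribute.byteSize;
            usedBytes = std::max(usedBytes, accessBoundary);
        }
        return usedBytes;
    }

    int main() {
        // Two attributes in one buffer: a float2 at offset 0 and a float4 at offset 8.
        std::vector<Attribute> attributes = {{0, 8}, {8, 16}};
        std::cout << ComputeUsedBytesInStride(attributes) << "\n";  // Prints 24
        return 0;
    }
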
+
+RenderPipelineBase::RenderPipelineBase(DeviceBase* device) : PipelineBase(device) {
+ TrackInDevice();
+}
+
+RenderPipelineBase::RenderPipelineBase(DeviceBase* device, ObjectBase::ErrorTag tag)
+ : PipelineBase(device, tag) {}
+
+RenderPipelineBase::~RenderPipelineBase() = default;
+
+void RenderPipelineBase::DestroyImpl() {
+ if (IsCachedReference()) {
+ // Do not uncache the actual cached object if we are a blueprint.
+ GetDevice()->UncacheRenderPipeline(this);
+ }
+
+ // Remove reference to the attachment state so that we don't have lingering references to
+ // it preventing it from being uncached in the device.
+ mAttachmentState = nullptr;
+}
+
+// static
+RenderPipelineBase* RenderPipelineBase::MakeError(DeviceBase* device) {
+ class ErrorRenderPipeline final : public RenderPipelineBase {
+ public:
+ explicit ErrorRenderPipeline(DeviceBase* device)
+ : RenderPipelineBase(device, ObjectBase::kError) {}
+
+ MaybeError Initialize() override {
+ UNREACHABLE();
return {};
}
+ };
+
+ return new ErrorRenderPipeline(device);
+}
+
+ObjectType RenderPipelineBase::GetType() const {
+ return ObjectType::RenderPipeline;
+}
+
+const ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes>&
+RenderPipelineBase::GetAttributeLocationsUsed() const {
+ ASSERT(!IsError());
+ return mAttributeLocationsUsed;
+}
+
+const VertexAttributeInfo& RenderPipelineBase::GetAttribute(
+ VertexAttributeLocation location) const {
+ ASSERT(!IsError());
+ ASSERT(mAttributeLocationsUsed[location]);
+ return mAttributeInfos[location];
+}
+
+const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>&
+RenderPipelineBase::GetVertexBufferSlotsUsed() const {
+ ASSERT(!IsError());
+ return mVertexBufferSlotsUsed;
+}
+
+const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>&
+RenderPipelineBase::GetVertexBufferSlotsUsedAsVertexBuffer() const {
+ ASSERT(!IsError());
+ return mVertexBufferSlotsUsedAsVertexBuffer;
+}
+
+const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>&
+RenderPipelineBase::GetVertexBufferSlotsUsedAsInstanceBuffer() const {
+ ASSERT(!IsError());
+ return mVertexBufferSlotsUsedAsInstanceBuffer;
+}
+
+const VertexBufferInfo& RenderPipelineBase::GetVertexBuffer(VertexBufferSlot slot) const {
+ ASSERT(!IsError());
+ ASSERT(mVertexBufferSlotsUsed[slot]);
+ return mVertexBufferInfos[slot];
+}
+
+uint32_t RenderPipelineBase::GetVertexBufferCount() const {
+ ASSERT(!IsError());
+ return mVertexBufferCount;
+}
+
+const ColorTargetState* RenderPipelineBase::GetColorTargetState(
+ ColorAttachmentIndex attachmentSlot) const {
+ ASSERT(!IsError());
+ ASSERT(attachmentSlot < mTargets.size());
+ return &mTargets[attachmentSlot];
+}
+
+const DepthStencilState* RenderPipelineBase::GetDepthStencilState() const {
+ ASSERT(!IsError());
+ return &mDepthStencil;
+}
+
+wgpu::PrimitiveTopology RenderPipelineBase::GetPrimitiveTopology() const {
+ ASSERT(!IsError());
+ return mPrimitive.topology;
+}
+
+wgpu::IndexFormat RenderPipelineBase::GetStripIndexFormat() const {
+ ASSERT(!IsError());
+ return mPrimitive.stripIndexFormat;
+}
+
+wgpu::CullMode RenderPipelineBase::GetCullMode() const {
+ ASSERT(!IsError());
+ return mPrimitive.cullMode;
+}
+
+wgpu::FrontFace RenderPipelineBase::GetFrontFace() const {
+ ASSERT(!IsError());
+ return mPrimitive.frontFace;
+}
+
+bool RenderPipelineBase::IsDepthBiasEnabled() const {
+ ASSERT(!IsError());
+ return mDepthStencil.depthBias != 0 || mDepthStencil.depthBiasSlopeScale != 0;
+}
+
+int32_t RenderPipelineBase::GetDepthBias() const {
+ ASSERT(!IsError());
+ return mDepthStencil.depthBias;
+}
+
+float RenderPipelineBase::GetDepthBiasSlopeScale() const {
+ ASSERT(!IsError());
+ return mDepthStencil.depthBiasSlopeScale;
+}
+
+float RenderPipelineBase::GetDepthBiasClamp() const {
+ ASSERT(!IsError());
+ return mDepthStencil.depthBiasClamp;
+}
+
+bool RenderPipelineBase::ShouldClampDepth() const {
+ ASSERT(!IsError());
+ return mClampDepth;
+}
+
+ityp::bitset<ColorAttachmentIndex, kMaxColorAttachments>
+RenderPipelineBase::GetColorAttachmentsMask() const {
+ ASSERT(!IsError());
+ return mAttachmentState->GetColorAttachmentsMask();
+}
+
+bool RenderPipelineBase::HasDepthStencilAttachment() const {
+ ASSERT(!IsError());
+ return mAttachmentState->HasDepthStencilAttachment();
+}
+
+wgpu::TextureFormat RenderPipelineBase::GetColorAttachmentFormat(
+ ColorAttachmentIndex attachment) const {
+ ASSERT(!IsError());
+ return mTargets[attachment].format;
+}
+
+wgpu::TextureFormat RenderPipelineBase::GetDepthStencilFormat() const {
+ ASSERT(!IsError());
+ ASSERT(mAttachmentState->HasDepthStencilAttachment());
+ return mDepthStencil.format;
+}
+
+uint32_t RenderPipelineBase::GetSampleCount() const {
+ ASSERT(!IsError());
+ return mAttachmentState->GetSampleCount();
+}
+
+uint32_t RenderPipelineBase::GetSampleMask() const {
+ ASSERT(!IsError());
+ return mMultisample.mask;
+}
+
+bool RenderPipelineBase::IsAlphaToCoverageEnabled() const {
+ ASSERT(!IsError());
+ return mMultisample.alphaToCoverageEnabled;
+}
+
+const AttachmentState* RenderPipelineBase::GetAttachmentState() const {
+ ASSERT(!IsError());
+
+ return mAttachmentState.Get();
+}
+
+bool RenderPipelineBase::WritesDepth() const {
+ ASSERT(!IsError());
+
+ return mWritesDepth;
+}
+
+bool RenderPipelineBase::WritesStencil() const {
+ ASSERT(!IsError());
+
+ return mWritesStencil;
+}
+
+size_t RenderPipelineBase::ComputeContentHash() {
+ ObjectContentHasher recorder;
+
+ // Record modules and layout
+ recorder.Record(PipelineBase::ComputeContentHash());
+
+ // Hierarchically record the attachment state.
+ // It contains the attachments set, texture formats, and sample count.
+ recorder.Record(mAttachmentState->GetContentHash());
+
+ // Record attachments
+ for (ColorAttachmentIndex i : IterateBitSet(mAttachmentState->GetColorAttachmentsMask())) {
+ const ColorTargetState& desc = *GetColorTargetState(i);
+ recorder.Record(desc.writeMask);
+ if (desc.blend != nullptr) {
+ recorder.Record(desc.blend->color.operation, desc.blend->color.srcFactor,
+ desc.blend->color.dstFactor);
+ recorder.Record(desc.blend->alpha.operation, desc.blend->alpha.srcFactor,
+ desc.blend->alpha.dstFactor);
+ }
+ }
+
+ if (mAttachmentState->HasDepthStencilAttachment()) {
+ const DepthStencilState& desc = mDepthStencil;
+ recorder.Record(desc.depthWriteEnabled, desc.depthCompare);
+ recorder.Record(desc.stencilReadMask, desc.stencilWriteMask);
+ recorder.Record(desc.stencilFront.compare, desc.stencilFront.failOp,
+ desc.stencilFront.depthFailOp, desc.stencilFront.passOp);
+ recorder.Record(desc.stencilBack.compare, desc.stencilBack.failOp,
+ desc.stencilBack.depthFailOp, desc.stencilBack.passOp);
+ recorder.Record(desc.depthBias, desc.depthBiasSlopeScale, desc.depthBiasClamp);
+ }
+
+ // Record vertex state
+ recorder.Record(mAttributeLocationsUsed);
+ for (VertexAttributeLocation location : IterateBitSet(mAttributeLocationsUsed)) {
+ const VertexAttributeInfo& desc = GetAttribute(location);
+ recorder.Record(desc.shaderLocation, desc.vertexBufferSlot, desc.offset, desc.format);
+ }
+
+ recorder.Record(mVertexBufferSlotsUsed);
+ for (VertexBufferSlot slot : IterateBitSet(mVertexBufferSlotsUsed)) {
+ const VertexBufferInfo& desc = GetVertexBuffer(slot);
+ recorder.Record(desc.arrayStride, desc.stepMode);
+ }
- MaybeError ValidateBlendComponent(BlendComponent blendComponent) {
- if (blendComponent.operation == wgpu::BlendOperation::Min ||
- blendComponent.operation == wgpu::BlendOperation::Max) {
- DAWN_INVALID_IF(blendComponent.srcFactor != wgpu::BlendFactor::One ||
- blendComponent.dstFactor != wgpu::BlendFactor::One,
- "Blend factor is not %s when blend operation is %s.",
- wgpu::BlendFactor::One, blendComponent.operation);
- }
+ // Record primitive state
+ recorder.Record(mPrimitive.topology, mPrimitive.stripIndexFormat, mPrimitive.frontFace,
+ mPrimitive.cullMode, mClampDepth);
- return {};
- }
+ // Record multisample state
+ // Sample count hashed as part of the attachment state
+ recorder.Record(mMultisample.mask, mMultisample.alphaToCoverageEnabled);
- MaybeError ValidateBlendState(DeviceBase* device, const BlendState* descriptor) {
- DAWN_TRY(ValidateBlendOperation(descriptor->alpha.operation));
- DAWN_TRY(ValidateBlendFactor(descriptor->alpha.srcFactor));
- DAWN_TRY(ValidateBlendFactor(descriptor->alpha.dstFactor));
- DAWN_TRY(ValidateBlendOperation(descriptor->color.operation));
- DAWN_TRY(ValidateBlendFactor(descriptor->color.srcFactor));
- DAWN_TRY(ValidateBlendFactor(descriptor->color.dstFactor));
- DAWN_TRY(ValidateBlendComponent(descriptor->alpha));
- DAWN_TRY(ValidateBlendComponent(descriptor->color));
-
- return {};
- }
+ return recorder.GetContentHash();
+}
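
ComputeContentHash and the EqualityFunc that follows exist so that structurally identical pipelines can be deduplicated in the device cache: every field that affects the compiled pipeline is folded into a hash, and equality re-checks the same fields to guard against collisions. The sketch below shows the shape of that scheme for a much smaller description; ContentHasher and PipelineDesc are illustrative, not Dawn's ObjectContentHasher or pipeline types.

    #include <cstddef>
    #include <functional>
    #include <iostream>
    #include <unordered_set>

    // A tiny stand-in for a pipeline description: only two fields are hashed/compared.
    struct PipelineDesc {
        int sampleCount = 1;
        bool depthWriteEnabled = false;

        bool operator==(const PipelineDesc& other) const {
            return sampleCount == other.sampleCount &&
                   depthWriteEnabled == other.depthWriteEnabled;
        }
    };

    // Minimal recorder in the spirit of an object content hasher: each recorded
    // value is combined into a running hash.
    struct ContentHasher {
        std::size_t hash = 0;

        template <typename T>
        void Record(const T& value) {
            hash ^= std::hash<T>{}(value) + 0x9e3779b9u + (hash << 6) + (hash >> 2);
        }
    };

    struct PipelineDescHash {
        std::size_t operator()(const PipelineDesc& desc) const {
            ContentHasher recorder;
            recorder.Record(desc.sampleCount);
            recorder.Record(desc.depthWriteEnabled);
            return recorder.hash;
        }
    };

    int main() {
        std::unordered_set<PipelineDesc, PipelineDescHash> cache;
        cache.insert({4, true});
        cache.insert({4, true});   // Deduplicated: identical content hashes and compares equal.
        cache.insert({1, false});  // Distinct content.
        std::cout << cache.size() << "\n";  // Prints 2
        return 0;
    }
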
- bool BlendFactorContainsSrcAlpha(const wgpu::BlendFactor& blendFactor) {
- return blendFactor == wgpu::BlendFactor::SrcAlpha ||
- blendFactor == wgpu::BlendFactor::OneMinusSrcAlpha ||
- blendFactor == wgpu::BlendFactor::SrcAlphaSaturated;
- }
+bool RenderPipelineBase::EqualityFunc::operator()(const RenderPipelineBase* a,
+ const RenderPipelineBase* b) const {
+ // Check the layout and shader stages.
+ if (!PipelineBase::EqualForCache(a, b)) {
+ return false;
+ }
- MaybeError ValidateColorTargetState(
- DeviceBase* device,
- const ColorTargetState* descriptor,
- bool fragmentWritten,
- const EntryPointMetadata::FragmentOutputVariableInfo& fragmentOutputVariable) {
- DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr.");
+ // Check the attachment state.
+ // It contains the attachments set, texture formats, and sample count.
+ if (a->mAttachmentState.Get() != b->mAttachmentState.Get()) {
+ return false;
+ }
- if (descriptor->blend) {
- DAWN_TRY_CONTEXT(ValidateBlendState(device, descriptor->blend),
- "validating blend state.");
+ if (a->mAttachmentState.Get() != nullptr) {
+ for (ColorAttachmentIndex i :
+ IterateBitSet(a->mAttachmentState->GetColorAttachmentsMask())) {
+ const ColorTargetState& descA = *a->GetColorTargetState(i);
+ const ColorTargetState& descB = *b->GetColorTargetState(i);
+ if (descA.writeMask != descB.writeMask) {
+ return false;
}
-
- DAWN_TRY(ValidateColorWriteMask(descriptor->writeMask));
-
- const Format* format;
- DAWN_TRY_ASSIGN(format, device->GetInternalFormat(descriptor->format));
- DAWN_INVALID_IF(!format->IsColor() || !format->isRenderable,
- "Color format (%s) is not color renderable.", descriptor->format);
-
- DAWN_INVALID_IF(
- descriptor->blend && !(format->GetAspectInfo(Aspect::Color).supportedSampleTypes &
- SampleTypeBit::Float),
- "Blending is enabled but color format (%s) is not blendable.", descriptor->format);
-
- if (fragmentWritten) {
- DAWN_INVALID_IF(fragmentOutputVariable.baseType !=
- format->GetAspectInfo(Aspect::Color).baseType,
- "Color format (%s) base type (%s) doesn't match the fragment "
- "module output type (%s).",
- descriptor->format, format->GetAspectInfo(Aspect::Color).baseType,
- fragmentOutputVariable.baseType);
-
- DAWN_INVALID_IF(
- fragmentOutputVariable.componentCount < format->componentCount,
- "The fragment stage has fewer output components (%u) than the color format "
- "(%s) component count (%u).",
- fragmentOutputVariable.componentCount, descriptor->format,
- format->componentCount);
-
- if (descriptor->blend) {
- if (fragmentOutputVariable.componentCount < 4u) {
- // No alpha channel output
- // Make sure there's no alpha involved in the blending operation
- DAWN_INVALID_IF(
- BlendFactorContainsSrcAlpha(descriptor->blend->color.srcFactor) ||
- BlendFactorContainsSrcAlpha(descriptor->blend->color.dstFactor),
- "Color blending srcfactor (%s) or dstFactor (%s) is reading alpha "
- "but it is missing from fragment output.",
- descriptor->blend->color.srcFactor, descriptor->blend->color.dstFactor);
- }
- }
- } else {
- DAWN_INVALID_IF(
- descriptor->writeMask != wgpu::ColorWriteMask::None,
- "Color target has no corresponding fragment stage output but writeMask (%s) is "
- "not zero.",
- descriptor->writeMask);
+ if ((descA.blend == nullptr) != (descB.blend == nullptr)) {
+ return false;
}
-
- return {};
- }
-
- MaybeError ValidateFragmentState(DeviceBase* device,
- const FragmentState* descriptor,
- const PipelineLayoutBase* layout) {
- DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr.");
-
- DAWN_TRY_CONTEXT(
- ValidateProgrammableStage(device, descriptor->module, descriptor->entryPoint,
- descriptor->constantCount, descriptor->constants, layout,
- SingleShaderStage::Fragment),
- "validating fragment stage (module: %s, entryPoint: %s).", descriptor->module,
- descriptor->entryPoint);
-
- DAWN_INVALID_IF(descriptor->targetCount > kMaxColorAttachments,
- "Number of targets (%u) exceeds the maximum (%u).",
- descriptor->targetCount, kMaxColorAttachments);
-
- const EntryPointMetadata& fragmentMetadata =
- descriptor->module->GetEntryPoint(descriptor->entryPoint);
- for (ColorAttachmentIndex i(uint8_t(0));
- i < ColorAttachmentIndex(static_cast<uint8_t>(descriptor->targetCount)); ++i) {
- const ColorTargetState* target = &descriptor->targets[static_cast<uint8_t>(i)];
- if (target->format != wgpu::TextureFormat::Undefined) {
- DAWN_TRY_CONTEXT(ValidateColorTargetState(
- device, target, fragmentMetadata.fragmentOutputsWritten[i],
- fragmentMetadata.fragmentOutputVariables[i]),
- "validating targets[%u].", static_cast<uint8_t>(i));
- } else {
- DAWN_INVALID_IF(
- target->blend,
- "Color target[%u] blend state is set when the format is undefined.",
- static_cast<uint8_t>(i));
- DAWN_INVALID_IF(
- target->writeMask != wgpu::ColorWriteMask::None,
- "Color target[%u] write mask is set to (%s) when the format is undefined.",
- static_cast<uint8_t>(i), target->writeMask);
+ if (descA.blend != nullptr) {
+ if (descA.blend->color.operation != descB.blend->color.operation ||
+ descA.blend->color.srcFactor != descB.blend->color.srcFactor ||
+ descA.blend->color.dstFactor != descB.blend->color.dstFactor) {
+ return false;
+ }
+ if (descA.blend->alpha.operation != descB.blend->alpha.operation ||
+ descA.blend->alpha.srcFactor != descB.blend->alpha.srcFactor ||
+ descA.blend->alpha.dstFactor != descB.blend->alpha.dstFactor) {
+ return false;
}
}
-
- return {};
- }
-
- MaybeError ValidateInterStageMatching(DeviceBase* device,
- const VertexState& vertexState,
- const FragmentState& fragmentState) {
- const EntryPointMetadata& vertexMetadata =
- vertexState.module->GetEntryPoint(vertexState.entryPoint);
- const EntryPointMetadata& fragmentMetadata =
- fragmentState.module->GetEntryPoint(fragmentState.entryPoint);
-
- // TODO(dawn:563): Can this message give more details?
- DAWN_INVALID_IF(
- vertexMetadata.usedInterStageVariables != fragmentMetadata.usedInterStageVariables,
- "One or more fragment inputs and vertex outputs are not one-to-one matching");
-
- // TODO(dawn:802): Validate interpolation types and interpolition sampling types
- for (size_t i : IterateBitSet(vertexMetadata.usedInterStageVariables)) {
- const auto& vertexOutputInfo = vertexMetadata.interStageVariables[i];
- const auto& fragmentInputInfo = fragmentMetadata.interStageVariables[i];
- DAWN_INVALID_IF(
- vertexOutputInfo.baseType != fragmentInputInfo.baseType,
- "The base type (%s) of the vertex output at location %u is different from the "
- "base type (%s) of the fragment input at location %u.",
- vertexOutputInfo.baseType, i, fragmentInputInfo.baseType, i);
-
- DAWN_INVALID_IF(
- vertexOutputInfo.componentCount != fragmentInputInfo.componentCount,
- "The component count (%u) of the vertex output at location %u is different "
- "from the component count (%u) of the fragment input at location %u.",
- vertexOutputInfo.componentCount, i, fragmentInputInfo.componentCount, i);
-
- DAWN_INVALID_IF(
- vertexOutputInfo.interpolationType != fragmentInputInfo.interpolationType,
- "The interpolation type (%s) of the vertex output at location %u is different "
- "from the interpolation type (%s) of the fragment input at location %u.",
- vertexOutputInfo.interpolationType, i, fragmentInputInfo.interpolationType, i);
-
- DAWN_INVALID_IF(
- vertexOutputInfo.interpolationSampling !=
- fragmentInputInfo.interpolationSampling,
- "The interpolation sampling (%s) of the vertex output at location %u is "
- "different from the interpolation sampling (%s) of the fragment input at "
- "location %u.",
- vertexOutputInfo.interpolationSampling, i,
- fragmentInputInfo.interpolationSampling, i);
- }
-
- return {};
- }
- } // anonymous namespace
-
- // Helper functions
- size_t IndexFormatSize(wgpu::IndexFormat format) {
- switch (format) {
- case wgpu::IndexFormat::Uint16:
- return sizeof(uint16_t);
- case wgpu::IndexFormat::Uint32:
- return sizeof(uint32_t);
- case wgpu::IndexFormat::Undefined:
- break;
- }
- UNREACHABLE();
- }
-
- bool IsStripPrimitiveTopology(wgpu::PrimitiveTopology primitiveTopology) {
- return primitiveTopology == wgpu::PrimitiveTopology::LineStrip ||
- primitiveTopology == wgpu::PrimitiveTopology::TriangleStrip;
- }
-
- MaybeError ValidateRenderPipelineDescriptor(DeviceBase* device,
- const RenderPipelineDescriptor* descriptor) {
- DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr.");
-
- if (descriptor->layout != nullptr) {
- DAWN_TRY(device->ValidateObject(descriptor->layout));
- }
-
- DAWN_TRY_CONTEXT(ValidateVertexState(device, &descriptor->vertex, descriptor->layout),
- "validating vertex state.");
-
- DAWN_TRY_CONTEXT(ValidatePrimitiveState(device, &descriptor->primitive),
- "validating primitive state.");
-
- if (descriptor->depthStencil) {
- DAWN_TRY_CONTEXT(ValidateDepthStencilState(device, descriptor->depthStencil),
- "validating depthStencil state.");
}
- DAWN_TRY_CONTEXT(ValidateMultisampleState(&descriptor->multisample),
- "validating multisample state.");
-
- if (descriptor->fragment != nullptr) {
- DAWN_TRY_CONTEXT(
- ValidateFragmentState(device, descriptor->fragment, descriptor->layout),
- "validating fragment state.");
-
- DAWN_INVALID_IF(descriptor->fragment->targetCount == 0 && !descriptor->depthStencil,
- "Must have at least one color or depthStencil target.");
+ // Check depth/stencil state
+ if (a->mAttachmentState->HasDepthStencilAttachment()) {
+ const DepthStencilState& stateA = a->mDepthStencil;
+ const DepthStencilState& stateB = b->mDepthStencil;
- DAWN_TRY(
- ValidateInterStageMatching(device, descriptor->vertex, *(descriptor->fragment)));
- }
-
- return {};
- }
-
- std::vector<StageAndDescriptor> GetRenderStagesAndSetDummyShader(
- DeviceBase* device,
- const RenderPipelineDescriptor* descriptor) {
- std::vector<StageAndDescriptor> stages;
- stages.push_back({SingleShaderStage::Vertex, descriptor->vertex.module,
- descriptor->vertex.entryPoint, descriptor->vertex.constantCount,
- descriptor->vertex.constants});
- if (descriptor->fragment != nullptr) {
- stages.push_back({SingleShaderStage::Fragment, descriptor->fragment->module,
- descriptor->fragment->entryPoint, descriptor->fragment->constantCount,
- descriptor->fragment->constants});
- } else if (device->IsToggleEnabled(Toggle::UseDummyFragmentInVertexOnlyPipeline)) {
- InternalPipelineStore* store = device->GetInternalPipelineStore();
- // The dummy fragment shader module should already be initialized
- DAWN_ASSERT(store->dummyFragmentShader != nullptr);
- ShaderModuleBase* dummyFragmentShader = store->dummyFragmentShader.Get();
- stages.push_back(
- {SingleShaderStage::Fragment, dummyFragmentShader, "fs_empty_main", 0, nullptr});
- }
- return stages;
- }
-
- bool StencilTestEnabled(const DepthStencilState* depthStencil) {
- return depthStencil->stencilBack.compare != wgpu::CompareFunction::Always ||
- depthStencil->stencilBack.failOp != wgpu::StencilOperation::Keep ||
- depthStencil->stencilBack.depthFailOp != wgpu::StencilOperation::Keep ||
- depthStencil->stencilBack.passOp != wgpu::StencilOperation::Keep ||
- depthStencil->stencilFront.compare != wgpu::CompareFunction::Always ||
- depthStencil->stencilFront.failOp != wgpu::StencilOperation::Keep ||
- depthStencil->stencilFront.depthFailOp != wgpu::StencilOperation::Keep ||
- depthStencil->stencilFront.passOp != wgpu::StencilOperation::Keep;
- }
-
- // RenderPipelineBase
-
- RenderPipelineBase::RenderPipelineBase(DeviceBase* device,
- const RenderPipelineDescriptor* descriptor)
- : PipelineBase(device,
- descriptor->layout,
- descriptor->label,
- GetRenderStagesAndSetDummyShader(device, descriptor)),
- mAttachmentState(device->GetOrCreateAttachmentState(descriptor)) {
- mVertexBufferCount = descriptor->vertex.bufferCount;
- const VertexBufferLayout* buffers = descriptor->vertex.buffers;
- for (uint8_t slot = 0; slot < mVertexBufferCount; ++slot) {
- if (buffers[slot].attributeCount == 0) {
- continue;
- }
+ ASSERT(!std::isnan(stateA.depthBiasSlopeScale));
+ ASSERT(!std::isnan(stateB.depthBiasSlopeScale));
+ ASSERT(!std::isnan(stateA.depthBiasClamp));
+ ASSERT(!std::isnan(stateB.depthBiasClamp));
- VertexBufferSlot typedSlot(slot);
-
- mVertexBufferSlotsUsed.set(typedSlot);
- mVertexBufferInfos[typedSlot].arrayStride = buffers[slot].arrayStride;
- mVertexBufferInfos[typedSlot].stepMode = buffers[slot].stepMode;
- mVertexBufferInfos[typedSlot].usedBytesInStride = 0;
- mVertexBufferInfos[typedSlot].lastStride = 0;
- switch (buffers[slot].stepMode) {
- case wgpu::VertexStepMode::Vertex:
- mVertexBufferSlotsUsedAsVertexBuffer.set(typedSlot);
- break;
- case wgpu::VertexStepMode::Instance:
- mVertexBufferSlotsUsedAsInstanceBuffer.set(typedSlot);
- break;
- default:
- DAWN_UNREACHABLE();
+ if (stateA.depthWriteEnabled != stateB.depthWriteEnabled ||
+ stateA.depthCompare != stateB.depthCompare ||
+ stateA.depthBias != stateB.depthBias ||
+ stateA.depthBiasSlopeScale != stateB.depthBiasSlopeScale ||
+ stateA.depthBiasClamp != stateB.depthBiasClamp) {
+ return false;
}
-
- for (uint32_t i = 0; i < buffers[slot].attributeCount; ++i) {
- VertexAttributeLocation location = VertexAttributeLocation(
- static_cast<uint8_t>(buffers[slot].attributes[i].shaderLocation));
- mAttributeLocationsUsed.set(location);
- mAttributeInfos[location].shaderLocation = location;
- mAttributeInfos[location].vertexBufferSlot = typedSlot;
- mAttributeInfos[location].offset = buffers[slot].attributes[i].offset;
- mAttributeInfos[location].format = buffers[slot].attributes[i].format;
- // Compute the access boundary of this attribute by adding attribute format size to
- // attribute offset. Although offset is in uint64_t, such sum must be no larger than
- // maxVertexBufferArrayStride (2048), which is promised by the GPUVertexBufferLayout
- // validation of creating render pipeline. Therefore, calculating in uint16_t will
- // cause no overflow.
- uint32_t formatByteSize =
- GetVertexFormatInfo(buffers[slot].attributes[i].format).byteSize;
- DAWN_ASSERT(buffers[slot].attributes[i].offset <= 2048);
- uint16_t accessBoundary =
- uint16_t(buffers[slot].attributes[i].offset) + uint16_t(formatByteSize);
- mVertexBufferInfos[typedSlot].usedBytesInStride =
- std::max(mVertexBufferInfos[typedSlot].usedBytesInStride, accessBoundary);
- mVertexBufferInfos[typedSlot].lastStride =
- std::max(mVertexBufferInfos[typedSlot].lastStride,
- mAttributeInfos[location].offset + formatByteSize);
+ if (stateA.stencilFront.compare != stateB.stencilFront.compare ||
+ stateA.stencilFront.failOp != stateB.stencilFront.failOp ||
+ stateA.stencilFront.depthFailOp != stateB.stencilFront.depthFailOp ||
+ stateA.stencilFront.passOp != stateB.stencilFront.passOp) {
+ return false;
}
- }
-
- mPrimitive = descriptor->primitive;
- const PrimitiveDepthClampingState* clampInfo = nullptr;
- FindInChain(mPrimitive.nextInChain, &clampInfo);
- if (clampInfo) {
- mClampDepth = clampInfo->clampDepth;
- }
- mMultisample = descriptor->multisample;
-
- if (mAttachmentState->HasDepthStencilAttachment()) {
- mDepthStencil = *descriptor->depthStencil;
- mWritesDepth = mDepthStencil.depthWriteEnabled;
- if (mDepthStencil.stencilWriteMask) {
- if ((mPrimitive.cullMode != wgpu::CullMode::Front &&
- (mDepthStencil.stencilFront.failOp != wgpu::StencilOperation::Keep ||
- mDepthStencil.stencilFront.depthFailOp != wgpu::StencilOperation::Keep ||
- mDepthStencil.stencilFront.passOp != wgpu::StencilOperation::Keep)) ||
- (mPrimitive.cullMode != wgpu::CullMode::Back &&
- (mDepthStencil.stencilBack.failOp != wgpu::StencilOperation::Keep ||
- mDepthStencil.stencilBack.depthFailOp != wgpu::StencilOperation::Keep ||
- mDepthStencil.stencilBack.passOp != wgpu::StencilOperation::Keep))) {
- mWritesStencil = true;
- }
+ if (stateA.stencilBack.compare != stateB.stencilBack.compare ||
+ stateA.stencilBack.failOp != stateB.stencilBack.failOp ||
+ stateA.stencilBack.depthFailOp != stateB.stencilBack.depthFailOp ||
+ stateA.stencilBack.passOp != stateB.stencilBack.passOp) {
+ return false;
}
- } else {
- // These default values are used by backends when they fill out their own
- // depth stencil states/descriptors from mDepthStencil; they indicate that
- // the depth and stencil tests are disabled when there is no depth stencil
- // attachment.
- mDepthStencil.format = wgpu::TextureFormat::Undefined;
- mDepthStencil.depthWriteEnabled = false;
- mDepthStencil.depthCompare = wgpu::CompareFunction::Always;
- mDepthStencil.stencilBack.compare = wgpu::CompareFunction::Always;
- mDepthStencil.stencilBack.failOp = wgpu::StencilOperation::Keep;
- mDepthStencil.stencilBack.depthFailOp = wgpu::StencilOperation::Keep;
- mDepthStencil.stencilBack.passOp = wgpu::StencilOperation::Keep;
- mDepthStencil.stencilFront.compare = wgpu::CompareFunction::Always;
- mDepthStencil.stencilFront.failOp = wgpu::StencilOperation::Keep;
- mDepthStencil.stencilFront.depthFailOp = wgpu::StencilOperation::Keep;
- mDepthStencil.stencilFront.passOp = wgpu::StencilOperation::Keep;
- mDepthStencil.stencilReadMask = 0xff;
- mDepthStencil.stencilWriteMask = 0xff;
- mDepthStencil.depthBias = 0;
- mDepthStencil.depthBiasSlopeScale = 0.0f;
- mDepthStencil.depthBiasClamp = 0.0f;
- }
-
- for (ColorAttachmentIndex i : IterateBitSet(mAttachmentState->GetColorAttachmentsMask())) {
- // Vertex-only render pipelines have no color attachments. For a render pipeline with
- // color attachments, there must be a valid FragmentState.
- ASSERT(descriptor->fragment != nullptr);
- const ColorTargetState* target =
- &descriptor->fragment->targets[static_cast<uint8_t>(i)];
- mTargets[i] = *target;
-
- if (target->blend != nullptr) {
- mTargetBlend[i] = *target->blend;
- mTargets[i].blend = &mTargetBlend[i];
+ if (stateA.stencilReadMask != stateB.stencilReadMask ||
+ stateA.stencilWriteMask != stateB.stencilWriteMask) {
+ return false;
}
}
-
- SetContentHash(ComputeContentHash());
- TrackInDevice();
}
- RenderPipelineBase::RenderPipelineBase(DeviceBase* device) : PipelineBase(device) {
- TrackInDevice();
+ // Check vertex state
+ if (a->mAttributeLocationsUsed != b->mAttributeLocationsUsed) {
+ return false;
}
- RenderPipelineBase::RenderPipelineBase(DeviceBase* device, ObjectBase::ErrorTag tag)
- : PipelineBase(device, tag) {
- }
-
- RenderPipelineBase::~RenderPipelineBase() = default;
-
- void RenderPipelineBase::DestroyImpl() {
- if (IsCachedReference()) {
- // Do not uncache the actual cached object if we are a blueprint.
- GetDevice()->UncacheRenderPipeline(this);
+ for (VertexAttributeLocation loc : IterateBitSet(a->mAttributeLocationsUsed)) {
+ const VertexAttributeInfo& descA = a->GetAttribute(loc);
+ const VertexAttributeInfo& descB = b->GetAttribute(loc);
+ if (descA.shaderLocation != descB.shaderLocation ||
+ descA.vertexBufferSlot != descB.vertexBufferSlot || descA.offset != descB.offset ||
+ descA.format != descB.format) {
+ return false;
}
-
- // Remove reference to the attachment state so that we don't have lingering references to
- // it preventing it from being uncached in the device.
- mAttachmentState = nullptr;
- }
-
- // static
- RenderPipelineBase* RenderPipelineBase::MakeError(DeviceBase* device) {
- class ErrorRenderPipeline final : public RenderPipelineBase {
- public:
- explicit ErrorRenderPipeline(DeviceBase* device)
- : RenderPipelineBase(device, ObjectBase::kError) {
- }
-
- MaybeError Initialize() override {
- UNREACHABLE();
- return {};
- }
- };
-
- return new ErrorRenderPipeline(device);
- }
-
- ObjectType RenderPipelineBase::GetType() const {
- return ObjectType::RenderPipeline;
- }
-
- const ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes>&
- RenderPipelineBase::GetAttributeLocationsUsed() const {
- ASSERT(!IsError());
- return mAttributeLocationsUsed;
- }
-
- const VertexAttributeInfo& RenderPipelineBase::GetAttribute(
- VertexAttributeLocation location) const {
- ASSERT(!IsError());
- ASSERT(mAttributeLocationsUsed[location]);
- return mAttributeInfos[location];
- }
-
- const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>&
- RenderPipelineBase::GetVertexBufferSlotsUsed() const {
- ASSERT(!IsError());
- return mVertexBufferSlotsUsed;
- }
-
- const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>&
- RenderPipelineBase::GetVertexBufferSlotsUsedAsVertexBuffer() const {
- ASSERT(!IsError());
- return mVertexBufferSlotsUsedAsVertexBuffer;
- }
-
- const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>&
- RenderPipelineBase::GetVertexBufferSlotsUsedAsInstanceBuffer() const {
- ASSERT(!IsError());
- return mVertexBufferSlotsUsedAsInstanceBuffer;
- }
-
- const VertexBufferInfo& RenderPipelineBase::GetVertexBuffer(VertexBufferSlot slot) const {
- ASSERT(!IsError());
- ASSERT(mVertexBufferSlotsUsed[slot]);
- return mVertexBufferInfos[slot];
- }
-
- uint32_t RenderPipelineBase::GetVertexBufferCount() const {
- ASSERT(!IsError());
- return mVertexBufferCount;
- }
-
- const ColorTargetState* RenderPipelineBase::GetColorTargetState(
- ColorAttachmentIndex attachmentSlot) const {
- ASSERT(!IsError());
- ASSERT(attachmentSlot < mTargets.size());
- return &mTargets[attachmentSlot];
- }
-
- const DepthStencilState* RenderPipelineBase::GetDepthStencilState() const {
- ASSERT(!IsError());
- return &mDepthStencil;
- }
-
- wgpu::PrimitiveTopology RenderPipelineBase::GetPrimitiveTopology() const {
- ASSERT(!IsError());
- return mPrimitive.topology;
- }
-
- wgpu::IndexFormat RenderPipelineBase::GetStripIndexFormat() const {
- ASSERT(!IsError());
- return mPrimitive.stripIndexFormat;
- }
-
- wgpu::CullMode RenderPipelineBase::GetCullMode() const {
- ASSERT(!IsError());
- return mPrimitive.cullMode;
- }
-
- wgpu::FrontFace RenderPipelineBase::GetFrontFace() const {
- ASSERT(!IsError());
- return mPrimitive.frontFace;
- }
-
- bool RenderPipelineBase::IsDepthBiasEnabled() const {
- ASSERT(!IsError());
- return mDepthStencil.depthBias != 0 || mDepthStencil.depthBiasSlopeScale != 0;
- }
-
- int32_t RenderPipelineBase::GetDepthBias() const {
- ASSERT(!IsError());
- return mDepthStencil.depthBias;
}
- float RenderPipelineBase::GetDepthBiasSlopeScale() const {
- ASSERT(!IsError());
- return mDepthStencil.depthBiasSlopeScale;
+ if (a->mVertexBufferSlotsUsed != b->mVertexBufferSlotsUsed) {
+ return false;
}
- float RenderPipelineBase::GetDepthBiasClamp() const {
- ASSERT(!IsError());
- return mDepthStencil.depthBiasClamp;
- }
-
- bool RenderPipelineBase::ShouldClampDepth() const {
- ASSERT(!IsError());
- return mClampDepth;
- }
-
- ityp::bitset<ColorAttachmentIndex, kMaxColorAttachments>
- RenderPipelineBase::GetColorAttachmentsMask() const {
- ASSERT(!IsError());
- return mAttachmentState->GetColorAttachmentsMask();
- }
-
- bool RenderPipelineBase::HasDepthStencilAttachment() const {
- ASSERT(!IsError());
- return mAttachmentState->HasDepthStencilAttachment();
- }
-
- wgpu::TextureFormat RenderPipelineBase::GetColorAttachmentFormat(
- ColorAttachmentIndex attachment) const {
- ASSERT(!IsError());
- return mTargets[attachment].format;
- }
-
- wgpu::TextureFormat RenderPipelineBase::GetDepthStencilFormat() const {
- ASSERT(!IsError());
- ASSERT(mAttachmentState->HasDepthStencilAttachment());
- return mDepthStencil.format;
- }
-
- uint32_t RenderPipelineBase::GetSampleCount() const {
- ASSERT(!IsError());
- return mAttachmentState->GetSampleCount();
- }
-
- uint32_t RenderPipelineBase::GetSampleMask() const {
- ASSERT(!IsError());
- return mMultisample.mask;
- }
-
- bool RenderPipelineBase::IsAlphaToCoverageEnabled() const {
- ASSERT(!IsError());
- return mMultisample.alphaToCoverageEnabled;
- }
-
- const AttachmentState* RenderPipelineBase::GetAttachmentState() const {
- ASSERT(!IsError());
-
- return mAttachmentState.Get();
- }
-
- bool RenderPipelineBase::WritesDepth() const {
- ASSERT(!IsError());
-
- return mWritesDepth;
- }
-
- bool RenderPipelineBase::WritesStencil() const {
- ASSERT(!IsError());
-
- return mWritesStencil;
- }
-
- size_t RenderPipelineBase::ComputeContentHash() {
- ObjectContentHasher recorder;
-
- // Record modules and layout
- recorder.Record(PipelineBase::ComputeContentHash());
-
- // Hierarchically record the attachment state.
- // It contains the attachments set, texture formats, and sample count.
- recorder.Record(mAttachmentState->GetContentHash());
-
- // Record attachments
- for (ColorAttachmentIndex i : IterateBitSet(mAttachmentState->GetColorAttachmentsMask())) {
- const ColorTargetState& desc = *GetColorTargetState(i);
- recorder.Record(desc.writeMask);
- if (desc.blend != nullptr) {
- recorder.Record(desc.blend->color.operation, desc.blend->color.srcFactor,
- desc.blend->color.dstFactor);
- recorder.Record(desc.blend->alpha.operation, desc.blend->alpha.srcFactor,
- desc.blend->alpha.dstFactor);
- }
- }
-
- if (mAttachmentState->HasDepthStencilAttachment()) {
- const DepthStencilState& desc = mDepthStencil;
- recorder.Record(desc.depthWriteEnabled, desc.depthCompare);
- recorder.Record(desc.stencilReadMask, desc.stencilWriteMask);
- recorder.Record(desc.stencilFront.compare, desc.stencilFront.failOp,
- desc.stencilFront.depthFailOp, desc.stencilFront.passOp);
- recorder.Record(desc.stencilBack.compare, desc.stencilBack.failOp,
- desc.stencilBack.depthFailOp, desc.stencilBack.passOp);
- recorder.Record(desc.depthBias, desc.depthBiasSlopeScale, desc.depthBiasClamp);
- }
-
- // Record vertex state
- recorder.Record(mAttributeLocationsUsed);
- for (VertexAttributeLocation location : IterateBitSet(mAttributeLocationsUsed)) {
- const VertexAttributeInfo& desc = GetAttribute(location);
- recorder.Record(desc.shaderLocation, desc.vertexBufferSlot, desc.offset, desc.format);
- }
-
- recorder.Record(mVertexBufferSlotsUsed);
- for (VertexBufferSlot slot : IterateBitSet(mVertexBufferSlotsUsed)) {
- const VertexBufferInfo& desc = GetVertexBuffer(slot);
- recorder.Record(desc.arrayStride, desc.stepMode);
- }
-
- // Record primitive state
- recorder.Record(mPrimitive.topology, mPrimitive.stripIndexFormat, mPrimitive.frontFace,
- mPrimitive.cullMode, mClampDepth);
-
- // Record multisample state
- // Sample count hashed as part of the attachment state
- recorder.Record(mMultisample.mask, mMultisample.alphaToCoverageEnabled);
-
- return recorder.GetContentHash();
- }
-
- bool RenderPipelineBase::EqualityFunc::operator()(const RenderPipelineBase* a,
- const RenderPipelineBase* b) const {
- // Check the layout and shader stages.
- if (!PipelineBase::EqualForCache(a, b)) {
- return false;
- }
-
- // Check the attachment state.
- // It contains the attachments set, texture formats, and sample count.
- if (a->mAttachmentState.Get() != b->mAttachmentState.Get()) {
+ for (VertexBufferSlot slot : IterateBitSet(a->mVertexBufferSlotsUsed)) {
+ const VertexBufferInfo& descA = a->GetVertexBuffer(slot);
+ const VertexBufferInfo& descB = b->GetVertexBuffer(slot);
+ if (descA.arrayStride != descB.arrayStride || descA.stepMode != descB.stepMode) {
return false;
}
+ }
- if (a->mAttachmentState.Get() != nullptr) {
- for (ColorAttachmentIndex i :
- IterateBitSet(a->mAttachmentState->GetColorAttachmentsMask())) {
- const ColorTargetState& descA = *a->GetColorTargetState(i);
- const ColorTargetState& descB = *b->GetColorTargetState(i);
- if (descA.writeMask != descB.writeMask) {
- return false;
- }
- if ((descA.blend == nullptr) != (descB.blend == nullptr)) {
- return false;
- }
- if (descA.blend != nullptr) {
- if (descA.blend->color.operation != descB.blend->color.operation ||
- descA.blend->color.srcFactor != descB.blend->color.srcFactor ||
- descA.blend->color.dstFactor != descB.blend->color.dstFactor) {
- return false;
- }
- if (descA.blend->alpha.operation != descB.blend->alpha.operation ||
- descA.blend->alpha.srcFactor != descB.blend->alpha.srcFactor ||
- descA.blend->alpha.dstFactor != descB.blend->alpha.dstFactor) {
- return false;
- }
- }
- }
-
- // Check depth/stencil state
- if (a->mAttachmentState->HasDepthStencilAttachment()) {
- const DepthStencilState& stateA = a->mDepthStencil;
- const DepthStencilState& stateB = b->mDepthStencil;
-
- ASSERT(!std::isnan(stateA.depthBiasSlopeScale));
- ASSERT(!std::isnan(stateB.depthBiasSlopeScale));
- ASSERT(!std::isnan(stateA.depthBiasClamp));
- ASSERT(!std::isnan(stateB.depthBiasClamp));
-
- if (stateA.depthWriteEnabled != stateB.depthWriteEnabled ||
- stateA.depthCompare != stateB.depthCompare ||
- stateA.depthBias != stateB.depthBias ||
- stateA.depthBiasSlopeScale != stateB.depthBiasSlopeScale ||
- stateA.depthBiasClamp != stateB.depthBiasClamp) {
- return false;
- }
- if (stateA.stencilFront.compare != stateB.stencilFront.compare ||
- stateA.stencilFront.failOp != stateB.stencilFront.failOp ||
- stateA.stencilFront.depthFailOp != stateB.stencilFront.depthFailOp ||
- stateA.stencilFront.passOp != stateB.stencilFront.passOp) {
- return false;
- }
- if (stateA.stencilBack.compare != stateB.stencilBack.compare ||
- stateA.stencilBack.failOp != stateB.stencilBack.failOp ||
- stateA.stencilBack.depthFailOp != stateB.stencilBack.depthFailOp ||
- stateA.stencilBack.passOp != stateB.stencilBack.passOp) {
- return false;
- }
- if (stateA.stencilReadMask != stateB.stencilReadMask ||
- stateA.stencilWriteMask != stateB.stencilWriteMask) {
- return false;
- }
- }
- }
-
- // Check vertex state
- if (a->mAttributeLocationsUsed != b->mAttributeLocationsUsed) {
+ // Check primitive state
+ {
+ const PrimitiveState& stateA = a->mPrimitive;
+ const PrimitiveState& stateB = b->mPrimitive;
+ if (stateA.topology != stateB.topology ||
+ stateA.stripIndexFormat != stateB.stripIndexFormat ||
+ stateA.frontFace != stateB.frontFace || stateA.cullMode != stateB.cullMode ||
+ a->mClampDepth != b->mClampDepth) {
return false;
}
+ }
- for (VertexAttributeLocation loc : IterateBitSet(a->mAttributeLocationsUsed)) {
- const VertexAttributeInfo& descA = a->GetAttribute(loc);
- const VertexAttributeInfo& descB = b->GetAttribute(loc);
- if (descA.shaderLocation != descB.shaderLocation ||
- descA.vertexBufferSlot != descB.vertexBufferSlot || descA.offset != descB.offset ||
- descA.format != descB.format) {
- return false;
- }
- }
-
- if (a->mVertexBufferSlotsUsed != b->mVertexBufferSlotsUsed) {
+ // Check multisample state
+ {
+ const MultisampleState& stateA = a->mMultisample;
+ const MultisampleState& stateB = b->mMultisample;
+ // Sample count already checked as part of the attachment state.
+ if (stateA.mask != stateB.mask ||
+ stateA.alphaToCoverageEnabled != stateB.alphaToCoverageEnabled) {
return false;
}
-
- for (VertexBufferSlot slot : IterateBitSet(a->mVertexBufferSlotsUsed)) {
- const VertexBufferInfo& descA = a->GetVertexBuffer(slot);
- const VertexBufferInfo& descB = b->GetVertexBuffer(slot);
- if (descA.arrayStride != descB.arrayStride || descA.stepMode != descB.stepMode) {
- return false;
- }
- }
-
- // Check primitive state
- {
- const PrimitiveState& stateA = a->mPrimitive;
- const PrimitiveState& stateB = b->mPrimitive;
- if (stateA.topology != stateB.topology ||
- stateA.stripIndexFormat != stateB.stripIndexFormat ||
- stateA.frontFace != stateB.frontFace || stateA.cullMode != stateB.cullMode ||
- a->mClampDepth != b->mClampDepth) {
- return false;
- }
- }
-
- // Check multisample state
- {
- const MultisampleState& stateA = a->mMultisample;
- const MultisampleState& stateB = b->mMultisample;
- // Sample count already checked as part of the attachment state.
- if (stateA.mask != stateB.mask ||
- stateA.alphaToCoverageEnabled != stateB.alphaToCoverageEnabled) {
- return false;
- }
- }
-
- return true;
}
+ return true;
+}
+
} // namespace dawn::native
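
Dawn keeps render pipelines in an unordered_set keyed by content, so the ComputeContentHash() and EqualityFunc code reworked above must cover exactly the same fields. Below is a minimal, self-contained sketch of that invariant; PipelineKey and the hash/equality functors are illustrative stand-ins, not Dawn types.

    #include <cstdint>
    #include <functional>
    #include <unordered_set>

    struct PipelineKey {
        uint32_t sampleMask;
        bool alphaToCoverageEnabled;
        int32_t depthBias;
    };

    struct PipelineKeyHash {
        size_t operator()(const PipelineKey& k) const {
            size_t h = std::hash<uint32_t>()(k.sampleMask);
            h = h * 31 + std::hash<bool>()(k.alphaToCoverageEnabled);
            h = h * 31 + std::hash<int32_t>()(k.depthBias);
            return h;
        }
    };

    struct PipelineKeyEqual {
        bool operator()(const PipelineKey& a, const PipelineKey& b) const {
            // Compares exactly the fields the hash records, mirroring how
            // EqualityFunc and ComputeContentHash must stay in sync.
            return a.sampleMask == b.sampleMask &&
                   a.alphaToCoverageEnabled == b.alphaToCoverageEnabled &&
                   a.depthBias == b.depthBias;
        }
    };

    int main() {
        std::unordered_set<PipelineKey, PipelineKeyHash, PipelineKeyEqual> cache;
        cache.insert({0xFFFFFFFFu, false, 0});
        // An identical key finds the cached entry instead of creating a new one.
        return cache.count({0xFFFFFFFFu, false, 0}) == 1 ? 0 : 1;
    }
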
diff --git a/chromium/third_party/dawn/src/dawn/native/RenderPipeline.h b/chromium/third_party/dawn/src/dawn/native/RenderPipeline.h
index 9a3c8c51658..f904f8a8931 100644
--- a/chromium/third_party/dawn/src/dawn/native/RenderPipeline.h
+++ b/chromium/third_party/dawn/src/dawn/native/RenderPipeline.h
@@ -15,6 +15,10 @@
#ifndef SRC_DAWN_NATIVE_RENDERPIPELINE_H_
#define SRC_DAWN_NATIVE_RENDERPIPELINE_H_
+#include <array>
+#include <bitset>
+#include <vector>
+
#include "dawn/common/TypedInteger.h"
#include "dawn/native/AttachmentState.h"
#include "dawn/native/Forward.h"
@@ -23,124 +27,120 @@
#include "dawn/native/dawn_platform.h"
-#include <array>
-#include <bitset>
-
namespace dawn::native {
- class DeviceBase;
-
- MaybeError ValidateRenderPipelineDescriptor(DeviceBase* device,
- const RenderPipelineDescriptor* descriptor);
-
- std::vector<StageAndDescriptor> GetRenderStagesAndSetDummyShader(
- DeviceBase* device,
- const RenderPipelineDescriptor* descriptor);
-
- size_t IndexFormatSize(wgpu::IndexFormat format);
-
- bool IsStripPrimitiveTopology(wgpu::PrimitiveTopology primitiveTopology);
-
- bool StencilTestEnabled(const DepthStencilState* depthStencil);
-
- struct VertexAttributeInfo {
- wgpu::VertexFormat format;
- uint64_t offset;
- VertexAttributeLocation shaderLocation;
- VertexBufferSlot vertexBufferSlot;
+class DeviceBase;
+
+MaybeError ValidateRenderPipelineDescriptor(DeviceBase* device,
+ const RenderPipelineDescriptor* descriptor);
+
+std::vector<StageAndDescriptor> GetRenderStagesAndSetPlaceholderShader(
+ DeviceBase* device,
+ const RenderPipelineDescriptor* descriptor);
+
+size_t IndexFormatSize(wgpu::IndexFormat format);
+
+bool IsStripPrimitiveTopology(wgpu::PrimitiveTopology primitiveTopology);
+
+bool StencilTestEnabled(const DepthStencilState* depthStencil);
+
+struct VertexAttributeInfo {
+ wgpu::VertexFormat format;
+ uint64_t offset;
+ VertexAttributeLocation shaderLocation;
+ VertexBufferSlot vertexBufferSlot;
+};
+
+struct VertexBufferInfo {
+ uint64_t arrayStride;
+ wgpu::VertexStepMode stepMode;
+ uint16_t usedBytesInStride;
+ // As indicated in the spec, the lastStride is max(attribute.offset +
+ // sizeof(attribute.format)) for each attribute in the buffer[slot]
+ uint64_t lastStride;
+};
+
+class RenderPipelineBase : public PipelineBase {
+ public:
+ RenderPipelineBase(DeviceBase* device, const RenderPipelineDescriptor* descriptor);
+ ~RenderPipelineBase() override;
+
+ static RenderPipelineBase* MakeError(DeviceBase* device);
+
+ ObjectType GetType() const override;
+
+ const ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes>& GetAttributeLocationsUsed()
+ const;
+ const VertexAttributeInfo& GetAttribute(VertexAttributeLocation location) const;
+ const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>& GetVertexBufferSlotsUsed() const;
+ const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>&
+ GetVertexBufferSlotsUsedAsVertexBuffer() const;
+ const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>&
+ GetVertexBufferSlotsUsedAsInstanceBuffer() const;
+ const VertexBufferInfo& GetVertexBuffer(VertexBufferSlot slot) const;
+ uint32_t GetVertexBufferCount() const;
+
+ const ColorTargetState* GetColorTargetState(ColorAttachmentIndex attachmentSlot) const;
+ const DepthStencilState* GetDepthStencilState() const;
+ wgpu::PrimitiveTopology GetPrimitiveTopology() const;
+ wgpu::IndexFormat GetStripIndexFormat() const;
+ wgpu::CullMode GetCullMode() const;
+ wgpu::FrontFace GetFrontFace() const;
+ bool IsDepthBiasEnabled() const;
+ int32_t GetDepthBias() const;
+ float GetDepthBiasSlopeScale() const;
+ float GetDepthBiasClamp() const;
+ bool ShouldClampDepth() const;
+
+ ityp::bitset<ColorAttachmentIndex, kMaxColorAttachments> GetColorAttachmentsMask() const;
+ bool HasDepthStencilAttachment() const;
+ wgpu::TextureFormat GetColorAttachmentFormat(ColorAttachmentIndex attachment) const;
+ wgpu::TextureFormat GetDepthStencilFormat() const;
+ uint32_t GetSampleCount() const;
+ uint32_t GetSampleMask() const;
+ bool IsAlphaToCoverageEnabled() const;
+ bool WritesDepth() const;
+ bool WritesStencil() const;
+
+ const AttachmentState* GetAttachmentState() const;
+
+ // Functions necessary for the unordered_set<RenderPipelineBase*>-based cache.
+ size_t ComputeContentHash() override;
+
+ struct EqualityFunc {
+ bool operator()(const RenderPipelineBase* a, const RenderPipelineBase* b) const;
};
- struct VertexBufferInfo {
- uint64_t arrayStride;
- wgpu::VertexStepMode stepMode;
- uint16_t usedBytesInStride;
- // As indicated in the spec, the lastStride is max(attribute.offset +
- // sizeof(attribute.format)) for each attribute in the buffer[slot]
- uint64_t lastStride;
- };
-
- class RenderPipelineBase : public PipelineBase {
- public:
- RenderPipelineBase(DeviceBase* device, const RenderPipelineDescriptor* descriptor);
- ~RenderPipelineBase() override;
-
- static RenderPipelineBase* MakeError(DeviceBase* device);
-
- ObjectType GetType() const override;
-
- const ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes>&
- GetAttributeLocationsUsed() const;
- const VertexAttributeInfo& GetAttribute(VertexAttributeLocation location) const;
- const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>& GetVertexBufferSlotsUsed() const;
- const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>&
- GetVertexBufferSlotsUsedAsVertexBuffer() const;
- const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>&
- GetVertexBufferSlotsUsedAsInstanceBuffer() const;
- const VertexBufferInfo& GetVertexBuffer(VertexBufferSlot slot) const;
- uint32_t GetVertexBufferCount() const;
-
- const ColorTargetState* GetColorTargetState(ColorAttachmentIndex attachmentSlot) const;
- const DepthStencilState* GetDepthStencilState() const;
- wgpu::PrimitiveTopology GetPrimitiveTopology() const;
- wgpu::IndexFormat GetStripIndexFormat() const;
- wgpu::CullMode GetCullMode() const;
- wgpu::FrontFace GetFrontFace() const;
- bool IsDepthBiasEnabled() const;
- int32_t GetDepthBias() const;
- float GetDepthBiasSlopeScale() const;
- float GetDepthBiasClamp() const;
- bool ShouldClampDepth() const;
-
- ityp::bitset<ColorAttachmentIndex, kMaxColorAttachments> GetColorAttachmentsMask() const;
- bool HasDepthStencilAttachment() const;
- wgpu::TextureFormat GetColorAttachmentFormat(ColorAttachmentIndex attachment) const;
- wgpu::TextureFormat GetDepthStencilFormat() const;
- uint32_t GetSampleCount() const;
- uint32_t GetSampleMask() const;
- bool IsAlphaToCoverageEnabled() const;
- bool WritesDepth() const;
- bool WritesStencil() const;
-
- const AttachmentState* GetAttachmentState() const;
-
- // Functions necessary for the unordered_set<RenderPipelineBase*>-based cache.
- size_t ComputeContentHash() override;
-
- struct EqualityFunc {
- bool operator()(const RenderPipelineBase* a, const RenderPipelineBase* b) const;
- };
-
- protected:
- // Constructor used only for mocking and testing.
- explicit RenderPipelineBase(DeviceBase* device);
- void DestroyImpl() override;
-
- private:
- RenderPipelineBase(DeviceBase* device, ObjectBase::ErrorTag tag);
-
- // Vertex state
- uint32_t mVertexBufferCount;
- ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes> mAttributeLocationsUsed;
- ityp::array<VertexAttributeLocation, VertexAttributeInfo, kMaxVertexAttributes>
- mAttributeInfos;
- ityp::bitset<VertexBufferSlot, kMaxVertexBuffers> mVertexBufferSlotsUsed;
- ityp::bitset<VertexBufferSlot, kMaxVertexBuffers> mVertexBufferSlotsUsedAsVertexBuffer;
- ityp::bitset<VertexBufferSlot, kMaxVertexBuffers> mVertexBufferSlotsUsedAsInstanceBuffer;
- ityp::array<VertexBufferSlot, VertexBufferInfo, kMaxVertexBuffers> mVertexBufferInfos;
-
- // Attachments
- Ref<AttachmentState> mAttachmentState;
- ityp::array<ColorAttachmentIndex, ColorTargetState, kMaxColorAttachments> mTargets;
- ityp::array<ColorAttachmentIndex, BlendState, kMaxColorAttachments> mTargetBlend;
-
- // Other state
- PrimitiveState mPrimitive;
- DepthStencilState mDepthStencil;
- MultisampleState mMultisample;
- bool mClampDepth = false;
- bool mWritesDepth = false;
- bool mWritesStencil = false;
- };
+ protected:
+ // Constructor used only for mocking and testing.
+ explicit RenderPipelineBase(DeviceBase* device);
+ void DestroyImpl() override;
+
+ private:
+ RenderPipelineBase(DeviceBase* device, ObjectBase::ErrorTag tag);
+
+ // Vertex state
+ uint32_t mVertexBufferCount;
+ ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes> mAttributeLocationsUsed;
+ ityp::array<VertexAttributeLocation, VertexAttributeInfo, kMaxVertexAttributes> mAttributeInfos;
+ ityp::bitset<VertexBufferSlot, kMaxVertexBuffers> mVertexBufferSlotsUsed;
+ ityp::bitset<VertexBufferSlot, kMaxVertexBuffers> mVertexBufferSlotsUsedAsVertexBuffer;
+ ityp::bitset<VertexBufferSlot, kMaxVertexBuffers> mVertexBufferSlotsUsedAsInstanceBuffer;
+ ityp::array<VertexBufferSlot, VertexBufferInfo, kMaxVertexBuffers> mVertexBufferInfos;
+
+ // Attachments
+ Ref<AttachmentState> mAttachmentState;
+ ityp::array<ColorAttachmentIndex, ColorTargetState, kMaxColorAttachments> mTargets;
+ ityp::array<ColorAttachmentIndex, BlendState, kMaxColorAttachments> mTargetBlend;
+
+ // Other state
+ PrimitiveState mPrimitive;
+ DepthStencilState mDepthStencil;
+ MultisampleState mMultisample;
+ bool mClampDepth = false;
+ bool mWritesDepth = false;
+ bool mWritesStencil = false;
+};
} // namespace dawn::native
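
The lastStride comment in VertexBufferInfo above reduces to a maximum over the attributes bound to one vertex buffer slot. A small standalone sketch of that computation follows; Attribute and ComputeLastStride are illustrative names, not Dawn code, and format byte sizes are assumed known.

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    struct Attribute {
        uint64_t offset;
        uint32_t formatByteSize;  // e.g. 8 for Float32x2, 16 for Float32x4
    };

    // lastStride = max(attribute.offset + byte size of attribute.format) over
    // the attributes bound to one vertex buffer slot.
    uint64_t ComputeLastStride(const std::vector<Attribute>& attributes) {
        uint64_t lastStride = 0;
        for (const Attribute& a : attributes) {
            lastStride = std::max<uint64_t>(lastStride, a.offset + a.formatByteSize);
        }
        return lastStride;
    }

    int main() {
        // A Float32x2 at offset 0 and a Float32x4 at offset 8 need 24 bytes per
        // element, so any arrayStride of at least 24 covers this layout.
        return ComputeLastStride({{0, 8}, {8, 16}}) == 24 ? 0 : 1;
    }
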
diff --git a/chromium/third_party/dawn/src/dawn/native/ResourceHeap.h b/chromium/third_party/dawn/src/dawn/native/ResourceHeap.h
index b0ea037c5f7..1e5347bd030 100644
--- a/chromium/third_party/dawn/src/dawn/native/ResourceHeap.h
+++ b/chromium/third_party/dawn/src/dawn/native/ResourceHeap.h
@@ -19,12 +19,12 @@
namespace dawn::native {
- // Wrapper for a resource backed by a heap.
- class ResourceHeapBase {
- public:
- ResourceHeapBase() = default;
- virtual ~ResourceHeapBase() = default;
- };
+// Wrapper for a resource backed by a heap.
+class ResourceHeapBase {
+ public:
+ ResourceHeapBase() = default;
+ virtual ~ResourceHeapBase() = default;
+};
} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/ResourceHeapAllocator.h b/chromium/third_party/dawn/src/dawn/native/ResourceHeapAllocator.h
index 42b922904ca..1d77f7e1775 100644
--- a/chromium/third_party/dawn/src/dawn/native/ResourceHeapAllocator.h
+++ b/chromium/third_party/dawn/src/dawn/native/ResourceHeapAllocator.h
@@ -15,22 +15,22 @@
#ifndef SRC_DAWN_NATIVE_RESOURCEHEAPALLOCATOR_H_
#define SRC_DAWN_NATIVE_RESOURCEHEAPALLOCATOR_H_
+#include <memory>
+
#include "dawn/native/Error.h"
#include "dawn/native/ResourceHeap.h"
-#include <memory>
-
namespace dawn::native {
- // Interface for backend allocators that create memory heaps resources can be suballocated in.
- class ResourceHeapAllocator {
- public:
- virtual ~ResourceHeapAllocator() = default;
+// Interface for backend allocators that create memory heaps resources can be suballocated in.
+class ResourceHeapAllocator {
+ public:
+ virtual ~ResourceHeapAllocator() = default;
- virtual ResultOrError<std::unique_ptr<ResourceHeapBase>> AllocateResourceHeap(
- uint64_t size) = 0;
- virtual void DeallocateResourceHeap(std::unique_ptr<ResourceHeapBase> allocation) = 0;
- };
+ virtual ResultOrError<std::unique_ptr<ResourceHeapBase>> AllocateResourceHeap(
+ uint64_t size) = 0;
+ virtual void DeallocateResourceHeap(std::unique_ptr<ResourceHeapBase> allocation) = 0;
+};
} // namespace dawn::native
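
The interface above is what each backend implements so device-level sub-allocators stay backend-agnostic. A deliberately simplified, error-handling-free stand-in is sketched below; FakeHeap and FakeHeapAllocator are illustrative types, whereas Dawn's real backends wrap objects such as a VkDeviceMemory or ID3D12Heap and return ResultOrError.

    #include <cstdint>
    #include <memory>
    #include <vector>

    // Stand-in for a backend heap: just a block of host memory here.
    class FakeHeap {
      public:
        explicit FakeHeap(uint64_t size) : mStorage(size) {}
        uint64_t GetSize() const { return mStorage.size(); }

      private:
        std::vector<uint8_t> mStorage;
    };

    class FakeHeapAllocator {
      public:
        std::unique_ptr<FakeHeap> AllocateResourceHeap(uint64_t size) {
            return std::make_unique<FakeHeap>(size);
        }
        void DeallocateResourceHeap(std::unique_ptr<FakeHeap> heap) {
            heap.reset();  // Taking ownership back and dropping it frees the heap.
        }
    };
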
diff --git a/chromium/third_party/dawn/src/dawn/native/ResourceMemoryAllocation.cpp b/chromium/third_party/dawn/src/dawn/native/ResourceMemoryAllocation.cpp
index 8848c18a6b9..58a315ef2e8 100644
--- a/chromium/third_party/dawn/src/dawn/native/ResourceMemoryAllocation.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/ResourceMemoryAllocation.cpp
@@ -17,37 +17,35 @@
namespace dawn::native {
- ResourceMemoryAllocation::ResourceMemoryAllocation()
- : mOffset(0), mResourceHeap(nullptr), mMappedPointer(nullptr) {
- }
-
- ResourceMemoryAllocation::ResourceMemoryAllocation(const AllocationInfo& info,
- uint64_t offset,
- ResourceHeapBase* resourceHeap,
- uint8_t* mappedPointer)
- : mInfo(info), mOffset(offset), mResourceHeap(resourceHeap), mMappedPointer(mappedPointer) {
- }
-
- ResourceHeapBase* ResourceMemoryAllocation::GetResourceHeap() const {
- ASSERT(mInfo.mMethod != AllocationMethod::kInvalid);
- return mResourceHeap;
- }
-
- uint64_t ResourceMemoryAllocation::GetOffset() const {
- ASSERT(mInfo.mMethod != AllocationMethod::kInvalid);
- return mOffset;
- }
-
- AllocationInfo ResourceMemoryAllocation::GetInfo() const {
- return mInfo;
- }
-
- uint8_t* ResourceMemoryAllocation::GetMappedPointer() const {
- return mMappedPointer;
- }
-
- void ResourceMemoryAllocation::Invalidate() {
- mResourceHeap = nullptr;
- mInfo = {};
- }
+ResourceMemoryAllocation::ResourceMemoryAllocation()
+ : mOffset(0), mResourceHeap(nullptr), mMappedPointer(nullptr) {}
+
+ResourceMemoryAllocation::ResourceMemoryAllocation(const AllocationInfo& info,
+ uint64_t offset,
+ ResourceHeapBase* resourceHeap,
+ uint8_t* mappedPointer)
+ : mInfo(info), mOffset(offset), mResourceHeap(resourceHeap), mMappedPointer(mappedPointer) {}
+
+ResourceHeapBase* ResourceMemoryAllocation::GetResourceHeap() const {
+ ASSERT(mInfo.mMethod != AllocationMethod::kInvalid);
+ return mResourceHeap;
+}
+
+uint64_t ResourceMemoryAllocation::GetOffset() const {
+ ASSERT(mInfo.mMethod != AllocationMethod::kInvalid);
+ return mOffset;
+}
+
+AllocationInfo ResourceMemoryAllocation::GetInfo() const {
+ return mInfo;
+}
+
+uint8_t* ResourceMemoryAllocation::GetMappedPointer() const {
+ return mMappedPointer;
+}
+
+void ResourceMemoryAllocation::Invalidate() {
+ mResourceHeap = nullptr;
+ mInfo = {};
+}
} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/ResourceMemoryAllocation.h b/chromium/third_party/dawn/src/dawn/native/ResourceMemoryAllocation.h
index 5fea0f11317..7a05d109832 100644
--- a/chromium/third_party/dawn/src/dawn/native/ResourceMemoryAllocation.h
+++ b/chromium/third_party/dawn/src/dawn/native/ResourceMemoryAllocation.h
@@ -19,62 +19,61 @@
namespace dawn::native {
- class ResourceHeapBase;
-
- // Allocation method determines how memory was sub-divided.
- // Used by the device to get the allocator that was responsible for the allocation.
- enum class AllocationMethod {
-
- // Memory not sub-divided.
- kDirect,
-
- // Memory sub-divided using one or more blocks of various sizes.
- kSubAllocated,
-
- // Memory was allocated outside of Dawn.
- kExternal,
-
- // Memory not allocated or freed.
- kInvalid
- };
-
- // Metadata that describes how the allocation was allocated.
- struct AllocationInfo {
- // AllocationInfo contains a separate offset to not confuse block vs memory offsets.
- // The block offset is within the entire allocator memory range and only required by the
- // buddy sub-allocator to get the corresponding memory. Unlike the block offset, the
- // allocation offset is always local to the memory.
- uint64_t mBlockOffset = 0;
-
- AllocationMethod mMethod = AllocationMethod::kInvalid;
- };
-
- // Handle into a resource heap pool.
- class ResourceMemoryAllocation {
- public:
- ResourceMemoryAllocation();
- ResourceMemoryAllocation(const AllocationInfo& info,
- uint64_t offset,
- ResourceHeapBase* resourceHeap,
- uint8_t* mappedPointer = nullptr);
- virtual ~ResourceMemoryAllocation() = default;
-
- ResourceMemoryAllocation(const ResourceMemoryAllocation&) = default;
- ResourceMemoryAllocation& operator=(const ResourceMemoryAllocation&) = default;
-
- ResourceHeapBase* GetResourceHeap() const;
- uint64_t GetOffset() const;
- uint8_t* GetMappedPointer() const;
- AllocationInfo GetInfo() const;
-
- virtual void Invalidate();
-
- private:
- AllocationInfo mInfo;
- uint64_t mOffset;
- ResourceHeapBase* mResourceHeap;
- uint8_t* mMappedPointer;
- };
+class ResourceHeapBase;
+
+// Allocation method determines how memory was sub-divided.
+// Used by the device to get the allocator that was responsible for the allocation.
+enum class AllocationMethod {
+ // Memory not sub-divided.
+ kDirect,
+
+ // Memory sub-divided using one or more blocks of various sizes.
+ kSubAllocated,
+
+ // Memory was allocated outside of Dawn.
+ kExternal,
+
+ // Memory not allocated or freed.
+ kInvalid
+};
+
+// Metadata that describes how the allocation was allocated.
+struct AllocationInfo {
+ // AllocationInfo contains a separate offset to not confuse block vs memory offsets.
+ // The block offset is within the entire allocator memory range and only required by the
+ // buddy sub-allocator to get the corresponding memory. Unlike the block offset, the
+ // allocation offset is always local to the memory.
+ uint64_t mBlockOffset = 0;
+
+ AllocationMethod mMethod = AllocationMethod::kInvalid;
+};
+
+// Handle into a resource heap pool.
+class ResourceMemoryAllocation {
+ public:
+ ResourceMemoryAllocation();
+ ResourceMemoryAllocation(const AllocationInfo& info,
+ uint64_t offset,
+ ResourceHeapBase* resourceHeap,
+ uint8_t* mappedPointer = nullptr);
+ virtual ~ResourceMemoryAllocation() = default;
+
+ ResourceMemoryAllocation(const ResourceMemoryAllocation&) = default;
+ ResourceMemoryAllocation& operator=(const ResourceMemoryAllocation&) = default;
+
+ ResourceHeapBase* GetResourceHeap() const;
+ uint64_t GetOffset() const;
+ uint8_t* GetMappedPointer() const;
+ AllocationInfo GetInfo() const;
+
+ virtual void Invalidate();
+
+ private:
+ AllocationInfo mInfo;
+ uint64_t mOffset;
+ ResourceHeapBase* mResourceHeap;
+ uint8_t* mMappedPointer;
+};
} // namespace dawn::native
#endif // SRC_DAWN_NATIVE_RESOURCEMEMORYALLOCATION_H_
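
A toy illustration of the block-offset vs. allocation-offset distinction described in the AllocationInfo comment above, assuming a buddy-style allocator whose range is backed by several equally sized memories. ToyPlacement, Resolve, and the numbers are illustrative only.

    #include <cstdint>
    #include <cstdio>

    struct ToyPlacement {
        uint64_t blockOffset;  // Offset within the whole allocator range (buddy-allocator view).
        uint64_t memoryIndex;  // Which backing memory the block falls into.
        uint64_t localOffset;  // Offset within that memory (what a GPU binding actually uses).
    };

    ToyPlacement Resolve(uint64_t blockOffset, uint64_t memorySize) {
        return {blockOffset, blockOffset / memorySize, blockOffset % memorySize};
    }

    int main() {
        // With 128-byte backing memories, a block at global offset 160 lives in
        // memory #1 at local offset 32.
        ToyPlacement p = Resolve(160, 128);
        std::printf("memory %llu, local offset %llu\n",
                    static_cast<unsigned long long>(p.memoryIndex),
                    static_cast<unsigned long long>(p.localOffset));
        return 0;
    }
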
diff --git a/chromium/third_party/dawn/src/dawn/native/RingBufferAllocator.cpp b/chromium/third_party/dawn/src/dawn/native/RingBufferAllocator.cpp
index e1dc7aeb67a..01e23e7c15a 100644
--- a/chromium/third_party/dawn/src/dawn/native/RingBufferAllocator.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/RingBufferAllocator.cpp
@@ -14,6 +14,8 @@
#include "dawn/native/RingBufferAllocator.h"
+#include <utility>
+
// Note: Current RingBufferAllocator implementation uses two indices (start and end) to implement a
// circular queue. However, this approach defines a full queue when one element is still unused.
//
@@ -28,94 +30,101 @@
// used bytes.
namespace dawn::native {
- RingBufferAllocator::RingBufferAllocator(uint64_t maxSize) : mMaxBlockSize(maxSize) {
- }
+RingBufferAllocator::RingBufferAllocator() = default;
- void RingBufferAllocator::Deallocate(ExecutionSerial lastCompletedSerial) {
- // Reclaim memory from previously recorded blocks.
- for (Request& request : mInflightRequests.IterateUpTo(lastCompletedSerial)) {
- mUsedStartOffset = request.endOffset;
- mUsedSize -= request.size;
- }
+RingBufferAllocator::RingBufferAllocator(uint64_t maxSize) : mMaxBlockSize(maxSize) {}
- // Dequeue previously recorded requests.
- mInflightRequests.ClearUpTo(lastCompletedSerial);
- }
+RingBufferAllocator::RingBufferAllocator(const RingBufferAllocator&) = default;
- uint64_t RingBufferAllocator::GetSize() const {
- return mMaxBlockSize;
- }
+RingBufferAllocator::~RingBufferAllocator() = default;
+
+RingBufferAllocator& RingBufferAllocator::operator=(const RingBufferAllocator&) = default;
- uint64_t RingBufferAllocator::GetUsedSize() const {
- return mUsedSize;
+void RingBufferAllocator::Deallocate(ExecutionSerial lastCompletedSerial) {
+ // Reclaim memory from previously recorded blocks.
+ for (Request& request : mInflightRequests.IterateUpTo(lastCompletedSerial)) {
+ mUsedStartOffset = request.endOffset;
+ mUsedSize -= request.size;
}
- bool RingBufferAllocator::Empty() const {
- return mInflightRequests.Empty();
+ // Dequeue previously recorded requests.
+ mInflightRequests.ClearUpTo(lastCompletedSerial);
+}
+
+uint64_t RingBufferAllocator::GetSize() const {
+ return mMaxBlockSize;
+}
+
+uint64_t RingBufferAllocator::GetUsedSize() const {
+ return mUsedSize;
+}
+
+bool RingBufferAllocator::Empty() const {
+ return mInflightRequests.Empty();
+}
+
+// Sub-allocate the ring-buffer by requesting a chunk of the specified size.
+// This is a serial-based resource scheme: the life-span of resources (and their allocations) is
+// tracked by GPU progress via serials. Memory can be reused by determining if the GPU has
+// completed up to a given serial. Each sub-allocation request is tracked in the serial offset
+// queue, which identifies an existing (or new) frames-worth of resources. Internally, the
+// ring-buffer maintains offsets of 3 "memory" states: Free, Reclaimed, and Used. This is done
+// in FIFO order as older frames would free resources before newer ones.
+uint64_t RingBufferAllocator::Allocate(uint64_t allocationSize, ExecutionSerial serial) {
+ // Check if the buffer is full by comparing the used size.
+ // If the buffer is not split where waste occurs (e.g. cannot fit new sub-alloc in front), a
+ // subsequent sub-alloc could fail where the used size was previously adjusted to include
+ // the wasted space.
+ if (mUsedSize >= mMaxBlockSize) {
+ return kInvalidOffset;
}
- // Sub-allocate the ring-buffer by requesting a chunk of the specified size.
- // This is a serial-based resource scheme: the life-span of resources (and their allocations) is
- // tracked by GPU progress via serials. Memory can be reused by determining if the GPU has
- // completed up to a given serial. Each sub-allocation request is tracked in the serial offset
- // queue, which identifies an existing (or new) frames-worth of resources. Internally, the
- // ring-buffer maintains offsets of 3 "memory" states: Free, Reclaimed, and Used. This is done
- // in FIFO order as older frames would free resources before newer ones.
- uint64_t RingBufferAllocator::Allocate(uint64_t allocationSize, ExecutionSerial serial) {
- // Check if the buffer is full by comparing the used size.
- // If the buffer is not split where waste occurs (e.g. cannot fit new sub-alloc in front), a
- // subsequent sub-alloc could fail where the used size was previously adjusted to include
- // the wasted space.
- if (mUsedSize >= mMaxBlockSize) {
- return kInvalidOffset;
- }
+ // Ensure adding allocationSize does not overflow.
+ const uint64_t remainingSize = (mMaxBlockSize - mUsedSize);
+ if (allocationSize > remainingSize) {
+ return kInvalidOffset;
+ }
- // Ensure adding allocationSize does not overflow.
- const uint64_t remainingSize = (mMaxBlockSize - mUsedSize);
- if (allocationSize > remainingSize) {
- return kInvalidOffset;
- }
+ uint64_t startOffset = kInvalidOffset;
- uint64_t startOffset = kInvalidOffset;
-
- // Check if the buffer is NOT split (i.e sub-alloc on ends)
- if (mUsedStartOffset <= mUsedEndOffset) {
- // Order is important (try to sub-alloc at end first).
- // This is due to FIFO order where sub-allocs are inserted from left-to-right (when not
- // wrapped).
- if (mUsedEndOffset + allocationSize <= mMaxBlockSize) {
- startOffset = mUsedEndOffset;
- mUsedEndOffset += allocationSize;
- mUsedSize += allocationSize;
- mCurrentRequestSize += allocationSize;
- } else if (allocationSize <= mUsedStartOffset) { // Try to sub-alloc at front.
- // Count the space at the end so that a subsequent
- // sub-alloc cannot succeed when the buffer is full.
- const uint64_t requestSize = (mMaxBlockSize - mUsedEndOffset) + allocationSize;
-
- startOffset = 0;
- mUsedEndOffset = allocationSize;
- mUsedSize += requestSize;
- mCurrentRequestSize += requestSize;
- }
- } else if (mUsedEndOffset + allocationSize <=
- mUsedStartOffset) { // Otherwise, buffer is split where sub-alloc must be
- // in-between.
+ // Check if the buffer is NOT split (i.e sub-alloc on ends)
+ if (mUsedStartOffset <= mUsedEndOffset) {
+ // Order is important (try to sub-alloc at end first).
+ // This is due to FIFO order where sub-allocs are inserted from left-to-right (when not
+ // wrapped).
+ if (mUsedEndOffset + allocationSize <= mMaxBlockSize) {
startOffset = mUsedEndOffset;
mUsedEndOffset += allocationSize;
mUsedSize += allocationSize;
mCurrentRequestSize += allocationSize;
+ } else if (allocationSize <= mUsedStartOffset) { // Try to sub-alloc at front.
+ // Count the space at the end so that a subsequent
+ // sub-alloc cannot succeed when the buffer is full.
+ const uint64_t requestSize = (mMaxBlockSize - mUsedEndOffset) + allocationSize;
+
+ startOffset = 0;
+ mUsedEndOffset = allocationSize;
+ mUsedSize += requestSize;
+ mCurrentRequestSize += requestSize;
}
+ } else if (mUsedEndOffset + allocationSize <=
+ mUsedStartOffset) { // Otherwise, buffer is split where sub-alloc must be
+ // in-between.
+ startOffset = mUsedEndOffset;
+ mUsedEndOffset += allocationSize;
+ mUsedSize += allocationSize;
+ mCurrentRequestSize += allocationSize;
+ }
- if (startOffset != kInvalidOffset) {
- Request request;
- request.endOffset = mUsedEndOffset;
- request.size = mCurrentRequestSize;
-
- mInflightRequests.Enqueue(std::move(request), serial);
- mCurrentRequestSize = 0; // reset
- }
+ if (startOffset != kInvalidOffset) {
+ Request request;
+ request.endOffset = mUsedEndOffset;
+ request.size = mCurrentRequestSize;
- return startOffset;
+ mInflightRequests.Enqueue(std::move(request), serial);
+ mCurrentRequestSize = 0; // reset
}
+
+ return startOffset;
+}
} // namespace dawn::native
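
The Allocate/Deallocate pair above only reclaims memory once the GPU has finished the serial an allocation was recorded under. The usage sketch below is not code from the Dawn tree; it assumes it is compiled inside Dawn, where this header and the ExecutionSerial typed integer are available, and the numbers just illustrate the FIFO reclamation and wrap-around-to-front behavior documented in Allocate().

    #include "dawn/native/RingBufferAllocator.h"

    namespace {

    void RingBufferExample() {
        using dawn::native::ExecutionSerial;
        using dawn::native::RingBufferAllocator;

        RingBufferAllocator ring(256);

        // Frame 1: two sub-allocations recorded under serial 1 fill the ring.
        uint64_t a = ring.Allocate(128, ExecutionSerial(1));  // offset 0
        uint64_t b = ring.Allocate(128, ExecutionSerial(1));  // offset 128
        // The ring is full, so a request for frame 2 is rejected.
        uint64_t c = ring.Allocate(64, ExecutionSerial(2));   // kInvalidOffset

        // Once the GPU reports serial 1 complete, frame 1's memory is reclaimed
        // in FIFO order and the same request now succeeds at the front.
        ring.Deallocate(ExecutionSerial(1));
        uint64_t d = ring.Allocate(64, ExecutionSerial(2));   // offset 0 again
        (void)a; (void)b; (void)c; (void)d;
    }

    }  // namespace
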
diff --git a/chromium/third_party/dawn/src/dawn/native/RingBufferAllocator.h b/chromium/third_party/dawn/src/dawn/native/RingBufferAllocator.h
index 27f7622449b..6aeb1427be6 100644
--- a/chromium/third_party/dawn/src/dawn/native/RingBufferAllocator.h
+++ b/chromium/third_party/dawn/src/dawn/native/RingBufferAllocator.h
@@ -15,49 +15,49 @@
#ifndef SRC_DAWN_NATIVE_RINGBUFFERALLOCATOR_H_
#define SRC_DAWN_NATIVE_RINGBUFFERALLOCATOR_H_
-#include "dawn/common/SerialQueue.h"
-#include "dawn/native/IntegerTypes.h"
-
#include <limits>
#include <memory>
+#include "dawn/common/SerialQueue.h"
+#include "dawn/native/IntegerTypes.h"
+
// RingBufferAllocator is the front-end implementation used to manage a ring buffer in GPU memory.
namespace dawn::native {
- class RingBufferAllocator {
- public:
- RingBufferAllocator() = default;
- RingBufferAllocator(uint64_t maxSize);
- ~RingBufferAllocator() = default;
- RingBufferAllocator(const RingBufferAllocator&) = default;
- RingBufferAllocator& operator=(const RingBufferAllocator&) = default;
-
- uint64_t Allocate(uint64_t allocationSize, ExecutionSerial serial);
- void Deallocate(ExecutionSerial lastCompletedSerial);
-
- uint64_t GetSize() const;
- bool Empty() const;
- uint64_t GetUsedSize() const;
-
- static constexpr uint64_t kInvalidOffset = std::numeric_limits<uint64_t>::max();
-
- private:
- struct Request {
- uint64_t endOffset;
- uint64_t size;
- };
-
- SerialQueue<ExecutionSerial, Request>
- mInflightRequests; // Queue of the recorded sub-alloc requests
- // (e.g. frame of resources).
-
- uint64_t mUsedEndOffset = 0; // Tail of used sub-alloc requests (in bytes).
- uint64_t mUsedStartOffset = 0; // Head of used sub-alloc requests (in bytes).
- uint64_t mMaxBlockSize = 0; // Max size of the ring buffer (in bytes).
- uint64_t mUsedSize = 0; // Size of the sub-alloc requests (in bytes) of the ring buffer.
- uint64_t mCurrentRequestSize =
- 0; // Size of the sub-alloc requests (in bytes) of the current serial.
+class RingBufferAllocator {
+ public:
+ RingBufferAllocator();
+ explicit RingBufferAllocator(uint64_t maxSize);
+ RingBufferAllocator(const RingBufferAllocator&);
+ ~RingBufferAllocator();
+
+ RingBufferAllocator& operator=(const RingBufferAllocator&);
+
+ uint64_t Allocate(uint64_t allocationSize, ExecutionSerial serial);
+ void Deallocate(ExecutionSerial lastCompletedSerial);
+
+ uint64_t GetSize() const;
+ bool Empty() const;
+ uint64_t GetUsedSize() const;
+
+ static constexpr uint64_t kInvalidOffset = std::numeric_limits<uint64_t>::max();
+
+ private:
+ struct Request {
+ uint64_t endOffset;
+ uint64_t size;
};
+
+ SerialQueue<ExecutionSerial, Request> mInflightRequests; // Queue of the recorded sub-alloc
+ // requests (e.g. frame of resources).
+
+ uint64_t mUsedEndOffset = 0; // Tail of used sub-alloc requests (in bytes).
+ uint64_t mUsedStartOffset = 0; // Head of used sub-alloc requests (in bytes).
+ uint64_t mMaxBlockSize = 0; // Max size of the ring buffer (in bytes).
+ uint64_t mUsedSize = 0; // Size of the sub-alloc requests (in bytes) of the ring buffer.
+ uint64_t mCurrentRequestSize =
+ 0; // Size of the sub-alloc requests (in bytes) of the current serial.
+};
} // namespace dawn::native
#endif // SRC_DAWN_NATIVE_RINGBUFFERALLOCATOR_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/Sampler.cpp b/chromium/third_party/dawn/src/dawn/native/Sampler.cpp
index ffd8a724cb0..7e436fc1d74 100644
--- a/chromium/third_party/dawn/src/dawn/native/Sampler.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/Sampler.cpp
@@ -14,140 +14,137 @@
#include "dawn/native/Sampler.h"
+#include <cmath>
+
#include "dawn/native/Device.h"
#include "dawn/native/ObjectContentHasher.h"
#include "dawn/native/ValidationUtils_autogen.h"
-#include <cmath>
-
namespace dawn::native {
- MaybeError ValidateSamplerDescriptor(DeviceBase*, const SamplerDescriptor* descriptor) {
- DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr");
-
- DAWN_INVALID_IF(std::isnan(descriptor->lodMinClamp) || std::isnan(descriptor->lodMaxClamp),
- "LOD clamp bounds [%f, %f] contain a NaN.", descriptor->lodMinClamp,
- descriptor->lodMaxClamp);
-
- DAWN_INVALID_IF(descriptor->lodMinClamp < 0 || descriptor->lodMaxClamp < 0,
- "LOD clamp bounds [%f, %f] contain a negative number.",
- descriptor->lodMinClamp, descriptor->lodMaxClamp);
-
- DAWN_INVALID_IF(descriptor->lodMinClamp > descriptor->lodMaxClamp,
- "LOD min clamp (%f) is larger than the max clamp (%f).",
- descriptor->lodMinClamp, descriptor->lodMaxClamp);
-
- if (descriptor->maxAnisotropy > 1) {
- DAWN_INVALID_IF(descriptor->minFilter != wgpu::FilterMode::Linear ||
- descriptor->magFilter != wgpu::FilterMode::Linear ||
- descriptor->mipmapFilter != wgpu::FilterMode::Linear,
- "One of minFilter (%s), magFilter (%s) or mipmapFilter (%s) is not %s "
- "while using anisotropic filter (maxAnisotropy is %f)",
- descriptor->minFilter, descriptor->magFilter, descriptor->mipmapFilter,
- wgpu::FilterMode::Linear, descriptor->maxAnisotropy);
- } else if (descriptor->maxAnisotropy == 0u) {
- return DAWN_FORMAT_VALIDATION_ERROR("Max anisotropy (%f) is less than 1.",
- descriptor->maxAnisotropy);
- }
-
- DAWN_TRY(ValidateFilterMode(descriptor->minFilter));
- DAWN_TRY(ValidateFilterMode(descriptor->magFilter));
- DAWN_TRY(ValidateFilterMode(descriptor->mipmapFilter));
- DAWN_TRY(ValidateAddressMode(descriptor->addressModeU));
- DAWN_TRY(ValidateAddressMode(descriptor->addressModeV));
- DAWN_TRY(ValidateAddressMode(descriptor->addressModeW));
-
- // CompareFunction::Undefined is tagged as invalid because it can't be used, except for the
- // SamplerDescriptor where it is a special value that means the sampler is not a
- // comparison-sampler.
- if (descriptor->compare != wgpu::CompareFunction::Undefined) {
- DAWN_TRY(ValidateCompareFunction(descriptor->compare));
- }
-
- return {};
+MaybeError ValidateSamplerDescriptor(DeviceBase*, const SamplerDescriptor* descriptor) {
+ DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr");
+
+ DAWN_INVALID_IF(std::isnan(descriptor->lodMinClamp) || std::isnan(descriptor->lodMaxClamp),
+ "LOD clamp bounds [%f, %f] contain a NaN.", descriptor->lodMinClamp,
+ descriptor->lodMaxClamp);
+
+ DAWN_INVALID_IF(descriptor->lodMinClamp < 0 || descriptor->lodMaxClamp < 0,
+ "LOD clamp bounds [%f, %f] contain a negative number.",
+ descriptor->lodMinClamp, descriptor->lodMaxClamp);
+
+ DAWN_INVALID_IF(descriptor->lodMinClamp > descriptor->lodMaxClamp,
+ "LOD min clamp (%f) is larger than the max clamp (%f).",
+ descriptor->lodMinClamp, descriptor->lodMaxClamp);
+
+ if (descriptor->maxAnisotropy > 1) {
+ DAWN_INVALID_IF(descriptor->minFilter != wgpu::FilterMode::Linear ||
+ descriptor->magFilter != wgpu::FilterMode::Linear ||
+ descriptor->mipmapFilter != wgpu::FilterMode::Linear,
+ "One of minFilter (%s), magFilter (%s) or mipmapFilter (%s) is not %s "
+ "while using anisotropic filter (maxAnisotropy is %f)",
+ descriptor->minFilter, descriptor->magFilter, descriptor->mipmapFilter,
+ wgpu::FilterMode::Linear, descriptor->maxAnisotropy);
+ } else if (descriptor->maxAnisotropy == 0u) {
+ return DAWN_FORMAT_VALIDATION_ERROR("Max anisotropy (%f) is less than 1.",
+ descriptor->maxAnisotropy);
}
- // SamplerBase
-
- SamplerBase::SamplerBase(DeviceBase* device,
- const SamplerDescriptor* descriptor,
- ApiObjectBase::UntrackedByDeviceTag tag)
- : ApiObjectBase(device, descriptor->label),
- mAddressModeU(descriptor->addressModeU),
- mAddressModeV(descriptor->addressModeV),
- mAddressModeW(descriptor->addressModeW),
- mMagFilter(descriptor->magFilter),
- mMinFilter(descriptor->minFilter),
- mMipmapFilter(descriptor->mipmapFilter),
- mLodMinClamp(descriptor->lodMinClamp),
- mLodMaxClamp(descriptor->lodMaxClamp),
- mCompareFunction(descriptor->compare),
- mMaxAnisotropy(descriptor->maxAnisotropy) {
+ DAWN_TRY(ValidateFilterMode(descriptor->minFilter));
+ DAWN_TRY(ValidateFilterMode(descriptor->magFilter));
+ DAWN_TRY(ValidateFilterMode(descriptor->mipmapFilter));
+ DAWN_TRY(ValidateAddressMode(descriptor->addressModeU));
+ DAWN_TRY(ValidateAddressMode(descriptor->addressModeV));
+ DAWN_TRY(ValidateAddressMode(descriptor->addressModeW));
+
+ // CompareFunction::Undefined is tagged as invalid because it can't be used, except for the
+ // SamplerDescriptor where it is a special value that means the sampler is not a
+ // comparison-sampler.
+ if (descriptor->compare != wgpu::CompareFunction::Undefined) {
+ DAWN_TRY(ValidateCompareFunction(descriptor->compare));
}
- SamplerBase::SamplerBase(DeviceBase* device, const SamplerDescriptor* descriptor)
- : SamplerBase(device, descriptor, kUntrackedByDevice) {
- TrackInDevice();
+ return {};
+}
+
+// SamplerBase
+
+SamplerBase::SamplerBase(DeviceBase* device,
+ const SamplerDescriptor* descriptor,
+ ApiObjectBase::UntrackedByDeviceTag tag)
+ : ApiObjectBase(device, descriptor->label),
+ mAddressModeU(descriptor->addressModeU),
+ mAddressModeV(descriptor->addressModeV),
+ mAddressModeW(descriptor->addressModeW),
+ mMagFilter(descriptor->magFilter),
+ mMinFilter(descriptor->minFilter),
+ mMipmapFilter(descriptor->mipmapFilter),
+ mLodMinClamp(descriptor->lodMinClamp),
+ mLodMaxClamp(descriptor->lodMaxClamp),
+ mCompareFunction(descriptor->compare),
+ mMaxAnisotropy(descriptor->maxAnisotropy) {}
+
+SamplerBase::SamplerBase(DeviceBase* device, const SamplerDescriptor* descriptor)
+ : SamplerBase(device, descriptor, kUntrackedByDevice) {
+ TrackInDevice();
+}
+
+SamplerBase::SamplerBase(DeviceBase* device) : ApiObjectBase(device, kLabelNotImplemented) {
+ TrackInDevice();
+}
+
+SamplerBase::SamplerBase(DeviceBase* device, ObjectBase::ErrorTag tag)
+ : ApiObjectBase(device, tag) {}
+
+SamplerBase::~SamplerBase() = default;
+
+void SamplerBase::DestroyImpl() {
+ if (IsCachedReference()) {
+ // Do not uncache the actual cached object if we are a blueprint.
+ GetDevice()->UncacheSampler(this);
}
-
- SamplerBase::SamplerBase(DeviceBase* device) : ApiObjectBase(device, kLabelNotImplemented) {
- TrackInDevice();
+}
+
+// static
+SamplerBase* SamplerBase::MakeError(DeviceBase* device) {
+ return new SamplerBase(device, ObjectBase::kError);
+}
+
+ObjectType SamplerBase::GetType() const {
+ return ObjectType::Sampler;
+}
+
+bool SamplerBase::IsComparison() const {
+ return mCompareFunction != wgpu::CompareFunction::Undefined;
+}
+
+bool SamplerBase::IsFiltering() const {
+ return mMinFilter == wgpu::FilterMode::Linear || mMagFilter == wgpu::FilterMode::Linear ||
+ mMipmapFilter == wgpu::FilterMode::Linear;
+}
+
+size_t SamplerBase::ComputeContentHash() {
+ ObjectContentHasher recorder;
+ recorder.Record(mAddressModeU, mAddressModeV, mAddressModeW, mMagFilter, mMinFilter,
+ mMipmapFilter, mLodMinClamp, mLodMaxClamp, mCompareFunction, mMaxAnisotropy);
+ return recorder.GetContentHash();
+}
+
+bool SamplerBase::EqualityFunc::operator()(const SamplerBase* a, const SamplerBase* b) const {
+ if (a == b) {
+ return true;
}
- SamplerBase::SamplerBase(DeviceBase* device, ObjectBase::ErrorTag tag)
- : ApiObjectBase(device, tag) {
- }
-
- SamplerBase::~SamplerBase() = default;
-
- void SamplerBase::DestroyImpl() {
- if (IsCachedReference()) {
- // Do not uncache the actual cached object if we are a blueprint.
- GetDevice()->UncacheSampler(this);
- }
- }
-
- // static
- SamplerBase* SamplerBase::MakeError(DeviceBase* device) {
- return new SamplerBase(device, ObjectBase::kError);
- }
-
- ObjectType SamplerBase::GetType() const {
- return ObjectType::Sampler;
- }
-
- bool SamplerBase::IsComparison() const {
- return mCompareFunction != wgpu::CompareFunction::Undefined;
- }
-
- bool SamplerBase::IsFiltering() const {
- return mMinFilter == wgpu::FilterMode::Linear || mMagFilter == wgpu::FilterMode::Linear ||
- mMipmapFilter == wgpu::FilterMode::Linear;
- }
-
- size_t SamplerBase::ComputeContentHash() {
- ObjectContentHasher recorder;
- recorder.Record(mAddressModeU, mAddressModeV, mAddressModeW, mMagFilter, mMinFilter,
- mMipmapFilter, mLodMinClamp, mLodMaxClamp, mCompareFunction,
- mMaxAnisotropy);
- return recorder.GetContentHash();
- }
-
- bool SamplerBase::EqualityFunc::operator()(const SamplerBase* a, const SamplerBase* b) const {
- if (a == b) {
- return true;
- }
-
- ASSERT(!std::isnan(a->mLodMinClamp));
- ASSERT(!std::isnan(b->mLodMinClamp));
- ASSERT(!std::isnan(a->mLodMaxClamp));
- ASSERT(!std::isnan(b->mLodMaxClamp));
-
- return a->mAddressModeU == b->mAddressModeU && a->mAddressModeV == b->mAddressModeV &&
- a->mAddressModeW == b->mAddressModeW && a->mMagFilter == b->mMagFilter &&
- a->mMinFilter == b->mMinFilter && a->mMipmapFilter == b->mMipmapFilter &&
- a->mLodMinClamp == b->mLodMinClamp && a->mLodMaxClamp == b->mLodMaxClamp &&
- a->mCompareFunction == b->mCompareFunction && a->mMaxAnisotropy == b->mMaxAnisotropy;
- }
+ ASSERT(!std::isnan(a->mLodMinClamp));
+ ASSERT(!std::isnan(b->mLodMinClamp));
+ ASSERT(!std::isnan(a->mLodMaxClamp));
+ ASSERT(!std::isnan(b->mLodMaxClamp));
+
+ return a->mAddressModeU == b->mAddressModeU && a->mAddressModeV == b->mAddressModeV &&
+ a->mAddressModeW == b->mAddressModeW && a->mMagFilter == b->mMagFilter &&
+ a->mMinFilter == b->mMinFilter && a->mMipmapFilter == b->mMipmapFilter &&
+ a->mLodMinClamp == b->mLodMinClamp && a->mLodMaxClamp == b->mLodMaxClamp &&
+ a->mCompareFunction == b->mCompareFunction && a->mMaxAnisotropy == b->mMaxAnisotropy;
+}
} // namespace dawn::native
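
Two illustrative descriptors for the validation rules in ValidateSamplerDescriptor above: anisotropic filtering requires all three filters to be Linear, and compare left at Undefined simply means the sampler is not a comparison sampler. This is not Dawn test code; the descriptors are hypothetical and the include path for the C++ WebGPU header (shown here as <webgpu/webgpu_cpp.h>) varies by build.

    #include <webgpu/webgpu_cpp.h>

    // Passes validation: with maxAnisotropy > 1, switching any of the three
    // filters to Nearest would be rejected.
    wgpu::SamplerDescriptor MakeAnisotropicSamplerDesc() {
        wgpu::SamplerDescriptor desc;
        desc.minFilter = wgpu::FilterMode::Linear;
        desc.magFilter = wgpu::FilterMode::Linear;
        desc.mipmapFilter = wgpu::FilterMode::Linear;
        desc.maxAnisotropy = 16;
        return desc;
    }

    // compare != Undefined is what makes this a comparison sampler.
    wgpu::SamplerDescriptor MakeShadowComparisonSamplerDesc() {
        wgpu::SamplerDescriptor desc;
        desc.compare = wgpu::CompareFunction::LessEqual;
        desc.lodMinClamp = 0.0f;
        desc.lodMaxClamp = 32.0f;  // Must be >= lodMinClamp; neither bound may be NaN or negative.
        return desc;
    }
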
diff --git a/chromium/third_party/dawn/src/dawn/native/Sampler.h b/chromium/third_party/dawn/src/dawn/native/Sampler.h
index 73391a1c6d5..eac3446b78e 100644
--- a/chromium/third_party/dawn/src/dawn/native/Sampler.h
+++ b/chromium/third_party/dawn/src/dawn/native/Sampler.h
@@ -24,57 +24,55 @@
namespace dawn::native {
- class DeviceBase;
-
- MaybeError ValidateSamplerDescriptor(DeviceBase* device, const SamplerDescriptor* descriptor);
-
- class SamplerBase : public ApiObjectBase, public CachedObject {
- public:
- SamplerBase(DeviceBase* device,
- const SamplerDescriptor* descriptor,
- ApiObjectBase::UntrackedByDeviceTag tag);
- SamplerBase(DeviceBase* device, const SamplerDescriptor* descriptor);
- ~SamplerBase() override;
-
- static SamplerBase* MakeError(DeviceBase* device);
-
- ObjectType GetType() const override;
-
- bool IsComparison() const;
- bool IsFiltering() const;
-
- // Functions necessary for the unordered_set<SamplerBase*>-based cache.
- size_t ComputeContentHash() override;
-
- struct EqualityFunc {
- bool operator()(const SamplerBase* a, const SamplerBase* b) const;
- };
-
- uint16_t GetMaxAnisotropy() const {
- return mMaxAnisotropy;
- }
-
- protected:
- // Constructor used only for mocking and testing.
- explicit SamplerBase(DeviceBase* device);
- void DestroyImpl() override;
-
- private:
- SamplerBase(DeviceBase* device, ObjectBase::ErrorTag tag);
-
- // TODO(cwallez@chromium.org): Store a crypto hash of the items instead?
- wgpu::AddressMode mAddressModeU;
- wgpu::AddressMode mAddressModeV;
- wgpu::AddressMode mAddressModeW;
- wgpu::FilterMode mMagFilter;
- wgpu::FilterMode mMinFilter;
- wgpu::FilterMode mMipmapFilter;
- float mLodMinClamp;
- float mLodMaxClamp;
- wgpu::CompareFunction mCompareFunction;
- uint16_t mMaxAnisotropy;
+class DeviceBase;
+
+MaybeError ValidateSamplerDescriptor(DeviceBase* device, const SamplerDescriptor* descriptor);
+
+class SamplerBase : public ApiObjectBase, public CachedObject {
+ public:
+ SamplerBase(DeviceBase* device,
+ const SamplerDescriptor* descriptor,
+ ApiObjectBase::UntrackedByDeviceTag tag);
+ SamplerBase(DeviceBase* device, const SamplerDescriptor* descriptor);
+ ~SamplerBase() override;
+
+ static SamplerBase* MakeError(DeviceBase* device);
+
+ ObjectType GetType() const override;
+
+ bool IsComparison() const;
+ bool IsFiltering() const;
+
+ // Functions necessary for the unordered_set<SamplerBase*>-based cache.
+ size_t ComputeContentHash() override;
+
+ struct EqualityFunc {
+ bool operator()(const SamplerBase* a, const SamplerBase* b) const;
};
+ uint16_t GetMaxAnisotropy() const { return mMaxAnisotropy; }
+
+ protected:
+ // Constructor used only for mocking and testing.
+ explicit SamplerBase(DeviceBase* device);
+ void DestroyImpl() override;
+
+ private:
+ SamplerBase(DeviceBase* device, ObjectBase::ErrorTag tag);
+
+ // TODO(cwallez@chromium.org): Store a crypto hash of the items instead?
+ wgpu::AddressMode mAddressModeU;
+ wgpu::AddressMode mAddressModeV;
+ wgpu::AddressMode mAddressModeW;
+ wgpu::FilterMode mMagFilter;
+ wgpu::FilterMode mMinFilter;
+ wgpu::FilterMode mMipmapFilter;
+ float mLodMinClamp;
+ float mLodMaxClamp;
+ wgpu::CompareFunction mCompareFunction;
+ uint16_t mMaxAnisotropy;
+};
+
} // namespace dawn::native
#endif // SRC_DAWN_NATIVE_SAMPLER_H_
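
The header comment above notes that ComputeContentHash and EqualityFunc exist to serve an unordered_set<SamplerBase*>-based cache. Below is a minimal sketch of that pattern, using hypothetical FakeSampler/HashFunc types rather than Dawn's real SamplerBase and CachedObject machinery.

    #include <cstddef>
    #include <unordered_set>

    // Hypothetical cached object with a precomputed content hash.
    struct FakeSampler {
        std::size_t contentHash = 0;
        int addressModeU = 0;  // one descriptor field, for brevity
    };

    struct HashFunc {
        std::size_t operator()(const FakeSampler* s) const { return s->contentHash; }
    };

    struct EqualityFunc {
        bool operator()(const FakeSampler* a, const FakeSampler* b) const {
            return a->addressModeU == b->addressModeU;
        }
    };

    // Deduplicating cache: pointers to equal-content samplers collapse onto one entry.
    using SamplerCache = std::unordered_set<FakeSampler*, HashFunc, EqualityFunc>;

    bool GetOrInsert(SamplerCache& cache, FakeSampler* candidate, FakeSampler** out) {
        auto [iter, inserted] = cache.insert(candidate);
        *out = *iter;     // the existing equivalent sampler, or the candidate itself
        return inserted;  // false means the caller should reuse *out and drop candidate
    }
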
diff --git a/chromium/third_party/dawn/src/dawn/native/ScratchBuffer.cpp b/chromium/third_party/dawn/src/dawn/native/ScratchBuffer.cpp
index be536836ade..7902555414c 100644
--- a/chromium/third_party/dawn/src/dawn/native/ScratchBuffer.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/ScratchBuffer.cpp
@@ -18,30 +18,29 @@
namespace dawn::native {
- ScratchBuffer::ScratchBuffer(DeviceBase* device, wgpu::BufferUsage usage)
- : mDevice(device), mUsage(usage) {
+ScratchBuffer::ScratchBuffer(DeviceBase* device, wgpu::BufferUsage usage)
+ : mDevice(device), mUsage(usage) {}
+
+ScratchBuffer::~ScratchBuffer() = default;
+
+void ScratchBuffer::Reset() {
+ mBuffer = nullptr;
+}
+
+MaybeError ScratchBuffer::EnsureCapacity(uint64_t capacity) {
+ if (!mBuffer.Get() || mBuffer->GetSize() < capacity) {
+ BufferDescriptor descriptor;
+ descriptor.size = capacity;
+ descriptor.usage = mUsage;
+ DAWN_TRY_ASSIGN(mBuffer, mDevice->CreateBuffer(&descriptor));
+ mBuffer->SetIsDataInitialized();
}
+ return {};
+}
- ScratchBuffer::~ScratchBuffer() = default;
-
- void ScratchBuffer::Reset() {
- mBuffer = nullptr;
- }
-
- MaybeError ScratchBuffer::EnsureCapacity(uint64_t capacity) {
- if (!mBuffer.Get() || mBuffer->GetSize() < capacity) {
- BufferDescriptor descriptor;
- descriptor.size = capacity;
- descriptor.usage = mUsage;
- DAWN_TRY_ASSIGN(mBuffer, mDevice->CreateBuffer(&descriptor));
- mBuffer->SetIsDataInitialized();
- }
- return {};
- }
-
- BufferBase* ScratchBuffer::GetBuffer() const {
- ASSERT(mBuffer.Get() != nullptr);
- return mBuffer.Get();
- }
+BufferBase* ScratchBuffer::GetBuffer() const {
+ ASSERT(mBuffer.Get() != nullptr);
+ return mBuffer.Get();
+}
} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/ScratchBuffer.h b/chromium/third_party/dawn/src/dawn/native/ScratchBuffer.h
index 45f2d7aadea..4cfd1b92e52 100644
--- a/chromium/third_party/dawn/src/dawn/native/ScratchBuffer.h
+++ b/chromium/third_party/dawn/src/dawn/native/ScratchBuffer.h
@@ -15,40 +15,40 @@
#ifndef SRC_DAWN_NATIVE_SCRATCHBUFFER_H_
#define SRC_DAWN_NATIVE_SCRATCHBUFFER_H_
+#include <cstdint>
+
#include "dawn/common/RefCounted.h"
#include "dawn/native/Buffer.h"
-#include <cstdint>
-
namespace dawn::native {
- class DeviceBase;
-
- // A ScratchBuffer is a lazily allocated and lazily grown GPU buffer for intermittent use by
- // commands in the GPU queue. Note that scratch buffers are not zero-initialized, so users must
- // be careful not to exposed uninitialized bytes to client shaders.

- class ScratchBuffer {
- public:
- // Note that this object does not retain a reference to `device`, so `device` MUST outlive
- // this object.
- ScratchBuffer(DeviceBase* device, wgpu::BufferUsage usage);
- ~ScratchBuffer();
-
- // Resets this ScratchBuffer, guaranteeing that the next EnsureCapacity call allocates a
- // fresh buffer.
- void Reset();
-
- // Ensures that this ScratchBuffer is backed by a buffer on `device` with at least
- // `capacity` bytes of storage.
- MaybeError EnsureCapacity(uint64_t capacity);
-
- BufferBase* GetBuffer() const;
-
- private:
- DeviceBase* const mDevice;
- const wgpu::BufferUsage mUsage;
- Ref<BufferBase> mBuffer;
- };
+class DeviceBase;
+
+// A ScratchBuffer is a lazily allocated and lazily grown GPU buffer for intermittent use by
+// commands in the GPU queue. Note that scratch buffers are not zero-initialized, so users must
+// be careful not to expose uninitialized bytes to client shaders.
+class ScratchBuffer {
+ public:
+ // Note that this object does not retain a reference to `device`, so `device` MUST outlive
+ // this object.
+ ScratchBuffer(DeviceBase* device, wgpu::BufferUsage usage);
+ ~ScratchBuffer();
+
+ // Resets this ScratchBuffer, guaranteeing that the next EnsureCapacity call allocates a
+ // fresh buffer.
+ void Reset();
+
+ // Ensures that this ScratchBuffer is backed by a buffer on `device` with at least
+ // `capacity` bytes of storage.
+ MaybeError EnsureCapacity(uint64_t capacity);
+
+ BufferBase* GetBuffer() const;
+
+ private:
+ DeviceBase* const mDevice;
+ const wgpu::BufferUsage mUsage;
+ Ref<BufferBase> mBuffer;
+};
} // namespace dawn::native
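
Based only on the interface above, here is a hedged usage sketch of ScratchBuffer: the pass name, size, and usage flags are invented, and DAWN_TRY is assumed to be Dawn's usual error-propagation macro.

    // Illustrative only; not an actual Dawn pass.
    MaybeError EncodeSomePass(DeviceBase* device, uint64_t requiredBytes) {
        // Lazily allocates, or grows if the previous buffer was too small.
        ScratchBuffer scratch(device, wgpu::BufferUsage::Storage | wgpu::BufferUsage::CopySrc);
        DAWN_TRY(scratch.EnsureCapacity(requiredBytes));

        BufferBase* buffer = scratch.GetBuffer();
        // ... record commands that use `buffer`; its contents start uninitialized ...

        scratch.Reset();  // forces the next EnsureCapacity() to allocate a fresh buffer
        return {};
    }
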
diff --git a/chromium/third_party/dawn/src/dawn/native/ShaderModule.cpp b/chromium/third_party/dawn/src/dawn/native/ShaderModule.cpp
index da4959e3823..21edca997fa 100644
--- a/chromium/third_party/dawn/src/dawn/native/ShaderModule.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/ShaderModule.cpp
@@ -14,6 +14,9 @@
#include "dawn/native/ShaderModule.h"
+#include <algorithm>
+#include <sstream>
+
#include "absl/strings/str_format.h"
#include "dawn/common/BitSetIterator.h"
#include "dawn/common/Constants.h"
@@ -28,596 +31,577 @@
#include "dawn/native/RenderPipeline.h"
#include "dawn/native/TintUtils.h"
-#include <tint/tint.h>
-
-#include <sstream>
+#include "tint/tint.h"
namespace dawn::native {
- namespace {
-
- tint::transform::VertexFormat ToTintVertexFormat(wgpu::VertexFormat format) {
- switch (format) {
- case wgpu::VertexFormat::Uint8x2:
- return tint::transform::VertexFormat::kUint8x2;
- case wgpu::VertexFormat::Uint8x4:
- return tint::transform::VertexFormat::kUint8x4;
- case wgpu::VertexFormat::Sint8x2:
- return tint::transform::VertexFormat::kSint8x2;
- case wgpu::VertexFormat::Sint8x4:
- return tint::transform::VertexFormat::kSint8x4;
- case wgpu::VertexFormat::Unorm8x2:
- return tint::transform::VertexFormat::kUnorm8x2;
- case wgpu::VertexFormat::Unorm8x4:
- return tint::transform::VertexFormat::kUnorm8x4;
- case wgpu::VertexFormat::Snorm8x2:
- return tint::transform::VertexFormat::kSnorm8x2;
- case wgpu::VertexFormat::Snorm8x4:
- return tint::transform::VertexFormat::kSnorm8x4;
- case wgpu::VertexFormat::Uint16x2:
- return tint::transform::VertexFormat::kUint16x2;
- case wgpu::VertexFormat::Uint16x4:
- return tint::transform::VertexFormat::kUint16x4;
- case wgpu::VertexFormat::Sint16x2:
- return tint::transform::VertexFormat::kSint16x2;
- case wgpu::VertexFormat::Sint16x4:
- return tint::transform::VertexFormat::kSint16x4;
- case wgpu::VertexFormat::Unorm16x2:
- return tint::transform::VertexFormat::kUnorm16x2;
- case wgpu::VertexFormat::Unorm16x4:
- return tint::transform::VertexFormat::kUnorm16x4;
- case wgpu::VertexFormat::Snorm16x2:
- return tint::transform::VertexFormat::kSnorm16x2;
- case wgpu::VertexFormat::Snorm16x4:
- return tint::transform::VertexFormat::kSnorm16x4;
- case wgpu::VertexFormat::Float16x2:
- return tint::transform::VertexFormat::kFloat16x2;
- case wgpu::VertexFormat::Float16x4:
- return tint::transform::VertexFormat::kFloat16x4;
- case wgpu::VertexFormat::Float32:
- return tint::transform::VertexFormat::kFloat32;
- case wgpu::VertexFormat::Float32x2:
- return tint::transform::VertexFormat::kFloat32x2;
- case wgpu::VertexFormat::Float32x3:
- return tint::transform::VertexFormat::kFloat32x3;
- case wgpu::VertexFormat::Float32x4:
- return tint::transform::VertexFormat::kFloat32x4;
- case wgpu::VertexFormat::Uint32:
- return tint::transform::VertexFormat::kUint32;
- case wgpu::VertexFormat::Uint32x2:
- return tint::transform::VertexFormat::kUint32x2;
- case wgpu::VertexFormat::Uint32x3:
- return tint::transform::VertexFormat::kUint32x3;
- case wgpu::VertexFormat::Uint32x4:
- return tint::transform::VertexFormat::kUint32x4;
- case wgpu::VertexFormat::Sint32:
- return tint::transform::VertexFormat::kSint32;
- case wgpu::VertexFormat::Sint32x2:
- return tint::transform::VertexFormat::kSint32x2;
- case wgpu::VertexFormat::Sint32x3:
- return tint::transform::VertexFormat::kSint32x3;
- case wgpu::VertexFormat::Sint32x4:
- return tint::transform::VertexFormat::kSint32x4;
-
- case wgpu::VertexFormat::Undefined:
- break;
- }
- UNREACHABLE();
- }
-
- tint::transform::VertexStepMode ToTintVertexStepMode(wgpu::VertexStepMode mode) {
- switch (mode) {
- case wgpu::VertexStepMode::Vertex:
- return tint::transform::VertexStepMode::kVertex;
- case wgpu::VertexStepMode::Instance:
- return tint::transform::VertexStepMode::kInstance;
- }
- UNREACHABLE();
- }
-
- ResultOrError<SingleShaderStage> TintPipelineStageToShaderStage(
- tint::ast::PipelineStage stage) {
- switch (stage) {
- case tint::ast::PipelineStage::kVertex:
- return SingleShaderStage::Vertex;
- case tint::ast::PipelineStage::kFragment:
- return SingleShaderStage::Fragment;
- case tint::ast::PipelineStage::kCompute:
- return SingleShaderStage::Compute;
- case tint::ast::PipelineStage::kNone:
- break;
- }
- UNREACHABLE();
- }
-
- BindingInfoType TintResourceTypeToBindingInfoType(
- tint::inspector::ResourceBinding::ResourceType type) {
- switch (type) {
- case tint::inspector::ResourceBinding::ResourceType::kUniformBuffer:
- case tint::inspector::ResourceBinding::ResourceType::kStorageBuffer:
- case tint::inspector::ResourceBinding::ResourceType::kReadOnlyStorageBuffer:
- return BindingInfoType::Buffer;
- case tint::inspector::ResourceBinding::ResourceType::kSampler:
- case tint::inspector::ResourceBinding::ResourceType::kComparisonSampler:
- return BindingInfoType::Sampler;
- case tint::inspector::ResourceBinding::ResourceType::kSampledTexture:
- case tint::inspector::ResourceBinding::ResourceType::kMultisampledTexture:
- case tint::inspector::ResourceBinding::ResourceType::kDepthTexture:
- case tint::inspector::ResourceBinding::ResourceType::kDepthMultisampledTexture:
- return BindingInfoType::Texture;
- case tint::inspector::ResourceBinding::ResourceType::kWriteOnlyStorageTexture:
- return BindingInfoType::StorageTexture;
- case tint::inspector::ResourceBinding::ResourceType::kExternalTexture:
- return BindingInfoType::ExternalTexture;
-
- default:
- UNREACHABLE();
- return BindingInfoType::Buffer;
- }
- }
-
- wgpu::TextureFormat TintImageFormatToTextureFormat(
- tint::inspector::ResourceBinding::TexelFormat format) {
- switch (format) {
- case tint::inspector::ResourceBinding::TexelFormat::kR32Uint:
- return wgpu::TextureFormat::R32Uint;
- case tint::inspector::ResourceBinding::TexelFormat::kR32Sint:
- return wgpu::TextureFormat::R32Sint;
- case tint::inspector::ResourceBinding::TexelFormat::kR32Float:
- return wgpu::TextureFormat::R32Float;
- case tint::inspector::ResourceBinding::TexelFormat::kRgba8Unorm:
- return wgpu::TextureFormat::RGBA8Unorm;
- case tint::inspector::ResourceBinding::TexelFormat::kRgba8Snorm:
- return wgpu::TextureFormat::RGBA8Snorm;
- case tint::inspector::ResourceBinding::TexelFormat::kRgba8Uint:
- return wgpu::TextureFormat::RGBA8Uint;
- case tint::inspector::ResourceBinding::TexelFormat::kRgba8Sint:
- return wgpu::TextureFormat::RGBA8Sint;
- case tint::inspector::ResourceBinding::TexelFormat::kRg32Uint:
- return wgpu::TextureFormat::RG32Uint;
- case tint::inspector::ResourceBinding::TexelFormat::kRg32Sint:
- return wgpu::TextureFormat::RG32Sint;
- case tint::inspector::ResourceBinding::TexelFormat::kRg32Float:
- return wgpu::TextureFormat::RG32Float;
- case tint::inspector::ResourceBinding::TexelFormat::kRgba16Uint:
- return wgpu::TextureFormat::RGBA16Uint;
- case tint::inspector::ResourceBinding::TexelFormat::kRgba16Sint:
- return wgpu::TextureFormat::RGBA16Sint;
- case tint::inspector::ResourceBinding::TexelFormat::kRgba16Float:
- return wgpu::TextureFormat::RGBA16Float;
- case tint::inspector::ResourceBinding::TexelFormat::kRgba32Uint:
- return wgpu::TextureFormat::RGBA32Uint;
- case tint::inspector::ResourceBinding::TexelFormat::kRgba32Sint:
- return wgpu::TextureFormat::RGBA32Sint;
- case tint::inspector::ResourceBinding::TexelFormat::kRgba32Float:
- return wgpu::TextureFormat::RGBA32Float;
- case tint::inspector::ResourceBinding::TexelFormat::kNone:
- return wgpu::TextureFormat::Undefined;
-
- default:
- UNREACHABLE();
- return wgpu::TextureFormat::Undefined;
- }
- }
-
- wgpu::TextureViewDimension TintTextureDimensionToTextureViewDimension(
- tint::inspector::ResourceBinding::TextureDimension dim) {
- switch (dim) {
- case tint::inspector::ResourceBinding::TextureDimension::k1d:
- return wgpu::TextureViewDimension::e1D;
- case tint::inspector::ResourceBinding::TextureDimension::k2d:
- return wgpu::TextureViewDimension::e2D;
- case tint::inspector::ResourceBinding::TextureDimension::k2dArray:
- return wgpu::TextureViewDimension::e2DArray;
- case tint::inspector::ResourceBinding::TextureDimension::k3d:
- return wgpu::TextureViewDimension::e3D;
- case tint::inspector::ResourceBinding::TextureDimension::kCube:
- return wgpu::TextureViewDimension::Cube;
- case tint::inspector::ResourceBinding::TextureDimension::kCubeArray:
- return wgpu::TextureViewDimension::CubeArray;
- case tint::inspector::ResourceBinding::TextureDimension::kNone:
- return wgpu::TextureViewDimension::Undefined;
- }
- UNREACHABLE();
- }
-
- SampleTypeBit TintSampledKindToSampleTypeBit(
- tint::inspector::ResourceBinding::SampledKind s) {
- switch (s) {
- case tint::inspector::ResourceBinding::SampledKind::kSInt:
- return SampleTypeBit::Sint;
- case tint::inspector::ResourceBinding::SampledKind::kUInt:
- return SampleTypeBit::Uint;
- case tint::inspector::ResourceBinding::SampledKind::kFloat:
- return SampleTypeBit::Float | SampleTypeBit::UnfilterableFloat;
- case tint::inspector::ResourceBinding::SampledKind::kUnknown:
- return SampleTypeBit::None;
- }
- UNREACHABLE();
- }
-
- ResultOrError<wgpu::TextureComponentType> TintComponentTypeToTextureComponentType(
- tint::inspector::ComponentType type) {
- switch (type) {
- case tint::inspector::ComponentType::kFloat:
- return wgpu::TextureComponentType::Float;
- case tint::inspector::ComponentType::kSInt:
- return wgpu::TextureComponentType::Sint;
- case tint::inspector::ComponentType::kUInt:
- return wgpu::TextureComponentType::Uint;
- case tint::inspector::ComponentType::kUnknown:
- return DAWN_VALIDATION_ERROR(
- "Attempted to convert 'Unknown' component type from Tint");
- }
- UNREACHABLE();
- }
-
- ResultOrError<VertexFormatBaseType> TintComponentTypeToVertexFormatBaseType(
- tint::inspector::ComponentType type) {
- switch (type) {
- case tint::inspector::ComponentType::kFloat:
- return VertexFormatBaseType::Float;
- case tint::inspector::ComponentType::kSInt:
- return VertexFormatBaseType::Sint;
- case tint::inspector::ComponentType::kUInt:
- return VertexFormatBaseType::Uint;
- case tint::inspector::ComponentType::kUnknown:
- return DAWN_VALIDATION_ERROR(
- "Attempted to convert 'Unknown' component type from Tint");
- }
- UNREACHABLE();
- }
-
- ResultOrError<wgpu::BufferBindingType> TintResourceTypeToBufferBindingType(
- tint::inspector::ResourceBinding::ResourceType resource_type) {
- switch (resource_type) {
- case tint::inspector::ResourceBinding::ResourceType::kUniformBuffer:
- return wgpu::BufferBindingType::Uniform;
- case tint::inspector::ResourceBinding::ResourceType::kStorageBuffer:
- return wgpu::BufferBindingType::Storage;
- case tint::inspector::ResourceBinding::ResourceType::kReadOnlyStorageBuffer:
- return wgpu::BufferBindingType::ReadOnlyStorage;
- default:
- return DAWN_VALIDATION_ERROR("Attempted to convert non-buffer resource type");
- }
- UNREACHABLE();
- }
-
- ResultOrError<wgpu::StorageTextureAccess> TintResourceTypeToStorageTextureAccess(
- tint::inspector::ResourceBinding::ResourceType resource_type) {
- switch (resource_type) {
- case tint::inspector::ResourceBinding::ResourceType::kWriteOnlyStorageTexture:
- return wgpu::StorageTextureAccess::WriteOnly;
- default:
- return DAWN_VALIDATION_ERROR(
- "Attempted to convert non-storage texture resource type");
- }
- UNREACHABLE();
- }
-
- ResultOrError<InterStageComponentType> TintComponentTypeToInterStageComponentType(
- tint::inspector::ComponentType type) {
- switch (type) {
- case tint::inspector::ComponentType::kFloat:
- return InterStageComponentType::Float;
- case tint::inspector::ComponentType::kSInt:
- return InterStageComponentType::Sint;
- case tint::inspector::ComponentType::kUInt:
- return InterStageComponentType::Uint;
- case tint::inspector::ComponentType::kUnknown:
- return DAWN_VALIDATION_ERROR(
- "Attempted to convert 'Unknown' component type from Tint");
- }
- UNREACHABLE();
- }
-
- ResultOrError<uint32_t> TintCompositionTypeToInterStageComponentCount(
- tint::inspector::CompositionType type) {
- switch (type) {
- case tint::inspector::CompositionType::kScalar:
- return 1u;
- case tint::inspector::CompositionType::kVec2:
- return 2u;
- case tint::inspector::CompositionType::kVec3:
- return 3u;
- case tint::inspector::CompositionType::kVec4:
- return 4u;
- case tint::inspector::CompositionType::kUnknown:
- return DAWN_VALIDATION_ERROR(
- "Attempt to convert 'Unknown' composition type from Tint");
- }
- UNREACHABLE();
- }
-
- ResultOrError<InterpolationType> TintInterpolationTypeToInterpolationType(
- tint::inspector::InterpolationType type) {
- switch (type) {
- case tint::inspector::InterpolationType::kPerspective:
- return InterpolationType::Perspective;
- case tint::inspector::InterpolationType::kLinear:
- return InterpolationType::Linear;
- case tint::inspector::InterpolationType::kFlat:
- return InterpolationType::Flat;
- case tint::inspector::InterpolationType::kUnknown:
- return DAWN_VALIDATION_ERROR(
- "Attempted to convert 'Unknown' interpolation type from Tint");
- }
+namespace {
+
+tint::transform::VertexFormat ToTintVertexFormat(wgpu::VertexFormat format) {
+ switch (format) {
+ case wgpu::VertexFormat::Uint8x2:
+ return tint::transform::VertexFormat::kUint8x2;
+ case wgpu::VertexFormat::Uint8x4:
+ return tint::transform::VertexFormat::kUint8x4;
+ case wgpu::VertexFormat::Sint8x2:
+ return tint::transform::VertexFormat::kSint8x2;
+ case wgpu::VertexFormat::Sint8x4:
+ return tint::transform::VertexFormat::kSint8x4;
+ case wgpu::VertexFormat::Unorm8x2:
+ return tint::transform::VertexFormat::kUnorm8x2;
+ case wgpu::VertexFormat::Unorm8x4:
+ return tint::transform::VertexFormat::kUnorm8x4;
+ case wgpu::VertexFormat::Snorm8x2:
+ return tint::transform::VertexFormat::kSnorm8x2;
+ case wgpu::VertexFormat::Snorm8x4:
+ return tint::transform::VertexFormat::kSnorm8x4;
+ case wgpu::VertexFormat::Uint16x2:
+ return tint::transform::VertexFormat::kUint16x2;
+ case wgpu::VertexFormat::Uint16x4:
+ return tint::transform::VertexFormat::kUint16x4;
+ case wgpu::VertexFormat::Sint16x2:
+ return tint::transform::VertexFormat::kSint16x2;
+ case wgpu::VertexFormat::Sint16x4:
+ return tint::transform::VertexFormat::kSint16x4;
+ case wgpu::VertexFormat::Unorm16x2:
+ return tint::transform::VertexFormat::kUnorm16x2;
+ case wgpu::VertexFormat::Unorm16x4:
+ return tint::transform::VertexFormat::kUnorm16x4;
+ case wgpu::VertexFormat::Snorm16x2:
+ return tint::transform::VertexFormat::kSnorm16x2;
+ case wgpu::VertexFormat::Snorm16x4:
+ return tint::transform::VertexFormat::kSnorm16x4;
+ case wgpu::VertexFormat::Float16x2:
+ return tint::transform::VertexFormat::kFloat16x2;
+ case wgpu::VertexFormat::Float16x4:
+ return tint::transform::VertexFormat::kFloat16x4;
+ case wgpu::VertexFormat::Float32:
+ return tint::transform::VertexFormat::kFloat32;
+ case wgpu::VertexFormat::Float32x2:
+ return tint::transform::VertexFormat::kFloat32x2;
+ case wgpu::VertexFormat::Float32x3:
+ return tint::transform::VertexFormat::kFloat32x3;
+ case wgpu::VertexFormat::Float32x4:
+ return tint::transform::VertexFormat::kFloat32x4;
+ case wgpu::VertexFormat::Uint32:
+ return tint::transform::VertexFormat::kUint32;
+ case wgpu::VertexFormat::Uint32x2:
+ return tint::transform::VertexFormat::kUint32x2;
+ case wgpu::VertexFormat::Uint32x3:
+ return tint::transform::VertexFormat::kUint32x3;
+ case wgpu::VertexFormat::Uint32x4:
+ return tint::transform::VertexFormat::kUint32x4;
+ case wgpu::VertexFormat::Sint32:
+ return tint::transform::VertexFormat::kSint32;
+ case wgpu::VertexFormat::Sint32x2:
+ return tint::transform::VertexFormat::kSint32x2;
+ case wgpu::VertexFormat::Sint32x3:
+ return tint::transform::VertexFormat::kSint32x3;
+ case wgpu::VertexFormat::Sint32x4:
+ return tint::transform::VertexFormat::kSint32x4;
+
+ case wgpu::VertexFormat::Undefined:
+ break;
+ }
+ UNREACHABLE();
+}
+
+tint::transform::VertexStepMode ToTintVertexStepMode(wgpu::VertexStepMode mode) {
+ switch (mode) {
+ case wgpu::VertexStepMode::Vertex:
+ return tint::transform::VertexStepMode::kVertex;
+ case wgpu::VertexStepMode::Instance:
+ return tint::transform::VertexStepMode::kInstance;
+ case wgpu::VertexStepMode::VertexBufferNotUsed:
UNREACHABLE();
- }
-
- ResultOrError<InterpolationSampling> TintInterpolationSamplingToInterpolationSamplingType(
- tint::inspector::InterpolationSampling type) {
- switch (type) {
- case tint::inspector::InterpolationSampling::kNone:
- return InterpolationSampling::None;
- case tint::inspector::InterpolationSampling::kCenter:
- return InterpolationSampling::Center;
- case tint::inspector::InterpolationSampling::kCentroid:
- return InterpolationSampling::Centroid;
- case tint::inspector::InterpolationSampling::kSample:
- return InterpolationSampling::Sample;
- case tint::inspector::InterpolationSampling::kUnknown:
- return DAWN_VALIDATION_ERROR(
- "Attempted to convert 'Unknown' interpolation sampling type from Tint");
- }
+ }
+}
+
+ResultOrError<SingleShaderStage> TintPipelineStageToShaderStage(tint::ast::PipelineStage stage) {
+ switch (stage) {
+ case tint::ast::PipelineStage::kVertex:
+ return SingleShaderStage::Vertex;
+ case tint::ast::PipelineStage::kFragment:
+ return SingleShaderStage::Fragment;
+ case tint::ast::PipelineStage::kCompute:
+ return SingleShaderStage::Compute;
+ case tint::ast::PipelineStage::kNone:
+ break;
+ }
+ UNREACHABLE();
+}
+
+BindingInfoType TintResourceTypeToBindingInfoType(
+ tint::inspector::ResourceBinding::ResourceType type) {
+ switch (type) {
+ case tint::inspector::ResourceBinding::ResourceType::kUniformBuffer:
+ case tint::inspector::ResourceBinding::ResourceType::kStorageBuffer:
+ case tint::inspector::ResourceBinding::ResourceType::kReadOnlyStorageBuffer:
+ return BindingInfoType::Buffer;
+ case tint::inspector::ResourceBinding::ResourceType::kSampler:
+ case tint::inspector::ResourceBinding::ResourceType::kComparisonSampler:
+ return BindingInfoType::Sampler;
+ case tint::inspector::ResourceBinding::ResourceType::kSampledTexture:
+ case tint::inspector::ResourceBinding::ResourceType::kMultisampledTexture:
+ case tint::inspector::ResourceBinding::ResourceType::kDepthTexture:
+ case tint::inspector::ResourceBinding::ResourceType::kDepthMultisampledTexture:
+ return BindingInfoType::Texture;
+ case tint::inspector::ResourceBinding::ResourceType::kWriteOnlyStorageTexture:
+ return BindingInfoType::StorageTexture;
+ case tint::inspector::ResourceBinding::ResourceType::kExternalTexture:
+ return BindingInfoType::ExternalTexture;
+
+ default:
UNREACHABLE();
- }
-
- EntryPointMetadata::OverridableConstant::Type FromTintOverridableConstantType(
- tint::inspector::OverridableConstant::Type type) {
- switch (type) {
- case tint::inspector::OverridableConstant::Type::kBool:
- return EntryPointMetadata::OverridableConstant::Type::Boolean;
- case tint::inspector::OverridableConstant::Type::kFloat32:
- return EntryPointMetadata::OverridableConstant::Type::Float32;
- case tint::inspector::OverridableConstant::Type::kInt32:
- return EntryPointMetadata::OverridableConstant::Type::Int32;
- case tint::inspector::OverridableConstant::Type::kUint32:
- return EntryPointMetadata::OverridableConstant::Type::Uint32;
- }
+ return BindingInfoType::Buffer;
+ }
+}
+
+wgpu::TextureFormat TintImageFormatToTextureFormat(
+ tint::inspector::ResourceBinding::TexelFormat format) {
+ switch (format) {
+ case tint::inspector::ResourceBinding::TexelFormat::kR32Uint:
+ return wgpu::TextureFormat::R32Uint;
+ case tint::inspector::ResourceBinding::TexelFormat::kR32Sint:
+ return wgpu::TextureFormat::R32Sint;
+ case tint::inspector::ResourceBinding::TexelFormat::kR32Float:
+ return wgpu::TextureFormat::R32Float;
+ case tint::inspector::ResourceBinding::TexelFormat::kRgba8Unorm:
+ return wgpu::TextureFormat::RGBA8Unorm;
+ case tint::inspector::ResourceBinding::TexelFormat::kRgba8Snorm:
+ return wgpu::TextureFormat::RGBA8Snorm;
+ case tint::inspector::ResourceBinding::TexelFormat::kRgba8Uint:
+ return wgpu::TextureFormat::RGBA8Uint;
+ case tint::inspector::ResourceBinding::TexelFormat::kRgba8Sint:
+ return wgpu::TextureFormat::RGBA8Sint;
+ case tint::inspector::ResourceBinding::TexelFormat::kRg32Uint:
+ return wgpu::TextureFormat::RG32Uint;
+ case tint::inspector::ResourceBinding::TexelFormat::kRg32Sint:
+ return wgpu::TextureFormat::RG32Sint;
+ case tint::inspector::ResourceBinding::TexelFormat::kRg32Float:
+ return wgpu::TextureFormat::RG32Float;
+ case tint::inspector::ResourceBinding::TexelFormat::kRgba16Uint:
+ return wgpu::TextureFormat::RGBA16Uint;
+ case tint::inspector::ResourceBinding::TexelFormat::kRgba16Sint:
+ return wgpu::TextureFormat::RGBA16Sint;
+ case tint::inspector::ResourceBinding::TexelFormat::kRgba16Float:
+ return wgpu::TextureFormat::RGBA16Float;
+ case tint::inspector::ResourceBinding::TexelFormat::kRgba32Uint:
+ return wgpu::TextureFormat::RGBA32Uint;
+ case tint::inspector::ResourceBinding::TexelFormat::kRgba32Sint:
+ return wgpu::TextureFormat::RGBA32Sint;
+ case tint::inspector::ResourceBinding::TexelFormat::kRgba32Float:
+ return wgpu::TextureFormat::RGBA32Float;
+ case tint::inspector::ResourceBinding::TexelFormat::kNone:
+ return wgpu::TextureFormat::Undefined;
+
+ default:
UNREACHABLE();
- }
+ return wgpu::TextureFormat::Undefined;
+ }
+}
+
+wgpu::TextureViewDimension TintTextureDimensionToTextureViewDimension(
+ tint::inspector::ResourceBinding::TextureDimension dim) {
+ switch (dim) {
+ case tint::inspector::ResourceBinding::TextureDimension::k1d:
+ return wgpu::TextureViewDimension::e1D;
+ case tint::inspector::ResourceBinding::TextureDimension::k2d:
+ return wgpu::TextureViewDimension::e2D;
+ case tint::inspector::ResourceBinding::TextureDimension::k2dArray:
+ return wgpu::TextureViewDimension::e2DArray;
+ case tint::inspector::ResourceBinding::TextureDimension::k3d:
+ return wgpu::TextureViewDimension::e3D;
+ case tint::inspector::ResourceBinding::TextureDimension::kCube:
+ return wgpu::TextureViewDimension::Cube;
+ case tint::inspector::ResourceBinding::TextureDimension::kCubeArray:
+ return wgpu::TextureViewDimension::CubeArray;
+ case tint::inspector::ResourceBinding::TextureDimension::kNone:
+ return wgpu::TextureViewDimension::Undefined;
+ }
+ UNREACHABLE();
+}
+
+SampleTypeBit TintSampledKindToSampleTypeBit(tint::inspector::ResourceBinding::SampledKind s) {
+ switch (s) {
+ case tint::inspector::ResourceBinding::SampledKind::kSInt:
+ return SampleTypeBit::Sint;
+ case tint::inspector::ResourceBinding::SampledKind::kUInt:
+ return SampleTypeBit::Uint;
+ case tint::inspector::ResourceBinding::SampledKind::kFloat:
+ return SampleTypeBit::Float | SampleTypeBit::UnfilterableFloat;
+ case tint::inspector::ResourceBinding::SampledKind::kUnknown:
+ return SampleTypeBit::None;
+ }
+ UNREACHABLE();
+}
+
+ResultOrError<wgpu::TextureComponentType> TintComponentTypeToTextureComponentType(
+ tint::inspector::ComponentType type) {
+ switch (type) {
+ case tint::inspector::ComponentType::kFloat:
+ return wgpu::TextureComponentType::Float;
+ case tint::inspector::ComponentType::kSInt:
+ return wgpu::TextureComponentType::Sint;
+ case tint::inspector::ComponentType::kUInt:
+ return wgpu::TextureComponentType::Uint;
+ case tint::inspector::ComponentType::kUnknown:
+ return DAWN_VALIDATION_ERROR("Attempted to convert 'Unknown' component type from Tint");
+ }
+ UNREACHABLE();
+}
+
+ResultOrError<VertexFormatBaseType> TintComponentTypeToVertexFormatBaseType(
+ tint::inspector::ComponentType type) {
+ switch (type) {
+ case tint::inspector::ComponentType::kFloat:
+ return VertexFormatBaseType::Float;
+ case tint::inspector::ComponentType::kSInt:
+ return VertexFormatBaseType::Sint;
+ case tint::inspector::ComponentType::kUInt:
+ return VertexFormatBaseType::Uint;
+ case tint::inspector::ComponentType::kUnknown:
+ return DAWN_VALIDATION_ERROR("Attempted to convert 'Unknown' component type from Tint");
+ }
+ UNREACHABLE();
+}
+
+ResultOrError<wgpu::BufferBindingType> TintResourceTypeToBufferBindingType(
+ tint::inspector::ResourceBinding::ResourceType resource_type) {
+ switch (resource_type) {
+ case tint::inspector::ResourceBinding::ResourceType::kUniformBuffer:
+ return wgpu::BufferBindingType::Uniform;
+ case tint::inspector::ResourceBinding::ResourceType::kStorageBuffer:
+ return wgpu::BufferBindingType::Storage;
+ case tint::inspector::ResourceBinding::ResourceType::kReadOnlyStorageBuffer:
+ return wgpu::BufferBindingType::ReadOnlyStorage;
+ default:
+ return DAWN_VALIDATION_ERROR("Attempted to convert non-buffer resource type");
+ }
+ UNREACHABLE();
+}
+
+ResultOrError<wgpu::StorageTextureAccess> TintResourceTypeToStorageTextureAccess(
+ tint::inspector::ResourceBinding::ResourceType resource_type) {
+ switch (resource_type) {
+ case tint::inspector::ResourceBinding::ResourceType::kWriteOnlyStorageTexture:
+ return wgpu::StorageTextureAccess::WriteOnly;
+ default:
+ return DAWN_VALIDATION_ERROR("Attempted to convert non-storage texture resource type");
+ }
+ UNREACHABLE();
+}
+
+ResultOrError<InterStageComponentType> TintComponentTypeToInterStageComponentType(
+ tint::inspector::ComponentType type) {
+ switch (type) {
+ case tint::inspector::ComponentType::kFloat:
+ return InterStageComponentType::Float;
+ case tint::inspector::ComponentType::kSInt:
+ return InterStageComponentType::Sint;
+ case tint::inspector::ComponentType::kUInt:
+ return InterStageComponentType::Uint;
+ case tint::inspector::ComponentType::kUnknown:
+ return DAWN_VALIDATION_ERROR("Attempted to convert 'Unknown' component type from Tint");
+ }
+ UNREACHABLE();
+}
+
+ResultOrError<uint32_t> TintCompositionTypeToInterStageComponentCount(
+ tint::inspector::CompositionType type) {
+ switch (type) {
+ case tint::inspector::CompositionType::kScalar:
+ return 1u;
+ case tint::inspector::CompositionType::kVec2:
+ return 2u;
+ case tint::inspector::CompositionType::kVec3:
+ return 3u;
+ case tint::inspector::CompositionType::kVec4:
+ return 4u;
+ case tint::inspector::CompositionType::kUnknown:
+ return DAWN_VALIDATION_ERROR("Attempt to convert 'Unknown' composition type from Tint");
+ }
+ UNREACHABLE();
+}
+
+ResultOrError<InterpolationType> TintInterpolationTypeToInterpolationType(
+ tint::inspector::InterpolationType type) {
+ switch (type) {
+ case tint::inspector::InterpolationType::kPerspective:
+ return InterpolationType::Perspective;
+ case tint::inspector::InterpolationType::kLinear:
+ return InterpolationType::Linear;
+ case tint::inspector::InterpolationType::kFlat:
+ return InterpolationType::Flat;
+ case tint::inspector::InterpolationType::kUnknown:
+ return DAWN_VALIDATION_ERROR(
+ "Attempted to convert 'Unknown' interpolation type from Tint");
+ }
+ UNREACHABLE();
+}
+
+ResultOrError<InterpolationSampling> TintInterpolationSamplingToInterpolationSamplingType(
+ tint::inspector::InterpolationSampling type) {
+ switch (type) {
+ case tint::inspector::InterpolationSampling::kNone:
+ return InterpolationSampling::None;
+ case tint::inspector::InterpolationSampling::kCenter:
+ return InterpolationSampling::Center;
+ case tint::inspector::InterpolationSampling::kCentroid:
+ return InterpolationSampling::Centroid;
+ case tint::inspector::InterpolationSampling::kSample:
+ return InterpolationSampling::Sample;
+ case tint::inspector::InterpolationSampling::kUnknown:
+ return DAWN_VALIDATION_ERROR(
+ "Attempted to convert 'Unknown' interpolation sampling type from Tint");
+ }
+ UNREACHABLE();
+}
+
+EntryPointMetadata::OverridableConstant::Type FromTintOverridableConstantType(
+ tint::inspector::OverridableConstant::Type type) {
+ switch (type) {
+ case tint::inspector::OverridableConstant::Type::kBool:
+ return EntryPointMetadata::OverridableConstant::Type::Boolean;
+ case tint::inspector::OverridableConstant::Type::kFloat32:
+ return EntryPointMetadata::OverridableConstant::Type::Float32;
+ case tint::inspector::OverridableConstant::Type::kInt32:
+ return EntryPointMetadata::OverridableConstant::Type::Int32;
+ case tint::inspector::OverridableConstant::Type::kUint32:
+ return EntryPointMetadata::OverridableConstant::Type::Uint32;
+ }
+ UNREACHABLE();
+}
- ResultOrError<tint::Program> ParseWGSL(const tint::Source::File* file,
- OwnedCompilationMessages* outMessages) {
+ResultOrError<tint::Program> ParseWGSL(const tint::Source::File* file,
+ OwnedCompilationMessages* outMessages) {
#if TINT_BUILD_WGSL_READER
- tint::Program program = tint::reader::wgsl::Parse(file);
- if (outMessages != nullptr) {
- outMessages->AddMessages(program.Diagnostics());
- }
- if (!program.IsValid()) {
- return DAWN_FORMAT_VALIDATION_ERROR(
- "Tint WGSL reader failure:\nParser: %s\nShader:\n%s\n",
- program.Diagnostics().str(), file->content.data);
- }
+ tint::Program program = tint::reader::wgsl::Parse(file);
+ if (outMessages != nullptr) {
+ outMessages->AddMessages(program.Diagnostics());
+ }
+ if (!program.IsValid()) {
+ return DAWN_FORMAT_VALIDATION_ERROR("Tint WGSL reader failure: %s\n",
+ program.Diagnostics().str());
+ }
- return std::move(program);
+ return std::move(program);
#else
- return DAWN_FORMAT_VALIDATION_ERROR("TINT_BUILD_WGSL_READER is not defined.");
+ return DAWN_FORMAT_VALIDATION_ERROR("TINT_BUILD_WGSL_READER is not defined.");
#endif
- }
+}
- ResultOrError<tint::Program> ParseSPIRV(const std::vector<uint32_t>& spirv,
- OwnedCompilationMessages* outMessages) {
+ResultOrError<tint::Program> ParseSPIRV(const std::vector<uint32_t>& spirv,
+ OwnedCompilationMessages* outMessages) {
#if TINT_BUILD_SPV_READER
- tint::Program program = tint::reader::spirv::Parse(spirv);
- if (outMessages != nullptr) {
- outMessages->AddMessages(program.Diagnostics());
- }
- if (!program.IsValid()) {
- return DAWN_FORMAT_VALIDATION_ERROR("Tint SPIR-V reader failure:\nParser: %s\n",
- program.Diagnostics().str());
- }
+ tint::Program program = tint::reader::spirv::Parse(spirv);
+ if (outMessages != nullptr) {
+ outMessages->AddMessages(program.Diagnostics());
+ }
+ if (!program.IsValid()) {
+ return DAWN_FORMAT_VALIDATION_ERROR("Tint SPIR-V reader failure:\nParser: %s\n",
+ program.Diagnostics().str());
+ }
- return std::move(program);
+ return std::move(program);
#else
- return DAWN_FORMAT_VALIDATION_ERROR("TINT_BUILD_SPV_READER is not defined.");
+ return DAWN_FORMAT_VALIDATION_ERROR("TINT_BUILD_SPV_READER is not defined.");
#endif
+}
+
+std::vector<uint64_t> GetBindGroupMinBufferSizes(const BindingGroupInfoMap& shaderBindings,
+ const BindGroupLayoutBase* layout) {
+ std::vector<uint64_t> requiredBufferSizes(layout->GetUnverifiedBufferCount());
+ uint32_t packedIdx = 0;
+
+ for (BindingIndex bindingIndex{0}; bindingIndex < layout->GetBufferCount(); ++bindingIndex) {
+ const BindingInfo& bindingInfo = layout->GetBindingInfo(bindingIndex);
+ if (bindingInfo.buffer.minBindingSize != 0) {
+ // Skip bindings that have minimum buffer size set in the layout
+ continue;
}
- std::vector<uint64_t> GetBindGroupMinBufferSizes(const BindingGroupInfoMap& shaderBindings,
- const BindGroupLayoutBase* layout) {
- std::vector<uint64_t> requiredBufferSizes(layout->GetUnverifiedBufferCount());
- uint32_t packedIdx = 0;
-
- for (BindingIndex bindingIndex{0}; bindingIndex < layout->GetBufferCount();
- ++bindingIndex) {
- const BindingInfo& bindingInfo = layout->GetBindingInfo(bindingIndex);
- if (bindingInfo.buffer.minBindingSize != 0) {
- // Skip bindings that have minimum buffer size set in the layout
- continue;
- }
+ ASSERT(packedIdx < requiredBufferSizes.size());
+ const auto& shaderInfo = shaderBindings.find(bindingInfo.binding);
+ if (shaderInfo != shaderBindings.end()) {
+ requiredBufferSizes[packedIdx] = shaderInfo->second.buffer.minBindingSize;
+ } else {
+ // We have to include buffers if they are included in the bind group's
+ // packed vector. We don't actually need to check these at draw time, so
+ // if this is a problem in the future we can optimize it further.
+ requiredBufferSizes[packedIdx] = 0;
+ }
+ ++packedIdx;
+ }
- ASSERT(packedIdx < requiredBufferSizes.size());
- const auto& shaderInfo = shaderBindings.find(bindingInfo.binding);
- if (shaderInfo != shaderBindings.end()) {
- requiredBufferSizes[packedIdx] = shaderInfo->second.buffer.minBindingSize;
- } else {
- // We have to include buffers if they are included in the bind group's
- // packed vector. We don't actually need to check these at draw time, so
- // if this is a problem in the future we can optimize it further.
- requiredBufferSizes[packedIdx] = 0;
- }
- ++packedIdx;
- }
+ return requiredBufferSizes;
+}
+
+MaybeError ValidateCompatibilityOfSingleBindingWithLayout(const DeviceBase* device,
+ const BindGroupLayoutBase* layout,
+ SingleShaderStage entryPointStage,
+ BindingNumber bindingNumber,
+ const ShaderBindingInfo& shaderInfo) {
+ const BindGroupLayoutBase::BindingMap& layoutBindings = layout->GetBindingMap();
+
+ // An external texture binding found in the shader will later be expanded into multiple
+ // bindings at compile time. This expansion will have already happened in the bgl - so
+ // the shader and bgl will always mismatch at this point. Expansion info is contained in
+ // the bgl object, so we can still verify the bgl used to have an external texture in
+ // the slot corresponding to the shader reflection.
+ if (shaderInfo.bindingType == BindingInfoType::ExternalTexture) {
+ // If an external texture binding used to exist in the bgl, it will be found as a
+ // key in the ExternalTextureBindingExpansions map.
+ ExternalTextureBindingExpansionMap expansions =
+ layout->GetExternalTextureBindingExpansionMap();
+ std::map<BindingNumber, dawn_native::ExternalTextureBindingExpansion>::iterator it =
+ expansions.find(bindingNumber);
+ // TODO(dawn:563): Provide info about the binding types.
+ DAWN_INVALID_IF(it == expansions.end(),
+ "Binding type in the shader (texture_external) doesn't match the "
+ "type in the layout.");
- return requiredBufferSizes;
- }
+ return {};
+ }
- MaybeError ValidateCompatibilityOfSingleBindingWithLayout(
- const DeviceBase* device,
- const BindGroupLayoutBase* layout,
- SingleShaderStage entryPointStage,
- BindingNumber bindingNumber,
- const ShaderBindingInfo& shaderInfo) {
- const BindGroupLayoutBase::BindingMap& layoutBindings = layout->GetBindingMap();
-
- // An external texture binding found in the shader will later be expanded into multiple
- // bindings at compile time. This expansion will have already happened in the bgl - so
- // the shader and bgl will always mismatch at this point. Expansion info is contained in
- // the bgl object, so we can still verify the bgl used to have an external texture in
- // the slot corresponding to the shader reflection.
- if (shaderInfo.bindingType == BindingInfoType::ExternalTexture) {
- // If an external texture binding used to exist in the bgl, it will be found as a
- // key in the ExternalTextureBindingExpansions map.
- ExternalTextureBindingExpansionMap expansions =
- layout->GetExternalTextureBindingExpansionMap();
- std::map<BindingNumber, dawn_native::ExternalTextureBindingExpansion>::iterator it =
- expansions.find(bindingNumber);
- // TODO(dawn:563): Provide info about the binding types.
- DAWN_INVALID_IF(it == expansions.end(),
- "Binding type in the shader (texture_external) doesn't match the "
- "type in the layout.");
-
- return {};
- }
+ const auto& bindingIt = layoutBindings.find(bindingNumber);
+ DAWN_INVALID_IF(bindingIt == layoutBindings.end(), "Binding doesn't exist in %s.", layout);
+
+ BindingIndex bindingIndex(bindingIt->second);
+ const BindingInfo& layoutInfo = layout->GetBindingInfo(bindingIndex);
+
+ // TODO(dawn:563): Provide info about the binding types.
+ DAWN_INVALID_IF(
+ layoutInfo.bindingType != shaderInfo.bindingType,
+ "Binding type (buffer vs. texture vs. sampler vs. external) doesn't match the type "
+ "in the layout.");
- const auto& bindingIt = layoutBindings.find(bindingNumber);
- DAWN_INVALID_IF(bindingIt == layoutBindings.end(), "Binding doesn't exist in %s.",
- layout);
+ ExternalTextureBindingExpansionMap expansions = layout->GetExternalTextureBindingExpansionMap();
+ DAWN_INVALID_IF(expansions.find(bindingNumber) != expansions.end(),
+ "Binding type (buffer vs. texture vs. sampler vs. external) doesn't "
+ "match the type in the layout.");
- BindingIndex bindingIndex(bindingIt->second);
- const BindingInfo& layoutInfo = layout->GetBindingInfo(bindingIndex);
+ // TODO(dawn:563): Provide info about the visibility.
+ DAWN_INVALID_IF((layoutInfo.visibility & StageBit(entryPointStage)) == 0,
+ "Entry point's stage is not in the binding visibility in the layout (%s)",
+ layoutInfo.visibility);
- // TODO(dawn:563): Provide info about the binding types.
+ switch (layoutInfo.bindingType) {
+ case BindingInfoType::Texture: {
DAWN_INVALID_IF(
- layoutInfo.bindingType != shaderInfo.bindingType,
- "Binding type (buffer vs. texture vs. sampler vs. external) doesn't match the type "
- "in the layout.");
+ layoutInfo.texture.multisampled != shaderInfo.texture.multisampled,
+ "Binding multisampled flag (%u) doesn't match the layout's multisampled "
+ "flag (%u)",
+ layoutInfo.texture.multisampled, shaderInfo.texture.multisampled);
- ExternalTextureBindingExpansionMap expansions =
- layout->GetExternalTextureBindingExpansionMap();
- DAWN_INVALID_IF(expansions.find(bindingNumber) != expansions.end(),
- "Binding type (buffer vs. texture vs. sampler vs. external) doesn't "
- "match the type in the layout.");
+ // TODO(dawn:563): Provide info about the sample types.
+ DAWN_INVALID_IF((SampleTypeToSampleTypeBit(layoutInfo.texture.sampleType) &
+ shaderInfo.texture.compatibleSampleTypes) == 0,
+ "The sample type in the shader is not compatible with the "
+ "sample type of the layout.");
- // TODO(dawn:563): Provide info about the visibility.
DAWN_INVALID_IF(
- (layoutInfo.visibility & StageBit(entryPointStage)) == 0,
- "Entry point's stage is not in the binding visibility in the layout (%s)",
- layoutInfo.visibility);
-
- switch (layoutInfo.bindingType) {
- case BindingInfoType::Texture: {
- DAWN_INVALID_IF(
- layoutInfo.texture.multisampled != shaderInfo.texture.multisampled,
- "Binding multisampled flag (%u) doesn't match the layout's multisampled "
- "flag (%u)",
- layoutInfo.texture.multisampled, shaderInfo.texture.multisampled);
-
- // TODO(dawn:563): Provide info about the sample types.
- DAWN_INVALID_IF((SampleTypeToSampleTypeBit(layoutInfo.texture.sampleType) &
- shaderInfo.texture.compatibleSampleTypes) == 0,
- "The sample type in the shader is not compatible with the "
- "sample type of the layout.");
-
- DAWN_INVALID_IF(
- layoutInfo.texture.viewDimension != shaderInfo.texture.viewDimension,
- "The shader's binding dimension (%s) doesn't match the shader's binding "
- "dimension (%s).",
- layoutInfo.texture.viewDimension, shaderInfo.texture.viewDimension);
- break;
- }
+ layoutInfo.texture.viewDimension != shaderInfo.texture.viewDimension,
+                "The layout's binding dimension (%s) doesn't match the shader's binding "
+ "dimension (%s).",
+ layoutInfo.texture.viewDimension, shaderInfo.texture.viewDimension);
+ break;
+ }
- case BindingInfoType::StorageTexture: {
- ASSERT(layoutInfo.storageTexture.format != wgpu::TextureFormat::Undefined);
- ASSERT(shaderInfo.storageTexture.format != wgpu::TextureFormat::Undefined);
-
- DAWN_INVALID_IF(
- layoutInfo.storageTexture.access != shaderInfo.storageTexture.access,
- "The layout's binding access (%s) isn't compatible with the shader's "
- "binding access (%s).",
- layoutInfo.storageTexture.access, shaderInfo.storageTexture.access);
-
- DAWN_INVALID_IF(
- layoutInfo.storageTexture.format != shaderInfo.storageTexture.format,
- "The layout's binding format (%s) doesn't match the shader's binding "
- "format (%s).",
- layoutInfo.storageTexture.format, shaderInfo.storageTexture.format);
-
- DAWN_INVALID_IF(layoutInfo.storageTexture.viewDimension !=
- shaderInfo.storageTexture.viewDimension,
- "The layout's binding dimension (%s) doesn't match the "
- "shader's binding dimension (%s).",
- layoutInfo.storageTexture.viewDimension,
- shaderInfo.storageTexture.viewDimension);
- break;
- }
+ case BindingInfoType::StorageTexture: {
+ ASSERT(layoutInfo.storageTexture.format != wgpu::TextureFormat::Undefined);
+ ASSERT(shaderInfo.storageTexture.format != wgpu::TextureFormat::Undefined);
- case BindingInfoType::Buffer: {
- // Binding mismatch between shader and bind group is invalid. For example, a
- // writable binding in the shader with a readonly storage buffer in the bind
- // group layout is invalid. For internal usage with internal shaders, a storage
- // binding in the shader with an internal storage buffer in the bind group
- // layout is also valid.
- bool validBindingConversion =
- (layoutInfo.buffer.type == kInternalStorageBufferBinding &&
- shaderInfo.buffer.type == wgpu::BufferBindingType::Storage);
-
- DAWN_INVALID_IF(
- layoutInfo.buffer.type != shaderInfo.buffer.type && !validBindingConversion,
- "The buffer type in the shader (%s) is not compatible with the type in the "
- "layout (%s).",
- shaderInfo.buffer.type, layoutInfo.buffer.type);
-
- DAWN_INVALID_IF(
- layoutInfo.buffer.minBindingSize != 0 &&
- shaderInfo.buffer.minBindingSize > layoutInfo.buffer.minBindingSize,
- "The shader uses more bytes of the buffer (%u) than the layout's "
- "minBindingSize (%u).",
- shaderInfo.buffer.minBindingSize, layoutInfo.buffer.minBindingSize);
- break;
- }
+ DAWN_INVALID_IF(layoutInfo.storageTexture.access != shaderInfo.storageTexture.access,
+ "The layout's binding access (%s) isn't compatible with the shader's "
+ "binding access (%s).",
+ layoutInfo.storageTexture.access, shaderInfo.storageTexture.access);
- case BindingInfoType::Sampler:
- DAWN_INVALID_IF(
- (layoutInfo.sampler.type == wgpu::SamplerBindingType::Comparison) !=
- shaderInfo.sampler.isComparison,
- "The sampler type in the shader (comparison: %u) doesn't match the type in "
- "the layout (comparison: %u).",
- shaderInfo.sampler.isComparison,
- layoutInfo.sampler.type == wgpu::SamplerBindingType::Comparison);
- break;
-
- case BindingInfoType::ExternalTexture: {
- UNREACHABLE();
- break;
- }
- }
+ DAWN_INVALID_IF(layoutInfo.storageTexture.format != shaderInfo.storageTexture.format,
+ "The layout's binding format (%s) doesn't match the shader's binding "
+ "format (%s).",
+ layoutInfo.storageTexture.format, shaderInfo.storageTexture.format);
- return {};
+ DAWN_INVALID_IF(
+ layoutInfo.storageTexture.viewDimension != shaderInfo.storageTexture.viewDimension,
+ "The layout's binding dimension (%s) doesn't match the "
+ "shader's binding dimension (%s).",
+ layoutInfo.storageTexture.viewDimension, shaderInfo.storageTexture.viewDimension);
+ break;
}
- MaybeError ValidateCompatibilityWithBindGroupLayout(DeviceBase* device,
- BindGroupIndex group,
- const EntryPointMetadata& entryPoint,
- const BindGroupLayoutBase* layout) {
- // Iterate over all bindings used by this group in the shader, and find the
- // corresponding binding in the BindGroupLayout, if it exists.
- for (const auto& [bindingId, bindingInfo] : entryPoint.bindings[group]) {
- DAWN_TRY_CONTEXT(ValidateCompatibilityOfSingleBindingWithLayout(
- device, layout, entryPoint.stage, bindingId, bindingInfo),
- "validating that the entry-point's declaration for @group(%u) "
- "@binding(%u) matches %s",
- static_cast<uint32_t>(group), static_cast<uint32_t>(bindingId),
- layout);
- }
- return {};
+ case BindingInfoType::Buffer: {
+ // Binding mismatch between shader and bind group is invalid. For example, a
+ // writable binding in the shader with a readonly storage buffer in the bind
+ // group layout is invalid. For internal usage with internal shaders, a storage
+ // binding in the shader with an internal storage buffer in the bind group
+ // layout is also valid.
+ bool validBindingConversion =
+ (layoutInfo.buffer.type == kInternalStorageBufferBinding &&
+ shaderInfo.buffer.type == wgpu::BufferBindingType::Storage);
+
+ DAWN_INVALID_IF(
+ layoutInfo.buffer.type != shaderInfo.buffer.type && !validBindingConversion,
+ "The buffer type in the shader (%s) is not compatible with the type in the "
+ "layout (%s).",
+ shaderInfo.buffer.type, layoutInfo.buffer.type);
+
+ DAWN_INVALID_IF(layoutInfo.buffer.minBindingSize != 0 &&
+ shaderInfo.buffer.minBindingSize > layoutInfo.buffer.minBindingSize,
+ "The shader uses more bytes of the buffer (%u) than the layout's "
+ "minBindingSize (%u).",
+ shaderInfo.buffer.minBindingSize, layoutInfo.buffer.minBindingSize);
+ break;
+ }
+
+ case BindingInfoType::Sampler:
+ DAWN_INVALID_IF(
+ (layoutInfo.sampler.type == wgpu::SamplerBindingType::Comparison) !=
+ shaderInfo.sampler.isComparison,
+ "The sampler type in the shader (comparison: %u) doesn't match the type in "
+ "the layout (comparison: %u).",
+ shaderInfo.sampler.isComparison,
+ layoutInfo.sampler.type == wgpu::SamplerBindingType::Comparison);
+ break;
+
+ case BindingInfoType::ExternalTexture: {
+ UNREACHABLE();
+ break;
}
+ }
+
+ return {};
+}
+MaybeError ValidateCompatibilityWithBindGroupLayout(DeviceBase* device,
+ BindGroupIndex group,
+ const EntryPointMetadata& entryPoint,
+ const BindGroupLayoutBase* layout) {
+ // Iterate over all bindings used by this group in the shader, and find the
+ // corresponding binding in the BindGroupLayout, if it exists.
+ for (const auto& [bindingId, bindingInfo] : entryPoint.bindings[group]) {
+ DAWN_TRY_CONTEXT(ValidateCompatibilityOfSingleBindingWithLayout(
+ device, layout, entryPoint.stage, bindingId, bindingInfo),
+ "validating that the entry-point's declaration for @group(%u) "
+ "@binding(%u) matches %s",
+ static_cast<uint32_t>(group), static_cast<uint32_t>(bindingId), layout);
+ }
- ResultOrError<std::unique_ptr<EntryPointMetadata>> ReflectEntryPointUsingTint(
- const DeviceBase* device,
- tint::inspector::Inspector* inspector,
- const tint::inspector::EntryPoint& entryPoint) {
- const CombinedLimits& limits = device->GetLimits();
- constexpr uint32_t kMaxInterStageShaderLocation = kMaxInterStageShaderVariables - 1;
+ return {};
+}
- std::unique_ptr<EntryPointMetadata> metadata = std::make_unique<EntryPointMetadata>();
+ResultOrError<std::unique_ptr<EntryPointMetadata>> ReflectEntryPointUsingTint(
+ const DeviceBase* device,
+ tint::inspector::Inspector* inspector,
+ const tint::inspector::EntryPoint& entryPoint) {
+ const CombinedLimits& limits = device->GetLimits();
+ constexpr uint32_t kMaxInterStageShaderLocation = kMaxInterStageShaderVariables - 1;
- // Returns the invalid argument, and if it is true additionally store the formatted
- // error in metadata.infringedLimits. This is to delay the emission of these validation
- // errors until the entry point is used.
+ std::unique_ptr<EntryPointMetadata> metadata = std::make_unique<EntryPointMetadata>();
+
+    // Returns the invalid argument, and if it is true additionally stores the formatted
+ // error in metadata.infringedLimits. This is to delay the emission of these validation
+ // errors until the entry point is used.
#define DelayedInvalidIf(invalid, ...) \
([&]() { \
if (invalid) { \
@@ -626,719 +610,736 @@ namespace dawn::native {
return invalid; \
})()
- if (!entryPoint.overridable_constants.empty()) {
- DAWN_INVALID_IF(device->IsToggleEnabled(Toggle::DisallowUnsafeAPIs),
- "Pipeline overridable constants are disallowed because they "
- "are partially implemented.");
-
- const auto& name2Id = inspector->GetConstantNameToIdMap();
- const auto& id2Scalar = inspector->GetConstantIDs();
-
- for (auto& c : entryPoint.overridable_constants) {
- uint32_t id = name2Id.at(c.name);
- OverridableConstantScalar defaultValue;
- if (c.is_initialized) {
- // if it is initialized, the scalar must exist
- const auto& scalar = id2Scalar.at(id);
- if (scalar.IsBool()) {
- defaultValue.b = scalar.AsBool();
- } else if (scalar.IsU32()) {
- defaultValue.u32 = scalar.AsU32();
- } else if (scalar.IsI32()) {
- defaultValue.i32 = scalar.AsI32();
- } else if (scalar.IsFloat()) {
- defaultValue.f32 = scalar.AsFloat();
- } else {
- UNREACHABLE();
- }
- }
- EntryPointMetadata::OverridableConstant constant = {
- id, FromTintOverridableConstantType(c.type), c.is_initialized,
- defaultValue};
-
- std::string identifier =
- c.is_numeric_id_specified ? std::to_string(constant.id) : c.name;
- metadata->overridableConstants[identifier] = constant;
-
- if (!c.is_initialized) {
- auto [_, inserted] = metadata->uninitializedOverridableConstants.emplace(
- std::move(identifier));
- // The insertion should have taken place
- ASSERT(inserted);
- } else {
- auto [_, inserted] = metadata->initializedOverridableConstants.emplace(
- std::move(identifier));
- // The insertion should have taken place
- ASSERT(inserted);
- }
+ if (!entryPoint.overridable_constants.empty()) {
+ DAWN_INVALID_IF(device->IsToggleEnabled(Toggle::DisallowUnsafeAPIs),
+ "Pipeline overridable constants are disallowed because they "
+ "are partially implemented.");
+
+ const auto& name2Id = inspector->GetConstantNameToIdMap();
+ const auto& id2Scalar = inspector->GetConstantIDs();
+
+ for (auto& c : entryPoint.overridable_constants) {
+ uint32_t id = name2Id.at(c.name);
+ OverridableConstantScalar defaultValue;
+ if (c.is_initialized) {
+ // if it is initialized, the scalar must exist
+ const auto& scalar = id2Scalar.at(id);
+ if (scalar.IsBool()) {
+ defaultValue.b = scalar.AsBool();
+ } else if (scalar.IsU32()) {
+ defaultValue.u32 = scalar.AsU32();
+ } else if (scalar.IsI32()) {
+ defaultValue.i32 = scalar.AsI32();
+ } else if (scalar.IsFloat()) {
+ defaultValue.f32 = scalar.AsFloat();
+ } else {
+ UNREACHABLE();
}
}
-
- DAWN_TRY_ASSIGN(metadata->stage, TintPipelineStageToShaderStage(entryPoint.stage));
-
- if (metadata->stage == SingleShaderStage::Compute) {
- DelayedInvalidIf(
- entryPoint.workgroup_size_x > limits.v1.maxComputeWorkgroupSizeX ||
- entryPoint.workgroup_size_y > limits.v1.maxComputeWorkgroupSizeY ||
- entryPoint.workgroup_size_z > limits.v1.maxComputeWorkgroupSizeZ,
- "Entry-point uses workgroup_size(%u, %u, %u) that exceeds the "
- "maximum allowed (%u, %u, %u).",
- entryPoint.workgroup_size_x, entryPoint.workgroup_size_y,
- entryPoint.workgroup_size_z, limits.v1.maxComputeWorkgroupSizeX,
- limits.v1.maxComputeWorkgroupSizeY, limits.v1.maxComputeWorkgroupSizeZ);
-
- // Dimensions have already been validated against their individual limits above.
- // Cast to uint64_t to avoid overflow in this multiplication.
- uint64_t numInvocations = static_cast<uint64_t>(entryPoint.workgroup_size_x) *
- entryPoint.workgroup_size_y * entryPoint.workgroup_size_z;
- DelayedInvalidIf(numInvocations > limits.v1.maxComputeInvocationsPerWorkgroup,
- "The total number of workgroup invocations (%u) exceeds the "
- "maximum allowed (%u).",
- numInvocations, limits.v1.maxComputeInvocationsPerWorkgroup);
-
- const size_t workgroupStorageSize =
- inspector->GetWorkgroupStorageSize(entryPoint.name);
- DelayedInvalidIf(workgroupStorageSize > limits.v1.maxComputeWorkgroupStorageSize,
- "The total use of workgroup storage (%u bytes) is larger than "
- "the maximum allowed (%u bytes).",
- workgroupStorageSize, limits.v1.maxComputeWorkgroupStorageSize);
-
- metadata->localWorkgroupSize.x = entryPoint.workgroup_size_x;
- metadata->localWorkgroupSize.y = entryPoint.workgroup_size_y;
- metadata->localWorkgroupSize.z = entryPoint.workgroup_size_z;
-
- metadata->usesNumWorkgroups = entryPoint.num_workgroups_used;
+ EntryPointMetadata::OverridableConstant constant = {
+ id, FromTintOverridableConstantType(c.type), c.is_initialized, defaultValue};
+
+ std::string identifier =
+ c.is_numeric_id_specified ? std::to_string(constant.id) : c.name;
+ metadata->overridableConstants[identifier] = constant;
+
+ if (!c.is_initialized) {
+ auto [_, inserted] =
+ metadata->uninitializedOverridableConstants.emplace(std::move(identifier));
+ // The insertion should have taken place
+ ASSERT(inserted);
+ } else {
+ auto [_, inserted] =
+ metadata->initializedOverridableConstants.emplace(std::move(identifier));
+ // The insertion should have taken place
+ ASSERT(inserted);
}
+ }
+ }
- if (metadata->stage == SingleShaderStage::Vertex) {
- for (const auto& inputVar : entryPoint.input_variables) {
- uint32_t unsanitizedLocation = inputVar.location_decoration;
- if (DelayedInvalidIf(unsanitizedLocation >= kMaxVertexAttributes,
- "Vertex input variable \"%s\" has a location (%u) that "
- "exceeds the maximum (%u)",
- inputVar.name, unsanitizedLocation,
- kMaxVertexAttributes)) {
- continue;
- }
-
- VertexAttributeLocation location(static_cast<uint8_t>(unsanitizedLocation));
- DAWN_TRY_ASSIGN(
- metadata->vertexInputBaseTypes[location],
- TintComponentTypeToVertexFormatBaseType(inputVar.component_type));
- metadata->usedVertexInputs.set(location);
- }
+ DAWN_TRY_ASSIGN(metadata->stage, TintPipelineStageToShaderStage(entryPoint.stage));
+
+ if (metadata->stage == SingleShaderStage::Compute) {
+ DelayedInvalidIf(entryPoint.workgroup_size_x > limits.v1.maxComputeWorkgroupSizeX ||
+ entryPoint.workgroup_size_y > limits.v1.maxComputeWorkgroupSizeY ||
+ entryPoint.workgroup_size_z > limits.v1.maxComputeWorkgroupSizeZ,
+ "Entry-point uses workgroup_size(%u, %u, %u) that exceeds the "
+ "maximum allowed (%u, %u, %u).",
+ entryPoint.workgroup_size_x, entryPoint.workgroup_size_y,
+ entryPoint.workgroup_size_z, limits.v1.maxComputeWorkgroupSizeX,
+ limits.v1.maxComputeWorkgroupSizeY, limits.v1.maxComputeWorkgroupSizeZ);
+
+ // Dimensions have already been validated against their individual limits above.
+ // Cast to uint64_t to avoid overflow in this multiplication.
+ uint64_t numInvocations = static_cast<uint64_t>(entryPoint.workgroup_size_x) *
+ entryPoint.workgroup_size_y * entryPoint.workgroup_size_z;
+ DelayedInvalidIf(numInvocations > limits.v1.maxComputeInvocationsPerWorkgroup,
+ "The total number of workgroup invocations (%u) exceeds the "
+ "maximum allowed (%u).",
+ numInvocations, limits.v1.maxComputeInvocationsPerWorkgroup);
+
+ const size_t workgroupStorageSize = inspector->GetWorkgroupStorageSize(entryPoint.name);
+ DelayedInvalidIf(workgroupStorageSize > limits.v1.maxComputeWorkgroupStorageSize,
+ "The total use of workgroup storage (%u bytes) is larger than "
+ "the maximum allowed (%u bytes).",
+ workgroupStorageSize, limits.v1.maxComputeWorkgroupStorageSize);
+
+ metadata->localWorkgroupSize.x = entryPoint.workgroup_size_x;
+ metadata->localWorkgroupSize.y = entryPoint.workgroup_size_y;
+ metadata->localWorkgroupSize.z = entryPoint.workgroup_size_z;
+
+ metadata->usesNumWorkgroups = entryPoint.num_workgroups_used;
+ }
- // [[position]] must be declared in a vertex shader but is not exposed as an
- // output variable by Tint so we directly add its components to the total.
- uint32_t totalInterStageShaderComponents = 4;
- for (const auto& outputVar : entryPoint.output_variables) {
- EntryPointMetadata::InterStageVariableInfo variable;
- DAWN_TRY_ASSIGN(variable.baseType, TintComponentTypeToInterStageComponentType(
- outputVar.component_type));
- DAWN_TRY_ASSIGN(
- variable.componentCount,
- TintCompositionTypeToInterStageComponentCount(outputVar.composition_type));
- DAWN_TRY_ASSIGN(
- variable.interpolationType,
- TintInterpolationTypeToInterpolationType(outputVar.interpolation_type));
- DAWN_TRY_ASSIGN(variable.interpolationSampling,
- TintInterpolationSamplingToInterpolationSamplingType(
- outputVar.interpolation_sampling));
- totalInterStageShaderComponents += variable.componentCount;
-
- uint32_t location = outputVar.location_decoration;
- if (DelayedInvalidIf(location > kMaxInterStageShaderLocation,
- "Vertex output variable \"%s\" has a location (%u) that "
- "exceeds the maximum (%u).",
- outputVar.name, location, kMaxInterStageShaderLocation)) {
- continue;
- }
-
- metadata->usedInterStageVariables.set(location);
- metadata->interStageVariables[location] = variable;
- }
+ if (metadata->stage == SingleShaderStage::Vertex) {
+ for (const auto& inputVar : entryPoint.input_variables) {
+ uint32_t unsanitizedLocation = inputVar.location_decoration;
+ if (DelayedInvalidIf(unsanitizedLocation >= kMaxVertexAttributes,
+ "Vertex input variable \"%s\" has a location (%u) that "
+ "exceeds the maximum (%u)",
+ inputVar.name, unsanitizedLocation, kMaxVertexAttributes)) {
+ continue;
+ }
- DelayedInvalidIf(
- totalInterStageShaderComponents > kMaxInterStageShaderComponents,
- "Total vertex output components count (%u) exceeds the maximum (%u).",
- totalInterStageShaderComponents, kMaxInterStageShaderComponents);
+ VertexAttributeLocation location(static_cast<uint8_t>(unsanitizedLocation));
+ DAWN_TRY_ASSIGN(metadata->vertexInputBaseTypes[location],
+ TintComponentTypeToVertexFormatBaseType(inputVar.component_type));
+ metadata->usedVertexInputs.set(location);
+ }
+
+ // [[position]] must be declared in a vertex shader but is not exposed as an
+ // output variable by Tint so we directly add its components to the total.
+ uint32_t totalInterStageShaderComponents = 4;
+ for (const auto& outputVar : entryPoint.output_variables) {
+ EntryPointMetadata::InterStageVariableInfo variable;
+ DAWN_TRY_ASSIGN(variable.baseType,
+ TintComponentTypeToInterStageComponentType(outputVar.component_type));
+ DAWN_TRY_ASSIGN(variable.componentCount, TintCompositionTypeToInterStageComponentCount(
+ outputVar.composition_type));
+ DAWN_TRY_ASSIGN(variable.interpolationType,
+ TintInterpolationTypeToInterpolationType(outputVar.interpolation_type));
+ DAWN_TRY_ASSIGN(variable.interpolationSampling,
+ TintInterpolationSamplingToInterpolationSamplingType(
+ outputVar.interpolation_sampling));
+ totalInterStageShaderComponents += variable.componentCount;
+
+ uint32_t location = outputVar.location_decoration;
+ if (DelayedInvalidIf(location > kMaxInterStageShaderLocation,
+ "Vertex output variable \"%s\" has a location (%u) that "
+ "exceeds the maximum (%u).",
+ outputVar.name, location, kMaxInterStageShaderLocation)) {
+ continue;
}
- if (metadata->stage == SingleShaderStage::Fragment) {
- uint32_t totalInterStageShaderComponents = 0;
- for (const auto& inputVar : entryPoint.input_variables) {
- EntryPointMetadata::InterStageVariableInfo variable;
- DAWN_TRY_ASSIGN(variable.baseType, TintComponentTypeToInterStageComponentType(
- inputVar.component_type));
- DAWN_TRY_ASSIGN(
- variable.componentCount,
- TintCompositionTypeToInterStageComponentCount(inputVar.composition_type));
- DAWN_TRY_ASSIGN(
- variable.interpolationType,
- TintInterpolationTypeToInterpolationType(inputVar.interpolation_type));
- DAWN_TRY_ASSIGN(variable.interpolationSampling,
- TintInterpolationSamplingToInterpolationSamplingType(
- inputVar.interpolation_sampling));
- totalInterStageShaderComponents += variable.componentCount;
-
- uint32_t location = inputVar.location_decoration;
- if (DelayedInvalidIf(location > kMaxInterStageShaderLocation,
- "Fragment input variable \"%s\" has a location (%u) that "
- "exceeds the maximum (%u).",
- inputVar.name, location, kMaxInterStageShaderLocation)) {
- continue;
- }
-
- metadata->usedInterStageVariables.set(location);
- metadata->interStageVariables[location] = variable;
- }
+ metadata->usedInterStageVariables.set(location);
+ metadata->interStageVariables[location] = variable;
+ }
- if (entryPoint.front_facing_used) {
- totalInterStageShaderComponents += 1;
- }
- if (entryPoint.input_sample_mask_used) {
- totalInterStageShaderComponents += 1;
- }
- if (entryPoint.sample_index_used) {
- totalInterStageShaderComponents += 1;
- }
- if (entryPoint.input_position_used) {
- totalInterStageShaderComponents += 4;
- }
+ DelayedInvalidIf(totalInterStageShaderComponents > kMaxInterStageShaderComponents,
+ "Total vertex output components count (%u) exceeds the maximum (%u).",
+ totalInterStageShaderComponents, kMaxInterStageShaderComponents);
+ }
- DelayedInvalidIf(
- totalInterStageShaderComponents > kMaxInterStageShaderComponents,
- "Total fragment input components count (%u) exceeds the maximum (%u).",
- totalInterStageShaderComponents, kMaxInterStageShaderComponents);
-
- for (const auto& outputVar : entryPoint.output_variables) {
- EntryPointMetadata::FragmentOutputVariableInfo variable;
- DAWN_TRY_ASSIGN(variable.baseType, TintComponentTypeToTextureComponentType(
- outputVar.component_type));
- DAWN_TRY_ASSIGN(
- variable.componentCount,
- TintCompositionTypeToInterStageComponentCount(outputVar.composition_type));
- ASSERT(variable.componentCount <= 4);
-
- uint32_t unsanitizedAttachment = outputVar.location_decoration;
- if (DelayedInvalidIf(unsanitizedAttachment >= kMaxColorAttachments,
- "Fragment output variable \"%s\" has a location (%u) that "
- "exceeds the maximum (%u).",
- outputVar.name, unsanitizedAttachment,
- kMaxColorAttachments)) {
- continue;
- }
-
- ColorAttachmentIndex attachment(static_cast<uint8_t>(unsanitizedAttachment));
- metadata->fragmentOutputVariables[attachment] = variable;
- metadata->fragmentOutputsWritten.set(attachment);
- }
+ if (metadata->stage == SingleShaderStage::Fragment) {
+ uint32_t totalInterStageShaderComponents = 0;
+ for (const auto& inputVar : entryPoint.input_variables) {
+ EntryPointMetadata::InterStageVariableInfo variable;
+ DAWN_TRY_ASSIGN(variable.baseType,
+ TintComponentTypeToInterStageComponentType(inputVar.component_type));
+ DAWN_TRY_ASSIGN(variable.componentCount, TintCompositionTypeToInterStageComponentCount(
+ inputVar.composition_type));
+ DAWN_TRY_ASSIGN(variable.interpolationType,
+ TintInterpolationTypeToInterpolationType(inputVar.interpolation_type));
+ DAWN_TRY_ASSIGN(variable.interpolationSampling,
+ TintInterpolationSamplingToInterpolationSamplingType(
+ inputVar.interpolation_sampling));
+ totalInterStageShaderComponents += variable.componentCount;
+
+ uint32_t location = inputVar.location_decoration;
+ if (DelayedInvalidIf(location > kMaxInterStageShaderLocation,
+ "Fragment input variable \"%s\" has a location (%u) that "
+ "exceeds the maximum (%u).",
+ inputVar.name, location, kMaxInterStageShaderLocation)) {
+ continue;
}
- for (const tint::inspector::ResourceBinding& resource :
- inspector->GetResourceBindings(entryPoint.name)) {
- ShaderBindingInfo info;
+ metadata->usedInterStageVariables.set(location);
+ metadata->interStageVariables[location] = variable;
+ }
- info.bindingType = TintResourceTypeToBindingInfoType(resource.resource_type);
+ if (entryPoint.front_facing_used) {
+ totalInterStageShaderComponents += 1;
+ }
+ if (entryPoint.input_sample_mask_used) {
+ totalInterStageShaderComponents += 1;
+ }
+ if (entryPoint.sample_index_used) {
+ totalInterStageShaderComponents += 1;
+ }
+ if (entryPoint.input_position_used) {
+ totalInterStageShaderComponents += 4;
+ }
- switch (info.bindingType) {
- case BindingInfoType::Buffer:
- info.buffer.minBindingSize = resource.size_no_padding;
- DAWN_TRY_ASSIGN(info.buffer.type, TintResourceTypeToBufferBindingType(
- resource.resource_type));
- break;
- case BindingInfoType::Sampler:
- switch (resource.resource_type) {
- case tint::inspector::ResourceBinding::ResourceType::kSampler:
- info.sampler.isComparison = false;
- break;
- case tint::inspector::ResourceBinding::ResourceType::kComparisonSampler:
- info.sampler.isComparison = true;
- break;
- default:
- UNREACHABLE();
- }
- break;
- case BindingInfoType::Texture:
- info.texture.viewDimension =
- TintTextureDimensionToTextureViewDimension(resource.dim);
- if (resource.resource_type ==
- tint::inspector::ResourceBinding::ResourceType::kDepthTexture ||
- resource.resource_type == tint::inspector::ResourceBinding::
- ResourceType::kDepthMultisampledTexture) {
- info.texture.compatibleSampleTypes = SampleTypeBit::Depth;
- } else {
- info.texture.compatibleSampleTypes =
- TintSampledKindToSampleTypeBit(resource.sampled_kind);
- }
- info.texture.multisampled =
- resource.resource_type == tint::inspector::ResourceBinding::
- ResourceType::kMultisampledTexture ||
- resource.resource_type == tint::inspector::ResourceBinding::
- ResourceType::kDepthMultisampledTexture;
+ DelayedInvalidIf(totalInterStageShaderComponents > kMaxInterStageShaderComponents,
+ "Total fragment input components count (%u) exceeds the maximum (%u).",
+ totalInterStageShaderComponents, kMaxInterStageShaderComponents);
+
+ for (const auto& outputVar : entryPoint.output_variables) {
+ EntryPointMetadata::FragmentOutputVariableInfo variable;
+ DAWN_TRY_ASSIGN(variable.baseType,
+ TintComponentTypeToTextureComponentType(outputVar.component_type));
+ DAWN_TRY_ASSIGN(variable.componentCount, TintCompositionTypeToInterStageComponentCount(
+ outputVar.composition_type));
+ ASSERT(variable.componentCount <= 4);
+
+ uint32_t unsanitizedAttachment = outputVar.location_decoration;
+ if (DelayedInvalidIf(unsanitizedAttachment >= kMaxColorAttachments,
+ "Fragment output variable \"%s\" has a location (%u) that "
+ "exceeds the maximum (%u).",
+ outputVar.name, unsanitizedAttachment, kMaxColorAttachments)) {
+ continue;
+ }
- break;
- case BindingInfoType::StorageTexture:
- DAWN_TRY_ASSIGN(
- info.storageTexture.access,
- TintResourceTypeToStorageTextureAccess(resource.resource_type));
- info.storageTexture.format =
- TintImageFormatToTextureFormat(resource.image_format);
- info.storageTexture.viewDimension =
- TintTextureDimensionToTextureViewDimension(resource.dim);
+ ColorAttachmentIndex attachment(static_cast<uint8_t>(unsanitizedAttachment));
+ metadata->fragmentOutputVariables[attachment] = variable;
+ metadata->fragmentOutputsWritten.set(attachment);
+ }
+ }
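+    // Convert the reflected resource bindings into Dawn's ShaderBindingInfo, validating the
+    // group and binding numbers and rejecting duplicate bindings.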
+ for (const tint::inspector::ResourceBinding& resource :
+ inspector->GetResourceBindings(entryPoint.name)) {
+ ShaderBindingInfo info;
+
+ info.bindingType = TintResourceTypeToBindingInfoType(resource.resource_type);
+
+ switch (info.bindingType) {
+ case BindingInfoType::Buffer:
+ info.buffer.minBindingSize = resource.size_no_padding;
+ DAWN_TRY_ASSIGN(info.buffer.type,
+ TintResourceTypeToBufferBindingType(resource.resource_type));
+ break;
+ case BindingInfoType::Sampler:
+ switch (resource.resource_type) {
+ case tint::inspector::ResourceBinding::ResourceType::kSampler:
+ info.sampler.isComparison = false;
break;
- case BindingInfoType::ExternalTexture:
+ case tint::inspector::ResourceBinding::ResourceType::kComparisonSampler:
+ info.sampler.isComparison = true;
break;
default:
- return DAWN_VALIDATION_ERROR("Unknown binding type in Shader");
+ UNREACHABLE();
}
-
- BindingNumber bindingNumber(resource.binding);
- BindGroupIndex bindGroupIndex(resource.bind_group);
-
- if (DelayedInvalidIf(bindGroupIndex >= kMaxBindGroupsTyped,
- "The entry-point uses a binding with a group decoration (%u) "
- "that exceeds the maximum (%u).",
- resource.bind_group, kMaxBindGroups) ||
- DelayedInvalidIf(bindingNumber > kMaxBindingNumberTyped,
- "Binding number (%u) exceeds the maximum binding number (%u).",
- uint32_t(bindingNumber), uint32_t(kMaxBindingNumberTyped))) {
- continue;
+ break;
+ case BindingInfoType::Texture:
+ info.texture.viewDimension =
+ TintTextureDimensionToTextureViewDimension(resource.dim);
+ if (resource.resource_type ==
+ tint::inspector::ResourceBinding::ResourceType::kDepthTexture ||
+ resource.resource_type ==
+ tint::inspector::ResourceBinding::ResourceType::kDepthMultisampledTexture) {
+ info.texture.compatibleSampleTypes = SampleTypeBit::Depth;
+ } else {
+ info.texture.compatibleSampleTypes =
+ TintSampledKindToSampleTypeBit(resource.sampled_kind);
}
-
- const auto& [binding, inserted] =
- metadata->bindings[bindGroupIndex].emplace(bindingNumber, info);
- DAWN_INVALID_IF(!inserted,
- "Entry-point has a duplicate binding for (group:%u, binding:%u).",
- resource.binding, resource.bind_group);
- }
-
- std::vector<tint::inspector::SamplerTexturePair> samplerTextureUses =
- inspector->GetSamplerTextureUses(entryPoint.name);
- metadata->samplerTexturePairs.reserve(samplerTextureUses.size());
- std::transform(samplerTextureUses.begin(), samplerTextureUses.end(),
- std::back_inserter(metadata->samplerTexturePairs),
- [](const tint::inspector::SamplerTexturePair& pair) {
- EntryPointMetadata::SamplerTexturePair result;
- result.sampler = {BindGroupIndex(pair.sampler_binding_point.group),
- BindingNumber(pair.sampler_binding_point.binding)};
- result.texture = {BindGroupIndex(pair.texture_binding_point.group),
- BindingNumber(pair.texture_binding_point.binding)};
- return result;
- });
-
-#undef DelayedInvalidIf
- return std::move(metadata);
+ info.texture.multisampled =
+ resource.resource_type ==
+ tint::inspector::ResourceBinding::ResourceType::kMultisampledTexture ||
+ resource.resource_type ==
+ tint::inspector::ResourceBinding::ResourceType::kDepthMultisampledTexture;
+
+ break;
+ case BindingInfoType::StorageTexture:
+ DAWN_TRY_ASSIGN(info.storageTexture.access,
+ TintResourceTypeToStorageTextureAccess(resource.resource_type));
+ info.storageTexture.format = TintImageFormatToTextureFormat(resource.image_format);
+ info.storageTexture.viewDimension =
+ TintTextureDimensionToTextureViewDimension(resource.dim);
+
+ break;
+ case BindingInfoType::ExternalTexture:
+ break;
+ default:
+ return DAWN_VALIDATION_ERROR("Unknown binding type in Shader");
}
- ResultOrError<EntryPointMetadataTable> ReflectShaderUsingTint(
- const DeviceBase* device,
- const tint::Program* program) {
- ASSERT(program->IsValid());
-
- tint::inspector::Inspector inspector(program);
- std::vector<tint::inspector::EntryPoint> entryPoints = inspector.GetEntryPoints();
- DAWN_INVALID_IF(inspector.has_error(), "Tint Reflection failure: Inspector: %s\n",
- inspector.error());
-
- EntryPointMetadataTable result;
+ BindingNumber bindingNumber(resource.binding);
+ BindGroupIndex bindGroupIndex(resource.bind_group);
+
+ if (DelayedInvalidIf(bindGroupIndex >= kMaxBindGroupsTyped,
+ "The entry-point uses a binding with a group decoration (%u) "
+ "that exceeds the maximum (%u).",
+ resource.bind_group, kMaxBindGroups) ||
+ DelayedInvalidIf(bindingNumber > kMaxBindingNumberTyped,
+ "Binding number (%u) exceeds the maximum binding number (%u).",
+ uint32_t(bindingNumber), uint32_t(kMaxBindingNumberTyped))) {
+ continue;
+ }
- for (const tint::inspector::EntryPoint& entryPoint : entryPoints) {
- std::unique_ptr<EntryPointMetadata> metadata;
- DAWN_TRY_ASSIGN_CONTEXT(metadata,
- ReflectEntryPointUsingTint(device, &inspector, entryPoint),
- "processing entry point \"%s\".", entryPoint.name);
+ const auto& [binding, inserted] =
+ metadata->bindings[bindGroupIndex].emplace(bindingNumber, info);
+ DAWN_INVALID_IF(!inserted,
+ "Entry-point has a duplicate binding for (group:%u, binding:%u).",
+ resource.binding, resource.bind_group);
+ }
- ASSERT(result.count(entryPoint.name) == 0);
- result[entryPoint.name] = std::move(metadata);
- }
- return std::move(result);
- }
- } // anonymous namespace
+ std::vector<tint::inspector::SamplerTexturePair> samplerTextureUses =
+ inspector->GetSamplerTextureUses(entryPoint.name);
+ metadata->samplerTexturePairs.reserve(samplerTextureUses.size());
+ std::transform(samplerTextureUses.begin(), samplerTextureUses.end(),
+ std::back_inserter(metadata->samplerTexturePairs),
+ [](const tint::inspector::SamplerTexturePair& pair) {
+ EntryPointMetadata::SamplerTexturePair result;
+ result.sampler = {BindGroupIndex(pair.sampler_binding_point.group),
+ BindingNumber(pair.sampler_binding_point.binding)};
+ result.texture = {BindGroupIndex(pair.texture_binding_point.group),
+ BindingNumber(pair.texture_binding_point.binding)};
+ return result;
+ });
- ShaderModuleParseResult::ShaderModuleParseResult() = default;
- ShaderModuleParseResult::~ShaderModuleParseResult() = default;
+#undef DelayedInvalidIf
+ return std::move(metadata);
+}
- ShaderModuleParseResult::ShaderModuleParseResult(ShaderModuleParseResult&& rhs) = default;
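+// Checks each WGSL extension enabled by the program against the device's extension allow
+// list, accumulating a diagnostic and returning a validation error for disallowed ones.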
+MaybeError ValidateWGSLProgramExtension(const DeviceBase* device,
+ const WGSLExtensionSet* enabledExtensions,
+ OwnedCompilationMessages* outMessages) {
+ const WGSLExtensionSet& extensionAllowList = device->GetWGSLExtensionAllowList();
- ShaderModuleParseResult& ShaderModuleParseResult::operator=(ShaderModuleParseResult&& rhs) =
- default;
+ bool hasDisallowedExtension = false;
+ tint::diag::List messages;
- bool ShaderModuleParseResult::HasParsedShader() const {
- return tintProgram != nullptr;
+ for (const std::string& extension : *enabledExtensions) {
+ if (extensionAllowList.count(extension)) {
+ continue;
+ }
+ hasDisallowedExtension = true;
+ messages.add_error(tint::diag::System::Program,
+ "Extension " + extension + " is not allowed on the Device.");
}
- // TintSource is a PIMPL container for a tint::Source::File, which needs to be kept alive for as
- // long as tint diagnostics are inspected / printed.
- class TintSource {
- public:
- template <typename... ARGS>
- explicit TintSource(ARGS&&... args) : file(std::forward<ARGS>(args)...) {
+ if (hasDisallowedExtension) {
+ if (outMessages != nullptr) {
+ outMessages->AddMessages(messages);
}
+ return DAWN_MAKE_ERROR(InternalErrorType::Validation,
+ "Shader module uses extension(s) not enabled for its device.");
+ }
- tint::Source::File file;
- };
+ return {};
+}
- MaybeError ValidateShaderModuleDescriptor(DeviceBase* device,
- const ShaderModuleDescriptor* descriptor,
- ShaderModuleParseResult* parseResult,
- OwnedCompilationMessages* outMessages) {
- ASSERT(parseResult != nullptr);
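+// Runs Tint reflection over the program: records the WGSL extensions it enables, validates
+// them against the device, and fills the per-entry-point metadata table.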
+MaybeError ReflectShaderUsingTint(const DeviceBase* device,
+ const tint::Program* program,
+ OwnedCompilationMessages* compilationMessages,
+ EntryPointMetadataTable* entryPointMetadataTable,
+ WGSLExtensionSet* enabledWGSLExtensions) {
+ ASSERT(program->IsValid());
- const ChainedStruct* chainedDescriptor = descriptor->nextInChain;
- DAWN_INVALID_IF(chainedDescriptor == nullptr,
- "Shader module descriptor missing chained descriptor");
+ tint::inspector::Inspector inspector(program);
- // For now only a single SPIRV or WGSL subdescriptor is allowed.
- DAWN_TRY(ValidateSingleSType(chainedDescriptor, wgpu::SType::ShaderModuleSPIRVDescriptor,
- wgpu::SType::ShaderModuleWGSLDescriptor));
+ ASSERT(enabledWGSLExtensions->empty());
+ auto usedExtensionNames = inspector.GetUsedExtensionNames();
+ for (std::string name : usedExtensionNames) {
+ enabledWGSLExtensions->insert(name);
+ }
+ DAWN_TRY(ValidateWGSLProgramExtension(device, enabledWGSLExtensions, compilationMessages));
- ScopedTintICEHandler scopedICEHandler(device);
+ std::vector<tint::inspector::EntryPoint> entryPoints = inspector.GetEntryPoints();
+ DAWN_INVALID_IF(inspector.has_error(), "Tint Reflection failure: Inspector: %s\n",
+ inspector.error());
- const ShaderModuleSPIRVDescriptor* spirvDesc = nullptr;
- FindInChain(chainedDescriptor, &spirvDesc);
- const ShaderModuleWGSLDescriptor* wgslDesc = nullptr;
- FindInChain(chainedDescriptor, &wgslDesc);
+ for (const tint::inspector::EntryPoint& entryPoint : entryPoints) {
+ std::unique_ptr<EntryPointMetadata> metadata;
+ DAWN_TRY_ASSIGN_CONTEXT(metadata,
+ ReflectEntryPointUsingTint(device, &inspector, entryPoint),
+ "processing entry point \"%s\".", entryPoint.name);
- // We have a temporary toggle to force the SPIRV ingestion to go through a WGSL
- // intermediate step. It is done by switching the spirvDesc for a wgslDesc below.
- ShaderModuleWGSLDescriptor newWgslDesc;
- std::string newWgslCode;
- if (spirvDesc && device->IsToggleEnabled(Toggle::ForceWGSLStep)) {
+ ASSERT(entryPointMetadataTable->count(entryPoint.name) == 0);
+ (*entryPointMetadataTable)[entryPoint.name] = std::move(metadata);
+ }
+ return {};
+}
+} // anonymous namespace
+
+ShaderModuleParseResult::ShaderModuleParseResult() = default;
+ShaderModuleParseResult::~ShaderModuleParseResult() = default;
+
+ShaderModuleParseResult::ShaderModuleParseResult(ShaderModuleParseResult&& rhs) = default;
+
+ShaderModuleParseResult& ShaderModuleParseResult::operator=(ShaderModuleParseResult&& rhs) =
+ default;
+
+bool ShaderModuleParseResult::HasParsedShader() const {
+ return tintProgram != nullptr;
+}
+
+// TintSource is a PIMPL container for a tint::Source::File, which needs to be kept alive for as
+// long as tint diagnostics are inspected / printed.
+class TintSource {
+ public:
+ template <typename... ARGS>
+ explicit TintSource(ARGS&&... args) : file(std::forward<ARGS>(args)...) {}
+
+ tint::Source::File file;
+};
+
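+// Validates that the descriptor chains exactly one of the SPIR-V or WGSL sub-descriptors and
+// parses the shader source into a tint::Program stored in parseResult.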
+MaybeError ValidateAndParseShaderModule(DeviceBase* device,
+ const ShaderModuleDescriptor* descriptor,
+ ShaderModuleParseResult* parseResult,
+ OwnedCompilationMessages* outMessages) {
+ ASSERT(parseResult != nullptr);
+
+ const ChainedStruct* chainedDescriptor = descriptor->nextInChain;
+ DAWN_INVALID_IF(chainedDescriptor == nullptr,
+ "Shader module descriptor missing chained descriptor");
+
+ // For now only a single SPIRV or WGSL subdescriptor is allowed.
+ DAWN_TRY(ValidateSingleSType(chainedDescriptor, wgpu::SType::ShaderModuleSPIRVDescriptor,
+ wgpu::SType::ShaderModuleWGSLDescriptor));
+
+ ScopedTintICEHandler scopedICEHandler(device);
+
+ const ShaderModuleSPIRVDescriptor* spirvDesc = nullptr;
+ FindInChain(chainedDescriptor, &spirvDesc);
+ const ShaderModuleWGSLDescriptor* wgslDesc = nullptr;
+ FindInChain(chainedDescriptor, &wgslDesc);
+
+ // We have a temporary toggle to force the SPIRV ingestion to go through a WGSL
+ // intermediate step. It is done by switching the spirvDesc for a wgslDesc below.
+ ShaderModuleWGSLDescriptor newWgslDesc;
+ std::string newWgslCode;
+ if (spirvDesc && device->IsToggleEnabled(Toggle::ForceWGSLStep)) {
#if TINT_BUILD_WGSL_WRITER
- std::vector<uint32_t> spirv(spirvDesc->code, spirvDesc->code + spirvDesc->codeSize);
- tint::Program program;
- DAWN_TRY_ASSIGN(program, ParseSPIRV(spirv, outMessages));
+ std::vector<uint32_t> spirv(spirvDesc->code, spirvDesc->code + spirvDesc->codeSize);
+ tint::Program program;
+ DAWN_TRY_ASSIGN(program, ParseSPIRV(spirv, outMessages));
- tint::writer::wgsl::Options options;
- auto result = tint::writer::wgsl::Generate(&program, options);
- DAWN_INVALID_IF(!result.success, "Tint WGSL failure: Generator: %s", result.error);
+ tint::writer::wgsl::Options options;
+ auto result = tint::writer::wgsl::Generate(&program, options);
+ DAWN_INVALID_IF(!result.success, "Tint WGSL failure: Generator: %s", result.error);
- newWgslCode = std::move(result.wgsl);
- newWgslDesc.source = newWgslCode.c_str();
+ newWgslCode = std::move(result.wgsl);
+ newWgslDesc.source = newWgslCode.c_str();
- spirvDesc = nullptr;
- wgslDesc = &newWgslDesc;
+ spirvDesc = nullptr;
+ wgslDesc = &newWgslDesc;
#else
- device->EmitLog(
- WGPULoggingType_Info,
- "Toggle::ForceWGSLStep skipped because TINT_BUILD_WGSL_WRITER is not defined\n");
+ device->EmitLog(
+ WGPULoggingType_Info,
+ "Toggle::ForceWGSLStep skipped because TINT_BUILD_WGSL_WRITER is not defined\n");
#endif
- }
+ }
- if (spirvDesc) {
- DAWN_INVALID_IF(device->IsToggleEnabled(Toggle::DisallowSpirv),
- "SPIR-V is disallowed.");
-
- std::vector<uint32_t> spirv(spirvDesc->code, spirvDesc->code + spirvDesc->codeSize);
- tint::Program program;
- DAWN_TRY_ASSIGN(program, ParseSPIRV(spirv, outMessages));
- parseResult->tintProgram = std::make_unique<tint::Program>(std::move(program));
- } else if (wgslDesc) {
- auto tintSource = std::make_unique<TintSource>("", wgslDesc->source);
-
- if (device->IsToggleEnabled(Toggle::DumpShaders)) {
- std::ostringstream dumpedMsg;
- dumpedMsg << "// Dumped WGSL:" << std::endl << wgslDesc->source;
- device->EmitLog(WGPULoggingType_Info, dumpedMsg.str().c_str());
- }
+ if (spirvDesc) {
+ DAWN_INVALID_IF(device->IsToggleEnabled(Toggle::DisallowSpirv), "SPIR-V is disallowed.");
+
+ std::vector<uint32_t> spirv(spirvDesc->code, spirvDesc->code + spirvDesc->codeSize);
+ tint::Program program;
+ DAWN_TRY_ASSIGN(program, ParseSPIRV(spirv, outMessages));
+ parseResult->tintProgram = std::make_unique<tint::Program>(std::move(program));
+ } else if (wgslDesc) {
+ auto tintSource = std::make_unique<TintSource>("", wgslDesc->source);
- tint::Program program;
- DAWN_TRY_ASSIGN(program, ParseWGSL(&tintSource->file, outMessages));
- parseResult->tintProgram = std::make_unique<tint::Program>(std::move(program));
- parseResult->tintSource = std::move(tintSource);
+ if (device->IsToggleEnabled(Toggle::DumpShaders)) {
+ std::ostringstream dumpedMsg;
+ dumpedMsg << "// Dumped WGSL:" << std::endl << wgslDesc->source;
+ device->EmitLog(WGPULoggingType_Info, dumpedMsg.str().c_str());
}
- return {};
+ tint::Program program;
+ DAWN_TRY_ASSIGN(program, ParseWGSL(&tintSource->file, outMessages));
+ parseResult->tintProgram = std::make_unique<tint::Program>(std::move(program));
+ parseResult->tintSource = std::move(tintSource);
}
- RequiredBufferSizes ComputeRequiredBufferSizesForLayout(const EntryPointMetadata& entryPoint,
- const PipelineLayoutBase* layout) {
- RequiredBufferSizes bufferSizes;
- for (BindGroupIndex group : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
- bufferSizes[group] = GetBindGroupMinBufferSizes(entryPoint.bindings[group],
- layout->GetBindGroupLayout(group));
- }
+ return {};
+}
- return bufferSizes;
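+// Computes, for each bind group used by the pipeline layout, the minimum buffer binding
+// sizes required by the entry point's reflected buffer bindings.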
+RequiredBufferSizes ComputeRequiredBufferSizesForLayout(const EntryPointMetadata& entryPoint,
+ const PipelineLayoutBase* layout) {
+ RequiredBufferSizes bufferSizes;
+ for (BindGroupIndex group : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
+ bufferSizes[group] = GetBindGroupMinBufferSizes(entryPoint.bindings[group],
+ layout->GetBindGroupLayout(group));
}
- ResultOrError<tint::Program> RunTransforms(tint::transform::Transform* transform,
- const tint::Program* program,
- const tint::transform::DataMap& inputs,
- tint::transform::DataMap* outputs,
- OwnedCompilationMessages* outMessages) {
- tint::transform::Output output = transform->Run(program, inputs);
- if (outMessages != nullptr) {
- outMessages->AddMessages(output.program.Diagnostics());
- }
- DAWN_INVALID_IF(!output.program.IsValid(), "Tint program failure: %s\n",
- output.program.Diagnostics().str());
- if (outputs != nullptr) {
- *outputs = std::move(output.data);
- }
- return std::move(output.program);
+ return bufferSizes;
+}
+
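+// Runs a single Tint transform on the program, forwarding its diagnostics to outMessages and
+// returning the transformed program, or an error if the result is invalid.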
+ResultOrError<tint::Program> RunTransforms(tint::transform::Transform* transform,
+ const tint::Program* program,
+ const tint::transform::DataMap& inputs,
+ tint::transform::DataMap* outputs,
+ OwnedCompilationMessages* outMessages) {
+ tint::transform::Output output = transform->Run(program, inputs);
+ if (outMessages != nullptr) {
+ outMessages->AddMessages(output.program.Diagnostics());
}
-
- void AddVertexPullingTransformConfig(const RenderPipelineBase& renderPipeline,
- const std::string& entryPoint,
- BindGroupIndex pullingBufferBindingSet,
- tint::transform::DataMap* transformInputs) {
- tint::transform::VertexPulling::Config cfg;
- cfg.entry_point_name = entryPoint;
- cfg.pulling_group = static_cast<uint32_t>(pullingBufferBindingSet);
-
- cfg.vertex_state.resize(renderPipeline.GetVertexBufferCount());
- for (VertexBufferSlot slot : IterateBitSet(renderPipeline.GetVertexBufferSlotsUsed())) {
- const VertexBufferInfo& dawnInfo = renderPipeline.GetVertexBuffer(slot);
- tint::transform::VertexBufferLayoutDescriptor* tintInfo =
- &cfg.vertex_state[static_cast<uint8_t>(slot)];
-
- tintInfo->array_stride = dawnInfo.arrayStride;
- tintInfo->step_mode = ToTintVertexStepMode(dawnInfo.stepMode);
- }
-
- for (VertexAttributeLocation location :
- IterateBitSet(renderPipeline.GetAttributeLocationsUsed())) {
- const VertexAttributeInfo& dawnInfo = renderPipeline.GetAttribute(location);
- tint::transform::VertexAttributeDescriptor tintInfo;
- tintInfo.format = ToTintVertexFormat(dawnInfo.format);
- tintInfo.offset = dawnInfo.offset;
- tintInfo.shader_location = static_cast<uint32_t>(static_cast<uint8_t>(location));
-
- uint8_t vertexBufferSlot = static_cast<uint8_t>(dawnInfo.vertexBufferSlot);
- cfg.vertex_state[vertexBufferSlot].attributes.push_back(tintInfo);
- }
-
- transformInputs->Add<tint::transform::VertexPulling::Config>(cfg);
+ DAWN_INVALID_IF(!output.program.IsValid(), "Tint program failure: %s\n",
+ output.program.Diagnostics().str());
+ if (outputs != nullptr) {
+ *outputs = std::move(output.data);
}
-
- MaybeError ValidateCompatibilityWithPipelineLayout(DeviceBase* device,
- const EntryPointMetadata& entryPoint,
- const PipelineLayoutBase* layout) {
- for (BindGroupIndex group : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
- DAWN_TRY_CONTEXT(ValidateCompatibilityWithBindGroupLayout(
- device, group, entryPoint, layout->GetBindGroupLayout(group)),
- "validating the entry-point's compatibility for group %u with %s",
- static_cast<uint32_t>(group), layout->GetBindGroupLayout(group));
- }
-
- for (BindGroupIndex group : IterateBitSet(~layout->GetBindGroupLayoutsMask())) {
- DAWN_INVALID_IF(entryPoint.bindings[group].size() > 0,
- "The entry-point uses bindings in group %u but %s doesn't have a "
- "BindGroupLayout for this index",
- static_cast<uint32_t>(group), layout);
- }
-
- // Validate that filtering samplers are not used with unfilterable textures.
- for (const auto& pair : entryPoint.samplerTexturePairs) {
- const BindGroupLayoutBase* samplerBGL = layout->GetBindGroupLayout(pair.sampler.group);
- const BindingInfo& samplerInfo =
- samplerBGL->GetBindingInfo(samplerBGL->GetBindingIndex(pair.sampler.binding));
- if (samplerInfo.sampler.type != wgpu::SamplerBindingType::Filtering) {
- continue;
- }
- const BindGroupLayoutBase* textureBGL = layout->GetBindGroupLayout(pair.texture.group);
- const BindingInfo& textureInfo =
- textureBGL->GetBindingInfo(textureBGL->GetBindingIndex(pair.texture.binding));
-
- ASSERT(textureInfo.bindingType != BindingInfoType::Buffer &&
- textureInfo.bindingType != BindingInfoType::Sampler &&
- textureInfo.bindingType != BindingInfoType::StorageTexture);
-
- if (textureInfo.bindingType != BindingInfoType::Texture) {
- continue;
- }
-
- // Uint/sint can't be statically used with a sampler, so they any
- // texture bindings reflected must be float or depth textures. If
- // the shader uses a float/depth texture but the bind group layout
- // specifies a uint/sint texture binding,
- // |ValidateCompatibilityWithBindGroupLayout| will fail since the
- // sampleType does not match.
- ASSERT(textureInfo.texture.sampleType != wgpu::TextureSampleType::Undefined &&
- textureInfo.texture.sampleType != wgpu::TextureSampleType::Uint &&
- textureInfo.texture.sampleType != wgpu::TextureSampleType::Sint);
-
- DAWN_INVALID_IF(
- textureInfo.texture.sampleType == wgpu::TextureSampleType::UnfilterableFloat,
- "Texture binding (group:%u, binding:%u) is %s but used statically with a sampler "
- "(group:%u, binding:%u) that's %s",
- static_cast<uint32_t>(pair.texture.group),
- static_cast<uint32_t>(pair.texture.binding),
- wgpu::TextureSampleType::UnfilterableFloat,
- static_cast<uint32_t>(pair.sampler.group),
- static_cast<uint32_t>(pair.sampler.binding), wgpu::SamplerBindingType::Filtering);
- }
-
- return {};
+ return std::move(output.program);
+}
+
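+// Builds the Tint VertexPulling transform configuration from the render pipeline's vertex
+// buffer and attribute layouts and adds it to transformInputs.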
+void AddVertexPullingTransformConfig(const RenderPipelineBase& renderPipeline,
+ const std::string& entryPoint,
+ BindGroupIndex pullingBufferBindingSet,
+ tint::transform::DataMap* transformInputs) {
+ tint::transform::VertexPulling::Config cfg;
+ cfg.entry_point_name = entryPoint;
+ cfg.pulling_group = static_cast<uint32_t>(pullingBufferBindingSet);
+
+ cfg.vertex_state.resize(renderPipeline.GetVertexBufferCount());
+ for (VertexBufferSlot slot : IterateBitSet(renderPipeline.GetVertexBufferSlotsUsed())) {
+ const VertexBufferInfo& dawnInfo = renderPipeline.GetVertexBuffer(slot);
+ tint::transform::VertexBufferLayoutDescriptor* tintInfo =
+ &cfg.vertex_state[static_cast<uint8_t>(slot)];
+
+ tintInfo->array_stride = dawnInfo.arrayStride;
+ tintInfo->step_mode = ToTintVertexStepMode(dawnInfo.stepMode);
}
- // ShaderModuleBase
-
- ShaderModuleBase::ShaderModuleBase(DeviceBase* device,
- const ShaderModuleDescriptor* descriptor,
- ApiObjectBase::UntrackedByDeviceTag tag)
- : ApiObjectBase(device, descriptor->label), mType(Type::Undefined) {
- ASSERT(descriptor->nextInChain != nullptr);
- const ShaderModuleSPIRVDescriptor* spirvDesc = nullptr;
- FindInChain(descriptor->nextInChain, &spirvDesc);
- const ShaderModuleWGSLDescriptor* wgslDesc = nullptr;
- FindInChain(descriptor->nextInChain, &wgslDesc);
- ASSERT(spirvDesc || wgslDesc);
-
- if (spirvDesc) {
- mType = Type::Spirv;
- mOriginalSpirv.assign(spirvDesc->code, spirvDesc->code + spirvDesc->codeSize);
- } else if (wgslDesc) {
- mType = Type::Wgsl;
- mWgsl = std::string(wgslDesc->source);
- }
- }
+ for (VertexAttributeLocation location :
+ IterateBitSet(renderPipeline.GetAttributeLocationsUsed())) {
+ const VertexAttributeInfo& dawnInfo = renderPipeline.GetAttribute(location);
+ tint::transform::VertexAttributeDescriptor tintInfo;
+ tintInfo.format = ToTintVertexFormat(dawnInfo.format);
+ tintInfo.offset = dawnInfo.offset;
+ tintInfo.shader_location = static_cast<uint32_t>(static_cast<uint8_t>(location));
- ShaderModuleBase::ShaderModuleBase(DeviceBase* device, const ShaderModuleDescriptor* descriptor)
- : ShaderModuleBase(device, descriptor, kUntrackedByDevice) {
- TrackInDevice();
+ uint8_t vertexBufferSlot = static_cast<uint8_t>(dawnInfo.vertexBufferSlot);
+ cfg.vertex_state[vertexBufferSlot].attributes.push_back(tintInfo);
}
- ShaderModuleBase::ShaderModuleBase(DeviceBase* device)
- : ApiObjectBase(device, kLabelNotImplemented) {
- TrackInDevice();
+ transformInputs->Add<tint::transform::VertexPulling::Config>(cfg);
+}
+
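+// Checks that the entry point's reflected bindings are compatible with the pipeline layout's
+// bind group layouts, and that filtering samplers are never statically used with
+// unfilterable-float textures.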
+MaybeError ValidateCompatibilityWithPipelineLayout(DeviceBase* device,
+ const EntryPointMetadata& entryPoint,
+ const PipelineLayoutBase* layout) {
+ for (BindGroupIndex group : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
+ DAWN_TRY_CONTEXT(ValidateCompatibilityWithBindGroupLayout(
+ device, group, entryPoint, layout->GetBindGroupLayout(group)),
+ "validating the entry-point's compatibility for group %u with %s",
+ static_cast<uint32_t>(group), layout->GetBindGroupLayout(group));
}
- ShaderModuleBase::ShaderModuleBase(DeviceBase* device, ObjectBase::ErrorTag tag)
- : ApiObjectBase(device, tag), mType(Type::Undefined) {
+ for (BindGroupIndex group : IterateBitSet(~layout->GetBindGroupLayoutsMask())) {
+ DAWN_INVALID_IF(entryPoint.bindings[group].size() > 0,
+ "The entry-point uses bindings in group %u but %s doesn't have a "
+ "BindGroupLayout for this index",
+ static_cast<uint32_t>(group), layout);
}
- ShaderModuleBase::~ShaderModuleBase() = default;
-
- void ShaderModuleBase::DestroyImpl() {
- if (IsCachedReference()) {
- // Do not uncache the actual cached object if we are a blueprint.
- GetDevice()->UncacheShaderModule(this);
+ // Validate that filtering samplers are not used with unfilterable textures.
+ for (const auto& pair : entryPoint.samplerTexturePairs) {
+ const BindGroupLayoutBase* samplerBGL = layout->GetBindGroupLayout(pair.sampler.group);
+ const BindingInfo& samplerInfo =
+ samplerBGL->GetBindingInfo(samplerBGL->GetBindingIndex(pair.sampler.binding));
+ if (samplerInfo.sampler.type != wgpu::SamplerBindingType::Filtering) {
+ continue;
}
- }
+ const BindGroupLayoutBase* textureBGL = layout->GetBindGroupLayout(pair.texture.group);
+ const BindingInfo& textureInfo =
+ textureBGL->GetBindingInfo(textureBGL->GetBindingIndex(pair.texture.binding));
- // static
- Ref<ShaderModuleBase> ShaderModuleBase::MakeError(DeviceBase* device) {
- return AcquireRef(new ShaderModuleBase(device, ObjectBase::kError));
- }
+ ASSERT(textureInfo.bindingType != BindingInfoType::Buffer &&
+ textureInfo.bindingType != BindingInfoType::Sampler &&
+ textureInfo.bindingType != BindingInfoType::StorageTexture);
- ObjectType ShaderModuleBase::GetType() const {
- return ObjectType::ShaderModule;
- }
+ if (textureInfo.bindingType != BindingInfoType::Texture) {
+ continue;
+ }
- bool ShaderModuleBase::HasEntryPoint(const std::string& entryPoint) const {
- return mEntryPoints.count(entryPoint) > 0;
+        // Uint/sint can't be statically used with a sampler, so any texture
+        // bindings reflected here must be float or depth textures. If the
+        // shader uses a float/depth texture but the bind group layout
+        // specifies a uint/sint texture binding,
+        // |ValidateCompatibilityWithBindGroupLayout| will fail since the
+        // sampleType does not match.
+ ASSERT(textureInfo.texture.sampleType != wgpu::TextureSampleType::Undefined &&
+ textureInfo.texture.sampleType != wgpu::TextureSampleType::Uint &&
+ textureInfo.texture.sampleType != wgpu::TextureSampleType::Sint);
+
+ DAWN_INVALID_IF(
+ textureInfo.texture.sampleType == wgpu::TextureSampleType::UnfilterableFloat,
+ "Texture binding (group:%u, binding:%u) is %s but used statically with a sampler "
+ "(group:%u, binding:%u) that's %s",
+ static_cast<uint32_t>(pair.texture.group), static_cast<uint32_t>(pair.texture.binding),
+ wgpu::TextureSampleType::UnfilterableFloat, static_cast<uint32_t>(pair.sampler.group),
+ static_cast<uint32_t>(pair.sampler.binding), wgpu::SamplerBindingType::Filtering);
}
- const EntryPointMetadata& ShaderModuleBase::GetEntryPoint(const std::string& entryPoint) const {
- ASSERT(HasEntryPoint(entryPoint));
- return *mEntryPoints.at(entryPoint);
+ return {};
+}
+
+// ShaderModuleBase
+
+ShaderModuleBase::ShaderModuleBase(DeviceBase* device,
+ const ShaderModuleDescriptor* descriptor,
+ ApiObjectBase::UntrackedByDeviceTag tag)
+ : ApiObjectBase(device, descriptor->label), mType(Type::Undefined) {
+ ASSERT(descriptor->nextInChain != nullptr);
+ const ShaderModuleSPIRVDescriptor* spirvDesc = nullptr;
+ FindInChain(descriptor->nextInChain, &spirvDesc);
+ const ShaderModuleWGSLDescriptor* wgslDesc = nullptr;
+ FindInChain(descriptor->nextInChain, &wgslDesc);
+ ASSERT(spirvDesc || wgslDesc);
+
+ if (spirvDesc) {
+ mType = Type::Spirv;
+ mOriginalSpirv.assign(spirvDesc->code, spirvDesc->code + spirvDesc->codeSize);
+ } else if (wgslDesc) {
+ mType = Type::Wgsl;
+ mWgsl = std::string(wgslDesc->source);
}
+}
- size_t ShaderModuleBase::ComputeContentHash() {
- ObjectContentHasher recorder;
- recorder.Record(mType);
- recorder.Record(mOriginalSpirv);
- recorder.Record(mWgsl);
- return recorder.GetContentHash();
- }
+ShaderModuleBase::ShaderModuleBase(DeviceBase* device, const ShaderModuleDescriptor* descriptor)
+ : ShaderModuleBase(device, descriptor, kUntrackedByDevice) {
+ TrackInDevice();
+}
- bool ShaderModuleBase::EqualityFunc::operator()(const ShaderModuleBase* a,
- const ShaderModuleBase* b) const {
- return a->mType == b->mType && a->mOriginalSpirv == b->mOriginalSpirv &&
- a->mWgsl == b->mWgsl;
- }
+ShaderModuleBase::ShaderModuleBase(DeviceBase* device)
+ : ApiObjectBase(device, kLabelNotImplemented) {
+ TrackInDevice();
+}
- const tint::Program* ShaderModuleBase::GetTintProgram() const {
- ASSERT(mTintProgram);
- return mTintProgram.get();
- }
+ShaderModuleBase::ShaderModuleBase(DeviceBase* device, ObjectBase::ErrorTag tag)
+ : ApiObjectBase(device, tag), mType(Type::Undefined) {}
- void ShaderModuleBase::APIGetCompilationInfo(wgpu::CompilationInfoCallback callback,
- void* userdata) {
- if (callback == nullptr) {
- return;
- }
+ShaderModuleBase::~ShaderModuleBase() = default;
- callback(WGPUCompilationInfoRequestStatus_Success,
- mCompilationMessages->GetCompilationInfo(), userdata);
+void ShaderModuleBase::DestroyImpl() {
+ if (IsCachedReference()) {
+ // Do not uncache the actual cached object if we are a blueprint.
+ GetDevice()->UncacheShaderModule(this);
}
-
- void ShaderModuleBase::InjectCompilationMessages(
- std::unique_ptr<OwnedCompilationMessages> compilationMessages) {
- // TODO(dawn:944): ensure the InjectCompilationMessages is properly handled for shader
- // module returned from cache.
- // InjectCompilationMessages should be called only once for a shader module, after it is
- // created. However currently InjectCompilationMessages may be called on a shader module
- // returned from cache rather than newly created, and violate the rule. We just skip the
- // injection in this case for now, but a proper solution including ensure the cache goes
- // before the validation is required.
- if (mCompilationMessages != nullptr) {
- return;
- }
- // Move the compilationMessages into the shader module and emit the tint errors and warnings
- mCompilationMessages = std::move(compilationMessages);
-
- // Emit the formatted Tint errors and warnings within the moved compilationMessages
- const std::vector<std::string>& formattedTintMessages =
- mCompilationMessages->GetFormattedTintMessages();
- if (formattedTintMessages.empty()) {
- return;
- }
- std::ostringstream t;
- for (auto pMessage = formattedTintMessages.begin(); pMessage != formattedTintMessages.end();
- pMessage++) {
- if (pMessage != formattedTintMessages.begin()) {
- t << std::endl;
- }
- t << *pMessage;
- }
- this->GetDevice()->EmitLog(WGPULoggingType_Warning, t.str().c_str());
+}
+
+// static
+Ref<ShaderModuleBase> ShaderModuleBase::MakeError(DeviceBase* device) {
+ return AcquireRef(new ShaderModuleBase(device, ObjectBase::kError));
+}
+
+ObjectType ShaderModuleBase::GetType() const {
+ return ObjectType::ShaderModule;
+}
+
+bool ShaderModuleBase::HasEntryPoint(const std::string& entryPoint) const {
+ return mEntryPoints.count(entryPoint) > 0;
+}
+
+const EntryPointMetadata& ShaderModuleBase::GetEntryPoint(const std::string& entryPoint) const {
+ ASSERT(HasEntryPoint(entryPoint));
+ return *mEntryPoints.at(entryPoint);
+}
+
+size_t ShaderModuleBase::ComputeContentHash() {
+ ObjectContentHasher recorder;
+ recorder.Record(mType);
+ recorder.Record(mOriginalSpirv);
+ recorder.Record(mWgsl);
+ return recorder.GetContentHash();
+}
+
+bool ShaderModuleBase::EqualityFunc::operator()(const ShaderModuleBase* a,
+ const ShaderModuleBase* b) const {
+ return a->mType == b->mType && a->mOriginalSpirv == b->mOriginalSpirv && a->mWgsl == b->mWgsl;
+}
+
+const tint::Program* ShaderModuleBase::GetTintProgram() const {
+ ASSERT(mTintProgram);
+ return mTintProgram.get();
+}
+
+void ShaderModuleBase::APIGetCompilationInfo(wgpu::CompilationInfoCallback callback,
+ void* userdata) {
+ if (callback == nullptr) {
+ return;
}
- OwnedCompilationMessages* ShaderModuleBase::GetCompilationMessages() const {
- return mCompilationMessages.get();
+ callback(WGPUCompilationInfoRequestStatus_Success, mCompilationMessages->GetCompilationInfo(),
+ userdata);
+}
+
+void ShaderModuleBase::InjectCompilationMessages(
+ std::unique_ptr<OwnedCompilationMessages> compilationMessages) {
+    // TODO(dawn:944): ensure that InjectCompilationMessages is properly handled for shader
+    // modules returned from the cache.
+    // InjectCompilationMessages should be called only once for a shader module, after it is
+    // created. However, it may currently be called on a shader module returned from the cache
+    // rather than on a newly created one, violating that rule. We just skip the injection in
+    // that case for now, but a proper solution is still required, including making sure the
+    // cache lookup happens before validation.
+ if (mCompilationMessages != nullptr) {
+ return;
}
-
- // static
- void ShaderModuleBase::AddExternalTextureTransform(const PipelineLayoutBase* layout,
- tint::transform::Manager* transformManager,
- tint::transform::DataMap* transformInputs) {
- tint::transform::MultiplanarExternalTexture::BindingsMap newBindingsMap;
- for (BindGroupIndex i : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
- const BindGroupLayoutBase* bgl = layout->GetBindGroupLayout(i);
-
- for (const auto& expansion : bgl->GetExternalTextureBindingExpansionMap()) {
- newBindingsMap[{static_cast<uint32_t>(i),
- static_cast<uint32_t>(expansion.second.plane0)}] = {
- {static_cast<uint32_t>(i), static_cast<uint32_t>(expansion.second.plane1)},
- {static_cast<uint32_t>(i), static_cast<uint32_t>(expansion.second.params)}};
- }
- }
-
- if (!newBindingsMap.empty()) {
- transformManager->Add<tint::transform::MultiplanarExternalTexture>();
- transformInputs->Add<tint::transform::MultiplanarExternalTexture::NewBindingPoints>(
- newBindingsMap);
+ // Move the compilationMessages into the shader module and emit the tint errors and warnings
+ mCompilationMessages = std::move(compilationMessages);
+
+ // Emit the formatted Tint errors and warnings within the moved compilationMessages
+ const std::vector<std::string>& formattedTintMessages =
+ mCompilationMessages->GetFormattedTintMessages();
+ if (formattedTintMessages.empty()) {
+ return;
+ }
+ std::ostringstream t;
+ for (auto pMessage = formattedTintMessages.begin(); pMessage != formattedTintMessages.end();
+ pMessage++) {
+ if (pMessage != formattedTintMessages.begin()) {
+ t << std::endl;
}
+ t << *pMessage;
}
-
- MaybeError ShaderModuleBase::InitializeBase(ShaderModuleParseResult* parseResult) {
- mTintProgram = std::move(parseResult->tintProgram);
- mTintSource = std::move(parseResult->tintSource);
-
- DAWN_TRY_ASSIGN(mEntryPoints, ReflectShaderUsingTint(GetDevice(), mTintProgram.get()));
- return {};
+ this->GetDevice()->EmitLog(WGPULoggingType_Warning, t.str().c_str());
+}
+
+OwnedCompilationMessages* ShaderModuleBase::GetCompilationMessages() const {
+ return mCompilationMessages.get();
+}
+
+// static
+void ShaderModuleBase::AddExternalTextureTransform(const PipelineLayoutBase* layout,
+ tint::transform::Manager* transformManager,
+ tint::transform::DataMap* transformInputs) {
+ tint::transform::MultiplanarExternalTexture::BindingsMap newBindingsMap;
+ for (BindGroupIndex i : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
+ const BindGroupLayoutBase* bgl = layout->GetBindGroupLayout(i);
+
+ for (const auto& expansion : bgl->GetExternalTextureBindingExpansionMap()) {
+ newBindingsMap[{static_cast<uint32_t>(i),
+ static_cast<uint32_t>(expansion.second.plane0)}] = {
+ {static_cast<uint32_t>(i), static_cast<uint32_t>(expansion.second.plane1)},
+ {static_cast<uint32_t>(i), static_cast<uint32_t>(expansion.second.params)}};
+ }
}
- size_t PipelineLayoutEntryPointPairHashFunc::operator()(
- const PipelineLayoutEntryPointPair& pair) const {
- size_t hash = 0;
- HashCombine(&hash, pair.first, pair.second);
- return hash;
+ if (!newBindingsMap.empty()) {
+ transformManager->Add<tint::transform::MultiplanarExternalTexture>();
+ transformInputs->Add<tint::transform::MultiplanarExternalTexture::NewBindingPoints>(
+ newBindingsMap);
}
+}
+
+MaybeError ShaderModuleBase::InitializeBase(ShaderModuleParseResult* parseResult,
+ OwnedCompilationMessages* compilationMessages) {
+ mTintProgram = std::move(parseResult->tintProgram);
+ mTintSource = std::move(parseResult->tintSource);
+
+ DAWN_TRY(ReflectShaderUsingTint(GetDevice(), mTintProgram.get(), compilationMessages,
+ &mEntryPoints, &mEnabledWGSLExtensions));
+ return {};
+}
+
+size_t PipelineLayoutEntryPointPairHashFunc::operator()(
+ const PipelineLayoutEntryPointPair& pair) const {
+ size_t hash = 0;
+ HashCombine(&hash, pair.first, pair.second);
+ return hash;
+}
} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/ShaderModule.h b/chromium/third_party/dawn/src/dawn/native/ShaderModule.h
index e69082146b0..1df775cfbae 100644
--- a/chromium/third_party/dawn/src/dawn/native/ShaderModule.h
+++ b/chromium/third_party/dawn/src/dawn/native/ShaderModule.h
@@ -15,6 +15,15 @@
#ifndef SRC_DAWN_NATIVE_SHADERMODULE_H_
#define SRC_DAWN_NATIVE_SHADERMODULE_H_
+#include <bitset>
+#include <map>
+#include <memory>
+#include <string>
+#include <unordered_map>
+#include <unordered_set>
+#include <utility>
+#include <vector>
+
#include "dawn/common/Constants.h"
#include "dawn/common/ityp_array.h"
#include "dawn/native/BindingInfo.h"
@@ -29,285 +38,281 @@
#include "dawn/native/VertexFormat.h"
#include "dawn/native/dawn_platform.h"
-#include <bitset>
-#include <map>
-#include <unordered_map>
-#include <unordered_set>
-#include <vector>
-
namespace tint {
- class Program;
+class Program;
- namespace transform {
- class DataMap;
- class Manager;
- class Transform;
- class VertexPulling;
- } // namespace transform
+namespace transform {
+class DataMap;
+class Manager;
+class Transform;
+class VertexPulling;
+} // namespace transform
} // namespace tint
namespace dawn::native {
- struct EntryPointMetadata;
-
- // Base component type of an inter-stage variable
- enum class InterStageComponentType {
- Sint,
- Uint,
- Float,
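+// The set of WGSL extension names enabled by a shader module.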
+using WGSLExtensionSet = std::unordered_set<std::string>;
+struct EntryPointMetadata;
+
+// Base component type of an inter-stage variable
+enum class InterStageComponentType {
+ Sint,
+ Uint,
+ Float,
+};
+
+enum class InterpolationType {
+ Perspective,
+ Linear,
+ Flat,
+};
+
+enum class InterpolationSampling {
+ None,
+ Center,
+ Centroid,
+ Sample,
+};
+
+using PipelineLayoutEntryPointPair = std::pair<const PipelineLayoutBase*, std::string>;
+struct PipelineLayoutEntryPointPairHashFunc {
+ size_t operator()(const PipelineLayoutEntryPointPair& pair) const;
+};
+
+// A map from name to EntryPointMetadata.
+using EntryPointMetadataTable =
+ std::unordered_map<std::string, std::unique_ptr<EntryPointMetadata>>;
+
+// Source for a tint program
+class TintSource;
+
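+// Result of parsing a shader module: the tint::Program plus, for WGSL input, the TintSource
+// that must stay alive while diagnostics may still be printed.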
+struct ShaderModuleParseResult {
+ ShaderModuleParseResult();
+ ~ShaderModuleParseResult();
+ ShaderModuleParseResult(ShaderModuleParseResult&& rhs);
+ ShaderModuleParseResult& operator=(ShaderModuleParseResult&& rhs);
+
+ bool HasParsedShader() const;
+
+ std::unique_ptr<tint::Program> tintProgram;
+ std::unique_ptr<TintSource> tintSource;
+};
+
+MaybeError ValidateAndParseShaderModule(DeviceBase* device,
+ const ShaderModuleDescriptor* descriptor,
+ ShaderModuleParseResult* parseResult,
+ OwnedCompilationMessages* outMessages);
+MaybeError ValidateCompatibilityWithPipelineLayout(DeviceBase* device,
+ const EntryPointMetadata& entryPoint,
+ const PipelineLayoutBase* layout);
+
+RequiredBufferSizes ComputeRequiredBufferSizesForLayout(const EntryPointMetadata& entryPoint,
+ const PipelineLayoutBase* layout);
+ResultOrError<tint::Program> RunTransforms(tint::transform::Transform* transform,
+ const tint::Program* program,
+ const tint::transform::DataMap& inputs,
+ tint::transform::DataMap* outputs,
+ OwnedCompilationMessages* messages);
+
+/// Creates and adds the tint::transform::VertexPulling::Config to transformInputs.
+void AddVertexPullingTransformConfig(const RenderPipelineBase& renderPipeline,
+ const std::string& entryPoint,
+ BindGroupIndex pullingBufferBindingSet,
+ tint::transform::DataMap* transformInputs);
+
+// Mirrors wgpu::SamplerBindingLayout but instead stores a single boolean
+// for isComparison instead of a wgpu::SamplerBindingType enum.
+struct ShaderSamplerBindingInfo {
+ bool isComparison;
+};
+
+// Mirrors wgpu::TextureBindingLayout but instead has a set of compatible sampleTypes
+// instead of a single enum.
+struct ShaderTextureBindingInfo {
+ SampleTypeBit compatibleSampleTypes;
+ wgpu::TextureViewDimension viewDimension;
+ bool multisampled;
+};
+
+// Per-binding shader metadata contains some SPIRV specific information in addition to
+// most of the frontend per-binding information.
+struct ShaderBindingInfo {
+ // The SPIRV ID of the resource.
+ uint32_t id;
+ uint32_t base_type_id;
+
+ BindingNumber binding;
+ BindingInfoType bindingType;
+
+ BufferBindingLayout buffer;
+ ShaderSamplerBindingInfo sampler;
+ ShaderTextureBindingInfo texture;
+ StorageTextureBindingLayout storageTexture;
+};
+
+using BindingGroupInfoMap = std::map<BindingNumber, ShaderBindingInfo>;
+using BindingInfoArray = ityp::array<BindGroupIndex, BindingGroupInfoMap, kMaxBindGroups>;
+
+// The WebGPU overridable constants only support these scalar types
+union OverridableConstantScalar {
+    // Use int32_t for the boolean so that the full 32 bits are initialized.
+ int32_t b;
+ float f32;
+ int32_t i32;
+ uint32_t u32;
+};
+
+// Contains all the reflection data for a valid (ShaderModule, entryPoint, stage). It is
+// stored in the ShaderModuleBase and destroyed only when the shader module is destroyed, so
+// pointers to EntryPointMetadata are safe to store as long as you also keep a Ref to the
+// ShaderModuleBase.
+struct EntryPointMetadata {
+ // It is valid for a shader to contain entry points that go over limits. To keep this
+ // structure with packed arrays and bitsets, we still validate against limits when
+ // doing reflection, but store the errors in this vector, for later use if the application
+ // tries to use the entry point.
+ std::vector<std::string> infringedLimitErrors;
+
+ // bindings[G][B] is the reflection data for the binding defined with
+ // @group(G) @binding(B) in WGSL / SPIRV.
+ BindingInfoArray bindings;
+
+ struct SamplerTexturePair {
+ BindingSlot sampler;
+ BindingSlot texture;
};
+ std::vector<SamplerTexturePair> samplerTexturePairs;
- enum class InterpolationType {
- Perspective,
- Linear,
- Flat,
- };
+ // The set of vertex attributes this entryPoint uses.
+ ityp::array<VertexAttributeLocation, VertexFormatBaseType, kMaxVertexAttributes>
+ vertexInputBaseTypes;
+ ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes> usedVertexInputs;
- enum class InterpolationSampling {
- None,
- Center,
- Centroid,
- Sample,
+ // An array to record the basic types (float, int and uint) of the fragment shader outputs.
+ struct FragmentOutputVariableInfo {
+ wgpu::TextureComponentType baseType;
+ uint8_t componentCount;
};
-
- using PipelineLayoutEntryPointPair = std::pair<PipelineLayoutBase*, std::string>;
- struct PipelineLayoutEntryPointPairHashFunc {
- size_t operator()(const PipelineLayoutEntryPointPair& pair) const;
+ ityp::array<ColorAttachmentIndex, FragmentOutputVariableInfo, kMaxColorAttachments>
+ fragmentOutputVariables;
+ ityp::bitset<ColorAttachmentIndex, kMaxColorAttachments> fragmentOutputsWritten;
+
+ struct InterStageVariableInfo {
+ InterStageComponentType baseType;
+ uint32_t componentCount;
+ InterpolationType interpolationType;
+ InterpolationSampling interpolationSampling;
};
+ // Now that we only support vertex and fragment stages, there can't be both inter-stage
+ // inputs and outputs in one shader stage.
+ std::bitset<kMaxInterStageShaderVariables> usedInterStageVariables;
+ std::array<InterStageVariableInfo, kMaxInterStageShaderVariables> interStageVariables;
- // A map from name to EntryPointMetadata.
- using EntryPointMetadataTable =
- std::unordered_map<std::string, std::unique_ptr<EntryPointMetadata>>;
+    // The local workgroup size declared for a compute entry point (or 0s otherwise).
+ Origin3D localWorkgroupSize;
- // Source for a tint program
- class TintSource;
+    // The shader stage for this entry point.
+ SingleShaderStage stage;
- struct ShaderModuleParseResult {
- ShaderModuleParseResult();
- ~ShaderModuleParseResult();
- ShaderModuleParseResult(ShaderModuleParseResult&& rhs);
- ShaderModuleParseResult& operator=(ShaderModuleParseResult&& rhs);
-
- bool HasParsedShader() const;
-
- std::unique_ptr<tint::Program> tintProgram;
- std::unique_ptr<TintSource> tintSource;
- };
-
- MaybeError ValidateShaderModuleDescriptor(DeviceBase* device,
- const ShaderModuleDescriptor* descriptor,
- ShaderModuleParseResult* parseResult,
- OwnedCompilationMessages* outMessages);
- MaybeError ValidateCompatibilityWithPipelineLayout(DeviceBase* device,
- const EntryPointMetadata& entryPoint,
- const PipelineLayoutBase* layout);
-
- RequiredBufferSizes ComputeRequiredBufferSizesForLayout(const EntryPointMetadata& entryPoint,
- const PipelineLayoutBase* layout);
- ResultOrError<tint::Program> RunTransforms(tint::transform::Transform* transform,
- const tint::Program* program,
- const tint::transform::DataMap& inputs,
- tint::transform::DataMap* outputs,
- OwnedCompilationMessages* messages);
-
- /// Creates and adds the tint::transform::VertexPulling::Config to transformInputs.
- void AddVertexPullingTransformConfig(const RenderPipelineBase& renderPipeline,
- const std::string& entryPoint,
- BindGroupIndex pullingBufferBindingSet,
- tint::transform::DataMap* transformInputs);
-
- // Mirrors wgpu::SamplerBindingLayout but instead stores a single boolean
- // for isComparison instead of a wgpu::SamplerBindingType enum.
- struct ShaderSamplerBindingInfo {
- bool isComparison;
- };
-
- // Mirrors wgpu::TextureBindingLayout but instead has a set of compatible sampleTypes
- // instead of a single enum.
- struct ShaderTextureBindingInfo {
- SampleTypeBit compatibleSampleTypes;
- wgpu::TextureViewDimension viewDimension;
- bool multisampled;
- };
-
- // Per-binding shader metadata contains some SPIRV specific information in addition to
- // most of the frontend per-binding information.
- struct ShaderBindingInfo {
- // The SPIRV ID of the resource.
+ struct OverridableConstant {
uint32_t id;
- uint32_t base_type_id;
+ // Match tint::inspector::OverridableConstant::Type
+ // Bool is defined as a macro on linux X11 and cannot compile
+ enum class Type { Boolean, Float32, Uint32, Int32 } type;
+
+        // If the constant does not have an initializer in the shader, then the pipeline
+        // stage is required to provide a constant record that initializes its
+        // value.
+ bool isInitialized;
+
+        // Store the default-initialized value in the shader.
+        // This is used by the Metal backend as the function_constant does not have default values.
+ // Initialized when isInitialized == true
+ OverridableConstantScalar defaultValue;
+ };
- BindingNumber binding;
- BindingInfoType bindingType;
+ using OverridableConstantsMap = std::unordered_map<std::string, OverridableConstant>;
- BufferBindingLayout buffer;
- ShaderSamplerBindingInfo sampler;
- ShaderTextureBindingInfo texture;
- StorageTextureBindingLayout storageTexture;
- };
+ // Map identifier to overridable constant
+ // Identifier is unique: either the variable name or the numeric ID if specified
+ OverridableConstantsMap overridableConstants;
- using BindingGroupInfoMap = std::map<BindingNumber, ShaderBindingInfo>;
- using BindingInfoArray = ityp::array<BindGroupIndex, BindingGroupInfoMap, kMaxBindGroups>;
+ // Overridable constants that are not initialized in shaders
+    // They need value initialization from the pipeline stage, or it is a validation error.
+ std::unordered_set<std::string> uninitializedOverridableConstants;
- // The WebGPU overridable constants only support these scalar types
- union OverridableConstantScalar {
- // Use int32_t for boolean to initialize the full 32bit
- int32_t b;
- float f32;
- int32_t i32;
- uint32_t u32;
- };
+    // Store constants with shader-initialized values as well.
+    // This is used by the Metal backend to set values with default initializers that are not
+ // overridden
+ std::unordered_set<std::string> initializedOverridableConstants;
- // Contains all the reflection data for a valid (ShaderModule, entryPoint, stage). They are
- // stored in the ShaderModuleBase and destroyed only when the shader program is destroyed so
- // pointers to EntryPointMetadata are safe to store as long as you also keep a Ref to the
- // ShaderModuleBase.
- struct EntryPointMetadata {
- // It is valid for a shader to contain entry points that go over limits. To keep this
- // structure with packed arrays and bitsets, we still validate against limits when
- // doing reflection, but store the errors in this vector, for later use if the application
- // tries to use the entry point.
- std::vector<std::string> infringedLimitErrors;
-
- // bindings[G][B] is the reflection data for the binding defined with
- // @group(G) @binding(B) in WGSL / SPIRV.
- BindingInfoArray bindings;
-
- struct SamplerTexturePair {
- BindingSlot sampler;
- BindingSlot texture;
- };
- std::vector<SamplerTexturePair> samplerTexturePairs;
-
- // The set of vertex attributes this entryPoint uses.
- ityp::array<VertexAttributeLocation, VertexFormatBaseType, kMaxVertexAttributes>
- vertexInputBaseTypes;
- ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes> usedVertexInputs;
-
- // An array to record the basic types (float, int and uint) of the fragment shader outputs.
- struct FragmentOutputVariableInfo {
- wgpu::TextureComponentType baseType;
- uint8_t componentCount;
- };
- ityp::array<ColorAttachmentIndex, FragmentOutputVariableInfo, kMaxColorAttachments>
- fragmentOutputVariables;
- ityp::bitset<ColorAttachmentIndex, kMaxColorAttachments> fragmentOutputsWritten;
-
- struct InterStageVariableInfo {
- InterStageComponentType baseType;
- uint32_t componentCount;
- InterpolationType interpolationType;
- InterpolationSampling interpolationSampling;
- };
- // Now that we only support vertex and fragment stages, there can't be both inter-stage
- // inputs and outputs in one shader stage.
- std::bitset<kMaxInterStageShaderVariables> usedInterStageVariables;
- std::array<InterStageVariableInfo, kMaxInterStageShaderVariables> interStageVariables;
-
- // The local workgroup size declared for a compute entry point (or 0s otehrwise).
- Origin3D localWorkgroupSize;
-
- // The shader stage for this binding.
- SingleShaderStage stage;
-
- struct OverridableConstant {
- uint32_t id;
- // Match tint::inspector::OverridableConstant::Type
- // Bool is defined as a macro on linux X11 and cannot compile
- enum class Type { Boolean, Float32, Uint32, Int32 } type;
-
- // If the constant doesn't not have an initializer in the shader
- // Then it is required for the pipeline stage to have a constant record to initialize a
- // value
- bool isInitialized;
-
- // Store the default initialized value in shader
- // This is used by metal backend as the function_constant does not have dafault values
- // Initialized when isInitialized == true
- OverridableConstantScalar defaultValue;
- };
-
- using OverridableConstantsMap = std::unordered_map<std::string, OverridableConstant>;
-
- // Map identifier to overridable constant
- // Identifier is unique: either the variable name or the numeric ID if specified
- OverridableConstantsMap overridableConstants;
-
- // Overridable constants that are not initialized in shaders
- // They need value initialization from pipeline stage or it is a validation error
- std::unordered_set<std::string> uninitializedOverridableConstants;
-
- // Store constants with shader initialized values as well
- // This is used by metal backend to set values with default initializers that are not
- // overridden
- std::unordered_set<std::string> initializedOverridableConstants;
-
- bool usesNumWorkgroups = false;
- };
+ bool usesNumWorkgroups = false;
+};
- class ShaderModuleBase : public ApiObjectBase, public CachedObject {
- public:
- ShaderModuleBase(DeviceBase* device,
- const ShaderModuleDescriptor* descriptor,
- ApiObjectBase::UntrackedByDeviceTag tag);
- ShaderModuleBase(DeviceBase* device, const ShaderModuleDescriptor* descriptor);
- ~ShaderModuleBase() override;
+class ShaderModuleBase : public ApiObjectBase, public CachedObject {
+ public:
+ ShaderModuleBase(DeviceBase* device,
+ const ShaderModuleDescriptor* descriptor,
+ ApiObjectBase::UntrackedByDeviceTag tag);
+ ShaderModuleBase(DeviceBase* device, const ShaderModuleDescriptor* descriptor);
+ ~ShaderModuleBase() override;
- static Ref<ShaderModuleBase> MakeError(DeviceBase* device);
+ static Ref<ShaderModuleBase> MakeError(DeviceBase* device);
- ObjectType GetType() const override;
+ ObjectType GetType() const override;
- // Return true iff the program has an entrypoint called `entryPoint`.
- bool HasEntryPoint(const std::string& entryPoint) const;
+ // Return true iff the program has an entrypoint called `entryPoint`.
+ bool HasEntryPoint(const std::string& entryPoint) const;
- // Return the metadata for the given `entryPoint`. HasEntryPoint with the same argument
- // must be true.
- const EntryPointMetadata& GetEntryPoint(const std::string& entryPoint) const;
+ // Return the metadata for the given `entryPoint`. HasEntryPoint with the same argument
+ // must be true.
+ const EntryPointMetadata& GetEntryPoint(const std::string& entryPoint) const;
- // Functions necessary for the unordered_set<ShaderModuleBase*>-based cache.
- size_t ComputeContentHash() override;
+ // Functions necessary for the unordered_set<ShaderModuleBase*>-based cache.
+ size_t ComputeContentHash() override;
- struct EqualityFunc {
- bool operator()(const ShaderModuleBase* a, const ShaderModuleBase* b) const;
- };
+ struct EqualityFunc {
+ bool operator()(const ShaderModuleBase* a, const ShaderModuleBase* b) const;
+ };
- const tint::Program* GetTintProgram() const;
+ const tint::Program* GetTintProgram() const;
- void APIGetCompilationInfo(wgpu::CompilationInfoCallback callback, void* userdata);
+ void APIGetCompilationInfo(wgpu::CompilationInfoCallback callback, void* userdata);
- void InjectCompilationMessages(
- std::unique_ptr<OwnedCompilationMessages> compilationMessages);
+ void InjectCompilationMessages(std::unique_ptr<OwnedCompilationMessages> compilationMessages);
- OwnedCompilationMessages* GetCompilationMessages() const;
+ OwnedCompilationMessages* GetCompilationMessages() const;
- protected:
- // Constructor used only for mocking and testing.
- explicit ShaderModuleBase(DeviceBase* device);
- void DestroyImpl() override;
+ protected:
+ // Constructor used only for mocking and testing.
+ explicit ShaderModuleBase(DeviceBase* device);
+ void DestroyImpl() override;
- MaybeError InitializeBase(ShaderModuleParseResult* parseResult);
+ MaybeError InitializeBase(ShaderModuleParseResult* parseResult,
+ OwnedCompilationMessages* compilationMessages);
- static void AddExternalTextureTransform(const PipelineLayoutBase* layout,
- tint::transform::Manager* transformManager,
- tint::transform::DataMap* transformInputs);
+ static void AddExternalTextureTransform(const PipelineLayoutBase* layout,
+ tint::transform::Manager* transformManager,
+ tint::transform::DataMap* transformInputs);
- private:
- ShaderModuleBase(DeviceBase* device, ObjectBase::ErrorTag tag);
+ private:
+ ShaderModuleBase(DeviceBase* device, ObjectBase::ErrorTag tag);
- // The original data in the descriptor for caching.
- enum class Type { Undefined, Spirv, Wgsl };
- Type mType;
- std::vector<uint32_t> mOriginalSpirv;
- std::string mWgsl;
+ // The original data in the descriptor for caching.
+ enum class Type { Undefined, Spirv, Wgsl };
+ Type mType;
+ std::vector<uint32_t> mOriginalSpirv;
+ std::string mWgsl;
- EntryPointMetadataTable mEntryPoints;
- std::unique_ptr<tint::Program> mTintProgram;
- std::unique_ptr<TintSource> mTintSource; // Keep the tint::Source::File alive
+ EntryPointMetadataTable mEntryPoints;
+ WGSLExtensionSet mEnabledWGSLExtensions;
+ std::unique_ptr<tint::Program> mTintProgram;
+ std::unique_ptr<TintSource> mTintSource; // Keep the tint::Source::File alive
- std::unique_ptr<OwnedCompilationMessages> mCompilationMessages;
- };
+ std::unique_ptr<OwnedCompilationMessages> mCompilationMessages;
+};
} // namespace dawn::native
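
For illustration, a minimal sketch of how the reflection data declared above is typically consumed (not taken from this patch; the entry-point name "main" and the group index are hypothetical). Uninitialized overridable constants listed in uninitializedOverridableConstants must be supplied by the pipeline stage, otherwise pipeline creation fails validation.

    // Count the bindings that one entry point declares in bind group 0.
    size_t CountGroup0Bindings(const dawn::native::ShaderModuleBase* module) {
        using namespace dawn::native;
        // GetEntryPoint() may only be called once HasEntryPoint() has returned true.
        if (!module->HasEntryPoint("main")) {
            return 0;
        }
        const EntryPointMetadata& metadata = module->GetEntryPoint("main");
        // bindings[G][B] mirrors @group(G) @binding(B) in the WGSL source, so the size of
        // bindings[BindGroupIndex(0)] is the number of bindings declared in group 0.
        return metadata.bindings[BindGroupIndex(0)].size();
    }
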
diff --git a/chromium/third_party/dawn/src/dawn/native/SpirvValidation.cpp b/chromium/third_party/dawn/src/dawn/native/SpirvValidation.cpp
index 72eb8c15c51..b40a80390e3 100644
--- a/chromium/third_party/dawn/src/dawn/native/SpirvValidation.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/SpirvValidation.cpp
@@ -14,61 +14,59 @@
#include "dawn/native/SpirvValidation.h"
-#include "dawn/native/Device.h"
-
#include <spirv-tools/libspirv.hpp>
+
#include <sstream>
+#include <string>
+
+#include "dawn/native/Device.h"
namespace dawn::native {
- MaybeError ValidateSpirv(DeviceBase* device,
- const std::vector<uint32_t>& spirv,
- bool dumpSpirv) {
- spvtools::SpirvTools spirvTools(SPV_ENV_VULKAN_1_1);
- spirvTools.SetMessageConsumer([device](spv_message_level_t level, const char*,
- const spv_position_t& position,
- const char* message) {
- WGPULoggingType wgpuLogLevel;
- switch (level) {
- case SPV_MSG_FATAL:
- case SPV_MSG_INTERNAL_ERROR:
- case SPV_MSG_ERROR:
- wgpuLogLevel = WGPULoggingType_Error;
- break;
- case SPV_MSG_WARNING:
- wgpuLogLevel = WGPULoggingType_Warning;
- break;
- case SPV_MSG_INFO:
- wgpuLogLevel = WGPULoggingType_Info;
- break;
- default:
- wgpuLogLevel = WGPULoggingType_Error;
- break;
- }
+MaybeError ValidateSpirv(DeviceBase* device, const std::vector<uint32_t>& spirv, bool dumpSpirv) {
+ spvtools::SpirvTools spirvTools(SPV_ENV_VULKAN_1_1);
+ spirvTools.SetMessageConsumer([device](spv_message_level_t level, const char*,
+ const spv_position_t& position, const char* message) {
+ WGPULoggingType wgpuLogLevel;
+ switch (level) {
+ case SPV_MSG_FATAL:
+ case SPV_MSG_INTERNAL_ERROR:
+ case SPV_MSG_ERROR:
+ wgpuLogLevel = WGPULoggingType_Error;
+ break;
+ case SPV_MSG_WARNING:
+ wgpuLogLevel = WGPULoggingType_Warning;
+ break;
+ case SPV_MSG_INFO:
+ wgpuLogLevel = WGPULoggingType_Info;
+ break;
+ default:
+ wgpuLogLevel = WGPULoggingType_Error;
+ break;
+ }
- std::ostringstream ss;
- ss << "SPIRV line " << position.index << ": " << message << std::endl;
- device->EmitLog(wgpuLogLevel, ss.str().c_str());
- });
+ std::ostringstream ss;
+ ss << "SPIRV line " << position.index << ": " << message << std::endl;
+ device->EmitLog(wgpuLogLevel, ss.str().c_str());
+ });
- const bool valid = spirvTools.Validate(spirv);
- if (dumpSpirv || !valid) {
- std::ostringstream dumpedMsg;
- std::string disassembly;
- if (spirvTools.Disassemble(
- spirv, &disassembly,
- SPV_BINARY_TO_TEXT_OPTION_FRIENDLY_NAMES | SPV_BINARY_TO_TEXT_OPTION_INDENT)) {
- dumpedMsg << "/* Dumped generated SPIRV disassembly */" << std::endl << disassembly;
- } else {
- dumpedMsg << "/* Failed to disassemble generated SPIRV */";
- }
- device->EmitLog(WGPULoggingType_Info, dumpedMsg.str().c_str());
+ const bool valid = spirvTools.Validate(spirv);
+ if (dumpSpirv || !valid) {
+ std::ostringstream dumpedMsg;
+ std::string disassembly;
+ if (spirvTools.Disassemble(
+ spirv, &disassembly,
+ SPV_BINARY_TO_TEXT_OPTION_FRIENDLY_NAMES | SPV_BINARY_TO_TEXT_OPTION_INDENT)) {
+ dumpedMsg << "/* Dumped generated SPIRV disassembly */" << std::endl << disassembly;
+ } else {
+ dumpedMsg << "/* Failed to disassemble generated SPIRV */";
}
+ device->EmitLog(WGPULoggingType_Info, dumpedMsg.str().c_str());
+ }
- DAWN_INVALID_IF(!valid,
- "Produced invalid SPIRV. Please file a bug at https://crbug.com/tint.");
+ DAWN_INVALID_IF(!valid, "Produced invalid SPIRV. Please file a bug at https://crbug.com/tint.");
- return {};
- }
+ return {};
+}
} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/SpirvValidation.h b/chromium/third_party/dawn/src/dawn/native/SpirvValidation.h
index 0b4f3689382..b50d38af777 100644
--- a/chromium/third_party/dawn/src/dawn/native/SpirvValidation.h
+++ b/chromium/third_party/dawn/src/dawn/native/SpirvValidation.h
@@ -15,17 +15,15 @@
#ifndef SRC_DAWN_NATIVE_SPIRVVALIDATION_H_
#define SRC_DAWN_NATIVE_SPIRVVALIDATION_H_
-#include "dawn/native/Error.h"
-
#include <vector>
+#include "dawn/native/Error.h"
+
namespace dawn::native {
- class DeviceBase;
+class DeviceBase;
- MaybeError ValidateSpirv(DeviceBase* device,
- const std::vector<uint32_t>& spirv,
- bool dumpSpirv);
+MaybeError ValidateSpirv(DeviceBase* device, const std::vector<uint32_t>& spirv, bool dumpSpirv);
} // namespace dawn::native
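
A minimal call-site sketch for the declaration above (not part of the patch; the wrapper name and the origin of `spirv` are hypothetical):

    // Validate SPIRV produced by Tint before handing it to the driver. Passing
    // dumpSpirv = true also logs the disassembly when validation succeeds; on
    // failure the disassembly is logged either way (see SpirvValidation.cpp above).
    dawn::native::MaybeError CheckGeneratedSpirv(dawn::native::DeviceBase* device,
                                                 const std::vector<uint32_t>& spirv) {
        return dawn::native::ValidateSpirv(device, spirv, /*dumpSpirv=*/false);
    }
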
diff --git a/chromium/third_party/dawn/src/dawn/native/StagingBuffer.cpp b/chromium/third_party/dawn/src/dawn/native/StagingBuffer.cpp
index a6c258ca5aa..2b40323c602 100644
--- a/chromium/third_party/dawn/src/dawn/native/StagingBuffer.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/StagingBuffer.cpp
@@ -16,14 +16,13 @@
namespace dawn::native {
- StagingBufferBase::StagingBufferBase(size_t size) : mBufferSize(size) {
- }
+StagingBufferBase::StagingBufferBase(size_t size) : mBufferSize(size) {}
- size_t StagingBufferBase::GetSize() const {
- return mBufferSize;
- }
+size_t StagingBufferBase::GetSize() const {
+ return mBufferSize;
+}
- void* StagingBufferBase::GetMappedPointer() const {
- return mMappedPointer;
- }
+void* StagingBufferBase::GetMappedPointer() const {
+ return mMappedPointer;
+}
} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/StagingBuffer.h b/chromium/third_party/dawn/src/dawn/native/StagingBuffer.h
index 4bda9c665e5..741d2134c08 100644
--- a/chromium/third_party/dawn/src/dawn/native/StagingBuffer.h
+++ b/chromium/third_party/dawn/src/dawn/native/StagingBuffer.h
@@ -19,22 +19,22 @@
namespace dawn::native {
- class StagingBufferBase {
- public:
- explicit StagingBufferBase(size_t size);
- virtual ~StagingBufferBase() = default;
+class StagingBufferBase {
+ public:
+ explicit StagingBufferBase(size_t size);
+ virtual ~StagingBufferBase() = default;
- virtual MaybeError Initialize() = 0;
+ virtual MaybeError Initialize() = 0;
- void* GetMappedPointer() const;
- size_t GetSize() const;
+ void* GetMappedPointer() const;
+ size_t GetSize() const;
- protected:
- void* mMappedPointer = nullptr;
+ protected:
+ void* mMappedPointer = nullptr;
- private:
- const size_t mBufferSize;
- };
+ private:
+ const size_t mBufferSize;
+};
} // namespace dawn::native
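
A minimal sketch of what a backend subclass of StagingBufferBase can look like (not part of the patch; a real backend maps driver memory here instead of heap-allocating):

    #include <cstdint>
    #include <memory>

    // A fake staging buffer that "maps" plain heap memory.
    class FakeStagingBuffer final : public dawn::native::StagingBufferBase {
      public:
        explicit FakeStagingBuffer(size_t size) : StagingBufferBase(size) {}

        dawn::native::MaybeError Initialize() override {
            // Allocate GetSize() bytes and expose them through the protected
            // mMappedPointer so GetMappedPointer() returns a usable pointer.
            mStorage = std::make_unique<uint8_t[]>(GetSize());
            mMappedPointer = mStorage.get();
            return {};
        }

      private:
        std::unique_ptr<uint8_t[]> mStorage;
    };
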
diff --git a/chromium/third_party/dawn/src/dawn/native/Subresource.cpp b/chromium/third_party/dawn/src/dawn/native/Subresource.cpp
index 6ebba9ff944..e8fce4d6b32 100644
--- a/chromium/third_party/dawn/src/dawn/native/Subresource.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/Subresource.cpp
@@ -19,118 +19,116 @@
namespace dawn::native {
- Aspect ConvertSingleAspect(const Format& format, wgpu::TextureAspect aspect) {
- Aspect aspectMask = ConvertAspect(format, aspect);
- ASSERT(HasOneBit(aspectMask));
- return aspectMask;
- }
-
- Aspect ConvertAspect(const Format& format, wgpu::TextureAspect aspect) {
- Aspect aspectMask = SelectFormatAspects(format, aspect);
- ASSERT(aspectMask != Aspect::None);
- return aspectMask;
- }
+Aspect ConvertSingleAspect(const Format& format, wgpu::TextureAspect aspect) {
+ Aspect aspectMask = ConvertAspect(format, aspect);
+ ASSERT(HasOneBit(aspectMask));
+ return aspectMask;
+}
- Aspect ConvertViewAspect(const Format& format, wgpu::TextureAspect aspect) {
- // Color view |format| must be treated as the same plane |aspect|.
- if (format.aspects == Aspect::Color) {
- switch (aspect) {
- case wgpu::TextureAspect::Plane0Only:
- return Aspect::Plane0;
- case wgpu::TextureAspect::Plane1Only:
- return Aspect::Plane1;
- default:
- break;
- }
- }
- return ConvertAspect(format, aspect);
- }
+Aspect ConvertAspect(const Format& format, wgpu::TextureAspect aspect) {
+ Aspect aspectMask = SelectFormatAspects(format, aspect);
+ ASSERT(aspectMask != Aspect::None);
+ return aspectMask;
+}
- Aspect SelectFormatAspects(const Format& format, wgpu::TextureAspect aspect) {
+Aspect ConvertViewAspect(const Format& format, wgpu::TextureAspect aspect) {
+ // Color view |format| must be treated as the same plane |aspect|.
+ if (format.aspects == Aspect::Color) {
switch (aspect) {
- case wgpu::TextureAspect::All:
- return format.aspects;
- case wgpu::TextureAspect::DepthOnly:
- return format.aspects & Aspect::Depth;
- case wgpu::TextureAspect::StencilOnly:
- return format.aspects & Aspect::Stencil;
case wgpu::TextureAspect::Plane0Only:
- return format.aspects & Aspect::Plane0;
+ return Aspect::Plane0;
case wgpu::TextureAspect::Plane1Only:
- return format.aspects & Aspect::Plane1;
+ return Aspect::Plane1;
+ default:
+ break;
}
- UNREACHABLE();
}
+ return ConvertAspect(format, aspect);
+}
- uint8_t GetAspectIndex(Aspect aspect) {
- ASSERT(HasOneBit(aspect));
- switch (aspect) {
- case Aspect::Color:
- case Aspect::Depth:
- case Aspect::Plane0:
- case Aspect::CombinedDepthStencil:
- return 0;
- case Aspect::Plane1:
- case Aspect::Stencil:
- return 1;
- default:
- UNREACHABLE();
- }
+Aspect SelectFormatAspects(const Format& format, wgpu::TextureAspect aspect) {
+ switch (aspect) {
+ case wgpu::TextureAspect::All:
+ return format.aspects;
+ case wgpu::TextureAspect::DepthOnly:
+ return format.aspects & Aspect::Depth;
+ case wgpu::TextureAspect::StencilOnly:
+ return format.aspects & Aspect::Stencil;
+ case wgpu::TextureAspect::Plane0Only:
+ return format.aspects & Aspect::Plane0;
+ case wgpu::TextureAspect::Plane1Only:
+ return format.aspects & Aspect::Plane1;
}
+ UNREACHABLE();
+}
- uint8_t GetAspectCount(Aspect aspects) {
- // TODO(crbug.com/dawn/829): This should use popcount once Dawn has such a function.
- // Note that we can't do a switch because compilers complain that Depth | Stencil is not
- // a valid enum value.
- if (aspects == Aspect::Color || aspects == Aspect::Depth ||
- aspects == Aspect::CombinedDepthStencil) {
+uint8_t GetAspectIndex(Aspect aspect) {
+ ASSERT(HasOneBit(aspect));
+ switch (aspect) {
+ case Aspect::Color:
+ case Aspect::Depth:
+ case Aspect::Plane0:
+ case Aspect::CombinedDepthStencil:
+ return 0;
+ case Aspect::Plane1:
+ case Aspect::Stencil:
return 1;
- } else if (aspects == (Aspect::Plane0 | Aspect::Plane1)) {
- return 2;
- } else if (aspects == Aspect::Stencil) {
- // Fake a the existence of a depth aspect so that the stencil data stays at index 1.
- ASSERT(GetAspectIndex(Aspect::Stencil) == 1);
- return 2;
- } else {
- ASSERT(aspects == (Aspect::Depth | Aspect::Stencil));
- return 2;
- }
+ default:
+ UNREACHABLE();
}
+}
- SubresourceRange::SubresourceRange(Aspect aspects,
- FirstAndCountRange<uint32_t> arrayLayerParam,
- FirstAndCountRange<uint32_t> mipLevelParams)
- : aspects(aspects),
- baseArrayLayer(arrayLayerParam.first),
- layerCount(arrayLayerParam.count),
- baseMipLevel(mipLevelParams.first),
- levelCount(mipLevelParams.count) {
+uint8_t GetAspectCount(Aspect aspects) {
+ // TODO(crbug.com/dawn/829): This should use popcount once Dawn has such a function.
+ // Note that we can't do a switch because compilers complain that Depth | Stencil is not
+ // a valid enum value.
+ if (aspects == Aspect::Color || aspects == Aspect::Depth ||
+ aspects == Aspect::CombinedDepthStencil) {
+ return 1;
+ } else if (aspects == (Aspect::Plane0 | Aspect::Plane1)) {
+ return 2;
+ } else if (aspects == Aspect::Stencil) {
+        // Fake the existence of a depth aspect so that the stencil data stays at index 1.
+ ASSERT(GetAspectIndex(Aspect::Stencil) == 1);
+ return 2;
+ } else {
+ ASSERT(aspects == (Aspect::Depth | Aspect::Stencil));
+ return 2;
}
+}
- SubresourceRange::SubresourceRange()
- : aspects(Aspect::None), baseArrayLayer(0), layerCount(0), baseMipLevel(0), levelCount(0) {
- }
+SubresourceRange::SubresourceRange(Aspect aspects,
+ FirstAndCountRange<uint32_t> arrayLayerParam,
+ FirstAndCountRange<uint32_t> mipLevelParams)
+ : aspects(aspects),
+ baseArrayLayer(arrayLayerParam.first),
+ layerCount(arrayLayerParam.count),
+ baseMipLevel(mipLevelParams.first),
+ levelCount(mipLevelParams.count) {}
- // static
- SubresourceRange SubresourceRange::SingleMipAndLayer(uint32_t baseMipLevel,
- uint32_t baseArrayLayer,
- Aspect aspects) {
- return {aspects, {baseArrayLayer, 1}, {baseMipLevel, 1}};
- }
+SubresourceRange::SubresourceRange()
+ : aspects(Aspect::None), baseArrayLayer(0), layerCount(0), baseMipLevel(0), levelCount(0) {}
- // static
- SubresourceRange SubresourceRange::MakeSingle(Aspect aspect,
- uint32_t baseArrayLayer,
- uint32_t baseMipLevel) {
- ASSERT(HasOneBit(aspect));
- return {aspect, {baseArrayLayer, 1}, {baseMipLevel, 1}};
- }
+// static
+SubresourceRange SubresourceRange::SingleMipAndLayer(uint32_t baseMipLevel,
+ uint32_t baseArrayLayer,
+ Aspect aspects) {
+ return {aspects, {baseArrayLayer, 1}, {baseMipLevel, 1}};
+}
- // static
- SubresourceRange SubresourceRange::MakeFull(Aspect aspects,
- uint32_t layerCount,
- uint32_t levelCount) {
- return {aspects, {0, layerCount}, {0, levelCount}};
- }
+// static
+SubresourceRange SubresourceRange::MakeSingle(Aspect aspect,
+ uint32_t baseArrayLayer,
+ uint32_t baseMipLevel) {
+ ASSERT(HasOneBit(aspect));
+ return {aspect, {baseArrayLayer, 1}, {baseMipLevel, 1}};
+}
+
+// static
+SubresourceRange SubresourceRange::MakeFull(Aspect aspects,
+ uint32_t layerCount,
+ uint32_t levelCount) {
+ return {aspects, {0, layerCount}, {0, levelCount}};
+}
} // namespace dawn::native
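
As a small illustration of the index/count mapping implemented above (not part of the patch), depth data occupies index 0 and stencil data index 1, so the aspect count is 2 even for a stencil-only format. ASSERT below is Dawn's macro from dawn/common/Assert.h.

    void CheckDepthStencilAspectLayout() {
        using namespace dawn::native;
        ASSERT(GetAspectIndex(Aspect::Depth) == 0);
        ASSERT(GetAspectIndex(Aspect::Stencil) == 1);
        ASSERT(GetAspectCount(Aspect::Depth | Aspect::Stencil) == 2);
        // Stencil-only formats fake a depth aspect so the stencil data stays at index 1.
        ASSERT(GetAspectCount(Aspect::Stencil) == 2);
    }
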
diff --git a/chromium/third_party/dawn/src/dawn/native/Subresource.h b/chromium/third_party/dawn/src/dawn/native/Subresource.h
index f985c685a36..473631afc73 100644
--- a/chromium/third_party/dawn/src/dawn/native/Subresource.h
+++ b/chromium/third_party/dawn/src/dawn/native/Subresource.h
@@ -20,92 +20,92 @@
namespace dawn::native {
- // Note: Subresource indices are computed by iterating the aspects in increasing order.
- // D3D12 uses these directly, so the order much match D3D12's indices.
- // - Depth/Stencil textures have Depth as Plane 0, and Stencil as Plane 1.
- enum class Aspect : uint8_t {
- None = 0x0,
- Color = 0x1,
- Depth = 0x2,
- Stencil = 0x4,
-
- // Aspects used to select individual planes in a multi-planar format.
- Plane0 = 0x8,
- Plane1 = 0x10,
-
- // An aspect for that represents the combination of both the depth and stencil aspects. It
- // can be ignored outside of the Vulkan backend.
- CombinedDepthStencil = 0x20,
- };
-
- template <>
- struct EnumBitmaskSize<Aspect> {
- static constexpr unsigned value = 6;
- };
-
- // Convert the TextureAspect to an Aspect mask for the format. ASSERTs if the aspect
- // does not exist in the format.
- // Also ASSERTs if "All" is selected and results in more than one aspect.
- Aspect ConvertSingleAspect(const Format& format, wgpu::TextureAspect aspect);
-
- // Convert the TextureAspect to an Aspect mask for the format. ASSERTs if the aspect
- // does not exist in the format.
- Aspect ConvertAspect(const Format& format, wgpu::TextureAspect aspect);
-
- // Returns the Aspects of the Format that are selected by the wgpu::TextureAspect.
- // Note that this can return Aspect::None if the Format doesn't have any of the
- // selected aspects.
- Aspect SelectFormatAspects(const Format& format, wgpu::TextureAspect aspect);
-
- // Convert TextureAspect to the aspect which corresponds to the view format. This
- // special cases per plane view formats before calling ConvertAspect.
- Aspect ConvertViewAspect(const Format& format, wgpu::TextureAspect aspect);
-
- // Helper struct to make it clear that what the parameters of a range mean.
- template <typename T>
- struct FirstAndCountRange {
- T first;
- T count;
- };
-
- struct SubresourceRange {
- SubresourceRange(Aspect aspects,
- FirstAndCountRange<uint32_t> arrayLayerParam,
- FirstAndCountRange<uint32_t> mipLevelParams);
- SubresourceRange();
-
- Aspect aspects;
- uint32_t baseArrayLayer;
- uint32_t layerCount;
- uint32_t baseMipLevel;
- uint32_t levelCount;
-
- static SubresourceRange SingleMipAndLayer(uint32_t baseMipLevel,
- uint32_t baseArrayLayer,
- Aspect aspects);
- static SubresourceRange MakeSingle(Aspect aspect,
- uint32_t baseArrayLayer,
- uint32_t baseMipLevel);
-
- static SubresourceRange MakeFull(Aspect aspects, uint32_t layerCount, uint32_t levelCount);
- };
-
- // Helper function to use aspects as linear indices in arrays.
- uint8_t GetAspectIndex(Aspect aspect);
- uint8_t GetAspectCount(Aspect aspects);
-
- // The maximum number of planes per format Dawn knows about. Asserts in BuildFormatTable that
- // the per plane index does not exceed the known maximum plane count.
- static constexpr uint32_t kMaxPlanesPerFormat = 3;
+// Note: Subresource indices are computed by iterating the aspects in increasing order.
+// D3D12 uses these directly, so the order must match D3D12's indices.
+// - Depth/Stencil textures have Depth as Plane 0, and Stencil as Plane 1.
+enum class Aspect : uint8_t {
+ None = 0x0,
+ Color = 0x1,
+ Depth = 0x2,
+ Stencil = 0x4,
+
+ // Aspects used to select individual planes in a multi-planar format.
+ Plane0 = 0x8,
+ Plane1 = 0x10,
+
+// An aspect that represents the combination of the depth and stencil aspects. It
+ // can be ignored outside of the Vulkan backend.
+ CombinedDepthStencil = 0x20,
+};
+
+template <>
+struct EnumBitmaskSize<Aspect> {
+ static constexpr unsigned value = 6;
+};
+
+// Convert the TextureAspect to an Aspect mask for the format. ASSERTs if the aspect
+// does not exist in the format.
+// Also ASSERTs if "All" is selected and results in more than one aspect.
+Aspect ConvertSingleAspect(const Format& format, wgpu::TextureAspect aspect);
+
+// Convert the TextureAspect to an Aspect mask for the format. ASSERTs if the aspect
+// does not exist in the format.
+Aspect ConvertAspect(const Format& format, wgpu::TextureAspect aspect);
+
+// Returns the Aspects of the Format that are selected by the wgpu::TextureAspect.
+// Note that this can return Aspect::None if the Format doesn't have any of the
+// selected aspects.
+Aspect SelectFormatAspects(const Format& format, wgpu::TextureAspect aspect);
+
+// Convert TextureAspect to the aspect which corresponds to the view format. This
+// special cases per plane view formats before calling ConvertAspect.
+Aspect ConvertViewAspect(const Format& format, wgpu::TextureAspect aspect);
+
+// Helper struct to make it clear what the parameters of a range mean.
+template <typename T>
+struct FirstAndCountRange {
+ T first;
+ T count;
+};
+
+struct SubresourceRange {
+ SubresourceRange(Aspect aspects,
+ FirstAndCountRange<uint32_t> arrayLayerParam,
+ FirstAndCountRange<uint32_t> mipLevelParams);
+ SubresourceRange();
+
+ Aspect aspects;
+ uint32_t baseArrayLayer;
+ uint32_t layerCount;
+ uint32_t baseMipLevel;
+ uint32_t levelCount;
+
+ static SubresourceRange SingleMipAndLayer(uint32_t baseMipLevel,
+ uint32_t baseArrayLayer,
+ Aspect aspects);
+ static SubresourceRange MakeSingle(Aspect aspect,
+ uint32_t baseArrayLayer,
+ uint32_t baseMipLevel);
+
+ static SubresourceRange MakeFull(Aspect aspects, uint32_t layerCount, uint32_t levelCount);
+};
+
+// Helper function to use aspects as linear indices in arrays.
+uint8_t GetAspectIndex(Aspect aspect);
+uint8_t GetAspectCount(Aspect aspects);
+
+// The maximum number of planes per format Dawn knows about. Asserts in BuildFormatTable that
+// the per plane index does not exceed the known maximum plane count.
+static constexpr uint32_t kMaxPlanesPerFormat = 3;
} // namespace dawn::native
namespace dawn {
- template <>
- struct IsDawnBitmask<dawn::native::Aspect> {
- static constexpr bool enable = true;
- };
+template <>
+struct IsDawnBitmask<dawn::native::Aspect> {
+ static constexpr bool enable = true;
+};
} // namespace dawn
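
A minimal sketch combining the helpers declared above (not part of the patch; `format` is assumed to describe a depth-stencil texture):

    // Build a range selecting mip level 0 of array layer 0 of the depth aspect.
    dawn::native::SubresourceRange FirstDepthSubresource(const dawn::native::Format& format) {
        using namespace dawn::native;
        // ConvertAspect ASSERTs that the requested aspect actually exists in the format.
        Aspect depth = ConvertAspect(format, wgpu::TextureAspect::DepthOnly);
        return SubresourceRange::MakeSingle(depth, /*baseArrayLayer=*/0, /*baseMipLevel=*/0);
    }
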
diff --git a/chromium/third_party/dawn/src/dawn/native/SubresourceStorage.h b/chromium/third_party/dawn/src/dawn/native/SubresourceStorage.h
index 9dc9d99f417..cc3d10dacf0 100644
--- a/chromium/third_party/dawn/src/dawn/native/SubresourceStorage.h
+++ b/chromium/third_party/dawn/src/dawn/native/SubresourceStorage.h
@@ -15,541 +15,534 @@
#ifndef SRC_DAWN_NATIVE_SUBRESOURCESTORAGE_H_
#define SRC_DAWN_NATIVE_SUBRESOURCESTORAGE_H_
-#include "dawn/common/Assert.h"
-#include "dawn/common/TypeTraits.h"
-#include "dawn/native/EnumMaskIterator.h"
-#include "dawn/native/Subresource.h"
-
#include <array>
#include <limits>
#include <memory>
#include <vector>
+#include "dawn/common/Assert.h"
+#include "dawn/common/TypeTraits.h"
+#include "dawn/native/EnumMaskIterator.h"
+#include "dawn/native/Subresource.h"
+
namespace dawn::native {
- // SubresourceStorage<T> acts like a simple map from subresource (aspect, layer, level) to a
- // value of type T except that it tries to compress similar subresources so that algorithms
- // can act on a whole range of subresources at once if they have the same state.
- //
- // For example a very common case to optimize for is the tracking of the usage of texture
- // subresources inside a render pass: the vast majority of texture views will select the whole
- // texture while a small minority will select a sub-range. We want to optimize the common case
- // by setting and checking a single "usage" value when a full subresource is used but at the
- // same time allow per-subresource data when needed.
- //
- // Another example is barrier tracking per-subresource in the backends: it will often happen
- // that during texture upload each mip level will have a different "barrier state". However
- // when the texture is fully uploaded and after it is used for sampling (with a full view) for
- // the first time, the barrier state will likely be the same across all the subresources.
- // That's why some form of "recompression" of subresource state must be possibe.
- //
- // In order to keep the implementation details private and to avoid iterator-hell, this
- // container uses a more functional approach of calling a closure on the interesting ranges.
- // This is for example how to look at the state of all subresources.
+// SubresourceStorage<T> acts like a simple map from subresource (aspect, layer, level) to a
+// value of type T except that it tries to compress similar subresources so that algorithms
+// can act on a whole range of subresources at once if they have the same state.
+//
+// For example a very common case to optimize for is the tracking of the usage of texture
+// subresources inside a render pass: the vast majority of texture views will select the whole
+// texture while a small minority will select a sub-range. We want to optimize the common case
+// by setting and checking a single "usage" value when a full subresource is used but at the
+// same time allow per-subresource data when needed.
+//
+// Another example is barrier tracking per-subresource in the backends: it will often happen
+// that during texture upload each mip level will have a different "barrier state". However
+// when the texture is fully uploaded and after it is used for sampling (with a full view) for
+// the first time, the barrier state will likely be the same across all the subresources.
+// That's why some form of "recompression" of subresource state must be possible.
+//
+// In order to keep the implementation details private and to avoid iterator-hell, this
+// container uses a more functional approach of calling a closure on the interesting ranges.
+// This is for example how to look at the state of all subresources.
+//
+// subresources.Iterate([](const SubresourceRange& range, const T& data) {
+// // Do something with the knowledge that all the subresources in `range` have value
+// // `data`.
+// });
+//
+// SubresourceStorage internally tracks compression state per aspect and then per layer of each
+// aspect. This means that a 2-aspect texture can have the following compression state:
+//
+// - Aspect 0 is fully compressed.
+// - Aspect 1 is partially compressed:
+// - Aspect 1 layer 3 is decompressed.
+// - Aspect 1 layer 0-2 and 4-42 are compressed.
+//
+// A useful model to reason about SubresourceStorage is to represent it as a tree:
+//
+// - SubresourceStorage is the root.
+// |-> Nodes 1 deep represent each aspect. If an aspect is compressed, its node doesn't have
+// any children because the data is constant across all of the subtree.
+// |-> Nodes 2 deep represent layers (for uncompressed aspects). If a layer is compressed,
+// its node doesn't have any children because the data is constant across all of the
+// subtree.
+//     |-> Nodes 3 deep represent individual mip levels (for uncompressed layers).
+//
+// The concept of recompression is the removal of all child nodes of a non-leaf node when the
+// data is constant across them. Decompression is the addition of child nodes to a leaf node
+// and copying of its data to all its children.
+//
+// The choice of having secondary compression for array layers is to optimize for the cases
+// where transfer operations are used to update specific layers of texture with render or
+// transfer operations, while the rest is untouched. It seems much less likely that there
+// would be operations that touch all Nth mips of a 2D array texture without touching the
+// others.
+//
+// There are several hot code paths that create new SubresourceStorage like the tracking of
+// resource usage per-pass. We don't want to allocate a container for the decompressed data
+// unless we have to because it would dramatically lower performance. Instead
+// SubresourceStorage contains an inline array that contains the per-aspect compressed data
+// and only allocates per-subresource storage on aspect decompression.
+//
+// T must be a copyable type that supports equality comparison with ==.
+//
+// The implementation of functions in this file can have a lot of control flow and corner cases
+// so each modification should come with extensive tests and ensure 100% code coverage of the
+// modified functions. See instructions at
+// https://chromium.googlesource.com/chromium/src/+/main/docs/testing/code_coverage.md#local-coverage-script
+// to run the test with code coverage. A command line that worked in the past (with the right
+// GN args for the out/coverage directory in a Chromium checkout) is:
+//
+/*
+ python tools/code_coverage/coverage.py dawn_unittests -b out/coverage -o out/report -c \
+ "out/coverage/dawn_unittests --gtest_filter=SubresourceStorage\*" -f \
+ third_party/dawn/src/dawn/native
+*/
+//
+// TODO(crbug.com/dawn/836): Make the recompression optional, the calling code should know
+// if recompression can happen or not in Update() and Merge()
+template <typename T>
+class SubresourceStorage {
+ public:
+ static_assert(std::is_copy_assignable<T>::value, "T must be copyable");
+ static_assert(HasEqualityOperator<T>::value, "T requires bool operator == (T, T)");
+
+ // Creates the storage with the given "dimensions" and all subresources starting with the
+ // initial value.
+ SubresourceStorage(Aspect aspects,
+ uint32_t arrayLayerCount,
+ uint32_t mipLevelCount,
+ T initialValue = {});
+
+ // Returns the data for a single subresource. Note that the reference returned might be the
+ // same for multiple subresources.
+ const T& Get(Aspect aspect, uint32_t arrayLayer, uint32_t mipLevel) const;
+
+    // Given an iterateFunc that's a function or function-like object that can be called with
+ // arguments of type (const SubresourceRange& range, const T& data) and returns void,
+ // calls it with aggregate ranges if possible, such that each subresource is part of
+ // exactly one of the ranges iterateFunc is called with (and obviously data is the value
+ // stored for that subresource). For example:
//
- // subresources.Iterate([](const SubresourceRange& range, const T& data) {
- // // Do something with the knowledge that all the subresources in `range` have value
- // // `data`.
+ // subresources.Iterate([&](const SubresourceRange& range, const T& data) {
+ // // Do something with range and data.
// });
+ template <typename F>
+ void Iterate(F&& iterateFunc) const;
+
+    // Given an updateFunc that's a function or function-like object that can be called with
+ // arguments of type (const SubresourceRange& range, T* data) and returns void,
+    // calls it with ranges that in aggregate form `range` and passes for each of the
+ // sub-ranges a pointer to modify the value for that sub-range. For example:
//
- // SubresourceStorage internally tracks compression state per aspect and then per layer of each
- // aspect. This means that a 2-aspect texture can have the following compression state:
- //
- // - Aspect 0 is fully compressed.
- // - Aspect 1 is partially compressed:
- // - Aspect 1 layer 3 is decompressed.
- // - Aspect 1 layer 0-2 and 4-42 are compressed.
- //
- // A useful model to reason about SubresourceStorage is to represent is as a tree:
- //
- // - SubresourceStorage is the root.
- // |-> Nodes 1 deep represent each aspect. If an aspect is compressed, its node doesn't have
- // any children because the data is constant across all of the subtree.
- // |-> Nodes 2 deep represent layers (for uncompressed aspects). If a layer is compressed,
- // its node doesn't have any children because the data is constant across all of the
- // subtree.
- // |-> Nodes 3 deep represent individial mip levels (for uncompressed layers).
- //
- // The concept of recompression is the removal of all child nodes of a non-leaf node when the
- // data is constant across them. Decompression is the addition of child nodes to a leaf node
- // and copying of its data to all its children.
- //
- // The choice of having secondary compression for array layers is to optimize for the cases
- // where transfer operations are used to update specific layers of texture with render or
- // transfer operations, while the rest is untouched. It seems much less likely that there
- // would be operations that touch all Nth mips of a 2D array texture without touching the
- // others.
- //
- // There are several hot code paths that create new SubresourceStorage like the tracking of
- // resource usage per-pass. We don't want to allocate a container for the decompressed data
- // unless we have to because it would dramatically lower performance. Instead
- // SubresourceStorage contains an inline array that contains the per-aspect compressed data
- // and only allocates a per-subresource on aspect decompression.
+ // subresources.Update(view->GetRange(), [](const SubresourceRange&, T* data) {
+ // *data |= wgpu::TextureUsage::Stuff;
+ // });
//
- // T must be a copyable type that supports equality comparison with ==.
+ // /!\ WARNING: updateFunc should never use range to compute the update to data otherwise
+ // your code is likely to break when compression happens. Range should only be used for
+ // side effects like using it to compute a Vulkan pipeline barrier.
+ template <typename F>
+ void Update(const SubresourceRange& range, F&& updateFunc);
+
+ // Given a mergeFunc that's a function or a function-like object that can be called with
+ // arguments of type (const SubresourceRange& range, T* data, const U& otherData) and
+    // returns void, calls it with ranges that in aggregate form the full resource and passes
+ // for each of the sub-ranges a pointer to modify the value for that sub-range and the
+ // corresponding value from other for that sub-range. For example:
//
- // The implementation of functions in this file can have a lot of control flow and corner cases
- // so each modification should come with extensive tests and ensure 100% code coverage of the
- // modified functions. See instructions at
- // https://chromium.googlesource.com/chromium/src/+/master/docs/testing/code_coverage.md#local-coverage-script
- // to run the test with code coverage. A command line that worked in the past (with the right
- // GN args for the out/coverage directory in a Chromium checkout) is:
+ // subresources.Merge(otherUsages,
+ // [](const SubresourceRange&, T* data, const T& otherData) {
+ // *data |= otherData;
+ // });
//
- /*
- python tools/code_coverage/coverage.py dawn_unittests -b out/coverage -o out/report -c \
- "out/coverage/dawn_unittests --gtest_filter=SubresourceStorage\*" -f \
- third_party/dawn/src/dawn/native
- */
+ // /!\ WARNING: mergeFunc should never use range to compute the update to data otherwise
+ // your code is likely to break when compression happens. Range should only be used for
+ // side effects like using it to compute a Vulkan pipeline barrier.
+ template <typename U, typename F>
+ void Merge(const SubresourceStorage<U>& other, F&& mergeFunc);
+
+ // Other operations to consider:
//
- // TODO(crbug.com/dawn/836): Make the recompression optional, the calling code should know
- // if recompression can happen or not in Update() and Merge()
- template <typename T>
- class SubresourceStorage {
- public:
- static_assert(std::is_copy_assignable<T>::value, "T must be copyable");
- static_assert(HasEqualityOperator<T>::value, "T requires bool operator == (T, T)");
-
- // Creates the storage with the given "dimensions" and all subresources starting with the
- // initial value.
- SubresourceStorage(Aspect aspects,
- uint32_t arrayLayerCount,
- uint32_t mipLevelCount,
- T initialValue = {});
-
- // Returns the data for a single subresource. Note that the reference returned might be the
- // same for multiple subresources.
- const T& Get(Aspect aspect, uint32_t arrayLayer, uint32_t mipLevel) const;
-
- // Given an iterateFunc that's a function or function-like objet that can be called with
- // arguments of type (const SubresourceRange& range, const T& data) and returns void,
- // calls it with aggregate ranges if possible, such that each subresource is part of
- // exactly one of the ranges iterateFunc is called with (and obviously data is the value
- // stored for that subresource). For example:
- //
- // subresources.Iterate([&](const SubresourceRange& range, const T& data) {
- // // Do something with range and data.
- // });
- template <typename F>
- void Iterate(F&& iterateFunc) const;
-
- // Given an updateFunc that's a function or function-like objet that can be called with
- // arguments of type (const SubresourceRange& range, T* data) and returns void,
- // calls it with ranges that in aggregate form `range` and pass for each of the
- // sub-ranges a pointer to modify the value for that sub-range. For example:
- //
- // subresources.Update(view->GetRange(), [](const SubresourceRange&, T* data) {
- // *data |= wgpu::TextureUsage::Stuff;
- // });
- //
- // /!\ WARNING: updateFunc should never use range to compute the update to data otherwise
- // your code is likely to break when compression happens. Range should only be used for
- // side effects like using it to compute a Vulkan pipeline barrier.
- template <typename F>
- void Update(const SubresourceRange& range, F&& updateFunc);
-
- // Given a mergeFunc that's a function or a function-like object that can be called with
- // arguments of type (const SubresourceRange& range, T* data, const U& otherData) and
- // returns void, calls it with ranges that in aggregate form the full resources and pass
- // for each of the sub-ranges a pointer to modify the value for that sub-range and the
- // corresponding value from other for that sub-range. For example:
- //
- // subresources.Merge(otherUsages,
- // [](const SubresourceRange&, T* data, const T& otherData) {
- // *data |= otherData;
- // });
- //
- // /!\ WARNING: mergeFunc should never use range to compute the update to data otherwise
- // your code is likely to break when compression happens. Range should only be used for
- // side effects like using it to compute a Vulkan pipeline barrier.
- template <typename U, typename F>
- void Merge(const SubresourceStorage<U>& other, F&& mergeFunc);
-
- // Other operations to consider:
- //
- // - UpdateTo(Range, T) that updates the range to a constant value.
-
- // Methods to query the internal state of SubresourceStorage for testing.
- Aspect GetAspectsForTesting() const;
- uint32_t GetArrayLayerCountForTesting() const;
- uint32_t GetMipLevelCountForTesting() const;
- bool IsAspectCompressedForTesting(Aspect aspect) const;
- bool IsLayerCompressedForTesting(Aspect aspect, uint32_t layer) const;
-
- private:
- template <typename U>
- friend class SubresourceStorage;
-
- void DecompressAspect(uint32_t aspectIndex);
- void RecompressAspect(uint32_t aspectIndex);
-
- void DecompressLayer(uint32_t aspectIndex, uint32_t layer);
- void RecompressLayer(uint32_t aspectIndex, uint32_t layer);
-
- SubresourceRange GetFullLayerRange(Aspect aspect, uint32_t layer) const;
-
- // LayerCompressed should never be called when the aspect is compressed otherwise it would
- // need to check that mLayerCompressed is not null before indexing it.
- bool& LayerCompressed(uint32_t aspectIndex, uint32_t layerIndex);
- bool LayerCompressed(uint32_t aspectIndex, uint32_t layerIndex) const;
-
- // Return references to the data for a compressed plane / layer or subresource.
- // Each variant should be called exactly under the correct compression level.
- T& DataInline(uint32_t aspectIndex);
- T& Data(uint32_t aspectIndex, uint32_t layer, uint32_t level = 0);
- const T& DataInline(uint32_t aspectIndex) const;
- const T& Data(uint32_t aspectIndex, uint32_t layer, uint32_t level = 0) const;
-
- Aspect mAspects;
- uint8_t mMipLevelCount;
- uint16_t mArrayLayerCount;
-
- // Invariant: if an aspect is marked compressed, then all it's layers are marked as
- // compressed.
- static constexpr size_t kMaxAspects = 2;
- std::array<bool, kMaxAspects> mAspectCompressed;
- std::array<T, kMaxAspects> mInlineAspectData;
-
- // Indexed as mLayerCompressed[aspectIndex * mArrayLayerCount + layer].
- std::unique_ptr<bool[]> mLayerCompressed;
-
- // Indexed as mData[(aspectIndex * mArrayLayerCount + layer) * mMipLevelCount + level].
- // The data for a compressed aspect is stored in the slot for (aspect, 0, 0). Similarly
- // the data for a compressed layer of aspect if in the slot for (aspect, layer, 0).
- std::unique_ptr<T[]> mData;
- };
-
- template <typename T>
- SubresourceStorage<T>::SubresourceStorage(Aspect aspects,
- uint32_t arrayLayerCount,
- uint32_t mipLevelCount,
- T initialValue)
- : mAspects(aspects), mMipLevelCount(mipLevelCount), mArrayLayerCount(arrayLayerCount) {
- ASSERT(arrayLayerCount <= std::numeric_limits<decltype(mArrayLayerCount)>::max());
- ASSERT(mipLevelCount <= std::numeric_limits<decltype(mMipLevelCount)>::max());
-
- uint32_t aspectCount = GetAspectCount(aspects);
- ASSERT(aspectCount <= kMaxAspects);
-
- for (uint32_t aspectIndex = 0; aspectIndex < aspectCount; aspectIndex++) {
- mAspectCompressed[aspectIndex] = true;
- DataInline(aspectIndex) = initialValue;
- }
+ // - UpdateTo(Range, T) that updates the range to a constant value.
+
+ // Methods to query the internal state of SubresourceStorage for testing.
+ Aspect GetAspectsForTesting() const;
+ uint32_t GetArrayLayerCountForTesting() const;
+ uint32_t GetMipLevelCountForTesting() const;
+ bool IsAspectCompressedForTesting(Aspect aspect) const;
+ bool IsLayerCompressedForTesting(Aspect aspect, uint32_t layer) const;
+
+ private:
+ template <typename U>
+ friend class SubresourceStorage;
+
+ void DecompressAspect(uint32_t aspectIndex);
+ void RecompressAspect(uint32_t aspectIndex);
+
+ void DecompressLayer(uint32_t aspectIndex, uint32_t layer);
+ void RecompressLayer(uint32_t aspectIndex, uint32_t layer);
+
+ SubresourceRange GetFullLayerRange(Aspect aspect, uint32_t layer) const;
+
+ // LayerCompressed should never be called when the aspect is compressed otherwise it would
+ // need to check that mLayerCompressed is not null before indexing it.
+ bool& LayerCompressed(uint32_t aspectIndex, uint32_t layerIndex);
+ bool LayerCompressed(uint32_t aspectIndex, uint32_t layerIndex) const;
+
+ // Return references to the data for a compressed plane / layer or subresource.
+ // Each variant should be called exactly under the correct compression level.
+ T& DataInline(uint32_t aspectIndex);
+ T& Data(uint32_t aspectIndex, uint32_t layer, uint32_t level = 0);
+ const T& DataInline(uint32_t aspectIndex) const;
+ const T& Data(uint32_t aspectIndex, uint32_t layer, uint32_t level = 0) const;
+
+ Aspect mAspects;
+ uint8_t mMipLevelCount;
+ uint16_t mArrayLayerCount;
+
+    // Invariant: if an aspect is marked compressed, then all its layers are marked as
+ // compressed.
+ static constexpr size_t kMaxAspects = 2;
+ std::array<bool, kMaxAspects> mAspectCompressed;
+ std::array<T, kMaxAspects> mInlineAspectData;
+
+ // Indexed as mLayerCompressed[aspectIndex * mArrayLayerCount + layer].
+ std::unique_ptr<bool[]> mLayerCompressed;
+
+ // Indexed as mData[(aspectIndex * mArrayLayerCount + layer) * mMipLevelCount + level].
+ // The data for a compressed aspect is stored in the slot for (aspect, 0, 0). Similarly
+    // the data for a compressed layer of an aspect is in the slot for (aspect, layer, 0).
+ std::unique_ptr<T[]> mData;
+};
+
+template <typename T>
+SubresourceStorage<T>::SubresourceStorage(Aspect aspects,
+ uint32_t arrayLayerCount,
+ uint32_t mipLevelCount,
+ T initialValue)
+ : mAspects(aspects), mMipLevelCount(mipLevelCount), mArrayLayerCount(arrayLayerCount) {
+ ASSERT(arrayLayerCount <= std::numeric_limits<decltype(mArrayLayerCount)>::max());
+ ASSERT(mipLevelCount <= std::numeric_limits<decltype(mMipLevelCount)>::max());
+
+ uint32_t aspectCount = GetAspectCount(aspects);
+ ASSERT(aspectCount <= kMaxAspects);
+
+ for (uint32_t aspectIndex = 0; aspectIndex < aspectCount; aspectIndex++) {
+ mAspectCompressed[aspectIndex] = true;
+ DataInline(aspectIndex) = initialValue;
}
+}
- template <typename T>
- template <typename F>
- void SubresourceStorage<T>::Update(const SubresourceRange& range, F&& updateFunc) {
- bool fullLayers = range.baseMipLevel == 0 && range.levelCount == mMipLevelCount;
- bool fullAspects =
- range.baseArrayLayer == 0 && range.layerCount == mArrayLayerCount && fullLayers;
-
- for (Aspect aspect : IterateEnumMask(range.aspects)) {
- uint32_t aspectIndex = GetAspectIndex(aspect);
-
- // Call the updateFunc once for the whole aspect if possible or decompress and fallback
- // to per-layer handling.
- if (mAspectCompressed[aspectIndex]) {
- if (fullAspects) {
- SubresourceRange updateRange =
- SubresourceRange::MakeFull(aspect, mArrayLayerCount, mMipLevelCount);
- updateFunc(updateRange, &DataInline(aspectIndex));
- continue;
- }
- DecompressAspect(aspectIndex);
- }
-
- uint32_t layerEnd = range.baseArrayLayer + range.layerCount;
- for (uint32_t layer = range.baseArrayLayer; layer < layerEnd; layer++) {
- // Call the updateFunc once for the whole layer if possible or decompress and
- // fallback to per-level handling.
- if (LayerCompressed(aspectIndex, layer)) {
- if (fullLayers) {
- SubresourceRange updateRange = GetFullLayerRange(aspect, layer);
- updateFunc(updateRange, &Data(aspectIndex, layer));
- continue;
- }
- DecompressLayer(aspectIndex, layer);
- }
+template <typename T>
+template <typename F>
+void SubresourceStorage<T>::Update(const SubresourceRange& range, F&& updateFunc) {
+ bool fullLayers = range.baseMipLevel == 0 && range.levelCount == mMipLevelCount;
+ bool fullAspects =
+ range.baseArrayLayer == 0 && range.layerCount == mArrayLayerCount && fullLayers;
- // Worst case: call updateFunc per level.
- uint32_t levelEnd = range.baseMipLevel + range.levelCount;
- for (uint32_t level = range.baseMipLevel; level < levelEnd; level++) {
- SubresourceRange updateRange =
- SubresourceRange::MakeSingle(aspect, layer, level);
- updateFunc(updateRange, &Data(aspectIndex, layer, level));
- }
-
- // If the range has fullLayers then it is likely we can recompress after the calls
- // to updateFunc (this branch is skipped if updateFunc was called for the whole
- // layer).
- if (fullLayers) {
- RecompressLayer(aspectIndex, layer);
- }
- }
+ for (Aspect aspect : IterateEnumMask(range.aspects)) {
+ uint32_t aspectIndex = GetAspectIndex(aspect);
- // If the range has fullAspects then it is likely we can recompress after the calls to
- // updateFunc (this branch is skipped if updateFunc was called for the whole aspect).
+ // Call the updateFunc once for the whole aspect if possible, or decompress and fall back
+ // to per-layer handling.
+ if (mAspectCompressed[aspectIndex]) {
if (fullAspects) {
- RecompressAspect(aspectIndex);
- }
- }
- }
-
- template <typename T>
- template <typename U, typename F>
- void SubresourceStorage<T>::Merge(const SubresourceStorage<U>& other, F&& mergeFunc) {
- ASSERT(mAspects == other.mAspects);
- ASSERT(mArrayLayerCount == other.mArrayLayerCount);
- ASSERT(mMipLevelCount == other.mMipLevelCount);
-
- for (Aspect aspect : IterateEnumMask(mAspects)) {
- uint32_t aspectIndex = GetAspectIndex(aspect);
-
- // If the other storage's aspect is compressed we don't need to decompress anything
- // in `this` and can just iterate through it, merging with `other`'s constant value for
- // the aspect. For code simplicity this can be done with a call to Update().
- if (other.mAspectCompressed[aspectIndex]) {
- const U& otherData = other.DataInline(aspectIndex);
- Update(SubresourceRange::MakeFull(aspect, mArrayLayerCount, mMipLevelCount),
- [&](const SubresourceRange& subrange, T* data) {
- mergeFunc(subrange, data, otherData);
- });
+ SubresourceRange updateRange =
+ SubresourceRange::MakeFull(aspect, mArrayLayerCount, mMipLevelCount);
+ updateFunc(updateRange, &DataInline(aspectIndex));
continue;
}
+ DecompressAspect(aspectIndex);
+ }
- // Other doesn't have the aspect compressed so we must do at least per-layer merging.
- if (mAspectCompressed[aspectIndex]) {
- DecompressAspect(aspectIndex);
- }
-
- for (uint32_t layer = 0; layer < mArrayLayerCount; layer++) {
- // Similarly to above, use a fast path if other's layer is compressed.
- if (other.LayerCompressed(aspectIndex, layer)) {
- const U& otherData = other.Data(aspectIndex, layer);
- Update(GetFullLayerRange(aspect, layer),
- [&](const SubresourceRange& subrange, T* data) {
- mergeFunc(subrange, data, otherData);
- });
+ uint32_t layerEnd = range.baseArrayLayer + range.layerCount;
+ for (uint32_t layer = range.baseArrayLayer; layer < layerEnd; layer++) {
+ // Call the updateFunc once for the whole layer if possible, or decompress and
+ // fall back to per-level handling.
+ if (LayerCompressed(aspectIndex, layer)) {
+ if (fullLayers) {
+ SubresourceRange updateRange = GetFullLayerRange(aspect, layer);
+ updateFunc(updateRange, &Data(aspectIndex, layer));
continue;
}
+ DecompressLayer(aspectIndex, layer);
+ }
- // Sad case, other is decompressed for this layer, do per-level merging.
- if (LayerCompressed(aspectIndex, layer)) {
- DecompressLayer(aspectIndex, layer);
- }
-
- for (uint32_t level = 0; level < mMipLevelCount; level++) {
- SubresourceRange updateRange =
- SubresourceRange::MakeSingle(aspect, layer, level);
- mergeFunc(updateRange, &Data(aspectIndex, layer, level),
- other.Data(aspectIndex, layer, level));
- }
+ // Worst case: call updateFunc per level.
+ uint32_t levelEnd = range.baseMipLevel + range.levelCount;
+ for (uint32_t level = range.baseMipLevel; level < levelEnd; level++) {
+ SubresourceRange updateRange = SubresourceRange::MakeSingle(aspect, layer, level);
+ updateFunc(updateRange, &Data(aspectIndex, layer, level));
+ }
+ // If the range has fullLayers then it is likely we can recompress after the calls
+ // to updateFunc (this branch is skipped if updateFunc was called for the whole
+ // layer).
+ if (fullLayers) {
RecompressLayer(aspectIndex, layer);
}
+ }
+ // If the range has fullAspects then it is likely we can recompress after the calls to
+ // updateFunc (this branch is skipped if updateFunc was called for the whole aspect).
+ if (fullAspects) {
RecompressAspect(aspectIndex);
}
}
+}
- template <typename T>
- template <typename F>
- void SubresourceStorage<T>::Iterate(F&& iterateFunc) const {
- for (Aspect aspect : IterateEnumMask(mAspects)) {
- uint32_t aspectIndex = GetAspectIndex(aspect);
-
- // Fastest path, call iterateFunc on the whole aspect at once.
- if (mAspectCompressed[aspectIndex]) {
- SubresourceRange range =
- SubresourceRange::MakeFull(aspect, mArrayLayerCount, mMipLevelCount);
- iterateFunc(range, DataInline(aspectIndex));
- continue;
- }
-
- for (uint32_t layer = 0; layer < mArrayLayerCount; layer++) {
- // Fast path, call iterateFunc on the whole array layer at once.
- if (LayerCompressed(aspectIndex, layer)) {
- SubresourceRange range = GetFullLayerRange(aspect, layer);
- iterateFunc(range, Data(aspectIndex, layer));
- continue;
- }
+template <typename T>
+template <typename U, typename F>
+void SubresourceStorage<T>::Merge(const SubresourceStorage<U>& other, F&& mergeFunc) {
+ ASSERT(mAspects == other.mAspects);
+ ASSERT(mArrayLayerCount == other.mArrayLayerCount);
+ ASSERT(mMipLevelCount == other.mMipLevelCount);
- // Slow path, call iterateFunc for each mip level.
- for (uint32_t level = 0; level < mMipLevelCount; level++) {
- SubresourceRange range = SubresourceRange::MakeSingle(aspect, layer, level);
- iterateFunc(range, Data(aspectIndex, layer, level));
- }
- }
- }
- }
-
- template <typename T>
- const T& SubresourceStorage<T>::Get(Aspect aspect,
- uint32_t arrayLayer,
- uint32_t mipLevel) const {
+ for (Aspect aspect : IterateEnumMask(mAspects)) {
uint32_t aspectIndex = GetAspectIndex(aspect);
- ASSERT(aspectIndex < GetAspectCount(mAspects));
- ASSERT(arrayLayer < mArrayLayerCount);
- ASSERT(mipLevel < mMipLevelCount);
- // Fastest path, the aspect is compressed!
- if (mAspectCompressed[aspectIndex]) {
- return DataInline(aspectIndex);
+ // If the other storage's aspect is compressed we don't need to decompress anything
+ // in `this` and can just iterate through it, merging with `other`'s constant value for
+ // the aspect. For code simplicity this can be done with a call to Update().
+ if (other.mAspectCompressed[aspectIndex]) {
+ const U& otherData = other.DataInline(aspectIndex);
+ Update(SubresourceRange::MakeFull(aspect, mArrayLayerCount, mMipLevelCount),
+ [&](const SubresourceRange& subrange, T* data) {
+ mergeFunc(subrange, data, otherData);
+ });
+ continue;
}
- // Fast path, the array layer is compressed.
- if (LayerCompressed(aspectIndex, arrayLayer)) {
- return Data(aspectIndex, arrayLayer);
+ // Other doesn't have the aspect compressed so we must do at least per-layer merging.
+ if (mAspectCompressed[aspectIndex]) {
+ DecompressAspect(aspectIndex);
}
- return Data(aspectIndex, arrayLayer, mipLevel);
- }
-
- template <typename T>
- Aspect SubresourceStorage<T>::GetAspectsForTesting() const {
- return mAspects;
- }
+ for (uint32_t layer = 0; layer < mArrayLayerCount; layer++) {
+ // Similarly to above, use a fast path if other's layer is compressed.
+ if (other.LayerCompressed(aspectIndex, layer)) {
+ const U& otherData = other.Data(aspectIndex, layer);
+ Update(GetFullLayerRange(aspect, layer),
+ [&](const SubresourceRange& subrange, T* data) {
+ mergeFunc(subrange, data, otherData);
+ });
+ continue;
+ }
- template <typename T>
- uint32_t SubresourceStorage<T>::GetArrayLayerCountForTesting() const {
- return mArrayLayerCount;
- }
+ // Sad case, other is decompressed for this layer, do per-level merging.
+ if (LayerCompressed(aspectIndex, layer)) {
+ DecompressLayer(aspectIndex, layer);
+ }
- template <typename T>
- uint32_t SubresourceStorage<T>::GetMipLevelCountForTesting() const {
- return mMipLevelCount;
- }
+ for (uint32_t level = 0; level < mMipLevelCount; level++) {
+ SubresourceRange updateRange = SubresourceRange::MakeSingle(aspect, layer, level);
+ mergeFunc(updateRange, &Data(aspectIndex, layer, level),
+ other.Data(aspectIndex, layer, level));
+ }
- template <typename T>
- bool SubresourceStorage<T>::IsAspectCompressedForTesting(Aspect aspect) const {
- return mAspectCompressed[GetAspectIndex(aspect)];
- }
+ RecompressLayer(aspectIndex, layer);
+ }
- template <typename T>
- bool SubresourceStorage<T>::IsLayerCompressedForTesting(Aspect aspect, uint32_t layer) const {
- return mAspectCompressed[GetAspectIndex(aspect)] ||
- mLayerCompressed[GetAspectIndex(aspect) * mArrayLayerCount + layer];
+ RecompressAspect(aspectIndex);
}
+}
- template <typename T>
- void SubresourceStorage<T>::DecompressAspect(uint32_t aspectIndex) {
- ASSERT(mAspectCompressed[aspectIndex]);
- const T& aspectData = DataInline(aspectIndex);
- mAspectCompressed[aspectIndex] = false;
-
- // Extra allocations are only needed when aspects are decompressed. Create them lazily.
- if (mData == nullptr) {
- ASSERT(mLayerCompressed == nullptr);
-
- uint32_t aspectCount = GetAspectCount(mAspects);
- mLayerCompressed = std::make_unique<bool[]>(aspectCount * mArrayLayerCount);
- mData = std::make_unique<T[]>(aspectCount * mArrayLayerCount * mMipLevelCount);
-
- for (uint32_t layerIndex = 0; layerIndex < aspectCount * mArrayLayerCount;
- layerIndex++) {
- mLayerCompressed[layerIndex] = true;
- }
- }
+template <typename T>
+template <typename F>
+void SubresourceStorage<T>::Iterate(F&& iterateFunc) const {
+ for (Aspect aspect : IterateEnumMask(mAspects)) {
+ uint32_t aspectIndex = GetAspectIndex(aspect);
- ASSERT(LayerCompressed(aspectIndex, 0));
- for (uint32_t layer = 0; layer < mArrayLayerCount; layer++) {
- Data(aspectIndex, layer) = aspectData;
- ASSERT(LayerCompressed(aspectIndex, layer));
+ // Fastest path, call iterateFunc on the whole aspect at once.
+ if (mAspectCompressed[aspectIndex]) {
+ SubresourceRange range =
+ SubresourceRange::MakeFull(aspect, mArrayLayerCount, mMipLevelCount);
+ iterateFunc(range, DataInline(aspectIndex));
+ continue;
}
- }
- template <typename T>
- void SubresourceStorage<T>::RecompressAspect(uint32_t aspectIndex) {
- ASSERT(!mAspectCompressed[aspectIndex]);
- // All layers of the aspect must be compressed for the aspect to possibly recompress.
for (uint32_t layer = 0; layer < mArrayLayerCount; layer++) {
- if (!LayerCompressed(aspectIndex, layer)) {
- return;
+ // Fast path, call iterateFunc on the whole array layer at once.
+ if (LayerCompressed(aspectIndex, layer)) {
+ SubresourceRange range = GetFullLayerRange(aspect, layer);
+ iterateFunc(range, Data(aspectIndex, layer));
+ continue;
}
- }
- T layer0Data = Data(aspectIndex, 0);
- for (uint32_t layer = 1; layer < mArrayLayerCount; layer++) {
- if (!(Data(aspectIndex, layer) == layer0Data)) {
- return;
+ // Slow path, call iterateFunc for each mip level.
+ for (uint32_t level = 0; level < mMipLevelCount; level++) {
+ SubresourceRange range = SubresourceRange::MakeSingle(aspect, layer, level);
+ iterateFunc(range, Data(aspectIndex, layer, level));
}
}
+ }
+}
+
+template <typename T>
+const T& SubresourceStorage<T>::Get(Aspect aspect, uint32_t arrayLayer, uint32_t mipLevel) const {
+ uint32_t aspectIndex = GetAspectIndex(aspect);
+ ASSERT(aspectIndex < GetAspectCount(mAspects));
+ ASSERT(arrayLayer < mArrayLayerCount);
+ ASSERT(mipLevel < mMipLevelCount);
+
+ // Fastest path, the aspect is compressed!
+ if (mAspectCompressed[aspectIndex]) {
+ return DataInline(aspectIndex);
+ }
- mAspectCompressed[aspectIndex] = true;
- DataInline(aspectIndex) = layer0Data;
+ // Fast path, the array layer is compressed.
+ if (LayerCompressed(aspectIndex, arrayLayer)) {
+ return Data(aspectIndex, arrayLayer);
}
- template <typename T>
- void SubresourceStorage<T>::DecompressLayer(uint32_t aspectIndex, uint32_t layer) {
- ASSERT(LayerCompressed(aspectIndex, layer));
- ASSERT(!mAspectCompressed[aspectIndex]);
- const T& layerData = Data(aspectIndex, layer);
- LayerCompressed(aspectIndex, layer) = false;
-
- // We assume that (aspect, layer, 0) is stored at the same place as (aspect, layer) which
- // allows starting the iteration at level 1.
- for (uint32_t level = 1; level < mMipLevelCount; level++) {
- Data(aspectIndex, layer, level) = layerData;
+ return Data(aspectIndex, arrayLayer, mipLevel);
+}
+
+template <typename T>
+Aspect SubresourceStorage<T>::GetAspectsForTesting() const {
+ return mAspects;
+}
+
+template <typename T>
+uint32_t SubresourceStorage<T>::GetArrayLayerCountForTesting() const {
+ return mArrayLayerCount;
+}
+
+template <typename T>
+uint32_t SubresourceStorage<T>::GetMipLevelCountForTesting() const {
+ return mMipLevelCount;
+}
+
+template <typename T>
+bool SubresourceStorage<T>::IsAspectCompressedForTesting(Aspect aspect) const {
+ return mAspectCompressed[GetAspectIndex(aspect)];
+}
+
+template <typename T>
+bool SubresourceStorage<T>::IsLayerCompressedForTesting(Aspect aspect, uint32_t layer) const {
+ return mAspectCompressed[GetAspectIndex(aspect)] ||
+ mLayerCompressed[GetAspectIndex(aspect) * mArrayLayerCount + layer];
+}
+
+template <typename T>
+void SubresourceStorage<T>::DecompressAspect(uint32_t aspectIndex) {
+ ASSERT(mAspectCompressed[aspectIndex]);
+ const T& aspectData = DataInline(aspectIndex);
+ mAspectCompressed[aspectIndex] = false;
+
+ // Extra allocations are only needed when aspects are decompressed. Create them lazily.
+ if (mData == nullptr) {
+ ASSERT(mLayerCompressed == nullptr);
+
+ uint32_t aspectCount = GetAspectCount(mAspects);
+ mLayerCompressed = std::make_unique<bool[]>(aspectCount * mArrayLayerCount);
+ mData = std::make_unique<T[]>(aspectCount * mArrayLayerCount * mMipLevelCount);
+
+ for (uint32_t layerIndex = 0; layerIndex < aspectCount * mArrayLayerCount; layerIndex++) {
+ mLayerCompressed[layerIndex] = true;
}
}
- template <typename T>
- void SubresourceStorage<T>::RecompressLayer(uint32_t aspectIndex, uint32_t layer) {
- ASSERT(!LayerCompressed(aspectIndex, layer));
- ASSERT(!mAspectCompressed[aspectIndex]);
- const T& level0Data = Data(aspectIndex, layer, 0);
-
- for (uint32_t level = 1; level < mMipLevelCount; level++) {
- if (!(Data(aspectIndex, layer, level) == level0Data)) {
- return;
- }
+ ASSERT(LayerCompressed(aspectIndex, 0));
+ for (uint32_t layer = 0; layer < mArrayLayerCount; layer++) {
+ Data(aspectIndex, layer) = aspectData;
+ ASSERT(LayerCompressed(aspectIndex, layer));
+ }
+}
+
+template <typename T>
+void SubresourceStorage<T>::RecompressAspect(uint32_t aspectIndex) {
+ ASSERT(!mAspectCompressed[aspectIndex]);
+ // All layers of the aspect must be compressed for the aspect to possibly recompress.
+ for (uint32_t layer = 0; layer < mArrayLayerCount; layer++) {
+ if (!LayerCompressed(aspectIndex, layer)) {
+ return;
}
-
- LayerCompressed(aspectIndex, layer) = true;
}
- template <typename T>
- SubresourceRange SubresourceStorage<T>::GetFullLayerRange(Aspect aspect, uint32_t layer) const {
- return {aspect, {layer, 1}, {0, mMipLevelCount}};
+ T layer0Data = Data(aspectIndex, 0);
+ for (uint32_t layer = 1; layer < mArrayLayerCount; layer++) {
+ if (!(Data(aspectIndex, layer) == layer0Data)) {
+ return;
+ }
}
- template <typename T>
- bool& SubresourceStorage<T>::LayerCompressed(uint32_t aspectIndex, uint32_t layer) {
- ASSERT(!mAspectCompressed[aspectIndex]);
- return mLayerCompressed[aspectIndex * mArrayLayerCount + layer];
+ mAspectCompressed[aspectIndex] = true;
+ DataInline(aspectIndex) = layer0Data;
+}
+
+template <typename T>
+void SubresourceStorage<T>::DecompressLayer(uint32_t aspectIndex, uint32_t layer) {
+ ASSERT(LayerCompressed(aspectIndex, layer));
+ ASSERT(!mAspectCompressed[aspectIndex]);
+ const T& layerData = Data(aspectIndex, layer);
+ LayerCompressed(aspectIndex, layer) = false;
+
+ // We assume that (aspect, layer, 0) is stored at the same place as (aspect, layer) which
+ // allows starting the iteration at level 1.
+ for (uint32_t level = 1; level < mMipLevelCount; level++) {
+ Data(aspectIndex, layer, level) = layerData;
}
+}
- template <typename T>
- bool SubresourceStorage<T>::LayerCompressed(uint32_t aspectIndex, uint32_t layer) const {
- ASSERT(!mAspectCompressed[aspectIndex]);
- return mLayerCompressed[aspectIndex * mArrayLayerCount + layer];
- }
+template <typename T>
+void SubresourceStorage<T>::RecompressLayer(uint32_t aspectIndex, uint32_t layer) {
+ ASSERT(!LayerCompressed(aspectIndex, layer));
+ ASSERT(!mAspectCompressed[aspectIndex]);
+ const T& level0Data = Data(aspectIndex, layer, 0);
- template <typename T>
- T& SubresourceStorage<T>::DataInline(uint32_t aspectIndex) {
- ASSERT(mAspectCompressed[aspectIndex]);
- return mInlineAspectData[aspectIndex];
- }
- template <typename T>
- T& SubresourceStorage<T>::Data(uint32_t aspectIndex, uint32_t layer, uint32_t level) {
- ASSERT(level == 0 || !LayerCompressed(aspectIndex, layer));
- ASSERT(!mAspectCompressed[aspectIndex]);
- return mData[(aspectIndex * mArrayLayerCount + layer) * mMipLevelCount + level];
- }
- template <typename T>
- const T& SubresourceStorage<T>::DataInline(uint32_t aspectIndex) const {
- ASSERT(mAspectCompressed[aspectIndex]);
- return mInlineAspectData[aspectIndex];
- }
- template <typename T>
- const T& SubresourceStorage<T>::Data(uint32_t aspectIndex,
- uint32_t layer,
- uint32_t level) const {
- ASSERT(level == 0 || !LayerCompressed(aspectIndex, layer));
- ASSERT(!mAspectCompressed[aspectIndex]);
- return mData[(aspectIndex * mArrayLayerCount + layer) * mMipLevelCount + level];
+ for (uint32_t level = 1; level < mMipLevelCount; level++) {
+ if (!(Data(aspectIndex, layer, level) == level0Data)) {
+ return;
+ }
}
+ LayerCompressed(aspectIndex, layer) = true;
+}
+
+template <typename T>
+SubresourceRange SubresourceStorage<T>::GetFullLayerRange(Aspect aspect, uint32_t layer) const {
+ return {aspect, {layer, 1}, {0, mMipLevelCount}};
+}
+
+template <typename T>
+bool& SubresourceStorage<T>::LayerCompressed(uint32_t aspectIndex, uint32_t layer) {
+ ASSERT(!mAspectCompressed[aspectIndex]);
+ return mLayerCompressed[aspectIndex * mArrayLayerCount + layer];
+}
+
+template <typename T>
+bool SubresourceStorage<T>::LayerCompressed(uint32_t aspectIndex, uint32_t layer) const {
+ ASSERT(!mAspectCompressed[aspectIndex]);
+ return mLayerCompressed[aspectIndex * mArrayLayerCount + layer];
+}
+
+template <typename T>
+T& SubresourceStorage<T>::DataInline(uint32_t aspectIndex) {
+ ASSERT(mAspectCompressed[aspectIndex]);
+ return mInlineAspectData[aspectIndex];
+}
+template <typename T>
+T& SubresourceStorage<T>::Data(uint32_t aspectIndex, uint32_t layer, uint32_t level) {
+ ASSERT(level == 0 || !LayerCompressed(aspectIndex, layer));
+ ASSERT(!mAspectCompressed[aspectIndex]);
+ return mData[(aspectIndex * mArrayLayerCount + layer) * mMipLevelCount + level];
+}
+template <typename T>
+const T& SubresourceStorage<T>::DataInline(uint32_t aspectIndex) const {
+ ASSERT(mAspectCompressed[aspectIndex]);
+ return mInlineAspectData[aspectIndex];
+}
+template <typename T>
+const T& SubresourceStorage<T>::Data(uint32_t aspectIndex, uint32_t layer, uint32_t level) const {
+ ASSERT(level == 0 || !LayerCompressed(aspectIndex, layer));
+ ASSERT(!mAspectCompressed[aspectIndex]);
+ return mData[(aspectIndex * mArrayLayerCount + layer) * mMipLevelCount + level];
+}
+
} // namespace dawn::native
#endif // SRC_DAWN_NATIVE_SUBRESOURCESTORAGE_H_
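To make the compressed/decompressed behavior above concrete, here is a minimal usage sketch. It assumes the dawn::native types declared in this header (Aspect, SubresourceRange, SubresourceStorage) plus the wgpu::TextureUsage bitmask from the generated C++ API and its bitwise operators; the function name, layer/level counts, and usage flags are illustrative only and are not taken from Dawn's real call sites.

#include "dawn/native/SubresourceStorage.h"

namespace dawn::native {

// Track one wgpu::TextureUsage value per (aspect, layer, level) of a 6-layer, 4-level texture.
void SubresourceStorageSketch() {
    SubresourceStorage<wgpu::TextureUsage> usages(Aspect::Color, /*arrayLayerCount=*/6,
                                                  /*mipLevelCount=*/4, wgpu::TextureUsage::None);

    // Touch a single subresource; only layer 2 of the color aspect gets decompressed.
    usages.Update(SubresourceRange::MakeSingle(Aspect::Color, /*layer=*/2, /*level=*/0),
                  [](const SubresourceRange&, wgpu::TextureUsage* usage) {
                      *usage |= wgpu::TextureUsage::CopySrc;
                  });

    // Iterate visits one (possibly compressed) block per call rather than every subresource,
    // so a fully compressed aspect is reported as a single SubresourceRange.
    usages.Iterate([](const SubresourceRange& range, const wgpu::TextureUsage& usage) {
        // e.g. emit one barrier covering the whole range.
    });
}

}  // namespace dawn::native

Merge() follows the same pattern: it combines another storage into this one block by block, staying compressed wherever both sides are compressed.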
diff --git a/chromium/third_party/dawn/src/dawn/native/Surface.cpp b/chromium/third_party/dawn/src/dawn/native/Surface.cpp
index ff6fd07559d..6822dbbe486 100644
--- a/chromium/third_party/dawn/src/dawn/native/Surface.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/Surface.cpp
@@ -19,252 +19,296 @@
#include "dawn/native/Instance.h"
#include "dawn/native/SwapChain.h"
-#if defined(DAWN_PLATFORM_WINDOWS)
-# include <windows.ui.core.h>
-# include <windows.ui.xaml.controls.h>
-#endif // defined(DAWN_PLATFORM_WINDOWS)
+#if DAWN_PLATFORM_IS(WINDOWS)
+#include <windows.ui.core.h>
+#include <windows.ui.xaml.controls.h>
+#endif // DAWN_PLATFORM_IS(WINDOWS)
#if defined(DAWN_USE_X11)
-# include "dawn/common/xlib_with_undefs.h"
+#include "dawn/common/xlib_with_undefs.h"
#endif // defined(DAWN_USE_X11)
namespace dawn::native {
- absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
- Surface::Type value,
- const absl::FormatConversionSpec& spec,
- absl::FormatSink* s) {
- switch (value) {
- case Surface::Type::AndroidWindow:
- s->Append("AndroidWindow");
- break;
- case Surface::Type::MetalLayer:
- s->Append("MetalLayer");
- break;
- case Surface::Type::WindowsHWND:
- s->Append("WindowsHWND");
- break;
- case Surface::Type::WindowsCoreWindow:
- s->Append("WindowsCoreWindow");
- break;
- case Surface::Type::WindowsSwapChainPanel:
- s->Append("WindowsSwapChainPanel");
- break;
- case Surface::Type::XlibWindow:
- s->Append("XlibWindow");
- break;
- }
- return {true};
+absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+ Surface::Type value,
+ const absl::FormatConversionSpec& spec,
+ absl::FormatSink* s) {
+ switch (value) {
+ case Surface::Type::AndroidWindow:
+ s->Append("AndroidWindow");
+ break;
+ case Surface::Type::MetalLayer:
+ s->Append("MetalLayer");
+ break;
+ case Surface::Type::WaylandSurface:
+ s->Append("WaylandSurface");
+ break;
+ case Surface::Type::WindowsHWND:
+ s->Append("WindowsHWND");
+ break;
+ case Surface::Type::WindowsCoreWindow:
+ s->Append("WindowsCoreWindow");
+ break;
+ case Surface::Type::WindowsSwapChainPanel:
+ s->Append("WindowsSwapChainPanel");
+ break;
+ case Surface::Type::XlibWindow:
+ s->Append("XlibWindow");
+ break;
}
+ return {true};
+}
#if defined(DAWN_ENABLE_BACKEND_METAL)
- bool InheritsFromCAMetalLayer(void* obj);
+bool InheritsFromCAMetalLayer(void* obj);
#endif // defined(DAWN_ENABLE_BACKEND_METAL)
- MaybeError ValidateSurfaceDescriptor(const InstanceBase* instance,
- const SurfaceDescriptor* descriptor) {
- DAWN_INVALID_IF(descriptor->nextInChain == nullptr,
- "Surface cannot be created with %s. nextInChain is not specified.",
- descriptor);
+MaybeError ValidateSurfaceDescriptor(const InstanceBase* instance,
+ const SurfaceDescriptor* descriptor) {
+ DAWN_INVALID_IF(descriptor->nextInChain == nullptr,
+ "Surface cannot be created with %s. nextInChain is not specified.", descriptor);
- DAWN_TRY(ValidateSingleSType(descriptor->nextInChain,
- wgpu::SType::SurfaceDescriptorFromAndroidNativeWindow,
- wgpu::SType::SurfaceDescriptorFromMetalLayer,
- wgpu::SType::SurfaceDescriptorFromWindowsHWND,
- wgpu::SType::SurfaceDescriptorFromWindowsCoreWindow,
- wgpu::SType::SurfaceDescriptorFromWindowsSwapChainPanel,
- wgpu::SType::SurfaceDescriptorFromXlibWindow));
+ DAWN_TRY(ValidateSingleSType(
+ descriptor->nextInChain, wgpu::SType::SurfaceDescriptorFromAndroidNativeWindow,
+ wgpu::SType::SurfaceDescriptorFromMetalLayer, wgpu::SType::SurfaceDescriptorFromWindowsHWND,
+ wgpu::SType::SurfaceDescriptorFromWindowsCoreWindow,
+ wgpu::SType::SurfaceDescriptorFromWindowsSwapChainPanel,
+ wgpu::SType::SurfaceDescriptorFromXlibWindow));
#if defined(DAWN_ENABLE_BACKEND_METAL)
- const SurfaceDescriptorFromMetalLayer* metalDesc = nullptr;
- FindInChain(descriptor->nextInChain, &metalDesc);
- if (metalDesc) {
- // Check that the layer is a CAMetalLayer (or a derived class).
- DAWN_INVALID_IF(!InheritsFromCAMetalLayer(metalDesc->layer),
- "Layer must be a CAMetalLayer");
- return {};
- }
+ const SurfaceDescriptorFromMetalLayer* metalDesc = nullptr;
+ FindInChain(descriptor->nextInChain, &metalDesc);
+ if (metalDesc) {
+ // Check that the layer is a CAMetalLayer (or a derived class).
+ DAWN_INVALID_IF(!InheritsFromCAMetalLayer(metalDesc->layer),
+ "Layer must be a CAMetalLayer");
+ return {};
+ }
#endif // defined(DAWN_ENABLE_BACKEND_METAL)
-#if defined(DAWN_PLATFORM_ANDROID)
- const SurfaceDescriptorFromAndroidNativeWindow* androidDesc = nullptr;
- FindInChain(descriptor->nextInChain, &androidDesc);
- // Currently the best validation we can do since it's not possible to check if the pointer
- // to a ANativeWindow is valid.
- if (androidDesc) {
- DAWN_INVALID_IF(androidDesc->window == nullptr, "Android window is not set.");
- return {};
- }
-#endif // defined(DAWN_PLATFORM_ANDROID)
-
-#if defined(DAWN_PLATFORM_WINDOWS)
-# if defined(DAWN_PLATFORM_WIN32)
- const SurfaceDescriptorFromWindowsHWND* hwndDesc = nullptr;
- FindInChain(descriptor->nextInChain, &hwndDesc);
- if (hwndDesc) {
- DAWN_INVALID_IF(IsWindow(static_cast<HWND>(hwndDesc->hwnd)) == 0, "Invalid HWND");
- return {};
- }
-# endif // defined(DAWN_PLATFORM_WIN32)
- const SurfaceDescriptorFromWindowsCoreWindow* coreWindowDesc = nullptr;
- FindInChain(descriptor->nextInChain, &coreWindowDesc);
- if (coreWindowDesc) {
- // Validate the coreWindow by query for ICoreWindow interface
- ComPtr<ABI::Windows::UI::Core::ICoreWindow> coreWindow;
- DAWN_INVALID_IF(coreWindowDesc->coreWindow == nullptr ||
- FAILED(static_cast<IUnknown*>(coreWindowDesc->coreWindow)
- ->QueryInterface(IID_PPV_ARGS(&coreWindow))),
- "Invalid CoreWindow");
- return {};
- }
- const SurfaceDescriptorFromWindowsSwapChainPanel* swapChainPanelDesc = nullptr;
- FindInChain(descriptor->nextInChain, &swapChainPanelDesc);
- if (swapChainPanelDesc) {
- // Validate the swapChainPanel by querying for ISwapChainPanel interface
- ComPtr<ABI::Windows::UI::Xaml::Controls::ISwapChainPanel> swapChainPanel;
- DAWN_INVALID_IF(swapChainPanelDesc->swapChainPanel == nullptr ||
- FAILED(static_cast<IUnknown*>(swapChainPanelDesc->swapChainPanel)
- ->QueryInterface(IID_PPV_ARGS(&swapChainPanel))),
- "Invalid SwapChainPanel");
- return {};
- }
-#endif // defined(DAWN_PLATFORM_WINDOWS)
-
-#if defined(DAWN_USE_X11)
- const SurfaceDescriptorFromXlibWindow* xDesc = nullptr;
- FindInChain(descriptor->nextInChain, &xDesc);
- if (xDesc) {
- // Check the validity of the window by calling a getter function on the window that
- // returns a status code. If the window is bad the call return a status of zero. We
- // need to set a temporary X11 error handler while doing this because the default
- // X11 error handler exits the program on any error.
- XErrorHandler oldErrorHandler =
- XSetErrorHandler([](Display*, XErrorEvent*) { return 0; });
- XWindowAttributes attributes;
- int status = XGetWindowAttributes(reinterpret_cast<Display*>(xDesc->display),
- xDesc->window, &attributes);
- XSetErrorHandler(oldErrorHandler);
-
- DAWN_INVALID_IF(status == 0, "Invalid X Window");
- return {};
- }
-#endif // defined(DAWN_USE_X11)
-
- return DAWN_FORMAT_VALIDATION_ERROR("Unsupported sType (%s)",
- descriptor->nextInChain->sType);
+#if DAWN_PLATFORM_IS(ANDROID)
+ const SurfaceDescriptorFromAndroidNativeWindow* androidDesc = nullptr;
+ FindInChain(descriptor->nextInChain, &androidDesc);
+ // This is currently the best validation we can do since it's not possible to check if the
+ // pointer to an ANativeWindow is valid.
+ if (androidDesc) {
+ DAWN_INVALID_IF(androidDesc->window == nullptr, "Android window is not set.");
+ return {};
}
+#endif // DAWN_PLATFORM_IS(ANDROID)
- Surface::Surface(InstanceBase* instance, const SurfaceDescriptor* descriptor)
- : mInstance(instance) {
- ASSERT(descriptor->nextInChain != nullptr);
- const SurfaceDescriptorFromAndroidNativeWindow* androidDesc = nullptr;
- const SurfaceDescriptorFromMetalLayer* metalDesc = nullptr;
- const SurfaceDescriptorFromWindowsHWND* hwndDesc = nullptr;
- const SurfaceDescriptorFromWindowsCoreWindow* coreWindowDesc = nullptr;
- const SurfaceDescriptorFromWindowsSwapChainPanel* swapChainPanelDesc = nullptr;
- const SurfaceDescriptorFromXlibWindow* xDesc = nullptr;
- FindInChain(descriptor->nextInChain, &androidDesc);
- FindInChain(descriptor->nextInChain, &metalDesc);
- FindInChain(descriptor->nextInChain, &hwndDesc);
- FindInChain(descriptor->nextInChain, &coreWindowDesc);
- FindInChain(descriptor->nextInChain, &swapChainPanelDesc);
- FindInChain(descriptor->nextInChain, &xDesc);
- if (metalDesc) {
- mType = Type::MetalLayer;
- mMetalLayer = metalDesc->layer;
- } else if (androidDesc) {
- mType = Type::AndroidWindow;
- mAndroidNativeWindow = androidDesc->window;
- } else if (hwndDesc) {
- mType = Type::WindowsHWND;
- mHInstance = hwndDesc->hinstance;
- mHWND = hwndDesc->hwnd;
- } else if (coreWindowDesc) {
-#if defined(DAWN_PLATFORM_WINDOWS)
- mType = Type::WindowsCoreWindow;
- mCoreWindow = static_cast<IUnknown*>(coreWindowDesc->coreWindow);
-#endif // defined(DAWN_PLATFORM_WINDOWS)
- } else if (swapChainPanelDesc) {
-#if defined(DAWN_PLATFORM_WINDOWS)
- mType = Type::WindowsSwapChainPanel;
- mSwapChainPanel = static_cast<IUnknown*>(swapChainPanelDesc->swapChainPanel);
-#endif // defined(DAWN_PLATFORM_WINDOWS)
- } else if (xDesc) {
- mType = Type::XlibWindow;
- mXDisplay = xDesc->display;
- mXWindow = xDesc->window;
- } else {
- UNREACHABLE();
- }
+#if DAWN_PLATFORM_IS(WINDOWS)
+#if DAWN_PLATFORM_IS(WIN32)
+ const SurfaceDescriptorFromWindowsHWND* hwndDesc = nullptr;
+ FindInChain(descriptor->nextInChain, &hwndDesc);
+ if (hwndDesc) {
+ DAWN_INVALID_IF(IsWindow(static_cast<HWND>(hwndDesc->hwnd)) == 0, "Invalid HWND");
+ return {};
}
-
- Surface::~Surface() {
- if (mSwapChain != nullptr) {
- mSwapChain->DetachFromSurface();
- mSwapChain = nullptr;
- }
+#endif // DAWN_PLATFORM_IS(WIN32)
+ const SurfaceDescriptorFromWindowsCoreWindow* coreWindowDesc = nullptr;
+ FindInChain(descriptor->nextInChain, &coreWindowDesc);
+ if (coreWindowDesc) {
+ // Validate the coreWindow by querying for the ICoreWindow interface.
+ ComPtr<ABI::Windows::UI::Core::ICoreWindow> coreWindow;
+ DAWN_INVALID_IF(coreWindowDesc->coreWindow == nullptr ||
+ FAILED(static_cast<IUnknown*>(coreWindowDesc->coreWindow)
+ ->QueryInterface(IID_PPV_ARGS(&coreWindow))),
+ "Invalid CoreWindow");
+ return {};
}
-
- NewSwapChainBase* Surface::GetAttachedSwapChain() {
- return mSwapChain.Get();
+ const SurfaceDescriptorFromWindowsSwapChainPanel* swapChainPanelDesc = nullptr;
+ FindInChain(descriptor->nextInChain, &swapChainPanelDesc);
+ if (swapChainPanelDesc) {
+ // Validate the swapChainPanel by querying for the ISwapChainPanel interface.
+ ComPtr<ABI::Windows::UI::Xaml::Controls::ISwapChainPanel> swapChainPanel;
+ DAWN_INVALID_IF(swapChainPanelDesc->swapChainPanel == nullptr ||
+ FAILED(static_cast<IUnknown*>(swapChainPanelDesc->swapChainPanel)
+ ->QueryInterface(IID_PPV_ARGS(&swapChainPanel))),
+ "Invalid SwapChainPanel");
+ return {};
}
+#endif // DAWN_PLATFORM_IS(WINDOWS)
- void Surface::SetAttachedSwapChain(NewSwapChainBase* swapChain) {
- mSwapChain = swapChain;
+#if defined(DAWN_USE_WAYLAND)
+ const SurfaceDescriptorFromWaylandSurface* waylandDesc = nullptr;
+ FindInChain(descriptor->nextInChain, &waylandDesc);
+ if (waylandDesc) {
+ // Unfortunately we can't check the validity of Wayland objects, only that they
+ // aren't nullptr.
+ DAWN_INVALID_IF(waylandDesc->display == nullptr, "Wayland display is nullptr.");
+ DAWN_INVALID_IF(waylandDesc->surface == nullptr, "Wayland surface is nullptr.");
+ return {};
}
+#endif // defined(DAWN_USE_WAYLAND)
- InstanceBase* Surface::GetInstance() {
- return mInstance.Get();
- }
+#if defined(DAWN_USE_X11)
+ const SurfaceDescriptorFromXlibWindow* xDesc = nullptr;
+ FindInChain(descriptor->nextInChain, &xDesc);
+ if (xDesc) {
+ // Check the validity of the window by calling a getter function on the window that
+ // returns a status code. If the window is bad, the call returns a status of zero. We
+ // need to set a temporary X11 error handler while doing this because the default
+ // X11 error handler exits the program on any error.
+ XErrorHandler oldErrorHandler = XSetErrorHandler([](Display*, XErrorEvent*) { return 0; });
+ XWindowAttributes attributes;
+ int status = XGetWindowAttributes(reinterpret_cast<Display*>(xDesc->display), xDesc->window,
+ &attributes);
+ XSetErrorHandler(oldErrorHandler);
- Surface::Type Surface::GetType() const {
- return mType;
+ DAWN_INVALID_IF(status == 0, "Invalid X Window");
+ return {};
}
+#endif // defined(DAWN_USE_X11)
- void* Surface::GetAndroidNativeWindow() const {
- ASSERT(mType == Type::AndroidWindow);
- return mAndroidNativeWindow;
- }
+ return DAWN_FORMAT_VALIDATION_ERROR("Unsupported sType (%s)", descriptor->nextInChain->sType);
+}
- void* Surface::GetMetalLayer() const {
- ASSERT(mType == Type::MetalLayer);
- return mMetalLayer;
- }
+// static
+Surface* Surface::MakeError(InstanceBase* instance) {
+ return new Surface(instance, ErrorMonad::kError);
+}
+
+Surface::Surface(InstanceBase* instance, ErrorTag tag) : ErrorMonad(tag), mInstance(instance) {}
- void* Surface::GetHInstance() const {
- ASSERT(mType == Type::WindowsHWND);
- return mHInstance;
+Surface::Surface(InstanceBase* instance, const SurfaceDescriptor* descriptor)
+ : ErrorMonad(), mInstance(instance) {
+ ASSERT(descriptor->nextInChain != nullptr);
+ const SurfaceDescriptorFromAndroidNativeWindow* androidDesc = nullptr;
+ const SurfaceDescriptorFromMetalLayer* metalDesc = nullptr;
+ const SurfaceDescriptorFromWindowsHWND* hwndDesc = nullptr;
+ const SurfaceDescriptorFromWindowsCoreWindow* coreWindowDesc = nullptr;
+ const SurfaceDescriptorFromWindowsSwapChainPanel* swapChainPanelDesc = nullptr;
+ const SurfaceDescriptorFromWaylandSurface* waylandDesc = nullptr;
+ const SurfaceDescriptorFromXlibWindow* xDesc = nullptr;
+ FindInChain(descriptor->nextInChain, &androidDesc);
+ FindInChain(descriptor->nextInChain, &metalDesc);
+ FindInChain(descriptor->nextInChain, &hwndDesc);
+ FindInChain(descriptor->nextInChain, &coreWindowDesc);
+ FindInChain(descriptor->nextInChain, &swapChainPanelDesc);
+ FindInChain(descriptor->nextInChain, &waylandDesc);
+ FindInChain(descriptor->nextInChain, &xDesc);
+ if (metalDesc) {
+ mType = Type::MetalLayer;
+ mMetalLayer = metalDesc->layer;
+ } else if (androidDesc) {
+ mType = Type::AndroidWindow;
+ mAndroidNativeWindow = androidDesc->window;
+ } else if (waylandDesc) {
+ mType = Type::WaylandSurface;
+ mWaylandDisplay = waylandDesc->display;
+ mWaylandSurface = waylandDesc->surface;
+ } else if (hwndDesc) {
+ mType = Type::WindowsHWND;
+ mHInstance = hwndDesc->hinstance;
+ mHWND = hwndDesc->hwnd;
+ } else if (coreWindowDesc) {
+#if DAWN_PLATFORM_IS(WINDOWS)
+ mType = Type::WindowsCoreWindow;
+ mCoreWindow = static_cast<IUnknown*>(coreWindowDesc->coreWindow);
+#endif // DAWN_PLATFORM_IS(WINDOWS)
+ } else if (swapChainPanelDesc) {
+#if DAWN_PLATFORM_IS(WINDOWS)
+ mType = Type::WindowsSwapChainPanel;
+ mSwapChainPanel = static_cast<IUnknown*>(swapChainPanelDesc->swapChainPanel);
+#endif // DAWN_PLATFORM_IS(WINDOWS)
+ } else if (xDesc) {
+ mType = Type::XlibWindow;
+ mXDisplay = xDesc->display;
+ mXWindow = xDesc->window;
+ } else {
+ UNREACHABLE();
}
- void* Surface::GetHWND() const {
- ASSERT(mType == Type::WindowsHWND);
- return mHWND;
+}
+
+Surface::~Surface() {
+ if (mSwapChain != nullptr) {
+ mSwapChain->DetachFromSurface();
+ mSwapChain = nullptr;
}
+}
+
+NewSwapChainBase* Surface::GetAttachedSwapChain() {
+ ASSERT(!IsError());
+ return mSwapChain.Get();
+}
+
+void Surface::SetAttachedSwapChain(NewSwapChainBase* swapChain) {
+ ASSERT(!IsError());
+ mSwapChain = swapChain;
+}
+
+InstanceBase* Surface::GetInstance() const {
+ return mInstance.Get();
+}
+
+Surface::Type Surface::GetType() const {
+ ASSERT(!IsError());
+ return mType;
+}
+
+void* Surface::GetAndroidNativeWindow() const {
+ ASSERT(!IsError());
+ ASSERT(mType == Type::AndroidWindow);
+ return mAndroidNativeWindow;
+}
+
+void* Surface::GetMetalLayer() const {
+ ASSERT(!IsError());
+ ASSERT(mType == Type::MetalLayer);
+ return mMetalLayer;
+}
- IUnknown* Surface::GetCoreWindow() const {
- ASSERT(mType == Type::WindowsCoreWindow);
-#if defined(DAWN_PLATFORM_WINDOWS)
- return mCoreWindow.Get();
+void* Surface::GetWaylandDisplay() const {
+ ASSERT(mType == Type::WaylandSurface);
+ return mWaylandDisplay;
+}
+
+void* Surface::GetWaylandSurface() const {
+ ASSERT(mType == Type::WaylandSurface);
+ return mWaylandSurface;
+}
+
+void* Surface::GetHInstance() const {
+ ASSERT(!IsError());
+ ASSERT(mType == Type::WindowsHWND);
+ return mHInstance;
+}
+void* Surface::GetHWND() const {
+ ASSERT(!IsError());
+ ASSERT(mType == Type::WindowsHWND);
+ return mHWND;
+}
+
+IUnknown* Surface::GetCoreWindow() const {
+ ASSERT(!IsError());
+ ASSERT(mType == Type::WindowsCoreWindow);
+#if DAWN_PLATFORM_IS(WINDOWS)
+ return mCoreWindow.Get();
#else
- return nullptr;
+ return nullptr;
#endif
- }
+}
- IUnknown* Surface::GetSwapChainPanel() const {
- ASSERT(mType == Type::WindowsSwapChainPanel);
-#if defined(DAWN_PLATFORM_WINDOWS)
- return mSwapChainPanel.Get();
+IUnknown* Surface::GetSwapChainPanel() const {
+ ASSERT(!IsError());
+ ASSERT(mType == Type::WindowsSwapChainPanel);
+#if DAWN_PLATFORM_IS(WINDOWS)
+ return mSwapChainPanel.Get();
#else
- return nullptr;
+ return nullptr;
#endif
- }
+}
- void* Surface::GetXDisplay() const {
- ASSERT(mType == Type::XlibWindow);
- return mXDisplay;
- }
- uint32_t Surface::GetXWindow() const {
- ASSERT(mType == Type::XlibWindow);
- return mXWindow;
- }
+void* Surface::GetXDisplay() const {
+ ASSERT(!IsError());
+ ASSERT(mType == Type::XlibWindow);
+ return mXDisplay;
+}
+uint32_t Surface::GetXWindow() const {
+ ASSERT(!IsError());
+ ASSERT(mType == Type::XlibWindow);
+ return mXWindow;
+}
} // namespace dawn::native
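For reference, a client-side sketch of how the new Wayland path above would be reached. It assumes the generated wgpu C++ bindings expose the SurfaceDescriptorFromWaylandSurface chained struct used in this file, and that the wl_display/wl_surface pointers come from the application's own Wayland setup; the header path and function name are illustrative, not taken from Dawn's samples.

#include <webgpu/webgpu_cpp.h>

struct wl_display;
struct wl_surface;

// Chain a Wayland descriptor into the base SurfaceDescriptor and create the surface.
wgpu::Surface CreateWaylandSurface(const wgpu::Instance& instance,
                                   wl_display* display,
                                   wl_surface* surface) {
    wgpu::SurfaceDescriptorFromWaylandSurface waylandDesc;
    waylandDesc.display = display;  // validated above: must not be nullptr
    waylandDesc.surface = surface;  // validated above: must not be nullptr

    wgpu::SurfaceDescriptor desc;
    desc.nextInChain = &waylandDesc;
    return instance.CreateSurface(&desc);
}

Internally this lands in the waylandDesc branch of the Surface constructor and populates mWaylandDisplay and mWaylandSurface.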
diff --git a/chromium/third_party/dawn/src/dawn/native/Surface.h b/chromium/third_party/dawn/src/dawn/native/Surface.h
index c66253a60ae..96da3c25494 100644
--- a/chromium/third_party/dawn/src/dawn/native/Surface.h
+++ b/chromium/third_party/dawn/src/dawn/native/Surface.h
@@ -15,17 +15,17 @@
#ifndef SRC_DAWN_NATIVE_SURFACE_H_
#define SRC_DAWN_NATIVE_SURFACE_H_
-#include "dawn/common/RefCounted.h"
#include "dawn/native/Error.h"
#include "dawn/native/Forward.h"
+#include "dawn/native/ObjectBase.h"
#include "dawn/native/dawn_platform.h"
#include "dawn/common/Platform.h"
-#if defined(DAWN_PLATFORM_WINDOWS)
-# include "dawn/native/d3d12/d3d12_platform.h"
-#endif // defined(DAWN_PLATFORM_WINDOWS)
+#if DAWN_PLATFORM_IS(WINDOWS)
+#include "dawn/native/d3d12/d3d12_platform.h"
+#endif // DAWN_PLATFORM_IS(WINDOWS)
// Forward declare IUnknown
// GetCoreWindow needs to return an IUnknown pointer
@@ -34,90 +34,100 @@ struct IUnknown;
namespace dawn::native {
- MaybeError ValidateSurfaceDescriptor(const InstanceBase* instance,
- const SurfaceDescriptor* descriptor);
-
- // A surface is a sum types of all the kind of windows Dawn supports. The OS-specific types
- // aren't used because they would cause compilation errors on other OSes (or require
- // ObjectiveC).
- // The surface is also used to store the current swapchain so that we can detach it when it is
- // replaced.
- class Surface final : public RefCounted {
- public:
- Surface(InstanceBase* instance, const SurfaceDescriptor* descriptor);
+MaybeError ValidateSurfaceDescriptor(const InstanceBase* instance,
+ const SurfaceDescriptor* descriptor);
+
+// A surface is a sum type of all the kinds of windows Dawn supports. The OS-specific types
+// aren't used because they would cause compilation errors on other OSes (or require
+// ObjectiveC).
+// The surface is also used to store the current swapchain so that we can detach it when it is
+// replaced.
+class Surface final : public ErrorMonad {
+ public:
+ static Surface* MakeError(InstanceBase* instance);
+
+ Surface(InstanceBase* instance, const SurfaceDescriptor* descriptor);
+
+ void SetAttachedSwapChain(NewSwapChainBase* swapChain);
+ NewSwapChainBase* GetAttachedSwapChain();
+
+ // These are valid to call on all Surfaces.
+ enum class Type {
+ AndroidWindow,
+ MetalLayer,
+ WaylandSurface,
+ WindowsHWND,
+ WindowsCoreWindow,
+ WindowsSwapChainPanel,
+ XlibWindow,
+ };
+ Type GetType() const;
+ InstanceBase* GetInstance() const;
- void SetAttachedSwapChain(NewSwapChainBase* swapChain);
- NewSwapChainBase* GetAttachedSwapChain();
+ // Valid to call if the type is MetalLayer
+ void* GetMetalLayer() const;
- // These are valid to call on all Surfaces.
- enum class Type {
- AndroidWindow,
- MetalLayer,
- WindowsHWND,
- WindowsCoreWindow,
- WindowsSwapChainPanel,
- XlibWindow,
- };
- Type GetType() const;
- InstanceBase* GetInstance();
+ // Valid to call if the type is Android
+ void* GetAndroidNativeWindow() const;
- // Valid to call if the type is MetalLayer
- void* GetMetalLayer() const;
+ // Valid to call if the type is WaylandSurface
+ void* GetWaylandDisplay() const;
+ void* GetWaylandSurface() const;
- // Valid to call if the type is Android
- void* GetAndroidNativeWindow() const;
+ // Valid to call if the type is WindowsHWND
+ void* GetHInstance() const;
+ void* GetHWND() const;
- // Valid to call if the type is WindowsHWND
- void* GetHInstance() const;
- void* GetHWND() const;
+ // Valid to call if the type is WindowsCoreWindow
+ IUnknown* GetCoreWindow() const;
- // Valid to call if the type is WindowsCoreWindow
- IUnknown* GetCoreWindow() const;
+ // Valid to call if the type is WindowsSwapChainPanel
+ IUnknown* GetSwapChainPanel() const;
- // Valid to call if the type is WindowsSwapChainPanel
- IUnknown* GetSwapChainPanel() const;
+ // Valid to call if the type is XlibWindow
+ void* GetXDisplay() const;
+ uint32_t GetXWindow() const;
- // Valid to call if the type is WindowsXlib
- void* GetXDisplay() const;
- uint32_t GetXWindow() const;
+ private:
+ Surface(InstanceBase* instance, ErrorMonad::ErrorTag tag);
+ ~Surface() override;
- private:
- ~Surface() override;
+ Ref<InstanceBase> mInstance;
+ Type mType;
- Ref<InstanceBase> mInstance;
- Type mType;
+ // The swapchain will set this to null when it is destroyed.
+ Ref<NewSwapChainBase> mSwapChain;
- // The swapchain will set this to null when it is destroyed.
- Ref<NewSwapChainBase> mSwapChain;
+ // MetalLayer
+ void* mMetalLayer = nullptr;
- // MetalLayer
- void* mMetalLayer = nullptr;
+ // ANativeWindow
+ void* mAndroidNativeWindow = nullptr;
- // ANativeWindow
- void* mAndroidNativeWindow = nullptr;
+ // Wayland
+ void* mWaylandDisplay = nullptr;
+ void* mWaylandSurface = nullptr;
- // WindowsHwnd
- void* mHInstance = nullptr;
- void* mHWND = nullptr;
+ // WindowsHwnd
+ void* mHInstance = nullptr;
+ void* mHWND = nullptr;
-#if defined(DAWN_PLATFORM_WINDOWS)
- // WindowsCoreWindow
- ComPtr<IUnknown> mCoreWindow;
+#if DAWN_PLATFORM_IS(WINDOWS)
+ // WindowsCoreWindow
+ ComPtr<IUnknown> mCoreWindow;
- // WindowsSwapChainPanel
- ComPtr<IUnknown> mSwapChainPanel;
-#endif // defined(DAWN_PLATFORM_WINDOWS)
+ // WindowsSwapChainPanel
+ ComPtr<IUnknown> mSwapChainPanel;
+#endif // DAWN_PLATFORM_IS(WINDOWS)
- // Xlib
- void* mXDisplay = nullptr;
- uint32_t mXWindow = 0;
- };
+ // Xlib
+ void* mXDisplay = nullptr;
+ uint32_t mXWindow = 0;
+};
- // Not defined in webgpu_absl_format.h/cpp because you can't forward-declare a nested type.
- absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
- Surface::Type value,
- const absl::FormatConversionSpec& spec,
- absl::FormatSink* s);
+// Not defined in webgpu_absl_format.h/cpp because you can't forward-declare a nested type.
+absl::FormatConvertResult<absl::FormatConversionCharSet::kString>
+AbslFormatConvert(Surface::Type value, const absl::FormatConversionSpec& spec, absl::FormatSink* s);
} // namespace dawn::native
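To illustrate how the per-type accessors declared above are meant to be used together with the new error-surface support, here is a hedged sketch of backend-style dispatch on Surface::Type. It assumes dawn/native/Surface.h and dawn/native/Error.h are included; the function name and the choice to return a validation error for unhandled types are illustrative, not Dawn's actual backend code.

namespace dawn::native {

// Illustrative only: pick the native handles for the surface types added in this change.
MaybeError InspectSurfaceHandles(const Surface* surface) {
    // Error surfaces carry no window handles, so reject them before calling GetType().
    DAWN_INVALID_IF(surface->IsError(), "[Surface] is invalid.");

    switch (surface->GetType()) {
        case Surface::Type::WaylandSurface:
            // Use surface->GetWaylandDisplay() and surface->GetWaylandSurface().
            return {};
        case Surface::Type::XlibWindow:
            // Use surface->GetXDisplay() and surface->GetXWindow().
            return {};
        default:
            return DAWN_FORMAT_VALIDATION_ERROR("Unsupported surface type (%s)",
                                                surface->GetType());
    }
}

}  // namespace dawn::native

The %s formatting of Surface::Type works because of the AbslFormatConvert overload declared at the bottom of this header.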
diff --git a/chromium/third_party/dawn/src/dawn/native/Surface_metal.mm b/chromium/third_party/dawn/src/dawn/native/Surface_metal.mm
index ecb5d884aa6..c4f9792079a 100644
--- a/chromium/third_party/dawn/src/dawn/native/Surface_metal.mm
+++ b/chromium/third_party/dawn/src/dawn/native/Surface_metal.mm
@@ -15,16 +15,16 @@
// Contains a helper function for Surface.cpp that needs to be written in ObjectiveC.
#if !defined(DAWN_ENABLE_BACKEND_METAL)
-# error "Surface_metal.mm requires the Metal backend to be enabled."
+#error "Surface_metal.mm requires the Metal backend to be enabled."
#endif // !defined(DAWN_ENABLE_BACKEND_METAL)
#import <QuartzCore/CAMetalLayer.h>
namespace dawn::native {
- bool InheritsFromCAMetalLayer(void* obj) {
- id<NSObject> object = static_cast<id>(obj);
- return [object isKindOfClass:[CAMetalLayer class]];
- }
+bool InheritsFromCAMetalLayer(void* obj) {
+ id<NSObject> object = static_cast<id>(obj);
+ return [object isKindOfClass:[CAMetalLayer class]];
+}
} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/SwapChain.cpp b/chromium/third_party/dawn/src/dawn/native/SwapChain.cpp
index a2e735a5ed3..19089295923 100644
--- a/chromium/third_party/dawn/src/dawn/native/SwapChain.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/SwapChain.cpp
@@ -24,400 +24,387 @@
namespace dawn::native {
- namespace {
-
- class ErrorSwapChain final : public SwapChainBase {
- public:
- explicit ErrorSwapChain(DeviceBase* device)
- : SwapChainBase(device, ObjectBase::kError) {
- }
-
- private:
- void APIConfigure(wgpu::TextureFormat format,
- wgpu::TextureUsage allowedUsage,
- uint32_t width,
- uint32_t height) override {
- GetDevice()->ConsumedError(
- DAWN_FORMAT_VALIDATION_ERROR("%s is an error swapchain.", this));
- }
-
- TextureViewBase* APIGetCurrentTextureView() override {
- GetDevice()->ConsumedError(
- DAWN_FORMAT_VALIDATION_ERROR("%s is an error swapchain.", this));
- return TextureViewBase::MakeError(GetDevice());
- }
-
- void APIPresent() override {
- GetDevice()->ConsumedError(
- DAWN_FORMAT_VALIDATION_ERROR("%s is an error swapchain.", this));
- }
- };
-
- } // anonymous namespace
-
- MaybeError ValidateSwapChainDescriptor(const DeviceBase* device,
- const Surface* surface,
- const SwapChainDescriptor* descriptor) {
- if (descriptor->implementation != 0) {
- DAWN_INVALID_IF(surface != nullptr,
- "Exactly one of surface or implementation must be set");
-
- DawnSwapChainImplementation* impl =
- reinterpret_cast<DawnSwapChainImplementation*>(descriptor->implementation);
-
- DAWN_INVALID_IF(!impl->Init || !impl->Destroy || !impl->Configure ||
- !impl->GetNextTexture || !impl->Present,
- "Implementation is incomplete");
-
- } else {
- DAWN_INVALID_IF(surface == nullptr,
- "At least one of surface or implementation must be set");
-
- DAWN_TRY(ValidatePresentMode(descriptor->presentMode));
+namespace {
+
+class ErrorSwapChain final : public SwapChainBase {
+ public:
+ explicit ErrorSwapChain(DeviceBase* device) : SwapChainBase(device, ObjectBase::kError) {}
+
+ private:
+ void APIConfigure(wgpu::TextureFormat format,
+ wgpu::TextureUsage allowedUsage,
+ uint32_t width,
+ uint32_t height) override {
+ GetDevice()->ConsumedError(DAWN_FORMAT_VALIDATION_ERROR("%s is an error swapchain.", this));
+ }
+
+ TextureViewBase* APIGetCurrentTextureView() override {
+ GetDevice()->ConsumedError(DAWN_FORMAT_VALIDATION_ERROR("%s is an error swapchain.", this));
+ return TextureViewBase::MakeError(GetDevice());
+ }
+
+ void APIPresent() override {
+ GetDevice()->ConsumedError(DAWN_FORMAT_VALIDATION_ERROR("%s is an error swapchain.", this));
+ }
+};
+
+} // anonymous namespace
+
+MaybeError ValidateSwapChainDescriptor(const DeviceBase* device,
+ const Surface* surface,
+ const SwapChainDescriptor* descriptor) {
+ if (descriptor->implementation != 0) {
+ DAWN_INVALID_IF(surface != nullptr, "Exactly one of surface or implementation must be set");
+
+ DawnSwapChainImplementation* impl =
+ reinterpret_cast<DawnSwapChainImplementation*>(descriptor->implementation);
+
+ DAWN_INVALID_IF(!impl->Init || !impl->Destroy || !impl->Configure ||
+ !impl->GetNextTexture || !impl->Present,
+ "Implementation is incomplete");
+
+ } else {
+ DAWN_INVALID_IF(surface == nullptr,
+ "At least one of surface or implementation must be set");
+ DAWN_INVALID_IF(surface->IsError(), "[Surface] is invalid.");
+
+ DAWN_TRY(ValidatePresentMode(descriptor->presentMode));
// TODO(crbug.com/dawn/160): Lift this restriction once wgpu::Instance::GetPreferredSurfaceFormat is
// implemented.
// TODO(dawn:286):
-#if defined(DAWN_PLATFORM_ANDROID)
- constexpr wgpu::TextureFormat kRequireSwapChainFormat = wgpu::TextureFormat::RGBA8Unorm;
+#if DAWN_PLATFORM_IS(ANDROID)
+ constexpr wgpu::TextureFormat kRequireSwapChainFormat = wgpu::TextureFormat::RGBA8Unorm;
#else
- constexpr wgpu::TextureFormat kRequireSwapChainFormat = wgpu::TextureFormat::BGRA8Unorm;
-#endif // !defined(DAWN_PLATFORM_ANDROID)
- DAWN_INVALID_IF(descriptor->format != kRequireSwapChainFormat,
- "Format (%s) is not %s, which is (currently) the only accepted format.",
- descriptor->format, kRequireSwapChainFormat);
-
- DAWN_INVALID_IF(descriptor->usage != wgpu::TextureUsage::RenderAttachment,
- "Usage (%s) is not %s, which is (currently) the only accepted usage.",
- descriptor->usage, wgpu::TextureUsage::RenderAttachment);
-
- DAWN_INVALID_IF(descriptor->width == 0 || descriptor->height == 0,
- "Swap Chain size (width: %u, height: %u) is empty.", descriptor->width,
- descriptor->height);
-
- DAWN_INVALID_IF(
- descriptor->width > device->GetLimits().v1.maxTextureDimension2D ||
- descriptor->height > device->GetLimits().v1.maxTextureDimension2D,
- "Swap Chain size (width: %u, height: %u) is greater than the maximum 2D texture "
- "size (width: %u, height: %u).",
- descriptor->width, descriptor->height, device->GetLimits().v1.maxTextureDimension2D,
- device->GetLimits().v1.maxTextureDimension2D);
- }
-
- return {};
- }
+ constexpr wgpu::TextureFormat kRequireSwapChainFormat = wgpu::TextureFormat::BGRA8Unorm;
+#endif // !DAWN_PLATFORM_IS(ANDROID)
+ DAWN_INVALID_IF(descriptor->format != kRequireSwapChainFormat,
+ "Format (%s) is not %s, which is (currently) the only accepted format.",
+ descriptor->format, kRequireSwapChainFormat);
- TextureDescriptor GetSwapChainBaseTextureDescriptor(NewSwapChainBase* swapChain) {
- TextureDescriptor desc;
- desc.usage = swapChain->GetUsage();
- desc.dimension = wgpu::TextureDimension::e2D;
- desc.size = {swapChain->GetWidth(), swapChain->GetHeight(), 1};
- desc.format = swapChain->GetFormat();
- desc.mipLevelCount = 1;
- desc.sampleCount = 1;
+ DAWN_INVALID_IF(descriptor->usage != wgpu::TextureUsage::RenderAttachment,
+ "Usage (%s) is not %s, which is (currently) the only accepted usage.",
+ descriptor->usage, wgpu::TextureUsage::RenderAttachment);
- return desc;
+ DAWN_INVALID_IF(descriptor->width == 0 || descriptor->height == 0,
+ "Swap Chain size (width: %u, height: %u) is empty.", descriptor->width,
+ descriptor->height);
+
+ DAWN_INVALID_IF(
+ descriptor->width > device->GetLimits().v1.maxTextureDimension2D ||
+ descriptor->height > device->GetLimits().v1.maxTextureDimension2D,
+ "Swap Chain size (width: %u, height: %u) is greater than the maximum 2D texture "
+ "size (width: %u, height: %u).",
+ descriptor->width, descriptor->height, device->GetLimits().v1.maxTextureDimension2D,
+ device->GetLimits().v1.maxTextureDimension2D);
}
- // SwapChainBase
+ return {};
+}
- SwapChainBase::SwapChainBase(DeviceBase* device) : ApiObjectBase(device, kLabelNotImplemented) {
- TrackInDevice();
- }
+TextureDescriptor GetSwapChainBaseTextureDescriptor(NewSwapChainBase* swapChain) {
+ TextureDescriptor desc;
+ desc.usage = swapChain->GetUsage();
+ desc.dimension = wgpu::TextureDimension::e2D;
+ desc.size = {swapChain->GetWidth(), swapChain->GetHeight(), 1};
+ desc.format = swapChain->GetFormat();
+ desc.mipLevelCount = 1;
+ desc.sampleCount = 1;
- SwapChainBase::SwapChainBase(DeviceBase* device, ObjectBase::ErrorTag tag)
- : ApiObjectBase(device, tag) {
- }
+ return desc;
+}
- SwapChainBase::~SwapChainBase() {
- }
+// SwapChainBase
- void SwapChainBase::DestroyImpl() {
- }
+SwapChainBase::SwapChainBase(DeviceBase* device) : ApiObjectBase(device, kLabelNotImplemented) {
+ TrackInDevice();
+}
- // static
- SwapChainBase* SwapChainBase::MakeError(DeviceBase* device) {
- return new ErrorSwapChain(device);
- }
+SwapChainBase::SwapChainBase(DeviceBase* device, ObjectBase::ErrorTag tag)
+ : ApiObjectBase(device, tag) {}
- ObjectType SwapChainBase::GetType() const {
- return ObjectType::SwapChain;
- }
+SwapChainBase::~SwapChainBase() {}
+
+void SwapChainBase::DestroyImpl() {}
+
+// static
+SwapChainBase* SwapChainBase::MakeError(DeviceBase* device) {
+ return new ErrorSwapChain(device);
+}
+
+ObjectType SwapChainBase::GetType() const {
+ return ObjectType::SwapChain;
+}
- // OldSwapChainBase
+// OldSwapChainBase
- OldSwapChainBase::OldSwapChainBase(DeviceBase* device, const SwapChainDescriptor* descriptor)
- : SwapChainBase(device),
- mImplementation(
- *reinterpret_cast<DawnSwapChainImplementation*>(descriptor->implementation)) {
+OldSwapChainBase::OldSwapChainBase(DeviceBase* device, const SwapChainDescriptor* descriptor)
+ : SwapChainBase(device),
+ mImplementation(*reinterpret_cast<DawnSwapChainImplementation*>(descriptor->implementation)) {
+}
+
+OldSwapChainBase::~OldSwapChainBase() {
+ if (!IsError()) {
+ const auto& im = GetImplementation();
+ im.Destroy(im.userData);
}
+}
- OldSwapChainBase::~OldSwapChainBase() {
- if (!IsError()) {
- const auto& im = GetImplementation();
- im.Destroy(im.userData);
- }
+void OldSwapChainBase::APIConfigure(wgpu::TextureFormat format,
+ wgpu::TextureUsage allowedUsage,
+ uint32_t width,
+ uint32_t height) {
+ if (GetDevice()->ConsumedError(ValidateConfigure(format, allowedUsage, width, height))) {
+ return;
}
+ ASSERT(!IsError());
+
+ allowedUsage |= wgpu::TextureUsage::Present;
+
+ mFormat = format;
+ mAllowedUsage = allowedUsage;
+ mWidth = width;
+ mHeight = height;
+ mImplementation.Configure(mImplementation.userData, static_cast<WGPUTextureFormat>(format),
+ static_cast<WGPUTextureUsage>(allowedUsage), width, height);
+}
- void OldSwapChainBase::APIConfigure(wgpu::TextureFormat format,
- wgpu::TextureUsage allowedUsage,
- uint32_t width,
- uint32_t height) {
- if (GetDevice()->ConsumedError(ValidateConfigure(format, allowedUsage, width, height))) {
- return;
- }
- ASSERT(!IsError());
-
- allowedUsage |= wgpu::TextureUsage::Present;
-
- mFormat = format;
- mAllowedUsage = allowedUsage;
- mWidth = width;
- mHeight = height;
- mImplementation.Configure(mImplementation.userData, static_cast<WGPUTextureFormat>(format),
- static_cast<WGPUTextureUsage>(allowedUsage), width, height);
+TextureViewBase* OldSwapChainBase::APIGetCurrentTextureView() {
+ if (GetDevice()->ConsumedError(ValidateGetCurrentTextureView())) {
+ return TextureViewBase::MakeError(GetDevice());
}
+ ASSERT(!IsError());
- TextureViewBase* OldSwapChainBase::APIGetCurrentTextureView() {
- if (GetDevice()->ConsumedError(ValidateGetCurrentTextureView())) {
- return TextureViewBase::MakeError(GetDevice());
- }
- ASSERT(!IsError());
-
- // Return the same current texture view until Present is called.
- if (mCurrentTextureView != nullptr) {
- // Calling GetCurrentTextureView always returns a new reference so add it even when
- // reuse the existing texture view.
- mCurrentTextureView->Reference();
- return mCurrentTextureView.Get();
- }
-
- // Create the backing texture and the view.
- TextureDescriptor descriptor;
- descriptor.dimension = wgpu::TextureDimension::e2D;
- descriptor.size.width = mWidth;
- descriptor.size.height = mHeight;
- descriptor.size.depthOrArrayLayers = 1;
- descriptor.sampleCount = 1;
- descriptor.format = mFormat;
- descriptor.mipLevelCount = 1;
- descriptor.usage = mAllowedUsage;
-
- // Get the texture but remove the external refcount because it is never passed outside
- // of dawn_native
- mCurrentTexture = AcquireRef(GetNextTextureImpl(&descriptor));
-
- mCurrentTextureView = mCurrentTexture->APICreateView();
+ // Return the same current texture view until Present is called.
+ if (mCurrentTextureView != nullptr) {
+ // Calling GetCurrentTextureView always returns a new reference, so add one even when
+ // reusing the existing texture view.
+ mCurrentTextureView->Reference();
return mCurrentTextureView.Get();
}
- void OldSwapChainBase::APIPresent() {
- if (GetDevice()->ConsumedError(ValidatePresent())) {
- return;
- }
- ASSERT(!IsError());
+ // Create the backing texture and the view.
+ TextureDescriptor descriptor;
+ descriptor.dimension = wgpu::TextureDimension::e2D;
+ descriptor.size.width = mWidth;
+ descriptor.size.height = mHeight;
+ descriptor.size.depthOrArrayLayers = 1;
+ descriptor.sampleCount = 1;
+ descriptor.format = mFormat;
+ descriptor.mipLevelCount = 1;
+ descriptor.usage = mAllowedUsage;
- if (GetDevice()->ConsumedError(OnBeforePresent(mCurrentTextureView.Get()))) {
- return;
- }
+ // Get the texture but remove the external refcount because it is never passed outside
+ // of dawn_native
+ mCurrentTexture = AcquireRef(GetNextTextureImpl(&descriptor));
- mImplementation.Present(mImplementation.userData);
+ mCurrentTextureView = mCurrentTexture->APICreateView();
+ return mCurrentTextureView.Get();
+}
- mCurrentTexture = nullptr;
- mCurrentTextureView = nullptr;
+void OldSwapChainBase::APIPresent() {
+ if (GetDevice()->ConsumedError(ValidatePresent())) {
+ return;
}
+ ASSERT(!IsError());
- const DawnSwapChainImplementation& OldSwapChainBase::GetImplementation() {
- ASSERT(!IsError());
- return mImplementation;
+ if (GetDevice()->ConsumedError(OnBeforePresent(mCurrentTextureView.Get()))) {
+ return;
}
- MaybeError OldSwapChainBase::ValidateConfigure(wgpu::TextureFormat format,
- wgpu::TextureUsage allowedUsage,
- uint32_t width,
- uint32_t height) const {
- DAWN_TRY(GetDevice()->ValidateIsAlive());
- DAWN_TRY(GetDevice()->ValidateObject(this));
+ mImplementation.Present(mImplementation.userData);
- DAWN_TRY(ValidateTextureUsage(allowedUsage));
- DAWN_TRY(ValidateTextureFormat(format));
+ mCurrentTexture = nullptr;
+ mCurrentTextureView = nullptr;
+}
- DAWN_INVALID_IF(width == 0 || height == 0,
- "Configuration size (width: %u, height: %u) for %s is empty.", width,
- height, this);
+const DawnSwapChainImplementation& OldSwapChainBase::GetImplementation() {
+ ASSERT(!IsError());
+ return mImplementation;
+}
- return {};
- }
+MaybeError OldSwapChainBase::ValidateConfigure(wgpu::TextureFormat format,
+ wgpu::TextureUsage allowedUsage,
+ uint32_t width,
+ uint32_t height) const {
+ DAWN_TRY(GetDevice()->ValidateIsAlive());
+ DAWN_TRY(GetDevice()->ValidateObject(this));
- MaybeError OldSwapChainBase::ValidateGetCurrentTextureView() const {
- DAWN_TRY(GetDevice()->ValidateIsAlive());
- DAWN_TRY(GetDevice()->ValidateObject(this));
+ DAWN_TRY(ValidateTextureUsage(allowedUsage));
+ DAWN_TRY(ValidateTextureFormat(format));
- // If width is 0, it implies swap chain has never been configured
- DAWN_INVALID_IF(mWidth == 0, "%s was not configured prior to calling GetNextTexture.",
- this);
+ DAWN_INVALID_IF(width == 0 || height == 0,
+ "Configuration size (width: %u, height: %u) for %s is empty.", width, height,
+ this);
- return {};
- }
+ return {};
+}
- MaybeError OldSwapChainBase::ValidatePresent() const {
- DAWN_TRY(GetDevice()->ValidateIsAlive());
- DAWN_TRY(GetDevice()->ValidateObject(this));
+MaybeError OldSwapChainBase::ValidateGetCurrentTextureView() const {
+ DAWN_TRY(GetDevice()->ValidateIsAlive());
+ DAWN_TRY(GetDevice()->ValidateObject(this));
- DAWN_INVALID_IF(
- mCurrentTextureView == nullptr,
- "GetCurrentTextureView was not called on %s this frame prior to calling Present.",
- this);
+    // If width is 0, it implies the swap chain has never been configured.
+ DAWN_INVALID_IF(mWidth == 0, "%s was not configured prior to calling GetNextTexture.", this);
- return {};
- }
+ return {};
+}
- // Implementation of NewSwapChainBase
-
- NewSwapChainBase::NewSwapChainBase(DeviceBase* device,
- Surface* surface,
- const SwapChainDescriptor* descriptor)
- : SwapChainBase(device),
- mAttached(false),
- mWidth(descriptor->width),
- mHeight(descriptor->height),
- mFormat(descriptor->format),
- mUsage(descriptor->usage),
- mPresentMode(descriptor->presentMode),
- mSurface(surface) {
- }
+MaybeError OldSwapChainBase::ValidatePresent() const {
+ DAWN_TRY(GetDevice()->ValidateIsAlive());
+ DAWN_TRY(GetDevice()->ValidateObject(this));
- NewSwapChainBase::~NewSwapChainBase() {
- if (mCurrentTextureView != nullptr) {
- ASSERT(mCurrentTextureView->GetTexture()->GetTextureState() ==
- TextureBase::TextureState::Destroyed);
- }
+ DAWN_INVALID_IF(
+ mCurrentTextureView == nullptr,
+ "GetCurrentTextureView was not called on %s this frame prior to calling Present.", this);
- ASSERT(!mAttached);
- }
+ return {};
+}
- void NewSwapChainBase::DetachFromSurface() {
- if (mAttached) {
- DetachFromSurfaceImpl();
- mSurface = nullptr;
- mAttached = false;
- }
- }
+// Implementation of NewSwapChainBase
- void NewSwapChainBase::SetIsAttached() {
- mAttached = true;
+NewSwapChainBase::NewSwapChainBase(DeviceBase* device,
+ Surface* surface,
+ const SwapChainDescriptor* descriptor)
+ : SwapChainBase(device),
+ mAttached(false),
+ mWidth(descriptor->width),
+ mHeight(descriptor->height),
+ mFormat(descriptor->format),
+ mUsage(descriptor->usage),
+ mPresentMode(descriptor->presentMode),
+ mSurface(surface) {}
+
+NewSwapChainBase::~NewSwapChainBase() {
+ if (mCurrentTextureView != nullptr) {
+ ASSERT(mCurrentTextureView->GetTexture()->GetTextureState() ==
+ TextureBase::TextureState::Destroyed);
}
- void NewSwapChainBase::APIConfigure(wgpu::TextureFormat format,
- wgpu::TextureUsage allowedUsage,
- uint32_t width,
- uint32_t height) {
- GetDevice()->ConsumedError(
- DAWN_FORMAT_VALIDATION_ERROR("Configure is invalid for surface-based swapchains."));
+ ASSERT(!mAttached);
+}
+
+void NewSwapChainBase::DetachFromSurface() {
+ if (mAttached) {
+ DetachFromSurfaceImpl();
+ mSurface = nullptr;
+ mAttached = false;
}
+}
- TextureViewBase* NewSwapChainBase::APIGetCurrentTextureView() {
- Ref<TextureViewBase> result;
- if (GetDevice()->ConsumedError(GetCurrentTextureView(), &result,
- "calling %s.GetCurrentTextureView()", this)) {
- return TextureViewBase::MakeError(GetDevice());
- }
- return result.Detach();
+void NewSwapChainBase::SetIsAttached() {
+ mAttached = true;
+}
+
+void NewSwapChainBase::APIConfigure(wgpu::TextureFormat format,
+ wgpu::TextureUsage allowedUsage,
+ uint32_t width,
+ uint32_t height) {
+ GetDevice()->ConsumedError(
+ DAWN_FORMAT_VALIDATION_ERROR("Configure is invalid for surface-based swapchains."));
+}
+
+TextureViewBase* NewSwapChainBase::APIGetCurrentTextureView() {
+ Ref<TextureViewBase> result;
+ if (GetDevice()->ConsumedError(GetCurrentTextureView(), &result,
+ "calling %s.GetCurrentTextureView()", this)) {
+ return TextureViewBase::MakeError(GetDevice());
}
+ return result.Detach();
+}
- ResultOrError<Ref<TextureViewBase>> NewSwapChainBase::GetCurrentTextureView() {
- DAWN_TRY(ValidateGetCurrentTextureView());
-
- if (mCurrentTextureView != nullptr) {
- // Calling GetCurrentTextureView always returns a new reference.
- return mCurrentTextureView;
- }
-
- DAWN_TRY_ASSIGN(mCurrentTextureView, GetCurrentTextureViewImpl());
-
- // Check that the return texture view matches exactly what was given for this descriptor.
- ASSERT(mCurrentTextureView->GetTexture()->GetFormat().format == mFormat);
- ASSERT(IsSubset(mUsage, mCurrentTextureView->GetTexture()->GetUsage()));
- ASSERT(mCurrentTextureView->GetLevelCount() == 1);
- ASSERT(mCurrentTextureView->GetLayerCount() == 1);
- ASSERT(mCurrentTextureView->GetDimension() == wgpu::TextureViewDimension::e2D);
- ASSERT(mCurrentTextureView->GetTexture()
- ->GetMipLevelVirtualSize(mCurrentTextureView->GetBaseMipLevel())
- .width == mWidth);
- ASSERT(mCurrentTextureView->GetTexture()
- ->GetMipLevelVirtualSize(mCurrentTextureView->GetBaseMipLevel())
- .height == mHeight);
+ResultOrError<Ref<TextureViewBase>> NewSwapChainBase::GetCurrentTextureView() {
+ DAWN_TRY(ValidateGetCurrentTextureView());
+ if (mCurrentTextureView != nullptr) {
+ // Calling GetCurrentTextureView always returns a new reference.
return mCurrentTextureView;
}
- void NewSwapChainBase::APIPresent() {
- if (GetDevice()->ConsumedError(ValidatePresent())) {
- return;
- }
+ DAWN_TRY_ASSIGN(mCurrentTextureView, GetCurrentTextureViewImpl());
- if (GetDevice()->ConsumedError(PresentImpl())) {
- return;
- }
+    // Check that the returned texture view matches exactly what was given for this descriptor.
+ ASSERT(mCurrentTextureView->GetTexture()->GetFormat().format == mFormat);
+ ASSERT(IsSubset(mUsage, mCurrentTextureView->GetTexture()->GetUsage()));
+ ASSERT(mCurrentTextureView->GetLevelCount() == 1);
+ ASSERT(mCurrentTextureView->GetLayerCount() == 1);
+ ASSERT(mCurrentTextureView->GetDimension() == wgpu::TextureViewDimension::e2D);
+ ASSERT(mCurrentTextureView->GetTexture()
+ ->GetMipLevelSingleSubresourceVirtualSize(mCurrentTextureView->GetBaseMipLevel())
+ .width == mWidth);
+ ASSERT(mCurrentTextureView->GetTexture()
+ ->GetMipLevelSingleSubresourceVirtualSize(mCurrentTextureView->GetBaseMipLevel())
+ .height == mHeight);
- ASSERT(mCurrentTextureView->GetTexture()->GetTextureState() ==
- TextureBase::TextureState::Destroyed);
- mCurrentTextureView = nullptr;
- }
+ return mCurrentTextureView;
+}
- uint32_t NewSwapChainBase::GetWidth() const {
- return mWidth;
+void NewSwapChainBase::APIPresent() {
+ if (GetDevice()->ConsumedError(ValidatePresent())) {
+ return;
}
- uint32_t NewSwapChainBase::GetHeight() const {
- return mHeight;
+ if (GetDevice()->ConsumedError(PresentImpl())) {
+ return;
}
- wgpu::TextureFormat NewSwapChainBase::GetFormat() const {
- return mFormat;
- }
+ ASSERT(mCurrentTextureView->GetTexture()->GetTextureState() ==
+ TextureBase::TextureState::Destroyed);
+ mCurrentTextureView = nullptr;
+}
- wgpu::TextureUsage NewSwapChainBase::GetUsage() const {
- return mUsage;
- }
+uint32_t NewSwapChainBase::GetWidth() const {
+ return mWidth;
+}
- wgpu::PresentMode NewSwapChainBase::GetPresentMode() const {
- return mPresentMode;
- }
+uint32_t NewSwapChainBase::GetHeight() const {
+ return mHeight;
+}
- Surface* NewSwapChainBase::GetSurface() const {
- return mSurface;
- }
+wgpu::TextureFormat NewSwapChainBase::GetFormat() const {
+ return mFormat;
+}
- bool NewSwapChainBase::IsAttached() const {
- return mAttached;
- }
+wgpu::TextureUsage NewSwapChainBase::GetUsage() const {
+ return mUsage;
+}
- wgpu::BackendType NewSwapChainBase::GetBackendType() const {
- return GetDevice()->GetAdapter()->GetBackendType();
- }
+wgpu::PresentMode NewSwapChainBase::GetPresentMode() const {
+ return mPresentMode;
+}
- MaybeError NewSwapChainBase::ValidatePresent() const {
- DAWN_TRY(GetDevice()->ValidateIsAlive());
- DAWN_TRY(GetDevice()->ValidateObject(this));
+Surface* NewSwapChainBase::GetSurface() const {
+ return mSurface;
+}
- DAWN_INVALID_IF(!mAttached, "Cannot call Present called on detached %s.", this);
+bool NewSwapChainBase::IsAttached() const {
+ return mAttached;
+}
- DAWN_INVALID_IF(
- mCurrentTextureView == nullptr,
- "GetCurrentTextureView was not called on %s this frame prior to calling Present.",
- this);
+wgpu::BackendType NewSwapChainBase::GetBackendType() const {
+ return GetDevice()->GetAdapter()->GetBackendType();
+}
- return {};
- }
+MaybeError NewSwapChainBase::ValidatePresent() const {
+ DAWN_TRY(GetDevice()->ValidateIsAlive());
+ DAWN_TRY(GetDevice()->ValidateObject(this));
- MaybeError NewSwapChainBase::ValidateGetCurrentTextureView() const {
- DAWN_TRY(GetDevice()->ValidateIsAlive());
- DAWN_TRY(GetDevice()->ValidateObject(this));
+    DAWN_INVALID_IF(!mAttached, "Cannot call Present on detached %s.", this);
- DAWN_INVALID_IF(!mAttached, "Cannot call GetCurrentTextureView on detached %s.", this);
+ DAWN_INVALID_IF(
+ mCurrentTextureView == nullptr,
+ "GetCurrentTextureView was not called on %s this frame prior to calling Present.", this);
- return {};
- }
+ return {};
+}
+
+MaybeError NewSwapChainBase::ValidateGetCurrentTextureView() const {
+ DAWN_TRY(GetDevice()->ValidateIsAlive());
+ DAWN_TRY(GetDevice()->ValidateObject(this));
+
+ DAWN_INVALID_IF(!mAttached, "Cannot call GetCurrentTextureView on detached %s.", this);
+
+ return {};
+}
} // namespace dawn::native
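
The comments above pin down the per-frame contract of the surface-based swapchain: GetCurrentTextureView() returns the same backing view (adding a reference on each call) until Present() is called, and Present() invalidates the backing texture. Below is a minimal caller-side sketch of that loop, assuming a wgpu::Device and wgpu::SwapChain created elsewhere and a hypothetical EncodeRenderPass() helper that records commands targeting the view; it is an illustration, not code from this tree.

#include <webgpu/webgpu_cpp.h>

// Hypothetical helper, defined elsewhere: records a render pass that draws into the given view.
void EncodeRenderPass(const wgpu::CommandEncoder& encoder, const wgpu::TextureView& view);

void RenderFrame(const wgpu::Device& device, const wgpu::SwapChain& swapChain) {
    // The same backing view is returned until Present() is called; each call adds a reference.
    wgpu::TextureView view = swapChain.GetCurrentTextureView();

    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
    EncodeRenderPass(encoder, view);
    wgpu::CommandBuffer commands = encoder.Finish();
    device.GetQueue().Submit(1, &commands);

    // Present() consumes the view; the backing texture is destroyed and must not be used again.
    swapChain.Present();
}
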
diff --git a/chromium/third_party/dawn/src/dawn/native/SwapChain.h b/chromium/third_party/dawn/src/dawn/native/SwapChain.h
index 24f12e4f793..36ed02a0ded 100644
--- a/chromium/third_party/dawn/src/dawn/native/SwapChain.h
+++ b/chromium/third_party/dawn/src/dawn/native/SwapChain.h
@@ -24,146 +24,144 @@
namespace dawn::native {
- MaybeError ValidateSwapChainDescriptor(const DeviceBase* device,
- const Surface* surface,
- const SwapChainDescriptor* descriptor);
-
- TextureDescriptor GetSwapChainBaseTextureDescriptor(NewSwapChainBase* swapChain);
-
- class SwapChainBase : public ApiObjectBase {
- public:
- explicit SwapChainBase(DeviceBase* device);
-
- static SwapChainBase* MakeError(DeviceBase* device);
-
- ObjectType GetType() const override;
-
- // Dawn API
- virtual void APIConfigure(wgpu::TextureFormat format,
- wgpu::TextureUsage allowedUsage,
- uint32_t width,
- uint32_t height) = 0;
- virtual TextureViewBase* APIGetCurrentTextureView() = 0;
- virtual void APIPresent() = 0;
-
- protected:
- SwapChainBase(DeviceBase* device, ObjectBase::ErrorTag tag);
- ~SwapChainBase() override;
- void DestroyImpl() override;
- };
-
- // The base class for implementation-based SwapChains that are deprecated.
- class OldSwapChainBase : public SwapChainBase {
- public:
- OldSwapChainBase(DeviceBase* device, const SwapChainDescriptor* descriptor);
-
- // Dawn API
- void APIConfigure(wgpu::TextureFormat format,
- wgpu::TextureUsage allowedUsage,
- uint32_t width,
- uint32_t height) override;
- TextureViewBase* APIGetCurrentTextureView() override;
- void APIPresent() override;
-
- protected:
- ~OldSwapChainBase() override;
- const DawnSwapChainImplementation& GetImplementation();
- virtual TextureBase* GetNextTextureImpl(const TextureDescriptor*) = 0;
- virtual MaybeError OnBeforePresent(TextureViewBase* view) = 0;
-
- private:
- MaybeError ValidateConfigure(wgpu::TextureFormat format,
- wgpu::TextureUsage allowedUsage,
- uint32_t width,
- uint32_t height) const;
- MaybeError ValidateGetCurrentTextureView() const;
- MaybeError ValidatePresent() const;
-
- DawnSwapChainImplementation mImplementation = {};
- wgpu::TextureFormat mFormat = {};
- wgpu::TextureUsage mAllowedUsage;
- uint32_t mWidth = 0;
- uint32_t mHeight = 0;
- Ref<TextureBase> mCurrentTexture;
- Ref<TextureViewBase> mCurrentTextureView;
- };
-
- // The base class for surface-based SwapChains that aren't ready yet.
- class NewSwapChainBase : public SwapChainBase {
- public:
- NewSwapChainBase(DeviceBase* device,
- Surface* surface,
- const SwapChainDescriptor* descriptor);
-
- // This is called when the swapchain is detached when one of the following happens:
- //
- // - The surface it is attached to is being destroyed.
- // - The swapchain is being replaced by another one on the surface.
- //
- // Note that the surface has a Ref on the last swapchain that was used on it so the
- // SwapChain destructor will only be called after one of the things above happens.
- //
- // The call for the detaching previous swapchain should be called inside the backend
- // implementation of SwapChains. This is to allow them to acquire any resources before
- // calling detach to make a seamless transition from the previous swapchain.
- //
- // Likewise the call for the swapchain being destroyed must be done in the backend's
- // swapchain's destructor since C++ says it is UB to call virtual methods in the base class
- // destructor.
- void DetachFromSurface();
-
- void SetIsAttached();
-
- // Dawn API
- void APIConfigure(wgpu::TextureFormat format,
- wgpu::TextureUsage allowedUsage,
- uint32_t width,
- uint32_t height) override;
- TextureViewBase* APIGetCurrentTextureView() override;
- void APIPresent() override;
-
- uint32_t GetWidth() const;
- uint32_t GetHeight() const;
- wgpu::TextureFormat GetFormat() const;
- wgpu::TextureUsage GetUsage() const;
- wgpu::PresentMode GetPresentMode() const;
- Surface* GetSurface() const;
- bool IsAttached() const;
- wgpu::BackendType GetBackendType() const;
-
- protected:
- ~NewSwapChainBase() override;
-
- private:
- bool mAttached;
- uint32_t mWidth;
- uint32_t mHeight;
- wgpu::TextureFormat mFormat;
- wgpu::TextureUsage mUsage;
- wgpu::PresentMode mPresentMode;
-
- // This is a weak reference to the surface. If the surface is destroyed it will call
- // DetachFromSurface and mSurface will be updated to nullptr.
- Surface* mSurface = nullptr;
- Ref<TextureViewBase> mCurrentTextureView;
-
- MaybeError ValidatePresent() const;
- MaybeError ValidateGetCurrentTextureView() const;
-
- // GetCurrentTextureViewImpl and PresentImpl are guaranteed to be called in an interleaved
- // manner, starting with GetCurrentTextureViewImpl.
-
- // The returned texture view must match the swapchain descriptor exactly.
- ResultOrError<Ref<TextureViewBase>> GetCurrentTextureView();
- virtual ResultOrError<Ref<TextureViewBase>> GetCurrentTextureViewImpl() = 0;
- // The call to present must destroy the current view's texture so further access to it are
- // invalid.
- virtual MaybeError PresentImpl() = 0;
-
- // Guaranteed to be called exactly once during the lifetime of the SwapChain. After it is
- // called no other virtual method can be called.
- virtual void DetachFromSurfaceImpl() = 0;
- };
+MaybeError ValidateSwapChainDescriptor(const DeviceBase* device,
+ const Surface* surface,
+ const SwapChainDescriptor* descriptor);
+
+TextureDescriptor GetSwapChainBaseTextureDescriptor(NewSwapChainBase* swapChain);
+
+class SwapChainBase : public ApiObjectBase {
+ public:
+ explicit SwapChainBase(DeviceBase* device);
+
+ static SwapChainBase* MakeError(DeviceBase* device);
+
+ ObjectType GetType() const override;
+
+ // Dawn API
+ virtual void APIConfigure(wgpu::TextureFormat format,
+ wgpu::TextureUsage allowedUsage,
+ uint32_t width,
+ uint32_t height) = 0;
+ virtual TextureViewBase* APIGetCurrentTextureView() = 0;
+ virtual void APIPresent() = 0;
+
+ protected:
+ SwapChainBase(DeviceBase* device, ObjectBase::ErrorTag tag);
+ ~SwapChainBase() override;
+ void DestroyImpl() override;
+};
+
+// The base class for implementation-based SwapChains that are deprecated.
+class OldSwapChainBase : public SwapChainBase {
+ public:
+ OldSwapChainBase(DeviceBase* device, const SwapChainDescriptor* descriptor);
+
+ // Dawn API
+ void APIConfigure(wgpu::TextureFormat format,
+ wgpu::TextureUsage allowedUsage,
+ uint32_t width,
+ uint32_t height) override;
+ TextureViewBase* APIGetCurrentTextureView() override;
+ void APIPresent() override;
+
+ protected:
+ ~OldSwapChainBase() override;
+ const DawnSwapChainImplementation& GetImplementation();
+ virtual TextureBase* GetNextTextureImpl(const TextureDescriptor*) = 0;
+ virtual MaybeError OnBeforePresent(TextureViewBase* view) = 0;
+
+ private:
+ MaybeError ValidateConfigure(wgpu::TextureFormat format,
+ wgpu::TextureUsage allowedUsage,
+ uint32_t width,
+ uint32_t height) const;
+ MaybeError ValidateGetCurrentTextureView() const;
+ MaybeError ValidatePresent() const;
+
+ DawnSwapChainImplementation mImplementation = {};
+ wgpu::TextureFormat mFormat = {};
+ wgpu::TextureUsage mAllowedUsage;
+ uint32_t mWidth = 0;
+ uint32_t mHeight = 0;
+ Ref<TextureBase> mCurrentTexture;
+ Ref<TextureViewBase> mCurrentTextureView;
+};
+
+// The base class for surface-based SwapChains that aren't ready yet.
+class NewSwapChainBase : public SwapChainBase {
+ public:
+ NewSwapChainBase(DeviceBase* device, Surface* surface, const SwapChainDescriptor* descriptor);
+
+    // This is called when the swapchain is detached from its surface, which happens when:
+ //
+ // - The surface it is attached to is being destroyed.
+ // - The swapchain is being replaced by another one on the surface.
+ //
+ // Note that the surface has a Ref on the last swapchain that was used on it so the
+ // SwapChain destructor will only be called after one of the things above happens.
+ //
+    // The call to detach the previous swapchain should be made inside the backend
+    // implementation of SwapChains. This allows it to acquire any resources before calling
+    // detach, making a seamless transition from the previous swapchain.
+    //
+    // Likewise, the detach call for a swapchain being destroyed must be made in the backend
+    // swapchain's destructor, since C++ makes it UB to call virtual methods in the base class
+    // destructor.
+ void DetachFromSurface();
+
+ void SetIsAttached();
+
+ // Dawn API
+ void APIConfigure(wgpu::TextureFormat format,
+ wgpu::TextureUsage allowedUsage,
+ uint32_t width,
+ uint32_t height) override;
+ TextureViewBase* APIGetCurrentTextureView() override;
+ void APIPresent() override;
+
+ uint32_t GetWidth() const;
+ uint32_t GetHeight() const;
+ wgpu::TextureFormat GetFormat() const;
+ wgpu::TextureUsage GetUsage() const;
+ wgpu::PresentMode GetPresentMode() const;
+ Surface* GetSurface() const;
+ bool IsAttached() const;
+ wgpu::BackendType GetBackendType() const;
+
+ protected:
+ ~NewSwapChainBase() override;
+
+ private:
+ bool mAttached;
+ uint32_t mWidth;
+ uint32_t mHeight;
+ wgpu::TextureFormat mFormat;
+ wgpu::TextureUsage mUsage;
+ wgpu::PresentMode mPresentMode;
+
+ // This is a weak reference to the surface. If the surface is destroyed it will call
+ // DetachFromSurface and mSurface will be updated to nullptr.
+ Surface* mSurface = nullptr;
+ Ref<TextureViewBase> mCurrentTextureView;
+
+ MaybeError ValidatePresent() const;
+ MaybeError ValidateGetCurrentTextureView() const;
+
+ // GetCurrentTextureViewImpl and PresentImpl are guaranteed to be called in an interleaved
+ // manner, starting with GetCurrentTextureViewImpl.
+
+ // The returned texture view must match the swapchain descriptor exactly.
+ ResultOrError<Ref<TextureViewBase>> GetCurrentTextureView();
+ virtual ResultOrError<Ref<TextureViewBase>> GetCurrentTextureViewImpl() = 0;
+    // The call to present must destroy the current view's texture so that further accesses to
+    // it are invalid.
+ virtual MaybeError PresentImpl() = 0;
+
+ // Guaranteed to be called exactly once during the lifetime of the SwapChain. After it is
+ // called no other virtual method can be called.
+ virtual void DetachFromSurfaceImpl() = 0;
+};
} // namespace dawn::native
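
To make the three virtual hooks above concrete, here is a hypothetical, heavily simplified backend subclass, written as if it lived in namespace dawn::native. NullSwapChain, AcquireNextPlatformTexture and PresentPlatformTexture are invented names for this sketch only and do not correspond to any real Dawn backend.

class NullSwapChain final : public NewSwapChainBase {
  public:
    using NewSwapChainBase::NewSwapChainBase;

  private:
    // Invented placeholders for the platform work a real backend would do.
    ResultOrError<Ref<TextureBase>> AcquireNextPlatformTexture(const TextureDescriptor* desc);
    MaybeError PresentPlatformTexture(TextureBase* texture);

    ResultOrError<Ref<TextureViewBase>> GetCurrentTextureViewImpl() override {
        // GetSwapChainBaseTextureDescriptor() (declared above) describes a texture that matches
        // the swapchain's width, height, format and usage exactly, as the contract requires.
        TextureDescriptor desc = GetSwapChainBaseTextureDescriptor(this);
        DAWN_TRY_ASSIGN(mTexture, AcquireNextPlatformTexture(&desc));
        return AcquireRef(mTexture->APICreateView());
    }

    MaybeError PresentImpl() override {
        // Hand the texture to the surface. A real backend must also destroy the texture here so
        // its state becomes TextureState::Destroyed, which ~NewSwapChainBase asserts.
        DAWN_TRY(PresentPlatformTexture(mTexture.Get()));
        mTexture = nullptr;
        return {};
    }

    void DetachFromSurfaceImpl() override {
        // Called exactly once; release anything that still references the surface.
        mTexture = nullptr;
    }

    Ref<TextureBase> mTexture;
};
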
diff --git a/chromium/third_party/dawn/src/dawn/native/Texture.cpp b/chromium/third_party/dawn/src/dawn/native/Texture.cpp
index cabef76fe0e..7b1526a04a5 100644
--- a/chromium/third_party/dawn/src/dawn/native/Texture.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/Texture.cpp
@@ -28,844 +28,879 @@
#include "dawn/native/ValidationUtils_autogen.h"
namespace dawn::native {
- namespace {
-
- MaybeError ValidateTextureViewFormatCompatibility(const DeviceBase* device,
- const Format& format,
- wgpu::TextureFormat viewFormatEnum) {
- const Format* viewFormat;
- DAWN_TRY_ASSIGN(viewFormat, device->GetInternalFormat(viewFormatEnum));
-
- DAWN_INVALID_IF(!format.ViewCompatibleWith(*viewFormat),
- "The texture view format (%s) is not texture view format compatible "
- "with the texture format (%s).",
- viewFormatEnum, format.format);
+namespace {
+
+MaybeError ValidateTextureViewFormatCompatibility(const DeviceBase* device,
+ const Format& format,
+ wgpu::TextureFormat viewFormatEnum) {
+ const Format* viewFormat;
+ DAWN_TRY_ASSIGN(viewFormat, device->GetInternalFormat(viewFormatEnum));
+
+ DAWN_INVALID_IF(!format.ViewCompatibleWith(*viewFormat),
+ "The texture view format (%s) is not texture view format compatible "
+ "with the texture format (%s).",
+ viewFormatEnum, format.format);
+ return {};
+}
+
+MaybeError ValidateCanViewTextureAs(const DeviceBase* device,
+ const TextureBase* texture,
+ const Format& viewFormat,
+ wgpu::TextureAspect aspect) {
+ const Format& format = texture->GetFormat();
+
+ if (aspect != wgpu::TextureAspect::All) {
+ wgpu::TextureFormat aspectFormat = format.GetAspectInfo(aspect).format;
+ if (viewFormat.format == aspectFormat) {
return {};
+ } else {
+ return DAWN_FORMAT_VALIDATION_ERROR(
+ "The view format (%s) is not compatible with %s of %s (%s).", viewFormat.format,
+ aspect, format.format, aspectFormat);
}
+ }
- MaybeError ValidateCanViewTextureAs(const DeviceBase* device,
- const TextureBase* texture,
- const Format& viewFormat,
- wgpu::TextureAspect aspect) {
- const Format& format = texture->GetFormat();
-
- if (aspect != wgpu::TextureAspect::All) {
- wgpu::TextureFormat aspectFormat = format.GetAspectInfo(aspect).format;
- if (viewFormat.format == aspectFormat) {
- return {};
- } else {
- return DAWN_FORMAT_VALIDATION_ERROR(
- "The view format (%s) is not compatible with %s of %s (%s).",
- viewFormat.format, aspect, format.format, aspectFormat);
- }
- }
-
- if (format.format == viewFormat.format) {
- return {};
- }
-
- const FormatSet& compatibleViewFormats = texture->GetViewFormats();
- if (compatibleViewFormats[viewFormat]) {
- // Validation of this list is done on texture creation, so we don't need to
- // handle the case where a format is in the list, but not compatible.
- return {};
- }
-
- // |viewFormat| is not in the list. Check compatibility to generate an error message
- // depending on whether it could be compatible, but needs to be explicitly listed,
- // or it could never be compatible.
- if (!format.ViewCompatibleWith(viewFormat)) {
- // The view format isn't compatible with the format at all. Return an error
- // that indicates this, in addition to reporting that it's missing from the
- // list.
- return DAWN_FORMAT_VALIDATION_ERROR(
- "The texture view format (%s) is not compatible with the "
- "texture format (%s)."
- "The formats must be compatible, and the view format "
- "must be passed in the list of view formats on texture creation.",
- viewFormat.format, format.format);
- } else {
- // The view format is compatible, but not in the list.
- return DAWN_FORMAT_VALIDATION_ERROR(
- "%s was not created with the texture view format (%s) "
- "in the list of compatible view formats.",
- texture, viewFormat.format);
- }
- return {};
- }
-
- bool IsTextureViewDimensionCompatibleWithTextureDimension(
- wgpu::TextureViewDimension textureViewDimension,
- wgpu::TextureDimension textureDimension) {
- switch (textureViewDimension) {
- case wgpu::TextureViewDimension::e2D:
- case wgpu::TextureViewDimension::e2DArray:
- case wgpu::TextureViewDimension::Cube:
- case wgpu::TextureViewDimension::CubeArray:
- return textureDimension == wgpu::TextureDimension::e2D;
-
- case wgpu::TextureViewDimension::e3D:
- return textureDimension == wgpu::TextureDimension::e3D;
-
- case wgpu::TextureViewDimension::e1D:
- return textureDimension == wgpu::TextureDimension::e1D;
-
- case wgpu::TextureViewDimension::Undefined:
- UNREACHABLE();
- }
- }
-
- bool IsArrayLayerValidForTextureViewDimension(
- wgpu::TextureViewDimension textureViewDimension,
- uint32_t textureViewArrayLayer) {
- switch (textureViewDimension) {
- case wgpu::TextureViewDimension::e2D:
- case wgpu::TextureViewDimension::e3D:
- return textureViewArrayLayer == 1u;
- case wgpu::TextureViewDimension::e2DArray:
- return true;
- case wgpu::TextureViewDimension::Cube:
- return textureViewArrayLayer == 6u;
- case wgpu::TextureViewDimension::CubeArray:
- return textureViewArrayLayer % 6 == 0;
- case wgpu::TextureViewDimension::e1D:
- return textureViewArrayLayer == 1u;
-
- case wgpu::TextureViewDimension::Undefined:
- UNREACHABLE();
- }
- }
+ if (format.format == viewFormat.format) {
+ return {};
+ }
- MaybeError ValidateSampleCount(const TextureDescriptor* descriptor,
- wgpu::TextureUsage usage,
- const Format* format) {
- DAWN_INVALID_IF(!IsValidSampleCount(descriptor->sampleCount),
- "The sample count (%u) of the texture is not supported.",
- descriptor->sampleCount);
-
- if (descriptor->sampleCount > 1) {
- DAWN_INVALID_IF(descriptor->mipLevelCount > 1,
- "The mip level count (%u) of a multisampled texture is not 1.",
- descriptor->mipLevelCount);
-
- // Multisampled 1D and 3D textures are not supported in D3D12/Metal/Vulkan.
- // Multisampled 2D array texture is not supported because on Metal it requires the
- // version of macOS be greater than 10.14.
- DAWN_INVALID_IF(descriptor->dimension != wgpu::TextureDimension::e2D,
- "The dimension (%s) of a multisampled texture is not 2D.",
- descriptor->dimension);
-
- DAWN_INVALID_IF(descriptor->size.depthOrArrayLayers > 1,
- "The depthOrArrayLayers (%u) of a multisampled texture is not 1.",
- descriptor->size.depthOrArrayLayers);
-
- DAWN_INVALID_IF(!format->supportsMultisample,
- "The texture format (%s) does not support multisampling.",
- format->format);
-
- // Compressed formats are not renderable. They cannot support multisample.
- ASSERT(!format->isCompressed);
-
- DAWN_INVALID_IF(usage & wgpu::TextureUsage::StorageBinding,
- "The sample count (%u) of a storage textures is not 1.",
- descriptor->sampleCount);
- }
+ const FormatSet& compatibleViewFormats = texture->GetViewFormats();
+ if (compatibleViewFormats[viewFormat]) {
+ // Validation of this list is done on texture creation, so we don't need to
+ // handle the case where a format is in the list, but not compatible.
+ return {};
+ }
- return {};
- }
+ // |viewFormat| is not in the list. Check compatibility to generate an error message
+ // depending on whether it could be compatible, but needs to be explicitly listed,
+ // or it could never be compatible.
+ if (!format.ViewCompatibleWith(viewFormat)) {
+ // The view format isn't compatible with the format at all. Return an error
+ // that indicates this, in addition to reporting that it's missing from the
+ // list.
+ return DAWN_FORMAT_VALIDATION_ERROR(
+ "The texture view format (%s) is not compatible with the "
+            "texture format (%s). "
+ "The formats must be compatible, and the view format "
+ "must be passed in the list of view formats on texture creation.",
+ viewFormat.format, format.format);
+ } else {
+ // The view format is compatible, but not in the list.
+ return DAWN_FORMAT_VALIDATION_ERROR(
+ "%s was not created with the texture view format (%s) "
+ "in the list of compatible view formats.",
+ texture, viewFormat.format);
+ }
+ return {};
+}
+
+bool IsTextureViewDimensionCompatibleWithTextureDimension(
+ wgpu::TextureViewDimension textureViewDimension,
+ wgpu::TextureDimension textureDimension) {
+ switch (textureViewDimension) {
+ case wgpu::TextureViewDimension::e2D:
+ case wgpu::TextureViewDimension::e2DArray:
+ case wgpu::TextureViewDimension::Cube:
+ case wgpu::TextureViewDimension::CubeArray:
+ return textureDimension == wgpu::TextureDimension::e2D;
+
+ case wgpu::TextureViewDimension::e3D:
+ return textureDimension == wgpu::TextureDimension::e3D;
+
+ case wgpu::TextureViewDimension::e1D:
+ return textureDimension == wgpu::TextureDimension::e1D;
+
+ case wgpu::TextureViewDimension::Undefined:
+ break;
+ }
+ UNREACHABLE();
+}
+
+bool IsArrayLayerValidForTextureViewDimension(wgpu::TextureViewDimension textureViewDimension,
+ uint32_t textureViewArrayLayer) {
+ switch (textureViewDimension) {
+ case wgpu::TextureViewDimension::e2D:
+ case wgpu::TextureViewDimension::e3D:
+ return textureViewArrayLayer == 1u;
+ case wgpu::TextureViewDimension::e2DArray:
+ return true;
+ case wgpu::TextureViewDimension::Cube:
+ return textureViewArrayLayer == 6u;
+ case wgpu::TextureViewDimension::CubeArray:
+ return textureViewArrayLayer % 6 == 0;
+ case wgpu::TextureViewDimension::e1D:
+ return textureViewArrayLayer == 1u;
+
+ case wgpu::TextureViewDimension::Undefined:
+ break;
+ }
+ UNREACHABLE();
+}
+
+MaybeError ValidateSampleCount(const TextureDescriptor* descriptor,
+ wgpu::TextureUsage usage,
+ const Format* format) {
+ DAWN_INVALID_IF(!IsValidSampleCount(descriptor->sampleCount),
+ "The sample count (%u) of the texture is not supported.",
+ descriptor->sampleCount);
+
+ if (descriptor->sampleCount > 1) {
+ DAWN_INVALID_IF(descriptor->mipLevelCount > 1,
+ "The mip level count (%u) of a multisampled texture is not 1.",
+ descriptor->mipLevelCount);
- MaybeError ValidateTextureViewDimensionCompatibility(
- const TextureBase* texture,
- const TextureViewDescriptor* descriptor) {
+ // Multisampled 1D and 3D textures are not supported in D3D12/Metal/Vulkan.
+        // Multisampled 2D array textures are not supported because on Metal they require a
+        // macOS version greater than 10.14.
+ DAWN_INVALID_IF(descriptor->dimension != wgpu::TextureDimension::e2D,
+ "The dimension (%s) of a multisampled texture is not 2D.",
+ descriptor->dimension);
+
+ DAWN_INVALID_IF(descriptor->size.depthOrArrayLayers > 1,
+ "The depthOrArrayLayers (%u) of a multisampled texture is not 1.",
+ descriptor->size.depthOrArrayLayers);
+
+ DAWN_INVALID_IF(!format->supportsMultisample,
+ "The texture format (%s) does not support multisampling.", format->format);
+
+ // Compressed formats are not renderable. They cannot support multisample.
+ ASSERT(!format->isCompressed);
+
+ DAWN_INVALID_IF(usage & wgpu::TextureUsage::StorageBinding,
+ "The sample count (%u) of a storage textures is not 1.",
+                        "The sample count (%u) of a storage texture is not 1.",
+
+ DAWN_INVALID_IF((usage & wgpu::TextureUsage::RenderAttachment) == 0,
+ "The usage (%s) of a multisampled texture doesn't include (%s).",
+ descriptor->usage, wgpu::TextureUsage::RenderAttachment);
+ }
+
+ return {};
+}
+
+MaybeError ValidateTextureViewDimensionCompatibility(const TextureBase* texture,
+ const TextureViewDescriptor* descriptor) {
+ DAWN_INVALID_IF(!IsArrayLayerValidForTextureViewDimension(descriptor->dimension,
+ descriptor->arrayLayerCount),
+ "The dimension (%s) of the texture view is not compatible with the layer count "
+ "(%u) of %s.",
+ descriptor->dimension, descriptor->arrayLayerCount, texture);
+
+ DAWN_INVALID_IF(
+ !IsTextureViewDimensionCompatibleWithTextureDimension(descriptor->dimension,
+ texture->GetDimension()),
+ "The dimension (%s) of the texture view is not compatible with the dimension (%s) "
+ "of %s.",
+ descriptor->dimension, texture->GetDimension(), texture);
+
+ DAWN_INVALID_IF(
+ texture->GetSampleCount() > 1 && descriptor->dimension != wgpu::TextureViewDimension::e2D,
+ "The dimension (%s) of the multisampled texture view is not %s.", descriptor->dimension,
+ wgpu::TextureViewDimension::e2D);
+
+ switch (descriptor->dimension) {
+ case wgpu::TextureViewDimension::Cube:
+ case wgpu::TextureViewDimension::CubeArray:
DAWN_INVALID_IF(
- !IsArrayLayerValidForTextureViewDimension(descriptor->dimension,
- descriptor->arrayLayerCount),
- "The dimension (%s) of the texture view is not compatible with the layer count "
- "(%u) of %s.",
- descriptor->dimension, descriptor->arrayLayerCount, texture);
-
+ texture->GetSize().width != texture->GetSize().height,
+ "A %s texture view is not compatible with %s because the texture's width "
+ "(%u) and height (%u) are not equal.",
+ descriptor->dimension, texture, texture->GetSize().width,
+ texture->GetSize().height);
+ break;
+
+ case wgpu::TextureViewDimension::e1D:
+ case wgpu::TextureViewDimension::e2D:
+ case wgpu::TextureViewDimension::e2DArray:
+ case wgpu::TextureViewDimension::e3D:
+ break;
+
+ case wgpu::TextureViewDimension::Undefined:
+ UNREACHABLE();
+ }
+
+ return {};
+}
+
+MaybeError ValidateTextureSize(const DeviceBase* device,
+ const TextureDescriptor* descriptor,
+ const Format* format) {
+ ASSERT(descriptor->size.width != 0 && descriptor->size.height != 0 &&
+ descriptor->size.depthOrArrayLayers != 0);
+ const CombinedLimits& limits = device->GetLimits();
+ Extent3D maxExtent;
+ switch (descriptor->dimension) {
+ case wgpu::TextureDimension::e1D:
+ maxExtent = {limits.v1.maxTextureDimension1D, 1, 1};
+ break;
+ case wgpu::TextureDimension::e2D:
+ maxExtent = {limits.v1.maxTextureDimension2D, limits.v1.maxTextureDimension2D,
+ limits.v1.maxTextureArrayLayers};
+ break;
+ case wgpu::TextureDimension::e3D:
+ maxExtent = {limits.v1.maxTextureDimension3D, limits.v1.maxTextureDimension3D,
+ limits.v1.maxTextureDimension3D};
+ break;
+ }
+ DAWN_INVALID_IF(
+ descriptor->size.width > maxExtent.width || descriptor->size.height > maxExtent.height ||
+ descriptor->size.depthOrArrayLayers > maxExtent.depthOrArrayLayers,
+ "Texture size (%s) exceeded maximum texture size (%s).", &descriptor->size, &maxExtent);
+
+ switch (descriptor->dimension) {
+ case wgpu::TextureDimension::e1D:
+ DAWN_INVALID_IF(descriptor->mipLevelCount != 1,
+ "Texture mip level count (%u) is more than 1 when its dimension is %s.",
+ descriptor->mipLevelCount, wgpu::TextureDimension::e1D);
+ break;
+ case wgpu::TextureDimension::e2D: {
+ uint32_t maxMippedDimension = std::max(descriptor->size.width, descriptor->size.height);
DAWN_INVALID_IF(
- !IsTextureViewDimensionCompatibleWithTextureDimension(descriptor->dimension,
- texture->GetDimension()),
- "The dimension (%s) of the texture view is not compatible with the dimension (%s) "
- "of %s.",
- descriptor->dimension, texture->GetDimension(), texture);
-
- DAWN_INVALID_IF(texture->GetSampleCount() > 1 &&
- descriptor->dimension != wgpu::TextureViewDimension::e2D,
- "The dimension (%s) of the multisampled texture view is not %s.",
- descriptor->dimension, wgpu::TextureViewDimension::e2D);
-
- switch (descriptor->dimension) {
- case wgpu::TextureViewDimension::Cube:
- case wgpu::TextureViewDimension::CubeArray:
- DAWN_INVALID_IF(
- texture->GetSize().width != texture->GetSize().height,
- "A %s texture view is not compatible with %s because the texture's width "
- "(%u) and height (%u) are not equal.",
- descriptor->dimension, texture, texture->GetSize().width,
- texture->GetSize().height);
- break;
-
- case wgpu::TextureViewDimension::e1D:
- case wgpu::TextureViewDimension::e2D:
- case wgpu::TextureViewDimension::e2DArray:
- case wgpu::TextureViewDimension::e3D:
- break;
-
- case wgpu::TextureViewDimension::Undefined:
- UNREACHABLE();
- }
-
- return {};
+ Log2(maxMippedDimension) + 1 < descriptor->mipLevelCount,
+ "Texture mip level count (%u) exceeds the maximum (%u) for its size (%s).",
+ descriptor->mipLevelCount, Log2(maxMippedDimension) + 1, &descriptor->size);
+ break;
}
-
- MaybeError ValidateTextureSize(const DeviceBase* device,
- const TextureDescriptor* descriptor,
- const Format* format) {
- ASSERT(descriptor->size.width != 0 && descriptor->size.height != 0 &&
- descriptor->size.depthOrArrayLayers != 0);
- const CombinedLimits& limits = device->GetLimits();
- Extent3D maxExtent;
- switch (descriptor->dimension) {
- case wgpu::TextureDimension::e1D:
- maxExtent = {limits.v1.maxTextureDimension1D, 1, 1};
- break;
- case wgpu::TextureDimension::e2D:
- maxExtent = {limits.v1.maxTextureDimension2D, limits.v1.maxTextureDimension2D,
- limits.v1.maxTextureArrayLayers};
- break;
- case wgpu::TextureDimension::e3D:
- maxExtent = {limits.v1.maxTextureDimension3D, limits.v1.maxTextureDimension3D,
- limits.v1.maxTextureDimension3D};
- break;
- }
- DAWN_INVALID_IF(descriptor->size.width > maxExtent.width ||
- descriptor->size.height > maxExtent.height ||
- descriptor->size.depthOrArrayLayers > maxExtent.depthOrArrayLayers,
- "Texture size (%s) exceeded maximum texture size (%s).",
- &descriptor->size, &maxExtent);
-
- switch (descriptor->dimension) {
- case wgpu::TextureDimension::e1D:
- DAWN_INVALID_IF(
- descriptor->mipLevelCount != 1,
- "Texture mip level count (%u) is more than 1 when its dimension is %s.",
- descriptor->mipLevelCount, wgpu::TextureDimension::e1D);
- break;
- case wgpu::TextureDimension::e2D: {
- uint32_t maxMippedDimension =
- std::max(descriptor->size.width, descriptor->size.height);
- DAWN_INVALID_IF(
- Log2(maxMippedDimension) + 1 < descriptor->mipLevelCount,
- "Texture mip level count (%u) exceeds the maximum (%u) for its size (%s).",
- descriptor->mipLevelCount, Log2(maxMippedDimension) + 1, &descriptor->size);
- break;
- }
- case wgpu::TextureDimension::e3D: {
- uint32_t maxMippedDimension = std::max(
- descriptor->size.width,
- std::max(descriptor->size.height, descriptor->size.depthOrArrayLayers));
- DAWN_INVALID_IF(
- Log2(maxMippedDimension) + 1 < descriptor->mipLevelCount,
- "Texture mip level count (%u) exceeds the maximum (%u) for its size (%s).",
- descriptor->mipLevelCount, Log2(maxMippedDimension) + 1, &descriptor->size);
- break;
- }
- }
-
- if (format->isCompressed) {
- const TexelBlockInfo& blockInfo =
- format->GetAspectInfo(wgpu::TextureAspect::All).block;
- DAWN_INVALID_IF(
- descriptor->size.width % blockInfo.width != 0 ||
- descriptor->size.height % blockInfo.height != 0,
- "The size (%s) of the texture is not a multiple of the block width (%u) and "
- "height (%u) of the texture format (%s).",
- &descriptor->size, blockInfo.width, blockInfo.height, format->format);
- }
-
- return {};
- }
-
- MaybeError ValidateTextureUsage(const TextureDescriptor* descriptor,
- wgpu::TextureUsage usage,
- const Format* format) {
- DAWN_TRY(dawn::native::ValidateTextureUsage(usage));
-
- DAWN_INVALID_IF(usage == wgpu::TextureUsage::None, "The texture usage must not be 0.");
-
- constexpr wgpu::TextureUsage kValidCompressedUsages =
- wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::CopySrc |
- wgpu::TextureUsage::CopyDst;
- DAWN_INVALID_IF(
- format->isCompressed && !IsSubset(usage, kValidCompressedUsages),
- "The texture usage (%s) is incompatible with the compressed texture format (%s).",
- usage, format->format);
-
- DAWN_INVALID_IF(
- !format->isRenderable && (usage & wgpu::TextureUsage::RenderAttachment),
- "The texture usage (%s) includes %s, which is incompatible with the non-renderable "
- "format (%s).",
- usage, wgpu::TextureUsage::RenderAttachment, format->format);
-
+ case wgpu::TextureDimension::e3D: {
+ uint32_t maxMippedDimension =
+ std::max(descriptor->size.width,
+ std::max(descriptor->size.height, descriptor->size.depthOrArrayLayers));
DAWN_INVALID_IF(
- !format->supportsStorageUsage && (usage & wgpu::TextureUsage::StorageBinding),
- "The texture usage (%s) includes %s, which is incompatible with the format (%s).",
- usage, wgpu::TextureUsage::StorageBinding, format->format);
-
- // Only allows simple readonly texture usages.
- constexpr wgpu::TextureUsage kValidMultiPlanarUsages =
- wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::CopySrc;
- DAWN_INVALID_IF(
- format->IsMultiPlanar() && !IsSubset(usage, kValidMultiPlanarUsages),
- "The texture usage (%s) is incompatible with the multi-planar format (%s).", usage,
- format->format);
-
- return {};
+ Log2(maxMippedDimension) + 1 < descriptor->mipLevelCount,
+ "Texture mip level count (%u) exceeds the maximum (%u) for its size (%s).",
+ descriptor->mipLevelCount, Log2(maxMippedDimension) + 1, &descriptor->size);
+ break;
}
-
- } // anonymous namespace
-
- MaybeError ValidateTextureDescriptor(const DeviceBase* device,
- const TextureDescriptor* descriptor) {
- DAWN_TRY(ValidateSingleSType(descriptor->nextInChain,
- wgpu::SType::DawnTextureInternalUsageDescriptor));
-
- const DawnTextureInternalUsageDescriptor* internalUsageDesc = nullptr;
- FindInChain(descriptor->nextInChain, &internalUsageDesc);
-
- DAWN_INVALID_IF(
- internalUsageDesc != nullptr && !device->IsFeatureEnabled(Feature::DawnInternalUsages),
- "The dawn-internal-usages feature is not enabled");
-
- const Format* format;
- DAWN_TRY_ASSIGN(format, device->GetInternalFormat(descriptor->format));
-
- for (uint32_t i = 0; i < descriptor->viewFormatCount; ++i) {
- DAWN_TRY_CONTEXT(
- ValidateTextureViewFormatCompatibility(device, *format, descriptor->viewFormats[i]),
- "validating viewFormats[%u]", i);
- }
-
- wgpu::TextureUsage usage = descriptor->usage;
- if (internalUsageDesc != nullptr) {
- usage |= internalUsageDesc->internalUsage;
- }
-
- DAWN_TRY(ValidateTextureUsage(descriptor, usage, format));
- DAWN_TRY(ValidateTextureDimension(descriptor->dimension));
- DAWN_TRY(ValidateSampleCount(descriptor, usage, format));
-
- DAWN_INVALID_IF(descriptor->size.width == 0 || descriptor->size.height == 0 ||
- descriptor->size.depthOrArrayLayers == 0 ||
- descriptor->mipLevelCount == 0,
- "The texture size (%s) or mipLevelCount (%u) is empty.", &descriptor->size,
- descriptor->mipLevelCount);
-
- DAWN_INVALID_IF(
- descriptor->dimension != wgpu::TextureDimension::e2D && format->isCompressed,
- "The dimension (%s) of a texture with a compressed format (%s) is not 2D.",
- descriptor->dimension, format->format);
-
- // Depth/stencil formats are valid for 2D textures only. Metal has this limit. And D3D12
- // doesn't support depth/stencil formats on 3D textures.
- DAWN_INVALID_IF(
- descriptor->dimension != wgpu::TextureDimension::e2D &&
- (format->aspects & (Aspect::Depth | Aspect::Stencil)) != 0,
- "The dimension (%s) of a texture with a depth/stencil format (%s) is not 2D.",
- descriptor->dimension, format->format);
-
- DAWN_TRY(ValidateTextureSize(device, descriptor, format));
-
- // TODO(crbug.com/dawn/838): Implement a workaround for this issue.
- // Readbacks from the non-zero mip of a stencil texture may contain garbage data.
- DAWN_INVALID_IF(
- device->IsToggleEnabled(Toggle::DisallowUnsafeAPIs) && format->HasStencil() &&
- descriptor->mipLevelCount > 1 &&
- device->GetAdapter()->GetBackendType() == wgpu::BackendType::Metal,
- "https://crbug.com/dawn/838: Stencil textures with more than one mip level are "
- "disabled on Metal.");
-
- DAWN_INVALID_IF(
- device->IsToggleEnabled(Toggle::DisableR8RG8Mipmaps) && descriptor->mipLevelCount > 1 &&
- (descriptor->format == wgpu::TextureFormat::R8Unorm ||
- descriptor->format == wgpu::TextureFormat::RG8Unorm),
- "https://crbug.com/dawn/1071: r8unorm and rg8unorm textures with more than one mip "
- "level are disabled on Metal.");
-
- return {};
}
- MaybeError ValidateTextureViewDescriptor(const DeviceBase* device,
- const TextureBase* texture,
- const TextureViewDescriptor* descriptor) {
- DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr.");
-
- // Parent texture should have been already validated.
- ASSERT(texture);
- ASSERT(!texture->IsError());
-
- DAWN_TRY(ValidateTextureViewDimension(descriptor->dimension));
- DAWN_TRY(ValidateTextureFormat(descriptor->format));
- DAWN_TRY(ValidateTextureAspect(descriptor->aspect));
-
- const Format& format = texture->GetFormat();
- const Format* viewFormat;
- DAWN_TRY_ASSIGN(viewFormat, device->GetInternalFormat(descriptor->format));
-
- DAWN_INVALID_IF(
- SelectFormatAspects(format, descriptor->aspect) == Aspect::None,
- "Texture format (%s) does not have the texture view's selected aspect (%s).",
- format.format, descriptor->aspect);
-
- DAWN_INVALID_IF(descriptor->arrayLayerCount == 0 || descriptor->mipLevelCount == 0,
- "The texture view's arrayLayerCount (%u) or mipLevelCount (%u) is zero.",
- descriptor->arrayLayerCount, descriptor->mipLevelCount);
-
- DAWN_INVALID_IF(
- uint64_t(descriptor->baseArrayLayer) + uint64_t(descriptor->arrayLayerCount) >
- uint64_t(texture->GetArrayLayers()),
- "Texture view array layer range (baseArrayLayer: %u, arrayLayerCount: %u) exceeds the "
- "texture's array layer count (%u).",
- descriptor->baseArrayLayer, descriptor->arrayLayerCount, texture->GetArrayLayers());
-
+ if (format->isCompressed) {
+ const TexelBlockInfo& blockInfo = format->GetAspectInfo(wgpu::TextureAspect::All).block;
DAWN_INVALID_IF(
- uint64_t(descriptor->baseMipLevel) + uint64_t(descriptor->mipLevelCount) >
- uint64_t(texture->GetNumMipLevels()),
- "Texture view mip level range (baseMipLevel: %u, mipLevelCount: %u) exceeds the "
- "texture's mip level count (%u).",
- descriptor->baseMipLevel, descriptor->mipLevelCount, texture->GetNumMipLevels());
+ descriptor->size.width % blockInfo.width != 0 ||
+ descriptor->size.height % blockInfo.height != 0,
+ "The size (%s) of the texture is not a multiple of the block width (%u) and "
+ "height (%u) of the texture format (%s).",
+ &descriptor->size, blockInfo.width, blockInfo.height, format->format);
+ }
+
+ return {};
+}
+
+MaybeError ValidateTextureUsage(const TextureDescriptor* descriptor,
+ wgpu::TextureUsage usage,
+ const Format* format) {
+ DAWN_TRY(dawn::native::ValidateTextureUsage(usage));
+
+ DAWN_INVALID_IF(usage == wgpu::TextureUsage::None, "The texture usage must not be 0.");
+
+ constexpr wgpu::TextureUsage kValidCompressedUsages = wgpu::TextureUsage::TextureBinding |
+ wgpu::TextureUsage::CopySrc |
+ wgpu::TextureUsage::CopyDst;
+ DAWN_INVALID_IF(
+ format->isCompressed && !IsSubset(usage, kValidCompressedUsages),
+ "The texture usage (%s) is incompatible with the compressed texture format (%s).", usage,
+ format->format);
+
+ DAWN_INVALID_IF(
+ !format->isRenderable && (usage & wgpu::TextureUsage::RenderAttachment),
+ "The texture usage (%s) includes %s, which is incompatible with the non-renderable "
+ "format (%s).",
+ usage, wgpu::TextureUsage::RenderAttachment, format->format);
+
+ DAWN_INVALID_IF(descriptor->dimension != wgpu::TextureDimension::e2D &&
+ (usage & wgpu::TextureUsage::RenderAttachment),
+ "The texture usage (%s) includes %s, which is incompatible with the texture "
+ "dimension (%s).",
+ usage, wgpu::TextureUsage::RenderAttachment, descriptor->dimension);
+
+ DAWN_INVALID_IF(
+ !format->supportsStorageUsage && (usage & wgpu::TextureUsage::StorageBinding),
+ "The texture usage (%s) includes %s, which is incompatible with the format (%s).", usage,
+ wgpu::TextureUsage::StorageBinding, format->format);
+
+ // Only allows simple readonly texture usages.
+ constexpr wgpu::TextureUsage kValidMultiPlanarUsages =
+ wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::CopySrc;
+ DAWN_INVALID_IF(format->IsMultiPlanar() && !IsSubset(usage, kValidMultiPlanarUsages),
+ "The texture usage (%s) is incompatible with the multi-planar format (%s).",
+ usage, format->format);
+
+ return {};
+}
+
+} // anonymous namespace
+
+MaybeError ValidateTextureDescriptor(const DeviceBase* device,
+ const TextureDescriptor* descriptor) {
+ DAWN_TRY(ValidateSingleSType(descriptor->nextInChain,
+ wgpu::SType::DawnTextureInternalUsageDescriptor));
+
+ const DawnTextureInternalUsageDescriptor* internalUsageDesc = nullptr;
+ FindInChain(descriptor->nextInChain, &internalUsageDesc);
+
+ DAWN_INVALID_IF(
+ internalUsageDesc != nullptr && !device->IsFeatureEnabled(Feature::DawnInternalUsages),
+ "The dawn-internal-usages feature is not enabled");
+
+ const Format* format;
+ DAWN_TRY_ASSIGN(format, device->GetInternalFormat(descriptor->format));
+
+ for (uint32_t i = 0; i < descriptor->viewFormatCount; ++i) {
+ DAWN_TRY_CONTEXT(
+ ValidateTextureViewFormatCompatibility(device, *format, descriptor->viewFormats[i]),
+ "validating viewFormats[%u]", i);
+ }
+
+ wgpu::TextureUsage usage = descriptor->usage;
+ if (internalUsageDesc != nullptr) {
+ usage |= internalUsageDesc->internalUsage;
+ }
+
+ DAWN_TRY(ValidateTextureUsage(descriptor, usage, format));
+ DAWN_TRY(ValidateTextureDimension(descriptor->dimension));
+ DAWN_TRY(ValidateSampleCount(descriptor, usage, format));
+
+ DAWN_INVALID_IF(descriptor->size.width == 0 || descriptor->size.height == 0 ||
+ descriptor->size.depthOrArrayLayers == 0 || descriptor->mipLevelCount == 0,
+ "The texture size (%s) or mipLevelCount (%u) is empty.", &descriptor->size,
+ descriptor->mipLevelCount);
+
+ DAWN_INVALID_IF(descriptor->dimension != wgpu::TextureDimension::e2D && format->isCompressed,
+ "The dimension (%s) of a texture with a compressed format (%s) is not 2D.",
+ descriptor->dimension, format->format);
+
+ // Depth/stencil formats are valid for 2D textures only. Metal has this limit. And D3D12
+ // doesn't support depth/stencil formats on 3D textures.
+ DAWN_INVALID_IF(descriptor->dimension != wgpu::TextureDimension::e2D &&
+ (format->aspects & (Aspect::Depth | Aspect::Stencil)) != 0,
+ "The dimension (%s) of a texture with a depth/stencil format (%s) is not 2D.",
+ descriptor->dimension, format->format);
+
+ DAWN_TRY(ValidateTextureSize(device, descriptor, format));
+
+ // TODO(crbug.com/dawn/838): Implement a workaround for this issue.
+ // Readbacks from the non-zero mip of a stencil texture may contain garbage data.
+ DAWN_INVALID_IF(device->IsToggleEnabled(Toggle::DisallowUnsafeAPIs) && format->HasStencil() &&
+ descriptor->mipLevelCount > 1 &&
+ device->GetAdapter()->GetBackendType() == wgpu::BackendType::Metal,
+ "https://crbug.com/dawn/838: Stencil textures with more than one mip level are "
+ "disabled on Metal.");
+
+ return {};
+}
+
+MaybeError ValidateTextureViewDescriptor(const DeviceBase* device,
+ const TextureBase* texture,
+ const TextureViewDescriptor* descriptor) {
+ DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr.");
+
+ // Parent texture should have been already validated.
+ ASSERT(texture);
+ ASSERT(!texture->IsError());
+
+ DAWN_TRY(ValidateTextureViewDimension(descriptor->dimension));
+ DAWN_TRY(ValidateTextureFormat(descriptor->format));
+ DAWN_TRY(ValidateTextureAspect(descriptor->aspect));
+
+ const Format& format = texture->GetFormat();
+ const Format* viewFormat;
+ DAWN_TRY_ASSIGN(viewFormat, device->GetInternalFormat(descriptor->format));
+
+ DAWN_INVALID_IF(SelectFormatAspects(format, descriptor->aspect) == Aspect::None,
+ "Texture format (%s) does not have the texture view's selected aspect (%s).",
+ format.format, descriptor->aspect);
+
+ DAWN_INVALID_IF(descriptor->arrayLayerCount == 0 || descriptor->mipLevelCount == 0,
+ "The texture view's arrayLayerCount (%u) or mipLevelCount (%u) is zero.",
+ descriptor->arrayLayerCount, descriptor->mipLevelCount);
+
+ DAWN_INVALID_IF(
+ uint64_t(descriptor->baseArrayLayer) + uint64_t(descriptor->arrayLayerCount) >
+ uint64_t(texture->GetArrayLayers()),
+ "Texture view array layer range (baseArrayLayer: %u, arrayLayerCount: %u) exceeds the "
+ "texture's array layer count (%u).",
+ descriptor->baseArrayLayer, descriptor->arrayLayerCount, texture->GetArrayLayers());
+
+ DAWN_INVALID_IF(
+ uint64_t(descriptor->baseMipLevel) + uint64_t(descriptor->mipLevelCount) >
+ uint64_t(texture->GetNumMipLevels()),
+ "Texture view mip level range (baseMipLevel: %u, mipLevelCount: %u) exceeds the "
+ "texture's mip level count (%u).",
+ descriptor->baseMipLevel, descriptor->mipLevelCount, texture->GetNumMipLevels());
- DAWN_TRY(ValidateCanViewTextureAs(device, texture, *viewFormat, descriptor->aspect));
- DAWN_TRY(ValidateTextureViewDimensionCompatibility(texture, descriptor));
-
- return {};
- }
-
- ResultOrError<TextureViewDescriptor> GetTextureViewDescriptorWithDefaults(
- const TextureBase* texture,
- const TextureViewDescriptor* descriptor) {
- ASSERT(texture);
-
- TextureViewDescriptor desc = {};
- if (descriptor) {
- desc = *descriptor;
- }
-
- // The default value for the view dimension depends on the texture's dimension with a
- // special case for 2DArray being chosen automatically if arrayLayerCount is unspecified.
- if (desc.dimension == wgpu::TextureViewDimension::Undefined) {
- switch (texture->GetDimension()) {
- case wgpu::TextureDimension::e1D:
- desc.dimension = wgpu::TextureViewDimension::e1D;
- break;
-
- case wgpu::TextureDimension::e2D:
+ DAWN_TRY(ValidateCanViewTextureAs(device, texture, *viewFormat, descriptor->aspect));
+ DAWN_TRY(ValidateTextureViewDimensionCompatibility(texture, descriptor));
+
+ return {};
+}
+
+ResultOrError<TextureViewDescriptor> GetTextureViewDescriptorWithDefaults(
+ const TextureBase* texture,
+ const TextureViewDescriptor* descriptor) {
+ ASSERT(texture);
+
+ TextureViewDescriptor desc = {};
+ if (descriptor) {
+ desc = *descriptor;
+ }
+
+    // The default value for the view dimension depends on the texture's dimension, with a
+    // special case: 2DArray is chosen if the texture is 2D but has more than one array layer.
+ if (desc.dimension == wgpu::TextureViewDimension::Undefined) {
+ switch (texture->GetDimension()) {
+ case wgpu::TextureDimension::e1D:
+ desc.dimension = wgpu::TextureViewDimension::e1D;
+ break;
+
+ case wgpu::TextureDimension::e2D:
+ if (texture->GetArrayLayers() == 1) {
desc.dimension = wgpu::TextureViewDimension::e2D;
- break;
+ } else {
+ desc.dimension = wgpu::TextureViewDimension::e2DArray;
+ }
+ break;
- case wgpu::TextureDimension::e3D:
- desc.dimension = wgpu::TextureViewDimension::e3D;
- break;
- }
+ case wgpu::TextureDimension::e3D:
+ desc.dimension = wgpu::TextureViewDimension::e3D;
+ break;
}
+ }
- if (desc.format == wgpu::TextureFormat::Undefined) {
- const Format& format = texture->GetFormat();
-
- // Check the aspect since |SelectFormatAspects| assumes a valid aspect.
- // Creation would have failed validation later since the aspect is invalid.
- DAWN_TRY(ValidateTextureAspect(desc.aspect));
+ if (desc.format == wgpu::TextureFormat::Undefined) {
+ const Format& format = texture->GetFormat();
- Aspect aspects = SelectFormatAspects(format, desc.aspect);
- if (HasOneBit(aspects)) {
- desc.format = format.GetAspectInfo(aspects).format;
- } else {
- desc.format = format.format;
- }
- }
- if (desc.arrayLayerCount == wgpu::kArrayLayerCountUndefined) {
- switch (desc.dimension) {
- case wgpu::TextureViewDimension::e1D:
- case wgpu::TextureViewDimension::e2D:
- case wgpu::TextureViewDimension::e3D:
- desc.arrayLayerCount = 1;
- break;
- case wgpu::TextureViewDimension::Cube:
- desc.arrayLayerCount = 6;
- break;
- case wgpu::TextureViewDimension::e2DArray:
- case wgpu::TextureViewDimension::CubeArray:
- desc.arrayLayerCount = texture->GetArrayLayers() - desc.baseArrayLayer;
- break;
- default:
- // We don't put UNREACHABLE() here because we validate enums only after this
- // function sets default values. Otherwise, the UNREACHABLE() will be hit.
- break;
- }
- }
+ // Check the aspect since |SelectFormatAspects| assumes a valid aspect.
+ // Creation would have failed validation later since the aspect is invalid.
+ DAWN_TRY(ValidateTextureAspect(desc.aspect));
- if (desc.mipLevelCount == wgpu::kMipLevelCountUndefined) {
- desc.mipLevelCount = texture->GetNumMipLevels() - desc.baseMipLevel;
+ Aspect aspects = SelectFormatAspects(format, desc.aspect);
+ if (HasOneBit(aspects)) {
+ desc.format = format.GetAspectInfo(aspects).format;
+ } else {
+ desc.format = format.format;
}
- return desc;
}
-
- // WebGPU only supports sample counts of 1 and 4. We could expand to more based on
- // platform support, but it would probably be a feature.
- bool IsValidSampleCount(uint32_t sampleCount) {
- switch (sampleCount) {
- case 1:
- case 4:
- return true;
-
+ if (desc.arrayLayerCount == wgpu::kArrayLayerCountUndefined) {
+ switch (desc.dimension) {
+ case wgpu::TextureViewDimension::e1D:
+ case wgpu::TextureViewDimension::e2D:
+ case wgpu::TextureViewDimension::e3D:
+ desc.arrayLayerCount = 1;
+ break;
+ case wgpu::TextureViewDimension::Cube:
+ desc.arrayLayerCount = 6;
+ break;
+ case wgpu::TextureViewDimension::e2DArray:
+ case wgpu::TextureViewDimension::CubeArray:
+ desc.arrayLayerCount = texture->GetArrayLayers() - desc.baseArrayLayer;
+ break;
default:
- return false;
+ // We don't put UNREACHABLE() here because we validate enums only after this
+ // function sets default values. Otherwise, the UNREACHABLE() will be hit.
+ break;
}
}
- // TextureBase
-
- TextureBase::TextureBase(DeviceBase* device,
- const TextureDescriptor* descriptor,
- TextureState state)
- : ApiObjectBase(device, descriptor->label),
- mDimension(descriptor->dimension),
- mFormat(device->GetValidInternalFormat(descriptor->format)),
- mSize(descriptor->size),
- mMipLevelCount(descriptor->mipLevelCount),
- mSampleCount(descriptor->sampleCount),
- mUsage(descriptor->usage),
- mInternalUsage(mUsage),
- mState(state) {
- uint32_t subresourceCount =
- mMipLevelCount * GetArrayLayers() * GetAspectCount(mFormat.aspects);
- mIsSubresourceContentInitializedAtIndex = std::vector<bool>(subresourceCount, false);
-
- for (uint32_t i = 0; i < descriptor->viewFormatCount; ++i) {
- if (descriptor->viewFormats[i] == descriptor->format) {
- // Skip our own format, so the backends don't allocate the texture for
- // reinterpretation if it's not needed.
- continue;
- }
- mViewFormats[device->GetValidInternalFormat(descriptor->viewFormats[i])] = true;
- }
-
- const DawnTextureInternalUsageDescriptor* internalUsageDesc = nullptr;
- FindInChain(descriptor->nextInChain, &internalUsageDesc);
- if (internalUsageDesc != nullptr) {
- mInternalUsage |= internalUsageDesc->internalUsage;
+ if (desc.mipLevelCount == wgpu::kMipLevelCountUndefined) {
+ desc.mipLevelCount = texture->GetNumMipLevels() - desc.baseMipLevel;
+ }
+ return desc;
+}
+
+// WebGPU only supports sample counts of 1 and 4. We could expand to more based on
+// platform support, but it would probably be a feature.
+bool IsValidSampleCount(uint32_t sampleCount) {
+ switch (sampleCount) {
+ case 1:
+ case 4:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+// TextureBase
+
+TextureBase::TextureBase(DeviceBase* device,
+ const TextureDescriptor* descriptor,
+ TextureState state)
+ : ApiObjectBase(device, descriptor->label),
+ mDimension(descriptor->dimension),
+ mFormat(device->GetValidInternalFormat(descriptor->format)),
+ mSize(descriptor->size),
+ mMipLevelCount(descriptor->mipLevelCount),
+ mSampleCount(descriptor->sampleCount),
+ mUsage(descriptor->usage),
+ mInternalUsage(mUsage),
+ mState(state),
+ mFormatEnumForReflection(descriptor->format) {
+ uint32_t subresourceCount = mMipLevelCount * GetArrayLayers() * GetAspectCount(mFormat.aspects);
+ mIsSubresourceContentInitializedAtIndex = std::vector<bool>(subresourceCount, false);
+
+ for (uint32_t i = 0; i < descriptor->viewFormatCount; ++i) {
+ if (descriptor->viewFormats[i] == descriptor->format) {
+ // Skip our own format, so the backends don't allocate the texture for
+ // reinterpretation if it's not needed.
+ continue;
}
- TrackInDevice();
- }
-
- static Format kUnusedFormat;
-
- TextureBase::TextureBase(DeviceBase* device, TextureState state)
- : ApiObjectBase(device, kLabelNotImplemented), mFormat(kUnusedFormat), mState(state) {
- TrackInDevice();
- }
-
- TextureBase::TextureBase(DeviceBase* device, ObjectBase::ErrorTag tag)
- : ApiObjectBase(device, tag), mFormat(kUnusedFormat) {
- }
-
- void TextureBase::DestroyImpl() {
- mState = TextureState::Destroyed;
- }
-
- // static
- TextureBase* TextureBase::MakeError(DeviceBase* device) {
- return new TextureBase(device, ObjectBase::kError);
- }
-
- ObjectType TextureBase::GetType() const {
- return ObjectType::Texture;
- }
-
- wgpu::TextureDimension TextureBase::GetDimension() const {
- ASSERT(!IsError());
- return mDimension;
- }
-
- const Format& TextureBase::GetFormat() const {
- ASSERT(!IsError());
- return mFormat;
- }
- const FormatSet& TextureBase::GetViewFormats() const {
- ASSERT(!IsError());
- return mViewFormats;
- }
- const Extent3D& TextureBase::GetSize() const {
- ASSERT(!IsError());
- return mSize;
- }
- uint32_t TextureBase::GetWidth() const {
- ASSERT(!IsError());
- return mSize.width;
- }
- uint32_t TextureBase::GetHeight() const {
- ASSERT(!IsError());
- return mSize.height;
- }
- uint32_t TextureBase::GetDepth() const {
- ASSERT(!IsError());
- ASSERT(mDimension == wgpu::TextureDimension::e3D);
- return mSize.depthOrArrayLayers;
- }
- uint32_t TextureBase::GetArrayLayers() const {
- ASSERT(!IsError());
- if (mDimension == wgpu::TextureDimension::e3D) {
- return 1;
- }
- return mSize.depthOrArrayLayers;
- }
- uint32_t TextureBase::GetNumMipLevels() const {
- ASSERT(!IsError());
- return mMipLevelCount;
- }
- SubresourceRange TextureBase::GetAllSubresources() const {
- ASSERT(!IsError());
- return {mFormat.aspects, {0, GetArrayLayers()}, {0, mMipLevelCount}};
- }
- uint32_t TextureBase::GetSampleCount() const {
- ASSERT(!IsError());
- return mSampleCount;
- }
- uint32_t TextureBase::GetSubresourceCount() const {
- ASSERT(!IsError());
- return static_cast<uint32_t>(mIsSubresourceContentInitializedAtIndex.size());
- }
- wgpu::TextureUsage TextureBase::GetUsage() const {
- ASSERT(!IsError());
- return mUsage;
- }
- wgpu::TextureUsage TextureBase::GetInternalUsage() const {
- ASSERT(!IsError());
- return mInternalUsage;
- }
-
- TextureBase::TextureState TextureBase::GetTextureState() const {
- ASSERT(!IsError());
- return mState;
- }
-
- uint32_t TextureBase::GetSubresourceIndex(uint32_t mipLevel,
- uint32_t arraySlice,
- Aspect aspect) const {
- ASSERT(HasOneBit(aspect));
- return mipLevel +
- GetNumMipLevels() * (arraySlice + GetArrayLayers() * GetAspectIndex(aspect));
- }
-
- bool TextureBase::IsSubresourceContentInitialized(const SubresourceRange& range) const {
- ASSERT(!IsError());
- for (Aspect aspect : IterateEnumMask(range.aspects)) {
- for (uint32_t arrayLayer = range.baseArrayLayer;
- arrayLayer < range.baseArrayLayer + range.layerCount; ++arrayLayer) {
- for (uint32_t mipLevel = range.baseMipLevel;
- mipLevel < range.baseMipLevel + range.levelCount; ++mipLevel) {
- uint32_t subresourceIndex = GetSubresourceIndex(mipLevel, arrayLayer, aspect);
- ASSERT(subresourceIndex < mIsSubresourceContentInitializedAtIndex.size());
- if (!mIsSubresourceContentInitializedAtIndex[subresourceIndex]) {
- return false;
- }
+ mViewFormats[device->GetValidInternalFormat(descriptor->viewFormats[i])] = true;
+ }
+
+ const DawnTextureInternalUsageDescriptor* internalUsageDesc = nullptr;
+ FindInChain(descriptor->nextInChain, &internalUsageDesc);
+ if (internalUsageDesc != nullptr) {
+ mInternalUsage |= internalUsageDesc->internalUsage;
+ }
+ TrackInDevice();
+}
+
+TextureBase::~TextureBase() = default;
+
+static constexpr Format kUnusedFormat;
+
+TextureBase::TextureBase(DeviceBase* device, TextureState state)
+ : ApiObjectBase(device, kLabelNotImplemented), mFormat(kUnusedFormat), mState(state) {
+ TrackInDevice();
+}
+
+TextureBase::TextureBase(DeviceBase* device,
+ const TextureDescriptor* descriptor,
+ ObjectBase::ErrorTag tag)
+ : ApiObjectBase(device, tag),
+ mDimension(descriptor->dimension),
+ mFormat(kUnusedFormat),
+ mSize(descriptor->size),
+ mMipLevelCount(descriptor->mipLevelCount),
+ mSampleCount(descriptor->sampleCount),
+ mUsage(descriptor->usage),
+ mFormatEnumForReflection(descriptor->format) {}
+
+void TextureBase::DestroyImpl() {
+ mState = TextureState::Destroyed;
+}
+
+// static
+TextureBase* TextureBase::MakeError(DeviceBase* device, const TextureDescriptor* descriptor) {
+ return new TextureBase(device, descriptor, ObjectBase::kError);
+}
+
+ObjectType TextureBase::GetType() const {
+ return ObjectType::Texture;
+}
+
+wgpu::TextureDimension TextureBase::GetDimension() const {
+ ASSERT(!IsError());
+ return mDimension;
+}
+
+const Format& TextureBase::GetFormat() const {
+ ASSERT(!IsError());
+ return mFormat;
+}
+const FormatSet& TextureBase::GetViewFormats() const {
+ ASSERT(!IsError());
+ return mViewFormats;
+}
+const Extent3D& TextureBase::GetSize() const {
+ ASSERT(!IsError());
+ return mSize;
+}
+uint32_t TextureBase::GetWidth() const {
+ ASSERT(!IsError());
+ return mSize.width;
+}
+uint32_t TextureBase::GetHeight() const {
+ ASSERT(!IsError());
+ return mSize.height;
+}
+uint32_t TextureBase::GetDepth() const {
+ ASSERT(!IsError());
+ ASSERT(mDimension == wgpu::TextureDimension::e3D);
+ return mSize.depthOrArrayLayers;
+}
+uint32_t TextureBase::GetArrayLayers() const {
+ ASSERT(!IsError());
+ if (mDimension == wgpu::TextureDimension::e3D) {
+ return 1;
+ }
+ return mSize.depthOrArrayLayers;
+}
+uint32_t TextureBase::GetNumMipLevels() const {
+ ASSERT(!IsError());
+ return mMipLevelCount;
+}
+SubresourceRange TextureBase::GetAllSubresources() const {
+ ASSERT(!IsError());
+ return {mFormat.aspects, {0, GetArrayLayers()}, {0, mMipLevelCount}};
+}
+uint32_t TextureBase::GetSampleCount() const {
+ ASSERT(!IsError());
+ return mSampleCount;
+}
+uint32_t TextureBase::GetSubresourceCount() const {
+ ASSERT(!IsError());
+ return static_cast<uint32_t>(mIsSubresourceContentInitializedAtIndex.size());
+}
+wgpu::TextureUsage TextureBase::GetUsage() const {
+ ASSERT(!IsError());
+ return mUsage;
+}
+wgpu::TextureUsage TextureBase::GetInternalUsage() const {
+ ASSERT(!IsError());
+ return mInternalUsage;
+}
+
+TextureBase::TextureState TextureBase::GetTextureState() const {
+ ASSERT(!IsError());
+ return mState;
+}
+
+uint32_t TextureBase::GetSubresourceIndex(uint32_t mipLevel,
+ uint32_t arraySlice,
+ Aspect aspect) const {
+ ASSERT(HasOneBit(aspect));
+ return mipLevel + GetNumMipLevels() * (arraySlice + GetArrayLayers() * GetAspectIndex(aspect));
+}
+
+bool TextureBase::IsSubresourceContentInitialized(const SubresourceRange& range) const {
+ ASSERT(!IsError());
+ for (Aspect aspect : IterateEnumMask(range.aspects)) {
+ for (uint32_t arrayLayer = range.baseArrayLayer;
+ arrayLayer < range.baseArrayLayer + range.layerCount; ++arrayLayer) {
+ for (uint32_t mipLevel = range.baseMipLevel;
+ mipLevel < range.baseMipLevel + range.levelCount; ++mipLevel) {
+ uint32_t subresourceIndex = GetSubresourceIndex(mipLevel, arrayLayer, aspect);
+ ASSERT(subresourceIndex < mIsSubresourceContentInitializedAtIndex.size());
+ if (!mIsSubresourceContentInitializedAtIndex[subresourceIndex]) {
+ return false;
}
}
}
- return true;
}
-
- void TextureBase::SetIsSubresourceContentInitialized(bool isInitialized,
- const SubresourceRange& range) {
- ASSERT(!IsError());
- for (Aspect aspect : IterateEnumMask(range.aspects)) {
- for (uint32_t arrayLayer = range.baseArrayLayer;
- arrayLayer < range.baseArrayLayer + range.layerCount; ++arrayLayer) {
- for (uint32_t mipLevel = range.baseMipLevel;
- mipLevel < range.baseMipLevel + range.levelCount; ++mipLevel) {
- uint32_t subresourceIndex = GetSubresourceIndex(mipLevel, arrayLayer, aspect);
- ASSERT(subresourceIndex < mIsSubresourceContentInitializedAtIndex.size());
- mIsSubresourceContentInitializedAtIndex[subresourceIndex] = isInitialized;
- }
+ return true;
+}
+
+void TextureBase::SetIsSubresourceContentInitialized(bool isInitialized,
+ const SubresourceRange& range) {
+ ASSERT(!IsError());
+ for (Aspect aspect : IterateEnumMask(range.aspects)) {
+ for (uint32_t arrayLayer = range.baseArrayLayer;
+ arrayLayer < range.baseArrayLayer + range.layerCount; ++arrayLayer) {
+ for (uint32_t mipLevel = range.baseMipLevel;
+ mipLevel < range.baseMipLevel + range.levelCount; ++mipLevel) {
+ uint32_t subresourceIndex = GetSubresourceIndex(mipLevel, arrayLayer, aspect);
+ ASSERT(subresourceIndex < mIsSubresourceContentInitializedAtIndex.size());
+ mIsSubresourceContentInitializedAtIndex[subresourceIndex] = isInitialized;
}
}
}
+}
- MaybeError TextureBase::ValidateCanUseInSubmitNow() const {
- ASSERT(!IsError());
- DAWN_INVALID_IF(mState == TextureState::Destroyed, "Destroyed texture %s used in a submit.",
- this);
- return {};
- }
+MaybeError TextureBase::ValidateCanUseInSubmitNow() const {
+ ASSERT(!IsError());
+ DAWN_INVALID_IF(mState == TextureState::Destroyed, "Destroyed texture %s used in a submit.",
+ this);
+ return {};
+}
- bool TextureBase::IsMultisampledTexture() const {
- ASSERT(!IsError());
- return mSampleCount > 1;
- }
+bool TextureBase::IsMultisampledTexture() const {
+ ASSERT(!IsError());
+ return mSampleCount > 1;
+}
- Extent3D TextureBase::GetMipLevelVirtualSize(uint32_t level) const {
- Extent3D extent = {std::max(mSize.width >> level, 1u), 1u, 1u};
- if (mDimension == wgpu::TextureDimension::e1D) {
- return extent;
- }
-
- extent.height = std::max(mSize.height >> level, 1u);
- if (mDimension == wgpu::TextureDimension::e2D) {
- return extent;
- }
-
- extent.depthOrArrayLayers = std::max(mSize.depthOrArrayLayers >> level, 1u);
+Extent3D TextureBase::GetMipLevelSingleSubresourceVirtualSize(uint32_t level) const {
+ Extent3D extent = {std::max(mSize.width >> level, 1u), 1u, 1u};
+ if (mDimension == wgpu::TextureDimension::e1D) {
return extent;
}
- Extent3D TextureBase::GetMipLevelPhysicalSize(uint32_t level) const {
- Extent3D extent = GetMipLevelVirtualSize(level);
-
- // Compressed Textures will have paddings if their width or height is not a multiple of
- // 4 at non-zero mipmap levels.
- if (mFormat.isCompressed && level != 0) {
- // If |level| is non-zero, then each dimension of |extent| is at most half of
- // the max texture dimension. Computations here which add the block width/height
- // to the extent cannot overflow.
- const TexelBlockInfo& blockInfo = mFormat.GetAspectInfo(wgpu::TextureAspect::All).block;
- extent.width = (extent.width + blockInfo.width - 1) / blockInfo.width * blockInfo.width;
- extent.height =
- (extent.height + blockInfo.height - 1) / blockInfo.height * blockInfo.height;
- }
-
+ extent.height = std::max(mSize.height >> level, 1u);
+ if (mDimension == wgpu::TextureDimension::e2D) {
return extent;
}
- Extent3D TextureBase::ClampToMipLevelVirtualSize(uint32_t level,
- const Origin3D& origin,
- const Extent3D& extent) const {
- const Extent3D virtualSizeAtLevel = GetMipLevelVirtualSize(level);
- ASSERT(origin.x <= virtualSizeAtLevel.width);
- ASSERT(origin.y <= virtualSizeAtLevel.height);
- uint32_t clampedCopyExtentWidth = (extent.width > virtualSizeAtLevel.width - origin.x)
- ? (virtualSizeAtLevel.width - origin.x)
- : extent.width;
- uint32_t clampedCopyExtentHeight = (extent.height > virtualSizeAtLevel.height - origin.y)
- ? (virtualSizeAtLevel.height - origin.y)
- : extent.height;
- return {clampedCopyExtentWidth, clampedCopyExtentHeight, extent.depthOrArrayLayers};
- }
-
- ResultOrError<Ref<TextureViewBase>> TextureBase::CreateView(
- const TextureViewDescriptor* descriptor) {
- return GetDevice()->CreateTextureView(this, descriptor);
- }
-
- TextureViewBase* TextureBase::APICreateView(const TextureViewDescriptor* descriptor) {
- DeviceBase* device = GetDevice();
-
- Ref<TextureViewBase> result;
- if (device->ConsumedError(CreateView(descriptor), &result, "calling %s.CreateView(%s).",
- this, descriptor)) {
- return TextureViewBase::MakeError(device);
- }
- return result.Detach();
- }
-
- void TextureBase::APIDestroy() {
- if (GetDevice()->ConsumedError(ValidateDestroy(), "calling %s.Destroy().", this)) {
- return;
- }
- ASSERT(!IsError());
- Destroy();
- }
-
- MaybeError TextureBase::ValidateDestroy() const {
- DAWN_TRY(GetDevice()->ValidateObject(this));
- return {};
- }
-
- // TextureViewBase
-
- TextureViewBase::TextureViewBase(TextureBase* texture, const TextureViewDescriptor* descriptor)
- : ApiObjectBase(texture->GetDevice(), descriptor->label),
- mTexture(texture),
- mFormat(GetDevice()->GetValidInternalFormat(descriptor->format)),
- mDimension(descriptor->dimension),
- mRange({ConvertViewAspect(mFormat, descriptor->aspect),
- {descriptor->baseArrayLayer, descriptor->arrayLayerCount},
- {descriptor->baseMipLevel, descriptor->mipLevelCount}}) {
- TrackInDevice();
- }
-
- TextureViewBase::TextureViewBase(TextureBase* texture)
- : ApiObjectBase(texture->GetDevice(), kLabelNotImplemented),
- mTexture(texture),
- mFormat(kUnusedFormat) {
- TrackInDevice();
- }
-
- TextureViewBase::TextureViewBase(DeviceBase* device, ObjectBase::ErrorTag tag)
- : ApiObjectBase(device, tag), mFormat(kUnusedFormat) {
- }
-
- void TextureViewBase::DestroyImpl() {
- }
-
- // static
- TextureViewBase* TextureViewBase::MakeError(DeviceBase* device) {
- return new TextureViewBase(device, ObjectBase::kError);
- }
-
- ObjectType TextureViewBase::GetType() const {
- return ObjectType::TextureView;
- }
-
- const TextureBase* TextureViewBase::GetTexture() const {
- ASSERT(!IsError());
- return mTexture.Get();
- }
-
- TextureBase* TextureViewBase::GetTexture() {
- ASSERT(!IsError());
- return mTexture.Get();
- }
-
- Aspect TextureViewBase::GetAspects() const {
- ASSERT(!IsError());
- return mRange.aspects;
- }
-
- const Format& TextureViewBase::GetFormat() const {
- ASSERT(!IsError());
- return mFormat;
- }
-
- wgpu::TextureViewDimension TextureViewBase::GetDimension() const {
- ASSERT(!IsError());
- return mDimension;
- }
-
- uint32_t TextureViewBase::GetBaseMipLevel() const {
- ASSERT(!IsError());
- return mRange.baseMipLevel;
- }
-
- uint32_t TextureViewBase::GetLevelCount() const {
- ASSERT(!IsError());
- return mRange.levelCount;
- }
-
- uint32_t TextureViewBase::GetBaseArrayLayer() const {
- ASSERT(!IsError());
- return mRange.baseArrayLayer;
- }
-
- uint32_t TextureViewBase::GetLayerCount() const {
- ASSERT(!IsError());
- return mRange.layerCount;
- }
-
- const SubresourceRange& TextureViewBase::GetSubresourceRange() const {
- ASSERT(!IsError());
- return mRange;
- }
+ extent.depthOrArrayLayers = std::max(mSize.depthOrArrayLayers >> level, 1u);
+ return extent;
+}
+
+Extent3D TextureBase::GetMipLevelSingleSubresourcePhysicalSize(uint32_t level) const {
+ Extent3D extent = GetMipLevelSingleSubresourceVirtualSize(level);
+
+    // Compressed textures will have padding if their width or height is not a multiple of
+    // the block size at non-zero mipmap levels.
+ if (mFormat.isCompressed && level != 0) {
+ // If |level| is non-zero, then each dimension of |extent| is at most half of
+ // the max texture dimension. Computations here which add the block width/height
+ // to the extent cannot overflow.
+ const TexelBlockInfo& blockInfo = mFormat.GetAspectInfo(wgpu::TextureAspect::All).block;
+ extent.width = (extent.width + blockInfo.width - 1) / blockInfo.width * blockInfo.width;
+ extent.height =
+ (extent.height + blockInfo.height - 1) / blockInfo.height * blockInfo.height;
+ }
+
+ return extent;
+}
+
+Extent3D TextureBase::ClampToMipLevelVirtualSize(uint32_t level,
+ const Origin3D& origin,
+ const Extent3D& extent) const {
+ const Extent3D virtualSizeAtLevel = GetMipLevelSingleSubresourceVirtualSize(level);
+ ASSERT(origin.x <= virtualSizeAtLevel.width);
+ ASSERT(origin.y <= virtualSizeAtLevel.height);
+ uint32_t clampedCopyExtentWidth = (extent.width > virtualSizeAtLevel.width - origin.x)
+ ? (virtualSizeAtLevel.width - origin.x)
+ : extent.width;
+ uint32_t clampedCopyExtentHeight = (extent.height > virtualSizeAtLevel.height - origin.y)
+ ? (virtualSizeAtLevel.height - origin.y)
+ : extent.height;
+ return {clampedCopyExtentWidth, clampedCopyExtentHeight, extent.depthOrArrayLayers};
+}
+
+ResultOrError<Ref<TextureViewBase>> TextureBase::CreateView(
+ const TextureViewDescriptor* descriptor) {
+ return GetDevice()->CreateTextureView(this, descriptor);
+}
+
+TextureViewBase* TextureBase::APICreateView(const TextureViewDescriptor* descriptor) {
+ DeviceBase* device = GetDevice();
+
+ Ref<TextureViewBase> result;
+ if (device->ConsumedError(CreateView(descriptor), &result, "calling %s.CreateView(%s).", this,
+ descriptor)) {
+ return TextureViewBase::MakeError(device);
+ }
+ return result.Detach();
+}
+
+void TextureBase::APIDestroy() {
+ if (GetDevice()->ConsumedError(ValidateDestroy(), "calling %s.Destroy().", this)) {
+ return;
+ }
+ ASSERT(!IsError());
+ Destroy();
+}
+
+uint32_t TextureBase::APIGetWidth() const {
+ return mSize.width;
+}
+
+uint32_t TextureBase::APIGetHeight() const {
+ return mSize.height;
+}
+uint32_t TextureBase::APIGetDepthOrArrayLayers() const {
+ return mSize.depthOrArrayLayers;
+}
+
+uint32_t TextureBase::APIGetMipLevelCount() const {
+ return mMipLevelCount;
+}
+
+uint32_t TextureBase::APIGetSampleCount() const {
+ return mSampleCount;
+}
+
+wgpu::TextureDimension TextureBase::APIGetDimension() const {
+ return mDimension;
+}
+
+wgpu::TextureFormat TextureBase::APIGetFormat() const {
+ return mFormatEnumForReflection;
+}
+
+wgpu::TextureUsage TextureBase::APIGetUsage() const {
+ return mUsage;
+}
+
+MaybeError TextureBase::ValidateDestroy() const {
+ DAWN_TRY(GetDevice()->ValidateObject(this));
+ return {};
+}
+
+// TextureViewBase
+
+TextureViewBase::TextureViewBase(TextureBase* texture, const TextureViewDescriptor* descriptor)
+ : ApiObjectBase(texture->GetDevice(), descriptor->label),
+ mTexture(texture),
+ mFormat(GetDevice()->GetValidInternalFormat(descriptor->format)),
+ mDimension(descriptor->dimension),
+ mRange({ConvertViewAspect(mFormat, descriptor->aspect),
+ {descriptor->baseArrayLayer, descriptor->arrayLayerCount},
+ {descriptor->baseMipLevel, descriptor->mipLevelCount}}) {
+ TrackInDevice();
+}
+
+TextureViewBase::TextureViewBase(TextureBase* texture)
+ : ApiObjectBase(texture->GetDevice(), kLabelNotImplemented),
+ mTexture(texture),
+ mFormat(kUnusedFormat) {
+ TrackInDevice();
+}
+
+TextureViewBase::TextureViewBase(DeviceBase* device, ObjectBase::ErrorTag tag)
+ : ApiObjectBase(device, tag), mFormat(kUnusedFormat) {}
+
+TextureViewBase::~TextureViewBase() = default;
+
+void TextureViewBase::DestroyImpl() {}
+
+// static
+TextureViewBase* TextureViewBase::MakeError(DeviceBase* device) {
+ return new TextureViewBase(device, ObjectBase::kError);
+}
+
+ObjectType TextureViewBase::GetType() const {
+ return ObjectType::TextureView;
+}
+
+const TextureBase* TextureViewBase::GetTexture() const {
+ ASSERT(!IsError());
+ return mTexture.Get();
+}
+
+TextureBase* TextureViewBase::GetTexture() {
+ ASSERT(!IsError());
+ return mTexture.Get();
+}
+
+Aspect TextureViewBase::GetAspects() const {
+ ASSERT(!IsError());
+ return mRange.aspects;
+}
+
+const Format& TextureViewBase::GetFormat() const {
+ ASSERT(!IsError());
+ return mFormat;
+}
+
+wgpu::TextureViewDimension TextureViewBase::GetDimension() const {
+ ASSERT(!IsError());
+ return mDimension;
+}
+
+uint32_t TextureViewBase::GetBaseMipLevel() const {
+ ASSERT(!IsError());
+ return mRange.baseMipLevel;
+}
+
+uint32_t TextureViewBase::GetLevelCount() const {
+ ASSERT(!IsError());
+ return mRange.levelCount;
+}
+
+uint32_t TextureViewBase::GetBaseArrayLayer() const {
+ ASSERT(!IsError());
+ return mRange.baseArrayLayer;
+}
+
+uint32_t TextureViewBase::GetLayerCount() const {
+ ASSERT(!IsError());
+ return mRange.layerCount;
+}
+
+const SubresourceRange& TextureViewBase::GetSubresourceRange() const {
+ ASSERT(!IsError());
+ return mRange;
+}
} // namespace dawn::native
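
The defaulting rules in GetTextureViewDescriptorWithDefaults above are easy to lose in the reindented diff, so here is a minimal standalone sketch of them. The names below (TexDim, ViewDim, ViewDefaults, ResolveDefaults) are illustrative stand-ins rather than Dawn types, and the cube defaults are omitted because a cube dimension only arises when the caller sets it explicitly.

// Standalone sketch of the view defaulting rules (illustrative types, not Dawn's).
#include <cassert>
#include <cstdint>
#include <cstdio>

enum class TexDim { e1D, e2D, e3D };
enum class ViewDim { Undefined, e1D, e2D, e2DArray, Cube, CubeArray, e3D };

struct ViewDefaults {
    ViewDim dimension;
    uint32_t arrayLayerCount;
};

// A 2D texture with more than one array layer defaults to a 2DArray view; 1D/2D/3D views
// default to a single layer, while a defaulted 2DArray view covers all layers from
// baseArrayLayer onward.
ViewDefaults ResolveDefaults(TexDim texDim, uint32_t arrayLayers, uint32_t baseArrayLayer) {
    ViewDefaults d{ViewDim::Undefined, 0};
    switch (texDim) {
        case TexDim::e1D:
            d.dimension = ViewDim::e1D;
            break;
        case TexDim::e2D:
            d.dimension = (arrayLayers == 1) ? ViewDim::e2D : ViewDim::e2DArray;
            break;
        case TexDim::e3D:
            d.dimension = ViewDim::e3D;
            break;
    }
    switch (d.dimension) {
        case ViewDim::e1D:
        case ViewDim::e2D:
        case ViewDim::e3D:
            d.arrayLayerCount = 1;
            break;
        case ViewDim::e2DArray:
            d.arrayLayerCount = arrayLayers - baseArrayLayer;
            break;
        default:
            // Cube/CubeArray are only reached when the caller picked them explicitly.
            break;
    }
    return d;
}

int main() {
    ViewDefaults d = ResolveDefaults(TexDim::e2D, 6, 2);
    assert(d.dimension == ViewDim::e2DArray && d.arrayLayerCount == 4u);
    std::printf("2D texture, 6 layers, base layer 2 -> 2DArray view with %u layers\n",
                d.arrayLayerCount);
    return 0;
}
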
diff --git a/chromium/third_party/dawn/src/dawn/native/Texture.h b/chromium/third_party/dawn/src/dawn/native/Texture.h
index c2f14cf5de2..595809442bc 100644
--- a/chromium/third_party/dawn/src/dawn/native/Texture.h
+++ b/chromium/third_party/dawn/src/dawn/native/Texture.h
@@ -15,6 +15,8 @@
#ifndef SRC_DAWN_NATIVE_TEXTURE_H_
#define SRC_DAWN_NATIVE_TEXTURE_H_
+#include <vector>
+
#include "dawn/common/ityp_array.h"
#include "dawn/common/ityp_bitset.h"
#include "dawn/native/Error.h"
@@ -25,138 +27,146 @@
#include "dawn/native/dawn_platform.h"
-#include <vector>
-
namespace dawn::native {
- MaybeError ValidateTextureDescriptor(const DeviceBase* device,
- const TextureDescriptor* descriptor);
- MaybeError ValidateTextureViewDescriptor(const DeviceBase* device,
- const TextureBase* texture,
- const TextureViewDescriptor* descriptor);
- ResultOrError<TextureViewDescriptor> GetTextureViewDescriptorWithDefaults(
- const TextureBase* texture,
- const TextureViewDescriptor* descriptor);
-
- bool IsValidSampleCount(uint32_t sampleCount);
-
- static constexpr wgpu::TextureUsage kReadOnlyTextureUsages =
- wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::TextureBinding |
- kReadOnlyRenderAttachment;
-
- class TextureBase : public ApiObjectBase {
- public:
- enum class TextureState { OwnedInternal, OwnedExternal, Destroyed };
- enum class ClearValue { Zero, NonZero };
- TextureBase(DeviceBase* device, const TextureDescriptor* descriptor, TextureState state);
-
- static TextureBase* MakeError(DeviceBase* device);
-
- ObjectType GetType() const override;
-
- wgpu::TextureDimension GetDimension() const;
- const Format& GetFormat() const;
- const FormatSet& GetViewFormats() const;
- const Extent3D& GetSize() const;
- uint32_t GetWidth() const;
- uint32_t GetHeight() const;
- uint32_t GetDepth() const;
- uint32_t GetArrayLayers() const;
- uint32_t GetNumMipLevels() const;
- SubresourceRange GetAllSubresources() const;
- uint32_t GetSampleCount() const;
- uint32_t GetSubresourceCount() const;
-
- // |GetUsage| returns the usage with which the texture was created using the base WebGPU
- // API. The dawn-internal-usages extension may add additional usages. |GetInternalUsage|
- // returns the union of base usage and the usages added by the extension.
- wgpu::TextureUsage GetUsage() const;
- wgpu::TextureUsage GetInternalUsage() const;
-
- TextureState GetTextureState() const;
- uint32_t GetSubresourceIndex(uint32_t mipLevel, uint32_t arraySlice, Aspect aspect) const;
- bool IsSubresourceContentInitialized(const SubresourceRange& range) const;
- void SetIsSubresourceContentInitialized(bool isInitialized, const SubresourceRange& range);
-
- MaybeError ValidateCanUseInSubmitNow() const;
-
- bool IsMultisampledTexture() const;
-
- // For a texture with non-block-compressed texture format, its physical size is always equal
- // to its virtual size. For a texture with block compressed texture format, the physical
- // size is the one with paddings if necessary, which is always a multiple of the block size
- // and used in texture copying. The virtual size is the one without paddings, which is not
- // required to be a multiple of the block size and used in texture sampling.
- Extent3D GetMipLevelPhysicalSize(uint32_t level) const;
- Extent3D GetMipLevelVirtualSize(uint32_t level) const;
- Extent3D ClampToMipLevelVirtualSize(uint32_t level,
- const Origin3D& origin,
- const Extent3D& extent) const;
-
- ResultOrError<Ref<TextureViewBase>> CreateView(
- const TextureViewDescriptor* descriptor = nullptr);
-
- // Dawn API
- TextureViewBase* APICreateView(const TextureViewDescriptor* descriptor = nullptr);
- void APIDestroy();
-
- protected:
- // Constructor used only for mocking and testing.
- TextureBase(DeviceBase* device, TextureState state);
- void DestroyImpl() override;
-
- private:
- TextureBase(DeviceBase* device, ObjectBase::ErrorTag tag);
-
- MaybeError ValidateDestroy() const;
- wgpu::TextureDimension mDimension;
- const Format& mFormat;
- FormatSet mViewFormats;
- Extent3D mSize;
- uint32_t mMipLevelCount;
- uint32_t mSampleCount;
- wgpu::TextureUsage mUsage = wgpu::TextureUsage::None;
- wgpu::TextureUsage mInternalUsage = wgpu::TextureUsage::None;
- TextureState mState;
-
- // TODO(crbug.com/dawn/845): Use a more optimized data structure to save space
- std::vector<bool> mIsSubresourceContentInitializedAtIndex;
- };
-
- class TextureViewBase : public ApiObjectBase {
- public:
- TextureViewBase(TextureBase* texture, const TextureViewDescriptor* descriptor);
-
- static TextureViewBase* MakeError(DeviceBase* device);
-
- ObjectType GetType() const override;
-
- const TextureBase* GetTexture() const;
- TextureBase* GetTexture();
-
- Aspect GetAspects() const;
- const Format& GetFormat() const;
- wgpu::TextureViewDimension GetDimension() const;
- uint32_t GetBaseMipLevel() const;
- uint32_t GetLevelCount() const;
- uint32_t GetBaseArrayLayer() const;
- uint32_t GetLayerCount() const;
- const SubresourceRange& GetSubresourceRange() const;
-
- protected:
- // Constructor used only for mocking and testing.
- explicit TextureViewBase(TextureBase* texture);
- void DestroyImpl() override;
-
- private:
- TextureViewBase(DeviceBase* device, ObjectBase::ErrorTag tag);
-
- Ref<TextureBase> mTexture;
-
- const Format& mFormat;
- wgpu::TextureViewDimension mDimension;
- SubresourceRange mRange;
- };
+MaybeError ValidateTextureDescriptor(const DeviceBase* device, const TextureDescriptor* descriptor);
+MaybeError ValidateTextureViewDescriptor(const DeviceBase* device,
+ const TextureBase* texture,
+ const TextureViewDescriptor* descriptor);
+ResultOrError<TextureViewDescriptor> GetTextureViewDescriptorWithDefaults(
+ const TextureBase* texture,
+ const TextureViewDescriptor* descriptor);
+
+bool IsValidSampleCount(uint32_t sampleCount);
+
+static constexpr wgpu::TextureUsage kReadOnlyTextureUsages =
+ wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::TextureBinding | kReadOnlyRenderAttachment;
+
+class TextureBase : public ApiObjectBase {
+ public:
+ enum class TextureState { OwnedInternal, OwnedExternal, Destroyed };
+ enum class ClearValue { Zero, NonZero };
+
+ static TextureBase* MakeError(DeviceBase* device, const TextureDescriptor* descriptor);
+
+ ObjectType GetType() const override;
+
+ wgpu::TextureDimension GetDimension() const;
+ const Format& GetFormat() const;
+ const FormatSet& GetViewFormats() const;
+ const Extent3D& GetSize() const;
+ uint32_t GetWidth() const;
+ uint32_t GetHeight() const;
+ uint32_t GetDepth() const;
+ uint32_t GetArrayLayers() const;
+ uint32_t GetNumMipLevels() const;
+ SubresourceRange GetAllSubresources() const;
+ uint32_t GetSampleCount() const;
+ uint32_t GetSubresourceCount() const;
+
+ // |GetUsage| returns the usage with which the texture was created using the base WebGPU
+ // API. The dawn-internal-usages extension may add additional usages. |GetInternalUsage|
+ // returns the union of base usage and the usages added by the extension.
+ wgpu::TextureUsage GetUsage() const;
+ wgpu::TextureUsage GetInternalUsage() const;
+
+ TextureState GetTextureState() const;
+ uint32_t GetSubresourceIndex(uint32_t mipLevel, uint32_t arraySlice, Aspect aspect) const;
+ bool IsSubresourceContentInitialized(const SubresourceRange& range) const;
+ void SetIsSubresourceContentInitialized(bool isInitialized, const SubresourceRange& range);
+
+ MaybeError ValidateCanUseInSubmitNow() const;
+
+ bool IsMultisampledTexture() const;
+
+    // For a texture with a non-block-compressed format, the physical size is always equal to
+    // the virtual size. For a texture with a block-compressed format, the physical size is
+    // padded if necessary to a multiple of the block size and is used in texture copying. The
+    // virtual size is the unpadded size, which is not required to be a multiple of the block
+    // size and is used in texture sampling.
+ Extent3D GetMipLevelSingleSubresourcePhysicalSize(uint32_t level) const;
+ Extent3D GetMipLevelSingleSubresourceVirtualSize(uint32_t level) const;
+ Extent3D ClampToMipLevelVirtualSize(uint32_t level,
+ const Origin3D& origin,
+ const Extent3D& extent) const;
+
+ ResultOrError<Ref<TextureViewBase>> CreateView(
+ const TextureViewDescriptor* descriptor = nullptr);
+
+ // Dawn API
+ TextureViewBase* APICreateView(const TextureViewDescriptor* descriptor = nullptr);
+ void APIDestroy();
+ uint32_t APIGetWidth() const;
+ uint32_t APIGetHeight() const;
+ uint32_t APIGetDepthOrArrayLayers() const;
+ uint32_t APIGetMipLevelCount() const;
+ uint32_t APIGetSampleCount() const;
+ wgpu::TextureDimension APIGetDimension() const;
+ wgpu::TextureFormat APIGetFormat() const;
+ wgpu::TextureUsage APIGetUsage() const;
+
+ protected:
+ TextureBase(DeviceBase* device, const TextureDescriptor* descriptor, TextureState state);
+ // Constructor used only for mocking and testing.
+ TextureBase(DeviceBase* device, TextureState state);
+ ~TextureBase() override;
+
+ void DestroyImpl() override;
+
+ private:
+ TextureBase(DeviceBase* device, const TextureDescriptor* descriptor, ObjectBase::ErrorTag tag);
+
+ MaybeError ValidateDestroy() const;
+ wgpu::TextureDimension mDimension;
+ const Format& mFormat;
+ FormatSet mViewFormats;
+ Extent3D mSize;
+ uint32_t mMipLevelCount;
+ uint32_t mSampleCount;
+ wgpu::TextureUsage mUsage = wgpu::TextureUsage::None;
+ wgpu::TextureUsage mInternalUsage = wgpu::TextureUsage::None;
+ TextureState mState;
+ wgpu::TextureFormat mFormatEnumForReflection;
+
+ // TODO(crbug.com/dawn/845): Use a more optimized data structure to save space
+ std::vector<bool> mIsSubresourceContentInitializedAtIndex;
+};
+
+class TextureViewBase : public ApiObjectBase {
+ public:
+ TextureViewBase(TextureBase* texture, const TextureViewDescriptor* descriptor);
+ ~TextureViewBase() override;
+
+ static TextureViewBase* MakeError(DeviceBase* device);
+
+ ObjectType GetType() const override;
+
+ const TextureBase* GetTexture() const;
+ TextureBase* GetTexture();
+
+ Aspect GetAspects() const;
+ const Format& GetFormat() const;
+ wgpu::TextureViewDimension GetDimension() const;
+ uint32_t GetBaseMipLevel() const;
+ uint32_t GetLevelCount() const;
+ uint32_t GetBaseArrayLayer() const;
+ uint32_t GetLayerCount() const;
+ const SubresourceRange& GetSubresourceRange() const;
+
+ protected:
+ // Constructor used only for mocking and testing.
+ explicit TextureViewBase(TextureBase* texture);
+ void DestroyImpl() override;
+
+ private:
+ TextureViewBase(DeviceBase* device, ObjectBase::ErrorTag tag);
+
+ Ref<TextureBase> mTexture;
+
+ const Format& mFormat;
+ wgpu::TextureViewDimension mDimension;
+ SubresourceRange mRange;
+};
} // namespace dawn::native
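
The physical versus virtual mip size distinction documented in the TextureBase comments above can be made concrete with a short standalone computation. The 4x4 block size and helper names below are assumptions for illustration only, not Dawn's API, and unlike GetMipLevelSingleSubresourcePhysicalSize this sketch also rounds level 0 for simplicity.

// Standalone sketch: virtual vs. physical mip sizes for a block-compressed format.
#include <algorithm>
#include <cstdint>
#include <cstdio>

struct Extent2D {
    uint32_t width;
    uint32_t height;
};

// Virtual size: the sampled footprint of a mip level, with no block rounding.
Extent2D MipVirtualSize(Extent2D base, uint32_t level) {
    return {std::max(base.width >> level, 1u), std::max(base.height >> level, 1u)};
}

// Physical size: the virtual size rounded up to the texel block, which is what copies must
// address. A 4x4 block is assumed (BC/ETC2-style); the Dawn code above additionally skips
// the rounding at level 0, where the base size is expected to be block-aligned already.
Extent2D MipPhysicalSize(Extent2D base, uint32_t level, uint32_t blockW = 4, uint32_t blockH = 4) {
    Extent2D e = MipVirtualSize(base, level);
    e.width = (e.width + blockW - 1) / blockW * blockW;
    e.height = (e.height + blockH - 1) / blockH * blockH;
    return e;
}

int main() {
    const Extent2D base{60, 60};
    for (uint32_t level = 0; level < 4; ++level) {
        Extent2D v = MipVirtualSize(base, level);
        Extent2D p = MipPhysicalSize(base, level);
        std::printf("level %u: virtual %ux%u, physical %ux%u\n", level, v.width, v.height,
                    p.width, p.height);
    }
    return 0;
}

With a 60x60 base, levels 1 through 3 report virtual widths 30, 15 and 7 but physical widths 32, 16 and 8, matching the rounding performed above.
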
diff --git a/chromium/third_party/dawn/src/dawn/native/TintUtils.cpp b/chromium/third_party/dawn/src/dawn/native/TintUtils.cpp
index d84c982af29..ca4aea40e4a 100644
--- a/chromium/third_party/dawn/src/dawn/native/TintUtils.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/TintUtils.cpp
@@ -13,43 +13,44 @@
// limitations under the License.
#include "dawn/native/TintUtils.h"
+
#include "dawn/native/Device.h"
-#include <tint/tint.h>
+#include "tint/tint.h"
namespace dawn::native {
- namespace {
-
- thread_local DeviceBase* tlDevice = nullptr;
-
- void TintICEReporter(const tint::diag::List& diagnostics) {
- if (tlDevice) {
- tlDevice->HandleError(InternalErrorType::Validation, diagnostics.str().c_str());
- }
- }
-
- bool InitializeTintErrorReporter() {
- tint::SetInternalCompilerErrorReporter(&TintICEReporter);
- return true;
- }
+namespace {
- } // namespace
-
- ScopedTintICEHandler::ScopedTintICEHandler(DeviceBase* device) {
- // Call tint::SetInternalCompilerErrorReporter() the first time
- // this constructor is called. Static initialization is
- // guaranteed to be thread-safe, and only occur once.
- static bool init_once_tint_error_reporter = InitializeTintErrorReporter();
- (void)init_once_tint_error_reporter;
-
- // Shouldn't have overlapping instances of this handler.
- ASSERT(tlDevice == nullptr);
- tlDevice = device;
- }
+thread_local DeviceBase* tlDevice = nullptr;
- ScopedTintICEHandler::~ScopedTintICEHandler() {
- tlDevice = nullptr;
+void TintICEReporter(const tint::diag::List& diagnostics) {
+ if (tlDevice) {
+ tlDevice->HandleError(InternalErrorType::Validation, diagnostics.str().c_str());
}
+}
+
+bool InitializeTintErrorReporter() {
+ tint::SetInternalCompilerErrorReporter(&TintICEReporter);
+ return true;
+}
+
+} // namespace
+
+ScopedTintICEHandler::ScopedTintICEHandler(DeviceBase* device) {
+ // Call tint::SetInternalCompilerErrorReporter() the first time
+ // this constructor is called. Static initialization is
+ // guaranteed to be thread-safe, and only occur once.
+ static bool init_once_tint_error_reporter = InitializeTintErrorReporter();
+ (void)init_once_tint_error_reporter;
+
+ // Shouldn't have overlapping instances of this handler.
+ ASSERT(tlDevice == nullptr);
+ tlDevice = device;
+}
+
+ScopedTintICEHandler::~ScopedTintICEHandler() {
+ tlDevice = nullptr;
+}
} // namespace dawn::native
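
ScopedTintICEHandler's constructor above registers the Tint ICE reporter through a function-local static so that registration happens exactly once, even if several threads construct the handler concurrently. A minimal sketch of that idiom, with placeholder names instead of the Tint API:

// Standalone sketch of one-time, thread-safe registration via a function-local static.
#include <cstdio>

void RegisterReporter() {
    // Stands in for tint::SetInternalCompilerErrorReporter(&TintICEReporter).
    std::printf("reporter registered\n");
}

bool RegisterOnce() {
    RegisterReporter();
    return true;
}

struct ScopedHandler {
    ScopedHandler() {
        // C++11 guarantees this initializer runs exactly once across all threads.
        static bool registered = RegisterOnce();
        (void)registered;
    }
};

int main() {
    ScopedHandler a;
    ScopedHandler b;  // Does not register again; "reporter registered" prints only once.
    return 0;
}
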
diff --git a/chromium/third_party/dawn/src/dawn/native/TintUtils.h b/chromium/third_party/dawn/src/dawn/native/TintUtils.h
index e7a8fa7a6c9..4a2df60d75f 100644
--- a/chromium/third_party/dawn/src/dawn/native/TintUtils.h
+++ b/chromium/third_party/dawn/src/dawn/native/TintUtils.h
@@ -19,18 +19,18 @@
namespace dawn::native {
- class DeviceBase;
-
- // Indicates that for the lifetime of this object tint internal compiler errors should be
- // reported to the given device.
- class ScopedTintICEHandler : public NonCopyable {
- public:
- explicit ScopedTintICEHandler(DeviceBase* device);
- ~ScopedTintICEHandler();
-
- private:
- ScopedTintICEHandler(ScopedTintICEHandler&&) = delete;
- };
+class DeviceBase;
+
+// Indicates that for the lifetime of this object tint internal compiler errors should be
+// reported to the given device.
+class ScopedTintICEHandler : public NonCopyable {
+ public:
+ explicit ScopedTintICEHandler(DeviceBase* device);
+ ~ScopedTintICEHandler();
+
+ private:
+ ScopedTintICEHandler(ScopedTintICEHandler&&) = delete;
+};
} // namespace dawn::native
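
The other half of ScopedTintICEHandler is a thread_local scope guard: construction points error reporting at one device for the current thread, destruction clears it, and an assertion forbids nesting. A minimal sketch of the pattern with hypothetical Device and ReportError names:

// Standalone sketch of the thread_local scope-guard pattern used by ScopedTintICEHandler.
#include <cassert>
#include <cstdio>

struct Device {
    const char* name;
};

thread_local Device* tlCurrentDevice = nullptr;  // Which device receives errors on this thread.

void ReportError(const char* message) {
    if (tlCurrentDevice != nullptr) {
        std::printf("[%s] %s\n", tlCurrentDevice->name, message);
    }
}

class ScopedErrorTarget {
  public:
    explicit ScopedErrorTarget(Device* device) {
        assert(tlCurrentDevice == nullptr);  // Instances must not overlap on one thread.
        tlCurrentDevice = device;
    }
    ~ScopedErrorTarget() { tlCurrentDevice = nullptr; }
    ScopedErrorTarget(const ScopedErrorTarget&) = delete;
    ScopedErrorTarget& operator=(const ScopedErrorTarget&) = delete;
};

int main() {
    Device d{"device-0"};
    {
        ScopedErrorTarget scope(&d);
        ReportError("internal compiler error");  // Routed to device-0.
    }
    ReportError("dropped");  // No target registered on this thread; silently ignored.
    return 0;
}
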
diff --git a/chromium/third_party/dawn/src/dawn/native/ToBackend.h b/chromium/third_party/dawn/src/dawn/native/ToBackend.h
index ef8f2377ff0..89bddc1dc59 100644
--- a/chromium/third_party/dawn/src/dawn/native/ToBackend.h
+++ b/chromium/third_party/dawn/src/dawn/native/ToBackend.h
@@ -19,136 +19,138 @@
namespace dawn::native {
- // ToBackendTraits implements the mapping from base type to member type of BackendTraits
- template <typename T, typename BackendTraits>
- struct ToBackendTraits;
-
- template <typename BackendTraits>
- struct ToBackendTraits<AdapterBase, BackendTraits> {
- using BackendType = typename BackendTraits::AdapterType;
- };
-
- template <typename BackendTraits>
- struct ToBackendTraits<BindGroupBase, BackendTraits> {
- using BackendType = typename BackendTraits::BindGroupType;
- };
-
- template <typename BackendTraits>
- struct ToBackendTraits<BindGroupLayoutBase, BackendTraits> {
- using BackendType = typename BackendTraits::BindGroupLayoutType;
- };
-
- template <typename BackendTraits>
- struct ToBackendTraits<BufferBase, BackendTraits> {
- using BackendType = typename BackendTraits::BufferType;
- };
-
- template <typename BackendTraits>
- struct ToBackendTraits<CommandBufferBase, BackendTraits> {
- using BackendType = typename BackendTraits::CommandBufferType;
- };
-
- template <typename BackendTraits>
- struct ToBackendTraits<ComputePipelineBase, BackendTraits> {
- using BackendType = typename BackendTraits::ComputePipelineType;
- };
-
- template <typename BackendTraits>
- struct ToBackendTraits<DeviceBase, BackendTraits> {
- using BackendType = typename BackendTraits::DeviceType;
- };
-
- template <typename BackendTraits>
- struct ToBackendTraits<PipelineLayoutBase, BackendTraits> {
- using BackendType = typename BackendTraits::PipelineLayoutType;
- };
-
- template <typename BackendTraits>
- struct ToBackendTraits<QuerySetBase, BackendTraits> {
- using BackendType = typename BackendTraits::QuerySetType;
- };
-
- template <typename BackendTraits>
- struct ToBackendTraits<QueueBase, BackendTraits> {
- using BackendType = typename BackendTraits::QueueType;
- };
-
- template <typename BackendTraits>
- struct ToBackendTraits<RenderPipelineBase, BackendTraits> {
- using BackendType = typename BackendTraits::RenderPipelineType;
- };
-
- template <typename BackendTraits>
- struct ToBackendTraits<ResourceHeapBase, BackendTraits> {
- using BackendType = typename BackendTraits::ResourceHeapType;
- };
-
- template <typename BackendTraits>
- struct ToBackendTraits<SamplerBase, BackendTraits> {
- using BackendType = typename BackendTraits::SamplerType;
- };
-
- template <typename BackendTraits>
- struct ToBackendTraits<ShaderModuleBase, BackendTraits> {
- using BackendType = typename BackendTraits::ShaderModuleType;
- };
-
- template <typename BackendTraits>
- struct ToBackendTraits<StagingBufferBase, BackendTraits> {
- using BackendType = typename BackendTraits::StagingBufferType;
- };
-
- template <typename BackendTraits>
- struct ToBackendTraits<TextureBase, BackendTraits> {
- using BackendType = typename BackendTraits::TextureType;
- };
-
- template <typename BackendTraits>
- struct ToBackendTraits<SwapChainBase, BackendTraits> {
- using BackendType = typename BackendTraits::SwapChainType;
- };
-
- template <typename BackendTraits>
- struct ToBackendTraits<TextureViewBase, BackendTraits> {
- using BackendType = typename BackendTraits::TextureViewType;
- };
-
- // ToBackendBase implements conversion to the given BackendTraits
- // To use it in a backend, use the following:
- // template<typename T>
- // auto ToBackend(T&& common) -> decltype(ToBackendBase<MyBackendTraits>(common)) {
- // return ToBackendBase<MyBackendTraits>(common);
- // }
-
- template <typename BackendTraits, typename T>
- Ref<typename ToBackendTraits<T, BackendTraits>::BackendType>& ToBackendBase(Ref<T>& common) {
- return reinterpret_cast<Ref<typename ToBackendTraits<T, BackendTraits>::BackendType>&>(
- common);
- }
-
- template <typename BackendTraits, typename T>
- Ref<typename ToBackendTraits<T, BackendTraits>::BackendType>&& ToBackendBase(Ref<T>&& common) {
- return reinterpret_cast<Ref<typename ToBackendTraits<T, BackendTraits>::BackendType>&&>(
- common);
- }
-
- template <typename BackendTraits, typename T>
- const Ref<typename ToBackendTraits<T, BackendTraits>::BackendType>& ToBackendBase(
- const Ref<T>& common) {
- return reinterpret_cast<
- const Ref<typename ToBackendTraits<T, BackendTraits>::BackendType>&>(common);
- }
-
- template <typename BackendTraits, typename T>
- typename ToBackendTraits<T, BackendTraits>::BackendType* ToBackendBase(T* common) {
- return reinterpret_cast<typename ToBackendTraits<T, BackendTraits>::BackendType*>(common);
- }
-
- template <typename BackendTraits, typename T>
- const typename ToBackendTraits<T, BackendTraits>::BackendType* ToBackendBase(const T* common) {
- return reinterpret_cast<const typename ToBackendTraits<T, BackendTraits>::BackendType*>(
- common);
- }
+// ToBackendTraits implements the mapping from base type to member type of BackendTraits
+template <typename T, typename BackendTraits>
+struct ToBackendTraits;
+
+template <typename BackendTraits>
+struct ToBackendTraits<AdapterBase, BackendTraits> {
+ using BackendType = typename BackendTraits::AdapterType;
+};
+
+template <typename BackendTraits>
+struct ToBackendTraits<BindGroupBase, BackendTraits> {
+ using BackendType = typename BackendTraits::BindGroupType;
+};
+
+template <typename BackendTraits>
+struct ToBackendTraits<BindGroupLayoutBase, BackendTraits> {
+ using BackendType = typename BackendTraits::BindGroupLayoutType;
+};
+
+template <typename BackendTraits>
+struct ToBackendTraits<BufferBase, BackendTraits> {
+ using BackendType = typename BackendTraits::BufferType;
+};
+
+template <typename BackendTraits>
+struct ToBackendTraits<CommandBufferBase, BackendTraits> {
+ using BackendType = typename BackendTraits::CommandBufferType;
+};
+
+template <typename BackendTraits>
+struct ToBackendTraits<ComputePipelineBase, BackendTraits> {
+ using BackendType = typename BackendTraits::ComputePipelineType;
+};
+
+template <typename BackendTraits>
+struct ToBackendTraits<DeviceBase, BackendTraits> {
+ using BackendType = typename BackendTraits::DeviceType;
+};
+
+template <typename BackendTraits>
+struct ToBackendTraits<PipelineCacheBase, BackendTraits> {
+ using BackendType = typename BackendTraits::PipelineCacheType;
+};
+
+template <typename BackendTraits>
+struct ToBackendTraits<PipelineLayoutBase, BackendTraits> {
+ using BackendType = typename BackendTraits::PipelineLayoutType;
+};
+
+template <typename BackendTraits>
+struct ToBackendTraits<QuerySetBase, BackendTraits> {
+ using BackendType = typename BackendTraits::QuerySetType;
+};
+
+template <typename BackendTraits>
+struct ToBackendTraits<QueueBase, BackendTraits> {
+ using BackendType = typename BackendTraits::QueueType;
+};
+
+template <typename BackendTraits>
+struct ToBackendTraits<RenderPipelineBase, BackendTraits> {
+ using BackendType = typename BackendTraits::RenderPipelineType;
+};
+
+template <typename BackendTraits>
+struct ToBackendTraits<ResourceHeapBase, BackendTraits> {
+ using BackendType = typename BackendTraits::ResourceHeapType;
+};
+
+template <typename BackendTraits>
+struct ToBackendTraits<SamplerBase, BackendTraits> {
+ using BackendType = typename BackendTraits::SamplerType;
+};
+
+template <typename BackendTraits>
+struct ToBackendTraits<ShaderModuleBase, BackendTraits> {
+ using BackendType = typename BackendTraits::ShaderModuleType;
+};
+
+template <typename BackendTraits>
+struct ToBackendTraits<StagingBufferBase, BackendTraits> {
+ using BackendType = typename BackendTraits::StagingBufferType;
+};
+
+template <typename BackendTraits>
+struct ToBackendTraits<TextureBase, BackendTraits> {
+ using BackendType = typename BackendTraits::TextureType;
+};
+
+template <typename BackendTraits>
+struct ToBackendTraits<SwapChainBase, BackendTraits> {
+ using BackendType = typename BackendTraits::SwapChainType;
+};
+
+template <typename BackendTraits>
+struct ToBackendTraits<TextureViewBase, BackendTraits> {
+ using BackendType = typename BackendTraits::TextureViewType;
+};
+
+// ToBackendBase implements conversion to the given BackendTraits
+// To use it in a backend, use the following:
+// template<typename T>
+// auto ToBackend(T&& common) -> decltype(ToBackendBase<MyBackendTraits>(common)) {
+// return ToBackendBase<MyBackendTraits>(common);
+// }
+
+template <typename BackendTraits, typename T>
+Ref<typename ToBackendTraits<T, BackendTraits>::BackendType>& ToBackendBase(Ref<T>& common) {
+ return reinterpret_cast<Ref<typename ToBackendTraits<T, BackendTraits>::BackendType>&>(common);
+}
+
+template <typename BackendTraits, typename T>
+Ref<typename ToBackendTraits<T, BackendTraits>::BackendType>&& ToBackendBase(Ref<T>&& common) {
+ return reinterpret_cast<Ref<typename ToBackendTraits<T, BackendTraits>::BackendType>&&>(common);
+}
+
+template <typename BackendTraits, typename T>
+const Ref<typename ToBackendTraits<T, BackendTraits>::BackendType>& ToBackendBase(
+ const Ref<T>& common) {
+ return reinterpret_cast<const Ref<typename ToBackendTraits<T, BackendTraits>::BackendType>&>(
+ common);
+}
+
+template <typename BackendTraits, typename T>
+typename ToBackendTraits<T, BackendTraits>::BackendType* ToBackendBase(T* common) {
+ return reinterpret_cast<typename ToBackendTraits<T, BackendTraits>::BackendType*>(common);
+}
+
+template <typename BackendTraits, typename T>
+const typename ToBackendTraits<T, BackendTraits>::BackendType* ToBackendBase(const T* common) {
+ return reinterpret_cast<const typename ToBackendTraits<T, BackendTraits>::BackendType*>(common);
+}
} // namespace dawn::native
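
The ToBackendTraits/ToBackendBase machinery above (now extended with PipelineCacheBase) is a compile-time lookup from a frontend type to the backend type declared by a traits struct. Below is a minimal sketch with toy types; the VulkanTexture and VulkanBuffer names are hypothetical, and a plain static_cast stands in for Dawn's reinterpret_cast, which also covers the Ref<T> overloads.

// Standalone sketch of the ToBackendTraits mapping with toy types.
#include <cstdio>

struct TextureBase { int width = 0; };
struct BufferBase { int size = 0; };

// A hypothetical backend derives its object types from the frontend ones.
struct VulkanTexture : TextureBase { int vkImage = 42; };
struct VulkanBuffer : BufferBase { int vkBuffer = 7; };

struct VulkanBackendTraits {
    using TextureType = VulkanTexture;
    using BufferType = VulkanBuffer;
};

// Maps a frontend type to the backend type declared by the traits struct.
template <typename T, typename BackendTraits>
struct ToBackendTraits;

template <typename BackendTraits>
struct ToBackendTraits<TextureBase, BackendTraits> {
    using BackendType = typename BackendTraits::TextureType;
};

template <typename BackendTraits>
struct ToBackendTraits<BufferBase, BackendTraits> {
    using BackendType = typename BackendTraits::BufferType;
};

template <typename BackendTraits, typename T>
typename ToBackendTraits<T, BackendTraits>::BackendType* ToBackendBase(T* common) {
    return static_cast<typename ToBackendTraits<T, BackendTraits>::BackendType*>(common);
}

// A backend wraps the helper with its own traits, as the comment block above suggests.
template <typename T>
auto ToBackend(T* common) {
    return ToBackendBase<VulkanBackendTraits>(common);
}

int main() {
    VulkanTexture tex;
    TextureBase* base = &tex;
    std::printf("vkImage = %d\n", ToBackend(base)->vkImage);
    return 0;
}

Dawn's real helper uses reinterpret_cast, as shown in the diff above, because frontend objects are always constructed as their backend subclasses and the same trick is applied to Ref<T> handles.
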
diff --git a/chromium/third_party/dawn/src/dawn/native/Toggles.cpp b/chromium/third_party/dawn/src/dawn/native/Toggles.cpp
index 9b3a65517cc..f98c94dbdfb 100644
--- a/chromium/third_party/dawn/src/dawn/native/Toggles.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/Toggles.cpp
@@ -19,328 +19,340 @@
#include "dawn/native/Toggles.h"
namespace dawn::native {
- namespace {
+namespace {
- struct ToggleEnumAndInfo {
- Toggle toggle;
- ToggleInfo info;
- };
+struct ToggleEnumAndInfo {
+ Toggle toggle;
+ ToggleInfo info;
+};
- using ToggleEnumAndInfoList =
- std::array<ToggleEnumAndInfo, static_cast<size_t>(Toggle::EnumCount)>;
+using ToggleEnumAndInfoList = std::array<ToggleEnumAndInfo, static_cast<size_t>(Toggle::EnumCount)>;
- static constexpr ToggleEnumAndInfoList kToggleNameAndInfoList = {{
- {Toggle::EmulateStoreAndMSAAResolve,
- {"emulate_store_and_msaa_resolve",
- "Emulate storing into multisampled color attachments and doing MSAA resolve "
- "simultaneously. This workaround is enabled by default on the Metal drivers that do "
- "not support MTLStoreActionStoreAndMultisampleResolve. To support StoreOp::Store on "
- "those platforms, we should do MSAA resolve in another render pass after ending the "
- "previous one.",
- "https://crbug.com/dawn/56"}},
- {Toggle::NonzeroClearResourcesOnCreationForTesting,
- {"nonzero_clear_resources_on_creation_for_testing",
- "Clears texture to full 1 bits as soon as they are created, but doesn't update "
- "the tracking state of the texture. This way we can test the logic of clearing "
- "textures that use recycled memory.",
- "https://crbug.com/dawn/145"}},
- {Toggle::AlwaysResolveIntoZeroLevelAndLayer,
- {"always_resolve_into_zero_level_and_layer",
- "When the resolve target is a texture view that is created on the non-zero level or "
- "layer of a texture, we first resolve into a temporarily 2D texture with only one "
- "mipmap level and one array layer, and copy the result of MSAA resolve into the "
- "true resolve target. This workaround is enabled by default on the Metal drivers "
- "that have bugs when setting non-zero resolveLevel or resolveSlice.",
- "https://crbug.com/dawn/56"}},
- {Toggle::LazyClearResourceOnFirstUse,
- {"lazy_clear_resource_on_first_use",
- "Clears resource to zero on first usage. This initializes the resource "
- "so that no dirty bits from recycled memory is present in the new resource.",
- "https://crbug.com/dawn/145"}},
- {Toggle::TurnOffVsync,
- {"turn_off_vsync",
- "Turn off vsync when rendering. In order to do performance test or run perf tests, "
- "turn off vsync so that the fps can exeed 60.",
- "https://crbug.com/dawn/237"}},
- {Toggle::UseTemporaryBufferInCompressedTextureToTextureCopy,
- {"use_temporary_buffer_in_texture_to_texture_copy",
- "Split texture-to-texture copy into two copies: copy from source texture into a "
- "temporary buffer, and copy from the temporary buffer into the destination texture "
- "when copying between compressed textures that don't have block-aligned sizes. This "
- "workaround is enabled by default on all Vulkan drivers to solve an issue in the "
- "Vulkan SPEC about the texture-to-texture copies with compressed formats. See #1005 "
- "(https://github.com/KhronosGroup/Vulkan-Docs/issues/1005) for more details.",
- "https://crbug.com/dawn/42"}},
- {Toggle::UseD3D12ResourceHeapTier2,
- {"use_d3d12_resource_heap_tier2",
- "Enable support for resource heap tier 2. Resource heap tier 2 allows mixing of "
- "texture and buffers in the same heap. This allows better heap re-use and reduces "
- "fragmentation.",
- "https://crbug.com/dawn/27"}},
- {Toggle::UseD3D12RenderPass,
- {"use_d3d12_render_pass",
- "Use the D3D12 render pass API introduced in Windows build 1809 by default. On "
- "versions of Windows prior to build 1809, or when this toggle is turned off, Dawn "
- "will emulate a render pass.",
- "https://crbug.com/dawn/36"}},
- {Toggle::UseD3D12ResidencyManagement,
- {"use_d3d12_residency_management",
- "Enable residency management. This allows page-in and page-out of resource heaps in "
- "GPU memory. This component improves overcommitted performance by keeping the most "
- "recently used resources local to the GPU. Turning this component off can cause "
- "allocation failures when application memory exceeds physical device memory.",
- "https://crbug.com/dawn/193"}},
- {Toggle::DisableResourceSuballocation,
- {"disable_resource_suballocation",
- "Force the backends to not perform resource suballocation. This may expose "
- "allocation "
- "patterns which would otherwise only occur with large or specific types of "
- "resources.",
- "https://crbug.com/1313172"}},
- {Toggle::SkipValidation,
- {"skip_validation", "Skip expensive validation of Dawn commands.",
- "https://crbug.com/dawn/271"}},
- {Toggle::VulkanUseD32S8,
- {"vulkan_use_d32s8",
- "Vulkan mandates support of either D32_FLOAT_S8 or D24_UNORM_S8. When available the "
- "backend will use D32S8 (toggle to on) but setting the toggle to off will make it "
- "use the D24S8 format when possible.",
- "https://crbug.com/dawn/286"}},
- {Toggle::VulkanUseS8,
- {"vulkan_use_s8",
- "Vulkan has a pure stencil8 format but it is not universally available. When this "
- "toggle is on, the backend will use S8 for the stencil8 format, otherwise it will "
- "fallback to D32S8 or D24S8.",
- "https://crbug.com/dawn/666"}},
- {Toggle::MetalDisableSamplerCompare,
- {"metal_disable_sampler_compare",
- "Disables the use of sampler compare on Metal. This is unsupported before A9 "
- "processors.",
- "https://crbug.com/dawn/342"}},
- {Toggle::MetalUseSharedModeForCounterSampleBuffer,
- {"metal_use_shared_mode_for_counter_sample_buffer",
- "The query set on Metal need to create MTLCounterSampleBuffer which storage mode "
- "must be either MTLStorageModeShared or MTLStorageModePrivate. But the private mode "
- "does not work properly on Intel platforms. The workaround is use shared mode "
- "instead.",
- "https://crbug.com/dawn/434"}},
- {Toggle::DisableBaseVertex,
- {"disable_base_vertex",
- "Disables the use of non-zero base vertex which is unsupported on some platforms.",
- "https://crbug.com/dawn/343"}},
- {Toggle::DisableBaseInstance,
- {"disable_base_instance",
- "Disables the use of non-zero base instance which is unsupported on some "
- "platforms.",
- "https://crbug.com/dawn/343"}},
- {Toggle::DisableIndexedDrawBuffers,
- {"disable_indexed_draw_buffers",
- "Disables the use of indexed draw buffer state which is unsupported on some "
- "platforms.",
- "https://crbug.com/dawn/582"}},
- {Toggle::DisableSnormRead,
- {"disable_snorm_read",
- "Disables reading from Snorm textures which is unsupported on some platforms.",
- "https://crbug.com/dawn/667"}},
- {Toggle::DisableDepthStencilRead,
- {"disable_depth_stencil_read",
- "Disables reading from depth/stencil textures which is unsupported on some "
- "platforms.",
- "https://crbug.com/dawn/667"}},
- {Toggle::DisableSampleVariables,
- {"disable_sample_variables",
- "Disables gl_SampleMask and related functionality which is unsupported on some "
- "platforms.",
- "https://crbug.com/dawn/673"}},
- {Toggle::UseD3D12SmallShaderVisibleHeapForTesting,
- {"use_d3d12_small_shader_visible_heap",
- "Enable use of a small D3D12 shader visible heap, instead of using a large one by "
- "default. This setting is used to test bindgroup encoding.",
- "https://crbug.com/dawn/155"}},
- {Toggle::UseDXC,
- {"use_dxc",
- "Use DXC instead of FXC for compiling HLSL when both dxcompiler.dll and dxil.dll "
- "is available.",
- "https://crbug.com/dawn/402"}},
- {Toggle::DisableRobustness,
- {"disable_robustness", "Disable robust buffer access", "https://crbug.com/dawn/480"}},
- {Toggle::MetalEnableVertexPulling,
- {"metal_enable_vertex_pulling",
- "Uses vertex pulling to protect out-of-bounds reads on Metal",
- "https://crbug.com/dawn/480"}},
- {Toggle::DisallowUnsafeAPIs,
- {"disallow_unsafe_apis",
- "Produces validation errors on API entry points or parameter combinations that "
- "aren't considered secure yet.",
- "http://crbug.com/1138528"}},
- {Toggle::FlushBeforeClientWaitSync,
- {"flush_before_client_wait_sync",
- "Call glFlush before glClientWaitSync to work around bugs in the latter",
- "https://crbug.com/dawn/633"}},
- {Toggle::UseTempBufferInSmallFormatTextureToTextureCopyFromGreaterToLessMipLevel,
- {"use_temp_buffer_in_small_format_texture_to_texture_copy_from_greater_to_less_mip_"
- "level",
- "Split texture-to-texture copy into two copies: copy from source texture into a "
- "temporary buffer, and copy from the temporary buffer into the destination texture "
- "under specific situations. This workaround is by default enabled on some Intel "
- "GPUs which have a driver bug in the execution of CopyTextureRegion() when we copy "
- "with the formats whose texel block sizes are less than 4 bytes from a greater mip "
- "level to a smaller mip level on D3D12 backends.",
- "https://crbug.com/1161355"}},
- {Toggle::EmitHLSLDebugSymbols,
- {"emit_hlsl_debug_symbols",
- "Sets the D3DCOMPILE_SKIP_OPTIMIZATION and D3DCOMPILE_DEBUG compilation flags when "
- "compiling HLSL code. Enables better shader debugging with external graphics "
- "debugging tools.",
- "https://crbug.com/dawn/776"}},
- {Toggle::DisallowSpirv,
- {"disallow_spirv",
- "Disallow usage of SPIR-V completely so that only WGSL is used for shader modules. "
- "This is useful to prevent a Chromium renderer process from successfully sending "
- "SPIR-V code to be compiled in the GPU process.",
- "https://crbug.com/1214923"}},
- {Toggle::DumpShaders,
- {"dump_shaders",
- "Dump shaders for debugging purposes. Dumped shaders will be log via "
- "EmitLog, thus printed in Chrome console or consumed by user-defined callback "
- "function.",
- "https://crbug.com/dawn/792"}},
- {Toggle::DEPRECATED_DumpTranslatedShaders,
- {"dump_translated_shaders", "Deprecated. Use dump_shaders",
- "https://crbug.com/dawn/792"}},
- {Toggle::ForceWGSLStep,
- {"force_wgsl_step",
- "When ingesting SPIR-V shaders, force a first conversion to WGSL. This allows "
- "testing Tint's SPIRV->WGSL translation on real content to be sure that it will "
- "work when the same translation runs in a WASM module in the page.",
- "https://crbug.com/dawn/960"}},
- {Toggle::DisableWorkgroupInit,
- {"disable_workgroup_init",
- "Disables the workgroup memory zero-initialization for compute shaders.",
- "https://crbug.com/tint/1003"}},
- {Toggle::DisableSymbolRenaming,
- {"disable_symbol_renaming",
- "Disables the WGSL symbol renaming so that names are preserved.",
- "https://crbug.com/dawn/1016"}},
- {Toggle::UseUserDefinedLabelsInBackend,
- {"use_user_defined_labels_in_backend",
- "Enables calls to SetLabel to be forwarded to backend-specific APIs that label "
- "objects.",
- "https://crbug.com/dawn/840"}},
- {Toggle::DisableR8RG8Mipmaps,
- {"disable_r8_rg8_mipmaps",
- "Disables mipmaps for r8unorm and rg8unorm textures, which are known on some drivers "
- "to not clear correctly.",
- "https://crbug.com/dawn/1071"}},
- {Toggle::UseDummyFragmentInVertexOnlyPipeline,
- {"use_dummy_fragment_in_vertex_only_pipeline",
- "Use a dummy empty fragment shader in vertex only render pipeline. This toggle must "
- "be enabled for OpenGL ES backend, and serves as a workaround by default enabled on "
- "some Metal devices with Intel GPU to ensure the depth result is correct.",
- "https://crbug.com/dawn/136"}},
- {Toggle::FxcOptimizations,
- {"fxc_optimizations",
- "Enable optimizations when compiling with FXC. Disabled by default because FXC "
- "miscompiles in many cases when optimizations are enabled.",
- "https://crbug.com/dawn/1203"}},
- {Toggle::RecordDetailedTimingInTraceEvents,
- {"record_detailed_timing_in_trace_events",
- "Record detailed timing information in trace events at certain point. Currently the "
- "timing information is recorded right before calling ExecuteCommandLists on a D3D12 "
- "command queue, and the information includes system time, CPU timestamp, GPU "
- "timestamp, and their frequency.",
- "https://crbug.com/dawn/1264"}},
- {Toggle::DisableTimestampQueryConversion,
- {"disable_timestamp_query_conversion",
- "Resolve timestamp queries into ticks instead of nanoseconds.",
- "https://crbug.com/dawn/1305"}},
- {Toggle::VulkanUseZeroInitializeWorkgroupMemoryExtension,
- {"use_vulkan_zero_initialize_workgroup_memory_extension",
- "Initialize workgroup memory with OpConstantNull on Vulkan when the Vulkan extension "
- "VK_KHR_zero_initialize_workgroup_memory is supported.",
- "https://crbug.com/dawn/1302"}},
+static constexpr ToggleEnumAndInfoList kToggleNameAndInfoList = {{
+ {Toggle::EmulateStoreAndMSAAResolve,
+ {"emulate_store_and_msaa_resolve",
+ "Emulate storing into multisampled color attachments and doing MSAA resolve simultaneously. "
+ "This workaround is enabled by default on the Metal drivers that do not support "
+ "MTLStoreActionStoreAndMultisampleResolve. To support StoreOp::Store on those platforms, we "
+ "should do MSAA resolve in another render pass after ending the previous one.",
+ "https://crbug.com/dawn/56"}},
+ {Toggle::NonzeroClearResourcesOnCreationForTesting,
+ {"nonzero_clear_resources_on_creation_for_testing",
+     "Clears a texture to all 1 bits as soon as it is created, but doesn't update the tracking "
+ "state of the texture. This way we can test the logic of clearing textures that use recycled "
+ "memory.",
+ "https://crbug.com/dawn/145"}},
+ {Toggle::AlwaysResolveIntoZeroLevelAndLayer,
+ {"always_resolve_into_zero_level_and_layer",
+ "When the resolve target is a texture view that is created on the non-zero level or layer of "
+     "a texture, we first resolve into a temporary 2D texture with only one mipmap level and "
+ "one array layer, and copy the result of MSAA resolve into the true resolve target. This "
+ "workaround is enabled by default on the Metal drivers that have bugs when setting non-zero "
+ "resolveLevel or resolveSlice.",
+ "https://crbug.com/dawn/56"}},
+ {Toggle::LazyClearResourceOnFirstUse,
+ {"lazy_clear_resource_on_first_use",
+     "Clears resources to zero on first usage. This initializes the resource so that no dirty "
+     "bits from recycled memory are present in the new resource.",
+ "https://crbug.com/dawn/145"}},
+ {Toggle::TurnOffVsync,
+ {"turn_off_vsync",
+     "Turn off vsync when rendering. In order to run performance tests, turn off vsync so that "
+     "the fps can exceed 60.",
+ "https://crbug.com/dawn/237"}},
+ {Toggle::UseTemporaryBufferInCompressedTextureToTextureCopy,
+ {"use_temporary_buffer_in_texture_to_texture_copy",
+ "Split texture-to-texture copy into two copies: copy from source texture into a temporary "
+ "buffer, and copy from the temporary buffer into the destination texture when copying "
+ "between compressed textures that don't have block-aligned sizes. This workaround is enabled "
+     "by default on all Vulkan drivers to solve an issue in the Vulkan spec about "
+ "texture-to-texture copies with compressed formats. See #1005 "
+ "(https://github.com/KhronosGroup/Vulkan-Docs/issues/1005) for more details.",
+ "https://crbug.com/dawn/42"}},
+ {Toggle::UseD3D12ResourceHeapTier2,
+ {"use_d3d12_resource_heap_tier2",
+     "Enable support for resource heap tier 2. Resource heap tier 2 allows mixing of textures and "
+ "buffers in the same heap. This allows better heap re-use and reduces fragmentation.",
+ "https://crbug.com/dawn/27"}},
+ {Toggle::UseD3D12RenderPass,
+ {"use_d3d12_render_pass",
+ "Use the D3D12 render pass API introduced in Windows build 1809 by default. On versions of "
+ "Windows prior to build 1809, or when this toggle is turned off, Dawn will emulate a render "
+ "pass.",
+ "https://crbug.com/dawn/36"}},
+ {Toggle::UseD3D12ResidencyManagement,
+ {"use_d3d12_residency_management",
+ "Enable residency management. This allows page-in and page-out of resource heaps in GPU "
+ "memory. This component improves overcommitted performance by keeping the most recently used "
+ "resources local to the GPU. Turning this component off can cause allocation failures when "
+ "application memory exceeds physical device memory.",
+ "https://crbug.com/dawn/193"}},
+ {Toggle::DisableResourceSuballocation,
+ {"disable_resource_suballocation",
+ "Force the backends to not perform resource suballocation. This may expose allocation "
+ "patterns which would otherwise only occur with large or specific types of resources.",
+ "https://crbug.com/1313172"}},
+ {Toggle::SkipValidation,
+ {"skip_validation", "Skip expensive validation of Dawn commands.",
+ "https://crbug.com/dawn/271"}},
+ {Toggle::VulkanUseD32S8,
+ {"vulkan_use_d32s8",
+ "Vulkan mandates support of either D32_FLOAT_S8 or D24_UNORM_S8. When available the backend "
+ "will use D32S8 (toggle to on) but setting the toggle to off will make it use the D24S8 "
+ "format when possible.",
+ "https://crbug.com/dawn/286"}},
+ {Toggle::VulkanUseS8,
+ {"vulkan_use_s8",
+ "Vulkan has a pure stencil8 format but it is not universally available. When this toggle is "
+     "on, the backend will use S8 for the stencil8 format, otherwise it will fall back to D32S8 or "
+ "D24S8.",
+ "https://crbug.com/dawn/666"}},
+ {Toggle::MetalDisableSamplerCompare,
+ {"metal_disable_sampler_compare",
+ "Disables the use of sampler compare on Metal. This is unsupported before A9 processors.",
+ "https://crbug.com/dawn/342"}},
+ {Toggle::MetalUseSharedModeForCounterSampleBuffer,
+ {"metal_use_shared_mode_for_counter_sample_buffer",
+     "Query sets on Metal need to create a MTLCounterSampleBuffer whose storage mode must be "
+     "either MTLStorageModeShared or MTLStorageModePrivate. But the private mode does not work "
+     "properly on Intel platforms. The workaround is to use shared mode instead.",
+ "https://crbug.com/dawn/434"}},
+ {Toggle::DisableBaseVertex,
+ {"disable_base_vertex",
+ "Disables the use of non-zero base vertex which is unsupported on some platforms.",
+ "https://crbug.com/dawn/343"}},
+ {Toggle::DisableBaseInstance,
+ {"disable_base_instance",
+ "Disables the use of non-zero base instance which is unsupported on some platforms.",
+ "https://crbug.com/dawn/343"}},
+ {Toggle::DisableIndexedDrawBuffers,
+ {"disable_indexed_draw_buffers",
+ "Disables the use of indexed draw buffer state which is unsupported on some platforms.",
+ "https://crbug.com/dawn/582"}},
+ {Toggle::DisableSnormRead,
+ {"disable_snorm_read",
+ "Disables reading from Snorm textures which is unsupported on some platforms.",
+ "https://crbug.com/dawn/667"}},
+ {Toggle::DisableDepthRead,
+ {"disable_depth_read",
+ "Disables reading from depth textures which is unsupported on some platforms.",
+ "https://crbug.com/dawn/667"}},
+ {Toggle::DisableStencilRead,
+ {"disable_stencil_read",
+ "Disables reading from stencil textures which is unsupported on some platforms.",
+ "https://crbug.com/dawn/667"}},
+ {Toggle::DisableDepthStencilRead,
+ {"disable_depth_stencil_read",
+ "Disables reading from depth/stencil textures which is unsupported on some platforms.",
+ "https://crbug.com/dawn/667"}},
+ {Toggle::DisableBGRARead,
+ {"disable_bgra_read",
+ "Disables reading from BGRA textures which is unsupported on some platforms.",
+ "https://crbug.com/dawn/1393"}},
+ {Toggle::DisableSampleVariables,
+ {"disable_sample_variables",
+ "Disables gl_SampleMask and related functionality which is unsupported on some platforms.",
+ "https://crbug.com/dawn/673"}},
+ {Toggle::UseD3D12SmallShaderVisibleHeapForTesting,
+ {"use_d3d12_small_shader_visible_heap",
+ "Enable use of a small D3D12 shader visible heap, instead of using a large one by default. "
+ "This setting is used to test bindgroup encoding.",
+ "https://crbug.com/dawn/155"}},
+ {Toggle::UseDXC,
+ {"use_dxc",
+     "Use DXC instead of FXC for compiling HLSL when both dxcompiler.dll and dxil.dll are "
+ "available.",
+ "https://crbug.com/dawn/402"}},
+ {Toggle::DisableRobustness,
+ {"disable_robustness", "Disable robust buffer access", "https://crbug.com/dawn/480"}},
+ {Toggle::MetalEnableVertexPulling,
+ {"metal_enable_vertex_pulling", "Uses vertex pulling to protect out-of-bounds reads on Metal",
+ "https://crbug.com/dawn/480"}},
+ {Toggle::DisallowUnsafeAPIs,
+ {"disallow_unsafe_apis",
+ "Produces validation errors on API entry points or parameter combinations that aren't "
+ "considered secure yet.",
+ "http://crbug.com/1138528"}},
+ {Toggle::FlushBeforeClientWaitSync,
+ {"flush_before_client_wait_sync",
+ "Call glFlush before glClientWaitSync to work around bugs in the latter",
+ "https://crbug.com/dawn/633"}},
+ {Toggle::UseTempBufferInSmallFormatTextureToTextureCopyFromGreaterToLessMipLevel,
+ {"use_temp_buffer_in_small_format_texture_to_texture_copy_from_greater_to_less_mip_level",
+ "Split texture-to-texture copy into two copies: copy from source texture into a temporary "
+ "buffer, and copy from the temporary buffer into the destination texture under specific "
+ "situations. This workaround is by default enabled on some Intel GPUs which have a driver "
+ "bug "
+ "in the execution of CopyTextureRegion() when we copy with the formats whose texel block "
+ "sizes are less than 4 bytes from a greater mip level to a smaller mip level on D3D12 "
+ "backends.",
+ "https://crbug.com/1161355"}},
+ {Toggle::EmitHLSLDebugSymbols,
+ {"emit_hlsl_debug_symbols",
+ "Sets the D3DCOMPILE_SKIP_OPTIMIZATION and D3DCOMPILE_DEBUG compilation flags when compiling "
+ "HLSL code. Enables better shader debugging with external graphics debugging tools.",
+ "https://crbug.com/dawn/776"}},
+ {Toggle::DisallowSpirv,
+ {"disallow_spirv",
+ "Disallow usage of SPIR-V completely so that only WGSL is used for shader modules. This is "
+ "useful to prevent a Chromium renderer process from successfully sending SPIR-V code to be "
+ "compiled in the GPU process.",
+ "https://crbug.com/1214923"}},
+ {Toggle::DumpShaders,
+ {"dump_shaders",
+     "Dump shaders for debugging purposes. Dumped shaders will be logged via EmitLog, and thus "
+     "printed in the Chrome console or consumed by a user-defined callback function.",
+ "https://crbug.com/dawn/792"}},
+ {Toggle::DEPRECATED_DumpTranslatedShaders,
+ {"dump_translated_shaders", "Deprecated. Use dump_shaders", "https://crbug.com/dawn/792"}},
+ {Toggle::ForceWGSLStep,
+ {"force_wgsl_step",
+ "When ingesting SPIR-V shaders, force a first conversion to WGSL. This allows testing Tint's "
+ "SPIRV->WGSL translation on real content to be sure that it will work when the same "
+ "translation runs in a WASM module in the page.",
+ "https://crbug.com/dawn/960"}},
+ {Toggle::DisableWorkgroupInit,
+ {"disable_workgroup_init",
+ "Disables the workgroup memory zero-initialization for compute shaders.",
+ "https://crbug.com/tint/1003"}},
+ {Toggle::DisableSymbolRenaming,
+ {"disable_symbol_renaming", "Disables the WGSL symbol renaming so that names are preserved.",
+ "https://crbug.com/dawn/1016"}},
+ {Toggle::UseUserDefinedLabelsInBackend,
+ {"use_user_defined_labels_in_backend",
+ "Enables calls to SetLabel to be forwarded to backend-specific APIs that label objects.",
+ "https://crbug.com/dawn/840"}},
+ {Toggle::UsePlaceholderFragmentInVertexOnlyPipeline,
+ {"use_placeholder_fragment_in_vertex_only_pipeline",
+     "Use a placeholder empty fragment shader in vertex-only render pipelines. This toggle must "
+     "be enabled for the OpenGL ES backend, and is enabled by default as a workaround on some "
+     "Metal devices with Intel GPUs to ensure the depth result is correct.",
+ "https://crbug.com/dawn/136"}},
+ {Toggle::FxcOptimizations,
+ {"fxc_optimizations",
+ "Enable optimizations when compiling with FXC. Disabled by default because FXC miscompiles "
+ "in many cases when optimizations are enabled.",
+ "https://crbug.com/dawn/1203"}},
+ {Toggle::RecordDetailedTimingInTraceEvents,
+ {"record_detailed_timing_in_trace_events",
+     "Record detailed timing information in trace events at certain points. Currently the timing "
+ "information is recorded right before calling ExecuteCommandLists on a D3D12 command queue, "
+ "and the information includes system time, CPU timestamp, GPU timestamp, and their "
+ "frequency.",
+ "https://crbug.com/dawn/1264"}},
+ {Toggle::DisableTimestampQueryConversion,
+ {"disable_timestamp_query_conversion",
+ "Resolve timestamp queries into ticks instead of nanoseconds.",
+ "https://crbug.com/dawn/1305"}},
+ {Toggle::VulkanUseZeroInitializeWorkgroupMemoryExtension,
+ {"use_vulkan_zero_initialize_workgroup_memory_extension",
+ "Initialize workgroup memory with OpConstantNull on Vulkan when the Vulkan extension "
+ "VK_KHR_zero_initialize_workgroup_memory is supported.",
+ "https://crbug.com/dawn/1302"}},
+ {Toggle::D3D12SplitBufferTextureCopyForRowsPerImagePaddings,
+ {"d3d12_split_buffer_texture_copy_for_rows_per_image_paddings",
+ "D3D12 requires more buffer storage than it should when rowsPerImage is greater than "
+ "copyHeight, which means there are pure padding row(s) on each image. In this situation, "
+     "the buffer used for a B2T/T2B copy might be big enough according to WebGPU's spec but "
+     "still not meet D3D12's requirement, so we work around it by splitting the copy operation "
+     "into two copies so that B2T/T2B copies are done correctly on D3D12.",
+ "https://crbug.com/dawn/1289"}},
+ {Toggle::MetalRenderR8RG8UnormSmallMipToTempTexture,
+ {"metal_render_r8_rg8_unorm_small_mip_to_temp_texture",
+ "Metal Intel devices have issues with r8unorm and rg8unorm textures where rendering to small "
+     "mips (level >= 2) doesn't work correctly. Work around this issue by detecting this case and "
+ "rendering to a temporary texture instead (with copies before and after if needed).",
+ "https://crbug.com/dawn/1071"}},
+ {Toggle::EnableBlobCache,
+ {"enable_blob_cache",
+ "Enables usage of the blob cache (backed by the platform cache if set/passed). Necessary for "
+ "any persistent caching capabilities, i.e. pipeline caching.",
+ "https://crbug.com/dawn/549"}},
+ // Comment to separate the }} so it is clearer what to copy-paste to add a toggle.
+}};
+} // anonymous namespace
- // Dummy comment to separate the }} so it is clearer what to copy-paste to add a toggle.
- }};
- } // anonymous namespace
+void TogglesSet::Set(Toggle toggle, bool enabled) {
+ if (toggle == Toggle::DEPRECATED_DumpTranslatedShaders) {
+ Set(Toggle::DumpShaders, enabled);
+ return;
+ }
+ ASSERT(toggle != Toggle::InvalidEnum);
+ const size_t toggleIndex = static_cast<size_t>(toggle);
+ toggleBitset.set(toggleIndex, enabled);
+}
- void TogglesSet::Set(Toggle toggle, bool enabled) {
- if (toggle == Toggle::DEPRECATED_DumpTranslatedShaders) {
- Set(Toggle::DumpShaders, enabled);
- return;
- }
- ASSERT(toggle != Toggle::InvalidEnum);
- const size_t toggleIndex = static_cast<size_t>(toggle);
- toggleBitset.set(toggleIndex, enabled);
+bool TogglesSet::Has(Toggle toggle) const {
+ if (toggle == Toggle::DEPRECATED_DumpTranslatedShaders) {
+ return Has(Toggle::DumpShaders);
}
+ ASSERT(toggle != Toggle::InvalidEnum);
+ const size_t toggleIndex = static_cast<size_t>(toggle);
+ return toggleBitset.test(toggleIndex);
+}
+
+std::vector<const char*> TogglesSet::GetContainedToggleNames() const {
+ std::vector<const char*> togglesNameInUse(toggleBitset.count());
- bool TogglesSet::Has(Toggle toggle) const {
- if (toggle == Toggle::DEPRECATED_DumpTranslatedShaders) {
- return Has(Toggle::DumpShaders);
- }
- ASSERT(toggle != Toggle::InvalidEnum);
- const size_t toggleIndex = static_cast<size_t>(toggle);
- return toggleBitset.test(toggleIndex);
+ uint32_t index = 0;
+ for (uint32_t i : IterateBitSet(toggleBitset)) {
+ const char* toggleName = ToggleEnumToName(static_cast<Toggle>(i));
+ togglesNameInUse[index] = toggleName;
+ ++index;
}
- std::vector<const char*> TogglesSet::GetContainedToggleNames() const {
- std::vector<const char*> togglesNameInUse(toggleBitset.count());
+ return togglesNameInUse;
+}
- uint32_t index = 0;
- for (uint32_t i : IterateBitSet(toggleBitset)) {
- const char* toggleName = ToggleEnumToName(static_cast<Toggle>(i));
- togglesNameInUse[index] = toggleName;
- ++index;
- }
+const char* ToggleEnumToName(Toggle toggle) {
+ ASSERT(toggle != Toggle::InvalidEnum);
- return togglesNameInUse;
- }
+ const ToggleEnumAndInfo& toggleNameAndInfo =
+ kToggleNameAndInfoList[static_cast<size_t>(toggle)];
+ ASSERT(toggleNameAndInfo.toggle == toggle);
+ return toggleNameAndInfo.info.name;
+}
- const char* ToggleEnumToName(Toggle toggle) {
- ASSERT(toggle != Toggle::InvalidEnum);
+TogglesInfo::TogglesInfo() = default;
- const ToggleEnumAndInfo& toggleNameAndInfo =
- kToggleNameAndInfoList[static_cast<size_t>(toggle)];
- ASSERT(toggleNameAndInfo.toggle == toggle);
- return toggleNameAndInfo.info.name;
- }
+TogglesInfo::~TogglesInfo() = default;
- const ToggleInfo* TogglesInfo::GetToggleInfo(const char* toggleName) {
- ASSERT(toggleName);
+const ToggleInfo* TogglesInfo::GetToggleInfo(const char* toggleName) {
+ ASSERT(toggleName);
- EnsureToggleNameToEnumMapInitialized();
+ EnsureToggleNameToEnumMapInitialized();
- const auto& iter = mToggleNameToEnumMap.find(toggleName);
- if (iter != mToggleNameToEnumMap.cend()) {
- return &kToggleNameAndInfoList[static_cast<size_t>(iter->second)].info;
- }
- return nullptr;
+ const auto& iter = mToggleNameToEnumMap.find(toggleName);
+ if (iter != mToggleNameToEnumMap.cend()) {
+ return &kToggleNameAndInfoList[static_cast<size_t>(iter->second)].info;
}
+ return nullptr;
+}
- Toggle TogglesInfo::ToggleNameToEnum(const char* toggleName) {
- ASSERT(toggleName);
+Toggle TogglesInfo::ToggleNameToEnum(const char* toggleName) {
+ ASSERT(toggleName);
- EnsureToggleNameToEnumMapInitialized();
+ EnsureToggleNameToEnumMapInitialized();
- const auto& iter = mToggleNameToEnumMap.find(toggleName);
- if (iter != mToggleNameToEnumMap.cend()) {
- return kToggleNameAndInfoList[static_cast<size_t>(iter->second)].toggle;
- }
- return Toggle::InvalidEnum;
+ const auto& iter = mToggleNameToEnumMap.find(toggleName);
+ if (iter != mToggleNameToEnumMap.cend()) {
+ return kToggleNameAndInfoList[static_cast<size_t>(iter->second)].toggle;
}
+ return Toggle::InvalidEnum;
+}
- void TogglesInfo::EnsureToggleNameToEnumMapInitialized() {
- if (mToggleNameToEnumMapInitialized) {
- return;
- }
-
- for (size_t index = 0; index < kToggleNameAndInfoList.size(); ++index) {
- const ToggleEnumAndInfo& toggleNameAndInfo = kToggleNameAndInfoList[index];
- ASSERT(index == static_cast<size_t>(toggleNameAndInfo.toggle));
- mToggleNameToEnumMap[toggleNameAndInfo.info.name] = toggleNameAndInfo.toggle;
- }
+void TogglesInfo::EnsureToggleNameToEnumMapInitialized() {
+ if (mToggleNameToEnumMapInitialized) {
+ return;
+ }
- mToggleNameToEnumMapInitialized = true;
+ for (size_t index = 0; index < kToggleNameAndInfoList.size(); ++index) {
+ const ToggleEnumAndInfo& toggleNameAndInfo = kToggleNameAndInfoList[index];
+ ASSERT(index == static_cast<size_t>(toggleNameAndInfo.toggle));
+ mToggleNameToEnumMap[toggleNameAndInfo.info.name] = toggleNameAndInfo.toggle;
}
+ mToggleNameToEnumMapInitialized = true;
+}
+
} // namespace dawn::native
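
A minimal usage sketch of the bitset-backed TogglesSet reformatted above, illustrating the deprecated-alias forwarding. This is an illustration only, assuming the patched dawn/native/Toggles.h is on the include path and the program links against dawn_native; it uses only members visible in this diff (Set, Has, GetContainedToggleNames, toggleBitset).

#include <cassert>
#include <cstdio>

#include "dawn/native/Toggles.h"

int main() {
    dawn::native::TogglesSet toggles;

    // Setting the deprecated alias forwards to Toggle::DumpShaders.
    toggles.Set(dawn::native::Toggle::DEPRECATED_DumpTranslatedShaders, true);
    assert(toggles.Has(dawn::native::Toggle::DumpShaders));

    // Only the DumpShaders bit ends up set in the underlying bitset.
    assert(toggles.toggleBitset.count() == 1);

    // GetContainedToggleNames maps set bits back to their string names.
    for (const char* name : toggles.GetContainedToggleNames()) {
        std::printf("enabled toggle: %s\n", name);  // prints "dump_shaders"
    }
    return 0;
}
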
diff --git a/chromium/third_party/dawn/src/dawn/native/Toggles.h b/chromium/third_party/dawn/src/dawn/native/Toggles.h
index 883d0239d5c..341db798fe6 100644
--- a/chromium/third_party/dawn/src/dawn/native/Toggles.h
+++ b/chromium/third_party/dawn/src/dawn/native/Toggles.h
@@ -16,6 +16,7 @@
#define SRC_DAWN_NATIVE_TOGGLES_H_
#include <bitset>
+#include <string>
#include <unordered_map>
#include <vector>
@@ -23,79 +24,87 @@
namespace dawn::native {
- enum class Toggle {
- EmulateStoreAndMSAAResolve,
- NonzeroClearResourcesOnCreationForTesting,
- AlwaysResolveIntoZeroLevelAndLayer,
- LazyClearResourceOnFirstUse,
- TurnOffVsync,
- UseTemporaryBufferInCompressedTextureToTextureCopy,
- UseD3D12ResourceHeapTier2,
- UseD3D12RenderPass,
- UseD3D12ResidencyManagement,
- DisableResourceSuballocation,
- SkipValidation,
- VulkanUseD32S8,
- VulkanUseS8,
- MetalDisableSamplerCompare,
- MetalUseSharedModeForCounterSampleBuffer,
- DisableBaseVertex,
- DisableBaseInstance,
- DisableIndexedDrawBuffers,
- DisableSnormRead,
- DisableDepthStencilRead,
- DisableSampleVariables,
- UseD3D12SmallShaderVisibleHeapForTesting,
- UseDXC,
- DisableRobustness,
- MetalEnableVertexPulling,
- DisallowUnsafeAPIs,
- FlushBeforeClientWaitSync,
- UseTempBufferInSmallFormatTextureToTextureCopyFromGreaterToLessMipLevel,
- EmitHLSLDebugSymbols,
- DisallowSpirv,
- DumpShaders,
- DEPRECATED_DumpTranslatedShaders, // Use DumpShaders
- ForceWGSLStep,
- DisableWorkgroupInit,
- DisableSymbolRenaming,
- UseUserDefinedLabelsInBackend,
- DisableR8RG8Mipmaps,
- UseDummyFragmentInVertexOnlyPipeline,
- FxcOptimizations,
- RecordDetailedTimingInTraceEvents,
- DisableTimestampQueryConversion,
- VulkanUseZeroInitializeWorkgroupMemoryExtension,
+enum class Toggle {
+ EmulateStoreAndMSAAResolve,
+ NonzeroClearResourcesOnCreationForTesting,
+ AlwaysResolveIntoZeroLevelAndLayer,
+ LazyClearResourceOnFirstUse,
+ TurnOffVsync,
+ UseTemporaryBufferInCompressedTextureToTextureCopy,
+ UseD3D12ResourceHeapTier2,
+ UseD3D12RenderPass,
+ UseD3D12ResidencyManagement,
+ DisableResourceSuballocation,
+ SkipValidation,
+ VulkanUseD32S8,
+ VulkanUseS8,
+ MetalDisableSamplerCompare,
+ MetalUseSharedModeForCounterSampleBuffer,
+ DisableBaseVertex,
+ DisableBaseInstance,
+ DisableIndexedDrawBuffers,
+ DisableSnormRead,
+ DisableDepthRead,
+ DisableStencilRead,
+ DisableDepthStencilRead,
+ DisableBGRARead,
+ DisableSampleVariables,
+ UseD3D12SmallShaderVisibleHeapForTesting,
+ UseDXC,
+ DisableRobustness,
+ MetalEnableVertexPulling,
+ DisallowUnsafeAPIs,
+ FlushBeforeClientWaitSync,
+ UseTempBufferInSmallFormatTextureToTextureCopyFromGreaterToLessMipLevel,
+ EmitHLSLDebugSymbols,
+ DisallowSpirv,
+ DumpShaders,
+ DEPRECATED_DumpTranslatedShaders, // Use DumpShaders
+ ForceWGSLStep,
+ DisableWorkgroupInit,
+ DisableSymbolRenaming,
+ UseUserDefinedLabelsInBackend,
+ UsePlaceholderFragmentInVertexOnlyPipeline,
+ FxcOptimizations,
+ RecordDetailedTimingInTraceEvents,
+ DisableTimestampQueryConversion,
+ VulkanUseZeroInitializeWorkgroupMemoryExtension,
+ D3D12SplitBufferTextureCopyForRowsPerImagePaddings,
+ MetalRenderR8RG8UnormSmallMipToTempTexture,
+ EnableBlobCache,
- EnumCount,
- InvalidEnum = EnumCount,
- };
+ EnumCount,
+ InvalidEnum = EnumCount,
+};
- // A wrapper of the bitset to store if a toggle is present or not. This wrapper provides the
- // convenience to convert the enums of enum class Toggle to the indices of a bitset.
- struct TogglesSet {
- std::bitset<static_cast<size_t>(Toggle::EnumCount)> toggleBitset;
+// A wrapper of the bitset to store if a toggle is present or not. This wrapper provides the
+// convenience to convert the enums of enum class Toggle to the indices of a bitset.
+struct TogglesSet {
+ std::bitset<static_cast<size_t>(Toggle::EnumCount)> toggleBitset;
- void Set(Toggle toggle, bool enabled);
- bool Has(Toggle toggle) const;
- std::vector<const char*> GetContainedToggleNames() const;
- };
+ void Set(Toggle toggle, bool enabled);
+ bool Has(Toggle toggle) const;
+ std::vector<const char*> GetContainedToggleNames() const;
+};
- const char* ToggleEnumToName(Toggle toggle);
+const char* ToggleEnumToName(Toggle toggle);
- class TogglesInfo {
- public:
- // Used to query the details of a toggle. Return nullptr if toggleName is not a valid name
- // of a toggle supported in Dawn.
- const ToggleInfo* GetToggleInfo(const char* toggleName);
- Toggle ToggleNameToEnum(const char* toggleName);
+class TogglesInfo {
+ public:
+ TogglesInfo();
+ ~TogglesInfo();
- private:
- void EnsureToggleNameToEnumMapInitialized();
+ // Used to query the details of a toggle. Return nullptr if toggleName is not a valid name
+ // of a toggle supported in Dawn.
+ const ToggleInfo* GetToggleInfo(const char* toggleName);
+ Toggle ToggleNameToEnum(const char* toggleName);
- bool mToggleNameToEnumMapInitialized = false;
- std::unordered_map<std::string, Toggle> mToggleNameToEnumMap;
- };
+ private:
+ void EnsureToggleNameToEnumMapInitialized();
+
+ bool mToggleNameToEnumMapInitialized = false;
+ std::unordered_map<std::string, Toggle> mToggleNameToEnumMap;
+};
} // namespace dawn::native
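
A small sketch of the name-to-enum lookup that TogglesInfo declares above; illustration only, assuming the patched header is available and the program links against dawn_native. Only the `name` field of ToggleInfo is accessed, since that is the only field referenced by name in the .cpp diff.

#include <cstdio>

#include "dawn/native/Toggles.h"

int main() {
    dawn::native::TogglesInfo info;

    // Known toggle names resolve to their metadata entry.
    const dawn::native::ToggleInfo* useDxc = info.GetToggleInfo("use_dxc");
    if (useDxc != nullptr) {
        std::printf("found toggle: %s\n", useDxc->name);
    }

    // Unknown names return nullptr / Toggle::InvalidEnum rather than asserting.
    const bool unknownRejected =
        info.ToggleNameToEnum("not_a_real_toggle") == dawn::native::Toggle::InvalidEnum;
    return unknownRejected ? 0 : 1;
}
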
diff --git a/chromium/third_party/dawn/src/dawn/native/VertexFormat.cpp b/chromium/third_party/dawn/src/dawn/native/VertexFormat.cpp
index 2f2ae7f23d4..c859887a0f8 100644
--- a/chromium/third_party/dawn/src/dawn/native/VertexFormat.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/VertexFormat.cpp
@@ -12,58 +12,58 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+#include <array>
+
#include "dawn/native/VertexFormat.h"
#include "dawn/common/Assert.h"
-#include <array>
-
namespace dawn::native {
- static constexpr std::array<VertexFormatInfo, 31> sVertexFormatTable = {{
- //
- {wgpu::VertexFormat::Undefined, 0, 0, 0, VertexFormatBaseType::Float},
+static constexpr std::array<VertexFormatInfo, 31> sVertexFormatTable = {{
+ //
+ {wgpu::VertexFormat::Undefined, 0, 0, 0, VertexFormatBaseType::Float},
- {wgpu::VertexFormat::Uint8x2, 2, 2, 1, VertexFormatBaseType::Uint},
- {wgpu::VertexFormat::Uint8x4, 4, 4, 1, VertexFormatBaseType::Uint},
- {wgpu::VertexFormat::Sint8x2, 2, 2, 1, VertexFormatBaseType::Sint},
- {wgpu::VertexFormat::Sint8x4, 4, 4, 1, VertexFormatBaseType::Sint},
- {wgpu::VertexFormat::Unorm8x2, 2, 2, 1, VertexFormatBaseType::Float},
- {wgpu::VertexFormat::Unorm8x4, 4, 4, 1, VertexFormatBaseType::Float},
- {wgpu::VertexFormat::Snorm8x2, 2, 2, 1, VertexFormatBaseType::Float},
- {wgpu::VertexFormat::Snorm8x4, 4, 4, 1, VertexFormatBaseType::Float},
+ {wgpu::VertexFormat::Uint8x2, 2, 2, 1, VertexFormatBaseType::Uint},
+ {wgpu::VertexFormat::Uint8x4, 4, 4, 1, VertexFormatBaseType::Uint},
+ {wgpu::VertexFormat::Sint8x2, 2, 2, 1, VertexFormatBaseType::Sint},
+ {wgpu::VertexFormat::Sint8x4, 4, 4, 1, VertexFormatBaseType::Sint},
+ {wgpu::VertexFormat::Unorm8x2, 2, 2, 1, VertexFormatBaseType::Float},
+ {wgpu::VertexFormat::Unorm8x4, 4, 4, 1, VertexFormatBaseType::Float},
+ {wgpu::VertexFormat::Snorm8x2, 2, 2, 1, VertexFormatBaseType::Float},
+ {wgpu::VertexFormat::Snorm8x4, 4, 4, 1, VertexFormatBaseType::Float},
- {wgpu::VertexFormat::Uint16x2, 4, 2, 2, VertexFormatBaseType::Uint},
- {wgpu::VertexFormat::Uint16x4, 8, 4, 2, VertexFormatBaseType::Uint},
- {wgpu::VertexFormat::Sint16x2, 4, 2, 2, VertexFormatBaseType::Sint},
- {wgpu::VertexFormat::Sint16x4, 8, 4, 2, VertexFormatBaseType::Sint},
- {wgpu::VertexFormat::Unorm16x2, 4, 2, 2, VertexFormatBaseType::Float},
- {wgpu::VertexFormat::Unorm16x4, 8, 4, 2, VertexFormatBaseType::Float},
- {wgpu::VertexFormat::Snorm16x2, 4, 2, 2, VertexFormatBaseType::Float},
- {wgpu::VertexFormat::Snorm16x4, 8, 4, 2, VertexFormatBaseType::Float},
- {wgpu::VertexFormat::Float16x2, 4, 2, 2, VertexFormatBaseType::Float},
- {wgpu::VertexFormat::Float16x4, 8, 4, 2, VertexFormatBaseType::Float},
+ {wgpu::VertexFormat::Uint16x2, 4, 2, 2, VertexFormatBaseType::Uint},
+ {wgpu::VertexFormat::Uint16x4, 8, 4, 2, VertexFormatBaseType::Uint},
+ {wgpu::VertexFormat::Sint16x2, 4, 2, 2, VertexFormatBaseType::Sint},
+ {wgpu::VertexFormat::Sint16x4, 8, 4, 2, VertexFormatBaseType::Sint},
+ {wgpu::VertexFormat::Unorm16x2, 4, 2, 2, VertexFormatBaseType::Float},
+ {wgpu::VertexFormat::Unorm16x4, 8, 4, 2, VertexFormatBaseType::Float},
+ {wgpu::VertexFormat::Snorm16x2, 4, 2, 2, VertexFormatBaseType::Float},
+ {wgpu::VertexFormat::Snorm16x4, 8, 4, 2, VertexFormatBaseType::Float},
+ {wgpu::VertexFormat::Float16x2, 4, 2, 2, VertexFormatBaseType::Float},
+ {wgpu::VertexFormat::Float16x4, 8, 4, 2, VertexFormatBaseType::Float},
- {wgpu::VertexFormat::Float32, 4, 1, 4, VertexFormatBaseType::Float},
- {wgpu::VertexFormat::Float32x2, 8, 2, 4, VertexFormatBaseType::Float},
- {wgpu::VertexFormat::Float32x3, 12, 3, 4, VertexFormatBaseType::Float},
- {wgpu::VertexFormat::Float32x4, 16, 4, 4, VertexFormatBaseType::Float},
- {wgpu::VertexFormat::Uint32, 4, 1, 4, VertexFormatBaseType::Uint},
- {wgpu::VertexFormat::Uint32x2, 8, 2, 4, VertexFormatBaseType::Uint},
- {wgpu::VertexFormat::Uint32x3, 12, 3, 4, VertexFormatBaseType::Uint},
- {wgpu::VertexFormat::Uint32x4, 16, 4, 4, VertexFormatBaseType::Uint},
- {wgpu::VertexFormat::Sint32, 4, 1, 4, VertexFormatBaseType::Sint},
- {wgpu::VertexFormat::Sint32x2, 8, 2, 4, VertexFormatBaseType::Sint},
- {wgpu::VertexFormat::Sint32x3, 12, 3, 4, VertexFormatBaseType::Sint},
- {wgpu::VertexFormat::Sint32x4, 16, 4, 4, VertexFormatBaseType::Sint},
- //
- }};
+ {wgpu::VertexFormat::Float32, 4, 1, 4, VertexFormatBaseType::Float},
+ {wgpu::VertexFormat::Float32x2, 8, 2, 4, VertexFormatBaseType::Float},
+ {wgpu::VertexFormat::Float32x3, 12, 3, 4, VertexFormatBaseType::Float},
+ {wgpu::VertexFormat::Float32x4, 16, 4, 4, VertexFormatBaseType::Float},
+ {wgpu::VertexFormat::Uint32, 4, 1, 4, VertexFormatBaseType::Uint},
+ {wgpu::VertexFormat::Uint32x2, 8, 2, 4, VertexFormatBaseType::Uint},
+ {wgpu::VertexFormat::Uint32x3, 12, 3, 4, VertexFormatBaseType::Uint},
+ {wgpu::VertexFormat::Uint32x4, 16, 4, 4, VertexFormatBaseType::Uint},
+ {wgpu::VertexFormat::Sint32, 4, 1, 4, VertexFormatBaseType::Sint},
+ {wgpu::VertexFormat::Sint32x2, 8, 2, 4, VertexFormatBaseType::Sint},
+ {wgpu::VertexFormat::Sint32x3, 12, 3, 4, VertexFormatBaseType::Sint},
+ {wgpu::VertexFormat::Sint32x4, 16, 4, 4, VertexFormatBaseType::Sint},
+ //
+}};
- const VertexFormatInfo& GetVertexFormatInfo(wgpu::VertexFormat format) {
- ASSERT(format != wgpu::VertexFormat::Undefined);
- ASSERT(static_cast<uint32_t>(format) < sVertexFormatTable.size());
- ASSERT(sVertexFormatTable[static_cast<uint32_t>(format)].format == format);
- return sVertexFormatTable[static_cast<uint32_t>(format)];
- }
+const VertexFormatInfo& GetVertexFormatInfo(wgpu::VertexFormat format) {
+ ASSERT(format != wgpu::VertexFormat::Undefined);
+ ASSERT(static_cast<uint32_t>(format) < sVertexFormatTable.size());
+ ASSERT(sVertexFormatTable[static_cast<uint32_t>(format)].format == format);
+ return sVertexFormatTable[static_cast<uint32_t>(format)];
+}
} // namespace dawn::native
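
A quick sanity sketch against the vertex format table above; illustration only, assuming the patched dawn/native/VertexFormat.h is available. The expected values come straight from the Float32x3 row (byteSize 12, componentCount 3, componentByteSize 4, Float base type).

#include <cassert>

#include "dawn/native/VertexFormat.h"

int main() {
    using dawn::native::GetVertexFormatInfo;
    using dawn::native::VertexFormatBaseType;

    const auto& info = GetVertexFormatInfo(wgpu::VertexFormat::Float32x3);
    assert(info.byteSize == 12);           // 3 components * 4 bytes each
    assert(info.componentCount == 3);
    assert(info.componentByteSize == 4);
    assert(info.baseType == VertexFormatBaseType::Float);
    return 0;
}
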
diff --git a/chromium/third_party/dawn/src/dawn/native/VertexFormat.h b/chromium/third_party/dawn/src/dawn/native/VertexFormat.h
index d3212322333..33e615c6f5c 100644
--- a/chromium/third_party/dawn/src/dawn/native/VertexFormat.h
+++ b/chromium/third_party/dawn/src/dawn/native/VertexFormat.h
@@ -19,21 +19,21 @@
namespace dawn::native {
- enum class VertexFormatBaseType {
- Float,
- Uint,
- Sint,
- };
-
- struct VertexFormatInfo {
- wgpu::VertexFormat format;
- uint32_t byteSize;
- uint32_t componentCount;
- uint32_t componentByteSize;
- VertexFormatBaseType baseType;
- };
-
- const VertexFormatInfo& GetVertexFormatInfo(wgpu::VertexFormat format);
+enum class VertexFormatBaseType {
+ Float,
+ Uint,
+ Sint,
+};
+
+struct VertexFormatInfo {
+ wgpu::VertexFormat format;
+ uint32_t byteSize;
+ uint32_t componentCount;
+ uint32_t componentByteSize;
+ VertexFormatBaseType baseType;
+};
+
+const VertexFormatInfo& GetVertexFormatInfo(wgpu::VertexFormat format);
} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/XlibXcbFunctions.cpp b/chromium/third_party/dawn/src/dawn/native/XlibXcbFunctions.cpp
index 1b0f6e88cbd..59966765c52 100644
--- a/chromium/third_party/dawn/src/dawn/native/XlibXcbFunctions.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/XlibXcbFunctions.cpp
@@ -16,16 +16,15 @@
namespace dawn::native {
- XlibXcbFunctions::XlibXcbFunctions() {
- if (!mLib.Open("libX11-xcb.so.1") ||
- !mLib.GetProc(&xGetXCBConnection, "XGetXCBConnection")) {
- mLib.Close();
- }
+XlibXcbFunctions::XlibXcbFunctions() {
+ if (!mLib.Open("libX11-xcb.so.1") || !mLib.GetProc(&xGetXCBConnection, "XGetXCBConnection")) {
+ mLib.Close();
}
- XlibXcbFunctions::~XlibXcbFunctions() = default;
+}
+XlibXcbFunctions::~XlibXcbFunctions() = default;
- bool XlibXcbFunctions::IsLoaded() const {
- return xGetXCBConnection != nullptr;
- }
+bool XlibXcbFunctions::IsLoaded() const {
+ return xGetXCBConnection != nullptr;
+}
} // namespace dawn::native
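
A sketch of the intended call pattern for the loader above: construct the helper, check IsLoaded(), and only then call through the function pointer. Illustration only; `display` is a hypothetical, already-open Xlib Display*, and on systems without libX11-xcb.so.1 the helper simply reports not-loaded.

#include "dawn/native/XlibXcbFunctions.h"

// Returns the XCB connection for `display`, or nullptr when libX11-xcb.so.1
// (and thus XGetXCBConnection) is unavailable at runtime.
xcb_connection_t* GetXcbConnectionIfAvailable(Display* display) {
    dawn::native::XlibXcbFunctions functions;
    if (!functions.IsLoaded()) {
        return nullptr;
    }
    return functions.xGetXCBConnection(display);
}
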
diff --git a/chromium/third_party/dawn/src/dawn/native/XlibXcbFunctions.h b/chromium/third_party/dawn/src/dawn/native/XlibXcbFunctions.h
index 5f0659ed261..a8b967a22c3 100644
--- a/chromium/third_party/dawn/src/dawn/native/XlibXcbFunctions.h
+++ b/chromium/third_party/dawn/src/dawn/native/XlibXcbFunctions.h
@@ -24,22 +24,22 @@ class DynamicLib;
namespace dawn::native {
- // A helper class that dynamically loads the x11-xcb library that contains XGetXCBConnection
- // (and nothing else). This has to be dynamic because this libraries isn't present on all Linux
- // deployment platforms that Chromium targets.
- class XlibXcbFunctions {
- public:
- XlibXcbFunctions();
- ~XlibXcbFunctions();
-
- bool IsLoaded() const;
-
- // Functions from x11-xcb
- decltype(&::XGetXCBConnection) xGetXCBConnection = nullptr;
-
- private:
- DynamicLib mLib;
- };
+// A helper class that dynamically loads the x11-xcb library that contains XGetXCBConnection
+// (and nothing else). This has to be dynamic because this library isn't present on all Linux
+// deployment platforms that Chromium targets.
+class XlibXcbFunctions {
+ public:
+ XlibXcbFunctions();
+ ~XlibXcbFunctions();
+
+ bool IsLoaded() const;
+
+ // Functions from x11-xcb
+ decltype(&::XGetXCBConnection) xGetXCBConnection = nullptr;
+
+ private:
+ DynamicLib mLib;
+};
} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/AdapterD3D12.cpp b/chromium/third_party/dawn/src/dawn/native/d3d12/AdapterD3D12.cpp
index d31b9afef65..f9a8a93caa3 100644
--- a/chromium/third_party/dawn/src/dawn/native/d3d12/AdapterD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/AdapterD3D12.cpp
@@ -14,6 +14,8 @@
#include "dawn/native/d3d12/AdapterD3D12.h"
+#include <sstream>
+
#include "dawn/common/Constants.h"
#include "dawn/common/WindowsUtils.h"
#include "dawn/native/Instance.h"
@@ -21,405 +23,410 @@
#include "dawn/native/d3d12/D3D12Error.h"
#include "dawn/native/d3d12/DeviceD3D12.h"
#include "dawn/native/d3d12/PlatformFunctions.h"
-
-#include <sstream>
+#include "dawn/native/d3d12/UtilsD3D12.h"
namespace dawn::native::d3d12 {
- Adapter::Adapter(Backend* backend, ComPtr<IDXGIAdapter3> hardwareAdapter)
- : AdapterBase(backend->GetInstance(), wgpu::BackendType::D3D12),
- mHardwareAdapter(hardwareAdapter),
- mBackend(backend) {
+Adapter::Adapter(Backend* backend, ComPtr<IDXGIAdapter3> hardwareAdapter)
+ : AdapterBase(backend->GetInstance(), wgpu::BackendType::D3D12),
+ mHardwareAdapter(hardwareAdapter),
+ mBackend(backend) {}
+
+Adapter::~Adapter() {
+ CleanUpDebugLayerFilters();
+}
+
+bool Adapter::SupportsExternalImages() const {
+ // Via dawn::native::d3d12::ExternalImageDXGI::Create
+ return true;
+}
+
+const D3D12DeviceInfo& Adapter::GetDeviceInfo() const {
+ return mDeviceInfo;
+}
+
+IDXGIAdapter3* Adapter::GetHardwareAdapter() const {
+ return mHardwareAdapter.Get();
+}
+
+Backend* Adapter::GetBackend() const {
+ return mBackend;
+}
+
+ComPtr<ID3D12Device> Adapter::GetDevice() const {
+ return mD3d12Device;
+}
+
+const gpu_info::D3DDriverVersion& Adapter::GetDriverVersion() const {
+ return mDriverVersion;
+}
+
+MaybeError Adapter::InitializeImpl() {
+ // D3D12 cannot check for feature support without a device.
+ // Create the device to populate the adapter properties then reuse it when needed for actual
+ // rendering.
+ const PlatformFunctions* functions = GetBackend()->GetFunctions();
+ if (FAILED(functions->d3d12CreateDevice(GetHardwareAdapter(), D3D_FEATURE_LEVEL_11_0,
+ _uuidof(ID3D12Device), &mD3d12Device))) {
+ return DAWN_INTERNAL_ERROR("D3D12CreateDevice failed");
}
- Adapter::~Adapter() {
- CleanUpDebugLayerFilters();
- }
+ DAWN_TRY(InitializeDebugLayerFilters());
- bool Adapter::SupportsExternalImages() const {
- // Via dawn::native::d3d12::ExternalImageDXGI::Create
- return true;
- }
+ DXGI_ADAPTER_DESC1 adapterDesc;
+ mHardwareAdapter->GetDesc1(&adapterDesc);
- const D3D12DeviceInfo& Adapter::GetDeviceInfo() const {
- return mDeviceInfo;
- }
+ mDeviceId = adapterDesc.DeviceId;
+ mVendorId = adapterDesc.VendorId;
+ mName = WCharToUTF8(adapterDesc.Description);
- IDXGIAdapter3* Adapter::GetHardwareAdapter() const {
- return mHardwareAdapter.Get();
- }
+ DAWN_TRY_ASSIGN(mDeviceInfo, GatherDeviceInfo(*this));
- Backend* Adapter::GetBackend() const {
- return mBackend;
+ if (adapterDesc.Flags & DXGI_ADAPTER_FLAG_SOFTWARE) {
+ mAdapterType = wgpu::AdapterType::CPU;
+ } else {
+ mAdapterType =
+ (mDeviceInfo.isUMA) ? wgpu::AdapterType::IntegratedGPU : wgpu::AdapterType::DiscreteGPU;
}
- ComPtr<ID3D12Device> Adapter::GetDevice() const {
- return mD3d12Device;
+ // Convert the adapter's D3D12 driver version to a readable string like "24.21.13.9793".
+ LARGE_INTEGER umdVersion;
+ if (mHardwareAdapter->CheckInterfaceSupport(__uuidof(IDXGIDevice), &umdVersion) !=
+ DXGI_ERROR_UNSUPPORTED) {
+ uint64_t encodedVersion = umdVersion.QuadPart;
+
+ std::ostringstream o;
+ o << "D3D12 driver version ";
+ for (size_t i = 0; i < mDriverVersion.size(); ++i) {
+ mDriverVersion[i] = (encodedVersion >> (48 - 16 * i)) & 0xFFFF;
+ o << mDriverVersion[i] << ".";
+ }
+ mDriverDescription = o.str();
}
- const gpu_info::D3DDriverVersion& Adapter::GetDriverVersion() const {
- return mDriverVersion;
+ return {};
+}
+
+bool Adapter::AreTimestampQueriesSupported() const {
+ D3D12_COMMAND_QUEUE_DESC queueDesc = {};
+ queueDesc.Flags = D3D12_COMMAND_QUEUE_FLAG_NONE;
+ queueDesc.Type = D3D12_COMMAND_LIST_TYPE_DIRECT;
+ ComPtr<ID3D12CommandQueue> d3d12CommandQueue;
+ HRESULT hr = mD3d12Device->CreateCommandQueue(&queueDesc, IID_PPV_ARGS(&d3d12CommandQueue));
+ if (FAILED(hr)) {
+ return false;
}
- MaybeError Adapter::InitializeImpl() {
- // D3D12 cannot check for feature support without a device.
- // Create the device to populate the adapter properties then reuse it when needed for actual
- // rendering.
- const PlatformFunctions* functions = GetBackend()->GetFunctions();
- if (FAILED(functions->d3d12CreateDevice(GetHardwareAdapter(), D3D_FEATURE_LEVEL_11_0,
- _uuidof(ID3D12Device), &mD3d12Device))) {
- return DAWN_INTERNAL_ERROR("D3D12CreateDevice failed");
- }
-
- DAWN_TRY(InitializeDebugLayerFilters());
-
- DXGI_ADAPTER_DESC1 adapterDesc;
- mHardwareAdapter->GetDesc1(&adapterDesc);
-
- mDeviceId = adapterDesc.DeviceId;
- mVendorId = adapterDesc.VendorId;
- mName = WCharToUTF8(adapterDesc.Description);
-
- DAWN_TRY_ASSIGN(mDeviceInfo, GatherDeviceInfo(*this));
-
- if (adapterDesc.Flags & DXGI_ADAPTER_FLAG_SOFTWARE) {
- mAdapterType = wgpu::AdapterType::CPU;
- } else {
- mAdapterType = (mDeviceInfo.isUMA) ? wgpu::AdapterType::IntegratedGPU
- : wgpu::AdapterType::DiscreteGPU;
- }
-
- // Convert the adapter's D3D12 driver version to a readable string like "24.21.13.9793".
- LARGE_INTEGER umdVersion;
- if (mHardwareAdapter->CheckInterfaceSupport(__uuidof(IDXGIDevice), &umdVersion) !=
- DXGI_ERROR_UNSUPPORTED) {
- uint64_t encodedVersion = umdVersion.QuadPart;
-
- std::ostringstream o;
- o << "D3D12 driver version ";
- for (size_t i = 0; i < mDriverVersion.size(); ++i) {
- mDriverVersion[i] = (encodedVersion >> (48 - 16 * i)) & 0xFFFF;
- o << mDriverVersion[i] << ".";
- }
- mDriverDescription = o.str();
- }
-
- return {};
+ // GetTimestampFrequency returns an error HRESULT when there are bugs in Windows container
+ // and vGPU implementations.
+ uint64_t timeStampFrequency;
+ hr = d3d12CommandQueue->GetTimestampFrequency(&timeStampFrequency);
+ if (FAILED(hr)) {
+ return false;
}
- bool Adapter::AreTimestampQueriesSupported() const {
- D3D12_COMMAND_QUEUE_DESC queueDesc = {};
- queueDesc.Flags = D3D12_COMMAND_QUEUE_FLAG_NONE;
- queueDesc.Type = D3D12_COMMAND_LIST_TYPE_DIRECT;
- ComPtr<ID3D12CommandQueue> d3d12CommandQueue;
- HRESULT hr = mD3d12Device->CreateCommandQueue(&queueDesc, IID_PPV_ARGS(&d3d12CommandQueue));
- if (FAILED(hr)) {
- return false;
- }
+ return true;
+}
- // GetTimestampFrequency returns an error HRESULT when there are bugs in Windows container
- // and vGPU implementations.
- uint64_t timeStampFrequency;
- hr = d3d12CommandQueue->GetTimestampFrequency(&timeStampFrequency);
- if (FAILED(hr)) {
- return false;
+MaybeError Adapter::InitializeSupportedFeaturesImpl() {
+ if (AreTimestampQueriesSupported()) {
+ mSupportedFeatures.EnableFeature(Feature::TimestampQuery);
+ }
+ mSupportedFeatures.EnableFeature(Feature::TextureCompressionBC);
+ mSupportedFeatures.EnableFeature(Feature::PipelineStatisticsQuery);
+ mSupportedFeatures.EnableFeature(Feature::MultiPlanarFormats);
+ mSupportedFeatures.EnableFeature(Feature::Depth24UnormStencil8);
+ mSupportedFeatures.EnableFeature(Feature::Depth32FloatStencil8);
+ mSupportedFeatures.EnableFeature(Feature::IndirectFirstInstance);
+
+ if (GetBackend()->GetFunctions()->IsDXCAvailable()) {
+ uint64_t dxcVersion = 0;
+ DAWN_TRY_ASSIGN(dxcVersion, GetBackend()->GetDXCompilerVersion());
+ constexpr uint64_t kLeastMajorVersionForDP4a = 1;
+ constexpr uint64_t kLeastMinorVersionForDP4a = 4;
+ if (mDeviceInfo.supportsDP4a &&
+ dxcVersion >= MakeDXCVersion(kLeastMajorVersionForDP4a, kLeastMinorVersionForDP4a)) {
+ mSupportedFeatures.EnableFeature(Feature::ChromiumExperimentalDp4a);
}
-
- return true;
}
- MaybeError Adapter::InitializeSupportedFeaturesImpl() {
- if (AreTimestampQueriesSupported()) {
- mSupportedFeatures.EnableFeature(Feature::TimestampQuery);
- }
- mSupportedFeatures.EnableFeature(Feature::TextureCompressionBC);
- mSupportedFeatures.EnableFeature(Feature::PipelineStatisticsQuery);
- mSupportedFeatures.EnableFeature(Feature::MultiPlanarFormats);
- mSupportedFeatures.EnableFeature(Feature::Depth24UnormStencil8);
- mSupportedFeatures.EnableFeature(Feature::Depth32FloatStencil8);
+ return {};
+}
- return {};
- }
+MaybeError Adapter::InitializeSupportedLimitsImpl(CombinedLimits* limits) {
+ D3D12_FEATURE_DATA_D3D12_OPTIONS featureData = {};
- MaybeError Adapter::InitializeSupportedLimitsImpl(CombinedLimits* limits) {
- D3D12_FEATURE_DATA_D3D12_OPTIONS featureData = {};
-
- DAWN_TRY(CheckHRESULT(mD3d12Device->CheckFeatureSupport(D3D12_FEATURE_D3D12_OPTIONS,
- &featureData, sizeof(featureData)),
- "CheckFeatureSupport D3D12_FEATURE_D3D12_OPTIONS"));
-
- // Check if the device is at least D3D_FEATURE_LEVEL_11_1 or D3D_FEATURE_LEVEL_11_0
- const D3D_FEATURE_LEVEL levelsToQuery[]{D3D_FEATURE_LEVEL_11_1, D3D_FEATURE_LEVEL_11_0};
-
- D3D12_FEATURE_DATA_FEATURE_LEVELS featureLevels;
- featureLevels.NumFeatureLevels = sizeof(levelsToQuery) / sizeof(D3D_FEATURE_LEVEL);
- featureLevels.pFeatureLevelsRequested = levelsToQuery;
- DAWN_TRY(
- CheckHRESULT(mD3d12Device->CheckFeatureSupport(D3D12_FEATURE_FEATURE_LEVELS,
- &featureLevels, sizeof(featureLevels)),
- "CheckFeatureSupport D3D12_FEATURE_FEATURE_LEVELS"));
-
- if (featureLevels.MaxSupportedFeatureLevel == D3D_FEATURE_LEVEL_11_0 &&
- featureData.ResourceBindingTier < D3D12_RESOURCE_BINDING_TIER_2) {
- return DAWN_VALIDATION_ERROR(
- "At least Resource Binding Tier 2 is required for D3D12 Feature Level 11.0 "
- "devices.");
- }
+ DAWN_TRY(CheckHRESULT(mD3d12Device->CheckFeatureSupport(D3D12_FEATURE_D3D12_OPTIONS,
+ &featureData, sizeof(featureData)),
+ "CheckFeatureSupport D3D12_FEATURE_D3D12_OPTIONS"));
- GetDefaultLimits(&limits->v1);
-
- // https://docs.microsoft.com/en-us/windows/win32/direct3d12/hardware-feature-levels
-
- // Limits that are the same across D3D feature levels
- limits->v1.maxTextureDimension1D = D3D12_REQ_TEXTURE1D_U_DIMENSION;
- limits->v1.maxTextureDimension2D = D3D12_REQ_TEXTURE2D_U_OR_V_DIMENSION;
- limits->v1.maxTextureDimension3D = D3D12_REQ_TEXTURE3D_U_V_OR_W_DIMENSION;
- limits->v1.maxTextureArrayLayers = D3D12_REQ_TEXTURE2D_ARRAY_AXIS_DIMENSION;
- // Slot values can be 0-15, inclusive:
- // https://docs.microsoft.com/en-ca/windows/win32/api/d3d12/ns-d3d12-d3d12_input_element_desc
- limits->v1.maxVertexBuffers = 16;
- limits->v1.maxVertexAttributes = D3D12_IA_VERTEX_INPUT_RESOURCE_SLOT_COUNT;
-
- // Note: WebGPU requires FL11.1+
- // https://docs.microsoft.com/en-us/windows/win32/direct3d12/hardware-support
- // Resource Binding Tier: 1 2 3
-
- // Max(CBV+UAV+SRV) 1M 1M 1M+
- // Max CBV per stage 14 14 full
- // Max SRV per stage 128 full full
- // Max UAV in all stages 64 64 full
- // Max Samplers per stage 16 2048 2048
-
- // https://docs.microsoft.com/en-us/windows-hardware/test/hlk/testref/efad06e8-51d1-40ce-ad5c-573a134b4bb6
- // "full" means the full heap can be used. This is tested
- // to work for 1 million descriptors, and 1.1M for tier 3.
- uint32_t maxCBVsPerStage;
- uint32_t maxSRVsPerStage;
- uint32_t maxUAVsAllStages;
- uint32_t maxSamplersPerStage;
- switch (featureData.ResourceBindingTier) {
- case D3D12_RESOURCE_BINDING_TIER_1:
- maxCBVsPerStage = 14;
- maxSRVsPerStage = 128;
- maxUAVsAllStages = 64;
- maxSamplersPerStage = 16;
- break;
- case D3D12_RESOURCE_BINDING_TIER_2:
- maxCBVsPerStage = 14;
- maxSRVsPerStage = 1'000'000;
- maxUAVsAllStages = 64;
- maxSamplersPerStage = 2048;
- break;
- case D3D12_RESOURCE_BINDING_TIER_3:
- default:
- maxCBVsPerStage = 1'100'000;
- maxSRVsPerStage = 1'100'000;
- maxUAVsAllStages = 1'100'000;
- maxSamplersPerStage = 2048;
- break;
- }
+ // Check if the device is at least D3D_FEATURE_LEVEL_11_1 or D3D_FEATURE_LEVEL_11_0
+ const D3D_FEATURE_LEVEL levelsToQuery[]{D3D_FEATURE_LEVEL_11_1, D3D_FEATURE_LEVEL_11_0};
- ASSERT(maxUAVsAllStages / 4 > limits->v1.maxStorageTexturesPerShaderStage);
- ASSERT(maxUAVsAllStages / 4 > limits->v1.maxStorageBuffersPerShaderStage);
- uint32_t maxUAVsPerStage = maxUAVsAllStages / 2;
-
- limits->v1.maxUniformBuffersPerShaderStage = maxCBVsPerStage;
- // Allocate half of the UAVs to storage buffers, and half to storage textures.
- limits->v1.maxStorageTexturesPerShaderStage = maxUAVsPerStage / 2;
- limits->v1.maxStorageBuffersPerShaderStage = maxUAVsPerStage - maxUAVsPerStage / 2;
- limits->v1.maxSampledTexturesPerShaderStage = maxSRVsPerStage;
- limits->v1.maxSamplersPerShaderStage = maxSamplersPerStage;
-
- // https://docs.microsoft.com/en-us/windows/win32/direct3d12/root-signature-limits
- // In DWORDS. Descriptor tables cost 1, Root constants cost 1, Root descriptors cost 2.
- static constexpr uint32_t kMaxRootSignatureSize = 64u;
- // Dawn maps WebGPU's binding model by:
- // - (maxBindGroups)
- // CBVs/UAVs/SRVs for bind group are a root descriptor table
- // - (maxBindGroups)
- // Samplers for each bind group are a root descriptor table
- // - (2 * maxDynamicBuffers)
- // Each dynamic buffer is a root descriptor
- // RESERVED:
- // - 3 = max of:
- // - 2 root constants for the baseVertex/baseInstance constants.
- // - 3 root constants for num workgroups X, Y, Z
- // - 4 root constants (kMaxDynamicStorageBuffersPerPipelineLayout) for dynamic storage
- // buffer lengths.
- static constexpr uint32_t kReservedSlots = 7;
-
- // Available slots after base limits considered.
- uint32_t availableRootSignatureSlots =
- kMaxRootSignatureSize - kReservedSlots -
- 2 * (limits->v1.maxBindGroups + limits->v1.maxDynamicUniformBuffersPerPipelineLayout +
- limits->v1.maxDynamicStorageBuffersPerPipelineLayout);
-
- // Because we need either:
- // - 1 cbv/uav/srv table + 1 sampler table
- // - 2 slots for a root descriptor
- uint32_t availableDynamicBufferOrBindGroup = availableRootSignatureSlots / 2;
-
- // We can either have a bind group, a dyn uniform buffer or a dyn storage buffer.
- // Distribute evenly.
- limits->v1.maxBindGroups += availableDynamicBufferOrBindGroup / 3;
- limits->v1.maxDynamicUniformBuffersPerPipelineLayout +=
- availableDynamicBufferOrBindGroup / 3;
- limits->v1.maxDynamicStorageBuffersPerPipelineLayout +=
- (availableDynamicBufferOrBindGroup - 2 * (availableDynamicBufferOrBindGroup / 3));
-
- ASSERT(2 * (limits->v1.maxBindGroups +
- limits->v1.maxDynamicUniformBuffersPerPipelineLayout +
- limits->v1.maxDynamicStorageBuffersPerPipelineLayout) <=
- kMaxRootSignatureSize - kReservedSlots);
-
- // https://docs.microsoft.com/en-us/windows/win32/direct3dhlsl/sm5-attributes-numthreads
- limits->v1.maxComputeWorkgroupSizeX = D3D12_CS_THREAD_GROUP_MAX_X;
- limits->v1.maxComputeWorkgroupSizeY = D3D12_CS_THREAD_GROUP_MAX_Y;
- limits->v1.maxComputeWorkgroupSizeZ = D3D12_CS_THREAD_GROUP_MAX_Z;
- limits->v1.maxComputeInvocationsPerWorkgroup = D3D12_CS_THREAD_GROUP_MAX_THREADS_PER_GROUP;
-
- // https://docs.maxComputeWorkgroupSizeXmicrosoft.com/en-us/windows/win32/api/d3d12/ns-d3d12-d3d12_dispatch_arguments
- limits->v1.maxComputeWorkgroupsPerDimension =
- D3D12_CS_DISPATCH_MAX_THREAD_GROUPS_PER_DIMENSION;
-
- // https://docs.microsoft.com/en-us/windows/win32/direct3d11/overviews-direct3d-11-devices-downlevel-compute-shaders
- // Thread Group Shared Memory is limited to 16Kb on downlevel hardware. This is less than
- // the 32Kb that is available to Direct3D 11 hardware. D3D12 is also 32kb.
- limits->v1.maxComputeWorkgroupStorageSize = 32768;
-
- // Max number of "constants" where each constant is a 16-byte float4
- limits->v1.maxUniformBufferBindingSize = D3D12_REQ_CONSTANT_BUFFER_ELEMENT_COUNT * 16;
- // D3D12 has no documented limit on the size of a storage buffer binding.
- limits->v1.maxStorageBufferBindingSize = 4294967295;
-
- // TODO(crbug.com/dawn/685):
- // LIMITS NOT SET:
- // - maxInterStageShaderComponents
- // - maxVertexBufferArrayStride
+ D3D12_FEATURE_DATA_FEATURE_LEVELS featureLevels;
+ featureLevels.NumFeatureLevels = sizeof(levelsToQuery) / sizeof(D3D_FEATURE_LEVEL);
+ featureLevels.pFeatureLevelsRequested = levelsToQuery;
+ DAWN_TRY(CheckHRESULT(mD3d12Device->CheckFeatureSupport(D3D12_FEATURE_FEATURE_LEVELS,
+ &featureLevels, sizeof(featureLevels)),
+ "CheckFeatureSupport D3D12_FEATURE_FEATURE_LEVELS"));
- return {};
+ if (featureLevels.MaxSupportedFeatureLevel == D3D_FEATURE_LEVEL_11_0 &&
+ featureData.ResourceBindingTier < D3D12_RESOURCE_BINDING_TIER_2) {
+ return DAWN_VALIDATION_ERROR(
+ "At least Resource Binding Tier 2 is required for D3D12 Feature Level 11.0 "
+ "devices.");
}
- MaybeError Adapter::InitializeDebugLayerFilters() {
- if (!GetInstance()->IsBackendValidationEnabled()) {
- return {};
- }
-
- D3D12_MESSAGE_ID denyIds[] = {
-
- //
- // Permanent IDs: list of warnings that are not applicable
- //
-
- // Resource sub-allocation partially maps pre-allocated heaps. This means the
- // entire physical addresses space may have no resources or have many resources
- // assigned the same heap.
- D3D12_MESSAGE_ID_HEAP_ADDRESS_RANGE_HAS_NO_RESOURCE,
- D3D12_MESSAGE_ID_HEAP_ADDRESS_RANGE_INTERSECTS_MULTIPLE_BUFFERS,
-
- // The debug layer validates pipeline objects when they are created. Dawn validates
- // them when them when they are set. Therefore, since the issue is caught at a later
- // time, we can silence this warnings.
- D3D12_MESSAGE_ID_CREATEGRAPHICSPIPELINESTATE_RENDERTARGETVIEW_NOT_SET,
-
- // Adding a clear color during resource creation would require heuristics or delayed
- // creation.
- // https://crbug.com/dawn/418
- D3D12_MESSAGE_ID_CLEARRENDERTARGETVIEW_MISMATCHINGCLEARVALUE,
- D3D12_MESSAGE_ID_CLEARDEPTHSTENCILVIEW_MISMATCHINGCLEARVALUE,
-
- // Dawn enforces proper Unmaps at a later time.
- // https://crbug.com/dawn/422
- D3D12_MESSAGE_ID_EXECUTECOMMANDLISTS_GPU_WRITTEN_READBACK_RESOURCE_MAPPED,
-
- // WebGPU allows empty scissors without empty viewports.
- D3D12_MESSAGE_ID_DRAW_EMPTY_SCISSOR_RECTANGLE,
-
- //
- // Temporary IDs: list of warnings that should be fixed or promoted
- //
-
- // Remove after warning have been addressed
- // https://crbug.com/dawn/421
- D3D12_MESSAGE_ID_GPU_BASED_VALIDATION_INCOMPATIBLE_RESOURCE_STATE,
-
- // For small placed resource alignment, we first request the small alignment, which may
- // get rejected and generate a debug error. Then, we request 0 to get the allowed
- // allowed alignment.
- D3D12_MESSAGE_ID_CREATERESOURCE_INVALIDALIGNMENT,
-
- // WebGPU allows OOB vertex buffer access and relies on D3D12's robust buffer access
- // behavior.
- D3D12_MESSAGE_ID_COMMAND_LIST_DRAW_VERTEX_BUFFER_TOO_SMALL,
-
- // WebGPU allows setVertexBuffer with offset that equals to the whole vertex buffer
- // size.
- // Even this means that no vertex buffer view has been set in D3D12 backend.
- // https://crbug.com/dawn/1255
- D3D12_MESSAGE_ID_COMMAND_LIST_DRAW_VERTEX_BUFFER_NOT_SET,
- };
-
- // Create a retrieval filter with a deny list to suppress messages.
- // Any messages remaining will be converted to Dawn errors.
- D3D12_INFO_QUEUE_FILTER filter{};
- // Filter out info/message and only create errors from warnings or worse.
- D3D12_MESSAGE_SEVERITY severities[] = {
- D3D12_MESSAGE_SEVERITY_INFO,
- D3D12_MESSAGE_SEVERITY_MESSAGE,
- };
- filter.DenyList.NumSeverities = ARRAYSIZE(severities);
- filter.DenyList.pSeverityList = severities;
- filter.DenyList.NumIDs = ARRAYSIZE(denyIds);
- filter.DenyList.pIDList = denyIds;
-
- ComPtr<ID3D12InfoQueue> infoQueue;
- DAWN_TRY(CheckHRESULT(mD3d12Device.As(&infoQueue),
- "D3D12 QueryInterface ID3D12Device to ID3D12InfoQueue"));
-
- // To avoid flooding the console, a storage-filter is also used to
- // prevent messages from getting logged.
- DAWN_TRY(CheckHRESULT(infoQueue->PushStorageFilter(&filter),
- "ID3D12InfoQueue::PushStorageFilter"));
-
- DAWN_TRY(CheckHRESULT(infoQueue->PushRetrievalFilter(&filter),
- "ID3D12InfoQueue::PushRetrievalFilter"));
+ GetDefaultLimits(&limits->v1);
+
+ // https://docs.microsoft.com/en-us/windows/win32/direct3d12/hardware-feature-levels
+
+ // Limits that are the same across D3D feature levels
+ limits->v1.maxTextureDimension1D = D3D12_REQ_TEXTURE1D_U_DIMENSION;
+ limits->v1.maxTextureDimension2D = D3D12_REQ_TEXTURE2D_U_OR_V_DIMENSION;
+ limits->v1.maxTextureDimension3D = D3D12_REQ_TEXTURE3D_U_V_OR_W_DIMENSION;
+ limits->v1.maxTextureArrayLayers = D3D12_REQ_TEXTURE2D_ARRAY_AXIS_DIMENSION;
+ // Slot values can be 0-15, inclusive:
+ // https://docs.microsoft.com/en-ca/windows/win32/api/d3d12/ns-d3d12-d3d12_input_element_desc
+ limits->v1.maxVertexBuffers = 16;
+ limits->v1.maxVertexAttributes = D3D12_IA_VERTEX_INPUT_RESOURCE_SLOT_COUNT;
+
+ // Note: WebGPU requires FL11.1+
+ // https://docs.microsoft.com/en-us/windows/win32/direct3d12/hardware-support
+ // Resource Binding Tier: 1 2 3
+
+ // Max(CBV+UAV+SRV) 1M 1M 1M+
+ // Max CBV per stage 14 14 full
+ // Max SRV per stage 128 full full
+ // Max UAV in all stages 64 64 full
+ // Max Samplers per stage 16 2048 2048
+
+ // https://docs.microsoft.com/en-us/windows-hardware/test/hlk/testref/efad06e8-51d1-40ce-ad5c-573a134b4bb6
+ // "full" means the full heap can be used. This is tested
+ // to work for 1 million descriptors, and 1.1M for tier 3.
+ uint32_t maxCBVsPerStage;
+ uint32_t maxSRVsPerStage;
+ uint32_t maxUAVsAllStages;
+ uint32_t maxSamplersPerStage;
+ switch (featureData.ResourceBindingTier) {
+ case D3D12_RESOURCE_BINDING_TIER_1:
+ maxCBVsPerStage = 14;
+ maxSRVsPerStage = 128;
+ maxUAVsAllStages = 64;
+ maxSamplersPerStage = 16;
+ break;
+ case D3D12_RESOURCE_BINDING_TIER_2:
+ maxCBVsPerStage = 14;
+ maxSRVsPerStage = 1'000'000;
+ maxUAVsAllStages = 64;
+ maxSamplersPerStage = 2048;
+ break;
+ case D3D12_RESOURCE_BINDING_TIER_3:
+ default:
+ maxCBVsPerStage = 1'100'000;
+ maxSRVsPerStage = 1'100'000;
+ maxUAVsAllStages = 1'100'000;
+ maxSamplersPerStage = 2048;
+ break;
+ }
+ ASSERT(maxUAVsAllStages / 4 > limits->v1.maxStorageTexturesPerShaderStage);
+ ASSERT(maxUAVsAllStages / 4 > limits->v1.maxStorageBuffersPerShaderStage);
+ uint32_t maxUAVsPerStage = maxUAVsAllStages / 2;
+
+ limits->v1.maxUniformBuffersPerShaderStage = maxCBVsPerStage;
+ // Allocate half of the UAVs to storage buffers, and half to storage textures.
+ limits->v1.maxStorageTexturesPerShaderStage = maxUAVsPerStage / 2;
+ limits->v1.maxStorageBuffersPerShaderStage = maxUAVsPerStage - maxUAVsPerStage / 2;
+ limits->v1.maxSampledTexturesPerShaderStage = maxSRVsPerStage;
+ limits->v1.maxSamplersPerShaderStage = maxSamplersPerStage;
+
+ // https://docs.microsoft.com/en-us/windows/win32/direct3d12/root-signature-limits
+ // In DWORDS. Descriptor tables cost 1, Root constants cost 1, Root descriptors cost 2.
+ static constexpr uint32_t kMaxRootSignatureSize = 64u;
+ // Dawn maps WebGPU's binding model by:
+ // - (maxBindGroups)
+ // CBVs/UAVs/SRVs for each bind group are a root descriptor table
+ // - (maxBindGroups)
+ // Samplers for each bind group are a root descriptor table
+ // - (2 * maxDynamicBuffers)
+ // Each dynamic buffer is a root descriptor
+ // RESERVED:
+ // - 3 = max of:
+ // - 2 root constants for the baseVertex/baseInstance constants.
+ // - 3 root constants for num workgroups X, Y, Z
+ // - 4 root constants (kMaxDynamicStorageBuffersPerPipelineLayout) for dynamic storage
+ // buffer lengths.
+ static constexpr uint32_t kReservedSlots = 7;
+
+ // Available slots after base limits considered.
+ uint32_t availableRootSignatureSlots =
+ kMaxRootSignatureSize - kReservedSlots -
+ 2 * (limits->v1.maxBindGroups + limits->v1.maxDynamicUniformBuffersPerPipelineLayout +
+ limits->v1.maxDynamicStorageBuffersPerPipelineLayout);
+
+ // Because we need either:
+ // - 1 cbv/uav/srv table + 1 sampler table
+ // - 2 slots for a root descriptor
+ uint32_t availableDynamicBufferOrBindGroup = availableRootSignatureSlots / 2;
+
+ // We can either have a bind group, a dyn uniform buffer or a dyn storage buffer.
+ // Distribute evenly.
+ limits->v1.maxBindGroups += availableDynamicBufferOrBindGroup / 3;
+ limits->v1.maxDynamicUniformBuffersPerPipelineLayout += availableDynamicBufferOrBindGroup / 3;
+ limits->v1.maxDynamicStorageBuffersPerPipelineLayout +=
+ (availableDynamicBufferOrBindGroup - 2 * (availableDynamicBufferOrBindGroup / 3));
+
+ ASSERT(2 * (limits->v1.maxBindGroups + limits->v1.maxDynamicUniformBuffersPerPipelineLayout +
+ limits->v1.maxDynamicStorageBuffersPerPipelineLayout) <=
+ kMaxRootSignatureSize - kReservedSlots);
+
+ // https://docs.microsoft.com/en-us/windows/win32/direct3dhlsl/sm5-attributes-numthreads
+ limits->v1.maxComputeWorkgroupSizeX = D3D12_CS_THREAD_GROUP_MAX_X;
+ limits->v1.maxComputeWorkgroupSizeY = D3D12_CS_THREAD_GROUP_MAX_Y;
+ limits->v1.maxComputeWorkgroupSizeZ = D3D12_CS_THREAD_GROUP_MAX_Z;
+ limits->v1.maxComputeInvocationsPerWorkgroup = D3D12_CS_THREAD_GROUP_MAX_THREADS_PER_GROUP;
+
+ // https://docs.microsoft.com/en-us/windows/win32/api/d3d12/ns-d3d12-d3d12_dispatch_arguments
+ limits->v1.maxComputeWorkgroupsPerDimension = D3D12_CS_DISPATCH_MAX_THREAD_GROUPS_PER_DIMENSION;
+
+ // https://docs.microsoft.com/en-us/windows/win32/direct3d11/overviews-direct3d-11-devices-downlevel-compute-shaders
+ // Thread group shared memory is limited to 16 KB on downlevel hardware, which is less
+ // than the 32 KB available to Direct3D 11 hardware; D3D12 also provides 32 KB.
+ limits->v1.maxComputeWorkgroupStorageSize = 32768;
+
+ // Max number of "constants" where each constant is a 16-byte float4
+ limits->v1.maxUniformBufferBindingSize = D3D12_REQ_CONSTANT_BUFFER_ELEMENT_COUNT * 16;
+ // D3D12 has no documented limit on the size of a storage buffer binding.
+ limits->v1.maxStorageBufferBindingSize = 4294967295;
+
+ // TODO(crbug.com/dawn/685):
+ // LIMITS NOT SET:
+ // - maxInterStageShaderComponents
+ // - maxVertexBufferArrayStride
+
+ return {};
+}
+
+MaybeError Adapter::InitializeDebugLayerFilters() {
+ if (!GetInstance()->IsBackendValidationEnabled()) {
return {};
}
- void Adapter::CleanUpDebugLayerFilters() {
- if (!GetInstance()->IsBackendValidationEnabled()) {
- return;
- }
-
- // The device may not exist if this adapter failed to initialize.
- if (mD3d12Device == nullptr) {
- return;
- }
-
- // If the debug layer is not installed, return immediately to avoid crashing the process.
- ComPtr<ID3D12InfoQueue> infoQueue;
- if (FAILED(mD3d12Device.As(&infoQueue))) {
- return;
- }
+ D3D12_MESSAGE_ID denyIds[] = {
+ //
+ // Permanent IDs: list of warnings that are not applicable
+ //
+
+ // Resource sub-allocation partially maps pre-allocated heaps. This means the
+ // entire physical address space may have no resources or have many resources
+ // assigned to the same heap.
+ D3D12_MESSAGE_ID_HEAP_ADDRESS_RANGE_HAS_NO_RESOURCE,
+ D3D12_MESSAGE_ID_HEAP_ADDRESS_RANGE_INTERSECTS_MULTIPLE_BUFFERS,
+
+ // The debug layer validates pipeline objects when they are created. Dawn validates
+ // them when they are set. Since the issue is caught at a later time, we can
+ // silence this warning.
+ D3D12_MESSAGE_ID_CREATEGRAPHICSPIPELINESTATE_RENDERTARGETVIEW_NOT_SET,
+
+ // Adding a clear color during resource creation would require heuristics or delayed
+ // creation.
+ // https://crbug.com/dawn/418
+ D3D12_MESSAGE_ID_CLEARRENDERTARGETVIEW_MISMATCHINGCLEARVALUE,
+ D3D12_MESSAGE_ID_CLEARDEPTHSTENCILVIEW_MISMATCHINGCLEARVALUE,
+
+ // Dawn enforces proper Unmaps at a later time.
+ // https://crbug.com/dawn/422
+ D3D12_MESSAGE_ID_EXECUTECOMMANDLISTS_GPU_WRITTEN_READBACK_RESOURCE_MAPPED,
+
+ // WebGPU allows empty scissors without empty viewports.
+ D3D12_MESSAGE_ID_DRAW_EMPTY_SCISSOR_RECTANGLE,
+
+ //
+ // Temporary IDs: list of warnings that should be fixed or promoted
+ //
+
+ // Remove after the warning has been addressed
+ // https://crbug.com/dawn/421
+ D3D12_MESSAGE_ID_GPU_BASED_VALIDATION_INCOMPATIBLE_RESOURCE_STATE,
+
+ // For small placed resource alignment, we first request the small alignment, which may
+ // get rejected and generate a debug error. Then, we request 0 to get the allowed
+ // alignment.
+ D3D12_MESSAGE_ID_CREATERESOURCE_INVALIDALIGNMENT,
+
+ // WebGPU allows OOB vertex buffer access and relies on D3D12's robust buffer access
+ // behavior.
+ D3D12_MESSAGE_ID_COMMAND_LIST_DRAW_VERTEX_BUFFER_TOO_SMALL,
+
+ // WebGPU allows setVertexBuffer with an offset equal to the whole vertex buffer
+ // size, even though this means that no vertex buffer view gets set in the D3D12
+ // backend.
+ // https://crbug.com/dawn/1255
+ D3D12_MESSAGE_ID_COMMAND_LIST_DRAW_VERTEX_BUFFER_NOT_SET,
+ };
+
+ // Create a retrieval filter with a deny list to suppress messages.
+ // Any messages remaining will be converted to Dawn errors.
+ D3D12_INFO_QUEUE_FILTER filter{};
+ // Filter out info/message and only create errors from warnings or worse.
+ D3D12_MESSAGE_SEVERITY severities[] = {
+ D3D12_MESSAGE_SEVERITY_INFO,
+ D3D12_MESSAGE_SEVERITY_MESSAGE,
+ };
+ filter.DenyList.NumSeverities = ARRAYSIZE(severities);
+ filter.DenyList.pSeverityList = severities;
+ filter.DenyList.NumIDs = ARRAYSIZE(denyIds);
+ filter.DenyList.pIDList = denyIds;
+
+ ComPtr<ID3D12InfoQueue> infoQueue;
+ DAWN_TRY(CheckHRESULT(mD3d12Device.As(&infoQueue),
+ "D3D12 QueryInterface ID3D12Device to ID3D12InfoQueue"));
+
+ // To avoid flooding the console, a storage-filter is also used to
+ // prevent messages from getting logged.
+ DAWN_TRY(
+ CheckHRESULT(infoQueue->PushStorageFilter(&filter), "ID3D12InfoQueue::PushStorageFilter"));
+
+ DAWN_TRY(CheckHRESULT(infoQueue->PushRetrievalFilter(&filter),
+ "ID3D12InfoQueue::PushRetrievalFilter"));
+
+ return {};
+}
+
+void Adapter::CleanUpDebugLayerFilters() {
+ if (!GetInstance()->IsBackendValidationEnabled()) {
+ return;
+ }
- infoQueue->PopRetrievalFilter();
- infoQueue->PopStorageFilter();
+ // The device may not exist if this adapter failed to initialize.
+ if (mD3d12Device == nullptr) {
+ return;
}
- ResultOrError<Ref<DeviceBase>> Adapter::CreateDeviceImpl(const DeviceDescriptor* descriptor) {
- return Device::Create(this, descriptor);
+ // If the debug layer is not installed, return immediately to avoid crashing the process.
+ ComPtr<ID3D12InfoQueue> infoQueue;
+ if (FAILED(mD3d12Device.As(&infoQueue))) {
+ return;
}
- // Resets the backend device and creates a new one. If any D3D12 objects belonging to the
- // current ID3D12Device have not been destroyed, a non-zero value will be returned upon Reset()
- // and the subequent call to CreateDevice will return a handle the existing device instead of
- // creating a new one.
- MaybeError Adapter::ResetInternalDeviceForTestingImpl() {
- ASSERT(mD3d12Device.Reset() == 0);
- DAWN_TRY(Initialize());
+ infoQueue->PopRetrievalFilter();
+ infoQueue->PopStorageFilter();
+}
- return {};
- }
+ResultOrError<Ref<DeviceBase>> Adapter::CreateDeviceImpl(const DeviceDescriptor* descriptor) {
+ return Device::Create(this, descriptor);
+}
+
+// Resets the backend device and creates a new one. If any D3D12 objects belonging to the
+// current ID3D12Device have not been destroyed, a non-zero value will be returned upon Reset()
+// and the subsequent call to CreateDevice will return a handle to the existing device instead of
+// creating a new one.
+MaybeError Adapter::ResetInternalDeviceForTestingImpl() {
+ ASSERT(mD3d12Device.Reset() == 0);
+ DAWN_TRY(Initialize());
+
+ return {};
+}
} // namespace dawn::native::d3d12
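
The slot arithmetic in the reworked InitializeSupportedLimitsImpl above is easier to follow in isolation. The following is a minimal standalone sketch, not Dawn code, assuming the WebGPU default limits of 4 bind groups, 8 dynamic uniform buffers and 4 dynamic storage buffers:

#include <cassert>
#include <cstdint>
#include <cstdio>

int main() {
    // Root signature budget in DWORDs; descriptor tables cost 1, root descriptors cost 2.
    constexpr uint32_t kMaxRootSignatureSize = 64;
    constexpr uint32_t kReservedSlots = 7;  // root constants reserved by Dawn (see above)

    // Assumed WebGPU default limits, for illustration only.
    uint32_t maxBindGroups = 4;
    uint32_t maxDynUniform = 8;
    uint32_t maxDynStorage = 4;

    // Two descriptor tables per bind group (views + samplers) and two DWORDs per
    // dynamic-buffer root descriptor, so every unit below costs 2 slots.
    uint32_t used = 2 * (maxBindGroups + maxDynUniform + maxDynStorage);  // 32
    uint32_t available = kMaxRootSignatureSize - kReservedSlots - used;   // 25
    uint32_t extraUnits = available / 2;                                  // 12

    // Distribute the remaining capacity evenly, with the remainder going to dynamic storage.
    maxBindGroups += extraUnits / 3;
    maxDynUniform += extraUnits / 3;
    maxDynStorage += extraUnits - 2 * (extraUnits / 3);

    assert(2 * (maxBindGroups + maxDynUniform + maxDynStorage) <=
           kMaxRootSignatureSize - kReservedSlots);
    std::printf("bind groups %u, dynamic uniform %u, dynamic storage %u\n", maxBindGroups,
                maxDynUniform, maxDynStorage);
    return 0;
}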
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/AdapterD3D12.h b/chromium/third_party/dawn/src/dawn/native/d3d12/AdapterD3D12.h
index b1f676083fd..035e291a4cf 100644
--- a/chromium/third_party/dawn/src/dawn/native/d3d12/AdapterD3D12.h
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/AdapterD3D12.h
@@ -23,43 +23,42 @@
namespace dawn::native::d3d12 {
- class Backend;
+class Backend;
- class Adapter : public AdapterBase {
- public:
- Adapter(Backend* backend, ComPtr<IDXGIAdapter3> hardwareAdapter);
- ~Adapter() override;
+class Adapter : public AdapterBase {
+ public:
+ Adapter(Backend* backend, ComPtr<IDXGIAdapter3> hardwareAdapter);
+ ~Adapter() override;
- // AdapterBase Implementation
- bool SupportsExternalImages() const override;
+ // AdapterBase Implementation
+ bool SupportsExternalImages() const override;
- const D3D12DeviceInfo& GetDeviceInfo() const;
- IDXGIAdapter3* GetHardwareAdapter() const;
- Backend* GetBackend() const;
- ComPtr<ID3D12Device> GetDevice() const;
- const gpu_info::D3DDriverVersion& GetDriverVersion() const;
+ const D3D12DeviceInfo& GetDeviceInfo() const;
+ IDXGIAdapter3* GetHardwareAdapter() const;
+ Backend* GetBackend() const;
+ ComPtr<ID3D12Device> GetDevice() const;
+ const gpu_info::D3DDriverVersion& GetDriverVersion() const;
- private:
- ResultOrError<Ref<DeviceBase>> CreateDeviceImpl(
- const DeviceDescriptor* descriptor) override;
- MaybeError ResetInternalDeviceForTestingImpl() override;
+ private:
+ ResultOrError<Ref<DeviceBase>> CreateDeviceImpl(const DeviceDescriptor* descriptor) override;
+ MaybeError ResetInternalDeviceForTestingImpl() override;
- bool AreTimestampQueriesSupported() const;
+ bool AreTimestampQueriesSupported() const;
- MaybeError InitializeImpl() override;
- MaybeError InitializeSupportedFeaturesImpl() override;
- MaybeError InitializeSupportedLimitsImpl(CombinedLimits* limits) override;
+ MaybeError InitializeImpl() override;
+ MaybeError InitializeSupportedFeaturesImpl() override;
+ MaybeError InitializeSupportedLimitsImpl(CombinedLimits* limits) override;
- MaybeError InitializeDebugLayerFilters();
- void CleanUpDebugLayerFilters();
+ MaybeError InitializeDebugLayerFilters();
+ void CleanUpDebugLayerFilters();
- ComPtr<IDXGIAdapter3> mHardwareAdapter;
- ComPtr<ID3D12Device> mD3d12Device;
- gpu_info::D3DDriverVersion mDriverVersion;
+ ComPtr<IDXGIAdapter3> mHardwareAdapter;
+ ComPtr<ID3D12Device> mD3d12Device;
+ gpu_info::D3DDriverVersion mDriverVersion;
- Backend* mBackend;
- D3D12DeviceInfo mDeviceInfo = {};
- };
+ Backend* mBackend;
+ D3D12DeviceInfo mDeviceInfo = {};
+};
} // namespace dawn::native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/BackendD3D12.cpp b/chromium/third_party/dawn/src/dawn/native/d3d12/BackendD3D12.cpp
index 27a98823b9a..3d0b13f491f 100644
--- a/chromium/third_party/dawn/src/dawn/native/d3d12/BackendD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/BackendD3D12.cpp
@@ -14,196 +14,207 @@
#include "dawn/native/d3d12/BackendD3D12.h"
+#include <utility>
+
#include "dawn/native/D3D12Backend.h"
#include "dawn/native/Instance.h"
#include "dawn/native/d3d12/AdapterD3D12.h"
#include "dawn/native/d3d12/D3D12Error.h"
#include "dawn/native/d3d12/PlatformFunctions.h"
+#include "dawn/native/d3d12/UtilsD3D12.h"
namespace dawn::native::d3d12 {
- namespace {
-
- ResultOrError<ComPtr<IDXGIFactory4>> CreateFactory(const PlatformFunctions* functions,
- BackendValidationLevel validationLevel,
- bool beginCaptureOnStartup) {
- ComPtr<IDXGIFactory4> factory;
-
- uint32_t dxgiFactoryFlags = 0;
-
- // Enable the debug layer (requires the Graphics Tools "optional feature").
- {
- if (validationLevel != BackendValidationLevel::Disabled) {
- ComPtr<ID3D12Debug3> debugController;
- if (SUCCEEDED(
- functions->d3d12GetDebugInterface(IID_PPV_ARGS(&debugController)))) {
- ASSERT(debugController != nullptr);
- debugController->EnableDebugLayer();
- if (validationLevel == BackendValidationLevel::Full) {
- debugController->SetEnableGPUBasedValidation(true);
- }
-
- // Enable additional debug layers.
- dxgiFactoryFlags |= DXGI_CREATE_FACTORY_DEBUG;
- }
- }
+namespace {
+
+ResultOrError<ComPtr<IDXGIFactory4>> CreateFactory(const PlatformFunctions* functions,
+ BackendValidationLevel validationLevel,
+ bool beginCaptureOnStartup) {
+ ComPtr<IDXGIFactory4> factory;
- if (beginCaptureOnStartup) {
- ComPtr<IDXGraphicsAnalysis> graphicsAnalysis;
- if (functions->dxgiGetDebugInterface1 != nullptr &&
- SUCCEEDED(functions->dxgiGetDebugInterface1(
- 0, IID_PPV_ARGS(&graphicsAnalysis)))) {
- graphicsAnalysis->BeginCapture();
- }
+ uint32_t dxgiFactoryFlags = 0;
+
+ // Enable the debug layer (requires the Graphics Tools "optional feature").
+ {
+ if (validationLevel != BackendValidationLevel::Disabled) {
+ ComPtr<ID3D12Debug3> debugController;
+ if (SUCCEEDED(functions->d3d12GetDebugInterface(IID_PPV_ARGS(&debugController)))) {
+ ASSERT(debugController != nullptr);
+ debugController->EnableDebugLayer();
+ if (validationLevel == BackendValidationLevel::Full) {
+ debugController->SetEnableGPUBasedValidation(true);
}
- }
- if (FAILED(functions->createDxgiFactory2(dxgiFactoryFlags, IID_PPV_ARGS(&factory)))) {
- return DAWN_INTERNAL_ERROR("Failed to create a DXGI factory");
+ // Enable additional debug layers.
+ dxgiFactoryFlags |= DXGI_CREATE_FACTORY_DEBUG;
}
-
- ASSERT(factory != nullptr);
- return std::move(factory);
}
- ResultOrError<Ref<AdapterBase>> CreateAdapterFromIDXGIAdapter(
- Backend* backend,
- ComPtr<IDXGIAdapter> dxgiAdapter) {
- ComPtr<IDXGIAdapter3> dxgiAdapter3;
- DAWN_TRY(CheckHRESULT(dxgiAdapter.As(&dxgiAdapter3), "DXGIAdapter retrieval"));
- Ref<Adapter> adapter = AcquireRef(new Adapter(backend, std::move(dxgiAdapter3)));
- DAWN_TRY(adapter->Initialize());
-
- return {std::move(adapter)};
+ if (beginCaptureOnStartup) {
+ ComPtr<IDXGraphicsAnalysis> graphicsAnalysis;
+ if (functions->dxgiGetDebugInterface1 != nullptr &&
+ SUCCEEDED(functions->dxgiGetDebugInterface1(0, IID_PPV_ARGS(&graphicsAnalysis)))) {
+ graphicsAnalysis->BeginCapture();
+ }
}
+ }
- } // anonymous namespace
-
- Backend::Backend(InstanceBase* instance)
- : BackendConnection(instance, wgpu::BackendType::D3D12) {
+ if (FAILED(functions->createDxgiFactory2(dxgiFactoryFlags, IID_PPV_ARGS(&factory)))) {
+ return DAWN_INTERNAL_ERROR("Failed to create a DXGI factory");
}
- MaybeError Backend::Initialize() {
- mFunctions = std::make_unique<PlatformFunctions>();
- DAWN_TRY(mFunctions->LoadFunctions());
+ ASSERT(factory != nullptr);
+ return std::move(factory);
+}
- const auto instance = GetInstance();
+ResultOrError<Ref<AdapterBase>> CreateAdapterFromIDXGIAdapter(Backend* backend,
+ ComPtr<IDXGIAdapter> dxgiAdapter) {
+ ComPtr<IDXGIAdapter3> dxgiAdapter3;
+ DAWN_TRY(CheckHRESULT(dxgiAdapter.As(&dxgiAdapter3), "DXGIAdapter retrieval"));
+ Ref<Adapter> adapter = AcquireRef(new Adapter(backend, std::move(dxgiAdapter3)));
+ DAWN_TRY(adapter->Initialize());
- DAWN_TRY_ASSIGN(mFactory,
- CreateFactory(mFunctions.get(), instance->GetBackendValidationLevel(),
- instance->IsBeginCaptureOnStartupEnabled()));
+ return {std::move(adapter)};
+}
- return {};
- }
+} // anonymous namespace
- ComPtr<IDXGIFactory4> Backend::GetFactory() const {
- return mFactory;
- }
+Backend::Backend(InstanceBase* instance) : BackendConnection(instance, wgpu::BackendType::D3D12) {}
- MaybeError Backend::EnsureDxcLibrary() {
- if (mDxcLibrary == nullptr) {
- DAWN_TRY(CheckHRESULT(
- mFunctions->dxcCreateInstance(CLSID_DxcLibrary, IID_PPV_ARGS(&mDxcLibrary)),
- "DXC create library"));
- ASSERT(mDxcLibrary != nullptr);
- }
- return {};
- }
+MaybeError Backend::Initialize() {
+ mFunctions = std::make_unique<PlatformFunctions>();
+ DAWN_TRY(mFunctions->LoadFunctions());
- MaybeError Backend::EnsureDxcCompiler() {
- if (mDxcCompiler == nullptr) {
- DAWN_TRY(CheckHRESULT(
- mFunctions->dxcCreateInstance(CLSID_DxcCompiler, IID_PPV_ARGS(&mDxcCompiler)),
- "DXC create compiler"));
- ASSERT(mDxcCompiler != nullptr);
- }
- return {};
- }
+ const auto instance = GetInstance();
- MaybeError Backend::EnsureDxcValidator() {
- if (mDxcValidator == nullptr) {
- DAWN_TRY(CheckHRESULT(
- mFunctions->dxcCreateInstance(CLSID_DxcValidator, IID_PPV_ARGS(&mDxcValidator)),
- "DXC create validator"));
- ASSERT(mDxcValidator != nullptr);
- }
- return {};
- }
+ DAWN_TRY_ASSIGN(mFactory, CreateFactory(mFunctions.get(), instance->GetBackendValidationLevel(),
+ instance->IsBeginCaptureOnStartupEnabled()));
+
+ return {};
+}
- ComPtr<IDxcLibrary> Backend::GetDxcLibrary() const {
+ComPtr<IDXGIFactory4> Backend::GetFactory() const {
+ return mFactory;
+}
+
+MaybeError Backend::EnsureDxcLibrary() {
+ if (mDxcLibrary == nullptr) {
+ DAWN_TRY(CheckHRESULT(
+ mFunctions->dxcCreateInstance(CLSID_DxcLibrary, IID_PPV_ARGS(&mDxcLibrary)),
+ "DXC create library"));
ASSERT(mDxcLibrary != nullptr);
- return mDxcLibrary;
}
-
- ComPtr<IDxcCompiler> Backend::GetDxcCompiler() const {
+ return {};
+}
+
+MaybeError Backend::EnsureDxcCompiler() {
+ if (mDxcCompiler == nullptr) {
+ DAWN_TRY(CheckHRESULT(
+ mFunctions->dxcCreateInstance(CLSID_DxcCompiler, IID_PPV_ARGS(&mDxcCompiler)),
+ "DXC create compiler"));
ASSERT(mDxcCompiler != nullptr);
- return mDxcCompiler;
}
-
- ComPtr<IDxcValidator> Backend::GetDxcValidator() const {
+ return {};
+}
+
+MaybeError Backend::EnsureDxcValidator() {
+ if (mDxcValidator == nullptr) {
+ DAWN_TRY(CheckHRESULT(
+ mFunctions->dxcCreateInstance(CLSID_DxcValidator, IID_PPV_ARGS(&mDxcValidator)),
+ "DXC create validator"));
ASSERT(mDxcValidator != nullptr);
- return mDxcValidator;
}
-
- const PlatformFunctions* Backend::GetFunctions() const {
- return mFunctions.get();
+ return {};
+}
+
+ComPtr<IDxcLibrary> Backend::GetDxcLibrary() const {
+ ASSERT(mDxcLibrary != nullptr);
+ return mDxcLibrary;
+}
+
+ComPtr<IDxcCompiler> Backend::GetDxcCompiler() const {
+ ASSERT(mDxcCompiler != nullptr);
+ return mDxcCompiler;
+}
+
+ComPtr<IDxcValidator> Backend::GetDxcValidator() const {
+ ASSERT(mDxcValidator != nullptr);
+ return mDxcValidator;
+}
+
+ResultOrError<uint64_t> Backend::GetDXCompilerVersion() {
+ DAWN_TRY(EnsureDxcValidator());
+
+ ComPtr<IDxcVersionInfo> versionInfo;
+ DAWN_TRY(CheckHRESULT(mDxcValidator.As(&versionInfo),
+ "D3D12 QueryInterface IDxcValidator to IDxcVersionInfo"));
+
+ uint32_t compilerMajor, compilerMinor;
+ DAWN_TRY(CheckHRESULT(versionInfo->GetVersion(&compilerMajor, &compilerMinor),
+ "IDxcVersionInfo::GetVersion"));
+
+ // Pack both into a single version number.
+ return MakeDXCVersion(compilerMajor, compilerMinor);
+}
+
+const PlatformFunctions* Backend::GetFunctions() const {
+ return mFunctions.get();
+}
+
+std::vector<Ref<AdapterBase>> Backend::DiscoverDefaultAdapters() {
+ AdapterDiscoveryOptions options;
+ auto result = DiscoverAdapters(&options);
+ if (result.IsError()) {
+ GetInstance()->ConsumedError(result.AcquireError());
+ return {};
}
-
- std::vector<Ref<AdapterBase>> Backend::DiscoverDefaultAdapters() {
- AdapterDiscoveryOptions options;
- auto result = DiscoverAdapters(&options);
- if (result.IsError()) {
- GetInstance()->ConsumedError(result.AcquireError());
- return {};
- }
- return result.AcquireSuccess();
+ return result.AcquireSuccess();
+}
+
+ResultOrError<std::vector<Ref<AdapterBase>>> Backend::DiscoverAdapters(
+ const AdapterDiscoveryOptionsBase* optionsBase) {
+ ASSERT(optionsBase->backendType == WGPUBackendType_D3D12);
+ const AdapterDiscoveryOptions* options =
+ static_cast<const AdapterDiscoveryOptions*>(optionsBase);
+
+ std::vector<Ref<AdapterBase>> adapters;
+ if (options->dxgiAdapter != nullptr) {
+ // |dxgiAdapter| was provided. Discover just that adapter.
+ Ref<AdapterBase> adapter;
+ DAWN_TRY_ASSIGN(adapter, CreateAdapterFromIDXGIAdapter(this, options->dxgiAdapter));
+ adapters.push_back(std::move(adapter));
+ return std::move(adapters);
}
- ResultOrError<std::vector<Ref<AdapterBase>>> Backend::DiscoverAdapters(
- const AdapterDiscoveryOptionsBase* optionsBase) {
- ASSERT(optionsBase->backendType == WGPUBackendType_D3D12);
- const AdapterDiscoveryOptions* options =
- static_cast<const AdapterDiscoveryOptions*>(optionsBase);
-
- std::vector<Ref<AdapterBase>> adapters;
- if (options->dxgiAdapter != nullptr) {
- // |dxgiAdapter| was provided. Discover just that adapter.
- Ref<AdapterBase> adapter;
- DAWN_TRY_ASSIGN(adapter, CreateAdapterFromIDXGIAdapter(this, options->dxgiAdapter));
- adapters.push_back(std::move(adapter));
- return std::move(adapters);
+ // Enumerate and discover all available adapters.
+ for (uint32_t adapterIndex = 0;; ++adapterIndex) {
+ ComPtr<IDXGIAdapter1> dxgiAdapter = nullptr;
+ if (mFactory->EnumAdapters1(adapterIndex, &dxgiAdapter) == DXGI_ERROR_NOT_FOUND) {
+ break; // No more adapters to enumerate.
}
- // Enumerate and discover all available adapters.
- for (uint32_t adapterIndex = 0;; ++adapterIndex) {
- ComPtr<IDXGIAdapter1> dxgiAdapter = nullptr;
- if (mFactory->EnumAdapters1(adapterIndex, &dxgiAdapter) == DXGI_ERROR_NOT_FOUND) {
- break; // No more adapters to enumerate.
- }
-
- ASSERT(dxgiAdapter != nullptr);
- ResultOrError<Ref<AdapterBase>> adapter =
- CreateAdapterFromIDXGIAdapter(this, dxgiAdapter);
- if (adapter.IsError()) {
- GetInstance()->ConsumedError(adapter.AcquireError());
- continue;
- }
-
- adapters.push_back(adapter.AcquireSuccess());
+ ASSERT(dxgiAdapter != nullptr);
+ ResultOrError<Ref<AdapterBase>> adapter = CreateAdapterFromIDXGIAdapter(this, dxgiAdapter);
+ if (adapter.IsError()) {
+ GetInstance()->ConsumedError(adapter.AcquireError());
+ continue;
}
- return adapters;
+ adapters.push_back(adapter.AcquireSuccess());
}
- BackendConnection* Connect(InstanceBase* instance) {
- Backend* backend = new Backend(instance);
+ return adapters;
+}
- if (instance->ConsumedError(backend->Initialize())) {
- delete backend;
- return nullptr;
- }
+BackendConnection* Connect(InstanceBase* instance) {
+ Backend* backend = new Backend(instance);
- return backend;
+ if (instance->ConsumedError(backend->Initialize())) {
+ delete backend;
+ return nullptr;
}
+ return backend;
+}
+
} // namespace dawn::native::d3d12
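
The new Backend::GetDXCompilerVersion packs the DXC major and minor numbers into a single value via MakeDXCVersion from UtilsD3D12.h. Below is a hedged sketch of one plausible packing, assuming the major number goes in the high 32 bits and the minor in the low 32 bits; the real helper may encode the pair differently:

#include <cstdint>

// Hypothetical stand-in for MakeDXCVersion(): monotonically increasing across releases
// as long as major and minor each fit in 32 bits.
constexpr uint64_t PackDXCVersion(uint32_t major, uint32_t minor) {
    return (static_cast<uint64_t>(major) << 32) | minor;
}

static_assert(PackDXCVersion(1, 7) > PackDXCVersion(1, 6), "newer minor compares greater");
static_assert(PackDXCVersion(2, 0) > PackDXCVersion(1, 99), "major dominates minor");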
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/BackendD3D12.h b/chromium/third_party/dawn/src/dawn/native/d3d12/BackendD3D12.h
index 4d0ff7c7488..1bf1ead583d 100644
--- a/chromium/third_party/dawn/src/dawn/native/d3d12/BackendD3D12.h
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/BackendD3D12.h
@@ -15,44 +15,48 @@
#ifndef SRC_DAWN_NATIVE_D3D12_BACKENDD3D12_H_
#define SRC_DAWN_NATIVE_D3D12_BACKENDD3D12_H_
+#include <memory>
+#include <vector>
+
#include "dawn/native/BackendConnection.h"
#include "dawn/native/d3d12/d3d12_platform.h"
namespace dawn::native::d3d12 {
- class PlatformFunctions;
+class PlatformFunctions;
- class Backend : public BackendConnection {
- public:
- Backend(InstanceBase* instance);
+class Backend : public BackendConnection {
+ public:
+ explicit Backend(InstanceBase* instance);
- MaybeError Initialize();
+ MaybeError Initialize();
- ComPtr<IDXGIFactory4> GetFactory() const;
+ ComPtr<IDXGIFactory4> GetFactory() const;
- MaybeError EnsureDxcLibrary();
- MaybeError EnsureDxcCompiler();
- MaybeError EnsureDxcValidator();
- ComPtr<IDxcLibrary> GetDxcLibrary() const;
- ComPtr<IDxcCompiler> GetDxcCompiler() const;
- ComPtr<IDxcValidator> GetDxcValidator() const;
+ MaybeError EnsureDxcLibrary();
+ MaybeError EnsureDxcCompiler();
+ MaybeError EnsureDxcValidator();
+ ComPtr<IDxcLibrary> GetDxcLibrary() const;
+ ComPtr<IDxcCompiler> GetDxcCompiler() const;
+ ComPtr<IDxcValidator> GetDxcValidator() const;
+ ResultOrError<uint64_t> GetDXCompilerVersion();
- const PlatformFunctions* GetFunctions() const;
+ const PlatformFunctions* GetFunctions() const;
- std::vector<Ref<AdapterBase>> DiscoverDefaultAdapters() override;
- ResultOrError<std::vector<Ref<AdapterBase>>> DiscoverAdapters(
- const AdapterDiscoveryOptionsBase* optionsBase) override;
+ std::vector<Ref<AdapterBase>> DiscoverDefaultAdapters() override;
+ ResultOrError<std::vector<Ref<AdapterBase>>> DiscoverAdapters(
+ const AdapterDiscoveryOptionsBase* optionsBase) override;
- private:
- // Keep mFunctions as the first member so that in the destructor it is freed last. Otherwise
- // the D3D12 DLLs are unloaded before we are done using them.
- std::unique_ptr<PlatformFunctions> mFunctions;
- ComPtr<IDXGIFactory4> mFactory;
- ComPtr<IDxcLibrary> mDxcLibrary;
- ComPtr<IDxcCompiler> mDxcCompiler;
- ComPtr<IDxcValidator> mDxcValidator;
- };
+ private:
+ // Keep mFunctions as the first member so that in the destructor it is freed last. Otherwise
+ // the D3D12 DLLs are unloaded before we are done using them.
+ std::unique_ptr<PlatformFunctions> mFunctions;
+ ComPtr<IDXGIFactory4> mFactory;
+ ComPtr<IDxcLibrary> mDxcLibrary;
+ ComPtr<IDxcCompiler> mDxcCompiler;
+ ComPtr<IDxcValidator> mDxcValidator;
+};
} // namespace dawn::native::d3d12
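
The comment on mFunctions above relies on a C++ guarantee: non-static data members are destroyed in reverse declaration order, so the member declared first outlives the ones declared after it. A minimal illustration, not Dawn code:

#include <iostream>

struct Loader {      // stands in for PlatformFunctions (keeps the D3D12 DLLs loaded)
    ~Loader() { std::cout << "Loader destroyed last\n"; }
};

struct ComObjects {  // stands in for the ComPtr members that still need those DLLs
    ~ComObjects() { std::cout << "ComObjects destroyed first\n"; }
};

struct Backend {
    Loader loader;         // declared first  => destroyed last
    ComObjects objects;    // declared second => destroyed first
};

int main() {
    Backend backend;  // on destruction prints "ComObjects destroyed first", then "Loader destroyed last"
    return 0;
}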
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/BindGroupD3D12.cpp b/chromium/third_party/dawn/src/dawn/native/d3d12/BindGroupD3D12.cpp
index f1693452269..f0f8471764c 100644
--- a/chromium/third_party/dawn/src/dawn/native/d3d12/BindGroupD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/BindGroupD3D12.cpp
@@ -14,6 +14,8 @@
#include "dawn/native/d3d12/BindGroupD3D12.h"
+#include <utility>
+
#include "dawn/common/BitSetIterator.h"
#include "dawn/native/ExternalTexture.h"
#include "dawn/native/d3d12/BindGroupLayoutD3D12.h"
@@ -25,244 +27,241 @@
namespace dawn::native::d3d12 {
- // static
- ResultOrError<Ref<BindGroup>> BindGroup::Create(Device* device,
- const BindGroupDescriptor* descriptor) {
- return ToBackend(descriptor->layout)->AllocateBindGroup(device, descriptor);
- }
+// static
+ResultOrError<Ref<BindGroup>> BindGroup::Create(Device* device,
+ const BindGroupDescriptor* descriptor) {
+ return ToBackend(descriptor->layout)->AllocateBindGroup(device, descriptor);
+}
+
+BindGroup::BindGroup(Device* device,
+ const BindGroupDescriptor* descriptor,
+ uint32_t viewSizeIncrement,
+ const CPUDescriptorHeapAllocation& viewAllocation)
+ : BindGroupBase(this, device, descriptor) {
+ BindGroupLayout* bgl = ToBackend(GetLayout());
+
+ mCPUViewAllocation = viewAllocation;
+
+ const auto& descriptorHeapOffsets = bgl->GetDescriptorHeapOffsets();
+
+ ID3D12Device* d3d12Device = device->GetD3D12Device();
+
+ // It's not necessary to create descriptors in the descriptor heap for dynamic resources.
+ // This is because they are created as root descriptors which are never heap allocated.
+ // Since dynamic buffers are packed in the front, we can skip over these bindings by
+ // starting from the dynamic buffer count.
+ for (BindingIndex bindingIndex = bgl->GetDynamicBufferCount();
+ bindingIndex < bgl->GetBindingCount(); ++bindingIndex) {
+ const BindingInfo& bindingInfo = bgl->GetBindingInfo(bindingIndex);
+
+ // Increment size does not need to be stored and is only used to get a handle
+ // local to the allocation with OffsetFrom().
+ switch (bindingInfo.bindingType) {
+ case BindingInfoType::Buffer: {
+ BufferBinding binding = GetBindingAsBufferBinding(bindingIndex);
+
+ ID3D12Resource* resource = ToBackend(binding.buffer)->GetD3D12Resource();
+ if (resource == nullptr) {
+ // The Buffer was destroyed. Skip creating buffer views since there is no
+ // resource. This bind group won't be used as it is an error to submit a
+ // command buffer that references destroyed resources.
+ continue;
+ }
- BindGroup::BindGroup(Device* device,
- const BindGroupDescriptor* descriptor,
- uint32_t viewSizeIncrement,
- const CPUDescriptorHeapAllocation& viewAllocation)
- : BindGroupBase(this, device, descriptor) {
- BindGroupLayout* bgl = ToBackend(GetLayout());
-
- mCPUViewAllocation = viewAllocation;
-
- const auto& descriptorHeapOffsets = bgl->GetDescriptorHeapOffsets();
-
- ID3D12Device* d3d12Device = device->GetD3D12Device();
-
- // It's not necessary to create descriptors in the descriptor heap for dynamic resources.
- // This is because they are created as root descriptors which are never heap allocated.
- // Since dynamic buffers are packed in the front, we can skip over these bindings by
- // starting from the dynamic buffer count.
- for (BindingIndex bindingIndex = bgl->GetDynamicBufferCount();
- bindingIndex < bgl->GetBindingCount(); ++bindingIndex) {
- const BindingInfo& bindingInfo = bgl->GetBindingInfo(bindingIndex);
-
- // Increment size does not need to be stored and is only used to get a handle
- // local to the allocation with OffsetFrom().
- switch (bindingInfo.bindingType) {
- case BindingInfoType::Buffer: {
- BufferBinding binding = GetBindingAsBufferBinding(bindingIndex);
-
- ID3D12Resource* resource = ToBackend(binding.buffer)->GetD3D12Resource();
- if (resource == nullptr) {
- // The Buffer was destroyed. Skip creating buffer views since there is no
- // resource. This bind group won't be used as it is an error to submit a
- // command buffer that references destroyed resources.
- continue;
+ switch (bindingInfo.buffer.type) {
+ case wgpu::BufferBindingType::Uniform: {
+ D3D12_CONSTANT_BUFFER_VIEW_DESC desc;
+ desc.SizeInBytes =
+ Align(binding.size, D3D12_CONSTANT_BUFFER_DATA_PLACEMENT_ALIGNMENT);
+ desc.BufferLocation = ToBackend(binding.buffer)->GetVA() + binding.offset;
+
+ d3d12Device->CreateConstantBufferView(
+ &desc, viewAllocation.OffsetFrom(viewSizeIncrement,
+ descriptorHeapOffsets[bindingIndex]));
+ break;
}
-
- switch (bindingInfo.buffer.type) {
- case wgpu::BufferBindingType::Uniform: {
- D3D12_CONSTANT_BUFFER_VIEW_DESC desc;
- desc.SizeInBytes =
- Align(binding.size, D3D12_CONSTANT_BUFFER_DATA_PLACEMENT_ALIGNMENT);
- desc.BufferLocation =
- ToBackend(binding.buffer)->GetVA() + binding.offset;
-
- d3d12Device->CreateConstantBufferView(
- &desc, viewAllocation.OffsetFrom(
- viewSizeIncrement, descriptorHeapOffsets[bindingIndex]));
- break;
- }
- case wgpu::BufferBindingType::Storage:
- case kInternalStorageBufferBinding: {
- // Since Tint outputs HLSL shaders with RWByteAddressBuffer,
- // we must use D3D12_BUFFER_UAV_FLAG_RAW when making the
- // UNORDERED_ACCESS_VIEW_DESC. Using D3D12_BUFFER_UAV_FLAG_RAW requires
- // that we use DXGI_FORMAT_R32_TYPELESS as the format of the view.
- // DXGI_FORMAT_R32_TYPELESS requires that the element size be 4
- // byte aligned. Since binding.size and binding.offset are in bytes,
- // we need to divide by 4 to obtain the element size.
- D3D12_UNORDERED_ACCESS_VIEW_DESC desc;
- desc.Buffer.NumElements = binding.size / 4;
- desc.Format = DXGI_FORMAT_R32_TYPELESS;
- desc.ViewDimension = D3D12_UAV_DIMENSION_BUFFER;
- desc.Buffer.FirstElement = binding.offset / 4;
- desc.Buffer.StructureByteStride = 0;
- desc.Buffer.CounterOffsetInBytes = 0;
- desc.Buffer.Flags = D3D12_BUFFER_UAV_FLAG_RAW;
-
- d3d12Device->CreateUnorderedAccessView(
- resource, nullptr, &desc,
- viewAllocation.OffsetFrom(viewSizeIncrement,
- descriptorHeapOffsets[bindingIndex]));
- break;
- }
- case wgpu::BufferBindingType::ReadOnlyStorage: {
- // Like StorageBuffer, Tint outputs HLSL shaders for readonly
- // storage buffer with ByteAddressBuffer. So we must use
- // D3D12_BUFFER_SRV_FLAG_RAW when making the SRV descriptor. And it has
- // similar requirement for format, element size, etc.
- D3D12_SHADER_RESOURCE_VIEW_DESC desc;
- desc.Format = DXGI_FORMAT_R32_TYPELESS;
- desc.ViewDimension = D3D12_SRV_DIMENSION_BUFFER;
- desc.Shader4ComponentMapping = D3D12_DEFAULT_SHADER_4_COMPONENT_MAPPING;
- desc.Buffer.FirstElement = binding.offset / 4;
- desc.Buffer.NumElements = binding.size / 4;
- desc.Buffer.StructureByteStride = 0;
- desc.Buffer.Flags = D3D12_BUFFER_SRV_FLAG_RAW;
- d3d12Device->CreateShaderResourceView(
- resource, &desc,
- viewAllocation.OffsetFrom(viewSizeIncrement,
- descriptorHeapOffsets[bindingIndex]));
- break;
- }
- case wgpu::BufferBindingType::Undefined:
- UNREACHABLE();
+ case wgpu::BufferBindingType::Storage:
+ case kInternalStorageBufferBinding: {
+ // Since Tint outputs HLSL shaders with RWByteAddressBuffer,
+ // we must use D3D12_BUFFER_UAV_FLAG_RAW when making the
+ // UNORDERED_ACCESS_VIEW_DESC. Using D3D12_BUFFER_UAV_FLAG_RAW requires
+ // that we use DXGI_FORMAT_R32_TYPELESS as the format of the view.
+ // DXGI_FORMAT_R32_TYPELESS requires that the element size be 4
+ // byte aligned. Since binding.size and binding.offset are in bytes,
+ // we need to divide by 4 to obtain the element size.
+ D3D12_UNORDERED_ACCESS_VIEW_DESC desc;
+ desc.Buffer.NumElements = binding.size / 4;
+ desc.Format = DXGI_FORMAT_R32_TYPELESS;
+ desc.ViewDimension = D3D12_UAV_DIMENSION_BUFFER;
+ desc.Buffer.FirstElement = binding.offset / 4;
+ desc.Buffer.StructureByteStride = 0;
+ desc.Buffer.CounterOffsetInBytes = 0;
+ desc.Buffer.Flags = D3D12_BUFFER_UAV_FLAG_RAW;
+
+ d3d12Device->CreateUnorderedAccessView(
+ resource, nullptr, &desc,
+ viewAllocation.OffsetFrom(viewSizeIncrement,
+ descriptorHeapOffsets[bindingIndex]));
+ break;
}
-
- break;
+ case wgpu::BufferBindingType::ReadOnlyStorage: {
+ // Like StorageBuffer, Tint outputs HLSL shaders for readonly
+ // storage buffer with ByteAddressBuffer. So we must use
+ // D3D12_BUFFER_SRV_FLAG_RAW when making the SRV descriptor. It has similar
+ // requirements for format, element size, etc.
+ D3D12_SHADER_RESOURCE_VIEW_DESC desc;
+ desc.Format = DXGI_FORMAT_R32_TYPELESS;
+ desc.ViewDimension = D3D12_SRV_DIMENSION_BUFFER;
+ desc.Shader4ComponentMapping = D3D12_DEFAULT_SHADER_4_COMPONENT_MAPPING;
+ desc.Buffer.FirstElement = binding.offset / 4;
+ desc.Buffer.NumElements = binding.size / 4;
+ desc.Buffer.StructureByteStride = 0;
+ desc.Buffer.Flags = D3D12_BUFFER_SRV_FLAG_RAW;
+ d3d12Device->CreateShaderResourceView(
+ resource, &desc,
+ viewAllocation.OffsetFrom(viewSizeIncrement,
+ descriptorHeapOffsets[bindingIndex]));
+ break;
+ }
+ case wgpu::BufferBindingType::Undefined:
+ UNREACHABLE();
}
- case BindingInfoType::Texture: {
- auto* view = ToBackend(GetBindingAsTextureView(bindingIndex));
- auto& srv = view->GetSRVDescriptor();
+ break;
+ }
- ID3D12Resource* resource = ToBackend(view->GetTexture())->GetD3D12Resource();
- if (resource == nullptr) {
- // The Texture was destroyed. Skip creating the SRV since there is no
- // resource. This bind group won't be used as it is an error to submit a
- // command buffer that references destroyed resources.
- continue;
- }
+ case BindingInfoType::Texture: {
+ auto* view = ToBackend(GetBindingAsTextureView(bindingIndex));
+ auto& srv = view->GetSRVDescriptor();
- d3d12Device->CreateShaderResourceView(
- resource, &srv,
- viewAllocation.OffsetFrom(viewSizeIncrement,
- descriptorHeapOffsets[bindingIndex]));
- break;
+ ID3D12Resource* resource = ToBackend(view->GetTexture())->GetD3D12Resource();
+ if (resource == nullptr) {
+ // The Texture was destroyed. Skip creating the SRV since there is no
+ // resource. This bind group won't be used as it is an error to submit a
+ // command buffer that references destroyed resources.
+ continue;
}
- case BindingInfoType::StorageTexture: {
- TextureView* view = ToBackend(GetBindingAsTextureView(bindingIndex));
+ d3d12Device->CreateShaderResourceView(
+ resource, &srv,
+ viewAllocation.OffsetFrom(viewSizeIncrement,
+ descriptorHeapOffsets[bindingIndex]));
+ break;
+ }
- ID3D12Resource* resource = ToBackend(view->GetTexture())->GetD3D12Resource();
- if (resource == nullptr) {
- // The Texture was destroyed. Skip creating the SRV/UAV since there is no
- // resource. This bind group won't be used as it is an error to submit a
- // command buffer that references destroyed resources.
- continue;
- }
+ case BindingInfoType::StorageTexture: {
+ TextureView* view = ToBackend(GetBindingAsTextureView(bindingIndex));
+
+ ID3D12Resource* resource = ToBackend(view->GetTexture())->GetD3D12Resource();
+ if (resource == nullptr) {
+ // The Texture was destroyed. Skip creating the SRV/UAV since there is no
+ // resource. This bind group won't be used as it is an error to submit a
+ // command buffer that references destroyed resources.
+ continue;
+ }
- switch (bindingInfo.storageTexture.access) {
- case wgpu::StorageTextureAccess::WriteOnly: {
- D3D12_UNORDERED_ACCESS_VIEW_DESC uav = view->GetUAVDescriptor();
- d3d12Device->CreateUnorderedAccessView(
- resource, nullptr, &uav,
- viewAllocation.OffsetFrom(viewSizeIncrement,
- descriptorHeapOffsets[bindingIndex]));
- break;
- }
-
- case wgpu::StorageTextureAccess::Undefined:
- UNREACHABLE();
+ switch (bindingInfo.storageTexture.access) {
+ case wgpu::StorageTextureAccess::WriteOnly: {
+ D3D12_UNORDERED_ACCESS_VIEW_DESC uav = view->GetUAVDescriptor();
+ d3d12Device->CreateUnorderedAccessView(
+ resource, nullptr, &uav,
+ viewAllocation.OffsetFrom(viewSizeIncrement,
+ descriptorHeapOffsets[bindingIndex]));
+ break;
}
- break;
+ case wgpu::StorageTextureAccess::Undefined:
+ UNREACHABLE();
}
- case BindingInfoType::ExternalTexture: {
- UNREACHABLE();
- }
+ break;
+ }
- case BindingInfoType::Sampler: {
- // No-op as samplers will be later initialized by CreateSamplers().
- break;
- }
+ case BindingInfoType::ExternalTexture: {
+ UNREACHABLE();
}
- }
- // Loop through the dynamic storage buffers and build a flat map from the index of the
- // dynamic storage buffer to its binding size. The index |dynamicStorageBufferIndex|
- // means that it is the i'th buffer that is both dynamic and storage, in increasing order
- // of BindingNumber.
- mDynamicStorageBufferLengths.resize(bgl->GetBindingCountInfo().dynamicStorageBufferCount);
- uint32_t dynamicStorageBufferIndex = 0;
- for (BindingIndex bindingIndex(0); bindingIndex < bgl->GetDynamicBufferCount();
- ++bindingIndex) {
- if (bgl->IsStorageBufferBinding(bindingIndex)) {
- mDynamicStorageBufferLengths[dynamicStorageBufferIndex++] =
- GetBindingAsBufferBinding(bindingIndex).size;
+ case BindingInfoType::Sampler: {
+ // No-op as samplers will be later initialized by CreateSamplers().
+ break;
}
}
}
- BindGroup::~BindGroup() = default;
-
- void BindGroup::DestroyImpl() {
- BindGroupBase::DestroyImpl();
- ToBackend(GetLayout())->DeallocateBindGroup(this, &mCPUViewAllocation);
- ASSERT(!mCPUViewAllocation.IsValid());
- }
-
- bool BindGroup::PopulateViews(ShaderVisibleDescriptorAllocator* viewAllocator) {
- const BindGroupLayout* bgl = ToBackend(GetLayout());
-
- const uint32_t descriptorCount = bgl->GetCbvUavSrvDescriptorCount();
- if (descriptorCount == 0 || viewAllocator->IsAllocationStillValid(mGPUViewAllocation)) {
- return true;
+ // Loop through the dynamic storage buffers and build a flat map from the index of the
+ // dynamic storage buffer to its binding size. The index |dynamicStorageBufferIndex|
+ // means that it is the i'th buffer that is both dynamic and storage, in increasing order
+ // of BindingNumber.
+ mDynamicStorageBufferLengths.resize(bgl->GetBindingCountInfo().dynamicStorageBufferCount);
+ uint32_t dynamicStorageBufferIndex = 0;
+ for (BindingIndex bindingIndex(0); bindingIndex < bgl->GetDynamicBufferCount();
+ ++bindingIndex) {
+ if (bgl->IsStorageBufferBinding(bindingIndex)) {
+ mDynamicStorageBufferLengths[dynamicStorageBufferIndex++] =
+ GetBindingAsBufferBinding(bindingIndex).size;
}
+ }
+}
- // Attempt to allocate descriptors for the currently bound shader-visible heaps.
- // If either failed, return early to re-allocate and switch the heaps.
- Device* device = ToBackend(GetDevice());
+BindGroup::~BindGroup() = default;
- D3D12_CPU_DESCRIPTOR_HANDLE baseCPUDescriptor;
- if (!viewAllocator->AllocateGPUDescriptors(descriptorCount,
- device->GetPendingCommandSerial(),
- &baseCPUDescriptor, &mGPUViewAllocation)) {
- return false;
- }
+void BindGroup::DestroyImpl() {
+ BindGroupBase::DestroyImpl();
+ ToBackend(GetLayout())->DeallocateBindGroup(this, &mCPUViewAllocation);
+ ASSERT(!mCPUViewAllocation.IsValid());
+}
- // CPU bindgroups are sparsely allocated across CPU heaps. Instead of doing
- // simple copies per bindgroup, a single non-simple copy could be issued.
- // TODO(dawn:155): Consider doing this optimization.
- device->GetD3D12Device()->CopyDescriptorsSimple(descriptorCount, baseCPUDescriptor,
- mCPUViewAllocation.GetBaseDescriptor(),
- D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV);
+bool BindGroup::PopulateViews(ShaderVisibleDescriptorAllocator* viewAllocator) {
+ const BindGroupLayout* bgl = ToBackend(GetLayout());
+ const uint32_t descriptorCount = bgl->GetCbvUavSrvDescriptorCount();
+ if (descriptorCount == 0 || viewAllocator->IsAllocationStillValid(mGPUViewAllocation)) {
return true;
}
- D3D12_GPU_DESCRIPTOR_HANDLE BindGroup::GetBaseViewDescriptor() const {
- return mGPUViewAllocation.GetBaseDescriptor();
- }
+ // Attempt to allocate descriptors for the currently bound shader-visible heaps.
+ // If either failed, return early to re-allocate and switch the heaps.
+ Device* device = ToBackend(GetDevice());
- D3D12_GPU_DESCRIPTOR_HANDLE BindGroup::GetBaseSamplerDescriptor() const {
- ASSERT(mSamplerAllocationEntry != nullptr);
- return mSamplerAllocationEntry->GetBaseDescriptor();
+ D3D12_CPU_DESCRIPTOR_HANDLE baseCPUDescriptor;
+ if (!viewAllocator->AllocateGPUDescriptors(descriptorCount, device->GetPendingCommandSerial(),
+ &baseCPUDescriptor, &mGPUViewAllocation)) {
+ return false;
}
- bool BindGroup::PopulateSamplers(Device* device,
- ShaderVisibleDescriptorAllocator* samplerAllocator) {
- if (mSamplerAllocationEntry == nullptr) {
- return true;
- }
- return mSamplerAllocationEntry->Populate(device, samplerAllocator);
- }
+ // CPU bindgroups are sparsely allocated across CPU heaps. Instead of doing
+ // simple copies per bindgroup, a single non-simple copy could be issued.
+ // TODO(dawn:155): Consider doing this optimization.
+ device->GetD3D12Device()->CopyDescriptorsSimple(descriptorCount, baseCPUDescriptor,
+ mCPUViewAllocation.GetBaseDescriptor(),
+ D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV);
- void BindGroup::SetSamplerAllocationEntry(Ref<SamplerHeapCacheEntry> entry) {
- mSamplerAllocationEntry = std::move(entry);
- }
+ return true;
+}
+
+D3D12_GPU_DESCRIPTOR_HANDLE BindGroup::GetBaseViewDescriptor() const {
+ return mGPUViewAllocation.GetBaseDescriptor();
+}
+
+D3D12_GPU_DESCRIPTOR_HANDLE BindGroup::GetBaseSamplerDescriptor() const {
+ ASSERT(mSamplerAllocationEntry != nullptr);
+ return mSamplerAllocationEntry->GetBaseDescriptor();
+}
- const BindGroup::DynamicStorageBufferLengths& BindGroup::GetDynamicStorageBufferLengths()
- const {
- return mDynamicStorageBufferLengths;
+bool BindGroup::PopulateSamplers(Device* device,
+ ShaderVisibleDescriptorAllocator* samplerAllocator) {
+ if (mSamplerAllocationEntry == nullptr) {
+ return true;
}
+ return mSamplerAllocationEntry->Populate(device, samplerAllocator);
+}
+
+void BindGroup::SetSamplerAllocationEntry(Ref<SamplerHeapCacheEntry> entry) {
+ mSamplerAllocationEntry = std::move(entry);
+}
+
+const BindGroup::DynamicStorageBufferLengths& BindGroup::GetDynamicStorageBufferLengths() const {
+ return mDynamicStorageBufferLengths;
+}
} // namespace dawn::native::d3d12
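
The raw (ByteAddressBuffer) views created above use DXGI_FORMAT_R32_TYPELESS, so FirstElement and NumElements are expressed in 4-byte elements rather than bytes. A standalone sketch of that conversion, using hypothetical helper names rather than Dawn's:

#include <cassert>
#include <cstdint>

struct RawBufferViewRange {
    uint64_t firstElement;  // byte offset / 4
    uint32_t numElements;   // byte size / 4
};

RawBufferViewRange ComputeRawViewRange(uint64_t byteOffset, uint64_t byteSize) {
    // Raw views require 4-byte alignment of both the offset and the size.
    assert(byteOffset % 4 == 0 && byteSize % 4 == 0);
    return {byteOffset / 4, static_cast<uint32_t>(byteSize / 4)};
}

int main() {
    RawBufferViewRange range = ComputeRawViewRange(256, 1024);
    assert(range.firstElement == 64 && range.numElements == 256);
    return 0;
}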
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/BindGroupD3D12.h b/chromium/third_party/dawn/src/dawn/native/d3d12/BindGroupD3D12.h
index 58498fc9ae4..243374a0bf9 100644
--- a/chromium/third_party/dawn/src/dawn/native/d3d12/BindGroupD3D12.h
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/BindGroupD3D12.h
@@ -24,45 +24,45 @@
namespace dawn::native::d3d12 {
- class Device;
- class SamplerHeapCacheEntry;
- class ShaderVisibleDescriptorAllocator;
+class Device;
+class SamplerHeapCacheEntry;
+class ShaderVisibleDescriptorAllocator;
- class BindGroup final : public BindGroupBase, public PlacementAllocated {
- public:
- static ResultOrError<Ref<BindGroup>> Create(Device* device,
- const BindGroupDescriptor* descriptor);
+class BindGroup final : public BindGroupBase, public PlacementAllocated {
+ public:
+ static ResultOrError<Ref<BindGroup>> Create(Device* device,
+ const BindGroupDescriptor* descriptor);
- BindGroup(Device* device,
- const BindGroupDescriptor* descriptor,
- uint32_t viewSizeIncrement,
- const CPUDescriptorHeapAllocation& viewAllocation);
+ BindGroup(Device* device,
+ const BindGroupDescriptor* descriptor,
+ uint32_t viewSizeIncrement,
+ const CPUDescriptorHeapAllocation& viewAllocation);
- // Returns true if the BindGroup was successfully populated.
- bool PopulateViews(ShaderVisibleDescriptorAllocator* viewAllocator);
- bool PopulateSamplers(Device* device, ShaderVisibleDescriptorAllocator* samplerAllocator);
+ // Returns true if the BindGroup was successfully populated.
+ bool PopulateViews(ShaderVisibleDescriptorAllocator* viewAllocator);
+ bool PopulateSamplers(Device* device, ShaderVisibleDescriptorAllocator* samplerAllocator);
- D3D12_GPU_DESCRIPTOR_HANDLE GetBaseViewDescriptor() const;
- D3D12_GPU_DESCRIPTOR_HANDLE GetBaseSamplerDescriptor() const;
+ D3D12_GPU_DESCRIPTOR_HANDLE GetBaseViewDescriptor() const;
+ D3D12_GPU_DESCRIPTOR_HANDLE GetBaseSamplerDescriptor() const;
- void SetSamplerAllocationEntry(Ref<SamplerHeapCacheEntry> entry);
+ void SetSamplerAllocationEntry(Ref<SamplerHeapCacheEntry> entry);
- using DynamicStorageBufferLengths =
- ityp::stack_vec<uint32_t, uint32_t, kMaxDynamicStorageBuffersPerPipelineLayout>;
- const DynamicStorageBufferLengths& GetDynamicStorageBufferLengths() const;
+ using DynamicStorageBufferLengths =
+ ityp::stack_vec<uint32_t, uint32_t, kMaxDynamicStorageBuffersPerPipelineLayout>;
+ const DynamicStorageBufferLengths& GetDynamicStorageBufferLengths() const;
- private:
- ~BindGroup() override;
+ private:
+ ~BindGroup() override;
- void DestroyImpl() override;
+ void DestroyImpl() override;
- Ref<SamplerHeapCacheEntry> mSamplerAllocationEntry;
+ Ref<SamplerHeapCacheEntry> mSamplerAllocationEntry;
- GPUDescriptorHeapAllocation mGPUViewAllocation;
- CPUDescriptorHeapAllocation mCPUViewAllocation;
+ GPUDescriptorHeapAllocation mGPUViewAllocation;
+ CPUDescriptorHeapAllocation mCPUViewAllocation;
- DynamicStorageBufferLengths mDynamicStorageBufferLengths;
- };
+ DynamicStorageBufferLengths mDynamicStorageBufferLengths;
+};
} // namespace dawn::native::d3d12
#endif // SRC_DAWN_NATIVE_D3D12_BINDGROUPD3D12_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/BindGroupLayoutD3D12.cpp b/chromium/third_party/dawn/src/dawn/native/d3d12/BindGroupLayoutD3D12.cpp
index 4e586a09477..9e5abadd1f8 100644
--- a/chromium/third_party/dawn/src/dawn/native/d3d12/BindGroupLayoutD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/BindGroupLayoutD3D12.cpp
@@ -14,172 +14,168 @@
#include "dawn/native/d3d12/BindGroupLayoutD3D12.h"
+#include <utility>
+
#include "dawn/common/BitSetIterator.h"
-#include "dawn/native/d3d12/BindGroupD3D12.h"
#include "dawn/native/d3d12/DeviceD3D12.h"
#include "dawn/native/d3d12/SamplerHeapCacheD3D12.h"
#include "dawn/native/d3d12/StagingDescriptorAllocatorD3D12.h"
namespace dawn::native::d3d12 {
- namespace {
- D3D12_DESCRIPTOR_RANGE_TYPE WGPUBindingInfoToDescriptorRangeType(
- const BindingInfo& bindingInfo) {
- switch (bindingInfo.bindingType) {
- case BindingInfoType::Buffer:
- switch (bindingInfo.buffer.type) {
- case wgpu::BufferBindingType::Uniform:
- return D3D12_DESCRIPTOR_RANGE_TYPE_CBV;
- case wgpu::BufferBindingType::Storage:
- case kInternalStorageBufferBinding:
- return D3D12_DESCRIPTOR_RANGE_TYPE_UAV;
- case wgpu::BufferBindingType::ReadOnlyStorage:
- return D3D12_DESCRIPTOR_RANGE_TYPE_SRV;
- case wgpu::BufferBindingType::Undefined:
- UNREACHABLE();
- }
-
- case BindingInfoType::Sampler:
- return D3D12_DESCRIPTOR_RANGE_TYPE_SAMPLER;
-
- case BindingInfoType::Texture:
- case BindingInfoType::ExternalTexture:
+namespace {
+D3D12_DESCRIPTOR_RANGE_TYPE WGPUBindingInfoToDescriptorRangeType(const BindingInfo& bindingInfo) {
+ switch (bindingInfo.bindingType) {
+ case BindingInfoType::Buffer:
+ switch (bindingInfo.buffer.type) {
+ case wgpu::BufferBindingType::Uniform:
+ return D3D12_DESCRIPTOR_RANGE_TYPE_CBV;
+ case wgpu::BufferBindingType::Storage:
+ case kInternalStorageBufferBinding:
+ return D3D12_DESCRIPTOR_RANGE_TYPE_UAV;
+ case wgpu::BufferBindingType::ReadOnlyStorage:
return D3D12_DESCRIPTOR_RANGE_TYPE_SRV;
+ case wgpu::BufferBindingType::Undefined:
+ UNREACHABLE();
+ }
+
+ case BindingInfoType::Sampler:
+ return D3D12_DESCRIPTOR_RANGE_TYPE_SAMPLER;
- case BindingInfoType::StorageTexture:
- switch (bindingInfo.storageTexture.access) {
- case wgpu::StorageTextureAccess::WriteOnly:
- return D3D12_DESCRIPTOR_RANGE_TYPE_UAV;
- case wgpu::StorageTextureAccess::Undefined:
- UNREACHABLE();
- }
+ case BindingInfoType::Texture:
+ case BindingInfoType::ExternalTexture:
+ return D3D12_DESCRIPTOR_RANGE_TYPE_SRV;
+
+ case BindingInfoType::StorageTexture:
+ switch (bindingInfo.storageTexture.access) {
+ case wgpu::StorageTextureAccess::WriteOnly:
+ return D3D12_DESCRIPTOR_RANGE_TYPE_UAV;
+ case wgpu::StorageTextureAccess::Undefined:
+ UNREACHABLE();
}
- }
- } // anonymous namespace
-
- // static
- Ref<BindGroupLayout> BindGroupLayout::Create(
- Device* device,
- const BindGroupLayoutDescriptor* descriptor,
- PipelineCompatibilityToken pipelineCompatibilityToken) {
- return AcquireRef(new BindGroupLayout(device, descriptor, pipelineCompatibilityToken));
}
-
- BindGroupLayout::BindGroupLayout(Device* device,
- const BindGroupLayoutDescriptor* descriptor,
- PipelineCompatibilityToken pipelineCompatibilityToken)
- : BindGroupLayoutBase(device, descriptor, pipelineCompatibilityToken),
- mDescriptorHeapOffsets(GetBindingCount()),
- mShaderRegisters(GetBindingCount()),
- mCbvUavSrvDescriptorCount(0),
- mSamplerDescriptorCount(0),
- mBindGroupAllocator(MakeFrontendBindGroupAllocator<BindGroup>(4096)) {
- for (BindingIndex bindingIndex{0}; bindingIndex < GetBindingCount(); ++bindingIndex) {
- const BindingInfo& bindingInfo = GetBindingInfo(bindingIndex);
-
- D3D12_DESCRIPTOR_RANGE_TYPE descriptorRangeType =
- WGPUBindingInfoToDescriptorRangeType(bindingInfo);
- mShaderRegisters[bindingIndex] = uint32_t(bindingInfo.binding);
-
- // For dynamic resources, Dawn uses root descriptor in D3D12 backend. So there is no
- // need to allocate the descriptor from descriptor heap or create descriptor ranges.
- if (bindingIndex < GetDynamicBufferCount()) {
+}
+} // anonymous namespace
+
+// static
+Ref<BindGroupLayout> BindGroupLayout::Create(
+ Device* device,
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken) {
+ return AcquireRef(new BindGroupLayout(device, descriptor, pipelineCompatibilityToken));
+}
+
+BindGroupLayout::BindGroupLayout(Device* device,
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken)
+ : BindGroupLayoutBase(device, descriptor, pipelineCompatibilityToken),
+ mDescriptorHeapOffsets(GetBindingCount()),
+ mShaderRegisters(GetBindingCount()),
+ mCbvUavSrvDescriptorCount(0),
+ mSamplerDescriptorCount(0),
+ mBindGroupAllocator(MakeFrontendBindGroupAllocator<BindGroup>(4096)) {
+ for (BindingIndex bindingIndex{0}; bindingIndex < GetBindingCount(); ++bindingIndex) {
+ const BindingInfo& bindingInfo = GetBindingInfo(bindingIndex);
+
+ D3D12_DESCRIPTOR_RANGE_TYPE descriptorRangeType =
+ WGPUBindingInfoToDescriptorRangeType(bindingInfo);
+ mShaderRegisters[bindingIndex] = uint32_t(bindingInfo.binding);
+
+ // For dynamic resources, Dawn uses root descriptors in the D3D12 backend, so there is
+ // no need to allocate descriptors from the descriptor heap or create descriptor ranges.
+ if (bindingIndex < GetDynamicBufferCount()) {
+ continue;
+ }
+ ASSERT(!bindingInfo.buffer.hasDynamicOffset);
+
+ mDescriptorHeapOffsets[bindingIndex] =
+ descriptorRangeType == D3D12_DESCRIPTOR_RANGE_TYPE_SAMPLER
+ ? mSamplerDescriptorCount++
+ : mCbvUavSrvDescriptorCount++;
+
+ D3D12_DESCRIPTOR_RANGE range;
+ range.RangeType = descriptorRangeType;
+ range.NumDescriptors = 1;
+ range.BaseShaderRegister = GetShaderRegister(bindingIndex);
+ range.RegisterSpace = kRegisterSpacePlaceholder;
+ range.OffsetInDescriptorsFromTableStart = D3D12_DESCRIPTOR_RANGE_OFFSET_APPEND;
+
+ std::vector<D3D12_DESCRIPTOR_RANGE>& descriptorRanges =
+ descriptorRangeType == D3D12_DESCRIPTOR_RANGE_TYPE_SAMPLER ? mSamplerDescriptorRanges
+ : mCbvUavSrvDescriptorRanges;
+
+ // Try to join this range with the previous one, if the current range is a continuation
+ // of the previous. This is possible because the binding infos in the base type are
+ // sorted.
+ if (descriptorRanges.size() >= 2) {
+ D3D12_DESCRIPTOR_RANGE& previous = descriptorRanges.back();
+ if (previous.RangeType == range.RangeType &&
+ previous.BaseShaderRegister + previous.NumDescriptors == range.BaseShaderRegister) {
+ previous.NumDescriptors += range.NumDescriptors;
continue;
}
- ASSERT(!bindingInfo.buffer.hasDynamicOffset);
-
- mDescriptorHeapOffsets[bindingIndex] =
- descriptorRangeType == D3D12_DESCRIPTOR_RANGE_TYPE_SAMPLER
- ? mSamplerDescriptorCount++
- : mCbvUavSrvDescriptorCount++;
-
- D3D12_DESCRIPTOR_RANGE range;
- range.RangeType = descriptorRangeType;
- range.NumDescriptors = 1;
- range.BaseShaderRegister = GetShaderRegister(bindingIndex);
- range.RegisterSpace = kRegisterSpacePlaceholder;
- range.OffsetInDescriptorsFromTableStart = D3D12_DESCRIPTOR_RANGE_OFFSET_APPEND;
-
- std::vector<D3D12_DESCRIPTOR_RANGE>& descriptorRanges =
- descriptorRangeType == D3D12_DESCRIPTOR_RANGE_TYPE_SAMPLER
- ? mSamplerDescriptorRanges
- : mCbvUavSrvDescriptorRanges;
-
- // Try to join this range with the previous one, if the current range is a continuation
- // of the previous. This is possible because the binding infos in the base type are
- // sorted.
- if (descriptorRanges.size() >= 2) {
- D3D12_DESCRIPTOR_RANGE& previous = descriptorRanges.back();
- if (previous.RangeType == range.RangeType &&
- previous.BaseShaderRegister + previous.NumDescriptors ==
- range.BaseShaderRegister) {
- previous.NumDescriptors += range.NumDescriptors;
- continue;
- }
- }
-
- descriptorRanges.push_back(range);
}
- mViewAllocator = device->GetViewStagingDescriptorAllocator(GetCbvUavSrvDescriptorCount());
- mSamplerAllocator =
- device->GetSamplerStagingDescriptorAllocator(GetSamplerDescriptorCount());
+ descriptorRanges.push_back(range);
}
- ResultOrError<Ref<BindGroup>> BindGroupLayout::AllocateBindGroup(
- Device* device,
- const BindGroupDescriptor* descriptor) {
- uint32_t viewSizeIncrement = 0;
- CPUDescriptorHeapAllocation viewAllocation;
- if (GetCbvUavSrvDescriptorCount() > 0) {
- DAWN_TRY_ASSIGN(viewAllocation, mViewAllocator->AllocateCPUDescriptors());
- viewSizeIncrement = mViewAllocator->GetSizeIncrement();
- }
-
- Ref<BindGroup> bindGroup = AcquireRef<BindGroup>(
- mBindGroupAllocator.Allocate(device, descriptor, viewSizeIncrement, viewAllocation));
+ mViewAllocator = device->GetViewStagingDescriptorAllocator(GetCbvUavSrvDescriptorCount());
+ mSamplerAllocator = device->GetSamplerStagingDescriptorAllocator(GetSamplerDescriptorCount());
+}
+
+ResultOrError<Ref<BindGroup>> BindGroupLayout::AllocateBindGroup(
+ Device* device,
+ const BindGroupDescriptor* descriptor) {
+ uint32_t viewSizeIncrement = 0;
+ CPUDescriptorHeapAllocation viewAllocation;
+ if (GetCbvUavSrvDescriptorCount() > 0) {
+ DAWN_TRY_ASSIGN(viewAllocation, mViewAllocator->AllocateCPUDescriptors());
+ viewSizeIncrement = mViewAllocator->GetSizeIncrement();
+ }
- if (GetSamplerDescriptorCount() > 0) {
- Ref<SamplerHeapCacheEntry> samplerHeapCacheEntry;
- DAWN_TRY_ASSIGN(samplerHeapCacheEntry, device->GetSamplerHeapCache()->GetOrCreate(
- bindGroup.Get(), mSamplerAllocator));
- bindGroup->SetSamplerAllocationEntry(std::move(samplerHeapCacheEntry));
- }
+ Ref<BindGroup> bindGroup = AcquireRef<BindGroup>(
+ mBindGroupAllocator.Allocate(device, descriptor, viewSizeIncrement, viewAllocation));
- return bindGroup;
+ if (GetSamplerDescriptorCount() > 0) {
+ Ref<SamplerHeapCacheEntry> samplerHeapCacheEntry;
+ DAWN_TRY_ASSIGN(samplerHeapCacheEntry, device->GetSamplerHeapCache()->GetOrCreate(
+ bindGroup.Get(), mSamplerAllocator));
+ bindGroup->SetSamplerAllocationEntry(std::move(samplerHeapCacheEntry));
}
- void BindGroupLayout::DeallocateBindGroup(BindGroup* bindGroup,
- CPUDescriptorHeapAllocation* viewAllocation) {
- if (viewAllocation->IsValid()) {
- mViewAllocator->Deallocate(viewAllocation);
- }
+ return bindGroup;
+}
- mBindGroupAllocator.Deallocate(bindGroup);
+void BindGroupLayout::DeallocateBindGroup(BindGroup* bindGroup,
+ CPUDescriptorHeapAllocation* viewAllocation) {
+ if (viewAllocation->IsValid()) {
+ mViewAllocator->Deallocate(viewAllocation);
}
- ityp::span<BindingIndex, const uint32_t> BindGroupLayout::GetDescriptorHeapOffsets() const {
- return {mDescriptorHeapOffsets.data(), mDescriptorHeapOffsets.size()};
- }
+ mBindGroupAllocator.Deallocate(bindGroup);
+}
- uint32_t BindGroupLayout::GetShaderRegister(BindingIndex bindingIndex) const {
- return mShaderRegisters[bindingIndex];
- }
+ityp::span<BindingIndex, const uint32_t> BindGroupLayout::GetDescriptorHeapOffsets() const {
+ return {mDescriptorHeapOffsets.data(), mDescriptorHeapOffsets.size()};
+}
- uint32_t BindGroupLayout::GetCbvUavSrvDescriptorCount() const {
- return mCbvUavSrvDescriptorCount;
- }
+uint32_t BindGroupLayout::GetShaderRegister(BindingIndex bindingIndex) const {
+ return mShaderRegisters[bindingIndex];
+}
- uint32_t BindGroupLayout::GetSamplerDescriptorCount() const {
- return mSamplerDescriptorCount;
- }
+uint32_t BindGroupLayout::GetCbvUavSrvDescriptorCount() const {
+ return mCbvUavSrvDescriptorCount;
+}
- const std::vector<D3D12_DESCRIPTOR_RANGE>& BindGroupLayout::GetCbvUavSrvDescriptorRanges()
- const {
- return mCbvUavSrvDescriptorRanges;
- }
+uint32_t BindGroupLayout::GetSamplerDescriptorCount() const {
+ return mSamplerDescriptorCount;
+}
- const std::vector<D3D12_DESCRIPTOR_RANGE>& BindGroupLayout::GetSamplerDescriptorRanges() const {
- return mSamplerDescriptorRanges;
- }
+const std::vector<D3D12_DESCRIPTOR_RANGE>& BindGroupLayout::GetCbvUavSrvDescriptorRanges() const {
+ return mCbvUavSrvDescriptorRanges;
+}
+
+const std::vector<D3D12_DESCRIPTOR_RANGE>& BindGroupLayout::GetSamplerDescriptorRanges() const {
+ return mSamplerDescriptorRanges;
+}
} // namespace dawn::native::d3d12
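The range-coalescing step in the hunk above folds a new D3D12_DESCRIPTOR_RANGE into the previous one when both have the same RangeType and the new BaseShaderRegister directly continues the previous interval. A minimal standalone C++ sketch of that merge, using a stand-in struct so it builds without d3d12.h (names are illustrative, not Dawn's, and unlike the code above this sketch attempts the merge whenever any previous range exists):

#include <cstdint>
#include <vector>

// Stand-in for the few D3D12_DESCRIPTOR_RANGE fields the merge looks at.
struct Range {
    int rangeType;                // CBV/UAV/SRV vs. sampler
    uint32_t numDescriptors;
    uint32_t baseShaderRegister;
};

// Append 'next', folding it into the last range when it continues the same
// register interval of the same type.
void AppendOrMerge(std::vector<Range>& ranges, const Range& next) {
    if (!ranges.empty()) {
        Range& previous = ranges.back();
        if (previous.rangeType == next.rangeType &&
            previous.baseShaderRegister + previous.numDescriptors == next.baseShaderRegister) {
            previous.numDescriptors += next.numDescriptors;
            return;
        }
    }
    ranges.push_back(next);
}

int main() {
    std::vector<Range> ranges;
    AppendOrMerge(ranges, {0, 1, 0});
    AppendOrMerge(ranges, {0, 1, 1});
    AppendOrMerge(ranges, {0, 1, 2});
    // ranges now holds a single {type 0, 3 descriptors, base register 0} entry,
    // which keeps the generated root signature small.
    return ranges.size() == 1 ? 0 : 1;
}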
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/BindGroupLayoutD3D12.h b/chromium/third_party/dawn/src/dawn/native/d3d12/BindGroupLayoutD3D12.h
index 5a3596421df..f045492ced6 100644
--- a/chromium/third_party/dawn/src/dawn/native/d3d12/BindGroupLayoutD3D12.h
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/BindGroupLayoutD3D12.h
@@ -15,79 +15,81 @@
#ifndef SRC_DAWN_NATIVE_D3D12_BINDGROUPLAYOUTD3D12_H_
#define SRC_DAWN_NATIVE_D3D12_BINDGROUPLAYOUTD3D12_H_
+#include <vector>
+
#include "dawn/native/BindGroupLayout.h"
#include "dawn/common/SlabAllocator.h"
#include "dawn/common/ityp_stack_vec.h"
+#include "dawn/native/d3d12/BindGroupD3D12.h"
#include "dawn/native/d3d12/d3d12_platform.h"
namespace dawn::native::d3d12 {
- class BindGroup;
- class CPUDescriptorHeapAllocation;
- class Device;
- class StagingDescriptorAllocator;
+class CPUDescriptorHeapAllocation;
+class Device;
+class StagingDescriptorAllocator;
- // A purposefully invalid register space.
+// A purposefully invalid register space.
+//
+// We use the bind group index as the register space, but don't know the bind group index until
+// pipeline layout creation time. This value should be replaced in PipelineLayoutD3D12.
+static constexpr uint32_t kRegisterSpacePlaceholder =
+ D3D12_DRIVER_RESERVED_REGISTER_SPACE_VALUES_START;
+
+class BindGroupLayout final : public BindGroupLayoutBase {
+ public:
+ static Ref<BindGroupLayout> Create(Device* device,
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken);
+
+ ResultOrError<Ref<BindGroup>> AllocateBindGroup(Device* device,
+ const BindGroupDescriptor* descriptor);
+ void DeallocateBindGroup(BindGroup* bindGroup, CPUDescriptorHeapAllocation* viewAllocation);
+
+ // The offset (in descriptor count) into the corresponding descriptor heap. Not valid for
+ // dynamic binding indexes.
+ ityp::span<BindingIndex, const uint32_t> GetDescriptorHeapOffsets() const;
+
+ // The D3D shader register that the Dawn binding index is mapped to by this bind group
+ // layout.
+ uint32_t GetShaderRegister(BindingIndex bindingIndex) const;
+
+ // Counts of descriptors in the descriptor tables.
+ uint32_t GetCbvUavSrvDescriptorCount() const;
+ uint32_t GetSamplerDescriptorCount() const;
+
+ const std::vector<D3D12_DESCRIPTOR_RANGE>& GetCbvUavSrvDescriptorRanges() const;
+ const std::vector<D3D12_DESCRIPTOR_RANGE>& GetSamplerDescriptorRanges() const;
+
+ private:
+ BindGroupLayout(Device* device,
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken);
+ ~BindGroupLayout() override = default;
+
+ // Contains the offset into the descriptor heap for the given resource view. Samplers and
+ // non-samplers are stored in separate descriptor heaps, so the offsets should be unique
+ // within each group and tightly packed.
//
- // We use the bind group index as the register space, but don't know the bind group index until
- // pipeline layout creation time. This value should be replaced in PipelineLayoutD3D12.
- static constexpr uint32_t kRegisterSpacePlaceholder =
- D3D12_DRIVER_RESERVED_REGISTER_SPACE_VALUES_START;
-
- class BindGroupLayout final : public BindGroupLayoutBase {
- public:
- static Ref<BindGroupLayout> Create(Device* device,
- const BindGroupLayoutDescriptor* descriptor,
- PipelineCompatibilityToken pipelineCompatibilityToken);
-
- ResultOrError<Ref<BindGroup>> AllocateBindGroup(Device* device,
- const BindGroupDescriptor* descriptor);
- void DeallocateBindGroup(BindGroup* bindGroup, CPUDescriptorHeapAllocation* viewAllocation);
-
- // The offset (in descriptor count) into the corresponding descriptor heap. Not valid for
- // dynamic binding indexes.
- ityp::span<BindingIndex, const uint32_t> GetDescriptorHeapOffsets() const;
-
- // The D3D shader register that the Dawn binding index is mapped to by this bind group
- // layout.
- uint32_t GetShaderRegister(BindingIndex bindingIndex) const;
-
- // Counts of descriptors in the descriptor tables.
- uint32_t GetCbvUavSrvDescriptorCount() const;
- uint32_t GetSamplerDescriptorCount() const;
-
- const std::vector<D3D12_DESCRIPTOR_RANGE>& GetCbvUavSrvDescriptorRanges() const;
- const std::vector<D3D12_DESCRIPTOR_RANGE>& GetSamplerDescriptorRanges() const;
-
- private:
- BindGroupLayout(Device* device,
- const BindGroupLayoutDescriptor* descriptor,
- PipelineCompatibilityToken pipelineCompatibilityToken);
- ~BindGroupLayout() override = default;
-
- // Contains the offset into the descriptor heap for the given resource view. Samplers and
- // non-samplers are stored in separate descriptor heaps, so the offsets should be unique
- // within each group and tightly packed.
- //
- // Dynamic resources are not used here since their descriptors are placed directly in root
- // parameters.
- ityp::stack_vec<BindingIndex, uint32_t, kMaxOptimalBindingsPerGroup> mDescriptorHeapOffsets;
-
- // Contains the shader register this binding is mapped to.
- ityp::stack_vec<BindingIndex, uint32_t, kMaxOptimalBindingsPerGroup> mShaderRegisters;
-
- uint32_t mCbvUavSrvDescriptorCount;
- uint32_t mSamplerDescriptorCount;
-
- std::vector<D3D12_DESCRIPTOR_RANGE> mCbvUavSrvDescriptorRanges;
- std::vector<D3D12_DESCRIPTOR_RANGE> mSamplerDescriptorRanges;
-
- SlabAllocator<BindGroup> mBindGroupAllocator;
-
- StagingDescriptorAllocator* mSamplerAllocator = nullptr;
- StagingDescriptorAllocator* mViewAllocator = nullptr;
- };
+ // Dynamic resources are not used here since their descriptors are placed directly in root
+ // parameters.
+ ityp::stack_vec<BindingIndex, uint32_t, kMaxOptimalBindingsPerGroup> mDescriptorHeapOffsets;
+
+ // Contains the shader register this binding is mapped to.
+ ityp::stack_vec<BindingIndex, uint32_t, kMaxOptimalBindingsPerGroup> mShaderRegisters;
+
+ uint32_t mCbvUavSrvDescriptorCount;
+ uint32_t mSamplerDescriptorCount;
+
+ std::vector<D3D12_DESCRIPTOR_RANGE> mCbvUavSrvDescriptorRanges;
+ std::vector<D3D12_DESCRIPTOR_RANGE> mSamplerDescriptorRanges;
+
+ SlabAllocator<BindGroup> mBindGroupAllocator;
+
+ StagingDescriptorAllocator* mSamplerAllocator = nullptr;
+ StagingDescriptorAllocator* mViewAllocator = nullptr;
+};
} // namespace dawn::native::d3d12
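The kRegisterSpacePlaceholder comment above describes a two-phase scheme: descriptor ranges are created with a deliberately invalid register space, and the pipeline layout later substitutes the bind group index once it is known. A rough, purely illustrative sketch of that substitution (the real replacement happens in PipelineLayoutD3D12, which is not part of this diff; the helper and constant below are hypothetical):

#include <cstdint>
#include <vector>

// Stand-in for the reserved placeholder; the real code uses
// D3D12_DRIVER_RESERVED_REGISTER_SPACE_VALUES_START.
constexpr uint32_t kPlaceholderSpace = 0xFFFFFFF0u;

struct Range {
    uint32_t registerSpace = kPlaceholderSpace;
    // Other D3D12_DESCRIPTOR_RANGE fields elided.
};

// Once the bind group index is known, every placeholder space is replaced with it.
void AssignRegisterSpace(std::vector<Range>& ranges, uint32_t bindGroupIndex) {
    for (Range& range : ranges) {
        if (range.registerSpace == kPlaceholderSpace) {
            range.registerSpace = bindGroupIndex;
        }
    }
}

int main() {
    std::vector<Range> ranges(3);
    AssignRegisterSpace(ranges, 2);  // e.g. this layout ends up as bind group 2
    return ranges.front().registerSpace == 2 ? 0 : 1;
}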
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/BlobD3D12.cpp b/chromium/third_party/dawn/src/dawn/native/d3d12/BlobD3D12.cpp
new file mode 100644
index 00000000000..ef9bbb99054
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/BlobD3D12.cpp
@@ -0,0 +1,31 @@
+// Copyright 2022 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/d3d12/BlobD3D12.h"
+
+namespace dawn::native {
+
+Blob CreateBlob(ComPtr<ID3DBlob> blob) {
+ // Detach so the deleter callback can "own" the reference
+ ID3DBlob* ptr = blob.Detach();
+ return Blob::UnsafeCreateWithDeleter(reinterpret_cast<uint8_t*>(ptr->GetBufferPointer()),
+ ptr->GetBufferSize(), [=]() {
+ // Reattach and drop to delete it.
+ ComPtr<ID3DBlob> b;
+ b.Attach(ptr);
+ b = nullptr;
+ });
+}
+
+} // namespace dawn::native
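CreateBlob above moves ownership of the ID3DBlob into the returned Blob's deleter: Detach() hands the ComPtr's reference to the lambda as a raw pointer, and Attach() inside the lambda re-adopts that reference so it is released exactly once when the deleter runs. The same move-ownership-into-a-callback pattern, sketched with std::unique_ptr so it is self-contained (nothing below is Dawn or D3D API):

#include <functional>
#include <memory>
#include <string>

// Move ownership out of a smart pointer into a cleanup callback, which
// re-adopts the raw pointer when it finally runs (compare Detach()/Attach()).
std::function<void()> MakeDeleter(std::unique_ptr<std::string> data) {
    std::string* raw = data.release();  // like ComPtr::Detach(): the lambda now owns it
    return [raw]() {
        std::unique_ptr<std::string> adopt(raw);  // like Attach(): re-own, then destroy
    };
}

int main() {
    auto deleter = MakeDeleter(std::make_unique<std::string>("cached shader bytes"));
    // The underlying storage stays valid until the deleter runs.
    deleter();  // releases the storage exactly once
}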
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/BlobD3D12.h b/chromium/third_party/dawn/src/dawn/native/d3d12/BlobD3D12.h
new file mode 100644
index 00000000000..563ac7341c0
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/BlobD3D12.h
@@ -0,0 +1,22 @@
+// Copyright 2022 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/Blob.h"
+#include "dawn/native/d3d12/d3d12_platform.h"
+
+namespace dawn::native {
+
+Blob CreateBlob(ComPtr<ID3DBlob> blob);
+
+} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/BufferD3D12.cpp b/chromium/third_party/dawn/src/dawn/native/d3d12/BufferD3D12.cpp
index 84fbbb67c7e..0488fce6aa9 100644
--- a/chromium/third_party/dawn/src/dawn/native/d3d12/BufferD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/BufferD3D12.cpp
@@ -14,6 +14,8 @@
#include "dawn/native/d3d12/BufferD3D12.h"
+#include <algorithm>
+
#include "dawn/common/Assert.h"
#include "dawn/common/Constants.h"
#include "dawn/common/Math.h"
@@ -25,474 +27,471 @@
#include "dawn/native/d3d12/HeapD3D12.h"
#include "dawn/native/d3d12/ResidencyManagerD3D12.h"
#include "dawn/native/d3d12/UtilsD3D12.h"
+#include "dawn/platform/DawnPlatform.h"
+#include "dawn/platform/tracing/TraceEvent.h"
namespace dawn::native::d3d12 {
- namespace {
- D3D12_RESOURCE_FLAGS D3D12ResourceFlags(wgpu::BufferUsage usage) {
- D3D12_RESOURCE_FLAGS flags = D3D12_RESOURCE_FLAG_NONE;
+namespace {
+D3D12_RESOURCE_FLAGS D3D12ResourceFlags(wgpu::BufferUsage usage) {
+ D3D12_RESOURCE_FLAGS flags = D3D12_RESOURCE_FLAG_NONE;
- if (usage & (wgpu::BufferUsage::Storage | kInternalStorageBuffer)) {
- flags |= D3D12_RESOURCE_FLAG_ALLOW_UNORDERED_ACCESS;
- }
+ if (usage & (wgpu::BufferUsage::Storage | kInternalStorageBuffer)) {
+ flags |= D3D12_RESOURCE_FLAG_ALLOW_UNORDERED_ACCESS;
+ }
- return flags;
- }
+ return flags;
+}
- D3D12_RESOURCE_STATES D3D12BufferUsage(wgpu::BufferUsage usage) {
- D3D12_RESOURCE_STATES resourceState = D3D12_RESOURCE_STATE_COMMON;
-
- if (usage & wgpu::BufferUsage::CopySrc) {
- resourceState |= D3D12_RESOURCE_STATE_COPY_SOURCE;
- }
- if (usage & wgpu::BufferUsage::CopyDst) {
- resourceState |= D3D12_RESOURCE_STATE_COPY_DEST;
- }
- if (usage & (wgpu::BufferUsage::Vertex | wgpu::BufferUsage::Uniform)) {
- resourceState |= D3D12_RESOURCE_STATE_VERTEX_AND_CONSTANT_BUFFER;
- }
- if (usage & wgpu::BufferUsage::Index) {
- resourceState |= D3D12_RESOURCE_STATE_INDEX_BUFFER;
- }
- if (usage & (wgpu::BufferUsage::Storage | kInternalStorageBuffer)) {
- resourceState |= D3D12_RESOURCE_STATE_UNORDERED_ACCESS;
- }
- if (usage & kReadOnlyStorageBuffer) {
- resourceState |= (D3D12_RESOURCE_STATE_PIXEL_SHADER_RESOURCE |
- D3D12_RESOURCE_STATE_NON_PIXEL_SHADER_RESOURCE);
- }
- if (usage & wgpu::BufferUsage::Indirect) {
- resourceState |= D3D12_RESOURCE_STATE_INDIRECT_ARGUMENT;
- }
- if (usage & wgpu::BufferUsage::QueryResolve) {
- resourceState |= D3D12_RESOURCE_STATE_COPY_DEST;
- }
-
- return resourceState;
- }
+D3D12_RESOURCE_STATES D3D12BufferUsage(wgpu::BufferUsage usage) {
+ D3D12_RESOURCE_STATES resourceState = D3D12_RESOURCE_STATE_COMMON;
- D3D12_HEAP_TYPE D3D12HeapType(wgpu::BufferUsage allowedUsage) {
- if (allowedUsage & wgpu::BufferUsage::MapRead) {
- return D3D12_HEAP_TYPE_READBACK;
- } else if (allowedUsage & wgpu::BufferUsage::MapWrite) {
- return D3D12_HEAP_TYPE_UPLOAD;
- } else {
- return D3D12_HEAP_TYPE_DEFAULT;
- }
- }
+ if (usage & wgpu::BufferUsage::CopySrc) {
+ resourceState |= D3D12_RESOURCE_STATE_COPY_SOURCE;
+ }
+ if (usage & wgpu::BufferUsage::CopyDst) {
+ resourceState |= D3D12_RESOURCE_STATE_COPY_DEST;
+ }
+ if (usage & (wgpu::BufferUsage::Vertex | wgpu::BufferUsage::Uniform)) {
+ resourceState |= D3D12_RESOURCE_STATE_VERTEX_AND_CONSTANT_BUFFER;
+ }
+ if (usage & wgpu::BufferUsage::Index) {
+ resourceState |= D3D12_RESOURCE_STATE_INDEX_BUFFER;
+ }
+ if (usage & (wgpu::BufferUsage::Storage | kInternalStorageBuffer)) {
+ resourceState |= D3D12_RESOURCE_STATE_UNORDERED_ACCESS;
+ }
+ if (usage & kReadOnlyStorageBuffer) {
+ resourceState |= (D3D12_RESOURCE_STATE_PIXEL_SHADER_RESOURCE |
+ D3D12_RESOURCE_STATE_NON_PIXEL_SHADER_RESOURCE);
+ }
+ if (usage & wgpu::BufferUsage::Indirect) {
+ resourceState |= D3D12_RESOURCE_STATE_INDIRECT_ARGUMENT;
+ }
+ if (usage & wgpu::BufferUsage::QueryResolve) {
+ resourceState |= D3D12_RESOURCE_STATE_COPY_DEST;
+ }
- size_t D3D12BufferSizeAlignment(wgpu::BufferUsage usage) {
- if ((usage & wgpu::BufferUsage::Uniform) != 0) {
- // D3D buffers are always resource size aligned to 64KB. However, D3D12's validation
-                // forbids binding a CBV to an unaligned size. To prevent this, one can always safely
- // align the buffer size to the CBV data alignment as other buffer usages
- // ignore it (no size check). The validation will still enforce bound checks with
- // the unaligned size returned by GetSize().
- // https://docs.microsoft.com/en-us/windows/win32/direct3d12/uploading-resources#buffer-alignment
- return D3D12_CONSTANT_BUFFER_DATA_PLACEMENT_ALIGNMENT;
- }
- return 1;
- }
- } // namespace
+ return resourceState;
+}
- // static
- ResultOrError<Ref<Buffer>> Buffer::Create(Device* device, const BufferDescriptor* descriptor) {
- Ref<Buffer> buffer = AcquireRef(new Buffer(device, descriptor));
- DAWN_TRY(buffer->Initialize(descriptor->mappedAtCreation));
- return buffer;
+D3D12_HEAP_TYPE D3D12HeapType(wgpu::BufferUsage allowedUsage) {
+ if (allowedUsage & wgpu::BufferUsage::MapRead) {
+ return D3D12_HEAP_TYPE_READBACK;
+ } else if (allowedUsage & wgpu::BufferUsage::MapWrite) {
+ return D3D12_HEAP_TYPE_UPLOAD;
+ } else {
+ return D3D12_HEAP_TYPE_DEFAULT;
+ }
+}
+
+size_t D3D12BufferSizeAlignment(wgpu::BufferUsage usage) {
+ if ((usage & wgpu::BufferUsage::Uniform) != 0) {
+ // D3D buffers are always resource size aligned to 64KB. However, D3D12's validation
+        // forbids binding a CBV to an unaligned size. To prevent this, one can always safely
+ // align the buffer size to the CBV data alignment as other buffer usages
+ // ignore it (no size check). The validation will still enforce bound checks with
+ // the unaligned size returned by GetSize().
+ // https://docs.microsoft.com/en-us/windows/win32/direct3d12/uploading-resources#buffer-alignment
+ return D3D12_CONSTANT_BUFFER_DATA_PLACEMENT_ALIGNMENT;
+ }
+ return 1;
+}
+} // namespace
+
+// static
+ResultOrError<Ref<Buffer>> Buffer::Create(Device* device, const BufferDescriptor* descriptor) {
+ Ref<Buffer> buffer = AcquireRef(new Buffer(device, descriptor));
+ DAWN_TRY(buffer->Initialize(descriptor->mappedAtCreation));
+ return buffer;
+}
+
+Buffer::Buffer(Device* device, const BufferDescriptor* descriptor)
+ : BufferBase(device, descriptor) {}
+
+MaybeError Buffer::Initialize(bool mappedAtCreation) {
+ // Allocate at least 4 bytes so clamped accesses are always in bounds.
+ uint64_t size = std::max(GetSize(), uint64_t(4u));
+ size_t alignment = D3D12BufferSizeAlignment(GetUsage());
+ if (size > std::numeric_limits<uint64_t>::max() - alignment) {
+        // Alignment would overflow.
+ return DAWN_OUT_OF_MEMORY_ERROR("Buffer allocation is too large");
+ }
+ mAllocatedSize = Align(size, alignment);
+
+ D3D12_RESOURCE_DESC resourceDescriptor;
+ resourceDescriptor.Dimension = D3D12_RESOURCE_DIMENSION_BUFFER;
+ resourceDescriptor.Alignment = 0;
+ resourceDescriptor.Width = mAllocatedSize;
+ resourceDescriptor.Height = 1;
+ resourceDescriptor.DepthOrArraySize = 1;
+ resourceDescriptor.MipLevels = 1;
+ resourceDescriptor.Format = DXGI_FORMAT_UNKNOWN;
+ resourceDescriptor.SampleDesc.Count = 1;
+ resourceDescriptor.SampleDesc.Quality = 0;
+ resourceDescriptor.Layout = D3D12_TEXTURE_LAYOUT_ROW_MAJOR;
+ // Add CopyDst for non-mappable buffer initialization with mappedAtCreation
+ // and robust resource initialization.
+ resourceDescriptor.Flags = D3D12ResourceFlags(GetUsage() | wgpu::BufferUsage::CopyDst);
+
+ auto heapType = D3D12HeapType(GetUsage());
+ auto bufferUsage = D3D12_RESOURCE_STATE_COMMON;
+
+ // D3D12 requires buffers on the READBACK heap to have the D3D12_RESOURCE_STATE_COPY_DEST
+ // state
+ if (heapType == D3D12_HEAP_TYPE_READBACK) {
+ bufferUsage |= D3D12_RESOURCE_STATE_COPY_DEST;
+ mFixedResourceState = true;
+ mLastUsage = wgpu::BufferUsage::CopyDst;
}
- Buffer::Buffer(Device* device, const BufferDescriptor* descriptor)
- : BufferBase(device, descriptor) {
+ // D3D12 requires buffers on the UPLOAD heap to have the D3D12_RESOURCE_STATE_GENERIC_READ
+ // state
+ if (heapType == D3D12_HEAP_TYPE_UPLOAD) {
+ bufferUsage |= D3D12_RESOURCE_STATE_GENERIC_READ;
+ mFixedResourceState = true;
+ mLastUsage = wgpu::BufferUsage::CopySrc;
}
- MaybeError Buffer::Initialize(bool mappedAtCreation) {
- // Allocate at least 4 bytes so clamped accesses are always in bounds.
- uint64_t size = std::max(GetSize(), uint64_t(4u));
- size_t alignment = D3D12BufferSizeAlignment(GetUsage());
- if (size > std::numeric_limits<uint64_t>::max() - alignment) {
-            // Alignment would overflow.
- return DAWN_OUT_OF_MEMORY_ERROR("Buffer allocation is too large");
- }
- mAllocatedSize = Align(size, alignment);
-
- D3D12_RESOURCE_DESC resourceDescriptor;
- resourceDescriptor.Dimension = D3D12_RESOURCE_DIMENSION_BUFFER;
- resourceDescriptor.Alignment = 0;
- resourceDescriptor.Width = mAllocatedSize;
- resourceDescriptor.Height = 1;
- resourceDescriptor.DepthOrArraySize = 1;
- resourceDescriptor.MipLevels = 1;
- resourceDescriptor.Format = DXGI_FORMAT_UNKNOWN;
- resourceDescriptor.SampleDesc.Count = 1;
- resourceDescriptor.SampleDesc.Quality = 0;
- resourceDescriptor.Layout = D3D12_TEXTURE_LAYOUT_ROW_MAJOR;
- // Add CopyDst for non-mappable buffer initialization with mappedAtCreation
- // and robust resource initialization.
- resourceDescriptor.Flags = D3D12ResourceFlags(GetUsage() | wgpu::BufferUsage::CopyDst);
-
- auto heapType = D3D12HeapType(GetUsage());
- auto bufferUsage = D3D12_RESOURCE_STATE_COMMON;
-
- // D3D12 requires buffers on the READBACK heap to have the D3D12_RESOURCE_STATE_COPY_DEST
- // state
- if (heapType == D3D12_HEAP_TYPE_READBACK) {
- bufferUsage |= D3D12_RESOURCE_STATE_COPY_DEST;
- mFixedResourceState = true;
- mLastUsage = wgpu::BufferUsage::CopyDst;
- }
+ DAWN_TRY_ASSIGN(
+ mResourceAllocation,
+ ToBackend(GetDevice())->AllocateMemory(heapType, resourceDescriptor, bufferUsage));
- // D3D12 requires buffers on the UPLOAD heap to have the D3D12_RESOURCE_STATE_GENERIC_READ
- // state
- if (heapType == D3D12_HEAP_TYPE_UPLOAD) {
- bufferUsage |= D3D12_RESOURCE_STATE_GENERIC_READ;
- mFixedResourceState = true;
- mLastUsage = wgpu::BufferUsage::CopySrc;
- }
+ SetLabelImpl();
- DAWN_TRY_ASSIGN(
- mResourceAllocation,
- ToBackend(GetDevice())->AllocateMemory(heapType, resourceDescriptor, bufferUsage));
+ // The buffers with mappedAtCreation == true will be initialized in
+ // BufferBase::MapAtCreation().
+ if (GetDevice()->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting) &&
+ !mappedAtCreation) {
+ CommandRecordingContext* commandRecordingContext;
+ DAWN_TRY_ASSIGN(commandRecordingContext,
+ ToBackend(GetDevice())->GetPendingCommandContext());
- SetLabelImpl();
+ DAWN_TRY(ClearBuffer(commandRecordingContext, uint8_t(1u)));
+ }
- // The buffers with mappedAtCreation == true will be initialized in
- // BufferBase::MapAtCreation().
- if (GetDevice()->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting) &&
- !mappedAtCreation) {
+ // Initialize the padding bytes to zero.
+ if (GetDevice()->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse) && !mappedAtCreation) {
+ uint32_t paddingBytes = GetAllocatedSize() - GetSize();
+ if (paddingBytes > 0) {
CommandRecordingContext* commandRecordingContext;
DAWN_TRY_ASSIGN(commandRecordingContext,
ToBackend(GetDevice())->GetPendingCommandContext());
- DAWN_TRY(ClearBuffer(commandRecordingContext, uint8_t(1u)));
+ uint32_t clearSize = paddingBytes;
+ uint64_t clearOffset = GetSize();
+ DAWN_TRY(ClearBuffer(commandRecordingContext, 0, clearOffset, clearSize));
}
-
- // Initialize the padding bytes to zero.
- if (GetDevice()->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse) &&
- !mappedAtCreation) {
- uint32_t paddingBytes = GetAllocatedSize() - GetSize();
- if (paddingBytes > 0) {
- CommandRecordingContext* commandRecordingContext;
- DAWN_TRY_ASSIGN(commandRecordingContext,
- ToBackend(GetDevice())->GetPendingCommandContext());
-
- uint32_t clearSize = paddingBytes;
- uint64_t clearOffset = GetSize();
- DAWN_TRY(ClearBuffer(commandRecordingContext, 0, clearOffset, clearSize));
- }
- }
-
- return {};
- }
-
- Buffer::~Buffer() = default;
-
- ID3D12Resource* Buffer::GetD3D12Resource() const {
- return mResourceAllocation.GetD3D12Resource();
- }
-
- // When true is returned, a D3D12_RESOURCE_BARRIER has been created and must be used in a
- // ResourceBarrier call. Failing to do so will cause the tracked state to become invalid and can
- // cause subsequent errors.
- bool Buffer::TrackUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
- D3D12_RESOURCE_BARRIER* barrier,
- wgpu::BufferUsage newUsage) {
- // Track the underlying heap to ensure residency.
- Heap* heap = ToBackend(mResourceAllocation.GetResourceHeap());
- commandContext->TrackHeapUsage(heap, GetDevice()->GetPendingCommandSerial());
-
- // Return the resource barrier.
- return TransitionUsageAndGetResourceBarrier(commandContext, barrier, newUsage);
}
- void Buffer::TrackUsageAndTransitionNow(CommandRecordingContext* commandContext,
- wgpu::BufferUsage newUsage) {
- D3D12_RESOURCE_BARRIER barrier;
+ return {};
+}
- if (TrackUsageAndGetResourceBarrier(commandContext, &barrier, newUsage)) {
- commandContext->GetCommandList()->ResourceBarrier(1, &barrier);
- }
- }
-
- // When true is returned, a D3D12_RESOURCE_BARRIER has been created and must be used in a
- // ResourceBarrier call. Failing to do so will cause the tracked state to become invalid and can
- // cause subsequent errors.
- bool Buffer::TransitionUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
- D3D12_RESOURCE_BARRIER* barrier,
- wgpu::BufferUsage newUsage) {
- // Resources in upload and readback heaps must be kept in the COPY_SOURCE/DEST state
- if (mFixedResourceState) {
- ASSERT(mLastUsage == newUsage);
- return false;
- }
-
- D3D12_RESOURCE_STATES lastState = D3D12BufferUsage(mLastUsage);
- D3D12_RESOURCE_STATES newState = D3D12BufferUsage(newUsage);
+Buffer::~Buffer() = default;
- // If the transition is from-UAV-to-UAV, then a UAV barrier is needed.
- // If one of the usages isn't UAV, then other barriers are used.
- bool needsUAVBarrier = lastState == D3D12_RESOURCE_STATE_UNORDERED_ACCESS &&
- newState == D3D12_RESOURCE_STATE_UNORDERED_ACCESS;
+ID3D12Resource* Buffer::GetD3D12Resource() const {
+ return mResourceAllocation.GetD3D12Resource();
+}
- if (needsUAVBarrier) {
- barrier->Type = D3D12_RESOURCE_BARRIER_TYPE_UAV;
- barrier->Flags = D3D12_RESOURCE_BARRIER_FLAG_NONE;
- barrier->UAV.pResource = GetD3D12Resource();
+// When true is returned, a D3D12_RESOURCE_BARRIER has been created and must be used in a
+// ResourceBarrier call. Failing to do so will cause the tracked state to become invalid and can
+// cause subsequent errors.
+bool Buffer::TrackUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
+ D3D12_RESOURCE_BARRIER* barrier,
+ wgpu::BufferUsage newUsage) {
+ // Track the underlying heap to ensure residency.
+ Heap* heap = ToBackend(mResourceAllocation.GetResourceHeap());
+ commandContext->TrackHeapUsage(heap, GetDevice()->GetPendingCommandSerial());
- mLastUsage = newUsage;
- return true;
- }
+ // Return the resource barrier.
+ return TransitionUsageAndGetResourceBarrier(commandContext, barrier, newUsage);
+}
- // We can skip transitions to already current usages.
- if (IsSubset(newUsage, mLastUsage)) {
- return false;
- }
+void Buffer::TrackUsageAndTransitionNow(CommandRecordingContext* commandContext,
+ wgpu::BufferUsage newUsage) {
+ D3D12_RESOURCE_BARRIER barrier;
- mLastUsage = newUsage;
-
- // The COMMON state represents a state where no write operations can be pending, which makes
-        // it possible to transition to and from some states without synchronization (i.e. without an
- // explicit ResourceBarrier call). A buffer can be implicitly promoted to 1) a single write
- // state, or 2) multiple read states. A buffer that is accessed within a command list will
- // always implicitly decay to the COMMON state after the call to ExecuteCommandLists
- // completes - this is because all buffer writes are guaranteed to be completed before the
- // next ExecuteCommandLists call executes.
- // https://docs.microsoft.com/en-us/windows/desktop/direct3d12/using-resource-barriers-to-synchronize-resource-states-in-direct3d-12#implicit-state-transitions
-
- // To track implicit decays, we must record the pending serial on which a transition will
- // occur. When that buffer is used again, the previously recorded serial must be compared to
-        // the last completed serial to determine if the buffer has implicitly decayed to the common
- // state.
- const ExecutionSerial pendingCommandSerial =
- ToBackend(GetDevice())->GetPendingCommandSerial();
- if (pendingCommandSerial > mLastUsedSerial) {
- lastState = D3D12_RESOURCE_STATE_COMMON;
- mLastUsedSerial = pendingCommandSerial;
- }
+ if (TrackUsageAndGetResourceBarrier(commandContext, &barrier, newUsage)) {
+ commandContext->GetCommandList()->ResourceBarrier(1, &barrier);
+ }
+}
+
+// When true is returned, a D3D12_RESOURCE_BARRIER has been created and must be used in a
+// ResourceBarrier call. Failing to do so will cause the tracked state to become invalid and can
+// cause subsequent errors.
+bool Buffer::TransitionUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
+ D3D12_RESOURCE_BARRIER* barrier,
+ wgpu::BufferUsage newUsage) {
+ // Resources in upload and readback heaps must be kept in the COPY_SOURCE/DEST state
+ if (mFixedResourceState) {
+ ASSERT(mLastUsage == newUsage);
+ return false;
+ }
- // All possible buffer states used by Dawn are eligible for implicit promotion from COMMON.
- // These are: COPY_SOURCE, VERTEX_AND_COPY_BUFFER, INDEX_BUFFER, COPY_DEST,
- // UNORDERED_ACCESS, and INDIRECT_ARGUMENT. Note that for implicit promotion, the
- // destination state cannot be 1) more than one write state, or 2) both a read and write
- // state. This goes unchecked here because it should not be allowed through render/compute
- // pass validation.
- if (lastState == D3D12_RESOURCE_STATE_COMMON) {
- return false;
- }
+ D3D12_RESOURCE_STATES lastState = D3D12BufferUsage(mLastUsage);
+ D3D12_RESOURCE_STATES newState = D3D12BufferUsage(newUsage);
- // TODO(crbug.com/dawn/1024): The before and after states must be different. Remove this
- // workaround and use D3D12 states instead of WebGPU usages to manage the tracking of
- // barrier state.
- if (lastState == newState) {
- return false;
- }
+ // If the transition is from-UAV-to-UAV, then a UAV barrier is needed.
+ // If one of the usages isn't UAV, then other barriers are used.
+ bool needsUAVBarrier = lastState == D3D12_RESOURCE_STATE_UNORDERED_ACCESS &&
+ newState == D3D12_RESOURCE_STATE_UNORDERED_ACCESS;
- barrier->Type = D3D12_RESOURCE_BARRIER_TYPE_TRANSITION;
+ if (needsUAVBarrier) {
+ barrier->Type = D3D12_RESOURCE_BARRIER_TYPE_UAV;
barrier->Flags = D3D12_RESOURCE_BARRIER_FLAG_NONE;
- barrier->Transition.pResource = GetD3D12Resource();
- barrier->Transition.StateBefore = lastState;
- barrier->Transition.StateAfter = newState;
- barrier->Transition.Subresource = D3D12_RESOURCE_BARRIER_ALL_SUBRESOURCES;
+ barrier->UAV.pResource = GetD3D12Resource();
+ mLastUsage = newUsage;
return true;
}
- D3D12_GPU_VIRTUAL_ADDRESS Buffer::GetVA() const {
- return mResourceAllocation.GetGPUPointer();
- }
-
- bool Buffer::IsCPUWritableAtCreation() const {
- // We use a staging buffer for the buffers with mappedAtCreation == true and created on the
- // READBACK heap because for the buffers on the READBACK heap, the data written on the CPU
- // side won't be uploaded to GPU. When we enable zero-initialization, the CPU side memory
- // of the buffer is all written to 0 but not the GPU side memory, so on the next mapping
- // operation the zeroes get overwritten by whatever was in the GPU memory when the buffer
-        // was created. With a staging buffer, the data on the CPU side will first be uploaded to the
-        // staging buffer, and then copied from the staging buffer to the GPU memory of the current
- // buffer in the unmap() call.
- // TODO(enga): Handle CPU-visible memory on UMA
- return (GetUsage() & wgpu::BufferUsage::MapWrite) != 0;
- }
-
- MaybeError Buffer::MapInternal(bool isWrite,
- size_t offset,
- size_t size,
- const char* contextInfo) {
- // The mapped buffer can be accessed at any time, so it must be locked to ensure it is never
- // evicted. This buffer should already have been made resident when it was created.
- Heap* heap = ToBackend(mResourceAllocation.GetResourceHeap());
- DAWN_TRY(ToBackend(GetDevice())->GetResidencyManager()->LockAllocation(heap));
-
- D3D12_RANGE range = {offset, offset + size};
- // mMappedData is the pointer to the start of the resource, irrespective of offset.
- // MSDN says (note the weird use of "never"):
- //
- // When ppData is not NULL, the pointer returned is never offset by any values in
- // pReadRange.
- //
- // https://docs.microsoft.com/en-us/windows/win32/api/d3d12/nf-d3d12-id3d12resource-map
- DAWN_TRY(CheckHRESULT(GetD3D12Resource()->Map(0, &range, &mMappedData), contextInfo));
-
- if (isWrite) {
- mWrittenMappedRange = range;
- }
-
- return {};
+ // We can skip transitions to already current usages.
+ if (IsSubset(newUsage, mLastUsage)) {
+ return false;
}
- MaybeError Buffer::MapAtCreationImpl() {
- // We will use a staging buffer for MapRead buffers instead so we just clear the staging
- // buffer and initialize the original buffer by copying the staging buffer to the original
-        // buffer the first time Unmap() is called.
- ASSERT((GetUsage() & wgpu::BufferUsage::MapWrite) != 0);
-
- // The buffers with mappedAtCreation == true will be initialized in
- // BufferBase::MapAtCreation().
- DAWN_TRY(MapInternal(true, 0, size_t(GetAllocatedSize()), "D3D12 map at creation"));
-
- return {};
+ mLastUsage = newUsage;
+
+ // The COMMON state represents a state where no write operations can be pending, which makes
+    // it possible to transition to and from some states without synchronization (i.e. without an
+ // explicit ResourceBarrier call). A buffer can be implicitly promoted to 1) a single write
+ // state, or 2) multiple read states. A buffer that is accessed within a command list will
+ // always implicitly decay to the COMMON state after the call to ExecuteCommandLists
+ // completes - this is because all buffer writes are guaranteed to be completed before the
+ // next ExecuteCommandLists call executes.
+ // https://docs.microsoft.com/en-us/windows/desktop/direct3d12/using-resource-barriers-to-synchronize-resource-states-in-direct3d-12#implicit-state-transitions
+
+ // To track implicit decays, we must record the pending serial on which a transition will
+ // occur. When that buffer is used again, the previously recorded serial must be compared to
+    // the last completed serial to determine if the buffer has implicitly decayed to the common
+ // state.
+ const ExecutionSerial pendingCommandSerial = ToBackend(GetDevice())->GetPendingCommandSerial();
+ if (pendingCommandSerial > mLastUsedSerial) {
+ lastState = D3D12_RESOURCE_STATE_COMMON;
+ mLastUsedSerial = pendingCommandSerial;
}
- MaybeError Buffer::MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) {
- // GetPendingCommandContext() call might create a new commandList. Dawn will handle
-        // it in Tick() by executing the commandList and signaling a fence for it even if it is
-        // empty. Skipping the unnecessary GetPendingCommandContext() call saves an extra fence.
- if (NeedsInitialization()) {
- CommandRecordingContext* commandContext;
- DAWN_TRY_ASSIGN(commandContext, ToBackend(GetDevice())->GetPendingCommandContext());
- DAWN_TRY(EnsureDataInitialized(commandContext));
- }
-
- return MapInternal(mode & wgpu::MapMode::Write, offset, size, "D3D12 map async");
+ // All possible buffer states used by Dawn are eligible for implicit promotion from COMMON.
+ // These are: COPY_SOURCE, VERTEX_AND_COPY_BUFFER, INDEX_BUFFER, COPY_DEST,
+ // UNORDERED_ACCESS, and INDIRECT_ARGUMENT. Note that for implicit promotion, the
+ // destination state cannot be 1) more than one write state, or 2) both a read and write
+ // state. This goes unchecked here because it should not be allowed through render/compute
+ // pass validation.
+ if (lastState == D3D12_RESOURCE_STATE_COMMON) {
+ return false;
}
- void Buffer::UnmapImpl() {
- GetD3D12Resource()->Unmap(0, &mWrittenMappedRange);
- mMappedData = nullptr;
- mWrittenMappedRange = {0, 0};
-
- // When buffers are mapped, they are locked to keep them in resident memory. We must unlock
- // them when they are unmapped.
- Heap* heap = ToBackend(mResourceAllocation.GetResourceHeap());
- ToBackend(GetDevice())->GetResidencyManager()->UnlockAllocation(heap);
+ // TODO(crbug.com/dawn/1024): The before and after states must be different. Remove this
+ // workaround and use D3D12 states instead of WebGPU usages to manage the tracking of
+ // barrier state.
+ if (lastState == newState) {
+ return false;
}
- void* Buffer::GetMappedPointerImpl() {
- // The frontend asks that the pointer returned is from the start of the resource
- // irrespective of the offset passed in MapAsyncImpl, which is what mMappedData is.
- return mMappedData;
+ barrier->Type = D3D12_RESOURCE_BARRIER_TYPE_TRANSITION;
+ barrier->Flags = D3D12_RESOURCE_BARRIER_FLAG_NONE;
+ barrier->Transition.pResource = GetD3D12Resource();
+ barrier->Transition.StateBefore = lastState;
+ barrier->Transition.StateAfter = newState;
+ barrier->Transition.Subresource = D3D12_RESOURCE_BARRIER_ALL_SUBRESOURCES;
+
+ return true;
+}
+
+D3D12_GPU_VIRTUAL_ADDRESS Buffer::GetVA() const {
+ return mResourceAllocation.GetGPUPointer();
+}
+
+bool Buffer::IsCPUWritableAtCreation() const {
+ // We use a staging buffer for the buffers with mappedAtCreation == true and created on the
+ // READBACK heap because for the buffers on the READBACK heap, the data written on the CPU
+ // side won't be uploaded to GPU. When we enable zero-initialization, the CPU side memory
+ // of the buffer is all written to 0 but not the GPU side memory, so on the next mapping
+ // operation the zeroes get overwritten by whatever was in the GPU memory when the buffer
+    // was created. With a staging buffer, the data on the CPU side will first be uploaded to the
+    // staging buffer, and then copied from the staging buffer to the GPU memory of the current
+ // buffer in the unmap() call.
+ // TODO(enga): Handle CPU-visible memory on UMA
+ return (GetUsage() & wgpu::BufferUsage::MapWrite) != 0;
+}
+
+MaybeError Buffer::MapInternal(bool isWrite, size_t offset, size_t size, const char* contextInfo) {
+ // The mapped buffer can be accessed at any time, so it must be locked to ensure it is never
+ // evicted. This buffer should already have been made resident when it was created.
+ TRACE_EVENT0(GetDevice()->GetPlatform(), General, "BufferD3D12::MapInternal");
+
+ Heap* heap = ToBackend(mResourceAllocation.GetResourceHeap());
+ DAWN_TRY(ToBackend(GetDevice())->GetResidencyManager()->LockAllocation(heap));
+
+ D3D12_RANGE range = {offset, offset + size};
+ // mMappedData is the pointer to the start of the resource, irrespective of offset.
+ // MSDN says (note the weird use of "never"):
+ //
+ // When ppData is not NULL, the pointer returned is never offset by any values in
+ // pReadRange.
+ //
+ // https://docs.microsoft.com/en-us/windows/win32/api/d3d12/nf-d3d12-id3d12resource-map
+ DAWN_TRY(CheckHRESULT(GetD3D12Resource()->Map(0, &range, &mMappedData), contextInfo));
+
+ if (isWrite) {
+ mWrittenMappedRange = range;
}
- void Buffer::DestroyImpl() {
- if (mMappedData != nullptr) {
- // If the buffer is currently mapped, unmap without flushing the writes to the GPU
- // since the buffer cannot be used anymore. UnmapImpl checks mWrittenRange to know
- // which parts to flush, so we set it to an empty range to prevent flushes.
- mWrittenMappedRange = {0, 0};
- }
- BufferBase::DestroyImpl();
-
- ToBackend(GetDevice())->DeallocateMemory(mResourceAllocation);
+ return {};
+}
+
+MaybeError Buffer::MapAtCreationImpl() {
+ // We will use a staging buffer for MapRead buffers instead so we just clear the staging
+ // buffer and initialize the original buffer by copying the staging buffer to the original
+    // buffer the first time Unmap() is called.
+ ASSERT((GetUsage() & wgpu::BufferUsage::MapWrite) != 0);
+
+ // The buffers with mappedAtCreation == true will be initialized in
+ // BufferBase::MapAtCreation().
+ DAWN_TRY(MapInternal(true, 0, size_t(GetAllocatedSize()), "D3D12 map at creation"));
+
+ return {};
+}
+
+MaybeError Buffer::MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) {
+ // GetPendingCommandContext() call might create a new commandList. Dawn will handle
+    // it in Tick() by executing the commandList and signaling a fence for it even if it is
+    // empty. Skipping the unnecessary GetPendingCommandContext() call saves an extra fence.
+ if (NeedsInitialization()) {
+ CommandRecordingContext* commandContext;
+ DAWN_TRY_ASSIGN(commandContext, ToBackend(GetDevice())->GetPendingCommandContext());
+ DAWN_TRY(EnsureDataInitialized(commandContext));
}
- bool Buffer::CheckIsResidentForTesting() const {
- Heap* heap = ToBackend(mResourceAllocation.GetResourceHeap());
- return heap->IsInList() || heap->IsResidencyLocked();
+ return MapInternal(mode & wgpu::MapMode::Write, offset, size, "D3D12 map async");
+}
+
+void Buffer::UnmapImpl() {
+ GetD3D12Resource()->Unmap(0, &mWrittenMappedRange);
+ mMappedData = nullptr;
+ mWrittenMappedRange = {0, 0};
+
+ // When buffers are mapped, they are locked to keep them in resident memory. We must unlock
+ // them when they are unmapped.
+ Heap* heap = ToBackend(mResourceAllocation.GetResourceHeap());
+ ToBackend(GetDevice())->GetResidencyManager()->UnlockAllocation(heap);
+}
+
+void* Buffer::GetMappedPointerImpl() {
+ // The frontend asks that the pointer returned is from the start of the resource
+ // irrespective of the offset passed in MapAsyncImpl, which is what mMappedData is.
+ return mMappedData;
+}
+
+void Buffer::DestroyImpl() {
+ if (mMappedData != nullptr) {
+ // If the buffer is currently mapped, unmap without flushing the writes to the GPU
+ // since the buffer cannot be used anymore. UnmapImpl checks mWrittenRange to know
+ // which parts to flush, so we set it to an empty range to prevent flushes.
+ mWrittenMappedRange = {0, 0};
}
+ BufferBase::DestroyImpl();
- bool Buffer::CheckAllocationMethodForTesting(AllocationMethod allocationMethod) const {
- return mResourceAllocation.GetInfo().mMethod == allocationMethod;
- }
+ ToBackend(GetDevice())->DeallocateMemory(mResourceAllocation);
+}
- MaybeError Buffer::EnsureDataInitialized(CommandRecordingContext* commandContext) {
- if (!NeedsInitialization()) {
- return {};
- }
+bool Buffer::CheckIsResidentForTesting() const {
+ Heap* heap = ToBackend(mResourceAllocation.GetResourceHeap());
+ return heap->IsInList() || heap->IsResidencyLocked();
+}
- DAWN_TRY(InitializeToZero(commandContext));
+bool Buffer::CheckAllocationMethodForTesting(AllocationMethod allocationMethod) const {
+ return mResourceAllocation.GetInfo().mMethod == allocationMethod;
+}
+
+MaybeError Buffer::EnsureDataInitialized(CommandRecordingContext* commandContext) {
+ if (!NeedsInitialization()) {
return {};
}
- ResultOrError<bool> Buffer::EnsureDataInitializedAsDestination(
- CommandRecordingContext* commandContext,
- uint64_t offset,
- uint64_t size) {
- if (!NeedsInitialization()) {
- return {false};
- }
-
- if (IsFullBufferRange(offset, size)) {
- SetIsDataInitialized();
- return {false};
- }
+ DAWN_TRY(InitializeToZero(commandContext));
+ return {};
+}
- DAWN_TRY(InitializeToZero(commandContext));
- return {true};
+ResultOrError<bool> Buffer::EnsureDataInitializedAsDestination(
+ CommandRecordingContext* commandContext,
+ uint64_t offset,
+ uint64_t size) {
+ if (!NeedsInitialization()) {
+ return {false};
}
- MaybeError Buffer::EnsureDataInitializedAsDestination(CommandRecordingContext* commandContext,
- const CopyTextureToBufferCmd* copy) {
- if (!NeedsInitialization()) {
- return {};
- }
+ if (IsFullBufferRange(offset, size)) {
+ SetIsDataInitialized();
+ return {false};
+ }
- if (IsFullBufferOverwrittenInTextureToBufferCopy(copy)) {
- SetIsDataInitialized();
- } else {
- DAWN_TRY(InitializeToZero(commandContext));
- }
+ DAWN_TRY(InitializeToZero(commandContext));
+ return {true};
+}
+MaybeError Buffer::EnsureDataInitializedAsDestination(CommandRecordingContext* commandContext,
+ const CopyTextureToBufferCmd* copy) {
+ if (!NeedsInitialization()) {
return {};
}
- void Buffer::SetLabelImpl() {
- SetDebugName(ToBackend(GetDevice()), mResourceAllocation.GetD3D12Resource(), "Dawn_Buffer",
- GetLabel());
- }
-
- MaybeError Buffer::InitializeToZero(CommandRecordingContext* commandContext) {
- ASSERT(NeedsInitialization());
-
- // TODO(crbug.com/dawn/484): skip initializing the buffer when it is created on a heap
- // that has already been zero initialized.
- DAWN_TRY(ClearBuffer(commandContext, uint8_t(0u)));
+ if (IsFullBufferOverwrittenInTextureToBufferCopy(copy)) {
SetIsDataInitialized();
- GetDevice()->IncrementLazyClearCountForTesting();
-
- return {};
+ } else {
+ DAWN_TRY(InitializeToZero(commandContext));
}
- MaybeError Buffer::ClearBuffer(CommandRecordingContext* commandContext,
- uint8_t clearValue,
- uint64_t offset,
- uint64_t size) {
- Device* device = ToBackend(GetDevice());
- size = size > 0 ? size : GetAllocatedSize();
-
- // The state of the buffers on UPLOAD heap must always be GENERIC_READ and cannot be
- // changed away, so we can only clear such buffer with buffer mapping.
- if (D3D12HeapType(GetUsage()) == D3D12_HEAP_TYPE_UPLOAD) {
- DAWN_TRY(MapInternal(true, static_cast<size_t>(offset), static_cast<size_t>(size),
- "D3D12 map at clear buffer"));
- memset(mMappedData, clearValue, size);
- UnmapImpl();
- } else if (clearValue == 0u) {
- DAWN_TRY(device->ClearBufferToZero(commandContext, this, offset, size));
- } else {
- // TODO(crbug.com/dawn/852): use ClearUnorderedAccessView*() when the buffer usage
- // includes STORAGE.
- DynamicUploader* uploader = device->GetDynamicUploader();
- UploadHandle uploadHandle;
- DAWN_TRY_ASSIGN(uploadHandle,
- uploader->Allocate(size, device->GetPendingCommandSerial(),
- kCopyBufferToBufferOffsetAlignment));
-
- memset(uploadHandle.mappedBuffer, clearValue, size);
-
- device->CopyFromStagingToBufferImpl(commandContext, uploadHandle.stagingBuffer,
- uploadHandle.startOffset, this, offset, size);
- }
-
- return {};
+ return {};
+}
+
+void Buffer::SetLabelImpl() {
+ SetDebugName(ToBackend(GetDevice()), mResourceAllocation.GetD3D12Resource(), "Dawn_Buffer",
+ GetLabel());
+}
+
+MaybeError Buffer::InitializeToZero(CommandRecordingContext* commandContext) {
+ ASSERT(NeedsInitialization());
+
+ // TODO(crbug.com/dawn/484): skip initializing the buffer when it is created on a heap
+ // that has already been zero initialized.
+ DAWN_TRY(ClearBuffer(commandContext, uint8_t(0u)));
+ SetIsDataInitialized();
+ GetDevice()->IncrementLazyClearCountForTesting();
+
+ return {};
+}
+
+MaybeError Buffer::ClearBuffer(CommandRecordingContext* commandContext,
+ uint8_t clearValue,
+ uint64_t offset,
+ uint64_t size) {
+ Device* device = ToBackend(GetDevice());
+ size = size > 0 ? size : GetAllocatedSize();
+
+ // The state of the buffers on UPLOAD heap must always be GENERIC_READ and cannot be
+ // changed away, so we can only clear such buffer with buffer mapping.
+ if (D3D12HeapType(GetUsage()) == D3D12_HEAP_TYPE_UPLOAD) {
+ DAWN_TRY(MapInternal(true, static_cast<size_t>(offset), static_cast<size_t>(size),
+ "D3D12 map at clear buffer"));
+ memset(mMappedData, clearValue, size);
+ UnmapImpl();
+ } else if (clearValue == 0u) {
+ DAWN_TRY(device->ClearBufferToZero(commandContext, this, offset, size));
+ } else {
+ // TODO(crbug.com/dawn/852): use ClearUnorderedAccessView*() when the buffer usage
+ // includes STORAGE.
+ DynamicUploader* uploader = device->GetDynamicUploader();
+ UploadHandle uploadHandle;
+ DAWN_TRY_ASSIGN(uploadHandle, uploader->Allocate(size, device->GetPendingCommandSerial(),
+ kCopyBufferToBufferOffsetAlignment));
+
+ memset(uploadHandle.mappedBuffer, clearValue, size);
+
+ device->CopyFromStagingToBufferImpl(commandContext, uploadHandle.stagingBuffer,
+ uploadHandle.startOffset, this, offset, size);
}
+
+ return {};
+}
} // namespace dawn::native::d3d12
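Buffer::Initialize above clamps the requested size to at least 4 bytes and rounds it up to the CBV placement alignment (256 bytes, the documented value of D3D12_CONSTANT_BUFFER_DATA_PLACEMENT_ALIGNMENT) when the buffer can be bound as a uniform buffer. A small standalone sketch of that size computation; Align and AllocatedBufferSize are local helpers written for this illustration, not Dawn functions:

#include <cassert>
#include <cstdint>

// Round 'value' up to the next multiple of 'alignment' (a power of two).
uint64_t Align(uint64_t value, uint64_t alignment) {
    return (value + alignment - 1) & ~(alignment - 1);
}

uint64_t AllocatedBufferSize(uint64_t requestedSize, bool usableAsUniform) {
    uint64_t size = requestedSize < 4 ? 4 : requestedSize;  // clamped accesses stay in bounds
    uint64_t alignment = usableAsUniform ? 256 : 1;         // CBV placement alignment vs. none
    return Align(size, alignment);
}

int main() {
    assert(AllocatedBufferSize(0, false) == 4);    // minimum allocation
    assert(AllocatedBufferSize(20, true) == 256);  // uniform buffers round up to 256
    assert(AllocatedBufferSize(300, true) == 512);
    return 0;
}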
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/BufferD3D12.h b/chromium/third_party/dawn/src/dawn/native/d3d12/BufferD3D12.h
index cdca5600e95..cb36a851a10 100644
--- a/chromium/third_party/dawn/src/dawn/native/d3d12/BufferD3D12.h
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/BufferD3D12.h
@@ -15,6 +15,8 @@
#ifndef SRC_DAWN_NATIVE_D3D12_BUFFERD3D12_H_
#define SRC_DAWN_NATIVE_D3D12_BUFFERD3D12_H_
+#include <limits>
+
#include "dawn/native/Buffer.h"
#include "dawn/native/d3d12/ResourceHeapAllocationD3D12.h"
@@ -22,69 +24,67 @@
namespace dawn::native::d3d12 {
- class CommandRecordingContext;
- class Device;
-
- class Buffer final : public BufferBase {
- public:
- static ResultOrError<Ref<Buffer>> Create(Device* device,
- const BufferDescriptor* descriptor);
-
- ID3D12Resource* GetD3D12Resource() const;
- D3D12_GPU_VIRTUAL_ADDRESS GetVA() const;
-
- bool TrackUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
- D3D12_RESOURCE_BARRIER* barrier,
- wgpu::BufferUsage newUsage);
- void TrackUsageAndTransitionNow(CommandRecordingContext* commandContext,
- wgpu::BufferUsage newUsage);
-
- bool CheckAllocationMethodForTesting(AllocationMethod allocationMethod) const;
- bool CheckIsResidentForTesting() const;
-
- MaybeError EnsureDataInitialized(CommandRecordingContext* commandContext);
- ResultOrError<bool> EnsureDataInitializedAsDestination(
- CommandRecordingContext* commandContext,
- uint64_t offset,
- uint64_t size);
- MaybeError EnsureDataInitializedAsDestination(CommandRecordingContext* commandContext,
- const CopyTextureToBufferCmd* copy);
-
- // Dawn API
- void SetLabelImpl() override;
-
- private:
- Buffer(Device* device, const BufferDescriptor* descriptor);
- ~Buffer() override;
-
- MaybeError Initialize(bool mappedAtCreation);
- MaybeError MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) override;
- void UnmapImpl() override;
- void DestroyImpl() override;
- bool IsCPUWritableAtCreation() const override;
- MaybeError MapAtCreationImpl() override;
- void* GetMappedPointerImpl() override;
-
- MaybeError MapInternal(bool isWrite, size_t start, size_t end, const char* contextInfo);
-
- bool TransitionUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
- D3D12_RESOURCE_BARRIER* barrier,
- wgpu::BufferUsage newUsage);
-
- MaybeError InitializeToZero(CommandRecordingContext* commandContext);
- MaybeError ClearBuffer(CommandRecordingContext* commandContext,
- uint8_t clearValue,
- uint64_t offset = 0,
- uint64_t size = 0);
-
- ResourceHeapAllocation mResourceAllocation;
- bool mFixedResourceState = false;
- wgpu::BufferUsage mLastUsage = wgpu::BufferUsage::None;
- ExecutionSerial mLastUsedSerial = std::numeric_limits<ExecutionSerial>::max();
-
- D3D12_RANGE mWrittenMappedRange = {0, 0};
- void* mMappedData = nullptr;
- };
+class CommandRecordingContext;
+class Device;
+
+class Buffer final : public BufferBase {
+ public:
+ static ResultOrError<Ref<Buffer>> Create(Device* device, const BufferDescriptor* descriptor);
+
+ ID3D12Resource* GetD3D12Resource() const;
+ D3D12_GPU_VIRTUAL_ADDRESS GetVA() const;
+
+ bool TrackUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
+ D3D12_RESOURCE_BARRIER* barrier,
+ wgpu::BufferUsage newUsage);
+ void TrackUsageAndTransitionNow(CommandRecordingContext* commandContext,
+ wgpu::BufferUsage newUsage);
+
+ bool CheckAllocationMethodForTesting(AllocationMethod allocationMethod) const;
+ bool CheckIsResidentForTesting() const;
+
+ MaybeError EnsureDataInitialized(CommandRecordingContext* commandContext);
+ ResultOrError<bool> EnsureDataInitializedAsDestination(CommandRecordingContext* commandContext,
+ uint64_t offset,
+ uint64_t size);
+ MaybeError EnsureDataInitializedAsDestination(CommandRecordingContext* commandContext,
+ const CopyTextureToBufferCmd* copy);
+
+ // Dawn API
+ void SetLabelImpl() override;
+
+ private:
+ Buffer(Device* device, const BufferDescriptor* descriptor);
+ ~Buffer() override;
+
+ MaybeError Initialize(bool mappedAtCreation);
+ MaybeError MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) override;
+ void UnmapImpl() override;
+ void DestroyImpl() override;
+ bool IsCPUWritableAtCreation() const override;
+ MaybeError MapAtCreationImpl() override;
+ void* GetMappedPointerImpl() override;
+
+ MaybeError MapInternal(bool isWrite, size_t start, size_t end, const char* contextInfo);
+
+ bool TransitionUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
+ D3D12_RESOURCE_BARRIER* barrier,
+ wgpu::BufferUsage newUsage);
+
+ MaybeError InitializeToZero(CommandRecordingContext* commandContext);
+ MaybeError ClearBuffer(CommandRecordingContext* commandContext,
+ uint8_t clearValue,
+ uint64_t offset = 0,
+ uint64_t size = 0);
+
+ ResourceHeapAllocation mResourceAllocation;
+ bool mFixedResourceState = false;
+ wgpu::BufferUsage mLastUsage = wgpu::BufferUsage::None;
+ ExecutionSerial mLastUsedSerial = std::numeric_limits<ExecutionSerial>::max();
+
+ D3D12_RANGE mWrittenMappedRange = {0, 0};
+ void* mMappedData = nullptr;
+};
} // namespace dawn::native::d3d12
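The mLastUsedSerial member declared above backs the implicit-decay tracking explained in the BufferD3D12.cpp comments: if the buffer was last transitioned in an earlier submission, its state is treated as COMMON again before the next barrier is computed. A simplified sketch of that bookkeeping (the enum and struct are illustrative, not Dawn types):

#include <cstdint>

enum class State : uint32_t { Common, CopyDest, UnorderedAccess };

struct BufferStateTracker {
    State lastState = State::Common;
    uint64_t lastUsedSerial = 0;

    // Returns the state to transition from for the submission identified by
    // 'pendingSerial'. A buffer last used in an earlier submission has
    // implicitly decayed back to COMMON in the meantime.
    State StateBefore(uint64_t pendingSerial) {
        if (pendingSerial > lastUsedSerial) {
            lastUsedSerial = pendingSerial;
            return State::Common;
        }
        return lastState;
    }
};

int main() {
    BufferStateTracker tracker;
    tracker.lastState = State::UnorderedAccess;
    tracker.lastUsedSerial = 7;
    // A later submission (serial 8) sees the buffer as COMMON again.
    return tracker.StateBefore(8) == State::Common ? 0 : 1;
}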
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/CPUDescriptorHeapAllocationD3D12.cpp b/chromium/third_party/dawn/src/dawn/native/d3d12/CPUDescriptorHeapAllocationD3D12.cpp
index 617c1966d1b..88e1d6d2a97 100644
--- a/chromium/third_party/dawn/src/dawn/native/d3d12/CPUDescriptorHeapAllocationD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/CPUDescriptorHeapAllocationD3D12.cpp
@@ -17,37 +17,35 @@
namespace dawn::native::d3d12 {
- CPUDescriptorHeapAllocation::CPUDescriptorHeapAllocation(
- D3D12_CPU_DESCRIPTOR_HANDLE baseDescriptor,
- uint32_t heapIndex)
- : mBaseDescriptor(baseDescriptor), mHeapIndex(heapIndex) {
- }
-
- D3D12_CPU_DESCRIPTOR_HANDLE CPUDescriptorHeapAllocation::GetBaseDescriptor() const {
- ASSERT(IsValid());
- return mBaseDescriptor;
- }
-
- D3D12_CPU_DESCRIPTOR_HANDLE CPUDescriptorHeapAllocation::OffsetFrom(
- uint32_t sizeIncrementInBytes,
- uint32_t offsetInDescriptorCount) const {
- ASSERT(IsValid());
- D3D12_CPU_DESCRIPTOR_HANDLE cpuHandle = mBaseDescriptor;
- cpuHandle.ptr += sizeIncrementInBytes * offsetInDescriptorCount;
- return cpuHandle;
- }
-
- uint32_t CPUDescriptorHeapAllocation::GetHeapIndex() const {
- ASSERT(mHeapIndex >= 0);
- return mHeapIndex;
- }
-
- bool CPUDescriptorHeapAllocation::IsValid() const {
- return mBaseDescriptor.ptr != 0;
- }
-
- void CPUDescriptorHeapAllocation::Invalidate() {
- mBaseDescriptor = {0};
- }
+CPUDescriptorHeapAllocation::CPUDescriptorHeapAllocation(D3D12_CPU_DESCRIPTOR_HANDLE baseDescriptor,
+ uint32_t heapIndex)
+ : mBaseDescriptor(baseDescriptor), mHeapIndex(heapIndex) {}
+
+D3D12_CPU_DESCRIPTOR_HANDLE CPUDescriptorHeapAllocation::GetBaseDescriptor() const {
+ ASSERT(IsValid());
+ return mBaseDescriptor;
+}
+
+D3D12_CPU_DESCRIPTOR_HANDLE CPUDescriptorHeapAllocation::OffsetFrom(
+ uint32_t sizeIncrementInBytes,
+ uint32_t offsetInDescriptorCount) const {
+ ASSERT(IsValid());
+ D3D12_CPU_DESCRIPTOR_HANDLE cpuHandle = mBaseDescriptor;
+ cpuHandle.ptr += sizeIncrementInBytes * offsetInDescriptorCount;
+ return cpuHandle;
+}
+
+uint32_t CPUDescriptorHeapAllocation::GetHeapIndex() const {
+ ASSERT(mHeapIndex >= 0);
+ return mHeapIndex;
+}
+
+bool CPUDescriptorHeapAllocation::IsValid() const {
+ return mBaseDescriptor.ptr != 0;
+}
+
+void CPUDescriptorHeapAllocation::Invalidate() {
+ mBaseDescriptor = {0};
+}
} // namespace dawn::native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/CPUDescriptorHeapAllocationD3D12.h b/chromium/third_party/dawn/src/dawn/native/d3d12/CPUDescriptorHeapAllocationD3D12.h
index 5c8526827c9..58c4eb51fd1 100644
--- a/chromium/third_party/dawn/src/dawn/native/d3d12/CPUDescriptorHeapAllocationD3D12.h
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/CPUDescriptorHeapAllocationD3D12.h
@@ -21,26 +21,26 @@
namespace dawn::native::d3d12 {
- // Wrapper for a handle into a CPU-only descriptor heap.
- class CPUDescriptorHeapAllocation {
- public:
- CPUDescriptorHeapAllocation() = default;
- CPUDescriptorHeapAllocation(D3D12_CPU_DESCRIPTOR_HANDLE baseDescriptor, uint32_t heapIndex);
+// Wrapper for a handle into a CPU-only descriptor heap.
+class CPUDescriptorHeapAllocation {
+ public:
+ CPUDescriptorHeapAllocation() = default;
+ CPUDescriptorHeapAllocation(D3D12_CPU_DESCRIPTOR_HANDLE baseDescriptor, uint32_t heapIndex);
- D3D12_CPU_DESCRIPTOR_HANDLE GetBaseDescriptor() const;
+ D3D12_CPU_DESCRIPTOR_HANDLE GetBaseDescriptor() const;
- D3D12_CPU_DESCRIPTOR_HANDLE OffsetFrom(uint32_t sizeIncrementInBytes,
- uint32_t offsetInDescriptorCount) const;
- uint32_t GetHeapIndex() const;
+ D3D12_CPU_DESCRIPTOR_HANDLE OffsetFrom(uint32_t sizeIncrementInBytes,
+ uint32_t offsetInDescriptorCount) const;
+ uint32_t GetHeapIndex() const;
- bool IsValid() const;
+ bool IsValid() const;
- void Invalidate();
+ void Invalidate();
- private:
- D3D12_CPU_DESCRIPTOR_HANDLE mBaseDescriptor = {0};
- uint32_t mHeapIndex = -1;
- };
+ private:
+ D3D12_CPU_DESCRIPTOR_HANDLE mBaseDescriptor = {0};
+ uint32_t mHeapIndex = -1;
+};
} // namespace dawn::native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/CacheKeyD3D12.cpp b/chromium/third_party/dawn/src/dawn/native/d3d12/CacheKeyD3D12.cpp
new file mode 100644
index 00000000000..0daf5264dd7
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/CacheKeyD3D12.cpp
@@ -0,0 +1,139 @@
+// Copyright 2022 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/Constants.h"
+#include "dawn/native/CacheKey.h"
+#include "dawn/native/d3d12/d3d12_platform.h"
+
+namespace dawn::native {
+
+template <>
+void CacheKeySerializer<D3D12_COMPUTE_PIPELINE_STATE_DESC>::Serialize(
+ CacheKey* key,
+ const D3D12_COMPUTE_PIPELINE_STATE_DESC& t) {
+    // Don't record pRootSignature as we already record the signature blob in pipeline layout.
+ key->Record(t.CS).Record(t.NodeMask).Record(t.Flags);
+}
+
+template <>
+void CacheKeySerializer<D3D12_RENDER_TARGET_BLEND_DESC>::Serialize(
+ CacheKey* key,
+ const D3D12_RENDER_TARGET_BLEND_DESC& t) {
+ key->Record(t.BlendEnable, t.LogicOpEnable, t.SrcBlend, t.DestBlend, t.BlendOp, t.SrcBlendAlpha,
+ t.DestBlendAlpha, t.BlendOpAlpha, t.LogicOp, t.RenderTargetWriteMask);
+}
+
+template <>
+void CacheKeySerializer<D3D12_BLEND_DESC>::Serialize(CacheKey* key, const D3D12_BLEND_DESC& t) {
+ key->Record(t.AlphaToCoverageEnable, t.IndependentBlendEnable).Record(t.RenderTarget);
+}
+
+template <>
+void CacheKeySerializer<D3D12_DEPTH_STENCILOP_DESC>::Serialize(
+ CacheKey* key,
+ const D3D12_DEPTH_STENCILOP_DESC& t) {
+ key->Record(t.StencilFailOp, t.StencilDepthFailOp, t.StencilPassOp, t.StencilFunc);
+}
+
+template <>
+void CacheKeySerializer<D3D12_DEPTH_STENCIL_DESC>::Serialize(CacheKey* key,
+ const D3D12_DEPTH_STENCIL_DESC& t) {
+ key->Record(t.DepthEnable, t.DepthWriteMask, t.DepthFunc, t.StencilEnable, t.StencilReadMask,
+ t.StencilWriteMask, t.FrontFace, t.BackFace);
+}
+
+template <>
+void CacheKeySerializer<D3D12_RASTERIZER_DESC>::Serialize(CacheKey* key,
+ const D3D12_RASTERIZER_DESC& t) {
+ key->Record(t.FillMode, t.CullMode, t.FrontCounterClockwise, t.DepthBias, t.DepthBiasClamp,
+ t.SlopeScaledDepthBias, t.DepthClipEnable, t.MultisampleEnable,
+ t.AntialiasedLineEnable, t.ForcedSampleCount, t.ConservativeRaster);
+}
+
+template <>
+void CacheKeySerializer<D3D12_INPUT_ELEMENT_DESC>::Serialize(CacheKey* key,
+ const D3D12_INPUT_ELEMENT_DESC& t) {
+ key->Record(t.SemanticName, t.SemanticIndex, t.Format, t.InputSlot, t.AlignedByteOffset,
+ t.InputSlotClass, t.InstanceDataStepRate);
+}
+
+template <>
+void CacheKeySerializer<D3D12_INPUT_LAYOUT_DESC>::Serialize(CacheKey* key,
+ const D3D12_INPUT_LAYOUT_DESC& t) {
+ key->RecordIterable(t.pInputElementDescs, t.NumElements);
+}
+
+template <>
+void CacheKeySerializer<D3D12_SO_DECLARATION_ENTRY>::Serialize(
+ CacheKey* key,
+ const D3D12_SO_DECLARATION_ENTRY& t) {
+ key->Record(t.Stream, t.SemanticName, t.SemanticIndex, t.StartComponent, t.ComponentCount,
+ t.OutputSlot);
+}
+
+template <>
+void CacheKeySerializer<D3D12_STREAM_OUTPUT_DESC>::Serialize(CacheKey* key,
+ const D3D12_STREAM_OUTPUT_DESC& t) {
+ key->RecordIterable(t.pSODeclaration, t.NumEntries)
+ .RecordIterable(t.pBufferStrides, t.NumStrides)
+ .Record(t.RasterizedStream);
+}
+
+template <>
+void CacheKeySerializer<DXGI_SAMPLE_DESC>::Serialize(CacheKey* key, const DXGI_SAMPLE_DESC& t) {
+ key->Record(t.Count, t.Quality);
+}
+
+template <>
+void CacheKeySerializer<D3D12_SHADER_BYTECODE>::Serialize(CacheKey* key,
+ const D3D12_SHADER_BYTECODE& t) {
+ key->RecordIterable(reinterpret_cast<const uint8_t*>(t.pShaderBytecode), t.BytecodeLength);
+}
+
+template <>
+void CacheKeySerializer<D3D12_GRAPHICS_PIPELINE_STATE_DESC>::Serialize(
+ CacheKey* key,
+ const D3D12_GRAPHICS_PIPELINE_STATE_DESC& t) {
+    // Don't record pRootSignature as we already record the signature blob in pipeline layout.
+ // Don't record CachedPSO as it is in the cached blob.
+ key->Record(t.VS)
+ .Record(t.PS)
+ .Record(t.DS)
+ .Record(t.HS)
+ .Record(t.GS)
+ .Record(t.StreamOutput)
+ .Record(t.BlendState)
+ .Record(t.SampleMask)
+ .Record(t.RasterizerState)
+ .Record(t.DepthStencilState)
+ .Record(t.InputLayout)
+ .Record(t.IBStripCutValue)
+ .Record(t.PrimitiveTopologyType)
+ .RecordIterable(t.RTVFormats, t.NumRenderTargets)
+ .Record(t.DSVFormat)
+ .Record(t.SampleDesc)
+ .Record(t.NodeMask)
+ .Record(t.Flags);
+}
+
+template <>
+void CacheKeySerializer<ID3DBlob>::Serialize(CacheKey* key, const ID3DBlob& t) {
+    // Workaround: GetBufferPointer and GetBufferSize are not marked as const
+ ID3DBlob* pBlob = const_cast<ID3DBlob*>(&t);
+ key->RecordIterable(reinterpret_cast<uint8_t*>(pBlob->GetBufferPointer()),
+ pBlob->GetBufferSize());
+}
+
+} // namespace dawn::native
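
These serializers flatten each D3D12 descriptor struct into the cache key by recording its members (and iterable arrays) while skipping pointers such as pRootSignature or CachedPSO whose contents are captured elsewhere. As a rough, hypothetical illustration of that pattern only (not Dawn's actual CacheKey API), a toy byte-appending key with the same chaining Record()/RecordIterable() shape could look like this:

#include <cstdint>
#include <cstring>
#include <type_traits>
#include <vector>

class ToyCacheKey {
  public:
    template <typename T>
    ToyCacheKey& Record(const T& value) {
        static_assert(std::is_trivially_copyable_v<T>, "only raw bytes are recorded here");
        const auto* bytes = reinterpret_cast<const uint8_t*>(&value);
        mBytes.insert(mBytes.end(), bytes, bytes + sizeof(T));
        return *this;  // allows chaining, like key->Record(a).Record(b)
    }

    template <typename T>
    ToyCacheKey& RecordIterable(const T* data, size_t count) {
        Record(count);  // include the length so {1,2},{3} and {1},{2,3} differ
        for (size_t i = 0; i < count; ++i) {
            Record(data[i]);
        }
        return *this;
    }

    const std::vector<uint8_t>& Bytes() const { return mBytes; }

  private:
    std::vector<uint8_t> mBytes;
};

// Usage with a struct shaped like DXGI_SAMPLE_DESC.
struct SampleDesc {
    uint32_t Count;
    uint32_t Quality;
};

int main() {
    ToyCacheKey key;
    SampleDesc desc{4, 0};
    key.Record(desc.Count).Record(desc.Quality);
    return key.Bytes().size() == 8 ? 0 : 1;
}
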
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/CommandAllocatorManager.cpp b/chromium/third_party/dawn/src/dawn/native/d3d12/CommandAllocatorManager.cpp
index 88ac0b8aa54..892cc3277b5 100644
--- a/chromium/third_party/dawn/src/dawn/native/d3d12/CommandAllocatorManager.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/CommandAllocatorManager.cpp
@@ -22,51 +22,51 @@
namespace dawn::native::d3d12 {
- CommandAllocatorManager::CommandAllocatorManager(Device* device)
- : device(device), mAllocatorCount(0) {
- mFreeAllocators.set();
- }
+CommandAllocatorManager::CommandAllocatorManager(Device* device)
+ : device(device), mAllocatorCount(0) {
+ mFreeAllocators.set();
+}
- ResultOrError<ID3D12CommandAllocator*> CommandAllocatorManager::ReserveCommandAllocator() {
- // If there are no free allocators, get the oldest serial in flight and wait on it
- if (mFreeAllocators.none()) {
- const ExecutionSerial firstSerial = mInFlightCommandAllocators.FirstSerial();
- DAWN_TRY(device->WaitForSerial(firstSerial));
- DAWN_TRY(Tick(firstSerial));
- }
+ResultOrError<ID3D12CommandAllocator*> CommandAllocatorManager::ReserveCommandAllocator() {
+ // If there are no free allocators, get the oldest serial in flight and wait on it
+ if (mFreeAllocators.none()) {
+ const ExecutionSerial firstSerial = mInFlightCommandAllocators.FirstSerial();
+ DAWN_TRY(device->WaitForSerial(firstSerial));
+ DAWN_TRY(Tick(firstSerial));
+ }
- ASSERT(mFreeAllocators.any());
+ ASSERT(mFreeAllocators.any());
- // Get the index of the first free allocator from the bitset
- unsigned int firstFreeIndex = *(IterateBitSet(mFreeAllocators).begin());
+ // Get the index of the first free allocator from the bitset
+ unsigned int firstFreeIndex = *(IterateBitSet(mFreeAllocators).begin());
- if (firstFreeIndex >= mAllocatorCount) {
- ASSERT(firstFreeIndex == mAllocatorCount);
- mAllocatorCount++;
- DAWN_TRY(CheckHRESULT(device->GetD3D12Device()->CreateCommandAllocator(
- D3D12_COMMAND_LIST_TYPE_DIRECT,
- IID_PPV_ARGS(&mCommandAllocators[firstFreeIndex])),
- "D3D12 create command allocator"));
- }
+ if (firstFreeIndex >= mAllocatorCount) {
+ ASSERT(firstFreeIndex == mAllocatorCount);
+ mAllocatorCount++;
+ DAWN_TRY(CheckHRESULT(
+ device->GetD3D12Device()->CreateCommandAllocator(
+ D3D12_COMMAND_LIST_TYPE_DIRECT, IID_PPV_ARGS(&mCommandAllocators[firstFreeIndex])),
+ "D3D12 create command allocator"));
+ }
- // Mark the command allocator as used
- mFreeAllocators.reset(firstFreeIndex);
+ // Mark the command allocator as used
+ mFreeAllocators.reset(firstFreeIndex);
- // Enqueue the command allocator. It will be scheduled for reset after the next
- // ExecuteCommandLists
- mInFlightCommandAllocators.Enqueue({mCommandAllocators[firstFreeIndex], firstFreeIndex},
- device->GetPendingCommandSerial());
- return mCommandAllocators[firstFreeIndex].Get();
- }
+ // Enqueue the command allocator. It will be scheduled for reset after the next
+ // ExecuteCommandLists
+ mInFlightCommandAllocators.Enqueue({mCommandAllocators[firstFreeIndex], firstFreeIndex},
+ device->GetPendingCommandSerial());
+ return mCommandAllocators[firstFreeIndex].Get();
+}
- MaybeError CommandAllocatorManager::Tick(ExecutionSerial lastCompletedSerial) {
- // Reset all command allocators that are no longer in flight
- for (auto it : mInFlightCommandAllocators.IterateUpTo(lastCompletedSerial)) {
- DAWN_TRY(CheckHRESULT(it.commandAllocator->Reset(), "D3D12 reset command allocator"));
- mFreeAllocators.set(it.index);
- }
- mInFlightCommandAllocators.ClearUpTo(lastCompletedSerial);
- return {};
+MaybeError CommandAllocatorManager::Tick(ExecutionSerial lastCompletedSerial) {
+ // Reset all command allocators that are no longer in flight
+ for (auto it : mInFlightCommandAllocators.IterateUpTo(lastCompletedSerial)) {
+ DAWN_TRY(CheckHRESULT(it.commandAllocator->Reset(), "D3D12 reset command allocator"));
+ mFreeAllocators.set(it.index);
}
+ mInFlightCommandAllocators.ClearUpTo(lastCompletedSerial);
+ return {};
+}
} // namespace dawn::native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/CommandAllocatorManager.h b/chromium/third_party/dawn/src/dawn/native/d3d12/CommandAllocatorManager.h
index baec984bcb1..708ba44cfb4 100644
--- a/chromium/third_party/dawn/src/dawn/native/d3d12/CommandAllocatorManager.h
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/CommandAllocatorManager.h
@@ -15,44 +15,44 @@
#ifndef SRC_DAWN_NATIVE_D3D12_COMMANDALLOCATORMANAGER_H_
#define SRC_DAWN_NATIVE_D3D12_COMMANDALLOCATORMANAGER_H_
+#include <bitset>
+
#include "dawn/native/d3d12/d3d12_platform.h"
#include "dawn/common/SerialQueue.h"
#include "dawn/native/Error.h"
#include "dawn/native/IntegerTypes.h"
-#include <bitset>
-
namespace dawn::native::d3d12 {
- class Device;
-
- class CommandAllocatorManager {
- public:
- CommandAllocatorManager(Device* device);
+class Device;
- // A CommandAllocator that is reserved must be used on the next ExecuteCommandLists
- // otherwise its commands may be reset before execution has completed on the GPU
- ResultOrError<ID3D12CommandAllocator*> ReserveCommandAllocator();
- MaybeError Tick(ExecutionSerial lastCompletedSerial);
+class CommandAllocatorManager {
+ public:
+ explicit CommandAllocatorManager(Device* device);
- private:
- Device* device;
+ // A CommandAllocator that is reserved must be used on the next ExecuteCommandLists
+ // otherwise its commands may be reset before execution has completed on the GPU
+ ResultOrError<ID3D12CommandAllocator*> ReserveCommandAllocator();
+ MaybeError Tick(ExecutionSerial lastCompletedSerial);
- // This must be at least 2 because the Device and Queue use separate command allocators
- static constexpr unsigned int kMaxCommandAllocators = 32;
- unsigned int mAllocatorCount;
+ private:
+ Device* device;
- struct IndexedCommandAllocator {
- ComPtr<ID3D12CommandAllocator> commandAllocator;
- unsigned int index;
- };
+ // This must be at least 2 because the Device and Queue use separate command allocators
+ static constexpr unsigned int kMaxCommandAllocators = 32;
+ unsigned int mAllocatorCount;
- ComPtr<ID3D12CommandAllocator> mCommandAllocators[kMaxCommandAllocators];
- std::bitset<kMaxCommandAllocators> mFreeAllocators;
- SerialQueue<ExecutionSerial, IndexedCommandAllocator> mInFlightCommandAllocators;
+ struct IndexedCommandAllocator {
+ ComPtr<ID3D12CommandAllocator> commandAllocator;
+ unsigned int index;
};
+ ComPtr<ID3D12CommandAllocator> mCommandAllocators[kMaxCommandAllocators];
+ std::bitset<kMaxCommandAllocators> mFreeAllocators;
+ SerialQueue<ExecutionSerial, IndexedCommandAllocator> mInFlightCommandAllocators;
+};
+
} // namespace dawn::native::d3d12
#endif // SRC_DAWN_NATIVE_D3D12_COMMANDALLOCATORMANAGER_H_
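
CommandAllocatorManager recycles a fixed pool of up to 32 allocators: a bitset marks free slots, and a serial queue remembers which submission last used each busy slot so Tick() can hand it back once that serial has completed (when the pool is exhausted, ReserveCommandAllocator() first waits on the oldest in-flight serial). A minimal, D3D12-free sketch of just that recycling scheme, with made-up names:

#include <bitset>
#include <cassert>
#include <cstdint>
#include <deque>
#include <utility>

class RecyclingPool {
  public:
    static constexpr unsigned kSlots = 32;

    RecyclingPool() { mFree.set(); }  // start with every slot free

    // Hands out a free slot; the caller submits work tagged with `pendingSerial`.
    unsigned Reserve(uint64_t pendingSerial) {
        assert(mFree.any());  // the real code waits on the oldest in-flight serial instead
        unsigned slot = 0;
        while (!mFree.test(slot)) {
            ++slot;
        }
        mFree.reset(slot);
        mInFlight.push_back({pendingSerial, slot});
        return slot;
    }

    // Called once the consumer reports `completedSerial` as finished.
    void Tick(uint64_t completedSerial) {
        while (!mInFlight.empty() && mInFlight.front().first <= completedSerial) {
            mFree.set(mInFlight.front().second);  // the slot may now be reset and reused
            mInFlight.pop_front();
        }
    }

  private:
    std::bitset<kSlots> mFree;
    std::deque<std::pair<uint64_t, unsigned>> mInFlight;  // (serial, slot) in FIFO order
};

int main() {
    RecyclingPool pool;
    unsigned a = pool.Reserve(/*pendingSerial=*/1);
    unsigned b = pool.Reserve(/*pendingSerial=*/2);
    pool.Tick(/*completedSerial=*/1);  // slot `a` is free again, `b` is still in flight
    return (a != b) ? 0 : 1;
}
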
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/CommandBufferD3D12.cpp b/chromium/third_party/dawn/src/dawn/native/d3d12/CommandBufferD3D12.cpp
index 86022c72e28..77c7ba3ca78 100644
--- a/chromium/third_party/dawn/src/dawn/native/d3d12/CommandBufferD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/CommandBufferD3D12.cpp
@@ -14,6 +14,10 @@
#include "dawn/native/d3d12/CommandBufferD3D12.h"
+#include <algorithm>
+#include <utility>
+#include <vector>
+
#include "dawn/native/BindGroupTracker.h"
#include "dawn/native/CommandValidation.h"
#include "dawn/native/DynamicUploader.h"
@@ -35,1642 +39,1573 @@
namespace dawn::native::d3d12 {
- namespace {
+namespace {
- DXGI_FORMAT DXGIIndexFormat(wgpu::IndexFormat format) {
- switch (format) {
- case wgpu::IndexFormat::Undefined:
- return DXGI_FORMAT_UNKNOWN;
- case wgpu::IndexFormat::Uint16:
- return DXGI_FORMAT_R16_UINT;
- case wgpu::IndexFormat::Uint32:
- return DXGI_FORMAT_R32_UINT;
- }
+DXGI_FORMAT DXGIIndexFormat(wgpu::IndexFormat format) {
+ switch (format) {
+ case wgpu::IndexFormat::Undefined:
+ return DXGI_FORMAT_UNKNOWN;
+ case wgpu::IndexFormat::Uint16:
+ return DXGI_FORMAT_R16_UINT;
+ case wgpu::IndexFormat::Uint32:
+ return DXGI_FORMAT_R32_UINT;
+ }
+}
+
+D3D12_QUERY_TYPE D3D12QueryType(wgpu::QueryType type) {
+ switch (type) {
+ case wgpu::QueryType::Occlusion:
+ return D3D12_QUERY_TYPE_BINARY_OCCLUSION;
+ case wgpu::QueryType::PipelineStatistics:
+ return D3D12_QUERY_TYPE_PIPELINE_STATISTICS;
+ case wgpu::QueryType::Timestamp:
+ return D3D12_QUERY_TYPE_TIMESTAMP;
+ }
+}
+
+bool CanUseCopyResource(const TextureCopy& src, const TextureCopy& dst, const Extent3D& copySize) {
+ // Checked by validation
+ ASSERT(src.texture->GetSampleCount() == dst.texture->GetSampleCount());
+ ASSERT(src.texture->GetFormat().CopyCompatibleWith(dst.texture->GetFormat()));
+ ASSERT(src.aspect == dst.aspect);
+
+ const Extent3D& srcSize = src.texture->GetSize();
+ const Extent3D& dstSize = dst.texture->GetSize();
+
+ // https://docs.microsoft.com/en-us/windows/win32/api/d3d12/nf-d3d12-id3d12graphicscommandlist-copyresource
+ // In order to use D3D12's copy resource, the textures must be the same dimensions, and
+ // the copy must be of the entire resource.
+ // TODO(dawn:129): Support 1D textures.
+ return src.aspect == src.texture->GetFormat().aspects &&
+ src.texture->GetDimension() == dst.texture->GetDimension() && //
+ dst.texture->GetNumMipLevels() == 1 && //
+ src.texture->GetNumMipLevels() == 1 && // A copy command is of a single mip, so
+ // if a resource has more than one, we
+ // definitely cannot use CopyResource.
+ copySize.width == dstSize.width && //
+ copySize.width == srcSize.width && //
+ copySize.height == dstSize.height && //
+ copySize.height == srcSize.height && //
+ copySize.depthOrArrayLayers == dstSize.depthOrArrayLayers && //
+ copySize.depthOrArrayLayers == srcSize.depthOrArrayLayers;
+}
+
+void RecordWriteTimestampCmd(ID3D12GraphicsCommandList* commandList, WriteTimestampCmd* cmd) {
+ QuerySet* querySet = ToBackend(cmd->querySet.Get());
+ ASSERT(D3D12QueryType(querySet->GetQueryType()) == D3D12_QUERY_TYPE_TIMESTAMP);
+ commandList->EndQuery(querySet->GetQueryHeap(), D3D12_QUERY_TYPE_TIMESTAMP, cmd->queryIndex);
+}
+
+void RecordResolveQuerySetCmd(ID3D12GraphicsCommandList* commandList,
+ Device* device,
+ QuerySet* querySet,
+ uint32_t firstQuery,
+ uint32_t queryCount,
+ Buffer* destination,
+ uint64_t destinationOffset) {
+ const std::vector<bool>& availability = querySet->GetQueryAvailability();
+
+ auto currentIt = availability.begin() + firstQuery;
+ auto lastIt = availability.begin() + firstQuery + queryCount;
+
+ // Traverse available queries in the range of [firstQuery, firstQuery + queryCount - 1]
+ while (currentIt != lastIt) {
+ auto firstTrueIt = std::find(currentIt, lastIt, true);
+ // No available query found for resolving
+ if (firstTrueIt == lastIt) {
+ break;
}
+ auto nextFalseIt = std::find(firstTrueIt, lastIt, false);
- D3D12_QUERY_TYPE D3D12QueryType(wgpu::QueryType type) {
- switch (type) {
- case wgpu::QueryType::Occlusion:
- return D3D12_QUERY_TYPE_BINARY_OCCLUSION;
- case wgpu::QueryType::PipelineStatistics:
- return D3D12_QUERY_TYPE_PIPELINE_STATISTICS;
- case wgpu::QueryType::Timestamp:
- return D3D12_QUERY_TYPE_TIMESTAMP;
- }
- }
+ // The query index of firstTrueIt where the resolving starts
+ uint32_t resolveQueryIndex = std::distance(availability.begin(), firstTrueIt);
+ // The queries count between firstTrueIt and nextFalseIt need to be resolved
+ uint32_t resolveQueryCount = std::distance(firstTrueIt, nextFalseIt);
- bool CanUseCopyResource(const TextureCopy& src,
- const TextureCopy& dst,
- const Extent3D& copySize) {
- // Checked by validation
- ASSERT(src.texture->GetSampleCount() == dst.texture->GetSampleCount());
- ASSERT(src.texture->GetFormat().CopyCompatibleWith(dst.texture->GetFormat()));
- ASSERT(src.aspect == dst.aspect);
-
- const Extent3D& srcSize = src.texture->GetSize();
- const Extent3D& dstSize = dst.texture->GetSize();
-
- // https://docs.microsoft.com/en-us/windows/win32/api/d3d12/nf-d3d12-id3d12graphicscommandlist-copyresource
- // In order to use D3D12's copy resource, the textures must be the same dimensions, and
- // the copy must be of the entire resource.
- // TODO(dawn:129): Support 1D textures.
- return src.aspect == src.texture->GetFormat().aspects &&
- src.texture->GetDimension() == dst.texture->GetDimension() && //
- dst.texture->GetNumMipLevels() == 1 && //
- src.texture->GetNumMipLevels() == 1 && // A copy command is of a single mip, so
- // if a resource has more than one, we
- // definitely cannot use CopyResource.
- copySize.width == dstSize.width && //
- copySize.width == srcSize.width && //
- copySize.height == dstSize.height && //
- copySize.height == srcSize.height && //
- copySize.depthOrArrayLayers == dstSize.depthOrArrayLayers && //
- copySize.depthOrArrayLayers == srcSize.depthOrArrayLayers;
- }
+ // Calculate destinationOffset based on the current resolveQueryIndex and firstQuery
+ uint32_t resolveDestinationOffset =
+ destinationOffset + (resolveQueryIndex - firstQuery) * sizeof(uint64_t);
+
+ // Resolve the queries between firstTrueIt and nextFalseIt (which is at most lastIt)
+ commandList->ResolveQueryData(
+ querySet->GetQueryHeap(), D3D12QueryType(querySet->GetQueryType()), resolveQueryIndex,
+ resolveQueryCount, destination->GetD3D12Resource(), resolveDestinationOffset);
- void RecordWriteTimestampCmd(ID3D12GraphicsCommandList* commandList,
- WriteTimestampCmd* cmd) {
- QuerySet* querySet = ToBackend(cmd->querySet.Get());
- ASSERT(D3D12QueryType(querySet->GetQueryType()) == D3D12_QUERY_TYPE_TIMESTAMP);
- commandList->EndQuery(querySet->GetQueryHeap(), D3D12_QUERY_TYPE_TIMESTAMP,
- cmd->queryIndex);
+ // Set current iterator to next false
+ currentIt = nextFalseIt;
+ }
+}
+
+void RecordFirstIndexOffset(ID3D12GraphicsCommandList* commandList,
+ RenderPipeline* pipeline,
+ uint32_t firstVertex,
+ uint32_t firstInstance) {
+ if (!pipeline->UsesVertexOrInstanceIndex()) {
+ return;
+ }
+ std::array<uint32_t, 2> offsets{firstVertex, firstInstance};
+ PipelineLayout* layout = ToBackend(pipeline->GetLayout());
+ commandList->SetGraphicsRoot32BitConstants(layout->GetFirstIndexOffsetParameterIndex(),
+ offsets.size(), offsets.data(), 0);
+}
+
+bool ShouldCopyUsingTemporaryBuffer(DeviceBase* device,
+ const TextureCopy& srcCopy,
+ const TextureCopy& dstCopy) {
+ // Currently we only need the workaround for an Intel D3D12 driver issue.
+ if (device->IsToggleEnabled(
+ Toggle::UseTempBufferInSmallFormatTextureToTextureCopyFromGreaterToLessMipLevel)) {
+ bool copyToLesserLevel = srcCopy.mipLevel > dstCopy.mipLevel;
+ ASSERT(srcCopy.texture->GetFormat().CopyCompatibleWith(dstCopy.texture->GetFormat()));
+
+ // GetAspectInfo(aspect) requires HasOneBit(aspect) == true, plus the texel block
+ // sizes of depth stencil formats are always no less than 4 bytes.
+ bool isSmallColorFormat =
+ HasOneBit(srcCopy.aspect) &&
+ srcCopy.texture->GetFormat().GetAspectInfo(srcCopy.aspect).block.byteSize < 4u;
+ if (copyToLesserLevel && isSmallColorFormat) {
+ return true;
}
+ }
- void RecordResolveQuerySetCmd(ID3D12GraphicsCommandList* commandList,
- Device* device,
- QuerySet* querySet,
- uint32_t firstQuery,
- uint32_t queryCount,
- Buffer* destination,
- uint64_t destinationOffset) {
- const std::vector<bool>& availability = querySet->GetQueryAvailability();
-
- auto currentIt = availability.begin() + firstQuery;
- auto lastIt = availability.begin() + firstQuery + queryCount;
-
- // Traverse available queries in the range of [firstQuery, firstQuery + queryCount - 1]
- while (currentIt != lastIt) {
- auto firstTrueIt = std::find(currentIt, lastIt, true);
- // No available query found for resolving
- if (firstTrueIt == lastIt) {
- break;
- }
- auto nextFalseIt = std::find(firstTrueIt, lastIt, false);
+ return false;
+}
+
+MaybeError RecordCopyTextureWithTemporaryBuffer(CommandRecordingContext* recordingContext,
+ const TextureCopy& srcCopy,
+ const TextureCopy& dstCopy,
+ const Extent3D& copySize) {
+ ASSERT(srcCopy.texture->GetFormat().format == dstCopy.texture->GetFormat().format);
+ ASSERT(srcCopy.aspect == dstCopy.aspect);
+ dawn::native::Format format = srcCopy.texture->GetFormat();
+ const TexelBlockInfo& blockInfo = format.GetAspectInfo(srcCopy.aspect).block;
+ ASSERT(copySize.width % blockInfo.width == 0);
+ uint32_t widthInBlocks = copySize.width / blockInfo.width;
+ ASSERT(copySize.height % blockInfo.height == 0);
+ uint32_t heightInBlocks = copySize.height / blockInfo.height;
+
+ // Create tempBuffer
+ uint32_t bytesPerRow = Align(blockInfo.byteSize * widthInBlocks, kTextureBytesPerRowAlignment);
+ uint32_t rowsPerImage = heightInBlocks;
+
+ // The size of temporary buffer isn't needed to be a multiple of 4 because we don't
+ // need to set mappedAtCreation to be true.
+ auto tempBufferSize =
+ ComputeRequiredBytesInCopy(blockInfo, copySize, bytesPerRow, rowsPerImage);
+
+ BufferDescriptor tempBufferDescriptor;
+ tempBufferDescriptor.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
+ tempBufferDescriptor.size = tempBufferSize.AcquireSuccess();
+ Device* device = ToBackend(srcCopy.texture->GetDevice());
+ Ref<BufferBase> tempBufferBase;
+ DAWN_TRY_ASSIGN(tempBufferBase, device->CreateBuffer(&tempBufferDescriptor));
+ Ref<Buffer> tempBuffer = ToBackend(std::move(tempBufferBase));
+
+ BufferCopy bufferCopy;
+ bufferCopy.buffer = tempBuffer;
+ bufferCopy.offset = 0;
+ bufferCopy.bytesPerRow = bytesPerRow;
+ bufferCopy.rowsPerImage = rowsPerImage;
+
+ // Copy from source texture into tempBuffer
+ tempBuffer->TrackUsageAndTransitionNow(recordingContext, wgpu::BufferUsage::CopyDst);
+ RecordBufferTextureCopy(BufferTextureCopyDirection::T2B, recordingContext->GetCommandList(),
+ bufferCopy, srcCopy, copySize);
+
+ // Copy from tempBuffer into destination texture
+ tempBuffer->TrackUsageAndTransitionNow(recordingContext, wgpu::BufferUsage::CopySrc);
+ RecordBufferTextureCopy(BufferTextureCopyDirection::B2T, recordingContext->GetCommandList(),
+ bufferCopy, dstCopy, copySize);
+
+ // Save tempBuffer into recordingContext
+ recordingContext->AddToTempBuffers(std::move(tempBuffer));
+
+ return {};
+}
+
+void RecordNumWorkgroupsForDispatch(ID3D12GraphicsCommandList* commandList,
+ ComputePipeline* pipeline,
+ DispatchCmd* dispatch) {
+ if (!pipeline->UsesNumWorkgroups()) {
+ return;
+ }
- // The query index of firstTrueIt where the resolving starts
- uint32_t resolveQueryIndex = std::distance(availability.begin(), firstTrueIt);
- // The queries count between firstTrueIt and nextFalseIt need to be resolved
- uint32_t resolveQueryCount = std::distance(firstTrueIt, nextFalseIt);
+ PipelineLayout* layout = ToBackend(pipeline->GetLayout());
+ commandList->SetComputeRoot32BitConstants(layout->GetNumWorkgroupsParameterIndex(), 3, dispatch,
+ 0);
+}
- // Calculate destinationOffset based on the current resolveQueryIndex and firstQuery
- uint32_t resolveDestinationOffset =
- destinationOffset + (resolveQueryIndex - firstQuery) * sizeof(uint64_t);
+// Records the necessary barriers for a synchronization scope using the resource usage
+// data pre-computed in the frontend. Also performs lazy initialization if required.
+// Returns whether any UAV are used in the synchronization scope.
+bool TransitionAndClearForSyncScope(CommandRecordingContext* commandContext,
+ const SyncScopeResourceUsage& usages) {
+ std::vector<D3D12_RESOURCE_BARRIER> barriers;
- // Resolve the queries between firstTrueIt and nextFalseIt (which is at most lastIt)
- commandList->ResolveQueryData(
- querySet->GetQueryHeap(), D3D12QueryType(querySet->GetQueryType()),
- resolveQueryIndex, resolveQueryCount, destination->GetD3D12Resource(),
- resolveDestinationOffset);
+ ID3D12GraphicsCommandList* commandList = commandContext->GetCommandList();
- // Set current iterator to next false
- currentIt = nextFalseIt;
- }
- }
+ wgpu::BufferUsage bufferUsages = wgpu::BufferUsage::None;
- void RecordFirstIndexOffset(ID3D12GraphicsCommandList* commandList,
- RenderPipeline* pipeline,
- uint32_t firstVertex,
- uint32_t firstInstance) {
- const FirstOffsetInfo& firstOffsetInfo = pipeline->GetFirstOffsetInfo();
- if (!firstOffsetInfo.usesVertexIndex && !firstOffsetInfo.usesInstanceIndex) {
- return;
- }
- std::array<uint32_t, 2> offsets{};
- uint32_t count = 0;
- if (firstOffsetInfo.usesVertexIndex) {
- offsets[firstOffsetInfo.vertexIndexOffset / sizeof(uint32_t)] = firstVertex;
- ++count;
- }
- if (firstOffsetInfo.usesInstanceIndex) {
- offsets[firstOffsetInfo.instanceIndexOffset / sizeof(uint32_t)] = firstInstance;
- ++count;
- }
- PipelineLayout* layout = ToBackend(pipeline->GetLayout());
- commandList->SetGraphicsRoot32BitConstants(layout->GetFirstIndexOffsetParameterIndex(),
- count, offsets.data(), 0);
- }
+ for (size_t i = 0; i < usages.buffers.size(); ++i) {
+ Buffer* buffer = ToBackend(usages.buffers[i]);
- bool ShouldCopyUsingTemporaryBuffer(DeviceBase* device,
- const TextureCopy& srcCopy,
- const TextureCopy& dstCopy) {
- // Currently we only need the workaround for an Intel D3D12 driver issue.
- if (device->IsToggleEnabled(
- Toggle::
- UseTempBufferInSmallFormatTextureToTextureCopyFromGreaterToLessMipLevel)) {
- bool copyToLesserLevel = srcCopy.mipLevel > dstCopy.mipLevel;
- ASSERT(
- srcCopy.texture->GetFormat().CopyCompatibleWith(dstCopy.texture->GetFormat()));
-
- // GetAspectInfo(aspect) requires HasOneBit(aspect) == true, plus the texel block
- // sizes of depth stencil formats are always no less than 4 bytes.
- bool isSmallColorFormat =
- HasOneBit(srcCopy.aspect) &&
- srcCopy.texture->GetFormat().GetAspectInfo(srcCopy.aspect).block.byteSize < 4u;
- if (copyToLesserLevel && isSmallColorFormat) {
- return true;
- }
- }
+ // TODO(crbug.com/dawn/852): clear storage buffers with
+ // ClearUnorderedAccessView*().
+ buffer->GetDevice()->ConsumedError(buffer->EnsureDataInitialized(commandContext));
- return false;
- }
-
- MaybeError RecordCopyTextureWithTemporaryBuffer(CommandRecordingContext* recordingContext,
- const TextureCopy& srcCopy,
- const TextureCopy& dstCopy,
- const Extent3D& copySize) {
- ASSERT(srcCopy.texture->GetFormat().format == dstCopy.texture->GetFormat().format);
- ASSERT(srcCopy.aspect == dstCopy.aspect);
- dawn::native::Format format = srcCopy.texture->GetFormat();
- const TexelBlockInfo& blockInfo = format.GetAspectInfo(srcCopy.aspect).block;
- ASSERT(copySize.width % blockInfo.width == 0);
- uint32_t widthInBlocks = copySize.width / blockInfo.width;
- ASSERT(copySize.height % blockInfo.height == 0);
- uint32_t heightInBlocks = copySize.height / blockInfo.height;
-
- // Create tempBuffer
- uint32_t bytesPerRow =
- Align(blockInfo.byteSize * widthInBlocks, kTextureBytesPerRowAlignment);
- uint32_t rowsPerImage = heightInBlocks;
-
- // The size of temporary buffer isn't needed to be a multiple of 4 because we don't
- // need to set mappedAtCreation to be true.
- auto tempBufferSize =
- ComputeRequiredBytesInCopy(blockInfo, copySize, bytesPerRow, rowsPerImage);
-
- BufferDescriptor tempBufferDescriptor;
- tempBufferDescriptor.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
- tempBufferDescriptor.size = tempBufferSize.AcquireSuccess();
- Device* device = ToBackend(srcCopy.texture->GetDevice());
- Ref<BufferBase> tempBufferBase;
- DAWN_TRY_ASSIGN(tempBufferBase, device->CreateBuffer(&tempBufferDescriptor));
- Ref<Buffer> tempBuffer = ToBackend(std::move(tempBufferBase));
-
- BufferCopy bufferCopy;
- bufferCopy.buffer = tempBuffer;
- bufferCopy.offset = 0;
- bufferCopy.bytesPerRow = bytesPerRow;
- bufferCopy.rowsPerImage = rowsPerImage;
-
- // Copy from source texture into tempBuffer
- tempBuffer->TrackUsageAndTransitionNow(recordingContext, wgpu::BufferUsage::CopyDst);
- RecordBufferTextureCopy(BufferTextureCopyDirection::T2B,
- recordingContext->GetCommandList(), bufferCopy, srcCopy,
- copySize);
-
- // Copy from tempBuffer into destination texture
- tempBuffer->TrackUsageAndTransitionNow(recordingContext, wgpu::BufferUsage::CopySrc);
- RecordBufferTextureCopy(BufferTextureCopyDirection::B2T,
- recordingContext->GetCommandList(), bufferCopy, dstCopy,
- copySize);
-
- // Save tempBuffer into recordingContext
- recordingContext->AddToTempBuffers(std::move(tempBuffer));
-
- return {};
+ D3D12_RESOURCE_BARRIER barrier;
+ if (buffer->TrackUsageAndGetResourceBarrier(commandContext, &barrier,
+ usages.bufferUsages[i])) {
+ barriers.push_back(barrier);
}
+ bufferUsages |= usages.bufferUsages[i];
+ }
- void RecordNumWorkgroupsForDispatch(ID3D12GraphicsCommandList* commandList,
- ComputePipeline* pipeline,
- DispatchCmd* dispatch) {
- if (!pipeline->UsesNumWorkgroups()) {
- return;
- }
+ wgpu::TextureUsage textureUsages = wgpu::TextureUsage::None;
- PipelineLayout* layout = ToBackend(pipeline->GetLayout());
- commandList->SetComputeRoot32BitConstants(layout->GetNumWorkgroupsParameterIndex(), 3,
- dispatch, 0);
- }
+ for (size_t i = 0; i < usages.textures.size(); ++i) {
+ Texture* texture = ToBackend(usages.textures[i]);
- // Records the necessary barriers for a synchronization scope using the resource usage
- // data pre-computed in the frontend. Also performs lazy initialization if required.
- // Returns whether any UAV are used in the synchronization scope.
- bool TransitionAndClearForSyncScope(CommandRecordingContext* commandContext,
- const SyncScopeResourceUsage& usages) {
- std::vector<D3D12_RESOURCE_BARRIER> barriers;
+ // Clear subresources that are not render attachments. Render attachments will be
+ // cleared in RecordBeginRenderPass by setting the loadop to clear when the texture
+ // subresource has not been initialized before the render pass.
+ usages.textureUsages[i].Iterate(
+ [&](const SubresourceRange& range, wgpu::TextureUsage usage) {
+ if (usage & ~wgpu::TextureUsage::RenderAttachment) {
+ texture->EnsureSubresourceContentInitialized(commandContext, range);
+ }
+ textureUsages |= usage;
+ });
- ID3D12GraphicsCommandList* commandList = commandContext->GetCommandList();
+ ToBackend(usages.textures[i])
+ ->TrackUsageAndGetResourceBarrierForPass(commandContext, &barriers,
+ usages.textureUsages[i]);
+ }
- wgpu::BufferUsage bufferUsages = wgpu::BufferUsage::None;
+ if (barriers.size()) {
+ commandList->ResourceBarrier(barriers.size(), barriers.data());
+ }
- for (size_t i = 0; i < usages.buffers.size(); ++i) {
- Buffer* buffer = ToBackend(usages.buffers[i]);
+ return (bufferUsages & wgpu::BufferUsage::Storage ||
+ textureUsages & wgpu::TextureUsage::StorageBinding);
+}
- // TODO(crbug.com/dawn/852): clear storage buffers with
- // ClearUnorderedAccessView*().
- buffer->GetDevice()->ConsumedError(buffer->EnsureDataInitialized(commandContext));
+} // anonymous namespace
- D3D12_RESOURCE_BARRIER barrier;
- if (buffer->TrackUsageAndGetResourceBarrier(commandContext, &barrier,
- usages.bufferUsages[i])) {
- barriers.push_back(barrier);
- }
- bufferUsages |= usages.bufferUsages[i];
- }
+class BindGroupStateTracker : public BindGroupTrackerBase<false, uint64_t> {
+ using Base = BindGroupTrackerBase;
- wgpu::TextureUsage textureUsages = wgpu::TextureUsage::None;
+ public:
+ explicit BindGroupStateTracker(Device* device)
+ : BindGroupTrackerBase(),
+ mDevice(device),
+ mViewAllocator(device->GetViewShaderVisibleDescriptorAllocator()),
+ mSamplerAllocator(device->GetSamplerShaderVisibleDescriptorAllocator()) {}
- for (size_t i = 0; i < usages.textures.size(); ++i) {
- Texture* texture = ToBackend(usages.textures[i]);
+ void SetInComputePass(bool inCompute_) { mInCompute = inCompute_; }
- // Clear subresources that are not render attachments. Render attachments will be
- // cleared in RecordBeginRenderPass by setting the loadop to clear when the texture
- // subresource has not been initialized before the render pass.
- usages.textureUsages[i].Iterate(
- [&](const SubresourceRange& range, wgpu::TextureUsage usage) {
- if (usage & ~wgpu::TextureUsage::RenderAttachment) {
- texture->EnsureSubresourceContentInitialized(commandContext, range);
- }
- textureUsages |= usage;
- });
+ MaybeError Apply(CommandRecordingContext* commandContext) {
+ BeforeApply();
- ToBackend(usages.textures[i])
- ->TrackUsageAndGetResourceBarrierForPass(commandContext, &barriers,
- usages.textureUsages[i]);
- }
-
- if (barriers.size()) {
- commandList->ResourceBarrier(barriers.size(), barriers.data());
+ ID3D12GraphicsCommandList* commandList = commandContext->GetCommandList();
+ UpdateRootSignatureIfNecessary(commandList);
+
+ // Bindgroups are allocated in shader-visible descriptor heaps which are managed by a
+ // ringbuffer. There can be a single shader-visible descriptor heap of each type bound
+ // at any given time. This means that when we switch heaps, all other currently bound
+ // bindgroups must be re-populated. Bindgroups can fail allocation gracefully which is
+ // the signal to change the bounded heaps.
+ // Re-populating all bindgroups after the last one fails causes duplicated allocations
+ // to occur on overflow.
+ bool didCreateBindGroupViews = true;
+ bool didCreateBindGroupSamplers = true;
+ for (BindGroupIndex index : IterateBitSet(mDirtyBindGroups)) {
+ BindGroup* group = ToBackend(mBindGroups[index]);
+ didCreateBindGroupViews = group->PopulateViews(mViewAllocator);
+ didCreateBindGroupSamplers = group->PopulateSamplers(mDevice, mSamplerAllocator);
+ if (!didCreateBindGroupViews && !didCreateBindGroupSamplers) {
+ break;
}
-
- return (bufferUsages & wgpu::BufferUsage::Storage ||
- textureUsages & wgpu::TextureUsage::StorageBinding);
}
- } // anonymous namespace
+ if (!didCreateBindGroupViews || !didCreateBindGroupSamplers) {
+ if (!didCreateBindGroupViews) {
+ DAWN_TRY(mViewAllocator->AllocateAndSwitchShaderVisibleHeap());
+ }
- class BindGroupStateTracker : public BindGroupTrackerBase<false, uint64_t> {
- using Base = BindGroupTrackerBase;
+ if (!didCreateBindGroupSamplers) {
+ DAWN_TRY(mSamplerAllocator->AllocateAndSwitchShaderVisibleHeap());
+ }
- public:
- BindGroupStateTracker(Device* device)
- : BindGroupTrackerBase(),
- mDevice(device),
- mViewAllocator(device->GetViewShaderVisibleDescriptorAllocator()),
- mSamplerAllocator(device->GetSamplerShaderVisibleDescriptorAllocator()) {
- }
+ mDirtyBindGroupsObjectChangedOrIsDynamic |= mBindGroupLayoutsMask;
+ mDirtyBindGroups |= mBindGroupLayoutsMask;
- void SetInComputePass(bool inCompute_) {
- mInCompute = inCompute_;
- }
+ // Must be called before applying the bindgroups.
+ SetID3D12DescriptorHeaps(commandList);
- MaybeError Apply(CommandRecordingContext* commandContext) {
- BeforeApply();
-
- ID3D12GraphicsCommandList* commandList = commandContext->GetCommandList();
- UpdateRootSignatureIfNecessary(commandList);
-
- // Bindgroups are allocated in shader-visible descriptor heaps which are managed by a
- // ringbuffer. There can be a single shader-visible descriptor heap of each type bound
- // at any given time. This means that when we switch heaps, all other currently bound
- // bindgroups must be re-populated. Bindgroups can fail allocation gracefully which is
- // the signal to change the bounded heaps.
- // Re-populating all bindgroups after the last one fails causes duplicated allocations
- // to occur on overflow.
- bool didCreateBindGroupViews = true;
- bool didCreateBindGroupSamplers = true;
- for (BindGroupIndex index : IterateBitSet(mDirtyBindGroups)) {
+ for (BindGroupIndex index : IterateBitSet(mBindGroupLayoutsMask)) {
BindGroup* group = ToBackend(mBindGroups[index]);
didCreateBindGroupViews = group->PopulateViews(mViewAllocator);
didCreateBindGroupSamplers = group->PopulateSamplers(mDevice, mSamplerAllocator);
- if (!didCreateBindGroupViews && !didCreateBindGroupSamplers) {
- break;
- }
+ ASSERT(didCreateBindGroupViews);
+ ASSERT(didCreateBindGroupSamplers);
}
+ }
- if (!didCreateBindGroupViews || !didCreateBindGroupSamplers) {
- if (!didCreateBindGroupViews) {
- DAWN_TRY(mViewAllocator->AllocateAndSwitchShaderVisibleHeap());
- }
-
- if (!didCreateBindGroupSamplers) {
- DAWN_TRY(mSamplerAllocator->AllocateAndSwitchShaderVisibleHeap());
- }
+ for (BindGroupIndex index : IterateBitSet(mDirtyBindGroupsObjectChangedOrIsDynamic)) {
+ BindGroup* group = ToBackend(mBindGroups[index]);
+ ApplyBindGroup(commandList, ToBackend(mPipelineLayout), index, group,
+ mDynamicOffsetCounts[index], mDynamicOffsets[index].data());
+ }
- mDirtyBindGroupsObjectChangedOrIsDynamic |= mBindGroupLayoutsMask;
- mDirtyBindGroups |= mBindGroupLayoutsMask;
+ AfterApply();
- // Must be called before applying the bindgroups.
- SetID3D12DescriptorHeaps(commandList);
+ return {};
+ }
- for (BindGroupIndex index : IterateBitSet(mBindGroupLayoutsMask)) {
- BindGroup* group = ToBackend(mBindGroups[index]);
- didCreateBindGroupViews = group->PopulateViews(mViewAllocator);
- didCreateBindGroupSamplers =
- group->PopulateSamplers(mDevice, mSamplerAllocator);
- ASSERT(didCreateBindGroupViews);
- ASSERT(didCreateBindGroupSamplers);
- }
- }
+ void SetID3D12DescriptorHeaps(ID3D12GraphicsCommandList* commandList) {
+ ASSERT(commandList != nullptr);
+ std::array<ID3D12DescriptorHeap*, 2> descriptorHeaps = {
+ mViewAllocator->GetShaderVisibleHeap(), mSamplerAllocator->GetShaderVisibleHeap()};
+ ASSERT(descriptorHeaps[0] != nullptr);
+ ASSERT(descriptorHeaps[1] != nullptr);
+ commandList->SetDescriptorHeaps(descriptorHeaps.size(), descriptorHeaps.data());
+
+ // Descriptor table state is undefined at the beginning of a command list and after
+ // descriptor heaps are changed on a command list. Invalidate the root sampler tables to
+ // reset the root descriptor table for samplers, otherwise the shader cannot access the
+ // descriptor heaps.
+ mBoundRootSamplerTables = {};
+ }
- for (BindGroupIndex index : IterateBitSet(mDirtyBindGroupsObjectChangedOrIsDynamic)) {
- BindGroup* group = ToBackend(mBindGroups[index]);
- ApplyBindGroup(commandList, ToBackend(mPipelineLayout), index, group,
- mDynamicOffsetCounts[index], mDynamicOffsets[index].data());
+ private:
+ void UpdateRootSignatureIfNecessary(ID3D12GraphicsCommandList* commandList) {
+ if (mLastAppliedPipelineLayout != mPipelineLayout) {
+ if (mInCompute) {
+ commandList->SetComputeRootSignature(
+ ToBackend(mPipelineLayout)->GetRootSignature());
+ } else {
+ commandList->SetGraphicsRootSignature(
+ ToBackend(mPipelineLayout)->GetRootSignature());
}
-
- AfterApply();
-
- return {};
- }
-
- void SetID3D12DescriptorHeaps(ID3D12GraphicsCommandList* commandList) {
- ASSERT(commandList != nullptr);
- std::array<ID3D12DescriptorHeap*, 2> descriptorHeaps = {
- mViewAllocator->GetShaderVisibleHeap(), mSamplerAllocator->GetShaderVisibleHeap()};
- ASSERT(descriptorHeaps[0] != nullptr);
- ASSERT(descriptorHeaps[1] != nullptr);
- commandList->SetDescriptorHeaps(descriptorHeaps.size(), descriptorHeaps.data());
-
- // Descriptor table state is undefined at the beginning of a command list and after
- // descriptor heaps are changed on a command list. Invalidate the root sampler tables to
- // reset the root descriptor table for samplers, otherwise the shader cannot access the
- // descriptor heaps.
+ // Invalidate the root sampler tables previously set in the root signature.
mBoundRootSamplerTables = {};
}
+ }
- private:
- void UpdateRootSignatureIfNecessary(ID3D12GraphicsCommandList* commandList) {
- if (mLastAppliedPipelineLayout != mPipelineLayout) {
- if (mInCompute) {
- commandList->SetComputeRootSignature(
- ToBackend(mPipelineLayout)->GetRootSignature());
- } else {
- commandList->SetGraphicsRootSignature(
- ToBackend(mPipelineLayout)->GetRootSignature());
+ void ApplyBindGroup(ID3D12GraphicsCommandList* commandList,
+ const PipelineLayout* pipelineLayout,
+ BindGroupIndex index,
+ BindGroup* group,
+ uint32_t dynamicOffsetCountIn,
+ const uint64_t* dynamicOffsetsIn) {
+ ityp::span<BindingIndex, const uint64_t> dynamicOffsets(dynamicOffsetsIn,
+ BindingIndex(dynamicOffsetCountIn));
+ ASSERT(dynamicOffsets.size() == group->GetLayout()->GetDynamicBufferCount());
+
+ // Usually, the application won't set the same offsets many times,
+ // so always try to apply dynamic offsets even if the offsets stay the same
+ if (dynamicOffsets.size() != BindingIndex(0)) {
+ // Update dynamic offsets.
+ // Dynamic buffer bindings are packed at the beginning of the layout.
+ for (BindingIndex bindingIndex{0}; bindingIndex < dynamicOffsets.size();
+ ++bindingIndex) {
+ const BindingInfo& bindingInfo = group->GetLayout()->GetBindingInfo(bindingIndex);
+ if (bindingInfo.visibility == wgpu::ShaderStage::None) {
+ // Skip dynamic buffers that are not visible. D3D12 does not have None
+ // visibility.
+ continue;
+ }
+
+ uint32_t parameterIndex =
+ pipelineLayout->GetDynamicRootParameterIndex(index, bindingIndex);
+ BufferBinding binding = group->GetBindingAsBufferBinding(bindingIndex);
+
+ // Calculate buffer locations that root descriptors links to. The location
+ // is (base buffer location + initial offset + dynamic offset)
+ uint64_t dynamicOffset = dynamicOffsets[bindingIndex];
+ uint64_t offset = binding.offset + dynamicOffset;
+ D3D12_GPU_VIRTUAL_ADDRESS bufferLocation =
+ ToBackend(binding.buffer)->GetVA() + offset;
+
+ ASSERT(bindingInfo.bindingType == BindingInfoType::Buffer);
+ switch (bindingInfo.buffer.type) {
+ case wgpu::BufferBindingType::Uniform:
+ if (mInCompute) {
+ commandList->SetComputeRootConstantBufferView(parameterIndex,
+ bufferLocation);
+ } else {
+ commandList->SetGraphicsRootConstantBufferView(parameterIndex,
+ bufferLocation);
+ }
+ break;
+ case wgpu::BufferBindingType::Storage:
+ case kInternalStorageBufferBinding:
+ if (mInCompute) {
+ commandList->SetComputeRootUnorderedAccessView(parameterIndex,
+ bufferLocation);
+ } else {
+ commandList->SetGraphicsRootUnorderedAccessView(parameterIndex,
+ bufferLocation);
+ }
+ break;
+ case wgpu::BufferBindingType::ReadOnlyStorage:
+ if (mInCompute) {
+ commandList->SetComputeRootShaderResourceView(parameterIndex,
+ bufferLocation);
+ } else {
+ commandList->SetGraphicsRootShaderResourceView(parameterIndex,
+ bufferLocation);
+ }
+ break;
+ case wgpu::BufferBindingType::Undefined:
+ UNREACHABLE();
}
- // Invalidate the root sampler tables previously set in the root signature.
- mBoundRootSamplerTables = {};
}
}
- void ApplyBindGroup(ID3D12GraphicsCommandList* commandList,
- const PipelineLayout* pipelineLayout,
- BindGroupIndex index,
- BindGroup* group,
- uint32_t dynamicOffsetCountIn,
- const uint64_t* dynamicOffsetsIn) {
- ityp::span<BindingIndex, const uint64_t> dynamicOffsets(
- dynamicOffsetsIn, BindingIndex(dynamicOffsetCountIn));
- ASSERT(dynamicOffsets.size() == group->GetLayout()->GetDynamicBufferCount());
-
- // Usually, the application won't set the same offsets many times,
- // so always try to apply dynamic offsets even if the offsets stay the same
- if (dynamicOffsets.size() != BindingIndex(0)) {
- // Update dynamic offsets.
- // Dynamic buffer bindings are packed at the beginning of the layout.
- for (BindingIndex bindingIndex{0}; bindingIndex < dynamicOffsets.size();
- ++bindingIndex) {
- const BindingInfo& bindingInfo =
- group->GetLayout()->GetBindingInfo(bindingIndex);
- if (bindingInfo.visibility == wgpu::ShaderStage::None) {
- // Skip dynamic buffers that are not visible. D3D12 does not have None
- // visibility.
- continue;
- }
+ // It's not necessary to update descriptor tables if only the dynamic offset changed.
+ if (!mDirtyBindGroups[index]) {
+ return;
+ }
- uint32_t parameterIndex =
- pipelineLayout->GetDynamicRootParameterIndex(index, bindingIndex);
- BufferBinding binding = group->GetBindingAsBufferBinding(bindingIndex);
-
- // Calculate buffer locations that root descriptors links to. The location
- // is (base buffer location + initial offset + dynamic offset)
- uint64_t dynamicOffset = dynamicOffsets[bindingIndex];
- uint64_t offset = binding.offset + dynamicOffset;
- D3D12_GPU_VIRTUAL_ADDRESS bufferLocation =
- ToBackend(binding.buffer)->GetVA() + offset;
-
- ASSERT(bindingInfo.bindingType == BindingInfoType::Buffer);
- switch (bindingInfo.buffer.type) {
- case wgpu::BufferBindingType::Uniform:
- if (mInCompute) {
- commandList->SetComputeRootConstantBufferView(parameterIndex,
- bufferLocation);
- } else {
- commandList->SetGraphicsRootConstantBufferView(parameterIndex,
- bufferLocation);
- }
- break;
- case wgpu::BufferBindingType::Storage:
- case kInternalStorageBufferBinding:
- if (mInCompute) {
- commandList->SetComputeRootUnorderedAccessView(parameterIndex,
- bufferLocation);
- } else {
- commandList->SetGraphicsRootUnorderedAccessView(parameterIndex,
- bufferLocation);
- }
- break;
- case wgpu::BufferBindingType::ReadOnlyStorage:
- if (mInCompute) {
- commandList->SetComputeRootShaderResourceView(parameterIndex,
- bufferLocation);
- } else {
- commandList->SetGraphicsRootShaderResourceView(parameterIndex,
- bufferLocation);
- }
- break;
- case wgpu::BufferBindingType::Undefined:
- UNREACHABLE();
- }
- }
- }
+ const uint32_t cbvUavSrvCount =
+ ToBackend(group->GetLayout())->GetCbvUavSrvDescriptorCount();
+ const uint32_t samplerCount = ToBackend(group->GetLayout())->GetSamplerDescriptorCount();
- // It's not necessary to update descriptor tables if only the dynamic offset changed.
- if (!mDirtyBindGroups[index]) {
- return;
+ if (cbvUavSrvCount > 0) {
+ uint32_t parameterIndex = pipelineLayout->GetCbvUavSrvRootParameterIndex(index);
+ const D3D12_GPU_DESCRIPTOR_HANDLE baseDescriptor = group->GetBaseViewDescriptor();
+ if (mInCompute) {
+ commandList->SetComputeRootDescriptorTable(parameterIndex, baseDescriptor);
+ } else {
+ commandList->SetGraphicsRootDescriptorTable(parameterIndex, baseDescriptor);
}
+ }
- const uint32_t cbvUavSrvCount =
- ToBackend(group->GetLayout())->GetCbvUavSrvDescriptorCount();
- const uint32_t samplerCount =
- ToBackend(group->GetLayout())->GetSamplerDescriptorCount();
-
- if (cbvUavSrvCount > 0) {
- uint32_t parameterIndex = pipelineLayout->GetCbvUavSrvRootParameterIndex(index);
- const D3D12_GPU_DESCRIPTOR_HANDLE baseDescriptor = group->GetBaseViewDescriptor();
+ if (samplerCount > 0) {
+ uint32_t parameterIndex = pipelineLayout->GetSamplerRootParameterIndex(index);
+ const D3D12_GPU_DESCRIPTOR_HANDLE baseDescriptor = group->GetBaseSamplerDescriptor();
+ // Check if the group requires its sampler table to be set in the pipeline.
+ // This because sampler heap allocations could be cached and use the same table.
+ if (mBoundRootSamplerTables[index].ptr != baseDescriptor.ptr) {
if (mInCompute) {
commandList->SetComputeRootDescriptorTable(parameterIndex, baseDescriptor);
} else {
commandList->SetGraphicsRootDescriptorTable(parameterIndex, baseDescriptor);
}
- }
- if (samplerCount > 0) {
- uint32_t parameterIndex = pipelineLayout->GetSamplerRootParameterIndex(index);
- const D3D12_GPU_DESCRIPTOR_HANDLE baseDescriptor =
- group->GetBaseSamplerDescriptor();
- // Check if the group requires its sampler table to be set in the pipeline.
- // This because sampler heap allocations could be cached and use the same table.
- if (mBoundRootSamplerTables[index].ptr != baseDescriptor.ptr) {
- if (mInCompute) {
- commandList->SetComputeRootDescriptorTable(parameterIndex, baseDescriptor);
- } else {
- commandList->SetGraphicsRootDescriptorTable(parameterIndex, baseDescriptor);
- }
-
- mBoundRootSamplerTables[index] = baseDescriptor;
- }
+ mBoundRootSamplerTables[index] = baseDescriptor;
}
+ }
- const auto& dynamicStorageBufferLengths = group->GetDynamicStorageBufferLengths();
- if (dynamicStorageBufferLengths.size() != 0) {
- uint32_t parameterIndex =
- pipelineLayout->GetDynamicStorageBufferLengthsParameterIndex();
- uint32_t firstRegisterOffset =
- pipelineLayout->GetDynamicStorageBufferLengthInfo()[index].firstRegisterOffset;
-
- if (mInCompute) {
- commandList->SetComputeRoot32BitConstants(
- parameterIndex, dynamicStorageBufferLengths.size(),
- dynamicStorageBufferLengths.data(), firstRegisterOffset);
- } else {
- commandList->SetGraphicsRoot32BitConstants(
- parameterIndex, dynamicStorageBufferLengths.size(),
- dynamicStorageBufferLengths.data(), firstRegisterOffset);
- }
+ const auto& dynamicStorageBufferLengths = group->GetDynamicStorageBufferLengths();
+ if (dynamicStorageBufferLengths.size() != 0) {
+ uint32_t parameterIndex =
+ pipelineLayout->GetDynamicStorageBufferLengthsParameterIndex();
+ uint32_t firstRegisterOffset =
+ pipelineLayout->GetDynamicStorageBufferLengthInfo()[index].firstRegisterOffset;
+
+ if (mInCompute) {
+ commandList->SetComputeRoot32BitConstants(
+ parameterIndex, dynamicStorageBufferLengths.size(),
+ dynamicStorageBufferLengths.data(), firstRegisterOffset);
+ } else {
+ commandList->SetGraphicsRoot32BitConstants(
+ parameterIndex, dynamicStorageBufferLengths.size(),
+ dynamicStorageBufferLengths.data(), firstRegisterOffset);
}
}
+ }
- Device* mDevice;
+ Device* mDevice;
- bool mInCompute = false;
+ bool mInCompute = false;
- ityp::array<BindGroupIndex, D3D12_GPU_DESCRIPTOR_HANDLE, kMaxBindGroups>
- mBoundRootSamplerTables = {};
+ ityp::array<BindGroupIndex, D3D12_GPU_DESCRIPTOR_HANDLE, kMaxBindGroups>
+ mBoundRootSamplerTables = {};
- ShaderVisibleDescriptorAllocator* mViewAllocator;
- ShaderVisibleDescriptorAllocator* mSamplerAllocator;
- };
-
- namespace {
- class VertexBufferTracker {
- public:
- void OnSetVertexBuffer(VertexBufferSlot slot,
- Buffer* buffer,
- uint64_t offset,
- uint64_t size) {
- mStartSlot = std::min(mStartSlot, slot);
- mEndSlot = std::max(mEndSlot, ityp::Add(slot, VertexBufferSlot(uint8_t(1))));
-
- auto* d3d12BufferView = &mD3D12BufferViews[slot];
- d3d12BufferView->BufferLocation = buffer->GetVA() + offset;
- d3d12BufferView->SizeInBytes = size;
- // The bufferView stride is set based on the vertex state before a draw.
- }
+ ShaderVisibleDescriptorAllocator* mViewAllocator;
+ ShaderVisibleDescriptorAllocator* mSamplerAllocator;
+};
- void Apply(ID3D12GraphicsCommandList* commandList,
- const RenderPipeline* renderPipeline) {
- ASSERT(renderPipeline != nullptr);
-
- VertexBufferSlot startSlot = mStartSlot;
- VertexBufferSlot endSlot = mEndSlot;
-
- // If the vertex state has changed, we need to update the StrideInBytes
- // for the D3D12 buffer views. We also need to extend the dirty range to
- // touch all these slots because the stride may have changed.
- if (mLastAppliedRenderPipeline != renderPipeline) {
- mLastAppliedRenderPipeline = renderPipeline;
-
- for (VertexBufferSlot slot :
- IterateBitSet(renderPipeline->GetVertexBufferSlotsUsed())) {
- startSlot = std::min(startSlot, slot);
- endSlot = std::max(endSlot, ityp::Add(slot, VertexBufferSlot(uint8_t(1))));
- mD3D12BufferViews[slot].StrideInBytes =
- renderPipeline->GetVertexBuffer(slot).arrayStride;
- }
- }
+namespace {
+class VertexBufferTracker {
+ public:
+ void OnSetVertexBuffer(VertexBufferSlot slot, Buffer* buffer, uint64_t offset, uint64_t size) {
+ mStartSlot = std::min(mStartSlot, slot);
+ mEndSlot = std::max(mEndSlot, ityp::Add(slot, VertexBufferSlot(uint8_t(1))));
- if (endSlot <= startSlot) {
- return;
- }
+ auto* d3d12BufferView = &mD3D12BufferViews[slot];
+ d3d12BufferView->BufferLocation = buffer->GetVA() + offset;
+ d3d12BufferView->SizeInBytes = size;
+ // The bufferView stride is set based on the vertex state before a draw.
+ }
- // mD3D12BufferViews is kept up to date with the most recent data passed
- // to SetVertexBuffer. This makes it correct to only track the start
- // and end of the dirty range. When Apply is called,
- // we will at worst set non-dirty vertex buffers in duplicate.
- commandList->IASetVertexBuffers(static_cast<uint8_t>(startSlot),
- static_cast<uint8_t>(ityp::Sub(endSlot, startSlot)),
- &mD3D12BufferViews[startSlot]);
+ void Apply(ID3D12GraphicsCommandList* commandList, const RenderPipeline* renderPipeline) {
+ ASSERT(renderPipeline != nullptr);
- mStartSlot = VertexBufferSlot(kMaxVertexBuffers);
- mEndSlot = VertexBufferSlot(uint8_t(0));
- }
+ VertexBufferSlot startSlot = mStartSlot;
+ VertexBufferSlot endSlot = mEndSlot;
- private:
- // startSlot and endSlot indicate the range of dirty vertex buffers.
- // If there are multiple calls to SetVertexBuffer, the start and end
- // represent the union of the dirty ranges (the union may have non-dirty
- // data in the middle of the range).
- const RenderPipeline* mLastAppliedRenderPipeline = nullptr;
- VertexBufferSlot mStartSlot{kMaxVertexBuffers};
- VertexBufferSlot mEndSlot{uint8_t(0)};
- ityp::array<VertexBufferSlot, D3D12_VERTEX_BUFFER_VIEW, kMaxVertexBuffers>
- mD3D12BufferViews = {};
- };
-
- void ResolveMultisampledRenderPass(CommandRecordingContext* commandContext,
- BeginRenderPassCmd* renderPass) {
- ASSERT(renderPass != nullptr);
-
- for (ColorAttachmentIndex i :
- IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
- TextureViewBase* resolveTarget =
- renderPass->colorAttachments[i].resolveTarget.Get();
- if (resolveTarget == nullptr) {
- continue;
- }
+ // If the vertex state has changed, we need to update the StrideInBytes
+ // for the D3D12 buffer views. We also need to extend the dirty range to
+ // touch all these slots because the stride may have changed.
+ if (mLastAppliedRenderPipeline != renderPipeline) {
+ mLastAppliedRenderPipeline = renderPipeline;
- TextureViewBase* colorView = renderPass->colorAttachments[i].view.Get();
- Texture* colorTexture = ToBackend(colorView->GetTexture());
- Texture* resolveTexture = ToBackend(resolveTarget->GetTexture());
-
- // Transition the usages of the color attachment and resolve target.
- colorTexture->TrackUsageAndTransitionNow(commandContext,
- D3D12_RESOURCE_STATE_RESOLVE_SOURCE,
- colorView->GetSubresourceRange());
- resolveTexture->TrackUsageAndTransitionNow(commandContext,
- D3D12_RESOURCE_STATE_RESOLVE_DEST,
- resolveTarget->GetSubresourceRange());
-
- // Do MSAA resolve with ResolveSubResource().
- ID3D12Resource* colorTextureHandle = colorTexture->GetD3D12Resource();
- ID3D12Resource* resolveTextureHandle = resolveTexture->GetD3D12Resource();
- const uint32_t resolveTextureSubresourceIndex = resolveTexture->GetSubresourceIndex(
- resolveTarget->GetBaseMipLevel(), resolveTarget->GetBaseArrayLayer(),
- Aspect::Color);
- constexpr uint32_t kColorTextureSubresourceIndex = 0;
- commandContext->GetCommandList()->ResolveSubresource(
- resolveTextureHandle, resolveTextureSubresourceIndex, colorTextureHandle,
- kColorTextureSubresourceIndex, colorTexture->GetD3D12Format());
+ for (VertexBufferSlot slot :
+ IterateBitSet(renderPipeline->GetVertexBufferSlotsUsed())) {
+ startSlot = std::min(startSlot, slot);
+ endSlot = std::max(endSlot, ityp::Add(slot, VertexBufferSlot(uint8_t(1))));
+ mD3D12BufferViews[slot].StrideInBytes =
+ renderPipeline->GetVertexBuffer(slot).arrayStride;
}
}
- } // anonymous namespace
+ if (endSlot <= startSlot) {
+ return;
+ }
+
+ // mD3D12BufferViews is kept up to date with the most recent data passed
+ // to SetVertexBuffer. This makes it correct to only track the start
+ // and end of the dirty range. When Apply is called,
+ // we will at worst set non-dirty vertex buffers in duplicate.
+ commandList->IASetVertexBuffers(static_cast<uint8_t>(startSlot),
+ static_cast<uint8_t>(ityp::Sub(endSlot, startSlot)),
+ &mD3D12BufferViews[startSlot]);
- // static
- Ref<CommandBuffer> CommandBuffer::Create(CommandEncoder* encoder,
- const CommandBufferDescriptor* descriptor) {
- return AcquireRef(new CommandBuffer(encoder, descriptor));
+ mStartSlot = VertexBufferSlot(kMaxVertexBuffers);
+ mEndSlot = VertexBufferSlot(uint8_t(0));
}
- CommandBuffer::CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor)
- : CommandBufferBase(encoder, descriptor) {
+ private:
+ // startSlot and endSlot indicate the range of dirty vertex buffers.
+ // If there are multiple calls to SetVertexBuffer, the start and end
+ // represent the union of the dirty ranges (the union may have non-dirty
+ // data in the middle of the range).
+ const RenderPipeline* mLastAppliedRenderPipeline = nullptr;
+ VertexBufferSlot mStartSlot{kMaxVertexBuffers};
+ VertexBufferSlot mEndSlot{uint8_t(0)};
+ ityp::array<VertexBufferSlot, D3D12_VERTEX_BUFFER_VIEW, kMaxVertexBuffers> mD3D12BufferViews =
+ {};
+};
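
An editorial aside, not part of the change above: a minimal, self-contained sketch of the dirty-range bookkeeping that VertexBufferTracker implements, reduced to plain integers so the union of dirty ranges is easy to see. The DirtyVertexBuffers type, its kMaxSlots constant, and the printf stand-in for IASetVertexBuffers are all hypothetical.

    #include <algorithm>
    #include <array>
    #include <cstdint>
    #include <cstdio>

    struct DirtyVertexBuffers {
        static constexpr uint8_t kMaxSlots = 16;       // hypothetical slot count
        uint8_t start = kMaxSlots;                     // start >= end means "nothing dirty"
        uint8_t end = 0;
        std::array<uint32_t, kMaxSlots> strides = {};  // stands in for the D3D12 buffer views

        void OnSetVertexBuffer(uint8_t slot, uint32_t stride) {
            strides[slot] = stride;                    // always keep the latest data per slot
            start = std::min(start, slot);             // widen the dirty range to cover the slot
            end = std::max<uint8_t>(end, static_cast<uint8_t>(slot + 1));
        }

        void Apply() {
            if (end <= start) {
                return;                                // nothing dirty since the last Apply()
            }
            // At worst this re-binds non-dirty slots that happen to sit inside the union
            // of the individual dirty ranges, which is harmless.
            std::printf("bind vertex buffer slots [%u, %u)\n", unsigned(start), unsigned(end));
            start = kMaxSlots;                         // reset to the empty range
            end = 0;
        }
    };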
+
+void ResolveMultisampledRenderPass(CommandRecordingContext* commandContext,
+ BeginRenderPassCmd* renderPass) {
+ ASSERT(renderPass != nullptr);
+
+ for (ColorAttachmentIndex i :
+ IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
+ TextureViewBase* resolveTarget = renderPass->colorAttachments[i].resolveTarget.Get();
+ if (resolveTarget == nullptr) {
+ continue;
+ }
+
+ TextureViewBase* colorView = renderPass->colorAttachments[i].view.Get();
+ Texture* colorTexture = ToBackend(colorView->GetTexture());
+ Texture* resolveTexture = ToBackend(resolveTarget->GetTexture());
+
+ // Transition the usages of the color attachment and resolve target.
+ colorTexture->TrackUsageAndTransitionNow(
+ commandContext, D3D12_RESOURCE_STATE_RESOLVE_SOURCE, colorView->GetSubresourceRange());
+ resolveTexture->TrackUsageAndTransitionNow(commandContext,
+ D3D12_RESOURCE_STATE_RESOLVE_DEST,
+ resolveTarget->GetSubresourceRange());
+
+ // Do MSAA resolve with ResolveSubResource().
+ ID3D12Resource* colorTextureHandle = colorTexture->GetD3D12Resource();
+ ID3D12Resource* resolveTextureHandle = resolveTexture->GetD3D12Resource();
+ const uint32_t resolveTextureSubresourceIndex = resolveTexture->GetSubresourceIndex(
+ resolveTarget->GetBaseMipLevel(), resolveTarget->GetBaseArrayLayer(), Aspect::Color);
+ constexpr uint32_t kColorTextureSubresourceIndex = 0;
+ commandContext->GetCommandList()->ResolveSubresource(
+ resolveTextureHandle, resolveTextureSubresourceIndex, colorTextureHandle,
+ kColorTextureSubresourceIndex, colorTexture->GetD3D12Format());
}
+}
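
Another editorial aside: the resolve above asks the resolve target for a flat D3D12 subresource index at its base mip level and array layer, while the multisampled color source uses subresource 0. For reference, the standard D3D12 subresource numbering (the same formula as the D3D12CalcSubresource helper in d3dx12.h) looks like this; the function name here is ours.

    #include <cstdint>

    // Flat D3D12 subresource index: mip levels vary fastest, then array layers, then planes.
    constexpr uint32_t CalcSubresourceIndex(uint32_t mipLevel,
                                            uint32_t arrayLayer,
                                            uint32_t planeSlice,
                                            uint32_t mipLevelCount,
                                            uint32_t arrayLayerCount) {
        return mipLevel + arrayLayer * mipLevelCount +
               planeSlice * mipLevelCount * arrayLayerCount;
    }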
- MaybeError CommandBuffer::RecordCommands(CommandRecordingContext* commandContext) {
- Device* device = ToBackend(GetDevice());
- BindGroupStateTracker bindingTracker(device);
+} // anonymous namespace
- ID3D12GraphicsCommandList* commandList = commandContext->GetCommandList();
+// static
+Ref<CommandBuffer> CommandBuffer::Create(CommandEncoder* encoder,
+ const CommandBufferDescriptor* descriptor) {
+ return AcquireRef(new CommandBuffer(encoder, descriptor));
+}
- // Make sure we use the correct descriptors for this command list. Could be done once per
- // actual command list but here is ok because there should be few command buffers.
- bindingTracker.SetID3D12DescriptorHeaps(commandList);
+CommandBuffer::CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor)
+ : CommandBufferBase(encoder, descriptor) {}
- size_t nextComputePassNumber = 0;
- size_t nextRenderPassNumber = 0;
+MaybeError CommandBuffer::RecordCommands(CommandRecordingContext* commandContext) {
+ Device* device = ToBackend(GetDevice());
+ BindGroupStateTracker bindingTracker(device);
- Command type;
- while (mCommands.NextCommandId(&type)) {
- switch (type) {
- case Command::BeginComputePass: {
- mCommands.NextCommand<BeginComputePassCmd>();
+ ID3D12GraphicsCommandList* commandList = commandContext->GetCommandList();
- bindingTracker.SetInComputePass(true);
- DAWN_TRY(RecordComputePass(
- commandContext, &bindingTracker,
- GetResourceUsages().computePasses[nextComputePassNumber]));
+ // Make sure we use the correct descriptors for this command list. This could be done once
+ // per actual command list, but doing it here is fine because there should be few command buffers.
+ bindingTracker.SetID3D12DescriptorHeaps(commandList);
- nextComputePassNumber++;
- break;
- }
+ size_t nextComputePassNumber = 0;
+ size_t nextRenderPassNumber = 0;
+
+ Command type;
+ while (mCommands.NextCommandId(&type)) {
+ switch (type) {
+ case Command::BeginComputePass: {
+ mCommands.NextCommand<BeginComputePassCmd>();
+
+ bindingTracker.SetInComputePass(true);
+ DAWN_TRY(
+ RecordComputePass(commandContext, &bindingTracker,
+ GetResourceUsages().computePasses[nextComputePassNumber]));
+
+ nextComputePassNumber++;
+ break;
+ }
- case Command::BeginRenderPass: {
- BeginRenderPassCmd* beginRenderPassCmd =
- mCommands.NextCommand<BeginRenderPassCmd>();
+ case Command::BeginRenderPass: {
+ BeginRenderPassCmd* beginRenderPassCmd =
+ mCommands.NextCommand<BeginRenderPassCmd>();
- const bool passHasUAV = TransitionAndClearForSyncScope(
- commandContext, GetResourceUsages().renderPasses[nextRenderPassNumber]);
- bindingTracker.SetInComputePass(false);
+ const bool passHasUAV = TransitionAndClearForSyncScope(
+ commandContext, GetResourceUsages().renderPasses[nextRenderPassNumber]);
+ bindingTracker.SetInComputePass(false);
- LazyClearRenderPassAttachments(beginRenderPassCmd);
- DAWN_TRY(RecordRenderPass(commandContext, &bindingTracker, beginRenderPassCmd,
- passHasUAV));
+ LazyClearRenderPassAttachments(beginRenderPassCmd);
+ DAWN_TRY(RecordRenderPass(commandContext, &bindingTracker, beginRenderPassCmd,
+ passHasUAV));
- nextRenderPassNumber++;
+ nextRenderPassNumber++;
+ break;
+ }
+
+ case Command::CopyBufferToBuffer: {
+ CopyBufferToBufferCmd* copy = mCommands.NextCommand<CopyBufferToBufferCmd>();
+ if (copy->size == 0) {
+ // Skip no-op copies.
break;
}
+ Buffer* srcBuffer = ToBackend(copy->source.Get());
+ Buffer* dstBuffer = ToBackend(copy->destination.Get());
- case Command::CopyBufferToBuffer: {
- CopyBufferToBufferCmd* copy = mCommands.NextCommand<CopyBufferToBufferCmd>();
- if (copy->size == 0) {
- // Skip no-op copies.
- break;
- }
- Buffer* srcBuffer = ToBackend(copy->source.Get());
- Buffer* dstBuffer = ToBackend(copy->destination.Get());
-
- DAWN_TRY(srcBuffer->EnsureDataInitialized(commandContext));
- bool cleared;
- DAWN_TRY_ASSIGN(cleared,
- dstBuffer->EnsureDataInitializedAsDestination(
- commandContext, copy->destinationOffset, copy->size));
- DAWN_UNUSED(cleared);
-
- srcBuffer->TrackUsageAndTransitionNow(commandContext,
- wgpu::BufferUsage::CopySrc);
- dstBuffer->TrackUsageAndTransitionNow(commandContext,
- wgpu::BufferUsage::CopyDst);
-
- commandList->CopyBufferRegion(
- dstBuffer->GetD3D12Resource(), copy->destinationOffset,
- srcBuffer->GetD3D12Resource(), copy->sourceOffset, copy->size);
- break;
+ DAWN_TRY(srcBuffer->EnsureDataInitialized(commandContext));
+ bool cleared;
+ DAWN_TRY_ASSIGN(cleared, dstBuffer->EnsureDataInitializedAsDestination(
+ commandContext, copy->destinationOffset, copy->size));
+ DAWN_UNUSED(cleared);
+
+ srcBuffer->TrackUsageAndTransitionNow(commandContext, wgpu::BufferUsage::CopySrc);
+ dstBuffer->TrackUsageAndTransitionNow(commandContext, wgpu::BufferUsage::CopyDst);
+
+ commandList->CopyBufferRegion(
+ dstBuffer->GetD3D12Resource(), copy->destinationOffset,
+ srcBuffer->GetD3D12Resource(), copy->sourceOffset, copy->size);
+ break;
+ }
+
+ case Command::CopyBufferToTexture: {
+ CopyBufferToTextureCmd* copy = mCommands.NextCommand<CopyBufferToTextureCmd>();
+ if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
+ copy->copySize.depthOrArrayLayers == 0) {
+ // Skip no-op copies.
+ continue;
}
+ Buffer* buffer = ToBackend(copy->source.buffer.Get());
+ Texture* texture = ToBackend(copy->destination.texture.Get());
- case Command::CopyBufferToTexture: {
- CopyBufferToTextureCmd* copy = mCommands.NextCommand<CopyBufferToTextureCmd>();
- if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
- copy->copySize.depthOrArrayLayers == 0) {
- // Skip no-op copies.
- continue;
- }
- Buffer* buffer = ToBackend(copy->source.buffer.Get());
- Texture* texture = ToBackend(copy->destination.texture.Get());
+ DAWN_TRY(buffer->EnsureDataInitialized(commandContext));
- DAWN_TRY(buffer->EnsureDataInitialized(commandContext));
+ SubresourceRange subresources =
+ GetSubresourcesAffectedByCopy(copy->destination, copy->copySize);
- SubresourceRange subresources =
- GetSubresourcesAffectedByCopy(copy->destination, copy->copySize);
+ if (IsCompleteSubresourceCopiedTo(texture, copy->copySize,
+ copy->destination.mipLevel)) {
+ texture->SetIsSubresourceContentInitialized(true, subresources);
+ } else {
+ texture->EnsureSubresourceContentInitialized(commandContext, subresources);
+ }
- if (IsCompleteSubresourceCopiedTo(texture, copy->copySize,
- copy->destination.mipLevel)) {
- texture->SetIsSubresourceContentInitialized(true, subresources);
- } else {
- texture->EnsureSubresourceContentInitialized(commandContext, subresources);
- }
+ buffer->TrackUsageAndTransitionNow(commandContext, wgpu::BufferUsage::CopySrc);
+ texture->TrackUsageAndTransitionNow(commandContext, wgpu::TextureUsage::CopyDst,
+ subresources);
- buffer->TrackUsageAndTransitionNow(commandContext, wgpu::BufferUsage::CopySrc);
- texture->TrackUsageAndTransitionNow(commandContext, wgpu::TextureUsage::CopyDst,
- subresources);
+ RecordBufferTextureCopy(BufferTextureCopyDirection::B2T, commandList, copy->source,
+ copy->destination, copy->copySize);
- RecordBufferTextureCopy(BufferTextureCopyDirection::B2T, commandList,
- copy->source, copy->destination, copy->copySize);
+ break;
+ }
- break;
+ case Command::CopyTextureToBuffer: {
+ CopyTextureToBufferCmd* copy = mCommands.NextCommand<CopyTextureToBufferCmd>();
+ if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
+ copy->copySize.depthOrArrayLayers == 0) {
+ // Skip no-op copies.
+ continue;
}
+ Texture* texture = ToBackend(copy->source.texture.Get());
+ Buffer* buffer = ToBackend(copy->destination.buffer.Get());
- case Command::CopyTextureToBuffer: {
- CopyTextureToBufferCmd* copy = mCommands.NextCommand<CopyTextureToBufferCmd>();
- if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
- copy->copySize.depthOrArrayLayers == 0) {
- // Skip no-op copies.
- continue;
- }
- Texture* texture = ToBackend(copy->source.texture.Get());
- Buffer* buffer = ToBackend(copy->destination.buffer.Get());
+ DAWN_TRY(buffer->EnsureDataInitializedAsDestination(commandContext, copy));
- DAWN_TRY(buffer->EnsureDataInitializedAsDestination(commandContext, copy));
+ SubresourceRange subresources =
+ GetSubresourcesAffectedByCopy(copy->source, copy->copySize);
- SubresourceRange subresources =
- GetSubresourcesAffectedByCopy(copy->source, copy->copySize);
+ texture->EnsureSubresourceContentInitialized(commandContext, subresources);
- texture->EnsureSubresourceContentInitialized(commandContext, subresources);
+ texture->TrackUsageAndTransitionNow(commandContext, wgpu::TextureUsage::CopySrc,
+ subresources);
+ buffer->TrackUsageAndTransitionNow(commandContext, wgpu::BufferUsage::CopyDst);
- texture->TrackUsageAndTransitionNow(commandContext, wgpu::TextureUsage::CopySrc,
- subresources);
- buffer->TrackUsageAndTransitionNow(commandContext, wgpu::BufferUsage::CopyDst);
+ RecordBufferTextureCopy(BufferTextureCopyDirection::T2B, commandList,
+ copy->destination, copy->source, copy->copySize);
- RecordBufferTextureCopy(BufferTextureCopyDirection::T2B, commandList,
- copy->destination, copy->source, copy->copySize);
+ break;
+ }
- break;
+ case Command::CopyTextureToTexture: {
+ CopyTextureToTextureCmd* copy = mCommands.NextCommand<CopyTextureToTextureCmd>();
+ if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
+ copy->copySize.depthOrArrayLayers == 0) {
+ // Skip no-op copies.
+ continue;
}
+ Texture* source = ToBackend(copy->source.texture.Get());
+ Texture* destination = ToBackend(copy->destination.texture.Get());
- case Command::CopyTextureToTexture: {
- CopyTextureToTextureCmd* copy =
- mCommands.NextCommand<CopyTextureToTextureCmd>();
- if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
- copy->copySize.depthOrArrayLayers == 0) {
- // Skip no-op copies.
- continue;
- }
- Texture* source = ToBackend(copy->source.texture.Get());
- Texture* destination = ToBackend(copy->destination.texture.Get());
-
- SubresourceRange srcRange =
- GetSubresourcesAffectedByCopy(copy->source, copy->copySize);
- SubresourceRange dstRange =
- GetSubresourcesAffectedByCopy(copy->destination, copy->copySize);
-
- source->EnsureSubresourceContentInitialized(commandContext, srcRange);
- if (IsCompleteSubresourceCopiedTo(destination, copy->copySize,
- copy->destination.mipLevel)) {
- destination->SetIsSubresourceContentInitialized(true, dstRange);
- } else {
- destination->EnsureSubresourceContentInitialized(commandContext, dstRange);
- }
+ SubresourceRange srcRange =
+ GetSubresourcesAffectedByCopy(copy->source, copy->copySize);
+ SubresourceRange dstRange =
+ GetSubresourcesAffectedByCopy(copy->destination, copy->copySize);
- if (copy->source.texture.Get() == copy->destination.texture.Get() &&
- copy->source.mipLevel == copy->destination.mipLevel) {
- // When there are overlapped subresources, the layout of the overlapped
- // subresources should all be COMMON instead of what we set now. Currently
- // it is not allowed to copy with overlapped subresources, but we still
- // add the ASSERT here as a reminder for this possible misuse.
- ASSERT(!IsRangeOverlapped(copy->source.origin.z, copy->destination.origin.z,
- copy->copySize.depthOrArrayLayers));
- }
- source->TrackUsageAndTransitionNow(commandContext, wgpu::TextureUsage::CopySrc,
- srcRange);
- destination->TrackUsageAndTransitionNow(commandContext,
- wgpu::TextureUsage::CopyDst, dstRange);
-
- ASSERT(srcRange.aspects == dstRange.aspects);
- if (ShouldCopyUsingTemporaryBuffer(GetDevice(), copy->source,
- copy->destination)) {
- DAWN_TRY(RecordCopyTextureWithTemporaryBuffer(
- commandContext, copy->source, copy->destination, copy->copySize));
- break;
+ source->EnsureSubresourceContentInitialized(commandContext, srcRange);
+ if (IsCompleteSubresourceCopiedTo(destination, copy->copySize,
+ copy->destination.mipLevel)) {
+ destination->SetIsSubresourceContentInitialized(true, dstRange);
+ } else {
+ destination->EnsureSubresourceContentInitialized(commandContext, dstRange);
+ }
+
+ if (copy->source.texture.Get() == copy->destination.texture.Get() &&
+ copy->source.mipLevel == copy->destination.mipLevel) {
+ // When subresources overlap, the layout of the overlapped subresources
+ // should all be COMMON instead of what we set now. Copying with overlapped
+ // subresources is currently not allowed, but we still add the ASSERT here
+ // as a reminder of this possible misuse.
+ ASSERT(!IsRangeOverlapped(copy->source.origin.z, copy->destination.origin.z,
+ copy->copySize.depthOrArrayLayers));
+ }
+ source->TrackUsageAndTransitionNow(commandContext, wgpu::TextureUsage::CopySrc,
+ srcRange);
+ destination->TrackUsageAndTransitionNow(commandContext, wgpu::TextureUsage::CopyDst,
+ dstRange);
+
+ ASSERT(srcRange.aspects == dstRange.aspects);
+ if (ShouldCopyUsingTemporaryBuffer(GetDevice(), copy->source, copy->destination)) {
+ DAWN_TRY(RecordCopyTextureWithTemporaryBuffer(
+ commandContext, copy->source, copy->destination, copy->copySize));
+ break;
+ }
+
+ if (CanUseCopyResource(copy->source, copy->destination, copy->copySize)) {
+ commandList->CopyResource(destination->GetD3D12Resource(),
+ source->GetD3D12Resource());
+ } else if (source->GetDimension() == wgpu::TextureDimension::e3D &&
+ destination->GetDimension() == wgpu::TextureDimension::e3D) {
+ for (Aspect aspect : IterateEnumMask(srcRange.aspects)) {
+ D3D12_TEXTURE_COPY_LOCATION srcLocation =
+ ComputeTextureCopyLocationForTexture(source, copy->source.mipLevel, 0,
+ aspect);
+ D3D12_TEXTURE_COPY_LOCATION dstLocation =
+ ComputeTextureCopyLocationForTexture(
+ destination, copy->destination.mipLevel, 0, aspect);
+
+ D3D12_BOX sourceRegion =
+ ComputeD3D12BoxFromOffsetAndSize(copy->source.origin, copy->copySize);
+
+ commandList->CopyTextureRegion(
+ &dstLocation, copy->destination.origin.x, copy->destination.origin.y,
+ copy->destination.origin.z, &srcLocation, &sourceRegion);
}
+ } else {
+ const dawn::native::Extent3D copyExtentOneSlice = {copy->copySize.width,
+ copy->copySize.height, 1u};
+
+ for (Aspect aspect : IterateEnumMask(srcRange.aspects)) {
+ for (uint32_t z = 0; z < copy->copySize.depthOrArrayLayers; ++z) {
+ uint32_t sourceLayer = 0;
+ uint32_t sourceZ = 0;
+ switch (source->GetDimension()) {
+ case wgpu::TextureDimension::e1D:
+ ASSERT(copy->source.origin.z == 0);
+ break;
+ case wgpu::TextureDimension::e2D:
+ sourceLayer = copy->source.origin.z + z;
+ break;
+ case wgpu::TextureDimension::e3D:
+ sourceZ = copy->source.origin.z + z;
+ break;
+ }
- if (CanUseCopyResource(copy->source, copy->destination, copy->copySize)) {
- commandList->CopyResource(destination->GetD3D12Resource(),
- source->GetD3D12Resource());
- } else if (source->GetDimension() == wgpu::TextureDimension::e3D &&
- destination->GetDimension() == wgpu::TextureDimension::e3D) {
- for (Aspect aspect : IterateEnumMask(srcRange.aspects)) {
+ uint32_t destinationLayer = 0;
+ uint32_t destinationZ = 0;
+ switch (destination->GetDimension()) {
+ case wgpu::TextureDimension::e1D:
+ ASSERT(copy->destination.origin.z == 0);
+ break;
+ case wgpu::TextureDimension::e2D:
+ destinationLayer = copy->destination.origin.z + z;
+ break;
+ case wgpu::TextureDimension::e3D:
+ destinationZ = copy->destination.origin.z + z;
+ break;
+ }
D3D12_TEXTURE_COPY_LOCATION srcLocation =
ComputeTextureCopyLocationForTexture(source, copy->source.mipLevel,
- 0, aspect);
+ sourceLayer, aspect);
+
D3D12_TEXTURE_COPY_LOCATION dstLocation =
- ComputeTextureCopyLocationForTexture(
- destination, copy->destination.mipLevel, 0, aspect);
+ ComputeTextureCopyLocationForTexture(destination,
+ copy->destination.mipLevel,
+ destinationLayer, aspect);
+ Origin3D sourceOriginInSubresource = copy->source.origin;
+ sourceOriginInSubresource.z = sourceZ;
D3D12_BOX sourceRegion = ComputeD3D12BoxFromOffsetAndSize(
- copy->source.origin, copy->copySize);
+ sourceOriginInSubresource, copyExtentOneSlice);
commandList->CopyTextureRegion(&dstLocation, copy->destination.origin.x,
- copy->destination.origin.y,
- copy->destination.origin.z, &srcLocation,
- &sourceRegion);
- }
- } else {
- const dawn::native::Extent3D copyExtentOneSlice = {
- copy->copySize.width, copy->copySize.height, 1u};
-
- for (Aspect aspect : IterateEnumMask(srcRange.aspects)) {
- for (uint32_t z = 0; z < copy->copySize.depthOrArrayLayers; ++z) {
- uint32_t sourceLayer = 0;
- uint32_t sourceZ = 0;
- switch (source->GetDimension()) {
- case wgpu::TextureDimension::e1D:
- ASSERT(copy->source.origin.z == 0);
- break;
- case wgpu::TextureDimension::e2D:
- sourceLayer = copy->source.origin.z + z;
- break;
- case wgpu::TextureDimension::e3D:
- sourceZ = copy->source.origin.z + z;
- break;
- }
-
- uint32_t destinationLayer = 0;
- uint32_t destinationZ = 0;
- switch (destination->GetDimension()) {
- case wgpu::TextureDimension::e1D:
- ASSERT(copy->destination.origin.z == 0);
- break;
- case wgpu::TextureDimension::e2D:
- destinationLayer = copy->destination.origin.z + z;
- break;
- case wgpu::TextureDimension::e3D:
- destinationZ = copy->destination.origin.z + z;
- break;
- }
- D3D12_TEXTURE_COPY_LOCATION srcLocation =
- ComputeTextureCopyLocationForTexture(
- source, copy->source.mipLevel, sourceLayer, aspect);
-
- D3D12_TEXTURE_COPY_LOCATION dstLocation =
- ComputeTextureCopyLocationForTexture(destination,
- copy->destination.mipLevel,
- destinationLayer, aspect);
-
- Origin3D sourceOriginInSubresource = copy->source.origin;
- sourceOriginInSubresource.z = sourceZ;
- D3D12_BOX sourceRegion = ComputeD3D12BoxFromOffsetAndSize(
- sourceOriginInSubresource, copyExtentOneSlice);
-
- commandList->CopyTextureRegion(
- &dstLocation, copy->destination.origin.x,
- copy->destination.origin.y, destinationZ, &srcLocation,
- &sourceRegion);
- }
+ copy->destination.origin.y, destinationZ,
+ &srcLocation, &sourceRegion);
}
}
- break;
}
+ break;
+ }
- case Command::ClearBuffer: {
- ClearBufferCmd* cmd = mCommands.NextCommand<ClearBufferCmd>();
- if (cmd->size == 0) {
- // Skip no-op fills.
- break;
- }
- Buffer* dstBuffer = ToBackend(cmd->buffer.Get());
-
- bool clearedToZero;
- DAWN_TRY_ASSIGN(clearedToZero, dstBuffer->EnsureDataInitializedAsDestination(
- commandContext, cmd->offset, cmd->size));
-
- if (!clearedToZero) {
- DAWN_TRY(device->ClearBufferToZero(commandContext, cmd->buffer.Get(),
- cmd->offset, cmd->size));
- }
-
+ case Command::ClearBuffer: {
+ ClearBufferCmd* cmd = mCommands.NextCommand<ClearBufferCmd>();
+ if (cmd->size == 0) {
+ // Skip no-op fills.
break;
}
+ Buffer* dstBuffer = ToBackend(cmd->buffer.Get());
- case Command::ResolveQuerySet: {
- ResolveQuerySetCmd* cmd = mCommands.NextCommand<ResolveQuerySetCmd>();
- QuerySet* querySet = ToBackend(cmd->querySet.Get());
- uint32_t firstQuery = cmd->firstQuery;
- uint32_t queryCount = cmd->queryCount;
- Buffer* destination = ToBackend(cmd->destination.Get());
- uint64_t destinationOffset = cmd->destinationOffset;
-
- bool cleared;
- DAWN_TRY_ASSIGN(cleared, destination->EnsureDataInitializedAsDestination(
- commandContext, destinationOffset,
- queryCount * sizeof(uint64_t)));
- DAWN_UNUSED(cleared);
-
- // Resolving unavailable queries is undefined behaviour on D3D12, we only can
- // resolve the available part of sparse queries. In order to resolve the
- // unavailables as 0s, we need to clear the resolving region of the destination
- // buffer to 0s.
- auto startIt = querySet->GetQueryAvailability().begin() + firstQuery;
- auto endIt = querySet->GetQueryAvailability().begin() + firstQuery + queryCount;
- bool hasUnavailableQueries = std::find(startIt, endIt, false) != endIt;
- if (hasUnavailableQueries) {
- DAWN_TRY(device->ClearBufferToZero(commandContext, destination,
- destinationOffset,
- queryCount * sizeof(uint64_t)));
- }
+ bool clearedToZero;
+ DAWN_TRY_ASSIGN(clearedToZero, dstBuffer->EnsureDataInitializedAsDestination(
+ commandContext, cmd->offset, cmd->size));
- destination->TrackUsageAndTransitionNow(commandContext,
- wgpu::BufferUsage::QueryResolve);
+ if (!clearedToZero) {
+ DAWN_TRY(device->ClearBufferToZero(commandContext, cmd->buffer.Get(),
+ cmd->offset, cmd->size));
+ }
- RecordResolveQuerySetCmd(commandList, device, querySet, firstQuery, queryCount,
- destination, destinationOffset);
+ break;
+ }
- break;
- }
+ case Command::ResolveQuerySet: {
+ ResolveQuerySetCmd* cmd = mCommands.NextCommand<ResolveQuerySetCmd>();
+ QuerySet* querySet = ToBackend(cmd->querySet.Get());
+ uint32_t firstQuery = cmd->firstQuery;
+ uint32_t queryCount = cmd->queryCount;
+ Buffer* destination = ToBackend(cmd->destination.Get());
+ uint64_t destinationOffset = cmd->destinationOffset;
- case Command::WriteTimestamp: {
- WriteTimestampCmd* cmd = mCommands.NextCommand<WriteTimestampCmd>();
+ bool cleared;
+ DAWN_TRY_ASSIGN(
+ cleared, destination->EnsureDataInitializedAsDestination(
+ commandContext, destinationOffset, queryCount * sizeof(uint64_t)));
+ DAWN_UNUSED(cleared);
+
+ // Resolving unavailable queries is undefined behaviour on D3D12; we can only
+ // resolve the available part of sparse queries. In order to resolve the
+ // unavailable queries as 0s, we need to clear the resolving region of the
+ // destination buffer to 0s first.
+ auto startIt = querySet->GetQueryAvailability().begin() + firstQuery;
+ auto endIt = querySet->GetQueryAvailability().begin() + firstQuery + queryCount;
+ bool hasUnavailableQueries = std::find(startIt, endIt, false) != endIt;
+ if (hasUnavailableQueries) {
+ DAWN_TRY(device->ClearBufferToZero(commandContext, destination,
+ destinationOffset,
+ queryCount * sizeof(uint64_t)));
+ }
+
+ destination->TrackUsageAndTransitionNow(commandContext,
+ wgpu::BufferUsage::QueryResolve);
+
+ RecordResolveQuerySetCmd(commandList, device, querySet, firstQuery, queryCount,
+ destination, destinationOffset);
+
+ break;
+ }
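
Editorial aside: the availability check above can be read as a single predicate — the destination region is cleared to zero only when the resolved range contains at least one unavailable query. A restatement of that predicate, where the vector stands in for QuerySet::GetQueryAvailability() and the helper name is ours:

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    bool NeedsZeroClearBeforeResolve(const std::vector<bool>& availability,
                                     uint32_t firstQuery,
                                     uint32_t queryCount) {
        auto rangeBegin = availability.begin() + firstQuery;
        auto rangeEnd = rangeBegin + queryCount;
        // Unavailable queries resolve to undefined data on D3D12, so any `false` in the
        // range forces a zero-clear of the whole destination region before the resolve.
        return std::find(rangeBegin, rangeEnd, false) != rangeEnd;
    }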
- RecordWriteTimestampCmd(commandList, cmd);
- break;
- }
+ case Command::WriteTimestamp: {
+ WriteTimestampCmd* cmd = mCommands.NextCommand<WriteTimestampCmd>();
+
+ RecordWriteTimestampCmd(commandList, cmd);
+ break;
+ }
- case Command::InsertDebugMarker: {
- InsertDebugMarkerCmd* cmd = mCommands.NextCommand<InsertDebugMarkerCmd>();
- const char* label = mCommands.NextData<char>(cmd->length + 1);
+ case Command::InsertDebugMarker: {
+ InsertDebugMarkerCmd* cmd = mCommands.NextCommand<InsertDebugMarkerCmd>();
+ const char* label = mCommands.NextData<char>(cmd->length + 1);
- if (ToBackend(GetDevice())->GetFunctions()->IsPIXEventRuntimeLoaded()) {
- // PIX color is 1 byte per channel in ARGB format
- constexpr uint64_t kPIXBlackColor = 0xff000000;
- ToBackend(GetDevice())
- ->GetFunctions()
- ->pixSetMarkerOnCommandList(commandList, kPIXBlackColor, label);
- }
- break;
+ if (ToBackend(GetDevice())->GetFunctions()->IsPIXEventRuntimeLoaded()) {
+ // PIX color is 1 byte per channel in ARGB format
+ constexpr uint64_t kPIXBlackColor = 0xff000000;
+ ToBackend(GetDevice())
+ ->GetFunctions()
+ ->pixSetMarkerOnCommandList(commandList, kPIXBlackColor, label);
}
+ break;
+ }
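
Editorial aside on the recurring "PIX color is 1 byte per channel in ARGB format" comment: the value is a packed 0xAARRGGBB integer, so kPIXBlackColor == 0xff000000 is opaque black. A small sketch of that packing (the helper name is ours; PIX's own headers ship a similar macro):

    #include <cstdint>

    // Pack an opaque ARGB color, one byte per channel: 0xAARRGGBB.
    constexpr uint64_t PackPixColor(uint8_t r, uint8_t g, uint8_t b) {
        return 0xff000000ull | (uint64_t(r) << 16) | (uint64_t(g) << 8) | uint64_t(b);
    }

    static_assert(PackPixColor(0, 0, 0) == 0xff000000ull, "opaque black, as used above");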
- case Command::PopDebugGroup: {
- mCommands.NextCommand<PopDebugGroupCmd>();
+ case Command::PopDebugGroup: {
+ mCommands.NextCommand<PopDebugGroupCmd>();
- if (ToBackend(GetDevice())->GetFunctions()->IsPIXEventRuntimeLoaded()) {
- ToBackend(GetDevice())
- ->GetFunctions()
- ->pixEndEventOnCommandList(commandList);
- }
- break;
+ if (ToBackend(GetDevice())->GetFunctions()->IsPIXEventRuntimeLoaded()) {
+ ToBackend(GetDevice())->GetFunctions()->pixEndEventOnCommandList(commandList);
}
+ break;
+ }
- case Command::PushDebugGroup: {
- PushDebugGroupCmd* cmd = mCommands.NextCommand<PushDebugGroupCmd>();
- const char* label = mCommands.NextData<char>(cmd->length + 1);
+ case Command::PushDebugGroup: {
+ PushDebugGroupCmd* cmd = mCommands.NextCommand<PushDebugGroupCmd>();
+ const char* label = mCommands.NextData<char>(cmd->length + 1);
- if (ToBackend(GetDevice())->GetFunctions()->IsPIXEventRuntimeLoaded()) {
- // PIX color is 1 byte per channel in ARGB format
- constexpr uint64_t kPIXBlackColor = 0xff000000;
- ToBackend(GetDevice())
- ->GetFunctions()
- ->pixBeginEventOnCommandList(commandList, kPIXBlackColor, label);
- }
- break;
+ if (ToBackend(GetDevice())->GetFunctions()->IsPIXEventRuntimeLoaded()) {
+ // PIX color is 1 byte per channel in ARGB format
+ constexpr uint64_t kPIXBlackColor = 0xff000000;
+ ToBackend(GetDevice())
+ ->GetFunctions()
+ ->pixBeginEventOnCommandList(commandList, kPIXBlackColor, label);
}
+ break;
+ }
- case Command::WriteBuffer: {
- WriteBufferCmd* write = mCommands.NextCommand<WriteBufferCmd>();
- const uint64_t offset = write->offset;
- const uint64_t size = write->size;
- if (size == 0) {
- continue;
- }
-
- Buffer* dstBuffer = ToBackend(write->buffer.Get());
- uint8_t* data = mCommands.NextData<uint8_t>(size);
- Device* device = ToBackend(GetDevice());
-
- UploadHandle uploadHandle;
- DAWN_TRY_ASSIGN(uploadHandle, device->GetDynamicUploader()->Allocate(
- size, device->GetPendingCommandSerial(),
- kCopyBufferToBufferOffsetAlignment));
- ASSERT(uploadHandle.mappedBuffer != nullptr);
- memcpy(uploadHandle.mappedBuffer, data, size);
-
- bool cleared;
- DAWN_TRY_ASSIGN(cleared, dstBuffer->EnsureDataInitializedAsDestination(
- commandContext, offset, size));
- DAWN_UNUSED(cleared);
- dstBuffer->TrackUsageAndTransitionNow(commandContext,
- wgpu::BufferUsage::CopyDst);
- commandList->CopyBufferRegion(
- dstBuffer->GetD3D12Resource(), offset,
- ToBackend(uploadHandle.stagingBuffer)->GetResource(),
- uploadHandle.startOffset, size);
- break;
+ case Command::WriteBuffer: {
+ WriteBufferCmd* write = mCommands.NextCommand<WriteBufferCmd>();
+ const uint64_t offset = write->offset;
+ const uint64_t size = write->size;
+ if (size == 0) {
+ continue;
}
- default:
- UNREACHABLE();
+ Buffer* dstBuffer = ToBackend(write->buffer.Get());
+ uint8_t* data = mCommands.NextData<uint8_t>(size);
+ Device* device = ToBackend(GetDevice());
+
+ UploadHandle uploadHandle;
+ DAWN_TRY_ASSIGN(uploadHandle, device->GetDynamicUploader()->Allocate(
+ size, device->GetPendingCommandSerial(),
+ kCopyBufferToBufferOffsetAlignment));
+ ASSERT(uploadHandle.mappedBuffer != nullptr);
+ memcpy(uploadHandle.mappedBuffer, data, size);
+
+ bool cleared;
+ DAWN_TRY_ASSIGN(cleared, dstBuffer->EnsureDataInitializedAsDestination(
+ commandContext, offset, size));
+ DAWN_UNUSED(cleared);
+ dstBuffer->TrackUsageAndTransitionNow(commandContext, wgpu::BufferUsage::CopyDst);
+ commandList->CopyBufferRegion(dstBuffer->GetD3D12Resource(), offset,
+ ToBackend(uploadHandle.stagingBuffer)->GetResource(),
+ uploadHandle.startOffset, size);
+ break;
}
- }
- return {};
+ default:
+ UNREACHABLE();
+ }
}
- MaybeError CommandBuffer::RecordComputePass(CommandRecordingContext* commandContext,
- BindGroupStateTracker* bindingTracker,
- const ComputePassResourceUsage& resourceUsages) {
- uint64_t currentDispatch = 0;
- ID3D12GraphicsCommandList* commandList = commandContext->GetCommandList();
-
- Command type;
- ComputePipeline* lastPipeline = nullptr;
- while (mCommands.NextCommandId(&type)) {
- switch (type) {
- case Command::Dispatch: {
- DispatchCmd* dispatch = mCommands.NextCommand<DispatchCmd>();
-
- // Skip noop dispatches, it can cause D3D12 warning from validation layers and
- // leads to device lost.
- if (dispatch->x == 0 || dispatch->y == 0 || dispatch->z == 0) {
- break;
- }
-
- TransitionAndClearForSyncScope(commandContext,
- resourceUsages.dispatchUsages[currentDispatch]);
- DAWN_TRY(bindingTracker->Apply(commandContext));
-
- RecordNumWorkgroupsForDispatch(commandList, lastPipeline, dispatch);
- commandList->Dispatch(dispatch->x, dispatch->y, dispatch->z);
- currentDispatch++;
+ return {};
+}
+
+MaybeError CommandBuffer::RecordComputePass(CommandRecordingContext* commandContext,
+ BindGroupStateTracker* bindingTracker,
+ const ComputePassResourceUsage& resourceUsages) {
+ uint64_t currentDispatch = 0;
+ ID3D12GraphicsCommandList* commandList = commandContext->GetCommandList();
+
+ Command type;
+ ComputePipeline* lastPipeline = nullptr;
+ while (mCommands.NextCommandId(&type)) {
+ switch (type) {
+ case Command::Dispatch: {
+ DispatchCmd* dispatch = mCommands.NextCommand<DispatchCmd>();
+
+ // Skip no-op dispatches; they can trigger warnings from the D3D12 validation
+ // layers and lead to device loss.
+ if (dispatch->x == 0 || dispatch->y == 0 || dispatch->z == 0) {
break;
}
- case Command::DispatchIndirect: {
- DispatchIndirectCmd* dispatch = mCommands.NextCommand<DispatchIndirectCmd>();
+ TransitionAndClearForSyncScope(commandContext,
+ resourceUsages.dispatchUsages[currentDispatch]);
+ DAWN_TRY(bindingTracker->Apply(commandContext));
- TransitionAndClearForSyncScope(commandContext,
- resourceUsages.dispatchUsages[currentDispatch]);
- DAWN_TRY(bindingTracker->Apply(commandContext));
-
- ComPtr<ID3D12CommandSignature> signature =
- lastPipeline->GetDispatchIndirectCommandSignature();
- commandList->ExecuteIndirect(
- signature.Get(), 1, ToBackend(dispatch->indirectBuffer)->GetD3D12Resource(),
- dispatch->indirectOffset, nullptr, 0);
- currentDispatch++;
- break;
- }
+ RecordNumWorkgroupsForDispatch(commandList, lastPipeline, dispatch);
+ commandList->Dispatch(dispatch->x, dispatch->y, dispatch->z);
+ currentDispatch++;
+ break;
+ }
- case Command::EndComputePass: {
- mCommands.NextCommand<EndComputePassCmd>();
- return {};
- }
+ case Command::DispatchIndirect: {
+ DispatchIndirectCmd* dispatch = mCommands.NextCommand<DispatchIndirectCmd>();
- case Command::SetComputePipeline: {
- SetComputePipelineCmd* cmd = mCommands.NextCommand<SetComputePipelineCmd>();
- ComputePipeline* pipeline = ToBackend(cmd->pipeline).Get();
+ TransitionAndClearForSyncScope(commandContext,
+ resourceUsages.dispatchUsages[currentDispatch]);
+ DAWN_TRY(bindingTracker->Apply(commandContext));
- commandList->SetPipelineState(pipeline->GetPipelineState());
+ ComPtr<ID3D12CommandSignature> signature =
+ lastPipeline->GetDispatchIndirectCommandSignature();
+ commandList->ExecuteIndirect(
+ signature.Get(), 1, ToBackend(dispatch->indirectBuffer)->GetD3D12Resource(),
+ dispatch->indirectOffset, nullptr, 0);
+ currentDispatch++;
+ break;
+ }
- bindingTracker->OnSetPipeline(pipeline);
- lastPipeline = pipeline;
- break;
- }
+ case Command::EndComputePass: {
+ mCommands.NextCommand<EndComputePassCmd>();
+ return {};
+ }
- case Command::SetBindGroup: {
- SetBindGroupCmd* cmd = mCommands.NextCommand<SetBindGroupCmd>();
- BindGroup* group = ToBackend(cmd->group.Get());
- uint32_t* dynamicOffsets = nullptr;
+ case Command::SetComputePipeline: {
+ SetComputePipelineCmd* cmd = mCommands.NextCommand<SetComputePipelineCmd>();
+ ComputePipeline* pipeline = ToBackend(cmd->pipeline).Get();
- if (cmd->dynamicOffsetCount > 0) {
- dynamicOffsets = mCommands.NextData<uint32_t>(cmd->dynamicOffsetCount);
- }
+ commandList->SetPipelineState(pipeline->GetPipelineState());
- bindingTracker->OnSetBindGroup(cmd->index, group, cmd->dynamicOffsetCount,
- dynamicOffsets);
- break;
- }
+ bindingTracker->OnSetPipeline(pipeline);
+ lastPipeline = pipeline;
+ break;
+ }
- case Command::InsertDebugMarker: {
- InsertDebugMarkerCmd* cmd = mCommands.NextCommand<InsertDebugMarkerCmd>();
- const char* label = mCommands.NextData<char>(cmd->length + 1);
+ case Command::SetBindGroup: {
+ SetBindGroupCmd* cmd = mCommands.NextCommand<SetBindGroupCmd>();
+ BindGroup* group = ToBackend(cmd->group.Get());
+ uint32_t* dynamicOffsets = nullptr;
- if (ToBackend(GetDevice())->GetFunctions()->IsPIXEventRuntimeLoaded()) {
- // PIX color is 1 byte per channel in ARGB format
- constexpr uint64_t kPIXBlackColor = 0xff000000;
- ToBackend(GetDevice())
- ->GetFunctions()
- ->pixSetMarkerOnCommandList(commandList, kPIXBlackColor, label);
- }
- break;
+ if (cmd->dynamicOffsetCount > 0) {
+ dynamicOffsets = mCommands.NextData<uint32_t>(cmd->dynamicOffsetCount);
}
- case Command::PopDebugGroup: {
- mCommands.NextCommand<PopDebugGroupCmd>();
+ bindingTracker->OnSetBindGroup(cmd->index, group, cmd->dynamicOffsetCount,
+ dynamicOffsets);
+ break;
+ }
- if (ToBackend(GetDevice())->GetFunctions()->IsPIXEventRuntimeLoaded()) {
- ToBackend(GetDevice())
- ->GetFunctions()
- ->pixEndEventOnCommandList(commandList);
- }
- break;
+ case Command::InsertDebugMarker: {
+ InsertDebugMarkerCmd* cmd = mCommands.NextCommand<InsertDebugMarkerCmd>();
+ const char* label = mCommands.NextData<char>(cmd->length + 1);
+
+ if (ToBackend(GetDevice())->GetFunctions()->IsPIXEventRuntimeLoaded()) {
+ // PIX color is 1 byte per channel in ARGB format
+ constexpr uint64_t kPIXBlackColor = 0xff000000;
+ ToBackend(GetDevice())
+ ->GetFunctions()
+ ->pixSetMarkerOnCommandList(commandList, kPIXBlackColor, label);
}
+ break;
+ }
- case Command::PushDebugGroup: {
- PushDebugGroupCmd* cmd = mCommands.NextCommand<PushDebugGroupCmd>();
- const char* label = mCommands.NextData<char>(cmd->length + 1);
+ case Command::PopDebugGroup: {
+ mCommands.NextCommand<PopDebugGroupCmd>();
- if (ToBackend(GetDevice())->GetFunctions()->IsPIXEventRuntimeLoaded()) {
- // PIX color is 1 byte per channel in ARGB format
- constexpr uint64_t kPIXBlackColor = 0xff000000;
- ToBackend(GetDevice())
- ->GetFunctions()
- ->pixBeginEventOnCommandList(commandList, kPIXBlackColor, label);
- }
- break;
+ if (ToBackend(GetDevice())->GetFunctions()->IsPIXEventRuntimeLoaded()) {
+ ToBackend(GetDevice())->GetFunctions()->pixEndEventOnCommandList(commandList);
}
+ break;
+ }
- case Command::WriteTimestamp: {
- WriteTimestampCmd* cmd = mCommands.NextCommand<WriteTimestampCmd>();
+ case Command::PushDebugGroup: {
+ PushDebugGroupCmd* cmd = mCommands.NextCommand<PushDebugGroupCmd>();
+ const char* label = mCommands.NextData<char>(cmd->length + 1);
- RecordWriteTimestampCmd(commandList, cmd);
- break;
+ if (ToBackend(GetDevice())->GetFunctions()->IsPIXEventRuntimeLoaded()) {
+ // PIX color is 1 byte per channel in ARGB format
+ constexpr uint64_t kPIXBlackColor = 0xff000000;
+ ToBackend(GetDevice())
+ ->GetFunctions()
+ ->pixBeginEventOnCommandList(commandList, kPIXBlackColor, label);
}
+ break;
+ }
+
+ case Command::WriteTimestamp: {
+ WriteTimestampCmd* cmd = mCommands.NextCommand<WriteTimestampCmd>();
- default:
- UNREACHABLE();
+ RecordWriteTimestampCmd(commandList, cmd);
+ break;
}
- }
- return {};
+ default:
+ UNREACHABLE();
+ }
}
- MaybeError CommandBuffer::SetupRenderPass(CommandRecordingContext* commandContext,
- BeginRenderPassCmd* renderPass,
- RenderPassBuilder* renderPassBuilder) {
- Device* device = ToBackend(GetDevice());
+ return {};
+}
- CPUDescriptorHeapAllocation nullRTVAllocation;
- D3D12_CPU_DESCRIPTOR_HANDLE nullRTV;
+MaybeError CommandBuffer::SetupRenderPass(CommandRecordingContext* commandContext,
+ BeginRenderPassCmd* renderPass,
+ RenderPassBuilder* renderPassBuilder) {
+ Device* device = ToBackend(GetDevice());
- const auto& colorAttachmentsMaskBitSet =
- renderPass->attachmentState->GetColorAttachmentsMask();
- for (ColorAttachmentIndex i(uint8_t(0)); i < ColorAttachmentIndex(kMaxColorAttachments);
- i++) {
- if (colorAttachmentsMaskBitSet.test(i)) {
- RenderPassColorAttachmentInfo& attachmentInfo = renderPass->colorAttachments[i];
- TextureView* view = ToBackend(attachmentInfo.view.Get());
+ CPUDescriptorHeapAllocation nullRTVAllocation;
+ D3D12_CPU_DESCRIPTOR_HANDLE nullRTV;
- // Set view attachment.
- CPUDescriptorHeapAllocation rtvAllocation;
- DAWN_TRY_ASSIGN(
- rtvAllocation,
- device->GetRenderTargetViewAllocator()->AllocateTransientCPUDescriptors());
+ const auto& colorAttachmentsMaskBitSet = renderPass->attachmentState->GetColorAttachmentsMask();
+ for (ColorAttachmentIndex i(uint8_t(0)); i < ColorAttachmentIndex(kMaxColorAttachments); i++) {
+ if (colorAttachmentsMaskBitSet.test(i)) {
+ RenderPassColorAttachmentInfo& attachmentInfo = renderPass->colorAttachments[i];
+ TextureView* view = ToBackend(attachmentInfo.view.Get());
- const D3D12_RENDER_TARGET_VIEW_DESC viewDesc = view->GetRTVDescriptor();
- const D3D12_CPU_DESCRIPTOR_HANDLE baseDescriptor =
- rtvAllocation.GetBaseDescriptor();
+ // Set view attachment.
+ CPUDescriptorHeapAllocation rtvAllocation;
+ DAWN_TRY_ASSIGN(
+ rtvAllocation,
+ device->GetRenderTargetViewAllocator()->AllocateTransientCPUDescriptors());
- device->GetD3D12Device()->CreateRenderTargetView(
- ToBackend(view->GetTexture())->GetD3D12Resource(), &viewDesc, baseDescriptor);
+ const D3D12_RENDER_TARGET_VIEW_DESC viewDesc = view->GetRTVDescriptor();
+ const D3D12_CPU_DESCRIPTOR_HANDLE baseDescriptor = rtvAllocation.GetBaseDescriptor();
- renderPassBuilder->SetRenderTargetView(i, baseDescriptor, false);
+ device->GetD3D12Device()->CreateRenderTargetView(
+ ToBackend(view->GetTexture())->GetD3D12Resource(), &viewDesc, baseDescriptor);
- // Set color load operation.
- renderPassBuilder->SetRenderTargetBeginningAccess(
- i, attachmentInfo.loadOp, attachmentInfo.clearColor, view->GetD3D12Format());
+ renderPassBuilder->SetRenderTargetView(i, baseDescriptor, false);
- // Set color store operation.
- if (attachmentInfo.resolveTarget != nullptr) {
- TextureView* resolveDestinationView =
- ToBackend(attachmentInfo.resolveTarget.Get());
- Texture* resolveDestinationTexture =
- ToBackend(resolveDestinationView->GetTexture());
+ // Set color load operation.
+ renderPassBuilder->SetRenderTargetBeginningAccess(
+ i, attachmentInfo.loadOp, attachmentInfo.clearColor, view->GetD3D12Format());
- resolveDestinationTexture->TrackUsageAndTransitionNow(
- commandContext, D3D12_RESOURCE_STATE_RESOLVE_DEST,
- resolveDestinationView->GetSubresourceRange());
+ // Set color store operation.
+ if (attachmentInfo.resolveTarget != nullptr) {
+ TextureView* resolveDestinationView = ToBackend(attachmentInfo.resolveTarget.Get());
+ Texture* resolveDestinationTexture =
+ ToBackend(resolveDestinationView->GetTexture());
- renderPassBuilder->SetRenderTargetEndingAccessResolve(
- i, attachmentInfo.storeOp, view, resolveDestinationView);
- } else {
- renderPassBuilder->SetRenderTargetEndingAccess(i, attachmentInfo.storeOp);
- }
- } else {
- if (!nullRTVAllocation.IsValid()) {
- DAWN_TRY_ASSIGN(
- nullRTVAllocation,
- device->GetRenderTargetViewAllocator()->AllocateTransientCPUDescriptors());
- nullRTV = nullRTVAllocation.GetBaseDescriptor();
- D3D12_RENDER_TARGET_VIEW_DESC nullRTVDesc;
- nullRTVDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
- nullRTVDesc.ViewDimension = D3D12_RTV_DIMENSION_TEXTURE2D;
- nullRTVDesc.Texture2D.MipSlice = 0;
- nullRTVDesc.Texture2D.PlaneSlice = 0;
- device->GetD3D12Device()->CreateRenderTargetView(nullptr, &nullRTVDesc,
- nullRTV);
- }
+ resolveDestinationTexture->TrackUsageAndTransitionNow(
+ commandContext, D3D12_RESOURCE_STATE_RESOLVE_DEST,
+ resolveDestinationView->GetSubresourceRange());
- renderPassBuilder->SetRenderTargetView(i, nullRTV, true);
+ renderPassBuilder->SetRenderTargetEndingAccessResolve(i, attachmentInfo.storeOp,
+ view, resolveDestinationView);
+ } else {
+ renderPassBuilder->SetRenderTargetEndingAccess(i, attachmentInfo.storeOp);
+ }
+ } else {
+ if (!nullRTVAllocation.IsValid()) {
+ DAWN_TRY_ASSIGN(
+ nullRTVAllocation,
+ device->GetRenderTargetViewAllocator()->AllocateTransientCPUDescriptors());
+ nullRTV = nullRTVAllocation.GetBaseDescriptor();
+ D3D12_RENDER_TARGET_VIEW_DESC nullRTVDesc;
+ nullRTVDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
+ nullRTVDesc.ViewDimension = D3D12_RTV_DIMENSION_TEXTURE2D;
+ nullRTVDesc.Texture2D.MipSlice = 0;
+ nullRTVDesc.Texture2D.PlaneSlice = 0;
+ device->GetD3D12Device()->CreateRenderTargetView(nullptr, &nullRTVDesc, nullRTV);
}
+
+ renderPassBuilder->SetRenderTargetView(i, nullRTV, true);
}
+ }
- if (renderPass->attachmentState->HasDepthStencilAttachment()) {
- RenderPassDepthStencilAttachmentInfo& attachmentInfo =
- renderPass->depthStencilAttachment;
- TextureView* view = ToBackend(renderPass->depthStencilAttachment.view.Get());
+ if (renderPass->attachmentState->HasDepthStencilAttachment()) {
+ RenderPassDepthStencilAttachmentInfo& attachmentInfo = renderPass->depthStencilAttachment;
+ TextureView* view = ToBackend(renderPass->depthStencilAttachment.view.Get());
- // Set depth attachment.
- CPUDescriptorHeapAllocation dsvAllocation;
- DAWN_TRY_ASSIGN(
- dsvAllocation,
- device->GetDepthStencilViewAllocator()->AllocateTransientCPUDescriptors());
+ // Set depth attachment.
+ CPUDescriptorHeapAllocation dsvAllocation;
+ DAWN_TRY_ASSIGN(dsvAllocation,
+ device->GetDepthStencilViewAllocator()->AllocateTransientCPUDescriptors());
- const D3D12_DEPTH_STENCIL_VIEW_DESC viewDesc = view->GetDSVDescriptor(
- attachmentInfo.depthReadOnly, attachmentInfo.stencilReadOnly);
- const D3D12_CPU_DESCRIPTOR_HANDLE baseDescriptor = dsvAllocation.GetBaseDescriptor();
+ const D3D12_DEPTH_STENCIL_VIEW_DESC viewDesc =
+ view->GetDSVDescriptor(attachmentInfo.depthReadOnly, attachmentInfo.stencilReadOnly);
+ const D3D12_CPU_DESCRIPTOR_HANDLE baseDescriptor = dsvAllocation.GetBaseDescriptor();
- device->GetD3D12Device()->CreateDepthStencilView(
- ToBackend(view->GetTexture())->GetD3D12Resource(), &viewDesc, baseDescriptor);
+ device->GetD3D12Device()->CreateDepthStencilView(
+ ToBackend(view->GetTexture())->GetD3D12Resource(), &viewDesc, baseDescriptor);
- renderPassBuilder->SetDepthStencilView(baseDescriptor);
+ renderPassBuilder->SetDepthStencilView(baseDescriptor);
- const bool hasDepth = view->GetTexture()->GetFormat().HasDepth();
- const bool hasStencil = view->GetTexture()->GetFormat().HasStencil();
+ const bool hasDepth = view->GetTexture()->GetFormat().HasDepth();
+ const bool hasStencil = view->GetTexture()->GetFormat().HasStencil();
- // Set depth/stencil load operations.
- if (hasDepth) {
- renderPassBuilder->SetDepthAccess(
- attachmentInfo.depthLoadOp, attachmentInfo.depthStoreOp,
- attachmentInfo.clearDepth, view->GetD3D12Format());
- } else {
- renderPassBuilder->SetDepthNoAccess();
- }
-
- if (hasStencil) {
- renderPassBuilder->SetStencilAccess(
- attachmentInfo.stencilLoadOp, attachmentInfo.stencilStoreOp,
- attachmentInfo.clearStencil, view->GetD3D12Format());
- } else {
- renderPassBuilder->SetStencilNoAccess();
- }
+ // Set depth/stencil load operations.
+ if (hasDepth) {
+ renderPassBuilder->SetDepthAccess(attachmentInfo.depthLoadOp,
+ attachmentInfo.depthStoreOp,
+ attachmentInfo.clearDepth, view->GetD3D12Format());
+ } else {
+ renderPassBuilder->SetDepthNoAccess();
+ }
+ if (hasStencil) {
+ renderPassBuilder->SetStencilAccess(
+ attachmentInfo.stencilLoadOp, attachmentInfo.stencilStoreOp,
+ attachmentInfo.clearStencil, view->GetD3D12Format());
} else {
- renderPassBuilder->SetDepthStencilNoAccess();
+ renderPassBuilder->SetStencilNoAccess();
}
- return {};
+ } else {
+ renderPassBuilder->SetDepthStencilNoAccess();
}
- void CommandBuffer::EmulateBeginRenderPass(CommandRecordingContext* commandContext,
- const RenderPassBuilder* renderPassBuilder) const {
- ID3D12GraphicsCommandList* commandList = commandContext->GetCommandList();
-
- // Clear framebuffer attachments as needed.
- {
- for (const auto& attachment :
- renderPassBuilder->GetRenderPassRenderTargetDescriptors()) {
- // Load op - color
- if (attachment.cpuDescriptor.ptr != 0 &&
- attachment.BeginningAccess.Type ==
- D3D12_RENDER_PASS_BEGINNING_ACCESS_TYPE_CLEAR) {
- commandList->ClearRenderTargetView(
- attachment.cpuDescriptor, attachment.BeginningAccess.Clear.ClearValue.Color,
- 0, nullptr);
- }
+ return {};
+}
+
+void CommandBuffer::EmulateBeginRenderPass(CommandRecordingContext* commandContext,
+ const RenderPassBuilder* renderPassBuilder) const {
+ ID3D12GraphicsCommandList* commandList = commandContext->GetCommandList();
+
+ // Clear framebuffer attachments as needed.
+ {
+ for (const auto& attachment : renderPassBuilder->GetRenderPassRenderTargetDescriptors()) {
+ // Load op - color
+ if (attachment.cpuDescriptor.ptr != 0 &&
+ attachment.BeginningAccess.Type == D3D12_RENDER_PASS_BEGINNING_ACCESS_TYPE_CLEAR) {
+ commandList->ClearRenderTargetView(
+ attachment.cpuDescriptor, attachment.BeginningAccess.Clear.ClearValue.Color, 0,
+ nullptr);
}
+ }
- if (renderPassBuilder->HasDepthOrStencil()) {
- D3D12_CLEAR_FLAGS clearFlags = {};
- float depthClear = 0.0f;
- uint8_t stencilClear = 0u;
-
- if (renderPassBuilder->GetRenderPassDepthStencilDescriptor()
- ->DepthBeginningAccess.Type ==
- D3D12_RENDER_PASS_BEGINNING_ACCESS_TYPE_CLEAR) {
- clearFlags |= D3D12_CLEAR_FLAG_DEPTH;
- depthClear = renderPassBuilder->GetRenderPassDepthStencilDescriptor()
- ->DepthBeginningAccess.Clear.ClearValue.DepthStencil.Depth;
- }
- if (renderPassBuilder->GetRenderPassDepthStencilDescriptor()
- ->StencilBeginningAccess.Type ==
- D3D12_RENDER_PASS_BEGINNING_ACCESS_TYPE_CLEAR) {
- clearFlags |= D3D12_CLEAR_FLAG_STENCIL;
- stencilClear =
- renderPassBuilder->GetRenderPassDepthStencilDescriptor()
- ->StencilBeginningAccess.Clear.ClearValue.DepthStencil.Stencil;
- }
+ if (renderPassBuilder->HasDepthOrStencil()) {
+ D3D12_CLEAR_FLAGS clearFlags = {};
+ float depthClear = 0.0f;
+ uint8_t stencilClear = 0u;
- if (clearFlags) {
- commandList->ClearDepthStencilView(
- renderPassBuilder->GetRenderPassDepthStencilDescriptor()->cpuDescriptor,
- clearFlags, depthClear, stencilClear, 0, nullptr);
- }
+ if (renderPassBuilder->GetRenderPassDepthStencilDescriptor()
+ ->DepthBeginningAccess.Type == D3D12_RENDER_PASS_BEGINNING_ACCESS_TYPE_CLEAR) {
+ clearFlags |= D3D12_CLEAR_FLAG_DEPTH;
+ depthClear = renderPassBuilder->GetRenderPassDepthStencilDescriptor()
+ ->DepthBeginningAccess.Clear.ClearValue.DepthStencil.Depth;
+ }
+ if (renderPassBuilder->GetRenderPassDepthStencilDescriptor()
+ ->StencilBeginningAccess.Type ==
+ D3D12_RENDER_PASS_BEGINNING_ACCESS_TYPE_CLEAR) {
+ clearFlags |= D3D12_CLEAR_FLAG_STENCIL;
+ stencilClear = renderPassBuilder->GetRenderPassDepthStencilDescriptor()
+ ->StencilBeginningAccess.Clear.ClearValue.DepthStencil.Stencil;
}
- }
-
- commandList->OMSetRenderTargets(
- static_cast<uint8_t>(renderPassBuilder->GetHighestColorAttachmentIndexPlusOne()),
- renderPassBuilder->GetRenderTargetViews(), FALSE,
- renderPassBuilder->HasDepthOrStencil()
- ? &renderPassBuilder->GetRenderPassDepthStencilDescriptor()->cpuDescriptor
- : nullptr);
- }
- MaybeError CommandBuffer::RecordRenderPass(CommandRecordingContext* commandContext,
- BindGroupStateTracker* bindingTracker,
- BeginRenderPassCmd* renderPass,
- const bool passHasUAV) {
- Device* device = ToBackend(GetDevice());
- const bool useRenderPass = device->IsToggleEnabled(Toggle::UseD3D12RenderPass);
-
- // renderPassBuilder must be scoped to RecordRenderPass because any underlying
- // D3D12_RENDER_PASS_ENDING_ACCESS_RESOLVE_SUBRESOURCE_PARAMETERS structs must remain
- // valid until after EndRenderPass() has been called.
- RenderPassBuilder renderPassBuilder(passHasUAV);
-
- DAWN_TRY(SetupRenderPass(commandContext, renderPass, &renderPassBuilder));
-
- // Use D3D12's native render pass API if it's available, otherwise emulate the
- // beginning and ending access operations.
- if (useRenderPass) {
- commandContext->GetCommandList4()->BeginRenderPass(
- static_cast<uint8_t>(renderPassBuilder.GetHighestColorAttachmentIndexPlusOne()),
- renderPassBuilder.GetRenderPassRenderTargetDescriptors().data(),
- renderPassBuilder.HasDepthOrStencil()
- ? renderPassBuilder.GetRenderPassDepthStencilDescriptor()
- : nullptr,
- renderPassBuilder.GetRenderPassFlags());
- } else {
- EmulateBeginRenderPass(commandContext, &renderPassBuilder);
+ if (clearFlags) {
+ commandList->ClearDepthStencilView(
+ renderPassBuilder->GetRenderPassDepthStencilDescriptor()->cpuDescriptor,
+ clearFlags, depthClear, stencilClear, 0, nullptr);
+ }
}
+ }
- ID3D12GraphicsCommandList* commandList = commandContext->GetCommandList();
-
- // Set up default dynamic state
- {
- uint32_t width = renderPass->width;
- uint32_t height = renderPass->height;
- D3D12_VIEWPORT viewport = {
- 0.f, 0.f, static_cast<float>(width), static_cast<float>(height), 0.f, 1.f};
- D3D12_RECT scissorRect = {0, 0, static_cast<long>(width), static_cast<long>(height)};
- commandList->RSSetViewports(1, &viewport);
- commandList->RSSetScissorRects(1, &scissorRect);
-
- static constexpr std::array<float, 4> defaultBlendFactor = {0, 0, 0, 0};
- commandList->OMSetBlendFactor(&defaultBlendFactor[0]);
+ commandList->OMSetRenderTargets(
+ static_cast<uint8_t>(renderPassBuilder->GetHighestColorAttachmentIndexPlusOne()),
+ renderPassBuilder->GetRenderTargetViews(), FALSE,
+ renderPassBuilder->HasDepthOrStencil()
+ ? &renderPassBuilder->GetRenderPassDepthStencilDescriptor()->cpuDescriptor
+ : nullptr);
+}
+
+MaybeError CommandBuffer::RecordRenderPass(CommandRecordingContext* commandContext,
+ BindGroupStateTracker* bindingTracker,
+ BeginRenderPassCmd* renderPass,
+ const bool passHasUAV) {
+ Device* device = ToBackend(GetDevice());
+ const bool useRenderPass = device->IsToggleEnabled(Toggle::UseD3D12RenderPass);
+
+ // renderPassBuilder must be scoped to RecordRenderPass because any underlying
+ // D3D12_RENDER_PASS_ENDING_ACCESS_RESOLVE_SUBRESOURCE_PARAMETERS structs must remain
+ // valid until after EndRenderPass() has been called.
+ RenderPassBuilder renderPassBuilder(passHasUAV);
+
+ DAWN_TRY(SetupRenderPass(commandContext, renderPass, &renderPassBuilder));
+
+ // Use D3D12's native render pass API if it's available, otherwise emulate the
+ // beginning and ending access operations.
+ if (useRenderPass) {
+ commandContext->GetCommandList4()->BeginRenderPass(
+ static_cast<uint8_t>(renderPassBuilder.GetHighestColorAttachmentIndexPlusOne()),
+ renderPassBuilder.GetRenderPassRenderTargetDescriptors().data(),
+ renderPassBuilder.HasDepthOrStencil()
+ ? renderPassBuilder.GetRenderPassDepthStencilDescriptor()
+ : nullptr,
+ renderPassBuilder.GetRenderPassFlags());
+ } else {
+ EmulateBeginRenderPass(commandContext, &renderPassBuilder);
+ }
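
Editorial aside: Dawn gates the native path behind Toggle::UseD3D12RenderPass. The snippet below sketches how the underlying D3D12 capability is commonly queried — BeginRenderPass lives on ID3D12GraphicsCommandList4, and D3D12_FEATURE_DATA_D3D12_OPTIONS5 reports the render pass tier. The helper name and how a caller would map the tier onto a toggle are assumptions, not Dawn's exact logic.

    #include <windows.h>
    #include <d3d12.h>

    // Query the D3D12 render pass tier. Even D3D12_RENDER_PASS_TIER_0 accepts
    // Begin/EndRenderPass (the runtime maps it onto regular commands); higher tiers
    // indicate increasing driver-level support.
    bool QueryRenderPassTier(ID3D12Device* device, D3D12_RENDER_PASS_TIER* tierOut) {
        D3D12_FEATURE_DATA_D3D12_OPTIONS5 options5 = {};
        if (FAILED(device->CheckFeatureSupport(D3D12_FEATURE_D3D12_OPTIONS5, &options5,
                                               sizeof(options5)))) {
            return false;
        }
        *tierOut = options5.RenderPassesTier;
        return true;
    }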
- commandList->OMSetStencilRef(0);
- }
+ ID3D12GraphicsCommandList* commandList = commandContext->GetCommandList();
- RenderPipeline* lastPipeline = nullptr;
- VertexBufferTracker vertexBufferTracker = {};
+ // Set up default dynamic state
+ {
+ uint32_t width = renderPass->width;
+ uint32_t height = renderPass->height;
+ D3D12_VIEWPORT viewport = {0.f, 0.f, static_cast<float>(width), static_cast<float>(height),
+ 0.f, 1.f};
+ D3D12_RECT scissorRect = {0, 0, static_cast<int32_t>(width), static_cast<int32_t>(height)};
+ commandList->RSSetViewports(1, &viewport);
+ commandList->RSSetScissorRects(1, &scissorRect);
- auto EncodeRenderBundleCommand = [&](CommandIterator* iter, Command type) -> MaybeError {
- switch (type) {
- case Command::Draw: {
- DrawCmd* draw = iter->NextCommand<DrawCmd>();
+ static constexpr std::array<float, 4> defaultBlendFactor = {0, 0, 0, 0};
+ commandList->OMSetBlendFactor(&defaultBlendFactor[0]);
- DAWN_TRY(bindingTracker->Apply(commandContext));
- vertexBufferTracker.Apply(commandList, lastPipeline);
- RecordFirstIndexOffset(commandList, lastPipeline, draw->firstVertex,
- draw->firstInstance);
- commandList->DrawInstanced(draw->vertexCount, draw->instanceCount,
- draw->firstVertex, draw->firstInstance);
- break;
- }
+ commandList->OMSetStencilRef(0);
+ }
- case Command::DrawIndexed: {
- DrawIndexedCmd* draw = iter->NextCommand<DrawIndexedCmd>();
+ RenderPipeline* lastPipeline = nullptr;
+ VertexBufferTracker vertexBufferTracker = {};
+
+ auto EncodeRenderBundleCommand = [&](CommandIterator* iter, Command type) -> MaybeError {
+ switch (type) {
+ case Command::Draw: {
+ DrawCmd* draw = iter->NextCommand<DrawCmd>();
+
+ DAWN_TRY(bindingTracker->Apply(commandContext));
+ vertexBufferTracker.Apply(commandList, lastPipeline);
+ RecordFirstIndexOffset(commandList, lastPipeline, draw->firstVertex,
+ draw->firstInstance);
+ commandList->DrawInstanced(draw->vertexCount, draw->instanceCount,
+ draw->firstVertex, draw->firstInstance);
+ break;
+ }
- DAWN_TRY(bindingTracker->Apply(commandContext));
- vertexBufferTracker.Apply(commandList, lastPipeline);
- RecordFirstIndexOffset(commandList, lastPipeline, draw->baseVertex,
- draw->firstInstance);
- commandList->DrawIndexedInstanced(draw->indexCount, draw->instanceCount,
- draw->firstIndex, draw->baseVertex,
- draw->firstInstance);
- break;
- }
+ case Command::DrawIndexed: {
+ DrawIndexedCmd* draw = iter->NextCommand<DrawIndexedCmd>();
+
+ DAWN_TRY(bindingTracker->Apply(commandContext));
+ vertexBufferTracker.Apply(commandList, lastPipeline);
+ RecordFirstIndexOffset(commandList, lastPipeline, draw->baseVertex,
+ draw->firstInstance);
+ commandList->DrawIndexedInstanced(draw->indexCount, draw->instanceCount,
+ draw->firstIndex, draw->baseVertex,
+ draw->firstInstance);
+ break;
+ }
- case Command::DrawIndirect: {
- DrawIndirectCmd* draw = iter->NextCommand<DrawIndirectCmd>();
+ case Command::DrawIndirect: {
+ DrawIndirectCmd* draw = iter->NextCommand<DrawIndirectCmd>();
- DAWN_TRY(bindingTracker->Apply(commandContext));
- vertexBufferTracker.Apply(commandList, lastPipeline);
+ DAWN_TRY(bindingTracker->Apply(commandContext));
+ vertexBufferTracker.Apply(commandList, lastPipeline);
- // TODO(dawn:548): remove this once builtins are emulated for indirect draws.
- // Zero the index offset values to avoid reusing values from the previous draw
- RecordFirstIndexOffset(commandList, lastPipeline, 0, 0);
+ Buffer* buffer = ToBackend(draw->indirectBuffer.Get());
+ ComPtr<ID3D12CommandSignature> signature =
+ lastPipeline->GetDrawIndirectCommandSignature();
+ commandList->ExecuteIndirect(signature.Get(), 1, buffer->GetD3D12Resource(),
+ draw->indirectOffset, nullptr, 0);
+ break;
+ }
- Buffer* buffer = ToBackend(draw->indirectBuffer.Get());
- ComPtr<ID3D12CommandSignature> signature =
- ToBackend(GetDevice())->GetDrawIndirectSignature();
- commandList->ExecuteIndirect(signature.Get(), 1, buffer->GetD3D12Resource(),
- draw->indirectOffset, nullptr, 0);
- break;
- }
+ case Command::DrawIndexedIndirect: {
+ DrawIndexedIndirectCmd* draw = iter->NextCommand<DrawIndexedIndirectCmd>();
- case Command::DrawIndexedIndirect: {
- DrawIndexedIndirectCmd* draw = iter->NextCommand<DrawIndexedIndirectCmd>();
+ DAWN_TRY(bindingTracker->Apply(commandContext));
+ vertexBufferTracker.Apply(commandList, lastPipeline);
- DAWN_TRY(bindingTracker->Apply(commandContext));
- vertexBufferTracker.Apply(commandList, lastPipeline);
+ Buffer* buffer = ToBackend(draw->indirectBuffer.Get());
+ ASSERT(buffer != nullptr);
- // TODO(dawn:548): remove this once builtins are emulated for indirect draws.
- // Zero the index offset values to avoid reusing values from the previous draw
- RecordFirstIndexOffset(commandList, lastPipeline, 0, 0);
+ ComPtr<ID3D12CommandSignature> signature =
+ lastPipeline->GetDrawIndexedIndirectCommandSignature();
+ commandList->ExecuteIndirect(signature.Get(), 1, buffer->GetD3D12Resource(),
+ draw->indirectOffset, nullptr, 0);
+ break;
+ }
- Buffer* buffer = ToBackend(draw->indirectBuffer.Get());
- ASSERT(buffer != nullptr);
+ case Command::InsertDebugMarker: {
+ InsertDebugMarkerCmd* cmd = iter->NextCommand<InsertDebugMarkerCmd>();
+ const char* label = iter->NextData<char>(cmd->length + 1);
- ComPtr<ID3D12CommandSignature> signature =
- ToBackend(GetDevice())->GetDrawIndexedIndirectSignature();
- commandList->ExecuteIndirect(signature.Get(), 1, buffer->GetD3D12Resource(),
- draw->indirectOffset, nullptr, 0);
- break;
+ if (ToBackend(GetDevice())->GetFunctions()->IsPIXEventRuntimeLoaded()) {
+ // PIX color is 1 byte per channel in ARGB format
+ constexpr uint64_t kPIXBlackColor = 0xff000000;
+ ToBackend(GetDevice())
+ ->GetFunctions()
+ ->pixSetMarkerOnCommandList(commandList, kPIXBlackColor, label);
}
+ break;
+ }
- case Command::InsertDebugMarker: {
- InsertDebugMarkerCmd* cmd = iter->NextCommand<InsertDebugMarkerCmd>();
- const char* label = iter->NextData<char>(cmd->length + 1);
+ case Command::PopDebugGroup: {
+ iter->NextCommand<PopDebugGroupCmd>();
- if (ToBackend(GetDevice())->GetFunctions()->IsPIXEventRuntimeLoaded()) {
- // PIX color is 1 byte per channel in ARGB format
- constexpr uint64_t kPIXBlackColor = 0xff000000;
- ToBackend(GetDevice())
- ->GetFunctions()
- ->pixSetMarkerOnCommandList(commandList, kPIXBlackColor, label);
- }
- break;
+ if (ToBackend(GetDevice())->GetFunctions()->IsPIXEventRuntimeLoaded()) {
+ ToBackend(GetDevice())->GetFunctions()->pixEndEventOnCommandList(commandList);
}
+ break;
+ }
- case Command::PopDebugGroup: {
- iter->NextCommand<PopDebugGroupCmd>();
+ case Command::PushDebugGroup: {
+ PushDebugGroupCmd* cmd = iter->NextCommand<PushDebugGroupCmd>();
+ const char* label = iter->NextData<char>(cmd->length + 1);
- if (ToBackend(GetDevice())->GetFunctions()->IsPIXEventRuntimeLoaded()) {
- ToBackend(GetDevice())
- ->GetFunctions()
- ->pixEndEventOnCommandList(commandList);
- }
- break;
+ if (ToBackend(GetDevice())->GetFunctions()->IsPIXEventRuntimeLoaded()) {
+ // PIX color is 1 byte per channel in ARGB format
+ constexpr uint64_t kPIXBlackColor = 0xff000000;
+ ToBackend(GetDevice())
+ ->GetFunctions()
+ ->pixBeginEventOnCommandList(commandList, kPIXBlackColor, label);
}
+ break;
+ }
- case Command::PushDebugGroup: {
- PushDebugGroupCmd* cmd = iter->NextCommand<PushDebugGroupCmd>();
- const char* label = iter->NextData<char>(cmd->length + 1);
+ case Command::SetRenderPipeline: {
+ SetRenderPipelineCmd* cmd = iter->NextCommand<SetRenderPipelineCmd>();
+ RenderPipeline* pipeline = ToBackend(cmd->pipeline).Get();
- if (ToBackend(GetDevice())->GetFunctions()->IsPIXEventRuntimeLoaded()) {
- // PIX color is 1 byte per channel in ARGB format
- constexpr uint64_t kPIXBlackColor = 0xff000000;
- ToBackend(GetDevice())
- ->GetFunctions()
- ->pixBeginEventOnCommandList(commandList, kPIXBlackColor, label);
- }
- break;
- }
+ commandList->SetPipelineState(pipeline->GetPipelineState());
+ commandList->IASetPrimitiveTopology(pipeline->GetD3D12PrimitiveTopology());
- case Command::SetRenderPipeline: {
- SetRenderPipelineCmd* cmd = iter->NextCommand<SetRenderPipelineCmd>();
- RenderPipeline* pipeline = ToBackend(cmd->pipeline).Get();
+ bindingTracker->OnSetPipeline(pipeline);
- commandList->SetPipelineState(pipeline->GetPipelineState());
- commandList->IASetPrimitiveTopology(pipeline->GetD3D12PrimitiveTopology());
+ lastPipeline = pipeline;
+ break;
+ }
- bindingTracker->OnSetPipeline(pipeline);
+ case Command::SetBindGroup: {
+ SetBindGroupCmd* cmd = iter->NextCommand<SetBindGroupCmd>();
+ BindGroup* group = ToBackend(cmd->group.Get());
+ uint32_t* dynamicOffsets = nullptr;
- lastPipeline = pipeline;
- break;
+ if (cmd->dynamicOffsetCount > 0) {
+ dynamicOffsets = iter->NextData<uint32_t>(cmd->dynamicOffsetCount);
}
- case Command::SetBindGroup: {
- SetBindGroupCmd* cmd = iter->NextCommand<SetBindGroupCmd>();
- BindGroup* group = ToBackend(cmd->group.Get());
- uint32_t* dynamicOffsets = nullptr;
-
- if (cmd->dynamicOffsetCount > 0) {
- dynamicOffsets = iter->NextData<uint32_t>(cmd->dynamicOffsetCount);
- }
+ bindingTracker->OnSetBindGroup(cmd->index, group, cmd->dynamicOffsetCount,
+ dynamicOffsets);
+ break;
+ }
- bindingTracker->OnSetBindGroup(cmd->index, group, cmd->dynamicOffsetCount,
- dynamicOffsets);
- break;
- }
+ case Command::SetIndexBuffer: {
+ SetIndexBufferCmd* cmd = iter->NextCommand<SetIndexBufferCmd>();
- case Command::SetIndexBuffer: {
- SetIndexBufferCmd* cmd = iter->NextCommand<SetIndexBufferCmd>();
+ D3D12_INDEX_BUFFER_VIEW bufferView;
+ bufferView.Format = DXGIIndexFormat(cmd->format);
+ bufferView.BufferLocation = ToBackend(cmd->buffer)->GetVA() + cmd->offset;
+ bufferView.SizeInBytes = cmd->size;
- D3D12_INDEX_BUFFER_VIEW bufferView;
- bufferView.Format = DXGIIndexFormat(cmd->format);
- bufferView.BufferLocation = ToBackend(cmd->buffer)->GetVA() + cmd->offset;
- bufferView.SizeInBytes = cmd->size;
+ commandList->IASetIndexBuffer(&bufferView);
+ break;
+ }
- commandList->IASetIndexBuffer(&bufferView);
- break;
- }
+ case Command::SetVertexBuffer: {
+ SetVertexBufferCmd* cmd = iter->NextCommand<SetVertexBufferCmd>();
- case Command::SetVertexBuffer: {
- SetVertexBufferCmd* cmd = iter->NextCommand<SetVertexBufferCmd>();
+ vertexBufferTracker.OnSetVertexBuffer(cmd->slot, ToBackend(cmd->buffer.Get()),
+ cmd->offset, cmd->size);
+ break;
+ }
- vertexBufferTracker.OnSetVertexBuffer(cmd->slot, ToBackend(cmd->buffer.Get()),
- cmd->offset, cmd->size);
- break;
- }
+ default:
+ UNREACHABLE();
+ break;
+ }
+ return {};
+ };
- default:
- UNREACHABLE();
- break;
+ Command type;
+ while (mCommands.NextCommandId(&type)) {
+ switch (type) {
+ case Command::EndRenderPass: {
+ mCommands.NextCommand<EndRenderPassCmd>();
+ if (useRenderPass) {
+ commandContext->GetCommandList4()->EndRenderPass();
+ } else if (renderPass->attachmentState->GetSampleCount() > 1) {
+ ResolveMultisampledRenderPass(commandContext, renderPass);
+ }
+ return {};
}
- return {};
- };
-
- Command type;
- while (mCommands.NextCommandId(&type)) {
- switch (type) {
- case Command::EndRenderPass: {
- mCommands.NextCommand<EndRenderPassCmd>();
- if (useRenderPass) {
- commandContext->GetCommandList4()->EndRenderPass();
- } else if (renderPass->attachmentState->GetSampleCount() > 1) {
- ResolveMultisampledRenderPass(commandContext, renderPass);
- }
- return {};
- }
- case Command::SetStencilReference: {
- SetStencilReferenceCmd* cmd = mCommands.NextCommand<SetStencilReferenceCmd>();
+ case Command::SetStencilReference: {
+ SetStencilReferenceCmd* cmd = mCommands.NextCommand<SetStencilReferenceCmd>();
- commandList->OMSetStencilRef(cmd->reference);
- break;
- }
+ commandList->OMSetStencilRef(cmd->reference);
+ break;
+ }
- case Command::SetViewport: {
- SetViewportCmd* cmd = mCommands.NextCommand<SetViewportCmd>();
- D3D12_VIEWPORT viewport;
- viewport.TopLeftX = cmd->x;
- viewport.TopLeftY = cmd->y;
- viewport.Width = cmd->width;
- viewport.Height = cmd->height;
- viewport.MinDepth = cmd->minDepth;
- viewport.MaxDepth = cmd->maxDepth;
-
- commandList->RSSetViewports(1, &viewport);
- break;
- }
+ case Command::SetViewport: {
+ SetViewportCmd* cmd = mCommands.NextCommand<SetViewportCmd>();
+ D3D12_VIEWPORT viewport;
+ viewport.TopLeftX = cmd->x;
+ viewport.TopLeftY = cmd->y;
+ viewport.Width = cmd->width;
+ viewport.Height = cmd->height;
+ viewport.MinDepth = cmd->minDepth;
+ viewport.MaxDepth = cmd->maxDepth;
+
+ commandList->RSSetViewports(1, &viewport);
+ break;
+ }
- case Command::SetScissorRect: {
- SetScissorRectCmd* cmd = mCommands.NextCommand<SetScissorRectCmd>();
- D3D12_RECT rect;
- rect.left = cmd->x;
- rect.top = cmd->y;
- rect.right = cmd->x + cmd->width;
- rect.bottom = cmd->y + cmd->height;
+ case Command::SetScissorRect: {
+ SetScissorRectCmd* cmd = mCommands.NextCommand<SetScissorRectCmd>();
+ D3D12_RECT rect;
+ rect.left = cmd->x;
+ rect.top = cmd->y;
+ rect.right = cmd->x + cmd->width;
+ rect.bottom = cmd->y + cmd->height;
- commandList->RSSetScissorRects(1, &rect);
- break;
- }
+ commandList->RSSetScissorRects(1, &rect);
+ break;
+ }
- case Command::SetBlendConstant: {
- SetBlendConstantCmd* cmd = mCommands.NextCommand<SetBlendConstantCmd>();
- const std::array<float, 4> color = ConvertToFloatColor(cmd->color);
- commandList->OMSetBlendFactor(color.data());
- break;
- }
+ case Command::SetBlendConstant: {
+ SetBlendConstantCmd* cmd = mCommands.NextCommand<SetBlendConstantCmd>();
+ const std::array<float, 4> color = ConvertToFloatColor(cmd->color);
+ commandList->OMSetBlendFactor(color.data());
+ break;
+ }
- case Command::ExecuteBundles: {
- ExecuteBundlesCmd* cmd = mCommands.NextCommand<ExecuteBundlesCmd>();
- auto bundles = mCommands.NextData<Ref<RenderBundleBase>>(cmd->count);
+ case Command::ExecuteBundles: {
+ ExecuteBundlesCmd* cmd = mCommands.NextCommand<ExecuteBundlesCmd>();
+ auto bundles = mCommands.NextData<Ref<RenderBundleBase>>(cmd->count);
- for (uint32_t i = 0; i < cmd->count; ++i) {
- CommandIterator* iter = bundles[i]->GetCommands();
- iter->Reset();
- while (iter->NextCommandId(&type)) {
- DAWN_TRY(EncodeRenderBundleCommand(iter, type));
- }
+ for (uint32_t i = 0; i < cmd->count; ++i) {
+ CommandIterator* iter = bundles[i]->GetCommands();
+ iter->Reset();
+ while (iter->NextCommandId(&type)) {
+ DAWN_TRY(EncodeRenderBundleCommand(iter, type));
}
- break;
}
+ break;
+ }
- case Command::BeginOcclusionQuery: {
- BeginOcclusionQueryCmd* cmd = mCommands.NextCommand<BeginOcclusionQueryCmd>();
- QuerySet* querySet = ToBackend(cmd->querySet.Get());
- ASSERT(D3D12QueryType(querySet->GetQueryType()) ==
- D3D12_QUERY_TYPE_BINARY_OCCLUSION);
- commandList->BeginQuery(querySet->GetQueryHeap(),
- D3D12_QUERY_TYPE_BINARY_OCCLUSION, cmd->queryIndex);
- break;
- }
+ case Command::BeginOcclusionQuery: {
+ BeginOcclusionQueryCmd* cmd = mCommands.NextCommand<BeginOcclusionQueryCmd>();
+ QuerySet* querySet = ToBackend(cmd->querySet.Get());
+ ASSERT(D3D12QueryType(querySet->GetQueryType()) ==
+ D3D12_QUERY_TYPE_BINARY_OCCLUSION);
+ commandList->BeginQuery(querySet->GetQueryHeap(), D3D12_QUERY_TYPE_BINARY_OCCLUSION,
+ cmd->queryIndex);
+ break;
+ }
- case Command::EndOcclusionQuery: {
- EndOcclusionQueryCmd* cmd = mCommands.NextCommand<EndOcclusionQueryCmd>();
- QuerySet* querySet = ToBackend(cmd->querySet.Get());
- ASSERT(D3D12QueryType(querySet->GetQueryType()) ==
- D3D12_QUERY_TYPE_BINARY_OCCLUSION);
- commandList->EndQuery(querySet->GetQueryHeap(),
- D3D12_QUERY_TYPE_BINARY_OCCLUSION, cmd->queryIndex);
- break;
- }
+ case Command::EndOcclusionQuery: {
+ EndOcclusionQueryCmd* cmd = mCommands.NextCommand<EndOcclusionQueryCmd>();
+ QuerySet* querySet = ToBackend(cmd->querySet.Get());
+ ASSERT(D3D12QueryType(querySet->GetQueryType()) ==
+ D3D12_QUERY_TYPE_BINARY_OCCLUSION);
+ commandList->EndQuery(querySet->GetQueryHeap(), D3D12_QUERY_TYPE_BINARY_OCCLUSION,
+ cmd->queryIndex);
+ break;
+ }
- case Command::WriteTimestamp: {
- WriteTimestampCmd* cmd = mCommands.NextCommand<WriteTimestampCmd>();
+ case Command::WriteTimestamp: {
+ WriteTimestampCmd* cmd = mCommands.NextCommand<WriteTimestampCmd>();
- RecordWriteTimestampCmd(commandList, cmd);
- break;
- }
+ RecordWriteTimestampCmd(commandList, cmd);
+ break;
+ }
- default: {
- DAWN_TRY(EncodeRenderBundleCommand(&mCommands, type));
- break;
- }
+ default: {
+ DAWN_TRY(EncodeRenderBundleCommand(&mCommands, type));
+ break;
}
}
- return {};
}
+ return {};
+}
} // namespace dawn::native::d3d12
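
The InsertDebugMarker/PushDebugGroup cases above hand PIX a color packed one byte per channel in ARGB order (kPIXBlackColor = 0xff000000). A minimal standalone sketch of that packing, with an illustrative helper name that is not part of the patch:

#include <cstdint>
#include <cstdio>

// Packs one byte per channel into the 0xAARRGGBB layout the PIX markers expect.
constexpr uint64_t PackPIXColorARGB(uint8_t a, uint8_t r, uint8_t g, uint8_t b) {
    return (static_cast<uint64_t>(a) << 24) | (static_cast<uint64_t>(r) << 16) |
           (static_cast<uint64_t>(g) << 8) | static_cast<uint64_t>(b);
}

static_assert(PackPIXColorARGB(0xff, 0x00, 0x00, 0x00) == 0xff000000,
              "opaque black matches kPIXBlackColor");

int main() {
    std::printf("opaque red = 0x%08llx\n",
                static_cast<unsigned long long>(PackPIXColorARGB(0xff, 0xff, 0x00, 0x00)));
    return 0;
}
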
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/CommandBufferD3D12.h b/chromium/third_party/dawn/src/dawn/native/d3d12/CommandBufferD3D12.h
index 952d4503b83..ea0bb704ac5 100644
--- a/chromium/third_party/dawn/src/dawn/native/d3d12/CommandBufferD3D12.h
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/CommandBufferD3D12.h
@@ -19,38 +19,38 @@
#include "dawn/native/Error.h"
namespace dawn::native {
- struct BeginRenderPassCmd;
+struct BeginRenderPassCmd;
} // namespace dawn::native
namespace dawn::native::d3d12 {
- class BindGroupStateTracker;
- class CommandRecordingContext;
- class RenderPassBuilder;
-
- class CommandBuffer final : public CommandBufferBase {
- public:
- static Ref<CommandBuffer> Create(CommandEncoder* encoder,
- const CommandBufferDescriptor* descriptor);
-
- MaybeError RecordCommands(CommandRecordingContext* commandContext);
-
- private:
- CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor);
-
- MaybeError RecordComputePass(CommandRecordingContext* commandContext,
- BindGroupStateTracker* bindingTracker,
- const ComputePassResourceUsage& resourceUsages);
- MaybeError RecordRenderPass(CommandRecordingContext* commandContext,
- BindGroupStateTracker* bindingTracker,
- BeginRenderPassCmd* renderPass,
- bool passHasUAV);
- MaybeError SetupRenderPass(CommandRecordingContext* commandContext,
- BeginRenderPassCmd* renderPass,
- RenderPassBuilder* renderPassBuilder);
- void EmulateBeginRenderPass(CommandRecordingContext* commandContext,
- const RenderPassBuilder* renderPassBuilder) const;
- };
+class BindGroupStateTracker;
+class CommandRecordingContext;
+class RenderPassBuilder;
+
+class CommandBuffer final : public CommandBufferBase {
+ public:
+ static Ref<CommandBuffer> Create(CommandEncoder* encoder,
+ const CommandBufferDescriptor* descriptor);
+
+ MaybeError RecordCommands(CommandRecordingContext* commandContext);
+
+ private:
+ CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor);
+
+ MaybeError RecordComputePass(CommandRecordingContext* commandContext,
+ BindGroupStateTracker* bindingTracker,
+ const ComputePassResourceUsage& resourceUsages);
+ MaybeError RecordRenderPass(CommandRecordingContext* commandContext,
+ BindGroupStateTracker* bindingTracker,
+ BeginRenderPassCmd* renderPass,
+ bool passHasUAV);
+ MaybeError SetupRenderPass(CommandRecordingContext* commandContext,
+ BeginRenderPassCmd* renderPass,
+ RenderPassBuilder* renderPassBuilder);
+ void EmulateBeginRenderPass(CommandRecordingContext* commandContext,
+ const RenderPassBuilder* renderPassBuilder) const;
+};
} // namespace dawn::native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/CommandRecordingContext.cpp b/chromium/third_party/dawn/src/dawn/native/d3d12/CommandRecordingContext.cpp
index bb8ef813005..d4fa04d525f 100644
--- a/chromium/third_party/dawn/src/dawn/native/d3d12/CommandRecordingContext.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/CommandRecordingContext.cpp
@@ -11,8 +11,15 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
+
#include "dawn/native/d3d12/CommandRecordingContext.h"
+#include <profileapi.h>
+#include <sysinfoapi.h>
+
+#include <string>
+#include <utility>
+
#include "dawn/native/d3d12/CommandAllocatorManager.h"
#include "dawn/native/d3d12/D3D12Error.h"
#include "dawn/native/d3d12/DeviceD3D12.h"
@@ -21,155 +28,150 @@
#include "dawn/platform/DawnPlatform.h"
#include "dawn/platform/tracing/TraceEvent.h"
-#include <profileapi.h>
-#include <sysinfoapi.h>
-
namespace dawn::native::d3d12 {
- void CommandRecordingContext::AddToSharedTextureList(Texture* texture) {
- ASSERT(IsOpen());
- mSharedTextures.insert(texture);
+void CommandRecordingContext::AddToSharedTextureList(Texture* texture) {
+ ASSERT(IsOpen());
+ mSharedTextures.insert(texture);
+}
+
+MaybeError CommandRecordingContext::Open(ID3D12Device* d3d12Device,
+ CommandAllocatorManager* commandAllocationManager) {
+ ASSERT(!IsOpen());
+ ID3D12CommandAllocator* commandAllocator;
+ DAWN_TRY_ASSIGN(commandAllocator, commandAllocationManager->ReserveCommandAllocator());
+ if (mD3d12CommandList != nullptr) {
+ MaybeError error = CheckHRESULT(mD3d12CommandList->Reset(commandAllocator, nullptr),
+ "D3D12 resetting command list");
+ if (error.IsError()) {
+ mD3d12CommandList.Reset();
+ DAWN_TRY(std::move(error));
+ }
+ } else {
+ ComPtr<ID3D12GraphicsCommandList> d3d12GraphicsCommandList;
+ DAWN_TRY(CheckHRESULT(
+ d3d12Device->CreateCommandList(0, D3D12_COMMAND_LIST_TYPE_DIRECT, commandAllocator,
+ nullptr, IID_PPV_ARGS(&d3d12GraphicsCommandList)),
+ "D3D12 creating direct command list"));
+ mD3d12CommandList = std::move(d3d12GraphicsCommandList);
+ // Store a cast to ID3D12GraphicsCommandList4. This is required to use the D3D12 render
+ // pass APIs introduced in Windows build 1809.
+ mD3d12CommandList.As(&mD3d12CommandList4);
}
- MaybeError CommandRecordingContext::Open(ID3D12Device* d3d12Device,
- CommandAllocatorManager* commandAllocationManager) {
- ASSERT(!IsOpen());
- ID3D12CommandAllocator* commandAllocator;
- DAWN_TRY_ASSIGN(commandAllocator, commandAllocationManager->ReserveCommandAllocator());
- if (mD3d12CommandList != nullptr) {
- MaybeError error = CheckHRESULT(mD3d12CommandList->Reset(commandAllocator, nullptr),
- "D3D12 resetting command list");
- if (error.IsError()) {
- mD3d12CommandList.Reset();
- DAWN_TRY(std::move(error));
- }
- } else {
- ComPtr<ID3D12GraphicsCommandList> d3d12GraphicsCommandList;
- DAWN_TRY(CheckHRESULT(
- d3d12Device->CreateCommandList(0, D3D12_COMMAND_LIST_TYPE_DIRECT, commandAllocator,
- nullptr, IID_PPV_ARGS(&d3d12GraphicsCommandList)),
- "D3D12 creating direct command list"));
- mD3d12CommandList = std::move(d3d12GraphicsCommandList);
- // Store a cast to ID3D12GraphicsCommandList4. This is required to use the D3D12 render
- // pass APIs introduced in Windows build 1809.
- mD3d12CommandList.As(&mD3d12CommandList4);
- }
+ mIsOpen = true;
- mIsOpen = true;
+ return {};
+}
- return {};
- }
-
- MaybeError CommandRecordingContext::ExecuteCommandList(Device* device) {
- if (IsOpen()) {
- // Shared textures must be transitioned to common state after the last usage in order
- // for them to be used by other APIs like D3D11. We ensure this by transitioning to the
- // common state right before command list submission. TransitionUsageNow itself ensures
- // no unnecessary transitions happen if the resources is already in the common state.
- for (Texture* texture : mSharedTextures) {
- DAWN_TRY(texture->AcquireKeyedMutex());
- texture->TrackAllUsageAndTransitionNow(this, D3D12_RESOURCE_STATE_COMMON);
- }
-
- MaybeError error =
- CheckHRESULT(mD3d12CommandList->Close(), "D3D12 closing pending command list");
- if (error.IsError()) {
- Release();
- DAWN_TRY(std::move(error));
- }
- DAWN_TRY(device->GetResidencyManager()->EnsureHeapsAreResident(
- mHeapsPendingUsage.data(), mHeapsPendingUsage.size()));
-
- if (device->IsToggleEnabled(Toggle::RecordDetailedTimingInTraceEvents)) {
- uint64_t gpuTimestamp;
- uint64_t cpuTimestamp;
- FILETIME fileTimeNonPrecise;
- SYSTEMTIME systemTimeNonPrecise;
-
- // Both supported since Windows 2000, have a accuracy of 1ms
- GetSystemTimeAsFileTime(&fileTimeNonPrecise);
- GetSystemTime(&systemTimeNonPrecise);
- // Query CPU and GPU timestamps at almost the same time
- device->GetCommandQueue()->GetClockCalibration(&gpuTimestamp, &cpuTimestamp);
-
- uint64_t gpuFrequency;
- uint64_t cpuFrequency;
- LARGE_INTEGER cpuFrequencyLargeInteger;
- device->GetCommandQueue()->GetTimestampFrequency(&gpuFrequency);
- QueryPerformanceFrequency(
- &cpuFrequencyLargeInteger); // Supported since Windows 2000
- cpuFrequency = cpuFrequencyLargeInteger.QuadPart;
-
- std::string timingInfo = absl::StrFormat(
- "UTC Time: %u/%u/%u %02u:%02u:%02u.%03u, File Time: %u, CPU "
- "Timestamp: %u, GPU Timestamp: %u, CPU Tick Frequency: %u, GPU Tick Frequency: "
- "%u",
- systemTimeNonPrecise.wYear, systemTimeNonPrecise.wMonth,
- systemTimeNonPrecise.wDay, systemTimeNonPrecise.wHour,
- systemTimeNonPrecise.wMinute, systemTimeNonPrecise.wSecond,
- systemTimeNonPrecise.wMilliseconds,
- (static_cast<uint64_t>(fileTimeNonPrecise.dwHighDateTime) << 32) +
- fileTimeNonPrecise.dwLowDateTime,
- cpuTimestamp, gpuTimestamp, cpuFrequency, gpuFrequency);
-
- TRACE_EVENT_INSTANT1(
- device->GetPlatform(), General,
- "d3d12::CommandRecordingContext::ExecuteCommandList Detailed Timing", "Timing",
- timingInfo.c_str());
- }
-
- ID3D12CommandList* d3d12CommandList = GetCommandList();
- device->GetCommandQueue()->ExecuteCommandLists(1, &d3d12CommandList);
-
- for (Texture* texture : mSharedTextures) {
- texture->ReleaseKeyedMutex();
- }
-
- mIsOpen = false;
- mSharedTextures.clear();
- mHeapsPendingUsage.clear();
+MaybeError CommandRecordingContext::ExecuteCommandList(Device* device) {
+ if (IsOpen()) {
+ // Shared textures must be transitioned to common state after the last usage in order
+ // for them to be used by other APIs like D3D11. We ensure this by transitioning to the
+ // common state right before command list submission. TransitionUsageNow itself ensures
+        // no unnecessary transitions happen if the resource is already in the common state.
+ for (Texture* texture : mSharedTextures) {
+ DAWN_TRY(texture->AcquireKeyedMutex());
+ texture->TrackAllUsageAndTransitionNow(this, D3D12_RESOURCE_STATE_COMMON);
}
- return {};
- }
- void CommandRecordingContext::TrackHeapUsage(Heap* heap, ExecutionSerial serial) {
- // Before tracking the heap, check the last serial it was recorded on to ensure we aren't
- // tracking it more than once.
- if (heap->GetLastUsage() < serial) {
- heap->SetLastUsage(serial);
- mHeapsPendingUsage.push_back(heap);
+ MaybeError error =
+ CheckHRESULT(mD3d12CommandList->Close(), "D3D12 closing pending command list");
+ if (error.IsError()) {
+ Release();
+ DAWN_TRY(std::move(error));
+ }
+ DAWN_TRY(device->GetResidencyManager()->EnsureHeapsAreResident(mHeapsPendingUsage.data(),
+ mHeapsPendingUsage.size()));
+
+ if (device->IsToggleEnabled(Toggle::RecordDetailedTimingInTraceEvents)) {
+ uint64_t gpuTimestamp;
+ uint64_t cpuTimestamp;
+ FILETIME fileTimeNonPrecise;
+ SYSTEMTIME systemTimeNonPrecise;
+
+            // Both supported since Windows 2000; both have an accuracy of 1 ms
+ GetSystemTimeAsFileTime(&fileTimeNonPrecise);
+ GetSystemTime(&systemTimeNonPrecise);
+ // Query CPU and GPU timestamps at almost the same time
+ device->GetCommandQueue()->GetClockCalibration(&gpuTimestamp, &cpuTimestamp);
+
+ uint64_t gpuFrequency;
+ uint64_t cpuFrequency;
+ LARGE_INTEGER cpuFrequencyLargeInteger;
+ device->GetCommandQueue()->GetTimestampFrequency(&gpuFrequency);
+ QueryPerformanceFrequency(&cpuFrequencyLargeInteger); // Supported since Windows 2000
+ cpuFrequency = cpuFrequencyLargeInteger.QuadPart;
+
+ std::string timingInfo = absl::StrFormat(
+ "UTC Time: %u/%u/%u %02u:%02u:%02u.%03u, File Time: %u, CPU "
+ "Timestamp: %u, GPU Timestamp: %u, CPU Tick Frequency: %u, GPU Tick Frequency: "
+ "%u",
+ systemTimeNonPrecise.wYear, systemTimeNonPrecise.wMonth, systemTimeNonPrecise.wDay,
+ systemTimeNonPrecise.wHour, systemTimeNonPrecise.wMinute,
+ systemTimeNonPrecise.wSecond, systemTimeNonPrecise.wMilliseconds,
+ (static_cast<uint64_t>(fileTimeNonPrecise.dwHighDateTime) << 32) +
+ fileTimeNonPrecise.dwLowDateTime,
+ cpuTimestamp, gpuTimestamp, cpuFrequency, gpuFrequency);
+
+ TRACE_EVENT_INSTANT1(
+ device->GetPlatform(), General,
+ "d3d12::CommandRecordingContext::ExecuteCommandList Detailed Timing", "Timing",
+ timingInfo.c_str());
}
- }
- ID3D12GraphicsCommandList* CommandRecordingContext::GetCommandList() const {
- ASSERT(mD3d12CommandList != nullptr);
- ASSERT(IsOpen());
- return mD3d12CommandList.Get();
- }
+ ID3D12CommandList* d3d12CommandList = GetCommandList();
+ device->GetCommandQueue()->ExecuteCommandLists(1, &d3d12CommandList);
- // This function will fail on Windows versions prior to 1809. Support must be queried through
- // the device before calling.
- ID3D12GraphicsCommandList4* CommandRecordingContext::GetCommandList4() const {
- ASSERT(IsOpen());
- ASSERT(mD3d12CommandList != nullptr);
- return mD3d12CommandList4.Get();
- }
+ for (Texture* texture : mSharedTextures) {
+ texture->ReleaseKeyedMutex();
+ }
- void CommandRecordingContext::Release() {
- mD3d12CommandList.Reset();
- mD3d12CommandList4.Reset();
mIsOpen = false;
mSharedTextures.clear();
mHeapsPendingUsage.clear();
- mTempBuffers.clear();
}
-
- bool CommandRecordingContext::IsOpen() const {
- return mIsOpen;
- }
-
- void CommandRecordingContext::AddToTempBuffers(Ref<Buffer> tempBuffer) {
- mTempBuffers.emplace_back(tempBuffer);
+ return {};
+}
+
+void CommandRecordingContext::TrackHeapUsage(Heap* heap, ExecutionSerial serial) {
+ // Before tracking the heap, check the last serial it was recorded on to ensure we aren't
+ // tracking it more than once.
+ if (heap->GetLastUsage() < serial) {
+ heap->SetLastUsage(serial);
+ mHeapsPendingUsage.push_back(heap);
}
+}
+
+ID3D12GraphicsCommandList* CommandRecordingContext::GetCommandList() const {
+ ASSERT(mD3d12CommandList != nullptr);
+ ASSERT(IsOpen());
+ return mD3d12CommandList.Get();
+}
+
+// This function will fail on Windows versions prior to 1809. Support must be queried through
+// the device before calling.
+ID3D12GraphicsCommandList4* CommandRecordingContext::GetCommandList4() const {
+ ASSERT(IsOpen());
+ ASSERT(mD3d12CommandList != nullptr);
+ return mD3d12CommandList4.Get();
+}
+
+void CommandRecordingContext::Release() {
+ mD3d12CommandList.Reset();
+ mD3d12CommandList4.Reset();
+ mIsOpen = false;
+ mSharedTextures.clear();
+ mHeapsPendingUsage.clear();
+ mTempBuffers.clear();
+}
+
+bool CommandRecordingContext::IsOpen() const {
+ return mIsOpen;
+}
+
+void CommandRecordingContext::AddToTempBuffers(Ref<Buffer> tempBuffer) {
+ mTempBuffers.emplace_back(tempBuffer);
+}
} // namespace dawn::native::d3d12
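
The detailed-timing branch above pairs ID3D12CommandQueue::GetClockCalibration with GetTimestampFrequency and QueryPerformanceFrequency so CPU and GPU timestamps can be compared on one time base; converting either reading to wall time is just ticks divided by that clock's frequency. A small sketch of only that arithmetic, using made-up calibration numbers in place of real device queries:

#include <cstdint>
#include <cstdio>

// Converts a tick delta to milliseconds given the clock frequency in ticks per second.
static double TicksToMs(uint64_t startTicks, uint64_t endTicks, uint64_t frequency) {
    return static_cast<double>(endTicks - startTicks) * 1000.0 / static_cast<double>(frequency);
}

int main() {
    // Hypothetical values standing in for GetClockCalibration / GetTimestampFrequency
    // and QueryPerformanceFrequency results.
    uint64_t gpuFrequency = 25'000'000;  // 25 MHz GPU timestamp clock
    uint64_t cpuFrequency = 10'000'000;  // 10 MHz QueryPerformanceCounter clock
    uint64_t gpuStart = 1'000'000, gpuEnd = 1'250'000;
    uint64_t cpuStart = 400'000, cpuEnd = 500'000;

    std::printf("GPU span: %.3f ms, CPU span: %.3f ms\n",
                TicksToMs(gpuStart, gpuEnd, gpuFrequency),
                TicksToMs(cpuStart, cpuEnd, cpuFrequency));
    return 0;
}
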
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/CommandRecordingContext.h b/chromium/third_party/dawn/src/dawn/native/d3d12/CommandRecordingContext.h
index 59c2ffaafeb..80b6204e1b8 100644
--- a/chromium/third_party/dawn/src/dawn/native/d3d12/CommandRecordingContext.h
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/CommandRecordingContext.h
@@ -14,45 +14,45 @@
#ifndef SRC_DAWN_NATIVE_D3D12_COMMANDRECORDINGCONTEXT_H_
#define SRC_DAWN_NATIVE_D3D12_COMMANDRECORDINGCONTEXT_H_
+#include <set>
+#include <vector>
+
#include "dawn/native/Error.h"
#include "dawn/native/IntegerTypes.h"
#include "dawn/native/d3d12/BufferD3D12.h"
#include "dawn/native/d3d12/d3d12_platform.h"
-#include <set>
-
namespace dawn::native::d3d12 {
- class CommandAllocatorManager;
- class Device;
- class Heap;
- class Texture;
+class CommandAllocatorManager;
+class Device;
+class Heap;
+class Texture;
- class CommandRecordingContext {
- public:
- void AddToSharedTextureList(Texture* texture);
- MaybeError Open(ID3D12Device* d3d12Device,
- CommandAllocatorManager* commandAllocationManager);
+class CommandRecordingContext {
+ public:
+ void AddToSharedTextureList(Texture* texture);
+ MaybeError Open(ID3D12Device* d3d12Device, CommandAllocatorManager* commandAllocationManager);
- ID3D12GraphicsCommandList* GetCommandList() const;
- ID3D12GraphicsCommandList4* GetCommandList4() const;
- void Release();
- bool IsOpen() const;
+ ID3D12GraphicsCommandList* GetCommandList() const;
+ ID3D12GraphicsCommandList4* GetCommandList4() const;
+ void Release();
+ bool IsOpen() const;
- MaybeError ExecuteCommandList(Device* device);
+ MaybeError ExecuteCommandList(Device* device);
- void TrackHeapUsage(Heap* heap, ExecutionSerial serial);
+ void TrackHeapUsage(Heap* heap, ExecutionSerial serial);
- void AddToTempBuffers(Ref<Buffer> tempBuffer);
+ void AddToTempBuffers(Ref<Buffer> tempBuffer);
- private:
- ComPtr<ID3D12GraphicsCommandList> mD3d12CommandList;
- ComPtr<ID3D12GraphicsCommandList4> mD3d12CommandList4;
- bool mIsOpen = false;
- std::set<Texture*> mSharedTextures;
- std::vector<Heap*> mHeapsPendingUsage;
+ private:
+ ComPtr<ID3D12GraphicsCommandList> mD3d12CommandList;
+ ComPtr<ID3D12GraphicsCommandList4> mD3d12CommandList4;
+ bool mIsOpen = false;
+ std::set<Texture*> mSharedTextures;
+ std::vector<Heap*> mHeapsPendingUsage;
- std::vector<Ref<Buffer>> mTempBuffers;
- };
+ std::vector<Ref<Buffer>> mTempBuffers;
+};
} // namespace dawn::native::d3d12
#endif // SRC_DAWN_NATIVE_D3D12_COMMANDRECORDINGCONTEXT_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/ComputePipelineD3D12.cpp b/chromium/third_party/dawn/src/dawn/native/d3d12/ComputePipelineD3D12.cpp
index 6df1049ffbf..5ba8b7604dd 100644
--- a/chromium/third_party/dawn/src/dawn/native/d3d12/ComputePipelineD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/ComputePipelineD3D12.cpp
@@ -14,7 +14,11 @@
#include "dawn/native/d3d12/ComputePipelineD3D12.h"
+#include <memory>
+#include <utility>
+
#include "dawn/native/CreatePipelineAsyncTask.h"
+#include "dawn/native/d3d12/BlobD3D12.h"
#include "dawn/native/d3d12/D3D12Error.h"
#include "dawn/native/d3d12/DeviceD3D12.h"
#include "dawn/native/d3d12/PipelineLayoutD3D12.h"
@@ -24,82 +28,102 @@
namespace dawn::native::d3d12 {
- Ref<ComputePipeline> ComputePipeline::CreateUninitialized(
- Device* device,
- const ComputePipelineDescriptor* descriptor) {
- return AcquireRef(new ComputePipeline(device, descriptor));
- }
+Ref<ComputePipeline> ComputePipeline::CreateUninitialized(
+ Device* device,
+ const ComputePipelineDescriptor* descriptor) {
+ return AcquireRef(new ComputePipeline(device, descriptor));
+}
- MaybeError ComputePipeline::Initialize() {
- Device* device = ToBackend(GetDevice());
- uint32_t compileFlags = 0;
+MaybeError ComputePipeline::Initialize() {
+ Device* device = ToBackend(GetDevice());
+ uint32_t compileFlags = 0;
- if (!device->IsToggleEnabled(Toggle::UseDXC) &&
- !device->IsToggleEnabled(Toggle::FxcOptimizations)) {
- compileFlags |= D3DCOMPILE_OPTIMIZATION_LEVEL0;
- }
+ if (!device->IsToggleEnabled(Toggle::UseDXC) &&
+ !device->IsToggleEnabled(Toggle::FxcOptimizations)) {
+ compileFlags |= D3DCOMPILE_OPTIMIZATION_LEVEL0;
+ }
- if (device->IsToggleEnabled(Toggle::EmitHLSLDebugSymbols)) {
- compileFlags |= D3DCOMPILE_DEBUG | D3DCOMPILE_SKIP_OPTIMIZATION;
- }
+ if (device->IsToggleEnabled(Toggle::EmitHLSLDebugSymbols)) {
+ compileFlags |= D3DCOMPILE_DEBUG | D3DCOMPILE_SKIP_OPTIMIZATION;
+ }
- // SPRIV-cross does matrix multiplication expecting row major matrices
- compileFlags |= D3DCOMPILE_PACK_MATRIX_ROW_MAJOR;
+    // SPIRV-Cross does matrix multiplication expecting row-major matrices
+ compileFlags |= D3DCOMPILE_PACK_MATRIX_ROW_MAJOR;
- const ProgrammableStage& computeStage = GetStage(SingleShaderStage::Compute);
- ShaderModule* module = ToBackend(computeStage.module.Get());
+ const ProgrammableStage& computeStage = GetStage(SingleShaderStage::Compute);
+ ShaderModule* module = ToBackend(computeStage.module.Get());
- D3D12_COMPUTE_PIPELINE_STATE_DESC d3dDesc = {};
- d3dDesc.pRootSignature = ToBackend(GetLayout())->GetRootSignature();
+ D3D12_COMPUTE_PIPELINE_STATE_DESC d3dDesc = {};
+ d3dDesc.pRootSignature = ToBackend(GetLayout())->GetRootSignature();
- CompiledShader compiledShader;
- DAWN_TRY_ASSIGN(compiledShader, module->Compile(computeStage, SingleShaderStage::Compute,
- ToBackend(GetLayout()), compileFlags));
- d3dDesc.CS = compiledShader.GetD3D12ShaderBytecode();
- auto* d3d12Device = device->GetD3D12Device();
- DAWN_TRY(CheckHRESULT(
- d3d12Device->CreateComputePipelineState(&d3dDesc, IID_PPV_ARGS(&mPipelineState)),
- "D3D12 creating pipeline state"));
+    // TODO(dawn:549): Compile the shader every time until we implement a compiled shader cache
+ CompiledShader compiledShader;
+ DAWN_TRY_ASSIGN(compiledShader, module->Compile(computeStage, SingleShaderStage::Compute,
+ ToBackend(GetLayout()), compileFlags));
+ d3dDesc.CS = compiledShader.GetD3D12ShaderBytecode();
- SetLabelImpl();
+ mCacheKey.Record(d3dDesc, ToBackend(GetLayout())->GetRootSignatureBlob());
- return {};
+ // Try to see if we have anything in the blob cache.
+ Blob blob = device->LoadCachedBlob(GetCacheKey());
+ const bool cacheHit = !blob.Empty();
+ if (cacheHit) {
+ // Cache hits, attach cached blob to descriptor.
+ d3dDesc.CachedPSO.pCachedBlob = blob.Data();
+ d3dDesc.CachedPSO.CachedBlobSizeInBytes = blob.Size();
}
- ComputePipeline::~ComputePipeline() = default;
-
- void ComputePipeline::DestroyImpl() {
- ComputePipelineBase::DestroyImpl();
- ToBackend(GetDevice())->ReferenceUntilUnused(mPipelineState);
+ auto* d3d12Device = device->GetD3D12Device();
+ DAWN_TRY(CheckHRESULT(
+ d3d12Device->CreateComputePipelineState(&d3dDesc, IID_PPV_ARGS(&mPipelineState)),
+ "D3D12 creating pipeline state"));
+
+ if (!cacheHit) {
+ // Cache misses, need to get pipeline cached blob and store.
+ ComPtr<ID3DBlob> d3dBlob;
+ DAWN_TRY(CheckHRESULT(GetPipelineState()->GetCachedBlob(&d3dBlob),
+ "D3D12 compute pipeline state get cached blob"));
+ device->StoreCachedBlob(GetCacheKey(), CreateBlob(std::move(d3dBlob)));
}
- ID3D12PipelineState* ComputePipeline::GetPipelineState() const {
- return mPipelineState.Get();
- }
+ SetLabelImpl();
- void ComputePipeline::SetLabelImpl() {
- SetDebugName(ToBackend(GetDevice()), GetPipelineState(), "Dawn_ComputePipeline",
- GetLabel());
- }
+ return {};
+}
- void ComputePipeline::InitializeAsync(Ref<ComputePipelineBase> computePipeline,
- WGPUCreateComputePipelineAsyncCallback callback,
- void* userdata) {
- std::unique_ptr<CreateComputePipelineAsyncTask> asyncTask =
- std::make_unique<CreateComputePipelineAsyncTask>(std::move(computePipeline), callback,
- userdata);
- CreateComputePipelineAsyncTask::RunAsync(std::move(asyncTask));
- }
+ComputePipeline::~ComputePipeline() = default;
- bool ComputePipeline::UsesNumWorkgroups() const {
- return GetStage(SingleShaderStage::Compute).metadata->usesNumWorkgroups;
- }
+void ComputePipeline::DestroyImpl() {
+ ComputePipelineBase::DestroyImpl();
+ ToBackend(GetDevice())->ReferenceUntilUnused(mPipelineState);
+}
+
+ID3D12PipelineState* ComputePipeline::GetPipelineState() const {
+ return mPipelineState.Get();
+}
+
+void ComputePipeline::SetLabelImpl() {
+ SetDebugName(ToBackend(GetDevice()), GetPipelineState(), "Dawn_ComputePipeline", GetLabel());
+}
+
+void ComputePipeline::InitializeAsync(Ref<ComputePipelineBase> computePipeline,
+ WGPUCreateComputePipelineAsyncCallback callback,
+ void* userdata) {
+ std::unique_ptr<CreateComputePipelineAsyncTask> asyncTask =
+ std::make_unique<CreateComputePipelineAsyncTask>(std::move(computePipeline), callback,
+ userdata);
+ CreateComputePipelineAsyncTask::RunAsync(std::move(asyncTask));
+}
+
+bool ComputePipeline::UsesNumWorkgroups() const {
+ return GetStage(SingleShaderStage::Compute).metadata->usesNumWorkgroups;
+}
- ComPtr<ID3D12CommandSignature> ComputePipeline::GetDispatchIndirectCommandSignature() {
- if (UsesNumWorkgroups()) {
- return ToBackend(GetLayout())->GetDispatchIndirectCommandSignatureWithNumWorkgroups();
- }
- return ToBackend(GetDevice())->GetDispatchIndirectSignature();
+ComPtr<ID3D12CommandSignature> ComputePipeline::GetDispatchIndirectCommandSignature() {
+ if (UsesNumWorkgroups()) {
+ return ToBackend(GetLayout())->GetDispatchIndirectCommandSignatureWithNumWorkgroups();
}
+ return ToBackend(GetDevice())->GetDispatchIndirectSignature();
+}
} // namespace dawn::native::d3d12
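
The reworked ComputePipeline::Initialize() above records a cache key, attaches a previously stored blob to CachedPSO when the lookup hits, and otherwise calls GetCachedBlob after pipeline creation and persists the result. A rough, standalone sketch of that load-or-store control flow, with an in-memory map standing in for the device blob cache (all names below are illustrative, not Dawn or D3D12 APIs):

#include <cstdint>
#include <optional>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

using Blob = std::vector<uint8_t>;

// Stand-in for the persistent blob cache keyed by a pipeline cache key.
static std::unordered_map<std::string, Blob> gBlobCache;

static std::optional<Blob> LoadCachedBlob(const std::string& key) {
    auto it = gBlobCache.find(key);
    if (it == gBlobCache.end()) {
        return std::nullopt;
    }
    return it->second;
}

static void StoreCachedBlob(const std::string& key, Blob blob) {
    gBlobCache.emplace(key, std::move(blob));
}

// Stand-in for driver compilation; the real path would attach a cached blob
// to the pipeline descriptor before creation when cacheHit is true.
static Blob CompilePipeline(bool /*cacheHit*/) {
    return Blob{0xD3, 0xD1, 0x02};
}

Blob CreatePipelineWithCache(const std::string& cacheKey) {
    std::optional<Blob> cached = LoadCachedBlob(cacheKey);
    const bool cacheHit = cached.has_value();
    Blob pipeline = CompilePipeline(cacheHit);
    if (!cacheHit) {
        // Cache miss: serialize the freshly created pipeline and persist it.
        StoreCachedBlob(cacheKey, pipeline);
    }
    return pipeline;
}

int main() {
    CreatePipelineWithCache("compute:example");  // miss: compiles and stores
    CreatePipelineWithCache("compute:example");  // hit: reuses the stored blob
    return 0;
}

Keying the stored blob on the full pipeline description (including the root signature blob, as the patch does) is what makes a later cache hit safe to feed back through CachedPSO.
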
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/ComputePipelineD3D12.h b/chromium/third_party/dawn/src/dawn/native/d3d12/ComputePipelineD3D12.h
index cf55c13cafe..ef07ced69da 100644
--- a/chromium/third_party/dawn/src/dawn/native/d3d12/ComputePipelineD3D12.h
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/ComputePipelineD3D12.h
@@ -21,37 +21,36 @@
namespace dawn::native::d3d12 {
- class Device;
+class Device;
- class ComputePipeline final : public ComputePipelineBase {
- public:
- static Ref<ComputePipeline> CreateUninitialized(
- Device* device,
- const ComputePipelineDescriptor* descriptor);
- static void InitializeAsync(Ref<ComputePipelineBase> computePipeline,
- WGPUCreateComputePipelineAsyncCallback callback,
- void* userdata);
- ComputePipeline() = delete;
+class ComputePipeline final : public ComputePipelineBase {
+ public:
+ static Ref<ComputePipeline> CreateUninitialized(Device* device,
+ const ComputePipelineDescriptor* descriptor);
+ static void InitializeAsync(Ref<ComputePipelineBase> computePipeline,
+ WGPUCreateComputePipelineAsyncCallback callback,
+ void* userdata);
+ ComputePipeline() = delete;
- ID3D12PipelineState* GetPipelineState() const;
+ ID3D12PipelineState* GetPipelineState() const;
- MaybeError Initialize() override;
+ MaybeError Initialize() override;
- // Dawn API
- void SetLabelImpl() override;
+ // Dawn API
+ void SetLabelImpl() override;
- bool UsesNumWorkgroups() const;
+ bool UsesNumWorkgroups() const;
- ComPtr<ID3D12CommandSignature> GetDispatchIndirectCommandSignature();
+ ComPtr<ID3D12CommandSignature> GetDispatchIndirectCommandSignature();
- private:
- ~ComputePipeline() override;
+ private:
+ ~ComputePipeline() override;
- void DestroyImpl() override;
+ void DestroyImpl() override;
- using ComputePipelineBase::ComputePipelineBase;
- ComPtr<ID3D12PipelineState> mPipelineState;
- };
+ using ComputePipelineBase::ComputePipelineBase;
+ ComPtr<ID3D12PipelineState> mPipelineState;
+};
} // namespace dawn::native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/D3D11on12Util.cpp b/chromium/third_party/dawn/src/dawn/native/d3d12/D3D11on12Util.cpp
index d48d41fe7a5..01a0081bf70 100644
--- a/chromium/third_party/dawn/src/dawn/native/d3d12/D3D11on12Util.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/D3D11on12Util.cpp
@@ -17,171 +17,167 @@
#include "dawn/native/d3d12/D3D11on12Util.h"
+#include <utility>
+
#include "dawn/common/HashUtils.h"
#include "dawn/common/Log.h"
+#include "dawn/native/D3D12Backend.h"
#include "dawn/native/d3d12/D3D12Error.h"
#include "dawn/native/d3d12/DeviceD3D12.h"
-#include <dawn/native/D3D12Backend.h>
-
namespace dawn::native::d3d12 {
- void Flush11On12DeviceToAvoidLeaks(ComPtr<ID3D11On12Device> d3d11on12Device) {
- if (d3d11on12Device == nullptr) {
- return;
- }
-
- ComPtr<ID3D11Device> d3d11Device;
- if (FAILED(d3d11on12Device.As(&d3d11Device))) {
- return;
- }
+void Flush11On12DeviceToAvoidLeaks(ComPtr<ID3D11On12Device> d3d11on12Device) {
+ if (d3d11on12Device == nullptr) {
+ return;
+ }
- ComPtr<ID3D11DeviceContext> d3d11DeviceContext;
- d3d11Device->GetImmediateContext(&d3d11DeviceContext);
+ ComPtr<ID3D11Device> d3d11Device;
+ if (FAILED(d3d11on12Device.As(&d3d11Device))) {
+ return;
+ }
- ASSERT(d3d11DeviceContext != nullptr);
+ ComPtr<ID3D11DeviceContext> d3d11DeviceContext;
+ d3d11Device->GetImmediateContext(&d3d11DeviceContext);
- // 11on12 has a bug where D3D12 resources used only for keyed shared mutexes
- // are not released until work is submitted to the device context and flushed.
- // The most minimal work we can get away with is issuing a TiledResourceBarrier.
+ ASSERT(d3d11DeviceContext != nullptr);
- // ID3D11DeviceContext2 is available in Win8.1 and above. This suffices for a
- // D3D12 backend since both D3D12 and 11on12 first appeared in Windows 10.
- ComPtr<ID3D11DeviceContext2> d3d11DeviceContext2;
- if (FAILED(d3d11DeviceContext.As(&d3d11DeviceContext2))) {
- return;
- }
+ // 11on12 has a bug where D3D12 resources used only for keyed shared mutexes
+ // are not released until work is submitted to the device context and flushed.
+ // The most minimal work we can get away with is issuing a TiledResourceBarrier.
- d3d11DeviceContext2->TiledResourceBarrier(nullptr, nullptr);
- d3d11DeviceContext2->Flush();
+ // ID3D11DeviceContext2 is available in Win8.1 and above. This suffices for a
+ // D3D12 backend since both D3D12 and 11on12 first appeared in Windows 10.
+ ComPtr<ID3D11DeviceContext2> d3d11DeviceContext2;
+ if (FAILED(d3d11DeviceContext.As(&d3d11DeviceContext2))) {
+ return;
}
- D3D11on12ResourceCacheEntry::D3D11on12ResourceCacheEntry(
- ComPtr<IDXGIKeyedMutex> dxgiKeyedMutex,
- ComPtr<ID3D11On12Device> d3d11On12Device)
- : mDXGIKeyedMutex(std::move(dxgiKeyedMutex)), mD3D11on12Device(std::move(d3d11On12Device)) {
- }
+ d3d11DeviceContext2->TiledResourceBarrier(nullptr, nullptr);
+ d3d11DeviceContext2->Flush();
+}
+
+D3D11on12ResourceCacheEntry::D3D11on12ResourceCacheEntry(ComPtr<IDXGIKeyedMutex> dxgiKeyedMutex,
+ ComPtr<ID3D11On12Device> d3d11On12Device)
+ : mDXGIKeyedMutex(std::move(dxgiKeyedMutex)), mD3D11on12Device(std::move(d3d11On12Device)) {}
- D3D11on12ResourceCacheEntry::D3D11on12ResourceCacheEntry(
- ComPtr<ID3D11On12Device> d3d11On12Device)
- : mD3D11on12Device(std::move(d3d11On12Device)) {
+D3D11on12ResourceCacheEntry::D3D11on12ResourceCacheEntry(ComPtr<ID3D11On12Device> d3d11On12Device)
+ : mD3D11on12Device(std::move(d3d11On12Device)) {}
+
+D3D11on12ResourceCacheEntry::~D3D11on12ResourceCacheEntry() {
+ if (mDXGIKeyedMutex == nullptr) {
+ return;
}
- D3D11on12ResourceCacheEntry::~D3D11on12ResourceCacheEntry() {
- if (mDXGIKeyedMutex == nullptr) {
- return;
- }
+ if (mAcquireCount > 0) {
+ mDXGIKeyedMutex->ReleaseSync(kDXGIKeyedMutexAcquireReleaseKey);
+ }
- if (mAcquireCount > 0) {
- mDXGIKeyedMutex->ReleaseSync(kDXGIKeyedMutexAcquireReleaseKey);
- }
+ ComPtr<ID3D11Resource> d3d11Resource;
+ if (FAILED(mDXGIKeyedMutex.As(&d3d11Resource))) {
+ return;
+ }
- ComPtr<ID3D11Resource> d3d11Resource;
- if (FAILED(mDXGIKeyedMutex.As(&d3d11Resource))) {
- return;
- }
+ ASSERT(mD3D11on12Device != nullptr);
- ASSERT(mD3D11on12Device != nullptr);
+ ID3D11Resource* d3d11ResourceRaw = d3d11Resource.Get();
+ mD3D11on12Device->ReleaseWrappedResources(&d3d11ResourceRaw, 1);
- ID3D11Resource* d3d11ResourceRaw = d3d11Resource.Get();
- mD3D11on12Device->ReleaseWrappedResources(&d3d11ResourceRaw, 1);
+ d3d11Resource.Reset();
+ mDXGIKeyedMutex.Reset();
- d3d11Resource.Reset();
- mDXGIKeyedMutex.Reset();
+ Flush11On12DeviceToAvoidLeaks(std::move(mD3D11on12Device));
+}
- Flush11On12DeviceToAvoidLeaks(std::move(mD3D11on12Device));
+MaybeError D3D11on12ResourceCacheEntry::AcquireKeyedMutex() {
+ ASSERT(mDXGIKeyedMutex != nullptr);
+ ASSERT(mAcquireCount >= 0);
+ if (mAcquireCount == 0) {
+ DAWN_TRY(
+ CheckHRESULT(mDXGIKeyedMutex->AcquireSync(kDXGIKeyedMutexAcquireReleaseKey, INFINITE),
+ "D3D12 acquiring shared mutex"));
}
-
- MaybeError D3D11on12ResourceCacheEntry::AcquireKeyedMutex() {
- ASSERT(mDXGIKeyedMutex != nullptr);
- ASSERT(mAcquireCount >= 0);
- if (mAcquireCount == 0) {
- DAWN_TRY(CheckHRESULT(
- mDXGIKeyedMutex->AcquireSync(kDXGIKeyedMutexAcquireReleaseKey, INFINITE),
- "D3D12 acquiring shared mutex"));
- }
- mAcquireCount++;
- return {};
+ mAcquireCount++;
+ return {};
+}
+
+void D3D11on12ResourceCacheEntry::ReleaseKeyedMutex() {
+ ASSERT(mDXGIKeyedMutex != nullptr);
+ ASSERT(mAcquireCount > 0);
+ mAcquireCount--;
+ if (mAcquireCount == 0) {
+ mDXGIKeyedMutex->ReleaseSync(kDXGIKeyedMutexAcquireReleaseKey);
+ }
+}
+
+size_t D3D11on12ResourceCacheEntry::HashFunc::operator()(
+ const Ref<D3D11on12ResourceCacheEntry> a) const {
+ size_t hash = 0;
+ HashCombine(&hash, a->mD3D11on12Device.Get());
+ return hash;
+}
+
+bool D3D11on12ResourceCacheEntry::EqualityFunc::operator()(
+ const Ref<D3D11on12ResourceCacheEntry> a,
+ const Ref<D3D11on12ResourceCacheEntry> b) const {
+ return a->mD3D11on12Device == b->mD3D11on12Device;
+}
+
+D3D11on12ResourceCache::D3D11on12ResourceCache() = default;
+
+D3D11on12ResourceCache::~D3D11on12ResourceCache() = default;
+
+Ref<D3D11on12ResourceCacheEntry> D3D11on12ResourceCache::GetOrCreateD3D11on12Resource(
+ Device* backendDevice,
+ ID3D12Resource* d3d12Resource) {
+    // The Dawn and 11on12 devices share the same D3D12 command queue, whereas this external image
+ // could be accessed/produced with multiple Dawn devices. To avoid cross-queue sharing
+ // restrictions, the 11 wrapped resource is forbidden to be shared between Dawn devices by
+ // using the 11on12 device as the cache key.
+ ComPtr<ID3D11On12Device> d3d11on12Device = backendDevice->GetOrCreateD3D11on12Device();
+ if (d3d11on12Device == nullptr) {
+ dawn::ErrorLog() << "Unable to create 11on12 device for external image";
+ return nullptr;
}
- void D3D11on12ResourceCacheEntry::ReleaseKeyedMutex() {
- ASSERT(mDXGIKeyedMutex != nullptr);
- ASSERT(mAcquireCount > 0);
- mAcquireCount--;
- if (mAcquireCount == 0) {
- mDXGIKeyedMutex->ReleaseSync(kDXGIKeyedMutexAcquireReleaseKey);
- }
+ D3D11on12ResourceCacheEntry blueprint(d3d11on12Device);
+ auto iter = mCache.find(&blueprint);
+ if (iter != mCache.end()) {
+ return *iter;
}
- size_t D3D11on12ResourceCacheEntry::HashFunc::operator()(
- const Ref<D3D11on12ResourceCacheEntry> a) const {
- size_t hash = 0;
- HashCombine(&hash, a->mD3D11on12Device.Get());
- return hash;
+ // We use IDXGIKeyedMutexes to synchronize access between D3D11 and D3D12. D3D11/12 fences
+ // are a viable alternative but are, unfortunately, not available on all versions of Windows
+ // 10. Since D3D12 does not directly support keyed mutexes, we need to wrap the D3D12
+ // resource using 11on12 and QueryInterface the D3D11 representation for the keyed mutex.
+ ComPtr<ID3D11Texture2D> d3d11Texture;
+ D3D11_RESOURCE_FLAGS resourceFlags;
+ resourceFlags.BindFlags = 0;
+ resourceFlags.MiscFlags = D3D11_RESOURCE_MISC_SHARED_KEYEDMUTEX;
+ resourceFlags.CPUAccessFlags = 0;
+ resourceFlags.StructureByteStride = 0;
+ if (FAILED(d3d11on12Device->CreateWrappedResource(
+ d3d12Resource, &resourceFlags, D3D12_RESOURCE_STATE_COMMON, D3D12_RESOURCE_STATE_COMMON,
+ IID_PPV_ARGS(&d3d11Texture)))) {
+ return nullptr;
}
- bool D3D11on12ResourceCacheEntry::EqualityFunc::operator()(
- const Ref<D3D11on12ResourceCacheEntry> a,
- const Ref<D3D11on12ResourceCacheEntry> b) const {
- return a->mD3D11on12Device == b->mD3D11on12Device;
+ ComPtr<IDXGIKeyedMutex> dxgiKeyedMutex;
+ if (FAILED(d3d11Texture.As(&dxgiKeyedMutex))) {
+ return nullptr;
}
- D3D11on12ResourceCache::D3D11on12ResourceCache() = default;
-
- D3D11on12ResourceCache::~D3D11on12ResourceCache() = default;
-
- Ref<D3D11on12ResourceCacheEntry> D3D11on12ResourceCache::GetOrCreateD3D11on12Resource(
- WGPUDevice device,
- ID3D12Resource* d3d12Resource) {
- Device* backendDevice = reinterpret_cast<Device*>(device);
- // The Dawn and 11on12 device share the same D3D12 command queue whereas this external image
- // could be accessed/produced with multiple Dawn devices. To avoid cross-queue sharing
- // restrictions, the 11 wrapped resource is forbidden to be shared between Dawn devices by
- // using the 11on12 device as the cache key.
- ComPtr<ID3D11On12Device> d3d11on12Device = backendDevice->GetOrCreateD3D11on12Device();
- if (d3d11on12Device == nullptr) {
- dawn::ErrorLog() << "Unable to create 11on12 device for external image";
- return nullptr;
- }
-
- D3D11on12ResourceCacheEntry blueprint(d3d11on12Device);
- auto iter = mCache.find(&blueprint);
- if (iter != mCache.end()) {
- return *iter;
- }
-
- // We use IDXGIKeyedMutexes to synchronize access between D3D11 and D3D12. D3D11/12 fences
- // are a viable alternative but are, unfortunately, not available on all versions of Windows
- // 10. Since D3D12 does not directly support keyed mutexes, we need to wrap the D3D12
- // resource using 11on12 and QueryInterface the D3D11 representation for the keyed mutex.
- ComPtr<ID3D11Texture2D> d3d11Texture;
- D3D11_RESOURCE_FLAGS resourceFlags;
- resourceFlags.BindFlags = 0;
- resourceFlags.MiscFlags = D3D11_RESOURCE_MISC_SHARED_KEYEDMUTEX;
- resourceFlags.CPUAccessFlags = 0;
- resourceFlags.StructureByteStride = 0;
- if (FAILED(d3d11on12Device->CreateWrappedResource(
- d3d12Resource, &resourceFlags, D3D12_RESOURCE_STATE_COMMON,
- D3D12_RESOURCE_STATE_COMMON, IID_PPV_ARGS(&d3d11Texture)))) {
- return nullptr;
- }
-
- ComPtr<IDXGIKeyedMutex> dxgiKeyedMutex;
- if (FAILED(d3d11Texture.As(&dxgiKeyedMutex))) {
- return nullptr;
- }
-
- // Keep this cache from growing unbounded.
- // TODO(dawn:625): Consider using a replacement policy based cache.
- if (mCache.size() > kMaxD3D11on12ResourceCacheSize) {
- mCache.clear();
- }
-
- Ref<D3D11on12ResourceCacheEntry> entry =
- AcquireRef(new D3D11on12ResourceCacheEntry(dxgiKeyedMutex, std::move(d3d11on12Device)));
- mCache.insert(entry);
-
- return entry;
+ // Keep this cache from growing unbounded.
+ // TODO(dawn:625): Consider using a replacement policy based cache.
+ if (mCache.size() > kMaxD3D11on12ResourceCacheSize) {
+ mCache.clear();
}
+ Ref<D3D11on12ResourceCacheEntry> entry =
+ AcquireRef(new D3D11on12ResourceCacheEntry(dxgiKeyedMutex, std::move(d3d11on12Device)));
+ mCache.insert(entry);
+
+ return entry;
+}
+
} // namespace dawn::native::d3d12
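
AcquireKeyedMutex/ReleaseKeyedMutex above keep a per-entry acquire count so only the outermost acquire calls IDXGIKeyedMutex::AcquireSync and only the matching final release calls ReleaseSync. A self-contained sketch of that counting pattern, with a fake mutex type standing in for the DXGI interface:

#include <cassert>
#include <cstdint>
#include <cstdio>

// Fake stand-in for IDXGIKeyedMutex: only the outermost Acquire/Release should touch it.
struct FakeKeyedMutex {
    void AcquireSync() { std::puts("AcquireSync(key, INFINITE)"); }
    void ReleaseSync() { std::puts("ReleaseSync(key)"); }
};

class RefCountedKeyedMutexHolder {
  public:
    explicit RefCountedKeyedMutexHolder(FakeKeyedMutex* mutex) : mMutex(mutex) {}

    void Acquire() {
        assert(mAcquireCount >= 0);
        if (mAcquireCount == 0) {
            mMutex->AcquireSync();  // first acquire takes the underlying keyed mutex
        }
        mAcquireCount++;
    }

    void Release() {
        assert(mAcquireCount > 0);
        mAcquireCount--;
        if (mAcquireCount == 0) {
            mMutex->ReleaseSync();  // last release hands it back
        }
    }

  private:
    FakeKeyedMutex* mMutex;
    int64_t mAcquireCount = 0;
};

int main() {
    FakeKeyedMutex mutex;
    RefCountedKeyedMutexHolder entry(&mutex);
    entry.Acquire();  // prints AcquireSync
    entry.Acquire();  // nested acquire, no extra sync call
    entry.Release();  // still held, no sync call
    entry.Release();  // prints ReleaseSync
    return 0;
}
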
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/D3D11on12Util.h b/chromium/third_party/dawn/src/dawn/native/d3d12/D3D11on12Util.h
index 4d960ff3b1b..92dde83f646 100644
--- a/chromium/third_party/dawn/src/dawn/native/d3d12/D3D11on12Util.h
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/D3D11on12Util.h
@@ -15,78 +15,79 @@
#ifndef SRC_DAWN_NATIVE_D3D12_D3D11ON12UTIL_H_
#define SRC_DAWN_NATIVE_D3D12_D3D11ON12UTIL_H_
+#include <memory>
+#include <unordered_set>
+
#include "dawn/common/RefCounted.h"
+#include "dawn/native/DawnNative.h"
#include "dawn/native/Error.h"
#include "dawn/native/d3d12/d3d12_platform.h"
-#include <dawn/native/DawnNative.h>
-#include <memory>
-#include <unordered_set>
-
struct ID3D11On12Device;
struct IDXGIKeyedMutex;
namespace dawn::native::d3d12 {
- // Wraps 11 wrapped resources in a cache.
- class D3D11on12ResourceCacheEntry : public RefCounted {
- public:
- D3D11on12ResourceCacheEntry(ComPtr<ID3D11On12Device> d3d11on12Device);
- D3D11on12ResourceCacheEntry(ComPtr<IDXGIKeyedMutex> d3d11on12Resource,
- ComPtr<ID3D11On12Device> d3d11on12Device);
- ~D3D11on12ResourceCacheEntry();
-
- MaybeError AcquireKeyedMutex();
- void ReleaseKeyedMutex();
-
- // Functors necessary for the
- // unordered_set<D3D11on12ResourceCacheEntry&>-based cache.
- struct HashFunc {
- size_t operator()(const Ref<D3D11on12ResourceCacheEntry> a) const;
- };
-
- struct EqualityFunc {
- bool operator()(const Ref<D3D11on12ResourceCacheEntry> a,
- const Ref<D3D11on12ResourceCacheEntry> b) const;
- };
-
- private:
- ComPtr<IDXGIKeyedMutex> mDXGIKeyedMutex;
- ComPtr<ID3D11On12Device> mD3D11on12Device;
- int64_t mAcquireCount = 0;
+class Device;
+
+// Wraps 11 wrapped resources in a cache.
+class D3D11on12ResourceCacheEntry : public RefCounted {
+ public:
+ explicit D3D11on12ResourceCacheEntry(ComPtr<ID3D11On12Device> d3d11on12Device);
+ D3D11on12ResourceCacheEntry(ComPtr<IDXGIKeyedMutex> d3d11on12Resource,
+ ComPtr<ID3D11On12Device> d3d11on12Device);
+ ~D3D11on12ResourceCacheEntry() override;
+
+ MaybeError AcquireKeyedMutex();
+ void ReleaseKeyedMutex();
+
+ // Functors necessary for the
+ // unordered_set<D3D11on12ResourceCacheEntry&>-based cache.
+ struct HashFunc {
+ size_t operator()(const Ref<D3D11on12ResourceCacheEntry> a) const;
};
- // |D3D11on12ResourceCache| maintains a cache of 11 wrapped resources.
- // Each entry represents a 11 resource that is exclusively accessed by Dawn device.
- // Since each Dawn device creates and stores a 11on12 device, the 11on12 device
- // is used as the key for the cache entry which ensures only the same 11 wrapped
- // resource is re-used and also fully released.
- //
- // The cache is primarily needed to avoid repeatedly calling CreateWrappedResource
- // and special release code per ProduceTexture(device).
- class D3D11on12ResourceCache {
- public:
- D3D11on12ResourceCache();
- ~D3D11on12ResourceCache();
-
- Ref<D3D11on12ResourceCacheEntry> GetOrCreateD3D11on12Resource(
- WGPUDevice device,
- ID3D12Resource* d3d12Resource);
-
- private:
- // TODO(dawn:625): Figure out a large enough cache size.
- static constexpr uint64_t kMaxD3D11on12ResourceCacheSize = 5;
-
- // 11on12 resource cache entries are refcounted to ensure if the ExternalImage outlives the
- // Dawn texture (or vice-versa), we always fully release the 11 wrapped resource without
- // waiting until Dawn device to shutdown.
- using Cache = std::unordered_set<Ref<D3D11on12ResourceCacheEntry>,
- D3D11on12ResourceCacheEntry::HashFunc,
- D3D11on12ResourceCacheEntry::EqualityFunc>;
-
- Cache mCache;
+ struct EqualityFunc {
+ bool operator()(const Ref<D3D11on12ResourceCacheEntry> a,
+ const Ref<D3D11on12ResourceCacheEntry> b) const;
};
+ private:
+ ComPtr<IDXGIKeyedMutex> mDXGIKeyedMutex;
+ ComPtr<ID3D11On12Device> mD3D11on12Device;
+ int64_t mAcquireCount = 0;
+};
+
+// |D3D11on12ResourceCache| maintains a cache of 11 wrapped resources.
+// Each entry represents an 11 resource that is exclusively accessed by a Dawn device.
+// Since each Dawn device creates and stores an 11on12 device, the 11on12 device
+// is used as the key for the cache entry, which ensures only the same 11 wrapped
+// resource is re-used and also fully released.
+//
+// The cache is primarily needed to avoid repeatedly calling CreateWrappedResource
+// and special release code per ProduceTexture(device).
+class D3D11on12ResourceCache {
+ public:
+ D3D11on12ResourceCache();
+ ~D3D11on12ResourceCache();
+
+ Ref<D3D11on12ResourceCacheEntry> GetOrCreateD3D11on12Resource(Device* backendDevice,
+ ID3D12Resource* d3d12Resource);
+
+ private:
+ // TODO(dawn:625): Figure out a large enough cache size.
+ static constexpr uint64_t kMaxD3D11on12ResourceCacheSize = 5;
+
+    // 11on12 resource cache entries are refcounted to ensure that if the ExternalImage outlives
+    // the Dawn texture (or vice versa), we always fully release the 11 wrapped resource without
+    // waiting for the Dawn device to shut down.
+ using Cache = std::unordered_set<Ref<D3D11on12ResourceCacheEntry>,
+ D3D11on12ResourceCacheEntry::HashFunc,
+ D3D11on12ResourceCacheEntry::EqualityFunc>;
+
+ Cache mCache;
+};
+
} // namespace dawn::native::d3d12
#endif // SRC_DAWN_NATIVE_D3D12_D3D11ON12UTIL_H_
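
D3D11on12ResourceCache above keys its unordered_set on the 11on12 device held by each entry, supplying HashFunc/EqualityFunc functors and probing the set with a short-lived blueprint entry. A rough sketch of the same set-with-functors lookup, using std::shared_ptr in place of Dawn's Ref and a plain pointer as the key (names are illustrative):

#include <cstddef>
#include <cstdio>
#include <functional>
#include <memory>
#include <unordered_set>

// Stand-in cache entry: the only key field is the device pointer it wraps.
struct CacheEntry {
    void* device;
};

struct EntryHash {
    std::size_t operator()(const std::shared_ptr<CacheEntry>& e) const {
        return std::hash<void*>()(e->device);
    }
};

struct EntryEqual {
    bool operator()(const std::shared_ptr<CacheEntry>& a,
                    const std::shared_ptr<CacheEntry>& b) const {
        return a->device == b->device;
    }
};

using Cache = std::unordered_set<std::shared_ptr<CacheEntry>, EntryHash, EntryEqual>;

int main() {
    Cache cache;
    int fakeDevice = 0;  // any stable address serves as the illustrative key

    cache.insert(std::make_shared<CacheEntry>(CacheEntry{&fakeDevice}));

    // Probe with a throwaway "blueprint" entry holding only the key fields.
    auto blueprint = std::make_shared<CacheEntry>(CacheEntry{&fakeDevice});
    std::printf("cache hit: %s\n", cache.find(blueprint) != cache.end() ? "yes" : "no");
    return 0;
}
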
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/D3D12Backend.cpp b/chromium/third_party/dawn/src/dawn/native/d3d12/D3D12Backend.cpp
index 18d7145c83e..0e5a76f3617 100644
--- a/chromium/third_party/dawn/src/dawn/native/d3d12/D3D12Backend.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/D3D12Backend.cpp
@@ -17,163 +17,95 @@
#include "dawn/native/D3D12Backend.h"
+#include <memory>
+#include <utility>
+
#include "dawn/common/Log.h"
#include "dawn/common/Math.h"
#include "dawn/common/SwapChainUtils.h"
#include "dawn/native/d3d12/D3D11on12Util.h"
#include "dawn/native/d3d12/DeviceD3D12.h"
+#include "dawn/native/d3d12/ExternalImageDXGIImpl.h"
#include "dawn/native/d3d12/NativeSwapChainImplD3D12.h"
#include "dawn/native/d3d12/ResidencyManagerD3D12.h"
#include "dawn/native/d3d12/TextureD3D12.h"
namespace dawn::native::d3d12 {
- ComPtr<ID3D12Device> GetD3D12Device(WGPUDevice device) {
- return ToBackend(FromAPI(device))->GetD3D12Device();
- }
+ComPtr<ID3D12Device> GetD3D12Device(WGPUDevice device) {
+ return ToBackend(FromAPI(device))->GetD3D12Device();
+}
- DawnSwapChainImplementation CreateNativeSwapChainImpl(WGPUDevice device, HWND window) {
- Device* backendDevice = ToBackend(FromAPI(device));
+DawnSwapChainImplementation CreateNativeSwapChainImpl(WGPUDevice device, HWND window) {
+ Device* backendDevice = ToBackend(FromAPI(device));
- DawnSwapChainImplementation impl;
- impl = CreateSwapChainImplementation(new NativeSwapChainImpl(backendDevice, window));
- impl.textureUsage = WGPUTextureUsage_Present;
+ DawnSwapChainImplementation impl;
+ impl = CreateSwapChainImplementation(new NativeSwapChainImpl(backendDevice, window));
+ impl.textureUsage = WGPUTextureUsage_Present;
- return impl;
- }
+ return impl;
+}
- WGPUTextureFormat GetNativeSwapChainPreferredFormat(
- const DawnSwapChainImplementation* swapChain) {
- NativeSwapChainImpl* impl = reinterpret_cast<NativeSwapChainImpl*>(swapChain->userData);
- return static_cast<WGPUTextureFormat>(impl->GetPreferredFormat());
- }
+WGPUTextureFormat GetNativeSwapChainPreferredFormat(const DawnSwapChainImplementation* swapChain) {
+ NativeSwapChainImpl* impl = reinterpret_cast<NativeSwapChainImpl*>(swapChain->userData);
+ return static_cast<WGPUTextureFormat>(impl->GetPreferredFormat());
+}
- ExternalImageDescriptorDXGISharedHandle::ExternalImageDescriptorDXGISharedHandle()
- : ExternalImageDescriptor(ExternalImageType::DXGISharedHandle) {
- }
+ExternalImageDescriptorDXGISharedHandle::ExternalImageDescriptorDXGISharedHandle()
+ : ExternalImageDescriptor(ExternalImageType::DXGISharedHandle) {}
- ExternalImageDXGI::ExternalImageDXGI(ComPtr<ID3D12Resource> d3d12Resource,
- const WGPUTextureDescriptor* descriptor)
- : mD3D12Resource(std::move(d3d12Resource)),
- mUsage(descriptor->usage),
- mDimension(descriptor->dimension),
- mSize(descriptor->size),
- mFormat(descriptor->format),
- mMipLevelCount(descriptor->mipLevelCount),
- mSampleCount(descriptor->sampleCount) {
- ASSERT(!descriptor->nextInChain ||
- descriptor->nextInChain->sType == WGPUSType_DawnTextureInternalUsageDescriptor);
- if (descriptor->nextInChain) {
- mUsageInternal = reinterpret_cast<const WGPUDawnTextureInternalUsageDescriptor*>(
- descriptor->nextInChain)
- ->internalUsage;
- }
- mD3D11on12ResourceCache = std::make_unique<D3D11on12ResourceCache>();
- }
+ExternalImageDXGI::ExternalImageDXGI(std::unique_ptr<ExternalImageDXGIImpl> impl)
+ : mImpl(std::move(impl)) {
+ ASSERT(mImpl != nullptr);
+}
- ExternalImageDXGI::~ExternalImageDXGI() = default;
-
- WGPUTexture ExternalImageDXGI::ProduceTexture(
- WGPUDevice device,
- const ExternalImageAccessDescriptorDXGIKeyedMutex* descriptor) {
- Device* backendDevice = ToBackend(FromAPI(device));
-
- // Ensure the texture usage is allowed
- if (!IsSubset(descriptor->usage, mUsage)) {
- dawn::ErrorLog() << "Texture usage is not valid for external image";
- return nullptr;
- }
-
- TextureDescriptor textureDescriptor = {};
- textureDescriptor.usage = static_cast<wgpu::TextureUsage>(descriptor->usage);
- textureDescriptor.dimension = static_cast<wgpu::TextureDimension>(mDimension);
- textureDescriptor.size = {mSize.width, mSize.height, mSize.depthOrArrayLayers};
- textureDescriptor.format = static_cast<wgpu::TextureFormat>(mFormat);
- textureDescriptor.mipLevelCount = mMipLevelCount;
- textureDescriptor.sampleCount = mSampleCount;
-
- DawnTextureInternalUsageDescriptor internalDesc = {};
- if (mUsageInternal) {
- textureDescriptor.nextInChain = &internalDesc;
- internalDesc.internalUsage = static_cast<wgpu::TextureUsage>(mUsageInternal);
- internalDesc.sType = wgpu::SType::DawnTextureInternalUsageDescriptor;
- }
-
- Ref<D3D11on12ResourceCacheEntry> d3d11on12Resource =
- mD3D11on12ResourceCache->GetOrCreateD3D11on12Resource(device, mD3D12Resource.Get());
- if (d3d11on12Resource == nullptr) {
- dawn::ErrorLog() << "Unable to create 11on12 resource for external image";
- return nullptr;
- }
-
- Ref<TextureBase> texture = backendDevice->CreateD3D12ExternalTexture(
- &textureDescriptor, mD3D12Resource, std::move(d3d11on12Resource),
- descriptor->isSwapChainTexture, descriptor->isInitialized);
-
- return ToAPI(texture.Detach());
- }
+ExternalImageDXGI::~ExternalImageDXGI() = default;
- // static
- std::unique_ptr<ExternalImageDXGI> ExternalImageDXGI::Create(
- WGPUDevice device,
- const ExternalImageDescriptorDXGISharedHandle* descriptor) {
- Device* backendDevice = ToBackend(FromAPI(device));
-
- Microsoft::WRL::ComPtr<ID3D12Resource> d3d12Resource;
- if (FAILED(backendDevice->GetD3D12Device()->OpenSharedHandle(
- descriptor->sharedHandle, IID_PPV_ARGS(&d3d12Resource)))) {
- return nullptr;
- }
-
- const TextureDescriptor* textureDescriptor = FromAPI(descriptor->cTextureDescriptor);
-
- if (backendDevice->ConsumedError(
- ValidateTextureDescriptor(backendDevice, textureDescriptor))) {
- return nullptr;
- }
-
- if (backendDevice->ConsumedError(
- ValidateTextureDescriptorCanBeWrapped(textureDescriptor),
- "validating that a D3D12 external image can be wrapped with %s",
- textureDescriptor)) {
- return nullptr;
- }
-
- if (backendDevice->ConsumedError(
- ValidateD3D12TextureCanBeWrapped(d3d12Resource.Get(), textureDescriptor))) {
- return nullptr;
- }
-
- // Shared handle is assumed to support resource sharing capability. The resource
- // shared capability tier must agree to share resources between D3D devices.
- const Format* format =
- backendDevice->GetInternalFormat(textureDescriptor->format).AcquireSuccess();
- if (format->IsMultiPlanar()) {
- if (backendDevice->ConsumedError(ValidateD3D12VideoTextureCanBeShared(
- backendDevice, D3D12TextureFormat(textureDescriptor->format)))) {
- return nullptr;
- }
- }
-
- std::unique_ptr<ExternalImageDXGI> result(
- new ExternalImageDXGI(std::move(d3d12Resource), descriptor->cTextureDescriptor));
- return result;
- }
+bool ExternalImageDXGI::IsValid() const {
+ return mImpl->IsValid();
+}
- uint64_t SetExternalMemoryReservation(WGPUDevice device,
- uint64_t requestedReservationSize,
- MemorySegment memorySegment) {
- Device* backendDevice = ToBackend(FromAPI(device));
+WGPUTexture ExternalImageDXGI::ProduceTexture(
+ WGPUDevice device,
+ const ExternalImageAccessDescriptorDXGIKeyedMutex* descriptor) {
+ return ProduceTexture(descriptor);
+}
- return backendDevice->GetResidencyManager()->SetExternalMemoryReservation(
- memorySegment, requestedReservationSize);
+WGPUTexture ExternalImageDXGI::ProduceTexture(
+ const ExternalImageAccessDescriptorDXGIKeyedMutex* descriptor) {
+ if (!IsValid()) {
+ dawn::ErrorLog() << "Cannot produce texture from external image after device destruction";
+ return nullptr;
}
-
- AdapterDiscoveryOptions::AdapterDiscoveryOptions()
- : AdapterDiscoveryOptionsBase(WGPUBackendType_D3D12), dxgiAdapter(nullptr) {
+ return mImpl->ProduceTexture(descriptor);
+}
+
+// static
+std::unique_ptr<ExternalImageDXGI> ExternalImageDXGI::Create(
+ WGPUDevice device,
+ const ExternalImageDescriptorDXGISharedHandle* descriptor) {
+ Device* backendDevice = ToBackend(FromAPI(device));
+ std::unique_ptr<ExternalImageDXGIImpl> impl =
+ backendDevice->CreateExternalImageDXGIImpl(descriptor);
+ if (!impl) {
+ dawn::ErrorLog() << "Failed to create DXGI external image";
+ return nullptr;
}
+ return std::unique_ptr<ExternalImageDXGI>(new ExternalImageDXGI(std::move(impl)));
+}
- AdapterDiscoveryOptions::AdapterDiscoveryOptions(ComPtr<IDXGIAdapter> adapter)
- : AdapterDiscoveryOptionsBase(WGPUBackendType_D3D12), dxgiAdapter(std::move(adapter)) {
- }
+uint64_t SetExternalMemoryReservation(WGPUDevice device,
+ uint64_t requestedReservationSize,
+ MemorySegment memorySegment) {
+ Device* backendDevice = ToBackend(FromAPI(device));
+
+ return backendDevice->GetResidencyManager()->SetExternalMemoryReservation(
+ memorySegment, requestedReservationSize);
+}
+
+AdapterDiscoveryOptions::AdapterDiscoveryOptions()
+ : AdapterDiscoveryOptionsBase(WGPUBackendType_D3D12), dxgiAdapter(nullptr) {}
+
+AdapterDiscoveryOptions::AdapterDiscoveryOptions(ComPtr<IDXGIAdapter> adapter)
+ : AdapterDiscoveryOptionsBase(WGPUBackendType_D3D12), dxgiAdapter(std::move(adapter)) {}
} // namespace dawn::native::d3d12
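For orientation, here is a hedged, illustrative sketch (not part of the patch) of how an embedder might drive the refactored ExternalImageDXGI path above, where Create() now builds an ExternalImageDXGIImpl owned by the device and ProduceTexture() gains a device-less overload. The wrapper function name and the placeholder handle/descriptor values are assumptions; the Dawn entry points are the ones shown in this change.

// Illustrative only: wraps a DXGI shared handle as a WGPUTexture via the refactored API.
#include <memory>

#include "dawn/native/D3D12Backend.h"

WGPUTexture WrapSharedHandleForDemo(WGPUDevice device,
                                    HANDLE sharedHandle,
                                    const WGPUTextureDescriptor* textureDesc) {
    dawn::native::d3d12::ExternalImageDescriptorDXGISharedHandle desc;
    desc.sharedHandle = sharedHandle;
    desc.cTextureDescriptor = textureDesc;

    std::unique_ptr<dawn::native::d3d12::ExternalImageDXGI> image =
        dawn::native::d3d12::ExternalImageDXGI::Create(device, &desc);
    if (image == nullptr || !image->IsValid()) {
        return nullptr;  // Creation failed validation, or the device was already destroyed.
    }

    dawn::native::d3d12::ExternalImageAccessDescriptorDXGIKeyedMutex access = {};
    access.isInitialized = true;
    access.usage = textureDesc->usage;
    return image->ProduceTexture(&access);  // Device-less overload added by this patch.
}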
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/D3D12Error.cpp b/chromium/third_party/dawn/src/dawn/native/d3d12/D3D12Error.cpp
index 23a95568d46..0fda82b599e 100644
--- a/chromium/third_party/dawn/src/dawn/native/d3d12/D3D12Error.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/D3D12Error.cpp
@@ -19,33 +19,68 @@
#include <string>
namespace dawn::native::d3d12 {
- MaybeError CheckHRESULTImpl(HRESULT result, const char* context) {
- if (DAWN_LIKELY(SUCCEEDED(result))) {
- return {};
- }
-
- std::ostringstream messageStream;
- messageStream << context << " failed with ";
- if (result == E_FAKE_ERROR_FOR_TESTING) {
- messageStream << "E_FAKE_ERROR_FOR_TESTING";
- } else {
- messageStream << "0x" << std::uppercase << std::setfill('0') << std::setw(8) << std::hex
- << result;
- }
-
- if (result == DXGI_ERROR_DEVICE_REMOVED) {
- return DAWN_DEVICE_LOST_ERROR(messageStream.str());
- } else {
- return DAWN_INTERNAL_ERROR(messageStream.str());
- }
+const char* HRESULTAsString(HRESULT result) {
+    // There are many possible HRESULTs, but these are the ones specifically listed as
+ // being returned from D3D12, in addition to fake codes used internally for testing.
+ // https://docs.microsoft.com/en-us/windows/win32/direct3d12/d3d12-graphics-reference-returnvalues
+ switch (result) {
+ case S_OK:
+ return "S_OK";
+ case S_FALSE:
+ return "S_FALSE";
+
+ case E_FAIL:
+ return "E_FAIL";
+ case E_INVALIDARG:
+ return "E_INVALIDARG";
+ case E_OUTOFMEMORY:
+ return "E_OUTOFMEMORY";
+ case E_NOTIMPL:
+ return "E_NOTIMPL";
+
+ case DXGI_ERROR_INVALID_CALL:
+ return "DXGI_ERROR_INVALID_CALL";
+ case DXGI_ERROR_WAS_STILL_DRAWING:
+ return "DXGI_ERROR_WAS_STILL_DRAWING";
+
+ case D3D12_ERROR_ADAPTER_NOT_FOUND:
+ return "D3D12_ERROR_ADAPTER_NOT_FOUND";
+ case D3D12_ERROR_DRIVER_VERSION_MISMATCH:
+ return "D3D12_ERROR_DRIVER_VERSION_MISMATCH";
+
+ case E_FAKE_ERROR_FOR_TESTING:
+ return "E_FAKE_ERROR_FOR_TESTING";
+ case E_FAKE_OUTOFMEMORY_ERROR_FOR_TESTING:
+ return "E_FAKE_OUTOFMEMORY_ERROR_FOR_TESTING";
+
+ default:
+ return "<Unknown HRESULT>";
}
+}
- MaybeError CheckOutOfMemoryHRESULTImpl(HRESULT result, const char* context) {
- if (result == E_OUTOFMEMORY || result == E_FAKE_OUTOFMEMORY_ERROR_FOR_TESTING) {
- return DAWN_OUT_OF_MEMORY_ERROR(context);
- }
+MaybeError CheckHRESULTImpl(HRESULT result, const char* context) {
+ if (DAWN_LIKELY(SUCCEEDED(result))) {
+ return {};
+ }
+
+ std::ostringstream messageStream;
+ messageStream << context << " failed with " << HRESULTAsString(result) << " (0x"
+ << std::uppercase << std::setfill('0') << std::setw(8) << std::hex << result
+ << ")";
- return CheckHRESULTImpl(result, context);
+ if (result == DXGI_ERROR_DEVICE_REMOVED) {
+ return DAWN_DEVICE_LOST_ERROR(messageStream.str());
+ } else {
+ return DAWN_INTERNAL_ERROR(messageStream.str());
}
+}
+
+MaybeError CheckOutOfMemoryHRESULTImpl(HRESULT result, const char* context) {
+ if (result == E_OUTOFMEMORY || result == E_FAKE_OUTOFMEMORY_ERROR_FOR_TESTING) {
+ return DAWN_OUT_OF_MEMORY_ERROR(context);
+ }
+
+ return CheckHRESULTImpl(result, context);
+}
} // namespace dawn::native::d3d12
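As a hedged, standalone illustration of the error-message format produced above (symbolic name followed by the zero-padded hex code), the snippet below reproduces the formatting with a trimmed-down lookup; the function name is a stand-in, not Dawn API.

#include <iomanip>
#include <sstream>
#include <string>
#include <windows.h>

// Stand-in for HRESULTAsString with a single known code, for illustration only.
std::string FormatHRESULTForDemo(HRESULT result, const char* context) {
    const char* name = (result == E_OUTOFMEMORY) ? "E_OUTOFMEMORY" : "<Unknown HRESULT>";
    std::ostringstream messageStream;
    messageStream << context << " failed with " << name << " (0x" << std::uppercase
                  << std::setfill('0') << std::setw(8) << std::hex << result << ")";
    // e.g. "ID3D12Device::CreateFence failed with E_OUTOFMEMORY (0x8007000E)"
    return messageStream.str();
}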
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/D3D12Error.h b/chromium/third_party/dawn/src/dawn/native/d3d12/D3D12Error.h
index fda0ebe8863..b058c7c6c04 100644
--- a/chromium/third_party/dawn/src/dawn/native/d3d12/D3D12Error.h
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/D3D12Error.h
@@ -21,15 +21,15 @@
namespace dawn::native::d3d12 {
- constexpr HRESULT E_FAKE_ERROR_FOR_TESTING = MAKE_HRESULT(SEVERITY_ERROR, FACILITY_ITF, 0xFF);
- constexpr HRESULT E_FAKE_OUTOFMEMORY_ERROR_FOR_TESTING =
- MAKE_HRESULT(SEVERITY_ERROR, FACILITY_ITF, 0xFE);
+constexpr HRESULT E_FAKE_ERROR_FOR_TESTING = MAKE_HRESULT(SEVERITY_ERROR, FACILITY_ITF, 0xFF);
+constexpr HRESULT E_FAKE_OUTOFMEMORY_ERROR_FOR_TESTING =
+ MAKE_HRESULT(SEVERITY_ERROR, FACILITY_ITF, 0xFE);
- // Returns a success only if result of HResult is success
- MaybeError CheckHRESULTImpl(HRESULT result, const char* context);
+// Returns success only if the HRESULT indicates success.
+MaybeError CheckHRESULTImpl(HRESULT result, const char* context);
- // Uses CheckRESULT but returns OOM specific error when recoverable.
- MaybeError CheckOutOfMemoryHRESULTImpl(HRESULT result, const char* context);
+// Uses CheckHRESULT but returns an OOM-specific error when recoverable.
+MaybeError CheckOutOfMemoryHRESULTImpl(HRESULT result, const char* context);
#define CheckHRESULT(resultIn, contextIn) \
::dawn::native::d3d12::CheckHRESULTImpl( \
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/D3D12Info.cpp b/chromium/third_party/dawn/src/dawn/native/d3d12/D3D12Info.cpp
index ebd629b2066..6a4c4356488 100644
--- a/chromium/third_party/dawn/src/dawn/native/d3d12/D3D12Info.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/D3D12Info.cpp
@@ -14,6 +14,8 @@
#include "dawn/native/d3d12/D3D12Info.h"
+#include <utility>
+
#include "dawn/common/GPUInfo.h"
#include "dawn/native/d3d12/AdapterD3D12.h"
#include "dawn/native/d3d12/BackendD3D12.h"
@@ -22,101 +24,102 @@
namespace dawn::native::d3d12 {
- ResultOrError<D3D12DeviceInfo> GatherDeviceInfo(const Adapter& adapter) {
- D3D12DeviceInfo info = {};
-
- // Newer builds replace D3D_FEATURE_DATA_ARCHITECTURE with
- // D3D_FEATURE_DATA_ARCHITECTURE1. However, D3D_FEATURE_DATA_ARCHITECTURE can be used
- // for backwards compat.
- // https://docs.microsoft.com/en-us/windows/desktop/api/d3d12/ne-d3d12-d3d12_feature
- D3D12_FEATURE_DATA_ARCHITECTURE arch = {};
- DAWN_TRY(CheckHRESULT(adapter.GetDevice()->CheckFeatureSupport(D3D12_FEATURE_ARCHITECTURE,
- &arch, sizeof(arch)),
- "ID3D12Device::CheckFeatureSupport"));
-
- info.isUMA = arch.UMA;
-
- D3D12_FEATURE_DATA_D3D12_OPTIONS options = {};
- DAWN_TRY(CheckHRESULT(adapter.GetDevice()->CheckFeatureSupport(D3D12_FEATURE_D3D12_OPTIONS,
- &options, sizeof(options)),
- "ID3D12Device::CheckFeatureSupport"));
-
- info.resourceHeapTier = options.ResourceHeapTier;
-
- // Windows builds 1809 and above can use the D3D12 render pass API. If we query
- // CheckFeatureSupport for D3D12_FEATURE_D3D12_OPTIONS5 successfully, then we can use
- // the render pass API.
- info.supportsRenderPass = false;
- D3D12_FEATURE_DATA_D3D12_OPTIONS5 featureOptions5 = {};
- if (SUCCEEDED(adapter.GetDevice()->CheckFeatureSupport(
- D3D12_FEATURE_D3D12_OPTIONS5, &featureOptions5, sizeof(featureOptions5)))) {
- // Performance regressions been observed when using a render pass on Intel graphics
- // with RENDER_PASS_TIER_1 available, so fall back to a software emulated render
- // pass on these platforms.
- if (featureOptions5.RenderPassesTier < D3D12_RENDER_PASS_TIER_1 ||
- !gpu_info::IsIntel(adapter.GetVendorId())) {
- info.supportsRenderPass = true;
- }
- }
-
- // Used to share resources cross-API. If we query CheckFeatureSupport for
- // D3D12_FEATURE_D3D12_OPTIONS4 successfully, then we can use cross-API sharing.
- info.supportsSharedResourceCapabilityTier1 = false;
- D3D12_FEATURE_DATA_D3D12_OPTIONS4 featureOptions4 = {};
- if (SUCCEEDED(adapter.GetDevice()->CheckFeatureSupport(
- D3D12_FEATURE_D3D12_OPTIONS4, &featureOptions4, sizeof(featureOptions4)))) {
- // Tier 1 support additionally enables the NV12 format. Since only the NV12 format
- // is used by Dawn, check for Tier 1.
- if (featureOptions4.SharedResourceCompatibilityTier >=
- D3D12_SHARED_RESOURCE_COMPATIBILITY_TIER_1) {
- info.supportsSharedResourceCapabilityTier1 = true;
- }
+ResultOrError<D3D12DeviceInfo> GatherDeviceInfo(const Adapter& adapter) {
+ D3D12DeviceInfo info = {};
+
+ // Newer builds replace D3D_FEATURE_DATA_ARCHITECTURE with
+ // D3D_FEATURE_DATA_ARCHITECTURE1. However, D3D_FEATURE_DATA_ARCHITECTURE can be used
+ // for backwards compat.
+ // https://docs.microsoft.com/en-us/windows/desktop/api/d3d12/ne-d3d12-d3d12_feature
+ D3D12_FEATURE_DATA_ARCHITECTURE arch = {};
+ DAWN_TRY(CheckHRESULT(
+ adapter.GetDevice()->CheckFeatureSupport(D3D12_FEATURE_ARCHITECTURE, &arch, sizeof(arch)),
+ "ID3D12Device::CheckFeatureSupport"));
+
+ info.isUMA = arch.UMA;
+
+ D3D12_FEATURE_DATA_D3D12_OPTIONS options = {};
+ DAWN_TRY(CheckHRESULT(adapter.GetDevice()->CheckFeatureSupport(D3D12_FEATURE_D3D12_OPTIONS,
+ &options, sizeof(options)),
+ "ID3D12Device::CheckFeatureSupport"));
+
+ info.resourceHeapTier = options.ResourceHeapTier;
+
+ // Windows builds 1809 and above can use the D3D12 render pass API. If we query
+ // CheckFeatureSupport for D3D12_FEATURE_D3D12_OPTIONS5 successfully, then we can use
+ // the render pass API.
+ info.supportsRenderPass = false;
+ D3D12_FEATURE_DATA_D3D12_OPTIONS5 featureOptions5 = {};
+ if (SUCCEEDED(adapter.GetDevice()->CheckFeatureSupport(
+ D3D12_FEATURE_D3D12_OPTIONS5, &featureOptions5, sizeof(featureOptions5)))) {
+        // Performance regressions have been observed when using a render pass on Intel graphics
+        // with RENDER_PASS_TIER_1 available, so fall back to a software-emulated render
+ // pass on these platforms.
+ if (featureOptions5.RenderPassesTier < D3D12_RENDER_PASS_TIER_1 ||
+ !gpu_info::IsIntel(adapter.GetVendorId())) {
+ info.supportsRenderPass = true;
}
+ }
- D3D12_FEATURE_DATA_SHADER_MODEL knownShaderModels[] = {{D3D_SHADER_MODEL_6_2},
- {D3D_SHADER_MODEL_6_1},
- {D3D_SHADER_MODEL_6_0},
- {D3D_SHADER_MODEL_5_1}};
- uint32_t driverShaderModel = 0;
- for (D3D12_FEATURE_DATA_SHADER_MODEL shaderModel : knownShaderModels) {
- if (SUCCEEDED(adapter.GetDevice()->CheckFeatureSupport(
- D3D12_FEATURE_SHADER_MODEL, &shaderModel, sizeof(shaderModel)))) {
- driverShaderModel = shaderModel.HighestShaderModel;
- break;
- }
+ // Used to share resources cross-API. If we query CheckFeatureSupport for
+ // D3D12_FEATURE_D3D12_OPTIONS4 successfully, then we can use cross-API sharing.
+ info.supportsSharedResourceCapabilityTier1 = false;
+ D3D12_FEATURE_DATA_D3D12_OPTIONS4 featureOptions4 = {};
+ if (SUCCEEDED(adapter.GetDevice()->CheckFeatureSupport(
+ D3D12_FEATURE_D3D12_OPTIONS4, &featureOptions4, sizeof(featureOptions4)))) {
+ // Tier 1 support additionally enables the NV12 format. Since only the NV12 format
+ // is used by Dawn, check for Tier 1.
+ if (featureOptions4.SharedResourceCompatibilityTier >=
+ D3D12_SHARED_RESOURCE_COMPATIBILITY_TIER_1) {
+ info.supportsSharedResourceCapabilityTier1 = true;
}
+ }
- if (driverShaderModel < D3D_SHADER_MODEL_5_1) {
- return DAWN_INTERNAL_ERROR("Driver doesn't support Shader Model 5.1 or higher");
+ D3D12_FEATURE_DATA_SHADER_MODEL knownShaderModels[] = {
+ {D3D_SHADER_MODEL_6_4}, {D3D_SHADER_MODEL_6_3}, {D3D_SHADER_MODEL_6_2},
+ {D3D_SHADER_MODEL_6_1}, {D3D_SHADER_MODEL_6_0}, {D3D_SHADER_MODEL_5_1}};
+ uint32_t driverShaderModel = 0;
+ for (D3D12_FEATURE_DATA_SHADER_MODEL shaderModel : knownShaderModels) {
+ if (SUCCEEDED(adapter.GetDevice()->CheckFeatureSupport(
+ D3D12_FEATURE_SHADER_MODEL, &shaderModel, sizeof(shaderModel)))) {
+ driverShaderModel = shaderModel.HighestShaderModel;
+ break;
}
+ }
- // D3D_SHADER_MODEL is encoded as 0xMm with M the major version and m the minor version
- ASSERT(driverShaderModel <= 0xFF);
- uint32_t shaderModelMajor = (driverShaderModel & 0xF0) >> 4;
- uint32_t shaderModelMinor = (driverShaderModel & 0xF);
-
- ASSERT(shaderModelMajor < 10);
- ASSERT(shaderModelMinor < 10);
- info.shaderModel = 10 * shaderModelMajor + shaderModelMinor;
-
- // Profiles are always <stage>s_<minor>_<major> so we build the s_<minor>_major and add
- // it to each of the stage's suffix.
- std::wstring profileSuffix = L"s_M_n";
- profileSuffix[2] = wchar_t('0' + shaderModelMajor);
- profileSuffix[4] = wchar_t('0' + shaderModelMinor);
+ if (driverShaderModel < D3D_SHADER_MODEL_5_1) {
+ return DAWN_INTERNAL_ERROR("Driver doesn't support Shader Model 5.1 or higher");
+ }
- info.shaderProfiles[SingleShaderStage::Vertex] = L"v" + profileSuffix;
- info.shaderProfiles[SingleShaderStage::Fragment] = L"p" + profileSuffix;
- info.shaderProfiles[SingleShaderStage::Compute] = L"c" + profileSuffix;
+ // D3D_SHADER_MODEL is encoded as 0xMm with M the major version and m the minor version
+ ASSERT(driverShaderModel <= 0xFF);
+ uint32_t shaderModelMajor = (driverShaderModel & 0xF0) >> 4;
+ uint32_t shaderModelMinor = (driverShaderModel & 0xF);
+
+ ASSERT(shaderModelMajor < 10);
+ ASSERT(shaderModelMinor < 10);
+ info.shaderModel = 10 * shaderModelMajor + shaderModelMinor;
+
+    // Profiles are always <stage>s_<major>_<minor>, so we build the s_<major>_<minor> suffix
+    // and prepend each stage's prefix to it.
+ std::wstring profileSuffix = L"s_M_n";
+ profileSuffix[2] = wchar_t('0' + shaderModelMajor);
+ profileSuffix[4] = wchar_t('0' + shaderModelMinor);
+
+ info.shaderProfiles[SingleShaderStage::Vertex] = L"v" + profileSuffix;
+ info.shaderProfiles[SingleShaderStage::Fragment] = L"p" + profileSuffix;
+ info.shaderProfiles[SingleShaderStage::Compute] = L"c" + profileSuffix;
+
+ D3D12_FEATURE_DATA_D3D12_OPTIONS4 featureData4 = {};
+ if (SUCCEEDED(adapter.GetDevice()->CheckFeatureSupport(D3D12_FEATURE_D3D12_OPTIONS4,
+ &featureData4, sizeof(featureData4)))) {
+ info.supportsShaderFloat16 =
+ driverShaderModel >= D3D_SHADER_MODEL_6_2 && featureData4.Native16BitShaderOpsSupported;
+ }
- D3D12_FEATURE_DATA_D3D12_OPTIONS4 featureData4 = {};
- if (SUCCEEDED(adapter.GetDevice()->CheckFeatureSupport(
- D3D12_FEATURE_D3D12_OPTIONS4, &featureData4, sizeof(featureData4)))) {
- info.supportsShaderFloat16 = driverShaderModel >= D3D_SHADER_MODEL_6_2 &&
- featureData4.Native16BitShaderOpsSupported;
- }
+ info.supportsDP4a = driverShaderModel >= D3D_SHADER_MODEL_6_4;
- return std::move(info);
- }
+ return std::move(info);
+}
} // namespace dawn::native::d3d12
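The shader-model handling above packs D3D_SHADER_MODEL's 0xMm encoding into a decimal value and a per-stage profile suffix; below is a hedged, self-contained sketch of that decoding (the struct and function names are assumptions for illustration, not Dawn code).

#include <cassert>
#include <cstdint>
#include <string>

struct DecodedShaderModel {
    uint32_t combined;           // e.g. 62 for shader model 6.2
    std::wstring vertexProfile;  // e.g. L"vs_6_2"
};

DecodedShaderModel DecodeShaderModel(uint32_t driverShaderModel) {
    assert(driverShaderModel <= 0xFF);
    uint32_t major = (driverShaderModel & 0xF0) >> 4;  // high nibble
    uint32_t minor = driverShaderModel & 0xF;          // low nibble

    std::wstring suffix = L"s_M_n";
    suffix[2] = wchar_t('0' + major);
    suffix[4] = wchar_t('0' + minor);

    return {10 * major + minor, L"v" + suffix};
}
// DecodeShaderModel(0x62) yields {62, L"vs_6_2"}.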
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/D3D12Info.h b/chromium/third_party/dawn/src/dawn/native/d3d12/D3D12Info.h
index d38c4855357..e0f2a669ecc 100644
--- a/chromium/third_party/dawn/src/dawn/native/d3d12/D3D12Info.h
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/D3D12Info.h
@@ -21,21 +21,22 @@
namespace dawn::native::d3d12 {
- class Adapter;
+class Adapter;
- struct D3D12DeviceInfo {
- bool isUMA;
- uint32_t resourceHeapTier;
- bool supportsRenderPass;
- bool supportsShaderFloat16;
- // shaderModel indicates the maximum supported shader model, for example, the value 62
- // indicates that current driver supports the maximum shader model is shader model 6.2.
- uint32_t shaderModel;
- PerStage<std::wstring> shaderProfiles;
- bool supportsSharedResourceCapabilityTier1;
- };
+struct D3D12DeviceInfo {
+ bool isUMA;
+ uint32_t resourceHeapTier;
+ bool supportsRenderPass;
+ bool supportsShaderFloat16;
+    // shaderModel indicates the maximum supported shader model; for example, the value 62
+    // means the highest shader model supported by the current driver is 6.2.
+ uint32_t shaderModel;
+ PerStage<std::wstring> shaderProfiles;
+ bool supportsSharedResourceCapabilityTier1;
+ bool supportsDP4a;
+};
- ResultOrError<D3D12DeviceInfo> GatherDeviceInfo(const Adapter& adapter);
+ResultOrError<D3D12DeviceInfo> GatherDeviceInfo(const Adapter& adapter);
} // namespace dawn::native::d3d12
#endif // SRC_DAWN_NATIVE_D3D12_D3D12INFO_H_
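A brief, hedged sketch of how the two shader-model-gated flags in this struct are derived by the patch (supportsShaderFloat16 needs SM 6.2 plus native 16-bit shader ops; the new supportsDP4a needs SM 6.4). The helper below is illustrative only, not Dawn code.

#include <cstdint>

struct ShaderCapsForDemo {
    bool supportsShaderFloat16;
    bool supportsDP4a;
};

ShaderCapsForDemo DeriveShaderCaps(uint32_t driverShaderModel, bool native16BitShaderOps) {
    constexpr uint32_t kShaderModel6_2 = 0x62;  // D3D_SHADER_MODEL_6_2
    constexpr uint32_t kShaderModel6_4 = 0x64;  // D3D_SHADER_MODEL_6_4
    return {
        driverShaderModel >= kShaderModel6_2 && native16BitShaderOps,  // float16 in HLSL
        driverShaderModel >= kShaderModel6_4,                          // packed DP4a instructions
    };
}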
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/DeviceD3D12.cpp b/chromium/third_party/dawn/src/dawn/native/d3d12/DeviceD3D12.cpp
index 6b77b3a07ab..1cb342bd361 100644
--- a/chromium/third_party/dawn/src/dawn/native/d3d12/DeviceD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/DeviceD3D12.cpp
@@ -14,7 +14,13 @@
#include "dawn/native/d3d12/DeviceD3D12.h"
+#include <algorithm>
+#include <limits>
+#include <sstream>
+#include <utility>
+
#include "dawn/common/GPUInfo.h"
+#include "dawn/native/D3D12Backend.h"
#include "dawn/native/DynamicUploader.h"
#include "dawn/native/Instance.h"
#include "dawn/native/d3d12/AdapterD3D12.h"
@@ -26,6 +32,7 @@
#include "dawn/native/d3d12/ComputePipelineD3D12.h"
#include "dawn/native/d3d12/D3D11on12Util.h"
#include "dawn/native/d3d12/D3D12Error.h"
+#include "dawn/native/d3d12/ExternalImageDXGIImpl.h"
#include "dawn/native/d3d12/PipelineLayoutD3D12.h"
#include "dawn/native/d3d12/PlatformFunctions.h"
#include "dawn/native/d3d12/QuerySetD3D12.h"
@@ -41,710 +48,809 @@
#include "dawn/native/d3d12/StagingDescriptorAllocatorD3D12.h"
#include "dawn/native/d3d12/SwapChainD3D12.h"
#include "dawn/native/d3d12/UtilsD3D12.h"
-
-#include <sstream>
+#include "dawn/platform/DawnPlatform.h"
+#include "dawn/platform/tracing/TraceEvent.h"
namespace dawn::native::d3d12 {
- // TODO(dawn:155): Figure out these values.
- static constexpr uint16_t kShaderVisibleDescriptorHeapSize = 1024;
- static constexpr uint8_t kAttachmentDescriptorHeapSize = 64;
-
- // Value may change in the future to better accomodate large clears.
- static constexpr uint64_t kZeroBufferSize = 1024 * 1024 * 4; // 4 Mb
-
- static constexpr uint64_t kMaxDebugMessagesToPrint = 5;
-
- // static
- ResultOrError<Ref<Device>> Device::Create(Adapter* adapter,
- const DeviceDescriptor* descriptor) {
- Ref<Device> device = AcquireRef(new Device(adapter, descriptor));
- DAWN_TRY(device->Initialize(descriptor));
- return device;
- }
-
- MaybeError Device::Initialize(const DeviceDescriptor* descriptor) {
- InitTogglesFromDriver();
-
- mD3d12Device = ToBackend(GetAdapter())->GetDevice();
-
- ASSERT(mD3d12Device != nullptr);
-
- // Create device-global objects
- D3D12_COMMAND_QUEUE_DESC queueDesc = {};
- queueDesc.Flags = D3D12_COMMAND_QUEUE_FLAG_NONE;
- queueDesc.Type = D3D12_COMMAND_LIST_TYPE_DIRECT;
- DAWN_TRY(
- CheckHRESULT(mD3d12Device->CreateCommandQueue(&queueDesc, IID_PPV_ARGS(&mCommandQueue)),
- "D3D12 create command queue"));
-
- if (IsFeatureEnabled(Feature::TimestampQuery) &&
- !IsToggleEnabled(Toggle::DisableTimestampQueryConversion)) {
- // Get GPU timestamp counter frequency (in ticks/second). This fails if the specified
- // command queue doesn't support timestamps. D3D12_COMMAND_LIST_TYPE_DIRECT queues
- // always support timestamps except where there are bugs in Windows container and vGPU
- // implementations.
- uint64_t frequency;
- DAWN_TRY(CheckHRESULT(mCommandQueue->GetTimestampFrequency(&frequency),
- "D3D12 get timestamp frequency"));
- // Calculate the period in nanoseconds by the frequency.
- mTimestampPeriod = static_cast<float>(1e9) / frequency;
- }
+// TODO(dawn:155): Figure out these values.
+static constexpr uint16_t kShaderVisibleDescriptorHeapSize = 1024;
+static constexpr uint8_t kAttachmentDescriptorHeapSize = 64;
- // If PIX is not attached, the QueryInterface fails. Hence, no need to check the return
- // value.
- mCommandQueue.As(&mD3d12SharingContract);
+// Value may change in the future to better accommodate large clears.
+static constexpr uint64_t kZeroBufferSize = 1024 * 1024 * 4;  // 4 MB
- DAWN_TRY(
- CheckHRESULT(mD3d12Device->CreateFence(uint64_t(GetLastSubmittedCommandSerial()),
- D3D12_FENCE_FLAG_NONE, IID_PPV_ARGS(&mFence)),
- "D3D12 create fence"));
+static constexpr uint64_t kMaxDebugMessagesToPrint = 5;
- mFenceEvent = CreateEvent(nullptr, FALSE, FALSE, nullptr);
- ASSERT(mFenceEvent != nullptr);
+// static
+ResultOrError<Ref<Device>> Device::Create(Adapter* adapter, const DeviceDescriptor* descriptor) {
+ Ref<Device> device = AcquireRef(new Device(adapter, descriptor));
+ DAWN_TRY(device->Initialize(descriptor));
+ return device;
+}
- // Initialize backend services
- mCommandAllocatorManager = std::make_unique<CommandAllocatorManager>(this);
+MaybeError Device::Initialize(const DeviceDescriptor* descriptor) {
+ InitTogglesFromDriver();
- // Zero sized allocator is never requested and does not need to exist.
- for (uint32_t countIndex = 0; countIndex < kNumViewDescriptorAllocators; countIndex++) {
- mViewAllocators[countIndex + 1] = std::make_unique<StagingDescriptorAllocator>(
- this, 1u << countIndex, kShaderVisibleDescriptorHeapSize,
- D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV);
- }
+ mD3d12Device = ToBackend(GetAdapter())->GetDevice();
- for (uint32_t countIndex = 0; countIndex < kNumSamplerDescriptorAllocators; countIndex++) {
- mSamplerAllocators[countIndex + 1] = std::make_unique<StagingDescriptorAllocator>(
- this, 1u << countIndex, kShaderVisibleDescriptorHeapSize,
- D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER);
- }
+ ASSERT(mD3d12Device != nullptr);
- mRenderTargetViewAllocator = std::make_unique<StagingDescriptorAllocator>(
- this, 1, kAttachmentDescriptorHeapSize, D3D12_DESCRIPTOR_HEAP_TYPE_RTV);
+ // Create device-global objects
+ D3D12_COMMAND_QUEUE_DESC queueDesc = {};
+ queueDesc.Flags = D3D12_COMMAND_QUEUE_FLAG_NONE;
+ queueDesc.Type = D3D12_COMMAND_LIST_TYPE_DIRECT;
+ DAWN_TRY(
+ CheckHRESULT(mD3d12Device->CreateCommandQueue(&queueDesc, IID_PPV_ARGS(&mCommandQueue)),
+ "D3D12 create command queue"));
- mDepthStencilViewAllocator = std::make_unique<StagingDescriptorAllocator>(
- this, 1, kAttachmentDescriptorHeapSize, D3D12_DESCRIPTOR_HEAP_TYPE_DSV);
+ if (IsFeatureEnabled(Feature::TimestampQuery) &&
+ !IsToggleEnabled(Toggle::DisableTimestampQueryConversion)) {
+ // Get GPU timestamp counter frequency (in ticks/second). This fails if the specified
+ // command queue doesn't support timestamps. D3D12_COMMAND_LIST_TYPE_DIRECT queues
+ // always support timestamps except where there are bugs in Windows container and vGPU
+ // implementations.
+ uint64_t frequency;
+ DAWN_TRY(CheckHRESULT(mCommandQueue->GetTimestampFrequency(&frequency),
+ "D3D12 get timestamp frequency"));
+ // Calculate the period in nanoseconds by the frequency.
+ mTimestampPeriod = static_cast<float>(1e9) / frequency;
+ }
- mSamplerHeapCache = std::make_unique<SamplerHeapCache>(this);
+ // If PIX is not attached, the QueryInterface fails. Hence, no need to check the return
+ // value.
+ mCommandQueue.As(&mD3d12SharingContract);
- mResidencyManager = std::make_unique<ResidencyManager>(this);
- mResourceAllocatorManager = std::make_unique<ResourceAllocatorManager>(this);
+ DAWN_TRY(CheckHRESULT(mD3d12Device->CreateFence(uint64_t(GetLastSubmittedCommandSerial()),
+ D3D12_FENCE_FLAG_NONE, IID_PPV_ARGS(&mFence)),
+ "D3D12 create fence"));
- // ShaderVisibleDescriptorAllocators use the ResidencyManager and must be initialized after.
- DAWN_TRY_ASSIGN(
- mSamplerShaderVisibleDescriptorAllocator,
- ShaderVisibleDescriptorAllocator::Create(this, D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER));
+ mFenceEvent = CreateEvent(nullptr, FALSE, FALSE, nullptr);
+ ASSERT(mFenceEvent != nullptr);
- DAWN_TRY_ASSIGN(
- mViewShaderVisibleDescriptorAllocator,
- ShaderVisibleDescriptorAllocator::Create(this, D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV));
+ // Initialize backend services
+ mCommandAllocatorManager = std::make_unique<CommandAllocatorManager>(this);
- // Initialize indirect commands
- D3D12_INDIRECT_ARGUMENT_DESC argumentDesc = {};
- argumentDesc.Type = D3D12_INDIRECT_ARGUMENT_TYPE_DISPATCH;
+ // Zero sized allocator is never requested and does not need to exist.
+ for (uint32_t countIndex = 0; countIndex < kNumViewDescriptorAllocators; countIndex++) {
+ mViewAllocators[countIndex + 1] = std::make_unique<StagingDescriptorAllocator>(
+ this, 1u << countIndex, kShaderVisibleDescriptorHeapSize,
+ D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV);
+ }
- D3D12_COMMAND_SIGNATURE_DESC programDesc = {};
- programDesc.ByteStride = 3 * sizeof(uint32_t);
- programDesc.NumArgumentDescs = 1;
- programDesc.pArgumentDescs = &argumentDesc;
+ for (uint32_t countIndex = 0; countIndex < kNumSamplerDescriptorAllocators; countIndex++) {
+ mSamplerAllocators[countIndex + 1] = std::make_unique<StagingDescriptorAllocator>(
+ this, 1u << countIndex, kShaderVisibleDescriptorHeapSize,
+ D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER);
+ }
- GetD3D12Device()->CreateCommandSignature(&programDesc, NULL,
- IID_PPV_ARGS(&mDispatchIndirectSignature));
+ mRenderTargetViewAllocator = std::make_unique<StagingDescriptorAllocator>(
+ this, 1, kAttachmentDescriptorHeapSize, D3D12_DESCRIPTOR_HEAP_TYPE_RTV);
- argumentDesc.Type = D3D12_INDIRECT_ARGUMENT_TYPE_DRAW;
- programDesc.ByteStride = 4 * sizeof(uint32_t);
+ mDepthStencilViewAllocator = std::make_unique<StagingDescriptorAllocator>(
+ this, 1, kAttachmentDescriptorHeapSize, D3D12_DESCRIPTOR_HEAP_TYPE_DSV);
- GetD3D12Device()->CreateCommandSignature(&programDesc, NULL,
- IID_PPV_ARGS(&mDrawIndirectSignature));
+ mSamplerHeapCache = std::make_unique<SamplerHeapCache>(this);
- argumentDesc.Type = D3D12_INDIRECT_ARGUMENT_TYPE_DRAW_INDEXED;
- programDesc.ByteStride = 5 * sizeof(uint32_t);
+ mResidencyManager = std::make_unique<ResidencyManager>(this);
+ mResourceAllocatorManager = std::make_unique<ResourceAllocatorManager>(this);
- GetD3D12Device()->CreateCommandSignature(&programDesc, NULL,
- IID_PPV_ARGS(&mDrawIndexedIndirectSignature));
+ // ShaderVisibleDescriptorAllocators use the ResidencyManager and must be initialized after.
+ DAWN_TRY_ASSIGN(
+ mSamplerShaderVisibleDescriptorAllocator,
+ ShaderVisibleDescriptorAllocator::Create(this, D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER));
- DAWN_TRY(DeviceBase::Initialize(Queue::Create(this, &descriptor->defaultQueue)));
- // Device shouldn't be used until after DeviceBase::Initialize so we must wait until after
- // device initialization to call NextSerial
- DAWN_TRY(NextSerial());
+ DAWN_TRY_ASSIGN(
+ mViewShaderVisibleDescriptorAllocator,
+ ShaderVisibleDescriptorAllocator::Create(this, D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV));
- // The environment can only use DXC when it's available. Override the decision if it is not
- // applicable.
- DAWN_TRY(ApplyUseDxcToggle());
+ // Initialize indirect commands
+ D3D12_INDIRECT_ARGUMENT_DESC argumentDesc = {};
+ argumentDesc.Type = D3D12_INDIRECT_ARGUMENT_TYPE_DISPATCH;
- DAWN_TRY(CreateZeroBuffer());
+ D3D12_COMMAND_SIGNATURE_DESC programDesc = {};
+ programDesc.ByteStride = 3 * sizeof(uint32_t);
+ programDesc.NumArgumentDescs = 1;
+ programDesc.pArgumentDescs = &argumentDesc;
- SetLabelImpl();
+ GetD3D12Device()->CreateCommandSignature(&programDesc, NULL,
+ IID_PPV_ARGS(&mDispatchIndirectSignature));
- return {};
- }
+ argumentDesc.Type = D3D12_INDIRECT_ARGUMENT_TYPE_DRAW;
+ programDesc.ByteStride = 4 * sizeof(uint32_t);
- Device::~Device() {
- Destroy();
- }
+ GetD3D12Device()->CreateCommandSignature(&programDesc, NULL,
+ IID_PPV_ARGS(&mDrawIndirectSignature));
- ID3D12Device* Device::GetD3D12Device() const {
- return mD3d12Device.Get();
- }
+ argumentDesc.Type = D3D12_INDIRECT_ARGUMENT_TYPE_DRAW_INDEXED;
+ programDesc.ByteStride = 5 * sizeof(uint32_t);
- ComPtr<ID3D12CommandQueue> Device::GetCommandQueue() const {
- return mCommandQueue;
- }
+ GetD3D12Device()->CreateCommandSignature(&programDesc, NULL,
+ IID_PPV_ARGS(&mDrawIndexedIndirectSignature));
- ID3D12SharingContract* Device::GetSharingContract() const {
- return mD3d12SharingContract.Get();
- }
+ DAWN_TRY(DeviceBase::Initialize(Queue::Create(this, &descriptor->defaultQueue)));
+    // The device shouldn't be used until after DeviceBase::Initialize, so we must wait until
+    // after device initialization to call NextSerial.
+ DAWN_TRY(NextSerial());
- ComPtr<ID3D12CommandSignature> Device::GetDispatchIndirectSignature() const {
- return mDispatchIndirectSignature;
- }
+ // The environment can only use DXC when it's available. Override the decision if it is not
+ // applicable.
+ DAWN_TRY(ApplyUseDxcToggle());
- ComPtr<ID3D12CommandSignature> Device::GetDrawIndirectSignature() const {
- return mDrawIndirectSignature;
- }
+ DAWN_TRY(CreateZeroBuffer());
- ComPtr<ID3D12CommandSignature> Device::GetDrawIndexedIndirectSignature() const {
- return mDrawIndexedIndirectSignature;
- }
+ SetLabelImpl();
- ComPtr<IDXGIFactory4> Device::GetFactory() const {
- return ToBackend(GetAdapter())->GetBackend()->GetFactory();
- }
+ return {};
+}
- MaybeError Device::ApplyUseDxcToggle() {
- if (!ToBackend(GetAdapter())->GetBackend()->GetFunctions()->IsDXCAvailable()) {
- ForceSetToggle(Toggle::UseDXC, false);
- } else if (IsFeatureEnabled(Feature::ShaderFloat16)) {
- // Currently we can only use DXC to compile HLSL shaders using float16.
- ForceSetToggle(Toggle::UseDXC, true);
- }
+Device::~Device() {
+ Destroy();
+}
- if (IsToggleEnabled(Toggle::UseDXC)) {
- DAWN_TRY(ToBackend(GetAdapter())->GetBackend()->EnsureDxcCompiler());
- DAWN_TRY(ToBackend(GetAdapter())->GetBackend()->EnsureDxcLibrary());
- DAWN_TRY(ToBackend(GetAdapter())->GetBackend()->EnsureDxcValidator());
- }
+ID3D12Device* Device::GetD3D12Device() const {
+ return mD3d12Device.Get();
+}
- return {};
- }
+ComPtr<ID3D12CommandQueue> Device::GetCommandQueue() const {
+ return mCommandQueue;
+}
- ComPtr<IDxcLibrary> Device::GetDxcLibrary() const {
- return ToBackend(GetAdapter())->GetBackend()->GetDxcLibrary();
- }
+ID3D12SharingContract* Device::GetSharingContract() const {
+ return mD3d12SharingContract.Get();
+}
- ComPtr<IDxcCompiler> Device::GetDxcCompiler() const {
- return ToBackend(GetAdapter())->GetBackend()->GetDxcCompiler();
- }
+ComPtr<ID3D12CommandSignature> Device::GetDispatchIndirectSignature() const {
+ return mDispatchIndirectSignature;
+}
- ComPtr<IDxcValidator> Device::GetDxcValidator() const {
- return ToBackend(GetAdapter())->GetBackend()->GetDxcValidator();
- }
+ComPtr<ID3D12CommandSignature> Device::GetDrawIndirectSignature() const {
+ return mDrawIndirectSignature;
+}
- const PlatformFunctions* Device::GetFunctions() const {
- return ToBackend(GetAdapter())->GetBackend()->GetFunctions();
- }
+ComPtr<ID3D12CommandSignature> Device::GetDrawIndexedIndirectSignature() const {
+ return mDrawIndexedIndirectSignature;
+}
- CommandAllocatorManager* Device::GetCommandAllocatorManager() const {
- return mCommandAllocatorManager.get();
- }
+ComPtr<IDXGIFactory4> Device::GetFactory() const {
+ return ToBackend(GetAdapter())->GetBackend()->GetFactory();
+}
- ResidencyManager* Device::GetResidencyManager() const {
- return mResidencyManager.get();
+MaybeError Device::ApplyUseDxcToggle() {
+ if (!ToBackend(GetAdapter())->GetBackend()->GetFunctions()->IsDXCAvailable()) {
+ ForceSetToggle(Toggle::UseDXC, false);
}
- ResultOrError<CommandRecordingContext*> Device::GetPendingCommandContext() {
- // Callers of GetPendingCommandList do so to record commands. Only reserve a command
- // allocator when it is needed so we don't submit empty command lists
- if (!mPendingCommands.IsOpen()) {
- DAWN_TRY(mPendingCommands.Open(mD3d12Device.Get(), mCommandAllocatorManager.get()));
- }
- return &mPendingCommands;
+ if (IsToggleEnabled(Toggle::UseDXC)) {
+ DAWN_TRY(ToBackend(GetAdapter())->GetBackend()->EnsureDxcCompiler());
+ DAWN_TRY(ToBackend(GetAdapter())->GetBackend()->EnsureDxcLibrary());
+ DAWN_TRY(ToBackend(GetAdapter())->GetBackend()->EnsureDxcValidator());
}
- MaybeError Device::CreateZeroBuffer() {
- BufferDescriptor zeroBufferDescriptor;
- zeroBufferDescriptor.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
- zeroBufferDescriptor.size = kZeroBufferSize;
- zeroBufferDescriptor.label = "ZeroBuffer_Internal";
- DAWN_TRY_ASSIGN(mZeroBuffer, Buffer::Create(this, &zeroBufferDescriptor));
+ return {};
+}
- return {};
- }
+ComPtr<IDxcLibrary> Device::GetDxcLibrary() const {
+ return ToBackend(GetAdapter())->GetBackend()->GetDxcLibrary();
+}
- MaybeError Device::ClearBufferToZero(CommandRecordingContext* commandContext,
- BufferBase* destination,
- uint64_t offset,
- uint64_t size) {
- // TODO(crbug.com/dawn/852): It would be ideal to clear the buffer in CreateZeroBuffer, but
- // the allocation of the staging buffer causes various end2end tests that monitor heap usage
- // to fail if it's done during device creation. Perhaps ClearUnorderedAccessView*() can be
- // used to avoid that.
- if (!mZeroBuffer->IsDataInitialized()) {
- DynamicUploader* uploader = GetDynamicUploader();
- UploadHandle uploadHandle;
- DAWN_TRY_ASSIGN(uploadHandle,
- uploader->Allocate(kZeroBufferSize, GetPendingCommandSerial(),
- kCopyBufferToBufferOffsetAlignment));
-
- memset(uploadHandle.mappedBuffer, 0u, kZeroBufferSize);
-
- CopyFromStagingToBufferImpl(commandContext, uploadHandle.stagingBuffer,
- uploadHandle.startOffset, mZeroBuffer.Get(), 0,
- kZeroBufferSize);
-
- mZeroBuffer->SetIsDataInitialized();
- }
+ComPtr<IDxcCompiler> Device::GetDxcCompiler() const {
+ return ToBackend(GetAdapter())->GetBackend()->GetDxcCompiler();
+}
- Buffer* dstBuffer = ToBackend(destination);
+ComPtr<IDxcValidator> Device::GetDxcValidator() const {
+ return ToBackend(GetAdapter())->GetBackend()->GetDxcValidator();
+}
- // Necessary to ensure residency of the zero buffer.
- mZeroBuffer->TrackUsageAndTransitionNow(commandContext, wgpu::BufferUsage::CopySrc);
- dstBuffer->TrackUsageAndTransitionNow(commandContext, wgpu::BufferUsage::CopyDst);
+const PlatformFunctions* Device::GetFunctions() const {
+ return ToBackend(GetAdapter())->GetBackend()->GetFunctions();
+}
- while (size > 0) {
- uint64_t copySize = std::min(kZeroBufferSize, size);
- commandContext->GetCommandList()->CopyBufferRegion(
- dstBuffer->GetD3D12Resource(), offset, mZeroBuffer->GetD3D12Resource(), 0,
- copySize);
+CommandAllocatorManager* Device::GetCommandAllocatorManager() const {
+ return mCommandAllocatorManager.get();
+}
- offset += copySize;
- size -= copySize;
- }
+ResidencyManager* Device::GetResidencyManager() const {
+ return mResidencyManager.get();
+}
- return {};
+ResultOrError<CommandRecordingContext*> Device::GetPendingCommandContext() {
+    // Callers of GetPendingCommandList do so to record commands. Only reserve a command
+    // allocator when it is needed so we don't submit empty command lists.
+ if (!mPendingCommands.IsOpen()) {
+ DAWN_TRY(mPendingCommands.Open(mD3d12Device.Get(), mCommandAllocatorManager.get()));
}
+ return &mPendingCommands;
+}
- MaybeError Device::TickImpl() {
- // Perform cleanup operations to free unused objects
- ExecutionSerial completedSerial = GetCompletedCommandSerial();
+MaybeError Device::CreateZeroBuffer() {
+ BufferDescriptor zeroBufferDescriptor;
+ zeroBufferDescriptor.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
+ zeroBufferDescriptor.size = kZeroBufferSize;
+ zeroBufferDescriptor.label = "ZeroBuffer_Internal";
+ DAWN_TRY_ASSIGN(mZeroBuffer, Buffer::Create(this, &zeroBufferDescriptor));
- mResourceAllocatorManager->Tick(completedSerial);
- DAWN_TRY(mCommandAllocatorManager->Tick(completedSerial));
- mViewShaderVisibleDescriptorAllocator->Tick(completedSerial);
- mSamplerShaderVisibleDescriptorAllocator->Tick(completedSerial);
- mRenderTargetViewAllocator->Tick(completedSerial);
- mDepthStencilViewAllocator->Tick(completedSerial);
- mUsedComObjectRefs.ClearUpTo(completedSerial);
+ return {};
+}
- if (mPendingCommands.IsOpen()) {
- DAWN_TRY(ExecutePendingCommandContext());
- DAWN_TRY(NextSerial());
- }
-
- DAWN_TRY(CheckDebugLayerAndGenerateErrors());
+MaybeError Device::ClearBufferToZero(CommandRecordingContext* commandContext,
+ BufferBase* destination,
+ uint64_t offset,
+ uint64_t size) {
+ // TODO(crbug.com/dawn/852): It would be ideal to clear the buffer in CreateZeroBuffer, but
+ // the allocation of the staging buffer causes various end2end tests that monitor heap usage
+ // to fail if it's done during device creation. Perhaps ClearUnorderedAccessView*() can be
+ // used to avoid that.
+ if (!mZeroBuffer->IsDataInitialized()) {
+ DynamicUploader* uploader = GetDynamicUploader();
+ UploadHandle uploadHandle;
+ DAWN_TRY_ASSIGN(uploadHandle, uploader->Allocate(kZeroBufferSize, GetPendingCommandSerial(),
+ kCopyBufferToBufferOffsetAlignment));
- return {};
- }
+ memset(uploadHandle.mappedBuffer, 0u, kZeroBufferSize);
- MaybeError Device::NextSerial() {
- IncrementLastSubmittedCommandSerial();
+ CopyFromStagingToBufferImpl(commandContext, uploadHandle.stagingBuffer,
+ uploadHandle.startOffset, mZeroBuffer.Get(), 0,
+ kZeroBufferSize);
- return CheckHRESULT(
- mCommandQueue->Signal(mFence.Get(), uint64_t(GetLastSubmittedCommandSerial())),
- "D3D12 command queue signal fence");
+ mZeroBuffer->SetIsDataInitialized();
}
- MaybeError Device::WaitForSerial(ExecutionSerial serial) {
- DAWN_TRY(CheckPassedSerials());
- if (GetCompletedCommandSerial() < serial) {
- DAWN_TRY(CheckHRESULT(mFence->SetEventOnCompletion(uint64_t(serial), mFenceEvent),
- "D3D12 set event on completion"));
- WaitForSingleObject(mFenceEvent, INFINITE);
- DAWN_TRY(CheckPassedSerials());
- }
- return {};
- }
+ Buffer* dstBuffer = ToBackend(destination);
- ResultOrError<ExecutionSerial> Device::CheckAndUpdateCompletedSerials() {
- ExecutionSerial completedSerial = ExecutionSerial(mFence->GetCompletedValue());
- if (DAWN_UNLIKELY(completedSerial == ExecutionSerial(UINT64_MAX))) {
- // GetCompletedValue returns UINT64_MAX if the device was removed.
- // Try to query the failure reason.
- DAWN_TRY(CheckHRESULT(mD3d12Device->GetDeviceRemovedReason(),
- "ID3D12Device::GetDeviceRemovedReason"));
- // Otherwise, return a generic device lost error.
- return DAWN_DEVICE_LOST_ERROR("Device lost");
- }
+ // Necessary to ensure residency of the zero buffer.
+ mZeroBuffer->TrackUsageAndTransitionNow(commandContext, wgpu::BufferUsage::CopySrc);
+ dstBuffer->TrackUsageAndTransitionNow(commandContext, wgpu::BufferUsage::CopyDst);
- if (completedSerial <= GetCompletedCommandSerial()) {
- return ExecutionSerial(0);
- }
+ while (size > 0) {
+ uint64_t copySize = std::min(kZeroBufferSize, size);
+ commandContext->GetCommandList()->CopyBufferRegion(
+ dstBuffer->GetD3D12Resource(), offset, mZeroBuffer->GetD3D12Resource(), 0, copySize);
- return completedSerial;
+ offset += copySize;
+ size -= copySize;
}
- void Device::ReferenceUntilUnused(ComPtr<IUnknown> object) {
- mUsedComObjectRefs.Enqueue(object, GetPendingCommandSerial());
- }
+ return {};
+}
- MaybeError Device::ExecutePendingCommandContext() {
- return mPendingCommands.ExecuteCommandList(this);
- }
+MaybeError Device::TickImpl() {
+ // Perform cleanup operations to free unused objects
+ ExecutionSerial completedSerial = GetCompletedCommandSerial();
- ResultOrError<Ref<BindGroupBase>> Device::CreateBindGroupImpl(
- const BindGroupDescriptor* descriptor) {
- return BindGroup::Create(this, descriptor);
- }
- ResultOrError<Ref<BindGroupLayoutBase>> Device::CreateBindGroupLayoutImpl(
- const BindGroupLayoutDescriptor* descriptor,
- PipelineCompatibilityToken pipelineCompatibilityToken) {
- return BindGroupLayout::Create(this, descriptor, pipelineCompatibilityToken);
- }
- ResultOrError<Ref<BufferBase>> Device::CreateBufferImpl(const BufferDescriptor* descriptor) {
- return Buffer::Create(this, descriptor);
- }
- ResultOrError<Ref<CommandBufferBase>> Device::CreateCommandBuffer(
- CommandEncoder* encoder,
- const CommandBufferDescriptor* descriptor) {
- return CommandBuffer::Create(encoder, descriptor);
- }
- Ref<ComputePipelineBase> Device::CreateUninitializedComputePipelineImpl(
- const ComputePipelineDescriptor* descriptor) {
- return ComputePipeline::CreateUninitialized(this, descriptor);
- }
- ResultOrError<Ref<PipelineLayoutBase>> Device::CreatePipelineLayoutImpl(
- const PipelineLayoutDescriptor* descriptor) {
- return PipelineLayout::Create(this, descriptor);
- }
- ResultOrError<Ref<QuerySetBase>> Device::CreateQuerySetImpl(
- const QuerySetDescriptor* descriptor) {
- return QuerySet::Create(this, descriptor);
- }
- Ref<RenderPipelineBase> Device::CreateUninitializedRenderPipelineImpl(
- const RenderPipelineDescriptor* descriptor) {
- return RenderPipeline::CreateUninitialized(this, descriptor);
- }
- ResultOrError<Ref<SamplerBase>> Device::CreateSamplerImpl(const SamplerDescriptor* descriptor) {
- return Sampler::Create(this, descriptor);
- }
- ResultOrError<Ref<ShaderModuleBase>> Device::CreateShaderModuleImpl(
- const ShaderModuleDescriptor* descriptor,
- ShaderModuleParseResult* parseResult) {
- return ShaderModule::Create(this, descriptor, parseResult);
- }
- ResultOrError<Ref<SwapChainBase>> Device::CreateSwapChainImpl(
- const SwapChainDescriptor* descriptor) {
- return OldSwapChain::Create(this, descriptor);
- }
- ResultOrError<Ref<NewSwapChainBase>> Device::CreateSwapChainImpl(
- Surface* surface,
- NewSwapChainBase* previousSwapChain,
- const SwapChainDescriptor* descriptor) {
- return SwapChain::Create(this, surface, previousSwapChain, descriptor);
- }
- ResultOrError<Ref<TextureBase>> Device::CreateTextureImpl(const TextureDescriptor* descriptor) {
- return Texture::Create(this, descriptor);
- }
- ResultOrError<Ref<TextureViewBase>> Device::CreateTextureViewImpl(
- TextureBase* texture,
- const TextureViewDescriptor* descriptor) {
- return TextureView::Create(texture, descriptor);
- }
- void Device::InitializeComputePipelineAsyncImpl(Ref<ComputePipelineBase> computePipeline,
- WGPUCreateComputePipelineAsyncCallback callback,
- void* userdata) {
- ComputePipeline::InitializeAsync(std::move(computePipeline), callback, userdata);
- }
- void Device::InitializeRenderPipelineAsyncImpl(Ref<RenderPipelineBase> renderPipeline,
- WGPUCreateRenderPipelineAsyncCallback callback,
- void* userdata) {
- RenderPipeline::InitializeAsync(std::move(renderPipeline), callback, userdata);
- }
+ mResourceAllocatorManager->Tick(completedSerial);
+ DAWN_TRY(mCommandAllocatorManager->Tick(completedSerial));
+ mViewShaderVisibleDescriptorAllocator->Tick(completedSerial);
+ mSamplerShaderVisibleDescriptorAllocator->Tick(completedSerial);
+ mRenderTargetViewAllocator->Tick(completedSerial);
+ mDepthStencilViewAllocator->Tick(completedSerial);
+ mUsedComObjectRefs.ClearUpTo(completedSerial);
- ResultOrError<std::unique_ptr<StagingBufferBase>> Device::CreateStagingBuffer(size_t size) {
- std::unique_ptr<StagingBufferBase> stagingBuffer =
- std::make_unique<StagingBuffer>(size, this);
- DAWN_TRY(stagingBuffer->Initialize());
- return std::move(stagingBuffer);
+ if (mPendingCommands.IsOpen()) {
+ DAWN_TRY(ExecutePendingCommandContext());
+ DAWN_TRY(NextSerial());
}
- MaybeError Device::CopyFromStagingToBuffer(StagingBufferBase* source,
- uint64_t sourceOffset,
- BufferBase* destination,
- uint64_t destinationOffset,
- uint64_t size) {
- CommandRecordingContext* commandRecordingContext;
- DAWN_TRY_ASSIGN(commandRecordingContext, GetPendingCommandContext());
+ DAWN_TRY(CheckDebugLayerAndGenerateErrors());
- Buffer* dstBuffer = ToBackend(destination);
+ return {};
+}
- bool cleared;
- DAWN_TRY_ASSIGN(cleared, dstBuffer->EnsureDataInitializedAsDestination(
- commandRecordingContext, destinationOffset, size));
- DAWN_UNUSED(cleared);
+MaybeError Device::NextSerial() {
+ IncrementLastSubmittedCommandSerial();
- CopyFromStagingToBufferImpl(commandRecordingContext, source, sourceOffset, destination,
- destinationOffset, size);
+ TRACE_EVENT1(GetPlatform(), General, "D3D12Device::SignalFence", "serial",
+ uint64_t(GetLastSubmittedCommandSerial()));
- return {};
+ return CheckHRESULT(
+ mCommandQueue->Signal(mFence.Get(), uint64_t(GetLastSubmittedCommandSerial())),
+ "D3D12 command queue signal fence");
+}
+
+MaybeError Device::WaitForSerial(ExecutionSerial serial) {
+ DAWN_TRY(CheckPassedSerials());
+ if (GetCompletedCommandSerial() < serial) {
+ DAWN_TRY(CheckHRESULT(mFence->SetEventOnCompletion(uint64_t(serial), mFenceEvent),
+ "D3D12 set event on completion"));
+ WaitForSingleObject(mFenceEvent, INFINITE);
+ DAWN_TRY(CheckPassedSerials());
}
+ return {};
+}
+
+ResultOrError<ExecutionSerial> Device::CheckAndUpdateCompletedSerials() {
+ ExecutionSerial completedSerial = ExecutionSerial(mFence->GetCompletedValue());
+ if (DAWN_UNLIKELY(completedSerial == ExecutionSerial(UINT64_MAX))) {
+ // GetCompletedValue returns UINT64_MAX if the device was removed.
+ // Try to query the failure reason.
+ DAWN_TRY(CheckHRESULT(mD3d12Device->GetDeviceRemovedReason(),
+ "ID3D12Device::GetDeviceRemovedReason"));
+ // Otherwise, return a generic device lost error.
+ return DAWN_DEVICE_LOST_ERROR("Device lost");
+ }
+
+ if (completedSerial <= GetCompletedCommandSerial()) {
+ return ExecutionSerial(0);
+ }
+
+ return completedSerial;
+}
+
+void Device::ReferenceUntilUnused(ComPtr<IUnknown> object) {
+ mUsedComObjectRefs.Enqueue(object, GetPendingCommandSerial());
+}
+
+MaybeError Device::ExecutePendingCommandContext() {
+ return mPendingCommands.ExecuteCommandList(this);
+}
+
+ResultOrError<Ref<BindGroupBase>> Device::CreateBindGroupImpl(
+ const BindGroupDescriptor* descriptor) {
+ return BindGroup::Create(this, descriptor);
+}
+ResultOrError<Ref<BindGroupLayoutBase>> Device::CreateBindGroupLayoutImpl(
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken) {
+ return BindGroupLayout::Create(this, descriptor, pipelineCompatibilityToken);
+}
+ResultOrError<Ref<BufferBase>> Device::CreateBufferImpl(const BufferDescriptor* descriptor) {
+ return Buffer::Create(this, descriptor);
+}
+ResultOrError<Ref<CommandBufferBase>> Device::CreateCommandBuffer(
+ CommandEncoder* encoder,
+ const CommandBufferDescriptor* descriptor) {
+ return CommandBuffer::Create(encoder, descriptor);
+}
+Ref<ComputePipelineBase> Device::CreateUninitializedComputePipelineImpl(
+ const ComputePipelineDescriptor* descriptor) {
+ return ComputePipeline::CreateUninitialized(this, descriptor);
+}
+ResultOrError<Ref<PipelineLayoutBase>> Device::CreatePipelineLayoutImpl(
+ const PipelineLayoutDescriptor* descriptor) {
+ return PipelineLayout::Create(this, descriptor);
+}
+ResultOrError<Ref<QuerySetBase>> Device::CreateQuerySetImpl(const QuerySetDescriptor* descriptor) {
+ return QuerySet::Create(this, descriptor);
+}
+Ref<RenderPipelineBase> Device::CreateUninitializedRenderPipelineImpl(
+ const RenderPipelineDescriptor* descriptor) {
+ return RenderPipeline::CreateUninitialized(this, descriptor);
+}
+ResultOrError<Ref<SamplerBase>> Device::CreateSamplerImpl(const SamplerDescriptor* descriptor) {
+ return Sampler::Create(this, descriptor);
+}
+ResultOrError<Ref<ShaderModuleBase>> Device::CreateShaderModuleImpl(
+ const ShaderModuleDescriptor* descriptor,
+ ShaderModuleParseResult* parseResult,
+ OwnedCompilationMessages* compilationMessages) {
+ return ShaderModule::Create(this, descriptor, parseResult, compilationMessages);
+}
+ResultOrError<Ref<SwapChainBase>> Device::CreateSwapChainImpl(
+ const SwapChainDescriptor* descriptor) {
+ return OldSwapChain::Create(this, descriptor);
+}
+ResultOrError<Ref<NewSwapChainBase>> Device::CreateSwapChainImpl(
+ Surface* surface,
+ NewSwapChainBase* previousSwapChain,
+ const SwapChainDescriptor* descriptor) {
+ return SwapChain::Create(this, surface, previousSwapChain, descriptor);
+}
+ResultOrError<Ref<TextureBase>> Device::CreateTextureImpl(const TextureDescriptor* descriptor) {
+ return Texture::Create(this, descriptor);
+}
+ResultOrError<Ref<TextureViewBase>> Device::CreateTextureViewImpl(
+ TextureBase* texture,
+ const TextureViewDescriptor* descriptor) {
+ return TextureView::Create(texture, descriptor);
+}
+void Device::InitializeComputePipelineAsyncImpl(Ref<ComputePipelineBase> computePipeline,
+ WGPUCreateComputePipelineAsyncCallback callback,
+ void* userdata) {
+ ComputePipeline::InitializeAsync(std::move(computePipeline), callback, userdata);
+}
+void Device::InitializeRenderPipelineAsyncImpl(Ref<RenderPipelineBase> renderPipeline,
+ WGPUCreateRenderPipelineAsyncCallback callback,
+ void* userdata) {
+ RenderPipeline::InitializeAsync(std::move(renderPipeline), callback, userdata);
+}
+
+ResultOrError<std::unique_ptr<StagingBufferBase>> Device::CreateStagingBuffer(size_t size) {
+ std::unique_ptr<StagingBufferBase> stagingBuffer = std::make_unique<StagingBuffer>(size, this);
+ DAWN_TRY(stagingBuffer->Initialize());
+ return std::move(stagingBuffer);
+}
+
+MaybeError Device::CopyFromStagingToBuffer(StagingBufferBase* source,
+ uint64_t sourceOffset,
+ BufferBase* destination,
+ uint64_t destinationOffset,
+ uint64_t size) {
+ CommandRecordingContext* commandRecordingContext;
+ DAWN_TRY_ASSIGN(commandRecordingContext, GetPendingCommandContext());
+
+ Buffer* dstBuffer = ToBackend(destination);
+
+ bool cleared;
+ DAWN_TRY_ASSIGN(cleared, dstBuffer->EnsureDataInitializedAsDestination(
+ commandRecordingContext, destinationOffset, size));
+ DAWN_UNUSED(cleared);
+
+ CopyFromStagingToBufferImpl(commandRecordingContext, source, sourceOffset, destination,
+ destinationOffset, size);
+
+ return {};
+}
+
+void Device::CopyFromStagingToBufferImpl(CommandRecordingContext* commandContext,
+ StagingBufferBase* source,
+ uint64_t sourceOffset,
+ BufferBase* destination,
+ uint64_t destinationOffset,
+ uint64_t size) {
+ ASSERT(commandContext != nullptr);
+ Buffer* dstBuffer = ToBackend(destination);
+ StagingBuffer* srcBuffer = ToBackend(source);
+ dstBuffer->TrackUsageAndTransitionNow(commandContext, wgpu::BufferUsage::CopyDst);
- void Device::CopyFromStagingToBufferImpl(CommandRecordingContext* commandContext,
- StagingBufferBase* source,
- uint64_t sourceOffset,
- BufferBase* destination,
- uint64_t destinationOffset,
- uint64_t size) {
- ASSERT(commandContext != nullptr);
- Buffer* dstBuffer = ToBackend(destination);
- StagingBuffer* srcBuffer = ToBackend(source);
- dstBuffer->TrackUsageAndTransitionNow(commandContext, wgpu::BufferUsage::CopyDst);
+ commandContext->GetCommandList()->CopyBufferRegion(dstBuffer->GetD3D12Resource(),
+ destinationOffset, srcBuffer->GetResource(),
+ sourceOffset, size);
+}
- commandContext->GetCommandList()->CopyBufferRegion(
- dstBuffer->GetD3D12Resource(), destinationOffset, srcBuffer->GetResource(),
- sourceOffset, size);
+MaybeError Device::CopyFromStagingToTexture(const StagingBufferBase* source,
+ const TextureDataLayout& src,
+ TextureCopy* dst,
+ const Extent3D& copySizePixels) {
+ CommandRecordingContext* commandContext;
+ DAWN_TRY_ASSIGN(commandContext, GetPendingCommandContext());
+ Texture* texture = ToBackend(dst->texture.Get());
+
+ SubresourceRange range = GetSubresourcesAffectedByCopy(*dst, copySizePixels);
+
+ if (IsCompleteSubresourceCopiedTo(texture, copySizePixels, dst->mipLevel)) {
+ texture->SetIsSubresourceContentInitialized(true, range);
+ } else {
+ texture->EnsureSubresourceContentInitialized(commandContext, range);
}
- MaybeError Device::CopyFromStagingToTexture(const StagingBufferBase* source,
- const TextureDataLayout& src,
- TextureCopy* dst,
- const Extent3D& copySizePixels) {
- CommandRecordingContext* commandContext;
- DAWN_TRY_ASSIGN(commandContext, GetPendingCommandContext());
- Texture* texture = ToBackend(dst->texture.Get());
+ texture->TrackUsageAndTransitionNow(commandContext, wgpu::TextureUsage::CopyDst, range);
- SubresourceRange range = GetSubresourcesAffectedByCopy(*dst, copySizePixels);
+ RecordBufferTextureCopyWithBufferHandle(
+ BufferTextureCopyDirection::B2T, commandContext->GetCommandList(),
+ ToBackend(source)->GetResource(), src.offset, src.bytesPerRow, src.rowsPerImage, *dst,
+ copySizePixels);
- if (IsCompleteSubresourceCopiedTo(texture, copySizePixels, dst->mipLevel)) {
- texture->SetIsSubresourceContentInitialized(true, range);
- } else {
- texture->EnsureSubresourceContentInitialized(commandContext, range);
- }
+ return {};
+}
- texture->TrackUsageAndTransitionNow(commandContext, wgpu::TextureUsage::CopyDst, range);
+void Device::DeallocateMemory(ResourceHeapAllocation& allocation) {
+ mResourceAllocatorManager->DeallocateMemory(allocation);
+}
- RecordBufferTextureCopyWithBufferHandle(
- BufferTextureCopyDirection::B2T, commandContext->GetCommandList(),
- ToBackend(source)->GetResource(), src.offset, src.bytesPerRow, src.rowsPerImage, *dst,
- copySizePixels);
+ResultOrError<ResourceHeapAllocation> Device::AllocateMemory(
+ D3D12_HEAP_TYPE heapType,
+ const D3D12_RESOURCE_DESC& resourceDescriptor,
+ D3D12_RESOURCE_STATES initialUsage) {
+ return mResourceAllocatorManager->AllocateMemory(heapType, resourceDescriptor, initialUsage);
+}
- return {};
+std::unique_ptr<ExternalImageDXGIImpl> Device::CreateExternalImageDXGIImpl(
+ const ExternalImageDescriptorDXGISharedHandle* descriptor) {
+ Microsoft::WRL::ComPtr<ID3D12Resource> d3d12Resource;
+ if (FAILED(GetD3D12Device()->OpenSharedHandle(descriptor->sharedHandle,
+ IID_PPV_ARGS(&d3d12Resource)))) {
+ return nullptr;
+ }
+
+ const TextureDescriptor* textureDescriptor = FromAPI(descriptor->cTextureDescriptor);
+
+ if (ConsumedError(ValidateTextureDescriptor(this, textureDescriptor))) {
+ return nullptr;
}
- void Device::DeallocateMemory(ResourceHeapAllocation& allocation) {
- mResourceAllocatorManager->DeallocateMemory(allocation);
+ if (ConsumedError(ValidateTextureDescriptorCanBeWrapped(textureDescriptor),
+ "validating that a D3D12 external image can be wrapped with %s",
+ textureDescriptor)) {
+ return nullptr;
}
- ResultOrError<ResourceHeapAllocation> Device::AllocateMemory(
- D3D12_HEAP_TYPE heapType,
- const D3D12_RESOURCE_DESC& resourceDescriptor,
- D3D12_RESOURCE_STATES initialUsage) {
- return mResourceAllocatorManager->AllocateMemory(heapType, resourceDescriptor,
- initialUsage);
+ if (ConsumedError(ValidateD3D12TextureCanBeWrapped(d3d12Resource.Get(), textureDescriptor))) {
+ return nullptr;
}
- Ref<TextureBase> Device::CreateD3D12ExternalTexture(
- const TextureDescriptor* descriptor,
- ComPtr<ID3D12Resource> d3d12Texture,
- Ref<D3D11on12ResourceCacheEntry> d3d11on12Resource,
- bool isSwapChainTexture,
- bool isInitialized) {
- Ref<Texture> dawnTexture;
- if (ConsumedError(Texture::CreateExternalImage(this, descriptor, std::move(d3d12Texture),
- std::move(d3d11on12Resource),
- isSwapChainTexture, isInitialized),
- &dawnTexture)) {
+    // The shared handle is assumed to support resource sharing. The resource sharing
+    // capability tier must allow resources to be shared between D3D devices.
+ const Format* format = GetInternalFormat(textureDescriptor->format).AcquireSuccess();
+ if (format->IsMultiPlanar()) {
+ if (ConsumedError(ValidateD3D12VideoTextureCanBeShared(
+ this, D3D12TextureFormat(textureDescriptor->format)))) {
return nullptr;
}
- return {dawnTexture};
}
- ComPtr<ID3D11On12Device> Device::GetOrCreateD3D11on12Device() {
- if (mD3d11On12Device == nullptr) {
- ComPtr<ID3D11Device> d3d11Device;
- D3D_FEATURE_LEVEL d3dFeatureLevel;
- IUnknown* const iUnknownQueue = mCommandQueue.Get();
- if (FAILED(GetFunctions()->d3d11on12CreateDevice(mD3d12Device.Get(), 0, nullptr, 0,
- &iUnknownQueue, 1, 1, &d3d11Device,
- nullptr, &d3dFeatureLevel))) {
- return nullptr;
- }
+ auto impl = std::make_unique<ExternalImageDXGIImpl>(this, std::move(d3d12Resource),
+ descriptor->cTextureDescriptor);
+ mExternalImageList.Append(impl.get());
+ return impl;
+}
+
+Ref<TextureBase> Device::CreateD3D12ExternalTexture(
+ const TextureDescriptor* descriptor,
+ ComPtr<ID3D12Resource> d3d12Texture,
+ Ref<D3D11on12ResourceCacheEntry> d3d11on12Resource,
+ bool isSwapChainTexture,
+ bool isInitialized) {
+ Ref<Texture> dawnTexture;
+ if (ConsumedError(Texture::CreateExternalImage(this, descriptor, std::move(d3d12Texture),
+ std::move(d3d11on12Resource), isSwapChainTexture,
+ isInitialized),
+ &dawnTexture)) {
+ return nullptr;
+ }
+ return {dawnTexture};
+}
+
+ComPtr<ID3D11On12Device> Device::GetOrCreateD3D11on12Device() {
+ if (mD3d11On12Device == nullptr) {
+ ComPtr<ID3D11Device> d3d11Device;
+ D3D_FEATURE_LEVEL d3dFeatureLevel;
+ IUnknown* const iUnknownQueue = mCommandQueue.Get();
+ if (FAILED(GetFunctions()->d3d11on12CreateDevice(mD3d12Device.Get(), 0, nullptr, 0,
+ &iUnknownQueue, 1, 1, &d3d11Device,
+ nullptr, &d3dFeatureLevel))) {
+ return nullptr;
+ }
- ComPtr<ID3D11On12Device> d3d11on12Device;
- HRESULT hr = d3d11Device.As(&d3d11on12Device);
- ASSERT(SUCCEEDED(hr));
+ ComPtr<ID3D11On12Device> d3d11on12Device;
+ HRESULT hr = d3d11Device.As(&d3d11on12Device);
+ ASSERT(SUCCEEDED(hr));
+
+ mD3d11On12Device = std::move(d3d11on12Device);
+ }
+ return mD3d11On12Device;
+}
+
+const D3D12DeviceInfo& Device::GetDeviceInfo() const {
+ return ToBackend(GetAdapter())->GetDeviceInfo();
+}
+
+void Device::InitTogglesFromDriver() {
+ const bool useResourceHeapTier2 = (GetDeviceInfo().resourceHeapTier >= 2);
+ SetToggle(Toggle::UseD3D12ResourceHeapTier2, useResourceHeapTier2);
+ SetToggle(Toggle::UseD3D12RenderPass, GetDeviceInfo().supportsRenderPass);
+ SetToggle(Toggle::UseD3D12ResidencyManagement, true);
+ SetToggle(Toggle::UseDXC, false);
+
+ // Disable optimizations when using FXC
+ // See https://crbug.com/dawn/1203
+ SetToggle(Toggle::FxcOptimizations, false);
+
+ // By default use the maximum shader-visible heap size allowed.
+ SetToggle(Toggle::UseD3D12SmallShaderVisibleHeapForTesting, false);
+
+ uint32_t deviceId = GetAdapter()->GetDeviceId();
+ uint32_t vendorId = GetAdapter()->GetVendorId();
+
+ // Currently this workaround is only needed on Intel Gen9 and Gen9.5 GPUs.
+ // See http://crbug.com/1161355 for more information.
+ if (gpu_info::IsIntelGen9(vendorId, deviceId)) {
+ constexpr gpu_info::D3DDriverVersion kFirstDriverVersionWithFix = {30, 0, 100, 9864};
+ if (gpu_info::CompareD3DDriverVersion(vendorId, ToBackend(GetAdapter())->GetDriverVersion(),
+ kFirstDriverVersionWithFix) < 0) {
+ SetToggle(
+ Toggle::UseTempBufferInSmallFormatTextureToTextureCopyFromGreaterToLessMipLevel,
+ true);
+ }
+ }
- mD3d11On12Device = std::move(d3d11on12Device);
+    // Currently this workaround is needed on every D3D12 backend for certain copy situations.
+    // We may be able to limit it once the D3D12 runtime fixes the bug in a future release. See
+    // https://crbug.com/dawn/1289 for more information.
+ SetToggle(Toggle::D3D12SplitBufferTextureCopyForRowsPerImagePaddings, true);
+}
+
+MaybeError Device::WaitForIdleForDestruction() {
+ // Immediately forget about all pending commands
+ mPendingCommands.Release();
+
+ DAWN_TRY(NextSerial());
+ // Wait for all in-flight commands to finish executing
+ DAWN_TRY(WaitForSerial(GetLastSubmittedCommandSerial()));
+
+ return {};
+}
+
+void AppendDebugLayerMessagesToError(ID3D12InfoQueue* infoQueue,
+ uint64_t totalErrors,
+ ErrorData* error) {
+ ASSERT(totalErrors > 0);
+ ASSERT(error != nullptr);
+
+ uint64_t errorsToPrint = std::min(kMaxDebugMessagesToPrint, totalErrors);
+ for (uint64_t i = 0; i < errorsToPrint; ++i) {
+ std::ostringstream messageStream;
+ SIZE_T messageLength = 0;
+ HRESULT hr = infoQueue->GetMessage(i, nullptr, &messageLength);
+ if (FAILED(hr)) {
+ messageStream << " ID3D12InfoQueue::GetMessage failed with " << hr;
+ error->AppendBackendMessage(messageStream.str());
+ continue;
}
- return mD3d11On12Device;
- }
-
- const D3D12DeviceInfo& Device::GetDeviceInfo() const {
- return ToBackend(GetAdapter())->GetDeviceInfo();
- }
-
- void Device::InitTogglesFromDriver() {
- const bool useResourceHeapTier2 = (GetDeviceInfo().resourceHeapTier >= 2);
- SetToggle(Toggle::UseD3D12ResourceHeapTier2, useResourceHeapTier2);
- SetToggle(Toggle::UseD3D12RenderPass, GetDeviceInfo().supportsRenderPass);
- SetToggle(Toggle::UseD3D12ResidencyManagement, true);
- SetToggle(Toggle::UseDXC, false);
-
- // Disable optimizations when using FXC
- // See https://crbug.com/dawn/1203
- SetToggle(Toggle::FxcOptimizations, false);
-
- // By default use the maximum shader-visible heap size allowed.
- SetToggle(Toggle::UseD3D12SmallShaderVisibleHeapForTesting, false);
-
- uint32_t deviceId = GetAdapter()->GetDeviceId();
- uint32_t vendorId = GetAdapter()->GetVendorId();
-
- // Currently this workaround is only needed on Intel Gen9 and Gen9.5 GPUs.
- // See http://crbug.com/1161355 for more information.
- if (gpu_info::IsIntel(vendorId) &&
- (gpu_info::IsSkylake(deviceId) || gpu_info::IsKabylake(deviceId) ||
- gpu_info::IsCoffeelake(deviceId))) {
- constexpr gpu_info::D3DDriverVersion kFirstDriverVersionWithFix = {30, 0, 100, 9864};
- if (gpu_info::CompareD3DDriverVersion(vendorId,
- ToBackend(GetAdapter())->GetDriverVersion(),
- kFirstDriverVersionWithFix) < 0) {
- SetToggle(
- Toggle::UseTempBufferInSmallFormatTextureToTextureCopyFromGreaterToLessMipLevel,
- true);
- }
+
+ std::unique_ptr<uint8_t[]> messageData(new uint8_t[messageLength]);
+ D3D12_MESSAGE* message = reinterpret_cast<D3D12_MESSAGE*>(messageData.get());
+ hr = infoQueue->GetMessage(i, message, &messageLength);
+ if (FAILED(hr)) {
+ messageStream << " ID3D12InfoQueue::GetMessage failed with " << hr;
+ error->AppendBackendMessage(messageStream.str());
+ continue;
}
+
+ messageStream << message->pDescription << " (" << message->ID << ")";
+ error->AppendBackendMessage(messageStream.str());
+ }
+ if (errorsToPrint < totalErrors) {
+ std::ostringstream messages;
+ messages << (totalErrors - errorsToPrint) << " messages silenced";
+ error->AppendBackendMessage(messages.str());
}
- MaybeError Device::WaitForIdleForDestruction() {
- // Immediately forget about all pending commands
- mPendingCommands.Release();
+ // We only print up to the first kMaxDebugMessagesToPrint errors
+ infoQueue->ClearStoredMessages();
+}
- DAWN_TRY(NextSerial());
- // Wait for all in-flight commands to finish executing
- DAWN_TRY(WaitForSerial(GetLastSubmittedCommandSerial()));
+MaybeError Device::CheckDebugLayerAndGenerateErrors() {
+ if (!GetAdapter()->GetInstance()->IsBackendValidationEnabled()) {
+ return {};
+ }
+ ComPtr<ID3D12InfoQueue> infoQueue;
+ DAWN_TRY(CheckHRESULT(mD3d12Device.As(&infoQueue),
+ "D3D12 QueryInterface ID3D12Device to ID3D12InfoQueue"));
+ uint64_t totalErrors = infoQueue->GetNumStoredMessagesAllowedByRetrievalFilter();
+
+    // Check if any errors have occurred; otherwise we would be creating an empty error. Note
+    // that we use GetNumStoredMessagesAllowedByRetrievalFilter instead of GetNumStoredMessages
+    // because we only convert messages of severity WARNING or higher to Dawn errors.
+ if (totalErrors == 0) {
return {};
}
- MaybeError Device::CheckDebugLayerAndGenerateErrors() {
- if (!GetAdapter()->GetInstance()->IsBackendValidationEnabled()) {
- return {};
- }
+ auto error = DAWN_INTERNAL_ERROR("The D3D12 debug layer reported uncaught errors.");
- ComPtr<ID3D12InfoQueue> infoQueue;
- DAWN_TRY(CheckHRESULT(mD3d12Device.As(&infoQueue),
- "D3D12 QueryInterface ID3D12Device to ID3D12InfoQueue"));
- uint64_t totalErrors = infoQueue->GetNumStoredMessagesAllowedByRetrievalFilter();
+ AppendDebugLayerMessagesToError(infoQueue.Get(), totalErrors, error.get());
- // Check if any errors have occurred otherwise we would be creating an empty error. Note
- // that we use GetNumStoredMessagesAllowedByRetrievalFilter instead of GetNumStoredMessages
- // because we only convert WARNINGS or higher messages to dawn errors.
- if (totalErrors == 0) {
- return {};
- }
+ return error;
+}
- std::ostringstream messages;
- uint64_t errorsToPrint = std::min(kMaxDebugMessagesToPrint, totalErrors);
- for (uint64_t i = 0; i < errorsToPrint; ++i) {
- SIZE_T messageLength = 0;
- HRESULT hr = infoQueue->GetMessage(i, nullptr, &messageLength);
- if (FAILED(hr)) {
- messages << " ID3D12InfoQueue::GetMessage failed with " << hr << '\n';
- continue;
- }
-
- std::unique_ptr<uint8_t[]> messageData(new uint8_t[messageLength]);
- D3D12_MESSAGE* message = reinterpret_cast<D3D12_MESSAGE*>(messageData.get());
- hr = infoQueue->GetMessage(i, message, &messageLength);
- if (FAILED(hr)) {
- messages << " ID3D12InfoQueue::GetMessage failed with " << hr << '\n';
- continue;
- }
-
- messages << message->pDescription << " (" << message->ID << ")\n";
- }
- if (errorsToPrint < totalErrors) {
- messages << (totalErrors - errorsToPrint) << " messages silenced\n";
- }
- // We only print up to the first kMaxDebugMessagesToPrint errors
- infoQueue->ClearStoredMessages();
+void Device::AppendDebugLayerMessages(ErrorData* error) {
+ if (!GetAdapter()->GetInstance()->IsBackendValidationEnabled()) {
+ return;
+ }
- return DAWN_INTERNAL_ERROR(messages.str());
+ ComPtr<ID3D12InfoQueue> infoQueue;
+ if (FAILED(mD3d12Device.As(&infoQueue))) {
+ return;
}
+ uint64_t totalErrors = infoQueue->GetNumStoredMessagesAllowedByRetrievalFilter();
- void Device::DestroyImpl() {
- ASSERT(GetState() == State::Disconnected);
+ if (totalErrors == 0) {
+ return;
+ }
- // Immediately forget about all pending commands for the case where device is lost on its
- // own and WaitForIdleForDestruction isn't called.
- mPendingCommands.Release();
+ AppendDebugLayerMessagesToError(infoQueue.Get(), totalErrors, error);
+}
- if (mFenceEvent != nullptr) {
- ::CloseHandle(mFenceEvent);
- }
+void Device::DestroyImpl() {
+ ASSERT(GetState() == State::Disconnected);
- // Release recycled resource heaps.
- if (mResourceAllocatorManager != nullptr) {
- mResourceAllocatorManager->DestroyPool();
- }
+ while (!mExternalImageList.empty()) {
+ ExternalImageDXGIImpl* externalImage = mExternalImageList.head()->value();
+ // ExternalImageDXGIImpl::Destroy() calls RemoveFromList().
+ externalImage->Destroy();
+ }
- // We need to handle clearing up com object refs that were enqeued after TickImpl
- mUsedComObjectRefs.ClearUpTo(std::numeric_limits<ExecutionSerial>::max());
+ mZeroBuffer = nullptr;
- ASSERT(mUsedComObjectRefs.Empty());
- ASSERT(!mPendingCommands.IsOpen());
- }
+ // Immediately forget about all pending commands for the case where device is lost on its
+ // own and WaitForIdleForDestruction isn't called.
+ mPendingCommands.Release();
- ShaderVisibleDescriptorAllocator* Device::GetViewShaderVisibleDescriptorAllocator() const {
- return mViewShaderVisibleDescriptorAllocator.get();
+ if (mFenceEvent != nullptr) {
+ ::CloseHandle(mFenceEvent);
}
- ShaderVisibleDescriptorAllocator* Device::GetSamplerShaderVisibleDescriptorAllocator() const {
- return mSamplerShaderVisibleDescriptorAllocator.get();
+ // Release recycled resource heaps.
+ if (mResourceAllocatorManager != nullptr) {
+ mResourceAllocatorManager->DestroyPool();
}
- StagingDescriptorAllocator* Device::GetViewStagingDescriptorAllocator(
- uint32_t descriptorCount) const {
- ASSERT(descriptorCount <= kMaxViewDescriptorsPerBindGroup);
- // This is Log2 of the next power of two, plus 1.
- uint32_t allocatorIndex = descriptorCount == 0 ? 0 : Log2Ceil(descriptorCount) + 1;
- return mViewAllocators[allocatorIndex].get();
- }
+    // We need to handle clearing up COM object refs that were enqueued after TickImpl
+ mUsedComObjectRefs.ClearUpTo(std::numeric_limits<ExecutionSerial>::max());
- StagingDescriptorAllocator* Device::GetSamplerStagingDescriptorAllocator(
- uint32_t descriptorCount) const {
- ASSERT(descriptorCount <= kMaxSamplerDescriptorsPerBindGroup);
- // This is Log2 of the next power of two, plus 1.
- uint32_t allocatorIndex = descriptorCount == 0 ? 0 : Log2Ceil(descriptorCount) + 1;
- return mSamplerAllocators[allocatorIndex].get();
- }
+ ASSERT(mUsedComObjectRefs.Empty());
+ ASSERT(!mPendingCommands.IsOpen());
+}
- StagingDescriptorAllocator* Device::GetRenderTargetViewAllocator() const {
- return mRenderTargetViewAllocator.get();
- }
+ShaderVisibleDescriptorAllocator* Device::GetViewShaderVisibleDescriptorAllocator() const {
+ return mViewShaderVisibleDescriptorAllocator.get();
+}
- StagingDescriptorAllocator* Device::GetDepthStencilViewAllocator() const {
- return mDepthStencilViewAllocator.get();
- }
+ShaderVisibleDescriptorAllocator* Device::GetSamplerShaderVisibleDescriptorAllocator() const {
+ return mSamplerShaderVisibleDescriptorAllocator.get();
+}
- SamplerHeapCache* Device::GetSamplerHeapCache() {
- return mSamplerHeapCache.get();
- }
+StagingDescriptorAllocator* Device::GetViewStagingDescriptorAllocator(
+ uint32_t descriptorCount) const {
+ ASSERT(descriptorCount <= kMaxViewDescriptorsPerBindGroup);
+ // This is Log2 of the next power of two, plus 1.
+ uint32_t allocatorIndex = descriptorCount == 0 ? 0 : Log2Ceil(descriptorCount) + 1;
+ return mViewAllocators[allocatorIndex].get();
+}
- uint32_t Device::GetOptimalBytesPerRowAlignment() const {
- return D3D12_TEXTURE_DATA_PITCH_ALIGNMENT;
- }
+StagingDescriptorAllocator* Device::GetSamplerStagingDescriptorAllocator(
+ uint32_t descriptorCount) const {
+ ASSERT(descriptorCount <= kMaxSamplerDescriptorsPerBindGroup);
+ // This is Log2 of the next power of two, plus 1.
+ uint32_t allocatorIndex = descriptorCount == 0 ? 0 : Log2Ceil(descriptorCount) + 1;
+ return mSamplerAllocators[allocatorIndex].get();
+}
- // TODO(dawn:512): Once we optimize DynamicUploader allocation with offsets we
- // should make this return D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT = 512.
- // Current implementations would try to allocate additional 511 bytes,
- // so we return 1 and let ComputeTextureCopySplits take care of the alignment.
- uint64_t Device::GetOptimalBufferToTextureCopyOffsetAlignment() const {
- return 1;
- }
+StagingDescriptorAllocator* Device::GetRenderTargetViewAllocator() const {
+ return mRenderTargetViewAllocator.get();
+}
- float Device::GetTimestampPeriodInNS() const {
- return mTimestampPeriod;
- }
+StagingDescriptorAllocator* Device::GetDepthStencilViewAllocator() const {
+ return mDepthStencilViewAllocator.get();
+}
- bool Device::ShouldDuplicateNumWorkgroupsForDispatchIndirect(
- ComputePipelineBase* computePipeline) const {
- return ToBackend(computePipeline)->UsesNumWorkgroups();
- }
+SamplerHeapCache* Device::GetSamplerHeapCache() {
+ return mSamplerHeapCache.get();
+}
+
+uint32_t Device::GetOptimalBytesPerRowAlignment() const {
+ return D3D12_TEXTURE_DATA_PITCH_ALIGNMENT;
+}
+
+// TODO(dawn:512): Once we optimize DynamicUploader allocation with offsets we
+// should make this return D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT = 512.
+// Current implementations would try to allocate additional 511 bytes,
+// so we return 1 and let ComputeTextureCopySplits take care of the alignment.
+uint64_t Device::GetOptimalBufferToTextureCopyOffsetAlignment() const {
+ return 1;
+}
+
+float Device::GetTimestampPeriodInNS() const {
+ return mTimestampPeriod;
+}
+
+bool Device::ShouldDuplicateNumWorkgroupsForDispatchIndirect(
+ ComputePipelineBase* computePipeline) const {
+ return ToBackend(computePipeline)->UsesNumWorkgroups();
+}
- void Device::SetLabelImpl() {
- SetDebugName(this, mD3d12Device.Get(), "Dawn_Device", GetLabel());
+bool Device::IsFeatureEnabled(Feature feature) const {
+    // Currently we can only use DXC to compile HLSL shaders that use float16, and
+    // ChromiumExperimentalDp4a is an experimental feature that can only be enabled with the
+    // "use_dxc" toggle.
+ if ((feature == Feature::ChromiumExperimentalDp4a || feature == Feature::ShaderFloat16) &&
+ !IsToggleEnabled(Toggle::UseDXC)) {
+ return false;
}
+ return DeviceBase::IsFeatureEnabled(feature);
+}
+
+void Device::SetLabelImpl() {
+ SetDebugName(this, mD3d12Device.Get(), "Dawn_Device", GetLabel());
+}
+
+bool Device::MayRequireDuplicationOfIndirectParameters() const {
+ return true;
+}
+
+bool Device::ShouldDuplicateParametersForDrawIndirect(
+ const RenderPipelineBase* renderPipelineBase) const {
+ return ToBackend(renderPipelineBase)->UsesVertexOrInstanceIndex();
+}
} // namespace dawn::native::d3d12
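
The AppendDebugLayerMessagesToError helper factored out above drains the debug layer using the standard two-call ID3D12InfoQueue::GetMessage idiom: the first call with a null message pointer reports the required byte count, and the second call fills a caller-allocated buffer. A rough standalone sketch of that idiom, outside Dawn's ErrorData plumbing and with error handling reduced to early continues (the info queue is assumed to come from QueryInterface on a device created with the debug layer enabled):

    #include <d3d12.h>
    #include <d3d12sdklayers.h>
    #include <cstdint>
    #include <iostream>
    #include <vector>

    // Prints and clears every message currently retained by the info queue.
    void PrintStoredMessages(ID3D12InfoQueue* infoQueue) {
        const UINT64 count = infoQueue->GetNumStoredMessagesAllowedByRetrievalFilter();
        for (UINT64 i = 0; i < count; ++i) {
            SIZE_T length = 0;
            if (FAILED(infoQueue->GetMessage(i, nullptr, &length))) {
                continue;  // first call only queries the required size
            }
            std::vector<uint8_t> storage(length);
            D3D12_MESSAGE* message = reinterpret_cast<D3D12_MESSAGE*>(storage.data());
            if (FAILED(infoQueue->GetMessage(i, message, &length))) {
                continue;  // second call copies the message into our buffer
            }
            std::cout << message->pDescription << " (" << message->ID << ")\n";
        }
        infoQueue->ClearStoredMessages();  // avoid re-reporting the same messages later
    }
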
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/DeviceD3D12.h b/chromium/third_party/dawn/src/dawn/native/d3d12/DeviceD3D12.h
index f8290f57a0f..5371e5b0ce8 100644
--- a/chromium/third_party/dawn/src/dawn/native/d3d12/DeviceD3D12.h
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/DeviceD3D12.h
@@ -15,6 +15,9 @@
#ifndef SRC_DAWN_NATIVE_D3D12_DEVICED3D12_H_
#define SRC_DAWN_NATIVE_D3D12_DEVICED3D12_H_
+#include <memory>
+#include <vector>
+
#include "dawn/common/SerialQueue.h"
#include "dawn/native/Device.h"
#include "dawn/native/d3d12/CommandRecordingContext.h"
@@ -24,13 +27,15 @@
namespace dawn::native::d3d12 {
- class CommandAllocatorManager;
- class PlatformFunctions;
- class ResidencyManager;
- class ResourceAllocatorManager;
- class SamplerHeapCache;
- class ShaderVisibleDescriptorAllocator;
- class StagingDescriptorAllocator;
+class CommandAllocatorManager;
+struct ExternalImageDescriptorDXGISharedHandle;
+class ExternalImageDXGIImpl;
+class PlatformFunctions;
+class ResidencyManager;
+class ResourceAllocatorManager;
+class SamplerHeapCache;
+class ShaderVisibleDescriptorAllocator;
+class StagingDescriptorAllocator;
#define ASSERT_SUCCESS(hr) \
do { \
@@ -38,230 +43,238 @@ namespace dawn::native::d3d12 {
ASSERT(SUCCEEDED(succeeded)); \
} while (0)
- // Definition of backend types
- class Device final : public DeviceBase {
- public:
- static ResultOrError<Ref<Device>> Create(Adapter* adapter,
- const DeviceDescriptor* descriptor);
- ~Device() override;
+// Definition of backend types
+class Device final : public DeviceBase {
+ public:
+ static ResultOrError<Ref<Device>> Create(Adapter* adapter, const DeviceDescriptor* descriptor);
+ ~Device() override;
+
+ MaybeError Initialize(const DeviceDescriptor* descriptor);
+
+ ResultOrError<Ref<CommandBufferBase>> CreateCommandBuffer(
+ CommandEncoder* encoder,
+ const CommandBufferDescriptor* descriptor) override;
+
+ MaybeError TickImpl() override;
+
+ ID3D12Device* GetD3D12Device() const;
+ ComPtr<ID3D12CommandQueue> GetCommandQueue() const;
+ ID3D12SharingContract* GetSharingContract() const;
- MaybeError Initialize(const DeviceDescriptor* descriptor);
+ ComPtr<ID3D12CommandSignature> GetDispatchIndirectSignature() const;
+ ComPtr<ID3D12CommandSignature> GetDrawIndirectSignature() const;
+ ComPtr<ID3D12CommandSignature> GetDrawIndexedIndirectSignature() const;
- ResultOrError<Ref<CommandBufferBase>> CreateCommandBuffer(
- CommandEncoder* encoder,
- const CommandBufferDescriptor* descriptor) override;
+ CommandAllocatorManager* GetCommandAllocatorManager() const;
+ ResidencyManager* GetResidencyManager() const;
- MaybeError TickImpl() override;
+ const PlatformFunctions* GetFunctions() const;
+ ComPtr<IDXGIFactory4> GetFactory() const;
+ ComPtr<IDxcLibrary> GetDxcLibrary() const;
+ ComPtr<IDxcCompiler> GetDxcCompiler() const;
+ ComPtr<IDxcValidator> GetDxcValidator() const;
- ID3D12Device* GetD3D12Device() const;
- ComPtr<ID3D12CommandQueue> GetCommandQueue() const;
- ID3D12SharingContract* GetSharingContract() const;
+ ResultOrError<CommandRecordingContext*> GetPendingCommandContext();
- ComPtr<ID3D12CommandSignature> GetDispatchIndirectSignature() const;
- ComPtr<ID3D12CommandSignature> GetDrawIndirectSignature() const;
- ComPtr<ID3D12CommandSignature> GetDrawIndexedIndirectSignature() const;
+ MaybeError ClearBufferToZero(CommandRecordingContext* commandContext,
+ BufferBase* destination,
+ uint64_t destinationOffset,
+ uint64_t size);
- CommandAllocatorManager* GetCommandAllocatorManager() const;
- ResidencyManager* GetResidencyManager() const;
+ const D3D12DeviceInfo& GetDeviceInfo() const;
- const PlatformFunctions* GetFunctions() const;
- ComPtr<IDXGIFactory4> GetFactory() const;
- ComPtr<IDxcLibrary> GetDxcLibrary() const;
- ComPtr<IDxcCompiler> GetDxcCompiler() const;
- ComPtr<IDxcValidator> GetDxcValidator() const;
+ MaybeError NextSerial();
+ MaybeError WaitForSerial(ExecutionSerial serial);
- ResultOrError<CommandRecordingContext*> GetPendingCommandContext();
+ void ReferenceUntilUnused(ComPtr<IUnknown> object);
- MaybeError ClearBufferToZero(CommandRecordingContext* commandContext,
+ MaybeError ExecutePendingCommandContext();
+
+ ResultOrError<std::unique_ptr<StagingBufferBase>> CreateStagingBuffer(size_t size) override;
+ MaybeError CopyFromStagingToBuffer(StagingBufferBase* source,
+ uint64_t sourceOffset,
+ BufferBase* destination,
+ uint64_t destinationOffset,
+ uint64_t size) override;
+
+ void CopyFromStagingToBufferImpl(CommandRecordingContext* commandContext,
+ StagingBufferBase* source,
+ uint64_t sourceOffset,
BufferBase* destination,
uint64_t destinationOffset,
uint64_t size);
- const D3D12DeviceInfo& GetDeviceInfo() const;
-
- MaybeError NextSerial();
- MaybeError WaitForSerial(ExecutionSerial serial);
-
- void ReferenceUntilUnused(ComPtr<IUnknown> object);
-
- MaybeError ExecutePendingCommandContext();
-
- ResultOrError<std::unique_ptr<StagingBufferBase>> CreateStagingBuffer(size_t size) override;
- MaybeError CopyFromStagingToBuffer(StagingBufferBase* source,
- uint64_t sourceOffset,
- BufferBase* destination,
- uint64_t destinationOffset,
- uint64_t size) override;
-
- void CopyFromStagingToBufferImpl(CommandRecordingContext* commandContext,
- StagingBufferBase* source,
- uint64_t sourceOffset,
- BufferBase* destination,
- uint64_t destinationOffset,
- uint64_t size);
-
- MaybeError CopyFromStagingToTexture(const StagingBufferBase* source,
- const TextureDataLayout& src,
- TextureCopy* dst,
- const Extent3D& copySizePixels) override;
-
- ResultOrError<ResourceHeapAllocation> AllocateMemory(
- D3D12_HEAP_TYPE heapType,
- const D3D12_RESOURCE_DESC& resourceDescriptor,
- D3D12_RESOURCE_STATES initialUsage);
-
- void DeallocateMemory(ResourceHeapAllocation& allocation);
-
- ShaderVisibleDescriptorAllocator* GetViewShaderVisibleDescriptorAllocator() const;
- ShaderVisibleDescriptorAllocator* GetSamplerShaderVisibleDescriptorAllocator() const;
-
- // Returns nullptr when descriptor count is zero.
- StagingDescriptorAllocator* GetViewStagingDescriptorAllocator(
- uint32_t descriptorCount) const;
+ MaybeError CopyFromStagingToTexture(const StagingBufferBase* source,
+ const TextureDataLayout& src,
+ TextureCopy* dst,
+ const Extent3D& copySizePixels) override;
+
+ ResultOrError<ResourceHeapAllocation> AllocateMemory(
+ D3D12_HEAP_TYPE heapType,
+ const D3D12_RESOURCE_DESC& resourceDescriptor,
+ D3D12_RESOURCE_STATES initialUsage);
+
+ void DeallocateMemory(ResourceHeapAllocation& allocation);
+
+ ShaderVisibleDescriptorAllocator* GetViewShaderVisibleDescriptorAllocator() const;
+ ShaderVisibleDescriptorAllocator* GetSamplerShaderVisibleDescriptorAllocator() const;
+
+ // Returns nullptr when descriptor count is zero.
+ StagingDescriptorAllocator* GetViewStagingDescriptorAllocator(uint32_t descriptorCount) const;
+
+ StagingDescriptorAllocator* GetSamplerStagingDescriptorAllocator(
+ uint32_t descriptorCount) const;
+
+ SamplerHeapCache* GetSamplerHeapCache();
+
+ StagingDescriptorAllocator* GetRenderTargetViewAllocator() const;
+
+ StagingDescriptorAllocator* GetDepthStencilViewAllocator() const;
+
+ std::unique_ptr<ExternalImageDXGIImpl> CreateExternalImageDXGIImpl(
+ const ExternalImageDescriptorDXGISharedHandle* descriptor);
+
+ Ref<TextureBase> CreateD3D12ExternalTexture(const TextureDescriptor* descriptor,
+ ComPtr<ID3D12Resource> d3d12Texture,
+ Ref<D3D11on12ResourceCacheEntry> d3d11on12Resource,
+ bool isSwapChainTexture,
+ bool isInitialized);
+
+ ComPtr<ID3D11On12Device> GetOrCreateD3D11on12Device();
+
+ void InitTogglesFromDriver();
+
+ uint32_t GetOptimalBytesPerRowAlignment() const override;
+ uint64_t GetOptimalBufferToTextureCopyOffsetAlignment() const override;
+
+ float GetTimestampPeriodInNS() const override;
+
+ bool ShouldDuplicateNumWorkgroupsForDispatchIndirect(
+ ComputePipelineBase* computePipeline) const override;
+
+ bool MayRequireDuplicationOfIndirectParameters() const override;
+
+ bool ShouldDuplicateParametersForDrawIndirect(
+ const RenderPipelineBase* renderPipelineBase) const override;
+
+ bool IsFeatureEnabled(Feature feature) const override;
+
+ // Dawn APIs
+ void SetLabelImpl() override;
+
+ private:
+ using DeviceBase::DeviceBase;
+
+ ResultOrError<Ref<BindGroupBase>> CreateBindGroupImpl(
+ const BindGroupDescriptor* descriptor) override;
+ ResultOrError<Ref<BindGroupLayoutBase>> CreateBindGroupLayoutImpl(
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken) override;
+ ResultOrError<Ref<BufferBase>> CreateBufferImpl(const BufferDescriptor* descriptor) override;
+ ResultOrError<Ref<PipelineLayoutBase>> CreatePipelineLayoutImpl(
+ const PipelineLayoutDescriptor* descriptor) override;
+ ResultOrError<Ref<QuerySetBase>> CreateQuerySetImpl(
+ const QuerySetDescriptor* descriptor) override;
+ ResultOrError<Ref<SamplerBase>> CreateSamplerImpl(const SamplerDescriptor* descriptor) override;
+ ResultOrError<Ref<ShaderModuleBase>> CreateShaderModuleImpl(
+ const ShaderModuleDescriptor* descriptor,
+ ShaderModuleParseResult* parseResult,
+ OwnedCompilationMessages* compilationMessages) override;
+ ResultOrError<Ref<SwapChainBase>> CreateSwapChainImpl(
+ const SwapChainDescriptor* descriptor) override;
+ ResultOrError<Ref<NewSwapChainBase>> CreateSwapChainImpl(
+ Surface* surface,
+ NewSwapChainBase* previousSwapChain,
+ const SwapChainDescriptor* descriptor) override;
+ ResultOrError<Ref<TextureBase>> CreateTextureImpl(const TextureDescriptor* descriptor) override;
+ ResultOrError<Ref<TextureViewBase>> CreateTextureViewImpl(
+ TextureBase* texture,
+ const TextureViewDescriptor* descriptor) override;
+ Ref<ComputePipelineBase> CreateUninitializedComputePipelineImpl(
+ const ComputePipelineDescriptor* descriptor) override;
+ Ref<RenderPipelineBase> CreateUninitializedRenderPipelineImpl(
+ const RenderPipelineDescriptor* descriptor) override;
+ void InitializeComputePipelineAsyncImpl(Ref<ComputePipelineBase> computePipeline,
+ WGPUCreateComputePipelineAsyncCallback callback,
+ void* userdata) override;
+ void InitializeRenderPipelineAsyncImpl(Ref<RenderPipelineBase> renderPipeline,
+ WGPUCreateRenderPipelineAsyncCallback callback,
+ void* userdata) override;
+
+ void DestroyImpl() override;
+ MaybeError WaitForIdleForDestruction() override;
+
+ MaybeError CheckDebugLayerAndGenerateErrors();
+ void AppendDebugLayerMessages(ErrorData* error) override;
+
+ MaybeError ApplyUseDxcToggle();
+
+ MaybeError CreateZeroBuffer();
+
+ ComPtr<ID3D12Fence> mFence;
+ HANDLE mFenceEvent = nullptr;
+ ResultOrError<ExecutionSerial> CheckAndUpdateCompletedSerials() override;
+
+    ComPtr<ID3D12Device> mD3d12Device;  // Owned by the adapter, which outlives this Device.
+ ComPtr<ID3D12CommandQueue> mCommandQueue;
+ ComPtr<ID3D12SharingContract> mD3d12SharingContract;
- StagingDescriptorAllocator* GetSamplerStagingDescriptorAllocator(
- uint32_t descriptorCount) const;
-
- SamplerHeapCache* GetSamplerHeapCache();
-
- StagingDescriptorAllocator* GetRenderTargetViewAllocator() const;
-
- StagingDescriptorAllocator* GetDepthStencilViewAllocator() const;
-
- Ref<TextureBase> CreateD3D12ExternalTexture(
- const TextureDescriptor* descriptor,
- ComPtr<ID3D12Resource> d3d12Texture,
- Ref<D3D11on12ResourceCacheEntry> d3d11on12Resource,
- bool isSwapChainTexture,
- bool isInitialized);
-
- ComPtr<ID3D11On12Device> GetOrCreateD3D11on12Device();
-
- void InitTogglesFromDriver();
-
- uint32_t GetOptimalBytesPerRowAlignment() const override;
- uint64_t GetOptimalBufferToTextureCopyOffsetAlignment() const override;
-
- float GetTimestampPeriodInNS() const override;
-
- bool ShouldDuplicateNumWorkgroupsForDispatchIndirect(
- ComputePipelineBase* computePipeline) const override;
+ // 11on12 device corresponding to mCommandQueue
+ ComPtr<ID3D11On12Device> mD3d11On12Device;
- // Dawn API
- void SetLabelImpl() override;
+ ComPtr<ID3D12CommandSignature> mDispatchIndirectSignature;
+ ComPtr<ID3D12CommandSignature> mDrawIndirectSignature;
+ ComPtr<ID3D12CommandSignature> mDrawIndexedIndirectSignature;
- private:
- using DeviceBase::DeviceBase;
+ CommandRecordingContext mPendingCommands;
- ResultOrError<Ref<BindGroupBase>> CreateBindGroupImpl(
- const BindGroupDescriptor* descriptor) override;
- ResultOrError<Ref<BindGroupLayoutBase>> CreateBindGroupLayoutImpl(
- const BindGroupLayoutDescriptor* descriptor,
- PipelineCompatibilityToken pipelineCompatibilityToken) override;
- ResultOrError<Ref<BufferBase>> CreateBufferImpl(
- const BufferDescriptor* descriptor) override;
- ResultOrError<Ref<PipelineLayoutBase>> CreatePipelineLayoutImpl(
- const PipelineLayoutDescriptor* descriptor) override;
- ResultOrError<Ref<QuerySetBase>> CreateQuerySetImpl(
- const QuerySetDescriptor* descriptor) override;
- ResultOrError<Ref<SamplerBase>> CreateSamplerImpl(
- const SamplerDescriptor* descriptor) override;
- ResultOrError<Ref<ShaderModuleBase>> CreateShaderModuleImpl(
- const ShaderModuleDescriptor* descriptor,
- ShaderModuleParseResult* parseResult) override;
- ResultOrError<Ref<SwapChainBase>> CreateSwapChainImpl(
- const SwapChainDescriptor* descriptor) override;
- ResultOrError<Ref<NewSwapChainBase>> CreateSwapChainImpl(
- Surface* surface,
- NewSwapChainBase* previousSwapChain,
- const SwapChainDescriptor* descriptor) override;
- ResultOrError<Ref<TextureBase>> CreateTextureImpl(
- const TextureDescriptor* descriptor) override;
- ResultOrError<Ref<TextureViewBase>> CreateTextureViewImpl(
- TextureBase* texture,
- const TextureViewDescriptor* descriptor) override;
- Ref<ComputePipelineBase> CreateUninitializedComputePipelineImpl(
- const ComputePipelineDescriptor* descriptor) override;
- Ref<RenderPipelineBase> CreateUninitializedRenderPipelineImpl(
- const RenderPipelineDescriptor* descriptor) override;
- void InitializeComputePipelineAsyncImpl(Ref<ComputePipelineBase> computePipeline,
- WGPUCreateComputePipelineAsyncCallback callback,
- void* userdata) override;
- void InitializeRenderPipelineAsyncImpl(Ref<RenderPipelineBase> renderPipeline,
- WGPUCreateRenderPipelineAsyncCallback callback,
- void* userdata) override;
-
- void DestroyImpl() override;
- MaybeError WaitForIdleForDestruction() override;
-
- MaybeError CheckDebugLayerAndGenerateErrors();
-
- MaybeError ApplyUseDxcToggle();
-
- MaybeError CreateZeroBuffer();
+ SerialQueue<ExecutionSerial, ComPtr<IUnknown>> mUsedComObjectRefs;
- ComPtr<ID3D12Fence> mFence;
- HANDLE mFenceEvent = nullptr;
- ResultOrError<ExecutionSerial> CheckAndUpdateCompletedSerials() override;
+ std::unique_ptr<CommandAllocatorManager> mCommandAllocatorManager;
+ std::unique_ptr<ResourceAllocatorManager> mResourceAllocatorManager;
+ std::unique_ptr<ResidencyManager> mResidencyManager;
- ComPtr<ID3D12Device> mD3d12Device; // Device is owned by adapter and will not be outlived.
- ComPtr<ID3D12CommandQueue> mCommandQueue;
- ComPtr<ID3D12SharingContract> mD3d12SharingContract;
+ static constexpr uint32_t kMaxSamplerDescriptorsPerBindGroup = 3 * kMaxSamplersPerShaderStage;
+ static constexpr uint32_t kMaxViewDescriptorsPerBindGroup =
+ kMaxBindingsPerPipelineLayout - kMaxSamplerDescriptorsPerBindGroup;
- // 11on12 device corresponding to mCommandQueue
- ComPtr<ID3D11On12Device> mD3d11On12Device;
-
- ComPtr<ID3D12CommandSignature> mDispatchIndirectSignature;
- ComPtr<ID3D12CommandSignature> mDrawIndirectSignature;
- ComPtr<ID3D12CommandSignature> mDrawIndexedIndirectSignature;
-
- CommandRecordingContext mPendingCommands;
-
- SerialQueue<ExecutionSerial, ComPtr<IUnknown>> mUsedComObjectRefs;
-
- std::unique_ptr<CommandAllocatorManager> mCommandAllocatorManager;
- std::unique_ptr<ResourceAllocatorManager> mResourceAllocatorManager;
- std::unique_ptr<ResidencyManager> mResidencyManager;
-
- static constexpr uint32_t kMaxSamplerDescriptorsPerBindGroup =
- 3 * kMaxSamplersPerShaderStage;
- static constexpr uint32_t kMaxViewDescriptorsPerBindGroup =
- kMaxBindingsPerPipelineLayout - kMaxSamplerDescriptorsPerBindGroup;
-
- static constexpr uint32_t kNumSamplerDescriptorAllocators =
- ConstexprLog2Ceil(kMaxSamplerDescriptorsPerBindGroup) + 1;
- static constexpr uint32_t kNumViewDescriptorAllocators =
- ConstexprLog2Ceil(kMaxViewDescriptorsPerBindGroup) + 1;
-
- // Index corresponds to Log2Ceil(descriptorCount) where descriptorCount is in
- // the range [0, kMaxSamplerDescriptorsPerBindGroup].
- std::array<std::unique_ptr<StagingDescriptorAllocator>, kNumViewDescriptorAllocators + 1>
- mViewAllocators;
-
- // Index corresponds to Log2Ceil(descriptorCount) where descriptorCount is in
- // the range [0, kMaxViewDescriptorsPerBindGroup].
- std::array<std::unique_ptr<StagingDescriptorAllocator>, kNumSamplerDescriptorAllocators + 1>
- mSamplerAllocators;
-
- std::unique_ptr<StagingDescriptorAllocator> mRenderTargetViewAllocator;
-
- std::unique_ptr<StagingDescriptorAllocator> mDepthStencilViewAllocator;
-
- std::unique_ptr<ShaderVisibleDescriptorAllocator> mViewShaderVisibleDescriptorAllocator;
-
- std::unique_ptr<ShaderVisibleDescriptorAllocator> mSamplerShaderVisibleDescriptorAllocator;
-
- // Sampler cache needs to be destroyed before the CPU sampler allocator to ensure the final
- // release is called.
- std::unique_ptr<SamplerHeapCache> mSamplerHeapCache;
-
- // A buffer filled with zeros that is used to copy into other buffers when they need to be
- // cleared.
- Ref<Buffer> mZeroBuffer;
-
- // The number of nanoseconds required for a timestamp query to be incremented by 1
- float mTimestampPeriod = 1.0f;
- };
+ static constexpr uint32_t kNumSamplerDescriptorAllocators =
+ ConstexprLog2Ceil(kMaxSamplerDescriptorsPerBindGroup) + 1;
+ static constexpr uint32_t kNumViewDescriptorAllocators =
+ ConstexprLog2Ceil(kMaxViewDescriptorsPerBindGroup) + 1;
+
+ // Index corresponds to Log2Ceil(descriptorCount) where descriptorCount is in
+    // the range [0, kMaxViewDescriptorsPerBindGroup].
+ std::array<std::unique_ptr<StagingDescriptorAllocator>, kNumViewDescriptorAllocators + 1>
+ mViewAllocators;
+
+ // Index corresponds to Log2Ceil(descriptorCount) where descriptorCount is in
+    // the range [0, kMaxSamplerDescriptorsPerBindGroup].
+ std::array<std::unique_ptr<StagingDescriptorAllocator>, kNumSamplerDescriptorAllocators + 1>
+ mSamplerAllocators;
+
+ std::unique_ptr<StagingDescriptorAllocator> mRenderTargetViewAllocator;
+
+ std::unique_ptr<StagingDescriptorAllocator> mDepthStencilViewAllocator;
+
+ std::unique_ptr<ShaderVisibleDescriptorAllocator> mViewShaderVisibleDescriptorAllocator;
+
+ std::unique_ptr<ShaderVisibleDescriptorAllocator> mSamplerShaderVisibleDescriptorAllocator;
+
+ // Sampler cache needs to be destroyed before the CPU sampler allocator to ensure the final
+ // release is called.
+ std::unique_ptr<SamplerHeapCache> mSamplerHeapCache;
+
+ // A buffer filled with zeros that is used to copy into other buffers when they need to be
+ // cleared.
+ Ref<Buffer> mZeroBuffer;
+
+ // The number of nanoseconds required for a timestamp query to be incremented by 1
+ float mTimestampPeriod = 1.0f;
+
+ // List of external image resources opened using this device.
+ LinkedList<ExternalImageDXGIImpl> mExternalImageList;
+};
} // namespace dawn::native::d3d12
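
The mViewAllocators and mSamplerAllocators arrays above are indexed by size bucket: the accessors in DeviceD3D12.cpp compute the bucket as Log2Ceil(descriptorCount) + 1, with index 0 reserved for the zero-descriptor case (which the comment above notes returns nullptr), so each bucket serves one power-of-two size class up to the per-bind-group maximum. A small self-contained sketch of that index computation, where Log2Ceil is a local stand-in rather than Dawn's helper:

    #include <cassert>
    #include <cstdint>

    // Smallest n such that 2^n >= v, for v > 0 (stand-in for Dawn's Log2Ceil).
    uint32_t Log2Ceil(uint32_t v) {
        assert(v > 0);
        uint32_t log = 0;
        while ((1u << log) < v) {
            ++log;
        }
        return log;
    }

    // Same bucketing as GetViewStagingDescriptorAllocator / GetSamplerStagingDescriptorAllocator.
    uint32_t AllocatorIndex(uint32_t descriptorCount) {
        return descriptorCount == 0 ? 0 : Log2Ceil(descriptorCount) + 1;
    }

    int main() {
        assert(AllocatorIndex(0) == 0);  // empty bind group: dedicated slot
        assert(AllocatorIndex(1) == 1);  // 2^0
        assert(AllocatorIndex(2) == 2);  // 2^1
        assert(AllocatorIndex(3) == 3);  // rounded up to 2^2
        assert(AllocatorIndex(4) == 3);
        assert(AllocatorIndex(5) == 4);  // rounded up to 2^3
    }
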
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/ExternalImageDXGIImpl.cpp b/chromium/third_party/dawn/src/dawn/native/d3d12/ExternalImageDXGIImpl.cpp
new file mode 100644
index 00000000000..8df2d306ab4
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/ExternalImageDXGIImpl.cpp
@@ -0,0 +1,106 @@
+// Copyright 2022 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/d3d12/ExternalImageDXGIImpl.h"
+
+#include <d3d12.h>
+
+#include <utility>
+
+#include "dawn/common/Log.h"
+#include "dawn/native/D3D12Backend.h"
+#include "dawn/native/DawnNative.h"
+#include "dawn/native/d3d12/D3D11on12Util.h"
+#include "dawn/native/d3d12/DeviceD3D12.h"
+
+namespace dawn::native::d3d12 {
+
+ExternalImageDXGIImpl::ExternalImageDXGIImpl(Device* backendDevice,
+ Microsoft::WRL::ComPtr<ID3D12Resource> d3d12Resource,
+ const WGPUTextureDescriptor* descriptor)
+ : mBackendDevice(backendDevice),
+ mD3D12Resource(std::move(d3d12Resource)),
+ mD3D11on12ResourceCache(std::make_unique<D3D11on12ResourceCache>()),
+ mUsage(descriptor->usage),
+ mDimension(descriptor->dimension),
+ mSize(descriptor->size),
+ mFormat(descriptor->format),
+ mMipLevelCount(descriptor->mipLevelCount),
+ mSampleCount(descriptor->sampleCount) {
+ ASSERT(mBackendDevice != nullptr);
+ ASSERT(mD3D12Resource != nullptr);
+ ASSERT(!descriptor->nextInChain ||
+ descriptor->nextInChain->sType == WGPUSType_DawnTextureInternalUsageDescriptor);
+ if (descriptor->nextInChain) {
+ mUsageInternal =
+ reinterpret_cast<const WGPUDawnTextureInternalUsageDescriptor*>(descriptor->nextInChain)
+ ->internalUsage;
+ }
+}
+
+ExternalImageDXGIImpl::~ExternalImageDXGIImpl() {
+ Destroy();
+}
+
+bool ExternalImageDXGIImpl::IsValid() const {
+ return IsInList();
+}
+
+void ExternalImageDXGIImpl::Destroy() {
+ if (IsInList()) {
+ RemoveFromList();
+ mBackendDevice = nullptr;
+ mD3D12Resource.Reset();
+ mD3D11on12ResourceCache.reset();
+ }
+}
+
+WGPUTexture ExternalImageDXGIImpl::ProduceTexture(
+ const ExternalImageAccessDescriptorDXGIKeyedMutex* descriptor) {
+ ASSERT(mBackendDevice != nullptr);
+ // Ensure the texture usage is allowed
+ if (!IsSubset(descriptor->usage, mUsage)) {
+ dawn::ErrorLog() << "Texture usage is not valid for external image";
+ return nullptr;
+ }
+
+ TextureDescriptor textureDescriptor = {};
+ textureDescriptor.usage = static_cast<wgpu::TextureUsage>(descriptor->usage);
+ textureDescriptor.dimension = static_cast<wgpu::TextureDimension>(mDimension);
+ textureDescriptor.size = {mSize.width, mSize.height, mSize.depthOrArrayLayers};
+ textureDescriptor.format = static_cast<wgpu::TextureFormat>(mFormat);
+ textureDescriptor.mipLevelCount = mMipLevelCount;
+ textureDescriptor.sampleCount = mSampleCount;
+
+ DawnTextureInternalUsageDescriptor internalDesc = {};
+ if (mUsageInternal) {
+ textureDescriptor.nextInChain = &internalDesc;
+ internalDesc.internalUsage = static_cast<wgpu::TextureUsage>(mUsageInternal);
+ internalDesc.sType = wgpu::SType::DawnTextureInternalUsageDescriptor;
+ }
+
+ Ref<D3D11on12ResourceCacheEntry> d3d11on12Resource =
+ mD3D11on12ResourceCache->GetOrCreateD3D11on12Resource(mBackendDevice, mD3D12Resource.Get());
+ if (d3d11on12Resource == nullptr) {
+ dawn::ErrorLog() << "Unable to create 11on12 resource for external image";
+ return nullptr;
+ }
+
+ Ref<TextureBase> texture = mBackendDevice->CreateD3D12ExternalTexture(
+ &textureDescriptor, mD3D12Resource, std::move(d3d11on12Resource),
+ descriptor->isSwapChainTexture, descriptor->isInitialized);
+ return ToAPI(texture.Detach());
+}
+
+} // namespace dawn::native::d3d12
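
Ownership-wise, the caller keeps the ExternalImageDXGIImpl alive through the std::unique_ptr returned by Device::CreateExternalImageDXGIImpl, while the device only holds a non-owning link on mExternalImageList so that Device::DestroyImpl can invalidate any images that are still alive; Destroy() unlinks from whichever side runs first. A rough standalone sketch of that ownership shape, with std::list standing in for Dawn's intrusive LinkedList/LinkNode and all names (DeviceLike, ExternalImage) purely illustrative:

    #include <cassert>
    #include <list>
    #include <memory>

    class DeviceLike;

    class ExternalImage {
      public:
        explicit ExternalImage(DeviceLike* device) : mDevice(device) {}
        ~ExternalImage() { Destroy(); }  // mirrors ~ExternalImageDXGIImpl() calling Destroy()

        bool IsValid() const { return mDevice != nullptr; }
        void Destroy();  // detaches from the device; defined below DeviceLike

      private:
        friend class DeviceLike;
        DeviceLike* mDevice;
        std::list<ExternalImage*>::iterator mLink;
    };

    class DeviceLike {
      public:
        // The caller owns the image; the device only records a non-owning link.
        std::unique_ptr<ExternalImage> CreateExternalImage() {
            auto image = std::make_unique<ExternalImage>(this);
            image->mLink = mImages.insert(mImages.end(), image.get());
            return image;
        }

        // Mirrors Device::DestroyImpl(): invalidate every image still linked.
        void Shutdown() {
            while (!mImages.empty()) {
                mImages.front()->Destroy();  // Destroy() unlinks, so the loop terminates
            }
        }

      private:
        friend class ExternalImage;
        std::list<ExternalImage*> mImages;
    };

    void ExternalImage::Destroy() {
        if (mDevice != nullptr) {
            mDevice->mImages.erase(mLink);
            mDevice = nullptr;
        }
    }

    int main() {
        DeviceLike device;
        auto image = device.CreateExternalImage();
        assert(image->IsValid());
        device.Shutdown();          // device teardown invalidates the image...
        assert(!image->IsValid());  // ...while the caller still owns the wrapper safely.
    }
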
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/ExternalImageDXGIImpl.h b/chromium/third_party/dawn/src/dawn/native/d3d12/ExternalImageDXGIImpl.h
new file mode 100644
index 00000000000..ca6acefc87f
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/ExternalImageDXGIImpl.h
@@ -0,0 +1,70 @@
+// Copyright 2022 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef SRC_DAWN_NATIVE_D3D12_EXTERNALIMAGEDXGIIMPL_H_
+#define SRC_DAWN_NATIVE_D3D12_EXTERNALIMAGEDXGIIMPL_H_
+
+#include <wrl/client.h>
+
+#include <memory>
+
+#include "dawn/common/LinkedList.h"
+#include "dawn/dawn_wsi.h"
+#include "dawn/native/Forward.h"
+
+struct ID3D12Resource;
+struct ID3D12Fence;
+
+namespace dawn::native::d3d12 {
+
+class D3D11on12ResourceCache;
+class Device;
+struct ExternalImageAccessDescriptorDXGIKeyedMutex;
+struct ExternalImageDescriptorDXGISharedHandle;
+
+class ExternalImageDXGIImpl : public LinkNode<ExternalImageDXGIImpl> {
+ public:
+ ExternalImageDXGIImpl(Device* backendDevice,
+ Microsoft::WRL::ComPtr<ID3D12Resource> d3d12Resource,
+ const WGPUTextureDescriptor* descriptor);
+ ~ExternalImageDXGIImpl();
+
+ ExternalImageDXGIImpl(const ExternalImageDXGIImpl&) = delete;
+ ExternalImageDXGIImpl& operator=(const ExternalImageDXGIImpl&) = delete;
+
+ void Destroy();
+
+ bool IsValid() const;
+
+ WGPUTexture ProduceTexture(const ExternalImageAccessDescriptorDXGIKeyedMutex* descriptor);
+
+ private:
+ Device* mBackendDevice;
+ Microsoft::WRL::ComPtr<ID3D12Resource> mD3D12Resource;
+ std::unique_ptr<D3D11on12ResourceCache> mD3D11on12ResourceCache;
+
+    // The contents of the WGPUTextureDescriptor are stored as individual members since this
+    // image may outlive the descriptor it was created from.
+ WGPUTextureUsageFlags mUsage;
+ WGPUTextureUsageFlags mUsageInternal = WGPUTextureUsage_None;
+ WGPUTextureDimension mDimension;
+ WGPUExtent3D mSize;
+ WGPUTextureFormat mFormat;
+ uint32_t mMipLevelCount;
+ uint32_t mSampleCount;
+};
+
+} // namespace dawn::native::d3d12
+
+#endif // SRC_DAWN_NATIVE_D3D12_EXTERNALIMAGEDXGIIMPL_H_
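
The mUsageInternal member above is populated from an optional chained DawnTextureInternalUsageDescriptor: the constructor in ExternalImageDXGIImpl.cpp checks nextInChain->sType before casting the chained pointer. A tiny standalone sketch of that chained-struct extension pattern, using made-up struct and enum names rather than the real webgpu.h types:

    #include <cassert>
    #include <cstdint>

    enum class SType : uint32_t { Invalid = 0, InternalUsageDesc = 1 };

    struct ChainedStruct {
        const ChainedStruct* nextInChain = nullptr;
        SType sType = SType::Invalid;
    };

    struct InternalUsageDesc {
        ChainedStruct chain{nullptr, SType::InternalUsageDesc};  // must stay the first member
        uint32_t internalUsage = 0;
    };

    struct TextureDesc {
        const ChainedStruct* nextInChain = nullptr;
    };

    // Only follow the chain when the sType tag identifies an extension we know how to read,
    // matching the ASSERT/if in ExternalImageDXGIImpl's constructor.
    uint32_t ReadInternalUsage(const TextureDesc& desc) {
        if (desc.nextInChain != nullptr && desc.nextInChain->sType == SType::InternalUsageDesc) {
            return reinterpret_cast<const InternalUsageDesc*>(desc.nextInChain)->internalUsage;
        }
        return 0;  // no extension chained in
    }

    int main() {
        TextureDesc desc;
        assert(ReadInternalUsage(desc) == 0);

        InternalUsageDesc ext;
        ext.internalUsage = 42;
        desc.nextInChain = &ext.chain;  // chain the extension struct off the base descriptor
        assert(ReadInternalUsage(desc) == 42);
    }
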
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/Forward.h b/chromium/third_party/dawn/src/dawn/native/d3d12/Forward.h
index 3004f2ed9d8..13f7c81e4aa 100644
--- a/chromium/third_party/dawn/src/dawn/native/d3d12/Forward.h
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/Forward.h
@@ -19,50 +19,52 @@
namespace dawn::native::d3d12 {
- class Adapter;
- class BindGroup;
- class BindGroupLayout;
- class Buffer;
- class CommandBuffer;
- class ComputePipeline;
- class Device;
- class Heap;
- class PipelineLayout;
- class QuerySet;
- class Queue;
- class RenderPipeline;
- class Sampler;
- class ShaderModule;
- class StagingBuffer;
- class SwapChain;
- class Texture;
- class TextureView;
+class Adapter;
+class BindGroup;
+class BindGroupLayout;
+class Buffer;
+class CommandBuffer;
+class ComputePipeline;
+class Device;
+class Heap;
+class PipelineCache;
+class PipelineLayout;
+class QuerySet;
+class Queue;
+class RenderPipeline;
+class Sampler;
+class ShaderModule;
+class StagingBuffer;
+class SwapChain;
+class Texture;
+class TextureView;
- struct D3D12BackendTraits {
- using AdapterType = Adapter;
- using BindGroupType = BindGroup;
- using BindGroupLayoutType = BindGroupLayout;
- using BufferType = Buffer;
- using CommandBufferType = CommandBuffer;
- using ComputePipelineType = ComputePipeline;
- using DeviceType = Device;
- using PipelineLayoutType = PipelineLayout;
- using QuerySetType = QuerySet;
- using QueueType = Queue;
- using RenderPipelineType = RenderPipeline;
- using ResourceHeapType = Heap;
- using SamplerType = Sampler;
- using ShaderModuleType = ShaderModule;
- using StagingBufferType = StagingBuffer;
- using SwapChainType = SwapChain;
- using TextureType = Texture;
- using TextureViewType = TextureView;
- };
+struct D3D12BackendTraits {
+ using AdapterType = Adapter;
+ using BindGroupType = BindGroup;
+ using BindGroupLayoutType = BindGroupLayout;
+ using BufferType = Buffer;
+ using CommandBufferType = CommandBuffer;
+ using ComputePipelineType = ComputePipeline;
+ using DeviceType = Device;
+ using PipelineCacheType = PipelineCache;
+ using PipelineLayoutType = PipelineLayout;
+ using QuerySetType = QuerySet;
+ using QueueType = Queue;
+ using RenderPipelineType = RenderPipeline;
+ using ResourceHeapType = Heap;
+ using SamplerType = Sampler;
+ using ShaderModuleType = ShaderModule;
+ using StagingBufferType = StagingBuffer;
+ using SwapChainType = SwapChain;
+ using TextureType = Texture;
+ using TextureViewType = TextureView;
+};
- template <typename T>
- auto ToBackend(T&& common) -> decltype(ToBackendBase<D3D12BackendTraits>(common)) {
- return ToBackendBase<D3D12BackendTraits>(common);
- }
+template <typename T>
+auto ToBackend(T&& common) -> decltype(ToBackendBase<D3D12BackendTraits>(common)) {
+ return ToBackendBase<D3D12BackendTraits>(common);
+}
} // namespace dawn::native::d3d12
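
The Forward.h change above only adds PipelineCache to the trait list, but the surrounding ToBackend/ToBackendBase machinery is worth spelling out: a traits struct maps each frontend base type to its backend type, and ToBackend performs the corresponding downcast so backend code can reach its own state from frontend pointers. A minimal sketch of the same idea, with made-up types and a single-type trait rather than Dawn's full ToBackendBase implementation:

    #include <cassert>

    class BufferBase {
      public:
        virtual ~BufferBase() = default;
    };

    class BackendBuffer : public BufferBase {
      public:
        int heapIndex = 7;  // backend-only state the frontend knows nothing about
    };

    struct BackendTraits {
        using BufferType = BackendBuffer;
    };

    // The real ToBackendBase covers every type listed in the traits struct; this
    // sketch handles just one to show the shape of the cast.
    template <typename Traits>
    typename Traits::BufferType* ToBackendBase(BufferBase* buffer) {
        return static_cast<typename Traits::BufferType*>(buffer);
    }

    BackendBuffer* ToBackend(BufferBase* buffer) {
        return ToBackendBase<BackendTraits>(buffer);
    }

    int main() {
        BackendBuffer backendBuffer;
        BufferBase* base = &backendBuffer;        // frontend-facing pointer
        assert(ToBackend(base)->heapIndex == 7);  // backend code recovers its own type
    }
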
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/GPUDescriptorHeapAllocationD3D12.cpp b/chromium/third_party/dawn/src/dawn/native/d3d12/GPUDescriptorHeapAllocationD3D12.cpp
index e5d4fb931a8..1b3613d47b5 100644
--- a/chromium/third_party/dawn/src/dawn/native/d3d12/GPUDescriptorHeapAllocationD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/GPUDescriptorHeapAllocationD3D12.cpp
@@ -16,24 +16,20 @@
namespace dawn::native::d3d12 {
- GPUDescriptorHeapAllocation::GPUDescriptorHeapAllocation(
- D3D12_GPU_DESCRIPTOR_HANDLE baseDescriptor,
- ExecutionSerial lastUsageSerial,
- HeapVersionID heapSerial)
- : mBaseDescriptor(baseDescriptor),
- mLastUsageSerial(lastUsageSerial),
- mHeapSerial(heapSerial) {
- }
+GPUDescriptorHeapAllocation::GPUDescriptorHeapAllocation(D3D12_GPU_DESCRIPTOR_HANDLE baseDescriptor,
+ ExecutionSerial lastUsageSerial,
+ HeapVersionID heapSerial)
+ : mBaseDescriptor(baseDescriptor), mLastUsageSerial(lastUsageSerial), mHeapSerial(heapSerial) {}
- D3D12_GPU_DESCRIPTOR_HANDLE GPUDescriptorHeapAllocation::GetBaseDescriptor() const {
- return mBaseDescriptor;
- }
+D3D12_GPU_DESCRIPTOR_HANDLE GPUDescriptorHeapAllocation::GetBaseDescriptor() const {
+ return mBaseDescriptor;
+}
- ExecutionSerial GPUDescriptorHeapAllocation::GetLastUsageSerial() const {
- return mLastUsageSerial;
- }
+ExecutionSerial GPUDescriptorHeapAllocation::GetLastUsageSerial() const {
+ return mLastUsageSerial;
+}
- HeapVersionID GPUDescriptorHeapAllocation::GetHeapSerial() const {
- return mHeapSerial;
- }
+HeapVersionID GPUDescriptorHeapAllocation::GetHeapSerial() const {
+ return mHeapSerial;
+}
} // namespace dawn::native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/GPUDescriptorHeapAllocationD3D12.h b/chromium/third_party/dawn/src/dawn/native/d3d12/GPUDescriptorHeapAllocationD3D12.h
index a7ac12c6ed1..f62286c1a57 100644
--- a/chromium/third_party/dawn/src/dawn/native/d3d12/GPUDescriptorHeapAllocationD3D12.h
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/GPUDescriptorHeapAllocationD3D12.h
@@ -21,23 +21,23 @@
namespace dawn::native::d3d12 {
- // Wrapper for a handle into a GPU-only descriptor heap.
- class GPUDescriptorHeapAllocation {
- public:
- GPUDescriptorHeapAllocation() = default;
- GPUDescriptorHeapAllocation(D3D12_GPU_DESCRIPTOR_HANDLE baseDescriptor,
- ExecutionSerial lastUsageSerial,
- HeapVersionID heapSerial);
-
- D3D12_GPU_DESCRIPTOR_HANDLE GetBaseDescriptor() const;
- ExecutionSerial GetLastUsageSerial() const;
- HeapVersionID GetHeapSerial() const;
-
- private:
- D3D12_GPU_DESCRIPTOR_HANDLE mBaseDescriptor = {0};
- ExecutionSerial mLastUsageSerial = ExecutionSerial(0);
- HeapVersionID mHeapSerial = HeapVersionID(0);
- };
+// Wrapper for a handle into a GPU-only descriptor heap.
+class GPUDescriptorHeapAllocation {
+ public:
+ GPUDescriptorHeapAllocation() = default;
+ GPUDescriptorHeapAllocation(D3D12_GPU_DESCRIPTOR_HANDLE baseDescriptor,
+ ExecutionSerial lastUsageSerial,
+ HeapVersionID heapSerial);
+
+ D3D12_GPU_DESCRIPTOR_HANDLE GetBaseDescriptor() const;
+ ExecutionSerial GetLastUsageSerial() const;
+ HeapVersionID GetHeapSerial() const;
+
+ private:
+ D3D12_GPU_DESCRIPTOR_HANDLE mBaseDescriptor = {0};
+ ExecutionSerial mLastUsageSerial = ExecutionSerial(0);
+ HeapVersionID mHeapSerial = HeapVersionID(0);
+};
} // namespace dawn::native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/HeapAllocatorD3D12.cpp b/chromium/third_party/dawn/src/dawn/native/d3d12/HeapAllocatorD3D12.cpp
index 5a26be305d7..28349817a2e 100644
--- a/chromium/third_party/dawn/src/dawn/native/d3d12/HeapAllocatorD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/HeapAllocatorD3D12.cpp
@@ -13,6 +13,9 @@
// limitations under the License.
#include "dawn/native/d3d12/HeapAllocatorD3D12.h"
+
+#include <utility>
+
#include "dawn/native/d3d12/D3D12Error.h"
#include "dawn/native/d3d12/DeviceD3D12.h"
#include "dawn/native/d3d12/HeapD3D12.h"
@@ -20,52 +23,48 @@
namespace dawn::native::d3d12 {
- HeapAllocator::HeapAllocator(Device* device,
- D3D12_HEAP_TYPE heapType,
- D3D12_HEAP_FLAGS heapFlags,
- MemorySegment memorySegment)
- : mDevice(device),
- mHeapType(heapType),
- mHeapFlags(heapFlags),
- mMemorySegment(memorySegment) {
- }
+HeapAllocator::HeapAllocator(Device* device,
+ D3D12_HEAP_TYPE heapType,
+ D3D12_HEAP_FLAGS heapFlags,
+ MemorySegment memorySegment)
+ : mDevice(device), mHeapType(heapType), mHeapFlags(heapFlags), mMemorySegment(memorySegment) {}
- ResultOrError<std::unique_ptr<ResourceHeapBase>> HeapAllocator::AllocateResourceHeap(
- uint64_t size) {
- D3D12_HEAP_DESC heapDesc;
- heapDesc.SizeInBytes = size;
- heapDesc.Properties.Type = mHeapType;
- heapDesc.Properties.CPUPageProperty = D3D12_CPU_PAGE_PROPERTY_UNKNOWN;
- heapDesc.Properties.MemoryPoolPreference = D3D12_MEMORY_POOL_UNKNOWN;
- heapDesc.Properties.CreationNodeMask = 0;
- heapDesc.Properties.VisibleNodeMask = 0;
- // It is preferred to use a size that is a multiple of the alignment.
- // However, MSAA heaps are always aligned to 4MB instead of 64KB. This means
- // if the heap size is too small, the VMM would fragment.
- // TODO(crbug.com/dawn/849): Consider having MSAA vs non-MSAA heaps.
- heapDesc.Alignment = D3D12_DEFAULT_MSAA_RESOURCE_PLACEMENT_ALIGNMENT;
- heapDesc.Flags = mHeapFlags;
+ResultOrError<std::unique_ptr<ResourceHeapBase>> HeapAllocator::AllocateResourceHeap(
+ uint64_t size) {
+ D3D12_HEAP_DESC heapDesc;
+ heapDesc.SizeInBytes = size;
+ heapDesc.Properties.Type = mHeapType;
+ heapDesc.Properties.CPUPageProperty = D3D12_CPU_PAGE_PROPERTY_UNKNOWN;
+ heapDesc.Properties.MemoryPoolPreference = D3D12_MEMORY_POOL_UNKNOWN;
+ heapDesc.Properties.CreationNodeMask = 0;
+ heapDesc.Properties.VisibleNodeMask = 0;
+ // It is preferred to use a size that is a multiple of the alignment.
+ // However, MSAA heaps are always aligned to 4MB instead of 64KB. This means
+ // if the heap size is too small, the VMM would fragment.
+ // TODO(crbug.com/dawn/849): Consider having MSAA vs non-MSAA heaps.
+ heapDesc.Alignment = D3D12_DEFAULT_MSAA_RESOURCE_PLACEMENT_ALIGNMENT;
+ heapDesc.Flags = mHeapFlags;
- // CreateHeap will implicitly make the created heap resident. We must ensure enough free
- // memory exists before allocating to avoid an out-of-memory error when overcommitted.
- DAWN_TRY(mDevice->GetResidencyManager()->EnsureCanAllocate(size, mMemorySegment));
+ // CreateHeap will implicitly make the created heap resident. We must ensure enough free
+ // memory exists before allocating to avoid an out-of-memory error when overcommitted.
+ DAWN_TRY(mDevice->GetResidencyManager()->EnsureCanAllocate(size, mMemorySegment));
- ComPtr<ID3D12Heap> d3d12Heap;
- DAWN_TRY(CheckOutOfMemoryHRESULT(
- mDevice->GetD3D12Device()->CreateHeap(&heapDesc, IID_PPV_ARGS(&d3d12Heap)),
- "ID3D12Device::CreateHeap"));
+ ComPtr<ID3D12Heap> d3d12Heap;
+ DAWN_TRY(CheckOutOfMemoryHRESULT(
+ mDevice->GetD3D12Device()->CreateHeap(&heapDesc, IID_PPV_ARGS(&d3d12Heap)),
+ "ID3D12Device::CreateHeap"));
- std::unique_ptr<ResourceHeapBase> heapBase =
- std::make_unique<Heap>(std::move(d3d12Heap), mMemorySegment, size);
+ std::unique_ptr<ResourceHeapBase> heapBase =
+ std::make_unique<Heap>(std::move(d3d12Heap), mMemorySegment, size);
- // Calling CreateHeap implicitly calls MakeResident on the new heap. We must track this to
- // avoid calling MakeResident a second time.
- mDevice->GetResidencyManager()->TrackResidentAllocation(ToBackend(heapBase.get()));
- return std::move(heapBase);
- }
+ // Calling CreateHeap implicitly calls MakeResident on the new heap. We must track this to
+ // avoid calling MakeResident a second time.
+ mDevice->GetResidencyManager()->TrackResidentAllocation(ToBackend(heapBase.get()));
+ return std::move(heapBase);
+}
- void HeapAllocator::DeallocateResourceHeap(std::unique_ptr<ResourceHeapBase> heap) {
- mDevice->ReferenceUntilUnused(static_cast<Heap*>(heap.get())->GetD3D12Heap());
- }
+void HeapAllocator::DeallocateResourceHeap(std::unique_ptr<ResourceHeapBase> heap) {
+ mDevice->ReferenceUntilUnused(static_cast<Heap*>(heap.get())->GetD3D12Heap());
+}
} // namespace dawn::native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/HeapAllocatorD3D12.h b/chromium/third_party/dawn/src/dawn/native/d3d12/HeapAllocatorD3D12.h
index 3cc8ee06b0f..a297f043aa5 100644
--- a/chromium/third_party/dawn/src/dawn/native/d3d12/HeapAllocatorD3D12.h
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/HeapAllocatorD3D12.h
@@ -15,33 +15,34 @@
#ifndef SRC_DAWN_NATIVE_D3D12_HEAPALLOCATORD3D12_H_
#define SRC_DAWN_NATIVE_D3D12_HEAPALLOCATORD3D12_H_
+#include <memory>
+
#include "dawn/native/D3D12Backend.h"
#include "dawn/native/ResourceHeapAllocator.h"
#include "dawn/native/d3d12/d3d12_platform.h"
namespace dawn::native::d3d12 {
- class Device;
-
- // Wrapper to allocate a D3D12 heap.
- class HeapAllocator : public ResourceHeapAllocator {
- public:
- HeapAllocator(Device* device,
- D3D12_HEAP_TYPE heapType,
- D3D12_HEAP_FLAGS heapFlags,
- MemorySegment memorySegment);
- ~HeapAllocator() override = default;
-
- ResultOrError<std::unique_ptr<ResourceHeapBase>> AllocateResourceHeap(
- uint64_t size) override;
- void DeallocateResourceHeap(std::unique_ptr<ResourceHeapBase> allocation) override;
-
- private:
- Device* mDevice;
- D3D12_HEAP_TYPE mHeapType;
- D3D12_HEAP_FLAGS mHeapFlags;
- MemorySegment mMemorySegment;
- };
+class Device;
+
+// Wrapper to allocate a D3D12 heap.
+class HeapAllocator : public ResourceHeapAllocator {
+ public:
+ HeapAllocator(Device* device,
+ D3D12_HEAP_TYPE heapType,
+ D3D12_HEAP_FLAGS heapFlags,
+ MemorySegment memorySegment);
+ ~HeapAllocator() override = default;
+
+ ResultOrError<std::unique_ptr<ResourceHeapBase>> AllocateResourceHeap(uint64_t size) override;
+ void DeallocateResourceHeap(std::unique_ptr<ResourceHeapBase> allocation) override;
+
+ private:
+ Device* mDevice;
+ D3D12_HEAP_TYPE mHeapType;
+ D3D12_HEAP_FLAGS mHeapFlags;
+ MemorySegment mMemorySegment;
+};
} // namespace dawn::native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/HeapD3D12.cpp b/chromium/third_party/dawn/src/dawn/native/d3d12/HeapD3D12.cpp
index 7426757dfdd..61aa03b5e8c 100644
--- a/chromium/third_party/dawn/src/dawn/native/d3d12/HeapD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/HeapD3D12.cpp
@@ -14,18 +14,20 @@
#include "dawn/native/d3d12/HeapD3D12.h"
+#include <utility>
+
namespace dawn::native::d3d12 {
- Heap::Heap(ComPtr<ID3D12Pageable> d3d12Pageable, MemorySegment memorySegment, uint64_t size)
- : Pageable(std::move(d3d12Pageable), memorySegment, size) {
- mD3d12Pageable.As(&mD3d12Heap);
- }
+Heap::Heap(ComPtr<ID3D12Pageable> d3d12Pageable, MemorySegment memorySegment, uint64_t size)
+ : Pageable(std::move(d3d12Pageable), memorySegment, size) {
+ mD3d12Pageable.As(&mD3d12Heap);
+}
- // This function should only be used when mD3D12Pageable was initialized from a
- // ID3D12Pageable that was initially created as an ID3D12Heap (i.e. SubAllocation). If the
- // ID3D12Pageable was initially created as an ID3D12Resource (i.e. DirectAllocation), then
- // use GetD3D12Pageable().
- ID3D12Heap* Heap::GetD3D12Heap() const {
- return mD3d12Heap.Get();
- }
+// This function should only be used when mD3d12Pageable was initialized from an
+// ID3D12Pageable that was initially created as an ID3D12Heap (i.e. SubAllocation). If the
+// ID3D12Pageable was initially created as an ID3D12Resource (i.e. DirectAllocation), then
+// use GetD3D12Pageable().
+ID3D12Heap* Heap::GetD3D12Heap() const {
+ return mD3d12Heap.Get();
+}
} // namespace dawn::native::d3d12
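
GetD3D12Heap() above is only meaningful when the stored ID3D12Pageable was created as an ID3D12Heap (a sub-allocation); for a direct allocation the pageable is an ID3D12Resource and GetD3D12Pageable() must be used instead. The sketch below illustrates that contract with dynamic_cast standing in for COM's QueryInterface; ToyPageable, ToyHeap and ToyResource are hypothetical, not D3D12 types.

#include <memory>

struct ToyPageable { virtual ~ToyPageable() = default; };  // plays the role of ID3D12Pageable
struct ToyHeap : ToyPageable {};                           // plays the role of ID3D12Heap
struct ToyResource : ToyPageable {};                       // plays the role of ID3D12Resource

// Mirrors the Heap::GetD3D12Heap() contract: valid only for heap-backed sub-allocations.
ToyHeap* AsHeap(ToyPageable* pageable) {
    return dynamic_cast<ToyHeap*>(pageable);  // ComPtr::As would fail with E_NOINTERFACE
}

int main() {
    std::unique_ptr<ToyPageable> subAllocation = std::make_unique<ToyHeap>();
    std::unique_ptr<ToyPageable> directAllocation = std::make_unique<ToyResource>();
    bool ok = AsHeap(subAllocation.get()) != nullptr &&   // heap-backed: query succeeds
              AsHeap(directAllocation.get()) == nullptr;  // resource-backed: use GetD3D12Pageable()
    return ok ? 0 : 1;
}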
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/HeapD3D12.h b/chromium/third_party/dawn/src/dawn/native/d3d12/HeapD3D12.h
index b1efe5e0af9..ebce289c9b4 100644
--- a/chromium/third_party/dawn/src/dawn/native/d3d12/HeapD3D12.h
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/HeapD3D12.h
@@ -21,20 +21,20 @@
namespace dawn::native::d3d12 {
- class Device;
+class Device;
- // This class is used to represent ID3D12Heap allocations, as well as an implicit heap
- // representing a directly allocated resource. It inherits from Pageable because each Heap must
- // be represented in the ResidencyManager.
- class Heap : public ResourceHeapBase, public Pageable {
- public:
- Heap(ComPtr<ID3D12Pageable> d3d12Pageable, MemorySegment memorySegment, uint64_t size);
+// This class is used to represent ID3D12Heap allocations, as well as an implicit heap
+// representing a directly allocated resource. It inherits from Pageable because each Heap must
+// be represented in the ResidencyManager.
+class Heap : public ResourceHeapBase, public Pageable {
+ public:
+ Heap(ComPtr<ID3D12Pageable> d3d12Pageable, MemorySegment memorySegment, uint64_t size);
- ID3D12Heap* GetD3D12Heap() const;
+ ID3D12Heap* GetD3D12Heap() const;
- private:
- ComPtr<ID3D12Heap> mD3d12Heap;
- };
+ private:
+ ComPtr<ID3D12Heap> mD3d12Heap;
+};
} // namespace dawn::native::d3d12
#endif // SRC_DAWN_NATIVE_D3D12_HEAPD3D12_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/IntegerTypes.h b/chromium/third_party/dawn/src/dawn/native/d3d12/IntegerTypes.h
index eff3d0d789c..a92cfae3fae 100644
--- a/chromium/third_party/dawn/src/dawn/native/d3d12/IntegerTypes.h
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/IntegerTypes.h
@@ -15,16 +15,16 @@
#ifndef SRC_DAWN_NATIVE_D3D12_INTEGERTYPES_H_
#define SRC_DAWN_NATIVE_D3D12_INTEGERTYPES_H_
+#include <cstdint>
+
#include "dawn/common/Constants.h"
#include "dawn/common/TypedInteger.h"
-#include <cstdint>
-
namespace dawn::native::d3d12 {
- // An ID used to desambiguate between multiple uses of the same descriptor heap in the
- // BindGroup allocations.
- using HeapVersionID = TypedInteger<struct HeapVersionIDT, uint64_t>;
+// An ID used to disambiguate between multiple uses of the same descriptor heap in the
+// BindGroup allocations.
+using HeapVersionID = TypedInteger<struct HeapVersionIDT, uint64_t>;
} // namespace dawn::native::d3d12
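
HeapVersionID is declared as a TypedInteger so that a descriptor-heap version cannot be silently mixed with other 64-bit counters such as execution serials. A minimal strong-typedef sketch of that idea follows; StrongId, ToyHeapVersionID and ToySerial are illustrative only and are not the dawn/common/TypedInteger.h implementation.

#include <cstdint>

template <typename Tag, typename T>
struct StrongId {
    T value{};
    constexpr bool operator==(const StrongId& other) const { return value == other.value; }
    constexpr StrongId Next() const { return StrongId{value + 1}; }
};

// Distinct tag types make the two IDs non-interchangeable at compile time.
using ToyHeapVersionID = StrongId<struct ToyHeapVersionIDTag, uint64_t>;
using ToySerial = StrongId<struct ToySerialTag, uint64_t>;

int main() {
    ToyHeapVersionID version{1};
    ToySerial serial{7};
    // version == serial;  // would not compile: different tag types
    (void)serial;
    return version.Next() == ToyHeapVersionID{2} ? 0 : 1;
}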
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/NativeSwapChainImplD3D12.cpp b/chromium/third_party/dawn/src/dawn/native/d3d12/NativeSwapChainImplD3D12.cpp
index 5156af58300..de70081ef5f 100644
--- a/chromium/third_party/dawn/src/dawn/native/d3d12/NativeSwapChainImplD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/NativeSwapChainImplD3D12.cpp
@@ -20,101 +20,98 @@
namespace dawn::native::d3d12 {
- namespace {
- DXGI_USAGE D3D12SwapChainBufferUsage(WGPUTextureUsage allowedUsages) {
- DXGI_USAGE usage = DXGI_CPU_ACCESS_NONE;
- if (allowedUsages & WGPUTextureUsage_TextureBinding) {
- usage |= DXGI_USAGE_SHADER_INPUT;
- }
- if (allowedUsages & WGPUTextureUsage_StorageBinding) {
- usage |= DXGI_USAGE_UNORDERED_ACCESS;
- }
- if (allowedUsages & WGPUTextureUsage_RenderAttachment) {
- usage |= DXGI_USAGE_RENDER_TARGET_OUTPUT;
- }
- return usage;
- }
-
- static constexpr unsigned int kFrameCount = 3;
- } // anonymous namespace
-
- NativeSwapChainImpl::NativeSwapChainImpl(Device* device, HWND window)
- : mWindow(window), mDevice(device), mInterval(1) {
+namespace {
+DXGI_USAGE D3D12SwapChainBufferUsage(WGPUTextureUsage allowedUsages) {
+ DXGI_USAGE usage = DXGI_CPU_ACCESS_NONE;
+ if (allowedUsages & WGPUTextureUsage_TextureBinding) {
+ usage |= DXGI_USAGE_SHADER_INPUT;
}
-
- NativeSwapChainImpl::~NativeSwapChainImpl() {
+ if (allowedUsages & WGPUTextureUsage_StorageBinding) {
+ usage |= DXGI_USAGE_UNORDERED_ACCESS;
}
-
- void NativeSwapChainImpl::Init(DawnWSIContextD3D12* /*context*/) {
+ if (allowedUsages & WGPUTextureUsage_RenderAttachment) {
+ usage |= DXGI_USAGE_RENDER_TARGET_OUTPUT;
}
+ return usage;
+}
- DawnSwapChainError NativeSwapChainImpl::Configure(WGPUTextureFormat format,
- WGPUTextureUsage usage,
- uint32_t width,
- uint32_t height) {
- ASSERT(width > 0);
- ASSERT(height > 0);
- ASSERT(format == static_cast<WGPUTextureFormat>(GetPreferredFormat()));
-
- ComPtr<IDXGIFactory4> factory = mDevice->GetFactory();
- ComPtr<ID3D12CommandQueue> queue = mDevice->GetCommandQueue();
-
- mInterval = mDevice->IsToggleEnabled(Toggle::TurnOffVsync) == true ? 0 : 1;
-
- // Create the D3D12 swapchain, assuming only two buffers for now
- DXGI_SWAP_CHAIN_DESC1 swapChainDesc = {};
- swapChainDesc.Width = width;
- swapChainDesc.Height = height;
- swapChainDesc.Format = D3D12TextureFormat(GetPreferredFormat());
- swapChainDesc.BufferUsage = D3D12SwapChainBufferUsage(usage);
- swapChainDesc.BufferCount = kFrameCount;
- swapChainDesc.SwapEffect = DXGI_SWAP_EFFECT_FLIP_DISCARD;
- swapChainDesc.SampleDesc.Count = 1;
- swapChainDesc.SampleDesc.Quality = 0;
-
- ComPtr<IDXGISwapChain1> swapChain1;
- ASSERT_SUCCESS(factory->CreateSwapChainForHwnd(queue.Get(), mWindow, &swapChainDesc,
- nullptr, nullptr, &swapChain1));
-
- ASSERT_SUCCESS(swapChain1.As(&mSwapChain));
-
- // Gather the resources that will be used to present to the swapchain
- mBuffers.resize(kFrameCount);
- for (uint32_t i = 0; i < kFrameCount; ++i) {
- ASSERT_SUCCESS(mSwapChain->GetBuffer(i, IID_PPV_ARGS(&mBuffers[i])));
- }
-
- // Set the initial serial of buffers to 0 so that we don't wait on them when they are first
- // used
- mBufferSerials.resize(kFrameCount, ExecutionSerial(0));
-
- return DAWN_SWAP_CHAIN_NO_ERROR;
- }
+static constexpr unsigned int kFrameCount = 3;
+} // anonymous namespace
- DawnSwapChainError NativeSwapChainImpl::GetNextTexture(DawnSwapChainNextTexture* nextTexture) {
- mCurrentBuffer = mSwapChain->GetCurrentBackBufferIndex();
- nextTexture->texture.ptr = mBuffers[mCurrentBuffer].Get();
+NativeSwapChainImpl::NativeSwapChainImpl(Device* device, HWND window)
+ : mWindow(window), mDevice(device), mInterval(1) {}
- // TODO(crbug.com/dawn/269) Currently we force the CPU to wait for the GPU to be finished
- // with the buffer. Ideally the synchronization should be all done on the GPU.
- ASSERT(mDevice->WaitForSerial(mBufferSerials[mCurrentBuffer]).IsSuccess());
+NativeSwapChainImpl::~NativeSwapChainImpl() {}
- return DAWN_SWAP_CHAIN_NO_ERROR;
- }
+void NativeSwapChainImpl::Init(DawnWSIContextD3D12* /*context*/) {}
- DawnSwapChainError NativeSwapChainImpl::Present() {
- // This assumes the texture has already been transition to the PRESENT state.
+DawnSwapChainError NativeSwapChainImpl::Configure(WGPUTextureFormat format,
+ WGPUTextureUsage usage,
+ uint32_t width,
+ uint32_t height) {
+ ASSERT(width > 0);
+ ASSERT(height > 0);
+ ASSERT(format == static_cast<WGPUTextureFormat>(GetPreferredFormat()));
- ASSERT_SUCCESS(mSwapChain->Present(mInterval, 0));
- // TODO(crbug.com/dawn/833): Make the serial ticking implicit.
- ASSERT(mDevice->NextSerial().IsSuccess());
+ ComPtr<IDXGIFactory4> factory = mDevice->GetFactory();
+ ComPtr<ID3D12CommandQueue> queue = mDevice->GetCommandQueue();
- mBufferSerials[mCurrentBuffer] = mDevice->GetPendingCommandSerial();
- return DAWN_SWAP_CHAIN_NO_ERROR;
- }
+ mInterval = mDevice->IsToggleEnabled(Toggle::TurnOffVsync) == true ? 0 : 1;
- wgpu::TextureFormat NativeSwapChainImpl::GetPreferredFormat() const {
- return wgpu::TextureFormat::RGBA8Unorm;
+    // Create the D3D12 swapchain with kFrameCount back buffers
+ DXGI_SWAP_CHAIN_DESC1 swapChainDesc = {};
+ swapChainDesc.Width = width;
+ swapChainDesc.Height = height;
+ swapChainDesc.Format = D3D12TextureFormat(GetPreferredFormat());
+ swapChainDesc.BufferUsage = D3D12SwapChainBufferUsage(usage);
+ swapChainDesc.BufferCount = kFrameCount;
+ swapChainDesc.SwapEffect = DXGI_SWAP_EFFECT_FLIP_DISCARD;
+ swapChainDesc.SampleDesc.Count = 1;
+ swapChainDesc.SampleDesc.Quality = 0;
+
+ ComPtr<IDXGISwapChain1> swapChain1;
+ ASSERT_SUCCESS(factory->CreateSwapChainForHwnd(queue.Get(), mWindow, &swapChainDesc, nullptr,
+ nullptr, &swapChain1));
+
+ ASSERT_SUCCESS(swapChain1.As(&mSwapChain));
+
+ // Gather the resources that will be used to present to the swapchain
+ mBuffers.resize(kFrameCount);
+ for (uint32_t i = 0; i < kFrameCount; ++i) {
+ ASSERT_SUCCESS(mSwapChain->GetBuffer(i, IID_PPV_ARGS(&mBuffers[i])));
}
+ // Set the initial serial of buffers to 0 so that we don't wait on them when they are first
+ // used
+ mBufferSerials.resize(kFrameCount, ExecutionSerial(0));
+
+ return DAWN_SWAP_CHAIN_NO_ERROR;
+}
+
+DawnSwapChainError NativeSwapChainImpl::GetNextTexture(DawnSwapChainNextTexture* nextTexture) {
+ mCurrentBuffer = mSwapChain->GetCurrentBackBufferIndex();
+ nextTexture->texture.ptr = mBuffers[mCurrentBuffer].Get();
+
+ // TODO(crbug.com/dawn/269) Currently we force the CPU to wait for the GPU to be finished
+ // with the buffer. Ideally the synchronization should be all done on the GPU.
+ ASSERT(mDevice->WaitForSerial(mBufferSerials[mCurrentBuffer]).IsSuccess());
+
+ return DAWN_SWAP_CHAIN_NO_ERROR;
+}
+
+DawnSwapChainError NativeSwapChainImpl::Present() {
+    // This assumes the texture has already been transitioned to the PRESENT state.
+
+ ASSERT_SUCCESS(mSwapChain->Present(mInterval, 0));
+ // TODO(crbug.com/dawn/833): Make the serial ticking implicit.
+ ASSERT(mDevice->NextSerial().IsSuccess());
+
+ mBufferSerials[mCurrentBuffer] = mDevice->GetPendingCommandSerial();
+ return DAWN_SWAP_CHAIN_NO_ERROR;
+}
+
+wgpu::TextureFormat NativeSwapChainImpl::GetPreferredFormat() const {
+ return wgpu::TextureFormat::RGBA8Unorm;
+}
+
} // namespace dawn::native::d3d12
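
The swapchain above keeps one ExecutionSerial per back buffer: the serials start at 0 so the first acquisition never waits, Present records the pending command serial for the presented buffer, and GetNextTexture waits on that serial before the buffer is reused. Below is a self-contained sketch of that bookkeeping; ToySwapChain, pendingSerial and completedSerial are hypothetical stand-ins for the device's fence-based serial tracking.

#include <array>
#include <cstdint>

constexpr uint32_t kBufferCount = 3;  // matches the triple buffering above

struct ToySwapChain {
    std::array<uint64_t, kBufferCount> bufferSerials{};  // 0 == never presented, no wait needed
    uint64_t pendingSerial = 1;    // serial of the commands currently being recorded
    uint64_t completedSerial = 0;  // last serial the GPU has finished
    uint32_t current = 0;

    void Acquire() {
        // Stand-in for WaitForSerial(mBufferSerials[mCurrentBuffer]).
        while (completedSerial < bufferSerials[current]) {
            ++completedSerial;  // a real backend blocks on a fence here
        }
    }

    void Present() {
        bufferSerials[current] = pendingSerial++;  // GetPendingCommandSerial()
        current = (current + 1) % kBufferCount;    // GetCurrentBackBufferIndex()
    }
};

int main() {
    ToySwapChain swapChain;
    for (int frame = 0; frame < 6; ++frame) {  // cycle through the buffer ring twice
        swapChain.Acquire();
        swapChain.Present();
    }
    return 0;
}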
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/NativeSwapChainImplD3D12.h b/chromium/third_party/dawn/src/dawn/native/d3d12/NativeSwapChainImplD3D12.h
index 5eee004d677..6bedd2d8d7e 100644
--- a/chromium/third_party/dawn/src/dawn/native/d3d12/NativeSwapChainImplD3D12.h
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/NativeSwapChainImplD3D12.h
@@ -15,45 +15,45 @@
#ifndef SRC_DAWN_NATIVE_D3D12_NATIVESWAPCHAINIMPLD3D12_H_
#define SRC_DAWN_NATIVE_D3D12_NATIVESWAPCHAINIMPLD3D12_H_
+#include <vector>
+
#include "dawn/native/d3d12/d3d12_platform.h"
#include "dawn/dawn_wsi.h"
#include "dawn/native/IntegerTypes.h"
#include "dawn/native/dawn_platform.h"
-#include <vector>
-
namespace dawn::native::d3d12 {
- class Device;
+class Device;
- class NativeSwapChainImpl {
- public:
- using WSIContext = DawnWSIContextD3D12;
+class NativeSwapChainImpl {
+ public:
+ using WSIContext = DawnWSIContextD3D12;
- NativeSwapChainImpl(Device* device, HWND window);
- ~NativeSwapChainImpl();
+ NativeSwapChainImpl(Device* device, HWND window);
+ ~NativeSwapChainImpl();
- void Init(DawnWSIContextD3D12* context);
- DawnSwapChainError Configure(WGPUTextureFormat format,
- WGPUTextureUsage,
- uint32_t width,
- uint32_t height);
- DawnSwapChainError GetNextTexture(DawnSwapChainNextTexture* nextTexture);
- DawnSwapChainError Present();
+ void Init(DawnWSIContextD3D12* context);
+ DawnSwapChainError Configure(WGPUTextureFormat format,
+ WGPUTextureUsage,
+ uint32_t width,
+ uint32_t height);
+ DawnSwapChainError GetNextTexture(DawnSwapChainNextTexture* nextTexture);
+ DawnSwapChainError Present();
- wgpu::TextureFormat GetPreferredFormat() const;
+ wgpu::TextureFormat GetPreferredFormat() const;
- private:
- HWND mWindow = nullptr;
- Device* mDevice = nullptr;
- UINT mInterval;
+ private:
+ HWND mWindow = nullptr;
+ Device* mDevice = nullptr;
+ UINT mInterval;
- ComPtr<IDXGISwapChain3> mSwapChain = nullptr;
- std::vector<ComPtr<ID3D12Resource>> mBuffers;
- std::vector<ExecutionSerial> mBufferSerials;
- uint32_t mCurrentBuffer;
- };
+ ComPtr<IDXGISwapChain3> mSwapChain = nullptr;
+ std::vector<ComPtr<ID3D12Resource>> mBuffers;
+ std::vector<ExecutionSerial> mBufferSerials;
+ uint32_t mCurrentBuffer;
+};
} // namespace dawn::native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/PageableD3D12.cpp b/chromium/third_party/dawn/src/dawn/native/d3d12/PageableD3D12.cpp
index 13942092700..72f70c08cb7 100644
--- a/chromium/third_party/dawn/src/dawn/native/d3d12/PageableD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/PageableD3D12.cpp
@@ -14,63 +14,62 @@
#include "dawn/native/d3d12/PageableD3D12.h"
+#include <utility>
+
namespace dawn::native::d3d12 {
- Pageable::Pageable(ComPtr<ID3D12Pageable> d3d12Pageable,
- MemorySegment memorySegment,
- uint64_t size)
- : mD3d12Pageable(std::move(d3d12Pageable)), mMemorySegment(memorySegment), mSize(size) {
- }
+Pageable::Pageable(ComPtr<ID3D12Pageable> d3d12Pageable, MemorySegment memorySegment, uint64_t size)
+ : mD3d12Pageable(std::move(d3d12Pageable)), mMemorySegment(memorySegment), mSize(size) {}
- // When a pageable is destroyed, it no longer resides in resident memory, so we must evict
- // it from the LRU cache. If this heap is not manually removed from the LRU-cache, the
- // ResidencyManager will attempt to use it after it has been deallocated.
- Pageable::~Pageable() {
- if (IsInResidencyLRUCache()) {
- RemoveFromList();
- }
+// When a pageable is destroyed, it no longer resides in resident memory, so we must evict
+// it from the LRU cache. If this heap is not manually removed from the LRU-cache, the
+// ResidencyManager will attempt to use it after it has been deallocated.
+Pageable::~Pageable() {
+ if (IsInResidencyLRUCache()) {
+ RemoveFromList();
}
+}
- ID3D12Pageable* Pageable::GetD3D12Pageable() const {
- return mD3d12Pageable.Get();
- }
+ID3D12Pageable* Pageable::GetD3D12Pageable() const {
+ return mD3d12Pageable.Get();
+}
- ExecutionSerial Pageable::GetLastUsage() const {
- return mLastUsage;
- }
+ExecutionSerial Pageable::GetLastUsage() const {
+ return mLastUsage;
+}
- void Pageable::SetLastUsage(ExecutionSerial serial) {
- mLastUsage = serial;
- }
+void Pageable::SetLastUsage(ExecutionSerial serial) {
+ mLastUsage = serial;
+}
- ExecutionSerial Pageable::GetLastSubmission() const {
- return mLastSubmission;
- }
+ExecutionSerial Pageable::GetLastSubmission() const {
+ return mLastSubmission;
+}
- void Pageable::SetLastSubmission(ExecutionSerial serial) {
- mLastSubmission = serial;
- }
+void Pageable::SetLastSubmission(ExecutionSerial serial) {
+ mLastSubmission = serial;
+}
- MemorySegment Pageable::GetMemorySegment() const {
- return mMemorySegment;
- }
+MemorySegment Pageable::GetMemorySegment() const {
+ return mMemorySegment;
+}
- uint64_t Pageable::GetSize() const {
- return mSize;
- }
+uint64_t Pageable::GetSize() const {
+ return mSize;
+}
- bool Pageable::IsInResidencyLRUCache() const {
- return IsInList();
- }
+bool Pageable::IsInResidencyLRUCache() const {
+ return IsInList();
+}
- void Pageable::IncrementResidencyLock() {
- mResidencyLockRefCount++;
- }
+void Pageable::IncrementResidencyLock() {
+ mResidencyLockRefCount++;
+}
- void Pageable::DecrementResidencyLock() {
- mResidencyLockRefCount--;
- }
+void Pageable::DecrementResidencyLock() {
+ mResidencyLockRefCount--;
+}
- bool Pageable::IsResidencyLocked() const {
- return mResidencyLockRefCount != 0;
- }
+bool Pageable::IsResidencyLocked() const {
+ return mResidencyLockRefCount != 0;
+}
} // namespace dawn::native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/PageableD3D12.h b/chromium/third_party/dawn/src/dawn/native/d3d12/PageableD3D12.h
index a52a8a16167..c11aba6ff5d 100644
--- a/chromium/third_party/dawn/src/dawn/native/d3d12/PageableD3D12.h
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/PageableD3D12.h
@@ -21,60 +21,60 @@
#include "dawn/native/d3d12/d3d12_platform.h"
namespace dawn::native::d3d12 {
- // This class is used to represent ID3D12Pageable allocations, and also serves as a node within
- // the ResidencyManager's LRU cache. This node is inserted into the LRU-cache when it is first
- // allocated, and any time it is scheduled to be used by the GPU. This node is removed from the
- // LRU cache when it is evicted from resident memory due to budget constraints, or when the
- // pageable allocation is released.
- class Pageable : public LinkNode<Pageable> {
- public:
- Pageable(ComPtr<ID3D12Pageable> d3d12Pageable, MemorySegment memorySegment, uint64_t size);
- ~Pageable();
+// This class is used to represent ID3D12Pageable allocations, and also serves as a node within
+// the ResidencyManager's LRU cache. This node is inserted into the LRU-cache when it is first
+// allocated, and any time it is scheduled to be used by the GPU. This node is removed from the
+// LRU cache when it is evicted from resident memory due to budget constraints, or when the
+// pageable allocation is released.
+class Pageable : public LinkNode<Pageable> {
+ public:
+ Pageable(ComPtr<ID3D12Pageable> d3d12Pageable, MemorySegment memorySegment, uint64_t size);
+ ~Pageable();
- ID3D12Pageable* GetD3D12Pageable() const;
+ ID3D12Pageable* GetD3D12Pageable() const;
- // We set mLastRecordingSerial to denote the serial this pageable was last recorded to be
- // used. We must check this serial against the current serial when recording usages to
- // ensure we do not process residency for this pageable multiple times.
- ExecutionSerial GetLastUsage() const;
- void SetLastUsage(ExecutionSerial serial);
+    // We set mLastUsage to denote the serial at which this pageable was last recorded to be
+ // used. We must check this serial against the current serial when recording usages to
+ // ensure we do not process residency for this pageable multiple times.
+ ExecutionSerial GetLastUsage() const;
+ void SetLastUsage(ExecutionSerial serial);
- // The residency manager must know the last serial that any portion of the pageable was
- // submitted to be used so that we can ensure this pageable stays resident in memory at
- // least until that serial has completed.
- ExecutionSerial GetLastSubmission() const;
- void SetLastSubmission(ExecutionSerial serial);
+ // The residency manager must know the last serial that any portion of the pageable was
+ // submitted to be used so that we can ensure this pageable stays resident in memory at
+ // least until that serial has completed.
+ ExecutionSerial GetLastSubmission() const;
+ void SetLastSubmission(ExecutionSerial serial);
- MemorySegment GetMemorySegment() const;
+ MemorySegment GetMemorySegment() const;
- uint64_t GetSize() const;
+ uint64_t GetSize() const;
- bool IsInResidencyLRUCache() const;
+ bool IsInResidencyLRUCache() const;
- // In some scenarios, such as async buffer mapping or descriptor heaps, we must lock
- // residency to ensure the pageable cannot be evicted. Because multiple buffers may be
- // mapped in a single heap, we must track the number of resources currently locked.
- void IncrementResidencyLock();
- void DecrementResidencyLock();
- bool IsResidencyLocked() const;
+ // In some scenarios, such as async buffer mapping or descriptor heaps, we must lock
+ // residency to ensure the pageable cannot be evicted. Because multiple buffers may be
+ // mapped in a single heap, we must track the number of resources currently locked.
+ void IncrementResidencyLock();
+ void DecrementResidencyLock();
+ bool IsResidencyLocked() const;
- protected:
- ComPtr<ID3D12Pageable> mD3d12Pageable;
+ protected:
+ ComPtr<ID3D12Pageable> mD3d12Pageable;
- private:
- // mLastUsage denotes the last time this pageable was recorded for use.
- ExecutionSerial mLastUsage = ExecutionSerial(0);
- // mLastSubmission denotes the last time this pageable was submitted to the GPU. Note that
- // although this variable often contains the same value as mLastUsage, it can differ in some
- // situations. When some asynchronous APIs (like WriteBuffer) are called, mLastUsage is
- // updated upon the call, but the backend operation is deferred until the next submission
- // to the GPU. This makes mLastSubmission unique from mLastUsage, and allows us to
- // accurately identify when a pageable can be evicted.
- ExecutionSerial mLastSubmission = ExecutionSerial(0);
- MemorySegment mMemorySegment;
- uint32_t mResidencyLockRefCount = 0;
- uint64_t mSize = 0;
- };
+ private:
+ // mLastUsage denotes the last time this pageable was recorded for use.
+ ExecutionSerial mLastUsage = ExecutionSerial(0);
+ // mLastSubmission denotes the last time this pageable was submitted to the GPU. Note that
+ // although this variable often contains the same value as mLastUsage, it can differ in some
+ // situations. When some asynchronous APIs (like WriteBuffer) are called, mLastUsage is
+ // updated upon the call, but the backend operation is deferred until the next submission
+ // to the GPU. This makes mLastSubmission unique from mLastUsage, and allows us to
+ // accurately identify when a pageable can be evicted.
+ ExecutionSerial mLastSubmission = ExecutionSerial(0);
+ MemorySegment mMemorySegment;
+ uint32_t mResidencyLockRefCount = 0;
+ uint64_t mSize = 0;
+};
} // namespace dawn::native::d3d12
#endif // SRC_DAWN_NATIVE_D3D12_PAGEABLED3D12_H_
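
The residency lock described above guarantees that a pageable pinned by an async buffer mapping or an in-use descriptor heap is never evicted, even when it is the least recently used entry. The sketch below illustrates that eviction rule; ToyPageable, EvictOne and the std::list LRU are hypothetical stand-ins for the ResidencyManager's real cache.

#include <cstdint>
#include <list>

struct ToyPageable {
    uint64_t size = 0;
    uint32_t residencyLockCount = 0;  // IncrementResidencyLock / DecrementResidencyLock
    bool IsResidencyLocked() const { return residencyLockCount != 0; }
};

// Evicts the least recently used unlocked pageable and returns the bytes reclaimed.
uint64_t EvictOne(std::list<ToyPageable*>& lru) {
    for (auto it = lru.begin(); it != lru.end(); ++it) {
        if (!(*it)->IsResidencyLocked()) {
            uint64_t reclaimed = (*it)->size;
            lru.erase(it);  // the real manager would also evict the heap on the D3D12 device
            return reclaimed;
        }
    }
    return 0;  // everything is locked (e.g. mapped buffers); nothing can be evicted
}

int main() {
    ToyPageable mapped{64, 1};  // locked: the heap behind an async-mapped buffer
    ToyPageable idle{128, 0};   // unlocked: eligible for eviction
    std::list<ToyPageable*> lru{&mapped, &idle};  // front == least recently used
    return EvictOne(lru) == 128 ? 0 : 1;
}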
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/PipelineLayoutD3D12.cpp b/chromium/third_party/dawn/src/dawn/native/d3d12/PipelineLayoutD3D12.cpp
index 794a7634a1e..636fae233d0 100644
--- a/chromium/third_party/dawn/src/dawn/native/d3d12/PipelineLayoutD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/PipelineLayoutD3D12.cpp
@@ -13,6 +13,8 @@
// limitations under the License.
#include "dawn/native/d3d12/PipelineLayoutD3D12.h"
+
+#include <limits>
#include <sstream>
#include "dawn/common/Assert.h"
@@ -25,353 +27,439 @@
using Microsoft::WRL::ComPtr;
namespace dawn::native::d3d12 {
- namespace {
-
- // Reserve register names for internal use. This registers map to bindings in the shader,
- // but are not directly related to allocation of the root signature.
- // In the root signature, it the index of the root parameter where these registers are
- // used that determines the layout of the root signature.
- static constexpr uint32_t kRenderOrComputeInternalRegisterSpace = kMaxBindGroups + 1;
- static constexpr uint32_t kRenderOrComputeInternalBaseRegister = 0;
+namespace {
- static constexpr uint32_t kDynamicStorageBufferLengthsRegisterSpace = kMaxBindGroups + 2;
- static constexpr uint32_t kDynamicStorageBufferLengthsBaseRegister = 0;
+// Reserve register names for internal use. These registers map to bindings in the shader,
+// but are not directly related to allocation of the root signature.
+// In the root signature, it is the index of the root parameter where these registers are
+// used that determines the layout of the root signature.
+static constexpr uint32_t kRenderOrComputeInternalRegisterSpace = kMaxBindGroups + 1;
+static constexpr uint32_t kRenderOrComputeInternalBaseRegister = 0;
- static constexpr uint32_t kInvalidDynamicStorageBufferLengthsParameterIndex =
- std::numeric_limits<uint32_t>::max();
+static constexpr uint32_t kDynamicStorageBufferLengthsRegisterSpace = kMaxBindGroups + 2;
+static constexpr uint32_t kDynamicStorageBufferLengthsBaseRegister = 0;
- D3D12_SHADER_VISIBILITY ShaderVisibilityType(wgpu::ShaderStage visibility) {
- ASSERT(visibility != wgpu::ShaderStage::None);
-
- if (visibility == wgpu::ShaderStage::Vertex) {
- return D3D12_SHADER_VISIBILITY_VERTEX;
- }
+static constexpr uint32_t kInvalidDynamicStorageBufferLengthsParameterIndex =
+ std::numeric_limits<uint32_t>::max();
- if (visibility == wgpu::ShaderStage::Fragment) {
- return D3D12_SHADER_VISIBILITY_PIXEL;
- }
-
- // For compute or any two combination of stages, visibility must be ALL
- return D3D12_SHADER_VISIBILITY_ALL;
- }
+D3D12_SHADER_VISIBILITY ShaderVisibilityType(wgpu::ShaderStage visibility) {
+ ASSERT(visibility != wgpu::ShaderStage::None);
- D3D12_ROOT_PARAMETER_TYPE RootParameterType(wgpu::BufferBindingType type) {
- switch (type) {
- case wgpu::BufferBindingType::Uniform:
- return D3D12_ROOT_PARAMETER_TYPE_CBV;
- case wgpu::BufferBindingType::Storage:
- case kInternalStorageBufferBinding:
- return D3D12_ROOT_PARAMETER_TYPE_UAV;
- case wgpu::BufferBindingType::ReadOnlyStorage:
- return D3D12_ROOT_PARAMETER_TYPE_SRV;
- case wgpu::BufferBindingType::Undefined:
- UNREACHABLE();
- }
- }
-
- } // anonymous namespace
-
- ResultOrError<Ref<PipelineLayout>> PipelineLayout::Create(
- Device* device,
- const PipelineLayoutDescriptor* descriptor) {
- Ref<PipelineLayout> layout = AcquireRef(new PipelineLayout(device, descriptor));
- DAWN_TRY(layout->Initialize());
- return layout;
+ if (visibility == wgpu::ShaderStage::Vertex) {
+ return D3D12_SHADER_VISIBILITY_VERTEX;
}
- MaybeError PipelineLayout::Initialize() {
- Device* device = ToBackend(GetDevice());
- // Parameters are D3D12_ROOT_PARAMETER_TYPE which is either a root table, constant, or
- // descriptor.
- std::vector<D3D12_ROOT_PARAMETER> rootParameters;
-
- size_t rangesCount = 0;
- for (BindGroupIndex group : IterateBitSet(GetBindGroupLayoutsMask())) {
- const BindGroupLayout* bindGroupLayout = ToBackend(GetBindGroupLayout(group));
- rangesCount += bindGroupLayout->GetCbvUavSrvDescriptorRanges().size() +
- bindGroupLayout->GetSamplerDescriptorRanges().size();
- }
-
- // We are taking pointers to `ranges`, so we cannot let it resize while we're pushing to it.
- std::vector<D3D12_DESCRIPTOR_RANGE> ranges(rangesCount);
-
- uint32_t rangeIndex = 0;
-
- for (BindGroupIndex group : IterateBitSet(GetBindGroupLayoutsMask())) {
- const BindGroupLayout* bindGroupLayout = ToBackend(GetBindGroupLayout(group));
-
- // Set the root descriptor table parameter and copy ranges. Ranges are offset by the
- // bind group index Returns whether or not the parameter was set. A root parameter is
- // not set if the number of ranges is 0
- auto SetRootDescriptorTable =
- [&](const std::vector<D3D12_DESCRIPTOR_RANGE>& descriptorRanges) -> bool {
- auto rangeCount = descriptorRanges.size();
- if (rangeCount == 0) {
- return false;
- }
+ if (visibility == wgpu::ShaderStage::Fragment) {
+ return D3D12_SHADER_VISIBILITY_PIXEL;
+ }
- D3D12_ROOT_PARAMETER rootParameter = {};
- rootParameter.ParameterType = D3D12_ROOT_PARAMETER_TYPE_DESCRIPTOR_TABLE;
- rootParameter.ShaderVisibility = D3D12_SHADER_VISIBILITY_ALL;
- rootParameter.DescriptorTable.NumDescriptorRanges = rangeCount;
- rootParameter.DescriptorTable.pDescriptorRanges = &ranges[rangeIndex];
+    // For compute, or any combination of two stages, visibility must be ALL
+ return D3D12_SHADER_VISIBILITY_ALL;
+}
+
+D3D12_ROOT_PARAMETER_TYPE RootParameterType(wgpu::BufferBindingType type) {
+ switch (type) {
+ case wgpu::BufferBindingType::Uniform:
+ return D3D12_ROOT_PARAMETER_TYPE_CBV;
+ case wgpu::BufferBindingType::Storage:
+ case kInternalStorageBufferBinding:
+ return D3D12_ROOT_PARAMETER_TYPE_UAV;
+ case wgpu::BufferBindingType::ReadOnlyStorage:
+ return D3D12_ROOT_PARAMETER_TYPE_SRV;
+ case wgpu::BufferBindingType::Undefined:
+ UNREACHABLE();
+ }
+}
+
+} // anonymous namespace
+
+ResultOrError<Ref<PipelineLayout>> PipelineLayout::Create(
+ Device* device,
+ const PipelineLayoutDescriptor* descriptor) {
+ Ref<PipelineLayout> layout = AcquireRef(new PipelineLayout(device, descriptor));
+ DAWN_TRY(layout->Initialize());
+ return layout;
+}
+
+MaybeError PipelineLayout::Initialize() {
+ Device* device = ToBackend(GetDevice());
+ // Parameters are D3D12_ROOT_PARAMETER_TYPE which is either a root table, constant, or
+ // descriptor.
+ std::vector<D3D12_ROOT_PARAMETER> rootParameters;
+
+ size_t rangesCount = 0;
+ for (BindGroupIndex group : IterateBitSet(GetBindGroupLayoutsMask())) {
+ const BindGroupLayout* bindGroupLayout = ToBackend(GetBindGroupLayout(group));
+ rangesCount += bindGroupLayout->GetCbvUavSrvDescriptorRanges().size() +
+ bindGroupLayout->GetSamplerDescriptorRanges().size();
+ }
- for (auto& range : descriptorRanges) {
- ASSERT(range.RegisterSpace == kRegisterSpacePlaceholder);
- ranges[rangeIndex] = range;
- ranges[rangeIndex].RegisterSpace = static_cast<uint32_t>(group);
- rangeIndex++;
- }
+ // We are taking pointers to `ranges`, so we cannot let it resize while we're pushing to it.
+ std::vector<D3D12_DESCRIPTOR_RANGE> ranges(rangesCount);
- rootParameters.emplace_back(rootParameter);
+ uint32_t rangeIndex = 0;
- return true;
- };
+ for (BindGroupIndex group : IterateBitSet(GetBindGroupLayoutsMask())) {
+ const BindGroupLayout* bindGroupLayout = ToBackend(GetBindGroupLayout(group));
- if (SetRootDescriptorTable(bindGroupLayout->GetCbvUavSrvDescriptorRanges())) {
- mCbvUavSrvRootParameterInfo[group] = rootParameters.size() - 1;
+        // Set the root descriptor table parameter and copy ranges. Ranges are offset by the
+        // bind group index. Returns whether or not the parameter was set. A root parameter is
+        // not set if the number of ranges is 0.
+ auto SetRootDescriptorTable =
+ [&](const std::vector<D3D12_DESCRIPTOR_RANGE>& descriptorRanges) -> bool {
+ auto rangeCount = descriptorRanges.size();
+ if (rangeCount == 0) {
+ return false;
}
- if (SetRootDescriptorTable(bindGroupLayout->GetSamplerDescriptorRanges())) {
- mSamplerRootParameterInfo[group] = rootParameters.size() - 1;
- }
-
- // Init root descriptors in root signatures for dynamic buffer bindings.
- // These are packed at the beginning of the layout binding info.
- for (BindingIndex dynamicBindingIndex{0};
- dynamicBindingIndex < bindGroupLayout->GetDynamicBufferCount();
- ++dynamicBindingIndex) {
- const BindingInfo& bindingInfo =
- bindGroupLayout->GetBindingInfo(dynamicBindingIndex);
-
- if (bindingInfo.visibility == wgpu::ShaderStage::None) {
- // Skip dynamic buffers that are not visible. D3D12 does not have None
- // visibility.
- continue;
- }
- D3D12_ROOT_PARAMETER rootParameter = {};
-
- // Setup root descriptor.
- D3D12_ROOT_DESCRIPTOR rootDescriptor;
- rootDescriptor.ShaderRegister =
- bindGroupLayout->GetShaderRegister(dynamicBindingIndex);
- rootDescriptor.RegisterSpace = static_cast<uint32_t>(group);
-
- // Set root descriptors in root signatures.
- rootParameter.Descriptor = rootDescriptor;
- mDynamicRootParameterIndices[group][dynamicBindingIndex] = rootParameters.size();
+ D3D12_ROOT_PARAMETER rootParameter = {};
+ rootParameter.ParameterType = D3D12_ROOT_PARAMETER_TYPE_DESCRIPTOR_TABLE;
+ rootParameter.ShaderVisibility = D3D12_SHADER_VISIBILITY_ALL;
+ rootParameter.DescriptorTable.NumDescriptorRanges = rangeCount;
+ rootParameter.DescriptorTable.pDescriptorRanges = &ranges[rangeIndex];
+
+ for (auto& range : descriptorRanges) {
+ ASSERT(range.RegisterSpace == kRegisterSpacePlaceholder);
+ ranges[rangeIndex] = range;
+ ranges[rangeIndex].RegisterSpace = static_cast<uint32_t>(group);
+ rangeIndex++;
+ }
- // Set parameter types according to bind group layout descriptor.
- rootParameter.ParameterType = RootParameterType(bindingInfo.buffer.type);
+ rootParameters.emplace_back(rootParameter);
- // Set visibilities according to bind group layout descriptor.
- rootParameter.ShaderVisibility = ShaderVisibilityType(bindingInfo.visibility);
+ return true;
+ };
- rootParameters.emplace_back(rootParameter);
- }
+ if (SetRootDescriptorTable(bindGroupLayout->GetCbvUavSrvDescriptorRanges())) {
+ mCbvUavSrvRootParameterInfo[group] = rootParameters.size() - 1;
+ }
+ if (SetRootDescriptorTable(bindGroupLayout->GetSamplerDescriptorRanges())) {
+ mSamplerRootParameterInfo[group] = rootParameters.size() - 1;
}
- // Make sure that we added exactly the number of elements we expected. If we added more,
- // |ranges| will have resized and the pointers in the |rootParameter|s will be invalid.
- ASSERT(rangeIndex == rangesCount);
-
- D3D12_ROOT_PARAMETER renderOrComputeInternalConstants{};
- renderOrComputeInternalConstants.ShaderVisibility = D3D12_SHADER_VISIBILITY_ALL;
- renderOrComputeInternalConstants.ParameterType = D3D12_ROOT_PARAMETER_TYPE_32BIT_CONSTANTS;
- // Always allocate 3 constants for either:
- // - vertex_index and instance_index
- // - num_workgroups_x, num_workgroups_y and num_workgroups_z
- // NOTE: We should consider delaying root signature creation until we know how many values
- // we need
- renderOrComputeInternalConstants.Constants.Num32BitValues = 3;
- renderOrComputeInternalConstants.Constants.RegisterSpace =
- kRenderOrComputeInternalRegisterSpace;
- renderOrComputeInternalConstants.Constants.ShaderRegister =
- kRenderOrComputeInternalBaseRegister;
- mFirstIndexOffsetParameterIndex = rootParameters.size();
- mNumWorkgroupsParameterIndex = rootParameters.size();
- // NOTE: We should consider moving this entry to earlier in the root signature since offsets
- // would need to be updated often
- rootParameters.emplace_back(renderOrComputeInternalConstants);
-
- // Loops over all of the dynamic storage buffer bindings in the layout and build
- // a mapping from the binding to the next offset into the root constant array where
- // that dynamic storage buffer's binding size will be stored. The next register offset
- // to use is tracked with |dynamicStorageBufferLengthsShaderRegisterOffset|.
- // This data will be used by shader translation to emit a load from the root constant
- // array to use as the binding's size in runtime array calculations.
- // Each bind group's length data is stored contiguously in the root constant array,
- // so the loop also computes the first register offset for each group where the
- // data should start.
- uint32_t dynamicStorageBufferLengthsShaderRegisterOffset = 0;
- for (BindGroupIndex group : IterateBitSet(GetBindGroupLayoutsMask())) {
- const BindGroupLayoutBase* bgl = GetBindGroupLayout(group);
-
- mDynamicStorageBufferLengthInfo[group].firstRegisterOffset =
- dynamicStorageBufferLengthsShaderRegisterOffset;
- mDynamicStorageBufferLengthInfo[group].bindingAndRegisterOffsets.reserve(
- bgl->GetBindingCountInfo().dynamicStorageBufferCount);
-
- for (BindingIndex bindingIndex(0); bindingIndex < bgl->GetDynamicBufferCount();
- ++bindingIndex) {
- if (bgl->IsStorageBufferBinding(bindingIndex)) {
- mDynamicStorageBufferLengthInfo[group].bindingAndRegisterOffsets.push_back(
- {bgl->GetBindingInfo(bindingIndex).binding,
- dynamicStorageBufferLengthsShaderRegisterOffset++});
- }
+ // Init root descriptors in root signatures for dynamic buffer bindings.
+ // These are packed at the beginning of the layout binding info.
+ for (BindingIndex dynamicBindingIndex{0};
+ dynamicBindingIndex < bindGroupLayout->GetDynamicBufferCount();
+ ++dynamicBindingIndex) {
+ const BindingInfo& bindingInfo = bindGroupLayout->GetBindingInfo(dynamicBindingIndex);
+
+ if (bindingInfo.visibility == wgpu::ShaderStage::None) {
+ // Skip dynamic buffers that are not visible. D3D12 does not have None
+ // visibility.
+ continue;
}
- ASSERT(mDynamicStorageBufferLengthInfo[group].bindingAndRegisterOffsets.size() ==
- bgl->GetBindingCountInfo().dynamicStorageBufferCount);
- }
- ASSERT(dynamicStorageBufferLengthsShaderRegisterOffset <=
- kMaxDynamicStorageBuffersPerPipelineLayout);
-
- if (dynamicStorageBufferLengthsShaderRegisterOffset > 0) {
- D3D12_ROOT_PARAMETER dynamicStorageBufferLengthConstants{};
- dynamicStorageBufferLengthConstants.ShaderVisibility = D3D12_SHADER_VISIBILITY_ALL;
- dynamicStorageBufferLengthConstants.ParameterType =
- D3D12_ROOT_PARAMETER_TYPE_32BIT_CONSTANTS;
- dynamicStorageBufferLengthConstants.Constants.Num32BitValues =
- dynamicStorageBufferLengthsShaderRegisterOffset;
- dynamicStorageBufferLengthConstants.Constants.RegisterSpace =
- kDynamicStorageBufferLengthsRegisterSpace;
- dynamicStorageBufferLengthConstants.Constants.ShaderRegister =
- kDynamicStorageBufferLengthsBaseRegister;
- mDynamicStorageBufferLengthsParameterIndex = rootParameters.size();
- rootParameters.emplace_back(dynamicStorageBufferLengthConstants);
- } else {
- mDynamicStorageBufferLengthsParameterIndex =
- kInvalidDynamicStorageBufferLengthsParameterIndex;
- }
+ D3D12_ROOT_PARAMETER rootParameter = {};
- D3D12_ROOT_SIGNATURE_DESC rootSignatureDescriptor;
- rootSignatureDescriptor.NumParameters = rootParameters.size();
- rootSignatureDescriptor.pParameters = rootParameters.data();
- rootSignatureDescriptor.NumStaticSamplers = 0;
- rootSignatureDescriptor.pStaticSamplers = nullptr;
- rootSignatureDescriptor.Flags =
- D3D12_ROOT_SIGNATURE_FLAG_ALLOW_INPUT_ASSEMBLER_INPUT_LAYOUT;
-
- ComPtr<ID3DBlob> signature;
- ComPtr<ID3DBlob> error;
- HRESULT hr = device->GetFunctions()->d3d12SerializeRootSignature(
- &rootSignatureDescriptor, D3D_ROOT_SIGNATURE_VERSION_1, &signature, &error);
- if (DAWN_UNLIKELY(FAILED(hr))) {
- std::ostringstream messageStream;
- if (error) {
- messageStream << static_cast<const char*>(error->GetBufferPointer());
-
- // |error| is observed to always end with a \n, but is not
- // specified to do so, so we add an extra newline just in case.
- messageStream << std::endl;
- }
- messageStream << "D3D12 serialize root signature";
- DAWN_TRY(CheckHRESULT(hr, messageStream.str().c_str()));
- }
- DAWN_TRY(CheckHRESULT(device->GetD3D12Device()->CreateRootSignature(
- 0, signature->GetBufferPointer(), signature->GetBufferSize(),
- IID_PPV_ARGS(&mRootSignature)),
- "D3D12 create root signature"));
- return {};
- }
+ // Setup root descriptor.
+ D3D12_ROOT_DESCRIPTOR rootDescriptor;
+ rootDescriptor.ShaderRegister = bindGroupLayout->GetShaderRegister(dynamicBindingIndex);
+ rootDescriptor.RegisterSpace = static_cast<uint32_t>(group);
- uint32_t PipelineLayout::GetCbvUavSrvRootParameterIndex(BindGroupIndex group) const {
- ASSERT(group < kMaxBindGroupsTyped);
- return mCbvUavSrvRootParameterInfo[group];
- }
+ // Set root descriptors in root signatures.
+ rootParameter.Descriptor = rootDescriptor;
+ mDynamicRootParameterIndices[group][dynamicBindingIndex] = rootParameters.size();
- uint32_t PipelineLayout::GetSamplerRootParameterIndex(BindGroupIndex group) const {
- ASSERT(group < kMaxBindGroupsTyped);
- return mSamplerRootParameterInfo[group];
- }
+ // Set parameter types according to bind group layout descriptor.
+ rootParameter.ParameterType = RootParameterType(bindingInfo.buffer.type);
- ID3D12RootSignature* PipelineLayout::GetRootSignature() const {
- return mRootSignature.Get();
- }
+ // Set visibilities according to bind group layout descriptor.
+ rootParameter.ShaderVisibility = ShaderVisibilityType(bindingInfo.visibility);
- const PipelineLayout::DynamicStorageBufferLengthInfo&
- PipelineLayout::GetDynamicStorageBufferLengthInfo() const {
- return mDynamicStorageBufferLengthInfo;
+ rootParameters.emplace_back(rootParameter);
+ }
}
- uint32_t PipelineLayout::GetDynamicRootParameterIndex(BindGroupIndex group,
- BindingIndex bindingIndex) const {
- ASSERT(group < kMaxBindGroupsTyped);
- ASSERT(bindingIndex < kMaxDynamicBuffersPerPipelineLayoutTyped);
- ASSERT(GetBindGroupLayout(group)->GetBindingInfo(bindingIndex).buffer.hasDynamicOffset);
- ASSERT(GetBindGroupLayout(group)->GetBindingInfo(bindingIndex).visibility !=
- wgpu::ShaderStage::None);
- return mDynamicRootParameterIndices[group][bindingIndex];
- }
+ // Make sure that we added exactly the number of elements we expected. If we added more,
+ // |ranges| will have resized and the pointers in the |rootParameter|s will be invalid.
+ ASSERT(rangeIndex == rangesCount);
+
+ D3D12_ROOT_PARAMETER renderOrComputeInternalConstants{};
+ renderOrComputeInternalConstants.ShaderVisibility = D3D12_SHADER_VISIBILITY_ALL;
+ renderOrComputeInternalConstants.ParameterType = D3D12_ROOT_PARAMETER_TYPE_32BIT_CONSTANTS;
+ // Always allocate 3 constants for either:
+ // - vertex_index and instance_index
+ // - num_workgroups_x, num_workgroups_y and num_workgroups_z
+ // NOTE: We should consider delaying root signature creation until we know how many values
+ // we need
+ renderOrComputeInternalConstants.Constants.Num32BitValues = 3;
+ renderOrComputeInternalConstants.Constants.RegisterSpace =
+ kRenderOrComputeInternalRegisterSpace;
+ renderOrComputeInternalConstants.Constants.ShaderRegister =
+ kRenderOrComputeInternalBaseRegister;
+ mFirstIndexOffsetParameterIndex = rootParameters.size();
+ mNumWorkgroupsParameterIndex = rootParameters.size();
+ // NOTE: We should consider moving this entry to earlier in the root signature since offsets
+ // would need to be updated often
+ rootParameters.emplace_back(renderOrComputeInternalConstants);
+
+    // Loop over all of the dynamic storage buffer bindings in the layout and build
+ // a mapping from the binding to the next offset into the root constant array where
+ // that dynamic storage buffer's binding size will be stored. The next register offset
+ // to use is tracked with |dynamicStorageBufferLengthsShaderRegisterOffset|.
+ // This data will be used by shader translation to emit a load from the root constant
+ // array to use as the binding's size in runtime array calculations.
+ // Each bind group's length data is stored contiguously in the root constant array,
+ // so the loop also computes the first register offset for each group where the
+ // data should start.
+ uint32_t dynamicStorageBufferLengthsShaderRegisterOffset = 0;
+ for (BindGroupIndex group : IterateBitSet(GetBindGroupLayoutsMask())) {
+ const BindGroupLayoutBase* bgl = GetBindGroupLayout(group);
+
+ mDynamicStorageBufferLengthInfo[group].firstRegisterOffset =
+ dynamicStorageBufferLengthsShaderRegisterOffset;
+ mDynamicStorageBufferLengthInfo[group].bindingAndRegisterOffsets.reserve(
+ bgl->GetBindingCountInfo().dynamicStorageBufferCount);
+
+ for (BindingIndex bindingIndex(0); bindingIndex < bgl->GetDynamicBufferCount();
+ ++bindingIndex) {
+ if (bgl->IsStorageBufferBinding(bindingIndex)) {
+ mDynamicStorageBufferLengthInfo[group].bindingAndRegisterOffsets.push_back(
+ {bgl->GetBindingInfo(bindingIndex).binding,
+ dynamicStorageBufferLengthsShaderRegisterOffset++});
+ }
+ }
- uint32_t PipelineLayout::GetFirstIndexOffsetRegisterSpace() const {
- return kRenderOrComputeInternalRegisterSpace;
+ ASSERT(mDynamicStorageBufferLengthInfo[group].bindingAndRegisterOffsets.size() ==
+ bgl->GetBindingCountInfo().dynamicStorageBufferCount);
}
-
- uint32_t PipelineLayout::GetFirstIndexOffsetShaderRegister() const {
- return kRenderOrComputeInternalBaseRegister;
+ ASSERT(dynamicStorageBufferLengthsShaderRegisterOffset <=
+ kMaxDynamicStorageBuffersPerPipelineLayout);
+
+ if (dynamicStorageBufferLengthsShaderRegisterOffset > 0) {
+ D3D12_ROOT_PARAMETER dynamicStorageBufferLengthConstants{};
+ dynamicStorageBufferLengthConstants.ShaderVisibility = D3D12_SHADER_VISIBILITY_ALL;
+ dynamicStorageBufferLengthConstants.ParameterType =
+ D3D12_ROOT_PARAMETER_TYPE_32BIT_CONSTANTS;
+ dynamicStorageBufferLengthConstants.Constants.Num32BitValues =
+ dynamicStorageBufferLengthsShaderRegisterOffset;
+ dynamicStorageBufferLengthConstants.Constants.RegisterSpace =
+ kDynamicStorageBufferLengthsRegisterSpace;
+ dynamicStorageBufferLengthConstants.Constants.ShaderRegister =
+ kDynamicStorageBufferLengthsBaseRegister;
+ mDynamicStorageBufferLengthsParameterIndex = rootParameters.size();
+ rootParameters.emplace_back(dynamicStorageBufferLengthConstants);
+ } else {
+ mDynamicStorageBufferLengthsParameterIndex =
+ kInvalidDynamicStorageBufferLengthsParameterIndex;
}
- uint32_t PipelineLayout::GetFirstIndexOffsetParameterIndex() const {
- return mFirstIndexOffsetParameterIndex;
+ D3D12_ROOT_SIGNATURE_DESC rootSignatureDescriptor;
+ rootSignatureDescriptor.NumParameters = rootParameters.size();
+ rootSignatureDescriptor.pParameters = rootParameters.data();
+ rootSignatureDescriptor.NumStaticSamplers = 0;
+ rootSignatureDescriptor.pStaticSamplers = nullptr;
+ rootSignatureDescriptor.Flags = D3D12_ROOT_SIGNATURE_FLAG_ALLOW_INPUT_ASSEMBLER_INPUT_LAYOUT;
+
+ ComPtr<ID3DBlob> error;
+ HRESULT hr = device->GetFunctions()->d3d12SerializeRootSignature(
+ &rootSignatureDescriptor, D3D_ROOT_SIGNATURE_VERSION_1, &mRootSignatureBlob, &error);
+ if (DAWN_UNLIKELY(FAILED(hr))) {
+ std::ostringstream messageStream;
+ if (error) {
+ messageStream << static_cast<const char*>(error->GetBufferPointer());
+
+ // |error| is observed to always end with a \n, but is not
+ // specified to do so, so we add an extra newline just in case.
+ messageStream << std::endl;
+ }
+ messageStream << "D3D12 serialize root signature";
+ DAWN_TRY(CheckHRESULT(hr, messageStream.str().c_str()));
}
-
- uint32_t PipelineLayout::GetNumWorkgroupsRegisterSpace() const {
- return kRenderOrComputeInternalRegisterSpace;
+ DAWN_TRY(CheckHRESULT(device->GetD3D12Device()->CreateRootSignature(
+ 0, mRootSignatureBlob->GetBufferPointer(),
+ mRootSignatureBlob->GetBufferSize(), IID_PPV_ARGS(&mRootSignature)),
+ "D3D12 create root signature"));
+ mCacheKey.Record(mRootSignatureBlob.Get());
+ return {};
+}
+
+void PipelineLayout::DestroyImpl() {
+ PipelineLayoutBase::DestroyImpl();
+
+ Device* device = ToBackend(GetDevice());
+ device->ReferenceUntilUnused(mRootSignature);
+
+ // The ID3D12CommandSignature object should not be referenced by GPU operations in-flight on
+    // the Command Queue when it is being deleted. According to the D3D12 debug layer, "it is not
+    // safe to
+ // final-release objects that may have GPU operations pending. This can result in application
+ // instability (921)".
+ if (mDispatchIndirectCommandSignatureWithNumWorkgroups.Get()) {
+ device->ReferenceUntilUnused(mDispatchIndirectCommandSignatureWithNumWorkgroups);
}
-
- uint32_t PipelineLayout::GetNumWorkgroupsShaderRegister() const {
- return kRenderOrComputeInternalBaseRegister;
+ if (mDrawIndirectCommandSignatureWithInstanceVertexOffsets.Get()) {
+ device->ReferenceUntilUnused(mDrawIndirectCommandSignatureWithInstanceVertexOffsets);
}
-
- uint32_t PipelineLayout::GetNumWorkgroupsParameterIndex() const {
- return mNumWorkgroupsParameterIndex;
+ if (mDrawIndexedIndirectCommandSignatureWithInstanceVertexOffsets.Get()) {
+ device->ReferenceUntilUnused(mDrawIndexedIndirectCommandSignatureWithInstanceVertexOffsets);
}
-
- uint32_t PipelineLayout::GetDynamicStorageBufferLengthsRegisterSpace() const {
- return kDynamicStorageBufferLengthsRegisterSpace;
+}
+
+uint32_t PipelineLayout::GetCbvUavSrvRootParameterIndex(BindGroupIndex group) const {
+ ASSERT(group < kMaxBindGroupsTyped);
+ return mCbvUavSrvRootParameterInfo[group];
+}
+
+uint32_t PipelineLayout::GetSamplerRootParameterIndex(BindGroupIndex group) const {
+ ASSERT(group < kMaxBindGroupsTyped);
+ return mSamplerRootParameterInfo[group];
+}
+
+ID3D12RootSignature* PipelineLayout::GetRootSignature() const {
+ return mRootSignature.Get();
+}
+
+ID3DBlob* PipelineLayout::GetRootSignatureBlob() const {
+ return mRootSignatureBlob.Get();
+}
+
+const PipelineLayout::DynamicStorageBufferLengthInfo&
+PipelineLayout::GetDynamicStorageBufferLengthInfo() const {
+ return mDynamicStorageBufferLengthInfo;
+}
+
+uint32_t PipelineLayout::GetDynamicRootParameterIndex(BindGroupIndex group,
+ BindingIndex bindingIndex) const {
+ ASSERT(group < kMaxBindGroupsTyped);
+ ASSERT(bindingIndex < kMaxDynamicBuffersPerPipelineLayoutTyped);
+ ASSERT(GetBindGroupLayout(group)->GetBindingInfo(bindingIndex).buffer.hasDynamicOffset);
+ ASSERT(GetBindGroupLayout(group)->GetBindingInfo(bindingIndex).visibility !=
+ wgpu::ShaderStage::None);
+ return mDynamicRootParameterIndices[group][bindingIndex];
+}
+
+uint32_t PipelineLayout::GetFirstIndexOffsetRegisterSpace() const {
+ return kRenderOrComputeInternalRegisterSpace;
+}
+
+uint32_t PipelineLayout::GetFirstIndexOffsetShaderRegister() const {
+ return kRenderOrComputeInternalBaseRegister;
+}
+
+uint32_t PipelineLayout::GetFirstIndexOffsetParameterIndex() const {
+ return mFirstIndexOffsetParameterIndex;
+}
+
+uint32_t PipelineLayout::GetNumWorkgroupsRegisterSpace() const {
+ return kRenderOrComputeInternalRegisterSpace;
+}
+
+uint32_t PipelineLayout::GetNumWorkgroupsShaderRegister() const {
+ return kRenderOrComputeInternalBaseRegister;
+}
+
+uint32_t PipelineLayout::GetNumWorkgroupsParameterIndex() const {
+ return mNumWorkgroupsParameterIndex;
+}
+
+uint32_t PipelineLayout::GetDynamicStorageBufferLengthsRegisterSpace() const {
+ return kDynamicStorageBufferLengthsRegisterSpace;
+}
+
+uint32_t PipelineLayout::GetDynamicStorageBufferLengthsShaderRegister() const {
+ return kDynamicStorageBufferLengthsBaseRegister;
+}
+
+uint32_t PipelineLayout::GetDynamicStorageBufferLengthsParameterIndex() const {
+ ASSERT(mDynamicStorageBufferLengthsParameterIndex !=
+ kInvalidDynamicStorageBufferLengthsParameterIndex);
+ return mDynamicStorageBufferLengthsParameterIndex;
+}
+
+ID3D12CommandSignature* PipelineLayout::GetDispatchIndirectCommandSignatureWithNumWorkgroups() {
+ // mDispatchIndirectCommandSignatureWithNumWorkgroups won't be created until it is needed.
+ if (mDispatchIndirectCommandSignatureWithNumWorkgroups.Get() != nullptr) {
+ return mDispatchIndirectCommandSignatureWithNumWorkgroups.Get();
}
- uint32_t PipelineLayout::GetDynamicStorageBufferLengthsShaderRegister() const {
- return kDynamicStorageBufferLengthsBaseRegister;
+ D3D12_INDIRECT_ARGUMENT_DESC argumentDescs[2] = {};
+ argumentDescs[0].Type = D3D12_INDIRECT_ARGUMENT_TYPE_CONSTANT;
+ argumentDescs[0].Constant.RootParameterIndex = GetNumWorkgroupsParameterIndex();
+ argumentDescs[0].Constant.Num32BitValuesToSet = 3;
+ argumentDescs[0].Constant.DestOffsetIn32BitValues = 0;
+
+ // A command signature must contain exactly 1 Draw / Dispatch / DispatchMesh / DispatchRays
+ // command. That command must come last.
+ argumentDescs[1].Type = D3D12_INDIRECT_ARGUMENT_TYPE_DISPATCH;
+
+ D3D12_COMMAND_SIGNATURE_DESC programDesc = {};
+ programDesc.ByteStride = 6 * sizeof(uint32_t);
+ programDesc.NumArgumentDescs = 2;
+ programDesc.pArgumentDescs = argumentDescs;
+
+ // The root signature must be specified if and only if the command signature changes one of
+ // the root arguments.
+ ToBackend(GetDevice())
+ ->GetD3D12Device()
+ ->CreateCommandSignature(&programDesc, GetRootSignature(),
+ IID_PPV_ARGS(&mDispatchIndirectCommandSignatureWithNumWorkgroups));
+ return mDispatchIndirectCommandSignatureWithNumWorkgroups.Get();
+}
+
+ID3D12CommandSignature* PipelineLayout::GetDrawIndirectCommandSignatureWithInstanceVertexOffsets() {
+ // mDrawIndirectCommandSignatureWithInstanceVertexOffsets won't be created until it is
+ // needed.
+ if (mDrawIndirectCommandSignatureWithInstanceVertexOffsets.Get() != nullptr) {
+ return mDrawIndirectCommandSignatureWithInstanceVertexOffsets.Get();
}
- uint32_t PipelineLayout::GetDynamicStorageBufferLengthsParameterIndex() const {
- ASSERT(mDynamicStorageBufferLengthsParameterIndex !=
- kInvalidDynamicStorageBufferLengthsParameterIndex);
- return mDynamicStorageBufferLengthsParameterIndex;
+ D3D12_INDIRECT_ARGUMENT_DESC argumentDescs[2] = {};
+ argumentDescs[0].Type = D3D12_INDIRECT_ARGUMENT_TYPE_CONSTANT;
+ argumentDescs[0].Constant.RootParameterIndex = GetFirstIndexOffsetParameterIndex();
+ argumentDescs[0].Constant.Num32BitValuesToSet = 2;
+ argumentDescs[0].Constant.DestOffsetIn32BitValues = 0;
+
+ // A command signature must contain exactly 1 Draw / Dispatch / DispatchMesh / DispatchRays
+ // command. That command must come last.
+ argumentDescs[1].Type = D3D12_INDIRECT_ARGUMENT_TYPE_DRAW;
+
+ D3D12_COMMAND_SIGNATURE_DESC programDesc = {};
+ programDesc.ByteStride = 6 * sizeof(uint32_t);
+ programDesc.NumArgumentDescs = 2;
+ programDesc.pArgumentDescs = argumentDescs;
+
+ // The root signature must be specified if and only if the command signature changes one of
+ // the root arguments.
+ ToBackend(GetDevice())
+ ->GetD3D12Device()
+ ->CreateCommandSignature(
+ &programDesc, GetRootSignature(),
+ IID_PPV_ARGS(&mDrawIndirectCommandSignatureWithInstanceVertexOffsets));
+ return mDrawIndirectCommandSignatureWithInstanceVertexOffsets.Get();
+}
+
+ID3D12CommandSignature*
+PipelineLayout::GetDrawIndexedIndirectCommandSignatureWithInstanceVertexOffsets() {
+ // mDrawIndexedIndirectCommandSignatureWithInstanceVertexOffsets won't be created until it
+ // is needed.
+ if (mDrawIndexedIndirectCommandSignatureWithInstanceVertexOffsets.Get() != nullptr) {
+ return mDrawIndexedIndirectCommandSignatureWithInstanceVertexOffsets.Get();
}
- ID3D12CommandSignature* PipelineLayout::GetDispatchIndirectCommandSignatureWithNumWorkgroups() {
- // mDispatchIndirectCommandSignatureWithNumWorkgroups won't be created until it is needed.
- if (mDispatchIndirectCommandSignatureWithNumWorkgroups.Get() != nullptr) {
- return mDispatchIndirectCommandSignatureWithNumWorkgroups.Get();
- }
-
- D3D12_INDIRECT_ARGUMENT_DESC argumentDescs[2] = {};
- argumentDescs[0].Type = D3D12_INDIRECT_ARGUMENT_TYPE_CONSTANT;
- argumentDescs[0].Constant.RootParameterIndex = GetNumWorkgroupsParameterIndex();
- argumentDescs[0].Constant.Num32BitValuesToSet = 3;
- argumentDescs[0].Constant.DestOffsetIn32BitValues = 0;
-
- // A command signature must contain exactly 1 Draw / Dispatch / DispatchMesh / DispatchRays
- // command. That command must come last.
- argumentDescs[1].Type = D3D12_INDIRECT_ARGUMENT_TYPE_DISPATCH;
-
- D3D12_COMMAND_SIGNATURE_DESC programDesc = {};
- programDesc.ByteStride = 6 * sizeof(uint32_t);
- programDesc.NumArgumentDescs = 2;
- programDesc.pArgumentDescs = argumentDescs;
-
- // The root signature must be specified if and only if the command signature changes one of
- // the root arguments.
- ToBackend(GetDevice())
- ->GetD3D12Device()
- ->CreateCommandSignature(
- &programDesc, GetRootSignature(),
- IID_PPV_ARGS(&mDispatchIndirectCommandSignatureWithNumWorkgroups));
- return mDispatchIndirectCommandSignatureWithNumWorkgroups.Get();
- }
+ D3D12_INDIRECT_ARGUMENT_DESC argumentDescs[2] = {};
+ argumentDescs[0].Type = D3D12_INDIRECT_ARGUMENT_TYPE_CONSTANT;
+ argumentDescs[0].Constant.RootParameterIndex = GetFirstIndexOffsetParameterIndex();
+ argumentDescs[0].Constant.Num32BitValuesToSet = 2;
+ argumentDescs[0].Constant.DestOffsetIn32BitValues = 0;
+
+ // A command signature must contain exactly 1 Draw / Dispatch / DispatchMesh / DispatchRays
+ // command. That command must come last.
+ argumentDescs[1].Type = D3D12_INDIRECT_ARGUMENT_TYPE_DRAW_INDEXED;
+
+ D3D12_COMMAND_SIGNATURE_DESC programDesc = {};
+ programDesc.ByteStride = 7 * sizeof(uint32_t);
+ programDesc.NumArgumentDescs = 2;
+ programDesc.pArgumentDescs = argumentDescs;
+
+ // The root signature must be specified if and only if the command signature changes one of
+ // the root arguments.
+ ToBackend(GetDevice())
+ ->GetD3D12Device()
+ ->CreateCommandSignature(
+ &programDesc, GetRootSignature(),
+ IID_PPV_ARGS(&mDrawIndexedIndirectCommandSignatureWithInstanceVertexOffsets));
+ return mDrawIndexedIndirectCommandSignatureWithInstanceVertexOffsets.Get();
+}
} // namespace dawn::native::d3d12
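
The ByteStride values above follow from packing the root constants written by ExecuteIndirect in front of the fixed D3D12 argument structs: 3 num_workgroups constants plus the 3-value dispatch arguments give 6 * sizeof(uint32_t), 2 vertex/instance-offset constants plus the 4-value draw arguments also give 6, and the same 2 constants plus the 5-value indexed-draw arguments give 7. The compile-time check below spells out that arithmetic; DispatchArgs, DrawArgs and DrawIndexedArgs are local mirrors of the documented D3D12 layouts, not the <d3d12.h> types.

#include <cstdint>

struct DispatchArgs { uint32_t x, y, z; };                              // 3 values
struct DrawArgs { uint32_t vtxCount, instCount, startVtx, startInst; }; // 4 values
struct DrawIndexedArgs {
    uint32_t idxCount, instCount, startIdx;                             // 5 values total
    int32_t baseVertex;
    uint32_t startInst;
};

constexpr uint32_t kNumWorkgroupsConstants = 3;   // num_workgroups_{x,y,z}
constexpr uint32_t kVertexInstanceConstants = 2;  // first-vertex / first-instance offsets

static_assert(kNumWorkgroupsConstants * sizeof(uint32_t) + sizeof(DispatchArgs) ==
                  6 * sizeof(uint32_t),
              "dispatch-indirect stride matches 6 * sizeof(uint32_t)");
static_assert(kVertexInstanceConstants * sizeof(uint32_t) + sizeof(DrawArgs) ==
                  6 * sizeof(uint32_t),
              "draw-indirect stride matches 6 * sizeof(uint32_t)");
static_assert(kVertexInstanceConstants * sizeof(uint32_t) + sizeof(DrawIndexedArgs) ==
                  7 * sizeof(uint32_t),
              "draw-indexed-indirect stride matches 7 * sizeof(uint32_t)");

int main() { return 0; }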
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/PipelineLayoutD3D12.h b/chromium/third_party/dawn/src/dawn/native/d3d12/PipelineLayoutD3D12.h
index a1db136fb79..204741265c5 100644
--- a/chromium/third_party/dawn/src/dawn/native/d3d12/PipelineLayoutD3D12.h
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/PipelineLayoutD3D12.h
@@ -15,6 +15,8 @@
#ifndef SRC_DAWN_NATIVE_D3D12_PIPELINELAYOUTD3D12_H_
#define SRC_DAWN_NATIVE_D3D12_PIPELINELAYOUTD3D12_H_
+#include <vector>
+
#include "dawn/common/Constants.h"
#include "dawn/common/ityp_array.h"
#include "dawn/native/BindingInfo.h"
@@ -23,78 +25,88 @@
namespace dawn::native::d3d12 {
- class Device;
+class Device;
- class PipelineLayout final : public PipelineLayoutBase {
- public:
- static ResultOrError<Ref<PipelineLayout>> Create(
- Device* device,
- const PipelineLayoutDescriptor* descriptor);
+class PipelineLayout final : public PipelineLayoutBase {
+ public:
+ static ResultOrError<Ref<PipelineLayout>> Create(Device* device,
+ const PipelineLayoutDescriptor* descriptor);
- uint32_t GetCbvUavSrvRootParameterIndex(BindGroupIndex group) const;
- uint32_t GetSamplerRootParameterIndex(BindGroupIndex group) const;
+ uint32_t GetCbvUavSrvRootParameterIndex(BindGroupIndex group) const;
+ uint32_t GetSamplerRootParameterIndex(BindGroupIndex group) const;
- // Returns the index of the root parameter reserved for a dynamic buffer binding
- uint32_t GetDynamicRootParameterIndex(BindGroupIndex group,
- BindingIndex bindingIndex) const;
+ // Returns the index of the root parameter reserved for a dynamic buffer binding
+ uint32_t GetDynamicRootParameterIndex(BindGroupIndex group, BindingIndex bindingIndex) const;
- uint32_t GetFirstIndexOffsetRegisterSpace() const;
- uint32_t GetFirstIndexOffsetShaderRegister() const;
- uint32_t GetFirstIndexOffsetParameterIndex() const;
+ uint32_t GetFirstIndexOffsetRegisterSpace() const;
+ uint32_t GetFirstIndexOffsetShaderRegister() const;
+ uint32_t GetFirstIndexOffsetParameterIndex() const;
- uint32_t GetNumWorkgroupsRegisterSpace() const;
- uint32_t GetNumWorkgroupsShaderRegister() const;
- uint32_t GetNumWorkgroupsParameterIndex() const;
+ uint32_t GetNumWorkgroupsRegisterSpace() const;
+ uint32_t GetNumWorkgroupsShaderRegister() const;
+ uint32_t GetNumWorkgroupsParameterIndex() const;
- uint32_t GetDynamicStorageBufferLengthsRegisterSpace() const;
- uint32_t GetDynamicStorageBufferLengthsShaderRegister() const;
- uint32_t GetDynamicStorageBufferLengthsParameterIndex() const;
+ uint32_t GetDynamicStorageBufferLengthsRegisterSpace() const;
+ uint32_t GetDynamicStorageBufferLengthsShaderRegister() const;
+ uint32_t GetDynamicStorageBufferLengthsParameterIndex() const;
- ID3D12RootSignature* GetRootSignature() const;
+ ID3D12RootSignature* GetRootSignature() const;
- ID3D12CommandSignature* GetDispatchIndirectCommandSignatureWithNumWorkgroups();
+ ID3DBlob* GetRootSignatureBlob() const;
- struct PerBindGroupDynamicStorageBufferLengthInfo {
- // First register offset for a bind group's dynamic storage buffer lengths.
- // This is the index into the array of root constants where this bind group's
- // lengths start.
- uint32_t firstRegisterOffset;
+ ID3D12CommandSignature* GetDispatchIndirectCommandSignatureWithNumWorkgroups();
- struct BindingAndRegisterOffset {
- BindingNumber binding;
- uint32_t registerOffset;
- };
- // Associative list of (BindingNumber,registerOffset) pairs, which is passed into
- // the shader to map the BindingPoint(thisGroup, BindingNumber) to the registerOffset
- // into the root constant array which holds the dynamic storage buffer lengths.
- std::vector<BindingAndRegisterOffset> bindingAndRegisterOffsets;
- };
+ ID3D12CommandSignature* GetDrawIndirectCommandSignatureWithInstanceVertexOffsets();
+
+ ID3D12CommandSignature* GetDrawIndexedIndirectCommandSignatureWithInstanceVertexOffsets();
- // Flat map from bind group index to the list of (BindingNumber,Register) pairs.
- // Each pair is used in shader translation to map a binding to its register offset.
- using DynamicStorageBufferLengthInfo =
- ityp::array<BindGroupIndex, PerBindGroupDynamicStorageBufferLengthInfo, kMaxBindGroups>;
-
- const DynamicStorageBufferLengthInfo& GetDynamicStorageBufferLengthInfo() const;
-
- private:
- ~PipelineLayout() override = default;
- using PipelineLayoutBase::PipelineLayoutBase;
- MaybeError Initialize();
- ityp::array<BindGroupIndex, uint32_t, kMaxBindGroups> mCbvUavSrvRootParameterInfo;
- ityp::array<BindGroupIndex, uint32_t, kMaxBindGroups> mSamplerRootParameterInfo;
- ityp::array<BindGroupIndex,
- ityp::array<BindingIndex, uint32_t, kMaxDynamicBuffersPerPipelineLayout>,
- kMaxBindGroups>
- mDynamicRootParameterIndices;
- DynamicStorageBufferLengthInfo mDynamicStorageBufferLengthInfo;
- uint32_t mFirstIndexOffsetParameterIndex;
- uint32_t mNumWorkgroupsParameterIndex;
- uint32_t mDynamicStorageBufferLengthsParameterIndex;
- ComPtr<ID3D12RootSignature> mRootSignature;
- ComPtr<ID3D12CommandSignature> mDispatchIndirectCommandSignatureWithNumWorkgroups;
+ struct PerBindGroupDynamicStorageBufferLengthInfo {
+ // First register offset for a bind group's dynamic storage buffer lengths.
+ // This is the index into the array of root constants where this bind group's
+ // lengths start.
+ uint32_t firstRegisterOffset;
+
+ struct BindingAndRegisterOffset {
+ BindingNumber binding;
+ uint32_t registerOffset;
+ };
+ // Associative list of (BindingNumber,registerOffset) pairs, which is passed into
+ // the shader to map the BindingPoint(thisGroup, BindingNumber) to the registerOffset
+ // into the root constant array which holds the dynamic storage buffer lengths.
+ std::vector<BindingAndRegisterOffset> bindingAndRegisterOffsets;
};
+ // Flat map from bind group index to the list of (BindingNumber,Register) pairs.
+ // Each pair is used in shader translation to map a binding to its register offset.
+ using DynamicStorageBufferLengthInfo =
+ ityp::array<BindGroupIndex, PerBindGroupDynamicStorageBufferLengthInfo, kMaxBindGroups>;
+
+ const DynamicStorageBufferLengthInfo& GetDynamicStorageBufferLengthInfo() const;
+
+ private:
+ ~PipelineLayout() override = default;
+ using PipelineLayoutBase::PipelineLayoutBase;
+ MaybeError Initialize();
+ void DestroyImpl() override;
+
+ ityp::array<BindGroupIndex, uint32_t, kMaxBindGroups> mCbvUavSrvRootParameterInfo;
+ ityp::array<BindGroupIndex, uint32_t, kMaxBindGroups> mSamplerRootParameterInfo;
+ ityp::array<BindGroupIndex,
+ ityp::array<BindingIndex, uint32_t, kMaxDynamicBuffersPerPipelineLayout>,
+ kMaxBindGroups>
+ mDynamicRootParameterIndices;
+ DynamicStorageBufferLengthInfo mDynamicStorageBufferLengthInfo;
+ uint32_t mFirstIndexOffsetParameterIndex;
+ uint32_t mNumWorkgroupsParameterIndex;
+ uint32_t mDynamicStorageBufferLengthsParameterIndex;
+ ComPtr<ID3D12RootSignature> mRootSignature;
+ // Store the root signature blob to put in the pipeline cache key.
+ ComPtr<ID3DBlob> mRootSignatureBlob;
+ ComPtr<ID3D12CommandSignature> mDispatchIndirectCommandSignatureWithNumWorkgroups;
+ ComPtr<ID3D12CommandSignature> mDrawIndirectCommandSignatureWithInstanceVertexOffsets;
+ ComPtr<ID3D12CommandSignature> mDrawIndexedIndirectCommandSignatureWithInstanceVertexOffsets;
+};
+
} // namespace dawn::native::d3d12
#endif // SRC_DAWN_NATIVE_D3D12_PIPELINELAYOUTD3D12_H_
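For context on how the command signatures declared above are consumed, here is a rough usage sketch of ID3D12GraphicsCommandList::ExecuteIndirect. This is not Dawn's actual call site; the buffer and offset names are placeholders.

```cpp
#include <cstdint>
#include <d3d12.h>

// Issues a single indirect indexed draw using a command signature such as the one
// returned by GetDrawIndexedIndirectCommandSignatureWithInstanceVertexOffsets().
// The GPU reads one ByteStride-sized record (root constants + draw arguments)
// from the argument buffer at the given offset.
void EmitDrawIndexedIndirect(ID3D12GraphicsCommandList* commandList,
                             ID3D12CommandSignature* signature,
                             ID3D12Resource* argumentBuffer,
                             uint64_t argumentOffset) {
    commandList->ExecuteIndirect(signature, /*MaxCommandCount=*/1, argumentBuffer,
                                 argumentOffset, /*pCountBuffer=*/nullptr,
                                 /*CountBufferOffset=*/0);
}
```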
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/PlatformFunctions.cpp b/chromium/third_party/dawn/src/dawn/native/d3d12/PlatformFunctions.cpp
index 786ae5a33b1..70b66d8efb4 100644
--- a/chromium/third_party/dawn/src/dawn/native/d3d12/PlatformFunctions.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/PlatformFunctions.cpp
@@ -14,258 +14,252 @@
#include "dawn/native/d3d12/PlatformFunctions.h"
-#include "dawn/common/DynamicLib.h"
-
#include <comdef.h>
+
+#include <algorithm>
#include <array>
#include <sstream>
+#include <utility>
+
+#include "dawn/common/DynamicLib.h"
namespace dawn::native::d3d12 {
- namespace {
- // Extract Version from "10.0.{Version}.0" if possible, otherwise return 0.
- uint32_t GetWindowsSDKVersionFromDirectoryName(const char* directoryName) {
- constexpr char kPrefix[] = "10.0.";
- constexpr char kPostfix[] = ".0";
-
- constexpr uint32_t kPrefixLen = sizeof(kPrefix) - 1;
- constexpr uint32_t kPostfixLen = sizeof(kPostfix) - 1;
- const uint32_t directoryNameLen = strlen(directoryName);
-
- if (directoryNameLen < kPrefixLen + kPostfixLen + 1) {
- return 0;
- }
-
- // Check if directoryName starts with "10.0.".
- if (strncmp(directoryName, kPrefix, kPrefixLen) != 0) {
- return 0;
- }
-
- // Check if directoryName ends with ".0".
- if (strncmp(directoryName + (directoryNameLen - kPostfixLen), kPostfix, kPostfixLen) !=
- 0) {
- return 0;
- }
-
- // Extract Version from "10.0.{Version}.0" and convert Version into an integer.
- return atoi(directoryName + kPrefixLen);
- }
+namespace {
+// Extract Version from "10.0.{Version}.0" if possible, otherwise return 0.
+uint32_t GetWindowsSDKVersionFromDirectoryName(const char* directoryName) {
+ constexpr char kPrefix[] = "10.0.";
+ constexpr char kPostfix[] = ".0";
+
+ constexpr uint32_t kPrefixLen = sizeof(kPrefix) - 1;
+ constexpr uint32_t kPostfixLen = sizeof(kPostfix) - 1;
+ const uint32_t directoryNameLen = strlen(directoryName);
+
+ if (directoryNameLen < kPrefixLen + kPostfixLen + 1) {
+ return 0;
+ }
+
+ // Check if directoryName starts with "10.0.".
+ if (strncmp(directoryName, kPrefix, kPrefixLen) != 0) {
+ return 0;
+ }
+
+ // Check if directoryName ends with ".0".
+ if (strncmp(directoryName + (directoryNameLen - kPostfixLen), kPostfix, kPostfixLen) != 0) {
+ return 0;
+ }
+
+ // Extract Version from "10.0.{Version}.0" and convert Version into an integer.
+ return atoi(directoryName + kPrefixLen);
+}
- class ScopedFileHandle final {
- public:
- explicit ScopedFileHandle(HANDLE handle) : mHandle(handle) {
- }
- ~ScopedFileHandle() {
- if (mHandle != INVALID_HANDLE_VALUE) {
- ASSERT(FindClose(mHandle));
- }
- }
- HANDLE GetHandle() const {
- return mHandle;
- }
-
- private:
- HANDLE mHandle;
- };
-
- std::string GetWindowsSDKBasePath() {
- const char* kDefaultWindowsSDKPath =
- "C:\\Program Files (x86)\\Windows Kits\\10\\bin\\*";
- WIN32_FIND_DATAA fileData;
- ScopedFileHandle handle(FindFirstFileA(kDefaultWindowsSDKPath, &fileData));
- if (handle.GetHandle() == INVALID_HANDLE_VALUE) {
- return "";
- }
-
- uint32_t highestWindowsSDKVersion = 0;
- do {
- if (!(fileData.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY)) {
- continue;
- }
-
- highestWindowsSDKVersion =
- std::max(highestWindowsSDKVersion,
- GetWindowsSDKVersionFromDirectoryName(fileData.cFileName));
- } while (FindNextFileA(handle.GetHandle(), &fileData));
-
- if (highestWindowsSDKVersion == 0) {
- return "";
- }
-
- // Currently we only support using DXC on x64.
- std::ostringstream ostream;
- ostream << "C:\\Program Files (x86)\\Windows Kits\\10\\bin\\10.0."
- << highestWindowsSDKVersion << ".0\\x64\\";
-
- return ostream.str();
+class ScopedFileHandle final {
+ public:
+ explicit ScopedFileHandle(HANDLE handle) : mHandle(handle) {}
+ ~ScopedFileHandle() {
+ if (mHandle != INVALID_HANDLE_VALUE) {
+ ASSERT(FindClose(mHandle));
}
- } // anonymous namespace
-
- PlatformFunctions::PlatformFunctions() = default;
- PlatformFunctions::~PlatformFunctions() = default;
-
- MaybeError PlatformFunctions::LoadFunctions() {
- DAWN_TRY(LoadD3D12());
- DAWN_TRY(LoadDXGI());
- LoadDXCLibraries();
- DAWN_TRY(LoadFXCompiler());
- DAWN_TRY(LoadD3D11());
- LoadPIXRuntime();
- return {};
+ }
+ HANDLE GetHandle() const { return mHandle; }
+
+ private:
+ HANDLE mHandle;
+};
+
+std::string GetWindowsSDKBasePath() {
+ const char* kDefaultWindowsSDKPath = "C:\\Program Files (x86)\\Windows Kits\\10\\bin\\*";
+ WIN32_FIND_DATAA fileData;
+ ScopedFileHandle handle(FindFirstFileA(kDefaultWindowsSDKPath, &fileData));
+ if (handle.GetHandle() == INVALID_HANDLE_VALUE) {
+ return "";
}
- MaybeError PlatformFunctions::LoadD3D12() {
-#if DAWN_PLATFORM_WINUWP
- d3d12CreateDevice = &D3D12CreateDevice;
- d3d12GetDebugInterface = &D3D12GetDebugInterface;
- d3d12SerializeRootSignature = &D3D12SerializeRootSignature;
- d3d12CreateRootSignatureDeserializer = &D3D12CreateRootSignatureDeserializer;
- d3d12SerializeVersionedRootSignature = &D3D12SerializeVersionedRootSignature;
- d3d12CreateVersionedRootSignatureDeserializer =
- &D3D12CreateVersionedRootSignatureDeserializer;
-#else
- std::string error;
- if (!mD3D12Lib.Open("d3d12.dll", &error) ||
- !mD3D12Lib.GetProc(&d3d12CreateDevice, "D3D12CreateDevice", &error) ||
- !mD3D12Lib.GetProc(&d3d12GetDebugInterface, "D3D12GetDebugInterface", &error) ||
- !mD3D12Lib.GetProc(&d3d12SerializeRootSignature, "D3D12SerializeRootSignature",
- &error) ||
- !mD3D12Lib.GetProc(&d3d12CreateRootSignatureDeserializer,
- "D3D12CreateRootSignatureDeserializer", &error) ||
- !mD3D12Lib.GetProc(&d3d12SerializeVersionedRootSignature,
- "D3D12SerializeVersionedRootSignature", &error) ||
- !mD3D12Lib.GetProc(&d3d12CreateVersionedRootSignatureDeserializer,
- "D3D12CreateVersionedRootSignatureDeserializer", &error)) {
- return DAWN_INTERNAL_ERROR(error.c_str());
+ uint32_t highestWindowsSDKVersion = 0;
+ do {
+ if (!(fileData.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY)) {
+ continue;
}
-#endif
- return {};
+ highestWindowsSDKVersion = std::max(
+ highestWindowsSDKVersion, GetWindowsSDKVersionFromDirectoryName(fileData.cFileName));
+ } while (FindNextFileA(handle.GetHandle(), &fileData));
+
+ if (highestWindowsSDKVersion == 0) {
+ return "";
}
- MaybeError PlatformFunctions::LoadD3D11() {
-#if DAWN_PLATFORM_WINUWP
- d3d11on12CreateDevice = &D3D11On12CreateDevice;
+ // Currently we only support using DXC on x64.
+ std::ostringstream ostream;
+ ostream << "C:\\Program Files (x86)\\Windows Kits\\10\\bin\\10.0." << highestWindowsSDKVersion
+ << ".0\\x64\\";
+
+ return ostream.str();
+}
+} // anonymous namespace
+
+PlatformFunctions::PlatformFunctions() = default;
+PlatformFunctions::~PlatformFunctions() = default;
+
+MaybeError PlatformFunctions::LoadFunctions() {
+ DAWN_TRY(LoadD3D12());
+ DAWN_TRY(LoadDXGI());
+ LoadDXCLibraries();
+ DAWN_TRY(LoadFXCompiler());
+ DAWN_TRY(LoadD3D11());
+ LoadPIXRuntime();
+ return {};
+}
+
+MaybeError PlatformFunctions::LoadD3D12() {
+#if DAWN_PLATFORM_IS(WINUWP)
+ d3d12CreateDevice = &D3D12CreateDevice;
+ d3d12GetDebugInterface = &D3D12GetDebugInterface;
+ d3d12SerializeRootSignature = &D3D12SerializeRootSignature;
+ d3d12CreateRootSignatureDeserializer = &D3D12CreateRootSignatureDeserializer;
+ d3d12SerializeVersionedRootSignature = &D3D12SerializeVersionedRootSignature;
+ d3d12CreateVersionedRootSignatureDeserializer = &D3D12CreateVersionedRootSignatureDeserializer;
#else
- std::string error;
- if (!mD3D11Lib.Open("d3d11.dll", &error) ||
- !mD3D11Lib.GetProc(&d3d11on12CreateDevice, "D3D11On12CreateDevice", &error)) {
- return DAWN_INTERNAL_ERROR(error.c_str());
- }
+ std::string error;
+ if (!mD3D12Lib.Open("d3d12.dll", &error) ||
+ !mD3D12Lib.GetProc(&d3d12CreateDevice, "D3D12CreateDevice", &error) ||
+ !mD3D12Lib.GetProc(&d3d12GetDebugInterface, "D3D12GetDebugInterface", &error) ||
+ !mD3D12Lib.GetProc(&d3d12SerializeRootSignature, "D3D12SerializeRootSignature", &error) ||
+ !mD3D12Lib.GetProc(&d3d12CreateRootSignatureDeserializer,
+ "D3D12CreateRootSignatureDeserializer", &error) ||
+ !mD3D12Lib.GetProc(&d3d12SerializeVersionedRootSignature,
+ "D3D12SerializeVersionedRootSignature", &error) ||
+ !mD3D12Lib.GetProc(&d3d12CreateVersionedRootSignatureDeserializer,
+ "D3D12CreateVersionedRootSignatureDeserializer", &error)) {
+ return DAWN_INTERNAL_ERROR(error.c_str());
+ }
#endif
- return {};
- }
+ return {};
+}
- MaybeError PlatformFunctions::LoadDXGI() {
-#if DAWN_PLATFORM_WINUWP
-# if defined(_DEBUG)
- // DXGIGetDebugInterface1 is tagged as a development-only capability
- // which implies that linking to this function will cause
- // the application to fail Windows store certification
- // But we need it when debugging using VS Graphics Diagnostics or PIX
- // So we only link to it in debug builds
- dxgiGetDebugInterface1 = &DXGIGetDebugInterface1;
-# endif
- createDxgiFactory2 = &CreateDXGIFactory2;
+MaybeError PlatformFunctions::LoadD3D11() {
+#if DAWN_PLATFORM_IS(WINUWP)
+ d3d11on12CreateDevice = &D3D11On12CreateDevice;
#else
- std::string error;
- if (!mDXGILib.Open("dxgi.dll", &error) ||
- !mDXGILib.GetProc(&dxgiGetDebugInterface1, "DXGIGetDebugInterface1", &error) ||
- !mDXGILib.GetProc(&createDxgiFactory2, "CreateDXGIFactory2", &error)) {
- return DAWN_INTERNAL_ERROR(error.c_str());
- }
+ std::string error;
+ if (!mD3D11Lib.Open("d3d11.dll", &error) ||
+ !mD3D11Lib.GetProc(&d3d11on12CreateDevice, "D3D11On12CreateDevice", &error)) {
+ return DAWN_INTERNAL_ERROR(error.c_str());
+ }
#endif
- return {};
+ return {};
+}
+
+MaybeError PlatformFunctions::LoadDXGI() {
+#if DAWN_PLATFORM_IS(WINUWP)
+#if defined(_DEBUG)
+ // DXGIGetDebugInterface1 is tagged as a development-only capability
+ // which implies that linking to this function will cause
+ // the application to fail Windows store certification
+ // But we need it when debugging using VS Graphics Diagnostics or PIX
+ // So we only link to it in debug builds
+ dxgiGetDebugInterface1 = &DXGIGetDebugInterface1;
+#endif
+ createDxgiFactory2 = &CreateDXGIFactory2;
+#else
+ std::string error;
+ if (!mDXGILib.Open("dxgi.dll", &error) ||
+ !mDXGILib.GetProc(&dxgiGetDebugInterface1, "DXGIGetDebugInterface1", &error) ||
+ !mDXGILib.GetProc(&createDxgiFactory2, "CreateDXGIFactory2", &error)) {
+ return DAWN_INTERNAL_ERROR(error.c_str());
}
+#endif
- void PlatformFunctions::LoadDXCLibraries() {
- // TODO(dawn:766)
- // Statically linked with dxcompiler.lib in UWP
- // currently linked with dxcompiler.lib making CoreApp unable to activate
- // LoadDXIL and LoadDXCompiler will fail in UWP, but LoadFunctions() can still be
- // successfully executed.
+ return {};
+}
- const std::string& windowsSDKBasePath = GetWindowsSDKBasePath();
+void PlatformFunctions::LoadDXCLibraries() {
+ // TODO(dawn:766)
+ // Statically linked with dxcompiler.lib in UWP
+ // currently linked with dxcompiler.lib making CoreApp unable to activate
+ // LoadDXIL and LoadDXCompiler will fail in UWP, but LoadFunctions() can still be
+ // successfully executed.
- LoadDXIL(windowsSDKBasePath);
- LoadDXCompiler(windowsSDKBasePath);
- }
+ const std::string& windowsSDKBasePath = GetWindowsSDKBasePath();
- void PlatformFunctions::LoadDXIL(const std::string& baseWindowsSDKPath) {
- const char* dxilDLLName = "dxil.dll";
- const std::array<std::string, 2> kDxilDLLPaths = {
- {dxilDLLName, baseWindowsSDKPath + dxilDLLName}};
+ LoadDXIL(windowsSDKBasePath);
+ LoadDXCompiler(windowsSDKBasePath);
+}
- for (const std::string& dxilDLLPath : kDxilDLLPaths) {
- if (mDXILLib.Open(dxilDLLPath, nullptr)) {
- return;
- }
- }
- ASSERT(!mDXILLib.Valid());
- }
+void PlatformFunctions::LoadDXIL(const std::string& baseWindowsSDKPath) {
+ const char* dxilDLLName = "dxil.dll";
+ const std::array<std::string, 2> kDxilDLLPaths = {
+ {dxilDLLName, baseWindowsSDKPath + dxilDLLName}};
- void PlatformFunctions::LoadDXCompiler(const std::string& baseWindowsSDKPath) {
- // DXIL must be loaded before DXC, otherwise shader signing is unavailable
- if (!mDXILLib.Valid()) {
+ for (const std::string& dxilDLLPath : kDxilDLLPaths) {
+ if (mDXILLib.Open(dxilDLLPath, nullptr)) {
return;
}
+ }
+ ASSERT(!mDXILLib.Valid());
+}
- const char* dxCompilerDLLName = "dxcompiler.dll";
- const std::array<std::string, 2> kDxCompilerDLLPaths = {
- {dxCompilerDLLName, baseWindowsSDKPath + dxCompilerDLLName}};
-
- DynamicLib dxCompilerLib;
- for (const std::string& dxCompilerDLLName : kDxCompilerDLLPaths) {
- if (dxCompilerLib.Open(dxCompilerDLLName, nullptr)) {
- break;
- }
- }
-
- if (dxCompilerLib.Valid() &&
- dxCompilerLib.GetProc(&dxcCreateInstance, "DxcCreateInstance", nullptr)) {
- mDXCompilerLib = std::move(dxCompilerLib);
- } else {
- mDXILLib.Close();
- }
+void PlatformFunctions::LoadDXCompiler(const std::string& baseWindowsSDKPath) {
+ // DXIL must be loaded before DXC, otherwise shader signing is unavailable
+ if (!mDXILLib.Valid()) {
+ return;
}
- MaybeError PlatformFunctions::LoadFXCompiler() {
-#if DAWN_PLATFORM_WINUWP
- d3dCompile = &D3DCompile;
- d3dDisassemble = &D3DDisassemble;
-#else
- std::string error;
- if (!mFXCompilerLib.Open("d3dcompiler_47.dll", &error) ||
- !mFXCompilerLib.GetProc(&d3dCompile, "D3DCompile", &error) ||
- !mFXCompilerLib.GetProc(&d3dDisassemble, "D3DDisassemble", &error)) {
- return DAWN_INTERNAL_ERROR(error.c_str());
+ const char* dxCompilerDLLName = "dxcompiler.dll";
+ const std::array<std::string, 2> kDxCompilerDLLPaths = {
+ {dxCompilerDLLName, baseWindowsSDKPath + dxCompilerDLLName}};
+
+ DynamicLib dxCompilerLib;
+ for (const std::string& dxCompilerDLLName : kDxCompilerDLLPaths) {
+ if (dxCompilerLib.Open(dxCompilerDLLName, nullptr)) {
+ break;
}
-#endif
- return {};
}
- bool PlatformFunctions::IsPIXEventRuntimeLoaded() const {
- return mPIXEventRuntimeLib.Valid();
+ if (dxCompilerLib.Valid() &&
+ dxCompilerLib.GetProc(&dxcCreateInstance, "DxcCreateInstance", nullptr)) {
+ mDXCompilerLib = std::move(dxCompilerLib);
+ } else {
+ mDXILLib.Close();
}
+}
- bool PlatformFunctions::IsDXCAvailable() const {
- return mDXILLib.Valid() && mDXCompilerLib.Valid();
+MaybeError PlatformFunctions::LoadFXCompiler() {
+#if DAWN_PLATFORM_IS(WINUWP)
+ d3dCompile = &D3DCompile;
+ d3dDisassemble = &D3DDisassemble;
+#else
+ std::string error;
+ if (!mFXCompilerLib.Open("d3dcompiler_47.dll", &error) ||
+ !mFXCompilerLib.GetProc(&d3dCompile, "D3DCompile", &error) ||
+ !mFXCompilerLib.GetProc(&d3dDisassemble, "D3DDisassemble", &error)) {
+ return DAWN_INTERNAL_ERROR(error.c_str());
}
-
- void PlatformFunctions::LoadPIXRuntime() {
- // TODO(dawn:766):
- // In UWP PIX should be statically linked with WinPixEventRuntime_UAP.lib
- // So maybe we should put WinPixEventRuntime as a third party package
- // Currently PIX is not going to be loaded in UWP since the following
- // mPIXEventRuntimeLib.Open will fail.
- if (!mPIXEventRuntimeLib.Open("WinPixEventRuntime.dll") ||
- !mPIXEventRuntimeLib.GetProc(&pixBeginEventOnCommandList,
- "PIXBeginEventOnCommandList") ||
- !mPIXEventRuntimeLib.GetProc(&pixEndEventOnCommandList, "PIXEndEventOnCommandList") ||
- !mPIXEventRuntimeLib.GetProc(&pixSetMarkerOnCommandList, "PIXSetMarkerOnCommandList")) {
- mPIXEventRuntimeLib.Close();
- }
+#endif
+ return {};
+}
+
+bool PlatformFunctions::IsPIXEventRuntimeLoaded() const {
+ return mPIXEventRuntimeLib.Valid();
+}
+
+bool PlatformFunctions::IsDXCAvailable() const {
+ return mDXILLib.Valid() && mDXCompilerLib.Valid();
+}
+
+void PlatformFunctions::LoadPIXRuntime() {
+ // TODO(dawn:766):
+ // In UWP PIX should be statically linked with WinPixEventRuntime_UAP.lib
+ // So maybe we should put WinPixEventRuntime as a third party package
+ // Currently PIX is not going to be loaded in UWP since the following
+ // mPIXEventRuntimeLib.Open will fail.
+ if (!mPIXEventRuntimeLib.Open("WinPixEventRuntime.dll") ||
+ !mPIXEventRuntimeLib.GetProc(&pixBeginEventOnCommandList, "PIXBeginEventOnCommandList") ||
+ !mPIXEventRuntimeLib.GetProc(&pixEndEventOnCommandList, "PIXEndEventOnCommandList") ||
+ !mPIXEventRuntimeLib.GetProc(&pixSetMarkerOnCommandList, "PIXSetMarkerOnCommandList")) {
+ mPIXEventRuntimeLib.Close();
}
+}
} // namespace dawn::native::d3d12
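LoadDXCompiler above insists that dxil.dll is loaded before dxcompiler.dll because DXC locates the DXIL validator in dxil.dll at runtime; without it, compiled shaders come back unsigned and are typically rejected by D3D12 unless experimental shader models are enabled. The following standalone sketch shows the same ordering using plain Win32 and dxcapi.h; it assumes a recent SDK that provides IDxcCompiler3 and is not Dawn code.

```cpp
#include <windows.h>
#include <dxcapi.h>
#include <wrl/client.h>

// Load dxil.dll before dxcompiler.dll so DXC can sign the DXIL it produces,
// then create a compiler instance through the exported DxcCreateInstance.
Microsoft::WRL::ComPtr<IDxcCompiler3> CreateDxcCompiler() {
    HMODULE dxil = LoadLibraryA("dxil.dll");  // keep resident so shader signing works
    HMODULE dxc = LoadLibraryA("dxcompiler.dll");
    if (dxil == nullptr || dxc == nullptr) {
        return nullptr;
    }
    auto dxcCreateInstance = reinterpret_cast<DxcCreateInstanceProc>(
        GetProcAddress(dxc, "DxcCreateInstance"));
    Microsoft::WRL::ComPtr<IDxcCompiler3> compiler;
    if (dxcCreateInstance == nullptr ||
        FAILED(dxcCreateInstance(CLSID_DxcCompiler, IID_PPV_ARGS(&compiler)))) {
        return nullptr;
    }
    return compiler;
}
```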
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/PlatformFunctions.h b/chromium/third_party/dawn/src/dawn/native/d3d12/PlatformFunctions.h
index a8218f7ab14..6d2e2225a91 100644
--- a/chromium/third_party/dawn/src/dawn/native/d3d12/PlatformFunctions.h
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/PlatformFunctions.h
@@ -15,95 +15,98 @@
#ifndef SRC_DAWN_NATIVE_D3D12_PLATFORMFUNCTIONS_H_
#define SRC_DAWN_NATIVE_D3D12_PLATFORMFUNCTIONS_H_
+#include <d3dcompiler.h>
+
+#include <string>
+
#include "dawn/native/d3d12/d3d12_platform.h"
#include "dawn/common/DynamicLib.h"
#include "dawn/native/Error.h"
-#include <d3dcompiler.h>
-
namespace dawn::native::d3d12 {
- // Loads the functions required from the platform dynamically so that we don't need to rely on
- // them being present in the system. For example, linking against d3d12.lib would prevent
- // dawn_native from loading on Windows 7 systems where d3d12.dll doesn't exist.
- class PlatformFunctions {
- public:
- PlatformFunctions();
- ~PlatformFunctions();
-
- MaybeError LoadFunctions();
- bool IsPIXEventRuntimeLoaded() const;
- bool IsDXCAvailable() const;
-
- // Functions from d3d12.dll
- PFN_D3D12_CREATE_DEVICE d3d12CreateDevice = nullptr;
- PFN_D3D12_GET_DEBUG_INTERFACE d3d12GetDebugInterface = nullptr;
-
- PFN_D3D12_SERIALIZE_ROOT_SIGNATURE d3d12SerializeRootSignature = nullptr;
- PFN_D3D12_CREATE_ROOT_SIGNATURE_DESERIALIZER d3d12CreateRootSignatureDeserializer = nullptr;
- PFN_D3D12_SERIALIZE_VERSIONED_ROOT_SIGNATURE d3d12SerializeVersionedRootSignature = nullptr;
- PFN_D3D12_CREATE_VERSIONED_ROOT_SIGNATURE_DESERIALIZER
- d3d12CreateVersionedRootSignatureDeserializer = nullptr;
-
- // Functions from dxgi.dll
- using PFN_DXGI_GET_DEBUG_INTERFACE1 = HRESULT(WINAPI*)(UINT Flags,
- REFIID riid,
- _COM_Outptr_ void** pDebug);
- PFN_DXGI_GET_DEBUG_INTERFACE1 dxgiGetDebugInterface1 = nullptr;
-
- using PFN_CREATE_DXGI_FACTORY2 = HRESULT(WINAPI*)(UINT Flags,
- REFIID riid,
- _COM_Outptr_ void** ppFactory);
- PFN_CREATE_DXGI_FACTORY2 createDxgiFactory2 = nullptr;
-
- // Functions from dxcompiler.dll
- using PFN_DXC_CREATE_INSTANCE = HRESULT(WINAPI*)(REFCLSID rclsid,
- REFIID riid,
- _COM_Outptr_ void** ppCompiler);
- PFN_DXC_CREATE_INSTANCE dxcCreateInstance = nullptr;
-
- // Functions from d3dcompiler.dll
- pD3DCompile d3dCompile = nullptr;
- pD3DDisassemble d3dDisassemble = nullptr;
-
- // Functions from WinPixEventRuntime.dll
- using PFN_PIX_END_EVENT_ON_COMMAND_LIST =
- HRESULT(WINAPI*)(ID3D12GraphicsCommandList* commandList);
-
- PFN_PIX_END_EVENT_ON_COMMAND_LIST pixEndEventOnCommandList = nullptr;
-
- using PFN_PIX_BEGIN_EVENT_ON_COMMAND_LIST = HRESULT(
- WINAPI*)(ID3D12GraphicsCommandList* commandList, UINT64 color, _In_ PCSTR formatString);
-
- PFN_PIX_BEGIN_EVENT_ON_COMMAND_LIST pixBeginEventOnCommandList = nullptr;
-
- using PFN_SET_MARKER_ON_COMMAND_LIST = HRESULT(
- WINAPI*)(ID3D12GraphicsCommandList* commandList, UINT64 color, _In_ PCSTR formatString);
-
- PFN_SET_MARKER_ON_COMMAND_LIST pixSetMarkerOnCommandList = nullptr;
-
- // Functions from D3D11.dll
- PFN_D3D11ON12_CREATE_DEVICE d3d11on12CreateDevice = nullptr;
-
- private:
- MaybeError LoadD3D12();
- MaybeError LoadD3D11();
- MaybeError LoadDXGI();
- void LoadDXCLibraries();
- void LoadDXIL(const std::string& baseWindowsSDKPath);
- void LoadDXCompiler(const std::string& baseWindowsSDKPath);
- MaybeError LoadFXCompiler();
- void LoadPIXRuntime();
-
- DynamicLib mD3D12Lib;
- DynamicLib mD3D11Lib;
- DynamicLib mDXGILib;
- DynamicLib mDXILLib;
- DynamicLib mDXCompilerLib;
- DynamicLib mFXCompilerLib;
- DynamicLib mPIXEventRuntimeLib;
- };
+// Loads the functions required from the platform dynamically so that we don't need to rely on
+// them being present in the system. For example, linking against d3d12.lib would prevent
+// dawn_native from loading on Windows 7 systems where d3d12.dll doesn't exist.
+class PlatformFunctions {
+ public:
+ PlatformFunctions();
+ ~PlatformFunctions();
+
+ MaybeError LoadFunctions();
+ bool IsPIXEventRuntimeLoaded() const;
+ bool IsDXCAvailable() const;
+
+ // Functions from d3d12.dll
+ PFN_D3D12_CREATE_DEVICE d3d12CreateDevice = nullptr;
+ PFN_D3D12_GET_DEBUG_INTERFACE d3d12GetDebugInterface = nullptr;
+
+ PFN_D3D12_SERIALIZE_ROOT_SIGNATURE d3d12SerializeRootSignature = nullptr;
+ PFN_D3D12_CREATE_ROOT_SIGNATURE_DESERIALIZER d3d12CreateRootSignatureDeserializer = nullptr;
+ PFN_D3D12_SERIALIZE_VERSIONED_ROOT_SIGNATURE d3d12SerializeVersionedRootSignature = nullptr;
+ PFN_D3D12_CREATE_VERSIONED_ROOT_SIGNATURE_DESERIALIZER
+ d3d12CreateVersionedRootSignatureDeserializer = nullptr;
+
+ // Functions from dxgi.dll
+ using PFN_DXGI_GET_DEBUG_INTERFACE1 = HRESULT(WINAPI*)(UINT Flags,
+ REFIID riid,
+ _COM_Outptr_ void** pDebug);
+ PFN_DXGI_GET_DEBUG_INTERFACE1 dxgiGetDebugInterface1 = nullptr;
+
+ using PFN_CREATE_DXGI_FACTORY2 = HRESULT(WINAPI*)(UINT Flags,
+ REFIID riid,
+ _COM_Outptr_ void** ppFactory);
+ PFN_CREATE_DXGI_FACTORY2 createDxgiFactory2 = nullptr;
+
+ // Functions from dxcompiler.dll
+ using PFN_DXC_CREATE_INSTANCE = HRESULT(WINAPI*)(REFCLSID rclsid,
+ REFIID riid,
+ _COM_Outptr_ void** ppCompiler);
+ PFN_DXC_CREATE_INSTANCE dxcCreateInstance = nullptr;
+
+ // Functions from d3dcompiler.dll
+ pD3DCompile d3dCompile = nullptr;
+ pD3DDisassemble d3dDisassemble = nullptr;
+
+ // Functions from WinPixEventRuntime.dll
+ using PFN_PIX_END_EVENT_ON_COMMAND_LIST =
+ HRESULT(WINAPI*)(ID3D12GraphicsCommandList* commandList);
+
+ PFN_PIX_END_EVENT_ON_COMMAND_LIST pixEndEventOnCommandList = nullptr;
+
+ using PFN_PIX_BEGIN_EVENT_ON_COMMAND_LIST = HRESULT(
+ WINAPI*)(ID3D12GraphicsCommandList* commandList, UINT64 color, _In_ PCSTR formatString);
+
+ PFN_PIX_BEGIN_EVENT_ON_COMMAND_LIST pixBeginEventOnCommandList = nullptr;
+
+ using PFN_SET_MARKER_ON_COMMAND_LIST = HRESULT(WINAPI*)(ID3D12GraphicsCommandList* commandList,
+ UINT64 color,
+ _In_ PCSTR formatString);
+
+ PFN_SET_MARKER_ON_COMMAND_LIST pixSetMarkerOnCommandList = nullptr;
+
+ // Functions from D3D11.dll
+ PFN_D3D11ON12_CREATE_DEVICE d3d11on12CreateDevice = nullptr;
+
+ private:
+ MaybeError LoadD3D12();
+ MaybeError LoadD3D11();
+ MaybeError LoadDXGI();
+ void LoadDXCLibraries();
+ void LoadDXIL(const std::string& baseWindowsSDKPath);
+ void LoadDXCompiler(const std::string& baseWindowsSDKPath);
+ MaybeError LoadFXCompiler();
+ void LoadPIXRuntime();
+
+ DynamicLib mD3D12Lib;
+ DynamicLib mD3D11Lib;
+ DynamicLib mDXGILib;
+ DynamicLib mDXILLib;
+ DynamicLib mDXCompilerLib;
+ DynamicLib mFXCompilerLib;
+ DynamicLib mPIXEventRuntimeLib;
+};
} // namespace dawn::native::d3d12
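The class comment above explains the motivation for runtime loading: hard-linking d3d12.lib would make the library fail to load on systems without d3d12.dll. Underneath the DynamicLib wrapper this boils down to the usual LoadLibrary/GetProcAddress pattern; here is a minimal sketch for one of the dxgi.dll entry points (not Dawn's DynamicLib implementation).

```cpp
#include <windows.h>
#include <string>

using PFN_CREATE_DXGI_FACTORY2 = HRESULT(WINAPI*)(UINT Flags, REFIID riid, void** ppFactory);

// Resolve CreateDXGIFactory2 at runtime instead of linking against dxgi.lib.
bool LoadCreateDxgiFactory2(PFN_CREATE_DXGI_FACTORY2* outProc, std::string* error) {
    HMODULE dxgi = LoadLibraryA("dxgi.dll");
    if (dxgi == nullptr) {
        *error = "dxgi.dll could not be loaded";
        return false;
    }
    *outProc = reinterpret_cast<PFN_CREATE_DXGI_FACTORY2>(
        GetProcAddress(dxgi, "CreateDXGIFactory2"));
    if (*outProc == nullptr) {
        *error = "CreateDXGIFactory2 is not exported by dxgi.dll";
        return false;
    }
    return true;
}
```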
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/QuerySetD3D12.cpp b/chromium/third_party/dawn/src/dawn/native/d3d12/QuerySetD3D12.cpp
index 458c23df184..2f8ea05c114 100644
--- a/chromium/third_party/dawn/src/dawn/native/d3d12/QuerySetD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/QuerySetD3D12.cpp
@@ -14,62 +14,64 @@
#include "dawn/native/d3d12/QuerySetD3D12.h"
+#include <algorithm>
+
#include "dawn/native/d3d12/D3D12Error.h"
#include "dawn/native/d3d12/DeviceD3D12.h"
#include "dawn/native/d3d12/UtilsD3D12.h"
namespace dawn::native::d3d12 {
- namespace {
- D3D12_QUERY_HEAP_TYPE D3D12QueryHeapType(wgpu::QueryType type) {
- switch (type) {
- case wgpu::QueryType::Occlusion:
- return D3D12_QUERY_HEAP_TYPE_OCCLUSION;
- case wgpu::QueryType::PipelineStatistics:
- return D3D12_QUERY_HEAP_TYPE_PIPELINE_STATISTICS;
- case wgpu::QueryType::Timestamp:
- return D3D12_QUERY_HEAP_TYPE_TIMESTAMP;
- }
- }
- } // anonymous namespace
-
- // static
- ResultOrError<Ref<QuerySet>> QuerySet::Create(Device* device,
- const QuerySetDescriptor* descriptor) {
- Ref<QuerySet> querySet = AcquireRef(new QuerySet(device, descriptor));
- DAWN_TRY(querySet->Initialize());
- return querySet;
+namespace {
+D3D12_QUERY_HEAP_TYPE D3D12QueryHeapType(wgpu::QueryType type) {
+ switch (type) {
+ case wgpu::QueryType::Occlusion:
+ return D3D12_QUERY_HEAP_TYPE_OCCLUSION;
+ case wgpu::QueryType::PipelineStatistics:
+ return D3D12_QUERY_HEAP_TYPE_PIPELINE_STATISTICS;
+ case wgpu::QueryType::Timestamp:
+ return D3D12_QUERY_HEAP_TYPE_TIMESTAMP;
}
+}
+} // anonymous namespace
- MaybeError QuerySet::Initialize() {
- D3D12_QUERY_HEAP_DESC queryHeapDesc = {};
- queryHeapDesc.Type = D3D12QueryHeapType(GetQueryType());
- queryHeapDesc.Count = std::max(GetQueryCount(), uint32_t(1u));
+// static
+ResultOrError<Ref<QuerySet>> QuerySet::Create(Device* device,
+ const QuerySetDescriptor* descriptor) {
+ Ref<QuerySet> querySet = AcquireRef(new QuerySet(device, descriptor));
+ DAWN_TRY(querySet->Initialize());
+ return querySet;
+}
- ID3D12Device* d3d12Device = ToBackend(GetDevice())->GetD3D12Device();
- DAWN_TRY(CheckOutOfMemoryHRESULT(
- d3d12Device->CreateQueryHeap(&queryHeapDesc, IID_PPV_ARGS(&mQueryHeap)),
- "ID3D12Device::CreateQueryHeap"));
+MaybeError QuerySet::Initialize() {
+ D3D12_QUERY_HEAP_DESC queryHeapDesc = {};
+ queryHeapDesc.Type = D3D12QueryHeapType(GetQueryType());
+ queryHeapDesc.Count = std::max(GetQueryCount(), uint32_t(1u));
- SetLabelImpl();
+ ID3D12Device* d3d12Device = ToBackend(GetDevice())->GetD3D12Device();
+ DAWN_TRY(CheckOutOfMemoryHRESULT(
+ d3d12Device->CreateQueryHeap(&queryHeapDesc, IID_PPV_ARGS(&mQueryHeap)),
+ "ID3D12Device::CreateQueryHeap"));
- return {};
- }
+ SetLabelImpl();
- ID3D12QueryHeap* QuerySet::GetQueryHeap() const {
- return mQueryHeap.Get();
- }
+ return {};
+}
- QuerySet::~QuerySet() = default;
+ID3D12QueryHeap* QuerySet::GetQueryHeap() const {
+ return mQueryHeap.Get();
+}
- void QuerySet::DestroyImpl() {
- QuerySetBase::DestroyImpl();
- ToBackend(GetDevice())->ReferenceUntilUnused(mQueryHeap);
- mQueryHeap = nullptr;
- }
+QuerySet::~QuerySet() = default;
- void QuerySet::SetLabelImpl() {
- SetDebugName(ToBackend(GetDevice()), mQueryHeap.Get(), "Dawn_QuerySet", GetLabel());
- }
+void QuerySet::DestroyImpl() {
+ QuerySetBase::DestroyImpl();
+ ToBackend(GetDevice())->ReferenceUntilUnused(mQueryHeap);
+ mQueryHeap = nullptr;
+}
+
+void QuerySet::SetLabelImpl() {
+ SetDebugName(ToBackend(GetDevice()), mQueryHeap.Get(), "Dawn_QuerySet", GetLabel());
+}
} // namespace dawn::native::d3d12
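QuerySet::Initialize above only creates the query heap; results still have to be written and copied out. The sketch below shows how such a heap is typically consumed for a timestamp query. It is not Dawn's command-buffer code, and the resource names are placeholders.

```cpp
#include <d3d12.h>

// Write one timestamp into the query heap, then resolve it into a destination
// buffer. Each resolved timestamp occupies one UINT64 at the given offset.
void ResolveOneTimestamp(ID3D12GraphicsCommandList* commandList,
                         ID3D12QueryHeap* queryHeap,
                         ID3D12Resource* destinationBuffer) {
    const UINT queryIndex = 0;
    commandList->EndQuery(queryHeap, D3D12_QUERY_TYPE_TIMESTAMP, queryIndex);
    commandList->ResolveQueryData(queryHeap, D3D12_QUERY_TYPE_TIMESTAMP, queryIndex,
                                  /*NumQueries=*/1, destinationBuffer,
                                  /*AlignedDestinationBufferOffset=*/0);
}
```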
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/QuerySetD3D12.h b/chromium/third_party/dawn/src/dawn/native/d3d12/QuerySetD3D12.h
index e67b12f9ebc..03805439bf4 100644
--- a/chromium/third_party/dawn/src/dawn/native/d3d12/QuerySetD3D12.h
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/QuerySetD3D12.h
@@ -20,26 +20,26 @@
namespace dawn::native::d3d12 {
- class Device;
+class Device;
- class QuerySet : public QuerySetBase {
- public:
- static ResultOrError<Ref<QuerySet>> Create(Device* device,
- const QuerySetDescriptor* descriptor);
+class QuerySet : public QuerySetBase {
+ public:
+ static ResultOrError<Ref<QuerySet>> Create(Device* device,
+ const QuerySetDescriptor* descriptor);
- ID3D12QueryHeap* GetQueryHeap() const;
+ ID3D12QueryHeap* GetQueryHeap() const;
- private:
- ~QuerySet() override;
- using QuerySetBase::QuerySetBase;
- MaybeError Initialize();
+ private:
+ ~QuerySet() override;
+ using QuerySetBase::QuerySetBase;
+ MaybeError Initialize();
- // Dawn API
- void DestroyImpl() override;
- void SetLabelImpl() override;
+ // Dawn API
+ void DestroyImpl() override;
+ void SetLabelImpl() override;
- ComPtr<ID3D12QueryHeap> mQueryHeap;
- };
+ ComPtr<ID3D12QueryHeap> mQueryHeap;
+};
} // namespace dawn::native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/QueueD3D12.cpp b/chromium/third_party/dawn/src/dawn/native/d3d12/QueueD3D12.cpp
index f6e6c2ea5b8..e3539a7888c 100644
--- a/chromium/third_party/dawn/src/dawn/native/d3d12/QueueD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/QueueD3D12.cpp
@@ -27,48 +27,46 @@
namespace dawn::native::d3d12 {
- // static
- Ref<Queue> Queue::Create(Device* device, const QueueDescriptor* descriptor) {
- Ref<Queue> queue = AcquireRef(new Queue(device, descriptor));
- queue->Initialize();
- return queue;
- }
+// static
+Ref<Queue> Queue::Create(Device* device, const QueueDescriptor* descriptor) {
+ Ref<Queue> queue = AcquireRef(new Queue(device, descriptor));
+ queue->Initialize();
+ return queue;
+}
- Queue::Queue(Device* device, const QueueDescriptor* descriptor)
- : QueueBase(device, descriptor) {
- }
+Queue::Queue(Device* device, const QueueDescriptor* descriptor) : QueueBase(device, descriptor) {}
- void Queue::Initialize() {
- SetLabelImpl();
- }
+void Queue::Initialize() {
+ SetLabelImpl();
+}
- MaybeError Queue::SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) {
- Device* device = ToBackend(GetDevice());
+MaybeError Queue::SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) {
+ Device* device = ToBackend(GetDevice());
- DAWN_TRY(device->Tick());
+ DAWN_TRY(device->Tick());
- CommandRecordingContext* commandContext;
- DAWN_TRY_ASSIGN(commandContext, device->GetPendingCommandContext());
+ CommandRecordingContext* commandContext;
+ DAWN_TRY_ASSIGN(commandContext, device->GetPendingCommandContext());
- TRACE_EVENT_BEGIN0(GetDevice()->GetPlatform(), Recording,
- "CommandBufferD3D12::RecordCommands");
- for (uint32_t i = 0; i < commandCount; ++i) {
- DAWN_TRY(ToBackend(commands[i])->RecordCommands(commandContext));
- }
- TRACE_EVENT_END0(GetDevice()->GetPlatform(), Recording,
- "CommandBufferD3D12::RecordCommands");
+ TRACE_EVENT_BEGIN1(GetDevice()->GetPlatform(), Recording, "CommandBufferD3D12::RecordCommands",
+ "serial", uint64_t(GetDevice()->GetPendingCommandSerial()));
+ for (uint32_t i = 0; i < commandCount; ++i) {
+ DAWN_TRY(ToBackend(commands[i])->RecordCommands(commandContext));
+ }
+ TRACE_EVENT_END1(GetDevice()->GetPlatform(), Recording, "CommandBufferD3D12::RecordCommands",
+ "serial", uint64_t(GetDevice()->GetPendingCommandSerial()));
- DAWN_TRY(device->ExecutePendingCommandContext());
+ DAWN_TRY(device->ExecutePendingCommandContext());
- DAWN_TRY(device->NextSerial());
- return {};
- }
+ DAWN_TRY(device->NextSerial());
+ return {};
+}
- void Queue::SetLabelImpl() {
- Device* device = ToBackend(GetDevice());
- // TODO(crbug.com/dawn/1344): When we start using multiple queues this needs to be adjusted
- // so it doesn't always change the default queue's label.
- SetDebugName(device, device->GetCommandQueue().Get(), "Dawn_Queue", GetLabel());
- }
+void Queue::SetLabelImpl() {
+ Device* device = ToBackend(GetDevice());
+ // TODO(crbug.com/dawn/1344): When we start using multiple queues this needs to be adjusted
+ // so it doesn't always change the default queue's label.
+ SetDebugName(device, device->GetCommandQueue().Get(), "Dawn_Queue", GetLabel());
+}
} // namespace dawn::native::d3d12
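SubmitImpl above finishes with device->NextSerial(), and the updated trace events now tag recording with that pending command serial. The usual D3D12 mechanism behind such serials is an ID3D12Fence signaled from the command queue; the sketch below shows that general pattern under assumed member names, not Dawn's actual Device implementation.

```cpp
#include <cstdint>
#include <d3d12.h>
#include <wrl/client.h>

// Illustrative fence-based serial tracking: each submission signals the next
// serial, and GetCompletedValue() tells us which submissions have finished.
class SerialTracker {
  public:
    HRESULT Initialize(ID3D12Device* device) {
        return device->CreateFence(0, D3D12_FENCE_FLAG_NONE, IID_PPV_ARGS(&mFence));
    }
    HRESULT SignalNext(ID3D12CommandQueue* queue) {
        // The GPU writes mNextSerial into the fence once all prior work completes.
        return queue->Signal(mFence.Get(), ++mNextSerial);
    }
    uint64_t CompletedSerial() const { return mFence->GetCompletedValue(); }

  private:
    Microsoft::WRL::ComPtr<ID3D12Fence> mFence;
    uint64_t mNextSerial = 0;
};
```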
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/QueueD3D12.h b/chromium/third_party/dawn/src/dawn/native/d3d12/QueueD3D12.h
index 9d35f1cbf9e..a0f8b46b253 100644
--- a/chromium/third_party/dawn/src/dawn/native/d3d12/QueueD3D12.h
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/QueueD3D12.h
@@ -22,22 +22,22 @@
namespace dawn::native::d3d12 {
- class Device;
+class Device;
- class Queue final : public QueueBase {
- public:
- static Ref<Queue> Create(Device* device, const QueueDescriptor* descriptor);
+class Queue final : public QueueBase {
+ public:
+ static Ref<Queue> Create(Device* device, const QueueDescriptor* descriptor);
- private:
- Queue(Device* device, const QueueDescriptor* descriptor);
+ private:
+ Queue(Device* device, const QueueDescriptor* descriptor);
- void Initialize();
+ void Initialize();
- MaybeError SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) override;
+ MaybeError SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) override;
- // Dawn API
- void SetLabelImpl() override;
- };
+ // Dawn API
+ void SetLabelImpl() override;
+};
} // namespace dawn::native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/RenderPassBuilderD3D12.cpp b/chromium/third_party/dawn/src/dawn/native/d3d12/RenderPassBuilderD3D12.cpp
index fc4133108b7..b2f9c95423e 100644
--- a/chromium/third_party/dawn/src/dawn/native/d3d12/RenderPassBuilderD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/RenderPassBuilderD3D12.cpp
@@ -14,237 +14,226 @@
#include "dawn/native/d3d12/RenderPassBuilderD3D12.h"
+#include <algorithm>
+
#include "dawn/native/Format.h"
#include "dawn/native/d3d12/CommandBufferD3D12.h"
#include "dawn/native/d3d12/Forward.h"
#include "dawn/native/d3d12/TextureD3D12.h"
-
#include "dawn/native/dawn_platform.h"
namespace dawn::native::d3d12 {
- namespace {
- D3D12_RENDER_PASS_BEGINNING_ACCESS_TYPE D3D12BeginningAccessType(wgpu::LoadOp loadOp) {
- switch (loadOp) {
- case wgpu::LoadOp::Clear:
- return D3D12_RENDER_PASS_BEGINNING_ACCESS_TYPE_CLEAR;
- case wgpu::LoadOp::Load:
- return D3D12_RENDER_PASS_BEGINNING_ACCESS_TYPE_PRESERVE;
- case wgpu::LoadOp::Undefined:
- UNREACHABLE();
- break;
- }
- }
-
- D3D12_RENDER_PASS_ENDING_ACCESS_TYPE D3D12EndingAccessType(wgpu::StoreOp storeOp) {
- switch (storeOp) {
- case wgpu::StoreOp::Discard:
- return D3D12_RENDER_PASS_ENDING_ACCESS_TYPE_DISCARD;
- case wgpu::StoreOp::Store:
- return D3D12_RENDER_PASS_ENDING_ACCESS_TYPE_PRESERVE;
- case wgpu::StoreOp::Undefined:
- UNREACHABLE();
- break;
- }
- }
-
- D3D12_RENDER_PASS_ENDING_ACCESS_RESOLVE_PARAMETERS D3D12EndingAccessResolveParameters(
- wgpu::StoreOp storeOp,
- TextureView* resolveSource,
- TextureView* resolveDestination) {
- D3D12_RENDER_PASS_ENDING_ACCESS_RESOLVE_PARAMETERS resolveParameters;
-
- resolveParameters.Format = resolveDestination->GetD3D12Format();
- resolveParameters.pSrcResource =
- ToBackend(resolveSource->GetTexture())->GetD3D12Resource();
- resolveParameters.pDstResource =
- ToBackend(resolveDestination->GetTexture())->GetD3D12Resource();
-
- // Clear or preserve the resolve source.
- if (storeOp == wgpu::StoreOp::Discard) {
- resolveParameters.PreserveResolveSource = false;
- } else if (storeOp == wgpu::StoreOp::Store) {
- resolveParameters.PreserveResolveSource = true;
- }
-
- // RESOLVE_MODE_AVERAGE is only valid for non-integer formats.
- // TODO: Investigate and determine how integer format resolves should work in WebGPU.
- switch (resolveDestination->GetFormat().GetAspectInfo(Aspect::Color).baseType) {
- case wgpu::TextureComponentType::Sint:
- case wgpu::TextureComponentType::Uint:
- resolveParameters.ResolveMode = D3D12_RESOLVE_MODE_MAX;
- break;
- case wgpu::TextureComponentType::Float:
- resolveParameters.ResolveMode = D3D12_RESOLVE_MODE_AVERAGE;
- break;
-
- case wgpu::TextureComponentType::DepthComparison:
- UNREACHABLE();
- }
-
- resolveParameters.SubresourceCount = 1;
-
- return resolveParameters;
- }
-
- D3D12_RENDER_PASS_ENDING_ACCESS_RESOLVE_SUBRESOURCE_PARAMETERS
- D3D12EndingAccessResolveSubresourceParameters(TextureView* resolveDestination) {
- D3D12_RENDER_PASS_ENDING_ACCESS_RESOLVE_SUBRESOURCE_PARAMETERS subresourceParameters;
- Texture* resolveDestinationTexture = ToBackend(resolveDestination->GetTexture());
- ASSERT(resolveDestinationTexture->GetFormat().aspects == Aspect::Color);
-
- subresourceParameters.DstX = 0;
- subresourceParameters.DstY = 0;
- subresourceParameters.SrcSubresource = 0;
- subresourceParameters.DstSubresource = resolveDestinationTexture->GetSubresourceIndex(
- resolveDestination->GetBaseMipLevel(), resolveDestination->GetBaseArrayLayer(),
- Aspect::Color);
- // Resolving a specified sub-rect is only valid on hardware that supports sample
- // positions. This means even {0, 0, width, height} would be invalid if unsupported. To
- // avoid this, we assume sub-rect resolves never work by setting them to all zeros or
- // "empty" to resolve the entire region.
- subresourceParameters.SrcRect = {0, 0, 0, 0};
-
- return subresourceParameters;
- }
- } // anonymous namespace
-
- RenderPassBuilder::RenderPassBuilder(bool hasUAV) {
- if (hasUAV) {
- mRenderPassFlags = D3D12_RENDER_PASS_FLAG_ALLOW_UAV_WRITES;
- }
- }
-
- void RenderPassBuilder::SetRenderTargetView(ColorAttachmentIndex attachmentIndex,
- D3D12_CPU_DESCRIPTOR_HANDLE baseDescriptor,
- bool isNullRTV) {
- mRenderTargetViews[attachmentIndex] = baseDescriptor;
- mRenderPassRenderTargetDescriptors[attachmentIndex].cpuDescriptor = baseDescriptor;
- if (!isNullRTV) {
- mHighestColorAttachmentIndexPlusOne =
- std::max(mHighestColorAttachmentIndexPlusOne,
- ColorAttachmentIndex{
- static_cast<uint8_t>(static_cast<uint8_t>(attachmentIndex) + 1u)});
- }
- }
-
- void RenderPassBuilder::SetDepthStencilView(D3D12_CPU_DESCRIPTOR_HANDLE baseDescriptor) {
- mRenderPassDepthStencilDesc.cpuDescriptor = baseDescriptor;
- }
-
- ColorAttachmentIndex RenderPassBuilder::GetHighestColorAttachmentIndexPlusOne() const {
- return mHighestColorAttachmentIndexPlusOne;
- }
-
- bool RenderPassBuilder::HasDepthOrStencil() const {
- return mHasDepthOrStencil;
- }
-
- ityp::span<ColorAttachmentIndex, const D3D12_RENDER_PASS_RENDER_TARGET_DESC>
- RenderPassBuilder::GetRenderPassRenderTargetDescriptors() const {
- return {mRenderPassRenderTargetDescriptors.data(), mHighestColorAttachmentIndexPlusOne};
- }
-
- const D3D12_RENDER_PASS_DEPTH_STENCIL_DESC*
- RenderPassBuilder::GetRenderPassDepthStencilDescriptor() const {
- return &mRenderPassDepthStencilDesc;
- }
-
- D3D12_RENDER_PASS_FLAGS RenderPassBuilder::GetRenderPassFlags() const {
- return mRenderPassFlags;
- }
-
- const D3D12_CPU_DESCRIPTOR_HANDLE* RenderPassBuilder::GetRenderTargetViews() const {
- return mRenderTargetViews.data();
- }
-
- void RenderPassBuilder::SetRenderTargetBeginningAccess(ColorAttachmentIndex attachment,
- wgpu::LoadOp loadOp,
- dawn::native::Color clearColor,
- DXGI_FORMAT format) {
- mRenderPassRenderTargetDescriptors[attachment].BeginningAccess.Type =
- D3D12BeginningAccessType(loadOp);
- if (loadOp == wgpu::LoadOp::Clear) {
- mRenderPassRenderTargetDescriptors[attachment]
- .BeginningAccess.Clear.ClearValue.Color[0] = clearColor.r;
- mRenderPassRenderTargetDescriptors[attachment]
- .BeginningAccess.Clear.ClearValue.Color[1] = clearColor.g;
- mRenderPassRenderTargetDescriptors[attachment]
- .BeginningAccess.Clear.ClearValue.Color[2] = clearColor.b;
- mRenderPassRenderTargetDescriptors[attachment]
- .BeginningAccess.Clear.ClearValue.Color[3] = clearColor.a;
- mRenderPassRenderTargetDescriptors[attachment].BeginningAccess.Clear.ClearValue.Format =
- format;
- }
- }
-
- void RenderPassBuilder::SetRenderTargetEndingAccess(ColorAttachmentIndex attachment,
- wgpu::StoreOp storeOp) {
- mRenderPassRenderTargetDescriptors[attachment].EndingAccess.Type =
- D3D12EndingAccessType(storeOp);
- }
-
- void RenderPassBuilder::SetRenderTargetEndingAccessResolve(ColorAttachmentIndex attachment,
- wgpu::StoreOp storeOp,
- TextureView* resolveSource,
- TextureView* resolveDestination) {
- mRenderPassRenderTargetDescriptors[attachment].EndingAccess.Type =
- D3D12_RENDER_PASS_ENDING_ACCESS_TYPE_RESOLVE;
- mRenderPassRenderTargetDescriptors[attachment].EndingAccess.Resolve =
- D3D12EndingAccessResolveParameters(storeOp, resolveSource, resolveDestination);
-
- mSubresourceParams[attachment] =
- D3D12EndingAccessResolveSubresourceParameters(resolveDestination);
-
- mRenderPassRenderTargetDescriptors[attachment].EndingAccess.Resolve.pSubresourceParameters =
- &mSubresourceParams[attachment];
- }
-
- void RenderPassBuilder::SetDepthAccess(wgpu::LoadOp loadOp,
- wgpu::StoreOp storeOp,
- float clearDepth,
- DXGI_FORMAT format) {
- mHasDepthOrStencil = true;
- mRenderPassDepthStencilDesc.DepthBeginningAccess.Type = D3D12BeginningAccessType(loadOp);
- if (loadOp == wgpu::LoadOp::Clear) {
- mRenderPassDepthStencilDesc.DepthBeginningAccess.Clear.ClearValue.DepthStencil.Depth =
- clearDepth;
- mRenderPassDepthStencilDesc.DepthBeginningAccess.Clear.ClearValue.Format = format;
- }
- mRenderPassDepthStencilDesc.DepthEndingAccess.Type = D3D12EndingAccessType(storeOp);
- }
-
- void RenderPassBuilder::SetStencilAccess(wgpu::LoadOp loadOp,
- wgpu::StoreOp storeOp,
- uint8_t clearStencil,
- DXGI_FORMAT format) {
- mHasDepthOrStencil = true;
- mRenderPassDepthStencilDesc.StencilBeginningAccess.Type = D3D12BeginningAccessType(loadOp);
- if (loadOp == wgpu::LoadOp::Clear) {
- mRenderPassDepthStencilDesc.StencilBeginningAccess.Clear.ClearValue.DepthStencil
- .Stencil = clearStencil;
- mRenderPassDepthStencilDesc.StencilBeginningAccess.Clear.ClearValue.Format = format;
- }
- mRenderPassDepthStencilDesc.StencilEndingAccess.Type = D3D12EndingAccessType(storeOp);
- }
-
- void RenderPassBuilder::SetDepthNoAccess() {
- mRenderPassDepthStencilDesc.DepthBeginningAccess.Type =
- D3D12_RENDER_PASS_BEGINNING_ACCESS_TYPE_NO_ACCESS;
- mRenderPassDepthStencilDesc.DepthEndingAccess.Type =
- D3D12_RENDER_PASS_ENDING_ACCESS_TYPE_NO_ACCESS;
- }
-
- void RenderPassBuilder::SetDepthStencilNoAccess() {
- SetDepthNoAccess();
- SetStencilNoAccess();
- }
-
- void RenderPassBuilder::SetStencilNoAccess() {
- mRenderPassDepthStencilDesc.StencilBeginningAccess.Type =
- D3D12_RENDER_PASS_BEGINNING_ACCESS_TYPE_NO_ACCESS;
- mRenderPassDepthStencilDesc.StencilEndingAccess.Type =
- D3D12_RENDER_PASS_ENDING_ACCESS_TYPE_NO_ACCESS;
- }
+namespace {
+D3D12_RENDER_PASS_BEGINNING_ACCESS_TYPE D3D12BeginningAccessType(wgpu::LoadOp loadOp) {
+ switch (loadOp) {
+ case wgpu::LoadOp::Clear:
+ return D3D12_RENDER_PASS_BEGINNING_ACCESS_TYPE_CLEAR;
+ case wgpu::LoadOp::Load:
+ return D3D12_RENDER_PASS_BEGINNING_ACCESS_TYPE_PRESERVE;
+ case wgpu::LoadOp::Undefined:
+ UNREACHABLE();
+ break;
+ }
+}
+
+D3D12_RENDER_PASS_ENDING_ACCESS_TYPE D3D12EndingAccessType(wgpu::StoreOp storeOp) {
+ switch (storeOp) {
+ case wgpu::StoreOp::Discard:
+ return D3D12_RENDER_PASS_ENDING_ACCESS_TYPE_DISCARD;
+ case wgpu::StoreOp::Store:
+ return D3D12_RENDER_PASS_ENDING_ACCESS_TYPE_PRESERVE;
+ case wgpu::StoreOp::Undefined:
+ UNREACHABLE();
+ break;
+ }
+}
+
+D3D12_RENDER_PASS_ENDING_ACCESS_RESOLVE_PARAMETERS D3D12EndingAccessResolveParameters(
+ wgpu::StoreOp storeOp,
+ TextureView* resolveSource,
+ TextureView* resolveDestination) {
+ D3D12_RENDER_PASS_ENDING_ACCESS_RESOLVE_PARAMETERS resolveParameters;
+
+ resolveParameters.Format = resolveDestination->GetD3D12Format();
+ resolveParameters.pSrcResource = ToBackend(resolveSource->GetTexture())->GetD3D12Resource();
+ resolveParameters.pDstResource =
+ ToBackend(resolveDestination->GetTexture())->GetD3D12Resource();
+
+ // Clear or preserve the resolve source.
+ if (storeOp == wgpu::StoreOp::Discard) {
+ resolveParameters.PreserveResolveSource = false;
+ } else if (storeOp == wgpu::StoreOp::Store) {
+ resolveParameters.PreserveResolveSource = true;
+ }
+
+ // RESOLVE_MODE_AVERAGE is only valid for non-integer formats.
+ ASSERT(resolveDestination->GetFormat().GetAspectInfo(Aspect::Color).baseType ==
+ wgpu::TextureComponentType::Float);
+ resolveParameters.ResolveMode = D3D12_RESOLVE_MODE_AVERAGE;
+
+ resolveParameters.SubresourceCount = 1;
+
+ return resolveParameters;
+}
+
+D3D12_RENDER_PASS_ENDING_ACCESS_RESOLVE_SUBRESOURCE_PARAMETERS
+D3D12EndingAccessResolveSubresourceParameters(TextureView* resolveDestination) {
+ D3D12_RENDER_PASS_ENDING_ACCESS_RESOLVE_SUBRESOURCE_PARAMETERS subresourceParameters;
+ Texture* resolveDestinationTexture = ToBackend(resolveDestination->GetTexture());
+ ASSERT(resolveDestinationTexture->GetFormat().aspects == Aspect::Color);
+
+ subresourceParameters.DstX = 0;
+ subresourceParameters.DstY = 0;
+ subresourceParameters.SrcSubresource = 0;
+ subresourceParameters.DstSubresource = resolveDestinationTexture->GetSubresourceIndex(
+ resolveDestination->GetBaseMipLevel(), resolveDestination->GetBaseArrayLayer(),
+ Aspect::Color);
+ // Resolving a specified sub-rect is only valid on hardware that supports sample
+ // positions. This means even {0, 0, width, height} would be invalid if unsupported. To
+ // avoid this, we assume sub-rect resolves never work by setting them to all zeros or
+ // "empty" to resolve the entire region.
+ subresourceParameters.SrcRect = {0, 0, 0, 0};
+
+ return subresourceParameters;
+}
+} // anonymous namespace
+
+RenderPassBuilder::RenderPassBuilder(bool hasUAV) {
+ if (hasUAV) {
+ mRenderPassFlags = D3D12_RENDER_PASS_FLAG_ALLOW_UAV_WRITES;
+ }
+}
+
+void RenderPassBuilder::SetRenderTargetView(ColorAttachmentIndex attachmentIndex,
+ D3D12_CPU_DESCRIPTOR_HANDLE baseDescriptor,
+ bool isNullRTV) {
+ mRenderTargetViews[attachmentIndex] = baseDescriptor;
+ mRenderPassRenderTargetDescriptors[attachmentIndex].cpuDescriptor = baseDescriptor;
+ if (!isNullRTV) {
+ mHighestColorAttachmentIndexPlusOne = std::max(
+ mHighestColorAttachmentIndexPlusOne,
+ ColorAttachmentIndex{static_cast<uint8_t>(static_cast<uint8_t>(attachmentIndex) + 1u)});
+ }
+}
+
+void RenderPassBuilder::SetDepthStencilView(D3D12_CPU_DESCRIPTOR_HANDLE baseDescriptor) {
+ mRenderPassDepthStencilDesc.cpuDescriptor = baseDescriptor;
+}
+
+ColorAttachmentIndex RenderPassBuilder::GetHighestColorAttachmentIndexPlusOne() const {
+ return mHighestColorAttachmentIndexPlusOne;
+}
+
+bool RenderPassBuilder::HasDepthOrStencil() const {
+ return mHasDepthOrStencil;
+}
+
+ityp::span<ColorAttachmentIndex, const D3D12_RENDER_PASS_RENDER_TARGET_DESC>
+RenderPassBuilder::GetRenderPassRenderTargetDescriptors() const {
+ return {mRenderPassRenderTargetDescriptors.data(), mHighestColorAttachmentIndexPlusOne};
+}
+
+const D3D12_RENDER_PASS_DEPTH_STENCIL_DESC* RenderPassBuilder::GetRenderPassDepthStencilDescriptor()
+ const {
+ return &mRenderPassDepthStencilDesc;
+}
+
+D3D12_RENDER_PASS_FLAGS RenderPassBuilder::GetRenderPassFlags() const {
+ return mRenderPassFlags;
+}
+
+const D3D12_CPU_DESCRIPTOR_HANDLE* RenderPassBuilder::GetRenderTargetViews() const {
+ return mRenderTargetViews.data();
+}
+
+void RenderPassBuilder::SetRenderTargetBeginningAccess(ColorAttachmentIndex attachment,
+ wgpu::LoadOp loadOp,
+ dawn::native::Color clearColor,
+ DXGI_FORMAT format) {
+ mRenderPassRenderTargetDescriptors[attachment].BeginningAccess.Type =
+ D3D12BeginningAccessType(loadOp);
+ if (loadOp == wgpu::LoadOp::Clear) {
+ mRenderPassRenderTargetDescriptors[attachment].BeginningAccess.Clear.ClearValue.Color[0] =
+ clearColor.r;
+ mRenderPassRenderTargetDescriptors[attachment].BeginningAccess.Clear.ClearValue.Color[1] =
+ clearColor.g;
+ mRenderPassRenderTargetDescriptors[attachment].BeginningAccess.Clear.ClearValue.Color[2] =
+ clearColor.b;
+ mRenderPassRenderTargetDescriptors[attachment].BeginningAccess.Clear.ClearValue.Color[3] =
+ clearColor.a;
+ mRenderPassRenderTargetDescriptors[attachment].BeginningAccess.Clear.ClearValue.Format =
+ format;
+ }
+}
+
+void RenderPassBuilder::SetRenderTargetEndingAccess(ColorAttachmentIndex attachment,
+ wgpu::StoreOp storeOp) {
+ mRenderPassRenderTargetDescriptors[attachment].EndingAccess.Type =
+ D3D12EndingAccessType(storeOp);
+}
+
+void RenderPassBuilder::SetRenderTargetEndingAccessResolve(ColorAttachmentIndex attachment,
+ wgpu::StoreOp storeOp,
+ TextureView* resolveSource,
+ TextureView* resolveDestination) {
+ mRenderPassRenderTargetDescriptors[attachment].EndingAccess.Type =
+ D3D12_RENDER_PASS_ENDING_ACCESS_TYPE_RESOLVE;
+ mRenderPassRenderTargetDescriptors[attachment].EndingAccess.Resolve =
+ D3D12EndingAccessResolveParameters(storeOp, resolveSource, resolveDestination);
+
+ mSubresourceParams[attachment] =
+ D3D12EndingAccessResolveSubresourceParameters(resolveDestination);
+
+ mRenderPassRenderTargetDescriptors[attachment].EndingAccess.Resolve.pSubresourceParameters =
+ &mSubresourceParams[attachment];
+}
+
+void RenderPassBuilder::SetDepthAccess(wgpu::LoadOp loadOp,
+ wgpu::StoreOp storeOp,
+ float clearDepth,
+ DXGI_FORMAT format) {
+ mHasDepthOrStencil = true;
+ mRenderPassDepthStencilDesc.DepthBeginningAccess.Type = D3D12BeginningAccessType(loadOp);
+ if (loadOp == wgpu::LoadOp::Clear) {
+ mRenderPassDepthStencilDesc.DepthBeginningAccess.Clear.ClearValue.DepthStencil.Depth =
+ clearDepth;
+ mRenderPassDepthStencilDesc.DepthBeginningAccess.Clear.ClearValue.Format = format;
+ }
+ mRenderPassDepthStencilDesc.DepthEndingAccess.Type = D3D12EndingAccessType(storeOp);
+}
+
+void RenderPassBuilder::SetStencilAccess(wgpu::LoadOp loadOp,
+ wgpu::StoreOp storeOp,
+ uint8_t clearStencil,
+ DXGI_FORMAT format) {
+ mHasDepthOrStencil = true;
+ mRenderPassDepthStencilDesc.StencilBeginningAccess.Type = D3D12BeginningAccessType(loadOp);
+ if (loadOp == wgpu::LoadOp::Clear) {
+ mRenderPassDepthStencilDesc.StencilBeginningAccess.Clear.ClearValue.DepthStencil.Stencil =
+ clearStencil;
+ mRenderPassDepthStencilDesc.StencilBeginningAccess.Clear.ClearValue.Format = format;
+ }
+ mRenderPassDepthStencilDesc.StencilEndingAccess.Type = D3D12EndingAccessType(storeOp);
+}
+
+void RenderPassBuilder::SetDepthNoAccess() {
+ mRenderPassDepthStencilDesc.DepthBeginningAccess.Type =
+ D3D12_RENDER_PASS_BEGINNING_ACCESS_TYPE_NO_ACCESS;
+ mRenderPassDepthStencilDesc.DepthEndingAccess.Type =
+ D3D12_RENDER_PASS_ENDING_ACCESS_TYPE_NO_ACCESS;
+}
+
+void RenderPassBuilder::SetDepthStencilNoAccess() {
+ SetDepthNoAccess();
+ SetStencilNoAccess();
+}
+
+void RenderPassBuilder::SetStencilNoAccess() {
+ mRenderPassDepthStencilDesc.StencilBeginningAccess.Type =
+ D3D12_RENDER_PASS_BEGINNING_ACCESS_TYPE_NO_ACCESS;
+ mRenderPassDepthStencilDesc.StencilEndingAccess.Type =
+ D3D12_RENDER_PASS_ENDING_ACCESS_TYPE_NO_ACCESS;
+}
} // namespace dawn::native::d3d12
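The descriptors filled in by RenderPassBuilder are handed to ID3D12GraphicsCommandList4::BeginRenderPass when the D3D12 render pass API is available (and, per the class comment, otherwise act only as parameter storage for emulation). A bare-bones consumption sketch follows; the command list and descriptor arrays are assumed to come from the caller, and this is not Dawn's recording code.

```cpp
#include <d3d12.h>

// Record one render pass from already-built render target / depth-stencil
// descriptors and flags, then end it after the draw calls are recorded.
void RecordRenderPass(ID3D12GraphicsCommandList4* commandList,
                      const D3D12_RENDER_PASS_RENDER_TARGET_DESC* renderTargetDescs,
                      UINT renderTargetCount,
                      const D3D12_RENDER_PASS_DEPTH_STENCIL_DESC* depthStencilDesc,
                      D3D12_RENDER_PASS_FLAGS flags) {
    commandList->BeginRenderPass(renderTargetCount, renderTargetDescs, depthStencilDesc, flags);
    // ... SetPipelineState / draw calls would be recorded here ...
    commandList->EndRenderPass();
}
```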
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/RenderPassBuilderD3D12.h b/chromium/third_party/dawn/src/dawn/native/d3d12/RenderPassBuilderD3D12.h
index 0e7dcad68f8..42c0fc52bce 100644
--- a/chromium/third_party/dawn/src/dawn/native/d3d12/RenderPassBuilderD3D12.h
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/RenderPassBuilderD3D12.h
@@ -15,6 +15,8 @@
#ifndef SRC_DAWN_NATIVE_D3D12_RENDERPASSBUILDERD3D12_H_
#define SRC_DAWN_NATIVE_D3D12_RENDERPASSBUILDERD3D12_H_
+#include <array>
+
#include "dawn/common/Constants.h"
#include "dawn/common/ityp_array.h"
#include "dawn/common/ityp_span.h"
@@ -22,80 +24,77 @@
#include "dawn/native/d3d12/d3d12_platform.h"
#include "dawn/native/dawn_platform.h"
-#include <array>
-
namespace dawn::native::d3d12 {
- class TextureView;
+class TextureView;
- // RenderPassBuilder stores parameters related to render pass load and store operations.
- // When the D3D12 render pass API is available, the needed descriptors can be fetched
- // directly from the RenderPassBuilder. When the D3D12 render pass API is not available, the
- // descriptors are still fetched and any information necessary to emulate the load and store
- // operations is extracted from the descriptors.
- class RenderPassBuilder {
- public:
- RenderPassBuilder(bool hasUAV);
+// RenderPassBuilder stores parameters related to render pass load and store operations.
+// When the D3D12 render pass API is available, the needed descriptors can be fetched
+// directly from the RenderPassBuilder. When the D3D12 render pass API is not available, the
+// descriptors are still fetched and any information necessary to emulate the load and store
+// operations is extracted from the descriptors.
+class RenderPassBuilder {
+ public:
+ explicit RenderPassBuilder(bool hasUAV);
- // Returns the highest color attachment index + 1. If there is no color attachment, returns
- // 0. Range: [0, kMaxColorAttachments + 1)
- ColorAttachmentIndex GetHighestColorAttachmentIndexPlusOne() const;
+ // Returns the highest color attachment index + 1. If there is no color attachment, returns
+ // 0. Range: [0, kMaxColorAttachments + 1)
+ ColorAttachmentIndex GetHighestColorAttachmentIndexPlusOne() const;
- // Returns descriptors that are fed directly to BeginRenderPass, or are used as parameter
- // storage if D3D12 render pass API is unavailable.
- ityp::span<ColorAttachmentIndex, const D3D12_RENDER_PASS_RENDER_TARGET_DESC>
- GetRenderPassRenderTargetDescriptors() const;
- const D3D12_RENDER_PASS_DEPTH_STENCIL_DESC* GetRenderPassDepthStencilDescriptor() const;
+ // Returns descriptors that are fed directly to BeginRenderPass, or are used as parameter
+ // storage if D3D12 render pass API is unavailable.
+ ityp::span<ColorAttachmentIndex, const D3D12_RENDER_PASS_RENDER_TARGET_DESC>
+ GetRenderPassRenderTargetDescriptors() const;
+ const D3D12_RENDER_PASS_DEPTH_STENCIL_DESC* GetRenderPassDepthStencilDescriptor() const;
- D3D12_RENDER_PASS_FLAGS GetRenderPassFlags() const;
+ D3D12_RENDER_PASS_FLAGS GetRenderPassFlags() const;
- // Returns attachment RTVs to use with OMSetRenderTargets.
- const D3D12_CPU_DESCRIPTOR_HANDLE* GetRenderTargetViews() const;
+ // Returns attachment RTVs to use with OMSetRenderTargets.
+ const D3D12_CPU_DESCRIPTOR_HANDLE* GetRenderTargetViews() const;
- bool HasDepthOrStencil() const;
+ bool HasDepthOrStencil() const;
- // Functions that set the appropriate values in the render pass descriptors.
- void SetDepthAccess(wgpu::LoadOp loadOp,
- wgpu::StoreOp storeOp,
- float clearDepth,
- DXGI_FORMAT format);
- void SetDepthNoAccess();
- void SetDepthStencilNoAccess();
- void SetRenderTargetBeginningAccess(ColorAttachmentIndex attachment,
- wgpu::LoadOp loadOp,
- dawn::native::Color clearColor,
- DXGI_FORMAT format);
- void SetRenderTargetEndingAccess(ColorAttachmentIndex attachment, wgpu::StoreOp storeOp);
- void SetRenderTargetEndingAccessResolve(ColorAttachmentIndex attachment,
- wgpu::StoreOp storeOp,
- TextureView* resolveSource,
- TextureView* resolveDestination);
- void SetStencilAccess(wgpu::LoadOp loadOp,
- wgpu::StoreOp storeOp,
- uint8_t clearStencil,
- DXGI_FORMAT format);
- void SetStencilNoAccess();
+ // Functions that set the appropriate values in the render pass descriptors.
+ void SetDepthAccess(wgpu::LoadOp loadOp,
+ wgpu::StoreOp storeOp,
+ float clearDepth,
+ DXGI_FORMAT format);
+ void SetDepthNoAccess();
+ void SetDepthStencilNoAccess();
+ void SetRenderTargetBeginningAccess(ColorAttachmentIndex attachment,
+ wgpu::LoadOp loadOp,
+ dawn::native::Color clearColor,
+ DXGI_FORMAT format);
+ void SetRenderTargetEndingAccess(ColorAttachmentIndex attachment, wgpu::StoreOp storeOp);
+ void SetRenderTargetEndingAccessResolve(ColorAttachmentIndex attachment,
+ wgpu::StoreOp storeOp,
+ TextureView* resolveSource,
+ TextureView* resolveDestination);
+ void SetStencilAccess(wgpu::LoadOp loadOp,
+ wgpu::StoreOp storeOp,
+ uint8_t clearStencil,
+ DXGI_FORMAT format);
+ void SetStencilNoAccess();
- void SetRenderTargetView(ColorAttachmentIndex attachmentIndex,
- D3D12_CPU_DESCRIPTOR_HANDLE baseDescriptor,
- bool isNullRTV);
- void SetDepthStencilView(D3D12_CPU_DESCRIPTOR_HANDLE baseDescriptor);
+ void SetRenderTargetView(ColorAttachmentIndex attachmentIndex,
+ D3D12_CPU_DESCRIPTOR_HANDLE baseDescriptor,
+ bool isNullRTV);
+ void SetDepthStencilView(D3D12_CPU_DESCRIPTOR_HANDLE baseDescriptor);
- private:
- ColorAttachmentIndex mHighestColorAttachmentIndexPlusOne{uint8_t(0)};
- bool mHasDepthOrStencil = false;
- D3D12_RENDER_PASS_FLAGS mRenderPassFlags = D3D12_RENDER_PASS_FLAG_NONE;
- D3D12_RENDER_PASS_DEPTH_STENCIL_DESC mRenderPassDepthStencilDesc;
- ityp::
- array<ColorAttachmentIndex, D3D12_RENDER_PASS_RENDER_TARGET_DESC, kMaxColorAttachments>
- mRenderPassRenderTargetDescriptors;
- ityp::array<ColorAttachmentIndex, D3D12_CPU_DESCRIPTOR_HANDLE, kMaxColorAttachments>
- mRenderTargetViews;
- ityp::array<ColorAttachmentIndex,
- D3D12_RENDER_PASS_ENDING_ACCESS_RESOLVE_SUBRESOURCE_PARAMETERS,
- kMaxColorAttachments>
- mSubresourceParams;
- };
+ private:
+ ColorAttachmentIndex mHighestColorAttachmentIndexPlusOne{uint8_t(0)};
+ bool mHasDepthOrStencil = false;
+ D3D12_RENDER_PASS_FLAGS mRenderPassFlags = D3D12_RENDER_PASS_FLAG_NONE;
+ D3D12_RENDER_PASS_DEPTH_STENCIL_DESC mRenderPassDepthStencilDesc;
+ ityp::array<ColorAttachmentIndex, D3D12_RENDER_PASS_RENDER_TARGET_DESC, kMaxColorAttachments>
+ mRenderPassRenderTargetDescriptors;
+ ityp::array<ColorAttachmentIndex, D3D12_CPU_DESCRIPTOR_HANDLE, kMaxColorAttachments>
+ mRenderTargetViews;
+ ityp::array<ColorAttachmentIndex,
+ D3D12_RENDER_PASS_ENDING_ACCESS_RESOLVE_SUBRESOURCE_PARAMETERS,
+ kMaxColorAttachments>
+ mSubresourceParams;
+};
} // namespace dawn::native::d3d12
#endif // SRC_DAWN_NATIVE_D3D12_RENDERPASSBUILDERD3D12_H_
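
The per-attachment state above is held in ityp::array indexed by ColorAttachmentIndex rather than a raw integer. A rough sketch of the idea, assuming nothing beyond the standard library; TypedArray and ColorAttachmentTag are invented names here, and Dawn's ityp:: helpers are more featureful:

```cpp
#include <array>
#include <cstddef>
#include <cstdio>

// A toy strongly-typed index: each index type is a distinct C++ type, so a
// color-attachment index cannot be accidentally mixed with, say, a vertex
// buffer slot. Dawn's ityp::array follows the same idea.
template <typename Tag, typename T, size_t N>
class TypedArray {
  public:
    struct Index {
        size_t value;
    };
    T& operator[](Index i) { return mData[i.value]; }

  private:
    std::array<T, N> mData{};
};

struct ColorAttachmentTag {};
using ColorAttachments = TypedArray<ColorAttachmentTag, int, 8>;

int main() {
    ColorAttachments rtvs;
    rtvs[{2}] = 42;   // indexing requires the typed index
    // rtvs[2] = 42;  // would not compile: a plain integer is rejected
    std::printf("%d\n", rtvs[{2}]);
}
```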
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/RenderPipelineD3D12.cpp b/chromium/third_party/dawn/src/dawn/native/d3d12/RenderPipelineD3D12.cpp
index 7168454e6c0..00fc77b2ddf 100644
--- a/chromium/third_party/dawn/src/dawn/native/d3d12/RenderPipelineD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/RenderPipelineD3D12.cpp
@@ -14,9 +14,15 @@
#include "dawn/native/d3d12/RenderPipelineD3D12.h"
+#include <d3dcompiler.h>
+
+#include <memory>
+#include <utility>
+
#include "dawn/common/Assert.h"
#include "dawn/common/Log.h"
#include "dawn/native/CreatePipelineAsyncTask.h"
+#include "dawn/native/d3d12/BlobD3D12.h"
#include "dawn/native/d3d12/D3D12Error.h"
#include "dawn/native/d3d12/DeviceD3D12.h"
#include "dawn/native/d3d12/PipelineLayoutD3D12.h"
@@ -25,481 +31,512 @@
#include "dawn/native/d3d12/TextureD3D12.h"
#include "dawn/native/d3d12/UtilsD3D12.h"
-#include <d3dcompiler.h>
-
namespace dawn::native::d3d12 {
- namespace {
- DXGI_FORMAT VertexFormatType(wgpu::VertexFormat format) {
- switch (format) {
- case wgpu::VertexFormat::Uint8x2:
- return DXGI_FORMAT_R8G8_UINT;
- case wgpu::VertexFormat::Uint8x4:
- return DXGI_FORMAT_R8G8B8A8_UINT;
- case wgpu::VertexFormat::Sint8x2:
- return DXGI_FORMAT_R8G8_SINT;
- case wgpu::VertexFormat::Sint8x4:
- return DXGI_FORMAT_R8G8B8A8_SINT;
- case wgpu::VertexFormat::Unorm8x2:
- return DXGI_FORMAT_R8G8_UNORM;
- case wgpu::VertexFormat::Unorm8x4:
- return DXGI_FORMAT_R8G8B8A8_UNORM;
- case wgpu::VertexFormat::Snorm8x2:
- return DXGI_FORMAT_R8G8_SNORM;
- case wgpu::VertexFormat::Snorm8x4:
- return DXGI_FORMAT_R8G8B8A8_SNORM;
- case wgpu::VertexFormat::Uint16x2:
- return DXGI_FORMAT_R16G16_UINT;
- case wgpu::VertexFormat::Uint16x4:
- return DXGI_FORMAT_R16G16B16A16_UINT;
- case wgpu::VertexFormat::Sint16x2:
- return DXGI_FORMAT_R16G16_SINT;
- case wgpu::VertexFormat::Sint16x4:
- return DXGI_FORMAT_R16G16B16A16_SINT;
- case wgpu::VertexFormat::Unorm16x2:
- return DXGI_FORMAT_R16G16_UNORM;
- case wgpu::VertexFormat::Unorm16x4:
- return DXGI_FORMAT_R16G16B16A16_UNORM;
- case wgpu::VertexFormat::Snorm16x2:
- return DXGI_FORMAT_R16G16_SNORM;
- case wgpu::VertexFormat::Snorm16x4:
- return DXGI_FORMAT_R16G16B16A16_SNORM;
- case wgpu::VertexFormat::Float16x2:
- return DXGI_FORMAT_R16G16_FLOAT;
- case wgpu::VertexFormat::Float16x4:
- return DXGI_FORMAT_R16G16B16A16_FLOAT;
- case wgpu::VertexFormat::Float32:
- return DXGI_FORMAT_R32_FLOAT;
- case wgpu::VertexFormat::Float32x2:
- return DXGI_FORMAT_R32G32_FLOAT;
- case wgpu::VertexFormat::Float32x3:
- return DXGI_FORMAT_R32G32B32_FLOAT;
- case wgpu::VertexFormat::Float32x4:
- return DXGI_FORMAT_R32G32B32A32_FLOAT;
- case wgpu::VertexFormat::Uint32:
- return DXGI_FORMAT_R32_UINT;
- case wgpu::VertexFormat::Uint32x2:
- return DXGI_FORMAT_R32G32_UINT;
- case wgpu::VertexFormat::Uint32x3:
- return DXGI_FORMAT_R32G32B32_UINT;
- case wgpu::VertexFormat::Uint32x4:
- return DXGI_FORMAT_R32G32B32A32_UINT;
- case wgpu::VertexFormat::Sint32:
- return DXGI_FORMAT_R32_SINT;
- case wgpu::VertexFormat::Sint32x2:
- return DXGI_FORMAT_R32G32_SINT;
- case wgpu::VertexFormat::Sint32x3:
- return DXGI_FORMAT_R32G32B32_SINT;
- case wgpu::VertexFormat::Sint32x4:
- return DXGI_FORMAT_R32G32B32A32_SINT;
- default:
- UNREACHABLE();
- }
- }
-
- D3D12_INPUT_CLASSIFICATION VertexStepModeFunction(wgpu::VertexStepMode mode) {
- switch (mode) {
- case wgpu::VertexStepMode::Vertex:
- return D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA;
- case wgpu::VertexStepMode::Instance:
- return D3D12_INPUT_CLASSIFICATION_PER_INSTANCE_DATA;
- }
- }
-
- D3D12_PRIMITIVE_TOPOLOGY D3D12PrimitiveTopology(wgpu::PrimitiveTopology primitiveTopology) {
- switch (primitiveTopology) {
- case wgpu::PrimitiveTopology::PointList:
- return D3D_PRIMITIVE_TOPOLOGY_POINTLIST;
- case wgpu::PrimitiveTopology::LineList:
- return D3D_PRIMITIVE_TOPOLOGY_LINELIST;
- case wgpu::PrimitiveTopology::LineStrip:
- return D3D_PRIMITIVE_TOPOLOGY_LINESTRIP;
- case wgpu::PrimitiveTopology::TriangleList:
- return D3D_PRIMITIVE_TOPOLOGY_TRIANGLELIST;
- case wgpu::PrimitiveTopology::TriangleStrip:
- return D3D_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP;
- }
- }
-
- D3D12_PRIMITIVE_TOPOLOGY_TYPE D3D12PrimitiveTopologyType(
- wgpu::PrimitiveTopology primitiveTopology) {
- switch (primitiveTopology) {
- case wgpu::PrimitiveTopology::PointList:
- return D3D12_PRIMITIVE_TOPOLOGY_TYPE_POINT;
- case wgpu::PrimitiveTopology::LineList:
- case wgpu::PrimitiveTopology::LineStrip:
- return D3D12_PRIMITIVE_TOPOLOGY_TYPE_LINE;
- case wgpu::PrimitiveTopology::TriangleList:
- case wgpu::PrimitiveTopology::TriangleStrip:
- return D3D12_PRIMITIVE_TOPOLOGY_TYPE_TRIANGLE;
- }
- }
+namespace {
+DXGI_FORMAT VertexFormatType(wgpu::VertexFormat format) {
+ switch (format) {
+ case wgpu::VertexFormat::Uint8x2:
+ return DXGI_FORMAT_R8G8_UINT;
+ case wgpu::VertexFormat::Uint8x4:
+ return DXGI_FORMAT_R8G8B8A8_UINT;
+ case wgpu::VertexFormat::Sint8x2:
+ return DXGI_FORMAT_R8G8_SINT;
+ case wgpu::VertexFormat::Sint8x4:
+ return DXGI_FORMAT_R8G8B8A8_SINT;
+ case wgpu::VertexFormat::Unorm8x2:
+ return DXGI_FORMAT_R8G8_UNORM;
+ case wgpu::VertexFormat::Unorm8x4:
+ return DXGI_FORMAT_R8G8B8A8_UNORM;
+ case wgpu::VertexFormat::Snorm8x2:
+ return DXGI_FORMAT_R8G8_SNORM;
+ case wgpu::VertexFormat::Snorm8x4:
+ return DXGI_FORMAT_R8G8B8A8_SNORM;
+ case wgpu::VertexFormat::Uint16x2:
+ return DXGI_FORMAT_R16G16_UINT;
+ case wgpu::VertexFormat::Uint16x4:
+ return DXGI_FORMAT_R16G16B16A16_UINT;
+ case wgpu::VertexFormat::Sint16x2:
+ return DXGI_FORMAT_R16G16_SINT;
+ case wgpu::VertexFormat::Sint16x4:
+ return DXGI_FORMAT_R16G16B16A16_SINT;
+ case wgpu::VertexFormat::Unorm16x2:
+ return DXGI_FORMAT_R16G16_UNORM;
+ case wgpu::VertexFormat::Unorm16x4:
+ return DXGI_FORMAT_R16G16B16A16_UNORM;
+ case wgpu::VertexFormat::Snorm16x2:
+ return DXGI_FORMAT_R16G16_SNORM;
+ case wgpu::VertexFormat::Snorm16x4:
+ return DXGI_FORMAT_R16G16B16A16_SNORM;
+ case wgpu::VertexFormat::Float16x2:
+ return DXGI_FORMAT_R16G16_FLOAT;
+ case wgpu::VertexFormat::Float16x4:
+ return DXGI_FORMAT_R16G16B16A16_FLOAT;
+ case wgpu::VertexFormat::Float32:
+ return DXGI_FORMAT_R32_FLOAT;
+ case wgpu::VertexFormat::Float32x2:
+ return DXGI_FORMAT_R32G32_FLOAT;
+ case wgpu::VertexFormat::Float32x3:
+ return DXGI_FORMAT_R32G32B32_FLOAT;
+ case wgpu::VertexFormat::Float32x4:
+ return DXGI_FORMAT_R32G32B32A32_FLOAT;
+ case wgpu::VertexFormat::Uint32:
+ return DXGI_FORMAT_R32_UINT;
+ case wgpu::VertexFormat::Uint32x2:
+ return DXGI_FORMAT_R32G32_UINT;
+ case wgpu::VertexFormat::Uint32x3:
+ return DXGI_FORMAT_R32G32B32_UINT;
+ case wgpu::VertexFormat::Uint32x4:
+ return DXGI_FORMAT_R32G32B32A32_UINT;
+ case wgpu::VertexFormat::Sint32:
+ return DXGI_FORMAT_R32_SINT;
+ case wgpu::VertexFormat::Sint32x2:
+ return DXGI_FORMAT_R32G32_SINT;
+ case wgpu::VertexFormat::Sint32x3:
+ return DXGI_FORMAT_R32G32B32_SINT;
+ case wgpu::VertexFormat::Sint32x4:
+ return DXGI_FORMAT_R32G32B32A32_SINT;
+ default:
+ UNREACHABLE();
+ }
+}
+
+D3D12_INPUT_CLASSIFICATION VertexStepModeFunction(wgpu::VertexStepMode mode) {
+ switch (mode) {
+ case wgpu::VertexStepMode::Vertex:
+ return D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA;
+ case wgpu::VertexStepMode::Instance:
+ return D3D12_INPUT_CLASSIFICATION_PER_INSTANCE_DATA;
+ case wgpu::VertexStepMode::VertexBufferNotUsed:
+ UNREACHABLE();
+ }
+}
+
+D3D12_PRIMITIVE_TOPOLOGY D3D12PrimitiveTopology(wgpu::PrimitiveTopology primitiveTopology) {
+ switch (primitiveTopology) {
+ case wgpu::PrimitiveTopology::PointList:
+ return D3D_PRIMITIVE_TOPOLOGY_POINTLIST;
+ case wgpu::PrimitiveTopology::LineList:
+ return D3D_PRIMITIVE_TOPOLOGY_LINELIST;
+ case wgpu::PrimitiveTopology::LineStrip:
+ return D3D_PRIMITIVE_TOPOLOGY_LINESTRIP;
+ case wgpu::PrimitiveTopology::TriangleList:
+ return D3D_PRIMITIVE_TOPOLOGY_TRIANGLELIST;
+ case wgpu::PrimitiveTopology::TriangleStrip:
+ return D3D_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP;
+ }
+}
+
+D3D12_PRIMITIVE_TOPOLOGY_TYPE D3D12PrimitiveTopologyType(
+ wgpu::PrimitiveTopology primitiveTopology) {
+ switch (primitiveTopology) {
+ case wgpu::PrimitiveTopology::PointList:
+ return D3D12_PRIMITIVE_TOPOLOGY_TYPE_POINT;
+ case wgpu::PrimitiveTopology::LineList:
+ case wgpu::PrimitiveTopology::LineStrip:
+ return D3D12_PRIMITIVE_TOPOLOGY_TYPE_LINE;
+ case wgpu::PrimitiveTopology::TriangleList:
+ case wgpu::PrimitiveTopology::TriangleStrip:
+ return D3D12_PRIMITIVE_TOPOLOGY_TYPE_TRIANGLE;
+ }
+}
+
+D3D12_CULL_MODE D3D12CullMode(wgpu::CullMode mode) {
+ switch (mode) {
+ case wgpu::CullMode::None:
+ return D3D12_CULL_MODE_NONE;
+ case wgpu::CullMode::Front:
+ return D3D12_CULL_MODE_FRONT;
+ case wgpu::CullMode::Back:
+ return D3D12_CULL_MODE_BACK;
+ }
+}
+
+D3D12_BLEND D3D12Blend(wgpu::BlendFactor factor) {
+ switch (factor) {
+ case wgpu::BlendFactor::Zero:
+ return D3D12_BLEND_ZERO;
+ case wgpu::BlendFactor::One:
+ return D3D12_BLEND_ONE;
+ case wgpu::BlendFactor::Src:
+ return D3D12_BLEND_SRC_COLOR;
+ case wgpu::BlendFactor::OneMinusSrc:
+ return D3D12_BLEND_INV_SRC_COLOR;
+ case wgpu::BlendFactor::SrcAlpha:
+ return D3D12_BLEND_SRC_ALPHA;
+ case wgpu::BlendFactor::OneMinusSrcAlpha:
+ return D3D12_BLEND_INV_SRC_ALPHA;
+ case wgpu::BlendFactor::Dst:
+ return D3D12_BLEND_DEST_COLOR;
+ case wgpu::BlendFactor::OneMinusDst:
+ return D3D12_BLEND_INV_DEST_COLOR;
+ case wgpu::BlendFactor::DstAlpha:
+ return D3D12_BLEND_DEST_ALPHA;
+ case wgpu::BlendFactor::OneMinusDstAlpha:
+ return D3D12_BLEND_INV_DEST_ALPHA;
+ case wgpu::BlendFactor::SrcAlphaSaturated:
+ return D3D12_BLEND_SRC_ALPHA_SAT;
+ case wgpu::BlendFactor::Constant:
+ return D3D12_BLEND_BLEND_FACTOR;
+ case wgpu::BlendFactor::OneMinusConstant:
+ return D3D12_BLEND_INV_BLEND_FACTOR;
+ }
+}
+
+// When a blend factor is defined for the alpha channel, any of the factors that don't
+// explicitly state that they apply to alpha should be treated as their explicitly-alpha
+// equivalents. See: https://github.com/gpuweb/gpuweb/issues/65
+D3D12_BLEND D3D12AlphaBlend(wgpu::BlendFactor factor) {
+ switch (factor) {
+ case wgpu::BlendFactor::Src:
+ return D3D12_BLEND_SRC_ALPHA;
+ case wgpu::BlendFactor::OneMinusSrc:
+ return D3D12_BLEND_INV_SRC_ALPHA;
+ case wgpu::BlendFactor::Dst:
+ return D3D12_BLEND_DEST_ALPHA;
+ case wgpu::BlendFactor::OneMinusDst:
+ return D3D12_BLEND_INV_DEST_ALPHA;
+
+ // Other blend factors translate to the same D3D12 enum as the color blend factors.
+ default:
+ return D3D12Blend(factor);
+ }
+}
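
As a concrete illustration of the promotion in D3D12AlphaBlend above: when the alpha channel reuses the Src/OneMinusSrc factors, they are emitted as the *_ALPHA variants, matching the WebGPU semantics referenced in the comment (and, as far as the D3D12 documentation goes, the alpha blend slots do not accept *_COLOR options). A standalone sketch using string names as stand-ins for the D3D12_BLEND_* values:

```cpp
#include <cstdio>
#include <string>

// Stand-in for wgpu::BlendFactor; only the factors used below.
enum class BlendFactor { SrcAlpha, OneMinusSrcAlpha, Src, OneMinusSrc };

std::string ColorFactorName(BlendFactor f) {
    switch (f) {
        case BlendFactor::SrcAlpha:         return "D3D12_BLEND_SRC_ALPHA";
        case BlendFactor::OneMinusSrcAlpha: return "D3D12_BLEND_INV_SRC_ALPHA";
        case BlendFactor::Src:              return "D3D12_BLEND_SRC_COLOR";
        case BlendFactor::OneMinusSrc:      return "D3D12_BLEND_INV_SRC_COLOR";
    }
    return {};
}

// Mirrors D3D12AlphaBlend above: Src/OneMinusSrc are promoted to their
// *_ALPHA equivalents when used on the alpha channel.
std::string AlphaFactorName(BlendFactor f) {
    switch (f) {
        case BlendFactor::Src:         return "D3D12_BLEND_SRC_ALPHA";
        case BlendFactor::OneMinusSrc: return "D3D12_BLEND_INV_SRC_ALPHA";
        default:                       return ColorFactorName(f);
    }
}

int main() {
    // "Source-over" style blending: color uses SrcAlpha/OneMinusSrcAlpha,
    // alpha reuses Src/OneMinusSrc and gets promoted.
    std::printf("color src: %s\n", ColorFactorName(BlendFactor::SrcAlpha).c_str());
    std::printf("alpha src: %s\n", AlphaFactorName(BlendFactor::Src).c_str());
    std::printf("alpha dst: %s\n", AlphaFactorName(BlendFactor::OneMinusSrc).c_str());
}
```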
+
+D3D12_BLEND_OP D3D12BlendOperation(wgpu::BlendOperation operation) {
+ switch (operation) {
+ case wgpu::BlendOperation::Add:
+ return D3D12_BLEND_OP_ADD;
+ case wgpu::BlendOperation::Subtract:
+ return D3D12_BLEND_OP_SUBTRACT;
+ case wgpu::BlendOperation::ReverseSubtract:
+ return D3D12_BLEND_OP_REV_SUBTRACT;
+ case wgpu::BlendOperation::Min:
+ return D3D12_BLEND_OP_MIN;
+ case wgpu::BlendOperation::Max:
+ return D3D12_BLEND_OP_MAX;
+ }
+}
+
+uint8_t D3D12RenderTargetWriteMask(wgpu::ColorWriteMask writeMask) {
+ static_assert(static_cast<D3D12_COLOR_WRITE_ENABLE>(wgpu::ColorWriteMask::Red) ==
+ D3D12_COLOR_WRITE_ENABLE_RED,
+ "ColorWriteMask values must match");
+ static_assert(static_cast<D3D12_COLOR_WRITE_ENABLE>(wgpu::ColorWriteMask::Green) ==
+ D3D12_COLOR_WRITE_ENABLE_GREEN,
+ "ColorWriteMask values must match");
+ static_assert(static_cast<D3D12_COLOR_WRITE_ENABLE>(wgpu::ColorWriteMask::Blue) ==
+ D3D12_COLOR_WRITE_ENABLE_BLUE,
+ "ColorWriteMask values must match");
+ static_assert(static_cast<D3D12_COLOR_WRITE_ENABLE>(wgpu::ColorWriteMask::Alpha) ==
+ D3D12_COLOR_WRITE_ENABLE_ALPHA,
+ "ColorWriteMask values must match");
+ return static_cast<uint8_t>(writeMask);
+}
+
+D3D12_RENDER_TARGET_BLEND_DESC ComputeColorDesc(const ColorTargetState* state) {
+ D3D12_RENDER_TARGET_BLEND_DESC blendDesc;
+ blendDesc.BlendEnable = state->blend != nullptr;
+ if (blendDesc.BlendEnable) {
+ blendDesc.SrcBlend = D3D12Blend(state->blend->color.srcFactor);
+ blendDesc.DestBlend = D3D12Blend(state->blend->color.dstFactor);
+ blendDesc.BlendOp = D3D12BlendOperation(state->blend->color.operation);
+ blendDesc.SrcBlendAlpha = D3D12AlphaBlend(state->blend->alpha.srcFactor);
+ blendDesc.DestBlendAlpha = D3D12AlphaBlend(state->blend->alpha.dstFactor);
+ blendDesc.BlendOpAlpha = D3D12BlendOperation(state->blend->alpha.operation);
+ }
+ blendDesc.RenderTargetWriteMask = D3D12RenderTargetWriteMask(state->writeMask);
+ blendDesc.LogicOpEnable = false;
+ blendDesc.LogicOp = D3D12_LOGIC_OP_NOOP;
+ return blendDesc;
+}
+
+D3D12_STENCIL_OP StencilOp(wgpu::StencilOperation op) {
+ switch (op) {
+ case wgpu::StencilOperation::Keep:
+ return D3D12_STENCIL_OP_KEEP;
+ case wgpu::StencilOperation::Zero:
+ return D3D12_STENCIL_OP_ZERO;
+ case wgpu::StencilOperation::Replace:
+ return D3D12_STENCIL_OP_REPLACE;
+ case wgpu::StencilOperation::IncrementClamp:
+ return D3D12_STENCIL_OP_INCR_SAT;
+ case wgpu::StencilOperation::DecrementClamp:
+ return D3D12_STENCIL_OP_DECR_SAT;
+ case wgpu::StencilOperation::Invert:
+ return D3D12_STENCIL_OP_INVERT;
+ case wgpu::StencilOperation::IncrementWrap:
+ return D3D12_STENCIL_OP_INCR;
+ case wgpu::StencilOperation::DecrementWrap:
+ return D3D12_STENCIL_OP_DECR;
+ }
+}
+
+D3D12_DEPTH_STENCILOP_DESC StencilOpDesc(const StencilFaceState& descriptor) {
+ D3D12_DEPTH_STENCILOP_DESC desc;
+
+ desc.StencilFailOp = StencilOp(descriptor.failOp);
+ desc.StencilDepthFailOp = StencilOp(descriptor.depthFailOp);
+ desc.StencilPassOp = StencilOp(descriptor.passOp);
+ desc.StencilFunc = ToD3D12ComparisonFunc(descriptor.compare);
+
+ return desc;
+}
+
+D3D12_DEPTH_STENCIL_DESC ComputeDepthStencilDesc(const DepthStencilState* descriptor) {
+ D3D12_DEPTH_STENCIL_DESC mDepthStencilDescriptor;
+ mDepthStencilDescriptor.DepthEnable =
+ (descriptor->depthCompare == wgpu::CompareFunction::Always &&
+ !descriptor->depthWriteEnabled)
+ ? FALSE
+ : TRUE;
+ mDepthStencilDescriptor.DepthWriteMask =
+ descriptor->depthWriteEnabled ? D3D12_DEPTH_WRITE_MASK_ALL : D3D12_DEPTH_WRITE_MASK_ZERO;
+ mDepthStencilDescriptor.DepthFunc = ToD3D12ComparisonFunc(descriptor->depthCompare);
+
+ mDepthStencilDescriptor.StencilEnable = StencilTestEnabled(descriptor) ? TRUE : FALSE;
+ mDepthStencilDescriptor.StencilReadMask = static_cast<UINT8>(descriptor->stencilReadMask);
+ mDepthStencilDescriptor.StencilWriteMask = static_cast<UINT8>(descriptor->stencilWriteMask);
+
+ mDepthStencilDescriptor.FrontFace = StencilOpDesc(descriptor->stencilFront);
+ mDepthStencilDescriptor.BackFace = StencilOpDesc(descriptor->stencilBack);
+ return mDepthStencilDescriptor;
+}
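
A small sketch of the DepthEnable rule in ComputeDepthStencilDesc above: the depth stage is disabled only when the compare function is Always and depth writes are off, since in that case it can neither reject fragments nor touch the depth buffer. The enum is a stand-in for wgpu::CompareFunction:

```cpp
#include <cassert>

enum class CompareFunction { Never, Less, Equal, Always };  // stand-in subset

// Depth testing can be skipped only when it can neither reject fragments
// (compare == Always) nor modify the depth buffer (writes disabled).
bool DepthEnabled(CompareFunction depthCompare, bool depthWriteEnabled) {
    return !(depthCompare == CompareFunction::Always && !depthWriteEnabled);
}

int main() {
    assert(!DepthEnabled(CompareFunction::Always, /*depthWriteEnabled=*/false));
    assert(DepthEnabled(CompareFunction::Always, /*depthWriteEnabled=*/true));
    assert(DepthEnabled(CompareFunction::Less, /*depthWriteEnabled=*/false));
    return 0;
}
```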
+
+D3D12_INDEX_BUFFER_STRIP_CUT_VALUE ComputeIndexBufferStripCutValue(
+ wgpu::PrimitiveTopology primitiveTopology,
+ wgpu::IndexFormat indexFormat) {
+ if (primitiveTopology != wgpu::PrimitiveTopology::TriangleStrip &&
+ primitiveTopology != wgpu::PrimitiveTopology::LineStrip) {
+ return D3D12_INDEX_BUFFER_STRIP_CUT_VALUE_DISABLED;
+ }
- D3D12_CULL_MODE D3D12CullMode(wgpu::CullMode mode) {
- switch (mode) {
- case wgpu::CullMode::None:
- return D3D12_CULL_MODE_NONE;
- case wgpu::CullMode::Front:
- return D3D12_CULL_MODE_FRONT;
- case wgpu::CullMode::Back:
- return D3D12_CULL_MODE_BACK;
- }
- }
+ switch (indexFormat) {
+ case wgpu::IndexFormat::Uint16:
+ return D3D12_INDEX_BUFFER_STRIP_CUT_VALUE_0xFFFF;
+ case wgpu::IndexFormat::Uint32:
+ return D3D12_INDEX_BUFFER_STRIP_CUT_VALUE_0xFFFFFFFF;
+ case wgpu::IndexFormat::Undefined:
+ return D3D12_INDEX_BUFFER_STRIP_CUT_VALUE_DISABLED;
+ }
+}
- D3D12_BLEND D3D12Blend(wgpu::BlendFactor factor) {
- switch (factor) {
- case wgpu::BlendFactor::Zero:
- return D3D12_BLEND_ZERO;
- case wgpu::BlendFactor::One:
- return D3D12_BLEND_ONE;
- case wgpu::BlendFactor::Src:
- return D3D12_BLEND_SRC_COLOR;
- case wgpu::BlendFactor::OneMinusSrc:
- return D3D12_BLEND_INV_SRC_COLOR;
- case wgpu::BlendFactor::SrcAlpha:
- return D3D12_BLEND_SRC_ALPHA;
- case wgpu::BlendFactor::OneMinusSrcAlpha:
- return D3D12_BLEND_INV_SRC_ALPHA;
- case wgpu::BlendFactor::Dst:
- return D3D12_BLEND_DEST_COLOR;
- case wgpu::BlendFactor::OneMinusDst:
- return D3D12_BLEND_INV_DEST_COLOR;
- case wgpu::BlendFactor::DstAlpha:
- return D3D12_BLEND_DEST_ALPHA;
- case wgpu::BlendFactor::OneMinusDstAlpha:
- return D3D12_BLEND_INV_DEST_ALPHA;
- case wgpu::BlendFactor::SrcAlphaSaturated:
- return D3D12_BLEND_SRC_ALPHA_SAT;
- case wgpu::BlendFactor::Constant:
- return D3D12_BLEND_BLEND_FACTOR;
- case wgpu::BlendFactor::OneMinusConstant:
- return D3D12_BLEND_INV_BLEND_FACTOR;
- }
- }
+} // anonymous namespace
- // When a blend factor is defined for the alpha channel, any of the factors that don't
- // explicitly state that they apply to alpha should be treated as their explicitly-alpha
- // equivalents. See: https://github.com/gpuweb/gpuweb/issues/65
- D3D12_BLEND D3D12AlphaBlend(wgpu::BlendFactor factor) {
- switch (factor) {
- case wgpu::BlendFactor::Src:
- return D3D12_BLEND_SRC_ALPHA;
- case wgpu::BlendFactor::OneMinusSrc:
- return D3D12_BLEND_INV_SRC_ALPHA;
- case wgpu::BlendFactor::Dst:
- return D3D12_BLEND_DEST_ALPHA;
- case wgpu::BlendFactor::OneMinusDst:
- return D3D12_BLEND_INV_DEST_ALPHA;
-
- // Other blend factors translate to the same D3D12 enum as the color blend factors.
- default:
- return D3D12Blend(factor);
- }
- }
+Ref<RenderPipeline> RenderPipeline::CreateUninitialized(
+ Device* device,
+ const RenderPipelineDescriptor* descriptor) {
+ return AcquireRef(new RenderPipeline(device, descriptor));
+}
- D3D12_BLEND_OP D3D12BlendOperation(wgpu::BlendOperation operation) {
- switch (operation) {
- case wgpu::BlendOperation::Add:
- return D3D12_BLEND_OP_ADD;
- case wgpu::BlendOperation::Subtract:
- return D3D12_BLEND_OP_SUBTRACT;
- case wgpu::BlendOperation::ReverseSubtract:
- return D3D12_BLEND_OP_REV_SUBTRACT;
- case wgpu::BlendOperation::Min:
- return D3D12_BLEND_OP_MIN;
- case wgpu::BlendOperation::Max:
- return D3D12_BLEND_OP_MAX;
- }
- }
+MaybeError RenderPipeline::Initialize() {
+ Device* device = ToBackend(GetDevice());
+ uint32_t compileFlags = 0;
- uint8_t D3D12RenderTargetWriteMask(wgpu::ColorWriteMask writeMask) {
- static_assert(static_cast<D3D12_COLOR_WRITE_ENABLE>(wgpu::ColorWriteMask::Red) ==
- D3D12_COLOR_WRITE_ENABLE_RED,
- "ColorWriteMask values must match");
- static_assert(static_cast<D3D12_COLOR_WRITE_ENABLE>(wgpu::ColorWriteMask::Green) ==
- D3D12_COLOR_WRITE_ENABLE_GREEN,
- "ColorWriteMask values must match");
- static_assert(static_cast<D3D12_COLOR_WRITE_ENABLE>(wgpu::ColorWriteMask::Blue) ==
- D3D12_COLOR_WRITE_ENABLE_BLUE,
- "ColorWriteMask values must match");
- static_assert(static_cast<D3D12_COLOR_WRITE_ENABLE>(wgpu::ColorWriteMask::Alpha) ==
- D3D12_COLOR_WRITE_ENABLE_ALPHA,
- "ColorWriteMask values must match");
- return static_cast<uint8_t>(writeMask);
- }
-
- D3D12_RENDER_TARGET_BLEND_DESC ComputeColorDesc(const ColorTargetState* state) {
- D3D12_RENDER_TARGET_BLEND_DESC blendDesc;
- blendDesc.BlendEnable = state->blend != nullptr;
- if (blendDesc.BlendEnable) {
- blendDesc.SrcBlend = D3D12Blend(state->blend->color.srcFactor);
- blendDesc.DestBlend = D3D12Blend(state->blend->color.dstFactor);
- blendDesc.BlendOp = D3D12BlendOperation(state->blend->color.operation);
- blendDesc.SrcBlendAlpha = D3D12AlphaBlend(state->blend->alpha.srcFactor);
- blendDesc.DestBlendAlpha = D3D12AlphaBlend(state->blend->alpha.dstFactor);
- blendDesc.BlendOpAlpha = D3D12BlendOperation(state->blend->alpha.operation);
- }
- blendDesc.RenderTargetWriteMask = D3D12RenderTargetWriteMask(state->writeMask);
- blendDesc.LogicOpEnable = false;
- blendDesc.LogicOp = D3D12_LOGIC_OP_NOOP;
- return blendDesc;
- }
+ if (!device->IsToggleEnabled(Toggle::UseDXC) &&
+ !device->IsToggleEnabled(Toggle::FxcOptimizations)) {
+ compileFlags |= D3DCOMPILE_OPTIMIZATION_LEVEL0;
+ }
- D3D12_STENCIL_OP StencilOp(wgpu::StencilOperation op) {
- switch (op) {
- case wgpu::StencilOperation::Keep:
- return D3D12_STENCIL_OP_KEEP;
- case wgpu::StencilOperation::Zero:
- return D3D12_STENCIL_OP_ZERO;
- case wgpu::StencilOperation::Replace:
- return D3D12_STENCIL_OP_REPLACE;
- case wgpu::StencilOperation::IncrementClamp:
- return D3D12_STENCIL_OP_INCR_SAT;
- case wgpu::StencilOperation::DecrementClamp:
- return D3D12_STENCIL_OP_DECR_SAT;
- case wgpu::StencilOperation::Invert:
- return D3D12_STENCIL_OP_INVERT;
- case wgpu::StencilOperation::IncrementWrap:
- return D3D12_STENCIL_OP_INCR;
- case wgpu::StencilOperation::DecrementWrap:
- return D3D12_STENCIL_OP_DECR;
- }
- }
+ if (device->IsToggleEnabled(Toggle::EmitHLSLDebugSymbols)) {
+ compileFlags |= D3DCOMPILE_DEBUG | D3DCOMPILE_SKIP_OPTIMIZATION;
+ }
- D3D12_DEPTH_STENCILOP_DESC StencilOpDesc(const StencilFaceState& descriptor) {
- D3D12_DEPTH_STENCILOP_DESC desc;
+    // SPIRV-Cross does matrix multiplication expecting row-major matrices
+ compileFlags |= D3DCOMPILE_PACK_MATRIX_ROW_MAJOR;
- desc.StencilFailOp = StencilOp(descriptor.failOp);
- desc.StencilDepthFailOp = StencilOp(descriptor.depthFailOp);
- desc.StencilPassOp = StencilOp(descriptor.passOp);
- desc.StencilFunc = ToD3D12ComparisonFunc(descriptor.compare);
+ // FXC can miscompile code that depends on special float values (NaN, INF, etc) when IEEE
+ // strictness is not enabled. See crbug.com/tint/976.
+ compileFlags |= D3DCOMPILE_IEEE_STRICTNESS;
- return desc;
- }
+ D3D12_GRAPHICS_PIPELINE_STATE_DESC descriptorD3D12 = {};
- D3D12_DEPTH_STENCIL_DESC ComputeDepthStencilDesc(const DepthStencilState* descriptor) {
- D3D12_DEPTH_STENCIL_DESC mDepthStencilDescriptor;
- mDepthStencilDescriptor.DepthEnable =
- (descriptor->depthCompare == wgpu::CompareFunction::Always &&
- !descriptor->depthWriteEnabled)
- ? FALSE
- : TRUE;
- mDepthStencilDescriptor.DepthWriteMask = descriptor->depthWriteEnabled
- ? D3D12_DEPTH_WRITE_MASK_ALL
- : D3D12_DEPTH_WRITE_MASK_ZERO;
- mDepthStencilDescriptor.DepthFunc = ToD3D12ComparisonFunc(descriptor->depthCompare);
-
- mDepthStencilDescriptor.StencilEnable = StencilTestEnabled(descriptor) ? TRUE : FALSE;
- mDepthStencilDescriptor.StencilReadMask =
- static_cast<UINT8>(descriptor->stencilReadMask);
- mDepthStencilDescriptor.StencilWriteMask =
- static_cast<UINT8>(descriptor->stencilWriteMask);
-
- mDepthStencilDescriptor.FrontFace = StencilOpDesc(descriptor->stencilFront);
- mDepthStencilDescriptor.BackFace = StencilOpDesc(descriptor->stencilBack);
- return mDepthStencilDescriptor;
- }
+ PerStage<ProgrammableStage> pipelineStages = GetAllStages();
- D3D12_INDEX_BUFFER_STRIP_CUT_VALUE ComputeIndexBufferStripCutValue(
- wgpu::PrimitiveTopology primitiveTopology,
- wgpu::IndexFormat indexFormat) {
- if (primitiveTopology != wgpu::PrimitiveTopology::TriangleStrip &&
- primitiveTopology != wgpu::PrimitiveTopology::LineStrip) {
- return D3D12_INDEX_BUFFER_STRIP_CUT_VALUE_DISABLED;
- }
-
- switch (indexFormat) {
- case wgpu::IndexFormat::Uint16:
- return D3D12_INDEX_BUFFER_STRIP_CUT_VALUE_0xFFFF;
- case wgpu::IndexFormat::Uint32:
- return D3D12_INDEX_BUFFER_STRIP_CUT_VALUE_0xFFFFFFFF;
- case wgpu::IndexFormat::Undefined:
- return D3D12_INDEX_BUFFER_STRIP_CUT_VALUE_DISABLED;
- }
- }
+ PerStage<D3D12_SHADER_BYTECODE*> shaders;
+ shaders[SingleShaderStage::Vertex] = &descriptorD3D12.VS;
+ shaders[SingleShaderStage::Fragment] = &descriptorD3D12.PS;
- } // anonymous namespace
+ PerStage<CompiledShader> compiledShader;
- Ref<RenderPipeline> RenderPipeline::CreateUninitialized(
- Device* device,
- const RenderPipelineDescriptor* descriptor) {
- return AcquireRef(new RenderPipeline(device, descriptor));
+ for (auto stage : IterateStages(GetStageMask())) {
+ DAWN_TRY_ASSIGN(compiledShader[stage], ToBackend(pipelineStages[stage].module)
+ ->Compile(pipelineStages[stage], stage,
+ ToBackend(GetLayout()), compileFlags));
+ *shaders[stage] = compiledShader[stage].GetD3D12ShaderBytecode();
}
- MaybeError RenderPipeline::Initialize() {
- Device* device = ToBackend(GetDevice());
- uint32_t compileFlags = 0;
+ mUsesVertexOrInstanceIndex =
+ compiledShader[SingleShaderStage::Vertex].usesVertexOrInstanceIndex;
- if (!device->IsToggleEnabled(Toggle::UseDXC) &&
- !device->IsToggleEnabled(Toggle::FxcOptimizations)) {
- compileFlags |= D3DCOMPILE_OPTIMIZATION_LEVEL0;
- }
+ PipelineLayout* layout = ToBackend(GetLayout());
- if (device->IsToggleEnabled(Toggle::EmitHLSLDebugSymbols)) {
- compileFlags |= D3DCOMPILE_DEBUG | D3DCOMPILE_SKIP_OPTIMIZATION;
- }
+ descriptorD3D12.pRootSignature = layout->GetRootSignature();
- // SPRIV-cross does matrix multiplication expecting row major matrices
- compileFlags |= D3DCOMPILE_PACK_MATRIX_ROW_MAJOR;
+ // D3D12 logs warnings if any empty input state is used
+ std::array<D3D12_INPUT_ELEMENT_DESC, kMaxVertexAttributes> inputElementDescriptors;
+ if (GetAttributeLocationsUsed().any()) {
+ descriptorD3D12.InputLayout = ComputeInputLayout(&inputElementDescriptors);
+ }
- // FXC can miscompile code that depends on special float values (NaN, INF, etc) when IEEE
- // strictness is not enabled. See crbug.com/tint/976.
- compileFlags |= D3DCOMPILE_IEEE_STRICTNESS;
+ descriptorD3D12.IBStripCutValue =
+ ComputeIndexBufferStripCutValue(GetPrimitiveTopology(), GetStripIndexFormat());
+
+ descriptorD3D12.RasterizerState.FillMode = D3D12_FILL_MODE_SOLID;
+ descriptorD3D12.RasterizerState.CullMode = D3D12CullMode(GetCullMode());
+ descriptorD3D12.RasterizerState.FrontCounterClockwise =
+ (GetFrontFace() == wgpu::FrontFace::CCW) ? TRUE : FALSE;
+ descriptorD3D12.RasterizerState.DepthBias = GetDepthBias();
+ descriptorD3D12.RasterizerState.DepthBiasClamp = GetDepthBiasClamp();
+ descriptorD3D12.RasterizerState.SlopeScaledDepthBias = GetDepthBiasSlopeScale();
+ descriptorD3D12.RasterizerState.DepthClipEnable = TRUE;
+ descriptorD3D12.RasterizerState.MultisampleEnable = (GetSampleCount() > 1) ? TRUE : FALSE;
+ descriptorD3D12.RasterizerState.AntialiasedLineEnable = FALSE;
+ descriptorD3D12.RasterizerState.ForcedSampleCount = 0;
+ descriptorD3D12.RasterizerState.ConservativeRaster = D3D12_CONSERVATIVE_RASTERIZATION_MODE_OFF;
+
+ if (HasDepthStencilAttachment()) {
+ descriptorD3D12.DSVFormat = D3D12TextureFormat(GetDepthStencilFormat());
+ }
- D3D12_GRAPHICS_PIPELINE_STATE_DESC descriptorD3D12 = {};
+ static_assert(kMaxColorAttachments == 8);
+ for (uint8_t i = 0; i < kMaxColorAttachments; i++) {
+ descriptorD3D12.RTVFormats[i] = DXGI_FORMAT_UNKNOWN;
+ descriptorD3D12.BlendState.RenderTarget[i].BlendEnable = false;
+ descriptorD3D12.BlendState.RenderTarget[i].RenderTargetWriteMask = 0;
+ descriptorD3D12.BlendState.RenderTarget[i].LogicOpEnable = false;
+ descriptorD3D12.BlendState.RenderTarget[i].LogicOp = D3D12_LOGIC_OP_NOOP;
+ }
+ ColorAttachmentIndex highestColorAttachmentIndexPlusOne =
+ GetHighestBitIndexPlusOne(GetColorAttachmentsMask());
+ for (ColorAttachmentIndex i : IterateBitSet(GetColorAttachmentsMask())) {
+ descriptorD3D12.RTVFormats[static_cast<uint8_t>(i)] =
+ D3D12TextureFormat(GetColorAttachmentFormat(i));
+ descriptorD3D12.BlendState.RenderTarget[static_cast<uint8_t>(i)] =
+ ComputeColorDesc(GetColorTargetState(i));
+ }
+ ASSERT(highestColorAttachmentIndexPlusOne <= kMaxColorAttachmentsTyped);
+ descriptorD3D12.NumRenderTargets = static_cast<uint8_t>(highestColorAttachmentIndexPlusOne);
- PerStage<ProgrammableStage> pipelineStages = GetAllStages();
+ descriptorD3D12.BlendState.AlphaToCoverageEnable = IsAlphaToCoverageEnabled();
+ descriptorD3D12.BlendState.IndependentBlendEnable = TRUE;
- PerStage<D3D12_SHADER_BYTECODE*> shaders;
- shaders[SingleShaderStage::Vertex] = &descriptorD3D12.VS;
- shaders[SingleShaderStage::Fragment] = &descriptorD3D12.PS;
+ descriptorD3D12.DepthStencilState = ComputeDepthStencilDesc(GetDepthStencilState());
- PerStage<CompiledShader> compiledShader;
+ descriptorD3D12.SampleMask = GetSampleMask();
+ descriptorD3D12.PrimitiveTopologyType = D3D12PrimitiveTopologyType(GetPrimitiveTopology());
+ descriptorD3D12.SampleDesc.Count = GetSampleCount();
+ descriptorD3D12.SampleDesc.Quality = 0;
- for (auto stage : IterateStages(GetStageMask())) {
- DAWN_TRY_ASSIGN(
- compiledShader[stage],
- ToBackend(pipelineStages[stage].module)
- ->Compile(pipelineStages[stage], stage, ToBackend(GetLayout()), compileFlags));
- *shaders[stage] = compiledShader[stage].GetD3D12ShaderBytecode();
- }
+ mD3d12PrimitiveTopology = D3D12PrimitiveTopology(GetPrimitiveTopology());
- mFirstOffsetInfo = compiledShader[SingleShaderStage::Vertex].firstOffsetInfo;
+ mCacheKey.Record(descriptorD3D12, *layout->GetRootSignatureBlob());
- PipelineLayout* layout = ToBackend(GetLayout());
+ // Try to see if we have anything in the blob cache.
+ Blob blob = device->LoadCachedBlob(GetCacheKey());
+ const bool cacheHit = !blob.Empty();
+ if (cacheHit) {
+        // Cache hit: attach the cached blob to the descriptor.
+ descriptorD3D12.CachedPSO.pCachedBlob = blob.Data();
+ descriptorD3D12.CachedPSO.CachedBlobSizeInBytes = blob.Size();
+ }
- descriptorD3D12.pRootSignature = layout->GetRootSignature();
+ DAWN_TRY(CheckHRESULT(device->GetD3D12Device()->CreateGraphicsPipelineState(
+ &descriptorD3D12, IID_PPV_ARGS(&mPipelineState)),
+ "D3D12 create graphics pipeline state"));
- // D3D12 logs warnings if any empty input state is used
- std::array<D3D12_INPUT_ELEMENT_DESC, kMaxVertexAttributes> inputElementDescriptors;
- if (GetAttributeLocationsUsed().any()) {
- descriptorD3D12.InputLayout = ComputeInputLayout(&inputElementDescriptors);
- }
+ if (!cacheHit) {
+        // Cache miss: retrieve the pipeline's cached blob and store it.
+ ComPtr<ID3DBlob> d3dBlob;
+ DAWN_TRY(CheckHRESULT(GetPipelineState()->GetCachedBlob(&d3dBlob),
+ "D3D12 render pipeline state get cached blob"));
+ device->StoreCachedBlob(GetCacheKey(), CreateBlob(std::move(d3dBlob)));
+ }
- descriptorD3D12.IBStripCutValue =
- ComputeIndexBufferStripCutValue(GetPrimitiveTopology(), GetStripIndexFormat());
-
- descriptorD3D12.RasterizerState.FillMode = D3D12_FILL_MODE_SOLID;
- descriptorD3D12.RasterizerState.CullMode = D3D12CullMode(GetCullMode());
- descriptorD3D12.RasterizerState.FrontCounterClockwise =
- (GetFrontFace() == wgpu::FrontFace::CCW) ? TRUE : FALSE;
- descriptorD3D12.RasterizerState.DepthBias = GetDepthBias();
- descriptorD3D12.RasterizerState.DepthBiasClamp = GetDepthBiasClamp();
- descriptorD3D12.RasterizerState.SlopeScaledDepthBias = GetDepthBiasSlopeScale();
- descriptorD3D12.RasterizerState.DepthClipEnable = TRUE;
- descriptorD3D12.RasterizerState.MultisampleEnable = (GetSampleCount() > 1) ? TRUE : FALSE;
- descriptorD3D12.RasterizerState.AntialiasedLineEnable = FALSE;
- descriptorD3D12.RasterizerState.ForcedSampleCount = 0;
- descriptorD3D12.RasterizerState.ConservativeRaster =
- D3D12_CONSERVATIVE_RASTERIZATION_MODE_OFF;
-
- if (HasDepthStencilAttachment()) {
- descriptorD3D12.DSVFormat = D3D12TextureFormat(GetDepthStencilFormat());
- }
+ SetLabelImpl();
- static_assert(kMaxColorAttachments == 8);
- for (uint8_t i = 0; i < kMaxColorAttachments; i++) {
- descriptorD3D12.RTVFormats[i] = DXGI_FORMAT_UNKNOWN;
- descriptorD3D12.BlendState.RenderTarget[i].BlendEnable = false;
- descriptorD3D12.BlendState.RenderTarget[i].RenderTargetWriteMask = 0;
- descriptorD3D12.BlendState.RenderTarget[i].LogicOpEnable = false;
- descriptorD3D12.BlendState.RenderTarget[i].LogicOp = D3D12_LOGIC_OP_NOOP;
- }
- ColorAttachmentIndex highestColorAttachmentIndexPlusOne =
- GetHighestBitIndexPlusOne(GetColorAttachmentsMask());
- for (ColorAttachmentIndex i : IterateBitSet(GetColorAttachmentsMask())) {
- descriptorD3D12.RTVFormats[static_cast<uint8_t>(i)] =
- D3D12TextureFormat(GetColorAttachmentFormat(i));
- descriptorD3D12.BlendState.RenderTarget[static_cast<uint8_t>(i)] =
- ComputeColorDesc(GetColorTargetState(i));
- }
- ASSERT(highestColorAttachmentIndexPlusOne <= kMaxColorAttachmentsTyped);
- descriptorD3D12.NumRenderTargets = static_cast<uint8_t>(highestColorAttachmentIndexPlusOne);
+ return {};
+}
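
The pipeline caching added to Initialize() follows a simple look-aside pattern: consult the blob cache, attach a cached PSO on a hit, and on a miss create the pipeline, ask the driver for its cached blob, and store it. A rough model of that flow, with an in-memory map standing in for the device blob cache and a fake compile function in place of CreateGraphicsPipelineState/GetCachedBlob; every name in this sketch is illustrative:

```cpp
#include <cstdio>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

using Blob = std::vector<unsigned char>;

// Stands in for the device-level blob cache (LoadCachedBlob/StoreCachedBlob).
std::unordered_map<std::string, Blob> gBlobCache;

// Stands in for CreateGraphicsPipelineState plus ID3D12PipelineState::GetCachedBlob:
// compiling is expensive unless the driver is handed a previously cached blob.
Blob CreatePipeline(const std::string& cacheKey, const Blob* cachedPSO) {
    if (cachedPSO != nullptr) {
        std::printf("cache hit: driver reuses cached PSO for %s\n", cacheKey.c_str());
        return *cachedPSO;
    }
    std::printf("cache miss: full compile for %s\n", cacheKey.c_str());
    return Blob{1, 2, 3, 4};  // pretend driver-produced cached blob
}

void InitializePipeline(const std::string& cacheKey) {
    auto it = gBlobCache.find(cacheKey);
    const Blob* cached = (it != gBlobCache.end()) ? &it->second : nullptr;

    Blob blob = CreatePipeline(cacheKey, cached);

    // Only store on a miss, mirroring the !cacheHit branch above.
    if (cached == nullptr) {
        gBlobCache.emplace(cacheKey, std::move(blob));
    }
}

int main() {
    InitializePipeline("render_pipeline_A");  // miss: compile and store
    InitializePipeline("render_pipeline_A");  // hit: reuse cached blob
}
```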
- descriptorD3D12.BlendState.AlphaToCoverageEnable = IsAlphaToCoverageEnabled();
- descriptorD3D12.BlendState.IndependentBlendEnable = TRUE;
+RenderPipeline::~RenderPipeline() = default;
- descriptorD3D12.DepthStencilState = ComputeDepthStencilDesc(GetDepthStencilState());
+void RenderPipeline::DestroyImpl() {
+ RenderPipelineBase::DestroyImpl();
+ ToBackend(GetDevice())->ReferenceUntilUnused(mPipelineState);
+}
- descriptorD3D12.SampleMask = GetSampleMask();
- descriptorD3D12.PrimitiveTopologyType = D3D12PrimitiveTopologyType(GetPrimitiveTopology());
- descriptorD3D12.SampleDesc.Count = GetSampleCount();
- descriptorD3D12.SampleDesc.Quality = 0;
+D3D12_PRIMITIVE_TOPOLOGY RenderPipeline::GetD3D12PrimitiveTopology() const {
+ return mD3d12PrimitiveTopology;
+}
- mD3d12PrimitiveTopology = D3D12PrimitiveTopology(GetPrimitiveTopology());
+ID3D12PipelineState* RenderPipeline::GetPipelineState() const {
+ return mPipelineState.Get();
+}
- DAWN_TRY(CheckHRESULT(device->GetD3D12Device()->CreateGraphicsPipelineState(
- &descriptorD3D12, IID_PPV_ARGS(&mPipelineState)),
- "D3D12 create graphics pipeline state"));
+bool RenderPipeline::UsesVertexOrInstanceIndex() const {
+ return mUsesVertexOrInstanceIndex;
+}
- SetLabelImpl();
+void RenderPipeline::SetLabelImpl() {
+ SetDebugName(ToBackend(GetDevice()), GetPipelineState(), "Dawn_RenderPipeline", GetLabel());
+}
- return {};
+ComPtr<ID3D12CommandSignature> RenderPipeline::GetDrawIndirectCommandSignature() {
+ if (mUsesVertexOrInstanceIndex) {
+ return ToBackend(GetLayout())->GetDrawIndirectCommandSignatureWithInstanceVertexOffsets();
}
- RenderPipeline::~RenderPipeline() = default;
+ return ToBackend(GetDevice())->GetDrawIndirectSignature();
+}
- void RenderPipeline::DestroyImpl() {
- RenderPipelineBase::DestroyImpl();
- ToBackend(GetDevice())->ReferenceUntilUnused(mPipelineState);
+ComPtr<ID3D12CommandSignature> RenderPipeline::GetDrawIndexedIndirectCommandSignature() {
+ if (mUsesVertexOrInstanceIndex) {
+ return ToBackend(GetLayout())
+ ->GetDrawIndexedIndirectCommandSignatureWithInstanceVertexOffsets();
}
- D3D12_PRIMITIVE_TOPOLOGY RenderPipeline::GetD3D12PrimitiveTopology() const {
- return mD3d12PrimitiveTopology;
- }
+ return ToBackend(GetDevice())->GetDrawIndexedIndirectSignature();
+}
- ID3D12PipelineState* RenderPipeline::GetPipelineState() const {
- return mPipelineState.Get();
- }
+D3D12_INPUT_LAYOUT_DESC RenderPipeline::ComputeInputLayout(
+ std::array<D3D12_INPUT_ELEMENT_DESC, kMaxVertexAttributes>* inputElementDescriptors) {
+ unsigned int count = 0;
+ for (VertexAttributeLocation loc : IterateBitSet(GetAttributeLocationsUsed())) {
+ D3D12_INPUT_ELEMENT_DESC& inputElementDescriptor = (*inputElementDescriptors)[count++];
- const FirstOffsetInfo& RenderPipeline::GetFirstOffsetInfo() const {
- return mFirstOffsetInfo;
- }
+ const VertexAttributeInfo& attribute = GetAttribute(loc);
- void RenderPipeline::SetLabelImpl() {
- SetDebugName(ToBackend(GetDevice()), GetPipelineState(), "Dawn_RenderPipeline", GetLabel());
- }
+ // If the HLSL semantic is TEXCOORDN the SemanticName should be "TEXCOORD" and the
+ // SemanticIndex N
+ inputElementDescriptor.SemanticName = "TEXCOORD";
+ inputElementDescriptor.SemanticIndex = static_cast<uint8_t>(loc);
+ inputElementDescriptor.Format = VertexFormatType(attribute.format);
+ inputElementDescriptor.InputSlot = static_cast<uint8_t>(attribute.vertexBufferSlot);
- D3D12_INPUT_LAYOUT_DESC RenderPipeline::ComputeInputLayout(
- std::array<D3D12_INPUT_ELEMENT_DESC, kMaxVertexAttributes>* inputElementDescriptors) {
- unsigned int count = 0;
- for (VertexAttributeLocation loc : IterateBitSet(GetAttributeLocationsUsed())) {
- D3D12_INPUT_ELEMENT_DESC& inputElementDescriptor = (*inputElementDescriptors)[count++];
-
- const VertexAttributeInfo& attribute = GetAttribute(loc);
-
- // If the HLSL semantic is TEXCOORDN the SemanticName should be "TEXCOORD" and the
- // SemanticIndex N
- inputElementDescriptor.SemanticName = "TEXCOORD";
- inputElementDescriptor.SemanticIndex = static_cast<uint8_t>(loc);
- inputElementDescriptor.Format = VertexFormatType(attribute.format);
- inputElementDescriptor.InputSlot = static_cast<uint8_t>(attribute.vertexBufferSlot);
-
- const VertexBufferInfo& input = GetVertexBuffer(attribute.vertexBufferSlot);
-
- inputElementDescriptor.AlignedByteOffset = attribute.offset;
- inputElementDescriptor.InputSlotClass = VertexStepModeFunction(input.stepMode);
- if (inputElementDescriptor.InputSlotClass ==
- D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA) {
- inputElementDescriptor.InstanceDataStepRate = 0;
- } else {
- inputElementDescriptor.InstanceDataStepRate = 1;
- }
- }
+ const VertexBufferInfo& input = GetVertexBuffer(attribute.vertexBufferSlot);
- D3D12_INPUT_LAYOUT_DESC inputLayoutDescriptor;
- inputLayoutDescriptor.pInputElementDescs = &(*inputElementDescriptors)[0];
- inputLayoutDescriptor.NumElements = count;
- return inputLayoutDescriptor;
+ inputElementDescriptor.AlignedByteOffset = attribute.offset;
+ inputElementDescriptor.InputSlotClass = VertexStepModeFunction(input.stepMode);
+ if (inputElementDescriptor.InputSlotClass == D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA) {
+ inputElementDescriptor.InstanceDataStepRate = 0;
+ } else {
+ inputElementDescriptor.InstanceDataStepRate = 1;
+ }
}
- void RenderPipeline::InitializeAsync(Ref<RenderPipelineBase> renderPipeline,
- WGPUCreateRenderPipelineAsyncCallback callback,
- void* userdata) {
- std::unique_ptr<CreateRenderPipelineAsyncTask> asyncTask =
- std::make_unique<CreateRenderPipelineAsyncTask>(std::move(renderPipeline), callback,
- userdata);
- CreateRenderPipelineAsyncTask::RunAsync(std::move(asyncTask));
- }
+ D3D12_INPUT_LAYOUT_DESC inputLayoutDescriptor;
+ inputLayoutDescriptor.pInputElementDescs = &(*inputElementDescriptors)[0];
+ inputLayoutDescriptor.NumElements = count;
+ return inputLayoutDescriptor;
+}
+
+void RenderPipeline::InitializeAsync(Ref<RenderPipelineBase> renderPipeline,
+ WGPUCreateRenderPipelineAsyncCallback callback,
+ void* userdata) {
+ std::unique_ptr<CreateRenderPipelineAsyncTask> asyncTask =
+ std::make_unique<CreateRenderPipelineAsyncTask>(std::move(renderPipeline), callback,
+ userdata);
+ CreateRenderPipelineAsyncTask::RunAsync(std::move(asyncTask));
+}
} // namespace dawn::native::d3d12
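
For ComputeInputLayout above, a standalone sketch of how a vertex attribute at shader location N becomes an HLSL TEXCOORDN input element, and how the step mode selects the instance data step rate. The struct mirrors only the fields the diff touches and is not the real D3D12_INPUT_ELEMENT_DESC:

```cpp
#include <cstdint>
#include <cstdio>

// Mirrors just the fields used above; not the real D3D12_INPUT_ELEMENT_DESC.
struct InputElement {
    const char* semanticName;
    uint8_t semanticIndex;
    uint8_t inputSlot;
    uint32_t alignedByteOffset;
    uint32_t instanceDataStepRate;
};

// A vertex attribute at shader location N becomes semantic "TEXCOORD" with
// SemanticIndex N; per-instance buffers advance once per instance (step rate 1),
// per-vertex buffers use step rate 0.
InputElement MakeInputElement(uint8_t location, uint8_t vertexBufferSlot,
                              uint32_t offset, bool perInstance) {
    return {"TEXCOORD", location, vertexBufferSlot, offset, perInstance ? 1u : 0u};
}

int main() {
    InputElement e = MakeInputElement(/*location=*/3, /*vertexBufferSlot=*/0,
                                      /*offset=*/16, /*perInstance=*/false);
    std::printf("%s%u slot=%u offset=%u stepRate=%u\n", e.semanticName,
                unsigned(e.semanticIndex), unsigned(e.inputSlot),
                unsigned(e.alignedByteOffset), unsigned(e.instanceDataStepRate));
}
```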
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/RenderPipelineD3D12.h b/chromium/third_party/dawn/src/dawn/native/d3d12/RenderPipelineD3D12.h
index 049520deeb0..51a93616af2 100644
--- a/chromium/third_party/dawn/src/dawn/native/d3d12/RenderPipelineD3D12.h
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/RenderPipelineD3D12.h
@@ -22,40 +22,44 @@
namespace dawn::native::d3d12 {
- class Device;
+class Device;
- class RenderPipeline final : public RenderPipelineBase {
- public:
- static Ref<RenderPipeline> CreateUninitialized(Device* device,
- const RenderPipelineDescriptor* descriptor);
- static void InitializeAsync(Ref<RenderPipelineBase> renderPipeline,
- WGPUCreateRenderPipelineAsyncCallback callback,
- void* userdata);
- RenderPipeline() = delete;
+class RenderPipeline final : public RenderPipelineBase {
+ public:
+ static Ref<RenderPipeline> CreateUninitialized(Device* device,
+ const RenderPipelineDescriptor* descriptor);
+ static void InitializeAsync(Ref<RenderPipelineBase> renderPipeline,
+ WGPUCreateRenderPipelineAsyncCallback callback,
+ void* userdata);
+ RenderPipeline() = delete;
- MaybeError Initialize() override;
+ MaybeError Initialize() override;
- D3D12_PRIMITIVE_TOPOLOGY GetD3D12PrimitiveTopology() const;
- ID3D12PipelineState* GetPipelineState() const;
+ D3D12_PRIMITIVE_TOPOLOGY GetD3D12PrimitiveTopology() const;
+ ID3D12PipelineState* GetPipelineState() const;
- const FirstOffsetInfo& GetFirstOffsetInfo() const;
+ bool UsesVertexOrInstanceIndex() const;
- // Dawn API
- void SetLabelImpl() override;
+ // Dawn API
+ void SetLabelImpl() override;
- private:
- ~RenderPipeline() override;
+ ComPtr<ID3D12CommandSignature> GetDrawIndirectCommandSignature();
- void DestroyImpl() override;
+ ComPtr<ID3D12CommandSignature> GetDrawIndexedIndirectCommandSignature();
- using RenderPipelineBase::RenderPipelineBase;
- D3D12_INPUT_LAYOUT_DESC ComputeInputLayout(
- std::array<D3D12_INPUT_ELEMENT_DESC, kMaxVertexAttributes>* inputElementDescriptors);
+ private:
+ ~RenderPipeline() override;
- D3D12_PRIMITIVE_TOPOLOGY mD3d12PrimitiveTopology;
- ComPtr<ID3D12PipelineState> mPipelineState;
- FirstOffsetInfo mFirstOffsetInfo;
- };
+ void DestroyImpl() override;
+
+ using RenderPipelineBase::RenderPipelineBase;
+ D3D12_INPUT_LAYOUT_DESC ComputeInputLayout(
+ std::array<D3D12_INPUT_ELEMENT_DESC, kMaxVertexAttributes>* inputElementDescriptors);
+
+ D3D12_PRIMITIVE_TOPOLOGY mD3d12PrimitiveTopology;
+ ComPtr<ID3D12PipelineState> mPipelineState;
+ bool mUsesVertexOrInstanceIndex;
+};
} // namespace dawn::native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/ResidencyManagerD3D12.cpp b/chromium/third_party/dawn/src/dawn/native/d3d12/ResidencyManagerD3D12.cpp
index b7aab2ccfdd..29525055003 100644
--- a/chromium/third_party/dawn/src/dawn/native/d3d12/ResidencyManagerD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/ResidencyManagerD3D12.cpp
@@ -14,6 +14,9 @@
#include "dawn/native/d3d12/ResidencyManagerD3D12.h"
+#include <algorithm>
+#include <vector>
+
#include "dawn/native/d3d12/AdapterD3D12.h"
#include "dawn/native/d3d12/D3D12Error.h"
#include "dawn/native/d3d12/DeviceD3D12.h"
@@ -22,350 +25,346 @@
namespace dawn::native::d3d12 {
- ResidencyManager::ResidencyManager(Device* device)
- : mDevice(device),
- mResidencyManagementEnabled(
- device->IsToggleEnabled(Toggle::UseD3D12ResidencyManagement)) {
- UpdateVideoMemoryInfo();
+ResidencyManager::ResidencyManager(Device* device)
+ : mDevice(device),
+ mResidencyManagementEnabled(device->IsToggleEnabled(Toggle::UseD3D12ResidencyManagement)) {
+ UpdateVideoMemoryInfo();
+}
+
+// Increments number of locks on a heap to ensure the heap remains resident.
+MaybeError ResidencyManager::LockAllocation(Pageable* pageable) {
+ if (!mResidencyManagementEnabled) {
+ return {};
}
- // Increments number of locks on a heap to ensure the heap remains resident.
- MaybeError ResidencyManager::LockAllocation(Pageable* pageable) {
- if (!mResidencyManagementEnabled) {
- return {};
- }
+ // If the heap isn't already resident, make it resident.
+ if (!pageable->IsInResidencyLRUCache() && !pageable->IsResidencyLocked()) {
+ ID3D12Pageable* d3d12Pageable = pageable->GetD3D12Pageable();
+ uint64_t size = pageable->GetSize();
- // If the heap isn't already resident, make it resident.
- if (!pageable->IsInResidencyLRUCache() && !pageable->IsResidencyLocked()) {
- ID3D12Pageable* d3d12Pageable = pageable->GetD3D12Pageable();
- uint64_t size = pageable->GetSize();
+ DAWN_TRY(MakeAllocationsResident(GetMemorySegmentInfo(pageable->GetMemorySegment()), size,
+ 1, &d3d12Pageable));
+ }
- DAWN_TRY(MakeAllocationsResident(GetMemorySegmentInfo(pageable->GetMemorySegment()),
- size, 1, &d3d12Pageable));
- }
+ // Since we can't evict the heap, it's unnecessary to track the heap in the LRU Cache.
+ if (pageable->IsInResidencyLRUCache()) {
+ pageable->RemoveFromList();
+ }
- // Since we can't evict the heap, it's unnecessary to track the heap in the LRU Cache.
- if (pageable->IsInResidencyLRUCache()) {
- pageable->RemoveFromList();
- }
+ pageable->IncrementResidencyLock();
- pageable->IncrementResidencyLock();
+ return {};
+}
- return {};
+// Decrements number of locks on a heap. When the number of locks becomes zero, the heap is
+// inserted into the LRU cache and becomes eligible for eviction.
+void ResidencyManager::UnlockAllocation(Pageable* pageable) {
+ if (!mResidencyManagementEnabled) {
+ return;
}
- // Decrements number of locks on a heap. When the number of locks becomes zero, the heap is
- // inserted into the LRU cache and becomes eligible for eviction.
- void ResidencyManager::UnlockAllocation(Pageable* pageable) {
- if (!mResidencyManagementEnabled) {
- return;
- }
-
- ASSERT(pageable->IsResidencyLocked());
- ASSERT(!pageable->IsInResidencyLRUCache());
- pageable->DecrementResidencyLock();
+ ASSERT(pageable->IsResidencyLocked());
+ ASSERT(!pageable->IsInResidencyLRUCache());
+ pageable->DecrementResidencyLock();
- // If another lock still exists on the heap, nothing further should be done.
- if (pageable->IsResidencyLocked()) {
- return;
- }
-
- // When all locks have been removed, the resource remains resident and becomes tracked in
- // the corresponding LRU.
- TrackResidentAllocation(pageable);
+ // If another lock still exists on the heap, nothing further should be done.
+ if (pageable->IsResidencyLocked()) {
+ return;
}
- // Returns the appropriate MemorySegmentInfo for a given MemorySegment.
- ResidencyManager::MemorySegmentInfo* ResidencyManager::GetMemorySegmentInfo(
- MemorySegment memorySegment) {
- switch (memorySegment) {
- case MemorySegment::Local:
- return &mVideoMemoryInfo.local;
- case MemorySegment::NonLocal:
- ASSERT(!mDevice->GetDeviceInfo().isUMA);
- return &mVideoMemoryInfo.nonLocal;
- default:
- UNREACHABLE();
- }
+ // When all locks have been removed, the resource remains resident and becomes tracked in
+ // the corresponding LRU.
+ TrackResidentAllocation(pageable);
+}
+
+// Returns the appropriate MemorySegmentInfo for a given MemorySegment.
+ResidencyManager::MemorySegmentInfo* ResidencyManager::GetMemorySegmentInfo(
+ MemorySegment memorySegment) {
+ switch (memorySegment) {
+ case MemorySegment::Local:
+ return &mVideoMemoryInfo.local;
+ case MemorySegment::NonLocal:
+ ASSERT(!mDevice->GetDeviceInfo().isUMA);
+ return &mVideoMemoryInfo.nonLocal;
+ default:
+ UNREACHABLE();
}
+}
- // Allows an application component external to Dawn to cap Dawn's residency budgets to prevent
- // competition for device memory. Returns the amount of memory reserved, which may be less
- // that the requested reservation when under pressure.
- uint64_t ResidencyManager::SetExternalMemoryReservation(MemorySegment segment,
- uint64_t requestedReservationSize) {
- MemorySegmentInfo* segmentInfo = GetMemorySegmentInfo(segment);
+// Allows an application component external to Dawn to cap Dawn's residency budgets to prevent
+// competition for device memory. Returns the amount of memory reserved, which may be less
+// than the requested reservation when under pressure.
+uint64_t ResidencyManager::SetExternalMemoryReservation(MemorySegment segment,
+ uint64_t requestedReservationSize) {
+ MemorySegmentInfo* segmentInfo = GetMemorySegmentInfo(segment);
- segmentInfo->externalRequest = requestedReservationSize;
+ segmentInfo->externalRequest = requestedReservationSize;
- UpdateMemorySegmentInfo(segmentInfo);
+ UpdateMemorySegmentInfo(segmentInfo);
- return segmentInfo->externalReservation;
- }
+ return segmentInfo->externalReservation;
+}
- void ResidencyManager::UpdateVideoMemoryInfo() {
- UpdateMemorySegmentInfo(&mVideoMemoryInfo.local);
- if (!mDevice->GetDeviceInfo().isUMA) {
- UpdateMemorySegmentInfo(&mVideoMemoryInfo.nonLocal);
- }
+void ResidencyManager::UpdateVideoMemoryInfo() {
+ UpdateMemorySegmentInfo(&mVideoMemoryInfo.local);
+ if (!mDevice->GetDeviceInfo().isUMA) {
+ UpdateMemorySegmentInfo(&mVideoMemoryInfo.nonLocal);
}
+}
- void ResidencyManager::UpdateMemorySegmentInfo(MemorySegmentInfo* segmentInfo) {
- DXGI_QUERY_VIDEO_MEMORY_INFO queryVideoMemoryInfo;
-
- ToBackend(mDevice->GetAdapter())
- ->GetHardwareAdapter()
- ->QueryVideoMemoryInfo(0, segmentInfo->dxgiSegment, &queryVideoMemoryInfo);
+void ResidencyManager::UpdateMemorySegmentInfo(MemorySegmentInfo* segmentInfo) {
+ DXGI_QUERY_VIDEO_MEMORY_INFO queryVideoMemoryInfo;
- // The video memory budget provided by QueryVideoMemoryInfo is defined by the operating
- // system, and may be lower than expected in certain scenarios. Under memory pressure, we
- // cap the external reservation to half the available budget, which prevents the external
- // component from consuming a disproportionate share of memory and ensures that Dawn can
- // continue to make forward progress. Note the choice to halve memory is arbitrarily chosen
- // and subject to future experimentation.
- segmentInfo->externalReservation =
- std::min(queryVideoMemoryInfo.Budget / 2, segmentInfo->externalRequest);
+ ToBackend(mDevice->GetAdapter())
+ ->GetHardwareAdapter()
+ ->QueryVideoMemoryInfo(0, segmentInfo->dxgiSegment, &queryVideoMemoryInfo);
- segmentInfo->usage = queryVideoMemoryInfo.CurrentUsage - segmentInfo->externalReservation;
+ // The video memory budget provided by QueryVideoMemoryInfo is defined by the operating
+ // system, and may be lower than expected in certain scenarios. Under memory pressure, we
+ // cap the external reservation to half the available budget, which prevents the external
+ // component from consuming a disproportionate share of memory and ensures that Dawn can
+    // continue to make forward progress. Note that halving the budget is an arbitrary choice
+    // and subject to future experimentation.
+ segmentInfo->externalReservation =
+ std::min(queryVideoMemoryInfo.Budget / 2, segmentInfo->externalRequest);
- // If we're restricting the budget for testing, leave the budget as is.
- if (mRestrictBudgetForTesting) {
- return;
- }
+ segmentInfo->usage = queryVideoMemoryInfo.CurrentUsage - segmentInfo->externalReservation;
- // We cap Dawn's budget to 95% of the provided budget. Leaving some budget unused
- // decreases fluctuations in the operating-system-defined budget, which improves stability
- // for both Dawn and other applications on the system. Note the value of 95% is arbitrarily
- // chosen and subject to future experimentation.
- static constexpr float kBudgetCap = 0.95;
- segmentInfo->budget =
- (queryVideoMemoryInfo.Budget - segmentInfo->externalReservation) * kBudgetCap;
+ // If we're restricting the budget for testing, leave the budget as is.
+ if (mRestrictBudgetForTesting) {
+ return;
}
- // Removes a heap from the LRU and returns the least recently used heap when possible. Returns
- // nullptr when nothing further can be evicted.
- ResultOrError<Pageable*> ResidencyManager::RemoveSingleEntryFromLRU(
- MemorySegmentInfo* memorySegment) {
- // If the LRU is empty, return nullptr to allow execution to continue. Note that fully
- // emptying the LRU is undesirable, because it can mean either 1) the LRU is not accurately
- // accounting for Dawn's GPU allocations, or 2) a component external to Dawn is using all of
- // the process budget and starving Dawn, which will cause thrash.
- if (memorySegment->lruCache.empty()) {
- return nullptr;
- }
-
- Pageable* pageable = memorySegment->lruCache.head()->value();
-
- ExecutionSerial lastSubmissionSerial = pageable->GetLastSubmission();
+ // We cap Dawn's budget to 95% of the provided budget. Leaving some budget unused
+ // decreases fluctuations in the operating-system-defined budget, which improves stability
+ // for both Dawn and other applications on the system. Note the value of 95% is arbitrarily
+ // chosen and subject to future experimentation.
+ static constexpr float kBudgetCap = 0.95;
+ segmentInfo->budget =
+ (queryVideoMemoryInfo.Budget - segmentInfo->externalReservation) * kBudgetCap;
+}
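
A worked example of the budget arithmetic in UpdateMemorySegmentInfo above, using plain numbers instead of a DXGI_QUERY_VIDEO_MEMORY_INFO query; the half-budget cap on external reservations and the 95% budget cap come straight from the comments in the diff:

```cpp
#include <algorithm>
#include <cstdint>
#include <cstdio>

struct SegmentBudget {
    uint64_t externalReservation;
    uint64_t usage;
    uint64_t dawnBudget;
};

// Mirrors the arithmetic above: cap the external reservation at half the OS
// budget, subtract it from current usage, and keep 95% of what remains as
// Dawn's working budget.
SegmentBudget ComputeBudget(uint64_t osBudget, uint64_t osCurrentUsage,
                            uint64_t externalRequest) {
    SegmentBudget out;
    out.externalReservation = std::min(osBudget / 2, externalRequest);
    out.usage = osCurrentUsage - out.externalReservation;
    out.dawnBudget =
        static_cast<uint64_t>((osBudget - out.externalReservation) * 0.95);
    return out;
}

int main() {
    // e.g. a 4 GiB OS budget, 2 GiB currently in use, a 1 GiB external request:
    // reservation = 1 GiB, usage = 1 GiB, Dawn budget = 0.95 * 3 GiB.
    const uint64_t GiB = 1ull << 30;
    SegmentBudget b = ComputeBudget(4 * GiB, 2 * GiB, 1 * GiB);
    std::printf("reserved=%llu usage=%llu budget=%llu\n",
                (unsigned long long)b.externalReservation,
                (unsigned long long)b.usage,
                (unsigned long long)b.dawnBudget);
}
```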
+
+// Removes a heap from the LRU and returns the least recently used heap when possible. Returns
+// nullptr when nothing further can be evicted.
+ResultOrError<Pageable*> ResidencyManager::RemoveSingleEntryFromLRU(
+ MemorySegmentInfo* memorySegment) {
+ // If the LRU is empty, return nullptr to allow execution to continue. Note that fully
+ // emptying the LRU is undesirable, because it can mean either 1) the LRU is not accurately
+ // accounting for Dawn's GPU allocations, or 2) a component external to Dawn is using all of
+ // the process budget and starving Dawn, which will cause thrash.
+ if (memorySegment->lruCache.empty()) {
+ return nullptr;
+ }
- // If the next candidate for eviction was inserted into the LRU during the current serial,
- // it is because more memory is being used in a single command list than is available.
- // In this scenario, we cannot make any more resources resident and thrashing must occur.
- if (lastSubmissionSerial == mDevice->GetPendingCommandSerial()) {
- return nullptr;
- }
+ Pageable* pageable = memorySegment->lruCache.head()->value();
- // We must ensure that any previous use of a resource has completed before the resource can
- // be evicted.
- if (lastSubmissionSerial > mDevice->GetCompletedCommandSerial()) {
- DAWN_TRY(mDevice->WaitForSerial(lastSubmissionSerial));
- }
+ ExecutionSerial lastSubmissionSerial = pageable->GetLastSubmission();
- pageable->RemoveFromList();
- return pageable;
+ // If the next candidate for eviction was inserted into the LRU during the current serial,
+ // it is because more memory is being used in a single command list than is available.
+ // In this scenario, we cannot make any more resources resident and thrashing must occur.
+ if (lastSubmissionSerial == mDevice->GetPendingCommandSerial()) {
+ return nullptr;
}
- MaybeError ResidencyManager::EnsureCanAllocate(uint64_t allocationSize,
- MemorySegment memorySegment) {
- if (!mResidencyManagementEnabled) {
- return {};
- }
+ // We must ensure that any previous use of a resource has completed before the resource can
+ // be evicted.
+ if (lastSubmissionSerial > mDevice->GetCompletedCommandSerial()) {
+ DAWN_TRY(mDevice->WaitForSerial(lastSubmissionSerial));
+ }
- uint64_t bytesEvicted;
- DAWN_TRY_ASSIGN(bytesEvicted,
- EnsureCanMakeResident(allocationSize, GetMemorySegmentInfo(memorySegment)));
- DAWN_UNUSED(bytesEvicted);
+ pageable->RemoveFromList();
+ return pageable;
+}
+MaybeError ResidencyManager::EnsureCanAllocate(uint64_t allocationSize,
+ MemorySegment memorySegment) {
+ if (!mResidencyManagementEnabled) {
return {};
}
- // Any time we need to make something resident, we must check that we have enough free memory to
- // make the new object resident while also staying within budget. If there isn't enough
- // memory, we should evict until there is. Returns the number of bytes evicted.
- ResultOrError<uint64_t> ResidencyManager::EnsureCanMakeResident(
- uint64_t sizeToMakeResident,
- MemorySegmentInfo* memorySegment) {
- ASSERT(mResidencyManagementEnabled);
+ uint64_t bytesEvicted;
+ DAWN_TRY_ASSIGN(bytesEvicted,
+ EnsureCanMakeResident(allocationSize, GetMemorySegmentInfo(memorySegment)));
+ DAWN_UNUSED(bytesEvicted);
- UpdateMemorySegmentInfo(memorySegment);
+ return {};
+}
- uint64_t memoryUsageAfterMakeResident = sizeToMakeResident + memorySegment->usage;
+// Any time we need to make something resident, we must check that we have enough free memory to
+// make the new object resident while also staying within budget. If there isn't enough
+// memory, we should evict until there is. Returns the number of bytes evicted.
+ResultOrError<uint64_t> ResidencyManager::EnsureCanMakeResident(uint64_t sizeToMakeResident,
+ MemorySegmentInfo* memorySegment) {
+ ASSERT(mResidencyManagementEnabled);
- // Return when we can call MakeResident and remain under budget.
- if (memoryUsageAfterMakeResident < memorySegment->budget) {
- return 0;
- }
+ UpdateMemorySegmentInfo(memorySegment);
- std::vector<ID3D12Pageable*> resourcesToEvict;
- uint64_t sizeNeededToBeUnderBudget = memoryUsageAfterMakeResident - memorySegment->budget;
- uint64_t sizeEvicted = 0;
- while (sizeEvicted < sizeNeededToBeUnderBudget) {
- Pageable* pageable;
- DAWN_TRY_ASSIGN(pageable, RemoveSingleEntryFromLRU(memorySegment));
+ uint64_t memoryUsageAfterMakeResident = sizeToMakeResident + memorySegment->usage;
- // If no heap was returned, then nothing more can be evicted.
- if (pageable == nullptr) {
- break;
- }
+ // Return when we can call MakeResident and remain under budget.
+ if (memoryUsageAfterMakeResident < memorySegment->budget) {
+ return 0;
+ }
- sizeEvicted += pageable->GetSize();
- resourcesToEvict.push_back(pageable->GetD3D12Pageable());
- }
+ std::vector<ID3D12Pageable*> resourcesToEvict;
+ uint64_t sizeNeededToBeUnderBudget = memoryUsageAfterMakeResident - memorySegment->budget;
+ uint64_t sizeEvicted = 0;
+ while (sizeEvicted < sizeNeededToBeUnderBudget) {
+ Pageable* pageable;
+ DAWN_TRY_ASSIGN(pageable, RemoveSingleEntryFromLRU(memorySegment));
- if (resourcesToEvict.size() > 0) {
- DAWN_TRY(CheckHRESULT(
- mDevice->GetD3D12Device()->Evict(resourcesToEvict.size(), resourcesToEvict.data()),
- "Evicting resident heaps to free memory"));
+ // If no heap was returned, then nothing more can be evicted.
+ if (pageable == nullptr) {
+ break;
}
- return sizeEvicted;
+ sizeEvicted += pageable->GetSize();
+ resourcesToEvict.push_back(pageable->GetD3D12Pageable());
}
- // Given a list of heaps that are pending usage, this function will estimate memory needed,
- // evict resources until enough space is available, then make resident any heaps scheduled for
- // usage.
- MaybeError ResidencyManager::EnsureHeapsAreResident(Heap** heaps, size_t heapCount) {
- if (!mResidencyManagementEnabled) {
- return {};
- }
-
- std::vector<ID3D12Pageable*> localHeapsToMakeResident;
- std::vector<ID3D12Pageable*> nonLocalHeapsToMakeResident;
- uint64_t localSizeToMakeResident = 0;
- uint64_t nonLocalSizeToMakeResident = 0;
+ if (resourcesToEvict.size() > 0) {
+ DAWN_TRY(CheckHRESULT(
+ mDevice->GetD3D12Device()->Evict(resourcesToEvict.size(), resourcesToEvict.data()),
+ "Evicting resident heaps to free memory"));
+ }
- ExecutionSerial pendingCommandSerial = mDevice->GetPendingCommandSerial();
- for (size_t i = 0; i < heapCount; i++) {
- Heap* heap = heaps[i];
+ return sizeEvicted;
+}
- // Heaps that are locked resident are not tracked in the LRU cache.
- if (heap->IsResidencyLocked()) {
- continue;
- }
+// Given a list of heaps that are pending usage, this function will estimate memory needed,
+// evict resources until enough space is available, then make resident any heaps scheduled for
+// usage.
+MaybeError ResidencyManager::EnsureHeapsAreResident(Heap** heaps, size_t heapCount) {
+ if (!mResidencyManagementEnabled) {
+ return {};
+ }
- if (heap->IsInResidencyLRUCache()) {
- // If the heap is already in the LRU, we must remove it and append again below to
- // update its position in the LRU.
- heap->RemoveFromList();
- } else {
- if (heap->GetMemorySegment() == MemorySegment::Local) {
- localSizeToMakeResident += heap->GetSize();
- localHeapsToMakeResident.push_back(heap->GetD3D12Pageable());
- } else {
- nonLocalSizeToMakeResident += heap->GetSize();
- nonLocalHeapsToMakeResident.push_back(heap->GetD3D12Pageable());
- }
- }
+ std::vector<ID3D12Pageable*> localHeapsToMakeResident;
+ std::vector<ID3D12Pageable*> nonLocalHeapsToMakeResident;
+ uint64_t localSizeToMakeResident = 0;
+ uint64_t nonLocalSizeToMakeResident = 0;
- // If we submit a command list to the GPU, we must ensure that heaps referenced by that
- // command list stay resident at least until that command list has finished execution.
- // Setting this serial unnecessarily can leave the LRU in a state where nothing is
- // eligible for eviction, even though some evictions may be possible.
- heap->SetLastSubmission(pendingCommandSerial);
+ ExecutionSerial pendingCommandSerial = mDevice->GetPendingCommandSerial();
+ for (size_t i = 0; i < heapCount; i++) {
+ Heap* heap = heaps[i];
- // Insert the heap into the appropriate LRU.
- TrackResidentAllocation(heap);
+ // Heaps that are locked resident are not tracked in the LRU cache.
+ if (heap->IsResidencyLocked()) {
+ continue;
}
- if (localSizeToMakeResident > 0) {
- return MakeAllocationsResident(&mVideoMemoryInfo.local, localSizeToMakeResident,
- localHeapsToMakeResident.size(),
- localHeapsToMakeResident.data());
+ if (heap->IsInResidencyLRUCache()) {
+ // If the heap is already in the LRU, we must remove it and append again below to
+ // update its position in the LRU.
+ heap->RemoveFromList();
+ } else {
+ if (heap->GetMemorySegment() == MemorySegment::Local) {
+ localSizeToMakeResident += heap->GetSize();
+ localHeapsToMakeResident.push_back(heap->GetD3D12Pageable());
+ } else {
+ nonLocalSizeToMakeResident += heap->GetSize();
+ nonLocalHeapsToMakeResident.push_back(heap->GetD3D12Pageable());
+ }
}
- if (nonLocalSizeToMakeResident > 0) {
- ASSERT(!mDevice->GetDeviceInfo().isUMA);
- return MakeAllocationsResident(&mVideoMemoryInfo.nonLocal, nonLocalSizeToMakeResident,
- nonLocalHeapsToMakeResident.size(),
- nonLocalHeapsToMakeResident.data());
- }
+ // If we submit a command list to the GPU, we must ensure that heaps referenced by that
+ // command list stay resident at least until that command list has finished execution.
+ // Setting this serial unnecessarily can leave the LRU in a state where nothing is
+ // eligible for eviction, even though some evictions may be possible.
+ heap->SetLastSubmission(pendingCommandSerial);
- return {};
+ // Insert the heap into the appropriate LRU.
+ TrackResidentAllocation(heap);
}
- MaybeError ResidencyManager::MakeAllocationsResident(MemorySegmentInfo* segment,
- uint64_t sizeToMakeResident,
- uint64_t numberOfObjectsToMakeResident,
- ID3D12Pageable** allocations) {
- uint64_t bytesEvicted;
- DAWN_TRY_ASSIGN(bytesEvicted, EnsureCanMakeResident(sizeToMakeResident, segment));
- DAWN_UNUSED(bytesEvicted);
-
- // Note that MakeResident is a synchronous function and can add a significant
- // overhead to command recording. In the future, it may be possible to decrease this
- // overhead by using MakeResident on a secondary thread, or by instead making use of
- // the EnqueueMakeResident function (which is not available on all Windows 10
- // platforms).
- HRESULT hr =
- mDevice->GetD3D12Device()->MakeResident(numberOfObjectsToMakeResident, allocations);
-
- // A MakeResident call can fail if there's not enough available memory. This
- // could occur when there's significant fragmentation or if the allocation size
- // estimates are incorrect. We may be able to continue execution by evicting some
- // more memory and calling MakeResident again.
- while (FAILED(hr)) {
- constexpr uint32_t kAdditonalSizeToEvict = 50000000; // 50MB
-
- uint64_t sizeEvicted = 0;
-
- DAWN_TRY_ASSIGN(sizeEvicted, EnsureCanMakeResident(kAdditonalSizeToEvict, segment));
-
- // If nothing can be evicted after MakeResident has failed, we cannot continue
- // execution and must throw a fatal error.
- if (sizeEvicted == 0) {
- return DAWN_OUT_OF_MEMORY_ERROR(
- "MakeResident has failed due to excessive video memory usage.");
- }
-
- hr =
- mDevice->GetD3D12Device()->MakeResident(numberOfObjectsToMakeResident, allocations);
- }
+ if (localSizeToMakeResident > 0) {
+ return MakeAllocationsResident(&mVideoMemoryInfo.local, localSizeToMakeResident,
+ localHeapsToMakeResident.size(),
+ localHeapsToMakeResident.data());
+ }
- return {};
+ if (nonLocalSizeToMakeResident > 0) {
+ ASSERT(!mDevice->GetDeviceInfo().isUMA);
+ return MakeAllocationsResident(&mVideoMemoryInfo.nonLocal, nonLocalSizeToMakeResident,
+ nonLocalHeapsToMakeResident.size(),
+ nonLocalHeapsToMakeResident.data());
}
- // Inserts a heap at the bottom of the LRU. The passed heap must be resident or scheduled to
- // become resident within the current serial. Failing to call this function when an allocation
- // is implicitly made resident will cause the residency manager to view the allocation as
- // non-resident and call MakeResident - which will make D3D12's internal residency refcount on
- // the allocation out of sync with Dawn.
- void ResidencyManager::TrackResidentAllocation(Pageable* pageable) {
- if (!mResidencyManagementEnabled) {
- return;
+ return {};
+}
+
+MaybeError ResidencyManager::MakeAllocationsResident(MemorySegmentInfo* segment,
+ uint64_t sizeToMakeResident,
+ uint64_t numberOfObjectsToMakeResident,
+ ID3D12Pageable** allocations) {
+ uint64_t bytesEvicted;
+ DAWN_TRY_ASSIGN(bytesEvicted, EnsureCanMakeResident(sizeToMakeResident, segment));
+ DAWN_UNUSED(bytesEvicted);
+
+ // Note that MakeResident is a synchronous function and can add a significant
+ // overhead to command recording. In the future, it may be possible to decrease this
+ // overhead by using MakeResident on a secondary thread, or by instead making use of
+ // the EnqueueMakeResident function (which is not available on all Windows 10
+ // platforms).
+ HRESULT hr =
+ mDevice->GetD3D12Device()->MakeResident(numberOfObjectsToMakeResident, allocations);
+
+ // A MakeResident call can fail if there's not enough available memory. This
+ // could occur when there's significant fragmentation or if the allocation size
+ // estimates are incorrect. We may be able to continue execution by evicting some
+ // more memory and calling MakeResident again.
+ while (FAILED(hr)) {
+ constexpr uint32_t kAdditonalSizeToEvict = 50000000; // 50MB
+
+ uint64_t sizeEvicted = 0;
+
+ DAWN_TRY_ASSIGN(sizeEvicted, EnsureCanMakeResident(kAdditonalSizeToEvict, segment));
+
+ // If nothing can be evicted after MakeResident has failed, we cannot continue
+ // execution and must throw a fatal error.
+ if (sizeEvicted == 0) {
+ return DAWN_OUT_OF_MEMORY_ERROR(
+ "MakeResident has failed due to excessive video memory usage.");
}
- ASSERT(pageable->IsInList() == false);
- GetMemorySegmentInfo(pageable->GetMemorySegment())->lruCache.Append(pageable);
+ hr = mDevice->GetD3D12Device()->MakeResident(numberOfObjectsToMakeResident, allocations);
}
- // Places an artifical cap on Dawn's budget so we can test in a predictable manner. If used,
- // this function must be called before any resources have been created.
- void ResidencyManager::RestrictBudgetForTesting(uint64_t artificialBudgetCap) {
- ASSERT(mVideoMemoryInfo.nonLocal.lruCache.empty());
- ASSERT(!mRestrictBudgetForTesting);
-
- mRestrictBudgetForTesting = true;
- UpdateVideoMemoryInfo();
-
- // Dawn has a non-zero memory usage even before any resources have been created, and this
- // value can vary depending on the environment Dawn is running in. By adding this in
- // addition to the artificial budget cap, we can create a predictable and reproducible
- // budget for testing.
- mVideoMemoryInfo.local.budget = mVideoMemoryInfo.local.usage + artificialBudgetCap;
- if (!mDevice->GetDeviceInfo().isUMA) {
- mVideoMemoryInfo.nonLocal.budget =
- mVideoMemoryInfo.nonLocal.usage + artificialBudgetCap;
- }
+ return {};
+}
+
+// Inserts a heap at the bottom of the LRU. The passed heap must be resident or scheduled to
+// become resident within the current serial. Failing to call this function when an allocation
+// is implicitly made resident will cause the residency manager to view the allocation as
+// non-resident and call MakeResident - which will make D3D12's internal residency refcount on
+// the allocation out of sync with Dawn.
+void ResidencyManager::TrackResidentAllocation(Pageable* pageable) {
+ if (!mResidencyManagementEnabled) {
+ return;
+ }
+
+ ASSERT(pageable->IsInList() == false);
+ GetMemorySegmentInfo(pageable->GetMemorySegment())->lruCache.Append(pageable);
+}
+
+// Places an artificial cap on Dawn's budget so we can test in a predictable manner. If used,
+// this function must be called before any resources have been created.
+void ResidencyManager::RestrictBudgetForTesting(uint64_t artificialBudgetCap) {
+ ASSERT(mVideoMemoryInfo.nonLocal.lruCache.empty());
+ ASSERT(!mRestrictBudgetForTesting);
+
+ mRestrictBudgetForTesting = true;
+ UpdateVideoMemoryInfo();
+
+ // Dawn has a non-zero memory usage even before any resources have been created, and this
+ // value can vary depending on the environment Dawn is running in. By adding this in
+ // addition to the artificial budget cap, we can create a predictable and reproducible
+ // budget for testing.
+ mVideoMemoryInfo.local.budget = mVideoMemoryInfo.local.usage + artificialBudgetCap;
+ if (!mDevice->GetDeviceInfo().isUMA) {
+ mVideoMemoryInfo.nonLocal.budget = mVideoMemoryInfo.nonLocal.usage + artificialBudgetCap;
}
+}
} // namespace dawn::native::d3d12
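
The budget bookkeeping in UpdateMemorySegmentInfo above is terse, so here is a minimal
standalone sketch of the same arithmetic with hypothetical inputs; MemoryBudgetInfo and
ComputeSegmentBudget are illustrative names, not part of Dawn's API:

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    // Illustrative stand-in for the MemorySegmentInfo fields used above.
    struct MemoryBudgetInfo {
        uint64_t externalReservation = 0;  // memory promised to a component outside Dawn
        uint64_t usage = 0;                // usage attributed to Dawn itself
        uint64_t budget = 0;               // what Dawn allows itself to use
    };

    // Same steps as UpdateMemorySegmentInfo: cap the external reservation at half the
    // OS-provided budget, subtract it from the reported usage, then keep 95% of the
    // remaining budget for Dawn.
    MemoryBudgetInfo ComputeSegmentBudget(uint64_t osBudget,
                                          uint64_t currentUsage,
                                          uint64_t externalRequest) {
        MemoryBudgetInfo info;
        info.externalReservation = std::min(osBudget / 2, externalRequest);
        info.usage = currentUsage - info.externalReservation;
        constexpr float kBudgetCap = 0.95f;
        info.budget =
            static_cast<uint64_t>((osBudget - info.externalReservation) * kBudgetCap);
        return info;
    }

    int main() {
        // Hypothetical numbers: 4 GiB OS budget, 2.5 GiB current usage, 1 GiB external request.
        MemoryBudgetInfo info =
            ComputeSegmentBudget(4ull << 30, (5ull << 30) / 2, 1ull << 30);
        std::printf("reservation=%llu usage=%llu budget=%llu\n",
                    static_cast<unsigned long long>(info.externalReservation),
                    static_cast<unsigned long long>(info.usage),
                    static_cast<unsigned long long>(info.budget));
        return 0;
    }

With these inputs the sketch yields a 1 GiB reservation, 1.5 GiB of Dawn usage, and a budget of
roughly 2.85 GiB, matching the halving and 95% cap described in the comments above.
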
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/ResidencyManagerD3D12.h b/chromium/third_party/dawn/src/dawn/native/d3d12/ResidencyManagerD3D12.h
index 490e487510e..97ff4349ab8 100644
--- a/chromium/third_party/dawn/src/dawn/native/d3d12/ResidencyManagerD3D12.h
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/ResidencyManagerD3D12.h
@@ -24,59 +24,58 @@
namespace dawn::native::d3d12 {
- class Device;
- class Heap;
- class Pageable;
-
- class ResidencyManager {
- public:
- ResidencyManager(Device* device);
-
- MaybeError LockAllocation(Pageable* pageable);
- void UnlockAllocation(Pageable* pageable);
-
- MaybeError EnsureCanAllocate(uint64_t allocationSize, MemorySegment memorySegment);
- MaybeError EnsureHeapsAreResident(Heap** heaps, size_t heapCount);
-
- uint64_t SetExternalMemoryReservation(MemorySegment segment,
- uint64_t requestedReservationSize);
-
- void TrackResidentAllocation(Pageable* pageable);
-
- void RestrictBudgetForTesting(uint64_t artificialBudgetCap);
-
- private:
- struct MemorySegmentInfo {
- const DXGI_MEMORY_SEGMENT_GROUP dxgiSegment;
- LinkedList<Pageable> lruCache = {};
- uint64_t budget = 0;
- uint64_t usage = 0;
- uint64_t externalReservation = 0;
- uint64_t externalRequest = 0;
- };
-
- struct VideoMemoryInfo {
- MemorySegmentInfo local = {DXGI_MEMORY_SEGMENT_GROUP_LOCAL};
- MemorySegmentInfo nonLocal = {DXGI_MEMORY_SEGMENT_GROUP_NON_LOCAL};
- };
-
- MemorySegmentInfo* GetMemorySegmentInfo(MemorySegment memorySegment);
- ResultOrError<uint64_t> EnsureCanMakeResident(uint64_t allocationSize,
- MemorySegmentInfo* memorySegment);
- ResultOrError<Pageable*> RemoveSingleEntryFromLRU(MemorySegmentInfo* memorySegment);
- MaybeError MakeAllocationsResident(MemorySegmentInfo* segment,
- uint64_t sizeToMakeResident,
- uint64_t numberOfObjectsToMakeResident,
- ID3D12Pageable** allocations);
- void UpdateVideoMemoryInfo();
- void UpdateMemorySegmentInfo(MemorySegmentInfo* segmentInfo);
-
- Device* mDevice;
- bool mResidencyManagementEnabled = false;
- bool mRestrictBudgetForTesting = false;
- VideoMemoryInfo mVideoMemoryInfo = {};
+class Device;
+class Heap;
+class Pageable;
+
+class ResidencyManager {
+ public:
+ explicit ResidencyManager(Device* device);
+
+ MaybeError LockAllocation(Pageable* pageable);
+ void UnlockAllocation(Pageable* pageable);
+
+ MaybeError EnsureCanAllocate(uint64_t allocationSize, MemorySegment memorySegment);
+ MaybeError EnsureHeapsAreResident(Heap** heaps, size_t heapCount);
+
+ uint64_t SetExternalMemoryReservation(MemorySegment segment, uint64_t requestedReservationSize);
+
+ void TrackResidentAllocation(Pageable* pageable);
+
+ void RestrictBudgetForTesting(uint64_t artificialBudgetCap);
+
+ private:
+ struct MemorySegmentInfo {
+ const DXGI_MEMORY_SEGMENT_GROUP dxgiSegment;
+ LinkedList<Pageable> lruCache = {};
+ uint64_t budget = 0;
+ uint64_t usage = 0;
+ uint64_t externalReservation = 0;
+ uint64_t externalRequest = 0;
};
+ struct VideoMemoryInfo {
+ MemorySegmentInfo local = {DXGI_MEMORY_SEGMENT_GROUP_LOCAL};
+ MemorySegmentInfo nonLocal = {DXGI_MEMORY_SEGMENT_GROUP_NON_LOCAL};
+ };
+
+ MemorySegmentInfo* GetMemorySegmentInfo(MemorySegment memorySegment);
+ ResultOrError<uint64_t> EnsureCanMakeResident(uint64_t allocationSize,
+ MemorySegmentInfo* memorySegment);
+ ResultOrError<Pageable*> RemoveSingleEntryFromLRU(MemorySegmentInfo* memorySegment);
+ MaybeError MakeAllocationsResident(MemorySegmentInfo* segment,
+ uint64_t sizeToMakeResident,
+ uint64_t numberOfObjectsToMakeResident,
+ ID3D12Pageable** allocations);
+ void UpdateVideoMemoryInfo();
+ void UpdateMemorySegmentInfo(MemorySegmentInfo* segmentInfo);
+
+ Device* mDevice;
+ bool mResidencyManagementEnabled = false;
+ bool mRestrictBudgetForTesting = false;
+ VideoMemoryInfo mVideoMemoryInfo = {};
+};
+
} // namespace dawn::native::d3d12
#endif // SRC_DAWN_NATIVE_D3D12_RESIDENCYMANAGERD3D12_H_
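
The MemorySegmentInfo::lruCache declared above drives eviction in EnsureCanMakeResident. Below is
a minimal sketch of that least-recently-used eviction loop, using std::list as a stand-in for
Dawn's LinkedList<Pageable> and ignoring the command-serial checks; FakePageable and
EvictUntilUnderBudget are invented names for illustration only:

    #include <cstdint>
    #include <cstdio>
    #include <list>
    #include <vector>

    // Stand-in for a Pageable heap: only its size matters for this sketch.
    struct FakePageable {
        uint64_t size;
    };

    // Walk the LRU from least to most recently used and collect victims until the new
    // allocation would fit under the budget (or the LRU runs dry, which means thrash).
    std::vector<FakePageable> EvictUntilUnderBudget(std::list<FakePageable>* lru,
                                                    uint64_t usage,
                                                    uint64_t budget,
                                                    uint64_t sizeToMakeResident) {
        std::vector<FakePageable> evicted;
        const uint64_t usageAfter = usage + sizeToMakeResident;
        if (usageAfter < budget) {
            return evicted;  // Already under budget, nothing to evict.
        }
        const uint64_t needed = usageAfter - budget;
        uint64_t freed = 0;
        while (freed < needed && !lru->empty()) {
            FakePageable victim = lru->front();  // Head of the list = least recently used.
            lru->pop_front();
            freed += victim.size;
            evicted.push_back(victim);
        }
        return evicted;
    }

    int main() {
        std::list<FakePageable> lru = {{64}, {128}, {256}};
        // Usage 400 of a 512 budget; making 300 more resident requires evicting 188 bytes.
        std::vector<FakePageable> evicted = EvictUntilUnderBudget(&lru, 400, 512, 300);
        std::printf("evicted %zu entries\n", evicted.size());  // prints 2
        return 0;
    }

In the real EnsureCanMakeResident, each candidate is additionally checked against the pending and
completed command serials before it may be evicted, as shown in RemoveSingleEntryFromLRU above.
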
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/ResourceAllocatorManagerD3D12.cpp b/chromium/third_party/dawn/src/dawn/native/d3d12/ResourceAllocatorManagerD3D12.cpp
index 5ed9c1d1d4c..dbcea824cd9 100644
--- a/chromium/third_party/dawn/src/dawn/native/d3d12/ResourceAllocatorManagerD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/ResourceAllocatorManagerD3D12.cpp
@@ -14,6 +14,9 @@
#include "dawn/native/d3d12/ResourceAllocatorManagerD3D12.h"
+#include <limits>
+#include <utility>
+
#include "dawn/native/d3d12/D3D12Error.h"
#include "dawn/native/d3d12/DeviceD3D12.h"
#include "dawn/native/d3d12/HeapAllocatorD3D12.h"
@@ -22,396 +25,393 @@
#include "dawn/native/d3d12/UtilsD3D12.h"
namespace dawn::native::d3d12 {
- namespace {
- MemorySegment GetMemorySegment(Device* device, D3D12_HEAP_TYPE heapType) {
- if (device->GetDeviceInfo().isUMA) {
- return MemorySegment::Local;
- }
+namespace {
+MemorySegment GetMemorySegment(Device* device, D3D12_HEAP_TYPE heapType) {
+ if (device->GetDeviceInfo().isUMA) {
+ return MemorySegment::Local;
+ }
- D3D12_HEAP_PROPERTIES heapProperties =
- device->GetD3D12Device()->GetCustomHeapProperties(0, heapType);
+ D3D12_HEAP_PROPERTIES heapProperties =
+ device->GetD3D12Device()->GetCustomHeapProperties(0, heapType);
- if (heapProperties.MemoryPoolPreference == D3D12_MEMORY_POOL_L1) {
- return MemorySegment::Local;
- }
-
- return MemorySegment::NonLocal;
- }
+ if (heapProperties.MemoryPoolPreference == D3D12_MEMORY_POOL_L1) {
+ return MemorySegment::Local;
+ }
- D3D12_HEAP_TYPE GetD3D12HeapType(ResourceHeapKind resourceHeapKind) {
- switch (resourceHeapKind) {
- case Readback_OnlyBuffers:
- case Readback_AllBuffersAndTextures:
- return D3D12_HEAP_TYPE_READBACK;
- case Default_AllBuffersAndTextures:
- case Default_OnlyBuffers:
- case Default_OnlyNonRenderableOrDepthTextures:
- case Default_OnlyRenderableOrDepthTextures:
- return D3D12_HEAP_TYPE_DEFAULT;
- case Upload_OnlyBuffers:
- case Upload_AllBuffersAndTextures:
- return D3D12_HEAP_TYPE_UPLOAD;
- case EnumCount:
- UNREACHABLE();
- }
+ return MemorySegment::NonLocal;
+}
+
+D3D12_HEAP_TYPE GetD3D12HeapType(ResourceHeapKind resourceHeapKind) {
+ switch (resourceHeapKind) {
+ case Readback_OnlyBuffers:
+ case Readback_AllBuffersAndTextures:
+ return D3D12_HEAP_TYPE_READBACK;
+ case Default_AllBuffersAndTextures:
+ case Default_OnlyBuffers:
+ case Default_OnlyNonRenderableOrDepthTextures:
+ case Default_OnlyRenderableOrDepthTextures:
+ return D3D12_HEAP_TYPE_DEFAULT;
+ case Upload_OnlyBuffers:
+ case Upload_AllBuffersAndTextures:
+ return D3D12_HEAP_TYPE_UPLOAD;
+ case EnumCount:
+ UNREACHABLE();
+ }
+}
+
+D3D12_HEAP_FLAGS GetD3D12HeapFlags(ResourceHeapKind resourceHeapKind) {
+ switch (resourceHeapKind) {
+ case Default_AllBuffersAndTextures:
+ case Readback_AllBuffersAndTextures:
+ case Upload_AllBuffersAndTextures:
+ return D3D12_HEAP_FLAG_ALLOW_ALL_BUFFERS_AND_TEXTURES;
+ case Default_OnlyBuffers:
+ case Readback_OnlyBuffers:
+ case Upload_OnlyBuffers:
+ return D3D12_HEAP_FLAG_ALLOW_ONLY_BUFFERS;
+ case Default_OnlyNonRenderableOrDepthTextures:
+ return D3D12_HEAP_FLAG_ALLOW_ONLY_NON_RT_DS_TEXTURES;
+ case Default_OnlyRenderableOrDepthTextures:
+ return D3D12_HEAP_FLAG_ALLOW_ONLY_RT_DS_TEXTURES;
+ case EnumCount:
+ UNREACHABLE();
+ }
+}
+
+ResourceHeapKind GetResourceHeapKind(D3D12_RESOURCE_DIMENSION dimension,
+ D3D12_HEAP_TYPE heapType,
+ D3D12_RESOURCE_FLAGS flags,
+ uint32_t resourceHeapTier) {
+ if (resourceHeapTier >= 2) {
+ switch (heapType) {
+ case D3D12_HEAP_TYPE_UPLOAD:
+ return Upload_AllBuffersAndTextures;
+ case D3D12_HEAP_TYPE_DEFAULT:
+ return Default_AllBuffersAndTextures;
+ case D3D12_HEAP_TYPE_READBACK:
+ return Readback_AllBuffersAndTextures;
+ default:
+ UNREACHABLE();
}
+ }
- D3D12_HEAP_FLAGS GetD3D12HeapFlags(ResourceHeapKind resourceHeapKind) {
- switch (resourceHeapKind) {
- case Default_AllBuffersAndTextures:
- case Readback_AllBuffersAndTextures:
- case Upload_AllBuffersAndTextures:
- return D3D12_HEAP_FLAG_ALLOW_ALL_BUFFERS_AND_TEXTURES;
- case Default_OnlyBuffers:
- case Readback_OnlyBuffers:
- case Upload_OnlyBuffers:
- return D3D12_HEAP_FLAG_ALLOW_ONLY_BUFFERS;
- case Default_OnlyNonRenderableOrDepthTextures:
- return D3D12_HEAP_FLAG_ALLOW_ONLY_NON_RT_DS_TEXTURES;
- case Default_OnlyRenderableOrDepthTextures:
- return D3D12_HEAP_FLAG_ALLOW_ONLY_RT_DS_TEXTURES;
- case EnumCount:
+ switch (dimension) {
+ case D3D12_RESOURCE_DIMENSION_BUFFER: {
+ switch (heapType) {
+ case D3D12_HEAP_TYPE_UPLOAD:
+ return Upload_OnlyBuffers;
+ case D3D12_HEAP_TYPE_DEFAULT:
+ return Default_OnlyBuffers;
+ case D3D12_HEAP_TYPE_READBACK:
+ return Readback_OnlyBuffers;
+ default:
UNREACHABLE();
}
+ break;
}
-
- ResourceHeapKind GetResourceHeapKind(D3D12_RESOURCE_DIMENSION dimension,
- D3D12_HEAP_TYPE heapType,
- D3D12_RESOURCE_FLAGS flags,
- uint32_t resourceHeapTier) {
- if (resourceHeapTier >= 2) {
- switch (heapType) {
- case D3D12_HEAP_TYPE_UPLOAD:
- return Upload_AllBuffersAndTextures;
- case D3D12_HEAP_TYPE_DEFAULT:
- return Default_AllBuffersAndTextures;
- case D3D12_HEAP_TYPE_READBACK:
- return Readback_AllBuffersAndTextures;
- default:
- UNREACHABLE();
- }
- }
-
- switch (dimension) {
- case D3D12_RESOURCE_DIMENSION_BUFFER: {
- switch (heapType) {
- case D3D12_HEAP_TYPE_UPLOAD:
- return Upload_OnlyBuffers;
- case D3D12_HEAP_TYPE_DEFAULT:
- return Default_OnlyBuffers;
- case D3D12_HEAP_TYPE_READBACK:
- return Readback_OnlyBuffers;
- default:
- UNREACHABLE();
+ case D3D12_RESOURCE_DIMENSION_TEXTURE1D:
+ case D3D12_RESOURCE_DIMENSION_TEXTURE2D:
+ case D3D12_RESOURCE_DIMENSION_TEXTURE3D: {
+ switch (heapType) {
+ case D3D12_HEAP_TYPE_DEFAULT: {
+ if ((flags & D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL) ||
+ (flags & D3D12_RESOURCE_FLAG_ALLOW_RENDER_TARGET)) {
+ return Default_OnlyRenderableOrDepthTextures;
}
- break;
+ return Default_OnlyNonRenderableOrDepthTextures;
}
- case D3D12_RESOURCE_DIMENSION_TEXTURE1D:
- case D3D12_RESOURCE_DIMENSION_TEXTURE2D:
- case D3D12_RESOURCE_DIMENSION_TEXTURE3D: {
- switch (heapType) {
- case D3D12_HEAP_TYPE_DEFAULT: {
- if ((flags & D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL) ||
- (flags & D3D12_RESOURCE_FLAG_ALLOW_RENDER_TARGET)) {
- return Default_OnlyRenderableOrDepthTextures;
- }
- return Default_OnlyNonRenderableOrDepthTextures;
- }
-
- default:
- UNREACHABLE();
- }
- break;
- }
- default:
- UNREACHABLE();
- }
- }
- uint64_t GetResourcePlacementAlignment(ResourceHeapKind resourceHeapKind,
- uint32_t sampleCount,
- uint64_t requestedAlignment) {
- switch (resourceHeapKind) {
- // Small resources can take advantage of smaller alignments. For example,
- // if the most detailed mip can fit under 64KB, 4KB alignments can be used.
- // Must be non-depth or without render-target to use small resource alignment.
- // This also applies to MSAA textures (4MB => 64KB).
- //
- // Note: Only known to be used for small textures; however, MSDN suggests
- // it could be extended for more cases. If so, this could default to always
- // attempt small resource placement.
- // https://docs.microsoft.com/en-us/windows/win32/api/d3d12/ns-d3d12-d3d12_resource_desc
- case Default_OnlyNonRenderableOrDepthTextures:
- return (sampleCount > 1) ? D3D12_SMALL_MSAA_RESOURCE_PLACEMENT_ALIGNMENT
- : D3D12_SMALL_RESOURCE_PLACEMENT_ALIGNMENT;
default:
- return requestedAlignment;
+ UNREACHABLE();
}
+ break;
}
+ default:
+ UNREACHABLE();
+ }
+}
+
+uint64_t GetResourcePlacementAlignment(ResourceHeapKind resourceHeapKind,
+ uint32_t sampleCount,
+ uint64_t requestedAlignment) {
+ switch (resourceHeapKind) {
+ // Small resources can take advantage of smaller alignments. For example,
+ // if the most detailed mip can fit under 64KB, 4KB alignments can be used.
+ // Must be non-depth or without render-target to use small resource alignment.
+ // This also applies to MSAA textures (4MB => 64KB).
+ //
+ // Note: Only known to be used for small textures; however, MSDN suggests
+ // it could be extended for more cases. If so, this could default to always
+ // attempt small resource placement.
+ // https://docs.microsoft.com/en-us/windows/win32/api/d3d12/ns-d3d12-d3d12_resource_desc
+ case Default_OnlyNonRenderableOrDepthTextures:
+ return (sampleCount > 1) ? D3D12_SMALL_MSAA_RESOURCE_PLACEMENT_ALIGNMENT
+ : D3D12_SMALL_RESOURCE_PLACEMENT_ALIGNMENT;
+ default:
+ return requestedAlignment;
+ }
+}
+
+bool IsClearValueOptimizable(const D3D12_RESOURCE_DESC& resourceDescriptor) {
+ // Optimized clear color cannot be set on buffers, non-render-target/depth-stencil
+ // textures, or typeless resources
+ // https://docs.microsoft.com/en-us/windows/win32/api/d3d12/nf-d3d12-id3d12device-createcommittedresource
+ // https://docs.microsoft.com/en-us/windows/win32/api/d3d12/nf-d3d12-id3d12device-createplacedresource
+ return !IsTypeless(resourceDescriptor.Format) &&
+ resourceDescriptor.Dimension != D3D12_RESOURCE_DIMENSION_BUFFER &&
+ (resourceDescriptor.Flags & (D3D12_RESOURCE_FLAG_ALLOW_RENDER_TARGET |
+ D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL)) != 0;
+}
+
+} // namespace
+
+ResourceAllocatorManager::ResourceAllocatorManager(Device* device) : mDevice(device) {
+ mResourceHeapTier = (mDevice->IsToggleEnabled(Toggle::UseD3D12ResourceHeapTier2))
+ ? mDevice->GetDeviceInfo().resourceHeapTier
+ : 1;
+
+ for (uint32_t i = 0; i < ResourceHeapKind::EnumCount; i++) {
+ const ResourceHeapKind resourceHeapKind = static_cast<ResourceHeapKind>(i);
+ mHeapAllocators[i] = std::make_unique<HeapAllocator>(
+ mDevice, GetD3D12HeapType(resourceHeapKind), GetD3D12HeapFlags(resourceHeapKind),
+ GetMemorySegment(device, GetD3D12HeapType(resourceHeapKind)));
+ mPooledHeapAllocators[i] =
+ std::make_unique<PooledResourceMemoryAllocator>(mHeapAllocators[i].get());
+ mSubAllocatedResourceAllocators[i] = std::make_unique<BuddyMemoryAllocator>(
+ kMaxHeapSize, kMinHeapSize, mPooledHeapAllocators[i].get());
+ }
+}
+
+ResultOrError<ResourceHeapAllocation> ResourceAllocatorManager::AllocateMemory(
+ D3D12_HEAP_TYPE heapType,
+ const D3D12_RESOURCE_DESC& resourceDescriptor,
+ D3D12_RESOURCE_STATES initialUsage) {
+ // In order to suppress a warning in the D3D12 debug layer, we need to specify an
+ // optimized clear value. As there are no negative consequences when picking a mismatched
+ // clear value, we use zero as the optimized clear value. This also enables fast clears on
+ // some architectures.
+ D3D12_CLEAR_VALUE zero{};
+ D3D12_CLEAR_VALUE* optimizedClearValue = nullptr;
+ if (IsClearValueOptimizable(resourceDescriptor)) {
+ zero.Format = resourceDescriptor.Format;
+ optimizedClearValue = &zero;
+ }
- bool IsClearValueOptimizable(const D3D12_RESOURCE_DESC& resourceDescriptor) {
- // Optimized clear color cannot be set on buffers, non-render-target/depth-stencil
- // textures, or typeless resources
- // https://docs.microsoft.com/en-us/windows/win32/api/d3d12/nf-d3d12-id3d12device-createcommittedresource
- // https://docs.microsoft.com/en-us/windows/win32/api/d3d12/nf-d3d12-id3d12device-createplacedresource
- return !IsTypeless(resourceDescriptor.Format) &&
- resourceDescriptor.Dimension != D3D12_RESOURCE_DIMENSION_BUFFER &&
- (resourceDescriptor.Flags & (D3D12_RESOURCE_FLAG_ALLOW_RENDER_TARGET |
- D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL)) != 0;
- }
-
- } // namespace
-
- ResourceAllocatorManager::ResourceAllocatorManager(Device* device) : mDevice(device) {
- mResourceHeapTier = (mDevice->IsToggleEnabled(Toggle::UseD3D12ResourceHeapTier2))
- ? mDevice->GetDeviceInfo().resourceHeapTier
- : 1;
-
- for (uint32_t i = 0; i < ResourceHeapKind::EnumCount; i++) {
- const ResourceHeapKind resourceHeapKind = static_cast<ResourceHeapKind>(i);
- mHeapAllocators[i] = std::make_unique<HeapAllocator>(
- mDevice, GetD3D12HeapType(resourceHeapKind), GetD3D12HeapFlags(resourceHeapKind),
- GetMemorySegment(device, GetD3D12HeapType(resourceHeapKind)));
- mPooledHeapAllocators[i] =
- std::make_unique<PooledResourceMemoryAllocator>(mHeapAllocators[i].get());
- mSubAllocatedResourceAllocators[i] = std::make_unique<BuddyMemoryAllocator>(
- kMaxHeapSize, kMinHeapSize, mPooledHeapAllocators[i].get());
+ // TODO(crbug.com/dawn/849): Conditionally disable sub-allocation.
+ // For very large resources, there is no benefit to suballocate.
+    // For very small resources, it is inefficient to suballocate given the min. heap
+    // size could be much larger than the resource allocation.
+ // Attempt to satisfy the request using sub-allocation (placed resource in a heap).
+ if (!mDevice->IsToggleEnabled(Toggle::DisableResourceSuballocation)) {
+ ResourceHeapAllocation subAllocation;
+ DAWN_TRY_ASSIGN(subAllocation, CreatePlacedResource(heapType, resourceDescriptor,
+ optimizedClearValue, initialUsage));
+ if (subAllocation.GetInfo().mMethod != AllocationMethod::kInvalid) {
+ return std::move(subAllocation);
}
}
- ResultOrError<ResourceHeapAllocation> ResourceAllocatorManager::AllocateMemory(
- D3D12_HEAP_TYPE heapType,
- const D3D12_RESOURCE_DESC& resourceDescriptor,
- D3D12_RESOURCE_STATES initialUsage) {
- // In order to suppress a warning in the D3D12 debug layer, we need to specify an
- // optimized clear value. As there are no negative consequences when picking a mismatched
- // clear value, we use zero as the optimized clear value. This also enables fast clears on
- // some architectures.
- D3D12_CLEAR_VALUE zero{};
- D3D12_CLEAR_VALUE* optimizedClearValue = nullptr;
- if (IsClearValueOptimizable(resourceDescriptor)) {
- zero.Format = resourceDescriptor.Format;
- optimizedClearValue = &zero;
- }
+    // If sub-allocation fails, fall back to direct allocation (committed resource).
+ ResourceHeapAllocation directAllocation;
+ DAWN_TRY_ASSIGN(directAllocation, CreateCommittedResource(heapType, resourceDescriptor,
+ optimizedClearValue, initialUsage));
+ if (directAllocation.GetInfo().mMethod != AllocationMethod::kInvalid) {
+ return std::move(directAllocation);
+ }
- // TODO(crbug.com/dawn/849): Conditionally disable sub-allocation.
- // For very large resources, there is no benefit to suballocate.
- // For very small resources, it is inefficent to suballocate given the min. heap
- // size could be much larger then the resource allocation.
- // Attempt to satisfy the request using sub-allocation (placed resource in a heap).
- if (!mDevice->IsToggleEnabled(Toggle::DisableResourceSuballocation)) {
- ResourceHeapAllocation subAllocation;
- DAWN_TRY_ASSIGN(subAllocation, CreatePlacedResource(heapType, resourceDescriptor,
- optimizedClearValue, initialUsage));
- if (subAllocation.GetInfo().mMethod != AllocationMethod::kInvalid) {
- return std::move(subAllocation);
- }
- }
+ // If direct allocation fails, the system is probably out of memory.
+ return DAWN_OUT_OF_MEMORY_ERROR("Allocation failed");
+}
- // If sub-allocation fails, fall-back to direct allocation (committed resource).
- ResourceHeapAllocation directAllocation;
- DAWN_TRY_ASSIGN(directAllocation,
- CreateCommittedResource(heapType, resourceDescriptor, optimizedClearValue,
- initialUsage));
- if (directAllocation.GetInfo().mMethod != AllocationMethod::kInvalid) {
- return std::move(directAllocation);
+void ResourceAllocatorManager::Tick(ExecutionSerial completedSerial) {
+ for (ResourceHeapAllocation& allocation : mAllocationsToDelete.IterateUpTo(completedSerial)) {
+ if (allocation.GetInfo().mMethod == AllocationMethod::kSubAllocated) {
+ FreeMemory(allocation);
}
-
- // If direct allocation fails, the system is probably out of memory.
- return DAWN_OUT_OF_MEMORY_ERROR("Allocation failed");
}
+ mAllocationsToDelete.ClearUpTo(completedSerial);
+ mHeapsToDelete.ClearUpTo(completedSerial);
+}
- void ResourceAllocatorManager::Tick(ExecutionSerial completedSerial) {
- for (ResourceHeapAllocation& allocation :
- mAllocationsToDelete.IterateUpTo(completedSerial)) {
- if (allocation.GetInfo().mMethod == AllocationMethod::kSubAllocated) {
- FreeMemory(allocation);
- }
- }
- mAllocationsToDelete.ClearUpTo(completedSerial);
- mHeapsToDelete.ClearUpTo(completedSerial);
+void ResourceAllocatorManager::DeallocateMemory(ResourceHeapAllocation& allocation) {
+ if (allocation.GetInfo().mMethod == AllocationMethod::kInvalid) {
+ return;
}
- void ResourceAllocatorManager::DeallocateMemory(ResourceHeapAllocation& allocation) {
- if (allocation.GetInfo().mMethod == AllocationMethod::kInvalid) {
- return;
- }
+ mAllocationsToDelete.Enqueue(allocation, mDevice->GetPendingCommandSerial());
- mAllocationsToDelete.Enqueue(allocation, mDevice->GetPendingCommandSerial());
+ // Directly allocated ResourceHeapAllocations are created with a heap object that must be
+ // manually deleted upon deallocation. See ResourceAllocatorManager::CreateCommittedResource
+ // for more information. Acquire this heap as a unique_ptr and add it to the queue of heaps
+ // to delete. It cannot be deleted immediately because it may be in use by in-flight or
+ // pending commands.
+ if (allocation.GetInfo().mMethod == AllocationMethod::kDirect) {
+ mHeapsToDelete.Enqueue(std::unique_ptr<ResourceHeapBase>(allocation.GetResourceHeap()),
+ mDevice->GetPendingCommandSerial());
+ }
- // Directly allocated ResourceHeapAllocations are created with a heap object that must be
- // manually deleted upon deallocation. See ResourceAllocatorManager::CreateCommittedResource
- // for more information. Acquire this heap as a unique_ptr and add it to the queue of heaps
- // to delete. It cannot be deleted immediately because it may be in use by in-flight or
- // pending commands.
- if (allocation.GetInfo().mMethod == AllocationMethod::kDirect) {
- mHeapsToDelete.Enqueue(std::unique_ptr<ResourceHeapBase>(allocation.GetResourceHeap()),
- mDevice->GetPendingCommandSerial());
- }
+ // Invalidate the allocation immediately in case one accidentally
+ // calls DeallocateMemory again using the same allocation.
+ allocation.Invalidate();
- // Invalidate the allocation immediately in case one accidentally
- // calls DeallocateMemory again using the same allocation.
- allocation.Invalidate();
+ ASSERT(allocation.GetD3D12Resource() == nullptr);
+}
- ASSERT(allocation.GetD3D12Resource() == nullptr);
- }
+void ResourceAllocatorManager::FreeMemory(ResourceHeapAllocation& allocation) {
+ ASSERT(allocation.GetInfo().mMethod == AllocationMethod::kSubAllocated);
- void ResourceAllocatorManager::FreeMemory(ResourceHeapAllocation& allocation) {
- ASSERT(allocation.GetInfo().mMethod == AllocationMethod::kSubAllocated);
+ D3D12_HEAP_PROPERTIES heapProp;
+ allocation.GetD3D12Resource()->GetHeapProperties(&heapProp, nullptr);
- D3D12_HEAP_PROPERTIES heapProp;
- allocation.GetD3D12Resource()->GetHeapProperties(&heapProp, nullptr);
+ const D3D12_RESOURCE_DESC resourceDescriptor = allocation.GetD3D12Resource()->GetDesc();
- const D3D12_RESOURCE_DESC resourceDescriptor = allocation.GetD3D12Resource()->GetDesc();
+ const size_t resourceHeapKindIndex = GetResourceHeapKind(
+ resourceDescriptor.Dimension, heapProp.Type, resourceDescriptor.Flags, mResourceHeapTier);
- const size_t resourceHeapKindIndex =
- GetResourceHeapKind(resourceDescriptor.Dimension, heapProp.Type,
- resourceDescriptor.Flags, mResourceHeapTier);
+ mSubAllocatedResourceAllocators[resourceHeapKindIndex]->Deallocate(allocation);
+}
- mSubAllocatedResourceAllocators[resourceHeapKindIndex]->Deallocate(allocation);
- }
+ResultOrError<ResourceHeapAllocation> ResourceAllocatorManager::CreatePlacedResource(
+ D3D12_HEAP_TYPE heapType,
+ const D3D12_RESOURCE_DESC& requestedResourceDescriptor,
+ const D3D12_CLEAR_VALUE* optimizedClearValue,
+ D3D12_RESOURCE_STATES initialUsage) {
+ const ResourceHeapKind resourceHeapKind =
+ GetResourceHeapKind(requestedResourceDescriptor.Dimension, heapType,
+ requestedResourceDescriptor.Flags, mResourceHeapTier);
- ResultOrError<ResourceHeapAllocation> ResourceAllocatorManager::CreatePlacedResource(
- D3D12_HEAP_TYPE heapType,
- const D3D12_RESOURCE_DESC& requestedResourceDescriptor,
- const D3D12_CLEAR_VALUE* optimizedClearValue,
- D3D12_RESOURCE_STATES initialUsage) {
- const ResourceHeapKind resourceHeapKind =
- GetResourceHeapKind(requestedResourceDescriptor.Dimension, heapType,
- requestedResourceDescriptor.Flags, mResourceHeapTier);
-
- D3D12_RESOURCE_DESC resourceDescriptor = requestedResourceDescriptor;
- resourceDescriptor.Alignment = GetResourcePlacementAlignment(
- resourceHeapKind, requestedResourceDescriptor.SampleDesc.Count,
- requestedResourceDescriptor.Alignment);
-
- // TODO(bryan.bernhart): Figure out how to compute the alignment without calling this
- // twice.
- D3D12_RESOURCE_ALLOCATION_INFO resourceInfo =
- mDevice->GetD3D12Device()->GetResourceAllocationInfo(0, 1, &resourceDescriptor);
+ D3D12_RESOURCE_DESC resourceDescriptor = requestedResourceDescriptor;
+ resourceDescriptor.Alignment = GetResourcePlacementAlignment(
+ resourceHeapKind, requestedResourceDescriptor.SampleDesc.Count,
+ requestedResourceDescriptor.Alignment);
- // If the requested resource alignment was rejected, let D3D tell us what the
- // required alignment is for this resource.
- if (resourceDescriptor.Alignment != 0 &&
- resourceDescriptor.Alignment != resourceInfo.Alignment) {
- resourceDescriptor.Alignment = 0;
- resourceInfo =
- mDevice->GetD3D12Device()->GetResourceAllocationInfo(0, 1, &resourceDescriptor);
- }
+ // TODO(bryan.bernhart): Figure out how to compute the alignment without calling this
+ // twice.
+ D3D12_RESOURCE_ALLOCATION_INFO resourceInfo =
+ mDevice->GetD3D12Device()->GetResourceAllocationInfo(0, 1, &resourceDescriptor);
- // If d3d tells us the resource size is invalid, treat the error as OOM.
- // Otherwise, creating the resource could cause a device loss (too large).
- // This is because NextPowerOfTwo(UINT64_MAX) overflows and proceeds to
- // incorrectly allocate a mismatched size.
- if (resourceInfo.SizeInBytes == 0 ||
- resourceInfo.SizeInBytes == std::numeric_limits<uint64_t>::max()) {
- return DAWN_OUT_OF_MEMORY_ERROR(absl::StrFormat(
- "Resource allocation size (%u) was invalid.", resourceInfo.SizeInBytes));
- }
+ // If the requested resource alignment was rejected, let D3D tell us what the
+ // required alignment is for this resource.
+ if (resourceDescriptor.Alignment != 0 &&
+ resourceDescriptor.Alignment != resourceInfo.Alignment) {
+ resourceDescriptor.Alignment = 0;
+ resourceInfo =
+ mDevice->GetD3D12Device()->GetResourceAllocationInfo(0, 1, &resourceDescriptor);
+ }
- BuddyMemoryAllocator* allocator =
- mSubAllocatedResourceAllocators[static_cast<size_t>(resourceHeapKind)].get();
+ // If d3d tells us the resource size is invalid, treat the error as OOM.
+ // Otherwise, creating the resource could cause a device loss (too large).
+ // This is because NextPowerOfTwo(UINT64_MAX) overflows and proceeds to
+ // incorrectly allocate a mismatched size.
+ if (resourceInfo.SizeInBytes == 0 ||
+ resourceInfo.SizeInBytes == std::numeric_limits<uint64_t>::max()) {
+ return DAWN_OUT_OF_MEMORY_ERROR(absl::StrFormat(
+ "Resource allocation size (%u) was invalid.", resourceInfo.SizeInBytes));
+ }
- ResourceMemoryAllocation allocation;
- DAWN_TRY_ASSIGN(allocation,
- allocator->Allocate(resourceInfo.SizeInBytes, resourceInfo.Alignment));
- if (allocation.GetInfo().mMethod == AllocationMethod::kInvalid) {
- return ResourceHeapAllocation{}; // invalid
- }
+ BuddyMemoryAllocator* allocator =
+ mSubAllocatedResourceAllocators[static_cast<size_t>(resourceHeapKind)].get();
- Heap* heap = ToBackend(allocation.GetResourceHeap());
-
- // Before calling CreatePlacedResource, we must ensure the target heap is resident.
- // CreatePlacedResource will fail if it is not.
- DAWN_TRY(mDevice->GetResidencyManager()->LockAllocation(heap));
-
- // With placed resources, a single heap can be reused.
- // The resource placed at an offset is only reclaimed
- // upon Tick or after the last command list using the resource has completed
- // on the GPU. This means the same physical memory is not reused
- // within the same command-list and does not require additional synchronization (aliasing
- // barrier).
- // https://docs.microsoft.com/en-us/windows/win32/api/d3d12/nf-d3d12-id3d12device-createplacedresource
- ComPtr<ID3D12Resource> placedResource;
- DAWN_TRY(CheckOutOfMemoryHRESULT(
- mDevice->GetD3D12Device()->CreatePlacedResource(
- heap->GetD3D12Heap(), allocation.GetOffset(), &resourceDescriptor, initialUsage,
- optimizedClearValue, IID_PPV_ARGS(&placedResource)),
- "ID3D12Device::CreatePlacedResource"));
-
- // After CreatePlacedResource has finished, the heap can be unlocked from residency. This
- // will insert it into the residency LRU.
- mDevice->GetResidencyManager()->UnlockAllocation(heap);
-
- return ResourceHeapAllocation{allocation.GetInfo(), allocation.GetOffset(),
- std::move(placedResource), heap};
+ ResourceMemoryAllocation allocation;
+ DAWN_TRY_ASSIGN(allocation,
+ allocator->Allocate(resourceInfo.SizeInBytes, resourceInfo.Alignment));
+ if (allocation.GetInfo().mMethod == AllocationMethod::kInvalid) {
+ return ResourceHeapAllocation{}; // invalid
}
- ResultOrError<ResourceHeapAllocation> ResourceAllocatorManager::CreateCommittedResource(
- D3D12_HEAP_TYPE heapType,
- const D3D12_RESOURCE_DESC& resourceDescriptor,
- const D3D12_CLEAR_VALUE* optimizedClearValue,
- D3D12_RESOURCE_STATES initialUsage) {
- D3D12_HEAP_PROPERTIES heapProperties;
- heapProperties.Type = heapType;
- heapProperties.CPUPageProperty = D3D12_CPU_PAGE_PROPERTY_UNKNOWN;
- heapProperties.MemoryPoolPreference = D3D12_MEMORY_POOL_UNKNOWN;
- heapProperties.CreationNodeMask = 0;
- heapProperties.VisibleNodeMask = 0;
-
- // If d3d tells us the resource size is invalid, treat the error as OOM.
- // Otherwise, creating the resource could cause a device loss (too large).
- // This is because NextPowerOfTwo(UINT64_MAX) overflows and proceeds to
- // incorrectly allocate a mismatched size.
- D3D12_RESOURCE_ALLOCATION_INFO resourceInfo =
- mDevice->GetD3D12Device()->GetResourceAllocationInfo(0, 1, &resourceDescriptor);
- if (resourceInfo.SizeInBytes == 0 ||
- resourceInfo.SizeInBytes == std::numeric_limits<uint64_t>::max()) {
- return DAWN_OUT_OF_MEMORY_ERROR("Resource allocation size was invalid.");
- }
-
- if (resourceInfo.SizeInBytes > kMaxHeapSize) {
- return ResourceHeapAllocation{}; // Invalid
- }
+ Heap* heap = ToBackend(allocation.GetResourceHeap());
+
+ // Before calling CreatePlacedResource, we must ensure the target heap is resident.
+ // CreatePlacedResource will fail if it is not.
+ DAWN_TRY(mDevice->GetResidencyManager()->LockAllocation(heap));
+
+ // With placed resources, a single heap can be reused.
+ // The resource placed at an offset is only reclaimed
+ // upon Tick or after the last command list using the resource has completed
+ // on the GPU. This means the same physical memory is not reused
+ // within the same command-list and does not require additional synchronization (aliasing
+ // barrier).
+ // https://docs.microsoft.com/en-us/windows/win32/api/d3d12/nf-d3d12-id3d12device-createplacedresource
+ ComPtr<ID3D12Resource> placedResource;
+ DAWN_TRY(CheckOutOfMemoryHRESULT(
+ mDevice->GetD3D12Device()->CreatePlacedResource(
+ heap->GetD3D12Heap(), allocation.GetOffset(), &resourceDescriptor, initialUsage,
+ optimizedClearValue, IID_PPV_ARGS(&placedResource)),
+ "ID3D12Device::CreatePlacedResource"));
+
+ // After CreatePlacedResource has finished, the heap can be unlocked from residency. This
+ // will insert it into the residency LRU.
+ mDevice->GetResidencyManager()->UnlockAllocation(heap);
+
+ return ResourceHeapAllocation{allocation.GetInfo(), allocation.GetOffset(),
+ std::move(placedResource), heap};
+}
+
+ResultOrError<ResourceHeapAllocation> ResourceAllocatorManager::CreateCommittedResource(
+ D3D12_HEAP_TYPE heapType,
+ const D3D12_RESOURCE_DESC& resourceDescriptor,
+ const D3D12_CLEAR_VALUE* optimizedClearValue,
+ D3D12_RESOURCE_STATES initialUsage) {
+ D3D12_HEAP_PROPERTIES heapProperties;
+ heapProperties.Type = heapType;
+ heapProperties.CPUPageProperty = D3D12_CPU_PAGE_PROPERTY_UNKNOWN;
+ heapProperties.MemoryPoolPreference = D3D12_MEMORY_POOL_UNKNOWN;
+ heapProperties.CreationNodeMask = 0;
+ heapProperties.VisibleNodeMask = 0;
+
+ // If d3d tells us the resource size is invalid, treat the error as OOM.
+ // Otherwise, creating the resource could cause a device loss (too large).
+ // This is because NextPowerOfTwo(UINT64_MAX) overflows and proceeds to
+ // incorrectly allocate a mismatched size.
+ D3D12_RESOURCE_ALLOCATION_INFO resourceInfo =
+ mDevice->GetD3D12Device()->GetResourceAllocationInfo(0, 1, &resourceDescriptor);
+ if (resourceInfo.SizeInBytes == 0 ||
+ resourceInfo.SizeInBytes == std::numeric_limits<uint64_t>::max()) {
+ return DAWN_OUT_OF_MEMORY_ERROR("Resource allocation size was invalid.");
+ }
- // CreateCommittedResource will implicitly make the created resource resident. We must
- // ensure enough free memory exists before allocating to avoid an out-of-memory error when
- // overcommitted.
- DAWN_TRY(mDevice->GetResidencyManager()->EnsureCanAllocate(
- resourceInfo.SizeInBytes, GetMemorySegment(mDevice, heapType)));
-
- // Note: Heap flags are inferred by the resource descriptor and do not need to be explicitly
- // provided to CreateCommittedResource.
- ComPtr<ID3D12Resource> committedResource;
- DAWN_TRY(CheckOutOfMemoryHRESULT(
- mDevice->GetD3D12Device()->CreateCommittedResource(
- &heapProperties, D3D12_HEAP_FLAG_NONE, &resourceDescriptor, initialUsage,
- optimizedClearValue, IID_PPV_ARGS(&committedResource)),
- "ID3D12Device::CreateCommittedResource"));
-
- // When using CreateCommittedResource, D3D12 creates an implicit heap that contains the
- // resource allocation. Because Dawn's memory residency management occurs at the resource
- // heap granularity, every directly allocated ResourceHeapAllocation also stores a Heap
- // object. This object is created manually, and must be deleted manually upon deallocation
- // of the committed resource.
- Heap* heap = new Heap(committedResource, GetMemorySegment(mDevice, heapType),
- resourceInfo.SizeInBytes);
-
- // Calling CreateCommittedResource implicitly calls MakeResident on the resource. We must
- // track this to avoid calling MakeResident a second time.
- mDevice->GetResidencyManager()->TrackResidentAllocation(heap);
-
- AllocationInfo info;
- info.mMethod = AllocationMethod::kDirect;
-
- return ResourceHeapAllocation{info,
- /*offset*/ 0, std::move(committedResource), heap};
+ if (resourceInfo.SizeInBytes > kMaxHeapSize) {
+ return ResourceHeapAllocation{}; // Invalid
}
- void ResourceAllocatorManager::DestroyPool() {
- for (auto& alloc : mPooledHeapAllocators) {
- alloc->DestroyPool();
- }
+ // CreateCommittedResource will implicitly make the created resource resident. We must
+ // ensure enough free memory exists before allocating to avoid an out-of-memory error when
+ // overcommitted.
+ DAWN_TRY(mDevice->GetResidencyManager()->EnsureCanAllocate(
+ resourceInfo.SizeInBytes, GetMemorySegment(mDevice, heapType)));
+
+ // Note: Heap flags are inferred by the resource descriptor and do not need to be explicitly
+ // provided to CreateCommittedResource.
+ ComPtr<ID3D12Resource> committedResource;
+ DAWN_TRY(CheckOutOfMemoryHRESULT(
+ mDevice->GetD3D12Device()->CreateCommittedResource(
+ &heapProperties, D3D12_HEAP_FLAG_NONE, &resourceDescriptor, initialUsage,
+ optimizedClearValue, IID_PPV_ARGS(&committedResource)),
+ "ID3D12Device::CreateCommittedResource"));
+
+ // When using CreateCommittedResource, D3D12 creates an implicit heap that contains the
+ // resource allocation. Because Dawn's memory residency management occurs at the resource
+ // heap granularity, every directly allocated ResourceHeapAllocation also stores a Heap
+ // object. This object is created manually, and must be deleted manually upon deallocation
+ // of the committed resource.
+ Heap* heap =
+ new Heap(committedResource, GetMemorySegment(mDevice, heapType), resourceInfo.SizeInBytes);
+
+ // Calling CreateCommittedResource implicitly calls MakeResident on the resource. We must
+ // track this to avoid calling MakeResident a second time.
+ mDevice->GetResidencyManager()->TrackResidentAllocation(heap);
+
+ AllocationInfo info;
+ info.mMethod = AllocationMethod::kDirect;
+
+ return ResourceHeapAllocation{info,
+ /*offset*/ 0, std::move(committedResource), heap};
+}
+
+void ResourceAllocatorManager::DestroyPool() {
+ for (auto& alloc : mPooledHeapAllocators) {
+ alloc->DestroyPool();
}
+}
} // namespace dawn::native::d3d12
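
AllocateMemory above tries sub-allocation (a placed resource in a shared heap) first and only
then falls back to a committed resource. Here is a minimal sketch of that two-step fallback under
invented names (FakeAllocation, TrySubAllocate, AllocateCommitted), with a simple size check
standing in for the real buddy-allocator failure path:

    #include <cstdint>
    #include <cstdio>
    #include <optional>

    // Illustrative stand-ins; none of these names exist in Dawn.
    struct FakeAllocation {
        uint64_t size = 0;
        bool placed = false;  // true: sub-allocated in a shared heap, false: committed
    };

    constexpr uint64_t kFakeMaxHeapSize = 32ull * 1024 * 1024 * 1024;  // mirrors the 32GB cap

    // Pretend sub-allocation only works for resources that fit in a shared heap.
    std::optional<FakeAllocation> TrySubAllocate(uint64_t size) {
        if (size > kFakeMaxHeapSize) {
            return std::nullopt;
        }
        return FakeAllocation{size, /*placed=*/true};
    }

    // A committed resource gets its own implicit heap and always succeeds in this sketch.
    std::optional<FakeAllocation> AllocateCommitted(uint64_t size) {
        return FakeAllocation{size, /*placed=*/false};
    }

    // Mirrors the AllocateMemory flow: prefer placed resources, fall back to a committed
    // resource, and report failure only when both strategies fail.
    std::optional<FakeAllocation> Allocate(uint64_t size, bool suballocationDisabled) {
        if (!suballocationDisabled) {
            if (std::optional<FakeAllocation> sub = TrySubAllocate(size)) {
                return sub;
            }
        }
        return AllocateCommitted(size);
    }

    int main() {
        std::optional<FakeAllocation> a = Allocate(64 * 1024, /*suballocationDisabled=*/false);
        std::printf("placed=%d\n", a && a->placed);
        return 0;
    }
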
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/ResourceAllocatorManagerD3D12.h b/chromium/third_party/dawn/src/dawn/native/d3d12/ResourceAllocatorManagerD3D12.h
index 41058ce8c24..5b6dfd84600 100644
--- a/chromium/third_party/dawn/src/dawn/native/d3d12/ResourceAllocatorManagerD3D12.h
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/ResourceAllocatorManagerD3D12.h
@@ -15,6 +15,9 @@
#ifndef SRC_DAWN_NATIVE_D3D12_RESOURCEALLOCATORMANAGERD3D12_H_
#define SRC_DAWN_NATIVE_D3D12_RESOURCEALLOCATORMANAGERD3D12_H_
+#include <array>
+#include <memory>
+
#include "dawn/common/SerialQueue.h"
#include "dawn/native/BuddyMemoryAllocator.h"
#include "dawn/native/IntegerTypes.h"
@@ -22,86 +25,83 @@
#include "dawn/native/d3d12/HeapAllocatorD3D12.h"
#include "dawn/native/d3d12/ResourceHeapAllocationD3D12.h"
-#include <array>
-
namespace dawn::native::d3d12 {
- class Device;
-
- // Resource heap types + flags combinations are named after the D3D constants.
- // https://docs.microsoft.com/en-us/windows/win32/api/d3d12/ne-d3d12-d3d12_heap_flags
- // https://docs.microsoft.com/en-us/windows/win32/api/d3d12/ne-d3d12-d3d12_heap_type
- enum ResourceHeapKind {
-
- // Resource heap tier 2
- // Allows resource heaps to contain all buffer and textures types.
- // This enables better heap re-use by avoiding the need for separate heaps and
- // also reduces fragmentation.
- Readback_AllBuffersAndTextures,
- Upload_AllBuffersAndTextures,
- Default_AllBuffersAndTextures,
-
- // Resource heap tier 1
- // Resource heaps only support types from a single resource category.
- Readback_OnlyBuffers,
- Upload_OnlyBuffers,
- Default_OnlyBuffers,
-
- Default_OnlyNonRenderableOrDepthTextures,
- Default_OnlyRenderableOrDepthTextures,
-
- EnumCount,
- InvalidEnum = EnumCount,
- };
-
- // Manages a list of resource allocators used by the device to create resources using
- // multiple allocation methods.
- class ResourceAllocatorManager {
- public:
- ResourceAllocatorManager(Device* device);
-
- ResultOrError<ResourceHeapAllocation> AllocateMemory(
- D3D12_HEAP_TYPE heapType,
- const D3D12_RESOURCE_DESC& resourceDescriptor,
- D3D12_RESOURCE_STATES initialUsage);
-
- void DeallocateMemory(ResourceHeapAllocation& allocation);
-
- void Tick(ExecutionSerial lastCompletedSerial);
-
- void DestroyPool();
-
- private:
- void FreeMemory(ResourceHeapAllocation& allocation);
-
- ResultOrError<ResourceHeapAllocation> CreatePlacedResource(
- D3D12_HEAP_TYPE heapType,
- const D3D12_RESOURCE_DESC& requestedResourceDescriptor,
- const D3D12_CLEAR_VALUE* optimizedClearValue,
- D3D12_RESOURCE_STATES initialUsage);
-
- ResultOrError<ResourceHeapAllocation> CreateCommittedResource(
- D3D12_HEAP_TYPE heapType,
- const D3D12_RESOURCE_DESC& resourceDescriptor,
- const D3D12_CLEAR_VALUE* optimizedClearValue,
- D3D12_RESOURCE_STATES initialUsage);
-
- Device* mDevice;
- uint32_t mResourceHeapTier;
-
- static constexpr uint64_t kMaxHeapSize = 32ll * 1024ll * 1024ll * 1024ll; // 32GB
- static constexpr uint64_t kMinHeapSize = 4ll * 1024ll * 1024ll; // 4MB
-
- std::array<std::unique_ptr<BuddyMemoryAllocator>, ResourceHeapKind::EnumCount>
- mSubAllocatedResourceAllocators;
- std::array<std::unique_ptr<HeapAllocator>, ResourceHeapKind::EnumCount> mHeapAllocators;
-
- std::array<std::unique_ptr<PooledResourceMemoryAllocator>, ResourceHeapKind::EnumCount>
- mPooledHeapAllocators;
-
- SerialQueue<ExecutionSerial, ResourceHeapAllocation> mAllocationsToDelete;
- SerialQueue<ExecutionSerial, std::unique_ptr<ResourceHeapBase>> mHeapsToDelete;
- };
+class Device;
+
+// Resource heap types + flags combinations are named after the D3D constants.
+// https://docs.microsoft.com/en-us/windows/win32/api/d3d12/ne-d3d12-d3d12_heap_flags
+// https://docs.microsoft.com/en-us/windows/win32/api/d3d12/ne-d3d12-d3d12_heap_type
+enum ResourceHeapKind {
+ // Resource heap tier 2
+    // Allows resource heaps to contain all buffer and texture types.
+ // This enables better heap re-use by avoiding the need for separate heaps and
+ // also reduces fragmentation.
+ Readback_AllBuffersAndTextures,
+ Upload_AllBuffersAndTextures,
+ Default_AllBuffersAndTextures,
+
+ // Resource heap tier 1
+ // Resource heaps only support types from a single resource category.
+ Readback_OnlyBuffers,
+ Upload_OnlyBuffers,
+ Default_OnlyBuffers,
+
+ Default_OnlyNonRenderableOrDepthTextures,
+ Default_OnlyRenderableOrDepthTextures,
+
+ EnumCount,
+ InvalidEnum = EnumCount,
+};
+
+// Manages a list of resource allocators used by the device to create resources using
+// multiple allocation methods.
+class ResourceAllocatorManager {
+ public:
+ explicit ResourceAllocatorManager(Device* device);
+
+ ResultOrError<ResourceHeapAllocation> AllocateMemory(
+ D3D12_HEAP_TYPE heapType,
+ const D3D12_RESOURCE_DESC& resourceDescriptor,
+ D3D12_RESOURCE_STATES initialUsage);
+
+ void DeallocateMemory(ResourceHeapAllocation& allocation);
+
+ void Tick(ExecutionSerial lastCompletedSerial);
+
+ void DestroyPool();
+
+ private:
+ void FreeMemory(ResourceHeapAllocation& allocation);
+
+ ResultOrError<ResourceHeapAllocation> CreatePlacedResource(
+ D3D12_HEAP_TYPE heapType,
+ const D3D12_RESOURCE_DESC& requestedResourceDescriptor,
+ const D3D12_CLEAR_VALUE* optimizedClearValue,
+ D3D12_RESOURCE_STATES initialUsage);
+
+ ResultOrError<ResourceHeapAllocation> CreateCommittedResource(
+ D3D12_HEAP_TYPE heapType,
+ const D3D12_RESOURCE_DESC& resourceDescriptor,
+ const D3D12_CLEAR_VALUE* optimizedClearValue,
+ D3D12_RESOURCE_STATES initialUsage);
+
+ Device* mDevice;
+ uint32_t mResourceHeapTier;
+
+ static constexpr uint64_t kMaxHeapSize = 32ll * 1024ll * 1024ll * 1024ll; // 32GB
+ static constexpr uint64_t kMinHeapSize = 4ll * 1024ll * 1024ll; // 4MB
+
+ std::array<std::unique_ptr<BuddyMemoryAllocator>, ResourceHeapKind::EnumCount>
+ mSubAllocatedResourceAllocators;
+ std::array<std::unique_ptr<HeapAllocator>, ResourceHeapKind::EnumCount> mHeapAllocators;
+
+ std::array<std::unique_ptr<PooledResourceMemoryAllocator>, ResourceHeapKind::EnumCount>
+ mPooledHeapAllocators;
+
+ SerialQueue<ExecutionSerial, ResourceHeapAllocation> mAllocationsToDelete;
+ SerialQueue<ExecutionSerial, std::unique_ptr<ResourceHeapBase>> mHeapsToDelete;
+};
} // namespace dawn::native::d3d12
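
The header above introduces ResourceHeapKind to describe which resources a D3D12 heap may hold: on resource heap tier 2 a single kind per heap type covers all buffers and textures, while tier 1 restricts each heap to one resource category. Below is a minimal standalone sketch of that selection logic under those stated rules; HeapType, ResourceClass and PickHeapKind are hypothetical stand-ins, not Dawn or D3D12 API.

// Hypothetical sketch of the tier-1/tier-2 heap-kind split described above.
#include <cstdint>

enum class HeapType { Readback, Upload, Default };
enum class ResourceClass { Buffer, NonRenderableOrDepthTexture, RenderableOrDepthTexture };

enum class HeapKind {
    Readback_AllBuffersAndTextures,
    Upload_AllBuffersAndTextures,
    Default_AllBuffersAndTextures,
    Readback_OnlyBuffers,
    Upload_OnlyBuffers,
    Default_OnlyBuffers,
    Default_OnlyNonRenderableOrDepthTextures,
    Default_OnlyRenderableOrDepthTextures,
};

HeapKind PickHeapKind(uint32_t resourceHeapTier, HeapType type, ResourceClass cls) {
    if (resourceHeapTier >= 2) {
        // Tier 2: one heap kind per heap type, shared by buffers and textures.
        switch (type) {
            case HeapType::Readback: return HeapKind::Readback_AllBuffersAndTextures;
            case HeapType::Upload:   return HeapKind::Upload_AllBuffersAndTextures;
            case HeapType::Default:  return HeapKind::Default_AllBuffersAndTextures;
        }
    }
    // Tier 1: readback/upload heaps only hold buffers; default heaps are split per category.
    switch (type) {
        case HeapType::Readback: return HeapKind::Readback_OnlyBuffers;
        case HeapType::Upload:   return HeapKind::Upload_OnlyBuffers;
        case HeapType::Default:
            switch (cls) {
                case ResourceClass::Buffer:
                    return HeapKind::Default_OnlyBuffers;
                case ResourceClass::NonRenderableOrDepthTexture:
                    return HeapKind::Default_OnlyNonRenderableOrDepthTextures;
                case ResourceClass::RenderableOrDepthTexture:
                    return HeapKind::Default_OnlyRenderableOrDepthTextures;
            }
    }
    return HeapKind::Default_AllBuffersAndTextures;  // Unreachable fallback.
}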
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/ResourceHeapAllocationD3D12.cpp b/chromium/third_party/dawn/src/dawn/native/d3d12/ResourceHeapAllocationD3D12.cpp
index 910e4fb1245..862f87b6dd8 100644
--- a/chromium/third_party/dawn/src/dawn/native/d3d12/ResourceHeapAllocationD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/ResourceHeapAllocationD3D12.cpp
@@ -14,30 +14,30 @@
#include "dawn/native/d3d12/ResourceHeapAllocationD3D12.h"
+#include <utility>
+
#include "dawn/native/d3d12/D3D12Error.h"
#include "dawn/native/d3d12/HeapD3D12.h"
-#include <utility>
-
namespace dawn::native::d3d12 {
- ResourceHeapAllocation::ResourceHeapAllocation(const AllocationInfo& info,
- uint64_t offset,
- ComPtr<ID3D12Resource> resource,
- Heap* heap)
- : ResourceMemoryAllocation(info, offset, heap), mResource(std::move(resource)) {
- ASSERT((info.mMethod == AllocationMethod::kExternal) == (heap == nullptr));
- }
+ResourceHeapAllocation::ResourceHeapAllocation(const AllocationInfo& info,
+ uint64_t offset,
+ ComPtr<ID3D12Resource> resource,
+ Heap* heap)
+ : ResourceMemoryAllocation(info, offset, heap), mResource(std::move(resource)) {
+ ASSERT((info.mMethod == AllocationMethod::kExternal) == (heap == nullptr));
+}
- void ResourceHeapAllocation::Invalidate() {
- ResourceMemoryAllocation::Invalidate();
- mResource.Reset();
- }
+void ResourceHeapAllocation::Invalidate() {
+ ResourceMemoryAllocation::Invalidate();
+ mResource.Reset();
+}
- ID3D12Resource* ResourceHeapAllocation::GetD3D12Resource() const {
- return mResource.Get();
- }
+ID3D12Resource* ResourceHeapAllocation::GetD3D12Resource() const {
+ return mResource.Get();
+}
- D3D12_GPU_VIRTUAL_ADDRESS ResourceHeapAllocation::GetGPUPointer() const {
- return mResource->GetGPUVirtualAddress();
- }
+D3D12_GPU_VIRTUAL_ADDRESS ResourceHeapAllocation::GetGPUPointer() const {
+ return mResource->GetGPUVirtualAddress();
+}
} // namespace dawn::native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/ResourceHeapAllocationD3D12.h b/chromium/third_party/dawn/src/dawn/native/d3d12/ResourceHeapAllocationD3D12.h
index 7cd765c684f..9215199c625 100644
--- a/chromium/third_party/dawn/src/dawn/native/d3d12/ResourceHeapAllocationD3D12.h
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/ResourceHeapAllocationD3D12.h
@@ -21,27 +21,27 @@
namespace dawn::native::d3d12 {
- class Heap;
-
- class ResourceHeapAllocation : public ResourceMemoryAllocation {
- public:
- ResourceHeapAllocation() = default;
- ResourceHeapAllocation(const AllocationInfo& info,
- uint64_t offset,
- ComPtr<ID3D12Resource> resource,
- Heap* heap);
- ~ResourceHeapAllocation() override = default;
- ResourceHeapAllocation(const ResourceHeapAllocation&) = default;
- ResourceHeapAllocation& operator=(const ResourceHeapAllocation&) = default;
-
- void Invalidate() override;
-
- ID3D12Resource* GetD3D12Resource() const;
- D3D12_GPU_VIRTUAL_ADDRESS GetGPUPointer() const;
-
- private:
- ComPtr<ID3D12Resource> mResource;
- };
+class Heap;
+
+class ResourceHeapAllocation : public ResourceMemoryAllocation {
+ public:
+ ResourceHeapAllocation() = default;
+ ResourceHeapAllocation(const AllocationInfo& info,
+ uint64_t offset,
+ ComPtr<ID3D12Resource> resource,
+ Heap* heap);
+ ~ResourceHeapAllocation() override = default;
+ ResourceHeapAllocation(const ResourceHeapAllocation&) = default;
+ ResourceHeapAllocation& operator=(const ResourceHeapAllocation&) = default;
+
+ void Invalidate() override;
+
+ ID3D12Resource* GetD3D12Resource() const;
+ D3D12_GPU_VIRTUAL_ADDRESS GetGPUPointer() const;
+
+ private:
+ ComPtr<ID3D12Resource> mResource;
+};
} // namespace dawn::native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/SamplerD3D12.cpp b/chromium/third_party/dawn/src/dawn/native/d3d12/SamplerD3D12.cpp
index c656931947b..e9e4be66556 100644
--- a/chromium/third_party/dawn/src/dawn/native/d3d12/SamplerD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/SamplerD3D12.cpp
@@ -14,93 +14,94 @@
#include "dawn/native/d3d12/SamplerD3D12.h"
+#include <algorithm>
+
#include "dawn/native/d3d12/DeviceD3D12.h"
#include "dawn/native/d3d12/UtilsD3D12.h"
namespace dawn::native::d3d12 {
- namespace {
- D3D12_TEXTURE_ADDRESS_MODE AddressMode(wgpu::AddressMode mode) {
- switch (mode) {
- case wgpu::AddressMode::Repeat:
- return D3D12_TEXTURE_ADDRESS_MODE_WRAP;
- case wgpu::AddressMode::MirrorRepeat:
- return D3D12_TEXTURE_ADDRESS_MODE_MIRROR;
- case wgpu::AddressMode::ClampToEdge:
- return D3D12_TEXTURE_ADDRESS_MODE_CLAMP;
- }
- }
- } // namespace
-
- // static
- Ref<Sampler> Sampler::Create(Device* device, const SamplerDescriptor* descriptor) {
- return AcquireRef(new Sampler(device, descriptor));
+namespace {
+D3D12_TEXTURE_ADDRESS_MODE AddressMode(wgpu::AddressMode mode) {
+ switch (mode) {
+ case wgpu::AddressMode::Repeat:
+ return D3D12_TEXTURE_ADDRESS_MODE_WRAP;
+ case wgpu::AddressMode::MirrorRepeat:
+ return D3D12_TEXTURE_ADDRESS_MODE_MIRROR;
+ case wgpu::AddressMode::ClampToEdge:
+ return D3D12_TEXTURE_ADDRESS_MODE_CLAMP;
+ }
+}
+} // namespace
+
+// static
+Ref<Sampler> Sampler::Create(Device* device, const SamplerDescriptor* descriptor) {
+ return AcquireRef(new Sampler(device, descriptor));
+}
+
+Sampler::Sampler(Device* device, const SamplerDescriptor* descriptor)
+ : SamplerBase(device, descriptor) {
+ D3D12_FILTER_TYPE minFilter;
+ switch (descriptor->minFilter) {
+ case wgpu::FilterMode::Nearest:
+ minFilter = D3D12_FILTER_TYPE_POINT;
+ break;
+ case wgpu::FilterMode::Linear:
+ minFilter = D3D12_FILTER_TYPE_LINEAR;
+ break;
+ }
+
+ D3D12_FILTER_TYPE magFilter;
+ switch (descriptor->magFilter) {
+ case wgpu::FilterMode::Nearest:
+ magFilter = D3D12_FILTER_TYPE_POINT;
+ break;
+ case wgpu::FilterMode::Linear:
+ magFilter = D3D12_FILTER_TYPE_LINEAR;
+ break;
}
- Sampler::Sampler(Device* device, const SamplerDescriptor* descriptor)
- : SamplerBase(device, descriptor) {
- D3D12_FILTER_TYPE minFilter;
- switch (descriptor->minFilter) {
- case wgpu::FilterMode::Nearest:
- minFilter = D3D12_FILTER_TYPE_POINT;
- break;
- case wgpu::FilterMode::Linear:
- minFilter = D3D12_FILTER_TYPE_LINEAR;
- break;
- }
-
- D3D12_FILTER_TYPE magFilter;
- switch (descriptor->magFilter) {
- case wgpu::FilterMode::Nearest:
- magFilter = D3D12_FILTER_TYPE_POINT;
- break;
- case wgpu::FilterMode::Linear:
- magFilter = D3D12_FILTER_TYPE_LINEAR;
- break;
- }
-
- D3D12_FILTER_TYPE mipmapFilter;
- switch (descriptor->mipmapFilter) {
- case wgpu::FilterMode::Nearest:
- mipmapFilter = D3D12_FILTER_TYPE_POINT;
- break;
- case wgpu::FilterMode::Linear:
- mipmapFilter = D3D12_FILTER_TYPE_LINEAR;
- break;
- }
-
- D3D12_FILTER_REDUCTION_TYPE reduction =
- descriptor->compare == wgpu::CompareFunction::Undefined
- ? D3D12_FILTER_REDUCTION_TYPE_STANDARD
- : D3D12_FILTER_REDUCTION_TYPE_COMPARISON;
-
- // https://docs.microsoft.com/en-us/windows/win32/api/d3d12/ns-d3d12-d3d12_sampler_desc
- mSamplerDesc.MaxAnisotropy = std::min<uint16_t>(GetMaxAnisotropy(), 16u);
-
- if (mSamplerDesc.MaxAnisotropy > 1) {
- mSamplerDesc.Filter = D3D12_ENCODE_ANISOTROPIC_FILTER(reduction);
- } else {
- mSamplerDesc.Filter =
- D3D12_ENCODE_BASIC_FILTER(minFilter, magFilter, mipmapFilter, reduction);
- }
-
- mSamplerDesc.AddressU = AddressMode(descriptor->addressModeU);
- mSamplerDesc.AddressV = AddressMode(descriptor->addressModeV);
- mSamplerDesc.AddressW = AddressMode(descriptor->addressModeW);
- mSamplerDesc.MipLODBias = 0.f;
-
- if (descriptor->compare != wgpu::CompareFunction::Undefined) {
- mSamplerDesc.ComparisonFunc = ToD3D12ComparisonFunc(descriptor->compare);
- } else {
- // Still set the function so it's not garbage.
- mSamplerDesc.ComparisonFunc = D3D12_COMPARISON_FUNC_NEVER;
- }
- mSamplerDesc.MinLOD = descriptor->lodMinClamp;
- mSamplerDesc.MaxLOD = descriptor->lodMaxClamp;
+ D3D12_FILTER_TYPE mipmapFilter;
+ switch (descriptor->mipmapFilter) {
+ case wgpu::FilterMode::Nearest:
+ mipmapFilter = D3D12_FILTER_TYPE_POINT;
+ break;
+ case wgpu::FilterMode::Linear:
+ mipmapFilter = D3D12_FILTER_TYPE_LINEAR;
+ break;
}
- const D3D12_SAMPLER_DESC& Sampler::GetSamplerDescriptor() const {
- return mSamplerDesc;
+ D3D12_FILTER_REDUCTION_TYPE reduction = descriptor->compare == wgpu::CompareFunction::Undefined
+ ? D3D12_FILTER_REDUCTION_TYPE_STANDARD
+ : D3D12_FILTER_REDUCTION_TYPE_COMPARISON;
+
+ // https://docs.microsoft.com/en-us/windows/win32/api/d3d12/ns-d3d12-d3d12_sampler_desc
+ mSamplerDesc.MaxAnisotropy = std::min<uint16_t>(GetMaxAnisotropy(), 16u);
+
+ if (mSamplerDesc.MaxAnisotropy > 1) {
+ mSamplerDesc.Filter = D3D12_ENCODE_ANISOTROPIC_FILTER(reduction);
+ } else {
+ mSamplerDesc.Filter =
+ D3D12_ENCODE_BASIC_FILTER(minFilter, magFilter, mipmapFilter, reduction);
+ }
+
+ mSamplerDesc.AddressU = AddressMode(descriptor->addressModeU);
+ mSamplerDesc.AddressV = AddressMode(descriptor->addressModeV);
+ mSamplerDesc.AddressW = AddressMode(descriptor->addressModeW);
+ mSamplerDesc.MipLODBias = 0.f;
+
+ if (descriptor->compare != wgpu::CompareFunction::Undefined) {
+ mSamplerDesc.ComparisonFunc = ToD3D12ComparisonFunc(descriptor->compare);
+ } else {
+ // Still set the function so it's not garbage.
+ mSamplerDesc.ComparisonFunc = D3D12_COMPARISON_FUNC_NEVER;
}
+ mSamplerDesc.MinLOD = descriptor->lodMinClamp;
+ mSamplerDesc.MaxLOD = descriptor->lodMaxClamp;
+}
+
+const D3D12_SAMPLER_DESC& Sampler::GetSamplerDescriptor() const {
+ return mSamplerDesc;
+}
} // namespace dawn::native::d3d12
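
SamplerD3D12.cpp above builds the sampler descriptor in a fixed order: a comparison reduction is chosen only when a compare function is set, anisotropy is clamped to 16, and anisotropic filtering overrides the individual min/mag/mip filter types whenever MaxAnisotropy is greater than 1. The sketch below restates that decision order with hypothetical stand-in types (FilterType, Reduction, FilterDesc, MakeFilter) rather than the real D3D12 enums and encode macros.

// Hypothetical sketch of the filter-selection order used in Sampler::Sampler above.
#include <algorithm>
#include <cstdint>

enum class FilterType { Point, Linear };
enum class Reduction { Standard, Comparison };

// Stand-in for the encoded D3D12 filter value.
struct FilterDesc {
    bool anisotropic = false;
    FilterType minFilter = FilterType::Point;
    FilterType magFilter = FilterType::Point;
    FilterType mipFilter = FilterType::Point;
    Reduction reduction = Reduction::Standard;
};

FilterDesc MakeFilter(FilterType minF, FilterType magF, FilterType mipF,
                      uint16_t maxAnisotropy, bool hasCompareFunction) {
    FilterDesc desc;
    // A comparison reduction is used only when the sampler has a compare function.
    desc.reduction = hasCompareFunction ? Reduction::Comparison : Reduction::Standard;
    // Anisotropy is capped at 16; anisotropic filtering then overrides the
    // individual min/mag/mip filter choices.
    maxAnisotropy = std::min<uint16_t>(maxAnisotropy, 16u);
    if (maxAnisotropy > 1) {
        desc.anisotropic = true;
    } else {
        desc.minFilter = minF;
        desc.magFilter = magF;
        desc.mipFilter = mipF;
    }
    return desc;
}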
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/SamplerD3D12.h b/chromium/third_party/dawn/src/dawn/native/d3d12/SamplerD3D12.h
index c1013c1dac8..530720ce756 100644
--- a/chromium/third_party/dawn/src/dawn/native/d3d12/SamplerD3D12.h
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/SamplerD3D12.h
@@ -21,19 +21,19 @@
namespace dawn::native::d3d12 {
- class Device;
+class Device;
- class Sampler final : public SamplerBase {
- public:
- static Ref<Sampler> Create(Device* device, const SamplerDescriptor* descriptor);
+class Sampler final : public SamplerBase {
+ public:
+ static Ref<Sampler> Create(Device* device, const SamplerDescriptor* descriptor);
- const D3D12_SAMPLER_DESC& GetSamplerDescriptor() const;
+ const D3D12_SAMPLER_DESC& GetSamplerDescriptor() const;
- private:
- Sampler(Device* device, const SamplerDescriptor* descriptor);
- ~Sampler() override = default;
- D3D12_SAMPLER_DESC mSamplerDesc = {};
- };
+ private:
+ Sampler(Device* device, const SamplerDescriptor* descriptor);
+ ~Sampler() override = default;
+ D3D12_SAMPLER_DESC mSamplerDesc = {};
+};
} // namespace dawn::native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/SamplerHeapCacheD3D12.cpp b/chromium/third_party/dawn/src/dawn/native/d3d12/SamplerHeapCacheD3D12.cpp
index 4659b36e95c..f1d12e3baee 100644
--- a/chromium/third_party/dawn/src/dawn/native/d3d12/SamplerHeapCacheD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/SamplerHeapCacheD3D12.cpp
@@ -14,6 +14,8 @@
#include "dawn/native/d3d12/SamplerHeapCacheD3D12.h"
+#include <utility>
+
#include "dawn/common/Assert.h"
#include "dawn/common/HashUtils.h"
#include "dawn/native/d3d12/BindGroupD3D12.h"
@@ -26,141 +28,137 @@
namespace dawn::native::d3d12 {
- SamplerHeapCacheEntry::SamplerHeapCacheEntry(std::vector<Sampler*> samplers)
- : mSamplers(std::move(samplers)) {
+SamplerHeapCacheEntry::SamplerHeapCacheEntry(std::vector<Sampler*> samplers)
+ : mSamplers(std::move(samplers)) {}
+
+SamplerHeapCacheEntry::SamplerHeapCacheEntry(SamplerHeapCache* cache,
+ StagingDescriptorAllocator* allocator,
+ std::vector<Sampler*> samplers,
+ CPUDescriptorHeapAllocation allocation)
+ : mCPUAllocation(std::move(allocation)),
+ mSamplers(std::move(samplers)),
+ mAllocator(allocator),
+ mCache(cache) {
+ ASSERT(mCache != nullptr);
+ ASSERT(mCPUAllocation.IsValid());
+ ASSERT(!mSamplers.empty());
+}
+
+std::vector<Sampler*>&& SamplerHeapCacheEntry::AcquireSamplers() {
+ return std::move(mSamplers);
+}
+
+SamplerHeapCacheEntry::~SamplerHeapCacheEntry() {
+ // If this is a blueprint then the CPU allocation cannot exist and has no entry to remove.
+ if (mCPUAllocation.IsValid()) {
+ mCache->RemoveCacheEntry(this);
+ mAllocator->Deallocate(&mCPUAllocation);
}
- SamplerHeapCacheEntry::SamplerHeapCacheEntry(SamplerHeapCache* cache,
- StagingDescriptorAllocator* allocator,
- std::vector<Sampler*> samplers,
- CPUDescriptorHeapAllocation allocation)
- : mCPUAllocation(std::move(allocation)),
- mSamplers(std::move(samplers)),
- mAllocator(allocator),
- mCache(cache) {
- ASSERT(mCache != nullptr);
- ASSERT(mCPUAllocation.IsValid());
- ASSERT(!mSamplers.empty());
- }
+ ASSERT(!mCPUAllocation.IsValid());
+}
- std::vector<Sampler*>&& SamplerHeapCacheEntry::AcquireSamplers() {
- return std::move(mSamplers);
+bool SamplerHeapCacheEntry::Populate(Device* device, ShaderVisibleDescriptorAllocator* allocator) {
+ if (allocator->IsAllocationStillValid(mGPUAllocation)) {
+ return true;
}
- SamplerHeapCacheEntry::~SamplerHeapCacheEntry() {
- // If this is a blueprint then the CPU allocation cannot exist and has no entry to remove.
- if (mCPUAllocation.IsValid()) {
- mCache->RemoveCacheEntry(this);
- mAllocator->Deallocate(&mCPUAllocation);
- }
+ ASSERT(!mSamplers.empty());
- ASSERT(!mCPUAllocation.IsValid());
+ // Attempt to allocate descriptors for the currently bound shader-visible heaps.
+ // If either failed, return early to re-allocate and switch the heaps.
+ const uint32_t descriptorCount = mSamplers.size();
+ D3D12_CPU_DESCRIPTOR_HANDLE baseCPUDescriptor;
+ if (!allocator->AllocateGPUDescriptors(descriptorCount, device->GetPendingCommandSerial(),
+ &baseCPUDescriptor, &mGPUAllocation)) {
+ return false;
}
- bool SamplerHeapCacheEntry::Populate(Device* device,
- ShaderVisibleDescriptorAllocator* allocator) {
- if (allocator->IsAllocationStillValid(mGPUAllocation)) {
- return true;
+ // CPU bindgroups are sparsely allocated across CPU heaps. Instead of doing
+ // simple copies per bindgroup, a single non-simple copy could be issued.
+ // TODO(dawn:155): Consider doing this optimization.
+ device->GetD3D12Device()->CopyDescriptorsSimple(descriptorCount, baseCPUDescriptor,
+ mCPUAllocation.GetBaseDescriptor(),
+ D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER);
+
+ return true;
+}
+
+D3D12_GPU_DESCRIPTOR_HANDLE SamplerHeapCacheEntry::GetBaseDescriptor() const {
+ return mGPUAllocation.GetBaseDescriptor();
+}
+
+ResultOrError<Ref<SamplerHeapCacheEntry>> SamplerHeapCache::GetOrCreate(
+ const BindGroup* group,
+ StagingDescriptorAllocator* samplerAllocator) {
+ const BindGroupLayout* bgl = ToBackend(group->GetLayout());
+
+ // If a previously created bindgroup used the same samplers, the backing sampler heap
+ // allocation can be reused. The packed list of samplers acts as the key to lookup the
+ // allocation in a cache.
+ // TODO(dawn:155): Avoid re-allocating the vector each lookup.
+ std::vector<Sampler*> samplers;
+ samplers.reserve(bgl->GetSamplerDescriptorCount());
+
+ for (BindingIndex bindingIndex = bgl->GetDynamicBufferCount();
+ bindingIndex < bgl->GetBindingCount(); ++bindingIndex) {
+ const BindingInfo& bindingInfo = bgl->GetBindingInfo(bindingIndex);
+ if (bindingInfo.bindingType == BindingInfoType::Sampler) {
+ samplers.push_back(ToBackend(group->GetBindingAsSampler(bindingIndex)));
}
-
- ASSERT(!mSamplers.empty());
-
- // Attempt to allocate descriptors for the currently bound shader-visible heaps.
- // If either failed, return early to re-allocate and switch the heaps.
- const uint32_t descriptorCount = mSamplers.size();
- D3D12_CPU_DESCRIPTOR_HANDLE baseCPUDescriptor;
- if (!allocator->AllocateGPUDescriptors(descriptorCount, device->GetPendingCommandSerial(),
- &baseCPUDescriptor, &mGPUAllocation)) {
- return false;
- }
-
- // CPU bindgroups are sparsely allocated across CPU heaps. Instead of doing
- // simple copies per bindgroup, a single non-simple copy could be issued.
- // TODO(dawn:155): Consider doing this optimization.
- device->GetD3D12Device()->CopyDescriptorsSimple(descriptorCount, baseCPUDescriptor,
- mCPUAllocation.GetBaseDescriptor(),
- D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER);
-
- return true;
}
- D3D12_GPU_DESCRIPTOR_HANDLE SamplerHeapCacheEntry::GetBaseDescriptor() const {
- return mGPUAllocation.GetBaseDescriptor();
+ // Check the cache if there exists a sampler heap allocation that corresponds to the
+ // samplers.
+ SamplerHeapCacheEntry blueprint(std::move(samplers));
+ auto iter = mCache.find(&blueprint);
+ if (iter != mCache.end()) {
+ return Ref<SamplerHeapCacheEntry>(*iter);
}
- ResultOrError<Ref<SamplerHeapCacheEntry>> SamplerHeapCache::GetOrCreate(
- const BindGroup* group,
- StagingDescriptorAllocator* samplerAllocator) {
- const BindGroupLayout* bgl = ToBackend(group->GetLayout());
-
- // If a previously created bindgroup used the same samplers, the backing sampler heap
- // allocation can be reused. The packed list of samplers acts as the key to lookup the
- // allocation in a cache.
- // TODO(dawn:155): Avoid re-allocating the vector each lookup.
- std::vector<Sampler*> samplers;
- samplers.reserve(bgl->GetSamplerDescriptorCount());
-
- for (BindingIndex bindingIndex = bgl->GetDynamicBufferCount();
- bindingIndex < bgl->GetBindingCount(); ++bindingIndex) {
- const BindingInfo& bindingInfo = bgl->GetBindingInfo(bindingIndex);
- if (bindingInfo.bindingType == BindingInfoType::Sampler) {
- samplers.push_back(ToBackend(group->GetBindingAsSampler(bindingIndex)));
- }
- }
-
- // Check the cache if there exists a sampler heap allocation that corresponds to the
- // samplers.
- SamplerHeapCacheEntry blueprint(std::move(samplers));
- auto iter = mCache.find(&blueprint);
- if (iter != mCache.end()) {
- return Ref<SamplerHeapCacheEntry>(*iter);
- }
-
- // Steal the sampler vector back from the blueprint to avoid creating a new copy for the
- // real entry below.
- samplers = std::move(blueprint.AcquireSamplers());
+ // Steal the sampler vector back from the blueprint to avoid creating a new copy for the
+ // real entry below.
+ samplers = std::move(blueprint.AcquireSamplers());
- CPUDescriptorHeapAllocation allocation;
- DAWN_TRY_ASSIGN(allocation, samplerAllocator->AllocateCPUDescriptors());
+ CPUDescriptorHeapAllocation allocation;
+ DAWN_TRY_ASSIGN(allocation, samplerAllocator->AllocateCPUDescriptors());
- const uint32_t samplerSizeIncrement = samplerAllocator->GetSizeIncrement();
- ID3D12Device* d3d12Device = mDevice->GetD3D12Device();
+ const uint32_t samplerSizeIncrement = samplerAllocator->GetSizeIncrement();
+ ID3D12Device* d3d12Device = mDevice->GetD3D12Device();
- for (uint32_t i = 0; i < samplers.size(); ++i) {
- const auto& samplerDesc = samplers[i]->GetSamplerDescriptor();
- d3d12Device->CreateSampler(&samplerDesc,
- allocation.OffsetFrom(samplerSizeIncrement, i));
- }
-
- Ref<SamplerHeapCacheEntry> entry = AcquireRef(new SamplerHeapCacheEntry(
- this, samplerAllocator, std::move(samplers), std::move(allocation)));
- mCache.insert(entry.Get());
- return std::move(entry);
+ for (uint32_t i = 0; i < samplers.size(); ++i) {
+ const auto& samplerDesc = samplers[i]->GetSamplerDescriptor();
+ d3d12Device->CreateSampler(&samplerDesc, allocation.OffsetFrom(samplerSizeIncrement, i));
}
- SamplerHeapCache::SamplerHeapCache(Device* device) : mDevice(device) {
- }
+ Ref<SamplerHeapCacheEntry> entry = AcquireRef(new SamplerHeapCacheEntry(
+ this, samplerAllocator, std::move(samplers), std::move(allocation)));
+ mCache.insert(entry.Get());
+ return std::move(entry);
+}
- SamplerHeapCache::~SamplerHeapCache() {
- ASSERT(mCache.empty());
- }
+SamplerHeapCache::SamplerHeapCache(Device* device) : mDevice(device) {}
- void SamplerHeapCache::RemoveCacheEntry(SamplerHeapCacheEntry* entry) {
- ASSERT(entry->GetRefCountForTesting() == 0);
- size_t removedCount = mCache.erase(entry);
- ASSERT(removedCount == 1);
- }
+SamplerHeapCache::~SamplerHeapCache() {
+ ASSERT(mCache.empty());
+}
- size_t SamplerHeapCacheEntry::HashFunc::operator()(const SamplerHeapCacheEntry* entry) const {
- size_t hash = 0;
- for (const Sampler* sampler : entry->mSamplers) {
- HashCombine(&hash, sampler);
- }
- return hash;
- }
+void SamplerHeapCache::RemoveCacheEntry(SamplerHeapCacheEntry* entry) {
+ ASSERT(entry->GetRefCountForTesting() == 0);
+ size_t removedCount = mCache.erase(entry);
+ ASSERT(removedCount == 1);
+}
- bool SamplerHeapCacheEntry::EqualityFunc::operator()(const SamplerHeapCacheEntry* a,
- const SamplerHeapCacheEntry* b) const {
- return a->mSamplers == b->mSamplers;
+size_t SamplerHeapCacheEntry::HashFunc::operator()(const SamplerHeapCacheEntry* entry) const {
+ size_t hash = 0;
+ for (const Sampler* sampler : entry->mSamplers) {
+ HashCombine(&hash, sampler);
}
+ return hash;
+}
+
+bool SamplerHeapCacheEntry::EqualityFunc::operator()(const SamplerHeapCacheEntry* a,
+ const SamplerHeapCacheEntry* b) const {
+ return a->mSamplers == b->mSamplers;
+}
} // namespace dawn::native::d3d12
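
SamplerHeapCacheD3D12.cpp above deduplicates sampler heap allocations by keying an unordered_set of entry pointers on the packed sampler list, probing it with a stack-allocated "blueprint" entry and stealing the vector back from the blueprint on a miss. The standalone sketch below shows the same lookup pattern with hypothetical names (Sampler, Entry, Cache, GetOrCreate) and deliberately simplified ownership (plain new/delete instead of Dawn's refcounting).

// Hypothetical sketch of the hash/equality-keyed cache with blueprint probing.
#include <cstddef>
#include <functional>
#include <unordered_set>
#include <utility>
#include <vector>

struct Sampler {};  // Stand-in; Dawn stores deduplicated Sampler* here.

struct Entry {
    explicit Entry(std::vector<const Sampler*> samplers) : samplers(std::move(samplers)) {}
    std::vector<const Sampler*> samplers;

    struct Hash {
        size_t operator()(const Entry* e) const {
            size_t h = 0;
            for (const Sampler* s : e->samplers) {
                // Simple hash combine over the pointer values.
                h ^= std::hash<const Sampler*>{}(s) + 0x9e3779b97f4a7c15ull + (h << 6) + (h >> 2);
            }
            return h;
        }
    };
    struct Eq {
        bool operator()(const Entry* a, const Entry* b) const { return a->samplers == b->samplers; }
    };
};

class Cache {
  public:
    ~Cache() {
        for (Entry* e : mEntries) {
            delete e;
        }
    }

    // Returns an existing entry with the same packed sampler list, or creates one.
    Entry* GetOrCreate(std::vector<const Sampler*> samplers) {
        Entry blueprint(std::move(samplers));  // Stack "blueprint", used only to probe the set.
        auto it = mEntries.find(&blueprint);
        if (it != mEntries.end()) {
            return *it;
        }
        // Steal the vector back from the blueprint so the real entry avoids a copy.
        Entry* entry = new Entry(std::move(blueprint.samplers));
        mEntries.insert(entry);
        return entry;
    }

  private:
    std::unordered_set<Entry*, Entry::Hash, Entry::Eq> mEntries;
};

The key property this mirrors is that the blueprint never enters the set, so a cache hit allocates nothing; only a miss pays for a heap-allocated entry.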
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/SamplerHeapCacheD3D12.h b/chromium/third_party/dawn/src/dawn/native/d3d12/SamplerHeapCacheD3D12.h
index 28c6fae54e0..e70195749f5 100644
--- a/chromium/third_party/dawn/src/dawn/native/d3d12/SamplerHeapCacheD3D12.h
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/SamplerHeapCacheD3D12.h
@@ -15,13 +15,14 @@
#ifndef SRC_DAWN_NATIVE_D3D12_SAMPLERHEAPCACHED3D12_H_
#define SRC_DAWN_NATIVE_D3D12_SAMPLERHEAPCACHED3D12_H_
+#include <unordered_set>
+#include <vector>
+
#include "dawn/common/RefCounted.h"
#include "dawn/native/BindingInfo.h"
#include "dawn/native/d3d12/CPUDescriptorHeapAllocationD3D12.h"
#include "dawn/native/d3d12/GPUDescriptorHeapAllocationD3D12.h"
-#include <unordered_set>
-
// |SamplerHeapCacheEntry| maintains a cache of sampler descriptor heap allocations.
// Each entry represents one or more sampler descriptors that co-exist in a CPU and
// GPU descriptor heap. The CPU-side allocation is deallocated once the final reference
@@ -34,73 +35,73 @@
// and switches incur expensive pipeline flushes.
namespace dawn::native::d3d12 {
- class BindGroup;
- class Device;
- class Sampler;
- class SamplerHeapCache;
- class StagingDescriptorAllocator;
- class ShaderVisibleDescriptorAllocator;
-
- // Wraps sampler descriptor heap allocations in a cache.
- class SamplerHeapCacheEntry : public RefCounted {
- public:
- SamplerHeapCacheEntry() = default;
- SamplerHeapCacheEntry(std::vector<Sampler*> samplers);
- SamplerHeapCacheEntry(SamplerHeapCache* cache,
- StagingDescriptorAllocator* allocator,
- std::vector<Sampler*> samplers,
- CPUDescriptorHeapAllocation allocation);
- ~SamplerHeapCacheEntry() override;
-
- D3D12_GPU_DESCRIPTOR_HANDLE GetBaseDescriptor() const;
-
- std::vector<Sampler*>&& AcquireSamplers();
-
- bool Populate(Device* device, ShaderVisibleDescriptorAllocator* allocator);
-
- // Functors necessary for the unordered_map<SamplerHeapCacheEntry*>-based cache.
- struct HashFunc {
- size_t operator()(const SamplerHeapCacheEntry* entry) const;
- };
-
- struct EqualityFunc {
- bool operator()(const SamplerHeapCacheEntry* a, const SamplerHeapCacheEntry* b) const;
- };
-
- private:
- CPUDescriptorHeapAllocation mCPUAllocation;
- GPUDescriptorHeapAllocation mGPUAllocation;
-
- // Storing raw pointer because the sampler object will be already hashed
- // by the device and will already be unique.
- std::vector<Sampler*> mSamplers;
-
- StagingDescriptorAllocator* mAllocator = nullptr;
- SamplerHeapCache* mCache = nullptr;
+class BindGroup;
+class Device;
+class Sampler;
+class SamplerHeapCache;
+class StagingDescriptorAllocator;
+class ShaderVisibleDescriptorAllocator;
+
+// Wraps sampler descriptor heap allocations in a cache.
+class SamplerHeapCacheEntry : public RefCounted {
+ public:
+ SamplerHeapCacheEntry() = default;
+ explicit SamplerHeapCacheEntry(std::vector<Sampler*> samplers);
+ SamplerHeapCacheEntry(SamplerHeapCache* cache,
+ StagingDescriptorAllocator* allocator,
+ std::vector<Sampler*> samplers,
+ CPUDescriptorHeapAllocation allocation);
+ ~SamplerHeapCacheEntry() override;
+
+ D3D12_GPU_DESCRIPTOR_HANDLE GetBaseDescriptor() const;
+
+ std::vector<Sampler*>&& AcquireSamplers();
+
+ bool Populate(Device* device, ShaderVisibleDescriptorAllocator* allocator);
+
+ // Functors necessary for the unordered_map<SamplerHeapCacheEntry*>-based cache.
+ struct HashFunc {
+ size_t operator()(const SamplerHeapCacheEntry* entry) const;
+ };
+
+ struct EqualityFunc {
+ bool operator()(const SamplerHeapCacheEntry* a, const SamplerHeapCacheEntry* b) const;
};
- // Cache descriptor heap allocations so that we don't create duplicate ones for every
- // BindGroup.
- class SamplerHeapCache {
- public:
- SamplerHeapCache(Device* device);
- ~SamplerHeapCache();
+ private:
+ CPUDescriptorHeapAllocation mCPUAllocation;
+ GPUDescriptorHeapAllocation mGPUAllocation;
- ResultOrError<Ref<SamplerHeapCacheEntry>> GetOrCreate(
- const BindGroup* group,
- StagingDescriptorAllocator* samplerAllocator);
+ // Storing raw pointer because the sampler object will be already hashed
+ // by the device and will already be unique.
+ std::vector<Sampler*> mSamplers;
- void RemoveCacheEntry(SamplerHeapCacheEntry* entry);
+ StagingDescriptorAllocator* mAllocator = nullptr;
+ SamplerHeapCache* mCache = nullptr;
+};
- private:
- Device* mDevice;
+// Cache descriptor heap allocations so that we don't create duplicate ones for every
+// BindGroup.
+class SamplerHeapCache {
+ public:
+ explicit SamplerHeapCache(Device* device);
+ ~SamplerHeapCache();
- using Cache = std::unordered_set<SamplerHeapCacheEntry*,
- SamplerHeapCacheEntry::HashFunc,
- SamplerHeapCacheEntry::EqualityFunc>;
+ ResultOrError<Ref<SamplerHeapCacheEntry>> GetOrCreate(
+ const BindGroup* group,
+ StagingDescriptorAllocator* samplerAllocator);
- Cache mCache;
- };
+ void RemoveCacheEntry(SamplerHeapCacheEntry* entry);
+
+ private:
+ Device* mDevice;
+
+ using Cache = std::unordered_set<SamplerHeapCacheEntry*,
+ SamplerHeapCacheEntry::HashFunc,
+ SamplerHeapCacheEntry::EqualityFunc>;
+
+ Cache mCache;
+};
} // namespace dawn::native::d3d12
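
The header above states that the CPU-side allocation is deallocated once the final reference to a cache entry is released, with the cache itself holding only non-owning pointers. The sketch below illustrates that lifetime scheme using std::shared_ptr with a custom deleter in place of Dawn's RefCounted type; CachedEntry and EntryCache are hypothetical stand-ins, and the cache is assumed to outlive every entry, matching the ASSERT(mCache.empty()) in the destructor shown earlier.

// Hypothetical sketch of self-removing, reference-counted cache entries.
#include <cassert>
#include <memory>
#include <unordered_set>

struct CachedEntry {
    int key = 0;  // Stand-in payload; Dawn stores descriptor heap allocations here.
};

class EntryCache {
  public:
    ~EntryCache() { assert(mEntries.empty()); }  // All references must be dropped first.

    std::shared_ptr<CachedEntry> Create(int key) {
        auto* raw = new CachedEntry{key};
        mEntries.insert(raw);
        // The deleter drops the non-owning cache slot before destroying the entry,
        // mirroring the cleanup done in SamplerHeapCacheEntry's destructor above.
        return std::shared_ptr<CachedEntry>(raw, [this](CachedEntry* e) {
            mEntries.erase(e);
            delete e;
        });
    }

  private:
    std::unordered_set<CachedEntry*> mEntries;  // Non-owning.
};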
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/ShaderModuleD3D12.cpp b/chromium/third_party/dawn/src/dawn/native/d3d12/ShaderModuleD3D12.cpp
index 0dea76e1d6b..7a23be5e7fd 100644
--- a/chromium/third_party/dawn/src/dawn/native/d3d12/ShaderModuleD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/ShaderModuleD3D12.cpp
@@ -14,12 +14,25 @@
#include "dawn/native/d3d12/ShaderModuleD3D12.h"
+#include <d3dcompiler.h>
+
+#include <map>
+#include <sstream>
+#include <string>
+#include <unordered_map>
+#include <unordered_set>
+#include <utility>
+#include <vector>
+
#include "dawn/common/Assert.h"
#include "dawn/common/BitSetIterator.h"
#include "dawn/common/Log.h"
#include "dawn/common/WindowsUtils.h"
+#include "dawn/native/CacheKey.h"
#include "dawn/native/Pipeline.h"
#include "dawn/native/TintUtils.h"
+#include "dawn/native/d3d12/AdapterD3D12.h"
+#include "dawn/native/d3d12/BackendD3D12.h"
#include "dawn/native/d3d12/BindGroupLayoutD3D12.h"
#include "dawn/native/d3d12/D3D12Error.h"
#include "dawn/native/d3d12/DeviceD3D12.h"
@@ -29,818 +42,769 @@
#include "dawn/platform/DawnPlatform.h"
#include "dawn/platform/tracing/TraceEvent.h"
-#include <d3dcompiler.h>
-
-#include <tint/tint.h>
-#include <map>
-#include <sstream>
-#include <unordered_map>
+#include "tint/tint.h"
namespace dawn::native::d3d12 {
- namespace {
- ResultOrError<uint64_t> GetDXCompilerVersion(ComPtr<IDxcValidator> dxcValidator) {
- ComPtr<IDxcVersionInfo> versionInfo;
- DAWN_TRY(CheckHRESULT(dxcValidator.As(&versionInfo),
- "D3D12 QueryInterface IDxcValidator to IDxcVersionInfo"));
-
- uint32_t compilerMajor, compilerMinor;
- DAWN_TRY(CheckHRESULT(versionInfo->GetVersion(&compilerMajor, &compilerMinor),
- "IDxcVersionInfo::GetVersion"));
-
- // Pack both into a single version number.
- return (uint64_t(compilerMajor) << uint64_t(32)) + compilerMinor;
- }
-
- uint64_t GetD3DCompilerVersion() {
- return D3D_COMPILER_VERSION;
+namespace {
+uint64_t GetD3DCompilerVersion() {
+ return D3D_COMPILER_VERSION;
+}
+
+struct CompareBindingPoint {
+ constexpr bool operator()(const tint::transform::BindingPoint& lhs,
+ const tint::transform::BindingPoint& rhs) const {
+ if (lhs.group != rhs.group) {
+ return lhs.group < rhs.group;
+ } else {
+ return lhs.binding < rhs.binding;
}
+ }
+};
+
+void Serialize(std::stringstream& output, const tint::ast::Access& access) {
+ output << access;
+}
+
+void Serialize(std::stringstream& output, const tint::transform::BindingPoint& binding_point) {
+ output << "(BindingPoint";
+ output << " group=" << binding_point.group;
+ output << " binding=" << binding_point.binding;
+ output << ")";
+}
+
+template <typename T, typename = typename std::enable_if<std::is_fundamental<T>::value>::type>
+void Serialize(std::stringstream& output, const T& val) {
+ output << val;
+}
+
+template <typename T>
+void Serialize(std::stringstream& output,
+ const std::unordered_map<tint::transform::BindingPoint, T>& map) {
+ output << "(map";
+
+ std::map<tint::transform::BindingPoint, T, CompareBindingPoint> sorted(map.begin(), map.end());
+ for (auto& [bindingPoint, value] : sorted) {
+ output << " ";
+ Serialize(output, bindingPoint);
+ output << "=";
+ Serialize(output, value);
+ }
+ output << ")";
+}
+
+void Serialize(std::stringstream& output,
+ const tint::writer::ArrayLengthFromUniformOptions& arrayLengthFromUniform) {
+ output << "(ArrayLengthFromUniformOptions";
+ output << " ubo_binding=";
+ Serialize(output, arrayLengthFromUniform.ubo_binding);
+ output << " bindpoint_to_size_index=";
+ Serialize(output, arrayLengthFromUniform.bindpoint_to_size_index);
+ output << ")";
+}
+
+// 32 bit float has 7 decimal digits of precision so setting n to 8 should be enough
+std::string FloatToStringWithPrecision(float v, std::streamsize n = 8) {
+ std::ostringstream out;
+ out.precision(n);
+ out << std::fixed << v;
+ return out.str();
+}
+
+std::string GetHLSLValueString(EntryPointMetadata::OverridableConstant::Type dawnType,
+ const OverridableConstantScalar* entry,
+ double value = 0) {
+ switch (dawnType) {
+ case EntryPointMetadata::OverridableConstant::Type::Boolean:
+ return std::to_string(entry ? entry->b : static_cast<int32_t>(value));
+ case EntryPointMetadata::OverridableConstant::Type::Float32:
+ return FloatToStringWithPrecision(entry ? entry->f32 : static_cast<float>(value));
+ case EntryPointMetadata::OverridableConstant::Type::Int32:
+ return std::to_string(entry ? entry->i32 : static_cast<int32_t>(value));
+ case EntryPointMetadata::OverridableConstant::Type::Uint32:
+ return std::to_string(entry ? entry->u32 : static_cast<uint32_t>(value));
+ default:
+ UNREACHABLE();
+ }
+}
- struct CompareBindingPoint {
- constexpr bool operator()(const tint::transform::BindingPoint& lhs,
- const tint::transform::BindingPoint& rhs) const {
- if (lhs.group != rhs.group) {
- return lhs.group < rhs.group;
- } else {
- return lhs.binding < rhs.binding;
- }
- }
- };
+constexpr char kSpecConstantPrefix[] = "WGSL_SPEC_CONSTANT_";
- void Serialize(std::stringstream& output, const tint::ast::Access& access) {
- output << access;
- }
+void GetOverridableConstantsDefines(
+ std::vector<std::pair<std::string, std::string>>* defineStrings,
+ const PipelineConstantEntries* pipelineConstantEntries,
+ const EntryPointMetadata::OverridableConstantsMap* shaderEntryPointConstants) {
+ std::unordered_set<std::string> overriddenConstants;
- void Serialize(std::stringstream& output,
- const tint::transform::BindingPoint& binding_point) {
- output << "(BindingPoint";
- output << " group=" << binding_point.group;
- output << " binding=" << binding_point.binding;
- output << ")";
- }
+ // Set pipeline overridden values
+ for (const auto& [name, value] : *pipelineConstantEntries) {
+ overriddenConstants.insert(name);
- template <typename T,
- typename = typename std::enable_if<std::is_fundamental<T>::value>::type>
- void Serialize(std::stringstream& output, const T& val) {
- output << val;
- }
+ // This is already validated so `name` must exist
+ const auto& moduleConstant = shaderEntryPointConstants->at(name);
- template <typename T>
- void Serialize(std::stringstream& output,
- const std::unordered_map<tint::transform::BindingPoint, T>& map) {
- output << "(map";
-
- std::map<tint::transform::BindingPoint, T, CompareBindingPoint> sorted(map.begin(),
- map.end());
- for (auto& [bindingPoint, value] : sorted) {
- output << " ";
- Serialize(output, bindingPoint);
- output << "=";
- Serialize(output, value);
- }
- output << ")";
- }
+ defineStrings->emplace_back(
+ kSpecConstantPrefix + std::to_string(static_cast<int32_t>(moduleConstant.id)),
+ GetHLSLValueString(moduleConstant.type, nullptr, value));
+ }
- void Serialize(std::stringstream& output,
- const tint::writer::ArrayLengthFromUniformOptions& arrayLengthFromUniform) {
- output << "(ArrayLengthFromUniformOptions";
- output << " ubo_binding=";
- Serialize(output, arrayLengthFromUniform.ubo_binding);
- output << " bindpoint_to_size_index=";
- Serialize(output, arrayLengthFromUniform.bindpoint_to_size_index);
- output << ")";
+ // Set shader initialized default values
+ for (const auto& iter : *shaderEntryPointConstants) {
+ const std::string& name = iter.first;
+ if (overriddenConstants.count(name) != 0) {
+ // This constant already has overridden value
+ continue;
}
- // 32 bit float has 7 decimal digits of precision so setting n to 8 should be enough
- std::string FloatToStringWithPrecision(float v, std::streamsize n = 8) {
- std::ostringstream out;
- out.precision(n);
- out << std::fixed << v;
- return out.str();
- }
+ const auto& moduleConstant = shaderEntryPointConstants->at(name);
- std::string GetHLSLValueString(EntryPointMetadata::OverridableConstant::Type dawnType,
- const OverridableConstantScalar* entry,
- double value = 0) {
- switch (dawnType) {
- case EntryPointMetadata::OverridableConstant::Type::Boolean:
- return std::to_string(entry ? entry->b : static_cast<int32_t>(value));
- case EntryPointMetadata::OverridableConstant::Type::Float32:
- return FloatToStringWithPrecision(entry ? entry->f32
- : static_cast<float>(value));
- case EntryPointMetadata::OverridableConstant::Type::Int32:
- return std::to_string(entry ? entry->i32 : static_cast<int32_t>(value));
- case EntryPointMetadata::OverridableConstant::Type::Uint32:
- return std::to_string(entry ? entry->u32 : static_cast<uint32_t>(value));
- default:
- UNREACHABLE();
- }
+        // Uninitialized default values are okay since they are only defined to pass
+ // compilation but not used
+ defineStrings->emplace_back(
+ kSpecConstantPrefix + std::to_string(static_cast<int32_t>(moduleConstant.id)),
+ GetHLSLValueString(moduleConstant.type, &moduleConstant.defaultValue));
+ }
+}
+
+// The inputs to a shader compilation. These have been intentionally isolated from the
+// device to help ensure that the pipeline cache key contains all inputs for compilation.
+struct ShaderCompilationRequest {
+ enum Compiler { FXC, DXC };
+
+ // Common inputs
+ Compiler compiler;
+ const tint::Program* program;
+ const char* entryPointName;
+ SingleShaderStage stage;
+ uint32_t compileFlags;
+ bool disableSymbolRenaming;
+ tint::transform::BindingRemapper::BindingPoints remappedBindingPoints;
+ tint::transform::BindingRemapper::AccessControls remappedAccessControls;
+ bool isRobustnessEnabled;
+ bool usesNumWorkgroups;
+ uint32_t numWorkgroupsRegisterSpace;
+ uint32_t numWorkgroupsShaderRegister;
+ tint::writer::ArrayLengthFromUniformOptions arrayLengthFromUniform;
+ std::vector<std::pair<std::string, std::string>> defineStrings;
+
+ // FXC/DXC common inputs
+ bool disableWorkgroupInit;
+
+ // FXC inputs
+ uint64_t fxcVersion;
+
+ // DXC inputs
+ uint64_t dxcVersion;
+ const D3D12DeviceInfo* deviceInfo;
+ bool hasShaderFloat16Feature;
+
+ static ResultOrError<ShaderCompilationRequest> Create(
+ const char* entryPointName,
+ SingleShaderStage stage,
+ const PipelineLayout* layout,
+ uint32_t compileFlags,
+ const Device* device,
+ const tint::Program* program,
+ const EntryPointMetadata& entryPoint,
+ const ProgrammableStage& programmableStage) {
+ Compiler compiler;
+ uint64_t dxcVersion = 0;
+ if (device->IsToggleEnabled(Toggle::UseDXC)) {
+ compiler = Compiler::DXC;
+ DAWN_TRY_ASSIGN(dxcVersion,
+ ToBackend(device->GetAdapter())->GetBackend()->GetDXCompilerVersion());
+ } else {
+ compiler = Compiler::FXC;
}
- constexpr char kSpecConstantPrefix[] = "WGSL_SPEC_CONSTANT_";
-
- void GetOverridableConstantsDefines(
- std::vector<std::pair<std::string, std::string>>* defineStrings,
- const PipelineConstantEntries* pipelineConstantEntries,
- const EntryPointMetadata::OverridableConstantsMap* shaderEntryPointConstants) {
- std::unordered_set<std::string> overriddenConstants;
-
- // Set pipeline overridden values
- for (const auto& [name, value] : *pipelineConstantEntries) {
- overriddenConstants.insert(name);
-
- // This is already validated so `name` must exist
- const auto& moduleConstant = shaderEntryPointConstants->at(name);
-
- defineStrings->emplace_back(
- kSpecConstantPrefix + std::to_string(static_cast<int32_t>(moduleConstant.id)),
- GetHLSLValueString(moduleConstant.type, nullptr, value));
- }
+ using tint::transform::BindingPoint;
+ using tint::transform::BindingRemapper;
+
+ BindingRemapper::BindingPoints remappedBindingPoints;
+ BindingRemapper::AccessControls remappedAccessControls;
+
+ tint::writer::ArrayLengthFromUniformOptions arrayLengthFromUniform;
+ arrayLengthFromUniform.ubo_binding = {
+ layout->GetDynamicStorageBufferLengthsRegisterSpace(),
+ layout->GetDynamicStorageBufferLengthsShaderRegister()};
+
+ const BindingInfoArray& moduleBindingInfo = entryPoint.bindings;
+ for (BindGroupIndex group : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
+ const BindGroupLayout* bgl = ToBackend(layout->GetBindGroupLayout(group));
+ const auto& groupBindingInfo = moduleBindingInfo[group];
+
+ // d3d12::BindGroupLayout packs the bindings per HLSL register-space. We modify
+ // the Tint AST to make the "bindings" decoration match the offset chosen by
+ // d3d12::BindGroupLayout so that Tint produces HLSL with the correct registers
+ // assigned to each interface variable.
+ for (const auto& [binding, bindingInfo] : groupBindingInfo) {
+ BindingIndex bindingIndex = bgl->GetBindingIndex(binding);
+ BindingPoint srcBindingPoint{static_cast<uint32_t>(group),
+ static_cast<uint32_t>(binding)};
+ BindingPoint dstBindingPoint{static_cast<uint32_t>(group),
+ bgl->GetShaderRegister(bindingIndex)};
+ if (srcBindingPoint != dstBindingPoint) {
+ remappedBindingPoints.emplace(srcBindingPoint, dstBindingPoint);
+ }
- // Set shader initialized default values
- for (const auto& iter : *shaderEntryPointConstants) {
- const std::string& name = iter.first;
- if (overriddenConstants.count(name) != 0) {
- // This constant already has overridden value
- continue;
+ // Declaring a read-only storage buffer in HLSL but specifying a storage
+ // buffer in the BGL produces the wrong output. Force read-only storage
+ // buffer bindings to be treated as UAV instead of SRV. Internal storage
+ // buffer is a storage buffer used in the internal pipeline.
+ const bool forceStorageBufferAsUAV =
+ (bindingInfo.buffer.type == wgpu::BufferBindingType::ReadOnlyStorage &&
+ (bgl->GetBindingInfo(bindingIndex).buffer.type ==
+ wgpu::BufferBindingType::Storage ||
+ bgl->GetBindingInfo(bindingIndex).buffer.type ==
+ kInternalStorageBufferBinding));
+ if (forceStorageBufferAsUAV) {
+ remappedAccessControls.emplace(srcBindingPoint, tint::ast::Access::kReadWrite);
}
+ }
- const auto& moduleConstant = shaderEntryPointConstants->at(name);
+ // Add arrayLengthFromUniform options
+ {
+ for (const auto& bindingAndRegisterOffset :
+ layout->GetDynamicStorageBufferLengthInfo()[group].bindingAndRegisterOffsets) {
+ BindingNumber binding = bindingAndRegisterOffset.binding;
+ uint32_t registerOffset = bindingAndRegisterOffset.registerOffset;
+
+ BindingPoint bindingPoint{static_cast<uint32_t>(group),
+ static_cast<uint32_t>(binding)};
+ // Get the renamed binding point if it was remapped.
+ auto it = remappedBindingPoints.find(bindingPoint);
+ if (it != remappedBindingPoints.end()) {
+ bindingPoint = it->second;
+ }
-                // Uninitialized default values are okay since they are only defined to pass
- // compilation but not used
- defineStrings->emplace_back(
- kSpecConstantPrefix + std::to_string(static_cast<int32_t>(moduleConstant.id)),
- GetHLSLValueString(moduleConstant.type, &moduleConstant.defaultValue));
+ arrayLengthFromUniform.bindpoint_to_size_index.emplace(bindingPoint,
+ registerOffset);
+ }
}
}
- // The inputs to a shader compilation. These have been intentionally isolated from the
- // device to help ensure that the pipeline cache key contains all inputs for compilation.
- struct ShaderCompilationRequest {
- enum Compiler { FXC, DXC };
-
- // Common inputs
- Compiler compiler;
- const tint::Program* program;
- const char* entryPointName;
- SingleShaderStage stage;
- uint32_t compileFlags;
- bool disableSymbolRenaming;
- tint::transform::BindingRemapper::BindingPoints remappedBindingPoints;
- tint::transform::BindingRemapper::AccessControls remappedAccessControls;
- bool isRobustnessEnabled;
- bool usesNumWorkgroups;
- uint32_t numWorkgroupsRegisterSpace;
- uint32_t numWorkgroupsShaderRegister;
- tint::writer::ArrayLengthFromUniformOptions arrayLengthFromUniform;
- std::vector<std::pair<std::string, std::string>> defineStrings;
-
- // FXC/DXC common inputs
- bool disableWorkgroupInit;
-
- // FXC inputs
- uint64_t fxcVersion;
-
- // DXC inputs
- uint64_t dxcVersion;
- const D3D12DeviceInfo* deviceInfo;
- bool hasShaderFloat16Feature;
-
- static ResultOrError<ShaderCompilationRequest> Create(
- const char* entryPointName,
- SingleShaderStage stage,
- const PipelineLayout* layout,
- uint32_t compileFlags,
- const Device* device,
- const tint::Program* program,
- const EntryPointMetadata& entryPoint,
- const ProgrammableStage& programmableStage) {
- Compiler compiler;
- uint64_t dxcVersion = 0;
- if (device->IsToggleEnabled(Toggle::UseDXC)) {
- compiler = Compiler::DXC;
- DAWN_TRY_ASSIGN(dxcVersion, GetDXCompilerVersion(device->GetDxcValidator()));
- } else {
- compiler = Compiler::FXC;
- }
-
- using tint::transform::BindingPoint;
- using tint::transform::BindingRemapper;
-
- BindingRemapper::BindingPoints remappedBindingPoints;
- BindingRemapper::AccessControls remappedAccessControls;
-
- tint::writer::ArrayLengthFromUniformOptions arrayLengthFromUniform;
- arrayLengthFromUniform.ubo_binding = {
- layout->GetDynamicStorageBufferLengthsRegisterSpace(),
- layout->GetDynamicStorageBufferLengthsShaderRegister()};
-
- const BindingInfoArray& moduleBindingInfo = entryPoint.bindings;
- for (BindGroupIndex group : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
- const BindGroupLayout* bgl = ToBackend(layout->GetBindGroupLayout(group));
- const auto& groupBindingInfo = moduleBindingInfo[group];
-
- // d3d12::BindGroupLayout packs the bindings per HLSL register-space. We modify
- // the Tint AST to make the "bindings" decoration match the offset chosen by
- // d3d12::BindGroupLayout so that Tint produces HLSL with the correct registers
- // assigned to each interface variable.
- for (const auto& [binding, bindingInfo] : groupBindingInfo) {
- BindingIndex bindingIndex = bgl->GetBindingIndex(binding);
- BindingPoint srcBindingPoint{static_cast<uint32_t>(group),
- static_cast<uint32_t>(binding)};
- BindingPoint dstBindingPoint{static_cast<uint32_t>(group),
- bgl->GetShaderRegister(bindingIndex)};
- if (srcBindingPoint != dstBindingPoint) {
- remappedBindingPoints.emplace(srcBindingPoint, dstBindingPoint);
- }
-
- // Declaring a read-only storage buffer in HLSL but specifying a storage
- // buffer in the BGL produces the wrong output. Force read-only storage
- // buffer bindings to be treated as UAV instead of SRV. Internal storage
- // buffer is a storage buffer used in the internal pipeline.
- const bool forceStorageBufferAsUAV =
- (bindingInfo.buffer.type == wgpu::BufferBindingType::ReadOnlyStorage &&
- (bgl->GetBindingInfo(bindingIndex).buffer.type ==
- wgpu::BufferBindingType::Storage ||
- bgl->GetBindingInfo(bindingIndex).buffer.type ==
- kInternalStorageBufferBinding));
- if (forceStorageBufferAsUAV) {
- remappedAccessControls.emplace(srcBindingPoint,
- tint::ast::Access::kReadWrite);
- }
- }
+ ShaderCompilationRequest request;
+ request.compiler = compiler;
+ request.program = program;
+ request.entryPointName = entryPointName;
+ request.stage = stage;
+ request.compileFlags = compileFlags;
+ request.disableSymbolRenaming = device->IsToggleEnabled(Toggle::DisableSymbolRenaming);
+ request.remappedBindingPoints = std::move(remappedBindingPoints);
+ request.remappedAccessControls = std::move(remappedAccessControls);
+ request.isRobustnessEnabled = device->IsRobustnessEnabled();
+ request.disableWorkgroupInit = device->IsToggleEnabled(Toggle::DisableWorkgroupInit);
+ request.usesNumWorkgroups = entryPoint.usesNumWorkgroups;
+ request.numWorkgroupsShaderRegister = layout->GetNumWorkgroupsShaderRegister();
+ request.numWorkgroupsRegisterSpace = layout->GetNumWorkgroupsRegisterSpace();
+ request.arrayLengthFromUniform = std::move(arrayLengthFromUniform);
+ request.fxcVersion = compiler == Compiler::FXC ? GetD3DCompilerVersion() : 0;
+ request.dxcVersion = compiler == Compiler::DXC ? dxcVersion : 0;
+ request.deviceInfo = &device->GetDeviceInfo();
+ request.hasShaderFloat16Feature = device->IsFeatureEnabled(Feature::ShaderFloat16);
+
+ GetOverridableConstantsDefines(
+ &request.defineStrings, &programmableStage.constants,
+ &programmableStage.module->GetEntryPoint(programmableStage.entryPoint)
+ .overridableConstants);
+
+ return std::move(request);
+ }
- // Add arrayLengthFromUniform options
- {
- for (const auto& bindingAndRegisterOffset :
- layout->GetDynamicStorageBufferLengthInfo()[group]
- .bindingAndRegisterOffsets) {
- BindingNumber binding = bindingAndRegisterOffset.binding;
- uint32_t registerOffset = bindingAndRegisterOffset.registerOffset;
-
- BindingPoint bindingPoint{static_cast<uint32_t>(group),
- static_cast<uint32_t>(binding)};
- // Get the renamed binding point if it was remapped.
- auto it = remappedBindingPoints.find(bindingPoint);
- if (it != remappedBindingPoints.end()) {
- bindingPoint = it->second;
- }
-
- arrayLengthFromUniform.bindpoint_to_size_index.emplace(bindingPoint,
- registerOffset);
- }
- }
- }
+ // TODO(dawn:1341): Move to use CacheKey instead of the vector.
+ ResultOrError<std::vector<uint8_t>> CreateCacheKey() const {
+ // Generate the WGSL from the Tint program so it's normalized.
+ // TODO(tint:1180): Consider using a binary serialization of the tint AST for a more
+ // compact representation.
+ auto result = tint::writer::wgsl::Generate(program, tint::writer::wgsl::Options{});
+ if (!result.success) {
+ std::ostringstream errorStream;
+ errorStream << "Tint WGSL failure:" << std::endl;
+ errorStream << "Generator: " << result.error << std::endl;
+ return DAWN_INTERNAL_ERROR(errorStream.str().c_str());
+ }
- ShaderCompilationRequest request;
- request.compiler = compiler;
- request.program = program;
- request.entryPointName = entryPointName;
- request.stage = stage;
- request.compileFlags = compileFlags;
- request.disableSymbolRenaming =
- device->IsToggleEnabled(Toggle::DisableSymbolRenaming);
- request.remappedBindingPoints = std::move(remappedBindingPoints);
- request.remappedAccessControls = std::move(remappedAccessControls);
- request.isRobustnessEnabled = device->IsRobustnessEnabled();
- request.disableWorkgroupInit =
- device->IsToggleEnabled(Toggle::DisableWorkgroupInit);
- request.usesNumWorkgroups = entryPoint.usesNumWorkgroups;
- request.numWorkgroupsShaderRegister = layout->GetNumWorkgroupsShaderRegister();
- request.numWorkgroupsRegisterSpace = layout->GetNumWorkgroupsRegisterSpace();
- request.arrayLengthFromUniform = std::move(arrayLengthFromUniform);
- request.fxcVersion = compiler == Compiler::FXC ? GetD3DCompilerVersion() : 0;
- request.dxcVersion = compiler == Compiler::DXC ? dxcVersion : 0;
- request.deviceInfo = &device->GetDeviceInfo();
- request.hasShaderFloat16Feature = device->IsFeatureEnabled(Feature::ShaderFloat16);
-
- GetOverridableConstantsDefines(
- &request.defineStrings, &programmableStage.constants,
- &programmableStage.module->GetEntryPoint(programmableStage.entryPoint)
- .overridableConstants);
-
- return std::move(request);
- }
+ std::stringstream stream;
- ResultOrError<PersistentCacheKey> CreateCacheKey() const {
- // Generate the WGSL from the Tint program so it's normalized.
- // TODO(tint:1180): Consider using a binary serialization of the tint AST for a more
- // compact representation.
- auto result = tint::writer::wgsl::Generate(program, tint::writer::wgsl::Options{});
- if (!result.success) {
- std::ostringstream errorStream;
- errorStream << "Tint WGSL failure:" << std::endl;
- errorStream << "Generator: " << result.error << std::endl;
- return DAWN_INTERNAL_ERROR(errorStream.str().c_str());
- }
+ // Prefix the key with the type to avoid collisions from another type that could
+ // have the same key.
+ stream << static_cast<uint32_t>(CacheKey::Type::Shader);
+ stream << "\n";
- std::stringstream stream;
+ stream << result.wgsl.length();
+ stream << "\n";
- // Prefix the key with the type to avoid collisions from another type that could
- // have the same key.
- stream << static_cast<uint32_t>(PersistentKeyType::Shader);
- stream << "\n";
+ stream << result.wgsl;
+ stream << "\n";
- stream << result.wgsl.length();
- stream << "\n";
+ stream << "(ShaderCompilationRequest";
+ stream << " compiler=" << compiler;
+ stream << " entryPointName=" << entryPointName;
+ stream << " stage=" << uint32_t(stage);
+ stream << " compileFlags=" << compileFlags;
+ stream << " disableSymbolRenaming=" << disableSymbolRenaming;
- stream << result.wgsl;
- stream << "\n";
+ stream << " remappedBindingPoints=";
+ Serialize(stream, remappedBindingPoints);
- stream << "(ShaderCompilationRequest";
- stream << " compiler=" << compiler;
- stream << " entryPointName=" << entryPointName;
- stream << " stage=" << uint32_t(stage);
- stream << " compileFlags=" << compileFlags;
- stream << " disableSymbolRenaming=" << disableSymbolRenaming;
+ stream << " remappedAccessControls=";
+ Serialize(stream, remappedAccessControls);
- stream << " remappedBindingPoints=";
- Serialize(stream, remappedBindingPoints);
+ stream << " useNumWorkgroups=" << usesNumWorkgroups;
+ stream << " numWorkgroupsRegisterSpace=" << numWorkgroupsRegisterSpace;
+ stream << " numWorkgroupsShaderRegister=" << numWorkgroupsShaderRegister;
- stream << " remappedAccessControls=";
- Serialize(stream, remappedAccessControls);
+ stream << " arrayLengthFromUniform=";
+ Serialize(stream, arrayLengthFromUniform);
- stream << " useNumWorkgroups=" << usesNumWorkgroups;
- stream << " numWorkgroupsRegisterSpace=" << numWorkgroupsRegisterSpace;
- stream << " numWorkgroupsShaderRegister=" << numWorkgroupsShaderRegister;
+ stream << " shaderModel=" << deviceInfo->shaderModel;
+ stream << " disableWorkgroupInit=" << disableWorkgroupInit;
+ stream << " isRobustnessEnabled=" << isRobustnessEnabled;
+ stream << " fxcVersion=" << fxcVersion;
+ stream << " dxcVersion=" << dxcVersion;
+ stream << " hasShaderFloat16Feature=" << hasShaderFloat16Feature;
- stream << " arrayLengthFromUniform=";
- Serialize(stream, arrayLengthFromUniform);
+ stream << " defines={";
+ for (const auto& [name, value] : defineStrings) {
+ stream << " <" << name << "," << value << ">";
+ }
+ stream << " }";
- stream << " shaderModel=" << deviceInfo->shaderModel;
- stream << " disableWorkgroupInit=" << disableWorkgroupInit;
- stream << " isRobustnessEnabled=" << isRobustnessEnabled;
- stream << " fxcVersion=" << fxcVersion;
- stream << " dxcVersion=" << dxcVersion;
- stream << " hasShaderFloat16Feature=" << hasShaderFloat16Feature;
+ stream << ")";
+ stream << "\n";
- stream << " defines={";
- for (const auto& [name, value] : defineStrings) {
- stream << " <" << name << "," << value << ">";
- }
- stream << " }";
+ return std::vector<uint8_t>(std::istreambuf_iterator<char>{stream},
+ std::istreambuf_iterator<char>{});
+ }
+};
- stream << ")";
- stream << "\n";
+std::vector<const wchar_t*> GetDXCArguments(uint32_t compileFlags, bool enable16BitTypes) {
+ std::vector<const wchar_t*> arguments;
+ if (compileFlags & D3DCOMPILE_ENABLE_BACKWARDS_COMPATIBILITY) {
+ arguments.push_back(L"/Gec");
+ }
+ if (compileFlags & D3DCOMPILE_IEEE_STRICTNESS) {
+ arguments.push_back(L"/Gis");
+ }
+ constexpr uint32_t d3dCompileFlagsBits = D3DCOMPILE_OPTIMIZATION_LEVEL2;
+ if (compileFlags & d3dCompileFlagsBits) {
+ switch (compileFlags & D3DCOMPILE_OPTIMIZATION_LEVEL2) {
+ case D3DCOMPILE_OPTIMIZATION_LEVEL0:
+ arguments.push_back(L"/O0");
+ break;
+ case D3DCOMPILE_OPTIMIZATION_LEVEL2:
+ arguments.push_back(L"/O2");
+ break;
+ case D3DCOMPILE_OPTIMIZATION_LEVEL3:
+ arguments.push_back(L"/O3");
+ break;
+ }
+ }
+ if (compileFlags & D3DCOMPILE_DEBUG) {
+ arguments.push_back(L"/Zi");
+ }
+ if (compileFlags & D3DCOMPILE_PACK_MATRIX_ROW_MAJOR) {
+ arguments.push_back(L"/Zpr");
+ }
+ if (compileFlags & D3DCOMPILE_PACK_MATRIX_COLUMN_MAJOR) {
+ arguments.push_back(L"/Zpc");
+ }
+ if (compileFlags & D3DCOMPILE_AVOID_FLOW_CONTROL) {
+ arguments.push_back(L"/Gfa");
+ }
+ if (compileFlags & D3DCOMPILE_PREFER_FLOW_CONTROL) {
+ arguments.push_back(L"/Gfp");
+ }
+ if (compileFlags & D3DCOMPILE_RESOURCES_MAY_ALIAS) {
+ arguments.push_back(L"/res_may_alias");
+ }
- return PersistentCacheKey(std::istreambuf_iterator<char>{stream},
- std::istreambuf_iterator<char>{});
- }
- };
+ if (enable16BitTypes) {
+ // enable-16bit-types are only allowed in -HV 2018 (default)
+ arguments.push_back(L"/enable-16bit-types");
+ }
- std::vector<const wchar_t*> GetDXCArguments(uint32_t compileFlags, bool enable16BitTypes) {
- std::vector<const wchar_t*> arguments;
- if (compileFlags & D3DCOMPILE_ENABLE_BACKWARDS_COMPATIBILITY) {
- arguments.push_back(L"/Gec");
- }
- if (compileFlags & D3DCOMPILE_IEEE_STRICTNESS) {
- arguments.push_back(L"/Gis");
- }
- constexpr uint32_t d3dCompileFlagsBits = D3DCOMPILE_OPTIMIZATION_LEVEL2;
- if (compileFlags & d3dCompileFlagsBits) {
- switch (compileFlags & D3DCOMPILE_OPTIMIZATION_LEVEL2) {
- case D3DCOMPILE_OPTIMIZATION_LEVEL0:
- arguments.push_back(L"/O0");
- break;
- case D3DCOMPILE_OPTIMIZATION_LEVEL2:
- arguments.push_back(L"/O2");
- break;
- case D3DCOMPILE_OPTIMIZATION_LEVEL3:
- arguments.push_back(L"/O3");
- break;
- }
- }
- if (compileFlags & D3DCOMPILE_DEBUG) {
- arguments.push_back(L"/Zi");
- }
- if (compileFlags & D3DCOMPILE_PACK_MATRIX_ROW_MAJOR) {
- arguments.push_back(L"/Zpr");
- }
- if (compileFlags & D3DCOMPILE_PACK_MATRIX_COLUMN_MAJOR) {
- arguments.push_back(L"/Zpc");
- }
- if (compileFlags & D3DCOMPILE_AVOID_FLOW_CONTROL) {
- arguments.push_back(L"/Gfa");
- }
- if (compileFlags & D3DCOMPILE_PREFER_FLOW_CONTROL) {
- arguments.push_back(L"/Gfp");
- }
- if (compileFlags & D3DCOMPILE_RESOURCES_MAY_ALIAS) {
- arguments.push_back(L"/res_may_alias");
- }
+ arguments.push_back(L"-HV");
+ arguments.push_back(L"2018");
- if (enable16BitTypes) {
- // enable-16bit-types are only allowed in -HV 2018 (default)
- arguments.push_back(L"/enable-16bit-types");
- }
+ return arguments;
+}
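
As a rough illustration of the flag-to-switch mapping above (the flag combination below is a made-up example, not taken from this patch):

    // Hypothetical caller: with these flags GetDXCArguments() yields {L"/Zi", L"/Zpr", L"-HV", L"2018"}.
    uint32_t compileFlags = D3DCOMPILE_DEBUG | D3DCOMPILE_PACK_MATRIX_ROW_MAJOR;
    std::vector<const wchar_t*> dxcArgs = GetDXCArguments(compileFlags, /*enable16BitTypes=*/false);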
- arguments.push_back(L"-HV");
- arguments.push_back(L"2018");
+ResultOrError<ComPtr<IDxcBlob>> CompileShaderDXC(IDxcLibrary* dxcLibrary,
+ IDxcCompiler* dxcCompiler,
+ const ShaderCompilationRequest& request,
+ const std::string& hlslSource) {
+ ComPtr<IDxcBlobEncoding> sourceBlob;
+ DAWN_TRY(CheckHRESULT(dxcLibrary->CreateBlobWithEncodingOnHeapCopy(
+ hlslSource.c_str(), hlslSource.length(), CP_UTF8, &sourceBlob),
+ "DXC create blob"));
- return arguments;
- }
+ std::wstring entryPointW;
+ DAWN_TRY_ASSIGN(entryPointW, ConvertStringToWstring(request.entryPointName));
- ResultOrError<ComPtr<IDxcBlob>> CompileShaderDXC(IDxcLibrary* dxcLibrary,
- IDxcCompiler* dxcCompiler,
- const ShaderCompilationRequest& request,
- const std::string& hlslSource) {
- ComPtr<IDxcBlobEncoding> sourceBlob;
- DAWN_TRY(
- CheckHRESULT(dxcLibrary->CreateBlobWithEncodingOnHeapCopy(
- hlslSource.c_str(), hlslSource.length(), CP_UTF8, &sourceBlob),
- "DXC create blob"));
-
- std::wstring entryPointW;
- DAWN_TRY_ASSIGN(entryPointW, ConvertStringToWstring(request.entryPointName));
-
- std::vector<const wchar_t*> arguments =
- GetDXCArguments(request.compileFlags, request.hasShaderFloat16Feature);
-
- // Build defines for overridable constants
- std::vector<std::pair<std::wstring, std::wstring>> defineStrings;
- defineStrings.reserve(request.defineStrings.size());
- for (const auto& [name, value] : request.defineStrings) {
- defineStrings.emplace_back(UTF8ToWStr(name.c_str()), UTF8ToWStr(value.c_str()));
- }
+ std::vector<const wchar_t*> arguments =
+ GetDXCArguments(request.compileFlags, request.hasShaderFloat16Feature);
- std::vector<DxcDefine> dxcDefines;
- dxcDefines.reserve(defineStrings.size());
- for (const auto& [name, value] : defineStrings) {
- dxcDefines.push_back({name.c_str(), value.c_str()});
- }
+ // Build defines for overridable constants
+ std::vector<std::pair<std::wstring, std::wstring>> defineStrings;
+ defineStrings.reserve(request.defineStrings.size());
+ for (const auto& [name, value] : request.defineStrings) {
+ defineStrings.emplace_back(UTF8ToWStr(name.c_str()), UTF8ToWStr(value.c_str()));
+ }
- ComPtr<IDxcOperationResult> result;
- DAWN_TRY(CheckHRESULT(
- dxcCompiler->Compile(sourceBlob.Get(), nullptr, entryPointW.c_str(),
- request.deviceInfo->shaderProfiles[request.stage].c_str(),
- arguments.data(), arguments.size(), dxcDefines.data(),
- dxcDefines.size(), nullptr, &result),
- "DXC compile"));
+ std::vector<DxcDefine> dxcDefines;
+ dxcDefines.reserve(defineStrings.size());
+ for (const auto& [name, value] : defineStrings) {
+ dxcDefines.push_back({name.c_str(), value.c_str()});
+ }
- HRESULT hr;
- DAWN_TRY(CheckHRESULT(result->GetStatus(&hr), "DXC get status"));
+ ComPtr<IDxcOperationResult> result;
+ DAWN_TRY(
+ CheckHRESULT(dxcCompiler->Compile(sourceBlob.Get(), nullptr, entryPointW.c_str(),
+ request.deviceInfo->shaderProfiles[request.stage].c_str(),
+ arguments.data(), arguments.size(), dxcDefines.data(),
+ dxcDefines.size(), nullptr, &result),
+ "DXC compile"));
- if (FAILED(hr)) {
- ComPtr<IDxcBlobEncoding> errors;
- DAWN_TRY(CheckHRESULT(result->GetErrorBuffer(&errors), "DXC get error buffer"));
+ HRESULT hr;
+ DAWN_TRY(CheckHRESULT(result->GetStatus(&hr), "DXC get status"));
- return DAWN_FORMAT_VALIDATION_ERROR("DXC compile failed with: %s",
- static_cast<char*>(errors->GetBufferPointer()));
- }
+ if (FAILED(hr)) {
+ ComPtr<IDxcBlobEncoding> errors;
+ DAWN_TRY(CheckHRESULT(result->GetErrorBuffer(&errors), "DXC get error buffer"));
- ComPtr<IDxcBlob> compiledShader;
- DAWN_TRY(CheckHRESULT(result->GetResult(&compiledShader), "DXC get result"));
- return std::move(compiledShader);
- }
+ return DAWN_FORMAT_VALIDATION_ERROR("DXC compile failed with: %s",
+ static_cast<char*>(errors->GetBufferPointer()));
+ }
- std::string CompileFlagsToStringFXC(uint32_t compileFlags) {
- struct Flag {
- uint32_t value;
- const char* name;
- };
- constexpr Flag flags[] = {
- // Populated from d3dcompiler.h
+ ComPtr<IDxcBlob> compiledShader;
+ DAWN_TRY(CheckHRESULT(result->GetResult(&compiledShader), "DXC get result"));
+ return std::move(compiledShader);
+}
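
One subtlety in the define handling above: DxcDefine stores raw LPCWSTR pointers, so the wide-string storage must outlive the Compile() call. A minimal sketch of the pattern (the constant name is a made-up example):

    std::vector<std::pair<std::wstring, std::wstring>> storage = {{L"MY_OVERRIDE", L"1"}};
    std::vector<DxcDefine> defines;
    for (const auto& [name, value] : storage) {
        defines.push_back({name.c_str(), value.c_str()});  // pointers into `storage`; keep it alive
    }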
+
+std::string CompileFlagsToStringFXC(uint32_t compileFlags) {
+ struct Flag {
+ uint32_t value;
+ const char* name;
+ };
+ constexpr Flag flags[] = {
+ // Populated from d3dcompiler.h
#define F(f) Flag{f, #f}
- F(D3DCOMPILE_DEBUG),
- F(D3DCOMPILE_SKIP_VALIDATION),
- F(D3DCOMPILE_SKIP_OPTIMIZATION),
- F(D3DCOMPILE_PACK_MATRIX_ROW_MAJOR),
- F(D3DCOMPILE_PACK_MATRIX_COLUMN_MAJOR),
- F(D3DCOMPILE_PARTIAL_PRECISION),
- F(D3DCOMPILE_FORCE_VS_SOFTWARE_NO_OPT),
- F(D3DCOMPILE_FORCE_PS_SOFTWARE_NO_OPT),
- F(D3DCOMPILE_NO_PRESHADER),
- F(D3DCOMPILE_AVOID_FLOW_CONTROL),
- F(D3DCOMPILE_PREFER_FLOW_CONTROL),
- F(D3DCOMPILE_ENABLE_STRICTNESS),
- F(D3DCOMPILE_ENABLE_BACKWARDS_COMPATIBILITY),
- F(D3DCOMPILE_IEEE_STRICTNESS),
- F(D3DCOMPILE_RESERVED16),
- F(D3DCOMPILE_RESERVED17),
- F(D3DCOMPILE_WARNINGS_ARE_ERRORS),
- F(D3DCOMPILE_RESOURCES_MAY_ALIAS),
- F(D3DCOMPILE_ENABLE_UNBOUNDED_DESCRIPTOR_TABLES),
- F(D3DCOMPILE_ALL_RESOURCES_BOUND),
- F(D3DCOMPILE_DEBUG_NAME_FOR_SOURCE),
- F(D3DCOMPILE_DEBUG_NAME_FOR_BINARY),
+ F(D3DCOMPILE_DEBUG),
+ F(D3DCOMPILE_SKIP_VALIDATION),
+ F(D3DCOMPILE_SKIP_OPTIMIZATION),
+ F(D3DCOMPILE_PACK_MATRIX_ROW_MAJOR),
+ F(D3DCOMPILE_PACK_MATRIX_COLUMN_MAJOR),
+ F(D3DCOMPILE_PARTIAL_PRECISION),
+ F(D3DCOMPILE_FORCE_VS_SOFTWARE_NO_OPT),
+ F(D3DCOMPILE_FORCE_PS_SOFTWARE_NO_OPT),
+ F(D3DCOMPILE_NO_PRESHADER),
+ F(D3DCOMPILE_AVOID_FLOW_CONTROL),
+ F(D3DCOMPILE_PREFER_FLOW_CONTROL),
+ F(D3DCOMPILE_ENABLE_STRICTNESS),
+ F(D3DCOMPILE_ENABLE_BACKWARDS_COMPATIBILITY),
+ F(D3DCOMPILE_IEEE_STRICTNESS),
+ F(D3DCOMPILE_RESERVED16),
+ F(D3DCOMPILE_RESERVED17),
+ F(D3DCOMPILE_WARNINGS_ARE_ERRORS),
+ F(D3DCOMPILE_RESOURCES_MAY_ALIAS),
+ F(D3DCOMPILE_ENABLE_UNBOUNDED_DESCRIPTOR_TABLES),
+ F(D3DCOMPILE_ALL_RESOURCES_BOUND),
+ F(D3DCOMPILE_DEBUG_NAME_FOR_SOURCE),
+ F(D3DCOMPILE_DEBUG_NAME_FOR_BINARY),
#undef F
- };
-
- std::string result;
- for (const Flag& f : flags) {
- if ((compileFlags & f.value) != 0) {
- result += f.name + std::string("\n");
- }
- }
+ };
- // Optimization level must be handled separately as two bits are used, and the values
- // don't map neatly to 0-3.
- constexpr uint32_t d3dCompileFlagsBits = D3DCOMPILE_OPTIMIZATION_LEVEL2;
- switch (compileFlags & d3dCompileFlagsBits) {
- case D3DCOMPILE_OPTIMIZATION_LEVEL0:
- result += "D3DCOMPILE_OPTIMIZATION_LEVEL0";
- break;
- case D3DCOMPILE_OPTIMIZATION_LEVEL1:
- result += "D3DCOMPILE_OPTIMIZATION_LEVEL1";
- break;
- case D3DCOMPILE_OPTIMIZATION_LEVEL2:
- result += "D3DCOMPILE_OPTIMIZATION_LEVEL2";
- break;
- case D3DCOMPILE_OPTIMIZATION_LEVEL3:
- result += "D3DCOMPILE_OPTIMIZATION_LEVEL3";
- break;
- }
- result += std::string("\n");
-
- return result;
+ std::string result;
+ for (const Flag& f : flags) {
+ if ((compileFlags & f.value) != 0) {
+ result += f.name + std::string("\n");
}
+ }
- ResultOrError<ComPtr<ID3DBlob>> CompileShaderFXC(const PlatformFunctions* functions,
- const ShaderCompilationRequest& request,
- const std::string& hlslSource) {
- const char* targetProfile = nullptr;
- switch (request.stage) {
- case SingleShaderStage::Vertex:
- targetProfile = "vs_5_1";
- break;
- case SingleShaderStage::Fragment:
- targetProfile = "ps_5_1";
- break;
- case SingleShaderStage::Compute:
- targetProfile = "cs_5_1";
- break;
- }
-
- ComPtr<ID3DBlob> compiledShader;
- ComPtr<ID3DBlob> errors;
-
- // Build defines for overridable constants
- const D3D_SHADER_MACRO* pDefines = nullptr;
- std::vector<D3D_SHADER_MACRO> fxcDefines;
- if (request.defineStrings.size() > 0) {
- fxcDefines.reserve(request.defineStrings.size() + 1);
- for (const auto& [name, value] : request.defineStrings) {
- fxcDefines.push_back({name.c_str(), value.c_str()});
- }
- // d3dCompile D3D_SHADER_MACRO* pDefines is a nullptr terminated array
- fxcDefines.push_back({nullptr, nullptr});
- pDefines = fxcDefines.data();
- }
+ // Optimization level must be handled separately as two bits are used, and the values
+ // don't map neatly to 0-3.
+ constexpr uint32_t d3dCompileFlagsBits = D3DCOMPILE_OPTIMIZATION_LEVEL2;
+ switch (compileFlags & d3dCompileFlagsBits) {
+ case D3DCOMPILE_OPTIMIZATION_LEVEL0:
+ result += "D3DCOMPILE_OPTIMIZATION_LEVEL0";
+ break;
+ case D3DCOMPILE_OPTIMIZATION_LEVEL1:
+ result += "D3DCOMPILE_OPTIMIZATION_LEVEL1";
+ break;
+ case D3DCOMPILE_OPTIMIZATION_LEVEL2:
+ result += "D3DCOMPILE_OPTIMIZATION_LEVEL2";
+ break;
+ case D3DCOMPILE_OPTIMIZATION_LEVEL3:
+ result += "D3DCOMPILE_OPTIMIZATION_LEVEL3";
+ break;
+ }
+ result += std::string("\n");
+
+ return result;
+}
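
The optimization level has to be decoded separately because, per d3dcompiler.h, D3DCOMPILE_OPTIMIZATION_LEVEL0 is (1 << 14), LEVEL1 is 0, LEVEL3 is (1 << 15), and LEVEL2 sets both bits, which is why LEVEL2 doubles as the mask for the whole two-bit field. A small sketch of that decoding:

    uint32_t compileFlags = D3DCOMPILE_OPTIMIZATION_LEVEL3;          // example input
    uint32_t level = compileFlags & D3DCOMPILE_OPTIMIZATION_LEVEL2;  // LEVEL2 masks both bits
    bool isDefaultLevel = (level == D3DCOMPILE_OPTIMIZATION_LEVEL1); // LEVEL1 == 0 (the default); false here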
+
+ResultOrError<ComPtr<ID3DBlob>> CompileShaderFXC(const PlatformFunctions* functions,
+ const ShaderCompilationRequest& request,
+ const std::string& hlslSource) {
+ const char* targetProfile = nullptr;
+ switch (request.stage) {
+ case SingleShaderStage::Vertex:
+ targetProfile = "vs_5_1";
+ break;
+ case SingleShaderStage::Fragment:
+ targetProfile = "ps_5_1";
+ break;
+ case SingleShaderStage::Compute:
+ targetProfile = "cs_5_1";
+ break;
+ }
- DAWN_INVALID_IF(FAILED(functions->d3dCompile(
- hlslSource.c_str(), hlslSource.length(), nullptr, pDefines, nullptr,
- request.entryPointName, targetProfile, request.compileFlags, 0,
- &compiledShader, &errors)),
- "D3D compile failed with: %s",
- static_cast<char*>(errors->GetBufferPointer()));
+ ComPtr<ID3DBlob> compiledShader;
+ ComPtr<ID3DBlob> errors;
- return std::move(compiledShader);
+ // Build defines for overridable constants
+ const D3D_SHADER_MACRO* pDefines = nullptr;
+ std::vector<D3D_SHADER_MACRO> fxcDefines;
+ if (request.defineStrings.size() > 0) {
+ fxcDefines.reserve(request.defineStrings.size() + 1);
+ for (const auto& [name, value] : request.defineStrings) {
+ fxcDefines.push_back({name.c_str(), value.c_str()});
}
+        // d3dCompile's D3D_SHADER_MACRO* pDefines parameter is a nullptr-terminated array
+ fxcDefines.push_back({nullptr, nullptr});
+ pDefines = fxcDefines.data();
+ }
- ResultOrError<std::string> TranslateToHLSL(dawn::platform::Platform* platform,
- const ShaderCompilationRequest& request,
- std::string* remappedEntryPointName) {
- std::ostringstream errorStream;
- errorStream << "Tint HLSL failure:" << std::endl;
+ DAWN_INVALID_IF(
+ FAILED(functions->d3dCompile(hlslSource.c_str(), hlslSource.length(), nullptr, pDefines,
+ nullptr, request.entryPointName, targetProfile,
+ request.compileFlags, 0, &compiledShader, &errors)),
+ "D3D compile failed with: %s", static_cast<char*>(errors->GetBufferPointer()));
- tint::transform::Manager transformManager;
- tint::transform::DataMap transformInputs;
+ return std::move(compiledShader);
+}
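
As the comment above notes, D3DCompile reads pDefines until it reaches a {nullptr, nullptr} entry. A minimal sketch of that convention (the macro name is a made-up example):

    std::vector<D3D_SHADER_MACRO> macros = {
        {"WORKGROUP_SIZE_X", "64"},  // D3D_SHADER_MACRO is {LPCSTR Name; LPCSTR Definition;}
        {nullptr, nullptr},          // sentinel: D3DCompile stops reading here
    };
    // macros.data() is what gets passed as the pDefines argument.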
- if (request.isRobustnessEnabled) {
- transformManager.Add<tint::transform::Robustness>();
- }
+ResultOrError<std::string> TranslateToHLSL(dawn::platform::Platform* platform,
+ const ShaderCompilationRequest& request,
+ std::string* remappedEntryPointName) {
+ std::ostringstream errorStream;
+ errorStream << "Tint HLSL failure:" << std::endl;
- transformManager.Add<tint::transform::BindingRemapper>();
+ tint::transform::Manager transformManager;
+ tint::transform::DataMap transformInputs;
- transformManager.Add<tint::transform::SingleEntryPoint>();
- transformInputs.Add<tint::transform::SingleEntryPoint::Config>(request.entryPointName);
+ if (request.isRobustnessEnabled) {
+ transformManager.Add<tint::transform::Robustness>();
+ }
- transformManager.Add<tint::transform::Renamer>();
+ transformManager.Add<tint::transform::BindingRemapper>();
- if (request.disableSymbolRenaming) {
- // We still need to rename HLSL reserved keywords
- transformInputs.Add<tint::transform::Renamer::Config>(
- tint::transform::Renamer::Target::kHlslKeywords);
- }
+ transformManager.Add<tint::transform::SingleEntryPoint>();
+ transformInputs.Add<tint::transform::SingleEntryPoint::Config>(request.entryPointName);
- // D3D12 registers like `t3` and `c3` have the same bindingOffset number in
- // the remapping but should not be considered a collision because they have
- // different types.
- const bool mayCollide = true;
- transformInputs.Add<tint::transform::BindingRemapper::Remappings>(
- std::move(request.remappedBindingPoints), std::move(request.remappedAccessControls),
- mayCollide);
+ transformManager.Add<tint::transform::Renamer>();
- tint::Program transformedProgram;
- tint::transform::DataMap transformOutputs;
- {
- TRACE_EVENT0(platform, General, "RunTransforms");
- DAWN_TRY_ASSIGN(transformedProgram,
- RunTransforms(&transformManager, request.program, transformInputs,
- &transformOutputs, nullptr));
- }
+ if (request.disableSymbolRenaming) {
+ // We still need to rename HLSL reserved keywords
+ transformInputs.Add<tint::transform::Renamer::Config>(
+ tint::transform::Renamer::Target::kHlslKeywords);
+ }
- if (auto* data = transformOutputs.Get<tint::transform::Renamer::Data>()) {
- auto it = data->remappings.find(request.entryPointName);
- if (it != data->remappings.end()) {
- *remappedEntryPointName = it->second;
- } else {
- DAWN_INVALID_IF(!request.disableSymbolRenaming,
- "Could not find remapped name for entry point.");
+ // D3D12 registers like `t3` and `c3` have the same bindingOffset number in
+ // the remapping but should not be considered a collision because they have
+ // different types.
+ const bool mayCollide = true;
+ transformInputs.Add<tint::transform::BindingRemapper::Remappings>(
+ std::move(request.remappedBindingPoints), std::move(request.remappedAccessControls),
+ mayCollide);
+
+ tint::Program transformedProgram;
+ tint::transform::DataMap transformOutputs;
+ {
+ TRACE_EVENT0(platform, General, "RunTransforms");
+ DAWN_TRY_ASSIGN(transformedProgram,
+ RunTransforms(&transformManager, request.program, transformInputs,
+ &transformOutputs, nullptr));
+ }
- *remappedEntryPointName = request.entryPointName;
- }
- } else {
- return DAWN_FORMAT_VALIDATION_ERROR("Transform output missing renamer data.");
- }
+ if (auto* data = transformOutputs.Get<tint::transform::Renamer::Data>()) {
+ auto it = data->remappings.find(request.entryPointName);
+ if (it != data->remappings.end()) {
+ *remappedEntryPointName = it->second;
+ } else {
+ DAWN_INVALID_IF(!request.disableSymbolRenaming,
+ "Could not find remapped name for entry point.");
- tint::writer::hlsl::Options options;
- options.disable_workgroup_init = request.disableWorkgroupInit;
- if (request.usesNumWorkgroups) {
- options.root_constant_binding_point.group = request.numWorkgroupsRegisterSpace;
- options.root_constant_binding_point.binding = request.numWorkgroupsShaderRegister;
- }
- // TODO(dawn:549): HLSL generation outputs the indices into the
- // array_length_from_uniform buffer that were actually used. When the blob cache can
- // store more than compiled shaders, we should reflect these used indices and store
- // them as well. This would allow us to only upload root constants that are actually
- // read by the shader.
- options.array_length_from_uniform = request.arrayLengthFromUniform;
- TRACE_EVENT0(platform, General, "tint::writer::hlsl::Generate");
- auto result = tint::writer::hlsl::Generate(&transformedProgram, options);
- DAWN_INVALID_IF(!result.success, "An error occured while generating HLSL: %s",
- result.error);
-
- return std::move(result.hlsl);
+ *remappedEntryPointName = request.entryPointName;
}
+ } else {
+ return DAWN_FORMAT_VALIDATION_ERROR("Transform output missing renamer data.");
+ }
- template <typename F>
- MaybeError CompileShader(dawn::platform::Platform* platform,
- const PlatformFunctions* functions,
- IDxcLibrary* dxcLibrary,
- IDxcCompiler* dxcCompiler,
- ShaderCompilationRequest&& request,
- bool dumpShaders,
- F&& DumpShadersEmitLog,
- CompiledShader* compiledShader) {
- // Compile the source shader to HLSL.
- std::string hlslSource;
- std::string remappedEntryPoint;
- DAWN_TRY_ASSIGN(hlslSource, TranslateToHLSL(platform, request, &remappedEntryPoint));
- if (dumpShaders) {
- std::ostringstream dumpedMsg;
- dumpedMsg << "/* Dumped generated HLSL */" << std::endl << hlslSource;
- DumpShadersEmitLog(WGPULoggingType_Info, dumpedMsg.str().c_str());
- }
- request.entryPointName = remappedEntryPoint.c_str();
- switch (request.compiler) {
- case ShaderCompilationRequest::Compiler::DXC: {
- TRACE_EVENT0(platform, General, "CompileShaderDXC");
- DAWN_TRY_ASSIGN(compiledShader->compiledDXCShader,
- CompileShaderDXC(dxcLibrary, dxcCompiler, request, hlslSource));
- break;
- }
- case ShaderCompilationRequest::Compiler::FXC: {
- TRACE_EVENT0(platform, General, "CompileShaderFXC");
- DAWN_TRY_ASSIGN(compiledShader->compiledFXCShader,
- CompileShaderFXC(functions, request, hlslSource));
- break;
- }
- }
+ tint::writer::hlsl::Options options;
+ options.disable_workgroup_init = request.disableWorkgroupInit;
+ if (request.usesNumWorkgroups) {
+ options.root_constant_binding_point = tint::sem::BindingPoint{
+ request.numWorkgroupsRegisterSpace, request.numWorkgroupsShaderRegister};
+ }
+ // TODO(dawn:549): HLSL generation outputs the indices into the
+ // array_length_from_uniform buffer that were actually used. When the blob cache can
+ // store more than compiled shaders, we should reflect these used indices and store
+ // them as well. This would allow us to only upload root constants that are actually
+ // read by the shader.
+ options.array_length_from_uniform = request.arrayLengthFromUniform;
+ TRACE_EVENT0(platform, General, "tint::writer::hlsl::Generate");
+ auto result = tint::writer::hlsl::Generate(&transformedProgram, options);
+    DAWN_INVALID_IF(!result.success, "An error occurred while generating HLSL: %s", result.error);
+
+ return std::move(result.hlsl);
+}
+
+template <typename F>
+MaybeError CompileShader(dawn::platform::Platform* platform,
+ const PlatformFunctions* functions,
+ IDxcLibrary* dxcLibrary,
+ IDxcCompiler* dxcCompiler,
+ ShaderCompilationRequest&& request,
+ bool dumpShaders,
+ F&& DumpShadersEmitLog,
+ CompiledShader* compiledShader) {
+ // Compile the source shader to HLSL.
+ std::string hlslSource;
+ std::string remappedEntryPoint;
+ DAWN_TRY_ASSIGN(hlslSource, TranslateToHLSL(platform, request, &remappedEntryPoint));
+ if (dumpShaders) {
+ std::ostringstream dumpedMsg;
+ dumpedMsg << "/* Dumped generated HLSL */" << std::endl << hlslSource;
+ DumpShadersEmitLog(WGPULoggingType_Info, dumpedMsg.str().c_str());
+ }
+ request.entryPointName = remappedEntryPoint.c_str();
+ switch (request.compiler) {
+ case ShaderCompilationRequest::Compiler::DXC: {
+ TRACE_EVENT0(platform, General, "CompileShaderDXC");
+ DAWN_TRY_ASSIGN(compiledShader->compiledDXCShader,
+ CompileShaderDXC(dxcLibrary, dxcCompiler, request, hlslSource));
+ break;
+ }
+ case ShaderCompilationRequest::Compiler::FXC: {
+ TRACE_EVENT0(platform, General, "CompileShaderFXC");
+ DAWN_TRY_ASSIGN(compiledShader->compiledFXCShader,
+ CompileShaderFXC(functions, request, hlslSource));
+ break;
+ }
+ }
- if (dumpShaders && request.compiler == ShaderCompilationRequest::Compiler::FXC) {
- std::ostringstream dumpedMsg;
- dumpedMsg << "/* FXC compile flags */ " << std::endl
- << CompileFlagsToStringFXC(request.compileFlags) << std::endl;
-
- dumpedMsg << "/* Dumped disassembled DXBC */" << std::endl;
-
- ComPtr<ID3DBlob> disassembly;
- if (FAILED(functions->d3dDisassemble(
- compiledShader->compiledFXCShader->GetBufferPointer(),
- compiledShader->compiledFXCShader->GetBufferSize(), 0, nullptr,
- &disassembly))) {
- dumpedMsg << "D3D disassemble failed" << std::endl;
- } else {
- dumpedMsg << reinterpret_cast<const char*>(disassembly->GetBufferPointer());
- }
- DumpShadersEmitLog(WGPULoggingType_Info, dumpedMsg.str().c_str());
- }
+ if (dumpShaders && request.compiler == ShaderCompilationRequest::Compiler::FXC) {
+ std::ostringstream dumpedMsg;
+ dumpedMsg << "/* FXC compile flags */ " << std::endl
+ << CompileFlagsToStringFXC(request.compileFlags) << std::endl;
+
+ dumpedMsg << "/* Dumped disassembled DXBC */" << std::endl;
- return {};
+ ComPtr<ID3DBlob> disassembly;
+ if (FAILED(functions->d3dDisassemble(compiledShader->compiledFXCShader->GetBufferPointer(),
+ compiledShader->compiledFXCShader->GetBufferSize(), 0,
+ nullptr, &disassembly))) {
+ dumpedMsg << "D3D disassemble failed" << std::endl;
+ } else {
+ dumpedMsg << reinterpret_cast<const char*>(disassembly->GetBufferPointer());
}
+ DumpShadersEmitLog(WGPULoggingType_Info, dumpedMsg.str().c_str());
+ }
- } // anonymous namespace
+ return {};
+}
- // static
- ResultOrError<Ref<ShaderModule>> ShaderModule::Create(Device* device,
- const ShaderModuleDescriptor* descriptor,
- ShaderModuleParseResult* parseResult) {
- Ref<ShaderModule> module = AcquireRef(new ShaderModule(device, descriptor));
- DAWN_TRY(module->Initialize(parseResult));
- return module;
- }
+} // anonymous namespace
- ShaderModule::ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor)
- : ShaderModuleBase(device, descriptor) {
- }
+// static
+ResultOrError<Ref<ShaderModule>> ShaderModule::Create(
+ Device* device,
+ const ShaderModuleDescriptor* descriptor,
+ ShaderModuleParseResult* parseResult,
+ OwnedCompilationMessages* compilationMessages) {
+ Ref<ShaderModule> module = AcquireRef(new ShaderModule(device, descriptor));
+ DAWN_TRY(module->Initialize(parseResult, compilationMessages));
+ return module;
+}
- MaybeError ShaderModule::Initialize(ShaderModuleParseResult* parseResult) {
- ScopedTintICEHandler scopedICEHandler(GetDevice());
- return InitializeBase(parseResult);
- }
+ShaderModule::ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor)
+ : ShaderModuleBase(device, descriptor) {}
- ResultOrError<CompiledShader> ShaderModule::Compile(const ProgrammableStage& programmableStage,
- SingleShaderStage stage,
- const PipelineLayout* layout,
- uint32_t compileFlags) {
- TRACE_EVENT0(GetDevice()->GetPlatform(), General, "ShaderModuleD3D12::Compile");
- ASSERT(!IsError());
+MaybeError ShaderModule::Initialize(ShaderModuleParseResult* parseResult,
+ OwnedCompilationMessages* compilationMessages) {
+ ScopedTintICEHandler scopedICEHandler(GetDevice());
+ return InitializeBase(parseResult, compilationMessages);
+}
- ScopedTintICEHandler scopedICEHandler(GetDevice());
+ResultOrError<CompiledShader> ShaderModule::Compile(const ProgrammableStage& programmableStage,
+ SingleShaderStage stage,
+ const PipelineLayout* layout,
+ uint32_t compileFlags) {
+ TRACE_EVENT0(GetDevice()->GetPlatform(), General, "ShaderModuleD3D12::Compile");
+ ASSERT(!IsError());
- Device* device = ToBackend(GetDevice());
+ ScopedTintICEHandler scopedICEHandler(GetDevice());
- CompiledShader compiledShader = {};
+ Device* device = ToBackend(GetDevice());
- tint::transform::Manager transformManager;
- tint::transform::DataMap transformInputs;
+ CompiledShader compiledShader = {};
- const tint::Program* program = GetTintProgram();
- tint::Program programAsValue;
+ tint::transform::Manager transformManager;
+ tint::transform::DataMap transformInputs;
- AddExternalTextureTransform(layout, &transformManager, &transformInputs);
+ const tint::Program* program = GetTintProgram();
+ tint::Program programAsValue;
- if (stage == SingleShaderStage::Vertex) {
- transformManager.Add<tint::transform::FirstIndexOffset>();
- transformInputs.Add<tint::transform::FirstIndexOffset::BindingPoint>(
- layout->GetFirstIndexOffsetShaderRegister(),
- layout->GetFirstIndexOffsetRegisterSpace());
- }
+ AddExternalTextureTransform(layout, &transformManager, &transformInputs);
- tint::transform::DataMap transformOutputs;
- DAWN_TRY_ASSIGN(programAsValue, RunTransforms(&transformManager, program, transformInputs,
- &transformOutputs, nullptr));
- program = &programAsValue;
-
- if (stage == SingleShaderStage::Vertex) {
- if (auto* data = transformOutputs.Get<tint::transform::FirstIndexOffset::Data>()) {
- // TODO(dawn:549): Consider adding this information to the pipeline cache once we
- // can store more than the shader blob in it.
- compiledShader.firstOffsetInfo.usesVertexIndex = data->has_vertex_index;
- if (compiledShader.firstOffsetInfo.usesVertexIndex) {
- compiledShader.firstOffsetInfo.vertexIndexOffset = data->first_vertex_offset;
- }
- compiledShader.firstOffsetInfo.usesInstanceIndex = data->has_instance_index;
- if (compiledShader.firstOffsetInfo.usesInstanceIndex) {
- compiledShader.firstOffsetInfo.instanceIndexOffset =
- data->first_instance_offset;
- }
- }
- }
+ if (stage == SingleShaderStage::Vertex) {
+ transformManager.Add<tint::transform::FirstIndexOffset>();
+ transformInputs.Add<tint::transform::FirstIndexOffset::BindingPoint>(
+ layout->GetFirstIndexOffsetShaderRegister(),
+ layout->GetFirstIndexOffsetRegisterSpace());
+ }
- ShaderCompilationRequest request;
- DAWN_TRY_ASSIGN(
- request, ShaderCompilationRequest::Create(
- programmableStage.entryPoint.c_str(), stage, layout, compileFlags, device,
- program, GetEntryPoint(programmableStage.entryPoint), programmableStage));
-
- PersistentCacheKey shaderCacheKey;
- DAWN_TRY_ASSIGN(shaderCacheKey, request.CreateCacheKey());
-
- DAWN_TRY_ASSIGN(
- compiledShader.cachedShader,
- device->GetPersistentCache()->GetOrCreate(
- shaderCacheKey, [&](auto doCache) -> MaybeError {
- DAWN_TRY(CompileShader(
- device->GetPlatform(), device->GetFunctions(),
- device->IsToggleEnabled(Toggle::UseDXC) ? device->GetDxcLibrary().Get()
- : nullptr,
- device->IsToggleEnabled(Toggle::UseDXC) ? device->GetDxcCompiler().Get()
- : nullptr,
- std::move(request), device->IsToggleEnabled(Toggle::DumpShaders),
- [&](WGPULoggingType loggingType, const char* message) {
- GetDevice()->EmitLog(loggingType, message);
- },
- &compiledShader));
- const D3D12_SHADER_BYTECODE shader = compiledShader.GetD3D12ShaderBytecode();
- doCache(shader.pShaderBytecode, shader.BytecodeLength);
- return {};
- }));
-
- return std::move(compiledShader);
- }
-
- D3D12_SHADER_BYTECODE CompiledShader::GetD3D12ShaderBytecode() const {
- if (cachedShader.buffer != nullptr) {
- return {cachedShader.buffer.get(), cachedShader.bufferSize};
- } else if (compiledFXCShader != nullptr) {
- return {compiledFXCShader->GetBufferPointer(), compiledFXCShader->GetBufferSize()};
- } else if (compiledDXCShader != nullptr) {
- return {compiledDXCShader->GetBufferPointer(), compiledDXCShader->GetBufferSize()};
+ tint::transform::DataMap transformOutputs;
+ DAWN_TRY_ASSIGN(programAsValue, RunTransforms(&transformManager, program, transformInputs,
+ &transformOutputs, nullptr));
+ program = &programAsValue;
+
+ if (stage == SingleShaderStage::Vertex) {
+ if (auto* data = transformOutputs.Get<tint::transform::FirstIndexOffset::Data>()) {
+ // TODO(dawn:549): Consider adding this information to the pipeline cache once we
+ // can store more than the shader blob in it.
+ compiledShader.usesVertexOrInstanceIndex = data->has_vertex_or_instance_index;
}
- UNREACHABLE();
- return {};
}
+
+ ShaderCompilationRequest request;
+ DAWN_TRY_ASSIGN(request,
+ ShaderCompilationRequest::Create(
+ programmableStage.entryPoint.c_str(), stage, layout, compileFlags, device,
+ program, GetEntryPoint(programmableStage.entryPoint), programmableStage));
+
+ // TODO(dawn:1341): Add shader cache key generation and caching for the compiled shader.
+ DAWN_TRY(CompileShader(
+ device->GetPlatform(), device->GetFunctions(),
+ device->IsToggleEnabled(Toggle::UseDXC) ? device->GetDxcLibrary().Get() : nullptr,
+ device->IsToggleEnabled(Toggle::UseDXC) ? device->GetDxcCompiler().Get() : nullptr,
+ std::move(request), device->IsToggleEnabled(Toggle::DumpShaders),
+ [&](WGPULoggingType loggingType, const char* message) {
+ GetDevice()->EmitLog(loggingType, message);
+ },
+ &compiledShader));
+ return std::move(compiledShader);
+}
+
+D3D12_SHADER_BYTECODE CompiledShader::GetD3D12ShaderBytecode() const {
+ if (compiledFXCShader != nullptr) {
+ return {compiledFXCShader->GetBufferPointer(), compiledFXCShader->GetBufferSize()};
+ } else if (compiledDXCShader != nullptr) {
+ return {compiledDXCShader->GetBufferPointer(), compiledDXCShader->GetBufferSize()};
+ }
+ UNREACHABLE();
+ return {};
+}
} // namespace dawn::native::d3d12
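
For context on how the returned D3D12_SHADER_BYTECODE is consumed downstream, a minimal sketch of the usual pipeline-state setup (variable names are assumptions, not taken from this patch):

    D3D12_GRAPHICS_PIPELINE_STATE_DESC psoDesc = {};
    psoDesc.VS = vertexShader.GetD3D12ShaderBytecode();    // {pShaderBytecode, BytecodeLength}
    psoDesc.PS = fragmentShader.GetD3D12ShaderBytecode();  // the backing blob must stay alive until PSO creation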
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/ShaderModuleD3D12.h b/chromium/third_party/dawn/src/dawn/native/d3d12/ShaderModuleD3D12.h
index 71d923ad5ed..7f68b10221b 100644
--- a/chromium/third_party/dawn/src/dawn/native/d3d12/ShaderModuleD3D12.h
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/ShaderModuleD3D12.h
@@ -15,54 +15,47 @@
#ifndef SRC_DAWN_NATIVE_D3D12_SHADERMODULED3D12_H_
#define SRC_DAWN_NATIVE_D3D12_SHADERMODULED3D12_H_
-#include "dawn/native/PersistentCache.h"
#include "dawn/native/ShaderModule.h"
#include "dawn/native/d3d12/d3d12_platform.h"
namespace dawn::native {
- struct ProgrammableStage;
+struct ProgrammableStage;
} // namespace dawn::native
namespace dawn::native::d3d12 {
- class Device;
- class PipelineLayout;
-
- struct FirstOffsetInfo {
- bool usesVertexIndex;
- uint32_t vertexIndexOffset;
- bool usesInstanceIndex;
- uint32_t instanceIndexOffset;
- };
-
- // Manages a ref to one of the various representations of shader blobs and information used to
- // emulate vertex/instance index starts
- struct CompiledShader {
- ScopedCachedBlob cachedShader;
- ComPtr<ID3DBlob> compiledFXCShader;
- ComPtr<IDxcBlob> compiledDXCShader;
- D3D12_SHADER_BYTECODE GetD3D12ShaderBytecode() const;
-
- FirstOffsetInfo firstOffsetInfo;
- };
-
- class ShaderModule final : public ShaderModuleBase {
- public:
- static ResultOrError<Ref<ShaderModule>> Create(Device* device,
- const ShaderModuleDescriptor* descriptor,
- ShaderModuleParseResult* parseResult);
-
- ResultOrError<CompiledShader> Compile(const ProgrammableStage& programmableStage,
- SingleShaderStage stage,
- const PipelineLayout* layout,
- uint32_t compileFlags);
-
- private:
- ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor);
- ~ShaderModule() override = default;
- MaybeError Initialize(ShaderModuleParseResult* parseResult);
- };
+class Device;
+class PipelineLayout;
+
+// Manages a ref to one of the various representations of shader blobs and information used to
+// emulate vertex/instance index starts
+struct CompiledShader {
+ ComPtr<ID3DBlob> compiledFXCShader;
+ ComPtr<IDxcBlob> compiledDXCShader;
+ D3D12_SHADER_BYTECODE GetD3D12ShaderBytecode() const;
+
+ bool usesVertexOrInstanceIndex;
+};
+
+class ShaderModule final : public ShaderModuleBase {
+ public:
+ static ResultOrError<Ref<ShaderModule>> Create(Device* device,
+ const ShaderModuleDescriptor* descriptor,
+ ShaderModuleParseResult* parseResult,
+ OwnedCompilationMessages* compilationMessages);
+
+ ResultOrError<CompiledShader> Compile(const ProgrammableStage& programmableStage,
+ SingleShaderStage stage,
+ const PipelineLayout* layout,
+ uint32_t compileFlags);
+
+ private:
+ ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor);
+ ~ShaderModule() override = default;
+ MaybeError Initialize(ShaderModuleParseResult* parseResult,
+ OwnedCompilationMessages* compilationMessages);
+};
} // namespace dawn::native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.cpp b/chromium/third_party/dawn/src/dawn/native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.cpp
index 32d6cd6cb9c..fe99a63ac9d 100644
--- a/chromium/third_party/dawn/src/dawn/native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.cpp
@@ -13,6 +13,11 @@
// limitations under the License.
#include "dawn/native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.h"
+
+#include <algorithm>
+#include <limits>
+#include <utility>
+
#include "dawn/native/d3d12/D3D12Error.h"
#include "dawn/native/d3d12/DeviceD3D12.h"
#include "dawn/native/d3d12/GPUDescriptorHeapAllocationD3D12.h"
@@ -20,235 +25,231 @@
namespace dawn::native::d3d12 {
- // Limits the min/max heap size to always be some known value for testing.
- // Thresholds should be adjusted (lower == faster) to avoid tests taking too long to complete.
- // We change the value from {1024, 512} to {32, 16} because we use blending
- // for D3D12DescriptorHeapTests.EncodeManyUBO and R16Float has limited range
- // and low precision at big integer.
- static constexpr const uint32_t kShaderVisibleSmallHeapSizes[] = {32, 16};
-
- uint32_t GetD3D12ShaderVisibleHeapMinSize(D3D12_DESCRIPTOR_HEAP_TYPE heapType,
- bool useSmallSize) {
- if (useSmallSize) {
- return kShaderVisibleSmallHeapSizes[heapType];
- }
-
- // Minimum heap size must be large enough to satisfy the largest descriptor allocation
- // request and to amortize the cost of sub-allocation. But small enough to avoid wasting
- // memory should only a tiny fraction ever be used.
- // TODO(dawn:155): Figure out these values.
- switch (heapType) {
- case D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV:
- return 4096;
- case D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER:
- return 256;
- default:
- UNREACHABLE();
- }
- }
-
- uint32_t GetD3D12ShaderVisibleHeapMaxSize(D3D12_DESCRIPTOR_HEAP_TYPE heapType,
- bool useSmallSize) {
- if (useSmallSize) {
- return kShaderVisibleSmallHeapSizes[heapType];
- }
-
- switch (heapType) {
- case D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV:
- return D3D12_MAX_SHADER_VISIBLE_DESCRIPTOR_HEAP_SIZE_TIER_1;
- case D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER:
- return D3D12_MAX_SHADER_VISIBLE_SAMPLER_HEAP_SIZE;
- default:
- UNREACHABLE();
- }
- }
-
- D3D12_DESCRIPTOR_HEAP_FLAGS GetD3D12HeapFlags(D3D12_DESCRIPTOR_HEAP_TYPE heapType) {
- switch (heapType) {
- case D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV:
- case D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER:
- return D3D12_DESCRIPTOR_HEAP_FLAG_SHADER_VISIBLE;
- default:
- UNREACHABLE();
- }
- }
-
- // static
- ResultOrError<std::unique_ptr<ShaderVisibleDescriptorAllocator>>
- ShaderVisibleDescriptorAllocator::Create(Device* device, D3D12_DESCRIPTOR_HEAP_TYPE heapType) {
- std::unique_ptr<ShaderVisibleDescriptorAllocator> allocator =
- std::make_unique<ShaderVisibleDescriptorAllocator>(device, heapType);
- DAWN_TRY(allocator->AllocateAndSwitchShaderVisibleHeap());
- return std::move(allocator);
- }
-
- ShaderVisibleDescriptorAllocator::ShaderVisibleDescriptorAllocator(
- Device* device,
- D3D12_DESCRIPTOR_HEAP_TYPE heapType)
- : mHeapType(heapType),
- mDevice(device),
- mSizeIncrement(device->GetD3D12Device()->GetDescriptorHandleIncrementSize(heapType)),
- mDescriptorCount(GetD3D12ShaderVisibleHeapMinSize(
- heapType,
- mDevice->IsToggleEnabled(Toggle::UseD3D12SmallShaderVisibleHeapForTesting))) {
- ASSERT(heapType == D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV ||
- heapType == D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER);
- }
-
- bool ShaderVisibleDescriptorAllocator::AllocateGPUDescriptors(
- uint32_t descriptorCount,
- ExecutionSerial pendingSerial,
- D3D12_CPU_DESCRIPTOR_HANDLE* baseCPUDescriptor,
- GPUDescriptorHeapAllocation* allocation) {
- ASSERT(mHeap != nullptr);
- const uint64_t startOffset = mAllocator.Allocate(descriptorCount, pendingSerial);
- if (startOffset == RingBufferAllocator::kInvalidOffset) {
- return false;
+// Limits the min/max heap size to always be some known value for testing.
+// Thresholds should be adjusted (lower == faster) to avoid tests taking too long to complete.
+// We change the value from {1024, 512} to {32, 16} because we use blending
+// for D3D12DescriptorHeapTests.EncodeManyUBO and R16Float has limited range
+// and low precision at large integer values.
+static constexpr const uint32_t kShaderVisibleSmallHeapSizes[] = {32, 16};
+
+uint32_t GetD3D12ShaderVisibleHeapMinSize(D3D12_DESCRIPTOR_HEAP_TYPE heapType, bool useSmallSize) {
+ if (useSmallSize) {
+ return kShaderVisibleSmallHeapSizes[heapType];
+ }
+
+    // The minimum heap size must be large enough to satisfy the largest descriptor allocation
+    // request and to amortize the cost of sub-allocation, but small enough to avoid wasting
+    // memory should only a tiny fraction ever be used.
+ // TODO(dawn:155): Figure out these values.
+ switch (heapType) {
+ case D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV:
+ return 4096;
+ case D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER:
+ return 256;
+ default:
+ UNREACHABLE();
+ }
+}
+
+uint32_t GetD3D12ShaderVisibleHeapMaxSize(D3D12_DESCRIPTOR_HEAP_TYPE heapType, bool useSmallSize) {
+ if (useSmallSize) {
+ return kShaderVisibleSmallHeapSizes[heapType];
+ }
+
+ switch (heapType) {
+ case D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV:
+ return D3D12_MAX_SHADER_VISIBLE_DESCRIPTOR_HEAP_SIZE_TIER_1;
+ case D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER:
+ return D3D12_MAX_SHADER_VISIBLE_SAMPLER_HEAP_SIZE;
+ default:
+ UNREACHABLE();
+ }
+}
+
+D3D12_DESCRIPTOR_HEAP_FLAGS GetD3D12HeapFlags(D3D12_DESCRIPTOR_HEAP_TYPE heapType) {
+ switch (heapType) {
+ case D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV:
+ case D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER:
+ return D3D12_DESCRIPTOR_HEAP_FLAG_SHADER_VISIBLE;
+ default:
+ UNREACHABLE();
+ }
+}
+
+// static
+ResultOrError<std::unique_ptr<ShaderVisibleDescriptorAllocator>>
+ShaderVisibleDescriptorAllocator::Create(Device* device, D3D12_DESCRIPTOR_HEAP_TYPE heapType) {
+ std::unique_ptr<ShaderVisibleDescriptorAllocator> allocator =
+ std::make_unique<ShaderVisibleDescriptorAllocator>(device, heapType);
+ DAWN_TRY(allocator->AllocateAndSwitchShaderVisibleHeap());
+ return std::move(allocator);
+}
+
+ShaderVisibleDescriptorAllocator::ShaderVisibleDescriptorAllocator(
+ Device* device,
+ D3D12_DESCRIPTOR_HEAP_TYPE heapType)
+ : mHeapType(heapType),
+ mDevice(device),
+ mSizeIncrement(device->GetD3D12Device()->GetDescriptorHandleIncrementSize(heapType)),
+ mDescriptorCount(GetD3D12ShaderVisibleHeapMinSize(
+ heapType,
+ mDevice->IsToggleEnabled(Toggle::UseD3D12SmallShaderVisibleHeapForTesting))) {
+ ASSERT(heapType == D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV ||
+ heapType == D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER);
+}
+
+bool ShaderVisibleDescriptorAllocator::AllocateGPUDescriptors(
+ uint32_t descriptorCount,
+ ExecutionSerial pendingSerial,
+ D3D12_CPU_DESCRIPTOR_HANDLE* baseCPUDescriptor,
+ GPUDescriptorHeapAllocation* allocation) {
+ ASSERT(mHeap != nullptr);
+ const uint64_t startOffset = mAllocator.Allocate(descriptorCount, pendingSerial);
+ if (startOffset == RingBufferAllocator::kInvalidOffset) {
+ return false;
+ }
+
+ ID3D12DescriptorHeap* descriptorHeap = mHeap->GetD3D12DescriptorHeap();
+
+ const uint64_t heapOffset = mSizeIncrement * startOffset;
+
+ // Check for 32-bit overflow since CPU heap start handle uses size_t.
+ const size_t cpuHeapStartPtr = descriptorHeap->GetCPUDescriptorHandleForHeapStart().ptr;
+
+ ASSERT(heapOffset <= std::numeric_limits<size_t>::max() - cpuHeapStartPtr);
+
+ *baseCPUDescriptor = {cpuHeapStartPtr + static_cast<size_t>(heapOffset)};
+
+ const D3D12_GPU_DESCRIPTOR_HANDLE baseGPUDescriptor = {
+ descriptorHeap->GetGPUDescriptorHandleForHeapStart().ptr + heapOffset};
+
+ // Record both the device and heap serials to determine later if the allocations are
+ // still valid.
+ *allocation = GPUDescriptorHeapAllocation{baseGPUDescriptor, pendingSerial, mHeapSerial};
+
+ return true;
+}
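
The handle arithmetic above is the standard D3D12 pattern for addressing a slot in a descriptor heap; a standalone sketch (d3d12Device and heap are assumed to be in scope):

    uint32_t slotIndex = 5;  // example slot
    UINT increment = d3d12Device->GetDescriptorHandleIncrementSize(D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV);
    D3D12_CPU_DESCRIPTOR_HANDLE cpuSlot = {heap->GetCPUDescriptorHandleForHeapStart().ptr +
                                           static_cast<size_t>(increment) * slotIndex};
    D3D12_GPU_DESCRIPTOR_HANDLE gpuSlot = {heap->GetGPUDescriptorHandleForHeapStart().ptr +
                                           static_cast<uint64_t>(increment) * slotIndex};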
+
+ID3D12DescriptorHeap* ShaderVisibleDescriptorAllocator::GetShaderVisibleHeap() const {
+ return mHeap->GetD3D12DescriptorHeap();
+}
+
+void ShaderVisibleDescriptorAllocator::Tick(ExecutionSerial completedSerial) {
+ mAllocator.Deallocate(completedSerial);
+}
+
+ResultOrError<std::unique_ptr<ShaderVisibleDescriptorHeap>>
+ShaderVisibleDescriptorAllocator::AllocateHeap(uint32_t descriptorCount) const {
+ // The size in bytes of a descriptor heap is best calculated by the increment size
+ // multiplied by the number of descriptors. In practice, this is only an estimate and
+ // the actual size may vary depending on the driver.
+ const uint64_t kSize = mSizeIncrement * descriptorCount;
+
+ DAWN_TRY(mDevice->GetResidencyManager()->EnsureCanAllocate(kSize, MemorySegment::Local));
+
+ ComPtr<ID3D12DescriptorHeap> d3d12DescriptorHeap;
+ D3D12_DESCRIPTOR_HEAP_DESC heapDescriptor;
+ heapDescriptor.Type = mHeapType;
+ heapDescriptor.NumDescriptors = descriptorCount;
+ heapDescriptor.Flags = GetD3D12HeapFlags(mHeapType);
+ heapDescriptor.NodeMask = 0;
+ DAWN_TRY(CheckOutOfMemoryHRESULT(mDevice->GetD3D12Device()->CreateDescriptorHeap(
+ &heapDescriptor, IID_PPV_ARGS(&d3d12DescriptorHeap)),
+ "ID3D12Device::CreateDescriptorHeap"));
+
+ std::unique_ptr<ShaderVisibleDescriptorHeap> descriptorHeap =
+ std::make_unique<ShaderVisibleDescriptorHeap>(std::move(d3d12DescriptorHeap), kSize);
+
+    // We must track the allocation in the LRU when it is created; otherwise, the residency
+    // manager will see the allocation as non-resident in the later call to LockAllocation.
+ mDevice->GetResidencyManager()->TrackResidentAllocation(descriptorHeap.get());
+
+ return std::move(descriptorHeap);
+}
+
+// Creates a GPU descriptor heap that manages descriptors in a FIFO queue.
+MaybeError ShaderVisibleDescriptorAllocator::AllocateAndSwitchShaderVisibleHeap() {
+ std::unique_ptr<ShaderVisibleDescriptorHeap> descriptorHeap;
+ // Dynamically allocate using a two-phase allocation strategy.
+    // The first phase grows a small heap in power-of-two steps for light users, while the
+    // second phase pool-allocates maximum-sized heaps for heavy users.
+ if (mHeap != nullptr) {
+ mDevice->GetResidencyManager()->UnlockAllocation(mHeap.get());
+
+ const uint32_t maxDescriptorCount = GetD3D12ShaderVisibleHeapMaxSize(
+ mHeapType, mDevice->IsToggleEnabled(Toggle::UseD3D12SmallShaderVisibleHeapForTesting));
+ if (mDescriptorCount < maxDescriptorCount) {
+ // Phase #1. Grow the heaps in powers-of-two.
+ mDevice->ReferenceUntilUnused(mHeap->GetD3D12DescriptorHeap());
+ mDescriptorCount = std::min(mDescriptorCount * 2, maxDescriptorCount);
+ } else {
+ // Phase #2. Pool-allocate heaps.
+            // Return the switched-out heap to the pool and retrieve the oldest heap that is no
+            // longer used by the GPU. This maintains a heap buffer to avoid frequently re-creating
+            // heaps for heavy users.
+            // TODO(dawn:256): Consider periodically trimming to avoid OOM.
+ mPool.push_back({mDevice->GetPendingCommandSerial(), std::move(mHeap)});
+ if (mPool.front().heapSerial <= mDevice->GetCompletedCommandSerial()) {
+ descriptorHeap = std::move(mPool.front().heap);
+ mPool.pop_front();
+ }
}
-
- ID3D12DescriptorHeap* descriptorHeap = mHeap->GetD3D12DescriptorHeap();
-
- const uint64_t heapOffset = mSizeIncrement * startOffset;
-
- // Check for 32-bit overflow since CPU heap start handle uses size_t.
- const size_t cpuHeapStartPtr = descriptorHeap->GetCPUDescriptorHandleForHeapStart().ptr;
-
- ASSERT(heapOffset <= std::numeric_limits<size_t>::max() - cpuHeapStartPtr);
-
- *baseCPUDescriptor = {cpuHeapStartPtr + static_cast<size_t>(heapOffset)};
-
- const D3D12_GPU_DESCRIPTOR_HANDLE baseGPUDescriptor = {
- descriptorHeap->GetGPUDescriptorHandleForHeapStart().ptr + heapOffset};
-
- // Record both the device and heap serials to determine later if the allocations are
- // still valid.
- *allocation = GPUDescriptorHeapAllocation{baseGPUDescriptor, pendingSerial, mHeapSerial};
-
- return true;
}
- ID3D12DescriptorHeap* ShaderVisibleDescriptorAllocator::GetShaderVisibleHeap() const {
- return mHeap->GetD3D12DescriptorHeap();
+ if (descriptorHeap == nullptr) {
+ DAWN_TRY_ASSIGN(descriptorHeap, AllocateHeap(mDescriptorCount));
}
- void ShaderVisibleDescriptorAllocator::Tick(ExecutionSerial completedSerial) {
- mAllocator.Deallocate(completedSerial);
- }
+ DAWN_TRY(mDevice->GetResidencyManager()->LockAllocation(descriptorHeap.get()));
- ResultOrError<std::unique_ptr<ShaderVisibleDescriptorHeap>>
- ShaderVisibleDescriptorAllocator::AllocateHeap(uint32_t descriptorCount) const {
- // The size in bytes of a descriptor heap is best calculated by the increment size
- // multiplied by the number of descriptors. In practice, this is only an estimate and
- // the actual size may vary depending on the driver.
- const uint64_t kSize = mSizeIncrement * descriptorCount;
-
- DAWN_TRY(mDevice->GetResidencyManager()->EnsureCanAllocate(kSize, MemorySegment::Local));
-
- ComPtr<ID3D12DescriptorHeap> d3d12DescriptorHeap;
- D3D12_DESCRIPTOR_HEAP_DESC heapDescriptor;
- heapDescriptor.Type = mHeapType;
- heapDescriptor.NumDescriptors = descriptorCount;
- heapDescriptor.Flags = GetD3D12HeapFlags(mHeapType);
- heapDescriptor.NodeMask = 0;
- DAWN_TRY(CheckOutOfMemoryHRESULT(mDevice->GetD3D12Device()->CreateDescriptorHeap(
- &heapDescriptor, IID_PPV_ARGS(&d3d12DescriptorHeap)),
- "ID3D12Device::CreateDescriptorHeap"));
-
- std::unique_ptr<ShaderVisibleDescriptorHeap> descriptorHeap =
- std::make_unique<ShaderVisibleDescriptorHeap>(std::move(d3d12DescriptorHeap), kSize);
-
- // We must track the allocation in the LRU when it is created, otherwise the residency
- // manager will see the allocation as non-resident in the later call to LockAllocation.
- mDevice->GetResidencyManager()->TrackResidentAllocation(descriptorHeap.get());
-
- return std::move(descriptorHeap);
- }
+ // Create a FIFO buffer from the recently created heap.
+ mHeap = std::move(descriptorHeap);
+ mAllocator = RingBufferAllocator(mDescriptorCount);
- // Creates a GPU descriptor heap that manages descriptors in a FIFO queue.
- MaybeError ShaderVisibleDescriptorAllocator::AllocateAndSwitchShaderVisibleHeap() {
- std::unique_ptr<ShaderVisibleDescriptorHeap> descriptorHeap;
- // Dynamically allocate using a two-phase allocation strategy.
- // The first phase increasingly grows a small heap in binary sizes for light users while the
- // second phase pool-allocates largest sized heaps for heavy users.
- if (mHeap != nullptr) {
- mDevice->GetResidencyManager()->UnlockAllocation(mHeap.get());
-
- const uint32_t maxDescriptorCount = GetD3D12ShaderVisibleHeapMaxSize(
- mHeapType,
- mDevice->IsToggleEnabled(Toggle::UseD3D12SmallShaderVisibleHeapForTesting));
- if (mDescriptorCount < maxDescriptorCount) {
- // Phase #1. Grow the heaps in powers-of-two.
- mDevice->ReferenceUntilUnused(mHeap->GetD3D12DescriptorHeap());
- mDescriptorCount = std::min(mDescriptorCount * 2, maxDescriptorCount);
- } else {
- // Phase #2. Pool-allocate heaps.
- // Return the switched out heap to the pool and retrieve the oldest heap that is no
- // longer used by GPU. This maintains a heap buffer to avoid frequently re-creating
- // heaps for heavy users.
- // TODO(dawn:256): Consider periodically triming to avoid OOM.
- mPool.push_back({mDevice->GetPendingCommandSerial(), std::move(mHeap)});
- if (mPool.front().heapSerial <= mDevice->GetCompletedCommandSerial()) {
- descriptorHeap = std::move(mPool.front().heap);
- mPool.pop_front();
- }
- }
- }
+ // Invalidate all bindgroup allocations on previously bound heaps by incrementing the heap
+ // serial. When a bindgroup attempts to re-populate, it will compare with its recorded
+ // heap serial.
+ mHeapSerial++;
- if (descriptorHeap == nullptr) {
- DAWN_TRY_ASSIGN(descriptorHeap, AllocateHeap(mDescriptorCount));
- }
+ return {};
+}
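
A compact sketch of the phase-1 growth policy described above (illustrative only): the descriptor count doubles on every heap switch until it hits the API maximum.

    uint32_t nextDescriptorCount = std::min(mDescriptorCount * 2u, maxDescriptorCount);
    // e.g. starting from the 4096 minimum: 8192, 16384, ... capped at
    // D3D12_MAX_SHADER_VISIBLE_DESCRIPTOR_HEAP_SIZE_TIER_1 (1,000,000) for CBV/SRV/UAV heaps.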
- DAWN_TRY(mDevice->GetResidencyManager()->LockAllocation(descriptorHeap.get()));
+HeapVersionID ShaderVisibleDescriptorAllocator::GetShaderVisibleHeapSerialForTesting() const {
+ return mHeapSerial;
+}
- // Create a FIFO buffer from the recently created heap.
- mHeap = std::move(descriptorHeap);
- mAllocator = RingBufferAllocator(mDescriptorCount);
+uint64_t ShaderVisibleDescriptorAllocator::GetShaderVisibleHeapSizeForTesting() const {
+ return mAllocator.GetSize();
+}
- // Invalidate all bindgroup allocations on previously bound heaps by incrementing the heap
- // serial. When a bindgroup attempts to re-populate, it will compare with its recorded
- // heap serial.
- mHeapSerial++;
+uint64_t ShaderVisibleDescriptorAllocator::GetShaderVisiblePoolSizeForTesting() const {
+ return mPool.size();
+}
- return {};
- }
+bool ShaderVisibleDescriptorAllocator::IsShaderVisibleHeapLockedResidentForTesting() const {
+ return mHeap->IsResidencyLocked();
+}
- HeapVersionID ShaderVisibleDescriptorAllocator::GetShaderVisibleHeapSerialForTesting() const {
- return mHeapSerial;
- }
+bool ShaderVisibleDescriptorAllocator::IsLastShaderVisibleHeapInLRUForTesting() const {
+ ASSERT(!mPool.empty());
+ return mPool.back().heap->IsInResidencyLRUCache();
+}
- uint64_t ShaderVisibleDescriptorAllocator::GetShaderVisibleHeapSizeForTesting() const {
- return mAllocator.GetSize();
- }
-
- uint64_t ShaderVisibleDescriptorAllocator::GetShaderVisiblePoolSizeForTesting() const {
- return mPool.size();
- }
+bool ShaderVisibleDescriptorAllocator::IsAllocationStillValid(
+ const GPUDescriptorHeapAllocation& allocation) const {
+ // Consider valid if allocated for the pending submit and the shader visible heaps
+ // have not switched over.
+ return (allocation.GetLastUsageSerial() > mDevice->GetCompletedCommandSerial() &&
+ allocation.GetHeapSerial() == mHeapSerial);
+}
- bool ShaderVisibleDescriptorAllocator::IsShaderVisibleHeapLockedResidentForTesting() const {
- return mHeap->IsResidencyLocked();
- }
-
- bool ShaderVisibleDescriptorAllocator::IsLastShaderVisibleHeapInLRUForTesting() const {
- ASSERT(!mPool.empty());
- return mPool.back().heap->IsInResidencyLRUCache();
- }
+ShaderVisibleDescriptorHeap::ShaderVisibleDescriptorHeap(
+ ComPtr<ID3D12DescriptorHeap> d3d12DescriptorHeap,
+ uint64_t size)
+ : Pageable(d3d12DescriptorHeap, MemorySegment::Local, size),
+ mD3d12DescriptorHeap(std::move(d3d12DescriptorHeap)) {}
- bool ShaderVisibleDescriptorAllocator::IsAllocationStillValid(
- const GPUDescriptorHeapAllocation& allocation) const {
- // Consider valid if allocated for the pending submit and the shader visible heaps
- // have not switched over.
- return (allocation.GetLastUsageSerial() > mDevice->GetCompletedCommandSerial() &&
- allocation.GetHeapSerial() == mHeapSerial);
- }
-
- ShaderVisibleDescriptorHeap::ShaderVisibleDescriptorHeap(
- ComPtr<ID3D12DescriptorHeap> d3d12DescriptorHeap,
- uint64_t size)
- : Pageable(d3d12DescriptorHeap, MemorySegment::Local, size),
- mD3d12DescriptorHeap(std::move(d3d12DescriptorHeap)) {
- }
-
- ID3D12DescriptorHeap* ShaderVisibleDescriptorHeap::GetD3D12DescriptorHeap() const {
- return mD3d12DescriptorHeap.Get();
- }
+ID3D12DescriptorHeap* ShaderVisibleDescriptorHeap::GetD3D12DescriptorHeap() const {
+ return mD3d12DescriptorHeap.Get();
+}
} // namespace dawn::native::d3d12
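
A hedged sketch of how a caller (e.g. a bind group) might combine the validity check and the allocation API above; the surrounding code is an assumption, not part of this patch:

    if (!allocator->IsAllocationStillValid(gpuAllocation)) {
        // The heap was switched or the previous allocation expired; grab fresh descriptors.
        D3D12_CPU_DESCRIPTOR_HANDLE baseCPUDescriptor;
        if (!allocator->AllocateGPUDescriptors(descriptorCount, pendingSerial, &baseCPUDescriptor,
                                               &gpuAllocation)) {
            // Current heap is full: call AllocateAndSwitchShaderVisibleHeap() and retry.
        }
    }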
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.h b/chromium/third_party/dawn/src/dawn/native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.h
index 38a182abd77..cf09f9d504d 100644
--- a/chromium/third_party/dawn/src/dawn/native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.h
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.h
@@ -15,14 +15,15 @@
#ifndef SRC_DAWN_NATIVE_D3D12_SHADERVISIBLEDESCRIPTORALLOCATORD3D12_H_
#define SRC_DAWN_NATIVE_D3D12_SHADERVISIBLEDESCRIPTORALLOCATORD3D12_H_
+#include <list>
+#include <memory>
+
#include "dawn/native/Error.h"
#include "dawn/native/RingBufferAllocator.h"
#include "dawn/native/d3d12/IntegerTypes.h"
#include "dawn/native/d3d12/PageableD3D12.h"
#include "dawn/native/d3d12/d3d12_platform.h"
-#include <list>
-
// |ShaderVisibleDescriptorAllocator| allocates a variable-sized block of descriptors from a GPU
// descriptor heap pool.
// Internally, it manages a list of heaps using a ringbuffer block allocator. The heap is in one
@@ -31,75 +32,74 @@
// is returned to the pool.
namespace dawn::native::d3d12 {
- class Device;
- class GPUDescriptorHeapAllocation;
+class Device;
+class GPUDescriptorHeapAllocation;
- class ShaderVisibleDescriptorHeap : public Pageable {
- public:
- ShaderVisibleDescriptorHeap(ComPtr<ID3D12DescriptorHeap> d3d12DescriptorHeap,
- uint64_t size);
- ID3D12DescriptorHeap* GetD3D12DescriptorHeap() const;
+class ShaderVisibleDescriptorHeap : public Pageable {
+ public:
+ ShaderVisibleDescriptorHeap(ComPtr<ID3D12DescriptorHeap> d3d12DescriptorHeap, uint64_t size);
+ ID3D12DescriptorHeap* GetD3D12DescriptorHeap() const;
- private:
- ComPtr<ID3D12DescriptorHeap> mD3d12DescriptorHeap;
- };
+ private:
+ ComPtr<ID3D12DescriptorHeap> mD3d12DescriptorHeap;
+};
- class ShaderVisibleDescriptorAllocator {
- public:
- static ResultOrError<std::unique_ptr<ShaderVisibleDescriptorAllocator>> Create(
- Device* device,
- D3D12_DESCRIPTOR_HEAP_TYPE heapType);
+class ShaderVisibleDescriptorAllocator {
+ public:
+ static ResultOrError<std::unique_ptr<ShaderVisibleDescriptorAllocator>> Create(
+ Device* device,
+ D3D12_DESCRIPTOR_HEAP_TYPE heapType);
- ShaderVisibleDescriptorAllocator(Device* device, D3D12_DESCRIPTOR_HEAP_TYPE heapType);
+ ShaderVisibleDescriptorAllocator(Device* device, D3D12_DESCRIPTOR_HEAP_TYPE heapType);
- // Returns true if the allocation was successful, when false is returned the current heap is
- // full and AllocateAndSwitchShaderVisibleHeap() must be called.
- bool AllocateGPUDescriptors(uint32_t descriptorCount,
- ExecutionSerial pendingSerial,
- D3D12_CPU_DESCRIPTOR_HANDLE* baseCPUDescriptor,
- GPUDescriptorHeapAllocation* allocation);
+    // Returns true if the allocation was successful. When false is returned, the current heap is
+    // full and AllocateAndSwitchShaderVisibleHeap() must be called.
+ bool AllocateGPUDescriptors(uint32_t descriptorCount,
+ ExecutionSerial pendingSerial,
+ D3D12_CPU_DESCRIPTOR_HANDLE* baseCPUDescriptor,
+ GPUDescriptorHeapAllocation* allocation);
- void Tick(ExecutionSerial completedSerial);
+ void Tick(ExecutionSerial completedSerial);
- ID3D12DescriptorHeap* GetShaderVisibleHeap() const;
- MaybeError AllocateAndSwitchShaderVisibleHeap();
+ ID3D12DescriptorHeap* GetShaderVisibleHeap() const;
+ MaybeError AllocateAndSwitchShaderVisibleHeap();
- // For testing purposes only.
- HeapVersionID GetShaderVisibleHeapSerialForTesting() const;
- uint64_t GetShaderVisibleHeapSizeForTesting() const;
- uint64_t GetShaderVisiblePoolSizeForTesting() const;
- bool IsShaderVisibleHeapLockedResidentForTesting() const;
- bool IsLastShaderVisibleHeapInLRUForTesting() const;
+ // For testing purposes only.
+ HeapVersionID GetShaderVisibleHeapSerialForTesting() const;
+ uint64_t GetShaderVisibleHeapSizeForTesting() const;
+ uint64_t GetShaderVisiblePoolSizeForTesting() const;
+ bool IsShaderVisibleHeapLockedResidentForTesting() const;
+ bool IsLastShaderVisibleHeapInLRUForTesting() const;
- bool IsAllocationStillValid(const GPUDescriptorHeapAllocation& allocation) const;
+ bool IsAllocationStillValid(const GPUDescriptorHeapAllocation& allocation) const;
- private:
- struct SerialDescriptorHeap {
- ExecutionSerial heapSerial;
- std::unique_ptr<ShaderVisibleDescriptorHeap> heap;
- };
+ private:
+ struct SerialDescriptorHeap {
+ ExecutionSerial heapSerial;
+ std::unique_ptr<ShaderVisibleDescriptorHeap> heap;
+ };
- ResultOrError<std::unique_ptr<ShaderVisibleDescriptorHeap>> AllocateHeap(
- uint32_t descriptorCount) const;
+ ResultOrError<std::unique_ptr<ShaderVisibleDescriptorHeap>> AllocateHeap(
+ uint32_t descriptorCount) const;
- std::unique_ptr<ShaderVisibleDescriptorHeap> mHeap;
- RingBufferAllocator mAllocator;
- std::list<SerialDescriptorHeap> mPool;
- D3D12_DESCRIPTOR_HEAP_TYPE mHeapType;
+ std::unique_ptr<ShaderVisibleDescriptorHeap> mHeap;
+ RingBufferAllocator mAllocator;
+ std::list<SerialDescriptorHeap> mPool;
+ D3D12_DESCRIPTOR_HEAP_TYPE mHeapType;
- Device* mDevice;
+ Device* mDevice;
- // The serial value of 0 means the shader-visible heaps have not been allocated.
- // This value is never returned in the GPUDescriptorHeapAllocation after
- // AllocateGPUDescriptors() is called.
- HeapVersionID mHeapSerial = HeapVersionID(0);
+ // The serial value of 0 means the shader-visible heaps have not been allocated.
+ // This value is never returned in the GPUDescriptorHeapAllocation after
+ // AllocateGPUDescriptors() is called.
+ HeapVersionID mHeapSerial = HeapVersionID(0);
- uint32_t mSizeIncrement;
+ uint32_t mSizeIncrement;
- // The descriptor count is the current size of the heap in number of descriptors.
- // This is stored on the allocator to avoid extra conversions.
- uint32_t mDescriptorCount = 0;
- };
+ // The descriptor count is the current size of the heap in number of descriptors.
+ // This is stored on the allocator to avoid extra conversions.
+ uint32_t mDescriptorCount = 0;
+};
} // namespace dawn::native::d3d12
#endif // SRC_DAWN_NATIVE_D3D12_SHADERVISIBLEDESCRIPTORALLOCATORD3D12_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/StagingBufferD3D12.cpp b/chromium/third_party/dawn/src/dawn/native/d3d12/StagingBufferD3D12.cpp
index e608a14e45d..edaa2cff4bd 100644
--- a/chromium/third_party/dawn/src/dawn/native/d3d12/StagingBufferD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/StagingBufferD3D12.cpp
@@ -21,57 +21,55 @@
namespace dawn::native::d3d12 {
- StagingBuffer::StagingBuffer(size_t size, Device* device)
- : StagingBufferBase(size), mDevice(device) {
- }
+StagingBuffer::StagingBuffer(size_t size, Device* device)
+ : StagingBufferBase(size), mDevice(device) {}
+
+MaybeError StagingBuffer::Initialize() {
+ D3D12_RESOURCE_DESC resourceDescriptor;
+ resourceDescriptor.Dimension = D3D12_RESOURCE_DIMENSION_BUFFER;
+ resourceDescriptor.Alignment = 0;
+ resourceDescriptor.Width = GetSize();
+ resourceDescriptor.Height = 1;
+ resourceDescriptor.DepthOrArraySize = 1;
+ resourceDescriptor.MipLevels = 1;
+ resourceDescriptor.Format = DXGI_FORMAT_UNKNOWN;
+ resourceDescriptor.SampleDesc.Count = 1;
+ resourceDescriptor.SampleDesc.Quality = 0;
+ resourceDescriptor.Layout = D3D12_TEXTURE_LAYOUT_ROW_MAJOR;
+ resourceDescriptor.Flags = D3D12_RESOURCE_FLAG_NONE;
- MaybeError StagingBuffer::Initialize() {
- D3D12_RESOURCE_DESC resourceDescriptor;
- resourceDescriptor.Dimension = D3D12_RESOURCE_DIMENSION_BUFFER;
- resourceDescriptor.Alignment = 0;
- resourceDescriptor.Width = GetSize();
- resourceDescriptor.Height = 1;
- resourceDescriptor.DepthOrArraySize = 1;
- resourceDescriptor.MipLevels = 1;
- resourceDescriptor.Format = DXGI_FORMAT_UNKNOWN;
- resourceDescriptor.SampleDesc.Count = 1;
- resourceDescriptor.SampleDesc.Quality = 0;
- resourceDescriptor.Layout = D3D12_TEXTURE_LAYOUT_ROW_MAJOR;
- resourceDescriptor.Flags = D3D12_RESOURCE_FLAG_NONE;
+ DAWN_TRY_ASSIGN(mUploadHeap, mDevice->AllocateMemory(D3D12_HEAP_TYPE_UPLOAD, resourceDescriptor,
+ D3D12_RESOURCE_STATE_GENERIC_READ));
- DAWN_TRY_ASSIGN(mUploadHeap,
- mDevice->AllocateMemory(D3D12_HEAP_TYPE_UPLOAD, resourceDescriptor,
- D3D12_RESOURCE_STATE_GENERIC_READ));
+ // The mapped buffer can be accessed at any time, so it must be locked to ensure it is never
+ // evicted. This buffer should already have been made resident when it was created.
+ DAWN_TRY(
+ mDevice->GetResidencyManager()->LockAllocation(ToBackend(mUploadHeap.GetResourceHeap())));
- // The mapped buffer can be accessed at any time, so it must be locked to ensure it is never
- // evicted. This buffer should already have been made resident when it was created.
- DAWN_TRY(mDevice->GetResidencyManager()->LockAllocation(
- ToBackend(mUploadHeap.GetResourceHeap())));
+ SetDebugName(mDevice, GetResource(), "Dawn_StagingBuffer");
- SetDebugName(mDevice, GetResource(), "Dawn_StagingBuffer");
+ return CheckHRESULT(GetResource()->Map(0, nullptr, &mMappedPointer), "ID3D12Resource::Map");
+}
- return CheckHRESULT(GetResource()->Map(0, nullptr, &mMappedPointer), "ID3D12Resource::Map");
+StagingBuffer::~StagingBuffer() {
+ // Always check if the allocation is valid before Unmap.
+ // The resource would not exist had it failed to allocate.
+ if (mUploadHeap.GetInfo().mMethod == AllocationMethod::kInvalid) {
+ return;
}
- StagingBuffer::~StagingBuffer() {
- // Always check if the allocation is valid before Unmap.
- // The resource would not exist had it failed to allocate.
- if (mUploadHeap.GetInfo().mMethod == AllocationMethod::kInvalid) {
- return;
- }
+ // The underlying heap was locked in residency upon creation. We must unlock it when this
+ // buffer becomes unmapped.
+ mDevice->GetResidencyManager()->UnlockAllocation(ToBackend(mUploadHeap.GetResourceHeap()));
- // The underlying heap was locked in residency upon creation. We must unlock it when this
- // buffer becomes unmapped.
- mDevice->GetResidencyManager()->UnlockAllocation(ToBackend(mUploadHeap.GetResourceHeap()));
+ // Invalidate the CPU virtual address & flush cache (if needed).
+ GetResource()->Unmap(0, nullptr);
+ mMappedPointer = nullptr;
- // Invalidate the CPU virtual address & flush cache (if needed).
- GetResource()->Unmap(0, nullptr);
- mMappedPointer = nullptr;
+ mDevice->DeallocateMemory(mUploadHeap);
+}
- mDevice->DeallocateMemory(mUploadHeap);
- }
-
- ID3D12Resource* StagingBuffer::GetResource() const {
- return mUploadHeap.GetD3D12Resource();
- }
+ID3D12Resource* StagingBuffer::GetResource() const {
+ return mUploadHeap.GetD3D12Resource();
+}
} // namespace dawn::native::d3d12
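Outside of Dawn's allocator and residency plumbing, the persistently mapped upload pattern implemented above can be sketched with plain D3D12 calls. CreateCommittedResource here stands in for Device::AllocateMemory and the residency lock, so treat this as an assumption-laden illustration rather than the staging buffer's actual path.

#include <d3d12.h>
#include <wrl/client.h>

using Microsoft::WRL::ComPtr;

// Minimal sketch: a buffer on an UPLOAD heap that is mapped once and stays mapped.
HRESULT CreateMappedUploadBuffer(ID3D12Device* device,
                                 UINT64 size,
                                 ComPtr<ID3D12Resource>& outResource,
                                 void** outMappedPointer) {
    D3D12_HEAP_PROPERTIES heapProps = {};
    heapProps.Type = D3D12_HEAP_TYPE_UPLOAD;

    D3D12_RESOURCE_DESC desc = {};
    desc.Dimension = D3D12_RESOURCE_DIMENSION_BUFFER;
    desc.Width = size;
    desc.Height = 1;
    desc.DepthOrArraySize = 1;
    desc.MipLevels = 1;
    desc.Format = DXGI_FORMAT_UNKNOWN;
    desc.SampleDesc.Count = 1;
    desc.Layout = D3D12_TEXTURE_LAYOUT_ROW_MAJOR;

    HRESULT hr = device->CreateCommittedResource(&heapProps, D3D12_HEAP_FLAG_NONE, &desc,
                                                 D3D12_RESOURCE_STATE_GENERIC_READ, nullptr,
                                                 IID_PPV_ARGS(&outResource));
    if (FAILED(hr)) {
        return hr;
    }
    // Upload heaps may stay mapped while the GPU reads from them, so Map() is called once and
    // Unmap() is deferred to destruction, mirroring StagingBuffer above.
    return outResource->Map(0, nullptr, outMappedPointer);
}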
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/StagingBufferD3D12.h b/chromium/third_party/dawn/src/dawn/native/d3d12/StagingBufferD3D12.h
index 6e67a1aefe0..dcbe7dfedf3 100644
--- a/chromium/third_party/dawn/src/dawn/native/d3d12/StagingBufferD3D12.h
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/StagingBufferD3D12.h
@@ -21,21 +21,21 @@
namespace dawn::native::d3d12 {
- class Device;
+class Device;
- class StagingBuffer : public StagingBufferBase {
- public:
- StagingBuffer(size_t size, Device* device);
- ~StagingBuffer() override;
+class StagingBuffer : public StagingBufferBase {
+ public:
+ StagingBuffer(size_t size, Device* device);
+ ~StagingBuffer() override;
- ID3D12Resource* GetResource() const;
+ ID3D12Resource* GetResource() const;
- MaybeError Initialize() override;
+ MaybeError Initialize() override;
- private:
- Device* mDevice;
- ResourceHeapAllocation mUploadHeap;
- };
+ private:
+ Device* mDevice;
+ ResourceHeapAllocation mUploadHeap;
+};
} // namespace dawn::native::d3d12
#endif // SRC_DAWN_NATIVE_D3D12_STAGINGBUFFERD3D12_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/StagingDescriptorAllocatorD3D12.cpp b/chromium/third_party/dawn/src/dawn/native/d3d12/StagingDescriptorAllocatorD3D12.cpp
index b64da3007a1..f30016a2c3d 100644
--- a/chromium/third_party/dawn/src/dawn/native/d3d12/StagingDescriptorAllocatorD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/StagingDescriptorAllocatorD3D12.cpp
@@ -12,141 +12,142 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-#include "dawn/common/Math.h"
+#include "dawn/native/d3d12/StagingDescriptorAllocatorD3D12.h"
+#include <utility>
+
+#include "dawn/common/Math.h"
#include "dawn/native/d3d12/D3D12Error.h"
#include "dawn/native/d3d12/DeviceD3D12.h"
-#include "dawn/native/d3d12/StagingDescriptorAllocatorD3D12.h"
namespace dawn::native::d3d12 {
- StagingDescriptorAllocator::StagingDescriptorAllocator(Device* device,
- uint32_t descriptorCount,
- uint32_t heapSize,
- D3D12_DESCRIPTOR_HEAP_TYPE heapType)
- : mDevice(device),
- mSizeIncrement(device->GetD3D12Device()->GetDescriptorHandleIncrementSize(heapType)),
- mBlockSize(descriptorCount * mSizeIncrement),
- mHeapSize(RoundUp(heapSize, descriptorCount)),
- mHeapType(heapType) {
- ASSERT(descriptorCount <= heapSize);
+StagingDescriptorAllocator::StagingDescriptorAllocator(Device* device,
+ uint32_t descriptorCount,
+ uint32_t heapSize,
+ D3D12_DESCRIPTOR_HEAP_TYPE heapType)
+ : mDevice(device),
+ mSizeIncrement(device->GetD3D12Device()->GetDescriptorHandleIncrementSize(heapType)),
+ mBlockSize(descriptorCount * mSizeIncrement),
+ mHeapSize(RoundUp(heapSize, descriptorCount)),
+ mHeapType(heapType) {
+ ASSERT(descriptorCount <= heapSize);
+}
+
+StagingDescriptorAllocator::~StagingDescriptorAllocator() {
+ const Index freeBlockIndicesSize = GetFreeBlockIndicesSize();
+ for (auto& buffer : mPool) {
+ ASSERT(buffer.freeBlockIndices.size() == freeBlockIndicesSize);
}
+ ASSERT(mAvailableHeaps.size() == mPool.size());
+}
- StagingDescriptorAllocator::~StagingDescriptorAllocator() {
- const Index freeBlockIndicesSize = GetFreeBlockIndicesSize();
- for (auto& buffer : mPool) {
- ASSERT(buffer.freeBlockIndices.size() == freeBlockIndicesSize);
- }
- ASSERT(mAvailableHeaps.size() == mPool.size());
+ResultOrError<CPUDescriptorHeapAllocation> StagingDescriptorAllocator::AllocateCPUDescriptors() {
+ if (mAvailableHeaps.empty()) {
+ DAWN_TRY(AllocateCPUHeap());
}
- ResultOrError<CPUDescriptorHeapAllocation>
- StagingDescriptorAllocator::AllocateCPUDescriptors() {
- if (mAvailableHeaps.empty()) {
- DAWN_TRY(AllocateCPUHeap());
- }
-
- ASSERT(!mAvailableHeaps.empty());
-
- const uint32_t heapIndex = mAvailableHeaps.back();
- NonShaderVisibleBuffer& buffer = mPool[heapIndex];
-
- ASSERT(!buffer.freeBlockIndices.empty());
+ ASSERT(!mAvailableHeaps.empty());
- const Index blockIndex = buffer.freeBlockIndices.back();
+ const uint32_t heapIndex = mAvailableHeaps.back();
+ NonShaderVisibleBuffer& buffer = mPool[heapIndex];
- buffer.freeBlockIndices.pop_back();
+ ASSERT(!buffer.freeBlockIndices.empty());
- if (buffer.freeBlockIndices.empty()) {
- mAvailableHeaps.pop_back();
- }
+ const Index blockIndex = buffer.freeBlockIndices.back();
- const D3D12_CPU_DESCRIPTOR_HANDLE baseCPUDescriptor = {
- buffer.heap->GetCPUDescriptorHandleForHeapStart().ptr + (blockIndex * mBlockSize)};
+ buffer.freeBlockIndices.pop_back();
- return CPUDescriptorHeapAllocation{baseCPUDescriptor, heapIndex};
+ if (buffer.freeBlockIndices.empty()) {
+ mAvailableHeaps.pop_back();
}
- MaybeError StagingDescriptorAllocator::AllocateCPUHeap() {
- D3D12_DESCRIPTOR_HEAP_DESC heapDescriptor;
- heapDescriptor.Type = mHeapType;
- heapDescriptor.NumDescriptors = mHeapSize;
- heapDescriptor.Flags = D3D12_DESCRIPTOR_HEAP_FLAG_NONE;
- heapDescriptor.NodeMask = 0;
+ const D3D12_CPU_DESCRIPTOR_HANDLE baseCPUDescriptor = {
+ buffer.heap->GetCPUDescriptorHandleForHeapStart().ptr + (blockIndex * mBlockSize)};
- ComPtr<ID3D12DescriptorHeap> heap;
- DAWN_TRY(CheckHRESULT(
- mDevice->GetD3D12Device()->CreateDescriptorHeap(&heapDescriptor, IID_PPV_ARGS(&heap)),
- "ID3D12Device::CreateDescriptorHeap"));
+ return CPUDescriptorHeapAllocation{baseCPUDescriptor, heapIndex};
+}
- NonShaderVisibleBuffer newBuffer;
- newBuffer.heap = std::move(heap);
+MaybeError StagingDescriptorAllocator::AllocateCPUHeap() {
+ D3D12_DESCRIPTOR_HEAP_DESC heapDescriptor;
+ heapDescriptor.Type = mHeapType;
+ heapDescriptor.NumDescriptors = mHeapSize;
+ heapDescriptor.Flags = D3D12_DESCRIPTOR_HEAP_FLAG_NONE;
+ heapDescriptor.NodeMask = 0;
- const Index freeBlockIndicesSize = GetFreeBlockIndicesSize();
- newBuffer.freeBlockIndices.reserve(freeBlockIndicesSize);
+ ComPtr<ID3D12DescriptorHeap> heap;
+ DAWN_TRY(CheckHRESULT(
+ mDevice->GetD3D12Device()->CreateDescriptorHeap(&heapDescriptor, IID_PPV_ARGS(&heap)),
+ "ID3D12Device::CreateDescriptorHeap"));
- for (Index blockIndex = 0; blockIndex < freeBlockIndicesSize; blockIndex++) {
- newBuffer.freeBlockIndices.push_back(blockIndex);
- }
+ NonShaderVisibleBuffer newBuffer;
+ newBuffer.heap = std::move(heap);
- mAvailableHeaps.push_back(mPool.size());
- mPool.emplace_back(std::move(newBuffer));
+ const Index freeBlockIndicesSize = GetFreeBlockIndicesSize();
+ newBuffer.freeBlockIndices.reserve(freeBlockIndicesSize);
- return {};
+ for (Index blockIndex = 0; blockIndex < freeBlockIndicesSize; blockIndex++) {
+ newBuffer.freeBlockIndices.push_back(blockIndex);
}
- void StagingDescriptorAllocator::Deallocate(CPUDescriptorHeapAllocation* allocation) {
- ASSERT(allocation->IsValid());
+ mAvailableHeaps.push_back(mPool.size());
+ mPool.emplace_back(std::move(newBuffer));
- const uint32_t heapIndex = allocation->GetHeapIndex();
+ return {};
+}
- ASSERT(heapIndex < mPool.size());
+void StagingDescriptorAllocator::Deallocate(CPUDescriptorHeapAllocation* allocation) {
+ ASSERT(allocation->IsValid());
- // Insert the deallocated block back into the free-list. Order does not matter. However,
- // having blocks be non-contigious could slow down future allocations due to poor cache
- // locality.
- // TODO(dawn:155): Consider more optimization.
- std::vector<Index>& freeBlockIndices = mPool[heapIndex].freeBlockIndices;
- if (freeBlockIndices.empty()) {
- mAvailableHeaps.emplace_back(heapIndex);
- }
+ const uint32_t heapIndex = allocation->GetHeapIndex();
- const D3D12_CPU_DESCRIPTOR_HANDLE heapStart =
- mPool[heapIndex].heap->GetCPUDescriptorHandleForHeapStart();
+ ASSERT(heapIndex < mPool.size());
- const D3D12_CPU_DESCRIPTOR_HANDLE baseDescriptor = allocation->OffsetFrom(0, 0);
+ // Insert the deallocated block back into the free-list. Order does not matter. However,
+    // having blocks be non-contiguous could slow down future allocations due to poor cache
+ // locality.
+ // TODO(dawn:155): Consider more optimization.
+ std::vector<Index>& freeBlockIndices = mPool[heapIndex].freeBlockIndices;
+ if (freeBlockIndices.empty()) {
+ mAvailableHeaps.emplace_back(heapIndex);
+ }
- const Index blockIndex = (baseDescriptor.ptr - heapStart.ptr) / mBlockSize;
+ const D3D12_CPU_DESCRIPTOR_HANDLE heapStart =
+ mPool[heapIndex].heap->GetCPUDescriptorHandleForHeapStart();
- freeBlockIndices.emplace_back(blockIndex);
+ const D3D12_CPU_DESCRIPTOR_HANDLE baseDescriptor = allocation->OffsetFrom(0, 0);
- // Invalidate the handle in case the developer accidentally uses it again.
- allocation->Invalidate();
- }
+ const Index blockIndex = (baseDescriptor.ptr - heapStart.ptr) / mBlockSize;
- uint32_t StagingDescriptorAllocator::GetSizeIncrement() const {
- return mSizeIncrement;
- }
+ freeBlockIndices.emplace_back(blockIndex);
- StagingDescriptorAllocator::Index StagingDescriptorAllocator::GetFreeBlockIndicesSize() const {
- return ((mHeapSize * mSizeIncrement) / mBlockSize);
- }
+ // Invalidate the handle in case the developer accidentally uses it again.
+ allocation->Invalidate();
+}
- ResultOrError<CPUDescriptorHeapAllocation>
- StagingDescriptorAllocator::AllocateTransientCPUDescriptors() {
- CPUDescriptorHeapAllocation allocation;
- DAWN_TRY_ASSIGN(allocation, AllocateCPUDescriptors());
- mAllocationsToDelete.Enqueue(allocation, mDevice->GetPendingCommandSerial());
- return allocation;
- }
+uint32_t StagingDescriptorAllocator::GetSizeIncrement() const {
+ return mSizeIncrement;
+}
+
+StagingDescriptorAllocator::Index StagingDescriptorAllocator::GetFreeBlockIndicesSize() const {
+ return ((mHeapSize * mSizeIncrement) / mBlockSize);
+}
- void StagingDescriptorAllocator::Tick(ExecutionSerial completedSerial) {
- for (CPUDescriptorHeapAllocation& allocation :
- mAllocationsToDelete.IterateUpTo(completedSerial)) {
- Deallocate(&allocation);
- }
+ResultOrError<CPUDescriptorHeapAllocation>
+StagingDescriptorAllocator::AllocateTransientCPUDescriptors() {
+ CPUDescriptorHeapAllocation allocation;
+ DAWN_TRY_ASSIGN(allocation, AllocateCPUDescriptors());
+ mAllocationsToDelete.Enqueue(allocation, mDevice->GetPendingCommandSerial());
+ return allocation;
+}
- mAllocationsToDelete.ClearUpTo(completedSerial);
+void StagingDescriptorAllocator::Tick(ExecutionSerial completedSerial) {
+ for (CPUDescriptorHeapAllocation& allocation :
+ mAllocationsToDelete.IterateUpTo(completedSerial)) {
+ Deallocate(&allocation);
}
+ mAllocationsToDelete.ClearUpTo(completedSerial);
+}
+
} // namespace dawn::native::d3d12
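The allocation and deallocation above reduce to simple block arithmetic over the heap's CPU descriptor handles. A small standalone restatement of that math follows; the names are illustrative, and blockSize corresponds to mBlockSize, i.e. descriptorCount * sizeIncrement in bytes.

#include <cstdint>
#include <d3d12.h>

// Base handle of block `blockIndex`, as used when handing out a free block.
inline D3D12_CPU_DESCRIPTOR_HANDLE BlockStart(D3D12_CPU_DESCRIPTOR_HANDLE heapStart,
                                              uint32_t blockIndex,
                                              uint32_t blockSize) {
    return {heapStart.ptr + static_cast<SIZE_T>(blockIndex) * blockSize};
}

// Inverse mapping, as used by Deallocate() to recover the free-list index.
inline uint32_t BlockIndexOf(D3D12_CPU_DESCRIPTOR_HANDLE handle,
                             D3D12_CPU_DESCRIPTOR_HANDLE heapStart,
                             uint32_t blockSize) {
    return static_cast<uint32_t>((handle.ptr - heapStart.ptr) / blockSize);
}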
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/StagingDescriptorAllocatorD3D12.h b/chromium/third_party/dawn/src/dawn/native/d3d12/StagingDescriptorAllocatorD3D12.h
index 5c69589aad6..09408677a16 100644
--- a/chromium/third_party/dawn/src/dawn/native/d3d12/StagingDescriptorAllocatorD3D12.h
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/StagingDescriptorAllocatorD3D12.h
@@ -15,12 +15,13 @@
#ifndef SRC_DAWN_NATIVE_D3D12_STAGINGDESCRIPTORALLOCATORD3D12_H_
#define SRC_DAWN_NATIVE_D3D12_STAGINGDESCRIPTORALLOCATORD3D12_H_
-#include "dawn/native/Error.h"
+#include <vector>
+#include "dawn/common/SerialQueue.h"
+#include "dawn/native/Error.h"
+#include "dawn/native/IntegerTypes.h"
#include "dawn/native/d3d12/CPUDescriptorHeapAllocationD3D12.h"
-#include <vector>
-
// |StagingDescriptorAllocator| allocates a fixed-size block of descriptors from a CPU
// descriptor heap pool.
// Internally, it manages a list of heaps using a fixed-size block allocator. The fixed-size
@@ -32,53 +33,53 @@
// offset is inserted back into the free-list.
namespace dawn::native::d3d12 {
- class Device;
+class Device;
- class StagingDescriptorAllocator {
- public:
- StagingDescriptorAllocator() = default;
- StagingDescriptorAllocator(Device* device,
- uint32_t descriptorCount,
- uint32_t heapSize,
- D3D12_DESCRIPTOR_HEAP_TYPE heapType);
- ~StagingDescriptorAllocator();
+class StagingDescriptorAllocator {
+ public:
+ StagingDescriptorAllocator() = default;
+ StagingDescriptorAllocator(Device* device,
+ uint32_t descriptorCount,
+ uint32_t heapSize,
+ D3D12_DESCRIPTOR_HEAP_TYPE heapType);
+ ~StagingDescriptorAllocator();
- ResultOrError<CPUDescriptorHeapAllocation> AllocateCPUDescriptors();
+ ResultOrError<CPUDescriptorHeapAllocation> AllocateCPUDescriptors();
- // Will call Deallocate when the serial is passed.
- ResultOrError<CPUDescriptorHeapAllocation> AllocateTransientCPUDescriptors();
+ // Will call Deallocate when the serial is passed.
+ ResultOrError<CPUDescriptorHeapAllocation> AllocateTransientCPUDescriptors();
- void Deallocate(CPUDescriptorHeapAllocation* allocation);
+ void Deallocate(CPUDescriptorHeapAllocation* allocation);
- uint32_t GetSizeIncrement() const;
+ uint32_t GetSizeIncrement() const;
- void Tick(ExecutionSerial completedSerial);
+ void Tick(ExecutionSerial completedSerial);
- private:
- using Index = uint16_t;
+ private:
+ using Index = uint16_t;
- struct NonShaderVisibleBuffer {
- ComPtr<ID3D12DescriptorHeap> heap;
- std::vector<Index> freeBlockIndices;
- };
+ struct NonShaderVisibleBuffer {
+ ComPtr<ID3D12DescriptorHeap> heap;
+ std::vector<Index> freeBlockIndices;
+ };
- MaybeError AllocateCPUHeap();
+ MaybeError AllocateCPUHeap();
- Index GetFreeBlockIndicesSize() const;
+ Index GetFreeBlockIndicesSize() const;
- std::vector<uint32_t> mAvailableHeaps; // Indices into the pool.
- std::vector<NonShaderVisibleBuffer> mPool;
+ std::vector<uint32_t> mAvailableHeaps; // Indices into the pool.
+ std::vector<NonShaderVisibleBuffer> mPool;
- Device* mDevice;
+ Device* mDevice;
- uint32_t mSizeIncrement; // Size of the descriptor (in bytes).
- uint32_t mBlockSize; // Size of the block of descriptors (in bytes).
- uint32_t mHeapSize; // Size of the heap (in number of descriptors).
+ uint32_t mSizeIncrement; // Size of the descriptor (in bytes).
+ uint32_t mBlockSize; // Size of the block of descriptors (in bytes).
+ uint32_t mHeapSize; // Size of the heap (in number of descriptors).
- D3D12_DESCRIPTOR_HEAP_TYPE mHeapType;
+ D3D12_DESCRIPTOR_HEAP_TYPE mHeapType;
- SerialQueue<ExecutionSerial, CPUDescriptorHeapAllocation> mAllocationsToDelete;
- };
+ SerialQueue<ExecutionSerial, CPUDescriptorHeapAllocation> mAllocationsToDelete;
+};
} // namespace dawn::native::d3d12
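For completeness, a sketch of the transient-descriptor lifecycle this interface enables: AllocateTransientCPUDescriptors() enqueues the allocation against the pending serial, and Tick() returns it to the free-list once that serial has completed. The frame function and Device::GetCompletedCommandSerial() are assumptions of the sketch, not part of this header.

// Sketch only; rtvAllocator would be one of the Device's per-size StagingDescriptorAllocators.
MaybeError RecordOneFrame(Device* device, StagingDescriptorAllocator* rtvAllocator) {
    CPUDescriptorHeapAllocation rtv;
    DAWN_TRY_ASSIGN(rtv, rtvAllocator->AllocateTransientCPUDescriptors());

    // ... create the RTV at rtv.OffsetFrom(0, 0) and record commands that use it ...

    // No explicit Deallocate(): once the GPU has passed the serial the allocation was enqueued
    // with, Tick() recycles the block.
    rtvAllocator->Tick(device->GetCompletedCommandSerial());
    return {};
}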
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/SwapChainD3D12.cpp b/chromium/third_party/dawn/src/dawn/native/d3d12/SwapChainD3D12.cpp
index 0c23a01a257..32116c48b3d 100644
--- a/chromium/third_party/dawn/src/dawn/native/d3d12/SwapChainD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/SwapChainD3D12.cpp
@@ -14,362 +14,361 @@
#include "dawn/native/d3d12/SwapChainD3D12.h"
+#include <windows.ui.xaml.media.dxinterop.h>
+
+#include <utility>
+
+#include "dawn/dawn_wsi.h"
#include "dawn/native/Surface.h"
#include "dawn/native/d3d12/D3D12Error.h"
#include "dawn/native/d3d12/DeviceD3D12.h"
#include "dawn/native/d3d12/TextureD3D12.h"
-#include <dawn/dawn_wsi.h>
-
-#include <windows.ui.xaml.media.dxinterop.h>
-
namespace dawn::native::d3d12 {
- namespace {
-
- uint32_t PresentModeToBufferCount(wgpu::PresentMode mode) {
- switch (mode) {
- case wgpu::PresentMode::Immediate:
- case wgpu::PresentMode::Fifo:
- return 2;
- case wgpu::PresentMode::Mailbox:
- return 3;
- }
- }
-
- uint32_t PresentModeToSwapInterval(wgpu::PresentMode mode) {
- switch (mode) {
- case wgpu::PresentMode::Immediate:
- case wgpu::PresentMode::Mailbox:
- return 0;
- case wgpu::PresentMode::Fifo:
- return 1;
- }
- }
-
- UINT PresentModeToSwapChainFlags(wgpu::PresentMode mode) {
- UINT flags = DXGI_SWAP_CHAIN_FLAG_ALLOW_MODE_SWITCH;
-
- if (mode == wgpu::PresentMode::Immediate) {
- flags |= DXGI_SWAP_CHAIN_FLAG_ALLOW_TEARING;
- }
-
- return flags;
- }
+namespace {
+
+uint32_t PresentModeToBufferCount(wgpu::PresentMode mode) {
+ switch (mode) {
+ case wgpu::PresentMode::Immediate:
+ case wgpu::PresentMode::Fifo:
+ return 2;
+ case wgpu::PresentMode::Mailbox:
+ return 3;
+ }
+}
+
+uint32_t PresentModeToSwapInterval(wgpu::PresentMode mode) {
+ switch (mode) {
+ case wgpu::PresentMode::Immediate:
+ case wgpu::PresentMode::Mailbox:
+ return 0;
+ case wgpu::PresentMode::Fifo:
+ return 1;
+ }
+}
- DXGI_USAGE ToDXGIUsage(wgpu::TextureUsage usage) {
- DXGI_USAGE dxgiUsage = DXGI_CPU_ACCESS_NONE;
- if (usage & wgpu::TextureUsage::TextureBinding) {
- dxgiUsage |= DXGI_USAGE_SHADER_INPUT;
- }
- if (usage & wgpu::TextureUsage::StorageBinding) {
- dxgiUsage |= DXGI_USAGE_UNORDERED_ACCESS;
- }
- if (usage & wgpu::TextureUsage::RenderAttachment) {
- dxgiUsage |= DXGI_USAGE_RENDER_TARGET_OUTPUT;
- }
- return dxgiUsage;
- }
+UINT PresentModeToSwapChainFlags(wgpu::PresentMode mode) {
+ UINT flags = DXGI_SWAP_CHAIN_FLAG_ALLOW_MODE_SWITCH;
- } // namespace
+ if (mode == wgpu::PresentMode::Immediate) {
+ flags |= DXGI_SWAP_CHAIN_FLAG_ALLOW_TEARING;
+ }
- // OldSwapChain
+ return flags;
+}
- // static
- Ref<OldSwapChain> OldSwapChain::Create(Device* device, const SwapChainDescriptor* descriptor) {
- return AcquireRef(new OldSwapChain(device, descriptor));
+DXGI_USAGE ToDXGIUsage(wgpu::TextureUsage usage) {
+ DXGI_USAGE dxgiUsage = DXGI_CPU_ACCESS_NONE;
+ if (usage & wgpu::TextureUsage::TextureBinding) {
+ dxgiUsage |= DXGI_USAGE_SHADER_INPUT;
+ }
+ if (usage & wgpu::TextureUsage::StorageBinding) {
+ dxgiUsage |= DXGI_USAGE_UNORDERED_ACCESS;
+ }
+ if (usage & wgpu::TextureUsage::RenderAttachment) {
+ dxgiUsage |= DXGI_USAGE_RENDER_TARGET_OUTPUT;
+ }
+ return dxgiUsage;
+}
+
+} // namespace
+
+// OldSwapChain
+
+// static
+Ref<OldSwapChain> OldSwapChain::Create(Device* device, const SwapChainDescriptor* descriptor) {
+ return AcquireRef(new OldSwapChain(device, descriptor));
+}
+
+OldSwapChain::OldSwapChain(Device* device, const SwapChainDescriptor* descriptor)
+ : OldSwapChainBase(device, descriptor) {
+ const auto& im = GetImplementation();
+ DawnWSIContextD3D12 wsiContext = {};
+ wsiContext.device = ToAPI(GetDevice());
+ im.Init(im.userData, &wsiContext);
+
+ ASSERT(im.textureUsage != WGPUTextureUsage_None);
+ mTextureUsage = static_cast<wgpu::TextureUsage>(im.textureUsage);
+}
+
+OldSwapChain::~OldSwapChain() = default;
+
+TextureBase* OldSwapChain::GetNextTextureImpl(const TextureDescriptor* descriptor) {
+ DeviceBase* device = GetDevice();
+ const auto& im = GetImplementation();
+ DawnSwapChainNextTexture next = {};
+ DawnSwapChainError error = im.GetNextTexture(im.userData, &next);
+ if (error) {
+ device->HandleError(InternalErrorType::Internal, error);
+ return nullptr;
}
- OldSwapChain::OldSwapChain(Device* device, const SwapChainDescriptor* descriptor)
- : OldSwapChainBase(device, descriptor) {
- const auto& im = GetImplementation();
- DawnWSIContextD3D12 wsiContext = {};
- wsiContext.device = ToAPI(GetDevice());
- im.Init(im.userData, &wsiContext);
-
- ASSERT(im.textureUsage != WGPUTextureUsage_None);
- mTextureUsage = static_cast<wgpu::TextureUsage>(im.textureUsage);
+ ComPtr<ID3D12Resource> d3d12Texture = static_cast<ID3D12Resource*>(next.texture.ptr);
+ Ref<Texture> dawnTexture;
+ if (device->ConsumedError(
+ Texture::Create(ToBackend(GetDevice()), descriptor, std::move(d3d12Texture)),
+ &dawnTexture)) {
+ return nullptr;
}
- OldSwapChain::~OldSwapChain() = default;
+ return dawnTexture.Detach();
+}
- TextureBase* OldSwapChain::GetNextTextureImpl(const TextureDescriptor* descriptor) {
- DeviceBase* device = GetDevice();
- const auto& im = GetImplementation();
- DawnSwapChainNextTexture next = {};
- DawnSwapChainError error = im.GetNextTexture(im.userData, &next);
- if (error) {
- device->HandleError(InternalErrorType::Internal, error);
- return nullptr;
- }
+MaybeError OldSwapChain::OnBeforePresent(TextureViewBase* view) {
+ Device* device = ToBackend(GetDevice());
- ComPtr<ID3D12Resource> d3d12Texture = static_cast<ID3D12Resource*>(next.texture.ptr);
- Ref<Texture> dawnTexture;
- if (device->ConsumedError(
- Texture::Create(ToBackend(GetDevice()), descriptor, std::move(d3d12Texture)),
- &dawnTexture)) {
- return nullptr;
- }
+ CommandRecordingContext* commandContext;
+ DAWN_TRY_ASSIGN(commandContext, device->GetPendingCommandContext());
- return dawnTexture.Detach();
- }
+ // Perform the necessary transition for the texture to be presented.
+ ToBackend(view->GetTexture())
+ ->TrackUsageAndTransitionNow(commandContext, mTextureUsage, view->GetSubresourceRange());
- MaybeError OldSwapChain::OnBeforePresent(TextureViewBase* view) {
- Device* device = ToBackend(GetDevice());
+ DAWN_TRY(device->ExecutePendingCommandContext());
- CommandRecordingContext* commandContext;
- DAWN_TRY_ASSIGN(commandContext, device->GetPendingCommandContext());
+ return {};
+}
- // Perform the necessary transition for the texture to be presented.
- ToBackend(view->GetTexture())
- ->TrackUsageAndTransitionNow(commandContext, mTextureUsage,
- view->GetSubresourceRange());
+// SwapChain
- DAWN_TRY(device->ExecutePendingCommandContext());
+// static
+ResultOrError<Ref<SwapChain>> SwapChain::Create(Device* device,
+ Surface* surface,
+ NewSwapChainBase* previousSwapChain,
+ const SwapChainDescriptor* descriptor) {
+ Ref<SwapChain> swapchain = AcquireRef(new SwapChain(device, surface, descriptor));
+ DAWN_TRY(swapchain->Initialize(previousSwapChain));
+ return swapchain;
+}
- return {};
- }
+SwapChain::~SwapChain() = default;
- // SwapChain
+void SwapChain::DestroyImpl() {
+ SwapChainBase::DestroyImpl();
+ DetachFromSurface();
+}
- // static
- ResultOrError<Ref<SwapChain>> SwapChain::Create(Device* device,
- Surface* surface,
- NewSwapChainBase* previousSwapChain,
- const SwapChainDescriptor* descriptor) {
- Ref<SwapChain> swapchain = AcquireRef(new SwapChain(device, surface, descriptor));
- DAWN_TRY(swapchain->Initialize(previousSwapChain));
- return swapchain;
- }
+// Initializes the swapchain on the surface. Note that `previousSwapChain` may or may not be
+// nullptr. If it is not nullptr, it is the swapchain previously in use on the surface, and we
+// have a chance to reuse its underlying IDXGISwapChain and "buffers".
+MaybeError SwapChain::Initialize(NewSwapChainBase* previousSwapChain) {
+ ASSERT(GetSurface()->GetType() == Surface::Type::WindowsHWND);
- SwapChain::~SwapChain() = default;
+ // Precompute the configuration parameters we want for the DXGI swapchain.
+ mConfig.bufferCount = PresentModeToBufferCount(GetPresentMode());
+ mConfig.format = D3D12TextureFormat(GetFormat());
+ mConfig.swapChainFlags = PresentModeToSwapChainFlags(GetPresentMode());
+ mConfig.usage = ToDXGIUsage(GetUsage());
- void SwapChain::DestroyImpl() {
- SwapChainBase::DestroyImpl();
- DetachFromSurface();
+    // There is no previous swapchain, so we can create one directly; there is nothing else
+    // to do.
+ if (previousSwapChain == nullptr) {
+ return InitializeSwapChainFromScratch();
}
- // Initializes the swapchain on the surface. Note that `previousSwapChain` may or may not be
- // nullptr. If it is not nullptr it means that it is the swapchain previously in use on the
- // surface and that we have a chance to reuse it's underlying IDXGISwapChain and "buffers".
- MaybeError SwapChain::Initialize(NewSwapChainBase* previousSwapChain) {
- ASSERT(GetSurface()->GetType() == Surface::Type::WindowsHWND);
-
- // Precompute the configuration parameters we want for the DXGI swapchain.
- mConfig.bufferCount = PresentModeToBufferCount(GetPresentMode());
- mConfig.format = D3D12TextureFormat(GetFormat());
- mConfig.swapChainFlags = PresentModeToSwapChainFlags(GetPresentMode());
- mConfig.usage = ToDXGIUsage(GetUsage());
-
- // There is no previous swapchain so we can create one directly and don't have anything else
- // to do.
- if (previousSwapChain == nullptr) {
- return InitializeSwapChainFromScratch();
- }
-
- // TODO(crbug.com/dawn/269): figure out what should happen when surfaces are used by
- // multiple backends one after the other. It probably needs to block until the backend
- // and GPU are completely finished with the previous swapchain.
- DAWN_INVALID_IF(previousSwapChain->GetBackendType() != wgpu::BackendType::D3D12,
- "D3D12 SwapChain cannot switch backend types from %s to %s.",
- previousSwapChain->GetBackendType(), wgpu::BackendType::D3D12);
-
- // TODO(crbug.com/dawn/269): use ToBackend once OldSwapChainBase is removed.
- SwapChain* previousD3D12SwapChain = static_cast<SwapChain*>(previousSwapChain);
-
- // TODO(crbug.com/dawn/269): Figure out switching an HWND between devices, it might
- // require just losing the reference to the swapchain, but might also need to wait for
- // all previous operations to complete.
- DAWN_INVALID_IF(GetDevice() != previousSwapChain->GetDevice(),
- "D3D12 SwapChain cannot switch between D3D Devices");
-
- // The previous swapchain is on the same device so we want to reuse it but it is still not
- // always possible. Because DXGI requires that a new swapchain be created if the
- // DXGI_SWAP_CHAIN_FLAG_ALLOW_TEARING flag is changed.
- bool canReuseSwapChain =
- ((mConfig.swapChainFlags ^ previousD3D12SwapChain->mConfig.swapChainFlags) &
- DXGI_SWAP_CHAIN_FLAG_ALLOW_TEARING) == 0;
-
- // We can't reuse the previous swapchain, so we destroy it and wait for all of its reference
- // to be forgotten (otherwise DXGI complains that there are outstanding references).
- if (!canReuseSwapChain) {
- DAWN_TRY(previousD3D12SwapChain->DetachAndWaitForDeallocation());
- return InitializeSwapChainFromScratch();
- }
-
- // After all this we know we can reuse the swapchain, see if it is possible to also reuse
- // the buffers.
- mDXGISwapChain = std::move(previousD3D12SwapChain->mDXGISwapChain);
-
- bool canReuseBuffers = GetWidth() == previousSwapChain->GetWidth() &&
- GetHeight() == previousSwapChain->GetHeight() &&
- GetFormat() == previousSwapChain->GetFormat() &&
- GetPresentMode() == previousSwapChain->GetPresentMode();
- if (canReuseBuffers) {
- mBuffers = std::move(previousD3D12SwapChain->mBuffers);
- mBufferLastUsedSerials = std::move(previousD3D12SwapChain->mBufferLastUsedSerials);
- mCurrentBuffer = previousD3D12SwapChain->mCurrentBuffer;
- return {};
- }
-
- // We can't reuse the buffers so we need to resize, IDXGSwapChain->ResizeBuffers requires
- // that all references to buffers are lost before it is called. Contrary to D3D11, the
- // application is responsible for keeping references to the buffers until the GPU is done
- // using them so we have no choice but to synchrounously wait for all operations to complete
- // on the previous swapchain and then lose references to its buffers.
+ // TODO(crbug.com/dawn/269): figure out what should happen when surfaces are used by
+ // multiple backends one after the other. It probably needs to block until the backend
+ // and GPU are completely finished with the previous swapchain.
+ DAWN_INVALID_IF(previousSwapChain->GetBackendType() != wgpu::BackendType::D3D12,
+ "D3D12 SwapChain cannot switch backend types from %s to %s.",
+ previousSwapChain->GetBackendType(), wgpu::BackendType::D3D12);
+
+ // TODO(crbug.com/dawn/269): use ToBackend once OldSwapChainBase is removed.
+ SwapChain* previousD3D12SwapChain = static_cast<SwapChain*>(previousSwapChain);
+
+ // TODO(crbug.com/dawn/269): Figure out switching an HWND between devices, it might
+ // require just losing the reference to the swapchain, but might also need to wait for
+ // all previous operations to complete.
+ DAWN_INVALID_IF(GetDevice() != previousSwapChain->GetDevice(),
+ "D3D12 SwapChain cannot switch between D3D Devices");
+
+    // The previous swapchain is on the same device, so we want to reuse it, but that is not
+    // always possible because DXGI requires that a new swapchain be created if the
+    // DXGI_SWAP_CHAIN_FLAG_ALLOW_TEARING flag is changed.
+ bool canReuseSwapChain =
+ ((mConfig.swapChainFlags ^ previousD3D12SwapChain->mConfig.swapChainFlags) &
+ DXGI_SWAP_CHAIN_FLAG_ALLOW_TEARING) == 0;
+
+    // We can't reuse the previous swapchain, so we destroy it and wait for all of its references
+    // to be forgotten (otherwise DXGI complains that there are outstanding references).
+ if (!canReuseSwapChain) {
DAWN_TRY(previousD3D12SwapChain->DetachAndWaitForDeallocation());
- DAWN_TRY(
- CheckHRESULT(mDXGISwapChain->ResizeBuffers(mConfig.bufferCount, GetWidth(), GetHeight(),
- mConfig.format, mConfig.swapChainFlags),
- "IDXGISwapChain::ResizeBuffer"));
- return CollectSwapChainBuffers();
+ return InitializeSwapChainFromScratch();
}
- MaybeError SwapChain::InitializeSwapChainFromScratch() {
- ASSERT(mDXGISwapChain == nullptr);
-
- Device* device = ToBackend(GetDevice());
-
- DXGI_SWAP_CHAIN_DESC1 swapChainDesc = {};
- swapChainDesc.Width = GetWidth();
- swapChainDesc.Height = GetHeight();
- swapChainDesc.Format = mConfig.format;
- swapChainDesc.Stereo = false;
- swapChainDesc.SampleDesc.Count = 1;
- swapChainDesc.SampleDesc.Quality = 0;
- swapChainDesc.BufferUsage = mConfig.usage;
- swapChainDesc.BufferCount = mConfig.bufferCount;
- swapChainDesc.Scaling = DXGI_SCALING_STRETCH;
- swapChainDesc.SwapEffect = DXGI_SWAP_EFFECT_FLIP_DISCARD;
- swapChainDesc.AlphaMode = DXGI_ALPHA_MODE_IGNORE;
- swapChainDesc.Flags = mConfig.swapChainFlags;
-
- ComPtr<IDXGIFactory2> factory2 = nullptr;
- DAWN_TRY(CheckHRESULT(device->GetFactory()->QueryInterface(IID_PPV_ARGS(&factory2)),
- "Getting IDXGIFactory2"));
-
- ComPtr<IDXGISwapChain1> swapChain1;
- switch (GetSurface()->GetType()) {
- case Surface::Type::WindowsHWND: {
- DAWN_TRY(CheckHRESULT(
- factory2->CreateSwapChainForHwnd(device->GetCommandQueue().Get(),
- static_cast<HWND>(GetSurface()->GetHWND()),
- &swapChainDesc, nullptr, nullptr, &swapChain1),
- "Creating the IDXGISwapChain1"));
- break;
- }
- case Surface::Type::WindowsCoreWindow: {
- DAWN_TRY(CheckHRESULT(
- factory2->CreateSwapChainForCoreWindow(device->GetCommandQueue().Get(),
- GetSurface()->GetCoreWindow(),
- &swapChainDesc, nullptr, &swapChain1),
- "Creating the IDXGISwapChain1"));
- break;
- }
- case Surface::Type::WindowsSwapChainPanel: {
- DAWN_TRY(CheckHRESULT(
- factory2->CreateSwapChainForComposition(device->GetCommandQueue().Get(),
- &swapChainDesc, nullptr, &swapChain1),
- "Creating the IDXGISwapChain1"));
- ComPtr<ISwapChainPanelNative> swapChainPanelNative;
- DAWN_TRY(CheckHRESULT(GetSurface()->GetSwapChainPanel()->QueryInterface(
- IID_PPV_ARGS(&swapChainPanelNative)),
- "Getting ISwapChainPanelNative"));
- DAWN_TRY(CheckHRESULT(swapChainPanelNative->SetSwapChain(swapChain1.Get()),
- "Setting SwapChain"));
- break;
- }
- default:
- UNREACHABLE();
- }
-
- DAWN_TRY(CheckHRESULT(swapChain1.As(&mDXGISwapChain), "Gettting IDXGISwapChain1"));
-
- return CollectSwapChainBuffers();
+ // After all this we know we can reuse the swapchain, see if it is possible to also reuse
+ // the buffers.
+ mDXGISwapChain = std::move(previousD3D12SwapChain->mDXGISwapChain);
+
+ bool canReuseBuffers = GetWidth() == previousSwapChain->GetWidth() &&
+ GetHeight() == previousSwapChain->GetHeight() &&
+ GetFormat() == previousSwapChain->GetFormat() &&
+ GetPresentMode() == previousSwapChain->GetPresentMode();
+ if (canReuseBuffers) {
+ mBuffers = std::move(previousD3D12SwapChain->mBuffers);
+ mBufferLastUsedSerials = std::move(previousD3D12SwapChain->mBufferLastUsedSerials);
+ mCurrentBuffer = previousD3D12SwapChain->mCurrentBuffer;
+ return {};
}
- MaybeError SwapChain::CollectSwapChainBuffers() {
- ASSERT(mDXGISwapChain != nullptr);
- ASSERT(mBuffers.empty());
-
- mBuffers.resize(mConfig.bufferCount);
- for (uint32_t i = 0; i < mConfig.bufferCount; i++) {
- DAWN_TRY(CheckHRESULT(mDXGISwapChain->GetBuffer(i, IID_PPV_ARGS(&mBuffers[i])),
- "Getting IDXGISwapChain buffer"));
+    // We can't reuse the buffers, so we need to resize them. IDXGISwapChain->ResizeBuffers
+    // requires that all references to the buffers are lost before it is called. Contrary to
+    // D3D11, the application is responsible for keeping references to the buffers until the
+    // GPU is done using them, so we have no choice but to synchronously wait for all operations
+    // to complete on the previous swapchain and then lose references to its buffers.
+ DAWN_TRY(previousD3D12SwapChain->DetachAndWaitForDeallocation());
+ DAWN_TRY(
+ CheckHRESULT(mDXGISwapChain->ResizeBuffers(mConfig.bufferCount, GetWidth(), GetHeight(),
+ mConfig.format, mConfig.swapChainFlags),
+                     "IDXGISwapChain::ResizeBuffers"));
+ return CollectSwapChainBuffers();
+}
+
+MaybeError SwapChain::InitializeSwapChainFromScratch() {
+ ASSERT(mDXGISwapChain == nullptr);
+
+ Device* device = ToBackend(GetDevice());
+
+ DXGI_SWAP_CHAIN_DESC1 swapChainDesc = {};
+ swapChainDesc.Width = GetWidth();
+ swapChainDesc.Height = GetHeight();
+ swapChainDesc.Format = mConfig.format;
+ swapChainDesc.Stereo = false;
+ swapChainDesc.SampleDesc.Count = 1;
+ swapChainDesc.SampleDesc.Quality = 0;
+ swapChainDesc.BufferUsage = mConfig.usage;
+ swapChainDesc.BufferCount = mConfig.bufferCount;
+ swapChainDesc.Scaling = DXGI_SCALING_STRETCH;
+ swapChainDesc.SwapEffect = DXGI_SWAP_EFFECT_FLIP_DISCARD;
+ swapChainDesc.AlphaMode = DXGI_ALPHA_MODE_IGNORE;
+ swapChainDesc.Flags = mConfig.swapChainFlags;
+
+ ComPtr<IDXGIFactory2> factory2 = nullptr;
+ DAWN_TRY(CheckHRESULT(device->GetFactory()->QueryInterface(IID_PPV_ARGS(&factory2)),
+ "Getting IDXGIFactory2"));
+
+ ComPtr<IDXGISwapChain1> swapChain1;
+ switch (GetSurface()->GetType()) {
+ case Surface::Type::WindowsHWND: {
+ DAWN_TRY(CheckHRESULT(
+ factory2->CreateSwapChainForHwnd(device->GetCommandQueue().Get(),
+ static_cast<HWND>(GetSurface()->GetHWND()),
+ &swapChainDesc, nullptr, nullptr, &swapChain1),
+ "Creating the IDXGISwapChain1"));
+ break;
}
-
- // Pretend all the buffers were last used at the beginning of time.
- mBufferLastUsedSerials.resize(mConfig.bufferCount, ExecutionSerial(0));
- return {};
+ case Surface::Type::WindowsCoreWindow: {
+ DAWN_TRY(CheckHRESULT(
+ factory2->CreateSwapChainForCoreWindow(device->GetCommandQueue().Get(),
+ GetSurface()->GetCoreWindow(),
+ &swapChainDesc, nullptr, &swapChain1),
+ "Creating the IDXGISwapChain1"));
+ break;
+ }
+ case Surface::Type::WindowsSwapChainPanel: {
+ DAWN_TRY(CheckHRESULT(
+ factory2->CreateSwapChainForComposition(device->GetCommandQueue().Get(),
+ &swapChainDesc, nullptr, &swapChain1),
+ "Creating the IDXGISwapChain1"));
+ ComPtr<ISwapChainPanelNative> swapChainPanelNative;
+ DAWN_TRY(CheckHRESULT(GetSurface()->GetSwapChainPanel()->QueryInterface(
+ IID_PPV_ARGS(&swapChainPanelNative)),
+ "Getting ISwapChainPanelNative"));
+ DAWN_TRY(CheckHRESULT(swapChainPanelNative->SetSwapChain(swapChain1.Get()),
+ "Setting SwapChain"));
+ break;
+ }
+ default:
+ UNREACHABLE();
}
- MaybeError SwapChain::PresentImpl() {
- Device* device = ToBackend(GetDevice());
-
- // Transition the texture to the present state as required by IDXGISwapChain1::Present()
- // TODO(crbug.com/dawn/269): Remove the need for this by eagerly transitioning the
- // presentable texture to present at the end of submits that use them.
- CommandRecordingContext* commandContext;
- DAWN_TRY_ASSIGN(commandContext, device->GetPendingCommandContext());
- mApiTexture->TrackUsageAndTransitionNow(commandContext, kPresentTextureUsage,
- mApiTexture->GetAllSubresources());
- DAWN_TRY(device->ExecutePendingCommandContext());
-
- // Do the actual present. DXGI_STATUS_OCCLUDED is a valid return value that's just a
- // message to the application that it could stop rendering.
- HRESULT presentResult =
- mDXGISwapChain->Present(PresentModeToSwapInterval(GetPresentMode()), 0);
- if (presentResult != DXGI_STATUS_OCCLUDED) {
- DAWN_TRY(CheckHRESULT(presentResult, "IDXGISwapChain::Present"));
- }
+    DAWN_TRY(CheckHRESULT(swapChain1.As(&mDXGISwapChain), "Getting IDXGISwapChain1"));
- // Record that "new" is the last time the buffer has been used.
- DAWN_TRY(device->NextSerial());
- mBufferLastUsedSerials[mCurrentBuffer] = device->GetPendingCommandSerial();
+ return CollectSwapChainBuffers();
+}
- mApiTexture->APIDestroy();
- mApiTexture = nullptr;
+MaybeError SwapChain::CollectSwapChainBuffers() {
+ ASSERT(mDXGISwapChain != nullptr);
+ ASSERT(mBuffers.empty());
- return {};
+ mBuffers.resize(mConfig.bufferCount);
+ for (uint32_t i = 0; i < mConfig.bufferCount; i++) {
+ DAWN_TRY(CheckHRESULT(mDXGISwapChain->GetBuffer(i, IID_PPV_ARGS(&mBuffers[i])),
+ "Getting IDXGISwapChain buffer"));
}
- ResultOrError<Ref<TextureViewBase>> SwapChain::GetCurrentTextureViewImpl() {
- Device* device = ToBackend(GetDevice());
-
- // Synchronously wait until previous operations on the next swapchain buffer are finished.
- // This is the logic that performs frame pacing.
- // TODO(crbug.com/dawn/269): Consider whether this should be lifted for Mailbox so that
- // there is not frame pacing.
- mCurrentBuffer = mDXGISwapChain->GetCurrentBackBufferIndex();
- DAWN_TRY(device->WaitForSerial(mBufferLastUsedSerials[mCurrentBuffer]));
-
- // Create the API side objects for this use of the swapchain's buffer.
- TextureDescriptor descriptor = GetSwapChainBaseTextureDescriptor(this);
- DAWN_TRY_ASSIGN(mApiTexture, Texture::Create(ToBackend(GetDevice()), &descriptor,
- mBuffers[mCurrentBuffer]));
- return mApiTexture->CreateView();
+ // Pretend all the buffers were last used at the beginning of time.
+ mBufferLastUsedSerials.resize(mConfig.bufferCount, ExecutionSerial(0));
+ return {};
+}
+
+MaybeError SwapChain::PresentImpl() {
+ Device* device = ToBackend(GetDevice());
+
+ // Transition the texture to the present state as required by IDXGISwapChain1::Present()
+ // TODO(crbug.com/dawn/269): Remove the need for this by eagerly transitioning the
+ // presentable texture to present at the end of submits that use them.
+ CommandRecordingContext* commandContext;
+ DAWN_TRY_ASSIGN(commandContext, device->GetPendingCommandContext());
+ mApiTexture->TrackUsageAndTransitionNow(commandContext, kPresentTextureUsage,
+ mApiTexture->GetAllSubresources());
+ DAWN_TRY(device->ExecutePendingCommandContext());
+
+ // Do the actual present. DXGI_STATUS_OCCLUDED is a valid return value that's just a
+ // message to the application that it could stop rendering.
+ HRESULT presentResult = mDXGISwapChain->Present(PresentModeToSwapInterval(GetPresentMode()), 0);
+ if (presentResult != DXGI_STATUS_OCCLUDED) {
+ DAWN_TRY(CheckHRESULT(presentResult, "IDXGISwapChain::Present"));
}
- MaybeError SwapChain::DetachAndWaitForDeallocation() {
- DetachFromSurface();
-
- // DetachFromSurface calls Texture->Destroy that enqueues the D3D12 resource in a
- // SerialQueue with the current "pending serial" so that we don't destroy the texture
- // before it is finished being used. Flush the commands and wait for that serial to be
- // passed, then Tick the device to make sure the reference to the D3D12 texture is removed.
- Device* device = ToBackend(GetDevice());
- DAWN_TRY(device->NextSerial());
- DAWN_TRY(device->WaitForSerial(device->GetLastSubmittedCommandSerial()));
- return device->TickImpl();
+ // Record that "new" is the last time the buffer has been used.
+ DAWN_TRY(device->NextSerial());
+ mBufferLastUsedSerials[mCurrentBuffer] = device->GetPendingCommandSerial();
+
+ mApiTexture->APIDestroy();
+ mApiTexture = nullptr;
+
+ return {};
+}
+
+ResultOrError<Ref<TextureViewBase>> SwapChain::GetCurrentTextureViewImpl() {
+ Device* device = ToBackend(GetDevice());
+
+ // Synchronously wait until previous operations on the next swapchain buffer are finished.
+ // This is the logic that performs frame pacing.
+ // TODO(crbug.com/dawn/269): Consider whether this should be lifted for Mailbox so that
+    // there is no frame pacing.
+ mCurrentBuffer = mDXGISwapChain->GetCurrentBackBufferIndex();
+ DAWN_TRY(device->WaitForSerial(mBufferLastUsedSerials[mCurrentBuffer]));
+
+ // Create the API side objects for this use of the swapchain's buffer.
+ TextureDescriptor descriptor = GetSwapChainBaseTextureDescriptor(this);
+ DAWN_TRY_ASSIGN(mApiTexture,
+ Texture::Create(ToBackend(GetDevice()), &descriptor, mBuffers[mCurrentBuffer]));
+ return mApiTexture->CreateView();
+}
+
+MaybeError SwapChain::DetachAndWaitForDeallocation() {
+ DetachFromSurface();
+
+ // DetachFromSurface calls Texture->Destroy that enqueues the D3D12 resource in a
+ // SerialQueue with the current "pending serial" so that we don't destroy the texture
+ // before it is finished being used. Flush the commands and wait for that serial to be
+ // passed, then Tick the device to make sure the reference to the D3D12 texture is removed.
+ Device* device = ToBackend(GetDevice());
+ DAWN_TRY(device->NextSerial());
+ DAWN_TRY(device->WaitForSerial(device->GetLastSubmittedCommandSerial()));
+ return device->TickImpl();
+}
+
+void SwapChain::DetachFromSurfaceImpl() {
+ if (mApiTexture != nullptr) {
+ mApiTexture->APIDestroy();
+ mApiTexture = nullptr;
}
- void SwapChain::DetachFromSurfaceImpl() {
- if (mApiTexture != nullptr) {
- mApiTexture->APIDestroy();
- mApiTexture = nullptr;
- }
-
- mDXGISwapChain = nullptr;
- mBuffers.clear();
- }
+ mDXGISwapChain = nullptr;
+ mBuffers.clear();
+}
} // namespace dawn::native::d3d12
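The three present-mode helpers at the top of this file can be read as a single table; the sketch below folds them together purely for reference. The struct and function names are illustrative, and the same headers as the file above are assumed.

// How each wgpu::PresentMode maps onto the DXGI swapchain parameters chosen above.
struct DXGIPresentParams {
    UINT bufferCount;     // PresentModeToBufferCount()
    UINT syncInterval;    // PresentModeToSwapInterval()
    UINT swapChainFlags;  // PresentModeToSwapChainFlags()
};

inline DXGIPresentParams GetPresentParams(wgpu::PresentMode mode) {
    switch (mode) {
        case wgpu::PresentMode::Immediate:
            return {2, 0,
                    DXGI_SWAP_CHAIN_FLAG_ALLOW_MODE_SWITCH | DXGI_SWAP_CHAIN_FLAG_ALLOW_TEARING};
        case wgpu::PresentMode::Fifo:
            return {2, 1, DXGI_SWAP_CHAIN_FLAG_ALLOW_MODE_SWITCH};
        case wgpu::PresentMode::Mailbox:
            return {3, 0, DXGI_SWAP_CHAIN_FLAG_ALLOW_MODE_SWITCH};
    }
    UNREACHABLE();
}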
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/SwapChainD3D12.h b/chromium/third_party/dawn/src/dawn/native/d3d12/SwapChainD3D12.h
index 4aff922694d..53ad519c55e 100644
--- a/chromium/third_party/dawn/src/dawn/native/d3d12/SwapChainD3D12.h
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/SwapChainD3D12.h
@@ -15,6 +15,8 @@
#ifndef SRC_DAWN_NATIVE_D3D12_SWAPCHAIND3D12_H_
#define SRC_DAWN_NATIVE_D3D12_SWAPCHAIND3D12_H_
+#include <vector>
+
#include "dawn/native/SwapChain.h"
#include "dawn/native/IntegerTypes.h"
@@ -22,68 +24,68 @@
namespace dawn::native::d3d12 {
- class Device;
- class Texture;
+class Device;
+class Texture;
- class OldSwapChain final : public OldSwapChainBase {
- public:
- static Ref<OldSwapChain> Create(Device* device, const SwapChainDescriptor* descriptor);
+class OldSwapChain final : public OldSwapChainBase {
+ public:
+ static Ref<OldSwapChain> Create(Device* device, const SwapChainDescriptor* descriptor);
- protected:
- OldSwapChain(Device* device, const SwapChainDescriptor* descriptor);
- ~OldSwapChain() override;
- TextureBase* GetNextTextureImpl(const TextureDescriptor* descriptor) override;
- MaybeError OnBeforePresent(TextureViewBase* view) override;
+ protected:
+ OldSwapChain(Device* device, const SwapChainDescriptor* descriptor);
+ ~OldSwapChain() override;
+ TextureBase* GetNextTextureImpl(const TextureDescriptor* descriptor) override;
+ MaybeError OnBeforePresent(TextureViewBase* view) override;
- wgpu::TextureUsage mTextureUsage;
- };
+ wgpu::TextureUsage mTextureUsage;
+};
+
+class SwapChain final : public NewSwapChainBase {
+ public:
+ static ResultOrError<Ref<SwapChain>> Create(Device* device,
+ Surface* surface,
+ NewSwapChainBase* previousSwapChain,
+ const SwapChainDescriptor* descriptor);
+
+ private:
+ ~SwapChain() override;
+
+ void DestroyImpl() override;
+
+ using NewSwapChainBase::NewSwapChainBase;
+ MaybeError Initialize(NewSwapChainBase* previousSwapChain);
- class SwapChain final : public NewSwapChainBase {
- public:
- static ResultOrError<Ref<SwapChain>> Create(Device* device,
- Surface* surface,
- NewSwapChainBase* previousSwapChain,
- const SwapChainDescriptor* descriptor);
-
- private:
- ~SwapChain() override;
-
- void DestroyImpl() override;
-
- using NewSwapChainBase::NewSwapChainBase;
- MaybeError Initialize(NewSwapChainBase* previousSwapChain);
-
- struct Config {
- // Information that's passed to the D3D12 swapchain creation call.
- UINT bufferCount;
- UINT swapChainFlags;
- DXGI_FORMAT format;
- DXGI_USAGE usage;
- };
-
- // NewSwapChainBase implementation
- MaybeError PresentImpl() override;
- ResultOrError<Ref<TextureViewBase>> GetCurrentTextureViewImpl() override;
- void DetachFromSurfaceImpl() override;
-
- // Does the swapchain initialization steps assuming there is nothing we can reuse.
- MaybeError InitializeSwapChainFromScratch();
- // Does the swapchain initialization step of gathering the buffers.
- MaybeError CollectSwapChainBuffers();
- // Calls DetachFromSurface but also synchronously waits until all references to the
- // swapchain and buffers are removed, as that's a constraint for some DXGI operations.
- MaybeError DetachAndWaitForDeallocation();
-
- Config mConfig;
-
- ComPtr<IDXGISwapChain3> mDXGISwapChain;
- std::vector<ComPtr<ID3D12Resource>> mBuffers;
- std::vector<ExecutionSerial> mBufferLastUsedSerials;
- uint32_t mCurrentBuffer = 0;
-
- Ref<Texture> mApiTexture;
+ struct Config {
+ // Information that's passed to the D3D12 swapchain creation call.
+ UINT bufferCount;
+ UINT swapChainFlags;
+ DXGI_FORMAT format;
+ DXGI_USAGE usage;
};
+ // NewSwapChainBase implementation
+ MaybeError PresentImpl() override;
+ ResultOrError<Ref<TextureViewBase>> GetCurrentTextureViewImpl() override;
+ void DetachFromSurfaceImpl() override;
+
+ // Does the swapchain initialization steps assuming there is nothing we can reuse.
+ MaybeError InitializeSwapChainFromScratch();
+ // Does the swapchain initialization step of gathering the buffers.
+ MaybeError CollectSwapChainBuffers();
+ // Calls DetachFromSurface but also synchronously waits until all references to the
+ // swapchain and buffers are removed, as that's a constraint for some DXGI operations.
+ MaybeError DetachAndWaitForDeallocation();
+
+ Config mConfig;
+
+ ComPtr<IDXGISwapChain3> mDXGISwapChain;
+ std::vector<ComPtr<ID3D12Resource>> mBuffers;
+ std::vector<ExecutionSerial> mBufferLastUsedSerials;
+ uint32_t mCurrentBuffer = 0;
+
+ Ref<Texture> mApiTexture;
+};
+
} // namespace dawn::native::d3d12
#endif // SRC_DAWN_NATIVE_D3D12_SWAPCHAIND3D12_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/TextureCopySplitter.cpp b/chromium/third_party/dawn/src/dawn/native/d3d12/TextureCopySplitter.cpp
index 83e55fd18f0..7dac6625756 100644
--- a/chromium/third_party/dawn/src/dawn/native/d3d12/TextureCopySplitter.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/TextureCopySplitter.cpp
@@ -20,520 +20,518 @@
namespace dawn::native::d3d12 {
- namespace {
- Origin3D ComputeTexelOffsets(const TexelBlockInfo& blockInfo,
- uint32_t offset,
- uint32_t bytesPerRow) {
- ASSERT(bytesPerRow != 0);
- uint32_t byteOffsetX = offset % bytesPerRow;
- uint32_t byteOffsetY = offset - byteOffsetX;
-
- return {byteOffsetX / blockInfo.byteSize * blockInfo.width,
- byteOffsetY / bytesPerRow * blockInfo.height, 0};
- }
-
- uint64_t OffsetToFirstCopiedTexel(const TexelBlockInfo& blockInfo,
- uint32_t bytesPerRow,
- uint64_t alignedOffset,
- Origin3D bufferOffset) {
- ASSERT(bufferOffset.z == 0);
- return alignedOffset + bufferOffset.x * blockInfo.byteSize / blockInfo.width +
- bufferOffset.y * bytesPerRow / blockInfo.height;
- }
+namespace {
+Origin3D ComputeTexelOffsets(const TexelBlockInfo& blockInfo,
+ uint32_t offset,
+ uint32_t bytesPerRow) {
+ ASSERT(bytesPerRow != 0);
+ uint32_t byteOffsetX = offset % bytesPerRow;
+ uint32_t byteOffsetY = offset - byteOffsetX;
+
+ return {byteOffsetX / blockInfo.byteSize * blockInfo.width,
+ byteOffsetY / bytesPerRow * blockInfo.height, 0};
+}
+
+uint64_t OffsetToFirstCopiedTexel(const TexelBlockInfo& blockInfo,
+ uint32_t bytesPerRow,
+ uint64_t alignedOffset,
+ Origin3D bufferOffset) {
+ ASSERT(bufferOffset.z == 0);
+ return alignedOffset + bufferOffset.x * blockInfo.byteSize / blockInfo.width +
+ bufferOffset.y * bytesPerRow / blockInfo.height;
+}
+
+uint64_t AlignDownForDataPlacement(uint32_t offset) {
+ return offset & ~static_cast<uint64_t>(D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT - 1);
+}
+} // namespace
+
+TextureCopySubresource::CopyInfo* TextureCopySubresource::AddCopy() {
+ ASSERT(this->count < kMaxTextureCopyRegions);
+ return &this->copies[this->count++];
+}
+
+TextureCopySubresource Compute2DTextureCopySubresource(Origin3D origin,
+ Extent3D copySize,
+ const TexelBlockInfo& blockInfo,
+ uint64_t offset,
+ uint32_t bytesPerRow) {
+ TextureCopySubresource copy;
+
+ ASSERT(bytesPerRow % blockInfo.byteSize == 0);
+
+ // The copies must be 512-aligned. To do this, we calculate the first 512-aligned address
+ // preceding our data.
+ uint64_t alignedOffset = AlignDownForDataPlacement(offset);
+
+ // If the provided offset to the data was already 512-aligned, we can simply copy the data
+ // without further translation.
+ if (offset == alignedOffset) {
+ copy.count = 1;
- uint64_t AlignDownForDataPlacement(uint32_t offset) {
- return offset & ~static_cast<uint64_t>(D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT - 1);
- }
- } // namespace
+ copy.copies[0].alignedOffset = alignedOffset;
+ copy.copies[0].textureOffset = origin;
+ copy.copies[0].copySize = copySize;
+ copy.copies[0].bufferOffset = {0, 0, 0};
+ copy.copies[0].bufferSize = copySize;
- TextureCopySubresource::CopyInfo* TextureCopySubresource::AddCopy() {
- ASSERT(this->count < kMaxTextureCopyRegions);
- return &this->copies[this->count++];
+ return copy;
}
- TextureCopySubresource Compute2DTextureCopySubresource(Origin3D origin,
- Extent3D copySize,
- const TexelBlockInfo& blockInfo,
- uint64_t offset,
- uint32_t bytesPerRow) {
- TextureCopySubresource copy;
-
- ASSERT(bytesPerRow % blockInfo.byteSize == 0);
-
- // The copies must be 512-aligned. To do this, we calculate the first 512-aligned address
- // preceding our data.
- uint64_t alignedOffset = AlignDownForDataPlacement(offset);
-
- // If the provided offset to the data was already 512-aligned, we can simply copy the data
- // without further translation.
- if (offset == alignedOffset) {
- copy.count = 1;
-
- copy.copies[0].alignedOffset = alignedOffset;
- copy.copies[0].textureOffset = origin;
- copy.copies[0].copySize = copySize;
- copy.copies[0].bufferOffset = {0, 0, 0};
- copy.copies[0].bufferSize = copySize;
-
- return copy;
- }
-
- ASSERT(alignedOffset < offset);
- ASSERT(offset - alignedOffset < D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT);
-
- // We must reinterpret our aligned offset into X and Y offsets with respect to the row
- // pitch.
- //
- // You can visualize the data in the buffer like this:
- // |-----------------------++++++++++++++++++++++++++++++++|
- // ^ 512-aligned address ^ Aligned offset ^ End of copy data
- //
- // Now when you consider the row pitch, you can visualize the data like this:
- // |~~~~~~~~~~~~~~~~|
- // |~~~~~+++++++++++|
- // |++++++++++++++++|
- // |+++++~~~~~~~~~~~|
- // |<---row pitch-->|
- //
- // The X and Y offsets calculated in ComputeTexelOffsets can be visualized like this:
- // |YYYYYYYYYYYYYYYY|
- // |XXXXXX++++++++++|
- // |++++++++++++++++|
- // |++++++~~~~~~~~~~|
- // |<---row pitch-->|
- Origin3D texelOffset = ComputeTexelOffsets(
- blockInfo, static_cast<uint32_t>(offset - alignedOffset), bytesPerRow);
-
- ASSERT(texelOffset.y <= blockInfo.height);
- ASSERT(texelOffset.z == 0);
-
- uint32_t copyBytesPerRowPitch = copySize.width / blockInfo.width * blockInfo.byteSize;
- uint32_t byteOffsetInRowPitch = texelOffset.x / blockInfo.width * blockInfo.byteSize;
- if (copyBytesPerRowPitch + byteOffsetInRowPitch <= bytesPerRow) {
- // The region's rows fit inside the bytes per row. In this case, extend the width of the
- // PlacedFootprint and copy the buffer with an offset location
- // |<------------- bytes per row ------------->|
- //
- // |-------------------------------------------|
- // | |
- // | +++++++++++++++++~~~~~~~~~|
- // |~~~~~~~~~~~~~~~~~+++++++++++++++++~~~~~~~~~|
- // |~~~~~~~~~~~~~~~~~+++++++++++++++++~~~~~~~~~|
- // |~~~~~~~~~~~~~~~~~+++++++++++++++++~~~~~~~~~|
- // |~~~~~~~~~~~~~~~~~+++++++++++++++++ |
- // |-------------------------------------------|
-
- // Copy 0:
- // |----------------------------------|
- // | |
- // | +++++++++++++++++|
- // |~~~~~~~~~~~~~~~~~+++++++++++++++++|
- // |~~~~~~~~~~~~~~~~~+++++++++++++++++|
- // |~~~~~~~~~~~~~~~~~+++++++++++++++++|
- // |~~~~~~~~~~~~~~~~~+++++++++++++++++|
- // |----------------------------------|
-
- copy.count = 1;
-
- copy.copies[0].alignedOffset = alignedOffset;
- copy.copies[0].textureOffset = origin;
- copy.copies[0].copySize = copySize;
- copy.copies[0].bufferOffset = texelOffset;
-
- copy.copies[0].bufferSize.width = copySize.width + texelOffset.x;
- copy.copies[0].bufferSize.height = copySize.height + texelOffset.y;
- copy.copies[0].bufferSize.depthOrArrayLayers = copySize.depthOrArrayLayers;
-
- return copy;
- }
-
- // The region's rows straddle the bytes per row. Split the copy into two copies
+ ASSERT(alignedOffset < offset);
+ ASSERT(offset - alignedOffset < D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT);
+
+ // We must reinterpret our aligned offset into X and Y offsets with respect to the row
+ // pitch.
+ //
+ // You can visualize the data in the buffer like this:
+ // |-----------------------++++++++++++++++++++++++++++++++|
+ // ^ 512-aligned address ^ Aligned offset ^ End of copy data
+ //
+ // Now when you consider the row pitch, you can visualize the data like this:
+ // |~~~~~~~~~~~~~~~~|
+ // |~~~~~+++++++++++|
+ // |++++++++++++++++|
+ // |+++++~~~~~~~~~~~|
+ // |<---row pitch-->|
+ //
+ // The X and Y offsets calculated in ComputeTexelOffsets can be visualized like this:
+ // |YYYYYYYYYYYYYYYY|
+ // |XXXXXX++++++++++|
+ // |++++++++++++++++|
+ // |++++++~~~~~~~~~~|
+ // |<---row pitch-->|
+ Origin3D texelOffset =
+ ComputeTexelOffsets(blockInfo, static_cast<uint32_t>(offset - alignedOffset), bytesPerRow);
+
+ ASSERT(texelOffset.y <= blockInfo.height);
+ ASSERT(texelOffset.z == 0);
+
+ uint32_t copyBytesPerRowPitch = copySize.width / blockInfo.width * blockInfo.byteSize;
+ uint32_t byteOffsetInRowPitch = texelOffset.x / blockInfo.width * blockInfo.byteSize;
+ if (copyBytesPerRowPitch + byteOffsetInRowPitch <= bytesPerRow) {
+ // The region's rows fit inside the bytes per row. In this case, extend the width of the
+ // PlacedFootprint and copy the buffer with an offset location
// |<------------- bytes per row ------------->|
//
// |-------------------------------------------|
// | |
- // | ++++++++|
- // |+++++++++~~~~~~~~~~~~~~~~~~~~~~~~~~++++++++|
- // |+++++++++~~~~~~~~~~~~~~~~~~~~~~~~~~++++++++|
- // |+++++++++~~~~~~~~~~~~~~~~~~~~~~~~~~++++++++|
- // |+++++++++~~~~~~~~~~~~~~~~~~~~~~~~~~++++++++|
- // |+++++++++ |
- // |-------------------------------------------|
-
- // Copy 0:
- // |-------------------------------------------|
- // | |
- // | ++++++++|
- // |~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~++++++++|
- // |~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~++++++++|
- // |~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~++++++++|
- // |~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~++++++++|
+ // | +++++++++++++++++~~~~~~~~~|
+ // |~~~~~~~~~~~~~~~~~+++++++++++++++++~~~~~~~~~|
+ // |~~~~~~~~~~~~~~~~~+++++++++++++++++~~~~~~~~~|
+ // |~~~~~~~~~~~~~~~~~+++++++++++++++++~~~~~~~~~|
+ // |~~~~~~~~~~~~~~~~~+++++++++++++++++ |
// |-------------------------------------------|
- // Copy 1:
- // |---------|
- // | |
- // | |
- // |+++++++++|
- // |+++++++++|
- // |+++++++++|
- // |+++++++++|
- // |+++++++++|
- // |---------|
+ // Copy 0:
+ // |----------------------------------|
+ // | |
+ // | +++++++++++++++++|
+ // |~~~~~~~~~~~~~~~~~+++++++++++++++++|
+ // |~~~~~~~~~~~~~~~~~+++++++++++++++++|
+ // |~~~~~~~~~~~~~~~~~+++++++++++++++++|
+ // |~~~~~~~~~~~~~~~~~+++++++++++++++++|
+ // |----------------------------------|
- copy.count = 2;
+ copy.count = 1;
copy.copies[0].alignedOffset = alignedOffset;
copy.copies[0].textureOffset = origin;
-
- ASSERT(bytesPerRow > byteOffsetInRowPitch);
- uint32_t texelsPerRow = bytesPerRow / blockInfo.byteSize * blockInfo.width;
- copy.copies[0].copySize.width = texelsPerRow - texelOffset.x;
- copy.copies[0].copySize.height = copySize.height;
- copy.copies[0].copySize.depthOrArrayLayers = copySize.depthOrArrayLayers;
-
+ copy.copies[0].copySize = copySize;
copy.copies[0].bufferOffset = texelOffset;
- copy.copies[0].bufferSize.width = texelsPerRow;
+
+ copy.copies[0].bufferSize.width = copySize.width + texelOffset.x;
copy.copies[0].bufferSize.height = copySize.height + texelOffset.y;
copy.copies[0].bufferSize.depthOrArrayLayers = copySize.depthOrArrayLayers;
- uint64_t offsetForCopy1 =
- offset + copy.copies[0].copySize.width / blockInfo.width * blockInfo.byteSize;
- uint64_t alignedOffsetForCopy1 = AlignDownForDataPlacement(offsetForCopy1);
- Origin3D texelOffsetForCopy1 = ComputeTexelOffsets(
- blockInfo, static_cast<uint32_t>(offsetForCopy1 - alignedOffsetForCopy1), bytesPerRow);
-
- ASSERT(texelOffsetForCopy1.y <= blockInfo.height);
- ASSERT(texelOffsetForCopy1.z == 0);
-
- copy.copies[1].alignedOffset = alignedOffsetForCopy1;
- copy.copies[1].textureOffset.x = origin.x + copy.copies[0].copySize.width;
- copy.copies[1].textureOffset.y = origin.y;
- copy.copies[1].textureOffset.z = origin.z;
-
- ASSERT(copySize.width > copy.copies[0].copySize.width);
- copy.copies[1].copySize.width = copySize.width - copy.copies[0].copySize.width;
- copy.copies[1].copySize.height = copySize.height;
- copy.copies[1].copySize.depthOrArrayLayers = copySize.depthOrArrayLayers;
-
- copy.copies[1].bufferOffset = texelOffsetForCopy1;
- copy.copies[1].bufferSize.width = copy.copies[1].copySize.width + texelOffsetForCopy1.x;
- copy.copies[1].bufferSize.height = copySize.height + texelOffsetForCopy1.y;
- copy.copies[1].bufferSize.depthOrArrayLayers = copySize.depthOrArrayLayers;
-
return copy;
}
- TextureCopySplits Compute2DTextureCopySplits(Origin3D origin,
- Extent3D copySize,
- const TexelBlockInfo& blockInfo,
- uint64_t offset,
- uint32_t bytesPerRow,
- uint32_t rowsPerImage) {
- TextureCopySplits copies;
-
- const uint64_t bytesPerLayer = bytesPerRow * rowsPerImage;
-
- // The function Compute2DTextureCopySubresource() decides how to split the copy based on:
- // - the alignment of the buffer offset with D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT (512)
- // - the alignment of the buffer offset with D3D12_TEXTURE_DATA_PITCH_ALIGNMENT (256)
- // Each layer of a 2D array might need to be split, but because of the WebGPU
- // constraint that "bytesPerRow" must be a multiple of 256, all odd (resp. all even) layers
- // will be at an offset multiple of 512 of each other, which means they will all result in
- // the same 2D split. Thus we can just compute the copy splits for the first and second
- // layers, and reuse them for the remaining layers by adding the related offset of each
- // layer. Moreover, if "rowsPerImage" is even, both the first and second copy layers can
- // share the same copy split, so in this situation we just need to compute copy split once
- // and reuse it for all the layers.
- Extent3D copyOneLayerSize = copySize;
- Origin3D copyFirstLayerOrigin = origin;
- copyOneLayerSize.depthOrArrayLayers = 1;
- copyFirstLayerOrigin.z = 0;
-
- copies.copySubresources[0] = Compute2DTextureCopySubresource(
- copyFirstLayerOrigin, copyOneLayerSize, blockInfo, offset, bytesPerRow);
-
- // When the copy only refers one texture 2D array layer,
- // copies.copySubresources[1] will never be used so we can safely early return here.
- if (copySize.depthOrArrayLayers == 1) {
- return copies;
- }
-
- if (bytesPerLayer % D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT == 0) {
- copies.copySubresources[1] = copies.copySubresources[0];
- copies.copySubresources[1].copies[0].alignedOffset += bytesPerLayer;
- copies.copySubresources[1].copies[1].alignedOffset += bytesPerLayer;
- } else {
- const uint64_t bufferOffsetNextLayer = offset + bytesPerLayer;
- copies.copySubresources[1] =
- Compute2DTextureCopySubresource(copyFirstLayerOrigin, copyOneLayerSize, blockInfo,
- bufferOffsetNextLayer, bytesPerRow);
- }
-
+ // The region's rows straddle the bytes per row. Split the copy into two copies
+ // |<------------- bytes per row ------------->|
+ //
+ // |-------------------------------------------|
+ // | |
+ // | ++++++++|
+ // |+++++++++~~~~~~~~~~~~~~~~~~~~~~~~~~++++++++|
+ // |+++++++++~~~~~~~~~~~~~~~~~~~~~~~~~~++++++++|
+ // |+++++++++~~~~~~~~~~~~~~~~~~~~~~~~~~++++++++|
+ // |+++++++++~~~~~~~~~~~~~~~~~~~~~~~~~~++++++++|
+ // |+++++++++ |
+ // |-------------------------------------------|
+
+ // Copy 0:
+ // |-------------------------------------------|
+ // | |
+ // | ++++++++|
+ // |~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~++++++++|
+ // |~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~++++++++|
+ // |~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~++++++++|
+ // |~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~++++++++|
+ // |-------------------------------------------|
+
+ // Copy 1:
+ // |---------|
+ // | |
+ // | |
+ // |+++++++++|
+ // |+++++++++|
+ // |+++++++++|
+ // |+++++++++|
+ // |+++++++++|
+ // |---------|
+
+ copy.count = 2;
+
+ copy.copies[0].alignedOffset = alignedOffset;
+ copy.copies[0].textureOffset = origin;
+
+ ASSERT(bytesPerRow > byteOffsetInRowPitch);
+ uint32_t texelsPerRow = bytesPerRow / blockInfo.byteSize * blockInfo.width;
+ copy.copies[0].copySize.width = texelsPerRow - texelOffset.x;
+ copy.copies[0].copySize.height = copySize.height;
+ copy.copies[0].copySize.depthOrArrayLayers = copySize.depthOrArrayLayers;
+
+ copy.copies[0].bufferOffset = texelOffset;
+ copy.copies[0].bufferSize.width = texelsPerRow;
+ copy.copies[0].bufferSize.height = copySize.height + texelOffset.y;
+ copy.copies[0].bufferSize.depthOrArrayLayers = copySize.depthOrArrayLayers;
+
+ uint64_t offsetForCopy1 =
+ offset + copy.copies[0].copySize.width / blockInfo.width * blockInfo.byteSize;
+ uint64_t alignedOffsetForCopy1 = AlignDownForDataPlacement(offsetForCopy1);
+ Origin3D texelOffsetForCopy1 = ComputeTexelOffsets(
+ blockInfo, static_cast<uint32_t>(offsetForCopy1 - alignedOffsetForCopy1), bytesPerRow);
+
+ ASSERT(texelOffsetForCopy1.y <= blockInfo.height);
+ ASSERT(texelOffsetForCopy1.z == 0);
+
+ copy.copies[1].alignedOffset = alignedOffsetForCopy1;
+ copy.copies[1].textureOffset.x = origin.x + copy.copies[0].copySize.width;
+ copy.copies[1].textureOffset.y = origin.y;
+ copy.copies[1].textureOffset.z = origin.z;
+
+ ASSERT(copySize.width > copy.copies[0].copySize.width);
+ copy.copies[1].copySize.width = copySize.width - copy.copies[0].copySize.width;
+ copy.copies[1].copySize.height = copySize.height;
+ copy.copies[1].copySize.depthOrArrayLayers = copySize.depthOrArrayLayers;
+
+ copy.copies[1].bufferOffset = texelOffsetForCopy1;
+ copy.copies[1].bufferSize.width = copy.copies[1].copySize.width + texelOffsetForCopy1.x;
+ copy.copies[1].bufferSize.height = copySize.height + texelOffsetForCopy1.y;
+ copy.copies[1].bufferSize.depthOrArrayLayers = copySize.depthOrArrayLayers;
+
+ return copy;
+}
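
For intuition, the offset arithmetic used above can be exercised in isolation. The sketch below is a hypothetical, standalone rewrite of the align-down/texel-offset step (BlockInfo, Offset2D, AlignDown512 and TexelOffsets are names invented for this example; only the 512 and 256 constants come from D3D12):

#include <cassert>
#include <cstdint>
#include <cstdio>

// Hypothetical stand-ins for the block/offset types used above.
struct BlockInfo {
    uint32_t width;     // texels per block, horizontally
    uint32_t height;    // texels per block, vertically
    uint32_t byteSize;  // bytes per block
};
struct Offset2D {
    uint32_t x;
    uint32_t y;
};

constexpr uint64_t kPlacementAlignment = 512;  // D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT

uint64_t AlignDown512(uint64_t offset) {
    return offset & ~(kPlacementAlignment - 1);
}

// Reinterpret the byte distance from the 512-aligned address as (x, y) block offsets
// with respect to the row pitch, as in the ASCII diagrams above.
Offset2D TexelOffsets(const BlockInfo& block, uint32_t byteOffset, uint32_t bytesPerRow) {
    uint32_t byteOffsetX = byteOffset % bytesPerRow;
    uint32_t byteOffsetY = byteOffset - byteOffsetX;
    return {byteOffsetX / block.byteSize * block.width, byteOffsetY / bytesPerRow * block.height};
}

int main() {
    BlockInfo rgba8{1, 1, 4};        // 1x1 blocks, 4 bytes per texel (e.g. an RGBA8 format)
    uint32_t bytesPerRow = 256;      // D3D12_TEXTURE_DATA_PITCH_ALIGNMENT
    uint64_t offset = 2 * 512 + 64;  // example buffer offset of the first copied texel

    uint64_t aligned = AlignDown512(offset);  // 1024
    Offset2D t = TexelOffsets(rgba8, static_cast<uint32_t>(offset - aligned), bytesPerRow);
    assert(t.x == 16 && t.y == 0);  // 64 bytes into the row -> 16 texels over, same row
    std::printf("alignedOffset=%llu texelOffset=(%u, %u)\n",
                static_cast<unsigned long long>(aligned), static_cast<unsigned>(t.x),
                static_cast<unsigned>(t.y));
    return 0;
}

Running it prints alignedOffset=1024 texelOffset=(16, 0), matching the diagram where the copy starts 64 bytes into a row of the 512-aligned block.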
+
+TextureCopySplits Compute2DTextureCopySplits(Origin3D origin,
+ Extent3D copySize,
+ const TexelBlockInfo& blockInfo,
+ uint64_t offset,
+ uint32_t bytesPerRow,
+ uint32_t rowsPerImage) {
+ TextureCopySplits copies;
+
+ const uint64_t bytesPerLayer = bytesPerRow * rowsPerImage;
+
+ // The function Compute2DTextureCopySubresource() decides how to split the copy based on:
+ // - the alignment of the buffer offset with D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT (512)
+ // - the alignment of the buffer offset with D3D12_TEXTURE_DATA_PITCH_ALIGNMENT (256)
+ // Each layer of a 2D array might need to be split, but because of the WebGPU
+ // constraint that "bytesPerRow" must be a multiple of 256, all odd (resp. all even) layers
+    // will be at offsets that differ from each other by a multiple of 512, which means they
+    // will all result in the same 2D split. Thus we can just compute the copy splits for the
+    // first and second layers, and reuse them for the remaining layers by adding the
+    // appropriate offset for each layer. Moreover, if "rowsPerImage" is even, the first and
+    // second layers can share the same copy split, so in this situation we only need to
+    // compute the copy split once and reuse it for all the layers.
+ Extent3D copyOneLayerSize = copySize;
+ Origin3D copyFirstLayerOrigin = origin;
+ copyOneLayerSize.depthOrArrayLayers = 1;
+ copyFirstLayerOrigin.z = 0;
+
+ copies.copySubresources[0] = Compute2DTextureCopySubresource(
+ copyFirstLayerOrigin, copyOneLayerSize, blockInfo, offset, bytesPerRow);
+
+    // When the copy only refers to one texture 2D array layer,
+    // copies.copySubresources[1] will never be used, so we can safely return early here.
+ if (copySize.depthOrArrayLayers == 1) {
return copies;
}
- void Recompute3DTextureCopyRegionWithEmptyFirstRowAndEvenCopyHeight(
- Origin3D origin,
- Extent3D copySize,
- const TexelBlockInfo& blockInfo,
- uint32_t bytesPerRow,
- uint32_t rowsPerImage,
- TextureCopySubresource& copy,
- uint32_t i) {
- // Let's assign data and show why copy region generated by ComputeTextureCopySubresource
- // is incorrect if there is an empty row at the beginning of the copy block.
- // Assuming that bytesPerRow is 256 and we are doing a B2T copy, and copy size is {width: 2,
- // height: 4, depthOrArrayLayers: 3}. Then the data layout in buffer is demonstrated
- // as below:
- //
- // |<----- bytes per row ------>|
- //
- // |----------------------------|
- // row (N - 1) | |
- // row N | ++~~~~~~~~~|
- // row (N + 1) |~~~~~~~~~~~~~~~~~++~~~~~~~~~|
- // row (N + 2) |~~~~~~~~~~~~~~~~~++~~~~~~~~~|
- // row (N + 3) |~~~~~~~~~~~~~~~~~++~~~~~~~~~|
- // row (N + 4) |~~~~~~~~~~~~~~~~~++~~~~~~~~~|
- // row (N + 5) |~~~~~~~~~~~~~~~~~++~~~~~~~~~|
- // row (N + 6) |~~~~~~~~~~~~~~~~~++~~~~~~~~~|
- // row (N + 7) |~~~~~~~~~~~~~~~~~++~~~~~~~~~|
- // row (N + 8) |~~~~~~~~~~~~~~~~~++~~~~~~~~~|
- // row (N + 9) |~~~~~~~~~~~~~~~~~++~~~~~~~~~|
- // row (N + 10) |~~~~~~~~~~~~~~~~~++~~~~~~~~~|
- // row (N + 11) |~~~~~~~~~~~~~~~~~++ |
- // |----------------------------|
-
- // The copy we mean to do is the following:
- //
- // - image 0: row N to row (N + 3),
- // - image 1: row (N + 4) to row (N + 7),
- // - image 2: row (N + 8) to row (N + 11).
- //
- // Note that alignedOffset is at the beginning of row (N - 1), while buffer offset makes
- // the copy start at row N. Row (N - 1) is the empty row between alignedOffset and offset.
- //
- // The 2D copy region of image 0 we received from Compute2DTextureCopySubresource() is
- // the following:
- //
- // |-------------------|
- // row (N - 1) | |
- // row N | ++|
- // row (N + 1) |~~~~~~~~~~~~~~~~~++|
- // row (N + 2) |~~~~~~~~~~~~~~~~~++|
- // row (N + 3) |~~~~~~~~~~~~~~~~~++|
- // |-------------------|
- //
- // However, if we simply expand the copy region of image 0 to all depth ranges of a 3D
- // texture, we will copy 5 rows every time, and every first row of each slice will be
- // skipped. As a result, the copied data will be:
- //
- // - image 0: row N to row (N + 3), which is correct. Row (N - 1) is skipped.
- // - image 1: row (N + 5) to row (N + 8) because row (N + 4) is skipped. It is incorrect.
- //
- // Likewise, all other image followed will be incorrect because we wrongly keep skipping
- // one row for each depth slice.
- //
- // Solution: split the copy region to two copies: copy 3 (rowsPerImage - 1) rows in and
- // expand to all depth slices in the first copy. 3 rows + one skipped rows = 4 rows, which
- // equals to rowsPerImage. Then copy the last row in the second copy. However, the copy
- // block of the last row of the last image may out-of-bound (see the details below), so
- // we need an extra copy for the very last row.
-
- // Copy 0: copy 3 rows, not 4 rows.
- // _____________________
- // / /|
- // / / |
- // |-------------------| |
- // row (N - 1) | | |
- // row N | ++| |
- // row (N + 1) |~~~~~~~~~~~~~~~~~++| /
- // row (N + 2) |~~~~~~~~~~~~~~~~~++|/
- // |-------------------|
-
- // Copy 1: move down two rows and copy the last row on image 0, and expand to
- // copySize.depthOrArrayLayers - 1 depth slices. Note that if we expand it to all depth
- // slices, the last copy block will be row (N + 9) to row (N + 12). Row (N + 11) might
- // be the last row of the entire buffer. Then row (N + 12) will be out-of-bound.
- // _____________________
- // / /|
- // / / |
- // |-------------------| |
- // row (N + 1) | | |
- // row (N + 2) | | |
- // row (N + 3) | ++| /
- // row (N + 4) |~~~~~~~~~~~~~~~~~~~|/
- // |-------------------|
- //
- // copy 2: copy the last row of the last image.
- // |-------------------|
- // row (N + 11)| ++|
- // |-------------------|
-
- // Copy 0: copy copySize.height - 1 rows
- TextureCopySubresource::CopyInfo& copy0 = copy.copies[i];
- copy0.copySize.height = copySize.height - blockInfo.height;
- copy0.bufferSize.height = rowsPerImage * blockInfo.height; // rowsPerImageInTexels
-
- // Copy 1: move down 2 rows and copy the last row on image 0, and expand to all depth slices
- // but the last one.
- TextureCopySubresource::CopyInfo* copy1 = copy.AddCopy();
- *copy1 = copy0;
- copy1->alignedOffset += 2 * bytesPerRow;
- copy1->textureOffset.y += copySize.height - blockInfo.height;
- // Offset two rows from the copy height for the bufferOffset (See the figure above):
- // - one for the row we advanced in the buffer: row (N + 4).
- // - one for the last row we want to copy: row (N + 3) itself.
- copy1->bufferOffset.y = copySize.height - 2 * blockInfo.height;
- copy1->copySize.height = blockInfo.height;
- copy1->copySize.depthOrArrayLayers--;
- copy1->bufferSize.depthOrArrayLayers--;
-
- // Copy 2: copy the last row of the last image.
- uint64_t offsetForCopy0 = OffsetToFirstCopiedTexel(blockInfo, bytesPerRow,
- copy0.alignedOffset, copy0.bufferOffset);
- uint64_t offsetForLastRowOfLastImage =
- offsetForCopy0 + bytesPerRow * (copy0.copySize.height +
- rowsPerImage * (copySize.depthOrArrayLayers - 1));
- uint64_t alignedOffsetForLastRowOfLastImage =
- AlignDownForDataPlacement(offsetForLastRowOfLastImage);
- Origin3D texelOffsetForLastRowOfLastImage = ComputeTexelOffsets(
- blockInfo,
- static_cast<uint32_t>(offsetForLastRowOfLastImage - alignedOffsetForLastRowOfLastImage),
- bytesPerRow);
-
- TextureCopySubresource::CopyInfo* copy2 = copy.AddCopy();
- copy2->alignedOffset = alignedOffsetForLastRowOfLastImage;
- copy2->textureOffset = copy1->textureOffset;
- copy2->textureOffset.z = origin.z + copySize.depthOrArrayLayers - 1;
- copy2->copySize = copy1->copySize;
- copy2->copySize.depthOrArrayLayers = 1;
- copy2->bufferOffset = texelOffsetForLastRowOfLastImage;
- copy2->bufferSize.width = copy1->bufferSize.width;
- ASSERT(copy2->copySize.height == 1);
- copy2->bufferSize.height = copy2->bufferOffset.y + copy2->copySize.height;
- copy2->bufferSize.depthOrArrayLayers = 1;
+ if (bytesPerLayer % D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT == 0) {
+ copies.copySubresources[1] = copies.copySubresources[0];
+ copies.copySubresources[1].copies[0].alignedOffset += bytesPerLayer;
+ copies.copySubresources[1].copies[1].alignedOffset += bytesPerLayer;
+ } else {
+ const uint64_t bufferOffsetNextLayer = offset + bytesPerLayer;
+ copies.copySubresources[1] = Compute2DTextureCopySubresource(
+ copyFirstLayerOrigin, copyOneLayerSize, blockInfo, bufferOffsetNextLayer, bytesPerRow);
}
- void Recompute3DTextureCopyRegionWithEmptyFirstRowAndOddCopyHeight(Extent3D copySize,
- uint32_t bytesPerRow,
- TextureCopySubresource& copy,
- uint32_t i) {
- // Read the comments of Recompute3DTextureCopyRegionWithEmptyFirstRowAndEvenCopyHeight() for
- // the reason why it is incorrect if we simply extend the copy region to all depth slices
- // when there is an empty first row at the copy region.
- //
- // If the copy height is odd, we can use two copies to make it correct:
- // - copy 0: only copy the first depth slice. Keep other arguments the same.
- // - copy 1: copy all rest depth slices because it will start without an empty row if
- // copy height is odd. Odd height + one (empty row) is even. An even row number times
- // bytesPerRow (256) will be aligned to D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT (512)
-
- // Copy 0: copy the first depth slice (image 0)
- TextureCopySubresource::CopyInfo& copy0 = copy.copies[i];
- copy0.copySize.depthOrArrayLayers = 1;
- copy0.bufferSize.depthOrArrayLayers = 1;
-
- // Copy 1: copy the rest depth slices in one shot
- TextureCopySubresource::CopyInfo* copy1 = copy.AddCopy();
- *copy1 = copy0;
- ASSERT(copySize.height % 2 == 1);
- copy1->alignedOffset += (copySize.height + 1) * bytesPerRow;
- ASSERT(copy1->alignedOffset % D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT == 0);
- // textureOffset.z should add one because the first slice has already been copied in copy0.
- copy1->textureOffset.z++;
- // bufferOffset.y should be 0 because we skipped the first depth slice and there is no empty
- // row in this copy region.
- copy1->bufferOffset.y = 0;
- copy1->copySize.height = copySize.height;
- copy1->copySize.depthOrArrayLayers = copySize.depthOrArrayLayers - 1;
- copy1->bufferSize.height = copySize.height;
- copy1->bufferSize.depthOrArrayLayers = copySize.depthOrArrayLayers - 1;
+ return copies;
+}
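
The layer-reuse argument in Compute2DTextureCopySplits is purely modular arithmetic: since bytesPerRow is a multiple of 256, consecutive layer start offsets can only alternate between two residues modulo 512. A small, hypothetical sketch that prints those residues (the offset and rowsPerImage values here are arbitrary examples):

#include <cstdint>
#include <cstdio>

int main() {
    const uint64_t kAlignment = 512;   // D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT
    const uint64_t bytesPerRow = 256;  // WebGPU requires a multiple of 256
    const uint64_t rowsPerImage = 7;   // odd, so bytesPerLayer is not a multiple of 512
    const uint64_t offset = 768;       // assumed start of layer 0 in the buffer

    const uint64_t bytesPerLayer = bytesPerRow * rowsPerImage;  // 1792
    for (uint64_t layer = 0; layer < 6; ++layer) {
        const uint64_t layerOffset = offset + layer * bytesPerLayer;
        // The residue modulo 512 is what determines the 2D split for this layer.
        std::printf("layer %llu: offset %% 512 = %llu\n",
                    static_cast<unsigned long long>(layer),
                    static_cast<unsigned long long>(layerOffset % kAlignment));
    }
    // The residues alternate between just two values (256 and 0 here), so the splits
    // computed for layers 0 and 1 can be reused for all later layers by shifting
    // alignedOffset by a multiple of bytesPerLayer.
    return 0;
}

With rowsPerImage even, bytesPerLayer is already a multiple of 512, every layer shares the residue of layer 0, and the branch above simply reuses copySubresources[0].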
+
+void Recompute3DTextureCopyRegionWithEmptyFirstRowAndEvenCopyHeight(Origin3D origin,
+ Extent3D copySize,
+ const TexelBlockInfo& blockInfo,
+ uint32_t bytesPerRow,
+ uint32_t rowsPerImage,
+ TextureCopySubresource& copy,
+ uint32_t i) {
+    // Let's assign data and show why the copy region generated by
+    // Compute2DTextureCopySubresource is incorrect if there is an empty row at the beginning
+    // of the copy block.
+    // Assume that bytesPerRow is 256, we are doing a B2T copy, and the copy size is {width: 2,
+    // height: 4, depthOrArrayLayers: 3}. Then the data layout in the buffer is as shown below:
+ //
+ // |<----- bytes per row ------>|
+ //
+ // |----------------------------|
+ // row (N - 1) | |
+ // row N | ++~~~~~~~~~|
+ // row (N + 1) |~~~~~~~~~~~~~~~~~++~~~~~~~~~|
+ // row (N + 2) |~~~~~~~~~~~~~~~~~++~~~~~~~~~|
+ // row (N + 3) |~~~~~~~~~~~~~~~~~++~~~~~~~~~|
+ // row (N + 4) |~~~~~~~~~~~~~~~~~++~~~~~~~~~|
+ // row (N + 5) |~~~~~~~~~~~~~~~~~++~~~~~~~~~|
+ // row (N + 6) |~~~~~~~~~~~~~~~~~++~~~~~~~~~|
+ // row (N + 7) |~~~~~~~~~~~~~~~~~++~~~~~~~~~|
+ // row (N + 8) |~~~~~~~~~~~~~~~~~++~~~~~~~~~|
+ // row (N + 9) |~~~~~~~~~~~~~~~~~++~~~~~~~~~|
+ // row (N + 10) |~~~~~~~~~~~~~~~~~++~~~~~~~~~|
+ // row (N + 11) |~~~~~~~~~~~~~~~~~++ |
+ // |----------------------------|
+
+ // The copy we mean to do is the following:
+ //
+ // - image 0: row N to row (N + 3),
+ // - image 1: row (N + 4) to row (N + 7),
+ // - image 2: row (N + 8) to row (N + 11).
+ //
+ // Note that alignedOffset is at the beginning of row (N - 1), while buffer offset makes
+ // the copy start at row N. Row (N - 1) is the empty row between alignedOffset and offset.
+ //
+ // The 2D copy region of image 0 we received from Compute2DTextureCopySubresource() is
+ // the following:
+ //
+ // |-------------------|
+ // row (N - 1) | |
+ // row N | ++|
+ // row (N + 1) |~~~~~~~~~~~~~~~~~++|
+ // row (N + 2) |~~~~~~~~~~~~~~~~~++|
+ // row (N + 3) |~~~~~~~~~~~~~~~~~++|
+ // |-------------------|
+ //
+ // However, if we simply expand the copy region of image 0 to all depth ranges of a 3D
+    // texture, we will copy 5 rows every time, and the first row of each slice will be
+ // skipped. As a result, the copied data will be:
+ //
+ // - image 0: row N to row (N + 3), which is correct. Row (N - 1) is skipped.
+ // - image 1: row (N + 5) to row (N + 8) because row (N + 4) is skipped. It is incorrect.
+ //
+    // Likewise, all the images that follow will be incorrect because we wrongly keep skipping
+    // one row for each depth slice.
+    //
+    // Solution: split the copy region into two copies: copy 3 (rowsPerImage - 1) rows and
+    // expand to all depth slices in the first copy. 3 rows + one skipped row = 4 rows, which
+    // equals rowsPerImage. Then copy the last row in the second copy. However, the copy
+    // block of the last row of the last image may go out of bounds (see the details below), so
+    // we need an extra copy for the very last row.
+
+ // Copy 0: copy 3 rows, not 4 rows.
+ // _____________________
+ // / /|
+ // / / |
+ // |-------------------| |
+ // row (N - 1) | | |
+ // row N | ++| |
+ // row (N + 1) |~~~~~~~~~~~~~~~~~++| /
+ // row (N + 2) |~~~~~~~~~~~~~~~~~++|/
+ // |-------------------|
+
+ // Copy 1: move down two rows and copy the last row on image 0, and expand to
+ // copySize.depthOrArrayLayers - 1 depth slices. Note that if we expand it to all depth
+ // slices, the last copy block will be row (N + 9) to row (N + 12). Row (N + 11) might
+ // be the last row of the entire buffer. Then row (N + 12) will be out-of-bound.
+ // _____________________
+ // / /|
+ // / / |
+ // |-------------------| |
+ // row (N + 1) | | |
+ // row (N + 2) | | |
+ // row (N + 3) | ++| /
+ // row (N + 4) |~~~~~~~~~~~~~~~~~~~|/
+ // |-------------------|
+ //
+ // copy 2: copy the last row of the last image.
+ // |-------------------|
+ // row (N + 11)| ++|
+ // |-------------------|
+
+ // Copy 0: copy copySize.height - 1 rows
+ TextureCopySubresource::CopyInfo& copy0 = copy.copies[i];
+ copy0.copySize.height = copySize.height - blockInfo.height;
+ copy0.bufferSize.height = rowsPerImage * blockInfo.height; // rowsPerImageInTexels
+
+ // Copy 1: move down 2 rows and copy the last row on image 0, and expand to all depth slices
+ // but the last one.
+ TextureCopySubresource::CopyInfo* copy1 = copy.AddCopy();
+ *copy1 = copy0;
+ copy1->alignedOffset += 2 * bytesPerRow;
+ copy1->textureOffset.y += copySize.height - blockInfo.height;
+ // Offset two rows from the copy height for the bufferOffset (See the figure above):
+ // - one for the row we advanced in the buffer: row (N + 4).
+ // - one for the last row we want to copy: row (N + 3) itself.
+ copy1->bufferOffset.y = copySize.height - 2 * blockInfo.height;
+ copy1->copySize.height = blockInfo.height;
+ copy1->copySize.depthOrArrayLayers--;
+ copy1->bufferSize.depthOrArrayLayers--;
+
+ // Copy 2: copy the last row of the last image.
+ uint64_t offsetForCopy0 =
+ OffsetToFirstCopiedTexel(blockInfo, bytesPerRow, copy0.alignedOffset, copy0.bufferOffset);
+ uint64_t offsetForLastRowOfLastImage =
+ offsetForCopy0 +
+ bytesPerRow * (copy0.copySize.height + rowsPerImage * (copySize.depthOrArrayLayers - 1));
+ uint64_t alignedOffsetForLastRowOfLastImage =
+ AlignDownForDataPlacement(offsetForLastRowOfLastImage);
+ Origin3D texelOffsetForLastRowOfLastImage = ComputeTexelOffsets(
+ blockInfo,
+ static_cast<uint32_t>(offsetForLastRowOfLastImage - alignedOffsetForLastRowOfLastImage),
+ bytesPerRow);
+
+ TextureCopySubresource::CopyInfo* copy2 = copy.AddCopy();
+ copy2->alignedOffset = alignedOffsetForLastRowOfLastImage;
+ copy2->textureOffset = copy1->textureOffset;
+ copy2->textureOffset.z = origin.z + copySize.depthOrArrayLayers - 1;
+ copy2->copySize = copy1->copySize;
+ copy2->copySize.depthOrArrayLayers = 1;
+ copy2->bufferOffset = texelOffsetForLastRowOfLastImage;
+ copy2->bufferSize.width = copy1->bufferSize.width;
+ ASSERT(copy2->copySize.height == 1);
+ copy2->bufferSize.height = copy2->bufferOffset.y + copy2->copySize.height;
+ copy2->bufferSize.depthOrArrayLayers = 1;
+}
+
+void Recompute3DTextureCopyRegionWithEmptyFirstRowAndOddCopyHeight(Extent3D copySize,
+ uint32_t bytesPerRow,
+ TextureCopySubresource& copy,
+ uint32_t i) {
+ // Read the comments of Recompute3DTextureCopyRegionWithEmptyFirstRowAndEvenCopyHeight() for
+ // the reason why it is incorrect if we simply extend the copy region to all depth slices
+ // when there is an empty first row at the copy region.
+ //
+ // If the copy height is odd, we can use two copies to make it correct:
+ // - copy 0: only copy the first depth slice. Keep other arguments the same.
+    //   - copy 1: copy all the remaining depth slices, because that copy will start without
+    //   an empty row if the copy height is odd. Odd height + one (empty row) is even. An even
+    //   number of rows times bytesPerRow (256) is aligned to
+    //   D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT (512).
+
+ // Copy 0: copy the first depth slice (image 0)
+ TextureCopySubresource::CopyInfo& copy0 = copy.copies[i];
+ copy0.copySize.depthOrArrayLayers = 1;
+ copy0.bufferSize.depthOrArrayLayers = 1;
+
+    // Copy 1: copy the remaining depth slices in one shot
+ TextureCopySubresource::CopyInfo* copy1 = copy.AddCopy();
+ *copy1 = copy0;
+ ASSERT(copySize.height % 2 == 1);
+ copy1->alignedOffset += (copySize.height + 1) * bytesPerRow;
+ ASSERT(copy1->alignedOffset % D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT == 0);
+    // textureOffset.z is incremented because the first slice has already been copied in copy0.
+ copy1->textureOffset.z++;
+ // bufferOffset.y should be 0 because we skipped the first depth slice and there is no empty
+ // row in this copy region.
+ copy1->bufferOffset.y = 0;
+ copy1->copySize.height = copySize.height;
+ copy1->copySize.depthOrArrayLayers = copySize.depthOrArrayLayers - 1;
+ copy1->bufferSize.height = copySize.height;
+ copy1->bufferSize.depthOrArrayLayers = copySize.depthOrArrayLayers - 1;
+}
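
The alignment claim behind copy 1 above, that skipping one odd-height slice plus its empty row lands the next slice back on a 512-byte boundary, reduces to (height + 1) * 256 being a multiple of 512 for every odd height. A throwaway, hypothetical check:

#include <cassert>
#include <cstdint>

int main() {
    const uint32_t bytesPerRow = 256;  // D3D12_TEXTURE_DATA_PITCH_ALIGNMENT
    for (uint32_t height = 1; height < 101; height += 2) {  // odd copy heights only
        // Odd height plus the one empty row is an even row count, and an even number of
        // 256-byte rows is always a multiple of 512.
        assert((height + 1) * bytesPerRow % 512 == 0);
    }
    return 0;
}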
+
+TextureCopySubresource Compute3DTextureCopySplits(Origin3D origin,
+ Extent3D copySize,
+ const TexelBlockInfo& blockInfo,
+ uint64_t offset,
+ uint32_t bytesPerRow,
+ uint32_t rowsPerImage) {
+ // To compute the copy region(s) for 3D textures, we call Compute2DTextureCopySubresource
+    // to get the copy region(s) for the first slice of the copy, then extend them to all depth
+    // slices to form a 3D copy. However, this doesn't always work that simply, due to some
+    // corner cases.
+ //
+ // For example, if bufferSize.height is greater than rowsPerImage in the generated copy
+ // region and we simply extend the 2D copy region to all copied depth slices, copied data
+ // will be incorrectly offset for each depth slice except the first one.
+ //
+    // For these special cases, we need to recompute the copy regions for 3D textures by
+    // splitting the incorrect copy region into a couple more copy regions.
+
+ // Call Compute2DTextureCopySubresource and get copy regions. This function has already
+ // forwarded "copySize.depthOrArrayLayers" to all depth slices.
+ TextureCopySubresource copySubresource =
+ Compute2DTextureCopySubresource(origin, copySize, blockInfo, offset, bytesPerRow);
+
+ ASSERT(copySubresource.count <= 2);
+    // If copySize.depthOrArrayLayers is 1, we can return copySubresource because we don't
+    // need to extend the copy region(s) to other depth slice(s).
+ if (copySize.depthOrArrayLayers == 1) {
+ return copySubresource;
}
- TextureCopySubresource Compute3DTextureCopySplits(Origin3D origin,
- Extent3D copySize,
- const TexelBlockInfo& blockInfo,
- uint64_t offset,
- uint32_t bytesPerRow,
- uint32_t rowsPerImage) {
- // To compute the copy region(s) for 3D textures, we call Compute2DTextureCopySubresource
- // and get copy region(s) for the first slice of the copy, then extend to all depth slices
- // and become a 3D copy. However, this doesn't work as easily as that due to some corner
- // cases.
- //
- // For example, if bufferSize.height is greater than rowsPerImage in the generated copy
- // region and we simply extend the 2D copy region to all copied depth slices, copied data
- // will be incorrectly offset for each depth slice except the first one.
- //
- // For these special cases, we need to recompute the copy regions for 3D textures via
- // split the incorrect copy region to a couple more copy regions.
-
- // Call Compute2DTextureCopySubresource and get copy regions. This function has already
- // forwarded "copySize.depthOrArrayLayers" to all depth slices.
- TextureCopySubresource copySubresource =
- Compute2DTextureCopySubresource(origin, copySize, blockInfo, offset, bytesPerRow);
-
- ASSERT(copySubresource.count <= 2);
- // If copySize.depth is 1, we can return copySubresource. Because we don't need to extend
- // the copy region(s) to other depth slice(s).
- if (copySize.depthOrArrayLayers == 1) {
- return copySubresource;
+ uint32_t rowsPerImageInTexels = rowsPerImage * blockInfo.height;
+ // The copy region(s) generated by Compute2DTextureCopySubresource might be incorrect.
+    // However, we may append a couple more copy regions in the for loop below, and these
+    // newly added copy regions don't need to be revised.
+ uint32_t originalCopyCount = copySubresource.count;
+ for (uint32_t i = 0; i < originalCopyCount; ++i) {
+ // There can be one empty row at most in a copy region.
+ ASSERT(copySubresource.copies[i].bufferSize.height <=
+ rowsPerImageInTexels + blockInfo.height);
+ Extent3D& bufferSize = copySubresource.copies[i].bufferSize;
+
+ if (bufferSize.height == rowsPerImageInTexels) {
+            // If the copy region's bufferSize.height equals rowsPerImageInTexels, we can use
+ // this copy region without any modification.
+ continue;
}
- uint32_t rowsPerImageInTexels = rowsPerImage * blockInfo.height;
- // The copy region(s) generated by Compute2DTextureCopySubresource might be incorrect.
- // However, we may append a couple more copy regions in the for loop below. We don't need
- // to revise these new added copy regions.
- uint32_t originalCopyCount = copySubresource.count;
- for (uint32_t i = 0; i < originalCopyCount; ++i) {
- // There can be one empty row at most in a copy region.
- ASSERT(copySubresource.copies[i].bufferSize.height <=
- rowsPerImageInTexels + blockInfo.height);
- Extent3D& bufferSize = copySubresource.copies[i].bufferSize;
-
- if (bufferSize.height == rowsPerImageInTexels) {
- // If the copy region's bufferSize.height equals to rowsPerImageInTexels, we can use
- // this copy region without any modification.
- continue;
- }
-
- if (bufferSize.height < rowsPerImageInTexels) {
- // If we are copying multiple depth slices, we should skip rowsPerImageInTexels rows
- // for each slice even though we only copy partial rows in each slice sometimes.
- bufferSize.height = rowsPerImageInTexels;
+ if (bufferSize.height < rowsPerImageInTexels) {
+ // If we are copying multiple depth slices, we should skip rowsPerImageInTexels rows
+ // for each slice even though we only copy partial rows in each slice sometimes.
+ bufferSize.height = rowsPerImageInTexels;
+ } else {
+ // bufferSize.height > rowsPerImageInTexels. There is an empty row in this copy
+ // region due to alignment adjustment.
+
+            // bytesPerRow is definitely 256, and the copy definitely covers the full height.
+            // Otherwise, bufferSize.height wouldn't be greater than rowsPerImageInTexels and
+            // there wouldn't be an empty row at the beginning of this copy region.
+ ASSERT(bytesPerRow == D3D12_TEXTURE_DATA_PITCH_ALIGNMENT);
+ ASSERT(copySize.height == rowsPerImageInTexels);
+
+ if (copySize.height % 2 == 0) {
+ // If copySize.height is even and there is an empty row at the beginning of the
+ // first slice of the copy region, the offset of all depth slices will never be
+ // aligned to D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT (512) and there is always
+ // an empty row at each depth slice. We need a totally different approach to
+ // split the copy region.
+ Recompute3DTextureCopyRegionWithEmptyFirstRowAndEvenCopyHeight(
+ origin, copySize, blockInfo, bytesPerRow, rowsPerImage, copySubresource, i);
} else {
- // bufferSize.height > rowsPerImageInTexels. There is an empty row in this copy
- // region due to alignment adjustment.
-
- // bytesPerRow is definitely 256, and it is definitely a full copy on height.
- // Otherwise, bufferSize.height wount be greater than rowsPerImageInTexels and
- // there won't be an empty row at the beginning of this copy region.
- ASSERT(bytesPerRow == D3D12_TEXTURE_DATA_PITCH_ALIGNMENT);
- ASSERT(copySize.height == rowsPerImageInTexels);
-
- if (copySize.height % 2 == 0) {
- // If copySize.height is even and there is an empty row at the beginning of the
- // first slice of the copy region, the offset of all depth slices will never be
- // aligned to D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT (512) and there is always
- // an empty row at each depth slice. We need a totally different approach to
- // split the copy region.
- Recompute3DTextureCopyRegionWithEmptyFirstRowAndEvenCopyHeight(
- origin, copySize, blockInfo, bytesPerRow, rowsPerImage, copySubresource, i);
- } else {
- // If copySize.height is odd and there is an empty row at the beginning of the
- // first slice of the copy region, we can split the copy region into two copies:
- // copy0 to copy the first slice, copy1 to copy the rest slices because the
- // offset of slice 1 is aligned to D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT (512)
- // without an empty row. This is an easier case relative to cases with even copy
- // height.
- Recompute3DTextureCopyRegionWithEmptyFirstRowAndOddCopyHeight(
- copySize, bytesPerRow, copySubresource, i);
- }
+ // If copySize.height is odd and there is an empty row at the beginning of the
+ // first slice of the copy region, we can split the copy region into two copies:
+ // copy0 to copy the first slice, copy1 to copy the rest slices because the
+ // offset of slice 1 is aligned to D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT (512)
+ // without an empty row. This is an easier case relative to cases with even copy
+ // height.
+ Recompute3DTextureCopyRegionWithEmptyFirstRowAndOddCopyHeight(copySize, bytesPerRow,
+ copySubresource, i);
}
}
-
- return copySubresource;
}
+
+ return copySubresource;
+}
} // namespace dawn::native::d3d12
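
Stripped of the actual region rewriting, the loop in Compute3DTextureCopySplits makes a three-way decision per region based on bufferSize.height versus rowsPerImageInTexels. The condensed sketch below is hypothetical (the FixUp enum and Classify function are invented for illustration) but mirrors that branch structure:

#include <cstdint>
#include <cstdio>

enum class FixUp { None, GrowBufferHeight, RecomputeEvenHeight, RecomputeOddHeight };

// Decide how a 2D copy region must be adjusted before it is extended to all depth slices.
FixUp Classify(uint32_t bufferHeightInTexels, uint32_t rowsPerImageInTexels, uint32_t copyHeight) {
    if (bufferHeightInTexels == rowsPerImageInTexels) {
        return FixUp::None;  // the footprint already strides exactly one image per slice
    }
    if (bufferHeightInTexels < rowsPerImageInTexels) {
        return FixUp::GrowBufferHeight;  // pad the footprint so each slice skips a full image
    }
    // bufferHeight > rowsPerImage: there is an empty row from the 512-byte alignment fix-up.
    return (copyHeight % 2 == 0) ? FixUp::RecomputeEvenHeight : FixUp::RecomputeOddHeight;
}

int main() {
    std::printf("%d %d %d %d\n",
                static_cast<int>(Classify(4, 4, 4)),   // None
                static_cast<int>(Classify(3, 4, 3)),   // GrowBufferHeight
                static_cast<int>(Classify(5, 4, 4)),   // RecomputeEvenHeight
                static_cast<int>(Classify(6, 5, 5)));  // RecomputeOddHeight
    return 0;
}

For instance, a region whose footprint is one block row taller than rowsPerImageInTexels and whose copy height is even takes the RecomputeEvenHeight path, which corresponds to the three-copy rewrite described earlier.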
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/TextureCopySplitter.h b/chromium/third_party/dawn/src/dawn/native/d3d12/TextureCopySplitter.h
index 207e9165dc8..4e60396bc4b 100644
--- a/chromium/third_party/dawn/src/dawn/native/d3d12/TextureCopySplitter.h
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/TextureCopySplitter.h
@@ -15,83 +15,83 @@
#ifndef SRC_DAWN_NATIVE_D3D12_TEXTURECOPYSPLITTER_H_
#define SRC_DAWN_NATIVE_D3D12_TEXTURECOPYSPLITTER_H_
-#include "dawn/native/dawn_platform.h"
-
#include <array>
+#include "dawn/native/dawn_platform.h"
+
namespace dawn::native {
- struct TexelBlockInfo;
+struct TexelBlockInfo;
} // namespace dawn::native
namespace dawn::native::d3d12 {
- struct TextureCopySubresource {
- static constexpr unsigned int kMaxTextureCopyRegions = 4;
-
- struct CopyInfo {
- uint64_t alignedOffset = 0;
- Origin3D textureOffset;
- Origin3D bufferOffset;
- Extent3D bufferSize;
-
- Extent3D copySize;
- };
-
- CopyInfo* AddCopy();
-
- uint32_t count = 0;
- std::array<CopyInfo, kMaxTextureCopyRegions> copies;
- };
+struct TextureCopySubresource {
+ static constexpr unsigned int kMaxTextureCopyRegions = 4;
- struct TextureCopySplits {
- static constexpr uint32_t kMaxTextureCopySubresources = 2;
+ struct CopyInfo {
+ uint64_t alignedOffset = 0;
+ Origin3D textureOffset;
+ Origin3D bufferOffset;
+ Extent3D bufferSize;
- std::array<TextureCopySubresource, kMaxTextureCopySubresources> copySubresources;
+ Extent3D copySize;
};
- // This function is shared by 2D and 3D texture copy splitter. But it only knows how to handle
- // 2D non-arrayed textures correctly, and just forwards "copySize.depthOrArrayLayers". See
- // details in Compute{2D|3D}TextureCopySplits about how we generate copy regions for 2D array
- // and 3D textures based on this function.
- // The resulting copies triggered by API like CopyTextureRegion are equivalent to the copy
- // regions defines by the arguments of TextureCopySubresource returned by this function and its
- // counterparts. These arguments should strictly conform to particular invariants. Otherwise,
- // D3D12 driver may report validation errors when we call CopyTextureRegion. Some important
- // invariants are listed below. For more details
- // of these invariants, see src/dawn/tests/unittests/d3d12/CopySplitTests.cpp.
- // - Inside each copy region: 1) its buffer offset plus copy size should be less than its
- // buffer size, 2) its buffer offset on y-axis should be less than copy format's
- // blockInfo.height, 3) its buffer offset on z-axis should be 0.
- // - Each copy region has an offset (aka alignedOffset) aligned to
- // D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT
- // - The buffer footprint of each copy region should be entirely within the copied buffer,
- // which means that the last "texel" of the buffer footprint doesn't go past the end of
- // the buffer even though the last "texel" might not be copied.
- // - If there are multiple copy regions, each copy region should not overlap with the others.
- // - Copy region(s) combined should exactly be equivalent to the texture region to be copied.
- // - Every pixel accessed by every copy region should not be out of the bound of the copied
- // texture and buffer.
- TextureCopySubresource Compute2DTextureCopySubresource(Origin3D origin,
- Extent3D copySize,
- const TexelBlockInfo& blockInfo,
- uint64_t offset,
- uint32_t bytesPerRow);
-
- TextureCopySplits Compute2DTextureCopySplits(Origin3D origin,
- Extent3D copySize,
- const TexelBlockInfo& blockInfo,
- uint64_t offset,
- uint32_t bytesPerRow,
- uint32_t rowsPerImage);
-
- TextureCopySubresource Compute3DTextureCopySplits(Origin3D origin,
- Extent3D copySize,
- const TexelBlockInfo& blockInfo,
- uint64_t offset,
- uint32_t bytesPerRow,
- uint32_t rowsPerImage);
+ CopyInfo* AddCopy();
+
+ uint32_t count = 0;
+ std::array<CopyInfo, kMaxTextureCopyRegions> copies;
+};
+
+struct TextureCopySplits {
+ static constexpr uint32_t kMaxTextureCopySubresources = 2;
+
+ std::array<TextureCopySubresource, kMaxTextureCopySubresources> copySubresources;
+};
+
+// This function is shared by the 2D and 3D texture copy splitters, but it only knows how to
+// handle 2D non-arrayed textures correctly, and just forwards "copySize.depthOrArrayLayers". See
+// details in Compute{2D|3D}TextureCopySplits about how we generate copy regions for 2D array
+// and 3D textures based on this function.
+// The resulting copies triggered by APIs like CopyTextureRegion are equivalent to the copy
+// regions defined by the arguments of the TextureCopySubresource returned by this function and
+// its counterparts. These arguments should strictly conform to particular invariants. Otherwise,
+// the D3D12 driver may report validation errors when we call CopyTextureRegion. Some important
+// invariants are listed below. For more details on these invariants, see
+// src/dawn/tests/unittests/d3d12/CopySplitTests.cpp.
+// - Inside each copy region: 1) its buffer offset plus copy size should be less than its
+// buffer size, 2) its buffer offset on y-axis should be less than copy format's
+// blockInfo.height, 3) its buffer offset on z-axis should be 0.
+// - Each copy region has an offset (aka alignedOffset) aligned to
+// D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT
+// - The buffer footprint of each copy region should be entirely within the copied buffer,
+// which means that the last "texel" of the buffer footprint doesn't go past the end of
+// the buffer even though the last "texel" might not be copied.
+// - If there are multiple copy regions, each copy region should not overlap with the others.
+// - Copy region(s) combined should exactly be equivalent to the texture region to be copied.
+//   - Every pixel accessed by every copy region should not be out of the bounds of the copied
+// texture and buffer.
+TextureCopySubresource Compute2DTextureCopySubresource(Origin3D origin,
+ Extent3D copySize,
+ const TexelBlockInfo& blockInfo,
+ uint64_t offset,
+ uint32_t bytesPerRow);
+
+TextureCopySplits Compute2DTextureCopySplits(Origin3D origin,
+ Extent3D copySize,
+ const TexelBlockInfo& blockInfo,
+ uint64_t offset,
+ uint32_t bytesPerRow,
+ uint32_t rowsPerImage);
+
+TextureCopySubresource Compute3DTextureCopySplits(Origin3D origin,
+ Extent3D copySize,
+ const TexelBlockInfo& blockInfo,
+ uint64_t offset,
+ uint32_t bytesPerRow,
+ uint32_t rowsPerImage);
} // namespace dawn::native::d3d12
#endif // SRC_DAWN_NATIVE_D3D12_TEXTURECOPYSPLITTER_H_
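
The invariants listed in the comment above lend themselves to a mechanical spot-check. The following is a hypothetical checker for a single region, not part of Dawn: the field names mirror CopyInfo, 512 stands in for D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT, and the bounds comparisons use <= on the assumption that a footprint may end exactly at the buffer's end:

#include <cstdint>

struct Origin3D { uint32_t x = 0, y = 0, z = 0; };
struct Extent3D { uint32_t width = 0, height = 0, depthOrArrayLayers = 0; };

// Mirrors TextureCopySubresource::CopyInfo above, for illustration only.
struct CopyInfo {
    uint64_t alignedOffset = 0;
    Origin3D textureOffset;
    Origin3D bufferOffset;
    Extent3D bufferSize;
    Extent3D copySize;
};

// Checks the per-region invariants listed in the comment above.
bool RegionLooksValid(const CopyInfo& c, uint32_t blockHeight) {
    const bool aligned = c.alignedOffset % 512 == 0;  // 512-byte placement alignment
    const bool fitsX = c.bufferOffset.x + c.copySize.width <= c.bufferSize.width;
    const bool fitsY = c.bufferOffset.y + c.copySize.height <= c.bufferSize.height;
    const bool fitsZ =
        c.bufferOffset.z + c.copySize.depthOrArrayLayers <= c.bufferSize.depthOrArrayLayers;
    const bool smallYOffset = c.bufferOffset.y < blockHeight;  // y offset stays within one block row
    const bool zeroZOffset = c.bufferOffset.z == 0;            // z offset must be 0
    return aligned && fitsX && fitsY && fitsZ && smallYOffset && zeroZOffset;
}

int main() {
    CopyInfo c;
    c.alignedOffset = 1024;
    c.bufferOffset = {16, 0, 0};
    c.copySize = {32, 8, 1};
    c.bufferSize = {48, 8, 1};
    return RegionLooksValid(c, /*blockHeight=*/1) ? 0 : 1;  // returns 0: all invariants hold
}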
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/TextureD3D12.cpp b/chromium/third_party/dawn/src/dawn/native/d3d12/TextureD3D12.cpp
index b86abbde809..f3aeee0e34f 100644
--- a/chromium/third_party/dawn/src/dawn/native/d3d12/TextureD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/TextureD3D12.cpp
@@ -14,6 +14,9 @@
#include "dawn/native/d3d12/TextureD3D12.h"
+#include <algorithm>
+#include <utility>
+
#include "dawn/common/Constants.h"
#include "dawn/common/Math.h"
#include "dawn/native/DynamicUploader.h"
@@ -33,872 +36,866 @@
namespace dawn::native::d3d12 {
- namespace {
-
- D3D12_RESOURCE_STATES D3D12TextureUsage(wgpu::TextureUsage usage, const Format& format) {
- D3D12_RESOURCE_STATES resourceState = D3D12_RESOURCE_STATE_COMMON;
-
- if (usage & kPresentTextureUsage) {
- // The present usage is only used internally by the swapchain and is never used in
- // combination with other usages.
- ASSERT(usage == kPresentTextureUsage);
- return D3D12_RESOURCE_STATE_PRESENT;
- }
-
- if (usage & wgpu::TextureUsage::CopySrc) {
- resourceState |= D3D12_RESOURCE_STATE_COPY_SOURCE;
- }
- if (usage & wgpu::TextureUsage::CopyDst) {
- resourceState |= D3D12_RESOURCE_STATE_COPY_DEST;
- }
- if (usage & (wgpu::TextureUsage::TextureBinding)) {
- resourceState |= (D3D12_RESOURCE_STATE_PIXEL_SHADER_RESOURCE |
- D3D12_RESOURCE_STATE_NON_PIXEL_SHADER_RESOURCE);
- }
- if (usage & wgpu::TextureUsage::StorageBinding) {
- resourceState |= D3D12_RESOURCE_STATE_UNORDERED_ACCESS;
- }
- if (usage & wgpu::TextureUsage::RenderAttachment) {
- if (format.HasDepthOrStencil()) {
- resourceState |= D3D12_RESOURCE_STATE_DEPTH_WRITE;
- } else {
- resourceState |= D3D12_RESOURCE_STATE_RENDER_TARGET;
- }
- }
-
- if (usage & kReadOnlyRenderAttachment) {
- // There is no STENCIL_READ state. Readonly for stencil is bundled with DEPTH_READ.
- resourceState |= D3D12_RESOURCE_STATE_DEPTH_READ;
- }
-
- return resourceState;
- }
-
- D3D12_RESOURCE_FLAGS D3D12ResourceFlags(wgpu::TextureUsage usage,
- const Format& format,
- bool isMultisampledTexture) {
- D3D12_RESOURCE_FLAGS flags = D3D12_RESOURCE_FLAG_NONE;
-
- if (usage & wgpu::TextureUsage::StorageBinding) {
- flags |= D3D12_RESOURCE_FLAG_ALLOW_UNORDERED_ACCESS;
- }
+namespace {
- // A multisampled resource must have either D3D12_RESOURCE_FLAG_ALLOW_RENDER_TARGET or
- // D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL set in D3D12_RESOURCE_DESC::Flags.
- // https://docs.microsoft.com/en-us/windows/desktop/api/d3d12/ns-d3d12-d3d12_resource_desc
- if ((usage & wgpu::TextureUsage::RenderAttachment) != 0 || isMultisampledTexture) {
- if (format.HasDepthOrStencil()) {
- flags |= D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL;
- } else {
- flags |= D3D12_RESOURCE_FLAG_ALLOW_RENDER_TARGET;
- }
- }
-
- ASSERT(!(flags & D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL) ||
- flags == D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL);
- return flags;
- }
+D3D12_RESOURCE_STATES D3D12TextureUsage(wgpu::TextureUsage usage, const Format& format) {
+ D3D12_RESOURCE_STATES resourceState = D3D12_RESOURCE_STATE_COMMON;
- D3D12_RESOURCE_DIMENSION D3D12TextureDimension(wgpu::TextureDimension dimension) {
- switch (dimension) {
- case wgpu::TextureDimension::e1D:
- return D3D12_RESOURCE_DIMENSION_TEXTURE1D;
- case wgpu::TextureDimension::e2D:
- return D3D12_RESOURCE_DIMENSION_TEXTURE2D;
- case wgpu::TextureDimension::e3D:
- return D3D12_RESOURCE_DIMENSION_TEXTURE3D;
- }
- }
-
- DXGI_FORMAT D3D12TypelessTextureFormat(wgpu::TextureFormat format) {
- switch (format) {
- case wgpu::TextureFormat::R8Unorm:
- case wgpu::TextureFormat::R8Snorm:
- case wgpu::TextureFormat::R8Uint:
- case wgpu::TextureFormat::R8Sint:
- return DXGI_FORMAT_R8_TYPELESS;
-
- case wgpu::TextureFormat::R16Uint:
- case wgpu::TextureFormat::R16Sint:
- case wgpu::TextureFormat::R16Float:
- case wgpu::TextureFormat::Depth16Unorm:
- return DXGI_FORMAT_R16_TYPELESS;
-
- case wgpu::TextureFormat::RG8Unorm:
- case wgpu::TextureFormat::RG8Snorm:
- case wgpu::TextureFormat::RG8Uint:
- case wgpu::TextureFormat::RG8Sint:
- return DXGI_FORMAT_R8G8_TYPELESS;
-
- case wgpu::TextureFormat::R32Uint:
- case wgpu::TextureFormat::R32Sint:
- case wgpu::TextureFormat::R32Float:
- return DXGI_FORMAT_R32_TYPELESS;
-
- case wgpu::TextureFormat::RG16Uint:
- case wgpu::TextureFormat::RG16Sint:
- case wgpu::TextureFormat::RG16Float:
- return DXGI_FORMAT_R16G16_TYPELESS;
-
- case wgpu::TextureFormat::RGBA8Unorm:
- case wgpu::TextureFormat::RGBA8UnormSrgb:
- case wgpu::TextureFormat::RGBA8Snorm:
- case wgpu::TextureFormat::RGBA8Uint:
- case wgpu::TextureFormat::RGBA8Sint:
- return DXGI_FORMAT_R8G8B8A8_TYPELESS;
-
- case wgpu::TextureFormat::BGRA8Unorm:
- case wgpu::TextureFormat::BGRA8UnormSrgb:
- return DXGI_FORMAT_B8G8R8A8_TYPELESS;
-
- case wgpu::TextureFormat::RGB10A2Unorm:
- return DXGI_FORMAT_R10G10B10A2_TYPELESS;
-
- case wgpu::TextureFormat::RG11B10Ufloat:
- return DXGI_FORMAT_R11G11B10_FLOAT;
- case wgpu::TextureFormat::RGB9E5Ufloat:
- return DXGI_FORMAT_R9G9B9E5_SHAREDEXP;
-
- case wgpu::TextureFormat::RG32Uint:
- case wgpu::TextureFormat::RG32Sint:
- case wgpu::TextureFormat::RG32Float:
- return DXGI_FORMAT_R32G32_TYPELESS;
-
- case wgpu::TextureFormat::RGBA16Uint:
- case wgpu::TextureFormat::RGBA16Sint:
- case wgpu::TextureFormat::RGBA16Float:
- return DXGI_FORMAT_R16G16B16A16_TYPELESS;
-
- case wgpu::TextureFormat::RGBA32Uint:
- case wgpu::TextureFormat::RGBA32Sint:
- case wgpu::TextureFormat::RGBA32Float:
- return DXGI_FORMAT_R32G32B32A32_TYPELESS;
-
- case wgpu::TextureFormat::Depth32Float:
- case wgpu::TextureFormat::Depth24Plus:
- return DXGI_FORMAT_R32_TYPELESS;
-
- // Depth24UnormStencil8 is the smallest format supported on D3D12 that has stencil.
- case wgpu::TextureFormat::Stencil8:
- case wgpu::TextureFormat::Depth24UnormStencil8:
- return DXGI_FORMAT_R24G8_TYPELESS;
- case wgpu::TextureFormat::Depth24PlusStencil8:
- case wgpu::TextureFormat::Depth32FloatStencil8:
- return DXGI_FORMAT_R32G8X24_TYPELESS;
-
- case wgpu::TextureFormat::BC1RGBAUnorm:
- case wgpu::TextureFormat::BC1RGBAUnormSrgb:
- return DXGI_FORMAT_BC1_TYPELESS;
-
- case wgpu::TextureFormat::BC2RGBAUnorm:
- case wgpu::TextureFormat::BC2RGBAUnormSrgb:
- return DXGI_FORMAT_BC2_TYPELESS;
-
- case wgpu::TextureFormat::BC3RGBAUnorm:
- case wgpu::TextureFormat::BC3RGBAUnormSrgb:
- return DXGI_FORMAT_BC3_TYPELESS;
-
- case wgpu::TextureFormat::BC4RSnorm:
- case wgpu::TextureFormat::BC4RUnorm:
- return DXGI_FORMAT_BC4_TYPELESS;
-
- case wgpu::TextureFormat::BC5RGSnorm:
- case wgpu::TextureFormat::BC5RGUnorm:
- return DXGI_FORMAT_BC5_TYPELESS;
-
- case wgpu::TextureFormat::BC6HRGBFloat:
- case wgpu::TextureFormat::BC6HRGBUfloat:
- return DXGI_FORMAT_BC6H_TYPELESS;
-
- case wgpu::TextureFormat::BC7RGBAUnorm:
- case wgpu::TextureFormat::BC7RGBAUnormSrgb:
- return DXGI_FORMAT_BC7_TYPELESS;
-
- case wgpu::TextureFormat::ETC2RGB8Unorm:
- case wgpu::TextureFormat::ETC2RGB8UnormSrgb:
- case wgpu::TextureFormat::ETC2RGB8A1Unorm:
- case wgpu::TextureFormat::ETC2RGB8A1UnormSrgb:
- case wgpu::TextureFormat::ETC2RGBA8Unorm:
- case wgpu::TextureFormat::ETC2RGBA8UnormSrgb:
- case wgpu::TextureFormat::EACR11Unorm:
- case wgpu::TextureFormat::EACR11Snorm:
- case wgpu::TextureFormat::EACRG11Unorm:
- case wgpu::TextureFormat::EACRG11Snorm:
-
- case wgpu::TextureFormat::ASTC4x4Unorm:
- case wgpu::TextureFormat::ASTC4x4UnormSrgb:
- case wgpu::TextureFormat::ASTC5x4Unorm:
- case wgpu::TextureFormat::ASTC5x4UnormSrgb:
- case wgpu::TextureFormat::ASTC5x5Unorm:
- case wgpu::TextureFormat::ASTC5x5UnormSrgb:
- case wgpu::TextureFormat::ASTC6x5Unorm:
- case wgpu::TextureFormat::ASTC6x5UnormSrgb:
- case wgpu::TextureFormat::ASTC6x6Unorm:
- case wgpu::TextureFormat::ASTC6x6UnormSrgb:
- case wgpu::TextureFormat::ASTC8x5Unorm:
- case wgpu::TextureFormat::ASTC8x5UnormSrgb:
- case wgpu::TextureFormat::ASTC8x6Unorm:
- case wgpu::TextureFormat::ASTC8x6UnormSrgb:
- case wgpu::TextureFormat::ASTC8x8Unorm:
- case wgpu::TextureFormat::ASTC8x8UnormSrgb:
- case wgpu::TextureFormat::ASTC10x5Unorm:
- case wgpu::TextureFormat::ASTC10x5UnormSrgb:
- case wgpu::TextureFormat::ASTC10x6Unorm:
- case wgpu::TextureFormat::ASTC10x6UnormSrgb:
- case wgpu::TextureFormat::ASTC10x8Unorm:
- case wgpu::TextureFormat::ASTC10x8UnormSrgb:
- case wgpu::TextureFormat::ASTC10x10Unorm:
- case wgpu::TextureFormat::ASTC10x10UnormSrgb:
- case wgpu::TextureFormat::ASTC12x10Unorm:
- case wgpu::TextureFormat::ASTC12x10UnormSrgb:
- case wgpu::TextureFormat::ASTC12x12Unorm:
- case wgpu::TextureFormat::ASTC12x12UnormSrgb:
-
- case wgpu::TextureFormat::R8BG8Biplanar420Unorm:
- case wgpu::TextureFormat::Undefined:
- UNREACHABLE();
- }
- }
-
- } // namespace
-
- DXGI_FORMAT D3D12TextureFormat(wgpu::TextureFormat format) {
- switch (format) {
- case wgpu::TextureFormat::R8Unorm:
- return DXGI_FORMAT_R8_UNORM;
- case wgpu::TextureFormat::R8Snorm:
- return DXGI_FORMAT_R8_SNORM;
- case wgpu::TextureFormat::R8Uint:
- return DXGI_FORMAT_R8_UINT;
- case wgpu::TextureFormat::R8Sint:
- return DXGI_FORMAT_R8_SINT;
-
- case wgpu::TextureFormat::R16Uint:
- return DXGI_FORMAT_R16_UINT;
- case wgpu::TextureFormat::R16Sint:
- return DXGI_FORMAT_R16_SINT;
- case wgpu::TextureFormat::R16Float:
- return DXGI_FORMAT_R16_FLOAT;
- case wgpu::TextureFormat::RG8Unorm:
- return DXGI_FORMAT_R8G8_UNORM;
- case wgpu::TextureFormat::RG8Snorm:
- return DXGI_FORMAT_R8G8_SNORM;
- case wgpu::TextureFormat::RG8Uint:
- return DXGI_FORMAT_R8G8_UINT;
- case wgpu::TextureFormat::RG8Sint:
- return DXGI_FORMAT_R8G8_SINT;
-
- case wgpu::TextureFormat::R32Uint:
- return DXGI_FORMAT_R32_UINT;
- case wgpu::TextureFormat::R32Sint:
- return DXGI_FORMAT_R32_SINT;
- case wgpu::TextureFormat::R32Float:
- return DXGI_FORMAT_R32_FLOAT;
- case wgpu::TextureFormat::RG16Uint:
- return DXGI_FORMAT_R16G16_UINT;
- case wgpu::TextureFormat::RG16Sint:
- return DXGI_FORMAT_R16G16_SINT;
- case wgpu::TextureFormat::RG16Float:
- return DXGI_FORMAT_R16G16_FLOAT;
- case wgpu::TextureFormat::RGBA8Unorm:
- return DXGI_FORMAT_R8G8B8A8_UNORM;
- case wgpu::TextureFormat::RGBA8UnormSrgb:
- return DXGI_FORMAT_R8G8B8A8_UNORM_SRGB;
- case wgpu::TextureFormat::RGBA8Snorm:
- return DXGI_FORMAT_R8G8B8A8_SNORM;
- case wgpu::TextureFormat::RGBA8Uint:
- return DXGI_FORMAT_R8G8B8A8_UINT;
- case wgpu::TextureFormat::RGBA8Sint:
- return DXGI_FORMAT_R8G8B8A8_SINT;
- case wgpu::TextureFormat::BGRA8Unorm:
- return DXGI_FORMAT_B8G8R8A8_UNORM;
- case wgpu::TextureFormat::BGRA8UnormSrgb:
- return DXGI_FORMAT_B8G8R8A8_UNORM_SRGB;
- case wgpu::TextureFormat::RGB10A2Unorm:
- return DXGI_FORMAT_R10G10B10A2_UNORM;
- case wgpu::TextureFormat::RG11B10Ufloat:
- return DXGI_FORMAT_R11G11B10_FLOAT;
- case wgpu::TextureFormat::RGB9E5Ufloat:
- return DXGI_FORMAT_R9G9B9E5_SHAREDEXP;
-
- case wgpu::TextureFormat::RG32Uint:
- return DXGI_FORMAT_R32G32_UINT;
- case wgpu::TextureFormat::RG32Sint:
- return DXGI_FORMAT_R32G32_SINT;
- case wgpu::TextureFormat::RG32Float:
- return DXGI_FORMAT_R32G32_FLOAT;
- case wgpu::TextureFormat::RGBA16Uint:
- return DXGI_FORMAT_R16G16B16A16_UINT;
- case wgpu::TextureFormat::RGBA16Sint:
- return DXGI_FORMAT_R16G16B16A16_SINT;
- case wgpu::TextureFormat::RGBA16Float:
- return DXGI_FORMAT_R16G16B16A16_FLOAT;
-
- case wgpu::TextureFormat::RGBA32Uint:
- return DXGI_FORMAT_R32G32B32A32_UINT;
- case wgpu::TextureFormat::RGBA32Sint:
- return DXGI_FORMAT_R32G32B32A32_SINT;
- case wgpu::TextureFormat::RGBA32Float:
- return DXGI_FORMAT_R32G32B32A32_FLOAT;
-
- case wgpu::TextureFormat::Depth16Unorm:
- return DXGI_FORMAT_D16_UNORM;
- case wgpu::TextureFormat::Depth32Float:
- case wgpu::TextureFormat::Depth24Plus:
- return DXGI_FORMAT_D32_FLOAT;
- // Depth24UnormStencil8 is the smallest format supported on D3D12 that has stencil.
- case wgpu::TextureFormat::Stencil8:
- case wgpu::TextureFormat::Depth24UnormStencil8:
- return DXGI_FORMAT_D24_UNORM_S8_UINT;
- case wgpu::TextureFormat::Depth24PlusStencil8:
- case wgpu::TextureFormat::Depth32FloatStencil8:
- return DXGI_FORMAT_D32_FLOAT_S8X24_UINT;
-
- case wgpu::TextureFormat::BC1RGBAUnorm:
- return DXGI_FORMAT_BC1_UNORM;
- case wgpu::TextureFormat::BC1RGBAUnormSrgb:
- return DXGI_FORMAT_BC1_UNORM_SRGB;
- case wgpu::TextureFormat::BC2RGBAUnorm:
- return DXGI_FORMAT_BC2_UNORM;
- case wgpu::TextureFormat::BC2RGBAUnormSrgb:
- return DXGI_FORMAT_BC2_UNORM_SRGB;
- case wgpu::TextureFormat::BC3RGBAUnorm:
- return DXGI_FORMAT_BC3_UNORM;
- case wgpu::TextureFormat::BC3RGBAUnormSrgb:
- return DXGI_FORMAT_BC3_UNORM_SRGB;
- case wgpu::TextureFormat::BC4RSnorm:
- return DXGI_FORMAT_BC4_SNORM;
- case wgpu::TextureFormat::BC4RUnorm:
- return DXGI_FORMAT_BC4_UNORM;
- case wgpu::TextureFormat::BC5RGSnorm:
- return DXGI_FORMAT_BC5_SNORM;
- case wgpu::TextureFormat::BC5RGUnorm:
- return DXGI_FORMAT_BC5_UNORM;
- case wgpu::TextureFormat::BC6HRGBFloat:
- return DXGI_FORMAT_BC6H_SF16;
- case wgpu::TextureFormat::BC6HRGBUfloat:
- return DXGI_FORMAT_BC6H_UF16;
- case wgpu::TextureFormat::BC7RGBAUnorm:
- return DXGI_FORMAT_BC7_UNORM;
- case wgpu::TextureFormat::BC7RGBAUnormSrgb:
- return DXGI_FORMAT_BC7_UNORM_SRGB;
-
- case wgpu::TextureFormat::R8BG8Biplanar420Unorm:
- return DXGI_FORMAT_NV12;
-
- case wgpu::TextureFormat::ETC2RGB8Unorm:
- case wgpu::TextureFormat::ETC2RGB8UnormSrgb:
- case wgpu::TextureFormat::ETC2RGB8A1Unorm:
- case wgpu::TextureFormat::ETC2RGB8A1UnormSrgb:
- case wgpu::TextureFormat::ETC2RGBA8Unorm:
- case wgpu::TextureFormat::ETC2RGBA8UnormSrgb:
- case wgpu::TextureFormat::EACR11Unorm:
- case wgpu::TextureFormat::EACR11Snorm:
- case wgpu::TextureFormat::EACRG11Unorm:
- case wgpu::TextureFormat::EACRG11Snorm:
-
- case wgpu::TextureFormat::ASTC4x4Unorm:
- case wgpu::TextureFormat::ASTC4x4UnormSrgb:
- case wgpu::TextureFormat::ASTC5x4Unorm:
- case wgpu::TextureFormat::ASTC5x4UnormSrgb:
- case wgpu::TextureFormat::ASTC5x5Unorm:
- case wgpu::TextureFormat::ASTC5x5UnormSrgb:
- case wgpu::TextureFormat::ASTC6x5Unorm:
- case wgpu::TextureFormat::ASTC6x5UnormSrgb:
- case wgpu::TextureFormat::ASTC6x6Unorm:
- case wgpu::TextureFormat::ASTC6x6UnormSrgb:
- case wgpu::TextureFormat::ASTC8x5Unorm:
- case wgpu::TextureFormat::ASTC8x5UnormSrgb:
- case wgpu::TextureFormat::ASTC8x6Unorm:
- case wgpu::TextureFormat::ASTC8x6UnormSrgb:
- case wgpu::TextureFormat::ASTC8x8Unorm:
- case wgpu::TextureFormat::ASTC8x8UnormSrgb:
- case wgpu::TextureFormat::ASTC10x5Unorm:
- case wgpu::TextureFormat::ASTC10x5UnormSrgb:
- case wgpu::TextureFormat::ASTC10x6Unorm:
- case wgpu::TextureFormat::ASTC10x6UnormSrgb:
- case wgpu::TextureFormat::ASTC10x8Unorm:
- case wgpu::TextureFormat::ASTC10x8UnormSrgb:
- case wgpu::TextureFormat::ASTC10x10Unorm:
- case wgpu::TextureFormat::ASTC10x10UnormSrgb:
- case wgpu::TextureFormat::ASTC12x10Unorm:
- case wgpu::TextureFormat::ASTC12x10UnormSrgb:
- case wgpu::TextureFormat::ASTC12x12Unorm:
- case wgpu::TextureFormat::ASTC12x12UnormSrgb:
-
- case wgpu::TextureFormat::Undefined:
- UNREACHABLE();
- }
+ if (usage & kPresentTextureUsage) {
+ // The present usage is only used internally by the swapchain and is never used in
+ // combination with other usages.
+ ASSERT(usage == kPresentTextureUsage);
+ return D3D12_RESOURCE_STATE_PRESENT;
}
- MaybeError ValidateTextureDescriptorCanBeWrapped(const TextureDescriptor* descriptor) {
- DAWN_INVALID_IF(descriptor->dimension != wgpu::TextureDimension::e2D,
- "Texture dimension (%s) is not %s.", descriptor->dimension,
- wgpu::TextureDimension::e2D);
-
- DAWN_INVALID_IF(descriptor->mipLevelCount != 1, "Mip level count (%u) is not 1.",
- descriptor->mipLevelCount);
-
- DAWN_INVALID_IF(descriptor->size.depthOrArrayLayers != 1,
- "Array layer count (%u) is not 1.", descriptor->size.depthOrArrayLayers);
-
- DAWN_INVALID_IF(descriptor->sampleCount != 1, "Sample count (%u) is not 1.",
- descriptor->sampleCount);
-
- return {};
+ if (usage & wgpu::TextureUsage::CopySrc) {
+ resourceState |= D3D12_RESOURCE_STATE_COPY_SOURCE;
}
-
- MaybeError ValidateD3D12TextureCanBeWrapped(ID3D12Resource* d3d12Resource,
- const TextureDescriptor* dawnDescriptor) {
- const D3D12_RESOURCE_DESC d3dDescriptor = d3d12Resource->GetDesc();
- DAWN_INVALID_IF(
- (dawnDescriptor->size.width != d3dDescriptor.Width) ||
- (dawnDescriptor->size.height != d3dDescriptor.Height) ||
- (dawnDescriptor->size.depthOrArrayLayers != 1),
- "D3D12 texture size (Width: %u, Height: %u, DepthOrArraySize: 1) doesn't match Dawn "
- "descriptor size (width: %u, height: %u, depthOrArrayLayers: %u).",
- d3dDescriptor.Width, d3dDescriptor.Height, dawnDescriptor->size.width,
- dawnDescriptor->size.height, dawnDescriptor->size.depthOrArrayLayers);
-
- const DXGI_FORMAT dxgiFormatFromDescriptor = D3D12TextureFormat(dawnDescriptor->format);
- DAWN_INVALID_IF(
- dxgiFormatFromDescriptor != d3dDescriptor.Format,
- "D3D12 texture format (%x) is not compatible with Dawn descriptor format (%s).",
- d3dDescriptor.Format, dawnDescriptor->format);
-
- DAWN_INVALID_IF(d3dDescriptor.MipLevels != 1,
- "D3D12 texture number of miplevels (%u) is not 1.",
- d3dDescriptor.MipLevels);
-
- DAWN_INVALID_IF(d3dDescriptor.DepthOrArraySize != 1,
- "D3D12 texture array size (%u) is not 1.", d3dDescriptor.DepthOrArraySize);
-
- // Shared textures cannot be multi-sample so no need to check those.
- ASSERT(d3dDescriptor.SampleDesc.Count == 1);
- ASSERT(d3dDescriptor.SampleDesc.Quality == 0);
-
- return {};
+ if (usage & wgpu::TextureUsage::CopyDst) {
+ resourceState |= D3D12_RESOURCE_STATE_COPY_DEST;
}
-
- // https://docs.microsoft.com/en-us/windows/win32/api/d3d12/ne-d3d12-d3d12_shared_resource_compatibility_tier
- MaybeError ValidateD3D12VideoTextureCanBeShared(Device* device, DXGI_FORMAT textureFormat) {
- const bool supportsSharedResourceCapabilityTier1 =
- device->GetDeviceInfo().supportsSharedResourceCapabilityTier1;
- switch (textureFormat) {
- // MSDN docs are not correct, NV12 requires at-least tier 1.
- case DXGI_FORMAT_NV12:
- if (supportsSharedResourceCapabilityTier1) {
- return {};
- }
- break;
- default:
- break;
- }
-
- return DAWN_FORMAT_VALIDATION_ERROR("DXGI format does not support cross-API sharing.");
+ if (usage & (wgpu::TextureUsage::TextureBinding)) {
+ resourceState |= (D3D12_RESOURCE_STATE_PIXEL_SHADER_RESOURCE |
+ D3D12_RESOURCE_STATE_NON_PIXEL_SHADER_RESOURCE);
}
-
- // static
- ResultOrError<Ref<Texture>> Texture::Create(Device* device,
- const TextureDescriptor* descriptor) {
- Ref<Texture> dawnTexture =
- AcquireRef(new Texture(device, descriptor, TextureState::OwnedInternal));
-
- DAWN_INVALID_IF(dawnTexture->GetFormat().IsMultiPlanar(),
- "Cannot create a multi-planar formatted texture directly");
-
- DAWN_TRY(dawnTexture->InitializeAsInternalTexture());
- return std::move(dawnTexture);
+ if (usage & wgpu::TextureUsage::StorageBinding) {
+ resourceState |= D3D12_RESOURCE_STATE_UNORDERED_ACCESS;
}
-
- // static
- ResultOrError<Ref<Texture>> Texture::CreateExternalImage(
- Device* device,
- const TextureDescriptor* descriptor,
- ComPtr<ID3D12Resource> d3d12Texture,
- Ref<D3D11on12ResourceCacheEntry> d3d11on12Resource,
- bool isSwapChainTexture,
- bool isInitialized) {
- Ref<Texture> dawnTexture =
- AcquireRef(new Texture(device, descriptor, TextureState::OwnedExternal));
- DAWN_TRY(dawnTexture->InitializeAsExternalTexture(
- descriptor, std::move(d3d12Texture), std::move(d3d11on12Resource), isSwapChainTexture));
-
- // Importing a multi-planar format must be initialized. This is required because
- // a shared multi-planar format cannot be initialized by Dawn.
- DAWN_INVALID_IF(
- !isInitialized && dawnTexture->GetFormat().IsMultiPlanar(),
- "Cannot create a texture with a multi-planar format (%s) with uninitialized data.",
- dawnTexture->GetFormat().format);
-
- dawnTexture->SetIsSubresourceContentInitialized(isInitialized,
- dawnTexture->GetAllSubresources());
- return std::move(dawnTexture);
+ if (usage & wgpu::TextureUsage::RenderAttachment) {
+ if (format.HasDepthOrStencil()) {
+ resourceState |= D3D12_RESOURCE_STATE_DEPTH_WRITE;
+ } else {
+ resourceState |= D3D12_RESOURCE_STATE_RENDER_TARGET;
+ }
}
- // static
- ResultOrError<Ref<Texture>> Texture::Create(Device* device,
- const TextureDescriptor* descriptor,
- ComPtr<ID3D12Resource> d3d12Texture) {
- Ref<Texture> dawnTexture =
- AcquireRef(new Texture(device, descriptor, TextureState::OwnedExternal));
- DAWN_TRY(dawnTexture->InitializeAsSwapChainTexture(std::move(d3d12Texture)));
- return std::move(dawnTexture);
+ if (usage & kReadOnlyRenderAttachment) {
+        // There is no STENCIL_READ state; read-only stencil access is bundled with DEPTH_READ.
+ resourceState |= D3D12_RESOURCE_STATE_DEPTH_READ;
}
- MaybeError Texture::InitializeAsExternalTexture(
- const TextureDescriptor* descriptor,
- ComPtr<ID3D12Resource> d3d12Texture,
- Ref<D3D11on12ResourceCacheEntry> d3d11on12Resource,
- bool isSwapChainTexture) {
- mD3D11on12Resource = std::move(d3d11on12Resource);
- mSwapChainTexture = isSwapChainTexture;
+ return resourceState;
+}
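Illustrative sketch, not part of the diff: how a combined WebGPU usage maps through the helper above, assuming the signature D3D12_RESOURCE_STATES D3D12TextureUsage(wgpu::TextureUsage, const Format&) implied by the call sites later in this file; colorFormat stands for any hypothetical Format without a depth/stencil aspect.

    wgpu::TextureUsage usage =
        wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::TextureBinding;
    D3D12_RESOURCE_STATES state = D3D12TextureUsage(usage, colorFormat);
    // state == D3D12_RESOURCE_STATE_COPY_SOURCE |
    //          D3D12_RESOURCE_STATE_PIXEL_SHADER_RESOURCE |
    //          D3D12_RESOURCE_STATE_NON_PIXEL_SHADER_RESOURCE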
- D3D12_RESOURCE_DESC desc = d3d12Texture->GetDesc();
- mD3D12ResourceFlags = desc.Flags;
+D3D12_RESOURCE_FLAGS D3D12ResourceFlags(wgpu::TextureUsage usage, const Format& format) {
+ D3D12_RESOURCE_FLAGS flags = D3D12_RESOURCE_FLAG_NONE;
- AllocationInfo info;
- info.mMethod = AllocationMethod::kExternal;
- // When creating the ResourceHeapAllocation, the resource heap is set to nullptr because the
- // texture is owned externally. The texture's owning entity must remain responsible for
- // memory management.
- mResourceAllocation = {info, 0, std::move(d3d12Texture), nullptr};
-
- SetLabelHelper("Dawn_ExternalTexture");
-
- return {};
+ if (usage & wgpu::TextureUsage::StorageBinding) {
+ flags |= D3D12_RESOURCE_FLAG_ALLOW_UNORDERED_ACCESS;
}
- MaybeError Texture::InitializeAsInternalTexture() {
- D3D12_RESOURCE_DESC resourceDescriptor;
- resourceDescriptor.Dimension = D3D12TextureDimension(GetDimension());
- resourceDescriptor.Alignment = 0;
-
- const Extent3D& size = GetSize();
- resourceDescriptor.Width = size.width;
- resourceDescriptor.Height = size.height;
- resourceDescriptor.DepthOrArraySize = size.depthOrArrayLayers;
-
- // This will need to be much more nuanced when WebGPU has
- // texture view compatibility rules.
- const bool needsTypelessFormat =
- GetFormat().HasDepthOrStencil() &&
- (GetInternalUsage() & wgpu::TextureUsage::TextureBinding) != 0;
-
- DXGI_FORMAT dxgiFormat = needsTypelessFormat
- ? D3D12TypelessTextureFormat(GetFormat().format)
- : D3D12TextureFormat(GetFormat().format);
-
- resourceDescriptor.MipLevels = static_cast<UINT16>(GetNumMipLevels());
- resourceDescriptor.Format = dxgiFormat;
- resourceDescriptor.SampleDesc.Count = GetSampleCount();
- resourceDescriptor.SampleDesc.Quality = 0;
- resourceDescriptor.Layout = D3D12_TEXTURE_LAYOUT_UNKNOWN;
- resourceDescriptor.Flags =
- D3D12ResourceFlags(GetInternalUsage(), GetFormat(), IsMultisampledTexture());
- mD3D12ResourceFlags = resourceDescriptor.Flags;
-
- DAWN_TRY_ASSIGN(mResourceAllocation,
- ToBackend(GetDevice())
- ->AllocateMemory(D3D12_HEAP_TYPE_DEFAULT, resourceDescriptor,
- D3D12_RESOURCE_STATE_COMMON));
-
- SetLabelImpl();
-
- Device* device = ToBackend(GetDevice());
-
- if (device->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting)) {
- CommandRecordingContext* commandContext;
- DAWN_TRY_ASSIGN(commandContext, device->GetPendingCommandContext());
-
- DAWN_TRY(ClearTexture(commandContext, GetAllSubresources(),
- TextureBase::ClearValue::NonZero));
+ if (usage & wgpu::TextureUsage::RenderAttachment) {
+ if (format.HasDepthOrStencil()) {
+ flags |= D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL;
+ } else {
+ flags |= D3D12_RESOURCE_FLAG_ALLOW_RENDER_TARGET;
}
-
- return {};
}
- MaybeError Texture::InitializeAsSwapChainTexture(ComPtr<ID3D12Resource> d3d12Texture) {
- AllocationInfo info;
- info.mMethod = AllocationMethod::kExternal;
- // When creating the ResourceHeapAllocation, the resource heap is set to nullptr because the
- // texture is owned externally. The texture's owning entity must remain responsible for
- // memory management.
- mResourceAllocation = {info, 0, std::move(d3d12Texture), nullptr};
-
- SetLabelHelper("Dawn_SwapChainTexture");
-
- return {};
+ ASSERT(!(flags & D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL) ||
+ flags == D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL);
+ return flags;
+}
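Illustrative sketch, not part of the diff: two example calls to the flag mapping above (rgba8Format and depth32Format are hypothetical Format instances); the ASSERT guarantees a depth/stencil attachment is never combined with render-target or UAV flags.

    D3D12_RESOURCE_FLAGS colorFlags = D3D12ResourceFlags(
        wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::StorageBinding, rgba8Format);
    // colorFlags == D3D12_RESOURCE_FLAG_ALLOW_RENDER_TARGET |
    //               D3D12_RESOURCE_FLAG_ALLOW_UNORDERED_ACCESS
    D3D12_RESOURCE_FLAGS depthFlags =
        D3D12ResourceFlags(wgpu::TextureUsage::RenderAttachment, depth32Format);
    // depthFlags == D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL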
+
+D3D12_RESOURCE_DIMENSION D3D12TextureDimension(wgpu::TextureDimension dimension) {
+ switch (dimension) {
+ case wgpu::TextureDimension::e1D:
+ return D3D12_RESOURCE_DIMENSION_TEXTURE1D;
+ case wgpu::TextureDimension::e2D:
+ return D3D12_RESOURCE_DIMENSION_TEXTURE2D;
+ case wgpu::TextureDimension::e3D:
+ return D3D12_RESOURCE_DIMENSION_TEXTURE3D;
}
-
- Texture::Texture(Device* device, const TextureDescriptor* descriptor, TextureState state)
- : TextureBase(device, descriptor, state),
- mSubresourceStateAndDecay(
- GetFormat().aspects,
- GetArrayLayers(),
- GetNumMipLevels(),
- {D3D12_RESOURCE_STATES::D3D12_RESOURCE_STATE_COMMON, kMaxExecutionSerial, false}) {
+}
+
+DXGI_FORMAT D3D12TypelessTextureFormat(wgpu::TextureFormat format) {
+ switch (format) {
+ case wgpu::TextureFormat::R8Unorm:
+ case wgpu::TextureFormat::R8Snorm:
+ case wgpu::TextureFormat::R8Uint:
+ case wgpu::TextureFormat::R8Sint:
+ return DXGI_FORMAT_R8_TYPELESS;
+
+ case wgpu::TextureFormat::R16Uint:
+ case wgpu::TextureFormat::R16Sint:
+ case wgpu::TextureFormat::R16Float:
+ case wgpu::TextureFormat::Depth16Unorm:
+ return DXGI_FORMAT_R16_TYPELESS;
+
+ case wgpu::TextureFormat::RG8Unorm:
+ case wgpu::TextureFormat::RG8Snorm:
+ case wgpu::TextureFormat::RG8Uint:
+ case wgpu::TextureFormat::RG8Sint:
+ return DXGI_FORMAT_R8G8_TYPELESS;
+
+ case wgpu::TextureFormat::R32Uint:
+ case wgpu::TextureFormat::R32Sint:
+ case wgpu::TextureFormat::R32Float:
+ return DXGI_FORMAT_R32_TYPELESS;
+
+ case wgpu::TextureFormat::RG16Uint:
+ case wgpu::TextureFormat::RG16Sint:
+ case wgpu::TextureFormat::RG16Float:
+ return DXGI_FORMAT_R16G16_TYPELESS;
+
+ case wgpu::TextureFormat::RGBA8Unorm:
+ case wgpu::TextureFormat::RGBA8UnormSrgb:
+ case wgpu::TextureFormat::RGBA8Snorm:
+ case wgpu::TextureFormat::RGBA8Uint:
+ case wgpu::TextureFormat::RGBA8Sint:
+ return DXGI_FORMAT_R8G8B8A8_TYPELESS;
+
+ case wgpu::TextureFormat::BGRA8Unorm:
+ case wgpu::TextureFormat::BGRA8UnormSrgb:
+ return DXGI_FORMAT_B8G8R8A8_TYPELESS;
+
+ case wgpu::TextureFormat::RGB10A2Unorm:
+ return DXGI_FORMAT_R10G10B10A2_TYPELESS;
+
+ case wgpu::TextureFormat::RG11B10Ufloat:
+ return DXGI_FORMAT_R11G11B10_FLOAT;
+ case wgpu::TextureFormat::RGB9E5Ufloat:
+ return DXGI_FORMAT_R9G9B9E5_SHAREDEXP;
+
+ case wgpu::TextureFormat::RG32Uint:
+ case wgpu::TextureFormat::RG32Sint:
+ case wgpu::TextureFormat::RG32Float:
+ return DXGI_FORMAT_R32G32_TYPELESS;
+
+ case wgpu::TextureFormat::RGBA16Uint:
+ case wgpu::TextureFormat::RGBA16Sint:
+ case wgpu::TextureFormat::RGBA16Float:
+ return DXGI_FORMAT_R16G16B16A16_TYPELESS;
+
+ case wgpu::TextureFormat::RGBA32Uint:
+ case wgpu::TextureFormat::RGBA32Sint:
+ case wgpu::TextureFormat::RGBA32Float:
+ return DXGI_FORMAT_R32G32B32A32_TYPELESS;
+
+ case wgpu::TextureFormat::Depth32Float:
+ case wgpu::TextureFormat::Depth24Plus:
+ return DXGI_FORMAT_R32_TYPELESS;
+
+ // Depth24UnormStencil8 is the smallest format supported on D3D12 that has stencil.
+ case wgpu::TextureFormat::Stencil8:
+ case wgpu::TextureFormat::Depth24UnormStencil8:
+ return DXGI_FORMAT_R24G8_TYPELESS;
+ case wgpu::TextureFormat::Depth24PlusStencil8:
+ case wgpu::TextureFormat::Depth32FloatStencil8:
+ return DXGI_FORMAT_R32G8X24_TYPELESS;
+
+ case wgpu::TextureFormat::BC1RGBAUnorm:
+ case wgpu::TextureFormat::BC1RGBAUnormSrgb:
+ return DXGI_FORMAT_BC1_TYPELESS;
+
+ case wgpu::TextureFormat::BC2RGBAUnorm:
+ case wgpu::TextureFormat::BC2RGBAUnormSrgb:
+ return DXGI_FORMAT_BC2_TYPELESS;
+
+ case wgpu::TextureFormat::BC3RGBAUnorm:
+ case wgpu::TextureFormat::BC3RGBAUnormSrgb:
+ return DXGI_FORMAT_BC3_TYPELESS;
+
+ case wgpu::TextureFormat::BC4RSnorm:
+ case wgpu::TextureFormat::BC4RUnorm:
+ return DXGI_FORMAT_BC4_TYPELESS;
+
+ case wgpu::TextureFormat::BC5RGSnorm:
+ case wgpu::TextureFormat::BC5RGUnorm:
+ return DXGI_FORMAT_BC5_TYPELESS;
+
+ case wgpu::TextureFormat::BC6HRGBFloat:
+ case wgpu::TextureFormat::BC6HRGBUfloat:
+ return DXGI_FORMAT_BC6H_TYPELESS;
+
+ case wgpu::TextureFormat::BC7RGBAUnorm:
+ case wgpu::TextureFormat::BC7RGBAUnormSrgb:
+ return DXGI_FORMAT_BC7_TYPELESS;
+
+ case wgpu::TextureFormat::ETC2RGB8Unorm:
+ case wgpu::TextureFormat::ETC2RGB8UnormSrgb:
+ case wgpu::TextureFormat::ETC2RGB8A1Unorm:
+ case wgpu::TextureFormat::ETC2RGB8A1UnormSrgb:
+ case wgpu::TextureFormat::ETC2RGBA8Unorm:
+ case wgpu::TextureFormat::ETC2RGBA8UnormSrgb:
+ case wgpu::TextureFormat::EACR11Unorm:
+ case wgpu::TextureFormat::EACR11Snorm:
+ case wgpu::TextureFormat::EACRG11Unorm:
+ case wgpu::TextureFormat::EACRG11Snorm:
+
+ case wgpu::TextureFormat::ASTC4x4Unorm:
+ case wgpu::TextureFormat::ASTC4x4UnormSrgb:
+ case wgpu::TextureFormat::ASTC5x4Unorm:
+ case wgpu::TextureFormat::ASTC5x4UnormSrgb:
+ case wgpu::TextureFormat::ASTC5x5Unorm:
+ case wgpu::TextureFormat::ASTC5x5UnormSrgb:
+ case wgpu::TextureFormat::ASTC6x5Unorm:
+ case wgpu::TextureFormat::ASTC6x5UnormSrgb:
+ case wgpu::TextureFormat::ASTC6x6Unorm:
+ case wgpu::TextureFormat::ASTC6x6UnormSrgb:
+ case wgpu::TextureFormat::ASTC8x5Unorm:
+ case wgpu::TextureFormat::ASTC8x5UnormSrgb:
+ case wgpu::TextureFormat::ASTC8x6Unorm:
+ case wgpu::TextureFormat::ASTC8x6UnormSrgb:
+ case wgpu::TextureFormat::ASTC8x8Unorm:
+ case wgpu::TextureFormat::ASTC8x8UnormSrgb:
+ case wgpu::TextureFormat::ASTC10x5Unorm:
+ case wgpu::TextureFormat::ASTC10x5UnormSrgb:
+ case wgpu::TextureFormat::ASTC10x6Unorm:
+ case wgpu::TextureFormat::ASTC10x6UnormSrgb:
+ case wgpu::TextureFormat::ASTC10x8Unorm:
+ case wgpu::TextureFormat::ASTC10x8UnormSrgb:
+ case wgpu::TextureFormat::ASTC10x10Unorm:
+ case wgpu::TextureFormat::ASTC10x10UnormSrgb:
+ case wgpu::TextureFormat::ASTC12x10Unorm:
+ case wgpu::TextureFormat::ASTC12x10UnormSrgb:
+ case wgpu::TextureFormat::ASTC12x12Unorm:
+ case wgpu::TextureFormat::ASTC12x12UnormSrgb:
+
+ case wgpu::TextureFormat::R8BG8Biplanar420Unorm:
+ case wgpu::TextureFormat::Undefined:
+ UNREACHABLE();
}
-
- Texture::~Texture() {
+}
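Illustrative sketch, not part of the diff: the typeless families above let one resource back differently typed views, which is why InitializeAsInternalTexture below picks a typeless format for depth textures that are also sampled. For a sampled Depth32Float texture (the view descriptor names here are hypothetical):

    resourceDescriptor.Format = DXGI_FORMAT_R32_TYPELESS;  // format the resource is created with
    dsvDesc.Format = DXGI_FORMAT_D32_FLOAT;                // depth-stencil view
    srvDesc.Format = DXGI_FORMAT_R32_FLOAT;                // shader-resource view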
+
+} // namespace
+
+DXGI_FORMAT D3D12TextureFormat(wgpu::TextureFormat format) {
+ switch (format) {
+ case wgpu::TextureFormat::R8Unorm:
+ return DXGI_FORMAT_R8_UNORM;
+ case wgpu::TextureFormat::R8Snorm:
+ return DXGI_FORMAT_R8_SNORM;
+ case wgpu::TextureFormat::R8Uint:
+ return DXGI_FORMAT_R8_UINT;
+ case wgpu::TextureFormat::R8Sint:
+ return DXGI_FORMAT_R8_SINT;
+
+ case wgpu::TextureFormat::R16Uint:
+ return DXGI_FORMAT_R16_UINT;
+ case wgpu::TextureFormat::R16Sint:
+ return DXGI_FORMAT_R16_SINT;
+ case wgpu::TextureFormat::R16Float:
+ return DXGI_FORMAT_R16_FLOAT;
+ case wgpu::TextureFormat::RG8Unorm:
+ return DXGI_FORMAT_R8G8_UNORM;
+ case wgpu::TextureFormat::RG8Snorm:
+ return DXGI_FORMAT_R8G8_SNORM;
+ case wgpu::TextureFormat::RG8Uint:
+ return DXGI_FORMAT_R8G8_UINT;
+ case wgpu::TextureFormat::RG8Sint:
+ return DXGI_FORMAT_R8G8_SINT;
+
+ case wgpu::TextureFormat::R32Uint:
+ return DXGI_FORMAT_R32_UINT;
+ case wgpu::TextureFormat::R32Sint:
+ return DXGI_FORMAT_R32_SINT;
+ case wgpu::TextureFormat::R32Float:
+ return DXGI_FORMAT_R32_FLOAT;
+ case wgpu::TextureFormat::RG16Uint:
+ return DXGI_FORMAT_R16G16_UINT;
+ case wgpu::TextureFormat::RG16Sint:
+ return DXGI_FORMAT_R16G16_SINT;
+ case wgpu::TextureFormat::RG16Float:
+ return DXGI_FORMAT_R16G16_FLOAT;
+ case wgpu::TextureFormat::RGBA8Unorm:
+ return DXGI_FORMAT_R8G8B8A8_UNORM;
+ case wgpu::TextureFormat::RGBA8UnormSrgb:
+ return DXGI_FORMAT_R8G8B8A8_UNORM_SRGB;
+ case wgpu::TextureFormat::RGBA8Snorm:
+ return DXGI_FORMAT_R8G8B8A8_SNORM;
+ case wgpu::TextureFormat::RGBA8Uint:
+ return DXGI_FORMAT_R8G8B8A8_UINT;
+ case wgpu::TextureFormat::RGBA8Sint:
+ return DXGI_FORMAT_R8G8B8A8_SINT;
+ case wgpu::TextureFormat::BGRA8Unorm:
+ return DXGI_FORMAT_B8G8R8A8_UNORM;
+ case wgpu::TextureFormat::BGRA8UnormSrgb:
+ return DXGI_FORMAT_B8G8R8A8_UNORM_SRGB;
+ case wgpu::TextureFormat::RGB10A2Unorm:
+ return DXGI_FORMAT_R10G10B10A2_UNORM;
+ case wgpu::TextureFormat::RG11B10Ufloat:
+ return DXGI_FORMAT_R11G11B10_FLOAT;
+ case wgpu::TextureFormat::RGB9E5Ufloat:
+ return DXGI_FORMAT_R9G9B9E5_SHAREDEXP;
+
+ case wgpu::TextureFormat::RG32Uint:
+ return DXGI_FORMAT_R32G32_UINT;
+ case wgpu::TextureFormat::RG32Sint:
+ return DXGI_FORMAT_R32G32_SINT;
+ case wgpu::TextureFormat::RG32Float:
+ return DXGI_FORMAT_R32G32_FLOAT;
+ case wgpu::TextureFormat::RGBA16Uint:
+ return DXGI_FORMAT_R16G16B16A16_UINT;
+ case wgpu::TextureFormat::RGBA16Sint:
+ return DXGI_FORMAT_R16G16B16A16_SINT;
+ case wgpu::TextureFormat::RGBA16Float:
+ return DXGI_FORMAT_R16G16B16A16_FLOAT;
+
+ case wgpu::TextureFormat::RGBA32Uint:
+ return DXGI_FORMAT_R32G32B32A32_UINT;
+ case wgpu::TextureFormat::RGBA32Sint:
+ return DXGI_FORMAT_R32G32B32A32_SINT;
+ case wgpu::TextureFormat::RGBA32Float:
+ return DXGI_FORMAT_R32G32B32A32_FLOAT;
+
+ case wgpu::TextureFormat::Depth16Unorm:
+ return DXGI_FORMAT_D16_UNORM;
+ case wgpu::TextureFormat::Depth32Float:
+ case wgpu::TextureFormat::Depth24Plus:
+ return DXGI_FORMAT_D32_FLOAT;
+ // Depth24UnormStencil8 is the smallest format supported on D3D12 that has stencil.
+ case wgpu::TextureFormat::Stencil8:
+ case wgpu::TextureFormat::Depth24UnormStencil8:
+ return DXGI_FORMAT_D24_UNORM_S8_UINT;
+ case wgpu::TextureFormat::Depth24PlusStencil8:
+ case wgpu::TextureFormat::Depth32FloatStencil8:
+ return DXGI_FORMAT_D32_FLOAT_S8X24_UINT;
+
+ case wgpu::TextureFormat::BC1RGBAUnorm:
+ return DXGI_FORMAT_BC1_UNORM;
+ case wgpu::TextureFormat::BC1RGBAUnormSrgb:
+ return DXGI_FORMAT_BC1_UNORM_SRGB;
+ case wgpu::TextureFormat::BC2RGBAUnorm:
+ return DXGI_FORMAT_BC2_UNORM;
+ case wgpu::TextureFormat::BC2RGBAUnormSrgb:
+ return DXGI_FORMAT_BC2_UNORM_SRGB;
+ case wgpu::TextureFormat::BC3RGBAUnorm:
+ return DXGI_FORMAT_BC3_UNORM;
+ case wgpu::TextureFormat::BC3RGBAUnormSrgb:
+ return DXGI_FORMAT_BC3_UNORM_SRGB;
+ case wgpu::TextureFormat::BC4RSnorm:
+ return DXGI_FORMAT_BC4_SNORM;
+ case wgpu::TextureFormat::BC4RUnorm:
+ return DXGI_FORMAT_BC4_UNORM;
+ case wgpu::TextureFormat::BC5RGSnorm:
+ return DXGI_FORMAT_BC5_SNORM;
+ case wgpu::TextureFormat::BC5RGUnorm:
+ return DXGI_FORMAT_BC5_UNORM;
+ case wgpu::TextureFormat::BC6HRGBFloat:
+ return DXGI_FORMAT_BC6H_SF16;
+ case wgpu::TextureFormat::BC6HRGBUfloat:
+ return DXGI_FORMAT_BC6H_UF16;
+ case wgpu::TextureFormat::BC7RGBAUnorm:
+ return DXGI_FORMAT_BC7_UNORM;
+ case wgpu::TextureFormat::BC7RGBAUnormSrgb:
+ return DXGI_FORMAT_BC7_UNORM_SRGB;
+
+ case wgpu::TextureFormat::R8BG8Biplanar420Unorm:
+ return DXGI_FORMAT_NV12;
+
+ case wgpu::TextureFormat::ETC2RGB8Unorm:
+ case wgpu::TextureFormat::ETC2RGB8UnormSrgb:
+ case wgpu::TextureFormat::ETC2RGB8A1Unorm:
+ case wgpu::TextureFormat::ETC2RGB8A1UnormSrgb:
+ case wgpu::TextureFormat::ETC2RGBA8Unorm:
+ case wgpu::TextureFormat::ETC2RGBA8UnormSrgb:
+ case wgpu::TextureFormat::EACR11Unorm:
+ case wgpu::TextureFormat::EACR11Snorm:
+ case wgpu::TextureFormat::EACRG11Unorm:
+ case wgpu::TextureFormat::EACRG11Snorm:
+
+ case wgpu::TextureFormat::ASTC4x4Unorm:
+ case wgpu::TextureFormat::ASTC4x4UnormSrgb:
+ case wgpu::TextureFormat::ASTC5x4Unorm:
+ case wgpu::TextureFormat::ASTC5x4UnormSrgb:
+ case wgpu::TextureFormat::ASTC5x5Unorm:
+ case wgpu::TextureFormat::ASTC5x5UnormSrgb:
+ case wgpu::TextureFormat::ASTC6x5Unorm:
+ case wgpu::TextureFormat::ASTC6x5UnormSrgb:
+ case wgpu::TextureFormat::ASTC6x6Unorm:
+ case wgpu::TextureFormat::ASTC6x6UnormSrgb:
+ case wgpu::TextureFormat::ASTC8x5Unorm:
+ case wgpu::TextureFormat::ASTC8x5UnormSrgb:
+ case wgpu::TextureFormat::ASTC8x6Unorm:
+ case wgpu::TextureFormat::ASTC8x6UnormSrgb:
+ case wgpu::TextureFormat::ASTC8x8Unorm:
+ case wgpu::TextureFormat::ASTC8x8UnormSrgb:
+ case wgpu::TextureFormat::ASTC10x5Unorm:
+ case wgpu::TextureFormat::ASTC10x5UnormSrgb:
+ case wgpu::TextureFormat::ASTC10x6Unorm:
+ case wgpu::TextureFormat::ASTC10x6UnormSrgb:
+ case wgpu::TextureFormat::ASTC10x8Unorm:
+ case wgpu::TextureFormat::ASTC10x8UnormSrgb:
+ case wgpu::TextureFormat::ASTC10x10Unorm:
+ case wgpu::TextureFormat::ASTC10x10UnormSrgb:
+ case wgpu::TextureFormat::ASTC12x10Unorm:
+ case wgpu::TextureFormat::ASTC12x10UnormSrgb:
+ case wgpu::TextureFormat::ASTC12x12Unorm:
+ case wgpu::TextureFormat::ASTC12x12UnormSrgb:
+
+ case wgpu::TextureFormat::Undefined:
+ UNREACHABLE();
}
-
- void Texture::DestroyImpl() {
- TextureBase::DestroyImpl();
-
- Device* device = ToBackend(GetDevice());
-
- // In PIX's D3D12-only mode, there is no way to determine frame boundaries
- // for WebGPU since Dawn does not manage DXGI swap chains. Without assistance,
- // PIX will wait forever for a present that never happens.
- // If we know we're dealing with a swapbuffer texture, inform PIX we've
- // "presented" the texture so it can determine frame boundaries and use its
- // contents for the UI.
- if (mSwapChainTexture) {
- ID3D12SharingContract* d3dSharingContract = device->GetSharingContract();
- if (d3dSharingContract != nullptr) {
- d3dSharingContract->Present(mResourceAllocation.GetD3D12Resource(), 0, 0);
+}
+
+MaybeError ValidateTextureDescriptorCanBeWrapped(const TextureDescriptor* descriptor) {
+ DAWN_INVALID_IF(descriptor->dimension != wgpu::TextureDimension::e2D,
+ "Texture dimension (%s) is not %s.", descriptor->dimension,
+ wgpu::TextureDimension::e2D);
+
+ DAWN_INVALID_IF(descriptor->mipLevelCount != 1, "Mip level count (%u) is not 1.",
+ descriptor->mipLevelCount);
+
+ DAWN_INVALID_IF(descriptor->size.depthOrArrayLayers != 1, "Array layer count (%u) is not 1.",
+ descriptor->size.depthOrArrayLayers);
+
+ DAWN_INVALID_IF(descriptor->sampleCount != 1, "Sample count (%u) is not 1.",
+ descriptor->sampleCount);
+
+ return {};
+}
+
+MaybeError ValidateD3D12TextureCanBeWrapped(ID3D12Resource* d3d12Resource,
+ const TextureDescriptor* dawnDescriptor) {
+ const D3D12_RESOURCE_DESC d3dDescriptor = d3d12Resource->GetDesc();
+ DAWN_INVALID_IF(
+ (dawnDescriptor->size.width != d3dDescriptor.Width) ||
+ (dawnDescriptor->size.height != d3dDescriptor.Height) ||
+ (dawnDescriptor->size.depthOrArrayLayers != 1),
+ "D3D12 texture size (Width: %u, Height: %u, DepthOrArraySize: 1) doesn't match Dawn "
+ "descriptor size (width: %u, height: %u, depthOrArrayLayers: %u).",
+ d3dDescriptor.Width, d3dDescriptor.Height, dawnDescriptor->size.width,
+ dawnDescriptor->size.height, dawnDescriptor->size.depthOrArrayLayers);
+
+ const DXGI_FORMAT dxgiFormatFromDescriptor = D3D12TextureFormat(dawnDescriptor->format);
+ DAWN_INVALID_IF(dxgiFormatFromDescriptor != d3dDescriptor.Format,
+ "D3D12 texture format (%x) is not compatible with Dawn descriptor format (%s).",
+ d3dDescriptor.Format, dawnDescriptor->format);
+
+ DAWN_INVALID_IF(d3dDescriptor.MipLevels != 1,
+ "D3D12 texture number of miplevels (%u) is not 1.", d3dDescriptor.MipLevels);
+
+ DAWN_INVALID_IF(d3dDescriptor.DepthOrArraySize != 1, "D3D12 texture array size (%u) is not 1.",
+ d3dDescriptor.DepthOrArraySize);
+
+ // Shared textures cannot be multi-sample so no need to check those.
+ ASSERT(d3dDescriptor.SampleDesc.Count == 1);
+ ASSERT(d3dDescriptor.SampleDesc.Quality == 0);
+
+ return {};
+}
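Illustrative sketch, not part of the diff: the checks above only accept single-sample, single-mip, single-layer 2D resources whose DXGI format matches the Dawn descriptor. A hypothetical pair that passes:

    // ID3D12Resource:         Texture2D, 256x256, MipLevels = 1, DepthOrArraySize = 1,
    //                         SampleDesc = {1, 0}, Format = DXGI_FORMAT_R8G8B8A8_UNORM
    // Dawn TextureDescriptor: dimension = e2D, size = {256, 256, 1}, mipLevelCount = 1,
    //                         sampleCount = 1, format = wgpu::TextureFormat::RGBA8Unorm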
+
+// https://docs.microsoft.com/en-us/windows/win32/api/d3d12/ne-d3d12-d3d12_shared_resource_compatibility_tier
+MaybeError ValidateD3D12VideoTextureCanBeShared(Device* device, DXGI_FORMAT textureFormat) {
+ const bool supportsSharedResourceCapabilityTier1 =
+ device->GetDeviceInfo().supportsSharedResourceCapabilityTier1;
+ switch (textureFormat) {
+        // The MSDN docs are not correct: NV12 requires at least tier 1.
+ case DXGI_FORMAT_NV12:
+ if (supportsSharedResourceCapabilityTier1) {
+ return {};
}
- }
-
- device->DeallocateMemory(mResourceAllocation);
-
- // Now that we've deallocated the memory, the texture is no longer a swap chain texture.
- // We can set mSwapChainTexture to false to avoid passing a nullptr to
- // ID3D12SharingContract::Present.
- mSwapChainTexture = false;
-
- // Now that the texture has been destroyed. It should release the refptr
- // of the d3d11on12 resource.
- mD3D11on12Resource = nullptr;
- }
-
- DXGI_FORMAT Texture::GetD3D12Format() const {
- return D3D12TextureFormat(GetFormat().format);
+ break;
+ default:
+ break;
}
- ID3D12Resource* Texture::GetD3D12Resource() const {
- return mResourceAllocation.GetD3D12Resource();
+ return DAWN_FORMAT_VALIDATION_ERROR("DXGI format does not support cross-API sharing.");
+}
+
+// static
+ResultOrError<Ref<Texture>> Texture::Create(Device* device, const TextureDescriptor* descriptor) {
+ Ref<Texture> dawnTexture =
+ AcquireRef(new Texture(device, descriptor, TextureState::OwnedInternal));
+
+ DAWN_INVALID_IF(dawnTexture->GetFormat().IsMultiPlanar(),
+ "Cannot create a multi-planar formatted texture directly");
+
+ DAWN_TRY(dawnTexture->InitializeAsInternalTexture());
+ return std::move(dawnTexture);
+}
+
+// static
+ResultOrError<Ref<Texture>> Texture::CreateExternalImage(
+ Device* device,
+ const TextureDescriptor* descriptor,
+ ComPtr<ID3D12Resource> d3d12Texture,
+ Ref<D3D11on12ResourceCacheEntry> d3d11on12Resource,
+ bool isSwapChainTexture,
+ bool isInitialized) {
+ Ref<Texture> dawnTexture =
+ AcquireRef(new Texture(device, descriptor, TextureState::OwnedExternal));
+ DAWN_TRY(dawnTexture->InitializeAsExternalTexture(
+ descriptor, std::move(d3d12Texture), std::move(d3d11on12Resource), isSwapChainTexture));
+
+    // An imported texture with a multi-planar format must already be initialized,
+    // because Dawn cannot initialize a shared multi-planar format itself.
+ DAWN_INVALID_IF(
+ !isInitialized && dawnTexture->GetFormat().IsMultiPlanar(),
+ "Cannot create a texture with a multi-planar format (%s) with uninitialized data.",
+ dawnTexture->GetFormat().format);
+
+ dawnTexture->SetIsSubresourceContentInitialized(isInitialized,
+ dawnTexture->GetAllSubresources());
+ return std::move(dawnTexture);
+}
+
+// static
+ResultOrError<Ref<Texture>> Texture::Create(Device* device,
+ const TextureDescriptor* descriptor,
+ ComPtr<ID3D12Resource> d3d12Texture) {
+ Ref<Texture> dawnTexture =
+ AcquireRef(new Texture(device, descriptor, TextureState::OwnedExternal));
+ DAWN_TRY(dawnTexture->InitializeAsSwapChainTexture(std::move(d3d12Texture)));
+ return std::move(dawnTexture);
+}
+
+MaybeError Texture::InitializeAsExternalTexture(const TextureDescriptor* descriptor,
+ ComPtr<ID3D12Resource> d3d12Texture,
+ Ref<D3D11on12ResourceCacheEntry> d3d11on12Resource,
+ bool isSwapChainTexture) {
+ mD3D11on12Resource = std::move(d3d11on12Resource);
+ mSwapChainTexture = isSwapChainTexture;
+
+ D3D12_RESOURCE_DESC desc = d3d12Texture->GetDesc();
+ mD3D12ResourceFlags = desc.Flags;
+
+ AllocationInfo info;
+ info.mMethod = AllocationMethod::kExternal;
+ // When creating the ResourceHeapAllocation, the resource heap is set to nullptr because the
+ // texture is owned externally. The texture's owning entity must remain responsible for
+ // memory management.
+ mResourceAllocation = {info, 0, std::move(d3d12Texture), nullptr};
+
+ SetLabelHelper("Dawn_ExternalTexture");
+
+ return {};
+}
+
+MaybeError Texture::InitializeAsInternalTexture() {
+ D3D12_RESOURCE_DESC resourceDescriptor;
+ resourceDescriptor.Dimension = D3D12TextureDimension(GetDimension());
+ resourceDescriptor.Alignment = 0;
+
+ const Extent3D& size = GetSize();
+ resourceDescriptor.Width = size.width;
+ resourceDescriptor.Height = size.height;
+ resourceDescriptor.DepthOrArraySize = size.depthOrArrayLayers;
+
+ // This will need to be much more nuanced when WebGPU has
+ // texture view compatibility rules.
+ const bool needsTypelessFormat = GetFormat().HasDepthOrStencil() &&
+ (GetInternalUsage() & wgpu::TextureUsage::TextureBinding) != 0;
+
+ DXGI_FORMAT dxgiFormat = needsTypelessFormat ? D3D12TypelessTextureFormat(GetFormat().format)
+ : D3D12TextureFormat(GetFormat().format);
+
+ resourceDescriptor.MipLevels = static_cast<UINT16>(GetNumMipLevels());
+ resourceDescriptor.Format = dxgiFormat;
+ resourceDescriptor.SampleDesc.Count = GetSampleCount();
+ resourceDescriptor.SampleDesc.Quality = 0;
+ resourceDescriptor.Layout = D3D12_TEXTURE_LAYOUT_UNKNOWN;
+ resourceDescriptor.Flags = D3D12ResourceFlags(GetInternalUsage(), GetFormat());
+ mD3D12ResourceFlags = resourceDescriptor.Flags;
+
+ DAWN_TRY_ASSIGN(mResourceAllocation,
+ ToBackend(GetDevice())
+ ->AllocateMemory(D3D12_HEAP_TYPE_DEFAULT, resourceDescriptor,
+ D3D12_RESOURCE_STATE_COMMON));
+
+ SetLabelImpl();
+
+ Device* device = ToBackend(GetDevice());
+
+ if (device->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting)) {
+ CommandRecordingContext* commandContext;
+ DAWN_TRY_ASSIGN(commandContext, device->GetPendingCommandContext());
+
+ DAWN_TRY(
+ ClearTexture(commandContext, GetAllSubresources(), TextureBase::ClearValue::NonZero));
}
- DXGI_FORMAT Texture::GetD3D12CopyableSubresourceFormat(Aspect aspect) const {
- ASSERT(GetFormat().aspects & aspect);
-
- switch (GetFormat().format) {
- case wgpu::TextureFormat::Depth24UnormStencil8:
- case wgpu::TextureFormat::Depth24PlusStencil8:
- case wgpu::TextureFormat::Depth32FloatStencil8:
- case wgpu::TextureFormat::Stencil8:
- switch (aspect) {
- case Aspect::Depth:
- return DXGI_FORMAT_R32_FLOAT;
- case Aspect::Stencil:
- return DXGI_FORMAT_R8_UINT;
- default:
- UNREACHABLE();
- }
- default:
- ASSERT(HasOneBit(GetFormat().aspects));
- return GetD3D12Format();
+ return {};
+}
+
+MaybeError Texture::InitializeAsSwapChainTexture(ComPtr<ID3D12Resource> d3d12Texture) {
+ AllocationInfo info;
+ info.mMethod = AllocationMethod::kExternal;
+ // When creating the ResourceHeapAllocation, the resource heap is set to nullptr because the
+ // texture is owned externally. The texture's owning entity must remain responsible for
+ // memory management.
+ mResourceAllocation = {info, 0, std::move(d3d12Texture), nullptr};
+
+ SetLabelHelper("Dawn_SwapChainTexture");
+
+ return {};
+}
+
+Texture::Texture(Device* device, const TextureDescriptor* descriptor, TextureState state)
+ : TextureBase(device, descriptor, state),
+ mSubresourceStateAndDecay(
+ GetFormat().aspects,
+ GetArrayLayers(),
+ GetNumMipLevels(),
+ {D3D12_RESOURCE_STATES::D3D12_RESOURCE_STATE_COMMON, kMaxExecutionSerial, false}) {}
+
+Texture::~Texture() {}
+
+void Texture::DestroyImpl() {
+ TextureBase::DestroyImpl();
+
+ Device* device = ToBackend(GetDevice());
+
+ // In PIX's D3D12-only mode, there is no way to determine frame boundaries
+ // for WebGPU since Dawn does not manage DXGI swap chains. Without assistance,
+ // PIX will wait forever for a present that never happens.
+ // If we know we're dealing with a swapbuffer texture, inform PIX we've
+ // "presented" the texture so it can determine frame boundaries and use its
+ // contents for the UI.
+ if (mSwapChainTexture) {
+ ID3D12SharingContract* d3dSharingContract = device->GetSharingContract();
+ if (d3dSharingContract != nullptr) {
+ d3dSharingContract->Present(mResourceAllocation.GetD3D12Resource(), 0, 0);
}
}
- MaybeError Texture::AcquireKeyedMutex() {
- ASSERT(mD3D11on12Resource != nullptr);
- return mD3D11on12Resource->AcquireKeyedMutex();
+ device->DeallocateMemory(mResourceAllocation);
+
+ // Now that we've deallocated the memory, the texture is no longer a swap chain texture.
+ // We can set mSwapChainTexture to false to avoid passing a nullptr to
+ // ID3D12SharingContract::Present.
+ mSwapChainTexture = false;
+
+    // Now that the texture has been destroyed, it should release its reference
+    // to the d3d11on12 resource.
+ mD3D11on12Resource = nullptr;
+}
+
+DXGI_FORMAT Texture::GetD3D12Format() const {
+ return D3D12TextureFormat(GetFormat().format);
+}
+
+ID3D12Resource* Texture::GetD3D12Resource() const {
+ return mResourceAllocation.GetD3D12Resource();
+}
+
+DXGI_FORMAT Texture::GetD3D12CopyableSubresourceFormat(Aspect aspect) const {
+ ASSERT(GetFormat().aspects & aspect);
+
+ switch (GetFormat().format) {
+ case wgpu::TextureFormat::Depth24UnormStencil8:
+ case wgpu::TextureFormat::Depth24PlusStencil8:
+ case wgpu::TextureFormat::Depth32FloatStencil8:
+ case wgpu::TextureFormat::Stencil8:
+ switch (aspect) {
+ case Aspect::Depth:
+ return DXGI_FORMAT_R32_FLOAT;
+ case Aspect::Stencil:
+ return DXGI_FORMAT_R8_UINT;
+ default:
+ UNREACHABLE();
+ }
+ default:
+ ASSERT(HasOneBit(GetFormat().aspects));
+ return GetD3D12Format();
}
-
- void Texture::ReleaseKeyedMutex() {
- ASSERT(mD3D11on12Resource != nullptr);
- mD3D11on12Resource->ReleaseKeyedMutex();
+}
+
+MaybeError Texture::AcquireKeyedMutex() {
+ ASSERT(mD3D11on12Resource != nullptr);
+ return mD3D11on12Resource->AcquireKeyedMutex();
+}
+
+void Texture::ReleaseKeyedMutex() {
+ ASSERT(mD3D11on12Resource != nullptr);
+ mD3D11on12Resource->ReleaseKeyedMutex();
+}
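Illustrative sketch, not part of the diff: the pair above brackets GPU work on a texture shared with D3D11 through a keyed mutex; the actual acquire/release happens around command-list submission in Device::ExecuteCommandContext (the calling code below is hypothetical).

    DAWN_TRY(texture->AcquireKeyedMutex());  // block until the other queue's work completes
    // ... submit command lists that read or write the shared texture ...
    texture->ReleaseKeyedMutex();            // hand the resource back to the other API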
+
+void Texture::TrackUsageAndTransitionNow(CommandRecordingContext* commandContext,
+ wgpu::TextureUsage usage,
+ const SubresourceRange& range) {
+ TrackUsageAndTransitionNow(commandContext, D3D12TextureUsage(usage, GetFormat()), range);
+}
+
+void Texture::TrackAllUsageAndTransitionNow(CommandRecordingContext* commandContext,
+ wgpu::TextureUsage usage) {
+ TrackUsageAndTransitionNow(commandContext, D3D12TextureUsage(usage, GetFormat()),
+ GetAllSubresources());
+}
+
+void Texture::TrackAllUsageAndTransitionNow(CommandRecordingContext* commandContext,
+ D3D12_RESOURCE_STATES newState) {
+ TrackUsageAndTransitionNow(commandContext, newState, GetAllSubresources());
+}
+
+void Texture::TrackUsageAndTransitionNow(CommandRecordingContext* commandContext,
+ D3D12_RESOURCE_STATES newState,
+ const SubresourceRange& range) {
+ if (mResourceAllocation.GetInfo().mMethod != AllocationMethod::kExternal) {
+ // Track the underlying heap to ensure residency.
+ Heap* heap = ToBackend(mResourceAllocation.GetResourceHeap());
+ commandContext->TrackHeapUsage(heap, GetDevice()->GetPendingCommandSerial());
}
- void Texture::TrackUsageAndTransitionNow(CommandRecordingContext* commandContext,
- wgpu::TextureUsage usage,
- const SubresourceRange& range) {
- TrackUsageAndTransitionNow(commandContext, D3D12TextureUsage(usage, GetFormat()), range);
- }
+ std::vector<D3D12_RESOURCE_BARRIER> barriers;
- void Texture::TrackAllUsageAndTransitionNow(CommandRecordingContext* commandContext,
- wgpu::TextureUsage usage) {
- TrackUsageAndTransitionNow(commandContext, D3D12TextureUsage(usage, GetFormat()),
- GetAllSubresources());
+ // TODO(enga): Consider adding a Count helper.
+ uint32_t aspectCount = 0;
+ for (Aspect aspect : IterateEnumMask(range.aspects)) {
+ aspectCount++;
+ DAWN_UNUSED(aspect);
}
- void Texture::TrackAllUsageAndTransitionNow(CommandRecordingContext* commandContext,
- D3D12_RESOURCE_STATES newState) {
- TrackUsageAndTransitionNow(commandContext, newState, GetAllSubresources());
- }
-
- void Texture::TrackUsageAndTransitionNow(CommandRecordingContext* commandContext,
- D3D12_RESOURCE_STATES newState,
- const SubresourceRange& range) {
- if (mResourceAllocation.GetInfo().mMethod != AllocationMethod::kExternal) {
- // Track the underlying heap to ensure residency.
- Heap* heap = ToBackend(mResourceAllocation.GetResourceHeap());
- commandContext->TrackHeapUsage(heap, GetDevice()->GetPendingCommandSerial());
- }
+ barriers.reserve(range.levelCount * range.layerCount * aspectCount);
- std::vector<D3D12_RESOURCE_BARRIER> barriers;
+ TransitionUsageAndGetResourceBarrier(commandContext, &barriers, newState, range);
+ if (barriers.size()) {
+ commandContext->GetCommandList()->ResourceBarrier(barriers.size(), barriers.data());
+ }
+}
- // TODO(enga): Consider adding a Count helper.
- uint32_t aspectCount = 0;
- for (Aspect aspect : IterateEnumMask(range.aspects)) {
- aspectCount++;
- DAWN_UNUSED(aspect);
- }
+void Texture::TransitionSubresourceRange(std::vector<D3D12_RESOURCE_BARRIER>* barriers,
+ const SubresourceRange& range,
+ StateAndDecay* state,
+ D3D12_RESOURCE_STATES newState,
+ ExecutionSerial pendingCommandSerial) const {
+ D3D12_RESOURCE_STATES lastState = state->lastState;
- barriers.reserve(range.levelCount * range.layerCount * aspectCount);
+    // A UAV barrier is needed when both the previous and the new state are UAV.
+    // If either state is not UAV, regular transition barriers are used instead.
+ bool needsUAVBarrier = lastState == D3D12_RESOURCE_STATE_UNORDERED_ACCESS &&
+ newState == D3D12_RESOURCE_STATE_UNORDERED_ACCESS;
- TransitionUsageAndGetResourceBarrier(commandContext, &barriers, newState, range);
- if (barriers.size()) {
- commandContext->GetCommandList()->ResourceBarrier(barriers.size(), barriers.data());
- }
+ if (needsUAVBarrier) {
+ D3D12_RESOURCE_BARRIER barrier;
+ barrier.Type = D3D12_RESOURCE_BARRIER_TYPE_UAV;
+ barrier.Flags = D3D12_RESOURCE_BARRIER_FLAG_NONE;
+ barrier.UAV.pResource = GetD3D12Resource();
+ barriers->push_back(barrier);
+ return;
}
- void Texture::TransitionSubresourceRange(std::vector<D3D12_RESOURCE_BARRIER>* barriers,
- const SubresourceRange& range,
- StateAndDecay* state,
- D3D12_RESOURCE_STATES newState,
- ExecutionSerial pendingCommandSerial) const {
- // Reuse the subresource(s) directly and avoid transition when it isn't needed, and
- // return false.
- if (state->lastState == newState) {
- return;
- }
+    // Avoid a transition when the subresource(s) are already in the requested state.
+ if (lastState == newState) {
+ return;
+ }
- D3D12_RESOURCE_STATES lastState = state->lastState;
-
- // The COMMON state represents a state where no write operations can be pending, and
- // where all pixels are uncompressed. This makes it possible to transition to and
- // from some states without synchronization (i.e. without an explicit
- // ResourceBarrier call). Textures can be implicitly promoted to 1) a single write
- // state, or 2) multiple read states. Textures will implicitly decay to the COMMON
- // state when all of the following are true: 1) the texture is accessed on a command
- // list, 2) the ExecuteCommandLists call that uses that command list has ended, and
- // 3) the texture was promoted implicitly to a read-only state and is still in that
- // state.
- // https://docs.microsoft.com/en-us/windows/desktop/direct3d12/using-resource-barriers-to-synchronize-resource-states-in-direct3d-12#implicit-state-transitions
-
- // To track implicit decays, we must record the pending serial on which that
- // transition will occur. When that texture is used again, the previously recorded
- // serial must be compared to the last completed serial to determine if the texture
- // has implicity decayed to the common state.
- if (state->isValidToDecay && pendingCommandSerial > state->lastDecaySerial) {
- lastState = D3D12_RESOURCE_STATE_COMMON;
- }
+ // The COMMON state represents a state where no write operations can be pending, and
+ // where all pixels are uncompressed. This makes it possible to transition to and
+ // from some states without synchronization (i.e. without an explicit
+ // ResourceBarrier call). Textures can be implicitly promoted to 1) a single write
+ // state, or 2) multiple read states. Textures will implicitly decay to the COMMON
+ // state when all of the following are true: 1) the texture is accessed on a command
+ // list, 2) the ExecuteCommandLists call that uses that command list has ended, and
+ // 3) the texture was promoted implicitly to a read-only state and is still in that
+ // state.
+ // https://docs.microsoft.com/en-us/windows/desktop/direct3d12/using-resource-barriers-to-synchronize-resource-states-in-direct3d-12#implicit-state-transitions
+
+ // To track implicit decays, we must record the pending serial on which that
+ // transition will occur. When that texture is used again, the previously recorded
+ // serial must be compared to the last completed serial to determine if the texture
+    // has implicitly decayed to the common state.
+ if (state->isValidToDecay && pendingCommandSerial > state->lastDecaySerial) {
+ lastState = D3D12_RESOURCE_STATE_COMMON;
+ }
- // Update the tracked state.
- state->lastState = newState;
-
- // Destination states that qualify for an implicit promotion for a
- // non-simultaneous-access texture: NON_PIXEL_SHADER_RESOURCE,
- // PIXEL_SHADER_RESOURCE, COPY_SRC, COPY_DEST.
- {
- static constexpr D3D12_RESOURCE_STATES kD3D12PromotableReadOnlyStates =
- D3D12_RESOURCE_STATE_COPY_SOURCE | D3D12_RESOURCE_STATE_PIXEL_SHADER_RESOURCE |
- D3D12_RESOURCE_STATE_NON_PIXEL_SHADER_RESOURCE;
-
- if (lastState == D3D12_RESOURCE_STATE_COMMON) {
- if (IsSubset(newState, kD3D12PromotableReadOnlyStates)) {
- // Implicit texture state decays can only occur when the texture was implicitly
- // transitioned to a read-only state. isValidToDecay is needed to differentiate
- // between resources that were implictly or explicitly transitioned to a
- // read-only state.
- state->isValidToDecay = true;
- state->lastDecaySerial = pendingCommandSerial;
- return;
- } else if (newState == D3D12_RESOURCE_STATE_COPY_DEST) {
- state->isValidToDecay = false;
- return;
- }
+ // Update the tracked state.
+ state->lastState = newState;
+
+ // Destination states that qualify for an implicit promotion for a
+ // non-simultaneous-access texture: NON_PIXEL_SHADER_RESOURCE,
+ // PIXEL_SHADER_RESOURCE, COPY_SRC, COPY_DEST.
+ {
+ static constexpr D3D12_RESOURCE_STATES kD3D12PromotableReadOnlyStates =
+ D3D12_RESOURCE_STATE_COPY_SOURCE | D3D12_RESOURCE_STATE_PIXEL_SHADER_RESOURCE |
+ D3D12_RESOURCE_STATE_NON_PIXEL_SHADER_RESOURCE;
+
+ if (lastState == D3D12_RESOURCE_STATE_COMMON) {
+ if (IsSubset(newState, kD3D12PromotableReadOnlyStates)) {
+ // Implicit texture state decays can only occur when the texture was implicitly
+ // transitioned to a read-only state. isValidToDecay is needed to differentiate
+                // between resources that were implicitly or explicitly transitioned to a
+ // read-only state.
+ state->isValidToDecay = true;
+ state->lastDecaySerial = pendingCommandSerial;
+ return;
+ } else if (newState == D3D12_RESOURCE_STATE_COPY_DEST) {
+ state->isValidToDecay = false;
+ return;
}
}
+ }
- D3D12_RESOURCE_BARRIER barrier;
- barrier.Type = D3D12_RESOURCE_BARRIER_TYPE_TRANSITION;
- barrier.Flags = D3D12_RESOURCE_BARRIER_FLAG_NONE;
- barrier.Transition.pResource = GetD3D12Resource();
- barrier.Transition.StateBefore = lastState;
- barrier.Transition.StateAfter = newState;
-
- bool isFullRange = range.baseArrayLayer == 0 && range.baseMipLevel == 0 &&
- range.layerCount == GetArrayLayers() &&
- range.levelCount == GetNumMipLevels() &&
- range.aspects == GetFormat().aspects;
-
- // Use a single transition for all subresources if possible.
- if (isFullRange) {
- barrier.Transition.Subresource = D3D12_RESOURCE_BARRIER_ALL_SUBRESOURCES;
- barriers->push_back(barrier);
- } else {
- for (Aspect aspect : IterateEnumMask(range.aspects)) {
- for (uint32_t arrayLayer = 0; arrayLayer < range.layerCount; ++arrayLayer) {
- for (uint32_t mipLevel = 0; mipLevel < range.levelCount; ++mipLevel) {
- barrier.Transition.Subresource =
- GetSubresourceIndex(range.baseMipLevel + mipLevel,
- range.baseArrayLayer + arrayLayer, aspect);
- barriers->push_back(barrier);
- }
+ D3D12_RESOURCE_BARRIER barrier;
+ barrier.Type = D3D12_RESOURCE_BARRIER_TYPE_TRANSITION;
+ barrier.Flags = D3D12_RESOURCE_BARRIER_FLAG_NONE;
+ barrier.Transition.pResource = GetD3D12Resource();
+ barrier.Transition.StateBefore = lastState;
+ barrier.Transition.StateAfter = newState;
+
+ bool isFullRange = range.baseArrayLayer == 0 && range.baseMipLevel == 0 &&
+ range.layerCount == GetArrayLayers() &&
+ range.levelCount == GetNumMipLevels() &&
+ range.aspects == GetFormat().aspects;
+
+ // Use a single transition for all subresources if possible.
+ if (isFullRange) {
+ barrier.Transition.Subresource = D3D12_RESOURCE_BARRIER_ALL_SUBRESOURCES;
+ barriers->push_back(barrier);
+ } else {
+ for (Aspect aspect : IterateEnumMask(range.aspects)) {
+ for (uint32_t arrayLayer = 0; arrayLayer < range.layerCount; ++arrayLayer) {
+ for (uint32_t mipLevel = 0; mipLevel < range.levelCount; ++mipLevel) {
+ barrier.Transition.Subresource = GetSubresourceIndex(
+ range.baseMipLevel + mipLevel, range.baseArrayLayer + arrayLayer, aspect);
+ barriers->push_back(barrier);
}
}
}
-
- state->isValidToDecay = false;
}
- void Texture::HandleTransitionSpecialCases(CommandRecordingContext* commandContext) {
- // Textures with keyed mutexes can be written from other graphics queues. Hence, they
- // must be acquired before command list submission to ensure work from the other queues
- // has finished. See Device::ExecuteCommandContext.
- if (mD3D11on12Resource != nullptr) {
- commandContext->AddToSharedTextureList(this);
- }
- }
+ state->isValidToDecay = false;
+}
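Illustrative sketch, not part of the diff: how the promotion/decay tracking above behaves for two consecutive uses of a texture that starts in COMMON (hypothetical call sequence).

    texture->TrackUsageAndTransitionNow(commandContext, wgpu::TextureUsage::TextureBinding, range);
    // COMMON -> (NON_)PIXEL_SHADER_RESOURCE is an implicit promotion: no barrier is recorded
    // and the subresources are marked as eligible to decay back to COMMON.
    texture->TrackUsageAndTransitionNow(commandContext, wgpu::TextureUsage::CopyDst, range);
    // On the same pending command serial this records an explicit transition barrier to
    // COPY_DEST; on a later serial the state is treated as having decayed to COMMON, so
    // COPY_DEST is again an implicit promotion and no barrier is emitted.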
- void Texture::TransitionUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
- std::vector<D3D12_RESOURCE_BARRIER>* barrier,
- wgpu::TextureUsage usage,
- const SubresourceRange& range) {
- TransitionUsageAndGetResourceBarrier(commandContext, barrier,
- D3D12TextureUsage(usage, GetFormat()), range);
+void Texture::HandleTransitionSpecialCases(CommandRecordingContext* commandContext) {
+ // Textures with keyed mutexes can be written from other graphics queues. Hence, they
+ // must be acquired before command list submission to ensure work from the other queues
+ // has finished. See Device::ExecuteCommandContext.
+ if (mD3D11on12Resource != nullptr) {
+ commandContext->AddToSharedTextureList(this);
}
-
- void Texture::TransitionUsageAndGetResourceBarrier(
- CommandRecordingContext* commandContext,
- std::vector<D3D12_RESOURCE_BARRIER>* barriers,
- D3D12_RESOURCE_STATES newState,
- const SubresourceRange& range) {
- HandleTransitionSpecialCases(commandContext);
-
- const ExecutionSerial pendingCommandSerial =
- ToBackend(GetDevice())->GetPendingCommandSerial();
-
- mSubresourceStateAndDecay.Update(
- range, [&](const SubresourceRange& updateRange, StateAndDecay* state) {
- TransitionSubresourceRange(barriers, updateRange, state, newState,
- pendingCommandSerial);
- });
+}
+
+void Texture::TransitionUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
+ std::vector<D3D12_RESOURCE_BARRIER>* barrier,
+ wgpu::TextureUsage usage,
+ const SubresourceRange& range) {
+ TransitionUsageAndGetResourceBarrier(commandContext, barrier,
+ D3D12TextureUsage(usage, GetFormat()), range);
+}
+
+void Texture::TransitionUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
+ std::vector<D3D12_RESOURCE_BARRIER>* barriers,
+ D3D12_RESOURCE_STATES newState,
+ const SubresourceRange& range) {
+ HandleTransitionSpecialCases(commandContext);
+
+ const ExecutionSerial pendingCommandSerial = ToBackend(GetDevice())->GetPendingCommandSerial();
+
+ mSubresourceStateAndDecay.Update(range, [&](const SubresourceRange& updateRange,
+ StateAndDecay* state) {
+ TransitionSubresourceRange(barriers, updateRange, state, newState, pendingCommandSerial);
+ });
+}
+
+void Texture::TrackUsageAndGetResourceBarrierForPass(CommandRecordingContext* commandContext,
+ std::vector<D3D12_RESOURCE_BARRIER>* barriers,
+ const TextureSubresourceUsage& textureUsages) {
+ if (mResourceAllocation.GetInfo().mMethod != AllocationMethod::kExternal) {
+ // Track the underlying heap to ensure residency.
+ Heap* heap = ToBackend(mResourceAllocation.GetResourceHeap());
+ commandContext->TrackHeapUsage(heap, GetDevice()->GetPendingCommandSerial());
}
- void Texture::TrackUsageAndGetResourceBarrierForPass(
- CommandRecordingContext* commandContext,
- std::vector<D3D12_RESOURCE_BARRIER>* barriers,
- const TextureSubresourceUsage& textureUsages) {
- if (mResourceAllocation.GetInfo().mMethod != AllocationMethod::kExternal) {
- // Track the underlying heap to ensure residency.
- Heap* heap = ToBackend(mResourceAllocation.GetResourceHeap());
- commandContext->TrackHeapUsage(heap, GetDevice()->GetPendingCommandSerial());
- }
-
- HandleTransitionSpecialCases(commandContext);
+ HandleTransitionSpecialCases(commandContext);
- const ExecutionSerial pendingCommandSerial =
- ToBackend(GetDevice())->GetPendingCommandSerial();
+ const ExecutionSerial pendingCommandSerial = ToBackend(GetDevice())->GetPendingCommandSerial();
- mSubresourceStateAndDecay.Merge(textureUsages, [&](const SubresourceRange& mergeRange,
- StateAndDecay* state,
- wgpu::TextureUsage usage) {
+ mSubresourceStateAndDecay.Merge(
+ textureUsages,
+ [&](const SubresourceRange& mergeRange, StateAndDecay* state, wgpu::TextureUsage usage) {
// Skip if this subresource is not used during the current pass
if (usage == wgpu::TextureUsage::None) {
return;
@@ -907,484 +904,482 @@ namespace dawn::native::d3d12 {
D3D12_RESOURCE_STATES newState = D3D12TextureUsage(usage, GetFormat());
TransitionSubresourceRange(barriers, mergeRange, state, newState, pendingCommandSerial);
});
- }
-
- D3D12_RENDER_TARGET_VIEW_DESC Texture::GetRTVDescriptor(const Format& format,
- uint32_t mipLevel,
- uint32_t baseSlice,
- uint32_t sliceCount) const {
- D3D12_RENDER_TARGET_VIEW_DESC rtvDesc;
- rtvDesc.Format = D3D12TextureFormat(format.format);
- if (IsMultisampledTexture()) {
- ASSERT(GetDimension() == wgpu::TextureDimension::e2D);
- ASSERT(GetNumMipLevels() == 1);
- ASSERT(sliceCount == 1);
- ASSERT(baseSlice == 0);
- ASSERT(mipLevel == 0);
- rtvDesc.ViewDimension = D3D12_RTV_DIMENSION_TEXTURE2DMS;
- return rtvDesc;
- }
- switch (GetDimension()) {
- case wgpu::TextureDimension::e2D:
- // Currently we always use D3D12_TEX2D_ARRAY_RTV because we cannot specify base
- // array layer and layer count in D3D12_TEX2D_RTV. For 2D texture views, we treat
- // them as 1-layer 2D array textures. (Just like how we treat SRVs)
- // https://docs.microsoft.com/en-us/windows/desktop/api/d3d12/ns-d3d12-d3d12_tex2d_rtv
- // https://docs.microsoft.com/en-us/windows/desktop/api/d3d12/ns-d3d12-d3d12_tex2d_array
- // _rtv
- rtvDesc.ViewDimension = D3D12_RTV_DIMENSION_TEXTURE2DARRAY;
- rtvDesc.Texture2DArray.FirstArraySlice = baseSlice;
- rtvDesc.Texture2DArray.ArraySize = sliceCount;
- rtvDesc.Texture2DArray.MipSlice = mipLevel;
- rtvDesc.Texture2DArray.PlaneSlice = 0;
- break;
- case wgpu::TextureDimension::e3D:
- rtvDesc.ViewDimension = D3D12_RTV_DIMENSION_TEXTURE3D;
- rtvDesc.Texture3D.MipSlice = mipLevel;
- rtvDesc.Texture3D.FirstWSlice = baseSlice;
- rtvDesc.Texture3D.WSize = sliceCount;
- break;
- case wgpu::TextureDimension::e1D:
- UNREACHABLE();
- break;
- }
+}
+
+D3D12_RENDER_TARGET_VIEW_DESC Texture::GetRTVDescriptor(const Format& format,
+ uint32_t mipLevel,
+ uint32_t baseSlice,
+ uint32_t sliceCount) const {
+ D3D12_RENDER_TARGET_VIEW_DESC rtvDesc;
+ rtvDesc.Format = D3D12TextureFormat(format.format);
+ if (IsMultisampledTexture()) {
+ ASSERT(GetDimension() == wgpu::TextureDimension::e2D);
+ ASSERT(GetNumMipLevels() == 1);
+ ASSERT(sliceCount == 1);
+ ASSERT(baseSlice == 0);
+ ASSERT(mipLevel == 0);
+ rtvDesc.ViewDimension = D3D12_RTV_DIMENSION_TEXTURE2DMS;
return rtvDesc;
}
+ switch (GetDimension()) {
+ case wgpu::TextureDimension::e2D:
+ // Currently we always use D3D12_TEX2D_ARRAY_RTV because we cannot specify base
+ // array layer and layer count in D3D12_TEX2D_RTV. For 2D texture views, we treat
+ // them as 1-layer 2D array textures. (Just like how we treat SRVs)
+ // https://docs.microsoft.com/en-us/windows/desktop/api/d3d12/ns-d3d12-d3d12_tex2d_rtv
+            // https://docs.microsoft.com/en-us/windows/desktop/api/d3d12/ns-d3d12-d3d12_tex2d_array_rtv
+ rtvDesc.ViewDimension = D3D12_RTV_DIMENSION_TEXTURE2DARRAY;
+ rtvDesc.Texture2DArray.FirstArraySlice = baseSlice;
+ rtvDesc.Texture2DArray.ArraySize = sliceCount;
+ rtvDesc.Texture2DArray.MipSlice = mipLevel;
+ rtvDesc.Texture2DArray.PlaneSlice = 0;
+ break;
+ case wgpu::TextureDimension::e3D:
+ rtvDesc.ViewDimension = D3D12_RTV_DIMENSION_TEXTURE3D;
+ rtvDesc.Texture3D.MipSlice = mipLevel;
+ rtvDesc.Texture3D.FirstWSlice = baseSlice;
+ rtvDesc.Texture3D.WSize = sliceCount;
+ break;
+ case wgpu::TextureDimension::e1D:
+ UNREACHABLE();
+ break;
+ }
+ return rtvDesc;
+}
+
+D3D12_DEPTH_STENCIL_VIEW_DESC Texture::GetDSVDescriptor(uint32_t mipLevel,
+ uint32_t baseArrayLayer,
+ uint32_t layerCount,
+ Aspect aspects,
+ bool depthReadOnly,
+ bool stencilReadOnly) const {
+ D3D12_DEPTH_STENCIL_VIEW_DESC dsvDesc;
+ dsvDesc.Format = GetD3D12Format();
+ dsvDesc.Flags = D3D12_DSV_FLAG_NONE;
+ if (depthReadOnly && aspects & Aspect::Depth) {
+ dsvDesc.Flags |= D3D12_DSV_FLAG_READ_ONLY_DEPTH;
+ }
+ if (stencilReadOnly && aspects & Aspect::Stencil) {
+ dsvDesc.Flags |= D3D12_DSV_FLAG_READ_ONLY_STENCIL;
+ }
- D3D12_DEPTH_STENCIL_VIEW_DESC Texture::GetDSVDescriptor(uint32_t mipLevel,
- uint32_t baseArrayLayer,
- uint32_t layerCount,
- Aspect aspects,
- bool depthReadOnly,
- bool stencilReadOnly) const {
- D3D12_DEPTH_STENCIL_VIEW_DESC dsvDesc;
- dsvDesc.Format = GetD3D12Format();
- dsvDesc.Flags = D3D12_DSV_FLAG_NONE;
- if (depthReadOnly && aspects & Aspect::Depth) {
- dsvDesc.Flags |= D3D12_DSV_FLAG_READ_ONLY_DEPTH;
- }
- if (stencilReadOnly && aspects & Aspect::Stencil) {
- dsvDesc.Flags |= D3D12_DSV_FLAG_READ_ONLY_STENCIL;
- }
-
- if (IsMultisampledTexture()) {
- ASSERT(GetNumMipLevels() == 1);
- ASSERT(layerCount == 1);
- ASSERT(baseArrayLayer == 0);
- ASSERT(mipLevel == 0);
- dsvDesc.ViewDimension = D3D12_DSV_DIMENSION_TEXTURE2DMS;
- } else {
- dsvDesc.ViewDimension = D3D12_DSV_DIMENSION_TEXTURE2DARRAY;
- dsvDesc.Texture2DArray.FirstArraySlice = baseArrayLayer;
- dsvDesc.Texture2DArray.ArraySize = layerCount;
- dsvDesc.Texture2DArray.MipSlice = mipLevel;
- }
-
- return dsvDesc;
+ if (IsMultisampledTexture()) {
+ ASSERT(GetNumMipLevels() == 1);
+ ASSERT(layerCount == 1);
+ ASSERT(baseArrayLayer == 0);
+ ASSERT(mipLevel == 0);
+ dsvDesc.ViewDimension = D3D12_DSV_DIMENSION_TEXTURE2DMS;
+ } else {
+ dsvDesc.ViewDimension = D3D12_DSV_DIMENSION_TEXTURE2DARRAY;
+ dsvDesc.Texture2DArray.FirstArraySlice = baseArrayLayer;
+ dsvDesc.Texture2DArray.ArraySize = layerCount;
+ dsvDesc.Texture2DArray.MipSlice = mipLevel;
}
- MaybeError Texture::ClearTexture(CommandRecordingContext* commandContext,
- const SubresourceRange& range,
- TextureBase::ClearValue clearValue) {
- ID3D12GraphicsCommandList* commandList = commandContext->GetCommandList();
+ return dsvDesc;
+}
- Device* device = ToBackend(GetDevice());
+MaybeError Texture::ClearTexture(CommandRecordingContext* commandContext,
+ const SubresourceRange& range,
+ TextureBase::ClearValue clearValue) {
+ ID3D12GraphicsCommandList* commandList = commandContext->GetCommandList();
- uint8_t clearColor = (clearValue == TextureBase::ClearValue::Zero) ? 0 : 1;
- float fClearColor = (clearValue == TextureBase::ClearValue::Zero) ? 0.f : 1.f;
+ Device* device = ToBackend(GetDevice());
- if ((mD3D12ResourceFlags & D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL) != 0) {
- TrackUsageAndTransitionNow(commandContext, D3D12_RESOURCE_STATE_DEPTH_WRITE, range);
+ uint8_t clearColor = (clearValue == TextureBase::ClearValue::Zero) ? 0 : 1;
+ float fClearColor = (clearValue == TextureBase::ClearValue::Zero) ? 0.f : 1.f;
- for (uint32_t level = range.baseMipLevel; level < range.baseMipLevel + range.levelCount;
- ++level) {
- for (uint32_t layer = range.baseArrayLayer;
- layer < range.baseArrayLayer + range.layerCount; ++layer) {
- // Iterate the aspects individually to determine which clear flags to use.
- D3D12_CLEAR_FLAGS clearFlags = {};
- for (Aspect aspect : IterateEnumMask(range.aspects)) {
- if (clearValue == TextureBase::ClearValue::Zero &&
- IsSubresourceContentInitialized(
- SubresourceRange::SingleMipAndLayer(level, layer, aspect))) {
- // Skip lazy clears if already initialized.
- continue;
- }
-
- switch (aspect) {
- case Aspect::Depth:
- clearFlags |= D3D12_CLEAR_FLAG_DEPTH;
- break;
- case Aspect::Stencil:
- clearFlags |= D3D12_CLEAR_FLAG_STENCIL;
- break;
- default:
- UNREACHABLE();
- }
- }
+ if ((mD3D12ResourceFlags & D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL) != 0) {
+ TrackUsageAndTransitionNow(commandContext, D3D12_RESOURCE_STATE_DEPTH_WRITE, range);
- if (clearFlags == 0) {
+ for (uint32_t level = range.baseMipLevel; level < range.baseMipLevel + range.levelCount;
+ ++level) {
+ for (uint32_t layer = range.baseArrayLayer;
+ layer < range.baseArrayLayer + range.layerCount; ++layer) {
+ // Iterate the aspects individually to determine which clear flags to use.
+ D3D12_CLEAR_FLAGS clearFlags = {};
+ for (Aspect aspect : IterateEnumMask(range.aspects)) {
+ if (clearValue == TextureBase::ClearValue::Zero &&
+ IsSubresourceContentInitialized(
+ SubresourceRange::SingleMipAndLayer(level, layer, aspect))) {
+ // Skip lazy clears if already initialized.
continue;
}
- CPUDescriptorHeapAllocation dsvHandle;
- DAWN_TRY_ASSIGN(
- dsvHandle,
- device->GetDepthStencilViewAllocator()->AllocateTransientCPUDescriptors());
- const D3D12_CPU_DESCRIPTOR_HANDLE baseDescriptor =
- dsvHandle.GetBaseDescriptor();
- D3D12_DEPTH_STENCIL_VIEW_DESC dsvDesc =
- GetDSVDescriptor(level, layer, 1, range.aspects, false, false);
- device->GetD3D12Device()->CreateDepthStencilView(GetD3D12Resource(), &dsvDesc,
- baseDescriptor);
-
- commandList->ClearDepthStencilView(baseDescriptor, clearFlags, fClearColor,
- clearColor, 0, nullptr);
+ switch (aspect) {
+ case Aspect::Depth:
+ clearFlags |= D3D12_CLEAR_FLAG_DEPTH;
+ break;
+ case Aspect::Stencil:
+ clearFlags |= D3D12_CLEAR_FLAG_STENCIL;
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ if (clearFlags == 0) {
+ continue;
+ }
+
+ CPUDescriptorHeapAllocation dsvHandle;
+ DAWN_TRY_ASSIGN(
+ dsvHandle,
+ device->GetDepthStencilViewAllocator()->AllocateTransientCPUDescriptors());
+ const D3D12_CPU_DESCRIPTOR_HANDLE baseDescriptor = dsvHandle.GetBaseDescriptor();
+ D3D12_DEPTH_STENCIL_VIEW_DESC dsvDesc =
+ GetDSVDescriptor(level, layer, 1, range.aspects, false, false);
+ device->GetD3D12Device()->CreateDepthStencilView(GetD3D12Resource(), &dsvDesc,
+ baseDescriptor);
+
+ commandList->ClearDepthStencilView(baseDescriptor, clearFlags, fClearColor,
+ clearColor, 0, nullptr);
+ }
+ }
+ } else if ((mD3D12ResourceFlags & D3D12_RESOURCE_FLAG_ALLOW_RENDER_TARGET) != 0) {
+ TrackUsageAndTransitionNow(commandContext, D3D12_RESOURCE_STATE_RENDER_TARGET, range);
+
+ const float clearColorRGBA[4] = {fClearColor, fClearColor, fClearColor, fClearColor};
+
+ ASSERT(range.aspects == Aspect::Color);
+ for (uint32_t level = range.baseMipLevel; level < range.baseMipLevel + range.levelCount;
+ ++level) {
+ for (uint32_t layer = range.baseArrayLayer;
+ layer < range.baseArrayLayer + range.layerCount; ++layer) {
+ if (clearValue == TextureBase::ClearValue::Zero &&
+ IsSubresourceContentInitialized(
+ SubresourceRange::SingleMipAndLayer(level, layer, Aspect::Color))) {
+ // Skip lazy clears if already initialized.
+ continue;
+ }
+
+ CPUDescriptorHeapAllocation rtvHeap;
+ DAWN_TRY_ASSIGN(
+ rtvHeap,
+ device->GetRenderTargetViewAllocator()->AllocateTransientCPUDescriptors());
+ const D3D12_CPU_DESCRIPTOR_HANDLE rtvHandle = rtvHeap.GetBaseDescriptor();
+
+ uint32_t baseSlice = layer;
+ uint32_t sliceCount = 1;
+ if (GetDimension() == wgpu::TextureDimension::e3D) {
+ baseSlice = 0;
+ sliceCount = std::max(GetDepth() >> level, 1u);
}
+ D3D12_RENDER_TARGET_VIEW_DESC rtvDesc =
+ GetRTVDescriptor(GetFormat(), level, baseSlice, sliceCount);
+ device->GetD3D12Device()->CreateRenderTargetView(GetD3D12Resource(), &rtvDesc,
+ rtvHandle);
+ commandList->ClearRenderTargetView(rtvHandle, clearColorRGBA, 0, nullptr);
}
- } else if ((mD3D12ResourceFlags & D3D12_RESOURCE_FLAG_ALLOW_RENDER_TARGET) != 0) {
- TrackUsageAndTransitionNow(commandContext, D3D12_RESOURCE_STATE_RENDER_TARGET, range);
+ }
+ } else {
+ ASSERT(!IsMultisampledTexture());
- const float clearColorRGBA[4] = {fClearColor, fClearColor, fClearColor, fClearColor};
+        // Create a temporary buffer containing the clear color to copy to the texture image.
+ TrackUsageAndTransitionNow(commandContext, D3D12_RESOURCE_STATE_COPY_DEST, range);
+
+ for (Aspect aspect : IterateEnumMask(range.aspects)) {
+ const TexelBlockInfo& blockInfo = GetFormat().GetAspectInfo(aspect).block;
+
+ Extent3D largestMipSize = GetMipLevelSingleSubresourcePhysicalSize(range.baseMipLevel);
+
+ uint32_t bytesPerRow =
+ Align((largestMipSize.width / blockInfo.width) * blockInfo.byteSize,
+ kTextureBytesPerRowAlignment);
+ uint64_t bufferSize = bytesPerRow * (largestMipSize.height / blockInfo.height) *
+ largestMipSize.depthOrArrayLayers;
+ DynamicUploader* uploader = device->GetDynamicUploader();
+ UploadHandle uploadHandle;
+ DAWN_TRY_ASSIGN(uploadHandle,
+ uploader->Allocate(bufferSize, device->GetPendingCommandSerial(),
+ blockInfo.byteSize));
+ memset(uploadHandle.mappedBuffer, clearColor, bufferSize);
- ASSERT(range.aspects == Aspect::Color);
for (uint32_t level = range.baseMipLevel; level < range.baseMipLevel + range.levelCount;
++level) {
+ // compute d3d12 texture copy locations for texture and buffer
+ Extent3D copySize = GetMipLevelSingleSubresourcePhysicalSize(level);
+
for (uint32_t layer = range.baseArrayLayer;
layer < range.baseArrayLayer + range.layerCount; ++layer) {
if (clearValue == TextureBase::ClearValue::Zero &&
IsSubresourceContentInitialized(
- SubresourceRange::SingleMipAndLayer(level, layer, Aspect::Color))) {
+ SubresourceRange::SingleMipAndLayer(level, layer, aspect))) {
// Skip lazy clears if already initialized.
continue;
}
- CPUDescriptorHeapAllocation rtvHeap;
- DAWN_TRY_ASSIGN(
- rtvHeap,
- device->GetRenderTargetViewAllocator()->AllocateTransientCPUDescriptors());
- const D3D12_CPU_DESCRIPTOR_HANDLE rtvHandle = rtvHeap.GetBaseDescriptor();
-
- uint32_t baseSlice = layer;
- uint32_t sliceCount = 1;
- if (GetDimension() == wgpu::TextureDimension::e3D) {
- baseSlice = 0;
- sliceCount = std::max(GetDepth() >> level, 1u);
- }
- D3D12_RENDER_TARGET_VIEW_DESC rtvDesc =
- GetRTVDescriptor(GetFormat(), level, baseSlice, sliceCount);
- device->GetD3D12Device()->CreateRenderTargetView(GetD3D12Resource(), &rtvDesc,
- rtvHandle);
- commandList->ClearRenderTargetView(rtvHandle, clearColorRGBA, 0, nullptr);
+ TextureCopy textureCopy;
+ textureCopy.texture = this;
+ textureCopy.origin = {0, 0, layer};
+ textureCopy.mipLevel = level;
+ textureCopy.aspect = aspect;
+ RecordBufferTextureCopyWithBufferHandle(
+ BufferTextureCopyDirection::B2T, commandList,
+ ToBackend(uploadHandle.stagingBuffer)->GetResource(),
+ uploadHandle.startOffset, bytesPerRow, largestMipSize.height, textureCopy,
+ copySize);
}
}
- } else {
- // create temp buffer with clear color to copy to the texture image
- TrackUsageAndTransitionNow(commandContext, D3D12_RESOURCE_STATE_COPY_DEST, range);
-
- for (Aspect aspect : IterateEnumMask(range.aspects)) {
- const TexelBlockInfo& blockInfo = GetFormat().GetAspectInfo(aspect).block;
-
- Extent3D largestMipSize = GetMipLevelPhysicalSize(range.baseMipLevel);
-
- uint32_t bytesPerRow =
- Align((largestMipSize.width / blockInfo.width) * blockInfo.byteSize,
- kTextureBytesPerRowAlignment);
- uint64_t bufferSize = bytesPerRow * (largestMipSize.height / blockInfo.height) *
- largestMipSize.depthOrArrayLayers;
- DynamicUploader* uploader = device->GetDynamicUploader();
- UploadHandle uploadHandle;
- DAWN_TRY_ASSIGN(uploadHandle,
- uploader->Allocate(bufferSize, device->GetPendingCommandSerial(),
- blockInfo.byteSize));
- memset(uploadHandle.mappedBuffer, clearColor, bufferSize);
-
- for (uint32_t level = range.baseMipLevel;
- level < range.baseMipLevel + range.levelCount; ++level) {
- // compute d3d12 texture copy locations for texture and buffer
- Extent3D copySize = GetMipLevelPhysicalSize(level);
-
- for (uint32_t layer = range.baseArrayLayer;
- layer < range.baseArrayLayer + range.layerCount; ++layer) {
- if (clearValue == TextureBase::ClearValue::Zero &&
- IsSubresourceContentInitialized(
- SubresourceRange::SingleMipAndLayer(level, layer, aspect))) {
- // Skip lazy clears if already initialized.
- continue;
- }
-
- TextureCopy textureCopy;
- textureCopy.texture = this;
- textureCopy.origin = {0, 0, layer};
- textureCopy.mipLevel = level;
- textureCopy.aspect = aspect;
- RecordBufferTextureCopyWithBufferHandle(
- BufferTextureCopyDirection::B2T, commandList,
- ToBackend(uploadHandle.stagingBuffer)->GetResource(),
- uploadHandle.startOffset, bytesPerRow, GetHeight(), textureCopy,
- copySize);
- }
- }
- }
- }
- if (clearValue == TextureBase::ClearValue::Zero) {
- SetIsSubresourceContentInitialized(true, range);
- GetDevice()->IncrementLazyClearCountForTesting();
}
- return {};
}
-
- void Texture::SetLabelHelper(const char* prefix) {
- SetDebugName(ToBackend(GetDevice()), mResourceAllocation.GetD3D12Resource(), prefix,
- GetLabel());
+ if (clearValue == TextureBase::ClearValue::Zero) {
+ SetIsSubresourceContentInitialized(true, range);
+ GetDevice()->IncrementLazyClearCountForTesting();
}
-
- void Texture::SetLabelImpl() {
- SetLabelHelper("Dawn_InternalTexture");
+ return {};
+}
+
+void Texture::SetLabelHelper(const char* prefix) {
+ SetDebugName(ToBackend(GetDevice()), mResourceAllocation.GetD3D12Resource(), prefix,
+ GetLabel());
+}
+
+void Texture::SetLabelImpl() {
+ SetLabelHelper("Dawn_InternalTexture");
+}
+
+void Texture::EnsureSubresourceContentInitialized(CommandRecordingContext* commandContext,
+ const SubresourceRange& range) {
+ if (!ToBackend(GetDevice())->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse)) {
+ return;
}
-
- void Texture::EnsureSubresourceContentInitialized(CommandRecordingContext* commandContext,
- const SubresourceRange& range) {
- if (!ToBackend(GetDevice())->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse)) {
- return;
- }
- if (!IsSubresourceContentInitialized(range)) {
- // If subresource has not been initialized, clear it to black as it could contain
- // dirty bits from recycled memory
- GetDevice()->ConsumedError(
- ClearTexture(commandContext, range, TextureBase::ClearValue::Zero));
- }
- }
-
- bool Texture::StateAndDecay::operator==(const Texture::StateAndDecay& other) const {
- return lastState == other.lastState && lastDecaySerial == other.lastDecaySerial &&
- isValidToDecay == other.isValidToDecay;
- }
-
- // static
- Ref<TextureView> TextureView::Create(TextureBase* texture,
- const TextureViewDescriptor* descriptor) {
- return AcquireRef(new TextureView(texture, descriptor));
+ if (!IsSubresourceContentInitialized(range)) {
+        // If the subresource has not been initialized, clear it to black as it could contain
+        // dirty bits from recycled memory.
+ GetDevice()->ConsumedError(
+ ClearTexture(commandContext, range, TextureBase::ClearValue::Zero));
}
-
- TextureView::TextureView(TextureBase* texture, const TextureViewDescriptor* descriptor)
- : TextureViewBase(texture, descriptor) {
- mSrvDesc.Format = D3D12TextureFormat(descriptor->format);
- mSrvDesc.Shader4ComponentMapping = D3D12_DEFAULT_SHADER_4_COMPONENT_MAPPING;
-
- UINT planeSlice = 0;
- const Format& textureFormat = texture->GetFormat();
- if (textureFormat.HasDepthOrStencil()) {
- // Configure the SRV descriptor to reinterpret the texture allocated as
- // TYPELESS as a single-plane shader-accessible view.
- switch (textureFormat.format) {
- case wgpu::TextureFormat::Depth32Float:
- case wgpu::TextureFormat::Depth24Plus:
- mSrvDesc.Format = DXGI_FORMAT_R32_FLOAT;
- break;
- case wgpu::TextureFormat::Depth16Unorm:
- mSrvDesc.Format = DXGI_FORMAT_R16_UNORM;
- break;
- case wgpu::TextureFormat::Stencil8:
- case wgpu::TextureFormat::Depth24UnormStencil8: {
- Aspect aspects = SelectFormatAspects(textureFormat, descriptor->aspect);
- ASSERT(aspects != Aspect::None);
- if (!HasZeroOrOneBits(aspects)) {
- // A single aspect is not selected. The texture view must not be
- // sampled.
- mSrvDesc.Format = DXGI_FORMAT_UNKNOWN;
- break;
- }
- switch (aspects) {
- case Aspect::Depth:
- planeSlice = 0;
- mSrvDesc.Format = DXGI_FORMAT_R24_UNORM_X8_TYPELESS;
- break;
- case Aspect::Stencil:
- planeSlice = 1;
- mSrvDesc.Format = DXGI_FORMAT_X24_TYPELESS_G8_UINT;
- // Stencil is accessed using the .g component in the shader.
- // Map it to the zeroth component to match other APIs.
- mSrvDesc.Shader4ComponentMapping =
- D3D12_ENCODE_SHADER_4_COMPONENT_MAPPING(
- D3D12_SHADER_COMPONENT_MAPPING_FROM_MEMORY_COMPONENT_1,
- D3D12_SHADER_COMPONENT_MAPPING_FORCE_VALUE_0,
- D3D12_SHADER_COMPONENT_MAPPING_FORCE_VALUE_0,
- D3D12_SHADER_COMPONENT_MAPPING_FORCE_VALUE_1);
- break;
- default:
- UNREACHABLE();
- break;
- }
+}
+
+bool Texture::StateAndDecay::operator==(const Texture::StateAndDecay& other) const {
+ return lastState == other.lastState && lastDecaySerial == other.lastDecaySerial &&
+ isValidToDecay == other.isValidToDecay;
+}
+
+// static
+Ref<TextureView> TextureView::Create(TextureBase* texture,
+ const TextureViewDescriptor* descriptor) {
+ return AcquireRef(new TextureView(texture, descriptor));
+}
+
+TextureView::TextureView(TextureBase* texture, const TextureViewDescriptor* descriptor)
+ : TextureViewBase(texture, descriptor) {
+ mSrvDesc.Format = D3D12TextureFormat(descriptor->format);
+ mSrvDesc.Shader4ComponentMapping = D3D12_DEFAULT_SHADER_4_COMPONENT_MAPPING;
+
+ UINT planeSlice = 0;
+ const Format& textureFormat = texture->GetFormat();
+ if (textureFormat.HasDepthOrStencil()) {
+ // Configure the SRV descriptor to reinterpret the texture allocated as
+ // TYPELESS as a single-plane shader-accessible view.
+ switch (textureFormat.format) {
+ case wgpu::TextureFormat::Depth32Float:
+ case wgpu::TextureFormat::Depth24Plus:
+ mSrvDesc.Format = DXGI_FORMAT_R32_FLOAT;
+ break;
+ case wgpu::TextureFormat::Depth16Unorm:
+ mSrvDesc.Format = DXGI_FORMAT_R16_UNORM;
+ break;
+ case wgpu::TextureFormat::Stencil8:
+ case wgpu::TextureFormat::Depth24UnormStencil8: {
+ Aspect aspects = SelectFormatAspects(textureFormat, descriptor->aspect);
+ ASSERT(aspects != Aspect::None);
+ if (!HasZeroOrOneBits(aspects)) {
+ // A single aspect is not selected. The texture view must not be
+ // sampled.
+ mSrvDesc.Format = DXGI_FORMAT_UNKNOWN;
break;
}
- case wgpu::TextureFormat::Depth24PlusStencil8:
- case wgpu::TextureFormat::Depth32FloatStencil8: {
- Aspect aspects = SelectFormatAspects(textureFormat, descriptor->aspect);
- ASSERT(aspects != Aspect::None);
- if (!HasZeroOrOneBits(aspects)) {
- // A single aspect is not selected. The texture view must not be
- // sampled.
- mSrvDesc.Format = DXGI_FORMAT_UNKNOWN;
+ switch (aspects) {
+ case Aspect::Depth:
+ planeSlice = 0;
+ mSrvDesc.Format = DXGI_FORMAT_R24_UNORM_X8_TYPELESS;
+ break;
+ case Aspect::Stencil:
+ planeSlice = 1;
+ mSrvDesc.Format = DXGI_FORMAT_X24_TYPELESS_G8_UINT;
+ // Stencil is accessed using the .g component in the shader.
+ // Map it to the zeroth component to match other APIs.
+ mSrvDesc.Shader4ComponentMapping = D3D12_ENCODE_SHADER_4_COMPONENT_MAPPING(
+ D3D12_SHADER_COMPONENT_MAPPING_FROM_MEMORY_COMPONENT_1,
+ D3D12_SHADER_COMPONENT_MAPPING_FORCE_VALUE_0,
+ D3D12_SHADER_COMPONENT_MAPPING_FORCE_VALUE_0,
+ D3D12_SHADER_COMPONENT_MAPPING_FORCE_VALUE_1);
+ break;
+ default:
+ UNREACHABLE();
break;
- }
- switch (aspects) {
- case Aspect::Depth:
- planeSlice = 0;
- mSrvDesc.Format = DXGI_FORMAT_R32_FLOAT_X8X24_TYPELESS;
- break;
- case Aspect::Stencil:
- planeSlice = 1;
- mSrvDesc.Format = DXGI_FORMAT_X32_TYPELESS_G8X24_UINT;
- // Stencil is accessed using the .g component in the shader.
- // Map it to the zeroth component to match other APIs.
- mSrvDesc.Shader4ComponentMapping =
- D3D12_ENCODE_SHADER_4_COMPONENT_MAPPING(
- D3D12_SHADER_COMPONENT_MAPPING_FROM_MEMORY_COMPONENT_1,
- D3D12_SHADER_COMPONENT_MAPPING_FORCE_VALUE_0,
- D3D12_SHADER_COMPONENT_MAPPING_FORCE_VALUE_0,
- D3D12_SHADER_COMPONENT_MAPPING_FORCE_VALUE_1);
- break;
- default:
- UNREACHABLE();
- break;
- }
- break;
}
- default:
- UNREACHABLE();
- break;
- }
- }
-
- // Per plane view formats must have the plane slice number be the index of the plane in the
- // array of textures.
- if (texture->GetFormat().IsMultiPlanar()) {
- const Aspect planeAspect = ConvertViewAspect(GetFormat(), descriptor->aspect);
- planeSlice = GetAspectIndex(planeAspect);
- mSrvDesc.Format =
- D3D12TextureFormat(texture->GetFormat().GetAspectInfo(planeAspect).format);
- }
-
- // Currently we always use D3D12_TEX2D_ARRAY_SRV because we cannot specify base array layer
- // and layer count in D3D12_TEX2D_SRV. For 2D texture views, we treat them as 1-layer 2D
- // array textures.
- // Multisampled textures may only be one array layer, so we use
- // D3D12_SRV_DIMENSION_TEXTURE2DMS.
- // https://docs.microsoft.com/en-us/windows/desktop/api/d3d12/ns-d3d12-d3d12_tex2d_srv
- // https://docs.microsoft.com/en-us/windows/desktop/api/d3d12/ns-d3d12-d3d12_tex2d_array_srv
- if (GetTexture()->IsMultisampledTexture()) {
- switch (descriptor->dimension) {
- case wgpu::TextureViewDimension::e2DArray:
- ASSERT(texture->GetArrayLayers() == 1);
- [[fallthrough]];
- case wgpu::TextureViewDimension::e2D:
- ASSERT(texture->GetDimension() == wgpu::TextureDimension::e2D);
- mSrvDesc.ViewDimension = D3D12_SRV_DIMENSION_TEXTURE2DMS;
- break;
-
- default:
- UNREACHABLE();
+ break;
}
- } else {
- switch (descriptor->dimension) {
- case wgpu::TextureViewDimension::e1D:
- mSrvDesc.ViewDimension = D3D12_SRV_DIMENSION_TEXTURE1D;
- mSrvDesc.Texture1D.MipLevels = descriptor->mipLevelCount;
- mSrvDesc.Texture1D.MostDetailedMip = descriptor->baseMipLevel;
- mSrvDesc.Texture1D.ResourceMinLODClamp = 0;
- break;
-
- case wgpu::TextureViewDimension::e2D:
- case wgpu::TextureViewDimension::e2DArray:
- ASSERT(texture->GetDimension() == wgpu::TextureDimension::e2D);
- mSrvDesc.ViewDimension = D3D12_SRV_DIMENSION_TEXTURE2DARRAY;
- mSrvDesc.Texture2DArray.ArraySize = descriptor->arrayLayerCount;
- mSrvDesc.Texture2DArray.FirstArraySlice = descriptor->baseArrayLayer;
- mSrvDesc.Texture2DArray.MipLevels = descriptor->mipLevelCount;
- mSrvDesc.Texture2DArray.MostDetailedMip = descriptor->baseMipLevel;
- mSrvDesc.Texture2DArray.PlaneSlice = planeSlice;
- mSrvDesc.Texture2DArray.ResourceMinLODClamp = 0;
- break;
- case wgpu::TextureViewDimension::Cube:
- case wgpu::TextureViewDimension::CubeArray:
- ASSERT(texture->GetDimension() == wgpu::TextureDimension::e2D);
- ASSERT(descriptor->arrayLayerCount % 6 == 0);
- mSrvDesc.ViewDimension = D3D12_SRV_DIMENSION_TEXTURECUBEARRAY;
- mSrvDesc.TextureCubeArray.First2DArrayFace = descriptor->baseArrayLayer;
- mSrvDesc.TextureCubeArray.NumCubes = descriptor->arrayLayerCount / 6;
- mSrvDesc.TextureCubeArray.MostDetailedMip = descriptor->baseMipLevel;
- mSrvDesc.TextureCubeArray.MipLevels = descriptor->mipLevelCount;
- mSrvDesc.TextureCubeArray.ResourceMinLODClamp = 0;
- break;
- case wgpu::TextureViewDimension::e3D:
- ASSERT(texture->GetDimension() == wgpu::TextureDimension::e3D);
- mSrvDesc.ViewDimension = D3D12_SRV_DIMENSION_TEXTURE3D;
- mSrvDesc.Texture3D.MostDetailedMip = descriptor->baseMipLevel;
- mSrvDesc.Texture3D.MipLevels = descriptor->mipLevelCount;
- mSrvDesc.Texture3D.ResourceMinLODClamp = 0;
+ case wgpu::TextureFormat::Depth24PlusStencil8:
+ case wgpu::TextureFormat::Depth32FloatStencil8: {
+ Aspect aspects = SelectFormatAspects(textureFormat, descriptor->aspect);
+ ASSERT(aspects != Aspect::None);
+ if (!HasZeroOrOneBits(aspects)) {
+ // A single aspect is not selected. The texture view must not be
+ // sampled.
+ mSrvDesc.Format = DXGI_FORMAT_UNKNOWN;
break;
-
- case wgpu::TextureViewDimension::Undefined:
- UNREACHABLE();
+ }
+ switch (aspects) {
+ case Aspect::Depth:
+ planeSlice = 0;
+ mSrvDesc.Format = DXGI_FORMAT_R32_FLOAT_X8X24_TYPELESS;
+ break;
+ case Aspect::Stencil:
+ planeSlice = 1;
+ mSrvDesc.Format = DXGI_FORMAT_X32_TYPELESS_G8X24_UINT;
+ // Stencil is accessed using the .g component in the shader.
+ // Map it to the zeroth component to match other APIs.
+ mSrvDesc.Shader4ComponentMapping = D3D12_ENCODE_SHADER_4_COMPONENT_MAPPING(
+ D3D12_SHADER_COMPONENT_MAPPING_FROM_MEMORY_COMPONENT_1,
+ D3D12_SHADER_COMPONENT_MAPPING_FORCE_VALUE_0,
+ D3D12_SHADER_COMPONENT_MAPPING_FORCE_VALUE_0,
+ D3D12_SHADER_COMPONENT_MAPPING_FORCE_VALUE_1);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ break;
}
+ default:
+ UNREACHABLE();
+ break;
}
}
- DXGI_FORMAT TextureView::GetD3D12Format() const {
- return D3D12TextureFormat(GetFormat().format);
+    // For per-plane view formats, the plane slice number must be the index of the plane in the
+    // array of textures.
+ if (texture->GetFormat().IsMultiPlanar()) {
+ const Aspect planeAspect = ConvertViewAspect(GetFormat(), descriptor->aspect);
+ planeSlice = GetAspectIndex(planeAspect);
+ mSrvDesc.Format =
+ D3D12TextureFormat(texture->GetFormat().GetAspectInfo(planeAspect).format);
}
- const D3D12_SHADER_RESOURCE_VIEW_DESC& TextureView::GetSRVDescriptor() const {
- ASSERT(mSrvDesc.Format != DXGI_FORMAT_UNKNOWN);
- return mSrvDesc;
- }
-
- D3D12_RENDER_TARGET_VIEW_DESC TextureView::GetRTVDescriptor() const {
- return ToBackend(GetTexture())
- ->GetRTVDescriptor(GetFormat(), GetBaseMipLevel(), GetBaseArrayLayer(),
- GetLayerCount());
- }
-
- D3D12_DEPTH_STENCIL_VIEW_DESC TextureView::GetDSVDescriptor(bool depthReadOnly,
- bool stencilReadOnly) const {
- ASSERT(GetLevelCount() == 1);
- return ToBackend(GetTexture())
- ->GetDSVDescriptor(GetBaseMipLevel(), GetBaseArrayLayer(), GetLayerCount(),
- GetAspects(), depthReadOnly, stencilReadOnly);
- }
-
- D3D12_UNORDERED_ACCESS_VIEW_DESC TextureView::GetUAVDescriptor() const {
- D3D12_UNORDERED_ACCESS_VIEW_DESC uavDesc;
- uavDesc.Format = GetD3D12Format();
+ // Currently we always use D3D12_TEX2D_ARRAY_SRV because we cannot specify base array layer
+ // and layer count in D3D12_TEX2D_SRV. For 2D texture views, we treat them as 1-layer 2D
+ // array textures.
+ // Multisampled textures may only be one array layer, so we use
+ // D3D12_SRV_DIMENSION_TEXTURE2DMS.
+ // https://docs.microsoft.com/en-us/windows/desktop/api/d3d12/ns-d3d12-d3d12_tex2d_srv
+ // https://docs.microsoft.com/en-us/windows/desktop/api/d3d12/ns-d3d12-d3d12_tex2d_array_srv
+ if (GetTexture()->IsMultisampledTexture()) {
+ switch (descriptor->dimension) {
+ case wgpu::TextureViewDimension::e2DArray:
+ ASSERT(texture->GetArrayLayers() == 1);
+ [[fallthrough]];
+ case wgpu::TextureViewDimension::e2D:
+ ASSERT(texture->GetDimension() == wgpu::TextureDimension::e2D);
+ mSrvDesc.ViewDimension = D3D12_SRV_DIMENSION_TEXTURE2DMS;
+ break;
- ASSERT(!GetTexture()->IsMultisampledTexture());
- switch (GetDimension()) {
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ switch (descriptor->dimension) {
case wgpu::TextureViewDimension::e1D:
- uavDesc.ViewDimension = D3D12_UAV_DIMENSION_TEXTURE1D;
- uavDesc.Texture1D.MipSlice = GetBaseMipLevel();
+ mSrvDesc.ViewDimension = D3D12_SRV_DIMENSION_TEXTURE1D;
+ mSrvDesc.Texture1D.MipLevels = descriptor->mipLevelCount;
+ mSrvDesc.Texture1D.MostDetailedMip = descriptor->baseMipLevel;
+ mSrvDesc.Texture1D.ResourceMinLODClamp = 0;
break;
+
case wgpu::TextureViewDimension::e2D:
case wgpu::TextureViewDimension::e2DArray:
- uavDesc.ViewDimension = D3D12_UAV_DIMENSION_TEXTURE2DARRAY;
- uavDesc.Texture2DArray.FirstArraySlice = GetBaseArrayLayer();
- uavDesc.Texture2DArray.ArraySize = GetLayerCount();
- uavDesc.Texture2DArray.MipSlice = GetBaseMipLevel();
- uavDesc.Texture2DArray.PlaneSlice = 0;
+ ASSERT(texture->GetDimension() == wgpu::TextureDimension::e2D);
+ mSrvDesc.ViewDimension = D3D12_SRV_DIMENSION_TEXTURE2DARRAY;
+ mSrvDesc.Texture2DArray.ArraySize = descriptor->arrayLayerCount;
+ mSrvDesc.Texture2DArray.FirstArraySlice = descriptor->baseArrayLayer;
+ mSrvDesc.Texture2DArray.MipLevels = descriptor->mipLevelCount;
+ mSrvDesc.Texture2DArray.MostDetailedMip = descriptor->baseMipLevel;
+ mSrvDesc.Texture2DArray.PlaneSlice = planeSlice;
+ mSrvDesc.Texture2DArray.ResourceMinLODClamp = 0;
break;
- case wgpu::TextureViewDimension::e3D:
- uavDesc.ViewDimension = D3D12_UAV_DIMENSION_TEXTURE3D;
- uavDesc.Texture3D.FirstWSlice = 0;
- uavDesc.Texture3D.WSize = GetTexture()->GetDepth() >> GetBaseMipLevel();
- uavDesc.Texture3D.MipSlice = GetBaseMipLevel();
- break;
- // Cube and Cubemap can't be used as storage texture. So there is no need to create UAV
- // descriptor for them.
case wgpu::TextureViewDimension::Cube:
case wgpu::TextureViewDimension::CubeArray:
+ ASSERT(texture->GetDimension() == wgpu::TextureDimension::e2D);
+ ASSERT(descriptor->arrayLayerCount % 6 == 0);
+ mSrvDesc.ViewDimension = D3D12_SRV_DIMENSION_TEXTURECUBEARRAY;
+ mSrvDesc.TextureCubeArray.First2DArrayFace = descriptor->baseArrayLayer;
+ mSrvDesc.TextureCubeArray.NumCubes = descriptor->arrayLayerCount / 6;
+ mSrvDesc.TextureCubeArray.MostDetailedMip = descriptor->baseMipLevel;
+ mSrvDesc.TextureCubeArray.MipLevels = descriptor->mipLevelCount;
+ mSrvDesc.TextureCubeArray.ResourceMinLODClamp = 0;
+ break;
+ case wgpu::TextureViewDimension::e3D:
+ ASSERT(texture->GetDimension() == wgpu::TextureDimension::e3D);
+ mSrvDesc.ViewDimension = D3D12_SRV_DIMENSION_TEXTURE3D;
+ mSrvDesc.Texture3D.MostDetailedMip = descriptor->baseMipLevel;
+ mSrvDesc.Texture3D.MipLevels = descriptor->mipLevelCount;
+ mSrvDesc.Texture3D.ResourceMinLODClamp = 0;
+ break;
+
case wgpu::TextureViewDimension::Undefined:
UNREACHABLE();
}
- return uavDesc;
}
+}
+
+DXGI_FORMAT TextureView::GetD3D12Format() const {
+ return D3D12TextureFormat(GetFormat().format);
+}
+
+const D3D12_SHADER_RESOURCE_VIEW_DESC& TextureView::GetSRVDescriptor() const {
+ ASSERT(mSrvDesc.Format != DXGI_FORMAT_UNKNOWN);
+ return mSrvDesc;
+}
+
+D3D12_RENDER_TARGET_VIEW_DESC TextureView::GetRTVDescriptor() const {
+ return ToBackend(GetTexture())
+ ->GetRTVDescriptor(GetFormat(), GetBaseMipLevel(), GetBaseArrayLayer(), GetLayerCount());
+}
+
+D3D12_DEPTH_STENCIL_VIEW_DESC TextureView::GetDSVDescriptor(bool depthReadOnly,
+ bool stencilReadOnly) const {
+ ASSERT(GetLevelCount() == 1);
+ return ToBackend(GetTexture())
+ ->GetDSVDescriptor(GetBaseMipLevel(), GetBaseArrayLayer(), GetLayerCount(), GetAspects(),
+ depthReadOnly, stencilReadOnly);
+}
+
+D3D12_UNORDERED_ACCESS_VIEW_DESC TextureView::GetUAVDescriptor() const {
+ D3D12_UNORDERED_ACCESS_VIEW_DESC uavDesc;
+ uavDesc.Format = GetD3D12Format();
+
+ ASSERT(!GetTexture()->IsMultisampledTexture());
+ switch (GetDimension()) {
+ case wgpu::TextureViewDimension::e1D:
+ uavDesc.ViewDimension = D3D12_UAV_DIMENSION_TEXTURE1D;
+ uavDesc.Texture1D.MipSlice = GetBaseMipLevel();
+ break;
+ case wgpu::TextureViewDimension::e2D:
+ case wgpu::TextureViewDimension::e2DArray:
+ uavDesc.ViewDimension = D3D12_UAV_DIMENSION_TEXTURE2DARRAY;
+ uavDesc.Texture2DArray.FirstArraySlice = GetBaseArrayLayer();
+ uavDesc.Texture2DArray.ArraySize = GetLayerCount();
+ uavDesc.Texture2DArray.MipSlice = GetBaseMipLevel();
+ uavDesc.Texture2DArray.PlaneSlice = 0;
+ break;
+ case wgpu::TextureViewDimension::e3D:
+ uavDesc.ViewDimension = D3D12_UAV_DIMENSION_TEXTURE3D;
+ uavDesc.Texture3D.FirstWSlice = 0;
+ uavDesc.Texture3D.WSize = GetTexture()->GetDepth() >> GetBaseMipLevel();
+ uavDesc.Texture3D.MipSlice = GetBaseMipLevel();
+ break;
+ // Cube and Cubemap can't be used as storage texture. So there is no need to create UAV
+ // descriptor for them.
+ case wgpu::TextureViewDimension::Cube:
+ case wgpu::TextureViewDimension::CubeArray:
+ case wgpu::TextureViewDimension::Undefined:
+ UNREACHABLE();
+ }
+ return uavDesc;
+}
} // namespace dawn::native::d3d12
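
The lazy-clear fallback in ClearTexture() above sizes its staging buffer as bytesPerRow * rows * depthOrArrayLayers, where bytesPerRow is the texel row size rounded up to kTextureBytesPerRowAlignment, and the whole allocation, padding included, is memset to the clear value before the buffer-to-texture copies are recorded. A minimal, self-contained sketch of that sizing arithmetic follows; the 300x300 RGBA8 subresource, the local Align() stand-in, and the 256-byte alignment value are illustrative assumptions, not taken from this patch.

// Sketch of the staging-buffer sizing used by the buffer-to-texture clear path above.
// Assumes a 256-byte row-pitch alignment (the usual D3D12_TEXTURE_DATA_PITCH_ALIGNMENT) and a
// hypothetical 300x300x1 RGBA8 subresource; uncompressed formats use 1x1 texel blocks.
#include <cstdint>
#include <cstdio>

// Local stand-in for the Align() helper used in the Dawn code above.
constexpr uint64_t Align(uint64_t value, uint64_t alignment) {
    return (value + alignment - 1) / alignment * alignment;
}

int main() {
    const uint64_t kRowPitchAlignment = 256;
    const uint64_t width = 300, height = 300, depthOrArrayLayers = 1;
    const uint64_t blockWidth = 1, blockHeight = 1, blockByteSize = 4;

    const uint64_t bytesPerRow = Align((width / blockWidth) * blockByteSize, kRowPitchAlignment);
    const uint64_t bufferSize = bytesPerRow * (height / blockHeight) * depthOrArrayLayers;

    // 300 texels * 4 bytes = 1200 bytes of data per row, padded to 1280;
    // 1280 bytes * 300 rows * 1 layer = 384000 bytes for the staging allocation.
    std::printf("bytesPerRow=%llu bufferSize=%llu\n",
                (unsigned long long)bytesPerRow, (unsigned long long)bufferSize);
    return 0;
}
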
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/TextureD3D12.h b/chromium/third_party/dawn/src/dawn/native/d3d12/TextureD3D12.h
index 2a05e97e639..05b80db12b4 100644
--- a/chromium/third_party/dawn/src/dawn/native/d3d12/TextureD3D12.h
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/TextureD3D12.h
@@ -15,6 +15,8 @@
#ifndef SRC_DAWN_NATIVE_D3D12_TEXTURED3D12_H_
#define SRC_DAWN_NATIVE_D3D12_TEXTURED3D12_H_
+#include <vector>
+
#include "dawn/native/Texture.h"
#include "dawn/native/DawnNative.h"
@@ -26,138 +28,135 @@
namespace dawn::native::d3d12 {
- class CommandRecordingContext;
- class Device;
- class D3D11on12ResourceCacheEntry;
-
- DXGI_FORMAT D3D12TextureFormat(wgpu::TextureFormat format);
- MaybeError ValidateD3D12TextureCanBeWrapped(ID3D12Resource* d3d12Resource,
- const TextureDescriptor* descriptor);
- MaybeError ValidateTextureDescriptorCanBeWrapped(const TextureDescriptor* descriptor);
- MaybeError ValidateD3D12VideoTextureCanBeShared(Device* device, DXGI_FORMAT textureFormat);
-
- class Texture final : public TextureBase {
- public:
- static ResultOrError<Ref<Texture>> Create(Device* device,
- const TextureDescriptor* descriptor);
- static ResultOrError<Ref<Texture>> CreateExternalImage(
- Device* device,
- const TextureDescriptor* descriptor,
- ComPtr<ID3D12Resource> d3d12Texture,
- Ref<D3D11on12ResourceCacheEntry> d3d11on12Resource,
- bool isSwapChainTexture,
- bool isInitialized);
- static ResultOrError<Ref<Texture>> Create(Device* device,
- const TextureDescriptor* descriptor,
- ComPtr<ID3D12Resource> d3d12Texture);
-
- DXGI_FORMAT GetD3D12Format() const;
- ID3D12Resource* GetD3D12Resource() const;
- DXGI_FORMAT GetD3D12CopyableSubresourceFormat(Aspect aspect) const;
-
- D3D12_RENDER_TARGET_VIEW_DESC GetRTVDescriptor(const Format& format,
- uint32_t mipLevel,
- uint32_t baseSlice,
- uint32_t sliceCount) const;
- D3D12_DEPTH_STENCIL_VIEW_DESC GetDSVDescriptor(uint32_t mipLevel,
- uint32_t baseArrayLayer,
- uint32_t layerCount,
- Aspect aspects,
- bool depthReadOnly,
- bool stencilReadOnly) const;
-
- void EnsureSubresourceContentInitialized(CommandRecordingContext* commandContext,
- const SubresourceRange& range);
-
- MaybeError AcquireKeyedMutex();
- void ReleaseKeyedMutex();
-
- void TrackUsageAndGetResourceBarrierForPass(CommandRecordingContext* commandContext,
- std::vector<D3D12_RESOURCE_BARRIER>* barrier,
- const TextureSubresourceUsage& textureUsages);
- void TransitionUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
- std::vector<D3D12_RESOURCE_BARRIER>* barrier,
- wgpu::TextureUsage usage,
- const SubresourceRange& range);
- void TrackUsageAndTransitionNow(CommandRecordingContext* commandContext,
- wgpu::TextureUsage usage,
- const SubresourceRange& range);
- void TrackUsageAndTransitionNow(CommandRecordingContext* commandContext,
- D3D12_RESOURCE_STATES newState,
- const SubresourceRange& range);
- void TrackAllUsageAndTransitionNow(CommandRecordingContext* commandContext,
- wgpu::TextureUsage usage);
- void TrackAllUsageAndTransitionNow(CommandRecordingContext* commandContext,
- D3D12_RESOURCE_STATES newState);
-
- private:
- Texture(Device* device, const TextureDescriptor* descriptor, TextureState state);
- ~Texture() override;
- using TextureBase::TextureBase;
-
- MaybeError InitializeAsInternalTexture();
- MaybeError InitializeAsExternalTexture(const TextureDescriptor* descriptor,
- ComPtr<ID3D12Resource> d3d12Texture,
- Ref<D3D11on12ResourceCacheEntry> d3d11on12Resource,
- bool isSwapChainTexture);
- MaybeError InitializeAsSwapChainTexture(ComPtr<ID3D12Resource> d3d12Texture);
-
- void SetLabelHelper(const char* prefix);
-
- // Dawn API
- void SetLabelImpl() override;
- void DestroyImpl() override;
-
- MaybeError ClearTexture(CommandRecordingContext* commandContext,
- const SubresourceRange& range,
- TextureBase::ClearValue clearValue);
-
- // Barriers implementation details.
- struct StateAndDecay {
- D3D12_RESOURCE_STATES lastState;
- ExecutionSerial lastDecaySerial;
- bool isValidToDecay;
-
- bool operator==(const StateAndDecay& other) const;
- };
- void TransitionUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
- std::vector<D3D12_RESOURCE_BARRIER>* barrier,
- D3D12_RESOURCE_STATES newState,
- const SubresourceRange& range);
- void TransitionSubresourceRange(std::vector<D3D12_RESOURCE_BARRIER>* barriers,
- const SubresourceRange& range,
- StateAndDecay* state,
- D3D12_RESOURCE_STATES subresourceNewState,
- ExecutionSerial pendingCommandSerial) const;
- void HandleTransitionSpecialCases(CommandRecordingContext* commandContext);
-
- SubresourceStorage<StateAndDecay> mSubresourceStateAndDecay;
-
- ResourceHeapAllocation mResourceAllocation;
- bool mSwapChainTexture = false;
- D3D12_RESOURCE_FLAGS mD3D12ResourceFlags;
-
- Ref<D3D11on12ResourceCacheEntry> mD3D11on12Resource;
- };
-
- class TextureView final : public TextureViewBase {
- public:
- static Ref<TextureView> Create(TextureBase* texture,
- const TextureViewDescriptor* descriptor);
-
- DXGI_FORMAT GetD3D12Format() const;
-
- const D3D12_SHADER_RESOURCE_VIEW_DESC& GetSRVDescriptor() const;
- D3D12_RENDER_TARGET_VIEW_DESC GetRTVDescriptor() const;
- D3D12_DEPTH_STENCIL_VIEW_DESC GetDSVDescriptor(bool depthReadOnly,
- bool stencilReadOnly) const;
- D3D12_UNORDERED_ACCESS_VIEW_DESC GetUAVDescriptor() const;
-
- private:
- TextureView(TextureBase* texture, const TextureViewDescriptor* descriptor);
-
- D3D12_SHADER_RESOURCE_VIEW_DESC mSrvDesc;
+class CommandRecordingContext;
+class Device;
+class D3D11on12ResourceCacheEntry;
+
+DXGI_FORMAT D3D12TextureFormat(wgpu::TextureFormat format);
+MaybeError ValidateD3D12TextureCanBeWrapped(ID3D12Resource* d3d12Resource,
+ const TextureDescriptor* descriptor);
+MaybeError ValidateTextureDescriptorCanBeWrapped(const TextureDescriptor* descriptor);
+MaybeError ValidateD3D12VideoTextureCanBeShared(Device* device, DXGI_FORMAT textureFormat);
+
+class Texture final : public TextureBase {
+ public:
+ static ResultOrError<Ref<Texture>> Create(Device* device, const TextureDescriptor* descriptor);
+ static ResultOrError<Ref<Texture>> CreateExternalImage(
+ Device* device,
+ const TextureDescriptor* descriptor,
+ ComPtr<ID3D12Resource> d3d12Texture,
+ Ref<D3D11on12ResourceCacheEntry> d3d11on12Resource,
+ bool isSwapChainTexture,
+ bool isInitialized);
+ static ResultOrError<Ref<Texture>> Create(Device* device,
+ const TextureDescriptor* descriptor,
+ ComPtr<ID3D12Resource> d3d12Texture);
+
+ DXGI_FORMAT GetD3D12Format() const;
+ ID3D12Resource* GetD3D12Resource() const;
+ DXGI_FORMAT GetD3D12CopyableSubresourceFormat(Aspect aspect) const;
+
+ D3D12_RENDER_TARGET_VIEW_DESC GetRTVDescriptor(const Format& format,
+ uint32_t mipLevel,
+ uint32_t baseSlice,
+ uint32_t sliceCount) const;
+ D3D12_DEPTH_STENCIL_VIEW_DESC GetDSVDescriptor(uint32_t mipLevel,
+ uint32_t baseArrayLayer,
+ uint32_t layerCount,
+ Aspect aspects,
+ bool depthReadOnly,
+ bool stencilReadOnly) const;
+
+ void EnsureSubresourceContentInitialized(CommandRecordingContext* commandContext,
+ const SubresourceRange& range);
+
+ MaybeError AcquireKeyedMutex();
+ void ReleaseKeyedMutex();
+
+ void TrackUsageAndGetResourceBarrierForPass(CommandRecordingContext* commandContext,
+ std::vector<D3D12_RESOURCE_BARRIER>* barrier,
+ const TextureSubresourceUsage& textureUsages);
+ void TransitionUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
+ std::vector<D3D12_RESOURCE_BARRIER>* barrier,
+ wgpu::TextureUsage usage,
+ const SubresourceRange& range);
+ void TrackUsageAndTransitionNow(CommandRecordingContext* commandContext,
+ wgpu::TextureUsage usage,
+ const SubresourceRange& range);
+ void TrackUsageAndTransitionNow(CommandRecordingContext* commandContext,
+ D3D12_RESOURCE_STATES newState,
+ const SubresourceRange& range);
+ void TrackAllUsageAndTransitionNow(CommandRecordingContext* commandContext,
+ wgpu::TextureUsage usage);
+ void TrackAllUsageAndTransitionNow(CommandRecordingContext* commandContext,
+ D3D12_RESOURCE_STATES newState);
+
+ private:
+ Texture(Device* device, const TextureDescriptor* descriptor, TextureState state);
+ ~Texture() override;
+ using TextureBase::TextureBase;
+
+ MaybeError InitializeAsInternalTexture();
+ MaybeError InitializeAsExternalTexture(const TextureDescriptor* descriptor,
+ ComPtr<ID3D12Resource> d3d12Texture,
+ Ref<D3D11on12ResourceCacheEntry> d3d11on12Resource,
+ bool isSwapChainTexture);
+ MaybeError InitializeAsSwapChainTexture(ComPtr<ID3D12Resource> d3d12Texture);
+
+ void SetLabelHelper(const char* prefix);
+
+ // Dawn API
+ void SetLabelImpl() override;
+ void DestroyImpl() override;
+
+ MaybeError ClearTexture(CommandRecordingContext* commandContext,
+ const SubresourceRange& range,
+ TextureBase::ClearValue clearValue);
+
+ // Barriers implementation details.
+ struct StateAndDecay {
+ D3D12_RESOURCE_STATES lastState;
+ ExecutionSerial lastDecaySerial;
+ bool isValidToDecay;
+
+ bool operator==(const StateAndDecay& other) const;
};
+ void TransitionUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
+ std::vector<D3D12_RESOURCE_BARRIER>* barrier,
+ D3D12_RESOURCE_STATES newState,
+ const SubresourceRange& range);
+ void TransitionSubresourceRange(std::vector<D3D12_RESOURCE_BARRIER>* barriers,
+ const SubresourceRange& range,
+ StateAndDecay* state,
+ D3D12_RESOURCE_STATES subresourceNewState,
+ ExecutionSerial pendingCommandSerial) const;
+ void HandleTransitionSpecialCases(CommandRecordingContext* commandContext);
+
+ SubresourceStorage<StateAndDecay> mSubresourceStateAndDecay;
+
+ ResourceHeapAllocation mResourceAllocation;
+ bool mSwapChainTexture = false;
+ D3D12_RESOURCE_FLAGS mD3D12ResourceFlags;
+
+ Ref<D3D11on12ResourceCacheEntry> mD3D11on12Resource;
+};
+
+class TextureView final : public TextureViewBase {
+ public:
+ static Ref<TextureView> Create(TextureBase* texture, const TextureViewDescriptor* descriptor);
+
+ DXGI_FORMAT GetD3D12Format() const;
+
+ const D3D12_SHADER_RESOURCE_VIEW_DESC& GetSRVDescriptor() const;
+ D3D12_RENDER_TARGET_VIEW_DESC GetRTVDescriptor() const;
+ D3D12_DEPTH_STENCIL_VIEW_DESC GetDSVDescriptor(bool depthReadOnly, bool stencilReadOnly) const;
+ D3D12_UNORDERED_ACCESS_VIEW_DESC GetUAVDescriptor() const;
+
+ private:
+ TextureView(TextureBase* texture, const TextureViewDescriptor* descriptor);
+
+ D3D12_SHADER_RESOURCE_VIEW_DESC mSrvDesc;
+};
} // namespace dawn::native::d3d12
#endif // SRC_DAWN_NATIVE_D3D12_TEXTURED3D12_H_
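
The UtilsD3D12.cpp hunk below introduces RequiredCopySizeByD3D12() and NeedBufferSizeWorkaroundForBufferTextureCopyOnD3D12(), which detect when a 3D buffer-to-texture or texture-to-buffer copy with rowsPerImage padding needs the split-copy workaround, because D3D12 can require a slightly larger buffer than the copy was validated against. A hedged worked example of that size computation follows; all concrete numbers are hypothetical and chosen only for illustration.

// Worked example of the RequiredCopySizeByD3D12() arithmetic added below.
// Hypothetical copy: bytesPerRow = 512, rowsPerImage = 8, copy extent 64x4x3, 4-byte texels.
#include <cstdint>
#include <cstdio>

int main() {
    const uint64_t bytesPerRow = 512;
    const uint64_t rowsPerImage = 8;   // larger than the copy height (4), so padding exists
    const uint64_t copyWidth = 64;
    const uint64_t copyDepth = 3;
    const uint64_t blockByteSize = 4;  // uncompressed format, 1x1 texel blocks

    const uint64_t bytesPerImage = bytesPerRow * rowsPerImage;  // 4096
    const uint64_t lastRowBytes = blockByteSize * copyWidth;    // 256

    // Every image except the last needs the fully padded bytesPerImage. For the last image,
    // D3D12 still wants every padded row except the final one, plus the unpadded final row.
    const uint64_t requiredByD3D12 =
        bytesPerImage * (copyDepth - 1) + bytesPerRow * (rowsPerImage - 1) + lastRowBytes;

    std::printf("required by D3D12: %llu, depth * bytesPerImage: %llu\n",
                (unsigned long long)requiredByD3D12,                 // 12032
                (unsigned long long)(bytesPerImage * copyDepth));    // 12288
    return 0;
}

With these numbers D3D12 needs 12032 bytes past the copy offset, just under depth * bytesPerImage (12288); when the bound buffer offers less than that, the new toggle-gated check below reports that the copy needs the split-copy workaround.
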
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/UtilsD3D12.cpp b/chromium/third_party/dawn/src/dawn/native/d3d12/UtilsD3D12.cpp
index 8d4749fd95a..0e761f8f1cd 100644
--- a/chromium/third_party/dawn/src/dawn/native/d3d12/UtilsD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/UtilsD3D12.cpp
@@ -14,295 +14,380 @@
#include "dawn/native/d3d12/UtilsD3D12.h"
+#include <stringapiset.h>
+
+#include <utility>
+
#include "dawn/common/Assert.h"
+#include "dawn/native/CommandValidation.h"
#include "dawn/native/Format.h"
#include "dawn/native/d3d12/BufferD3D12.h"
#include "dawn/native/d3d12/CommandRecordingContext.h"
#include "dawn/native/d3d12/D3D12Error.h"
#include "dawn/native/d3d12/DeviceD3D12.h"
-#include <stringapiset.h>
-
namespace dawn::native::d3d12 {
- ResultOrError<std::wstring> ConvertStringToWstring(const char* str) {
- size_t len = strlen(str);
- if (len == 0) {
- return std::wstring();
- }
- int numChars = MultiByteToWideChar(CP_UTF8, MB_ERR_INVALID_CHARS, str, len, nullptr, 0);
- if (numChars == 0) {
- return DAWN_INTERNAL_ERROR("Failed to convert string to wide string");
- }
- std::wstring result;
- result.resize(numChars);
- int numConvertedChars =
- MultiByteToWideChar(CP_UTF8, MB_ERR_INVALID_CHARS, str, len, &result[0], numChars);
- if (numConvertedChars != numChars) {
- return DAWN_INTERNAL_ERROR("Failed to convert string to wide string");
- }
- return std::move(result);
+namespace {
+
+uint64_t RequiredCopySizeByD3D12(const uint32_t bytesPerRow,
+ const uint32_t rowsPerImage,
+ const Extent3D& copySize,
+ const TexelBlockInfo& blockInfo) {
+ uint64_t bytesPerImage = Safe32x32(bytesPerRow, rowsPerImage);
+
+ // Required copy size for B2T/T2B copy on D3D12 is smaller than (but very close to)
+ // depth * bytesPerImage. The latter is already checked at ComputeRequiredBytesInCopy()
+ // in CommandValidation.cpp.
+ uint64_t requiredCopySizeByD3D12 = bytesPerImage * (copySize.depthOrArrayLayers - 1);
+
+ // When calculating the required copy size for B2T/T2B copy, D3D12 doesn't respect
+    // rowsPerImage paddings on the last image of a 3D texture, but it does respect
+ // bytesPerRow paddings on the last row.
+ ASSERT(blockInfo.width == 1);
+ ASSERT(blockInfo.height == 1);
+ uint64_t lastRowBytes = Safe32x32(blockInfo.byteSize, copySize.width);
+ ASSERT(rowsPerImage > copySize.height);
+ uint64_t lastImageBytesByD3D12 = Safe32x32(bytesPerRow, rowsPerImage - 1) + lastRowBytes;
+
+ requiredCopySizeByD3D12 += lastImageBytesByD3D12;
+ return requiredCopySizeByD3D12;
+}
+
+// This function is used to check whether we need a workaround for D3D12's incorrect
+// computation of the required buffer size for B2T/T2B copy. The workaround is needed only when
+// - The corresponding toggle is enabled.
+// - It is a 3D texture (so the format is uncompressed).
+// - There are multiple depth images to be copied (copySize.depthOrArrayLayers > 1).
+// - It has rowsPerImage paddings (rowsPerImage > copySize.height).
+// - The buffer size doesn't meet D3D12's requirement.
+bool NeedBufferSizeWorkaroundForBufferTextureCopyOnD3D12(const BufferCopy& bufferCopy,
+ const TextureCopy& textureCopy,
+ const Extent3D& copySize) {
+ TextureBase* texture = textureCopy.texture.Get();
+ Device* device = ToBackend(texture->GetDevice());
+
+ if (!device->IsToggleEnabled(Toggle::D3D12SplitBufferTextureCopyForRowsPerImagePaddings) ||
+ texture->GetDimension() != wgpu::TextureDimension::e3D ||
+ copySize.depthOrArrayLayers <= 1 || bufferCopy.rowsPerImage <= copySize.height) {
+ return false;
}
- D3D12_COMPARISON_FUNC ToD3D12ComparisonFunc(wgpu::CompareFunction func) {
- switch (func) {
- case wgpu::CompareFunction::Never:
- return D3D12_COMPARISON_FUNC_NEVER;
- case wgpu::CompareFunction::Less:
- return D3D12_COMPARISON_FUNC_LESS;
- case wgpu::CompareFunction::LessEqual:
- return D3D12_COMPARISON_FUNC_LESS_EQUAL;
- case wgpu::CompareFunction::Greater:
- return D3D12_COMPARISON_FUNC_GREATER;
- case wgpu::CompareFunction::GreaterEqual:
- return D3D12_COMPARISON_FUNC_GREATER_EQUAL;
- case wgpu::CompareFunction::Equal:
- return D3D12_COMPARISON_FUNC_EQUAL;
- case wgpu::CompareFunction::NotEqual:
- return D3D12_COMPARISON_FUNC_NOT_EQUAL;
- case wgpu::CompareFunction::Always:
- return D3D12_COMPARISON_FUNC_ALWAYS;
-
- case wgpu::CompareFunction::Undefined:
- UNREACHABLE();
- }
- }
+ const TexelBlockInfo& blockInfo = texture->GetFormat().GetAspectInfo(textureCopy.aspect).block;
+ uint64_t requiredCopySizeByD3D12 = RequiredCopySizeByD3D12(
+ bufferCopy.bytesPerRow, bufferCopy.rowsPerImage, copySize, blockInfo);
+ return bufferCopy.buffer->GetAllocatedSize() - bufferCopy.offset < requiredCopySizeByD3D12;
+}
- D3D12_TEXTURE_COPY_LOCATION ComputeTextureCopyLocationForTexture(const Texture* texture,
- uint32_t level,
- uint32_t layer,
- Aspect aspect) {
- D3D12_TEXTURE_COPY_LOCATION copyLocation;
- copyLocation.pResource = texture->GetD3D12Resource();
- copyLocation.Type = D3D12_TEXTURE_COPY_TYPE_SUBRESOURCE_INDEX;
- copyLocation.SubresourceIndex = texture->GetSubresourceIndex(level, layer, aspect);
+} // anonymous namespace
- return copyLocation;
+ResultOrError<std::wstring> ConvertStringToWstring(const char* str) {
+ size_t len = strlen(str);
+ if (len == 0) {
+ return std::wstring();
}
-
- D3D12_TEXTURE_COPY_LOCATION ComputeBufferLocationForCopyTextureRegion(
- const Texture* texture,
- ID3D12Resource* bufferResource,
- const Extent3D& bufferSize,
- const uint64_t offset,
- const uint32_t rowPitch,
- Aspect aspect) {
- D3D12_TEXTURE_COPY_LOCATION bufferLocation;
- bufferLocation.pResource = bufferResource;
- bufferLocation.Type = D3D12_TEXTURE_COPY_TYPE_PLACED_FOOTPRINT;
- bufferLocation.PlacedFootprint.Offset = offset;
- bufferLocation.PlacedFootprint.Footprint.Format =
- texture->GetD3D12CopyableSubresourceFormat(aspect);
- bufferLocation.PlacedFootprint.Footprint.Width = bufferSize.width;
- bufferLocation.PlacedFootprint.Footprint.Height = bufferSize.height;
- bufferLocation.PlacedFootprint.Footprint.Depth = bufferSize.depthOrArrayLayers;
- bufferLocation.PlacedFootprint.Footprint.RowPitch = rowPitch;
- return bufferLocation;
+ int numChars = MultiByteToWideChar(CP_UTF8, MB_ERR_INVALID_CHARS, str, len, nullptr, 0);
+ if (numChars == 0) {
+ return DAWN_INTERNAL_ERROR("Failed to convert string to wide string");
}
-
- D3D12_BOX ComputeD3D12BoxFromOffsetAndSize(const Origin3D& offset, const Extent3D& copySize) {
- D3D12_BOX sourceRegion;
- sourceRegion.left = offset.x;
- sourceRegion.top = offset.y;
- sourceRegion.front = offset.z;
- sourceRegion.right = offset.x + copySize.width;
- sourceRegion.bottom = offset.y + copySize.height;
- sourceRegion.back = offset.z + copySize.depthOrArrayLayers;
- return sourceRegion;
+ std::wstring result;
+ result.resize(numChars);
+ int numConvertedChars =
+ MultiByteToWideChar(CP_UTF8, MB_ERR_INVALID_CHARS, str, len, &result[0], numChars);
+ if (numConvertedChars != numChars) {
+ return DAWN_INTERNAL_ERROR("Failed to convert string to wide string");
}
-
- bool IsTypeless(DXGI_FORMAT format) {
- // List generated from <dxgiformat.h>
- switch (format) {
- case DXGI_FORMAT_R32G32B32A32_TYPELESS:
- case DXGI_FORMAT_R32G32B32_TYPELESS:
- case DXGI_FORMAT_R16G16B16A16_TYPELESS:
- case DXGI_FORMAT_R32G32_TYPELESS:
- case DXGI_FORMAT_R32G8X24_TYPELESS:
- case DXGI_FORMAT_R32_FLOAT_X8X24_TYPELESS:
- case DXGI_FORMAT_R10G10B10A2_TYPELESS:
- case DXGI_FORMAT_R8G8B8A8_TYPELESS:
- case DXGI_FORMAT_R16G16_TYPELESS:
- case DXGI_FORMAT_R32_TYPELESS:
- case DXGI_FORMAT_R24G8_TYPELESS:
- case DXGI_FORMAT_R24_UNORM_X8_TYPELESS:
- case DXGI_FORMAT_R8G8_TYPELESS:
- case DXGI_FORMAT_R16_TYPELESS:
- case DXGI_FORMAT_R8_TYPELESS:
- case DXGI_FORMAT_BC1_TYPELESS:
- case DXGI_FORMAT_BC2_TYPELESS:
- case DXGI_FORMAT_BC3_TYPELESS:
- case DXGI_FORMAT_BC4_TYPELESS:
- case DXGI_FORMAT_BC5_TYPELESS:
- case DXGI_FORMAT_B8G8R8A8_TYPELESS:
- case DXGI_FORMAT_B8G8R8X8_TYPELESS:
- case DXGI_FORMAT_BC6H_TYPELESS:
- case DXGI_FORMAT_BC7_TYPELESS:
- return true;
- default:
- return false;
- }
+ return std::move(result);
+}
+
+D3D12_COMPARISON_FUNC ToD3D12ComparisonFunc(wgpu::CompareFunction func) {
+ switch (func) {
+ case wgpu::CompareFunction::Never:
+ return D3D12_COMPARISON_FUNC_NEVER;
+ case wgpu::CompareFunction::Less:
+ return D3D12_COMPARISON_FUNC_LESS;
+ case wgpu::CompareFunction::LessEqual:
+ return D3D12_COMPARISON_FUNC_LESS_EQUAL;
+ case wgpu::CompareFunction::Greater:
+ return D3D12_COMPARISON_FUNC_GREATER;
+ case wgpu::CompareFunction::GreaterEqual:
+ return D3D12_COMPARISON_FUNC_GREATER_EQUAL;
+ case wgpu::CompareFunction::Equal:
+ return D3D12_COMPARISON_FUNC_EQUAL;
+ case wgpu::CompareFunction::NotEqual:
+ return D3D12_COMPARISON_FUNC_NOT_EQUAL;
+ case wgpu::CompareFunction::Always:
+ return D3D12_COMPARISON_FUNC_ALWAYS;
+
+ case wgpu::CompareFunction::Undefined:
+ UNREACHABLE();
}
-
- void RecordBufferTextureCopyFromSplits(BufferTextureCopyDirection direction,
- ID3D12GraphicsCommandList* commandList,
- const TextureCopySubresource& baseCopySplit,
- ID3D12Resource* bufferResource,
- uint64_t baseOffset,
- uint64_t bufferBytesPerRow,
- TextureBase* textureBase,
- uint32_t textureMiplevel,
- uint32_t textureLayer,
- Aspect aspect) {
- Texture* texture = ToBackend(textureBase);
- const D3D12_TEXTURE_COPY_LOCATION textureLocation =
- ComputeTextureCopyLocationForTexture(texture, textureMiplevel, textureLayer, aspect);
-
- for (uint32_t i = 0; i < baseCopySplit.count; ++i) {
- const TextureCopySubresource::CopyInfo& info = baseCopySplit.copies[i];
-
- // TODO(jiawei.shao@intel.com): pre-compute bufferLocation and sourceRegion as
- // members in TextureCopySubresource::CopyInfo.
- const uint64_t offsetBytes = info.alignedOffset + baseOffset;
- const D3D12_TEXTURE_COPY_LOCATION bufferLocation =
- ComputeBufferLocationForCopyTextureRegion(texture, bufferResource, info.bufferSize,
- offsetBytes, bufferBytesPerRow, aspect);
- if (direction == BufferTextureCopyDirection::B2T) {
- const D3D12_BOX sourceRegion =
- ComputeD3D12BoxFromOffsetAndSize(info.bufferOffset, info.copySize);
-
- commandList->CopyTextureRegion(&textureLocation, info.textureOffset.x,
- info.textureOffset.y, info.textureOffset.z,
- &bufferLocation, &sourceRegion);
- } else {
- ASSERT(direction == BufferTextureCopyDirection::T2B);
- const D3D12_BOX sourceRegion =
- ComputeD3D12BoxFromOffsetAndSize(info.textureOffset, info.copySize);
-
- commandList->CopyTextureRegion(&bufferLocation, info.bufferOffset.x,
- info.bufferOffset.y, info.bufferOffset.z,
- &textureLocation, &sourceRegion);
- }
- }
+}
+
+D3D12_TEXTURE_COPY_LOCATION ComputeTextureCopyLocationForTexture(const Texture* texture,
+ uint32_t level,
+ uint32_t layer,
+ Aspect aspect) {
+ D3D12_TEXTURE_COPY_LOCATION copyLocation;
+ copyLocation.pResource = texture->GetD3D12Resource();
+ copyLocation.Type = D3D12_TEXTURE_COPY_TYPE_SUBRESOURCE_INDEX;
+ copyLocation.SubresourceIndex = texture->GetSubresourceIndex(level, layer, aspect);
+
+ return copyLocation;
+}
+
+D3D12_TEXTURE_COPY_LOCATION ComputeBufferLocationForCopyTextureRegion(
+ const Texture* texture,
+ ID3D12Resource* bufferResource,
+ const Extent3D& bufferSize,
+ const uint64_t offset,
+ const uint32_t rowPitch,
+ Aspect aspect) {
+ D3D12_TEXTURE_COPY_LOCATION bufferLocation;
+ bufferLocation.pResource = bufferResource;
+ bufferLocation.Type = D3D12_TEXTURE_COPY_TYPE_PLACED_FOOTPRINT;
+ bufferLocation.PlacedFootprint.Offset = offset;
+ bufferLocation.PlacedFootprint.Footprint.Format =
+ texture->GetD3D12CopyableSubresourceFormat(aspect);
+ bufferLocation.PlacedFootprint.Footprint.Width = bufferSize.width;
+ bufferLocation.PlacedFootprint.Footprint.Height = bufferSize.height;
+ bufferLocation.PlacedFootprint.Footprint.Depth = bufferSize.depthOrArrayLayers;
+ bufferLocation.PlacedFootprint.Footprint.RowPitch = rowPitch;
+ return bufferLocation;
+}
+
+D3D12_BOX ComputeD3D12BoxFromOffsetAndSize(const Origin3D& offset, const Extent3D& copySize) {
+ D3D12_BOX sourceRegion;
+ sourceRegion.left = offset.x;
+ sourceRegion.top = offset.y;
+ sourceRegion.front = offset.z;
+ sourceRegion.right = offset.x + copySize.width;
+ sourceRegion.bottom = offset.y + copySize.height;
+ sourceRegion.back = offset.z + copySize.depthOrArrayLayers;
+ return sourceRegion;
+}
+
+bool IsTypeless(DXGI_FORMAT format) {
+ // List generated from <dxgiformat.h>
+ switch (format) {
+ case DXGI_FORMAT_R32G32B32A32_TYPELESS:
+ case DXGI_FORMAT_R32G32B32_TYPELESS:
+ case DXGI_FORMAT_R16G16B16A16_TYPELESS:
+ case DXGI_FORMAT_R32G32_TYPELESS:
+ case DXGI_FORMAT_R32G8X24_TYPELESS:
+ case DXGI_FORMAT_R32_FLOAT_X8X24_TYPELESS:
+ case DXGI_FORMAT_R10G10B10A2_TYPELESS:
+ case DXGI_FORMAT_R8G8B8A8_TYPELESS:
+ case DXGI_FORMAT_R16G16_TYPELESS:
+ case DXGI_FORMAT_R32_TYPELESS:
+ case DXGI_FORMAT_R24G8_TYPELESS:
+ case DXGI_FORMAT_R24_UNORM_X8_TYPELESS:
+ case DXGI_FORMAT_R8G8_TYPELESS:
+ case DXGI_FORMAT_R16_TYPELESS:
+ case DXGI_FORMAT_R8_TYPELESS:
+ case DXGI_FORMAT_BC1_TYPELESS:
+ case DXGI_FORMAT_BC2_TYPELESS:
+ case DXGI_FORMAT_BC3_TYPELESS:
+ case DXGI_FORMAT_BC4_TYPELESS:
+ case DXGI_FORMAT_BC5_TYPELESS:
+ case DXGI_FORMAT_B8G8R8A8_TYPELESS:
+ case DXGI_FORMAT_B8G8R8X8_TYPELESS:
+ case DXGI_FORMAT_BC6H_TYPELESS:
+ case DXGI_FORMAT_BC7_TYPELESS:
+ return true;
+ default:
+ return false;
}
-
- void Record2DBufferTextureCopyWithSplit(BufferTextureCopyDirection direction,
- ID3D12GraphicsCommandList* commandList,
- ID3D12Resource* bufferResource,
- const uint64_t offset,
- const uint32_t bytesPerRow,
- const uint32_t rowsPerImage,
- const TextureCopy& textureCopy,
- const TexelBlockInfo& blockInfo,
- const Extent3D& copySize) {
- // See comments in Compute2DTextureCopySplits() for more details.
- const TextureCopySplits copySplits = Compute2DTextureCopySplits(
- textureCopy.origin, copySize, blockInfo, offset, bytesPerRow, rowsPerImage);
-
- const uint64_t bytesPerLayer = bytesPerRow * rowsPerImage;
-
- // copySplits.copySubresources[1] is always calculated for the second copy layer with
- // extra "bytesPerLayer" copy offset compared with the first copy layer. So
- // here we use an array bufferOffsetsForNextLayer to record the extra offsets
- // for each copy layer: bufferOffsetsForNextLayer[0] is the extra offset for
- // the next copy layer that uses copySplits.copySubresources[0], and
- // bufferOffsetsForNextLayer[1] is the extra offset for the next copy layer
- // that uses copySplits.copySubresources[1].
- std::array<uint64_t, TextureCopySplits::kMaxTextureCopySubresources>
- bufferOffsetsForNextLayer = {{0u, 0u}};
-
- for (uint32_t copyLayer = 0; copyLayer < copySize.depthOrArrayLayers; ++copyLayer) {
- const uint32_t splitIndex = copyLayer % copySplits.copySubresources.size();
-
- const TextureCopySubresource& copySplitPerLayerBase =
- copySplits.copySubresources[splitIndex];
- const uint64_t bufferOffsetForNextLayer = bufferOffsetsForNextLayer[splitIndex];
- const uint32_t copyTextureLayer = copyLayer + textureCopy.origin.z;
-
- RecordBufferTextureCopyFromSplits(direction, commandList, copySplitPerLayerBase,
- bufferResource, bufferOffsetForNextLayer, bytesPerRow,
- textureCopy.texture.Get(), textureCopy.mipLevel,
- copyTextureLayer, textureCopy.aspect);
-
- bufferOffsetsForNextLayer[splitIndex] +=
- bytesPerLayer * copySplits.copySubresources.size();
+}
+
+void RecordBufferTextureCopyFromSplits(BufferTextureCopyDirection direction,
+ ID3D12GraphicsCommandList* commandList,
+ const TextureCopySubresource& baseCopySplit,
+ ID3D12Resource* bufferResource,
+ uint64_t baseOffset,
+ uint64_t bufferBytesPerRow,
+ TextureBase* textureBase,
+ uint32_t textureMiplevel,
+ uint32_t textureLayer,
+ Aspect aspect) {
+ Texture* texture = ToBackend(textureBase);
+ const D3D12_TEXTURE_COPY_LOCATION textureLocation =
+ ComputeTextureCopyLocationForTexture(texture, textureMiplevel, textureLayer, aspect);
+
+ for (uint32_t i = 0; i < baseCopySplit.count; ++i) {
+ const TextureCopySubresource::CopyInfo& info = baseCopySplit.copies[i];
+
+ // TODO(jiawei.shao@intel.com): pre-compute bufferLocation and sourceRegion as
+ // members in TextureCopySubresource::CopyInfo.
+ const uint64_t offsetBytes = info.alignedOffset + baseOffset;
+ const D3D12_TEXTURE_COPY_LOCATION bufferLocation =
+ ComputeBufferLocationForCopyTextureRegion(texture, bufferResource, info.bufferSize,
+ offsetBytes, bufferBytesPerRow, aspect);
+ if (direction == BufferTextureCopyDirection::B2T) {
+ const D3D12_BOX sourceRegion =
+ ComputeD3D12BoxFromOffsetAndSize(info.bufferOffset, info.copySize);
+
+ commandList->CopyTextureRegion(&textureLocation, info.textureOffset.x,
+ info.textureOffset.y, info.textureOffset.z,
+ &bufferLocation, &sourceRegion);
+ } else {
+ ASSERT(direction == BufferTextureCopyDirection::T2B);
+ const D3D12_BOX sourceRegion =
+ ComputeD3D12BoxFromOffsetAndSize(info.textureOffset, info.copySize);
+
+ commandList->CopyTextureRegion(&bufferLocation, info.bufferOffset.x,
+ info.bufferOffset.y, info.bufferOffset.z,
+ &textureLocation, &sourceRegion);
}
}
+}
+
+void Record2DBufferTextureCopyWithSplit(BufferTextureCopyDirection direction,
+ ID3D12GraphicsCommandList* commandList,
+ ID3D12Resource* bufferResource,
+ const uint64_t offset,
+ const uint32_t bytesPerRow,
+ const uint32_t rowsPerImage,
+ const TextureCopy& textureCopy,
+ const TexelBlockInfo& blockInfo,
+ const Extent3D& copySize) {
+ // See comments in Compute2DTextureCopySplits() for more details.
+ const TextureCopySplits copySplits = Compute2DTextureCopySplits(
+ textureCopy.origin, copySize, blockInfo, offset, bytesPerRow, rowsPerImage);
+
+ const uint64_t bytesPerLayer = bytesPerRow * rowsPerImage;
+
+ // copySplits.copySubresources[1] is always calculated for the second copy layer with
+ // extra "bytesPerLayer" copy offset compared with the first copy layer. So
+ // here we use an array bufferOffsetsForNextLayer to record the extra offsets
+ // for each copy layer: bufferOffsetsForNextLayer[0] is the extra offset for
+ // the next copy layer that uses copySplits.copySubresources[0], and
+ // bufferOffsetsForNextLayer[1] is the extra offset for the next copy layer
+ // that uses copySplits.copySubresources[1].
+ std::array<uint64_t, TextureCopySplits::kMaxTextureCopySubresources> bufferOffsetsForNextLayer =
+ {{0u, 0u}};
+
+ for (uint32_t copyLayer = 0; copyLayer < copySize.depthOrArrayLayers; ++copyLayer) {
+ const uint32_t splitIndex = copyLayer % copySplits.copySubresources.size();
+
+ const TextureCopySubresource& copySplitPerLayerBase =
+ copySplits.copySubresources[splitIndex];
+ const uint64_t bufferOffsetForNextLayer = bufferOffsetsForNextLayer[splitIndex];
+ const uint32_t copyTextureLayer = copyLayer + textureCopy.origin.z;
+
+ RecordBufferTextureCopyFromSplits(direction, commandList, copySplitPerLayerBase,
+ bufferResource, bufferOffsetForNextLayer, bytesPerRow,
+ textureCopy.texture.Get(), textureCopy.mipLevel,
+ copyTextureLayer, textureCopy.aspect);
+
+ bufferOffsetsForNextLayer[splitIndex] += bytesPerLayer * copySplits.copySubresources.size();
+ }
+}
+
+void RecordBufferTextureCopyWithBufferHandle(BufferTextureCopyDirection direction,
+ ID3D12GraphicsCommandList* commandList,
+ ID3D12Resource* bufferResource,
+ const uint64_t offset,
+ const uint32_t bytesPerRow,
+ const uint32_t rowsPerImage,
+ const TextureCopy& textureCopy,
+ const Extent3D& copySize) {
+ ASSERT(HasOneBit(textureCopy.aspect));
+
+ TextureBase* texture = textureCopy.texture.Get();
+ const TexelBlockInfo& blockInfo = texture->GetFormat().GetAspectInfo(textureCopy.aspect).block;
+
+ switch (texture->GetDimension()) {
+ case wgpu::TextureDimension::e1D: {
+            // 1D texture copy splits are a subset of the single-layer 2D texture copy splits,
+            // at least as long as 1D textures can only have a single array layer.
+ ASSERT(texture->GetArrayLayers() == 1);
+
+ TextureCopySubresource copyRegions = Compute2DTextureCopySubresource(
+ textureCopy.origin, copySize, blockInfo, offset, bytesPerRow);
+ RecordBufferTextureCopyFromSplits(direction, commandList, copyRegions, bufferResource,
+ 0, bytesPerRow, texture, textureCopy.mipLevel, 0,
+ textureCopy.aspect);
+ break;
+ }
- void RecordBufferTextureCopyWithBufferHandle(BufferTextureCopyDirection direction,
- ID3D12GraphicsCommandList* commandList,
- ID3D12Resource* bufferResource,
- const uint64_t offset,
- const uint32_t bytesPerRow,
- const uint32_t rowsPerImage,
- const TextureCopy& textureCopy,
- const Extent3D& copySize) {
- ASSERT(HasOneBit(textureCopy.aspect));
-
- TextureBase* texture = textureCopy.texture.Get();
- const TexelBlockInfo& blockInfo =
- texture->GetFormat().GetAspectInfo(textureCopy.aspect).block;
-
- switch (texture->GetDimension()) {
- case wgpu::TextureDimension::e1D: {
- // 1D textures copy splits are a subset of the single-layer 2D texture copy splits,
- // at least while 1D textures can only have a single array layer.
- ASSERT(texture->GetArrayLayers() == 1);
-
- TextureCopySubresource copyRegions = Compute2DTextureCopySubresource(
- textureCopy.origin, copySize, blockInfo, offset, bytesPerRow);
- RecordBufferTextureCopyFromSplits(direction, commandList, copyRegions,
- bufferResource, 0, bytesPerRow, texture,
- textureCopy.mipLevel, 0, textureCopy.aspect);
- break;
- }
-
- // Record the CopyTextureRegion commands for 2D textures, with special handling of array
- // layers since each require their own set of copies.
- case wgpu::TextureDimension::e2D:
- Record2DBufferTextureCopyWithSplit(direction, commandList, bufferResource, offset,
- bytesPerRow, rowsPerImage, textureCopy,
- blockInfo, copySize);
- break;
-
- case wgpu::TextureDimension::e3D: {
- // See comments in Compute3DTextureCopySplits() for more details.
- TextureCopySubresource copyRegions = Compute3DTextureCopySplits(
- textureCopy.origin, copySize, blockInfo, offset, bytesPerRow, rowsPerImage);
-
- RecordBufferTextureCopyFromSplits(direction, commandList, copyRegions,
- bufferResource, 0, bytesPerRow, texture,
- textureCopy.mipLevel, 0, textureCopy.aspect);
- break;
- }
+        // Record the CopyTextureRegion commands for 2D textures, with special handling of array
+        // layers since each one requires its own set of copies.
+ case wgpu::TextureDimension::e2D:
+ Record2DBufferTextureCopyWithSplit(direction, commandList, bufferResource, offset,
+ bytesPerRow, rowsPerImage, textureCopy, blockInfo,
+ copySize);
+ break;
+
+ case wgpu::TextureDimension::e3D: {
+ // See comments in Compute3DTextureCopySplits() for more details.
+ TextureCopySubresource copyRegions = Compute3DTextureCopySplits(
+ textureCopy.origin, copySize, blockInfo, offset, bytesPerRow, rowsPerImage);
+
+ RecordBufferTextureCopyFromSplits(direction, commandList, copyRegions, bufferResource,
+ 0, bytesPerRow, texture, textureCopy.mipLevel, 0,
+ textureCopy.aspect);
+ break;
}
}
-
- void RecordBufferTextureCopy(BufferTextureCopyDirection direction,
- ID3D12GraphicsCommandList* commandList,
- const BufferCopy& bufferCopy,
- const TextureCopy& textureCopy,
- const Extent3D& copySize) {
- RecordBufferTextureCopyWithBufferHandle(direction, commandList,
- ToBackend(bufferCopy.buffer)->GetD3D12Resource(),
- bufferCopy.offset, bufferCopy.bytesPerRow,
- bufferCopy.rowsPerImage, textureCopy, copySize);
+}
+
+void RecordBufferTextureCopy(BufferTextureCopyDirection direction,
+ ID3D12GraphicsCommandList* commandList,
+ const BufferCopy& bufferCopy,
+ const TextureCopy& textureCopy,
+ const Extent3D& copySize) {
+ ID3D12Resource* bufferResource = ToBackend(bufferCopy.buffer)->GetD3D12Resource();
+
+ if (NeedBufferSizeWorkaroundForBufferTextureCopyOnD3D12(bufferCopy, textureCopy, copySize)) {
+        // Split the copy into two copies when the size of bufferCopy.buffer doesn't meet D3D12's
+        // requirement and a workaround is needed:
+        //  - The first copy copies all depth images except the last one,
+        //  - The second copy copies only the last depth image.
+ Extent3D extentForAllButTheLastImage = copySize;
+ extentForAllButTheLastImage.depthOrArrayLayers -= 1;
+ RecordBufferTextureCopyWithBufferHandle(
+ direction, commandList, bufferResource, bufferCopy.offset, bufferCopy.bytesPerRow,
+ bufferCopy.rowsPerImage, textureCopy, extentForAllButTheLastImage);
+
+ Extent3D extentForTheLastImage = copySize;
+ extentForTheLastImage.depthOrArrayLayers = 1;
+
+ TextureCopy textureCopyForTheLastImage = textureCopy;
+ textureCopyForTheLastImage.origin.z += copySize.depthOrArrayLayers - 1;
+
+ uint64_t copiedBytes =
+ bufferCopy.bytesPerRow * bufferCopy.rowsPerImage * (copySize.depthOrArrayLayers - 1);
+ RecordBufferTextureCopyWithBufferHandle(direction, commandList, bufferResource,
+ bufferCopy.offset + copiedBytes,
+ bufferCopy.bytesPerRow, bufferCopy.rowsPerImage,
+ textureCopyForTheLastImage, extentForTheLastImage);
+ return;
}
- void SetDebugName(Device* device, ID3D12Object* object, const char* prefix, std::string label) {
- if (!object) {
- return;
- }
+ RecordBufferTextureCopyWithBufferHandle(direction, commandList, bufferResource,
+ bufferCopy.offset, bufferCopy.bytesPerRow,
+ bufferCopy.rowsPerImage, textureCopy, copySize);
+}
- if (label.empty() || !device->IsToggleEnabled(Toggle::UseUserDefinedLabelsInBackend)) {
- object->SetPrivateData(WKPDID_D3DDebugObjectName, strlen(prefix), prefix);
- return;
- }
+void SetDebugName(Device* device, ID3D12Object* object, const char* prefix, std::string label) {
+ if (!object) {
+ return;
+ }
- std::string objectName = prefix;
- objectName += "_";
- objectName += label;
- object->SetPrivateData(WKPDID_D3DDebugObjectName, objectName.length(), objectName.c_str());
+ if (label.empty() || !device->IsToggleEnabled(Toggle::UseUserDefinedLabelsInBackend)) {
+ object->SetPrivateData(WKPDID_D3DDebugObjectName, strlen(prefix), prefix);
+ return;
}
+ std::string objectName = prefix;
+ objectName += "_";
+ objectName += label;
+ object->SetPrivateData(WKPDID_D3DDebugObjectName, objectName.length(), objectName.c_str());
+}
+
+uint64_t MakeDXCVersion(uint64_t majorVersion, uint64_t minorVersion) {
+ return (majorVersion << 32) + minorVersion;
+}
+
} // namespace dawn::native::d3d12
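The workaround path in RecordBufferTextureCopy above peels the last depth image off into its own copy. A minimal standalone sketch of that offset arithmetic, with simplified stand-in types rather than Dawn's:

#include <cstdint>
#include <cstdio>

struct SplitCopy {
    uint64_t firstOffset;    // offset of the copy covering all but the last depth image
    uint32_t firstDepth;     // depth of that first copy
    uint64_t secondOffset;   // offset of the copy covering only the last depth image
    uint32_t secondOriginZ;  // z origin of the last depth image in the texture
};

SplitCopy SplitLastImage(uint64_t offset, uint32_t bytesPerRow, uint32_t rowsPerImage,
                         uint32_t originZ, uint32_t depthOrArrayLayers) {
    SplitCopy out;
    out.firstOffset = offset;
    out.firstDepth = depthOrArrayLayers - 1;
    // Bytes consumed by every depth image except the last one.
    const uint64_t copiedBytes = uint64_t(bytesPerRow) * rowsPerImage * (depthOrArrayLayers - 1);
    out.secondOffset = offset + copiedBytes;
    out.secondOriginZ = originZ + depthOrArrayLayers - 1;
    return out;
}

int main() {
    // 4 layers, 256 bytes per row, 16 rows per image, starting at buffer offset 512.
    const SplitCopy s = SplitLastImage(512, 256, 16, 0, 4);
    std::printf("first copy:  offset=%llu depth=%u\n",
                static_cast<unsigned long long>(s.firstOffset), s.firstDepth);
    std::printf("second copy: offset=%llu originZ=%u\n",
                static_cast<unsigned long long>(s.secondOffset), s.secondOriginZ);
    return 0;
}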
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/UtilsD3D12.h b/chromium/third_party/dawn/src/dawn/native/d3d12/UtilsD3D12.h
index 912d1b7f342..1418f545251 100644
--- a/chromium/third_party/dawn/src/dawn/native/d3d12/UtilsD3D12.h
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/UtilsD3D12.h
@@ -15,6 +15,8 @@
#ifndef SRC_DAWN_NATIVE_D3D12_UTILSD3D12_H_
#define SRC_DAWN_NATIVE_D3D12_UTILSD3D12_H_
+#include <string>
+
#include "dawn/native/Commands.h"
#include "dawn/native/d3d12/BufferD3D12.h"
#include "dawn/native/d3d12/TextureCopySplitter.h"
@@ -24,50 +26,49 @@
namespace dawn::native::d3d12 {
- ResultOrError<std::wstring> ConvertStringToWstring(const char* str);
-
- D3D12_COMPARISON_FUNC ToD3D12ComparisonFunc(wgpu::CompareFunction func);
-
- D3D12_TEXTURE_COPY_LOCATION ComputeTextureCopyLocationForTexture(const Texture* texture,
- uint32_t level,
- uint32_t layer,
- Aspect aspect);
-
- D3D12_TEXTURE_COPY_LOCATION ComputeBufferLocationForCopyTextureRegion(
- const Texture* texture,
- ID3D12Resource* bufferResource,
- const Extent3D& bufferSize,
- const uint64_t offset,
- const uint32_t rowPitch,
- Aspect aspect);
- D3D12_BOX ComputeD3D12BoxFromOffsetAndSize(const Origin3D& offset, const Extent3D& copySize);
-
- bool IsTypeless(DXGI_FORMAT format);
-
- enum class BufferTextureCopyDirection {
- B2T,
- T2B,
- };
-
- void RecordBufferTextureCopyWithBufferHandle(BufferTextureCopyDirection direction,
- ID3D12GraphicsCommandList* commandList,
- ID3D12Resource* bufferResource,
- const uint64_t offset,
- const uint32_t bytesPerRow,
- const uint32_t rowsPerImage,
- const TextureCopy& textureCopy,
- const Extent3D& copySize);
-
- void RecordBufferTextureCopy(BufferTextureCopyDirection direction,
- ID3D12GraphicsCommandList* commandList,
- const BufferCopy& bufferCopy,
- const TextureCopy& textureCopy,
- const Extent3D& copySize);
-
- void SetDebugName(Device* device,
- ID3D12Object* object,
- const char* prefix,
- std::string label = "");
+ResultOrError<std::wstring> ConvertStringToWstring(const char* str);
+
+D3D12_COMPARISON_FUNC ToD3D12ComparisonFunc(wgpu::CompareFunction func);
+
+D3D12_TEXTURE_COPY_LOCATION ComputeTextureCopyLocationForTexture(const Texture* texture,
+ uint32_t level,
+ uint32_t layer,
+ Aspect aspect);
+
+D3D12_TEXTURE_COPY_LOCATION ComputeBufferLocationForCopyTextureRegion(
+ const Texture* texture,
+ ID3D12Resource* bufferResource,
+ const Extent3D& bufferSize,
+ const uint64_t offset,
+ const uint32_t rowPitch,
+ Aspect aspect);
+D3D12_BOX ComputeD3D12BoxFromOffsetAndSize(const Origin3D& offset, const Extent3D& copySize);
+
+bool IsTypeless(DXGI_FORMAT format);
+
+enum class BufferTextureCopyDirection {
+ B2T,
+ T2B,
+};
+
+void RecordBufferTextureCopyWithBufferHandle(BufferTextureCopyDirection direction,
+ ID3D12GraphicsCommandList* commandList,
+ ID3D12Resource* bufferResource,
+ const uint64_t offset,
+ const uint32_t bytesPerRow,
+ const uint32_t rowsPerImage,
+ const TextureCopy& textureCopy,
+ const Extent3D& copySize);
+
+void RecordBufferTextureCopy(BufferTextureCopyDirection direction,
+ ID3D12GraphicsCommandList* commandList,
+ const BufferCopy& bufferCopy,
+ const TextureCopy& textureCopy,
+ const Extent3D& copySize);
+
+void SetDebugName(Device* device, ID3D12Object* object, const char* prefix, std::string label = "");
+
+uint64_t MakeDXCVersion(uint64_t majorVersion, uint64_t minorVersion);
} // namespace dawn::native::d3d12
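For reference, MakeDXCVersion declared above packs the major version into the high 32 bits and the minor version into the low 32 bits. A minimal sketch of that packing; the version numbers here are only examples:

#include <cassert>
#include <cstdint>

uint64_t MakeDXCVersion(uint64_t majorVersion, uint64_t minorVersion) {
    return (majorVersion << 32) + minorVersion;
}

int main() {
    const uint64_t v = MakeDXCVersion(1, 6);  // hypothetical DXC 1.6
    assert((v >> 32) == 1);                   // major version in the high bits
    assert((v & 0xFFFFFFFFu) == 6);           // minor version in the low bits
    // Packed values compare in (major, minor) order as long as the minor fits in 32 bits.
    assert(MakeDXCVersion(1, 6) < MakeDXCVersion(1, 7));
    assert(MakeDXCVersion(1, 7) < MakeDXCVersion(2, 0));
    return 0;
}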
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/d3d12_platform.h b/chromium/third_party/dawn/src/dawn/native/d3d12/d3d12_platform.h
index 0622576b9be..59542bf883f 100644
--- a/chromium/third_party/dawn/src/dawn/native/d3d12/d3d12_platform.h
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/d3d12_platform.h
@@ -18,19 +18,20 @@
// Pre-emptively include windows.h but remove its macros so that they aren't set when declaring the
// COM interfaces. Otherwise ID3D12InfoQueue::GetMessage would be either GetMessageA or GetMessageW
// which causes compilation errors.
+// NOLINTNEXTLINE(build/include_order)
#include "dawn/common/windows_with_undefs.h"
-#include <d3d11_2.h>
-#include <d3d11on12.h>
-#include <d3d12.h>
-#include <dxcapi.h>
-#include <dxgi1_4.h>
-#include <wrl.h>
+#include <d3d11_2.h> // NOLINT(build/include_order)
+#include <d3d11on12.h> // NOLINT(build/include_order)
+#include <d3d12.h> // NOLINT(build/include_order)
+#include <dxcapi.h> // NOLINT(build/include_order)
+#include <dxgi1_4.h> // NOLINT(build/include_order)
+#include <wrl.h> // NOLINT(build/include_order)
// DXProgrammableCapture.h takes a dependency on other platform header
// files, so it must be defined after them.
-#include <DXProgrammableCapture.h>
-#include <dxgidebug.h>
+#include <DXProgrammableCapture.h> // NOLINT(build/include_order)
+#include <dxgidebug.h> // NOLINT(build/include_order)
using Microsoft::WRL::ComPtr;
diff --git a/chromium/third_party/dawn/src/dawn/native/dawn_platform.h b/chromium/third_party/dawn/src/dawn/native/dawn_platform.h
index 3be467bd340..23388b500fc 100644
--- a/chromium/third_party/dawn/src/dawn/native/dawn_platform.h
+++ b/chromium/third_party/dawn/src/dawn/native/dawn_platform.h
@@ -16,47 +16,50 @@
#define SRC_DAWN_NATIVE_DAWN_PLATFORM_H_
// Use webgpu_cpp to have the enum and bitfield definitions
-#include <dawn/webgpu_cpp.h>
+#include "dawn/webgpu_cpp.h"
-#include <dawn/native/dawn_platform_autogen.h>
+#include "dawn/native/dawn_platform_autogen.h"
namespace dawn::native {
- // kEnumCount is a constant specifying the number of enums in a WebGPU enum type,
- // if the enums are contiguous, making it suitable for iteration.
- // It is defined in dawn_platform_autogen.h
- template <typename T>
- constexpr uint32_t kEnumCount = EnumCount<T>::value;
-
- // Extra buffer usages
- // Add an extra buffer usage and an extra binding type for binding the buffers with QueryResolve
- // usage as storage buffer in the internal pipeline.
- static constexpr wgpu::BufferUsage kInternalStorageBuffer =
- static_cast<wgpu::BufferUsage>(0x40000000);
-
- // Add an extra buffer usage (readonly storage buffer usage) for render pass resource tracking
- static constexpr wgpu::BufferUsage kReadOnlyStorageBuffer =
- static_cast<wgpu::BufferUsage>(0x80000000);
-
- // Extra texture usages
- // Add an extra texture usage (readonly render attachment usage) for render pass resource
- // tracking
- static constexpr wgpu::TextureUsage kReadOnlyRenderAttachment =
- static_cast<wgpu::TextureUsage>(0x40000000);
-
- // Internal usage to help tracking when a subresource is used as render attachment usage
- // more than once in a render pass.
- static constexpr wgpu::TextureUsage kAgainAsRenderAttachment =
- static_cast<wgpu::TextureUsage>(0x80000001);
-
- // Add an extra texture usage for textures that will be presented, for use in backends
- // that needs to transition to present usage.
- // This currently aliases wgpu::TextureUsage::Present, we would assign it
- // some bit when wgpu::TextureUsage::Present is removed.
- static constexpr wgpu::TextureUsage kPresentTextureUsage = wgpu::TextureUsage::Present;
-
- static constexpr wgpu::BufferBindingType kInternalStorageBufferBinding =
- static_cast<wgpu::BufferBindingType>(0xFFFFFFFF);
+// kEnumCount is a constant specifying the number of enumerators in a WebGPU enum type;
+// when the enumerators are contiguous it is suitable for iteration.
+// It is defined in dawn_platform_autogen.h
+template <typename T>
+constexpr uint32_t kEnumCount = EnumCount<T>::value;
+
+// Extra buffer usages
+// Add an extra buffer usage and an extra binding type for binding the buffers with QueryResolve
+// usage as storage buffer in the internal pipeline.
+static constexpr wgpu::BufferUsage kInternalStorageBuffer =
+ static_cast<wgpu::BufferUsage>(0x40000000);
+
+// Add an extra buffer usage (readonly storage buffer usage) for render pass resource tracking
+static constexpr wgpu::BufferUsage kReadOnlyStorageBuffer =
+ static_cast<wgpu::BufferUsage>(0x80000000);
+
+static constexpr wgpu::BufferUsage kAllInternalBufferUsages =
+ kInternalStorageBuffer | kReadOnlyStorageBuffer;
+
+// Extra texture usages
+// Add an extra texture usage (readonly render attachment usage) for render pass resource
+// tracking
+static constexpr wgpu::TextureUsage kReadOnlyRenderAttachment =
+ static_cast<wgpu::TextureUsage>(0x40000000);
+
+// Internal usage to help tracking when a subresource is used as render attachment usage
+// more than once in a render pass.
+static constexpr wgpu::TextureUsage kAgainAsRenderAttachment =
+ static_cast<wgpu::TextureUsage>(0x80000001);
+
+// Add an extra texture usage for textures that will be presented, for use in backends
+// that need to transition to present usage.
+// This currently aliases wgpu::TextureUsage::Present; we will assign it
+// its own bit when wgpu::TextureUsage::Present is removed.
+static constexpr wgpu::TextureUsage kPresentTextureUsage = wgpu::TextureUsage::Present;
+
+static constexpr wgpu::BufferBindingType kInternalStorageBufferBinding =
+ static_cast<wgpu::BufferBindingType>(0xFFFFFFFF);
} // namespace dawn::native
#endif // SRC_DAWN_NATIVE_DAWN_PLATFORM_H_
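A minimal standalone sketch of how the internal bits above can be combined and masked. Plain uint32_t flags stand in for the wgpu enum types, and the external-usage masking shown is an illustrative use of kAllInternalBufferUsages, not Dawn's code:

#include <cassert>
#include <cstdint>

// Stand-in values; the two internal bits mirror the constants declared above.
constexpr uint32_t kCopySrc = 0x00000004;  // stand-in for a public wgpu::BufferUsage bit
constexpr uint32_t kInternalStorageBuffer = 0x40000000;
constexpr uint32_t kReadOnlyStorageBuffer = 0x80000000;
constexpr uint32_t kAllInternalBufferUsages = kInternalStorageBuffer | kReadOnlyStorageBuffer;

int main() {
    const uint32_t usage = kCopySrc | kInternalStorageBuffer;
    // Detect whether any internal-only bit is set...
    assert((usage & kAllInternalBufferUsages) != 0);
    // ...and strip the internal bits before exposing the usage externally.
    const uint32_t externalUsage = usage & ~kAllInternalBufferUsages;
    assert(externalUsage == kCopySrc);
    return 0;
}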
diff --git a/chromium/third_party/dawn/src/dawn/native/metal/BackendMTL.h b/chromium/third_party/dawn/src/dawn/native/metal/BackendMTL.h
index 88608da4478..75be73040e7 100644
--- a/chromium/third_party/dawn/src/dawn/native/metal/BackendMTL.h
+++ b/chromium/third_party/dawn/src/dawn/native/metal/BackendMTL.h
@@ -15,18 +15,21 @@
#ifndef SRC_DAWN_NATIVE_METAL_BACKENDMTL_H_
#define SRC_DAWN_NATIVE_METAL_BACKENDMTL_H_
+#include <vector>
+
#include "dawn/native/BackendConnection.h"
namespace dawn::native::metal {
- class Backend : public BackendConnection {
- public:
- explicit Backend(InstanceBase* instance);
+class Backend : public BackendConnection {
+ public:
+ explicit Backend(InstanceBase* instance);
+ ~Backend() override;
- std::vector<Ref<AdapterBase>> DiscoverDefaultAdapters() override;
- ResultOrError<std::vector<Ref<AdapterBase>>> DiscoverAdapters(
- const AdapterDiscoveryOptionsBase* optionsBase) override;
- };
+ std::vector<Ref<AdapterBase>> DiscoverDefaultAdapters() override;
+ ResultOrError<std::vector<Ref<AdapterBase>>> DiscoverAdapters(
+ const AdapterDiscoveryOptionsBase* optionsBase) override;
+};
} // namespace dawn::native::metal
diff --git a/chromium/third_party/dawn/src/dawn/native/metal/BackendMTL.mm b/chromium/third_party/dawn/src/dawn/native/metal/BackendMTL.mm
index 6f4751f08c0..4d5d8543548 100644
--- a/chromium/third_party/dawn/src/dawn/native/metal/BackendMTL.mm
+++ b/chromium/third_party/dawn/src/dawn/native/metal/BackendMTL.mm
@@ -25,480 +25,475 @@
#include "dawn/native/metal/BufferMTL.h"
#include "dawn/native/metal/DeviceMTL.h"
-#if defined(DAWN_PLATFORM_MACOS)
-# import <IOKit/IOKitLib.h>
-# include "dawn/common/IOKitRef.h"
+#if DAWN_PLATFORM_IS(MACOS)
+#import <IOKit/IOKitLib.h>
+#include "dawn/common/IOKitRef.h"
#endif
#include <vector>
namespace dawn::native::metal {
- namespace {
-
- struct PCIIDs {
- uint32_t vendorId;
- uint32_t deviceId;
- };
-
- struct Vendor {
- const char* trademark;
- uint32_t vendorId;
- };
-
-#if defined(DAWN_PLATFORM_MACOS)
- const Vendor kVendors[] = {{"AMD", gpu_info::kVendorID_AMD},
- {"Radeon", gpu_info::kVendorID_AMD},
- {"Intel", gpu_info::kVendorID_Intel},
- {"Geforce", gpu_info::kVendorID_Nvidia},
- {"Quadro", gpu_info::kVendorID_Nvidia}};
-
- // Find vendor ID from MTLDevice name.
- MaybeError GetVendorIdFromVendors(id<MTLDevice> device, PCIIDs* ids) {
- uint32_t vendorId = 0;
- const char* deviceName = [device.name UTF8String];
- for (const auto& it : kVendors) {
- if (strstr(deviceName, it.trademark) != nullptr) {
- vendorId = it.vendorId;
- break;
- }
- }
-
- if (vendorId == 0) {
- return DAWN_INTERNAL_ERROR("Failed to find vendor id with the device");
- }
-
- // Set vendor id with 0
- *ids = PCIIDs{vendorId, 0};
- return {};
+namespace {
+
+struct PCIIDs {
+ uint32_t vendorId;
+ uint32_t deviceId;
+};
+
+struct Vendor {
+ const char* trademark;
+ uint32_t vendorId;
+};
+
+#if DAWN_PLATFORM_IS(MACOS)
+const Vendor kVendors[] = {{"AMD", gpu_info::kVendorID_AMD},
+ {"Radeon", gpu_info::kVendorID_AMD},
+ {"Intel", gpu_info::kVendorID_Intel},
+ {"Geforce", gpu_info::kVendorID_Nvidia},
+ {"Quadro", gpu_info::kVendorID_Nvidia}};
+
+// Find vendor ID from MTLDevice name.
+MaybeError GetVendorIdFromVendors(id<MTLDevice> device, PCIIDs* ids) {
+ uint32_t vendorId = 0;
+ const char* deviceName = [device.name UTF8String];
+ for (const auto& it : kVendors) {
+ if (strstr(deviceName, it.trademark) != nullptr) {
+ vendorId = it.vendorId;
+ break;
}
+ }
- // Extracts an integer property from a registry entry.
- uint32_t GetEntryProperty(io_registry_entry_t entry, CFStringRef name) {
- uint32_t value = 0;
+ if (vendorId == 0) {
+ return DAWN_INTERNAL_ERROR("Failed to find vendor id with the device");
+ }
- // Recursively search registry entry and its parents for property name
- // The data should release with CFRelease
- CFRef<CFDataRef> data =
- AcquireCFRef(static_cast<CFDataRef>(IORegistryEntrySearchCFProperty(
- entry, kIOServicePlane, name, kCFAllocatorDefault,
- kIORegistryIterateRecursively | kIORegistryIterateParents)));
+    // Only the vendor id can be recovered from the device name; use 0 for the device id.
+ *ids = PCIIDs{vendorId, 0};
+ return {};
+}
- if (data == nullptr) {
- return value;
- }
+// Extracts an integer property from a registry entry.
+uint32_t GetEntryProperty(io_registry_entry_t entry, CFStringRef name) {
+ uint32_t value = 0;
- // CFDataGetBytePtr() is guaranteed to return a read-only pointer
- value = *reinterpret_cast<const uint32_t*>(CFDataGetBytePtr(data.Get()));
- return value;
- }
+    // Recursively search the registry entry and its parents for the property name.
+    // The returned data must be released with CFRelease.
+ CFRef<CFDataRef> data = AcquireCFRef(static_cast<CFDataRef>(IORegistryEntrySearchCFProperty(
+ entry, kIOServicePlane, name, kCFAllocatorDefault,
+ kIORegistryIterateRecursively | kIORegistryIterateParents)));
- // Queries the IO Registry to find the PCI device and vendor IDs of the MTLDevice.
- // The registry entry correponding to [device registryID] doesn't contain the exact PCI ids
- // because it corresponds to a driver. However its parent entry corresponds to the device
- // itself and has uint32_t "device-id" and "registry-id" keys. For example on a dual-GPU
- // MacBook Pro 2017 the IORegistry explorer shows the following tree (simplified here):
- //
- // - PCI0@0
- // | - AppleACPIPCI
- // | | - IGPU@2 (type IOPCIDevice)
- // | | | - IntelAccelerator (type IOGraphicsAccelerator2)
- // | | - PEG0@1
- // | | | - IOPP
- // | | | | - GFX0@0 (type IOPCIDevice)
- // | | | | | - AMDRadeonX4000_AMDBaffinGraphicsAccelerator (type IOGraphicsAccelerator2)
- //
- // [device registryID] is the ID for one of the IOGraphicsAccelerator2 and we can see that
- // their parent always is an IOPCIDevice that has properties for the device and vendor IDs.
- MaybeError API_AVAILABLE(macos(10.13))
- GetDeviceIORegistryPCIInfo(id<MTLDevice> device, PCIIDs* ids) {
- // Get a matching dictionary for the IOGraphicsAccelerator2
- CFRef<CFMutableDictionaryRef> matchingDict =
- AcquireCFRef(IORegistryEntryIDMatching([device registryID]));
- if (matchingDict == nullptr) {
- return DAWN_INTERNAL_ERROR("Failed to create the matching dict for the device");
- }
+ if (data == nullptr) {
+ return value;
+ }
- // IOServiceGetMatchingService will consume the reference on the matching dictionary,
- // so we don't need to release the dictionary.
- IORef<io_registry_entry_t> acceleratorEntry = AcquireIORef(
- IOServiceGetMatchingService(kIOMasterPortDefault, matchingDict.Detach()));
- if (acceleratorEntry == IO_OBJECT_NULL) {
- return DAWN_INTERNAL_ERROR(
- "Failed to get the IO registry entry for the accelerator");
- }
+ // CFDataGetBytePtr() is guaranteed to return a read-only pointer
+ value = *reinterpret_cast<const uint32_t*>(CFDataGetBytePtr(data.Get()));
+ return value;
+}
- // Get the parent entry that will be the IOPCIDevice
- IORef<io_registry_entry_t> deviceEntry;
- if (IORegistryEntryGetParentEntry(acceleratorEntry.Get(), kIOServicePlane,
- deviceEntry.InitializeInto()) != kIOReturnSuccess) {
- return DAWN_INTERNAL_ERROR("Failed to get the IO registry entry for the device");
- }
+// Queries the IO Registry to find the PCI device and vendor IDs of the MTLDevice.
+// The registry entry corresponding to [device registryID] doesn't contain the exact PCI ids
+// because it corresponds to a driver. However its parent entry corresponds to the device
+// itself and has uint32_t "device-id" and "registry-id" keys. For example on a dual-GPU
+// MacBook Pro 2017 the IORegistry explorer shows the following tree (simplified here):
+//
+// - PCI0@0
+// | - AppleACPIPCI
+// | | - IGPU@2 (type IOPCIDevice)
+// | | | - IntelAccelerator (type IOGraphicsAccelerator2)
+// | | - PEG0@1
+// | | | - IOPP
+// | | | | - GFX0@0 (type IOPCIDevice)
+// | | | | | - AMDRadeonX4000_AMDBaffinGraphicsAccelerator (type IOGraphicsAccelerator2)
+//
+// [device registryID] is the ID for one of the IOGraphicsAccelerator2 entries, and we can
+// see that its parent is always an IOPCIDevice with properties for the device and vendor IDs.
+MaybeError API_AVAILABLE(macos(10.13))
+ GetDeviceIORegistryPCIInfo(id<MTLDevice> device, PCIIDs* ids) {
+ // Get a matching dictionary for the IOGraphicsAccelerator2
+ CFRef<CFMutableDictionaryRef> matchingDict =
+ AcquireCFRef(IORegistryEntryIDMatching([device registryID]));
+ if (matchingDict == nullptr) {
+ return DAWN_INTERNAL_ERROR("Failed to create the matching dict for the device");
+ }
- ASSERT(deviceEntry != IO_OBJECT_NULL);
+ // IOServiceGetMatchingService will consume the reference on the matching dictionary,
+ // so we don't need to release the dictionary.
+ IORef<io_registry_entry_t> acceleratorEntry =
+ AcquireIORef(IOServiceGetMatchingService(kIOMasterPortDefault, matchingDict.Detach()));
+ if (acceleratorEntry == IO_OBJECT_NULL) {
+ return DAWN_INTERNAL_ERROR("Failed to get the IO registry entry for the accelerator");
+ }
- uint32_t vendorId = GetEntryProperty(deviceEntry.Get(), CFSTR("vendor-id"));
- uint32_t deviceId = GetEntryProperty(deviceEntry.Get(), CFSTR("device-id"));
+ // Get the parent entry that will be the IOPCIDevice
+ IORef<io_registry_entry_t> deviceEntry;
+ if (IORegistryEntryGetParentEntry(acceleratorEntry.Get(), kIOServicePlane,
+ deviceEntry.InitializeInto()) != kIOReturnSuccess) {
+ return DAWN_INTERNAL_ERROR("Failed to get the IO registry entry for the device");
+ }
- *ids = PCIIDs{vendorId, deviceId};
+ ASSERT(deviceEntry != IO_OBJECT_NULL);
- return {};
- }
+ uint32_t vendorId = GetEntryProperty(deviceEntry.Get(), CFSTR("vendor-id"));
+ uint32_t deviceId = GetEntryProperty(deviceEntry.Get(), CFSTR("device-id"));
- MaybeError GetDevicePCIInfo(id<MTLDevice> device, PCIIDs* ids) {
- // [device registryID] is introduced on macOS 10.13+, otherwise workaround to get vendor
- // id by vendor name on old macOS
- if (@available(macos 10.13, *)) {
- return GetDeviceIORegistryPCIInfo(device, ids);
- } else {
- return GetVendorIdFromVendors(device, ids);
- }
- }
+ *ids = PCIIDs{vendorId, deviceId};
- bool IsMetalSupported() {
- // Metal was first introduced in macOS 10.11
- // WebGPU is targeted at macOS 10.12+
- // TODO(dawn:1181): Dawn native should allow non-conformant WebGPU on macOS 10.11
- return IsMacOSVersionAtLeast(10, 12);
- }
-#elif defined(DAWN_PLATFORM_IOS)
- MaybeError GetDevicePCIInfo(id<MTLDevice> device, PCIIDs* ids) {
- DAWN_UNUSED(device);
- *ids = PCIIDs{0, 0};
- return {};
- }
+ return {};
+}
- bool IsMetalSupported() {
- return true;
- }
+MaybeError GetDevicePCIInfo(id<MTLDevice> device, PCIIDs* ids) {
+    // [device registryID] was introduced in macOS 10.13; on older macOS, fall back to
+    // deriving the vendor id from the device name.
+ if (@available(macos 10.13, *)) {
+ return GetDeviceIORegistryPCIInfo(device, ids);
+ } else {
+ return GetVendorIdFromVendors(device, ids);
+ }
+}
+
+bool IsMetalSupported() {
+ // Metal was first introduced in macOS 10.11
+ // WebGPU is targeted at macOS 10.12+
+ // TODO(dawn:1181): Dawn native should allow non-conformant WebGPU on macOS 10.11
+ return IsMacOSVersionAtLeast(10, 12);
+}
+#elif DAWN_PLATFORM_IS(IOS)
+MaybeError GetDevicePCIInfo(id<MTLDevice> device, PCIIDs* ids) {
+ DAWN_UNUSED(device);
+ *ids = PCIIDs{0, 0};
+ return {};
+}
+
+bool IsMetalSupported() {
+ return true;
+}
#else
-# error "Unsupported Apple platform."
+#error "Unsupported Apple platform."
#endif
- DAWN_NOINLINE bool IsCounterSamplingBoundarySupport(id<MTLDevice> device)
- API_AVAILABLE(macos(11.0), ios(14.0)) {
- bool isBlitBoundarySupported =
- [device supportsCounterSampling:MTLCounterSamplingPointAtBlitBoundary];
- bool isDispatchBoundarySupported =
- [device supportsCounterSampling:MTLCounterSamplingPointAtDispatchBoundary];
- bool isDrawBoundarySupported =
- [device supportsCounterSampling:MTLCounterSamplingPointAtDrawBoundary];
-
- return isBlitBoundarySupported && isDispatchBoundarySupported &&
- isDrawBoundarySupported;
+DAWN_NOINLINE bool IsCounterSamplingBoundarySupport(id<MTLDevice> device)
+ API_AVAILABLE(macos(11.0), ios(14.0)) {
+ bool isBlitBoundarySupported =
+ [device supportsCounterSampling:MTLCounterSamplingPointAtBlitBoundary];
+ bool isDispatchBoundarySupported =
+ [device supportsCounterSampling:MTLCounterSamplingPointAtDispatchBoundary];
+ bool isDrawBoundarySupported =
+ [device supportsCounterSampling:MTLCounterSamplingPointAtDrawBoundary];
+
+ return isBlitBoundarySupported && isDispatchBoundarySupported && isDrawBoundarySupported;
+}
+
+// This method has seen hard-to-debug crashes. See crbug.com/dawn/1102.
+// For now, it is written defensively, with many potentially unnecessary guards until
+// we narrow down the cause of the problem.
+DAWN_NOINLINE bool IsGPUCounterSupported(id<MTLDevice> device,
+ MTLCommonCounterSet counterSetName,
+ std::vector<MTLCommonCounter> counterNames)
+ API_AVAILABLE(macos(10.15), ios(14.0)) {
+ NSPRef<id<MTLCounterSet>> counterSet = nil;
+ if (![device respondsToSelector:@selector(counterSets)]) {
+ dawn::ErrorLog() << "MTLDevice does not respond to selector: counterSets.";
+ return false;
+ }
+ NSArray<id<MTLCounterSet>>* counterSets = device.counterSets;
+ if (counterSets == nil) {
+ // On some systems, [device counterSets] may be null and not an empty array.
+ return false;
+ }
+    // MTLDevice’s counterSets property declares which counter sets it supports. Check
+    // that the requested counter set is available on the device before using it.
+    // Note: Don't use a for..in loop, to avoid a potentially crashy interaction with
+    // NSFastEnumeration.
+ for (NSUInteger i = 0; i < counterSets.count; ++i) {
+ id<MTLCounterSet> set = [counterSets objectAtIndex:i];
+ if ([set.name caseInsensitiveCompare:counterSetName] == NSOrderedSame) {
+ counterSet = set;
+ break;
}
+ }
- // This method has seen hard-to-debug crashes. See crbug.com/dawn/1102.
- // For now, it is written defensively, with many potentially unnecessary guards until
- // we narrow down the cause of the problem.
- DAWN_NOINLINE bool IsGPUCounterSupported(id<MTLDevice> device,
- MTLCommonCounterSet counterSetName,
- std::vector<MTLCommonCounter> counterNames)
- API_AVAILABLE(macos(10.15), ios(14.0)) {
- NSPRef<id<MTLCounterSet>> counterSet = nil;
- if (![device respondsToSelector:@selector(counterSets)]) {
- dawn::ErrorLog() << "MTLDevice does not respond to selector: counterSets.";
- return false;
- }
- NSArray<id<MTLCounterSet>>* counterSets = device.counterSets;
- if (counterSets == nil) {
- // On some systems, [device counterSets] may be null and not an empty array.
- return false;
- }
- // MTLDevice’s counterSets property declares which counter sets it supports. Check
- // whether it's available on the device before requesting a counter set.
- // Note: Don't do for..in loop to avoid potentially crashy interaction with
- // NSFastEnumeration.
- for (NSUInteger i = 0; i < counterSets.count; ++i) {
- id<MTLCounterSet> set = [counterSets objectAtIndex:i];
- if ([set.name caseInsensitiveCompare:counterSetName] == NSOrderedSame) {
- counterSet = set;
- break;
- }
- }
-
- // The counter set is not supported.
- if (counterSet == nil) {
- return false;
- }
-
- if (![*counterSet respondsToSelector:@selector(counters)]) {
- dawn::ErrorLog() << "MTLCounterSet does not respond to selector: counters.";
- return false;
- }
- NSArray<id<MTLCounter>>* countersInSet = (*counterSet).counters;
- if (countersInSet == nil) {
- // On some systems, [MTLCounterSet counters] may be null and not an empty array.
- return false;
- }
+ // The counter set is not supported.
+ if (counterSet == nil) {
+ return false;
+ }
- // A GPU might support a counter set, but only support a subset of the counters in that
- // set, check if the counter set supports all specific counters we need. Return false
- // if there is a counter unsupported.
- for (MTLCommonCounter counterName : counterNames) {
- bool found = false;
- // Note: Don't do for..in loop to avoid potentially crashy interaction with
- // NSFastEnumeration.
- for (NSUInteger i = 0; i < countersInSet.count; ++i) {
- id<MTLCounter> counter = [countersInSet objectAtIndex:i];
- if ([counter.name caseInsensitiveCompare:counterName] == NSOrderedSame) {
- found = true;
- break;
- }
- }
- if (!found) {
- return false;
- }
- }
+ if (![*counterSet respondsToSelector:@selector(counters)]) {
+ dawn::ErrorLog() << "MTLCounterSet does not respond to selector: counters.";
+ return false;
+ }
+ NSArray<id<MTLCounter>>* countersInSet = (*counterSet).counters;
+ if (countersInSet == nil) {
+ // On some systems, [MTLCounterSet counters] may be null and not an empty array.
+ return false;
+ }
- if (@available(macOS 11.0, iOS 14.0, *)) {
- // Check whether it can read GPU counters at the specified command boundary. Apple
- // family GPUs do not support sampling between different Metal commands, because
- // they defer fragment processing until after the GPU processes all the primitives
- // in the render pass.
- if (!IsCounterSamplingBoundarySupport(device)) {
- return false;
- }
+    // A GPU might support a counter set but only a subset of the counters in it, so check
+    // that the counter set supports every specific counter we need. Return false if any
+    // counter is unsupported.
+ for (MTLCommonCounter counterName : counterNames) {
+ bool found = false;
+        // Note: Don't use a for..in loop, to avoid a potentially crashy interaction with
+        // NSFastEnumeration.
+ for (NSUInteger i = 0; i < countersInSet.count; ++i) {
+ id<MTLCounter> counter = [countersInSet objectAtIndex:i];
+ if ([counter.name caseInsensitiveCompare:counterName] == NSOrderedSame) {
+ found = true;
+ break;
}
+ }
+ if (!found) {
+ return false;
+ }
+ }
- return true;
+ if (@available(macOS 11.0, iOS 14.0, *)) {
+ // Check whether it can read GPU counters at the specified command boundary. Apple
+ // family GPUs do not support sampling between different Metal commands, because
+ // they defer fragment processing until after the GPU processes all the primitives
+ // in the render pass.
+ if (!IsCounterSamplingBoundarySupport(device)) {
+ return false;
}
+ }
- } // anonymous namespace
+ return true;
+}
- // The Metal backend's Adapter.
+} // anonymous namespace
- class Adapter : public AdapterBase {
- public:
- Adapter(InstanceBase* instance, id<MTLDevice> device)
- : AdapterBase(instance, wgpu::BackendType::Metal), mDevice(device) {
- mName = std::string([[*mDevice name] UTF8String]);
+// The Metal backend's Adapter.
- PCIIDs ids;
- if (!instance->ConsumedError(GetDevicePCIInfo(device, &ids))) {
- mVendorId = ids.vendorId;
- mDeviceId = ids.deviceId;
- }
+class Adapter : public AdapterBase {
+ public:
+ Adapter(InstanceBase* instance, id<MTLDevice> device)
+ : AdapterBase(instance, wgpu::BackendType::Metal), mDevice(device) {
+ mName = std::string([[*mDevice name] UTF8String]);
+
+ PCIIDs ids;
+ if (!instance->ConsumedError(GetDevicePCIInfo(device, &ids))) {
+ mVendorId = ids.vendorId;
+ mDeviceId = ids.deviceId;
+ }
-#if defined(DAWN_PLATFORM_IOS)
+#if DAWN_PLATFORM_IS(IOS)
+ mAdapterType = wgpu::AdapterType::IntegratedGPU;
+ const char* systemName = "iOS ";
+#elif DAWN_PLATFORM_IS(MACOS)
+ if ([device isLowPower]) {
mAdapterType = wgpu::AdapterType::IntegratedGPU;
- const char* systemName = "iOS ";
-#elif defined(DAWN_PLATFORM_MACOS)
- if ([device isLowPower]) {
- mAdapterType = wgpu::AdapterType::IntegratedGPU;
- } else {
- mAdapterType = wgpu::AdapterType::DiscreteGPU;
- }
- const char* systemName = "macOS ";
+ } else {
+ mAdapterType = wgpu::AdapterType::DiscreteGPU;
+ }
+ const char* systemName = "macOS ";
#else
-# error "Unsupported Apple platform."
+#error "Unsupported Apple platform."
#endif
- NSString* osVersion = [[NSProcessInfo processInfo] operatingSystemVersionString];
- mDriverDescription =
- "Metal driver on " + std::string(systemName) + [osVersion UTF8String];
- }
+ NSString* osVersion = [[NSProcessInfo processInfo] operatingSystemVersionString];
+ mDriverDescription = "Metal driver on " + std::string(systemName) + [osVersion UTF8String];
+ }
- // AdapterBase Implementation
- bool SupportsExternalImages() const override {
- // Via dawn::native::metal::WrapIOSurface
- return true;
- }
+ // AdapterBase Implementation
+ bool SupportsExternalImages() const override {
+ // Via dawn::native::metal::WrapIOSurface
+ return true;
+ }
- private:
- ResultOrError<Ref<DeviceBase>> CreateDeviceImpl(
- const DeviceDescriptor* descriptor) override {
- return Device::Create(this, mDevice, descriptor);
- }
+ private:
+ ResultOrError<Ref<DeviceBase>> CreateDeviceImpl(const DeviceDescriptor* descriptor) override {
+ return Device::Create(this, mDevice, descriptor);
+ }
- MaybeError InitializeImpl() override {
- return {};
+ MaybeError InitializeImpl() override { return {}; }
+
+ MaybeError InitializeSupportedFeaturesImpl() override {
+ // Check compressed texture format with deprecated MTLFeatureSet way.
+#if DAWN_PLATFORM_IS(MACOS)
+ if ([*mDevice supportsFeatureSet:MTLFeatureSet_macOS_GPUFamily1_v1]) {
+ mSupportedFeatures.EnableFeature(Feature::TextureCompressionBC);
+ }
+#endif
+#if DAWN_PLATFORM_IS(IOS)
+ if ([*mDevice supportsFeatureSet:MTLFeatureSet_iOS_GPUFamily1_v1]) {
+ mSupportedFeatures.EnableFeature(Feature::TextureCompressionETC2);
+ }
+ if ([*mDevice supportsFeatureSet:MTLFeatureSet_iOS_GPUFamily2_v1]) {
+ mSupportedFeatures.EnableFeature(Feature::TextureCompressionASTC);
}
+#endif
- MaybeError InitializeSupportedFeaturesImpl() override {
- // Check compressed texture format with deprecated MTLFeatureSet way.
-#if defined(DAWN_PLATFORM_MACOS)
- if ([*mDevice supportsFeatureSet:MTLFeatureSet_macOS_GPUFamily1_v1]) {
+ // Check compressed texture format with MTLGPUFamily
+ if (@available(macOS 10.15, iOS 13.0, *)) {
+ if ([*mDevice supportsFamily:MTLGPUFamilyMac1]) {
mSupportedFeatures.EnableFeature(Feature::TextureCompressionBC);
}
-#endif
-#if defined(DAWN_PLATFORM_IOS)
- if ([*mDevice supportsFeatureSet:MTLFeatureSet_iOS_GPUFamily1_v1]) {
+ if ([*mDevice supportsFamily:MTLGPUFamilyApple2]) {
mSupportedFeatures.EnableFeature(Feature::TextureCompressionETC2);
}
- if ([*mDevice supportsFeatureSet:MTLFeatureSet_iOS_GPUFamily2_v1]) {
+ if ([*mDevice supportsFamily:MTLGPUFamilyApple3]) {
mSupportedFeatures.EnableFeature(Feature::TextureCompressionASTC);
}
-#endif
-
- // Check compressed texture format with MTLGPUFamily
- if (@available(macOS 10.15, iOS 13.0, *)) {
- if ([*mDevice supportsFamily:MTLGPUFamilyMac1]) {
- mSupportedFeatures.EnableFeature(Feature::TextureCompressionBC);
- }
- if ([*mDevice supportsFamily:MTLGPUFamilyApple2]) {
- mSupportedFeatures.EnableFeature(Feature::TextureCompressionETC2);
- }
- if ([*mDevice supportsFamily:MTLGPUFamilyApple3]) {
- mSupportedFeatures.EnableFeature(Feature::TextureCompressionASTC);
- }
- }
+ }
- if (@available(macOS 10.15, iOS 14.0, *)) {
- if (IsGPUCounterSupported(
- *mDevice, MTLCommonCounterSetStatistic,
- {MTLCommonCounterVertexInvocations, MTLCommonCounterClipperInvocations,
- MTLCommonCounterClipperPrimitivesOut, MTLCommonCounterFragmentInvocations,
- MTLCommonCounterComputeKernelInvocations})) {
- mSupportedFeatures.EnableFeature(Feature::PipelineStatisticsQuery);
+ if (@available(macOS 10.15, iOS 14.0, *)) {
+ if (IsGPUCounterSupported(
+ *mDevice, MTLCommonCounterSetStatistic,
+ {MTLCommonCounterVertexInvocations, MTLCommonCounterClipperInvocations,
+ MTLCommonCounterClipperPrimitivesOut, MTLCommonCounterFragmentInvocations,
+ MTLCommonCounterComputeKernelInvocations})) {
+ mSupportedFeatures.EnableFeature(Feature::PipelineStatisticsQuery);
+ }
+
+ if (IsGPUCounterSupported(*mDevice, MTLCommonCounterSetTimestamp,
+ {MTLCommonCounterTimestamp})) {
+ bool enableTimestampQuery = true;
+
+#if DAWN_PLATFORM_IS(MACOS)
+                // Disable timestamp query on macOS < 11.0 on AMD GPUs because WriteTimestamp
+                // fails when called without any copy commands on MTLBlitCommandEncoder. This
+                // issue has been fixed in macOS 11.0. See crbug.com/dawn/545.
+ if (gpu_info::IsAMD(mVendorId) && !IsMacOSVersionAtLeast(11)) {
+ enableTimestampQuery = false;
}
-
- if (IsGPUCounterSupported(*mDevice, MTLCommonCounterSetTimestamp,
- {MTLCommonCounterTimestamp})) {
- bool enableTimestampQuery = true;
-
-#if defined(DAWN_PLATFORM_MACOS)
- // Disable timestamp query on < macOS 11.0 on AMD GPU because WriteTimestamp
- // fails to call without any copy commands on MTLBlitCommandEncoder. This issue
- // has been fixed on macOS 11.0. See crbug.com/dawn/545.
- if (gpu_info::IsAMD(mVendorId) && !IsMacOSVersionAtLeast(11)) {
- enableTimestampQuery = false;
- }
#endif
- if (enableTimestampQuery) {
- mSupportedFeatures.EnableFeature(Feature::TimestampQuery);
- }
+ if (enableTimestampQuery) {
+ mSupportedFeatures.EnableFeature(Feature::TimestampQuery);
}
}
+ }
- if (@available(macOS 10.11, iOS 11.0, *)) {
- mSupportedFeatures.EnableFeature(Feature::DepthClamping);
- }
+ if (@available(macOS 10.11, iOS 11.0, *)) {
+ mSupportedFeatures.EnableFeature(Feature::DepthClamping);
+ }
- if (@available(macOS 10.11, iOS 9.0, *)) {
- mSupportedFeatures.EnableFeature(Feature::Depth32FloatStencil8);
- }
+ if (@available(macOS 10.11, iOS 9.0, *)) {
+ mSupportedFeatures.EnableFeature(Feature::Depth32FloatStencil8);
+ }
- // Uses newTextureWithDescriptor::iosurface::plane which is available
- // on ios 11.0+ and macOS 11.0+
- if (@available(macOS 10.11, iOS 11.0, *)) {
- mSupportedFeatures.EnableFeature(Feature::MultiPlanarFormats);
- }
+        // Uses newTextureWithDescriptor:iosurface:plane: which is available
+        // on iOS 11.0+ and macOS 10.11+.
+ if (@available(macOS 10.11, iOS 11.0, *)) {
+ mSupportedFeatures.EnableFeature(Feature::MultiPlanarFormats);
+ }
-#if defined(DAWN_PLATFORM_MACOS)
- // MTLPixelFormatDepth24Unorm_Stencil8 is only available on macOS 10.11+
- if ([*mDevice isDepth24Stencil8PixelFormatSupported]) {
- mSupportedFeatures.EnableFeature(Feature::Depth24UnormStencil8);
- }
+#if DAWN_PLATFORM_IS(MACOS)
+ // MTLPixelFormatDepth24Unorm_Stencil8 is only available on macOS 10.11+
+ if ([*mDevice isDepth24Stencil8PixelFormatSupported]) {
+ mSupportedFeatures.EnableFeature(Feature::Depth24UnormStencil8);
+ }
#endif
- return {};
- }
+ mSupportedFeatures.EnableFeature(Feature::IndirectFirstInstance);
- enum class MTLGPUFamily {
- Apple1,
- Apple2,
- Apple3,
- Apple4,
- Apple5,
- Apple6,
- Apple7,
- Mac1,
- Mac2,
- };
+ return {};
+ }
- ResultOrError<MTLGPUFamily> GetMTLGPUFamily() const {
- // https://developer.apple.com/documentation/metal/mtldevice/detecting_gpu_features_and_metal_software_versions?language=objc
+ enum class MTLGPUFamily {
+ Apple1,
+ Apple2,
+ Apple3,
+ Apple4,
+ Apple5,
+ Apple6,
+ Apple7,
+ Mac1,
+ Mac2,
+ };
- if (@available(macOS 10.15, iOS 10.13, *)) {
- if ([*mDevice supportsFamily:MTLGPUFamilyMac2]) {
- return MTLGPUFamily::Mac2;
- }
- if ([*mDevice supportsFamily:MTLGPUFamilyMac1]) {
- return MTLGPUFamily::Mac1;
- }
- if ([*mDevice supportsFamily:MTLGPUFamilyApple7]) {
- return MTLGPUFamily::Apple7;
- }
- if ([*mDevice supportsFamily:MTLGPUFamilyApple6]) {
- return MTLGPUFamily::Apple6;
- }
- if ([*mDevice supportsFamily:MTLGPUFamilyApple5]) {
- return MTLGPUFamily::Apple5;
- }
- if ([*mDevice supportsFamily:MTLGPUFamilyApple4]) {
- return MTLGPUFamily::Apple4;
- }
- if ([*mDevice supportsFamily:MTLGPUFamilyApple3]) {
- return MTLGPUFamily::Apple3;
- }
- if ([*mDevice supportsFamily:MTLGPUFamilyApple2]) {
- return MTLGPUFamily::Apple2;
- }
- if ([*mDevice supportsFamily:MTLGPUFamilyApple1]) {
- return MTLGPUFamily::Apple1;
- }
+ ResultOrError<MTLGPUFamily> GetMTLGPUFamily() const {
+ // https://developer.apple.com/documentation/metal/mtldevice/detecting_gpu_features_and_metal_software_versions?language=objc
+
+ if (@available(macOS 10.15, iOS 10.13, *)) {
+ if ([*mDevice supportsFamily:MTLGPUFamilyMac2]) {
+ return MTLGPUFamily::Mac2;
+ }
+ if ([*mDevice supportsFamily:MTLGPUFamilyMac1]) {
+ return MTLGPUFamily::Mac1;
+ }
+ if ([*mDevice supportsFamily:MTLGPUFamilyApple7]) {
+ return MTLGPUFamily::Apple7;
+ }
+ if ([*mDevice supportsFamily:MTLGPUFamilyApple6]) {
+ return MTLGPUFamily::Apple6;
+ }
+ if ([*mDevice supportsFamily:MTLGPUFamilyApple5]) {
+ return MTLGPUFamily::Apple5;
+ }
+ if ([*mDevice supportsFamily:MTLGPUFamilyApple4]) {
+ return MTLGPUFamily::Apple4;
+ }
+ if ([*mDevice supportsFamily:MTLGPUFamilyApple3]) {
+ return MTLGPUFamily::Apple3;
+ }
+ if ([*mDevice supportsFamily:MTLGPUFamilyApple2]) {
+ return MTLGPUFamily::Apple2;
}
+ if ([*mDevice supportsFamily:MTLGPUFamilyApple1]) {
+ return MTLGPUFamily::Apple1;
+ }
+ }
#if TARGET_OS_OSX
- if (@available(macOS 10.14, *)) {
- if ([*mDevice supportsFeatureSet:MTLFeatureSet_macOS_GPUFamily2_v1]) {
- return MTLGPUFamily::Mac2;
- }
+ if (@available(macOS 10.14, *)) {
+ if ([*mDevice supportsFeatureSet:MTLFeatureSet_macOS_GPUFamily2_v1]) {
+ return MTLGPUFamily::Mac2;
}
- if (@available(macOS 10.11, *)) {
- if ([*mDevice supportsFeatureSet:MTLFeatureSet_macOS_GPUFamily1_v1]) {
- return MTLGPUFamily::Mac1;
- }
+ }
+ if (@available(macOS 10.11, *)) {
+ if ([*mDevice supportsFeatureSet:MTLFeatureSet_macOS_GPUFamily1_v1]) {
+ return MTLGPUFamily::Mac1;
}
+ }
#elif TARGET_OS_IOS
- if (@available(iOS 10.11, *)) {
- if ([*mDevice supportsFeatureSet:MTLFeatureSet_iOS_GPUFamily4_v1]) {
- return MTLGPUFamily::Apple4;
- }
+ if (@available(iOS 10.11, *)) {
+ if ([*mDevice supportsFeatureSet:MTLFeatureSet_iOS_GPUFamily4_v1]) {
+ return MTLGPUFamily::Apple4;
}
- if (@available(iOS 9.0, *)) {
- if ([*mDevice supportsFeatureSet:MTLFeatureSet_iOS_GPUFamily3_v1]) {
- return MTLGPUFamily::Apple3;
- }
+ }
+ if (@available(iOS 9.0, *)) {
+ if ([*mDevice supportsFeatureSet:MTLFeatureSet_iOS_GPUFamily3_v1]) {
+ return MTLGPUFamily::Apple3;
}
- if (@available(iOS 8.0, *)) {
- if ([*mDevice supportsFeatureSet:MTLFeatureSet_iOS_GPUFamily2_v1]) {
- return MTLGPUFamily::Apple2;
- }
+ }
+ if (@available(iOS 8.0, *)) {
+ if ([*mDevice supportsFeatureSet:MTLFeatureSet_iOS_GPUFamily2_v1]) {
+ return MTLGPUFamily::Apple2;
}
- if (@available(iOS 8.0, *)) {
- if ([*mDevice supportsFeatureSet:MTLFeatureSet_iOS_GPUFamily1_v1]) {
- return MTLGPUFamily::Apple1;
- }
+ }
+ if (@available(iOS 8.0, *)) {
+ if ([*mDevice supportsFeatureSet:MTLFeatureSet_iOS_GPUFamily1_v1]) {
+ return MTLGPUFamily::Apple1;
}
-#endif
- return DAWN_INTERNAL_ERROR("Unsupported Metal device");
}
+#endif
+ return DAWN_INTERNAL_ERROR("Unsupported Metal device");
+ }
- MaybeError InitializeSupportedLimitsImpl(CombinedLimits* limits) override {
- struct MTLDeviceLimits {
- uint32_t maxVertexAttribsPerDescriptor;
- uint32_t maxBufferArgumentEntriesPerFunc;
- uint32_t maxTextureArgumentEntriesPerFunc;
- uint32_t maxSamplerStateArgumentEntriesPerFunc;
- uint32_t maxThreadsPerThreadgroup;
- uint32_t maxTotalThreadgroupMemory;
- uint32_t maxFragmentInputComponents;
- uint32_t max1DTextureSize;
- uint32_t max2DTextureSize;
- uint32_t max3DTextureSize;
- uint32_t maxTextureArrayLayers;
- uint32_t minBufferOffsetAlignment;
- };
+ MaybeError InitializeSupportedLimitsImpl(CombinedLimits* limits) override {
+ struct MTLDeviceLimits {
+ uint32_t maxVertexAttribsPerDescriptor;
+ uint32_t maxBufferArgumentEntriesPerFunc;
+ uint32_t maxTextureArgumentEntriesPerFunc;
+ uint32_t maxSamplerStateArgumentEntriesPerFunc;
+ uint32_t maxThreadsPerThreadgroup;
+ uint32_t maxTotalThreadgroupMemory;
+ uint32_t maxFragmentInputComponents;
+ uint32_t max1DTextureSize;
+ uint32_t max2DTextureSize;
+ uint32_t max3DTextureSize;
+ uint32_t maxTextureArrayLayers;
+ uint32_t minBufferOffsetAlignment;
+ };
- struct LimitsForFamily {
- uint32_t MTLDeviceLimits::*limit;
- ityp::array<MTLGPUFamily, uint32_t, 9> values;
- };
+ struct LimitsForFamily {
+ uint32_t MTLDeviceLimits::*limit;
+ ityp::array<MTLGPUFamily, uint32_t, 9> values;
+ };
- // clang-format off
+ // clang-format off
// https://developer.apple.com/metal/Metal-Feature-Set-Tables.pdf
// Apple Mac
// 1, 2, 3, 4, 5, 6, 7, 1, 2
@@ -516,159 +511,160 @@ namespace dawn::native::metal {
{&MTLDeviceLimits::maxTextureArrayLayers, { 2048u, 2048u, 2048u, 2048u, 2048u, 2048u, 2048u, 2048u, 2048u }},
{&MTLDeviceLimits::minBufferOffsetAlignment, { 4u, 4u, 4u, 4u, 4u, 4u, 4u, 256u, 256u }},
};
- // clang-format on
-
- MTLGPUFamily mtlGPUFamily;
- DAWN_TRY_ASSIGN(mtlGPUFamily, GetMTLGPUFamily());
+ // clang-format on
- MTLDeviceLimits mtlLimits;
- for (const auto& limitsForFamily : kMTLLimits) {
- mtlLimits.*limitsForFamily.limit = limitsForFamily.values[mtlGPUFamily];
- }
+ MTLGPUFamily mtlGPUFamily;
+ DAWN_TRY_ASSIGN(mtlGPUFamily, GetMTLGPUFamily());
- GetDefaultLimits(&limits->v1);
+ MTLDeviceLimits mtlLimits;
+ for (const auto& limitsForFamily : kMTLLimits) {
+ mtlLimits.*limitsForFamily.limit = limitsForFamily.values[mtlGPUFamily];
+ }
- limits->v1.maxTextureDimension1D = mtlLimits.max1DTextureSize;
- limits->v1.maxTextureDimension2D = mtlLimits.max2DTextureSize;
- limits->v1.maxTextureDimension3D = mtlLimits.max3DTextureSize;
- limits->v1.maxTextureArrayLayers = mtlLimits.maxTextureArrayLayers;
+ GetDefaultLimits(&limits->v1);
- uint32_t maxBuffersPerStage = mtlLimits.maxBufferArgumentEntriesPerFunc;
- maxBuffersPerStage -= 1; // One slot is reserved to store buffer lengths.
+ limits->v1.maxTextureDimension1D = mtlLimits.max1DTextureSize;
+ limits->v1.maxTextureDimension2D = mtlLimits.max2DTextureSize;
+ limits->v1.maxTextureDimension3D = mtlLimits.max3DTextureSize;
+ limits->v1.maxTextureArrayLayers = mtlLimits.maxTextureArrayLayers;
- uint32_t baseMaxBuffersPerStage = limits->v1.maxStorageBuffersPerShaderStage +
- limits->v1.maxUniformBuffersPerShaderStage +
- limits->v1.maxVertexBuffers;
+ uint32_t maxBuffersPerStage = mtlLimits.maxBufferArgumentEntriesPerFunc;
+ maxBuffersPerStage -= 1; // One slot is reserved to store buffer lengths.
- ASSERT(maxBuffersPerStage >= baseMaxBuffersPerStage);
- {
- uint32_t additional = maxBuffersPerStage - baseMaxBuffersPerStage;
- limits->v1.maxStorageBuffersPerShaderStage += additional / 3;
- limits->v1.maxUniformBuffersPerShaderStage += additional / 3;
- limits->v1.maxVertexBuffers += (additional - 2 * (additional / 3));
- }
+ uint32_t baseMaxBuffersPerStage = limits->v1.maxStorageBuffersPerShaderStage +
+ limits->v1.maxUniformBuffersPerShaderStage +
+ limits->v1.maxVertexBuffers;
- uint32_t baseMaxTexturesPerStage = limits->v1.maxSampledTexturesPerShaderStage +
- limits->v1.maxStorageTexturesPerShaderStage;
+ ASSERT(maxBuffersPerStage >= baseMaxBuffersPerStage);
+ {
+ uint32_t additional = maxBuffersPerStage - baseMaxBuffersPerStage;
+ limits->v1.maxStorageBuffersPerShaderStage += additional / 3;
+ limits->v1.maxUniformBuffersPerShaderStage += additional / 3;
+ limits->v1.maxVertexBuffers += (additional - 2 * (additional / 3));
+ }
- ASSERT(mtlLimits.maxTextureArgumentEntriesPerFunc >= baseMaxTexturesPerStage);
- {
- uint32_t additional =
- mtlLimits.maxTextureArgumentEntriesPerFunc - baseMaxTexturesPerStage;
- limits->v1.maxSampledTexturesPerShaderStage += additional / 2;
- limits->v1.maxStorageTexturesPerShaderStage += (additional - additional / 2);
- }
+ uint32_t baseMaxTexturesPerStage = limits->v1.maxSampledTexturesPerShaderStage +
+ limits->v1.maxStorageTexturesPerShaderStage;
- limits->v1.maxSamplersPerShaderStage = mtlLimits.maxSamplerStateArgumentEntriesPerFunc;
-
- // Metal limits are per-function, so the layout limits are the same as the stage
- // limits. Note: this should likely change if the implementation uses Metal argument
- // buffers. Non-dynamic buffers will probably be bound argument buffers, but dynamic
- // buffers may be set directly.
- // Mac GPU families with tier 1 argument buffers support 64
- // buffers, 128 textures, and 16 samplers. Mac GPU families
- // with tier 2 argument buffers support 500000 buffers and
- // textures, and 1024 unique samplers
- limits->v1.maxDynamicUniformBuffersPerPipelineLayout =
- limits->v1.maxUniformBuffersPerShaderStage;
- limits->v1.maxDynamicStorageBuffersPerPipelineLayout =
- limits->v1.maxStorageBuffersPerShaderStage;
-
- // The WebGPU limit is the limit across all vertex buffers, combined.
- limits->v1.maxVertexAttributes =
- limits->v1.maxVertexBuffers * mtlLimits.maxVertexAttribsPerDescriptor;
-
- limits->v1.maxInterStageShaderComponents = mtlLimits.maxFragmentInputComponents;
-
- limits->v1.maxComputeWorkgroupStorageSize = mtlLimits.maxTotalThreadgroupMemory;
- limits->v1.maxComputeInvocationsPerWorkgroup = mtlLimits.maxThreadsPerThreadgroup;
- limits->v1.maxComputeWorkgroupSizeX = mtlLimits.maxThreadsPerThreadgroup;
- limits->v1.maxComputeWorkgroupSizeY = mtlLimits.maxThreadsPerThreadgroup;
- limits->v1.maxComputeWorkgroupSizeZ = mtlLimits.maxThreadsPerThreadgroup;
-
- limits->v1.minUniformBufferOffsetAlignment = mtlLimits.minBufferOffsetAlignment;
- limits->v1.minStorageBufferOffsetAlignment = mtlLimits.minBufferOffsetAlignment;
-
- uint64_t maxBufferSize = Buffer::QueryMaxBufferLength(*mDevice);
-
- // Metal has no documented limit on the size of a binding. Use the maximum
- // buffer size.
- limits->v1.maxUniformBufferBindingSize = maxBufferSize;
- limits->v1.maxStorageBufferBindingSize = maxBufferSize;
-
- // TODO(crbug.com/dawn/685):
- // LIMITS NOT SET:
- // - maxBindGroups
- // - maxVertexBufferArrayStride
-
- return {};
+ ASSERT(mtlLimits.maxTextureArgumentEntriesPerFunc >= baseMaxTexturesPerStage);
+ {
+ uint32_t additional =
+ mtlLimits.maxTextureArgumentEntriesPerFunc - baseMaxTexturesPerStage;
+ limits->v1.maxSampledTexturesPerShaderStage += additional / 2;
+ limits->v1.maxStorageTexturesPerShaderStage += (additional - additional / 2);
}
- NSPRef<id<MTLDevice>> mDevice;
- };
+ limits->v1.maxSamplersPerShaderStage = mtlLimits.maxSamplerStateArgumentEntriesPerFunc;
+
+ // Metal limits are per-function, so the layout limits are the same as the stage
+ // limits. Note: this should likely change if the implementation uses Metal argument
+ // buffers. Non-dynamic buffers will probably be bound argument buffers, but dynamic
+ // buffers may be set directly.
+ // Mac GPU families with tier 1 argument buffers support 64
+ // buffers, 128 textures, and 16 samplers. Mac GPU families
+ // with tier 2 argument buffers support 500000 buffers and
+ // textures, and 1024 unique samplers
+ limits->v1.maxDynamicUniformBuffersPerPipelineLayout =
+ limits->v1.maxUniformBuffersPerShaderStage;
+ limits->v1.maxDynamicStorageBuffersPerPipelineLayout =
+ limits->v1.maxStorageBuffersPerShaderStage;
+
+ // The WebGPU limit is the limit across all vertex buffers, combined.
+ limits->v1.maxVertexAttributes =
+ limits->v1.maxVertexBuffers * mtlLimits.maxVertexAttribsPerDescriptor;
+
+ limits->v1.maxInterStageShaderComponents = mtlLimits.maxFragmentInputComponents;
+
+ limits->v1.maxComputeWorkgroupStorageSize = mtlLimits.maxTotalThreadgroupMemory;
+ limits->v1.maxComputeInvocationsPerWorkgroup = mtlLimits.maxThreadsPerThreadgroup;
+ limits->v1.maxComputeWorkgroupSizeX = mtlLimits.maxThreadsPerThreadgroup;
+ limits->v1.maxComputeWorkgroupSizeY = mtlLimits.maxThreadsPerThreadgroup;
+ limits->v1.maxComputeWorkgroupSizeZ = mtlLimits.maxThreadsPerThreadgroup;
+
+ limits->v1.minUniformBufferOffsetAlignment = mtlLimits.minBufferOffsetAlignment;
+ limits->v1.minStorageBufferOffsetAlignment = mtlLimits.minBufferOffsetAlignment;
+
+ uint64_t maxBufferSize = Buffer::QueryMaxBufferLength(*mDevice);
+
+ // Metal has no documented limit on the size of a binding. Use the maximum
+ // buffer size.
+ limits->v1.maxUniformBufferBindingSize = maxBufferSize;
+ limits->v1.maxStorageBufferBindingSize = maxBufferSize;
+
+ // TODO(crbug.com/dawn/685):
+ // LIMITS NOT SET:
+ // - maxBindGroups
+ // - maxVertexBufferArrayStride
+
+ return {};
+ }
- // Implementation of the Metal backend's BackendConnection
+ NSPRef<id<MTLDevice>> mDevice;
+};
- Backend::Backend(InstanceBase* instance)
- : BackendConnection(instance, wgpu::BackendType::Metal) {
- if (GetInstance()->IsBackendValidationEnabled()) {
- setenv("METAL_DEVICE_WRAPPER_TYPE", "1", 1);
- }
- }
+// Implementation of the Metal backend's BackendConnection
- std::vector<Ref<AdapterBase>> Backend::DiscoverDefaultAdapters() {
- AdapterDiscoveryOptions options;
- auto result = DiscoverAdapters(&options);
- if (result.IsError()) {
- GetInstance()->ConsumedError(result.AcquireError());
- return {};
- }
- return result.AcquireSuccess();
+Backend::Backend(InstanceBase* instance) : BackendConnection(instance, wgpu::BackendType::Metal) {
+ if (GetInstance()->IsBackendValidationEnabled()) {
+ setenv("METAL_DEVICE_WRAPPER_TYPE", "1", 1);
}
+}
- ResultOrError<std::vector<Ref<AdapterBase>>> Backend::DiscoverAdapters(
- const AdapterDiscoveryOptionsBase* optionsBase) {
- ASSERT(optionsBase->backendType == WGPUBackendType_Metal);
+Backend::~Backend() = default;
- std::vector<Ref<AdapterBase>> adapters;
- BOOL supportedVersion = NO;
-#if defined(DAWN_PLATFORM_MACOS)
- if (@available(macOS 10.11, *)) {
- supportedVersion = YES;
+std::vector<Ref<AdapterBase>> Backend::DiscoverDefaultAdapters() {
+ AdapterDiscoveryOptions options;
+ auto result = DiscoverAdapters(&options);
+ if (result.IsError()) {
+ GetInstance()->ConsumedError(result.AcquireError());
+ return {};
+ }
+ return result.AcquireSuccess();
+}
- NSRef<NSArray<id<MTLDevice>>> devices = AcquireNSRef(MTLCopyAllDevices());
+ResultOrError<std::vector<Ref<AdapterBase>>> Backend::DiscoverAdapters(
+ const AdapterDiscoveryOptionsBase* optionsBase) {
+ ASSERT(optionsBase->backendType == WGPUBackendType_Metal);
- for (id<MTLDevice> device in devices.Get()) {
- Ref<Adapter> adapter = AcquireRef(new Adapter(GetInstance(), device));
- if (!GetInstance()->ConsumedError(adapter->Initialize())) {
- adapters.push_back(std::move(adapter));
- }
- }
- }
-#endif
+ std::vector<Ref<AdapterBase>> adapters;
+ BOOL supportedVersion = NO;
+#if DAWN_PLATFORM_IS(MACOS)
+ if (@available(macOS 10.11, *)) {
+ supportedVersion = YES;
-#if defined(DAWN_PLATFORM_IOS)
- if (@available(iOS 8.0, *)) {
- supportedVersion = YES;
- // iOS only has a single device so MTLCopyAllDevices doesn't exist there.
- Ref<Adapter> adapter =
- AcquireRef(new Adapter(GetInstance(), MTLCreateSystemDefaultDevice()));
+ NSRef<NSArray<id<MTLDevice>>> devices = AcquireNSRef(MTLCopyAllDevices());
+
+ for (id<MTLDevice> device in devices.Get()) {
+ Ref<Adapter> adapter = AcquireRef(new Adapter(GetInstance(), device));
if (!GetInstance()->ConsumedError(adapter->Initialize())) {
adapters.push_back(std::move(adapter));
}
}
+ }
#endif
- if (!supportedVersion) {
- UNREACHABLE();
+
+#if DAWN_PLATFORM_IS(IOS)
+ if (@available(iOS 8.0, *)) {
+ supportedVersion = YES;
+ // iOS only has a single device so MTLCopyAllDevices doesn't exist there.
+ Ref<Adapter> adapter =
+ AcquireRef(new Adapter(GetInstance(), MTLCreateSystemDefaultDevice()));
+ if (!GetInstance()->ConsumedError(adapter->Initialize())) {
+ adapters.push_back(std::move(adapter));
}
- return adapters;
}
+#endif
+ if (!supportedVersion) {
+ UNREACHABLE();
+ }
+ return adapters;
+}
- BackendConnection* Connect(InstanceBase* instance) {
- if (!IsMetalSupported()) {
- return nullptr;
- }
- return new Backend(instance);
+BackendConnection* Connect(InstanceBase* instance) {
+ if (!IsMetalSupported()) {
+ return nullptr;
}
+ return new Backend(instance);
+}
} // namespace dawn::native::metal
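
The hunk above derives the WebGPU limit structure almost mechanically from Metal's per-device maximums: the combined vertex-attribute limit is the product of the vertex-buffer count and the per-descriptor attribute count, the compute limits mirror the threadgroup maximums, and binding sizes are capped at the value reported by Buffer::QueryMaxBufferLength. A minimal sketch of that shape, using hypothetical MetalCaps/DerivedLimits structs rather than Dawn's real CombinedLimits type:

```cpp
#include <cstdint>

// Hypothetical stand-ins for the real mtlLimits / limits->v1 structures used above.
struct MetalCaps {
    uint32_t maxVertexBuffers = 0;
    uint32_t maxVertexAttribsPerDescriptor = 0;
    uint32_t maxFragmentInputComponents = 0;
    uint32_t maxTotalThreadgroupMemory = 0;
    uint32_t maxThreadsPerThreadgroup = 0;
    uint32_t minBufferOffsetAlignment = 0;
    uint64_t maxBufferLength = 0;
};

struct DerivedLimits {
    uint32_t maxVertexAttributes = 0;
    uint32_t maxInterStageShaderComponents = 0;
    uint32_t maxComputeWorkgroupStorageSize = 0;
    uint32_t maxComputeInvocationsPerWorkgroup = 0;
    uint32_t minUniformBufferOffsetAlignment = 0;
    uint32_t minStorageBufferOffsetAlignment = 0;
    uint64_t maxUniformBufferBindingSize = 0;
    uint64_t maxStorageBufferBindingSize = 0;
};

DerivedLimits DeriveLimits(const MetalCaps& caps) {
    DerivedLimits l;
    // WebGPU counts vertex attributes across all vertex buffers combined.
    l.maxVertexAttributes = caps.maxVertexBuffers * caps.maxVertexAttribsPerDescriptor;
    l.maxInterStageShaderComponents = caps.maxFragmentInputComponents;
    // Compute limits map directly onto the threadgroup maximums.
    l.maxComputeWorkgroupStorageSize = caps.maxTotalThreadgroupMemory;
    l.maxComputeInvocationsPerWorkgroup = caps.maxThreadsPerThreadgroup;
    // Offsets must respect the device's minimum buffer offset alignment.
    l.minUniformBufferOffsetAlignment = caps.minBufferOffsetAlignment;
    l.minStorageBufferOffsetAlignment = caps.minBufferOffsetAlignment;
    // Metal documents no per-binding size limit, so bindings are capped at the
    // maximum buffer length instead.
    l.maxUniformBufferBindingSize = caps.maxBufferLength;
    l.maxStorageBufferBindingSize = caps.maxBufferLength;
    return l;
}
```
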
diff --git a/chromium/third_party/dawn/src/dawn/native/metal/BindGroupLayoutMTL.h b/chromium/third_party/dawn/src/dawn/native/metal/BindGroupLayoutMTL.h
index fbd344cab38..ab2abefc00c 100644
--- a/chromium/third_party/dawn/src/dawn/native/metal/BindGroupLayoutMTL.h
+++ b/chromium/third_party/dawn/src/dawn/native/metal/BindGroupLayoutMTL.h
@@ -20,26 +20,26 @@
namespace dawn::native::metal {
- class BindGroup;
- class Device;
-
- class BindGroupLayout final : public BindGroupLayoutBase {
- public:
- static Ref<BindGroupLayout> Create(DeviceBase* device,
- const BindGroupLayoutDescriptor* descriptor,
- PipelineCompatibilityToken pipelineCompatibilityToken);
-
- Ref<BindGroup> AllocateBindGroup(Device* device, const BindGroupDescriptor* descriptor);
- void DeallocateBindGroup(BindGroup* bindGroup);
-
- private:
- BindGroupLayout(DeviceBase* device,
- const BindGroupLayoutDescriptor* descriptor,
- PipelineCompatibilityToken pipelineCompatibilityToken);
- ~BindGroupLayout() override = default;
-
- SlabAllocator<BindGroup> mBindGroupAllocator;
- };
+class BindGroup;
+class Device;
+
+class BindGroupLayout final : public BindGroupLayoutBase {
+ public:
+ static Ref<BindGroupLayout> Create(DeviceBase* device,
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken);
+
+ Ref<BindGroup> AllocateBindGroup(Device* device, const BindGroupDescriptor* descriptor);
+ void DeallocateBindGroup(BindGroup* bindGroup);
+
+ private:
+ BindGroupLayout(DeviceBase* device,
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken);
+ ~BindGroupLayout() override;
+
+ SlabAllocator<BindGroup> mBindGroupAllocator;
+};
} // namespace dawn::native::metal
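
The reworked header keeps the existing pattern where each BindGroupLayout owns a SlabAllocator<BindGroup> and bind groups are handed out and returned through AllocateBindGroup/DeallocateBindGroup, so frequent bind-group churn avoids general-purpose heap allocation. A toy free-list pool illustrating that ownership pattern (this is not Dawn's SlabAllocator, just a sketch of the idea; all names below are hypothetical):

```cpp
#include <memory>
#include <new>
#include <utility>
#include <vector>

// Toy fixed-size object pool standing in for Dawn's SlabAllocator in this sketch.
template <typename T>
class ObjectPool {
  public:
    template <typename... Args>
    T* Allocate(Args&&... args) {
        void* slot;
        if (!mFreeSlots.empty()) {
            slot = mFreeSlots.back();  // Reuse a slot released by Deallocate().
            mFreeSlots.pop_back();
        } else {
            mStorage.push_back(std::make_unique<Slot>());  // Grow the pool by one slot.
            slot = mStorage.back()->bytes;
        }
        return new (slot) T(std::forward<Args>(args)...);  // Placement-construct in place.
    }

    void Deallocate(T* object) {
        object->~T();                  // Destroy the object but keep its memory for reuse.
        mFreeSlots.push_back(object);
    }

  private:
    struct Slot {
        alignas(T) unsigned char bytes[sizeof(T)];
    };
    std::vector<std::unique_ptr<Slot>> mStorage;
    std::vector<void*> mFreeSlots;
};

// The layout owns the pool, mirroring AllocateBindGroup/DeallocateBindGroup above.
struct ToyBindGroup {
    int placeholderState = 0;
};

struct ToyBindGroupLayout {
    ObjectPool<ToyBindGroup> allocator;
    ToyBindGroup* Allocate() { return allocator.Allocate(); }
    void Deallocate(ToyBindGroup* group) { allocator.Deallocate(group); }
};
```

BindGroupMTL.mm further below completes the round trip: BindGroup::Create routes through the layout's allocator, and DestroyImpl returns the object to it.
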
diff --git a/chromium/third_party/dawn/src/dawn/native/metal/BindGroupLayoutMTL.mm b/chromium/third_party/dawn/src/dawn/native/metal/BindGroupLayoutMTL.mm
index e413bdd87de..6c1a7acb3eb 100644
--- a/chromium/third_party/dawn/src/dawn/native/metal/BindGroupLayoutMTL.mm
+++ b/chromium/third_party/dawn/src/dawn/native/metal/BindGroupLayoutMTL.mm
@@ -18,28 +18,29 @@
namespace dawn::native::metal {
- // static
- Ref<BindGroupLayout> BindGroupLayout::Create(
- DeviceBase* device,
- const BindGroupLayoutDescriptor* descriptor,
- PipelineCompatibilityToken pipelineCompatibilityToken) {
- return AcquireRef(new BindGroupLayout(device, descriptor, pipelineCompatibilityToken));
- }
-
- BindGroupLayout::BindGroupLayout(DeviceBase* device,
- const BindGroupLayoutDescriptor* descriptor,
- PipelineCompatibilityToken pipelineCompatibilityToken)
- : BindGroupLayoutBase(device, descriptor, pipelineCompatibilityToken),
- mBindGroupAllocator(MakeFrontendBindGroupAllocator<BindGroup>(4096)) {
- }
-
- Ref<BindGroup> BindGroupLayout::AllocateBindGroup(Device* device,
- const BindGroupDescriptor* descriptor) {
- return AcquireRef(mBindGroupAllocator.Allocate(device, descriptor));
- }
-
- void BindGroupLayout::DeallocateBindGroup(BindGroup* bindGroup) {
- mBindGroupAllocator.Deallocate(bindGroup);
- }
+// static
+Ref<BindGroupLayout> BindGroupLayout::Create(
+ DeviceBase* device,
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken) {
+ return AcquireRef(new BindGroupLayout(device, descriptor, pipelineCompatibilityToken));
+}
+
+BindGroupLayout::BindGroupLayout(DeviceBase* device,
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken)
+ : BindGroupLayoutBase(device, descriptor, pipelineCompatibilityToken),
+ mBindGroupAllocator(MakeFrontendBindGroupAllocator<BindGroup>(4096)) {}
+
+BindGroupLayout::~BindGroupLayout() = default;
+
+Ref<BindGroup> BindGroupLayout::AllocateBindGroup(Device* device,
+ const BindGroupDescriptor* descriptor) {
+ return AcquireRef(mBindGroupAllocator.Allocate(device, descriptor));
+}
+
+void BindGroupLayout::DeallocateBindGroup(BindGroup* bindGroup) {
+ mBindGroupAllocator.Deallocate(bindGroup);
+}
} // namespace dawn::native::metal
diff --git a/chromium/third_party/dawn/src/dawn/native/metal/BindGroupMTL.h b/chromium/third_party/dawn/src/dawn/native/metal/BindGroupMTL.h
index 9fd71d2ebb7..5e75395e0a6 100644
--- a/chromium/third_party/dawn/src/dawn/native/metal/BindGroupMTL.h
+++ b/chromium/third_party/dawn/src/dawn/native/metal/BindGroupMTL.h
@@ -20,19 +20,19 @@
namespace dawn::native::metal {
- class Device;
+class Device;
- class BindGroup final : public BindGroupBase, public PlacementAllocated {
- public:
- static Ref<BindGroup> Create(Device* device, const BindGroupDescriptor* descriptor);
+class BindGroup final : public BindGroupBase, public PlacementAllocated {
+ public:
+ static Ref<BindGroup> Create(Device* device, const BindGroupDescriptor* descriptor);
- BindGroup(Device* device, const BindGroupDescriptor* descriptor);
+ BindGroup(Device* device, const BindGroupDescriptor* descriptor);
- private:
- ~BindGroup() override;
+ private:
+ ~BindGroup() override;
- void DestroyImpl() override;
- };
+ void DestroyImpl() override;
+};
} // namespace dawn::native::metal
diff --git a/chromium/third_party/dawn/src/dawn/native/metal/BindGroupMTL.mm b/chromium/third_party/dawn/src/dawn/native/metal/BindGroupMTL.mm
index a8e02a805f5..90b9e235b79 100644
--- a/chromium/third_party/dawn/src/dawn/native/metal/BindGroupMTL.mm
+++ b/chromium/third_party/dawn/src/dawn/native/metal/BindGroupMTL.mm
@@ -18,20 +18,19 @@
#include "dawn/native/metal/DeviceMTL.h"
namespace dawn::native::metal {
- BindGroup::BindGroup(Device* device, const BindGroupDescriptor* descriptor)
- : BindGroupBase(this, device, descriptor) {
- }
+BindGroup::BindGroup(Device* device, const BindGroupDescriptor* descriptor)
+ : BindGroupBase(this, device, descriptor) {}
- BindGroup::~BindGroup() = default;
+BindGroup::~BindGroup() = default;
- void BindGroup::DestroyImpl() {
- BindGroupBase::DestroyImpl();
- ToBackend(GetLayout())->DeallocateBindGroup(this);
- }
+void BindGroup::DestroyImpl() {
+ BindGroupBase::DestroyImpl();
+ ToBackend(GetLayout())->DeallocateBindGroup(this);
+}
- // static
- Ref<BindGroup> BindGroup::Create(Device* device, const BindGroupDescriptor* descriptor) {
- return ToBackend(descriptor->layout)->AllocateBindGroup(device, descriptor);
- }
+// static
+Ref<BindGroup> BindGroup::Create(Device* device, const BindGroupDescriptor* descriptor) {
+ return ToBackend(descriptor->layout)->AllocateBindGroup(device, descriptor);
+}
} // namespace dawn::native::metal
diff --git a/chromium/third_party/dawn/src/dawn/native/metal/BufferMTL.h b/chromium/third_party/dawn/src/dawn/native/metal/BufferMTL.h
index 4e36736fc53..f36ebe89485 100644
--- a/chromium/third_party/dawn/src/dawn/native/metal/BufferMTL.h
+++ b/chromium/third_party/dawn/src/dawn/native/metal/BufferMTL.h
@@ -23,44 +23,47 @@
namespace dawn::native::metal {
- class CommandRecordingContext;
- class Device;
-
- class Buffer final : public BufferBase {
- public:
- static ResultOrError<Ref<Buffer>> Create(Device* device,
- const BufferDescriptor* descriptor);
- id<MTLBuffer> GetMTLBuffer() const;
-
- bool EnsureDataInitialized(CommandRecordingContext* commandContext);
- bool EnsureDataInitializedAsDestination(CommandRecordingContext* commandContext,
- uint64_t offset,
- uint64_t size);
- bool EnsureDataInitializedAsDestination(CommandRecordingContext* commandContext,
- const CopyTextureToBufferCmd* copy);
-
- static uint64_t QueryMaxBufferLength(id<MTLDevice> mtlDevice);
-
- private:
- using BufferBase::BufferBase;
- MaybeError Initialize(bool mappedAtCreation);
-
- ~Buffer() override;
- MaybeError MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) override;
- void UnmapImpl() override;
- void DestroyImpl() override;
- void* GetMappedPointerImpl() override;
- bool IsCPUWritableAtCreation() const override;
- MaybeError MapAtCreationImpl() override;
-
- void InitializeToZero(CommandRecordingContext* commandContext);
- void ClearBuffer(CommandRecordingContext* commandContext,
- uint8_t clearValue,
- uint64_t offset = 0,
- uint64_t size = 0);
-
- NSPRef<id<MTLBuffer>> mMtlBuffer;
- };
+class CommandRecordingContext;
+class Device;
+
+class Buffer final : public BufferBase {
+ public:
+ static ResultOrError<Ref<Buffer>> Create(Device* device, const BufferDescriptor* descriptor);
+
+ Buffer(DeviceBase* device, const BufferDescriptor* descriptor);
+
+ id<MTLBuffer> GetMTLBuffer() const;
+
+ bool EnsureDataInitialized(CommandRecordingContext* commandContext);
+ bool EnsureDataInitializedAsDestination(CommandRecordingContext* commandContext,
+ uint64_t offset,
+ uint64_t size);
+ bool EnsureDataInitializedAsDestination(CommandRecordingContext* commandContext,
+ const CopyTextureToBufferCmd* copy);
+
+ static uint64_t QueryMaxBufferLength(id<MTLDevice> mtlDevice);
+
+ private:
+ using BufferBase::BufferBase;
+ MaybeError Initialize(bool mappedAtCreation);
+
+ ~Buffer() override;
+
+ MaybeError MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) override;
+ void UnmapImpl() override;
+ void DestroyImpl() override;
+ void* GetMappedPointerImpl() override;
+ bool IsCPUWritableAtCreation() const override;
+ MaybeError MapAtCreationImpl() override;
+
+ void InitializeToZero(CommandRecordingContext* commandContext);
+ void ClearBuffer(CommandRecordingContext* commandContext,
+ uint8_t clearValue,
+ uint64_t offset = 0,
+ uint64_t size = 0);
+
+ NSPRef<id<MTLBuffer>> mMtlBuffer;
+};
} // namespace dawn::native::metal
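
BufferMTL.mm (next in the diff) keeps Dawn's lazy zero-initialization: EnsureDataInitialized* is a no-op for already-initialized buffers, a write that covers the whole buffer merely marks it initialized, and only a partial write triggers an upfront clear. A compact sketch of that decision, with a hypothetical LazyClearState standing in for the buffer's bookkeeping:

```cpp
#include <cstdint>

// Hypothetical stand-in for the state consulted by EnsureDataInitializedAsDestination().
struct LazyClearState {
    uint64_t size = 0;
    bool dataInitialized = false;
    bool lazyClearEnabled = true;  // Mirrors the LazyClearResourceOnFirstUse toggle.
};

// Returns true when a zero clear must be recorded before the buffer is used as the
// destination of a write covering [offset, offset + writeSize).
bool NeedsClearBeforeWrite(LazyClearState& state, uint64_t offset, uint64_t writeSize) {
    if (!state.lazyClearEnabled || state.dataInitialized) {
        return false;  // Nothing to do: clears disabled or data already defined.
    }
    if (offset == 0 && writeSize == state.size) {
        state.dataInitialized = true;  // Fully overwritten: a clear would be wasted work.
        return false;
    }
    // Partial write: the untouched bytes must be zeroed first. The real code performs
    // the clear and then marks the buffer initialized; we mark it here for brevity.
    state.dataInitialized = true;
    return true;
}
```
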
diff --git a/chromium/third_party/dawn/src/dawn/native/metal/BufferMTL.mm b/chromium/third_party/dawn/src/dawn/native/metal/BufferMTL.mm
index 695872a3e83..92a808841f1 100644
--- a/chromium/third_party/dawn/src/dawn/native/metal/BufferMTL.mm
+++ b/chromium/third_party/dawn/src/dawn/native/metal/BufferMTL.mm
@@ -15,6 +15,7 @@
#include "dawn/native/metal/BufferMTL.h"
#include "dawn/common/Math.h"
+#include "dawn/common/Platform.h"
#include "dawn/native/CommandBuffer.h"
#include "dawn/native/metal/CommandRecordingContext.h"
#include "dawn/native/metal/DeviceMTL.h"
@@ -22,219 +23,220 @@
#include <limits>
namespace dawn::native::metal {
- // The size of uniform buffer and storage buffer need to be aligned to 16 bytes which is the
- // largest alignment of supported data types
- static constexpr uint32_t kMinUniformOrStorageBufferAlignment = 16u;
-
- // static
- ResultOrError<Ref<Buffer>> Buffer::Create(Device* device, const BufferDescriptor* descriptor) {
- Ref<Buffer> buffer = AcquireRef(new Buffer(device, descriptor));
- DAWN_TRY(buffer->Initialize(descriptor->mappedAtCreation));
- return std::move(buffer);
+// The sizes of uniform and storage buffers need to be aligned to 16 bytes, which is the
+// largest alignment of the supported data types.
+static constexpr uint32_t kMinUniformOrStorageBufferAlignment = 16u;
+
+// static
+ResultOrError<Ref<Buffer>> Buffer::Create(Device* device, const BufferDescriptor* descriptor) {
+ Ref<Buffer> buffer = AcquireRef(new Buffer(device, descriptor));
+ DAWN_TRY(buffer->Initialize(descriptor->mappedAtCreation));
+ return std::move(buffer);
+}
+
+// static
+uint64_t Buffer::QueryMaxBufferLength(id<MTLDevice> mtlDevice) {
+ if (@available(iOS 12, tvOS 12, macOS 10.14, *)) {
+ return [mtlDevice maxBufferLength];
}
- // static
- uint64_t Buffer::QueryMaxBufferLength(id<MTLDevice> mtlDevice) {
- if (@available(iOS 12, tvOS 12, macOS 10.14, *)) {
- return [mtlDevice maxBufferLength];
- }
-
- // Earlier versions of Metal had maximums defined in the Metal feature set tables
- // https://metalbyexample.com/wp-content/uploads/Metal-Feature-Set-Tables-2018.pdf
-#if defined(DAWN_PLATFORM_MACOS)
- // 10.12 and 10.13 have a 1Gb limit.
- if (@available(macOS 10.12, *)) {
- // |maxBufferLength| isn't always available on older systems. If available, use
- // |recommendedMaxWorkingSetSize| instead. We can probably allocate more than this,
- // but don't have a way to discover a better limit. MoltenVK also uses this heuristic.
- return 1024 * 1024 * 1024;
- }
- // 10.11 has a 256Mb limit
- if (@available(maxOS 10.11, *)) {
- return 256 * 1024 * 1024;
- }
-#else
- // macOS / tvOS: 256Mb limit in versions without [MTLDevice maxBufferLength]
+ // Earlier versions of Metal had maximums defined in the Metal feature set tables
+ // https://metalbyexample.com/wp-content/uploads/Metal-Feature-Set-Tables-2018.pdf
+#if DAWN_PLATFORM_IS(MACOS)
+ // 10.12 and 10.13 have a 1Gb limit.
+ if (@available(macOS 10.12, *)) {
+ // |maxBufferLength| isn't always available on older systems. If available, use
+ // |recommendedMaxWorkingSetSize| instead. We can probably allocate more than this,
+ // but don't have a way to discover a better limit. MoltenVK also uses this heuristic.
+ return 1024 * 1024 * 1024;
+ }
+ // 10.11 has a 256Mb limit
+ if (@available(macOS 10.11, *)) {
return 256 * 1024 * 1024;
-#endif
}
-
- MaybeError Buffer::Initialize(bool mappedAtCreation) {
- MTLResourceOptions storageMode;
- if (GetUsage() & kMappableBufferUsages) {
- storageMode = MTLResourceStorageModeShared;
- } else {
- storageMode = MTLResourceStorageModePrivate;
- }
-
- uint32_t alignment = 1;
-#ifdef DAWN_PLATFORM_MACOS
- // [MTLBlitCommandEncoder fillBuffer] requires the size to be a multiple of 4 on MacOS.
- alignment = 4;
+ // 256Mb fallback for any other configuration (every branch needs a return).
+ return 256 * 1024 * 1024;
+#else
+ // iOS / tvOS: 256Mb limit in versions without [MTLDevice maxBufferLength]
+ return 256 * 1024 * 1024;
#endif
+}
- // Metal validation layer requires the size of uniform buffer and storage buffer to be no
- // less than the size of the buffer block defined in shader, and the overall size of the
- // buffer must be aligned to the largest alignment of its members.
- if (GetUsage() &
- (wgpu::BufferUsage::Uniform | wgpu::BufferUsage::Storage | kInternalStorageBuffer)) {
- ASSERT(IsAligned(kMinUniformOrStorageBufferAlignment, alignment));
- alignment = kMinUniformOrStorageBufferAlignment;
- }
+Buffer::Buffer(DeviceBase* dev, const BufferDescriptor* desc) : BufferBase(dev, desc) {}
- // The vertex pulling transform requires at least 4 bytes in the buffer.
- // 0-sized vertex buffer bindings are allowed, so we always need an additional 4 bytes
- // after the end.
- NSUInteger extraBytes = 0u;
- if ((GetUsage() & wgpu::BufferUsage::Vertex) != 0) {
- extraBytes = 4u;
- }
-
- if (GetSize() > std::numeric_limits<NSUInteger>::max() - extraBytes) {
- return DAWN_OUT_OF_MEMORY_ERROR("Buffer allocation is too large");
- }
- NSUInteger currentSize =
- std::max(static_cast<NSUInteger>(GetSize()) + extraBytes, NSUInteger(4));
-
- if (currentSize > std::numeric_limits<NSUInteger>::max() - alignment) {
- // Alignment would overlow.
- return DAWN_OUT_OF_MEMORY_ERROR("Buffer allocation is too large");
- }
- currentSize = Align(currentSize, alignment);
-
- uint64_t maxBufferSize = QueryMaxBufferLength(ToBackend(GetDevice())->GetMTLDevice());
- if (currentSize > maxBufferSize) {
- return DAWN_OUT_OF_MEMORY_ERROR("Buffer allocation is too large");
- }
+MaybeError Buffer::Initialize(bool mappedAtCreation) {
+ MTLResourceOptions storageMode;
+ if (GetUsage() & kMappableBufferUsages) {
+ storageMode = MTLResourceStorageModeShared;
+ } else {
+ storageMode = MTLResourceStorageModePrivate;
+ }
- mAllocatedSize = currentSize;
- mMtlBuffer.Acquire([ToBackend(GetDevice())->GetMTLDevice()
- newBufferWithLength:currentSize
- options:storageMode]);
- if (mMtlBuffer == nullptr) {
- return DAWN_OUT_OF_MEMORY_ERROR("Buffer allocation failed");
- }
+ uint32_t alignment = 1;
+#if DAWN_PLATFORM_IS(MACOS)
+ // [MTLBlitCommandEncoder fillBuffer] requires the size to be a multiple of 4 on MacOS.
+ alignment = 4;
+#endif
- // The buffers with mappedAtCreation == true will be initialized in
- // BufferBase::MapAtCreation().
- if (GetDevice()->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting) &&
- !mappedAtCreation) {
- CommandRecordingContext* commandContext =
- ToBackend(GetDevice())->GetPendingCommandContext();
- ClearBuffer(commandContext, uint8_t(1u));
- }
+ // The Metal validation layer requires the sizes of uniform and storage buffers to be no
+ // less than the size of the buffer block defined in the shader, and the overall size of
+ // the buffer must be aligned to the largest alignment of its members.
+ if (GetUsage() &
+ (wgpu::BufferUsage::Uniform | wgpu::BufferUsage::Storage | kInternalStorageBuffer)) {
+ ASSERT(IsAligned(kMinUniformOrStorageBufferAlignment, alignment));
+ alignment = kMinUniformOrStorageBufferAlignment;
+ }
- // Initialize the padding bytes to zero.
- if (GetDevice()->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse) &&
- !mappedAtCreation) {
- uint32_t paddingBytes = GetAllocatedSize() - GetSize();
- if (paddingBytes > 0) {
- uint32_t clearSize = Align(paddingBytes, 4);
- uint64_t clearOffset = GetAllocatedSize() - clearSize;
-
- CommandRecordingContext* commandContext =
- ToBackend(GetDevice())->GetPendingCommandContext();
- ClearBuffer(commandContext, 0, clearOffset, clearSize);
- }
- }
- return {};
+ // The vertex pulling transform requires at least 4 bytes in the buffer.
+ // 0-sized vertex buffer bindings are allowed, so we always need an additional 4 bytes
+ // after the end.
+ NSUInteger extraBytes = 0u;
+ if ((GetUsage() & wgpu::BufferUsage::Vertex) != 0) {
+ extraBytes = 4u;
}
- Buffer::~Buffer() = default;
+ if (GetSize() > std::numeric_limits<NSUInteger>::max() - extraBytes) {
+ return DAWN_OUT_OF_MEMORY_ERROR("Buffer allocation is too large");
+ }
+ NSUInteger currentSize =
+ std::max(static_cast<NSUInteger>(GetSize()) + extraBytes, NSUInteger(4));
- id<MTLBuffer> Buffer::GetMTLBuffer() const {
- return mMtlBuffer.Get();
+ if (currentSize > std::numeric_limits<NSUInteger>::max() - alignment) {
+ // Alignment would overflow.
+ return DAWN_OUT_OF_MEMORY_ERROR("Buffer allocation is too large");
}
+ currentSize = Align(currentSize, alignment);
- bool Buffer::IsCPUWritableAtCreation() const {
- // TODO(enga): Handle CPU-visible memory on UMA
- return GetUsage() & kMappableBufferUsages;
+ uint64_t maxBufferSize = QueryMaxBufferLength(ToBackend(GetDevice())->GetMTLDevice());
+ if (currentSize > maxBufferSize) {
+ return DAWN_OUT_OF_MEMORY_ERROR("Buffer allocation is too large");
}
- MaybeError Buffer::MapAtCreationImpl() {
- return {};
+ mAllocatedSize = currentSize;
+ mMtlBuffer.Acquire([ToBackend(GetDevice())->GetMTLDevice() newBufferWithLength:currentSize
+ options:storageMode]);
+ if (mMtlBuffer == nullptr) {
+ return DAWN_OUT_OF_MEMORY_ERROR("Buffer allocation failed");
}
- MaybeError Buffer::MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) {
+ // The buffers with mappedAtCreation == true will be initialized in
+ // BufferBase::MapAtCreation().
+ if (GetDevice()->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting) &&
+ !mappedAtCreation) {
CommandRecordingContext* commandContext =
ToBackend(GetDevice())->GetPendingCommandContext();
- EnsureDataInitialized(commandContext);
-
- return {};
+ ClearBuffer(commandContext, uint8_t(1u));
}
- void* Buffer::GetMappedPointerImpl() {
- return [*mMtlBuffer contents];
- }
+ // Initialize the padding bytes to zero.
+ if (GetDevice()->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse) && !mappedAtCreation) {
+ uint32_t paddingBytes = GetAllocatedSize() - GetSize();
+ if (paddingBytes > 0) {
+ uint32_t clearSize = Align(paddingBytes, 4);
+ uint64_t clearOffset = GetAllocatedSize() - clearSize;
- void Buffer::UnmapImpl() {
- // Nothing to do, Metal StorageModeShared buffers are always mapped.
+ CommandRecordingContext* commandContext =
+ ToBackend(GetDevice())->GetPendingCommandContext();
+ ClearBuffer(commandContext, 0, clearOffset, clearSize);
+ }
}
+ return {};
+}
- void Buffer::DestroyImpl() {
- BufferBase::DestroyImpl();
- mMtlBuffer = nullptr;
- }
+Buffer::~Buffer() = default;
- bool Buffer::EnsureDataInitialized(CommandRecordingContext* commandContext) {
- if (!NeedsInitialization()) {
- return false;
- }
+id<MTLBuffer> Buffer::GetMTLBuffer() const {
+ return mMtlBuffer.Get();
+}
- InitializeToZero(commandContext);
- return true;
- }
+bool Buffer::IsCPUWritableAtCreation() const {
+ // TODO(enga): Handle CPU-visible memory on UMA
+ return GetUsage() & kMappableBufferUsages;
+}
- bool Buffer::EnsureDataInitializedAsDestination(CommandRecordingContext* commandContext,
- uint64_t offset,
- uint64_t size) {
- if (!NeedsInitialization()) {
- return false;
- }
+MaybeError Buffer::MapAtCreationImpl() {
+ return {};
+}
- if (IsFullBufferRange(offset, size)) {
- SetIsDataInitialized();
- return false;
- }
+MaybeError Buffer::MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) {
+ CommandRecordingContext* commandContext = ToBackend(GetDevice())->GetPendingCommandContext();
+ EnsureDataInitialized(commandContext);
- InitializeToZero(commandContext);
- return true;
- }
+ return {};
+}
- bool Buffer::EnsureDataInitializedAsDestination(CommandRecordingContext* commandContext,
- const CopyTextureToBufferCmd* copy) {
- if (!NeedsInitialization()) {
- return false;
- }
+void* Buffer::GetMappedPointerImpl() {
+ return [*mMtlBuffer contents];
+}
- if (IsFullBufferOverwrittenInTextureToBufferCopy(copy)) {
- SetIsDataInitialized();
- return false;
- }
+void Buffer::UnmapImpl() {
+ // Nothing to do, Metal StorageModeShared buffers are always mapped.
+}
+
+void Buffer::DestroyImpl() {
+ BufferBase::DestroyImpl();
+ mMtlBuffer = nullptr;
+}
- InitializeToZero(commandContext);
- return true;
+bool Buffer::EnsureDataInitialized(CommandRecordingContext* commandContext) {
+ if (!NeedsInitialization()) {
+ return false;
}
- void Buffer::InitializeToZero(CommandRecordingContext* commandContext) {
- ASSERT(NeedsInitialization());
+ InitializeToZero(commandContext);
+ return true;
+}
- ClearBuffer(commandContext, uint8_t(0u));
+bool Buffer::EnsureDataInitializedAsDestination(CommandRecordingContext* commandContext,
+ uint64_t offset,
+ uint64_t size) {
+ if (!NeedsInitialization()) {
+ return false;
+ }
+ if (IsFullBufferRange(offset, size)) {
SetIsDataInitialized();
- GetDevice()->IncrementLazyClearCountForTesting();
+ return false;
+ }
+
+ InitializeToZero(commandContext);
+ return true;
+}
+
+bool Buffer::EnsureDataInitializedAsDestination(CommandRecordingContext* commandContext,
+ const CopyTextureToBufferCmd* copy) {
+ if (!NeedsInitialization()) {
+ return false;
}
- void Buffer::ClearBuffer(CommandRecordingContext* commandContext,
- uint8_t clearValue,
- uint64_t offset,
- uint64_t size) {
- ASSERT(commandContext != nullptr);
- size = size > 0 ? size : GetAllocatedSize();
- ASSERT(size > 0);
- [commandContext->EnsureBlit() fillBuffer:mMtlBuffer.Get()
- range:NSMakeRange(offset, size)
- value:clearValue];
+ if (IsFullBufferOverwrittenInTextureToBufferCopy(copy)) {
+ SetIsDataInitialized();
+ return false;
}
+ InitializeToZero(commandContext);
+ return true;
+}
+
+void Buffer::InitializeToZero(CommandRecordingContext* commandContext) {
+ ASSERT(NeedsInitialization());
+
+ ClearBuffer(commandContext, uint8_t(0u));
+
+ SetIsDataInitialized();
+ GetDevice()->IncrementLazyClearCountForTesting();
+}
+
+void Buffer::ClearBuffer(CommandRecordingContext* commandContext,
+ uint8_t clearValue,
+ uint64_t offset,
+ uint64_t size) {
+ ASSERT(commandContext != nullptr);
+ size = size > 0 ? size : GetAllocatedSize();
+ ASSERT(size > 0);
+ [commandContext->EnsureBlit() fillBuffer:mMtlBuffer.Get()
+ range:NSMakeRange(offset, size)
+ value:clearValue];
+}
+
} // namespace dawn::native::metal
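
Buffer::Initialize above pads and aligns the requested size before allocating: an extra 4 readable bytes for vertex pulling, a 4-byte minimum, 16-byte alignment for uniform/storage usage (plus 4-byte alignment on macOS so [MTLBlitCommandEncoder fillBuffer] can clear the allocation), with every step checked against overflow and the result checked against QueryMaxBufferLength. A standalone sketch of the same arithmetic using plain 64-bit integers and hypothetical usage flags:

```cpp
#include <algorithm>
#include <cstdint>
#include <optional>

// Largest member alignment the Metal validation layer checks for uniform/storage blocks.
constexpr uint64_t kUniformOrStorageAlignment = 16;

uint64_t AlignUp(uint64_t value, uint64_t alignment) {
    return (value + alignment - 1) & ~(alignment - 1);  // alignment must be a power of two
}

// Returns std::nullopt when the padded size overflows or exceeds the device limit.
std::optional<uint64_t> ComputeAllocatedSize(uint64_t requestedSize,
                                             bool usedAsVertex,
                                             bool usedAsUniformOrStorage,
                                             uint64_t maxBufferLength) {
    // Base alignment of 4 so fillBuffer can clear the allocation on macOS
    // (1 elsewhere; we keep 4 here for simplicity).
    uint64_t alignment = usedAsUniformOrStorage ? kUniformOrStorageAlignment : 4;

    // Vertex pulling always needs 4 readable bytes past the end of the binding.
    const uint64_t extraBytes = usedAsVertex ? 4 : 0;
    if (requestedSize > UINT64_MAX - extraBytes) {
        return std::nullopt;  // Padding would overflow.
    }
    uint64_t size = std::max<uint64_t>(requestedSize + extraBytes, 4);

    if (size > UINT64_MAX - alignment) {
        return std::nullopt;  // Alignment would overflow.
    }
    size = AlignUp(size, alignment);

    if (size > maxBufferLength) {
        return std::nullopt;  // Larger than the device can allocate.
    }
    return size;
}
```
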
diff --git a/chromium/third_party/dawn/src/dawn/native/metal/CommandBufferMTL.h b/chromium/third_party/dawn/src/dawn/native/metal/CommandBufferMTL.h
index 6fd68cf10da..0f95ef42437 100644
--- a/chromium/third_party/dawn/src/dawn/native/metal/CommandBufferMTL.h
+++ b/chromium/third_party/dawn/src/dawn/native/metal/CommandBufferMTL.h
@@ -21,48 +21,43 @@
#import <Metal/Metal.h>
namespace dawn::native {
- class CommandEncoder;
+class CommandEncoder;
}
namespace dawn::native::metal {
- class CommandRecordingContext;
- class Device;
- class Texture;
+class CommandRecordingContext;
+class Device;
+class Texture;
- void RecordCopyBufferToTexture(CommandRecordingContext* commandContext,
- id<MTLBuffer> mtlBuffer,
- uint64_t bufferSize,
- uint64_t offset,
- uint32_t bytesPerRow,
- uint32_t rowsPerImage,
- Texture* texture,
- uint32_t mipLevel,
- const Origin3D& origin,
- Aspect aspect,
- const Extent3D& copySize);
+void RecordCopyBufferToTexture(CommandRecordingContext* commandContext,
+ id<MTLBuffer> mtlBuffer,
+ uint64_t bufferSize,
+ uint64_t offset,
+ uint32_t bytesPerRow,
+ uint32_t rowsPerImage,
+ Texture* texture,
+ uint32_t mipLevel,
+ const Origin3D& origin,
+ Aspect aspect,
+ const Extent3D& copySize);
- class CommandBuffer final : public CommandBufferBase {
- public:
- static Ref<CommandBuffer> Create(CommandEncoder* encoder,
- const CommandBufferDescriptor* descriptor);
+class CommandBuffer final : public CommandBufferBase {
+ public:
+ static Ref<CommandBuffer> Create(CommandEncoder* encoder,
+ const CommandBufferDescriptor* descriptor);
- MaybeError FillCommands(CommandRecordingContext* commandContext);
+ CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor);
+ ~CommandBuffer() override;
- private:
- using CommandBufferBase::CommandBufferBase;
+ MaybeError FillCommands(CommandRecordingContext* commandContext);
- MaybeError EncodeComputePass(CommandRecordingContext* commandContext);
- MaybeError EncodeRenderPass(CommandRecordingContext* commandContext,
- MTLRenderPassDescriptor* mtlRenderPass,
- uint32_t width,
- uint32_t height);
+ private:
+ using CommandBufferBase::CommandBufferBase;
- MaybeError EncodeRenderPassInternal(CommandRecordingContext* commandContext,
- MTLRenderPassDescriptor* mtlRenderPass,
- uint32_t width,
- uint32_t height);
- };
+ MaybeError EncodeComputePass(CommandRecordingContext* commandContext);
+ MaybeError EncodeRenderPass(id<MTLRenderCommandEncoder> encoder);
+};
} // namespace dawn::native::metal
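
CreateMTLRenderPassDescriptor in the .mm below translates each attachment's WebGPU load/store ops into Metal load/store actions, with resolve targets selecting the ...MultisampleResolve variants. The core of that mapping as two small helpers (an Objective-C++ sketch assuming <Metal/Metal.h> and Dawn's generated webgpu_cpp.h are available; the Undefined cases are rejected by validation earlier, and the availability handling the real file applies to MTLStoreActionStoreAndMultisampleResolve is omitted):

```objc
#import <Metal/Metal.h>

#include <cstdlib>

#include "dawn/webgpu_cpp.h"

MTLLoadAction ToMTLLoadAction(wgpu::LoadOp op) {
    switch (op) {
        case wgpu::LoadOp::Clear:
            return MTLLoadActionClear;  // Caller also sets clearColor/clearDepth/clearStencil.
        case wgpu::LoadOp::Load:
            return MTLLoadActionLoad;
        default:
            std::abort();  // LoadOp::Undefined never reaches the backend.
    }
}

MTLStoreAction ToMTLStoreAction(wgpu::StoreOp op, bool hasResolveTarget) {
    switch (op) {
        case wgpu::StoreOp::Store:
            return hasResolveTarget ? MTLStoreActionStoreAndMultisampleResolve
                                    : MTLStoreActionStore;
        case wgpu::StoreOp::Discard:
            return hasResolveTarget ? MTLStoreActionMultisampleResolve
                                    : MTLStoreActionDontCare;
        default:
            std::abort();  // StoreOp::Undefined never reaches the backend.
    }
}
```
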
diff --git a/chromium/third_party/dawn/src/dawn/native/metal/CommandBufferMTL.mm b/chromium/third_party/dawn/src/dawn/native/metal/CommandBufferMTL.mm
index fd232b46870..04dab406994 100644
--- a/chromium/third_party/dawn/src/dawn/native/metal/CommandBufferMTL.mm
+++ b/chromium/third_party/dawn/src/dawn/native/metal/CommandBufferMTL.mm
@@ -36,1557 +36,1361 @@
namespace dawn::native::metal {
- namespace {
-
- // Allows this file to use MTLStoreActionStoreAndMultismapleResolve because the logic is
- // first to compute what the "best" Metal render pass descriptor is, then fix it up if we
- // are not on macOS 10.12 (i.e. the EmulateStoreAndMSAAResolve toggle is on).
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wunguarded-availability"
- constexpr MTLStoreAction kMTLStoreActionStoreAndMultisampleResolve =
- MTLStoreActionStoreAndMultisampleResolve;
-#pragma clang diagnostic pop
-
- MTLIndexType MTLIndexFormat(wgpu::IndexFormat format) {
- switch (format) {
- case wgpu::IndexFormat::Uint16:
- return MTLIndexTypeUInt16;
- case wgpu::IndexFormat::Uint32:
- return MTLIndexTypeUInt32;
- case wgpu::IndexFormat::Undefined:
- UNREACHABLE();
- }
+namespace {
+
+MTLIndexType MTLIndexFormat(wgpu::IndexFormat format) {
+ switch (format) {
+ case wgpu::IndexFormat::Uint16:
+ return MTLIndexTypeUInt16;
+ case wgpu::IndexFormat::Uint32:
+ return MTLIndexTypeUInt32;
+ case wgpu::IndexFormat::Undefined:
+ UNREACHABLE();
+ }
+}
+
+NSRef<MTLRenderPassDescriptor> CreateMTLRenderPassDescriptor(BeginRenderPassCmd* renderPass) {
+ // Note that this creates a descriptor that's autoreleased so we don't use AcquireNSRef
+ NSRef<MTLRenderPassDescriptor> descriptorRef = [MTLRenderPassDescriptor renderPassDescriptor];
+ MTLRenderPassDescriptor* descriptor = descriptorRef.Get();
+
+ for (ColorAttachmentIndex attachment :
+ IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
+ uint8_t i = static_cast<uint8_t>(attachment);
+ auto& attachmentInfo = renderPass->colorAttachments[attachment];
+
+ switch (attachmentInfo.loadOp) {
+ case wgpu::LoadOp::Clear:
+ descriptor.colorAttachments[i].loadAction = MTLLoadActionClear;
+ descriptor.colorAttachments[i].clearColor =
+ MTLClearColorMake(attachmentInfo.clearColor.r, attachmentInfo.clearColor.g,
+ attachmentInfo.clearColor.b, attachmentInfo.clearColor.a);
+ break;
+
+ case wgpu::LoadOp::Load:
+ descriptor.colorAttachments[i].loadAction = MTLLoadActionLoad;
+ break;
+
+ case wgpu::LoadOp::Undefined:
+ UNREACHABLE();
+ break;
}
- NSRef<MTLRenderPassDescriptor> CreateMTLRenderPassDescriptor(
- BeginRenderPassCmd* renderPass) {
- // Note that this creates a descriptor that's autoreleased so we don't use AcquireNSRef
- NSRef<MTLRenderPassDescriptor> descriptorRef =
- [MTLRenderPassDescriptor renderPassDescriptor];
- MTLRenderPassDescriptor* descriptor = descriptorRef.Get();
-
- for (ColorAttachmentIndex attachment :
- IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
- uint8_t i = static_cast<uint8_t>(attachment);
- auto& attachmentInfo = renderPass->colorAttachments[attachment];
-
- switch (attachmentInfo.loadOp) {
- case wgpu::LoadOp::Clear:
- descriptor.colorAttachments[i].loadAction = MTLLoadActionClear;
- descriptor.colorAttachments[i].clearColor = MTLClearColorMake(
- attachmentInfo.clearColor.r, attachmentInfo.clearColor.g,
- attachmentInfo.clearColor.b, attachmentInfo.clearColor.a);
- break;
-
- case wgpu::LoadOp::Load:
- descriptor.colorAttachments[i].loadAction = MTLLoadActionLoad;
- break;
-
- case wgpu::LoadOp::Undefined:
- UNREACHABLE();
- break;
- }
-
- auto colorAttachment = ToBackend(attachmentInfo.view)->GetAttachmentInfo();
- descriptor.colorAttachments[i].texture = colorAttachment.texture.Get();
- descriptor.colorAttachments[i].level = colorAttachment.baseMipLevel;
- descriptor.colorAttachments[i].slice = colorAttachment.baseArrayLayer;
-
- bool hasResolveTarget = attachmentInfo.resolveTarget != nullptr;
- if (hasResolveTarget) {
- auto resolveAttachment =
- ToBackend(attachmentInfo.resolveTarget)->GetAttachmentInfo();
- descriptor.colorAttachments[i].resolveTexture = resolveAttachment.texture.Get();
- descriptor.colorAttachments[i].resolveLevel = resolveAttachment.baseMipLevel;
- descriptor.colorAttachments[i].resolveSlice = resolveAttachment.baseArrayLayer;
-
- switch (attachmentInfo.storeOp) {
- case wgpu::StoreOp::Store:
- descriptor.colorAttachments[i].storeAction =
- kMTLStoreActionStoreAndMultisampleResolve;
- break;
- case wgpu::StoreOp::Discard:
- descriptor.colorAttachments[i].storeAction =
- MTLStoreActionMultisampleResolve;
- break;
- case wgpu::StoreOp::Undefined:
- UNREACHABLE();
- break;
- }
- } else {
- switch (attachmentInfo.storeOp) {
- case wgpu::StoreOp::Store:
- descriptor.colorAttachments[i].storeAction = MTLStoreActionStore;
- break;
- case wgpu::StoreOp::Discard:
- descriptor.colorAttachments[i].storeAction = MTLStoreActionDontCare;
- break;
- case wgpu::StoreOp::Undefined:
- UNREACHABLE();
- break;
- }
- }
+ auto colorAttachment = ToBackend(attachmentInfo.view)->GetAttachmentInfo();
+ descriptor.colorAttachments[i].texture = colorAttachment.texture.Get();
+ descriptor.colorAttachments[i].level = colorAttachment.baseMipLevel;
+ descriptor.colorAttachments[i].slice = colorAttachment.baseArrayLayer;
+
+ bool hasResolveTarget = attachmentInfo.resolveTarget != nullptr;
+ if (hasResolveTarget) {
+ auto resolveAttachment = ToBackend(attachmentInfo.resolveTarget)->GetAttachmentInfo();
+ descriptor.colorAttachments[i].resolveTexture = resolveAttachment.texture.Get();
+ descriptor.colorAttachments[i].resolveLevel = resolveAttachment.baseMipLevel;
+ descriptor.colorAttachments[i].resolveSlice = resolveAttachment.baseArrayLayer;
+
+ switch (attachmentInfo.storeOp) {
+ case wgpu::StoreOp::Store:
+ descriptor.colorAttachments[i].storeAction =
+ kMTLStoreActionStoreAndMultisampleResolve;
+ break;
+ case wgpu::StoreOp::Discard:
+ descriptor.colorAttachments[i].storeAction = MTLStoreActionMultisampleResolve;
+ break;
+ case wgpu::StoreOp::Undefined:
+ UNREACHABLE();
+ break;
}
+ } else {
+ switch (attachmentInfo.storeOp) {
+ case wgpu::StoreOp::Store:
+ descriptor.colorAttachments[i].storeAction = MTLStoreActionStore;
+ break;
+ case wgpu::StoreOp::Discard:
+ descriptor.colorAttachments[i].storeAction = MTLStoreActionDontCare;
+ break;
+ case wgpu::StoreOp::Undefined:
+ UNREACHABLE();
+ break;
+ }
+ }
+ }
- if (renderPass->attachmentState->HasDepthStencilAttachment()) {
- auto& attachmentInfo = renderPass->depthStencilAttachment;
-
- auto depthStencilAttachment = ToBackend(attachmentInfo.view)->GetAttachmentInfo();
- const Format& format = attachmentInfo.view->GetFormat();
-
- if (format.HasDepth()) {
- descriptor.depthAttachment.texture = depthStencilAttachment.texture.Get();
- descriptor.depthAttachment.level = depthStencilAttachment.baseMipLevel;
- descriptor.depthAttachment.slice = depthStencilAttachment.baseArrayLayer;
-
- switch (attachmentInfo.depthStoreOp) {
- case wgpu::StoreOp::Store:
- descriptor.depthAttachment.storeAction = MTLStoreActionStore;
- break;
-
- case wgpu::StoreOp::Discard:
- descriptor.depthAttachment.storeAction = MTLStoreActionDontCare;
- break;
+ if (renderPass->attachmentState->HasDepthStencilAttachment()) {
+ auto& attachmentInfo = renderPass->depthStencilAttachment;
- case wgpu::StoreOp::Undefined:
- UNREACHABLE();
- break;
- }
+ auto depthStencilAttachment = ToBackend(attachmentInfo.view)->GetAttachmentInfo();
+ const Format& format = attachmentInfo.view->GetFormat();
- switch (attachmentInfo.depthLoadOp) {
- case wgpu::LoadOp::Clear:
- descriptor.depthAttachment.loadAction = MTLLoadActionClear;
- descriptor.depthAttachment.clearDepth = attachmentInfo.clearDepth;
- break;
+ if (format.HasDepth()) {
+ descriptor.depthAttachment.texture = depthStencilAttachment.texture.Get();
+ descriptor.depthAttachment.level = depthStencilAttachment.baseMipLevel;
+ descriptor.depthAttachment.slice = depthStencilAttachment.baseArrayLayer;
- case wgpu::LoadOp::Load:
- descriptor.depthAttachment.loadAction = MTLLoadActionLoad;
- break;
+ switch (attachmentInfo.depthStoreOp) {
+ case wgpu::StoreOp::Store:
+ descriptor.depthAttachment.storeAction = MTLStoreActionStore;
+ break;
- case wgpu::LoadOp::Undefined:
- UNREACHABLE();
- break;
- }
- }
+ case wgpu::StoreOp::Discard:
+ descriptor.depthAttachment.storeAction = MTLStoreActionDontCare;
+ break;
- if (format.HasStencil()) {
- descriptor.stencilAttachment.texture = depthStencilAttachment.texture.Get();
- descriptor.stencilAttachment.level = depthStencilAttachment.baseMipLevel;
- descriptor.stencilAttachment.slice = depthStencilAttachment.baseArrayLayer;
+ case wgpu::StoreOp::Undefined:
+ UNREACHABLE();
+ break;
+ }
- switch (attachmentInfo.stencilStoreOp) {
- case wgpu::StoreOp::Store:
- descriptor.stencilAttachment.storeAction = MTLStoreActionStore;
- break;
+ switch (attachmentInfo.depthLoadOp) {
+ case wgpu::LoadOp::Clear:
+ descriptor.depthAttachment.loadAction = MTLLoadActionClear;
+ descriptor.depthAttachment.clearDepth = attachmentInfo.clearDepth;
+ break;
- case wgpu::StoreOp::Discard:
- descriptor.stencilAttachment.storeAction = MTLStoreActionDontCare;
- break;
+ case wgpu::LoadOp::Load:
+ descriptor.depthAttachment.loadAction = MTLLoadActionLoad;
+ break;
- case wgpu::StoreOp::Undefined:
- UNREACHABLE();
- break;
- }
+ case wgpu::LoadOp::Undefined:
+ UNREACHABLE();
+ break;
+ }
+ }
- switch (attachmentInfo.stencilLoadOp) {
- case wgpu::LoadOp::Clear:
- descriptor.stencilAttachment.loadAction = MTLLoadActionClear;
- descriptor.stencilAttachment.clearStencil = attachmentInfo.clearStencil;
- break;
+ if (format.HasStencil()) {
+ descriptor.stencilAttachment.texture = depthStencilAttachment.texture.Get();
+ descriptor.stencilAttachment.level = depthStencilAttachment.baseMipLevel;
+ descriptor.stencilAttachment.slice = depthStencilAttachment.baseArrayLayer;
- case wgpu::LoadOp::Load:
- descriptor.stencilAttachment.loadAction = MTLLoadActionLoad;
- break;
+ switch (attachmentInfo.stencilStoreOp) {
+ case wgpu::StoreOp::Store:
+ descriptor.stencilAttachment.storeAction = MTLStoreActionStore;
+ break;
- case wgpu::LoadOp::Undefined:
- UNREACHABLE();
- break;
- }
- }
- }
+ case wgpu::StoreOp::Discard:
+ descriptor.stencilAttachment.storeAction = MTLStoreActionDontCare;
+ break;
- if (renderPass->occlusionQuerySet.Get() != nullptr) {
- descriptor.visibilityResultBuffer =
- ToBackend(renderPass->occlusionQuerySet.Get())->GetVisibilityBuffer();
+ case wgpu::StoreOp::Undefined:
+ UNREACHABLE();
+ break;
}
- return descriptorRef;
- }
+ switch (attachmentInfo.stencilLoadOp) {
+ case wgpu::LoadOp::Clear:
+ descriptor.stencilAttachment.loadAction = MTLLoadActionClear;
+ descriptor.stencilAttachment.clearStencil = attachmentInfo.clearStencil;
+ break;
- // Helper function for Toggle EmulateStoreAndMSAAResolve
- void ResolveInAnotherRenderPass(
- CommandRecordingContext* commandContext,
- const MTLRenderPassDescriptor* mtlRenderPass,
- const std::array<id<MTLTexture>, kMaxColorAttachments>& resolveTextures) {
- // Note that this creates a descriptor that's autoreleased so we don't use AcquireNSRef
- NSRef<MTLRenderPassDescriptor> mtlRenderPassForResolveRef =
- [MTLRenderPassDescriptor renderPassDescriptor];
- MTLRenderPassDescriptor* mtlRenderPassForResolve = mtlRenderPassForResolveRef.Get();
-
- for (uint32_t i = 0; i < kMaxColorAttachments; ++i) {
- if (resolveTextures[i] == nullptr) {
- continue;
- }
+ case wgpu::LoadOp::Load:
+ descriptor.stencilAttachment.loadAction = MTLLoadActionLoad;
+ break;
- mtlRenderPassForResolve.colorAttachments[i].texture =
- mtlRenderPass.colorAttachments[i].texture;
- mtlRenderPassForResolve.colorAttachments[i].loadAction = MTLLoadActionLoad;
- mtlRenderPassForResolve.colorAttachments[i].storeAction =
- MTLStoreActionMultisampleResolve;
- mtlRenderPassForResolve.colorAttachments[i].resolveTexture = resolveTextures[i];
- mtlRenderPassForResolve.colorAttachments[i].resolveLevel =
- mtlRenderPass.colorAttachments[i].resolveLevel;
- mtlRenderPassForResolve.colorAttachments[i].resolveSlice =
- mtlRenderPass.colorAttachments[i].resolveSlice;
+ case wgpu::LoadOp::Undefined:
+ UNREACHABLE();
+ break;
}
-
- commandContext->BeginRender(mtlRenderPassForResolve);
- commandContext->EndRender();
}
+ }
- // Helper functions for Toggle AlwaysResolveIntoZeroLevelAndLayer
- ResultOrError<NSPRef<id<MTLTexture>>> CreateResolveTextureForWorkaround(
- Device* device,
- MTLPixelFormat mtlFormat,
- uint32_t width,
- uint32_t height) {
- NSRef<MTLTextureDescriptor> mtlDescRef = AcquireNSRef([MTLTextureDescriptor new]);
- MTLTextureDescriptor* mtlDesc = mtlDescRef.Get();
-
- mtlDesc.textureType = MTLTextureType2D;
- mtlDesc.usage = MTLTextureUsageRenderTarget;
- mtlDesc.pixelFormat = mtlFormat;
- mtlDesc.width = width;
- mtlDesc.height = height;
- mtlDesc.depth = 1;
- mtlDesc.mipmapLevelCount = 1;
- mtlDesc.arrayLength = 1;
- mtlDesc.storageMode = MTLStorageModePrivate;
- mtlDesc.sampleCount = 1;
-
- id<MTLTexture> texture = [device->GetMTLDevice() newTextureWithDescriptor:mtlDesc];
- if (texture == nil) {
- return DAWN_OUT_OF_MEMORY_ERROR("Allocation of temporary texture failed.");
- }
-
- return AcquireNSPRef(texture);
- }
+ if (renderPass->occlusionQuerySet.Get() != nullptr) {
+ descriptor.visibilityResultBuffer =
+ ToBackend(renderPass->occlusionQuerySet.Get())->GetVisibilityBuffer();
+ }
- void CopyIntoTrueResolveTarget(CommandRecordingContext* commandContext,
- id<MTLTexture> mtlTrueResolveTexture,
- uint32_t trueResolveLevel,
- uint32_t trueResolveSlice,
- id<MTLTexture> temporaryResolveTexture,
- uint32_t width,
- uint32_t height) {
- [commandContext->EnsureBlit() copyFromTexture:temporaryResolveTexture
- sourceSlice:0
- sourceLevel:0
- sourceOrigin:MTLOriginMake(0, 0, 0)
- sourceSize:MTLSizeMake(width, height, 1)
- toTexture:mtlTrueResolveTexture
- destinationSlice:trueResolveSlice
- destinationLevel:trueResolveLevel
- destinationOrigin:MTLOriginMake(0, 0, 0)];
+ return descriptorRef;
+}
+
+// Metal uses a physical addressing mode which means buffers in the shading language are
+// just pointers to the virtual address of their start. This means there is no way to know
+// the length of a buffer to compute the length() of unsized arrays at the end of storage
+// buffers. Tint implements the length() of unsized arrays by requiring an extra
+// buffer that contains the length of other buffers. This structure keeps track of the
+// lengths of storage buffers and applies them to the reserved "buffer length buffer" when
+// needed for a draw or a dispatch.
+struct StorageBufferLengthTracker {
+ wgpu::ShaderStage dirtyStages = wgpu::ShaderStage::None;
+
+ // The lengths of buffers are stored as 32-bit integers because that is the width the
+ // MSL code generated by Tint expects.
+ // UBOs require that we align the max buffer count to 4 elements (16 bytes).
+ static constexpr size_t MaxBufferCount = ((kGenericMetalBufferSlots + 3) / 4) * 4;
+ PerStage<std::array<uint32_t, MaxBufferCount>> data;
+
+ void Apply(id<MTLRenderCommandEncoder> render,
+ RenderPipeline* pipeline,
+ bool enableVertexPulling) {
+ wgpu::ShaderStage stagesToApply =
+ dirtyStages & pipeline->GetStagesRequiringStorageBufferLength();
+
+ if (stagesToApply == wgpu::ShaderStage::None) {
+ return;
}
- // Metal uses a physical addressing mode which means buffers in the shading language are
- // just pointers to the virtual address of their start. This means there is no way to know
- // the length of a buffer to compute the length() of unsized arrays at the end of storage
- // buffers. Tint implements the length() of unsized arrays by requiring an extra
- // buffer that contains the length of other buffers. This structure that keeps track of the
- // length of storage buffers and can apply them to the reserved "buffer length buffer" when
- // needed for a draw or a dispatch.
- struct StorageBufferLengthTracker {
- wgpu::ShaderStage dirtyStages = wgpu::ShaderStage::None;
-
- // The lengths of buffers are stored as 32bit integers because that is the width the
- // MSL code generated by Tint expects.
- // UBOs require we align the max buffer count to 4 elements (16 bytes).
- static constexpr size_t MaxBufferCount = ((kGenericMetalBufferSlots + 3) / 4) * 4;
- PerStage<std::array<uint32_t, MaxBufferCount>> data;
-
- void Apply(id<MTLRenderCommandEncoder> render,
- RenderPipeline* pipeline,
- bool enableVertexPulling) {
- wgpu::ShaderStage stagesToApply =
- dirtyStages & pipeline->GetStagesRequiringStorageBufferLength();
-
- if (stagesToApply == wgpu::ShaderStage::None) {
- return;
- }
+ if (stagesToApply & wgpu::ShaderStage::Vertex) {
+ uint32_t bufferCount =
+ ToBackend(pipeline->GetLayout())->GetBufferBindingCount(SingleShaderStage::Vertex);
- if (stagesToApply & wgpu::ShaderStage::Vertex) {
- uint32_t bufferCount = ToBackend(pipeline->GetLayout())
- ->GetBufferBindingCount(SingleShaderStage::Vertex);
+ if (enableVertexPulling) {
+ bufferCount += pipeline->GetVertexBufferCount();
+ }
- if (enableVertexPulling) {
- bufferCount += pipeline->GetVertexBufferCount();
- }
+ bufferCount = Align(bufferCount, 4);
+ ASSERT(bufferCount <= data[SingleShaderStage::Vertex].size());
- bufferCount = Align(bufferCount, 4);
- ASSERT(bufferCount <= data[SingleShaderStage::Vertex].size());
+ [render setVertexBytes:data[SingleShaderStage::Vertex].data()
+ length:sizeof(uint32_t) * bufferCount
+ atIndex:kBufferLengthBufferSlot];
+ }
- [render setVertexBytes:data[SingleShaderStage::Vertex].data()
- length:sizeof(uint32_t) * bufferCount
- atIndex:kBufferLengthBufferSlot];
- }
+ if (stagesToApply & wgpu::ShaderStage::Fragment) {
+ uint32_t bufferCount = ToBackend(pipeline->GetLayout())
+ ->GetBufferBindingCount(SingleShaderStage::Fragment);
+ bufferCount = Align(bufferCount, 4);
+ ASSERT(bufferCount <= data[SingleShaderStage::Fragment].size());
- if (stagesToApply & wgpu::ShaderStage::Fragment) {
- uint32_t bufferCount = ToBackend(pipeline->GetLayout())
- ->GetBufferBindingCount(SingleShaderStage::Fragment);
- bufferCount = Align(bufferCount, 4);
- ASSERT(bufferCount <= data[SingleShaderStage::Fragment].size());
+ [render setFragmentBytes:data[SingleShaderStage::Fragment].data()
+ length:sizeof(uint32_t) * bufferCount
+ atIndex:kBufferLengthBufferSlot];
+ }
- [render setFragmentBytes:data[SingleShaderStage::Fragment].data()
- length:sizeof(uint32_t) * bufferCount
- atIndex:kBufferLengthBufferSlot];
- }
+ // Only mark clean stages that were actually applied.
+ dirtyStages ^= stagesToApply;
+ }
- // Only mark clean stages that were actually applied.
- dirtyStages ^= stagesToApply;
- }
+ void Apply(id<MTLComputeCommandEncoder> compute, ComputePipeline* pipeline) {
+ if (!(dirtyStages & wgpu::ShaderStage::Compute)) {
+ return;
+ }
- void Apply(id<MTLComputeCommandEncoder> compute, ComputePipeline* pipeline) {
- if (!(dirtyStages & wgpu::ShaderStage::Compute)) {
- return;
- }
+ if (!pipeline->RequiresStorageBufferLength()) {
+ return;
+ }
- if (!pipeline->RequiresStorageBufferLength()) {
- return;
- }
+ uint32_t bufferCount =
+ ToBackend(pipeline->GetLayout())->GetBufferBindingCount(SingleShaderStage::Compute);
+ bufferCount = Align(bufferCount, 4);
+ ASSERT(bufferCount <= data[SingleShaderStage::Compute].size());
- uint32_t bufferCount = ToBackend(pipeline->GetLayout())
- ->GetBufferBindingCount(SingleShaderStage::Compute);
- bufferCount = Align(bufferCount, 4);
- ASSERT(bufferCount <= data[SingleShaderStage::Compute].size());
+ [compute setBytes:data[SingleShaderStage::Compute].data()
+ length:sizeof(uint32_t) * bufferCount
+ atIndex:kBufferLengthBufferSlot];
- [compute setBytes:data[SingleShaderStage::Compute].data()
- length:sizeof(uint32_t) * bufferCount
- atIndex:kBufferLengthBufferSlot];
+ dirtyStages ^= wgpu::ShaderStage::Compute;
+ }
+};
+
+// Keeps track of the dirty bind groups so they can be lazily applied when we know the
+// pipeline state.
+// Bind groups may be inherited because bind groups are packed in the buffer /
+// texture tables in contiguous order.
+class BindGroupTracker : public BindGroupTrackerBase<true, uint64_t> {
+ public:
+ explicit BindGroupTracker(StorageBufferLengthTracker* lengthTracker)
+ : BindGroupTrackerBase(), mLengthTracker(lengthTracker) {}
+
+ template <typename Encoder>
+ void Apply(Encoder encoder) {
+ BeforeApply();
+ for (BindGroupIndex index : IterateBitSet(mDirtyBindGroupsObjectChangedOrIsDynamic)) {
+ ApplyBindGroup(encoder, index, ToBackend(mBindGroups[index]),
+ mDynamicOffsetCounts[index], mDynamicOffsets[index].data(),
+ ToBackend(mPipelineLayout));
+ }
+ AfterApply();
+ }
- dirtyStages ^= wgpu::ShaderStage::Compute;
+ private:
+ // Handles a call to SetBindGroup, directing the commands to the correct encoder.
+ // There is a single function that takes both encoders to factor the code. Other
+ // approaches like templates wouldn't work because the method names differ between the
+ // two encoder types.
+ void ApplyBindGroupImpl(id<MTLRenderCommandEncoder> render,
+ id<MTLComputeCommandEncoder> compute,
+ BindGroupIndex index,
+ BindGroup* group,
+ uint32_t dynamicOffsetCount,
+ uint64_t* dynamicOffsets,
+ PipelineLayout* pipelineLayout) {
+ uint32_t currentDynamicBufferIndex = 0;
+
+ // TODO(crbug.com/dawn/854): Maintain buffers and offsets arrays in BindGroup
+ // so that we only have to do one setVertexBuffers and one setFragmentBuffers
+ // call here.
+ for (BindingIndex bindingIndex{0}; bindingIndex < group->GetLayout()->GetBindingCount();
+ ++bindingIndex) {
+ const BindingInfo& bindingInfo = group->GetLayout()->GetBindingInfo(bindingIndex);
+
+ bool hasVertStage =
+ bindingInfo.visibility & wgpu::ShaderStage::Vertex && render != nullptr;
+ bool hasFragStage =
+ bindingInfo.visibility & wgpu::ShaderStage::Fragment && render != nullptr;
+ bool hasComputeStage =
+ bindingInfo.visibility & wgpu::ShaderStage::Compute && compute != nullptr;
+
+ uint32_t vertIndex = 0;
+ uint32_t fragIndex = 0;
+ uint32_t computeIndex = 0;
+
+ if (hasVertStage) {
+ vertIndex = pipelineLayout->GetBindingIndexInfo(
+ SingleShaderStage::Vertex)[index][bindingIndex];
}
- };
-
- // Keeps track of the dirty bind groups so they can be lazily applied when we know the
- // pipeline state.
- // Bind groups may be inherited because bind groups are packed in the buffer /
- // texture tables in contiguous order.
- class BindGroupTracker : public BindGroupTrackerBase<true, uint64_t> {
- public:
- explicit BindGroupTracker(StorageBufferLengthTracker* lengthTracker)
- : BindGroupTrackerBase(), mLengthTracker(lengthTracker) {
+ if (hasFragStage) {
+ fragIndex = pipelineLayout->GetBindingIndexInfo(
+ SingleShaderStage::Fragment)[index][bindingIndex];
}
-
- template <typename Encoder>
- void Apply(Encoder encoder) {
- BeforeApply();
- for (BindGroupIndex index :
- IterateBitSet(mDirtyBindGroupsObjectChangedOrIsDynamic)) {
- ApplyBindGroup(encoder, index, ToBackend(mBindGroups[index]),
- mDynamicOffsetCounts[index], mDynamicOffsets[index].data(),
- ToBackend(mPipelineLayout));
- }
- AfterApply();
+ if (hasComputeStage) {
+ computeIndex = pipelineLayout->GetBindingIndexInfo(
+ SingleShaderStage::Compute)[index][bindingIndex];
}
- private:
- // Handles a call to SetBindGroup, directing the commands to the correct encoder.
- // There is a single function that takes both encoders to factor code. Other approaches
- // like templates wouldn't work because the name of methods are different between the
- // two encoder types.
- void ApplyBindGroupImpl(id<MTLRenderCommandEncoder> render,
- id<MTLComputeCommandEncoder> compute,
- BindGroupIndex index,
- BindGroup* group,
- uint32_t dynamicOffsetCount,
- uint64_t* dynamicOffsets,
- PipelineLayout* pipelineLayout) {
- uint32_t currentDynamicBufferIndex = 0;
-
- // TODO(crbug.com/dawn/854): Maintain buffers and offsets arrays in BindGroup
- // so that we only have to do one setVertexBuffers and one setFragmentBuffers
- // call here.
- for (BindingIndex bindingIndex{0};
- bindingIndex < group->GetLayout()->GetBindingCount(); ++bindingIndex) {
- const BindingInfo& bindingInfo =
- group->GetLayout()->GetBindingInfo(bindingIndex);
-
- bool hasVertStage =
- bindingInfo.visibility & wgpu::ShaderStage::Vertex && render != nullptr;
- bool hasFragStage =
- bindingInfo.visibility & wgpu::ShaderStage::Fragment && render != nullptr;
- bool hasComputeStage =
- bindingInfo.visibility & wgpu::ShaderStage::Compute && compute != nullptr;
-
- uint32_t vertIndex = 0;
- uint32_t fragIndex = 0;
- uint32_t computeIndex = 0;
+ switch (bindingInfo.bindingType) {
+ case BindingInfoType::Buffer: {
+ const BufferBinding& binding = group->GetBindingAsBufferBinding(bindingIndex);
+ const id<MTLBuffer> buffer = ToBackend(binding.buffer)->GetMTLBuffer();
+ NSUInteger offset = binding.offset;
+
+ // TODO(crbug.com/dawn/854): Record bound buffer status to use
+ // setBufferOffset to achieve better performance.
+ if (bindingInfo.buffer.hasDynamicOffset) {
+ offset += dynamicOffsets[currentDynamicBufferIndex];
+ currentDynamicBufferIndex++;
+ }
if (hasVertStage) {
- vertIndex = pipelineLayout->GetBindingIndexInfo(
- SingleShaderStage::Vertex)[index][bindingIndex];
+ mLengthTracker->data[SingleShaderStage::Vertex][vertIndex] = binding.size;
+ mLengthTracker->dirtyStages |= wgpu::ShaderStage::Vertex;
+ [render setVertexBuffers:&buffer
+ offsets:&offset
+ withRange:NSMakeRange(vertIndex, 1)];
}
if (hasFragStage) {
- fragIndex = pipelineLayout->GetBindingIndexInfo(
- SingleShaderStage::Fragment)[index][bindingIndex];
+ mLengthTracker->data[SingleShaderStage::Fragment][fragIndex] = binding.size;
+ mLengthTracker->dirtyStages |= wgpu::ShaderStage::Fragment;
+ [render setFragmentBuffers:&buffer
+ offsets:&offset
+ withRange:NSMakeRange(fragIndex, 1)];
}
if (hasComputeStage) {
- computeIndex = pipelineLayout->GetBindingIndexInfo(
- SingleShaderStage::Compute)[index][bindingIndex];
+ mLengthTracker->data[SingleShaderStage::Compute][computeIndex] =
+ binding.size;
+ mLengthTracker->dirtyStages |= wgpu::ShaderStage::Compute;
+ [compute setBuffers:&buffer
+ offsets:&offset
+ withRange:NSMakeRange(computeIndex, 1)];
}
- switch (bindingInfo.bindingType) {
- case BindingInfoType::Buffer: {
- const BufferBinding& binding =
- group->GetBindingAsBufferBinding(bindingIndex);
- const id<MTLBuffer> buffer = ToBackend(binding.buffer)->GetMTLBuffer();
- NSUInteger offset = binding.offset;
-
- // TODO(crbug.com/dawn/854): Record bound buffer status to use
- // setBufferOffset to achieve better performance.
- if (bindingInfo.buffer.hasDynamicOffset) {
- offset += dynamicOffsets[currentDynamicBufferIndex];
- currentDynamicBufferIndex++;
- }
-
- if (hasVertStage) {
- mLengthTracker->data[SingleShaderStage::Vertex][vertIndex] =
- binding.size;
- mLengthTracker->dirtyStages |= wgpu::ShaderStage::Vertex;
- [render setVertexBuffers:&buffer
- offsets:&offset
- withRange:NSMakeRange(vertIndex, 1)];
- }
- if (hasFragStage) {
- mLengthTracker->data[SingleShaderStage::Fragment][fragIndex] =
- binding.size;
- mLengthTracker->dirtyStages |= wgpu::ShaderStage::Fragment;
- [render setFragmentBuffers:&buffer
- offsets:&offset
- withRange:NSMakeRange(fragIndex, 1)];
- }
- if (hasComputeStage) {
- mLengthTracker->data[SingleShaderStage::Compute][computeIndex] =
- binding.size;
- mLengthTracker->dirtyStages |= wgpu::ShaderStage::Compute;
- [compute setBuffers:&buffer
- offsets:&offset
- withRange:NSMakeRange(computeIndex, 1)];
- }
-
- break;
- }
-
- case BindingInfoType::Sampler: {
- auto sampler = ToBackend(group->GetBindingAsSampler(bindingIndex));
- if (hasVertStage) {
- [render setVertexSamplerState:sampler->GetMTLSamplerState()
- atIndex:vertIndex];
- }
- if (hasFragStage) {
- [render setFragmentSamplerState:sampler->GetMTLSamplerState()
- atIndex:fragIndex];
- }
- if (hasComputeStage) {
- [compute setSamplerState:sampler->GetMTLSamplerState()
- atIndex:computeIndex];
- }
- break;
- }
-
- case BindingInfoType::Texture:
- case BindingInfoType::StorageTexture: {
- auto textureView =
- ToBackend(group->GetBindingAsTextureView(bindingIndex));
- if (hasVertStage) {
- [render setVertexTexture:textureView->GetMTLTexture()
- atIndex:vertIndex];
- }
- if (hasFragStage) {
- [render setFragmentTexture:textureView->GetMTLTexture()
- atIndex:fragIndex];
- }
- if (hasComputeStage) {
- [compute setTexture:textureView->GetMTLTexture()
- atIndex:computeIndex];
- }
- break;
- }
+ break;
+ }
- case BindingInfoType::ExternalTexture:
- UNREACHABLE();
+ case BindingInfoType::Sampler: {
+ auto sampler = ToBackend(group->GetBindingAsSampler(bindingIndex));
+ if (hasVertStage) {
+ [render setVertexSamplerState:sampler->GetMTLSamplerState()
+ atIndex:vertIndex];
}
+ if (hasFragStage) {
+ [render setFragmentSamplerState:sampler->GetMTLSamplerState()
+ atIndex:fragIndex];
+ }
+ if (hasComputeStage) {
+ [compute setSamplerState:sampler->GetMTLSamplerState()
+ atIndex:computeIndex];
+ }
+ break;
}
- }
- template <typename... Args>
- void ApplyBindGroup(id<MTLRenderCommandEncoder> encoder, Args&&... args) {
- ApplyBindGroupImpl(encoder, nullptr, std::forward<Args&&>(args)...);
- }
+ case BindingInfoType::Texture:
+ case BindingInfoType::StorageTexture: {
+ auto textureView = ToBackend(group->GetBindingAsTextureView(bindingIndex));
+ if (hasVertStage) {
+ [render setVertexTexture:textureView->GetMTLTexture() atIndex:vertIndex];
+ }
+ if (hasFragStage) {
+ [render setFragmentTexture:textureView->GetMTLTexture() atIndex:fragIndex];
+ }
+ if (hasComputeStage) {
+ [compute setTexture:textureView->GetMTLTexture() atIndex:computeIndex];
+ }
+ break;
+ }
- template <typename... Args>
- void ApplyBindGroup(id<MTLComputeCommandEncoder> encoder, Args&&... args) {
- ApplyBindGroupImpl(nullptr, encoder, std::forward<Args&&>(args)...);
+ case BindingInfoType::ExternalTexture:
+ UNREACHABLE();
}
+ }
+ }
- StorageBufferLengthTracker* mLengthTracker;
- };
+ template <typename... Args>
+ void ApplyBindGroup(id<MTLRenderCommandEncoder> encoder, Args&&... args) {
+ ApplyBindGroupImpl(encoder, nullptr, std::forward<Args&&>(args)...);
+ }
- // Keeps track of the dirty vertex buffer values so they can be lazily applied when we know
- // all the relevant state.
- class VertexBufferTracker {
- public:
- explicit VertexBufferTracker(StorageBufferLengthTracker* lengthTracker)
- : mLengthTracker(lengthTracker) {
- }
+ template <typename... Args>
+ void ApplyBindGroup(id<MTLComputeCommandEncoder> encoder, Args&&... args) {
+ ApplyBindGroupImpl(nullptr, encoder, std::forward<Args&&>(args)...);
+ }
- void OnSetVertexBuffer(VertexBufferSlot slot, Buffer* buffer, uint64_t offset) {
- mVertexBuffers[slot] = buffer->GetMTLBuffer();
- mVertexBufferOffsets[slot] = offset;
+ StorageBufferLengthTracker* mLengthTracker;
+};
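// Illustrative sketch only -- not Dawn code. It shows the general pattern the
// BindGroupTracker/StorageBufferLengthTracker pair above relies on: record the
// bound size per shader stage eagerly, mark that stage dirty, and flush only
// the dirty stages immediately before a draw or dispatch. Stage,
// LengthTrackerSketch, kMaxBindings and FlushToGpu are made-up stand-ins for
// the real Dawn/Metal types and calls.
#include <array>
#include <cstddef>
#include <cstdint>
#include <cstdio>

enum class Stage : uint32_t { Vertex = 1u << 0, Fragment = 1u << 1, Compute = 1u << 2 };

struct LengthTrackerSketch {
    static constexpr size_t kMaxBindings = 8;
    std::array<std::array<uint32_t, kMaxBindings>, 3> data{};  // [stage][binding] -> length
    uint32_t dirtyStages = 0;

    void Record(Stage stage, size_t binding, uint32_t length) {
        data[StageIndex(stage)][binding] = length;
        dirtyStages |= static_cast<uint32_t>(stage);  // this stage must be re-uploaded
    }

    void Apply() {  // called once per draw/dispatch
        for (Stage s : {Stage::Vertex, Stage::Fragment, Stage::Compute}) {
            if (dirtyStages & static_cast<uint32_t>(s)) {
                FlushToGpu(s);  // hypothetical upload of data[s] to the GPU
            }
        }
        dirtyStages = 0;  // everything is clean until the next Record()
    }

  private:
    static size_t StageIndex(Stage s) {
        return s == Stage::Vertex ? 0 : s == Stage::Fragment ? 1 : 2;
    }
    void FlushToGpu(Stage s) {
        std::printf("upload buffer lengths for stage %u\n", static_cast<unsigned>(s));
    }
};

int main() {
    LengthTrackerSketch tracker;
    tracker.Record(Stage::Vertex, 0, 256);
    tracker.Record(Stage::Fragment, 2, 128);
    tracker.Apply();  // flushes Vertex and Fragment only
    tracker.Apply();  // nothing dirty, flushes nothing
    return 0;
}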
- ASSERT(buffer->GetSize() < std::numeric_limits<uint32_t>::max());
- mVertexBufferBindingSizes[slot] =
- static_cast<uint32_t>(buffer->GetAllocatedSize() - offset);
- mDirtyVertexBuffers.set(slot);
- }
+// Keeps track of the dirty vertex buffer values so they can be lazily applied when we know
+// all the relevant state.
+class VertexBufferTracker {
+ public:
+ explicit VertexBufferTracker(StorageBufferLengthTracker* lengthTracker)
+ : mLengthTracker(lengthTracker) {}
- void OnSetPipeline(RenderPipeline* lastPipeline, RenderPipeline* pipeline) {
- // When a new pipeline is bound we must set all the vertex buffers again because
- // they might have been offset by the pipeline layout, and they might be packed
- // differently from the previous pipeline.
- mDirtyVertexBuffers |= pipeline->GetVertexBufferSlotsUsed();
- }
+ void OnSetVertexBuffer(VertexBufferSlot slot, Buffer* buffer, uint64_t offset) {
+ mVertexBuffers[slot] = buffer->GetMTLBuffer();
+ mVertexBufferOffsets[slot] = offset;
- void Apply(id<MTLRenderCommandEncoder> encoder,
- RenderPipeline* pipeline,
- bool enableVertexPulling) {
- const auto& vertexBuffersToApply =
- mDirtyVertexBuffers & pipeline->GetVertexBufferSlotsUsed();
+ ASSERT(buffer->GetSize() < std::numeric_limits<uint32_t>::max());
+ mVertexBufferBindingSizes[slot] =
+ static_cast<uint32_t>(buffer->GetAllocatedSize() - offset);
+ mDirtyVertexBuffers.set(slot);
+ }
- for (VertexBufferSlot slot : IterateBitSet(vertexBuffersToApply)) {
- uint32_t metalIndex = pipeline->GetMtlVertexBufferIndex(slot);
+ void OnSetPipeline(RenderPipeline* lastPipeline, RenderPipeline* pipeline) {
+ // When a new pipeline is bound we must set all the vertex buffers again because
+ // they might have been offset by the pipeline layout, and they might be packed
+ // differently from the previous pipeline.
+ mDirtyVertexBuffers |= pipeline->GetVertexBufferSlotsUsed();
+ }
- if (enableVertexPulling) {
- // Insert lengths for vertex buffers bound as storage buffers
- mLengthTracker->data[SingleShaderStage::Vertex][metalIndex] =
- mVertexBufferBindingSizes[slot];
- mLengthTracker->dirtyStages |= wgpu::ShaderStage::Vertex;
- }
+ void Apply(id<MTLRenderCommandEncoder> encoder,
+ RenderPipeline* pipeline,
+ bool enableVertexPulling) {
+ const auto& vertexBuffersToApply =
+ mDirtyVertexBuffers & pipeline->GetVertexBufferSlotsUsed();
- [encoder setVertexBuffers:&mVertexBuffers[slot]
- offsets:&mVertexBufferOffsets[slot]
- withRange:NSMakeRange(metalIndex, 1)];
- }
+ for (VertexBufferSlot slot : IterateBitSet(vertexBuffersToApply)) {
+ uint32_t metalIndex = pipeline->GetMtlVertexBufferIndex(slot);
- mDirtyVertexBuffers.reset();
+ if (enableVertexPulling) {
+ // Insert lengths for vertex buffers bound as storage buffers
+ mLengthTracker->data[SingleShaderStage::Vertex][metalIndex] =
+ mVertexBufferBindingSizes[slot];
+ mLengthTracker->dirtyStages |= wgpu::ShaderStage::Vertex;
}
- private:
- // All the indices in these arrays are Dawn vertex buffer indices
- ityp::bitset<VertexBufferSlot, kMaxVertexBuffers> mDirtyVertexBuffers;
- ityp::array<VertexBufferSlot, id<MTLBuffer>, kMaxVertexBuffers> mVertexBuffers;
- ityp::array<VertexBufferSlot, NSUInteger, kMaxVertexBuffers> mVertexBufferOffsets;
- ityp::array<VertexBufferSlot, uint32_t, kMaxVertexBuffers> mVertexBufferBindingSizes;
-
- StorageBufferLengthTracker* mLengthTracker;
- };
-
- } // anonymous namespace
-
- void RecordCopyBufferToTexture(CommandRecordingContext* commandContext,
- id<MTLBuffer> mtlBuffer,
- uint64_t bufferSize,
- uint64_t offset,
- uint32_t bytesPerRow,
- uint32_t rowsPerImage,
- Texture* texture,
- uint32_t mipLevel,
- const Origin3D& origin,
- Aspect aspect,
- const Extent3D& copySize) {
- TextureBufferCopySplit splitCopies =
- ComputeTextureBufferCopySplit(texture, mipLevel, origin, copySize, bufferSize, offset,
- bytesPerRow, rowsPerImage, aspect);
-
- MTLBlitOption blitOption = ComputeMTLBlitOption(texture->GetFormat(), aspect);
-
- for (const auto& copyInfo : splitCopies) {
- uint64_t bufferOffset = copyInfo.bufferOffset;
- switch (texture->GetDimension()) {
- case wgpu::TextureDimension::e1D: {
- [commandContext->EnsureBlit()
- copyFromBuffer:mtlBuffer
- sourceOffset:bufferOffset
- sourceBytesPerRow:copyInfo.bytesPerRow
- sourceBytesPerImage:copyInfo.bytesPerImage
- sourceSize:MTLSizeMake(copyInfo.copyExtent.width, 1, 1)
- toTexture:texture->GetMTLTexture()
- destinationSlice:0
- destinationLevel:mipLevel
- destinationOrigin:MTLOriginMake(copyInfo.textureOrigin.x, 0, 0)
- options:blitOption];
- break;
- }
- case wgpu::TextureDimension::e2D: {
- const MTLOrigin textureOrigin =
- MTLOriginMake(copyInfo.textureOrigin.x, copyInfo.textureOrigin.y, 0);
- const MTLSize copyExtent =
- MTLSizeMake(copyInfo.copyExtent.width, copyInfo.copyExtent.height, 1);
-
- for (uint32_t z = copyInfo.textureOrigin.z;
- z < copyInfo.textureOrigin.z + copyInfo.copyExtent.depthOrArrayLayers;
- ++z) {
- [commandContext->EnsureBlit() copyFromBuffer:mtlBuffer
- sourceOffset:bufferOffset
- sourceBytesPerRow:copyInfo.bytesPerRow
- sourceBytesPerImage:copyInfo.bytesPerImage
- sourceSize:copyExtent
- toTexture:texture->GetMTLTexture()
- destinationSlice:z
- destinationLevel:mipLevel
- destinationOrigin:textureOrigin
- options:blitOption];
- bufferOffset += copyInfo.bytesPerImage;
- }
- break;
- }
- case wgpu::TextureDimension::e3D: {
- [commandContext->EnsureBlit()
- copyFromBuffer:mtlBuffer
- sourceOffset:bufferOffset
- sourceBytesPerRow:copyInfo.bytesPerRow
- sourceBytesPerImage:copyInfo.bytesPerImage
- sourceSize:MTLSizeMake(copyInfo.copyExtent.width,
- copyInfo.copyExtent.height,
- copyInfo.copyExtent.depthOrArrayLayers)
- toTexture:texture->GetMTLTexture()
- destinationSlice:0
- destinationLevel:mipLevel
- destinationOrigin:MTLOriginMake(copyInfo.textureOrigin.x,
- copyInfo.textureOrigin.y,
- copyInfo.textureOrigin.z)
- options:blitOption];
- break;
- }
- }
+ [encoder setVertexBuffers:&mVertexBuffers[slot]
+ offsets:&mVertexBufferOffsets[slot]
+ withRange:NSMakeRange(metalIndex, 1)];
}
- }
- // static
- Ref<CommandBuffer> CommandBuffer::Create(CommandEncoder* encoder,
- const CommandBufferDescriptor* descriptor) {
- return AcquireRef(new CommandBuffer(encoder, descriptor));
+ mDirtyVertexBuffers.reset();
}
- MaybeError CommandBuffer::FillCommands(CommandRecordingContext* commandContext) {
- size_t nextComputePassNumber = 0;
- size_t nextRenderPassNumber = 0;
-
- auto LazyClearSyncScope = [](const SyncScopeResourceUsage& scope,
- CommandRecordingContext* commandContext) {
- for (size_t i = 0; i < scope.textures.size(); ++i) {
- Texture* texture = ToBackend(scope.textures[i]);
-
- // Clear subresources that are not render attachments. Render attachments will be
- // cleared in RecordBeginRenderPass by setting the loadop to clear when the texture
- // subresource has not been initialized before the render pass.
- scope.textureUsages[i].Iterate(
- [&](const SubresourceRange& range, wgpu::TextureUsage usage) {
- if (usage & ~wgpu::TextureUsage::RenderAttachment) {
- texture->EnsureSubresourceContentInitialized(commandContext, range);
- }
- });
+ private:
+ // All the indices in these arrays are Dawn vertex buffer indices
+ ityp::bitset<VertexBufferSlot, kMaxVertexBuffers> mDirtyVertexBuffers;
+ ityp::array<VertexBufferSlot, id<MTLBuffer>, kMaxVertexBuffers> mVertexBuffers;
+ ityp::array<VertexBufferSlot, NSUInteger, kMaxVertexBuffers> mVertexBufferOffsets;
+ ityp::array<VertexBufferSlot, uint32_t, kMaxVertexBuffers> mVertexBufferBindingSizes;
+
+ StorageBufferLengthTracker* mLengthTracker;
+};
+
+} // anonymous namespace
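// Illustrative sketch only -- not Dawn code. VertexBufferTracker above keeps a
// dirty bitset: buffers are remembered when set, every slot a newly bound
// pipeline uses is re-marked dirty (its Metal buffer index may have changed),
// and Apply() rebinds only the intersection of dirty slots and the slots the
// current pipeline reads. kMaxSlots and the int buffer ids are hypothetical
// stand-ins for kMaxVertexBuffers and id<MTLBuffer>.
#include <array>
#include <bitset>
#include <cstddef>
#include <cstdio>

struct VertexTrackerSketch {
    static constexpr size_t kMaxSlots = 8;
    std::bitset<kMaxSlots> dirty;
    std::array<int, kMaxSlots> bufferIds{};  // stand-in for id<MTLBuffer> handles

    void OnSetVertexBuffer(size_t slot, int bufferId) {
        bufferIds[slot] = bufferId;
        dirty.set(slot);
    }
    void OnSetPipeline(const std::bitset<kMaxSlots>& slotsUsedByPipeline) {
        dirty |= slotsUsedByPipeline;  // the layout may have changed; rebind them all
    }
    void Apply(const std::bitset<kMaxSlots>& slotsUsedByPipeline) {
        const std::bitset<kMaxSlots> toApply = dirty & slotsUsedByPipeline;
        for (size_t slot = 0; slot < kMaxSlots; ++slot) {
            if (toApply.test(slot)) {
                std::printf("bind buffer %d at vertex slot %zu\n", bufferIds[slot], slot);
            }
        }
        dirty.reset();
    }
};

int main() {
    VertexTrackerSketch tracker;
    std::bitset<VertexTrackerSketch::kMaxSlots> pipelineSlots;
    pipelineSlots.set(0);
    pipelineSlots.set(3);

    tracker.OnSetVertexBuffer(0, /*bufferId=*/7);
    tracker.OnSetVertexBuffer(3, /*bufferId=*/9);
    tracker.Apply(pipelineSlots);         // binds slots 0 and 3
    tracker.OnSetPipeline(pipelineSlots);
    tracker.Apply(pipelineSlots);         // both slots are dirty again after the pipeline change
    return 0;
}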
+
+void RecordCopyBufferToTexture(CommandRecordingContext* commandContext,
+ id<MTLBuffer> mtlBuffer,
+ uint64_t bufferSize,
+ uint64_t offset,
+ uint32_t bytesPerRow,
+ uint32_t rowsPerImage,
+ Texture* texture,
+ uint32_t mipLevel,
+ const Origin3D& origin,
+ Aspect aspect,
+ const Extent3D& copySize) {
+ TextureBufferCopySplit splitCopies = ComputeTextureBufferCopySplit(
+ texture, mipLevel, origin, copySize, bufferSize, offset, bytesPerRow, rowsPerImage, aspect);
+
+ MTLBlitOption blitOption = ComputeMTLBlitOption(texture->GetFormat(), aspect);
+
+ for (const auto& copyInfo : splitCopies) {
+ uint64_t bufferOffset = copyInfo.bufferOffset;
+ switch (texture->GetDimension()) {
+ case wgpu::TextureDimension::e1D: {
+ [commandContext->EnsureBlit()
+ copyFromBuffer:mtlBuffer
+ sourceOffset:bufferOffset
+ sourceBytesPerRow:copyInfo.bytesPerRow
+ sourceBytesPerImage:copyInfo.bytesPerImage
+ sourceSize:MTLSizeMake(copyInfo.copyExtent.width, 1, 1)
+ toTexture:texture->GetMTLTexture()
+ destinationSlice:0
+ destinationLevel:mipLevel
+ destinationOrigin:MTLOriginMake(copyInfo.textureOrigin.x, 0, 0)
+ options:blitOption];
+ break;
}
- for (BufferBase* bufferBase : scope.buffers) {
- ToBackend(bufferBase)->EnsureDataInitialized(commandContext);
+ case wgpu::TextureDimension::e2D: {
+ const MTLOrigin textureOrigin =
+ MTLOriginMake(copyInfo.textureOrigin.x, copyInfo.textureOrigin.y, 0);
+ const MTLSize copyExtent =
+ MTLSizeMake(copyInfo.copyExtent.width, copyInfo.copyExtent.height, 1);
+
+ for (uint32_t z = copyInfo.textureOrigin.z;
+ z < copyInfo.textureOrigin.z + copyInfo.copyExtent.depthOrArrayLayers; ++z) {
+ [commandContext->EnsureBlit() copyFromBuffer:mtlBuffer
+ sourceOffset:bufferOffset
+ sourceBytesPerRow:copyInfo.bytesPerRow
+ sourceBytesPerImage:copyInfo.bytesPerImage
+ sourceSize:copyExtent
+ toTexture:texture->GetMTLTexture()
+ destinationSlice:z
+ destinationLevel:mipLevel
+ destinationOrigin:textureOrigin
+ options:blitOption];
+ bufferOffset += copyInfo.bytesPerImage;
+ }
+ break;
}
- };
-
- Command type;
- while (mCommands.NextCommandId(&type)) {
- switch (type) {
- case Command::BeginComputePass: {
- mCommands.NextCommand<BeginComputePassCmd>();
-
- for (const SyncScopeResourceUsage& scope :
- GetResourceUsages().computePasses[nextComputePassNumber].dispatchUsages) {
- LazyClearSyncScope(scope, commandContext);
+ case wgpu::TextureDimension::e3D: {
+ [commandContext->EnsureBlit()
+ copyFromBuffer:mtlBuffer
+ sourceOffset:bufferOffset
+ sourceBytesPerRow:copyInfo.bytesPerRow
+ sourceBytesPerImage:copyInfo.bytesPerImage
+ sourceSize:MTLSizeMake(copyInfo.copyExtent.width,
+ copyInfo.copyExtent.height,
+ copyInfo.copyExtent.depthOrArrayLayers)
+ toTexture:texture->GetMTLTexture()
+ destinationSlice:0
+ destinationLevel:mipLevel
+ destinationOrigin:MTLOriginMake(copyInfo.textureOrigin.x,
+ copyInfo.textureOrigin.y,
+ copyInfo.textureOrigin.z)
+ options:blitOption];
+ break;
+ }
+ }
+ }
+}
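// Illustrative sketch only -- not Dawn code. The e2D branch above issues one
// blit per array layer and advances the buffer offset by bytesPerImage each
// time; this is the underlying arithmetic. bytesPerImage = bytesPerRow *
// rowsPerImage assumes the usual linear layout; in Dawn the actual values come
// from ComputeTextureBufferCopySplit.
#include <cstdint>
#include <cstdio>

int main() {
    const uint64_t baseOffset = 256;    // where the first layer starts in the buffer
    const uint32_t bytesPerRow = 1024;  // padded row pitch
    const uint32_t rowsPerImage = 64;   // rows per array layer
    const uint32_t layerCount = 3;

    const uint64_t bytesPerImage = uint64_t(bytesPerRow) * rowsPerImage;
    for (uint32_t z = 0; z < layerCount; ++z) {
        const uint64_t layerOffset = baseOffset + z * bytesPerImage;
        std::printf("layer %u is read from buffer offset %llu\n", z,
                    static_cast<unsigned long long>(layerOffset));
    }
    return 0;
}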
+
+// static
+Ref<CommandBuffer> CommandBuffer::Create(CommandEncoder* encoder,
+ const CommandBufferDescriptor* descriptor) {
+ return AcquireRef(new CommandBuffer(encoder, descriptor));
+}
+
+CommandBuffer::CommandBuffer(CommandEncoder* enc, const CommandBufferDescriptor* desc)
+ : CommandBufferBase(enc, desc) {}
+
+CommandBuffer::~CommandBuffer() = default;
+
+MaybeError CommandBuffer::FillCommands(CommandRecordingContext* commandContext) {
+ size_t nextComputePassNumber = 0;
+ size_t nextRenderPassNumber = 0;
+
+ auto LazyClearSyncScope = [](const SyncScopeResourceUsage& scope,
+ CommandRecordingContext* commandContext) {
+ for (size_t i = 0; i < scope.textures.size(); ++i) {
+ Texture* texture = ToBackend(scope.textures[i]);
+
+ // Clear subresources that are not render attachments. Render attachments will be
+ // cleared in RecordBeginRenderPass by setting the loadop to clear when the texture
+ // subresource has not been initialized before the render pass.
+ scope.textureUsages[i].Iterate(
+ [&](const SubresourceRange& range, wgpu::TextureUsage usage) {
+ if (usage & ~wgpu::TextureUsage::RenderAttachment) {
+ texture->EnsureSubresourceContentInitialized(commandContext, range);
}
- commandContext->EndBlit();
+ });
+ }
+ for (BufferBase* bufferBase : scope.buffers) {
+ ToBackend(bufferBase)->EnsureDataInitialized(commandContext);
+ }
+ };
- DAWN_TRY(EncodeComputePass(commandContext));
+ Command type;
+ while (mCommands.NextCommandId(&type)) {
+ switch (type) {
+ case Command::BeginComputePass: {
+ mCommands.NextCommand<BeginComputePassCmd>();
- nextComputePassNumber++;
- break;
+ for (const SyncScopeResourceUsage& scope :
+ GetResourceUsages().computePasses[nextComputePassNumber].dispatchUsages) {
+ LazyClearSyncScope(scope, commandContext);
}
+ commandContext->EndBlit();
+
+ DAWN_TRY(EncodeComputePass(commandContext));
+
+ nextComputePassNumber++;
+ break;
+ }
+
+ case Command::BeginRenderPass: {
+ BeginRenderPassCmd* cmd = mCommands.NextCommand<BeginRenderPassCmd>();
- case Command::BeginRenderPass: {
- BeginRenderPassCmd* cmd = mCommands.NextCommand<BeginRenderPassCmd>();
+ LazyClearSyncScope(GetResourceUsages().renderPasses[nextRenderPassNumber],
+ commandContext);
+ commandContext->EndBlit();
- LazyClearSyncScope(GetResourceUsages().renderPasses[nextRenderPassNumber],
- commandContext);
- commandContext->EndBlit();
+ LazyClearRenderPassAttachments(cmd);
+ NSRef<MTLRenderPassDescriptor> descriptor = CreateMTLRenderPassDescriptor(cmd);
+ DAWN_TRY(EncodeMetalRenderPass(
+ ToBackend(GetDevice()), commandContext, descriptor.Get(), cmd->width,
+ cmd->height, [this](id<MTLRenderCommandEncoder> encoder) -> MaybeError {
+ return this->EncodeRenderPass(encoder);
+ }));
- LazyClearRenderPassAttachments(cmd);
- NSRef<MTLRenderPassDescriptor> descriptor = CreateMTLRenderPassDescriptor(cmd);
- DAWN_TRY(EncodeRenderPass(commandContext, descriptor.Get(), cmd->width,
- cmd->height));
+ nextRenderPassNumber++;
+ break;
+ }
- nextRenderPassNumber++;
+ case Command::CopyBufferToBuffer: {
+ CopyBufferToBufferCmd* copy = mCommands.NextCommand<CopyBufferToBufferCmd>();
+ if (copy->size == 0) {
+ // Skip no-op copies.
break;
}
- case Command::CopyBufferToBuffer: {
- CopyBufferToBufferCmd* copy = mCommands.NextCommand<CopyBufferToBufferCmd>();
- if (copy->size == 0) {
- // Skip no-op copies.
- break;
- }
+ ToBackend(copy->source)->EnsureDataInitialized(commandContext);
+ ToBackend(copy->destination)
+ ->EnsureDataInitializedAsDestination(commandContext, copy->destinationOffset,
+ copy->size);
- ToBackend(copy->source)->EnsureDataInitialized(commandContext);
- ToBackend(copy->destination)
- ->EnsureDataInitializedAsDestination(commandContext,
- copy->destinationOffset, copy->size);
+ [commandContext->EnsureBlit()
+ copyFromBuffer:ToBackend(copy->source)->GetMTLBuffer()
+ sourceOffset:copy->sourceOffset
+ toBuffer:ToBackend(copy->destination)->GetMTLBuffer()
+ destinationOffset:copy->destinationOffset
+ size:copy->size];
+ break;
+ }
- [commandContext->EnsureBlit()
- copyFromBuffer:ToBackend(copy->source)->GetMTLBuffer()
- sourceOffset:copy->sourceOffset
- toBuffer:ToBackend(copy->destination)->GetMTLBuffer()
- destinationOffset:copy->destinationOffset
- size:copy->size];
- break;
+ case Command::CopyBufferToTexture: {
+ CopyBufferToTextureCmd* copy = mCommands.NextCommand<CopyBufferToTextureCmd>();
+ if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
+ copy->copySize.depthOrArrayLayers == 0) {
+ // Skip no-op copies.
+ continue;
}
+ auto& src = copy->source;
+ auto& dst = copy->destination;
+ auto& copySize = copy->copySize;
+ Buffer* buffer = ToBackend(src.buffer.Get());
+ Texture* texture = ToBackend(dst.texture.Get());
- case Command::CopyBufferToTexture: {
- CopyBufferToTextureCmd* copy = mCommands.NextCommand<CopyBufferToTextureCmd>();
- if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
- copy->copySize.depthOrArrayLayers == 0) {
- // Skip no-op copies.
- continue;
- }
- auto& src = copy->source;
- auto& dst = copy->destination;
- auto& copySize = copy->copySize;
- Buffer* buffer = ToBackend(src.buffer.Get());
- Texture* texture = ToBackend(dst.texture.Get());
-
- buffer->EnsureDataInitialized(commandContext);
- EnsureDestinationTextureInitialized(commandContext, texture, dst, copySize);
-
- RecordCopyBufferToTexture(commandContext, buffer->GetMTLBuffer(),
- buffer->GetSize(), src.offset, src.bytesPerRow,
- src.rowsPerImage, texture, dst.mipLevel, dst.origin,
- dst.aspect, copySize);
- break;
+ buffer->EnsureDataInitialized(commandContext);
+ EnsureDestinationTextureInitialized(commandContext, texture, dst, copySize);
+
+ RecordCopyBufferToTexture(commandContext, buffer->GetMTLBuffer(), buffer->GetSize(),
+ src.offset, src.bytesPerRow, src.rowsPerImage, texture,
+ dst.mipLevel, dst.origin, dst.aspect, copySize);
+ break;
+ }
+
+ case Command::CopyTextureToBuffer: {
+ CopyTextureToBufferCmd* copy = mCommands.NextCommand<CopyTextureToBufferCmd>();
+ if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
+ copy->copySize.depthOrArrayLayers == 0) {
+ // Skip no-op copies.
+ continue;
}
+ auto& src = copy->source;
+ auto& dst = copy->destination;
+ auto& copySize = copy->copySize;
+ Texture* texture = ToBackend(src.texture.Get());
+ Buffer* buffer = ToBackend(dst.buffer.Get());
- case Command::CopyTextureToBuffer: {
- CopyTextureToBufferCmd* copy = mCommands.NextCommand<CopyTextureToBufferCmd>();
- if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
- copy->copySize.depthOrArrayLayers == 0) {
- // Skip no-op copies.
- continue;
- }
- auto& src = copy->source;
- auto& dst = copy->destination;
- auto& copySize = copy->copySize;
- Texture* texture = ToBackend(src.texture.Get());
- Buffer* buffer = ToBackend(dst.buffer.Get());
+ buffer->EnsureDataInitializedAsDestination(commandContext, copy);
- buffer->EnsureDataInitializedAsDestination(commandContext, copy);
+ texture->EnsureSubresourceContentInitialized(
+ commandContext, GetSubresourcesAffectedByCopy(src, copySize));
- texture->EnsureSubresourceContentInitialized(
- commandContext, GetSubresourcesAffectedByCopy(src, copySize));
+ TextureBufferCopySplit splitCopies = ComputeTextureBufferCopySplit(
+ texture, src.mipLevel, src.origin, copySize, buffer->GetSize(), dst.offset,
+ dst.bytesPerRow, dst.rowsPerImage, src.aspect);
- TextureBufferCopySplit splitCopies = ComputeTextureBufferCopySplit(
- texture, src.mipLevel, src.origin, copySize, buffer->GetSize(), dst.offset,
- dst.bytesPerRow, dst.rowsPerImage, src.aspect);
+ for (const auto& copyInfo : splitCopies) {
+ MTLBlitOption blitOption =
+ ComputeMTLBlitOption(texture->GetFormat(), src.aspect);
+ uint64_t bufferOffset = copyInfo.bufferOffset;
- for (const auto& copyInfo : splitCopies) {
- MTLBlitOption blitOption =
- ComputeMTLBlitOption(texture->GetFormat(), src.aspect);
- uint64_t bufferOffset = copyInfo.bufferOffset;
+ switch (texture->GetDimension()) {
+ case wgpu::TextureDimension::e1D: {
+ [commandContext->EnsureBlit()
+ copyFromTexture:texture->GetMTLTexture()
+ sourceSlice:0
+ sourceLevel:src.mipLevel
+ sourceOrigin:MTLOriginMake(copyInfo.textureOrigin.x, 0,
+ 0)
+ sourceSize:MTLSizeMake(copyInfo.copyExtent.width, 1,
+ 1)
+ toBuffer:buffer->GetMTLBuffer()
+ destinationOffset:bufferOffset
+ destinationBytesPerRow:copyInfo.bytesPerRow
+ destinationBytesPerImage:copyInfo.bytesPerImage
+ options:blitOption];
+ break;
+ }
- switch (texture->GetDimension()) {
- case wgpu::TextureDimension::e1D: {
- [commandContext->EnsureBlit()
- copyFromTexture:texture->GetMTLTexture()
- sourceSlice:0
- sourceLevel:src.mipLevel
- sourceOrigin:MTLOriginMake(copyInfo.textureOrigin.x,
- 0, 0)
- sourceSize:MTLSizeMake(copyInfo.copyExtent.width,
- 1, 1)
- toBuffer:buffer->GetMTLBuffer()
- destinationOffset:bufferOffset
- destinationBytesPerRow:copyInfo.bytesPerRow
- destinationBytesPerImage:copyInfo.bytesPerImage
- options:blitOption];
- break;
- }
+ case wgpu::TextureDimension::e2D: {
+ const MTLOrigin textureOrigin = MTLOriginMake(
+ copyInfo.textureOrigin.x, copyInfo.textureOrigin.y, 0);
+ const MTLSize copyExtent = MTLSizeMake(copyInfo.copyExtent.width,
+ copyInfo.copyExtent.height, 1);
- case wgpu::TextureDimension::e2D: {
- const MTLOrigin textureOrigin = MTLOriginMake(
- copyInfo.textureOrigin.x, copyInfo.textureOrigin.y, 0);
- const MTLSize copyExtent = MTLSizeMake(
- copyInfo.copyExtent.width, copyInfo.copyExtent.height, 1);
-
- for (uint32_t z = copyInfo.textureOrigin.z;
- z < copyInfo.textureOrigin.z +
- copyInfo.copyExtent.depthOrArrayLayers;
- ++z) {
- [commandContext->EnsureBlit()
- copyFromTexture:texture->GetMTLTexture()
- sourceSlice:z
- sourceLevel:src.mipLevel
- sourceOrigin:textureOrigin
- sourceSize:copyExtent
- toBuffer:buffer->GetMTLBuffer()
- destinationOffset:bufferOffset
- destinationBytesPerRow:copyInfo.bytesPerRow
- destinationBytesPerImage:copyInfo.bytesPerImage
- options:blitOption];
- bufferOffset += copyInfo.bytesPerImage;
- }
- break;
- }
- case wgpu::TextureDimension::e3D: {
+ for (uint32_t z = copyInfo.textureOrigin.z;
+ z <
+ copyInfo.textureOrigin.z + copyInfo.copyExtent.depthOrArrayLayers;
+ ++z) {
[commandContext->EnsureBlit()
copyFromTexture:texture->GetMTLTexture()
- sourceSlice:0
+ sourceSlice:z
sourceLevel:src.mipLevel
- sourceOrigin:MTLOriginMake(copyInfo.textureOrigin.x,
- copyInfo.textureOrigin.y,
- copyInfo.textureOrigin.z)
- sourceSize:MTLSizeMake(copyInfo.copyExtent.width,
- copyInfo.copyExtent.height,
- copyInfo.copyExtent
- .depthOrArrayLayers)
+ sourceOrigin:textureOrigin
+ sourceSize:copyExtent
toBuffer:buffer->GetMTLBuffer()
destinationOffset:bufferOffset
destinationBytesPerRow:copyInfo.bytesPerRow
destinationBytesPerImage:copyInfo.bytesPerImage
options:blitOption];
- break;
+ bufferOffset += copyInfo.bytesPerImage;
}
+ break;
+ }
+ case wgpu::TextureDimension::e3D: {
+ [commandContext->EnsureBlit()
+ copyFromTexture:texture->GetMTLTexture()
+ sourceSlice:0
+ sourceLevel:src.mipLevel
+ sourceOrigin:MTLOriginMake(copyInfo.textureOrigin.x,
+ copyInfo.textureOrigin.y,
+ copyInfo.textureOrigin.z)
+ sourceSize:MTLSizeMake(
+ copyInfo.copyExtent.width,
+ copyInfo.copyExtent.height,
+ copyInfo.copyExtent.depthOrArrayLayers)
+ toBuffer:buffer->GetMTLBuffer()
+ destinationOffset:bufferOffset
+ destinationBytesPerRow:copyInfo.bytesPerRow
+ destinationBytesPerImage:copyInfo.bytesPerImage
+ options:blitOption];
+ break;
}
}
- break;
}
+ break;
+ }
- case Command::CopyTextureToTexture: {
- CopyTextureToTextureCmd* copy =
- mCommands.NextCommand<CopyTextureToTextureCmd>();
- if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
- copy->copySize.depthOrArrayLayers == 0) {
- // Skip no-op copies.
- continue;
- }
- Texture* srcTexture = ToBackend(copy->source.texture.Get());
- Texture* dstTexture = ToBackend(copy->destination.texture.Get());
-
- srcTexture->EnsureSubresourceContentInitialized(
- commandContext,
- GetSubresourcesAffectedByCopy(copy->source, copy->copySize));
- EnsureDestinationTextureInitialized(commandContext, dstTexture,
- copy->destination, copy->copySize);
-
- const MTLSize sizeOneSlice =
- MTLSizeMake(copy->copySize.width, copy->copySize.height, 1);
-
- uint32_t sourceLayer = 0;
- uint32_t sourceOriginZ = 0;
+ case Command::CopyTextureToTexture: {
+ CopyTextureToTextureCmd* copy = mCommands.NextCommand<CopyTextureToTextureCmd>();
+ if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
+ copy->copySize.depthOrArrayLayers == 0) {
+ // Skip no-op copies.
+ continue;
+ }
+ Texture* srcTexture = ToBackend(copy->source.texture.Get());
+ Texture* dstTexture = ToBackend(copy->destination.texture.Get());
- uint32_t destinationLayer = 0;
- uint32_t destinationOriginZ = 0;
+ srcTexture->EnsureSubresourceContentInitialized(
+ commandContext, GetSubresourcesAffectedByCopy(copy->source, copy->copySize));
+ EnsureDestinationTextureInitialized(commandContext, dstTexture, copy->destination,
+ copy->copySize);
- uint32_t* sourceZPtr;
- if (srcTexture->GetDimension() == wgpu::TextureDimension::e2D) {
- sourceZPtr = &sourceLayer;
- } else {
- sourceZPtr = &sourceOriginZ;
- }
+ const MTLSize sizeOneSlice =
+ MTLSizeMake(copy->copySize.width, copy->copySize.height, 1);
- uint32_t* destinationZPtr;
- if (dstTexture->GetDimension() == wgpu::TextureDimension::e2D) {
- destinationZPtr = &destinationLayer;
- } else {
- destinationZPtr = &destinationOriginZ;
- }
+ uint32_t sourceLayer = 0;
+ uint32_t sourceOriginZ = 0;
- // TODO(crbug.com/dawn/782): Do a single T2T copy if both are 1D or 3D.
- for (uint32_t z = 0; z < copy->copySize.depthOrArrayLayers; ++z) {
- *sourceZPtr = copy->source.origin.z + z;
- *destinationZPtr = copy->destination.origin.z + z;
+ uint32_t destinationLayer = 0;
+ uint32_t destinationOriginZ = 0;
- // Hold the ref until out of scope
- NSPRef<id<MTLTexture>> dstTextureView =
- dstTexture->CreateFormatView(srcTexture->GetFormat().format);
+ uint32_t* sourceZPtr;
+ if (srcTexture->GetDimension() == wgpu::TextureDimension::e2D) {
+ sourceZPtr = &sourceLayer;
+ } else {
+ sourceZPtr = &sourceOriginZ;
+ }
- [commandContext->EnsureBlit()
- copyFromTexture:srcTexture->GetMTLTexture()
- sourceSlice:sourceLayer
- sourceLevel:copy->source.mipLevel
- sourceOrigin:MTLOriginMake(copy->source.origin.x,
- copy->source.origin.y, sourceOriginZ)
- sourceSize:sizeOneSlice
- toTexture:dstTextureView.Get()
- destinationSlice:destinationLayer
- destinationLevel:copy->destination.mipLevel
- destinationOrigin:MTLOriginMake(copy->destination.origin.x,
- copy->destination.origin.y,
- destinationOriginZ)];
- }
- break;
+ uint32_t* destinationZPtr;
+ if (dstTexture->GetDimension() == wgpu::TextureDimension::e2D) {
+ destinationZPtr = &destinationLayer;
+ } else {
+ destinationZPtr = &destinationOriginZ;
}
- case Command::ClearBuffer: {
- ClearBufferCmd* cmd = mCommands.NextCommand<ClearBufferCmd>();
- if (cmd->size == 0) {
- // Skip no-op copies.
- break;
- }
- Buffer* dstBuffer = ToBackend(cmd->buffer.Get());
+ // TODO(crbug.com/dawn/782): Do a single T2T copy if both are 1D or 3D.
+ for (uint32_t z = 0; z < copy->copySize.depthOrArrayLayers; ++z) {
+ *sourceZPtr = copy->source.origin.z + z;
+ *destinationZPtr = copy->destination.origin.z + z;
- bool clearedToZero = dstBuffer->EnsureDataInitializedAsDestination(
- commandContext, cmd->offset, cmd->size);
+ // Hold the ref until out of scope
+ NSPRef<id<MTLTexture>> dstTextureView =
+ dstTexture->CreateFormatView(srcTexture->GetFormat().format);
- if (!clearedToZero) {
- [commandContext->EnsureBlit() fillBuffer:dstBuffer->GetMTLBuffer()
- range:NSMakeRange(cmd->offset, cmd->size)
- value:0u];
- }
+ [commandContext->EnsureBlit()
+ copyFromTexture:srcTexture->GetMTLTexture()
+ sourceSlice:sourceLayer
+ sourceLevel:copy->source.mipLevel
+ sourceOrigin:MTLOriginMake(copy->source.origin.x,
+ copy->source.origin.y, sourceOriginZ)
+ sourceSize:sizeOneSlice
+ toTexture:dstTextureView.Get()
+ destinationSlice:destinationLayer
+ destinationLevel:copy->destination.mipLevel
+ destinationOrigin:MTLOriginMake(copy->destination.origin.x,
+ copy->destination.origin.y,
+ destinationOriginZ)];
+ }
+ break;
+ }
+ case Command::ClearBuffer: {
+ ClearBufferCmd* cmd = mCommands.NextCommand<ClearBufferCmd>();
+ if (cmd->size == 0) {
+ // Skip no-op copies.
break;
}
+ Buffer* dstBuffer = ToBackend(cmd->buffer.Get());
- case Command::ResolveQuerySet: {
- ResolveQuerySetCmd* cmd = mCommands.NextCommand<ResolveQuerySetCmd>();
- QuerySet* querySet = ToBackend(cmd->querySet.Get());
- Buffer* destination = ToBackend(cmd->destination.Get());
+ bool clearedToZero = dstBuffer->EnsureDataInitializedAsDestination(
+ commandContext, cmd->offset, cmd->size);
- destination->EnsureDataInitializedAsDestination(
- commandContext, cmd->destinationOffset, cmd->queryCount * sizeof(uint64_t));
-
- if (querySet->GetQueryType() == wgpu::QueryType::Occlusion) {
- [commandContext->EnsureBlit()
- copyFromBuffer:querySet->GetVisibilityBuffer()
- sourceOffset:NSUInteger(cmd->firstQuery * sizeof(uint64_t))
- toBuffer:destination->GetMTLBuffer()
- destinationOffset:NSUInteger(cmd->destinationOffset)
- size:NSUInteger(cmd->queryCount * sizeof(uint64_t))];
- } else {
- if (@available(macos 10.15, iOS 14.0, *)) {
- [commandContext->EnsureBlit()
- resolveCounters:querySet->GetCounterSampleBuffer()
- inRange:NSMakeRange(cmd->firstQuery, cmd->queryCount)
- destinationBuffer:destination->GetMTLBuffer()
- destinationOffset:NSUInteger(cmd->destinationOffset)];
- } else {
- UNREACHABLE();
- }
- }
- break;
+ if (!clearedToZero) {
+ [commandContext->EnsureBlit() fillBuffer:dstBuffer->GetMTLBuffer()
+ range:NSMakeRange(cmd->offset, cmd->size)
+ value:0u];
}
- case Command::WriteTimestamp: {
- WriteTimestampCmd* cmd = mCommands.NextCommand<WriteTimestampCmd>();
- QuerySet* querySet = ToBackend(cmd->querySet.Get());
+ break;
+ }
+
+ case Command::ResolveQuerySet: {
+ ResolveQuerySetCmd* cmd = mCommands.NextCommand<ResolveQuerySetCmd>();
+ QuerySet* querySet = ToBackend(cmd->querySet.Get());
+ Buffer* destination = ToBackend(cmd->destination.Get());
+
+ destination->EnsureDataInitializedAsDestination(
+ commandContext, cmd->destinationOffset, cmd->queryCount * sizeof(uint64_t));
+ if (querySet->GetQueryType() == wgpu::QueryType::Occlusion) {
+ [commandContext->EnsureBlit()
+ copyFromBuffer:querySet->GetVisibilityBuffer()
+ sourceOffset:NSUInteger(cmd->firstQuery * sizeof(uint64_t))
+ toBuffer:destination->GetMTLBuffer()
+ destinationOffset:NSUInteger(cmd->destinationOffset)
+ size:NSUInteger(cmd->queryCount * sizeof(uint64_t))];
+ } else {
if (@available(macos 10.15, iOS 14.0, *)) {
[commandContext->EnsureBlit()
- sampleCountersInBuffer:querySet->GetCounterSampleBuffer()
- atSampleIndex:NSUInteger(cmd->queryIndex)
- withBarrier:YES];
+ resolveCounters:querySet->GetCounterSampleBuffer()
+ inRange:NSMakeRange(cmd->firstQuery, cmd->queryCount)
+ destinationBuffer:destination->GetMTLBuffer()
+ destinationOffset:NSUInteger(cmd->destinationOffset)];
} else {
UNREACHABLE();
}
- break;
- }
-
- case Command::InsertDebugMarker: {
- // MTLCommandBuffer does not implement insertDebugSignpost
- SkipCommand(&mCommands, type);
- break;
}
+ break;
+ }
- case Command::PopDebugGroup: {
- mCommands.NextCommand<PopDebugGroupCmd>();
+ case Command::WriteTimestamp: {
+ WriteTimestampCmd* cmd = mCommands.NextCommand<WriteTimestampCmd>();
+ QuerySet* querySet = ToBackend(cmd->querySet.Get());
- if (@available(macos 10.13, *)) {
- [commandContext->GetCommands() popDebugGroup];
- }
- break;
+ if (@available(macos 10.15, iOS 14.0, *)) {
+ [commandContext->EnsureBlit()
+ sampleCountersInBuffer:querySet->GetCounterSampleBuffer()
+ atSampleIndex:NSUInteger(cmd->queryIndex)
+ withBarrier:YES];
+ } else {
+ UNREACHABLE();
}
+ break;
+ }
- case Command::PushDebugGroup: {
- PushDebugGroupCmd* cmd = mCommands.NextCommand<PushDebugGroupCmd>();
- char* label = mCommands.NextData<char>(cmd->length + 1);
+ case Command::InsertDebugMarker: {
+ // MTLCommandBuffer does not implement insertDebugSignpost
+ SkipCommand(&mCommands, type);
+ break;
+ }
- if (@available(macos 10.13, *)) {
- NSRef<NSString> mtlLabel =
- AcquireNSRef([[NSString alloc] initWithUTF8String:label]);
- [commandContext->GetCommands() pushDebugGroup:mtlLabel.Get()];
- }
+ case Command::PopDebugGroup: {
+ mCommands.NextCommand<PopDebugGroupCmd>();
- break;
+ if (@available(macos 10.13, *)) {
+ [commandContext->GetCommands() popDebugGroup];
}
+ break;
+ }
- case Command::WriteBuffer: {
- WriteBufferCmd* write = mCommands.NextCommand<WriteBufferCmd>();
- const uint64_t offset = write->offset;
- const uint64_t size = write->size;
- if (size == 0) {
- continue;
- }
-
- Buffer* dstBuffer = ToBackend(write->buffer.Get());
- uint8_t* data = mCommands.NextData<uint8_t>(size);
- Device* device = ToBackend(GetDevice());
-
- UploadHandle uploadHandle;
- DAWN_TRY_ASSIGN(uploadHandle, device->GetDynamicUploader()->Allocate(
- size, device->GetPendingCommandSerial(),
- kCopyBufferToBufferOffsetAlignment));
- ASSERT(uploadHandle.mappedBuffer != nullptr);
- memcpy(uploadHandle.mappedBuffer, data, size);
-
- dstBuffer->EnsureDataInitializedAsDestination(commandContext, offset, size);
+ case Command::PushDebugGroup: {
+ PushDebugGroupCmd* cmd = mCommands.NextCommand<PushDebugGroupCmd>();
+ char* label = mCommands.NextData<char>(cmd->length + 1);
- [commandContext->EnsureBlit()
- copyFromBuffer:ToBackend(uploadHandle.stagingBuffer)->GetBufferHandle()
- sourceOffset:uploadHandle.startOffset
- toBuffer:dstBuffer->GetMTLBuffer()
- destinationOffset:offset
- size:size];
- break;
+ if (@available(macos 10.13, *)) {
+ NSRef<NSString> mtlLabel =
+ AcquireNSRef([[NSString alloc] initWithUTF8String:label]);
+ [commandContext->GetCommands() pushDebugGroup:mtlLabel.Get()];
}
- default:
- UNREACHABLE();
+ break;
}
- }
-
- commandContext->EndBlit();
- return {};
- }
- MaybeError CommandBuffer::EncodeComputePass(CommandRecordingContext* commandContext) {
- ComputePipeline* lastPipeline = nullptr;
- StorageBufferLengthTracker storageBufferLengths = {};
- BindGroupTracker bindGroups(&storageBufferLengths);
-
- id<MTLComputeCommandEncoder> encoder = commandContext->BeginCompute();
-
- Command type;
- while (mCommands.NextCommandId(&type)) {
- switch (type) {
- case Command::EndComputePass: {
- mCommands.NextCommand<EndComputePassCmd>();
- commandContext->EndCompute();
- return {};
+ case Command::WriteBuffer: {
+ WriteBufferCmd* write = mCommands.NextCommand<WriteBufferCmd>();
+ const uint64_t offset = write->offset;
+ const uint64_t size = write->size;
+ if (size == 0) {
+ continue;
}
- case Command::Dispatch: {
- DispatchCmd* dispatch = mCommands.NextCommand<DispatchCmd>();
+ Buffer* dstBuffer = ToBackend(write->buffer.Get());
+ uint8_t* data = mCommands.NextData<uint8_t>(size);
+ Device* device = ToBackend(GetDevice());
- // Skip noop dispatches, it can causes issues on some systems.
- if (dispatch->x == 0 || dispatch->y == 0 || dispatch->z == 0) {
- break;
- }
-
- bindGroups.Apply(encoder);
- storageBufferLengths.Apply(encoder, lastPipeline);
-
- [encoder dispatchThreadgroups:MTLSizeMake(dispatch->x, dispatch->y, dispatch->z)
- threadsPerThreadgroup:lastPipeline->GetLocalWorkGroupSize()];
- break;
- }
+ UploadHandle uploadHandle;
+ DAWN_TRY_ASSIGN(uploadHandle, device->GetDynamicUploader()->Allocate(
+ size, device->GetPendingCommandSerial(),
+ kCopyBufferToBufferOffsetAlignment));
+ ASSERT(uploadHandle.mappedBuffer != nullptr);
+ memcpy(uploadHandle.mappedBuffer, data, size);
- case Command::DispatchIndirect: {
- DispatchIndirectCmd* dispatch = mCommands.NextCommand<DispatchIndirectCmd>();
+ dstBuffer->EnsureDataInitializedAsDestination(commandContext, offset, size);
- bindGroups.Apply(encoder);
- storageBufferLengths.Apply(encoder, lastPipeline);
+ [commandContext->EnsureBlit()
+ copyFromBuffer:ToBackend(uploadHandle.stagingBuffer)->GetBufferHandle()
+ sourceOffset:uploadHandle.startOffset
+ toBuffer:dstBuffer->GetMTLBuffer()
+ destinationOffset:offset
+ size:size];
+ break;
+ }
- Buffer* buffer = ToBackend(dispatch->indirectBuffer.Get());
- id<MTLBuffer> indirectBuffer = buffer->GetMTLBuffer();
- [encoder dispatchThreadgroupsWithIndirectBuffer:indirectBuffer
- indirectBufferOffset:dispatch->indirectOffset
- threadsPerThreadgroup:lastPipeline
- ->GetLocalWorkGroupSize()];
- break;
- }
+ default:
+ UNREACHABLE();
+ }
+ }
- case Command::SetComputePipeline: {
- SetComputePipelineCmd* cmd = mCommands.NextCommand<SetComputePipelineCmd>();
- lastPipeline = ToBackend(cmd->pipeline).Get();
+ commandContext->EndBlit();
+ return {};
+}
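// Illustrative sketch only -- not Dawn code. FillCommands above is a single
// loop over the recorded command stream: pull the next command tag, switch on
// it, do any lazy work (clears, data initialization, skipping zero-sized
// copies) and then encode. Command and the vector are made-up stand-ins for
// Dawn's Command enum and CommandIterator.
#include <cstdio>
#include <vector>

enum class Command { BeginComputePass, CopyBufferToBuffer, WriteBuffer };

int main() {
    const std::vector<Command> commands = {Command::WriteBuffer, Command::BeginComputePass};
    for (Command type : commands) {
        switch (type) {
            case Command::BeginComputePass:
                std::printf("lazily clear/initialize resources used by the pass, then encode it\n");
                break;
            case Command::CopyBufferToBuffer:
                std::printf("skip if size == 0, otherwise ensure data is initialized and blit\n");
                break;
            case Command::WriteBuffer:
                std::printf("stage the bytes in an upload buffer, then blit into the destination\n");
                break;
        }
    }
    return 0;
}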
- bindGroups.OnSetPipeline(lastPipeline);
+MaybeError CommandBuffer::EncodeComputePass(CommandRecordingContext* commandContext) {
+ ComputePipeline* lastPipeline = nullptr;
+ StorageBufferLengthTracker storageBufferLengths = {};
+ BindGroupTracker bindGroups(&storageBufferLengths);
- lastPipeline->Encode(encoder);
- break;
- }
+ id<MTLComputeCommandEncoder> encoder = commandContext->BeginCompute();
- case Command::SetBindGroup: {
- SetBindGroupCmd* cmd = mCommands.NextCommand<SetBindGroupCmd>();
- uint32_t* dynamicOffsets = nullptr;
- if (cmd->dynamicOffsetCount > 0) {
- dynamicOffsets = mCommands.NextData<uint32_t>(cmd->dynamicOffsetCount);
- }
+ Command type;
+ while (mCommands.NextCommandId(&type)) {
+ switch (type) {
+ case Command::EndComputePass: {
+ mCommands.NextCommand<EndComputePassCmd>();
+ commandContext->EndCompute();
+ return {};
+ }
- bindGroups.OnSetBindGroup(cmd->index, ToBackend(cmd->group.Get()),
- cmd->dynamicOffsetCount, dynamicOffsets);
- break;
- }
+ case Command::Dispatch: {
+ DispatchCmd* dispatch = mCommands.NextCommand<DispatchCmd>();
- case Command::InsertDebugMarker: {
- InsertDebugMarkerCmd* cmd = mCommands.NextCommand<InsertDebugMarkerCmd>();
- char* label = mCommands.NextData<char>(cmd->length + 1);
- NSRef<NSString> mtlLabel =
- AcquireNSRef([[NSString alloc] initWithUTF8String:label]);
- [encoder insertDebugSignpost:mtlLabel.Get()];
+                // Skip no-op dispatches; they can cause issues on some systems.
+ if (dispatch->x == 0 || dispatch->y == 0 || dispatch->z == 0) {
break;
}
- case Command::PopDebugGroup: {
- mCommands.NextCommand<PopDebugGroupCmd>();
+ bindGroups.Apply(encoder);
+ storageBufferLengths.Apply(encoder, lastPipeline);
- [encoder popDebugGroup];
- break;
- }
-
- case Command::PushDebugGroup: {
- PushDebugGroupCmd* cmd = mCommands.NextCommand<PushDebugGroupCmd>();
- char* label = mCommands.NextData<char>(cmd->length + 1);
- NSRef<NSString> mtlLabel =
- AcquireNSRef([[NSString alloc] initWithUTF8String:label]);
- [encoder pushDebugGroup:mtlLabel.Get()];
- break;
- }
+ [encoder dispatchThreadgroups:MTLSizeMake(dispatch->x, dispatch->y, dispatch->z)
+ threadsPerThreadgroup:lastPipeline->GetLocalWorkGroupSize()];
+ break;
+ }
- case Command::WriteTimestamp: {
- WriteTimestampCmd* cmd = mCommands.NextCommand<WriteTimestampCmd>();
- QuerySet* querySet = ToBackend(cmd->querySet.Get());
+ case Command::DispatchIndirect: {
+ DispatchIndirectCmd* dispatch = mCommands.NextCommand<DispatchIndirectCmd>();
- if (@available(macos 10.15, iOS 14.0, *)) {
- [encoder sampleCountersInBuffer:querySet->GetCounterSampleBuffer()
- atSampleIndex:NSUInteger(cmd->queryIndex)
- withBarrier:YES];
- } else {
- UNREACHABLE();
- }
- break;
- }
+ bindGroups.Apply(encoder);
+ storageBufferLengths.Apply(encoder, lastPipeline);
- default: {
- UNREACHABLE();
- break;
- }
+ Buffer* buffer = ToBackend(dispatch->indirectBuffer.Get());
+ id<MTLBuffer> indirectBuffer = buffer->GetMTLBuffer();
+ [encoder
+ dispatchThreadgroupsWithIndirectBuffer:indirectBuffer
+ indirectBufferOffset:dispatch->indirectOffset
+ threadsPerThreadgroup:lastPipeline->GetLocalWorkGroupSize()];
+ break;
}
- }
- // EndComputePass should have been called
- UNREACHABLE();
- }
+ case Command::SetComputePipeline: {
+ SetComputePipelineCmd* cmd = mCommands.NextCommand<SetComputePipelineCmd>();
+ lastPipeline = ToBackend(cmd->pipeline).Get();
- MaybeError CommandBuffer::EncodeRenderPass(CommandRecordingContext* commandContext,
- MTLRenderPassDescriptor* mtlRenderPass,
- uint32_t width,
- uint32_t height) {
- ASSERT(mtlRenderPass);
-
- Device* device = ToBackend(GetDevice());
-
- // Handle Toggle AlwaysResolveIntoZeroLevelAndLayer. We must handle this before applying
- // the store + MSAA resolve workaround, otherwise this toggle will never be handled because
- // the resolve texture is removed when applying the store + MSAA resolve workaround.
- if (device->IsToggleEnabled(Toggle::AlwaysResolveIntoZeroLevelAndLayer)) {
- std::array<id<MTLTexture>, kMaxColorAttachments> trueResolveTextures = {};
- std::array<uint32_t, kMaxColorAttachments> trueResolveLevels = {};
- std::array<uint32_t, kMaxColorAttachments> trueResolveSlices = {};
-
- // Use temporary resolve texture on the resolve targets with non-zero resolveLevel or
- // resolveSlice.
- bool useTemporaryResolveTexture = false;
- std::array<NSPRef<id<MTLTexture>>, kMaxColorAttachments> temporaryResolveTextures = {};
- for (uint32_t i = 0; i < kMaxColorAttachments; ++i) {
- if (mtlRenderPass.colorAttachments[i].resolveTexture == nullptr) {
- continue;
- }
+ bindGroups.OnSetPipeline(lastPipeline);
- if (mtlRenderPass.colorAttachments[i].resolveLevel == 0 &&
- mtlRenderPass.colorAttachments[i].resolveSlice == 0) {
- continue;
- }
+ lastPipeline->Encode(encoder);
+ break;
+ }
- trueResolveTextures[i] = mtlRenderPass.colorAttachments[i].resolveTexture;
- trueResolveLevels[i] = mtlRenderPass.colorAttachments[i].resolveLevel;
- trueResolveSlices[i] = mtlRenderPass.colorAttachments[i].resolveSlice;
+ case Command::SetBindGroup: {
+ SetBindGroupCmd* cmd = mCommands.NextCommand<SetBindGroupCmd>();
+ uint32_t* dynamicOffsets = nullptr;
+ if (cmd->dynamicOffsetCount > 0) {
+ dynamicOffsets = mCommands.NextData<uint32_t>(cmd->dynamicOffsetCount);
+ }
- const MTLPixelFormat mtlFormat = trueResolveTextures[i].pixelFormat;
- DAWN_TRY_ASSIGN(temporaryResolveTextures[i], CreateResolveTextureForWorkaround(
- device, mtlFormat, width, height));
+ bindGroups.OnSetBindGroup(cmd->index, ToBackend(cmd->group.Get()),
+ cmd->dynamicOffsetCount, dynamicOffsets);
+ break;
+ }
- mtlRenderPass.colorAttachments[i].resolveTexture =
- temporaryResolveTextures[i].Get();
- mtlRenderPass.colorAttachments[i].resolveLevel = 0;
- mtlRenderPass.colorAttachments[i].resolveSlice = 0;
- useTemporaryResolveTexture = true;
+ case Command::InsertDebugMarker: {
+ InsertDebugMarkerCmd* cmd = mCommands.NextCommand<InsertDebugMarkerCmd>();
+ char* label = mCommands.NextData<char>(cmd->length + 1);
+ NSRef<NSString> mtlLabel =
+ AcquireNSRef([[NSString alloc] initWithUTF8String:label]);
+ [encoder insertDebugSignpost:mtlLabel.Get()];
+ break;
}
- // If we need to use a temporary resolve texture we need to copy the result of MSAA
- // resolve back to the true resolve targets.
- if (useTemporaryResolveTexture) {
- DAWN_TRY(EncodeRenderPass(commandContext, mtlRenderPass, width, height));
- for (uint32_t i = 0; i < kMaxColorAttachments; ++i) {
- if (trueResolveTextures[i] == nullptr) {
- continue;
- }
+ case Command::PopDebugGroup: {
+ mCommands.NextCommand<PopDebugGroupCmd>();
- ASSERT(temporaryResolveTextures[i] != nullptr);
- CopyIntoTrueResolveTarget(commandContext, trueResolveTextures[i],
- trueResolveLevels[i], trueResolveSlices[i],
- temporaryResolveTextures[i].Get(), width, height);
- }
- return {};
+ [encoder popDebugGroup];
+ break;
}
- }
- // Handle Store + MSAA resolve workaround (Toggle EmulateStoreAndMSAAResolve).
- if (device->IsToggleEnabled(Toggle::EmulateStoreAndMSAAResolve)) {
- bool hasStoreAndMSAAResolve = false;
+ case Command::PushDebugGroup: {
+ PushDebugGroupCmd* cmd = mCommands.NextCommand<PushDebugGroupCmd>();
+ char* label = mCommands.NextData<char>(cmd->length + 1);
+ NSRef<NSString> mtlLabel =
+ AcquireNSRef([[NSString alloc] initWithUTF8String:label]);
+ [encoder pushDebugGroup:mtlLabel.Get()];
+ break;
+ }
- // Remove any store + MSAA resolve and remember them.
- std::array<id<MTLTexture>, kMaxColorAttachments> resolveTextures = {};
- for (uint32_t i = 0; i < kMaxColorAttachments; ++i) {
- if (mtlRenderPass.colorAttachments[i].storeAction ==
- kMTLStoreActionStoreAndMultisampleResolve) {
- hasStoreAndMSAAResolve = true;
- resolveTextures[i] = mtlRenderPass.colorAttachments[i].resolveTexture;
+ case Command::WriteTimestamp: {
+ WriteTimestampCmd* cmd = mCommands.NextCommand<WriteTimestampCmd>();
+ QuerySet* querySet = ToBackend(cmd->querySet.Get());
- mtlRenderPass.colorAttachments[i].storeAction = MTLStoreActionStore;
- mtlRenderPass.colorAttachments[i].resolveTexture = nullptr;
+ if (@available(macos 10.15, iOS 14.0, *)) {
+ [encoder sampleCountersInBuffer:querySet->GetCounterSampleBuffer()
+ atSampleIndex:NSUInteger(cmd->queryIndex)
+ withBarrier:YES];
+ } else {
+ UNREACHABLE();
}
+ break;
}
- // If we found a store + MSAA resolve we need to resolve in a different render pass.
- if (hasStoreAndMSAAResolve) {
- DAWN_TRY(EncodeRenderPass(commandContext, mtlRenderPass, width, height));
- ResolveInAnotherRenderPass(commandContext, mtlRenderPass, resolveTextures);
- return {};
+ default: {
+ UNREACHABLE();
+ break;
}
}
-
- DAWN_TRY(EncodeRenderPassInternal(commandContext, mtlRenderPass, width, height));
- return {};
}
- MaybeError CommandBuffer::EncodeRenderPassInternal(CommandRecordingContext* commandContext,
- MTLRenderPassDescriptor* mtlRenderPass,
- uint32_t width,
- uint32_t height) {
- bool enableVertexPulling = GetDevice()->IsToggleEnabled(Toggle::MetalEnableVertexPulling);
- RenderPipeline* lastPipeline = nullptr;
- id<MTLBuffer> indexBuffer = nullptr;
- uint32_t indexBufferBaseOffset = 0;
- MTLIndexType indexBufferType;
- uint64_t indexFormatSize = 0;
-
- StorageBufferLengthTracker storageBufferLengths = {};
- VertexBufferTracker vertexBuffers(&storageBufferLengths);
- BindGroupTracker bindGroups(&storageBufferLengths);
-
- id<MTLRenderCommandEncoder> encoder = commandContext->BeginRender(mtlRenderPass);
-
- auto EncodeRenderBundleCommand = [&](CommandIterator* iter, Command type) {
- switch (type) {
- case Command::Draw: {
- DrawCmd* draw = iter->NextCommand<DrawCmd>();
-
- vertexBuffers.Apply(encoder, lastPipeline, enableVertexPulling);
- bindGroups.Apply(encoder);
- storageBufferLengths.Apply(encoder, lastPipeline, enableVertexPulling);
-
- // The instance count must be non-zero, otherwise no-op
- if (draw->instanceCount != 0) {
- // MTLFeatureSet_iOS_GPUFamily3_v1 does not support baseInstance
- if (draw->firstInstance == 0) {
- [encoder drawPrimitives:lastPipeline->GetMTLPrimitiveTopology()
- vertexStart:draw->firstVertex
- vertexCount:draw->vertexCount
- instanceCount:draw->instanceCount];
- } else {
- [encoder drawPrimitives:lastPipeline->GetMTLPrimitiveTopology()
- vertexStart:draw->firstVertex
- vertexCount:draw->vertexCount
- instanceCount:draw->instanceCount
- baseInstance:draw->firstInstance];
- }
+ // EndComputePass should have been called
+ UNREACHABLE();
+}
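// Illustrative sketch only -- not Dawn code. EncodeComputePass above returns
// from inside the loop when it reaches EndComputePass; running off the end of
// the stream means the recording was malformed, which is what the trailing
// UNREACHABLE() expresses. assert() stands in for Dawn's UNREACHABLE macro.
#include <cassert>
#include <cstdio>
#include <vector>

enum class Cmd { Dispatch, EndComputePass };

bool EncodePassSketch(const std::vector<Cmd>& commands) {
    for (Cmd c : commands) {
        switch (c) {
            case Cmd::Dispatch:
                std::printf("apply lazily tracked bind state, then dispatch\n");
                break;
            case Cmd::EndComputePass:
                return true;  // the only valid way out of the loop
        }
    }
    assert(false && "EndComputePass should have been recorded");
    return false;
}

int main() {
    return EncodePassSketch({Cmd::Dispatch, Cmd::EndComputePass}) ? 0 : 1;
}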
+
+MaybeError CommandBuffer::EncodeRenderPass(id<MTLRenderCommandEncoder> encoder) {
+ bool enableVertexPulling = GetDevice()->IsToggleEnabled(Toggle::MetalEnableVertexPulling);
+ RenderPipeline* lastPipeline = nullptr;
+ id<MTLBuffer> indexBuffer = nullptr;
+ uint32_t indexBufferBaseOffset = 0;
+ MTLIndexType indexBufferType;
+ uint64_t indexFormatSize = 0;
+
+ StorageBufferLengthTracker storageBufferLengths = {};
+ VertexBufferTracker vertexBuffers(&storageBufferLengths);
+ BindGroupTracker bindGroups(&storageBufferLengths);
+
+ auto EncodeRenderBundleCommand = [&](CommandIterator* iter, Command type) {
+ switch (type) {
+ case Command::Draw: {
+ DrawCmd* draw = iter->NextCommand<DrawCmd>();
+
+ vertexBuffers.Apply(encoder, lastPipeline, enableVertexPulling);
+ bindGroups.Apply(encoder);
+ storageBufferLengths.Apply(encoder, lastPipeline, enableVertexPulling);
+
+ // The instance count must be non-zero, otherwise no-op
+ if (draw->instanceCount != 0) {
+ // MTLFeatureSet_iOS_GPUFamily3_v1 does not support baseInstance
+ if (draw->firstInstance == 0) {
+ [encoder drawPrimitives:lastPipeline->GetMTLPrimitiveTopology()
+ vertexStart:draw->firstVertex
+ vertexCount:draw->vertexCount
+ instanceCount:draw->instanceCount];
+ } else {
+ [encoder drawPrimitives:lastPipeline->GetMTLPrimitiveTopology()
+ vertexStart:draw->firstVertex
+ vertexCount:draw->vertexCount
+ instanceCount:draw->instanceCount
+ baseInstance:draw->firstInstance];
}
- break;
}
+ break;
+ }
- case Command::DrawIndexed: {
- DrawIndexedCmd* draw = iter->NextCommand<DrawIndexedCmd>();
-
- vertexBuffers.Apply(encoder, lastPipeline, enableVertexPulling);
- bindGroups.Apply(encoder);
- storageBufferLengths.Apply(encoder, lastPipeline, enableVertexPulling);
-
- // The index and instance count must be non-zero, otherwise no-op
- if (draw->indexCount != 0 && draw->instanceCount != 0) {
- // MTLFeatureSet_iOS_GPUFamily3_v1 does not support baseInstance and
- // baseVertex.
- if (draw->baseVertex == 0 && draw->firstInstance == 0) {
- [encoder drawIndexedPrimitives:lastPipeline->GetMTLPrimitiveTopology()
- indexCount:draw->indexCount
- indexType:indexBufferType
- indexBuffer:indexBuffer
- indexBufferOffset:indexBufferBaseOffset +
- draw->firstIndex * indexFormatSize
- instanceCount:draw->instanceCount];
- } else {
- [encoder drawIndexedPrimitives:lastPipeline->GetMTLPrimitiveTopology()
- indexCount:draw->indexCount
- indexType:indexBufferType
- indexBuffer:indexBuffer
- indexBufferOffset:indexBufferBaseOffset +
- draw->firstIndex * indexFormatSize
- instanceCount:draw->instanceCount
- baseVertex:draw->baseVertex
- baseInstance:draw->firstInstance];
- }
+ case Command::DrawIndexed: {
+ DrawIndexedCmd* draw = iter->NextCommand<DrawIndexedCmd>();
+
+ vertexBuffers.Apply(encoder, lastPipeline, enableVertexPulling);
+ bindGroups.Apply(encoder);
+ storageBufferLengths.Apply(encoder, lastPipeline, enableVertexPulling);
+
+ // The index and instance count must be non-zero, otherwise no-op
+ if (draw->indexCount != 0 && draw->instanceCount != 0) {
+ // MTLFeatureSet_iOS_GPUFamily3_v1 does not support baseInstance and
+ // baseVertex.
+ if (draw->baseVertex == 0 && draw->firstInstance == 0) {
+ [encoder drawIndexedPrimitives:lastPipeline->GetMTLPrimitiveTopology()
+ indexCount:draw->indexCount
+ indexType:indexBufferType
+ indexBuffer:indexBuffer
+ indexBufferOffset:indexBufferBaseOffset +
+ draw->firstIndex * indexFormatSize
+ instanceCount:draw->instanceCount];
+ } else {
+ [encoder drawIndexedPrimitives:lastPipeline->GetMTLPrimitiveTopology()
+ indexCount:draw->indexCount
+ indexType:indexBufferType
+ indexBuffer:indexBuffer
+ indexBufferOffset:indexBufferBaseOffset +
+ draw->firstIndex * indexFormatSize
+ instanceCount:draw->instanceCount
+ baseVertex:draw->baseVertex
+ baseInstance:draw->firstInstance];
}
- break;
}
+ break;
+ }
- case Command::DrawIndirect: {
- DrawIndirectCmd* draw = iter->NextCommand<DrawIndirectCmd>();
+ case Command::DrawIndirect: {
+ DrawIndirectCmd* draw = iter->NextCommand<DrawIndirectCmd>();
- vertexBuffers.Apply(encoder, lastPipeline, enableVertexPulling);
- bindGroups.Apply(encoder);
- storageBufferLengths.Apply(encoder, lastPipeline, enableVertexPulling);
+ vertexBuffers.Apply(encoder, lastPipeline, enableVertexPulling);
+ bindGroups.Apply(encoder);
+ storageBufferLengths.Apply(encoder, lastPipeline, enableVertexPulling);
- Buffer* buffer = ToBackend(draw->indirectBuffer.Get());
- id<MTLBuffer> indirectBuffer = buffer->GetMTLBuffer();
- [encoder drawPrimitives:lastPipeline->GetMTLPrimitiveTopology()
- indirectBuffer:indirectBuffer
- indirectBufferOffset:draw->indirectOffset];
- break;
- }
+ Buffer* buffer = ToBackend(draw->indirectBuffer.Get());
+ id<MTLBuffer> indirectBuffer = buffer->GetMTLBuffer();
+ [encoder drawPrimitives:lastPipeline->GetMTLPrimitiveTopology()
+ indirectBuffer:indirectBuffer
+ indirectBufferOffset:draw->indirectOffset];
+ break;
+ }
- case Command::DrawIndexedIndirect: {
- DrawIndexedIndirectCmd* draw = iter->NextCommand<DrawIndexedIndirectCmd>();
+ case Command::DrawIndexedIndirect: {
+ DrawIndexedIndirectCmd* draw = iter->NextCommand<DrawIndexedIndirectCmd>();
- vertexBuffers.Apply(encoder, lastPipeline, enableVertexPulling);
- bindGroups.Apply(encoder);
- storageBufferLengths.Apply(encoder, lastPipeline, enableVertexPulling);
+ vertexBuffers.Apply(encoder, lastPipeline, enableVertexPulling);
+ bindGroups.Apply(encoder);
+ storageBufferLengths.Apply(encoder, lastPipeline, enableVertexPulling);
- Buffer* buffer = ToBackend(draw->indirectBuffer.Get());
- ASSERT(buffer != nullptr);
+ Buffer* buffer = ToBackend(draw->indirectBuffer.Get());
+ ASSERT(buffer != nullptr);
- id<MTLBuffer> indirectBuffer = buffer->GetMTLBuffer();
- [encoder drawIndexedPrimitives:lastPipeline->GetMTLPrimitiveTopology()
- indexType:indexBufferType
- indexBuffer:indexBuffer
- indexBufferOffset:indexBufferBaseOffset
- indirectBuffer:indirectBuffer
- indirectBufferOffset:draw->indirectOffset];
- break;
- }
+ id<MTLBuffer> indirectBuffer = buffer->GetMTLBuffer();
+ [encoder drawIndexedPrimitives:lastPipeline->GetMTLPrimitiveTopology()
+ indexType:indexBufferType
+ indexBuffer:indexBuffer
+ indexBufferOffset:indexBufferBaseOffset
+ indirectBuffer:indirectBuffer
+ indirectBufferOffset:draw->indirectOffset];
+ break;
+ }
- case Command::InsertDebugMarker: {
- InsertDebugMarkerCmd* cmd = iter->NextCommand<InsertDebugMarkerCmd>();
- char* label = iter->NextData<char>(cmd->length + 1);
- NSRef<NSString> mtlLabel =
- AcquireNSRef([[NSString alloc] initWithUTF8String:label]);
- [encoder insertDebugSignpost:mtlLabel.Get()];
- break;
- }
+ case Command::InsertDebugMarker: {
+ InsertDebugMarkerCmd* cmd = iter->NextCommand<InsertDebugMarkerCmd>();
+ char* label = iter->NextData<char>(cmd->length + 1);
+ NSRef<NSString> mtlLabel =
+ AcquireNSRef([[NSString alloc] initWithUTF8String:label]);
+ [encoder insertDebugSignpost:mtlLabel.Get()];
+ break;
+ }
- case Command::PopDebugGroup: {
- iter->NextCommand<PopDebugGroupCmd>();
+ case Command::PopDebugGroup: {
+ iter->NextCommand<PopDebugGroupCmd>();
- [encoder popDebugGroup];
- break;
- }
+ [encoder popDebugGroup];
+ break;
+ }
- case Command::PushDebugGroup: {
- PushDebugGroupCmd* cmd = iter->NextCommand<PushDebugGroupCmd>();
- char* label = iter->NextData<char>(cmd->length + 1);
- NSRef<NSString> mtlLabel =
- AcquireNSRef([[NSString alloc] initWithUTF8String:label]);
- [encoder pushDebugGroup:mtlLabel.Get()];
- break;
- }
+ case Command::PushDebugGroup: {
+ PushDebugGroupCmd* cmd = iter->NextCommand<PushDebugGroupCmd>();
+ char* label = iter->NextData<char>(cmd->length + 1);
+ NSRef<NSString> mtlLabel =
+ AcquireNSRef([[NSString alloc] initWithUTF8String:label]);
+ [encoder pushDebugGroup:mtlLabel.Get()];
+ break;
+ }
- case Command::SetRenderPipeline: {
- SetRenderPipelineCmd* cmd = iter->NextCommand<SetRenderPipelineCmd>();
- RenderPipeline* newPipeline = ToBackend(cmd->pipeline).Get();
-
- vertexBuffers.OnSetPipeline(lastPipeline, newPipeline);
- bindGroups.OnSetPipeline(newPipeline);
-
- [encoder setDepthStencilState:newPipeline->GetMTLDepthStencilState()];
- [encoder setFrontFacingWinding:newPipeline->GetMTLFrontFace()];
- [encoder setCullMode:newPipeline->GetMTLCullMode()];
- [encoder setDepthBias:newPipeline->GetDepthBias()
- slopeScale:newPipeline->GetDepthBiasSlopeScale()
- clamp:newPipeline->GetDepthBiasClamp()];
- if (@available(macOS 10.11, iOS 11.0, *)) {
- MTLDepthClipMode clipMode = newPipeline->ShouldClampDepth()
- ? MTLDepthClipModeClamp
- : MTLDepthClipModeClip;
- [encoder setDepthClipMode:clipMode];
- }
- newPipeline->Encode(encoder);
+ case Command::SetRenderPipeline: {
+ SetRenderPipelineCmd* cmd = iter->NextCommand<SetRenderPipelineCmd>();
+ RenderPipeline* newPipeline = ToBackend(cmd->pipeline).Get();
+
+ vertexBuffers.OnSetPipeline(lastPipeline, newPipeline);
+ bindGroups.OnSetPipeline(newPipeline);
+
+ [encoder setDepthStencilState:newPipeline->GetMTLDepthStencilState()];
+ [encoder setFrontFacingWinding:newPipeline->GetMTLFrontFace()];
+ [encoder setCullMode:newPipeline->GetMTLCullMode()];
+ [encoder setDepthBias:newPipeline->GetDepthBias()
+ slopeScale:newPipeline->GetDepthBiasSlopeScale()
+ clamp:newPipeline->GetDepthBiasClamp()];
+ if (@available(macOS 10.11, iOS 11.0, *)) {
+ MTLDepthClipMode clipMode = newPipeline->ShouldClampDepth()
+ ? MTLDepthClipModeClamp
+ : MTLDepthClipModeClip;
+ [encoder setDepthClipMode:clipMode];
+ }
+ newPipeline->Encode(encoder);
+
+ lastPipeline = newPipeline;
+ break;
+ }
- lastPipeline = newPipeline;
- break;
+ case Command::SetBindGroup: {
+ SetBindGroupCmd* cmd = iter->NextCommand<SetBindGroupCmd>();
+ uint32_t* dynamicOffsets = nullptr;
+ if (cmd->dynamicOffsetCount > 0) {
+ dynamicOffsets = iter->NextData<uint32_t>(cmd->dynamicOffsetCount);
}
- case Command::SetBindGroup: {
- SetBindGroupCmd* cmd = iter->NextCommand<SetBindGroupCmd>();
- uint32_t* dynamicOffsets = nullptr;
- if (cmd->dynamicOffsetCount > 0) {
- dynamicOffsets = iter->NextData<uint32_t>(cmd->dynamicOffsetCount);
- }
+ bindGroups.OnSetBindGroup(cmd->index, ToBackend(cmd->group.Get()),
+ cmd->dynamicOffsetCount, dynamicOffsets);
+ break;
+ }
- bindGroups.OnSetBindGroup(cmd->index, ToBackend(cmd->group.Get()),
- cmd->dynamicOffsetCount, dynamicOffsets);
- break;
- }
+ case Command::SetIndexBuffer: {
+ SetIndexBufferCmd* cmd = iter->NextCommand<SetIndexBufferCmd>();
+ auto b = ToBackend(cmd->buffer.Get());
+ indexBuffer = b->GetMTLBuffer();
+ indexBufferBaseOffset = cmd->offset;
+ indexBufferType = MTLIndexFormat(cmd->format);
+ indexFormatSize = IndexFormatSize(cmd->format);
+ break;
+ }
- case Command::SetIndexBuffer: {
- SetIndexBufferCmd* cmd = iter->NextCommand<SetIndexBufferCmd>();
- auto b = ToBackend(cmd->buffer.Get());
- indexBuffer = b->GetMTLBuffer();
- indexBufferBaseOffset = cmd->offset;
- indexBufferType = MTLIndexFormat(cmd->format);
- indexFormatSize = IndexFormatSize(cmd->format);
- break;
- }
+ case Command::SetVertexBuffer: {
+ SetVertexBufferCmd* cmd = iter->NextCommand<SetVertexBufferCmd>();
- case Command::SetVertexBuffer: {
- SetVertexBufferCmd* cmd = iter->NextCommand<SetVertexBufferCmd>();
+ vertexBuffers.OnSetVertexBuffer(cmd->slot, ToBackend(cmd->buffer.Get()),
+ cmd->offset);
+ break;
+ }
- vertexBuffers.OnSetVertexBuffer(cmd->slot, ToBackend(cmd->buffer.Get()),
- cmd->offset);
- break;
- }
+ default:
+ UNREACHABLE();
+ break;
+ }
+ };
- default:
- UNREACHABLE();
- break;
+ Command type;
+ while (mCommands.NextCommandId(&type)) {
+ switch (type) {
+ case Command::EndRenderPass: {
+ mCommands.NextCommand<EndRenderPassCmd>();
+ return {};
}
- };
-
- Command type;
- while (mCommands.NextCommandId(&type)) {
- switch (type) {
- case Command::EndRenderPass: {
- mCommands.NextCommand<EndRenderPassCmd>();
- commandContext->EndRender();
- return {};
- }
- case Command::SetStencilReference: {
- SetStencilReferenceCmd* cmd = mCommands.NextCommand<SetStencilReferenceCmd>();
- [encoder setStencilReferenceValue:cmd->reference];
- break;
- }
+ case Command::SetStencilReference: {
+ SetStencilReferenceCmd* cmd = mCommands.NextCommand<SetStencilReferenceCmd>();
+ [encoder setStencilReferenceValue:cmd->reference];
+ break;
+ }
- case Command::SetViewport: {
- SetViewportCmd* cmd = mCommands.NextCommand<SetViewportCmd>();
- MTLViewport viewport;
- viewport.originX = cmd->x;
- viewport.originY = cmd->y;
- viewport.width = cmd->width;
- viewport.height = cmd->height;
- viewport.znear = cmd->minDepth;
- viewport.zfar = cmd->maxDepth;
-
- [encoder setViewport:viewport];
- break;
- }
+ case Command::SetViewport: {
+ SetViewportCmd* cmd = mCommands.NextCommand<SetViewportCmd>();
+ MTLViewport viewport;
+ viewport.originX = cmd->x;
+ viewport.originY = cmd->y;
+ viewport.width = cmd->width;
+ viewport.height = cmd->height;
+ viewport.znear = cmd->minDepth;
+ viewport.zfar = cmd->maxDepth;
+
+ [encoder setViewport:viewport];
+ break;
+ }
- case Command::SetScissorRect: {
- SetScissorRectCmd* cmd = mCommands.NextCommand<SetScissorRectCmd>();
- MTLScissorRect rect;
- rect.x = cmd->x;
- rect.y = cmd->y;
- rect.width = cmd->width;
- rect.height = cmd->height;
+ case Command::SetScissorRect: {
+ SetScissorRectCmd* cmd = mCommands.NextCommand<SetScissorRectCmd>();
+ MTLScissorRect rect;
+ rect.x = cmd->x;
+ rect.y = cmd->y;
+ rect.width = cmd->width;
+ rect.height = cmd->height;
- [encoder setScissorRect:rect];
- break;
- }
+ [encoder setScissorRect:rect];
+ break;
+ }
- case Command::SetBlendConstant: {
- SetBlendConstantCmd* cmd = mCommands.NextCommand<SetBlendConstantCmd>();
- [encoder setBlendColorRed:cmd->color.r
- green:cmd->color.g
- blue:cmd->color.b
- alpha:cmd->color.a];
- break;
- }
+ case Command::SetBlendConstant: {
+ SetBlendConstantCmd* cmd = mCommands.NextCommand<SetBlendConstantCmd>();
+ [encoder setBlendColorRed:cmd->color.r
+ green:cmd->color.g
+ blue:cmd->color.b
+ alpha:cmd->color.a];
+ break;
+ }
- case Command::ExecuteBundles: {
- ExecuteBundlesCmd* cmd = mCommands.NextCommand<ExecuteBundlesCmd>();
- auto bundles = mCommands.NextData<Ref<RenderBundleBase>>(cmd->count);
+ case Command::ExecuteBundles: {
+ ExecuteBundlesCmd* cmd = mCommands.NextCommand<ExecuteBundlesCmd>();
+ auto bundles = mCommands.NextData<Ref<RenderBundleBase>>(cmd->count);
- for (uint32_t i = 0; i < cmd->count; ++i) {
- CommandIterator* iter = bundles[i]->GetCommands();
- iter->Reset();
- while (iter->NextCommandId(&type)) {
- EncodeRenderBundleCommand(iter, type);
- }
+ for (uint32_t i = 0; i < cmd->count; ++i) {
+ CommandIterator* iter = bundles[i]->GetCommands();
+ iter->Reset();
+ while (iter->NextCommandId(&type)) {
+ EncodeRenderBundleCommand(iter, type);
}
- break;
}
+ break;
+ }
- case Command::BeginOcclusionQuery: {
- BeginOcclusionQueryCmd* cmd = mCommands.NextCommand<BeginOcclusionQueryCmd>();
+ case Command::BeginOcclusionQuery: {
+ BeginOcclusionQueryCmd* cmd = mCommands.NextCommand<BeginOcclusionQueryCmd>();
- [encoder setVisibilityResultMode:MTLVisibilityResultModeBoolean
- offset:cmd->queryIndex * sizeof(uint64_t)];
- break;
- }
+ [encoder setVisibilityResultMode:MTLVisibilityResultModeBoolean
+ offset:cmd->queryIndex * sizeof(uint64_t)];
+ break;
+ }
- case Command::EndOcclusionQuery: {
- EndOcclusionQueryCmd* cmd = mCommands.NextCommand<EndOcclusionQueryCmd>();
+ case Command::EndOcclusionQuery: {
+ EndOcclusionQueryCmd* cmd = mCommands.NextCommand<EndOcclusionQueryCmd>();
- [encoder setVisibilityResultMode:MTLVisibilityResultModeDisabled
- offset:cmd->queryIndex * sizeof(uint64_t)];
- break;
- }
+ [encoder setVisibilityResultMode:MTLVisibilityResultModeDisabled
+ offset:cmd->queryIndex * sizeof(uint64_t)];
+ break;
+ }
- case Command::WriteTimestamp: {
- WriteTimestampCmd* cmd = mCommands.NextCommand<WriteTimestampCmd>();
- QuerySet* querySet = ToBackend(cmd->querySet.Get());
+ case Command::WriteTimestamp: {
+ WriteTimestampCmd* cmd = mCommands.NextCommand<WriteTimestampCmd>();
+ QuerySet* querySet = ToBackend(cmd->querySet.Get());
- if (@available(macos 10.15, iOS 14.0, *)) {
- [encoder sampleCountersInBuffer:querySet->GetCounterSampleBuffer()
- atSampleIndex:NSUInteger(cmd->queryIndex)
- withBarrier:YES];
- } else {
- UNREACHABLE();
- }
- break;
+ if (@available(macos 10.15, iOS 14.0, *)) {
+ [encoder sampleCountersInBuffer:querySet->GetCounterSampleBuffer()
+ atSampleIndex:NSUInteger(cmd->queryIndex)
+ withBarrier:YES];
+ } else {
+ UNREACHABLE();
}
+ break;
+ }
- default: {
- EncodeRenderBundleCommand(&mCommands, type);
- break;
- }
+ default: {
+ EncodeRenderBundleCommand(&mCommands, type);
+ break;
}
}
-
- // EndRenderPass should have been called
- UNREACHABLE();
}
+ // EndRenderPass should have been called
+ UNREACHABLE();
+}
+
} // namespace dawn::native::metal
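The reindented render-pass code above keeps the backend's usual dispatch shape: commands are pulled from a CommandIterator as a tagged stream, a local EncodeRenderBundleCommand helper handles the commands that may also appear inside render bundles, and the outer loop handles pass-only state (viewport, scissor, blend constant, occlusion queries) and returns once EndRenderPass is reached. The following is a minimal C++ sketch of that shape only; Command, FakeIterator and the handful of cases are illustrative stand-ins, not Dawn's real command definitions.

    #include <cassert>
    #include <cstddef>
    #include <vector>

    // Illustrative stand-ins for the command stream; not the real CommandIterator API.
    enum class Command { Draw, SetBindGroup, SetViewport, ExecuteBundles, EndRenderPass };

    struct FakeIterator {
        std::vector<Command> commands;
        std::size_t index = 0;
        bool NextCommandId(Command* out) {
            if (index >= commands.size()) {
                return false;
            }
            *out = commands[index++];
            return true;
        }
    };

    // Encodes one render pass: commands that may also appear in render bundles go
    // through a shared helper so ExecuteBundles can reuse it; pass-only state is
    // handled inline, and EndRenderPass is the only way out of the loop.
    void EncodeRenderPass(FakeIterator& pass, std::vector<FakeIterator>& bundles) {
        auto EncodeRenderBundleCommand = [](FakeIterator& iter, Command type) {
            (void)iter;  // The real code reads each command's payload from the iterator.
            switch (type) {
                case Command::Draw:         /* encode the draw */ break;
                case Command::SetBindGroup: /* apply the bind group */ break;
                default: assert(false && "not a bundle-compatible command");
            }
        };

        Command type;
        while (pass.NextCommandId(&type)) {
            switch (type) {
                case Command::EndRenderPass:
                    return;  // The pass always terminates here.
                case Command::SetViewport:
                    /* apply viewport state */
                    break;
                case Command::ExecuteBundles:
                    for (FakeIterator& bundle : bundles) {
                        Command bundleType;
                        while (bundle.NextCommandId(&bundleType)) {
                            EncodeRenderBundleCommand(bundle, bundleType);
                        }
                    }
                    break;
                default:
                    EncodeRenderBundleCommand(pass, type);
                    break;
            }
        }
        assert(false && "EndRenderPass should have been reached");
    }
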
diff --git a/chromium/third_party/dawn/src/dawn/native/metal/CommandRecordingContext.h b/chromium/third_party/dawn/src/dawn/native/metal/CommandRecordingContext.h
index 925d8faa0c1..b5ec3defaf2 100644
--- a/chromium/third_party/dawn/src/dawn/native/metal/CommandRecordingContext.h
+++ b/chromium/third_party/dawn/src/dawn/native/metal/CommandRecordingContext.h
@@ -22,37 +22,37 @@
namespace dawn::native::metal {
- // This class wraps a MTLCommandBuffer and tracks which Metal encoder is open.
- // Only one encoder may be open at a time.
- class CommandRecordingContext : NonMovable {
- public:
- CommandRecordingContext();
- ~CommandRecordingContext();
-
- id<MTLCommandBuffer> GetCommands();
- void MarkUsed();
- bool WasUsed() const;
-
- MaybeError PrepareNextCommandBuffer(id<MTLCommandQueue> queue);
- NSPRef<id<MTLCommandBuffer>> AcquireCommands();
-
- id<MTLBlitCommandEncoder> EnsureBlit();
- void EndBlit();
-
- id<MTLComputeCommandEncoder> BeginCompute();
- void EndCompute();
-
- id<MTLRenderCommandEncoder> BeginRender(MTLRenderPassDescriptor* descriptor);
- void EndRender();
-
- private:
- NSPRef<id<MTLCommandBuffer>> mCommands;
- NSPRef<id<MTLBlitCommandEncoder>> mBlit;
- NSPRef<id<MTLComputeCommandEncoder>> mCompute;
- NSPRef<id<MTLRenderCommandEncoder>> mRender;
- bool mInEncoder = false;
- bool mUsed = false;
- };
+// This class wraps a MTLCommandBuffer and tracks which Metal encoder is open.
+// Only one encoder may be open at a time.
+class CommandRecordingContext : NonMovable {
+ public:
+ CommandRecordingContext();
+ ~CommandRecordingContext();
+
+ id<MTLCommandBuffer> GetCommands();
+ void MarkUsed();
+ bool WasUsed() const;
+
+ MaybeError PrepareNextCommandBuffer(id<MTLCommandQueue> queue);
+ NSPRef<id<MTLCommandBuffer>> AcquireCommands();
+
+ id<MTLBlitCommandEncoder> EnsureBlit();
+ void EndBlit();
+
+ id<MTLComputeCommandEncoder> BeginCompute();
+ void EndCompute();
+
+ id<MTLRenderCommandEncoder> BeginRender(MTLRenderPassDescriptor* descriptor);
+ void EndRender();
+
+ private:
+ NSPRef<id<MTLCommandBuffer>> mCommands;
+ NSPRef<id<MTLBlitCommandEncoder>> mBlit;
+ NSPRef<id<MTLComputeCommandEncoder>> mCompute;
+ NSPRef<id<MTLRenderCommandEncoder>> mRender;
+ bool mInEncoder = false;
+ bool mUsed = false;
+};
} // namespace dawn::native::metal
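As the comment above states, CommandRecordingContext wraps one MTLCommandBuffer and allows only one open encoder at a time, with the blit encoder created lazily. Below is a rough C++ sketch of that bookkeeping only, using placeholder encoder types instead of the real id<MTL...> handles; it illustrates the mInEncoder invariant, not the Metal calls.

    #include <cassert>
    #include <memory>

    // Placeholder encoder types standing in for id<MTLBlitCommandEncoder> etc.
    struct BlitEncoder {};
    struct RenderEncoder {};

    // Tracks which encoder is currently recording; only one may be open at a time.
    class EncoderTracker {
      public:
        // Created lazily: repeated calls while a blit encoder is open reuse it.
        BlitEncoder* EnsureBlit() {
            if (mBlit == nullptr) {
                assert(!mInEncoder);
                mInEncoder = true;
                mBlit = std::make_unique<BlitEncoder>();
            }
            return mBlit.get();
        }

        // Safe to call even if no blit encoder is open.
        void EndBlit() {
            if (mBlit != nullptr) {
                mBlit.reset();  // endEncoding in the real backend
                mInEncoder = false;
            }
        }

        RenderEncoder* BeginRender() {
            assert(!mInEncoder);  // Enforces the one-encoder-at-a-time rule.
            mInEncoder = true;
            mRender = std::make_unique<RenderEncoder>();
            return mRender.get();
        }

        void EndRender() {
            assert(mRender != nullptr);
            mRender.reset();
            mInEncoder = false;
        }

      private:
        std::unique_ptr<BlitEncoder> mBlit;
        std::unique_ptr<RenderEncoder> mRender;
        bool mInEncoder = false;
    };
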
diff --git a/chromium/third_party/dawn/src/dawn/native/metal/CommandRecordingContext.mm b/chromium/third_party/dawn/src/dawn/native/metal/CommandRecordingContext.mm
index cced9a76dea..d4bbef068d9 100644
--- a/chromium/third_party/dawn/src/dawn/native/metal/CommandRecordingContext.mm
+++ b/chromium/third_party/dawn/src/dawn/native/metal/CommandRecordingContext.mm
@@ -18,115 +18,115 @@
namespace dawn::native::metal {
- CommandRecordingContext::CommandRecordingContext() = default;
-
- CommandRecordingContext::~CommandRecordingContext() {
- // Commands must be acquired.
- ASSERT(mCommands == nullptr);
+CommandRecordingContext::CommandRecordingContext() = default;
+
+CommandRecordingContext::~CommandRecordingContext() {
+ // Commands must be acquired.
+ ASSERT(mCommands == nullptr);
+}
+
+id<MTLCommandBuffer> CommandRecordingContext::GetCommands() {
+ return mCommands.Get();
+}
+
+void CommandRecordingContext::MarkUsed() {
+ mUsed = true;
+}
+bool CommandRecordingContext::WasUsed() const {
+ return mUsed;
+}
+
+MaybeError CommandRecordingContext::PrepareNextCommandBuffer(id<MTLCommandQueue> queue) {
+ ASSERT(mCommands == nil);
+ ASSERT(!mUsed);
+
+ // The MTLCommandBuffer will be autoreleased by default.
+ // The autorelease pool may drain before the command buffer is submitted. Retain so it stays
+ // alive.
+ mCommands = AcquireNSPRef([[queue commandBuffer] retain]);
+ if (mCommands == nil) {
+ return DAWN_INTERNAL_ERROR("Failed to allocate an MTLCommandBuffer");
}
- id<MTLCommandBuffer> CommandRecordingContext::GetCommands() {
- return mCommands.Get();
- }
+ return {};
+}
- void CommandRecordingContext::MarkUsed() {
- mUsed = true;
- }
- bool CommandRecordingContext::WasUsed() const {
- return mUsed;
+NSPRef<id<MTLCommandBuffer>> CommandRecordingContext::AcquireCommands() {
+ // A blit encoder can be left open from WriteBuffer, make sure we close it.
+ if (mCommands != nullptr) {
+ EndBlit();
}
- MaybeError CommandRecordingContext::PrepareNextCommandBuffer(id<MTLCommandQueue> queue) {
- ASSERT(mCommands == nil);
- ASSERT(!mUsed);
+ ASSERT(!mInEncoder);
+ mUsed = false;
+ return std::move(mCommands);
+}
- // The MTLCommandBuffer will be autoreleased by default.
- // The autorelease pool may drain before the command buffer is submitted. Retain so it stays
- // alive.
- mCommands = AcquireNSPRef([[queue commandBuffer] retain]);
- if (mCommands == nil) {
- return DAWN_INTERNAL_ERROR("Failed to allocate an MTLCommandBuffer");
- }
-
- return {};
- }
-
- NSPRef<id<MTLCommandBuffer>> CommandRecordingContext::AcquireCommands() {
- // A blit encoder can be left open from WriteBuffer, make sure we close it.
- if (mCommands != nullptr) {
- EndBlit();
- }
+id<MTLBlitCommandEncoder> CommandRecordingContext::EnsureBlit() {
+ ASSERT(mCommands != nullptr);
+ if (mBlit == nullptr) {
ASSERT(!mInEncoder);
- mUsed = false;
- return std::move(mCommands);
- }
-
- id<MTLBlitCommandEncoder> CommandRecordingContext::EnsureBlit() {
- ASSERT(mCommands != nullptr);
-
- if (mBlit == nullptr) {
- ASSERT(!mInEncoder);
- mInEncoder = true;
-
- // The encoder is created autoreleased. Retain it to avoid the autoreleasepool from
- // draining from under us.
- mBlit.Acquire([[*mCommands blitCommandEncoder] retain]);
- }
- return mBlit.Get();
- }
-
- void CommandRecordingContext::EndBlit() {
- ASSERT(mCommands != nullptr);
-
- if (mBlit != nullptr) {
- [*mBlit endEncoding];
- mBlit = nullptr;
- mInEncoder = false;
- }
- }
-
- id<MTLComputeCommandEncoder> CommandRecordingContext::BeginCompute() {
- ASSERT(mCommands != nullptr);
- ASSERT(mCompute == nullptr);
- ASSERT(!mInEncoder);
-
mInEncoder = true;
- // The encoder is created autoreleased. Retain it to avoid the autoreleasepool from
- // draining from under us.
- mCompute.Acquire([[*mCommands computeCommandEncoder] retain]);
- return mCompute.Get();
- }
-
- void CommandRecordingContext::EndCompute() {
- ASSERT(mCommands != nullptr);
- ASSERT(mCompute != nullptr);
- [*mCompute endEncoding];
- mCompute = nullptr;
- mInEncoder = false;
- }
-
- id<MTLRenderCommandEncoder> CommandRecordingContext::BeginRender(
- MTLRenderPassDescriptor* descriptor) {
- ASSERT(mCommands != nullptr);
- ASSERT(mRender == nullptr);
- ASSERT(!mInEncoder);
-
- mInEncoder = true;
// The encoder is created autoreleased. Retain it to avoid the autoreleasepool from
// draining from under us.
- mRender.Acquire([[*mCommands renderCommandEncoderWithDescriptor:descriptor] retain]);
- return mRender.Get();
+ mBlit.Acquire([[*mCommands blitCommandEncoder] retain]);
}
+ return mBlit.Get();
+}
- void CommandRecordingContext::EndRender() {
- ASSERT(mCommands != nullptr);
- ASSERT(mRender != nullptr);
+void CommandRecordingContext::EndBlit() {
+ ASSERT(mCommands != nullptr);
- [*mRender endEncoding];
- mRender = nullptr;
+ if (mBlit != nullptr) {
+ [*mBlit endEncoding];
+ mBlit = nullptr;
mInEncoder = false;
}
+}
+
+id<MTLComputeCommandEncoder> CommandRecordingContext::BeginCompute() {
+ ASSERT(mCommands != nullptr);
+ ASSERT(mCompute == nullptr);
+ ASSERT(!mInEncoder);
+
+ mInEncoder = true;
+ // The encoder is created autoreleased. Retain it to avoid the autoreleasepool from
+ // draining from under us.
+ mCompute.Acquire([[*mCommands computeCommandEncoder] retain]);
+ return mCompute.Get();
+}
+
+void CommandRecordingContext::EndCompute() {
+ ASSERT(mCommands != nullptr);
+ ASSERT(mCompute != nullptr);
+
+ [*mCompute endEncoding];
+ mCompute = nullptr;
+ mInEncoder = false;
+}
+
+id<MTLRenderCommandEncoder> CommandRecordingContext::BeginRender(
+ MTLRenderPassDescriptor* descriptor) {
+ ASSERT(mCommands != nullptr);
+ ASSERT(mRender == nullptr);
+ ASSERT(!mInEncoder);
+
+ mInEncoder = true;
+ // The encoder is created autoreleased. Retain it to avoid the autoreleasepool from
+ // draining from under us.
+ mRender.Acquire([[*mCommands renderCommandEncoderWithDescriptor:descriptor] retain]);
+ return mRender.Get();
+}
+
+void CommandRecordingContext::EndRender() {
+ ASSERT(mCommands != nullptr);
+ ASSERT(mRender != nullptr);
+
+ [*mRender endEncoding];
+ mRender = nullptr;
+ mInEncoder = false;
+}
} // namespace dawn::native::metal
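Two details of the implementation above are worth noting: PrepareNextCommandBuffer retains the autoreleased MTLCommandBuffer so a draining autorelease pool cannot free it before submission, and AcquireCommands ends any blit encoder left open (for example by WriteBuffer) before handing the buffer off and clearing the used flag. The sketch below models only that acquire/submit handshake in plain C++; CommandBuffer, EndOpenEncoders and SubmitIfUsed are placeholders, not the backend's actual types.

    #include <cassert>
    #include <memory>
    #include <utility>

    struct CommandBuffer {};  // Stand-in for a retained id<MTLCommandBuffer>.

    class RecordingContext {
      public:
        // Allocates the next command buffer up front so encoding can start immediately.
        void PrepareNextCommandBuffer() {
            assert(mCommands == nullptr && !mUsed);
            mCommands = std::make_unique<CommandBuffer>();
        }

        void MarkUsed() { mUsed = true; }
        bool WasUsed() const { return mUsed; }

        // Hands ownership of the pending command buffer to the caller (for submission)
        // and resets the context so a fresh buffer can be prepared.
        std::unique_ptr<CommandBuffer> AcquireCommands() {
            EndOpenEncoders();  // e.g. a blit encoder left open by a WriteBuffer call
            mUsed = false;
            return std::move(mCommands);
        }

      private:
        void EndOpenEncoders() { /* endEncoding on any open encoder in the real backend */ }

        std::unique_ptr<CommandBuffer> mCommands;
        bool mUsed = false;
    };

    // Typical submission path: skip the submit entirely if nothing was recorded.
    inline void SubmitIfUsed(RecordingContext& ctx) {
        if (!ctx.WasUsed()) {
            return;  // Nothing was recorded; avoid committing an empty buffer.
        }
        std::unique_ptr<CommandBuffer> pending = ctx.AcquireCommands();
        (void)pending;  // The real backend commits this buffer and registers handlers on it.
        ctx.PrepareNextCommandBuffer();
    }
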
diff --git a/chromium/third_party/dawn/src/dawn/native/metal/ComputePipelineMTL.h b/chromium/third_party/dawn/src/dawn/native/metal/ComputePipelineMTL.h
index 3091d744a4a..48a723dabf9 100644
--- a/chromium/third_party/dawn/src/dawn/native/metal/ComputePipelineMTL.h
+++ b/chromium/third_party/dawn/src/dawn/native/metal/ComputePipelineMTL.h
@@ -15,6 +15,8 @@
#ifndef SRC_DAWN_NATIVE_METAL_COMPUTEPIPELINEMTL_H_
#define SRC_DAWN_NATIVE_METAL_COMPUTEPIPELINEMTL_H_
+#include <vector>
+
#include "dawn/native/ComputePipeline.h"
#include "dawn/common/NSRef.h"
@@ -23,30 +25,32 @@
namespace dawn::native::metal {
- class Device;
-
- class ComputePipeline final : public ComputePipelineBase {
- public:
- static Ref<ComputePipeline> CreateUninitialized(
- Device* device,
- const ComputePipelineDescriptor* descriptor);
- static void InitializeAsync(Ref<ComputePipelineBase> computePipeline,
- WGPUCreateComputePipelineAsyncCallback callback,
- void* userdata);
-
- void Encode(id<MTLComputeCommandEncoder> encoder);
- MTLSize GetLocalWorkGroupSize() const;
- bool RequiresStorageBufferLength() const;
-
- private:
- using ComputePipelineBase::ComputePipelineBase;
- MaybeError Initialize() override;
-
- NSPRef<id<MTLComputePipelineState>> mMtlComputePipelineState;
- MTLSize mLocalWorkgroupSize;
- bool mRequiresStorageBufferLength;
- std::vector<uint32_t> mWorkgroupAllocations;
- };
+class Device;
+
+class ComputePipeline final : public ComputePipelineBase {
+ public:
+ static Ref<ComputePipeline> CreateUninitialized(Device* device,
+ const ComputePipelineDescriptor* descriptor);
+ static void InitializeAsync(Ref<ComputePipelineBase> computePipeline,
+ WGPUCreateComputePipelineAsyncCallback callback,
+ void* userdata);
+
+ ComputePipeline(DeviceBase* device, const ComputePipelineDescriptor* descriptor);
+ ~ComputePipeline() override;
+
+ void Encode(id<MTLComputeCommandEncoder> encoder);
+ MTLSize GetLocalWorkGroupSize() const;
+ bool RequiresStorageBufferLength() const;
+
+ private:
+ using ComputePipelineBase::ComputePipelineBase;
+ MaybeError Initialize() override;
+
+ NSPRef<id<MTLComputePipelineState>> mMtlComputePipelineState;
+ MTLSize mLocalWorkgroupSize;
+ bool mRequiresStorageBufferLength;
+ std::vector<uint32_t> mWorkgroupAllocations;
+};
} // namespace dawn::native::metal
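The header above reflects the two-phase pipeline creation used here: CreateUninitialized builds the frontend object cheaply, and Initialize, optionally driven off the calling thread through InitializeAsync and CreateComputePipelineAsyncTask, performs the expensive MTLComputePipelineState compilation and reports errors through a callback. A simplified sketch of that split, where Pipeline, Callback and the detached thread are stand-ins for the real task machinery:

    #include <functional>
    #include <memory>
    #include <string>
    #include <thread>
    #include <utility>

    // Illustrative two-phase pipeline creation; names are placeholders, not Dawn's API.
    struct Pipeline {
        // Phase 1: cheap construction, no backend objects yet.
        static std::shared_ptr<Pipeline> CreateUninitialized() {
            return std::make_shared<Pipeline>();
        }

        // Phase 2: expensive backend compilation; returns an error message on failure.
        std::string Initialize() {
            // ... build the backend pipeline state here ...
            return {};
        }
    };

    using Callback = std::function<void(std::shared_ptr<Pipeline>, const std::string& error)>;

    // Runs the expensive phase off the calling thread and reports through a callback,
    // mirroring the InitializeAsync entry point.
    inline void InitializeAsync(std::shared_ptr<Pipeline> pipeline, Callback callback) {
        std::thread([pipeline = std::move(pipeline), callback = std::move(callback)]() mutable {
            std::string error = pipeline->Initialize();
            callback(std::move(pipeline), error);
        }).detach();
    }
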
diff --git a/chromium/third_party/dawn/src/dawn/native/metal/ComputePipelineMTL.mm b/chromium/third_party/dawn/src/dawn/native/metal/ComputePipelineMTL.mm
index 71d5a01cced..855cd7b4bfa 100644
--- a/chromium/third_party/dawn/src/dawn/native/metal/ComputePipelineMTL.mm
+++ b/chromium/third_party/dawn/src/dawn/native/metal/ComputePipelineMTL.mm
@@ -22,68 +22,72 @@
namespace dawn::native::metal {
- // static
- Ref<ComputePipeline> ComputePipeline::CreateUninitialized(
- Device* device,
- const ComputePipelineDescriptor* descriptor) {
- return AcquireRef(new ComputePipeline(device, descriptor));
- }
+// static
+Ref<ComputePipeline> ComputePipeline::CreateUninitialized(
+ Device* device,
+ const ComputePipelineDescriptor* descriptor) {
+ return AcquireRef(new ComputePipeline(device, descriptor));
+}
- MaybeError ComputePipeline::Initialize() {
- auto mtlDevice = ToBackend(GetDevice())->GetMTLDevice();
+ComputePipeline::ComputePipeline(DeviceBase* dev, const ComputePipelineDescriptor* desc)
+ : ComputePipelineBase(dev, desc) {}
- const ProgrammableStage& computeStage = GetStage(SingleShaderStage::Compute);
- ShaderModule::MetalFunctionData computeData;
+ComputePipeline::~ComputePipeline() = default;
- DAWN_TRY(CreateMTLFunction(computeStage, SingleShaderStage::Compute, ToBackend(GetLayout()),
- &computeData));
+MaybeError ComputePipeline::Initialize() {
+ auto mtlDevice = ToBackend(GetDevice())->GetMTLDevice();
- NSError* error = nullptr;
- mMtlComputePipelineState.Acquire([mtlDevice
- newComputePipelineStateWithFunction:computeData.function.Get()
- error:&error]);
- if (error != nullptr) {
- return DAWN_INTERNAL_ERROR("Error creating pipeline state " +
- std::string([error.localizedDescription UTF8String]));
- }
- ASSERT(mMtlComputePipelineState != nil);
+ const ProgrammableStage& computeStage = GetStage(SingleShaderStage::Compute);
+ ShaderModule::MetalFunctionData computeData;
- // Copy over the local workgroup size as it is passed to dispatch explicitly in Metal
- Origin3D localSize = GetStage(SingleShaderStage::Compute).metadata->localWorkgroupSize;
- mLocalWorkgroupSize = MTLSizeMake(localSize.x, localSize.y, localSize.z);
+ DAWN_TRY(CreateMTLFunction(computeStage, SingleShaderStage::Compute, ToBackend(GetLayout()),
+ &computeData));
- mRequiresStorageBufferLength = computeData.needsStorageBufferLength;
- mWorkgroupAllocations = std::move(computeData.workgroupAllocations);
- return {};
+ NSError* error = nullptr;
+ mMtlComputePipelineState.Acquire(
+ [mtlDevice newComputePipelineStateWithFunction:computeData.function.Get() error:&error]);
+ if (error != nullptr) {
+ return DAWN_INTERNAL_ERROR("Error creating pipeline state " +
+ std::string([error.localizedDescription UTF8String]));
}
+ ASSERT(mMtlComputePipelineState != nil);
+
+ // Copy over the local workgroup size as it is passed to dispatch explicitly in Metal
+ Origin3D localSize = GetStage(SingleShaderStage::Compute).metadata->localWorkgroupSize;
+ mLocalWorkgroupSize = MTLSizeMake(localSize.x, localSize.y, localSize.z);
+
+ mRequiresStorageBufferLength = computeData.needsStorageBufferLength;
+ mWorkgroupAllocations = std::move(computeData.workgroupAllocations);
+ return {};
+}
- void ComputePipeline::Encode(id<MTLComputeCommandEncoder> encoder) {
- [encoder setComputePipelineState:mMtlComputePipelineState.Get()];
- for (size_t i = 0; i < mWorkgroupAllocations.size(); ++i) {
- if (mWorkgroupAllocations[i] == 0) {
- continue;
- }
- // Size must be a multiple of 16 bytes.
- uint32_t rounded = Align<uint32_t>(mWorkgroupAllocations[i], 16);
- [encoder setThreadgroupMemoryLength:rounded atIndex:i];
+void ComputePipeline::Encode(id<MTLComputeCommandEncoder> encoder) {
+ [encoder setComputePipelineState:mMtlComputePipelineState.Get()];
+ for (size_t i = 0; i < mWorkgroupAllocations.size(); ++i) {
+ if (mWorkgroupAllocations[i] == 0) {
+ continue;
}
+ // Size must be a multiple of 16 bytes.
+ uint32_t rounded = Align<uint32_t>(mWorkgroupAllocations[i], 16);
+ [encoder setThreadgroupMemoryLength:rounded atIndex:i];
}
+}
- MTLSize ComputePipeline::GetLocalWorkGroupSize() const {
- return mLocalWorkgroupSize;
- }
+MTLSize ComputePipeline::GetLocalWorkGroupSize() const {
+ return mLocalWorkgroupSize;
+}
- bool ComputePipeline::RequiresStorageBufferLength() const {
- return mRequiresStorageBufferLength;
- }
+bool ComputePipeline::RequiresStorageBufferLength() const {
+ return mRequiresStorageBufferLength;
+}
- void ComputePipeline::InitializeAsync(Ref<ComputePipelineBase> computePipeline,
- WGPUCreateComputePipelineAsyncCallback callback,
- void* userdata) {
- std::unique_ptr<CreateComputePipelineAsyncTask> asyncTask =
- std::make_unique<CreateComputePipelineAsyncTask>(std::move(computePipeline), callback,
- userdata);
- CreateComputePipelineAsyncTask::RunAsync(std::move(asyncTask));
- }
+void ComputePipeline::InitializeAsync(Ref<ComputePipelineBase> computePipeline,
+ WGPUCreateComputePipelineAsyncCallback callback,
+ void* userdata) {
+ std::unique_ptr<CreateComputePipelineAsyncTask> asyncTask =
+ std::make_unique<CreateComputePipelineAsyncTask>(std::move(computePipeline), callback,
+ userdata);
+ CreateComputePipelineAsyncTask::RunAsync(std::move(asyncTask));
+}
} // namespace dawn::native::metal
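ComputePipeline::Encode above skips zero-sized workgroup allocations and rounds the remaining sizes up because, per the in-code comment, the threadgroup memory length must be a multiple of 16 bytes. The Align<uint32_t>(size, 16) call behaves like the small helper sketched below, assuming a power-of-two alignment:

    #include <cstdint>

    // Rounds `value` up to the next multiple of `alignment`.
    // Assumes `alignment` is a power of two (16 for the threadgroup memory lengths above).
    constexpr uint32_t Align(uint32_t value, uint32_t alignment) {
        return (value + alignment - 1) & ~(alignment - 1);
    }

    static_assert(Align(1, 16) == 16, "");
    static_assert(Align(16, 16) == 16, "");
    static_assert(Align(17, 16) == 32, "");
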
diff --git a/chromium/third_party/dawn/src/dawn/native/metal/DeviceMTL.h b/chromium/third_party/dawn/src/dawn/native/metal/DeviceMTL.h
index 0e62bd451c6..074140b89cc 100644
--- a/chromium/third_party/dawn/src/dawn/native/metal/DeviceMTL.h
+++ b/chromium/third_party/dawn/src/dawn/native/metal/DeviceMTL.h
@@ -15,6 +15,10 @@
#ifndef SRC_DAWN_NATIVE_METAL_DEVICEMTL_H_
#define SRC_DAWN_NATIVE_METAL_DEVICEMTL_H_
+#include <atomic>
+#include <memory>
+#include <mutex>
+
#include "dawn/native/dawn_platform.h"
#include "dawn/native/Commands.h"
@@ -26,127 +30,119 @@
#import <Metal/Metal.h>
#import <QuartzCore/QuartzCore.h>
-#include <atomic>
-#include <memory>
-#include <mutex>
-
namespace dawn::native::metal {
- namespace {
- struct KalmanInfo;
- }
-
- class Device final : public DeviceBase {
- public:
- static ResultOrError<Ref<Device>> Create(AdapterBase* adapter,
- NSPRef<id<MTLDevice>> mtlDevice,
- const DeviceDescriptor* descriptor);
- ~Device() override;
-
- MaybeError Initialize(const DeviceDescriptor* descriptor);
-
- MaybeError TickImpl() override;
-
- id<MTLDevice> GetMTLDevice();
- id<MTLCommandQueue> GetMTLQueue();
-
- CommandRecordingContext* GetPendingCommandContext();
- MaybeError SubmitPendingCommandBuffer();
-
- Ref<Texture> CreateTextureWrappingIOSurface(const ExternalImageDescriptor* descriptor,
- IOSurfaceRef ioSurface);
- void WaitForCommandsToBeScheduled();
-
- ResultOrError<std::unique_ptr<StagingBufferBase>> CreateStagingBuffer(size_t size) override;
- MaybeError CopyFromStagingToBuffer(StagingBufferBase* source,
- uint64_t sourceOffset,
- BufferBase* destination,
- uint64_t destinationOffset,
- uint64_t size) override;
- MaybeError CopyFromStagingToTexture(const StagingBufferBase* source,
- const TextureDataLayout& dataLayout,
- TextureCopy* dst,
- const Extent3D& copySizePixels) override;
-
- uint32_t GetOptimalBytesPerRowAlignment() const override;
- uint64_t GetOptimalBufferToTextureCopyOffsetAlignment() const override;
-
- float GetTimestampPeriodInNS() const override;
-
- private:
- Device(AdapterBase* adapter,
- NSPRef<id<MTLDevice>> mtlDevice,
- const DeviceDescriptor* descriptor);
-
- ResultOrError<Ref<BindGroupBase>> CreateBindGroupImpl(
- const BindGroupDescriptor* descriptor) override;
- ResultOrError<Ref<BindGroupLayoutBase>> CreateBindGroupLayoutImpl(
- const BindGroupLayoutDescriptor* descriptor,
- PipelineCompatibilityToken pipelineCompatibilityToken) override;
- ResultOrError<Ref<BufferBase>> CreateBufferImpl(
- const BufferDescriptor* descriptor) override;
- ResultOrError<Ref<CommandBufferBase>> CreateCommandBuffer(
- CommandEncoder* encoder,
- const CommandBufferDescriptor* descriptor) override;
- ResultOrError<Ref<PipelineLayoutBase>> CreatePipelineLayoutImpl(
- const PipelineLayoutDescriptor* descriptor) override;
- ResultOrError<Ref<QuerySetBase>> CreateQuerySetImpl(
- const QuerySetDescriptor* descriptor) override;
- ResultOrError<Ref<SamplerBase>> CreateSamplerImpl(
- const SamplerDescriptor* descriptor) override;
- ResultOrError<Ref<ShaderModuleBase>> CreateShaderModuleImpl(
- const ShaderModuleDescriptor* descriptor,
- ShaderModuleParseResult* parseResult) override;
- ResultOrError<Ref<SwapChainBase>> CreateSwapChainImpl(
- const SwapChainDescriptor* descriptor) override;
- ResultOrError<Ref<NewSwapChainBase>> CreateSwapChainImpl(
- Surface* surface,
- NewSwapChainBase* previousSwapChain,
- const SwapChainDescriptor* descriptor) override;
- ResultOrError<Ref<TextureBase>> CreateTextureImpl(
- const TextureDescriptor* descriptor) override;
- ResultOrError<Ref<TextureViewBase>> CreateTextureViewImpl(
- TextureBase* texture,
- const TextureViewDescriptor* descriptor) override;
- Ref<ComputePipelineBase> CreateUninitializedComputePipelineImpl(
- const ComputePipelineDescriptor* descriptor) override;
- Ref<RenderPipelineBase> CreateUninitializedRenderPipelineImpl(
- const RenderPipelineDescriptor* descriptor) override;
- void InitializeComputePipelineAsyncImpl(Ref<ComputePipelineBase> computePipeline,
- WGPUCreateComputePipelineAsyncCallback callback,
- void* userdata) override;
- void InitializeRenderPipelineAsyncImpl(Ref<RenderPipelineBase> renderPipeline,
- WGPUCreateRenderPipelineAsyncCallback callback,
- void* userdata) override;
-
- void InitTogglesFromDriver();
- void DestroyImpl() override;
- MaybeError WaitForIdleForDestruction() override;
- ResultOrError<ExecutionSerial> CheckAndUpdateCompletedSerials() override;
-
- NSPRef<id<MTLDevice>> mMtlDevice;
- NSPRef<id<MTLCommandQueue>> mCommandQueue;
-
- CommandRecordingContext mCommandContext;
-
- // The completed serial is updated in a Metal completion handler that can be fired on a
- // different thread, so it needs to be atomic.
- std::atomic<uint64_t> mCompletedSerial;
-
- // mLastSubmittedCommands will be accessed in a Metal schedule handler that can be fired on
- // a different thread so we guard access to it with a mutex.
- std::mutex mLastSubmittedCommandsMutex;
- NSPRef<id<MTLCommandBuffer>> mLastSubmittedCommands;
-
- // The current estimation of timestamp period
- float mTimestampPeriod = 1.0f;
- // The base of CPU timestamp and GPU timestamp to measure the linear regression between GPU
- // and CPU timestamps.
- MTLTimestamp mCpuTimestamp API_AVAILABLE(macos(10.15), ios(14.0)) = 0;
- MTLTimestamp mGpuTimestamp API_AVAILABLE(macos(10.15), ios(14.0)) = 0;
- // The parameters for kalman filter
- std::unique_ptr<KalmanInfo> mKalmanInfo;
- };
+struct KalmanInfo;
+
+class Device final : public DeviceBase {
+ public:
+ static ResultOrError<Ref<Device>> Create(AdapterBase* adapter,
+ NSPRef<id<MTLDevice>> mtlDevice,
+ const DeviceDescriptor* descriptor);
+ ~Device() override;
+
+ MaybeError Initialize(const DeviceDescriptor* descriptor);
+
+ MaybeError TickImpl() override;
+
+ id<MTLDevice> GetMTLDevice();
+ id<MTLCommandQueue> GetMTLQueue();
+
+ CommandRecordingContext* GetPendingCommandContext();
+ MaybeError SubmitPendingCommandBuffer();
+
+ Ref<Texture> CreateTextureWrappingIOSurface(const ExternalImageDescriptor* descriptor,
+ IOSurfaceRef ioSurface);
+ void WaitForCommandsToBeScheduled();
+
+ ResultOrError<std::unique_ptr<StagingBufferBase>> CreateStagingBuffer(size_t size) override;
+ MaybeError CopyFromStagingToBuffer(StagingBufferBase* source,
+ uint64_t sourceOffset,
+ BufferBase* destination,
+ uint64_t destinationOffset,
+ uint64_t size) override;
+ MaybeError CopyFromStagingToTexture(const StagingBufferBase* source,
+ const TextureDataLayout& dataLayout,
+ TextureCopy* dst,
+ const Extent3D& copySizePixels) override;
+
+ uint32_t GetOptimalBytesPerRowAlignment() const override;
+ uint64_t GetOptimalBufferToTextureCopyOffsetAlignment() const override;
+
+ float GetTimestampPeriodInNS() const override;
+
+ private:
+ Device(AdapterBase* adapter,
+ NSPRef<id<MTLDevice>> mtlDevice,
+ const DeviceDescriptor* descriptor);
+
+ ResultOrError<Ref<BindGroupBase>> CreateBindGroupImpl(
+ const BindGroupDescriptor* descriptor) override;
+ ResultOrError<Ref<BindGroupLayoutBase>> CreateBindGroupLayoutImpl(
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken) override;
+ ResultOrError<Ref<BufferBase>> CreateBufferImpl(const BufferDescriptor* descriptor) override;
+ ResultOrError<Ref<CommandBufferBase>> CreateCommandBuffer(
+ CommandEncoder* encoder,
+ const CommandBufferDescriptor* descriptor) override;
+ ResultOrError<Ref<PipelineLayoutBase>> CreatePipelineLayoutImpl(
+ const PipelineLayoutDescriptor* descriptor) override;
+ ResultOrError<Ref<QuerySetBase>> CreateQuerySetImpl(
+ const QuerySetDescriptor* descriptor) override;
+ ResultOrError<Ref<SamplerBase>> CreateSamplerImpl(const SamplerDescriptor* descriptor) override;
+ ResultOrError<Ref<ShaderModuleBase>> CreateShaderModuleImpl(
+ const ShaderModuleDescriptor* descriptor,
+ ShaderModuleParseResult* parseResult,
+ OwnedCompilationMessages* compilationMessages) override;
+ ResultOrError<Ref<SwapChainBase>> CreateSwapChainImpl(
+ const SwapChainDescriptor* descriptor) override;
+ ResultOrError<Ref<NewSwapChainBase>> CreateSwapChainImpl(
+ Surface* surface,
+ NewSwapChainBase* previousSwapChain,
+ const SwapChainDescriptor* descriptor) override;
+ ResultOrError<Ref<TextureBase>> CreateTextureImpl(const TextureDescriptor* descriptor) override;
+ ResultOrError<Ref<TextureViewBase>> CreateTextureViewImpl(
+ TextureBase* texture,
+ const TextureViewDescriptor* descriptor) override;
+ Ref<ComputePipelineBase> CreateUninitializedComputePipelineImpl(
+ const ComputePipelineDescriptor* descriptor) override;
+ Ref<RenderPipelineBase> CreateUninitializedRenderPipelineImpl(
+ const RenderPipelineDescriptor* descriptor) override;
+ void InitializeComputePipelineAsyncImpl(Ref<ComputePipelineBase> computePipeline,
+ WGPUCreateComputePipelineAsyncCallback callback,
+ void* userdata) override;
+ void InitializeRenderPipelineAsyncImpl(Ref<RenderPipelineBase> renderPipeline,
+ WGPUCreateRenderPipelineAsyncCallback callback,
+ void* userdata) override;
+
+ void InitTogglesFromDriver();
+ void DestroyImpl() override;
+ MaybeError WaitForIdleForDestruction() override;
+ ResultOrError<ExecutionSerial> CheckAndUpdateCompletedSerials() override;
+
+ NSPRef<id<MTLDevice>> mMtlDevice;
+ NSPRef<id<MTLCommandQueue>> mCommandQueue;
+
+ CommandRecordingContext mCommandContext;
+
+ // The completed serial is updated in a Metal completion handler that can be fired on a
+ // different thread, so it needs to be atomic.
+ std::atomic<uint64_t> mCompletedSerial;
+
+ // mLastSubmittedCommands will be accessed in a Metal schedule handler that can be fired on
+ // a different thread so we guard access to it with a mutex.
+ std::mutex mLastSubmittedCommandsMutex;
+ NSPRef<id<MTLCommandBuffer>> mLastSubmittedCommands;
+
+ // The current estimation of timestamp period
+ float mTimestampPeriod = 1.0f;
+ // The base of CPU timestamp and GPU timestamp to measure the linear regression between GPU
+ // and CPU timestamps.
+ MTLTimestamp mCpuTimestamp API_AVAILABLE(macos(10.15), ios(14.0)) = 0;
+ MTLTimestamp mGpuTimestamp API_AVAILABLE(macos(10.15), ios(14.0)) = 0;
+ // The parameters for kalman filter
+ std::unique_ptr<KalmanInfo> mKalmanInfo;
+};
} // namespace dawn::native::metal
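The threading notes in this header matter for mCompletedSerial: it is advanced both by the frontend in CheckAndUpdateCompletedSerials and by a Metal completion handler running on another thread, so the updated DeviceMTL.mm code below only ever moves it forward with a compare-exchange loop. A generic sketch of that monotonic-advance pattern:

    #include <atomic>
    #include <cstdint>

    // Advances `serial` to `candidate` only if that moves it forward; a concurrent
    // writer (e.g. a GPU completion handler on another thread) can never push it back.
    inline void AdvanceSerial(std::atomic<uint64_t>& serial, uint64_t candidate) {
        uint64_t current = serial.load();
        while (candidate > current &&
               !serial.compare_exchange_weak(current, candidate)) {
            // `current` is refreshed by compare_exchange_weak on failure; retry.
        }
    }
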
diff --git a/chromium/third_party/dawn/src/dawn/native/metal/DeviceMTL.mm b/chromium/third_party/dawn/src/dawn/native/metal/DeviceMTL.mm
index 91916123aca..0a7d4963b1c 100644
--- a/chromium/third_party/dawn/src/dawn/native/metal/DeviceMTL.mm
+++ b/chromium/third_party/dawn/src/dawn/native/metal/DeviceMTL.mm
@@ -42,463 +42,461 @@
namespace dawn::native::metal {
- namespace {
-
- // The time interval for each round of kalman filter
- static constexpr uint64_t kFilterIntervalInMs = static_cast<uint64_t>(NSEC_PER_SEC / 10);
-
- struct KalmanInfo {
- float filterValue; // The estimation value
- float kalmanGain; // The kalman gain
- float R; // The covariance of the observation noise
- float P; // The a posteriori estimate covariance
- };
-
- // A simplified kalman filter for estimating timestamp period based on measured values
- float KalmanFilter(KalmanInfo* info, float measuredValue) {
- // Optimize kalman gain
- info->kalmanGain = info->P / (info->P + info->R);
-
- // Correct filter value
- info->filterValue =
- info->kalmanGain * measuredValue + (1.0 - info->kalmanGain) * info->filterValue;
- // Update estimate covariance
- info->P = (1.0f - info->kalmanGain) * info->P;
- return info->filterValue;
+struct KalmanInfo {
+ float filterValue; // The estimation value
+ float kalmanGain; // The kalman gain
+ float R; // The covariance of the observation noise
+ float P; // The a posteriori estimate covariance
+};
+
+namespace {
+
+// The time interval for each round of kalman filter
+static constexpr uint64_t kFilterIntervalInMs = static_cast<uint64_t>(NSEC_PER_SEC / 10);
+
+// A simplified kalman filter for estimating timestamp period based on measured values
+float KalmanFilter(KalmanInfo* info, float measuredValue) {
+ // Optimize kalman gain
+ info->kalmanGain = info->P / (info->P + info->R);
+
+ // Correct filter value
+ info->filterValue =
+ info->kalmanGain * measuredValue + (1.0 - info->kalmanGain) * info->filterValue;
+ // Update estimate covariance
+ info->P = (1.0f - info->kalmanGain) * info->P;
+ return info->filterValue;
+}
+
+void API_AVAILABLE(macos(10.15), ios(14)) UpdateTimestampPeriod(id<MTLDevice> device,
+ KalmanInfo* info,
+ MTLTimestamp* cpuTimestampStart,
+ MTLTimestamp* gpuTimestampStart,
+ float* timestampPeriod) {
+ // The filter value is converged to an optimal value when the kalman gain is less than
+ // 0.01. At this time, the weight of the measured value is too small to change the next
+ // filter value, the sampling and calculations do not need to continue anymore.
+ if (info->kalmanGain < 0.01f) {
+ return;
+ }
+
+ MTLTimestamp cpuTimestampEnd = 0, gpuTimestampEnd = 0;
+ [device sampleTimestamps:&cpuTimestampEnd gpuTimestamp:&gpuTimestampEnd];
+
+ // Update the timestamp start values when timestamp reset happens
+ if (cpuTimestampEnd < *cpuTimestampStart || gpuTimestampEnd < *gpuTimestampStart) {
+ *cpuTimestampStart = cpuTimestampEnd;
+ *gpuTimestampStart = gpuTimestampEnd;
+ return;
+ }
+
+ if (cpuTimestampEnd - *cpuTimestampStart >= kFilterIntervalInMs) {
+ // The measured timestamp period
+ float measurement = (cpuTimestampEnd - *cpuTimestampStart) /
+ static_cast<float>(gpuTimestampEnd - *gpuTimestampStart);
+
+ // Measurement update
+ *timestampPeriod = KalmanFilter(info, measurement);
+
+ *cpuTimestampStart = cpuTimestampEnd;
+ *gpuTimestampStart = gpuTimestampEnd;
+ }
+}
+
+} // namespace
+
+// static
+ResultOrError<Ref<Device>> Device::Create(AdapterBase* adapter,
+ NSPRef<id<MTLDevice>> mtlDevice,
+ const DeviceDescriptor* descriptor) {
+ Ref<Device> device = AcquireRef(new Device(adapter, std::move(mtlDevice), descriptor));
+ DAWN_TRY(device->Initialize(descriptor));
+ return device;
+}
+
+Device::Device(AdapterBase* adapter,
+ NSPRef<id<MTLDevice>> mtlDevice,
+ const DeviceDescriptor* descriptor)
+ : DeviceBase(adapter, descriptor), mMtlDevice(std::move(mtlDevice)), mCompletedSerial(0) {}
+
+Device::~Device() {
+ Destroy();
+}
+
+MaybeError Device::Initialize(const DeviceDescriptor* descriptor) {
+ InitTogglesFromDriver();
+
+ mCommandQueue.Acquire([*mMtlDevice newCommandQueue]);
+ if (mCommandQueue == nil) {
+ return DAWN_INTERNAL_ERROR("Failed to allocate MTLCommandQueue.");
+ }
+
+ DAWN_TRY(mCommandContext.PrepareNextCommandBuffer(*mCommandQueue));
+
+ if (IsFeatureEnabled(Feature::TimestampQuery) &&
+ !IsToggleEnabled(Toggle::DisableTimestampQueryConversion)) {
+ // Make a best guess of timestamp period based on device vendor info, and converge it to
+ // an accurate value by the following calculations.
+ mTimestampPeriod = gpu_info::IsIntel(GetAdapter()->GetVendorId()) ? 83.333f : 1.0f;
+
+ // Initialize kalman filter parameters
+ mKalmanInfo = std::make_unique<KalmanInfo>();
+ mKalmanInfo->filterValue = 0.0f;
+ mKalmanInfo->kalmanGain = 0.5f;
+ mKalmanInfo->R = 0.0001f; // The smaller this value is, the smaller the error of measured
+ // value is, the more we can trust the measured value.
+ mKalmanInfo->P = 1.0f;
+
+ if (@available(macos 10.15, iOS 14.0, *)) {
+ // Sample CPU timestamp and GPU timestamp for first time at device creation
+ [*mMtlDevice sampleTimestamps:&mCpuTimestamp gpuTimestamp:&mGpuTimestamp];
}
-
- void API_AVAILABLE(macos(10.15), ios(14))
- UpdateTimestampPeriod(id<MTLDevice> device,
- KalmanInfo* info,
- MTLTimestamp* cpuTimestampStart,
- MTLTimestamp* gpuTimestampStart,
- float* timestampPeriod) {
- // The filter value is converged to an optimal value when the kalman gain is less than
- // 0.01. At this time, the weight of the measured value is too small to change the next
- // filter value, the sampling and calculations do not need to continue anymore.
- if (info->kalmanGain < 0.01f) {
- return;
- }
-
- MTLTimestamp cpuTimestampEnd = 0, gpuTimestampEnd = 0;
- [device sampleTimestamps:&cpuTimestampEnd gpuTimestamp:&gpuTimestampEnd];
-
- // Update the timestamp start values when timestamp reset happens
- if (cpuTimestampEnd < *cpuTimestampStart || gpuTimestampEnd < *gpuTimestampStart) {
- *cpuTimestampStart = cpuTimestampEnd;
- *gpuTimestampStart = gpuTimestampEnd;
- return;
- }
-
- if (cpuTimestampEnd - *cpuTimestampStart >= kFilterIntervalInMs) {
- // The measured timestamp period
- float measurement = (cpuTimestampEnd - *cpuTimestampStart) /
- static_cast<float>(gpuTimestampEnd - *gpuTimestampStart);
-
- // Measurement update
- *timestampPeriod = KalmanFilter(info, measurement);
-
- *cpuTimestampStart = cpuTimestampEnd;
- *gpuTimestampStart = gpuTimestampEnd;
- }
- }
-
- } // namespace
-
- // static
- ResultOrError<Ref<Device>> Device::Create(AdapterBase* adapter,
- NSPRef<id<MTLDevice>> mtlDevice,
- const DeviceDescriptor* descriptor) {
- Ref<Device> device = AcquireRef(new Device(adapter, std::move(mtlDevice), descriptor));
- DAWN_TRY(device->Initialize(descriptor));
- return device;
- }
-
- Device::Device(AdapterBase* adapter,
- NSPRef<id<MTLDevice>> mtlDevice,
- const DeviceDescriptor* descriptor)
- : DeviceBase(adapter, descriptor), mMtlDevice(std::move(mtlDevice)), mCompletedSerial(0) {
- }
-
- Device::~Device() {
- Destroy();
}
- MaybeError Device::Initialize(const DeviceDescriptor* descriptor) {
- InitTogglesFromDriver();
-
- mCommandQueue.Acquire([*mMtlDevice newCommandQueue]);
- if (mCommandQueue == nil) {
- return DAWN_INTERNAL_ERROR("Failed to allocate MTLCommandQueue.");
- }
+ return DeviceBase::Initialize(AcquireRef(new Queue(this, &descriptor->defaultQueue)));
+}
- DAWN_TRY(mCommandContext.PrepareNextCommandBuffer(*mCommandQueue));
-
- if (IsFeatureEnabled(Feature::TimestampQuery) &&
- !IsToggleEnabled(Toggle::DisableTimestampQueryConversion)) {
- // Make a best guess of timestamp period based on device vendor info, and converge it to
- // an accurate value by the following calculations.
- mTimestampPeriod = gpu_info::IsIntel(GetAdapter()->GetVendorId()) ? 83.333f : 1.0f;
-
- // Initialize kalman filter parameters
- mKalmanInfo = std::make_unique<KalmanInfo>();
- mKalmanInfo->filterValue = 0.0f;
- mKalmanInfo->kalmanGain = 0.5f;
- mKalmanInfo->R =
- 0.0001f; // The smaller this value is, the smaller the error of measured value is,
- // the more we can trust the measured value.
- mKalmanInfo->P = 1.0f;
-
- if (@available(macos 10.15, iOS 14.0, *)) {
- // Sample CPU timestamp and GPU timestamp for first time at device creation
- [*mMtlDevice sampleTimestamps:&mCpuTimestamp gpuTimestamp:&mGpuTimestamp];
- }
- }
-
- return DeviceBase::Initialize(AcquireRef(new Queue(this, &descriptor->defaultQueue)));
- }
-
- void Device::InitTogglesFromDriver() {
- {
- bool haveStoreAndMSAAResolve = false;
-#if defined(DAWN_PLATFORM_MACOS)
- if (@available(macOS 10.12, *)) {
- haveStoreAndMSAAResolve =
- [*mMtlDevice supportsFeatureSet:MTLFeatureSet_macOS_GPUFamily1_v2];
- }
-#elif defined(DAWN_PLATFORM_IOS)
+void Device::InitTogglesFromDriver() {
+ {
+ bool haveStoreAndMSAAResolve = false;
+#if DAWN_PLATFORM_IS(MACOS)
+ if (@available(macOS 10.12, *)) {
haveStoreAndMSAAResolve =
- [*mMtlDevice supportsFeatureSet:MTLFeatureSet_iOS_GPUFamily3_v2];
+ [*mMtlDevice supportsFeatureSet:MTLFeatureSet_macOS_GPUFamily1_v2];
+ }
+#elif DAWN_PLATFORM_IS(IOS)
+ haveStoreAndMSAAResolve = [*mMtlDevice supportsFeatureSet:MTLFeatureSet_iOS_GPUFamily3_v2];
#endif
- // On tvOS, we would need MTLFeatureSet_tvOS_GPUFamily2_v1.
- SetToggle(Toggle::EmulateStoreAndMSAAResolve, !haveStoreAndMSAAResolve);
+ // On tvOS, we would need MTLFeatureSet_tvOS_GPUFamily2_v1.
+ SetToggle(Toggle::EmulateStoreAndMSAAResolve, !haveStoreAndMSAAResolve);
- bool haveSamplerCompare = true;
-#if defined(DAWN_PLATFORM_IOS)
- haveSamplerCompare = [*mMtlDevice supportsFeatureSet:MTLFeatureSet_iOS_GPUFamily3_v1];
+ bool haveSamplerCompare = true;
+#if DAWN_PLATFORM_IS(IOS)
+ haveSamplerCompare = [*mMtlDevice supportsFeatureSet:MTLFeatureSet_iOS_GPUFamily3_v1];
#endif
- // TODO(crbug.com/dawn/342): Investigate emulation -- possibly expensive.
- SetToggle(Toggle::MetalDisableSamplerCompare, !haveSamplerCompare);
+ // TODO(crbug.com/dawn/342): Investigate emulation -- possibly expensive.
+ SetToggle(Toggle::MetalDisableSamplerCompare, !haveSamplerCompare);
- bool haveBaseVertexBaseInstance = true;
-#if defined(DAWN_PLATFORM_IOS)
- haveBaseVertexBaseInstance =
- [*mMtlDevice supportsFeatureSet:MTLFeatureSet_iOS_GPUFamily3_v1];
+ bool haveBaseVertexBaseInstance = true;
+#if DAWN_PLATFORM_IS(IOS)
+ haveBaseVertexBaseInstance =
+ [*mMtlDevice supportsFeatureSet:MTLFeatureSet_iOS_GPUFamily3_v1];
#endif
- // TODO(crbug.com/dawn/343): Investigate emulation.
- SetToggle(Toggle::DisableBaseVertex, !haveBaseVertexBaseInstance);
- SetToggle(Toggle::DisableBaseInstance, !haveBaseVertexBaseInstance);
- }
-
- // Vertex buffer robustness is implemented by using programmable vertex pulling. Enable
- // that code path if it isn't explicitly disabled.
- if (IsRobustnessEnabled()) {
- SetToggle(Toggle::MetalEnableVertexPulling, true);
- }
-
- // TODO(crbug.com/dawn/846): tighten this workaround when the driver bug is fixed.
- SetToggle(Toggle::AlwaysResolveIntoZeroLevelAndLayer, true);
-
- uint32_t deviceId = GetAdapter()->GetDeviceId();
- uint32_t vendorId = GetAdapter()->GetVendorId();
-
- // TODO(crbug.com/dawn/847): Use MTLStorageModeShared instead of MTLStorageModePrivate when
- // creating MTLCounterSampleBuffer in QuerySet on Intel platforms, otherwise it fails to
- // create the buffer. Change to use MTLStorageModePrivate when the bug is fixed.
- if (@available(macOS 10.15, iOS 14.0, *)) {
- bool useSharedMode = gpu_info::IsIntel(vendorId);
- SetToggle(Toggle::MetalUseSharedModeForCounterSampleBuffer, useSharedMode);
- }
-
- // TODO(crbug.com/dawn/1071): r8unorm and rg8unorm textures with multiple mip levels don't
- // clear properly on Intel Macs.
- if (gpu_info::IsIntel(vendorId)) {
- SetToggle(Toggle::DisableR8RG8Mipmaps, true);
- }
-
- // On some Intel GPU vertex only render pipeline get wrong depth result if no fragment
- // shader provided. Create a dummy fragment shader module to work around this issue.
- if (gpu_info::IsIntel(vendorId)) {
- bool useDummyFragmentShader = true;
- if (gpu_info::IsSkylake(deviceId)) {
- useDummyFragmentShader = false;
- }
- SetToggle(Toggle::UseDummyFragmentInVertexOnlyPipeline, useDummyFragmentShader);
- }
+ // TODO(crbug.com/dawn/343): Investigate emulation.
+ SetToggle(Toggle::DisableBaseVertex, !haveBaseVertexBaseInstance);
+ SetToggle(Toggle::DisableBaseInstance, !haveBaseVertexBaseInstance);
}
- ResultOrError<Ref<BindGroupBase>> Device::CreateBindGroupImpl(
- const BindGroupDescriptor* descriptor) {
- return BindGroup::Create(this, descriptor);
- }
- ResultOrError<Ref<BindGroupLayoutBase>> Device::CreateBindGroupLayoutImpl(
- const BindGroupLayoutDescriptor* descriptor,
- PipelineCompatibilityToken pipelineCompatibilityToken) {
- return BindGroupLayout::Create(this, descriptor, pipelineCompatibilityToken);
- }
- ResultOrError<Ref<BufferBase>> Device::CreateBufferImpl(const BufferDescriptor* descriptor) {
- return Buffer::Create(this, descriptor);
- }
- ResultOrError<Ref<CommandBufferBase>> Device::CreateCommandBuffer(
- CommandEncoder* encoder,
- const CommandBufferDescriptor* descriptor) {
- return CommandBuffer::Create(encoder, descriptor);
- }
- Ref<ComputePipelineBase> Device::CreateUninitializedComputePipelineImpl(
- const ComputePipelineDescriptor* descriptor) {
- return ComputePipeline::CreateUninitialized(this, descriptor);
- }
- ResultOrError<Ref<PipelineLayoutBase>> Device::CreatePipelineLayoutImpl(
- const PipelineLayoutDescriptor* descriptor) {
- return PipelineLayout::Create(this, descriptor);
- }
- ResultOrError<Ref<QuerySetBase>> Device::CreateQuerySetImpl(
- const QuerySetDescriptor* descriptor) {
- return QuerySet::Create(this, descriptor);
- }
- Ref<RenderPipelineBase> Device::CreateUninitializedRenderPipelineImpl(
- const RenderPipelineDescriptor* descriptor) {
- return RenderPipeline::CreateUninitialized(this, descriptor);
- }
- ResultOrError<Ref<SamplerBase>> Device::CreateSamplerImpl(const SamplerDescriptor* descriptor) {
- return Sampler::Create(this, descriptor);
- }
- ResultOrError<Ref<ShaderModuleBase>> Device::CreateShaderModuleImpl(
- const ShaderModuleDescriptor* descriptor,
- ShaderModuleParseResult* parseResult) {
- return ShaderModule::Create(this, descriptor, parseResult);
- }
- ResultOrError<Ref<SwapChainBase>> Device::CreateSwapChainImpl(
- const SwapChainDescriptor* descriptor) {
- return OldSwapChain::Create(this, descriptor);
- }
- ResultOrError<Ref<NewSwapChainBase>> Device::CreateSwapChainImpl(
- Surface* surface,
- NewSwapChainBase* previousSwapChain,
- const SwapChainDescriptor* descriptor) {
- return SwapChain::Create(this, surface, previousSwapChain, descriptor);
- }
- ResultOrError<Ref<TextureBase>> Device::CreateTextureImpl(const TextureDescriptor* descriptor) {
- return Texture::Create(this, descriptor);
+ // Vertex buffer robustness is implemented by using programmable vertex pulling. Enable
+ // that code path if it isn't explicitly disabled.
+ if (IsRobustnessEnabled()) {
+ SetToggle(Toggle::MetalEnableVertexPulling, true);
}
- ResultOrError<Ref<TextureViewBase>> Device::CreateTextureViewImpl(
- TextureBase* texture,
- const TextureViewDescriptor* descriptor) {
- return TextureView::Create(texture, descriptor);
- }
- void Device::InitializeComputePipelineAsyncImpl(Ref<ComputePipelineBase> computePipeline,
- WGPUCreateComputePipelineAsyncCallback callback,
- void* userdata) {
- ComputePipeline::InitializeAsync(std::move(computePipeline), callback, userdata);
- }
- void Device::InitializeRenderPipelineAsyncImpl(Ref<RenderPipelineBase> renderPipeline,
- WGPUCreateRenderPipelineAsyncCallback callback,
- void* userdata) {
- RenderPipeline::InitializeAsync(std::move(renderPipeline), callback, userdata);
- }
-
- ResultOrError<ExecutionSerial> Device::CheckAndUpdateCompletedSerials() {
- uint64_t frontendCompletedSerial{GetCompletedCommandSerial()};
- if (frontendCompletedSerial > mCompletedSerial) {
- // sometimes we increase the serials, in which case the completed serial in
- // the device base will surpass the completed serial we have in the metal backend, so we
- // must update ours when we see that the completed serial from device base has
- // increased.
- mCompletedSerial = frontendCompletedSerial;
- }
- return ExecutionSerial(mCompletedSerial.load());
- }
-
- MaybeError Device::TickImpl() {
- DAWN_TRY(SubmitPendingCommandBuffer());
- // Just run timestamp period calculation when timestamp feature is enabled.
- if (IsFeatureEnabled(Feature::TimestampQuery)) {
- if (@available(macos 10.15, iOS 14.0, *)) {
- UpdateTimestampPeriod(GetMTLDevice(), mKalmanInfo.get(), &mCpuTimestamp,
- &mGpuTimestamp, &mTimestampPeriod);
- }
- }
+ // TODO(crbug.com/dawn/846): tighten this workaround when the driver bug is fixed.
+ SetToggle(Toggle::AlwaysResolveIntoZeroLevelAndLayer, true);
- return {};
- }
+ uint32_t deviceId = GetAdapter()->GetDeviceId();
+ uint32_t vendorId = GetAdapter()->GetVendorId();
- id<MTLDevice> Device::GetMTLDevice() {
- return mMtlDevice.Get();
+ // TODO(crbug.com/dawn/847): Use MTLStorageModeShared instead of MTLStorageModePrivate when
+ // creating MTLCounterSampleBuffer in QuerySet on Intel platforms, otherwise it fails to
+ // create the buffer. Change to use MTLStorageModePrivate when the bug is fixed.
+ if (@available(macOS 10.15, iOS 14.0, *)) {
+ bool useSharedMode = gpu_info::IsIntel(vendorId);
+ SetToggle(Toggle::MetalUseSharedModeForCounterSampleBuffer, useSharedMode);
}
- id<MTLCommandQueue> Device::GetMTLQueue() {
- return mCommandQueue.Get();
+ // Rendering R8Unorm and RG8Unorm to small mip doesn't work properly on Intel.
+ // TODO(crbug.com/dawn/1071): Tighten the workaround when this issue is fixed.
+ if (gpu_info::IsIntel(vendorId)) {
+ SetToggle(Toggle::MetalRenderR8RG8UnormSmallMipToTempTexture, true);
}
- CommandRecordingContext* Device::GetPendingCommandContext() {
- mCommandContext.MarkUsed();
- return &mCommandContext;
- }
-
- MaybeError Device::SubmitPendingCommandBuffer() {
- if (!mCommandContext.WasUsed()) {
- return {};
+ // On some Intel GPU vertex only render pipeline get wrong depth result if no fragment
+ // shader provided. Create a placeholder fragment shader module to work around this issue.
+ if (gpu_info::IsIntel(vendorId)) {
+ bool usePlaceholderFragmentShader = true;
+ if (gpu_info::IsSkylake(deviceId)) {
+ usePlaceholderFragmentShader = false;
}
-
- IncrementLastSubmittedCommandSerial();
-
- // Acquire the pending command buffer, which is retained. It must be released later.
- NSPRef<id<MTLCommandBuffer>> pendingCommands = mCommandContext.AcquireCommands();
-
- // Replace mLastSubmittedCommands with the mutex held so we avoid races between the
- // schedule handler and this code.
- {
- std::lock_guard<std::mutex> lock(mLastSubmittedCommandsMutex);
- mLastSubmittedCommands = pendingCommands;
+ SetToggle(Toggle::UsePlaceholderFragmentInVertexOnlyPipeline, usePlaceholderFragmentShader);
+ }
+}
+
+ResultOrError<Ref<BindGroupBase>> Device::CreateBindGroupImpl(
+ const BindGroupDescriptor* descriptor) {
+ return BindGroup::Create(this, descriptor);
+}
+ResultOrError<Ref<BindGroupLayoutBase>> Device::CreateBindGroupLayoutImpl(
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken) {
+ return BindGroupLayout::Create(this, descriptor, pipelineCompatibilityToken);
+}
+ResultOrError<Ref<BufferBase>> Device::CreateBufferImpl(const BufferDescriptor* descriptor) {
+ return Buffer::Create(this, descriptor);
+}
+ResultOrError<Ref<CommandBufferBase>> Device::CreateCommandBuffer(
+ CommandEncoder* encoder,
+ const CommandBufferDescriptor* descriptor) {
+ return CommandBuffer::Create(encoder, descriptor);
+}
+Ref<ComputePipelineBase> Device::CreateUninitializedComputePipelineImpl(
+ const ComputePipelineDescriptor* descriptor) {
+ return ComputePipeline::CreateUninitialized(this, descriptor);
+}
+ResultOrError<Ref<PipelineLayoutBase>> Device::CreatePipelineLayoutImpl(
+ const PipelineLayoutDescriptor* descriptor) {
+ return PipelineLayout::Create(this, descriptor);
+}
+ResultOrError<Ref<QuerySetBase>> Device::CreateQuerySetImpl(const QuerySetDescriptor* descriptor) {
+ return QuerySet::Create(this, descriptor);
+}
+Ref<RenderPipelineBase> Device::CreateUninitializedRenderPipelineImpl(
+ const RenderPipelineDescriptor* descriptor) {
+ return RenderPipeline::CreateUninitialized(this, descriptor);
+}
+ResultOrError<Ref<SamplerBase>> Device::CreateSamplerImpl(const SamplerDescriptor* descriptor) {
+ return Sampler::Create(this, descriptor);
+}
+ResultOrError<Ref<ShaderModuleBase>> Device::CreateShaderModuleImpl(
+ const ShaderModuleDescriptor* descriptor,
+ ShaderModuleParseResult* parseResult,
+ OwnedCompilationMessages* compilationMessages) {
+ return ShaderModule::Create(this, descriptor, parseResult, compilationMessages);
+}
+ResultOrError<Ref<SwapChainBase>> Device::CreateSwapChainImpl(
+ const SwapChainDescriptor* descriptor) {
+ return OldSwapChain::Create(this, descriptor);
+}
+ResultOrError<Ref<NewSwapChainBase>> Device::CreateSwapChainImpl(
+ Surface* surface,
+ NewSwapChainBase* previousSwapChain,
+ const SwapChainDescriptor* descriptor) {
+ return SwapChain::Create(this, surface, previousSwapChain, descriptor);
+}
+ResultOrError<Ref<TextureBase>> Device::CreateTextureImpl(const TextureDescriptor* descriptor) {
+ return Texture::Create(this, descriptor);
+}
+ResultOrError<Ref<TextureViewBase>> Device::CreateTextureViewImpl(
+ TextureBase* texture,
+ const TextureViewDescriptor* descriptor) {
+ return TextureView::Create(texture, descriptor);
+}
+void Device::InitializeComputePipelineAsyncImpl(Ref<ComputePipelineBase> computePipeline,
+ WGPUCreateComputePipelineAsyncCallback callback,
+ void* userdata) {
+ ComputePipeline::InitializeAsync(std::move(computePipeline), callback, userdata);
+}
+void Device::InitializeRenderPipelineAsyncImpl(Ref<RenderPipelineBase> renderPipeline,
+ WGPUCreateRenderPipelineAsyncCallback callback,
+ void* userdata) {
+ RenderPipeline::InitializeAsync(std::move(renderPipeline), callback, userdata);
+}
+
+ResultOrError<ExecutionSerial> Device::CheckAndUpdateCompletedSerials() {
+ uint64_t frontendCompletedSerial{GetCompletedCommandSerial()};
+ // sometimes we increase the serials, in which case the completed serial in
+ // the device base will surpass the completed serial we have in the metal backend, so we
+ // must update ours when we see that the completed serial from device base has
+ // increased.
+ //
+ // This update has to be atomic otherwise there is a race with the `addCompletedHandler`
+ // call below and this call could set the mCompletedSerial backwards.
+ uint64_t current = mCompletedSerial.load();
+ while (frontendCompletedSerial > current &&
+ !mCompletedSerial.compare_exchange_weak(current, frontendCompletedSerial)) {
+ }
+
+ return ExecutionSerial(mCompletedSerial.load());
+}
+
+MaybeError Device::TickImpl() {
+ DAWN_TRY(SubmitPendingCommandBuffer());
+
+ // Just run timestamp period calculation when timestamp feature is enabled.
+ if (IsFeatureEnabled(Feature::TimestampQuery)) {
+ if (@available(macos 10.15, iOS 14.0, *)) {
+ UpdateTimestampPeriod(GetMTLDevice(), mKalmanInfo.get(), &mCpuTimestamp, &mGpuTimestamp,
+ &mTimestampPeriod);
}
-
- // Make a local copy of the pointer to the commands because it's not clear how ObjC blocks
- // handle types with copy / move constructors being referenced in the block..
- id<MTLCommandBuffer> pendingCommandsPointer = pendingCommands.Get();
- [*pendingCommands addScheduledHandler:^(id<MTLCommandBuffer>) {
- // This is DRF because we hold the mutex for mLastSubmittedCommands and pendingCommands
- // is a local value (and not the member itself).
- std::lock_guard<std::mutex> lock(mLastSubmittedCommandsMutex);
- if (this->mLastSubmittedCommands.Get() == pendingCommandsPointer) {
- this->mLastSubmittedCommands = nullptr;
- }
- }];
-
- // Update the completed serial once the completed handler is fired. Make a local copy of
- // mLastSubmittedSerial so it is captured by value.
- ExecutionSerial pendingSerial = GetLastSubmittedCommandSerial();
- // this ObjC block runs on a different thread
- [*pendingCommands addCompletedHandler:^(id<MTLCommandBuffer>) {
- TRACE_EVENT_ASYNC_END0(GetPlatform(), GPUWork, "DeviceMTL::SubmitPendingCommandBuffer",
- uint64_t(pendingSerial));
- ASSERT(uint64_t(pendingSerial) > mCompletedSerial.load());
- this->mCompletedSerial = uint64_t(pendingSerial);
- }];
-
- TRACE_EVENT_ASYNC_BEGIN0(GetPlatform(), GPUWork, "DeviceMTL::SubmitPendingCommandBuffer",
- uint64_t(pendingSerial));
- [*pendingCommands commit];
-
- return mCommandContext.PrepareNextCommandBuffer(*mCommandQueue);
}
- ResultOrError<std::unique_ptr<StagingBufferBase>> Device::CreateStagingBuffer(size_t size) {
- std::unique_ptr<StagingBufferBase> stagingBuffer =
- std::make_unique<StagingBuffer>(size, this);
- DAWN_TRY(stagingBuffer->Initialize());
- return std::move(stagingBuffer);
- }
+ return {};
+}
- MaybeError Device::CopyFromStagingToBuffer(StagingBufferBase* source,
- uint64_t sourceOffset,
- BufferBase* destination,
- uint64_t destinationOffset,
- uint64_t size) {
- // Metal validation layers forbid 0-sized copies, assert it is skipped prior to calling
- // this function.
- ASSERT(size != 0);
-
- ToBackend(destination)
- ->EnsureDataInitializedAsDestination(GetPendingCommandContext(), destinationOffset,
- size);
-
- id<MTLBuffer> uploadBuffer = ToBackend(source)->GetBufferHandle();
- id<MTLBuffer> buffer = ToBackend(destination)->GetMTLBuffer();
- [GetPendingCommandContext()->EnsureBlit() copyFromBuffer:uploadBuffer
- sourceOffset:sourceOffset
- toBuffer:buffer
- destinationOffset:destinationOffset
- size:size];
- return {};
- }
+id<MTLDevice> Device::GetMTLDevice() {
+ return mMtlDevice.Get();
+}
- // In Metal we don't write from the CPU to the texture directly which can be done using the
- // replaceRegion function, because the function requires a non-private storage mode and Dawn
- // sets the private storage mode by default for all textures except IOSurfaces on macOS.
- MaybeError Device::CopyFromStagingToTexture(const StagingBufferBase* source,
- const TextureDataLayout& dataLayout,
- TextureCopy* dst,
- const Extent3D& copySizePixels) {
- Texture* texture = ToBackend(dst->texture.Get());
- EnsureDestinationTextureInitialized(GetPendingCommandContext(), texture, *dst,
- copySizePixels);
-
- RecordCopyBufferToTexture(GetPendingCommandContext(), ToBackend(source)->GetBufferHandle(),
- source->GetSize(), dataLayout.offset, dataLayout.bytesPerRow,
- dataLayout.rowsPerImage, texture, dst->mipLevel, dst->origin,
- dst->aspect, copySizePixels);
+id<MTLCommandQueue> Device::GetMTLQueue() {
+ return mCommandQueue.Get();
+}
+
+CommandRecordingContext* Device::GetPendingCommandContext() {
+ mCommandContext.MarkUsed();
+ return &mCommandContext;
+}
+
+MaybeError Device::SubmitPendingCommandBuffer() {
+ if (!mCommandContext.WasUsed()) {
return {};
}
- Ref<Texture> Device::CreateTextureWrappingIOSurface(const ExternalImageDescriptor* descriptor,
- IOSurfaceRef ioSurface) {
- const TextureDescriptor* textureDescriptor = FromAPI(descriptor->cTextureDescriptor);
+ IncrementLastSubmittedCommandSerial();
- if (ConsumedError(ValidateTextureDescriptor(this, textureDescriptor))) {
- return nullptr;
- }
- if (ConsumedError(ValidateIOSurfaceCanBeWrapped(this, textureDescriptor, ioSurface))) {
- return nullptr;
- }
+ // Acquire the pending command buffer, which is retained. It must be released later.
+ NSPRef<id<MTLCommandBuffer>> pendingCommands = mCommandContext.AcquireCommands();
- Ref<Texture> result;
- if (ConsumedError(Texture::CreateFromIOSurface(this, descriptor, ioSurface), &result)) {
- return nullptr;
- }
- return result;
+ // Replace mLastSubmittedCommands with the mutex held so we avoid races between the
+    // scheduled handler and this code.
+ {
+ std::lock_guard<std::mutex> lock(mLastSubmittedCommandsMutex);
+ mLastSubmittedCommands = pendingCommands;
}
- void Device::WaitForCommandsToBeScheduled() {
- if (ConsumedError(SubmitPendingCommandBuffer())) {
- return;
+ // Make a local copy of the pointer to the commands because it's not clear how ObjC blocks
+    // handle types with copy / move constructors being referenced in the block.
+ id<MTLCommandBuffer> pendingCommandsPointer = pendingCommands.Get();
+ [*pendingCommands addScheduledHandler:^(id<MTLCommandBuffer>) {
+        // This is data-race free (DRF) because we hold the mutex for mLastSubmittedCommands
+        // and pendingCommands is a local value (and not the member itself).
+ std::lock_guard<std::mutex> lock(mLastSubmittedCommandsMutex);
+ if (this->mLastSubmittedCommands.Get() == pendingCommandsPointer) {
+ this->mLastSubmittedCommands = nullptr;
}
-
- // Only lock the object while we take a reference to it, otherwise we could block further
- // progress if the driver calls the scheduled handler (which also acquires the lock) before
- // finishing the waitUntilScheduled.
- NSPRef<id<MTLCommandBuffer>> lastSubmittedCommands;
- {
- std::lock_guard<std::mutex> lock(mLastSubmittedCommandsMutex);
- lastSubmittedCommands = mLastSubmittedCommands;
- }
- [*lastSubmittedCommands waitUntilScheduled];
- }
-
- MaybeError Device::WaitForIdleForDestruction() {
- // Forget all pending commands.
- mCommandContext.AcquireCommands();
+ }];
+
+ // Update the completed serial once the completed handler is fired. Make a local copy of
+ // mLastSubmittedSerial so it is captured by value.
+ ExecutionSerial pendingSerial = GetLastSubmittedCommandSerial();
+    // This ObjC block runs on a different thread.
+ [*pendingCommands addCompletedHandler:^(id<MTLCommandBuffer>) {
+ TRACE_EVENT_ASYNC_END0(GetPlatform(), GPUWork, "DeviceMTL::SubmitPendingCommandBuffer",
+ uint64_t(pendingSerial));
+ ASSERT(uint64_t(pendingSerial) > mCompletedSerial.load());
+ this->mCompletedSerial = uint64_t(pendingSerial);
+ }];
+
+ TRACE_EVENT_ASYNC_BEGIN0(GetPlatform(), GPUWork, "DeviceMTL::SubmitPendingCommandBuffer",
+ uint64_t(pendingSerial));
+ [*pendingCommands commit];
+
+ return mCommandContext.PrepareNextCommandBuffer(*mCommandQueue);
+}
+
+ResultOrError<std::unique_ptr<StagingBufferBase>> Device::CreateStagingBuffer(size_t size) {
+ std::unique_ptr<StagingBufferBase> stagingBuffer = std::make_unique<StagingBuffer>(size, this);
+ DAWN_TRY(stagingBuffer->Initialize());
+ return std::move(stagingBuffer);
+}
+
+MaybeError Device::CopyFromStagingToBuffer(StagingBufferBase* source,
+ uint64_t sourceOffset,
+ BufferBase* destination,
+ uint64_t destinationOffset,
+ uint64_t size) {
+    // Metal validation layers forbid 0-sized copies; assert that callers skip them before
+    // reaching this function.
+ ASSERT(size != 0);
+
+ ToBackend(destination)
+ ->EnsureDataInitializedAsDestination(GetPendingCommandContext(), destinationOffset, size);
+
+ id<MTLBuffer> uploadBuffer = ToBackend(source)->GetBufferHandle();
+ id<MTLBuffer> buffer = ToBackend(destination)->GetMTLBuffer();
+ [GetPendingCommandContext()->EnsureBlit() copyFromBuffer:uploadBuffer
+ sourceOffset:sourceOffset
+ toBuffer:buffer
+ destinationOffset:destinationOffset
+ size:size];
+ return {};
+}
+
+// In Metal we don't write from the CPU directly to the texture (which could be done with the
+// replaceRegion function), because that function requires a non-private storage mode and Dawn
+// sets the private storage mode by default for all textures except IOSurfaces on macOS.
+MaybeError Device::CopyFromStagingToTexture(const StagingBufferBase* source,
+ const TextureDataLayout& dataLayout,
+ TextureCopy* dst,
+ const Extent3D& copySizePixels) {
+ Texture* texture = ToBackend(dst->texture.Get());
+ EnsureDestinationTextureInitialized(GetPendingCommandContext(), texture, *dst, copySizePixels);
+
+ RecordCopyBufferToTexture(GetPendingCommandContext(), ToBackend(source)->GetBufferHandle(),
+ source->GetSize(), dataLayout.offset, dataLayout.bytesPerRow,
+ dataLayout.rowsPerImage, texture, dst->mipLevel, dst->origin,
+ dst->aspect, copySizePixels);
+ return {};
+}
+
+Ref<Texture> Device::CreateTextureWrappingIOSurface(const ExternalImageDescriptor* descriptor,
+ IOSurfaceRef ioSurface) {
+ const TextureDescriptor* textureDescriptor = FromAPI(descriptor->cTextureDescriptor);
+
+ if (ConsumedError(ValidateTextureDescriptor(this, textureDescriptor))) {
+ return nullptr;
+ }
+ if (ConsumedError(ValidateIOSurfaceCanBeWrapped(this, textureDescriptor, ioSurface))) {
+ return nullptr;
+ }
+
+ Ref<Texture> result;
+ if (ConsumedError(Texture::CreateFromIOSurface(this, descriptor, ioSurface), &result)) {
+ return nullptr;
+ }
+ return result;
+}
+
+void Device::WaitForCommandsToBeScheduled() {
+ if (ConsumedError(SubmitPendingCommandBuffer())) {
+ return;
+ }
+
+    // Only lock the object while we take a reference to it; otherwise we could block further
+ // progress if the driver calls the scheduled handler (which also acquires the lock) before
+ // finishing the waitUntilScheduled.
+ NSPRef<id<MTLCommandBuffer>> lastSubmittedCommands;
+ {
+ std::lock_guard<std::mutex> lock(mLastSubmittedCommandsMutex);
+ lastSubmittedCommands = mLastSubmittedCommands;
+ }
+ [*lastSubmittedCommands waitUntilScheduled];
+}
+
+MaybeError Device::WaitForIdleForDestruction() {
+ // Forget all pending commands.
+ mCommandContext.AcquireCommands();
+ DAWN_TRY(CheckPassedSerials());
+
+ // Wait for all commands to be finished so we can free resources
+ while (GetCompletedCommandSerial() != GetLastSubmittedCommandSerial()) {
+ usleep(100);
DAWN_TRY(CheckPassedSerials());
-
- // Wait for all commands to be finished so we can free resources
- while (GetCompletedCommandSerial() != GetLastSubmittedCommandSerial()) {
- usleep(100);
- DAWN_TRY(CheckPassedSerials());
- }
-
- return {};
}
- void Device::DestroyImpl() {
- ASSERT(GetState() == State::Disconnected);
+ return {};
+}
- // Forget all pending commands.
- mCommandContext.AcquireCommands();
+void Device::DestroyImpl() {
+ ASSERT(GetState() == State::Disconnected);
- mCommandQueue = nullptr;
- mMtlDevice = nullptr;
- }
+ // Forget all pending commands.
+ mCommandContext.AcquireCommands();
- uint32_t Device::GetOptimalBytesPerRowAlignment() const {
- return 1;
- }
+ mCommandQueue = nullptr;
+ mMtlDevice = nullptr;
+}
- uint64_t Device::GetOptimalBufferToTextureCopyOffsetAlignment() const {
- return 1;
- }
+uint32_t Device::GetOptimalBytesPerRowAlignment() const {
+ return 1;
+}
- float Device::GetTimestampPeriodInNS() const {
- return mTimestampPeriod;
- }
+uint64_t Device::GetOptimalBufferToTextureCopyOffsetAlignment() const {
+ return 1;
+}
+
+float Device::GetTimestampPeriodInNS() const {
+ return mTimestampPeriod;
+}
} // namespace dawn::native::metal
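
The reformatted CheckAndUpdateCompletedSerials above keeps mCompletedSerial monotonic by retrying a compare-exchange until the frontend's serial has been folded in. As a minimal standalone sketch of that pattern (hypothetical names such as gCompletedSerial and RaiseCompletedSerial are ours, not Dawn's):

    // Sketch of the monotonic serial update: raise the atomic to `frontendSerial`
    // if it is larger, but never move it backwards, even under concurrent bumps.
    #include <atomic>
    #include <cstdint>
    #include <iostream>

    std::atomic<uint64_t> gCompletedSerial{0};

    void RaiseCompletedSerial(uint64_t frontendSerial) {
        uint64_t current = gCompletedSerial.load();
        while (frontendSerial > current &&
               !gCompletedSerial.compare_exchange_weak(current, frontendSerial)) {
            // compare_exchange_weak reloads `current` on failure, so the loop
            // re-checks against the latest observed value before retrying.
        }
    }

    int main() {
        RaiseCompletedSerial(5);
        RaiseCompletedSerial(3);  // No effect: the serial never regresses.
        std::cout << gCompletedSerial.load() << "\n";  // Prints 5.
    }
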
diff --git a/chromium/third_party/dawn/src/dawn/native/metal/Forward.h b/chromium/third_party/dawn/src/dawn/native/metal/Forward.h
index ab85da57662..44f780414f6 100644
--- a/chromium/third_party/dawn/src/dawn/native/metal/Forward.h
+++ b/chromium/third_party/dawn/src/dawn/native/metal/Forward.h
@@ -19,49 +19,49 @@
namespace dawn::native::metal {
- class Adapter;
- class BindGroup;
- class BindGroupLayout;
- class Buffer;
- class CommandBuffer;
- class ComputePipeline;
- class Device;
- class Framebuffer;
- class PipelineLayout;
- class QuerySet;
- class Queue;
- class RenderPipeline;
- class Sampler;
- class ShaderModule;
- class StagingBuffer;
- class SwapChain;
- class Texture;
- class TextureView;
+class Adapter;
+class BindGroup;
+class BindGroupLayout;
+class Buffer;
+class CommandBuffer;
+class ComputePipeline;
+class Device;
+class Framebuffer;
+class PipelineLayout;
+class QuerySet;
+class Queue;
+class RenderPipeline;
+class Sampler;
+class ShaderModule;
+class StagingBuffer;
+class SwapChain;
+class Texture;
+class TextureView;
- struct MetalBackendTraits {
- using AdapterType = Adapter;
- using BindGroupType = BindGroup;
- using BindGroupLayoutType = BindGroupLayout;
- using BufferType = Buffer;
- using CommandBufferType = CommandBuffer;
- using ComputePipelineType = ComputePipeline;
- using DeviceType = Device;
- using PipelineLayoutType = PipelineLayout;
- using QuerySetType = QuerySet;
- using QueueType = Queue;
- using RenderPipelineType = RenderPipeline;
- using SamplerType = Sampler;
- using ShaderModuleType = ShaderModule;
- using StagingBufferType = StagingBuffer;
- using SwapChainType = SwapChain;
- using TextureType = Texture;
- using TextureViewType = TextureView;
- };
+struct MetalBackendTraits {
+ using AdapterType = Adapter;
+ using BindGroupType = BindGroup;
+ using BindGroupLayoutType = BindGroupLayout;
+ using BufferType = Buffer;
+ using CommandBufferType = CommandBuffer;
+ using ComputePipelineType = ComputePipeline;
+ using DeviceType = Device;
+ using PipelineLayoutType = PipelineLayout;
+ using QuerySetType = QuerySet;
+ using QueueType = Queue;
+ using RenderPipelineType = RenderPipeline;
+ using SamplerType = Sampler;
+ using ShaderModuleType = ShaderModule;
+ using StagingBufferType = StagingBuffer;
+ using SwapChainType = SwapChain;
+ using TextureType = Texture;
+ using TextureViewType = TextureView;
+};
- template <typename T>
- auto ToBackend(T&& common) -> decltype(ToBackendBase<MetalBackendTraits>(common)) {
- return ToBackendBase<MetalBackendTraits>(common);
- }
+template <typename T>
+auto ToBackend(T&& common) -> decltype(ToBackendBase<MetalBackendTraits>(common)) {
+ return ToBackendBase<MetalBackendTraits>(common);
+}
} // namespace dawn::native::metal
diff --git a/chromium/third_party/dawn/src/dawn/native/metal/MetalBackend.mm b/chromium/third_party/dawn/src/dawn/native/metal/MetalBackend.mm
index c0214e58f25..f6cbbdce74e 100644
--- a/chromium/third_party/dawn/src/dawn/native/metal/MetalBackend.mm
+++ b/chromium/third_party/dawn/src/dawn/native/metal/MetalBackend.mm
@@ -22,28 +22,25 @@
namespace dawn::native::metal {
- id<MTLDevice> GetMetalDevice(WGPUDevice device) {
- return ToBackend(FromAPI(device))->GetMTLDevice();
- }
-
- AdapterDiscoveryOptions::AdapterDiscoveryOptions()
- : AdapterDiscoveryOptionsBase(WGPUBackendType_Metal) {
- }
-
- ExternalImageDescriptorIOSurface::ExternalImageDescriptorIOSurface()
- : ExternalImageDescriptor(ExternalImageType::IOSurface) {
- }
-
- WGPUTexture WrapIOSurface(WGPUDevice device,
- const ExternalImageDescriptorIOSurface* cDescriptor) {
- Device* backendDevice = ToBackend(FromAPI(device));
- Ref<TextureBase> texture =
- backendDevice->CreateTextureWrappingIOSurface(cDescriptor, cDescriptor->ioSurface);
- return ToAPI(texture.Detach());
- }
-
- void WaitForCommandsToBeScheduled(WGPUDevice device) {
- ToBackend(FromAPI(device))->WaitForCommandsToBeScheduled();
- }
+id<MTLDevice> GetMetalDevice(WGPUDevice device) {
+ return ToBackend(FromAPI(device))->GetMTLDevice();
+}
+
+AdapterDiscoveryOptions::AdapterDiscoveryOptions()
+ : AdapterDiscoveryOptionsBase(WGPUBackendType_Metal) {}
+
+ExternalImageDescriptorIOSurface::ExternalImageDescriptorIOSurface()
+ : ExternalImageDescriptor(ExternalImageType::IOSurface) {}
+
+WGPUTexture WrapIOSurface(WGPUDevice device, const ExternalImageDescriptorIOSurface* cDescriptor) {
+ Device* backendDevice = ToBackend(FromAPI(device));
+ Ref<TextureBase> texture =
+ backendDevice->CreateTextureWrappingIOSurface(cDescriptor, cDescriptor->ioSurface);
+ return ToAPI(texture.Detach());
+}
+
+void WaitForCommandsToBeScheduled(WGPUDevice device) {
+ ToBackend(FromAPI(device))->WaitForCommandsToBeScheduled();
+}
} // namespace dawn::native::metal
diff --git a/chromium/third_party/dawn/src/dawn/native/metal/PipelineLayoutMTL.h b/chromium/third_party/dawn/src/dawn/native/metal/PipelineLayoutMTL.h
index 9e2ee158ff8..48711e76939 100644
--- a/chromium/third_party/dawn/src/dawn/native/metal/PipelineLayoutMTL.h
+++ b/chromium/third_party/dawn/src/dawn/native/metal/PipelineLayoutMTL.h
@@ -25,37 +25,36 @@
namespace dawn::native::metal {
- class Device;
-
- // The number of Metal buffers usable by applications in general
- static constexpr size_t kMetalBufferTableSize = 31;
- // The Metal buffer slot that Dawn reserves for its own use to pass more data to shaders
- static constexpr size_t kBufferLengthBufferSlot = kMetalBufferTableSize - 1;
- // The number of Metal buffers Dawn can use in a generic way (i.e. that aren't reserved)
- static constexpr size_t kGenericMetalBufferSlots = kMetalBufferTableSize - 1;
-
- static constexpr BindGroupIndex kPullingBufferBindingSet = BindGroupIndex(kMaxBindGroups);
-
- class PipelineLayout final : public PipelineLayoutBase {
- public:
- static Ref<PipelineLayout> Create(Device* device,
- const PipelineLayoutDescriptor* descriptor);
-
- using BindingIndexInfo =
- ityp::array<BindGroupIndex,
- ityp::stack_vec<BindingIndex, uint32_t, kMaxOptimalBindingsPerGroup>,
- kMaxBindGroups>;
- const BindingIndexInfo& GetBindingIndexInfo(SingleShaderStage stage) const;
-
- // The number of Metal vertex stage buffers used for the whole pipeline layout.
- uint32_t GetBufferBindingCount(SingleShaderStage stage);
-
- private:
- PipelineLayout(Device* device, const PipelineLayoutDescriptor* descriptor);
- ~PipelineLayout() override = default;
- PerStage<BindingIndexInfo> mIndexInfo;
- PerStage<uint32_t> mBufferBindingCount;
- };
+class Device;
+
+// The number of Metal buffers usable by applications in general
+static constexpr size_t kMetalBufferTableSize = 31;
+// The Metal buffer slot that Dawn reserves for its own use to pass more data to shaders
+static constexpr size_t kBufferLengthBufferSlot = kMetalBufferTableSize - 1;
+// The number of Metal buffers Dawn can use in a generic way (i.e. that aren't reserved)
+static constexpr size_t kGenericMetalBufferSlots = kMetalBufferTableSize - 1;
+
+static constexpr BindGroupIndex kPullingBufferBindingSet = BindGroupIndex(kMaxBindGroups);
+
+class PipelineLayout final : public PipelineLayoutBase {
+ public:
+ static Ref<PipelineLayout> Create(Device* device, const PipelineLayoutDescriptor* descriptor);
+
+ using BindingIndexInfo =
+ ityp::array<BindGroupIndex,
+ ityp::stack_vec<BindingIndex, uint32_t, kMaxOptimalBindingsPerGroup>,
+ kMaxBindGroups>;
+ const BindingIndexInfo& GetBindingIndexInfo(SingleShaderStage stage) const;
+
+ // The number of Metal vertex stage buffers used for the whole pipeline layout.
+ uint32_t GetBufferBindingCount(SingleShaderStage stage);
+
+ private:
+ PipelineLayout(Device* device, const PipelineLayoutDescriptor* descriptor);
+ ~PipelineLayout() override;
+ PerStage<BindingIndexInfo> mIndexInfo;
+ PerStage<uint32_t> mBufferBindingCount;
+};
} // namespace dawn::native::metal
diff --git a/chromium/third_party/dawn/src/dawn/native/metal/PipelineLayoutMTL.mm b/chromium/third_party/dawn/src/dawn/native/metal/PipelineLayoutMTL.mm
index 5f789eae0e6..800db51a7c5 100644
--- a/chromium/third_party/dawn/src/dawn/native/metal/PipelineLayoutMTL.mm
+++ b/chromium/third_party/dawn/src/dawn/native/metal/PipelineLayoutMTL.mm
@@ -20,63 +20,65 @@
namespace dawn::native::metal {
- // static
- Ref<PipelineLayout> PipelineLayout::Create(Device* device,
- const PipelineLayoutDescriptor* descriptor) {
- return AcquireRef(new PipelineLayout(device, descriptor));
- }
+// static
+Ref<PipelineLayout> PipelineLayout::Create(Device* device,
+ const PipelineLayoutDescriptor* descriptor) {
+ return AcquireRef(new PipelineLayout(device, descriptor));
+}
- PipelineLayout::PipelineLayout(Device* device, const PipelineLayoutDescriptor* descriptor)
- : PipelineLayoutBase(device, descriptor) {
- // Each stage has its own numbering namespace in CompilerMSL.
- for (auto stage : IterateStages(kAllStages)) {
- uint32_t bufferIndex = 0;
- uint32_t samplerIndex = 0;
- uint32_t textureIndex = 0;
+PipelineLayout::PipelineLayout(Device* device, const PipelineLayoutDescriptor* descriptor)
+ : PipelineLayoutBase(device, descriptor) {
+ // Each stage has its own numbering namespace in CompilerMSL.
+ for (auto stage : IterateStages(kAllStages)) {
+ uint32_t bufferIndex = 0;
+ uint32_t samplerIndex = 0;
+ uint32_t textureIndex = 0;
- for (BindGroupIndex group : IterateBitSet(GetBindGroupLayoutsMask())) {
- mIndexInfo[stage][group].resize(GetBindGroupLayout(group)->GetBindingCount());
+ for (BindGroupIndex group : IterateBitSet(GetBindGroupLayoutsMask())) {
+ mIndexInfo[stage][group].resize(GetBindGroupLayout(group)->GetBindingCount());
- for (BindingIndex bindingIndex{0};
- bindingIndex < GetBindGroupLayout(group)->GetBindingCount(); ++bindingIndex) {
- const BindingInfo& bindingInfo =
- GetBindGroupLayout(group)->GetBindingInfo(bindingIndex);
- if (!(bindingInfo.visibility & StageBit(stage))) {
- continue;
- }
+ for (BindingIndex bindingIndex{0};
+ bindingIndex < GetBindGroupLayout(group)->GetBindingCount(); ++bindingIndex) {
+ const BindingInfo& bindingInfo =
+ GetBindGroupLayout(group)->GetBindingInfo(bindingIndex);
+ if (!(bindingInfo.visibility & StageBit(stage))) {
+ continue;
+ }
- switch (bindingInfo.bindingType) {
- case BindingInfoType::Buffer:
- mIndexInfo[stage][group][bindingIndex] = bufferIndex;
- bufferIndex++;
- break;
+ switch (bindingInfo.bindingType) {
+ case BindingInfoType::Buffer:
+ mIndexInfo[stage][group][bindingIndex] = bufferIndex;
+ bufferIndex++;
+ break;
- case BindingInfoType::Sampler:
- mIndexInfo[stage][group][bindingIndex] = samplerIndex;
- samplerIndex++;
- break;
+ case BindingInfoType::Sampler:
+ mIndexInfo[stage][group][bindingIndex] = samplerIndex;
+ samplerIndex++;
+ break;
- case BindingInfoType::Texture:
- case BindingInfoType::StorageTexture:
- case BindingInfoType::ExternalTexture:
- mIndexInfo[stage][group][bindingIndex] = textureIndex;
- textureIndex++;
- break;
- }
+ case BindingInfoType::Texture:
+ case BindingInfoType::StorageTexture:
+ case BindingInfoType::ExternalTexture:
+ mIndexInfo[stage][group][bindingIndex] = textureIndex;
+ textureIndex++;
+ break;
}
}
-
- mBufferBindingCount[stage] = bufferIndex;
}
- }
- const PipelineLayout::BindingIndexInfo& PipelineLayout::GetBindingIndexInfo(
- SingleShaderStage stage) const {
- return mIndexInfo[stage];
+ mBufferBindingCount[stage] = bufferIndex;
}
+}
- uint32_t PipelineLayout::GetBufferBindingCount(SingleShaderStage stage) {
- return mBufferBindingCount[stage];
- }
+PipelineLayout::~PipelineLayout() = default;
+
+const PipelineLayout::BindingIndexInfo& PipelineLayout::GetBindingIndexInfo(
+ SingleShaderStage stage) const {
+ return mIndexInfo[stage];
+}
+
+uint32_t PipelineLayout::GetBufferBindingCount(SingleShaderStage stage) {
+ return mBufferBindingCount[stage];
+}
} // namespace dawn::native::metal
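
The PipelineLayout constructor above walks each shader stage's bind groups and hands out Metal slot indices per resource type: buffers, samplers, and textures each count up independently, in binding order. A self-contained C++ sketch of that assignment rule, using simplified stand-in types (BindingType, AssignMetalSlots) rather than Dawn's:

    #include <cstdint>
    #include <iostream>
    #include <vector>

    enum class BindingType { Buffer, Sampler, Texture };

    // For one shader stage, map each binding to its Metal slot. Each resource
    // type gets its own contiguous numbering, mirroring the per-type counters
    // in PipelineLayout's constructor.
    std::vector<uint32_t> AssignMetalSlots(const std::vector<BindingType>& bindings) {
        uint32_t bufferIndex = 0, samplerIndex = 0, textureIndex = 0;
        std::vector<uint32_t> slots;
        slots.reserve(bindings.size());
        for (BindingType type : bindings) {
            switch (type) {
                case BindingType::Buffer:
                    slots.push_back(bufferIndex++);
                    break;
                case BindingType::Sampler:
                    slots.push_back(samplerIndex++);
                    break;
                case BindingType::Texture:
                    slots.push_back(textureIndex++);
                    break;
            }
        }
        return slots;
    }

    int main() {
        // Buffer, Texture, Buffer, Sampler -> slots 0, 0, 1, 0.
        for (uint32_t slot : AssignMetalSlots({BindingType::Buffer, BindingType::Texture,
                                               BindingType::Buffer, BindingType::Sampler})) {
            std::cout << slot << " ";
        }
        std::cout << "\n";
    }
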
diff --git a/chromium/third_party/dawn/src/dawn/native/metal/QuerySetMTL.h b/chromium/third_party/dawn/src/dawn/native/metal/QuerySetMTL.h
index 67cda0fdaab..c8b4c73afa8 100644
--- a/chromium/third_party/dawn/src/dawn/native/metal/QuerySetMTL.h
+++ b/chromium/third_party/dawn/src/dawn/native/metal/QuerySetMTL.h
@@ -23,31 +23,34 @@
namespace dawn::native::metal {
- class Device;
-
- class QuerySet final : public QuerySetBase {
- public:
- static ResultOrError<Ref<QuerySet>> Create(Device* device,
- const QuerySetDescriptor* descriptor);
-
- id<MTLBuffer> GetVisibilityBuffer() const;
- id<MTLCounterSampleBuffer> GetCounterSampleBuffer() const
- API_AVAILABLE(macos(10.15), ios(14.0));
-
- private:
- ~QuerySet() override;
- using QuerySetBase::QuerySetBase;
- MaybeError Initialize();
-
- // Dawn API
- void DestroyImpl() override;
-
- NSPRef<id<MTLBuffer>> mVisibilityBuffer;
- // Note that mCounterSampleBuffer cannot be an NSRef because the API_AVAILABLE macros don't
- // propagate nicely through templates.
- id<MTLCounterSampleBuffer> mCounterSampleBuffer API_AVAILABLE(macos(10.15),
- ios(14.0)) = nullptr;
- };
+class Device;
+
+class QuerySet final : public QuerySetBase {
+ public:
+ static ResultOrError<Ref<QuerySet>> Create(Device* device,
+ const QuerySetDescriptor* descriptor);
+
+ QuerySet(DeviceBase* device, const QuerySetDescriptor* descriptor);
+
+ id<MTLBuffer> GetVisibilityBuffer() const;
+ id<MTLCounterSampleBuffer> GetCounterSampleBuffer() const
+ API_AVAILABLE(macos(10.15), ios(14.0));
+
+ private:
+ using QuerySetBase::QuerySetBase;
+ MaybeError Initialize();
+
+ ~QuerySet() override;
+
+ // Dawn API
+ void DestroyImpl() override;
+
+ NSPRef<id<MTLBuffer>> mVisibilityBuffer;
+ // Note that mCounterSampleBuffer cannot be an NSRef because the API_AVAILABLE macros don't
+ // propagate nicely through templates.
+ id<MTLCounterSampleBuffer> mCounterSampleBuffer API_AVAILABLE(macos(10.15),
+ ios(14.0)) = nullptr;
+};
} // namespace dawn::native::metal
diff --git a/chromium/third_party/dawn/src/dawn/native/metal/QuerySetMTL.mm b/chromium/third_party/dawn/src/dawn/native/metal/QuerySetMTL.mm
index 4882fee86c7..262ddc8338c 100644
--- a/chromium/third_party/dawn/src/dawn/native/metal/QuerySetMTL.mm
+++ b/chromium/third_party/dawn/src/dawn/native/metal/QuerySetMTL.mm
@@ -20,120 +20,121 @@
namespace dawn::native::metal {
- namespace {
-
- ResultOrError<id<MTLCounterSampleBuffer>> CreateCounterSampleBuffer(
- Device* device,
- MTLCommonCounterSet counterSet,
- uint32_t count) API_AVAILABLE(macos(10.15), ios(14.0)) {
- NSRef<MTLCounterSampleBufferDescriptor> descriptorRef =
- AcquireNSRef([MTLCounterSampleBufferDescriptor new]);
- MTLCounterSampleBufferDescriptor* descriptor = descriptorRef.Get();
-
- // To determine which counters are available from a device, we need to iterate through
- // the counterSets property of a MTLDevice. Then configure which counters will be
- // sampled by creating a MTLCounterSampleBufferDescriptor and setting its counterSet
- // property to the matched one of the available set.
- for (id<MTLCounterSet> set in device->GetMTLDevice().counterSets) {
- if ([set.name isEqualToString:counterSet]) {
- descriptor.counterSet = set;
- break;
- }
- }
- ASSERT(descriptor.counterSet != nullptr);
-
- descriptor.sampleCount = static_cast<NSUInteger>(std::max(count, uint32_t(1u)));
- descriptor.storageMode = MTLStorageModePrivate;
- if (device->IsToggleEnabled(Toggle::MetalUseSharedModeForCounterSampleBuffer)) {
- descriptor.storageMode = MTLStorageModeShared;
- }
-
- NSError* error = nullptr;
- id<MTLCounterSampleBuffer> counterSampleBuffer =
- [device->GetMTLDevice() newCounterSampleBufferWithDescriptor:descriptor
- error:&error];
- if (error != nullptr) {
- return DAWN_OUT_OF_MEMORY_ERROR(std::string("Error creating query set: ") +
- [error.localizedDescription UTF8String]);
- }
-
- return counterSampleBuffer;
+namespace {
+
+ResultOrError<id<MTLCounterSampleBuffer>> CreateCounterSampleBuffer(Device* device,
+ MTLCommonCounterSet counterSet,
+ uint32_t count)
+ API_AVAILABLE(macos(10.15), ios(14.0)) {
+ NSRef<MTLCounterSampleBufferDescriptor> descriptorRef =
+ AcquireNSRef([MTLCounterSampleBufferDescriptor new]);
+ MTLCounterSampleBufferDescriptor* descriptor = descriptorRef.Get();
+
+ // To determine which counters are available from a device, we need to iterate through
+    // the counterSets property of an MTLDevice. Then configure which counters will be
+    // sampled by creating a MTLCounterSampleBufferDescriptor and setting its counterSet
+    // property to the matching one from the available sets.
+ for (id<MTLCounterSet> set in device->GetMTLDevice().counterSets) {
+ if ([set.name isEqualToString:counterSet]) {
+ descriptor.counterSet = set;
+ break;
}
}
+ ASSERT(descriptor.counterSet != nullptr);
+
+ descriptor.sampleCount = static_cast<NSUInteger>(std::max(count, uint32_t(1u)));
+ descriptor.storageMode = MTLStorageModePrivate;
+ if (device->IsToggleEnabled(Toggle::MetalUseSharedModeForCounterSampleBuffer)) {
+ descriptor.storageMode = MTLStorageModeShared;
+ }
- // static
- ResultOrError<Ref<QuerySet>> QuerySet::Create(Device* device,
- const QuerySetDescriptor* descriptor) {
- Ref<QuerySet> queryset = AcquireRef(new QuerySet(device, descriptor));
- DAWN_TRY(queryset->Initialize());
- return queryset;
+ NSError* error = nullptr;
+ id<MTLCounterSampleBuffer> counterSampleBuffer =
+ [device->GetMTLDevice() newCounterSampleBufferWithDescriptor:descriptor error:&error];
+ if (error != nullptr) {
+ return DAWN_OUT_OF_MEMORY_ERROR(std::string("Error creating query set: ") +
+ [error.localizedDescription UTF8String]);
}
- MaybeError QuerySet::Initialize() {
- Device* device = ToBackend(GetDevice());
-
- switch (GetQueryType()) {
- case wgpu::QueryType::Occlusion: {
- // Create buffer for writing 64-bit results.
- NSUInteger bufferSize = static_cast<NSUInteger>(
- std::max(GetQueryCount() * sizeof(uint64_t), size_t(4u)));
- mVisibilityBuffer = AcquireNSPRef([device->GetMTLDevice()
- newBufferWithLength:bufferSize
- options:MTLResourceStorageModePrivate]);
-
- if (mVisibilityBuffer == nil) {
- return DAWN_OUT_OF_MEMORY_ERROR("Failed to allocate query set.");
- }
- break;
+ return counterSampleBuffer;
+}
+} // namespace
+
+// static
+ResultOrError<Ref<QuerySet>> QuerySet::Create(Device* device,
+ const QuerySetDescriptor* descriptor) {
+ Ref<QuerySet> queryset = AcquireRef(new QuerySet(device, descriptor));
+ DAWN_TRY(queryset->Initialize());
+ return queryset;
+}
+
+QuerySet::QuerySet(DeviceBase* dev, const QuerySetDescriptor* desc) : QuerySetBase(dev, desc) {}
+
+MaybeError QuerySet::Initialize() {
+ Device* device = ToBackend(GetDevice());
+
+ switch (GetQueryType()) {
+ case wgpu::QueryType::Occlusion: {
+ // Create buffer for writing 64-bit results.
+ NSUInteger bufferSize =
+ static_cast<NSUInteger>(std::max(GetQueryCount() * sizeof(uint64_t), size_t(4u)));
+ mVisibilityBuffer = AcquireNSPRef([device->GetMTLDevice()
+ newBufferWithLength:bufferSize
+ options:MTLResourceStorageModePrivate]);
+
+ if (mVisibilityBuffer == nil) {
+ return DAWN_OUT_OF_MEMORY_ERROR("Failed to allocate query set.");
}
- case wgpu::QueryType::PipelineStatistics:
- if (@available(macOS 10.15, iOS 14.0, *)) {
- DAWN_TRY_ASSIGN(mCounterSampleBuffer,
- CreateCounterSampleBuffer(device, MTLCommonCounterSetStatistic,
- GetQueryCount()));
- } else {
- UNREACHABLE();
- }
- break;
- case wgpu::QueryType::Timestamp:
- if (@available(macOS 10.15, iOS 14.0, *)) {
- DAWN_TRY_ASSIGN(mCounterSampleBuffer,
- CreateCounterSampleBuffer(device, MTLCommonCounterSetTimestamp,
- GetQueryCount()));
- } else {
- UNREACHABLE();
- }
- break;
- default:
- UNREACHABLE();
- break;
+ break;
}
-
- return {};
+ case wgpu::QueryType::PipelineStatistics:
+ if (@available(macOS 10.15, iOS 14.0, *)) {
+ DAWN_TRY_ASSIGN(mCounterSampleBuffer,
+ CreateCounterSampleBuffer(device, MTLCommonCounterSetStatistic,
+ GetQueryCount()));
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ case wgpu::QueryType::Timestamp:
+ if (@available(macOS 10.15, iOS 14.0, *)) {
+ DAWN_TRY_ASSIGN(mCounterSampleBuffer,
+ CreateCounterSampleBuffer(device, MTLCommonCounterSetTimestamp,
+ GetQueryCount()));
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ default:
+ UNREACHABLE();
+ break;
}
- id<MTLBuffer> QuerySet::GetVisibilityBuffer() const {
- return mVisibilityBuffer.Get();
- }
+ return {};
+}
- id<MTLCounterSampleBuffer> QuerySet::GetCounterSampleBuffer() const
- API_AVAILABLE(macos(10.15), ios(14.0)) {
- return mCounterSampleBuffer;
- }
+id<MTLBuffer> QuerySet::GetVisibilityBuffer() const {
+ return mVisibilityBuffer.Get();
+}
- QuerySet::~QuerySet() = default;
+id<MTLCounterSampleBuffer> QuerySet::GetCounterSampleBuffer() const
+ API_AVAILABLE(macos(10.15), ios(14.0)) {
+ return mCounterSampleBuffer;
+}
- void QuerySet::DestroyImpl() {
- QuerySetBase::DestroyImpl();
+QuerySet::~QuerySet() = default;
- mVisibilityBuffer = nullptr;
+void QuerySet::DestroyImpl() {
+ QuerySetBase::DestroyImpl();
- // mCounterSampleBuffer isn't an NSRef because API_AVAILABLE doesn't work will with
- // templates.
- if (@available(macOS 10.15, iOS 14.0, *)) {
- [mCounterSampleBuffer release];
- mCounterSampleBuffer = nullptr;
- }
+ mVisibilityBuffer = nullptr;
+
+    // mCounterSampleBuffer isn't an NSRef because API_AVAILABLE doesn't work well with
+    // templates.
+ if (@available(macOS 10.15, iOS 14.0, *)) {
+ [mCounterSampleBuffer release];
+ mCounterSampleBuffer = nullptr;
}
+}
} // namespace dawn::native::metal
diff --git a/chromium/third_party/dawn/src/dawn/native/metal/QueueMTL.h b/chromium/third_party/dawn/src/dawn/native/metal/QueueMTL.h
index 7c1070e80fa..5f959219161 100644
--- a/chromium/third_party/dawn/src/dawn/native/metal/QueueMTL.h
+++ b/chromium/third_party/dawn/src/dawn/native/metal/QueueMTL.h
@@ -19,15 +19,16 @@
namespace dawn::native::metal {
- class Device;
+class Device;
- class Queue final : public QueueBase {
- public:
- Queue(Device* device, const QueueDescriptor* descriptor);
+class Queue final : public QueueBase {
+ public:
+ Queue(Device* device, const QueueDescriptor* descriptor);
+ ~Queue() override;
- private:
- MaybeError SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) override;
- };
+ private:
+ MaybeError SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) override;
+};
} // namespace dawn::native::metal
diff --git a/chromium/third_party/dawn/src/dawn/native/metal/QueueMTL.mm b/chromium/third_party/dawn/src/dawn/native/metal/QueueMTL.mm
index 2bf50c5684b..f6cfa4cdf5a 100644
--- a/chromium/third_party/dawn/src/dawn/native/metal/QueueMTL.mm
+++ b/chromium/third_party/dawn/src/dawn/native/metal/QueueMTL.mm
@@ -26,24 +26,24 @@
namespace dawn::native::metal {
- Queue::Queue(Device* device, const QueueDescriptor* descriptor)
- : QueueBase(device, descriptor) {
- }
+Queue::Queue(Device* device, const QueueDescriptor* descriptor) : QueueBase(device, descriptor) {}
- MaybeError Queue::SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) {
- Device* device = ToBackend(GetDevice());
+Queue::~Queue() = default;
- DAWN_TRY(device->Tick());
+MaybeError Queue::SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) {
+ Device* device = ToBackend(GetDevice());
- CommandRecordingContext* commandContext = device->GetPendingCommandContext();
+ DAWN_TRY(device->Tick());
- TRACE_EVENT_BEGIN0(GetDevice()->GetPlatform(), Recording, "CommandBufferMTL::FillCommands");
- for (uint32_t i = 0; i < commandCount; ++i) {
- DAWN_TRY(ToBackend(commands[i])->FillCommands(commandContext));
- }
- TRACE_EVENT_END0(GetDevice()->GetPlatform(), Recording, "CommandBufferMTL::FillCommands");
+ CommandRecordingContext* commandContext = device->GetPendingCommandContext();
- return device->SubmitPendingCommandBuffer();
+ TRACE_EVENT_BEGIN0(GetDevice()->GetPlatform(), Recording, "CommandBufferMTL::FillCommands");
+ for (uint32_t i = 0; i < commandCount; ++i) {
+ DAWN_TRY(ToBackend(commands[i])->FillCommands(commandContext));
}
+ TRACE_EVENT_END0(GetDevice()->GetPlatform(), Recording, "CommandBufferMTL::FillCommands");
+
+ return device->SubmitPendingCommandBuffer();
+}
} // namespace dawn::native::metal
diff --git a/chromium/third_party/dawn/src/dawn/native/metal/RenderPipelineMTL.h b/chromium/third_party/dawn/src/dawn/native/metal/RenderPipelineMTL.h
index 338a3e9288f..23268aea030 100644
--- a/chromium/third_party/dawn/src/dawn/native/metal/RenderPipelineMTL.h
+++ b/chromium/third_party/dawn/src/dawn/native/metal/RenderPipelineMTL.h
@@ -23,47 +23,49 @@
namespace dawn::native::metal {
- class Device;
+class Device;
- class RenderPipeline final : public RenderPipelineBase {
- public:
- static Ref<RenderPipelineBase> CreateUninitialized(
- Device* device,
- const RenderPipelineDescriptor* descriptor);
- static void InitializeAsync(Ref<RenderPipelineBase> renderPipeline,
- WGPUCreateRenderPipelineAsyncCallback callback,
- void* userdata);
+class RenderPipeline final : public RenderPipelineBase {
+ public:
+ static Ref<RenderPipelineBase> CreateUninitialized(Device* device,
+ const RenderPipelineDescriptor* descriptor);
+ static void InitializeAsync(Ref<RenderPipelineBase> renderPipeline,
+ WGPUCreateRenderPipelineAsyncCallback callback,
+ void* userdata);
- MTLPrimitiveType GetMTLPrimitiveTopology() const;
- MTLWinding GetMTLFrontFace() const;
- MTLCullMode GetMTLCullMode() const;
+ RenderPipeline(DeviceBase* device, const RenderPipelineDescriptor* descriptor);
+ ~RenderPipeline() override;
- void Encode(id<MTLRenderCommandEncoder> encoder);
+ MTLPrimitiveType GetMTLPrimitiveTopology() const;
+ MTLWinding GetMTLFrontFace() const;
+ MTLCullMode GetMTLCullMode() const;
- id<MTLDepthStencilState> GetMTLDepthStencilState();
+ void Encode(id<MTLRenderCommandEncoder> encoder);
- // For each Dawn vertex buffer, give the index in which it will be positioned in the Metal
- // vertex buffer table.
- uint32_t GetMtlVertexBufferIndex(VertexBufferSlot slot) const;
+ id<MTLDepthStencilState> GetMTLDepthStencilState();
- wgpu::ShaderStage GetStagesRequiringStorageBufferLength() const;
+    // For each Dawn vertex buffer, give the index at which it will be placed in the Metal
+    // vertex buffer table.
+ uint32_t GetMtlVertexBufferIndex(VertexBufferSlot slot) const;
- MaybeError Initialize() override;
+ wgpu::ShaderStage GetStagesRequiringStorageBufferLength() const;
- private:
- using RenderPipelineBase::RenderPipelineBase;
+ MaybeError Initialize() override;
- NSRef<MTLVertexDescriptor> MakeVertexDesc();
+ private:
+ using RenderPipelineBase::RenderPipelineBase;
- MTLPrimitiveType mMtlPrimitiveTopology;
- MTLWinding mMtlFrontFace;
- MTLCullMode mMtlCullMode;
- NSPRef<id<MTLRenderPipelineState>> mMtlRenderPipelineState;
- NSPRef<id<MTLDepthStencilState>> mMtlDepthStencilState;
- ityp::array<VertexBufferSlot, uint32_t, kMaxVertexBuffers> mMtlVertexBufferIndices;
+ NSRef<MTLVertexDescriptor> MakeVertexDesc();
- wgpu::ShaderStage mStagesRequiringStorageBufferLength = wgpu::ShaderStage::None;
- };
+ MTLPrimitiveType mMtlPrimitiveTopology;
+ MTLWinding mMtlFrontFace;
+ MTLCullMode mMtlCullMode;
+ NSPRef<id<MTLRenderPipelineState>> mMtlRenderPipelineState;
+ NSPRef<id<MTLDepthStencilState>> mMtlDepthStencilState;
+ ityp::array<VertexBufferSlot, uint32_t, kMaxVertexBuffers> mMtlVertexBufferIndices;
+
+ wgpu::ShaderStage mStagesRequiringStorageBufferLength = wgpu::ShaderStage::None;
+};
} // namespace dawn::native::metal
diff --git a/chromium/third_party/dawn/src/dawn/native/metal/RenderPipelineMTL.mm b/chromium/third_party/dawn/src/dawn/native/metal/RenderPipelineMTL.mm
index 18adb692de6..6e5afd54fe0 100644
--- a/chromium/third_party/dawn/src/dawn/native/metal/RenderPipelineMTL.mm
+++ b/chromium/third_party/dawn/src/dawn/native/metal/RenderPipelineMTL.mm
@@ -24,483 +24,482 @@
namespace dawn::native::metal {
- namespace {
- MTLVertexFormat VertexFormatType(wgpu::VertexFormat format) {
- switch (format) {
- case wgpu::VertexFormat::Uint8x2:
- return MTLVertexFormatUChar2;
- case wgpu::VertexFormat::Uint8x4:
- return MTLVertexFormatUChar4;
- case wgpu::VertexFormat::Sint8x2:
- return MTLVertexFormatChar2;
- case wgpu::VertexFormat::Sint8x4:
- return MTLVertexFormatChar4;
- case wgpu::VertexFormat::Unorm8x2:
- return MTLVertexFormatUChar2Normalized;
- case wgpu::VertexFormat::Unorm8x4:
- return MTLVertexFormatUChar4Normalized;
- case wgpu::VertexFormat::Snorm8x2:
- return MTLVertexFormatChar2Normalized;
- case wgpu::VertexFormat::Snorm8x4:
- return MTLVertexFormatChar4Normalized;
- case wgpu::VertexFormat::Uint16x2:
- return MTLVertexFormatUShort2;
- case wgpu::VertexFormat::Uint16x4:
- return MTLVertexFormatUShort4;
- case wgpu::VertexFormat::Sint16x2:
- return MTLVertexFormatShort2;
- case wgpu::VertexFormat::Sint16x4:
- return MTLVertexFormatShort4;
- case wgpu::VertexFormat::Unorm16x2:
- return MTLVertexFormatUShort2Normalized;
- case wgpu::VertexFormat::Unorm16x4:
- return MTLVertexFormatUShort4Normalized;
- case wgpu::VertexFormat::Snorm16x2:
- return MTLVertexFormatShort2Normalized;
- case wgpu::VertexFormat::Snorm16x4:
- return MTLVertexFormatShort4Normalized;
- case wgpu::VertexFormat::Float16x2:
- return MTLVertexFormatHalf2;
- case wgpu::VertexFormat::Float16x4:
- return MTLVertexFormatHalf4;
- case wgpu::VertexFormat::Float32:
- return MTLVertexFormatFloat;
- case wgpu::VertexFormat::Float32x2:
- return MTLVertexFormatFloat2;
- case wgpu::VertexFormat::Float32x3:
- return MTLVertexFormatFloat3;
- case wgpu::VertexFormat::Float32x4:
- return MTLVertexFormatFloat4;
- case wgpu::VertexFormat::Uint32:
- return MTLVertexFormatUInt;
- case wgpu::VertexFormat::Uint32x2:
- return MTLVertexFormatUInt2;
- case wgpu::VertexFormat::Uint32x3:
- return MTLVertexFormatUInt3;
- case wgpu::VertexFormat::Uint32x4:
- return MTLVertexFormatUInt4;
- case wgpu::VertexFormat::Sint32:
- return MTLVertexFormatInt;
- case wgpu::VertexFormat::Sint32x2:
- return MTLVertexFormatInt2;
- case wgpu::VertexFormat::Sint32x3:
- return MTLVertexFormatInt3;
- case wgpu::VertexFormat::Sint32x4:
- return MTLVertexFormatInt4;
- default:
- UNREACHABLE();
- }
- }
+namespace {
+MTLVertexFormat VertexFormatType(wgpu::VertexFormat format) {
+ switch (format) {
+ case wgpu::VertexFormat::Uint8x2:
+ return MTLVertexFormatUChar2;
+ case wgpu::VertexFormat::Uint8x4:
+ return MTLVertexFormatUChar4;
+ case wgpu::VertexFormat::Sint8x2:
+ return MTLVertexFormatChar2;
+ case wgpu::VertexFormat::Sint8x4:
+ return MTLVertexFormatChar4;
+ case wgpu::VertexFormat::Unorm8x2:
+ return MTLVertexFormatUChar2Normalized;
+ case wgpu::VertexFormat::Unorm8x4:
+ return MTLVertexFormatUChar4Normalized;
+ case wgpu::VertexFormat::Snorm8x2:
+ return MTLVertexFormatChar2Normalized;
+ case wgpu::VertexFormat::Snorm8x4:
+ return MTLVertexFormatChar4Normalized;
+ case wgpu::VertexFormat::Uint16x2:
+ return MTLVertexFormatUShort2;
+ case wgpu::VertexFormat::Uint16x4:
+ return MTLVertexFormatUShort4;
+ case wgpu::VertexFormat::Sint16x2:
+ return MTLVertexFormatShort2;
+ case wgpu::VertexFormat::Sint16x4:
+ return MTLVertexFormatShort4;
+ case wgpu::VertexFormat::Unorm16x2:
+ return MTLVertexFormatUShort2Normalized;
+ case wgpu::VertexFormat::Unorm16x4:
+ return MTLVertexFormatUShort4Normalized;
+ case wgpu::VertexFormat::Snorm16x2:
+ return MTLVertexFormatShort2Normalized;
+ case wgpu::VertexFormat::Snorm16x4:
+ return MTLVertexFormatShort4Normalized;
+ case wgpu::VertexFormat::Float16x2:
+ return MTLVertexFormatHalf2;
+ case wgpu::VertexFormat::Float16x4:
+ return MTLVertexFormatHalf4;
+ case wgpu::VertexFormat::Float32:
+ return MTLVertexFormatFloat;
+ case wgpu::VertexFormat::Float32x2:
+ return MTLVertexFormatFloat2;
+ case wgpu::VertexFormat::Float32x3:
+ return MTLVertexFormatFloat3;
+ case wgpu::VertexFormat::Float32x4:
+ return MTLVertexFormatFloat4;
+ case wgpu::VertexFormat::Uint32:
+ return MTLVertexFormatUInt;
+ case wgpu::VertexFormat::Uint32x2:
+ return MTLVertexFormatUInt2;
+ case wgpu::VertexFormat::Uint32x3:
+ return MTLVertexFormatUInt3;
+ case wgpu::VertexFormat::Uint32x4:
+ return MTLVertexFormatUInt4;
+ case wgpu::VertexFormat::Sint32:
+ return MTLVertexFormatInt;
+ case wgpu::VertexFormat::Sint32x2:
+ return MTLVertexFormatInt2;
+ case wgpu::VertexFormat::Sint32x3:
+ return MTLVertexFormatInt3;
+ case wgpu::VertexFormat::Sint32x4:
+ return MTLVertexFormatInt4;
+ default:
+ UNREACHABLE();
+ }
+}
+
+MTLVertexStepFunction VertexStepModeFunction(wgpu::VertexStepMode mode) {
+ switch (mode) {
+ case wgpu::VertexStepMode::Vertex:
+ return MTLVertexStepFunctionPerVertex;
+ case wgpu::VertexStepMode::Instance:
+ return MTLVertexStepFunctionPerInstance;
+ case wgpu::VertexStepMode::VertexBufferNotUsed:
+ UNREACHABLE();
+ }
+}
+
+MTLPrimitiveType MTLPrimitiveTopology(wgpu::PrimitiveTopology primitiveTopology) {
+ switch (primitiveTopology) {
+ case wgpu::PrimitiveTopology::PointList:
+ return MTLPrimitiveTypePoint;
+ case wgpu::PrimitiveTopology::LineList:
+ return MTLPrimitiveTypeLine;
+ case wgpu::PrimitiveTopology::LineStrip:
+ return MTLPrimitiveTypeLineStrip;
+ case wgpu::PrimitiveTopology::TriangleList:
+ return MTLPrimitiveTypeTriangle;
+ case wgpu::PrimitiveTopology::TriangleStrip:
+ return MTLPrimitiveTypeTriangleStrip;
+ }
+}
+
+MTLPrimitiveTopologyClass MTLInputPrimitiveTopology(wgpu::PrimitiveTopology primitiveTopology) {
+ switch (primitiveTopology) {
+ case wgpu::PrimitiveTopology::PointList:
+ return MTLPrimitiveTopologyClassPoint;
+ case wgpu::PrimitiveTopology::LineList:
+ case wgpu::PrimitiveTopology::LineStrip:
+ return MTLPrimitiveTopologyClassLine;
+ case wgpu::PrimitiveTopology::TriangleList:
+ case wgpu::PrimitiveTopology::TriangleStrip:
+ return MTLPrimitiveTopologyClassTriangle;
+ }
+}
+
+MTLBlendFactor MetalBlendFactor(wgpu::BlendFactor factor, bool alpha) {
+ switch (factor) {
+ case wgpu::BlendFactor::Zero:
+ return MTLBlendFactorZero;
+ case wgpu::BlendFactor::One:
+ return MTLBlendFactorOne;
+ case wgpu::BlendFactor::Src:
+ return MTLBlendFactorSourceColor;
+ case wgpu::BlendFactor::OneMinusSrc:
+ return MTLBlendFactorOneMinusSourceColor;
+ case wgpu::BlendFactor::SrcAlpha:
+ return MTLBlendFactorSourceAlpha;
+ case wgpu::BlendFactor::OneMinusSrcAlpha:
+ return MTLBlendFactorOneMinusSourceAlpha;
+ case wgpu::BlendFactor::Dst:
+ return MTLBlendFactorDestinationColor;
+ case wgpu::BlendFactor::OneMinusDst:
+ return MTLBlendFactorOneMinusDestinationColor;
+ case wgpu::BlendFactor::DstAlpha:
+ return MTLBlendFactorDestinationAlpha;
+ case wgpu::BlendFactor::OneMinusDstAlpha:
+ return MTLBlendFactorOneMinusDestinationAlpha;
+ case wgpu::BlendFactor::SrcAlphaSaturated:
+ return MTLBlendFactorSourceAlphaSaturated;
+ case wgpu::BlendFactor::Constant:
+ return alpha ? MTLBlendFactorBlendAlpha : MTLBlendFactorBlendColor;
+ case wgpu::BlendFactor::OneMinusConstant:
+ return alpha ? MTLBlendFactorOneMinusBlendAlpha : MTLBlendFactorOneMinusBlendColor;
+ }
+}
+
+MTLBlendOperation MetalBlendOperation(wgpu::BlendOperation operation) {
+ switch (operation) {
+ case wgpu::BlendOperation::Add:
+ return MTLBlendOperationAdd;
+ case wgpu::BlendOperation::Subtract:
+ return MTLBlendOperationSubtract;
+ case wgpu::BlendOperation::ReverseSubtract:
+ return MTLBlendOperationReverseSubtract;
+ case wgpu::BlendOperation::Min:
+ return MTLBlendOperationMin;
+ case wgpu::BlendOperation::Max:
+ return MTLBlendOperationMax;
+ }
+}
- MTLVertexStepFunction VertexStepModeFunction(wgpu::VertexStepMode mode) {
- switch (mode) {
- case wgpu::VertexStepMode::Vertex:
- return MTLVertexStepFunctionPerVertex;
- case wgpu::VertexStepMode::Instance:
- return MTLVertexStepFunctionPerInstance;
- }
- }
+MTLColorWriteMask MetalColorWriteMask(wgpu::ColorWriteMask writeMask,
+ bool isDeclaredInFragmentShader) {
+ if (!isDeclaredInFragmentShader) {
+ return MTLColorWriteMaskNone;
+ }
- MTLPrimitiveType MTLPrimitiveTopology(wgpu::PrimitiveTopology primitiveTopology) {
- switch (primitiveTopology) {
- case wgpu::PrimitiveTopology::PointList:
- return MTLPrimitiveTypePoint;
- case wgpu::PrimitiveTopology::LineList:
- return MTLPrimitiveTypeLine;
- case wgpu::PrimitiveTopology::LineStrip:
- return MTLPrimitiveTypeLineStrip;
- case wgpu::PrimitiveTopology::TriangleList:
- return MTLPrimitiveTypeTriangle;
- case wgpu::PrimitiveTopology::TriangleStrip:
- return MTLPrimitiveTypeTriangleStrip;
- }
- }
+ MTLColorWriteMask mask = MTLColorWriteMaskNone;
- MTLPrimitiveTopologyClass MTLInputPrimitiveTopology(
- wgpu::PrimitiveTopology primitiveTopology) {
- switch (primitiveTopology) {
- case wgpu::PrimitiveTopology::PointList:
- return MTLPrimitiveTopologyClassPoint;
- case wgpu::PrimitiveTopology::LineList:
- case wgpu::PrimitiveTopology::LineStrip:
- return MTLPrimitiveTopologyClassLine;
- case wgpu::PrimitiveTopology::TriangleList:
- case wgpu::PrimitiveTopology::TriangleStrip:
- return MTLPrimitiveTopologyClassTriangle;
- }
- }
+ if (writeMask & wgpu::ColorWriteMask::Red) {
+ mask |= MTLColorWriteMaskRed;
+ }
+ if (writeMask & wgpu::ColorWriteMask::Green) {
+ mask |= MTLColorWriteMaskGreen;
+ }
+ if (writeMask & wgpu::ColorWriteMask::Blue) {
+ mask |= MTLColorWriteMaskBlue;
+ }
+ if (writeMask & wgpu::ColorWriteMask::Alpha) {
+ mask |= MTLColorWriteMaskAlpha;
+ }
- MTLBlendFactor MetalBlendFactor(wgpu::BlendFactor factor, bool alpha) {
- switch (factor) {
- case wgpu::BlendFactor::Zero:
- return MTLBlendFactorZero;
- case wgpu::BlendFactor::One:
- return MTLBlendFactorOne;
- case wgpu::BlendFactor::Src:
- return MTLBlendFactorSourceColor;
- case wgpu::BlendFactor::OneMinusSrc:
- return MTLBlendFactorOneMinusSourceColor;
- case wgpu::BlendFactor::SrcAlpha:
- return MTLBlendFactorSourceAlpha;
- case wgpu::BlendFactor::OneMinusSrcAlpha:
- return MTLBlendFactorOneMinusSourceAlpha;
- case wgpu::BlendFactor::Dst:
- return MTLBlendFactorDestinationColor;
- case wgpu::BlendFactor::OneMinusDst:
- return MTLBlendFactorOneMinusDestinationColor;
- case wgpu::BlendFactor::DstAlpha:
- return MTLBlendFactorDestinationAlpha;
- case wgpu::BlendFactor::OneMinusDstAlpha:
- return MTLBlendFactorOneMinusDestinationAlpha;
- case wgpu::BlendFactor::SrcAlphaSaturated:
- return MTLBlendFactorSourceAlphaSaturated;
- case wgpu::BlendFactor::Constant:
- return alpha ? MTLBlendFactorBlendAlpha : MTLBlendFactorBlendColor;
- case wgpu::BlendFactor::OneMinusConstant:
- return alpha ? MTLBlendFactorOneMinusBlendAlpha
- : MTLBlendFactorOneMinusBlendColor;
- }
- }
+ return mask;
+}
+
+void ComputeBlendDesc(MTLRenderPipelineColorAttachmentDescriptor* attachment,
+ const ColorTargetState* state,
+ bool isDeclaredInFragmentShader) {
+ attachment.blendingEnabled = state->blend != nullptr;
+ if (attachment.blendingEnabled) {
+ attachment.sourceRGBBlendFactor = MetalBlendFactor(state->blend->color.srcFactor, false);
+ attachment.destinationRGBBlendFactor =
+ MetalBlendFactor(state->blend->color.dstFactor, false);
+ attachment.rgbBlendOperation = MetalBlendOperation(state->blend->color.operation);
+ attachment.sourceAlphaBlendFactor = MetalBlendFactor(state->blend->alpha.srcFactor, true);
+ attachment.destinationAlphaBlendFactor =
+ MetalBlendFactor(state->blend->alpha.dstFactor, true);
+ attachment.alphaBlendOperation = MetalBlendOperation(state->blend->alpha.operation);
+ }
+ attachment.writeMask = MetalColorWriteMask(state->writeMask, isDeclaredInFragmentShader);
+}
+
+MTLStencilOperation MetalStencilOperation(wgpu::StencilOperation stencilOperation) {
+ switch (stencilOperation) {
+ case wgpu::StencilOperation::Keep:
+ return MTLStencilOperationKeep;
+ case wgpu::StencilOperation::Zero:
+ return MTLStencilOperationZero;
+ case wgpu::StencilOperation::Replace:
+ return MTLStencilOperationReplace;
+ case wgpu::StencilOperation::Invert:
+ return MTLStencilOperationInvert;
+ case wgpu::StencilOperation::IncrementClamp:
+ return MTLStencilOperationIncrementClamp;
+ case wgpu::StencilOperation::DecrementClamp:
+ return MTLStencilOperationDecrementClamp;
+ case wgpu::StencilOperation::IncrementWrap:
+ return MTLStencilOperationIncrementWrap;
+ case wgpu::StencilOperation::DecrementWrap:
+ return MTLStencilOperationDecrementWrap;
+ }
+}
+
+NSRef<MTLDepthStencilDescriptor> MakeDepthStencilDesc(const DepthStencilState* descriptor) {
+ NSRef<MTLDepthStencilDescriptor> mtlDepthStencilDescRef =
+ AcquireNSRef([MTLDepthStencilDescriptor new]);
+ MTLDepthStencilDescriptor* mtlDepthStencilDescriptor = mtlDepthStencilDescRef.Get();
+
+ mtlDepthStencilDescriptor.depthCompareFunction =
+ ToMetalCompareFunction(descriptor->depthCompare);
+ mtlDepthStencilDescriptor.depthWriteEnabled = descriptor->depthWriteEnabled;
+
+ if (StencilTestEnabled(descriptor)) {
+ NSRef<MTLStencilDescriptor> backFaceStencilRef = AcquireNSRef([MTLStencilDescriptor new]);
+ MTLStencilDescriptor* backFaceStencil = backFaceStencilRef.Get();
+ NSRef<MTLStencilDescriptor> frontFaceStencilRef = AcquireNSRef([MTLStencilDescriptor new]);
+ MTLStencilDescriptor* frontFaceStencil = frontFaceStencilRef.Get();
+
+ backFaceStencil.stencilCompareFunction =
+ ToMetalCompareFunction(descriptor->stencilBack.compare);
+ backFaceStencil.stencilFailureOperation =
+ MetalStencilOperation(descriptor->stencilBack.failOp);
+ backFaceStencil.depthFailureOperation =
+ MetalStencilOperation(descriptor->stencilBack.depthFailOp);
+ backFaceStencil.depthStencilPassOperation =
+ MetalStencilOperation(descriptor->stencilBack.passOp);
+ backFaceStencil.readMask = descriptor->stencilReadMask;
+ backFaceStencil.writeMask = descriptor->stencilWriteMask;
+
+ frontFaceStencil.stencilCompareFunction =
+ ToMetalCompareFunction(descriptor->stencilFront.compare);
+ frontFaceStencil.stencilFailureOperation =
+ MetalStencilOperation(descriptor->stencilFront.failOp);
+ frontFaceStencil.depthFailureOperation =
+ MetalStencilOperation(descriptor->stencilFront.depthFailOp);
+ frontFaceStencil.depthStencilPassOperation =
+ MetalStencilOperation(descriptor->stencilFront.passOp);
+ frontFaceStencil.readMask = descriptor->stencilReadMask;
+ frontFaceStencil.writeMask = descriptor->stencilWriteMask;
+
+ mtlDepthStencilDescriptor.backFaceStencil = backFaceStencil;
+ mtlDepthStencilDescriptor.frontFaceStencil = frontFaceStencil;
+ }
- MTLBlendOperation MetalBlendOperation(wgpu::BlendOperation operation) {
- switch (operation) {
- case wgpu::BlendOperation::Add:
- return MTLBlendOperationAdd;
- case wgpu::BlendOperation::Subtract:
- return MTLBlendOperationSubtract;
- case wgpu::BlendOperation::ReverseSubtract:
- return MTLBlendOperationReverseSubtract;
- case wgpu::BlendOperation::Min:
- return MTLBlendOperationMin;
- case wgpu::BlendOperation::Max:
- return MTLBlendOperationMax;
- }
- }
+ return mtlDepthStencilDescRef;
+}
- MTLColorWriteMask MetalColorWriteMask(wgpu::ColorWriteMask writeMask,
- bool isDeclaredInFragmentShader) {
- if (!isDeclaredInFragmentShader) {
- return MTLColorWriteMaskNone;
- }
+MTLWinding MTLFrontFace(wgpu::FrontFace face) {
+ switch (face) {
+ case wgpu::FrontFace::CW:
+ return MTLWindingClockwise;
+ case wgpu::FrontFace::CCW:
+ return MTLWindingCounterClockwise;
+ }
+}
+
+MTLCullMode ToMTLCullMode(wgpu::CullMode mode) {
+ switch (mode) {
+ case wgpu::CullMode::None:
+ return MTLCullModeNone;
+ case wgpu::CullMode::Front:
+ return MTLCullModeFront;
+ case wgpu::CullMode::Back:
+ return MTLCullModeBack;
+ }
+}
- MTLColorWriteMask mask = MTLColorWriteMaskNone;
+} // anonymous namespace
- if (writeMask & wgpu::ColorWriteMask::Red) {
- mask |= MTLColorWriteMaskRed;
- }
- if (writeMask & wgpu::ColorWriteMask::Green) {
- mask |= MTLColorWriteMaskGreen;
- }
- if (writeMask & wgpu::ColorWriteMask::Blue) {
- mask |= MTLColorWriteMaskBlue;
- }
- if (writeMask & wgpu::ColorWriteMask::Alpha) {
- mask |= MTLColorWriteMaskAlpha;
- }
+// static
+Ref<RenderPipelineBase> RenderPipeline::CreateUninitialized(
+ Device* device,
+ const RenderPipelineDescriptor* descriptor) {
+ return AcquireRef(new RenderPipeline(device, descriptor));
+}
- return mask;
- }
+RenderPipeline::RenderPipeline(DeviceBase* dev, const RenderPipelineDescriptor* desc)
+ : RenderPipelineBase(dev, desc) {}
- void ComputeBlendDesc(MTLRenderPipelineColorAttachmentDescriptor* attachment,
- const ColorTargetState* state,
- bool isDeclaredInFragmentShader) {
- attachment.blendingEnabled = state->blend != nullptr;
- if (attachment.blendingEnabled) {
- attachment.sourceRGBBlendFactor =
- MetalBlendFactor(state->blend->color.srcFactor, false);
- attachment.destinationRGBBlendFactor =
- MetalBlendFactor(state->blend->color.dstFactor, false);
- attachment.rgbBlendOperation = MetalBlendOperation(state->blend->color.operation);
- attachment.sourceAlphaBlendFactor =
- MetalBlendFactor(state->blend->alpha.srcFactor, true);
- attachment.destinationAlphaBlendFactor =
- MetalBlendFactor(state->blend->alpha.dstFactor, true);
- attachment.alphaBlendOperation = MetalBlendOperation(state->blend->alpha.operation);
- }
- attachment.writeMask =
- MetalColorWriteMask(state->writeMask, isDeclaredInFragmentShader);
- }
+RenderPipeline::~RenderPipeline() = default;
- MTLStencilOperation MetalStencilOperation(wgpu::StencilOperation stencilOperation) {
- switch (stencilOperation) {
- case wgpu::StencilOperation::Keep:
- return MTLStencilOperationKeep;
- case wgpu::StencilOperation::Zero:
- return MTLStencilOperationZero;
- case wgpu::StencilOperation::Replace:
- return MTLStencilOperationReplace;
- case wgpu::StencilOperation::Invert:
- return MTLStencilOperationInvert;
- case wgpu::StencilOperation::IncrementClamp:
- return MTLStencilOperationIncrementClamp;
- case wgpu::StencilOperation::DecrementClamp:
- return MTLStencilOperationDecrementClamp;
- case wgpu::StencilOperation::IncrementWrap:
- return MTLStencilOperationIncrementWrap;
- case wgpu::StencilOperation::DecrementWrap:
- return MTLStencilOperationDecrementWrap;
- }
- }
+MaybeError RenderPipeline::Initialize() {
+ mMtlPrimitiveTopology = MTLPrimitiveTopology(GetPrimitiveTopology());
+ mMtlFrontFace = MTLFrontFace(GetFrontFace());
+ mMtlCullMode = ToMTLCullMode(GetCullMode());
+ auto mtlDevice = ToBackend(GetDevice())->GetMTLDevice();
- NSRef<MTLDepthStencilDescriptor> MakeDepthStencilDesc(const DepthStencilState* descriptor) {
- NSRef<MTLDepthStencilDescriptor> mtlDepthStencilDescRef =
- AcquireNSRef([MTLDepthStencilDescriptor new]);
- MTLDepthStencilDescriptor* mtlDepthStencilDescriptor = mtlDepthStencilDescRef.Get();
-
- mtlDepthStencilDescriptor.depthCompareFunction =
- ToMetalCompareFunction(descriptor->depthCompare);
- mtlDepthStencilDescriptor.depthWriteEnabled = descriptor->depthWriteEnabled;
-
- if (StencilTestEnabled(descriptor)) {
- NSRef<MTLStencilDescriptor> backFaceStencilRef =
- AcquireNSRef([MTLStencilDescriptor new]);
- MTLStencilDescriptor* backFaceStencil = backFaceStencilRef.Get();
- NSRef<MTLStencilDescriptor> frontFaceStencilRef =
- AcquireNSRef([MTLStencilDescriptor new]);
- MTLStencilDescriptor* frontFaceStencil = frontFaceStencilRef.Get();
-
- backFaceStencil.stencilCompareFunction =
- ToMetalCompareFunction(descriptor->stencilBack.compare);
- backFaceStencil.stencilFailureOperation =
- MetalStencilOperation(descriptor->stencilBack.failOp);
- backFaceStencil.depthFailureOperation =
- MetalStencilOperation(descriptor->stencilBack.depthFailOp);
- backFaceStencil.depthStencilPassOperation =
- MetalStencilOperation(descriptor->stencilBack.passOp);
- backFaceStencil.readMask = descriptor->stencilReadMask;
- backFaceStencil.writeMask = descriptor->stencilWriteMask;
-
- frontFaceStencil.stencilCompareFunction =
- ToMetalCompareFunction(descriptor->stencilFront.compare);
- frontFaceStencil.stencilFailureOperation =
- MetalStencilOperation(descriptor->stencilFront.failOp);
- frontFaceStencil.depthFailureOperation =
- MetalStencilOperation(descriptor->stencilFront.depthFailOp);
- frontFaceStencil.depthStencilPassOperation =
- MetalStencilOperation(descriptor->stencilFront.passOp);
- frontFaceStencil.readMask = descriptor->stencilReadMask;
- frontFaceStencil.writeMask = descriptor->stencilWriteMask;
-
- mtlDepthStencilDescriptor.backFaceStencil = backFaceStencil;
- mtlDepthStencilDescriptor.frontFaceStencil = frontFaceStencil;
- }
+ NSRef<MTLRenderPipelineDescriptor> descriptorMTLRef =
+ AcquireNSRef([MTLRenderPipelineDescriptor new]);
+ MTLRenderPipelineDescriptor* descriptorMTL = descriptorMTLRef.Get();
- return mtlDepthStencilDescRef;
- }
+ // TODO(dawn:1384): MakeVertexDesc should be const in the future, so we don't need to call
+ // it here when vertex pulling is enabled
+ NSRef<MTLVertexDescriptor> vertexDesc = MakeVertexDesc();
- MTLWinding MTLFrontFace(wgpu::FrontFace face) {
- switch (face) {
- case wgpu::FrontFace::CW:
- return MTLWindingClockwise;
- case wgpu::FrontFace::CCW:
- return MTLWindingCounterClockwise;
- }
- }
-
- MTLCullMode ToMTLCullMode(wgpu::CullMode mode) {
- switch (mode) {
- case wgpu::CullMode::None:
- return MTLCullModeNone;
- case wgpu::CullMode::Front:
- return MTLCullModeFront;
- case wgpu::CullMode::Back:
- return MTLCullModeBack;
- }
- }
-
- } // anonymous namespace
-
- // static
- Ref<RenderPipelineBase> RenderPipeline::CreateUninitialized(
- Device* device,
- const RenderPipelineDescriptor* descriptor) {
- return AcquireRef(new RenderPipeline(device, descriptor));
+ // Calling MakeVertexDesc first is important since it sets indices for packed bindings
+ if (GetDevice()->IsToggleEnabled(Toggle::MetalEnableVertexPulling)) {
+ vertexDesc = AcquireNSRef([MTLVertexDescriptor new]);
}
+ descriptorMTL.vertexDescriptor = vertexDesc.Get();
- MaybeError RenderPipeline::Initialize() {
- mMtlPrimitiveTopology = MTLPrimitiveTopology(GetPrimitiveTopology());
- mMtlFrontFace = MTLFrontFace(GetFrontFace());
- mMtlCullMode = ToMTLCullMode(GetCullMode());
- auto mtlDevice = ToBackend(GetDevice())->GetMTLDevice();
-
- NSRef<MTLRenderPipelineDescriptor> descriptorMTLRef =
- AcquireNSRef([MTLRenderPipelineDescriptor new]);
- MTLRenderPipelineDescriptor* descriptorMTL = descriptorMTLRef.Get();
+ const PerStage<ProgrammableStage>& allStages = GetAllStages();
+ const ProgrammableStage& vertexStage = allStages[wgpu::ShaderStage::Vertex];
+ ShaderModule::MetalFunctionData vertexData;
+ DAWN_TRY(CreateMTLFunction(vertexStage, SingleShaderStage::Vertex, ToBackend(GetLayout()),
+ &vertexData, 0xFFFFFFFF, this));
- // TODO: MakeVertexDesc should be const in the future, so we don't need to call it here when
- // vertex pulling is enabled
- NSRef<MTLVertexDescriptor> vertexDesc = MakeVertexDesc();
-
- // Calling MakeVertexDesc first is important since it sets indices for packed bindings
- if (GetDevice()->IsToggleEnabled(Toggle::MetalEnableVertexPulling)) {
- vertexDesc = AcquireNSRef([MTLVertexDescriptor new]);
- }
- descriptorMTL.vertexDescriptor = vertexDesc.Get();
+ descriptorMTL.vertexFunction = vertexData.function.Get();
+ if (vertexData.needsStorageBufferLength) {
+ mStagesRequiringStorageBufferLength |= wgpu::ShaderStage::Vertex;
+ }
- const PerStage<ProgrammableStage>& allStages = GetAllStages();
- const ProgrammableStage& vertexStage = allStages[wgpu::ShaderStage::Vertex];
- ShaderModule::MetalFunctionData vertexData;
- DAWN_TRY(CreateMTLFunction(vertexStage, SingleShaderStage::Vertex, ToBackend(GetLayout()),
- &vertexData, 0xFFFFFFFF, this));
+ if (GetStageMask() & wgpu::ShaderStage::Fragment) {
+ const ProgrammableStage& fragmentStage = allStages[wgpu::ShaderStage::Fragment];
+ ShaderModule::MetalFunctionData fragmentData;
+ DAWN_TRY(CreateMTLFunction(fragmentStage, SingleShaderStage::Fragment,
+ ToBackend(GetLayout()), &fragmentData, GetSampleMask()));
- descriptorMTL.vertexFunction = vertexData.function.Get();
- if (vertexData.needsStorageBufferLength) {
- mStagesRequiringStorageBufferLength |= wgpu::ShaderStage::Vertex;
+ descriptorMTL.fragmentFunction = fragmentData.function.Get();
+ if (fragmentData.needsStorageBufferLength) {
+ mStagesRequiringStorageBufferLength |= wgpu::ShaderStage::Fragment;
}
- if (GetStageMask() & wgpu::ShaderStage::Fragment) {
- const ProgrammableStage& fragmentStage = allStages[wgpu::ShaderStage::Fragment];
- ShaderModule::MetalFunctionData fragmentData;
- DAWN_TRY(CreateMTLFunction(fragmentStage, SingleShaderStage::Fragment,
- ToBackend(GetLayout()), &fragmentData, GetSampleMask()));
-
- descriptorMTL.fragmentFunction = fragmentData.function.Get();
- if (fragmentData.needsStorageBufferLength) {
- mStagesRequiringStorageBufferLength |= wgpu::ShaderStage::Fragment;
- }
-
- const auto& fragmentOutputsWritten = fragmentStage.metadata->fragmentOutputsWritten;
- for (ColorAttachmentIndex i : IterateBitSet(GetColorAttachmentsMask())) {
- descriptorMTL.colorAttachments[static_cast<uint8_t>(i)].pixelFormat =
- MetalPixelFormat(GetColorAttachmentFormat(i));
- const ColorTargetState* descriptor = GetColorTargetState(i);
- ComputeBlendDesc(descriptorMTL.colorAttachments[static_cast<uint8_t>(i)],
- descriptor, fragmentOutputsWritten[i]);
- }
+ const auto& fragmentOutputsWritten = fragmentStage.metadata->fragmentOutputsWritten;
+ for (ColorAttachmentIndex i : IterateBitSet(GetColorAttachmentsMask())) {
+ descriptorMTL.colorAttachments[static_cast<uint8_t>(i)].pixelFormat =
+ MetalPixelFormat(GetColorAttachmentFormat(i));
+ const ColorTargetState* descriptor = GetColorTargetState(i);
+ ComputeBlendDesc(descriptorMTL.colorAttachments[static_cast<uint8_t>(i)], descriptor,
+ fragmentOutputsWritten[i]);
}
+ }
- if (HasDepthStencilAttachment()) {
- wgpu::TextureFormat depthStencilFormat = GetDepthStencilFormat();
- const Format& internalFormat = GetDevice()->GetValidInternalFormat(depthStencilFormat);
- MTLPixelFormat metalFormat = MetalPixelFormat(depthStencilFormat);
+ if (HasDepthStencilAttachment()) {
+ wgpu::TextureFormat depthStencilFormat = GetDepthStencilFormat();
+ const Format& internalFormat = GetDevice()->GetValidInternalFormat(depthStencilFormat);
+ MTLPixelFormat metalFormat = MetalPixelFormat(depthStencilFormat);
- if (internalFormat.HasDepth()) {
- descriptorMTL.depthAttachmentPixelFormat = metalFormat;
- }
- if (internalFormat.HasStencil()) {
- descriptorMTL.stencilAttachmentPixelFormat = metalFormat;
- }
+ if (internalFormat.HasDepth()) {
+ descriptorMTL.depthAttachmentPixelFormat = metalFormat;
}
-
- descriptorMTL.inputPrimitiveTopology = MTLInputPrimitiveTopology(GetPrimitiveTopology());
- descriptorMTL.sampleCount = GetSampleCount();
- descriptorMTL.alphaToCoverageEnabled = IsAlphaToCoverageEnabled();
-
- NSError* error = nullptr;
- mMtlRenderPipelineState =
- AcquireNSPRef([mtlDevice newRenderPipelineStateWithDescriptor:descriptorMTL
- error:&error]);
- if (error != nullptr) {
- return DAWN_INTERNAL_ERROR(std::string("Error creating pipeline state ") +
- [error.localizedDescription UTF8String]);
+ if (internalFormat.HasStencil()) {
+ descriptorMTL.stencilAttachmentPixelFormat = metalFormat;
}
- ASSERT(mMtlRenderPipelineState != nil);
-
- // Create depth stencil state and cache it, fetch the cached depth stencil state when we
- // call setDepthStencilState() for a given render pipeline in CommandEncoder, in order
- // to improve performance.
- NSRef<MTLDepthStencilDescriptor> depthStencilDesc =
- MakeDepthStencilDesc(GetDepthStencilState());
- mMtlDepthStencilState =
- AcquireNSPRef([mtlDevice newDepthStencilStateWithDescriptor:depthStencilDesc.Get()]);
-
- return {};
- }
-
- MTLPrimitiveType RenderPipeline::GetMTLPrimitiveTopology() const {
- return mMtlPrimitiveTopology;
- }
-
- MTLWinding RenderPipeline::GetMTLFrontFace() const {
- return mMtlFrontFace;
}
- MTLCullMode RenderPipeline::GetMTLCullMode() const {
- return mMtlCullMode;
- }
-
- void RenderPipeline::Encode(id<MTLRenderCommandEncoder> encoder) {
- [encoder setRenderPipelineState:mMtlRenderPipelineState.Get()];
- }
-
- id<MTLDepthStencilState> RenderPipeline::GetMTLDepthStencilState() {
- return mMtlDepthStencilState.Get();
- }
+ descriptorMTL.inputPrimitiveTopology = MTLInputPrimitiveTopology(GetPrimitiveTopology());
+ descriptorMTL.sampleCount = GetSampleCount();
+ descriptorMTL.alphaToCoverageEnabled = IsAlphaToCoverageEnabled();
- uint32_t RenderPipeline::GetMtlVertexBufferIndex(VertexBufferSlot slot) const {
- ASSERT(slot < kMaxVertexBuffersTyped);
- return mMtlVertexBufferIndices[slot];
+ NSError* error = nullptr;
+ mMtlRenderPipelineState =
+ AcquireNSPRef([mtlDevice newRenderPipelineStateWithDescriptor:descriptorMTL error:&error]);
+ if (error != nullptr) {
+ return DAWN_INTERNAL_ERROR(std::string("Error creating pipeline state ") +
+ [error.localizedDescription UTF8String]);
}
-
- wgpu::ShaderStage RenderPipeline::GetStagesRequiringStorageBufferLength() const {
- return mStagesRequiringStorageBufferLength;
- }
-
- NSRef<MTLVertexDescriptor> RenderPipeline::MakeVertexDesc() {
- MTLVertexDescriptor* mtlVertexDescriptor = [MTLVertexDescriptor new];
-
- // Vertex buffers are packed after all the buffers for the bind groups.
- uint32_t mtlVertexBufferIndex =
- ToBackend(GetLayout())->GetBufferBindingCount(SingleShaderStage::Vertex);
-
- for (VertexBufferSlot slot : IterateBitSet(GetVertexBufferSlotsUsed())) {
- const VertexBufferInfo& info = GetVertexBuffer(slot);
-
- MTLVertexBufferLayoutDescriptor* layoutDesc = [MTLVertexBufferLayoutDescriptor new];
- if (info.arrayStride == 0) {
- // For MTLVertexStepFunctionConstant, the stepRate must be 0,
- // but the arrayStride must NOT be 0, so we made up it with
- // max(attrib.offset + sizeof(attrib) for each attrib)
- size_t maxArrayStride = 0;
- for (VertexAttributeLocation loc : IterateBitSet(GetAttributeLocationsUsed())) {
- const VertexAttributeInfo& attrib = GetAttribute(loc);
- // Only use the attributes that use the current input
- if (attrib.vertexBufferSlot != slot) {
- continue;
- }
- maxArrayStride =
- std::max(maxArrayStride, GetVertexFormatInfo(attrib.format).byteSize +
- size_t(attrib.offset));
+ ASSERT(mMtlRenderPipelineState != nil);
+
+ // Create the depth-stencil state and cache it so that CommandEncoder can fetch the
+ // cached state when it calls setDepthStencilState() for this render pipeline, which
+ // avoids rebuilding the state on every encode and improves performance.
+ NSRef<MTLDepthStencilDescriptor> depthStencilDesc =
+ MakeDepthStencilDesc(GetDepthStencilState());
+ mMtlDepthStencilState =
+ AcquireNSPRef([mtlDevice newDepthStencilStateWithDescriptor:depthStencilDesc.Get()]);
+
+ return {};
+}
+
+MTLPrimitiveType RenderPipeline::GetMTLPrimitiveTopology() const {
+ return mMtlPrimitiveTopology;
+}
+
+MTLWinding RenderPipeline::GetMTLFrontFace() const {
+ return mMtlFrontFace;
+}
+
+MTLCullMode RenderPipeline::GetMTLCullMode() const {
+ return mMtlCullMode;
+}
+
+void RenderPipeline::Encode(id<MTLRenderCommandEncoder> encoder) {
+ [encoder setRenderPipelineState:mMtlRenderPipelineState.Get()];
+}
+
+id<MTLDepthStencilState> RenderPipeline::GetMTLDepthStencilState() {
+ return mMtlDepthStencilState.Get();
+}
+
+uint32_t RenderPipeline::GetMtlVertexBufferIndex(VertexBufferSlot slot) const {
+ ASSERT(slot < kMaxVertexBuffersTyped);
+ return mMtlVertexBufferIndices[slot];
+}
+
+wgpu::ShaderStage RenderPipeline::GetStagesRequiringStorageBufferLength() const {
+ return mStagesRequiringStorageBufferLength;
+}
+
+NSRef<MTLVertexDescriptor> RenderPipeline::MakeVertexDesc() {
+ MTLVertexDescriptor* mtlVertexDescriptor = [MTLVertexDescriptor new];
+
+ // Vertex buffers are packed after all the buffers for the bind groups.
+ uint32_t mtlVertexBufferIndex =
+ ToBackend(GetLayout())->GetBufferBindingCount(SingleShaderStage::Vertex);
+
+ for (VertexBufferSlot slot : IterateBitSet(GetVertexBufferSlotsUsed())) {
+ const VertexBufferInfo& info = GetVertexBuffer(slot);
+
+ MTLVertexBufferLayoutDescriptor* layoutDesc = [MTLVertexBufferLayoutDescriptor new];
+ if (info.arrayStride == 0) {
+ // For MTLVertexStepFunctionConstant the stepRate must be 0, but the arrayStride
+ // must NOT be 0, so we make one up as
+ // max(attrib.offset + sizeof(attrib)) over the attributes that use this buffer.
+ size_t maxArrayStride = 0;
+ for (VertexAttributeLocation loc : IterateBitSet(GetAttributeLocationsUsed())) {
+ const VertexAttributeInfo& attrib = GetAttribute(loc);
+ // Only consider attributes that read from the current vertex buffer slot.
+ if (attrib.vertexBufferSlot != slot) {
+ continue;
}
- layoutDesc.stepFunction = MTLVertexStepFunctionConstant;
- layoutDesc.stepRate = 0;
- // Metal requires the stride must be a multiple of 4 bytes, align it with next
- // multiple of 4 if it's not.
- layoutDesc.stride = Align(maxArrayStride, 4);
- } else {
- layoutDesc.stepFunction = VertexStepModeFunction(info.stepMode);
- layoutDesc.stepRate = 1;
- layoutDesc.stride = info.arrayStride;
+ maxArrayStride =
+ std::max(maxArrayStride,
+ GetVertexFormatInfo(attrib.format).byteSize + size_t(attrib.offset));
}
-
- mtlVertexDescriptor.layouts[mtlVertexBufferIndex] = layoutDesc;
- [layoutDesc release];
-
- mMtlVertexBufferIndices[slot] = mtlVertexBufferIndex;
- mtlVertexBufferIndex++;
+ layoutDesc.stepFunction = MTLVertexStepFunctionConstant;
+ layoutDesc.stepRate = 0;
+ // Metal requires the stride to be a multiple of 4 bytes; round it up to the next
+ // multiple of 4 if it is not.
+ layoutDesc.stride = Align(maxArrayStride, 4);
+ } else {
+ layoutDesc.stepFunction = VertexStepModeFunction(info.stepMode);
+ layoutDesc.stepRate = 1;
+ layoutDesc.stride = info.arrayStride;
}
- for (VertexAttributeLocation loc : IterateBitSet(GetAttributeLocationsUsed())) {
- const VertexAttributeInfo& info = GetAttribute(loc);
+ mtlVertexDescriptor.layouts[mtlVertexBufferIndex] = layoutDesc;
+ [layoutDesc release];
- auto attribDesc = [MTLVertexAttributeDescriptor new];
- attribDesc.format = VertexFormatType(info.format);
- attribDesc.offset = info.offset;
- attribDesc.bufferIndex = mMtlVertexBufferIndices[info.vertexBufferSlot];
- mtlVertexDescriptor.attributes[static_cast<uint8_t>(loc)] = attribDesc;
- [attribDesc release];
- }
-
- return AcquireNSRef(mtlVertexDescriptor);
+ mMtlVertexBufferIndices[slot] = mtlVertexBufferIndex;
+ mtlVertexBufferIndex++;
}
- void RenderPipeline::InitializeAsync(Ref<RenderPipelineBase> renderPipeline,
- WGPUCreateRenderPipelineAsyncCallback callback,
- void* userdata) {
- std::unique_ptr<CreateRenderPipelineAsyncTask> asyncTask =
- std::make_unique<CreateRenderPipelineAsyncTask>(std::move(renderPipeline), callback,
- userdata);
- CreateRenderPipelineAsyncTask::RunAsync(std::move(asyncTask));
+ for (VertexAttributeLocation loc : IterateBitSet(GetAttributeLocationsUsed())) {
+ const VertexAttributeInfo& info = GetAttribute(loc);
+
+ auto attribDesc = [MTLVertexAttributeDescriptor new];
+ attribDesc.format = VertexFormatType(info.format);
+ attribDesc.offset = info.offset;
+ attribDesc.bufferIndex = mMtlVertexBufferIndices[info.vertexBufferSlot];
+ mtlVertexDescriptor.attributes[static_cast<uint8_t>(loc)] = attribDesc;
+ [attribDesc release];
}
+ return AcquireNSRef(mtlVertexDescriptor);
+}
+
+void RenderPipeline::InitializeAsync(Ref<RenderPipelineBase> renderPipeline,
+ WGPUCreateRenderPipelineAsyncCallback callback,
+ void* userdata) {
+ std::unique_ptr<CreateRenderPipelineAsyncTask> asyncTask =
+ std::make_unique<CreateRenderPipelineAsyncTask>(std::move(renderPipeline), callback,
+ userdata);
+ CreateRenderPipelineAsyncTask::RunAsync(std::move(asyncTask));
+}
+
} // namespace dawn::native::metal
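As a reading aid for the arrayStride == 0 fallback in MakeVertexDesc above: when a vertex buffer uses MTLVertexStepFunctionConstant, Metal still insists on a non-zero, 4-byte-aligned stride, so the code derives one from the attributes bound to that buffer. The standalone C++ sketch below restates that computation; the Attribute struct, the sample values, and main() are illustrative stand-ins, not Dawn types.

    // Sketch of the arrayStride == 0 fallback: take max(offset + byteSize) over the
    // attributes of the buffer, then round up to a multiple of 4 as Metal requires.
    #include <algorithm>
    #include <cstddef>
    #include <iostream>
    #include <vector>

    struct Attribute {
        size_t offset;
        size_t byteSize;  // size of the vertex format, e.g. 12 for float32x3
    };

    // Round value up to the next multiple of alignment (alignment must be a power of two).
    size_t Align(size_t value, size_t alignment) {
        return (value + alignment - 1) & ~(alignment - 1);
    }

    size_t FallbackStride(const std::vector<Attribute>& attribsInBuffer) {
        size_t maxArrayStride = 0;
        for (const Attribute& a : attribsInBuffer) {
            maxArrayStride = std::max(maxArrayStride, a.offset + a.byteSize);
        }
        return Align(maxArrayStride, 4);
    }

    int main() {
        // float32x3 at offset 0 and unorm8x2 at offset 12: packed size 14, stride 16.
        std::vector<Attribute> attribs = {{0, 12}, {12, 2}};
        std::cout << FallbackStride(attribs) << "\n";  // prints 16
        return 0;
    }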
diff --git a/chromium/third_party/dawn/src/dawn/native/metal/SamplerMTL.h b/chromium/third_party/dawn/src/dawn/native/metal/SamplerMTL.h
index c71c884e10f..fb7b1fcb90d 100644
--- a/chromium/third_party/dawn/src/dawn/native/metal/SamplerMTL.h
+++ b/chromium/third_party/dawn/src/dawn/native/metal/SamplerMTL.h
@@ -23,21 +23,23 @@
namespace dawn::native::metal {
- class Device;
+class Device;
- class Sampler final : public SamplerBase {
- public:
- static ResultOrError<Ref<Sampler>> Create(Device* device,
- const SamplerDescriptor* descriptor);
+class Sampler final : public SamplerBase {
+ public:
+ static ResultOrError<Ref<Sampler>> Create(Device* device, const SamplerDescriptor* descriptor);
- id<MTLSamplerState> GetMTLSamplerState();
+ Sampler(DeviceBase* device, const SamplerDescriptor* descriptor);
+ ~Sampler() override;
- private:
- using SamplerBase::SamplerBase;
- MaybeError Initialize(const SamplerDescriptor* descriptor);
+ id<MTLSamplerState> GetMTLSamplerState();
- NSPRef<id<MTLSamplerState>> mMtlSamplerState;
- };
+ private:
+ using SamplerBase::SamplerBase;
+ MaybeError Initialize(const SamplerDescriptor* descriptor);
+
+ NSPRef<id<MTLSamplerState>> mMtlSamplerState;
+};
} // namespace dawn::native::metal
diff --git a/chromium/third_party/dawn/src/dawn/native/metal/SamplerMTL.mm b/chromium/third_party/dawn/src/dawn/native/metal/SamplerMTL.mm
index 235b2f8204a..97b30465117 100644
--- a/chromium/third_party/dawn/src/dawn/native/metal/SamplerMTL.mm
+++ b/chromium/third_party/dawn/src/dawn/native/metal/SamplerMTL.mm
@@ -19,88 +19,91 @@
namespace dawn::native::metal {
- namespace {
- MTLSamplerMinMagFilter FilterModeToMinMagFilter(wgpu::FilterMode mode) {
- switch (mode) {
- case wgpu::FilterMode::Nearest:
- return MTLSamplerMinMagFilterNearest;
- case wgpu::FilterMode::Linear:
- return MTLSamplerMinMagFilterLinear;
- }
- }
-
- MTLSamplerMipFilter FilterModeToMipFilter(wgpu::FilterMode mode) {
- switch (mode) {
- case wgpu::FilterMode::Nearest:
- return MTLSamplerMipFilterNearest;
- case wgpu::FilterMode::Linear:
- return MTLSamplerMipFilterLinear;
- }
- }
-
- MTLSamplerAddressMode AddressMode(wgpu::AddressMode mode) {
- switch (mode) {
- case wgpu::AddressMode::Repeat:
- return MTLSamplerAddressModeRepeat;
- case wgpu::AddressMode::MirrorRepeat:
- return MTLSamplerAddressModeMirrorRepeat;
- case wgpu::AddressMode::ClampToEdge:
- return MTLSamplerAddressModeClampToEdge;
- }
- }
+namespace {
+MTLSamplerMinMagFilter FilterModeToMinMagFilter(wgpu::FilterMode mode) {
+ switch (mode) {
+ case wgpu::FilterMode::Nearest:
+ return MTLSamplerMinMagFilterNearest;
+ case wgpu::FilterMode::Linear:
+ return MTLSamplerMinMagFilterLinear;
}
-
- // static
- ResultOrError<Ref<Sampler>> Sampler::Create(Device* device,
- const SamplerDescriptor* descriptor) {
- DAWN_INVALID_IF(
- descriptor->compare != wgpu::CompareFunction::Undefined &&
- device->IsToggleEnabled(Toggle::MetalDisableSamplerCompare),
- "Sampler compare function (%s) not supported. Compare functions are disabled with the "
- "Metal backend.",
- descriptor->compare);
-
- Ref<Sampler> sampler = AcquireRef(new Sampler(device, descriptor));
- DAWN_TRY(sampler->Initialize(descriptor));
- return sampler;
+}
+
+MTLSamplerMipFilter FilterModeToMipFilter(wgpu::FilterMode mode) {
+ switch (mode) {
+ case wgpu::FilterMode::Nearest:
+ return MTLSamplerMipFilterNearest;
+ case wgpu::FilterMode::Linear:
+ return MTLSamplerMipFilterLinear;
}
-
- MaybeError Sampler::Initialize(const SamplerDescriptor* descriptor) {
- NSRef<MTLSamplerDescriptor> mtlDescRef = AcquireNSRef([MTLSamplerDescriptor new]);
- MTLSamplerDescriptor* mtlDesc = mtlDescRef.Get();
-
- mtlDesc.minFilter = FilterModeToMinMagFilter(descriptor->minFilter);
- mtlDesc.magFilter = FilterModeToMinMagFilter(descriptor->magFilter);
- mtlDesc.mipFilter = FilterModeToMipFilter(descriptor->mipmapFilter);
-
- mtlDesc.sAddressMode = AddressMode(descriptor->addressModeU);
- mtlDesc.tAddressMode = AddressMode(descriptor->addressModeV);
- mtlDesc.rAddressMode = AddressMode(descriptor->addressModeW);
-
- mtlDesc.lodMinClamp = descriptor->lodMinClamp;
- mtlDesc.lodMaxClamp = descriptor->lodMaxClamp;
- // https://developer.apple.com/documentation/metal/mtlsamplerdescriptor/1516164-maxanisotropy
- mtlDesc.maxAnisotropy = std::min<uint16_t>(GetMaxAnisotropy(), 16u);
-
- if (descriptor->compare != wgpu::CompareFunction::Undefined) {
- // Sampler compare is unsupported before A9, which we validate in
- // Sampler::Create.
- mtlDesc.compareFunction = ToMetalCompareFunction(descriptor->compare);
- // The value is default-initialized in the else-case, and we don't set it or the
- // Metal debug device errors.
- }
-
- mMtlSamplerState = AcquireNSPRef(
- [ToBackend(GetDevice())->GetMTLDevice() newSamplerStateWithDescriptor:mtlDesc]);
-
- if (mMtlSamplerState == nil) {
- return DAWN_OUT_OF_MEMORY_ERROR("Failed to allocate sampler.");
- }
- return {};
+}
+
+MTLSamplerAddressMode AddressMode(wgpu::AddressMode mode) {
+ switch (mode) {
+ case wgpu::AddressMode::Repeat:
+ return MTLSamplerAddressModeRepeat;
+ case wgpu::AddressMode::MirrorRepeat:
+ return MTLSamplerAddressModeMirrorRepeat;
+ case wgpu::AddressMode::ClampToEdge:
+ return MTLSamplerAddressModeClampToEdge;
+ }
+}
+} // namespace
+
+// static
+ResultOrError<Ref<Sampler>> Sampler::Create(Device* device, const SamplerDescriptor* descriptor) {
+ DAWN_INVALID_IF(
+ descriptor->compare != wgpu::CompareFunction::Undefined &&
+ device->IsToggleEnabled(Toggle::MetalDisableSamplerCompare),
+ "Sampler compare function (%s) not supported. Compare functions are disabled with the "
+ "Metal backend.",
+ descriptor->compare);
+
+ Ref<Sampler> sampler = AcquireRef(new Sampler(device, descriptor));
+ DAWN_TRY(sampler->Initialize(descriptor));
+ return sampler;
+}
+
+Sampler::Sampler(DeviceBase* dev, const SamplerDescriptor* desc) : SamplerBase(dev, desc) {}
+
+Sampler::~Sampler() = default;
+
+MaybeError Sampler::Initialize(const SamplerDescriptor* descriptor) {
+ NSRef<MTLSamplerDescriptor> mtlDescRef = AcquireNSRef([MTLSamplerDescriptor new]);
+ MTLSamplerDescriptor* mtlDesc = mtlDescRef.Get();
+
+ mtlDesc.minFilter = FilterModeToMinMagFilter(descriptor->minFilter);
+ mtlDesc.magFilter = FilterModeToMinMagFilter(descriptor->magFilter);
+ mtlDesc.mipFilter = FilterModeToMipFilter(descriptor->mipmapFilter);
+
+ mtlDesc.sAddressMode = AddressMode(descriptor->addressModeU);
+ mtlDesc.tAddressMode = AddressMode(descriptor->addressModeV);
+ mtlDesc.rAddressMode = AddressMode(descriptor->addressModeW);
+
+ mtlDesc.lodMinClamp = descriptor->lodMinClamp;
+ mtlDesc.lodMaxClamp = descriptor->lodMaxClamp;
+ // https://developer.apple.com/documentation/metal/mtlsamplerdescriptor/1516164-maxanisotropy
+ mtlDesc.maxAnisotropy = std::min<uint16_t>(GetMaxAnisotropy(), 16u);
+
+ if (descriptor->compare != wgpu::CompareFunction::Undefined) {
+ // Sampler compare is unsupported before A9, which we validate in
+ // Sampler::Create.
+ mtlDesc.compareFunction = ToMetalCompareFunction(descriptor->compare);
+ // In the else case the value is left default-initialized; we must not set it there,
+ // or the Metal debug device reports an error.
}
- id<MTLSamplerState> Sampler::GetMTLSamplerState() {
- return mMtlSamplerState.Get();
+ mMtlSamplerState = AcquireNSPRef(
+ [ToBackend(GetDevice())->GetMTLDevice() newSamplerStateWithDescriptor:mtlDesc]);
+
+ if (mMtlSamplerState == nil) {
+ return DAWN_OUT_OF_MEMORY_ERROR("Failed to allocate sampler.");
}
+ return {};
+}
+
+id<MTLSamplerState> Sampler::GetMTLSamplerState() {
+ return mMtlSamplerState.Get();
+}
} // namespace dawn::native::metal
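The maxAnisotropy handling in Sampler::Initialize above is a single clamp: Metal accepts values in [1, 16], and only the upper bound is enforced here (the lower bound is assumed to be handled by earlier validation). A minimal standalone C++ restatement, with the function name and main() being illustrative only:

    #include <algorithm>
    #include <cstdint>
    #include <iostream>

    uint16_t ClampMaxAnisotropy(uint16_t requested) {
        // Metal accepts maxAnisotropy in [1, 16]; the lower bound is assumed to be
        // guaranteed by earlier validation, so only the upper bound is clamped here.
        return std::min<uint16_t>(requested, 16u);
    }

    int main() {
        std::cout << ClampMaxAnisotropy(64) << "\n";  // prints 16
        return 0;
    }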
diff --git a/chromium/third_party/dawn/src/dawn/native/metal/ShaderModuleMTL.h b/chromium/third_party/dawn/src/dawn/native/metal/ShaderModuleMTL.h
index 89f1e54c62c..035922384a1 100644
--- a/chromium/third_party/dawn/src/dawn/native/metal/ShaderModuleMTL.h
+++ b/chromium/third_party/dawn/src/dawn/native/metal/ShaderModuleMTL.h
@@ -15,6 +15,9 @@
#ifndef SRC_DAWN_NATIVE_METAL_SHADERMODULEMTL_H_
#define SRC_DAWN_NATIVE_METAL_SHADERMODULEMTL_H_
+#include <string>
+#include <vector>
+
#include "dawn/native/ShaderModule.h"
#include "dawn/common/NSRef.h"
@@ -24,46 +27,48 @@
namespace dawn::native::metal {
- class Device;
- class PipelineLayout;
- class RenderPipeline;
+class Device;
+class PipelineLayout;
+class RenderPipeline;
- class ShaderModule final : public ShaderModuleBase {
- public:
- static ResultOrError<Ref<ShaderModule>> Create(Device* device,
- const ShaderModuleDescriptor* descriptor,
- ShaderModuleParseResult* parseResult);
+class ShaderModule final : public ShaderModuleBase {
+ public:
+ static ResultOrError<Ref<ShaderModule>> Create(Device* device,
+ const ShaderModuleDescriptor* descriptor,
+ ShaderModuleParseResult* parseResult,
+ OwnedCompilationMessages* compilationMessages);
- struct MetalFunctionData {
- NSPRef<id<MTLFunction>> function;
- bool needsStorageBufferLength;
- std::vector<uint32_t> workgroupAllocations;
- };
+ struct MetalFunctionData {
+ NSPRef<id<MTLFunction>> function;
+ bool needsStorageBufferLength;
+ std::vector<uint32_t> workgroupAllocations;
+ };
- // MTLFunctionConstantValues needs @available tag to compile
- // Use id (like void*) in function signature as workaround and do static cast inside
- MaybeError CreateFunction(const char* entryPointName,
- SingleShaderStage stage,
- const PipelineLayout* layout,
- MetalFunctionData* out,
- id constantValues = nil,
- uint32_t sampleMask = 0xFFFFFFFF,
- const RenderPipeline* renderPipeline = nullptr);
+ // MTLFunctionConstantValues needs an @available guard to compile.
+ // Use id (like void*) in the function signature as a workaround and static_cast inside.
+ MaybeError CreateFunction(const char* entryPointName,
+ SingleShaderStage stage,
+ const PipelineLayout* layout,
+ MetalFunctionData* out,
+ id constantValues = nil,
+ uint32_t sampleMask = 0xFFFFFFFF,
+ const RenderPipeline* renderPipeline = nullptr);
- private:
- ResultOrError<std::string> TranslateToMSL(const char* entryPointName,
- SingleShaderStage stage,
- const PipelineLayout* layout,
- uint32_t sampleMask,
- const RenderPipeline* renderPipeline,
- std::string* remappedEntryPointName,
- bool* needsStorageBufferLength,
- bool* hasInvariantAttribute,
- std::vector<uint32_t>* workgroupAllocations);
- ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor);
- ~ShaderModule() override = default;
- MaybeError Initialize(ShaderModuleParseResult* parseResult);
- };
+ private:
+ ResultOrError<std::string> TranslateToMSL(const char* entryPointName,
+ SingleShaderStage stage,
+ const PipelineLayout* layout,
+ uint32_t sampleMask,
+ const RenderPipeline* renderPipeline,
+ std::string* remappedEntryPointName,
+ bool* needsStorageBufferLength,
+ bool* hasInvariantAttribute,
+ std::vector<uint32_t>* workgroupAllocations);
+ ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor);
+ ~ShaderModule() override;
+ MaybeError Initialize(ShaderModuleParseResult* parseResult,
+ OwnedCompilationMessages* compilationMessages);
+};
} // namespace dawn::native::metal
diff --git a/chromium/third_party/dawn/src/dawn/native/metal/ShaderModuleMTL.mm b/chromium/third_party/dawn/src/dawn/native/metal/ShaderModuleMTL.mm
index e182898d2a1..fa8befd2e5e 100644
--- a/chromium/third_party/dawn/src/dawn/native/metal/ShaderModuleMTL.mm
+++ b/chromium/third_party/dawn/src/dawn/native/metal/ShaderModuleMTL.mm
@@ -28,251 +28,252 @@
namespace dawn::native::metal {
- // static
- ResultOrError<Ref<ShaderModule>> ShaderModule::Create(Device* device,
- const ShaderModuleDescriptor* descriptor,
- ShaderModuleParseResult* parseResult) {
- Ref<ShaderModule> module = AcquireRef(new ShaderModule(device, descriptor));
- DAWN_TRY(module->Initialize(parseResult));
- return module;
- }
-
- ShaderModule::ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor)
- : ShaderModuleBase(device, descriptor) {
- }
-
- MaybeError ShaderModule::Initialize(ShaderModuleParseResult* parseResult) {
- ScopedTintICEHandler scopedICEHandler(GetDevice());
- return InitializeBase(parseResult);
- }
-
- ResultOrError<std::string> ShaderModule::TranslateToMSL(
- const char* entryPointName,
- SingleShaderStage stage,
- const PipelineLayout* layout,
- uint32_t sampleMask,
- const RenderPipeline* renderPipeline,
- std::string* remappedEntryPointName,
- bool* needsStorageBufferLength,
- bool* hasInvariantAttribute,
- std::vector<uint32_t>* workgroupAllocations) {
- ScopedTintICEHandler scopedICEHandler(GetDevice());
-
- std::ostringstream errorStream;
- errorStream << "Tint MSL failure:" << std::endl;
-
- // Remap BindingNumber to BindingIndex in WGSL shader
- using BindingRemapper = tint::transform::BindingRemapper;
- using BindingPoint = tint::transform::BindingPoint;
- BindingRemapper::BindingPoints bindingPoints;
- BindingRemapper::AccessControls accessControls;
-
- for (BindGroupIndex group : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
- const BindGroupLayoutBase::BindingMap& bindingMap =
- layout->GetBindGroupLayout(group)->GetBindingMap();
- for (const auto [bindingNumber, bindingIndex] : bindingMap) {
- const BindingInfo& bindingInfo =
- layout->GetBindGroupLayout(group)->GetBindingInfo(bindingIndex);
-
- if (!(bindingInfo.visibility & StageBit(stage))) {
- continue;
- }
+// static
+ResultOrError<Ref<ShaderModule>> ShaderModule::Create(
+ Device* device,
+ const ShaderModuleDescriptor* descriptor,
+ ShaderModuleParseResult* parseResult,
+ OwnedCompilationMessages* compilationMessages) {
+ Ref<ShaderModule> module = AcquireRef(new ShaderModule(device, descriptor));
+ DAWN_TRY(module->Initialize(parseResult, compilationMessages));
+ return module;
+}
+
+ShaderModule::ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor)
+ : ShaderModuleBase(device, descriptor) {}
+
+ShaderModule::~ShaderModule() = default;
+
+MaybeError ShaderModule::Initialize(ShaderModuleParseResult* parseResult,
+ OwnedCompilationMessages* compilationMessages) {
+ ScopedTintICEHandler scopedICEHandler(GetDevice());
+ return InitializeBase(parseResult, compilationMessages);
+}
+
+ResultOrError<std::string> ShaderModule::TranslateToMSL(
+ const char* entryPointName,
+ SingleShaderStage stage,
+ const PipelineLayout* layout,
+ uint32_t sampleMask,
+ const RenderPipeline* renderPipeline,
+ std::string* remappedEntryPointName,
+ bool* needsStorageBufferLength,
+ bool* hasInvariantAttribute,
+ std::vector<uint32_t>* workgroupAllocations) {
+ ScopedTintICEHandler scopedICEHandler(GetDevice());
+
+ std::ostringstream errorStream;
+ errorStream << "Tint MSL failure:" << std::endl;
+
+ // Remap BindingNumber to BindingIndex in WGSL shader
+ using BindingRemapper = tint::transform::BindingRemapper;
+ using BindingPoint = tint::transform::BindingPoint;
+ BindingRemapper::BindingPoints bindingPoints;
+ BindingRemapper::AccessControls accessControls;
+
+ for (BindGroupIndex group : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
+ const BindGroupLayoutBase::BindingMap& bindingMap =
+ layout->GetBindGroupLayout(group)->GetBindingMap();
+ for (const auto [bindingNumber, bindingIndex] : bindingMap) {
+ const BindingInfo& bindingInfo =
+ layout->GetBindGroupLayout(group)->GetBindingInfo(bindingIndex);
+
+ if (!(bindingInfo.visibility & StageBit(stage))) {
+ continue;
+ }
- uint32_t shaderIndex = layout->GetBindingIndexInfo(stage)[group][bindingIndex];
+ uint32_t shaderIndex = layout->GetBindingIndexInfo(stage)[group][bindingIndex];
- BindingPoint srcBindingPoint{static_cast<uint32_t>(group),
- static_cast<uint32_t>(bindingNumber)};
- BindingPoint dstBindingPoint{0, shaderIndex};
- if (srcBindingPoint != dstBindingPoint) {
- bindingPoints.emplace(srcBindingPoint, dstBindingPoint);
- }
+ BindingPoint srcBindingPoint{static_cast<uint32_t>(group),
+ static_cast<uint32_t>(bindingNumber)};
+ BindingPoint dstBindingPoint{0, shaderIndex};
+ if (srcBindingPoint != dstBindingPoint) {
+ bindingPoints.emplace(srcBindingPoint, dstBindingPoint);
}
}
+ }
- tint::transform::Manager transformManager;
- tint::transform::DataMap transformInputs;
+ tint::transform::Manager transformManager;
+ tint::transform::DataMap transformInputs;
- // We only remap bindings for the target entry point, so we need to strip all other entry
- // points to avoid generating invalid bindings for them.
- transformManager.Add<tint::transform::SingleEntryPoint>();
- transformInputs.Add<tint::transform::SingleEntryPoint::Config>(entryPointName);
+ // We only remap bindings for the target entry point, so we need to strip all other entry
+ // points to avoid generating invalid bindings for them.
+ transformManager.Add<tint::transform::SingleEntryPoint>();
+ transformInputs.Add<tint::transform::SingleEntryPoint::Config>(entryPointName);
- AddExternalTextureTransform(layout, &transformManager, &transformInputs);
+ AddExternalTextureTransform(layout, &transformManager, &transformInputs);
- if (stage == SingleShaderStage::Vertex &&
- GetDevice()->IsToggleEnabled(Toggle::MetalEnableVertexPulling)) {
- transformManager.Add<tint::transform::VertexPulling>();
- AddVertexPullingTransformConfig(*renderPipeline, entryPointName,
- kPullingBufferBindingSet, &transformInputs);
+ if (stage == SingleShaderStage::Vertex &&
+ GetDevice()->IsToggleEnabled(Toggle::MetalEnableVertexPulling)) {
+ transformManager.Add<tint::transform::VertexPulling>();
+ AddVertexPullingTransformConfig(*renderPipeline, entryPointName, kPullingBufferBindingSet,
+ &transformInputs);
- for (VertexBufferSlot slot :
- IterateBitSet(renderPipeline->GetVertexBufferSlotsUsed())) {
- uint32_t metalIndex = renderPipeline->GetMtlVertexBufferIndex(slot);
+ for (VertexBufferSlot slot : IterateBitSet(renderPipeline->GetVertexBufferSlotsUsed())) {
+ uint32_t metalIndex = renderPipeline->GetMtlVertexBufferIndex(slot);
- // Tell Tint to map (kPullingBufferBindingSet, slot) to this MSL buffer index.
- BindingPoint srcBindingPoint{static_cast<uint32_t>(kPullingBufferBindingSet),
- static_cast<uint8_t>(slot)};
- BindingPoint dstBindingPoint{0, metalIndex};
- if (srcBindingPoint != dstBindingPoint) {
- bindingPoints.emplace(srcBindingPoint, dstBindingPoint);
- }
+ // Tell Tint to map (kPullingBufferBindingSet, slot) to this MSL buffer index.
+ BindingPoint srcBindingPoint{static_cast<uint32_t>(kPullingBufferBindingSet),
+ static_cast<uint8_t>(slot)};
+ BindingPoint dstBindingPoint{0, metalIndex};
+ if (srcBindingPoint != dstBindingPoint) {
+ bindingPoints.emplace(srcBindingPoint, dstBindingPoint);
}
}
- if (GetDevice()->IsRobustnessEnabled()) {
- transformManager.Add<tint::transform::Robustness>();
- }
- transformManager.Add<tint::transform::BindingRemapper>();
- transformManager.Add<tint::transform::Renamer>();
-
- if (GetDevice()->IsToggleEnabled(Toggle::DisableSymbolRenaming)) {
- // We still need to rename MSL reserved keywords
- transformInputs.Add<tint::transform::Renamer::Config>(
- tint::transform::Renamer::Target::kMslKeywords);
- }
+ }
+ if (GetDevice()->IsRobustnessEnabled()) {
+ transformManager.Add<tint::transform::Robustness>();
+ }
+ transformManager.Add<tint::transform::BindingRemapper>();
+ transformManager.Add<tint::transform::Renamer>();
- transformInputs.Add<BindingRemapper::Remappings>(std::move(bindingPoints),
- std::move(accessControls),
- /* mayCollide */ true);
+ if (GetDevice()->IsToggleEnabled(Toggle::DisableSymbolRenaming)) {
+ // We still need to rename MSL reserved keywords
+ transformInputs.Add<tint::transform::Renamer::Config>(
+ tint::transform::Renamer::Target::kMslKeywords);
+ }
- tint::Program program;
- tint::transform::DataMap transformOutputs;
- {
- TRACE_EVENT0(GetDevice()->GetPlatform(), General, "RunTransforms");
- DAWN_TRY_ASSIGN(program, RunTransforms(&transformManager, GetTintProgram(),
- transformInputs, &transformOutputs, nullptr));
- }
+ transformInputs.Add<BindingRemapper::Remappings>(std::move(bindingPoints),
+ std::move(accessControls),
+ /* mayCollide */ true);
- if (auto* data = transformOutputs.Get<tint::transform::Renamer::Data>()) {
- auto it = data->remappings.find(entryPointName);
- if (it != data->remappings.end()) {
- *remappedEntryPointName = it->second;
- } else {
- DAWN_INVALID_IF(!GetDevice()->IsToggleEnabled(Toggle::DisableSymbolRenaming),
- "Could not find remapped name for entry point.");
+ tint::Program program;
+ tint::transform::DataMap transformOutputs;
+ {
+ TRACE_EVENT0(GetDevice()->GetPlatform(), General, "RunTransforms");
+ DAWN_TRY_ASSIGN(program, RunTransforms(&transformManager, GetTintProgram(), transformInputs,
+ &transformOutputs, nullptr));
+ }
- *remappedEntryPointName = entryPointName;
- }
+ if (auto* data = transformOutputs.Get<tint::transform::Renamer::Data>()) {
+ auto it = data->remappings.find(entryPointName);
+ if (it != data->remappings.end()) {
+ *remappedEntryPointName = it->second;
} else {
- return DAWN_FORMAT_VALIDATION_ERROR("Transform output missing renamer data.");
- }
+ DAWN_INVALID_IF(!GetDevice()->IsToggleEnabled(Toggle::DisableSymbolRenaming),
+ "Could not find remapped name for entry point.");
- tint::writer::msl::Options options;
- options.buffer_size_ubo_index = kBufferLengthBufferSlot;
- options.fixed_sample_mask = sampleMask;
- options.disable_workgroup_init = GetDevice()->IsToggleEnabled(Toggle::DisableWorkgroupInit);
- options.emit_vertex_point_size =
- stage == SingleShaderStage::Vertex &&
- renderPipeline->GetPrimitiveTopology() == wgpu::PrimitiveTopology::PointList;
- TRACE_EVENT0(GetDevice()->GetPlatform(), General, "tint::writer::msl::Generate");
- auto result = tint::writer::msl::Generate(&program, options);
- DAWN_INVALID_IF(!result.success, "An error occured while generating MSL: %s.",
- result.error);
-
- *needsStorageBufferLength = result.needs_storage_buffer_sizes;
- *hasInvariantAttribute = result.has_invariant_attribute;
- *workgroupAllocations = std::move(result.workgroup_allocations[*remappedEntryPointName]);
-
- return std::move(result.msl);
+ *remappedEntryPointName = entryPointName;
+ }
+ } else {
+ return DAWN_FORMAT_VALIDATION_ERROR("Transform output missing renamer data.");
}
- MaybeError ShaderModule::CreateFunction(const char* entryPointName,
- SingleShaderStage stage,
- const PipelineLayout* layout,
- ShaderModule::MetalFunctionData* out,
- id constantValuesPointer,
- uint32_t sampleMask,
- const RenderPipeline* renderPipeline) {
- TRACE_EVENT0(GetDevice()->GetPlatform(), General, "ShaderModuleMTL::CreateFunction");
-
- ASSERT(!IsError());
- ASSERT(out);
-
- // Vertex stages must specify a renderPipeline
- if (stage == SingleShaderStage::Vertex) {
- ASSERT(renderPipeline != nullptr);
- }
+ tint::writer::msl::Options options;
+ options.buffer_size_ubo_index = kBufferLengthBufferSlot;
+ options.fixed_sample_mask = sampleMask;
+ options.disable_workgroup_init = GetDevice()->IsToggleEnabled(Toggle::DisableWorkgroupInit);
+ options.emit_vertex_point_size =
+ stage == SingleShaderStage::Vertex &&
+ renderPipeline->GetPrimitiveTopology() == wgpu::PrimitiveTopology::PointList;
+ TRACE_EVENT0(GetDevice()->GetPlatform(), General, "tint::writer::msl::Generate");
+ auto result = tint::writer::msl::Generate(&program, options);
+ DAWN_INVALID_IF(!result.success, "An error occurred while generating MSL: %s.", result.error);
+
+ *needsStorageBufferLength = result.needs_storage_buffer_sizes;
+ *hasInvariantAttribute = result.has_invariant_attribute;
+ *workgroupAllocations = std::move(result.workgroup_allocations[*remappedEntryPointName]);
+
+ return std::move(result.msl);
+}
+
+MaybeError ShaderModule::CreateFunction(const char* entryPointName,
+ SingleShaderStage stage,
+ const PipelineLayout* layout,
+ ShaderModule::MetalFunctionData* out,
+ id constantValuesPointer,
+ uint32_t sampleMask,
+ const RenderPipeline* renderPipeline) {
+ TRACE_EVENT0(GetDevice()->GetPlatform(), General, "ShaderModuleMTL::CreateFunction");
+
+ ASSERT(!IsError());
+ ASSERT(out);
+
+ // Vertex stages must specify a renderPipeline
+ if (stage == SingleShaderStage::Vertex) {
+ ASSERT(renderPipeline != nullptr);
+ }
- std::string remappedEntryPointName;
- std::string msl;
- bool hasInvariantAttribute = false;
- DAWN_TRY_ASSIGN(msl,
- TranslateToMSL(entryPointName, stage, layout, sampleMask, renderPipeline,
- &remappedEntryPointName, &out->needsStorageBufferLength,
- &hasInvariantAttribute, &out->workgroupAllocations));
-
- // Metal uses Clang to compile the shader as C++14. Disable everything in the -Wall
- // category. -Wunused-variable in particular comes up a lot in generated code, and some
- // (old?) Metal drivers accidentally treat it as a MTLLibraryErrorCompileError instead
- // of a warning.
- msl = R"(
+ std::string remappedEntryPointName;
+ std::string msl;
+ bool hasInvariantAttribute = false;
+ DAWN_TRY_ASSIGN(msl, TranslateToMSL(entryPointName, stage, layout, sampleMask, renderPipeline,
+ &remappedEntryPointName, &out->needsStorageBufferLength,
+ &hasInvariantAttribute, &out->workgroupAllocations));
+
+ // Metal uses Clang to compile the shader as C++14. Disable everything in the -Wall
+ // category. -Wunused-variable in particular comes up a lot in generated code, and some
+ // (old?) Metal drivers accidentally treat it as a MTLLibraryErrorCompileError instead
+ // of a warning.
+ msl = R"(
#ifdef __clang__
#pragma clang diagnostic ignored "-Wall"
#endif
)" + msl;
- if (GetDevice()->IsToggleEnabled(Toggle::DumpShaders)) {
- std::ostringstream dumpedMsg;
- dumpedMsg << "/* Dumped generated MSL */" << std::endl << msl;
- GetDevice()->EmitLog(WGPULoggingType_Info, dumpedMsg.str().c_str());
- }
+ if (GetDevice()->IsToggleEnabled(Toggle::DumpShaders)) {
+ std::ostringstream dumpedMsg;
+ dumpedMsg << "/* Dumped generated MSL */" << std::endl << msl;
+ GetDevice()->EmitLog(WGPULoggingType_Info, dumpedMsg.str().c_str());
+ }
- NSRef<NSString> mslSource = AcquireNSRef([[NSString alloc] initWithUTF8String:msl.c_str()]);
+ NSRef<NSString> mslSource = AcquireNSRef([[NSString alloc] initWithUTF8String:msl.c_str()]);
- NSRef<MTLCompileOptions> compileOptions = AcquireNSRef([[MTLCompileOptions alloc] init]);
- if (hasInvariantAttribute) {
- if (@available(macOS 11.0, iOS 13.0, *)) {
- (*compileOptions).preserveInvariance = true;
- }
- }
- auto mtlDevice = ToBackend(GetDevice())->GetMTLDevice();
- NSError* error = nullptr;
-
- NSPRef<id<MTLLibrary>> library;
- {
- TRACE_EVENT0(GetDevice()->GetPlatform(), General, "MTLDevice::newLibraryWithSource");
- library = AcquireNSPRef([mtlDevice newLibraryWithSource:mslSource.Get()
- options:compileOptions.Get()
- error:&error]);
+ NSRef<MTLCompileOptions> compileOptions = AcquireNSRef([[MTLCompileOptions alloc] init]);
+ if (hasInvariantAttribute) {
+ if (@available(macOS 11.0, iOS 13.0, *)) {
+ (*compileOptions).preserveInvariance = true;
}
+ }
+ auto mtlDevice = ToBackend(GetDevice())->GetMTLDevice();
+ NSError* error = nullptr;
+
+ NSPRef<id<MTLLibrary>> library;
+ {
+ TRACE_EVENT0(GetDevice()->GetPlatform(), General, "MTLDevice::newLibraryWithSource");
+ library = AcquireNSPRef([mtlDevice newLibraryWithSource:mslSource.Get()
+ options:compileOptions.Get()
+ error:&error]);
+ }
- if (error != nullptr) {
- DAWN_INVALID_IF(error.code != MTLLibraryErrorCompileWarning,
- "Unable to create library object: %s.",
- [error.localizedDescription UTF8String]);
- }
- ASSERT(library != nil);
-
- NSRef<NSString> name =
- AcquireNSRef([[NSString alloc] initWithUTF8String:remappedEntryPointName.c_str()]);
-
- {
- TRACE_EVENT0(GetDevice()->GetPlatform(), General, "MTLLibrary::newFunctionWithName");
- if (constantValuesPointer != nil) {
- if (@available(macOS 10.12, *)) {
- MTLFunctionConstantValues* constantValues = constantValuesPointer;
- out->function = AcquireNSPRef([*library newFunctionWithName:name.Get()
- constantValues:constantValues
- error:&error]);
- if (error != nullptr) {
- if (error.code != MTLLibraryErrorCompileWarning) {
- return DAWN_VALIDATION_ERROR(std::string("Function compile error: ") +
- [error.localizedDescription UTF8String]);
- }
+ if (error != nullptr) {
+ DAWN_INVALID_IF(error.code != MTLLibraryErrorCompileWarning,
+ "Unable to create library object: %s.",
+ [error.localizedDescription UTF8String]);
+ }
+ ASSERT(library != nil);
+
+ NSRef<NSString> name =
+ AcquireNSRef([[NSString alloc] initWithUTF8String:remappedEntryPointName.c_str()]);
+
+ {
+ TRACE_EVENT0(GetDevice()->GetPlatform(), General, "MTLLibrary::newFunctionWithName");
+ if (constantValuesPointer != nil) {
+ if (@available(macOS 10.12, *)) {
+ MTLFunctionConstantValues* constantValues = constantValuesPointer;
+ out->function = AcquireNSPRef([*library newFunctionWithName:name.Get()
+ constantValues:constantValues
+ error:&error]);
+ if (error != nullptr) {
+ if (error.code != MTLLibraryErrorCompileWarning) {
+ return DAWN_VALIDATION_ERROR(std::string("Function compile error: ") +
+ [error.localizedDescription UTF8String]);
}
- ASSERT(out->function != nil);
- } else {
- UNREACHABLE();
}
+ ASSERT(out->function != nil);
} else {
- out->function = AcquireNSPRef([*library newFunctionWithName:name.Get()]);
+ UNREACHABLE();
}
+ } else {
+ out->function = AcquireNSPRef([*library newFunctionWithName:name.Get()]);
}
+ }
- if (GetDevice()->IsToggleEnabled(Toggle::MetalEnableVertexPulling) &&
- GetEntryPoint(entryPointName).usedVertexInputs.any()) {
- out->needsStorageBufferLength = true;
- }
-
- return {};
+ if (GetDevice()->IsToggleEnabled(Toggle::MetalEnableVertexPulling) &&
+ GetEntryPoint(entryPointName).usedVertexInputs.any()) {
+ out->needsStorageBufferLength = true;
}
+
+ return {};
+}
} // namespace dawn::native::metal
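The BindingRemapper setup in TranslateToMSL above flattens WGSL (group, binding) pairs into the single index space used by the Metal argument tables, i.e. everything ends up in "group 0" at the slot the pipeline layout assigned. The standalone C++ sketch below mirrors that loop; the slot assignments are made up for illustration, and BindingPoint here is a plain std::pair rather than Tint's type.

    #include <cstdint>
    #include <iostream>
    #include <map>
    #include <utility>

    using BindingPoint = std::pair<uint32_t, uint32_t>;  // (group, binding) or (0, slot)

    int main() {
        // Hypothetical flat Metal slots assigned by the pipeline layout.
        std::map<BindingPoint, uint32_t> layoutAssignedSlot = {
            {{0, 0}, 0}, {{0, 2}, 1}, {{1, 0}, 2}};

        // Build the remapping table: only entries that actually move are recorded.
        std::map<BindingPoint, BindingPoint> bindingPoints;
        for (const auto& [srcBindingPoint, shaderIndex] : layoutAssignedSlot) {
            BindingPoint dstBindingPoint{0, shaderIndex};
            if (srcBindingPoint != dstBindingPoint) {
                bindingPoints.emplace(srcBindingPoint, dstBindingPoint);
            }
        }

        for (const auto& [src, dst] : bindingPoints) {
            std::cout << "(" << src.first << "," << src.second << ") -> (0," << dst.second
                      << ")\n";
        }
        return 0;
    }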
diff --git a/chromium/third_party/dawn/src/dawn/native/metal/StagingBufferMTL.h b/chromium/third_party/dawn/src/dawn/native/metal/StagingBufferMTL.h
index 7c6636fce26..bbad022d5a2 100644
--- a/chromium/third_party/dawn/src/dawn/native/metal/StagingBufferMTL.h
+++ b/chromium/third_party/dawn/src/dawn/native/metal/StagingBufferMTL.h
@@ -23,20 +23,21 @@
namespace dawn::native::metal {
- class Device;
+class Device;
- class StagingBuffer : public StagingBufferBase {
- public:
- StagingBuffer(size_t size, Device* device);
+class StagingBuffer : public StagingBufferBase {
+ public:
+ StagingBuffer(size_t size, Device* device);
+ ~StagingBuffer() override;
- id<MTLBuffer> GetBufferHandle() const;
+ id<MTLBuffer> GetBufferHandle() const;
- MaybeError Initialize() override;
+ MaybeError Initialize() override;
- private:
- Device* mDevice;
- NSPRef<id<MTLBuffer>> mBuffer;
- };
+ private:
+ Device* mDevice;
+ NSPRef<id<MTLBuffer>> mBuffer;
+};
} // namespace dawn::native::metal
#endif // SRC_DAWN_NATIVE_METAL_STAGINGBUFFERMTL_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/metal/StagingBufferMTL.mm b/chromium/third_party/dawn/src/dawn/native/metal/StagingBufferMTL.mm
index a3fd91ff3a2..f4255f13244 100644
--- a/chromium/third_party/dawn/src/dawn/native/metal/StagingBufferMTL.mm
+++ b/chromium/third_party/dawn/src/dawn/native/metal/StagingBufferMTL.mm
@@ -17,30 +17,31 @@
namespace dawn::native::metal {
- StagingBuffer::StagingBuffer(size_t size, Device* device)
- : StagingBufferBase(size), mDevice(device) {
- }
-
- MaybeError StagingBuffer::Initialize() {
- const size_t bufferSize = GetSize();
- mBuffer = AcquireNSPRef([mDevice->GetMTLDevice()
- newBufferWithLength:bufferSize
- options:MTLResourceStorageModeShared]);
+StagingBuffer::StagingBuffer(size_t size, Device* device)
+ : StagingBufferBase(size), mDevice(device) {}
- if (mBuffer == nullptr) {
- return DAWN_OUT_OF_MEMORY_ERROR("Unable to allocate buffer.");
- }
+StagingBuffer::~StagingBuffer() = default;
- mMappedPointer = [*mBuffer contents];
- if (mMappedPointer == nullptr) {
- return DAWN_INTERNAL_ERROR("Unable to map staging buffer.");
- }
+MaybeError StagingBuffer::Initialize() {
+ const size_t bufferSize = GetSize();
+ mBuffer =
+ AcquireNSPRef([mDevice->GetMTLDevice() newBufferWithLength:bufferSize
+ options:MTLResourceStorageModeShared]);
- return {};
+ if (mBuffer == nullptr) {
+ return DAWN_OUT_OF_MEMORY_ERROR("Unable to allocate buffer.");
}
- id<MTLBuffer> StagingBuffer::GetBufferHandle() const {
- return mBuffer.Get();
+ mMappedPointer = [*mBuffer contents];
+ if (mMappedPointer == nullptr) {
+ return DAWN_INTERNAL_ERROR("Unable to map staging buffer.");
}
+ return {};
+}
+
+id<MTLBuffer> StagingBuffer::GetBufferHandle() const {
+ return mBuffer.Get();
+}
+
} // namespace dawn::native::metal
diff --git a/chromium/third_party/dawn/src/dawn/native/metal/SwapChainMTL.h b/chromium/third_party/dawn/src/dawn/native/metal/SwapChainMTL.h
index a5a1e4899d0..de5cd5c5d48 100644
--- a/chromium/third_party/dawn/src/dawn/native/metal/SwapChainMTL.h
+++ b/chromium/third_party/dawn/src/dawn/native/metal/SwapChainMTL.h
@@ -24,43 +24,45 @@
namespace dawn::native::metal {
- class Device;
- class Texture;
-
- class OldSwapChain final : public OldSwapChainBase {
- public:
- static Ref<OldSwapChain> Create(Device* deivce, const SwapChainDescriptor* descriptor);
-
- protected:
- OldSwapChain(Device* device, const SwapChainDescriptor* descriptor);
- ~OldSwapChain() override;
- TextureBase* GetNextTextureImpl(const TextureDescriptor* descriptor) override;
- MaybeError OnBeforePresent(TextureViewBase* view) override;
- };
-
- class SwapChain final : public NewSwapChainBase {
- public:
- static ResultOrError<Ref<SwapChain>> Create(Device* device,
- Surface* surface,
- NewSwapChainBase* previousSwapChain,
- const SwapChainDescriptor* descriptor);
- ~SwapChain() override;
-
- private:
- void DestroyImpl() override;
-
- using NewSwapChainBase::NewSwapChainBase;
- MaybeError Initialize(NewSwapChainBase* previousSwapChain);
-
- NSRef<CAMetalLayer> mLayer;
-
- NSPRef<id<CAMetalDrawable>> mCurrentDrawable;
- Ref<Texture> mTexture;
-
- MaybeError PresentImpl() override;
- ResultOrError<Ref<TextureViewBase>> GetCurrentTextureViewImpl() override;
- void DetachFromSurfaceImpl() override;
- };
+class Device;
+class Texture;
+
+class OldSwapChain final : public OldSwapChainBase {
+ public:
+ static Ref<OldSwapChain> Create(Device* device, const SwapChainDescriptor* descriptor);
+
+ protected:
+ OldSwapChain(Device* device, const SwapChainDescriptor* descriptor);
+ ~OldSwapChain() override;
+ TextureBase* GetNextTextureImpl(const TextureDescriptor* descriptor) override;
+ MaybeError OnBeforePresent(TextureViewBase* view) override;
+};
+
+class SwapChain final : public NewSwapChainBase {
+ public:
+ static ResultOrError<Ref<SwapChain>> Create(Device* device,
+ Surface* surface,
+ NewSwapChainBase* previousSwapChain,
+ const SwapChainDescriptor* descriptor);
+
+ SwapChain(DeviceBase* device, Surface* surface, const SwapChainDescriptor* descriptor);
+ ~SwapChain() override;
+
+ private:
+ void DestroyImpl() override;
+
+ using NewSwapChainBase::NewSwapChainBase;
+ MaybeError Initialize(NewSwapChainBase* previousSwapChain);
+
+ NSRef<CAMetalLayer> mLayer;
+
+ NSPRef<id<CAMetalDrawable>> mCurrentDrawable;
+ Ref<Texture> mTexture;
+
+ MaybeError PresentImpl() override;
+ ResultOrError<Ref<TextureViewBase>> GetCurrentTextureViewImpl() override;
+ void DetachFromSurfaceImpl() override;
+};
} // namespace dawn::native::metal
diff --git a/chromium/third_party/dawn/src/dawn/native/metal/SwapChainMTL.mm b/chromium/third_party/dawn/src/dawn/native/metal/SwapChainMTL.mm
index 04e66fb6ec6..60fb77e4f15 100644
--- a/chromium/third_party/dawn/src/dawn/native/metal/SwapChainMTL.mm
+++ b/chromium/third_party/dawn/src/dawn/native/metal/SwapChainMTL.mm
@@ -18,137 +18,139 @@
#include "dawn/native/metal/DeviceMTL.h"
#include "dawn/native/metal/TextureMTL.h"
-#include <dawn/dawn_wsi.h>
+#include "dawn/dawn_wsi.h"
#import <QuartzCore/CAMetalLayer.h>
namespace dawn::native::metal {
- // OldSwapChain
-
- // static
- Ref<OldSwapChain> OldSwapChain::Create(Device* device, const SwapChainDescriptor* descriptor) {
- return AcquireRef(new OldSwapChain(device, descriptor));
- }
-
- OldSwapChain::OldSwapChain(Device* device, const SwapChainDescriptor* descriptor)
- : OldSwapChainBase(device, descriptor) {
- const auto& im = GetImplementation();
- DawnWSIContextMetal wsiContext = {};
- wsiContext.device = ToBackend(GetDevice())->GetMTLDevice();
- wsiContext.queue = ToBackend(GetDevice())->GetMTLQueue();
- im.Init(im.userData, &wsiContext);
- }
-
- OldSwapChain::~OldSwapChain() {
+// OldSwapChain
+
+// static
+Ref<OldSwapChain> OldSwapChain::Create(Device* device, const SwapChainDescriptor* descriptor) {
+ return AcquireRef(new OldSwapChain(device, descriptor));
+}
+
+OldSwapChain::OldSwapChain(Device* device, const SwapChainDescriptor* descriptor)
+ : OldSwapChainBase(device, descriptor) {
+ const auto& im = GetImplementation();
+ DawnWSIContextMetal wsiContext = {};
+ wsiContext.device = ToBackend(GetDevice())->GetMTLDevice();
+ wsiContext.queue = ToBackend(GetDevice())->GetMTLQueue();
+ im.Init(im.userData, &wsiContext);
+}
+
+OldSwapChain::~OldSwapChain() {}
+
+TextureBase* OldSwapChain::GetNextTextureImpl(const TextureDescriptor* descriptor) {
+ const auto& im = GetImplementation();
+ DawnSwapChainNextTexture next = {};
+ DawnSwapChainError error = im.GetNextTexture(im.userData, &next);
+ if (error) {
+ GetDevice()->HandleError(InternalErrorType::Internal, error);
+ return nullptr;
}
- TextureBase* OldSwapChain::GetNextTextureImpl(const TextureDescriptor* descriptor) {
- const auto& im = GetImplementation();
- DawnSwapChainNextTexture next = {};
- DawnSwapChainError error = im.GetNextTexture(im.userData, &next);
- if (error) {
- GetDevice()->HandleError(InternalErrorType::Internal, error);
- return nullptr;
- }
+ id<MTLTexture> nativeTexture = reinterpret_cast<id<MTLTexture>>(next.texture.ptr);
- id<MTLTexture> nativeTexture = reinterpret_cast<id<MTLTexture>>(next.texture.ptr);
+ return Texture::CreateWrapping(ToBackend(GetDevice()), descriptor, nativeTexture).Detach();
+}
- return Texture::CreateWrapping(ToBackend(GetDevice()), descriptor, nativeTexture).Detach();
- }
+MaybeError OldSwapChain::OnBeforePresent(TextureViewBase*) {
+ return {};
+}
- MaybeError OldSwapChain::OnBeforePresent(TextureViewBase*) {
- return {};
- }
+// SwapChain
- // SwapChain
+// static
+ResultOrError<Ref<SwapChain>> SwapChain::Create(Device* device,
+ Surface* surface,
+ NewSwapChainBase* previousSwapChain,
+ const SwapChainDescriptor* descriptor) {
+ Ref<SwapChain> swapchain = AcquireRef(new SwapChain(device, surface, descriptor));
+ DAWN_TRY(swapchain->Initialize(previousSwapChain));
+ return swapchain;
+}
- // static
- ResultOrError<Ref<SwapChain>> SwapChain::Create(Device* device,
- Surface* surface,
- NewSwapChainBase* previousSwapChain,
- const SwapChainDescriptor* descriptor) {
- Ref<SwapChain> swapchain = AcquireRef(new SwapChain(device, surface, descriptor));
- DAWN_TRY(swapchain->Initialize(previousSwapChain));
- return swapchain;
- }
+SwapChain::SwapChain(DeviceBase* dev, Surface* sur, const SwapChainDescriptor* desc)
+ : NewSwapChainBase(dev, sur, desc) {}
- SwapChain::~SwapChain() = default;
+SwapChain::~SwapChain() = default;
- void SwapChain::DestroyImpl() {
- SwapChainBase::DestroyImpl();
- DetachFromSurface();
- }
+void SwapChain::DestroyImpl() {
+ SwapChainBase::DestroyImpl();
+ DetachFromSurface();
+}
- MaybeError SwapChain::Initialize(NewSwapChainBase* previousSwapChain) {
- ASSERT(GetSurface()->GetType() == Surface::Type::MetalLayer);
+MaybeError SwapChain::Initialize(NewSwapChainBase* previousSwapChain) {
+ ASSERT(GetSurface()->GetType() == Surface::Type::MetalLayer);
- if (previousSwapChain != nullptr) {
- // TODO(crbug.com/dawn/269): figure out what should happen when surfaces are used by
- // multiple backends one after the other. It probably needs to block until the backend
- // and GPU are completely finished with the previous swapchain.
- DAWN_INVALID_IF(previousSwapChain->GetBackendType() != wgpu::BackendType::Metal,
- "Metal SwapChain cannot switch backend types from %s to %s.",
- previousSwapChain->GetBackendType(), wgpu::BackendType::Metal);
+ if (previousSwapChain != nullptr) {
+ // TODO(crbug.com/dawn/269): figure out what should happen when surfaces are used by
+ // multiple backends one after the other. It probably needs to block until the backend
+ // and GPU are completely finished with the previous swapchain.
+ DAWN_INVALID_IF(previousSwapChain->GetBackendType() != wgpu::BackendType::Metal,
+ "Metal SwapChain cannot switch backend types from %s to %s.",
+ previousSwapChain->GetBackendType(), wgpu::BackendType::Metal);
- previousSwapChain->DetachFromSurface();
- }
+ previousSwapChain->DetachFromSurface();
+ }
- mLayer = static_cast<CAMetalLayer*>(GetSurface()->GetMetalLayer());
- ASSERT(mLayer != nullptr);
+ mLayer = static_cast<CAMetalLayer*>(GetSurface()->GetMetalLayer());
+ ASSERT(mLayer != nullptr);
- CGSize size = {};
- size.width = GetWidth();
- size.height = GetHeight();
- [*mLayer setDrawableSize:size];
+ CGSize size = {};
+ size.width = GetWidth();
+ size.height = GetHeight();
+ [*mLayer setDrawableSize:size];
- [*mLayer setFramebufferOnly:(GetUsage() == wgpu::TextureUsage::RenderAttachment)];
- [*mLayer setDevice:ToBackend(GetDevice())->GetMTLDevice()];
- [*mLayer setPixelFormat:MetalPixelFormat(GetFormat())];
+ [*mLayer setFramebufferOnly:(GetUsage() == wgpu::TextureUsage::RenderAttachment)];
+ [*mLayer setDevice:ToBackend(GetDevice())->GetMTLDevice()];
+ [*mLayer setPixelFormat:MetalPixelFormat(GetFormat())];
-#if defined(DAWN_PLATFORM_MACOS)
- if (@available(macos 10.13, *)) {
- [*mLayer setDisplaySyncEnabled:(GetPresentMode() != wgpu::PresentMode::Immediate)];
- }
-#endif // defined(DAWN_PLATFORM_MACOS)
+#if DAWN_PLATFORM_IS(MACOS)
+ if (@available(macos 10.13, *)) {
+ [*mLayer setDisplaySyncEnabled:(GetPresentMode() != wgpu::PresentMode::Immediate)];
+ }
+#endif // DAWN_PLATFORM_IS(MACOS)
- // There is no way to control Fifo vs. Mailbox in Metal.
+ // There is no way to control Fifo vs. Mailbox in Metal.
- return {};
- }
+ return {};
+}
- MaybeError SwapChain::PresentImpl() {
- ASSERT(mCurrentDrawable != nullptr);
- [*mCurrentDrawable present];
+MaybeError SwapChain::PresentImpl() {
+ ASSERT(mCurrentDrawable != nullptr);
+ [*mCurrentDrawable present];
- mTexture->APIDestroy();
- mTexture = nullptr;
+ mTexture->APIDestroy();
+ mTexture = nullptr;
- mCurrentDrawable = nullptr;
+ mCurrentDrawable = nullptr;
- return {};
- }
+ return {};
+}
- ResultOrError<Ref<TextureViewBase>> SwapChain::GetCurrentTextureViewImpl() {
- ASSERT(mCurrentDrawable == nullptr);
- mCurrentDrawable = [*mLayer nextDrawable];
+ResultOrError<Ref<TextureViewBase>> SwapChain::GetCurrentTextureViewImpl() {
+ ASSERT(mCurrentDrawable == nullptr);
+ mCurrentDrawable = [*mLayer nextDrawable];
- TextureDescriptor textureDesc = GetSwapChainBaseTextureDescriptor(this);
+ TextureDescriptor textureDesc = GetSwapChainBaseTextureDescriptor(this);
- mTexture = Texture::CreateWrapping(ToBackend(GetDevice()), &textureDesc,
- [*mCurrentDrawable texture]);
- return mTexture->CreateView();
- }
+ mTexture =
+ Texture::CreateWrapping(ToBackend(GetDevice()), &textureDesc, [*mCurrentDrawable texture]);
+ return mTexture->CreateView();
+}
- void SwapChain::DetachFromSurfaceImpl() {
- ASSERT((mTexture == nullptr) == (mCurrentDrawable == nullptr));
+void SwapChain::DetachFromSurfaceImpl() {
+ ASSERT((mTexture == nullptr) == (mCurrentDrawable == nullptr));
- if (mTexture != nullptr) {
- mTexture->APIDestroy();
- mTexture = nullptr;
+ if (mTexture != nullptr) {
+ mTexture->APIDestroy();
+ mTexture = nullptr;
- mCurrentDrawable = nullptr;
- }
+ mCurrentDrawable = nullptr;
}
+}
} // namespace dawn::native::metal
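
For reference, the rewritten SwapChain::Initialize above amounts to configuring the surface's CAMetalLayer directly. A minimal standalone sketch of that configuration follows (the helper name, its parameters, and the hard-coded BGRA8Unorm format are illustrative assumptions, not Dawn API):

    #import <Metal/Metal.h>
    #import <QuartzCore/CAMetalLayer.h>
    #include <TargetConditionals.h>

    // Sketch of the CAMetalLayer setup performed by SwapChain::Initialize.
    static void ConfigureLayerForSwapChain(CAMetalLayer* layer,
                                           id<MTLDevice> device,
                                           CGSize size,
                                           bool renderAttachmentOnly,
                                           bool vsync) {
        [layer setDrawableSize:size];
        // framebufferOnly is only safe when the swapchain is used purely as a
        // render attachment.
        [layer setFramebufferOnly:renderAttachmentOnly];
        [layer setDevice:device];
        [layer setPixelFormat:MTLPixelFormatBGRA8Unorm];  // stand-in for MetalPixelFormat(GetFormat())
    #if TARGET_OS_OSX
        if (@available(macOS 10.13, *)) {
            // false corresponds to wgpu::PresentMode::Immediate; Fifo vs. Mailbox
            // cannot be controlled in Metal.
            [layer setDisplaySyncEnabled:vsync];
        }
    #endif
    }
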
diff --git a/chromium/third_party/dawn/src/dawn/native/metal/TextureMTL.h b/chromium/third_party/dawn/src/dawn/native/metal/TextureMTL.h
index cf2e6374845..3a9c3d8f351 100644
--- a/chromium/third_party/dawn/src/dawn/native/metal/TextureMTL.h
+++ b/chromium/third_party/dawn/src/dawn/native/metal/TextureMTL.h
@@ -15,90 +15,90 @@
#ifndef SRC_DAWN_NATIVE_METAL_TEXTUREMTL_H_
#define SRC_DAWN_NATIVE_METAL_TEXTUREMTL_H_
+#include <IOSurface/IOSurfaceRef.h>
+#import <Metal/Metal.h>
+
#include "dawn/native/Texture.h"
#include "dawn/common/CoreFoundationRef.h"
#include "dawn/common/NSRef.h"
#include "dawn/native/DawnNative.h"
-#include <IOSurface/IOSurfaceRef.h>
-#import <Metal/Metal.h>
-
namespace dawn::native::metal {
- class CommandRecordingContext;
- class Device;
-
- MTLPixelFormat MetalPixelFormat(wgpu::TextureFormat format);
- MaybeError ValidateIOSurfaceCanBeWrapped(const DeviceBase* device,
- const TextureDescriptor* descriptor,
- IOSurfaceRef ioSurface);
+class CommandRecordingContext;
+class Device;
- class Texture final : public TextureBase {
- public:
- static ResultOrError<Ref<Texture>> Create(Device* device,
- const TextureDescriptor* descriptor);
- static ResultOrError<Ref<Texture>> CreateFromIOSurface(
- Device* device,
- const ExternalImageDescriptor* descriptor,
- IOSurfaceRef ioSurface);
- static Ref<Texture> CreateWrapping(Device* device,
- const TextureDescriptor* descriptor,
- NSPRef<id<MTLTexture>> wrapped);
+MTLPixelFormat MetalPixelFormat(wgpu::TextureFormat format);
+MaybeError ValidateIOSurfaceCanBeWrapped(const DeviceBase* device,
+ const TextureDescriptor* descriptor,
+ IOSurfaceRef ioSurface);
- id<MTLTexture> GetMTLTexture() const;
- IOSurfaceRef GetIOSurface();
- NSPRef<id<MTLTexture>> CreateFormatView(wgpu::TextureFormat format);
+class Texture final : public TextureBase {
+ public:
+ static ResultOrError<Ref<Texture>> Create(Device* device, const TextureDescriptor* descriptor);
+ static ResultOrError<Ref<Texture>> CreateFromIOSurface(
+ Device* device,
+ const ExternalImageDescriptor* descriptor,
+ IOSurfaceRef ioSurface);
+ static Ref<Texture> CreateWrapping(Device* device,
+ const TextureDescriptor* descriptor,
+ NSPRef<id<MTLTexture>> wrapped);
- void EnsureSubresourceContentInitialized(CommandRecordingContext* commandContext,
- const SubresourceRange& range);
+ Texture(DeviceBase* device, const TextureDescriptor* descriptor, TextureState state);
- private:
- using TextureBase::TextureBase;
- ~Texture() override;
+ id<MTLTexture> GetMTLTexture() const;
+ IOSurfaceRef GetIOSurface();
+ NSPRef<id<MTLTexture>> CreateFormatView(wgpu::TextureFormat format);
- NSRef<MTLTextureDescriptor> CreateMetalTextureDescriptor() const;
+ void EnsureSubresourceContentInitialized(CommandRecordingContext* commandContext,
+ const SubresourceRange& range);
- MaybeError InitializeAsInternalTexture(const TextureDescriptor* descriptor);
- MaybeError InitializeFromIOSurface(const ExternalImageDescriptor* descriptor,
- const TextureDescriptor* textureDescriptor,
- IOSurfaceRef ioSurface);
- void InitializeAsWrapping(const TextureDescriptor* descriptor,
- NSPRef<id<MTLTexture>> wrapped);
+ private:
+ using TextureBase::TextureBase;
+ ~Texture() override;
- void DestroyImpl() override;
+ NSRef<MTLTextureDescriptor> CreateMetalTextureDescriptor() const;
- MaybeError ClearTexture(CommandRecordingContext* commandContext,
- const SubresourceRange& range,
- TextureBase::ClearValue clearValue);
+ MaybeError InitializeAsInternalTexture(const TextureDescriptor* descriptor);
+ MaybeError InitializeFromIOSurface(const ExternalImageDescriptor* descriptor,
+ const TextureDescriptor* textureDescriptor,
+ IOSurfaceRef ioSurface);
+ void InitializeAsWrapping(const TextureDescriptor* descriptor, NSPRef<id<MTLTexture>> wrapped);
- NSPRef<id<MTLTexture>> mMtlTexture;
+ void DestroyImpl() override;
- MTLTextureUsage mMtlUsage;
- CFRef<IOSurfaceRef> mIOSurface = nullptr;
- };
+ MaybeError ClearTexture(CommandRecordingContext* commandContext,
+ const SubresourceRange& range,
+ TextureBase::ClearValue clearValue);
- class TextureView final : public TextureViewBase {
- public:
- static ResultOrError<Ref<TextureView>> Create(TextureBase* texture,
- const TextureViewDescriptor* descriptor);
+ NSPRef<id<MTLTexture>> mMtlTexture;
- id<MTLTexture> GetMTLTexture() const;
+ MTLTextureUsage mMtlUsage;
+ CFRef<IOSurfaceRef> mIOSurface = nullptr;
+};
- struct AttachmentInfo {
- NSPRef<id<MTLTexture>> texture;
- uint32_t baseMipLevel;
- uint32_t baseArrayLayer;
- };
- AttachmentInfo GetAttachmentInfo() const;
+class TextureView final : public TextureViewBase {
+ public:
+ static ResultOrError<Ref<TextureView>> Create(TextureBase* texture,
+ const TextureViewDescriptor* descriptor);
- private:
- using TextureViewBase::TextureViewBase;
- MaybeError Initialize(const TextureViewDescriptor* descriptor);
+ id<MTLTexture> GetMTLTexture() const;
- // TODO(crbug.com/dawn/1355): Clear this reference on texture destroy.
- NSPRef<id<MTLTexture>> mMtlTextureView;
+ struct AttachmentInfo {
+ NSPRef<id<MTLTexture>> texture;
+ uint32_t baseMipLevel;
+ uint32_t baseArrayLayer;
};
+ AttachmentInfo GetAttachmentInfo() const;
+
+ private:
+ using TextureViewBase::TextureViewBase;
+ MaybeError Initialize(const TextureViewDescriptor* descriptor);
+
+ // TODO(crbug.com/dawn/1355): Clear this reference on texture destroy.
+ NSPRef<id<MTLTexture>> mMtlTextureView;
+};
} // namespace dawn::native::metal
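
The TextureMTL.mm diff that follows keeps IOSurface interop on the same underlying Metal call: wrapping a plane of an IOSurface with newTextureWithDescriptor:iosurface:plane:. A minimal sketch of that call in isolation (the helper name and the fixed BGRA8Unorm format and usage are illustrative assumptions; Dawn derives them from the texture descriptor):

    #import <Metal/Metal.h>
    #include <IOSurface/IOSurfaceRef.h>
    #include <TargetConditionals.h>

    // Sketch: wrap plane 0 of an IOSurface in an MTLTexture, roughly what
    // Texture::InitializeFromIOSurface does for non-multiplanar formats.
    static id<MTLTexture> WrapIOSurfacePlane0(id<MTLDevice> device, IOSurfaceRef ioSurface) {
        MTLTextureDescriptor* desc = [MTLTextureDescriptor
            texture2DDescriptorWithPixelFormat:MTLPixelFormatBGRA8Unorm
                                         width:IOSurfaceGetWidth(ioSurface)
                                        height:IOSurfaceGetHeight(ioSurface)
                                     mipmapped:NO];
        desc.usage = MTLTextureUsageShaderRead | MTLTextureUsageRenderTarget;
    #if TARGET_OS_OSX
        desc.storageMode = MTLStorageModeManaged;  // matches kIOSurfaceStorageMode on macOS
    #else
        desc.storageMode = MTLStorageModePrivate;  // matches kIOSurfaceStorageMode on iOS
    #endif
        return [device newTextureWithDescriptor:desc iosurface:ioSurface plane:0];
    }
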
diff --git a/chromium/third_party/dawn/src/dawn/native/metal/TextureMTL.mm b/chromium/third_party/dawn/src/dawn/native/metal/TextureMTL.mm
index dc04324a9be..06e525b1f5d 100644
--- a/chromium/third_party/dawn/src/dawn/native/metal/TextureMTL.mm
+++ b/chromium/third_party/dawn/src/dawn/native/metal/TextureMTL.mm
@@ -27,1128 +27,1108 @@
namespace dawn::native::metal {
- namespace {
+namespace {
- MTLTextureUsage MetalTextureUsage(const Format& format,
- wgpu::TextureUsage usage,
- uint32_t sampleCount) {
- MTLTextureUsage result = MTLTextureUsageUnknown; // This is 0
+MTLTextureUsage MetalTextureUsage(const Format& format, wgpu::TextureUsage usage) {
+ MTLTextureUsage result = MTLTextureUsageUnknown; // This is 0
- if (usage & (wgpu::TextureUsage::StorageBinding)) {
- result |= MTLTextureUsageShaderWrite | MTLTextureUsageShaderRead;
- }
-
- if (usage & (wgpu::TextureUsage::TextureBinding)) {
- result |= MTLTextureUsageShaderRead;
-
- // For sampling stencil aspect of combined depth/stencil.
- // See TextureView::Initialize.
- // Depth views for depth/stencil textures in Metal simply use the original
- // texture's format, but stencil views require format reinterpretation.
- if (@available(macOS 10.12, iOS 10.0, *)) {
- if (IsSubset(Aspect::Depth | Aspect::Stencil, format.aspects)) {
- result |= MTLTextureUsagePixelFormatView;
- }
- }
- }
-
- // MTLTextureUsageRenderTarget is needed to clear multisample textures.
- if (usage & (wgpu::TextureUsage::RenderAttachment) || sampleCount > 1) {
- result |= MTLTextureUsageRenderTarget;
- }
+ if (usage & (wgpu::TextureUsage::StorageBinding)) {
+ result |= MTLTextureUsageShaderWrite | MTLTextureUsageShaderRead;
+ }
- return result;
- }
+ if (usage & (wgpu::TextureUsage::TextureBinding)) {
+ result |= MTLTextureUsageShaderRead;
- MTLTextureType MetalTextureViewType(wgpu::TextureViewDimension dimension,
- unsigned int sampleCount) {
- switch (dimension) {
- case wgpu::TextureViewDimension::e1D:
- return MTLTextureType1D;
- case wgpu::TextureViewDimension::e2D:
- return (sampleCount > 1) ? MTLTextureType2DMultisample : MTLTextureType2D;
- case wgpu::TextureViewDimension::e2DArray:
- return MTLTextureType2DArray;
- case wgpu::TextureViewDimension::Cube:
- return MTLTextureTypeCube;
- case wgpu::TextureViewDimension::CubeArray:
- return MTLTextureTypeCubeArray;
- case wgpu::TextureViewDimension::e3D:
- return MTLTextureType3D;
-
- case wgpu::TextureViewDimension::Undefined:
- UNREACHABLE();
+ // For sampling stencil aspect of combined depth/stencil.
+ // See TextureView::Initialize.
+ // Depth views for depth/stencil textures in Metal simply use the original
+ // texture's format, but stencil views require format reinterpretation.
+ if (@available(macOS 10.12, iOS 10.0, *)) {
+ if (IsSubset(Aspect::Depth | Aspect::Stencil, format.aspects)) {
+ result |= MTLTextureUsagePixelFormatView;
}
}
+ }
- bool RequiresCreatingNewTextureView(const TextureBase* texture,
- const TextureViewDescriptor* textureViewDescriptor) {
- constexpr wgpu::TextureUsage kShaderUsageNeedsView =
- wgpu::TextureUsage::StorageBinding | wgpu::TextureUsage::TextureBinding;
- constexpr wgpu::TextureUsage kUsageNeedsView =
- kShaderUsageNeedsView | wgpu::TextureUsage::RenderAttachment;
- if ((texture->GetInternalUsage() & kUsageNeedsView) == 0) {
- return false;
- }
+ if (usage & wgpu::TextureUsage::RenderAttachment) {
+ result |= MTLTextureUsageRenderTarget;
+ }
- if (texture->GetFormat().format != textureViewDescriptor->format &&
- !texture->GetFormat().HasDepthOrStencil()) {
- // Color format reinterpretation required.
- // Note: Depth/stencil formats don't support reinterpretation.
- // See also TextureView::GetAttachmentInfo when modifying this condition.
- return true;
- }
+ return result;
+}
+
+MTLTextureType MetalTextureViewType(wgpu::TextureViewDimension dimension,
+ unsigned int sampleCount) {
+ switch (dimension) {
+ case wgpu::TextureViewDimension::e1D:
+ return MTLTextureType1D;
+ case wgpu::TextureViewDimension::e2D:
+ return (sampleCount > 1) ? MTLTextureType2DMultisample : MTLTextureType2D;
+ case wgpu::TextureViewDimension::e2DArray:
+ return MTLTextureType2DArray;
+ case wgpu::TextureViewDimension::Cube:
+ return MTLTextureTypeCube;
+ case wgpu::TextureViewDimension::CubeArray:
+ return MTLTextureTypeCubeArray;
+ case wgpu::TextureViewDimension::e3D:
+ return MTLTextureType3D;
+
+ case wgpu::TextureViewDimension::Undefined:
+ UNREACHABLE();
+ }
+}
+
+bool RequiresCreatingNewTextureView(const TextureBase* texture,
+ const TextureViewDescriptor* textureViewDescriptor) {
+ constexpr wgpu::TextureUsage kShaderUsageNeedsView =
+ wgpu::TextureUsage::StorageBinding | wgpu::TextureUsage::TextureBinding;
+ constexpr wgpu::TextureUsage kUsageNeedsView =
+ kShaderUsageNeedsView | wgpu::TextureUsage::RenderAttachment;
+ if ((texture->GetInternalUsage() & kUsageNeedsView) == 0) {
+ return false;
+ }
- // Reinterpretation not required. Now, we only need a new view if the view dimension or
- // set of subresources for the shader is different from the base texture.
- if ((texture->GetInternalUsage() & kShaderUsageNeedsView) == 0) {
- return false;
- }
+ if (texture->GetFormat().format != textureViewDescriptor->format &&
+ !texture->GetFormat().HasDepthOrStencil()) {
+ // Color format reinterpretation required.
+ // Note: Depth/stencil formats don't support reinterpretation.
+ // See also TextureView::GetAttachmentInfo when modifying this condition.
+ return true;
+ }
- if (texture->GetArrayLayers() != textureViewDescriptor->arrayLayerCount ||
- (texture->GetArrayLayers() == 1 &&
- texture->GetDimension() == wgpu::TextureDimension::e2D &&
- textureViewDescriptor->dimension == wgpu::TextureViewDimension::e2DArray)) {
- // If the view has a different number of array layers, we need a new view.
- // And, if the original texture is a 2D texture with one array layer, we need a new
- // view to view it as a 2D array texture.
- return true;
- }
+ // Reinterpretation not required. Now, we only need a new view if the view dimension or
+ // set of subresources for the shader is different from the base texture.
+ if ((texture->GetInternalUsage() & kShaderUsageNeedsView) == 0) {
+ return false;
+ }
- if (texture->GetNumMipLevels() != textureViewDescriptor->mipLevelCount) {
- return true;
- }
+ if (texture->GetArrayLayers() != textureViewDescriptor->arrayLayerCount ||
+ (texture->GetArrayLayers() == 1 && texture->GetDimension() == wgpu::TextureDimension::e2D &&
+ textureViewDescriptor->dimension == wgpu::TextureViewDimension::e2DArray)) {
+ // If the view has a different number of array layers, we need a new view.
+ // And, if the original texture is a 2D texture with one array layer, we need a new
+ // view to view it as a 2D array texture.
+ return true;
+ }
- // If the texture is created with MTLTextureUsagePixelFormatView, we need
- // a new view to perform format reinterpretation.
- if ((MetalTextureUsage(texture->GetFormat(), texture->GetInternalUsage(),
- texture->GetSampleCount()) &
- MTLTextureUsagePixelFormatView) != 0) {
- return true;
- }
+ if (texture->GetNumMipLevels() != textureViewDescriptor->mipLevelCount) {
+ return true;
+ }
- switch (textureViewDescriptor->dimension) {
- case wgpu::TextureViewDimension::Cube:
- case wgpu::TextureViewDimension::CubeArray:
- return true;
- default:
- break;
- }
+ // If the texture is created with MTLTextureUsagePixelFormatView, we need
+ // a new view to perform format reinterpretation.
+ if ((MetalTextureUsage(texture->GetFormat(), texture->GetInternalUsage()) &
+ MTLTextureUsagePixelFormatView) != 0) {
+ return true;
+ }
- return false;
- }
+ switch (textureViewDescriptor->dimension) {
+ case wgpu::TextureViewDimension::Cube:
+ case wgpu::TextureViewDimension::CubeArray:
+ return true;
+ default:
+ break;
+ }
- // Metal only allows format reinterpretation to happen on swizzle pattern or conversion
- // between linear space and sRGB without setting MTLTextureUsagePixelFormatView flag. For
- // example, creating bgra8Unorm texture view on rgba8Unorm texture or creating
- // rgba8Unorm_srgb texture view on rgab8Unorm texture.
- bool AllowFormatReinterpretationWithoutFlag(MTLPixelFormat origin,
- MTLPixelFormat reinterpretation) {
- switch (origin) {
- case MTLPixelFormatRGBA8Unorm:
- return reinterpretation == MTLPixelFormatBGRA8Unorm ||
- reinterpretation == MTLPixelFormatRGBA8Unorm_sRGB;
- case MTLPixelFormatBGRA8Unorm:
- return reinterpretation == MTLPixelFormatRGBA8Unorm ||
- reinterpretation == MTLPixelFormatBGRA8Unorm_sRGB;
- case MTLPixelFormatRGBA8Unorm_sRGB:
- return reinterpretation == MTLPixelFormatBGRA8Unorm_sRGB ||
- reinterpretation == MTLPixelFormatRGBA8Unorm;
- case MTLPixelFormatBGRA8Unorm_sRGB:
- return reinterpretation == MTLPixelFormatRGBA8Unorm_sRGB ||
- reinterpretation == MTLPixelFormatBGRA8Unorm;
-#if defined(DAWN_PLATFORM_MACOS)
- case MTLPixelFormatBC1_RGBA:
- return reinterpretation == MTLPixelFormatBC1_RGBA_sRGB;
- case MTLPixelFormatBC1_RGBA_sRGB:
- return reinterpretation == MTLPixelFormatBC1_RGBA;
- case MTLPixelFormatBC2_RGBA:
- return reinterpretation == MTLPixelFormatBC2_RGBA_sRGB;
- case MTLPixelFormatBC2_RGBA_sRGB:
- return reinterpretation == MTLPixelFormatBC2_RGBA;
- case MTLPixelFormatBC3_RGBA:
- return reinterpretation == MTLPixelFormatBC3_RGBA_sRGB;
- case MTLPixelFormatBC3_RGBA_sRGB:
- return reinterpretation == MTLPixelFormatBC3_RGBA;
- case MTLPixelFormatBC7_RGBAUnorm:
- return reinterpretation == MTLPixelFormatBC7_RGBAUnorm_sRGB;
- case MTLPixelFormatBC7_RGBAUnorm_sRGB:
- return reinterpretation == MTLPixelFormatBC7_RGBAUnorm;
+ return false;
+}
+
+// Without setting the MTLTextureUsagePixelFormatView flag, Metal only allows format
+// reinterpretation between swizzle patterns or between linear space and sRGB. For
+// example, creating a bgra8Unorm texture view on an rgba8Unorm texture, or creating an
+// rgba8Unorm_srgb texture view on an rgba8Unorm texture.
+bool AllowFormatReinterpretationWithoutFlag(MTLPixelFormat origin,
+ MTLPixelFormat reinterpretation) {
+ switch (origin) {
+ case MTLPixelFormatRGBA8Unorm:
+ return reinterpretation == MTLPixelFormatBGRA8Unorm ||
+ reinterpretation == MTLPixelFormatRGBA8Unorm_sRGB;
+ case MTLPixelFormatBGRA8Unorm:
+ return reinterpretation == MTLPixelFormatRGBA8Unorm ||
+ reinterpretation == MTLPixelFormatBGRA8Unorm_sRGB;
+ case MTLPixelFormatRGBA8Unorm_sRGB:
+ return reinterpretation == MTLPixelFormatBGRA8Unorm_sRGB ||
+ reinterpretation == MTLPixelFormatRGBA8Unorm;
+ case MTLPixelFormatBGRA8Unorm_sRGB:
+ return reinterpretation == MTLPixelFormatRGBA8Unorm_sRGB ||
+ reinterpretation == MTLPixelFormatBGRA8Unorm;
+#if DAWN_PLATFORM_IS(MACOS)
+ case MTLPixelFormatBC1_RGBA:
+ return reinterpretation == MTLPixelFormatBC1_RGBA_sRGB;
+ case MTLPixelFormatBC1_RGBA_sRGB:
+ return reinterpretation == MTLPixelFormatBC1_RGBA;
+ case MTLPixelFormatBC2_RGBA:
+ return reinterpretation == MTLPixelFormatBC2_RGBA_sRGB;
+ case MTLPixelFormatBC2_RGBA_sRGB:
+ return reinterpretation == MTLPixelFormatBC2_RGBA;
+ case MTLPixelFormatBC3_RGBA:
+ return reinterpretation == MTLPixelFormatBC3_RGBA_sRGB;
+ case MTLPixelFormatBC3_RGBA_sRGB:
+ return reinterpretation == MTLPixelFormatBC3_RGBA;
+ case MTLPixelFormatBC7_RGBAUnorm:
+ return reinterpretation == MTLPixelFormatBC7_RGBAUnorm_sRGB;
+ case MTLPixelFormatBC7_RGBAUnorm_sRGB:
+ return reinterpretation == MTLPixelFormatBC7_RGBAUnorm;
#endif
- default:
- return false;
- }
- }
-
- ResultOrError<wgpu::TextureFormat> GetFormatEquivalentToIOSurfaceFormat(uint32_t format) {
- switch (format) {
- case kCVPixelFormatType_64RGBAHalf:
- return wgpu::TextureFormat::RGBA16Float;
- case kCVPixelFormatType_TwoComponent16Half:
- return wgpu::TextureFormat::RG16Float;
- case kCVPixelFormatType_OneComponent16Half:
- return wgpu::TextureFormat::R16Float;
- case kCVPixelFormatType_ARGB2101010LEPacked:
- return wgpu::TextureFormat::RGB10A2Unorm;
- case kCVPixelFormatType_32RGBA:
- return wgpu::TextureFormat::RGBA8Unorm;
- case kCVPixelFormatType_32BGRA:
- return wgpu::TextureFormat::BGRA8Unorm;
- case kCVPixelFormatType_TwoComponent8:
- return wgpu::TextureFormat::RG8Unorm;
- case kCVPixelFormatType_OneComponent8:
- return wgpu::TextureFormat::R8Unorm;
- case kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange:
- return wgpu::TextureFormat::R8BG8Biplanar420Unorm;
- default:
- return DAWN_FORMAT_VALIDATION_ERROR("Unsupported IOSurface format (%x).",
- format);
- }
- }
-
- uint32_t GetIOSurfacePlane(wgpu::TextureAspect aspect) {
- switch (aspect) {
- case wgpu::TextureAspect::Plane0Only:
- return 0;
- case wgpu::TextureAspect::Plane1Only:
- return 1;
- default:
- UNREACHABLE();
- }
- }
+ default:
+ return false;
+ }
+}
+
+ResultOrError<wgpu::TextureFormat> GetFormatEquivalentToIOSurfaceFormat(uint32_t format) {
+ switch (format) {
+ case kCVPixelFormatType_64RGBAHalf:
+ return wgpu::TextureFormat::RGBA16Float;
+ case kCVPixelFormatType_TwoComponent16Half:
+ return wgpu::TextureFormat::RG16Float;
+ case kCVPixelFormatType_OneComponent16Half:
+ return wgpu::TextureFormat::R16Float;
+ case kCVPixelFormatType_ARGB2101010LEPacked:
+ return wgpu::TextureFormat::RGB10A2Unorm;
+ case kCVPixelFormatType_32RGBA:
+ return wgpu::TextureFormat::RGBA8Unorm;
+ case kCVPixelFormatType_32BGRA:
+ return wgpu::TextureFormat::BGRA8Unorm;
+ case kCVPixelFormatType_TwoComponent8:
+ return wgpu::TextureFormat::RG8Unorm;
+ case kCVPixelFormatType_OneComponent8:
+ return wgpu::TextureFormat::R8Unorm;
+ case kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange:
+ return wgpu::TextureFormat::R8BG8Biplanar420Unorm;
+ default:
+ return DAWN_FORMAT_VALIDATION_ERROR("Unsupported IOSurface format (%x).", format);
+ }
+}
+
+uint32_t GetIOSurfacePlane(wgpu::TextureAspect aspect) {
+ switch (aspect) {
+ case wgpu::TextureAspect::Plane0Only:
+ return 0;
+ case wgpu::TextureAspect::Plane1Only:
+ return 1;
+ default:
+ UNREACHABLE();
+ }
+}
-#if defined(DAWN_PLATFORM_MACOS)
- MTLStorageMode kIOSurfaceStorageMode = MTLStorageModeManaged;
-#elif defined(DAWN_PLATFORM_IOS)
- MTLStorageMode kIOSurfaceStorageMode = MTLStorageModePrivate;
+#if DAWN_PLATFORM_IS(MACOS)
+MTLStorageMode kIOSurfaceStorageMode = MTLStorageModeManaged;
+#elif DAWN_PLATFORM_IS(IOS)
+MTLStorageMode kIOSurfaceStorageMode = MTLStorageModePrivate;
#else
-# error "Unsupported Apple platform."
+#error "Unsupported Apple platform."
#endif
- }
-
- MTLPixelFormat MetalPixelFormat(wgpu::TextureFormat format) {
- switch (format) {
- case wgpu::TextureFormat::R8Unorm:
- return MTLPixelFormatR8Unorm;
- case wgpu::TextureFormat::R8Snorm:
- return MTLPixelFormatR8Snorm;
- case wgpu::TextureFormat::R8Uint:
- return MTLPixelFormatR8Uint;
- case wgpu::TextureFormat::R8Sint:
- return MTLPixelFormatR8Sint;
-
- case wgpu::TextureFormat::R16Uint:
- return MTLPixelFormatR16Uint;
- case wgpu::TextureFormat::R16Sint:
- return MTLPixelFormatR16Sint;
- case wgpu::TextureFormat::R16Float:
- return MTLPixelFormatR16Float;
- case wgpu::TextureFormat::RG8Unorm:
- return MTLPixelFormatRG8Unorm;
- case wgpu::TextureFormat::RG8Snorm:
- return MTLPixelFormatRG8Snorm;
- case wgpu::TextureFormat::RG8Uint:
- return MTLPixelFormatRG8Uint;
- case wgpu::TextureFormat::RG8Sint:
- return MTLPixelFormatRG8Sint;
-
- case wgpu::TextureFormat::R32Uint:
- return MTLPixelFormatR32Uint;
- case wgpu::TextureFormat::R32Sint:
- return MTLPixelFormatR32Sint;
- case wgpu::TextureFormat::R32Float:
- return MTLPixelFormatR32Float;
- case wgpu::TextureFormat::RG16Uint:
- return MTLPixelFormatRG16Uint;
- case wgpu::TextureFormat::RG16Sint:
- return MTLPixelFormatRG16Sint;
- case wgpu::TextureFormat::RG16Float:
- return MTLPixelFormatRG16Float;
- case wgpu::TextureFormat::RGBA8Unorm:
- return MTLPixelFormatRGBA8Unorm;
- case wgpu::TextureFormat::RGBA8UnormSrgb:
- return MTLPixelFormatRGBA8Unorm_sRGB;
- case wgpu::TextureFormat::RGBA8Snorm:
- return MTLPixelFormatRGBA8Snorm;
- case wgpu::TextureFormat::RGBA8Uint:
- return MTLPixelFormatRGBA8Uint;
- case wgpu::TextureFormat::RGBA8Sint:
- return MTLPixelFormatRGBA8Sint;
- case wgpu::TextureFormat::BGRA8Unorm:
- return MTLPixelFormatBGRA8Unorm;
- case wgpu::TextureFormat::BGRA8UnormSrgb:
- return MTLPixelFormatBGRA8Unorm_sRGB;
- case wgpu::TextureFormat::RGB10A2Unorm:
- return MTLPixelFormatRGB10A2Unorm;
- case wgpu::TextureFormat::RG11B10Ufloat:
- return MTLPixelFormatRG11B10Float;
- case wgpu::TextureFormat::RGB9E5Ufloat:
- return MTLPixelFormatRGB9E5Float;
-
- case wgpu::TextureFormat::RG32Uint:
- return MTLPixelFormatRG32Uint;
- case wgpu::TextureFormat::RG32Sint:
- return MTLPixelFormatRG32Sint;
- case wgpu::TextureFormat::RG32Float:
- return MTLPixelFormatRG32Float;
- case wgpu::TextureFormat::RGBA16Uint:
- return MTLPixelFormatRGBA16Uint;
- case wgpu::TextureFormat::RGBA16Sint:
- return MTLPixelFormatRGBA16Sint;
- case wgpu::TextureFormat::RGBA16Float:
- return MTLPixelFormatRGBA16Float;
-
- case wgpu::TextureFormat::RGBA32Uint:
- return MTLPixelFormatRGBA32Uint;
- case wgpu::TextureFormat::RGBA32Sint:
- return MTLPixelFormatRGBA32Sint;
- case wgpu::TextureFormat::RGBA32Float:
- return MTLPixelFormatRGBA32Float;
-
- case wgpu::TextureFormat::Depth32Float:
- return MTLPixelFormatDepth32Float;
- case wgpu::TextureFormat::Depth24Plus:
- return MTLPixelFormatDepth32Float;
- case wgpu::TextureFormat::Depth24PlusStencil8:
- case wgpu::TextureFormat::Depth32FloatStencil8:
- return MTLPixelFormatDepth32Float_Stencil8;
- case wgpu::TextureFormat::Depth16Unorm:
- if (@available(macOS 10.12, iOS 13.0, *)) {
- return MTLPixelFormatDepth16Unorm;
- } else {
- // TODO (dawn:1181): Allow non-conformant implementation on macOS 10.11
- UNREACHABLE();
- }
- case wgpu::TextureFormat::Stencil8:
- return MTLPixelFormatStencil8;
-
-#if defined(DAWN_PLATFORM_MACOS)
- case wgpu::TextureFormat::Depth24UnormStencil8:
- return MTLPixelFormatDepth24Unorm_Stencil8;
-
- case wgpu::TextureFormat::BC1RGBAUnorm:
- return MTLPixelFormatBC1_RGBA;
- case wgpu::TextureFormat::BC1RGBAUnormSrgb:
- return MTLPixelFormatBC1_RGBA_sRGB;
- case wgpu::TextureFormat::BC2RGBAUnorm:
- return MTLPixelFormatBC2_RGBA;
- case wgpu::TextureFormat::BC2RGBAUnormSrgb:
- return MTLPixelFormatBC2_RGBA_sRGB;
- case wgpu::TextureFormat::BC3RGBAUnorm:
- return MTLPixelFormatBC3_RGBA;
- case wgpu::TextureFormat::BC3RGBAUnormSrgb:
- return MTLPixelFormatBC3_RGBA_sRGB;
- case wgpu::TextureFormat::BC4RSnorm:
- return MTLPixelFormatBC4_RSnorm;
- case wgpu::TextureFormat::BC4RUnorm:
- return MTLPixelFormatBC4_RUnorm;
- case wgpu::TextureFormat::BC5RGSnorm:
- return MTLPixelFormatBC5_RGSnorm;
- case wgpu::TextureFormat::BC5RGUnorm:
- return MTLPixelFormatBC5_RGUnorm;
- case wgpu::TextureFormat::BC6HRGBFloat:
- return MTLPixelFormatBC6H_RGBFloat;
- case wgpu::TextureFormat::BC6HRGBUfloat:
- return MTLPixelFormatBC6H_RGBUfloat;
- case wgpu::TextureFormat::BC7RGBAUnorm:
- return MTLPixelFormatBC7_RGBAUnorm;
- case wgpu::TextureFormat::BC7RGBAUnormSrgb:
- return MTLPixelFormatBC7_RGBAUnorm_sRGB;
+} // namespace
+
+MTLPixelFormat MetalPixelFormat(wgpu::TextureFormat format) {
+ switch (format) {
+ case wgpu::TextureFormat::R8Unorm:
+ return MTLPixelFormatR8Unorm;
+ case wgpu::TextureFormat::R8Snorm:
+ return MTLPixelFormatR8Snorm;
+ case wgpu::TextureFormat::R8Uint:
+ return MTLPixelFormatR8Uint;
+ case wgpu::TextureFormat::R8Sint:
+ return MTLPixelFormatR8Sint;
+
+ case wgpu::TextureFormat::R16Uint:
+ return MTLPixelFormatR16Uint;
+ case wgpu::TextureFormat::R16Sint:
+ return MTLPixelFormatR16Sint;
+ case wgpu::TextureFormat::R16Float:
+ return MTLPixelFormatR16Float;
+ case wgpu::TextureFormat::RG8Unorm:
+ return MTLPixelFormatRG8Unorm;
+ case wgpu::TextureFormat::RG8Snorm:
+ return MTLPixelFormatRG8Snorm;
+ case wgpu::TextureFormat::RG8Uint:
+ return MTLPixelFormatRG8Uint;
+ case wgpu::TextureFormat::RG8Sint:
+ return MTLPixelFormatRG8Sint;
+
+ case wgpu::TextureFormat::R32Uint:
+ return MTLPixelFormatR32Uint;
+ case wgpu::TextureFormat::R32Sint:
+ return MTLPixelFormatR32Sint;
+ case wgpu::TextureFormat::R32Float:
+ return MTLPixelFormatR32Float;
+ case wgpu::TextureFormat::RG16Uint:
+ return MTLPixelFormatRG16Uint;
+ case wgpu::TextureFormat::RG16Sint:
+ return MTLPixelFormatRG16Sint;
+ case wgpu::TextureFormat::RG16Float:
+ return MTLPixelFormatRG16Float;
+ case wgpu::TextureFormat::RGBA8Unorm:
+ return MTLPixelFormatRGBA8Unorm;
+ case wgpu::TextureFormat::RGBA8UnormSrgb:
+ return MTLPixelFormatRGBA8Unorm_sRGB;
+ case wgpu::TextureFormat::RGBA8Snorm:
+ return MTLPixelFormatRGBA8Snorm;
+ case wgpu::TextureFormat::RGBA8Uint:
+ return MTLPixelFormatRGBA8Uint;
+ case wgpu::TextureFormat::RGBA8Sint:
+ return MTLPixelFormatRGBA8Sint;
+ case wgpu::TextureFormat::BGRA8Unorm:
+ return MTLPixelFormatBGRA8Unorm;
+ case wgpu::TextureFormat::BGRA8UnormSrgb:
+ return MTLPixelFormatBGRA8Unorm_sRGB;
+ case wgpu::TextureFormat::RGB10A2Unorm:
+ return MTLPixelFormatRGB10A2Unorm;
+ case wgpu::TextureFormat::RG11B10Ufloat:
+ return MTLPixelFormatRG11B10Float;
+ case wgpu::TextureFormat::RGB9E5Ufloat:
+ return MTLPixelFormatRGB9E5Float;
+
+ case wgpu::TextureFormat::RG32Uint:
+ return MTLPixelFormatRG32Uint;
+ case wgpu::TextureFormat::RG32Sint:
+ return MTLPixelFormatRG32Sint;
+ case wgpu::TextureFormat::RG32Float:
+ return MTLPixelFormatRG32Float;
+ case wgpu::TextureFormat::RGBA16Uint:
+ return MTLPixelFormatRGBA16Uint;
+ case wgpu::TextureFormat::RGBA16Sint:
+ return MTLPixelFormatRGBA16Sint;
+ case wgpu::TextureFormat::RGBA16Float:
+ return MTLPixelFormatRGBA16Float;
+
+ case wgpu::TextureFormat::RGBA32Uint:
+ return MTLPixelFormatRGBA32Uint;
+ case wgpu::TextureFormat::RGBA32Sint:
+ return MTLPixelFormatRGBA32Sint;
+ case wgpu::TextureFormat::RGBA32Float:
+ return MTLPixelFormatRGBA32Float;
+
+ case wgpu::TextureFormat::Depth32Float:
+ return MTLPixelFormatDepth32Float;
+ case wgpu::TextureFormat::Depth24Plus:
+ return MTLPixelFormatDepth32Float;
+ case wgpu::TextureFormat::Depth24PlusStencil8:
+ case wgpu::TextureFormat::Depth32FloatStencil8:
+ return MTLPixelFormatDepth32Float_Stencil8;
+ case wgpu::TextureFormat::Depth16Unorm:
+ if (@available(macOS 10.12, iOS 13.0, *)) {
+ return MTLPixelFormatDepth16Unorm;
+ } else {
+ // TODO(dawn:1181): Allow non-conformant implementation on macOS 10.11
+ UNREACHABLE();
+ }
+ case wgpu::TextureFormat::Stencil8:
+ return MTLPixelFormatStencil8;
+
+#if DAWN_PLATFORM_IS(MACOS)
+ case wgpu::TextureFormat::Depth24UnormStencil8:
+ return MTLPixelFormatDepth24Unorm_Stencil8;
+
+ case wgpu::TextureFormat::BC1RGBAUnorm:
+ return MTLPixelFormatBC1_RGBA;
+ case wgpu::TextureFormat::BC1RGBAUnormSrgb:
+ return MTLPixelFormatBC1_RGBA_sRGB;
+ case wgpu::TextureFormat::BC2RGBAUnorm:
+ return MTLPixelFormatBC2_RGBA;
+ case wgpu::TextureFormat::BC2RGBAUnormSrgb:
+ return MTLPixelFormatBC2_RGBA_sRGB;
+ case wgpu::TextureFormat::BC3RGBAUnorm:
+ return MTLPixelFormatBC3_RGBA;
+ case wgpu::TextureFormat::BC3RGBAUnormSrgb:
+ return MTLPixelFormatBC3_RGBA_sRGB;
+ case wgpu::TextureFormat::BC4RSnorm:
+ return MTLPixelFormatBC4_RSnorm;
+ case wgpu::TextureFormat::BC4RUnorm:
+ return MTLPixelFormatBC4_RUnorm;
+ case wgpu::TextureFormat::BC5RGSnorm:
+ return MTLPixelFormatBC5_RGSnorm;
+ case wgpu::TextureFormat::BC5RGUnorm:
+ return MTLPixelFormatBC5_RGUnorm;
+ case wgpu::TextureFormat::BC6HRGBFloat:
+ return MTLPixelFormatBC6H_RGBFloat;
+ case wgpu::TextureFormat::BC6HRGBUfloat:
+ return MTLPixelFormatBC6H_RGBUfloat;
+ case wgpu::TextureFormat::BC7RGBAUnorm:
+ return MTLPixelFormatBC7_RGBAUnorm;
+ case wgpu::TextureFormat::BC7RGBAUnormSrgb:
+ return MTLPixelFormatBC7_RGBAUnorm_sRGB;
#else
- case wgpu::TextureFormat::Depth24UnormStencil8:
-
- case wgpu::TextureFormat::BC1RGBAUnorm:
- case wgpu::TextureFormat::BC1RGBAUnormSrgb:
- case wgpu::TextureFormat::BC2RGBAUnorm:
- case wgpu::TextureFormat::BC2RGBAUnormSrgb:
- case wgpu::TextureFormat::BC3RGBAUnorm:
- case wgpu::TextureFormat::BC3RGBAUnormSrgb:
- case wgpu::TextureFormat::BC4RSnorm:
- case wgpu::TextureFormat::BC4RUnorm:
- case wgpu::TextureFormat::BC5RGSnorm:
- case wgpu::TextureFormat::BC5RGUnorm:
- case wgpu::TextureFormat::BC6HRGBFloat:
- case wgpu::TextureFormat::BC6HRGBUfloat:
- case wgpu::TextureFormat::BC7RGBAUnorm:
- case wgpu::TextureFormat::BC7RGBAUnormSrgb:
+ case wgpu::TextureFormat::Depth24UnormStencil8:
+
+ case wgpu::TextureFormat::BC1RGBAUnorm:
+ case wgpu::TextureFormat::BC1RGBAUnormSrgb:
+ case wgpu::TextureFormat::BC2RGBAUnorm:
+ case wgpu::TextureFormat::BC2RGBAUnormSrgb:
+ case wgpu::TextureFormat::BC3RGBAUnorm:
+ case wgpu::TextureFormat::BC3RGBAUnormSrgb:
+ case wgpu::TextureFormat::BC4RSnorm:
+ case wgpu::TextureFormat::BC4RUnorm:
+ case wgpu::TextureFormat::BC5RGSnorm:
+ case wgpu::TextureFormat::BC5RGUnorm:
+ case wgpu::TextureFormat::BC6HRGBFloat:
+ case wgpu::TextureFormat::BC6HRGBUfloat:
+ case wgpu::TextureFormat::BC7RGBAUnorm:
+ case wgpu::TextureFormat::BC7RGBAUnormSrgb:
#endif
- case wgpu::TextureFormat::ETC2RGB8Unorm:
- if (@available(macOS 11.0, iOS 8.0, *)) {
- return MTLPixelFormatETC2_RGB8;
- } else {
- UNREACHABLE();
- }
- case wgpu::TextureFormat::ETC2RGB8UnormSrgb:
- if (@available(macOS 11.0, iOS 8.0, *)) {
- return MTLPixelFormatETC2_RGB8_sRGB;
- } else {
- UNREACHABLE();
- }
- case wgpu::TextureFormat::ETC2RGB8A1Unorm:
- if (@available(macOS 11.0, iOS 8.0, *)) {
- return MTLPixelFormatETC2_RGB8A1;
- } else {
- UNREACHABLE();
- }
- case wgpu::TextureFormat::ETC2RGB8A1UnormSrgb:
- if (@available(macOS 11.0, iOS 8.0, *)) {
- return MTLPixelFormatETC2_RGB8A1_sRGB;
- } else {
- UNREACHABLE();
- }
- case wgpu::TextureFormat::ETC2RGBA8Unorm:
- if (@available(macOS 11.0, iOS 8.0, *)) {
- return MTLPixelFormatEAC_RGBA8;
- } else {
- UNREACHABLE();
- }
- case wgpu::TextureFormat::ETC2RGBA8UnormSrgb:
- if (@available(macOS 11.0, iOS 8.0, *)) {
- return MTLPixelFormatEAC_RGBA8_sRGB;
- } else {
- UNREACHABLE();
- }
- case wgpu::TextureFormat::EACR11Unorm:
- if (@available(macOS 11.0, iOS 8.0, *)) {
- return MTLPixelFormatEAC_R11Unorm;
- } else {
- UNREACHABLE();
- }
- case wgpu::TextureFormat::EACR11Snorm:
- if (@available(macOS 11.0, iOS 8.0, *)) {
- return MTLPixelFormatEAC_R11Snorm;
- } else {
- UNREACHABLE();
- }
- case wgpu::TextureFormat::EACRG11Unorm:
- if (@available(macOS 11.0, iOS 8.0, *)) {
- return MTLPixelFormatEAC_RG11Unorm;
- } else {
- UNREACHABLE();
- }
- case wgpu::TextureFormat::EACRG11Snorm:
- if (@available(macOS 11.0, iOS 8.0, *)) {
- return MTLPixelFormatEAC_RG11Snorm;
- } else {
- UNREACHABLE();
- }
-
- case wgpu::TextureFormat::ASTC4x4Unorm:
- if (@available(macOS 11.0, iOS 8.0, *)) {
- return MTLPixelFormatASTC_4x4_LDR;
- } else {
- UNREACHABLE();
- }
- case wgpu::TextureFormat::ASTC4x4UnormSrgb:
- if (@available(macOS 11.0, iOS 8.0, *)) {
- return MTLPixelFormatASTC_4x4_sRGB;
- } else {
- UNREACHABLE();
- }
- case wgpu::TextureFormat::ASTC5x4Unorm:
- if (@available(macOS 11.0, iOS 8.0, *)) {
- return MTLPixelFormatASTC_5x4_LDR;
- } else {
- UNREACHABLE();
- }
- case wgpu::TextureFormat::ASTC5x4UnormSrgb:
- if (@available(macOS 11.0, iOS 8.0, *)) {
- return MTLPixelFormatASTC_5x4_sRGB;
- } else {
- UNREACHABLE();
- }
- case wgpu::TextureFormat::ASTC5x5Unorm:
- if (@available(macOS 11.0, iOS 8.0, *)) {
- return MTLPixelFormatASTC_5x5_LDR;
- } else {
- UNREACHABLE();
- }
- case wgpu::TextureFormat::ASTC5x5UnormSrgb:
- if (@available(macOS 11.0, iOS 8.0, *)) {
- return MTLPixelFormatASTC_5x5_sRGB;
- } else {
- UNREACHABLE();
- }
- case wgpu::TextureFormat::ASTC6x5Unorm:
- if (@available(macOS 11.0, iOS 8.0, *)) {
- return MTLPixelFormatASTC_6x5_LDR;
- } else {
- UNREACHABLE();
- }
- case wgpu::TextureFormat::ASTC6x5UnormSrgb:
- if (@available(macOS 11.0, iOS 8.0, *)) {
- return MTLPixelFormatASTC_6x5_sRGB;
- } else {
- UNREACHABLE();
- }
- case wgpu::TextureFormat::ASTC6x6Unorm:
- if (@available(macOS 11.0, iOS 8.0, *)) {
- return MTLPixelFormatASTC_6x6_LDR;
- } else {
- UNREACHABLE();
- }
- case wgpu::TextureFormat::ASTC6x6UnormSrgb:
- if (@available(macOS 11.0, iOS 8.0, *)) {
- return MTLPixelFormatASTC_6x6_sRGB;
- } else {
- UNREACHABLE();
- }
- case wgpu::TextureFormat::ASTC8x5Unorm:
- if (@available(macOS 11.0, iOS 8.0, *)) {
- return MTLPixelFormatASTC_8x5_LDR;
- } else {
- UNREACHABLE();
- }
- case wgpu::TextureFormat::ASTC8x5UnormSrgb:
- if (@available(macOS 11.0, iOS 8.0, *)) {
- return MTLPixelFormatASTC_8x5_sRGB;
- } else {
- UNREACHABLE();
- }
- case wgpu::TextureFormat::ASTC8x6Unorm:
- if (@available(macOS 11.0, iOS 8.0, *)) {
- return MTLPixelFormatASTC_8x6_LDR;
- } else {
- UNREACHABLE();
- }
- case wgpu::TextureFormat::ASTC8x6UnormSrgb:
- if (@available(macOS 11.0, iOS 8.0, *)) {
- return MTLPixelFormatASTC_8x6_sRGB;
- } else {
- UNREACHABLE();
- }
- case wgpu::TextureFormat::ASTC8x8Unorm:
- if (@available(macOS 11.0, iOS 8.0, *)) {
- return MTLPixelFormatASTC_8x8_LDR;
- } else {
- UNREACHABLE();
- }
- case wgpu::TextureFormat::ASTC8x8UnormSrgb:
- if (@available(macOS 11.0, iOS 8.0, *)) {
- return MTLPixelFormatASTC_8x8_sRGB;
- } else {
- UNREACHABLE();
- }
- case wgpu::TextureFormat::ASTC10x5Unorm:
- if (@available(macOS 11.0, iOS 8.0, *)) {
- return MTLPixelFormatASTC_10x5_LDR;
- } else {
- UNREACHABLE();
- }
- case wgpu::TextureFormat::ASTC10x5UnormSrgb:
- if (@available(macOS 11.0, iOS 8.0, *)) {
- return MTLPixelFormatASTC_10x5_sRGB;
- } else {
- UNREACHABLE();
- }
- case wgpu::TextureFormat::ASTC10x6Unorm:
- if (@available(macOS 11.0, iOS 8.0, *)) {
- return MTLPixelFormatASTC_10x6_LDR;
- } else {
- UNREACHABLE();
- }
- case wgpu::TextureFormat::ASTC10x6UnormSrgb:
- if (@available(macOS 11.0, iOS 8.0, *)) {
- return MTLPixelFormatASTC_10x6_sRGB;
- } else {
- UNREACHABLE();
- }
- case wgpu::TextureFormat::ASTC10x8Unorm:
- if (@available(macOS 11.0, iOS 8.0, *)) {
- return MTLPixelFormatASTC_10x8_LDR;
- } else {
- UNREACHABLE();
- }
- case wgpu::TextureFormat::ASTC10x8UnormSrgb:
- if (@available(macOS 11.0, iOS 8.0, *)) {
- return MTLPixelFormatASTC_10x8_sRGB;
- } else {
- UNREACHABLE();
- }
- case wgpu::TextureFormat::ASTC10x10Unorm:
- if (@available(macOS 11.0, iOS 8.0, *)) {
- return MTLPixelFormatASTC_10x10_LDR;
- } else {
- UNREACHABLE();
- }
- case wgpu::TextureFormat::ASTC10x10UnormSrgb:
- if (@available(macOS 11.0, iOS 8.0, *)) {
- return MTLPixelFormatASTC_10x10_sRGB;
- } else {
- UNREACHABLE();
- }
- case wgpu::TextureFormat::ASTC12x10Unorm:
- if (@available(macOS 11.0, iOS 8.0, *)) {
- return MTLPixelFormatASTC_12x10_LDR;
- } else {
- UNREACHABLE();
- }
- case wgpu::TextureFormat::ASTC12x10UnormSrgb:
- if (@available(macOS 11.0, iOS 8.0, *)) {
- return MTLPixelFormatASTC_12x10_sRGB;
- } else {
- UNREACHABLE();
- }
- case wgpu::TextureFormat::ASTC12x12Unorm:
- if (@available(macOS 11.0, iOS 8.0, *)) {
- return MTLPixelFormatASTC_12x12_LDR;
- } else {
- UNREACHABLE();
- }
- case wgpu::TextureFormat::ASTC12x12UnormSrgb:
- if (@available(macOS 11.0, iOS 8.0, *)) {
- return MTLPixelFormatASTC_12x12_sRGB;
- } else {
- UNREACHABLE();
- }
-
- case wgpu::TextureFormat::R8BG8Biplanar420Unorm:
- case wgpu::TextureFormat::Undefined:
+ case wgpu::TextureFormat::ETC2RGB8Unorm:
+ if (@available(macOS 11.0, iOS 8.0, *)) {
+ return MTLPixelFormatETC2_RGB8;
+ } else {
UNREACHABLE();
- }
- }
-
- MaybeError ValidateIOSurfaceCanBeWrapped(const DeviceBase*,
- const TextureDescriptor* descriptor,
- IOSurfaceRef ioSurface) {
- DAWN_INVALID_IF(descriptor->dimension != wgpu::TextureDimension::e2D,
- "Texture dimension (%s) is not %s.", descriptor->dimension,
- wgpu::TextureDimension::e2D);
-
- DAWN_INVALID_IF(descriptor->mipLevelCount != 1, "Mip level count (%u) is not 1.",
- descriptor->mipLevelCount);
-
- DAWN_INVALID_IF(descriptor->size.depthOrArrayLayers != 1,
- "Array layer count (%u) is not 1.", descriptor->size.depthOrArrayLayers);
-
- DAWN_INVALID_IF(descriptor->sampleCount != 1, "Sample count (%u) is not 1.",
- descriptor->sampleCount);
-
- uint32_t surfaceWidth = IOSurfaceGetWidth(ioSurface);
- uint32_t surfaceHeight = IOSurfaceGetHeight(ioSurface);
-
- DAWN_INVALID_IF(
- descriptor->size.width != surfaceWidth || descriptor->size.height != surfaceHeight ||
- descriptor->size.depthOrArrayLayers != 1,
- "IOSurface size (width: %u, height %u, depth: 1) doesn't match descriptor size %s.",
- surfaceWidth, surfaceHeight, &descriptor->size);
+ }
+ case wgpu::TextureFormat::ETC2RGB8UnormSrgb:
+ if (@available(macOS 11.0, iOS 8.0, *)) {
+ return MTLPixelFormatETC2_RGB8_sRGB;
+ } else {
+ UNREACHABLE();
+ }
+ case wgpu::TextureFormat::ETC2RGB8A1Unorm:
+ if (@available(macOS 11.0, iOS 8.0, *)) {
+ return MTLPixelFormatETC2_RGB8A1;
+ } else {
+ UNREACHABLE();
+ }
+ case wgpu::TextureFormat::ETC2RGB8A1UnormSrgb:
+ if (@available(macOS 11.0, iOS 8.0, *)) {
+ return MTLPixelFormatETC2_RGB8A1_sRGB;
+ } else {
+ UNREACHABLE();
+ }
+ case wgpu::TextureFormat::ETC2RGBA8Unorm:
+ if (@available(macOS 11.0, iOS 8.0, *)) {
+ return MTLPixelFormatEAC_RGBA8;
+ } else {
+ UNREACHABLE();
+ }
+ case wgpu::TextureFormat::ETC2RGBA8UnormSrgb:
+ if (@available(macOS 11.0, iOS 8.0, *)) {
+ return MTLPixelFormatEAC_RGBA8_sRGB;
+ } else {
+ UNREACHABLE();
+ }
+ case wgpu::TextureFormat::EACR11Unorm:
+ if (@available(macOS 11.0, iOS 8.0, *)) {
+ return MTLPixelFormatEAC_R11Unorm;
+ } else {
+ UNREACHABLE();
+ }
+ case wgpu::TextureFormat::EACR11Snorm:
+ if (@available(macOS 11.0, iOS 8.0, *)) {
+ return MTLPixelFormatEAC_R11Snorm;
+ } else {
+ UNREACHABLE();
+ }
+ case wgpu::TextureFormat::EACRG11Unorm:
+ if (@available(macOS 11.0, iOS 8.0, *)) {
+ return MTLPixelFormatEAC_RG11Unorm;
+ } else {
+ UNREACHABLE();
+ }
+ case wgpu::TextureFormat::EACRG11Snorm:
+ if (@available(macOS 11.0, iOS 8.0, *)) {
+ return MTLPixelFormatEAC_RG11Snorm;
+ } else {
+ UNREACHABLE();
+ }
- wgpu::TextureFormat ioSurfaceFormat;
- DAWN_TRY_ASSIGN(ioSurfaceFormat,
- GetFormatEquivalentToIOSurfaceFormat(IOSurfaceGetPixelFormat(ioSurface)));
- DAWN_INVALID_IF(descriptor->format != ioSurfaceFormat,
- "IOSurface format (%s) doesn't match the descriptor format (%s).",
- ioSurfaceFormat, descriptor->format);
+ case wgpu::TextureFormat::ASTC4x4Unorm:
+ if (@available(macOS 11.0, iOS 8.0, *)) {
+ return MTLPixelFormatASTC_4x4_LDR;
+ } else {
+ UNREACHABLE();
+ }
+ case wgpu::TextureFormat::ASTC4x4UnormSrgb:
+ if (@available(macOS 11.0, iOS 8.0, *)) {
+ return MTLPixelFormatASTC_4x4_sRGB;
+ } else {
+ UNREACHABLE();
+ }
+ case wgpu::TextureFormat::ASTC5x4Unorm:
+ if (@available(macOS 11.0, iOS 8.0, *)) {
+ return MTLPixelFormatASTC_5x4_LDR;
+ } else {
+ UNREACHABLE();
+ }
+ case wgpu::TextureFormat::ASTC5x4UnormSrgb:
+ if (@available(macOS 11.0, iOS 8.0, *)) {
+ return MTLPixelFormatASTC_5x4_sRGB;
+ } else {
+ UNREACHABLE();
+ }
+ case wgpu::TextureFormat::ASTC5x5Unorm:
+ if (@available(macOS 11.0, iOS 8.0, *)) {
+ return MTLPixelFormatASTC_5x5_LDR;
+ } else {
+ UNREACHABLE();
+ }
+ case wgpu::TextureFormat::ASTC5x5UnormSrgb:
+ if (@available(macOS 11.0, iOS 8.0, *)) {
+ return MTLPixelFormatASTC_5x5_sRGB;
+ } else {
+ UNREACHABLE();
+ }
+ case wgpu::TextureFormat::ASTC6x5Unorm:
+ if (@available(macOS 11.0, iOS 8.0, *)) {
+ return MTLPixelFormatASTC_6x5_LDR;
+ } else {
+ UNREACHABLE();
+ }
+ case wgpu::TextureFormat::ASTC6x5UnormSrgb:
+ if (@available(macOS 11.0, iOS 8.0, *)) {
+ return MTLPixelFormatASTC_6x5_sRGB;
+ } else {
+ UNREACHABLE();
+ }
+ case wgpu::TextureFormat::ASTC6x6Unorm:
+ if (@available(macOS 11.0, iOS 8.0, *)) {
+ return MTLPixelFormatASTC_6x6_LDR;
+ } else {
+ UNREACHABLE();
+ }
+ case wgpu::TextureFormat::ASTC6x6UnormSrgb:
+ if (@available(macOS 11.0, iOS 8.0, *)) {
+ return MTLPixelFormatASTC_6x6_sRGB;
+ } else {
+ UNREACHABLE();
+ }
+ case wgpu::TextureFormat::ASTC8x5Unorm:
+ if (@available(macOS 11.0, iOS 8.0, *)) {
+ return MTLPixelFormatASTC_8x5_LDR;
+ } else {
+ UNREACHABLE();
+ }
+ case wgpu::TextureFormat::ASTC8x5UnormSrgb:
+ if (@available(macOS 11.0, iOS 8.0, *)) {
+ return MTLPixelFormatASTC_8x5_sRGB;
+ } else {
+ UNREACHABLE();
+ }
+ case wgpu::TextureFormat::ASTC8x6Unorm:
+ if (@available(macOS 11.0, iOS 8.0, *)) {
+ return MTLPixelFormatASTC_8x6_LDR;
+ } else {
+ UNREACHABLE();
+ }
+ case wgpu::TextureFormat::ASTC8x6UnormSrgb:
+ if (@available(macOS 11.0, iOS 8.0, *)) {
+ return MTLPixelFormatASTC_8x6_sRGB;
+ } else {
+ UNREACHABLE();
+ }
+ case wgpu::TextureFormat::ASTC8x8Unorm:
+ if (@available(macOS 11.0, iOS 8.0, *)) {
+ return MTLPixelFormatASTC_8x8_LDR;
+ } else {
+ UNREACHABLE();
+ }
+ case wgpu::TextureFormat::ASTC8x8UnormSrgb:
+ if (@available(macOS 11.0, iOS 8.0, *)) {
+ return MTLPixelFormatASTC_8x8_sRGB;
+ } else {
+ UNREACHABLE();
+ }
+ case wgpu::TextureFormat::ASTC10x5Unorm:
+ if (@available(macOS 11.0, iOS 8.0, *)) {
+ return MTLPixelFormatASTC_10x5_LDR;
+ } else {
+ UNREACHABLE();
+ }
+ case wgpu::TextureFormat::ASTC10x5UnormSrgb:
+ if (@available(macOS 11.0, iOS 8.0, *)) {
+ return MTLPixelFormatASTC_10x5_sRGB;
+ } else {
+ UNREACHABLE();
+ }
+ case wgpu::TextureFormat::ASTC10x6Unorm:
+ if (@available(macOS 11.0, iOS 8.0, *)) {
+ return MTLPixelFormatASTC_10x6_LDR;
+ } else {
+ UNREACHABLE();
+ }
+ case wgpu::TextureFormat::ASTC10x6UnormSrgb:
+ if (@available(macOS 11.0, iOS 8.0, *)) {
+ return MTLPixelFormatASTC_10x6_sRGB;
+ } else {
+ UNREACHABLE();
+ }
+ case wgpu::TextureFormat::ASTC10x8Unorm:
+ if (@available(macOS 11.0, iOS 8.0, *)) {
+ return MTLPixelFormatASTC_10x8_LDR;
+ } else {
+ UNREACHABLE();
+ }
+ case wgpu::TextureFormat::ASTC10x8UnormSrgb:
+ if (@available(macOS 11.0, iOS 8.0, *)) {
+ return MTLPixelFormatASTC_10x8_sRGB;
+ } else {
+ UNREACHABLE();
+ }
+ case wgpu::TextureFormat::ASTC10x10Unorm:
+ if (@available(macOS 11.0, iOS 8.0, *)) {
+ return MTLPixelFormatASTC_10x10_LDR;
+ } else {
+ UNREACHABLE();
+ }
+ case wgpu::TextureFormat::ASTC10x10UnormSrgb:
+ if (@available(macOS 11.0, iOS 8.0, *)) {
+ return MTLPixelFormatASTC_10x10_sRGB;
+ } else {
+ UNREACHABLE();
+ }
+ case wgpu::TextureFormat::ASTC12x10Unorm:
+ if (@available(macOS 11.0, iOS 8.0, *)) {
+ return MTLPixelFormatASTC_12x10_LDR;
+ } else {
+ UNREACHABLE();
+ }
+ case wgpu::TextureFormat::ASTC12x10UnormSrgb:
+ if (@available(macOS 11.0, iOS 8.0, *)) {
+ return MTLPixelFormatASTC_12x10_sRGB;
+ } else {
+ UNREACHABLE();
+ }
+ case wgpu::TextureFormat::ASTC12x12Unorm:
+ if (@available(macOS 11.0, iOS 8.0, *)) {
+ return MTLPixelFormatASTC_12x12_LDR;
+ } else {
+ UNREACHABLE();
+ }
+ case wgpu::TextureFormat::ASTC12x12UnormSrgb:
+ if (@available(macOS 11.0, iOS 8.0, *)) {
+ return MTLPixelFormatASTC_12x12_sRGB;
+ } else {
+ UNREACHABLE();
+ }
- return {};
+ case wgpu::TextureFormat::R8BG8Biplanar420Unorm:
+ case wgpu::TextureFormat::Undefined:
+ UNREACHABLE();
}
+}
- NSRef<MTLTextureDescriptor> Texture::CreateMetalTextureDescriptor() const {
- NSRef<MTLTextureDescriptor> mtlDescRef = AcquireNSRef([MTLTextureDescriptor new]);
- MTLTextureDescriptor* mtlDesc = mtlDescRef.Get();
+MaybeError ValidateIOSurfaceCanBeWrapped(const DeviceBase*,
+ const TextureDescriptor* descriptor,
+ IOSurfaceRef ioSurface) {
+ DAWN_INVALID_IF(descriptor->dimension != wgpu::TextureDimension::e2D,
+ "Texture dimension (%s) is not %s.", descriptor->dimension,
+ wgpu::TextureDimension::e2D);
+
+ DAWN_INVALID_IF(descriptor->mipLevelCount != 1, "Mip level count (%u) is not 1.",
+ descriptor->mipLevelCount);
+
+ DAWN_INVALID_IF(descriptor->size.depthOrArrayLayers != 1, "Array layer count (%u) is not 1.",
+ descriptor->size.depthOrArrayLayers);
+
+ DAWN_INVALID_IF(descriptor->sampleCount != 1, "Sample count (%u) is not 1.",
+ descriptor->sampleCount);
+
+ uint32_t surfaceWidth = IOSurfaceGetWidth(ioSurface);
+ uint32_t surfaceHeight = IOSurfaceGetHeight(ioSurface);
+
+ DAWN_INVALID_IF(
+ descriptor->size.width != surfaceWidth || descriptor->size.height != surfaceHeight ||
+ descriptor->size.depthOrArrayLayers != 1,
+ "IOSurface size (width: %u, height %u, depth: 1) doesn't match descriptor size %s.",
+ surfaceWidth, surfaceHeight, &descriptor->size);
+
+ wgpu::TextureFormat ioSurfaceFormat;
+ DAWN_TRY_ASSIGN(ioSurfaceFormat,
+ GetFormatEquivalentToIOSurfaceFormat(IOSurfaceGetPixelFormat(ioSurface)));
+ DAWN_INVALID_IF(descriptor->format != ioSurfaceFormat,
+ "IOSurface format (%s) doesn't match the descriptor format (%s).",
+ ioSurfaceFormat, descriptor->format);
+
+ return {};
+}
+
+NSRef<MTLTextureDescriptor> Texture::CreateMetalTextureDescriptor() const {
+ NSRef<MTLTextureDescriptor> mtlDescRef = AcquireNSRef([MTLTextureDescriptor new]);
+ MTLTextureDescriptor* mtlDesc = mtlDescRef.Get();
+
+ mtlDesc.width = GetWidth();
+ mtlDesc.sampleCount = GetSampleCount();
+    // Metal only allows format reinterpretation between swizzle patterns or between linear
+    // space and sRGB. For example, creating a bgra8Unorm texture view on an rgba8Unorm
+    // texture, or creating an rgba8Unorm_srgb texture view on an rgba8Unorm texture.
+ mtlDesc.usage = MetalTextureUsage(GetFormat(), GetInternalUsage());
+ mtlDesc.pixelFormat = MetalPixelFormat(GetFormat().format);
+ mtlDesc.mipmapLevelCount = GetNumMipLevels();
+ mtlDesc.storageMode = MTLStorageModePrivate;
+
+ // Choose the correct MTLTextureType and paper over differences in how the array layer count
+ // is specified.
+ switch (GetDimension()) {
+ case wgpu::TextureDimension::e1D:
+ mtlDesc.arrayLength = 1;
+ mtlDesc.depth = 1;
+ ASSERT(mtlDesc.sampleCount == 1);
+ mtlDesc.textureType = MTLTextureType1D;
+ break;
- mtlDesc.width = GetWidth();
- mtlDesc.sampleCount = GetSampleCount();
- // Metal only allows format reinterpretation to happen on swizzle pattern or conversion
- // between linear space and sRGB. For example, creating bgra8Unorm texture view on
- // rgba8Unorm texture or creating rgba8Unorm_srgb texture view on rgab8Unorm texture.
- // TODO: add MTLTextureUsagePixelFormatView when needed when we support other format
- // reinterpretation.
- mtlDesc.usage = MetalTextureUsage(GetFormat(), GetInternalUsage(), GetSampleCount());
- mtlDesc.pixelFormat = MetalPixelFormat(GetFormat().format);
- mtlDesc.mipmapLevelCount = GetNumMipLevels();
- mtlDesc.storageMode = MTLStorageModePrivate;
-
- // Choose the correct MTLTextureType and paper over differences in how the array layer count
- // is specified.
- switch (GetDimension()) {
- case wgpu::TextureDimension::e1D:
- mtlDesc.arrayLength = 1;
- mtlDesc.depth = 1;
- ASSERT(mtlDesc.sampleCount == 1);
- mtlDesc.textureType = MTLTextureType1D;
- break;
-
- case wgpu::TextureDimension::e2D:
- mtlDesc.height = GetHeight();
- mtlDesc.arrayLength = GetArrayLayers();
- mtlDesc.depth = 1;
- if (mtlDesc.arrayLength > 1) {
- ASSERT(mtlDesc.sampleCount == 1);
- mtlDesc.textureType = MTLTextureType2DArray;
- } else if (mtlDesc.sampleCount > 1) {
- mtlDesc.textureType = MTLTextureType2DMultisample;
- } else {
- mtlDesc.textureType = MTLTextureType2D;
- }
- break;
- case wgpu::TextureDimension::e3D:
- mtlDesc.height = GetHeight();
- mtlDesc.depth = GetDepth();
- mtlDesc.arrayLength = 1;
+ case wgpu::TextureDimension::e2D:
+ mtlDesc.height = GetHeight();
+ mtlDesc.arrayLength = GetArrayLayers();
+ mtlDesc.depth = 1;
+ if (mtlDesc.arrayLength > 1) {
ASSERT(mtlDesc.sampleCount == 1);
- mtlDesc.textureType = MTLTextureType3D;
- break;
- }
-
- return mtlDescRef;
- }
-
- // static
- ResultOrError<Ref<Texture>> Texture::Create(Device* device,
- const TextureDescriptor* descriptor) {
- Ref<Texture> texture =
- AcquireRef(new Texture(device, descriptor, TextureState::OwnedInternal));
- DAWN_TRY(texture->InitializeAsInternalTexture(descriptor));
- return texture;
+ mtlDesc.textureType = MTLTextureType2DArray;
+ } else if (mtlDesc.sampleCount > 1) {
+ mtlDesc.textureType = MTLTextureType2DMultisample;
+ } else {
+ mtlDesc.textureType = MTLTextureType2D;
+ }
+ break;
+ case wgpu::TextureDimension::e3D:
+ mtlDesc.height = GetHeight();
+ mtlDesc.depth = GetDepth();
+ mtlDesc.arrayLength = 1;
+ ASSERT(mtlDesc.sampleCount == 1);
+ mtlDesc.textureType = MTLTextureType3D;
+ break;
}
- // static
- ResultOrError<Ref<Texture>> Texture::CreateFromIOSurface(
- Device* device,
- const ExternalImageDescriptor* descriptor,
- IOSurfaceRef ioSurface) {
- const TextureDescriptor* textureDescriptor = FromAPI(descriptor->cTextureDescriptor);
-
- Ref<Texture> texture =
- AcquireRef(new Texture(device, textureDescriptor, TextureState::OwnedExternal));
- DAWN_TRY(texture->InitializeFromIOSurface(descriptor, textureDescriptor, ioSurface));
- return texture;
+ return mtlDescRef;
+}
+
+// static
+ResultOrError<Ref<Texture>> Texture::Create(Device* device, const TextureDescriptor* descriptor) {
+ Ref<Texture> texture = AcquireRef(new Texture(device, descriptor, TextureState::OwnedInternal));
+ DAWN_TRY(texture->InitializeAsInternalTexture(descriptor));
+ return texture;
+}
+
+// static
+ResultOrError<Ref<Texture>> Texture::CreateFromIOSurface(Device* device,
+ const ExternalImageDescriptor* descriptor,
+ IOSurfaceRef ioSurface) {
+ const TextureDescriptor* textureDescriptor = FromAPI(descriptor->cTextureDescriptor);
+
+ Ref<Texture> texture =
+ AcquireRef(new Texture(device, textureDescriptor, TextureState::OwnedExternal));
+ DAWN_TRY(texture->InitializeFromIOSurface(descriptor, textureDescriptor, ioSurface));
+ return texture;
+}
+
+// static
+Ref<Texture> Texture::CreateWrapping(Device* device,
+ const TextureDescriptor* descriptor,
+ NSPRef<id<MTLTexture>> wrapped) {
+ Ref<Texture> texture = AcquireRef(new Texture(device, descriptor, TextureState::OwnedInternal));
+ texture->InitializeAsWrapping(descriptor, std::move(wrapped));
+ return texture;
+}
+
+MaybeError Texture::InitializeAsInternalTexture(const TextureDescriptor* descriptor) {
+ Device* device = ToBackend(GetDevice());
+
+ NSRef<MTLTextureDescriptor> mtlDesc = CreateMetalTextureDescriptor();
+ mMtlUsage = [*mtlDesc usage];
+ mMtlTexture = AcquireNSPRef([device->GetMTLDevice() newTextureWithDescriptor:mtlDesc.Get()]);
+
+ if (mMtlTexture == nil) {
+ return DAWN_OUT_OF_MEMORY_ERROR("Failed to allocate texture.");
}
- // static
- Ref<Texture> Texture::CreateWrapping(Device* device,
- const TextureDescriptor* descriptor,
- NSPRef<id<MTLTexture>> wrapped) {
- Ref<Texture> texture =
- AcquireRef(new Texture(device, descriptor, TextureState::OwnedInternal));
- texture->InitializeAsWrapping(descriptor, std::move(wrapped));
- return texture;
+ if (device->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting)) {
+ DAWN_TRY(ClearTexture(device->GetPendingCommandContext(), GetAllSubresources(),
+ TextureBase::ClearValue::NonZero));
}
- MaybeError Texture::InitializeAsInternalTexture(const TextureDescriptor* descriptor) {
+ return {};
+}
+
+void Texture::InitializeAsWrapping(const TextureDescriptor* descriptor,
+ NSPRef<id<MTLTexture>> wrapped) {
+ NSRef<MTLTextureDescriptor> mtlDesc = CreateMetalTextureDescriptor();
+ mMtlUsage = [*mtlDesc usage];
+ mMtlTexture = std::move(wrapped);
+}
+
+MaybeError Texture::InitializeFromIOSurface(const ExternalImageDescriptor* descriptor,
+ const TextureDescriptor* textureDescriptor,
+ IOSurfaceRef ioSurface) {
+ mIOSurface = ioSurface;
+
+    // A WGPUTexture that wraps a multiplanar ioSurface must create texture
+    // views explicitly, so wrap the ioSurface here and defer extracting the
+    // MTLTexture for a plane until a texture view is created.
+    // A WGPUTexture that wraps a non-multiplanar ioSurface must support
+    // operations that don't require a texture view (e.g. copy), so extract
+    // the MTLTexture from such an ioSurface here.
+ if (!GetFormat().IsMultiPlanar()) {
Device* device = ToBackend(GetDevice());
NSRef<MTLTextureDescriptor> mtlDesc = CreateMetalTextureDescriptor();
- mMtlUsage = [*mtlDesc usage];
- mMtlTexture =
- AcquireNSPRef([device->GetMTLDevice() newTextureWithDescriptor:mtlDesc.Get()]);
-
- if (mMtlTexture == nil) {
- return DAWN_OUT_OF_MEMORY_ERROR("Failed to allocate texture.");
- }
-
- if (device->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting)) {
- DAWN_TRY(ClearTexture(device->GetPendingCommandContext(), GetAllSubresources(),
- TextureBase::ClearValue::NonZero));
- }
-
- return {};
- }
+ [*mtlDesc setStorageMode:kIOSurfaceStorageMode];
- void Texture::InitializeAsWrapping(const TextureDescriptor* descriptor,
- NSPRef<id<MTLTexture>> wrapped) {
- NSRef<MTLTextureDescriptor> mtlDesc = CreateMetalTextureDescriptor();
mMtlUsage = [*mtlDesc usage];
- mMtlTexture = std::move(wrapped);
+ mMtlTexture = AcquireNSPRef([device->GetMTLDevice() newTextureWithDescriptor:mtlDesc.Get()
+ iosurface:ioSurface
+ plane:0]);
}
+ SetIsSubresourceContentInitialized(descriptor->isInitialized, GetAllSubresources());
+ return {};
+}
- MaybeError Texture::InitializeFromIOSurface(const ExternalImageDescriptor* descriptor,
- const TextureDescriptor* textureDescriptor,
- IOSurfaceRef ioSurface) {
- mIOSurface = ioSurface;
-
- // Uses WGPUTexture which wraps multiplanar ioSurface needs to create
- // texture view explicitly. Wrap the ioSurface and delay to extract
- // MTLTexture from the plane of it when creating texture view.
- // WGPUTexture which wraps non-multplanar ioSurface needs to support
- // ops that doesn't require creating texture view(e.g. copy). Extract
- // MTLTexture from such ioSurface to support this.
- if (!GetFormat().IsMultiPlanar()) {
- Device* device = ToBackend(GetDevice());
-
- NSRef<MTLTextureDescriptor> mtlDesc = CreateMetalTextureDescriptor();
- [*mtlDesc setStorageMode:kIOSurfaceStorageMode];
-
- mMtlUsage = [*mtlDesc usage];
- mMtlTexture =
- AcquireNSPRef([device->GetMTLDevice() newTextureWithDescriptor:mtlDesc.Get()
- iosurface:ioSurface
- plane:0]);
- }
- SetIsSubresourceContentInitialized(descriptor->isInitialized, GetAllSubresources());
- return {};
- }
+Texture::Texture(DeviceBase* dev, const TextureDescriptor* desc, TextureState st)
+ : TextureBase(dev, desc, st) {}
- Texture::~Texture() {
- }
-
- void Texture::DestroyImpl() {
- TextureBase::DestroyImpl();
- mMtlTexture = nullptr;
- mIOSurface = nullptr;
- }
+Texture::~Texture() {}
- id<MTLTexture> Texture::GetMTLTexture() const {
- return mMtlTexture.Get();
- }
+void Texture::DestroyImpl() {
+ TextureBase::DestroyImpl();
+ mMtlTexture = nullptr;
+ mIOSurface = nullptr;
+}
- IOSurfaceRef Texture::GetIOSurface() {
- return mIOSurface.Get();
- }
+id<MTLTexture> Texture::GetMTLTexture() const {
+ return mMtlTexture.Get();
+}
- NSPRef<id<MTLTexture>> Texture::CreateFormatView(wgpu::TextureFormat format) {
- if (GetFormat().format == format) {
- return mMtlTexture;
- }
+IOSurfaceRef Texture::GetIOSurface() {
+ return mIOSurface.Get();
+}
- ASSERT(AllowFormatReinterpretationWithoutFlag(MetalPixelFormat(GetFormat().format),
- MetalPixelFormat(format)));
- return AcquireNSPRef(
- [mMtlTexture.Get() newTextureViewWithPixelFormat:MetalPixelFormat(format)]);
+NSPRef<id<MTLTexture>> Texture::CreateFormatView(wgpu::TextureFormat format) {
+ if (GetFormat().format == format) {
+ return mMtlTexture;
}
- MaybeError Texture::ClearTexture(CommandRecordingContext* commandContext,
- const SubresourceRange& range,
- TextureBase::ClearValue clearValue) {
- Device* device = ToBackend(GetDevice());
-
- const uint8_t clearColor = (clearValue == TextureBase::ClearValue::Zero) ? 0 : 1;
- const double dClearColor = (clearValue == TextureBase::ClearValue::Zero) ? 0.0 : 1.0;
-
- if ((mMtlUsage & MTLTextureUsageRenderTarget) != 0) {
- ASSERT(GetFormat().isRenderable);
-
- // End the blit encoder if it is open.
- commandContext->EndBlit();
-
- if (GetFormat().HasDepthOrStencil()) {
- // Create a render pass to clear each subresource.
- for (uint32_t level = range.baseMipLevel;
- level < range.baseMipLevel + range.levelCount; ++level) {
- for (uint32_t arrayLayer = range.baseArrayLayer;
- arrayLayer < range.baseArrayLayer + range.layerCount; arrayLayer++) {
- if (clearValue == TextureBase::ClearValue::Zero &&
- IsSubresourceContentInitialized(SubresourceRange::SingleMipAndLayer(
- level, arrayLayer, range.aspects))) {
- // Skip lazy clears if already initialized.
- continue;
- }
+ ASSERT(AllowFormatReinterpretationWithoutFlag(MetalPixelFormat(GetFormat().format),
+ MetalPixelFormat(format)));
+ return AcquireNSPRef(
+ [mMtlTexture.Get() newTextureViewWithPixelFormat:MetalPixelFormat(format)]);
+}
+
+MaybeError Texture::ClearTexture(CommandRecordingContext* commandContext,
+ const SubresourceRange& range,
+ TextureBase::ClearValue clearValue) {
+ Device* device = ToBackend(GetDevice());
+
+ const uint8_t clearColor = (clearValue == TextureBase::ClearValue::Zero) ? 0 : 1;
+ const double dClearColor = (clearValue == TextureBase::ClearValue::Zero) ? 0.0 : 1.0;
+
+ if ((mMtlUsage & MTLTextureUsageRenderTarget) != 0) {
+ ASSERT(GetFormat().isRenderable);
+
+ // End the blit encoder if it is open.
+ commandContext->EndBlit();
+
+ if (GetFormat().HasDepthOrStencil()) {
+ // Create a render pass to clear each subresource.
+ for (uint32_t level = range.baseMipLevel; level < range.baseMipLevel + range.levelCount;
+ ++level) {
+ for (uint32_t arrayLayer = range.baseArrayLayer;
+ arrayLayer < range.baseArrayLayer + range.layerCount; arrayLayer++) {
+ if (clearValue == TextureBase::ClearValue::Zero &&
+ IsSubresourceContentInitialized(SubresourceRange::SingleMipAndLayer(
+ level, arrayLayer, range.aspects))) {
+ // Skip lazy clears if already initialized.
+ continue;
+ }
- // Note that this creates a descriptor that's autoreleased so we don't use
- // AcquireNSRef
- NSRef<MTLRenderPassDescriptor> descriptorRef =
- [MTLRenderPassDescriptor renderPassDescriptor];
- MTLRenderPassDescriptor* descriptor = descriptorRef.Get();
-
- // At least one aspect needs clearing. Iterate the aspects individually to
- // determine which to clear.
- for (Aspect aspect : IterateEnumMask(range.aspects)) {
- if (clearValue == TextureBase::ClearValue::Zero &&
- IsSubresourceContentInitialized(SubresourceRange::SingleMipAndLayer(
- level, arrayLayer, aspect))) {
- // Skip lazy clears if already initialized.
- continue;
- }
-
- ASSERT(GetDimension() == wgpu::TextureDimension::e2D);
- switch (aspect) {
- case Aspect::Depth:
- descriptor.depthAttachment.texture = GetMTLTexture();
- descriptor.depthAttachment.level = level;
- descriptor.depthAttachment.slice = arrayLayer;
- descriptor.depthAttachment.loadAction = MTLLoadActionClear;
- descriptor.depthAttachment.storeAction = MTLStoreActionStore;
- descriptor.depthAttachment.clearDepth = dClearColor;
- break;
- case Aspect::Stencil:
- descriptor.stencilAttachment.texture = GetMTLTexture();
- descriptor.stencilAttachment.level = level;
- descriptor.stencilAttachment.slice = arrayLayer;
- descriptor.stencilAttachment.loadAction = MTLLoadActionClear;
- descriptor.stencilAttachment.storeAction = MTLStoreActionStore;
- descriptor.stencilAttachment.clearStencil =
- static_cast<uint32_t>(clearColor);
- break;
- default:
- UNREACHABLE();
- }
- }
+ // Note that this creates a descriptor that's autoreleased so we don't use
+ // AcquireNSRef
+ NSRef<MTLRenderPassDescriptor> descriptorRef =
+ [MTLRenderPassDescriptor renderPassDescriptor];
+ MTLRenderPassDescriptor* descriptor = descriptorRef.Get();
- commandContext->BeginRender(descriptor);
- commandContext->EndRender();
- }
- }
- } else {
- ASSERT(GetFormat().IsColor());
- for (uint32_t level = range.baseMipLevel;
- level < range.baseMipLevel + range.levelCount; ++level) {
- // Create multiple render passes with each subresource as a color attachment to
- // clear them all. Only do this for array layers to ensure all attachments have
- // the same size.
- NSRef<MTLRenderPassDescriptor> descriptor;
- uint32_t attachment = 0;
-
- uint32_t numZSlices = GetMipLevelVirtualSize(level).depthOrArrayLayers;
-
- for (uint32_t arrayLayer = range.baseArrayLayer;
- arrayLayer < range.baseArrayLayer + range.layerCount; arrayLayer++) {
+ // At least one aspect needs clearing. Iterate the aspects individually to
+ // determine which to clear.
+ for (Aspect aspect : IterateEnumMask(range.aspects)) {
if (clearValue == TextureBase::ClearValue::Zero &&
- IsSubresourceContentInitialized(SubresourceRange::SingleMipAndLayer(
- level, arrayLayer, Aspect::Color))) {
+ IsSubresourceContentInitialized(
+ SubresourceRange::SingleMipAndLayer(level, arrayLayer, aspect))) {
// Skip lazy clears if already initialized.
continue;
}
- for (uint32_t z = 0; z < numZSlices; ++z) {
- if (descriptor == nullptr) {
- // Note that this creates a descriptor that's autoreleased so we
- // don't use AcquireNSRef
- descriptor = [MTLRenderPassDescriptor renderPassDescriptor];
- }
-
- [*descriptor colorAttachments][attachment].texture = GetMTLTexture();
- [*descriptor colorAttachments][attachment].loadAction =
- MTLLoadActionClear;
- [*descriptor colorAttachments][attachment].storeAction =
- MTLStoreActionStore;
- [*descriptor colorAttachments][attachment].clearColor =
- MTLClearColorMake(dClearColor, dClearColor, dClearColor,
- dClearColor);
- [*descriptor colorAttachments][attachment].level = level;
- [*descriptor colorAttachments][attachment].slice = arrayLayer;
- [*descriptor colorAttachments][attachment].depthPlane = z;
-
- attachment++;
-
- if (attachment == kMaxColorAttachments) {
- attachment = 0;
- commandContext->BeginRender(descriptor.Get());
- commandContext->EndRender();
- descriptor = nullptr;
- }
+ ASSERT(GetDimension() == wgpu::TextureDimension::e2D);
+ switch (aspect) {
+ case Aspect::Depth:
+ descriptor.depthAttachment.texture = GetMTLTexture();
+ descriptor.depthAttachment.level = level;
+ descriptor.depthAttachment.slice = arrayLayer;
+ descriptor.depthAttachment.loadAction = MTLLoadActionClear;
+ descriptor.depthAttachment.storeAction = MTLStoreActionStore;
+ descriptor.depthAttachment.clearDepth = dClearColor;
+ break;
+ case Aspect::Stencil:
+ descriptor.stencilAttachment.texture = GetMTLTexture();
+ descriptor.stencilAttachment.level = level;
+ descriptor.stencilAttachment.slice = arrayLayer;
+ descriptor.stencilAttachment.loadAction = MTLLoadActionClear;
+ descriptor.stencilAttachment.storeAction = MTLStoreActionStore;
+ descriptor.stencilAttachment.clearStencil =
+ static_cast<uint32_t>(clearColor);
+ break;
+ default:
+ UNREACHABLE();
}
}
- if (descriptor != nullptr) {
- commandContext->BeginRender(descriptor.Get());
- commandContext->EndRender();
- }
+ DAWN_TRY(
+ EncodeEmptyMetalRenderPass(device, commandContext, descriptor,
+ GetMipLevelSingleSubresourceVirtualSize(level)));
}
}
} else {
- Extent3D largestMipSize = GetMipLevelVirtualSize(range.baseMipLevel);
-
- // Encode a buffer to texture copy to clear each subresource.
- for (Aspect aspect : IterateEnumMask(range.aspects)) {
- // Compute the buffer size big enough to fill the largest mip.
- const TexelBlockInfo& blockInfo = GetFormat().GetAspectInfo(aspect).block;
-
- // Metal validation layers: sourceBytesPerRow must be at least 64.
- uint32_t largestMipBytesPerRow =
- std::max((largestMipSize.width / blockInfo.width) * blockInfo.byteSize, 64u);
-
- // Metal validation layers: sourceBytesPerImage must be at least 512.
- uint64_t largestMipBytesPerImage =
- std::max(static_cast<uint64_t>(largestMipBytesPerRow) *
- (largestMipSize.height / blockInfo.height),
- 512llu);
-
- uint64_t bufferSize = largestMipBytesPerImage * largestMipSize.depthOrArrayLayers;
-
- if (bufferSize > std::numeric_limits<NSUInteger>::max()) {
- return DAWN_OUT_OF_MEMORY_ERROR("Unable to allocate buffer.");
- }
-
- DynamicUploader* uploader = device->GetDynamicUploader();
- UploadHandle uploadHandle;
- DAWN_TRY_ASSIGN(uploadHandle,
- uploader->Allocate(bufferSize, device->GetPendingCommandSerial(),
- blockInfo.byteSize));
- memset(uploadHandle.mappedBuffer, clearColor, bufferSize);
-
- id<MTLBuffer> uploadBuffer =
- ToBackend(uploadHandle.stagingBuffer)->GetBufferHandle();
-
- for (uint32_t level = range.baseMipLevel;
- level < range.baseMipLevel + range.levelCount; ++level) {
- Extent3D virtualSize = GetMipLevelVirtualSize(level);
+ ASSERT(GetFormat().IsColor());
+ for (uint32_t level = range.baseMipLevel; level < range.baseMipLevel + range.levelCount;
+ ++level) {
+ // Create multiple render passes with each subresource as a color attachment to
+ // clear them all. Only do this for array layers to ensure all attachments have
+ // the same size.
+ NSRef<MTLRenderPassDescriptor> descriptor;
+ uint32_t attachment = 0;
+
+ uint32_t depth = GetMipLevelSingleSubresourceVirtualSize(level).depthOrArrayLayers;
+
+ for (uint32_t arrayLayer = range.baseArrayLayer;
+ arrayLayer < range.baseArrayLayer + range.layerCount; arrayLayer++) {
+ if (clearValue == TextureBase::ClearValue::Zero &&
+ IsSubresourceContentInitialized(SubresourceRange::SingleMipAndLayer(
+ level, arrayLayer, Aspect::Color))) {
+ // Skip lazy clears if already initialized.
+ continue;
+ }
- for (uint32_t arrayLayer = range.baseArrayLayer;
- arrayLayer < range.baseArrayLayer + range.layerCount; ++arrayLayer) {
- if (clearValue == TextureBase::ClearValue::Zero &&
- IsSubresourceContentInitialized(
- SubresourceRange::SingleMipAndLayer(level, arrayLayer, aspect))) {
- // Skip lazy clears if already initialized.
- continue;
+ for (uint32_t z = 0; z < depth; ++z) {
+ if (descriptor == nullptr) {
+ // Note that this creates a descriptor that's autoreleased so we
+ // don't use AcquireNSRef
+ descriptor = [MTLRenderPassDescriptor renderPassDescriptor];
}
- MTLBlitOption blitOption = ComputeMTLBlitOption(GetFormat(), aspect);
- [commandContext->EnsureBlit()
- copyFromBuffer:uploadBuffer
- sourceOffset:uploadHandle.startOffset
- sourceBytesPerRow:largestMipBytesPerRow
- sourceBytesPerImage:largestMipBytesPerImage
- sourceSize:MTLSizeMake(virtualSize.width, virtualSize.height,
- virtualSize.depthOrArrayLayers)
- toTexture:GetMTLTexture()
- destinationSlice:arrayLayer
- destinationLevel:level
- destinationOrigin:MTLOriginMake(0, 0, 0)
- options:blitOption];
+ [*descriptor colorAttachments][attachment].texture = GetMTLTexture();
+ [*descriptor colorAttachments][attachment].loadAction = MTLLoadActionClear;
+ [*descriptor colorAttachments][attachment].storeAction =
+ MTLStoreActionStore;
+ [*descriptor colorAttachments][attachment].clearColor =
+ MTLClearColorMake(dClearColor, dClearColor, dClearColor, dClearColor);
+ [*descriptor colorAttachments][attachment].level = level;
+ [*descriptor colorAttachments][attachment].slice = arrayLayer;
+ [*descriptor colorAttachments][attachment].depthPlane = z;
+
+ attachment++;
+
+ if (attachment == kMaxColorAttachments) {
+ attachment = 0;
+ DAWN_TRY(EncodeEmptyMetalRenderPass(
+ device, commandContext, descriptor.Get(),
+ GetMipLevelSingleSubresourceVirtualSize(level)));
+ descriptor = nullptr;
+ }
}
}
+
+ if (descriptor != nullptr) {
+ DAWN_TRY(
+ EncodeEmptyMetalRenderPass(device, commandContext, descriptor.Get(),
+ GetMipLevelSingleSubresourceVirtualSize(level)));
+ }
}
}
+ } else {
+ ASSERT(!IsMultisampledTexture());
+
+ // Encode a buffer to texture copy to clear each subresource.
+ for (Aspect aspect : IterateEnumMask(range.aspects)) {
+ // Compute the buffer size big enough to fill the largest mip.
+ const TexelBlockInfo& blockInfo = GetFormat().GetAspectInfo(aspect).block;
+
+ // Computations for the bytes per row / image height are done using the physical size
+ // so that enough data is reserved for compressed textures.
+ Extent3D largestMipSize = GetMipLevelSingleSubresourcePhysicalSize(range.baseMipLevel);
+ uint32_t largestMipBytesPerRow =
+ (largestMipSize.width / blockInfo.width) * blockInfo.byteSize;
+ uint64_t largestMipBytesPerImage = static_cast<uint64_t>(largestMipBytesPerRow) *
+ (largestMipSize.height / blockInfo.height);
+ uint64_t bufferSize = largestMipBytesPerImage * largestMipSize.depthOrArrayLayers;
+
+ if (bufferSize > std::numeric_limits<NSUInteger>::max()) {
+ return DAWN_OUT_OF_MEMORY_ERROR("Unable to allocate buffer.");
+ }
+
+ DynamicUploader* uploader = device->GetDynamicUploader();
+ UploadHandle uploadHandle;
+ DAWN_TRY_ASSIGN(uploadHandle,
+ uploader->Allocate(bufferSize, device->GetPendingCommandSerial(),
+ blockInfo.byteSize));
+ memset(uploadHandle.mappedBuffer, clearColor, bufferSize);
+
+ id<MTLBuffer> uploadBuffer = ToBackend(uploadHandle.stagingBuffer)->GetBufferHandle();
+
+ for (uint32_t level = range.baseMipLevel; level < range.baseMipLevel + range.levelCount;
+ ++level) {
+ Extent3D virtualSize = GetMipLevelSingleSubresourceVirtualSize(level);
+
+ for (uint32_t arrayLayer = range.baseArrayLayer;
+ arrayLayer < range.baseArrayLayer + range.layerCount; ++arrayLayer) {
+ if (clearValue == TextureBase::ClearValue::Zero &&
+ IsSubresourceContentInitialized(
+ SubresourceRange::SingleMipAndLayer(level, arrayLayer, aspect))) {
+ // Skip lazy clears if already initialized.
+ continue;
+ }
- if (clearValue == TextureBase::ClearValue::Zero) {
- SetIsSubresourceContentInitialized(true, range);
- device->IncrementLazyClearCountForTesting();
+ MTLBlitOption blitOption = ComputeMTLBlitOption(GetFormat(), aspect);
+ [commandContext->EnsureBlit()
+ copyFromBuffer:uploadBuffer
+ sourceOffset:uploadHandle.startOffset
+ sourceBytesPerRow:largestMipBytesPerRow
+ sourceBytesPerImage:largestMipBytesPerImage
+ sourceSize:MTLSizeMake(virtualSize.width, virtualSize.height,
+ virtualSize.depthOrArrayLayers)
+ toTexture:GetMTLTexture()
+ destinationSlice:arrayLayer
+ destinationLevel:level
+ destinationOrigin:MTLOriginMake(0, 0, 0)
+ options:blitOption];
+ }
+ }
}
- return {};
}
- void Texture::EnsureSubresourceContentInitialized(CommandRecordingContext* commandContext,
- const SubresourceRange& range) {
- if (!GetDevice()->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse)) {
- return;
- }
- if (!IsSubresourceContentInitialized(range)) {
- // If subresource has not been initialized, clear it to black as it could
- // contain dirty bits from recycled memory
- GetDevice()->ConsumedError(
- ClearTexture(commandContext, range, TextureBase::ClearValue::Zero));
- }
+ if (clearValue == TextureBase::ClearValue::Zero) {
+ SetIsSubresourceContentInitialized(true, range);
+ device->IncrementLazyClearCountForTesting();
}
+ return {};
+}
- // static
- ResultOrError<Ref<TextureView>> TextureView::Create(TextureBase* texture,
- const TextureViewDescriptor* descriptor) {
- Ref<TextureView> view = AcquireRef(new TextureView(texture, descriptor));
- DAWN_TRY(view->Initialize(descriptor));
- return view;
+void Texture::EnsureSubresourceContentInitialized(CommandRecordingContext* commandContext,
+ const SubresourceRange& range) {
+ if (!GetDevice()->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse)) {
+ return;
+ }
+ if (!IsSubresourceContentInitialized(range)) {
+ // If subresource has not been initialized, clear it to black as it could
+ // contain dirty bits from recycled memory
+ GetDevice()->ConsumedError(
+ ClearTexture(commandContext, range, TextureBase::ClearValue::Zero));
}
+}
- MaybeError TextureView::Initialize(const TextureViewDescriptor* descriptor) {
- Texture* texture = ToBackend(GetTexture());
+// static
+ResultOrError<Ref<TextureView>> TextureView::Create(TextureBase* texture,
+ const TextureViewDescriptor* descriptor) {
+ Ref<TextureView> view = AcquireRef(new TextureView(texture, descriptor));
+ DAWN_TRY(view->Initialize(descriptor));
+ return view;
+}
- // Texture could be destroyed by the time we make a view.
- if (GetTexture()->GetTextureState() == Texture::TextureState::Destroyed) {
- return {};
- }
+MaybeError TextureView::Initialize(const TextureViewDescriptor* descriptor) {
+ Texture* texture = ToBackend(GetTexture());
- id<MTLTexture> mtlTexture = texture->GetMTLTexture();
-
- if (!RequiresCreatingNewTextureView(texture, descriptor)) {
- mMtlTextureView = mtlTexture;
- } else if (texture->GetFormat().IsMultiPlanar()) {
- NSRef<MTLTextureDescriptor> mtlDescRef = AcquireNSRef([MTLTextureDescriptor new]);
- MTLTextureDescriptor* mtlDesc = mtlDescRef.Get();
-
- mtlDesc.sampleCount = texture->GetSampleCount();
- mtlDesc.usage = MetalTextureUsage(texture->GetFormat(), texture->GetInternalUsage(),
- texture->GetSampleCount());
- mtlDesc.pixelFormat = MetalPixelFormat(descriptor->format);
- mtlDesc.mipmapLevelCount = texture->GetNumMipLevels();
- mtlDesc.storageMode = kIOSurfaceStorageMode;
-
- uint32_t plane = GetIOSurfacePlane(descriptor->aspect);
- mtlDesc.width = IOSurfaceGetWidthOfPlane(texture->GetIOSurface(), plane);
- mtlDesc.height = IOSurfaceGetHeightOfPlane(texture->GetIOSurface(), plane);
-
- // Multiplanar texture is validated to only have single layer, single mipLevel
- // and 2d textures (depth == 1)
- ASSERT(texture->GetArrayLayers() == 1 &&
- texture->GetDimension() == wgpu::TextureDimension::e2D &&
- texture->GetNumMipLevels() == 1);
- mtlDesc.arrayLength = 1;
- mtlDesc.depth = 1;
+ // Texture could be destroyed by the time we make a view.
+ if (GetTexture()->GetTextureState() == Texture::TextureState::Destroyed) {
+ return {};
+ }
- mMtlTextureView = AcquireNSPRef([ToBackend(GetDevice())->GetMTLDevice()
- newTextureWithDescriptor:mtlDesc
- iosurface:texture->GetIOSurface()
- plane:plane]);
- if (mMtlTextureView == nil) {
- return DAWN_INTERNAL_ERROR(
- "Failed to create MTLTexture view for external texture.");
- }
- } else {
- MTLPixelFormat viewFormat = MetalPixelFormat(descriptor->format);
- MTLPixelFormat textureFormat = MetalPixelFormat(GetTexture()->GetFormat().format);
- if (descriptor->aspect == wgpu::TextureAspect::StencilOnly &&
- textureFormat != MTLPixelFormatStencil8) {
- if (@available(macOS 10.12, iOS 10.0, *)) {
- if (textureFormat == MTLPixelFormatDepth32Float_Stencil8) {
- viewFormat = MTLPixelFormatX32_Stencil8;
- }
-#if defined(DAWN_PLATFORM_MACOS)
- else if (textureFormat == MTLPixelFormatDepth24Unorm_Stencil8) {
- viewFormat = MTLPixelFormatX24_Stencil8;
- }
+ id<MTLTexture> mtlTexture = texture->GetMTLTexture();
+
+ if (!RequiresCreatingNewTextureView(texture, descriptor)) {
+ mMtlTextureView = mtlTexture;
+ } else if (texture->GetFormat().IsMultiPlanar()) {
+ NSRef<MTLTextureDescriptor> mtlDescRef = AcquireNSRef([MTLTextureDescriptor new]);
+ MTLTextureDescriptor* mtlDesc = mtlDescRef.Get();
+
+ mtlDesc.sampleCount = texture->GetSampleCount();
+ mtlDesc.usage = MetalTextureUsage(texture->GetFormat(), texture->GetInternalUsage());
+ mtlDesc.pixelFormat = MetalPixelFormat(descriptor->format);
+ mtlDesc.mipmapLevelCount = texture->GetNumMipLevels();
+ mtlDesc.storageMode = kIOSurfaceStorageMode;
+
+ uint32_t plane = GetIOSurfacePlane(descriptor->aspect);
+ mtlDesc.width = IOSurfaceGetWidthOfPlane(texture->GetIOSurface(), plane);
+ mtlDesc.height = IOSurfaceGetHeightOfPlane(texture->GetIOSurface(), plane);
+
+        // A multiplanar texture is validated to have a single layer, a single mip level,
+        // and a 2D dimension (depth == 1).
+ ASSERT(texture->GetArrayLayers() == 1 &&
+ texture->GetDimension() == wgpu::TextureDimension::e2D &&
+ texture->GetNumMipLevels() == 1);
+ mtlDesc.arrayLength = 1;
+ mtlDesc.depth = 1;
+
+ mMtlTextureView = AcquireNSPRef([ToBackend(GetDevice())->GetMTLDevice()
+ newTextureWithDescriptor:mtlDesc
+ iosurface:texture->GetIOSurface()
+ plane:plane]);
+ if (mMtlTextureView == nil) {
+ return DAWN_INTERNAL_ERROR("Failed to create MTLTexture view for external texture.");
+ }
+ } else {
+ MTLPixelFormat viewFormat = MetalPixelFormat(descriptor->format);
+ MTLPixelFormat textureFormat = MetalPixelFormat(GetTexture()->GetFormat().format);
+ if (descriptor->aspect == wgpu::TextureAspect::StencilOnly &&
+ textureFormat != MTLPixelFormatStencil8) {
+ if (@available(macOS 10.12, iOS 10.0, *)) {
+ if (textureFormat == MTLPixelFormatDepth32Float_Stencil8) {
+ viewFormat = MTLPixelFormatX32_Stencil8;
+ }
+#if DAWN_PLATFORM_IS(MACOS)
+ else if (textureFormat == MTLPixelFormatDepth24Unorm_Stencil8) {
+ viewFormat = MTLPixelFormatX24_Stencil8;
+ }
#endif
- else {
- UNREACHABLE();
- }
- } else {
- // TODO(enga): Add a workaround to back combined depth/stencil textures
- // with Sampled usage using two separate textures.
- // Or, consider always using the workaround for D32S8.
- GetDevice()->ConsumedError(
- DAWN_DEVICE_LOST_ERROR("Cannot create stencil-only texture view of "
- "combined depth/stencil format."));
+ else {
+ UNREACHABLE();
}
- } else if (GetTexture()->GetFormat().HasDepth() &&
- GetTexture()->GetFormat().HasStencil()) {
- // Depth-only views for depth/stencil textures in Metal simply use the original
- // texture's format.
- viewFormat = textureFormat;
- }
-
- MTLTextureType textureViewType =
- MetalTextureViewType(descriptor->dimension, texture->GetSampleCount());
- auto mipLevelRange = NSMakeRange(descriptor->baseMipLevel, descriptor->mipLevelCount);
- auto arrayLayerRange =
- NSMakeRange(descriptor->baseArrayLayer, descriptor->arrayLayerCount);
-
- mMtlTextureView =
- AcquireNSPRef([mtlTexture newTextureViewWithPixelFormat:viewFormat
- textureType:textureViewType
- levels:mipLevelRange
- slices:arrayLayerRange]);
- if (mMtlTextureView == nil) {
- return DAWN_INTERNAL_ERROR("Failed to create MTLTexture view.");
+ } else {
+ // TODO(enga): Add a workaround to back combined depth/stencil textures
+ // with Sampled usage using two separate textures.
+ // Or, consider always using the workaround for D32S8.
+ GetDevice()->ConsumedError(
+ DAWN_DEVICE_LOST_ERROR("Cannot create stencil-only texture view of "
+ "combined depth/stencil format."));
}
+ } else if (GetTexture()->GetFormat().HasDepth() && GetTexture()->GetFormat().HasStencil()) {
+ // Depth-only views for depth/stencil textures in Metal simply use the original
+ // texture's format.
+ viewFormat = textureFormat;
}
- return {};
- }
-
- id<MTLTexture> TextureView::GetMTLTexture() const {
- ASSERT(mMtlTextureView != nullptr);
- return mMtlTextureView.Get();
+ MTLTextureType textureViewType =
+ MetalTextureViewType(descriptor->dimension, texture->GetSampleCount());
+ auto mipLevelRange = NSMakeRange(descriptor->baseMipLevel, descriptor->mipLevelCount);
+ auto arrayLayerRange = NSMakeRange(descriptor->baseArrayLayer, descriptor->arrayLayerCount);
+
+ mMtlTextureView = AcquireNSPRef([mtlTexture newTextureViewWithPixelFormat:viewFormat
+ textureType:textureViewType
+ levels:mipLevelRange
+ slices:arrayLayerRange]);
+ if (mMtlTextureView == nil) {
+ return DAWN_INTERNAL_ERROR("Failed to create MTLTexture view.");
+ }
}
- TextureView::AttachmentInfo TextureView::GetAttachmentInfo() const {
- ASSERT(GetTexture()->GetInternalUsage() & wgpu::TextureUsage::RenderAttachment);
- // Use our own view if the formats do not match.
- // If the formats do not match, format reinterpretation will be required.
- // Note: Depth/stencil formats don't support reinterpretation.
- // Also, we compute |useOwnView| here instead of relying on whether or not
- // a view was created in Initialize, because rendering to a depth/stencil
- // texture on Metal only works when using the original texture, not a view.
- bool useOwnView = GetFormat().format != GetTexture()->GetFormat().format &&
- !GetTexture()->GetFormat().HasDepthOrStencil();
- if (useOwnView) {
- ASSERT(mMtlTextureView.Get());
- return {mMtlTextureView, 0, 0};
- }
- AttachmentInfo info;
- info.texture = ToBackend(GetTexture())->GetMTLTexture();
- info.baseMipLevel = GetBaseMipLevel();
- info.baseArrayLayer = GetBaseArrayLayer();
- return info;
+ return {};
+}
+
+id<MTLTexture> TextureView::GetMTLTexture() const {
+ ASSERT(mMtlTextureView != nullptr);
+ return mMtlTextureView.Get();
+}
+
+TextureView::AttachmentInfo TextureView::GetAttachmentInfo() const {
+ ASSERT(GetTexture()->GetInternalUsage() & wgpu::TextureUsage::RenderAttachment);
+ // Use our own view if the formats do not match.
+ // If the formats do not match, format reinterpretation will be required.
+ // Note: Depth/stencil formats don't support reinterpretation.
+ // Also, we compute |useOwnView| here instead of relying on whether or not
+ // a view was created in Initialize, because rendering to a depth/stencil
+ // texture on Metal only works when using the original texture, not a view.
+ bool useOwnView = GetFormat().format != GetTexture()->GetFormat().format &&
+ !GetTexture()->GetFormat().HasDepthOrStencil();
+ if (useOwnView) {
+ ASSERT(mMtlTextureView.Get());
+ return {mMtlTextureView, 0, 0};
}
+ AttachmentInfo info;
+ info.texture = ToBackend(GetTexture())->GetMTLTexture();
+ info.baseMipLevel = GetBaseMipLevel();
+ info.baseArrayLayer = GetBaseArrayLayer();
+ return info;
+}
} // namespace dawn::native::metal
diff --git a/chromium/third_party/dawn/src/dawn/native/metal/UtilsMetal.h b/chromium/third_party/dawn/src/dawn/native/metal/UtilsMetal.h
index 3a0d76dc6f4..9ee31bd618d 100644
--- a/chromium/third_party/dawn/src/dawn/native/metal/UtilsMetal.h
+++ b/chromium/third_party/dawn/src/dawn/native/metal/UtilsMetal.h
@@ -23,63 +23,85 @@
#import <Metal/Metal.h>
namespace dawn::native {
- struct ProgrammableStage;
- struct EntryPointMetadata;
- enum class SingleShaderStage;
-}
+struct ProgrammableStage;
+struct EntryPointMetadata;
+enum class SingleShaderStage;
+} // namespace dawn::native
namespace dawn::native::metal {
- MTLCompareFunction ToMetalCompareFunction(wgpu::CompareFunction compareFunction);
+MTLCompareFunction ToMetalCompareFunction(wgpu::CompareFunction compareFunction);
- struct TextureBufferCopySplit {
- static constexpr uint32_t kMaxTextureBufferCopyRegions = 3;
+struct TextureBufferCopySplit {
+ static constexpr uint32_t kMaxTextureBufferCopyRegions = 3;
- struct CopyInfo {
- NSUInteger bufferOffset;
- NSUInteger bytesPerRow;
- NSUInteger bytesPerImage;
- Origin3D textureOrigin;
- Extent3D copyExtent;
- };
-
- uint32_t count = 0;
- std::array<CopyInfo, kMaxTextureBufferCopyRegions> copies;
-
- auto begin() const {
- return copies.begin();
- }
-
- auto end() const {
- return copies.begin() + count;
- }
+ struct CopyInfo {
+ NSUInteger bufferOffset;
+ NSUInteger bytesPerRow;
+ NSUInteger bytesPerImage;
+ Origin3D textureOrigin;
+ Extent3D copyExtent;
};
- TextureBufferCopySplit ComputeTextureBufferCopySplit(const Texture* texture,
- uint32_t mipLevel,
- Origin3D origin,
- Extent3D copyExtent,
- uint64_t bufferSize,
- uint64_t bufferOffset,
- uint32_t bytesPerRow,
- uint32_t rowsPerImage,
- Aspect aspect);
-
- void EnsureDestinationTextureInitialized(CommandRecordingContext* commandContext,
- Texture* texture,
- const TextureCopy& dst,
- const Extent3D& size);
-
- MTLBlitOption ComputeMTLBlitOption(const Format& format, Aspect aspect);
-
- // Helper function to create function with constant values wrapped in
- // if available branch
- MaybeError CreateMTLFunction(const ProgrammableStage& programmableStage,
- SingleShaderStage singleShaderStage,
- PipelineLayout* pipelineLayout,
- ShaderModule::MetalFunctionData* functionData,
- uint32_t sampleMask = 0xFFFFFFFF,
- const RenderPipeline* renderPipeline = nullptr);
+ uint32_t count = 0;
+ std::array<CopyInfo, kMaxTextureBufferCopyRegions> copies;
+
+ auto begin() const { return copies.begin(); }
+
+ auto end() const { return copies.begin() + count; }
+};
+
+TextureBufferCopySplit ComputeTextureBufferCopySplit(const Texture* texture,
+ uint32_t mipLevel,
+ Origin3D origin,
+ Extent3D copyExtent,
+ uint64_t bufferSize,
+ uint64_t bufferOffset,
+ uint32_t bytesPerRow,
+ uint32_t rowsPerImage,
+ Aspect aspect);
+
+void EnsureDestinationTextureInitialized(CommandRecordingContext* commandContext,
+ Texture* texture,
+ const TextureCopy& dst,
+ const Extent3D& size);
+
+MTLBlitOption ComputeMTLBlitOption(const Format& format, Aspect aspect);
+
+// Helper function to create an MTLFunction with constant values, wrapped in an
+// @available branch.
+MaybeError CreateMTLFunction(const ProgrammableStage& programmableStage,
+ SingleShaderStage singleShaderStage,
+ PipelineLayout* pipelineLayout,
+ ShaderModule::MetalFunctionData* functionData,
+ uint32_t sampleMask = 0xFFFFFFFF,
+ const RenderPipeline* renderPipeline = nullptr);
+
+// Allow using MTLStoreActionStoreAndMultisampleResolve because the logic in the backend
+// first computes what the "best" Metal render pass descriptor is, then fixes it up if we
+// are not on macOS 10.12 (i.e. the EmulateStoreAndMSAAResolve toggle is on).
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wunguarded-availability"
+constexpr MTLStoreAction kMTLStoreActionStoreAndMultisampleResolve =
+ MTLStoreActionStoreAndMultisampleResolve;
+#pragma clang diagnostic pop
+
+// Helper functions to encode Metal render passes that take care of multiple workarounds that
+// happen at the render pass start and end. Because workarounds wrap the encoding of the render
+// pass, the encoding must be entirely done by the `encodeInside` callback.
+// At the end of this function, `commandContext` will have no encoder open.
+using EncodeInsideRenderPass = std::function<MaybeError(id<MTLRenderCommandEncoder>)>;
+MaybeError EncodeMetalRenderPass(Device* device,
+ CommandRecordingContext* commandContext,
+ MTLRenderPassDescriptor* mtlRenderPass,
+ uint32_t width,
+ uint32_t height,
+ EncodeInsideRenderPass encodeInside);
+
+MaybeError EncodeEmptyMetalRenderPass(Device* device,
+ CommandRecordingContext* commandContext,
+ MTLRenderPassDescriptor* mtlRenderPass,
+ Extent3D size);
} // namespace dawn::native::metal
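As a reading aid (not part of the patch): a minimal sketch of how a caller is expected to drive the new EncodeMetalRenderPass helper declared above, with all encoding done inside the callback. The function name EncodeExamplePass, the 256x256 pass size, and the debug label are hypothetical.

MaybeError EncodeExamplePass(Device* device,
                             CommandRecordingContext* commandContext,
                             MTLRenderPassDescriptor* mtlRenderPass) {
    // The helper opens the render encoder, applies the attachment workarounds
    // around the pass, and guarantees no encoder is left open on the command
    // context afterwards, so every command must be recorded inside the callback.
    return EncodeMetalRenderPass(device, commandContext, mtlRenderPass,
                                 /*width=*/256, /*height=*/256,
                                 [](id<MTLRenderCommandEncoder> encoder) -> MaybeError {
                                     // Draw calls for the pass would be recorded here.
                                     encoder.label = @"example pass";  // hypothetical label
                                     return {};
                                 });
}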
diff --git a/chromium/third_party/dawn/src/dawn/native/metal/UtilsMetal.mm b/chromium/third_party/dawn/src/dawn/native/metal/UtilsMetal.mm
index e2e0ba3357e..22d3681ab50 100644
--- a/chromium/third_party/dawn/src/dawn/native/metal/UtilsMetal.mm
+++ b/chromium/third_party/dawn/src/dawn/native/metal/UtilsMetal.mm
@@ -13,276 +13,550 @@
// limitations under the License.
#include "dawn/native/metal/UtilsMetal.h"
+
+#include "dawn/common/Assert.h"
#include "dawn/native/CommandBuffer.h"
#include "dawn/native/Pipeline.h"
#include "dawn/native/ShaderModule.h"
-#include "dawn/common/Assert.h"
-
namespace dawn::native::metal {
- MTLCompareFunction ToMetalCompareFunction(wgpu::CompareFunction compareFunction) {
- switch (compareFunction) {
- case wgpu::CompareFunction::Never:
- return MTLCompareFunctionNever;
- case wgpu::CompareFunction::Less:
- return MTLCompareFunctionLess;
- case wgpu::CompareFunction::LessEqual:
- return MTLCompareFunctionLessEqual;
- case wgpu::CompareFunction::Greater:
- return MTLCompareFunctionGreater;
- case wgpu::CompareFunction::GreaterEqual:
- return MTLCompareFunctionGreaterEqual;
- case wgpu::CompareFunction::NotEqual:
- return MTLCompareFunctionNotEqual;
- case wgpu::CompareFunction::Equal:
- return MTLCompareFunctionEqual;
- case wgpu::CompareFunction::Always:
- return MTLCompareFunctionAlways;
-
- case wgpu::CompareFunction::Undefined:
- UNREACHABLE();
- }
+namespace {
+// A helper struct to track state while doing workarounds for Metal render passes. It
+// contains a temporary texture and information about the attachment it replaces.
+// Helper methods encode copies between the two textures.
+struct SavedMetalAttachment {
+ id<MTLTexture> texture = nil;
+ NSUInteger level;
+ NSUInteger slice;
+
+ NSPRef<id<MTLTexture>> temporary;
+
+ void CopyFromTemporaryToAttachment(CommandRecordingContext* commandContext) {
+ [commandContext->EnsureBlit()
+ copyFromTexture:temporary.Get()
+ sourceSlice:0
+ sourceLevel:0
+ sourceOrigin:MTLOriginMake(0, 0, 0)
+ sourceSize:MTLSizeMake([temporary.Get() width], [temporary.Get() height], 1)
+ toTexture:texture
+ destinationSlice:slice
+ destinationLevel:level
+ destinationOrigin:MTLOriginMake(0, 0, 0)];
}
- TextureBufferCopySplit ComputeTextureBufferCopySplit(const Texture* texture,
- uint32_t mipLevel,
- Origin3D origin,
- Extent3D copyExtent,
- uint64_t bufferSize,
- uint64_t bufferOffset,
- uint32_t bytesPerRow,
- uint32_t rowsPerImage,
- Aspect aspect) {
- TextureBufferCopySplit copy;
- const Format textureFormat = texture->GetFormat();
- const TexelBlockInfo& blockInfo = textureFormat.GetAspectInfo(aspect).block;
-
- // When copying textures from/to an unpacked buffer, the Metal validation layer doesn't
- // compute the correct range when checking if the buffer is big enough to contain the
- // data for the whole copy. Instead of looking at the position of the last texel in the
- // buffer, it computes the volume of the 3D box with bytesPerRow * (rowsPerImage /
- // format.blockHeight) * copySize.depthOrArrayLayers. For example considering the pixel
- // buffer below where in memory, each row data (D) of the texture is followed by some
- // padding data (P):
- // |DDDDDDD|PP|
- // |DDDDDDD|PP|
- // |DDDDDDD|PP|
- // |DDDDDDD|PP|
- // |DDDDDDA|PP|
- // The last pixel read will be A, but the driver will think it is the whole last padding
- // row, causing it to generate an error when the pixel buffer is just big enough.
-
- // We work around this limitation by detecting when Metal would complain and copy the
- // last image and row separately using tight sourceBytesPerRow or sourceBytesPerImage.
- uint32_t bytesPerImage = bytesPerRow * rowsPerImage;
-
- // Metal validation layer requires that if the texture's pixel format is a compressed
- // format, the sourceSize must be a multiple of the pixel format's block size or be
- // clamped to the edge of the texture if the block extends outside the bounds of a
- // texture.
- const Extent3D clampedCopyExtent =
- texture->ClampToMipLevelVirtualSize(mipLevel, origin, copyExtent);
-
- // Check whether buffer size is big enough.
- bool needWorkaround =
- bufferSize - bufferOffset < bytesPerImage * copyExtent.depthOrArrayLayers;
- if (!needWorkaround) {
- copy.count = 1;
- copy.copies[0].bufferOffset = bufferOffset;
- copy.copies[0].bytesPerRow = bytesPerRow;
- copy.copies[0].bytesPerImage = bytesPerImage;
- copy.copies[0].textureOrigin = origin;
- copy.copies[0].copyExtent = {clampedCopyExtent.width, clampedCopyExtent.height,
- copyExtent.depthOrArrayLayers};
- return copy;
- }
-
- uint64_t currentOffset = bufferOffset;
-
- // Doing all the copy except the last image.
- if (copyExtent.depthOrArrayLayers > 1) {
- copy.copies[copy.count].bufferOffset = currentOffset;
- copy.copies[copy.count].bytesPerRow = bytesPerRow;
- copy.copies[copy.count].bytesPerImage = bytesPerImage;
- copy.copies[copy.count].textureOrigin = origin;
- copy.copies[copy.count].copyExtent = {clampedCopyExtent.width, clampedCopyExtent.height,
- copyExtent.depthOrArrayLayers - 1};
-
- ++copy.count;
+ void CopyFromAttachmentToTemporary(CommandRecordingContext* commandContext) {
+ [commandContext->EnsureBlit()
+ copyFromTexture:texture
+ sourceSlice:slice
+ sourceLevel:level
+ sourceOrigin:MTLOriginMake(0, 0, 0)
+ sourceSize:MTLSizeMake([temporary.Get() width], [temporary.Get() height], 1)
+ toTexture:temporary.Get()
+ destinationSlice:0
+ destinationLevel:0
+ destinationOrigin:MTLOriginMake(0, 0, 0)];
+ }
+};
+
+// Common code between both kinds of attachments swaps.
+ResultOrError<SavedMetalAttachment> SaveAttachmentCreateTemporary(Device* device,
+ id<MTLTexture> attachmentTexture,
+ NSUInteger attachmentLevel,
+ NSUInteger attachmentSlice) {
+ // Save the attachment.
+ SavedMetalAttachment result;
+ result.texture = attachmentTexture;
+ result.level = attachmentLevel;
+ result.slice = attachmentSlice;
+
+ // Create the temporary texture.
+ NSRef<MTLTextureDescriptor> mtlDescRef = AcquireNSRef([MTLTextureDescriptor new]);
+ MTLTextureDescriptor* mtlDesc = mtlDescRef.Get();
+
+ mtlDesc.textureType = MTLTextureType2D;
+ mtlDesc.usage = MTLTextureUsageRenderTarget;
+ mtlDesc.pixelFormat = [result.texture pixelFormat];
+ mtlDesc.width = std::max([result.texture width] >> attachmentLevel, NSUInteger(1));
+ mtlDesc.height = std::max([result.texture height] >> attachmentLevel, NSUInteger(1));
+ mtlDesc.depth = 1;
+ mtlDesc.mipmapLevelCount = 1;
+ mtlDesc.arrayLength = 1;
+ mtlDesc.storageMode = MTLStorageModePrivate;
+ mtlDesc.sampleCount = [result.texture sampleCount];
+
+ result.temporary = AcquireNSPRef([device->GetMTLDevice() newTextureWithDescriptor:mtlDesc]);
+ if (result.temporary == nil) {
+ return DAWN_OUT_OF_MEMORY_ERROR("Allocation of temporary texture failed.");
+ }
- // Update offset to copy to the last image.
- currentOffset += (copyExtent.depthOrArrayLayers - 1) * bytesPerImage;
+ return result;
+}
+
+// Patches the render pass attachment to replace it with a temporary texture. Returns a
+// SavedMetalAttachment that can be used to easily copy between the original attachment and
+// the temporary.
+ResultOrError<SavedMetalAttachment> PatchAttachmentWithTemporary(
+ Device* device,
+ MTLRenderPassAttachmentDescriptor* attachment) {
+ SavedMetalAttachment result;
+ DAWN_TRY_ASSIGN(result, SaveAttachmentCreateTemporary(device, attachment.texture,
+ attachment.level, attachment.slice));
+
+ // Replace the attachment with the temporary
+ attachment.texture = result.temporary.Get();
+ attachment.level = 0;
+ attachment.slice = 0;
+
+ return result;
+}
+
+// Same as PatchAttachmentWithTemporary but for the resolve attachment.
+ResultOrError<SavedMetalAttachment> PatchResolveAttachmentWithTemporary(
+ Device* device,
+ MTLRenderPassAttachmentDescriptor* attachment) {
+ SavedMetalAttachment result;
+ DAWN_TRY_ASSIGN(
+ result, SaveAttachmentCreateTemporary(device, attachment.resolveTexture,
+ attachment.resolveLevel, attachment.resolveSlice));
+
+    // Replace the resolve attachment with the temporary.
+ attachment.resolveTexture = result.temporary.Get();
+ attachment.resolveLevel = 0;
+ attachment.resolveSlice = 0;
+
+ return result;
+}
+
+// Helper function for Toggle EmulateStoreAndMSAAResolve
+void ResolveInAnotherRenderPass(
+ CommandRecordingContext* commandContext,
+ const MTLRenderPassDescriptor* mtlRenderPass,
+ const std::array<id<MTLTexture>, kMaxColorAttachments>& resolveTextures) {
+ // Note that this creates a descriptor that's autoreleased so we don't use AcquireNSRef
+ NSRef<MTLRenderPassDescriptor> mtlRenderPassForResolveRef =
+ [MTLRenderPassDescriptor renderPassDescriptor];
+ MTLRenderPassDescriptor* mtlRenderPassForResolve = mtlRenderPassForResolveRef.Get();
+
+ for (uint32_t i = 0; i < kMaxColorAttachments; ++i) {
+ if (resolveTextures[i] == nullptr) {
+ continue;
}
- // Doing all the copy in last image except the last row.
- uint32_t copyBlockRowCount = copyExtent.height / blockInfo.height;
- if (copyBlockRowCount > 1) {
- copy.copies[copy.count].bufferOffset = currentOffset;
- copy.copies[copy.count].bytesPerRow = bytesPerRow;
- copy.copies[copy.count].bytesPerImage = bytesPerRow * (copyBlockRowCount - 1);
- copy.copies[copy.count].textureOrigin = {origin.x, origin.y,
- origin.z + copyExtent.depthOrArrayLayers - 1};
+ mtlRenderPassForResolve.colorAttachments[i].texture =
+ mtlRenderPass.colorAttachments[i].texture;
+ mtlRenderPassForResolve.colorAttachments[i].loadAction = MTLLoadActionLoad;
+ mtlRenderPassForResolve.colorAttachments[i].storeAction = MTLStoreActionMultisampleResolve;
+ mtlRenderPassForResolve.colorAttachments[i].resolveTexture = resolveTextures[i];
+ mtlRenderPassForResolve.colorAttachments[i].resolveLevel =
+ mtlRenderPass.colorAttachments[i].resolveLevel;
+ mtlRenderPassForResolve.colorAttachments[i].resolveSlice =
+ mtlRenderPass.colorAttachments[i].resolveSlice;
+ }
- ASSERT(copyExtent.height - blockInfo.height <
- texture->GetMipLevelVirtualSize(mipLevel).height);
- copy.copies[copy.count].copyExtent = {clampedCopyExtent.width,
- copyExtent.height - blockInfo.height, 1};
+ commandContext->BeginRender(mtlRenderPassForResolve);
+ commandContext->EndRender();
+}
+} // anonymous namespace
+
+MTLCompareFunction ToMetalCompareFunction(wgpu::CompareFunction compareFunction) {
+ switch (compareFunction) {
+ case wgpu::CompareFunction::Never:
+ return MTLCompareFunctionNever;
+ case wgpu::CompareFunction::Less:
+ return MTLCompareFunctionLess;
+ case wgpu::CompareFunction::LessEqual:
+ return MTLCompareFunctionLessEqual;
+ case wgpu::CompareFunction::Greater:
+ return MTLCompareFunctionGreater;
+ case wgpu::CompareFunction::GreaterEqual:
+ return MTLCompareFunctionGreaterEqual;
+ case wgpu::CompareFunction::NotEqual:
+ return MTLCompareFunctionNotEqual;
+ case wgpu::CompareFunction::Equal:
+ return MTLCompareFunctionEqual;
+ case wgpu::CompareFunction::Always:
+ return MTLCompareFunctionAlways;
+
+ case wgpu::CompareFunction::Undefined:
+ UNREACHABLE();
+ }
+}
+
+TextureBufferCopySplit ComputeTextureBufferCopySplit(const Texture* texture,
+ uint32_t mipLevel,
+ Origin3D origin,
+ Extent3D copyExtent,
+ uint64_t bufferSize,
+ uint64_t bufferOffset,
+ uint32_t bytesPerRow,
+ uint32_t rowsPerImage,
+ Aspect aspect) {
+ TextureBufferCopySplit copy;
+ const Format textureFormat = texture->GetFormat();
+ const TexelBlockInfo& blockInfo = textureFormat.GetAspectInfo(aspect).block;
+
+ // When copying textures from/to an unpacked buffer, the Metal validation layer doesn't
+ // compute the correct range when checking if the buffer is big enough to contain the
+ // data for the whole copy. Instead of looking at the position of the last texel in the
+ // buffer, it computes the volume of the 3D box with bytesPerRow * (rowsPerImage /
+ // format.blockHeight) * copySize.depthOrArrayLayers. For example considering the pixel
+ // buffer below where in memory, each row data (D) of the texture is followed by some
+ // padding data (P):
+ // |DDDDDDD|PP|
+ // |DDDDDDD|PP|
+ // |DDDDDDD|PP|
+ // |DDDDDDD|PP|
+ // |DDDDDDA|PP|
+ // The last pixel read will be A, but the driver will think it is the whole last padding
+ // row, causing it to generate an error when the pixel buffer is just big enough.
+
+ // We work around this limitation by detecting when Metal would complain and copy the
+ // last image and row separately using tight sourceBytesPerRow or sourceBytesPerImage.
+ uint32_t bytesPerImage = bytesPerRow * rowsPerImage;
+
+ // Metal validation layer requires that if the texture's pixel format is a compressed
+ // format, the sourceSize must be a multiple of the pixel format's block size or be
+ // clamped to the edge of the texture if the block extends outside the bounds of a
+ // texture.
+ const Extent3D clampedCopyExtent =
+ texture->ClampToMipLevelVirtualSize(mipLevel, origin, copyExtent);
+
+ // Check whether buffer size is big enough.
+ bool needWorkaround = bufferSize - bufferOffset < bytesPerImage * copyExtent.depthOrArrayLayers;
+ if (!needWorkaround) {
+ copy.count = 1;
+ copy.copies[0].bufferOffset = bufferOffset;
+ copy.copies[0].bytesPerRow = bytesPerRow;
+ copy.copies[0].bytesPerImage = bytesPerImage;
+ copy.copies[0].textureOrigin = origin;
+ copy.copies[0].copyExtent = {clampedCopyExtent.width, clampedCopyExtent.height,
+ copyExtent.depthOrArrayLayers};
+ return copy;
+ }
- ++copy.count;
+ uint64_t currentOffset = bufferOffset;
- // Update offset to copy to the last row.
- currentOffset += (copyBlockRowCount - 1) * bytesPerRow;
- }
+ // Doing all the copy except the last image.
+ if (copyExtent.depthOrArrayLayers > 1) {
+ copy.copies[copy.count].bufferOffset = currentOffset;
+ copy.copies[copy.count].bytesPerRow = bytesPerRow;
+ copy.copies[copy.count].bytesPerImage = bytesPerImage;
+ copy.copies[copy.count].textureOrigin = origin;
+ copy.copies[copy.count].copyExtent = {clampedCopyExtent.width, clampedCopyExtent.height,
+ copyExtent.depthOrArrayLayers - 1};
- // Doing the last row copy with the exact number of bytes in last row.
- // Workaround this issue in a way just like the copy to a 1D texture.
- uint32_t lastRowDataSize = (copyExtent.width / blockInfo.width) * blockInfo.byteSize;
- uint32_t lastRowCopyExtentHeight =
- blockInfo.height + clampedCopyExtent.height - copyExtent.height;
- ASSERT(lastRowCopyExtentHeight <= blockInfo.height);
+ ++copy.count;
+ // Update offset to copy to the last image.
+ currentOffset += (copyExtent.depthOrArrayLayers - 1) * bytesPerImage;
+ }
+
+ // Doing all the copy in last image except the last row.
+ uint32_t copyBlockRowCount = copyExtent.height / blockInfo.height;
+ if (copyBlockRowCount > 1) {
copy.copies[copy.count].bufferOffset = currentOffset;
- copy.copies[copy.count].bytesPerRow = lastRowDataSize;
- copy.copies[copy.count].bytesPerImage = lastRowDataSize;
- copy.copies[copy.count].textureOrigin = {origin.x,
- origin.y + copyExtent.height - blockInfo.height,
+ copy.copies[copy.count].bytesPerRow = bytesPerRow;
+ copy.copies[copy.count].bytesPerImage = bytesPerRow * (copyBlockRowCount - 1);
+ copy.copies[copy.count].textureOrigin = {origin.x, origin.y,
origin.z + copyExtent.depthOrArrayLayers - 1};
- copy.copies[copy.count].copyExtent = {clampedCopyExtent.width, lastRowCopyExtentHeight, 1};
+
+ ASSERT(copyExtent.height - blockInfo.height <
+ texture->GetMipLevelSingleSubresourceVirtualSize(mipLevel).height);
+ copy.copies[copy.count].copyExtent = {clampedCopyExtent.width,
+ copyExtent.height - blockInfo.height, 1};
+
++copy.count;
- return copy;
+ // Update offset to copy to the last row.
+ currentOffset += (copyBlockRowCount - 1) * bytesPerRow;
}
- void EnsureDestinationTextureInitialized(CommandRecordingContext* commandContext,
- Texture* texture,
- const TextureCopy& dst,
- const Extent3D& size) {
- ASSERT(texture == dst.texture.Get());
- SubresourceRange range = GetSubresourcesAffectedByCopy(dst, size);
- if (IsCompleteSubresourceCopiedTo(dst.texture.Get(), size, dst.mipLevel)) {
- texture->SetIsSubresourceContentInitialized(true, range);
- } else {
- texture->EnsureSubresourceContentInitialized(commandContext, range);
+ // Doing the last row copy with the exact number of bytes in last row.
+ // Workaround this issue in a way just like the copy to a 1D texture.
+ uint32_t lastRowDataSize = (copyExtent.width / blockInfo.width) * blockInfo.byteSize;
+ uint32_t lastRowCopyExtentHeight =
+ blockInfo.height + clampedCopyExtent.height - copyExtent.height;
+ ASSERT(lastRowCopyExtentHeight <= blockInfo.height);
+
+ copy.copies[copy.count].bufferOffset = currentOffset;
+ copy.copies[copy.count].bytesPerRow = lastRowDataSize;
+ copy.copies[copy.count].bytesPerImage = lastRowDataSize;
+ copy.copies[copy.count].textureOrigin = {origin.x,
+ origin.y + copyExtent.height - blockInfo.height,
+ origin.z + copyExtent.depthOrArrayLayers - 1};
+ copy.copies[copy.count].copyExtent = {clampedCopyExtent.width, lastRowCopyExtentHeight, 1};
+ ++copy.count;
+
+ return copy;
+}
+
+void EnsureDestinationTextureInitialized(CommandRecordingContext* commandContext,
+ Texture* texture,
+ const TextureCopy& dst,
+ const Extent3D& size) {
+ ASSERT(texture == dst.texture.Get());
+ SubresourceRange range = GetSubresourcesAffectedByCopy(dst, size);
+ if (IsCompleteSubresourceCopiedTo(dst.texture.Get(), size, dst.mipLevel)) {
+ texture->SetIsSubresourceContentInitialized(true, range);
+ } else {
+ texture->EnsureSubresourceContentInitialized(commandContext, range);
+ }
+}
+
+MTLBlitOption ComputeMTLBlitOption(const Format& format, Aspect aspect) {
+ ASSERT(HasOneBit(aspect));
+ ASSERT(format.aspects & aspect);
+
+ if (IsSubset(Aspect::Depth | Aspect::Stencil, format.aspects)) {
+ // We only provide a blit option if the format has both depth and stencil.
+ // It is invalid to provide a blit option otherwise.
+ switch (aspect) {
+ case Aspect::Depth:
+ return MTLBlitOptionDepthFromDepthStencil;
+ case Aspect::Stencil:
+ return MTLBlitOptionStencilFromDepthStencil;
+ default:
+ UNREACHABLE();
}
}
+ return MTLBlitOptionNone;
+}
+
+MaybeError CreateMTLFunction(const ProgrammableStage& programmableStage,
+ SingleShaderStage singleShaderStage,
+ PipelineLayout* pipelineLayout,
+ ShaderModule::MetalFunctionData* functionData,
+ uint32_t sampleMask,
+ const RenderPipeline* renderPipeline) {
+ ShaderModule* shaderModule = ToBackend(programmableStage.module.Get());
+ const char* shaderEntryPoint = programmableStage.entryPoint.c_str();
+ const auto& entryPointMetadata = programmableStage.module->GetEntryPoint(shaderEntryPoint);
+ if (entryPointMetadata.overridableConstants.size() == 0) {
+ DAWN_TRY(shaderModule->CreateFunction(shaderEntryPoint, singleShaderStage, pipelineLayout,
+ functionData, nil, sampleMask, renderPipeline));
+ return {};
+ }
- MTLBlitOption ComputeMTLBlitOption(const Format& format, Aspect aspect) {
- ASSERT(HasOneBit(aspect));
- ASSERT(format.aspects & aspect);
-
- if (IsSubset(Aspect::Depth | Aspect::Stencil, format.aspects)) {
- // We only provide a blit option if the format has both depth and stencil.
- // It is invalid to provide a blit option otherwise.
- switch (aspect) {
- case Aspect::Depth:
- return MTLBlitOptionDepthFromDepthStencil;
- case Aspect::Stencil:
- return MTLBlitOptionStencilFromDepthStencil;
+ if (@available(macOS 10.12, *)) {
+ // MTLFunctionConstantValues can only be created within the if available branch
+ NSRef<MTLFunctionConstantValues> constantValues =
+ AcquireNSRef([MTLFunctionConstantValues new]);
+
+ std::unordered_set<std::string> overriddenConstants;
+
+ auto switchType = [&](EntryPointMetadata::OverridableConstant::Type dawnType,
+ MTLDataType* type, OverridableConstantScalar* entry,
+ double value = 0) {
+ switch (dawnType) {
+ case EntryPointMetadata::OverridableConstant::Type::Boolean:
+ *type = MTLDataTypeBool;
+ if (entry) {
+ entry->b = static_cast<int32_t>(value);
+ }
+ break;
+ case EntryPointMetadata::OverridableConstant::Type::Float32:
+ *type = MTLDataTypeFloat;
+ if (entry) {
+ entry->f32 = static_cast<float>(value);
+ }
+ break;
+ case EntryPointMetadata::OverridableConstant::Type::Int32:
+ *type = MTLDataTypeInt;
+ if (entry) {
+ entry->i32 = static_cast<int32_t>(value);
+ }
+ break;
+ case EntryPointMetadata::OverridableConstant::Type::Uint32:
+ *type = MTLDataTypeUInt;
+ if (entry) {
+ entry->u32 = static_cast<uint32_t>(value);
+ }
+ break;
default:
UNREACHABLE();
}
+ };
+
+ for (const auto& [name, value] : programmableStage.constants) {
+ overriddenConstants.insert(name);
+
+ // This is already validated so `name` must exist
+ const auto& moduleConstant = entryPointMetadata.overridableConstants.at(name);
+
+ MTLDataType type;
+ OverridableConstantScalar entry{};
+
+ switchType(moduleConstant.type, &type, &entry, value);
+
+ [constantValues.Get() setConstantValue:&entry type:type atIndex:moduleConstant.id];
}
- return MTLBlitOptionNone;
+
+ // Set shader initialized default values because MSL function_constant
+ // has no default value
+ for (const std::string& name : entryPointMetadata.initializedOverridableConstants) {
+ if (overriddenConstants.count(name) != 0) {
+ // This constant already has overridden value
+ continue;
+ }
+
+ // Must exist because it is validated
+ const auto& moduleConstant = entryPointMetadata.overridableConstants.at(name);
+ ASSERT(moduleConstant.isInitialized);
+ MTLDataType type;
+
+ switchType(moduleConstant.type, &type, nullptr);
+
+ [constantValues.Get() setConstantValue:&moduleConstant.defaultValue
+ type:type
+ atIndex:moduleConstant.id];
+ }
+
+ DAWN_TRY(shaderModule->CreateFunction(shaderEntryPoint, singleShaderStage, pipelineLayout,
+ functionData, constantValues.Get(), sampleMask,
+ renderPipeline));
+ } else {
+ UNREACHABLE();
}
+ return {};
+}
+
+MaybeError EncodeMetalRenderPass(Device* device,
+ CommandRecordingContext* commandContext,
+ MTLRenderPassDescriptor* mtlRenderPass,
+ uint32_t width,
+ uint32_t height,
+ EncodeInsideRenderPass encodeInside) {
+    // This function handles multiple workarounds. Because some cases require multiple
+    // workarounds to happen at the same time, it handles them one by one and calls
+    // itself recursively to handle the next workaround if needed.
+
+ // Handle Toggle AlwaysResolveIntoZeroLevelAndLayer. We must handle this before applying
+ // the store + MSAA resolve workaround, otherwise this toggle will never be handled because
+ // the resolve texture is removed when applying the store + MSAA resolve workaround.
+ if (device->IsToggleEnabled(Toggle::AlwaysResolveIntoZeroLevelAndLayer)) {
+ std::array<SavedMetalAttachment, kMaxColorAttachments> trueResolveAttachments = {};
+ bool workaroundUsed = false;
+ for (uint32_t i = 0; i < kMaxColorAttachments; ++i) {
+ if (mtlRenderPass.colorAttachments[i].resolveTexture == nullptr) {
+ continue;
+ }
- MaybeError CreateMTLFunction(const ProgrammableStage& programmableStage,
- SingleShaderStage singleShaderStage,
- PipelineLayout* pipelineLayout,
- ShaderModule::MetalFunctionData* functionData,
- uint32_t sampleMask,
- const RenderPipeline* renderPipeline) {
- ShaderModule* shaderModule = ToBackend(programmableStage.module.Get());
- const char* shaderEntryPoint = programmableStage.entryPoint.c_str();
- const auto& entryPointMetadata = programmableStage.module->GetEntryPoint(shaderEntryPoint);
- if (entryPointMetadata.overridableConstants.size() == 0) {
- DAWN_TRY(shaderModule->CreateFunction(shaderEntryPoint, singleShaderStage,
- pipelineLayout, functionData, nil, sampleMask,
- renderPipeline));
- return {};
+ if (mtlRenderPass.colorAttachments[i].resolveLevel == 0 &&
+ mtlRenderPass.colorAttachments[i].resolveSlice == 0) {
+ continue;
+ }
+
+ DAWN_TRY_ASSIGN(
+ trueResolveAttachments[i],
+ PatchResolveAttachmentWithTemporary(device, mtlRenderPass.colorAttachments[i]));
+ workaroundUsed = true;
}
- if (@available(macOS 10.12, *)) {
- // MTLFunctionConstantValues can only be created within the if available branch
- NSRef<MTLFunctionConstantValues> constantValues =
- AcquireNSRef([MTLFunctionConstantValues new]);
-
- std::unordered_set<std::string> overriddenConstants;
-
- auto switchType = [&](EntryPointMetadata::OverridableConstant::Type dawnType,
- MTLDataType* type, OverridableConstantScalar* entry,
- double value = 0) {
- switch (dawnType) {
- case EntryPointMetadata::OverridableConstant::Type::Boolean:
- *type = MTLDataTypeBool;
- if (entry) {
- entry->b = static_cast<int32_t>(value);
- }
- break;
- case EntryPointMetadata::OverridableConstant::Type::Float32:
- *type = MTLDataTypeFloat;
- if (entry) {
- entry->f32 = static_cast<float>(value);
- }
- break;
- case EntryPointMetadata::OverridableConstant::Type::Int32:
- *type = MTLDataTypeInt;
- if (entry) {
- entry->i32 = static_cast<int32_t>(value);
- }
- break;
- case EntryPointMetadata::OverridableConstant::Type::Uint32:
- *type = MTLDataTypeUInt;
- if (entry) {
- entry->u32 = static_cast<uint32_t>(value);
- }
- break;
- default:
- UNREACHABLE();
+        // If a temporary resolve texture was used, copy the result of the MSAA resolve
+        // back to the true resolve targets.
+ if (workaroundUsed) {
+ DAWN_TRY(EncodeMetalRenderPass(device, commandContext, mtlRenderPass, width, height,
+ std::move(encodeInside)));
+
+ for (uint32_t i = 0; i < kMaxColorAttachments; ++i) {
+ if (trueResolveAttachments[i].texture == nullptr) {
+ continue;
}
- };
- for (const auto& [name, value] : programmableStage.constants) {
- overriddenConstants.insert(name);
+ trueResolveAttachments[i].CopyFromTemporaryToAttachment(commandContext);
+ }
+ return {};
+ }
+ }
+
+    // Handle the workaround for r8unorm/rg8unorm mipmap rendering being broken on some
+    // devices: render to a temporary texture instead, then copy back to the attachment.
+ if (device->IsToggleEnabled(Toggle::MetalRenderR8RG8UnormSmallMipToTempTexture)) {
+ std::array<SavedMetalAttachment, kMaxColorAttachments> originalAttachments;
+ bool workaroundUsed = false;
- // This is already validated so `name` must exist
- const auto& moduleConstant = entryPointMetadata.overridableConstants.at(name);
+ for (uint32_t i = 0; i < kMaxColorAttachments; ++i) {
+ if (mtlRenderPass.colorAttachments[i].texture == nullptr) {
+ continue;
+ }
- MTLDataType type;
- OverridableConstantScalar entry{};
+ if ([mtlRenderPass.colorAttachments[i].texture pixelFormat] != MTLPixelFormatR8Unorm &&
+ [mtlRenderPass.colorAttachments[i].texture pixelFormat] != MTLPixelFormatRG8Unorm) {
+ continue;
+ }
- switchType(moduleConstant.type, &type, &entry, value);
+ if (mtlRenderPass.colorAttachments[i].level < 2) {
+ continue;
+ }
- [constantValues.Get() setConstantValue:&entry type:type atIndex:moduleConstant.id];
+ DAWN_TRY_ASSIGN(originalAttachments[i], PatchAttachmentWithTemporary(
+ device, mtlRenderPass.colorAttachments[i]));
+ workaroundUsed = true;
+
+ if (mtlRenderPass.colorAttachments[i].loadAction == MTLLoadActionLoad) {
+ originalAttachments[i].CopyFromAttachmentToTemporary(commandContext);
}
+ }
- // Set shader initialized default values because MSL function_constant
- // has no default value
- for (const std::string& name : entryPointMetadata.initializedOverridableConstants) {
- if (overriddenConstants.count(name) != 0) {
- // This constant already has overridden value
+ if (workaroundUsed) {
+ DAWN_TRY(EncodeMetalRenderPass(device, commandContext, mtlRenderPass, width, height,
+ std::move(encodeInside)));
+
+ for (uint32_t i = 0; i < kMaxColorAttachments; ++i) {
+ if (originalAttachments[i].texture == nullptr) {
continue;
}
- // Must exist because it is validated
- const auto& moduleConstant = entryPointMetadata.overridableConstants.at(name);
- ASSERT(moduleConstant.isInitialized);
- MTLDataType type;
-
- switchType(moduleConstant.type, &type, nullptr);
+ originalAttachments[i].CopyFromTemporaryToAttachment(commandContext);
+ }
+ return {};
+ }
+ }
- [constantValues.Get() setConstantValue:&moduleConstant.defaultValue
- type:type
- atIndex:moduleConstant.id];
+    // Handle the store + MSAA resolve workaround (Toggle EmulateStoreAndMSAAResolve).
+    // This is done after the workarounds that modify the non-resolve attachments so that
+    // ResolveInAnotherRenderPass uses the temporary attachments, if any, instead of the
+    // original ones.
+ if (device->IsToggleEnabled(Toggle::EmulateStoreAndMSAAResolve)) {
+ bool hasStoreAndMSAAResolve = false;
+
+        // Remove any store + MSAA resolve actions and remember the resolve textures.
+ std::array<id<MTLTexture>, kMaxColorAttachments> resolveTextures = {};
+ for (uint32_t i = 0; i < kMaxColorAttachments; ++i) {
+ if (mtlRenderPass.colorAttachments[i].storeAction ==
+ kMTLStoreActionStoreAndMultisampleResolve) {
+ hasStoreAndMSAAResolve = true;
+ resolveTextures[i] = mtlRenderPass.colorAttachments[i].resolveTexture;
+
+ mtlRenderPass.colorAttachments[i].storeAction = MTLStoreActionStore;
+ mtlRenderPass.colorAttachments[i].resolveTexture = nullptr;
}
+ }
- DAWN_TRY(shaderModule->CreateFunction(
- shaderEntryPoint, singleShaderStage, pipelineLayout, functionData,
- constantValues.Get(), sampleMask, renderPipeline));
- } else {
- UNREACHABLE();
+        // If we found a store + MSAA resolve, we need to resolve in a separate render pass.
+ if (hasStoreAndMSAAResolve) {
+ DAWN_TRY(EncodeMetalRenderPass(device, commandContext, mtlRenderPass, width, height,
+ std::move(encodeInside)));
+
+ ResolveInAnotherRenderPass(commandContext, mtlRenderPass, resolveTextures);
+ return {};
}
- return {};
}
+ // No (more) workarounds needed! We can finally encode the actual render pass.
+ commandContext->EndBlit();
+ DAWN_TRY(encodeInside(commandContext->BeginRender(mtlRenderPass)));
+ commandContext->EndRender();
+ return {};
+}
+
+MaybeError EncodeEmptyMetalRenderPass(Device* device,
+ CommandRecordingContext* commandContext,
+ MTLRenderPassDescriptor* mtlRenderPass,
+ Extent3D size) {
+ return EncodeMetalRenderPass(device, commandContext, mtlRenderPass, size.width, size.height,
+ [&](id<MTLRenderCommandEncoder>) -> MaybeError { return {}; });
+}
+
} // namespace dawn::native::metal
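
The comment at the top of EncodeMetalRenderPass above describes a "peel one workaround, recurse, then fix up" structure. A minimal sketch of that control flow, using hypothetical types and no Metal calls, looks like this:

// Illustrative sketch of the recursive workaround-peeling pattern: patch the
// descriptor, recurse to run the remaining workarounds plus the real pass,
// then perform the fix-up on the way back out. Not the Dawn API.
#include <functional>
#include <iostream>

struct PassDescriptor {
    bool needsResolveWorkaround = false;
    bool needsStoreWorkaround = false;
};

using EncodeInside = std::function<void()>;

void EncodePass(PassDescriptor pass, EncodeInside encodeInside) {
    if (pass.needsResolveWorkaround) {
        std::cout << "patch: redirect resolve to a temporary texture\n";
        pass.needsResolveWorkaround = false;
        EncodePass(pass, std::move(encodeInside));  // remaining workarounds + real pass
        std::cout << "fixup: copy temporary back to the true resolve target\n";
        return;
    }
    if (pass.needsStoreWorkaround) {
        std::cout << "patch: drop MSAA resolve, keep store\n";
        pass.needsStoreWorkaround = false;
        EncodePass(pass, std::move(encodeInside));
        std::cout << "fixup: resolve in a separate render pass\n";
        return;
    }
    // No (more) workarounds: encode the actual render pass.
    encodeInside();
}

int main() {
    EncodePass({true, true}, [] { std::cout << "encode real render pass\n"; });
}
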
diff --git a/chromium/third_party/dawn/src/dawn/native/null/DeviceNull.cpp b/chromium/third_party/dawn/src/dawn/native/null/DeviceNull.cpp
index 8c61c23d0de..04069211b0b 100644
--- a/chromium/third_party/dawn/src/dawn/native/null/DeviceNull.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/null/DeviceNull.cpp
@@ -14,6 +14,9 @@
#include "dawn/native/null/DeviceNull.h"
+#include <limits>
+#include <utility>
+
#include "dawn/native/BackendConnection.h"
#include "dawn/native/Commands.h"
#include "dawn/native/ErrorData.h"
@@ -22,499 +25,488 @@
namespace dawn::native::null {
- // Implementation of pre-Device objects: the null adapter, null backend connection and Connect()
+// Implementation of pre-Device objects: the null adapter, null backend connection and Connect()
+
+Adapter::Adapter(InstanceBase* instance) : AdapterBase(instance, wgpu::BackendType::Null) {
+ mVendorId = 0;
+ mDeviceId = 0;
+ mName = "Null backend";
+ mAdapterType = wgpu::AdapterType::CPU;
+ MaybeError err = Initialize();
+ ASSERT(err.IsSuccess());
+}
+
+Adapter::~Adapter() = default;
+
+bool Adapter::SupportsExternalImages() const {
+ return false;
+}
+
+// Used for the tests that intend to use an adapter without all features enabled.
+void Adapter::SetSupportedFeatures(const std::vector<wgpu::FeatureName>& requiredFeatures) {
+ mSupportedFeatures = {};
+ for (wgpu::FeatureName f : requiredFeatures) {
+ mSupportedFeatures.EnableFeature(f);
+ }
+}
+
+MaybeError Adapter::InitializeImpl() {
+ return {};
+}
+
+MaybeError Adapter::InitializeSupportedFeaturesImpl() {
+ // Enable all features by default for the convenience of tests.
+ mSupportedFeatures.featuresBitSet.set();
+ return {};
+}
+
+MaybeError Adapter::InitializeSupportedLimitsImpl(CombinedLimits* limits) {
+ GetDefaultLimits(&limits->v1);
+ return {};
+}
+
+ResultOrError<Ref<DeviceBase>> Adapter::CreateDeviceImpl(const DeviceDescriptor* descriptor) {
+ return Device::Create(this, descriptor);
+}
+
+class Backend : public BackendConnection {
+ public:
+ explicit Backend(InstanceBase* instance)
+ : BackendConnection(instance, wgpu::BackendType::Null) {}
+
+ std::vector<Ref<AdapterBase>> DiscoverDefaultAdapters() override {
+ // There is always a single Null adapter because it is purely CPU based and doesn't
+ // depend on the system.
+ std::vector<Ref<AdapterBase>> adapters;
+ Ref<Adapter> adapter = AcquireRef(new Adapter(GetInstance()));
+ adapters.push_back(std::move(adapter));
+ return adapters;
+ }
+};
+
+BackendConnection* Connect(InstanceBase* instance) {
+ return new Backend(instance);
+}
+
+struct CopyFromStagingToBufferOperation : PendingOperation {
+ void Execute() override {
+ destination->CopyFromStaging(staging, sourceOffset, destinationOffset, size);
+ }
+
+ StagingBufferBase* staging;
+ Ref<Buffer> destination;
+ uint64_t sourceOffset;
+ uint64_t destinationOffset;
+ uint64_t size;
+};
+
+// Device
+
+// static
+ResultOrError<Ref<Device>> Device::Create(Adapter* adapter, const DeviceDescriptor* descriptor) {
+ Ref<Device> device = AcquireRef(new Device(adapter, descriptor));
+ DAWN_TRY(device->Initialize(descriptor));
+ return device;
+}
+
+Device::~Device() {
+ Destroy();
+}
+
+MaybeError Device::Initialize(const DeviceDescriptor* descriptor) {
+ return DeviceBase::Initialize(AcquireRef(new Queue(this, &descriptor->defaultQueue)));
+}
+
+ResultOrError<Ref<BindGroupBase>> Device::CreateBindGroupImpl(
+ const BindGroupDescriptor* descriptor) {
+ return AcquireRef(new BindGroup(this, descriptor));
+}
+ResultOrError<Ref<BindGroupLayoutBase>> Device::CreateBindGroupLayoutImpl(
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken) {
+ return AcquireRef(new BindGroupLayout(this, descriptor, pipelineCompatibilityToken));
+}
+ResultOrError<Ref<BufferBase>> Device::CreateBufferImpl(const BufferDescriptor* descriptor) {
+ DAWN_TRY(IncrementMemoryUsage(descriptor->size));
+ return AcquireRef(new Buffer(this, descriptor));
+}
+ResultOrError<Ref<CommandBufferBase>> Device::CreateCommandBuffer(
+ CommandEncoder* encoder,
+ const CommandBufferDescriptor* descriptor) {
+ return AcquireRef(new CommandBuffer(encoder, descriptor));
+}
+Ref<ComputePipelineBase> Device::CreateUninitializedComputePipelineImpl(
+ const ComputePipelineDescriptor* descriptor) {
+ return AcquireRef(new ComputePipeline(this, descriptor));
+}
+ResultOrError<Ref<PipelineLayoutBase>> Device::CreatePipelineLayoutImpl(
+ const PipelineLayoutDescriptor* descriptor) {
+ return AcquireRef(new PipelineLayout(this, descriptor));
+}
+ResultOrError<Ref<QuerySetBase>> Device::CreateQuerySetImpl(const QuerySetDescriptor* descriptor) {
+ return AcquireRef(new QuerySet(this, descriptor));
+}
+Ref<RenderPipelineBase> Device::CreateUninitializedRenderPipelineImpl(
+ const RenderPipelineDescriptor* descriptor) {
+ return AcquireRef(new RenderPipeline(this, descriptor));
+}
+ResultOrError<Ref<SamplerBase>> Device::CreateSamplerImpl(const SamplerDescriptor* descriptor) {
+ return AcquireRef(new Sampler(this, descriptor));
+}
+ResultOrError<Ref<ShaderModuleBase>> Device::CreateShaderModuleImpl(
+ const ShaderModuleDescriptor* descriptor,
+ ShaderModuleParseResult* parseResult,
+ OwnedCompilationMessages* compilationMessages) {
+ Ref<ShaderModule> module = AcquireRef(new ShaderModule(this, descriptor));
+ DAWN_TRY(module->Initialize(parseResult, compilationMessages));
+ return module;
+}
+ResultOrError<Ref<SwapChainBase>> Device::CreateSwapChainImpl(
+ const SwapChainDescriptor* descriptor) {
+ return AcquireRef(new OldSwapChain(this, descriptor));
+}
+ResultOrError<Ref<NewSwapChainBase>> Device::CreateSwapChainImpl(
+ Surface* surface,
+ NewSwapChainBase* previousSwapChain,
+ const SwapChainDescriptor* descriptor) {
+ return SwapChain::Create(this, surface, previousSwapChain, descriptor);
+}
+ResultOrError<Ref<TextureBase>> Device::CreateTextureImpl(const TextureDescriptor* descriptor) {
+ return AcquireRef(new Texture(this, descriptor, TextureBase::TextureState::OwnedInternal));
+}
+ResultOrError<Ref<TextureViewBase>> Device::CreateTextureViewImpl(
+ TextureBase* texture,
+ const TextureViewDescriptor* descriptor) {
+ return AcquireRef(new TextureView(texture, descriptor));
+}
+
+ResultOrError<std::unique_ptr<StagingBufferBase>> Device::CreateStagingBuffer(size_t size) {
+ std::unique_ptr<StagingBufferBase> stagingBuffer = std::make_unique<StagingBuffer>(size, this);
+ DAWN_TRY(stagingBuffer->Initialize());
+ return std::move(stagingBuffer);
+}
+
+void Device::DestroyImpl() {
+ ASSERT(GetState() == State::Disconnected);
+
+ // Clear pending operations before checking mMemoryUsage because some operations keep a
+ // reference to Buffers.
+ mPendingOperations.clear();
+ ASSERT(mMemoryUsage == 0);
+}
+
+MaybeError Device::WaitForIdleForDestruction() {
+ mPendingOperations.clear();
+ return {};
+}
+
+MaybeError Device::CopyFromStagingToBuffer(StagingBufferBase* source,
+ uint64_t sourceOffset,
+ BufferBase* destination,
+ uint64_t destinationOffset,
+ uint64_t size) {
+ if (IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse)) {
+ destination->SetIsDataInitialized();
+ }
+
+ auto operation = std::make_unique<CopyFromStagingToBufferOperation>();
+ operation->staging = source;
+ operation->destination = ToBackend(destination);
+ operation->sourceOffset = sourceOffset;
+ operation->destinationOffset = destinationOffset;
+ operation->size = size;
+
+ AddPendingOperation(std::move(operation));
+
+ return {};
+}
+
+MaybeError Device::CopyFromStagingToTexture(const StagingBufferBase* source,
+ const TextureDataLayout& src,
+ TextureCopy* dst,
+ const Extent3D& copySizePixels) {
+ return {};
+}
+
+MaybeError Device::IncrementMemoryUsage(uint64_t bytes) {
+ static_assert(kMaxMemoryUsage <= std::numeric_limits<size_t>::max());
+ if (bytes > kMaxMemoryUsage || mMemoryUsage > kMaxMemoryUsage - bytes) {
+ return DAWN_OUT_OF_MEMORY_ERROR("Out of memory.");
+ }
+ mMemoryUsage += bytes;
+ return {};
+}
+
+void Device::DecrementMemoryUsage(uint64_t bytes) {
+ ASSERT(mMemoryUsage >= bytes);
+ mMemoryUsage -= bytes;
+}
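
Device::IncrementMemoryUsage above compares against kMaxMemoryUsage - bytes rather than computing mMemoryUsage + bytes, which sidesteps unsigned wrap-around. A standalone sketch of the same guard, with illustrative names:

// Overflow-safe budget check: comparing against (kMax - bytes) avoids the
// unsigned wrap-around that (usage + bytes) could produce for huge requests.
#include <cstdint>
#include <iostream>

constexpr uint64_t kMaxMemoryUsage = 512 * 1024 * 1024;

bool TryReserve(uint64_t& usage, uint64_t bytes) {
    if (bytes > kMaxMemoryUsage || usage > kMaxMemoryUsage - bytes) {
        return false;  // would exceed the budget (or overflow the addition)
    }
    usage += bytes;
    return true;
}

int main() {
    uint64_t usage = 0;
    std::cout << TryReserve(usage, 256u * 1024 * 1024) << "\n";  // 1: fits
    std::cout << TryReserve(usage, 512u * 1024 * 1024) << "\n";  // 0: over budget
    std::cout << TryReserve(usage, UINT64_MAX) << "\n";          // 0: no wrap-around
}
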
+
+MaybeError Device::TickImpl() {
+ return SubmitPendingOperations();
+}
+
+ResultOrError<ExecutionSerial> Device::CheckAndUpdateCompletedSerials() {
+ return GetLastSubmittedCommandSerial();
+}
+
+void Device::AddPendingOperation(std::unique_ptr<PendingOperation> operation) {
+ mPendingOperations.emplace_back(std::move(operation));
+}
+
+MaybeError Device::SubmitPendingOperations() {
+ for (auto& operation : mPendingOperations) {
+ operation->Execute();
+ }
+ mPendingOperations.clear();
+
+ DAWN_TRY(CheckPassedSerials());
+ IncrementLastSubmittedCommandSerial();
+
+ return {};
+}
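
The null queue above defers work by recording PendingOperation objects and only executing them when SubmitPendingOperations runs. A minimal, self-contained sketch of that pattern with stand-in types (not the Dawn API):

// Deferred "pending operation" pattern: work is recorded as objects and only
// executed on submit. Types and names here are illustrative.
#include <cstdint>
#include <iostream>
#include <memory>
#include <vector>

struct PendingOperation {
    virtual ~PendingOperation() = default;
    virtual void Execute() = 0;
};

struct CopyOperation : PendingOperation {
    const std::vector<uint8_t>* source = nullptr;
    std::vector<uint8_t>* destination = nullptr;
    void Execute() override { *destination = *source; }
};

class FakeDevice {
  public:
    void AddPendingOperation(std::unique_ptr<PendingOperation> op) {
        mPending.push_back(std::move(op));
    }
    void SubmitPendingOperations() {
        for (auto& op : mPending) {
            op->Execute();
        }
        mPending.clear();
    }

  private:
    std::vector<std::unique_ptr<PendingOperation>> mPending;
};

int main() {
    std::vector<uint8_t> staging = {1, 2, 3};
    std::vector<uint8_t> buffer;

    FakeDevice device;
    auto op = std::make_unique<CopyOperation>();
    op->source = &staging;
    op->destination = &buffer;
    device.AddPendingOperation(std::move(op));

    std::cout << "before submit: " << buffer.size() << " bytes\n";  // 0
    device.SubmitPendingOperations();
    std::cout << "after submit:  " << buffer.size() << " bytes\n";  // 3
}
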
+
+// BindGroupDataHolder
+
+BindGroupDataHolder::BindGroupDataHolder(size_t size)
+ : mBindingDataAllocation(malloc(size)) // malloc is guaranteed to return a
+ // pointer aligned enough for the allocation
+{}
+
+BindGroupDataHolder::~BindGroupDataHolder() {
+ free(mBindingDataAllocation);
+}
- Adapter::Adapter(InstanceBase* instance) : AdapterBase(instance, wgpu::BackendType::Null) {
- mVendorId = 0;
- mDeviceId = 0;
- mName = "Null backend";
- mAdapterType = wgpu::AdapterType::CPU;
- MaybeError err = Initialize();
- ASSERT(err.IsSuccess());
- }
+// BindGroup
- Adapter::~Adapter() = default;
+BindGroup::BindGroup(DeviceBase* device, const BindGroupDescriptor* descriptor)
+ : BindGroupDataHolder(descriptor->layout->GetBindingDataSize()),
+ BindGroupBase(device, descriptor, mBindingDataAllocation) {}
- bool Adapter::SupportsExternalImages() const {
- return false;
- }
+// BindGroupLayout
- // Used for the tests that intend to use an adapter without all features enabled.
- void Adapter::SetSupportedFeatures(const std::vector<wgpu::FeatureName>& requiredFeatures) {
- mSupportedFeatures = {};
- for (wgpu::FeatureName f : requiredFeatures) {
- mSupportedFeatures.EnableFeature(f);
- }
- }
+BindGroupLayout::BindGroupLayout(DeviceBase* device,
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken)
+ : BindGroupLayoutBase(device, descriptor, pipelineCompatibilityToken) {}
+
+// Buffer
+
+Buffer::Buffer(Device* device, const BufferDescriptor* descriptor)
+ : BufferBase(device, descriptor) {
+ mBackingData = std::unique_ptr<uint8_t[]>(new uint8_t[GetSize()]);
+ mAllocatedSize = GetSize();
+}
+
+bool Buffer::IsCPUWritableAtCreation() const {
+ // Only return true for mappable buffers so we can test cases that need / don't need a
+ // staging buffer.
+ return (GetUsage() & (wgpu::BufferUsage::MapRead | wgpu::BufferUsage::MapWrite)) != 0;
+}
+
+MaybeError Buffer::MapAtCreationImpl() {
+ return {};
+}
- MaybeError Adapter::InitializeImpl() {
- return {};
- }
+void Buffer::CopyFromStaging(StagingBufferBase* staging,
+ uint64_t sourceOffset,
+ uint64_t destinationOffset,
+ uint64_t size) {
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(staging->GetMappedPointer());
+ memcpy(mBackingData.get() + destinationOffset, ptr + sourceOffset, size);
+}
- MaybeError Adapter::InitializeSupportedFeaturesImpl() {
- // Enable all features by default for the convenience of tests.
- mSupportedFeatures.featuresBitSet.set();
- return {};
- }
+void Buffer::DoWriteBuffer(uint64_t bufferOffset, const void* data, size_t size) {
+ ASSERT(bufferOffset + size <= GetSize());
+ ASSERT(mBackingData);
+ memcpy(mBackingData.get() + bufferOffset, data, size);
+}
+
+MaybeError Buffer::MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) {
+ return {};
+}
- MaybeError Adapter::InitializeSupportedLimitsImpl(CombinedLimits* limits) {
- GetDefaultLimits(&limits->v1);
- return {};
- }
+void* Buffer::GetMappedPointerImpl() {
+ return mBackingData.get();
+}
- ResultOrError<Ref<DeviceBase>> Adapter::CreateDeviceImpl(const DeviceDescriptor* descriptor) {
- return Device::Create(this, descriptor);
- }
+void Buffer::UnmapImpl() {}
- class Backend : public BackendConnection {
- public:
- explicit Backend(InstanceBase* instance)
- : BackendConnection(instance, wgpu::BackendType::Null) {
- }
+void Buffer::DestroyImpl() {
+ BufferBase::DestroyImpl();
+ ToBackend(GetDevice())->DecrementMemoryUsage(GetSize());
+}
- std::vector<Ref<AdapterBase>> DiscoverDefaultAdapters() override {
- // There is always a single Null adapter because it is purely CPU based and doesn't
- // depend on the system.
- std::vector<Ref<AdapterBase>> adapters;
- Ref<Adapter> adapter = AcquireRef(new Adapter(GetInstance()));
- adapters.push_back(std::move(adapter));
- return adapters;
- }
- };
+// CommandBuffer
- BackendConnection* Connect(InstanceBase* instance) {
- return new Backend(instance);
- }
+CommandBuffer::CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor)
+ : CommandBufferBase(encoder, descriptor) {}
- struct CopyFromStagingToBufferOperation : PendingOperation {
- virtual void Execute() {
- destination->CopyFromStaging(staging, sourceOffset, destinationOffset, size);
- }
+// QuerySet
- StagingBufferBase* staging;
- Ref<Buffer> destination;
- uint64_t sourceOffset;
- uint64_t destinationOffset;
- uint64_t size;
- };
-
- // Device
-
- // static
- ResultOrError<Ref<Device>> Device::Create(Adapter* adapter,
- const DeviceDescriptor* descriptor) {
- Ref<Device> device = AcquireRef(new Device(adapter, descriptor));
- DAWN_TRY(device->Initialize(descriptor));
- return device;
- }
+QuerySet::QuerySet(Device* device, const QuerySetDescriptor* descriptor)
+ : QuerySetBase(device, descriptor) {}
- Device::~Device() {
- Destroy();
- }
+// Queue
- MaybeError Device::Initialize(const DeviceDescriptor* descriptor) {
- return DeviceBase::Initialize(AcquireRef(new Queue(this, &descriptor->defaultQueue)));
- }
+Queue::Queue(Device* device, const QueueDescriptor* descriptor) : QueueBase(device, descriptor) {}
- ResultOrError<Ref<BindGroupBase>> Device::CreateBindGroupImpl(
- const BindGroupDescriptor* descriptor) {
- return AcquireRef(new BindGroup(this, descriptor));
- }
- ResultOrError<Ref<BindGroupLayoutBase>> Device::CreateBindGroupLayoutImpl(
- const BindGroupLayoutDescriptor* descriptor,
- PipelineCompatibilityToken pipelineCompatibilityToken) {
- return AcquireRef(new BindGroupLayout(this, descriptor, pipelineCompatibilityToken));
- }
- ResultOrError<Ref<BufferBase>> Device::CreateBufferImpl(const BufferDescriptor* descriptor) {
- DAWN_TRY(IncrementMemoryUsage(descriptor->size));
- return AcquireRef(new Buffer(this, descriptor));
- }
- ResultOrError<Ref<CommandBufferBase>> Device::CreateCommandBuffer(
- CommandEncoder* encoder,
- const CommandBufferDescriptor* descriptor) {
- return AcquireRef(new CommandBuffer(encoder, descriptor));
- }
- Ref<ComputePipelineBase> Device::CreateUninitializedComputePipelineImpl(
- const ComputePipelineDescriptor* descriptor) {
- return AcquireRef(new ComputePipeline(this, descriptor));
- }
- ResultOrError<Ref<PipelineLayoutBase>> Device::CreatePipelineLayoutImpl(
- const PipelineLayoutDescriptor* descriptor) {
- return AcquireRef(new PipelineLayout(this, descriptor));
- }
- ResultOrError<Ref<QuerySetBase>> Device::CreateQuerySetImpl(
- const QuerySetDescriptor* descriptor) {
- return AcquireRef(new QuerySet(this, descriptor));
- }
- Ref<RenderPipelineBase> Device::CreateUninitializedRenderPipelineImpl(
- const RenderPipelineDescriptor* descriptor) {
- return AcquireRef(new RenderPipeline(this, descriptor));
- }
- ResultOrError<Ref<SamplerBase>> Device::CreateSamplerImpl(const SamplerDescriptor* descriptor) {
- return AcquireRef(new Sampler(this, descriptor));
- }
- ResultOrError<Ref<ShaderModuleBase>> Device::CreateShaderModuleImpl(
- const ShaderModuleDescriptor* descriptor,
- ShaderModuleParseResult* parseResult) {
- Ref<ShaderModule> module = AcquireRef(new ShaderModule(this, descriptor));
- DAWN_TRY(module->Initialize(parseResult));
- return module;
- }
- ResultOrError<Ref<SwapChainBase>> Device::CreateSwapChainImpl(
- const SwapChainDescriptor* descriptor) {
- return AcquireRef(new OldSwapChain(this, descriptor));
- }
- ResultOrError<Ref<NewSwapChainBase>> Device::CreateSwapChainImpl(
- Surface* surface,
- NewSwapChainBase* previousSwapChain,
- const SwapChainDescriptor* descriptor) {
- return SwapChain::Create(this, surface, previousSwapChain, descriptor);
- }
- ResultOrError<Ref<TextureBase>> Device::CreateTextureImpl(const TextureDescriptor* descriptor) {
- return AcquireRef(new Texture(this, descriptor, TextureBase::TextureState::OwnedInternal));
- }
- ResultOrError<Ref<TextureViewBase>> Device::CreateTextureViewImpl(
- TextureBase* texture,
- const TextureViewDescriptor* descriptor) {
- return AcquireRef(new TextureView(texture, descriptor));
- }
+Queue::~Queue() {}
- ResultOrError<std::unique_ptr<StagingBufferBase>> Device::CreateStagingBuffer(size_t size) {
- std::unique_ptr<StagingBufferBase> stagingBuffer =
- std::make_unique<StagingBuffer>(size, this);
- DAWN_TRY(stagingBuffer->Initialize());
- return std::move(stagingBuffer);
- }
+MaybeError Queue::SubmitImpl(uint32_t, CommandBufferBase* const*) {
+ Device* device = ToBackend(GetDevice());
- void Device::DestroyImpl() {
- ASSERT(GetState() == State::Disconnected);
+    // The Vulkan, D3D12 and Metal implementations all tick the device here;
+    // for testing purposes we should also tick in the null implementation.
+ DAWN_TRY(device->Tick());
- // Clear pending operations before checking mMemoryUsage because some operations keep a
- // reference to Buffers.
- mPendingOperations.clear();
- ASSERT(mMemoryUsage == 0);
- }
+ return device->SubmitPendingOperations();
+}
- MaybeError Device::WaitForIdleForDestruction() {
- mPendingOperations.clear();
- return {};
- }
-
- MaybeError Device::CopyFromStagingToBuffer(StagingBufferBase* source,
- uint64_t sourceOffset,
- BufferBase* destination,
- uint64_t destinationOffset,
- uint64_t size) {
- if (IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse)) {
- destination->SetIsDataInitialized();
- }
-
- auto operation = std::make_unique<CopyFromStagingToBufferOperation>();
- operation->staging = source;
- operation->destination = ToBackend(destination);
- operation->sourceOffset = sourceOffset;
- operation->destinationOffset = destinationOffset;
- operation->size = size;
-
- AddPendingOperation(std::move(operation));
-
- return {};
- }
-
- MaybeError Device::CopyFromStagingToTexture(const StagingBufferBase* source,
- const TextureDataLayout& src,
- TextureCopy* dst,
- const Extent3D& copySizePixels) {
- return {};
- }
-
- MaybeError Device::IncrementMemoryUsage(uint64_t bytes) {
- static_assert(kMaxMemoryUsage <= std::numeric_limits<size_t>::max());
- if (bytes > kMaxMemoryUsage || mMemoryUsage > kMaxMemoryUsage - bytes) {
- return DAWN_OUT_OF_MEMORY_ERROR("Out of memory.");
- }
- mMemoryUsage += bytes;
- return {};
- }
-
- void Device::DecrementMemoryUsage(uint64_t bytes) {
- ASSERT(mMemoryUsage >= bytes);
- mMemoryUsage -= bytes;
- }
-
- MaybeError Device::TickImpl() {
- return SubmitPendingOperations();
- }
-
- ResultOrError<ExecutionSerial> Device::CheckAndUpdateCompletedSerials() {
- return GetLastSubmittedCommandSerial();
- }
-
- void Device::AddPendingOperation(std::unique_ptr<PendingOperation> operation) {
- mPendingOperations.emplace_back(std::move(operation));
- }
-
- MaybeError Device::SubmitPendingOperations() {
- for (auto& operation : mPendingOperations) {
- operation->Execute();
+MaybeError Queue::WriteBufferImpl(BufferBase* buffer,
+ uint64_t bufferOffset,
+ const void* data,
+ size_t size) {
+ ToBackend(buffer)->DoWriteBuffer(bufferOffset, data, size);
+ return {};
+}
+
+// ComputePipeline
+MaybeError ComputePipeline::Initialize() {
+ return {};
+}
+
+// RenderPipeline
+MaybeError RenderPipeline::Initialize() {
+ return {};
+}
+
+// SwapChain
+
+// static
+ResultOrError<Ref<SwapChain>> SwapChain::Create(Device* device,
+ Surface* surface,
+ NewSwapChainBase* previousSwapChain,
+ const SwapChainDescriptor* descriptor) {
+ Ref<SwapChain> swapchain = AcquireRef(new SwapChain(device, surface, descriptor));
+ DAWN_TRY(swapchain->Initialize(previousSwapChain));
+ return swapchain;
+}
+
+MaybeError SwapChain::Initialize(NewSwapChainBase* previousSwapChain) {
+ if (previousSwapChain != nullptr) {
+ // TODO(crbug.com/dawn/269): figure out what should happen when surfaces are used by
+ // multiple backends one after the other. It probably needs to block until the backend
+ // and GPU are completely finished with the previous swapchain.
+ if (previousSwapChain->GetBackendType() != wgpu::BackendType::Null) {
+ return DAWN_VALIDATION_ERROR("null::SwapChain cannot switch between APIs");
}
- mPendingOperations.clear();
-
- DAWN_TRY(CheckPassedSerials());
- IncrementLastSubmittedCommandSerial();
-
- return {};
- }
-
- // BindGroupDataHolder
-
- BindGroupDataHolder::BindGroupDataHolder(size_t size)
- : mBindingDataAllocation(malloc(size)) // malloc is guaranteed to return a
- // pointer aligned enough for the allocation
- {
- }
-
- BindGroupDataHolder::~BindGroupDataHolder() {
- free(mBindingDataAllocation);
- }
-
- // BindGroup
-
- BindGroup::BindGroup(DeviceBase* device, const BindGroupDescriptor* descriptor)
- : BindGroupDataHolder(descriptor->layout->GetBindingDataSize()),
- BindGroupBase(device, descriptor, mBindingDataAllocation) {
- }
-
- // BindGroupLayout
-
- BindGroupLayout::BindGroupLayout(DeviceBase* device,
- const BindGroupLayoutDescriptor* descriptor,
- PipelineCompatibilityToken pipelineCompatibilityToken)
- : BindGroupLayoutBase(device, descriptor, pipelineCompatibilityToken) {
}
- // Buffer
+ return {};
+}
- Buffer::Buffer(Device* device, const BufferDescriptor* descriptor)
- : BufferBase(device, descriptor) {
- mBackingData = std::unique_ptr<uint8_t[]>(new uint8_t[GetSize()]);
- mAllocatedSize = GetSize();
- }
+SwapChain::~SwapChain() = default;
- bool Buffer::IsCPUWritableAtCreation() const {
- // Only return true for mappable buffers so we can test cases that need / don't need a
- // staging buffer.
- return (GetUsage() & (wgpu::BufferUsage::MapRead | wgpu::BufferUsage::MapWrite)) != 0;
- }
-
- MaybeError Buffer::MapAtCreationImpl() {
- return {};
- }
-
- void Buffer::CopyFromStaging(StagingBufferBase* staging,
- uint64_t sourceOffset,
- uint64_t destinationOffset,
- uint64_t size) {
- uint8_t* ptr = reinterpret_cast<uint8_t*>(staging->GetMappedPointer());
- memcpy(mBackingData.get() + destinationOffset, ptr + sourceOffset, size);
- }
-
- void Buffer::DoWriteBuffer(uint64_t bufferOffset, const void* data, size_t size) {
- ASSERT(bufferOffset + size <= GetSize());
- ASSERT(mBackingData);
- memcpy(mBackingData.get() + bufferOffset, data, size);
- }
-
- MaybeError Buffer::MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) {
- return {};
- }
-
- void* Buffer::GetMappedPointerImpl() {
- return mBackingData.get();
- }
-
- void Buffer::UnmapImpl() {
- }
-
- void Buffer::DestroyImpl() {
- BufferBase::DestroyImpl();
- ToBackend(GetDevice())->DecrementMemoryUsage(GetSize());
- }
+MaybeError SwapChain::PresentImpl() {
+ mTexture->APIDestroy();
+ mTexture = nullptr;
+ return {};
+}
- // CommandBuffer
-
- CommandBuffer::CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor)
- : CommandBufferBase(encoder, descriptor) {
- }
-
- // QuerySet
-
- QuerySet::QuerySet(Device* device, const QuerySetDescriptor* descriptor)
- : QuerySetBase(device, descriptor) {
- }
-
- // Queue
-
- Queue::Queue(Device* device, const QueueDescriptor* descriptor)
- : QueueBase(device, descriptor) {
- }
-
- Queue::~Queue() {
- }
-
- MaybeError Queue::SubmitImpl(uint32_t, CommandBufferBase* const*) {
- Device* device = ToBackend(GetDevice());
-
- // The Vulkan, D3D12 and Metal implementation all tick the device here,
- // for testing purposes we should also tick in the null implementation.
- DAWN_TRY(device->Tick());
-
- return device->SubmitPendingOperations();
- }
-
- MaybeError Queue::WriteBufferImpl(BufferBase* buffer,
- uint64_t bufferOffset,
- const void* data,
- size_t size) {
- ToBackend(buffer)->DoWriteBuffer(bufferOffset, data, size);
- return {};
- }
+ResultOrError<Ref<TextureViewBase>> SwapChain::GetCurrentTextureViewImpl() {
+ TextureDescriptor textureDesc = GetSwapChainBaseTextureDescriptor(this);
+ mTexture = AcquireRef(
+ new Texture(GetDevice(), &textureDesc, TextureBase::TextureState::OwnedInternal));
+ return mTexture->CreateView();
+}
- // ComputePipeline
- MaybeError ComputePipeline::Initialize() {
- return {};
- }
-
- // RenderPipeline
- MaybeError RenderPipeline::Initialize() {
- return {};
- }
-
- // SwapChain
-
- // static
- ResultOrError<Ref<SwapChain>> SwapChain::Create(Device* device,
- Surface* surface,
- NewSwapChainBase* previousSwapChain,
- const SwapChainDescriptor* descriptor) {
- Ref<SwapChain> swapchain = AcquireRef(new SwapChain(device, surface, descriptor));
- DAWN_TRY(swapchain->Initialize(previousSwapChain));
- return swapchain;
- }
-
- MaybeError SwapChain::Initialize(NewSwapChainBase* previousSwapChain) {
- if (previousSwapChain != nullptr) {
- // TODO(crbug.com/dawn/269): figure out what should happen when surfaces are used by
- // multiple backends one after the other. It probably needs to block until the backend
- // and GPU are completely finished with the previous swapchain.
- if (previousSwapChain->GetBackendType() != wgpu::BackendType::Null) {
- return DAWN_VALIDATION_ERROR("null::SwapChain cannot switch between APIs");
- }
- }
-
- return {};
- }
-
- SwapChain::~SwapChain() = default;
-
- MaybeError SwapChain::PresentImpl() {
+void SwapChain::DetachFromSurfaceImpl() {
+ if (mTexture != nullptr) {
mTexture->APIDestroy();
mTexture = nullptr;
- return {};
}
+}
- ResultOrError<Ref<TextureViewBase>> SwapChain::GetCurrentTextureViewImpl() {
- TextureDescriptor textureDesc = GetSwapChainBaseTextureDescriptor(this);
- mTexture = AcquireRef(
- new Texture(GetDevice(), &textureDesc, TextureBase::TextureState::OwnedInternal));
- return mTexture->CreateView();
- }
-
- void SwapChain::DetachFromSurfaceImpl() {
- if (mTexture != nullptr) {
- mTexture->APIDestroy();
- mTexture = nullptr;
- }
- }
+// ShaderModule
- // ShaderModule
+MaybeError ShaderModule::Initialize(ShaderModuleParseResult* parseResult,
+ OwnedCompilationMessages* compilationMessages) {
+ return InitializeBase(parseResult, compilationMessages);
+}
- MaybeError ShaderModule::Initialize(ShaderModuleParseResult* parseResult) {
- return InitializeBase(parseResult);
- }
+// OldSwapChain
- // OldSwapChain
+OldSwapChain::OldSwapChain(Device* device, const SwapChainDescriptor* descriptor)
+ : OldSwapChainBase(device, descriptor) {
+ const auto& im = GetImplementation();
+ im.Init(im.userData, nullptr);
+}
- OldSwapChain::OldSwapChain(Device* device, const SwapChainDescriptor* descriptor)
- : OldSwapChainBase(device, descriptor) {
- const auto& im = GetImplementation();
- im.Init(im.userData, nullptr);
- }
+OldSwapChain::~OldSwapChain() {}
- OldSwapChain::~OldSwapChain() {
- }
+TextureBase* OldSwapChain::GetNextTextureImpl(const TextureDescriptor* descriptor) {
+ return GetDevice()->APICreateTexture(descriptor);
+}
- TextureBase* OldSwapChain::GetNextTextureImpl(const TextureDescriptor* descriptor) {
- return GetDevice()->APICreateTexture(descriptor);
- }
+MaybeError OldSwapChain::OnBeforePresent(TextureViewBase*) {
+ return {};
+}
- MaybeError OldSwapChain::OnBeforePresent(TextureViewBase*) {
- return {};
- }
+// NativeSwapChainImpl
- // NativeSwapChainImpl
+void NativeSwapChainImpl::Init(WSIContext* context) {}
- void NativeSwapChainImpl::Init(WSIContext* context) {
- }
+DawnSwapChainError NativeSwapChainImpl::Configure(WGPUTextureFormat format,
+ WGPUTextureUsage,
+ uint32_t width,
+ uint32_t height) {
+ return DAWN_SWAP_CHAIN_NO_ERROR;
+}
- DawnSwapChainError NativeSwapChainImpl::Configure(WGPUTextureFormat format,
- WGPUTextureUsage,
- uint32_t width,
- uint32_t height) {
- return DAWN_SWAP_CHAIN_NO_ERROR;
- }
+DawnSwapChainError NativeSwapChainImpl::GetNextTexture(DawnSwapChainNextTexture* nextTexture) {
+ return DAWN_SWAP_CHAIN_NO_ERROR;
+}
- DawnSwapChainError NativeSwapChainImpl::GetNextTexture(DawnSwapChainNextTexture* nextTexture) {
- return DAWN_SWAP_CHAIN_NO_ERROR;
- }
+DawnSwapChainError NativeSwapChainImpl::Present() {
+ return DAWN_SWAP_CHAIN_NO_ERROR;
+}
- DawnSwapChainError NativeSwapChainImpl::Present() {
- return DAWN_SWAP_CHAIN_NO_ERROR;
- }
+wgpu::TextureFormat NativeSwapChainImpl::GetPreferredFormat() const {
+ return wgpu::TextureFormat::RGBA8Unorm;
+}
- wgpu::TextureFormat NativeSwapChainImpl::GetPreferredFormat() const {
- return wgpu::TextureFormat::RGBA8Unorm;
- }
+// StagingBuffer
- // StagingBuffer
+StagingBuffer::StagingBuffer(size_t size, Device* device)
+ : StagingBufferBase(size), mDevice(device) {}
- StagingBuffer::StagingBuffer(size_t size, Device* device)
- : StagingBufferBase(size), mDevice(device) {
+StagingBuffer::~StagingBuffer() {
+ if (mBuffer) {
+ mDevice->DecrementMemoryUsage(GetSize());
}
+}
- StagingBuffer::~StagingBuffer() {
- if (mBuffer) {
- mDevice->DecrementMemoryUsage(GetSize());
- }
- }
+MaybeError StagingBuffer::Initialize() {
+ DAWN_TRY(mDevice->IncrementMemoryUsage(GetSize()));
+ mBuffer = std::make_unique<uint8_t[]>(GetSize());
+ mMappedPointer = mBuffer.get();
+ return {};
+}
- MaybeError StagingBuffer::Initialize() {
- DAWN_TRY(mDevice->IncrementMemoryUsage(GetSize()));
- mBuffer = std::make_unique<uint8_t[]>(GetSize());
- mMappedPointer = mBuffer.get();
- return {};
- }
+uint32_t Device::GetOptimalBytesPerRowAlignment() const {
+ return 1;
+}
- uint32_t Device::GetOptimalBytesPerRowAlignment() const {
- return 1;
- }
+uint64_t Device::GetOptimalBufferToTextureCopyOffsetAlignment() const {
+ return 1;
+}
- uint64_t Device::GetOptimalBufferToTextureCopyOffsetAlignment() const {
- return 1;
- }
+float Device::GetTimestampPeriodInNS() const {
+ return 1.0f;
+}
- float Device::GetTimestampPeriodInNS() const {
- return 1.0f;
- }
+Texture::Texture(DeviceBase* device, const TextureDescriptor* descriptor, TextureState state)
+ : TextureBase(device, descriptor, state) {}
} // namespace dawn::native::null
diff --git a/chromium/third_party/dawn/src/dawn/native/null/DeviceNull.h b/chromium/third_party/dawn/src/dawn/native/null/DeviceNull.h
index 299f44a8f22..74da890354a 100644
--- a/chromium/third_party/dawn/src/dawn/native/null/DeviceNull.h
+++ b/chromium/third_party/dawn/src/dawn/native/null/DeviceNull.h
@@ -15,6 +15,9 @@
#ifndef SRC_DAWN_NATIVE_NULL_DEVICENULL_H_
#define SRC_DAWN_NATIVE_NULL_DEVICENULL_H_
+#include <memory>
+#include <vector>
+
#include "dawn/native/Adapter.h"
#include "dawn/native/BindGroup.h"
#include "dawn/native/BindGroupLayout.h"
@@ -38,302 +41,304 @@
namespace dawn::native::null {
- class Adapter;
- class BindGroup;
- class BindGroupLayout;
- class Buffer;
- class CommandBuffer;
- class ComputePipeline;
- class Device;
- using PipelineLayout = PipelineLayoutBase;
- class QuerySet;
- class Queue;
- class RenderPipeline;
- using Sampler = SamplerBase;
- class ShaderModule;
- class SwapChain;
- using Texture = TextureBase;
- using TextureView = TextureViewBase;
-
- struct NullBackendTraits {
- using AdapterType = Adapter;
- using BindGroupType = BindGroup;
- using BindGroupLayoutType = BindGroupLayout;
- using BufferType = Buffer;
- using CommandBufferType = CommandBuffer;
- using ComputePipelineType = ComputePipeline;
- using DeviceType = Device;
- using PipelineLayoutType = PipelineLayout;
- using QuerySetType = QuerySet;
- using QueueType = Queue;
- using RenderPipelineType = RenderPipeline;
- using SamplerType = Sampler;
- using ShaderModuleType = ShaderModule;
- using SwapChainType = SwapChain;
- using TextureType = Texture;
- using TextureViewType = TextureView;
- };
-
- template <typename T>
- auto ToBackend(T&& common) -> decltype(ToBackendBase<NullBackendTraits>(common)) {
- return ToBackendBase<NullBackendTraits>(common);
- }
-
- struct PendingOperation {
- virtual ~PendingOperation() = default;
- virtual void Execute() = 0;
- };
-
- class Device final : public DeviceBase {
- public:
- static ResultOrError<Ref<Device>> Create(Adapter* adapter,
- const DeviceDescriptor* descriptor);
- ~Device() override;
-
- MaybeError Initialize(const DeviceDescriptor* descriptor);
-
- ResultOrError<Ref<CommandBufferBase>> CreateCommandBuffer(
- CommandEncoder* encoder,
- const CommandBufferDescriptor* descriptor) override;
-
- MaybeError TickImpl() override;
-
- void AddPendingOperation(std::unique_ptr<PendingOperation> operation);
- MaybeError SubmitPendingOperations();
-
- ResultOrError<std::unique_ptr<StagingBufferBase>> CreateStagingBuffer(size_t size) override;
- MaybeError CopyFromStagingToBuffer(StagingBufferBase* source,
- uint64_t sourceOffset,
- BufferBase* destination,
- uint64_t destinationOffset,
- uint64_t size) override;
- MaybeError CopyFromStagingToTexture(const StagingBufferBase* source,
- const TextureDataLayout& src,
- TextureCopy* dst,
- const Extent3D& copySizePixels) override;
-
- MaybeError IncrementMemoryUsage(uint64_t bytes);
- void DecrementMemoryUsage(uint64_t bytes);
-
- uint32_t GetOptimalBytesPerRowAlignment() const override;
- uint64_t GetOptimalBufferToTextureCopyOffsetAlignment() const override;
-
- float GetTimestampPeriodInNS() const override;
-
- private:
- using DeviceBase::DeviceBase;
-
- ResultOrError<Ref<BindGroupBase>> CreateBindGroupImpl(
- const BindGroupDescriptor* descriptor) override;
- ResultOrError<Ref<BindGroupLayoutBase>> CreateBindGroupLayoutImpl(
- const BindGroupLayoutDescriptor* descriptor,
- PipelineCompatibilityToken pipelineCompatibilityToken) override;
- ResultOrError<Ref<BufferBase>> CreateBufferImpl(
- const BufferDescriptor* descriptor) override;
- Ref<ComputePipelineBase> CreateUninitializedComputePipelineImpl(
- const ComputePipelineDescriptor* descriptor) override;
- ResultOrError<Ref<PipelineLayoutBase>> CreatePipelineLayoutImpl(
- const PipelineLayoutDescriptor* descriptor) override;
- ResultOrError<Ref<QuerySetBase>> CreateQuerySetImpl(
- const QuerySetDescriptor* descriptor) override;
- Ref<RenderPipelineBase> CreateUninitializedRenderPipelineImpl(
- const RenderPipelineDescriptor* descriptor) override;
- ResultOrError<Ref<SamplerBase>> CreateSamplerImpl(
- const SamplerDescriptor* descriptor) override;
- ResultOrError<Ref<ShaderModuleBase>> CreateShaderModuleImpl(
- const ShaderModuleDescriptor* descriptor,
- ShaderModuleParseResult* parseResult) override;
- ResultOrError<Ref<SwapChainBase>> CreateSwapChainImpl(
- const SwapChainDescriptor* descriptor) override;
- ResultOrError<Ref<NewSwapChainBase>> CreateSwapChainImpl(
- Surface* surface,
- NewSwapChainBase* previousSwapChain,
- const SwapChainDescriptor* descriptor) override;
- ResultOrError<Ref<TextureBase>> CreateTextureImpl(
- const TextureDescriptor* descriptor) override;
- ResultOrError<Ref<TextureViewBase>> CreateTextureViewImpl(
- TextureBase* texture,
- const TextureViewDescriptor* descriptor) override;
-
- ResultOrError<ExecutionSerial> CheckAndUpdateCompletedSerials() override;
-
- void DestroyImpl() override;
- MaybeError WaitForIdleForDestruction() override;
-
- std::vector<std::unique_ptr<PendingOperation>> mPendingOperations;
-
- static constexpr uint64_t kMaxMemoryUsage = 512 * 1024 * 1024;
- size_t mMemoryUsage = 0;
- };
-
- class Adapter : public AdapterBase {
- public:
- explicit Adapter(InstanceBase* instance);
- ~Adapter() override;
-
- // AdapterBase Implementation
- bool SupportsExternalImages() const override;
-
- // Used for the tests that intend to use an adapter without all features enabled.
- void SetSupportedFeatures(const std::vector<wgpu::FeatureName>& requiredFeatures);
-
- private:
- MaybeError InitializeImpl() override;
- MaybeError InitializeSupportedFeaturesImpl() override;
- MaybeError InitializeSupportedLimitsImpl(CombinedLimits* limits) override;
-
- ResultOrError<Ref<DeviceBase>> CreateDeviceImpl(
- const DeviceDescriptor* descriptor) override;
- };
-
- // Helper class so |BindGroup| can allocate memory for its binding data,
- // before calling the BindGroupBase base class constructor.
- class BindGroupDataHolder {
- protected:
- explicit BindGroupDataHolder(size_t size);
- ~BindGroupDataHolder();
-
- void* mBindingDataAllocation;
- };
-
- // We don't have the complexity of placement-allocation of bind group data in
- // the Null backend. This class, keeps the binding data in a separate allocation for simplicity.
- class BindGroup final : private BindGroupDataHolder, public BindGroupBase {
- public:
- BindGroup(DeviceBase* device, const BindGroupDescriptor* descriptor);
-
- private:
- ~BindGroup() override = default;
- };
-
- class BindGroupLayout final : public BindGroupLayoutBase {
- public:
- BindGroupLayout(DeviceBase* device,
- const BindGroupLayoutDescriptor* descriptor,
- PipelineCompatibilityToken pipelineCompatibilityToken);
-
- private:
- ~BindGroupLayout() override = default;
- };
-
- class Buffer final : public BufferBase {
- public:
- Buffer(Device* device, const BufferDescriptor* descriptor);
-
- void CopyFromStaging(StagingBufferBase* staging,
- uint64_t sourceOffset,
- uint64_t destinationOffset,
- uint64_t size);
-
- void DoWriteBuffer(uint64_t bufferOffset, const void* data, size_t size);
-
- private:
- MaybeError MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) override;
- void UnmapImpl() override;
- void DestroyImpl() override;
- bool IsCPUWritableAtCreation() const override;
- MaybeError MapAtCreationImpl() override;
- void* GetMappedPointerImpl() override;
-
- std::unique_ptr<uint8_t[]> mBackingData;
- };
-
- class CommandBuffer final : public CommandBufferBase {
- public:
- CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor);
- };
-
- class QuerySet final : public QuerySetBase {
- public:
- QuerySet(Device* device, const QuerySetDescriptor* descriptor);
- };
-
- class Queue final : public QueueBase {
- public:
- Queue(Device* device, const QueueDescriptor* descriptor);
-
- private:
- ~Queue() override;
- MaybeError SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) override;
- MaybeError WriteBufferImpl(BufferBase* buffer,
- uint64_t bufferOffset,
- const void* data,
- size_t size) override;
- };
-
- class ComputePipeline final : public ComputePipelineBase {
- public:
- using ComputePipelineBase::ComputePipelineBase;
-
- MaybeError Initialize() override;
- };
-
- class RenderPipeline final : public RenderPipelineBase {
- public:
- using RenderPipelineBase::RenderPipelineBase;
-
- MaybeError Initialize() override;
- };
-
- class ShaderModule final : public ShaderModuleBase {
- public:
- using ShaderModuleBase::ShaderModuleBase;
-
- MaybeError Initialize(ShaderModuleParseResult* parseResult);
- };
-
- class SwapChain final : public NewSwapChainBase {
- public:
- static ResultOrError<Ref<SwapChain>> Create(Device* device,
- Surface* surface,
- NewSwapChainBase* previousSwapChain,
- const SwapChainDescriptor* descriptor);
- ~SwapChain() override;
-
- private:
- using NewSwapChainBase::NewSwapChainBase;
- MaybeError Initialize(NewSwapChainBase* previousSwapChain);
-
- Ref<Texture> mTexture;
-
- MaybeError PresentImpl() override;
- ResultOrError<Ref<TextureViewBase>> GetCurrentTextureViewImpl() override;
- void DetachFromSurfaceImpl() override;
- };
-
- class OldSwapChain final : public OldSwapChainBase {
- public:
- OldSwapChain(Device* device, const SwapChainDescriptor* descriptor);
-
- protected:
- ~OldSwapChain() override;
- TextureBase* GetNextTextureImpl(const TextureDescriptor* descriptor) override;
- MaybeError OnBeforePresent(TextureViewBase*) override;
- };
-
- class NativeSwapChainImpl {
- public:
- using WSIContext = struct {};
- void Init(WSIContext* context);
- DawnSwapChainError Configure(WGPUTextureFormat format,
- WGPUTextureUsage,
- uint32_t width,
- uint32_t height);
- DawnSwapChainError GetNextTexture(DawnSwapChainNextTexture* nextTexture);
- DawnSwapChainError Present();
- wgpu::TextureFormat GetPreferredFormat() const;
- };
-
- class StagingBuffer : public StagingBufferBase {
- public:
- StagingBuffer(size_t size, Device* device);
- ~StagingBuffer() override;
- MaybeError Initialize() override;
-
- private:
- Device* mDevice;
- std::unique_ptr<uint8_t[]> mBuffer;
- };
+class Adapter;
+class BindGroup;
+class BindGroupLayout;
+class Buffer;
+class CommandBuffer;
+class ComputePipeline;
+class Device;
+using PipelineLayout = PipelineLayoutBase;
+class QuerySet;
+class Queue;
+class RenderPipeline;
+using Sampler = SamplerBase;
+class ShaderModule;
+class SwapChain;
+class Texture;
+using TextureView = TextureViewBase;
+
+struct NullBackendTraits {
+ using AdapterType = Adapter;
+ using BindGroupType = BindGroup;
+ using BindGroupLayoutType = BindGroupLayout;
+ using BufferType = Buffer;
+ using CommandBufferType = CommandBuffer;
+ using ComputePipelineType = ComputePipeline;
+ using DeviceType = Device;
+ using PipelineLayoutType = PipelineLayout;
+ using QuerySetType = QuerySet;
+ using QueueType = Queue;
+ using RenderPipelineType = RenderPipeline;
+ using SamplerType = Sampler;
+ using ShaderModuleType = ShaderModule;
+ using SwapChainType = SwapChain;
+ using TextureType = Texture;
+ using TextureViewType = TextureView;
+};
+
+template <typename T>
+auto ToBackend(T&& common) -> decltype(ToBackendBase<NullBackendTraits>(common)) {
+ return ToBackendBase<NullBackendTraits>(common);
+}
+
+struct PendingOperation {
+ virtual ~PendingOperation() = default;
+ virtual void Execute() = 0;
+};
+
+class Device final : public DeviceBase {
+ public:
+ static ResultOrError<Ref<Device>> Create(Adapter* adapter, const DeviceDescriptor* descriptor);
+ ~Device() override;
+
+ MaybeError Initialize(const DeviceDescriptor* descriptor);
+
+ ResultOrError<Ref<CommandBufferBase>> CreateCommandBuffer(
+ CommandEncoder* encoder,
+ const CommandBufferDescriptor* descriptor) override;
+
+ MaybeError TickImpl() override;
+
+ void AddPendingOperation(std::unique_ptr<PendingOperation> operation);
+ MaybeError SubmitPendingOperations();
+
+ ResultOrError<std::unique_ptr<StagingBufferBase>> CreateStagingBuffer(size_t size) override;
+ MaybeError CopyFromStagingToBuffer(StagingBufferBase* source,
+ uint64_t sourceOffset,
+ BufferBase* destination,
+ uint64_t destinationOffset,
+ uint64_t size) override;
+ MaybeError CopyFromStagingToTexture(const StagingBufferBase* source,
+ const TextureDataLayout& src,
+ TextureCopy* dst,
+ const Extent3D& copySizePixels) override;
+
+ MaybeError IncrementMemoryUsage(uint64_t bytes);
+ void DecrementMemoryUsage(uint64_t bytes);
+
+ uint32_t GetOptimalBytesPerRowAlignment() const override;
+ uint64_t GetOptimalBufferToTextureCopyOffsetAlignment() const override;
+
+ float GetTimestampPeriodInNS() const override;
+
+ private:
+ using DeviceBase::DeviceBase;
+
+ ResultOrError<Ref<BindGroupBase>> CreateBindGroupImpl(
+ const BindGroupDescriptor* descriptor) override;
+ ResultOrError<Ref<BindGroupLayoutBase>> CreateBindGroupLayoutImpl(
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken) override;
+ ResultOrError<Ref<BufferBase>> CreateBufferImpl(const BufferDescriptor* descriptor) override;
+ Ref<ComputePipelineBase> CreateUninitializedComputePipelineImpl(
+ const ComputePipelineDescriptor* descriptor) override;
+ ResultOrError<Ref<PipelineLayoutBase>> CreatePipelineLayoutImpl(
+ const PipelineLayoutDescriptor* descriptor) override;
+ ResultOrError<Ref<QuerySetBase>> CreateQuerySetImpl(
+ const QuerySetDescriptor* descriptor) override;
+ Ref<RenderPipelineBase> CreateUninitializedRenderPipelineImpl(
+ const RenderPipelineDescriptor* descriptor) override;
+ ResultOrError<Ref<SamplerBase>> CreateSamplerImpl(const SamplerDescriptor* descriptor) override;
+ ResultOrError<Ref<ShaderModuleBase>> CreateShaderModuleImpl(
+ const ShaderModuleDescriptor* descriptor,
+ ShaderModuleParseResult* parseResult,
+ OwnedCompilationMessages* compilationMessages) override;
+ ResultOrError<Ref<SwapChainBase>> CreateSwapChainImpl(
+ const SwapChainDescriptor* descriptor) override;
+ ResultOrError<Ref<NewSwapChainBase>> CreateSwapChainImpl(
+ Surface* surface,
+ NewSwapChainBase* previousSwapChain,
+ const SwapChainDescriptor* descriptor) override;
+ ResultOrError<Ref<TextureBase>> CreateTextureImpl(const TextureDescriptor* descriptor) override;
+ ResultOrError<Ref<TextureViewBase>> CreateTextureViewImpl(
+ TextureBase* texture,
+ const TextureViewDescriptor* descriptor) override;
+
+ ResultOrError<ExecutionSerial> CheckAndUpdateCompletedSerials() override;
+
+ void DestroyImpl() override;
+ MaybeError WaitForIdleForDestruction() override;
+
+ std::vector<std::unique_ptr<PendingOperation>> mPendingOperations;
+
+ static constexpr uint64_t kMaxMemoryUsage = 512 * 1024 * 1024;
+ size_t mMemoryUsage = 0;
+};
+
+class Adapter : public AdapterBase {
+ public:
+ explicit Adapter(InstanceBase* instance);
+ ~Adapter() override;
+
+ // AdapterBase Implementation
+ bool SupportsExternalImages() const override;
+
+ // Used for the tests that intend to use an adapter without all features enabled.
+ void SetSupportedFeatures(const std::vector<wgpu::FeatureName>& requiredFeatures);
+
+ private:
+ MaybeError InitializeImpl() override;
+ MaybeError InitializeSupportedFeaturesImpl() override;
+ MaybeError InitializeSupportedLimitsImpl(CombinedLimits* limits) override;
+
+ ResultOrError<Ref<DeviceBase>> CreateDeviceImpl(const DeviceDescriptor* descriptor) override;
+};
+
+// Helper class so |BindGroup| can allocate memory for its binding data
+// before calling the BindGroupBase base class constructor.
+class BindGroupDataHolder {
+ protected:
+ explicit BindGroupDataHolder(size_t size);
+ ~BindGroupDataHolder();
+
+ void* mBindingDataAllocation;
+};
+
+// We don't have the complexity of placement-allocation of bind group data in
+// the Null backend. This class keeps the binding data in a separate allocation for simplicity.
+class BindGroup final : private BindGroupDataHolder, public BindGroupBase {
+ public:
+ BindGroup(DeviceBase* device, const BindGroupDescriptor* descriptor);
+
+ private:
+ ~BindGroup() override = default;
+};
+
+class BindGroupLayout final : public BindGroupLayoutBase {
+ public:
+ BindGroupLayout(DeviceBase* device,
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken);
+
+ private:
+ ~BindGroupLayout() override = default;
+};
+
+class Buffer final : public BufferBase {
+ public:
+ Buffer(Device* device, const BufferDescriptor* descriptor);
+
+ void CopyFromStaging(StagingBufferBase* staging,
+ uint64_t sourceOffset,
+ uint64_t destinationOffset,
+ uint64_t size);
+
+ void DoWriteBuffer(uint64_t bufferOffset, const void* data, size_t size);
+
+ private:
+ MaybeError MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) override;
+ void UnmapImpl() override;
+ void DestroyImpl() override;
+ bool IsCPUWritableAtCreation() const override;
+ MaybeError MapAtCreationImpl() override;
+ void* GetMappedPointerImpl() override;
+
+ std::unique_ptr<uint8_t[]> mBackingData;
+};
+
+class CommandBuffer final : public CommandBufferBase {
+ public:
+ CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor);
+};
+
+class QuerySet final : public QuerySetBase {
+ public:
+ QuerySet(Device* device, const QuerySetDescriptor* descriptor);
+};
+
+class Queue final : public QueueBase {
+ public:
+ Queue(Device* device, const QueueDescriptor* descriptor);
+
+ private:
+ ~Queue() override;
+ MaybeError SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) override;
+ MaybeError WriteBufferImpl(BufferBase* buffer,
+ uint64_t bufferOffset,
+ const void* data,
+ size_t size) override;
+};
+
+class ComputePipeline final : public ComputePipelineBase {
+ public:
+ using ComputePipelineBase::ComputePipelineBase;
+
+ MaybeError Initialize() override;
+};
+
+class RenderPipeline final : public RenderPipelineBase {
+ public:
+ using RenderPipelineBase::RenderPipelineBase;
+
+ MaybeError Initialize() override;
+};
+
+class ShaderModule final : public ShaderModuleBase {
+ public:
+ using ShaderModuleBase::ShaderModuleBase;
+
+ MaybeError Initialize(ShaderModuleParseResult* parseResult,
+ OwnedCompilationMessages* compilationMessages);
+};
+
+class SwapChain final : public NewSwapChainBase {
+ public:
+ static ResultOrError<Ref<SwapChain>> Create(Device* device,
+ Surface* surface,
+ NewSwapChainBase* previousSwapChain,
+ const SwapChainDescriptor* descriptor);
+ ~SwapChain() override;
+
+ private:
+ using NewSwapChainBase::NewSwapChainBase;
+ MaybeError Initialize(NewSwapChainBase* previousSwapChain);
+
+ Ref<Texture> mTexture;
+
+ MaybeError PresentImpl() override;
+ ResultOrError<Ref<TextureViewBase>> GetCurrentTextureViewImpl() override;
+ void DetachFromSurfaceImpl() override;
+};
+
+class OldSwapChain final : public OldSwapChainBase {
+ public:
+ OldSwapChain(Device* device, const SwapChainDescriptor* descriptor);
+
+ protected:
+ ~OldSwapChain() override;
+ TextureBase* GetNextTextureImpl(const TextureDescriptor* descriptor) override;
+ MaybeError OnBeforePresent(TextureViewBase*) override;
+};
+
+class NativeSwapChainImpl {
+ public:
+ using WSIContext = struct {};
+ void Init(WSIContext* context);
+ DawnSwapChainError Configure(WGPUTextureFormat format,
+ WGPUTextureUsage,
+ uint32_t width,
+ uint32_t height);
+ DawnSwapChainError GetNextTexture(DawnSwapChainNextTexture* nextTexture);
+ DawnSwapChainError Present();
+ wgpu::TextureFormat GetPreferredFormat() const;
+};
+
+class StagingBuffer : public StagingBufferBase {
+ public:
+ StagingBuffer(size_t size, Device* device);
+ ~StagingBuffer() override;
+ MaybeError Initialize() override;
+
+ private:
+ Device* mDevice;
+ std::unique_ptr<uint8_t[]> mBuffer;
+};
+
+class Texture : public TextureBase {
+ public:
+ Texture(DeviceBase* device, const TextureDescriptor* descriptor, TextureState state);
+};
} // namespace dawn::native::null
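
NullBackendTraits and ToBackend above follow Dawn's backend-traits pattern: a traits struct maps each frontend base type to its backend subclass, and ToBackend downcasts through that mapping via ToBackendBase. A rough, self-contained sketch of the idea with hypothetical stand-in types (Dawn's real ToBackendBase lives in the shared frontend):

// Backend-traits sketch: the traits struct selects the backend type for each
// frontend base type, and ToBackend performs the corresponding downcast.
#include <iostream>

struct BufferBase { virtual ~BufferBase() = default; };
struct TextureBase { virtual ~TextureBase() = default; };

struct NullBuffer : BufferBase { int backingBytes = 0; };
struct NullTexture : TextureBase {};

struct NullBackendTraits {
    using BufferType = NullBuffer;
    using TextureType = NullTexture;
};

// Select the backend type that corresponds to a given frontend type.
template <typename Traits, typename T>
struct BackendTypeFor;
template <typename Traits>
struct BackendTypeFor<Traits, BufferBase> { using type = typename Traits::BufferType; };
template <typename Traits>
struct BackendTypeFor<Traits, TextureBase> { using type = typename Traits::TextureType; };

template <typename T>
typename BackendTypeFor<NullBackendTraits, T>::type* ToBackend(T* common) {
    return static_cast<typename BackendTypeFor<NullBackendTraits, T>::type*>(common);
}

int main() {
    NullBuffer buffer;
    BufferBase* base = &buffer;
    ToBackend(base)->backingBytes = 64;  // backend-specific member reachable again
    std::cout << buffer.backingBytes << "\n";
}
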
diff --git a/chromium/third_party/dawn/src/dawn/native/null/NullBackend.cpp b/chromium/third_party/dawn/src/dawn/native/null/NullBackend.cpp
index 43637cd7882..7e4ce536912 100644
--- a/chromium/third_party/dawn/src/dawn/native/null/NullBackend.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/null/NullBackend.cpp
@@ -22,11 +22,11 @@
namespace dawn::native::null {
- DawnSwapChainImplementation CreateNativeSwapChainImpl() {
- DawnSwapChainImplementation impl;
- impl = CreateSwapChainImplementation(new NativeSwapChainImpl());
- impl.textureUsage = WGPUTextureUsage_Present;
- return impl;
- }
+DawnSwapChainImplementation CreateNativeSwapChainImpl() {
+ DawnSwapChainImplementation impl;
+ impl = CreateSwapChainImplementation(new NativeSwapChainImpl());
+ impl.textureUsage = WGPUTextureUsage_Present;
+ return impl;
+}
} // namespace dawn::native::null
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/AdapterGL.cpp b/chromium/third_party/dawn/src/dawn/native/opengl/AdapterGL.cpp
new file mode 100644
index 00000000000..b3be547286f
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/AdapterGL.cpp
@@ -0,0 +1,260 @@
+// Copyright 2022 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/opengl/AdapterGL.h"
+
+#include <string>
+
+#include "dawn/common/GPUInfo.h"
+#include "dawn/common/Log.h"
+#include "dawn/native/Instance.h"
+#include "dawn/native/opengl/DeviceGL.h"
+
+namespace dawn::native::opengl {
+
+namespace {
+
+struct Vendor {
+ const char* vendorName;
+ uint32_t vendorId;
+};
+
+const Vendor kVendors[] = {{"ATI", gpu_info::kVendorID_AMD},
+ {"ARM", gpu_info::kVendorID_ARM},
+ {"Imagination", gpu_info::kVendorID_ImgTec},
+ {"Intel", gpu_info::kVendorID_Intel},
+ {"NVIDIA", gpu_info::kVendorID_Nvidia},
+ {"Qualcomm", gpu_info::kVendorID_Qualcomm}};
+
+uint32_t GetVendorIdFromVendors(const char* vendor) {
+ uint32_t vendorId = 0;
+ for (const auto& it : kVendors) {
+ // Matching vendor name with vendor string
+ if (strstr(vendor, it.vendorName) != nullptr) {
+ vendorId = it.vendorId;
+ break;
+ }
+ }
+ return vendorId;
+}
+
+void KHRONOS_APIENTRY OnGLDebugMessage(GLenum source,
+ GLenum type,
+ GLuint id,
+ GLenum severity,
+ GLsizei length,
+ const GLchar* message,
+ const void* userParam) {
+ const char* sourceText;
+ switch (source) {
+ case GL_DEBUG_SOURCE_API:
+ sourceText = "OpenGL";
+ break;
+ case GL_DEBUG_SOURCE_WINDOW_SYSTEM:
+ sourceText = "Window System";
+ break;
+ case GL_DEBUG_SOURCE_SHADER_COMPILER:
+ sourceText = "Shader Compiler";
+ break;
+ case GL_DEBUG_SOURCE_THIRD_PARTY:
+ sourceText = "Third Party";
+ break;
+ case GL_DEBUG_SOURCE_APPLICATION:
+ sourceText = "Application";
+ break;
+ case GL_DEBUG_SOURCE_OTHER:
+ sourceText = "Other";
+ break;
+ default:
+ sourceText = "UNKNOWN";
+ break;
+ }
+
+ const char* severityText;
+ switch (severity) {
+ case GL_DEBUG_SEVERITY_HIGH:
+ severityText = "High";
+ break;
+ case GL_DEBUG_SEVERITY_MEDIUM:
+ severityText = "Medium";
+ break;
+ case GL_DEBUG_SEVERITY_LOW:
+ severityText = "Low";
+ break;
+ case GL_DEBUG_SEVERITY_NOTIFICATION:
+ severityText = "Notification";
+ break;
+ default:
+ severityText = "UNKNOWN";
+ break;
+ }
+
+ if (type == GL_DEBUG_TYPE_ERROR) {
+ dawn::WarningLog() << "OpenGL error:"
+ << "\n Source: " << sourceText //
+ << "\n ID: " << id //
+ << "\n Severity: " << severityText //
+ << "\n Message: " << message;
+
+ // Abort on an error when in Debug mode.
+ UNREACHABLE();
+ }
+}
+
+} // anonymous namespace
+
+Adapter::Adapter(InstanceBase* instance, wgpu::BackendType backendType)
+ : AdapterBase(instance, backendType) {}
+
+MaybeError Adapter::InitializeGLFunctions(void* (*getProc)(const char*)) {
+ // Use getProc to populate the dispatch table
+ return mFunctions.Initialize(getProc);
+}
+
+bool Adapter::SupportsExternalImages() const {
+ // Via dawn::native::opengl::WrapExternalEGLImage
+ return GetBackendType() == wgpu::BackendType::OpenGLES;
+}
+
+MaybeError Adapter::InitializeImpl() {
+ if (mFunctions.GetVersion().IsES()) {
+ ASSERT(GetBackendType() == wgpu::BackendType::OpenGLES);
+ } else {
+ ASSERT(GetBackendType() == wgpu::BackendType::OpenGL);
+ }
+
+ // Use the debug output functionality to get notified about GL errors
+ // TODO(cwallez@chromium.org): add support for the KHR_debug and ARB_debug_output
+ // extensions
+ bool hasDebugOutput = mFunctions.IsAtLeastGL(4, 3) || mFunctions.IsAtLeastGLES(3, 2);
+
+ if (GetInstance()->IsBackendValidationEnabled() && hasDebugOutput) {
+ mFunctions.Enable(GL_DEBUG_OUTPUT);
+ mFunctions.Enable(GL_DEBUG_OUTPUT_SYNCHRONOUS);
+
+ // Any GL error; dangerous undefined behavior; any shader compiler and linker errors
+ mFunctions.DebugMessageControl(GL_DONT_CARE, GL_DONT_CARE, GL_DEBUG_SEVERITY_HIGH, 0,
+ nullptr, GL_TRUE);
+
+ // Severe performance warnings; GLSL or other shader compiler and linker warnings;
+ // use of currently deprecated behavior
+ mFunctions.DebugMessageControl(GL_DONT_CARE, GL_DONT_CARE, GL_DEBUG_SEVERITY_MEDIUM, 0,
+ nullptr, GL_TRUE);
+
+ // Performance warnings from redundant state changes; trivial undefined behavior
+ // This is disabled because we do an incredible amount of redundant state changes.
+ mFunctions.DebugMessageControl(GL_DONT_CARE, GL_DONT_CARE, GL_DEBUG_SEVERITY_LOW, 0,
+ nullptr, GL_FALSE);
+
+ // Any message which is not an error or performance concern
+ mFunctions.DebugMessageControl(GL_DONT_CARE, GL_DONT_CARE, GL_DEBUG_SEVERITY_NOTIFICATION,
+ 0, nullptr, GL_FALSE);
+ mFunctions.DebugMessageCallback(&OnGLDebugMessage, nullptr);
+ }
+
+ // Set state that never changes between devices.
+ mFunctions.Enable(GL_DEPTH_TEST);
+ mFunctions.Enable(GL_SCISSOR_TEST);
+ mFunctions.Enable(GL_PRIMITIVE_RESTART_FIXED_INDEX);
+ if (mFunctions.GetVersion().IsDesktop()) {
+ // These are not necessary on GLES. The functionality is enabled by default, and
+ // works by specifying sample counts and SRGB textures, respectively.
+ mFunctions.Enable(GL_MULTISAMPLE);
+ mFunctions.Enable(GL_FRAMEBUFFER_SRGB);
+ }
+ mFunctions.Enable(GL_SAMPLE_MASK);
+
+ mName = reinterpret_cast<const char*>(mFunctions.GetString(GL_RENDERER));
+
+    // Workaround to find the vendor id from the vendor name
+ const char* vendor = reinterpret_cast<const char*>(mFunctions.GetString(GL_VENDOR));
+ mVendorId = GetVendorIdFromVendors(vendor);
+
+ mDriverDescription = std::string("OpenGL version ") +
+ reinterpret_cast<const char*>(mFunctions.GetString(GL_VERSION));
+
+ if (mName.find("SwiftShader") != std::string::npos) {
+ mAdapterType = wgpu::AdapterType::CPU;
+ }
+
+ return {};
+}
+
+MaybeError Adapter::InitializeSupportedFeaturesImpl() {
+ // TextureCompressionBC
+ {
+ // BC1, BC2 and BC3 are not supported in OpenGL or OpenGL ES core features.
+ bool supportsS3TC =
+ mFunctions.IsGLExtensionSupported("GL_EXT_texture_compression_s3tc") ||
+ (mFunctions.IsGLExtensionSupported("GL_EXT_texture_compression_dxt1") &&
+ mFunctions.IsGLExtensionSupported("GL_ANGLE_texture_compression_dxt3") &&
+ mFunctions.IsGLExtensionSupported("GL_ANGLE_texture_compression_dxt5"));
+
+ // COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT, COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT and
+ // COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT requires both GL_EXT_texture_sRGB and
+ // GL_EXT_texture_compression_s3tc on desktop OpenGL drivers.
+ // (https://www.khronos.org/registry/OpenGL/extensions/EXT/EXT_texture_sRGB.txt)
+ bool supportsTextureSRGB = mFunctions.IsGLExtensionSupported("GL_EXT_texture_sRGB");
+
+ // GL_EXT_texture_compression_s3tc_srgb is an extension in OpenGL ES.
+ // NVidia GLES drivers don't support this extension, but they do support
+ // GL_NV_sRGB_formats. (Note that GL_EXT_texture_sRGB does not exist on ES.
+ // GL_EXT_sRGB does (core in ES 3.0), but it does not automatically provide S3TC
+ // SRGB support even if S3TC is supported; see
+ // https://www.khronos.org/registry/OpenGL/extensions/EXT/EXT_sRGB.txt.)
+ bool supportsS3TCSRGB =
+ mFunctions.IsGLExtensionSupported("GL_EXT_texture_compression_s3tc_srgb") ||
+ mFunctions.IsGLExtensionSupported("GL_NV_sRGB_formats");
+
+ // BC4 and BC5
+ bool supportsRGTC = mFunctions.IsAtLeastGL(3, 0) ||
+ mFunctions.IsGLExtensionSupported("GL_ARB_texture_compression_rgtc") ||
+ mFunctions.IsGLExtensionSupported("GL_EXT_texture_compression_rgtc");
+
+ // BC6 and BC7
+ bool supportsBPTC = mFunctions.IsAtLeastGL(4, 2) ||
+ mFunctions.IsGLExtensionSupported("GL_ARB_texture_compression_bptc") ||
+ mFunctions.IsGLExtensionSupported("GL_EXT_texture_compression_bptc");
+
+ if (supportsS3TC && (supportsTextureSRGB || supportsS3TCSRGB) && supportsRGTC &&
+ supportsBPTC) {
+ mSupportedFeatures.EnableFeature(dawn::native::Feature::TextureCompressionBC);
+ }
+ mSupportedFeatures.EnableFeature(Feature::Depth24UnormStencil8);
+ }
+
+    // Non-zero baseInstance requires at least desktop OpenGL 4.2; it is not supported in
+    // OpenGL ES. OpenGL:
+ // https://www.khronos.org/registry/OpenGL-Refpages/gl4/html/glDrawElementsIndirect.xhtml
+ // OpenGL ES:
+ // https://www.khronos.org/registry/OpenGL-Refpages/es3/html/glDrawElementsIndirect.xhtml
+ if (mFunctions.IsAtLeastGL(4, 2)) {
+ mSupportedFeatures.EnableFeature(Feature::IndirectFirstInstance);
+ }
+
+ return {};
+}
+
+MaybeError Adapter::InitializeSupportedLimitsImpl(CombinedLimits* limits) {
+ GetDefaultLimits(&limits->v1);
+ return {};
+}
+
+ResultOrError<Ref<DeviceBase>> Adapter::CreateDeviceImpl(const DeviceDescriptor* descriptor) {
+ // There is no limit on the number of devices created from this adapter because they can
+ // all share the same backing OpenGL context.
+ return Device::Create(this, descriptor, mFunctions);
+}
+
+} // namespace dawn::native::opengl
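
Note: the adapter above derives a vendor id by substring-matching the GL_VENDOR string against a small table. Below is a minimal standalone sketch of that heuristic; the numeric values are the commonly published PCI vendor ids used as stand-ins for the gpu_info constants, so treat them as illustrative rather than as values taken from this patch.

#include <cstdint>
#include <cstdio>
#include <cstring>

struct Vendor {
    const char* vendorName;
    uint32_t vendorId;
};

// Illustrative PCI vendor ids; Dawn reads these from dawn/common/GPUInfo.h instead.
constexpr Vendor kKnownVendors[] = {{"ATI", 0x1002},   {"ARM", 0x13B5},    {"Imagination", 0x1010},
                                    {"Intel", 0x8086}, {"NVIDIA", 0x10DE}, {"Qualcomm", 0x5143}};

uint32_t VendorIdFromGLVendorString(const char* vendor) {
    for (const Vendor& v : kKnownVendors) {
        // GL_VENDOR strings are free-form ("Intel Inc.", "NVIDIA Corporation"), so a
        // substring match is enough to recover the vendor.
        if (std::strstr(vendor, v.vendorName) != nullptr) {
            return v.vendorId;
        }
    }
    return 0;  // unknown vendor, mirrors GetVendorIdFromVendors() returning 0
}

int main() {
    std::printf("0x%04X\n", static_cast<unsigned>(VendorIdFromGLVendorString("NVIDIA Corporation")));  // 0x10DE
    std::printf("0x%04X\n", static_cast<unsigned>(VendorIdFromGLVendorString("Mesa/X.org")));          // 0x0000
    return 0;
}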
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/AdapterGL.h b/chromium/third_party/dawn/src/dawn/native/opengl/AdapterGL.h
new file mode 100644
index 00000000000..41018872d76
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/AdapterGL.h
@@ -0,0 +1,45 @@
+// Copyright 2022 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef SRC_DAWN_NATIVE_OPENGL_ADAPTERGL_H_
+#define SRC_DAWN_NATIVE_OPENGL_ADAPTERGL_H_
+
+#include "dawn/native/Adapter.h"
+#include "dawn/native/opengl/OpenGLFunctions.h"
+
+namespace dawn::native::opengl {
+
+class Adapter : public AdapterBase {
+ public:
+ Adapter(InstanceBase* instance, wgpu::BackendType backendType);
+
+ MaybeError InitializeGLFunctions(void* (*getProc)(const char*));
+
+ ~Adapter() override = default;
+
+ // AdapterBase Implementation
+ bool SupportsExternalImages() const override;
+
+ private:
+ MaybeError InitializeImpl() override;
+ MaybeError InitializeSupportedFeaturesImpl() override;
+ MaybeError InitializeSupportedLimitsImpl(CombinedLimits* limits) override;
+ ResultOrError<Ref<DeviceBase>> CreateDeviceImpl(const DeviceDescriptor* descriptor) override;
+
+ OpenGLFunctions mFunctions;
+};
+
+} // namespace dawn::native::opengl
+
+#endif // SRC_DAWN_NATIVE_OPENGL_ADAPTERGL_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/BackendGL.cpp b/chromium/third_party/dawn/src/dawn/native/opengl/BackendGL.cpp
index aac0c14c50e..c4b877667a6 100644
--- a/chromium/third_party/dawn/src/dawn/native/opengl/BackendGL.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/BackendGL.cpp
@@ -14,293 +14,47 @@
#include "dawn/native/opengl/BackendGL.h"
-#include "dawn/common/GPUInfo.h"
-#include "dawn/common/Log.h"
-#include "dawn/native/Instance.h"
-#include "dawn/native/OpenGLBackend.h"
-#include "dawn/native/opengl/DeviceGL.h"
+#include <utility>
-#include <cstring>
+#include "dawn/native/OpenGLBackend.h"
+#include "dawn/native/opengl/AdapterGL.h"
namespace dawn::native::opengl {
- namespace {
-
- struct Vendor {
- const char* vendorName;
- uint32_t vendorId;
- };
-
- const Vendor kVendors[] = {{"ATI", gpu_info::kVendorID_AMD},
- {"ARM", gpu_info::kVendorID_ARM},
- {"Imagination", gpu_info::kVendorID_ImgTec},
- {"Intel", gpu_info::kVendorID_Intel},
- {"NVIDIA", gpu_info::kVendorID_Nvidia},
- {"Qualcomm", gpu_info::kVendorID_Qualcomm}};
-
- uint32_t GetVendorIdFromVendors(const char* vendor) {
- uint32_t vendorId = 0;
- for (const auto& it : kVendors) {
- // Matching vendor name with vendor string
- if (strstr(vendor, it.vendorName) != nullptr) {
- vendorId = it.vendorId;
- break;
- }
- }
- return vendorId;
- }
-
- void KHRONOS_APIENTRY OnGLDebugMessage(GLenum source,
- GLenum type,
- GLuint id,
- GLenum severity,
- GLsizei length,
- const GLchar* message,
- const void* userParam) {
- const char* sourceText;
- switch (source) {
- case GL_DEBUG_SOURCE_API:
- sourceText = "OpenGL";
- break;
- case GL_DEBUG_SOURCE_WINDOW_SYSTEM:
- sourceText = "Window System";
- break;
- case GL_DEBUG_SOURCE_SHADER_COMPILER:
- sourceText = "Shader Compiler";
- break;
- case GL_DEBUG_SOURCE_THIRD_PARTY:
- sourceText = "Third Party";
- break;
- case GL_DEBUG_SOURCE_APPLICATION:
- sourceText = "Application";
- break;
- case GL_DEBUG_SOURCE_OTHER:
- sourceText = "Other";
- break;
- default:
- sourceText = "UNKNOWN";
- break;
- }
-
- const char* severityText;
- switch (severity) {
- case GL_DEBUG_SEVERITY_HIGH:
- severityText = "High";
- break;
- case GL_DEBUG_SEVERITY_MEDIUM:
- severityText = "Medium";
- break;
- case GL_DEBUG_SEVERITY_LOW:
- severityText = "Low";
- break;
- case GL_DEBUG_SEVERITY_NOTIFICATION:
- severityText = "Notification";
- break;
- default:
- severityText = "UNKNOWN";
- break;
- }
-
- if (type == GL_DEBUG_TYPE_ERROR) {
- dawn::WarningLog() << "OpenGL error:"
- << "\n Source: " << sourceText //
- << "\n ID: " << id //
- << "\n Severity: " << severityText //
- << "\n Message: " << message;
-
- // Abort on an error when in Debug mode.
- UNREACHABLE();
- }
- }
-
- } // anonymous namespace
-
- // The OpenGL backend's Adapter.
-
- class Adapter : public AdapterBase {
- public:
- Adapter(InstanceBase* instance, wgpu::BackendType backendType)
- : AdapterBase(instance, backendType) {
- }
-
- MaybeError InitializeGLFunctions(void* (*getProc)(const char*)) {
- // Use getProc to populate the dispatch table
- return mFunctions.Initialize(getProc);
- }
-
- ~Adapter() override = default;
-
- // AdapterBase Implementation
- bool SupportsExternalImages() const override {
- // Via dawn::native::opengl::WrapExternalEGLImage
- return GetBackendType() == wgpu::BackendType::OpenGLES;
- }
-
- private:
- MaybeError InitializeImpl() override {
- if (mFunctions.GetVersion().IsES()) {
- ASSERT(GetBackendType() == wgpu::BackendType::OpenGLES);
- } else {
- ASSERT(GetBackendType() == wgpu::BackendType::OpenGL);
- }
-
- // Use the debug output functionality to get notified about GL errors
- // TODO(cwallez@chromium.org): add support for the KHR_debug and ARB_debug_output
- // extensions
- bool hasDebugOutput = mFunctions.IsAtLeastGL(4, 3) || mFunctions.IsAtLeastGLES(3, 2);
-
- if (GetInstance()->IsBackendValidationEnabled() && hasDebugOutput) {
- mFunctions.Enable(GL_DEBUG_OUTPUT);
- mFunctions.Enable(GL_DEBUG_OUTPUT_SYNCHRONOUS);
-
- // Any GL error; dangerous undefined behavior; any shader compiler and linker errors
- mFunctions.DebugMessageControl(GL_DONT_CARE, GL_DONT_CARE, GL_DEBUG_SEVERITY_HIGH,
- 0, nullptr, GL_TRUE);
-
- // Severe performance warnings; GLSL or other shader compiler and linker warnings;
- // use of currently deprecated behavior
- mFunctions.DebugMessageControl(GL_DONT_CARE, GL_DONT_CARE, GL_DEBUG_SEVERITY_MEDIUM,
- 0, nullptr, GL_TRUE);
-
- // Performance warnings from redundant state changes; trivial undefined behavior
- // This is disabled because we do an incredible amount of redundant state changes.
- mFunctions.DebugMessageControl(GL_DONT_CARE, GL_DONT_CARE, GL_DEBUG_SEVERITY_LOW, 0,
- nullptr, GL_FALSE);
-
- // Any message which is not an error or performance concern
- mFunctions.DebugMessageControl(GL_DONT_CARE, GL_DONT_CARE,
- GL_DEBUG_SEVERITY_NOTIFICATION, 0, nullptr,
- GL_FALSE);
- mFunctions.DebugMessageCallback(&OnGLDebugMessage, nullptr);
- }
-
- // Set state that never changes between devices.
- mFunctions.Enable(GL_DEPTH_TEST);
- mFunctions.Enable(GL_SCISSOR_TEST);
- mFunctions.Enable(GL_PRIMITIVE_RESTART_FIXED_INDEX);
- if (mFunctions.GetVersion().IsDesktop()) {
- // These are not necessary on GLES. The functionality is enabled by default, and
- // works by specifying sample counts and SRGB textures, respectively.
- mFunctions.Enable(GL_MULTISAMPLE);
- mFunctions.Enable(GL_FRAMEBUFFER_SRGB);
- }
- mFunctions.Enable(GL_SAMPLE_MASK);
-
- mName = reinterpret_cast<const char*>(mFunctions.GetString(GL_RENDERER));
-
- // Workaroud to find vendor id from vendor name
- const char* vendor = reinterpret_cast<const char*>(mFunctions.GetString(GL_VENDOR));
- mVendorId = GetVendorIdFromVendors(vendor);
-
- mDriverDescription = std::string("OpenGL version ") +
- reinterpret_cast<const char*>(mFunctions.GetString(GL_VERSION));
-
- if (mName.find("SwiftShader") != std::string::npos) {
- mAdapterType = wgpu::AdapterType::CPU;
- }
-
- return {};
- }
-
- MaybeError InitializeSupportedFeaturesImpl() override {
- // TextureCompressionBC
- {
- // BC1, BC2 and BC3 are not supported in OpenGL or OpenGL ES core features.
- bool supportsS3TC =
- mFunctions.IsGLExtensionSupported("GL_EXT_texture_compression_s3tc") ||
- (mFunctions.IsGLExtensionSupported("GL_EXT_texture_compression_dxt1") &&
- mFunctions.IsGLExtensionSupported("GL_ANGLE_texture_compression_dxt3") &&
- mFunctions.IsGLExtensionSupported("GL_ANGLE_texture_compression_dxt5"));
-
- // COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT, COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT and
- // COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT requires both GL_EXT_texture_sRGB and
- // GL_EXT_texture_compression_s3tc on desktop OpenGL drivers.
- // (https://www.khronos.org/registry/OpenGL/extensions/EXT/EXT_texture_sRGB.txt)
- bool supportsTextureSRGB = mFunctions.IsGLExtensionSupported("GL_EXT_texture_sRGB");
-
- // GL_EXT_texture_compression_s3tc_srgb is an extension in OpenGL ES.
- // NVidia GLES drivers don't support this extension, but they do support
- // GL_NV_sRGB_formats. (Note that GL_EXT_texture_sRGB does not exist on ES.
- // GL_EXT_sRGB does (core in ES 3.0), but it does not automatically provide S3TC
- // SRGB support even if S3TC is supported; see
- // https://www.khronos.org/registry/OpenGL/extensions/EXT/EXT_sRGB.txt.)
- bool supportsS3TCSRGB =
- mFunctions.IsGLExtensionSupported("GL_EXT_texture_compression_s3tc_srgb") ||
- mFunctions.IsGLExtensionSupported("GL_NV_sRGB_formats");
-
- // BC4 and BC5
- bool supportsRGTC =
- mFunctions.IsAtLeastGL(3, 0) ||
- mFunctions.IsGLExtensionSupported("GL_ARB_texture_compression_rgtc") ||
- mFunctions.IsGLExtensionSupported("GL_EXT_texture_compression_rgtc");
-
- // BC6 and BC7
- bool supportsBPTC =
- mFunctions.IsAtLeastGL(4, 2) ||
- mFunctions.IsGLExtensionSupported("GL_ARB_texture_compression_bptc") ||
- mFunctions.IsGLExtensionSupported("GL_EXT_texture_compression_bptc");
-
- if (supportsS3TC && (supportsTextureSRGB || supportsS3TCSRGB) && supportsRGTC &&
- supportsBPTC) {
- mSupportedFeatures.EnableFeature(dawn::native::Feature::TextureCompressionBC);
- }
- mSupportedFeatures.EnableFeature(Feature::Depth24UnormStencil8);
- }
-
- return {};
- }
-
- MaybeError InitializeSupportedLimitsImpl(CombinedLimits* limits) override {
- GetDefaultLimits(&limits->v1);
- return {};
- }
-
- ResultOrError<Ref<DeviceBase>> CreateDeviceImpl(
- const DeviceDescriptor* descriptor) override {
- // There is no limit on the number of devices created from this adapter because they can
- // all share the same backing OpenGL context.
- return Device::Create(this, descriptor, mFunctions);
- }
-
- OpenGLFunctions mFunctions;
- };
-
- // Implementation of the OpenGL backend's BackendConnection
+// Implementation of the OpenGL backend's BackendConnection
- Backend::Backend(InstanceBase* instance, wgpu::BackendType backendType)
- : BackendConnection(instance, backendType) {
- }
+Backend::Backend(InstanceBase* instance, wgpu::BackendType backendType)
+ : BackendConnection(instance, backendType) {}
- std::vector<Ref<AdapterBase>> Backend::DiscoverDefaultAdapters() {
- // The OpenGL backend needs at least "getProcAddress" to discover an adapter.
- return {};
- }
+std::vector<Ref<AdapterBase>> Backend::DiscoverDefaultAdapters() {
+ // The OpenGL backend needs at least "getProcAddress" to discover an adapter.
+ return {};
+}
- ResultOrError<std::vector<Ref<AdapterBase>>> Backend::DiscoverAdapters(
- const AdapterDiscoveryOptionsBase* optionsBase) {
- // TODO(cwallez@chromium.org): For now only create a single OpenGL adapter because don't
- // know how to handle MakeCurrent.
- DAWN_INVALID_IF(mCreatedAdapter, "The OpenGL backend can only create a single adapter.");
+ResultOrError<std::vector<Ref<AdapterBase>>> Backend::DiscoverAdapters(
+ const AdapterDiscoveryOptionsBase* optionsBase) {
+    // TODO(cwallez@chromium.org): For now only create a single OpenGL adapter because we don't
+    // know how to handle MakeCurrent.
+ DAWN_INVALID_IF(mCreatedAdapter, "The OpenGL backend can only create a single adapter.");
- ASSERT(static_cast<wgpu::BackendType>(optionsBase->backendType) == GetType());
- const AdapterDiscoveryOptions* options =
- static_cast<const AdapterDiscoveryOptions*>(optionsBase);
+ ASSERT(static_cast<wgpu::BackendType>(optionsBase->backendType) == GetType());
+ const AdapterDiscoveryOptions* options =
+ static_cast<const AdapterDiscoveryOptions*>(optionsBase);
- DAWN_INVALID_IF(options->getProc == nullptr,
- "AdapterDiscoveryOptions::getProc must be set");
+ DAWN_INVALID_IF(options->getProc == nullptr, "AdapterDiscoveryOptions::getProc must be set");
- Ref<Adapter> adapter = AcquireRef(
- new Adapter(GetInstance(), static_cast<wgpu::BackendType>(optionsBase->backendType)));
- DAWN_TRY(adapter->InitializeGLFunctions(options->getProc));
- DAWN_TRY(adapter->Initialize());
+ Ref<Adapter> adapter = AcquireRef(
+ new Adapter(GetInstance(), static_cast<wgpu::BackendType>(optionsBase->backendType)));
+ DAWN_TRY(adapter->InitializeGLFunctions(options->getProc));
+ DAWN_TRY(adapter->Initialize());
- mCreatedAdapter = true;
- std::vector<Ref<AdapterBase>> adapters{std::move(adapter)};
- return std::move(adapters);
- }
+ mCreatedAdapter = true;
+ std::vector<Ref<AdapterBase>> adapters{std::move(adapter)};
+ return std::move(adapters);
+}
- BackendConnection* Connect(InstanceBase* instance, wgpu::BackendType backendType) {
- return new Backend(instance, backendType);
- }
+BackendConnection* Connect(InstanceBase* instance, wgpu::BackendType backendType) {
+ return new Backend(instance, backendType);
+}
} // namespace dawn::native::opengl
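
Note: Backend::DiscoverAdapters above only needs a getProc loader for a GL context that is already current. The sketch below shows one plausible way to drive it from application code; dawn::native::Instance, GetAdapters and the GLFW calls come from outside this patch, so treat the exact entry points as assumptions rather than as part of the change.

#include <cstdio>

#include <GLFW/glfw3.h>
#include <dawn/native/DawnNative.h>
#include <dawn/native/OpenGLBackend.h>

int main() {
    // Create a window and make its GL context current before discovery.
    glfwInit();
    glfwWindowHint(GLFW_CLIENT_API, GLFW_OPENGL_API);
    GLFWwindow* window = glfwCreateWindow(640, 480, "dawn-gl", nullptr, nullptr);
    glfwMakeContextCurrent(window);

    dawn::native::Instance instance;

    // getProc is the only required field; DiscoverAdapters() above rejects a null one.
    dawn::native::opengl::AdapterDiscoveryOptions options;
    options.getProc = reinterpret_cast<void* (*)(const char*)>(glfwGetProcAddress);

    // Routed through Connect() -> Backend::DiscoverAdapters(); at most one adapter is created.
    instance.DiscoverAdapters(&options);
    std::printf("adapters discovered: %zu\n", instance.GetAdapters().size());
    return 0;
}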
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/BackendGL.h b/chromium/third_party/dawn/src/dawn/native/opengl/BackendGL.h
index 8c3bd95985a..591672809e2 100644
--- a/chromium/third_party/dawn/src/dawn/native/opengl/BackendGL.h
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/BackendGL.h
@@ -15,21 +15,23 @@
#ifndef SRC_DAWN_NATIVE_OPENGL_BACKENDGL_H_
#define SRC_DAWN_NATIVE_OPENGL_BACKENDGL_H_
+#include <vector>
+
#include "dawn/native/BackendConnection.h"
namespace dawn::native::opengl {
- class Backend : public BackendConnection {
- public:
- Backend(InstanceBase* instance, wgpu::BackendType backendType);
+class Backend : public BackendConnection {
+ public:
+ Backend(InstanceBase* instance, wgpu::BackendType backendType);
- std::vector<Ref<AdapterBase>> DiscoverDefaultAdapters() override;
- ResultOrError<std::vector<Ref<AdapterBase>>> DiscoverAdapters(
- const AdapterDiscoveryOptionsBase* options) override;
+ std::vector<Ref<AdapterBase>> DiscoverDefaultAdapters() override;
+ ResultOrError<std::vector<Ref<AdapterBase>>> DiscoverAdapters(
+ const AdapterDiscoveryOptionsBase* options) override;
- private:
- bool mCreatedAdapter = false;
- };
+ private:
+ bool mCreatedAdapter = false;
+};
} // namespace dawn::native::opengl
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/BindGroupGL.cpp b/chromium/third_party/dawn/src/dawn/native/opengl/BindGroupGL.cpp
index 6573a9274ef..a688c2ee763 100644
--- a/chromium/third_party/dawn/src/dawn/native/opengl/BindGroupGL.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/BindGroupGL.cpp
@@ -20,46 +20,45 @@
namespace dawn::native::opengl {
- MaybeError ValidateGLBindGroupDescriptor(const BindGroupDescriptor* descriptor) {
- const BindGroupLayoutBase::BindingMap& bindingMap = descriptor->layout->GetBindingMap();
- for (uint32_t i = 0; i < descriptor->entryCount; ++i) {
- const BindGroupEntry& entry = descriptor->entries[i];
+MaybeError ValidateGLBindGroupDescriptor(const BindGroupDescriptor* descriptor) {
+ const BindGroupLayoutBase::BindingMap& bindingMap = descriptor->layout->GetBindingMap();
+ for (uint32_t i = 0; i < descriptor->entryCount; ++i) {
+ const BindGroupEntry& entry = descriptor->entries[i];
- const auto& it = bindingMap.find(BindingNumber(entry.binding));
- BindingIndex bindingIndex = it->second;
- ASSERT(bindingIndex < descriptor->layout->GetBindingCount());
+ const auto& it = bindingMap.find(BindingNumber(entry.binding));
+ BindingIndex bindingIndex = it->second;
+ ASSERT(bindingIndex < descriptor->layout->GetBindingCount());
- const BindingInfo& bindingInfo = descriptor->layout->GetBindingInfo(bindingIndex);
- if (bindingInfo.bindingType == BindingInfoType::StorageTexture) {
- ASSERT(entry.textureView != nullptr);
- const uint32_t textureViewLayerCount = entry.textureView->GetLayerCount();
- DAWN_INVALID_IF(
- textureViewLayerCount != 1 &&
- textureViewLayerCount != entry.textureView->GetTexture()->GetArrayLayers(),
- "%s binds %u layers. Currently the OpenGL backend only supports either binding "
- "1 layer or the all layers (%u) for storage texture.",
- entry.textureView, textureViewLayerCount,
- entry.textureView->GetTexture()->GetArrayLayers());
- }
+ const BindingInfo& bindingInfo = descriptor->layout->GetBindingInfo(bindingIndex);
+ if (bindingInfo.bindingType == BindingInfoType::StorageTexture) {
+ ASSERT(entry.textureView != nullptr);
+ const uint32_t textureViewLayerCount = entry.textureView->GetLayerCount();
+ DAWN_INVALID_IF(
+ textureViewLayerCount != 1 &&
+ textureViewLayerCount != entry.textureView->GetTexture()->GetArrayLayers(),
+ "%s binds %u layers. Currently the OpenGL backend only supports either binding "
+                "1 layer or all the layers (%u) of a storage texture.",
+ entry.textureView, textureViewLayerCount,
+ entry.textureView->GetTexture()->GetArrayLayers());
}
-
- return {};
}
- BindGroup::BindGroup(Device* device, const BindGroupDescriptor* descriptor)
- : BindGroupBase(this, device, descriptor) {
- }
+ return {};
+}
- BindGroup::~BindGroup() = default;
+BindGroup::BindGroup(Device* device, const BindGroupDescriptor* descriptor)
+ : BindGroupBase(this, device, descriptor) {}
- void BindGroup::DestroyImpl() {
- BindGroupBase::DestroyImpl();
- ToBackend(GetLayout())->DeallocateBindGroup(this);
- }
+BindGroup::~BindGroup() = default;
- // static
- Ref<BindGroup> BindGroup::Create(Device* device, const BindGroupDescriptor* descriptor) {
- return ToBackend(descriptor->layout)->AllocateBindGroup(device, descriptor);
- }
+void BindGroup::DestroyImpl() {
+ BindGroupBase::DestroyImpl();
+ ToBackend(GetLayout())->DeallocateBindGroup(this);
+}
+
+// static
+Ref<BindGroup> BindGroup::Create(Device* device, const BindGroupDescriptor* descriptor) {
+ return ToBackend(descriptor->layout)->AllocateBindGroup(device, descriptor);
+}
} // namespace dawn::native::opengl
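
Note: ValidateGLBindGroupDescriptor above only accepts storage texture views that bind either exactly one layer or every layer of the texture. A hedged sketch of a view descriptor that satisfies the rule, written against the webgpu-cpp API (the API types and the 6-layer texture are assumptions from outside this patch):

#include <webgpu/webgpu_cpp.h>

// `texture` is assumed to be a 2D array texture with 6 layers and StorageBinding usage.
wgpu::TextureView MakeGLCompatibleStorageView(const wgpu::Texture& texture) {
    wgpu::TextureViewDescriptor desc = {};
    desc.dimension = wgpu::TextureViewDimension::e2DArray;
    desc.baseArrayLayer = 0;
    desc.arrayLayerCount = 6;  // all layers; arrayLayerCount = 1 would also pass the check
    return texture.CreateView(&desc);
}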
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/BindGroupGL.h b/chromium/third_party/dawn/src/dawn/native/opengl/BindGroupGL.h
index fb6e2b5d391..a33b03be3c4 100644
--- a/chromium/third_party/dawn/src/dawn/native/opengl/BindGroupGL.h
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/BindGroupGL.h
@@ -20,21 +20,21 @@
namespace dawn::native::opengl {
- class Device;
+class Device;
- MaybeError ValidateGLBindGroupDescriptor(const BindGroupDescriptor* descriptor);
+MaybeError ValidateGLBindGroupDescriptor(const BindGroupDescriptor* descriptor);
- class BindGroup final : public BindGroupBase, public PlacementAllocated {
- public:
- static Ref<BindGroup> Create(Device* device, const BindGroupDescriptor* descriptor);
+class BindGroup final : public BindGroupBase, public PlacementAllocated {
+ public:
+ static Ref<BindGroup> Create(Device* device, const BindGroupDescriptor* descriptor);
- BindGroup(Device* device, const BindGroupDescriptor* descriptor);
+ BindGroup(Device* device, const BindGroupDescriptor* descriptor);
- private:
- ~BindGroup() override;
+ private:
+ ~BindGroup() override;
- void DestroyImpl() override;
- };
+ void DestroyImpl() override;
+};
} // namespace dawn::native::opengl
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/BindGroupLayoutGL.cpp b/chromium/third_party/dawn/src/dawn/native/opengl/BindGroupLayoutGL.cpp
index 1cc14749890..b665d1791c0 100644
--- a/chromium/third_party/dawn/src/dawn/native/opengl/BindGroupLayoutGL.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/BindGroupLayoutGL.cpp
@@ -14,24 +14,21 @@
#include "dawn/native/opengl/BindGroupLayoutGL.h"
-#include "dawn/native/opengl/BindGroupGL.h"
-
namespace dawn::native::opengl {
- BindGroupLayout::BindGroupLayout(DeviceBase* device,
- const BindGroupLayoutDescriptor* descriptor,
- PipelineCompatibilityToken pipelineCompatibilityToken)
- : BindGroupLayoutBase(device, descriptor, pipelineCompatibilityToken),
- mBindGroupAllocator(MakeFrontendBindGroupAllocator<BindGroup>(4096)) {
- }
+BindGroupLayout::BindGroupLayout(DeviceBase* device,
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken)
+ : BindGroupLayoutBase(device, descriptor, pipelineCompatibilityToken),
+ mBindGroupAllocator(MakeFrontendBindGroupAllocator<BindGroup>(4096)) {}
- Ref<BindGroup> BindGroupLayout::AllocateBindGroup(Device* device,
- const BindGroupDescriptor* descriptor) {
- return AcquireRef(mBindGroupAllocator.Allocate(device, descriptor));
- }
+Ref<BindGroup> BindGroupLayout::AllocateBindGroup(Device* device,
+ const BindGroupDescriptor* descriptor) {
+ return AcquireRef(mBindGroupAllocator.Allocate(device, descriptor));
+}
- void BindGroupLayout::DeallocateBindGroup(BindGroup* bindGroup) {
- mBindGroupAllocator.Deallocate(bindGroup);
- }
+void BindGroupLayout::DeallocateBindGroup(BindGroup* bindGroup) {
+ mBindGroupAllocator.Deallocate(bindGroup);
+}
} // namespace dawn::native::opengl
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/BindGroupLayoutGL.h b/chromium/third_party/dawn/src/dawn/native/opengl/BindGroupLayoutGL.h
index 5e147982404..136b16fa507 100644
--- a/chromium/third_party/dawn/src/dawn/native/opengl/BindGroupLayoutGL.h
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/BindGroupLayoutGL.h
@@ -17,25 +17,25 @@
#include "dawn/common/SlabAllocator.h"
#include "dawn/native/BindGroupLayout.h"
+#include "dawn/native/opengl/BindGroupGL.h"
namespace dawn::native::opengl {
- class BindGroup;
- class Device;
+class Device;
- class BindGroupLayout final : public BindGroupLayoutBase {
- public:
- BindGroupLayout(DeviceBase* device,
- const BindGroupLayoutDescriptor* descriptor,
- PipelineCompatibilityToken pipelineCompatibilityToken);
+class BindGroupLayout final : public BindGroupLayoutBase {
+ public:
+ BindGroupLayout(DeviceBase* device,
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken);
- Ref<BindGroup> AllocateBindGroup(Device* device, const BindGroupDescriptor* descriptor);
- void DeallocateBindGroup(BindGroup* bindGroup);
+ Ref<BindGroup> AllocateBindGroup(Device* device, const BindGroupDescriptor* descriptor);
+ void DeallocateBindGroup(BindGroup* bindGroup);
- private:
- ~BindGroupLayout() override = default;
- SlabAllocator<BindGroup> mBindGroupAllocator;
- };
+ private:
+ ~BindGroupLayout() override = default;
+ SlabAllocator<BindGroup> mBindGroupAllocator;
+};
} // namespace dawn::native::opengl
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/BufferGL.cpp b/chromium/third_party/dawn/src/dawn/native/opengl/BufferGL.cpp
index fde83bc0e4c..c05730bbdd3 100644
--- a/chromium/third_party/dawn/src/dawn/native/opengl/BufferGL.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/BufferGL.cpp
@@ -14,171 +14,174 @@
#include "dawn/native/opengl/BufferGL.h"
+#include <algorithm>
+#include <utility>
+#include <vector>
+
#include "dawn/native/CommandBuffer.h"
#include "dawn/native/opengl/DeviceGL.h"
namespace dawn::native::opengl {
- // Buffer
-
- // static
- ResultOrError<Ref<Buffer>> Buffer::CreateInternalBuffer(Device* device,
- const BufferDescriptor* descriptor,
- bool shouldLazyClear) {
- Ref<Buffer> buffer = AcquireRef(new Buffer(device, descriptor, shouldLazyClear));
- if (descriptor->mappedAtCreation) {
- DAWN_TRY(buffer->MapAtCreationInternal());
- }
+// Buffer
- return std::move(buffer);
+// static
+ResultOrError<Ref<Buffer>> Buffer::CreateInternalBuffer(Device* device,
+ const BufferDescriptor* descriptor,
+ bool shouldLazyClear) {
+ Ref<Buffer> buffer = AcquireRef(new Buffer(device, descriptor, shouldLazyClear));
+ if (descriptor->mappedAtCreation) {
+ DAWN_TRY(buffer->MapAtCreationInternal());
}
- Buffer::Buffer(Device* device, const BufferDescriptor* descriptor)
- : BufferBase(device, descriptor) {
- // Allocate at least 4 bytes so clamped accesses are always in bounds.
- mAllocatedSize = std::max(GetSize(), uint64_t(4u));
-
- device->gl.GenBuffers(1, &mBuffer);
- device->gl.BindBuffer(GL_ARRAY_BUFFER, mBuffer);
-
- // The buffers with mappedAtCreation == true will be initialized in
- // BufferBase::MapAtCreation().
- if (device->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting) &&
- !descriptor->mappedAtCreation) {
- std::vector<uint8_t> clearValues(mAllocatedSize, 1u);
- device->gl.BufferData(GL_ARRAY_BUFFER, mAllocatedSize, clearValues.data(),
- GL_STATIC_DRAW);
- } else {
- // Buffers start zeroed if you pass nullptr to glBufferData.
- device->gl.BufferData(GL_ARRAY_BUFFER, mAllocatedSize, nullptr, GL_STATIC_DRAW);
- }
+ return std::move(buffer);
+}
+
+Buffer::Buffer(Device* device, const BufferDescriptor* descriptor)
+ : BufferBase(device, descriptor) {
+ // Allocate at least 4 bytes so clamped accesses are always in bounds.
+ mAllocatedSize = std::max(GetSize(), uint64_t(4u));
+
+ device->gl.GenBuffers(1, &mBuffer);
+ device->gl.BindBuffer(GL_ARRAY_BUFFER, mBuffer);
+
+ // The buffers with mappedAtCreation == true will be initialized in
+ // BufferBase::MapAtCreation().
+ if (device->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting) &&
+ !descriptor->mappedAtCreation) {
+ std::vector<uint8_t> clearValues(mAllocatedSize, 1u);
+ device->gl.BufferData(GL_ARRAY_BUFFER, mAllocatedSize, clearValues.data(), GL_STATIC_DRAW);
+ } else {
+ // Buffers start zeroed if you pass nullptr to glBufferData.
+ device->gl.BufferData(GL_ARRAY_BUFFER, mAllocatedSize, nullptr, GL_STATIC_DRAW);
}
+}
- Buffer::Buffer(Device* device, const BufferDescriptor* descriptor, bool shouldLazyClear)
- : Buffer(device, descriptor) {
- if (!shouldLazyClear) {
- SetIsDataInitialized();
- }
+Buffer::Buffer(Device* device, const BufferDescriptor* descriptor, bool shouldLazyClear)
+ : Buffer(device, descriptor) {
+ if (!shouldLazyClear) {
+ SetIsDataInitialized();
}
+}
- Buffer::~Buffer() = default;
-
- GLuint Buffer::GetHandle() const {
- return mBuffer;
- }
+Buffer::~Buffer() = default;
- bool Buffer::EnsureDataInitialized() {
- if (!NeedsInitialization()) {
- return false;
- }
+GLuint Buffer::GetHandle() const {
+ return mBuffer;
+}
- InitializeToZero();
- return true;
+bool Buffer::EnsureDataInitialized() {
+ if (!NeedsInitialization()) {
+ return false;
}
- bool Buffer::EnsureDataInitializedAsDestination(uint64_t offset, uint64_t size) {
- if (!NeedsInitialization()) {
- return false;
- }
-
- if (IsFullBufferRange(offset, size)) {
- SetIsDataInitialized();
- return false;
- }
+ InitializeToZero();
+ return true;
+}
- InitializeToZero();
- return true;
+bool Buffer::EnsureDataInitializedAsDestination(uint64_t offset, uint64_t size) {
+ if (!NeedsInitialization()) {
+ return false;
}
- bool Buffer::EnsureDataInitializedAsDestination(const CopyTextureToBufferCmd* copy) {
- if (!NeedsInitialization()) {
- return false;
- }
-
- if (IsFullBufferOverwrittenInTextureToBufferCopy(copy)) {
- SetIsDataInitialized();
- return false;
- }
-
- InitializeToZero();
- return true;
- }
-
- void Buffer::InitializeToZero() {
- ASSERT(NeedsInitialization());
-
- const uint64_t size = GetAllocatedSize();
- Device* device = ToBackend(GetDevice());
-
- const std::vector<uint8_t> clearValues(size, 0u);
- device->gl.BindBuffer(GL_ARRAY_BUFFER, mBuffer);
- device->gl.BufferSubData(GL_ARRAY_BUFFER, 0, size, clearValues.data());
- device->IncrementLazyClearCountForTesting();
-
+ if (IsFullBufferRange(offset, size)) {
SetIsDataInitialized();
+ return false;
}
- bool Buffer::IsCPUWritableAtCreation() const {
- // TODO(enga): All buffers in GL can be mapped. Investigate if mapping them will cause the
- // driver to migrate it to shared memory.
- return true;
- }
+ InitializeToZero();
+ return true;
+}
- MaybeError Buffer::MapAtCreationImpl() {
- const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
- gl.BindBuffer(GL_ARRAY_BUFFER, mBuffer);
- mMappedData = gl.MapBufferRange(GL_ARRAY_BUFFER, 0, GetSize(), GL_MAP_WRITE_BIT);
- return {};
+bool Buffer::EnsureDataInitializedAsDestination(const CopyTextureToBufferCmd* copy) {
+ if (!NeedsInitialization()) {
+ return false;
}
- MaybeError Buffer::MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) {
- const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
-
- // It is an error to map an empty range in OpenGL. We always have at least a 4-byte buffer
- // so we extend the range to be 4 bytes.
- if (size == 0) {
- if (offset != 0) {
- offset -= 4;
- }
- size = 4;
- }
-
- EnsureDataInitialized();
-
- // This does GPU->CPU synchronization, we could require a high
- // version of OpenGL that would let us map the buffer unsynchronized.
- gl.BindBuffer(GL_ARRAY_BUFFER, mBuffer);
- void* mappedData = nullptr;
- if (mode & wgpu::MapMode::Read) {
- mappedData = gl.MapBufferRange(GL_ARRAY_BUFFER, offset, size, GL_MAP_READ_BIT);
- } else {
- ASSERT(mode & wgpu::MapMode::Write);
- mappedData = gl.MapBufferRange(GL_ARRAY_BUFFER, offset, size, GL_MAP_WRITE_BIT);
- }
-
- // The frontend asks that the pointer returned by GetMappedPointerImpl is from the start of
- // the resource but OpenGL gives us the pointer at offset. Remove the offset.
- mMappedData = static_cast<uint8_t*>(mappedData) - offset;
- return {};
+ if (IsFullBufferOverwrittenInTextureToBufferCopy(copy)) {
+ SetIsDataInitialized();
+ return false;
}
- void* Buffer::GetMappedPointerImpl() {
- // The mapping offset has already been removed.
- return mMappedData;
+ InitializeToZero();
+ return true;
+}
+
+void Buffer::InitializeToZero() {
+ ASSERT(NeedsInitialization());
+
+ const uint64_t size = GetAllocatedSize();
+ Device* device = ToBackend(GetDevice());
+
+ const std::vector<uint8_t> clearValues(size, 0u);
+ device->gl.BindBuffer(GL_ARRAY_BUFFER, mBuffer);
+ device->gl.BufferSubData(GL_ARRAY_BUFFER, 0, size, clearValues.data());
+ device->IncrementLazyClearCountForTesting();
+
+ SetIsDataInitialized();
+}
+
+bool Buffer::IsCPUWritableAtCreation() const {
+ // TODO(enga): All buffers in GL can be mapped. Investigate if mapping them will cause the
+ // driver to migrate it to shared memory.
+ return true;
+}
+
+MaybeError Buffer::MapAtCreationImpl() {
+ const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
+ gl.BindBuffer(GL_ARRAY_BUFFER, mBuffer);
+ mMappedData = gl.MapBufferRange(GL_ARRAY_BUFFER, 0, GetSize(), GL_MAP_WRITE_BIT);
+ return {};
+}
+
+MaybeError Buffer::MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) {
+ const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
+
+ // It is an error to map an empty range in OpenGL. We always have at least a 4-byte buffer
+ // so we extend the range to be 4 bytes.
+ if (size == 0) {
+ if (offset != 0) {
+ offset -= 4;
+ }
+ size = 4;
}
- void Buffer::UnmapImpl() {
- const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
-
- gl.BindBuffer(GL_ARRAY_BUFFER, mBuffer);
- gl.UnmapBuffer(GL_ARRAY_BUFFER);
- mMappedData = nullptr;
+ EnsureDataInitialized();
+
+    // This does GPU->CPU synchronization; we could require a higher
+    // version of OpenGL that would let us map the buffer unsynchronized.
+ gl.BindBuffer(GL_ARRAY_BUFFER, mBuffer);
+ void* mappedData = nullptr;
+ if (mode & wgpu::MapMode::Read) {
+ mappedData = gl.MapBufferRange(GL_ARRAY_BUFFER, offset, size, GL_MAP_READ_BIT);
+ } else {
+ ASSERT(mode & wgpu::MapMode::Write);
+ mappedData = gl.MapBufferRange(GL_ARRAY_BUFFER, offset, size, GL_MAP_WRITE_BIT);
}
- void Buffer::DestroyImpl() {
- BufferBase::DestroyImpl();
- ToBackend(GetDevice())->gl.DeleteBuffers(1, &mBuffer);
- mBuffer = 0;
- }
+ // The frontend asks that the pointer returned by GetMappedPointerImpl is from the start of
+ // the resource but OpenGL gives us the pointer at offset. Remove the offset.
+ mMappedData = static_cast<uint8_t*>(mappedData) - offset;
+ return {};
+}
+
+void* Buffer::GetMappedPointerImpl() {
+ // The mapping offset has already been removed.
+ return mMappedData;
+}
+
+void Buffer::UnmapImpl() {
+ const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
+
+ gl.BindBuffer(GL_ARRAY_BUFFER, mBuffer);
+ gl.UnmapBuffer(GL_ARRAY_BUFFER);
+ mMappedData = nullptr;
+}
+
+void Buffer::DestroyImpl() {
+ BufferBase::DestroyImpl();
+ ToBackend(GetDevice())->gl.DeleteBuffers(1, &mBuffer);
+ mBuffer = 0;
+}
} // namespace dawn::native::opengl
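
Note: MapAsyncImpl above works around two GL constraints: an empty range may not be mapped, so it is widened to 4 bytes, and MapBufferRange returns a pointer at the mapped offset while the frontend expects a pointer to the start of the resource. A minimal sketch of that pointer bookkeeping, with plain memory standing in for the GL buffer (no Dawn or GL calls):

#include <cassert>
#include <cstddef>
#include <cstdint>

int main() {
    uint8_t storage[256] = {};  // stand-in for the GL buffer's backing store
    size_t offset = 64;
    size_t size = 0;            // an empty range is illegal to map in OpenGL...

    // ...so, as in MapAsyncImpl, widen it to a 4-byte range that stays in bounds.
    if (size == 0) {
        if (offset != 0) {
            offset -= 4;
        }
        size = 4;
    }

    // MapBufferRange hands back a pointer to byte `offset`; the frontend wants a
    // pointer it can index with absolute offsets, so the backend rebases it.
    uint8_t* mappedAtOffset = storage + offset;
    uint8_t* base = mappedAtOffset - offset;  // what GetMappedPointerImpl returns
    assert(base == storage);
    assert(base + offset == mappedAtOffset);
    (void)size;
    return 0;
}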
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/BufferGL.h b/chromium/third_party/dawn/src/dawn/native/opengl/BufferGL.h
index 910e5dbcde7..a86c84157fd 100644
--- a/chromium/third_party/dawn/src/dawn/native/opengl/BufferGL.h
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/BufferGL.h
@@ -21,37 +21,37 @@
namespace dawn::native::opengl {
- class Device;
+class Device;
- class Buffer final : public BufferBase {
- public:
- static ResultOrError<Ref<Buffer>> CreateInternalBuffer(Device* device,
- const BufferDescriptor* descriptor,
- bool shouldLazyClear);
+class Buffer final : public BufferBase {
+ public:
+ static ResultOrError<Ref<Buffer>> CreateInternalBuffer(Device* device,
+ const BufferDescriptor* descriptor,
+ bool shouldLazyClear);
- Buffer(Device* device, const BufferDescriptor* descriptor);
+ Buffer(Device* device, const BufferDescriptor* descriptor);
- GLuint GetHandle() const;
+ GLuint GetHandle() const;
- bool EnsureDataInitialized();
- bool EnsureDataInitializedAsDestination(uint64_t offset, uint64_t size);
- bool EnsureDataInitializedAsDestination(const CopyTextureToBufferCmd* copy);
+ bool EnsureDataInitialized();
+ bool EnsureDataInitializedAsDestination(uint64_t offset, uint64_t size);
+ bool EnsureDataInitializedAsDestination(const CopyTextureToBufferCmd* copy);
- private:
- Buffer(Device* device, const BufferDescriptor* descriptor, bool shouldLazyClear);
- ~Buffer() override;
- MaybeError MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) override;
- void UnmapImpl() override;
- void DestroyImpl() override;
- bool IsCPUWritableAtCreation() const override;
- MaybeError MapAtCreationImpl() override;
- void* GetMappedPointerImpl() override;
+ private:
+ Buffer(Device* device, const BufferDescriptor* descriptor, bool shouldLazyClear);
+ ~Buffer() override;
+ MaybeError MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) override;
+ void UnmapImpl() override;
+ void DestroyImpl() override;
+ bool IsCPUWritableAtCreation() const override;
+ MaybeError MapAtCreationImpl() override;
+ void* GetMappedPointerImpl() override;
- void InitializeToZero();
+ void InitializeToZero();
- GLuint mBuffer = 0;
- void* mMappedData = nullptr;
- };
+ GLuint mBuffer = 0;
+ void* mMappedData = nullptr;
+};
} // namespace dawn::native::opengl
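
Note: the EnsureDataInitialized* helpers declared above implement lazy zero-initialization: a copy that overwrites the whole buffer merely marks it initialized, while a partial copy forces a zero-fill first. A standalone sketch of that decision, with all names local to the sketch:

#include <cstdint>
#include <cstdio>

struct LazyBuffer {
    uint64_t size;
    bool initialized = false;

    // Mirrors EnsureDataInitializedAsDestination(offset, size): returns true when a
    // zero-clear had to happen before the upcoming write.
    bool EnsureInitializedAsDestination(uint64_t offset, uint64_t writeSize) {
        if (initialized) {
            return false;  // nothing to do, the data is already defined
        }
        if (offset == 0 && writeSize == size) {
            initialized = true;  // fully overwritten, the clear can be skipped
            return false;
        }
        // Partial write: zero the allocation first (BufferSubData in the real backend).
        initialized = true;
        return true;
    }
};

int main() {
    LazyBuffer partial{256};
    LazyBuffer full{256};
    std::printf("partial write cleared first: %d\n", partial.EnsureInitializedAsDestination(16, 32));  // 1
    std::printf("full write cleared first: %d\n", full.EnsureInitializedAsDestination(0, 256));        // 0
    return 0;
}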
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/CommandBufferGL.cpp b/chromium/third_party/dawn/src/dawn/native/opengl/CommandBufferGL.cpp
index ba5b8dd79c3..851413dffef 100644
--- a/chromium/third_party/dawn/src/dawn/native/opengl/CommandBufferGL.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/CommandBufferGL.cpp
@@ -14,6 +14,10 @@
#include "dawn/native/opengl/CommandBufferGL.h"
+#include <algorithm>
+#include <cstring>
+#include <vector>
+
#include "dawn/native/BindGroup.h"
#include "dawn/native/BindGroupTracker.h"
#include "dawn/native/CommandEncoder.h"
@@ -32,1343 +36,1319 @@
#include "dawn/native/opengl/TextureGL.h"
#include "dawn/native/opengl/UtilsGL.h"
-#include <cstring>
-
namespace dawn::native::opengl {
- namespace {
+namespace {
- GLenum IndexFormatType(wgpu::IndexFormat format) {
- switch (format) {
- case wgpu::IndexFormat::Uint16:
- return GL_UNSIGNED_SHORT;
- case wgpu::IndexFormat::Uint32:
- return GL_UNSIGNED_INT;
- case wgpu::IndexFormat::Undefined:
- break;
- }
+GLenum IndexFormatType(wgpu::IndexFormat format) {
+ switch (format) {
+ case wgpu::IndexFormat::Uint16:
+ return GL_UNSIGNED_SHORT;
+ case wgpu::IndexFormat::Uint32:
+ return GL_UNSIGNED_INT;
+ case wgpu::IndexFormat::Undefined:
+ break;
+ }
+ UNREACHABLE();
+}
+
+GLenum VertexFormatType(wgpu::VertexFormat format) {
+ switch (format) {
+ case wgpu::VertexFormat::Uint8x2:
+ case wgpu::VertexFormat::Uint8x4:
+ case wgpu::VertexFormat::Unorm8x2:
+ case wgpu::VertexFormat::Unorm8x4:
+ return GL_UNSIGNED_BYTE;
+ case wgpu::VertexFormat::Sint8x2:
+ case wgpu::VertexFormat::Sint8x4:
+ case wgpu::VertexFormat::Snorm8x2:
+ case wgpu::VertexFormat::Snorm8x4:
+ return GL_BYTE;
+ case wgpu::VertexFormat::Uint16x2:
+ case wgpu::VertexFormat::Uint16x4:
+ case wgpu::VertexFormat::Unorm16x2:
+ case wgpu::VertexFormat::Unorm16x4:
+ return GL_UNSIGNED_SHORT;
+ case wgpu::VertexFormat::Sint16x2:
+ case wgpu::VertexFormat::Sint16x4:
+ case wgpu::VertexFormat::Snorm16x2:
+ case wgpu::VertexFormat::Snorm16x4:
+ return GL_SHORT;
+ case wgpu::VertexFormat::Float16x2:
+ case wgpu::VertexFormat::Float16x4:
+ return GL_HALF_FLOAT;
+ case wgpu::VertexFormat::Float32:
+ case wgpu::VertexFormat::Float32x2:
+ case wgpu::VertexFormat::Float32x3:
+ case wgpu::VertexFormat::Float32x4:
+ return GL_FLOAT;
+ case wgpu::VertexFormat::Uint32:
+ case wgpu::VertexFormat::Uint32x2:
+ case wgpu::VertexFormat::Uint32x3:
+ case wgpu::VertexFormat::Uint32x4:
+ return GL_UNSIGNED_INT;
+ case wgpu::VertexFormat::Sint32:
+ case wgpu::VertexFormat::Sint32x2:
+ case wgpu::VertexFormat::Sint32x3:
+ case wgpu::VertexFormat::Sint32x4:
+ return GL_INT;
+ default:
UNREACHABLE();
- }
+ }
+}
+
+GLboolean VertexFormatIsNormalized(wgpu::VertexFormat format) {
+ switch (format) {
+ case wgpu::VertexFormat::Unorm8x2:
+ case wgpu::VertexFormat::Unorm8x4:
+ case wgpu::VertexFormat::Snorm8x2:
+ case wgpu::VertexFormat::Snorm8x4:
+ case wgpu::VertexFormat::Unorm16x2:
+ case wgpu::VertexFormat::Unorm16x4:
+ case wgpu::VertexFormat::Snorm16x2:
+ case wgpu::VertexFormat::Snorm16x4:
+ return GL_TRUE;
+ default:
+ return GL_FALSE;
+ }
+}
+
+bool VertexFormatIsInt(wgpu::VertexFormat format) {
+ switch (format) {
+ case wgpu::VertexFormat::Uint8x2:
+ case wgpu::VertexFormat::Uint8x4:
+ case wgpu::VertexFormat::Sint8x2:
+ case wgpu::VertexFormat::Sint8x4:
+ case wgpu::VertexFormat::Uint16x2:
+ case wgpu::VertexFormat::Uint16x4:
+ case wgpu::VertexFormat::Sint16x2:
+ case wgpu::VertexFormat::Sint16x4:
+ case wgpu::VertexFormat::Uint32:
+ case wgpu::VertexFormat::Uint32x2:
+ case wgpu::VertexFormat::Uint32x3:
+ case wgpu::VertexFormat::Uint32x4:
+ case wgpu::VertexFormat::Sint32:
+ case wgpu::VertexFormat::Sint32x2:
+ case wgpu::VertexFormat::Sint32x3:
+ case wgpu::VertexFormat::Sint32x4:
+ return true;
+ default:
+ return false;
+ }
+}
+
+// Vertex buffers and index buffers are implemented as part of an OpenGL VAO that
+// corresponds to a VertexState. In Dawn, by contrast, they are part of the global state.
+// This means that we have to re-apply these buffers on a VertexState change.
+class VertexStateBufferBindingTracker {
+ public:
+ void OnSetIndexBuffer(BufferBase* buffer) {
+ mIndexBufferDirty = true;
+ mIndexBuffer = ToBackend(buffer);
+ }
- GLenum VertexFormatType(wgpu::VertexFormat format) {
- switch (format) {
- case wgpu::VertexFormat::Uint8x2:
- case wgpu::VertexFormat::Uint8x4:
- case wgpu::VertexFormat::Unorm8x2:
- case wgpu::VertexFormat::Unorm8x4:
- return GL_UNSIGNED_BYTE;
- case wgpu::VertexFormat::Sint8x2:
- case wgpu::VertexFormat::Sint8x4:
- case wgpu::VertexFormat::Snorm8x2:
- case wgpu::VertexFormat::Snorm8x4:
- return GL_BYTE;
- case wgpu::VertexFormat::Uint16x2:
- case wgpu::VertexFormat::Uint16x4:
- case wgpu::VertexFormat::Unorm16x2:
- case wgpu::VertexFormat::Unorm16x4:
- return GL_UNSIGNED_SHORT;
- case wgpu::VertexFormat::Sint16x2:
- case wgpu::VertexFormat::Sint16x4:
- case wgpu::VertexFormat::Snorm16x2:
- case wgpu::VertexFormat::Snorm16x4:
- return GL_SHORT;
- case wgpu::VertexFormat::Float16x2:
- case wgpu::VertexFormat::Float16x4:
- return GL_HALF_FLOAT;
- case wgpu::VertexFormat::Float32:
- case wgpu::VertexFormat::Float32x2:
- case wgpu::VertexFormat::Float32x3:
- case wgpu::VertexFormat::Float32x4:
- return GL_FLOAT;
- case wgpu::VertexFormat::Uint32:
- case wgpu::VertexFormat::Uint32x2:
- case wgpu::VertexFormat::Uint32x3:
- case wgpu::VertexFormat::Uint32x4:
- return GL_UNSIGNED_INT;
- case wgpu::VertexFormat::Sint32:
- case wgpu::VertexFormat::Sint32x2:
- case wgpu::VertexFormat::Sint32x3:
- case wgpu::VertexFormat::Sint32x4:
- return GL_INT;
- default:
- UNREACHABLE();
- }
+ void OnSetVertexBuffer(VertexBufferSlot slot, BufferBase* buffer, uint64_t offset) {
+ mVertexBuffers[slot] = ToBackend(buffer);
+ mVertexBufferOffsets[slot] = offset;
+ mDirtyVertexBuffers.set(slot);
+ }
+
+ void OnSetPipeline(RenderPipelineBase* pipeline) {
+ if (mLastPipeline == pipeline) {
+ return;
}
- GLboolean VertexFormatIsNormalized(wgpu::VertexFormat format) {
- switch (format) {
- case wgpu::VertexFormat::Unorm8x2:
- case wgpu::VertexFormat::Unorm8x4:
- case wgpu::VertexFormat::Snorm8x2:
- case wgpu::VertexFormat::Snorm8x4:
- case wgpu::VertexFormat::Unorm16x2:
- case wgpu::VertexFormat::Unorm16x4:
- case wgpu::VertexFormat::Snorm16x2:
- case wgpu::VertexFormat::Snorm16x4:
- return GL_TRUE;
- default:
- return GL_FALSE;
- }
+ mIndexBufferDirty = true;
+ mDirtyVertexBuffers |= pipeline->GetVertexBufferSlotsUsed();
+
+ mLastPipeline = pipeline;
+ }
+
+ void Apply(const OpenGLFunctions& gl) {
+ if (mIndexBufferDirty && mIndexBuffer != nullptr) {
+ gl.BindBuffer(GL_ELEMENT_ARRAY_BUFFER, mIndexBuffer->GetHandle());
+ mIndexBufferDirty = false;
}
- bool VertexFormatIsInt(wgpu::VertexFormat format) {
- switch (format) {
- case wgpu::VertexFormat::Uint8x2:
- case wgpu::VertexFormat::Uint8x4:
- case wgpu::VertexFormat::Sint8x2:
- case wgpu::VertexFormat::Sint8x4:
- case wgpu::VertexFormat::Uint16x2:
- case wgpu::VertexFormat::Uint16x4:
- case wgpu::VertexFormat::Sint16x2:
- case wgpu::VertexFormat::Sint16x4:
- case wgpu::VertexFormat::Uint32:
- case wgpu::VertexFormat::Uint32x2:
- case wgpu::VertexFormat::Uint32x3:
- case wgpu::VertexFormat::Uint32x4:
- case wgpu::VertexFormat::Sint32:
- case wgpu::VertexFormat::Sint32x2:
- case wgpu::VertexFormat::Sint32x3:
- case wgpu::VertexFormat::Sint32x4:
- return true;
- default:
- return false;
+ for (VertexBufferSlot slot :
+ IterateBitSet(mDirtyVertexBuffers & mLastPipeline->GetVertexBufferSlotsUsed())) {
+ for (VertexAttributeLocation location :
+ IterateBitSet(ToBackend(mLastPipeline)->GetAttributesUsingVertexBuffer(slot))) {
+ const VertexAttributeInfo& attribute = mLastPipeline->GetAttribute(location);
+
+ GLuint attribIndex = static_cast<GLuint>(static_cast<uint8_t>(location));
+ GLuint buffer = mVertexBuffers[slot]->GetHandle();
+ uint64_t offset = mVertexBufferOffsets[slot];
+
+ const VertexBufferInfo& vertexBuffer = mLastPipeline->GetVertexBuffer(slot);
+ uint32_t components = GetVertexFormatInfo(attribute.format).componentCount;
+ GLenum formatType = VertexFormatType(attribute.format);
+
+ GLboolean normalized = VertexFormatIsNormalized(attribute.format);
+ gl.BindBuffer(GL_ARRAY_BUFFER, buffer);
+ if (VertexFormatIsInt(attribute.format)) {
+ gl.VertexAttribIPointer(
+ attribIndex, components, formatType, vertexBuffer.arrayStride,
+ reinterpret_cast<void*>(static_cast<intptr_t>(offset + attribute.offset)));
+ } else {
+ gl.VertexAttribPointer(
+ attribIndex, components, formatType, normalized, vertexBuffer.arrayStride,
+ reinterpret_cast<void*>(static_cast<intptr_t>(offset + attribute.offset)));
+ }
}
}
- // Vertex buffers and index buffers are implemented as part of an OpenGL VAO that
- // corresponds to a VertexState. On the contrary in Dawn they are part of the global state.
- // This means that we have to re-apply these buffers on a VertexState change.
- class VertexStateBufferBindingTracker {
- public:
- void OnSetIndexBuffer(BufferBase* buffer) {
- mIndexBufferDirty = true;
- mIndexBuffer = ToBackend(buffer);
- }
+ mDirtyVertexBuffers.reset();
+ }
- void OnSetVertexBuffer(VertexBufferSlot slot, BufferBase* buffer, uint64_t offset) {
- mVertexBuffers[slot] = ToBackend(buffer);
- mVertexBufferOffsets[slot] = offset;
- mDirtyVertexBuffers.set(slot);
- }
+ private:
+ bool mIndexBufferDirty = false;
+ Buffer* mIndexBuffer = nullptr;
- void OnSetPipeline(RenderPipelineBase* pipeline) {
- if (mLastPipeline == pipeline) {
- return;
- }
+ ityp::bitset<VertexBufferSlot, kMaxVertexBuffers> mDirtyVertexBuffers;
+ ityp::array<VertexBufferSlot, Buffer*, kMaxVertexBuffers> mVertexBuffers;
+ ityp::array<VertexBufferSlot, uint64_t, kMaxVertexBuffers> mVertexBufferOffsets;
- mIndexBufferDirty = true;
- mDirtyVertexBuffers |= pipeline->GetVertexBufferSlotsUsed();
+ RenderPipelineBase* mLastPipeline = nullptr;
+};
- mLastPipeline = pipeline;
- }
+class BindGroupTracker : public BindGroupTrackerBase<false, uint64_t> {
+ public:
+ void OnSetPipeline(RenderPipeline* pipeline) {
+ BindGroupTrackerBase::OnSetPipeline(pipeline);
+ mPipeline = pipeline;
+ }
- void Apply(const OpenGLFunctions& gl) {
- if (mIndexBufferDirty && mIndexBuffer != nullptr) {
- gl.BindBuffer(GL_ELEMENT_ARRAY_BUFFER, mIndexBuffer->GetHandle());
- mIndexBufferDirty = false;
- }
+ void OnSetPipeline(ComputePipeline* pipeline) {
+ BindGroupTrackerBase::OnSetPipeline(pipeline);
+ mPipeline = pipeline;
+ }
- for (VertexBufferSlot slot : IterateBitSet(
- mDirtyVertexBuffers & mLastPipeline->GetVertexBufferSlotsUsed())) {
- for (VertexAttributeLocation location : IterateBitSet(
- ToBackend(mLastPipeline)->GetAttributesUsingVertexBuffer(slot))) {
- const VertexAttributeInfo& attribute =
- mLastPipeline->GetAttribute(location);
-
- GLuint attribIndex = static_cast<GLuint>(static_cast<uint8_t>(location));
- GLuint buffer = mVertexBuffers[slot]->GetHandle();
- uint64_t offset = mVertexBufferOffsets[slot];
-
- const VertexBufferInfo& vertexBuffer = mLastPipeline->GetVertexBuffer(slot);
- uint32_t components = GetVertexFormatInfo(attribute.format).componentCount;
- GLenum formatType = VertexFormatType(attribute.format);
-
- GLboolean normalized = VertexFormatIsNormalized(attribute.format);
- gl.BindBuffer(GL_ARRAY_BUFFER, buffer);
- if (VertexFormatIsInt(attribute.format)) {
- gl.VertexAttribIPointer(
- attribIndex, components, formatType, vertexBuffer.arrayStride,
- reinterpret_cast<void*>(
- static_cast<intptr_t>(offset + attribute.offset)));
- } else {
- gl.VertexAttribPointer(attribIndex, components, formatType, normalized,
- vertexBuffer.arrayStride,
- reinterpret_cast<void*>(static_cast<intptr_t>(
- offset + attribute.offset)));
- }
- }
- }
+ void Apply(const OpenGLFunctions& gl) {
+ BeforeApply();
+ for (BindGroupIndex index : IterateBitSet(mDirtyBindGroupsObjectChangedOrIsDynamic)) {
+ ApplyBindGroup(gl, index, mBindGroups[index], mDynamicOffsetCounts[index],
+ mDynamicOffsets[index].data());
+ }
+ AfterApply();
+ }
- mDirtyVertexBuffers.reset();
+ private:
+ void ApplyBindGroup(const OpenGLFunctions& gl,
+ BindGroupIndex index,
+ BindGroupBase* group,
+ uint32_t dynamicOffsetCount,
+ uint64_t* dynamicOffsets) {
+ const auto& indices = ToBackend(mPipelineLayout)->GetBindingIndexInfo()[index];
+ uint32_t currentDynamicOffsetIndex = 0;
+
+ for (BindingIndex bindingIndex{0}; bindingIndex < group->GetLayout()->GetBindingCount();
+ ++bindingIndex) {
+ const BindingInfo& bindingInfo = group->GetLayout()->GetBindingInfo(bindingIndex);
+
+ if (bindingInfo.bindingType == BindingInfoType::Texture) {
+ TextureView* view = ToBackend(group->GetBindingAsTextureView(bindingIndex));
+ view->CopyIfNeeded();
}
+ }
- private:
- bool mIndexBufferDirty = false;
- Buffer* mIndexBuffer = nullptr;
-
- ityp::bitset<VertexBufferSlot, kMaxVertexBuffers> mDirtyVertexBuffers;
- ityp::array<VertexBufferSlot, Buffer*, kMaxVertexBuffers> mVertexBuffers;
- ityp::array<VertexBufferSlot, uint64_t, kMaxVertexBuffers> mVertexBufferOffsets;
+ for (BindingIndex bindingIndex{0}; bindingIndex < group->GetLayout()->GetBindingCount();
+ ++bindingIndex) {
+ const BindingInfo& bindingInfo = group->GetLayout()->GetBindingInfo(bindingIndex);
- RenderPipelineBase* mLastPipeline = nullptr;
- };
+ switch (bindingInfo.bindingType) {
+ case BindingInfoType::Buffer: {
+ BufferBinding binding = group->GetBindingAsBufferBinding(bindingIndex);
+ GLuint buffer = ToBackend(binding.buffer)->GetHandle();
+ GLuint index = indices[bindingIndex];
+ GLuint offset = binding.offset;
- class BindGroupTracker : public BindGroupTrackerBase<false, uint64_t> {
- public:
- void OnSetPipeline(RenderPipeline* pipeline) {
- BindGroupTrackerBase::OnSetPipeline(pipeline);
- mPipeline = pipeline;
- }
+ if (bindingInfo.buffer.hasDynamicOffset) {
+ offset += dynamicOffsets[currentDynamicOffsetIndex];
+ ++currentDynamicOffsetIndex;
+ }
- void OnSetPipeline(ComputePipeline* pipeline) {
- BindGroupTrackerBase::OnSetPipeline(pipeline);
- mPipeline = pipeline;
- }
+ GLenum target;
+ switch (bindingInfo.buffer.type) {
+ case wgpu::BufferBindingType::Uniform:
+ target = GL_UNIFORM_BUFFER;
+ break;
+ case wgpu::BufferBindingType::Storage:
+ case kInternalStorageBufferBinding:
+ case wgpu::BufferBindingType::ReadOnlyStorage:
+ target = GL_SHADER_STORAGE_BUFFER;
+ break;
+ case wgpu::BufferBindingType::Undefined:
+ UNREACHABLE();
+ }
- void Apply(const OpenGLFunctions& gl) {
- BeforeApply();
- for (BindGroupIndex index :
- IterateBitSet(mDirtyBindGroupsObjectChangedOrIsDynamic)) {
- ApplyBindGroup(gl, index, mBindGroups[index], mDynamicOffsetCounts[index],
- mDynamicOffsets[index].data());
+ gl.BindBufferRange(target, index, buffer, offset, binding.size);
+ break;
}
- AfterApply();
- }
- private:
- void ApplyBindGroup(const OpenGLFunctions& gl,
- BindGroupIndex index,
- BindGroupBase* group,
- uint32_t dynamicOffsetCount,
- uint64_t* dynamicOffsets) {
- const auto& indices = ToBackend(mPipelineLayout)->GetBindingIndexInfo()[index];
- uint32_t currentDynamicOffsetIndex = 0;
-
- for (BindingIndex bindingIndex{0};
- bindingIndex < group->GetLayout()->GetBindingCount(); ++bindingIndex) {
- const BindingInfo& bindingInfo =
- group->GetLayout()->GetBindingInfo(bindingIndex);
-
- if (bindingInfo.bindingType == BindingInfoType::Texture) {
- TextureView* view = ToBackend(group->GetBindingAsTextureView(bindingIndex));
- view->CopyIfNeeded();
+ case BindingInfoType::Sampler: {
+ Sampler* sampler = ToBackend(group->GetBindingAsSampler(bindingIndex));
+ GLuint samplerIndex = indices[bindingIndex];
+
+ for (PipelineGL::SamplerUnit unit :
+ mPipeline->GetTextureUnitsForSampler(samplerIndex)) {
+                        // Only use filtering for certain texture units, because int
+                        // and uint textures are only complete without filtering.
+ if (unit.shouldUseFiltering) {
+ gl.BindSampler(unit.unit, sampler->GetFilteringHandle());
+ } else {
+ gl.BindSampler(unit.unit, sampler->GetNonFilteringHandle());
+ }
}
+ break;
}
- for (BindingIndex bindingIndex{0};
- bindingIndex < group->GetLayout()->GetBindingCount(); ++bindingIndex) {
- const BindingInfo& bindingInfo =
- group->GetLayout()->GetBindingInfo(bindingIndex);
-
- switch (bindingInfo.bindingType) {
- case BindingInfoType::Buffer: {
- BufferBinding binding = group->GetBindingAsBufferBinding(bindingIndex);
- GLuint buffer = ToBackend(binding.buffer)->GetHandle();
- GLuint index = indices[bindingIndex];
- GLuint offset = binding.offset;
-
- if (bindingInfo.buffer.hasDynamicOffset) {
- offset += dynamicOffsets[currentDynamicOffsetIndex];
- ++currentDynamicOffsetIndex;
- }
-
- GLenum target;
- switch (bindingInfo.buffer.type) {
- case wgpu::BufferBindingType::Uniform:
- target = GL_UNIFORM_BUFFER;
+ case BindingInfoType::Texture: {
+ TextureView* view = ToBackend(group->GetBindingAsTextureView(bindingIndex));
+ GLuint handle = view->GetHandle();
+ GLenum target = view->GetGLTarget();
+ GLuint viewIndex = indices[bindingIndex];
+
+ for (auto unit : mPipeline->GetTextureUnitsForTextureView(viewIndex)) {
+ gl.ActiveTexture(GL_TEXTURE0 + unit);
+ gl.BindTexture(target, handle);
+ if (ToBackend(view->GetTexture())->GetGLFormat().format ==
+ GL_DEPTH_STENCIL) {
+ Aspect aspect = view->GetAspects();
+ ASSERT(HasOneBit(aspect));
+ switch (aspect) {
+ case Aspect::None:
+ case Aspect::Color:
+ case Aspect::CombinedDepthStencil:
+ case Aspect::Plane0:
+ case Aspect::Plane1:
+ UNREACHABLE();
+ case Aspect::Depth:
+ gl.TexParameteri(target, GL_DEPTH_STENCIL_TEXTURE_MODE,
+ GL_DEPTH_COMPONENT);
break;
- case wgpu::BufferBindingType::Storage:
- case kInternalStorageBufferBinding:
- case wgpu::BufferBindingType::ReadOnlyStorage:
- target = GL_SHADER_STORAGE_BUFFER;
+ case Aspect::Stencil:
+ gl.TexParameteri(target, GL_DEPTH_STENCIL_TEXTURE_MODE,
+ GL_STENCIL_INDEX);
break;
- case wgpu::BufferBindingType::Undefined:
- UNREACHABLE();
- }
-
- gl.BindBufferRange(target, index, buffer, offset, binding.size);
- break;
- }
-
- case BindingInfoType::Sampler: {
- Sampler* sampler = ToBackend(group->GetBindingAsSampler(bindingIndex));
- GLuint samplerIndex = indices[bindingIndex];
-
- for (PipelineGL::SamplerUnit unit :
- mPipeline->GetTextureUnitsForSampler(samplerIndex)) {
- // Only use filtering for certain texture units, because int
- // and uint texture are only complete without filtering
- if (unit.shouldUseFiltering) {
- gl.BindSampler(unit.unit, sampler->GetFilteringHandle());
- } else {
- gl.BindSampler(unit.unit, sampler->GetNonFilteringHandle());
- }
}
- break;
- }
-
- case BindingInfoType::Texture: {
- TextureView* view =
- ToBackend(group->GetBindingAsTextureView(bindingIndex));
- GLuint handle = view->GetHandle();
- GLenum target = view->GetGLTarget();
- GLuint viewIndex = indices[bindingIndex];
-
- for (auto unit : mPipeline->GetTextureUnitsForTextureView(viewIndex)) {
- gl.ActiveTexture(GL_TEXTURE0 + unit);
- gl.BindTexture(target, handle);
- if (ToBackend(view->GetTexture())->GetGLFormat().format ==
- GL_DEPTH_STENCIL) {
- Aspect aspect = view->GetAspects();
- ASSERT(HasOneBit(aspect));
- switch (aspect) {
- case Aspect::None:
- case Aspect::Color:
- case Aspect::CombinedDepthStencil:
- case Aspect::Plane0:
- case Aspect::Plane1:
- UNREACHABLE();
- case Aspect::Depth:
- gl.TexParameteri(target, GL_DEPTH_STENCIL_TEXTURE_MODE,
- GL_DEPTH_COMPONENT);
- break;
- case Aspect::Stencil:
- gl.TexParameteri(target, GL_DEPTH_STENCIL_TEXTURE_MODE,
- GL_STENCIL_INDEX);
- break;
- }
- }
- }
- break;
}
+ }
+ break;
+ }
- case BindingInfoType::StorageTexture: {
- TextureView* view =
- ToBackend(group->GetBindingAsTextureView(bindingIndex));
- Texture* texture = ToBackend(view->GetTexture());
- GLuint handle = texture->GetHandle();
- GLuint imageIndex = indices[bindingIndex];
-
- GLenum access;
- switch (bindingInfo.storageTexture.access) {
- case wgpu::StorageTextureAccess::WriteOnly:
- access = GL_WRITE_ONLY;
- break;
- case wgpu::StorageTextureAccess::Undefined:
- UNREACHABLE();
- }
-
- // OpenGL ES only supports either binding a layer or the entire
- // texture in glBindImageTexture().
- GLboolean isLayered;
- if (view->GetLayerCount() == 1) {
- isLayered = GL_FALSE;
- } else if (texture->GetArrayLayers() == view->GetLayerCount()) {
- isLayered = GL_TRUE;
- } else {
- UNREACHABLE();
- }
+ case BindingInfoType::StorageTexture: {
+ TextureView* view = ToBackend(group->GetBindingAsTextureView(bindingIndex));
+ Texture* texture = ToBackend(view->GetTexture());
+ GLuint handle = texture->GetHandle();
+ GLuint imageIndex = indices[bindingIndex];
- gl.BindImageTexture(imageIndex, handle, view->GetBaseMipLevel(),
- isLayered, view->GetBaseArrayLayer(), access,
- texture->GetGLFormat().internalFormat);
- texture->Touch();
+ GLenum access;
+ switch (bindingInfo.storageTexture.access) {
+ case wgpu::StorageTextureAccess::WriteOnly:
+ access = GL_WRITE_ONLY;
break;
- }
-
- case BindingInfoType::ExternalTexture:
+ case wgpu::StorageTextureAccess::Undefined:
UNREACHABLE();
- break;
}
- }
- }
- PipelineGL* mPipeline = nullptr;
- };
+                // OpenGL ES only supports binding either a single layer or the
+                // entire texture in glBindImageTexture().
+ GLboolean isLayered;
+ if (view->GetLayerCount() == 1) {
+ isLayered = GL_FALSE;
+ } else if (texture->GetArrayLayers() == view->GetLayerCount()) {
+ isLayered = GL_TRUE;
+ } else {
+ UNREACHABLE();
+ }
- void ResolveMultisampledRenderTargets(const OpenGLFunctions& gl,
- const BeginRenderPassCmd* renderPass) {
- ASSERT(renderPass != nullptr);
+ gl.BindImageTexture(imageIndex, handle, view->GetBaseMipLevel(), isLayered,
+ view->GetBaseArrayLayer(), access,
+ texture->GetGLFormat().internalFormat);
+ texture->Touch();
+ break;
+ }
- GLuint readFbo = 0;
- GLuint writeFbo = 0;
+ case BindingInfoType::ExternalTexture:
+ UNREACHABLE();
+ break;
+ }
+ }
+ }
- for (ColorAttachmentIndex i :
- IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
- if (renderPass->colorAttachments[i].resolveTarget != nullptr) {
- if (readFbo == 0) {
- ASSERT(writeFbo == 0);
- gl.GenFramebuffers(1, &readFbo);
- gl.GenFramebuffers(1, &writeFbo);
- }
+ PipelineGL* mPipeline = nullptr;
+};
- TextureView* colorView = ToBackend(renderPass->colorAttachments[i].view.Get());
+void ResolveMultisampledRenderTargets(const OpenGLFunctions& gl,
+ const BeginRenderPassCmd* renderPass) {
+ ASSERT(renderPass != nullptr);
- gl.BindFramebuffer(GL_READ_FRAMEBUFFER, readFbo);
- colorView->BindToFramebuffer(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0);
+ GLuint readFbo = 0;
+ GLuint writeFbo = 0;
- TextureView* resolveView =
- ToBackend(renderPass->colorAttachments[i].resolveTarget.Get());
- gl.BindFramebuffer(GL_DRAW_FRAMEBUFFER, writeFbo);
- resolveView->BindToFramebuffer(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0);
- gl.BlitFramebuffer(0, 0, renderPass->width, renderPass->height, 0, 0,
- renderPass->width, renderPass->height, GL_COLOR_BUFFER_BIT,
- GL_NEAREST);
- ToBackend(resolveView->GetTexture())->Touch();
- }
+ for (ColorAttachmentIndex i :
+ IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
+ if (renderPass->colorAttachments[i].resolveTarget != nullptr) {
+ if (readFbo == 0) {
+ ASSERT(writeFbo == 0);
+ gl.GenFramebuffers(1, &readFbo);
+ gl.GenFramebuffers(1, &writeFbo);
}
- gl.DeleteFramebuffers(1, &readFbo);
- gl.DeleteFramebuffers(1, &writeFbo);
- }
+ TextureView* colorView = ToBackend(renderPass->colorAttachments[i].view.Get());
- // OpenGL SPEC requires the source/destination region must be a region that is contained
- // within srcImage/dstImage. Here the size of the image refers to the virtual size, while
- // Dawn validates texture copy extent with the physical size, so we need to re-calculate the
- // texture copy extent to ensure it should fit in the virtual size of the subresource.
- Extent3D ComputeTextureCopyExtent(const TextureCopy& textureCopy,
- const Extent3D& copySize) {
- Extent3D validTextureCopyExtent = copySize;
- const TextureBase* texture = textureCopy.texture.Get();
- Extent3D virtualSizeAtLevel = texture->GetMipLevelVirtualSize(textureCopy.mipLevel);
- ASSERT(textureCopy.origin.x <= virtualSizeAtLevel.width);
- ASSERT(textureCopy.origin.y <= virtualSizeAtLevel.height);
- if (copySize.width > virtualSizeAtLevel.width - textureCopy.origin.x) {
- ASSERT(texture->GetFormat().isCompressed);
- validTextureCopyExtent.width = virtualSizeAtLevel.width - textureCopy.origin.x;
- }
- if (copySize.height > virtualSizeAtLevel.height - textureCopy.origin.y) {
- ASSERT(texture->GetFormat().isCompressed);
- validTextureCopyExtent.height = virtualSizeAtLevel.height - textureCopy.origin.y;
- }
-
- return validTextureCopyExtent;
- }
+ gl.BindFramebuffer(GL_READ_FRAMEBUFFER, readFbo);
+ colorView->BindToFramebuffer(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0);
- bool TextureFormatIsSnorm(wgpu::TextureFormat format) {
- return format == wgpu::TextureFormat::RGBA8Snorm ||
- format == wgpu::TextureFormat::RG8Snorm ||
- format == wgpu::TextureFormat::R8Snorm;
+ TextureView* resolveView =
+ ToBackend(renderPass->colorAttachments[i].resolveTarget.Get());
+ gl.BindFramebuffer(GL_DRAW_FRAMEBUFFER, writeFbo);
+ resolveView->BindToFramebuffer(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0);
+ gl.BlitFramebuffer(0, 0, renderPass->width, renderPass->height, 0, 0, renderPass->width,
+ renderPass->height, GL_COLOR_BUFFER_BIT, GL_NEAREST);
+ ToBackend(resolveView->GetTexture())->Touch();
}
- } // namespace
+ }
- CommandBuffer::CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor)
- : CommandBufferBase(encoder, descriptor) {
+ gl.DeleteFramebuffers(1, &readFbo);
+ gl.DeleteFramebuffers(1, &writeFbo);
+}
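
As an aside for readers less familiar with GL, the helper above implements the standard blit-based multisample resolve. A minimal sketch of the raw call sequence follows, assuming an active GL context, a multisampled color texture msaaTex, a single-sampled resolveTex of the same 800x600 size (names and values are hypothetical), and direct GL calls in place of Dawn's OpenGLFunctions table:

    GLuint readFbo = 0, drawFbo = 0;
    glGenFramebuffers(1, &readFbo);
    glGenFramebuffers(1, &drawFbo);

    // Attach the multisampled source to the read framebuffer.
    glBindFramebuffer(GL_READ_FRAMEBUFFER, readFbo);
    glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
                           GL_TEXTURE_2D_MULTISAMPLE, msaaTex, 0);

    // Attach the single-sampled destination to the draw framebuffer.
    glBindFramebuffer(GL_DRAW_FRAMEBUFFER, drawFbo);
    glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
                           GL_TEXTURE_2D, resolveTex, 0);

    // The blit performs the resolve; the filter must be GL_NEAREST for a
    // multisampled source.
    glBlitFramebuffer(0, 0, 800, 600, 0, 0, 800, 600, GL_COLOR_BUFFER_BIT, GL_NEAREST);

    glDeleteFramebuffers(1, &readFbo);
    glDeleteFramebuffers(1, &drawFbo);
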
+
+// The OpenGL spec requires that the source/destination region be contained within
+// srcImage/dstImage. Here the size of the image refers to the virtual size, while Dawn
+// validates the texture copy extent against the physical size, so we need to re-calculate
+// the texture copy extent to ensure it fits in the virtual size of the subresource.
+Extent3D ComputeTextureCopyExtent(const TextureCopy& textureCopy, const Extent3D& copySize) {
+ Extent3D validTextureCopyExtent = copySize;
+ const TextureBase* texture = textureCopy.texture.Get();
+ Extent3D virtualSizeAtLevel =
+ texture->GetMipLevelSingleSubresourceVirtualSize(textureCopy.mipLevel);
+ ASSERT(textureCopy.origin.x <= virtualSizeAtLevel.width);
+ ASSERT(textureCopy.origin.y <= virtualSizeAtLevel.height);
+ if (copySize.width > virtualSizeAtLevel.width - textureCopy.origin.x) {
+ ASSERT(texture->GetFormat().isCompressed);
+ validTextureCopyExtent.width = virtualSizeAtLevel.width - textureCopy.origin.x;
+ }
+ if (copySize.height > virtualSizeAtLevel.height - textureCopy.origin.y) {
+ ASSERT(texture->GetFormat().isCompressed);
+ validTextureCopyExtent.height = virtualSizeAtLevel.height - textureCopy.origin.y;
}
- MaybeError CommandBuffer::Execute() {
- const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
+ return validTextureCopyExtent;
+}
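
The virtual-versus-physical size distinction in the comment above matters mostly for block-compressed formats: the smallest mips can have a virtual size below one 4x4 block, while copies are validated against the block-aligned physical size. A minimal, self-contained sketch of the clamping (hypothetical Extent struct and sizes, copy origin at zero for brevity):

    #include <algorithm>
    #include <cassert>
    #include <cstdint>

    struct Extent {  // stand-in for Dawn's Extent3D, depth omitted
        uint32_t width;
        uint32_t height;
    };

    int main() {
        // A BC-compressed texture with a 7x7 base level: mip level 2 has a
        // virtual size of 1x1, but its physical size is the 4x4 block, so a
        // validated copy extent of 4x4 can exceed the virtual size.
        const Extent virtualSizeAtLevel{1, 1};
        Extent copyExtent{4, 4};

        // Clamp the extent to the virtual size, as ComputeTextureCopyExtent does.
        copyExtent.width = std::min(copyExtent.width, virtualSizeAtLevel.width);
        copyExtent.height = std::min(copyExtent.height, virtualSizeAtLevel.height);

        assert(copyExtent.width == 1 && copyExtent.height == 1);
        return 0;
    }
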
- auto LazyClearSyncScope = [](const SyncScopeResourceUsage& scope) {
- for (size_t i = 0; i < scope.textures.size(); i++) {
- Texture* texture = ToBackend(scope.textures[i]);
+bool TextureFormatIsSnorm(wgpu::TextureFormat format) {
+ return format == wgpu::TextureFormat::RGBA8Snorm || format == wgpu::TextureFormat::RG8Snorm ||
+ format == wgpu::TextureFormat::R8Snorm;
+}
+} // namespace
- // Clear subresources that are not render attachments. Render attachments will be
- // cleared in RecordBeginRenderPass by setting the loadop to clear when the texture
- // subresource has not been initialized before the render pass.
- scope.textureUsages[i].Iterate(
- [&](const SubresourceRange& range, wgpu::TextureUsage usage) {
- if (usage & ~wgpu::TextureUsage::RenderAttachment) {
- texture->EnsureSubresourceContentInitialized(range);
- }
- });
- }
+CommandBuffer::CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor)
+ : CommandBufferBase(encoder, descriptor) {}
- for (BufferBase* bufferBase : scope.buffers) {
- ToBackend(bufferBase)->EnsureDataInitialized();
- }
- };
-
- size_t nextComputePassNumber = 0;
- size_t nextRenderPassNumber = 0;
-
- Command type;
- while (mCommands.NextCommandId(&type)) {
- switch (type) {
- case Command::BeginComputePass: {
- mCommands.NextCommand<BeginComputePassCmd>();
- for (const SyncScopeResourceUsage& scope :
- GetResourceUsages().computePasses[nextComputePassNumber].dispatchUsages) {
- LazyClearSyncScope(scope);
- }
- DAWN_TRY(ExecuteComputePass());
+MaybeError CommandBuffer::Execute() {
+ const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
- nextComputePassNumber++;
- break;
- }
+ auto LazyClearSyncScope = [](const SyncScopeResourceUsage& scope) {
+ for (size_t i = 0; i < scope.textures.size(); i++) {
+ Texture* texture = ToBackend(scope.textures[i]);
- case Command::BeginRenderPass: {
- auto* cmd = mCommands.NextCommand<BeginRenderPassCmd>();
- LazyClearSyncScope(GetResourceUsages().renderPasses[nextRenderPassNumber]);
- LazyClearRenderPassAttachments(cmd);
- DAWN_TRY(ExecuteRenderPass(cmd));
+ // Clear subresources that are not render attachments. Render attachments will be
+ // cleared in RecordBeginRenderPass by setting the loadop to clear when the texture
+ // subresource has not been initialized before the render pass.
+ scope.textureUsages[i].Iterate(
+ [&](const SubresourceRange& range, wgpu::TextureUsage usage) {
+ if (usage & ~wgpu::TextureUsage::RenderAttachment) {
+ texture->EnsureSubresourceContentInitialized(range);
+ }
+ });
+ }
- nextRenderPassNumber++;
- break;
+ for (BufferBase* bufferBase : scope.buffers) {
+ ToBackend(bufferBase)->EnsureDataInitialized();
+ }
+ };
+
+ size_t nextComputePassNumber = 0;
+ size_t nextRenderPassNumber = 0;
+
+ Command type;
+ while (mCommands.NextCommandId(&type)) {
+ switch (type) {
+ case Command::BeginComputePass: {
+ mCommands.NextCommand<BeginComputePassCmd>();
+ for (const SyncScopeResourceUsage& scope :
+ GetResourceUsages().computePasses[nextComputePassNumber].dispatchUsages) {
+ LazyClearSyncScope(scope);
}
+ DAWN_TRY(ExecuteComputePass());
- case Command::CopyBufferToBuffer: {
- CopyBufferToBufferCmd* copy = mCommands.NextCommand<CopyBufferToBufferCmd>();
- if (copy->size == 0) {
- // Skip no-op copies.
- break;
- }
+ nextComputePassNumber++;
+ break;
+ }
- ToBackend(copy->source)->EnsureDataInitialized();
- ToBackend(copy->destination)
- ->EnsureDataInitializedAsDestination(copy->destinationOffset, copy->size);
+ case Command::BeginRenderPass: {
+ auto* cmd = mCommands.NextCommand<BeginRenderPassCmd>();
+ LazyClearSyncScope(GetResourceUsages().renderPasses[nextRenderPassNumber]);
+ LazyClearRenderPassAttachments(cmd);
+ DAWN_TRY(ExecuteRenderPass(cmd));
- gl.BindBuffer(GL_PIXEL_PACK_BUFFER, ToBackend(copy->source)->GetHandle());
- gl.BindBuffer(GL_PIXEL_UNPACK_BUFFER,
- ToBackend(copy->destination)->GetHandle());
- gl.CopyBufferSubData(GL_PIXEL_PACK_BUFFER, GL_PIXEL_UNPACK_BUFFER,
- copy->sourceOffset, copy->destinationOffset, copy->size);
+ nextRenderPassNumber++;
+ break;
+ }
- gl.BindBuffer(GL_PIXEL_PACK_BUFFER, 0);
- gl.BindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
+ case Command::CopyBufferToBuffer: {
+ CopyBufferToBufferCmd* copy = mCommands.NextCommand<CopyBufferToBufferCmd>();
+ if (copy->size == 0) {
+ // Skip no-op copies.
break;
}
- case Command::CopyBufferToTexture: {
- CopyBufferToTextureCmd* copy = mCommands.NextCommand<CopyBufferToTextureCmd>();
- if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
- copy->copySize.depthOrArrayLayers == 0) {
- // Skip no-op copies.
- continue;
- }
- auto& src = copy->source;
- auto& dst = copy->destination;
- Buffer* buffer = ToBackend(src.buffer.Get());
+ ToBackend(copy->source)->EnsureDataInitialized();
+ ToBackend(copy->destination)
+ ->EnsureDataInitializedAsDestination(copy->destinationOffset, copy->size);
- DAWN_INVALID_IF(
- dst.aspect == Aspect::Stencil,
- "Copies to stencil textures are unsupported on the OpenGL backend.");
+ gl.BindBuffer(GL_PIXEL_PACK_BUFFER, ToBackend(copy->source)->GetHandle());
+ gl.BindBuffer(GL_PIXEL_UNPACK_BUFFER, ToBackend(copy->destination)->GetHandle());
+ gl.CopyBufferSubData(GL_PIXEL_PACK_BUFFER, GL_PIXEL_UNPACK_BUFFER,
+ copy->sourceOffset, copy->destinationOffset, copy->size);
- ASSERT(dst.aspect == Aspect::Color);
+ gl.BindBuffer(GL_PIXEL_PACK_BUFFER, 0);
+ gl.BindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
+ break;
+ }
- buffer->EnsureDataInitialized();
- SubresourceRange range = GetSubresourcesAffectedByCopy(dst, copy->copySize);
- if (IsCompleteSubresourceCopiedTo(dst.texture.Get(), copy->copySize,
- dst.mipLevel)) {
- dst.texture->SetIsSubresourceContentInitialized(true, range);
- } else {
- ToBackend(dst.texture)->EnsureSubresourceContentInitialized(range);
- }
+ case Command::CopyBufferToTexture: {
+ CopyBufferToTextureCmd* copy = mCommands.NextCommand<CopyBufferToTextureCmd>();
+ if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
+ copy->copySize.depthOrArrayLayers == 0) {
+ // Skip no-op copies.
+ continue;
+ }
+ auto& src = copy->source;
+ auto& dst = copy->destination;
+ Buffer* buffer = ToBackend(src.buffer.Get());
- gl.BindBuffer(GL_PIXEL_UNPACK_BUFFER, buffer->GetHandle());
+ DAWN_INVALID_IF(
+ dst.aspect == Aspect::Stencil,
+ "Copies to stencil textures are unsupported on the OpenGL backend.");
- TextureDataLayout dataLayout;
- dataLayout.offset = 0;
- dataLayout.bytesPerRow = src.bytesPerRow;
- dataLayout.rowsPerImage = src.rowsPerImage;
+ ASSERT(dst.aspect == Aspect::Color);
- DoTexSubImage(gl, dst, reinterpret_cast<void*>(src.offset), dataLayout,
- copy->copySize);
- gl.BindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
- ToBackend(dst.texture)->Touch();
- break;
+ buffer->EnsureDataInitialized();
+ SubresourceRange range = GetSubresourcesAffectedByCopy(dst, copy->copySize);
+ if (IsCompleteSubresourceCopiedTo(dst.texture.Get(), copy->copySize,
+ dst.mipLevel)) {
+ dst.texture->SetIsSubresourceContentInitialized(true, range);
+ } else {
+ ToBackend(dst.texture)->EnsureSubresourceContentInitialized(range);
}
- case Command::CopyTextureToBuffer: {
- CopyTextureToBufferCmd* copy = mCommands.NextCommand<CopyTextureToBufferCmd>();
- if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
- copy->copySize.depthOrArrayLayers == 0) {
- // Skip no-op copies.
- continue;
- }
- auto& src = copy->source;
- auto& dst = copy->destination;
- auto& copySize = copy->copySize;
- Texture* texture = ToBackend(src.texture.Get());
- Buffer* buffer = ToBackend(dst.buffer.Get());
- const Format& formatInfo = texture->GetFormat();
- const GLFormat& format = texture->GetGLFormat();
- GLenum target = texture->GetGLTarget();
-
- // TODO(crbug.com/dawn/667): Implement validation in WebGPU/Compat to
- // avoid this codepath. OpenGL does not support readback from non-renderable
- // texture formats.
- if (formatInfo.isCompressed ||
- (TextureFormatIsSnorm(formatInfo.format) &&
- GetDevice()->IsToggleEnabled(Toggle::DisableSnormRead))) {
- UNREACHABLE();
- }
+ gl.BindBuffer(GL_PIXEL_UNPACK_BUFFER, buffer->GetHandle());
- buffer->EnsureDataInitializedAsDestination(copy);
-
- ASSERT(texture->GetDimension() != wgpu::TextureDimension::e1D);
- SubresourceRange subresources =
- GetSubresourcesAffectedByCopy(src, copy->copySize);
- texture->EnsureSubresourceContentInitialized(subresources);
- // The only way to move data from a texture to a buffer in GL is via
- // glReadPixels with a pack buffer. Create a temporary FBO for the copy.
- gl.BindTexture(target, texture->GetHandle());
-
- GLuint readFBO = 0;
- gl.GenFramebuffers(1, &readFBO);
- gl.BindFramebuffer(GL_READ_FRAMEBUFFER, readFBO);
-
- const TexelBlockInfo& blockInfo = formatInfo.GetAspectInfo(src.aspect).block;
-
- gl.BindBuffer(GL_PIXEL_PACK_BUFFER, buffer->GetHandle());
- gl.PixelStorei(GL_PACK_ROW_LENGTH, dst.bytesPerRow / blockInfo.byteSize);
-
- GLenum glAttachment;
- GLenum glFormat;
- GLenum glType;
- switch (src.aspect) {
- case Aspect::Color:
- glAttachment = GL_COLOR_ATTACHMENT0;
- glFormat = format.format;
- glType = format.type;
- break;
- case Aspect::Depth:
- glAttachment = GL_DEPTH_ATTACHMENT;
- glFormat = GL_DEPTH_COMPONENT;
- glType = GL_FLOAT;
- break;
- case Aspect::Stencil:
- glAttachment = GL_STENCIL_ATTACHMENT;
- glFormat = GL_STENCIL_INDEX;
- glType = GL_UNSIGNED_BYTE;
- break;
+ TextureDataLayout dataLayout;
+ dataLayout.offset = 0;
+ dataLayout.bytesPerRow = src.bytesPerRow;
+ dataLayout.rowsPerImage = src.rowsPerImage;
- case Aspect::CombinedDepthStencil:
- case Aspect::None:
- case Aspect::Plane0:
- case Aspect::Plane1:
- UNREACHABLE();
- }
+ DoTexSubImage(gl, dst, reinterpret_cast<void*>(src.offset), dataLayout,
+ copy->copySize);
+ gl.BindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
+ ToBackend(dst.texture)->Touch();
+ break;
+ }
- uint8_t* offset =
- reinterpret_cast<uint8_t*>(static_cast<uintptr_t>(dst.offset));
- switch (texture->GetDimension()) {
- case wgpu::TextureDimension::e2D: {
- if (texture->GetArrayLayers() == 1) {
- gl.FramebufferTexture2D(GL_READ_FRAMEBUFFER, glAttachment, target,
- texture->GetHandle(), src.mipLevel);
- gl.ReadPixels(src.origin.x, src.origin.y, copySize.width,
- copySize.height, glFormat, glType, offset);
- break;
- }
- // Implementation for 2D array is the same as 3D.
- [[fallthrough]];
- }
+ case Command::CopyTextureToBuffer: {
+ CopyTextureToBufferCmd* copy = mCommands.NextCommand<CopyTextureToBufferCmd>();
+ if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
+ copy->copySize.depthOrArrayLayers == 0) {
+ // Skip no-op copies.
+ continue;
+ }
+ auto& src = copy->source;
+ auto& dst = copy->destination;
+ auto& copySize = copy->copySize;
+ Texture* texture = ToBackend(src.texture.Get());
+ Buffer* buffer = ToBackend(dst.buffer.Get());
+ const Format& formatInfo = texture->GetFormat();
+ const GLFormat& format = texture->GetGLFormat();
+ GLenum target = texture->GetGLTarget();
+
+ // TODO(crbug.com/dawn/667): Implement validation in WebGPU/Compat to
+ // avoid this codepath. OpenGL does not support readback from non-renderable
+ // texture formats.
+ if (formatInfo.isCompressed ||
+ (TextureFormatIsSnorm(formatInfo.format) &&
+ GetDevice()->IsToggleEnabled(Toggle::DisableSnormRead))) {
+ UNREACHABLE();
+ }
- case wgpu::TextureDimension::e3D: {
- const uint64_t bytesPerImage = dst.bytesPerRow * dst.rowsPerImage;
- for (uint32_t z = 0; z < copySize.depthOrArrayLayers; ++z) {
- gl.FramebufferTextureLayer(GL_READ_FRAMEBUFFER, glAttachment,
- texture->GetHandle(), src.mipLevel,
- src.origin.z + z);
- gl.ReadPixels(src.origin.x, src.origin.y, copySize.width,
- copySize.height, glFormat, glType, offset);
+ buffer->EnsureDataInitializedAsDestination(copy);
- offset += bytesPerImage;
- }
- break;
- }
+ ASSERT(texture->GetDimension() != wgpu::TextureDimension::e1D);
+ SubresourceRange subresources = GetSubresourcesAffectedByCopy(src, copy->copySize);
+ texture->EnsureSubresourceContentInitialized(subresources);
+ // The only way to move data from a texture to a buffer in GL is via
+ // glReadPixels with a pack buffer. Create a temporary FBO for the copy.
+ gl.BindTexture(target, texture->GetHandle());
- case wgpu::TextureDimension::e1D:
- UNREACHABLE();
- }
+ GLuint readFBO = 0;
+ gl.GenFramebuffers(1, &readFBO);
+ gl.BindFramebuffer(GL_READ_FRAMEBUFFER, readFBO);
- gl.PixelStorei(GL_PACK_ROW_LENGTH, 0);
+ const TexelBlockInfo& blockInfo = formatInfo.GetAspectInfo(src.aspect).block;
- gl.BindBuffer(GL_PIXEL_PACK_BUFFER, 0);
- gl.DeleteFramebuffers(1, &readFBO);
- break;
- }
+ gl.BindBuffer(GL_PIXEL_PACK_BUFFER, buffer->GetHandle());
+ gl.PixelStorei(GL_PACK_ROW_LENGTH, dst.bytesPerRow / blockInfo.byteSize);
- case Command::CopyTextureToTexture: {
- CopyTextureToTextureCmd* copy =
- mCommands.NextCommand<CopyTextureToTextureCmd>();
- if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
- copy->copySize.depthOrArrayLayers == 0) {
- // Skip no-op copies.
- continue;
- }
- auto& src = copy->source;
- auto& dst = copy->destination;
-
- // TODO(crbug.com/dawn/817): add workaround for the case that imageExtentSrc
- // is not equal to imageExtentDst. For example when copySize fits in the virtual
- // size of the source image but does not fit in the one of the destination
- // image.
- Extent3D copySize = ComputeTextureCopyExtent(dst, copy->copySize);
- Texture* srcTexture = ToBackend(src.texture.Get());
- Texture* dstTexture = ToBackend(dst.texture.Get());
-
- SubresourceRange srcRange = GetSubresourcesAffectedByCopy(src, copy->copySize);
- SubresourceRange dstRange = GetSubresourcesAffectedByCopy(dst, copy->copySize);
-
- srcTexture->EnsureSubresourceContentInitialized(srcRange);
- if (IsCompleteSubresourceCopiedTo(dstTexture, copySize, dst.mipLevel)) {
- dstTexture->SetIsSubresourceContentInitialized(true, dstRange);
- } else {
- dstTexture->EnsureSubresourceContentInitialized(dstRange);
- }
- CopyImageSubData(gl, src.aspect, srcTexture->GetHandle(),
- srcTexture->GetGLTarget(), src.mipLevel, src.origin,
- dstTexture->GetHandle(), dstTexture->GetGLTarget(),
- dst.mipLevel, dst.origin, copySize);
- ToBackend(dst.texture)->Touch();
- break;
+ GLenum glAttachment;
+ GLenum glFormat;
+ GLenum glType;
+ switch (src.aspect) {
+ case Aspect::Color:
+ glAttachment = GL_COLOR_ATTACHMENT0;
+ glFormat = format.format;
+ glType = format.type;
+ break;
+ case Aspect::Depth:
+ glAttachment = GL_DEPTH_ATTACHMENT;
+ glFormat = GL_DEPTH_COMPONENT;
+ glType = GL_FLOAT;
+ break;
+ case Aspect::Stencil:
+ glAttachment = GL_STENCIL_ATTACHMENT;
+ glFormat = GL_STENCIL_INDEX;
+ glType = GL_UNSIGNED_BYTE;
+ break;
+
+ case Aspect::CombinedDepthStencil:
+ case Aspect::None:
+ case Aspect::Plane0:
+ case Aspect::Plane1:
+ UNREACHABLE();
}
- case Command::ClearBuffer: {
- ClearBufferCmd* cmd = mCommands.NextCommand<ClearBufferCmd>();
- if (cmd->size == 0) {
- // Skip no-op fills.
- break;
+ uint8_t* offset = reinterpret_cast<uint8_t*>(static_cast<uintptr_t>(dst.offset));
+ switch (texture->GetDimension()) {
+ case wgpu::TextureDimension::e2D: {
+ if (texture->GetArrayLayers() == 1) {
+ gl.FramebufferTexture2D(GL_READ_FRAMEBUFFER, glAttachment, target,
+ texture->GetHandle(), src.mipLevel);
+ gl.ReadPixels(src.origin.x, src.origin.y, copySize.width,
+ copySize.height, glFormat, glType, offset);
+ break;
+ }
+ // Implementation for 2D array is the same as 3D.
+ [[fallthrough]];
}
- Buffer* dstBuffer = ToBackend(cmd->buffer.Get());
- bool clearedToZero =
- dstBuffer->EnsureDataInitializedAsDestination(cmd->offset, cmd->size);
+ case wgpu::TextureDimension::e3D: {
+ const uint64_t bytesPerImage = dst.bytesPerRow * dst.rowsPerImage;
+ for (uint32_t z = 0; z < copySize.depthOrArrayLayers; ++z) {
+ gl.FramebufferTextureLayer(GL_READ_FRAMEBUFFER, glAttachment,
+ texture->GetHandle(), src.mipLevel,
+ src.origin.z + z);
+ gl.ReadPixels(src.origin.x, src.origin.y, copySize.width,
+ copySize.height, glFormat, glType, offset);
- if (!clearedToZero) {
- const std::vector<uint8_t> clearValues(cmd->size, 0u);
- gl.BindBuffer(GL_ARRAY_BUFFER, dstBuffer->GetHandle());
- gl.BufferSubData(GL_ARRAY_BUFFER, cmd->offset, cmd->size,
- clearValues.data());
+ offset += bytesPerImage;
+ }
+ break;
}
- break;
+ case wgpu::TextureDimension::e1D:
+ UNREACHABLE();
}
- case Command::ResolveQuerySet: {
- // TODO(crbug.com/dawn/434): Resolve non-precise occlusion query.
- SkipCommand(&mCommands, type);
- break;
- }
+ gl.PixelStorei(GL_PACK_ROW_LENGTH, 0);
+
+ gl.BindBuffer(GL_PIXEL_PACK_BUFFER, 0);
+ gl.DeleteFramebuffers(1, &readFBO);
+ break;
+ }
- case Command::WriteTimestamp: {
- return DAWN_UNIMPLEMENTED_ERROR("WriteTimestamp unimplemented");
+ case Command::CopyTextureToTexture: {
+ CopyTextureToTextureCmd* copy = mCommands.NextCommand<CopyTextureToTextureCmd>();
+ if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
+ copy->copySize.depthOrArrayLayers == 0) {
+ // Skip no-op copies.
+ continue;
}
+ auto& src = copy->source;
+ auto& dst = copy->destination;
+
+                // TODO(crbug.com/dawn/817): add a workaround for the case where imageExtentSrc
+                // is not equal to imageExtentDst, for example when copySize fits in the virtual
+                // size of the source image but not in that of the destination image.
+ Extent3D copySize = ComputeTextureCopyExtent(dst, copy->copySize);
+ Texture* srcTexture = ToBackend(src.texture.Get());
+ Texture* dstTexture = ToBackend(dst.texture.Get());
+
+ SubresourceRange srcRange = GetSubresourcesAffectedByCopy(src, copy->copySize);
+ SubresourceRange dstRange = GetSubresourcesAffectedByCopy(dst, copy->copySize);
+
+ srcTexture->EnsureSubresourceContentInitialized(srcRange);
+ if (IsCompleteSubresourceCopiedTo(dstTexture, copySize, dst.mipLevel)) {
+ dstTexture->SetIsSubresourceContentInitialized(true, dstRange);
+ } else {
+ dstTexture->EnsureSubresourceContentInitialized(dstRange);
+ }
+ CopyImageSubData(gl, src.aspect, srcTexture->GetHandle(), srcTexture->GetGLTarget(),
+ src.mipLevel, src.origin, dstTexture->GetHandle(),
+ dstTexture->GetGLTarget(), dst.mipLevel, dst.origin, copySize);
+ ToBackend(dst.texture)->Touch();
+ break;
+ }
- case Command::InsertDebugMarker:
- case Command::PopDebugGroup:
- case Command::PushDebugGroup: {
- // Due to lack of linux driver support for GL_EXT_debug_marker
- // extension these functions are skipped.
- SkipCommand(&mCommands, type);
+ case Command::ClearBuffer: {
+ ClearBufferCmd* cmd = mCommands.NextCommand<ClearBufferCmd>();
+ if (cmd->size == 0) {
+ // Skip no-op fills.
break;
}
+ Buffer* dstBuffer = ToBackend(cmd->buffer.Get());
- case Command::WriteBuffer: {
- WriteBufferCmd* write = mCommands.NextCommand<WriteBufferCmd>();
- uint64_t offset = write->offset;
- uint64_t size = write->size;
- if (size == 0) {
- continue;
- }
-
- Buffer* dstBuffer = ToBackend(write->buffer.Get());
- uint8_t* data = mCommands.NextData<uint8_t>(size);
- dstBuffer->EnsureDataInitializedAsDestination(offset, size);
+ bool clearedToZero =
+ dstBuffer->EnsureDataInitializedAsDestination(cmd->offset, cmd->size);
+ if (!clearedToZero) {
+ const std::vector<uint8_t> clearValues(cmd->size, 0u);
gl.BindBuffer(GL_ARRAY_BUFFER, dstBuffer->GetHandle());
- gl.BufferSubData(GL_ARRAY_BUFFER, offset, size, data);
- break;
+ gl.BufferSubData(GL_ARRAY_BUFFER, cmd->offset, cmd->size, clearValues.data());
}
- default:
- UNREACHABLE();
+ break;
}
- }
- return {};
- }
+ case Command::ResolveQuerySet: {
+ // TODO(crbug.com/dawn/434): Resolve non-precise occlusion query.
+ SkipCommand(&mCommands, type);
+ break;
+ }
- MaybeError CommandBuffer::ExecuteComputePass() {
- const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
- ComputePipeline* lastPipeline = nullptr;
- BindGroupTracker bindGroupTracker = {};
-
- Command type;
- while (mCommands.NextCommandId(&type)) {
- switch (type) {
- case Command::EndComputePass: {
- mCommands.NextCommand<EndComputePassCmd>();
- return {};
- }
+ case Command::WriteTimestamp: {
+ return DAWN_UNIMPLEMENTED_ERROR("WriteTimestamp unimplemented");
+ }
- case Command::Dispatch: {
- DispatchCmd* dispatch = mCommands.NextCommand<DispatchCmd>();
- bindGroupTracker.Apply(gl);
+ case Command::InsertDebugMarker:
+ case Command::PopDebugGroup:
+ case Command::PushDebugGroup: {
+                // Due to lack of Linux driver support for the GL_EXT_debug_marker
+                // extension, these functions are skipped.
+ SkipCommand(&mCommands, type);
+ break;
+ }
- gl.DispatchCompute(dispatch->x, dispatch->y, dispatch->z);
- gl.MemoryBarrier(GL_ALL_BARRIER_BITS);
- break;
+ case Command::WriteBuffer: {
+ WriteBufferCmd* write = mCommands.NextCommand<WriteBufferCmd>();
+ uint64_t offset = write->offset;
+ uint64_t size = write->size;
+ if (size == 0) {
+ continue;
}
- case Command::DispatchIndirect: {
- DispatchIndirectCmd* dispatch = mCommands.NextCommand<DispatchIndirectCmd>();
- bindGroupTracker.Apply(gl);
+ Buffer* dstBuffer = ToBackend(write->buffer.Get());
+ uint8_t* data = mCommands.NextData<uint8_t>(size);
+ dstBuffer->EnsureDataInitializedAsDestination(offset, size);
- uint64_t indirectBufferOffset = dispatch->indirectOffset;
- Buffer* indirectBuffer = ToBackend(dispatch->indirectBuffer.Get());
+ gl.BindBuffer(GL_ARRAY_BUFFER, dstBuffer->GetHandle());
+ gl.BufferSubData(GL_ARRAY_BUFFER, offset, size, data);
+ break;
+ }
- gl.BindBuffer(GL_DISPATCH_INDIRECT_BUFFER, indirectBuffer->GetHandle());
- gl.DispatchComputeIndirect(static_cast<GLintptr>(indirectBufferOffset));
- gl.MemoryBarrier(GL_ALL_BARRIER_BITS);
- break;
- }
+ default:
+ UNREACHABLE();
+ }
+ }
- case Command::SetComputePipeline: {
- SetComputePipelineCmd* cmd = mCommands.NextCommand<SetComputePipelineCmd>();
- lastPipeline = ToBackend(cmd->pipeline).Get();
- lastPipeline->ApplyNow();
+ return {};
+}
- bindGroupTracker.OnSetPipeline(lastPipeline);
- break;
- }
+MaybeError CommandBuffer::ExecuteComputePass() {
+ const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
+ ComputePipeline* lastPipeline = nullptr;
+ BindGroupTracker bindGroupTracker = {};
- case Command::SetBindGroup: {
- SetBindGroupCmd* cmd = mCommands.NextCommand<SetBindGroupCmd>();
- uint32_t* dynamicOffsets = nullptr;
- if (cmd->dynamicOffsetCount > 0) {
- dynamicOffsets = mCommands.NextData<uint32_t>(cmd->dynamicOffsetCount);
- }
- bindGroupTracker.OnSetBindGroup(cmd->index, cmd->group.Get(),
- cmd->dynamicOffsetCount, dynamicOffsets);
- break;
- }
+ Command type;
+ while (mCommands.NextCommandId(&type)) {
+ switch (type) {
+ case Command::EndComputePass: {
+ mCommands.NextCommand<EndComputePassCmd>();
+ return {};
+ }
- case Command::InsertDebugMarker:
- case Command::PopDebugGroup:
- case Command::PushDebugGroup: {
- // Due to lack of linux driver support for GL_EXT_debug_marker
- // extension these functions are skipped.
- SkipCommand(&mCommands, type);
- break;
- }
+ case Command::Dispatch: {
+ DispatchCmd* dispatch = mCommands.NextCommand<DispatchCmd>();
+ bindGroupTracker.Apply(gl);
- case Command::WriteTimestamp: {
- return DAWN_UNIMPLEMENTED_ERROR("WriteTimestamp unimplemented");
- }
+ gl.DispatchCompute(dispatch->x, dispatch->y, dispatch->z);
+ gl.MemoryBarrier(GL_ALL_BARRIER_BITS);
+ break;
+ }
- default:
- UNREACHABLE();
+ case Command::DispatchIndirect: {
+ DispatchIndirectCmd* dispatch = mCommands.NextCommand<DispatchIndirectCmd>();
+ bindGroupTracker.Apply(gl);
+
+ uint64_t indirectBufferOffset = dispatch->indirectOffset;
+ Buffer* indirectBuffer = ToBackend(dispatch->indirectBuffer.Get());
+
+ gl.BindBuffer(GL_DISPATCH_INDIRECT_BUFFER, indirectBuffer->GetHandle());
+ gl.DispatchComputeIndirect(static_cast<GLintptr>(indirectBufferOffset));
+ gl.MemoryBarrier(GL_ALL_BARRIER_BITS);
+ break;
}
- }
- // EndComputePass should have been called
- UNREACHABLE();
- }
+ case Command::SetComputePipeline: {
+ SetComputePipelineCmd* cmd = mCommands.NextCommand<SetComputePipelineCmd>();
+ lastPipeline = ToBackend(cmd->pipeline).Get();
+ lastPipeline->ApplyNow();
- MaybeError CommandBuffer::ExecuteRenderPass(BeginRenderPassCmd* renderPass) {
- const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
- GLuint fbo = 0;
-
- // Create the framebuffer used for this render pass and calls the correct glDrawBuffers
- {
- // TODO(kainino@chromium.org): This is added to possibly work around an issue seen on
- // Windows/Intel. It should break any feedback loop before the clears, even if there
- // shouldn't be any negative effects from this. Investigate whether it's actually
- // needed.
- gl.BindFramebuffer(GL_READ_FRAMEBUFFER, 0);
- // TODO(kainino@chromium.org): possible future optimization: create these framebuffers
- // at Framebuffer build time (or maybe CommandBuffer build time) so they don't have to
- // be created and destroyed at draw time.
- gl.GenFramebuffers(1, &fbo);
- gl.BindFramebuffer(GL_DRAW_FRAMEBUFFER, fbo);
-
- // Mapping from attachmentSlot to GL framebuffer attachment points. Defaults to zero
- // (GL_NONE).
- ityp::array<ColorAttachmentIndex, GLenum, kMaxColorAttachments> drawBuffers = {};
-
- // Construct GL framebuffer
-
- ColorAttachmentIndex attachmentCount(uint8_t(0));
- for (ColorAttachmentIndex i :
- IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
- TextureView* textureView = ToBackend(renderPass->colorAttachments[i].view.Get());
- GLenum glAttachment = GL_COLOR_ATTACHMENT0 + static_cast<uint8_t>(i);
-
- // Attach color buffers.
- textureView->BindToFramebuffer(GL_DRAW_FRAMEBUFFER, glAttachment);
- drawBuffers[i] = glAttachment;
- attachmentCount = i;
- attachmentCount++;
+ bindGroupTracker.OnSetPipeline(lastPipeline);
+ break;
}
- gl.DrawBuffers(static_cast<uint8_t>(attachmentCount), drawBuffers.data());
-
- if (renderPass->attachmentState->HasDepthStencilAttachment()) {
- TextureView* textureView = ToBackend(renderPass->depthStencilAttachment.view.Get());
- const Format& format = textureView->GetTexture()->GetFormat();
-
- // Attach depth/stencil buffer.
- GLenum glAttachment = 0;
- if (format.aspects == (Aspect::Depth | Aspect::Stencil)) {
- glAttachment = GL_DEPTH_STENCIL_ATTACHMENT;
- } else if (format.aspects == Aspect::Depth) {
- glAttachment = GL_DEPTH_ATTACHMENT;
- } else if (format.aspects == Aspect::Stencil) {
- glAttachment = GL_STENCIL_ATTACHMENT;
- } else {
- UNREACHABLE();
+
+ case Command::SetBindGroup: {
+ SetBindGroupCmd* cmd = mCommands.NextCommand<SetBindGroupCmd>();
+ uint32_t* dynamicOffsets = nullptr;
+ if (cmd->dynamicOffsetCount > 0) {
+ dynamicOffsets = mCommands.NextData<uint32_t>(cmd->dynamicOffsetCount);
}
+ bindGroupTracker.OnSetBindGroup(cmd->index, cmd->group.Get(),
+ cmd->dynamicOffsetCount, dynamicOffsets);
+ break;
+ }
+
+ case Command::InsertDebugMarker:
+ case Command::PopDebugGroup:
+ case Command::PushDebugGroup: {
+                // Due to lack of Linux driver support for the GL_EXT_debug_marker
+                // extension, these functions are skipped.
+ SkipCommand(&mCommands, type);
+ break;
+ }
- textureView->BindToFramebuffer(GL_DRAW_FRAMEBUFFER, glAttachment);
+ case Command::WriteTimestamp: {
+ return DAWN_UNIMPLEMENTED_ERROR("WriteTimestamp unimplemented");
}
+
+ default:
+ UNREACHABLE();
}
+ }
- ASSERT(gl.CheckFramebufferStatus(GL_DRAW_FRAMEBUFFER) == GL_FRAMEBUFFER_COMPLETE);
-
- // Set defaults for dynamic state before executing clears and commands.
- PersistentPipelineState persistentPipelineState;
- persistentPipelineState.SetDefaultState(gl);
- gl.BlendColor(0, 0, 0, 0);
- gl.Viewport(0, 0, renderPass->width, renderPass->height);
- gl.DepthRangef(0.0, 1.0);
- gl.Scissor(0, 0, renderPass->width, renderPass->height);
-
- // Clear framebuffer attachments as needed
- {
- for (ColorAttachmentIndex index :
- IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
- uint8_t i = static_cast<uint8_t>(index);
- auto* attachmentInfo = &renderPass->colorAttachments[index];
-
- // Load op - color
- if (attachmentInfo->loadOp == wgpu::LoadOp::Clear) {
- gl.ColorMask(true, true, true, true);
-
- wgpu::TextureComponentType baseType =
- attachmentInfo->view->GetFormat().GetAspectInfo(Aspect::Color).baseType;
- switch (baseType) {
- case wgpu::TextureComponentType::Float: {
- const std::array<float, 4> appliedClearColor =
- ConvertToFloatColor(attachmentInfo->clearColor);
- gl.ClearBufferfv(GL_COLOR, i, appliedClearColor.data());
- break;
- }
- case wgpu::TextureComponentType::Uint: {
- const std::array<uint32_t, 4> appliedClearColor =
- ConvertToUnsignedIntegerColor(attachmentInfo->clearColor);
- gl.ClearBufferuiv(GL_COLOR, i, appliedClearColor.data());
- break;
- }
- case wgpu::TextureComponentType::Sint: {
- const std::array<int32_t, 4> appliedClearColor =
- ConvertToSignedIntegerColor(attachmentInfo->clearColor);
- gl.ClearBufferiv(GL_COLOR, i, appliedClearColor.data());
- break;
- }
+ // EndComputePass should have been called
+ UNREACHABLE();
+}
+
+MaybeError CommandBuffer::ExecuteRenderPass(BeginRenderPassCmd* renderPass) {
+ const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
+ GLuint fbo = 0;
+
+    // Create the framebuffer used for this render pass and call the correct glDrawBuffers.
+ {
+ // TODO(kainino@chromium.org): This is added to possibly work around an issue seen on
+ // Windows/Intel. It should break any feedback loop before the clears, even if there
+ // shouldn't be any negative effects from this. Investigate whether it's actually
+ // needed.
+ gl.BindFramebuffer(GL_READ_FRAMEBUFFER, 0);
+ // TODO(kainino@chromium.org): possible future optimization: create these framebuffers
+ // at Framebuffer build time (or maybe CommandBuffer build time) so they don't have to
+ // be created and destroyed at draw time.
+ gl.GenFramebuffers(1, &fbo);
+ gl.BindFramebuffer(GL_DRAW_FRAMEBUFFER, fbo);
+
+ // Mapping from attachmentSlot to GL framebuffer attachment points. Defaults to zero
+ // (GL_NONE).
+ ityp::array<ColorAttachmentIndex, GLenum, kMaxColorAttachments> drawBuffers = {};
+
+ // Construct GL framebuffer
+
+ ColorAttachmentIndex attachmentCount(uint8_t(0));
+ for (ColorAttachmentIndex i :
+ IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
+ TextureView* textureView = ToBackend(renderPass->colorAttachments[i].view.Get());
+ GLenum glAttachment = GL_COLOR_ATTACHMENT0 + static_cast<uint8_t>(i);
+
+ // Attach color buffers.
+ textureView->BindToFramebuffer(GL_DRAW_FRAMEBUFFER, glAttachment);
+ drawBuffers[i] = glAttachment;
+ attachmentCount = i;
+ attachmentCount++;
+ }
+ gl.DrawBuffers(static_cast<uint8_t>(attachmentCount), drawBuffers.data());
+
+ if (renderPass->attachmentState->HasDepthStencilAttachment()) {
+ TextureView* textureView = ToBackend(renderPass->depthStencilAttachment.view.Get());
+ const Format& format = textureView->GetTexture()->GetFormat();
+
+ // Attach depth/stencil buffer.
+ GLenum glAttachment = 0;
+ if (format.aspects == (Aspect::Depth | Aspect::Stencil)) {
+ glAttachment = GL_DEPTH_STENCIL_ATTACHMENT;
+ } else if (format.aspects == Aspect::Depth) {
+ glAttachment = GL_DEPTH_ATTACHMENT;
+ } else if (format.aspects == Aspect::Stencil) {
+ glAttachment = GL_STENCIL_ATTACHMENT;
+ } else {
+ UNREACHABLE();
+ }
- case wgpu::TextureComponentType::DepthComparison:
- UNREACHABLE();
+ textureView->BindToFramebuffer(GL_DRAW_FRAMEBUFFER, glAttachment);
+ }
+ }
+
+ ASSERT(gl.CheckFramebufferStatus(GL_DRAW_FRAMEBUFFER) == GL_FRAMEBUFFER_COMPLETE);
+
+ // Set defaults for dynamic state before executing clears and commands.
+ PersistentPipelineState persistentPipelineState;
+ persistentPipelineState.SetDefaultState(gl);
+ gl.BlendColor(0, 0, 0, 0);
+ gl.Viewport(0, 0, renderPass->width, renderPass->height);
+ gl.DepthRangef(0.0, 1.0);
+ gl.Scissor(0, 0, renderPass->width, renderPass->height);
+
+ // Clear framebuffer attachments as needed
+ {
+ for (ColorAttachmentIndex index :
+ IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
+ uint8_t i = static_cast<uint8_t>(index);
+ auto* attachmentInfo = &renderPass->colorAttachments[index];
+
+ // Load op - color
+ if (attachmentInfo->loadOp == wgpu::LoadOp::Clear) {
+ gl.ColorMask(true, true, true, true);
+
+ wgpu::TextureComponentType baseType =
+ attachmentInfo->view->GetFormat().GetAspectInfo(Aspect::Color).baseType;
+ switch (baseType) {
+ case wgpu::TextureComponentType::Float: {
+ const std::array<float, 4> appliedClearColor =
+ ConvertToFloatColor(attachmentInfo->clearColor);
+ gl.ClearBufferfv(GL_COLOR, i, appliedClearColor.data());
+ break;
+ }
+ case wgpu::TextureComponentType::Uint: {
+ const std::array<uint32_t, 4> appliedClearColor =
+ ConvertToUnsignedIntegerColor(attachmentInfo->clearColor);
+ gl.ClearBufferuiv(GL_COLOR, i, appliedClearColor.data());
+ break;
+ }
+ case wgpu::TextureComponentType::Sint: {
+ const std::array<int32_t, 4> appliedClearColor =
+ ConvertToSignedIntegerColor(attachmentInfo->clearColor);
+ gl.ClearBufferiv(GL_COLOR, i, appliedClearColor.data());
+ break;
}
- }
- if (attachmentInfo->storeOp == wgpu::StoreOp::Discard) {
- // TODO(natlee@microsoft.com): call glDiscard to do optimization
+ case wgpu::TextureComponentType::DepthComparison:
+ UNREACHABLE();
}
}
- if (renderPass->attachmentState->HasDepthStencilAttachment()) {
- auto* attachmentInfo = &renderPass->depthStencilAttachment;
- const Format& attachmentFormat = attachmentInfo->view->GetTexture()->GetFormat();
+ if (attachmentInfo->storeOp == wgpu::StoreOp::Discard) {
+                // TODO(natlee@microsoft.com): call glDiscard as an optimization
+ }
+ }
- // Load op - depth/stencil
- bool doDepthClear = attachmentFormat.HasDepth() &&
- (attachmentInfo->depthLoadOp == wgpu::LoadOp::Clear);
- bool doStencilClear = attachmentFormat.HasStencil() &&
- (attachmentInfo->stencilLoadOp == wgpu::LoadOp::Clear);
+ if (renderPass->attachmentState->HasDepthStencilAttachment()) {
+ auto* attachmentInfo = &renderPass->depthStencilAttachment;
+ const Format& attachmentFormat = attachmentInfo->view->GetTexture()->GetFormat();
- if (doDepthClear) {
- gl.DepthMask(GL_TRUE);
- }
- if (doStencilClear) {
- gl.StencilMask(GetStencilMaskFromStencilFormat(attachmentFormat.format));
- }
+ // Load op - depth/stencil
+ bool doDepthClear =
+ attachmentFormat.HasDepth() && (attachmentInfo->depthLoadOp == wgpu::LoadOp::Clear);
+ bool doStencilClear = attachmentFormat.HasStencil() &&
+ (attachmentInfo->stencilLoadOp == wgpu::LoadOp::Clear);
- if (doDepthClear && doStencilClear) {
- gl.ClearBufferfi(GL_DEPTH_STENCIL, 0, attachmentInfo->clearDepth,
- attachmentInfo->clearStencil);
- } else if (doDepthClear) {
- gl.ClearBufferfv(GL_DEPTH, 0, &attachmentInfo->clearDepth);
- } else if (doStencilClear) {
- const GLint clearStencil = attachmentInfo->clearStencil;
- gl.ClearBufferiv(GL_STENCIL, 0, &clearStencil);
- }
+ if (doDepthClear) {
+ gl.DepthMask(GL_TRUE);
+ }
+ if (doStencilClear) {
+ gl.StencilMask(GetStencilMaskFromStencilFormat(attachmentFormat.format));
+ }
+
+ if (doDepthClear && doStencilClear) {
+ gl.ClearBufferfi(GL_DEPTH_STENCIL, 0, attachmentInfo->clearDepth,
+ attachmentInfo->clearStencil);
+ } else if (doDepthClear) {
+ gl.ClearBufferfv(GL_DEPTH, 0, &attachmentInfo->clearDepth);
+ } else if (doStencilClear) {
+ const GLint clearStencil = attachmentInfo->clearStencil;
+ gl.ClearBufferiv(GL_STENCIL, 0, &clearStencil);
}
}
+ }
- RenderPipeline* lastPipeline = nullptr;
- uint64_t indexBufferBaseOffset = 0;
- GLenum indexBufferFormat;
- uint32_t indexFormatSize;
-
- VertexStateBufferBindingTracker vertexStateBufferBindingTracker;
- BindGroupTracker bindGroupTracker = {};
-
- auto DoRenderBundleCommand = [&](CommandIterator* iter, Command type) {
- switch (type) {
- case Command::Draw: {
- DrawCmd* draw = iter->NextCommand<DrawCmd>();
- vertexStateBufferBindingTracker.Apply(gl);
- bindGroupTracker.Apply(gl);
-
- if (draw->firstInstance > 0) {
- gl.DrawArraysInstancedBaseInstance(
- lastPipeline->GetGLPrimitiveTopology(), draw->firstVertex,
- draw->vertexCount, draw->instanceCount, draw->firstInstance);
- } else {
- // This branch is only needed on OpenGL < 4.2
- gl.DrawArraysInstanced(lastPipeline->GetGLPrimitiveTopology(),
- draw->firstVertex, draw->vertexCount,
- draw->instanceCount);
- }
- break;
+ RenderPipeline* lastPipeline = nullptr;
+ uint64_t indexBufferBaseOffset = 0;
+ GLenum indexBufferFormat;
+ uint32_t indexFormatSize;
+
+ VertexStateBufferBindingTracker vertexStateBufferBindingTracker;
+ BindGroupTracker bindGroupTracker = {};
+
+ auto DoRenderBundleCommand = [&](CommandIterator* iter, Command type) {
+ switch (type) {
+ case Command::Draw: {
+ DrawCmd* draw = iter->NextCommand<DrawCmd>();
+ vertexStateBufferBindingTracker.Apply(gl);
+ bindGroupTracker.Apply(gl);
+
+ if (draw->firstInstance > 0) {
+ gl.DrawArraysInstancedBaseInstance(lastPipeline->GetGLPrimitiveTopology(),
+ draw->firstVertex, draw->vertexCount,
+ draw->instanceCount, draw->firstInstance);
+ } else {
+ // This branch is only needed on OpenGL < 4.2
+ gl.DrawArraysInstanced(lastPipeline->GetGLPrimitiveTopology(),
+ draw->firstVertex, draw->vertexCount,
+ draw->instanceCount);
}
+ break;
+ }
- case Command::DrawIndexed: {
- DrawIndexedCmd* draw = iter->NextCommand<DrawIndexedCmd>();
- vertexStateBufferBindingTracker.Apply(gl);
- bindGroupTracker.Apply(gl);
-
- if (draw->firstInstance > 0) {
- gl.DrawElementsInstancedBaseVertexBaseInstance(
+ case Command::DrawIndexed: {
+ DrawIndexedCmd* draw = iter->NextCommand<DrawIndexedCmd>();
+ vertexStateBufferBindingTracker.Apply(gl);
+ bindGroupTracker.Apply(gl);
+
+ if (draw->firstInstance > 0) {
+ gl.DrawElementsInstancedBaseVertexBaseInstance(
+ lastPipeline->GetGLPrimitiveTopology(), draw->indexCount, indexBufferFormat,
+ reinterpret_cast<void*>(draw->firstIndex * indexFormatSize +
+ indexBufferBaseOffset),
+ draw->instanceCount, draw->baseVertex, draw->firstInstance);
+ } else {
+ // This branch is only needed on OpenGL < 4.2; ES < 3.2
+ if (draw->baseVertex != 0) {
+ gl.DrawElementsInstancedBaseVertex(
lastPipeline->GetGLPrimitiveTopology(), draw->indexCount,
indexBufferFormat,
reinterpret_cast<void*>(draw->firstIndex * indexFormatSize +
indexBufferBaseOffset),
- draw->instanceCount, draw->baseVertex, draw->firstInstance);
+ draw->instanceCount, draw->baseVertex);
} else {
- // This branch is only needed on OpenGL < 4.2; ES < 3.2
- if (draw->baseVertex != 0) {
- gl.DrawElementsInstancedBaseVertex(
- lastPipeline->GetGLPrimitiveTopology(), draw->indexCount,
- indexBufferFormat,
- reinterpret_cast<void*>(draw->firstIndex * indexFormatSize +
- indexBufferBaseOffset),
- draw->instanceCount, draw->baseVertex);
- } else {
- // This branch is only needed on OpenGL < 3.2; ES < 3.2
- gl.DrawElementsInstanced(
- lastPipeline->GetGLPrimitiveTopology(), draw->indexCount,
- indexBufferFormat,
- reinterpret_cast<void*>(draw->firstIndex * indexFormatSize +
- indexBufferBaseOffset),
- draw->instanceCount);
- }
+ // This branch is only needed on OpenGL < 3.2; ES < 3.2
+ gl.DrawElementsInstanced(
+ lastPipeline->GetGLPrimitiveTopology(), draw->indexCount,
+ indexBufferFormat,
+ reinterpret_cast<void*>(draw->firstIndex * indexFormatSize +
+ indexBufferBaseOffset),
+ draw->instanceCount);
}
- break;
}
+ break;
+ }
- case Command::DrawIndirect: {
- DrawIndirectCmd* draw = iter->NextCommand<DrawIndirectCmd>();
- vertexStateBufferBindingTracker.Apply(gl);
- bindGroupTracker.Apply(gl);
+ case Command::DrawIndirect: {
+ DrawIndirectCmd* draw = iter->NextCommand<DrawIndirectCmd>();
+ vertexStateBufferBindingTracker.Apply(gl);
+ bindGroupTracker.Apply(gl);
- uint64_t indirectBufferOffset = draw->indirectOffset;
- Buffer* indirectBuffer = ToBackend(draw->indirectBuffer.Get());
+ uint64_t indirectBufferOffset = draw->indirectOffset;
+ Buffer* indirectBuffer = ToBackend(draw->indirectBuffer.Get());
- gl.BindBuffer(GL_DRAW_INDIRECT_BUFFER, indirectBuffer->GetHandle());
- gl.DrawArraysIndirect(
- lastPipeline->GetGLPrimitiveTopology(),
- reinterpret_cast<void*>(static_cast<intptr_t>(indirectBufferOffset)));
- break;
- }
+ gl.BindBuffer(GL_DRAW_INDIRECT_BUFFER, indirectBuffer->GetHandle());
+ gl.DrawArraysIndirect(
+ lastPipeline->GetGLPrimitiveTopology(),
+ reinterpret_cast<void*>(static_cast<intptr_t>(indirectBufferOffset)));
+ break;
+ }
- case Command::DrawIndexedIndirect: {
- DrawIndexedIndirectCmd* draw = iter->NextCommand<DrawIndexedIndirectCmd>();
+ case Command::DrawIndexedIndirect: {
+ DrawIndexedIndirectCmd* draw = iter->NextCommand<DrawIndexedIndirectCmd>();
- vertexStateBufferBindingTracker.Apply(gl);
- bindGroupTracker.Apply(gl);
+ vertexStateBufferBindingTracker.Apply(gl);
+ bindGroupTracker.Apply(gl);
- Buffer* indirectBuffer = ToBackend(draw->indirectBuffer.Get());
- ASSERT(indirectBuffer != nullptr);
+ Buffer* indirectBuffer = ToBackend(draw->indirectBuffer.Get());
+ ASSERT(indirectBuffer != nullptr);
- gl.BindBuffer(GL_DRAW_INDIRECT_BUFFER, indirectBuffer->GetHandle());
- gl.DrawElementsIndirect(
- lastPipeline->GetGLPrimitiveTopology(), indexBufferFormat,
- reinterpret_cast<void*>(static_cast<intptr_t>(draw->indirectOffset)));
- break;
- }
+ gl.BindBuffer(GL_DRAW_INDIRECT_BUFFER, indirectBuffer->GetHandle());
+ gl.DrawElementsIndirect(
+ lastPipeline->GetGLPrimitiveTopology(), indexBufferFormat,
+ reinterpret_cast<void*>(static_cast<intptr_t>(draw->indirectOffset)));
+ break;
+ }
- case Command::InsertDebugMarker:
- case Command::PopDebugGroup:
- case Command::PushDebugGroup: {
- // Due to lack of linux driver support for GL_EXT_debug_marker
- // extension these functions are skipped.
- SkipCommand(iter, type);
- break;
- }
+ case Command::InsertDebugMarker:
+ case Command::PopDebugGroup:
+ case Command::PushDebugGroup: {
+                // Due to lack of Linux driver support for the GL_EXT_debug_marker
+                // extension, these functions are skipped.
+ SkipCommand(iter, type);
+ break;
+ }
- case Command::SetRenderPipeline: {
- SetRenderPipelineCmd* cmd = iter->NextCommand<SetRenderPipelineCmd>();
- lastPipeline = ToBackend(cmd->pipeline).Get();
- lastPipeline->ApplyNow(persistentPipelineState);
+ case Command::SetRenderPipeline: {
+ SetRenderPipelineCmd* cmd = iter->NextCommand<SetRenderPipelineCmd>();
+ lastPipeline = ToBackend(cmd->pipeline).Get();
+ lastPipeline->ApplyNow(persistentPipelineState);
- vertexStateBufferBindingTracker.OnSetPipeline(lastPipeline);
- bindGroupTracker.OnSetPipeline(lastPipeline);
- break;
- }
+ vertexStateBufferBindingTracker.OnSetPipeline(lastPipeline);
+ bindGroupTracker.OnSetPipeline(lastPipeline);
+ break;
+ }
- case Command::SetBindGroup: {
- SetBindGroupCmd* cmd = iter->NextCommand<SetBindGroupCmd>();
- uint32_t* dynamicOffsets = nullptr;
- if (cmd->dynamicOffsetCount > 0) {
- dynamicOffsets = iter->NextData<uint32_t>(cmd->dynamicOffsetCount);
- }
- bindGroupTracker.OnSetBindGroup(cmd->index, cmd->group.Get(),
- cmd->dynamicOffsetCount, dynamicOffsets);
- break;
+ case Command::SetBindGroup: {
+ SetBindGroupCmd* cmd = iter->NextCommand<SetBindGroupCmd>();
+ uint32_t* dynamicOffsets = nullptr;
+ if (cmd->dynamicOffsetCount > 0) {
+ dynamicOffsets = iter->NextData<uint32_t>(cmd->dynamicOffsetCount);
}
+ bindGroupTracker.OnSetBindGroup(cmd->index, cmd->group.Get(),
+ cmd->dynamicOffsetCount, dynamicOffsets);
+ break;
+ }
- case Command::SetIndexBuffer: {
- SetIndexBufferCmd* cmd = iter->NextCommand<SetIndexBufferCmd>();
+ case Command::SetIndexBuffer: {
+ SetIndexBufferCmd* cmd = iter->NextCommand<SetIndexBufferCmd>();
- indexBufferBaseOffset = cmd->offset;
- indexBufferFormat = IndexFormatType(cmd->format);
- indexFormatSize = IndexFormatSize(cmd->format);
- vertexStateBufferBindingTracker.OnSetIndexBuffer(cmd->buffer.Get());
- break;
- }
-
- case Command::SetVertexBuffer: {
- SetVertexBufferCmd* cmd = iter->NextCommand<SetVertexBufferCmd>();
- vertexStateBufferBindingTracker.OnSetVertexBuffer(cmd->slot, cmd->buffer.Get(),
- cmd->offset);
- break;
- }
+ indexBufferBaseOffset = cmd->offset;
+ indexBufferFormat = IndexFormatType(cmd->format);
+ indexFormatSize = IndexFormatSize(cmd->format);
+ vertexStateBufferBindingTracker.OnSetIndexBuffer(cmd->buffer.Get());
+ break;
+ }
- default:
- UNREACHABLE();
- break;
+ case Command::SetVertexBuffer: {
+ SetVertexBufferCmd* cmd = iter->NextCommand<SetVertexBufferCmd>();
+ vertexStateBufferBindingTracker.OnSetVertexBuffer(cmd->slot, cmd->buffer.Get(),
+ cmd->offset);
+ break;
}
- };
-
- Command type;
- while (mCommands.NextCommandId(&type)) {
- switch (type) {
- case Command::EndRenderPass: {
- mCommands.NextCommand<EndRenderPassCmd>();
-
- for (ColorAttachmentIndex i :
- IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
- TextureView* textureView =
- ToBackend(renderPass->colorAttachments[i].view.Get());
- ToBackend(textureView->GetTexture())->Touch();
- }
- if (renderPass->attachmentState->HasDepthStencilAttachment()) {
- TextureView* textureView =
- ToBackend(renderPass->depthStencilAttachment.view.Get());
- ToBackend(textureView->GetTexture())->Touch();
- }
- if (renderPass->attachmentState->GetSampleCount() > 1) {
- ResolveMultisampledRenderTargets(gl, renderPass);
- }
- gl.DeleteFramebuffers(1, &fbo);
- return {};
- }
- case Command::SetStencilReference: {
- SetStencilReferenceCmd* cmd = mCommands.NextCommand<SetStencilReferenceCmd>();
- persistentPipelineState.SetStencilReference(gl, cmd->reference);
- break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ };
+
+ Command type;
+ while (mCommands.NextCommandId(&type)) {
+ switch (type) {
+ case Command::EndRenderPass: {
+ mCommands.NextCommand<EndRenderPassCmd>();
+
+ for (ColorAttachmentIndex i :
+ IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
+ TextureView* textureView =
+ ToBackend(renderPass->colorAttachments[i].view.Get());
+ ToBackend(textureView->GetTexture())->Touch();
}
-
- case Command::SetViewport: {
- SetViewportCmd* cmd = mCommands.NextCommand<SetViewportCmd>();
- if (gl.IsAtLeastGL(4, 1)) {
- gl.ViewportIndexedf(0, cmd->x, cmd->y, cmd->width, cmd->height);
- } else {
- // Floating-point viewport coords are unsupported on OpenGL ES, but
- // truncation is ok because other APIs do not guarantee subpixel precision
- // either.
- gl.Viewport(static_cast<int>(cmd->x), static_cast<int>(cmd->y),
- static_cast<int>(cmd->width), static_cast<int>(cmd->height));
- }
- gl.DepthRangef(cmd->minDepth, cmd->maxDepth);
- break;
+ if (renderPass->attachmentState->HasDepthStencilAttachment()) {
+ TextureView* textureView =
+ ToBackend(renderPass->depthStencilAttachment.view.Get());
+ ToBackend(textureView->GetTexture())->Touch();
}
-
- case Command::SetScissorRect: {
- SetScissorRectCmd* cmd = mCommands.NextCommand<SetScissorRectCmd>();
- gl.Scissor(cmd->x, cmd->y, cmd->width, cmd->height);
- break;
+ if (renderPass->attachmentState->GetSampleCount() > 1) {
+ ResolveMultisampledRenderTargets(gl, renderPass);
}
+ gl.DeleteFramebuffers(1, &fbo);
+ return {};
+ }
- case Command::SetBlendConstant: {
- SetBlendConstantCmd* cmd = mCommands.NextCommand<SetBlendConstantCmd>();
- const std::array<float, 4> blendColor = ConvertToFloatColor(cmd->color);
- gl.BlendColor(blendColor[0], blendColor[1], blendColor[2], blendColor[3]);
- break;
+ case Command::SetStencilReference: {
+ SetStencilReferenceCmd* cmd = mCommands.NextCommand<SetStencilReferenceCmd>();
+ persistentPipelineState.SetStencilReference(gl, cmd->reference);
+ break;
+ }
+
+ case Command::SetViewport: {
+ SetViewportCmd* cmd = mCommands.NextCommand<SetViewportCmd>();
+ if (gl.IsAtLeastGL(4, 1)) {
+ gl.ViewportIndexedf(0, cmd->x, cmd->y, cmd->width, cmd->height);
+ } else {
+ // Floating-point viewport coords are unsupported on OpenGL ES, but
+ // truncation is ok because other APIs do not guarantee subpixel precision
+ // either.
+ gl.Viewport(static_cast<int>(cmd->x), static_cast<int>(cmd->y),
+ static_cast<int>(cmd->width), static_cast<int>(cmd->height));
}
+ gl.DepthRangef(cmd->minDepth, cmd->maxDepth);
+ break;
+ }
- case Command::ExecuteBundles: {
- ExecuteBundlesCmd* cmd = mCommands.NextCommand<ExecuteBundlesCmd>();
- auto bundles = mCommands.NextData<Ref<RenderBundleBase>>(cmd->count);
+ case Command::SetScissorRect: {
+ SetScissorRectCmd* cmd = mCommands.NextCommand<SetScissorRectCmd>();
+ gl.Scissor(cmd->x, cmd->y, cmd->width, cmd->height);
+ break;
+ }
- for (uint32_t i = 0; i < cmd->count; ++i) {
- CommandIterator* iter = bundles[i]->GetCommands();
- iter->Reset();
- while (iter->NextCommandId(&type)) {
- DoRenderBundleCommand(iter, type);
- }
+ case Command::SetBlendConstant: {
+ SetBlendConstantCmd* cmd = mCommands.NextCommand<SetBlendConstantCmd>();
+ const std::array<float, 4> blendColor = ConvertToFloatColor(cmd->color);
+ gl.BlendColor(blendColor[0], blendColor[1], blendColor[2], blendColor[3]);
+ break;
+ }
+
+ case Command::ExecuteBundles: {
+ ExecuteBundlesCmd* cmd = mCommands.NextCommand<ExecuteBundlesCmd>();
+ auto bundles = mCommands.NextData<Ref<RenderBundleBase>>(cmd->count);
+
+ for (uint32_t i = 0; i < cmd->count; ++i) {
+ CommandIterator* iter = bundles[i]->GetCommands();
+ iter->Reset();
+ while (iter->NextCommandId(&type)) {
+ DoRenderBundleCommand(iter, type);
}
- break;
}
+ break;
+ }
- case Command::BeginOcclusionQuery: {
- return DAWN_UNIMPLEMENTED_ERROR("BeginOcclusionQuery unimplemented.");
- }
+ case Command::BeginOcclusionQuery: {
+ return DAWN_UNIMPLEMENTED_ERROR("BeginOcclusionQuery unimplemented.");
+ }
- case Command::EndOcclusionQuery: {
- return DAWN_UNIMPLEMENTED_ERROR("EndOcclusionQuery unimplemented.");
- }
+ case Command::EndOcclusionQuery: {
+ return DAWN_UNIMPLEMENTED_ERROR("EndOcclusionQuery unimplemented.");
+ }
- case Command::WriteTimestamp:
- return DAWN_UNIMPLEMENTED_ERROR("WriteTimestamp unimplemented");
+ case Command::WriteTimestamp:
+ return DAWN_UNIMPLEMENTED_ERROR("WriteTimestamp unimplemented");
- default: {
- DoRenderBundleCommand(&mCommands, type);
- break;
- }
+ default: {
+ DoRenderBundleCommand(&mCommands, type);
+ break;
}
}
-
- // EndRenderPass should have been called
- UNREACHABLE();
}
- void DoTexSubImage(const OpenGLFunctions& gl,
- const TextureCopy& destination,
- const void* data,
- const TextureDataLayout& dataLayout,
- const Extent3D& copySize) {
- Texture* texture = ToBackend(destination.texture.Get());
- ASSERT(texture->GetDimension() != wgpu::TextureDimension::e1D);
-
- const GLFormat& format = texture->GetGLFormat();
- GLenum target = texture->GetGLTarget();
- data = static_cast<const uint8_t*>(data) + dataLayout.offset;
- gl.ActiveTexture(GL_TEXTURE0);
- gl.BindTexture(target, texture->GetHandle());
- const TexelBlockInfo& blockInfo =
- texture->GetFormat().GetAspectInfo(destination.aspect).block;
-
- uint32_t x = destination.origin.x;
- uint32_t y = destination.origin.y;
- uint32_t z = destination.origin.z;
- if (texture->GetFormat().isCompressed) {
- size_t rowSize = copySize.width / blockInfo.width * blockInfo.byteSize;
- Extent3D virtSize = texture->GetMipLevelVirtualSize(destination.mipLevel);
- uint32_t width = std::min(copySize.width, virtSize.width - x);
-
- // In GLES glPixelStorei() doesn't affect CompressedTexSubImage*D() and
- // GL_UNPACK_COMPRESSED_BLOCK_* isn't defined, so we have to workaround
- // this limitation by copying the compressed texture data once per row.
- // See OpenGL ES 3.2 SPEC Chapter 8.4.1, "Pixel Storage Modes and Pixel
- // Buffer Objects" for more details. For Desktop GL, we use row-by-row
- // copies only for uploads where bytesPerRow is not a multiple of byteSize.
- if (dataLayout.bytesPerRow % blockInfo.byteSize == 0 && gl.GetVersion().IsDesktop()) {
- size_t imageSize =
- rowSize * (copySize.height / blockInfo.height) * copySize.depthOrArrayLayers;
-
- uint32_t height = std::min(copySize.height, virtSize.height - y);
-
- gl.PixelStorei(GL_UNPACK_ROW_LENGTH,
- dataLayout.bytesPerRow / blockInfo.byteSize * blockInfo.width);
- gl.PixelStorei(GL_UNPACK_COMPRESSED_BLOCK_SIZE, blockInfo.byteSize);
- gl.PixelStorei(GL_UNPACK_COMPRESSED_BLOCK_WIDTH, blockInfo.width);
- gl.PixelStorei(GL_UNPACK_COMPRESSED_BLOCK_HEIGHT, blockInfo.height);
- gl.PixelStorei(GL_UNPACK_COMPRESSED_BLOCK_DEPTH, 1);
-
- if (texture->GetArrayLayers() == 1 &&
- texture->GetDimension() == wgpu::TextureDimension::e2D) {
+ // EndRenderPass should have been called
+ UNREACHABLE();
+}
+
+void DoTexSubImage(const OpenGLFunctions& gl,
+ const TextureCopy& destination,
+ const void* data,
+ const TextureDataLayout& dataLayout,
+ const Extent3D& copySize) {
+ Texture* texture = ToBackend(destination.texture.Get());
+ ASSERT(texture->GetDimension() != wgpu::TextureDimension::e1D);
+
+ const GLFormat& format = texture->GetGLFormat();
+ GLenum target = texture->GetGLTarget();
+ data = static_cast<const uint8_t*>(data) + dataLayout.offset;
+ gl.ActiveTexture(GL_TEXTURE0);
+ gl.BindTexture(target, texture->GetHandle());
+ const TexelBlockInfo& blockInfo = texture->GetFormat().GetAspectInfo(destination.aspect).block;
+
+ uint32_t x = destination.origin.x;
+ uint32_t y = destination.origin.y;
+ uint32_t z = destination.origin.z;
+ if (texture->GetFormat().isCompressed) {
+ size_t rowSize = copySize.width / blockInfo.width * blockInfo.byteSize;
+ Extent3D virtSize = texture->GetMipLevelSingleSubresourceVirtualSize(destination.mipLevel);
+ uint32_t width = std::min(copySize.width, virtSize.width - x);
+
+ // In GLES glPixelStorei() doesn't affect CompressedTexSubImage*D() and
+    // GL_UNPACK_COMPRESSED_BLOCK_* isn't defined, so we have to work around
+ // this limitation by copying the compressed texture data once per row.
+ // See OpenGL ES 3.2 SPEC Chapter 8.4.1, "Pixel Storage Modes and Pixel
+ // Buffer Objects" for more details. For Desktop GL, we use row-by-row
+ // copies only for uploads where bytesPerRow is not a multiple of byteSize.
+ if (dataLayout.bytesPerRow % blockInfo.byteSize == 0 && gl.GetVersion().IsDesktop()) {
+ size_t imageSize =
+ rowSize * (copySize.height / blockInfo.height) * copySize.depthOrArrayLayers;
+
+ uint32_t height = std::min(copySize.height, virtSize.height - y);
+
+ gl.PixelStorei(GL_UNPACK_ROW_LENGTH,
+ dataLayout.bytesPerRow / blockInfo.byteSize * blockInfo.width);
+ gl.PixelStorei(GL_UNPACK_COMPRESSED_BLOCK_SIZE, blockInfo.byteSize);
+ gl.PixelStorei(GL_UNPACK_COMPRESSED_BLOCK_WIDTH, blockInfo.width);
+ gl.PixelStorei(GL_UNPACK_COMPRESSED_BLOCK_HEIGHT, blockInfo.height);
+ gl.PixelStorei(GL_UNPACK_COMPRESSED_BLOCK_DEPTH, 1);
+
+ if (texture->GetArrayLayers() == 1 &&
+ texture->GetDimension() == wgpu::TextureDimension::e2D) {
+ gl.CompressedTexSubImage2D(target, destination.mipLevel, x, y, width, height,
+ format.internalFormat, imageSize, data);
+ } else {
+ gl.PixelStorei(GL_UNPACK_IMAGE_HEIGHT, dataLayout.rowsPerImage * blockInfo.height);
+ gl.CompressedTexSubImage3D(target, destination.mipLevel, x, y, z, width, height,
+ copySize.depthOrArrayLayers, format.internalFormat,
+ imageSize, data);
+ gl.PixelStorei(GL_UNPACK_IMAGE_HEIGHT, 0);
+ }
+
+ gl.PixelStorei(GL_UNPACK_ROW_LENGTH, 0);
+ gl.PixelStorei(GL_UNPACK_COMPRESSED_BLOCK_SIZE, 0);
+ gl.PixelStorei(GL_UNPACK_COMPRESSED_BLOCK_WIDTH, 0);
+ gl.PixelStorei(GL_UNPACK_COMPRESSED_BLOCK_HEIGHT, 0);
+ gl.PixelStorei(GL_UNPACK_COMPRESSED_BLOCK_DEPTH, 0);
+ } else {
+ if (texture->GetArrayLayers() == 1 &&
+ texture->GetDimension() == wgpu::TextureDimension::e2D) {
+ const uint8_t* d = static_cast<const uint8_t*>(data);
+
+ for (; y < destination.origin.y + copySize.height; y += blockInfo.height) {
+ uint32_t height = std::min(blockInfo.height, virtSize.height - y);
gl.CompressedTexSubImage2D(target, destination.mipLevel, x, y, width, height,
- format.internalFormat, imageSize, data);
- } else {
- gl.PixelStorei(GL_UNPACK_IMAGE_HEIGHT,
- dataLayout.rowsPerImage * blockInfo.height);
- gl.CompressedTexSubImage3D(target, destination.mipLevel, x, y, z, width, height,
- copySize.depthOrArrayLayers, format.internalFormat,
- imageSize, data);
- gl.PixelStorei(GL_UNPACK_IMAGE_HEIGHT, 0);
+ format.internalFormat, rowSize, d);
+ d += dataLayout.bytesPerRow;
}
-
- gl.PixelStorei(GL_UNPACK_ROW_LENGTH, 0);
- gl.PixelStorei(GL_UNPACK_COMPRESSED_BLOCK_SIZE, 0);
- gl.PixelStorei(GL_UNPACK_COMPRESSED_BLOCK_WIDTH, 0);
- gl.PixelStorei(GL_UNPACK_COMPRESSED_BLOCK_HEIGHT, 0);
- gl.PixelStorei(GL_UNPACK_COMPRESSED_BLOCK_DEPTH, 0);
} else {
- if (texture->GetArrayLayers() == 1 &&
- texture->GetDimension() == wgpu::TextureDimension::e2D) {
- const uint8_t* d = static_cast<const uint8_t*>(data);
+ const uint8_t* slice = static_cast<const uint8_t*>(data);
- for (; y < destination.origin.y + copySize.height; y += blockInfo.height) {
+ for (; z < destination.origin.z + copySize.depthOrArrayLayers; ++z) {
+ const uint8_t* d = slice;
+
+ for (y = destination.origin.y; y < destination.origin.y + copySize.height;
+ y += blockInfo.height) {
uint32_t height = std::min(blockInfo.height, virtSize.height - y);
- gl.CompressedTexSubImage2D(target, destination.mipLevel, x, y, width,
- height, format.internalFormat, rowSize, d);
+ gl.CompressedTexSubImage3D(target, destination.mipLevel, x, y, z, width,
+ height, 1, format.internalFormat, rowSize, d);
d += dataLayout.bytesPerRow;
}
- } else {
- const uint8_t* slice = static_cast<const uint8_t*>(data);
-
- for (; z < destination.origin.z + copySize.depthOrArrayLayers; ++z) {
- const uint8_t* d = slice;
-
- for (y = destination.origin.y; y < destination.origin.y + copySize.height;
- y += blockInfo.height) {
- uint32_t height = std::min(blockInfo.height, virtSize.height - y);
- gl.CompressedTexSubImage3D(target, destination.mipLevel, x, y, z, width,
- height, 1, format.internalFormat, rowSize,
- d);
- d += dataLayout.bytesPerRow;
- }
- slice += dataLayout.rowsPerImage * dataLayout.bytesPerRow;
- }
+ slice += dataLayout.rowsPerImage * dataLayout.bytesPerRow;
}
}
+ }
+ } else {
+ uint32_t width = copySize.width;
+ uint32_t height = copySize.height;
+ if (dataLayout.bytesPerRow % blockInfo.byteSize == 0) {
+ gl.PixelStorei(GL_UNPACK_ROW_LENGTH,
+ dataLayout.bytesPerRow / blockInfo.byteSize * blockInfo.width);
+ if (texture->GetArrayLayers() == 1 &&
+ texture->GetDimension() == wgpu::TextureDimension::e2D) {
+ gl.TexSubImage2D(target, destination.mipLevel, x, y, width, height, format.format,
+ format.type, data);
+ } else {
+ gl.PixelStorei(GL_UNPACK_IMAGE_HEIGHT, dataLayout.rowsPerImage * blockInfo.height);
+ gl.TexSubImage3D(target, destination.mipLevel, x, y, z, width, height,
+ copySize.depthOrArrayLayers, format.format, format.type, data);
+ gl.PixelStorei(GL_UNPACK_IMAGE_HEIGHT, 0);
+ }
+ gl.PixelStorei(GL_UNPACK_ROW_LENGTH, 0);
} else {
- uint32_t width = copySize.width;
- uint32_t height = copySize.height;
- if (dataLayout.bytesPerRow % blockInfo.byteSize == 0) {
- gl.PixelStorei(GL_UNPACK_ROW_LENGTH,
- dataLayout.bytesPerRow / blockInfo.byteSize * blockInfo.width);
- if (texture->GetArrayLayers() == 1 &&
- texture->GetDimension() == wgpu::TextureDimension::e2D) {
- gl.TexSubImage2D(target, destination.mipLevel, x, y, width, height,
- format.format, format.type, data);
- } else {
- gl.PixelStorei(GL_UNPACK_IMAGE_HEIGHT,
- dataLayout.rowsPerImage * blockInfo.height);
- gl.TexSubImage3D(target, destination.mipLevel, x, y, z, width, height,
- copySize.depthOrArrayLayers, format.format, format.type, data);
- gl.PixelStorei(GL_UNPACK_IMAGE_HEIGHT, 0);
+ if (texture->GetArrayLayers() == 1 &&
+ texture->GetDimension() == wgpu::TextureDimension::e2D) {
+ const uint8_t* d = static_cast<const uint8_t*>(data);
+ for (; y < destination.origin.y + height; ++y) {
+ gl.TexSubImage2D(target, destination.mipLevel, x, y, width, 1, format.format,
+ format.type, d);
+ d += dataLayout.bytesPerRow;
}
- gl.PixelStorei(GL_UNPACK_ROW_LENGTH, 0);
} else {
- if (texture->GetArrayLayers() == 1 &&
- texture->GetDimension() == wgpu::TextureDimension::e2D) {
- const uint8_t* d = static_cast<const uint8_t*>(data);
- for (; y < destination.origin.y + height; ++y) {
- gl.TexSubImage2D(target, destination.mipLevel, x, y, width, 1,
+ const uint8_t* slice = static_cast<const uint8_t*>(data);
+ for (; z < destination.origin.z + copySize.depthOrArrayLayers; ++z) {
+ const uint8_t* d = slice;
+ for (y = destination.origin.y; y < destination.origin.y + height; ++y) {
+ gl.TexSubImage3D(target, destination.mipLevel, x, y, z, width, 1, 1,
format.format, format.type, d);
d += dataLayout.bytesPerRow;
}
- } else {
- const uint8_t* slice = static_cast<const uint8_t*>(data);
- for (; z < destination.origin.z + copySize.depthOrArrayLayers; ++z) {
- const uint8_t* d = slice;
- for (y = destination.origin.y; y < destination.origin.y + height; ++y) {
- gl.TexSubImage3D(target, destination.mipLevel, x, y, z, width, 1, 1,
- format.format, format.type, d);
- d += dataLayout.bytesPerRow;
- }
- slice += dataLayout.rowsPerImage * dataLayout.bytesPerRow;
- }
+ slice += dataLayout.rowsPerImage * dataLayout.bytesPerRow;
}
}
}
}
+}
} // namespace dawn::native::opengl
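The row-by-row path in DoTexSubImage above exists because, as its comment notes, on GLES glPixelStorei() state does not affect CompressedTexSubImage*D() and the GL_UNPACK_COMPRESSED_BLOCK_* parameters are not defined, so each row of compressed blocks has to be uploaded with its own call. The sketch below is a minimal, standalone illustration of that pattern, not Dawn code; the function name, its parameters, and the GLES3/gl32.h header are assumptions, and it expects a current GL context with the destination compressed 2D texture already bound.

#include <GLES3/gl32.h>
#include <algorithm>
#include <cstddef>
#include <cstdint>

// Uploads a rectangle of a compressed 2D texture one block row at a time, so the
// source stride (bytesPerRow) does not have to match the tightly packed row size.
void UploadCompressed2DRowByRow(GLenum target, GLint mipLevel, GLenum internalFormat,
                                uint32_t x, uint32_t y,                   // destination origin, texels
                                uint32_t copyWidth, uint32_t copyHeight,  // copy size, texels
                                uint32_t blockWidth, uint32_t blockHeight,
                                uint32_t blockByteSize, uint32_t bytesPerRow,
                                const uint8_t* data) {
    // Bytes actually consumed by one row of compressed blocks.
    const size_t rowSize = static_cast<size_t>(copyWidth / blockWidth) * blockByteSize;
    for (uint32_t row = y; row < y + copyHeight; row += blockHeight) {
        // Clamp the last block row to the copy extent; DoTexSubImage above additionally
        // clamps against the mip level's virtual size.
        const uint32_t height = std::min(blockHeight, y + copyHeight - row);
        glCompressedTexSubImage2D(target, mipLevel, GLint(x), GLint(row), GLsizei(copyWidth),
                                  GLsizei(height), internalFormat, GLsizei(rowSize), data);
        data += bytesPerRow;  // step to the next block row in the source layout
    }
}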
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/CommandBufferGL.h b/chromium/third_party/dawn/src/dawn/native/opengl/CommandBufferGL.h
index 4b3da13263f..d2703772250 100644
--- a/chromium/third_party/dawn/src/dawn/native/opengl/CommandBufferGL.h
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/CommandBufferGL.h
@@ -18,32 +18,32 @@
#include "dawn/native/CommandBuffer.h"
namespace dawn::native {
- struct BeginRenderPassCmd;
+struct BeginRenderPassCmd;
} // namespace dawn::native
namespace dawn::native::opengl {
- class Device;
- struct OpenGLFunctions;
+class Device;
+struct OpenGLFunctions;
- class CommandBuffer final : public CommandBufferBase {
- public:
- CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor);
+class CommandBuffer final : public CommandBufferBase {
+ public:
+ CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor);
- MaybeError Execute();
+ MaybeError Execute();
- private:
- MaybeError ExecuteComputePass();
- MaybeError ExecuteRenderPass(BeginRenderPassCmd* renderPass);
- };
+ private:
+ MaybeError ExecuteComputePass();
+ MaybeError ExecuteRenderPass(BeginRenderPassCmd* renderPass);
+};
- // Like glTexSubImage*, the "data" argument is either a pointer to image data or
- // an offset if a PBO is bound.
- void DoTexSubImage(const OpenGLFunctions& gl,
- const TextureCopy& destination,
- const void* data,
- const TextureDataLayout& dataLayout,
- const Extent3D& copySize);
+// Like glTexSubImage*, the "data" argument is either a pointer to image data or
+// an offset if a PBO is bound.
+void DoTexSubImage(const OpenGLFunctions& gl,
+ const TextureCopy& destination,
+ const void* data,
+ const TextureDataLayout& dataLayout,
+ const Extent3D& copySize);
} // namespace dawn::native::opengl
#endif // SRC_DAWN_NATIVE_OPENGL_COMMANDBUFFERGL_H_
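The header comment above notes that the "data" argument of DoTexSubImage is either a pointer to image data or an offset when a PBO is bound. That is standard GL pixel-unpack behavior: with a buffer bound to GL_PIXEL_UNPACK_BUFFER, the pointer parameter is reinterpreted as a byte offset into that buffer. The standalone sketch below (not Dawn code; the 16x16 RGBA8 texture, function names, and GLES3/gl3.h header are illustrative assumptions) shows the two call shapes against plain glTexSubImage2D, assuming a current context with the texture bound to GL_TEXTURE_2D.

#include <GLES3/gl3.h>
#include <cstdint>

// With no pixel unpack buffer bound, "pixels" is a client-memory pointer.
void UploadFromClientMemory(const void* pixels) {
    glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
    glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, 16, 16, GL_RGBA, GL_UNSIGNED_BYTE, pixels);
}

// With a PBO bound to GL_PIXEL_UNPACK_BUFFER, the same argument is read as a
// byte offset into that buffer instead of a pointer.
void UploadFromPBO(GLuint pbo, uintptr_t offsetInPbo) {
    glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pbo);
    glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, 16, 16, GL_RGBA, GL_UNSIGNED_BYTE,
                    reinterpret_cast<const void*>(offsetInPbo));
    glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
}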
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/ComputePipelineGL.cpp b/chromium/third_party/dawn/src/dawn/native/opengl/ComputePipelineGL.cpp
index b53541156c8..35d2abda2f1 100644
--- a/chromium/third_party/dawn/src/dawn/native/opengl/ComputePipelineGL.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/ComputePipelineGL.cpp
@@ -18,28 +18,27 @@
namespace dawn::native::opengl {
- // static
- Ref<ComputePipeline> ComputePipeline::CreateUninitialized(
- Device* device,
- const ComputePipelineDescriptor* descriptor) {
- return AcquireRef(new ComputePipeline(device, descriptor));
- }
-
- ComputePipeline::~ComputePipeline() = default;
-
- void ComputePipeline::DestroyImpl() {
- ComputePipelineBase::DestroyImpl();
- DeleteProgram(ToBackend(GetDevice())->gl);
- }
-
- MaybeError ComputePipeline::Initialize() {
- DAWN_TRY(
- InitializeBase(ToBackend(GetDevice())->gl, ToBackend(GetLayout()), GetAllStages()));
- return {};
- }
-
- void ComputePipeline::ApplyNow() {
- PipelineGL::ApplyNow(ToBackend(GetDevice())->gl);
- }
+// static
+Ref<ComputePipeline> ComputePipeline::CreateUninitialized(
+ Device* device,
+ const ComputePipelineDescriptor* descriptor) {
+ return AcquireRef(new ComputePipeline(device, descriptor));
+}
+
+ComputePipeline::~ComputePipeline() = default;
+
+void ComputePipeline::DestroyImpl() {
+ ComputePipelineBase::DestroyImpl();
+ DeleteProgram(ToBackend(GetDevice())->gl);
+}
+
+MaybeError ComputePipeline::Initialize() {
+ DAWN_TRY(InitializeBase(ToBackend(GetDevice())->gl, ToBackend(GetLayout()), GetAllStages()));
+ return {};
+}
+
+void ComputePipeline::ApplyNow() {
+ PipelineGL::ApplyNow(ToBackend(GetDevice())->gl);
+}
} // namespace dawn::native::opengl
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/ComputePipelineGL.h b/chromium/third_party/dawn/src/dawn/native/opengl/ComputePipelineGL.h
index 00a3ded504b..b90bb08f03a 100644
--- a/chromium/third_party/dawn/src/dawn/native/opengl/ComputePipelineGL.h
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/ComputePipelineGL.h
@@ -23,23 +23,22 @@
namespace dawn::native::opengl {
- class Device;
+class Device;
- class ComputePipeline final : public ComputePipelineBase, public PipelineGL {
- public:
- static Ref<ComputePipeline> CreateUninitialized(
- Device* device,
- const ComputePipelineDescriptor* descriptor);
+class ComputePipeline final : public ComputePipelineBase, public PipelineGL {
+ public:
+ static Ref<ComputePipeline> CreateUninitialized(Device* device,
+ const ComputePipelineDescriptor* descriptor);
- void ApplyNow();
+ void ApplyNow();
- MaybeError Initialize() override;
+ MaybeError Initialize() override;
- private:
- using ComputePipelineBase::ComputePipelineBase;
- ~ComputePipeline() override;
- void DestroyImpl() override;
- };
+ private:
+ using ComputePipelineBase::ComputePipelineBase;
+ ~ComputePipeline() override;
+ void DestroyImpl() override;
+};
} // namespace dawn::native::opengl
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/DeviceGL.cpp b/chromium/third_party/dawn/src/dawn/native/opengl/DeviceGL.cpp
index 6ca733d3b4e..274cc1aed53 100644
--- a/chromium/third_party/dawn/src/dawn/native/opengl/DeviceGL.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/DeviceGL.cpp
@@ -34,282 +34,303 @@
namespace dawn::native::opengl {
- // static
- ResultOrError<Ref<Device>> Device::Create(AdapterBase* adapter,
- const DeviceDescriptor* descriptor,
- const OpenGLFunctions& functions) {
- Ref<Device> device = AcquireRef(new Device(adapter, descriptor, functions));
- DAWN_TRY(device->Initialize(descriptor));
- return device;
- }
-
- Device::Device(AdapterBase* adapter,
- const DeviceDescriptor* descriptor,
- const OpenGLFunctions& functions)
- : DeviceBase(adapter, descriptor), gl(functions) {
- }
-
- Device::~Device() {
- Destroy();
- }
-
- MaybeError Device::Initialize(const DeviceDescriptor* descriptor) {
- InitTogglesFromDriver();
- mFormatTable = BuildGLFormatTable();
-
- return DeviceBase::Initialize(AcquireRef(new Queue(this, &descriptor->defaultQueue)));
- }
-
- void Device::InitTogglesFromDriver() {
- bool supportsBaseVertex = gl.IsAtLeastGLES(3, 2) || gl.IsAtLeastGL(3, 2);
-
- bool supportsBaseInstance = gl.IsAtLeastGLES(3, 2) || gl.IsAtLeastGL(4, 2);
-
- // TODO(crbug.com/dawn/582): Use OES_draw_buffers_indexed where available.
- bool supportsIndexedDrawBuffers = gl.IsAtLeastGLES(3, 2) || gl.IsAtLeastGL(3, 0);
-
- bool supportsSnormRead =
- gl.IsAtLeastGL(4, 4) || gl.IsGLExtensionSupported("GL_EXT_render_snorm");
-
- bool supportsDepthStencilRead =
- gl.IsAtLeastGL(3, 0) || gl.IsGLExtensionSupported("GL_NV_read_depth_stencil");
-
- bool supportsSampleVariables = gl.IsAtLeastGL(4, 0) || gl.IsAtLeastGLES(3, 2) ||
- gl.IsGLExtensionSupported("GL_OES_sample_variables");
-
- // TODO(crbug.com/dawn/343): We can support the extension variants, but need to load the EXT
- // procs without the extension suffix.
- // We'll also need emulation of shader builtins gl_BaseVertex and gl_BaseInstance.
-
- // supportsBaseVertex |=
- // (gl.IsAtLeastGLES(2, 0) &&
- // (gl.IsGLExtensionSupported("OES_draw_elements_base_vertex") ||
- // gl.IsGLExtensionSupported("EXT_draw_elements_base_vertex"))) ||
- // (gl.IsAtLeastGL(3, 1) && gl.IsGLExtensionSupported("ARB_draw_elements_base_vertex"));
-
- // supportsBaseInstance |=
- // (gl.IsAtLeastGLES(3, 1) && gl.IsGLExtensionSupported("EXT_base_instance")) ||
- // (gl.IsAtLeastGL(3, 1) && gl.IsGLExtensionSupported("ARB_base_instance"));
-
- // TODO(crbug.com/dawn/343): Investigate emulation.
- SetToggle(Toggle::DisableBaseVertex, !supportsBaseVertex);
- SetToggle(Toggle::DisableBaseInstance, !supportsBaseInstance);
- SetToggle(Toggle::DisableIndexedDrawBuffers, !supportsIndexedDrawBuffers);
- SetToggle(Toggle::DisableSnormRead, !supportsSnormRead);
- SetToggle(Toggle::DisableDepthStencilRead, !supportsDepthStencilRead);
- SetToggle(Toggle::DisableSampleVariables, !supportsSampleVariables);
- SetToggle(Toggle::FlushBeforeClientWaitSync, gl.GetVersion().IsES());
- // For OpenGL ES, we must use dummy fragment shader for vertex-only render pipeline.
- SetToggle(Toggle::UseDummyFragmentInVertexOnlyPipeline, gl.GetVersion().IsES());
- }
-
- const GLFormat& Device::GetGLFormat(const Format& format) {
- ASSERT(format.isSupported);
- ASSERT(format.GetIndex() < mFormatTable.size());
-
- const GLFormat& result = mFormatTable[format.GetIndex()];
- ASSERT(result.isSupportedOnBackend);
- return result;
- }
-
- ResultOrError<Ref<BindGroupBase>> Device::CreateBindGroupImpl(
- const BindGroupDescriptor* descriptor) {
- DAWN_TRY(ValidateGLBindGroupDescriptor(descriptor));
- return BindGroup::Create(this, descriptor);
- }
- ResultOrError<Ref<BindGroupLayoutBase>> Device::CreateBindGroupLayoutImpl(
- const BindGroupLayoutDescriptor* descriptor,
- PipelineCompatibilityToken pipelineCompatibilityToken) {
- return AcquireRef(new BindGroupLayout(this, descriptor, pipelineCompatibilityToken));
- }
- ResultOrError<Ref<BufferBase>> Device::CreateBufferImpl(const BufferDescriptor* descriptor) {
- return AcquireRef(new Buffer(this, descriptor));
- }
- ResultOrError<Ref<CommandBufferBase>> Device::CreateCommandBuffer(
- CommandEncoder* encoder,
- const CommandBufferDescriptor* descriptor) {
- return AcquireRef(new CommandBuffer(encoder, descriptor));
- }
- Ref<ComputePipelineBase> Device::CreateUninitializedComputePipelineImpl(
- const ComputePipelineDescriptor* descriptor) {
- return ComputePipeline::CreateUninitialized(this, descriptor);
- }
- ResultOrError<Ref<PipelineLayoutBase>> Device::CreatePipelineLayoutImpl(
- const PipelineLayoutDescriptor* descriptor) {
- return AcquireRef(new PipelineLayout(this, descriptor));
- }
- ResultOrError<Ref<QuerySetBase>> Device::CreateQuerySetImpl(
- const QuerySetDescriptor* descriptor) {
- return AcquireRef(new QuerySet(this, descriptor));
- }
- Ref<RenderPipelineBase> Device::CreateUninitializedRenderPipelineImpl(
- const RenderPipelineDescriptor* descriptor) {
- return RenderPipeline::CreateUninitialized(this, descriptor);
- }
- ResultOrError<Ref<SamplerBase>> Device::CreateSamplerImpl(const SamplerDescriptor* descriptor) {
- return AcquireRef(new Sampler(this, descriptor));
- }
- ResultOrError<Ref<ShaderModuleBase>> Device::CreateShaderModuleImpl(
- const ShaderModuleDescriptor* descriptor,
- ShaderModuleParseResult* parseResult) {
- return ShaderModule::Create(this, descriptor, parseResult);
- }
- ResultOrError<Ref<SwapChainBase>> Device::CreateSwapChainImpl(
- const SwapChainDescriptor* descriptor) {
- return AcquireRef(new SwapChain(this, descriptor));
- }
- ResultOrError<Ref<NewSwapChainBase>> Device::CreateSwapChainImpl(
- Surface* surface,
- NewSwapChainBase* previousSwapChain,
- const SwapChainDescriptor* descriptor) {
- return DAWN_FORMAT_VALIDATION_ERROR("New swapchains not implemented.");
- }
- ResultOrError<Ref<TextureBase>> Device::CreateTextureImpl(const TextureDescriptor* descriptor) {
- return AcquireRef(new Texture(this, descriptor));
- }
- ResultOrError<Ref<TextureViewBase>> Device::CreateTextureViewImpl(
- TextureBase* texture,
- const TextureViewDescriptor* descriptor) {
- return AcquireRef(new TextureView(texture, descriptor));
- }
-
- void Device::SubmitFenceSync() {
- GLsync sync = gl.FenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
- IncrementLastSubmittedCommandSerial();
- mFencesInFlight.emplace(sync, GetLastSubmittedCommandSerial());
- }
-
- MaybeError Device::ValidateEGLImageCanBeWrapped(const TextureDescriptor* descriptor,
- ::EGLImage image) {
- DAWN_INVALID_IF(descriptor->dimension != wgpu::TextureDimension::e2D,
- "Texture dimension (%s) is not %s.", descriptor->dimension,
- wgpu::TextureDimension::e2D);
-
- DAWN_INVALID_IF(descriptor->mipLevelCount != 1, "Mip level count (%u) is not 1.",
- descriptor->mipLevelCount);
-
- DAWN_INVALID_IF(descriptor->size.depthOrArrayLayers != 1,
- "Array layer count (%u) is not 1.", descriptor->size.depthOrArrayLayers);
-
- DAWN_INVALID_IF(descriptor->sampleCount != 1, "Sample count (%u) is not 1.",
- descriptor->sampleCount);
-
- DAWN_INVALID_IF(descriptor->usage & (wgpu::TextureUsage::TextureBinding |
- wgpu::TextureUsage::StorageBinding),
- "Texture usage (%s) cannot have %s or %s.", descriptor->usage,
- wgpu::TextureUsage::TextureBinding, wgpu::TextureUsage::StorageBinding);
-
- return {};
- }
- TextureBase* Device::CreateTextureWrappingEGLImage(const ExternalImageDescriptor* descriptor,
- ::EGLImage image) {
- const TextureDescriptor* textureDescriptor = FromAPI(descriptor->cTextureDescriptor);
-
- if (ConsumedError(ValidateTextureDescriptor(this, textureDescriptor))) {
- return nullptr;
+// static
+ResultOrError<Ref<Device>> Device::Create(AdapterBase* adapter,
+ const DeviceDescriptor* descriptor,
+ const OpenGLFunctions& functions) {
+ Ref<Device> device = AcquireRef(new Device(adapter, descriptor, functions));
+ DAWN_TRY(device->Initialize(descriptor));
+ return device;
+}
+
+Device::Device(AdapterBase* adapter,
+ const DeviceDescriptor* descriptor,
+ const OpenGLFunctions& functions)
+ : DeviceBase(adapter, descriptor), gl(functions) {}
+
+Device::~Device() {
+ Destroy();
+}
+
+MaybeError Device::Initialize(const DeviceDescriptor* descriptor) {
+ InitTogglesFromDriver();
+ mFormatTable = BuildGLFormatTable(GetBGRAInternalFormat());
+
+ return DeviceBase::Initialize(AcquireRef(new Queue(this, &descriptor->defaultQueue)));
+}
+
+void Device::InitTogglesFromDriver() {
+ bool supportsBaseVertex = gl.IsAtLeastGLES(3, 2) || gl.IsAtLeastGL(3, 2);
+
+ bool supportsBaseInstance = gl.IsAtLeastGLES(3, 2) || gl.IsAtLeastGL(4, 2);
+
+ // TODO(crbug.com/dawn/582): Use OES_draw_buffers_indexed where available.
+ bool supportsIndexedDrawBuffers = gl.IsAtLeastGLES(3, 2) || gl.IsAtLeastGL(3, 0);
+
+ bool supportsSnormRead =
+ gl.IsAtLeastGL(4, 4) || gl.IsGLExtensionSupported("GL_EXT_render_snorm");
+
+ bool supportsDepthRead = gl.IsAtLeastGL(3, 0) || gl.IsGLExtensionSupported("GL_NV_read_depth");
+
+ bool supportsStencilRead =
+ gl.IsAtLeastGL(3, 0) || gl.IsGLExtensionSupported("GL_NV_read_stencil");
+
+ bool supportsDepthStencilRead =
+ gl.IsAtLeastGL(3, 0) || gl.IsGLExtensionSupported("GL_NV_read_depth_stencil");
+
+ // Desktop GL supports BGRA textures via swizzling in the driver; ES requires an extension.
+ bool supportsBGRARead =
+ gl.GetVersion().IsDesktop() || gl.IsGLExtensionSupported("GL_EXT_read_format_bgra");
+
+ bool supportsSampleVariables = gl.IsAtLeastGL(4, 0) || gl.IsAtLeastGLES(3, 2) ||
+ gl.IsGLExtensionSupported("GL_OES_sample_variables");
+
+ // TODO(crbug.com/dawn/343): We can support the extension variants, but need to load the EXT
+ // procs without the extension suffix.
+ // We'll also need emulation of shader builtins gl_BaseVertex and gl_BaseInstance.
+
+ // supportsBaseVertex |=
+ // (gl.IsAtLeastGLES(2, 0) &&
+ // (gl.IsGLExtensionSupported("OES_draw_elements_base_vertex") ||
+ // gl.IsGLExtensionSupported("EXT_draw_elements_base_vertex"))) ||
+ // (gl.IsAtLeastGL(3, 1) && gl.IsGLExtensionSupported("ARB_draw_elements_base_vertex"));
+
+ // supportsBaseInstance |=
+ // (gl.IsAtLeastGLES(3, 1) && gl.IsGLExtensionSupported("EXT_base_instance")) ||
+ // (gl.IsAtLeastGL(3, 1) && gl.IsGLExtensionSupported("ARB_base_instance"));
+
+ // TODO(crbug.com/dawn/343): Investigate emulation.
+ SetToggle(Toggle::DisableBaseVertex, !supportsBaseVertex);
+ SetToggle(Toggle::DisableBaseInstance, !supportsBaseInstance);
+ SetToggle(Toggle::DisableIndexedDrawBuffers, !supportsIndexedDrawBuffers);
+ SetToggle(Toggle::DisableSnormRead, !supportsSnormRead);
+ SetToggle(Toggle::DisableDepthRead, !supportsDepthRead);
+ SetToggle(Toggle::DisableStencilRead, !supportsStencilRead);
+ SetToggle(Toggle::DisableDepthStencilRead, !supportsDepthStencilRead);
+ SetToggle(Toggle::DisableBGRARead, !supportsBGRARead);
+ SetToggle(Toggle::DisableSampleVariables, !supportsSampleVariables);
+ SetToggle(Toggle::FlushBeforeClientWaitSync, gl.GetVersion().IsES());
+    // For OpenGL ES, we must use a placeholder fragment shader for vertex-only render pipelines.
+ SetToggle(Toggle::UsePlaceholderFragmentInVertexOnlyPipeline, gl.GetVersion().IsES());
+}
+
+const GLFormat& Device::GetGLFormat(const Format& format) {
+ ASSERT(format.isSupported);
+ ASSERT(format.GetIndex() < mFormatTable.size());
+
+ const GLFormat& result = mFormatTable[format.GetIndex()];
+ ASSERT(result.isSupportedOnBackend);
+ return result;
+}
+
+GLenum Device::GetBGRAInternalFormat() const {
+ if (gl.IsGLExtensionSupported("GL_EXT_texture_format_BGRA8888") ||
+ gl.IsGLExtensionSupported("GL_APPLE_texture_format_BGRA8888")) {
+ return GL_BGRA8_EXT;
+ } else {
+ // Desktop GL will swizzle to/from RGBA8 for BGRA formats.
+ return GL_RGBA8;
+ }
+}
+
+ResultOrError<Ref<BindGroupBase>> Device::CreateBindGroupImpl(
+ const BindGroupDescriptor* descriptor) {
+ DAWN_TRY(ValidateGLBindGroupDescriptor(descriptor));
+ return BindGroup::Create(this, descriptor);
+}
+ResultOrError<Ref<BindGroupLayoutBase>> Device::CreateBindGroupLayoutImpl(
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken) {
+ return AcquireRef(new BindGroupLayout(this, descriptor, pipelineCompatibilityToken));
+}
+ResultOrError<Ref<BufferBase>> Device::CreateBufferImpl(const BufferDescriptor* descriptor) {
+ return AcquireRef(new Buffer(this, descriptor));
+}
+ResultOrError<Ref<CommandBufferBase>> Device::CreateCommandBuffer(
+ CommandEncoder* encoder,
+ const CommandBufferDescriptor* descriptor) {
+ return AcquireRef(new CommandBuffer(encoder, descriptor));
+}
+Ref<ComputePipelineBase> Device::CreateUninitializedComputePipelineImpl(
+ const ComputePipelineDescriptor* descriptor) {
+ return ComputePipeline::CreateUninitialized(this, descriptor);
+}
+ResultOrError<Ref<PipelineLayoutBase>> Device::CreatePipelineLayoutImpl(
+ const PipelineLayoutDescriptor* descriptor) {
+ return AcquireRef(new PipelineLayout(this, descriptor));
+}
+ResultOrError<Ref<QuerySetBase>> Device::CreateQuerySetImpl(const QuerySetDescriptor* descriptor) {
+ return AcquireRef(new QuerySet(this, descriptor));
+}
+Ref<RenderPipelineBase> Device::CreateUninitializedRenderPipelineImpl(
+ const RenderPipelineDescriptor* descriptor) {
+ return RenderPipeline::CreateUninitialized(this, descriptor);
+}
+ResultOrError<Ref<SamplerBase>> Device::CreateSamplerImpl(const SamplerDescriptor* descriptor) {
+ return AcquireRef(new Sampler(this, descriptor));
+}
+ResultOrError<Ref<ShaderModuleBase>> Device::CreateShaderModuleImpl(
+ const ShaderModuleDescriptor* descriptor,
+ ShaderModuleParseResult* parseResult,
+ OwnedCompilationMessages* compilationMessages) {
+ return ShaderModule::Create(this, descriptor, parseResult, compilationMessages);
+}
+ResultOrError<Ref<SwapChainBase>> Device::CreateSwapChainImpl(
+ const SwapChainDescriptor* descriptor) {
+ return AcquireRef(new SwapChain(this, descriptor));
+}
+ResultOrError<Ref<NewSwapChainBase>> Device::CreateSwapChainImpl(
+ Surface* surface,
+ NewSwapChainBase* previousSwapChain,
+ const SwapChainDescriptor* descriptor) {
+ return DAWN_FORMAT_VALIDATION_ERROR("New swapchains not implemented.");
+}
+ResultOrError<Ref<TextureBase>> Device::CreateTextureImpl(const TextureDescriptor* descriptor) {
+ return AcquireRef(new Texture(this, descriptor));
+}
+ResultOrError<Ref<TextureViewBase>> Device::CreateTextureViewImpl(
+ TextureBase* texture,
+ const TextureViewDescriptor* descriptor) {
+ return AcquireRef(new TextureView(texture, descriptor));
+}
+
+void Device::SubmitFenceSync() {
+ GLsync sync = gl.FenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
+ IncrementLastSubmittedCommandSerial();
+ mFencesInFlight.emplace(sync, GetLastSubmittedCommandSerial());
+}
+
+MaybeError Device::ValidateEGLImageCanBeWrapped(const TextureDescriptor* descriptor,
+ ::EGLImage image) {
+ DAWN_INVALID_IF(descriptor->dimension != wgpu::TextureDimension::e2D,
+ "Texture dimension (%s) is not %s.", descriptor->dimension,
+ wgpu::TextureDimension::e2D);
+
+ DAWN_INVALID_IF(descriptor->mipLevelCount != 1, "Mip level count (%u) is not 1.",
+ descriptor->mipLevelCount);
+
+ DAWN_INVALID_IF(descriptor->size.depthOrArrayLayers != 1, "Array layer count (%u) is not 1.",
+ descriptor->size.depthOrArrayLayers);
+
+ DAWN_INVALID_IF(descriptor->sampleCount != 1, "Sample count (%u) is not 1.",
+ descriptor->sampleCount);
+
+ DAWN_INVALID_IF(descriptor->usage &
+ (wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::StorageBinding),
+ "Texture usage (%s) cannot have %s or %s.", descriptor->usage,
+ wgpu::TextureUsage::TextureBinding, wgpu::TextureUsage::StorageBinding);
+
+ return {};
+}
+TextureBase* Device::CreateTextureWrappingEGLImage(const ExternalImageDescriptor* descriptor,
+ ::EGLImage image) {
+ const TextureDescriptor* textureDescriptor = FromAPI(descriptor->cTextureDescriptor);
+
+ if (ConsumedError(ValidateTextureDescriptor(this, textureDescriptor))) {
+ return nullptr;
+ }
+ if (ConsumedError(ValidateEGLImageCanBeWrapped(textureDescriptor, image))) {
+ return nullptr;
+ }
+
+ GLuint tex;
+ gl.GenTextures(1, &tex);
+ gl.BindTexture(GL_TEXTURE_2D, tex);
+ gl.EGLImageTargetTexture2DOES(GL_TEXTURE_2D, image);
+
+ GLint width, height, internalFormat;
+ gl.GetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_WIDTH, &width);
+ gl.GetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_HEIGHT, &height);
+ gl.GetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_INTERNAL_FORMAT, &internalFormat);
+
+ if (textureDescriptor->size.width != static_cast<uint32_t>(width) ||
+ textureDescriptor->size.height != static_cast<uint32_t>(height) ||
+ textureDescriptor->size.depthOrArrayLayers != 1) {
+ ConsumedError(DAWN_FORMAT_VALIDATION_ERROR(
+ "EGLImage size (width: %u, height: %u, depth: 1) doesn't match descriptor size %s.",
+ width, height, &textureDescriptor->size));
+ gl.DeleteTextures(1, &tex);
+ return nullptr;
+ }
+
+ // TODO(dawn:803): Validate the OpenGL texture format from the EGLImage against the format
+ // in the passed-in TextureDescriptor.
+ return new Texture(this, textureDescriptor, tex, TextureBase::TextureState::OwnedInternal);
+}
+
+MaybeError Device::TickImpl() {
+ return {};
+}
+
+ResultOrError<ExecutionSerial> Device::CheckAndUpdateCompletedSerials() {
+ ExecutionSerial fenceSerial{0};
+ while (!mFencesInFlight.empty()) {
+ auto [sync, tentativeSerial] = mFencesInFlight.front();
+
+        // Fences are added in order, so we can stop searching as soon
+ // as we see one that's not ready.
+
+ // TODO(crbug.com/dawn/633): Remove this workaround after the deadlock issue is fixed.
+ if (IsToggleEnabled(Toggle::FlushBeforeClientWaitSync)) {
+ gl.Flush();
}
- if (ConsumedError(ValidateEGLImageCanBeWrapped(textureDescriptor, image))) {
- return nullptr;
- }
-
- GLuint tex;
- gl.GenTextures(1, &tex);
- gl.BindTexture(GL_TEXTURE_2D, tex);
- gl.EGLImageTargetTexture2DOES(GL_TEXTURE_2D, image);
-
- GLint width, height, internalFormat;
- gl.GetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_WIDTH, &width);
- gl.GetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_HEIGHT, &height);
- gl.GetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_INTERNAL_FORMAT, &internalFormat);
-
- if (textureDescriptor->size.width != static_cast<uint32_t>(width) ||
- textureDescriptor->size.height != static_cast<uint32_t>(height) ||
- textureDescriptor->size.depthOrArrayLayers != 1) {
- ConsumedError(DAWN_FORMAT_VALIDATION_ERROR(
- "EGLImage size (width: %u, height: %u, depth: 1) doesn't match descriptor size %s.",
- width, height, &textureDescriptor->size));
- gl.DeleteTextures(1, &tex);
- return nullptr;
+ GLenum result = gl.ClientWaitSync(sync, GL_SYNC_FLUSH_COMMANDS_BIT, 0);
+ if (result == GL_TIMEOUT_EXPIRED) {
+ return fenceSerial;
}
+ // Update fenceSerial since fence is ready.
+ fenceSerial = tentativeSerial;
- // TODO(dawn:803): Validate the OpenGL texture format from the EGLImage against the format
- // in the passed-in TextureDescriptor.
- return new Texture(this, textureDescriptor, tex, TextureBase::TextureState::OwnedInternal);
- }
-
- MaybeError Device::TickImpl() {
- return {};
- }
-
- ResultOrError<ExecutionSerial> Device::CheckAndUpdateCompletedSerials() {
- ExecutionSerial fenceSerial{0};
- while (!mFencesInFlight.empty()) {
- auto [sync, tentativeSerial] = mFencesInFlight.front();
+ gl.DeleteSync(sync);
- // Fence are added in order, so we can stop searching as soon
- // as we see one that's not ready.
+ mFencesInFlight.pop();
- // TODO(crbug.com/dawn/633): Remove this workaround after the deadlock issue is fixed.
- if (IsToggleEnabled(Toggle::FlushBeforeClientWaitSync)) {
- gl.Flush();
- }
- GLenum result = gl.ClientWaitSync(sync, GL_SYNC_FLUSH_COMMANDS_BIT, 0);
- if (result == GL_TIMEOUT_EXPIRED) {
- return fenceSerial;
- }
- // Update fenceSerial since fence is ready.
- fenceSerial = tentativeSerial;
-
- gl.DeleteSync(sync);
-
- mFencesInFlight.pop();
-
- ASSERT(fenceSerial > GetCompletedCommandSerial());
- }
- return fenceSerial;
+ ASSERT(fenceSerial > GetCompletedCommandSerial());
}
+ return fenceSerial;
+}
- ResultOrError<std::unique_ptr<StagingBufferBase>> Device::CreateStagingBuffer(size_t size) {
- return DAWN_UNIMPLEMENTED_ERROR("Device unable to create staging buffer.");
- }
+ResultOrError<std::unique_ptr<StagingBufferBase>> Device::CreateStagingBuffer(size_t size) {
+ return DAWN_UNIMPLEMENTED_ERROR("Device unable to create staging buffer.");
+}
- MaybeError Device::CopyFromStagingToBuffer(StagingBufferBase* source,
- uint64_t sourceOffset,
- BufferBase* destination,
- uint64_t destinationOffset,
- uint64_t size) {
- return DAWN_UNIMPLEMENTED_ERROR("Device unable to copy from staging buffer.");
- }
+MaybeError Device::CopyFromStagingToBuffer(StagingBufferBase* source,
+ uint64_t sourceOffset,
+ BufferBase* destination,
+ uint64_t destinationOffset,
+ uint64_t size) {
+ return DAWN_UNIMPLEMENTED_ERROR("Device unable to copy from staging buffer.");
+}
- MaybeError Device::CopyFromStagingToTexture(const StagingBufferBase* source,
- const TextureDataLayout& src,
- TextureCopy* dst,
- const Extent3D& copySizePixels) {
- return DAWN_UNIMPLEMENTED_ERROR("Device unable to copy from staging buffer to texture.");
- }
+MaybeError Device::CopyFromStagingToTexture(const StagingBufferBase* source,
+ const TextureDataLayout& src,
+ TextureCopy* dst,
+ const Extent3D& copySizePixels) {
+ return DAWN_UNIMPLEMENTED_ERROR("Device unable to copy from staging buffer to texture.");
+}
- void Device::DestroyImpl() {
- ASSERT(GetState() == State::Disconnected);
- }
+void Device::DestroyImpl() {
+ ASSERT(GetState() == State::Disconnected);
+}
- MaybeError Device::WaitForIdleForDestruction() {
- gl.Finish();
- DAWN_TRY(CheckPassedSerials());
- ASSERT(mFencesInFlight.empty());
+MaybeError Device::WaitForIdleForDestruction() {
+ gl.Finish();
+ DAWN_TRY(CheckPassedSerials());
+ ASSERT(mFencesInFlight.empty());
- return {};
- }
+ return {};
+}
- uint32_t Device::GetOptimalBytesPerRowAlignment() const {
- return 1;
- }
+uint32_t Device::GetOptimalBytesPerRowAlignment() const {
+ return 1;
+}
- uint64_t Device::GetOptimalBufferToTextureCopyOffsetAlignment() const {
- return 1;
- }
+uint64_t Device::GetOptimalBufferToTextureCopyOffsetAlignment() const {
+ return 1;
+}
- float Device::GetTimestampPeriodInNS() const {
- return 1.0f;
- }
+float Device::GetTimestampPeriodInNS() const {
+ return 1.0f;
+}
} // namespace dawn::native::opengl
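SubmitFenceSync() and CheckAndUpdateCompletedSerials() above pair each submission with a GLsync object and later poll the oldest fence with a zero timeout to learn which serials have completed. The class below is a simplified, self-contained sketch of that bookkeeping, not Dawn's implementation; the class and member names are invented, it assumes a current GL context, and it omits the FlushBeforeClientWaitSync workaround applied above.

#include <GLES3/gl32.h>
#include <cstdint>
#include <queue>
#include <utility>

class FenceTracker {
  public:
    // Call once per submission, after the GL commands have been issued.
    void OnSubmit() {
        GLsync sync = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
        mFences.emplace(sync, ++mLastSubmittedSerial);
    }

    // Returns the highest serial whose fence has signaled. Stops at the first
    // fence that is still pending, since fences complete in submission order.
    uint64_t PollCompletedSerial() {
        uint64_t completed = mLastCompletedSerial;
        while (!mFences.empty()) {
            auto [sync, serial] = mFences.front();
            GLenum status = glClientWaitSync(sync, GL_SYNC_FLUSH_COMMANDS_BIT, 0);
            if (status == GL_TIMEOUT_EXPIRED) {
                break;  // oldest fence not done yet, so later ones cannot be done either
            }
            completed = serial;
            glDeleteSync(sync);
            mFences.pop();
        }
        mLastCompletedSerial = completed;
        return completed;
    }

  private:
    std::queue<std::pair<GLsync, uint64_t>> mFences;
    uint64_t mLastSubmittedSerial = 0;
    uint64_t mLastCompletedSerial = 0;
};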
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/DeviceGL.h b/chromium/third_party/dawn/src/dawn/native/opengl/DeviceGL.h
index 3c9cd01f882..d9158952022 100644
--- a/chromium/third_party/dawn/src/dawn/native/opengl/DeviceGL.h
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/DeviceGL.h
@@ -15,6 +15,10 @@
#ifndef SRC_DAWN_NATIVE_OPENGL_DEVICEGL_H_
#define SRC_DAWN_NATIVE_OPENGL_DEVICEGL_H_
+#include <memory>
+#include <queue>
+#include <utility>
+
#include "dawn/native/dawn_platform.h"
#include "dawn/common/Platform.h"
@@ -24,107 +28,103 @@
#include "dawn/native/opengl/GLFormat.h"
#include "dawn/native/opengl/OpenGLFunctions.h"
-#include <queue>
-
// Remove windows.h macros after glad's include of windows.h
-#if defined(DAWN_PLATFORM_WINDOWS)
-# include "dawn/common/windows_with_undefs.h"
+#if DAWN_PLATFORM_IS(WINDOWS)
+#include "dawn/common/windows_with_undefs.h"
#endif
typedef void* EGLImage;
namespace dawn::native::opengl {
- class Device final : public DeviceBase {
- public:
- static ResultOrError<Ref<Device>> Create(AdapterBase* adapter,
- const DeviceDescriptor* descriptor,
- const OpenGLFunctions& functions);
- ~Device() override;
-
- MaybeError Initialize(const DeviceDescriptor* descriptor);
-
- // Contains all the OpenGL entry points, glDoFoo is called via device->gl.DoFoo.
- const OpenGLFunctions gl;
-
- const GLFormat& GetGLFormat(const Format& format);
-
- void SubmitFenceSync();
-
- MaybeError ValidateEGLImageCanBeWrapped(const TextureDescriptor* descriptor,
- ::EGLImage image);
- TextureBase* CreateTextureWrappingEGLImage(const ExternalImageDescriptor* descriptor,
- ::EGLImage image);
-
- ResultOrError<Ref<CommandBufferBase>> CreateCommandBuffer(
- CommandEncoder* encoder,
- const CommandBufferDescriptor* descriptor) override;
-
- MaybeError TickImpl() override;
-
- ResultOrError<std::unique_ptr<StagingBufferBase>> CreateStagingBuffer(size_t size) override;
- MaybeError CopyFromStagingToBuffer(StagingBufferBase* source,
- uint64_t sourceOffset,
- BufferBase* destination,
- uint64_t destinationOffset,
- uint64_t size) override;
-
- MaybeError CopyFromStagingToTexture(const StagingBufferBase* source,
- const TextureDataLayout& src,
- TextureCopy* dst,
- const Extent3D& copySizePixels) override;
-
- uint32_t GetOptimalBytesPerRowAlignment() const override;
- uint64_t GetOptimalBufferToTextureCopyOffsetAlignment() const override;
-
- float GetTimestampPeriodInNS() const override;
-
- private:
- Device(AdapterBase* adapter,
- const DeviceDescriptor* descriptor,
- const OpenGLFunctions& functions);
-
- ResultOrError<Ref<BindGroupBase>> CreateBindGroupImpl(
- const BindGroupDescriptor* descriptor) override;
- ResultOrError<Ref<BindGroupLayoutBase>> CreateBindGroupLayoutImpl(
- const BindGroupLayoutDescriptor* descriptor,
- PipelineCompatibilityToken pipelineCompatibilityToken) override;
- ResultOrError<Ref<BufferBase>> CreateBufferImpl(
- const BufferDescriptor* descriptor) override;
- ResultOrError<Ref<PipelineLayoutBase>> CreatePipelineLayoutImpl(
- const PipelineLayoutDescriptor* descriptor) override;
- ResultOrError<Ref<QuerySetBase>> CreateQuerySetImpl(
- const QuerySetDescriptor* descriptor) override;
- ResultOrError<Ref<SamplerBase>> CreateSamplerImpl(
- const SamplerDescriptor* descriptor) override;
- ResultOrError<Ref<ShaderModuleBase>> CreateShaderModuleImpl(
- const ShaderModuleDescriptor* descriptor,
- ShaderModuleParseResult* parseResult) override;
- ResultOrError<Ref<SwapChainBase>> CreateSwapChainImpl(
- const SwapChainDescriptor* descriptor) override;
- ResultOrError<Ref<NewSwapChainBase>> CreateSwapChainImpl(
- Surface* surface,
- NewSwapChainBase* previousSwapChain,
- const SwapChainDescriptor* descriptor) override;
- ResultOrError<Ref<TextureBase>> CreateTextureImpl(
- const TextureDescriptor* descriptor) override;
- ResultOrError<Ref<TextureViewBase>> CreateTextureViewImpl(
- TextureBase* texture,
- const TextureViewDescriptor* descriptor) override;
- Ref<ComputePipelineBase> CreateUninitializedComputePipelineImpl(
- const ComputePipelineDescriptor* descriptor) override;
- Ref<RenderPipelineBase> CreateUninitializedRenderPipelineImpl(
- const RenderPipelineDescriptor* descriptor) override;
-
- void InitTogglesFromDriver();
- ResultOrError<ExecutionSerial> CheckAndUpdateCompletedSerials() override;
- void DestroyImpl() override;
- MaybeError WaitForIdleForDestruction() override;
-
- std::queue<std::pair<GLsync, ExecutionSerial>> mFencesInFlight;
-
- GLFormatTable mFormatTable;
- };
+class Device final : public DeviceBase {
+ public:
+ static ResultOrError<Ref<Device>> Create(AdapterBase* adapter,
+ const DeviceDescriptor* descriptor,
+ const OpenGLFunctions& functions);
+ ~Device() override;
+
+ MaybeError Initialize(const DeviceDescriptor* descriptor);
+
+ // Contains all the OpenGL entry points, glDoFoo is called via device->gl.DoFoo.
+ const OpenGLFunctions gl;
+
+ const GLFormat& GetGLFormat(const Format& format);
+
+ void SubmitFenceSync();
+
+ MaybeError ValidateEGLImageCanBeWrapped(const TextureDescriptor* descriptor, ::EGLImage image);
+ TextureBase* CreateTextureWrappingEGLImage(const ExternalImageDescriptor* descriptor,
+ ::EGLImage image);
+
+ ResultOrError<Ref<CommandBufferBase>> CreateCommandBuffer(
+ CommandEncoder* encoder,
+ const CommandBufferDescriptor* descriptor) override;
+
+ MaybeError TickImpl() override;
+
+ ResultOrError<std::unique_ptr<StagingBufferBase>> CreateStagingBuffer(size_t size) override;
+ MaybeError CopyFromStagingToBuffer(StagingBufferBase* source,
+ uint64_t sourceOffset,
+ BufferBase* destination,
+ uint64_t destinationOffset,
+ uint64_t size) override;
+
+ MaybeError CopyFromStagingToTexture(const StagingBufferBase* source,
+ const TextureDataLayout& src,
+ TextureCopy* dst,
+ const Extent3D& copySizePixels) override;
+
+ uint32_t GetOptimalBytesPerRowAlignment() const override;
+ uint64_t GetOptimalBufferToTextureCopyOffsetAlignment() const override;
+
+ float GetTimestampPeriodInNS() const override;
+
+ private:
+ Device(AdapterBase* adapter,
+ const DeviceDescriptor* descriptor,
+ const OpenGLFunctions& functions);
+
+ ResultOrError<Ref<BindGroupBase>> CreateBindGroupImpl(
+ const BindGroupDescriptor* descriptor) override;
+ ResultOrError<Ref<BindGroupLayoutBase>> CreateBindGroupLayoutImpl(
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken) override;
+ ResultOrError<Ref<BufferBase>> CreateBufferImpl(const BufferDescriptor* descriptor) override;
+ ResultOrError<Ref<PipelineLayoutBase>> CreatePipelineLayoutImpl(
+ const PipelineLayoutDescriptor* descriptor) override;
+ ResultOrError<Ref<QuerySetBase>> CreateQuerySetImpl(
+ const QuerySetDescriptor* descriptor) override;
+ ResultOrError<Ref<SamplerBase>> CreateSamplerImpl(const SamplerDescriptor* descriptor) override;
+ ResultOrError<Ref<ShaderModuleBase>> CreateShaderModuleImpl(
+ const ShaderModuleDescriptor* descriptor,
+ ShaderModuleParseResult* parseResult,
+ OwnedCompilationMessages* compilationMessages) override;
+ ResultOrError<Ref<SwapChainBase>> CreateSwapChainImpl(
+ const SwapChainDescriptor* descriptor) override;
+ ResultOrError<Ref<NewSwapChainBase>> CreateSwapChainImpl(
+ Surface* surface,
+ NewSwapChainBase* previousSwapChain,
+ const SwapChainDescriptor* descriptor) override;
+ ResultOrError<Ref<TextureBase>> CreateTextureImpl(const TextureDescriptor* descriptor) override;
+ ResultOrError<Ref<TextureViewBase>> CreateTextureViewImpl(
+ TextureBase* texture,
+ const TextureViewDescriptor* descriptor) override;
+ Ref<ComputePipelineBase> CreateUninitializedComputePipelineImpl(
+ const ComputePipelineDescriptor* descriptor) override;
+ Ref<RenderPipelineBase> CreateUninitializedRenderPipelineImpl(
+ const RenderPipelineDescriptor* descriptor) override;
+
+ void InitTogglesFromDriver();
+ GLenum GetBGRAInternalFormat() const;
+ ResultOrError<ExecutionSerial> CheckAndUpdateCompletedSerials() override;
+ void DestroyImpl() override;
+ MaybeError WaitForIdleForDestruction() override;
+
+ std::queue<std::pair<GLsync, ExecutionSerial>> mFencesInFlight;
+
+ GLFormatTable mFormatTable;
+};
} // namespace dawn::native::opengl
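InitTogglesFromDriver() in DeviceGL.cpp above gates each toggle on either a minimum context version or the presence of a specific extension string. The helpers below are a minimal sketch of that kind of check written against raw GL queries rather than Dawn's OpenGLFunctions wrapper; the function names are invented, the GLES3/gl3.h header is an assumption, and a current context is required.

#include <GLES3/gl3.h>
#include <cstring>

// Returns true if the running context lists the given extension.
static bool HasExtension(const char* name) {
    GLint count = 0;
    glGetIntegerv(GL_NUM_EXTENSIONS, &count);
    for (GLint i = 0; i < count; ++i) {
        const char* ext =
            reinterpret_cast<const char*>(glGetStringi(GL_EXTENSIONS, static_cast<GLuint>(i)));
        if (ext != nullptr && std::strcmp(ext, name) == 0) {
            return true;
        }
    }
    return false;
}

// Returns true if the context version is at least major.minor.
static bool IsAtLeast(GLint wantMajor, GLint wantMinor) {
    GLint major = 0, minor = 0;
    glGetIntegerv(GL_MAJOR_VERSION, &major);
    glGetIntegerv(GL_MINOR_VERSION, &minor);
    return major > wantMajor || (major == wantMajor && minor >= wantMinor);
}

// Example gate, mirroring the snorm-read check above: enabled on GL 4.4+ or when
// GL_EXT_render_snorm is exposed.
bool SupportsSnormRead() {
    return IsAtLeast(4, 4) || HasExtension("GL_EXT_render_snorm");
}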
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/Forward.h b/chromium/third_party/dawn/src/dawn/native/opengl/Forward.h
index 2ebd2fbb9e7..1ba0409060d 100644
--- a/chromium/third_party/dawn/src/dawn/native/opengl/Forward.h
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/Forward.h
@@ -19,47 +19,47 @@
namespace dawn::native::opengl {
- class Adapter;
- class BindGroup;
- class BindGroupLayout;
- class Buffer;
- class CommandBuffer;
- class ComputePipeline;
- class Device;
- class PersistentPipelineState;
- class PipelineLayout;
- class QuerySet;
- class Queue;
- class RenderPipeline;
- class Sampler;
- class ShaderModule;
- class SwapChain;
- class Texture;
- class TextureView;
+class Adapter;
+class BindGroup;
+class BindGroupLayout;
+class Buffer;
+class CommandBuffer;
+class ComputePipeline;
+class Device;
+class PersistentPipelineState;
+class PipelineLayout;
+class QuerySet;
+class Queue;
+class RenderPipeline;
+class Sampler;
+class ShaderModule;
+class SwapChain;
+class Texture;
+class TextureView;
- struct OpenGLBackendTraits {
- using AdapterType = Adapter;
- using BindGroupType = BindGroup;
- using BindGroupLayoutType = BindGroupLayout;
- using BufferType = Buffer;
- using CommandBufferType = CommandBuffer;
- using ComputePipelineType = ComputePipeline;
- using DeviceType = Device;
- using PipelineLayoutType = PipelineLayout;
- using QuerySetType = QuerySet;
- using QueueType = Queue;
- using RenderPipelineType = RenderPipeline;
- using SamplerType = Sampler;
- using ShaderModuleType = ShaderModule;
- using SwapChainType = SwapChain;
- using TextureType = Texture;
- using TextureViewType = TextureView;
- };
+struct OpenGLBackendTraits {
+ using AdapterType = Adapter;
+ using BindGroupType = BindGroup;
+ using BindGroupLayoutType = BindGroupLayout;
+ using BufferType = Buffer;
+ using CommandBufferType = CommandBuffer;
+ using ComputePipelineType = ComputePipeline;
+ using DeviceType = Device;
+ using PipelineLayoutType = PipelineLayout;
+ using QuerySetType = QuerySet;
+ using QueueType = Queue;
+ using RenderPipelineType = RenderPipeline;
+ using SamplerType = Sampler;
+ using ShaderModuleType = ShaderModule;
+ using SwapChainType = SwapChain;
+ using TextureType = Texture;
+ using TextureViewType = TextureView;
+};
- template <typename T>
- auto ToBackend(T&& common) -> decltype(ToBackendBase<OpenGLBackendTraits>(common)) {
- return ToBackendBase<OpenGLBackendTraits>(common);
- }
+template <typename T>
+auto ToBackend(T&& common) -> decltype(ToBackendBase<OpenGLBackendTraits>(common)) {
+ return ToBackendBase<OpenGLBackendTraits>(common);
+}
} // namespace dawn::native::opengl
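Forward.h above declares OpenGLBackendTraits and a ToBackend() helper that forwards to ToBackendBase<OpenGLBackendTraits>. The sketch below is a simplified, self-contained version of that backend-traits downcasting idiom, not Dawn's actual implementation; all type names are invented and only raw pointers are handled.

#include <cassert>

// Frontend base classes (stand-ins).
struct BufferBase { virtual ~BufferBase() = default; };
struct TextureBase { virtual ~TextureBase() = default; };

namespace mybackend {

struct Buffer : BufferBase {};
struct Texture : TextureBase {};

// Traits struct naming this backend's concrete types, mirroring OpenGLBackendTraits.
struct BackendTraits {
    using BufferType = Buffer;
    using TextureType = Texture;
};

// Metafunction: frontend base type -> backend type, via the traits.
template <typename Traits, typename T>
struct BackendType;
template <typename Traits>
struct BackendType<Traits, BufferBase> { using type = typename Traits::BufferType; };
template <typename Traits>
struct BackendType<Traits, TextureBase> { using type = typename Traits::TextureType; };

// Generic downcast helper, analogous to ToBackendBase<OpenGLBackendTraits>.
template <typename Traits, typename T>
typename BackendType<Traits, T>::type* ToBackendBase(T* common) {
    return static_cast<typename BackendType<Traits, T>::type*>(common);
}

// Per-backend convenience wrapper, like the ToBackend() declared above.
template <typename T>
auto ToBackend(T* common) {
    return ToBackendBase<BackendTraits>(common);
}

}  // namespace mybackend

int main() {
    mybackend::Buffer buffer;
    BufferBase* base = &buffer;
    // Frontend code holds BufferBase*; backend code recovers the concrete type.
    assert(mybackend::ToBackend(base) == &buffer);
    return 0;
}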
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/GLFormat.cpp b/chromium/third_party/dawn/src/dawn/native/opengl/GLFormat.cpp
index dac02a6ad08..6fbca797284 100644
--- a/chromium/third_party/dawn/src/dawn/native/opengl/GLFormat.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/GLFormat.cpp
@@ -16,32 +16,32 @@
namespace dawn::native::opengl {
- GLFormatTable BuildGLFormatTable() {
- GLFormatTable table;
+GLFormatTable BuildGLFormatTable(GLenum internalFormatForBGRA) {
+ GLFormatTable table;
- using Type = GLFormat::ComponentType;
+ using Type = GLFormat::ComponentType;
- auto AddFormat = [&table](wgpu::TextureFormat dawnFormat, GLenum internalFormat,
- GLenum format, GLenum type, Type componentType) {
- FormatIndex index = ComputeFormatIndex(dawnFormat);
- ASSERT(index < table.size());
+ auto AddFormat = [&table](wgpu::TextureFormat dawnFormat, GLenum internalFormat, GLenum format,
+ GLenum type, Type componentType) {
+ FormatIndex index = ComputeFormatIndex(dawnFormat);
+ ASSERT(index < table.size());
- table[index].internalFormat = internalFormat;
- table[index].format = format;
- table[index].type = type;
- table[index].componentType = componentType;
- table[index].isSupportedOnBackend = true;
- };
+ table[index].internalFormat = internalFormat;
+ table[index].format = format;
+ table[index].type = type;
+ table[index].componentType = componentType;
+ table[index].isSupportedOnBackend = true;
+ };
- // It's dangerous to go alone, take this:
- //
- // [ANGLE's formatutils.cpp]
- // [ANGLE's formatutilsgl.cpp]
- //
- // The format tables in these files are extremely complete and the best reference on GL
- // format support, enums, etc.
+ // It's dangerous to go alone, take this:
+ //
+ // [ANGLE's formatutils.cpp]
+ // [ANGLE's formatutilsgl.cpp]
+ //
+ // The format tables in these files are extremely complete and the best reference on GL
+ // format support, enums, etc.
- // clang-format off
+ // clang-format off
// 1 byte color formats
AddFormat(wgpu::TextureFormat::R8Unorm, GL_R8, GL_RED, GL_UNSIGNED_BYTE, Type::Float);
@@ -71,8 +71,7 @@ namespace dawn::native::opengl {
AddFormat(wgpu::TextureFormat::RGBA8Uint, GL_RGBA8UI, GL_RGBA_INTEGER, GL_UNSIGNED_BYTE, Type::Uint);
AddFormat(wgpu::TextureFormat::RGBA8Sint, GL_RGBA8I, GL_RGBA_INTEGER, GL_BYTE, Type::Int);
- // This doesn't have an enum for the internal format in OpenGL, so use RGBA8.
- AddFormat(wgpu::TextureFormat::BGRA8Unorm, GL_RGBA8, GL_BGRA, GL_UNSIGNED_BYTE, Type::Float);
+ AddFormat(wgpu::TextureFormat::BGRA8Unorm, internalFormatForBGRA, GL_BGRA, GL_UNSIGNED_BYTE, Type::Float);
AddFormat(wgpu::TextureFormat::RGB10A2Unorm, GL_RGB10_A2, GL_RGBA, GL_UNSIGNED_INT_2_10_10_10_REV, Type::Float);
AddFormat(wgpu::TextureFormat::RG11B10Ufloat, GL_R11F_G11F_B10F, GL_RGB, GL_UNSIGNED_INT_10F_11F_11F_REV, Type::Float);
AddFormat(wgpu::TextureFormat::RGB9E5Ufloat, GL_RGB9_E5, GL_RGB, GL_UNSIGNED_INT_5_9_9_9_REV, Type::Float);
@@ -114,9 +113,9 @@ namespace dawn::native::opengl {
AddFormat(wgpu::TextureFormat::BC7RGBAUnorm, GL_COMPRESSED_RGBA_BPTC_UNORM, GL_RGBA, GL_UNSIGNED_BYTE, Type::Float);
AddFormat(wgpu::TextureFormat::BC7RGBAUnormSrgb, GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM, GL_RGBA, GL_UNSIGNED_BYTE, Type::Float);
- // clang-format on
+ // clang-format on
- return table;
- }
+ return table;
+}
} // namespace dawn::native::opengl
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/GLFormat.h b/chromium/third_party/dawn/src/dawn/native/opengl/GLFormat.h
index 292fb4a6d3e..6d1d81e58e8 100644
--- a/chromium/third_party/dawn/src/dawn/native/opengl/GLFormat.h
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/GLFormat.h
@@ -20,22 +20,20 @@
namespace dawn::native::opengl {
- class Device;
-
- struct GLFormat {
- GLenum internalFormat = 0;
- GLenum format = 0;
- GLenum type = 0;
- bool isSupportedOnBackend = false;
-
- // OpenGL has different functions depending on the format component type, for example
- // glClearBufferfv is only valid on formats with the Float ComponentType
- enum ComponentType { Float, Int, Uint, DepthStencil };
- ComponentType componentType;
- };
-
- using GLFormatTable = ityp::array<FormatIndex, GLFormat, kKnownFormatCount>;
- GLFormatTable BuildGLFormatTable();
+struct GLFormat {
+ GLenum internalFormat = 0;
+ GLenum format = 0;
+ GLenum type = 0;
+ bool isSupportedOnBackend = false;
+
+ // OpenGL has different functions depending on the format component type, for example
+ // glClearBufferfv is only valid on formats with the Float ComponentType
+ enum ComponentType { Float, Int, Uint, DepthStencil };
+ ComponentType componentType;
+};
+
+using GLFormatTable = ityp::array<FormatIndex, GLFormat, kKnownFormatCount>;
+GLFormatTable BuildGLFormatTable(GLenum internalFormatForBGRA);
} // namespace dawn::native::opengl
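
BuildGLFormatTable() now takes the internal format to use for BGRA8Unorm instead of hard-coding GL_RGBA8, since desktop GL has no sized BGRA internal format while GLES can expose one through a BGRA8888 extension. A hedged sketch of what a caller might pass (the actual selection logic in the device code may differ; GL_BGRA8_EXT comes from the extension headers):

    GLenum bgraInternalFormat = GL_RGBA8;  // desktop GL fallback, matching the old behavior
    if (gl.GetVersion().IsES() &&
        gl.IsGLExtensionSupported("GL_EXT_texture_format_BGRA8888")) {
        bgraInternalFormat = GL_BGRA8_EXT;
    }
    GLFormatTable formatTable = BuildGLFormatTable(bgraInternalFormat);
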
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/NativeSwapChainImplGL.cpp b/chromium/third_party/dawn/src/dawn/native/opengl/NativeSwapChainImplGL.cpp
index b01e7e3b4f0..409acf1ed7c 100644
--- a/chromium/third_party/dawn/src/dawn/native/opengl/NativeSwapChainImplGL.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/NativeSwapChainImplGL.cpp
@@ -18,71 +18,69 @@
namespace dawn::native::opengl {
- NativeSwapChainImpl::NativeSwapChainImpl(Device* device,
- PresentCallback present,
- void* presentUserdata)
- : mPresentCallback(present), mPresentUserdata(presentUserdata), mDevice(device) {
- }
-
- NativeSwapChainImpl::~NativeSwapChainImpl() {
- const OpenGLFunctions& gl = mDevice->gl;
- gl.DeleteTextures(1, &mBackTexture);
- gl.DeleteFramebuffers(1, &mBackFBO);
- }
-
- void NativeSwapChainImpl::Init(DawnWSIContextGL* /*context*/) {
- const OpenGLFunctions& gl = mDevice->gl;
- gl.GenTextures(1, &mBackTexture);
- gl.BindTexture(GL_TEXTURE_2D, mBackTexture);
- gl.TexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, 0, 0, 0, GL_RGBA, GL_UNSIGNED_BYTE, nullptr);
-
- gl.GenFramebuffers(1, &mBackFBO);
- gl.BindFramebuffer(GL_READ_FRAMEBUFFER, mBackFBO);
- gl.FramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D,
- mBackTexture, 0);
- }
-
- DawnSwapChainError NativeSwapChainImpl::Configure(WGPUTextureFormat format,
- WGPUTextureUsage usage,
- uint32_t width,
- uint32_t height) {
- if (format != WGPUTextureFormat_RGBA8Unorm) {
- return "unsupported format";
- }
- ASSERT(width > 0);
- ASSERT(height > 0);
- mWidth = width;
- mHeight = height;
-
- const OpenGLFunctions& gl = mDevice->gl;
- gl.BindTexture(GL_TEXTURE_2D, mBackTexture);
- // Reallocate the texture
- gl.TexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE,
- nullptr);
-
- return DAWN_SWAP_CHAIN_NO_ERROR;
- }
-
- DawnSwapChainError NativeSwapChainImpl::GetNextTexture(DawnSwapChainNextTexture* nextTexture) {
- nextTexture->texture.u32 = mBackTexture;
- return DAWN_SWAP_CHAIN_NO_ERROR;
- }
-
- DawnSwapChainError NativeSwapChainImpl::Present() {
- const OpenGLFunctions& gl = mDevice->gl;
- gl.BindFramebuffer(GL_READ_FRAMEBUFFER, mBackFBO);
- gl.BindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
- gl.Scissor(0, 0, mWidth, mHeight);
- gl.BlitFramebuffer(0, 0, mWidth, mHeight, 0, mHeight, mWidth, 0, GL_COLOR_BUFFER_BIT,
- GL_NEAREST);
-
- mPresentCallback(mPresentUserdata);
-
- return DAWN_SWAP_CHAIN_NO_ERROR;
- }
-
- wgpu::TextureFormat NativeSwapChainImpl::GetPreferredFormat() const {
- return wgpu::TextureFormat::RGBA8Unorm;
+NativeSwapChainImpl::NativeSwapChainImpl(Device* device,
+ PresentCallback present,
+ void* presentUserdata)
+ : mPresentCallback(present), mPresentUserdata(presentUserdata), mDevice(device) {}
+
+NativeSwapChainImpl::~NativeSwapChainImpl() {
+ const OpenGLFunctions& gl = mDevice->gl;
+ gl.DeleteTextures(1, &mBackTexture);
+ gl.DeleteFramebuffers(1, &mBackFBO);
+}
+
+void NativeSwapChainImpl::Init(DawnWSIContextGL* /*context*/) {
+ const OpenGLFunctions& gl = mDevice->gl;
+ gl.GenTextures(1, &mBackTexture);
+ gl.BindTexture(GL_TEXTURE_2D, mBackTexture);
+ gl.TexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, 0, 0, 0, GL_RGBA, GL_UNSIGNED_BYTE, nullptr);
+
+ gl.GenFramebuffers(1, &mBackFBO);
+ gl.BindFramebuffer(GL_READ_FRAMEBUFFER, mBackFBO);
+ gl.FramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, mBackTexture,
+ 0);
+}
+
+DawnSwapChainError NativeSwapChainImpl::Configure(WGPUTextureFormat format,
+ WGPUTextureUsage usage,
+ uint32_t width,
+ uint32_t height) {
+ if (format != WGPUTextureFormat_RGBA8Unorm) {
+ return "unsupported format";
}
+ ASSERT(width > 0);
+ ASSERT(height > 0);
+ mWidth = width;
+ mHeight = height;
+
+ const OpenGLFunctions& gl = mDevice->gl;
+ gl.BindTexture(GL_TEXTURE_2D, mBackTexture);
+ // Reallocate the texture
+ gl.TexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, nullptr);
+
+ return DAWN_SWAP_CHAIN_NO_ERROR;
+}
+
+DawnSwapChainError NativeSwapChainImpl::GetNextTexture(DawnSwapChainNextTexture* nextTexture) {
+ nextTexture->texture.u32 = mBackTexture;
+ return DAWN_SWAP_CHAIN_NO_ERROR;
+}
+
+DawnSwapChainError NativeSwapChainImpl::Present() {
+ const OpenGLFunctions& gl = mDevice->gl;
+ gl.BindFramebuffer(GL_READ_FRAMEBUFFER, mBackFBO);
+ gl.BindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
+ gl.Scissor(0, 0, mWidth, mHeight);
+ gl.BlitFramebuffer(0, 0, mWidth, mHeight, 0, mHeight, mWidth, 0, GL_COLOR_BUFFER_BIT,
+ GL_NEAREST);
+
+ mPresentCallback(mPresentUserdata);
+
+ return DAWN_SWAP_CHAIN_NO_ERROR;
+}
+
+wgpu::TextureFormat NativeSwapChainImpl::GetPreferredFormat() const {
+ return wgpu::TextureFormat::RGBA8Unorm;
+}
} // namespace dawn::native::opengl
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/NativeSwapChainImplGL.h b/chromium/third_party/dawn/src/dawn/native/opengl/NativeSwapChainImplGL.h
index 6d52074cfef..1a2013e1ed9 100644
--- a/chromium/third_party/dawn/src/dawn/native/opengl/NativeSwapChainImplGL.h
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/NativeSwapChainImplGL.h
@@ -22,36 +22,36 @@
namespace dawn::native::opengl {
- class Device;
+class Device;
- class NativeSwapChainImpl {
- public:
- using WSIContext = DawnWSIContextGL;
+class NativeSwapChainImpl {
+ public:
+ using WSIContext = DawnWSIContextGL;
- NativeSwapChainImpl(Device* device, PresentCallback present, void* presentUserdata);
- ~NativeSwapChainImpl();
+ NativeSwapChainImpl(Device* device, PresentCallback present, void* presentUserdata);
+ ~NativeSwapChainImpl();
- void Init(DawnWSIContextGL* context);
- DawnSwapChainError Configure(WGPUTextureFormat format,
- WGPUTextureUsage,
- uint32_t width,
- uint32_t height);
- DawnSwapChainError GetNextTexture(DawnSwapChainNextTexture* nextTexture);
- DawnSwapChainError Present();
+ void Init(DawnWSIContextGL* context);
+ DawnSwapChainError Configure(WGPUTextureFormat format,
+ WGPUTextureUsage,
+ uint32_t width,
+ uint32_t height);
+ DawnSwapChainError GetNextTexture(DawnSwapChainNextTexture* nextTexture);
+ DawnSwapChainError Present();
- wgpu::TextureFormat GetPreferredFormat() const;
+ wgpu::TextureFormat GetPreferredFormat() const;
- private:
- PresentCallback mPresentCallback;
- void* mPresentUserdata;
+ private:
+ PresentCallback mPresentCallback;
+ void* mPresentUserdata;
- uint32_t mWidth = 0;
- uint32_t mHeight = 0;
- GLuint mBackFBO = 0;
- GLuint mBackTexture = 0;
+ uint32_t mWidth = 0;
+ uint32_t mHeight = 0;
+ GLuint mBackFBO = 0;
+ GLuint mBackTexture = 0;
- Device* mDevice = nullptr;
- };
+ Device* mDevice = nullptr;
+};
} // namespace dawn::native::opengl
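
Taken together, the implementation and header above cover the whole WSI lifecycle for the GL swap chain: Init() allocates the back texture and read FBO, Configure() (re)allocates storage at the requested size, GetNextTexture() hands the texture name to the client, and Present() blits it to the default framebuffer with a Y flip before invoking the callback. A rough sketch of that sequence (backendDevice, presentCallback, presentUserdata, width and height are placeholders; in practice the class is driven through the DawnSwapChainImplementation table created in OpenGLBackend.cpp below):

    NativeSwapChainImpl swapChain(backendDevice, presentCallback, presentUserdata);
    swapChain.Init(nullptr);  // the DawnWSIContextGL* argument is unused
    swapChain.Configure(WGPUTextureFormat_RGBA8Unorm, WGPUTextureUsage_RenderAttachment,
                        width, height);
    DawnSwapChainNextTexture next;
    swapChain.GetNextTexture(&next);  // next.texture.u32 is the GL texture name
    // ... render into that texture ...
    swapChain.Present();              // blit to FBO 0 (Y-flipped), then call presentCallback
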
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/OpenGLBackend.cpp b/chromium/third_party/dawn/src/dawn/native/opengl/OpenGLBackend.cpp
index 739de6258db..c77c1d0f794 100644
--- a/chromium/third_party/dawn/src/dawn/native/opengl/OpenGLBackend.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/OpenGLBackend.cpp
@@ -23,43 +23,39 @@
namespace dawn::native::opengl {
- AdapterDiscoveryOptions::AdapterDiscoveryOptions()
- : AdapterDiscoveryOptionsBase(WGPUBackendType_OpenGL) {
- }
-
- AdapterDiscoveryOptionsES::AdapterDiscoveryOptionsES()
- : AdapterDiscoveryOptionsBase(WGPUBackendType_OpenGLES) {
- }
-
- DawnSwapChainImplementation CreateNativeSwapChainImpl(WGPUDevice device,
- PresentCallback present,
- void* presentUserdata) {
- Device* backendDevice = ToBackend(FromAPI(device));
-
- DawnSwapChainImplementation impl;
- impl = CreateSwapChainImplementation(
- new NativeSwapChainImpl(backendDevice, present, presentUserdata));
- impl.textureUsage = WGPUTextureUsage_Present;
-
- return impl;
- }
-
- WGPUTextureFormat GetNativeSwapChainPreferredFormat(
- const DawnSwapChainImplementation* swapChain) {
- NativeSwapChainImpl* impl = reinterpret_cast<NativeSwapChainImpl*>(swapChain->userData);
- return static_cast<WGPUTextureFormat>(impl->GetPreferredFormat());
- }
-
- ExternalImageDescriptorEGLImage::ExternalImageDescriptorEGLImage()
- : ExternalImageDescriptor(ExternalImageType::EGLImage) {
- }
-
- WGPUTexture WrapExternalEGLImage(WGPUDevice device,
- const ExternalImageDescriptorEGLImage* descriptor) {
- Device* backendDevice = ToBackend(FromAPI(device));
- TextureBase* texture =
- backendDevice->CreateTextureWrappingEGLImage(descriptor, descriptor->image);
- return ToAPI(texture);
- }
+AdapterDiscoveryOptions::AdapterDiscoveryOptions()
+ : AdapterDiscoveryOptionsBase(WGPUBackendType_OpenGL) {}
+
+AdapterDiscoveryOptionsES::AdapterDiscoveryOptionsES()
+ : AdapterDiscoveryOptionsBase(WGPUBackendType_OpenGLES) {}
+
+DawnSwapChainImplementation CreateNativeSwapChainImpl(WGPUDevice device,
+ PresentCallback present,
+ void* presentUserdata) {
+ Device* backendDevice = ToBackend(FromAPI(device));
+
+ DawnSwapChainImplementation impl;
+ impl = CreateSwapChainImplementation(
+ new NativeSwapChainImpl(backendDevice, present, presentUserdata));
+ impl.textureUsage = WGPUTextureUsage_Present;
+
+ return impl;
+}
+
+WGPUTextureFormat GetNativeSwapChainPreferredFormat(const DawnSwapChainImplementation* swapChain) {
+ NativeSwapChainImpl* impl = reinterpret_cast<NativeSwapChainImpl*>(swapChain->userData);
+ return static_cast<WGPUTextureFormat>(impl->GetPreferredFormat());
+}
+
+ExternalImageDescriptorEGLImage::ExternalImageDescriptorEGLImage()
+ : ExternalImageDescriptor(ExternalImageType::EGLImage) {}
+
+WGPUTexture WrapExternalEGLImage(WGPUDevice device,
+ const ExternalImageDescriptorEGLImage* descriptor) {
+ Device* backendDevice = ToBackend(FromAPI(device));
+ TextureBase* texture =
+ backendDevice->CreateTextureWrappingEGLImage(descriptor, descriptor->image);
+ return ToAPI(texture);
+}
} // namespace dawn::native::opengl
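
These are the embedder-facing entry points: CreateNativeSwapChainImpl() wraps the NativeSwapChainImpl above in a DawnSwapChainImplementation, GetNativeSwapChainPreferredFormat() reports the RGBA8Unorm preference, and WrapExternalEGLImage() imports an EGLImage as a WGPUTexture. A minimal sketch of the typical call pattern (wgpuDevice, presentCallback, userdata and eglImage are placeholders supplied by the embedder, and only the descriptor field shown in the diff is set here):

    DawnSwapChainImplementation swapChainImpl =
        dawn::native::opengl::CreateNativeSwapChainImpl(wgpuDevice, presentCallback, userdata);
    WGPUTextureFormat preferred =
        dawn::native::opengl::GetNativeSwapChainPreferredFormat(&swapChainImpl);

    dawn::native::opengl::ExternalImageDescriptorEGLImage desc;
    desc.image = eglImage;  // EGLImage created and owned by the embedder
    WGPUTexture wrapped = dawn::native::opengl::WrapExternalEGLImage(wgpuDevice, &desc);
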
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/OpenGLFunctions.cpp b/chromium/third_party/dawn/src/dawn/native/opengl/OpenGLFunctions.cpp
index 45f8354a2fd..ccd07202c2d 100644
--- a/chromium/third_party/dawn/src/dawn/native/opengl/OpenGLFunctions.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/OpenGLFunctions.cpp
@@ -18,44 +18,44 @@
namespace dawn::native::opengl {
- MaybeError OpenGLFunctions::Initialize(GetProcAddress getProc) {
- DAWN_TRY(mVersion.Initialize(getProc));
- if (mVersion.IsES()) {
- DAWN_TRY(LoadOpenGLESProcs(getProc, mVersion.GetMajor(), mVersion.GetMinor()));
- } else {
- DAWN_TRY(LoadDesktopGLProcs(getProc, mVersion.GetMajor(), mVersion.GetMinor()));
- }
+MaybeError OpenGLFunctions::Initialize(GetProcAddress getProc) {
+ DAWN_TRY(mVersion.Initialize(getProc));
+ if (mVersion.IsES()) {
+ DAWN_TRY(LoadOpenGLESProcs(getProc, mVersion.GetMajor(), mVersion.GetMinor()));
+ } else {
+ DAWN_TRY(LoadDesktopGLProcs(getProc, mVersion.GetMajor(), mVersion.GetMinor()));
+ }
- InitializeSupportedGLExtensions();
+ InitializeSupportedGLExtensions();
- return {};
- }
+ return {};
+}
- void OpenGLFunctions::InitializeSupportedGLExtensions() {
- int32_t numExtensions;
- GetIntegerv(GL_NUM_EXTENSIONS, &numExtensions);
+void OpenGLFunctions::InitializeSupportedGLExtensions() {
+ int32_t numExtensions;
+ GetIntegerv(GL_NUM_EXTENSIONS, &numExtensions);
- for (int32_t i = 0; i < numExtensions; ++i) {
- const char* extensionName = reinterpret_cast<const char*>(GetStringi(GL_EXTENSIONS, i));
- mSupportedGLExtensionsSet.insert(extensionName);
- }
+ for (int32_t i = 0; i < numExtensions; ++i) {
+ const char* extensionName = reinterpret_cast<const char*>(GetStringi(GL_EXTENSIONS, i));
+ mSupportedGLExtensionsSet.insert(extensionName);
}
+}
- bool OpenGLFunctions::IsGLExtensionSupported(const char* extension) const {
- ASSERT(extension != nullptr);
- return mSupportedGLExtensionsSet.count(extension) != 0;
- }
+bool OpenGLFunctions::IsGLExtensionSupported(const char* extension) const {
+ ASSERT(extension != nullptr);
+ return mSupportedGLExtensionsSet.count(extension) != 0;
+}
- const OpenGLVersion& OpenGLFunctions::GetVersion() const {
- return mVersion;
- }
+const OpenGLVersion& OpenGLFunctions::GetVersion() const {
+ return mVersion;
+}
- bool OpenGLFunctions::IsAtLeastGL(uint32_t majorVersion, uint32_t minorVersion) const {
- return mVersion.IsDesktop() && mVersion.IsAtLeast(majorVersion, minorVersion);
- }
+bool OpenGLFunctions::IsAtLeastGL(uint32_t majorVersion, uint32_t minorVersion) const {
+ return mVersion.IsDesktop() && mVersion.IsAtLeast(majorVersion, minorVersion);
+}
- bool OpenGLFunctions::IsAtLeastGLES(uint32_t majorVersion, uint32_t minorVersion) const {
- return mVersion.IsES() && mVersion.IsAtLeast(majorVersion, minorVersion);
- }
+bool OpenGLFunctions::IsAtLeastGLES(uint32_t majorVersion, uint32_t minorVersion) const {
+ return mVersion.IsES() && mVersion.IsAtLeast(majorVersion, minorVersion);
+}
} // namespace dawn::native::opengl
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/OpenGLFunctions.h b/chromium/third_party/dawn/src/dawn/native/opengl/OpenGLFunctions.h
index 128e5ec2131..4ab6e09644c 100644
--- a/chromium/third_party/dawn/src/dawn/native/opengl/OpenGLFunctions.h
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/OpenGLFunctions.h
@@ -15,6 +15,7 @@
#ifndef SRC_DAWN_NATIVE_OPENGL_OPENGLFUNCTIONS_H_
#define SRC_DAWN_NATIVE_OPENGL_OPENGLFUNCTIONS_H_
+#include <string>
#include <unordered_set>
#include "dawn/native/opengl/OpenGLFunctionsBase_autogen.h"
@@ -22,23 +23,23 @@
namespace dawn::native::opengl {
- struct OpenGLFunctions : OpenGLFunctionsBase {
- public:
- MaybeError Initialize(GetProcAddress getProc);
+struct OpenGLFunctions : OpenGLFunctionsBase {
+ public:
+ MaybeError Initialize(GetProcAddress getProc);
- const OpenGLVersion& GetVersion() const;
- bool IsAtLeastGL(uint32_t majorVersion, uint32_t minorVersion) const;
- bool IsAtLeastGLES(uint32_t majorVersion, uint32_t minorVersion) const;
+ const OpenGLVersion& GetVersion() const;
+ bool IsAtLeastGL(uint32_t majorVersion, uint32_t minorVersion) const;
+ bool IsAtLeastGLES(uint32_t majorVersion, uint32_t minorVersion) const;
- bool IsGLExtensionSupported(const char* extension) const;
+ bool IsGLExtensionSupported(const char* extension) const;
- private:
- void InitializeSupportedGLExtensions();
+ private:
+ void InitializeSupportedGLExtensions();
- OpenGLVersion mVersion;
+ OpenGLVersion mVersion;
- std::unordered_set<std::string> mSupportedGLExtensionsSet;
- };
+ std::unordered_set<std::string> mSupportedGLExtensionsSet;
+};
} // namespace dawn::native::opengl
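
OpenGLFunctions centralizes proc loading plus version and extension queries, so feature gating elsewhere in the backend reduces to calls like the following (a sketch, assuming an initialized gl object):

    bool isDesktop43 = gl.IsAtLeastGL(4, 3);    // true only on desktop GL >= 4.3
    bool isES31      = gl.IsAtLeastGLES(3, 1);  // true only on OpenGL ES >= 3.1
    bool hasBGRA     = gl.IsGLExtensionSupported("GL_EXT_texture_format_BGRA8888");
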
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/OpenGLVersion.cpp b/chromium/third_party/dawn/src/dawn/native/opengl/OpenGLVersion.cpp
index 60fffff2a65..297b5fb7b60 100644
--- a/chromium/third_party/dawn/src/dawn/native/opengl/OpenGLVersion.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/OpenGLVersion.cpp
@@ -15,62 +15,63 @@
#include "dawn/native/opengl/OpenGLVersion.h"
#include <cctype>
+#include <string>
#include <tuple>
namespace dawn::native::opengl {
- MaybeError OpenGLVersion::Initialize(GetProcAddress getProc) {
- PFNGLGETSTRINGPROC getString = reinterpret_cast<PFNGLGETSTRINGPROC>(getProc("glGetString"));
- if (getString == nullptr) {
- return DAWN_INTERNAL_ERROR("Couldn't load glGetString");
- }
-
- std::string version = reinterpret_cast<const char*>(getString(GL_VERSION));
+MaybeError OpenGLVersion::Initialize(GetProcAddress getProc) {
+ PFNGLGETSTRINGPROC getString = reinterpret_cast<PFNGLGETSTRINGPROC>(getProc("glGetString"));
+ if (getString == nullptr) {
+ return DAWN_INTERNAL_ERROR("Couldn't load glGetString");
+ }
- if (version.find("OpenGL ES") != std::string::npos) {
- // ES spec states that the GL_VERSION string will be in the following format:
- // "OpenGL ES N.M vendor-specific information"
- mStandard = Standard::ES;
- mMajorVersion = version[10] - '0';
- mMinorVersion = version[12] - '0';
+ std::string version = reinterpret_cast<const char*>(getString(GL_VERSION));
- // The minor version shouldn't get to two digits.
- ASSERT(version.size() <= 13 || !isdigit(version[13]));
- } else {
- // OpenGL spec states the GL_VERSION string will be in the following format:
- // <version number><space><vendor-specific information>
- // The version number is either of the form major number.minor number or major
- // number.minor number.release number, where the numbers all have one or more
- // digits
- mStandard = Standard::Desktop;
- mMajorVersion = version[0] - '0';
- mMinorVersion = version[2] - '0';
+ if (version.find("OpenGL ES") != std::string::npos) {
+ // ES spec states that the GL_VERSION string will be in the following format:
+ // "OpenGL ES N.M vendor-specific information"
+ mStandard = Standard::ES;
+ mMajorVersion = version[10] - '0';
+ mMinorVersion = version[12] - '0';
- // The minor version shouldn't get to two digits.
- ASSERT(version.size() <= 3 || !isdigit(version[3]));
- }
+ // The minor version shouldn't get to two digits.
+ ASSERT(version.size() <= 13 || !isdigit(version[13]));
+ } else {
+ // OpenGL spec states the GL_VERSION string will be in the following format:
+ // <version number><space><vendor-specific information>
+ // The version number is either of the form major number.minor number or major
+ // number.minor number.release number, where the numbers all have one or more
+ // digits
+ mStandard = Standard::Desktop;
+ mMajorVersion = version[0] - '0';
+ mMinorVersion = version[2] - '0';
- return {};
+ // The minor version shouldn't get to two digits.
+ ASSERT(version.size() <= 3 || !isdigit(version[3]));
}
- bool OpenGLVersion::IsDesktop() const {
- return mStandard == Standard::Desktop;
- }
+ return {};
+}
- bool OpenGLVersion::IsES() const {
- return mStandard == Standard::ES;
- }
+bool OpenGLVersion::IsDesktop() const {
+ return mStandard == Standard::Desktop;
+}
- uint32_t OpenGLVersion::GetMajor() const {
- return mMajorVersion;
- }
+bool OpenGLVersion::IsES() const {
+ return mStandard == Standard::ES;
+}
- uint32_t OpenGLVersion::GetMinor() const {
- return mMinorVersion;
- }
+uint32_t OpenGLVersion::GetMajor() const {
+ return mMajorVersion;
+}
- bool OpenGLVersion::IsAtLeast(uint32_t majorVersion, uint32_t minorVersion) const {
- return std::tie(mMajorVersion, mMinorVersion) >= std::tie(majorVersion, minorVersion);
- }
+uint32_t OpenGLVersion::GetMinor() const {
+ return mMinorVersion;
+}
+
+bool OpenGLVersion::IsAtLeast(uint32_t majorVersion, uint32_t minorVersion) const {
+ return std::tie(mMajorVersion, mMinorVersion) >= std::tie(majorVersion, minorVersion);
+}
} // namespace dawn::native::opengl
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/OpenGLVersion.h b/chromium/third_party/dawn/src/dawn/native/opengl/OpenGLVersion.h
index d575ba8ae39..a9a296fb1fe 100644
--- a/chromium/third_party/dawn/src/dawn/native/opengl/OpenGLVersion.h
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/OpenGLVersion.h
@@ -19,25 +19,25 @@
namespace dawn::native::opengl {
- struct OpenGLVersion {
- public:
- MaybeError Initialize(GetProcAddress getProc);
-
- bool IsDesktop() const;
- bool IsES() const;
- uint32_t GetMajor() const;
- uint32_t GetMinor() const;
- bool IsAtLeast(uint32_t majorVersion, uint32_t minorVersion) const;
-
- private:
- enum class Standard {
- Desktop,
- ES,
- };
- uint32_t mMajorVersion;
- uint32_t mMinorVersion;
- Standard mStandard;
+struct OpenGLVersion {
+ public:
+ MaybeError Initialize(GetProcAddress getProc);
+
+ bool IsDesktop() const;
+ bool IsES() const;
+ uint32_t GetMajor() const;
+ uint32_t GetMinor() const;
+ bool IsAtLeast(uint32_t majorVersion, uint32_t minorVersion) const;
+
+ private:
+ enum class Standard {
+ Desktop,
+ ES,
};
+ uint32_t mMajorVersion;
+ uint32_t mMinorVersion;
+ Standard mStandard;
+};
} // namespace dawn::native::opengl
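
IsAtLeast() relies on std::tie to compare (major, minor) pairs lexicographically, so the major version decides first and the minor version only breaks ties. A standalone sketch of the same idea, runnable outside Dawn:

    #include <cstdint>
    #include <tuple>

    // 4.0 >= 3.3 holds even though 0 < 3 on the minor component alone, because
    // tuple comparison stops at the first differing element (the major version).
    bool IsAtLeast(uint32_t haveMajor, uint32_t haveMinor,
                   uint32_t wantMajor, uint32_t wantMinor) {
        return std::tie(haveMajor, haveMinor) >= std::tie(wantMajor, wantMinor);
    }
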
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/PersistentPipelineStateGL.cpp b/chromium/third_party/dawn/src/dawn/native/opengl/PersistentPipelineStateGL.cpp
index 446ab1adf46..8c1689743cf 100644
--- a/chromium/third_party/dawn/src/dawn/native/opengl/PersistentPipelineStateGL.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/PersistentPipelineStateGL.cpp
@@ -18,41 +18,41 @@
namespace dawn::native::opengl {
- void PersistentPipelineState::SetDefaultState(const OpenGLFunctions& gl) {
- CallGLStencilFunc(gl);
+void PersistentPipelineState::SetDefaultState(const OpenGLFunctions& gl) {
+ CallGLStencilFunc(gl);
+}
+
+void PersistentPipelineState::SetStencilFuncsAndMask(const OpenGLFunctions& gl,
+ GLenum stencilBackCompareFunction,
+ GLenum stencilFrontCompareFunction,
+ uint32_t stencilReadMask) {
+ if (mStencilBackCompareFunction == stencilBackCompareFunction &&
+ mStencilFrontCompareFunction == stencilFrontCompareFunction &&
+ mStencilReadMask == stencilReadMask) {
+ return;
}
- void PersistentPipelineState::SetStencilFuncsAndMask(const OpenGLFunctions& gl,
- GLenum stencilBackCompareFunction,
- GLenum stencilFrontCompareFunction,
- uint32_t stencilReadMask) {
- if (mStencilBackCompareFunction == stencilBackCompareFunction &&
- mStencilFrontCompareFunction == stencilFrontCompareFunction &&
- mStencilReadMask == stencilReadMask) {
- return;
- }
-
- mStencilBackCompareFunction = stencilBackCompareFunction;
- mStencilFrontCompareFunction = stencilFrontCompareFunction;
- mStencilReadMask = stencilReadMask;
- CallGLStencilFunc(gl);
- }
-
- void PersistentPipelineState::SetStencilReference(const OpenGLFunctions& gl,
- uint32_t stencilReference) {
- if (mStencilReference == stencilReference) {
- return;
- }
+ mStencilBackCompareFunction = stencilBackCompareFunction;
+ mStencilFrontCompareFunction = stencilFrontCompareFunction;
+ mStencilReadMask = stencilReadMask;
+ CallGLStencilFunc(gl);
+}
- mStencilReference = stencilReference;
- CallGLStencilFunc(gl);
+void PersistentPipelineState::SetStencilReference(const OpenGLFunctions& gl,
+ uint32_t stencilReference) {
+ if (mStencilReference == stencilReference) {
+ return;
}
- void PersistentPipelineState::CallGLStencilFunc(const OpenGLFunctions& gl) {
- gl.StencilFuncSeparate(GL_BACK, mStencilBackCompareFunction, mStencilReference,
- mStencilReadMask);
- gl.StencilFuncSeparate(GL_FRONT, mStencilFrontCompareFunction, mStencilReference,
- mStencilReadMask);
- }
+ mStencilReference = stencilReference;
+ CallGLStencilFunc(gl);
+}
+
+void PersistentPipelineState::CallGLStencilFunc(const OpenGLFunctions& gl) {
+ gl.StencilFuncSeparate(GL_BACK, mStencilBackCompareFunction, mStencilReference,
+ mStencilReadMask);
+ gl.StencilFuncSeparate(GL_FRONT, mStencilFrontCompareFunction, mStencilReference,
+ mStencilReadMask);
+}
} // namespace dawn::native::opengl
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/PersistentPipelineStateGL.h b/chromium/third_party/dawn/src/dawn/native/opengl/PersistentPipelineStateGL.h
index fdfe293b570..8dec4b58df8 100644
--- a/chromium/third_party/dawn/src/dawn/native/opengl/PersistentPipelineStateGL.h
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/PersistentPipelineStateGL.h
@@ -20,25 +20,25 @@
namespace dawn::native::opengl {
- struct OpenGLFunctions;
-
- class PersistentPipelineState {
- public:
- void SetDefaultState(const OpenGLFunctions& gl);
- void SetStencilFuncsAndMask(const OpenGLFunctions& gl,
- GLenum stencilBackCompareFunction,
- GLenum stencilFrontCompareFunction,
- uint32_t stencilReadMask);
- void SetStencilReference(const OpenGLFunctions& gl, uint32_t stencilReference);
-
- private:
- void CallGLStencilFunc(const OpenGLFunctions& gl);
-
- GLenum mStencilBackCompareFunction = GL_ALWAYS;
- GLenum mStencilFrontCompareFunction = GL_ALWAYS;
- GLuint mStencilReadMask = 0xffffffff;
- GLuint mStencilReference = 0;
- };
+struct OpenGLFunctions;
+
+class PersistentPipelineState {
+ public:
+ void SetDefaultState(const OpenGLFunctions& gl);
+ void SetStencilFuncsAndMask(const OpenGLFunctions& gl,
+ GLenum stencilBackCompareFunction,
+ GLenum stencilFrontCompareFunction,
+ uint32_t stencilReadMask);
+ void SetStencilReference(const OpenGLFunctions& gl, uint32_t stencilReference);
+
+ private:
+ void CallGLStencilFunc(const OpenGLFunctions& gl);
+
+ GLenum mStencilBackCompareFunction = GL_ALWAYS;
+ GLenum mStencilFrontCompareFunction = GL_ALWAYS;
+ GLuint mStencilReadMask = 0xffffffff;
+ GLuint mStencilReference = 0;
+};
} // namespace dawn::native::opengl
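
PersistentPipelineState is a small redundant-state filter: the setters compare against the cached values and only re-issue glStencilFuncSeparate when something actually changed, which matters because the compare functions and read mask come from the pipeline while the reference value comes from a dynamic command. A sketch of how command execution might drive it (gl and persistentState are assumed to be initialized; the values are illustrative):

    persistentState.SetDefaultState(gl);                                     // initial GL calls
    persistentState.SetStencilFuncsAndMask(gl, GL_ALWAYS, GL_LEQUAL, 0xff);  // from the pipeline
    persistentState.SetStencilReference(gl, 0x01);  // from a SetStencilReference command
    persistentState.SetStencilReference(gl, 0x01);  // no-op: cached value unchanged, no GL call
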
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/PipelineGL.cpp b/chromium/third_party/dawn/src/dawn/native/opengl/PipelineGL.cpp
index 8890e6826c8..2ddabce6654 100644
--- a/chromium/third_party/dawn/src/dawn/native/opengl/PipelineGL.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/PipelineGL.cpp
@@ -14,6 +14,10 @@
#include "dawn/native/opengl/PipelineGL.h"
+#include <set>
+#include <sstream>
+#include <string>
+
#include "dawn/common/BitSetIterator.h"
#include "dawn/native/BindGroupLayout.h"
#include "dawn/native/Device.h"
@@ -24,195 +28,190 @@
#include "dawn/native/opengl/SamplerGL.h"
#include "dawn/native/opengl/ShaderModuleGL.h"
-#include <set>
-#include <sstream>
-
namespace dawn::native::opengl {
- namespace {
+namespace {
- GLenum GLShaderType(SingleShaderStage stage) {
- switch (stage) {
- case SingleShaderStage::Vertex:
- return GL_VERTEX_SHADER;
- case SingleShaderStage::Fragment:
- return GL_FRAGMENT_SHADER;
- case SingleShaderStage::Compute:
- return GL_COMPUTE_SHADER;
- }
- UNREACHABLE();
- }
-
- } // namespace
-
- PipelineGL::PipelineGL() : mProgram(0) {
+GLenum GLShaderType(SingleShaderStage stage) {
+ switch (stage) {
+ case SingleShaderStage::Vertex:
+ return GL_VERTEX_SHADER;
+ case SingleShaderStage::Fragment:
+ return GL_FRAGMENT_SHADER;
+ case SingleShaderStage::Compute:
+ return GL_COMPUTE_SHADER;
}
+ UNREACHABLE();
+}
- PipelineGL::~PipelineGL() = default;
-
- MaybeError PipelineGL::InitializeBase(const OpenGLFunctions& gl,
- const PipelineLayout* layout,
- const PerStage<ProgrammableStage>& stages) {
- auto CreateShader = [](const OpenGLFunctions& gl, GLenum type,
- const char* source) -> ResultOrError<GLuint> {
- GLuint shader = gl.CreateShader(type);
- gl.ShaderSource(shader, 1, &source, nullptr);
- gl.CompileShader(shader);
-
- GLint compileStatus = GL_FALSE;
- gl.GetShaderiv(shader, GL_COMPILE_STATUS, &compileStatus);
- if (compileStatus == GL_FALSE) {
- GLint infoLogLength = 0;
- gl.GetShaderiv(shader, GL_INFO_LOG_LENGTH, &infoLogLength);
-
- if (infoLogLength > 1) {
- std::vector<char> buffer(infoLogLength);
- gl.GetShaderInfoLog(shader, infoLogLength, nullptr, &buffer[0]);
- return DAWN_FORMAT_VALIDATION_ERROR("%s\nProgram compilation failed:\n%s",
- source, buffer.data());
- }
- }
- return shader;
- };
+} // namespace
- mProgram = gl.CreateProgram();
+PipelineGL::PipelineGL() : mProgram(0) {}
- // Compute the set of active stages.
- wgpu::ShaderStage activeStages = wgpu::ShaderStage::None;
- for (SingleShaderStage stage : IterateStages(kAllStages)) {
- if (stages[stage].module != nullptr) {
- activeStages |= StageBit(stage);
- }
- }
-
- // Create an OpenGL shader for each stage and gather the list of combined samplers.
- PerStage<CombinedSamplerInfo> combinedSamplers;
- bool needsDummySampler = false;
- std::vector<GLuint> glShaders;
- for (SingleShaderStage stage : IterateStages(activeStages)) {
- const ShaderModule* module = ToBackend(stages[stage].module.Get());
- std::string glsl;
- DAWN_TRY_ASSIGN(glsl, module->TranslateToGLSL(stages[stage].entryPoint.c_str(), stage,
- &combinedSamplers[stage], layout,
- &needsDummySampler));
- GLuint shader;
- DAWN_TRY_ASSIGN(shader, CreateShader(gl, GLShaderType(stage), glsl.c_str()));
- gl.AttachShader(mProgram, shader);
- glShaders.push_back(shader);
- }
-
- if (needsDummySampler) {
- SamplerDescriptor desc = {};
- ASSERT(desc.minFilter == wgpu::FilterMode::Nearest);
- ASSERT(desc.magFilter == wgpu::FilterMode::Nearest);
- ASSERT(desc.mipmapFilter == wgpu::FilterMode::Nearest);
- mDummySampler =
- ToBackend(layout->GetDevice()->GetOrCreateSampler(&desc).AcquireSuccess());
- }
+PipelineGL::~PipelineGL() = default;
- // Link all the shaders together.
- gl.LinkProgram(mProgram);
+MaybeError PipelineGL::InitializeBase(const OpenGLFunctions& gl,
+ const PipelineLayout* layout,
+ const PerStage<ProgrammableStage>& stages) {
+ auto CreateShader = [](const OpenGLFunctions& gl, GLenum type,
+ const char* source) -> ResultOrError<GLuint> {
+ GLuint shader = gl.CreateShader(type);
+ gl.ShaderSource(shader, 1, &source, nullptr);
+ gl.CompileShader(shader);
- GLint linkStatus = GL_FALSE;
- gl.GetProgramiv(mProgram, GL_LINK_STATUS, &linkStatus);
- if (linkStatus == GL_FALSE) {
+ GLint compileStatus = GL_FALSE;
+ gl.GetShaderiv(shader, GL_COMPILE_STATUS, &compileStatus);
+ if (compileStatus == GL_FALSE) {
GLint infoLogLength = 0;
- gl.GetProgramiv(mProgram, GL_INFO_LOG_LENGTH, &infoLogLength);
+ gl.GetShaderiv(shader, GL_INFO_LOG_LENGTH, &infoLogLength);
if (infoLogLength > 1) {
std::vector<char> buffer(infoLogLength);
- gl.GetProgramInfoLog(mProgram, infoLogLength, nullptr, &buffer[0]);
- return DAWN_FORMAT_VALIDATION_ERROR("Program link failed:\n%s", buffer.data());
+ gl.GetShaderInfoLog(shader, infoLogLength, nullptr, &buffer[0]);
+ return DAWN_FORMAT_VALIDATION_ERROR("%s\nProgram compilation failed:\n%s", source,
+ buffer.data());
}
}
+ return shader;
+ };
- // Compute links between stages for combined samplers, then bind them to texture units
- gl.UseProgram(mProgram);
- const auto& indices = layout->GetBindingIndexInfo();
+ mProgram = gl.CreateProgram();
- std::set<CombinedSampler> combinedSamplersSet;
- for (SingleShaderStage stage : IterateStages(activeStages)) {
- for (const CombinedSampler& combined : combinedSamplers[stage]) {
- combinedSamplersSet.insert(combined);
- }
+ // Compute the set of active stages.
+ wgpu::ShaderStage activeStages = wgpu::ShaderStage::None;
+ for (SingleShaderStage stage : IterateStages(kAllStages)) {
+ if (stages[stage].module != nullptr) {
+ activeStages |= StageBit(stage);
}
+ }
- mUnitsForSamplers.resize(layout->GetNumSamplers());
- mUnitsForTextures.resize(layout->GetNumSampledTextures());
-
- GLuint textureUnit = layout->GetTextureUnitsUsed();
- for (const auto& combined : combinedSamplersSet) {
- const std::string& name = combined.GetName();
- GLint location = gl.GetUniformLocation(mProgram, name.c_str());
+ // Create an OpenGL shader for each stage and gather the list of combined samplers.
+ PerStage<CombinedSamplerInfo> combinedSamplers;
+ bool needsPlaceholderSampler = false;
+ std::vector<GLuint> glShaders;
+ for (SingleShaderStage stage : IterateStages(activeStages)) {
+ const ShaderModule* module = ToBackend(stages[stage].module.Get());
+ std::string glsl;
+ DAWN_TRY_ASSIGN(glsl, module->TranslateToGLSL(stages[stage].entryPoint.c_str(), stage,
+ &combinedSamplers[stage], layout,
+ &needsPlaceholderSampler));
+ GLuint shader;
+ DAWN_TRY_ASSIGN(shader, CreateShader(gl, GLShaderType(stage), glsl.c_str()));
+ gl.AttachShader(mProgram, shader);
+ glShaders.push_back(shader);
+ }
- if (location == -1) {
- continue;
- }
+ if (needsPlaceholderSampler) {
+ SamplerDescriptor desc = {};
+ ASSERT(desc.minFilter == wgpu::FilterMode::Nearest);
+ ASSERT(desc.magFilter == wgpu::FilterMode::Nearest);
+ ASSERT(desc.mipmapFilter == wgpu::FilterMode::Nearest);
+ mPlaceholderSampler =
+ ToBackend(layout->GetDevice()->GetOrCreateSampler(&desc).AcquireSuccess());
+ }
- gl.Uniform1i(location, textureUnit);
+ // Link all the shaders together.
+ gl.LinkProgram(mProgram);
- bool shouldUseFiltering;
- {
- const BindGroupLayoutBase* bgl =
- layout->GetBindGroupLayout(combined.textureLocation.group);
- BindingIndex bindingIndex = bgl->GetBindingIndex(combined.textureLocation.binding);
+ GLint linkStatus = GL_FALSE;
+ gl.GetProgramiv(mProgram, GL_LINK_STATUS, &linkStatus);
+ if (linkStatus == GL_FALSE) {
+ GLint infoLogLength = 0;
+ gl.GetProgramiv(mProgram, GL_INFO_LOG_LENGTH, &infoLogLength);
- GLuint textureIndex = indices[combined.textureLocation.group][bindingIndex];
- mUnitsForTextures[textureIndex].push_back(textureUnit);
+ if (infoLogLength > 1) {
+ std::vector<char> buffer(infoLogLength);
+ gl.GetProgramInfoLog(mProgram, infoLogLength, nullptr, &buffer[0]);
+ return DAWN_FORMAT_VALIDATION_ERROR("Program link failed:\n%s", buffer.data());
+ }
+ }
- shouldUseFiltering = bgl->GetBindingInfo(bindingIndex).texture.sampleType ==
- wgpu::TextureSampleType::Float;
- }
- {
- if (combined.useDummySampler) {
- mDummySamplerUnits.push_back(textureUnit);
- } else {
- const BindGroupLayoutBase* bgl =
- layout->GetBindGroupLayout(combined.samplerLocation.group);
- BindingIndex bindingIndex =
- bgl->GetBindingIndex(combined.samplerLocation.binding);
-
- GLuint samplerIndex = indices[combined.samplerLocation.group][bindingIndex];
- mUnitsForSamplers[samplerIndex].push_back({textureUnit, shouldUseFiltering});
- }
- }
+ // Compute links between stages for combined samplers, then bind them to texture units
+ gl.UseProgram(mProgram);
+ const auto& indices = layout->GetBindingIndexInfo();
- textureUnit++;
+ std::set<CombinedSampler> combinedSamplersSet;
+ for (SingleShaderStage stage : IterateStages(activeStages)) {
+ for (const CombinedSampler& combined : combinedSamplers[stage]) {
+ combinedSamplersSet.insert(combined);
}
+ }
+
+ mUnitsForSamplers.resize(layout->GetNumSamplers());
+ mUnitsForTextures.resize(layout->GetNumSampledTextures());
+
+ GLuint textureUnit = layout->GetTextureUnitsUsed();
+ for (const auto& combined : combinedSamplersSet) {
+ const std::string& name = combined.GetName();
+ GLint location = gl.GetUniformLocation(mProgram, name.c_str());
- for (GLuint glShader : glShaders) {
- gl.DetachShader(mProgram, glShader);
- gl.DeleteShader(glShader);
+ if (location == -1) {
+ continue;
}
- return {};
- }
+ gl.Uniform1i(location, textureUnit);
- void PipelineGL::DeleteProgram(const OpenGLFunctions& gl) {
- gl.DeleteProgram(mProgram);
- }
+ bool shouldUseFiltering;
+ {
+ const BindGroupLayoutBase* bgl =
+ layout->GetBindGroupLayout(combined.textureLocation.group);
+ BindingIndex bindingIndex = bgl->GetBindingIndex(combined.textureLocation.binding);
- const std::vector<PipelineGL::SamplerUnit>& PipelineGL::GetTextureUnitsForSampler(
- GLuint index) const {
- ASSERT(index < mUnitsForSamplers.size());
- return mUnitsForSamplers[index];
- }
+ GLuint textureIndex = indices[combined.textureLocation.group][bindingIndex];
+ mUnitsForTextures[textureIndex].push_back(textureUnit);
+
+ shouldUseFiltering = bgl->GetBindingInfo(bindingIndex).texture.sampleType ==
+ wgpu::TextureSampleType::Float;
+ }
+ {
+ if (combined.usePlaceholderSampler) {
+ mPlaceholderSamplerUnits.push_back(textureUnit);
+ } else {
+ const BindGroupLayoutBase* bgl =
+ layout->GetBindGroupLayout(combined.samplerLocation.group);
+ BindingIndex bindingIndex = bgl->GetBindingIndex(combined.samplerLocation.binding);
- const std::vector<GLuint>& PipelineGL::GetTextureUnitsForTextureView(GLuint index) const {
- ASSERT(index < mUnitsForTextures.size());
- return mUnitsForTextures[index];
+ GLuint samplerIndex = indices[combined.samplerLocation.group][bindingIndex];
+ mUnitsForSamplers[samplerIndex].push_back({textureUnit, shouldUseFiltering});
+ }
+ }
+
+ textureUnit++;
}
- GLuint PipelineGL::GetProgramHandle() const {
- return mProgram;
+ for (GLuint glShader : glShaders) {
+ gl.DetachShader(mProgram, glShader);
+ gl.DeleteShader(glShader);
}
- void PipelineGL::ApplyNow(const OpenGLFunctions& gl) {
- gl.UseProgram(mProgram);
- for (GLuint unit : mDummySamplerUnits) {
- ASSERT(mDummySampler.Get() != nullptr);
- gl.BindSampler(unit, mDummySampler->GetNonFilteringHandle());
- }
+ return {};
+}
+
+void PipelineGL::DeleteProgram(const OpenGLFunctions& gl) {
+ gl.DeleteProgram(mProgram);
+}
+
+const std::vector<PipelineGL::SamplerUnit>& PipelineGL::GetTextureUnitsForSampler(
+ GLuint index) const {
+ ASSERT(index < mUnitsForSamplers.size());
+ return mUnitsForSamplers[index];
+}
+
+const std::vector<GLuint>& PipelineGL::GetTextureUnitsForTextureView(GLuint index) const {
+ ASSERT(index < mUnitsForTextures.size());
+ return mUnitsForTextures[index];
+}
+
+GLuint PipelineGL::GetProgramHandle() const {
+ return mProgram;
+}
+
+void PipelineGL::ApplyNow(const OpenGLFunctions& gl) {
+ gl.UseProgram(mProgram);
+ for (GLuint unit : mPlaceholderSamplerUnits) {
+ ASSERT(mPlaceholderSampler.Get() != nullptr);
+ gl.BindSampler(unit, mPlaceholderSampler->GetNonFilteringHandle());
}
+}
} // namespace dawn::native::opengl
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/PipelineGL.h b/chromium/third_party/dawn/src/dawn/native/opengl/PipelineGL.h
index da6fa281a6e..c838bc79fdb 100644
--- a/chromium/third_party/dawn/src/dawn/native/opengl/PipelineGL.h
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/PipelineGL.h
@@ -15,54 +15,54 @@
#ifndef SRC_DAWN_NATIVE_OPENGL_PIPELINEGL_H_
#define SRC_DAWN_NATIVE_OPENGL_PIPELINEGL_H_
+#include <vector>
+
#include "dawn/native/Pipeline.h"
#include "dawn/native/PerStage.h"
#include "dawn/native/opengl/opengl_platform.h"
-#include <vector>
-
namespace dawn::native {
- struct ProgrammableStage;
+struct ProgrammableStage;
} // namespace dawn::native
namespace dawn::native::opengl {
- struct OpenGLFunctions;
- class PipelineLayout;
- class Sampler;
+struct OpenGLFunctions;
+class PipelineLayout;
+class Sampler;
- class PipelineGL {
- public:
- PipelineGL();
- ~PipelineGL();
+class PipelineGL {
+ public:
+ PipelineGL();
+ ~PipelineGL();
- // For each unit a sampler is bound to we need to know if we should use filtering or not
- // because int and uint texture are only complete without filtering.
- struct SamplerUnit {
- GLuint unit;
- bool shouldUseFiltering;
- };
- const std::vector<SamplerUnit>& GetTextureUnitsForSampler(GLuint index) const;
- const std::vector<GLuint>& GetTextureUnitsForTextureView(GLuint index) const;
- GLuint GetProgramHandle() const;
+ // For each unit a sampler is bound to we need to know if we should use filtering or not
+ // because int and uint texture are only complete without filtering.
+ struct SamplerUnit {
+ GLuint unit;
+ bool shouldUseFiltering;
+ };
+ const std::vector<SamplerUnit>& GetTextureUnitsForSampler(GLuint index) const;
+ const std::vector<GLuint>& GetTextureUnitsForTextureView(GLuint index) const;
+ GLuint GetProgramHandle() const;
- protected:
- void ApplyNow(const OpenGLFunctions& gl);
- MaybeError InitializeBase(const OpenGLFunctions& gl,
- const PipelineLayout* layout,
- const PerStage<ProgrammableStage>& stages);
- void DeleteProgram(const OpenGLFunctions& gl);
+ protected:
+ void ApplyNow(const OpenGLFunctions& gl);
+ MaybeError InitializeBase(const OpenGLFunctions& gl,
+ const PipelineLayout* layout,
+ const PerStage<ProgrammableStage>& stages);
+ void DeleteProgram(const OpenGLFunctions& gl);
- private:
- GLuint mProgram;
- std::vector<std::vector<SamplerUnit>> mUnitsForSamplers;
- std::vector<std::vector<GLuint>> mUnitsForTextures;
- std::vector<GLuint> mDummySamplerUnits;
- // TODO(enga): This could live on the Device, or elsewhere, but currently it makes Device
- // destruction complex as it requires the sampler to be destroyed before the sampler cache.
- Ref<Sampler> mDummySampler;
- };
+ private:
+ GLuint mProgram;
+ std::vector<std::vector<SamplerUnit>> mUnitsForSamplers;
+ std::vector<std::vector<GLuint>> mUnitsForTextures;
+ std::vector<GLuint> mPlaceholderSamplerUnits;
+ // TODO(enga): This could live on the Device, or elsewhere, but currently it makes Device
+ // destruction complex as it requires the sampler to be destroyed before the sampler cache.
+ Ref<Sampler> mPlaceholderSampler;
+};
} // namespace dawn::native::opengl
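
Besides the reflow, this change renames the "dummy" sampler to "placeholder" sampler throughout. The accessors above are what bind-group application code uses to fan a single sampler or texture view out to every GL texture unit its combined-sampler pairs landed on. A rough sketch of that consumption (pipeline, samplerIndex, filteringHandle and nonFilteringHandle are placeholders, not names from this patch):

    for (PipelineGL::SamplerUnit unit : pipeline->GetTextureUnitsForSampler(samplerIndex)) {
        // Int/uint textures are only complete without filtering, so a non-filtering
        // sampler object is substituted when shouldUseFiltering is false.
        GLuint handle = unit.shouldUseFiltering ? filteringHandle : nonFilteringHandle;
        gl.BindSampler(unit.unit, handle);
    }
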
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/PipelineLayoutGL.cpp b/chromium/third_party/dawn/src/dawn/native/opengl/PipelineLayoutGL.cpp
index 7dd54ab473e..c2d793d7b8f 100644
--- a/chromium/third_party/dawn/src/dawn/native/opengl/PipelineLayoutGL.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/PipelineLayoutGL.cpp
@@ -20,76 +20,75 @@
namespace dawn::native::opengl {
- PipelineLayout::PipelineLayout(Device* device, const PipelineLayoutDescriptor* descriptor)
- : PipelineLayoutBase(device, descriptor) {
- GLuint uboIndex = 0;
- GLuint samplerIndex = 0;
- GLuint sampledTextureIndex = 0;
- GLuint ssboIndex = 0;
- GLuint storageTextureIndex = 0;
+PipelineLayout::PipelineLayout(Device* device, const PipelineLayoutDescriptor* descriptor)
+ : PipelineLayoutBase(device, descriptor) {
+ GLuint uboIndex = 0;
+ GLuint samplerIndex = 0;
+ GLuint sampledTextureIndex = 0;
+ GLuint ssboIndex = 0;
+ GLuint storageTextureIndex = 0;
- for (BindGroupIndex group : IterateBitSet(GetBindGroupLayoutsMask())) {
- const BindGroupLayoutBase* bgl = GetBindGroupLayout(group);
- mIndexInfo[group].resize(bgl->GetBindingCount());
+ for (BindGroupIndex group : IterateBitSet(GetBindGroupLayoutsMask())) {
+ const BindGroupLayoutBase* bgl = GetBindGroupLayout(group);
+ mIndexInfo[group].resize(bgl->GetBindingCount());
- for (BindingIndex bindingIndex{0}; bindingIndex < bgl->GetBindingCount();
- ++bindingIndex) {
- const BindingInfo& bindingInfo = bgl->GetBindingInfo(bindingIndex);
- switch (bindingInfo.bindingType) {
- case BindingInfoType::Buffer:
- switch (bindingInfo.buffer.type) {
- case wgpu::BufferBindingType::Uniform:
- mIndexInfo[group][bindingIndex] = uboIndex;
- uboIndex++;
- break;
- case wgpu::BufferBindingType::Storage:
- case kInternalStorageBufferBinding:
- case wgpu::BufferBindingType::ReadOnlyStorage:
- mIndexInfo[group][bindingIndex] = ssboIndex;
- ssboIndex++;
- break;
- case wgpu::BufferBindingType::Undefined:
- UNREACHABLE();
- }
- break;
+ for (BindingIndex bindingIndex{0}; bindingIndex < bgl->GetBindingCount(); ++bindingIndex) {
+ const BindingInfo& bindingInfo = bgl->GetBindingInfo(bindingIndex);
+ switch (bindingInfo.bindingType) {
+ case BindingInfoType::Buffer:
+ switch (bindingInfo.buffer.type) {
+ case wgpu::BufferBindingType::Uniform:
+ mIndexInfo[group][bindingIndex] = uboIndex;
+ uboIndex++;
+ break;
+ case wgpu::BufferBindingType::Storage:
+ case kInternalStorageBufferBinding:
+ case wgpu::BufferBindingType::ReadOnlyStorage:
+ mIndexInfo[group][bindingIndex] = ssboIndex;
+ ssboIndex++;
+ break;
+ case wgpu::BufferBindingType::Undefined:
+ UNREACHABLE();
+ }
+ break;
- case BindingInfoType::Sampler:
- mIndexInfo[group][bindingIndex] = samplerIndex;
- samplerIndex++;
- break;
+ case BindingInfoType::Sampler:
+ mIndexInfo[group][bindingIndex] = samplerIndex;
+ samplerIndex++;
+ break;
- case BindingInfoType::Texture:
- case BindingInfoType::ExternalTexture:
- mIndexInfo[group][bindingIndex] = sampledTextureIndex;
- sampledTextureIndex++;
- break;
+ case BindingInfoType::Texture:
+ case BindingInfoType::ExternalTexture:
+ mIndexInfo[group][bindingIndex] = sampledTextureIndex;
+ sampledTextureIndex++;
+ break;
- case BindingInfoType::StorageTexture:
- mIndexInfo[group][bindingIndex] = storageTextureIndex;
- storageTextureIndex++;
- break;
- }
+ case BindingInfoType::StorageTexture:
+ mIndexInfo[group][bindingIndex] = storageTextureIndex;
+ storageTextureIndex++;
+ break;
}
}
-
- mNumSamplers = samplerIndex;
- mNumSampledTextures = sampledTextureIndex;
}
- const PipelineLayout::BindingIndexInfo& PipelineLayout::GetBindingIndexInfo() const {
- return mIndexInfo;
- }
+ mNumSamplers = samplerIndex;
+ mNumSampledTextures = sampledTextureIndex;
+}
- GLuint PipelineLayout::GetTextureUnitsUsed() const {
- return 0;
- }
+const PipelineLayout::BindingIndexInfo& PipelineLayout::GetBindingIndexInfo() const {
+ return mIndexInfo;
+}
- size_t PipelineLayout::GetNumSamplers() const {
- return mNumSamplers;
- }
+GLuint PipelineLayout::GetTextureUnitsUsed() const {
+ return 0;
+}
- size_t PipelineLayout::GetNumSampledTextures() const {
- return mNumSampledTextures;
- }
+size_t PipelineLayout::GetNumSamplers() const {
+ return mNumSamplers;
+}
+
+size_t PipelineLayout::GetNumSampledTextures() const {
+ return mNumSampledTextures;
+}
} // namespace dawn::native::opengl
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/PipelineLayoutGL.h b/chromium/third_party/dawn/src/dawn/native/opengl/PipelineLayoutGL.h
index a315221ba77..a278e2c3c23 100644
--- a/chromium/third_party/dawn/src/dawn/native/opengl/PipelineLayoutGL.h
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/PipelineLayoutGL.h
@@ -24,26 +24,26 @@
namespace dawn::native::opengl {
- class Device;
-
- class PipelineLayout final : public PipelineLayoutBase {
- public:
- PipelineLayout(Device* device, const PipelineLayoutDescriptor* descriptor);
-
- using BindingIndexInfo =
- ityp::array<BindGroupIndex, ityp::vector<BindingIndex, GLuint>, kMaxBindGroups>;
- const BindingIndexInfo& GetBindingIndexInfo() const;
-
- GLuint GetTextureUnitsUsed() const;
- size_t GetNumSamplers() const;
- size_t GetNumSampledTextures() const;
-
- private:
- ~PipelineLayout() override = default;
- BindingIndexInfo mIndexInfo;
- size_t mNumSamplers;
- size_t mNumSampledTextures;
- };
+class Device;
+
+class PipelineLayout final : public PipelineLayoutBase {
+ public:
+ PipelineLayout(Device* device, const PipelineLayoutDescriptor* descriptor);
+
+ using BindingIndexInfo =
+ ityp::array<BindGroupIndex, ityp::vector<BindingIndex, GLuint>, kMaxBindGroups>;
+ const BindingIndexInfo& GetBindingIndexInfo() const;
+
+ GLuint GetTextureUnitsUsed() const;
+ size_t GetNumSamplers() const;
+ size_t GetNumSampledTextures() const;
+
+ private:
+ ~PipelineLayout() override = default;
+ BindingIndexInfo mIndexInfo;
+ size_t mNumSamplers;
+ size_t mNumSampledTextures;
+};
} // namespace dawn::native::opengl
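
PipelineLayout flattens WebGPU (group, binding) pairs into per-class GL indices: uniform buffers, storage buffers, samplers, sampled textures and storage textures each get their own running counter, and GetBindingIndexInfo() exposes the resulting table. A sketch of a lookup at bind time (group, bindingIndex, buffer, offset and size are placeholders, and the exact GL call depends on the binding type):

    const PipelineLayout::BindingIndexInfo& indices = layout->GetBindingIndexInfo();
    GLuint uboBindingPoint = indices[group][bindingIndex];  // e.g. for a uniform buffer binding
    gl.BindBufferRange(GL_UNIFORM_BUFFER, uboBindingPoint, buffer->GetHandle(), offset, size);
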
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/QuerySetGL.cpp b/chromium/third_party/dawn/src/dawn/native/opengl/QuerySetGL.cpp
index cdf98580cd6..dc8424c3732 100644
--- a/chromium/third_party/dawn/src/dawn/native/opengl/QuerySetGL.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/QuerySetGL.cpp
@@ -18,10 +18,9 @@
namespace dawn::native::opengl {
- QuerySet::QuerySet(Device* device, const QuerySetDescriptor* descriptor)
- : QuerySetBase(device, descriptor) {
- }
+QuerySet::QuerySet(Device* device, const QuerySetDescriptor* descriptor)
+ : QuerySetBase(device, descriptor) {}
- QuerySet::~QuerySet() = default;
+QuerySet::~QuerySet() = default;
} // namespace dawn::native::opengl
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/QuerySetGL.h b/chromium/third_party/dawn/src/dawn/native/opengl/QuerySetGL.h
index 7e2165451e3..1121113fb99 100644
--- a/chromium/third_party/dawn/src/dawn/native/opengl/QuerySetGL.h
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/QuerySetGL.h
@@ -19,15 +19,15 @@
namespace dawn::native::opengl {
- class Device;
+class Device;
- class QuerySet final : public QuerySetBase {
- public:
- QuerySet(Device* device, const QuerySetDescriptor* descriptor);
+class QuerySet final : public QuerySetBase {
+ public:
+ QuerySet(Device* device, const QuerySetDescriptor* descriptor);
- private:
- ~QuerySet() override;
- };
+ private:
+ ~QuerySet() override;
+};
} // namespace dawn::native::opengl
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/QueueGL.cpp b/chromium/third_party/dawn/src/dawn/native/opengl/QueueGL.cpp
index 44a77a29b4e..68eb918ba7c 100644
--- a/chromium/third_party/dawn/src/dawn/native/opengl/QueueGL.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/QueueGL.cpp
@@ -23,60 +23,56 @@
namespace dawn::native::opengl {
- Queue::Queue(Device* device, const QueueDescriptor* descriptor)
- : QueueBase(device, descriptor) {
- }
-
- MaybeError Queue::SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) {
- Device* device = ToBackend(GetDevice());
+Queue::Queue(Device* device, const QueueDescriptor* descriptor) : QueueBase(device, descriptor) {}
- TRACE_EVENT_BEGIN0(GetDevice()->GetPlatform(), Recording, "CommandBufferGL::Execute");
- for (uint32_t i = 0; i < commandCount; ++i) {
- DAWN_TRY(ToBackend(commands[i])->Execute());
- }
- TRACE_EVENT_END0(GetDevice()->GetPlatform(), Recording, "CommandBufferGL::Execute");
+MaybeError Queue::SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) {
+ Device* device = ToBackend(GetDevice());
- device->SubmitFenceSync();
- return {};
+ TRACE_EVENT_BEGIN0(GetDevice()->GetPlatform(), Recording, "CommandBufferGL::Execute");
+ for (uint32_t i = 0; i < commandCount; ++i) {
+ DAWN_TRY(ToBackend(commands[i])->Execute());
}
+ TRACE_EVENT_END0(GetDevice()->GetPlatform(), Recording, "CommandBufferGL::Execute");
- MaybeError Queue::WriteBufferImpl(BufferBase* buffer,
- uint64_t bufferOffset,
- const void* data,
- size_t size) {
- const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
+ device->SubmitFenceSync();
+ return {};
+}
- ToBackend(buffer)->EnsureDataInitializedAsDestination(bufferOffset, size);
+MaybeError Queue::WriteBufferImpl(BufferBase* buffer,
+ uint64_t bufferOffset,
+ const void* data,
+ size_t size) {
+ const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
- gl.BindBuffer(GL_ARRAY_BUFFER, ToBackend(buffer)->GetHandle());
- gl.BufferSubData(GL_ARRAY_BUFFER, bufferOffset, size, data);
- return {};
- }
+ ToBackend(buffer)->EnsureDataInitializedAsDestination(bufferOffset, size);
+
+ gl.BindBuffer(GL_ARRAY_BUFFER, ToBackend(buffer)->GetHandle());
+ gl.BufferSubData(GL_ARRAY_BUFFER, bufferOffset, size, data);
+ return {};
+}
- MaybeError Queue::WriteTextureImpl(const ImageCopyTexture& destination,
- const void* data,
- const TextureDataLayout& dataLayout,
- const Extent3D& writeSizePixel) {
- DAWN_INVALID_IF(destination.aspect == wgpu::TextureAspect::StencilOnly,
- "Writes to stencil textures unsupported on the OpenGL backend.");
+MaybeError Queue::WriteTextureImpl(const ImageCopyTexture& destination,
+ const void* data,
+ const TextureDataLayout& dataLayout,
+ const Extent3D& writeSizePixel) {
+ DAWN_INVALID_IF(destination.aspect == wgpu::TextureAspect::StencilOnly,
+ "Writes to stencil textures unsupported on the OpenGL backend.");
- TextureCopy textureCopy;
- textureCopy.texture = destination.texture;
- textureCopy.mipLevel = destination.mipLevel;
- textureCopy.origin = destination.origin;
- textureCopy.aspect =
- SelectFormatAspects(destination.texture->GetFormat(), destination.aspect);
+ TextureCopy textureCopy;
+ textureCopy.texture = destination.texture;
+ textureCopy.mipLevel = destination.mipLevel;
+ textureCopy.origin = destination.origin;
+ textureCopy.aspect = SelectFormatAspects(destination.texture->GetFormat(), destination.aspect);
- SubresourceRange range = GetSubresourcesAffectedByCopy(textureCopy, writeSizePixel);
- if (IsCompleteSubresourceCopiedTo(destination.texture, writeSizePixel,
- destination.mipLevel)) {
- destination.texture->SetIsSubresourceContentInitialized(true, range);
- } else {
- ToBackend(destination.texture)->EnsureSubresourceContentInitialized(range);
- }
- DoTexSubImage(ToBackend(GetDevice())->gl, textureCopy, data, dataLayout, writeSizePixel);
- ToBackend(destination.texture)->Touch();
- return {};
+ SubresourceRange range = GetSubresourcesAffectedByCopy(textureCopy, writeSizePixel);
+ if (IsCompleteSubresourceCopiedTo(destination.texture, writeSizePixel, destination.mipLevel)) {
+ destination.texture->SetIsSubresourceContentInitialized(true, range);
+ } else {
+ ToBackend(destination.texture)->EnsureSubresourceContentInitialized(range);
}
+ DoTexSubImage(ToBackend(GetDevice())->gl, textureCopy, data, dataLayout, writeSizePixel);
+ ToBackend(destination.texture)->Touch();
+ return {};
+}
} // namespace dawn::native::opengl
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/QueueGL.h b/chromium/third_party/dawn/src/dawn/native/opengl/QueueGL.h
index c3471057501..962dd213034 100644
--- a/chromium/third_party/dawn/src/dawn/native/opengl/QueueGL.h
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/QueueGL.h
@@ -19,23 +19,23 @@
namespace dawn::native::opengl {
- class Device;
-
- class Queue final : public QueueBase {
- public:
- Queue(Device* device, const QueueDescriptor* descriptor);
-
- private:
- MaybeError SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) override;
- MaybeError WriteBufferImpl(BufferBase* buffer,
- uint64_t bufferOffset,
- const void* data,
- size_t size) override;
- MaybeError WriteTextureImpl(const ImageCopyTexture& destination,
- const void* data,
- const TextureDataLayout& dataLayout,
- const Extent3D& writeSizePixel) override;
- };
+class Device;
+
+class Queue final : public QueueBase {
+ public:
+ Queue(Device* device, const QueueDescriptor* descriptor);
+
+ private:
+ MaybeError SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) override;
+ MaybeError WriteBufferImpl(BufferBase* buffer,
+ uint64_t bufferOffset,
+ const void* data,
+ size_t size) override;
+ MaybeError WriteTextureImpl(const ImageCopyTexture& destination,
+ const void* data,
+ const TextureDataLayout& dataLayout,
+ const Extent3D& writeSizePixel) override;
+};
} // namespace dawn::native::opengl
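
Both write paths end up as plain GL calls: WriteBufferImpl binds the destination to GL_ARRAY_BUFFER (the binding point is irrelevant for BufferSubData) and WriteTextureImpl forwards to DoTexSubImage after initializing any partially written subresources. From the API side these are reached through the ordinary queue entry points, e.g. (a sketch; queue, buffer and data are placeholders):

    // wgpu::Queue::WriteBuffer routes through Queue::WriteBufferImpl above.
    queue.WriteBuffer(buffer, /*bufferOffset=*/0, data.data(), data.size());
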
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/RenderPipelineGL.cpp b/chromium/third_party/dawn/src/dawn/native/opengl/RenderPipelineGL.cpp
index 5e4ddce6e72..54b6ffba668 100644
--- a/chromium/third_party/dawn/src/dawn/native/opengl/RenderPipelineGL.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/RenderPipelineGL.cpp
@@ -21,325 +21,324 @@
namespace dawn::native::opengl {
- namespace {
-
- GLenum GLPrimitiveTopology(wgpu::PrimitiveTopology primitiveTopology) {
- switch (primitiveTopology) {
- case wgpu::PrimitiveTopology::PointList:
- return GL_POINTS;
- case wgpu::PrimitiveTopology::LineList:
- return GL_LINES;
- case wgpu::PrimitiveTopology::LineStrip:
- return GL_LINE_STRIP;
- case wgpu::PrimitiveTopology::TriangleList:
- return GL_TRIANGLES;
- case wgpu::PrimitiveTopology::TriangleStrip:
- return GL_TRIANGLE_STRIP;
- }
- UNREACHABLE();
- }
-
- void ApplyFrontFaceAndCulling(const OpenGLFunctions& gl,
- wgpu::FrontFace face,
- wgpu::CullMode mode) {
- // Note that we invert winding direction in OpenGL. Because Y axis is up in OpenGL,
- // which is different from WebGPU and other backends (Y axis is down).
- GLenum direction = (face == wgpu::FrontFace::CCW) ? GL_CW : GL_CCW;
- gl.FrontFace(direction);
-
- if (mode == wgpu::CullMode::None) {
- gl.Disable(GL_CULL_FACE);
- } else {
- gl.Enable(GL_CULL_FACE);
-
- GLenum cullMode = (mode == wgpu::CullMode::Front) ? GL_FRONT : GL_BACK;
- gl.CullFace(cullMode);
- }
- }
-
- GLenum GLBlendFactor(wgpu::BlendFactor factor, bool alpha) {
- switch (factor) {
- case wgpu::BlendFactor::Zero:
- return GL_ZERO;
- case wgpu::BlendFactor::One:
- return GL_ONE;
- case wgpu::BlendFactor::Src:
- return GL_SRC_COLOR;
- case wgpu::BlendFactor::OneMinusSrc:
- return GL_ONE_MINUS_SRC_COLOR;
- case wgpu::BlendFactor::SrcAlpha:
- return GL_SRC_ALPHA;
- case wgpu::BlendFactor::OneMinusSrcAlpha:
- return GL_ONE_MINUS_SRC_ALPHA;
- case wgpu::BlendFactor::Dst:
- return GL_DST_COLOR;
- case wgpu::BlendFactor::OneMinusDst:
- return GL_ONE_MINUS_DST_COLOR;
- case wgpu::BlendFactor::DstAlpha:
- return GL_DST_ALPHA;
- case wgpu::BlendFactor::OneMinusDstAlpha:
- return GL_ONE_MINUS_DST_ALPHA;
- case wgpu::BlendFactor::SrcAlphaSaturated:
- return GL_SRC_ALPHA_SATURATE;
- case wgpu::BlendFactor::Constant:
- return alpha ? GL_CONSTANT_ALPHA : GL_CONSTANT_COLOR;
- case wgpu::BlendFactor::OneMinusConstant:
- return alpha ? GL_ONE_MINUS_CONSTANT_ALPHA : GL_ONE_MINUS_CONSTANT_COLOR;
- }
- UNREACHABLE();
- }
-
- GLenum GLBlendMode(wgpu::BlendOperation operation) {
- switch (operation) {
- case wgpu::BlendOperation::Add:
- return GL_FUNC_ADD;
- case wgpu::BlendOperation::Subtract:
- return GL_FUNC_SUBTRACT;
- case wgpu::BlendOperation::ReverseSubtract:
- return GL_FUNC_REVERSE_SUBTRACT;
- case wgpu::BlendOperation::Min:
- return GL_MIN;
- case wgpu::BlendOperation::Max:
- return GL_MAX;
- }
- UNREACHABLE();
- }
-
- void ApplyColorState(const OpenGLFunctions& gl,
- ColorAttachmentIndex attachment,
- const ColorTargetState* state) {
- GLuint colorBuffer = static_cast<GLuint>(static_cast<uint8_t>(attachment));
- if (state->blend != nullptr) {
- gl.Enablei(GL_BLEND, colorBuffer);
- gl.BlendEquationSeparatei(colorBuffer, GLBlendMode(state->blend->color.operation),
- GLBlendMode(state->blend->alpha.operation));
- gl.BlendFuncSeparatei(colorBuffer,
- GLBlendFactor(state->blend->color.srcFactor, false),
- GLBlendFactor(state->blend->color.dstFactor, false),
- GLBlendFactor(state->blend->alpha.srcFactor, true),
- GLBlendFactor(state->blend->alpha.dstFactor, true));
- } else {
- gl.Disablei(GL_BLEND, colorBuffer);
- }
- gl.ColorMaski(colorBuffer, state->writeMask & wgpu::ColorWriteMask::Red,
- state->writeMask & wgpu::ColorWriteMask::Green,
- state->writeMask & wgpu::ColorWriteMask::Blue,
- state->writeMask & wgpu::ColorWriteMask::Alpha);
- }
-
- void ApplyColorState(const OpenGLFunctions& gl, const ColorTargetState* state) {
- if (state->blend != nullptr) {
- gl.Enable(GL_BLEND);
- gl.BlendEquationSeparate(GLBlendMode(state->blend->color.operation),
- GLBlendMode(state->blend->alpha.operation));
- gl.BlendFuncSeparate(GLBlendFactor(state->blend->color.srcFactor, false),
- GLBlendFactor(state->blend->color.dstFactor, false),
- GLBlendFactor(state->blend->alpha.srcFactor, true),
- GLBlendFactor(state->blend->alpha.dstFactor, true));
- } else {
- gl.Disable(GL_BLEND);
- }
- gl.ColorMask(state->writeMask & wgpu::ColorWriteMask::Red,
- state->writeMask & wgpu::ColorWriteMask::Green,
- state->writeMask & wgpu::ColorWriteMask::Blue,
- state->writeMask & wgpu::ColorWriteMask::Alpha);
- }
-
- bool Equal(const BlendComponent& lhs, const BlendComponent& rhs) {
- return lhs.operation == rhs.operation && lhs.srcFactor == rhs.srcFactor &&
- lhs.dstFactor == rhs.dstFactor;
- }
-
- GLuint OpenGLStencilOperation(wgpu::StencilOperation stencilOperation) {
- switch (stencilOperation) {
- case wgpu::StencilOperation::Keep:
- return GL_KEEP;
- case wgpu::StencilOperation::Zero:
- return GL_ZERO;
- case wgpu::StencilOperation::Replace:
- return GL_REPLACE;
- case wgpu::StencilOperation::Invert:
- return GL_INVERT;
- case wgpu::StencilOperation::IncrementClamp:
- return GL_INCR;
- case wgpu::StencilOperation::DecrementClamp:
- return GL_DECR;
- case wgpu::StencilOperation::IncrementWrap:
- return GL_INCR_WRAP;
- case wgpu::StencilOperation::DecrementWrap:
- return GL_DECR_WRAP;
- }
- UNREACHABLE();
- }
-
- void ApplyDepthStencilState(const OpenGLFunctions& gl,
- const DepthStencilState* descriptor,
- PersistentPipelineState* persistentPipelineState) {
- // Depth writes only occur if depth is enabled
- if (descriptor->depthCompare == wgpu::CompareFunction::Always &&
- !descriptor->depthWriteEnabled) {
- gl.Disable(GL_DEPTH_TEST);
- } else {
- gl.Enable(GL_DEPTH_TEST);
- }
-
- if (descriptor->depthWriteEnabled) {
- gl.DepthMask(GL_TRUE);
- } else {
- gl.DepthMask(GL_FALSE);
- }
-
- gl.DepthFunc(ToOpenGLCompareFunction(descriptor->depthCompare));
-
- if (StencilTestEnabled(descriptor)) {
- gl.Enable(GL_STENCIL_TEST);
- } else {
- gl.Disable(GL_STENCIL_TEST);
- }
-
- GLenum backCompareFunction = ToOpenGLCompareFunction(descriptor->stencilBack.compare);
- GLenum frontCompareFunction = ToOpenGLCompareFunction(descriptor->stencilFront.compare);
- persistentPipelineState->SetStencilFuncsAndMask(
- gl, backCompareFunction, frontCompareFunction, descriptor->stencilReadMask);
-
- gl.StencilOpSeparate(GL_BACK, OpenGLStencilOperation(descriptor->stencilBack.failOp),
- OpenGLStencilOperation(descriptor->stencilBack.depthFailOp),
- OpenGLStencilOperation(descriptor->stencilBack.passOp));
- gl.StencilOpSeparate(GL_FRONT, OpenGLStencilOperation(descriptor->stencilFront.failOp),
- OpenGLStencilOperation(descriptor->stencilFront.depthFailOp),
- OpenGLStencilOperation(descriptor->stencilFront.passOp));
-
- gl.StencilMask(descriptor->stencilWriteMask);
- }
-
- } // anonymous namespace
-
- // static
- Ref<RenderPipeline> RenderPipeline::CreateUninitialized(
- Device* device,
- const RenderPipelineDescriptor* descriptor) {
- return AcquireRef(new RenderPipeline(device, descriptor));
+namespace {
+
+GLenum GLPrimitiveTopology(wgpu::PrimitiveTopology primitiveTopology) {
+ switch (primitiveTopology) {
+ case wgpu::PrimitiveTopology::PointList:
+ return GL_POINTS;
+ case wgpu::PrimitiveTopology::LineList:
+ return GL_LINES;
+ case wgpu::PrimitiveTopology::LineStrip:
+ return GL_LINE_STRIP;
+ case wgpu::PrimitiveTopology::TriangleList:
+ return GL_TRIANGLES;
+ case wgpu::PrimitiveTopology::TriangleStrip:
+ return GL_TRIANGLE_STRIP;
}
-
- RenderPipeline::RenderPipeline(Device* device, const RenderPipelineDescriptor* descriptor)
- : RenderPipelineBase(device, descriptor),
- mVertexArrayObject(0),
- mGlPrimitiveTopology(GLPrimitiveTopology(GetPrimitiveTopology())) {
+ UNREACHABLE();
+}
+
+void ApplyFrontFaceAndCulling(const OpenGLFunctions& gl,
+ wgpu::FrontFace face,
+ wgpu::CullMode mode) {
+    // Note that we invert the winding direction in OpenGL because the Y axis points up in
+    // OpenGL, unlike WebGPU and the other backends, where the Y axis points down.
+ GLenum direction = (face == wgpu::FrontFace::CCW) ? GL_CW : GL_CCW;
+ gl.FrontFace(direction);
+
+ if (mode == wgpu::CullMode::None) {
+ gl.Disable(GL_CULL_FACE);
+ } else {
+ gl.Enable(GL_CULL_FACE);
+
+ GLenum cullMode = (mode == wgpu::CullMode::Front) ? GL_FRONT : GL_BACK;
+ gl.CullFace(cullMode);
}
-
- MaybeError RenderPipeline::Initialize() {
- DAWN_TRY(
- InitializeBase(ToBackend(GetDevice())->gl, ToBackend(GetLayout()), GetAllStages()));
- CreateVAOForVertexState();
- return {};
+}
+
+GLenum GLBlendFactor(wgpu::BlendFactor factor, bool alpha) {
+ switch (factor) {
+ case wgpu::BlendFactor::Zero:
+ return GL_ZERO;
+ case wgpu::BlendFactor::One:
+ return GL_ONE;
+ case wgpu::BlendFactor::Src:
+ return GL_SRC_COLOR;
+ case wgpu::BlendFactor::OneMinusSrc:
+ return GL_ONE_MINUS_SRC_COLOR;
+ case wgpu::BlendFactor::SrcAlpha:
+ return GL_SRC_ALPHA;
+ case wgpu::BlendFactor::OneMinusSrcAlpha:
+ return GL_ONE_MINUS_SRC_ALPHA;
+ case wgpu::BlendFactor::Dst:
+ return GL_DST_COLOR;
+ case wgpu::BlendFactor::OneMinusDst:
+ return GL_ONE_MINUS_DST_COLOR;
+ case wgpu::BlendFactor::DstAlpha:
+ return GL_DST_ALPHA;
+ case wgpu::BlendFactor::OneMinusDstAlpha:
+ return GL_ONE_MINUS_DST_ALPHA;
+ case wgpu::BlendFactor::SrcAlphaSaturated:
+ return GL_SRC_ALPHA_SATURATE;
+ case wgpu::BlendFactor::Constant:
+ return alpha ? GL_CONSTANT_ALPHA : GL_CONSTANT_COLOR;
+ case wgpu::BlendFactor::OneMinusConstant:
+ return alpha ? GL_ONE_MINUS_CONSTANT_ALPHA : GL_ONE_MINUS_CONSTANT_COLOR;
}
-
- RenderPipeline::~RenderPipeline() = default;
-
- void RenderPipeline::DestroyImpl() {
- RenderPipelineBase::DestroyImpl();
- const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
- gl.DeleteVertexArrays(1, &mVertexArrayObject);
- gl.BindVertexArray(0);
- DeleteProgram(gl);
+ UNREACHABLE();
+}
+
+GLenum GLBlendMode(wgpu::BlendOperation operation) {
+ switch (operation) {
+ case wgpu::BlendOperation::Add:
+ return GL_FUNC_ADD;
+ case wgpu::BlendOperation::Subtract:
+ return GL_FUNC_SUBTRACT;
+ case wgpu::BlendOperation::ReverseSubtract:
+ return GL_FUNC_REVERSE_SUBTRACT;
+ case wgpu::BlendOperation::Min:
+ return GL_MIN;
+ case wgpu::BlendOperation::Max:
+ return GL_MAX;
+ }
+ UNREACHABLE();
+}
+
+void ApplyColorState(const OpenGLFunctions& gl,
+ ColorAttachmentIndex attachment,
+ const ColorTargetState* state) {
+ GLuint colorBuffer = static_cast<GLuint>(static_cast<uint8_t>(attachment));
+ if (state->blend != nullptr) {
+ gl.Enablei(GL_BLEND, colorBuffer);
+ gl.BlendEquationSeparatei(colorBuffer, GLBlendMode(state->blend->color.operation),
+ GLBlendMode(state->blend->alpha.operation));
+ gl.BlendFuncSeparatei(colorBuffer, GLBlendFactor(state->blend->color.srcFactor, false),
+ GLBlendFactor(state->blend->color.dstFactor, false),
+ GLBlendFactor(state->blend->alpha.srcFactor, true),
+ GLBlendFactor(state->blend->alpha.dstFactor, true));
+ } else {
+ gl.Disablei(GL_BLEND, colorBuffer);
+ }
+ gl.ColorMaski(colorBuffer, state->writeMask & wgpu::ColorWriteMask::Red,
+ state->writeMask & wgpu::ColorWriteMask::Green,
+ state->writeMask & wgpu::ColorWriteMask::Blue,
+ state->writeMask & wgpu::ColorWriteMask::Alpha);
+}
+
+void ApplyColorState(const OpenGLFunctions& gl, const ColorTargetState* state) {
+ if (state->blend != nullptr) {
+ gl.Enable(GL_BLEND);
+ gl.BlendEquationSeparate(GLBlendMode(state->blend->color.operation),
+ GLBlendMode(state->blend->alpha.operation));
+ gl.BlendFuncSeparate(GLBlendFactor(state->blend->color.srcFactor, false),
+ GLBlendFactor(state->blend->color.dstFactor, false),
+ GLBlendFactor(state->blend->alpha.srcFactor, true),
+ GLBlendFactor(state->blend->alpha.dstFactor, true));
+ } else {
+ gl.Disable(GL_BLEND);
+ }
+ gl.ColorMask(state->writeMask & wgpu::ColorWriteMask::Red,
+ state->writeMask & wgpu::ColorWriteMask::Green,
+ state->writeMask & wgpu::ColorWriteMask::Blue,
+ state->writeMask & wgpu::ColorWriteMask::Alpha);
+}
+
+bool Equal(const BlendComponent& lhs, const BlendComponent& rhs) {
+ return lhs.operation == rhs.operation && lhs.srcFactor == rhs.srcFactor &&
+ lhs.dstFactor == rhs.dstFactor;
+}
+
+GLuint OpenGLStencilOperation(wgpu::StencilOperation stencilOperation) {
+ switch (stencilOperation) {
+ case wgpu::StencilOperation::Keep:
+ return GL_KEEP;
+ case wgpu::StencilOperation::Zero:
+ return GL_ZERO;
+ case wgpu::StencilOperation::Replace:
+ return GL_REPLACE;
+ case wgpu::StencilOperation::Invert:
+ return GL_INVERT;
+ case wgpu::StencilOperation::IncrementClamp:
+ return GL_INCR;
+ case wgpu::StencilOperation::DecrementClamp:
+ return GL_DECR;
+ case wgpu::StencilOperation::IncrementWrap:
+ return GL_INCR_WRAP;
+ case wgpu::StencilOperation::DecrementWrap:
+ return GL_DECR_WRAP;
+ }
+ UNREACHABLE();
+}
+
+void ApplyDepthStencilState(const OpenGLFunctions& gl,
+ const DepthStencilState* descriptor,
+ PersistentPipelineState* persistentPipelineState) {
+ // Depth writes only occur if depth is enabled
+ if (descriptor->depthCompare == wgpu::CompareFunction::Always &&
+ !descriptor->depthWriteEnabled) {
+ gl.Disable(GL_DEPTH_TEST);
+ } else {
+ gl.Enable(GL_DEPTH_TEST);
}
- GLenum RenderPipeline::GetGLPrimitiveTopology() const {
- return mGlPrimitiveTopology;
+ if (descriptor->depthWriteEnabled) {
+ gl.DepthMask(GL_TRUE);
+ } else {
+ gl.DepthMask(GL_FALSE);
}
- ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes>
- RenderPipeline::GetAttributesUsingVertexBuffer(VertexBufferSlot slot) const {
- ASSERT(!IsError());
- return mAttributesUsingVertexBuffer[slot];
+ gl.DepthFunc(ToOpenGLCompareFunction(descriptor->depthCompare));
+
+ if (StencilTestEnabled(descriptor)) {
+ gl.Enable(GL_STENCIL_TEST);
+ } else {
+ gl.Disable(GL_STENCIL_TEST);
}
- void RenderPipeline::CreateVAOForVertexState() {
- const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
-
- gl.GenVertexArrays(1, &mVertexArrayObject);
- gl.BindVertexArray(mVertexArrayObject);
-
- for (VertexAttributeLocation location : IterateBitSet(GetAttributeLocationsUsed())) {
- const auto& attribute = GetAttribute(location);
- GLuint glAttrib = static_cast<GLuint>(static_cast<uint8_t>(location));
- gl.EnableVertexAttribArray(glAttrib);
-
- mAttributesUsingVertexBuffer[attribute.vertexBufferSlot][location] = true;
- const VertexBufferInfo& vertexBuffer = GetVertexBuffer(attribute.vertexBufferSlot);
-
- if (vertexBuffer.arrayStride == 0) {
- // Emulate a stride of zero (constant vertex attribute) by
- // setting the attribute instance divisor to a huge number.
- gl.VertexAttribDivisor(glAttrib, 0xffffffff);
- } else {
- switch (vertexBuffer.stepMode) {
- case wgpu::VertexStepMode::Vertex:
- break;
- case wgpu::VertexStepMode::Instance:
- gl.VertexAttribDivisor(glAttrib, 1);
- break;
- }
+ GLenum backCompareFunction = ToOpenGLCompareFunction(descriptor->stencilBack.compare);
+ GLenum frontCompareFunction = ToOpenGLCompareFunction(descriptor->stencilFront.compare);
+ persistentPipelineState->SetStencilFuncsAndMask(gl, backCompareFunction, frontCompareFunction,
+ descriptor->stencilReadMask);
+
+ gl.StencilOpSeparate(GL_BACK, OpenGLStencilOperation(descriptor->stencilBack.failOp),
+ OpenGLStencilOperation(descriptor->stencilBack.depthFailOp),
+ OpenGLStencilOperation(descriptor->stencilBack.passOp));
+ gl.StencilOpSeparate(GL_FRONT, OpenGLStencilOperation(descriptor->stencilFront.failOp),
+ OpenGLStencilOperation(descriptor->stencilFront.depthFailOp),
+ OpenGLStencilOperation(descriptor->stencilFront.passOp));
+
+ gl.StencilMask(descriptor->stencilWriteMask);
+}
+
+} // anonymous namespace
+
+// static
+Ref<RenderPipeline> RenderPipeline::CreateUninitialized(
+ Device* device,
+ const RenderPipelineDescriptor* descriptor) {
+ return AcquireRef(new RenderPipeline(device, descriptor));
+}
+
+RenderPipeline::RenderPipeline(Device* device, const RenderPipelineDescriptor* descriptor)
+ : RenderPipelineBase(device, descriptor),
+ mVertexArrayObject(0),
+ mGlPrimitiveTopology(GLPrimitiveTopology(GetPrimitiveTopology())) {}
+
+MaybeError RenderPipeline::Initialize() {
+ DAWN_TRY(InitializeBase(ToBackend(GetDevice())->gl, ToBackend(GetLayout()), GetAllStages()));
+ CreateVAOForVertexState();
+ return {};
+}
+
+RenderPipeline::~RenderPipeline() = default;
+
+void RenderPipeline::DestroyImpl() {
+ RenderPipelineBase::DestroyImpl();
+ const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
+ gl.DeleteVertexArrays(1, &mVertexArrayObject);
+ gl.BindVertexArray(0);
+ DeleteProgram(gl);
+}
+
+GLenum RenderPipeline::GetGLPrimitiveTopology() const {
+ return mGlPrimitiveTopology;
+}
+
+ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes>
+RenderPipeline::GetAttributesUsingVertexBuffer(VertexBufferSlot slot) const {
+ ASSERT(!IsError());
+ return mAttributesUsingVertexBuffer[slot];
+}
+
+void RenderPipeline::CreateVAOForVertexState() {
+ const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
+
+ gl.GenVertexArrays(1, &mVertexArrayObject);
+ gl.BindVertexArray(mVertexArrayObject);
+
+ for (VertexAttributeLocation location : IterateBitSet(GetAttributeLocationsUsed())) {
+ const auto& attribute = GetAttribute(location);
+ GLuint glAttrib = static_cast<GLuint>(static_cast<uint8_t>(location));
+ gl.EnableVertexAttribArray(glAttrib);
+
+ mAttributesUsingVertexBuffer[attribute.vertexBufferSlot][location] = true;
+ const VertexBufferInfo& vertexBuffer = GetVertexBuffer(attribute.vertexBufferSlot);
+
+ if (vertexBuffer.arrayStride == 0) {
+ // Emulate a stride of zero (constant vertex attribute) by
+ // setting the attribute instance divisor to a huge number.
+ gl.VertexAttribDivisor(glAttrib, 0xffffffff);
+ } else {
+ switch (vertexBuffer.stepMode) {
+ case wgpu::VertexStepMode::Vertex:
+ break;
+ case wgpu::VertexStepMode::Instance:
+ gl.VertexAttribDivisor(glAttrib, 1);
+ break;
+ case wgpu::VertexStepMode::VertexBufferNotUsed:
+ UNREACHABLE();
}
}
}
+}
- void RenderPipeline::ApplyNow(PersistentPipelineState& persistentPipelineState) {
- const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
- PipelineGL::ApplyNow(gl);
+void RenderPipeline::ApplyNow(PersistentPipelineState& persistentPipelineState) {
+ const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
+ PipelineGL::ApplyNow(gl);
- ASSERT(mVertexArrayObject);
- gl.BindVertexArray(mVertexArrayObject);
+ ASSERT(mVertexArrayObject);
+ gl.BindVertexArray(mVertexArrayObject);
- ApplyFrontFaceAndCulling(gl, GetFrontFace(), GetCullMode());
+ ApplyFrontFaceAndCulling(gl, GetFrontFace(), GetCullMode());
- ApplyDepthStencilState(gl, GetDepthStencilState(), &persistentPipelineState);
+ ApplyDepthStencilState(gl, GetDepthStencilState(), &persistentPipelineState);
- gl.SampleMaski(0, GetSampleMask());
- if (IsAlphaToCoverageEnabled()) {
- gl.Enable(GL_SAMPLE_ALPHA_TO_COVERAGE);
- } else {
- gl.Disable(GL_SAMPLE_ALPHA_TO_COVERAGE);
- }
+ gl.SampleMaski(0, GetSampleMask());
+ if (IsAlphaToCoverageEnabled()) {
+ gl.Enable(GL_SAMPLE_ALPHA_TO_COVERAGE);
+ } else {
+ gl.Disable(GL_SAMPLE_ALPHA_TO_COVERAGE);
+ }
- if (IsDepthBiasEnabled()) {
- gl.Enable(GL_POLYGON_OFFSET_FILL);
- float depthBias = GetDepthBias();
- float slopeScale = GetDepthBiasSlopeScale();
- if (gl.PolygonOffsetClamp != nullptr) {
- gl.PolygonOffsetClamp(slopeScale, depthBias, GetDepthBiasClamp());
- } else {
- gl.PolygonOffset(slopeScale, depthBias);
- }
+ if (IsDepthBiasEnabled()) {
+ gl.Enable(GL_POLYGON_OFFSET_FILL);
+ float depthBias = GetDepthBias();
+ float slopeScale = GetDepthBiasSlopeScale();
+ if (gl.PolygonOffsetClamp != nullptr) {
+ gl.PolygonOffsetClamp(slopeScale, depthBias, GetDepthBiasClamp());
} else {
- gl.Disable(GL_POLYGON_OFFSET_FILL);
+ gl.PolygonOffset(slopeScale, depthBias);
}
+ } else {
+ gl.Disable(GL_POLYGON_OFFSET_FILL);
+ }
- if (!GetDevice()->IsToggleEnabled(Toggle::DisableIndexedDrawBuffers)) {
- for (ColorAttachmentIndex attachmentSlot : IterateBitSet(GetColorAttachmentsMask())) {
- ApplyColorState(gl, attachmentSlot, GetColorTargetState(attachmentSlot));
- }
- } else {
- const ColorTargetState* prevDescriptor = nullptr;
- for (ColorAttachmentIndex attachmentSlot : IterateBitSet(GetColorAttachmentsMask())) {
- const ColorTargetState* descriptor = GetColorTargetState(attachmentSlot);
- if (!prevDescriptor) {
- ApplyColorState(gl, descriptor);
- prevDescriptor = descriptor;
- } else if ((descriptor->blend == nullptr) != (prevDescriptor->blend == nullptr)) {
- // TODO(crbug.com/dawn/582): GLES < 3.2 does not support different blend states
- // per color target. Add validation to prevent this as it is not.
+ if (!GetDevice()->IsToggleEnabled(Toggle::DisableIndexedDrawBuffers)) {
+ for (ColorAttachmentIndex attachmentSlot : IterateBitSet(GetColorAttachmentsMask())) {
+ ApplyColorState(gl, attachmentSlot, GetColorTargetState(attachmentSlot));
+ }
+ } else {
+ const ColorTargetState* prevDescriptor = nullptr;
+ for (ColorAttachmentIndex attachmentSlot : IterateBitSet(GetColorAttachmentsMask())) {
+ const ColorTargetState* descriptor = GetColorTargetState(attachmentSlot);
+ if (!prevDescriptor) {
+ ApplyColorState(gl, descriptor);
+ prevDescriptor = descriptor;
+ } else if ((descriptor->blend == nullptr) != (prevDescriptor->blend == nullptr)) {
+ // TODO(crbug.com/dawn/582): GLES < 3.2 does not support different blend states
+                // per color target. Add validation to prevent this, since it is not supported.
+ ASSERT(false);
+ } else if (descriptor->blend != nullptr) {
+ if (!Equal(descriptor->blend->alpha, prevDescriptor->blend->alpha) ||
+ !Equal(descriptor->blend->color, prevDescriptor->blend->color) ||
+ descriptor->writeMask != prevDescriptor->writeMask) {
+ // TODO(crbug.com/dawn/582)
ASSERT(false);
- } else if (descriptor->blend != nullptr) {
- if (!Equal(descriptor->blend->alpha, prevDescriptor->blend->alpha) ||
- !Equal(descriptor->blend->color, prevDescriptor->blend->color) ||
- descriptor->writeMask != prevDescriptor->writeMask) {
- // TODO(crbug.com/dawn/582)
- ASSERT(false);
- }
}
}
}
}
+}
} // namespace dawn::native::opengl
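
CreateVAOForVertexState above turns each WebGPU vertex-buffer step mode into a glVertexAttribDivisor value, and emulates arrayStride == 0 (a constant vertex attribute) by using a huge divisor so the attribute never advances. A self-contained sketch of just that mapping, with an illustrative StepMode enum standing in for wgpu::VertexStepMode.

// Standalone illustration (not Dawn code): divisor selection for a vertex attribute.
#include <cstdint>

enum class StepMode { Vertex, Instance };

// Divisor 0 advances the attribute per vertex, 1 per instance. A stride of zero
// is emulated with a huge divisor so the attribute effectively never advances
// and every vertex reads the same value.
uint32_t AttribDivisorFor(StepMode mode, uint64_t arrayStride) {
    if (arrayStride == 0) {
        return 0xffffffffu;  // constant-attribute emulation, as in the code above
    }
    switch (mode) {
        case StepMode::Vertex:
            return 0;
        case StepMode::Instance:
            return 1;
    }
    return 0;  // unreachable for a valid StepMode
}
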
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/RenderPipelineGL.h b/chromium/third_party/dawn/src/dawn/native/opengl/RenderPipelineGL.h
index c1cb7f27e8a..f059724c0fe 100644
--- a/chromium/third_party/dawn/src/dawn/native/opengl/RenderPipelineGL.h
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/RenderPipelineGL.h
@@ -15,47 +15,47 @@
#ifndef SRC_DAWN_NATIVE_OPENGL_RENDERPIPELINEGL_H_
#define SRC_DAWN_NATIVE_OPENGL_RENDERPIPELINEGL_H_
+#include <vector>
+
#include "dawn/native/RenderPipeline.h"
#include "dawn/native/opengl/PipelineGL.h"
#include "dawn/native/opengl/opengl_platform.h"
-#include <vector>
-
namespace dawn::native::opengl {
- class Device;
- class PersistentPipelineState;
+class Device;
+class PersistentPipelineState;
- class RenderPipeline final : public RenderPipelineBase, public PipelineGL {
- public:
- static Ref<RenderPipeline> CreateUninitialized(Device* device,
- const RenderPipelineDescriptor* descriptor);
+class RenderPipeline final : public RenderPipelineBase, public PipelineGL {
+ public:
+ static Ref<RenderPipeline> CreateUninitialized(Device* device,
+ const RenderPipelineDescriptor* descriptor);
- GLenum GetGLPrimitiveTopology() const;
- ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes> GetAttributesUsingVertexBuffer(
- VertexBufferSlot slot) const;
+ GLenum GetGLPrimitiveTopology() const;
+ ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes> GetAttributesUsingVertexBuffer(
+ VertexBufferSlot slot) const;
- void ApplyNow(PersistentPipelineState& persistentPipelineState);
+ void ApplyNow(PersistentPipelineState& persistentPipelineState);
- MaybeError Initialize() override;
+ MaybeError Initialize() override;
- private:
- RenderPipeline(Device* device, const RenderPipelineDescriptor* descriptor);
- ~RenderPipeline() override;
- void DestroyImpl() override;
+ private:
+ RenderPipeline(Device* device, const RenderPipelineDescriptor* descriptor);
+ ~RenderPipeline() override;
+ void DestroyImpl() override;
- void CreateVAOForVertexState();
+ void CreateVAOForVertexState();
- // TODO(yunchao.he@intel.com): vao need to be deduplicated between pipelines.
- GLuint mVertexArrayObject;
- GLenum mGlPrimitiveTopology;
+    // TODO(yunchao.he@intel.com): VAOs need to be deduplicated between pipelines.
+ GLuint mVertexArrayObject;
+ GLenum mGlPrimitiveTopology;
- ityp::array<VertexBufferSlot,
- ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes>,
- kMaxVertexBuffers>
- mAttributesUsingVertexBuffer;
- };
+ ityp::array<VertexBufferSlot,
+ ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes>,
+ kMaxVertexBuffers>
+ mAttributesUsingVertexBuffer;
+};
} // namespace dawn::native::opengl
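
The mAttributesUsingVertexBuffer member declared above keeps one bitset of attribute locations per vertex-buffer slot, so later code can ask "which attributes does buffer slot N feed?" in constant time. A plain-STL sketch of the same lookup structure, with illustrative limits in place of Dawn's ityp containers and constants.

// Standalone illustration (not Dawn code): per-vertex-buffer attribute masks.
#include <array>
#include <bitset>
#include <cstddef>

constexpr size_t kMaxVertexBuffers = 8;     // illustrative limits
constexpr size_t kMaxVertexAttributes = 16;

using AttributeMask = std::bitset<kMaxVertexAttributes>;
using AttributesPerBuffer = std::array<AttributeMask, kMaxVertexBuffers>;

// Record that an attribute location reads from a given vertex-buffer slot.
void RecordAttribute(AttributesPerBuffer& table, size_t bufferSlot, size_t attributeLocation) {
    table[bufferSlot].set(attributeLocation);
}

// Query the set of attribute locations fed by a vertex-buffer slot.
AttributeMask AttributesUsingBuffer(const AttributesPerBuffer& table, size_t bufferSlot) {
    return table[bufferSlot];
}
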
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/SamplerGL.cpp b/chromium/third_party/dawn/src/dawn/native/opengl/SamplerGL.cpp
index 77905305704..b40e1d6592f 100644
--- a/chromium/third_party/dawn/src/dawn/native/opengl/SamplerGL.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/SamplerGL.cpp
@@ -20,111 +20,109 @@
namespace dawn::native::opengl {
- namespace {
- GLenum MagFilterMode(wgpu::FilterMode filter) {
- switch (filter) {
+namespace {
+GLenum MagFilterMode(wgpu::FilterMode filter) {
+ switch (filter) {
+ case wgpu::FilterMode::Nearest:
+ return GL_NEAREST;
+ case wgpu::FilterMode::Linear:
+ return GL_LINEAR;
+ }
+ UNREACHABLE();
+}
+
+GLenum MinFilterMode(wgpu::FilterMode minFilter, wgpu::FilterMode mipMapFilter) {
+ switch (minFilter) {
+ case wgpu::FilterMode::Nearest:
+ switch (mipMapFilter) {
case wgpu::FilterMode::Nearest:
- return GL_NEAREST;
+ return GL_NEAREST_MIPMAP_NEAREST;
case wgpu::FilterMode::Linear:
- return GL_LINEAR;
+ return GL_NEAREST_MIPMAP_LINEAR;
}
- UNREACHABLE();
- }
-
- GLenum MinFilterMode(wgpu::FilterMode minFilter, wgpu::FilterMode mipMapFilter) {
- switch (minFilter) {
+ case wgpu::FilterMode::Linear:
+ switch (mipMapFilter) {
case wgpu::FilterMode::Nearest:
- switch (mipMapFilter) {
- case wgpu::FilterMode::Nearest:
- return GL_NEAREST_MIPMAP_NEAREST;
- case wgpu::FilterMode::Linear:
- return GL_NEAREST_MIPMAP_LINEAR;
- }
+ return GL_LINEAR_MIPMAP_NEAREST;
case wgpu::FilterMode::Linear:
- switch (mipMapFilter) {
- case wgpu::FilterMode::Nearest:
- return GL_LINEAR_MIPMAP_NEAREST;
- case wgpu::FilterMode::Linear:
- return GL_LINEAR_MIPMAP_LINEAR;
- }
+ return GL_LINEAR_MIPMAP_LINEAR;
}
- UNREACHABLE();
- }
-
- GLenum WrapMode(wgpu::AddressMode mode) {
- switch (mode) {
- case wgpu::AddressMode::Repeat:
- return GL_REPEAT;
- case wgpu::AddressMode::MirrorRepeat:
- return GL_MIRRORED_REPEAT;
- case wgpu::AddressMode::ClampToEdge:
- return GL_CLAMP_TO_EDGE;
- }
- UNREACHABLE();
- }
-
- } // namespace
-
- Sampler::Sampler(Device* device, const SamplerDescriptor* descriptor)
- : SamplerBase(device, descriptor) {
- const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
-
- gl.GenSamplers(1, &mFilteringHandle);
- SetupGLSampler(mFilteringHandle, descriptor, false);
-
- gl.GenSamplers(1, &mNonFilteringHandle);
- SetupGLSampler(mNonFilteringHandle, descriptor, true);
}
+ UNREACHABLE();
+}
+
+GLenum WrapMode(wgpu::AddressMode mode) {
+ switch (mode) {
+ case wgpu::AddressMode::Repeat:
+ return GL_REPEAT;
+ case wgpu::AddressMode::MirrorRepeat:
+ return GL_MIRRORED_REPEAT;
+ case wgpu::AddressMode::ClampToEdge:
+ return GL_CLAMP_TO_EDGE;
+ }
+ UNREACHABLE();
+}
+
+} // namespace
+
+Sampler::Sampler(Device* device, const SamplerDescriptor* descriptor)
+ : SamplerBase(device, descriptor) {
+ const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
+
+ gl.GenSamplers(1, &mFilteringHandle);
+ SetupGLSampler(mFilteringHandle, descriptor, false);
+
+ gl.GenSamplers(1, &mNonFilteringHandle);
+ SetupGLSampler(mNonFilteringHandle, descriptor, true);
+}
+
+Sampler::~Sampler() = default;
+
+void Sampler::DestroyImpl() {
+ SamplerBase::DestroyImpl();
+ const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
+ gl.DeleteSamplers(1, &mFilteringHandle);
+ gl.DeleteSamplers(1, &mNonFilteringHandle);
+}
+
+void Sampler::SetupGLSampler(GLuint sampler,
+ const SamplerDescriptor* descriptor,
+ bool forceNearest) {
+ Device* device = ToBackend(GetDevice());
+ const OpenGLFunctions& gl = device->gl;
+
+ if (forceNearest) {
+ gl.SamplerParameteri(sampler, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+ gl.SamplerParameteri(sampler, GL_TEXTURE_MIN_FILTER, GL_NEAREST_MIPMAP_NEAREST);
+ } else {
+ gl.SamplerParameteri(sampler, GL_TEXTURE_MAG_FILTER, MagFilterMode(descriptor->magFilter));
+ gl.SamplerParameteri(sampler, GL_TEXTURE_MIN_FILTER,
+ MinFilterMode(descriptor->minFilter, descriptor->mipmapFilter));
+ }
+ gl.SamplerParameteri(sampler, GL_TEXTURE_WRAP_R, WrapMode(descriptor->addressModeW));
+ gl.SamplerParameteri(sampler, GL_TEXTURE_WRAP_S, WrapMode(descriptor->addressModeU));
+ gl.SamplerParameteri(sampler, GL_TEXTURE_WRAP_T, WrapMode(descriptor->addressModeV));
- Sampler::~Sampler() = default;
+ gl.SamplerParameterf(sampler, GL_TEXTURE_MIN_LOD, descriptor->lodMinClamp);
+ gl.SamplerParameterf(sampler, GL_TEXTURE_MAX_LOD, descriptor->lodMaxClamp);
- void Sampler::DestroyImpl() {
- SamplerBase::DestroyImpl();
- const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
- gl.DeleteSamplers(1, &mFilteringHandle);
- gl.DeleteSamplers(1, &mNonFilteringHandle);
+ if (descriptor->compare != wgpu::CompareFunction::Undefined) {
+ gl.SamplerParameteri(sampler, GL_TEXTURE_COMPARE_MODE, GL_COMPARE_REF_TO_TEXTURE);
+ gl.SamplerParameteri(sampler, GL_TEXTURE_COMPARE_FUNC,
+ ToOpenGLCompareFunction(descriptor->compare));
}
- void Sampler::SetupGLSampler(GLuint sampler,
- const SamplerDescriptor* descriptor,
- bool forceNearest) {
- Device* device = ToBackend(GetDevice());
- const OpenGLFunctions& gl = device->gl;
-
- if (forceNearest) {
- gl.SamplerParameteri(sampler, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
- gl.SamplerParameteri(sampler, GL_TEXTURE_MIN_FILTER, GL_NEAREST_MIPMAP_NEAREST);
- } else {
- gl.SamplerParameteri(sampler, GL_TEXTURE_MAG_FILTER,
- MagFilterMode(descriptor->magFilter));
- gl.SamplerParameteri(sampler, GL_TEXTURE_MIN_FILTER,
- MinFilterMode(descriptor->minFilter, descriptor->mipmapFilter));
- }
- gl.SamplerParameteri(sampler, GL_TEXTURE_WRAP_R, WrapMode(descriptor->addressModeW));
- gl.SamplerParameteri(sampler, GL_TEXTURE_WRAP_S, WrapMode(descriptor->addressModeU));
- gl.SamplerParameteri(sampler, GL_TEXTURE_WRAP_T, WrapMode(descriptor->addressModeV));
-
- gl.SamplerParameterf(sampler, GL_TEXTURE_MIN_LOD, descriptor->lodMinClamp);
- gl.SamplerParameterf(sampler, GL_TEXTURE_MAX_LOD, descriptor->lodMaxClamp);
-
- if (descriptor->compare != wgpu::CompareFunction::Undefined) {
- gl.SamplerParameteri(sampler, GL_TEXTURE_COMPARE_MODE, GL_COMPARE_REF_TO_TEXTURE);
- gl.SamplerParameteri(sampler, GL_TEXTURE_COMPARE_FUNC,
- ToOpenGLCompareFunction(descriptor->compare));
- }
-
- if (gl.IsAtLeastGL(4, 6) ||
- gl.IsGLExtensionSupported("GL_EXT_texture_filter_anisotropic")) {
- gl.SamplerParameterf(sampler, GL_TEXTURE_MAX_ANISOTROPY, GetMaxAnisotropy());
- }
+ if (gl.IsAtLeastGL(4, 6) || gl.IsGLExtensionSupported("GL_EXT_texture_filter_anisotropic")) {
+ gl.SamplerParameterf(sampler, GL_TEXTURE_MAX_ANISOTROPY, GetMaxAnisotropy());
}
+}
- GLuint Sampler::GetFilteringHandle() const {
- return mFilteringHandle;
- }
+GLuint Sampler::GetFilteringHandle() const {
+ return mFilteringHandle;
+}
- GLuint Sampler::GetNonFilteringHandle() const {
- return mNonFilteringHandle;
- }
+GLuint Sampler::GetNonFilteringHandle() const {
+ return mNonFilteringHandle;
+}
} // namespace dawn::native::opengl
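
SetupGLSampler above builds two GL sampler objects per WebGPU sampler: the regular filtering one, and a forced-NEAREST one used to keep unsigned/signed integer textures complete. The less obvious part is collapsing WebGPU's separate min and mipmap filters into one GL minification enum; here is a standalone sketch of that 2x2 mapping, with the GL enum values written out from the core specification.

// Standalone illustration (not Dawn code): min/mipmap filter pair -> GL min filter.
#include <cstdint>

enum class Filter { Nearest, Linear };

// Core OpenGL minification-filter enum values.
constexpr uint32_t kGlNearestMipmapNearest = 0x2700;  // GL_NEAREST_MIPMAP_NEAREST
constexpr uint32_t kGlLinearMipmapNearest = 0x2701;   // GL_LINEAR_MIPMAP_NEAREST
constexpr uint32_t kGlNearestMipmapLinear = 0x2702;   // GL_NEAREST_MIPMAP_LINEAR
constexpr uint32_t kGlLinearMipmapLinear = 0x2703;    // GL_LINEAR_MIPMAP_LINEAR

uint32_t GlMinFilter(Filter minFilter, Filter mipmapFilter) {
    if (minFilter == Filter::Nearest) {
        return mipmapFilter == Filter::Nearest ? kGlNearestMipmapNearest : kGlNearestMipmapLinear;
    }
    return mipmapFilter == Filter::Nearest ? kGlLinearMipmapNearest : kGlLinearMipmapLinear;
}
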
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/SamplerGL.h b/chromium/third_party/dawn/src/dawn/native/opengl/SamplerGL.h
index 1afb6129a42..82ea9bb7e85 100644
--- a/chromium/third_party/dawn/src/dawn/native/opengl/SamplerGL.h
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/SamplerGL.h
@@ -21,27 +21,27 @@
namespace dawn::native::opengl {
- class Device;
+class Device;
- class Sampler final : public SamplerBase {
- public:
- Sampler(Device* device, const SamplerDescriptor* descriptor);
+class Sampler final : public SamplerBase {
+ public:
+ Sampler(Device* device, const SamplerDescriptor* descriptor);
- GLuint GetFilteringHandle() const;
- GLuint GetNonFilteringHandle() const;
+ GLuint GetFilteringHandle() const;
+ GLuint GetNonFilteringHandle() const;
- private:
- ~Sampler() override;
- void DestroyImpl() override;
+ private:
+ ~Sampler() override;
+ void DestroyImpl() override;
- void SetupGLSampler(GLuint sampler, const SamplerDescriptor* descriptor, bool forceNearest);
+ void SetupGLSampler(GLuint sampler, const SamplerDescriptor* descriptor, bool forceNearest);
- GLuint mFilteringHandle;
+ GLuint mFilteringHandle;
- // This is a sampler equivalent to mFilteringHandle except that it uses NEAREST filtering
- // for everything, which is important to preserve texture completeness for u/int textures.
- GLuint mNonFilteringHandle;
- };
+ // This is a sampler equivalent to mFilteringHandle except that it uses NEAREST filtering
+ // for everything, which is important to preserve texture completeness for u/int textures.
+ GLuint mNonFilteringHandle;
+};
} // namespace dawn::native::opengl
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/ShaderModuleGL.cpp b/chromium/third_party/dawn/src/dawn/native/opengl/ShaderModuleGL.cpp
index 6bda26bd081..253600b6c35 100644
--- a/chromium/third_party/dawn/src/dawn/native/opengl/ShaderModuleGL.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/ShaderModuleGL.cpp
@@ -14,6 +14,9 @@
#include "dawn/native/opengl/ShaderModuleGL.h"
+#include <sstream>
+#include <utility>
+
#include "dawn/native/BindGroupLayout.h"
#include "dawn/native/TintUtils.h"
#include "dawn/native/opengl/DeviceGL.h"
@@ -21,157 +24,156 @@
#include "dawn/platform/DawnPlatform.h"
#include "dawn/platform/tracing/TraceEvent.h"
-#include <tint/tint.h>
-
-#include <sstream>
+#include "tint/tint.h"
namespace dawn::native::opengl {
- std::string GetBindingName(BindGroupIndex group, BindingNumber bindingNumber) {
- std::ostringstream o;
- o << "dawn_binding_" << static_cast<uint32_t>(group) << "_"
- << static_cast<uint32_t>(bindingNumber);
- return o.str();
- }
-
- bool operator<(const BindingLocation& a, const BindingLocation& b) {
- return std::tie(a.group, a.binding) < std::tie(b.group, b.binding);
+std::string GetBindingName(BindGroupIndex group, BindingNumber bindingNumber) {
+ std::ostringstream o;
+ o << "dawn_binding_" << static_cast<uint32_t>(group) << "_"
+ << static_cast<uint32_t>(bindingNumber);
+ return o.str();
+}
+
+bool operator<(const BindingLocation& a, const BindingLocation& b) {
+ return std::tie(a.group, a.binding) < std::tie(b.group, b.binding);
+}
+
+bool operator<(const CombinedSampler& a, const CombinedSampler& b) {
+ return std::tie(a.usePlaceholderSampler, a.samplerLocation, a.textureLocation) <
+           std::tie(b.usePlaceholderSampler, b.samplerLocation, b.textureLocation);
+}
+
+std::string CombinedSampler::GetName() const {
+ std::ostringstream o;
+ o << "dawn_combined";
+ if (usePlaceholderSampler) {
+ o << "_placeholder_sampler";
+ } else {
+ o << "_" << static_cast<uint32_t>(samplerLocation.group) << "_"
+ << static_cast<uint32_t>(samplerLocation.binding);
}
-
- bool operator<(const CombinedSampler& a, const CombinedSampler& b) {
- return std::tie(a.useDummySampler, a.samplerLocation, a.textureLocation) <
- std::tie(b.useDummySampler, a.samplerLocation, b.textureLocation);
- }
-
- std::string CombinedSampler::GetName() const {
- std::ostringstream o;
- o << "dawn_combined";
- if (useDummySampler) {
- o << "_dummy_sampler";
+ o << "_with_" << static_cast<uint32_t>(textureLocation.group) << "_"
+ << static_cast<uint32_t>(textureLocation.binding);
+ return o.str();
+}
+
+// static
+ResultOrError<Ref<ShaderModule>> ShaderModule::Create(
+ Device* device,
+ const ShaderModuleDescriptor* descriptor,
+ ShaderModuleParseResult* parseResult,
+ OwnedCompilationMessages* compilationMessages) {
+ Ref<ShaderModule> module = AcquireRef(new ShaderModule(device, descriptor));
+ DAWN_TRY(module->Initialize(parseResult, compilationMessages));
+ return module;
+}
+
+ShaderModule::ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor)
+ : ShaderModuleBase(device, descriptor) {}
+
+MaybeError ShaderModule::Initialize(ShaderModuleParseResult* parseResult,
+ OwnedCompilationMessages* compilationMessages) {
+ ScopedTintICEHandler scopedICEHandler(GetDevice());
+
+ DAWN_TRY(InitializeBase(parseResult, compilationMessages));
+
+ return {};
+}
+
+ResultOrError<std::string> ShaderModule::TranslateToGLSL(const char* entryPointName,
+ SingleShaderStage stage,
+ CombinedSamplerInfo* combinedSamplers,
+ const PipelineLayout* layout,
+ bool* needsPlaceholderSampler) const {
+ TRACE_EVENT0(GetDevice()->GetPlatform(), General, "TranslateToGLSL");
+ tint::transform::Manager transformManager;
+ tint::transform::DataMap transformInputs;
+
+ AddExternalTextureTransform(layout, &transformManager, &transformInputs);
+
+ tint::Program program;
+ DAWN_TRY_ASSIGN(program, RunTransforms(&transformManager, GetTintProgram(), transformInputs,
+ nullptr, nullptr));
+ const OpenGLVersion& version = ToBackend(GetDevice())->gl.GetVersion();
+
+ tint::writer::glsl::Options tintOptions;
+ using Version = tint::writer::glsl::Version;
+ tintOptions.version =
+ Version(version.IsDesktop() ? Version::Standard::kDesktop : Version::Standard::kES,
+ version.GetMajor(), version.GetMinor());
+
+ using tint::transform::BindingPoint;
+ // When textures are accessed without a sampler (e.g., textureLoad()),
+ // GetSamplerTextureUses() will return this sentinel value.
+ BindingPoint placeholderBindingPoint{static_cast<uint32_t>(kMaxBindGroupsTyped), 0};
+
+ tint::inspector::Inspector inspector(&program);
+ // Find all the sampler/texture pairs for this entry point, and create
+ // CombinedSamplers for them. CombinedSampler records the binding points
+ // of the original texture and sampler, and generates a unique name. The
+ // corresponding uniforms will be retrieved by these generated names
+ // in PipelineGL. Any texture-only references will have
+ // "usePlaceholderSampler" set to true, and only the texture binding point
+ // will be used in naming them. In addition, Dawn will bind a
+ // non-filtering sampler for them (see PipelineGL).
+ auto uses = inspector.GetSamplerTextureUses(entryPointName, placeholderBindingPoint);
+ for (const auto& use : uses) {
+ combinedSamplers->emplace_back();
+
+ CombinedSampler* info = &combinedSamplers->back();
+ if (use.sampler_binding_point == placeholderBindingPoint) {
+ info->usePlaceholderSampler = true;
+ *needsPlaceholderSampler = true;
} else {
- o << "_" << static_cast<uint32_t>(samplerLocation.group) << "_"
- << static_cast<uint32_t>(samplerLocation.binding);
+ info->usePlaceholderSampler = false;
}
- o << "_with_" << static_cast<uint32_t>(textureLocation.group) << "_"
- << static_cast<uint32_t>(textureLocation.binding);
- return o.str();
+ info->samplerLocation.group = BindGroupIndex(use.sampler_binding_point.group);
+ info->samplerLocation.binding = BindingNumber(use.sampler_binding_point.binding);
+ info->textureLocation.group = BindGroupIndex(use.texture_binding_point.group);
+ info->textureLocation.binding = BindingNumber(use.texture_binding_point.binding);
+ tintOptions.binding_map[use] = info->GetName();
}
-
- // static
- ResultOrError<Ref<ShaderModule>> ShaderModule::Create(Device* device,
- const ShaderModuleDescriptor* descriptor,
- ShaderModuleParseResult* parseResult) {
- Ref<ShaderModule> module = AcquireRef(new ShaderModule(device, descriptor));
- DAWN_TRY(module->Initialize(parseResult));
- return module;
- }
-
- ShaderModule::ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor)
- : ShaderModuleBase(device, descriptor) {
+ if (*needsPlaceholderSampler) {
+ tintOptions.placeholder_binding_point = placeholderBindingPoint;
}
- MaybeError ShaderModule::Initialize(ShaderModuleParseResult* parseResult) {
- ScopedTintICEHandler scopedICEHandler(GetDevice());
-
- DAWN_TRY(InitializeBase(parseResult));
-
- return {};
- }
-
- ResultOrError<std::string> ShaderModule::TranslateToGLSL(const char* entryPointName,
- SingleShaderStage stage,
- CombinedSamplerInfo* combinedSamplers,
- const PipelineLayout* layout,
- bool* needsDummySampler) const {
- TRACE_EVENT0(GetDevice()->GetPlatform(), General, "TranslateToGLSL");
- tint::transform::Manager transformManager;
- tint::transform::DataMap transformInputs;
-
- AddExternalTextureTransform(layout, &transformManager, &transformInputs);
-
- tint::Program program;
- DAWN_TRY_ASSIGN(program, RunTransforms(&transformManager, GetTintProgram(), transformInputs,
- nullptr, nullptr));
- const OpenGLVersion& version = ToBackend(GetDevice())->gl.GetVersion();
-
- tint::writer::glsl::Options tintOptions;
- using Version = tint::writer::glsl::Version;
- tintOptions.version =
- Version(version.IsDesktop() ? Version::Standard::kDesktop : Version::Standard::kES,
- version.GetMajor(), version.GetMinor());
-
- using tint::transform::BindingPoint;
- // When textures are accessed without a sampler (e.g., textureLoad()),
- // GetSamplerTextureUses() will return this sentinel value.
- BindingPoint placeholderBindingPoint{static_cast<uint32_t>(kMaxBindGroupsTyped), 0};
-
- tint::inspector::Inspector inspector(&program);
- // Find all the sampler/texture pairs for this entry point, and create
- // CombinedSamplers for them. CombinedSampler records the binding points
- // of the original texture and sampler, and generates a unique name. The
- // corresponding uniforms will be retrieved by these generated names
- // in PipelineGL. Any texture-only references will have
- // "useDummySampler" set to true, and only the texture binding point
- // will be used in naming them. In addition, Dawn will bind a
- // non-filtering sampler for them (see PipelineGL).
- auto uses = inspector.GetSamplerTextureUses(entryPointName, placeholderBindingPoint);
- for (const auto& use : uses) {
- combinedSamplers->emplace_back();
-
- CombinedSampler* info = &combinedSamplers->back();
- if (use.sampler_binding_point == placeholderBindingPoint) {
- info->useDummySampler = true;
- *needsDummySampler = true;
- } else {
- info->useDummySampler = false;
+ // Since (non-Vulkan) GLSL does not support descriptor sets, generate a
+ // mapping from the original group/binding pair to a binding-only
+ // value. This mapping will be used by Tint to remap all global
+ // variables to the 1D space.
+ for (BindGroupIndex group : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
+ const BindGroupLayoutBase::BindingMap& bindingMap =
+ layout->GetBindGroupLayout(group)->GetBindingMap();
+ for (const auto& it : bindingMap) {
+ BindingNumber bindingNumber = it.first;
+ BindingIndex bindingIndex = it.second;
+ const BindingInfo& bindingInfo =
+ layout->GetBindGroupLayout(group)->GetBindingInfo(bindingIndex);
+ if (!(bindingInfo.visibility & StageBit(stage))) {
+ continue;
}
- info->samplerLocation.group = BindGroupIndex(use.sampler_binding_point.group);
- info->samplerLocation.binding = BindingNumber(use.sampler_binding_point.binding);
- info->textureLocation.group = BindGroupIndex(use.texture_binding_point.group);
- info->textureLocation.binding = BindingNumber(use.texture_binding_point.binding);
- tintOptions.binding_map[use] = info->GetName();
- }
- if (*needsDummySampler) {
- tintOptions.placeholder_binding_point = placeholderBindingPoint;
- }
- // Since (non-Vulkan) GLSL does not support descriptor sets, generate a
- // mapping from the original group/binding pair to a binding-only
- // value. This mapping will be used by Tint to remap all global
- // variables to the 1D space.
- for (BindGroupIndex group : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
- const BindGroupLayoutBase::BindingMap& bindingMap =
- layout->GetBindGroupLayout(group)->GetBindingMap();
- for (const auto& it : bindingMap) {
- BindingNumber bindingNumber = it.first;
- BindingIndex bindingIndex = it.second;
- const BindingInfo& bindingInfo =
- layout->GetBindGroupLayout(group)->GetBindingInfo(bindingIndex);
- if (!(bindingInfo.visibility & StageBit(stage))) {
- continue;
- }
-
- uint32_t shaderIndex = layout->GetBindingIndexInfo()[group][bindingIndex];
- BindingPoint srcBindingPoint{static_cast<uint32_t>(group),
- static_cast<uint32_t>(bindingNumber)};
- BindingPoint dstBindingPoint{0, shaderIndex};
- tintOptions.binding_points.emplace(srcBindingPoint, dstBindingPoint);
- }
- tintOptions.allow_collisions = true;
+ uint32_t shaderIndex = layout->GetBindingIndexInfo()[group][bindingIndex];
+ BindingPoint srcBindingPoint{static_cast<uint32_t>(group),
+ static_cast<uint32_t>(bindingNumber)};
+ BindingPoint dstBindingPoint{0, shaderIndex};
+ tintOptions.binding_points.emplace(srcBindingPoint, dstBindingPoint);
}
- auto result = tint::writer::glsl::Generate(&program, tintOptions, entryPointName);
- DAWN_INVALID_IF(!result.success, "An error occured while generating GLSL: %s.",
- result.error);
- std::string glsl = std::move(result.glsl);
+ tintOptions.allow_collisions = true;
+ }
+ auto result = tint::writer::glsl::Generate(&program, tintOptions, entryPointName);
+    DAWN_INVALID_IF(!result.success, "An error occurred while generating GLSL: %s.", result.error);
+ std::string glsl = std::move(result.glsl);
- if (GetDevice()->IsToggleEnabled(Toggle::DumpShaders)) {
- std::ostringstream dumpedMsg;
- dumpedMsg << "/* Dumped generated GLSL */" << std::endl << glsl;
+ if (GetDevice()->IsToggleEnabled(Toggle::DumpShaders)) {
+ std::ostringstream dumpedMsg;
+ dumpedMsg << "/* Dumped generated GLSL */" << std::endl << glsl;
- GetDevice()->EmitLog(WGPULoggingType_Info, dumpedMsg.str().c_str());
- }
-
- return glsl;
+ GetDevice()->EmitLog(WGPULoggingType_Info, dumpedMsg.str().c_str());
}
+ return glsl;
+}
+
} // namespace dawn::native::opengl
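
TranslateToGLSL above remaps every (group, binding) pair to a flat, binding-only index because GLSL has no descriptor sets, and names each combined texture/sampler pair so PipelineGL can look the generated uniforms up later. Below is a toy sketch of the flattening step only; the real code derives the index from the pipeline layout's BindingIndexInfo, whereas this allocator simply hands out indices in visit order.

// Standalone illustration (not Dawn code): flattening (group, binding) pairs.
#include <cstdint>
#include <map>
#include <utility>

using BindingPoint = std::pair<uint32_t, uint32_t>;  // (group, binding)

class FlatBindingAllocator {
  public:
    // Returns the flattened index for a (group, binding) pair, assigning a new
    // index the first time the pair is seen.
    uint32_t Remap(BindingPoint point) {
        auto [it, inserted] = mMapping.emplace(point, mNext);
        if (inserted) {
            ++mNext;
        }
        return it->second;
    }

  private:
    std::map<BindingPoint, uint32_t> mMapping;
    uint32_t mNext = 0;
};
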
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/ShaderModuleGL.h b/chromium/third_party/dawn/src/dawn/native/opengl/ShaderModuleGL.h
index 89fecee9198..4dcff097fe8 100644
--- a/chromium/third_party/dawn/src/dawn/native/opengl/ShaderModuleGL.h
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/ShaderModuleGL.h
@@ -15,55 +15,62 @@
#ifndef SRC_DAWN_NATIVE_OPENGL_SHADERMODULEGL_H_
#define SRC_DAWN_NATIVE_OPENGL_SHADERMODULEGL_H_
+#include <memory>
+#include <string>
+#include <unordered_map>
+#include <vector>
+
#include "dawn/native/ShaderModule.h"
#include "dawn/native/opengl/opengl_platform.h"
namespace dawn::native::opengl {
- class Device;
- class PipelineLayout;
-
- std::string GetBindingName(BindGroupIndex group, BindingNumber bindingNumber);
-
- struct BindingLocation {
- BindGroupIndex group;
- BindingNumber binding;
- };
- bool operator<(const BindingLocation& a, const BindingLocation& b);
-
- struct CombinedSampler {
- BindingLocation samplerLocation;
- BindingLocation textureLocation;
- // OpenGL requires a sampler with texelFetch. If this is true, the developer did not provide
- // one and Dawn should bind a dummy non-filtering sampler. |samplerLocation| is unused.
- bool useDummySampler;
- std::string GetName() const;
- };
- bool operator<(const CombinedSampler& a, const CombinedSampler& b);
-
- using CombinedSamplerInfo = std::vector<CombinedSampler>;
-
- using BindingInfoArrayTable =
- std::unordered_map<std::string, std::unique_ptr<BindingInfoArray>>;
-
- class ShaderModule final : public ShaderModuleBase {
- public:
- static ResultOrError<Ref<ShaderModule>> Create(Device* device,
- const ShaderModuleDescriptor* descriptor,
- ShaderModuleParseResult* parseResult);
-
- ResultOrError<std::string> TranslateToGLSL(const char* entryPointName,
- SingleShaderStage stage,
- CombinedSamplerInfo* combinedSamplers,
- const PipelineLayout* layout,
- bool* needsDummySampler) const;
-
- private:
- ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor);
- ~ShaderModule() override = default;
- MaybeError Initialize(ShaderModuleParseResult* parseResult);
- };
+class Device;
+class PipelineLayout;
+
+std::string GetBindingName(BindGroupIndex group, BindingNumber bindingNumber);
+
+struct BindingLocation {
+ BindGroupIndex group;
+ BindingNumber binding;
+};
+bool operator<(const BindingLocation& a, const BindingLocation& b);
+
+struct CombinedSampler {
+ BindingLocation samplerLocation;
+ BindingLocation textureLocation;
+    // OpenGL requires a sampler to be bound even when the shader only uses texelFetch. If
+    // this is true, the developer did not provide one and Dawn should bind a placeholder
+    // non-filtering sampler. |samplerLocation| is unused.
+ bool usePlaceholderSampler;
+ std::string GetName() const;
+};
+bool operator<(const CombinedSampler& a, const CombinedSampler& b);
+
+using CombinedSamplerInfo = std::vector<CombinedSampler>;
+
+using BindingInfoArrayTable = std::unordered_map<std::string, std::unique_ptr<BindingInfoArray>>;
+
+class ShaderModule final : public ShaderModuleBase {
+ public:
+ static ResultOrError<Ref<ShaderModule>> Create(Device* device,
+ const ShaderModuleDescriptor* descriptor,
+ ShaderModuleParseResult* parseResult,
+ OwnedCompilationMessages* compilationMessages);
+
+ ResultOrError<std::string> TranslateToGLSL(const char* entryPointName,
+ SingleShaderStage stage,
+ CombinedSamplerInfo* combinedSamplers,
+ const PipelineLayout* layout,
+ bool* needsPlaceholderSampler) const;
+
+ private:
+ ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor);
+ ~ShaderModule() override = default;
+ MaybeError Initialize(ShaderModuleParseResult* parseResult,
+ OwnedCompilationMessages* compilationMessages);
+};
} // namespace dawn::native::opengl
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/SwapChainGL.cpp b/chromium/third_party/dawn/src/dawn/native/opengl/SwapChainGL.cpp
index e59bb9ff8d5..8501ee7ace2 100644
--- a/chromium/third_party/dawn/src/dawn/native/opengl/SwapChainGL.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/SwapChainGL.cpp
@@ -18,34 +18,33 @@
#include "dawn/native/opengl/Forward.h"
#include "dawn/native/opengl/TextureGL.h"
-#include <dawn/dawn_wsi.h>
+#include "dawn/dawn_wsi.h"
namespace dawn::native::opengl {
- SwapChain::SwapChain(Device* device, const SwapChainDescriptor* descriptor)
- : OldSwapChainBase(device, descriptor) {
- const auto& im = GetImplementation();
- im.Init(im.userData, nullptr);
- }
-
- SwapChain::~SwapChain() {
- }
-
- TextureBase* SwapChain::GetNextTextureImpl(const TextureDescriptor* descriptor) {
- const auto& im = GetImplementation();
- DawnSwapChainNextTexture next = {};
- DawnSwapChainError error = im.GetNextTexture(im.userData, &next);
- if (error) {
- GetDevice()->HandleError(InternalErrorType::Internal, error);
- return nullptr;
- }
- GLuint nativeTexture = next.texture.u32;
- return new Texture(ToBackend(GetDevice()), descriptor, nativeTexture,
- TextureBase::TextureState::OwnedExternal);
- }
-
- MaybeError SwapChain::OnBeforePresent(TextureViewBase*) {
- return {};
+SwapChain::SwapChain(Device* device, const SwapChainDescriptor* descriptor)
+ : OldSwapChainBase(device, descriptor) {
+ const auto& im = GetImplementation();
+ im.Init(im.userData, nullptr);
+}
+
+SwapChain::~SwapChain() {}
+
+TextureBase* SwapChain::GetNextTextureImpl(const TextureDescriptor* descriptor) {
+ const auto& im = GetImplementation();
+ DawnSwapChainNextTexture next = {};
+ DawnSwapChainError error = im.GetNextTexture(im.userData, &next);
+ if (error) {
+ GetDevice()->HandleError(InternalErrorType::Internal, error);
+ return nullptr;
}
+ GLuint nativeTexture = next.texture.u32;
+ return new Texture(ToBackend(GetDevice()), descriptor, nativeTexture,
+ TextureBase::TextureState::OwnedExternal);
+}
+
+MaybeError SwapChain::OnBeforePresent(TextureViewBase*) {
+ return {};
+}
} // namespace dawn::native::opengl
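
The old-style swapchain above drives an embedder-supplied callback table: Init is handed the userData pointer, and GetNextTexture either reports an error or returns a GL texture handle for the backend to wrap. A hypothetical, simplified version of that pattern follows; the types below are illustrative and are not the real DawnSwapChainImplementation.

// Hypothetical sketch (not Dawn's headers) of a C-style swapchain hook table.
#include <cstdint>

struct ToySwapChainNextTexture {
    uint32_t glTextureHandle;
};

using ToySwapChainError = const char*;  // nullptr means success

struct ToySwapChainHooks {
    void* userData;
    void (*Init)(void* userData, void* wsiContext);
    ToySwapChainError (*GetNextTexture)(void* userData, ToySwapChainNextTexture* next);
};

// Backend-side usage, mirroring SwapChain::GetNextTextureImpl above: on error the
// real code forwards the message to Device::HandleError and returns nullptr.
uint32_t AcquireNextGlTexture(const ToySwapChainHooks& hooks, bool* outFailed) {
    ToySwapChainNextTexture next = {};
    if (hooks.GetNextTexture(hooks.userData, &next) != nullptr) {
        *outFailed = true;
        return 0;
    }
    *outFailed = false;
    return next.glTextureHandle;
}
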
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/SwapChainGL.h b/chromium/third_party/dawn/src/dawn/native/opengl/SwapChainGL.h
index d84c43c6ae4..0c1456489be 100644
--- a/chromium/third_party/dawn/src/dawn/native/opengl/SwapChainGL.h
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/SwapChainGL.h
@@ -21,17 +21,17 @@
namespace dawn::native::opengl {
- class Device;
+class Device;
- class SwapChain final : public OldSwapChainBase {
- public:
- SwapChain(Device* device, const SwapChainDescriptor* descriptor);
+class SwapChain final : public OldSwapChainBase {
+ public:
+ SwapChain(Device* device, const SwapChainDescriptor* descriptor);
- protected:
- ~SwapChain() override;
- TextureBase* GetNextTextureImpl(const TextureDescriptor* descriptor) override;
- MaybeError OnBeforePresent(TextureViewBase* view) override;
- };
+ protected:
+ ~SwapChain() override;
+ TextureBase* GetNextTextureImpl(const TextureDescriptor* descriptor) override;
+ MaybeError OnBeforePresent(TextureViewBase* view) override;
+};
} // namespace dawn::native::opengl
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/TextureGL.cpp b/chromium/third_party/dawn/src/dawn/native/opengl/TextureGL.cpp
index d7f1c10a2af..88130bd8c61 100644
--- a/chromium/third_party/dawn/src/dawn/native/opengl/TextureGL.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/TextureGL.cpp
@@ -14,6 +14,8 @@
#include "dawn/native/opengl/TextureGL.h"
+#include <limits>
+
#include "dawn/common/Assert.h"
#include "dawn/common/Constants.h"
#include "dawn/common/Math.h"
@@ -25,281 +27,300 @@
namespace dawn::native::opengl {
- namespace {
-
- GLenum TargetForTexture(const TextureDescriptor* descriptor) {
- switch (descriptor->dimension) {
- case wgpu::TextureDimension::e2D:
- if (descriptor->size.depthOrArrayLayers > 1) {
- ASSERT(descriptor->sampleCount == 1);
- return GL_TEXTURE_2D_ARRAY;
- } else {
- if (descriptor->sampleCount > 1) {
- return GL_TEXTURE_2D_MULTISAMPLE;
- } else {
- return GL_TEXTURE_2D;
- }
- }
- case wgpu::TextureDimension::e3D:
- ASSERT(descriptor->sampleCount == 1);
- return GL_TEXTURE_3D;
-
- case wgpu::TextureDimension::e1D:
- break;
- }
- UNREACHABLE();
- }
+namespace {
- GLenum TargetForTextureViewDimension(wgpu::TextureViewDimension dimension,
- uint32_t arrayLayerCount,
- uint32_t sampleCount) {
- switch (dimension) {
- case wgpu::TextureViewDimension::e2D:
- return (sampleCount > 1) ? GL_TEXTURE_2D_MULTISAMPLE : GL_TEXTURE_2D;
- case wgpu::TextureViewDimension::e2DArray:
- if (sampleCount > 1) {
- ASSERT(arrayLayerCount == 1);
- return GL_TEXTURE_2D_MULTISAMPLE;
- }
- ASSERT(sampleCount == 1);
- return GL_TEXTURE_2D_ARRAY;
- case wgpu::TextureViewDimension::Cube:
- ASSERT(sampleCount == 1);
- ASSERT(arrayLayerCount == 6);
- return GL_TEXTURE_CUBE_MAP;
- case wgpu::TextureViewDimension::CubeArray:
- ASSERT(sampleCount == 1);
- ASSERT(arrayLayerCount % 6 == 0);
- return GL_TEXTURE_CUBE_MAP_ARRAY;
- case wgpu::TextureViewDimension::e3D:
- return GL_TEXTURE_3D;
-
- case wgpu::TextureViewDimension::e1D:
- case wgpu::TextureViewDimension::Undefined:
- break;
- }
- UNREACHABLE();
- }
-
- GLuint GenTexture(const OpenGLFunctions& gl) {
- GLuint handle = 0;
- gl.GenTextures(1, &handle);
- return handle;
- }
-
- bool RequiresCreatingNewTextureView(const TextureBase* texture,
- const TextureViewDescriptor* textureViewDescriptor) {
- constexpr wgpu::TextureUsage kShaderUsageNeedsView =
- wgpu::TextureUsage::StorageBinding | wgpu::TextureUsage::TextureBinding;
- constexpr wgpu::TextureUsage kUsageNeedsView =
- kShaderUsageNeedsView | wgpu::TextureUsage::RenderAttachment;
- if ((texture->GetInternalUsage() & kUsageNeedsView) == 0) {
- return false;
+GLenum TargetForTexture(const TextureDescriptor* descriptor) {
+ switch (descriptor->dimension) {
+ case wgpu::TextureDimension::e2D:
+ if (descriptor->size.depthOrArrayLayers > 1) {
+ ASSERT(descriptor->sampleCount == 1);
+ return GL_TEXTURE_2D_ARRAY;
+ } else {
+ if (descriptor->sampleCount > 1) {
+ return GL_TEXTURE_2D_MULTISAMPLE;
+ } else {
+ return GL_TEXTURE_2D;
+ }
}
+ case wgpu::TextureDimension::e3D:
+ ASSERT(descriptor->sampleCount == 1);
+ return GL_TEXTURE_3D;
- if (texture->GetFormat().format != textureViewDescriptor->format &&
- !texture->GetFormat().HasDepthOrStencil()) {
- // Color format reinterpretation required. Note: Depth/stencil formats don't support
- // reinterpretation.
- return true;
+ case wgpu::TextureDimension::e1D:
+ break;
+ }
+ UNREACHABLE();
+}
+
+GLenum TargetForTextureViewDimension(wgpu::TextureViewDimension dimension,
+ uint32_t arrayLayerCount,
+ uint32_t sampleCount) {
+ switch (dimension) {
+ case wgpu::TextureViewDimension::e2D:
+ return (sampleCount > 1) ? GL_TEXTURE_2D_MULTISAMPLE : GL_TEXTURE_2D;
+ case wgpu::TextureViewDimension::e2DArray:
+ if (sampleCount > 1) {
+ ASSERT(arrayLayerCount == 1);
+ return GL_TEXTURE_2D_MULTISAMPLE;
}
+ ASSERT(sampleCount == 1);
+ return GL_TEXTURE_2D_ARRAY;
+ case wgpu::TextureViewDimension::Cube:
+ ASSERT(sampleCount == 1);
+ ASSERT(arrayLayerCount == 6);
+ return GL_TEXTURE_CUBE_MAP;
+ case wgpu::TextureViewDimension::CubeArray:
+ ASSERT(sampleCount == 1);
+ ASSERT(arrayLayerCount % 6 == 0);
+ return GL_TEXTURE_CUBE_MAP_ARRAY;
+ case wgpu::TextureViewDimension::e3D:
+ return GL_TEXTURE_3D;
+
+ case wgpu::TextureViewDimension::e1D:
+ case wgpu::TextureViewDimension::Undefined:
+ break;
+ }
+ UNREACHABLE();
+}
+
+GLuint GenTexture(const OpenGLFunctions& gl) {
+ GLuint handle = 0;
+ gl.GenTextures(1, &handle);
+ return handle;
+}
+
+bool RequiresCreatingNewTextureView(const TextureBase* texture,
+ const TextureViewDescriptor* textureViewDescriptor) {
+ constexpr wgpu::TextureUsage kShaderUsageNeedsView =
+ wgpu::TextureUsage::StorageBinding | wgpu::TextureUsage::TextureBinding;
+ constexpr wgpu::TextureUsage kUsageNeedsView =
+ kShaderUsageNeedsView | wgpu::TextureUsage::RenderAttachment;
+ if ((texture->GetInternalUsage() & kUsageNeedsView) == 0) {
+ return false;
+ }
- // Reinterpretation not required. Now, we only need a new view if the view dimension or
- // set of subresources for the shader is different from the base texture.
- if ((texture->GetInternalUsage() & kShaderUsageNeedsView) == 0) {
- return false;
- }
+ if (texture->GetFormat().format != textureViewDescriptor->format &&
+ !texture->GetFormat().HasDepthOrStencil()) {
+ // Color format reinterpretation required. Note: Depth/stencil formats don't support
+ // reinterpretation.
+ return true;
+ }
- if (texture->GetArrayLayers() != textureViewDescriptor->arrayLayerCount ||
- (texture->GetArrayLayers() == 1 &&
- texture->GetDimension() == wgpu::TextureDimension::e2D &&
- textureViewDescriptor->dimension == wgpu::TextureViewDimension::e2DArray)) {
- // If the view has a different number of array layers, we need a new view.
- // And, if the original texture is a 2D texture with one array layer, we need a new
- // view to view it as a 2D array texture.
- return true;
- }
+ // Reinterpretation not required. Now, we only need a new view if the view dimension or
+ // set of subresources for the shader is different from the base texture.
+ if ((texture->GetInternalUsage() & kShaderUsageNeedsView) == 0) {
+ return false;
+ }
- if (texture->GetNumMipLevels() != textureViewDescriptor->mipLevelCount) {
- return true;
- }
+ if (texture->GetArrayLayers() != textureViewDescriptor->arrayLayerCount ||
+ (texture->GetArrayLayers() == 1 && texture->GetDimension() == wgpu::TextureDimension::e2D &&
+ textureViewDescriptor->dimension == wgpu::TextureViewDimension::e2DArray)) {
+ // If the view has a different number of array layers, we need a new view.
+ // And, if the original texture is a 2D texture with one array layer, we need a new
+ // view to view it as a 2D array texture.
+ return true;
+ }
- if (ToBackend(texture)->GetGLFormat().format == GL_DEPTH_STENCIL &&
- (texture->GetUsage() & wgpu::TextureUsage::TextureBinding) != 0 &&
- textureViewDescriptor->aspect == wgpu::TextureAspect::StencilOnly) {
- // We need a separate view for one of the depth or stencil planes
-            // because each glTextureView needs its own handle to set
- // GL_DEPTH_STENCIL_TEXTURE_MODE. Choose the stencil aspect for the
- // extra handle since it is likely sampled less often.
- return true;
- }
+ if (texture->GetNumMipLevels() != textureViewDescriptor->mipLevelCount) {
+ return true;
+ }
- switch (textureViewDescriptor->dimension) {
- case wgpu::TextureViewDimension::Cube:
- case wgpu::TextureViewDimension::CubeArray:
- return true;
- default:
- break;
- }
+ if (ToBackend(texture)->GetGLFormat().format == GL_DEPTH_STENCIL &&
+ (texture->GetUsage() & wgpu::TextureUsage::TextureBinding) != 0 &&
+ textureViewDescriptor->aspect == wgpu::TextureAspect::StencilOnly) {
+ // We need a separate view for one of the depth or stencil planes
+        // because each glTextureView needs its own handle to set
+ // GL_DEPTH_STENCIL_TEXTURE_MODE. Choose the stencil aspect for the
+ // extra handle since it is likely sampled less often.
+ return true;
+ }
- return false;
- }
+ switch (textureViewDescriptor->dimension) {
+ case wgpu::TextureViewDimension::Cube:
+ case wgpu::TextureViewDimension::CubeArray:
+ return true;
+ default:
+ break;
+ }
- void AllocateTexture(const OpenGLFunctions& gl,
- GLenum target,
- GLsizei samples,
- GLuint levels,
- GLenum internalFormat,
- const Extent3D& size) {
- // glTextureView() requires the value of GL_TEXTURE_IMMUTABLE_FORMAT for origtexture to
- // be GL_TRUE, so the storage of the texture must be allocated with glTexStorage*D.
- // https://www.khronos.org/registry/OpenGL-Refpages/gl4/html/glTextureView.xhtml
- switch (target) {
- case GL_TEXTURE_2D_ARRAY:
- case GL_TEXTURE_3D:
- gl.TexStorage3D(target, levels, internalFormat, size.width, size.height,
- size.depthOrArrayLayers);
- break;
- case GL_TEXTURE_2D:
- case GL_TEXTURE_CUBE_MAP:
- gl.TexStorage2D(target, levels, internalFormat, size.width, size.height);
- break;
- case GL_TEXTURE_2D_MULTISAMPLE:
- gl.TexStorage2DMultisample(target, samples, internalFormat, size.width,
- size.height, true);
- break;
- default:
- UNREACHABLE();
- }
- }
+ return false;
+}
+
+void AllocateTexture(const OpenGLFunctions& gl,
+ GLenum target,
+ GLsizei samples,
+ GLuint levels,
+ GLenum internalFormat,
+ const Extent3D& size) {
+ // glTextureView() requires the value of GL_TEXTURE_IMMUTABLE_FORMAT for origtexture to
+ // be GL_TRUE, so the storage of the texture must be allocated with glTexStorage*D.
+ // https://www.khronos.org/registry/OpenGL-Refpages/gl4/html/glTextureView.xhtml
+ switch (target) {
+ case GL_TEXTURE_2D_ARRAY:
+ case GL_TEXTURE_3D:
+ gl.TexStorage3D(target, levels, internalFormat, size.width, size.height,
+ size.depthOrArrayLayers);
+ break;
+ case GL_TEXTURE_2D:
+ case GL_TEXTURE_CUBE_MAP:
+ gl.TexStorage2D(target, levels, internalFormat, size.width, size.height);
+ break;
+ case GL_TEXTURE_2D_MULTISAMPLE:
+ gl.TexStorage2DMultisample(target, samples, internalFormat, size.width, size.height,
+ true);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
- } // namespace
+} // namespace
- // Texture
+// Texture
- Texture::Texture(Device* device, const TextureDescriptor* descriptor)
- : Texture(device, descriptor, GenTexture(device->gl), TextureState::OwnedInternal) {
- const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
+Texture::Texture(Device* device, const TextureDescriptor* descriptor)
+ : Texture(device, descriptor, GenTexture(device->gl), TextureState::OwnedInternal) {
+ const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
- uint32_t levels = GetNumMipLevels();
+ uint32_t levels = GetNumMipLevels();
- const GLFormat& glFormat = GetGLFormat();
+ const GLFormat& glFormat = GetGLFormat();
- gl.BindTexture(mTarget, mHandle);
+ gl.BindTexture(mTarget, mHandle);
- AllocateTexture(gl, mTarget, GetSampleCount(), levels, glFormat.internalFormat, GetSize());
+ AllocateTexture(gl, mTarget, GetSampleCount(), levels, glFormat.internalFormat, GetSize());
- // The texture is not complete if it uses mipmapping and not all levels up to
- // MAX_LEVEL have been defined.
- gl.TexParameteri(mTarget, GL_TEXTURE_MAX_LEVEL, levels - 1);
+ // The texture is not complete if it uses mipmapping and not all levels up to
+ // MAX_LEVEL have been defined.
+ gl.TexParameteri(mTarget, GL_TEXTURE_MAX_LEVEL, levels - 1);
- if (GetDevice()->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting)) {
- GetDevice()->ConsumedError(
- ClearTexture(GetAllSubresources(), TextureBase::ClearValue::NonZero));
- }
+ if (GetDevice()->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting)) {
+ GetDevice()->ConsumedError(
+ ClearTexture(GetAllSubresources(), TextureBase::ClearValue::NonZero));
}
-
- void Texture::Touch() {
- mGenID++;
+}
+
+void Texture::Touch() {
+ mGenID++;
+}
+
+uint32_t Texture::GetGenID() const {
+ return mGenID;
+}
+
+Texture::Texture(Device* device,
+ const TextureDescriptor* descriptor,
+ GLuint handle,
+ TextureState state)
+ : TextureBase(device, descriptor, state), mHandle(handle) {
+ mTarget = TargetForTexture(descriptor);
+}
+
+Texture::~Texture() {}
+
+void Texture::DestroyImpl() {
+ TextureBase::DestroyImpl();
+ if (GetTextureState() == TextureState::OwnedInternal) {
+ ToBackend(GetDevice())->gl.DeleteTextures(1, &mHandle);
+ mHandle = 0;
}
+}
- uint32_t Texture::GetGenID() const {
- return mGenID;
- }
+GLuint Texture::GetHandle() const {
+ return mHandle;
+}
- Texture::Texture(Device* device,
- const TextureDescriptor* descriptor,
- GLuint handle,
- TextureState state)
- : TextureBase(device, descriptor, state), mHandle(handle) {
- mTarget = TargetForTexture(descriptor);
- }
-
- Texture::~Texture() {
- }
+GLenum Texture::GetGLTarget() const {
+ return mTarget;
+}
- void Texture::DestroyImpl() {
- TextureBase::DestroyImpl();
- if (GetTextureState() == TextureState::OwnedInternal) {
- ToBackend(GetDevice())->gl.DeleteTextures(1, &mHandle);
- mHandle = 0;
- }
- }
+const GLFormat& Texture::GetGLFormat() const {
+ return ToBackend(GetDevice())->GetGLFormat(GetFormat());
+}
- GLuint Texture::GetHandle() const {
- return mHandle;
- }
-
- GLenum Texture::GetGLTarget() const {
- return mTarget;
- }
-
- const GLFormat& Texture::GetGLFormat() const {
- return ToBackend(GetDevice())->GetGLFormat(GetFormat());
+MaybeError Texture::ClearTexture(const SubresourceRange& range,
+ TextureBase::ClearValue clearValue) {
+ // TODO(crbug.com/dawn/850): initialize the textures with compressed formats.
+ if (GetFormat().isCompressed) {
+ return {};
}
- MaybeError Texture::ClearTexture(const SubresourceRange& range,
- TextureBase::ClearValue clearValue) {
- // TODO(crbug.com/dawn/850): initialize the textures with compressed formats.
- if (GetFormat().isCompressed) {
- return {};
- }
-
- Device* device = ToBackend(GetDevice());
- const OpenGLFunctions& gl = device->gl;
+ Device* device = ToBackend(GetDevice());
+ const OpenGLFunctions& gl = device->gl;
- uint8_t clearColor = (clearValue == TextureBase::ClearValue::Zero) ? 0 : 1;
- float fClearColor = (clearValue == TextureBase::ClearValue::Zero) ? 0.f : 1.f;
+ uint8_t clearColor = (clearValue == TextureBase::ClearValue::Zero) ? 0 : 1;
+ float fClearColor = (clearValue == TextureBase::ClearValue::Zero) ? 0.f : 1.f;
- if (GetFormat().isRenderable) {
- if ((range.aspects & (Aspect::Depth | Aspect::Stencil)) != 0) {
- GLfloat depth = fClearColor;
- GLint stencil = clearColor;
- if (range.aspects & Aspect::Depth) {
- gl.DepthMask(GL_TRUE);
- }
- if (range.aspects & Aspect::Stencil) {
- gl.StencilMask(GetStencilMaskFromStencilFormat(GetFormat().format));
- }
+ if (GetFormat().isRenderable) {
+ if ((range.aspects & (Aspect::Depth | Aspect::Stencil)) != 0) {
+ GLfloat depth = fClearColor;
+ GLint stencil = clearColor;
+ if (range.aspects & Aspect::Depth) {
+ gl.DepthMask(GL_TRUE);
+ }
+ if (range.aspects & Aspect::Stencil) {
+ gl.StencilMask(GetStencilMaskFromStencilFormat(GetFormat().format));
+ }
- auto DoClear = [&](Aspect aspects) {
- if (aspects == (Aspect::Depth | Aspect::Stencil)) {
- gl.ClearBufferfi(GL_DEPTH_STENCIL, 0, depth, stencil);
- } else if (aspects == Aspect::Depth) {
- gl.ClearBufferfv(GL_DEPTH, 0, &depth);
- } else if (aspects == Aspect::Stencil) {
- gl.ClearBufferiv(GL_STENCIL, 0, &stencil);
- } else {
- UNREACHABLE();
- }
- };
-
- GLuint framebuffer = 0;
- gl.GenFramebuffers(1, &framebuffer);
- gl.BindFramebuffer(GL_DRAW_FRAMEBUFFER, framebuffer);
- gl.Disable(GL_SCISSOR_TEST);
-
- GLenum attachment;
- if (range.aspects == (Aspect::Depth | Aspect::Stencil)) {
- attachment = GL_DEPTH_STENCIL_ATTACHMENT;
- } else if (range.aspects == Aspect::Depth) {
- attachment = GL_DEPTH_ATTACHMENT;
- } else if (range.aspects == Aspect::Stencil) {
- attachment = GL_STENCIL_ATTACHMENT;
+ auto DoClear = [&](Aspect aspects) {
+ if (aspects == (Aspect::Depth | Aspect::Stencil)) {
+ gl.ClearBufferfi(GL_DEPTH_STENCIL, 0, depth, stencil);
+ } else if (aspects == Aspect::Depth) {
+ gl.ClearBufferfv(GL_DEPTH, 0, &depth);
+ } else if (aspects == Aspect::Stencil) {
+ gl.ClearBufferiv(GL_STENCIL, 0, &stencil);
} else {
UNREACHABLE();
}
+ };
+
+ GLuint framebuffer = 0;
+ gl.GenFramebuffers(1, &framebuffer);
+ gl.BindFramebuffer(GL_DRAW_FRAMEBUFFER, framebuffer);
+ gl.Disable(GL_SCISSOR_TEST);
+
+ GLenum attachment;
+ if (range.aspects == (Aspect::Depth | Aspect::Stencil)) {
+ attachment = GL_DEPTH_STENCIL_ATTACHMENT;
+ } else if (range.aspects == Aspect::Depth) {
+ attachment = GL_DEPTH_ATTACHMENT;
+ } else if (range.aspects == Aspect::Stencil) {
+ attachment = GL_STENCIL_ATTACHMENT;
+ } else {
+ UNREACHABLE();
+ }
- for (uint32_t level = range.baseMipLevel;
- level < range.baseMipLevel + range.levelCount; ++level) {
- switch (GetDimension()) {
- case wgpu::TextureDimension::e2D:
- if (GetArrayLayers() == 1) {
+ for (uint32_t level = range.baseMipLevel; level < range.baseMipLevel + range.levelCount;
+ ++level) {
+ switch (GetDimension()) {
+ case wgpu::TextureDimension::e2D:
+ if (GetArrayLayers() == 1) {
+ Aspect aspectsToClear = Aspect::None;
+ for (Aspect aspect : IterateEnumMask(range.aspects)) {
+ if (clearValue == TextureBase::ClearValue::Zero &&
+ IsSubresourceContentInitialized(
+ SubresourceRange::SingleMipAndLayer(level, 0, aspect))) {
+ // Skip lazy clears if already initialized.
+ continue;
+ }
+ aspectsToClear |= aspect;
+ }
+
+ if (aspectsToClear == Aspect::None) {
+ continue;
+ }
+
+ gl.FramebufferTexture2D(GL_DRAW_FRAMEBUFFER, attachment, GetGLTarget(),
+ GetHandle(), static_cast<GLint>(level));
+ DoClear(aspectsToClear);
+ } else {
+ for (uint32_t layer = range.baseArrayLayer;
+ layer < range.baseArrayLayer + range.layerCount; ++layer) {
Aspect aspectsToClear = Aspect::None;
for (Aspect aspect : IterateEnumMask(range.aspects)) {
if (clearValue == TextureBase::ClearValue::Zero &&
IsSubresourceContentInitialized(
- SubresourceRange::SingleMipAndLayer(level, 0,
+ SubresourceRange::SingleMipAndLayer(level, layer,
aspect))) {
// Skip lazy clears if already initialized.
continue;
@@ -311,222 +332,53 @@ namespace dawn::native::opengl {
continue;
}
- gl.FramebufferTexture2D(GL_DRAW_FRAMEBUFFER, attachment,
- GetGLTarget(), GetHandle(),
- static_cast<GLint>(level));
+ gl.FramebufferTextureLayer(GL_DRAW_FRAMEBUFFER, attachment,
+ GetHandle(), static_cast<GLint>(level),
+ static_cast<GLint>(layer));
DoClear(aspectsToClear);
- } else {
- for (uint32_t layer = range.baseArrayLayer;
- layer < range.baseArrayLayer + range.layerCount; ++layer) {
- Aspect aspectsToClear = Aspect::None;
- for (Aspect aspect : IterateEnumMask(range.aspects)) {
- if (clearValue == TextureBase::ClearValue::Zero &&
- IsSubresourceContentInitialized(
- SubresourceRange::SingleMipAndLayer(level, layer,
- aspect))) {
- // Skip lazy clears if already initialized.
- continue;
- }
- aspectsToClear |= aspect;
- }
-
- if (aspectsToClear == Aspect::None) {
- continue;
- }
-
- gl.FramebufferTextureLayer(
- GL_DRAW_FRAMEBUFFER, attachment, GetHandle(),
- static_cast<GLint>(level), static_cast<GLint>(layer));
- DoClear(aspectsToClear);
- }
}
- break;
-
- case wgpu::TextureDimension::e1D:
- case wgpu::TextureDimension::e3D:
- UNREACHABLE();
- }
- }
-
- gl.Enable(GL_SCISSOR_TEST);
- gl.DeleteFramebuffers(1, &framebuffer);
- } else {
- ASSERT(range.aspects == Aspect::Color);
-
- // For gl.ClearBufferiv/uiv calls
- constexpr std::array<GLuint, 4> kClearColorDataUint0 = {0u, 0u, 0u, 0u};
- constexpr std::array<GLuint, 4> kClearColorDataUint1 = {1u, 1u, 1u, 1u};
- std::array<GLuint, 4> clearColorData;
- clearColorData.fill((clearValue == TextureBase::ClearValue::Zero) ? 0u : 1u);
-
- // For gl.ClearBufferfv calls
- constexpr std::array<GLfloat, 4> kClearColorDataFloat0 = {0.f, 0.f, 0.f, 0.f};
- constexpr std::array<GLfloat, 4> kClearColorDataFloat1 = {1.f, 1.f, 1.f, 1.f};
- std::array<GLfloat, 4> fClearColorData;
- fClearColorData.fill((clearValue == TextureBase::ClearValue::Zero) ? 0.f : 1.f);
-
- static constexpr uint32_t MAX_TEXEL_SIZE = 16;
- const TexelBlockInfo& blockInfo = GetFormat().GetAspectInfo(Aspect::Color).block;
- ASSERT(blockInfo.byteSize <= MAX_TEXEL_SIZE);
-
- // For gl.ClearTexSubImage calls
- constexpr std::array<GLbyte, MAX_TEXEL_SIZE> kClearColorDataBytes0 = {
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
- constexpr std::array<GLbyte, MAX_TEXEL_SIZE> kClearColorDataBytes255 = {
- -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1};
-
- wgpu::TextureComponentType baseType =
- GetFormat().GetAspectInfo(Aspect::Color).baseType;
-
- const GLFormat& glFormat = GetGLFormat();
- for (uint32_t level = range.baseMipLevel;
- level < range.baseMipLevel + range.levelCount; ++level) {
- Extent3D mipSize = GetMipLevelPhysicalSize(level);
- for (uint32_t layer = range.baseArrayLayer;
- layer < range.baseArrayLayer + range.layerCount; ++layer) {
- if (clearValue == TextureBase::ClearValue::Zero &&
- IsSubresourceContentInitialized(
- SubresourceRange::SingleMipAndLayer(level, layer, Aspect::Color))) {
- // Skip lazy clears if already initialized.
- continue;
}
- if (gl.IsAtLeastGL(4, 4)) {
- gl.ClearTexSubImage(mHandle, static_cast<GLint>(level), 0, 0,
- static_cast<GLint>(layer), mipSize.width,
- mipSize.height, mipSize.depthOrArrayLayers,
- glFormat.format, glFormat.type,
- clearValue == TextureBase::ClearValue::Zero
- ? kClearColorDataBytes0.data()
- : kClearColorDataBytes255.data());
- continue;
- }
-
- GLuint framebuffer = 0;
- gl.GenFramebuffers(1, &framebuffer);
- gl.BindFramebuffer(GL_DRAW_FRAMEBUFFER, framebuffer);
-
- GLenum attachment = GL_COLOR_ATTACHMENT0;
- gl.DrawBuffers(1, &attachment);
-
- gl.Disable(GL_SCISSOR_TEST);
- gl.ColorMask(true, true, true, true);
-
- auto DoClear = [&]() {
- switch (baseType) {
- case wgpu::TextureComponentType::Float: {
- gl.ClearBufferfv(GL_COLOR, 0,
- clearValue == TextureBase::ClearValue::Zero
- ? kClearColorDataFloat0.data()
- : kClearColorDataFloat1.data());
- break;
- }
- case wgpu::TextureComponentType::Uint: {
- gl.ClearBufferuiv(GL_COLOR, 0,
- clearValue == TextureBase::ClearValue::Zero
- ? kClearColorDataUint0.data()
- : kClearColorDataUint1.data());
- break;
- }
- case wgpu::TextureComponentType::Sint: {
- gl.ClearBufferiv(GL_COLOR, 0,
- reinterpret_cast<const GLint*>(
- clearValue == TextureBase::ClearValue::Zero
- ? kClearColorDataUint0.data()
- : kClearColorDataUint1.data()));
- break;
- }
-
- case wgpu::TextureComponentType::DepthComparison:
- UNREACHABLE();
- }
- };
+ break;
- if (GetArrayLayers() == 1) {
- switch (GetDimension()) {
- case wgpu::TextureDimension::e1D:
- UNREACHABLE();
- case wgpu::TextureDimension::e2D:
- gl.FramebufferTexture2D(GL_DRAW_FRAMEBUFFER, attachment,
- GetGLTarget(), GetHandle(), level);
- DoClear();
- break;
- case wgpu::TextureDimension::e3D:
- uint32_t depth =
- GetMipLevelVirtualSize(level).depthOrArrayLayers;
- for (GLint z = 0; z < static_cast<GLint>(depth); ++z) {
- gl.FramebufferTextureLayer(GL_DRAW_FRAMEBUFFER, attachment,
- GetHandle(), level, z);
- DoClear();
- }
- break;
- }
-
- } else {
- ASSERT(GetDimension() == wgpu::TextureDimension::e2D);
- gl.FramebufferTextureLayer(GL_DRAW_FRAMEBUFFER, attachment, GetHandle(),
- level, layer);
- DoClear();
- }
-
- gl.Enable(GL_SCISSOR_TEST);
- gl.DeleteFramebuffers(1, &framebuffer);
- gl.BindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
- }
+ case wgpu::TextureDimension::e1D:
+ case wgpu::TextureDimension::e3D:
+ UNREACHABLE();
}
}
+
+ gl.Enable(GL_SCISSOR_TEST);
+ gl.DeleteFramebuffers(1, &framebuffer);
} else {
ASSERT(range.aspects == Aspect::Color);
- // create temp buffer with clear color to copy to the texture image
- const TexelBlockInfo& blockInfo = GetFormat().GetAspectInfo(Aspect::Color).block;
- ASSERT(kTextureBytesPerRowAlignment % blockInfo.byteSize == 0);
-
- Extent3D largestMipSize = GetMipLevelPhysicalSize(range.baseMipLevel);
- uint32_t bytesPerRow =
- Align((largestMipSize.width / blockInfo.width) * blockInfo.byteSize, 4);
-
- // Make sure that we are not rounding
- ASSERT(bytesPerRow % blockInfo.byteSize == 0);
- ASSERT(largestMipSize.height % blockInfo.height == 0);
+ // For gl.ClearBufferiv/uiv calls
+ constexpr std::array<GLuint, 4> kClearColorDataUint0 = {0u, 0u, 0u, 0u};
+ constexpr std::array<GLuint, 4> kClearColorDataUint1 = {1u, 1u, 1u, 1u};
+ std::array<GLuint, 4> clearColorData;
+ clearColorData.fill((clearValue == TextureBase::ClearValue::Zero) ? 0u : 1u);
- uint64_t bufferSize64 = static_cast<uint64_t>(bytesPerRow) *
- (largestMipSize.height / blockInfo.height) *
- largestMipSize.depthOrArrayLayers;
- if (bufferSize64 > std::numeric_limits<size_t>::max()) {
- return DAWN_OUT_OF_MEMORY_ERROR("Unable to allocate buffer.");
- }
- size_t bufferSize = static_cast<size_t>(bufferSize64);
+ // For gl.ClearBufferfv calls
+ constexpr std::array<GLfloat, 4> kClearColorDataFloat0 = {0.f, 0.f, 0.f, 0.f};
+ constexpr std::array<GLfloat, 4> kClearColorDataFloat1 = {1.f, 1.f, 1.f, 1.f};
+ std::array<GLfloat, 4> fClearColorData;
+ fClearColorData.fill((clearValue == TextureBase::ClearValue::Zero) ? 0.f : 1.f);
- dawn::native::BufferDescriptor descriptor = {};
- descriptor.mappedAtCreation = true;
- descriptor.usage = wgpu::BufferUsage::CopySrc;
- descriptor.size = bufferSize;
+ static constexpr uint32_t MAX_TEXEL_SIZE = 16;
+ const TexelBlockInfo& blockInfo = GetFormat().GetAspectInfo(Aspect::Color).block;
+ ASSERT(blockInfo.byteSize <= MAX_TEXEL_SIZE);
- // We don't count the lazy clear of srcBuffer because it is an internal buffer.
- // TODO(natlee@microsoft.com): use Dynamic Uploader here for temp buffer
- Ref<Buffer> srcBuffer;
- DAWN_TRY_ASSIGN(srcBuffer, Buffer::CreateInternalBuffer(device, &descriptor, false));
+ // For gl.ClearTexSubImage calls
+ constexpr std::array<GLbyte, MAX_TEXEL_SIZE> kClearColorDataBytes0 = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+ constexpr std::array<GLbyte, MAX_TEXEL_SIZE> kClearColorDataBytes255 = {
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1};
- // Fill the buffer with clear color
- memset(srcBuffer->GetMappedRange(0, bufferSize), clearColor, bufferSize);
- srcBuffer->Unmap();
+ wgpu::TextureComponentType baseType = GetFormat().GetAspectInfo(Aspect::Color).baseType;
- gl.BindBuffer(GL_PIXEL_UNPACK_BUFFER, srcBuffer->GetHandle());
+ const GLFormat& glFormat = GetGLFormat();
for (uint32_t level = range.baseMipLevel; level < range.baseMipLevel + range.levelCount;
++level) {
- TextureCopy textureCopy;
- textureCopy.texture = this;
- textureCopy.mipLevel = level;
- textureCopy.origin = {};
- textureCopy.aspect = Aspect::Color;
-
- TextureDataLayout dataLayout;
- dataLayout.offset = 0;
- dataLayout.bytesPerRow = bytesPerRow;
- dataLayout.rowsPerImage = largestMipSize.height;
-
- Extent3D mipSize = GetMipLevelPhysicalSize(level);
-
+ Extent3D mipSize = GetMipLevelSingleSubresourcePhysicalSize(level);
for (uint32_t layer = range.baseArrayLayer;
layer < range.baseArrayLayer + range.layerCount; ++layer) {
if (clearValue == TextureBase::ClearValue::Zero &&
@@ -535,156 +387,300 @@ namespace dawn::native::opengl {
// Skip lazy clears if already initialized.
continue;
}
+ if (gl.IsAtLeastGL(4, 4)) {
+ gl.ClearTexSubImage(mHandle, static_cast<GLint>(level), 0, 0,
+ static_cast<GLint>(layer), mipSize.width,
+ mipSize.height, mipSize.depthOrArrayLayers,
+ glFormat.format, glFormat.type,
+ clearValue == TextureBase::ClearValue::Zero
+ ? kClearColorDataBytes0.data()
+ : kClearColorDataBytes255.data());
+ continue;
+ }
- textureCopy.origin.z = layer;
- DoTexSubImage(ToBackend(GetDevice())->gl, textureCopy, 0, dataLayout, mipSize);
- }
- }
- gl.BindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
- }
- if (clearValue == TextureBase::ClearValue::Zero) {
- SetIsSubresourceContentInitialized(true, range);
- device->IncrementLazyClearCountForTesting();
- }
- Touch();
- return {};
- }
+ GLuint framebuffer = 0;
+ gl.GenFramebuffers(1, &framebuffer);
+ gl.BindFramebuffer(GL_DRAW_FRAMEBUFFER, framebuffer);
- void Texture::EnsureSubresourceContentInitialized(const SubresourceRange& range) {
- if (!GetDevice()->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse)) {
- return;
- }
- if (!IsSubresourceContentInitialized(range)) {
- GetDevice()->ConsumedError(ClearTexture(range, TextureBase::ClearValue::Zero));
- }
- }
+ GLenum attachment = GL_COLOR_ATTACHMENT0;
+ gl.DrawBuffers(1, &attachment);
+
+ gl.Disable(GL_SCISSOR_TEST);
+ gl.ColorMask(true, true, true, true);
+
+ auto DoClear = [&]() {
+ switch (baseType) {
+ case wgpu::TextureComponentType::Float: {
+ gl.ClearBufferfv(GL_COLOR, 0,
+ clearValue == TextureBase::ClearValue::Zero
+ ? kClearColorDataFloat0.data()
+ : kClearColorDataFloat1.data());
+ break;
+ }
+ case wgpu::TextureComponentType::Uint: {
+ gl.ClearBufferuiv(GL_COLOR, 0,
+ clearValue == TextureBase::ClearValue::Zero
+ ? kClearColorDataUint0.data()
+ : kClearColorDataUint1.data());
+ break;
+ }
+ case wgpu::TextureComponentType::Sint: {
+ gl.ClearBufferiv(GL_COLOR, 0,
+ reinterpret_cast<const GLint*>(
+ clearValue == TextureBase::ClearValue::Zero
+ ? kClearColorDataUint0.data()
+ : kClearColorDataUint1.data()));
+ break;
+ }
- // TextureView
+ case wgpu::TextureComponentType::DepthComparison:
+ UNREACHABLE();
+ }
+ };
- TextureView::TextureView(TextureBase* texture, const TextureViewDescriptor* descriptor)
- : TextureViewBase(texture, descriptor), mOwnsHandle(false) {
- mTarget = TargetForTextureViewDimension(descriptor->dimension, descriptor->arrayLayerCount,
- texture->GetSampleCount());
+ if (GetArrayLayers() == 1) {
+ switch (GetDimension()) {
+ case wgpu::TextureDimension::e1D:
+ UNREACHABLE();
+ case wgpu::TextureDimension::e2D:
+ gl.FramebufferTexture2D(GL_DRAW_FRAMEBUFFER, attachment,
+ GetGLTarget(), GetHandle(), level);
+ DoClear();
+ break;
+ case wgpu::TextureDimension::e3D:
+ uint32_t depth = GetMipLevelSingleSubresourceVirtualSize(level)
+ .depthOrArrayLayers;
+ for (GLint z = 0; z < static_cast<GLint>(depth); ++z) {
+ gl.FramebufferTextureLayer(GL_DRAW_FRAMEBUFFER, attachment,
+ GetHandle(), level, z);
+ DoClear();
+ }
+ break;
+ }
- // Texture could be destroyed by the time we make a view.
- if (GetTexture()->GetTextureState() == Texture::TextureState::Destroyed) {
- return;
- }
+ } else {
+ ASSERT(GetDimension() == wgpu::TextureDimension::e2D);
+ gl.FramebufferTextureLayer(GL_DRAW_FRAMEBUFFER, attachment, GetHandle(),
+ level, layer);
+ DoClear();
+ }
- if (!RequiresCreatingNewTextureView(texture, descriptor)) {
- mHandle = ToBackend(texture)->GetHandle();
- } else {
- const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
- if (gl.IsAtLeastGL(4, 3)) {
- mHandle = GenTexture(gl);
- const Texture* textureGL = ToBackend(texture);
- gl.TextureView(mHandle, mTarget, textureGL->GetHandle(), GetInternalFormat(),
- descriptor->baseMipLevel, descriptor->mipLevelCount,
- descriptor->baseArrayLayer, descriptor->arrayLayerCount);
- mOwnsHandle = true;
- } else {
- // Simulate glTextureView() with texture-to-texture copies.
- mUseCopy = true;
- mHandle = 0;
+ gl.Enable(GL_SCISSOR_TEST);
+ gl.DeleteFramebuffers(1, &framebuffer);
+ gl.BindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
+ }
}
}
- }
-
- TextureView::~TextureView() {
- }
+ } else {
+ ASSERT(range.aspects == Aspect::Color);
+
+ // create temp buffer with clear color to copy to the texture image
+ const TexelBlockInfo& blockInfo = GetFormat().GetAspectInfo(Aspect::Color).block;
+ ASSERT(kTextureBytesPerRowAlignment % blockInfo.byteSize == 0);
+
+ Extent3D largestMipSize = GetMipLevelSingleSubresourcePhysicalSize(range.baseMipLevel);
+ uint32_t bytesPerRow =
+ Align((largestMipSize.width / blockInfo.width) * blockInfo.byteSize, 4);
+
+ // Make sure that we are not rounding
+ ASSERT(bytesPerRow % blockInfo.byteSize == 0);
+ ASSERT(largestMipSize.height % blockInfo.height == 0);
+
+ uint64_t bufferSize64 = static_cast<uint64_t>(bytesPerRow) *
+ (largestMipSize.height / blockInfo.height) *
+ largestMipSize.depthOrArrayLayers;
+ if (bufferSize64 > std::numeric_limits<size_t>::max()) {
+ return DAWN_OUT_OF_MEMORY_ERROR("Unable to allocate buffer.");
+ }
+ size_t bufferSize = static_cast<size_t>(bufferSize64);
+
+ dawn::native::BufferDescriptor descriptor = {};
+ descriptor.mappedAtCreation = true;
+ descriptor.usage = wgpu::BufferUsage::CopySrc;
+ descriptor.size = bufferSize;
+
+ // We don't count the lazy clear of srcBuffer because it is an internal buffer.
+ // TODO(natlee@microsoft.com): use Dynamic Uploader here for temp buffer
+ Ref<Buffer> srcBuffer;
+ DAWN_TRY_ASSIGN(srcBuffer, Buffer::CreateInternalBuffer(device, &descriptor, false));
+
+ // Fill the buffer with clear color
+ memset(srcBuffer->GetMappedRange(0, bufferSize), clearColor, bufferSize);
+ srcBuffer->Unmap();
+
+ gl.BindBuffer(GL_PIXEL_UNPACK_BUFFER, srcBuffer->GetHandle());
+ for (uint32_t level = range.baseMipLevel; level < range.baseMipLevel + range.levelCount;
+ ++level) {
+ TextureCopy textureCopy;
+ textureCopy.texture = this;
+ textureCopy.mipLevel = level;
+ textureCopy.origin = {};
+ textureCopy.aspect = Aspect::Color;
+
+ TextureDataLayout dataLayout;
+ dataLayout.offset = 0;
+ dataLayout.bytesPerRow = bytesPerRow;
+ dataLayout.rowsPerImage = largestMipSize.height;
+
+ Extent3D mipSize = GetMipLevelSingleSubresourcePhysicalSize(level);
+
+ for (uint32_t layer = range.baseArrayLayer;
+ layer < range.baseArrayLayer + range.layerCount; ++layer) {
+ if (clearValue == TextureBase::ClearValue::Zero &&
+ IsSubresourceContentInitialized(
+ SubresourceRange::SingleMipAndLayer(level, layer, Aspect::Color))) {
+ // Skip lazy clears if already initialized.
+ continue;
+ }
- void TextureView::DestroyImpl() {
- TextureViewBase::DestroyImpl();
- if (mOwnsHandle) {
- ToBackend(GetDevice())->gl.DeleteTextures(1, &mHandle);
+ textureCopy.origin.z = layer;
+ DoTexSubImage(ToBackend(GetDevice())->gl, textureCopy, 0, dataLayout, mipSize);
+ }
}
+ gl.BindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
}
-
- GLuint TextureView::GetHandle() const {
- ASSERT(mHandle != 0);
- return mHandle;
+ if (clearValue == TextureBase::ClearValue::Zero) {
+ SetIsSubresourceContentInitialized(true, range);
+ device->IncrementLazyClearCountForTesting();
}
+ Touch();
+ return {};
+}
- GLenum TextureView::GetGLTarget() const {
- return mTarget;
+void Texture::EnsureSubresourceContentInitialized(const SubresourceRange& range) {
+ if (!GetDevice()->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse)) {
+ return;
+ }
+ if (!IsSubresourceContentInitialized(range)) {
+ GetDevice()->ConsumedError(ClearTexture(range, TextureBase::ClearValue::Zero));
}
+}
- void TextureView::BindToFramebuffer(GLenum target, GLenum attachment) {
- const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
+// TextureView
- // Use the base texture where possible to minimize the amount of copying required on GLES.
- bool useOwnView = GetFormat().format != GetTexture()->GetFormat().format &&
- !GetTexture()->GetFormat().HasDepthOrStencil();
-
- GLuint handle, textarget, mipLevel, arrayLayer;
- if (useOwnView) {
- // Use our own texture handle and target which points to a subset of the texture's
- // subresources.
- handle = GetHandle();
- textarget = GetGLTarget();
- mipLevel = 0;
- arrayLayer = 0;
- } else {
- // Use the texture's handle and target, with the view's base mip level and base array
+TextureView::TextureView(TextureBase* texture, const TextureViewDescriptor* descriptor)
+ : TextureViewBase(texture, descriptor), mOwnsHandle(false) {
+ mTarget = TargetForTextureViewDimension(descriptor->dimension, descriptor->arrayLayerCount,
+ texture->GetSampleCount());
- handle = ToBackend(GetTexture())->GetHandle();
- textarget = ToBackend(GetTexture())->GetGLTarget();
- mipLevel = GetBaseMipLevel();
- arrayLayer = GetBaseArrayLayer();
- }
+ // Texture could be destroyed by the time we make a view.
+ if (GetTexture()->GetTextureState() == Texture::TextureState::Destroyed) {
+ return;
+ }
- ASSERT(handle != 0);
- if (textarget == GL_TEXTURE_2D_ARRAY || textarget == GL_TEXTURE_3D) {
- gl.FramebufferTextureLayer(target, attachment, handle, mipLevel, arrayLayer);
+ if (!RequiresCreatingNewTextureView(texture, descriptor)) {
+ mHandle = ToBackend(texture)->GetHandle();
+ } else {
+ const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
+ if (gl.IsAtLeastGL(4, 3)) {
+ mHandle = GenTexture(gl);
+ const Texture* textureGL = ToBackend(texture);
+ gl.TextureView(mHandle, mTarget, textureGL->GetHandle(), GetInternalFormat(),
+ descriptor->baseMipLevel, descriptor->mipLevelCount,
+ descriptor->baseArrayLayer, descriptor->arrayLayerCount);
+ mOwnsHandle = true;
} else {
- gl.FramebufferTexture2D(target, attachment, textarget, handle, mipLevel);
+ // Simulate glTextureView() with texture-to-texture copies.
+ mUseCopy = true;
+ mHandle = 0;
}
}
+}
- void TextureView::CopyIfNeeded() {
- if (!mUseCopy) {
- return;
- }
+TextureView::~TextureView() {}
- const Texture* texture = ToBackend(GetTexture());
- if (mGenID == texture->GetGenID()) {
- return;
- }
+void TextureView::DestroyImpl() {
+ TextureViewBase::DestroyImpl();
+ if (mOwnsHandle) {
+ ToBackend(GetDevice())->gl.DeleteTextures(1, &mHandle);
+ }
+}
+
+GLuint TextureView::GetHandle() const {
+ ASSERT(mHandle != 0);
+ return mHandle;
+}
+
+GLenum TextureView::GetGLTarget() const {
+ return mTarget;
+}
+
+void TextureView::BindToFramebuffer(GLenum target, GLenum attachment) {
+ const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
+
+ // Use the base texture where possible to minimize the amount of copying required on GLES.
+ bool useOwnView = GetFormat().format != GetTexture()->GetFormat().format &&
+ !GetTexture()->GetFormat().HasDepthOrStencil();
+
+ GLuint handle, textarget, mipLevel, arrayLayer;
+ if (useOwnView) {
+ // Use our own texture handle and target which points to a subset of the texture's
+ // subresources.
+ handle = GetHandle();
+ textarget = GetGLTarget();
+ mipLevel = 0;
+ arrayLayer = 0;
+ } else {
+        // Use the texture's handle and target, with the view's base mip level and base array
+        // layer.
+ handle = ToBackend(GetTexture())->GetHandle();
+ textarget = ToBackend(GetTexture())->GetGLTarget();
+ mipLevel = GetBaseMipLevel();
+ arrayLayer = GetBaseArrayLayer();
+ }
- Device* device = ToBackend(GetDevice());
- const OpenGLFunctions& gl = device->gl;
- uint32_t srcLevel = GetBaseMipLevel();
- uint32_t numLevels = GetLevelCount();
+ ASSERT(handle != 0);
+ if (textarget == GL_TEXTURE_2D_ARRAY || textarget == GL_TEXTURE_3D) {
+ gl.FramebufferTextureLayer(target, attachment, handle, mipLevel, arrayLayer);
+ } else {
+ gl.FramebufferTexture2D(target, attachment, textarget, handle, mipLevel);
+ }
+}
- uint32_t width = texture->GetWidth() >> srcLevel;
- uint32_t height = texture->GetHeight() >> srcLevel;
- Extent3D size{width, height, GetLayerCount()};
+void TextureView::CopyIfNeeded() {
+ if (!mUseCopy) {
+ return;
+ }
- if (mHandle == 0) {
- mHandle = GenTexture(gl);
- gl.BindTexture(mTarget, mHandle);
- AllocateTexture(gl, mTarget, texture->GetSampleCount(), numLevels, GetInternalFormat(),
- size);
- mOwnsHandle = true;
- }
+ const Texture* texture = ToBackend(GetTexture());
+ if (mGenID == texture->GetGenID()) {
+ return;
+ }
- Origin3D src{0, 0, GetBaseArrayLayer()};
- Origin3D dst{0, 0, 0};
- for (GLuint level = 0; level < numLevels; ++level) {
- CopyImageSubData(gl, GetAspects(), texture->GetHandle(), texture->GetGLTarget(),
- srcLevel + level, src, mHandle, mTarget, level, dst, size);
- }
+ Device* device = ToBackend(GetDevice());
+ const OpenGLFunctions& gl = device->gl;
+ uint32_t srcLevel = GetBaseMipLevel();
+ uint32_t numLevels = GetLevelCount();
+
+ uint32_t width = texture->GetWidth() >> srcLevel;
+ uint32_t height = texture->GetHeight() >> srcLevel;
+ Extent3D size{width, height, GetLayerCount()};
- mGenID = texture->GetGenID();
+ if (mHandle == 0) {
+ mHandle = GenTexture(gl);
+ gl.BindTexture(mTarget, mHandle);
+ AllocateTexture(gl, mTarget, texture->GetSampleCount(), numLevels, GetInternalFormat(),
+ size);
+ mOwnsHandle = true;
}
- GLenum TextureView::GetInternalFormat() const {
- // Depth/stencil don't support reinterpretation, and the aspect is specified at
- // bind time. In that case, we use the base texture format.
- const Format& format =
- GetFormat().HasDepthOrStencil() ? GetTexture()->GetFormat() : GetFormat();
- const GLFormat& glFormat = ToBackend(GetDevice())->GetGLFormat(format);
- return glFormat.internalFormat;
+ Origin3D src{0, 0, GetBaseArrayLayer()};
+ Origin3D dst{0, 0, 0};
+ for (GLuint level = 0; level < numLevels; ++level) {
+ CopyImageSubData(gl, GetAspects(), texture->GetHandle(), texture->GetGLTarget(),
+ srcLevel + level, src, mHandle, mTarget, level, dst, size);
}
+ mGenID = texture->GetGenID();
+}
+
+GLenum TextureView::GetInternalFormat() const {
+ // Depth/stencil don't support reinterpretation, and the aspect is specified at
+ // bind time. In that case, we use the base texture format.
+ const Format& format =
+ GetFormat().HasDepthOrStencil() ? GetTexture()->GetFormat() : GetFormat();
+ const GLFormat& glFormat = ToBackend(GetDevice())->GetGLFormat(format);
+ return glFormat.internalFormat;
+}
+
} // namespace dawn::native::opengl
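
For readers following the target-selection logic in TargetForTexture() above, the standalone sketch below restates the same mapping outside of Dawn. It is illustrative only: FakeTextureDescriptor and PickTarget are hypothetical names, and the snippet assumes GL enums are available from an OpenGL 3.2-level header or a loader such as glad.

// Hypothetical, self-contained restatement of the TargetForTexture() rules above.
// Assumes GL enums from an OpenGL 3.2+ header or a loader such as glad.
#include <glad/glad.h>

#include <cassert>
#include <cstdint>

struct FakeTextureDescriptor {    // stand-in for wgpu::TextureDescriptor
    int dimension = 2;            // 2 or 3 (1D is rejected earlier in Dawn)
    uint32_t depthOrArrayLayers = 1;
    uint32_t sampleCount = 1;
};

GLenum PickTarget(const FakeTextureDescriptor& d) {
    assert(d.dimension == 2 || d.dimension == 3);
    if (d.dimension == 3) {
        assert(d.sampleCount == 1);  // 3D textures are never multisampled
        return GL_TEXTURE_3D;
    }
    if (d.depthOrArrayLayers > 1) {
        assert(d.sampleCount == 1);  // layered 2D textures are single-sampled here
        return GL_TEXTURE_2D_ARRAY;
    }
    return (d.sampleCount > 1) ? GL_TEXTURE_2D_MULTISAMPLE : GL_TEXTURE_2D;
}

The rule of thumb matches the function above: 3D is never multisampled, layered 2D maps to GL_TEXTURE_2D_ARRAY, and multisampling only applies to single-layer 2D textures.
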
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/TextureGL.h b/chromium/third_party/dawn/src/dawn/native/opengl/TextureGL.h
index 9d03201df57..c9bf63f87b8 100644
--- a/chromium/third_party/dawn/src/dawn/native/opengl/TextureGL.h
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/TextureGL.h
@@ -21,57 +21,54 @@
namespace dawn::native::opengl {
- class Device;
- struct GLFormat;
-
- class Texture final : public TextureBase {
- public:
- Texture(Device* device, const TextureDescriptor* descriptor);
- Texture(Device* device,
- const TextureDescriptor* descriptor,
- GLuint handle,
- TextureState state);
-
- GLuint GetHandle() const;
- GLenum GetGLTarget() const;
- const GLFormat& GetGLFormat() const;
- uint32_t GetGenID() const;
- void Touch();
-
- void EnsureSubresourceContentInitialized(const SubresourceRange& range);
-
- private:
- ~Texture() override;
-
- void DestroyImpl() override;
- MaybeError ClearTexture(const SubresourceRange& range, TextureBase::ClearValue clearValue);
-
- GLuint mHandle;
- GLenum mTarget;
- uint32_t mGenID = 0;
- };
-
- class TextureView final : public TextureViewBase {
- public:
- TextureView(TextureBase* texture, const TextureViewDescriptor* descriptor);
-
- GLuint GetHandle() const;
- GLenum GetGLTarget() const;
- void BindToFramebuffer(GLenum target, GLenum attachment);
- void CopyIfNeeded();
-
- private:
- ~TextureView() override;
- void DestroyImpl() override;
- GLenum GetInternalFormat() const;
-
- // TODO(crbug.com/dawn/1355): Delete this handle on texture destroy.
- GLuint mHandle;
- GLenum mTarget;
- bool mOwnsHandle;
- bool mUseCopy = false;
- uint32_t mGenID = 0;
- };
+class Device;
+struct GLFormat;
+
+class Texture final : public TextureBase {
+ public:
+ Texture(Device* device, const TextureDescriptor* descriptor);
+ Texture(Device* device, const TextureDescriptor* descriptor, GLuint handle, TextureState state);
+
+ GLuint GetHandle() const;
+ GLenum GetGLTarget() const;
+ const GLFormat& GetGLFormat() const;
+ uint32_t GetGenID() const;
+ void Touch();
+
+ void EnsureSubresourceContentInitialized(const SubresourceRange& range);
+
+ private:
+ ~Texture() override;
+
+ void DestroyImpl() override;
+ MaybeError ClearTexture(const SubresourceRange& range, TextureBase::ClearValue clearValue);
+
+ GLuint mHandle;
+ GLenum mTarget;
+ uint32_t mGenID = 0;
+};
+
+class TextureView final : public TextureViewBase {
+ public:
+ TextureView(TextureBase* texture, const TextureViewDescriptor* descriptor);
+
+ GLuint GetHandle() const;
+ GLenum GetGLTarget() const;
+ void BindToFramebuffer(GLenum target, GLenum attachment);
+ void CopyIfNeeded();
+
+ private:
+ ~TextureView() override;
+ void DestroyImpl() override;
+ GLenum GetInternalFormat() const;
+
+ // TODO(crbug.com/dawn/1355): Delete this handle on texture destroy.
+ GLuint mHandle;
+ GLenum mTarget;
+ bool mOwnsHandle;
+ bool mUseCopy = false;
+ uint32_t mGenID = 0;
+};
} // namespace dawn::native::opengl
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/UtilsGL.cpp b/chromium/third_party/dawn/src/dawn/native/opengl/UtilsGL.cpp
index 746f93bb3fd..e35b9a14aa9 100644
--- a/chromium/third_party/dawn/src/dawn/native/opengl/UtilsGL.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/UtilsGL.cpp
@@ -20,134 +20,134 @@
namespace dawn::native::opengl {
- GLuint ToOpenGLCompareFunction(wgpu::CompareFunction compareFunction) {
- switch (compareFunction) {
- case wgpu::CompareFunction::Never:
- return GL_NEVER;
- case wgpu::CompareFunction::Less:
- return GL_LESS;
- case wgpu::CompareFunction::LessEqual:
- return GL_LEQUAL;
- case wgpu::CompareFunction::Greater:
- return GL_GREATER;
- case wgpu::CompareFunction::GreaterEqual:
- return GL_GEQUAL;
- case wgpu::CompareFunction::NotEqual:
- return GL_NOTEQUAL;
- case wgpu::CompareFunction::Equal:
- return GL_EQUAL;
- case wgpu::CompareFunction::Always:
- return GL_ALWAYS;
+GLuint ToOpenGLCompareFunction(wgpu::CompareFunction compareFunction) {
+ switch (compareFunction) {
+ case wgpu::CompareFunction::Never:
+ return GL_NEVER;
+ case wgpu::CompareFunction::Less:
+ return GL_LESS;
+ case wgpu::CompareFunction::LessEqual:
+ return GL_LEQUAL;
+ case wgpu::CompareFunction::Greater:
+ return GL_GREATER;
+ case wgpu::CompareFunction::GreaterEqual:
+ return GL_GEQUAL;
+ case wgpu::CompareFunction::NotEqual:
+ return GL_NOTEQUAL;
+ case wgpu::CompareFunction::Equal:
+ return GL_EQUAL;
+ case wgpu::CompareFunction::Always:
+ return GL_ALWAYS;
- case wgpu::CompareFunction::Undefined:
- break;
- }
- UNREACHABLE();
+ case wgpu::CompareFunction::Undefined:
+ break;
}
+ UNREACHABLE();
+}
- GLint GetStencilMaskFromStencilFormat(wgpu::TextureFormat depthStencilFormat) {
- switch (depthStencilFormat) {
- case wgpu::TextureFormat::Depth24PlusStencil8:
- case wgpu::TextureFormat::Depth24UnormStencil8:
- case wgpu::TextureFormat::Depth32FloatStencil8:
- case wgpu::TextureFormat::Stencil8:
- return 0xFF;
+GLint GetStencilMaskFromStencilFormat(wgpu::TextureFormat depthStencilFormat) {
+ switch (depthStencilFormat) {
+ case wgpu::TextureFormat::Depth24PlusStencil8:
+ case wgpu::TextureFormat::Depth24UnormStencil8:
+ case wgpu::TextureFormat::Depth32FloatStencil8:
+ case wgpu::TextureFormat::Stencil8:
+ return 0xFF;
- default:
- UNREACHABLE();
- }
+ default:
+ UNREACHABLE();
}
+}
- void CopyImageSubData(const OpenGLFunctions& gl,
- Aspect srcAspects,
- GLuint srcHandle,
- GLenum srcTarget,
- GLint srcLevel,
- const Origin3D& src,
- GLuint dstHandle,
- GLenum dstTarget,
- GLint dstLevel,
- const Origin3D& dst,
- const Extent3D& size) {
- if (gl.IsAtLeastGL(4, 3) || gl.IsAtLeastGLES(3, 2)) {
- gl.CopyImageSubData(srcHandle, srcTarget, srcLevel, src.x, src.y, src.z, dstHandle,
- dstTarget, dstLevel, dst.x, dst.y, dst.z, size.width, size.height,
- size.depthOrArrayLayers);
- return;
- }
+void CopyImageSubData(const OpenGLFunctions& gl,
+ Aspect srcAspects,
+ GLuint srcHandle,
+ GLenum srcTarget,
+ GLint srcLevel,
+ const Origin3D& src,
+ GLuint dstHandle,
+ GLenum dstTarget,
+ GLint dstLevel,
+ const Origin3D& dst,
+ const Extent3D& size) {
+ if (gl.IsAtLeastGL(4, 3) || gl.IsAtLeastGLES(3, 2)) {
+ gl.CopyImageSubData(srcHandle, srcTarget, srcLevel, src.x, src.y, src.z, dstHandle,
+ dstTarget, dstLevel, dst.x, dst.y, dst.z, size.width, size.height,
+ size.depthOrArrayLayers);
+ return;
+ }
- GLint prevReadFBO = 0, prevDrawFBO = 0;
- gl.GetIntegerv(GL_READ_FRAMEBUFFER_BINDING, &prevReadFBO);
- gl.GetIntegerv(GL_DRAW_FRAMEBUFFER_BINDING, &prevDrawFBO);
+ GLint prevReadFBO = 0, prevDrawFBO = 0;
+ gl.GetIntegerv(GL_READ_FRAMEBUFFER_BINDING, &prevReadFBO);
+ gl.GetIntegerv(GL_DRAW_FRAMEBUFFER_BINDING, &prevDrawFBO);
- // Generate temporary framebuffers for the blits.
- GLuint readFBO = 0, drawFBO = 0;
- gl.GenFramebuffers(1, &readFBO);
- gl.GenFramebuffers(1, &drawFBO);
- gl.BindFramebuffer(GL_READ_FRAMEBUFFER, readFBO);
- gl.BindFramebuffer(GL_DRAW_FRAMEBUFFER, drawFBO);
+ // Generate temporary framebuffers for the blits.
+ GLuint readFBO = 0, drawFBO = 0;
+ gl.GenFramebuffers(1, &readFBO);
+ gl.GenFramebuffers(1, &drawFBO);
+ gl.BindFramebuffer(GL_READ_FRAMEBUFFER, readFBO);
+ gl.BindFramebuffer(GL_DRAW_FRAMEBUFFER, drawFBO);
- // Reset state that may affect glBlitFramebuffer().
- gl.Disable(GL_SCISSOR_TEST);
- GLenum blitMask = 0;
- if (srcAspects & Aspect::Color) {
- blitMask |= GL_COLOR_BUFFER_BIT;
- }
- if (srcAspects & Aspect::Depth) {
- blitMask |= GL_DEPTH_BUFFER_BIT;
- }
- if (srcAspects & Aspect::Stencil) {
- blitMask |= GL_STENCIL_BUFFER_BIT;
- }
+ // Reset state that may affect glBlitFramebuffer().
+ gl.Disable(GL_SCISSOR_TEST);
+ GLenum blitMask = 0;
+ if (srcAspects & Aspect::Color) {
+ blitMask |= GL_COLOR_BUFFER_BIT;
+ }
+ if (srcAspects & Aspect::Depth) {
+ blitMask |= GL_DEPTH_BUFFER_BIT;
+ }
+ if (srcAspects & Aspect::Stencil) {
+ blitMask |= GL_STENCIL_BUFFER_BIT;
+ }
- // Iterate over all layers, doing a single blit for each.
- for (uint32_t layer = 0; layer < size.depthOrArrayLayers; ++layer) {
- // Set attachments for all aspects.
- for (Aspect aspect : IterateEnumMask(srcAspects)) {
- GLenum glAttachment;
- switch (aspect) {
- case Aspect::Color:
- glAttachment = GL_COLOR_ATTACHMENT0;
- break;
- case Aspect::Depth:
- glAttachment = GL_DEPTH_ATTACHMENT;
- break;
- case Aspect::Stencil:
- glAttachment = GL_STENCIL_ATTACHMENT;
- break;
- case Aspect::CombinedDepthStencil:
- case Aspect::None:
- case Aspect::Plane0:
- case Aspect::Plane1:
- UNREACHABLE();
- }
- if (srcTarget == GL_TEXTURE_2D) {
- gl.FramebufferTexture2D(GL_READ_FRAMEBUFFER, glAttachment, srcTarget, srcHandle,
- srcLevel);
- } else {
- gl.FramebufferTextureLayer(GL_READ_FRAMEBUFFER, glAttachment, srcHandle,
- srcLevel, src.z + layer);
- }
- if (dstTarget == GL_TEXTURE_2D) {
- gl.FramebufferTexture2D(GL_DRAW_FRAMEBUFFER, glAttachment, dstTarget, dstHandle,
- dstLevel);
- } else if (dstTarget == GL_TEXTURE_CUBE_MAP) {
- GLenum target = GL_TEXTURE_CUBE_MAP_POSITIVE_X + layer;
- gl.FramebufferTexture2D(GL_DRAW_FRAMEBUFFER, glAttachment, target, dstHandle,
- dstLevel);
- } else {
- gl.FramebufferTextureLayer(GL_DRAW_FRAMEBUFFER, glAttachment, dstHandle,
- dstLevel, dst.z + layer);
- }
+ // Iterate over all layers, doing a single blit for each.
+ for (uint32_t layer = 0; layer < size.depthOrArrayLayers; ++layer) {
+ // Set attachments for all aspects.
+ for (Aspect aspect : IterateEnumMask(srcAspects)) {
+ GLenum glAttachment;
+ switch (aspect) {
+ case Aspect::Color:
+ glAttachment = GL_COLOR_ATTACHMENT0;
+ break;
+ case Aspect::Depth:
+ glAttachment = GL_DEPTH_ATTACHMENT;
+ break;
+ case Aspect::Stencil:
+ glAttachment = GL_STENCIL_ATTACHMENT;
+ break;
+ case Aspect::CombinedDepthStencil:
+ case Aspect::None:
+ case Aspect::Plane0:
+ case Aspect::Plane1:
+ UNREACHABLE();
+ }
+ if (srcTarget == GL_TEXTURE_2D) {
+ gl.FramebufferTexture2D(GL_READ_FRAMEBUFFER, glAttachment, srcTarget, srcHandle,
+ srcLevel);
+ } else {
+ gl.FramebufferTextureLayer(GL_READ_FRAMEBUFFER, glAttachment, srcHandle, srcLevel,
+ src.z + layer);
+ }
+ if (dstTarget == GL_TEXTURE_2D) {
+ gl.FramebufferTexture2D(GL_DRAW_FRAMEBUFFER, glAttachment, dstTarget, dstHandle,
+ dstLevel);
+ } else if (dstTarget == GL_TEXTURE_CUBE_MAP) {
+ GLenum target = GL_TEXTURE_CUBE_MAP_POSITIVE_X + layer;
+ gl.FramebufferTexture2D(GL_DRAW_FRAMEBUFFER, glAttachment, target, dstHandle,
+ dstLevel);
+ } else {
+ gl.FramebufferTextureLayer(GL_DRAW_FRAMEBUFFER, glAttachment, dstHandle, dstLevel,
+ dst.z + layer);
}
- gl.BlitFramebuffer(src.x, src.y, src.x + size.width, src.y + size.height, dst.x, dst.y,
- dst.x + size.width, dst.y + size.height, blitMask, GL_NEAREST);
}
- gl.Enable(GL_SCISSOR_TEST);
- gl.DeleteFramebuffers(1, &readFBO);
- gl.DeleteFramebuffers(1, &drawFBO);
- gl.BindFramebuffer(GL_READ_FRAMEBUFFER, prevReadFBO);
- gl.BindFramebuffer(GL_DRAW_FRAMEBUFFER, prevDrawFBO);
+ gl.BlitFramebuffer(src.x, src.y, src.x + size.width, src.y + size.height, dst.x, dst.y,
+ dst.x + size.width, dst.y + size.height, blitMask, GL_NEAREST);
}
+ gl.Enable(GL_SCISSOR_TEST);
+ gl.DeleteFramebuffers(1, &readFBO);
+ gl.DeleteFramebuffers(1, &drawFBO);
+ gl.BindFramebuffer(GL_READ_FRAMEBUFFER, prevReadFBO);
+ gl.BindFramebuffer(GL_DRAW_FRAMEBUFFER, prevDrawFBO);
+}
} // namespace dawn::native::opengl
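
CopyImageSubData() above prefers glCopyImageSubData (GL 4.3 / GLES 3.2) and otherwise emulates it with per-layer framebuffer blits. The sketch below reduces that fallback to a single 2D color copy; BlitCopy2D is a hypothetical helper, and it assumes a current OpenGL 3.0+ context with functions resolved through a loader such as glad.

// Hypothetical reduction of the blit fallback to one 2D color copy.
// Assumes a current OpenGL 3.0+ context and functions resolved via a loader (e.g. glad).
#include <glad/glad.h>

void BlitCopy2D(GLuint srcTex, GLint srcLevel,
                GLuint dstTex, GLint dstLevel,
                GLint width, GLint height) {
    GLuint fbos[2] = {0, 0};
    glGenFramebuffers(2, fbos);
    glBindFramebuffer(GL_READ_FRAMEBUFFER, fbos[0]);
    glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
                           GL_TEXTURE_2D, srcTex, srcLevel);
    glBindFramebuffer(GL_DRAW_FRAMEBUFFER, fbos[1]);
    glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
                           GL_TEXTURE_2D, dstTex, dstLevel);

    // The scissor test affects glBlitFramebuffer, so disable it for the copy.
    glDisable(GL_SCISSOR_TEST);
    glBlitFramebuffer(0, 0, width, height, 0, 0, width, height,
                      GL_COLOR_BUFFER_BIT, GL_NEAREST);
    glEnable(GL_SCISSOR_TEST);

    // Deleting the framebuffers also unbinds them from READ/DRAW.
    glDeleteFramebuffers(2, fbos);
}

As in the patch, scissor state is disabled around the blit because glBlitFramebuffer honors the scissor test on the draw framebuffer.
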
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/UtilsGL.h b/chromium/third_party/dawn/src/dawn/native/opengl/UtilsGL.h
index 78e12dbbd00..97d4f2a6de0 100644
--- a/chromium/third_party/dawn/src/dawn/native/opengl/UtilsGL.h
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/UtilsGL.h
@@ -20,21 +20,21 @@
#include "dawn/native/opengl/opengl_platform.h"
namespace dawn::native::opengl {
- struct OpenGLFunctions;
+struct OpenGLFunctions;
- GLuint ToOpenGLCompareFunction(wgpu::CompareFunction compareFunction);
- GLint GetStencilMaskFromStencilFormat(wgpu::TextureFormat depthStencilFormat);
- void CopyImageSubData(const OpenGLFunctions& gl,
- Aspect srcAspects,
- GLuint srcHandle,
- GLenum srcTarget,
- GLint srcLevel,
- const Origin3D& src,
- GLuint dstHandle,
- GLenum dstTarget,
- GLint dstLevel,
- const Origin3D& dst,
- const Extent3D& size);
+GLuint ToOpenGLCompareFunction(wgpu::CompareFunction compareFunction);
+GLint GetStencilMaskFromStencilFormat(wgpu::TextureFormat depthStencilFormat);
+void CopyImageSubData(const OpenGLFunctions& gl,
+ Aspect srcAspects,
+ GLuint srcHandle,
+ GLenum srcTarget,
+ GLint srcLevel,
+ const Origin3D& src,
+ GLuint dstHandle,
+ GLenum dstTarget,
+ GLint dstLevel,
+ const Origin3D& dst,
+ const Extent3D& size);
} // namespace dawn::native::opengl
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/supported_extensions.json b/chromium/third_party/dawn/src/dawn/native/opengl/supported_extensions.json
index 8e006337b9f..8c110d012bd 100644
--- a/chromium/third_party/dawn/src/dawn/native/opengl/supported_extensions.json
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/supported_extensions.json
@@ -18,6 +18,8 @@
"supported_extensions": [
"GL_EXT_texture_compression_s3tc",
"GL_EXT_texture_compression_s3tc_srgb",
- "GL_OES_EGL_image"
+ "GL_OES_EGL_image",
+ "GL_EXT_texture_format_BGRA8888",
+ "GL_APPLE_texture_format_BGRA8888"
]
}
diff --git a/chromium/third_party/dawn/src/dawn/native/utils/WGPUHelpers.cpp b/chromium/third_party/dawn/src/dawn/native/utils/WGPUHelpers.cpp
index a7ab910e514..722476e8cc3 100644
--- a/chromium/third_party/dawn/src/dawn/native/utils/WGPUHelpers.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/utils/WGPUHelpers.cpp
@@ -14,6 +14,12 @@
#include "dawn/native/utils/WGPUHelpers.h"
+#include <cstring>
+#include <iomanip>
+#include <limits>
+#include <mutex>
+#include <sstream>
+
#include "dawn/common/Assert.h"
#include "dawn/common/Constants.h"
#include "dawn/native/BindGroup.h"
@@ -25,168 +31,156 @@
#include "dawn/native/Sampler.h"
#include "dawn/native/ShaderModule.h"
-#include <cstring>
-#include <iomanip>
-#include <limits>
-#include <mutex>
-#include <sstream>
-
namespace dawn::native::utils {
- ResultOrError<Ref<ShaderModuleBase>> CreateShaderModule(DeviceBase* device,
- const char* source) {
- ShaderModuleWGSLDescriptor wgslDesc;
- wgslDesc.source = source;
- ShaderModuleDescriptor descriptor;
- descriptor.nextInChain = &wgslDesc;
- return device->CreateShaderModule(&descriptor);
- }
-
- ResultOrError<Ref<BufferBase>> CreateBufferFromData(DeviceBase* device,
- wgpu::BufferUsage usage,
- const void* data,
- uint64_t size) {
- BufferDescriptor descriptor;
- descriptor.size = size;
- descriptor.usage = usage;
- descriptor.mappedAtCreation = true;
- Ref<BufferBase> buffer;
- DAWN_TRY_ASSIGN(buffer, device->CreateBuffer(&descriptor));
- memcpy(buffer->GetMappedRange(0, size), data, size);
- buffer->Unmap();
- return buffer;
- }
-
- ResultOrError<Ref<PipelineLayoutBase>> MakeBasicPipelineLayout(
- DeviceBase* device,
- const Ref<BindGroupLayoutBase>& bindGroupLayout) {
- PipelineLayoutDescriptor descriptor;
- descriptor.bindGroupLayoutCount = 1;
- BindGroupLayoutBase* bgl = bindGroupLayout.Get();
- descriptor.bindGroupLayouts = &bgl;
- return device->CreatePipelineLayout(&descriptor);
- }
-
- ResultOrError<Ref<BindGroupLayoutBase>> MakeBindGroupLayout(
- DeviceBase* device,
- std::initializer_list<BindingLayoutEntryInitializationHelper> entriesInitializer,
- bool allowInternalBinding) {
- std::vector<BindGroupLayoutEntry> entries;
- for (const BindingLayoutEntryInitializationHelper& entry : entriesInitializer) {
- entries.push_back(entry);
- }
-
- BindGroupLayoutDescriptor descriptor;
- descriptor.entryCount = static_cast<uint32_t>(entries.size());
- descriptor.entries = entries.data();
- return device->CreateBindGroupLayout(&descriptor, allowInternalBinding);
- }
-
- BindingLayoutEntryInitializationHelper::BindingLayoutEntryInitializationHelper(
- uint32_t entryBinding,
- wgpu::ShaderStage entryVisibility,
- wgpu::BufferBindingType bufferType,
- bool bufferHasDynamicOffset,
- uint64_t bufferMinBindingSize) {
- binding = entryBinding;
- visibility = entryVisibility;
- buffer.type = bufferType;
- buffer.hasDynamicOffset = bufferHasDynamicOffset;
- buffer.minBindingSize = bufferMinBindingSize;
- }
-
- BindingLayoutEntryInitializationHelper::BindingLayoutEntryInitializationHelper(
- uint32_t entryBinding,
- wgpu::ShaderStage entryVisibility,
- wgpu::SamplerBindingType samplerType) {
- binding = entryBinding;
- visibility = entryVisibility;
- sampler.type = samplerType;
- }
-
- BindingLayoutEntryInitializationHelper::BindingLayoutEntryInitializationHelper(
- uint32_t entryBinding,
- wgpu::ShaderStage entryVisibility,
- wgpu::TextureSampleType textureSampleType,
- wgpu::TextureViewDimension textureViewDimension,
- bool textureMultisampled) {
- binding = entryBinding;
- visibility = entryVisibility;
- texture.sampleType = textureSampleType;
- texture.viewDimension = textureViewDimension;
- texture.multisampled = textureMultisampled;
+ResultOrError<Ref<ShaderModuleBase>> CreateShaderModule(DeviceBase* device, const char* source) {
+ ShaderModuleWGSLDescriptor wgslDesc;
+ wgslDesc.source = source;
+ ShaderModuleDescriptor descriptor;
+ descriptor.nextInChain = &wgslDesc;
+ return device->CreateShaderModule(&descriptor);
+}
+
+ResultOrError<Ref<BufferBase>> CreateBufferFromData(DeviceBase* device,
+ wgpu::BufferUsage usage,
+ const void* data,
+ uint64_t size) {
+ BufferDescriptor descriptor;
+ descriptor.size = size;
+ descriptor.usage = usage;
+ descriptor.mappedAtCreation = true;
+ Ref<BufferBase> buffer;
+ DAWN_TRY_ASSIGN(buffer, device->CreateBuffer(&descriptor));
+ memcpy(buffer->GetMappedRange(0, size), data, size);
+ buffer->Unmap();
+ return buffer;
+}
+
+ResultOrError<Ref<PipelineLayoutBase>> MakeBasicPipelineLayout(
+ DeviceBase* device,
+ const Ref<BindGroupLayoutBase>& bindGroupLayout) {
+ PipelineLayoutDescriptor descriptor;
+ descriptor.bindGroupLayoutCount = 1;
+ BindGroupLayoutBase* bgl = bindGroupLayout.Get();
+ descriptor.bindGroupLayouts = &bgl;
+ return device->CreatePipelineLayout(&descriptor);
+}
+
+ResultOrError<Ref<BindGroupLayoutBase>> MakeBindGroupLayout(
+ DeviceBase* device,
+ std::initializer_list<BindingLayoutEntryInitializationHelper> entriesInitializer,
+ bool allowInternalBinding) {
+ std::vector<BindGroupLayoutEntry> entries;
+ for (const BindingLayoutEntryInitializationHelper& entry : entriesInitializer) {
+ entries.push_back(entry);
}
- BindingLayoutEntryInitializationHelper::BindingLayoutEntryInitializationHelper(
- uint32_t entryBinding,
- wgpu::ShaderStage entryVisibility,
- wgpu::StorageTextureAccess storageTextureAccess,
- wgpu::TextureFormat format,
- wgpu::TextureViewDimension textureViewDimension) {
- binding = entryBinding;
- visibility = entryVisibility;
- storageTexture.access = storageTextureAccess;
- storageTexture.format = format;
- storageTexture.viewDimension = textureViewDimension;
+ BindGroupLayoutDescriptor descriptor;
+ descriptor.entryCount = static_cast<uint32_t>(entries.size());
+ descriptor.entries = entries.data();
+ return device->CreateBindGroupLayout(&descriptor, allowInternalBinding);
+}
+
+BindingLayoutEntryInitializationHelper::BindingLayoutEntryInitializationHelper(
+ uint32_t entryBinding,
+ wgpu::ShaderStage entryVisibility,
+ wgpu::BufferBindingType bufferType,
+ bool bufferHasDynamicOffset,
+ uint64_t bufferMinBindingSize) {
+ binding = entryBinding;
+ visibility = entryVisibility;
+ buffer.type = bufferType;
+ buffer.hasDynamicOffset = bufferHasDynamicOffset;
+ buffer.minBindingSize = bufferMinBindingSize;
+}
+
+BindingLayoutEntryInitializationHelper::BindingLayoutEntryInitializationHelper(
+ uint32_t entryBinding,
+ wgpu::ShaderStage entryVisibility,
+ wgpu::SamplerBindingType samplerType) {
+ binding = entryBinding;
+ visibility = entryVisibility;
+ sampler.type = samplerType;
+}
+
+BindingLayoutEntryInitializationHelper::BindingLayoutEntryInitializationHelper(
+ uint32_t entryBinding,
+ wgpu::ShaderStage entryVisibility,
+ wgpu::TextureSampleType textureSampleType,
+ wgpu::TextureViewDimension textureViewDimension,
+ bool textureMultisampled) {
+ binding = entryBinding;
+ visibility = entryVisibility;
+ texture.sampleType = textureSampleType;
+ texture.viewDimension = textureViewDimension;
+ texture.multisampled = textureMultisampled;
+}
+
+BindingLayoutEntryInitializationHelper::BindingLayoutEntryInitializationHelper(
+ uint32_t entryBinding,
+ wgpu::ShaderStage entryVisibility,
+ wgpu::StorageTextureAccess storageTextureAccess,
+ wgpu::TextureFormat format,
+ wgpu::TextureViewDimension textureViewDimension) {
+ binding = entryBinding;
+ visibility = entryVisibility;
+ storageTexture.access = storageTextureAccess;
+ storageTexture.format = format;
+ storageTexture.viewDimension = textureViewDimension;
+}
+
+BindingLayoutEntryInitializationHelper::BindingLayoutEntryInitializationHelper(
+ const BindGroupLayoutEntry& entry)
+ : BindGroupLayoutEntry(entry) {}
+
+BindingInitializationHelper::BindingInitializationHelper(uint32_t binding,
+ const Ref<SamplerBase>& sampler)
+ : binding(binding), sampler(sampler) {}
+
+BindingInitializationHelper::BindingInitializationHelper(uint32_t binding,
+ const Ref<TextureViewBase>& textureView)
+ : binding(binding), textureView(textureView) {}
+
+BindingInitializationHelper::BindingInitializationHelper(uint32_t binding,
+ const Ref<BufferBase>& buffer,
+ uint64_t offset,
+ uint64_t size)
+ : binding(binding), buffer(buffer), offset(offset), size(size) {}
+
+BindingInitializationHelper::~BindingInitializationHelper() = default;
+
+BindGroupEntry BindingInitializationHelper::GetAsBinding() const {
+ BindGroupEntry result;
+
+ result.binding = binding;
+ result.sampler = sampler.Get();
+ result.textureView = textureView.Get();
+ result.buffer = buffer.Get();
+ result.offset = offset;
+ result.size = size;
+
+ return result;
+}
+
+ResultOrError<Ref<BindGroupBase>> MakeBindGroup(
+ DeviceBase* device,
+ const Ref<BindGroupLayoutBase>& layout,
+ std::initializer_list<BindingInitializationHelper> entriesInitializer) {
+ std::vector<BindGroupEntry> entries;
+ for (const BindingInitializationHelper& helper : entriesInitializer) {
+ entries.push_back(helper.GetAsBinding());
}
- BindingLayoutEntryInitializationHelper::BindingLayoutEntryInitializationHelper(
- const BindGroupLayoutEntry& entry)
- : BindGroupLayoutEntry(entry) {
- }
+ BindGroupDescriptor descriptor;
+ descriptor.layout = layout.Get();
+ descriptor.entryCount = entries.size();
+ descriptor.entries = entries.data();
- BindingInitializationHelper::BindingInitializationHelper(uint32_t binding,
- const Ref<SamplerBase>& sampler)
- : binding(binding), sampler(sampler) {
- }
+ return device->CreateBindGroup(&descriptor);
+}
- BindingInitializationHelper::BindingInitializationHelper(
- uint32_t binding,
- const Ref<TextureViewBase>& textureView)
- : binding(binding), textureView(textureView) {
- }
-
- BindingInitializationHelper::BindingInitializationHelper(uint32_t binding,
- const Ref<BufferBase>& buffer,
- uint64_t offset,
- uint64_t size)
- : binding(binding), buffer(buffer), offset(offset), size(size) {
- }
-
- BindingInitializationHelper::~BindingInitializationHelper() = default;
-
- BindGroupEntry BindingInitializationHelper::GetAsBinding() const {
- BindGroupEntry result;
-
- result.binding = binding;
- result.sampler = sampler.Get();
- result.textureView = textureView.Get();
- result.buffer = buffer.Get();
- result.offset = offset;
- result.size = size;
-
- return result;
- }
-
- ResultOrError<Ref<BindGroupBase>> MakeBindGroup(
- DeviceBase* device,
- const Ref<BindGroupLayoutBase>& layout,
- std::initializer_list<BindingInitializationHelper> entriesInitializer) {
- std::vector<BindGroupEntry> entries;
- for (const BindingInitializationHelper& helper : entriesInitializer) {
- entries.push_back(helper.GetAsBinding());
- }
-
- BindGroupDescriptor descriptor;
- descriptor.layout = layout.Get();
- descriptor.entryCount = entries.size();
- descriptor.entries = entries.data();
-
- return device->CreateBindGroup(&descriptor);
- }
-
- const char* GetLabelForTrace(const char* label) {
- return (label == nullptr || strlen(label) == 0) ? "None" : label;
- }
+const char* GetLabelForTrace(const char* label) {
+ return (label == nullptr || strlen(label) == 0) ? "None" : label;
+}
} // namespace dawn::native::utils
diff --git a/chromium/third_party/dawn/src/dawn/native/utils/WGPUHelpers.h b/chromium/third_party/dawn/src/dawn/native/utils/WGPUHelpers.h
index c46c3be026b..9eab9906689 100644
--- a/chromium/third_party/dawn/src/dawn/native/utils/WGPUHelpers.h
+++ b/chromium/third_party/dawn/src/dawn/native/utils/WGPUHelpers.h
@@ -15,108 +15,107 @@
#ifndef SRC_DAWN_NATIVE_UTILS_WGPUHELPERS_H_
#define SRC_DAWN_NATIVE_UTILS_WGPUHELPERS_H_
-#include <dawn/native/dawn_platform.h>
-
#include <array>
#include <initializer_list>
#include <vector>
#include "dawn/common/RefCounted.h"
#include "dawn/native/Error.h"
+#include "dawn/native/dawn_platform.h"
namespace dawn::native::utils {
- ResultOrError<Ref<ShaderModuleBase>> CreateShaderModule(DeviceBase* device, const char* source);
-
- ResultOrError<Ref<BufferBase>> CreateBufferFromData(DeviceBase* device,
- wgpu::BufferUsage usage,
- const void* data,
- uint64_t size);
-
- template <typename T>
- ResultOrError<Ref<BufferBase>> CreateBufferFromData(DeviceBase* device,
- wgpu::BufferUsage usage,
- std::initializer_list<T> data) {
- return CreateBufferFromData(device, usage, data.begin(), uint32_t(sizeof(T) * data.size()));
- }
-
- ResultOrError<Ref<PipelineLayoutBase>> MakeBasicPipelineLayout(
- DeviceBase* device,
- const Ref<BindGroupLayoutBase>& bindGroupLayout);
-
- // Helpers to make creating bind group layouts look nicer:
- //
- // utils::MakeBindGroupLayout(device, {
- // {0, wgpu::ShaderStage::Vertex, wgpu::BufferBindingType::Uniform},
- // {1, wgpu::ShaderStage::Fragment, wgpu::SamplerBindingType::Filtering},
- // {3, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Float}
- // });
-
- struct BindingLayoutEntryInitializationHelper : BindGroupLayoutEntry {
- BindingLayoutEntryInitializationHelper(uint32_t entryBinding,
- wgpu::ShaderStage entryVisibility,
- wgpu::BufferBindingType bufferType,
- bool bufferHasDynamicOffset = false,
- uint64_t bufferMinBindingSize = 0);
- BindingLayoutEntryInitializationHelper(uint32_t entryBinding,
- wgpu::ShaderStage entryVisibility,
- wgpu::SamplerBindingType samplerType);
- BindingLayoutEntryInitializationHelper(
- uint32_t entryBinding,
- wgpu::ShaderStage entryVisibility,
- wgpu::TextureSampleType textureSampleType,
- wgpu::TextureViewDimension viewDimension = wgpu::TextureViewDimension::e2D,
- bool textureMultisampled = false);
- BindingLayoutEntryInitializationHelper(
- uint32_t entryBinding,
- wgpu::ShaderStage entryVisibility,
- wgpu::StorageTextureAccess storageTextureAccess,
- wgpu::TextureFormat format,
- wgpu::TextureViewDimension viewDimension = wgpu::TextureViewDimension::e2D);
-
- explicit BindingLayoutEntryInitializationHelper(const BindGroupLayoutEntry& entry);
- };
-
- ResultOrError<Ref<BindGroupLayoutBase>> MakeBindGroupLayout(
- DeviceBase* device,
- std::initializer_list<BindingLayoutEntryInitializationHelper> entriesInitializer,
- bool allowInternalBinding = false);
-
- // Helpers to make creating bind groups look nicer:
- //
- // utils::MakeBindGroup(device, layout, {
- // {0, mySampler},
- // {1, myBuffer, offset, size},
- // {3, myTextureView}
- // });
-
- // Structure with one constructor per-type of bindings, so that the initializer_list accepts
- // bindings with the right type and no extra information.
- struct BindingInitializationHelper {
- BindingInitializationHelper(uint32_t binding, const Ref<SamplerBase>& sampler);
- BindingInitializationHelper(uint32_t binding, const Ref<TextureViewBase>& textureView);
- BindingInitializationHelper(uint32_t binding,
- const Ref<BufferBase>& buffer,
- uint64_t offset = 0,
- uint64_t size = wgpu::kWholeSize);
- ~BindingInitializationHelper();
-
- BindGroupEntry GetAsBinding() const;
-
- uint32_t binding;
- Ref<SamplerBase> sampler;
- Ref<TextureViewBase> textureView;
- Ref<BufferBase> buffer;
- uint64_t offset = 0;
- uint64_t size = 0;
- };
-
- ResultOrError<Ref<BindGroupBase>> MakeBindGroup(
- DeviceBase* device,
- const Ref<BindGroupLayoutBase>& layout,
- std::initializer_list<BindingInitializationHelper> entriesInitializer);
-
- const char* GetLabelForTrace(const char* label);
+ResultOrError<Ref<ShaderModuleBase>> CreateShaderModule(DeviceBase* device, const char* source);
+
+ResultOrError<Ref<BufferBase>> CreateBufferFromData(DeviceBase* device,
+ wgpu::BufferUsage usage,
+ const void* data,
+ uint64_t size);
+
+template <typename T>
+ResultOrError<Ref<BufferBase>> CreateBufferFromData(DeviceBase* device,
+ wgpu::BufferUsage usage,
+ std::initializer_list<T> data) {
+ return CreateBufferFromData(device, usage, data.begin(), uint32_t(sizeof(T) * data.size()));
+}
+
+ResultOrError<Ref<PipelineLayoutBase>> MakeBasicPipelineLayout(
+ DeviceBase* device,
+ const Ref<BindGroupLayoutBase>& bindGroupLayout);
+
+// Helpers to make creating bind group layouts look nicer:
+//
+// utils::MakeBindGroupLayout(device, {
+// {0, wgpu::ShaderStage::Vertex, wgpu::BufferBindingType::Uniform},
+// {1, wgpu::ShaderStage::Fragment, wgpu::SamplerBindingType::Filtering},
+// {3, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Float}
+// });
+
+struct BindingLayoutEntryInitializationHelper : BindGroupLayoutEntry {
+ BindingLayoutEntryInitializationHelper(uint32_t entryBinding,
+ wgpu::ShaderStage entryVisibility,
+ wgpu::BufferBindingType bufferType,
+ bool bufferHasDynamicOffset = false,
+ uint64_t bufferMinBindingSize = 0);
+ BindingLayoutEntryInitializationHelper(uint32_t entryBinding,
+ wgpu::ShaderStage entryVisibility,
+ wgpu::SamplerBindingType samplerType);
+ BindingLayoutEntryInitializationHelper(
+ uint32_t entryBinding,
+ wgpu::ShaderStage entryVisibility,
+ wgpu::TextureSampleType textureSampleType,
+ wgpu::TextureViewDimension viewDimension = wgpu::TextureViewDimension::e2D,
+ bool textureMultisampled = false);
+ BindingLayoutEntryInitializationHelper(
+ uint32_t entryBinding,
+ wgpu::ShaderStage entryVisibility,
+ wgpu::StorageTextureAccess storageTextureAccess,
+ wgpu::TextureFormat format,
+ wgpu::TextureViewDimension viewDimension = wgpu::TextureViewDimension::e2D);
+
+ explicit BindingLayoutEntryInitializationHelper(const BindGroupLayoutEntry& entry);
+};
+
+ResultOrError<Ref<BindGroupLayoutBase>> MakeBindGroupLayout(
+ DeviceBase* device,
+ std::initializer_list<BindingLayoutEntryInitializationHelper> entriesInitializer,
+ bool allowInternalBinding = false);
+
+// Helpers to make creating bind groups look nicer:
+//
+// utils::MakeBindGroup(device, layout, {
+// {0, mySampler},
+// {1, myBuffer, offset, size},
+// {3, myTextureView}
+// });
+
+// Structure with one constructor per type of binding, so that the initializer_list accepts
+// bindings with the right type and no extra information.
+struct BindingInitializationHelper {
+ BindingInitializationHelper(uint32_t binding, const Ref<SamplerBase>& sampler);
+ BindingInitializationHelper(uint32_t binding, const Ref<TextureViewBase>& textureView);
+ BindingInitializationHelper(uint32_t binding,
+ const Ref<BufferBase>& buffer,
+ uint64_t offset = 0,
+ uint64_t size = wgpu::kWholeSize);
+ ~BindingInitializationHelper();
+
+ BindGroupEntry GetAsBinding() const;
+
+ uint32_t binding;
+ Ref<SamplerBase> sampler;
+ Ref<TextureViewBase> textureView;
+ Ref<BufferBase> buffer;
+ uint64_t offset = 0;
+ uint64_t size = 0;
+};
+
+ResultOrError<Ref<BindGroupBase>> MakeBindGroup(
+ DeviceBase* device,
+ const Ref<BindGroupLayoutBase>& layout,
+ std::initializer_list<BindingInitializationHelper> entriesInitializer);
+
+const char* GetLabelForTrace(const char* label);
} // namespace dawn::native::utils
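
The MakeBindGroupLayout/MakeBindGroup helpers declared above are meant to compose with CreateBufferFromData inside Dawn-internal code that returns MaybeError. The following is a minimal usage sketch, not taken from the patch: it assumes it sits in namespace dawn::native with a valid DeviceBase* supplied by the surrounding backend code, and the binding index, shader stage, and buffer contents are invented for illustration.

// Illustrative sketch only; CreateExampleBindGroup is not part of Dawn.
MaybeError CreateExampleBindGroup(DeviceBase* device, Ref<BindGroupBase>* outBindGroup) {
    // Upload four floats into a uniform buffer via the initializer_list overload.
    Ref<BufferBase> uniformBuffer;
    DAWN_TRY_ASSIGN(uniformBuffer, utils::CreateBufferFromData(device, wgpu::BufferUsage::Uniform,
                                                               {1.0f, 2.0f, 3.0f, 4.0f}));

    // One uniform-buffer binding at binding 0, visible to the compute stage.
    Ref<BindGroupLayoutBase> bgl;
    DAWN_TRY_ASSIGN(bgl, utils::MakeBindGroupLayout(
                             device, {{0, wgpu::ShaderStage::Compute,
                                       wgpu::BufferBindingType::Uniform}}));

    // Bind the whole buffer (offset 0, wgpu::kWholeSize by default).
    DAWN_TRY_ASSIGN(*outBindGroup, utils::MakeBindGroup(device, bgl, {{0, uniformBuffer}}));
    return {};
}
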
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/AdapterVk.cpp b/chromium/third_party/dawn/src/dawn/native/vulkan/AdapterVk.cpp
index 5862bf899bf..fbdb709d1ad 100644
--- a/chromium/third_party/dawn/src/dawn/native/vulkan/AdapterVk.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/AdapterVk.cpp
@@ -14,6 +14,9 @@
#include "dawn/native/vulkan/AdapterVk.h"
+#include <algorithm>
+#include <string>
+
#include "dawn/native/Limits.h"
#include "dawn/native/vulkan/BackendVk.h"
#include "dawn/native/vulkan/DeviceVk.h"
@@ -22,157 +25,170 @@
namespace dawn::native::vulkan {
- Adapter::Adapter(InstanceBase* instance,
- VulkanInstance* vulkanInstance,
- VkPhysicalDevice physicalDevice)
- : AdapterBase(instance, wgpu::BackendType::Vulkan),
- mPhysicalDevice(physicalDevice),
- mVulkanInstance(vulkanInstance) {
- }
+Adapter::Adapter(InstanceBase* instance,
+ VulkanInstance* vulkanInstance,
+ VkPhysicalDevice physicalDevice)
+ : AdapterBase(instance, wgpu::BackendType::Vulkan),
+ mPhysicalDevice(physicalDevice),
+ mVulkanInstance(vulkanInstance) {}
- const VulkanDeviceInfo& Adapter::GetDeviceInfo() const {
- return mDeviceInfo;
- }
+Adapter::~Adapter() = default;
- VkPhysicalDevice Adapter::GetPhysicalDevice() const {
- return mPhysicalDevice;
- }
+const VulkanDeviceInfo& Adapter::GetDeviceInfo() const {
+ return mDeviceInfo;
+}
- VulkanInstance* Adapter::GetVulkanInstance() const {
- return mVulkanInstance.Get();
- }
+VkPhysicalDevice Adapter::GetPhysicalDevice() const {
+ return mPhysicalDevice;
+}
- bool Adapter::IsDepthStencilFormatSupported(VkFormat format) {
- ASSERT(format == VK_FORMAT_D16_UNORM_S8_UINT || format == VK_FORMAT_D24_UNORM_S8_UINT ||
- format == VK_FORMAT_D32_SFLOAT_S8_UINT || format == VK_FORMAT_S8_UINT);
+VulkanInstance* Adapter::GetVulkanInstance() const {
+ return mVulkanInstance.Get();
+}
- VkFormatProperties properties;
- mVulkanInstance->GetFunctions().GetPhysicalDeviceFormatProperties(mPhysicalDevice, format,
- &properties);
- return properties.optimalTilingFeatures & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT;
- }
+bool Adapter::IsDepthStencilFormatSupported(VkFormat format) {
+ ASSERT(format == VK_FORMAT_D16_UNORM_S8_UINT || format == VK_FORMAT_D24_UNORM_S8_UINT ||
+ format == VK_FORMAT_D32_SFLOAT_S8_UINT || format == VK_FORMAT_S8_UINT);
- MaybeError Adapter::InitializeImpl() {
- DAWN_TRY_ASSIGN(mDeviceInfo, GatherDeviceInfo(*this));
+ VkFormatProperties properties;
+ mVulkanInstance->GetFunctions().GetPhysicalDeviceFormatProperties(mPhysicalDevice, format,
+ &properties);
+ return properties.optimalTilingFeatures & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT;
+}
- if (mDeviceInfo.HasExt(DeviceExt::DriverProperties)) {
- mDriverDescription = mDeviceInfo.driverProperties.driverName;
- if (mDeviceInfo.driverProperties.driverInfo[0] != '\0') {
- mDriverDescription += std::string(": ") + mDeviceInfo.driverProperties.driverInfo;
- }
- } else {
- mDriverDescription =
- "Vulkan driver version: " + std::to_string(mDeviceInfo.properties.driverVersion);
- }
+MaybeError Adapter::InitializeImpl() {
+ DAWN_TRY_ASSIGN(mDeviceInfo, GatherDeviceInfo(*this));
- mDeviceId = mDeviceInfo.properties.deviceID;
- mVendorId = mDeviceInfo.properties.vendorID;
- mName = mDeviceInfo.properties.deviceName;
-
- switch (mDeviceInfo.properties.deviceType) {
- case VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU:
- mAdapterType = wgpu::AdapterType::IntegratedGPU;
- break;
- case VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU:
- mAdapterType = wgpu::AdapterType::DiscreteGPU;
- break;
- case VK_PHYSICAL_DEVICE_TYPE_CPU:
- mAdapterType = wgpu::AdapterType::CPU;
- break;
- default:
- mAdapterType = wgpu::AdapterType::Unknown;
- break;
+ if (mDeviceInfo.HasExt(DeviceExt::DriverProperties)) {
+ mDriverDescription = mDeviceInfo.driverProperties.driverName;
+ if (mDeviceInfo.driverProperties.driverInfo[0] != '\0') {
+ mDriverDescription += std::string(": ") + mDeviceInfo.driverProperties.driverInfo;
}
+ } else {
+ mDriverDescription =
+ "Vulkan driver version: " + std::to_string(mDeviceInfo.properties.driverVersion);
+ }
- return {};
+ mDeviceId = mDeviceInfo.properties.deviceID;
+ mVendorId = mDeviceInfo.properties.vendorID;
+ mName = mDeviceInfo.properties.deviceName;
+
+ switch (mDeviceInfo.properties.deviceType) {
+ case VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU:
+ mAdapterType = wgpu::AdapterType::IntegratedGPU;
+ break;
+ case VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU:
+ mAdapterType = wgpu::AdapterType::DiscreteGPU;
+ break;
+ case VK_PHYSICAL_DEVICE_TYPE_CPU:
+ mAdapterType = wgpu::AdapterType::CPU;
+ break;
+ default:
+ mAdapterType = wgpu::AdapterType::Unknown;
+ break;
}
- MaybeError Adapter::InitializeSupportedFeaturesImpl() {
- // Needed for viewport Y-flip.
- if (!mDeviceInfo.HasExt(DeviceExt::Maintenance1)) {
- return DAWN_INTERNAL_ERROR("Vulkan 1.1 or Vulkan 1.0 with KHR_Maintenance1 required.");
- }
+ return {};
+}
- // Needed for security
- if (!mDeviceInfo.features.robustBufferAccess) {
- return DAWN_INTERNAL_ERROR("Vulkan robustBufferAccess feature required.");
- }
+MaybeError Adapter::InitializeSupportedFeaturesImpl() {
+ // Needed for viewport Y-flip.
+ if (!mDeviceInfo.HasExt(DeviceExt::Maintenance1)) {
+ return DAWN_INTERNAL_ERROR("Vulkan 1.1 or Vulkan 1.0 with KHR_Maintenance1 required.");
+ }
- if (!mDeviceInfo.features.textureCompressionBC &&
- !(mDeviceInfo.features.textureCompressionETC2 &&
- mDeviceInfo.features.textureCompressionASTC_LDR)) {
- return DAWN_INTERNAL_ERROR(
- "Vulkan textureCompressionBC feature required or both textureCompressionETC2 and "
- "textureCompressionASTC required.");
- }
+ // Needed for security
+ if (!mDeviceInfo.features.robustBufferAccess) {
+ return DAWN_INTERNAL_ERROR("Vulkan robustBufferAccess feature required.");
+ }
- // Needed for the respective WebGPU features.
- if (!mDeviceInfo.features.depthBiasClamp) {
- return DAWN_INTERNAL_ERROR("Vulkan depthBiasClamp feature required.");
- }
- if (!mDeviceInfo.features.fragmentStoresAndAtomics) {
- return DAWN_INTERNAL_ERROR("Vulkan fragmentStoresAndAtomics feature required.");
- }
- if (!mDeviceInfo.features.fullDrawIndexUint32) {
- return DAWN_INTERNAL_ERROR("Vulkan fullDrawIndexUint32 feature required.");
- }
- if (!mDeviceInfo.features.imageCubeArray) {
- return DAWN_INTERNAL_ERROR("Vulkan imageCubeArray feature required.");
- }
- if (!mDeviceInfo.features.independentBlend) {
- return DAWN_INTERNAL_ERROR("Vulkan independentBlend feature required.");
- }
- if (!mDeviceInfo.features.sampleRateShading) {
- return DAWN_INTERNAL_ERROR("Vulkan sampleRateShading feature required.");
- }
+ if (!mDeviceInfo.features.textureCompressionBC &&
+ !(mDeviceInfo.features.textureCompressionETC2 &&
+ mDeviceInfo.features.textureCompressionASTC_LDR)) {
+ return DAWN_INTERNAL_ERROR(
+ "Vulkan textureCompressionBC feature required or both textureCompressionETC2 and "
+ "textureCompressionASTC required.");
+ }
- // Initialize supported extensions
- if (mDeviceInfo.features.textureCompressionBC == VK_TRUE) {
- mSupportedFeatures.EnableFeature(Feature::TextureCompressionBC);
- }
+ // Needed for the respective WebGPU features.
+ if (!mDeviceInfo.features.depthBiasClamp) {
+ return DAWN_INTERNAL_ERROR("Vulkan depthBiasClamp feature required.");
+ }
+ if (!mDeviceInfo.features.fragmentStoresAndAtomics) {
+ return DAWN_INTERNAL_ERROR("Vulkan fragmentStoresAndAtomics feature required.");
+ }
+ if (!mDeviceInfo.features.fullDrawIndexUint32) {
+ return DAWN_INTERNAL_ERROR("Vulkan fullDrawIndexUint32 feature required.");
+ }
+ if (!mDeviceInfo.features.imageCubeArray) {
+ return DAWN_INTERNAL_ERROR("Vulkan imageCubeArray feature required.");
+ }
+ if (!mDeviceInfo.features.independentBlend) {
+ return DAWN_INTERNAL_ERROR("Vulkan independentBlend feature required.");
+ }
+ if (!mDeviceInfo.features.sampleRateShading) {
+ return DAWN_INTERNAL_ERROR("Vulkan sampleRateShading feature required.");
+ }
- if (mDeviceInfo.features.textureCompressionETC2 == VK_TRUE) {
- mSupportedFeatures.EnableFeature(Feature::TextureCompressionETC2);
- }
+ // Initialize supported extensions
+ if (mDeviceInfo.features.textureCompressionBC == VK_TRUE) {
+ mSupportedFeatures.EnableFeature(Feature::TextureCompressionBC);
+ }
- if (mDeviceInfo.features.textureCompressionASTC_LDR == VK_TRUE) {
- mSupportedFeatures.EnableFeature(Feature::TextureCompressionASTC);
- }
+ if (mDeviceInfo.features.textureCompressionETC2 == VK_TRUE) {
+ mSupportedFeatures.EnableFeature(Feature::TextureCompressionETC2);
+ }
- if (mDeviceInfo.features.pipelineStatisticsQuery == VK_TRUE) {
- mSupportedFeatures.EnableFeature(Feature::PipelineStatisticsQuery);
- }
+ if (mDeviceInfo.features.textureCompressionASTC_LDR == VK_TRUE) {
+ mSupportedFeatures.EnableFeature(Feature::TextureCompressionASTC);
+ }
- if (mDeviceInfo.features.depthClamp == VK_TRUE) {
- mSupportedFeatures.EnableFeature(Feature::DepthClamping);
- }
+ if (mDeviceInfo.features.pipelineStatisticsQuery == VK_TRUE) {
+ mSupportedFeatures.EnableFeature(Feature::PipelineStatisticsQuery);
+ }
- if (mDeviceInfo.properties.limits.timestampComputeAndGraphics == VK_TRUE) {
- mSupportedFeatures.EnableFeature(Feature::TimestampQuery);
- }
+ if (mDeviceInfo.features.depthClamp == VK_TRUE) {
+ mSupportedFeatures.EnableFeature(Feature::DepthClamping);
+ }
- if (IsDepthStencilFormatSupported(VK_FORMAT_D24_UNORM_S8_UINT)) {
- mSupportedFeatures.EnableFeature(Feature::Depth24UnormStencil8);
- }
+ if (mDeviceInfo.properties.limits.timestampComputeAndGraphics == VK_TRUE) {
+ mSupportedFeatures.EnableFeature(Feature::TimestampQuery);
+ }
- if (IsDepthStencilFormatSupported(VK_FORMAT_D32_SFLOAT_S8_UINT)) {
- mSupportedFeatures.EnableFeature(Feature::Depth32FloatStencil8);
- }
+ if (IsDepthStencilFormatSupported(VK_FORMAT_D24_UNORM_S8_UINT)) {
+ mSupportedFeatures.EnableFeature(Feature::Depth24UnormStencil8);
+ }
+
+ if (IsDepthStencilFormatSupported(VK_FORMAT_D32_SFLOAT_S8_UINT)) {
+ mSupportedFeatures.EnableFeature(Feature::Depth32FloatStencil8);
+ }
+
+ if (mDeviceInfo.features.drawIndirectFirstInstance == VK_TRUE) {
+ mSupportedFeatures.EnableFeature(Feature::IndirectFirstInstance);
+ }
+
+ if (mDeviceInfo.HasExt(DeviceExt::ShaderIntegerDotProduct) &&
+ mDeviceInfo.shaderIntegerDotProductProperties
+ .integerDotProduct4x8BitPackedSignedAccelerated == VK_TRUE &&
+ mDeviceInfo.shaderIntegerDotProductProperties
+ .integerDotProduct4x8BitPackedUnsignedAccelerated == VK_TRUE) {
+ mSupportedFeatures.EnableFeature(Feature::ChromiumExperimentalDp4a);
+ }
#if defined(DAWN_USE_SYNC_FDS)
- // TODO(chromium:1258986): Precisely enable the feature by querying the device's format
- // features.
- mSupportedFeatures.EnableFeature(Feature::MultiPlanarFormats);
+ // TODO(chromium:1258986): Precisely enable the feature by querying the device's format
+ // features.
+ mSupportedFeatures.EnableFeature(Feature::MultiPlanarFormats);
#endif
- return {};
- }
+ return {};
+}
- MaybeError Adapter::InitializeSupportedLimitsImpl(CombinedLimits* limits) {
- GetDefaultLimits(&limits->v1);
- CombinedLimits baseLimits = *limits;
+MaybeError Adapter::InitializeSupportedLimitsImpl(CombinedLimits* limits) {
+ GetDefaultLimits(&limits->v1);
+ CombinedLimits baseLimits = *limits;
- const VkPhysicalDeviceLimits& vkLimits = mDeviceInfo.properties.limits;
+ const VkPhysicalDeviceLimits& vkLimits = mDeviceInfo.properties.limits;
#define CHECK_AND_SET_V1_LIMIT_IMPL(vulkanName, webgpuName, compareOp, msgSegment) \
do { \
@@ -191,163 +207,156 @@ namespace dawn::native::vulkan {
#define CHECK_AND_SET_V1_MIN_LIMIT(vulkanName, webgpuName) \
CHECK_AND_SET_V1_LIMIT_IMPL(vulkanName, webgpuName, >, "most")
- CHECK_AND_SET_V1_MAX_LIMIT(maxImageDimension1D, maxTextureDimension1D);
-
- CHECK_AND_SET_V1_MAX_LIMIT(maxImageDimension2D, maxTextureDimension2D);
- CHECK_AND_SET_V1_MAX_LIMIT(maxImageDimensionCube, maxTextureDimension2D);
- CHECK_AND_SET_V1_MAX_LIMIT(maxFramebufferWidth, maxTextureDimension2D);
- CHECK_AND_SET_V1_MAX_LIMIT(maxFramebufferHeight, maxTextureDimension2D);
- CHECK_AND_SET_V1_MAX_LIMIT(maxViewportDimensions[0], maxTextureDimension2D);
- CHECK_AND_SET_V1_MAX_LIMIT(maxViewportDimensions[1], maxTextureDimension2D);
- CHECK_AND_SET_V1_MAX_LIMIT(viewportBoundsRange[1], maxTextureDimension2D);
- limits->v1.maxTextureDimension2D = std::min({
- static_cast<uint32_t>(vkLimits.maxImageDimension2D),
- static_cast<uint32_t>(vkLimits.maxImageDimensionCube),
- static_cast<uint32_t>(vkLimits.maxFramebufferWidth),
- static_cast<uint32_t>(vkLimits.maxFramebufferHeight),
- static_cast<uint32_t>(vkLimits.maxViewportDimensions[0]),
- static_cast<uint32_t>(vkLimits.maxViewportDimensions[1]),
- static_cast<uint32_t>(vkLimits.viewportBoundsRange[1]),
- });
-
- CHECK_AND_SET_V1_MAX_LIMIT(maxImageDimension3D, maxTextureDimension3D);
- CHECK_AND_SET_V1_MAX_LIMIT(maxImageArrayLayers, maxTextureArrayLayers);
- CHECK_AND_SET_V1_MAX_LIMIT(maxBoundDescriptorSets, maxBindGroups);
- CHECK_AND_SET_V1_MAX_LIMIT(maxDescriptorSetUniformBuffersDynamic,
- maxDynamicUniformBuffersPerPipelineLayout);
- CHECK_AND_SET_V1_MAX_LIMIT(maxDescriptorSetStorageBuffersDynamic,
- maxDynamicStorageBuffersPerPipelineLayout);
-
- CHECK_AND_SET_V1_MAX_LIMIT(maxPerStageDescriptorSampledImages,
- maxSampledTexturesPerShaderStage);
- CHECK_AND_SET_V1_MAX_LIMIT(maxPerStageDescriptorSamplers, maxSamplersPerShaderStage);
- CHECK_AND_SET_V1_MAX_LIMIT(maxPerStageDescriptorStorageBuffers,
- maxStorageBuffersPerShaderStage);
- CHECK_AND_SET_V1_MAX_LIMIT(maxPerStageDescriptorStorageImages,
- maxStorageTexturesPerShaderStage);
- CHECK_AND_SET_V1_MAX_LIMIT(maxPerStageDescriptorUniformBuffers,
- maxUniformBuffersPerShaderStage);
- CHECK_AND_SET_V1_MAX_LIMIT(maxUniformBufferRange, maxUniformBufferBindingSize);
- CHECK_AND_SET_V1_MAX_LIMIT(maxStorageBufferRange, maxStorageBufferBindingSize);
-
- CHECK_AND_SET_V1_MIN_LIMIT(minUniformBufferOffsetAlignment,
- minUniformBufferOffsetAlignment);
- CHECK_AND_SET_V1_MIN_LIMIT(minStorageBufferOffsetAlignment,
- minStorageBufferOffsetAlignment);
-
- CHECK_AND_SET_V1_MAX_LIMIT(maxVertexInputBindings, maxVertexBuffers);
- CHECK_AND_SET_V1_MAX_LIMIT(maxVertexInputAttributes, maxVertexAttributes);
-
- if (vkLimits.maxVertexInputBindingStride < baseLimits.v1.maxVertexBufferArrayStride ||
- vkLimits.maxVertexInputAttributeOffset < baseLimits.v1.maxVertexBufferArrayStride - 1) {
- return DAWN_INTERNAL_ERROR("Insufficient Vulkan limits for maxVertexBufferArrayStride");
- }
- limits->v1.maxVertexBufferArrayStride = std::min(
- vkLimits.maxVertexInputBindingStride, vkLimits.maxVertexInputAttributeOffset + 1);
+ CHECK_AND_SET_V1_MAX_LIMIT(maxImageDimension1D, maxTextureDimension1D);
+
+ CHECK_AND_SET_V1_MAX_LIMIT(maxImageDimension2D, maxTextureDimension2D);
+ CHECK_AND_SET_V1_MAX_LIMIT(maxImageDimensionCube, maxTextureDimension2D);
+ CHECK_AND_SET_V1_MAX_LIMIT(maxFramebufferWidth, maxTextureDimension2D);
+ CHECK_AND_SET_V1_MAX_LIMIT(maxFramebufferHeight, maxTextureDimension2D);
+ CHECK_AND_SET_V1_MAX_LIMIT(maxViewportDimensions[0], maxTextureDimension2D);
+ CHECK_AND_SET_V1_MAX_LIMIT(maxViewportDimensions[1], maxTextureDimension2D);
+ CHECK_AND_SET_V1_MAX_LIMIT(viewportBoundsRange[1], maxTextureDimension2D);
+ limits->v1.maxTextureDimension2D = std::min({
+ static_cast<uint32_t>(vkLimits.maxImageDimension2D),
+ static_cast<uint32_t>(vkLimits.maxImageDimensionCube),
+ static_cast<uint32_t>(vkLimits.maxFramebufferWidth),
+ static_cast<uint32_t>(vkLimits.maxFramebufferHeight),
+ static_cast<uint32_t>(vkLimits.maxViewportDimensions[0]),
+ static_cast<uint32_t>(vkLimits.maxViewportDimensions[1]),
+ static_cast<uint32_t>(vkLimits.viewportBoundsRange[1]),
+ });
+
+ CHECK_AND_SET_V1_MAX_LIMIT(maxImageDimension3D, maxTextureDimension3D);
+ CHECK_AND_SET_V1_MAX_LIMIT(maxImageArrayLayers, maxTextureArrayLayers);
+ CHECK_AND_SET_V1_MAX_LIMIT(maxBoundDescriptorSets, maxBindGroups);
+ CHECK_AND_SET_V1_MAX_LIMIT(maxDescriptorSetUniformBuffersDynamic,
+ maxDynamicUniformBuffersPerPipelineLayout);
+ CHECK_AND_SET_V1_MAX_LIMIT(maxDescriptorSetStorageBuffersDynamic,
+ maxDynamicStorageBuffersPerPipelineLayout);
+
+ CHECK_AND_SET_V1_MAX_LIMIT(maxPerStageDescriptorSampledImages,
+ maxSampledTexturesPerShaderStage);
+ CHECK_AND_SET_V1_MAX_LIMIT(maxPerStageDescriptorSamplers, maxSamplersPerShaderStage);
+ CHECK_AND_SET_V1_MAX_LIMIT(maxPerStageDescriptorStorageBuffers,
+ maxStorageBuffersPerShaderStage);
+ CHECK_AND_SET_V1_MAX_LIMIT(maxPerStageDescriptorStorageImages,
+ maxStorageTexturesPerShaderStage);
+ CHECK_AND_SET_V1_MAX_LIMIT(maxPerStageDescriptorUniformBuffers,
+ maxUniformBuffersPerShaderStage);
+ CHECK_AND_SET_V1_MAX_LIMIT(maxUniformBufferRange, maxUniformBufferBindingSize);
+ CHECK_AND_SET_V1_MAX_LIMIT(maxStorageBufferRange, maxStorageBufferBindingSize);
+
+ CHECK_AND_SET_V1_MIN_LIMIT(minUniformBufferOffsetAlignment, minUniformBufferOffsetAlignment);
+ CHECK_AND_SET_V1_MIN_LIMIT(minStorageBufferOffsetAlignment, minStorageBufferOffsetAlignment);
+
+ CHECK_AND_SET_V1_MAX_LIMIT(maxVertexInputBindings, maxVertexBuffers);
+ CHECK_AND_SET_V1_MAX_LIMIT(maxVertexInputAttributes, maxVertexAttributes);
+
+ if (vkLimits.maxVertexInputBindingStride < baseLimits.v1.maxVertexBufferArrayStride ||
+ vkLimits.maxVertexInputAttributeOffset < baseLimits.v1.maxVertexBufferArrayStride - 1) {
+ return DAWN_INTERNAL_ERROR("Insufficient Vulkan limits for maxVertexBufferArrayStride");
+ }
+ limits->v1.maxVertexBufferArrayStride =
+ std::min(vkLimits.maxVertexInputBindingStride, vkLimits.maxVertexInputAttributeOffset + 1);
- if (vkLimits.maxVertexOutputComponents < baseLimits.v1.maxInterStageShaderComponents ||
- vkLimits.maxFragmentInputComponents < baseLimits.v1.maxInterStageShaderComponents) {
- return DAWN_INTERNAL_ERROR(
- "Insufficient Vulkan limits for maxInterStageShaderComponents");
- }
- limits->v1.maxInterStageShaderComponents =
- std::min(vkLimits.maxVertexOutputComponents, vkLimits.maxFragmentInputComponents);
-
- CHECK_AND_SET_V1_MAX_LIMIT(maxComputeSharedMemorySize, maxComputeWorkgroupStorageSize);
- CHECK_AND_SET_V1_MAX_LIMIT(maxComputeWorkGroupInvocations,
- maxComputeInvocationsPerWorkgroup);
- CHECK_AND_SET_V1_MAX_LIMIT(maxComputeWorkGroupSize[0], maxComputeWorkgroupSizeX);
- CHECK_AND_SET_V1_MAX_LIMIT(maxComputeWorkGroupSize[1], maxComputeWorkgroupSizeY);
- CHECK_AND_SET_V1_MAX_LIMIT(maxComputeWorkGroupSize[2], maxComputeWorkgroupSizeZ);
-
- CHECK_AND_SET_V1_MAX_LIMIT(maxComputeWorkGroupCount[0], maxComputeWorkgroupsPerDimension);
- CHECK_AND_SET_V1_MAX_LIMIT(maxComputeWorkGroupCount[1], maxComputeWorkgroupsPerDimension);
- CHECK_AND_SET_V1_MAX_LIMIT(maxComputeWorkGroupCount[2], maxComputeWorkgroupsPerDimension);
- limits->v1.maxComputeWorkgroupsPerDimension = std::min({
- vkLimits.maxComputeWorkGroupCount[0],
- vkLimits.maxComputeWorkGroupCount[1],
- vkLimits.maxComputeWorkGroupCount[2],
- });
-
- if (vkLimits.maxColorAttachments < kMaxColorAttachments) {
- return DAWN_INTERNAL_ERROR("Insufficient Vulkan limits for maxColorAttachments");
- }
- if (!IsSubset(VkSampleCountFlags(VK_SAMPLE_COUNT_1_BIT | VK_SAMPLE_COUNT_4_BIT),
- vkLimits.framebufferColorSampleCounts)) {
- return DAWN_INTERNAL_ERROR(
- "Insufficient Vulkan limits for framebufferColorSampleCounts");
- }
- if (!IsSubset(VkSampleCountFlags(VK_SAMPLE_COUNT_1_BIT | VK_SAMPLE_COUNT_4_BIT),
- vkLimits.framebufferDepthSampleCounts)) {
+ if (vkLimits.maxVertexOutputComponents < baseLimits.v1.maxInterStageShaderComponents ||
+ vkLimits.maxFragmentInputComponents < baseLimits.v1.maxInterStageShaderComponents) {
+ return DAWN_INTERNAL_ERROR("Insufficient Vulkan limits for maxInterStageShaderComponents");
+ }
+ limits->v1.maxInterStageShaderComponents =
+ std::min(vkLimits.maxVertexOutputComponents, vkLimits.maxFragmentInputComponents);
+
+ CHECK_AND_SET_V1_MAX_LIMIT(maxComputeSharedMemorySize, maxComputeWorkgroupStorageSize);
+ CHECK_AND_SET_V1_MAX_LIMIT(maxComputeWorkGroupInvocations, maxComputeInvocationsPerWorkgroup);
+ CHECK_AND_SET_V1_MAX_LIMIT(maxComputeWorkGroupSize[0], maxComputeWorkgroupSizeX);
+ CHECK_AND_SET_V1_MAX_LIMIT(maxComputeWorkGroupSize[1], maxComputeWorkgroupSizeY);
+ CHECK_AND_SET_V1_MAX_LIMIT(maxComputeWorkGroupSize[2], maxComputeWorkgroupSizeZ);
+
+ CHECK_AND_SET_V1_MAX_LIMIT(maxComputeWorkGroupCount[0], maxComputeWorkgroupsPerDimension);
+ CHECK_AND_SET_V1_MAX_LIMIT(maxComputeWorkGroupCount[1], maxComputeWorkgroupsPerDimension);
+ CHECK_AND_SET_V1_MAX_LIMIT(maxComputeWorkGroupCount[2], maxComputeWorkgroupsPerDimension);
+ limits->v1.maxComputeWorkgroupsPerDimension = std::min({
+ vkLimits.maxComputeWorkGroupCount[0],
+ vkLimits.maxComputeWorkGroupCount[1],
+ vkLimits.maxComputeWorkGroupCount[2],
+ });
+
+ if (vkLimits.maxColorAttachments < kMaxColorAttachments) {
+ return DAWN_INTERNAL_ERROR("Insufficient Vulkan limits for maxColorAttachments");
+ }
+ if (!IsSubset(VkSampleCountFlags(VK_SAMPLE_COUNT_1_BIT | VK_SAMPLE_COUNT_4_BIT),
+ vkLimits.framebufferColorSampleCounts)) {
+ return DAWN_INTERNAL_ERROR("Insufficient Vulkan limits for framebufferColorSampleCounts");
+ }
+ if (!IsSubset(VkSampleCountFlags(VK_SAMPLE_COUNT_1_BIT | VK_SAMPLE_COUNT_4_BIT),
+ vkLimits.framebufferDepthSampleCounts)) {
+ return DAWN_INTERNAL_ERROR("Insufficient Vulkan limits for framebufferDepthSampleCounts");
+ }
+
+    // Only check maxFragmentCombinedOutputResources on mobile GPUs. Desktop GPU drivers seem
+    // to report incorrect values for this limit, such as 8 or 16, even though they can do bindless
+    // storage buffers. The Mesa llvmpipe driver also reports 8 here.
+ uint32_t vendorId = mDeviceInfo.properties.vendorID;
+ if (!gpu_info::IsAMD(vendorId) && !gpu_info::IsIntel(vendorId) && !gpu_info::IsMesa(vendorId) &&
+ !gpu_info::IsNvidia(vendorId)) {
+ if (vkLimits.maxFragmentCombinedOutputResources <
+ kMaxColorAttachments + baseLimits.v1.maxStorageTexturesPerShaderStage +
+ baseLimits.v1.maxStorageBuffersPerShaderStage) {
return DAWN_INTERNAL_ERROR(
- "Insufficient Vulkan limits for framebufferDepthSampleCounts");
+ "Insufficient Vulkan maxFragmentCombinedOutputResources limit");
}
- // Only check maxFragmentCombinedOutputResources on mobile GPUs. Desktop GPUs drivers seem
- // to put incorrect values for this limit with things like 8 or 16 when they can do bindless
- // storage buffers. Mesa llvmpipe driver also puts 8 here.
- uint32_t vendorId = mDeviceInfo.properties.vendorID;
- if (!gpu_info::IsAMD(vendorId) && !gpu_info::IsIntel(vendorId) &&
- !gpu_info::IsMesa(vendorId) && !gpu_info::IsNvidia(vendorId)) {
- if (vkLimits.maxFragmentCombinedOutputResources <
- kMaxColorAttachments + baseLimits.v1.maxStorageTexturesPerShaderStage +
- baseLimits.v1.maxStorageBuffersPerShaderStage) {
- return DAWN_INTERNAL_ERROR(
- "Insufficient Vulkan maxFragmentCombinedOutputResources limit");
- }
-
- uint32_t maxFragmentCombinedOutputResources =
- kMaxColorAttachments + limits->v1.maxStorageTexturesPerShaderStage +
- limits->v1.maxStorageBuffersPerShaderStage;
-
- if (maxFragmentCombinedOutputResources > vkLimits.maxFragmentCombinedOutputResources) {
- // WebGPU's maxFragmentCombinedOutputResources exceeds the Vulkan limit.
- // Decrease |maxStorageTexturesPerShaderStage| and |maxStorageBuffersPerShaderStage|
- // to fit within the Vulkan limit.
- uint32_t countOverLimit = maxFragmentCombinedOutputResources -
- vkLimits.maxFragmentCombinedOutputResources;
-
- uint32_t maxStorageTexturesOverBase =
- limits->v1.maxStorageTexturesPerShaderStage -
- baseLimits.v1.maxStorageTexturesPerShaderStage;
- uint32_t maxStorageBuffersOverBase = limits->v1.maxStorageBuffersPerShaderStage -
- baseLimits.v1.maxStorageBuffersPerShaderStage;
-
- // Reduce the number of resources by half the overage count, but clamp to
- // to ensure we don't go below the base limits.
- uint32_t numFewerStorageTextures =
- std::min(countOverLimit / 2, maxStorageTexturesOverBase);
- uint32_t numFewerStorageBuffers =
- std::min((countOverLimit + 1) / 2, maxStorageBuffersOverBase);
-
- if (numFewerStorageTextures == maxStorageTexturesOverBase) {
- // If |numFewerStorageTextures| was clamped, subtract the remaining
- // from the storage buffers.
- numFewerStorageBuffers = countOverLimit - numFewerStorageTextures;
- ASSERT(numFewerStorageBuffers <= maxStorageBuffersOverBase);
- } else if (numFewerStorageBuffers == maxStorageBuffersOverBase) {
- // If |numFewerStorageBuffers| was clamped, subtract the remaining
- // from the storage textures.
- numFewerStorageTextures = countOverLimit - numFewerStorageBuffers;
- ASSERT(numFewerStorageTextures <= maxStorageTexturesOverBase);
- }
- limits->v1.maxStorageTexturesPerShaderStage -= numFewerStorageTextures;
- limits->v1.maxStorageBuffersPerShaderStage -= numFewerStorageBuffers;
+ uint32_t maxFragmentCombinedOutputResources = kMaxColorAttachments +
+ limits->v1.maxStorageTexturesPerShaderStage +
+ limits->v1.maxStorageBuffersPerShaderStage;
+
+ if (maxFragmentCombinedOutputResources > vkLimits.maxFragmentCombinedOutputResources) {
+ // WebGPU's maxFragmentCombinedOutputResources exceeds the Vulkan limit.
+ // Decrease |maxStorageTexturesPerShaderStage| and |maxStorageBuffersPerShaderStage|
+ // to fit within the Vulkan limit.
+ uint32_t countOverLimit =
+ maxFragmentCombinedOutputResources - vkLimits.maxFragmentCombinedOutputResources;
+
+ uint32_t maxStorageTexturesOverBase = limits->v1.maxStorageTexturesPerShaderStage -
+ baseLimits.v1.maxStorageTexturesPerShaderStage;
+ uint32_t maxStorageBuffersOverBase = limits->v1.maxStorageBuffersPerShaderStage -
+ baseLimits.v1.maxStorageBuffersPerShaderStage;
+
+            // Reduce the number of resources by half the overage count, but clamp
+            // to ensure we don't go below the base limits.
+ uint32_t numFewerStorageTextures =
+ std::min(countOverLimit / 2, maxStorageTexturesOverBase);
+ uint32_t numFewerStorageBuffers =
+ std::min((countOverLimit + 1) / 2, maxStorageBuffersOverBase);
+
+ if (numFewerStorageTextures == maxStorageTexturesOverBase) {
+ // If |numFewerStorageTextures| was clamped, subtract the remaining
+ // from the storage buffers.
+ numFewerStorageBuffers = countOverLimit - numFewerStorageTextures;
+ ASSERT(numFewerStorageBuffers <= maxStorageBuffersOverBase);
+ } else if (numFewerStorageBuffers == maxStorageBuffersOverBase) {
+ // If |numFewerStorageBuffers| was clamped, subtract the remaining
+ // from the storage textures.
+ numFewerStorageTextures = countOverLimit - numFewerStorageBuffers;
+ ASSERT(numFewerStorageTextures <= maxStorageTexturesOverBase);
}
+ limits->v1.maxStorageTexturesPerShaderStage -= numFewerStorageTextures;
+ limits->v1.maxStorageBuffersPerShaderStage -= numFewerStorageBuffers;
}
-
- return {};
}
- bool Adapter::SupportsExternalImages() const {
- // Via dawn::native::vulkan::WrapVulkanImage
- return external_memory::Service::CheckSupport(mDeviceInfo) &&
- external_semaphore::Service::CheckSupport(mDeviceInfo, mPhysicalDevice,
- mVulkanInstance->GetFunctions());
- }
+ return {};
+}
- ResultOrError<Ref<DeviceBase>> Adapter::CreateDeviceImpl(const DeviceDescriptor* descriptor) {
- return Device::Create(this, descriptor);
- }
+bool Adapter::SupportsExternalImages() const {
+ // Via dawn::native::vulkan::WrapVulkanImage
+ return external_memory::Service::CheckSupport(mDeviceInfo) &&
+ external_semaphore::Service::CheckSupport(mDeviceInfo, mPhysicalDevice,
+ mVulkanInstance->GetFunctions());
+}
+
+ResultOrError<Ref<DeviceBase>> Adapter::CreateDeviceImpl(const DeviceDescriptor* descriptor) {
+ return Device::Create(this, descriptor);
+}
} // namespace dawn::native::vulkan
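
The maxFragmentCombinedOutputResources adjustment in the hunk above splits the overage roughly in half between storage textures and storage buffers, clamps each so it never drops below its base limit, and lets the other side absorb any remainder. A small standalone sketch of that arithmetic, using made-up numbers rather than real device limits:

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iostream>

int main() {
    // Hypothetical values, not taken from any real driver.
    constexpr uint32_t kMaxColorAttachments = 8;
    uint32_t storageTextures = 16, baseStorageTextures = 4;
    uint32_t storageBuffers = 24, baseStorageBuffers = 8;
    uint32_t vkCombinedLimit = 40;  // stands in for vkLimits.maxFragmentCombinedOutputResources

    uint32_t combined = kMaxColorAttachments + storageTextures + storageBuffers;  // 48
    uint32_t countOverLimit = combined - vkCombinedLimit;                         // 8

    uint32_t texturesOverBase = storageTextures - baseStorageTextures;  // 12
    uint32_t buffersOverBase = storageBuffers - baseStorageBuffers;     // 16

    // Halve the overage between the two limits, clamped to their respective bases.
    uint32_t fewerTextures = std::min(countOverLimit / 2, texturesOverBase);      // 4
    uint32_t fewerBuffers = std::min((countOverLimit + 1) / 2, buffersOverBase);  // 4
    if (fewerTextures == texturesOverBase) {
        fewerBuffers = countOverLimit - fewerTextures;  // textures clamped: buffers absorb the rest
    } else if (fewerBuffers == buffersOverBase) {
        fewerTextures = countOverLimit - fewerBuffers;  // buffers clamped: textures absorb the rest
    }

    storageTextures -= fewerTextures;  // 12
    storageBuffers -= fewerBuffers;    // 20
    assert(kMaxColorAttachments + storageTextures + storageBuffers <= vkCombinedLimit);
    std::cout << "storageTextures=" << storageTextures
              << " storageBuffers=" << storageBuffers << "\n";
    return 0;
}
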
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/AdapterVk.h b/chromium/third_party/dawn/src/dawn/native/vulkan/AdapterVk.h
index 7616cdaefce..9cb5234a650 100644
--- a/chromium/third_party/dawn/src/dawn/native/vulkan/AdapterVk.h
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/AdapterVk.h
@@ -23,36 +23,35 @@
namespace dawn::native::vulkan {
- class VulkanInstance;
+class VulkanInstance;
- class Adapter : public AdapterBase {
- public:
- Adapter(InstanceBase* instance,
- VulkanInstance* vulkanInstance,
- VkPhysicalDevice physicalDevice);
- ~Adapter() override = default;
+class Adapter : public AdapterBase {
+ public:
+ Adapter(InstanceBase* instance,
+ VulkanInstance* vulkanInstance,
+ VkPhysicalDevice physicalDevice);
+ ~Adapter() override;
- // AdapterBase Implementation
- bool SupportsExternalImages() const override;
+ // AdapterBase Implementation
+ bool SupportsExternalImages() const override;
- const VulkanDeviceInfo& GetDeviceInfo() const;
- VkPhysicalDevice GetPhysicalDevice() const;
- VulkanInstance* GetVulkanInstance() const;
+ const VulkanDeviceInfo& GetDeviceInfo() const;
+ VkPhysicalDevice GetPhysicalDevice() const;
+ VulkanInstance* GetVulkanInstance() const;
- bool IsDepthStencilFormatSupported(VkFormat format);
+ bool IsDepthStencilFormatSupported(VkFormat format);
- private:
- MaybeError InitializeImpl() override;
- MaybeError InitializeSupportedFeaturesImpl() override;
- MaybeError InitializeSupportedLimitsImpl(CombinedLimits* limits) override;
+ private:
+ MaybeError InitializeImpl() override;
+ MaybeError InitializeSupportedFeaturesImpl() override;
+ MaybeError InitializeSupportedLimitsImpl(CombinedLimits* limits) override;
- ResultOrError<Ref<DeviceBase>> CreateDeviceImpl(
- const DeviceDescriptor* descriptor) override;
+ ResultOrError<Ref<DeviceBase>> CreateDeviceImpl(const DeviceDescriptor* descriptor) override;
- VkPhysicalDevice mPhysicalDevice;
- Ref<VulkanInstance> mVulkanInstance;
- VulkanDeviceInfo mDeviceInfo = {};
- };
+ VkPhysicalDevice mPhysicalDevice;
+ Ref<VulkanInstance> mVulkanInstance;
+ VulkanDeviceInfo mDeviceInfo = {};
+};
} // namespace dawn::native::vulkan
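
Adapter::IsDepthStencilFormatSupported() declared above is what AdapterVk.cpp uses to gate the optional Depth24UnormStencil8 and Depth32FloatStencil8 features. A hypothetical helper that queries the same information through the public interface might look like the sketch below; the struct and function names are invented for illustration.

// Illustrative only; DepthStencilSupport and QueryDepthStencilSupport are not part of Dawn.
struct DepthStencilSupport {
    bool d24UnormS8 = false;
    bool d32FloatS8 = false;
};

DepthStencilSupport QueryDepthStencilSupport(Adapter* adapter) {
    DepthStencilSupport support;
    // Both calls check optimalTilingFeatures for the depth-stencil attachment bit,
    // as implemented in AdapterVk.cpp above.
    support.d24UnormS8 = adapter->IsDepthStencilFormatSupported(VK_FORMAT_D24_UNORM_S8_UINT);
    support.d32FloatS8 = adapter->IsDepthStencilFormatSupported(VK_FORMAT_D32_SFLOAT_S8_UINT);
    return support;
}
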
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/BackendVk.cpp b/chromium/third_party/dawn/src/dawn/native/vulkan/BackendVk.cpp
index b7da2473904..fccf6001e42 100644
--- a/chromium/third_party/dawn/src/dawn/native/vulkan/BackendVk.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/BackendVk.cpp
@@ -14,42 +14,47 @@
#include "dawn/native/vulkan/BackendVk.h"
+#include <algorithm>
+#include <string>
+#include <utility>
+
#include "dawn/common/BitSetIterator.h"
#include "dawn/common/Log.h"
#include "dawn/common/SystemUtils.h"
#include "dawn/native/Instance.h"
#include "dawn/native/VulkanBackend.h"
#include "dawn/native/vulkan/AdapterVk.h"
+#include "dawn/native/vulkan/DeviceVk.h"
#include "dawn/native/vulkan/UtilsVulkan.h"
#include "dawn/native/vulkan/VulkanError.h"
// TODO(crbug.com/dawn/283): Link against the Vulkan Loader and remove this.
#if defined(DAWN_ENABLE_SWIFTSHADER)
-# if defined(DAWN_PLATFORM_LINUX) || defined(DAWN_PLATFORM_FUSCHIA)
+#if DAWN_PLATFORM_IS(LINUX) || DAWN_PLATFORM_IS(FUSCHIA)
constexpr char kSwiftshaderLibName[] = "libvk_swiftshader.so";
-# elif defined(DAWN_PLATFORM_WINDOWS)
+#elif DAWN_PLATFORM_IS(WINDOWS)
constexpr char kSwiftshaderLibName[] = "vk_swiftshader.dll";
-# elif defined(DAWN_PLATFORM_MACOS)
+#elif DAWN_PLATFORM_IS(MACOS)
constexpr char kSwiftshaderLibName[] = "libvk_swiftshader.dylib";
-# else
-# error "Unimplemented Swiftshader Vulkan backend platform"
-# endif
+#else
+#error "Unimplemented Swiftshader Vulkan backend platform"
+#endif
#endif
-#if defined(DAWN_PLATFORM_LINUX)
-# if defined(DAWN_PLATFORM_ANDROID)
+#if DAWN_PLATFORM_IS(LINUX)
+#if DAWN_PLATFORM_IS(ANDROID)
constexpr char kVulkanLibName[] = "libvulkan.so";
-# else
+#else
constexpr char kVulkanLibName[] = "libvulkan.so.1";
-# endif
-#elif defined(DAWN_PLATFORM_WINDOWS)
+#endif
+#elif DAWN_PLATFORM_IS(WINDOWS)
constexpr char kVulkanLibName[] = "vulkan-1.dll";
-#elif defined(DAWN_PLATFORM_MACOS)
+#elif DAWN_PLATFORM_IS(MACOS)
constexpr char kVulkanLibName[] = "libvulkan.dylib";
-#elif defined(DAWN_PLATFORM_FUCHSIA)
+#elif DAWN_PLATFORM_IS(FUCHSIA)
constexpr char kVulkanLibName[] = "libvulkan.so";
#else
-# error "Unimplemented Vulkan backend platform"
+#error "Unimplemented Vulkan backend platform"
#endif
struct SkippedMessage {
@@ -81,364 +86,397 @@ constexpr SkippedMessage kSkippedMessages[] = {
namespace dawn::native::vulkan {
- namespace {
+namespace {
- static constexpr ICD kICDs[] = {
- ICD::None,
+static constexpr ICD kICDs[] = {
+// Other drivers should not be loaded with MSAN because they don't have MSAN instrumentation.
+// MSAN will produce false positives since it cannot detect changes to memory that the driver
+// has made.
+#if !defined(MEMORY_SANITIZER)
+ ICD::None,
+#endif
#if defined(DAWN_ENABLE_SWIFTSHADER)
- ICD::SwiftShader,
+ ICD::SwiftShader,
#endif // defined(DAWN_ENABLE_SWIFTSHADER)
- };
-
- // Suppress validation errors that are known. Returns false in that case.
- bool ShouldReportDebugMessage(const char* messageId, const char* message) {
- for (const SkippedMessage& msg : kSkippedMessages) {
- if (strstr(messageId, msg.messageId) != nullptr &&
- strstr(message, msg.messageContents) != nullptr) {
- return false;
- }
- }
- return true;
- }
+};
- VKAPI_ATTR VkBool32 VKAPI_CALL
- OnDebugUtilsCallback(VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity,
- VkDebugUtilsMessageTypeFlagsEXT /* messageTypes */,
- const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData,
- void* /* pUserData */) {
- if (ShouldReportDebugMessage(pCallbackData->pMessageIdName, pCallbackData->pMessage)) {
- dawn::WarningLog() << pCallbackData->pMessage;
- ASSERT((messageSeverity & VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT) == 0);
- }
- return VK_FALSE;
+// Returns false for known validation messages that should be suppressed.
+bool ShouldReportDebugMessage(const char* messageId, const char* message) {
+ for (const SkippedMessage& msg : kSkippedMessages) {
+ if (strstr(messageId, msg.messageId) != nullptr &&
+ strstr(message, msg.messageContents) != nullptr) {
+ return false;
}
-
- // A debug callback specifically for instance creation so that we don't fire an ASSERT when
- // the instance fails creation in an expected manner (for example the system not having
- // Vulkan drivers).
- VKAPI_ATTR VkBool32 VKAPI_CALL OnInstanceCreationDebugUtilsCallback(
- VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity,
- VkDebugUtilsMessageTypeFlagsEXT /* messageTypes */,
- const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData,
- void* /* pUserData */) {
+ }
+ return true;
+}
+
+VKAPI_ATTR VkBool32 VKAPI_CALL
+OnDebugUtilsCallback(VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity,
+ VkDebugUtilsMessageTypeFlagsEXT /* messageTypes */,
+ const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData,
+ void* pUserData) {
+ if (ShouldReportDebugMessage(pCallbackData->pMessageIdName, pCallbackData->pMessage)) {
+ if (messageSeverity & VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT) {
+ dawn::ErrorLog() << pCallbackData->pMessage;
+
+ if (pUserData != nullptr) {
+ // Look through all the object labels attached to the debug message and try to parse
+ // a device debug prefix out of one of them. If a debug prefix is found and matches
+ // a registered device, forward the message on to it.
+ for (uint32_t i = 0; i < pCallbackData->objectCount; ++i) {
+ const VkDebugUtilsObjectNameInfoEXT& object = pCallbackData->pObjects[i];
+ std::string deviceDebugPrefix =
+ GetDeviceDebugPrefixFromDebugName(object.pObjectName);
+ if (deviceDebugPrefix.empty()) {
+ continue;
+ }
+
+ VulkanInstance* instance = reinterpret_cast<VulkanInstance*>(pUserData);
+ if (instance->HandleDeviceMessage(std::move(deviceDebugPrefix),
+ pCallbackData->pMessage)) {
+ return VK_FALSE;
+ }
+ }
+ }
+ } else {
dawn::WarningLog() << pCallbackData->pMessage;
- return VK_FALSE;
- }
-
- } // anonymous namespace
-
- VulkanInstance::VulkanInstance() = default;
-
- VulkanInstance::~VulkanInstance() {
- if (mDebugUtilsMessenger != VK_NULL_HANDLE) {
- mFunctions.DestroyDebugUtilsMessengerEXT(mInstance, mDebugUtilsMessenger, nullptr);
- mDebugUtilsMessenger = VK_NULL_HANDLE;
- }
-
- // VkPhysicalDevices are destroyed when the VkInstance is destroyed
- if (mInstance != VK_NULL_HANDLE) {
- mFunctions.DestroyInstance(mInstance, nullptr);
- mInstance = VK_NULL_HANDLE;
}
}
-
- const VulkanFunctions& VulkanInstance::GetFunctions() const {
- return mFunctions;
- }
-
- VkInstance VulkanInstance::GetVkInstance() const {
- return mInstance;
- }
-
- const VulkanGlobalInfo& VulkanInstance::GetGlobalInfo() const {
- return mGlobalInfo;
- }
-
- const std::vector<VkPhysicalDevice>& VulkanInstance::GetPhysicalDevices() const {
- return mPhysicalDevices;
+ return VK_FALSE;
+}
+
+// A debug callback used only during instance creation so that we don't fire an ASSERT when
+// instance creation fails in an expected manner (for example, when the system has no
+// Vulkan drivers).
+VKAPI_ATTR VkBool32 VKAPI_CALL
+OnInstanceCreationDebugUtilsCallback(VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity,
+ VkDebugUtilsMessageTypeFlagsEXT /* messageTypes */,
+ const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData,
+ void* /* pUserData */) {
+ dawn::WarningLog() << pCallbackData->pMessage;
+ return VK_FALSE;
+}
+
+} // anonymous namespace
+
+VulkanInstance::VulkanInstance() = default;
+
+VulkanInstance::~VulkanInstance() {
+ ASSERT(mMessageListenerDevices.empty());
+
+ if (mDebugUtilsMessenger != VK_NULL_HANDLE) {
+ mFunctions.DestroyDebugUtilsMessengerEXT(mInstance, mDebugUtilsMessenger, nullptr);
+ mDebugUtilsMessenger = VK_NULL_HANDLE;
}
- // static
- ResultOrError<Ref<VulkanInstance>> VulkanInstance::Create(const InstanceBase* instance,
- ICD icd) {
- Ref<VulkanInstance> vulkanInstance = AcquireRef(new VulkanInstance());
- DAWN_TRY(vulkanInstance->Initialize(instance, icd));
- return std::move(vulkanInstance);
+ // VkPhysicalDevices are destroyed when the VkInstance is destroyed
+ if (mInstance != VK_NULL_HANDLE) {
+ mFunctions.DestroyInstance(mInstance, nullptr);
+ mInstance = VK_NULL_HANDLE;
}
-
- MaybeError VulkanInstance::Initialize(const InstanceBase* instance, ICD icd) {
- // These environment variables need only be set while loading procs and gathering device
- // info.
- ScopedEnvironmentVar vkICDFilenames;
- ScopedEnvironmentVar vkLayerPath;
-
- const std::vector<std::string>& searchPaths = instance->GetRuntimeSearchPaths();
-
- auto CommaSeparatedResolvedSearchPaths = [&](const char* name) {
- std::string list;
- bool first = true;
- for (const std::string& path : searchPaths) {
- if (!first) {
- list += ", ";
- }
- first = false;
- list += (path + name);
+}
+
+const VulkanFunctions& VulkanInstance::GetFunctions() const {
+ return mFunctions;
+}
+
+VkInstance VulkanInstance::GetVkInstance() const {
+ return mInstance;
+}
+
+const VulkanGlobalInfo& VulkanInstance::GetGlobalInfo() const {
+ return mGlobalInfo;
+}
+
+const std::vector<VkPhysicalDevice>& VulkanInstance::GetPhysicalDevices() const {
+ return mPhysicalDevices;
+}
+
+// static
+ResultOrError<Ref<VulkanInstance>> VulkanInstance::Create(const InstanceBase* instance, ICD icd) {
+ Ref<VulkanInstance> vulkanInstance = AcquireRef(new VulkanInstance());
+ DAWN_TRY(vulkanInstance->Initialize(instance, icd));
+ return std::move(vulkanInstance);
+}
+
+MaybeError VulkanInstance::Initialize(const InstanceBase* instance, ICD icd) {
+ // These environment variables need only be set while loading procs and gathering device
+ // info.
+ ScopedEnvironmentVar vkICDFilenames;
+ ScopedEnvironmentVar vkLayerPath;
+
+ const std::vector<std::string>& searchPaths = instance->GetRuntimeSearchPaths();
+
+ auto CommaSeparatedResolvedSearchPaths = [&](const char* name) {
+ std::string list;
+ bool first = true;
+ for (const std::string& path : searchPaths) {
+ if (!first) {
+ list += ", ";
}
- return list;
- };
-
- auto LoadVulkan = [&](const char* libName) -> MaybeError {
- for (const std::string& path : searchPaths) {
- std::string resolvedPath = path + libName;
- if (mVulkanLib.Open(resolvedPath)) {
- return {};
- }
- }
- return DAWN_FORMAT_INTERNAL_ERROR("Couldn't load Vulkan. Searched %s.",
- CommaSeparatedResolvedSearchPaths(libName));
- };
-
- switch (icd) {
- case ICD::None: {
- DAWN_TRY(LoadVulkan(kVulkanLibName));
- // Succesfully loaded driver; break.
- break;
+ first = false;
+ list += (path + name);
+ }
+ return list;
+ };
+
+ auto LoadVulkan = [&](const char* libName) -> MaybeError {
+ for (const std::string& path : searchPaths) {
+ std::string resolvedPath = path + libName;
+ if (mVulkanLib.Open(resolvedPath)) {
+ return {};
}
- case ICD::SwiftShader: {
+ }
+ return DAWN_FORMAT_INTERNAL_ERROR("Couldn't load Vulkan. Searched %s.",
+ CommaSeparatedResolvedSearchPaths(libName));
+ };
+
+ switch (icd) {
+ case ICD::None: {
+ DAWN_TRY(LoadVulkan(kVulkanLibName));
+            // Successfully loaded driver; break.
+ break;
+ }
+ case ICD::SwiftShader: {
#if defined(DAWN_ENABLE_SWIFTSHADER)
- DAWN_TRY(LoadVulkan(kSwiftshaderLibName));
- break;
+ DAWN_TRY(LoadVulkan(kSwiftshaderLibName));
+ break;
#endif // defined(DAWN_ENABLE_SWIFTSHADER)
// ICD::SwiftShader should not be passed if SwiftShader is not enabled.
- UNREACHABLE();
- }
+ UNREACHABLE();
}
+ }
- if (instance->IsBackendValidationEnabled()) {
+ if (instance->IsBackendValidationEnabled()) {
#if defined(DAWN_ENABLE_VULKAN_VALIDATION_LAYERS)
- auto execDir = GetExecutableDirectory();
- std::string vkDataDir = execDir.value_or("") + DAWN_VK_DATA_DIR;
- if (!vkLayerPath.Set("VK_LAYER_PATH", vkDataDir.c_str())) {
- return DAWN_INTERNAL_ERROR("Couldn't set VK_LAYER_PATH");
- }
+ auto execDir = GetExecutableDirectory();
+ std::string vkDataDir = execDir.value_or("") + DAWN_VK_DATA_DIR;
+ if (!vkLayerPath.Set("VK_LAYER_PATH", vkDataDir.c_str())) {
+ return DAWN_INTERNAL_ERROR("Couldn't set VK_LAYER_PATH");
+ }
#else
- dawn::WarningLog() << "Backend validation enabled but Dawn was not built with "
- "DAWN_ENABLE_VULKAN_VALIDATION_LAYERS.";
+ dawn::WarningLog() << "Backend validation enabled but Dawn was not built with "
+ "DAWN_ENABLE_VULKAN_VALIDATION_LAYERS.";
#endif
- }
+ }
- DAWN_TRY(mFunctions.LoadGlobalProcs(mVulkanLib));
+ DAWN_TRY(mFunctions.LoadGlobalProcs(mVulkanLib));
- DAWN_TRY_ASSIGN(mGlobalInfo, GatherGlobalInfo(mFunctions));
+ DAWN_TRY_ASSIGN(mGlobalInfo, GatherGlobalInfo(mFunctions));
- VulkanGlobalKnobs usedGlobalKnobs = {};
- DAWN_TRY_ASSIGN(usedGlobalKnobs, CreateVkInstance(instance));
- *static_cast<VulkanGlobalKnobs*>(&mGlobalInfo) = usedGlobalKnobs;
+ VulkanGlobalKnobs usedGlobalKnobs = {};
+ DAWN_TRY_ASSIGN(usedGlobalKnobs, CreateVkInstance(instance));
+ *static_cast<VulkanGlobalKnobs*>(&mGlobalInfo) = usedGlobalKnobs;
- DAWN_TRY(mFunctions.LoadInstanceProcs(mInstance, mGlobalInfo));
+ DAWN_TRY(mFunctions.LoadInstanceProcs(mInstance, mGlobalInfo));
- if (usedGlobalKnobs.HasExt(InstanceExt::DebugUtils)) {
- DAWN_TRY(RegisterDebugUtils());
- }
+ if (usedGlobalKnobs.HasExt(InstanceExt::DebugUtils)) {
+ DAWN_TRY(RegisterDebugUtils());
+ }
- DAWN_TRY_ASSIGN(mPhysicalDevices, GatherPhysicalDevices(mInstance, mFunctions));
+ DAWN_TRY_ASSIGN(mPhysicalDevices, GatherPhysicalDevices(mInstance, mFunctions));
- return {};
- }
+ return {};
+}
- ResultOrError<VulkanGlobalKnobs> VulkanInstance::CreateVkInstance(
- const InstanceBase* instance) {
- VulkanGlobalKnobs usedKnobs = {};
- std::vector<const char*> layerNames;
- InstanceExtSet extensionsToRequest = mGlobalInfo.extensions;
-
- auto UseLayerIfAvailable = [&](VulkanLayer layer) {
- if (mGlobalInfo.layers[layer]) {
- layerNames.push_back(GetVulkanLayerInfo(layer).name);
- usedKnobs.layers.set(layer, true);
- extensionsToRequest |= mGlobalInfo.layerExtensions[layer];
- }
- };
+ResultOrError<VulkanGlobalKnobs> VulkanInstance::CreateVkInstance(const InstanceBase* instance) {
+ VulkanGlobalKnobs usedKnobs = {};
+ std::vector<const char*> layerNames;
+ InstanceExtSet extensionsToRequest = mGlobalInfo.extensions;
+
+ auto UseLayerIfAvailable = [&](VulkanLayer layer) {
+ if (mGlobalInfo.layers[layer]) {
+ layerNames.push_back(GetVulkanLayerInfo(layer).name);
+ usedKnobs.layers.set(layer, true);
+ extensionsToRequest |= mGlobalInfo.layerExtensions[layer];
+ }
+ };
- // vktrace works by instering a layer, but we hide it behind a macro because the vktrace
- // layer crashes when used without vktrace server started. See this vktrace issue:
- // https://github.com/LunarG/VulkanTools/issues/254
- // Also it is good to put it in first position so that it doesn't see Vulkan calls inserted
- // by other layers.
+    // vktrace works by inserting a layer, but we hide it behind a macro because the vktrace
+    // layer crashes when used without the vktrace server started. See this vktrace issue:
+ // https://github.com/LunarG/VulkanTools/issues/254
+ // Also it is good to put it in first position so that it doesn't see Vulkan calls inserted
+ // by other layers.
#if defined(DAWN_USE_VKTRACE)
- UseLayerIfAvailable(VulkanLayer::LunargVkTrace);
+ UseLayerIfAvailable(VulkanLayer::LunargVkTrace);
#endif
- // RenderDoc installs a layer at the system level for its capture but we don't want to use
- // it unless we are debugging in RenderDoc so we hide it behind a macro.
+ // RenderDoc installs a layer at the system level for its capture but we don't want to use
+ // it unless we are debugging in RenderDoc so we hide it behind a macro.
#if defined(DAWN_USE_RENDERDOC)
- UseLayerIfAvailable(VulkanLayer::RenderDocCapture);
+ UseLayerIfAvailable(VulkanLayer::RenderDocCapture);
#endif
- if (instance->IsBackendValidationEnabled()) {
- UseLayerIfAvailable(VulkanLayer::Validation);
- }
-
- // Always use the Fuchsia swapchain layer if available.
- UseLayerIfAvailable(VulkanLayer::FuchsiaImagePipeSwapchain);
-
- // Available and known instance extensions default to being requested, but some special
- // cases are removed.
- usedKnobs.extensions = extensionsToRequest;
-
- std::vector<const char*> extensionNames;
- for (InstanceExt ext : IterateBitSet(extensionsToRequest)) {
- const InstanceExtInfo& info = GetInstanceExtInfo(ext);
+ if (instance->IsBackendValidationEnabled()) {
+ UseLayerIfAvailable(VulkanLayer::Validation);
+ }
- if (info.versionPromoted > mGlobalInfo.apiVersion) {
- extensionNames.push_back(info.name);
- }
- }
+ // Always use the Fuchsia swapchain layer if available.
+ UseLayerIfAvailable(VulkanLayer::FuchsiaImagePipeSwapchain);
- VkApplicationInfo appInfo;
- appInfo.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO;
- appInfo.pNext = nullptr;
- appInfo.pApplicationName = nullptr;
- appInfo.applicationVersion = 0;
- appInfo.pEngineName = nullptr;
- appInfo.engineVersion = 0;
- // Vulkan 1.0 implementations were required to return VK_ERROR_INCOMPATIBLE_DRIVER if
- // apiVersion was larger than 1.0. Meanwhile, as long as the instance supports at least
- // Vulkan 1.1, an application can use different versions of Vulkan with an instance than
- // it does with a device or physical device. So we should set apiVersion to Vulkan 1.0
- // if the instance only supports Vulkan 1.0. Otherwise we set apiVersion to Vulkan 1.2,
- // treat 1.2 as the highest API version dawn targets.
- if (mGlobalInfo.apiVersion == VK_MAKE_VERSION(1, 0, 0)) {
- appInfo.apiVersion = VK_MAKE_VERSION(1, 0, 0);
- } else {
- appInfo.apiVersion = VK_MAKE_VERSION(1, 2, 0);
- }
+ // Available and known instance extensions default to being requested, but some special
+ // cases are removed.
+ usedKnobs.extensions = extensionsToRequest;
- VkInstanceCreateInfo createInfo;
- createInfo.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO;
- createInfo.pNext = nullptr;
- createInfo.flags = 0;
- createInfo.pApplicationInfo = &appInfo;
- createInfo.enabledLayerCount = static_cast<uint32_t>(layerNames.size());
- createInfo.ppEnabledLayerNames = layerNames.data();
- createInfo.enabledExtensionCount = static_cast<uint32_t>(extensionNames.size());
- createInfo.ppEnabledExtensionNames = extensionNames.data();
-
- PNextChainBuilder createInfoChain(&createInfo);
-
- // Register the debug callback for instance creation so we receive message for any errors
- // (validation or other).
- VkDebugUtilsMessengerCreateInfoEXT utilsMessengerCreateInfo;
- if (usedKnobs.HasExt(InstanceExt::DebugUtils)) {
- utilsMessengerCreateInfo.flags = 0;
- utilsMessengerCreateInfo.messageSeverity =
- VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT |
- VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT;
- utilsMessengerCreateInfo.messageType = VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT |
- VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT;
- utilsMessengerCreateInfo.pfnUserCallback = OnInstanceCreationDebugUtilsCallback;
- utilsMessengerCreateInfo.pUserData = nullptr;
-
- createInfoChain.Add(&utilsMessengerCreateInfo,
- VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT);
- }
+ std::vector<const char*> extensionNames;
+ for (InstanceExt ext : IterateBitSet(extensionsToRequest)) {
+ const InstanceExtInfo& info = GetInstanceExtInfo(ext);
- // Try to turn on synchronization validation if the instance was created with backend
- // validation enabled.
- VkValidationFeaturesEXT validationFeatures;
- VkValidationFeatureEnableEXT kEnableSynchronizationValidation =
- VK_VALIDATION_FEATURE_ENABLE_SYNCHRONIZATION_VALIDATION_EXT;
- if (instance->IsBackendValidationEnabled() &&
- usedKnobs.HasExt(InstanceExt::ValidationFeatures)) {
- validationFeatures.enabledValidationFeatureCount = 1;
- validationFeatures.pEnabledValidationFeatures = &kEnableSynchronizationValidation;
- validationFeatures.disabledValidationFeatureCount = 0;
- validationFeatures.pDisabledValidationFeatures = nullptr;
-
- createInfoChain.Add(&validationFeatures, VK_STRUCTURE_TYPE_VALIDATION_FEATURES_EXT);
+ if (info.versionPromoted > mGlobalInfo.apiVersion) {
+ extensionNames.push_back(info.name);
}
+ }
- DAWN_TRY(CheckVkSuccess(mFunctions.CreateInstance(&createInfo, nullptr, &mInstance),
- "vkCreateInstance"));
-
- return usedKnobs;
+ VkApplicationInfo appInfo;
+ appInfo.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO;
+ appInfo.pNext = nullptr;
+ appInfo.pApplicationName = nullptr;
+ appInfo.applicationVersion = 0;
+ appInfo.pEngineName = nullptr;
+ appInfo.engineVersion = 0;
+ appInfo.apiVersion = std::min(mGlobalInfo.apiVersion, VK_API_VERSION_1_3);
+
+ VkInstanceCreateInfo createInfo;
+ createInfo.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO;
+ createInfo.pNext = nullptr;
+ createInfo.flags = 0;
+ createInfo.pApplicationInfo = &appInfo;
+ createInfo.enabledLayerCount = static_cast<uint32_t>(layerNames.size());
+ createInfo.ppEnabledLayerNames = layerNames.data();
+ createInfo.enabledExtensionCount = static_cast<uint32_t>(extensionNames.size());
+ createInfo.ppEnabledExtensionNames = extensionNames.data();
+
+ PNextChainBuilder createInfoChain(&createInfo);
+
+    // Register the debug callback for instance creation so we receive messages for any errors
+    // (validation or other).
+ VkDebugUtilsMessengerCreateInfoEXT utilsMessengerCreateInfo;
+ if (usedKnobs.HasExt(InstanceExt::DebugUtils)) {
+ utilsMessengerCreateInfo.flags = 0;
+ utilsMessengerCreateInfo.messageSeverity = VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT |
+ VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT;
+ utilsMessengerCreateInfo.messageType = VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT |
+ VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT;
+ utilsMessengerCreateInfo.pfnUserCallback = OnInstanceCreationDebugUtilsCallback;
+ utilsMessengerCreateInfo.pUserData = nullptr;
+
+ createInfoChain.Add(&utilsMessengerCreateInfo,
+ VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT);
}
- MaybeError VulkanInstance::RegisterDebugUtils() {
- VkDebugUtilsMessengerCreateInfoEXT createInfo;
- createInfo.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT;
- createInfo.pNext = nullptr;
- createInfo.flags = 0;
- createInfo.messageSeverity = VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT |
- VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT;
- createInfo.messageType = VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT |
- VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT;
- createInfo.pfnUserCallback = OnDebugUtilsCallback;
- createInfo.pUserData = nullptr;
-
- return CheckVkSuccess(mFunctions.CreateDebugUtilsMessengerEXT(
- mInstance, &createInfo, nullptr, &*mDebugUtilsMessenger),
- "vkCreateDebugUtilsMessengerEXT");
+ // Try to turn on synchronization validation if the instance was created with backend
+ // validation enabled.
+ VkValidationFeaturesEXT validationFeatures;
+ VkValidationFeatureEnableEXT kEnableSynchronizationValidation =
+ VK_VALIDATION_FEATURE_ENABLE_SYNCHRONIZATION_VALIDATION_EXT;
+ if (instance->IsBackendValidationEnabled() &&
+ usedKnobs.HasExt(InstanceExt::ValidationFeatures)) {
+ validationFeatures.enabledValidationFeatureCount = 1;
+ validationFeatures.pEnabledValidationFeatures = &kEnableSynchronizationValidation;
+ validationFeatures.disabledValidationFeatureCount = 0;
+ validationFeatures.pDisabledValidationFeatures = nullptr;
+
+ createInfoChain.Add(&validationFeatures, VK_STRUCTURE_TYPE_VALIDATION_FEATURES_EXT);
}
- Backend::Backend(InstanceBase* instance)
- : BackendConnection(instance, wgpu::BackendType::Vulkan) {
+ DAWN_TRY(CheckVkSuccess(mFunctions.CreateInstance(&createInfo, nullptr, &mInstance),
+ "vkCreateInstance"));
+
+ return usedKnobs;
+}
+
+MaybeError VulkanInstance::RegisterDebugUtils() {
+ VkDebugUtilsMessengerCreateInfoEXT createInfo;
+ createInfo.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT;
+ createInfo.pNext = nullptr;
+ createInfo.flags = 0;
+ createInfo.messageSeverity = VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT |
+ VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT;
+ createInfo.messageType = VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT |
+ VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT;
+ createInfo.pfnUserCallback = OnDebugUtilsCallback;
+ createInfo.pUserData = this;
+
+ return CheckVkSuccess(mFunctions.CreateDebugUtilsMessengerEXT(mInstance, &createInfo, nullptr,
+ &*mDebugUtilsMessenger),
+ "vkCreateDebugUtilsMessengerEXT");
+}
+
+void VulkanInstance::StartListeningForDeviceMessages(Device* device) {
+ std::lock_guard<std::mutex> lock(mMessageListenerDevicesMutex);
+ mMessageListenerDevices.insert({device->GetDebugPrefix(), device});
+}
+void VulkanInstance::StopListeningForDeviceMessages(Device* device) {
+ std::lock_guard<std::mutex> lock(mMessageListenerDevicesMutex);
+ mMessageListenerDevices.erase(device->GetDebugPrefix());
+}
+bool VulkanInstance::HandleDeviceMessage(std::string deviceDebugPrefix, std::string message) {
+ std::lock_guard<std::mutex> lock(mMessageListenerDevicesMutex);
+ auto it = mMessageListenerDevices.find(deviceDebugPrefix);
+ if (it != mMessageListenerDevices.end()) {
+ it->second->OnDebugMessage(std::move(message));
+ return true;
}
+ return false;
+}
- Backend::~Backend() = default;
+Backend::Backend(InstanceBase* instance) : BackendConnection(instance, wgpu::BackendType::Vulkan) {}
- std::vector<Ref<AdapterBase>> Backend::DiscoverDefaultAdapters() {
- AdapterDiscoveryOptions options;
- auto result = DiscoverAdapters(&options);
- if (result.IsError()) {
- GetInstance()->ConsumedError(result.AcquireError());
- return {};
- }
- return result.AcquireSuccess();
+Backend::~Backend() = default;
+
+std::vector<Ref<AdapterBase>> Backend::DiscoverDefaultAdapters() {
+ AdapterDiscoveryOptions options;
+ auto result = DiscoverAdapters(&options);
+ if (result.IsError()) {
+ GetInstance()->ConsumedError(result.AcquireError());
+ return {};
}
+ return result.AcquireSuccess();
+}
- ResultOrError<std::vector<Ref<AdapterBase>>> Backend::DiscoverAdapters(
- const AdapterDiscoveryOptionsBase* optionsBase) {
- ASSERT(optionsBase->backendType == WGPUBackendType_Vulkan);
+ResultOrError<std::vector<Ref<AdapterBase>>> Backend::DiscoverAdapters(
+ const AdapterDiscoveryOptionsBase* optionsBase) {
+ ASSERT(optionsBase->backendType == WGPUBackendType_Vulkan);
- const AdapterDiscoveryOptions* options =
- static_cast<const AdapterDiscoveryOptions*>(optionsBase);
+ const AdapterDiscoveryOptions* options =
+ static_cast<const AdapterDiscoveryOptions*>(optionsBase);
- std::vector<Ref<AdapterBase>> adapters;
+ std::vector<Ref<AdapterBase>> adapters;
- InstanceBase* instance = GetInstance();
- for (ICD icd : kICDs) {
-#if defined(DAWN_PLATFORM_MACOS)
- // On Mac, we don't expect non-Swiftshader Vulkan to be available.
- if (icd == ICD::None) {
- continue;
- }
-#endif // defined(DAWN_PLATFORM_MACOS)
- if (options->forceSwiftShader && icd != ICD::SwiftShader) {
- continue;
- }
- if (mVulkanInstances[icd] == nullptr && instance->ConsumedError([&]() -> MaybeError {
- DAWN_TRY_ASSIGN(mVulkanInstances[icd], VulkanInstance::Create(instance, icd));
- return {};
- }())) {
- // Instance failed to initialize.
+ InstanceBase* instance = GetInstance();
+ for (ICD icd : kICDs) {
+#if DAWN_PLATFORM_IS(MACOS)
+ // On Mac, we don't expect non-Swiftshader Vulkan to be available.
+ if (icd == ICD::None) {
+ continue;
+ }
+#endif // DAWN_PLATFORM_IS(MACOS)
+ if (options->forceSwiftShader && icd != ICD::SwiftShader) {
+ continue;
+ }
+ if (mVulkanInstances[icd] == nullptr && instance->ConsumedError([&]() -> MaybeError {
+ DAWN_TRY_ASSIGN(mVulkanInstances[icd], VulkanInstance::Create(instance, icd));
+ return {};
+ }())) {
+ // Instance failed to initialize.
+ continue;
+ }
+ const std::vector<VkPhysicalDevice>& physicalDevices =
+ mVulkanInstances[icd]->GetPhysicalDevices();
+ for (uint32_t i = 0; i < physicalDevices.size(); ++i) {
+ Ref<Adapter> adapter =
+ AcquireRef(new Adapter(instance, mVulkanInstances[icd].Get(), physicalDevices[i]));
+ if (instance->ConsumedError(adapter->Initialize())) {
continue;
}
- const std::vector<VkPhysicalDevice>& physicalDevices =
- mVulkanInstances[icd]->GetPhysicalDevices();
- for (uint32_t i = 0; i < physicalDevices.size(); ++i) {
- Ref<Adapter> adapter = AcquireRef(
- new Adapter(instance, mVulkanInstances[icd].Get(), physicalDevices[i]));
- if (instance->ConsumedError(adapter->Initialize())) {
- continue;
- }
- adapters.push_back(std::move(adapter));
- }
+ adapters.push_back(std::move(adapter));
}
- return adapters;
}
+ return adapters;
+}
- BackendConnection* Connect(InstanceBase* instance) {
- return new Backend(instance);
- }
+BackendConnection* Connect(InstanceBase* instance) {
+ return new Backend(instance);
+}
} // namespace dawn::native::vulkan
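
For reference, here is a minimal standalone sketch (not Dawn code, not part of the diff) of the two instance-creation details touched in CreateVkInstance above: clamping VkApplicationInfo::apiVersion to what the loader reports, and chaining a VkDebugUtilsMessengerCreateInfoEXT into VkInstanceCreateInfo::pNext so errors raised during vkCreateInstance itself are reported. It assumes Vulkan 1.1+ headers and loader; DebugCallback is a placeholder name, and Dawn does the equivalent through its VulkanFunctions table and PNextChainBuilder helper.

// instance_sketch.cpp -- illustrative only
#include <vulkan/vulkan.h>
#include <algorithm>
#include <cstdio>

static VKAPI_ATTR VkBool32 VKAPI_CALL DebugCallback(
    VkDebugUtilsMessageSeverityFlagBitsEXT /*severity*/,
    VkDebugUtilsMessageTypeFlagsEXT /*types*/,
    const VkDebugUtilsMessengerCallbackDataEXT* data,
    void* /*userData*/) {
    std::fprintf(stderr, "instance-creation message: %s\n", data->pMessage);
    return VK_FALSE;  // do not abort the call that triggered the message
}

int main() {
    uint32_t loaderVersion = VK_API_VERSION_1_0;
    vkEnumerateInstanceVersion(&loaderVersion);  // exported by 1.1+ loaders

    VkApplicationInfo appInfo = {};
    appInfo.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO;
    // Request no more than the loader supports, capped at the highest version
    // the application targets (1.3 here, mirroring the std::min() change above).
    appInfo.apiVersion = std::min(loaderVersion, VK_API_VERSION_1_3);

    // Chaining this struct on the instance create info makes the callback cover
    // messages produced while vkCreateInstance runs.
    VkDebugUtilsMessengerCreateInfoEXT messengerInfo = {};
    messengerInfo.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT;
    messengerInfo.messageSeverity = VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT |
                                    VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT;
    messengerInfo.messageType = VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT |
                                VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT;
    messengerInfo.pfnUserCallback = DebugCallback;

    const char* extensions[] = {VK_EXT_DEBUG_UTILS_EXTENSION_NAME};

    VkInstanceCreateInfo createInfo = {};
    createInfo.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO;
    createInfo.pNext = &messengerInfo;
    createInfo.pApplicationInfo = &appInfo;
    createInfo.enabledExtensionCount = 1;
    createInfo.ppEnabledExtensionNames = extensions;

    VkInstance instance = VK_NULL_HANDLE;
    if (vkCreateInstance(&createInfo, nullptr, &instance) != VK_SUCCESS) {
        std::fprintf(stderr, "vkCreateInstance failed\n");
        return 1;
    }
    vkDestroyInstance(instance, nullptr);
    return 0;
}
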
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/BackendVk.h b/chromium/third_party/dawn/src/dawn/native/vulkan/BackendVk.h
index 15c558a5f4a..f912344d312 100644
--- a/chromium/third_party/dawn/src/dawn/native/vulkan/BackendVk.h
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/BackendVk.h
@@ -15,6 +15,11 @@
#ifndef SRC_DAWN_NATIVE_VULKAN_BACKENDVK_H_
#define SRC_DAWN_NATIVE_VULKAN_BACKENDVK_H_
+#include <mutex>
+#include <string>
+#include <unordered_map>
+#include <vector>
+
#include "dawn/native/BackendConnection.h"
#include "dawn/common/DynamicLib.h"
@@ -25,61 +30,74 @@
namespace dawn::native::vulkan {
- enum class ICD {
- None,
- SwiftShader,
- };
-
- // VulkanInstance holds the reference to the Vulkan library, the VkInstance, VkPhysicalDevices
- // on that instance, Vulkan functions loaded from the library, and global information
- // gathered from the instance. VkPhysicalDevices bound to the VkInstance are bound to the GPU
- // and GPU driver, keeping them active. It is RefCounted so that (eventually) when all adapters
- // on an instance are no longer in use, the instance is deleted. This can be particuarly useful
- // when we create multiple instances to selectively discover ICDs (like only
- // SwiftShader/iGPU/dGPU/eGPU), and only one physical device on one instance remains in use. We
- // can delete the VkInstances that are not in use to avoid holding the discrete GPU active.
- class VulkanInstance : public RefCounted {
- public:
- static ResultOrError<Ref<VulkanInstance>> Create(const InstanceBase* instance, ICD icd);
- ~VulkanInstance();
-
- const VulkanFunctions& GetFunctions() const;
- VkInstance GetVkInstance() const;
- const VulkanGlobalInfo& GetGlobalInfo() const;
- const std::vector<VkPhysicalDevice>& GetPhysicalDevices() const;
-
- private:
- VulkanInstance();
-
- MaybeError Initialize(const InstanceBase* instance, ICD icd);
- ResultOrError<VulkanGlobalKnobs> CreateVkInstance(const InstanceBase* instance);
-
- MaybeError RegisterDebugUtils();
-
- DynamicLib mVulkanLib;
- VulkanGlobalInfo mGlobalInfo = {};
- VkInstance mInstance = VK_NULL_HANDLE;
- VulkanFunctions mFunctions;
-
- VkDebugUtilsMessengerEXT mDebugUtilsMessenger = VK_NULL_HANDLE;
-
- std::vector<VkPhysicalDevice> mPhysicalDevices;
- };
-
- class Backend : public BackendConnection {
- public:
- explicit Backend(InstanceBase* instance);
- ~Backend() override;
-
- MaybeError Initialize();
-
- std::vector<Ref<AdapterBase>> DiscoverDefaultAdapters() override;
- ResultOrError<std::vector<Ref<AdapterBase>>> DiscoverAdapters(
- const AdapterDiscoveryOptionsBase* optionsBase) override;
-
- private:
- ityp::array<ICD, Ref<VulkanInstance>, 2> mVulkanInstances = {};
- };
+enum class ICD {
+ None,
+ SwiftShader,
+};
+
+class Device;
+
+// VulkanInstance holds the reference to the Vulkan library, the VkInstance, VkPhysicalDevices
+// on that instance, Vulkan functions loaded from the library, and global information
+// gathered from the instance. VkPhysicalDevices bound to the VkInstance are bound to the GPU
+// and GPU driver, keeping them active. It is RefCounted so that (eventually) when all adapters
+// on an instance are no longer in use, the instance is deleted. This can be particularly useful
+// when we create multiple instances to selectively discover ICDs (like only
+// SwiftShader/iGPU/dGPU/eGPU), and only one physical device on one instance remains in use. We
+// can delete the VkInstances that are not in use to avoid holding the discrete GPU active.
+class VulkanInstance : public RefCounted {
+ public:
+ static ResultOrError<Ref<VulkanInstance>> Create(const InstanceBase* instance, ICD icd);
+ ~VulkanInstance() override;
+
+ const VulkanFunctions& GetFunctions() const;
+ VkInstance GetVkInstance() const;
+ const VulkanGlobalInfo& GetGlobalInfo() const;
+ const std::vector<VkPhysicalDevice>& GetPhysicalDevices() const;
+
+    // TODO(dawn:831): The guards in this set of functions may need to be adjusted when Dawn is
+    // updated to support multithreading.
+ void StartListeningForDeviceMessages(Device* device);
+ void StopListeningForDeviceMessages(Device* device);
+ bool HandleDeviceMessage(std::string deviceDebugPrefix, std::string message);
+
+ private:
+ VulkanInstance();
+
+ MaybeError Initialize(const InstanceBase* instance, ICD icd);
+ ResultOrError<VulkanGlobalKnobs> CreateVkInstance(const InstanceBase* instance);
+
+ MaybeError RegisterDebugUtils();
+
+ DynamicLib mVulkanLib;
+ VulkanGlobalInfo mGlobalInfo = {};
+ VkInstance mInstance = VK_NULL_HANDLE;
+ VulkanFunctions mFunctions;
+
+ VkDebugUtilsMessengerEXT mDebugUtilsMessenger = VK_NULL_HANDLE;
+
+ std::vector<VkPhysicalDevice> mPhysicalDevices;
+
+ // Devices keep the VulkanInstance alive, so as long as devices remove themselves from this
+ // map on destruction the pointers it contains should remain valid.
+ std::unordered_map<std::string, Device*> mMessageListenerDevices;
+ std::mutex mMessageListenerDevicesMutex;
+};
+
+class Backend : public BackendConnection {
+ public:
+ explicit Backend(InstanceBase* instance);
+ ~Backend() override;
+
+ MaybeError Initialize();
+
+ std::vector<Ref<AdapterBase>> DiscoverDefaultAdapters() override;
+ ResultOrError<std::vector<Ref<AdapterBase>>> DiscoverAdapters(
+ const AdapterDiscoveryOptionsBase* optionsBase) override;
+
+ private:
+ ityp::array<ICD, Ref<VulkanInstance>, 2> mVulkanInstances = {};
+};
} // namespace dawn::native::vulkan
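
The new StartListeningForDeviceMessages/StopListeningForDeviceMessages/HandleDeviceMessage members declared above route debug-utils messages back to the device whose debug prefix they carry. A standalone sketch of that pattern follows, with placeholder types (FakeDevice, MessageRouter) instead of Dawn's real classes; it only illustrates the prefix-keyed map guarded by a mutex.

// message_router_sketch.cpp -- illustrative only
#include <iostream>
#include <mutex>
#include <string>
#include <unordered_map>

struct FakeDevice {
    std::string debugPrefix;
    void OnDebugMessage(std::string message) {
        std::cout << debugPrefix << ": " << message << "\n";
    }
};

class MessageRouter {
  public:
    void StartListening(FakeDevice* device) {
        std::lock_guard<std::mutex> lock(mMutex);
        mListeners.insert({device->debugPrefix, device});
    }
    void StopListening(FakeDevice* device) {
        std::lock_guard<std::mutex> lock(mMutex);
        mListeners.erase(device->debugPrefix);
    }
    // Returns true when a registered device claimed the message.
    bool HandleMessage(const std::string& prefix, std::string message) {
        std::lock_guard<std::mutex> lock(mMutex);
        auto it = mListeners.find(prefix);
        if (it == mListeners.end()) {
            return false;
        }
        it->second->OnDebugMessage(std::move(message));
        return true;
    }

  private:
    std::unordered_map<std::string, FakeDevice*> mListeners;
    std::mutex mMutex;
};

int main() {
    FakeDevice device{"Device-A"};
    MessageRouter router;
    router.StartListening(&device);
    router.HandleMessage("Device-A", "validation warning: ...");  // routed to the device
    router.StopListening(&device);
    router.HandleMessage("Device-A", "dropped");  // no listener, returns false
    return 0;
}
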
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/BindGroupLayoutVk.cpp b/chromium/third_party/dawn/src/dawn/native/vulkan/BindGroupLayoutVk.cpp
index 8ed9b93a566..a87a91fc981 100644
--- a/chromium/third_party/dawn/src/dawn/native/vulkan/BindGroupLayoutVk.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/BindGroupLayoutVk.cpp
@@ -14,185 +14,183 @@
#include "dawn/native/vulkan/BindGroupLayoutVk.h"
+#include <map>
+#include <utility>
+
#include "dawn/common/BitSetIterator.h"
#include "dawn/common/ityp_vector.h"
#include "dawn/native/CacheKey.h"
-#include "dawn/native/vulkan/BindGroupVk.h"
#include "dawn/native/vulkan/DescriptorSetAllocator.h"
#include "dawn/native/vulkan/DeviceVk.h"
#include "dawn/native/vulkan/FencedDeleter.h"
#include "dawn/native/vulkan/UtilsVulkan.h"
#include "dawn/native/vulkan/VulkanError.h"
-#include <map>
-
namespace dawn::native::vulkan {
- namespace {
+namespace {
- VkShaderStageFlags VulkanShaderStageFlags(wgpu::ShaderStage stages) {
- VkShaderStageFlags flags = 0;
+VkShaderStageFlags VulkanShaderStageFlags(wgpu::ShaderStage stages) {
+ VkShaderStageFlags flags = 0;
- if (stages & wgpu::ShaderStage::Vertex) {
- flags |= VK_SHADER_STAGE_VERTEX_BIT;
- }
- if (stages & wgpu::ShaderStage::Fragment) {
- flags |= VK_SHADER_STAGE_FRAGMENT_BIT;
- }
- if (stages & wgpu::ShaderStage::Compute) {
- flags |= VK_SHADER_STAGE_COMPUTE_BIT;
- }
-
- return flags;
- }
-
- } // anonymous namespace
-
- VkDescriptorType VulkanDescriptorType(const BindingInfo& bindingInfo) {
- switch (bindingInfo.bindingType) {
- case BindingInfoType::Buffer:
- switch (bindingInfo.buffer.type) {
- case wgpu::BufferBindingType::Uniform:
- if (bindingInfo.buffer.hasDynamicOffset) {
- return VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
- }
- return VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
- case wgpu::BufferBindingType::Storage:
- case kInternalStorageBufferBinding:
- case wgpu::BufferBindingType::ReadOnlyStorage:
- if (bindingInfo.buffer.hasDynamicOffset) {
- return VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC;
- }
- return VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
- case wgpu::BufferBindingType::Undefined:
- UNREACHABLE();
- }
- case BindingInfoType::Sampler:
- return VK_DESCRIPTOR_TYPE_SAMPLER;
- case BindingInfoType::Texture:
- case BindingInfoType::ExternalTexture:
- return VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
- case BindingInfoType::StorageTexture:
- return VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
- }
- UNREACHABLE();
+ if (stages & wgpu::ShaderStage::Vertex) {
+ flags |= VK_SHADER_STAGE_VERTEX_BIT;
}
-
- // static
- ResultOrError<Ref<BindGroupLayout>> BindGroupLayout::Create(
- Device* device,
- const BindGroupLayoutDescriptor* descriptor,
- PipelineCompatibilityToken pipelineCompatibilityToken) {
- Ref<BindGroupLayout> bgl =
- AcquireRef(new BindGroupLayout(device, descriptor, pipelineCompatibilityToken));
- DAWN_TRY(bgl->Initialize());
- return bgl;
+ if (stages & wgpu::ShaderStage::Fragment) {
+ flags |= VK_SHADER_STAGE_FRAGMENT_BIT;
+ }
+ if (stages & wgpu::ShaderStage::Compute) {
+ flags |= VK_SHADER_STAGE_COMPUTE_BIT;
}
- MaybeError BindGroupLayout::Initialize() {
- // Compute the bindings that will be chained in the DescriptorSetLayout create info. We add
- // one entry per binding set. This might be optimized by computing continuous ranges of
- // bindings of the same type.
- ityp::vector<BindingIndex, VkDescriptorSetLayoutBinding> bindings;
- bindings.reserve(GetBindingCount());
-
- for (const auto& [_, bindingIndex] : GetBindingMap()) {
- const BindingInfo& bindingInfo = GetBindingInfo(bindingIndex);
-
- VkDescriptorSetLayoutBinding vkBinding;
- vkBinding.binding = static_cast<uint32_t>(bindingIndex);
- vkBinding.descriptorType = VulkanDescriptorType(bindingInfo);
- vkBinding.descriptorCount = 1;
- vkBinding.stageFlags = VulkanShaderStageFlags(bindingInfo.visibility);
- vkBinding.pImmutableSamplers = nullptr;
-
- bindings.emplace_back(vkBinding);
- }
-
- VkDescriptorSetLayoutCreateInfo createInfo;
- createInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
- createInfo.pNext = nullptr;
- createInfo.flags = 0;
- createInfo.bindingCount = static_cast<uint32_t>(bindings.size());
- createInfo.pBindings = bindings.data();
-
- // Record cache key information now since the createInfo is not stored.
- GetCacheKey()->Record(createInfo);
-
- Device* device = ToBackend(GetDevice());
- DAWN_TRY(CheckVkSuccess(device->fn.CreateDescriptorSetLayout(
- device->GetVkDevice(), &createInfo, nullptr, &*mHandle),
- "CreateDescriptorSetLayout"));
+ return flags;
+}
+
+} // anonymous namespace
+
+VkDescriptorType VulkanDescriptorType(const BindingInfo& bindingInfo) {
+ switch (bindingInfo.bindingType) {
+ case BindingInfoType::Buffer:
+ switch (bindingInfo.buffer.type) {
+ case wgpu::BufferBindingType::Uniform:
+ if (bindingInfo.buffer.hasDynamicOffset) {
+ return VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
+ }
+ return VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
+ case wgpu::BufferBindingType::Storage:
+ case kInternalStorageBufferBinding:
+ case wgpu::BufferBindingType::ReadOnlyStorage:
+ if (bindingInfo.buffer.hasDynamicOffset) {
+ return VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC;
+ }
+ return VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
+ case wgpu::BufferBindingType::Undefined:
+ UNREACHABLE();
+ }
+ case BindingInfoType::Sampler:
+ return VK_DESCRIPTOR_TYPE_SAMPLER;
+ case BindingInfoType::Texture:
+ case BindingInfoType::ExternalTexture:
+ return VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
+ case BindingInfoType::StorageTexture:
+ return VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
+ }
+ UNREACHABLE();
+}
+
+// static
+ResultOrError<Ref<BindGroupLayout>> BindGroupLayout::Create(
+ Device* device,
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken) {
+ Ref<BindGroupLayout> bgl =
+ AcquireRef(new BindGroupLayout(device, descriptor, pipelineCompatibilityToken));
+ DAWN_TRY(bgl->Initialize());
+ return bgl;
+}
+
+MaybeError BindGroupLayout::Initialize() {
+    // Compute the bindings that will be chained in the DescriptorSetLayout create info. We add
+    // one entry per binding. This might be optimized by computing contiguous ranges of
+    // bindings of the same type.
+ ityp::vector<BindingIndex, VkDescriptorSetLayoutBinding> bindings;
+ bindings.reserve(GetBindingCount());
+
+ for (const auto& [_, bindingIndex] : GetBindingMap()) {
+ const BindingInfo& bindingInfo = GetBindingInfo(bindingIndex);
+
+ VkDescriptorSetLayoutBinding vkBinding;
+ vkBinding.binding = static_cast<uint32_t>(bindingIndex);
+ vkBinding.descriptorType = VulkanDescriptorType(bindingInfo);
+ vkBinding.descriptorCount = 1;
+ vkBinding.stageFlags = VulkanShaderStageFlags(bindingInfo.visibility);
+ vkBinding.pImmutableSamplers = nullptr;
+
+ bindings.emplace_back(vkBinding);
+ }
- // Compute the size of descriptor pools used for this layout.
- std::map<VkDescriptorType, uint32_t> descriptorCountPerType;
+ VkDescriptorSetLayoutCreateInfo createInfo;
+ createInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
+ createInfo.pNext = nullptr;
+ createInfo.flags = 0;
+ createInfo.bindingCount = static_cast<uint32_t>(bindings.size());
+ createInfo.pBindings = bindings.data();
- for (BindingIndex bindingIndex{0}; bindingIndex < GetBindingCount(); ++bindingIndex) {
- VkDescriptorType vulkanType = VulkanDescriptorType(GetBindingInfo(bindingIndex));
+ // Record cache key information now since the createInfo is not stored.
+ mCacheKey.Record(createInfo);
- // map::operator[] will return 0 if the key doesn't exist.
- descriptorCountPerType[vulkanType]++;
- }
+ Device* device = ToBackend(GetDevice());
+ DAWN_TRY(CheckVkSuccess(device->fn.CreateDescriptorSetLayout(device->GetVkDevice(), &createInfo,
+ nullptr, &*mHandle),
+ "CreateDescriptorSetLayout"));
- // TODO(enga): Consider deduping allocators for layouts with the same descriptor type
- // counts.
- mDescriptorSetAllocator =
- DescriptorSetAllocator::Create(this, std::move(descriptorCountPerType));
+ // Compute the size of descriptor pools used for this layout.
+ std::map<VkDescriptorType, uint32_t> descriptorCountPerType;
- SetLabelImpl();
+ for (BindingIndex bindingIndex{0}; bindingIndex < GetBindingCount(); ++bindingIndex) {
+ VkDescriptorType vulkanType = VulkanDescriptorType(GetBindingInfo(bindingIndex));
- return {};
+ // map::operator[] will return 0 if the key doesn't exist.
+ descriptorCountPerType[vulkanType]++;
}
- BindGroupLayout::BindGroupLayout(DeviceBase* device,
- const BindGroupLayoutDescriptor* descriptor,
- PipelineCompatibilityToken pipelineCompatibilityToken)
- : BindGroupLayoutBase(device, descriptor, pipelineCompatibilityToken),
- mBindGroupAllocator(MakeFrontendBindGroupAllocator<BindGroup>(4096)) {
- }
+ // TODO(enga): Consider deduping allocators for layouts with the same descriptor type
+ // counts.
+ mDescriptorSetAllocator =
+ DescriptorSetAllocator::Create(this, std::move(descriptorCountPerType));
- BindGroupLayout::~BindGroupLayout() = default;
+ SetLabelImpl();
- void BindGroupLayout::DestroyImpl() {
- BindGroupLayoutBase::DestroyImpl();
+ return {};
+}
- Device* device = ToBackend(GetDevice());
+BindGroupLayout::BindGroupLayout(DeviceBase* device,
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken)
+ : BindGroupLayoutBase(device, descriptor, pipelineCompatibilityToken),
+ mBindGroupAllocator(MakeFrontendBindGroupAllocator<BindGroup>(4096)) {}
- // DescriptorSetLayout aren't used by execution on the GPU and can be deleted at any time,
- // so we can destroy mHandle immediately instead of using the FencedDeleter.
- // (Swiftshader implements this wrong b/154522740).
- // In practice, the GPU is done with all descriptor sets because bind group deallocation
- // refs the bind group layout so that once the bind group is finished being used, we can
- // recycle its descriptor set.
- if (mHandle != VK_NULL_HANDLE) {
- device->fn.DestroyDescriptorSetLayout(device->GetVkDevice(), mHandle, nullptr);
- mHandle = VK_NULL_HANDLE;
- }
- mDescriptorSetAllocator = nullptr;
- }
-
- VkDescriptorSetLayout BindGroupLayout::GetHandle() const {
- return mHandle;
- }
+BindGroupLayout::~BindGroupLayout() = default;
- ResultOrError<Ref<BindGroup>> BindGroupLayout::AllocateBindGroup(
- Device* device,
- const BindGroupDescriptor* descriptor) {
- DescriptorSetAllocation descriptorSetAllocation;
- DAWN_TRY_ASSIGN(descriptorSetAllocation, mDescriptorSetAllocator->Allocate());
+void BindGroupLayout::DestroyImpl() {
+ BindGroupLayoutBase::DestroyImpl();
- return AcquireRef(
- mBindGroupAllocator.Allocate(device, descriptor, descriptorSetAllocation));
- }
-
- void BindGroupLayout::DeallocateBindGroup(BindGroup* bindGroup,
- DescriptorSetAllocation* descriptorSetAllocation) {
- mDescriptorSetAllocator->Deallocate(descriptorSetAllocation);
- mBindGroupAllocator.Deallocate(bindGroup);
- }
+ Device* device = ToBackend(GetDevice());
- void BindGroupLayout::SetLabelImpl() {
- SetDebugName(ToBackend(GetDevice()), mHandle, "Dawn_BindGroupLayout", GetLabel());
+    // DescriptorSetLayouts aren't used by execution on the GPU and can be deleted at any time,
+    // so we can destroy mHandle immediately instead of using the FencedDeleter.
+    // (Swiftshader implements this incorrectly: b/154522740.)
+    // In practice, the GPU is done with all descriptor sets because bind group deallocation
+    // refs the bind group layout so that once the bind group is finished being used, we can
+    // recycle its descriptor set.
+ if (mHandle != VK_NULL_HANDLE) {
+ device->fn.DestroyDescriptorSetLayout(device->GetVkDevice(), mHandle, nullptr);
+ mHandle = VK_NULL_HANDLE;
}
+ mDescriptorSetAllocator = nullptr;
+}
+
+VkDescriptorSetLayout BindGroupLayout::GetHandle() const {
+ return mHandle;
+}
+
+ResultOrError<Ref<BindGroup>> BindGroupLayout::AllocateBindGroup(
+ Device* device,
+ const BindGroupDescriptor* descriptor) {
+ DescriptorSetAllocation descriptorSetAllocation;
+ DAWN_TRY_ASSIGN(descriptorSetAllocation, mDescriptorSetAllocator->Allocate());
+
+ return AcquireRef(mBindGroupAllocator.Allocate(device, descriptor, descriptorSetAllocation));
+}
+
+void BindGroupLayout::DeallocateBindGroup(BindGroup* bindGroup,
+ DescriptorSetAllocation* descriptorSetAllocation) {
+ mDescriptorSetAllocator->Deallocate(descriptorSetAllocation);
+ mBindGroupAllocator.Deallocate(bindGroup);
+}
+
+void BindGroupLayout::SetLabelImpl() {
+ SetDebugName(ToBackend(GetDevice()), mHandle, "Dawn_BindGroupLayout", GetLabel());
+}
} // namespace dawn::native::vulkan
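
The descriptorCountPerType map built in Initialize() above is what ultimately sizes the per-layout descriptor pools. A standalone sketch of that computation (not Dawn code, using the raw Vulkan structs and a hypothetical ComputePoolSizes helper) is shown below.

// pool_sizes_sketch.cpp -- illustrative only
#include <vulkan/vulkan.h>
#include <cstdio>
#include <map>
#include <vector>

std::vector<VkDescriptorPoolSize> ComputePoolSizes(
    const std::vector<VkDescriptorSetLayoutBinding>& bindings) {
    // map::operator[] value-initializes missing keys to 0, the same property the
    // comment in Initialize() relies on.
    std::map<VkDescriptorType, uint32_t> countPerType;
    for (const VkDescriptorSetLayoutBinding& binding : bindings) {
        countPerType[binding.descriptorType] += binding.descriptorCount;
    }
    std::vector<VkDescriptorPoolSize> poolSizes;
    poolSizes.reserve(countPerType.size());
    for (const auto& [type, count] : countPerType) {
        poolSizes.push_back({type, count});
    }
    return poolSizes;
}

int main() {
    std::vector<VkDescriptorSetLayoutBinding> bindings = {
        {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_VERTEX_BIT, nullptr},
        {1, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr},
        {2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr},
    };
    for (const VkDescriptorPoolSize& size : ComputePoolSizes(bindings)) {
        std::printf("type %d -> %u descriptors\n", static_cast<int>(size.type),
                    size.descriptorCount);
    }
    return 0;
}
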
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/BindGroupLayoutVk.h b/chromium/third_party/dawn/src/dawn/native/vulkan/BindGroupLayoutVk.h
index 8dc41c49118..924e1218ad5 100644
--- a/chromium/third_party/dawn/src/dawn/native/vulkan/BindGroupLayoutVk.h
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/BindGroupLayoutVk.h
@@ -15,69 +15,69 @@
#ifndef SRC_DAWN_NATIVE_VULKAN_BINDGROUPLAYOUTVK_H_
#define SRC_DAWN_NATIVE_VULKAN_BINDGROUPLAYOUTVK_H_
+#include <vector>
+
#include "dawn/native/BindGroupLayout.h"
#include "dawn/common/SlabAllocator.h"
#include "dawn/common/vulkan_platform.h"
-
-#include <vector>
+#include "dawn/native/vulkan/BindGroupVk.h"
namespace dawn::native {
- class CacheKey;
+class CacheKey;
} // namespace dawn::native
namespace dawn::native::vulkan {
- class BindGroup;
- struct DescriptorSetAllocation;
- class DescriptorSetAllocator;
- class Device;
-
- VkDescriptorType VulkanDescriptorType(const BindingInfo& bindingInfo);
-
- // In Vulkan descriptor pools have to be sized to an exact number of descriptors. This means
- // it's hard to have something where we can mix different types of descriptor sets because
- // we don't know if their vector of number of descriptors will be similar.
- //
- // That's why that in addition to containing the VkDescriptorSetLayout to create
- // VkDescriptorSets for its bindgroups, the layout also acts as an allocator for the descriptor
- // sets.
- //
- // The allocations is done with one pool per descriptor set, which is inefficient, but at least
- // the pools are reused when no longer used. Minimizing the number of descriptor pool allocation
- // is important because creating them can incur GPU memory allocation which is usually an
- // expensive syscall.
- class BindGroupLayout final : public BindGroupLayoutBase {
- public:
- static ResultOrError<Ref<BindGroupLayout>> Create(
- Device* device,
- const BindGroupLayoutDescriptor* descriptor,
- PipelineCompatibilityToken pipelineCompatibilityToken);
-
- BindGroupLayout(DeviceBase* device,
- const BindGroupLayoutDescriptor* descriptor,
- PipelineCompatibilityToken pipelineCompatibilityToken);
-
- VkDescriptorSetLayout GetHandle() const;
-
- ResultOrError<Ref<BindGroup>> AllocateBindGroup(Device* device,
- const BindGroupDescriptor* descriptor);
- void DeallocateBindGroup(BindGroup* bindGroup,
- DescriptorSetAllocation* descriptorSetAllocation);
-
- private:
- ~BindGroupLayout() override;
- MaybeError Initialize();
- void DestroyImpl() override;
-
- // Dawn API
- void SetLabelImpl() override;
-
- VkDescriptorSetLayout mHandle = VK_NULL_HANDLE;
-
- SlabAllocator<BindGroup> mBindGroupAllocator;
- Ref<DescriptorSetAllocator> mDescriptorSetAllocator;
- };
+struct DescriptorSetAllocation;
+class DescriptorSetAllocator;
+class Device;
+
+VkDescriptorType VulkanDescriptorType(const BindingInfo& bindingInfo);
+
+// In Vulkan, descriptor pools have to be sized to an exact number of descriptors. This makes
+// it hard to share pools between different kinds of descriptor sets, because their per-type
+// descriptor counts are unlikely to match.
+//
+// That's why, in addition to containing the VkDescriptorSetLayout used to create
+// VkDescriptorSets for its bindgroups, the layout also acts as an allocator for the descriptor
+// sets.
+//
+// Allocation is done with one pool per descriptor set, which is inefficient, but at least the
+// pools are recycled once they are no longer used. Minimizing the number of descriptor pool
+// allocations is important because creating them can incur GPU memory allocation, which is
+// usually an expensive syscall.
+class BindGroupLayout final : public BindGroupLayoutBase {
+ public:
+ static ResultOrError<Ref<BindGroupLayout>> Create(
+ Device* device,
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken);
+
+ BindGroupLayout(DeviceBase* device,
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken);
+
+ VkDescriptorSetLayout GetHandle() const;
+
+ ResultOrError<Ref<BindGroup>> AllocateBindGroup(Device* device,
+ const BindGroupDescriptor* descriptor);
+ void DeallocateBindGroup(BindGroup* bindGroup,
+ DescriptorSetAllocation* descriptorSetAllocation);
+
+ private:
+ ~BindGroupLayout() override;
+ MaybeError Initialize();
+ void DestroyImpl() override;
+
+ // Dawn API
+ void SetLabelImpl() override;
+
+ VkDescriptorSetLayout mHandle = VK_NULL_HANDLE;
+
+ SlabAllocator<BindGroup> mBindGroupAllocator;
+ Ref<DescriptorSetAllocator> mDescriptorSetAllocator;
+};
} // namespace dawn::native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/BindGroupVk.cpp b/chromium/third_party/dawn/src/dawn/native/vulkan/BindGroupVk.cpp
index b55c10f62ed..0eafbdb0a71 100644
--- a/chromium/third_party/dawn/src/dawn/native/vulkan/BindGroupVk.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/BindGroupVk.cpp
@@ -28,137 +28,135 @@
namespace dawn::native::vulkan {
- // static
- ResultOrError<Ref<BindGroup>> BindGroup::Create(Device* device,
- const BindGroupDescriptor* descriptor) {
- return ToBackend(descriptor->layout)->AllocateBindGroup(device, descriptor);
- }
-
- BindGroup::BindGroup(Device* device,
- const BindGroupDescriptor* descriptor,
- DescriptorSetAllocation descriptorSetAllocation)
- : BindGroupBase(this, device, descriptor),
- mDescriptorSetAllocation(descriptorSetAllocation) {
- // Now do a write of a single descriptor set with all possible chained data allocated on the
- // stack.
- const uint32_t bindingCount = static_cast<uint32_t>((GetLayout()->GetBindingCount()));
- ityp::stack_vec<uint32_t, VkWriteDescriptorSet, kMaxOptimalBindingsPerGroup> writes(
- bindingCount);
- ityp::stack_vec<uint32_t, VkDescriptorBufferInfo, kMaxOptimalBindingsPerGroup>
- writeBufferInfo(bindingCount);
- ityp::stack_vec<uint32_t, VkDescriptorImageInfo, kMaxOptimalBindingsPerGroup>
- writeImageInfo(bindingCount);
-
- uint32_t numWrites = 0;
- for (const auto [_, bindingIndex] : GetLayout()->GetBindingMap()) {
- const BindingInfo& bindingInfo = GetLayout()->GetBindingInfo(bindingIndex);
-
- auto& write = writes[numWrites];
- write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
- write.pNext = nullptr;
- write.dstSet = GetHandle();
- write.dstBinding = static_cast<uint32_t>(bindingIndex);
- write.dstArrayElement = 0;
- write.descriptorCount = 1;
- write.descriptorType = VulkanDescriptorType(bindingInfo);
-
- switch (bindingInfo.bindingType) {
- case BindingInfoType::Buffer: {
- BufferBinding binding = GetBindingAsBufferBinding(bindingIndex);
-
- VkBuffer handle = ToBackend(binding.buffer)->GetHandle();
- if (handle == VK_NULL_HANDLE) {
- // The Buffer was destroyed. Skip this descriptor write since it would be
- // a Vulkan Validation Layers error. This bind group won't be used as it
- // is an error to submit a command buffer that references destroyed
- // resources.
- continue;
- }
- writeBufferInfo[numWrites].buffer = handle;
- writeBufferInfo[numWrites].offset = binding.offset;
- writeBufferInfo[numWrites].range = binding.size;
- write.pBufferInfo = &writeBufferInfo[numWrites];
- break;
+// static
+ResultOrError<Ref<BindGroup>> BindGroup::Create(Device* device,
+ const BindGroupDescriptor* descriptor) {
+ return ToBackend(descriptor->layout)->AllocateBindGroup(device, descriptor);
+}
+
+BindGroup::BindGroup(Device* device,
+ const BindGroupDescriptor* descriptor,
+ DescriptorSetAllocation descriptorSetAllocation)
+ : BindGroupBase(this, device, descriptor), mDescriptorSetAllocation(descriptorSetAllocation) {
+ // Now do a write of a single descriptor set with all possible chained data allocated on the
+ // stack.
+ const uint32_t bindingCount = static_cast<uint32_t>((GetLayout()->GetBindingCount()));
+ ityp::stack_vec<uint32_t, VkWriteDescriptorSet, kMaxOptimalBindingsPerGroup> writes(
+ bindingCount);
+ ityp::stack_vec<uint32_t, VkDescriptorBufferInfo, kMaxOptimalBindingsPerGroup> writeBufferInfo(
+ bindingCount);
+ ityp::stack_vec<uint32_t, VkDescriptorImageInfo, kMaxOptimalBindingsPerGroup> writeImageInfo(
+ bindingCount);
+
+ uint32_t numWrites = 0;
+ for (const auto [_, bindingIndex] : GetLayout()->GetBindingMap()) {
+ const BindingInfo& bindingInfo = GetLayout()->GetBindingInfo(bindingIndex);
+
+ auto& write = writes[numWrites];
+ write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
+ write.pNext = nullptr;
+ write.dstSet = GetHandle();
+ write.dstBinding = static_cast<uint32_t>(bindingIndex);
+ write.dstArrayElement = 0;
+ write.descriptorCount = 1;
+ write.descriptorType = VulkanDescriptorType(bindingInfo);
+
+ switch (bindingInfo.bindingType) {
+ case BindingInfoType::Buffer: {
+ BufferBinding binding = GetBindingAsBufferBinding(bindingIndex);
+
+ VkBuffer handle = ToBackend(binding.buffer)->GetHandle();
+ if (handle == VK_NULL_HANDLE) {
+ // The Buffer was destroyed. Skip this descriptor write since it would be
+ // a Vulkan Validation Layers error. This bind group won't be used as it
+ // is an error to submit a command buffer that references destroyed
+ // resources.
+ continue;
}
+ writeBufferInfo[numWrites].buffer = handle;
+ writeBufferInfo[numWrites].offset = binding.offset;
+ writeBufferInfo[numWrites].range = binding.size;
+ write.pBufferInfo = &writeBufferInfo[numWrites];
+ break;
+ }
- case BindingInfoType::Sampler: {
- Sampler* sampler = ToBackend(GetBindingAsSampler(bindingIndex));
- writeImageInfo[numWrites].sampler = sampler->GetHandle();
- write.pImageInfo = &writeImageInfo[numWrites];
- break;
- }
+ case BindingInfoType::Sampler: {
+ Sampler* sampler = ToBackend(GetBindingAsSampler(bindingIndex));
+ writeImageInfo[numWrites].sampler = sampler->GetHandle();
+ write.pImageInfo = &writeImageInfo[numWrites];
+ break;
+ }
- case BindingInfoType::Texture: {
- TextureView* view = ToBackend(GetBindingAsTextureView(bindingIndex));
-
- VkImageView handle = view->GetHandle();
- if (handle == VK_NULL_HANDLE) {
- // The Texture was destroyed before the TextureView was created.
- // Skip this descriptor write since it would be
- // a Vulkan Validation Layers error. This bind group won't be used as it
- // is an error to submit a command buffer that references destroyed
- // resources.
- continue;
- }
- writeImageInfo[numWrites].imageView = handle;
-
- // The layout may be GENERAL here because of interactions between the Sampled
- // and ReadOnlyStorage usages. See the logic in VulkanImageLayout.
- writeImageInfo[numWrites].imageLayout = VulkanImageLayout(
- ToBackend(view->GetTexture()), wgpu::TextureUsage::TextureBinding);
-
- write.pImageInfo = &writeImageInfo[numWrites];
- break;
+ case BindingInfoType::Texture: {
+ TextureView* view = ToBackend(GetBindingAsTextureView(bindingIndex));
+
+ VkImageView handle = view->GetHandle();
+ if (handle == VK_NULL_HANDLE) {
+ // The Texture was destroyed before the TextureView was created.
+ // Skip this descriptor write since it would be
+ // a Vulkan Validation Layers error. This bind group won't be used as it
+ // is an error to submit a command buffer that references destroyed
+ // resources.
+ continue;
}
+ writeImageInfo[numWrites].imageView = handle;
+
+ // The layout may be GENERAL here because of interactions between the Sampled
+ // and ReadOnlyStorage usages. See the logic in VulkanImageLayout.
+ writeImageInfo[numWrites].imageLayout = VulkanImageLayout(
+ ToBackend(view->GetTexture()), wgpu::TextureUsage::TextureBinding);
+
+ write.pImageInfo = &writeImageInfo[numWrites];
+ break;
+ }
- case BindingInfoType::StorageTexture: {
- TextureView* view = ToBackend(GetBindingAsTextureView(bindingIndex));
-
- VkImageView handle = view->GetHandle();
- if (handle == VK_NULL_HANDLE) {
- // The Texture was destroyed before the TextureView was created.
- // Skip this descriptor write since it would be
- // a Vulkan Validation Layers error. This bind group won't be used as it
- // is an error to submit a command buffer that references destroyed
- // resources.
- continue;
- }
- writeImageInfo[numWrites].imageView = handle;
- writeImageInfo[numWrites].imageLayout = VK_IMAGE_LAYOUT_GENERAL;
-
- write.pImageInfo = &writeImageInfo[numWrites];
- break;
+ case BindingInfoType::StorageTexture: {
+ TextureView* view = ToBackend(GetBindingAsTextureView(bindingIndex));
+
+ VkImageView handle = view->GetHandle();
+ if (handle == VK_NULL_HANDLE) {
+ // The Texture was destroyed before the TextureView was created.
+ // Skip this descriptor write since it would be
+ // a Vulkan Validation Layers error. This bind group won't be used as it
+ // is an error to submit a command buffer that references destroyed
+ // resources.
+ continue;
}
+ writeImageInfo[numWrites].imageView = handle;
+ writeImageInfo[numWrites].imageLayout = VK_IMAGE_LAYOUT_GENERAL;
- case BindingInfoType::ExternalTexture:
- UNREACHABLE();
- break;
+ write.pImageInfo = &writeImageInfo[numWrites];
+ break;
}
- numWrites++;
+ case BindingInfoType::ExternalTexture:
+ UNREACHABLE();
+ break;
}
- // TODO(crbug.com/dawn/855): Batch these updates
- device->fn.UpdateDescriptorSets(device->GetVkDevice(), numWrites, writes.data(), 0,
- nullptr);
-
- SetLabelImpl();
+ numWrites++;
}
- BindGroup::~BindGroup() = default;
+ // TODO(crbug.com/dawn/855): Batch these updates
+ device->fn.UpdateDescriptorSets(device->GetVkDevice(), numWrites, writes.data(), 0, nullptr);
- void BindGroup::DestroyImpl() {
- BindGroupBase::DestroyImpl();
- ToBackend(GetLayout())->DeallocateBindGroup(this, &mDescriptorSetAllocation);
- }
+ SetLabelImpl();
+}
- VkDescriptorSet BindGroup::GetHandle() const {
- return mDescriptorSetAllocation.set;
- }
+BindGroup::~BindGroup() = default;
- void BindGroup::SetLabelImpl() {
- SetDebugName(ToBackend(GetDevice()), mDescriptorSetAllocation.set, "Dawn_BindGroup",
- GetLabel());
- }
+void BindGroup::DestroyImpl() {
+ BindGroupBase::DestroyImpl();
+ ToBackend(GetLayout())->DeallocateBindGroup(this, &mDescriptorSetAllocation);
+}
+
+VkDescriptorSet BindGroup::GetHandle() const {
+ return mDescriptorSetAllocation.set;
+}
+
+void BindGroup::SetLabelImpl() {
+ SetDebugName(ToBackend(GetDevice()), mDescriptorSetAllocation.set, "Dawn_BindGroup",
+ GetLabel());
+}
} // namespace dawn::native::vulkan
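
BindGroup's constructor above batches one VkWriteDescriptorSet per live binding into a single vkUpdateDescriptorSets call. For reference, a minimal sketch of a single uniform-buffer write in the same pattern is shown below; it is not Dawn code, the function name is hypothetical, and the device, descriptor set, and buffer handles are assumed to already exist.

// descriptor_write_sketch.cpp -- illustrative only
#include <vulkan/vulkan.h>

void WriteUniformBuffer(VkDevice device, VkDescriptorSet set, uint32_t binding,
                        VkBuffer buffer, VkDeviceSize offset, VkDeviceSize range) {
    // Buffer descriptors point at the info struct through pBufferInfo.
    VkDescriptorBufferInfo bufferInfo = {};
    bufferInfo.buffer = buffer;
    bufferInfo.offset = offset;
    bufferInfo.range = range;

    VkWriteDescriptorSet write = {};
    write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
    write.dstSet = set;
    write.dstBinding = binding;
    write.dstArrayElement = 0;
    write.descriptorCount = 1;
    write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
    write.pBufferInfo = &bufferInfo;

    // Dawn issues one call with an array of writes; this is the one-element case.
    vkUpdateDescriptorSets(device, 1, &write, 0, nullptr);
}
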
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/BindGroupVk.h b/chromium/third_party/dawn/src/dawn/native/vulkan/BindGroupVk.h
index af7680aebf8..9d1d9a4459e 100644
--- a/chromium/third_party/dawn/src/dawn/native/vulkan/BindGroupVk.h
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/BindGroupVk.h
@@ -19,36 +19,35 @@
#include "dawn/common/PlacementAllocated.h"
#include "dawn/common/vulkan_platform.h"
-#include "dawn/native/vulkan/BindGroupLayoutVk.h"
#include "dawn/native/vulkan/DescriptorSetAllocation.h"
namespace dawn::native::vulkan {
- class Device;
+class Device;
- class BindGroup final : public BindGroupBase, public PlacementAllocated {
- public:
- static ResultOrError<Ref<BindGroup>> Create(Device* device,
- const BindGroupDescriptor* descriptor);
+class BindGroup final : public BindGroupBase, public PlacementAllocated {
+ public:
+ static ResultOrError<Ref<BindGroup>> Create(Device* device,
+ const BindGroupDescriptor* descriptor);
- BindGroup(Device* device,
- const BindGroupDescriptor* descriptor,
- DescriptorSetAllocation descriptorSetAllocation);
+ BindGroup(Device* device,
+ const BindGroupDescriptor* descriptor,
+ DescriptorSetAllocation descriptorSetAllocation);
- VkDescriptorSet GetHandle() const;
+ VkDescriptorSet GetHandle() const;
- private:
- ~BindGroup() override;
+ private:
+ ~BindGroup() override;
- void DestroyImpl() override;
+ void DestroyImpl() override;
- // Dawn API
- void SetLabelImpl() override;
+ // Dawn API
+ void SetLabelImpl() override;
- // The descriptor set in this allocation outlives the BindGroup because it is owned by
- // the BindGroupLayout which is referenced by the BindGroup.
- DescriptorSetAllocation mDescriptorSetAllocation;
- };
+ // The descriptor set in this allocation outlives the BindGroup because it is owned by
+ // the BindGroupLayout which is referenced by the BindGroup.
+ DescriptorSetAllocation mDescriptorSetAllocation;
+};
} // namespace dawn::native::vulkan
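
The BufferVk.cpp hunks that follow pad and align the buffer size before creating the VkBuffer: one extra byte for vertex/index buffers, a minimum of 4 bytes, alignment to 4 for vkCmdFillBuffer, and overflow checks before each step. A standalone sketch of those rules (not Dawn code, with a hypothetical ComputeAllocatedSize helper) is shown below.

// buffer_size_sketch.cpp -- illustrative only
#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <limits>
#include <optional>

std::optional<uint64_t> ComputeAllocatedSize(uint64_t size, bool usedAsVertexOrIndex) {
    constexpr uint64_t kAlignment = 4u;  // vkCmdFillBuffer needs a multiple of 4
    const uint64_t extraBytes = usedAsVertexOrIndex ? 1u : 0u;

    if (size > std::numeric_limits<uint64_t>::max() - extraBytes) {
        return std::nullopt;  // adding the extra byte would overflow
    }
    size += extraBytes;

    // Clamped accesses must stay in bounds, and Vulkan rejects zero-sized buffers.
    size = std::max<uint64_t>(size, 4u);

    if (size > std::numeric_limits<uint64_t>::max() - (kAlignment - 1)) {
        return std::nullopt;  // rounding up to the alignment would overflow
    }
    return (size + kAlignment - 1) & ~(kAlignment - 1);  // Align(size, 4)
}

int main() {
    std::printf("%llu\n", static_cast<unsigned long long>(*ComputeAllocatedSize(0, false)));  // 4
    std::printf("%llu\n", static_cast<unsigned long long>(*ComputeAllocatedSize(10, true)));  // 12
    return 0;
}
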
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/BufferVk.cpp b/chromium/third_party/dawn/src/dawn/native/vulkan/BufferVk.cpp
index 4045c50e755..5bec8da4aca 100644
--- a/chromium/third_party/dawn/src/dawn/native/vulkan/BufferVk.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/BufferVk.cpp
@@ -14,6 +14,11 @@
#include "dawn/native/vulkan/BufferVk.h"
+#include <algorithm>
+#include <cstring>
+#include <limits>
+#include <utility>
+
#include "dawn/native/CommandBuffer.h"
#include "dawn/native/vulkan/DeviceVk.h"
#include "dawn/native/vulkan/FencedDeleter.h"
@@ -22,391 +27,386 @@
#include "dawn/native/vulkan/UtilsVulkan.h"
#include "dawn/native/vulkan/VulkanError.h"
-#include <cstring>
-
namespace dawn::native::vulkan {
- namespace {
-
- VkBufferUsageFlags VulkanBufferUsage(wgpu::BufferUsage usage) {
- VkBufferUsageFlags flags = 0;
-
- if (usage & wgpu::BufferUsage::CopySrc) {
- flags |= VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
- }
- if (usage & wgpu::BufferUsage::CopyDst) {
- flags |= VK_BUFFER_USAGE_TRANSFER_DST_BIT;
- }
- if (usage & wgpu::BufferUsage::Index) {
- flags |= VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
- }
- if (usage & wgpu::BufferUsage::Vertex) {
- flags |= VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
- }
- if (usage & wgpu::BufferUsage::Uniform) {
- flags |= VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
- }
- if (usage &
- (wgpu::BufferUsage::Storage | kInternalStorageBuffer | kReadOnlyStorageBuffer)) {
- flags |= VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
- }
- if (usage & wgpu::BufferUsage::Indirect) {
- flags |= VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT;
- }
- if (usage & wgpu::BufferUsage::QueryResolve) {
- flags |= VK_BUFFER_USAGE_TRANSFER_DST_BIT;
- }
-
- return flags;
- }
-
- VkPipelineStageFlags VulkanPipelineStage(wgpu::BufferUsage usage) {
- VkPipelineStageFlags flags = 0;
-
- if (usage & kMappableBufferUsages) {
- flags |= VK_PIPELINE_STAGE_HOST_BIT;
- }
- if (usage & (wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst)) {
- flags |= VK_PIPELINE_STAGE_TRANSFER_BIT;
- }
- if (usage & (wgpu::BufferUsage::Index | wgpu::BufferUsage::Vertex)) {
- flags |= VK_PIPELINE_STAGE_VERTEX_INPUT_BIT;
- }
- if (usage & (wgpu::BufferUsage::Uniform | wgpu::BufferUsage::Storage |
- kInternalStorageBuffer | kReadOnlyStorageBuffer)) {
- flags |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT |
- VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
- VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
- }
- if (usage & wgpu::BufferUsage::Indirect) {
- flags |= VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT;
- }
- if (usage & wgpu::BufferUsage::QueryResolve) {
- flags |= VK_PIPELINE_STAGE_TRANSFER_BIT;
- }
-
- return flags;
- }
-
- VkAccessFlags VulkanAccessFlags(wgpu::BufferUsage usage) {
- VkAccessFlags flags = 0;
-
- if (usage & wgpu::BufferUsage::MapRead) {
- flags |= VK_ACCESS_HOST_READ_BIT;
- }
- if (usage & wgpu::BufferUsage::MapWrite) {
- flags |= VK_ACCESS_HOST_WRITE_BIT;
- }
- if (usage & wgpu::BufferUsage::CopySrc) {
- flags |= VK_ACCESS_TRANSFER_READ_BIT;
- }
- if (usage & wgpu::BufferUsage::CopyDst) {
- flags |= VK_ACCESS_TRANSFER_WRITE_BIT;
- }
- if (usage & wgpu::BufferUsage::Index) {
- flags |= VK_ACCESS_INDEX_READ_BIT;
- }
- if (usage & wgpu::BufferUsage::Vertex) {
- flags |= VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
- }
- if (usage & wgpu::BufferUsage::Uniform) {
- flags |= VK_ACCESS_UNIFORM_READ_BIT;
- }
- if (usage & (wgpu::BufferUsage::Storage | kInternalStorageBuffer)) {
- flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
- }
- if (usage & kReadOnlyStorageBuffer) {
- flags |= VK_ACCESS_SHADER_READ_BIT;
- }
- if (usage & wgpu::BufferUsage::Indirect) {
- flags |= VK_ACCESS_INDIRECT_COMMAND_READ_BIT;
- }
- if (usage & wgpu::BufferUsage::QueryResolve) {
- flags |= VK_ACCESS_TRANSFER_WRITE_BIT;
- }
-
- return flags;
- }
+namespace {
- } // namespace
+VkBufferUsageFlags VulkanBufferUsage(wgpu::BufferUsage usage) {
+ VkBufferUsageFlags flags = 0;
- // static
- ResultOrError<Ref<Buffer>> Buffer::Create(Device* device, const BufferDescriptor* descriptor) {
- Ref<Buffer> buffer = AcquireRef(new Buffer(device, descriptor));
- DAWN_TRY(buffer->Initialize(descriptor->mappedAtCreation));
- return std::move(buffer);
+ if (usage & wgpu::BufferUsage::CopySrc) {
+ flags |= VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
+ }
+ if (usage & wgpu::BufferUsage::CopyDst) {
+ flags |= VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+ }
+ if (usage & wgpu::BufferUsage::Index) {
+ flags |= VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
+ }
+ if (usage & wgpu::BufferUsage::Vertex) {
+ flags |= VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
+ }
+ if (usage & wgpu::BufferUsage::Uniform) {
+ flags |= VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
+ }
+ if (usage & (wgpu::BufferUsage::Storage | kInternalStorageBuffer | kReadOnlyStorageBuffer)) {
+ flags |= VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
+ }
+ if (usage & wgpu::BufferUsage::Indirect) {
+ flags |= VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT;
+ }
+ if (usage & wgpu::BufferUsage::QueryResolve) {
+ flags |= VK_BUFFER_USAGE_TRANSFER_DST_BIT;
}
- MaybeError Buffer::Initialize(bool mappedAtCreation) {
- // vkCmdFillBuffer requires the size to be a multiple of 4.
- constexpr size_t kAlignment = 4u;
+ return flags;
+}
- uint32_t extraBytes = 0u;
- if (GetUsage() & (wgpu::BufferUsage::Vertex | wgpu::BufferUsage::Index)) {
- // vkCmdSetIndexBuffer and vkCmdSetVertexBuffer are invalid if the offset
- // is equal to the whole buffer size. Allocate at least one more byte so it
- // is valid to setVertex/IndexBuffer with a zero-sized range at the end
- // of the buffer with (offset=buffer.size, size=0).
- extraBytes = 1u;
- }
+VkPipelineStageFlags VulkanPipelineStage(wgpu::BufferUsage usage) {
+ VkPipelineStageFlags flags = 0;
- uint64_t size = GetSize();
- if (size > std::numeric_limits<uint64_t>::max() - extraBytes) {
- return DAWN_OUT_OF_MEMORY_ERROR("Buffer allocation is too large");
- }
+ if (usage & kMappableBufferUsages) {
+ flags |= VK_PIPELINE_STAGE_HOST_BIT;
+ }
+ if (usage & (wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst)) {
+ flags |= VK_PIPELINE_STAGE_TRANSFER_BIT;
+ }
+ if (usage & (wgpu::BufferUsage::Index | wgpu::BufferUsage::Vertex)) {
+ flags |= VK_PIPELINE_STAGE_VERTEX_INPUT_BIT;
+ }
+ if (usage & (wgpu::BufferUsage::Uniform | wgpu::BufferUsage::Storage | kInternalStorageBuffer |
+ kReadOnlyStorageBuffer)) {
+ flags |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
+ VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
+ }
+ if (usage & wgpu::BufferUsage::Indirect) {
+ flags |= VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT;
+ }
+ if (usage & wgpu::BufferUsage::QueryResolve) {
+ flags |= VK_PIPELINE_STAGE_TRANSFER_BIT;
+ }
- size += extraBytes;
+ return flags;
+}
- // Allocate at least 4 bytes so clamped accesses are always in bounds.
- // Also, Vulkan requires the size to be non-zero.
- size = std::max(size, uint64_t(4u));
+VkAccessFlags VulkanAccessFlags(wgpu::BufferUsage usage) {
+ VkAccessFlags flags = 0;
- if (size > std::numeric_limits<uint64_t>::max() - kAlignment) {
- // Alignment would overlow.
- return DAWN_OUT_OF_MEMORY_ERROR("Buffer allocation is too large");
- }
- mAllocatedSize = Align(size, kAlignment);
-
- // Avoid passing ludicrously large sizes to drivers because it causes issues: drivers add
- // some constants to the size passed and align it, but for values close to the maximum
- // VkDeviceSize this can cause overflows and makes drivers crash or return bad sizes in the
- // VkmemoryRequirements. See https://gitlab.khronos.org/vulkan/vulkan/issues/1904
- // Any size with one of two top bits of VkDeviceSize set is a HUGE allocation and we can
- // safely return an OOM error.
- if (mAllocatedSize & (uint64_t(3) << uint64_t(62))) {
- return DAWN_OUT_OF_MEMORY_ERROR("Buffer size is HUGE and could cause overflows");
- }
+ if (usage & wgpu::BufferUsage::MapRead) {
+ flags |= VK_ACCESS_HOST_READ_BIT;
+ }
+ if (usage & wgpu::BufferUsage::MapWrite) {
+ flags |= VK_ACCESS_HOST_WRITE_BIT;
+ }
+ if (usage & wgpu::BufferUsage::CopySrc) {
+ flags |= VK_ACCESS_TRANSFER_READ_BIT;
+ }
+ if (usage & wgpu::BufferUsage::CopyDst) {
+ flags |= VK_ACCESS_TRANSFER_WRITE_BIT;
+ }
+ if (usage & wgpu::BufferUsage::Index) {
+ flags |= VK_ACCESS_INDEX_READ_BIT;
+ }
+ if (usage & wgpu::BufferUsage::Vertex) {
+ flags |= VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
+ }
+ if (usage & wgpu::BufferUsage::Uniform) {
+ flags |= VK_ACCESS_UNIFORM_READ_BIT;
+ }
+ if (usage & (wgpu::BufferUsage::Storage | kInternalStorageBuffer)) {
+ flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
+ }
+ if (usage & kReadOnlyStorageBuffer) {
+ flags |= VK_ACCESS_SHADER_READ_BIT;
+ }
+ if (usage & wgpu::BufferUsage::Indirect) {
+ flags |= VK_ACCESS_INDIRECT_COMMAND_READ_BIT;
+ }
+ if (usage & wgpu::BufferUsage::QueryResolve) {
+ flags |= VK_ACCESS_TRANSFER_WRITE_BIT;
+ }
- VkBufferCreateInfo createInfo;
- createInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
- createInfo.pNext = nullptr;
- createInfo.flags = 0;
- createInfo.size = mAllocatedSize;
- // Add CopyDst for non-mappable buffer initialization with mappedAtCreation
- // and robust resource initialization.
- createInfo.usage = VulkanBufferUsage(GetUsage() | wgpu::BufferUsage::CopyDst);
- createInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
- createInfo.queueFamilyIndexCount = 0;
- createInfo.pQueueFamilyIndices = 0;
-
- Device* device = ToBackend(GetDevice());
- DAWN_TRY(CheckVkOOMThenSuccess(
- device->fn.CreateBuffer(device->GetVkDevice(), &createInfo, nullptr, &*mHandle),
- "vkCreateBuffer"));
-
- // Gather requirements for the buffer's memory and allocate it.
- VkMemoryRequirements requirements;
- device->fn.GetBufferMemoryRequirements(device->GetVkDevice(), mHandle, &requirements);
-
- MemoryKind requestKind = MemoryKind::Linear;
- if (GetUsage() & kMappableBufferUsages) {
- requestKind = MemoryKind::LinearMappable;
- }
- DAWN_TRY_ASSIGN(mMemoryAllocation,
- device->GetResourceMemoryAllocator()->Allocate(requirements, requestKind));
-
- // Finally associate it with the buffer.
- DAWN_TRY(CheckVkSuccess(
- device->fn.BindBufferMemory(device->GetVkDevice(), mHandle,
- ToBackend(mMemoryAllocation.GetResourceHeap())->GetMemory(),
- mMemoryAllocation.GetOffset()),
- "vkBindBufferMemory"));
-
- // The buffers with mappedAtCreation == true will be initialized in
- // BufferBase::MapAtCreation().
- if (device->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting) &&
- !mappedAtCreation) {
- ClearBuffer(device->GetPendingRecordingContext(), 0x01010101);
- }
+ return flags;
+}
+
+} // namespace
+
+// static
+ResultOrError<Ref<Buffer>> Buffer::Create(Device* device, const BufferDescriptor* descriptor) {
+ Ref<Buffer> buffer = AcquireRef(new Buffer(device, descriptor));
+ DAWN_TRY(buffer->Initialize(descriptor->mappedAtCreation));
+ return std::move(buffer);
+}
+
+MaybeError Buffer::Initialize(bool mappedAtCreation) {
+ // vkCmdFillBuffer requires the size to be a multiple of 4.
+ constexpr size_t kAlignment = 4u;
+
+ uint32_t extraBytes = 0u;
+ if (GetUsage() & (wgpu::BufferUsage::Vertex | wgpu::BufferUsage::Index)) {
+        // vkCmdBindIndexBuffer and vkCmdBindVertexBuffers are invalid if the offset
+        // is equal to the whole buffer size. Allocate at least one more byte so it
+        // is valid to call SetVertex/IndexBuffer with a zero-sized range at the end
+        // of the buffer with (offset=buffer.size, size=0).
+ extraBytes = 1u;
+ }
- // Initialize the padding bytes to zero.
- if (device->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse) && !mappedAtCreation) {
- uint32_t paddingBytes = GetAllocatedSize() - GetSize();
- if (paddingBytes > 0) {
- uint32_t clearSize = Align(paddingBytes, 4);
- uint64_t clearOffset = GetAllocatedSize() - clearSize;
+ uint64_t size = GetSize();
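+    // Guard against overflow before adding the extra byte to the requested size.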
+ if (size > std::numeric_limits<uint64_t>::max() - extraBytes) {
+ return DAWN_OUT_OF_MEMORY_ERROR("Buffer allocation is too large");
+ }
- CommandRecordingContext* recordingContext = device->GetPendingRecordingContext();
- ClearBuffer(recordingContext, 0, clearOffset, clearSize);
- }
- }
+ size += extraBytes;
- SetLabelImpl();
+ // Allocate at least 4 bytes so clamped accesses are always in bounds.
+ // Also, Vulkan requires the size to be non-zero.
+ size = std::max(size, uint64_t(4u));
- return {};
+ if (size > std::numeric_limits<uint64_t>::max() - kAlignment) {
+        // Aligning the size up would overflow.
+ return DAWN_OUT_OF_MEMORY_ERROR("Buffer allocation is too large");
+ }
+ mAllocatedSize = Align(size, kAlignment);
+
+ // Avoid passing ludicrously large sizes to drivers because it causes issues: drivers add
+ // some constants to the size passed and align it, but for values close to the maximum
+    // VkDeviceSize this can cause overflows and make drivers crash or return bad sizes in the
+    // VkMemoryRequirements. See https://gitlab.khronos.org/vulkan/vulkan/issues/1904
+    // Any size with either of the two top bits of VkDeviceSize set is a HUGE allocation and we
+    // can safely return an OOM error.
+ if (mAllocatedSize & (uint64_t(3) << uint64_t(62))) {
+ return DAWN_OUT_OF_MEMORY_ERROR("Buffer size is HUGE and could cause overflows");
}
- Buffer::~Buffer() = default;
-
- VkBuffer Buffer::GetHandle() const {
- return mHandle;
+ VkBufferCreateInfo createInfo;
+ createInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
+ createInfo.pNext = nullptr;
+ createInfo.flags = 0;
+ createInfo.size = mAllocatedSize;
+ // Add CopyDst for non-mappable buffer initialization with mappedAtCreation
+ // and robust resource initialization.
+ createInfo.usage = VulkanBufferUsage(GetUsage() | wgpu::BufferUsage::CopyDst);
+ createInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+ createInfo.queueFamilyIndexCount = 0;
+ createInfo.pQueueFamilyIndices = 0;
+
+ Device* device = ToBackend(GetDevice());
+ DAWN_TRY(CheckVkOOMThenSuccess(
+ device->fn.CreateBuffer(device->GetVkDevice(), &createInfo, nullptr, &*mHandle),
+ "vkCreateBuffer"));
+
+ // Gather requirements for the buffer's memory and allocate it.
+ VkMemoryRequirements requirements;
+ device->fn.GetBufferMemoryRequirements(device->GetVkDevice(), mHandle, &requirements);
+
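+    // Buffers that can be mapped must be placed in host-mappable memory.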
+ MemoryKind requestKind = MemoryKind::Linear;
+ if (GetUsage() & kMappableBufferUsages) {
+ requestKind = MemoryKind::LinearMappable;
+ }
+ DAWN_TRY_ASSIGN(mMemoryAllocation,
+ device->GetResourceMemoryAllocator()->Allocate(requirements, requestKind));
+
+ // Finally associate it with the buffer.
+ DAWN_TRY(CheckVkSuccess(
+ device->fn.BindBufferMemory(device->GetVkDevice(), mHandle,
+ ToBackend(mMemoryAllocation.GetResourceHeap())->GetMemory(),
+ mMemoryAllocation.GetOffset()),
+ "vkBindBufferMemory"));
+
+    // Buffers created with mappedAtCreation == true will be initialized in
+ // BufferBase::MapAtCreation().
+ if (device->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting) &&
+ !mappedAtCreation) {
+ ClearBuffer(device->GetPendingRecordingContext(), 0x01010101);
}
- void Buffer::TransitionUsageNow(CommandRecordingContext* recordingContext,
- wgpu::BufferUsage usage) {
- VkBufferMemoryBarrier barrier;
- VkPipelineStageFlags srcStages = 0;
- VkPipelineStageFlags dstStages = 0;
+ // Initialize the padding bytes to zero.
+ if (device->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse) && !mappedAtCreation) {
+ uint32_t paddingBytes = GetAllocatedSize() - GetSize();
+ if (paddingBytes > 0) {
+ uint32_t clearSize = Align(paddingBytes, 4);
+ uint64_t clearOffset = GetAllocatedSize() - clearSize;
- if (TransitionUsageAndGetResourceBarrier(usage, &barrier, &srcStages, &dstStages)) {
- ASSERT(srcStages != 0 && dstStages != 0);
- ToBackend(GetDevice())
- ->fn.CmdPipelineBarrier(recordingContext->commandBuffer, srcStages, dstStages, 0, 0,
- nullptr, 1u, &barrier, 0, nullptr);
+ CommandRecordingContext* recordingContext = device->GetPendingRecordingContext();
+ ClearBuffer(recordingContext, 0, clearOffset, clearSize);
}
}
- bool Buffer::TransitionUsageAndGetResourceBarrier(wgpu::BufferUsage usage,
- VkBufferMemoryBarrier* barrier,
- VkPipelineStageFlags* srcStages,
- VkPipelineStageFlags* dstStages) {
- bool lastIncludesTarget = IsSubset(usage, mLastUsage);
- bool lastReadOnly = IsSubset(mLastUsage, kReadOnlyBufferUsages);
+ SetLabelImpl();
- // We can skip transitions to already current read-only usages.
- if (lastIncludesTarget && lastReadOnly) {
- return false;
- }
+ return {};
+}
- // Special-case for the initial transition: Vulkan doesn't allow access flags to be 0.
- if (mLastUsage == wgpu::BufferUsage::None) {
- mLastUsage = usage;
- return false;
- }
+Buffer::~Buffer() = default;
- *srcStages |= VulkanPipelineStage(mLastUsage);
- *dstStages |= VulkanPipelineStage(usage);
+VkBuffer Buffer::GetHandle() const {
+ return mHandle;
+}
- barrier->sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
- barrier->pNext = nullptr;
- barrier->srcAccessMask = VulkanAccessFlags(mLastUsage);
- barrier->dstAccessMask = VulkanAccessFlags(usage);
- barrier->srcQueueFamilyIndex = 0;
- barrier->dstQueueFamilyIndex = 0;
- barrier->buffer = mHandle;
- barrier->offset = 0;
- // VK_WHOLE_SIZE doesn't work on old Windows Intel Vulkan drivers, so we don't use it.
- barrier->size = GetAllocatedSize();
-
- mLastUsage = usage;
+void Buffer::TransitionUsageNow(CommandRecordingContext* recordingContext,
+ wgpu::BufferUsage usage) {
+ VkBufferMemoryBarrier barrier;
+ VkPipelineStageFlags srcStages = 0;
+ VkPipelineStageFlags dstStages = 0;
- return true;
+ if (TransitionUsageAndGetResourceBarrier(usage, &barrier, &srcStages, &dstStages)) {
+ ASSERT(srcStages != 0 && dstStages != 0);
+ ToBackend(GetDevice())
+ ->fn.CmdPipelineBarrier(recordingContext->commandBuffer, srcStages, dstStages, 0, 0,
+ nullptr, 1u, &barrier, 0, nullptr);
}
-
- bool Buffer::IsCPUWritableAtCreation() const {
- // TODO(enga): Handle CPU-visible memory on UMA
- return mMemoryAllocation.GetMappedPointer() != nullptr;
+}
+
+bool Buffer::TransitionUsageAndGetResourceBarrier(wgpu::BufferUsage usage,
+ VkBufferMemoryBarrier* barrier,
+ VkPipelineStageFlags* srcStages,
+ VkPipelineStageFlags* dstStages) {
+ bool lastIncludesTarget = IsSubset(usage, mLastUsage);
+ bool lastReadOnly = IsSubset(mLastUsage, kReadOnlyBufferUsages);
+
+ // We can skip transitions to already current read-only usages.
+ if (lastIncludesTarget && lastReadOnly) {
+ return false;
}
- MaybeError Buffer::MapAtCreationImpl() {
- return {};
+ // Special-case for the initial transition: Vulkan doesn't allow access flags to be 0.
+ if (mLastUsage == wgpu::BufferUsage::None) {
+ mLastUsage = usage;
+ return false;
}
- MaybeError Buffer::MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) {
- Device* device = ToBackend(GetDevice());
+ *srcStages |= VulkanPipelineStage(mLastUsage);
+ *dstStages |= VulkanPipelineStage(usage);
- CommandRecordingContext* recordingContext = device->GetPendingRecordingContext();
+ barrier->sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
+ barrier->pNext = nullptr;
+ barrier->srcAccessMask = VulkanAccessFlags(mLastUsage);
+ barrier->dstAccessMask = VulkanAccessFlags(usage);
+ barrier->srcQueueFamilyIndex = 0;
+ barrier->dstQueueFamilyIndex = 0;
+ barrier->buffer = mHandle;
+ barrier->offset = 0;
+ // VK_WHOLE_SIZE doesn't work on old Windows Intel Vulkan drivers, so we don't use it.
+ barrier->size = GetAllocatedSize();
- // TODO(crbug.com/dawn/852): initialize mapped buffer in CPU side.
- EnsureDataInitialized(recordingContext);
+ mLastUsage = usage;
- if (mode & wgpu::MapMode::Read) {
- TransitionUsageNow(recordingContext, wgpu::BufferUsage::MapRead);
- } else {
- ASSERT(mode & wgpu::MapMode::Write);
- TransitionUsageNow(recordingContext, wgpu::BufferUsage::MapWrite);
- }
- return {};
- }
+ return true;
+}
- void Buffer::UnmapImpl() {
- // No need to do anything, we keep CPU-visible memory mapped at all time.
- }
+bool Buffer::IsCPUWritableAtCreation() const {
+ // TODO(enga): Handle CPU-visible memory on UMA
+ return mMemoryAllocation.GetMappedPointer() != nullptr;
+}
- void* Buffer::GetMappedPointerImpl() {
- uint8_t* memory = mMemoryAllocation.GetMappedPointer();
- ASSERT(memory != nullptr);
- return memory;
- }
+MaybeError Buffer::MapAtCreationImpl() {
+ return {};
+}
- void Buffer::DestroyImpl() {
- BufferBase::DestroyImpl();
+MaybeError Buffer::MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) {
+ Device* device = ToBackend(GetDevice());
- ToBackend(GetDevice())->GetResourceMemoryAllocator()->Deallocate(&mMemoryAllocation);
+ CommandRecordingContext* recordingContext = device->GetPendingRecordingContext();
- if (mHandle != VK_NULL_HANDLE) {
- ToBackend(GetDevice())->GetFencedDeleter()->DeleteWhenUnused(mHandle);
- mHandle = VK_NULL_HANDLE;
- }
- }
+    // TODO(crbug.com/dawn/852): initialize the mapped buffer on the CPU side.
+ EnsureDataInitialized(recordingContext);
- bool Buffer::EnsureDataInitialized(CommandRecordingContext* recordingContext) {
- if (!NeedsInitialization()) {
- return false;
- }
-
- InitializeToZero(recordingContext);
- return true;
+ if (mode & wgpu::MapMode::Read) {
+ TransitionUsageNow(recordingContext, wgpu::BufferUsage::MapRead);
+ } else {
+ ASSERT(mode & wgpu::MapMode::Write);
+ TransitionUsageNow(recordingContext, wgpu::BufferUsage::MapWrite);
}
+ return {};
+}
- bool Buffer::EnsureDataInitializedAsDestination(CommandRecordingContext* recordingContext,
- uint64_t offset,
- uint64_t size) {
- if (!NeedsInitialization()) {
- return false;
- }
+void Buffer::UnmapImpl() {
+    // No need to do anything, we keep CPU-visible memory mapped at all times.
+}
- if (IsFullBufferRange(offset, size)) {
- SetIsDataInitialized();
- return false;
- }
+void* Buffer::GetMappedPointerImpl() {
+ uint8_t* memory = mMemoryAllocation.GetMappedPointer();
+ ASSERT(memory != nullptr);
+ return memory;
+}
- InitializeToZero(recordingContext);
- return true;
- }
+void Buffer::DestroyImpl() {
+ BufferBase::DestroyImpl();
- bool Buffer::EnsureDataInitializedAsDestination(CommandRecordingContext* recordingContext,
- const CopyTextureToBufferCmd* copy) {
- if (!NeedsInitialization()) {
- return false;
- }
-
- if (IsFullBufferOverwrittenInTextureToBufferCopy(copy)) {
- SetIsDataInitialized();
- return false;
- }
+ ToBackend(GetDevice())->GetResourceMemoryAllocator()->Deallocate(&mMemoryAllocation);
- InitializeToZero(recordingContext);
- return true;
+ if (mHandle != VK_NULL_HANDLE) {
+ ToBackend(GetDevice())->GetFencedDeleter()->DeleteWhenUnused(mHandle);
+ mHandle = VK_NULL_HANDLE;
}
+}
- void Buffer::SetLabelImpl() {
- SetDebugName(ToBackend(GetDevice()), mHandle, "Dawn_Buffer", GetLabel());
+bool Buffer::EnsureDataInitialized(CommandRecordingContext* recordingContext) {
+ if (!NeedsInitialization()) {
+ return false;
}
- void Buffer::InitializeToZero(CommandRecordingContext* recordingContext) {
- ASSERT(NeedsInitialization());
+ InitializeToZero(recordingContext);
+ return true;
+}
+
+bool Buffer::EnsureDataInitializedAsDestination(CommandRecordingContext* recordingContext,
+ uint64_t offset,
+ uint64_t size) {
+ if (!NeedsInitialization()) {
+ return false;
+ }
- ClearBuffer(recordingContext, 0u);
- GetDevice()->IncrementLazyClearCountForTesting();
+ if (IsFullBufferRange(offset, size)) {
SetIsDataInitialized();
+ return false;
}
- void Buffer::ClearBuffer(CommandRecordingContext* recordingContext,
- uint32_t clearValue,
- uint64_t offset,
- uint64_t size) {
- ASSERT(recordingContext != nullptr);
- size = size > 0 ? size : GetAllocatedSize();
- ASSERT(size > 0);
+ InitializeToZero(recordingContext);
+ return true;
+}
- TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopyDst);
+bool Buffer::EnsureDataInitializedAsDestination(CommandRecordingContext* recordingContext,
+ const CopyTextureToBufferCmd* copy) {
+ if (!NeedsInitialization()) {
+ return false;
+ }
- Device* device = ToBackend(GetDevice());
- // VK_WHOLE_SIZE doesn't work on old Windows Intel Vulkan drivers, so we don't use it.
- // Note: Allocated size must be a multiple of 4.
- ASSERT(size % 4 == 0);
- device->fn.CmdFillBuffer(recordingContext->commandBuffer, mHandle, offset, size,
- clearValue);
+ if (IsFullBufferOverwrittenInTextureToBufferCopy(copy)) {
+ SetIsDataInitialized();
+ return false;
}
+
+ InitializeToZero(recordingContext);
+ return true;
+}
+
+void Buffer::SetLabelImpl() {
+ SetDebugName(ToBackend(GetDevice()), mHandle, "Dawn_Buffer", GetLabel());
+}
+
+void Buffer::InitializeToZero(CommandRecordingContext* recordingContext) {
+ ASSERT(NeedsInitialization());
+
+ ClearBuffer(recordingContext, 0u);
+ GetDevice()->IncrementLazyClearCountForTesting();
+ SetIsDataInitialized();
+}
+
+void Buffer::ClearBuffer(CommandRecordingContext* recordingContext,
+ uint32_t clearValue,
+ uint64_t offset,
+ uint64_t size) {
+ ASSERT(recordingContext != nullptr);
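+    // A size of zero means "clear the whole allocated range".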
+ size = size > 0 ? size : GetAllocatedSize();
+ ASSERT(size > 0);
+
+ TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopyDst);
+
+ Device* device = ToBackend(GetDevice());
+ // VK_WHOLE_SIZE doesn't work on old Windows Intel Vulkan drivers, so we don't use it.
+ // Note: Allocated size must be a multiple of 4.
+ ASSERT(size % 4 == 0);
+ device->fn.CmdFillBuffer(recordingContext->commandBuffer, mHandle, offset, size, clearValue);
+}
} // namespace dawn::native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/BufferVk.h b/chromium/third_party/dawn/src/dawn/native/vulkan/BufferVk.h
index 185170ea141..8e955085da9 100644
--- a/chromium/third_party/dawn/src/dawn/native/vulkan/BufferVk.h
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/BufferVk.h
@@ -23,59 +23,58 @@
namespace dawn::native::vulkan {
- struct CommandRecordingContext;
- class Device;
-
- class Buffer final : public BufferBase {
- public:
- static ResultOrError<Ref<Buffer>> Create(Device* device,
- const BufferDescriptor* descriptor);
-
- VkBuffer GetHandle() const;
-
- // Transitions the buffer to be used as `usage`, recording any necessary barrier in
- // `commands`.
- // TODO(crbug.com/dawn/851): coalesce barriers and do them early when possible.
- void TransitionUsageNow(CommandRecordingContext* recordingContext, wgpu::BufferUsage usage);
- bool TransitionUsageAndGetResourceBarrier(wgpu::BufferUsage usage,
- VkBufferMemoryBarrier* barrier,
- VkPipelineStageFlags* srcStages,
- VkPipelineStageFlags* dstStages);
-
- // All the Ensure methods return true if the buffer was initialized to zero.
- bool EnsureDataInitialized(CommandRecordingContext* recordingContext);
- bool EnsureDataInitializedAsDestination(CommandRecordingContext* recordingContext,
- uint64_t offset,
- uint64_t size);
- bool EnsureDataInitializedAsDestination(CommandRecordingContext* recordingContext,
- const CopyTextureToBufferCmd* copy);
-
- // Dawn API
- void SetLabelImpl() override;
-
- private:
- ~Buffer() override;
- using BufferBase::BufferBase;
-
- MaybeError Initialize(bool mappedAtCreation);
- void InitializeToZero(CommandRecordingContext* recordingContext);
- void ClearBuffer(CommandRecordingContext* recordingContext,
- uint32_t clearValue,
- uint64_t offset = 0,
- uint64_t size = 0);
-
- MaybeError MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) override;
- void UnmapImpl() override;
- void DestroyImpl() override;
- bool IsCPUWritableAtCreation() const override;
- MaybeError MapAtCreationImpl() override;
- void* GetMappedPointerImpl() override;
-
- VkBuffer mHandle = VK_NULL_HANDLE;
- ResourceMemoryAllocation mMemoryAllocation;
-
- wgpu::BufferUsage mLastUsage = wgpu::BufferUsage::None;
- };
+struct CommandRecordingContext;
+class Device;
+
+class Buffer final : public BufferBase {
+ public:
+ static ResultOrError<Ref<Buffer>> Create(Device* device, const BufferDescriptor* descriptor);
+
+ VkBuffer GetHandle() const;
+
+    // Transitions the buffer to be used as `usage`, recording any necessary barrier in
+    // `recordingContext`.
+ // TODO(crbug.com/dawn/851): coalesce barriers and do them early when possible.
+ void TransitionUsageNow(CommandRecordingContext* recordingContext, wgpu::BufferUsage usage);
+ bool TransitionUsageAndGetResourceBarrier(wgpu::BufferUsage usage,
+ VkBufferMemoryBarrier* barrier,
+ VkPipelineStageFlags* srcStages,
+ VkPipelineStageFlags* dstStages);
+
+ // All the Ensure methods return true if the buffer was initialized to zero.
+ bool EnsureDataInitialized(CommandRecordingContext* recordingContext);
+ bool EnsureDataInitializedAsDestination(CommandRecordingContext* recordingContext,
+ uint64_t offset,
+ uint64_t size);
+ bool EnsureDataInitializedAsDestination(CommandRecordingContext* recordingContext,
+ const CopyTextureToBufferCmd* copy);
+
+ // Dawn API
+ void SetLabelImpl() override;
+
+ private:
+ ~Buffer() override;
+ using BufferBase::BufferBase;
+
+ MaybeError Initialize(bool mappedAtCreation);
+ void InitializeToZero(CommandRecordingContext* recordingContext);
+ void ClearBuffer(CommandRecordingContext* recordingContext,
+ uint32_t clearValue,
+ uint64_t offset = 0,
+ uint64_t size = 0);
+
+ MaybeError MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) override;
+ void UnmapImpl() override;
+ void DestroyImpl() override;
+ bool IsCPUWritableAtCreation() const override;
+ MaybeError MapAtCreationImpl() override;
+ void* GetMappedPointerImpl() override;
+
+ VkBuffer mHandle = VK_NULL_HANDLE;
+ ResourceMemoryAllocation mMemoryAllocation;
+
+ wgpu::BufferUsage mLastUsage = wgpu::BufferUsage::None;
+};
} // namespace dawn::native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/CacheKeyVk.cpp b/chromium/third_party/dawn/src/dawn/native/vulkan/CacheKeyVk.cpp
index 3930b54ca37..d89649e7462 100644
--- a/chromium/third_party/dawn/src/dawn/native/vulkan/CacheKeyVk.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/CacheKeyVk.cpp
@@ -12,246 +12,254 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+#include <cstring>
+
#include "dawn/native/vulkan/CacheKeyVk.h"
#include "dawn/native/vulkan/RenderPassCache.h"
-#include <cstring>
-
namespace dawn::native {
- template <>
- void CacheKeySerializer<VkDescriptorSetLayoutBinding>::Serialize(
- CacheKey* key,
- const VkDescriptorSetLayoutBinding& t) {
- key->Record(t.binding, t.descriptorType, t.descriptorCount, t.stageFlags);
- }
-
- template <>
- void CacheKeySerializer<VkDescriptorSetLayoutCreateInfo>::Serialize(
- CacheKey* key,
- const VkDescriptorSetLayoutCreateInfo& t) {
- key->Record(t.flags).RecordIterable(t.pBindings, t.bindingCount);
- vulkan::SerializePnext<>(key, &t);
- }
-
- template <>
- void CacheKeySerializer<VkPushConstantRange>::Serialize(CacheKey* key,
- const VkPushConstantRange& t) {
- key->Record(t.stageFlags, t.offset, t.size);
- }
-
- template <>
- void CacheKeySerializer<VkPipelineLayoutCreateInfo>::Serialize(
- CacheKey* key,
- const VkPipelineLayoutCreateInfo& t) {
- // The set layouts are not serialized here because they are pointers to backend objects.
- // They need to be cross-referenced with the frontend objects and serialized from there.
- key->Record(t.flags).RecordIterable(t.pPushConstantRanges, t.pushConstantRangeCount);
- vulkan::SerializePnext<>(key, &t);
- }
-
- template <>
- void CacheKeySerializer<VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT>::Serialize(
- CacheKey* key,
- const VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT& t) {
- key->Record(t.requiredSubgroupSize);
- }
-
- template <>
- void CacheKeySerializer<VkSpecializationMapEntry>::Serialize(
- CacheKey* key,
- const VkSpecializationMapEntry& t) {
- key->Record(t.constantID, t.offset, t.size);
- }
-
- template <>
- void CacheKeySerializer<VkSpecializationInfo>::Serialize(CacheKey* key,
- const VkSpecializationInfo& t) {
- key->RecordIterable(t.pMapEntries, t.mapEntryCount)
- .RecordIterable(static_cast<const uint8_t*>(t.pData), t.dataSize);
- }
-
- template <>
- void CacheKeySerializer<VkPipelineShaderStageCreateInfo>::Serialize(
- CacheKey* key,
- const VkPipelineShaderStageCreateInfo& t) {
- // The shader module is not serialized here because it is a pointer to a backend object.
- key->Record(t.flags, t.stage)
- .RecordIterable(t.pName, strlen(t.pName))
- .Record(t.pSpecializationInfo);
- vulkan::SerializePnext<VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT>(key, &t);
- }
-
- template <>
- void CacheKeySerializer<VkComputePipelineCreateInfo>::Serialize(
- CacheKey* key,
- const VkComputePipelineCreateInfo& t) {
- // The pipeline layout is not serialized here because it is a pointer to a backend object.
- // It needs to be cross-referenced with the frontend objects and serialized from there. The
- // base pipeline information is also currently not recorded since we do not use them in our
- // backend implementation. If we decide to use them later on, they also need to be
- // cross-referenced from the frontend.
- key->Record(t.flags, t.stage);
- }
-
- template <>
- void CacheKeySerializer<VkVertexInputBindingDescription>::Serialize(
- CacheKey* key,
- const VkVertexInputBindingDescription& t) {
- key->Record(t.binding, t.stride, t.inputRate);
- }
-
- template <>
- void CacheKeySerializer<VkVertexInputAttributeDescription>::Serialize(
- CacheKey* key,
- const VkVertexInputAttributeDescription& t) {
- key->Record(t.location, t.binding, t.format, t.offset);
- }
-
- template <>
- void CacheKeySerializer<VkPipelineVertexInputStateCreateInfo>::Serialize(
- CacheKey* key,
- const VkPipelineVertexInputStateCreateInfo& t) {
- key->Record(t.flags)
- .RecordIterable(t.pVertexBindingDescriptions, t.vertexBindingDescriptionCount)
- .RecordIterable(t.pVertexAttributeDescriptions, t.vertexAttributeDescriptionCount);
- vulkan::SerializePnext<>(key, &t);
- }
-
- template <>
- void CacheKeySerializer<VkPipelineInputAssemblyStateCreateInfo>::Serialize(
- CacheKey* key,
- const VkPipelineInputAssemblyStateCreateInfo& t) {
- key->Record(t.flags, t.topology, t.primitiveRestartEnable);
- vulkan::SerializePnext<>(key, &t);
- }
-
- template <>
- void CacheKeySerializer<VkPipelineTessellationStateCreateInfo>::Serialize(
- CacheKey* key,
- const VkPipelineTessellationStateCreateInfo& t) {
- key->Record(t.flags, t.patchControlPoints);
- vulkan::SerializePnext<>(key, &t);
- }
-
- template <>
- void CacheKeySerializer<VkViewport>::Serialize(CacheKey* key, const VkViewport& t) {
- key->Record(t.x, t.y, t.width, t.height, t.minDepth, t.maxDepth);
- }
-
- template <>
- void CacheKeySerializer<VkOffset2D>::Serialize(CacheKey* key, const VkOffset2D& t) {
- key->Record(t.x, t.y);
- }
-
- template <>
- void CacheKeySerializer<VkExtent2D>::Serialize(CacheKey* key, const VkExtent2D& t) {
- key->Record(t.width, t.height);
- }
-
- template <>
- void CacheKeySerializer<VkRect2D>::Serialize(CacheKey* key, const VkRect2D& t) {
- key->Record(t.offset, t.extent);
- }
-
- template <>
- void CacheKeySerializer<VkPipelineViewportStateCreateInfo>::Serialize(
- CacheKey* key,
- const VkPipelineViewportStateCreateInfo& t) {
- key->Record(t.flags)
- .RecordIterable(t.pViewports, t.viewportCount)
- .RecordIterable(t.pScissors, t.scissorCount);
- vulkan::SerializePnext<>(key, &t);
- }
-
- template <>
- void CacheKeySerializer<VkPipelineRasterizationStateCreateInfo>::Serialize(
- CacheKey* key,
- const VkPipelineRasterizationStateCreateInfo& t) {
- key->Record(t.flags, t.depthClampEnable, t.rasterizerDiscardEnable, t.polygonMode,
- t.cullMode, t.frontFace, t.depthBiasEnable, t.depthBiasConstantFactor,
- t.depthBiasClamp, t.depthBiasSlopeFactor, t.lineWidth);
- vulkan::SerializePnext<>(key, &t);
- }
-
- template <>
- void CacheKeySerializer<VkPipelineMultisampleStateCreateInfo>::Serialize(
- CacheKey* key,
- const VkPipelineMultisampleStateCreateInfo& t) {
- key->Record(t.flags, t.rasterizationSamples, t.sampleShadingEnable, t.minSampleShading,
- t.pSampleMask, t.alphaToCoverageEnable, t.alphaToOneEnable);
- vulkan::SerializePnext<>(key, &t);
- }
-
- template <>
- void CacheKeySerializer<VkStencilOpState>::Serialize(CacheKey* key, const VkStencilOpState& t) {
- key->Record(t.failOp, t.passOp, t.depthFailOp, t.compareOp, t.compareMask, t.writeMask,
- t.reference);
- }
-
- template <>
- void CacheKeySerializer<VkPipelineDepthStencilStateCreateInfo>::Serialize(
- CacheKey* key,
- const VkPipelineDepthStencilStateCreateInfo& t) {
- key->Record(t.flags, t.depthTestEnable, t.depthWriteEnable, t.depthCompareOp,
- t.depthBoundsTestEnable, t.stencilTestEnable, t.front, t.back, t.minDepthBounds,
- t.maxDepthBounds);
- vulkan::SerializePnext<>(key, &t);
- }
-
- template <>
- void CacheKeySerializer<VkPipelineColorBlendAttachmentState>::Serialize(
- CacheKey* key,
- const VkPipelineColorBlendAttachmentState& t) {
- key->Record(t.blendEnable, t.srcColorBlendFactor, t.dstColorBlendFactor, t.colorBlendOp,
- t.srcAlphaBlendFactor, t.dstAlphaBlendFactor, t.alphaBlendOp, t.colorWriteMask);
- }
-
- template <>
- void CacheKeySerializer<VkPipelineColorBlendStateCreateInfo>::Serialize(
- CacheKey* key,
- const VkPipelineColorBlendStateCreateInfo& t) {
- key->Record(t.flags, t.logicOpEnable, t.logicOp)
- .RecordIterable(t.pAttachments, t.attachmentCount)
- .Record(t.blendConstants);
- vulkan::SerializePnext<>(key, &t);
- }
-
- template <>
- void CacheKeySerializer<VkPipelineDynamicStateCreateInfo>::Serialize(
- CacheKey* key,
- const VkPipelineDynamicStateCreateInfo& t) {
- key->Record(t.flags).RecordIterable(t.pDynamicStates, t.dynamicStateCount);
- vulkan::SerializePnext<>(key, &t);
- }
-
- template <>
- void CacheKeySerializer<vulkan::RenderPassCacheQuery>::Serialize(
- CacheKey* key,
- const vulkan::RenderPassCacheQuery& t) {
- key->Record(t.colorMask.to_ulong(), t.resolveTargetMask.to_ulong())
- .RecordIterable(t.colorFormats)
- .RecordIterable(t.colorLoadOp)
- .RecordIterable(t.colorStoreOp)
- .Record(t.hasDepthStencil, t.depthStencilFormat, t.depthLoadOp, t.depthStoreOp,
- t.stencilLoadOp, t.stencilStoreOp, t.readOnlyDepthStencil, t.sampleCount);
+template <>
+void CacheKeySerializer<VkDescriptorSetLayoutBinding>::Serialize(
+ CacheKey* key,
+ const VkDescriptorSetLayoutBinding& t) {
+ key->Record(t.binding, t.descriptorType, t.descriptorCount, t.stageFlags);
+}
+
+template <>
+void CacheKeySerializer<VkDescriptorSetLayoutCreateInfo>::Serialize(
+ CacheKey* key,
+ const VkDescriptorSetLayoutCreateInfo& t) {
+ key->Record(t.flags).RecordIterable(t.pBindings, t.bindingCount);
+ vulkan::SerializePnext<>(key, &t);
+}
+
+template <>
+void CacheKeySerializer<VkPushConstantRange>::Serialize(CacheKey* key,
+ const VkPushConstantRange& t) {
+ key->Record(t.stageFlags, t.offset, t.size);
+}
+
+template <>
+void CacheKeySerializer<VkPipelineLayoutCreateInfo>::Serialize(
+ CacheKey* key,
+ const VkPipelineLayoutCreateInfo& t) {
+ // The set layouts are not serialized here because they are pointers to backend objects.
+ // They need to be cross-referenced with the frontend objects and serialized from there.
+ key->Record(t.flags).RecordIterable(t.pPushConstantRanges, t.pushConstantRangeCount);
+ vulkan::SerializePnext<>(key, &t);
+}
+
+template <>
+void CacheKeySerializer<VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT>::Serialize(
+ CacheKey* key,
+ const VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT& t) {
+ key->Record(t.requiredSubgroupSize);
+}
+
+template <>
+void CacheKeySerializer<VkSpecializationMapEntry>::Serialize(CacheKey* key,
+ const VkSpecializationMapEntry& t) {
+ key->Record(t.constantID, t.offset, t.size);
+}
+
+template <>
+void CacheKeySerializer<VkSpecializationInfo>::Serialize(CacheKey* key,
+ const VkSpecializationInfo& t) {
+ key->RecordIterable(t.pMapEntries, t.mapEntryCount)
+ .RecordIterable(static_cast<const uint8_t*>(t.pData), t.dataSize);
+}
+
+template <>
+void CacheKeySerializer<VkPipelineShaderStageCreateInfo>::Serialize(
+ CacheKey* key,
+ const VkPipelineShaderStageCreateInfo& t) {
+ // The shader module is not serialized here because it is a pointer to a backend object.
+ key->Record(t.flags, t.stage)
+ .RecordIterable(t.pName, strlen(t.pName))
+ .Record(t.pSpecializationInfo);
+ vulkan::SerializePnext<VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT>(key, &t);
+}
+
+template <>
+void CacheKeySerializer<VkComputePipelineCreateInfo>::Serialize(
+ CacheKey* key,
+ const VkComputePipelineCreateInfo& t) {
+ // The pipeline layout is not serialized here because it is a pointer to a backend object.
+ // It needs to be cross-referenced with the frontend objects and serialized from there. The
+    // base pipeline information is also currently not recorded since we do not use it in our
+    // backend implementation. If we decide to use it later on, it also needs to be
+ // cross-referenced from the frontend.
+ key->Record(t.flags, t.stage);
+}
+
+template <>
+void CacheKeySerializer<VkVertexInputBindingDescription>::Serialize(
+ CacheKey* key,
+ const VkVertexInputBindingDescription& t) {
+ key->Record(t.binding, t.stride, t.inputRate);
+}
+
+template <>
+void CacheKeySerializer<VkVertexInputAttributeDescription>::Serialize(
+ CacheKey* key,
+ const VkVertexInputAttributeDescription& t) {
+ key->Record(t.location, t.binding, t.format, t.offset);
+}
+
+template <>
+void CacheKeySerializer<VkPipelineVertexInputStateCreateInfo>::Serialize(
+ CacheKey* key,
+ const VkPipelineVertexInputStateCreateInfo& t) {
+ key->Record(t.flags)
+ .RecordIterable(t.pVertexBindingDescriptions, t.vertexBindingDescriptionCount)
+ .RecordIterable(t.pVertexAttributeDescriptions, t.vertexAttributeDescriptionCount);
+ vulkan::SerializePnext<>(key, &t);
+}
+
+template <>
+void CacheKeySerializer<VkPipelineInputAssemblyStateCreateInfo>::Serialize(
+ CacheKey* key,
+ const VkPipelineInputAssemblyStateCreateInfo& t) {
+ key->Record(t.flags, t.topology, t.primitiveRestartEnable);
+ vulkan::SerializePnext<>(key, &t);
+}
+
+template <>
+void CacheKeySerializer<VkPipelineTessellationStateCreateInfo>::Serialize(
+ CacheKey* key,
+ const VkPipelineTessellationStateCreateInfo& t) {
+ key->Record(t.flags, t.patchControlPoints);
+ vulkan::SerializePnext<>(key, &t);
+}
+
+template <>
+void CacheKeySerializer<VkViewport>::Serialize(CacheKey* key, const VkViewport& t) {
+ key->Record(t.x, t.y, t.width, t.height, t.minDepth, t.maxDepth);
+}
+
+template <>
+void CacheKeySerializer<VkOffset2D>::Serialize(CacheKey* key, const VkOffset2D& t) {
+ key->Record(t.x, t.y);
+}
+
+template <>
+void CacheKeySerializer<VkExtent2D>::Serialize(CacheKey* key, const VkExtent2D& t) {
+ key->Record(t.width, t.height);
+}
+
+template <>
+void CacheKeySerializer<VkRect2D>::Serialize(CacheKey* key, const VkRect2D& t) {
+ key->Record(t.offset, t.extent);
+}
+
+template <>
+void CacheKeySerializer<VkPipelineViewportStateCreateInfo>::Serialize(
+ CacheKey* key,
+ const VkPipelineViewportStateCreateInfo& t) {
+ key->Record(t.flags)
+ .RecordIterable(t.pViewports, t.viewportCount)
+ .RecordIterable(t.pScissors, t.scissorCount);
+ vulkan::SerializePnext<>(key, &t);
+}
+
+template <>
+void CacheKeySerializer<VkPipelineRasterizationStateCreateInfo>::Serialize(
+ CacheKey* key,
+ const VkPipelineRasterizationStateCreateInfo& t) {
+ key->Record(t.flags, t.depthClampEnable, t.rasterizerDiscardEnable, t.polygonMode, t.cullMode,
+ t.frontFace, t.depthBiasEnable, t.depthBiasConstantFactor, t.depthBiasClamp,
+ t.depthBiasSlopeFactor, t.lineWidth);
+ vulkan::SerializePnext<>(key, &t);
+}
+
+template <>
+void CacheKeySerializer<VkPipelineMultisampleStateCreateInfo>::Serialize(
+ CacheKey* key,
+ const VkPipelineMultisampleStateCreateInfo& t) {
+ key->Record(t.flags, t.rasterizationSamples, t.sampleShadingEnable, t.minSampleShading,
+ t.pSampleMask, t.alphaToCoverageEnable, t.alphaToOneEnable);
+ vulkan::SerializePnext<>(key, &t);
+}
+
+template <>
+void CacheKeySerializer<VkStencilOpState>::Serialize(CacheKey* key, const VkStencilOpState& t) {
+ key->Record(t.failOp, t.passOp, t.depthFailOp, t.compareOp, t.compareMask, t.writeMask,
+ t.reference);
+}
+
+template <>
+void CacheKeySerializer<VkPipelineDepthStencilStateCreateInfo>::Serialize(
+ CacheKey* key,
+ const VkPipelineDepthStencilStateCreateInfo& t) {
+ key->Record(t.flags, t.depthTestEnable, t.depthWriteEnable, t.depthCompareOp,
+ t.depthBoundsTestEnable, t.stencilTestEnable, t.front, t.back, t.minDepthBounds,
+ t.maxDepthBounds);
+ vulkan::SerializePnext<>(key, &t);
+}
+
+template <>
+void CacheKeySerializer<VkPipelineColorBlendAttachmentState>::Serialize(
+ CacheKey* key,
+ const VkPipelineColorBlendAttachmentState& t) {
+ key->Record(t.blendEnable, t.srcColorBlendFactor, t.dstColorBlendFactor, t.colorBlendOp,
+ t.srcAlphaBlendFactor, t.dstAlphaBlendFactor, t.alphaBlendOp, t.colorWriteMask);
+}
+
+template <>
+void CacheKeySerializer<VkPipelineColorBlendStateCreateInfo>::Serialize(
+ CacheKey* key,
+ const VkPipelineColorBlendStateCreateInfo& t) {
+ key->Record(t.flags, t.logicOpEnable, t.logicOp)
+ .RecordIterable(t.pAttachments, t.attachmentCount)
+ .Record(t.blendConstants);
+ vulkan::SerializePnext<>(key, &t);
+}
+
+template <>
+void CacheKeySerializer<VkPipelineDynamicStateCreateInfo>::Serialize(
+ CacheKey* key,
+ const VkPipelineDynamicStateCreateInfo& t) {
+ key->Record(t.flags).RecordIterable(t.pDynamicStates, t.dynamicStateCount);
+ vulkan::SerializePnext<>(key, &t);
+}
+
+template <>
+void CacheKeySerializer<vulkan::RenderPassCacheQuery>::Serialize(
+ CacheKey* key,
+ const vulkan::RenderPassCacheQuery& t) {
+ key->Record(t.colorMask.to_ulong(), t.resolveTargetMask.to_ulong(), t.sampleCount);
+
+ // Manually iterate the color attachment indices and their corresponding format/load/store
+ // ops because the data is sparse and may be uninitialized. Since we record the colorMask
+ // member above, recording sparse data should be fine here.
+ for (ColorAttachmentIndex i : IterateBitSet(t.colorMask)) {
+ key->Record(t.colorFormats[i], t.colorLoadOp[i], t.colorStoreOp[i]);
}
- template <>
- void CacheKeySerializer<VkGraphicsPipelineCreateInfo>::Serialize(
- CacheKey* key,
- const VkGraphicsPipelineCreateInfo& t) {
- // The pipeline layout and render pass are not serialized here because they are pointers to
- // backend objects. They need to be cross-referenced with the frontend objects and
- // serialized from there. The base pipeline information is also currently not recorded since
- // we do not use them in our backend implementation. If we decide to use them later on, they
- // also need to be cross-referenced from the frontend.
- key->Record(t.flags)
- .RecordIterable(t.pStages, t.stageCount)
- .Record(t.pVertexInputState, t.pInputAssemblyState, t.pTessellationState,
- t.pViewportState, t.pRasterizationState, t.pMultisampleState,
- t.pDepthStencilState, t.pColorBlendState, t.pDynamicState, t.subpass);
- vulkan::SerializePnext<>(key, &t);
+ // Serialize the depth-stencil toggle bit, and the parameters if applicable.
+ key->Record(t.hasDepthStencil);
+ if (t.hasDepthStencil) {
+ key->Record(t.depthStencilFormat, t.depthLoadOp, t.depthStoreOp, t.stencilLoadOp,
+ t.stencilStoreOp, t.readOnlyDepthStencil);
}
+}
+
+template <>
+void CacheKeySerializer<VkGraphicsPipelineCreateInfo>::Serialize(
+ CacheKey* key,
+ const VkGraphicsPipelineCreateInfo& t) {
+ // The pipeline layout and render pass are not serialized here because they are pointers to
+ // backend objects. They need to be cross-referenced with the frontend objects and
+    // serialized from there. The base pipeline information is also currently not recorded since
+    // we do not use it in our backend implementation. If we decide to use it later on, it also
+    // needs to be cross-referenced from the frontend.
+ key->Record(t.flags)
+ .RecordIterable(t.pStages, t.stageCount)
+ .Record(t.pVertexInputState, t.pInputAssemblyState, t.pTessellationState, t.pViewportState,
+ t.pRasterizationState, t.pMultisampleState, t.pDepthStencilState,
+ t.pColorBlendState, t.pDynamicState, t.subpass);
+ vulkan::SerializePnext<>(key, &t);
+}
} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/CacheKeyVk.h b/chromium/third_party/dawn/src/dawn/native/vulkan/CacheKeyVk.h
index 0569fadf3da..80b04dbd9a2 100644
--- a/chromium/third_party/dawn/src/dawn/native/vulkan/CacheKeyVk.h
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/CacheKeyVk.h
@@ -15,89 +15,85 @@
#ifndef SRC_DAWN_NATIVE_VULKAN_CACHEKEYVK_H_
#define SRC_DAWN_NATIVE_VULKAN_CACHEKEYVK_H_
+#include <map>
+
#include "dawn/common/Assert.h"
#include "dawn/common/vulkan_platform.h"
#include "dawn/native/CacheKey.h"
#include "icd/generated/vk_typemap_helper.h"
-#include <map>
-
namespace dawn::native::vulkan {
- namespace detail {
+namespace detail {
- template <typename... VK_STRUCT_TYPES>
- void ValidatePnextImpl(const VkBaseOutStructure* root) {
- const VkBaseOutStructure* next =
- reinterpret_cast<const VkBaseOutStructure*>(root->pNext);
- while (next != nullptr) {
- // Assert that the type of each pNext struct is exactly one of the specified
- // templates.
- ASSERT(((LvlTypeMap<VK_STRUCT_TYPES>::kSType == next->sType ? 1 : 0) + ... + 0) ==
- 1);
- next = reinterpret_cast<const VkBaseOutStructure*>(next->pNext);
- }
- }
+template <typename... VK_STRUCT_TYPES>
+void ValidatePnextImpl(const VkBaseOutStructure* root) {
+ const VkBaseOutStructure* next = reinterpret_cast<const VkBaseOutStructure*>(root->pNext);
+ while (next != nullptr) {
+ // Assert that the type of each pNext struct is exactly one of the specified
+ // templates.
+ ASSERT(((LvlTypeMap<VK_STRUCT_TYPES>::kSType == next->sType ? 1 : 0) + ... + 0) == 1);
+ next = reinterpret_cast<const VkBaseOutStructure*>(next->pNext);
+ }
+}
- template <typename VK_STRUCT_TYPE>
- void SerializePnextImpl(CacheKey* key, const VkBaseOutStructure* root) {
- const VkBaseOutStructure* next =
- reinterpret_cast<const VkBaseOutStructure*>(root->pNext);
- const VK_STRUCT_TYPE* found = nullptr;
- while (next != nullptr) {
- if (LvlTypeMap<VK_STRUCT_TYPE>::kSType == next->sType) {
- if (found == nullptr) {
- found = reinterpret_cast<const VK_STRUCT_TYPE*>(next);
- } else {
- // Fail an assert here since that means that the chain had more than one of
- // the same typed chained object.
- ASSERT(false);
- }
- }
- next = reinterpret_cast<const VkBaseOutStructure*>(next->pNext);
- }
- if (found != nullptr) {
- key->Record(found);
+template <typename VK_STRUCT_TYPE>
+void SerializePnextImpl(CacheKey* key, const VkBaseOutStructure* root) {
+ const VkBaseOutStructure* next = reinterpret_cast<const VkBaseOutStructure*>(root->pNext);
+ const VK_STRUCT_TYPE* found = nullptr;
+ while (next != nullptr) {
+ if (LvlTypeMap<VK_STRUCT_TYPE>::kSType == next->sType) {
+ if (found == nullptr) {
+ found = reinterpret_cast<const VK_STRUCT_TYPE*>(next);
+ } else {
+                // Fail an assert here since that means the chain had more than one chained
+                // struct of the same type.
+ ASSERT(false);
}
}
+ next = reinterpret_cast<const VkBaseOutStructure*>(next->pNext);
+ }
+ if (found != nullptr) {
+ key->Record(found);
+ }
+}
- template <typename VK_STRUCT_TYPE,
- typename... VK_STRUCT_TYPES,
- typename = std::enable_if_t<(sizeof...(VK_STRUCT_TYPES) > 0)>>
- void SerializePnextImpl(CacheKey* key, const VkBaseOutStructure* root) {
- SerializePnextImpl<VK_STRUCT_TYPE>(key, root);
- SerializePnextImpl<VK_STRUCT_TYPES...>(key, root);
- }
+template <typename VK_STRUCT_TYPE,
+ typename... VK_STRUCT_TYPES,
+ typename = std::enable_if_t<(sizeof...(VK_STRUCT_TYPES) > 0)>>
+void SerializePnextImpl(CacheKey* key, const VkBaseOutStructure* root) {
+ SerializePnextImpl<VK_STRUCT_TYPE>(key, root);
+ SerializePnextImpl<VK_STRUCT_TYPES...>(key, root);
+}
- template <typename VK_STRUCT_TYPE>
- const VkBaseOutStructure* ToVkBaseOutStructure(const VK_STRUCT_TYPE* t) {
- // Sanity checks to ensure proper type safety.
- static_assert(
- offsetof(VK_STRUCT_TYPE, sType) == offsetof(VkBaseOutStructure, sType) &&
- offsetof(VK_STRUCT_TYPE, pNext) == offsetof(VkBaseOutStructure, pNext),
- "Argument type is not a proper Vulkan structure type");
- return reinterpret_cast<const VkBaseOutStructure*>(t);
- }
+template <typename VK_STRUCT_TYPE>
+const VkBaseOutStructure* ToVkBaseOutStructure(const VK_STRUCT_TYPE* t) {
+ // Checks to ensure proper type safety.
+ static_assert(offsetof(VK_STRUCT_TYPE, sType) == offsetof(VkBaseOutStructure, sType) &&
+ offsetof(VK_STRUCT_TYPE, pNext) == offsetof(VkBaseOutStructure, pNext),
+ "Argument type is not a proper Vulkan structure type");
+ return reinterpret_cast<const VkBaseOutStructure*>(t);
+}
- } // namespace detail
+} // namespace detail
- template <typename... VK_STRUCT_TYPES,
- typename VK_STRUCT_TYPE,
- typename = std::enable_if_t<(sizeof...(VK_STRUCT_TYPES) > 0)>>
- void SerializePnext(CacheKey* key, const VK_STRUCT_TYPE* t) {
- const VkBaseOutStructure* root = detail::ToVkBaseOutStructure(t);
- detail::ValidatePnextImpl<VK_STRUCT_TYPES...>(root);
- detail::SerializePnextImpl<VK_STRUCT_TYPES...>(key, root);
- }
+template <typename... VK_STRUCT_TYPES,
+ typename VK_STRUCT_TYPE,
+ typename = std::enable_if_t<(sizeof...(VK_STRUCT_TYPES) > 0)>>
+void SerializePnext(CacheKey* key, const VK_STRUCT_TYPE* t) {
+ const VkBaseOutStructure* root = detail::ToVkBaseOutStructure(t);
+ detail::ValidatePnextImpl<VK_STRUCT_TYPES...>(root);
+ detail::SerializePnextImpl<VK_STRUCT_TYPES...>(key, root);
+}
- // Empty template specialization so that we can put this in to ensure failures occur if new
- // extensions are added without updating serialization.
- template <typename VK_STRUCT_TYPE>
- void SerializePnext(CacheKey* key, const VK_STRUCT_TYPE* t) {
- const VkBaseOutStructure* root = detail::ToVkBaseOutStructure(t);
- detail::ValidatePnextImpl<>(root);
- }
+// Empty template specialization that can be used to ensure failures occur if new extensions are
+// added without updating serialization.
+template <typename VK_STRUCT_TYPE>
+void SerializePnext(CacheKey* key, const VK_STRUCT_TYPE* t) {
+ const VkBaseOutStructure* root = detail::ToVkBaseOutStructure(t);
+ detail::ValidatePnextImpl<>(root);
+}
} // namespace dawn::native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/CommandBufferVk.cpp b/chromium/third_party/dawn/src/dawn/native/vulkan/CommandBufferVk.cpp
index 2e94f6a2132..fca86c2db72 100644
--- a/chromium/third_party/dawn/src/dawn/native/vulkan/CommandBufferVk.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/CommandBufferVk.cpp
@@ -14,6 +14,9 @@
#include "dawn/native/vulkan/CommandBufferVk.h"
+#include <algorithm>
+#include <vector>
+
#include "dawn/native/BindGroupTracker.h"
#include "dawn/native/CommandEncoder.h"
#include "dawn/native/CommandValidation.h"
@@ -36,1296 +39,1270 @@
#include "dawn/native/vulkan/UtilsVulkan.h"
#include "dawn/native/vulkan/VulkanError.h"
-#include <algorithm>
-
namespace dawn::native::vulkan {
- namespace {
-
- VkIndexType VulkanIndexType(wgpu::IndexFormat format) {
- switch (format) {
- case wgpu::IndexFormat::Uint16:
- return VK_INDEX_TYPE_UINT16;
- case wgpu::IndexFormat::Uint32:
- return VK_INDEX_TYPE_UINT32;
- case wgpu::IndexFormat::Undefined:
- break;
- }
- UNREACHABLE();
- }
-
- bool HasSameTextureCopyExtent(const TextureCopy& srcCopy,
- const TextureCopy& dstCopy,
- const Extent3D& copySize) {
- Extent3D imageExtentSrc = ComputeTextureCopyExtent(srcCopy, copySize);
- Extent3D imageExtentDst = ComputeTextureCopyExtent(dstCopy, copySize);
- return imageExtentSrc.width == imageExtentDst.width &&
- imageExtentSrc.height == imageExtentDst.height &&
- imageExtentSrc.depthOrArrayLayers == imageExtentDst.depthOrArrayLayers;
- }
-
- VkImageCopy ComputeImageCopyRegion(const TextureCopy& srcCopy,
- const TextureCopy& dstCopy,
- const Extent3D& copySize,
- Aspect aspect) {
- const Texture* srcTexture = ToBackend(srcCopy.texture.Get());
- const Texture* dstTexture = ToBackend(dstCopy.texture.Get());
-
- VkImageCopy region;
- region.srcSubresource.aspectMask = VulkanAspectMask(aspect);
- region.srcSubresource.mipLevel = srcCopy.mipLevel;
- region.dstSubresource.aspectMask = VulkanAspectMask(aspect);
- region.dstSubresource.mipLevel = dstCopy.mipLevel;
-
- bool has3DTextureInCopy = false;
-
- region.srcOffset.x = srcCopy.origin.x;
- region.srcOffset.y = srcCopy.origin.y;
- switch (srcTexture->GetDimension()) {
- case wgpu::TextureDimension::e1D:
- region.srcSubresource.baseArrayLayer = 0;
- region.srcSubresource.layerCount = 1;
- region.srcOffset.z = 0;
- break;
- case wgpu::TextureDimension::e2D:
- region.srcSubresource.baseArrayLayer = srcCopy.origin.z;
- region.srcSubresource.layerCount = copySize.depthOrArrayLayers;
- region.srcOffset.z = 0;
- break;
- case wgpu::TextureDimension::e3D:
- has3DTextureInCopy = true;
- region.srcSubresource.baseArrayLayer = 0;
- region.srcSubresource.layerCount = 1;
- region.srcOffset.z = srcCopy.origin.z;
- break;
- }
+namespace {
- region.dstOffset.x = dstCopy.origin.x;
- region.dstOffset.y = dstCopy.origin.y;
- switch (dstTexture->GetDimension()) {
- case wgpu::TextureDimension::e1D:
- region.dstSubresource.baseArrayLayer = 0;
- region.dstSubresource.layerCount = 1;
- region.dstOffset.z = 0;
- break;
- case wgpu::TextureDimension::e2D:
- region.dstSubresource.baseArrayLayer = dstCopy.origin.z;
- region.dstSubresource.layerCount = copySize.depthOrArrayLayers;
- region.dstOffset.z = 0;
- break;
- case wgpu::TextureDimension::e3D:
- has3DTextureInCopy = true;
- region.dstSubresource.baseArrayLayer = 0;
- region.dstSubresource.layerCount = 1;
- region.dstOffset.z = dstCopy.origin.z;
- break;
- }
+VkIndexType VulkanIndexType(wgpu::IndexFormat format) {
+ switch (format) {
+ case wgpu::IndexFormat::Uint16:
+ return VK_INDEX_TYPE_UINT16;
+ case wgpu::IndexFormat::Uint32:
+ return VK_INDEX_TYPE_UINT32;
+ case wgpu::IndexFormat::Undefined:
+ break;
+ }
+ UNREACHABLE();
+}
+
+bool HasSameTextureCopyExtent(const TextureCopy& srcCopy,
+ const TextureCopy& dstCopy,
+ const Extent3D& copySize) {
+ Extent3D imageExtentSrc = ComputeTextureCopyExtent(srcCopy, copySize);
+ Extent3D imageExtentDst = ComputeTextureCopyExtent(dstCopy, copySize);
+ return imageExtentSrc.width == imageExtentDst.width &&
+ imageExtentSrc.height == imageExtentDst.height &&
+ imageExtentSrc.depthOrArrayLayers == imageExtentDst.depthOrArrayLayers;
+}
+
+VkImageCopy ComputeImageCopyRegion(const TextureCopy& srcCopy,
+ const TextureCopy& dstCopy,
+ const Extent3D& copySize,
+ Aspect aspect) {
+ const Texture* srcTexture = ToBackend(srcCopy.texture.Get());
+ const Texture* dstTexture = ToBackend(dstCopy.texture.Get());
+
+ VkImageCopy region;
+ region.srcSubresource.aspectMask = VulkanAspectMask(aspect);
+ region.srcSubresource.mipLevel = srcCopy.mipLevel;
+ region.dstSubresource.aspectMask = VulkanAspectMask(aspect);
+ region.dstSubresource.mipLevel = dstCopy.mipLevel;
+
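+    // 1D/2D textures select slices with array layers, while 3D textures use the z offset/extent.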
+ bool has3DTextureInCopy = false;
+
+ region.srcOffset.x = srcCopy.origin.x;
+ region.srcOffset.y = srcCopy.origin.y;
+ switch (srcTexture->GetDimension()) {
+ case wgpu::TextureDimension::e1D:
+ region.srcSubresource.baseArrayLayer = 0;
+ region.srcSubresource.layerCount = 1;
+ region.srcOffset.z = 0;
+ break;
+ case wgpu::TextureDimension::e2D:
+ region.srcSubresource.baseArrayLayer = srcCopy.origin.z;
+ region.srcSubresource.layerCount = copySize.depthOrArrayLayers;
+ region.srcOffset.z = 0;
+ break;
+ case wgpu::TextureDimension::e3D:
+ has3DTextureInCopy = true;
+ region.srcSubresource.baseArrayLayer = 0;
+ region.srcSubresource.layerCount = 1;
+ region.srcOffset.z = srcCopy.origin.z;
+ break;
+ }
- ASSERT(HasSameTextureCopyExtent(srcCopy, dstCopy, copySize));
- Extent3D imageExtent = ComputeTextureCopyExtent(dstCopy, copySize);
- region.extent.width = imageExtent.width;
- region.extent.height = imageExtent.height;
- region.extent.depth = has3DTextureInCopy ? copySize.depthOrArrayLayers : 1;
+ region.dstOffset.x = dstCopy.origin.x;
+ region.dstOffset.y = dstCopy.origin.y;
+ switch (dstTexture->GetDimension()) {
+ case wgpu::TextureDimension::e1D:
+ region.dstSubresource.baseArrayLayer = 0;
+ region.dstSubresource.layerCount = 1;
+ region.dstOffset.z = 0;
+ break;
+ case wgpu::TextureDimension::e2D:
+ region.dstSubresource.baseArrayLayer = dstCopy.origin.z;
+ region.dstSubresource.layerCount = copySize.depthOrArrayLayers;
+ region.dstOffset.z = 0;
+ break;
+ case wgpu::TextureDimension::e3D:
+ has3DTextureInCopy = true;
+ region.dstSubresource.baseArrayLayer = 0;
+ region.dstSubresource.layerCount = 1;
+ region.dstOffset.z = dstCopy.origin.z;
+ break;
+ }
- return region;
+ ASSERT(HasSameTextureCopyExtent(srcCopy, dstCopy, copySize));
+ Extent3D imageExtent = ComputeTextureCopyExtent(dstCopy, copySize);
+ region.extent.width = imageExtent.width;
+ region.extent.height = imageExtent.height;
+ region.extent.depth = has3DTextureInCopy ? copySize.depthOrArrayLayers : 1;
+
+ return region;
+}
+
+class DescriptorSetTracker : public BindGroupTrackerBase<true, uint32_t> {
+ public:
+ DescriptorSetTracker() = default;
+
+ void Apply(Device* device,
+ CommandRecordingContext* recordingContext,
+ VkPipelineBindPoint bindPoint) {
+ BeforeApply();
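+        // Only descriptor sets whose bind group object changed, or which use dynamic offsets,
+        // need to be re-bound.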
+ for (BindGroupIndex dirtyIndex : IterateBitSet(mDirtyBindGroupsObjectChangedOrIsDynamic)) {
+ VkDescriptorSet set = ToBackend(mBindGroups[dirtyIndex])->GetHandle();
+ const uint32_t* dynamicOffset =
+ mDynamicOffsetCounts[dirtyIndex] > 0 ? mDynamicOffsets[dirtyIndex].data() : nullptr;
+ device->fn.CmdBindDescriptorSets(recordingContext->commandBuffer, bindPoint,
+ ToBackend(mPipelineLayout)->GetHandle(),
+ static_cast<uint32_t>(dirtyIndex), 1, &*set,
+ mDynamicOffsetCounts[dirtyIndex], dynamicOffset);
+ }
+ AfterApply();
+ }
+};
+
+// Records the necessary barriers for a synchronization scope using the resource usage
+// data pre-computed in the frontend. Also performs lazy initialization if required.
+void TransitionAndClearForSyncScope(Device* device,
+ CommandRecordingContext* recordingContext,
+ const SyncScopeResourceUsage& scope) {
+ std::vector<VkBufferMemoryBarrier> bufferBarriers;
+ std::vector<VkImageMemoryBarrier> imageBarriers;
+ VkPipelineStageFlags srcStages = 0;
+ VkPipelineStageFlags dstStages = 0;
+
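+    // Accumulate the barriers for every buffer and texture in the scope so they can be submitted
+    // with a single vkCmdPipelineBarrier call below.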
+ for (size_t i = 0; i < scope.buffers.size(); ++i) {
+ Buffer* buffer = ToBackend(scope.buffers[i]);
+ buffer->EnsureDataInitialized(recordingContext);
+
+ VkBufferMemoryBarrier bufferBarrier;
+ if (buffer->TransitionUsageAndGetResourceBarrier(scope.bufferUsages[i], &bufferBarrier,
+ &srcStages, &dstStages)) {
+ bufferBarriers.push_back(bufferBarrier);
}
+ }
- class DescriptorSetTracker : public BindGroupTrackerBase<true, uint32_t> {
- public:
- DescriptorSetTracker() = default;
-
- void Apply(Device* device,
- CommandRecordingContext* recordingContext,
- VkPipelineBindPoint bindPoint) {
- BeforeApply();
- for (BindGroupIndex dirtyIndex :
- IterateBitSet(mDirtyBindGroupsObjectChangedOrIsDynamic)) {
- VkDescriptorSet set = ToBackend(mBindGroups[dirtyIndex])->GetHandle();
- const uint32_t* dynamicOffset = mDynamicOffsetCounts[dirtyIndex] > 0
- ? mDynamicOffsets[dirtyIndex].data()
- : nullptr;
- device->fn.CmdBindDescriptorSets(
- recordingContext->commandBuffer, bindPoint,
- ToBackend(mPipelineLayout)->GetHandle(), static_cast<uint32_t>(dirtyIndex),
- 1, &*set, mDynamicOffsetCounts[dirtyIndex], dynamicOffset);
- }
- AfterApply();
- }
- };
+ for (size_t i = 0; i < scope.textures.size(); ++i) {
+ Texture* texture = ToBackend(scope.textures[i]);
- // Records the necessary barriers for a synchronization scope using the resource usage
- // data pre-computed in the frontend. Also performs lazy initialization if required.
- void TransitionAndClearForSyncScope(Device* device,
- CommandRecordingContext* recordingContext,
- const SyncScopeResourceUsage& scope) {
- std::vector<VkBufferMemoryBarrier> bufferBarriers;
- std::vector<VkImageMemoryBarrier> imageBarriers;
- VkPipelineStageFlags srcStages = 0;
- VkPipelineStageFlags dstStages = 0;
-
- for (size_t i = 0; i < scope.buffers.size(); ++i) {
- Buffer* buffer = ToBackend(scope.buffers[i]);
- buffer->EnsureDataInitialized(recordingContext);
-
- VkBufferMemoryBarrier bufferBarrier;
- if (buffer->TransitionUsageAndGetResourceBarrier(
- scope.bufferUsages[i], &bufferBarrier, &srcStages, &dstStages)) {
- bufferBarriers.push_back(bufferBarrier);
+ // Clear subresources that are not render attachments. Render attachments will be
+ // cleared in RecordBeginRenderPass by setting the loadop to clear when the texture
+ // subresource has not been initialized before the render pass.
+ scope.textureUsages[i].Iterate(
+ [&](const SubresourceRange& range, wgpu::TextureUsage usage) {
+ if (usage & ~wgpu::TextureUsage::RenderAttachment) {
+ texture->EnsureSubresourceContentInitialized(recordingContext, range);
}
- }
+ });
+ texture->TransitionUsageForPass(recordingContext, scope.textureUsages[i], &imageBarriers,
+ &srcStages, &dstStages);
+ }
- for (size_t i = 0; i < scope.textures.size(); ++i) {
- Texture* texture = ToBackend(scope.textures[i]);
-
- // Clear subresources that are not render attachments. Render attachments will be
- // cleared in RecordBeginRenderPass by setting the loadop to clear when the texture
- // subresource has not been initialized before the render pass.
- scope.textureUsages[i].Iterate(
- [&](const SubresourceRange& range, wgpu::TextureUsage usage) {
- if (usage & ~wgpu::TextureUsage::RenderAttachment) {
- texture->EnsureSubresourceContentInitialized(recordingContext, range);
- }
- });
- texture->TransitionUsageForPass(recordingContext, scope.textureUsages[i],
- &imageBarriers, &srcStages, &dstStages);
- }
+ if (bufferBarriers.size() || imageBarriers.size()) {
+ device->fn.CmdPipelineBarrier(recordingContext->commandBuffer, srcStages, dstStages, 0, 0,
+ nullptr, bufferBarriers.size(), bufferBarriers.data(),
+ imageBarriers.size(), imageBarriers.data());
+ }
+}
- if (bufferBarriers.size() || imageBarriers.size()) {
- device->fn.CmdPipelineBarrier(recordingContext->commandBuffer, srcStages, dstStages,
- 0, 0, nullptr, bufferBarriers.size(),
- bufferBarriers.data(), imageBarriers.size(),
- imageBarriers.data());
- }
- }
+MaybeError RecordBeginRenderPass(CommandRecordingContext* recordingContext,
+ Device* device,
+ BeginRenderPassCmd* renderPass) {
+ VkCommandBuffer commands = recordingContext->commandBuffer;
- MaybeError RecordBeginRenderPass(CommandRecordingContext* recordingContext,
- Device* device,
- BeginRenderPassCmd* renderPass) {
- VkCommandBuffer commands = recordingContext->commandBuffer;
+ // Query a VkRenderPass from the cache
+ VkRenderPass renderPassVK = VK_NULL_HANDLE;
+ {
+ RenderPassCacheQuery query;
- // Query a VkRenderPass from the cache
- VkRenderPass renderPassVK = VK_NULL_HANDLE;
- {
- RenderPassCacheQuery query;
+ for (ColorAttachmentIndex i :
+ IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
+ const auto& attachmentInfo = renderPass->colorAttachments[i];
- for (ColorAttachmentIndex i :
- IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
- const auto& attachmentInfo = renderPass->colorAttachments[i];
+ bool hasResolveTarget = attachmentInfo.resolveTarget != nullptr;
- bool hasResolveTarget = attachmentInfo.resolveTarget != nullptr;
+ query.SetColor(i, attachmentInfo.view->GetFormat().format, attachmentInfo.loadOp,
+ attachmentInfo.storeOp, hasResolveTarget);
+ }
- query.SetColor(i, attachmentInfo.view->GetFormat().format,
- attachmentInfo.loadOp, attachmentInfo.storeOp, hasResolveTarget);
- }
+ if (renderPass->attachmentState->HasDepthStencilAttachment()) {
+ const auto& attachmentInfo = renderPass->depthStencilAttachment;
- if (renderPass->attachmentState->HasDepthStencilAttachment()) {
- const auto& attachmentInfo = renderPass->depthStencilAttachment;
+ query.SetDepthStencil(attachmentInfo.view->GetTexture()->GetFormat().format,
+ attachmentInfo.depthLoadOp, attachmentInfo.depthStoreOp,
+ attachmentInfo.stencilLoadOp, attachmentInfo.stencilStoreOp,
+ attachmentInfo.depthReadOnly || attachmentInfo.stencilReadOnly);
+ }
- query.SetDepthStencil(
- attachmentInfo.view->GetTexture()->GetFormat().format,
- attachmentInfo.depthLoadOp, attachmentInfo.depthStoreOp,
- attachmentInfo.stencilLoadOp, attachmentInfo.stencilStoreOp,
- attachmentInfo.depthReadOnly || attachmentInfo.stencilReadOnly);
- }
+ query.SetSampleCount(renderPass->attachmentState->GetSampleCount());
- query.SetSampleCount(renderPass->attachmentState->GetSampleCount());
+ DAWN_TRY_ASSIGN(renderPassVK, device->GetRenderPassCache()->GetRenderPass(query));
+ }
- DAWN_TRY_ASSIGN(renderPassVK, device->GetRenderPassCache()->GetRenderPass(query));
+ // Create a framebuffer that will be used once for the render pass and gather the clear
+ // values for the attachments at the same time.
+ std::array<VkClearValue, kMaxColorAttachments + 1> clearValues;
+ VkFramebuffer framebuffer = VK_NULL_HANDLE;
+ uint32_t attachmentCount = 0;
+ {
+ // Fill in the attachment info that will be chained in the framebuffer create info.
+ std::array<VkImageView, kMaxColorAttachments * 2 + 1> attachments;
+
+ for (ColorAttachmentIndex i :
+ IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
+ auto& attachmentInfo = renderPass->colorAttachments[i];
+ TextureView* view = ToBackend(attachmentInfo.view.Get());
+ if (view == nullptr) {
+ continue;
}
- // Create a framebuffer that will be used once for the render pass and gather the clear
- // values for the attachments at the same time.
- std::array<VkClearValue, kMaxColorAttachments + 1> clearValues;
- VkFramebuffer framebuffer = VK_NULL_HANDLE;
- uint32_t attachmentCount = 0;
- {
- // Fill in the attachment info that will be chained in the framebuffer create info.
- std::array<VkImageView, kMaxColorAttachments * 2 + 1> attachments;
-
- for (ColorAttachmentIndex i :
- IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
- auto& attachmentInfo = renderPass->colorAttachments[i];
- TextureView* view = ToBackend(attachmentInfo.view.Get());
- if (view == nullptr) {
- continue;
- }
+ attachments[attachmentCount] = view->GetHandle();
- attachments[attachmentCount] = view->GetHandle();
-
- switch (view->GetFormat().GetAspectInfo(Aspect::Color).baseType) {
- case wgpu::TextureComponentType::Float: {
- const std::array<float, 4> appliedClearColor =
- ConvertToFloatColor(attachmentInfo.clearColor);
- for (uint32_t i = 0; i < 4; ++i) {
- clearValues[attachmentCount].color.float32[i] =
- appliedClearColor[i];
- }
- break;
- }
- case wgpu::TextureComponentType::Uint: {
- const std::array<uint32_t, 4> appliedClearColor =
- ConvertToUnsignedIntegerColor(attachmentInfo.clearColor);
- for (uint32_t i = 0; i < 4; ++i) {
- clearValues[attachmentCount].color.uint32[i] = appliedClearColor[i];
- }
- break;
- }
- case wgpu::TextureComponentType::Sint: {
- const std::array<int32_t, 4> appliedClearColor =
- ConvertToSignedIntegerColor(attachmentInfo.clearColor);
- for (uint32_t i = 0; i < 4; ++i) {
- clearValues[attachmentCount].color.int32[i] = appliedClearColor[i];
- }
- break;
- }
-
- case wgpu::TextureComponentType::DepthComparison:
- UNREACHABLE();
+ switch (view->GetFormat().GetAspectInfo(Aspect::Color).baseType) {
+ case wgpu::TextureComponentType::Float: {
+ const std::array<float, 4> appliedClearColor =
+ ConvertToFloatColor(attachmentInfo.clearColor);
+ for (uint32_t i = 0; i < 4; ++i) {
+ clearValues[attachmentCount].color.float32[i] = appliedClearColor[i];
}
- attachmentCount++;
+ break;
}
-
- if (renderPass->attachmentState->HasDepthStencilAttachment()) {
- auto& attachmentInfo = renderPass->depthStencilAttachment;
- TextureView* view = ToBackend(attachmentInfo.view.Get());
-
- attachments[attachmentCount] = view->GetHandle();
-
- clearValues[attachmentCount].depthStencil.depth = attachmentInfo.clearDepth;
- clearValues[attachmentCount].depthStencil.stencil = attachmentInfo.clearStencil;
-
- attachmentCount++;
+ case wgpu::TextureComponentType::Uint: {
+ const std::array<uint32_t, 4> appliedClearColor =
+ ConvertToUnsignedIntegerColor(attachmentInfo.clearColor);
+ for (uint32_t i = 0; i < 4; ++i) {
+ clearValues[attachmentCount].color.uint32[i] = appliedClearColor[i];
+ }
+ break;
}
-
- for (ColorAttachmentIndex i :
- IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
- if (renderPass->colorAttachments[i].resolveTarget != nullptr) {
- TextureView* view =
- ToBackend(renderPass->colorAttachments[i].resolveTarget.Get());
-
- attachments[attachmentCount] = view->GetHandle();
-
- attachmentCount++;
+ case wgpu::TextureComponentType::Sint: {
+ const std::array<int32_t, 4> appliedClearColor =
+ ConvertToSignedIntegerColor(attachmentInfo.clearColor);
+ for (uint32_t i = 0; i < 4; ++i) {
+ clearValues[attachmentCount].color.int32[i] = appliedClearColor[i];
}
+ break;
}
- // Chain attachments and create the framebuffer
- VkFramebufferCreateInfo createInfo;
- createInfo.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
- createInfo.pNext = nullptr;
- createInfo.flags = 0;
- createInfo.renderPass = renderPassVK;
- createInfo.attachmentCount = attachmentCount;
- createInfo.pAttachments = AsVkArray(attachments.data());
- createInfo.width = renderPass->width;
- createInfo.height = renderPass->height;
- createInfo.layers = 1;
-
- DAWN_TRY(
- CheckVkSuccess(device->fn.CreateFramebuffer(device->GetVkDevice(), &createInfo,
- nullptr, &*framebuffer),
- "CreateFramebuffer"));
-
- // We don't reuse VkFramebuffers so mark the framebuffer for deletion as soon as the
- // commands currently being recorded are finished.
- device->GetFencedDeleter()->DeleteWhenUnused(framebuffer);
+ case wgpu::TextureComponentType::DepthComparison:
+ UNREACHABLE();
}
-
- VkRenderPassBeginInfo beginInfo;
- beginInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
- beginInfo.pNext = nullptr;
- beginInfo.renderPass = renderPassVK;
- beginInfo.framebuffer = framebuffer;
- beginInfo.renderArea.offset.x = 0;
- beginInfo.renderArea.offset.y = 0;
- beginInfo.renderArea.extent.width = renderPass->width;
- beginInfo.renderArea.extent.height = renderPass->height;
- beginInfo.clearValueCount = attachmentCount;
- beginInfo.pClearValues = clearValues.data();
-
- device->fn.CmdBeginRenderPass(commands, &beginInfo, VK_SUBPASS_CONTENTS_INLINE);
-
- return {};
+ attachmentCount++;
}
- // Reset the query sets used on render pass because the reset command must be called outside
- // render pass.
- void ResetUsedQuerySetsOnRenderPass(Device* device,
- VkCommandBuffer commands,
- QuerySetBase* querySet,
- const std::vector<bool>& availability) {
- ASSERT(availability.size() == querySet->GetQueryAvailability().size());
-
- auto currentIt = availability.begin();
- auto lastIt = availability.end();
- // Traverse the used queries which availability are true.
- while (currentIt != lastIt) {
- auto firstTrueIt = std::find(currentIt, lastIt, true);
- // No used queries need to be reset
- if (firstTrueIt == lastIt) {
- break;
- }
+ if (renderPass->attachmentState->HasDepthStencilAttachment()) {
+ auto& attachmentInfo = renderPass->depthStencilAttachment;
+ TextureView* view = ToBackend(attachmentInfo.view.Get());
- auto nextFalseIt = std::find(firstTrueIt, lastIt, false);
+ attachments[attachmentCount] = view->GetHandle();
- uint32_t queryIndex = std::distance(availability.begin(), firstTrueIt);
- uint32_t queryCount = std::distance(firstTrueIt, nextFalseIt);
+ clearValues[attachmentCount].depthStencil.depth = attachmentInfo.clearDepth;
+ clearValues[attachmentCount].depthStencil.stencil = attachmentInfo.clearStencil;
+
+ attachmentCount++;
+ }
- // Reset the queries between firstTrueIt and nextFalseIt (which is at most
- // lastIt)
- device->fn.CmdResetQueryPool(commands, ToBackend(querySet)->GetHandle(), queryIndex,
- queryCount);
+ for (ColorAttachmentIndex i :
+ IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
+ if (renderPass->colorAttachments[i].resolveTarget != nullptr) {
+ TextureView* view = ToBackend(renderPass->colorAttachments[i].resolveTarget.Get());
- // Set current iterator to next false
- currentIt = nextFalseIt;
+ attachments[attachmentCount] = view->GetHandle();
+
+ attachmentCount++;
}
}
- void RecordWriteTimestampCmd(CommandRecordingContext* recordingContext,
- Device* device,
- WriteTimestampCmd* cmd) {
- VkCommandBuffer commands = recordingContext->commandBuffer;
- QuerySet* querySet = ToBackend(cmd->querySet.Get());
+ // Chain attachments and create the framebuffer
+ VkFramebufferCreateInfo createInfo;
+ createInfo.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
+ createInfo.pNext = nullptr;
+ createInfo.flags = 0;
+ createInfo.renderPass = renderPassVK;
+ createInfo.attachmentCount = attachmentCount;
+ createInfo.pAttachments = AsVkArray(attachments.data());
+ createInfo.width = renderPass->width;
+ createInfo.height = renderPass->height;
+ createInfo.layers = 1;
+
+ DAWN_TRY(CheckVkSuccess(device->fn.CreateFramebuffer(device->GetVkDevice(), &createInfo,
+ nullptr, &*framebuffer),
+ "CreateFramebuffer"));
+
+    // We don't reuse VkFramebuffers, so mark the framebuffer for deletion as soon as the
+    // commands currently being recorded are finished.
+ device->GetFencedDeleter()->DeleteWhenUnused(framebuffer);
+ }
- device->fn.CmdWriteTimestamp(commands, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
- querySet->GetHandle(), cmd->queryIndex);
+ VkRenderPassBeginInfo beginInfo;
+ beginInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
+ beginInfo.pNext = nullptr;
+ beginInfo.renderPass = renderPassVK;
+ beginInfo.framebuffer = framebuffer;
+ beginInfo.renderArea.offset.x = 0;
+ beginInfo.renderArea.offset.y = 0;
+ beginInfo.renderArea.extent.width = renderPass->width;
+ beginInfo.renderArea.extent.height = renderPass->height;
+ beginInfo.clearValueCount = attachmentCount;
+ beginInfo.pClearValues = clearValues.data();
+
+ device->fn.CmdBeginRenderPass(commands, &beginInfo, VK_SUBPASS_CONTENTS_INLINE);
+
+ return {};
+}
+
+// Reset the query sets used on the render pass, because the reset command must be called
+// outside of a render pass.
+void ResetUsedQuerySetsOnRenderPass(Device* device,
+ VkCommandBuffer commands,
+ QuerySetBase* querySet,
+ const std::vector<bool>& availability) {
+ ASSERT(availability.size() == querySet->GetQueryAvailability().size());
+
+ auto currentIt = availability.begin();
+ auto lastIt = availability.end();
+    // Traverse the used queries, i.e. those whose availability flag is true.
+ while (currentIt != lastIt) {
+ auto firstTrueIt = std::find(currentIt, lastIt, true);
+ // No used queries need to be reset
+ if (firstTrueIt == lastIt) {
+ break;
}
- void RecordResolveQuerySetCmd(VkCommandBuffer commands,
- Device* device,
- QuerySet* querySet,
- uint32_t firstQuery,
- uint32_t queryCount,
- Buffer* destination,
- uint64_t destinationOffset) {
- const std::vector<bool>& availability = querySet->GetQueryAvailability();
-
- auto currentIt = availability.begin() + firstQuery;
- auto lastIt = availability.begin() + firstQuery + queryCount;
-
- // Traverse available queries in the range of [firstQuery, firstQuery + queryCount - 1]
- while (currentIt != lastIt) {
- auto firstTrueIt = std::find(currentIt, lastIt, true);
- // No available query found for resolving
- if (firstTrueIt == lastIt) {
- break;
- }
- auto nextFalseIt = std::find(firstTrueIt, lastIt, false);
-
- // The query index of firstTrueIt where the resolving starts
- uint32_t resolveQueryIndex = std::distance(availability.begin(), firstTrueIt);
- // The queries count between firstTrueIt and nextFalseIt need to be resolved
- uint32_t resolveQueryCount = std::distance(firstTrueIt, nextFalseIt);
+ auto nextFalseIt = std::find(firstTrueIt, lastIt, false);
- // Calculate destinationOffset based on the current resolveQueryIndex and firstQuery
- uint32_t resolveDestinationOffset =
- destinationOffset + (resolveQueryIndex - firstQuery) * sizeof(uint64_t);
+ uint32_t queryIndex = std::distance(availability.begin(), firstTrueIt);
+ uint32_t queryCount = std::distance(firstTrueIt, nextFalseIt);
- // Resolve the queries between firstTrueIt and nextFalseIt (which is at most lastIt)
- device->fn.CmdCopyQueryPoolResults(
- commands, querySet->GetHandle(), resolveQueryIndex, resolveQueryCount,
- destination->GetHandle(), resolveDestinationOffset, sizeof(uint64_t),
- VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT);
+ // Reset the queries between firstTrueIt and nextFalseIt (which is at most
+ // lastIt)
+ device->fn.CmdResetQueryPool(commands, ToBackend(querySet)->GetHandle(), queryIndex,
+ queryCount);
- // Set current iterator to next false
- currentIt = nextFalseIt;
- }
+ // Set current iterator to next false
+ currentIt = nextFalseIt;
+ }
+}
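
[Editorial note, not part of the patch] ResetUsedQuerySetsOnRenderPass above (and
RecordResolveQuerySetCmd below) walk the query availability mask and turn each contiguous
run of true values into one (index, count) range. A minimal standalone C++ sketch of that
run-splitting traversal, assuming a plain std::vector<bool> in place of a real QuerySet:

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    int main() {
        std::vector<bool> availability = {false, true, true, false, true};

        auto currentIt = availability.begin();
        const auto lastIt = availability.end();
        while (currentIt != lastIt) {
            // Find the next run of used (true) queries.
            auto firstTrueIt = std::find(currentIt, lastIt, true);
            if (firstTrueIt == lastIt) {
                break;  // no more used queries
            }
            auto nextFalseIt = std::find(firstTrueIt, lastIt, false);

            // One contiguous run of used queries becomes one (index, count) range.
            uint32_t queryIndex = static_cast<uint32_t>(std::distance(availability.begin(), firstTrueIt));
            uint32_t queryCount = static_cast<uint32_t>(std::distance(firstTrueIt, nextFalseIt));
            std::printf("range [%u, %u)\n", queryIndex, queryIndex + queryCount);

            currentIt = nextFalseIt;
        }
        return 0;  // prints "range [1, 3)" and "range [4, 5)"
    }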
+
+void RecordWriteTimestampCmd(CommandRecordingContext* recordingContext,
+ Device* device,
+ WriteTimestampCmd* cmd) {
+ VkCommandBuffer commands = recordingContext->commandBuffer;
+ QuerySet* querySet = ToBackend(cmd->querySet.Get());
+
+ device->fn.CmdWriteTimestamp(commands, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
+ querySet->GetHandle(), cmd->queryIndex);
+}
+
+void RecordResolveQuerySetCmd(VkCommandBuffer commands,
+ Device* device,
+ QuerySet* querySet,
+ uint32_t firstQuery,
+ uint32_t queryCount,
+ Buffer* destination,
+ uint64_t destinationOffset) {
+ const std::vector<bool>& availability = querySet->GetQueryAvailability();
+
+ auto currentIt = availability.begin() + firstQuery;
+ auto lastIt = availability.begin() + firstQuery + queryCount;
+
+ // Traverse available queries in the range of [firstQuery, firstQuery + queryCount - 1]
+ while (currentIt != lastIt) {
+ auto firstTrueIt = std::find(currentIt, lastIt, true);
+ // No available query found for resolving
+ if (firstTrueIt == lastIt) {
+ break;
}
+ auto nextFalseIt = std::find(firstTrueIt, lastIt, false);
- } // anonymous namespace
+        // The query index at firstTrueIt, where resolving starts
+ uint32_t resolveQueryIndex = std::distance(availability.begin(), firstTrueIt);
+        // The number of queries between firstTrueIt and nextFalseIt that need to be resolved
+ uint32_t resolveQueryCount = std::distance(firstTrueIt, nextFalseIt);
- // static
- Ref<CommandBuffer> CommandBuffer::Create(CommandEncoder* encoder,
- const CommandBufferDescriptor* descriptor) {
- return AcquireRef(new CommandBuffer(encoder, descriptor));
- }
+ // Calculate destinationOffset based on the current resolveQueryIndex and firstQuery
+ uint32_t resolveDestinationOffset =
+ destinationOffset + (resolveQueryIndex - firstQuery) * sizeof(uint64_t);
- CommandBuffer::CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor)
- : CommandBufferBase(encoder, descriptor) {
- }
+ // Resolve the queries between firstTrueIt and nextFalseIt (which is at most lastIt)
+ device->fn.CmdCopyQueryPoolResults(commands, querySet->GetHandle(), resolveQueryIndex,
+ resolveQueryCount, destination->GetHandle(),
+ resolveDestinationOffset, sizeof(uint64_t),
+ VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT);
- MaybeError CommandBuffer::RecordCopyImageWithTemporaryBuffer(
- CommandRecordingContext* recordingContext,
- const TextureCopy& srcCopy,
- const TextureCopy& dstCopy,
- const Extent3D& copySize) {
- ASSERT(srcCopy.texture->GetFormat().CopyCompatibleWith(dstCopy.texture->GetFormat()));
- ASSERT(srcCopy.aspect == dstCopy.aspect);
- dawn::native::Format format = srcCopy.texture->GetFormat();
- const TexelBlockInfo& blockInfo = format.GetAspectInfo(srcCopy.aspect).block;
- ASSERT(copySize.width % blockInfo.width == 0);
- uint32_t widthInBlocks = copySize.width / blockInfo.width;
- ASSERT(copySize.height % blockInfo.height == 0);
- uint32_t heightInBlocks = copySize.height / blockInfo.height;
-
- // Create the temporary buffer. Note that We don't need to respect WebGPU's 256 alignment
- // because it isn't a hard constraint in Vulkan.
- uint64_t tempBufferSize =
- widthInBlocks * heightInBlocks * copySize.depthOrArrayLayers * blockInfo.byteSize;
- BufferDescriptor tempBufferDescriptor;
- tempBufferDescriptor.size = tempBufferSize;
- tempBufferDescriptor.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
-
- Device* device = ToBackend(GetDevice());
- Ref<BufferBase> tempBufferBase;
- DAWN_TRY_ASSIGN(tempBufferBase, device->CreateBuffer(&tempBufferDescriptor));
- Buffer* tempBuffer = ToBackend(tempBufferBase.Get());
-
- BufferCopy tempBufferCopy;
- tempBufferCopy.buffer = tempBuffer;
- tempBufferCopy.rowsPerImage = heightInBlocks;
- tempBufferCopy.offset = 0;
- tempBufferCopy.bytesPerRow = copySize.width / blockInfo.width * blockInfo.byteSize;
-
- VkCommandBuffer commands = recordingContext->commandBuffer;
- VkImage srcImage = ToBackend(srcCopy.texture)->GetHandle();
- VkImage dstImage = ToBackend(dstCopy.texture)->GetHandle();
-
- tempBuffer->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopyDst);
- VkBufferImageCopy srcToTempBufferRegion =
- ComputeBufferImageCopyRegion(tempBufferCopy, srcCopy, copySize);
-
- // The Dawn CopySrc usage is always mapped to GENERAL
- device->fn.CmdCopyImageToBuffer(commands, srcImage, VK_IMAGE_LAYOUT_GENERAL,
- tempBuffer->GetHandle(), 1, &srcToTempBufferRegion);
-
- tempBuffer->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopySrc);
- VkBufferImageCopy tempBufferToDstRegion =
- ComputeBufferImageCopyRegion(tempBufferCopy, dstCopy, copySize);
-
- // Dawn guarantees dstImage be in the TRANSFER_DST_OPTIMAL layout after the
- // copy command.
- device->fn.CmdCopyBufferToImage(commands, tempBuffer->GetHandle(), dstImage,
- VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1,
- &tempBufferToDstRegion);
-
- recordingContext->tempBuffers.emplace_back(tempBuffer);
-
- return {};
+ // Set current iterator to next false
+ currentIt = nextFalseIt;
}
+}
+
+} // anonymous namespace
+
+// static
+Ref<CommandBuffer> CommandBuffer::Create(CommandEncoder* encoder,
+ const CommandBufferDescriptor* descriptor) {
+ return AcquireRef(new CommandBuffer(encoder, descriptor));
+}
+
+CommandBuffer::CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor)
+ : CommandBufferBase(encoder, descriptor) {}
+
+MaybeError CommandBuffer::RecordCopyImageWithTemporaryBuffer(
+ CommandRecordingContext* recordingContext,
+ const TextureCopy& srcCopy,
+ const TextureCopy& dstCopy,
+ const Extent3D& copySize) {
+ ASSERT(srcCopy.texture->GetFormat().CopyCompatibleWith(dstCopy.texture->GetFormat()));
+ ASSERT(srcCopy.aspect == dstCopy.aspect);
+ dawn::native::Format format = srcCopy.texture->GetFormat();
+ const TexelBlockInfo& blockInfo = format.GetAspectInfo(srcCopy.aspect).block;
+ ASSERT(copySize.width % blockInfo.width == 0);
+ uint32_t widthInBlocks = copySize.width / blockInfo.width;
+ ASSERT(copySize.height % blockInfo.height == 0);
+ uint32_t heightInBlocks = copySize.height / blockInfo.height;
+
+    // Create the temporary buffer. Note that we don't need to respect WebGPU's 256-byte
+    // alignment requirement because it isn't a hard constraint in Vulkan.
+ uint64_t tempBufferSize =
+ widthInBlocks * heightInBlocks * copySize.depthOrArrayLayers * blockInfo.byteSize;
+ BufferDescriptor tempBufferDescriptor;
+ tempBufferDescriptor.size = tempBufferSize;
+ tempBufferDescriptor.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
+
+ Device* device = ToBackend(GetDevice());
+ Ref<BufferBase> tempBufferBase;
+ DAWN_TRY_ASSIGN(tempBufferBase, device->CreateBuffer(&tempBufferDescriptor));
+ Buffer* tempBuffer = ToBackend(tempBufferBase.Get());
+
+ BufferCopy tempBufferCopy;
+ tempBufferCopy.buffer = tempBuffer;
+ tempBufferCopy.rowsPerImage = heightInBlocks;
+ tempBufferCopy.offset = 0;
+ tempBufferCopy.bytesPerRow = copySize.width / blockInfo.width * blockInfo.byteSize;
+
+ VkCommandBuffer commands = recordingContext->commandBuffer;
+ VkImage srcImage = ToBackend(srcCopy.texture)->GetHandle();
+ VkImage dstImage = ToBackend(dstCopy.texture)->GetHandle();
+
+ tempBuffer->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopyDst);
+ VkBufferImageCopy srcToTempBufferRegion =
+ ComputeBufferImageCopyRegion(tempBufferCopy, srcCopy, copySize);
+
+ // The Dawn CopySrc usage is always mapped to GENERAL
+ device->fn.CmdCopyImageToBuffer(commands, srcImage, VK_IMAGE_LAYOUT_GENERAL,
+ tempBuffer->GetHandle(), 1, &srcToTempBufferRegion);
+
+ tempBuffer->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopySrc);
+ VkBufferImageCopy tempBufferToDstRegion =
+ ComputeBufferImageCopyRegion(tempBufferCopy, dstCopy, copySize);
+
+    // Dawn guarantees that dstImage is in the TRANSFER_DST_OPTIMAL layout after the
+    // copy command.
+ device->fn.CmdCopyBufferToImage(commands, tempBuffer->GetHandle(), dstImage,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1,
+ &tempBufferToDstRegion);
+
+ recordingContext->tempBuffers.emplace_back(tempBuffer);
+
+ return {};
+}
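
[Editorial note, not part of the patch] RecordCopyImageWithTemporaryBuffer above sizes its
staging buffer in texel blocks rather than texels. A small worked sketch of that arithmetic,
assuming a hypothetical BC1-style block-compressed format (4x4 texel blocks, 8 bytes per
block) and a 16x16x1 copy extent:

    #include <cassert>
    #include <cstdint>

    int main() {
        const uint32_t blockWidth = 4, blockHeight = 4, blockByteSize = 8;  // BC1-style block
        const uint32_t copyWidth = 16, copyHeight = 16, copyLayers = 1;

        const uint32_t widthInBlocks = copyWidth / blockWidth;    // 4
        const uint32_t heightInBlocks = copyHeight / blockHeight; // 4

        // Same formulas as tempBufferSize and tempBufferCopy.bytesPerRow above.
        const uint64_t tempBufferSize =
            uint64_t{widthInBlocks} * heightInBlocks * copyLayers * blockByteSize;  // 128 bytes
        const uint32_t bytesPerRow = widthInBlocks * blockByteSize;                 // 32 bytes

        assert(tempBufferSize == 128 && bytesPerRow == 32);
        return 0;
    }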
+
+MaybeError CommandBuffer::RecordCommands(CommandRecordingContext* recordingContext) {
+ Device* device = ToBackend(GetDevice());
+ VkCommandBuffer commands = recordingContext->commandBuffer;
+
+    // Records the barriers needed for the resource usages pre-computed by the frontend,
+    // and resets the query sets that are used on the render pass.
+ auto PrepareResourcesForRenderPass = [](Device* device,
+ CommandRecordingContext* recordingContext,
+ const RenderPassResourceUsage& usages) {
+ TransitionAndClearForSyncScope(device, recordingContext, usages);
+
+        // Reset all query sets used on the current render pass before it begins, because
+        // the reset command must be called outside of a render pass.
+ for (size_t i = 0; i < usages.querySets.size(); ++i) {
+ ResetUsedQuerySetsOnRenderPass(device, recordingContext->commandBuffer,
+ usages.querySets[i], usages.queryAvailabilities[i]);
+ }
+ };
+
+ size_t nextComputePassNumber = 0;
+ size_t nextRenderPassNumber = 0;
+
+ Command type;
+ while (mCommands.NextCommandId(&type)) {
+ switch (type) {
+ case Command::CopyBufferToBuffer: {
+ CopyBufferToBufferCmd* copy = mCommands.NextCommand<CopyBufferToBufferCmd>();
+ if (copy->size == 0) {
+ // Skip no-op copies.
+ break;
+ }
- MaybeError CommandBuffer::RecordCommands(CommandRecordingContext* recordingContext) {
- Device* device = ToBackend(GetDevice());
- VkCommandBuffer commands = recordingContext->commandBuffer;
-
- // Records the necessary barriers for the resource usage pre-computed by the frontend.
- // And resets the used query sets which are rewritten on the render pass.
- auto PrepareResourcesForRenderPass = [](Device* device,
- CommandRecordingContext* recordingContext,
- const RenderPassResourceUsage& usages) {
- TransitionAndClearForSyncScope(device, recordingContext, usages);
-
- // Reset all query set used on current render pass together before beginning render pass
- // because the reset command must be called outside render pass
- for (size_t i = 0; i < usages.querySets.size(); ++i) {
- ResetUsedQuerySetsOnRenderPass(device, recordingContext->commandBuffer,
- usages.querySets[i], usages.queryAvailabilities[i]);
- }
- };
-
- size_t nextComputePassNumber = 0;
- size_t nextRenderPassNumber = 0;
-
- Command type;
- while (mCommands.NextCommandId(&type)) {
- switch (type) {
- case Command::CopyBufferToBuffer: {
- CopyBufferToBufferCmd* copy = mCommands.NextCommand<CopyBufferToBufferCmd>();
- if (copy->size == 0) {
- // Skip no-op copies.
- break;
- }
+ Buffer* srcBuffer = ToBackend(copy->source.Get());
+ Buffer* dstBuffer = ToBackend(copy->destination.Get());
- Buffer* srcBuffer = ToBackend(copy->source.Get());
- Buffer* dstBuffer = ToBackend(copy->destination.Get());
+ srcBuffer->EnsureDataInitialized(recordingContext);
+ dstBuffer->EnsureDataInitializedAsDestination(recordingContext,
+ copy->destinationOffset, copy->size);
- srcBuffer->EnsureDataInitialized(recordingContext);
- dstBuffer->EnsureDataInitializedAsDestination(
- recordingContext, copy->destinationOffset, copy->size);
+ srcBuffer->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopySrc);
+ dstBuffer->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopyDst);
- srcBuffer->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopySrc);
- dstBuffer->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopyDst);
+ VkBufferCopy region;
+ region.srcOffset = copy->sourceOffset;
+ region.dstOffset = copy->destinationOffset;
+ region.size = copy->size;
- VkBufferCopy region;
- region.srcOffset = copy->sourceOffset;
- region.dstOffset = copy->destinationOffset;
- region.size = copy->size;
+ VkBuffer srcHandle = srcBuffer->GetHandle();
+ VkBuffer dstHandle = dstBuffer->GetHandle();
+ device->fn.CmdCopyBuffer(commands, srcHandle, dstHandle, 1, &region);
+ break;
+ }
- VkBuffer srcHandle = srcBuffer->GetHandle();
- VkBuffer dstHandle = dstBuffer->GetHandle();
- device->fn.CmdCopyBuffer(commands, srcHandle, dstHandle, 1, &region);
- break;
+ case Command::CopyBufferToTexture: {
+ CopyBufferToTextureCmd* copy = mCommands.NextCommand<CopyBufferToTextureCmd>();
+ if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
+ copy->copySize.depthOrArrayLayers == 0) {
+ // Skip no-op copies.
+ continue;
}
+ auto& src = copy->source;
+ auto& dst = copy->destination;
- case Command::CopyBufferToTexture: {
- CopyBufferToTextureCmd* copy = mCommands.NextCommand<CopyBufferToTextureCmd>();
- if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
- copy->copySize.depthOrArrayLayers == 0) {
- // Skip no-op copies.
- continue;
- }
- auto& src = copy->source;
- auto& dst = copy->destination;
-
- ToBackend(src.buffer)->EnsureDataInitialized(recordingContext);
+ ToBackend(src.buffer)->EnsureDataInitialized(recordingContext);
- VkBufferImageCopy region =
- ComputeBufferImageCopyRegion(src, dst, copy->copySize);
- VkImageSubresourceLayers subresource = region.imageSubresource;
+ VkBufferImageCopy region = ComputeBufferImageCopyRegion(src, dst, copy->copySize);
+ VkImageSubresourceLayers subresource = region.imageSubresource;
- SubresourceRange range =
- GetSubresourcesAffectedByCopy(copy->destination, copy->copySize);
+ SubresourceRange range =
+ GetSubresourcesAffectedByCopy(copy->destination, copy->copySize);
- if (IsCompleteSubresourceCopiedTo(dst.texture.Get(), copy->copySize,
- subresource.mipLevel)) {
- // Since texture has been overwritten, it has been "initialized"
- dst.texture->SetIsSubresourceContentInitialized(true, range);
- } else {
- ToBackend(dst.texture)
- ->EnsureSubresourceContentInitialized(recordingContext, range);
- }
- ToBackend(src.buffer)
- ->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopySrc);
+ if (IsCompleteSubresourceCopiedTo(dst.texture.Get(), copy->copySize,
+ subresource.mipLevel)) {
+                    // Since the texture has been overwritten, it is considered "initialized"
+ dst.texture->SetIsSubresourceContentInitialized(true, range);
+ } else {
ToBackend(dst.texture)
- ->TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopyDst, range);
- VkBuffer srcBuffer = ToBackend(src.buffer)->GetHandle();
- VkImage dstImage = ToBackend(dst.texture)->GetHandle();
+ ->EnsureSubresourceContentInitialized(recordingContext, range);
+ }
+ ToBackend(src.buffer)
+ ->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopySrc);
+ ToBackend(dst.texture)
+ ->TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopyDst, range);
+ VkBuffer srcBuffer = ToBackend(src.buffer)->GetHandle();
+ VkImage dstImage = ToBackend(dst.texture)->GetHandle();
+
+                // Dawn guarantees that dstImage is in the TRANSFER_DST_OPTIMAL layout after the
+                // copy command.
+ device->fn.CmdCopyBufferToImage(commands, srcBuffer, dstImage,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
+ break;
+ }
- // Dawn guarantees dstImage be in the TRANSFER_DST_OPTIMAL layout after the
- // copy command.
- device->fn.CmdCopyBufferToImage(commands, srcBuffer, dstImage,
- VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1,
- &region);
- break;
+ case Command::CopyTextureToBuffer: {
+ CopyTextureToBufferCmd* copy = mCommands.NextCommand<CopyTextureToBufferCmd>();
+ if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
+ copy->copySize.depthOrArrayLayers == 0) {
+ // Skip no-op copies.
+ continue;
}
+ auto& src = copy->source;
+ auto& dst = copy->destination;
- case Command::CopyTextureToBuffer: {
- CopyTextureToBufferCmd* copy = mCommands.NextCommand<CopyTextureToBufferCmd>();
- if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
- copy->copySize.depthOrArrayLayers == 0) {
- // Skip no-op copies.
- continue;
- }
- auto& src = copy->source;
- auto& dst = copy->destination;
+ ToBackend(dst.buffer)->EnsureDataInitializedAsDestination(recordingContext, copy);
- ToBackend(dst.buffer)
- ->EnsureDataInitializedAsDestination(recordingContext, copy);
+ VkBufferImageCopy region = ComputeBufferImageCopyRegion(dst, src, copy->copySize);
- VkBufferImageCopy region =
- ComputeBufferImageCopyRegion(dst, src, copy->copySize);
+ SubresourceRange range =
+ GetSubresourcesAffectedByCopy(copy->source, copy->copySize);
- SubresourceRange range =
- GetSubresourcesAffectedByCopy(copy->source, copy->copySize);
+ ToBackend(src.texture)
+ ->EnsureSubresourceContentInitialized(recordingContext, range);
- ToBackend(src.texture)
- ->EnsureSubresourceContentInitialized(recordingContext, range);
+ ToBackend(src.texture)
+ ->TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopySrc, range);
+ ToBackend(dst.buffer)
+ ->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopyDst);
- ToBackend(src.texture)
- ->TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopySrc, range);
- ToBackend(dst.buffer)
- ->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopyDst);
+ VkImage srcImage = ToBackend(src.texture)->GetHandle();
+ VkBuffer dstBuffer = ToBackend(dst.buffer)->GetHandle();
+ // The Dawn CopySrc usage is always mapped to GENERAL
+ device->fn.CmdCopyImageToBuffer(commands, srcImage, VK_IMAGE_LAYOUT_GENERAL,
+ dstBuffer, 1, &region);
+ break;
+ }
- VkImage srcImage = ToBackend(src.texture)->GetHandle();
- VkBuffer dstBuffer = ToBackend(dst.buffer)->GetHandle();
- // The Dawn CopySrc usage is always mapped to GENERAL
- device->fn.CmdCopyImageToBuffer(commands, srcImage, VK_IMAGE_LAYOUT_GENERAL,
- dstBuffer, 1, &region);
- break;
+ case Command::CopyTextureToTexture: {
+ CopyTextureToTextureCmd* copy = mCommands.NextCommand<CopyTextureToTextureCmd>();
+ if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
+ copy->copySize.depthOrArrayLayers == 0) {
+ // Skip no-op copies.
+ continue;
+ }
+ TextureCopy& src = copy->source;
+ TextureCopy& dst = copy->destination;
+ SubresourceRange srcRange = GetSubresourcesAffectedByCopy(src, copy->copySize);
+ SubresourceRange dstRange = GetSubresourcesAffectedByCopy(dst, copy->copySize);
+
+ ToBackend(src.texture)
+ ->EnsureSubresourceContentInitialized(recordingContext, srcRange);
+ if (IsCompleteSubresourceCopiedTo(dst.texture.Get(), copy->copySize,
+ dst.mipLevel)) {
+                    // Since the destination texture has been overwritten, it is considered "initialized"
+ dst.texture->SetIsSubresourceContentInitialized(true, dstRange);
+ } else {
+ ToBackend(dst.texture)
+ ->EnsureSubresourceContentInitialized(recordingContext, dstRange);
}
- case Command::CopyTextureToTexture: {
- CopyTextureToTextureCmd* copy =
- mCommands.NextCommand<CopyTextureToTextureCmd>();
- if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
- copy->copySize.depthOrArrayLayers == 0) {
- // Skip no-op copies.
- continue;
- }
- TextureCopy& src = copy->source;
- TextureCopy& dst = copy->destination;
- SubresourceRange srcRange = GetSubresourcesAffectedByCopy(src, copy->copySize);
- SubresourceRange dstRange = GetSubresourcesAffectedByCopy(dst, copy->copySize);
-
- ToBackend(src.texture)
- ->EnsureSubresourceContentInitialized(recordingContext, srcRange);
- if (IsCompleteSubresourceCopiedTo(dst.texture.Get(), copy->copySize,
- dst.mipLevel)) {
- // Since destination texture has been overwritten, it has been "initialized"
- dst.texture->SetIsSubresourceContentInitialized(true, dstRange);
- } else {
- ToBackend(dst.texture)
- ->EnsureSubresourceContentInitialized(recordingContext, dstRange);
- }
+ if (src.texture.Get() == dst.texture.Get() && src.mipLevel == dst.mipLevel) {
+                    // If subresources overlapped, the layouts of the overlapped subresources
+                    // would all have to be GENERAL instead of what we set here. Copying between
+                    // overlapped subresources is currently not allowed, but we keep this ASSERT
+                    // as a reminder of that possible misuse.
+ ASSERT(!IsRangeOverlapped(src.origin.z, dst.origin.z,
+ copy->copySize.depthOrArrayLayers));
+ }
- if (src.texture.Get() == dst.texture.Get() && src.mipLevel == dst.mipLevel) {
- // When there are overlapped subresources, the layout of the overlapped
- // subresources should all be GENERAL instead of what we set now. Currently
- // it is not allowed to copy with overlapped subresources, but we still
- // add the ASSERT here as a reminder for this possible misuse.
- ASSERT(!IsRangeOverlapped(src.origin.z, dst.origin.z,
- copy->copySize.depthOrArrayLayers));
- }
+ ToBackend(src.texture)
+ ->TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopySrc, srcRange);
+ ToBackend(dst.texture)
+ ->TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopyDst, dstRange);
+
+                // In some situations we cannot do a texture-to-texture copy with vkCmdCopyImage,
+                // because the Vulkan spec always validates image copies against the virtual size
+                // of the image subresource: when an extent fits in the copy region of one
+                // subresource but not in that of the other, there is no extent that satisfies the
+                // requirements of both the source and the destination subresource. For example,
+                // when the source is the first level of a 16x16 texture in a BC format and the
+                // destination is the third level of a 60x60 texture in the same format, neither
+                // 16x16 nor 15x15 is a valid extent for vkCmdCopyImage.
+                // Our workaround is to replace the texture-to-texture copy with a
+                // texture-to-buffer copy followed by a buffer-to-texture copy.
+ bool copyUsingTemporaryBuffer =
+ device->IsToggleEnabled(
+ Toggle::UseTemporaryBufferInCompressedTextureToTextureCopy) &&
+ src.texture->GetFormat().isCompressed &&
+ !HasSameTextureCopyExtent(src, dst, copy->copySize);
+
+ if (!copyUsingTemporaryBuffer) {
+ VkImage srcImage = ToBackend(src.texture)->GetHandle();
+ VkImage dstImage = ToBackend(dst.texture)->GetHandle();
- // TODO after Yunchao's CL
- ToBackend(src.texture)
- ->TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopySrc,
- srcRange);
- ToBackend(dst.texture)
- ->TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopyDst,
- dstRange);
-
- // In some situations we cannot do texture-to-texture copies with vkCmdCopyImage
- // because as Vulkan SPEC always validates image copies with the virtual size of
- // the image subresource, when the extent that fits in the copy region of one
- // subresource but does not fit in the one of another subresource, we will fail
- // to find a valid extent to satisfy the requirements on both source and
- // destination image subresource. For example, when the source is the first
- // level of a 16x16 texture in BC format, and the destination is the third level
- // of a 60x60 texture in the same format, neither 16x16 nor 15x15 is valid as
- // the extent of vkCmdCopyImage.
- // Our workaround for this issue is replacing the texture-to-texture copy with
- // one texture-to-buffer copy and one buffer-to-texture copy.
- bool copyUsingTemporaryBuffer =
- device->IsToggleEnabled(
- Toggle::UseTemporaryBufferInCompressedTextureToTextureCopy) &&
- src.texture->GetFormat().isCompressed &&
- !HasSameTextureCopyExtent(src, dst, copy->copySize);
-
- if (!copyUsingTemporaryBuffer) {
- VkImage srcImage = ToBackend(src.texture)->GetHandle();
- VkImage dstImage = ToBackend(dst.texture)->GetHandle();
-
- for (Aspect aspect : IterateEnumMask(src.texture->GetFormat().aspects)) {
- ASSERT(dst.texture->GetFormat().aspects & aspect);
- VkImageCopy region =
- ComputeImageCopyRegion(src, dst, copy->copySize, aspect);
-
- // Dawn guarantees dstImage be in the TRANSFER_DST_OPTIMAL layout after
- // the copy command.
- device->fn.CmdCopyImage(commands, srcImage, VK_IMAGE_LAYOUT_GENERAL,
- dstImage, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
- 1, &region);
- }
- } else {
- DAWN_TRY(RecordCopyImageWithTemporaryBuffer(recordingContext, src, dst,
- copy->copySize));
+ for (Aspect aspect : IterateEnumMask(src.texture->GetFormat().aspects)) {
+ ASSERT(dst.texture->GetFormat().aspects & aspect);
+ VkImageCopy region =
+ ComputeImageCopyRegion(src, dst, copy->copySize, aspect);
+
+                        // Dawn guarantees that dstImage is in the TRANSFER_DST_OPTIMAL layout
+                        // after the copy command.
+ device->fn.CmdCopyImage(commands, srcImage, VK_IMAGE_LAYOUT_GENERAL,
+ dstImage, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1,
+ &region);
}
+ } else {
+ DAWN_TRY(RecordCopyImageWithTemporaryBuffer(recordingContext, src, dst,
+ copy->copySize));
+ }
+
+ break;
+ }
+ case Command::ClearBuffer: {
+ ClearBufferCmd* cmd = mCommands.NextCommand<ClearBufferCmd>();
+ if (cmd->size == 0) {
+ // Skip no-op fills.
break;
}
- case Command::ClearBuffer: {
- ClearBufferCmd* cmd = mCommands.NextCommand<ClearBufferCmd>();
- if (cmd->size == 0) {
- // Skip no-op fills.
- break;
- }
+ Buffer* dstBuffer = ToBackend(cmd->buffer.Get());
+ bool clearedToZero = dstBuffer->EnsureDataInitializedAsDestination(
+ recordingContext, cmd->offset, cmd->size);
- Buffer* dstBuffer = ToBackend(cmd->buffer.Get());
- bool clearedToZero = dstBuffer->EnsureDataInitializedAsDestination(
- recordingContext, cmd->offset, cmd->size);
+ if (!clearedToZero) {
+ dstBuffer->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopyDst);
+ device->fn.CmdFillBuffer(recordingContext->commandBuffer,
+ dstBuffer->GetHandle(), cmd->offset, cmd->size, 0u);
+ }
- if (!clearedToZero) {
- dstBuffer->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopyDst);
- device->fn.CmdFillBuffer(recordingContext->commandBuffer,
- dstBuffer->GetHandle(), cmd->offset, cmd->size,
- 0u);
- }
+ break;
+ }
- break;
- }
+ case Command::BeginRenderPass: {
+ BeginRenderPassCmd* cmd = mCommands.NextCommand<BeginRenderPassCmd>();
- case Command::BeginRenderPass: {
- BeginRenderPassCmd* cmd = mCommands.NextCommand<BeginRenderPassCmd>();
+ PrepareResourcesForRenderPass(
+ device, recordingContext,
+ GetResourceUsages().renderPasses[nextRenderPassNumber]);
- PrepareResourcesForRenderPass(
- device, recordingContext,
- GetResourceUsages().renderPasses[nextRenderPassNumber]);
+ LazyClearRenderPassAttachments(cmd);
+ DAWN_TRY(RecordRenderPass(recordingContext, cmd));
- LazyClearRenderPassAttachments(cmd);
- DAWN_TRY(RecordRenderPass(recordingContext, cmd));
+ nextRenderPassNumber++;
+ break;
+ }
- nextRenderPassNumber++;
- break;
- }
+ case Command::BeginComputePass: {
+ mCommands.NextCommand<BeginComputePassCmd>();
- case Command::BeginComputePass: {
- mCommands.NextCommand<BeginComputePassCmd>();
+ DAWN_TRY(RecordComputePass(
+ recordingContext, GetResourceUsages().computePasses[nextComputePassNumber]));
- DAWN_TRY(RecordComputePass(
- recordingContext,
- GetResourceUsages().computePasses[nextComputePassNumber]));
+ nextComputePassNumber++;
+ break;
+ }
- nextComputePassNumber++;
- break;
+ case Command::ResolveQuerySet: {
+ ResolveQuerySetCmd* cmd = mCommands.NextCommand<ResolveQuerySetCmd>();
+ QuerySet* querySet = ToBackend(cmd->querySet.Get());
+ Buffer* destination = ToBackend(cmd->destination.Get());
+
+ destination->EnsureDataInitializedAsDestination(
+ recordingContext, cmd->destinationOffset, cmd->queryCount * sizeof(uint64_t));
+
+                // vkCmdCopyQueryPoolResults can only retrieve available queries because
+                // VK_QUERY_RESULT_WAIT_BIT is set. To resolve the unavailable queries as 0s,
+                // we need to clear the resolved region of the destination buffer to 0s first.
+ auto startIt = querySet->GetQueryAvailability().begin() + cmd->firstQuery;
+ auto endIt =
+ querySet->GetQueryAvailability().begin() + cmd->firstQuery + cmd->queryCount;
+ bool hasUnavailableQueries = std::find(startIt, endIt, false) != endIt;
+ if (hasUnavailableQueries) {
+ destination->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopyDst);
+ device->fn.CmdFillBuffer(commands, destination->GetHandle(),
+ cmd->destinationOffset,
+ cmd->queryCount * sizeof(uint64_t), 0u);
}
- case Command::ResolveQuerySet: {
- ResolveQuerySetCmd* cmd = mCommands.NextCommand<ResolveQuerySetCmd>();
- QuerySet* querySet = ToBackend(cmd->querySet.Get());
- Buffer* destination = ToBackend(cmd->destination.Get());
-
- destination->EnsureDataInitializedAsDestination(
- recordingContext, cmd->destinationOffset,
- cmd->queryCount * sizeof(uint64_t));
-
- // vkCmdCopyQueryPoolResults only can retrieve available queries because
- // VK_QUERY_RESULT_WAIT_BIT is set. In order to resolve the unavailable queries
- // as 0s, we need to clear the resolving region of the destination buffer to 0s.
- auto startIt = querySet->GetQueryAvailability().begin() + cmd->firstQuery;
- auto endIt = querySet->GetQueryAvailability().begin() + cmd->firstQuery +
- cmd->queryCount;
- bool hasUnavailableQueries = std::find(startIt, endIt, false) != endIt;
- if (hasUnavailableQueries) {
- destination->TransitionUsageNow(recordingContext,
- wgpu::BufferUsage::CopyDst);
- device->fn.CmdFillBuffer(commands, destination->GetHandle(),
- cmd->destinationOffset,
- cmd->queryCount * sizeof(uint64_t), 0u);
- }
+ destination->TransitionUsageNow(recordingContext, wgpu::BufferUsage::QueryResolve);
- destination->TransitionUsageNow(recordingContext,
- wgpu::BufferUsage::QueryResolve);
+ RecordResolveQuerySetCmd(commands, device, querySet, cmd->firstQuery,
+ cmd->queryCount, destination, cmd->destinationOffset);
- RecordResolveQuerySetCmd(commands, device, querySet, cmd->firstQuery,
- cmd->queryCount, destination, cmd->destinationOffset);
+ break;
+ }
- break;
- }
+ case Command::WriteTimestamp: {
+ WriteTimestampCmd* cmd = mCommands.NextCommand<WriteTimestampCmd>();
- case Command::WriteTimestamp: {
- WriteTimestampCmd* cmd = mCommands.NextCommand<WriteTimestampCmd>();
+ // The query must be reset between uses.
+ device->fn.CmdResetQueryPool(commands, ToBackend(cmd->querySet)->GetHandle(),
+ cmd->queryIndex, 1);
- // The query must be reset between uses.
- device->fn.CmdResetQueryPool(commands, ToBackend(cmd->querySet)->GetHandle(),
- cmd->queryIndex, 1);
+ RecordWriteTimestampCmd(recordingContext, device, cmd);
+ break;
+ }
- RecordWriteTimestampCmd(recordingContext, device, cmd);
- break;
+ case Command::InsertDebugMarker: {
+ if (device->GetGlobalInfo().HasExt(InstanceExt::DebugUtils)) {
+ InsertDebugMarkerCmd* cmd = mCommands.NextCommand<InsertDebugMarkerCmd>();
+ const char* label = mCommands.NextData<char>(cmd->length + 1);
+ VkDebugUtilsLabelEXT utilsLabel;
+ utilsLabel.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT;
+ utilsLabel.pNext = nullptr;
+ utilsLabel.pLabelName = label;
+ // Default color to black
+ utilsLabel.color[0] = 0.0;
+ utilsLabel.color[1] = 0.0;
+ utilsLabel.color[2] = 0.0;
+ utilsLabel.color[3] = 1.0;
+ device->fn.CmdInsertDebugUtilsLabelEXT(commands, &utilsLabel);
+ } else {
+ SkipCommand(&mCommands, Command::InsertDebugMarker);
}
+ break;
+ }
- case Command::InsertDebugMarker: {
- if (device->GetGlobalInfo().HasExt(InstanceExt::DebugUtils)) {
- InsertDebugMarkerCmd* cmd = mCommands.NextCommand<InsertDebugMarkerCmd>();
- const char* label = mCommands.NextData<char>(cmd->length + 1);
- VkDebugUtilsLabelEXT utilsLabel;
- utilsLabel.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT;
- utilsLabel.pNext = nullptr;
- utilsLabel.pLabelName = label;
- // Default color to black
- utilsLabel.color[0] = 0.0;
- utilsLabel.color[1] = 0.0;
- utilsLabel.color[2] = 0.0;
- utilsLabel.color[3] = 1.0;
- device->fn.CmdInsertDebugUtilsLabelEXT(commands, &utilsLabel);
- } else {
- SkipCommand(&mCommands, Command::InsertDebugMarker);
- }
- break;
+ case Command::PopDebugGroup: {
+ if (device->GetGlobalInfo().HasExt(InstanceExt::DebugUtils)) {
+ mCommands.NextCommand<PopDebugGroupCmd>();
+ device->fn.CmdEndDebugUtilsLabelEXT(commands);
+ } else {
+ SkipCommand(&mCommands, Command::PopDebugGroup);
}
+ break;
+ }
- case Command::PopDebugGroup: {
- if (device->GetGlobalInfo().HasExt(InstanceExt::DebugUtils)) {
- mCommands.NextCommand<PopDebugGroupCmd>();
- device->fn.CmdEndDebugUtilsLabelEXT(commands);
- } else {
- SkipCommand(&mCommands, Command::PopDebugGroup);
- }
- break;
+ case Command::PushDebugGroup: {
+ if (device->GetGlobalInfo().HasExt(InstanceExt::DebugUtils)) {
+ PushDebugGroupCmd* cmd = mCommands.NextCommand<PushDebugGroupCmd>();
+ const char* label = mCommands.NextData<char>(cmd->length + 1);
+ VkDebugUtilsLabelEXT utilsLabel;
+ utilsLabel.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT;
+ utilsLabel.pNext = nullptr;
+ utilsLabel.pLabelName = label;
+ // Default color to black
+ utilsLabel.color[0] = 0.0;
+ utilsLabel.color[1] = 0.0;
+ utilsLabel.color[2] = 0.0;
+ utilsLabel.color[3] = 1.0;
+ device->fn.CmdBeginDebugUtilsLabelEXT(commands, &utilsLabel);
+ } else {
+ SkipCommand(&mCommands, Command::PushDebugGroup);
}
+ break;
+ }
- case Command::PushDebugGroup: {
- if (device->GetGlobalInfo().HasExt(InstanceExt::DebugUtils)) {
- PushDebugGroupCmd* cmd = mCommands.NextCommand<PushDebugGroupCmd>();
- const char* label = mCommands.NextData<char>(cmd->length + 1);
- VkDebugUtilsLabelEXT utilsLabel;
- utilsLabel.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT;
- utilsLabel.pNext = nullptr;
- utilsLabel.pLabelName = label;
- // Default color to black
- utilsLabel.color[0] = 0.0;
- utilsLabel.color[1] = 0.0;
- utilsLabel.color[2] = 0.0;
- utilsLabel.color[3] = 1.0;
- device->fn.CmdBeginDebugUtilsLabelEXT(commands, &utilsLabel);
- } else {
- SkipCommand(&mCommands, Command::PushDebugGroup);
- }
- break;
+ case Command::WriteBuffer: {
+ WriteBufferCmd* write = mCommands.NextCommand<WriteBufferCmd>();
+ const uint64_t offset = write->offset;
+ const uint64_t size = write->size;
+ if (size == 0) {
+ continue;
}
- case Command::WriteBuffer: {
- WriteBufferCmd* write = mCommands.NextCommand<WriteBufferCmd>();
- const uint64_t offset = write->offset;
- const uint64_t size = write->size;
- if (size == 0) {
- continue;
- }
-
- Buffer* dstBuffer = ToBackend(write->buffer.Get());
- uint8_t* data = mCommands.NextData<uint8_t>(size);
- Device* device = ToBackend(GetDevice());
-
- UploadHandle uploadHandle;
- DAWN_TRY_ASSIGN(uploadHandle, device->GetDynamicUploader()->Allocate(
- size, device->GetPendingCommandSerial(),
- kCopyBufferToBufferOffsetAlignment));
- ASSERT(uploadHandle.mappedBuffer != nullptr);
- memcpy(uploadHandle.mappedBuffer, data, size);
+ Buffer* dstBuffer = ToBackend(write->buffer.Get());
+ uint8_t* data = mCommands.NextData<uint8_t>(size);
+ Device* device = ToBackend(GetDevice());
- dstBuffer->EnsureDataInitializedAsDestination(recordingContext, offset, size);
+ UploadHandle uploadHandle;
+ DAWN_TRY_ASSIGN(uploadHandle, device->GetDynamicUploader()->Allocate(
+ size, device->GetPendingCommandSerial(),
+ kCopyBufferToBufferOffsetAlignment));
+ ASSERT(uploadHandle.mappedBuffer != nullptr);
+ memcpy(uploadHandle.mappedBuffer, data, size);
- dstBuffer->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopyDst);
+ dstBuffer->EnsureDataInitializedAsDestination(recordingContext, offset, size);
- VkBufferCopy copy;
- copy.srcOffset = uploadHandle.startOffset;
- copy.dstOffset = offset;
- copy.size = size;
+ dstBuffer->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopyDst);
- device->fn.CmdCopyBuffer(
- commands, ToBackend(uploadHandle.stagingBuffer)->GetBufferHandle(),
- dstBuffer->GetHandle(), 1, &copy);
- break;
- }
+ VkBufferCopy copy;
+ copy.srcOffset = uploadHandle.startOffset;
+ copy.dstOffset = offset;
+ copy.size = size;
- default:
- break;
+ device->fn.CmdCopyBuffer(commands,
+ ToBackend(uploadHandle.stagingBuffer)->GetBufferHandle(),
+ dstBuffer->GetHandle(), 1, &copy);
+ break;
}
- }
- return {};
+ default:
+ break;
+ }
}
- MaybeError CommandBuffer::RecordComputePass(CommandRecordingContext* recordingContext,
- const ComputePassResourceUsage& resourceUsages) {
- Device* device = ToBackend(GetDevice());
- VkCommandBuffer commands = recordingContext->commandBuffer;
+ return {};
+}
- uint64_t currentDispatch = 0;
- DescriptorSetTracker descriptorSets = {};
+MaybeError CommandBuffer::RecordComputePass(CommandRecordingContext* recordingContext,
+ const ComputePassResourceUsage& resourceUsages) {
+ Device* device = ToBackend(GetDevice());
+ VkCommandBuffer commands = recordingContext->commandBuffer;
- Command type;
- while (mCommands.NextCommandId(&type)) {
- switch (type) {
- case Command::EndComputePass: {
- mCommands.NextCommand<EndComputePassCmd>();
- return {};
- }
+ uint64_t currentDispatch = 0;
+ DescriptorSetTracker descriptorSets = {};
- case Command::Dispatch: {
- DispatchCmd* dispatch = mCommands.NextCommand<DispatchCmd>();
+ Command type;
+ while (mCommands.NextCommandId(&type)) {
+ switch (type) {
+ case Command::EndComputePass: {
+ mCommands.NextCommand<EndComputePassCmd>();
+ return {};
+ }
- TransitionAndClearForSyncScope(device, recordingContext,
- resourceUsages.dispatchUsages[currentDispatch]);
- descriptorSets.Apply(device, recordingContext, VK_PIPELINE_BIND_POINT_COMPUTE);
+ case Command::Dispatch: {
+ DispatchCmd* dispatch = mCommands.NextCommand<DispatchCmd>();
- device->fn.CmdDispatch(commands, dispatch->x, dispatch->y, dispatch->z);
- currentDispatch++;
- break;
- }
+ TransitionAndClearForSyncScope(device, recordingContext,
+ resourceUsages.dispatchUsages[currentDispatch]);
+ descriptorSets.Apply(device, recordingContext, VK_PIPELINE_BIND_POINT_COMPUTE);
- case Command::DispatchIndirect: {
- DispatchIndirectCmd* dispatch = mCommands.NextCommand<DispatchIndirectCmd>();
- VkBuffer indirectBuffer = ToBackend(dispatch->indirectBuffer)->GetHandle();
+ device->fn.CmdDispatch(commands, dispatch->x, dispatch->y, dispatch->z);
+ currentDispatch++;
+ break;
+ }
- TransitionAndClearForSyncScope(device, recordingContext,
- resourceUsages.dispatchUsages[currentDispatch]);
- descriptorSets.Apply(device, recordingContext, VK_PIPELINE_BIND_POINT_COMPUTE);
+ case Command::DispatchIndirect: {
+ DispatchIndirectCmd* dispatch = mCommands.NextCommand<DispatchIndirectCmd>();
+ VkBuffer indirectBuffer = ToBackend(dispatch->indirectBuffer)->GetHandle();
- device->fn.CmdDispatchIndirect(
- commands, indirectBuffer,
- static_cast<VkDeviceSize>(dispatch->indirectOffset));
- currentDispatch++;
- break;
- }
+ TransitionAndClearForSyncScope(device, recordingContext,
+ resourceUsages.dispatchUsages[currentDispatch]);
+ descriptorSets.Apply(device, recordingContext, VK_PIPELINE_BIND_POINT_COMPUTE);
- case Command::SetBindGroup: {
- SetBindGroupCmd* cmd = mCommands.NextCommand<SetBindGroupCmd>();
+ device->fn.CmdDispatchIndirect(commands, indirectBuffer,
+ static_cast<VkDeviceSize>(dispatch->indirectOffset));
+ currentDispatch++;
+ break;
+ }
- BindGroup* bindGroup = ToBackend(cmd->group.Get());
- uint32_t* dynamicOffsets = nullptr;
- if (cmd->dynamicOffsetCount > 0) {
- dynamicOffsets = mCommands.NextData<uint32_t>(cmd->dynamicOffsetCount);
- }
+ case Command::SetBindGroup: {
+ SetBindGroupCmd* cmd = mCommands.NextCommand<SetBindGroupCmd>();
- descriptorSets.OnSetBindGroup(cmd->index, bindGroup, cmd->dynamicOffsetCount,
- dynamicOffsets);
- break;
+ BindGroup* bindGroup = ToBackend(cmd->group.Get());
+ uint32_t* dynamicOffsets = nullptr;
+ if (cmd->dynamicOffsetCount > 0) {
+ dynamicOffsets = mCommands.NextData<uint32_t>(cmd->dynamicOffsetCount);
}
- case Command::SetComputePipeline: {
- SetComputePipelineCmd* cmd = mCommands.NextCommand<SetComputePipelineCmd>();
- ComputePipeline* pipeline = ToBackend(cmd->pipeline).Get();
+ descriptorSets.OnSetBindGroup(cmd->index, bindGroup, cmd->dynamicOffsetCount,
+ dynamicOffsets);
+ break;
+ }
- device->fn.CmdBindPipeline(commands, VK_PIPELINE_BIND_POINT_COMPUTE,
- pipeline->GetHandle());
- descriptorSets.OnSetPipeline(pipeline);
- break;
- }
+ case Command::SetComputePipeline: {
+ SetComputePipelineCmd* cmd = mCommands.NextCommand<SetComputePipelineCmd>();
+ ComputePipeline* pipeline = ToBackend(cmd->pipeline).Get();
- case Command::InsertDebugMarker: {
- if (device->GetGlobalInfo().HasExt(InstanceExt::DebugUtils)) {
- InsertDebugMarkerCmd* cmd = mCommands.NextCommand<InsertDebugMarkerCmd>();
- const char* label = mCommands.NextData<char>(cmd->length + 1);
- VkDebugUtilsLabelEXT utilsLabel;
- utilsLabel.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT;
- utilsLabel.pNext = nullptr;
- utilsLabel.pLabelName = label;
- // Default color to black
- utilsLabel.color[0] = 0.0;
- utilsLabel.color[1] = 0.0;
- utilsLabel.color[2] = 0.0;
- utilsLabel.color[3] = 1.0;
- device->fn.CmdInsertDebugUtilsLabelEXT(commands, &utilsLabel);
- } else {
- SkipCommand(&mCommands, Command::InsertDebugMarker);
- }
- break;
- }
+ device->fn.CmdBindPipeline(commands, VK_PIPELINE_BIND_POINT_COMPUTE,
+ pipeline->GetHandle());
+ descriptorSets.OnSetPipeline(pipeline);
+ break;
+ }
- case Command::PopDebugGroup: {
- if (device->GetGlobalInfo().HasExt(InstanceExt::DebugUtils)) {
- mCommands.NextCommand<PopDebugGroupCmd>();
- device->fn.CmdEndDebugUtilsLabelEXT(commands);
- } else {
- SkipCommand(&mCommands, Command::PopDebugGroup);
- }
- break;
+ case Command::InsertDebugMarker: {
+ if (device->GetGlobalInfo().HasExt(InstanceExt::DebugUtils)) {
+ InsertDebugMarkerCmd* cmd = mCommands.NextCommand<InsertDebugMarkerCmd>();
+ const char* label = mCommands.NextData<char>(cmd->length + 1);
+ VkDebugUtilsLabelEXT utilsLabel;
+ utilsLabel.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT;
+ utilsLabel.pNext = nullptr;
+ utilsLabel.pLabelName = label;
+ // Default color to black
+ utilsLabel.color[0] = 0.0;
+ utilsLabel.color[1] = 0.0;
+ utilsLabel.color[2] = 0.0;
+ utilsLabel.color[3] = 1.0;
+ device->fn.CmdInsertDebugUtilsLabelEXT(commands, &utilsLabel);
+ } else {
+ SkipCommand(&mCommands, Command::InsertDebugMarker);
}
+ break;
+ }
- case Command::PushDebugGroup: {
- if (device->GetGlobalInfo().HasExt(InstanceExt::DebugUtils)) {
- PushDebugGroupCmd* cmd = mCommands.NextCommand<PushDebugGroupCmd>();
- const char* label = mCommands.NextData<char>(cmd->length + 1);
- VkDebugUtilsLabelEXT utilsLabel;
- utilsLabel.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT;
- utilsLabel.pNext = nullptr;
- utilsLabel.pLabelName = label;
- // Default color to black
- utilsLabel.color[0] = 0.0;
- utilsLabel.color[1] = 0.0;
- utilsLabel.color[2] = 0.0;
- utilsLabel.color[3] = 1.0;
- device->fn.CmdBeginDebugUtilsLabelEXT(commands, &utilsLabel);
- } else {
- SkipCommand(&mCommands, Command::PushDebugGroup);
- }
- break;
+ case Command::PopDebugGroup: {
+ if (device->GetGlobalInfo().HasExt(InstanceExt::DebugUtils)) {
+ mCommands.NextCommand<PopDebugGroupCmd>();
+ device->fn.CmdEndDebugUtilsLabelEXT(commands);
+ } else {
+ SkipCommand(&mCommands, Command::PopDebugGroup);
}
+ break;
+ }
- case Command::WriteTimestamp: {
- WriteTimestampCmd* cmd = mCommands.NextCommand<WriteTimestampCmd>();
+ case Command::PushDebugGroup: {
+ if (device->GetGlobalInfo().HasExt(InstanceExt::DebugUtils)) {
+ PushDebugGroupCmd* cmd = mCommands.NextCommand<PushDebugGroupCmd>();
+ const char* label = mCommands.NextData<char>(cmd->length + 1);
+ VkDebugUtilsLabelEXT utilsLabel;
+ utilsLabel.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT;
+ utilsLabel.pNext = nullptr;
+ utilsLabel.pLabelName = label;
+ // Default color to black
+ utilsLabel.color[0] = 0.0;
+ utilsLabel.color[1] = 0.0;
+ utilsLabel.color[2] = 0.0;
+ utilsLabel.color[3] = 1.0;
+ device->fn.CmdBeginDebugUtilsLabelEXT(commands, &utilsLabel);
+ } else {
+ SkipCommand(&mCommands, Command::PushDebugGroup);
+ }
+ break;
+ }
- // The query must be reset between uses.
- device->fn.CmdResetQueryPool(commands, ToBackend(cmd->querySet)->GetHandle(),
- cmd->queryIndex, 1);
+ case Command::WriteTimestamp: {
+ WriteTimestampCmd* cmd = mCommands.NextCommand<WriteTimestampCmd>();
- RecordWriteTimestampCmd(recordingContext, device, cmd);
- break;
- }
+ // The query must be reset between uses.
+ device->fn.CmdResetQueryPool(commands, ToBackend(cmd->querySet)->GetHandle(),
+ cmd->queryIndex, 1);
- default:
- UNREACHABLE();
+ RecordWriteTimestampCmd(recordingContext, device, cmd);
+ break;
}
+
+ default:
+ UNREACHABLE();
}
+ }
+
+ // EndComputePass should have been called
+ UNREACHABLE();
+}
+
+MaybeError CommandBuffer::RecordRenderPass(CommandRecordingContext* recordingContext,
+ BeginRenderPassCmd* renderPassCmd) {
+ Device* device = ToBackend(GetDevice());
+ VkCommandBuffer commands = recordingContext->commandBuffer;
- // EndComputePass should have been called
- UNREACHABLE();
+ DAWN_TRY(RecordBeginRenderPass(recordingContext, device, renderPassCmd));
+
+    // Set the default values for the dynamic state
+ {
+ device->fn.CmdSetLineWidth(commands, 1.0f);
+ device->fn.CmdSetDepthBounds(commands, 0.0f, 1.0f);
+
+ device->fn.CmdSetStencilReference(commands, VK_STENCIL_FRONT_AND_BACK, 0);
+
+ float blendConstants[4] = {
+ 0.0f,
+ 0.0f,
+ 0.0f,
+ 0.0f,
+ };
+ device->fn.CmdSetBlendConstants(commands, blendConstants);
+
+ // The viewport and scissor default to cover all of the attachments
+ VkViewport viewport;
+ viewport.x = 0.0f;
+ viewport.y = static_cast<float>(renderPassCmd->height);
+ viewport.width = static_cast<float>(renderPassCmd->width);
+ viewport.height = -static_cast<float>(renderPassCmd->height);
+ viewport.minDepth = 0.0f;
+ viewport.maxDepth = 1.0f;
+ device->fn.CmdSetViewport(commands, 0, 1, &viewport);
+
+ VkRect2D scissorRect;
+ scissorRect.offset.x = 0;
+ scissorRect.offset.y = 0;
+ scissorRect.extent.width = renderPassCmd->width;
+ scissorRect.extent.height = renderPassCmd->height;
+ device->fn.CmdSetScissor(commands, 0, 1, &scissorRect);
}
- MaybeError CommandBuffer::RecordRenderPass(CommandRecordingContext* recordingContext,
- BeginRenderPassCmd* renderPassCmd) {
- Device* device = ToBackend(GetDevice());
- VkCommandBuffer commands = recordingContext->commandBuffer;
-
- DAWN_TRY(RecordBeginRenderPass(recordingContext, device, renderPassCmd));
-
- // Set the default value for the dynamic state
- {
- device->fn.CmdSetLineWidth(commands, 1.0f);
- device->fn.CmdSetDepthBounds(commands, 0.0f, 1.0f);
-
- device->fn.CmdSetStencilReference(commands, VK_STENCIL_FRONT_AND_BACK, 0);
-
- float blendConstants[4] = {
- 0.0f,
- 0.0f,
- 0.0f,
- 0.0f,
- };
- device->fn.CmdSetBlendConstants(commands, blendConstants);
-
- // The viewport and scissor default to cover all of the attachments
- VkViewport viewport;
- viewport.x = 0.0f;
- viewport.y = static_cast<float>(renderPassCmd->height);
- viewport.width = static_cast<float>(renderPassCmd->width);
- viewport.height = -static_cast<float>(renderPassCmd->height);
- viewport.minDepth = 0.0f;
- viewport.maxDepth = 1.0f;
- device->fn.CmdSetViewport(commands, 0, 1, &viewport);
-
- VkRect2D scissorRect;
- scissorRect.offset.x = 0;
- scissorRect.offset.y = 0;
- scissorRect.extent.width = renderPassCmd->width;
- scissorRect.extent.height = renderPassCmd->height;
- device->fn.CmdSetScissor(commands, 0, 1, &scissorRect);
- }
+ DescriptorSetTracker descriptorSets = {};
+ RenderPipeline* lastPipeline = nullptr;
- DescriptorSetTracker descriptorSets = {};
- RenderPipeline* lastPipeline = nullptr;
+ auto EncodeRenderBundleCommand = [&](CommandIterator* iter, Command type) {
+ switch (type) {
+ case Command::Draw: {
+ DrawCmd* draw = iter->NextCommand<DrawCmd>();
- auto EncodeRenderBundleCommand = [&](CommandIterator* iter, Command type) {
- switch (type) {
- case Command::Draw: {
- DrawCmd* draw = iter->NextCommand<DrawCmd>();
+ descriptorSets.Apply(device, recordingContext, VK_PIPELINE_BIND_POINT_GRAPHICS);
+ device->fn.CmdDraw(commands, draw->vertexCount, draw->instanceCount,
+ draw->firstVertex, draw->firstInstance);
+ break;
+ }
- descriptorSets.Apply(device, recordingContext, VK_PIPELINE_BIND_POINT_GRAPHICS);
- device->fn.CmdDraw(commands, draw->vertexCount, draw->instanceCount,
- draw->firstVertex, draw->firstInstance);
- break;
- }
+ case Command::DrawIndexed: {
+ DrawIndexedCmd* draw = iter->NextCommand<DrawIndexedCmd>();
- case Command::DrawIndexed: {
- DrawIndexedCmd* draw = iter->NextCommand<DrawIndexedCmd>();
+ descriptorSets.Apply(device, recordingContext, VK_PIPELINE_BIND_POINT_GRAPHICS);
+ device->fn.CmdDrawIndexed(commands, draw->indexCount, draw->instanceCount,
+ draw->firstIndex, draw->baseVertex, draw->firstInstance);
+ break;
+ }
- descriptorSets.Apply(device, recordingContext, VK_PIPELINE_BIND_POINT_GRAPHICS);
- device->fn.CmdDrawIndexed(commands, draw->indexCount, draw->instanceCount,
- draw->firstIndex, draw->baseVertex,
- draw->firstInstance);
- break;
- }
+ case Command::DrawIndirect: {
+ DrawIndirectCmd* draw = iter->NextCommand<DrawIndirectCmd>();
+ Buffer* buffer = ToBackend(draw->indirectBuffer.Get());
- case Command::DrawIndirect: {
- DrawIndirectCmd* draw = iter->NextCommand<DrawIndirectCmd>();
- Buffer* buffer = ToBackend(draw->indirectBuffer.Get());
+ descriptorSets.Apply(device, recordingContext, VK_PIPELINE_BIND_POINT_GRAPHICS);
+ device->fn.CmdDrawIndirect(commands, buffer->GetHandle(),
+ static_cast<VkDeviceSize>(draw->indirectOffset), 1, 0);
+ break;
+ }
- descriptorSets.Apply(device, recordingContext, VK_PIPELINE_BIND_POINT_GRAPHICS);
- device->fn.CmdDrawIndirect(commands, buffer->GetHandle(),
- static_cast<VkDeviceSize>(draw->indirectOffset), 1,
- 0);
- break;
- }
+ case Command::DrawIndexedIndirect: {
+ DrawIndexedIndirectCmd* draw = iter->NextCommand<DrawIndexedIndirectCmd>();
+ Buffer* buffer = ToBackend(draw->indirectBuffer.Get());
+ ASSERT(buffer != nullptr);
- case Command::DrawIndexedIndirect: {
- DrawIndexedIndirectCmd* draw = iter->NextCommand<DrawIndexedIndirectCmd>();
- Buffer* buffer = ToBackend(draw->indirectBuffer.Get());
- ASSERT(buffer != nullptr);
+ descriptorSets.Apply(device, recordingContext, VK_PIPELINE_BIND_POINT_GRAPHICS);
+ device->fn.CmdDrawIndexedIndirect(commands, buffer->GetHandle(),
+ static_cast<VkDeviceSize>(draw->indirectOffset),
+ 1, 0);
+ break;
+ }
- descriptorSets.Apply(device, recordingContext, VK_PIPELINE_BIND_POINT_GRAPHICS);
- device->fn.CmdDrawIndexedIndirect(
- commands, buffer->GetHandle(),
- static_cast<VkDeviceSize>(draw->indirectOffset), 1, 0);
- break;
+ case Command::InsertDebugMarker: {
+ if (device->GetGlobalInfo().HasExt(InstanceExt::DebugUtils)) {
+ InsertDebugMarkerCmd* cmd = iter->NextCommand<InsertDebugMarkerCmd>();
+ const char* label = iter->NextData<char>(cmd->length + 1);
+ VkDebugUtilsLabelEXT utilsLabel;
+ utilsLabel.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT;
+ utilsLabel.pNext = nullptr;
+ utilsLabel.pLabelName = label;
+ // Default color to black
+ utilsLabel.color[0] = 0.0;
+ utilsLabel.color[1] = 0.0;
+ utilsLabel.color[2] = 0.0;
+ utilsLabel.color[3] = 1.0;
+ device->fn.CmdInsertDebugUtilsLabelEXT(commands, &utilsLabel);
+ } else {
+ SkipCommand(iter, Command::InsertDebugMarker);
}
+ break;
+ }
- case Command::InsertDebugMarker: {
- if (device->GetGlobalInfo().HasExt(InstanceExt::DebugUtils)) {
- InsertDebugMarkerCmd* cmd = iter->NextCommand<InsertDebugMarkerCmd>();
- const char* label = iter->NextData<char>(cmd->length + 1);
- VkDebugUtilsLabelEXT utilsLabel;
- utilsLabel.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT;
- utilsLabel.pNext = nullptr;
- utilsLabel.pLabelName = label;
- // Default color to black
- utilsLabel.color[0] = 0.0;
- utilsLabel.color[1] = 0.0;
- utilsLabel.color[2] = 0.0;
- utilsLabel.color[3] = 1.0;
- device->fn.CmdInsertDebugUtilsLabelEXT(commands, &utilsLabel);
- } else {
- SkipCommand(iter, Command::InsertDebugMarker);
- }
- break;
+ case Command::PopDebugGroup: {
+ if (device->GetGlobalInfo().HasExt(InstanceExt::DebugUtils)) {
+ iter->NextCommand<PopDebugGroupCmd>();
+ device->fn.CmdEndDebugUtilsLabelEXT(commands);
+ } else {
+ SkipCommand(iter, Command::PopDebugGroup);
}
+ break;
+ }
- case Command::PopDebugGroup: {
- if (device->GetGlobalInfo().HasExt(InstanceExt::DebugUtils)) {
- iter->NextCommand<PopDebugGroupCmd>();
- device->fn.CmdEndDebugUtilsLabelEXT(commands);
- } else {
- SkipCommand(iter, Command::PopDebugGroup);
- }
- break;
+ case Command::PushDebugGroup: {
+ if (device->GetGlobalInfo().HasExt(InstanceExt::DebugUtils)) {
+ PushDebugGroupCmd* cmd = iter->NextCommand<PushDebugGroupCmd>();
+ const char* label = iter->NextData<char>(cmd->length + 1);
+ VkDebugUtilsLabelEXT utilsLabel;
+ utilsLabel.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT;
+ utilsLabel.pNext = nullptr;
+ utilsLabel.pLabelName = label;
+ // Default color to black
+ utilsLabel.color[0] = 0.0;
+ utilsLabel.color[1] = 0.0;
+ utilsLabel.color[2] = 0.0;
+ utilsLabel.color[3] = 1.0;
+ device->fn.CmdBeginDebugUtilsLabelEXT(commands, &utilsLabel);
+ } else {
+ SkipCommand(iter, Command::PushDebugGroup);
}
+ break;
+ }
- case Command::PushDebugGroup: {
- if (device->GetGlobalInfo().HasExt(InstanceExt::DebugUtils)) {
- PushDebugGroupCmd* cmd = iter->NextCommand<PushDebugGroupCmd>();
- const char* label = iter->NextData<char>(cmd->length + 1);
- VkDebugUtilsLabelEXT utilsLabel;
- utilsLabel.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT;
- utilsLabel.pNext = nullptr;
- utilsLabel.pLabelName = label;
- // Default color to black
- utilsLabel.color[0] = 0.0;
- utilsLabel.color[1] = 0.0;
- utilsLabel.color[2] = 0.0;
- utilsLabel.color[3] = 1.0;
- device->fn.CmdBeginDebugUtilsLabelEXT(commands, &utilsLabel);
- } else {
- SkipCommand(iter, Command::PushDebugGroup);
- }
- break;
+ case Command::SetBindGroup: {
+ SetBindGroupCmd* cmd = iter->NextCommand<SetBindGroupCmd>();
+ BindGroup* bindGroup = ToBackend(cmd->group.Get());
+ uint32_t* dynamicOffsets = nullptr;
+ if (cmd->dynamicOffsetCount > 0) {
+ dynamicOffsets = iter->NextData<uint32_t>(cmd->dynamicOffsetCount);
}
- case Command::SetBindGroup: {
- SetBindGroupCmd* cmd = iter->NextCommand<SetBindGroupCmd>();
- BindGroup* bindGroup = ToBackend(cmd->group.Get());
- uint32_t* dynamicOffsets = nullptr;
- if (cmd->dynamicOffsetCount > 0) {
- dynamicOffsets = iter->NextData<uint32_t>(cmd->dynamicOffsetCount);
- }
-
- descriptorSets.OnSetBindGroup(cmd->index, bindGroup, cmd->dynamicOffsetCount,
- dynamicOffsets);
- break;
- }
+ descriptorSets.OnSetBindGroup(cmd->index, bindGroup, cmd->dynamicOffsetCount,
+ dynamicOffsets);
+ break;
+ }
- case Command::SetIndexBuffer: {
- SetIndexBufferCmd* cmd = iter->NextCommand<SetIndexBufferCmd>();
- VkBuffer indexBuffer = ToBackend(cmd->buffer)->GetHandle();
+ case Command::SetIndexBuffer: {
+ SetIndexBufferCmd* cmd = iter->NextCommand<SetIndexBufferCmd>();
+ VkBuffer indexBuffer = ToBackend(cmd->buffer)->GetHandle();
- device->fn.CmdBindIndexBuffer(commands, indexBuffer, cmd->offset,
- VulkanIndexType(cmd->format));
- break;
- }
+ device->fn.CmdBindIndexBuffer(commands, indexBuffer, cmd->offset,
+ VulkanIndexType(cmd->format));
+ break;
+ }
- case Command::SetRenderPipeline: {
- SetRenderPipelineCmd* cmd = iter->NextCommand<SetRenderPipelineCmd>();
- RenderPipeline* pipeline = ToBackend(cmd->pipeline).Get();
+ case Command::SetRenderPipeline: {
+ SetRenderPipelineCmd* cmd = iter->NextCommand<SetRenderPipelineCmd>();
+ RenderPipeline* pipeline = ToBackend(cmd->pipeline).Get();
- device->fn.CmdBindPipeline(commands, VK_PIPELINE_BIND_POINT_GRAPHICS,
- pipeline->GetHandle());
- lastPipeline = pipeline;
+ device->fn.CmdBindPipeline(commands, VK_PIPELINE_BIND_POINT_GRAPHICS,
+ pipeline->GetHandle());
+ lastPipeline = pipeline;
- descriptorSets.OnSetPipeline(pipeline);
- break;
- }
+ descriptorSets.OnSetPipeline(pipeline);
+ break;
+ }
- case Command::SetVertexBuffer: {
- SetVertexBufferCmd* cmd = iter->NextCommand<SetVertexBufferCmd>();
- VkBuffer buffer = ToBackend(cmd->buffer)->GetHandle();
- VkDeviceSize offset = static_cast<VkDeviceSize>(cmd->offset);
+ case Command::SetVertexBuffer: {
+ SetVertexBufferCmd* cmd = iter->NextCommand<SetVertexBufferCmd>();
+ VkBuffer buffer = ToBackend(cmd->buffer)->GetHandle();
+ VkDeviceSize offset = static_cast<VkDeviceSize>(cmd->offset);
- device->fn.CmdBindVertexBuffers(commands, static_cast<uint8_t>(cmd->slot), 1,
- &*buffer, &offset);
- break;
- }
+ device->fn.CmdBindVertexBuffers(commands, static_cast<uint8_t>(cmd->slot), 1,
+ &*buffer, &offset);
+ break;
+ }
- default:
- UNREACHABLE();
- break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ };
+
+ Command type;
+ while (mCommands.NextCommandId(&type)) {
+ switch (type) {
+ case Command::EndRenderPass: {
+ mCommands.NextCommand<EndRenderPassCmd>();
+ device->fn.CmdEndRenderPass(commands);
+ return {};
}
- };
- Command type;
- while (mCommands.NextCommandId(&type)) {
- switch (type) {
- case Command::EndRenderPass: {
- mCommands.NextCommand<EndRenderPassCmd>();
- device->fn.CmdEndRenderPass(commands);
- return {};
- }
+ case Command::SetBlendConstant: {
+ SetBlendConstantCmd* cmd = mCommands.NextCommand<SetBlendConstantCmd>();
+ const std::array<float, 4> blendConstants = ConvertToFloatColor(cmd->color);
+ device->fn.CmdSetBlendConstants(commands, blendConstants.data());
+ break;
+ }
- case Command::SetBlendConstant: {
- SetBlendConstantCmd* cmd = mCommands.NextCommand<SetBlendConstantCmd>();
- const std::array<float, 4> blendConstants = ConvertToFloatColor(cmd->color);
- device->fn.CmdSetBlendConstants(commands, blendConstants.data());
- break;
- }
+ case Command::SetStencilReference: {
+ SetStencilReferenceCmd* cmd = mCommands.NextCommand<SetStencilReferenceCmd>();
+ device->fn.CmdSetStencilReference(commands, VK_STENCIL_FRONT_AND_BACK,
+ cmd->reference);
+ break;
+ }
- case Command::SetStencilReference: {
- SetStencilReferenceCmd* cmd = mCommands.NextCommand<SetStencilReferenceCmd>();
- device->fn.CmdSetStencilReference(commands, VK_STENCIL_FRONT_AND_BACK,
- cmd->reference);
- break;
+ case Command::SetViewport: {
+ SetViewportCmd* cmd = mCommands.NextCommand<SetViewportCmd>();
+ VkViewport viewport;
+ viewport.x = cmd->x;
+ viewport.y = cmd->y + cmd->height;
+ viewport.width = cmd->width;
+ viewport.height = -cmd->height;
+ viewport.minDepth = cmd->minDepth;
+ viewport.maxDepth = cmd->maxDepth;
+
+ // Vulkan disallows width = 0, but VK_KHR_maintenance1 which we require allows
+ // height = 0 so use that to do an empty viewport.
+ if (viewport.width == 0) {
+ viewport.height = 0;
+
+ // Set the viewport x range to a range that's always valid.
+ viewport.x = 0;
+ viewport.width = 1;
}
- case Command::SetViewport: {
- SetViewportCmd* cmd = mCommands.NextCommand<SetViewportCmd>();
- VkViewport viewport;
- viewport.x = cmd->x;
- viewport.y = cmd->y + cmd->height;
- viewport.width = cmd->width;
- viewport.height = -cmd->height;
- viewport.minDepth = cmd->minDepth;
- viewport.maxDepth = cmd->maxDepth;
-
- // Vulkan disallows width = 0, but VK_KHR_maintenance1 which we require allows
- // height = 0 so use that to do an empty viewport.
- if (viewport.width == 0) {
- viewport.height = 0;
-
- // Set the viewport x range to a range that's always valid.
- viewport.x = 0;
- viewport.width = 1;
- }
-
- device->fn.CmdSetViewport(commands, 0, 1, &viewport);
- break;
- }
+ device->fn.CmdSetViewport(commands, 0, 1, &viewport);
+ break;
+ }
- case Command::SetScissorRect: {
- SetScissorRectCmd* cmd = mCommands.NextCommand<SetScissorRectCmd>();
- VkRect2D rect;
- rect.offset.x = cmd->x;
- rect.offset.y = cmd->y;
- rect.extent.width = cmd->width;
- rect.extent.height = cmd->height;
+ case Command::SetScissorRect: {
+ SetScissorRectCmd* cmd = mCommands.NextCommand<SetScissorRectCmd>();
+ VkRect2D rect;
+ rect.offset.x = cmd->x;
+ rect.offset.y = cmd->y;
+ rect.extent.width = cmd->width;
+ rect.extent.height = cmd->height;
- device->fn.CmdSetScissor(commands, 0, 1, &rect);
- break;
- }
+ device->fn.CmdSetScissor(commands, 0, 1, &rect);
+ break;
+ }
- case Command::ExecuteBundles: {
- ExecuteBundlesCmd* cmd = mCommands.NextCommand<ExecuteBundlesCmd>();
- auto bundles = mCommands.NextData<Ref<RenderBundleBase>>(cmd->count);
+ case Command::ExecuteBundles: {
+ ExecuteBundlesCmd* cmd = mCommands.NextCommand<ExecuteBundlesCmd>();
+ auto bundles = mCommands.NextData<Ref<RenderBundleBase>>(cmd->count);
- for (uint32_t i = 0; i < cmd->count; ++i) {
- CommandIterator* iter = bundles[i]->GetCommands();
- iter->Reset();
- while (iter->NextCommandId(&type)) {
- EncodeRenderBundleCommand(iter, type);
- }
+ for (uint32_t i = 0; i < cmd->count; ++i) {
+ CommandIterator* iter = bundles[i]->GetCommands();
+ iter->Reset();
+ while (iter->NextCommandId(&type)) {
+ EncodeRenderBundleCommand(iter, type);
}
- break;
}
+ break;
+ }
- case Command::BeginOcclusionQuery: {
- BeginOcclusionQueryCmd* cmd = mCommands.NextCommand<BeginOcclusionQueryCmd>();
+ case Command::BeginOcclusionQuery: {
+ BeginOcclusionQueryCmd* cmd = mCommands.NextCommand<BeginOcclusionQueryCmd>();
- device->fn.CmdBeginQuery(commands, ToBackend(cmd->querySet.Get())->GetHandle(),
- cmd->queryIndex, 0);
- break;
- }
+ device->fn.CmdBeginQuery(commands, ToBackend(cmd->querySet.Get())->GetHandle(),
+ cmd->queryIndex, 0);
+ break;
+ }
- case Command::EndOcclusionQuery: {
- EndOcclusionQueryCmd* cmd = mCommands.NextCommand<EndOcclusionQueryCmd>();
+ case Command::EndOcclusionQuery: {
+ EndOcclusionQueryCmd* cmd = mCommands.NextCommand<EndOcclusionQueryCmd>();
- device->fn.CmdEndQuery(commands, ToBackend(cmd->querySet.Get())->GetHandle(),
- cmd->queryIndex);
- break;
- }
+ device->fn.CmdEndQuery(commands, ToBackend(cmd->querySet.Get())->GetHandle(),
+ cmd->queryIndex);
+ break;
+ }
- case Command::WriteTimestamp: {
- WriteTimestampCmd* cmd = mCommands.NextCommand<WriteTimestampCmd>();
+ case Command::WriteTimestamp: {
+ WriteTimestampCmd* cmd = mCommands.NextCommand<WriteTimestampCmd>();
- RecordWriteTimestampCmd(recordingContext, device, cmd);
- break;
- }
+ RecordWriteTimestampCmd(recordingContext, device, cmd);
+ break;
+ }
- default: {
- EncodeRenderBundleCommand(&mCommands, type);
- break;
- }
+ default: {
+ EncodeRenderBundleCommand(&mCommands, type);
+ break;
}
}
-
- // EndRenderPass should have been called
- UNREACHABLE();
}
+ // EndRenderPass should have been called
+ UNREACHABLE();
+}
+
} // namespace dawn::native::vulkan
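
The render pass recording above sets viewport.y to the attachment height and viewport.height to its negation; together with VK_KHR_maintenance1 (which Dawn requires, and which makes a negative viewport height legal) this flips the Y axis so a top-left framebuffer origin maps onto Vulkan's bottom-left convention. A minimal sketch of the same trick against the raw Vulkan API follows; cmd, fbWidth and fbHeight are assumed placeholders, not Dawn symbols.

#include <vulkan/vulkan.h>

// Sketch only: flip the viewport so a top-left-origin coordinate space maps onto
// Vulkan's bottom-left convention. VK_KHR_maintenance1 (core in Vulkan 1.1) is what
// permits the negative height. `cmd`, `fbWidth` and `fbHeight` are placeholders.
void SetFlippedViewport(VkCommandBuffer cmd, uint32_t fbWidth, uint32_t fbHeight) {
    VkViewport viewport = {};
    viewport.x = 0.0f;
    viewport.y = static_cast<float>(fbHeight);        // start at the bottom edge...
    viewport.width = static_cast<float>(fbWidth);
    viewport.height = -static_cast<float>(fbHeight);  // ...and extend upwards
    viewport.minDepth = 0.0f;
    viewport.maxDepth = 1.0f;
    vkCmdSetViewport(cmd, 0, 1, &viewport);
}
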
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/CommandBufferVk.h b/chromium/third_party/dawn/src/dawn/native/vulkan/CommandBufferVk.h
index e4ec4109cc0..dbb7fdcd7f5 100644
--- a/chromium/third_party/dawn/src/dawn/native/vulkan/CommandBufferVk.h
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/CommandBufferVk.h
@@ -21,34 +21,34 @@
#include "dawn/common/vulkan_platform.h"
namespace dawn::native {
- struct BeginRenderPassCmd;
- struct TextureCopy;
+struct BeginRenderPassCmd;
+struct TextureCopy;
} // namespace dawn::native
namespace dawn::native::vulkan {
- struct CommandRecordingContext;
- class Device;
+struct CommandRecordingContext;
+class Device;
- class CommandBuffer final : public CommandBufferBase {
- public:
- static Ref<CommandBuffer> Create(CommandEncoder* encoder,
- const CommandBufferDescriptor* descriptor);
+class CommandBuffer final : public CommandBufferBase {
+ public:
+ static Ref<CommandBuffer> Create(CommandEncoder* encoder,
+ const CommandBufferDescriptor* descriptor);
- MaybeError RecordCommands(CommandRecordingContext* recordingContext);
+ MaybeError RecordCommands(CommandRecordingContext* recordingContext);
- private:
- CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor);
+ private:
+ CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor);
- MaybeError RecordComputePass(CommandRecordingContext* recordingContext,
- const ComputePassResourceUsage& resourceUsages);
- MaybeError RecordRenderPass(CommandRecordingContext* recordingContext,
- BeginRenderPassCmd* renderPass);
- MaybeError RecordCopyImageWithTemporaryBuffer(CommandRecordingContext* recordingContext,
- const TextureCopy& srcCopy,
- const TextureCopy& dstCopy,
- const Extent3D& copySize);
- };
+ MaybeError RecordComputePass(CommandRecordingContext* recordingContext,
+ const ComputePassResourceUsage& resourceUsages);
+ MaybeError RecordRenderPass(CommandRecordingContext* recordingContext,
+ BeginRenderPassCmd* renderPass);
+ MaybeError RecordCopyImageWithTemporaryBuffer(CommandRecordingContext* recordingContext,
+ const TextureCopy& srcCopy,
+ const TextureCopy& dstCopy,
+ const Extent3D& copySize);
+};
} // namespace dawn::native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/CommandRecordingContext.h b/chromium/third_party/dawn/src/dawn/native/vulkan/CommandRecordingContext.h
index 94e2e04d8f7..b5ced101ebb 100644
--- a/chromium/third_party/dawn/src/dawn/native/vulkan/CommandRecordingContext.h
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/CommandRecordingContext.h
@@ -14,26 +14,27 @@
#ifndef SRC_DAWN_NATIVE_VULKAN_COMMANDRECORDINGCONTEXT_H_
#define SRC_DAWN_NATIVE_VULKAN_COMMANDRECORDINGCONTEXT_H_
-#include "dawn/common/vulkan_platform.h"
+#include <vector>
+#include "dawn/common/vulkan_platform.h"
#include "dawn/native/vulkan/BufferVk.h"
namespace dawn::native::vulkan {
- // Used to track operations that are handled after recording.
- // Currently only tracks semaphores, but may be used to do barrier coalescing in the future.
- struct CommandRecordingContext {
- VkCommandBuffer commandBuffer = VK_NULL_HANDLE;
- std::vector<VkSemaphore> waitSemaphores = {};
- std::vector<VkSemaphore> signalSemaphores = {};
+// Used to track operations that are handled after recording.
+// Currently only tracks semaphores, but may be used to do barrier coalescing in the future.
+struct CommandRecordingContext {
+ VkCommandBuffer commandBuffer = VK_NULL_HANDLE;
+ std::vector<VkSemaphore> waitSemaphores = {};
+ std::vector<VkSemaphore> signalSemaphores = {};
- // The internal buffers used in the workaround of texture-to-texture copies with compressed
- // formats.
- std::vector<Ref<Buffer>> tempBuffers;
+ // The internal buffers used in the workaround of texture-to-texture copies with compressed
+ // formats.
+ std::vector<Ref<Buffer>> tempBuffers;
- // For Device state tracking only.
- VkCommandPool commandPool = VK_NULL_HANDLE;
- bool used = false;
- };
+ // For Device state tracking only.
+ VkCommandPool commandPool = VK_NULL_HANDLE;
+ bool used = false;
+};
} // namespace dawn::native::vulkan
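
CommandRecordingContext accumulates the wait and signal semaphores produced while recording so that a later queue submission can consume them in one go. Below is a minimal sketch of how such a struct is typically drained at submit time with the raw Vulkan API; RecordingContext, queue and fence are assumed placeholders and this is not Dawn's actual submission code.

#include <vector>
#include <vulkan/vulkan.h>

// Sketch only: build a VkSubmitInfo out of the semaphores gathered during recording.
// `RecordingContext` mirrors the struct above in spirit; it is not the Dawn type.
struct RecordingContext {
    VkCommandBuffer commandBuffer = VK_NULL_HANDLE;
    std::vector<VkSemaphore> waitSemaphores;
    std::vector<VkSemaphore> signalSemaphores;
};

VkResult SubmitRecording(VkQueue queue, const RecordingContext& ctx, VkFence fence) {
    // Conservative wait: block all stages until each wait semaphore signals.
    std::vector<VkPipelineStageFlags> waitStages(ctx.waitSemaphores.size(),
                                                 VK_PIPELINE_STAGE_ALL_COMMANDS_BIT);
    VkSubmitInfo submit = {};
    submit.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submit.waitSemaphoreCount = static_cast<uint32_t>(ctx.waitSemaphores.size());
    submit.pWaitSemaphores = ctx.waitSemaphores.data();
    submit.pWaitDstStageMask = waitStages.data();
    submit.commandBufferCount = 1;
    submit.pCommandBuffers = &ctx.commandBuffer;
    submit.signalSemaphoreCount = static_cast<uint32_t>(ctx.signalSemaphores.size());
    submit.pSignalSemaphores = ctx.signalSemaphores.data();
    return vkQueueSubmit(queue, 1, &submit, fence);
}
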
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/ComputePipelineVk.cpp b/chromium/third_party/dawn/src/dawn/native/vulkan/ComputePipelineVk.cpp
index 9a3fa8b6762..21937981dd3 100644
--- a/chromium/third_party/dawn/src/dawn/native/vulkan/ComputePipelineVk.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/ComputePipelineVk.cpp
@@ -14,110 +14,117 @@
#include "dawn/native/vulkan/ComputePipelineVk.h"
+#include <memory>
+#include <utility>
+#include <vector>
+
#include "dawn/native/CreatePipelineAsyncTask.h"
#include "dawn/native/vulkan/DeviceVk.h"
#include "dawn/native/vulkan/FencedDeleter.h"
+#include "dawn/native/vulkan/PipelineCacheVk.h"
#include "dawn/native/vulkan/PipelineLayoutVk.h"
#include "dawn/native/vulkan/ShaderModuleVk.h"
#include "dawn/native/vulkan/UtilsVulkan.h"
#include "dawn/native/vulkan/VulkanError.h"
-#include <utility>
-
namespace dawn::native::vulkan {
- // static
- Ref<ComputePipeline> ComputePipeline::CreateUninitialized(
- Device* device,
- const ComputePipelineDescriptor* descriptor) {
- return AcquireRef(new ComputePipeline(device, descriptor));
+// static
+Ref<ComputePipeline> ComputePipeline::CreateUninitialized(
+ Device* device,
+ const ComputePipelineDescriptor* descriptor) {
+ return AcquireRef(new ComputePipeline(device, descriptor));
+}
+
+MaybeError ComputePipeline::Initialize() {
+ Device* device = ToBackend(GetDevice());
+ const PipelineLayout* layout = ToBackend(GetLayout());
+
+    // Vulkan devices need the cache UUID field to be serialized into pipeline cache keys.
+ mCacheKey.Record(device->GetDeviceInfo().properties.pipelineCacheUUID);
+
+ VkComputePipelineCreateInfo createInfo;
+ createInfo.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO;
+ createInfo.pNext = nullptr;
+ createInfo.flags = 0;
+ createInfo.layout = layout->GetHandle();
+ createInfo.basePipelineHandle = VkPipeline{};
+ createInfo.basePipelineIndex = -1;
+
+ createInfo.stage.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
+ createInfo.stage.pNext = nullptr;
+ createInfo.stage.flags = 0;
+ createInfo.stage.stage = VK_SHADER_STAGE_COMPUTE_BIT;
+ // Generate a new VkShaderModule with BindingRemapper tint transform for each pipeline
+ const ProgrammableStage& computeStage = GetStage(SingleShaderStage::Compute);
+ ShaderModule* module = ToBackend(computeStage.module.Get());
+ const ShaderModule::Spirv* spirv;
+ DAWN_TRY_ASSIGN((std::tie(createInfo.stage.module, spirv)),
+ module->GetHandleAndSpirv(computeStage.entryPoint.c_str(), layout));
+
+ createInfo.stage.pName = computeStage.entryPoint.c_str();
+
+ std::vector<OverridableConstantScalar> specializationDataEntries;
+ std::vector<VkSpecializationMapEntry> specializationMapEntries;
+ VkSpecializationInfo specializationInfo{};
+ createInfo.stage.pSpecializationInfo = GetVkSpecializationInfo(
+ computeStage, &specializationInfo, &specializationDataEntries, &specializationMapEntries);
+
+ PNextChainBuilder stageExtChain(&createInfo.stage);
+
+ VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT subgroupSizeInfo = {};
+ uint32_t computeSubgroupSize = device->GetComputeSubgroupSize();
+ if (computeSubgroupSize != 0u) {
+ ASSERT(device->GetDeviceInfo().HasExt(DeviceExt::SubgroupSizeControl));
+ subgroupSizeInfo.requiredSubgroupSize = computeSubgroupSize;
+ stageExtChain.Add(
+ &subgroupSizeInfo,
+ VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT);
}
- MaybeError ComputePipeline::Initialize() {
- VkComputePipelineCreateInfo createInfo;
- createInfo.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO;
- createInfo.pNext = nullptr;
- createInfo.flags = 0;
- createInfo.layout = ToBackend(GetLayout())->GetHandle();
- createInfo.basePipelineHandle = ::VK_NULL_HANDLE;
- createInfo.basePipelineIndex = -1;
-
- createInfo.stage.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
- createInfo.stage.pNext = nullptr;
- createInfo.stage.flags = 0;
- createInfo.stage.stage = VK_SHADER_STAGE_COMPUTE_BIT;
- // Generate a new VkShaderModule with BindingRemapper tint transform for each pipeline
- const ProgrammableStage& computeStage = GetStage(SingleShaderStage::Compute);
- ShaderModule* module = ToBackend(computeStage.module.Get());
- PipelineLayout* layout = ToBackend(GetLayout());
- const ShaderModule::Spirv* spirv;
- DAWN_TRY_ASSIGN((std::tie(createInfo.stage.module, spirv)),
- module->GetHandleAndSpirv(computeStage.entryPoint.c_str(), layout));
-
- createInfo.stage.pName = computeStage.entryPoint.c_str();
-
- std::vector<OverridableConstantScalar> specializationDataEntries;
- std::vector<VkSpecializationMapEntry> specializationMapEntries;
- VkSpecializationInfo specializationInfo{};
- createInfo.stage.pSpecializationInfo =
- GetVkSpecializationInfo(computeStage, &specializationInfo, &specializationDataEntries,
- &specializationMapEntries);
-
- Device* device = ToBackend(GetDevice());
-
- PNextChainBuilder stageExtChain(&createInfo.stage);
-
- VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT subgroupSizeInfo = {};
- uint32_t computeSubgroupSize = device->GetComputeSubgroupSize();
- if (computeSubgroupSize != 0u) {
- ASSERT(device->GetDeviceInfo().HasExt(DeviceExt::SubgroupSizeControl));
- subgroupSizeInfo.requiredSubgroupSize = computeSubgroupSize;
- stageExtChain.Add(
- &subgroupSizeInfo,
- VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT);
- }
-
- // Record cache key information now since the createInfo is not stored.
- GetCacheKey()
- ->Record(createInfo, static_cast<const ComputePipeline*>(this)->GetLayout())
- .RecordIterable(*spirv);
-
- DAWN_TRY(CheckVkSuccess(
- device->fn.CreateComputePipelines(device->GetVkDevice(), ::VK_NULL_HANDLE, 1,
- &createInfo, nullptr, &*mHandle),
- "CreateComputePipeline"));
-
- SetLabelImpl();
-
- return {};
- }
+ // Record cache key information now since the createInfo is not stored.
+ mCacheKey.Record(createInfo, layout).RecordIterable(*spirv);
- void ComputePipeline::SetLabelImpl() {
- SetDebugName(ToBackend(GetDevice()), mHandle, "Dawn_ComputePipeline", GetLabel());
- }
+ // Try to see if we have anything in the blob cache.
+ Ref<PipelineCache> cache = ToBackend(GetDevice()->GetOrCreatePipelineCache(GetCacheKey()));
+ DAWN_TRY(
+ CheckVkSuccess(device->fn.CreateComputePipelines(device->GetVkDevice(), cache->GetHandle(),
+ 1, &createInfo, nullptr, &*mHandle),
+ "CreateComputePipeline"));
+ // TODO(dawn:549): Flush is currently in the same thread, but perhaps deferrable.
+ DAWN_TRY(cache->FlushIfNeeded());
- ComputePipeline::~ComputePipeline() = default;
+ SetLabelImpl();
- void ComputePipeline::DestroyImpl() {
- ComputePipelineBase::DestroyImpl();
+ return {};
+}
- if (mHandle != VK_NULL_HANDLE) {
- ToBackend(GetDevice())->GetFencedDeleter()->DeleteWhenUnused(mHandle);
- mHandle = VK_NULL_HANDLE;
- }
- }
+void ComputePipeline::SetLabelImpl() {
+ SetDebugName(ToBackend(GetDevice()), mHandle, "Dawn_ComputePipeline", GetLabel());
+}
- VkPipeline ComputePipeline::GetHandle() const {
- return mHandle;
- }
+ComputePipeline::~ComputePipeline() = default;
+
+void ComputePipeline::DestroyImpl() {
+ ComputePipelineBase::DestroyImpl();
- void ComputePipeline::InitializeAsync(Ref<ComputePipelineBase> computePipeline,
- WGPUCreateComputePipelineAsyncCallback callback,
- void* userdata) {
- std::unique_ptr<CreateComputePipelineAsyncTask> asyncTask =
- std::make_unique<CreateComputePipelineAsyncTask>(std::move(computePipeline), callback,
- userdata);
- CreateComputePipelineAsyncTask::RunAsync(std::move(asyncTask));
+ if (mHandle != VK_NULL_HANDLE) {
+ ToBackend(GetDevice())->GetFencedDeleter()->DeleteWhenUnused(mHandle);
+ mHandle = VK_NULL_HANDLE;
}
+}
+
+VkPipeline ComputePipeline::GetHandle() const {
+ return mHandle;
+}
+
+void ComputePipeline::InitializeAsync(Ref<ComputePipelineBase> computePipeline,
+ WGPUCreateComputePipelineAsyncCallback callback,
+ void* userdata) {
+ std::unique_ptr<CreateComputePipelineAsyncTask> asyncTask =
+ std::make_unique<CreateComputePipelineAsyncTask>(std::move(computePipeline), callback,
+ userdata);
+ CreateComputePipelineAsyncTask::RunAsync(std::move(asyncTask));
+}
} // namespace dawn::native::vulkan
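
The rewritten ComputePipeline::Initialize() records a cache key (seeded with the device's pipelineCacheUUID) and then hands a PipelineCache handle to CreateComputePipelines instead of VK_NULL_HANDLE. At the driver level the object behind that handle is a plain VkPipelineCache; the sketch below shows the underlying create/serialize round trip, with device and initialData as assumed placeholders rather than Dawn's blob-cache plumbing.

#include <cstdint>
#include <vector>
#include <vulkan/vulkan.h>

// Sketch only: prime a VkPipelineCache with previously serialized bytes, then read
// the (possibly grown) blob back so it can be persisted after pipeline creation.
std::vector<uint8_t> RoundTripPipelineCache(VkDevice device,
                                            const std::vector<uint8_t>& initialData) {
    VkPipelineCacheCreateInfo info = {};
    info.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
    info.initialDataSize = initialData.size();
    info.pInitialData = initialData.empty() ? nullptr : initialData.data();

    VkPipelineCache cache = VK_NULL_HANDLE;
    if (vkCreatePipelineCache(device, &info, nullptr, &cache) != VK_SUCCESS) {
        return {};
    }

    // ... vkCreateComputePipelines(device, cache, ...) would go here ...

    size_t size = 0;
    vkGetPipelineCacheData(device, cache, &size, nullptr);
    std::vector<uint8_t> blob(size);
    vkGetPipelineCacheData(device, cache, &size, blob.data());

    vkDestroyPipelineCache(device, cache, nullptr);
    return blob;
}
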
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/ComputePipelineVk.h b/chromium/third_party/dawn/src/dawn/native/vulkan/ComputePipelineVk.h
index c2b7d83cf4a..2159db8f291 100644
--- a/chromium/third_party/dawn/src/dawn/native/vulkan/ComputePipelineVk.h
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/ComputePipelineVk.h
@@ -22,31 +22,30 @@
namespace dawn::native::vulkan {
- class Device;
+class Device;
- class ComputePipeline final : public ComputePipelineBase {
- public:
- static Ref<ComputePipeline> CreateUninitialized(
- Device* device,
- const ComputePipelineDescriptor* descriptor);
- static void InitializeAsync(Ref<ComputePipelineBase> computePipeline,
- WGPUCreateComputePipelineAsyncCallback callback,
- void* userdata);
+class ComputePipeline final : public ComputePipelineBase {
+ public:
+ static Ref<ComputePipeline> CreateUninitialized(Device* device,
+ const ComputePipelineDescriptor* descriptor);
+ static void InitializeAsync(Ref<ComputePipelineBase> computePipeline,
+ WGPUCreateComputePipelineAsyncCallback callback,
+ void* userdata);
- VkPipeline GetHandle() const;
+ VkPipeline GetHandle() const;
- MaybeError Initialize() override;
+ MaybeError Initialize() override;
- // Dawn API
- void SetLabelImpl() override;
+ // Dawn API
+ void SetLabelImpl() override;
- private:
- ~ComputePipeline() override;
- void DestroyImpl() override;
- using ComputePipelineBase::ComputePipelineBase;
+ private:
+ ~ComputePipeline() override;
+ void DestroyImpl() override;
+ using ComputePipelineBase::ComputePipelineBase;
- VkPipeline mHandle = VK_NULL_HANDLE;
- };
+ VkPipeline mHandle = VK_NULL_HANDLE;
+};
} // namespace dawn::native::vulkan
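
InitializeAsync() backs the createComputePipelineAsync path: it wraps the uninitialized pipeline in a CreateComputePipelineAsyncTask and hands it to CreateComputePipelineAsyncTask::RunAsync, which invokes the caller's callback once Initialize() has run. From the client side this surfaces roughly as sketched below through the webgpu.h C API of this era; the header path and the descriptor setup are assumptions, and the callback body is a placeholder.

#include <cstdio>
#include <webgpu/webgpu.h>  // header path may differ depending on the distribution

// Sketch only: how a client reaches the async compute pipeline path. `device` and
// `desc` are assumed to have been created elsewhere.
void OnPipelineCreated(WGPUCreatePipelineAsyncStatus status,
                       WGPUComputePipeline pipeline,
                       const char* message,
                       void* userdata) {
    if (status != WGPUCreatePipelineAsyncStatus_Success) {
        std::printf("pipeline creation failed: %s\n", message ? message : "");
        return;
    }
    // Hand the pipeline back to the application (placeholder).
    *static_cast<WGPUComputePipeline*>(userdata) = pipeline;
}

void RequestPipeline(WGPUDevice device, const WGPUComputePipelineDescriptor* desc,
                     WGPUComputePipeline* out) {
    wgpuDeviceCreateComputePipelineAsync(device, desc, OnPipelineCreated, out);
}
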
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/DescriptorSetAllocation.h b/chromium/third_party/dawn/src/dawn/native/vulkan/DescriptorSetAllocation.h
index ffe543e78c1..ad0c6f83bc1 100644
--- a/chromium/third_party/dawn/src/dawn/native/vulkan/DescriptorSetAllocation.h
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/DescriptorSetAllocation.h
@@ -19,12 +19,12 @@
namespace dawn::native::vulkan {
- // Contains a descriptor set along with data necessary to track its allocation.
- struct DescriptorSetAllocation {
- VkDescriptorSet set = VK_NULL_HANDLE;
- uint32_t poolIndex;
- uint16_t setIndex;
- };
+// Contains a descriptor set along with data necessary to track its allocation.
+struct DescriptorSetAllocation {
+ VkDescriptorSet set = VK_NULL_HANDLE;
+ uint32_t poolIndex;
+ uint16_t setIndex;
+};
} // namespace dawn::native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/DescriptorSetAllocator.cpp b/chromium/third_party/dawn/src/dawn/native/vulkan/DescriptorSetAllocator.cpp
index 0f89d614548..422ff78f515 100644
--- a/chromium/third_party/dawn/src/dawn/native/vulkan/DescriptorSetAllocator.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/DescriptorSetAllocator.cpp
@@ -14,6 +14,8 @@
#include "dawn/native/vulkan/DescriptorSetAllocator.h"
+#include <utility>
+
#include "dawn/native/vulkan/BindGroupLayoutVk.h"
#include "dawn/native/vulkan/DeviceVk.h"
#include "dawn/native/vulkan/FencedDeleter.h"
@@ -21,168 +23,167 @@
namespace dawn::native::vulkan {
- // TODO(enga): Figure out this value.
- static constexpr uint32_t kMaxDescriptorsPerPool = 512;
-
- // static
- Ref<DescriptorSetAllocator> DescriptorSetAllocator::Create(
- BindGroupLayout* layout,
- std::map<VkDescriptorType, uint32_t> descriptorCountPerType) {
- return AcquireRef(new DescriptorSetAllocator(layout, descriptorCountPerType));
+// TODO(enga): Figure out this value.
+static constexpr uint32_t kMaxDescriptorsPerPool = 512;
+
+// static
+Ref<DescriptorSetAllocator> DescriptorSetAllocator::Create(
+ BindGroupLayout* layout,
+ std::map<VkDescriptorType, uint32_t> descriptorCountPerType) {
+ return AcquireRef(new DescriptorSetAllocator(layout, descriptorCountPerType));
+}
+
+DescriptorSetAllocator::DescriptorSetAllocator(
+ BindGroupLayout* layout,
+ std::map<VkDescriptorType, uint32_t> descriptorCountPerType)
+ : ObjectBase(layout->GetDevice()), mLayout(layout) {
+ ASSERT(layout != nullptr);
+
+ // Compute the total number of descriptors for this layout.
+ uint32_t totalDescriptorCount = 0;
+ mPoolSizes.reserve(descriptorCountPerType.size());
+ for (const auto& [type, count] : descriptorCountPerType) {
+ ASSERT(count > 0);
+ totalDescriptorCount += count;
+ mPoolSizes.push_back(VkDescriptorPoolSize{type, count});
}
- DescriptorSetAllocator::DescriptorSetAllocator(
- BindGroupLayout* layout,
- std::map<VkDescriptorType, uint32_t> descriptorCountPerType)
- : ObjectBase(layout->GetDevice()), mLayout(layout) {
- ASSERT(layout != nullptr);
-
- // Compute the total number of descriptors for this layout.
- uint32_t totalDescriptorCount = 0;
- mPoolSizes.reserve(descriptorCountPerType.size());
- for (const auto& [type, count] : descriptorCountPerType) {
- ASSERT(count > 0);
- totalDescriptorCount += count;
- mPoolSizes.push_back(VkDescriptorPoolSize{type, count});
- }
-
- if (totalDescriptorCount == 0) {
- // Vulkan requires that valid usage of vkCreateDescriptorPool must have a non-zero
- // number of pools, each of which has non-zero descriptor counts.
- // Since the descriptor set layout is empty, we should be able to allocate
- // |kMaxDescriptorsPerPool| sets from this 1-sized descriptor pool.
- // The type of this descriptor pool doesn't matter because it is never used.
- mPoolSizes.push_back(VkDescriptorPoolSize{VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1});
- mMaxSets = kMaxDescriptorsPerPool;
- } else {
- ASSERT(totalDescriptorCount <= kMaxBindingsPerPipelineLayout);
- static_assert(kMaxBindingsPerPipelineLayout <= kMaxDescriptorsPerPool);
-
- // Compute the total number of descriptors sets that fits given the max.
- mMaxSets = kMaxDescriptorsPerPool / totalDescriptorCount;
- ASSERT(mMaxSets > 0);
-
- // Grow the number of desciptors in the pool to fit the computed |mMaxSets|.
- for (auto& poolSize : mPoolSizes) {
- poolSize.descriptorCount *= mMaxSets;
- }
+ if (totalDescriptorCount == 0) {
+ // Vulkan requires that valid usage of vkCreateDescriptorPool must have a non-zero
+ // number of pools, each of which has non-zero descriptor counts.
+ // Since the descriptor set layout is empty, we should be able to allocate
+ // |kMaxDescriptorsPerPool| sets from this 1-sized descriptor pool.
+ // The type of this descriptor pool doesn't matter because it is never used.
+ mPoolSizes.push_back(VkDescriptorPoolSize{VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1});
+ mMaxSets = kMaxDescriptorsPerPool;
+ } else {
+ ASSERT(totalDescriptorCount <= kMaxBindingsPerPipelineLayout);
+ static_assert(kMaxBindingsPerPipelineLayout <= kMaxDescriptorsPerPool);
+
+        // Compute the total number of descriptor sets that fit given the max.
+ mMaxSets = kMaxDescriptorsPerPool / totalDescriptorCount;
+ ASSERT(mMaxSets > 0);
+
+        // Grow the number of descriptors in the pool to fit the computed |mMaxSets|.
+ for (auto& poolSize : mPoolSizes) {
+ poolSize.descriptorCount *= mMaxSets;
}
}
-
- DescriptorSetAllocator::~DescriptorSetAllocator() {
- for (auto& pool : mDescriptorPools) {
- ASSERT(pool.freeSetIndices.size() == mMaxSets);
- if (pool.vkPool != VK_NULL_HANDLE) {
- Device* device = ToBackend(GetDevice());
- device->GetFencedDeleter()->DeleteWhenUnused(pool.vkPool);
- }
+}
+
+DescriptorSetAllocator::~DescriptorSetAllocator() {
+ for (auto& pool : mDescriptorPools) {
+ ASSERT(pool.freeSetIndices.size() == mMaxSets);
+ if (pool.vkPool != VK_NULL_HANDLE) {
+ Device* device = ToBackend(GetDevice());
+ device->GetFencedDeleter()->DeleteWhenUnused(pool.vkPool);
}
}
+}
- ResultOrError<DescriptorSetAllocation> DescriptorSetAllocator::Allocate() {
- if (mAvailableDescriptorPoolIndices.empty()) {
- DAWN_TRY(AllocateDescriptorPool());
- }
-
- ASSERT(!mAvailableDescriptorPoolIndices.empty());
+ResultOrError<DescriptorSetAllocation> DescriptorSetAllocator::Allocate() {
+ if (mAvailableDescriptorPoolIndices.empty()) {
+ DAWN_TRY(AllocateDescriptorPool());
+ }
- const PoolIndex poolIndex = mAvailableDescriptorPoolIndices.back();
- DescriptorPool* pool = &mDescriptorPools[poolIndex];
+ ASSERT(!mAvailableDescriptorPoolIndices.empty());
- ASSERT(!pool->freeSetIndices.empty());
+ const PoolIndex poolIndex = mAvailableDescriptorPoolIndices.back();
+ DescriptorPool* pool = &mDescriptorPools[poolIndex];
- SetIndex setIndex = pool->freeSetIndices.back();
- pool->freeSetIndices.pop_back();
+ ASSERT(!pool->freeSetIndices.empty());
- if (pool->freeSetIndices.empty()) {
- mAvailableDescriptorPoolIndices.pop_back();
- }
+ SetIndex setIndex = pool->freeSetIndices.back();
+ pool->freeSetIndices.pop_back();
- return DescriptorSetAllocation{pool->sets[setIndex], poolIndex, setIndex};
+ if (pool->freeSetIndices.empty()) {
+ mAvailableDescriptorPoolIndices.pop_back();
}
- void DescriptorSetAllocator::Deallocate(DescriptorSetAllocation* allocationInfo) {
- ASSERT(allocationInfo != nullptr);
- ASSERT(allocationInfo->set != VK_NULL_HANDLE);
-
- // We can't reuse the descriptor set right away because the Vulkan spec says in the
- // documentation for vkCmdBindDescriptorSets that the set may be consumed any time between
- // host execution of the command and the end of the draw/dispatch.
- Device* device = ToBackend(GetDevice());
- const ExecutionSerial serial = device->GetPendingCommandSerial();
- mPendingDeallocations.Enqueue({allocationInfo->poolIndex, allocationInfo->setIndex},
- serial);
-
- if (mLastDeallocationSerial != serial) {
- device->EnqueueDeferredDeallocation(this);
- mLastDeallocationSerial = serial;
- }
+ return DescriptorSetAllocation{pool->sets[setIndex], poolIndex, setIndex};
+}
- // Clear the content of allocation so that use after frees are more visible.
- *allocationInfo = {};
- }
+void DescriptorSetAllocator::Deallocate(DescriptorSetAllocation* allocationInfo) {
+ ASSERT(allocationInfo != nullptr);
+ ASSERT(allocationInfo->set != VK_NULL_HANDLE);
- void DescriptorSetAllocator::FinishDeallocation(ExecutionSerial completedSerial) {
- for (const Deallocation& dealloc : mPendingDeallocations.IterateUpTo(completedSerial)) {
- ASSERT(dealloc.poolIndex < mDescriptorPools.size());
+ // We can't reuse the descriptor set right away because the Vulkan spec says in the
+ // documentation for vkCmdBindDescriptorSets that the set may be consumed any time between
+ // host execution of the command and the end of the draw/dispatch.
+ Device* device = ToBackend(GetDevice());
+ const ExecutionSerial serial = device->GetPendingCommandSerial();
+ mPendingDeallocations.Enqueue({allocationInfo->poolIndex, allocationInfo->setIndex}, serial);
- auto& freeSetIndices = mDescriptorPools[dealloc.poolIndex].freeSetIndices;
- if (freeSetIndices.empty()) {
- mAvailableDescriptorPoolIndices.emplace_back(dealloc.poolIndex);
- }
- freeSetIndices.emplace_back(dealloc.setIndex);
- }
- mPendingDeallocations.ClearUpTo(completedSerial);
+ if (mLastDeallocationSerial != serial) {
+ device->EnqueueDeferredDeallocation(this);
+ mLastDeallocationSerial = serial;
}
- MaybeError DescriptorSetAllocator::AllocateDescriptorPool() {
- VkDescriptorPoolCreateInfo createInfo;
- createInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
- createInfo.pNext = nullptr;
- createInfo.flags = 0;
- createInfo.maxSets = mMaxSets;
- createInfo.poolSizeCount = static_cast<PoolIndex>(mPoolSizes.size());
- createInfo.pPoolSizes = mPoolSizes.data();
-
- Device* device = ToBackend(GetDevice());
-
- VkDescriptorPool descriptorPool;
- DAWN_TRY(CheckVkSuccess(device->fn.CreateDescriptorPool(device->GetVkDevice(), &createInfo,
- nullptr, &*descriptorPool),
- "CreateDescriptorPool"));
-
- std::vector<VkDescriptorSetLayout> layouts(mMaxSets, mLayout->GetHandle());
-
- VkDescriptorSetAllocateInfo allocateInfo;
- allocateInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
- allocateInfo.pNext = nullptr;
- allocateInfo.descriptorPool = descriptorPool;
- allocateInfo.descriptorSetCount = mMaxSets;
- allocateInfo.pSetLayouts = AsVkArray(layouts.data());
-
- std::vector<VkDescriptorSet> sets(mMaxSets);
- MaybeError result =
- CheckVkSuccess(device->fn.AllocateDescriptorSets(device->GetVkDevice(), &allocateInfo,
- AsVkArray(sets.data())),
- "AllocateDescriptorSets");
- if (result.IsError()) {
- // On an error we can destroy the pool immediately because no command references it.
- device->fn.DestroyDescriptorPool(device->GetVkDevice(), descriptorPool, nullptr);
- DAWN_TRY(std::move(result));
- }
+ // Clear the content of allocation so that use after frees are more visible.
+ *allocationInfo = {};
+}
- std::vector<SetIndex> freeSetIndices;
- freeSetIndices.reserve(mMaxSets);
+void DescriptorSetAllocator::FinishDeallocation(ExecutionSerial completedSerial) {
+ for (const Deallocation& dealloc : mPendingDeallocations.IterateUpTo(completedSerial)) {
+ ASSERT(dealloc.poolIndex < mDescriptorPools.size());
- for (SetIndex i = 0; i < mMaxSets; ++i) {
- freeSetIndices.push_back(i);
+ auto& freeSetIndices = mDescriptorPools[dealloc.poolIndex].freeSetIndices;
+ if (freeSetIndices.empty()) {
+ mAvailableDescriptorPoolIndices.emplace_back(dealloc.poolIndex);
}
+ freeSetIndices.emplace_back(dealloc.setIndex);
+ }
+ mPendingDeallocations.ClearUpTo(completedSerial);
+}
+
+MaybeError DescriptorSetAllocator::AllocateDescriptorPool() {
+ VkDescriptorPoolCreateInfo createInfo;
+ createInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
+ createInfo.pNext = nullptr;
+ createInfo.flags = 0;
+ createInfo.maxSets = mMaxSets;
+ createInfo.poolSizeCount = static_cast<PoolIndex>(mPoolSizes.size());
+ createInfo.pPoolSizes = mPoolSizes.data();
+
+ Device* device = ToBackend(GetDevice());
+
+ VkDescriptorPool descriptorPool;
+ DAWN_TRY(CheckVkSuccess(device->fn.CreateDescriptorPool(device->GetVkDevice(), &createInfo,
+ nullptr, &*descriptorPool),
+ "CreateDescriptorPool"));
+
+ std::vector<VkDescriptorSetLayout> layouts(mMaxSets, mLayout->GetHandle());
+
+ VkDescriptorSetAllocateInfo allocateInfo;
+ allocateInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
+ allocateInfo.pNext = nullptr;
+ allocateInfo.descriptorPool = descriptorPool;
+ allocateInfo.descriptorSetCount = mMaxSets;
+ allocateInfo.pSetLayouts = AsVkArray(layouts.data());
+
+ std::vector<VkDescriptorSet> sets(mMaxSets);
+ MaybeError result =
+ CheckVkSuccess(device->fn.AllocateDescriptorSets(device->GetVkDevice(), &allocateInfo,
+ AsVkArray(sets.data())),
+ "AllocateDescriptorSets");
+ if (result.IsError()) {
+ // On an error we can destroy the pool immediately because no command references it.
+ device->fn.DestroyDescriptorPool(device->GetVkDevice(), descriptorPool, nullptr);
+ DAWN_TRY(std::move(result));
+ }
- mAvailableDescriptorPoolIndices.push_back(mDescriptorPools.size());
- mDescriptorPools.emplace_back(
- DescriptorPool{descriptorPool, std::move(sets), std::move(freeSetIndices)});
+ std::vector<SetIndex> freeSetIndices;
+ freeSetIndices.reserve(mMaxSets);
- return {};
+ for (SetIndex i = 0; i < mMaxSets; ++i) {
+ freeSetIndices.push_back(i);
}
+ mAvailableDescriptorPoolIndices.push_back(mDescriptorPools.size());
+ mDescriptorPools.emplace_back(
+ DescriptorPool{descriptorPool, std::move(sets), std::move(freeSetIndices)});
+
+ return {};
+}
+
} // namespace dawn::native::vulkan
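
DescriptorSetAllocator sizes each VkDescriptorPool around kMaxDescriptorsPerPool = 512: a layout consuming N descriptors per set yields mMaxSets = 512 / N sets per pool, and each per-type count is then scaled by that factor. A small standalone sketch of the same arithmetic, assuming a hypothetical layout with three uniform buffers and one sampler per set:

#include <cstdint>
#include <map>
#include <vector>
#include <vulkan/vulkan.h>

// Sketch only: reproduce the pool-sizing arithmetic above for an assumed layout of
// 3 uniform-buffer descriptors and 1 sampler descriptor per set.
int main() {
    constexpr uint32_t kMaxDescriptorsPerPool = 512;

    std::map<VkDescriptorType, uint32_t> perType = {
        {VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 3},
        {VK_DESCRIPTOR_TYPE_SAMPLER, 1},
    };

    uint32_t totalPerSet = 0;  // 4 descriptors consumed by each set
    for (const auto& [type, count] : perType) {
        totalPerSet += count;
    }

    const uint32_t maxSets = kMaxDescriptorsPerPool / totalPerSet;  // 512 / 4 = 128

    std::vector<VkDescriptorPoolSize> poolSizes;
    for (const auto& [type, count] : perType) {
        // 128 sets * 3 = 384 uniform-buffer slots, 128 sets * 1 = 128 sampler slots.
        poolSizes.push_back(VkDescriptorPoolSize{type, count * maxSets});
    }
    return 0;
}
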
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/DescriptorSetAllocator.h b/chromium/third_party/dawn/src/dawn/native/vulkan/DescriptorSetAllocator.h
index cc3a96edd75..98664374a68 100644
--- a/chromium/third_party/dawn/src/dawn/native/vulkan/DescriptorSetAllocator.h
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/DescriptorSetAllocator.h
@@ -15,6 +15,9 @@
#ifndef SRC_DAWN_NATIVE_VULKAN_DESCRIPTORSETALLOCATOR_H_
#define SRC_DAWN_NATIVE_VULKAN_DESCRIPTORSETALLOCATOR_H_
+#include <map>
+#include <vector>
+
#include "dawn/common/SerialQueue.h"
#include "dawn/common/vulkan_platform.h"
#include "dawn/native/Error.h"
@@ -22,54 +25,51 @@
#include "dawn/native/ObjectBase.h"
#include "dawn/native/vulkan/DescriptorSetAllocation.h"
-#include <map>
-#include <vector>
-
namespace dawn::native::vulkan {
- class BindGroupLayout;
+class BindGroupLayout;
- class DescriptorSetAllocator : public ObjectBase {
- using PoolIndex = uint32_t;
- using SetIndex = uint16_t;
+class DescriptorSetAllocator : public ObjectBase {
+ using PoolIndex = uint32_t;
+ using SetIndex = uint16_t;
- public:
- static Ref<DescriptorSetAllocator> Create(
- BindGroupLayout* layout,
- std::map<VkDescriptorType, uint32_t> descriptorCountPerType);
+ public:
+ static Ref<DescriptorSetAllocator> Create(
+ BindGroupLayout* layout,
+ std::map<VkDescriptorType, uint32_t> descriptorCountPerType);
- ResultOrError<DescriptorSetAllocation> Allocate();
- void Deallocate(DescriptorSetAllocation* allocationInfo);
- void FinishDeallocation(ExecutionSerial completedSerial);
+ ResultOrError<DescriptorSetAllocation> Allocate();
+ void Deallocate(DescriptorSetAllocation* allocationInfo);
+ void FinishDeallocation(ExecutionSerial completedSerial);
- private:
- DescriptorSetAllocator(BindGroupLayout* layout,
- std::map<VkDescriptorType, uint32_t> descriptorCountPerType);
- ~DescriptorSetAllocator();
+ private:
+ DescriptorSetAllocator(BindGroupLayout* layout,
+ std::map<VkDescriptorType, uint32_t> descriptorCountPerType);
+ ~DescriptorSetAllocator() override;
- MaybeError AllocateDescriptorPool();
+ MaybeError AllocateDescriptorPool();
- BindGroupLayout* mLayout;
+ BindGroupLayout* mLayout;
- std::vector<VkDescriptorPoolSize> mPoolSizes;
- SetIndex mMaxSets;
+ std::vector<VkDescriptorPoolSize> mPoolSizes;
+ SetIndex mMaxSets;
- struct DescriptorPool {
- VkDescriptorPool vkPool;
- std::vector<VkDescriptorSet> sets;
- std::vector<SetIndex> freeSetIndices;
- };
+ struct DescriptorPool {
+ VkDescriptorPool vkPool;
+ std::vector<VkDescriptorSet> sets;
+ std::vector<SetIndex> freeSetIndices;
+ };
- std::vector<PoolIndex> mAvailableDescriptorPoolIndices;
- std::vector<DescriptorPool> mDescriptorPools;
+ std::vector<PoolIndex> mAvailableDescriptorPoolIndices;
+ std::vector<DescriptorPool> mDescriptorPools;
- struct Deallocation {
- PoolIndex poolIndex;
- SetIndex setIndex;
- };
- SerialQueue<ExecutionSerial, Deallocation> mPendingDeallocations;
- ExecutionSerial mLastDeallocationSerial = ExecutionSerial(0);
+ struct Deallocation {
+ PoolIndex poolIndex;
+ SetIndex setIndex;
};
+ SerialQueue<ExecutionSerial, Deallocation> mPendingDeallocations;
+ ExecutionSerial mLastDeallocationSerial = ExecutionSerial(0);
+};
} // namespace dawn::native::vulkan
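
Deallocate() does not return a set to its pool's free list immediately: as the comment in the .cpp notes, vkCmdBindDescriptorSets lets the implementation consume the set any time between host execution of the command and the end of the draw or dispatch, so the indices are queued against the pending command serial and only recycled in FinishDeallocation() once that serial has completed on the GPU. A minimal standalone sketch of the same pattern, using a plain deque in place of Dawn's SerialQueue; all names here are hypothetical.

#include <cstdint>
#include <deque>
#include <vector>

// Sketch only: defer recycling of descriptor-set slots until the GPU has finished
// the serial that might still reference them. These are not the Dawn types.
struct PendingDealloc {
    uint32_t poolIndex;
    uint16_t setIndex;
    uint64_t serial;  // the pending command serial at the time of deallocation
};

struct DeferredFreeList {
    std::deque<PendingDealloc> pending;
    std::vector<std::vector<uint16_t>> freeSetIndicesPerPool;

    void Deallocate(uint32_t poolIndex, uint16_t setIndex, uint64_t pendingSerial) {
        pending.push_back({poolIndex, setIndex, pendingSerial});
    }

    void FinishDeallocation(uint64_t completedSerial) {
        while (!pending.empty() && pending.front().serial <= completedSerial) {
            const PendingDealloc& d = pending.front();
            freeSetIndicesPerPool[d.poolIndex].push_back(d.setIndex);  // safe to reuse
            pending.pop_front();
        }
    }
};
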
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/DeviceVk.cpp b/chromium/third_party/dawn/src/dawn/native/vulkan/DeviceVk.cpp
index c3ded043aad..3605dc027df 100644
--- a/chromium/third_party/dawn/src/dawn/native/vulkan/DeviceVk.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/DeviceVk.cpp
@@ -14,11 +14,13 @@
#include "dawn/native/vulkan/DeviceVk.h"
+#include "dawn/common/Log.h"
#include "dawn/common/Platform.h"
#include "dawn/native/BackendConnection.h"
#include "dawn/native/ChainUtils_autogen.h"
#include "dawn/native/Error.h"
#include "dawn/native/ErrorData.h"
+#include "dawn/native/Instance.h"
#include "dawn/native/VulkanBackend.h"
#include "dawn/native/vulkan/AdapterVk.h"
#include "dawn/native/vulkan/BackendVk.h"
@@ -28,6 +30,7 @@
#include "dawn/native/vulkan/CommandBufferVk.h"
#include "dawn/native/vulkan/ComputePipelineVk.h"
#include "dawn/native/vulkan/FencedDeleter.h"
+#include "dawn/native/vulkan/PipelineCacheVk.h"
#include "dawn/native/vulkan/PipelineLayoutVk.h"
#include "dawn/native/vulkan/QuerySetVk.h"
#include "dawn/native/vulkan/QueueVk.h"
@@ -44,1016 +47,1049 @@
namespace dawn::native::vulkan {
- // static
- ResultOrError<Ref<Device>> Device::Create(Adapter* adapter,
- const DeviceDescriptor* descriptor) {
- Ref<Device> device = AcquireRef(new Device(adapter, descriptor));
- DAWN_TRY(device->Initialize(descriptor));
- return device;
- }
-
- Device::Device(Adapter* adapter, const DeviceDescriptor* descriptor)
- : DeviceBase(adapter, descriptor) {
- InitTogglesFromDriver();
- }
+// static
+ResultOrError<Ref<Device>> Device::Create(Adapter* adapter, const DeviceDescriptor* descriptor) {
+ Ref<Device> device = AcquireRef(new Device(adapter, descriptor));
+ DAWN_TRY(device->Initialize(descriptor));
+ return device;
+}
- MaybeError Device::Initialize(const DeviceDescriptor* descriptor) {
- // Copy the adapter's device info to the device so that we can change the "knobs"
- mDeviceInfo = ToBackend(GetAdapter())->GetDeviceInfo();
+Device::Device(Adapter* adapter, const DeviceDescriptor* descriptor)
+ : DeviceBase(adapter, descriptor), mDebugPrefix(GetNextDeviceDebugPrefix()) {
+ InitTogglesFromDriver();
+}
- // Initialize the "instance" procs of our local function table.
- VulkanFunctions* functions = GetMutableFunctions();
- *functions = ToBackend(GetAdapter())->GetVulkanInstance()->GetFunctions();
+MaybeError Device::Initialize(const DeviceDescriptor* descriptor) {
+ // Copy the adapter's device info to the device so that we can change the "knobs"
+ mDeviceInfo = ToBackend(GetAdapter())->GetDeviceInfo();
- // Two things are crucial if device initialization fails: the function pointers to destroy
- // objects, and the fence deleter that calls these functions. Do not do anything before
- // these two are set up, so that a failed initialization doesn't cause a crash in
- // DestroyImpl()
- {
- VkPhysicalDevice physicalDevice = ToBackend(GetAdapter())->GetPhysicalDevice();
+ // Initialize the "instance" procs of our local function table.
+ VulkanFunctions* functions = GetMutableFunctions();
+ *functions = ToBackend(GetAdapter())->GetVulkanInstance()->GetFunctions();
- VulkanDeviceKnobs usedDeviceKnobs = {};
- DAWN_TRY_ASSIGN(usedDeviceKnobs, CreateDevice(physicalDevice));
- *static_cast<VulkanDeviceKnobs*>(&mDeviceInfo) = usedDeviceKnobs;
+ // Two things are crucial if device initialization fails: the function pointers to destroy
+ // objects, and the fence deleter that calls these functions. Do not do anything before
+ // these two are set up, so that a failed initialization doesn't cause a crash in
+ // DestroyImpl()
+ {
+ VkPhysicalDevice physicalDevice = ToBackend(GetAdapter())->GetPhysicalDevice();
- DAWN_TRY(functions->LoadDeviceProcs(mVkDevice, mDeviceInfo));
-
- // The queue can be loaded before the fenced deleter because their lifetime is tied to
- // the device.
- GatherQueueFromDevice();
-
- mDeleter = std::make_unique<FencedDeleter>(this);
- }
+ VulkanDeviceKnobs usedDeviceKnobs = {};
+ DAWN_TRY_ASSIGN(usedDeviceKnobs, CreateDevice(physicalDevice));
+ *static_cast<VulkanDeviceKnobs*>(&mDeviceInfo) = usedDeviceKnobs;
- mRenderPassCache = std::make_unique<RenderPassCache>(this);
- mResourceMemoryAllocator = std::make_unique<ResourceMemoryAllocator>(this);
+ DAWN_TRY(functions->LoadDeviceProcs(mVkDevice, mDeviceInfo));
- mExternalMemoryService = std::make_unique<external_memory::Service>(this);
- mExternalSemaphoreService = std::make_unique<external_semaphore::Service>(this);
+        // The queue can be loaded before the fenced deleter because its lifetime is tied to
+ // the device.
+ GatherQueueFromDevice();
- DAWN_TRY(PrepareRecordingContext());
-
- // The environment can request to various options for depth-stencil formats that could be
- // unavailable. Override the decision if it is not applicable.
- ApplyDepthStencilFormatToggles();
-
- // The environment can only request to use VK_KHR_zero_initialize_workgroup_memory when the
- // extension is available. Override the decision if it is no applicable.
- ApplyUseZeroInitializeWorkgroupMemoryExtensionToggle();
-
- SetLabelImpl();
-
- return DeviceBase::Initialize(Queue::Create(this, &descriptor->defaultQueue));
+ mDeleter = std::make_unique<FencedDeleter>(this);
}
- Device::~Device() {
- Destroy();
+ mRenderPassCache = std::make_unique<RenderPassCache>(this);
+ mResourceMemoryAllocator = std::make_unique<ResourceMemoryAllocator>(this);
+
+ mExternalMemoryService = std::make_unique<external_memory::Service>(this);
+ mExternalSemaphoreService = std::make_unique<external_semaphore::Service>(this);
+
+ DAWN_TRY(PrepareRecordingContext());
+
+    // The environment can request various options for depth-stencil formats that could be
+ // unavailable. Override the decision if it is not applicable.
+ ApplyDepthStencilFormatToggles();
+
+ // The environment can only request to use VK_KHR_zero_initialize_workgroup_memory when the
+    // extension is available. Override the decision if it is not applicable.
+ ApplyUseZeroInitializeWorkgroupMemoryExtensionToggle();
+
+ SetLabelImpl();
+
+ ToBackend(GetAdapter())->GetVulkanInstance()->StartListeningForDeviceMessages(this);
+
+ return DeviceBase::Initialize(Queue::Create(this, &descriptor->defaultQueue));
+}
+
+Device::~Device() {
+ Destroy();
+}
+
+ResultOrError<Ref<BindGroupBase>> Device::CreateBindGroupImpl(
+ const BindGroupDescriptor* descriptor) {
+ return BindGroup::Create(this, descriptor);
+}
+ResultOrError<Ref<BindGroupLayoutBase>> Device::CreateBindGroupLayoutImpl(
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken) {
+ return BindGroupLayout::Create(this, descriptor, pipelineCompatibilityToken);
+}
+ResultOrError<Ref<BufferBase>> Device::CreateBufferImpl(const BufferDescriptor* descriptor) {
+ return Buffer::Create(this, descriptor);
+}
+ResultOrError<Ref<CommandBufferBase>> Device::CreateCommandBuffer(
+ CommandEncoder* encoder,
+ const CommandBufferDescriptor* descriptor) {
+ return CommandBuffer::Create(encoder, descriptor);
+}
+Ref<ComputePipelineBase> Device::CreateUninitializedComputePipelineImpl(
+ const ComputePipelineDescriptor* descriptor) {
+ return ComputePipeline::CreateUninitialized(this, descriptor);
+}
+ResultOrError<Ref<PipelineLayoutBase>> Device::CreatePipelineLayoutImpl(
+ const PipelineLayoutDescriptor* descriptor) {
+ return PipelineLayout::Create(this, descriptor);
+}
+ResultOrError<Ref<QuerySetBase>> Device::CreateQuerySetImpl(const QuerySetDescriptor* descriptor) {
+ return QuerySet::Create(this, descriptor);
+}
+Ref<RenderPipelineBase> Device::CreateUninitializedRenderPipelineImpl(
+ const RenderPipelineDescriptor* descriptor) {
+ return RenderPipeline::CreateUninitialized(this, descriptor);
+}
+ResultOrError<Ref<SamplerBase>> Device::CreateSamplerImpl(const SamplerDescriptor* descriptor) {
+ return Sampler::Create(this, descriptor);
+}
+ResultOrError<Ref<ShaderModuleBase>> Device::CreateShaderModuleImpl(
+ const ShaderModuleDescriptor* descriptor,
+ ShaderModuleParseResult* parseResult,
+ OwnedCompilationMessages* compilationMessages) {
+ return ShaderModule::Create(this, descriptor, parseResult, compilationMessages);
+}
+ResultOrError<Ref<SwapChainBase>> Device::CreateSwapChainImpl(
+ const SwapChainDescriptor* descriptor) {
+ return OldSwapChain::Create(this, descriptor);
+}
+ResultOrError<Ref<NewSwapChainBase>> Device::CreateSwapChainImpl(
+ Surface* surface,
+ NewSwapChainBase* previousSwapChain,
+ const SwapChainDescriptor* descriptor) {
+ return SwapChain::Create(this, surface, previousSwapChain, descriptor);
+}
+ResultOrError<Ref<TextureBase>> Device::CreateTextureImpl(const TextureDescriptor* descriptor) {
+ return Texture::Create(this, descriptor);
+}
+ResultOrError<Ref<TextureViewBase>> Device::CreateTextureViewImpl(
+ TextureBase* texture,
+ const TextureViewDescriptor* descriptor) {
+ return TextureView::Create(texture, descriptor);
+}
+Ref<PipelineCacheBase> Device::GetOrCreatePipelineCacheImpl(const CacheKey& key) {
+ return PipelineCache::Create(this, key);
+}
+void Device::InitializeComputePipelineAsyncImpl(Ref<ComputePipelineBase> computePipeline,
+ WGPUCreateComputePipelineAsyncCallback callback,
+ void* userdata) {
+ ComputePipeline::InitializeAsync(std::move(computePipeline), callback, userdata);
+}
+void Device::InitializeRenderPipelineAsyncImpl(Ref<RenderPipelineBase> renderPipeline,
+ WGPUCreateRenderPipelineAsyncCallback callback,
+ void* userdata) {
+ RenderPipeline::InitializeAsync(std::move(renderPipeline), callback, userdata);
+}
+
+MaybeError Device::TickImpl() {
+ RecycleCompletedCommands();
+
+ ExecutionSerial completedSerial = GetCompletedCommandSerial();
+
+ for (Ref<DescriptorSetAllocator>& allocator :
+ mDescriptorAllocatorsPendingDeallocation.IterateUpTo(completedSerial)) {
+ allocator->FinishDeallocation(completedSerial);
}
- ResultOrError<Ref<BindGroupBase>> Device::CreateBindGroupImpl(
- const BindGroupDescriptor* descriptor) {
- return BindGroup::Create(this, descriptor);
- }
- ResultOrError<Ref<BindGroupLayoutBase>> Device::CreateBindGroupLayoutImpl(
- const BindGroupLayoutDescriptor* descriptor,
- PipelineCompatibilityToken pipelineCompatibilityToken) {
- return BindGroupLayout::Create(this, descriptor, pipelineCompatibilityToken);
- }
- ResultOrError<Ref<BufferBase>> Device::CreateBufferImpl(const BufferDescriptor* descriptor) {
- return Buffer::Create(this, descriptor);
- }
- ResultOrError<Ref<CommandBufferBase>> Device::CreateCommandBuffer(
- CommandEncoder* encoder,
- const CommandBufferDescriptor* descriptor) {
- return CommandBuffer::Create(encoder, descriptor);
- }
- Ref<ComputePipelineBase> Device::CreateUninitializedComputePipelineImpl(
- const ComputePipelineDescriptor* descriptor) {
- return ComputePipeline::CreateUninitialized(this, descriptor);
- }
- ResultOrError<Ref<PipelineLayoutBase>> Device::CreatePipelineLayoutImpl(
- const PipelineLayoutDescriptor* descriptor) {
- return PipelineLayout::Create(this, descriptor);
- }
- ResultOrError<Ref<QuerySetBase>> Device::CreateQuerySetImpl(
- const QuerySetDescriptor* descriptor) {
- return QuerySet::Create(this, descriptor);
- }
- Ref<RenderPipelineBase> Device::CreateUninitializedRenderPipelineImpl(
- const RenderPipelineDescriptor* descriptor) {
- return RenderPipeline::CreateUninitialized(this, descriptor);
- }
- ResultOrError<Ref<SamplerBase>> Device::CreateSamplerImpl(const SamplerDescriptor* descriptor) {
- return Sampler::Create(this, descriptor);
- }
- ResultOrError<Ref<ShaderModuleBase>> Device::CreateShaderModuleImpl(
- const ShaderModuleDescriptor* descriptor,
- ShaderModuleParseResult* parseResult) {
- return ShaderModule::Create(this, descriptor, parseResult);
- }
- ResultOrError<Ref<SwapChainBase>> Device::CreateSwapChainImpl(
- const SwapChainDescriptor* descriptor) {
- return OldSwapChain::Create(this, descriptor);
- }
- ResultOrError<Ref<NewSwapChainBase>> Device::CreateSwapChainImpl(
- Surface* surface,
- NewSwapChainBase* previousSwapChain,
- const SwapChainDescriptor* descriptor) {
- return SwapChain::Create(this, surface, previousSwapChain, descriptor);
- }
- ResultOrError<Ref<TextureBase>> Device::CreateTextureImpl(const TextureDescriptor* descriptor) {
- return Texture::Create(this, descriptor);
- }
- ResultOrError<Ref<TextureViewBase>> Device::CreateTextureViewImpl(
- TextureBase* texture,
- const TextureViewDescriptor* descriptor) {
- return TextureView::Create(texture, descriptor);
- }
- void Device::InitializeComputePipelineAsyncImpl(Ref<ComputePipelineBase> computePipeline,
- WGPUCreateComputePipelineAsyncCallback callback,
- void* userdata) {
- ComputePipeline::InitializeAsync(std::move(computePipeline), callback, userdata);
- }
- void Device::InitializeRenderPipelineAsyncImpl(Ref<RenderPipelineBase> renderPipeline,
- WGPUCreateRenderPipelineAsyncCallback callback,
- void* userdata) {
- RenderPipeline::InitializeAsync(std::move(renderPipeline), callback, userdata);
+ mResourceMemoryAllocator->Tick(completedSerial);
+ mDeleter->Tick(completedSerial);
+ mDescriptorAllocatorsPendingDeallocation.ClearUpTo(completedSerial);
+
+ if (mRecordingContext.used) {
+ DAWN_TRY(SubmitPendingCommands());
}
- MaybeError Device::TickImpl() {
- RecycleCompletedCommands();
+ DAWN_TRY(CheckDebugLayerAndGenerateErrors());
- ExecutionSerial completedSerial = GetCompletedCommandSerial();
+ return {};
+}
- for (Ref<DescriptorSetAllocator>& allocator :
- mDescriptorAllocatorsPendingDeallocation.IterateUpTo(completedSerial)) {
- allocator->FinishDeallocation(completedSerial);
- }
+VkInstance Device::GetVkInstance() const {
+ return ToBackend(GetAdapter())->GetVulkanInstance()->GetVkInstance();
+}
+const VulkanDeviceInfo& Device::GetDeviceInfo() const {
+ return mDeviceInfo;
+}
- mResourceMemoryAllocator->Tick(completedSerial);
- mDeleter->Tick(completedSerial);
- mDescriptorAllocatorsPendingDeallocation.ClearUpTo(completedSerial);
+const VulkanGlobalInfo& Device::GetGlobalInfo() const {
+ return ToBackend(GetAdapter())->GetVulkanInstance()->GetGlobalInfo();
+}
- if (mRecordingContext.used) {
- DAWN_TRY(SubmitPendingCommands());
- }
+VkDevice Device::GetVkDevice() const {
+ return mVkDevice;
+}
- return {};
- }
+uint32_t Device::GetGraphicsQueueFamily() const {
+ return mQueueFamily;
+}
- VkInstance Device::GetVkInstance() const {
- return ToBackend(GetAdapter())->GetVulkanInstance()->GetVkInstance();
- }
- const VulkanDeviceInfo& Device::GetDeviceInfo() const {
- return mDeviceInfo;
- }
+VkQueue Device::GetQueue() const {
+ return mQueue;
+}
- const VulkanGlobalInfo& Device::GetGlobalInfo() const {
- return ToBackend(GetAdapter())->GetVulkanInstance()->GetGlobalInfo();
- }
+FencedDeleter* Device::GetFencedDeleter() const {
+ return mDeleter.get();
+}
- VkDevice Device::GetVkDevice() const {
- return mVkDevice;
- }
+RenderPassCache* Device::GetRenderPassCache() const {
+ return mRenderPassCache.get();
+}
- uint32_t Device::GetGraphicsQueueFamily() const {
- return mQueueFamily;
- }
+ResourceMemoryAllocator* Device::GetResourceMemoryAllocator() const {
+ return mResourceMemoryAllocator.get();
+}
- VkQueue Device::GetQueue() const {
- return mQueue;
- }
+void Device::EnqueueDeferredDeallocation(DescriptorSetAllocator* allocator) {
+ mDescriptorAllocatorsPendingDeallocation.Enqueue(allocator, GetPendingCommandSerial());
+}
- FencedDeleter* Device::GetFencedDeleter() const {
- return mDeleter.get();
- }
+CommandRecordingContext* Device::GetPendingRecordingContext() {
+ ASSERT(mRecordingContext.commandBuffer != VK_NULL_HANDLE);
+ mRecordingContext.used = true;
+ return &mRecordingContext;
+}
- RenderPassCache* Device::GetRenderPassCache() const {
- return mRenderPassCache.get();
+MaybeError Device::SubmitPendingCommands() {
+ if (!mRecordingContext.used) {
+ return {};
}
- ResourceMemoryAllocator* Device::GetResourceMemoryAllocator() const {
- return mResourceMemoryAllocator.get();
- }
+ DAWN_TRY(
+ CheckVkSuccess(fn.EndCommandBuffer(mRecordingContext.commandBuffer), "vkEndCommandBuffer"));
+
+ std::vector<VkPipelineStageFlags> dstStageMasks(mRecordingContext.waitSemaphores.size(),
+ VK_PIPELINE_STAGE_ALL_COMMANDS_BIT);
+
+ VkSubmitInfo submitInfo;
+ submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
+ submitInfo.pNext = nullptr;
+ submitInfo.waitSemaphoreCount = static_cast<uint32_t>(mRecordingContext.waitSemaphores.size());
+ submitInfo.pWaitSemaphores = AsVkArray(mRecordingContext.waitSemaphores.data());
+ submitInfo.pWaitDstStageMask = dstStageMasks.data();
+ submitInfo.commandBufferCount = 1;
+ submitInfo.pCommandBuffers = &mRecordingContext.commandBuffer;
+ submitInfo.signalSemaphoreCount =
+ static_cast<uint32_t>(mRecordingContext.signalSemaphores.size());
+ submitInfo.pSignalSemaphores = AsVkArray(mRecordingContext.signalSemaphores.data());
+
+ VkFence fence = VK_NULL_HANDLE;
+ DAWN_TRY_ASSIGN(fence, GetUnusedFence());
+ DAWN_TRY_WITH_CLEANUP(
+ CheckVkSuccess(fn.QueueSubmit(mQueue, 1, &submitInfo, fence), "vkQueueSubmit"), {
+ // If submitting to the queue fails, move the fence back into the unused fence
+ // list, as if it were never acquired. Not doing so would leak the fence since
+ // it would be neither in the unused list nor in the in-flight list.
+ mUnusedFences.push_back(fence);
+ });
- void Device::EnqueueDeferredDeallocation(DescriptorSetAllocator* allocator) {
- mDescriptorAllocatorsPendingDeallocation.Enqueue(allocator, GetPendingCommandSerial());
+ // Enqueue the semaphores before incrementing the serial, so that they can be deleted as
+ // soon as the current submission is finished.
+ for (VkSemaphore semaphore : mRecordingContext.waitSemaphores) {
+ mDeleter->DeleteWhenUnused(semaphore);
}
-
- CommandRecordingContext* Device::GetPendingRecordingContext() {
- ASSERT(mRecordingContext.commandBuffer != VK_NULL_HANDLE);
- mRecordingContext.used = true;
- return &mRecordingContext;
+ for (VkSemaphore semaphore : mRecordingContext.signalSemaphores) {
+ mDeleter->DeleteWhenUnused(semaphore);
}
- MaybeError Device::SubmitPendingCommands() {
- if (!mRecordingContext.used) {
- return {};
- }
+ IncrementLastSubmittedCommandSerial();
+ ExecutionSerial lastSubmittedSerial = GetLastSubmittedCommandSerial();
+ mFencesInFlight.emplace(fence, lastSubmittedSerial);
- DAWN_TRY(CheckVkSuccess(fn.EndCommandBuffer(mRecordingContext.commandBuffer),
- "vkEndCommandBuffer"));
-
- std::vector<VkPipelineStageFlags> dstStageMasks(mRecordingContext.waitSemaphores.size(),
- VK_PIPELINE_STAGE_ALL_COMMANDS_BIT);
-
- VkSubmitInfo submitInfo;
- submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
- submitInfo.pNext = nullptr;
- submitInfo.waitSemaphoreCount =
- static_cast<uint32_t>(mRecordingContext.waitSemaphores.size());
- submitInfo.pWaitSemaphores = AsVkArray(mRecordingContext.waitSemaphores.data());
- submitInfo.pWaitDstStageMask = dstStageMasks.data();
- submitInfo.commandBufferCount = 1;
- submitInfo.pCommandBuffers = &mRecordingContext.commandBuffer;
- submitInfo.signalSemaphoreCount =
- static_cast<uint32_t>(mRecordingContext.signalSemaphores.size());
- submitInfo.pSignalSemaphores = AsVkArray(mRecordingContext.signalSemaphores.data());
-
- VkFence fence = VK_NULL_HANDLE;
- DAWN_TRY_ASSIGN(fence, GetUnusedFence());
- DAWN_TRY_WITH_CLEANUP(
- CheckVkSuccess(fn.QueueSubmit(mQueue, 1, &submitInfo, fence), "vkQueueSubmit"), {
- // If submitting to the queue fails, move the fence back into the unused fence
- // list, as if it were never acquired. Not doing so would leak the fence since
- // it would be neither in the unused list nor in the in-flight list.
- mUnusedFences.push_back(fence);
- });
+ CommandPoolAndBuffer submittedCommands = {mRecordingContext.commandPool,
+ mRecordingContext.commandBuffer};
+ mCommandsInFlight.Enqueue(submittedCommands, lastSubmittedSerial);
+ mRecordingContext = CommandRecordingContext();
+ DAWN_TRY(PrepareRecordingContext());
- // Enqueue the semaphores before incrementing the serial, so that they can be deleted as
- // soon as the current submission is finished.
- for (VkSemaphore semaphore : mRecordingContext.waitSemaphores) {
- mDeleter->DeleteWhenUnused(semaphore);
- }
- for (VkSemaphore semaphore : mRecordingContext.signalSemaphores) {
- mDeleter->DeleteWhenUnused(semaphore);
- }
+ return {};
+}
- IncrementLastSubmittedCommandSerial();
- ExecutionSerial lastSubmittedSerial = GetLastSubmittedCommandSerial();
- mFencesInFlight.emplace(fence, lastSubmittedSerial);
+ResultOrError<VulkanDeviceKnobs> Device::CreateDevice(VkPhysicalDevice physicalDevice) {
+ VulkanDeviceKnobs usedKnobs = {};
- CommandPoolAndBuffer submittedCommands = {mRecordingContext.commandPool,
- mRecordingContext.commandBuffer};
- mCommandsInFlight.Enqueue(submittedCommands, lastSubmittedSerial);
- mRecordingContext = CommandRecordingContext();
- DAWN_TRY(PrepareRecordingContext());
+ // Default to asking for all available known extensions.
+ usedKnobs.extensions = mDeviceInfo.extensions;
- return {};
- }
-
- ResultOrError<VulkanDeviceKnobs> Device::CreateDevice(VkPhysicalDevice physicalDevice) {
- VulkanDeviceKnobs usedKnobs = {};
-
- // Default to asking for all avilable known extensions.
- usedKnobs.extensions = mDeviceInfo.extensions;
+ // However, only request the extensions that haven't been promoted in the device's apiVersion.
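+ // (For example, an extension promoted to core in Vulkan 1.1 is skipped below when the device
+ // reports an apiVersion of 1.1 or higher.)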
+ std::vector<const char*> extensionNames;
+ for (DeviceExt ext : IterateBitSet(usedKnobs.extensions)) {
+ const DeviceExtInfo& info = GetDeviceExtInfo(ext);
- // However only request the extensions that haven't been promoted in the device's apiVersion
- std::vector<const char*> extensionNames;
- for (DeviceExt ext : IterateBitSet(usedKnobs.extensions)) {
- const DeviceExtInfo& info = GetDeviceExtInfo(ext);
-
- if (info.versionPromoted > mDeviceInfo.properties.apiVersion) {
- extensionNames.push_back(info.name);
- }
- }
-
- // Some device features can only be enabled using a VkPhysicalDeviceFeatures2 struct, which
- // is supported by the VK_EXT_get_physical_properties2 instance extension, which was
- // promoted as a core API in Vulkan 1.1.
- //
- // Prepare a VkPhysicalDeviceFeatures2 struct for this use case, it will only be populated
- // if HasExt(DeviceExt::GetPhysicalDeviceProperties2) is true.
- VkPhysicalDeviceFeatures2 features2 = {};
- features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
- features2.pNext = nullptr;
- PNextChainBuilder featuresChain(&features2);
-
- // Required for core WebGPU features.
- usedKnobs.features.depthBiasClamp = VK_TRUE;
- usedKnobs.features.fragmentStoresAndAtomics = VK_TRUE;
- usedKnobs.features.fullDrawIndexUint32 = VK_TRUE;
- usedKnobs.features.imageCubeArray = VK_TRUE;
- usedKnobs.features.independentBlend = VK_TRUE;
- usedKnobs.features.sampleRateShading = VK_TRUE;
-
- if (IsRobustnessEnabled()) {
- usedKnobs.features.robustBufferAccess = VK_TRUE;
- }
-
- if (mDeviceInfo.HasExt(DeviceExt::SubgroupSizeControl)) {
- ASSERT(usedKnobs.HasExt(DeviceExt::SubgroupSizeControl));
-
- // Always request all the features from VK_EXT_subgroup_size_control when available.
- usedKnobs.subgroupSizeControlFeatures = mDeviceInfo.subgroupSizeControlFeatures;
- featuresChain.Add(&usedKnobs.subgroupSizeControlFeatures);
-
- mComputeSubgroupSize = FindComputeSubgroupSize();
+ if (info.versionPromoted > mDeviceInfo.properties.apiVersion) {
+ extensionNames.push_back(info.name);
}
+ }
- if (mDeviceInfo.HasExt(DeviceExt::ZeroInitializeWorkgroupMemory)) {
- ASSERT(usedKnobs.HasExt(DeviceExt::ZeroInitializeWorkgroupMemory));
+ // Some device features can only be enabled using a VkPhysicalDeviceFeatures2 struct, which
+ // is supported by the VK_KHR_get_physical_device_properties2 instance extension, which was
+ // promoted to a core API in Vulkan 1.1.
+ //
+ // Prepare a VkPhysicalDeviceFeatures2 struct for this use case; it will only be populated
+ // if HasExt(DeviceExt::GetPhysicalDeviceProperties2) is true.
+ VkPhysicalDeviceFeatures2 features2 = {};
+ features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
+ features2.pNext = nullptr;
+ PNextChainBuilder featuresChain(&features2);
+
+ // Required for core WebGPU features.
+ usedKnobs.features.depthBiasClamp = VK_TRUE;
+ usedKnobs.features.fragmentStoresAndAtomics = VK_TRUE;
+ usedKnobs.features.fullDrawIndexUint32 = VK_TRUE;
+ usedKnobs.features.imageCubeArray = VK_TRUE;
+ usedKnobs.features.independentBlend = VK_TRUE;
+ usedKnobs.features.sampleRateShading = VK_TRUE;
+
+ if (IsRobustnessEnabled()) {
+ usedKnobs.features.robustBufferAccess = VK_TRUE;
+ }
- usedKnobs.zeroInitializeWorkgroupMemoryFeatures.sType =
- VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ZERO_INITIALIZE_WORKGROUP_MEMORY_FEATURES_KHR;
+ if (mDeviceInfo.HasExt(DeviceExt::SubgroupSizeControl)) {
+ ASSERT(usedKnobs.HasExt(DeviceExt::SubgroupSizeControl));
- // Always allow initializing workgroup memory with OpConstantNull when available.
- // Note that the driver still won't initialize workgroup memory unless the workgroup
- // variable is explicitly initialized with OpConstantNull.
- usedKnobs.zeroInitializeWorkgroupMemoryFeatures.shaderZeroInitializeWorkgroupMemory =
- VK_TRUE;
- featuresChain.Add(&usedKnobs.zeroInitializeWorkgroupMemoryFeatures);
- }
+ // Always request all the features from VK_EXT_subgroup_size_control when available.
+ usedKnobs.subgroupSizeControlFeatures = mDeviceInfo.subgroupSizeControlFeatures;
+ featuresChain.Add(&usedKnobs.subgroupSizeControlFeatures);
- if (mDeviceInfo.features.samplerAnisotropy == VK_TRUE) {
- usedKnobs.features.samplerAnisotropy = VK_TRUE;
- }
+ mComputeSubgroupSize = FindComputeSubgroupSize();
+ }
- if (IsFeatureEnabled(Feature::TextureCompressionBC)) {
- ASSERT(ToBackend(GetAdapter())->GetDeviceInfo().features.textureCompressionBC ==
- VK_TRUE);
- usedKnobs.features.textureCompressionBC = VK_TRUE;
- }
+ if (mDeviceInfo.HasExt(DeviceExt::ZeroInitializeWorkgroupMemory)) {
+ ASSERT(usedKnobs.HasExt(DeviceExt::ZeroInitializeWorkgroupMemory));
- if (IsFeatureEnabled(Feature::TextureCompressionETC2)) {
- ASSERT(ToBackend(GetAdapter())->GetDeviceInfo().features.textureCompressionETC2 ==
- VK_TRUE);
- usedKnobs.features.textureCompressionETC2 = VK_TRUE;
- }
-
- if (IsFeatureEnabled(Feature::TextureCompressionASTC)) {
- ASSERT(ToBackend(GetAdapter())->GetDeviceInfo().features.textureCompressionASTC_LDR ==
- VK_TRUE);
- usedKnobs.features.textureCompressionASTC_LDR = VK_TRUE;
- }
+ usedKnobs.zeroInitializeWorkgroupMemoryFeatures.sType =
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ZERO_INITIALIZE_WORKGROUP_MEMORY_FEATURES_KHR;
- if (IsFeatureEnabled(Feature::PipelineStatisticsQuery)) {
- ASSERT(ToBackend(GetAdapter())->GetDeviceInfo().features.pipelineStatisticsQuery ==
- VK_TRUE);
- usedKnobs.features.pipelineStatisticsQuery = VK_TRUE;
- }
+ // Always allow initializing workgroup memory with OpConstantNull when available.
+ // Note that the driver still won't initialize workgroup memory unless the workgroup
+ // variable is explicitly initialized with OpConstantNull.
+ usedKnobs.zeroInitializeWorkgroupMemoryFeatures.shaderZeroInitializeWorkgroupMemory =
+ VK_TRUE;
+ featuresChain.Add(&usedKnobs.zeroInitializeWorkgroupMemoryFeatures);
+ }
- if (IsFeatureEnabled(Feature::ShaderFloat16)) {
- const VulkanDeviceInfo& deviceInfo = ToBackend(GetAdapter())->GetDeviceInfo();
- ASSERT(deviceInfo.HasExt(DeviceExt::ShaderFloat16Int8) &&
- deviceInfo.shaderFloat16Int8Features.shaderFloat16 == VK_TRUE &&
- deviceInfo.HasExt(DeviceExt::_16BitStorage) &&
- deviceInfo._16BitStorageFeatures.storageBuffer16BitAccess == VK_TRUE &&
- deviceInfo._16BitStorageFeatures.uniformAndStorageBuffer16BitAccess == VK_TRUE);
-
- usedKnobs.shaderFloat16Int8Features.shaderFloat16 = VK_TRUE;
- usedKnobs._16BitStorageFeatures.storageBuffer16BitAccess = VK_TRUE;
- usedKnobs._16BitStorageFeatures.uniformAndStorageBuffer16BitAccess = VK_TRUE;
-
- featuresChain.Add(&usedKnobs.shaderFloat16Int8Features,
- VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES_KHR);
- featuresChain.Add(&usedKnobs._16BitStorageFeatures,
- VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES);
- }
+ if (mDeviceInfo.HasExt(DeviceExt::ShaderIntegerDotProduct)) {
+ ASSERT(usedKnobs.HasExt(DeviceExt::ShaderIntegerDotProduct));
- if (IsFeatureEnabled(Feature::DepthClamping)) {
- ASSERT(ToBackend(GetAdapter())->GetDeviceInfo().features.depthClamp == VK_TRUE);
- usedKnobs.features.depthClamp = VK_TRUE;
- }
+ usedKnobs.shaderIntegerDotProductFeatures.sType =
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_DOT_PRODUCT_FEATURES;
- // Find a universal queue family
- {
- // Note that GRAPHICS and COMPUTE imply TRANSFER so we don't need to check for it.
- constexpr uint32_t kUniversalFlags = VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT;
- int universalQueueFamily = -1;
- for (unsigned int i = 0; i < mDeviceInfo.queueFamilies.size(); ++i) {
- if ((mDeviceInfo.queueFamilies[i].queueFlags & kUniversalFlags) ==
- kUniversalFlags) {
- universalQueueFamily = i;
- break;
- }
- }
+ usedKnobs.shaderIntegerDotProductFeatures.shaderIntegerDotProduct = VK_TRUE;
+ featuresChain.Add(&usedKnobs.shaderIntegerDotProductFeatures);
+ }
- if (universalQueueFamily == -1) {
- return DAWN_INTERNAL_ERROR("No universal queue family");
- }
- mQueueFamily = static_cast<uint32_t>(universalQueueFamily);
- }
+ if (mDeviceInfo.features.samplerAnisotropy == VK_TRUE) {
+ usedKnobs.features.samplerAnisotropy = VK_TRUE;
+ }
- // Choose to create a single universal queue
- std::vector<VkDeviceQueueCreateInfo> queuesToRequest;
- float zero = 0.0f;
- {
- VkDeviceQueueCreateInfo queueCreateInfo;
- queueCreateInfo.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
- queueCreateInfo.pNext = nullptr;
- queueCreateInfo.flags = 0;
- queueCreateInfo.queueFamilyIndex = static_cast<uint32_t>(mQueueFamily);
- queueCreateInfo.queueCount = 1;
- queueCreateInfo.pQueuePriorities = &zero;
-
- queuesToRequest.push_back(queueCreateInfo);
- }
+ if (IsFeatureEnabled(Feature::TextureCompressionBC)) {
+ ASSERT(ToBackend(GetAdapter())->GetDeviceInfo().features.textureCompressionBC == VK_TRUE);
+ usedKnobs.features.textureCompressionBC = VK_TRUE;
+ }
- VkDeviceCreateInfo createInfo;
- createInfo.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
- createInfo.pNext = nullptr;
- createInfo.flags = 0;
- createInfo.queueCreateInfoCount = static_cast<uint32_t>(queuesToRequest.size());
- createInfo.pQueueCreateInfos = queuesToRequest.data();
- createInfo.enabledLayerCount = 0;
- createInfo.ppEnabledLayerNames = nullptr;
- createInfo.enabledExtensionCount = static_cast<uint32_t>(extensionNames.size());
- createInfo.ppEnabledExtensionNames = extensionNames.data();
-
- // When we have DeviceExt::GetPhysicalDeviceProperties2, use features2 so that features not
- // covered by VkPhysicalDeviceFeatures can be enabled.
- if (mDeviceInfo.HasExt(DeviceExt::GetPhysicalDeviceProperties2)) {
- features2.features = usedKnobs.features;
- createInfo.pNext = &features2;
- createInfo.pEnabledFeatures = nullptr;
- } else {
- ASSERT(features2.pNext == nullptr);
- createInfo.pEnabledFeatures = &usedKnobs.features;
- }
+ if (IsFeatureEnabled(Feature::TextureCompressionETC2)) {
+ ASSERT(ToBackend(GetAdapter())->GetDeviceInfo().features.textureCompressionETC2 == VK_TRUE);
+ usedKnobs.features.textureCompressionETC2 = VK_TRUE;
+ }
- DAWN_TRY(CheckVkSuccess(fn.CreateDevice(physicalDevice, &createInfo, nullptr, &mVkDevice),
- "vkCreateDevice"));
+ if (IsFeatureEnabled(Feature::TextureCompressionASTC)) {
+ ASSERT(ToBackend(GetAdapter())->GetDeviceInfo().features.textureCompressionASTC_LDR ==
+ VK_TRUE);
+ usedKnobs.features.textureCompressionASTC_LDR = VK_TRUE;
+ }
- return usedKnobs;
+ if (IsFeatureEnabled(Feature::PipelineStatisticsQuery)) {
+ ASSERT(ToBackend(GetAdapter())->GetDeviceInfo().features.pipelineStatisticsQuery ==
+ VK_TRUE);
+ usedKnobs.features.pipelineStatisticsQuery = VK_TRUE;
}
- uint32_t Device::FindComputeSubgroupSize() const {
- if (!mDeviceInfo.HasExt(DeviceExt::SubgroupSizeControl)) {
- return 0;
- }
+ if (IsFeatureEnabled(Feature::ShaderFloat16)) {
+ const VulkanDeviceInfo& deviceInfo = ToBackend(GetAdapter())->GetDeviceInfo();
+ ASSERT(deviceInfo.HasExt(DeviceExt::ShaderFloat16Int8) &&
+ deviceInfo.shaderFloat16Int8Features.shaderFloat16 == VK_TRUE &&
+ deviceInfo.HasExt(DeviceExt::_16BitStorage) &&
+ deviceInfo._16BitStorageFeatures.storageBuffer16BitAccess == VK_TRUE &&
+ deviceInfo._16BitStorageFeatures.uniformAndStorageBuffer16BitAccess == VK_TRUE);
+
+ usedKnobs.shaderFloat16Int8Features.shaderFloat16 = VK_TRUE;
+ usedKnobs._16BitStorageFeatures.storageBuffer16BitAccess = VK_TRUE;
+ usedKnobs._16BitStorageFeatures.uniformAndStorageBuffer16BitAccess = VK_TRUE;
+
+ featuresChain.Add(&usedKnobs.shaderFloat16Int8Features,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES_KHR);
+ featuresChain.Add(&usedKnobs._16BitStorageFeatures,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES);
+ }
- const VkPhysicalDeviceSubgroupSizeControlPropertiesEXT& ext =
- mDeviceInfo.subgroupSizeControlProperties;
+ if (IsFeatureEnabled(Feature::DepthClamping)) {
+ ASSERT(ToBackend(GetAdapter())->GetDeviceInfo().features.depthClamp == VK_TRUE);
+ usedKnobs.features.depthClamp = VK_TRUE;
+ }
- if (ext.minSubgroupSize == ext.maxSubgroupSize) {
- return 0;
+ // Find a universal queue family
+ {
+ // Note that GRAPHICS and COMPUTE imply TRANSFER so we don't need to check for it.
+ constexpr uint32_t kUniversalFlags = VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT;
+ int universalQueueFamily = -1;
+ for (unsigned int i = 0; i < mDeviceInfo.queueFamilies.size(); ++i) {
+ if ((mDeviceInfo.queueFamilies[i].queueFlags & kUniversalFlags) == kUniversalFlags) {
+ universalQueueFamily = i;
+ break;
+ }
}
- // At the moment, only Intel devices support varying subgroup sizes and 16, which is the
- // next value after the minimum of 8, is the sweet spot according to [1]. Hence the
- // following heuristics, which may need to be adjusted in the future for other
- // architectures, or if a specific API is added to let client code select the size..
- //
- // [1] https://bugs.freedesktop.org/show_bug.cgi?id=108875
- uint32_t subgroupSize = ext.minSubgroupSize * 2;
- if (subgroupSize <= ext.maxSubgroupSize) {
- return subgroupSize;
- } else {
- return ext.minSubgroupSize;
+ if (universalQueueFamily == -1) {
+ return DAWN_INTERNAL_ERROR("No universal queue family");
}
+ mQueueFamily = static_cast<uint32_t>(universalQueueFamily);
}
- void Device::GatherQueueFromDevice() {
- fn.GetDeviceQueue(mVkDevice, mQueueFamily, 0, &mQueue);
+ // Choose to create a single universal queue
+ std::vector<VkDeviceQueueCreateInfo> queuesToRequest;
+ float zero = 0.0f;
+ {
+ VkDeviceQueueCreateInfo queueCreateInfo;
+ queueCreateInfo.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
+ queueCreateInfo.pNext = nullptr;
+ queueCreateInfo.flags = 0;
+ queueCreateInfo.queueFamilyIndex = static_cast<uint32_t>(mQueueFamily);
+ queueCreateInfo.queueCount = 1;
+ queueCreateInfo.pQueuePriorities = &zero;
+
+ queuesToRequest.push_back(queueCreateInfo);
}
- // Note that this function is called before mDeviceInfo is initialized.
- void Device::InitTogglesFromDriver() {
- // TODO(crbug.com/dawn/857): tighten this workaround when this issue is fixed in both
- // Vulkan SPEC and drivers.
- SetToggle(Toggle::UseTemporaryBufferInCompressedTextureToTextureCopy, true);
-
- // By default try to use D32S8 for Depth24PlusStencil8
- SetToggle(Toggle::VulkanUseD32S8, true);
-
- // By default try to initialize workgroup memory with OpConstantNull according to the Vulkan
- // extension VK_KHR_zero_initialize_workgroup_memory.
- SetToggle(Toggle::VulkanUseZeroInitializeWorkgroupMemoryExtension, true);
-
- // By default try to use S8 if available.
- SetToggle(Toggle::VulkanUseS8, true);
+ VkDeviceCreateInfo createInfo;
+ createInfo.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
+ createInfo.pNext = nullptr;
+ createInfo.flags = 0;
+ createInfo.queueCreateInfoCount = static_cast<uint32_t>(queuesToRequest.size());
+ createInfo.pQueueCreateInfos = queuesToRequest.data();
+ createInfo.enabledLayerCount = 0;
+ createInfo.ppEnabledLayerNames = nullptr;
+ createInfo.enabledExtensionCount = static_cast<uint32_t>(extensionNames.size());
+ createInfo.ppEnabledExtensionNames = extensionNames.data();
+
+ // When we have DeviceExt::GetPhysicalDeviceProperties2, use features2 so that features not
+ // covered by VkPhysicalDeviceFeatures can be enabled.
+ if (mDeviceInfo.HasExt(DeviceExt::GetPhysicalDeviceProperties2)) {
+ features2.features = usedKnobs.features;
+ createInfo.pNext = &features2;
+ createInfo.pEnabledFeatures = nullptr;
+ } else {
+ ASSERT(features2.pNext == nullptr);
+ createInfo.pEnabledFeatures = &usedKnobs.features;
}
- void Device::ApplyDepthStencilFormatToggles() {
- bool supportsD32s8 =
- ToBackend(GetAdapter())->IsDepthStencilFormatSupported(VK_FORMAT_D32_SFLOAT_S8_UINT);
- bool supportsD24s8 =
- ToBackend(GetAdapter())->IsDepthStencilFormatSupported(VK_FORMAT_D24_UNORM_S8_UINT);
- bool supportsS8 = ToBackend(GetAdapter())->IsDepthStencilFormatSupported(VK_FORMAT_S8_UINT);
+ DAWN_TRY(CheckVkSuccess(fn.CreateDevice(physicalDevice, &createInfo, nullptr, &mVkDevice),
+ "vkCreateDevice"));
- ASSERT(supportsD32s8 || supportsD24s8);
+ return usedKnobs;
+}
- if (!supportsD24s8) {
- ForceSetToggle(Toggle::VulkanUseD32S8, true);
- }
- if (!supportsD32s8) {
- ForceSetToggle(Toggle::VulkanUseD32S8, false);
- }
- if (!supportsS8) {
- ForceSetToggle(Toggle::VulkanUseS8, false);
- }
+uint32_t Device::FindComputeSubgroupSize() const {
+ if (!mDeviceInfo.HasExt(DeviceExt::SubgroupSizeControl)) {
+ return 0;
}
- void Device::ApplyUseZeroInitializeWorkgroupMemoryExtensionToggle() {
- if (!mDeviceInfo.HasExt(DeviceExt::ZeroInitializeWorkgroupMemory)) {
- ForceSetToggle(Toggle::VulkanUseZeroInitializeWorkgroupMemoryExtension, false);
- }
- }
+ const VkPhysicalDeviceSubgroupSizeControlPropertiesEXT& ext =
+ mDeviceInfo.subgroupSizeControlProperties;
- VulkanFunctions* Device::GetMutableFunctions() {
- return const_cast<VulkanFunctions*>(&fn);
+ if (ext.minSubgroupSize == ext.maxSubgroupSize) {
+ return 0;
}
- ResultOrError<VkFence> Device::GetUnusedFence() {
- if (!mUnusedFences.empty()) {
- VkFence fence = mUnusedFences.back();
- DAWN_TRY(CheckVkSuccess(fn.ResetFences(mVkDevice, 1, &*fence), "vkResetFences"));
+ // At the moment, only Intel devices support varying subgroup sizes, and 16, the next value
+ // after the minimum of 8, is the sweet spot according to [1]. Hence the following
+ // heuristic, which may need to be adjusted in the future for other architectures, or if a
+ // specific API is added to let client code select the size.
+ //
+ // [1] https://bugs.freedesktop.org/show_bug.cgi?id=108875
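+ // For example, a device reporting minSubgroupSize = 8 and maxSubgroupSize = 32 ends up
+ // with a compute subgroup size of 16 below.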
+ uint32_t subgroupSize = ext.minSubgroupSize * 2;
+ if (subgroupSize <= ext.maxSubgroupSize) {
+ return subgroupSize;
+ } else {
+ return ext.minSubgroupSize;
+ }
+}
- mUnusedFences.pop_back();
- return fence;
- }
+void Device::GatherQueueFromDevice() {
+ fn.GetDeviceQueue(mVkDevice, mQueueFamily, 0, &mQueue);
+}
- VkFenceCreateInfo createInfo;
- createInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
- createInfo.pNext = nullptr;
- createInfo.flags = 0;
+// Note that this function is called before mDeviceInfo is initialized.
+void Device::InitTogglesFromDriver() {
+ // TODO(crbug.com/dawn/857): tighten this workaround when this issue is fixed in both
+ // the Vulkan spec and drivers.
+ SetToggle(Toggle::UseTemporaryBufferInCompressedTextureToTextureCopy, true);
- VkFence fence = VK_NULL_HANDLE;
- DAWN_TRY(CheckVkSuccess(fn.CreateFence(mVkDevice, &createInfo, nullptr, &*fence),
- "vkCreateFence"));
+ // By default try to use D32S8 for Depth24PlusStencil8
+ SetToggle(Toggle::VulkanUseD32S8, true);
- return fence;
- }
+ // By default try to initialize workgroup memory with OpConstantNull according to the Vulkan
+ // extension VK_KHR_zero_initialize_workgroup_memory.
+ SetToggle(Toggle::VulkanUseZeroInitializeWorkgroupMemoryExtension, true);
- ResultOrError<ExecutionSerial> Device::CheckAndUpdateCompletedSerials() {
- ExecutionSerial fenceSerial(0);
- while (!mFencesInFlight.empty()) {
- VkFence fence = mFencesInFlight.front().first;
- ExecutionSerial tentativeSerial = mFencesInFlight.front().second;
- VkResult result = VkResult::WrapUnsafe(
- INJECT_ERROR_OR_RUN(fn.GetFenceStatus(mVkDevice, fence), VK_ERROR_DEVICE_LOST));
-
- // Fence are added in order, so we can stop searching as soon
- // as we see one that's not ready.
- if (result == VK_NOT_READY) {
- return fenceSerial;
- } else {
- DAWN_TRY(CheckVkSuccess(::VkResult(result), "GetFenceStatus"));
- }
+ // By default try to use S8 if available.
+ SetToggle(Toggle::VulkanUseS8, true);
+}
- // Update fenceSerial since fence is ready.
- fenceSerial = tentativeSerial;
+void Device::ApplyDepthStencilFormatToggles() {
+ bool supportsD32s8 =
+ ToBackend(GetAdapter())->IsDepthStencilFormatSupported(VK_FORMAT_D32_SFLOAT_S8_UINT);
+ bool supportsD24s8 =
+ ToBackend(GetAdapter())->IsDepthStencilFormatSupported(VK_FORMAT_D24_UNORM_S8_UINT);
+ bool supportsS8 = ToBackend(GetAdapter())->IsDepthStencilFormatSupported(VK_FORMAT_S8_UINT);
- mUnusedFences.push_back(fence);
+ ASSERT(supportsD32s8 || supportsD24s8);
- ASSERT(fenceSerial > GetCompletedCommandSerial());
- mFencesInFlight.pop();
- }
- return fenceSerial;
+ if (!supportsD24s8) {
+ ForceSetToggle(Toggle::VulkanUseD32S8, true);
}
-
- MaybeError Device::PrepareRecordingContext() {
- ASSERT(!mRecordingContext.used);
- ASSERT(mRecordingContext.commandBuffer == VK_NULL_HANDLE);
- ASSERT(mRecordingContext.commandPool == VK_NULL_HANDLE);
-
- // First try to recycle unused command pools.
- if (!mUnusedCommands.empty()) {
- CommandPoolAndBuffer commands = mUnusedCommands.back();
- mUnusedCommands.pop_back();
- DAWN_TRY_WITH_CLEANUP(CheckVkSuccess(fn.ResetCommandPool(mVkDevice, commands.pool, 0),
- "vkResetCommandPool"),
- {
- // vkResetCommandPool failed (it may return out-of-memory).
- // Free the commands in the cleanup step before returning to
- // reclaim memory.
-
- // The VkCommandBuffer memory should be wholly owned by the
- // pool and freed when it is destroyed, but that's not the
- // case in some drivers and they leak memory. So we call
- // FreeCommandBuffers before DestroyCommandPool to be safe.
- // TODO(enga): Only do this on a known list of bad drivers.
- fn.FreeCommandBuffers(mVkDevice, commands.pool, 1,
- &commands.commandBuffer);
- fn.DestroyCommandPool(mVkDevice, commands.pool, nullptr);
- });
-
- mRecordingContext.commandBuffer = commands.commandBuffer;
- mRecordingContext.commandPool = commands.pool;
- } else {
- // Create a new command pool for our commands and allocate the command buffer.
- VkCommandPoolCreateInfo createInfo;
- createInfo.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
- createInfo.pNext = nullptr;
- createInfo.flags = VK_COMMAND_POOL_CREATE_TRANSIENT_BIT;
- createInfo.queueFamilyIndex = mQueueFamily;
-
- DAWN_TRY(CheckVkSuccess(fn.CreateCommandPool(mVkDevice, &createInfo, nullptr,
- &*mRecordingContext.commandPool),
- "vkCreateCommandPool"));
-
- VkCommandBufferAllocateInfo allocateInfo;
- allocateInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
- allocateInfo.pNext = nullptr;
- allocateInfo.commandPool = mRecordingContext.commandPool;
- allocateInfo.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
- allocateInfo.commandBufferCount = 1;
-
- DAWN_TRY(CheckVkSuccess(fn.AllocateCommandBuffers(mVkDevice, &allocateInfo,
- &mRecordingContext.commandBuffer),
- "vkAllocateCommandBuffers"));
- }
-
- // Start the recording of commands in the command buffer.
- VkCommandBufferBeginInfo beginInfo;
- beginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
- beginInfo.pNext = nullptr;
- beginInfo.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
- beginInfo.pInheritanceInfo = nullptr;
-
- return CheckVkSuccess(fn.BeginCommandBuffer(mRecordingContext.commandBuffer, &beginInfo),
- "vkBeginCommandBuffer");
+ if (!supportsD32s8) {
+ ForceSetToggle(Toggle::VulkanUseD32S8, false);
}
-
- void Device::RecycleCompletedCommands() {
- for (auto& commands : mCommandsInFlight.IterateUpTo(GetCompletedCommandSerial())) {
- mUnusedCommands.push_back(commands);
- }
- mCommandsInFlight.ClearUpTo(GetCompletedCommandSerial());
+ if (!supportsS8) {
+ ForceSetToggle(Toggle::VulkanUseS8, false);
}
+}
- ResultOrError<std::unique_ptr<StagingBufferBase>> Device::CreateStagingBuffer(size_t size) {
- std::unique_ptr<StagingBufferBase> stagingBuffer =
- std::make_unique<StagingBuffer>(size, this);
- DAWN_TRY(stagingBuffer->Initialize());
- return std::move(stagingBuffer);
+void Device::ApplyUseZeroInitializeWorkgroupMemoryExtensionToggle() {
+ if (!mDeviceInfo.HasExt(DeviceExt::ZeroInitializeWorkgroupMemory)) {
+ ForceSetToggle(Toggle::VulkanUseZeroInitializeWorkgroupMemoryExtension, false);
}
+}
- MaybeError Device::CopyFromStagingToBuffer(StagingBufferBase* source,
- uint64_t sourceOffset,
- BufferBase* destination,
- uint64_t destinationOffset,
- uint64_t size) {
- // It is a validation error to do a 0-sized copy in Vulkan, check it is skipped prior to
- // calling this function.
- ASSERT(size != 0);
+VulkanFunctions* Device::GetMutableFunctions() {
+ return const_cast<VulkanFunctions*>(&fn);
+}
- CommandRecordingContext* recordingContext = GetPendingRecordingContext();
+ResultOrError<VkFence> Device::GetUnusedFence() {
+ if (!mUnusedFences.empty()) {
+ VkFence fence = mUnusedFences.back();
+ DAWN_TRY(CheckVkSuccess(fn.ResetFences(mVkDevice, 1, &*fence), "vkResetFences"));
- ToBackend(destination)
- ->EnsureDataInitializedAsDestination(recordingContext, destinationOffset, size);
-
- // There is no need of a barrier to make host writes available and visible to the copy
- // operation for HOST_COHERENT memory. The Vulkan spec for vkQueueSubmit describes that it
- // does an implicit availability, visibility and domain operation.
+ mUnusedFences.pop_back();
+ return fence;
+ }
- // Insert pipeline barrier to ensure correct ordering with previous memory operations on the
- // buffer.
- ToBackend(destination)->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopyDst);
+ VkFenceCreateInfo createInfo;
+ createInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
+ createInfo.pNext = nullptr;
+ createInfo.flags = 0;
+
+ VkFence fence = VK_NULL_HANDLE;
+ DAWN_TRY(
+ CheckVkSuccess(fn.CreateFence(mVkDevice, &createInfo, nullptr, &*fence), "vkCreateFence"));
+
+ return fence;
+}
+
+ResultOrError<ExecutionSerial> Device::CheckAndUpdateCompletedSerials() {
+ ExecutionSerial fenceSerial(0);
+ while (!mFencesInFlight.empty()) {
+ VkFence fence = mFencesInFlight.front().first;
+ ExecutionSerial tentativeSerial = mFencesInFlight.front().second;
+ VkResult result = VkResult::WrapUnsafe(
+ INJECT_ERROR_OR_RUN(fn.GetFenceStatus(mVkDevice, fence), VK_ERROR_DEVICE_LOST));
+
+ // Fences are added in order, so we can stop searching as soon
+ // as we see one that's not ready.
+ if (result == VK_NOT_READY) {
+ return fenceSerial;
+ } else {
+ DAWN_TRY(CheckVkSuccess(::VkResult(result), "GetFenceStatus"));
+ }
- VkBufferCopy copy;
- copy.srcOffset = sourceOffset;
- copy.dstOffset = destinationOffset;
- copy.size = size;
+ // Update fenceSerial since fence is ready.
+ fenceSerial = tentativeSerial;
- this->fn.CmdCopyBuffer(recordingContext->commandBuffer,
- ToBackend(source)->GetBufferHandle(),
- ToBackend(destination)->GetHandle(), 1, &copy);
+ mUnusedFences.push_back(fence);
- return {};
+ ASSERT(fenceSerial > GetCompletedCommandSerial());
+ mFencesInFlight.pop();
}
+ return fenceSerial;
+}
+
+MaybeError Device::PrepareRecordingContext() {
+ ASSERT(!mRecordingContext.used);
+ ASSERT(mRecordingContext.commandBuffer == VK_NULL_HANDLE);
+ ASSERT(mRecordingContext.commandPool == VK_NULL_HANDLE);
+
+ // First try to recycle unused command pools.
+ if (!mUnusedCommands.empty()) {
+ CommandPoolAndBuffer commands = mUnusedCommands.back();
+ mUnusedCommands.pop_back();
+ DAWN_TRY_WITH_CLEANUP(
+ CheckVkSuccess(fn.ResetCommandPool(mVkDevice, commands.pool, 0), "vkResetCommandPool"),
+ {
+ // vkResetCommandPool failed (it may return out-of-memory).
+ // Free the commands in the cleanup step before returning to
+ // reclaim memory.
+
+ // The VkCommandBuffer memory should be wholly owned by the
+ // pool and freed when it is destroyed, but that's not the
+ // case in some drivers and they leak memory. So we call
+ // FreeCommandBuffers before DestroyCommandPool to be safe.
+ // TODO(enga): Only do this on a known list of bad drivers.
+ fn.FreeCommandBuffers(mVkDevice, commands.pool, 1, &commands.commandBuffer);
+ fn.DestroyCommandPool(mVkDevice, commands.pool, nullptr);
+ });
- MaybeError Device::CopyFromStagingToTexture(const StagingBufferBase* source,
- const TextureDataLayout& src,
- TextureCopy* dst,
- const Extent3D& copySizePixels) {
- // There is no need of a barrier to make host writes available and visible to the copy
- // operation for HOST_COHERENT memory. The Vulkan spec for vkQueueSubmit describes that it
- // does an implicit availability, visibility and domain operation.
-
- CommandRecordingContext* recordingContext = GetPendingRecordingContext();
+ mRecordingContext.commandBuffer = commands.commandBuffer;
+ mRecordingContext.commandPool = commands.pool;
+ } else {
+ // Create a new command pool for our commands and allocate the command buffer.
+ VkCommandPoolCreateInfo createInfo;
+ createInfo.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
+ createInfo.pNext = nullptr;
+ createInfo.flags = VK_COMMAND_POOL_CREATE_TRANSIENT_BIT;
+ createInfo.queueFamilyIndex = mQueueFamily;
+
+ DAWN_TRY(CheckVkSuccess(
+ fn.CreateCommandPool(mVkDevice, &createInfo, nullptr, &*mRecordingContext.commandPool),
+ "vkCreateCommandPool"));
+
+ VkCommandBufferAllocateInfo allocateInfo;
+ allocateInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
+ allocateInfo.pNext = nullptr;
+ allocateInfo.commandPool = mRecordingContext.commandPool;
+ allocateInfo.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
+ allocateInfo.commandBufferCount = 1;
+
+ DAWN_TRY(CheckVkSuccess(
+ fn.AllocateCommandBuffers(mVkDevice, &allocateInfo, &mRecordingContext.commandBuffer),
+ "vkAllocateCommandBuffers"));
+ }
- VkBufferImageCopy region = ComputeBufferImageCopyRegion(src, *dst, copySizePixels);
- VkImageSubresourceLayers subresource = region.imageSubresource;
+ // Start the recording of commands in the command buffer.
+ VkCommandBufferBeginInfo beginInfo;
+ beginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
+ beginInfo.pNext = nullptr;
+ beginInfo.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
+ beginInfo.pInheritanceInfo = nullptr;
- SubresourceRange range = GetSubresourcesAffectedByCopy(*dst, copySizePixels);
+ return CheckVkSuccess(fn.BeginCommandBuffer(mRecordingContext.commandBuffer, &beginInfo),
+ "vkBeginCommandBuffer");
+}
- if (IsCompleteSubresourceCopiedTo(dst->texture.Get(), copySizePixels,
- subresource.mipLevel)) {
- // Since texture has been overwritten, it has been "initialized"
- dst->texture->SetIsSubresourceContentInitialized(true, range);
- } else {
- ToBackend(dst->texture)->EnsureSubresourceContentInitialized(recordingContext, range);
- }
- // Insert pipeline barrier to ensure correct ordering with previous memory operations on the
- // texture.
- ToBackend(dst->texture)
- ->TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopyDst, range);
- VkImage dstImage = ToBackend(dst->texture)->GetHandle();
-
- // Dawn guarantees dstImage be in the TRANSFER_DST_OPTIMAL layout after the
- // copy command.
- this->fn.CmdCopyBufferToImage(recordingContext->commandBuffer,
- ToBackend(source)->GetBufferHandle(), dstImage,
- VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
- return {};
+void Device::RecycleCompletedCommands() {
+ for (auto& commands : mCommandsInFlight.IterateUpTo(GetCompletedCommandSerial())) {
+ mUnusedCommands.push_back(commands);
+ }
+ mCommandsInFlight.ClearUpTo(GetCompletedCommandSerial());
+}
+
+ResultOrError<std::unique_ptr<StagingBufferBase>> Device::CreateStagingBuffer(size_t size) {
+ std::unique_ptr<StagingBufferBase> stagingBuffer = std::make_unique<StagingBuffer>(size, this);
+ DAWN_TRY(stagingBuffer->Initialize());
+ return std::move(stagingBuffer);
+}
+
+MaybeError Device::CopyFromStagingToBuffer(StagingBufferBase* source,
+ uint64_t sourceOffset,
+ BufferBase* destination,
+ uint64_t destinationOffset,
+ uint64_t size) {
+ // It is a validation error to do a 0-sized copy in Vulkan, check it is skipped prior to
+ // calling this function.
+ ASSERT(size != 0);
+
+ CommandRecordingContext* recordingContext = GetPendingRecordingContext();
+
+ ToBackend(destination)
+ ->EnsureDataInitializedAsDestination(recordingContext, destinationOffset, size);
+
+ // There is no need for a barrier to make host writes available and visible to the copy
+ // operation for HOST_COHERENT memory. The Vulkan spec for vkQueueSubmit describes that it
+ // does an implicit availability, visibility and domain operation.
+
+ // Insert pipeline barrier to ensure correct ordering with previous memory operations on the
+ // buffer.
+ ToBackend(destination)->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopyDst);
+
+ VkBufferCopy copy;
+ copy.srcOffset = sourceOffset;
+ copy.dstOffset = destinationOffset;
+ copy.size = size;
+
+ this->fn.CmdCopyBuffer(recordingContext->commandBuffer, ToBackend(source)->GetBufferHandle(),
+ ToBackend(destination)->GetHandle(), 1, &copy);
+
+ return {};
+}
+
+MaybeError Device::CopyFromStagingToTexture(const StagingBufferBase* source,
+ const TextureDataLayout& src,
+ TextureCopy* dst,
+ const Extent3D& copySizePixels) {
+ // There is no need for a barrier to make host writes available and visible to the copy
+ // operation for HOST_COHERENT memory. The Vulkan spec for vkQueueSubmit describes that it
+ // does an implicit availability, visibility and domain operation.
+
+ CommandRecordingContext* recordingContext = GetPendingRecordingContext();
+
+ VkBufferImageCopy region = ComputeBufferImageCopyRegion(src, *dst, copySizePixels);
+ VkImageSubresourceLayers subresource = region.imageSubresource;
+
+ SubresourceRange range = GetSubresourcesAffectedByCopy(*dst, copySizePixels);
+
+ if (IsCompleteSubresourceCopiedTo(dst->texture.Get(), copySizePixels, subresource.mipLevel)) {
+ // Since the texture has been overwritten, it has been "initialized".
+ dst->texture->SetIsSubresourceContentInitialized(true, range);
+ } else {
+ ToBackend(dst->texture)->EnsureSubresourceContentInitialized(recordingContext, range);
+ }
+ // Insert pipeline barrier to ensure correct ordering with previous memory operations on the
+ // texture.
+ ToBackend(dst->texture)
+ ->TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopyDst, range);
+ VkImage dstImage = ToBackend(dst->texture)->GetHandle();
+
+ // Dawn guarantees that dstImage is in the TRANSFER_DST_OPTIMAL layout after the
+ // copy command.
+ this->fn.CmdCopyBufferToImage(recordingContext->commandBuffer,
+ ToBackend(source)->GetBufferHandle(), dstImage,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
+ return {};
+}
+
+MaybeError Device::ImportExternalImage(const ExternalImageDescriptorVk* descriptor,
+ ExternalMemoryHandle memoryHandle,
+ VkImage image,
+ const std::vector<ExternalSemaphoreHandle>& waitHandles,
+ VkSemaphore* outSignalSemaphore,
+ VkDeviceMemory* outAllocation,
+ std::vector<VkSemaphore>* outWaitSemaphores) {
+ const TextureDescriptor* textureDescriptor = FromAPI(descriptor->cTextureDescriptor);
+
+ const DawnTextureInternalUsageDescriptor* internalUsageDesc = nullptr;
+ FindInChain(textureDescriptor->nextInChain, &internalUsageDesc);
+
+ wgpu::TextureUsage usage = textureDescriptor->usage;
+ if (internalUsageDesc != nullptr) {
+ usage |= internalUsageDesc->internalUsage;
}
- MaybeError Device::ImportExternalImage(const ExternalImageDescriptorVk* descriptor,
- ExternalMemoryHandle memoryHandle,
- VkImage image,
- const std::vector<ExternalSemaphoreHandle>& waitHandles,
- VkSemaphore* outSignalSemaphore,
- VkDeviceMemory* outAllocation,
- std::vector<VkSemaphore>* outWaitSemaphores) {
- const TextureDescriptor* textureDescriptor = FromAPI(descriptor->cTextureDescriptor);
-
- const DawnTextureInternalUsageDescriptor* internalUsageDesc = nullptr;
- FindInChain(textureDescriptor->nextInChain, &internalUsageDesc);
-
- wgpu::TextureUsage usage = textureDescriptor->usage;
- if (internalUsageDesc != nullptr) {
- usage |= internalUsageDesc->internalUsage;
- }
+ // Check that the services support this combination of handle type / image info.
+ DAWN_INVALID_IF(!mExternalSemaphoreService->Supported(),
+ "External semaphore usage not supported");
+
+ DAWN_INVALID_IF(!mExternalMemoryService->SupportsImportMemory(
+ VulkanImageFormat(this, textureDescriptor->format), VK_IMAGE_TYPE_2D,
+ VK_IMAGE_TILING_OPTIMAL,
+ VulkanImageUsage(usage, GetValidInternalFormat(textureDescriptor->format)),
+ VK_IMAGE_CREATE_ALIAS_BIT_KHR),
+ "External memory usage not supported");
+
+ // Create an external semaphore to signal when the texture is done being used
+ DAWN_TRY_ASSIGN(*outSignalSemaphore, mExternalSemaphoreService->CreateExportableSemaphore());
+
+ // Import the external image's memory
+ external_memory::MemoryImportParams importParams;
+ DAWN_TRY_ASSIGN(importParams, mExternalMemoryService->GetMemoryImportParams(descriptor, image));
+ DAWN_TRY_ASSIGN(*outAllocation,
+ mExternalMemoryService->ImportMemory(memoryHandle, importParams, image));
+
+ // Import semaphores we have to wait on before using the texture
+ for (const ExternalSemaphoreHandle& handle : waitHandles) {
+ VkSemaphore semaphore = VK_NULL_HANDLE;
+ DAWN_TRY_ASSIGN(semaphore, mExternalSemaphoreService->ImportSemaphore(handle));
+ outWaitSemaphores->push_back(semaphore);
+ }
- // Check services support this combination of handle type / image info
- DAWN_INVALID_IF(!mExternalSemaphoreService->Supported(),
- "External semaphore usage not supported");
-
- DAWN_INVALID_IF(
- !mExternalMemoryService->SupportsImportMemory(
- VulkanImageFormat(this, textureDescriptor->format), VK_IMAGE_TYPE_2D,
- VK_IMAGE_TILING_OPTIMAL,
- VulkanImageUsage(usage, GetValidInternalFormat(textureDescriptor->format)),
- VK_IMAGE_CREATE_ALIAS_BIT_KHR),
- "External memory usage not supported");
-
- // Create an external semaphore to signal when the texture is done being used
- DAWN_TRY_ASSIGN(*outSignalSemaphore,
- mExternalSemaphoreService->CreateExportableSemaphore());
-
- // Import the external image's memory
- external_memory::MemoryImportParams importParams;
- DAWN_TRY_ASSIGN(importParams,
- mExternalMemoryService->GetMemoryImportParams(descriptor, image));
- DAWN_TRY_ASSIGN(*outAllocation,
- mExternalMemoryService->ImportMemory(memoryHandle, importParams, image));
-
- // Import semaphores we have to wait on before using the texture
- for (const ExternalSemaphoreHandle& handle : waitHandles) {
- VkSemaphore semaphore = VK_NULL_HANDLE;
- DAWN_TRY_ASSIGN(semaphore, mExternalSemaphoreService->ImportSemaphore(handle));
- outWaitSemaphores->push_back(semaphore);
- }
+ return {};
+}
+
+bool Device::SignalAndExportExternalTexture(
+ Texture* texture,
+ VkImageLayout desiredLayout,
+ ExternalImageExportInfoVk* info,
+ std::vector<ExternalSemaphoreHandle>* semaphoreHandles) {
+ return !ConsumedError([&]() -> MaybeError {
+ DAWN_TRY(ValidateObject(texture));
+
+ VkSemaphore signalSemaphore;
+ VkImageLayout releasedOldLayout;
+ VkImageLayout releasedNewLayout;
+ DAWN_TRY(texture->ExportExternalTexture(desiredLayout, &signalSemaphore, &releasedOldLayout,
+ &releasedNewLayout));
+
+ ExternalSemaphoreHandle semaphoreHandle;
+ DAWN_TRY_ASSIGN(semaphoreHandle,
+ mExternalSemaphoreService->ExportSemaphore(signalSemaphore));
+ semaphoreHandles->push_back(semaphoreHandle);
+ info->releasedOldLayout = releasedOldLayout;
+ info->releasedNewLayout = releasedNewLayout;
+ info->isInitialized =
+ texture->IsSubresourceContentInitialized(texture->GetAllSubresources());
return {};
+ }());
+}
+
+TextureBase* Device::CreateTextureWrappingVulkanImage(
+ const ExternalImageDescriptorVk* descriptor,
+ ExternalMemoryHandle memoryHandle,
+ const std::vector<ExternalSemaphoreHandle>& waitHandles) {
+ const TextureDescriptor* textureDescriptor = FromAPI(descriptor->cTextureDescriptor);
+
+ // Initial validation
+ if (ConsumedError(ValidateTextureDescriptor(this, textureDescriptor))) {
+ return nullptr;
}
-
- bool Device::SignalAndExportExternalTexture(
- Texture* texture,
- VkImageLayout desiredLayout,
- ExternalImageExportInfoVk* info,
- std::vector<ExternalSemaphoreHandle>* semaphoreHandles) {
- return !ConsumedError([&]() -> MaybeError {
- DAWN_TRY(ValidateObject(texture));
-
- VkSemaphore signalSemaphore;
- VkImageLayout releasedOldLayout;
- VkImageLayout releasedNewLayout;
- DAWN_TRY(texture->ExportExternalTexture(desiredLayout, &signalSemaphore,
- &releasedOldLayout, &releasedNewLayout));
-
- ExternalSemaphoreHandle semaphoreHandle;
- DAWN_TRY_ASSIGN(semaphoreHandle,
- mExternalSemaphoreService->ExportSemaphore(signalSemaphore));
- semaphoreHandles->push_back(semaphoreHandle);
- info->releasedOldLayout = releasedOldLayout;
- info->releasedNewLayout = releasedNewLayout;
- info->isInitialized =
- texture->IsSubresourceContentInitialized(texture->GetAllSubresources());
-
- return {};
- }());
+ if (ConsumedError(ValidateVulkanImageCanBeWrapped(this, textureDescriptor),
+ "validating that a Vulkan image can be wrapped with %s.",
+ textureDescriptor)) {
+ return nullptr;
}
- TextureBase* Device::CreateTextureWrappingVulkanImage(
- const ExternalImageDescriptorVk* descriptor,
- ExternalMemoryHandle memoryHandle,
- const std::vector<ExternalSemaphoreHandle>& waitHandles) {
- const TextureDescriptor* textureDescriptor = FromAPI(descriptor->cTextureDescriptor);
-
- // Initial validation
- if (ConsumedError(ValidateTextureDescriptor(this, textureDescriptor))) {
- return nullptr;
- }
- if (ConsumedError(ValidateVulkanImageCanBeWrapped(this, textureDescriptor),
- "validating that a Vulkan image can be wrapped with %s.",
- textureDescriptor)) {
- return nullptr;
+ VkSemaphore signalSemaphore = VK_NULL_HANDLE;
+ VkDeviceMemory allocation = VK_NULL_HANDLE;
+ std::vector<VkSemaphore> waitSemaphores;
+ waitSemaphores.reserve(waitHandles.size());
+
+ // Clean up in case of a failure: the image creation doesn't acquire the external objects
+ // if a failure happens.
+ Texture* result = nullptr;
+ // TODO(crbug.com/1026480): Consolidate this into a single CreateFromExternal call.
+ if (ConsumedError(Texture::CreateFromExternal(this, descriptor, textureDescriptor,
+ mExternalMemoryService.get()),
+ &result) ||
+ ConsumedError(ImportExternalImage(descriptor, memoryHandle, result->GetHandle(),
+ waitHandles, &signalSemaphore, &allocation,
+ &waitSemaphores)) ||
+ ConsumedError(
+ result->BindExternalMemory(descriptor, signalSemaphore, allocation, waitSemaphores))) {
+ // Delete the Texture if it was created
+ if (result != nullptr) {
+ result->Release();
}
- VkSemaphore signalSemaphore = VK_NULL_HANDLE;
- VkDeviceMemory allocation = VK_NULL_HANDLE;
- std::vector<VkSemaphore> waitSemaphores;
- waitSemaphores.reserve(waitHandles.size());
-
- // Cleanup in case of a failure, the image creation doesn't acquire the external objects
- // if a failure happems.
- Texture* result = nullptr;
- // TODO(crbug.com/1026480): Consolidate this into a single CreateFromExternal call.
- if (ConsumedError(Texture::CreateFromExternal(this, descriptor, textureDescriptor,
- mExternalMemoryService.get()),
- &result) ||
- ConsumedError(ImportExternalImage(descriptor, memoryHandle, result->GetHandle(),
- waitHandles, &signalSemaphore, &allocation,
- &waitSemaphores)) ||
- ConsumedError(result->BindExternalMemory(descriptor, signalSemaphore, allocation,
- waitSemaphores))) {
- // Delete the Texture if it was created
- if (result != nullptr) {
- result->Release();
- }
+ // Clear the signal semaphore
+ fn.DestroySemaphore(GetVkDevice(), signalSemaphore, nullptr);
- // Clear the signal semaphore
- fn.DestroySemaphore(GetVkDevice(), signalSemaphore, nullptr);
+ // Clear image memory
+ fn.FreeMemory(GetVkDevice(), allocation, nullptr);
- // Clear image memory
- fn.FreeMemory(GetVkDevice(), allocation, nullptr);
-
- // Clear any wait semaphores we were able to import
- for (VkSemaphore semaphore : waitSemaphores) {
- fn.DestroySemaphore(GetVkDevice(), semaphore, nullptr);
- }
- return nullptr;
+ // Clear any wait semaphores we were able to import
+ for (VkSemaphore semaphore : waitSemaphores) {
+ fn.DestroySemaphore(GetVkDevice(), semaphore, nullptr);
}
-
- return result;
+ return nullptr;
}
- uint32_t Device::GetComputeSubgroupSize() const {
- return mComputeSubgroupSize;
- }
+ return result;
+}
- MaybeError Device::WaitForIdleForDestruction() {
- // Immediately tag the recording context as unused so we don't try to submit it in Tick.
- // Move the mRecordingContext.used to mUnusedCommands so it can be cleaned up in
- // ShutDownImpl
- if (mRecordingContext.used) {
- CommandPoolAndBuffer commands = {mRecordingContext.commandPool,
- mRecordingContext.commandBuffer};
- mUnusedCommands.push_back(commands);
- mRecordingContext = CommandRecordingContext();
- }
+uint32_t Device::GetComputeSubgroupSize() const {
+ return mComputeSubgroupSize;
+}
- VkResult waitIdleResult = VkResult::WrapUnsafe(fn.QueueWaitIdle(mQueue));
- // Ignore the result of QueueWaitIdle: it can return OOM which we can't really do anything
- // about, Device lost, which means workloads running on the GPU are no longer accessible
- // (so they are as good as waited on) or success.
- DAWN_UNUSED(waitIdleResult);
-
- // Make sure all fences are complete by explicitly waiting on them all
- while (!mFencesInFlight.empty()) {
- VkFence fence = mFencesInFlight.front().first;
- ExecutionSerial fenceSerial = mFencesInFlight.front().second;
- ASSERT(fenceSerial > GetCompletedCommandSerial());
-
- VkResult result = VkResult::WrapUnsafe(VK_TIMEOUT);
- do {
- // If WaitForIdleForDesctruction is called while we are Disconnected, it means that
- // the device lost came from the ErrorInjector and we need to wait without allowing
- // any more error to be injected. This is because the device lost was "fake" and
- // commands might still be running.
- if (GetState() == State::Disconnected) {
- result = VkResult::WrapUnsafe(
- fn.WaitForFences(mVkDevice, 1, &*fence, true, UINT64_MAX));
- continue;
- }
-
- result = VkResult::WrapUnsafe(
- INJECT_ERROR_OR_RUN(fn.WaitForFences(mVkDevice, 1, &*fence, true, UINT64_MAX),
- VK_ERROR_DEVICE_LOST));
- } while (result == VK_TIMEOUT);
- // Ignore errors from vkWaitForFences: it can be either OOM which we can't do anything
- // about (and we need to keep going with the destruction of all fences), or device
- // loss, which means the workload on the GPU is no longer accessible and we can
- // safely destroy the fence.
-
- fn.DestroyFence(mVkDevice, fence, nullptr);
- mFencesInFlight.pop();
- }
+void Device::OnDebugMessage(std::string message) {
+ mDebugMessages.push_back(std::move(message));
+}
+
+MaybeError Device::CheckDebugLayerAndGenerateErrors() {
+ if (!GetAdapter()->GetInstance()->IsBackendValidationEnabled() || mDebugMessages.empty()) {
return {};
}
- void Device::DestroyImpl() {
- ASSERT(GetState() == State::Disconnected);
+ auto error = DAWN_INTERNAL_ERROR("The Vulkan validation layer reported uncaught errors.");
- // We failed during initialization so early that we don't even have a VkDevice. There is
- // nothing to do.
- if (mVkDevice == VK_NULL_HANDLE) {
- return;
- }
+ AppendDebugLayerMessages(error.get());
- // The deleter is the second thing we initialize. If it is not present, it means that
- // only the VkDevice was created and nothing else. Destroy the device and do nothing else
- // because the function pointers might not have been loaded (and there is nothing to
- // destroy anyway).
- if (mDeleter == nullptr) {
- fn.DestroyDevice(mVkDevice, nullptr);
- mVkDevice = VK_NULL_HANDLE;
- return;
- }
+ return std::move(error);
+}
- // Enough of the Device's initialization happened that we can now do regular robust
- // deinitialization.
-
- // Immediately tag the recording context as unused so we don't try to submit it in Tick.
- mRecordingContext.used = false;
- if (mRecordingContext.commandPool != VK_NULL_HANDLE) {
- // The VkCommandBuffer memory should be wholly owned by the pool and freed when it is
- // destroyed, but that's not the case in some drivers and the leak memory.
- // So we call FreeCommandBuffers before DestroyCommandPool to be safe.
- // TODO(enga): Only do this on a known list of bad drivers.
- fn.FreeCommandBuffers(mVkDevice, mRecordingContext.commandPool, 1,
- &mRecordingContext.commandBuffer);
- fn.DestroyCommandPool(mVkDevice, mRecordingContext.commandPool, nullptr);
- }
+void Device::AppendDebugLayerMessages(ErrorData* error) {
+ if (!GetAdapter()->GetInstance()->IsBackendValidationEnabled()) {
+ return;
+ }
- for (VkSemaphore semaphore : mRecordingContext.waitSemaphores) {
- fn.DestroySemaphore(mVkDevice, semaphore, nullptr);
- }
- mRecordingContext.waitSemaphores.clear();
+ while (!mDebugMessages.empty()) {
+ error->AppendBackendMessage(std::move(mDebugMessages.back()));
+ mDebugMessages.pop_back();
+ }
+}
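
OnDebugMessage and AppendDebugLayerMessages above form a simple collect-then-drain queue: the Vulkan validation layer pushes messages as they arrive, and error creation moves them onto the error object. A stripped-down sketch of the same pattern, with a hypothetical MessageSink standing in for ErrorData:

#include <string>
#include <utility>
#include <vector>

// Hypothetical stand-in for ErrorData: anything that can accept backend messages.
struct MessageSink {
    std::vector<std::string> backendMessages;
    void AppendBackendMessage(std::string message) {
        backendMessages.push_back(std::move(message));
    }
};

class DebugMessageCollector {
  public:
    // Called whenever the validation layer reports a message for this device.
    void OnDebugMessage(std::string message) { mMessages.push_back(std::move(message)); }

    // Drain every stored message into the sink, leaving the local queue empty.
    void AppendTo(MessageSink* sink) {
        while (!mMessages.empty()) {
            sink->AppendBackendMessage(std::move(mMessages.back()));
            mMessages.pop_back();
        }
    }

  private:
    std::vector<std::string> mMessages;
};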
+
+MaybeError Device::WaitForIdleForDestruction() {
+    // Immediately tag the recording context as unused so we don't try to submit it in Tick.
+    // Move the used mRecordingContext into mUnusedCommands so it can be cleaned up in
+    // DestroyImpl.
+ if (mRecordingContext.used) {
+ CommandPoolAndBuffer commands = {mRecordingContext.commandPool,
+ mRecordingContext.commandBuffer};
+ mUnusedCommands.push_back(commands);
+ mRecordingContext = CommandRecordingContext();
+ }
- for (VkSemaphore semaphore : mRecordingContext.signalSemaphores) {
- fn.DestroySemaphore(mVkDevice, semaphore, nullptr);
- }
- mRecordingContext.signalSemaphores.clear();
-
- // Some commands might still be marked as in-flight if we shut down because of a device
- // loss. Recycle them as unused so that we free them below.
- RecycleCompletedCommands();
- ASSERT(mCommandsInFlight.Empty());
-
- for (const CommandPoolAndBuffer& commands : mUnusedCommands) {
- // The VkCommandBuffer memory should be wholly owned by the pool and freed when it is
- // destroyed, but that's not the case in some drivers and the leak memory.
- // So we call FreeCommandBuffers before DestroyCommandPool to be safe.
- // TODO(enga): Only do this on a known list of bad drivers.
- fn.FreeCommandBuffers(mVkDevice, commands.pool, 1, &commands.commandBuffer);
- fn.DestroyCommandPool(mVkDevice, commands.pool, nullptr);
- }
- mUnusedCommands.clear();
+ VkResult waitIdleResult = VkResult::WrapUnsafe(fn.QueueWaitIdle(mQueue));
+    // Ignore the result of QueueWaitIdle: it can return OOM (which we can't really do anything
+    // about), device lost (which means workloads running on the GPU are no longer accessible,
+    // so they are as good as waited on), or success.
+ DAWN_UNUSED(waitIdleResult);
+
+ // Make sure all fences are complete by explicitly waiting on them all
+ while (!mFencesInFlight.empty()) {
+ VkFence fence = mFencesInFlight.front().first;
+ ExecutionSerial fenceSerial = mFencesInFlight.front().second;
+ ASSERT(fenceSerial > GetCompletedCommandSerial());
+
+ VkResult result = VkResult::WrapUnsafe(VK_TIMEOUT);
+ do {
+            // If WaitForIdleForDestruction is called while we are Disconnected, it means that
+            // the device loss came from the ErrorInjector and we need to wait without allowing
+            // any more errors to be injected. This is because the device loss was "fake" and
+            // commands might still be running.
+ if (GetState() == State::Disconnected) {
+ result =
+ VkResult::WrapUnsafe(fn.WaitForFences(mVkDevice, 1, &*fence, true, UINT64_MAX));
+ continue;
+ }
- // Some fences might still be marked as in-flight if we shut down because of a device loss.
- // Delete them since at this point all commands are complete.
- while (!mFencesInFlight.empty()) {
- fn.DestroyFence(mVkDevice, *mFencesInFlight.front().first, nullptr);
- mFencesInFlight.pop();
- }
+ result = VkResult::WrapUnsafe(INJECT_ERROR_OR_RUN(
+ fn.WaitForFences(mVkDevice, 1, &*fence, true, UINT64_MAX), VK_ERROR_DEVICE_LOST));
+ } while (result == VK_TIMEOUT);
+ // Ignore errors from vkWaitForFences: it can be either OOM which we can't do anything
+ // about (and we need to keep going with the destruction of all fences), or device
+ // loss, which means the workload on the GPU is no longer accessible and we can
+ // safely destroy the fence.
- for (VkFence fence : mUnusedFences) {
- fn.DestroyFence(mVkDevice, fence, nullptr);
- }
- mUnusedFences.clear();
+ fn.DestroyFence(mVkDevice, fence, nullptr);
+ mFencesInFlight.pop();
+ }
+ return {};
+}
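
WaitForIdleForDestruction above waits on the queue, then on every in-flight fence, retrying on VK_TIMEOUT and treating OOM or device loss as "finished" since nothing more can be waited on in either case. A minimal sketch of that drain loop using raw Vulkan calls, without the fn dispatch table, error injection, or serial bookkeeping:

#include <vulkan/vulkan.h>
#include <cstdint>
#include <queue>
#include <utility>

// Block until every fence recorded for an in-flight submit is signaled, then destroy it.
void DrainInFlightFences(VkDevice device,
                         VkQueue queue,
                         std::queue<std::pair<VkFence, uint64_t>>* fencesInFlight) {
    // Ignore the result: OOM and device loss both mean no further GPU work will complete.
    (void)vkQueueWaitIdle(queue);

    while (!fencesInFlight->empty()) {
        VkFence fence = fencesInFlight->front().first;

        VkResult result = VK_TIMEOUT;
        do {
            result = vkWaitForFences(device, 1, &fence, VK_TRUE, UINT64_MAX);
        } while (result == VK_TIMEOUT);
        // Errors here (OOM, VK_ERROR_DEVICE_LOST) still mean the fence can be destroyed.

        vkDestroyFence(device, fence, nullptr);
        fencesInFlight->pop();
    }
}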
- ExecutionSerial completedSerial = GetCompletedCommandSerial();
- for (Ref<DescriptorSetAllocator>& allocator :
- mDescriptorAllocatorsPendingDeallocation.IterateUpTo(completedSerial)) {
- allocator->FinishDeallocation(completedSerial);
- }
+void Device::DestroyImpl() {
+ ASSERT(GetState() == State::Disconnected);
- // Releasing the uploader enqueues buffers to be released.
- // Call Tick() again to clear them before releasing the deleter.
- mResourceMemoryAllocator->Tick(completedSerial);
- mDeleter->Tick(completedSerial);
- mDescriptorAllocatorsPendingDeallocation.ClearUpTo(completedSerial);
-
- // Allow recycled memory to be deleted.
- mResourceMemoryAllocator->DestroyPool();
-
- // The VkRenderPasses in the cache can be destroyed immediately since all commands referring
- // to them are guaranteed to be finished executing.
- mRenderPassCache = nullptr;
-
- // We need handle deleting all child objects by calling Tick() again with a large serial to
- // force all operations to look as if they were completed, and delete all objects before
- // destroying the Deleter and vkDevice.
- ASSERT(mDeleter != nullptr);
- mDeleter->Tick(kMaxExecutionSerial);
- mDeleter = nullptr;
-
- // VkQueues are destroyed when the VkDevice is destroyed
- // The VkDevice is needed to destroy child objects, so it must be destroyed last after all
- // child objects have been deleted.
- ASSERT(mVkDevice != VK_NULL_HANDLE);
+ // We failed during initialization so early that we don't even have a VkDevice. There is
+ // nothing to do.
+ if (mVkDevice == VK_NULL_HANDLE) {
+ return;
+ }
+
+ // The deleter is the second thing we initialize. If it is not present, it means that
+ // only the VkDevice was created and nothing else. Destroy the device and do nothing else
+ // because the function pointers might not have been loaded (and there is nothing to
+ // destroy anyway).
+ if (mDeleter == nullptr) {
fn.DestroyDevice(mVkDevice, nullptr);
mVkDevice = VK_NULL_HANDLE;
+ return;
+ }
+
+ // Enough of the Device's initialization happened that we can now do regular robust
+ // deinitialization.
+
+ ToBackend(GetAdapter())->GetVulkanInstance()->StopListeningForDeviceMessages(this);
+
+ // Immediately tag the recording context as unused so we don't try to submit it in Tick.
+ mRecordingContext.used = false;
+ if (mRecordingContext.commandPool != VK_NULL_HANDLE) {
+ // The VkCommandBuffer memory should be wholly owned by the pool and freed when it is
+        // destroyed, but that's not the case in some drivers and they leak memory.
+ // So we call FreeCommandBuffers before DestroyCommandPool to be safe.
+ // TODO(enga): Only do this on a known list of bad drivers.
+ fn.FreeCommandBuffers(mVkDevice, mRecordingContext.commandPool, 1,
+ &mRecordingContext.commandBuffer);
+ fn.DestroyCommandPool(mVkDevice, mRecordingContext.commandPool, nullptr);
}
- uint32_t Device::GetOptimalBytesPerRowAlignment() const {
- return mDeviceInfo.properties.limits.optimalBufferCopyRowPitchAlignment;
+ for (VkSemaphore semaphore : mRecordingContext.waitSemaphores) {
+ fn.DestroySemaphore(mVkDevice, semaphore, nullptr);
}
+ mRecordingContext.waitSemaphores.clear();
- uint64_t Device::GetOptimalBufferToTextureCopyOffsetAlignment() const {
- return mDeviceInfo.properties.limits.optimalBufferCopyOffsetAlignment;
+ for (VkSemaphore semaphore : mRecordingContext.signalSemaphores) {
+ fn.DestroySemaphore(mVkDevice, semaphore, nullptr);
+ }
+ mRecordingContext.signalSemaphores.clear();
+
+ // Some commands might still be marked as in-flight if we shut down because of a device
+ // loss. Recycle them as unused so that we free them below.
+ RecycleCompletedCommands();
+ ASSERT(mCommandsInFlight.Empty());
+
+ for (const CommandPoolAndBuffer& commands : mUnusedCommands) {
+ // The VkCommandBuffer memory should be wholly owned by the pool and freed when it is
+        // destroyed, but that's not the case in some drivers and they leak memory.
+ // So we call FreeCommandBuffers before DestroyCommandPool to be safe.
+ // TODO(enga): Only do this on a known list of bad drivers.
+ fn.FreeCommandBuffers(mVkDevice, commands.pool, 1, &commands.commandBuffer);
+ fn.DestroyCommandPool(mVkDevice, commands.pool, nullptr);
}
+ mUnusedCommands.clear();
- float Device::GetTimestampPeriodInNS() const {
- return mDeviceInfo.properties.limits.timestampPeriod;
+ // Some fences might still be marked as in-flight if we shut down because of a device loss.
+ // Delete them since at this point all commands are complete.
+ while (!mFencesInFlight.empty()) {
+ fn.DestroyFence(mVkDevice, *mFencesInFlight.front().first, nullptr);
+ mFencesInFlight.pop();
}
- void Device::SetLabelImpl() {
- SetDebugName(this, VK_OBJECT_TYPE_DEVICE, mVkDevice, "Dawn_Device", GetLabel());
+ for (VkFence fence : mUnusedFences) {
+ fn.DestroyFence(mVkDevice, fence, nullptr);
}
+ mUnusedFences.clear();
+
+ ExecutionSerial completedSerial = GetCompletedCommandSerial();
+ for (Ref<DescriptorSetAllocator>& allocator :
+ mDescriptorAllocatorsPendingDeallocation.IterateUpTo(completedSerial)) {
+ allocator->FinishDeallocation(completedSerial);
+ }
+
+ // Releasing the uploader enqueues buffers to be released.
+ // Call Tick() again to clear them before releasing the deleter.
+ mResourceMemoryAllocator->Tick(completedSerial);
+ mDeleter->Tick(completedSerial);
+ mDescriptorAllocatorsPendingDeallocation.ClearUpTo(completedSerial);
+
+ // Allow recycled memory to be deleted.
+ mResourceMemoryAllocator->DestroyPool();
+
+ // The VkRenderPasses in the cache can be destroyed immediately since all commands referring
+ // to them are guaranteed to be finished executing.
+ mRenderPassCache = nullptr;
+
+    // We need to delete all child objects by calling Tick() again with a large serial to
+ // force all operations to look as if they were completed, and delete all objects before
+ // destroying the Deleter and vkDevice.
+ ASSERT(mDeleter != nullptr);
+ mDeleter->Tick(kMaxExecutionSerial);
+ mDeleter = nullptr;
+
+ // VkQueues are destroyed when the VkDevice is destroyed
+ // The VkDevice is needed to destroy child objects, so it must be destroyed last after all
+ // child objects have been deleted.
+ ASSERT(mVkDevice != VK_NULL_HANDLE);
+ fn.DestroyDevice(mVkDevice, nullptr);
+ mVkDevice = VK_NULL_HANDLE;
+}
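
DestroyImpl above is careful about ordering: command buffers are freed explicitly before their pool to work around drivers that otherwise leak, every object created from the VkDevice is released first, and the VkDevice itself goes last. A condensed sketch of that ordering for a single pool/buffer pair and one semaphore:

#include <vulkan/vulkan.h>

// Tear down a recording context and the device in dependency order. The VkQueue is owned
// by the device and needs no explicit destruction.
void DestroyDeviceInOrder(VkDevice device,
                          VkCommandPool commandPool,
                          VkCommandBuffer commandBuffer,
                          VkSemaphore waitSemaphore) {
    if (commandPool != VK_NULL_HANDLE) {
        // Freeing the buffer before destroying the pool sidesteps drivers that leak
        // command-buffer memory when only vkDestroyCommandPool is called.
        vkFreeCommandBuffers(device, commandPool, 1, &commandBuffer);
        vkDestroyCommandPool(device, commandPool, nullptr);
    }

    if (waitSemaphore != VK_NULL_HANDLE) {
        vkDestroySemaphore(device, waitSemaphore, nullptr);
    }

    // The VkDevice is destroyed last, after every object created from it is gone.
    vkDestroyDevice(device, nullptr);
}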
+
+uint32_t Device::GetOptimalBytesPerRowAlignment() const {
+ return mDeviceInfo.properties.limits.optimalBufferCopyRowPitchAlignment;
+}
+
+uint64_t Device::GetOptimalBufferToTextureCopyOffsetAlignment() const {
+ return mDeviceInfo.properties.limits.optimalBufferCopyOffsetAlignment;
+}
+
+float Device::GetTimestampPeriodInNS() const {
+ return mDeviceInfo.properties.limits.timestampPeriod;
+}
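
GetTimestampPeriodInNS above returns VkPhysicalDeviceLimits::timestampPeriod, the number of nanoseconds represented by one timestamp tick. A small sketch of how a caller might convert two read-back query-pool timestamps into an elapsed time; the helper name is hypothetical:

#include <cstdint>

// Convert raw GPU timestamp ticks into nanoseconds using the device's timestampPeriod
// (nanoseconds per tick). Assumes both timestamps were written on the same queue.
double ElapsedNanoseconds(uint64_t beginTicks, uint64_t endTicks, float timestampPeriodNs) {
    return static_cast<double>(endTicks - beginTicks) * static_cast<double>(timestampPeriodNs);
}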
+
+void Device::SetLabelImpl() {
+ SetDebugName(this, VK_OBJECT_TYPE_DEVICE, mVkDevice, "Dawn_Device", GetLabel());
+}
} // namespace dawn::native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/DeviceVk.h b/chromium/third_party/dawn/src/dawn/native/vulkan/DeviceVk.h
index 7cfa9f0d6e1..b0638379bc6 100644
--- a/chromium/third_party/dawn/src/dawn/native/vulkan/DeviceVk.h
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/DeviceVk.h
@@ -15,11 +15,16 @@
#ifndef SRC_DAWN_NATIVE_VULKAN_DEVICEVK_H_
#define SRC_DAWN_NATIVE_VULKAN_DEVICEVK_H_
-#include "dawn/native/dawn_platform.h"
+#include <memory>
+#include <queue>
+#include <string>
+#include <utility>
+#include <vector>
#include "dawn/common/SerialQueue.h"
#include "dawn/native/Commands.h"
#include "dawn/native/Device.h"
+#include "dawn/native/dawn_platform.h"
#include "dawn/native/vulkan/CommandRecordingContext.h"
#include "dawn/native/vulkan/DescriptorSetAllocator.h"
#include "dawn/native/vulkan/Forward.h"
@@ -29,187 +34,192 @@
#include "dawn/native/vulkan/external_memory/MemoryService.h"
#include "dawn/native/vulkan/external_semaphore/SemaphoreService.h"
-#include <memory>
-#include <queue>
-
namespace dawn::native::vulkan {
- class Adapter;
- class BindGroupLayout;
- class BufferUploader;
- class FencedDeleter;
- class RenderPassCache;
- class ResourceMemoryAllocator;
-
- class Device final : public DeviceBase {
- public:
- static ResultOrError<Ref<Device>> Create(Adapter* adapter,
- const DeviceDescriptor* descriptor);
- ~Device() override;
-
- MaybeError Initialize(const DeviceDescriptor* descriptor);
-
- // Contains all the Vulkan entry points, vkDoFoo is called via device->fn.DoFoo.
- const VulkanFunctions fn;
-
- VkInstance GetVkInstance() const;
- const VulkanDeviceInfo& GetDeviceInfo() const;
- const VulkanGlobalInfo& GetGlobalInfo() const;
- VkDevice GetVkDevice() const;
- uint32_t GetGraphicsQueueFamily() const;
- VkQueue GetQueue() const;
-
- FencedDeleter* GetFencedDeleter() const;
- RenderPassCache* GetRenderPassCache() const;
- ResourceMemoryAllocator* GetResourceMemoryAllocator() const;
-
- CommandRecordingContext* GetPendingRecordingContext();
- MaybeError SubmitPendingCommands();
-
- void EnqueueDeferredDeallocation(DescriptorSetAllocator* allocator);
-
- // Dawn Native API
-
- TextureBase* CreateTextureWrappingVulkanImage(
- const ExternalImageDescriptorVk* descriptor,
- ExternalMemoryHandle memoryHandle,
- const std::vector<ExternalSemaphoreHandle>& waitHandles);
- bool SignalAndExportExternalTexture(Texture* texture,
- VkImageLayout desiredLayout,
- ExternalImageExportInfoVk* info,
- std::vector<ExternalSemaphoreHandle>* semaphoreHandle);
-
- ResultOrError<Ref<CommandBufferBase>> CreateCommandBuffer(
- CommandEncoder* encoder,
- const CommandBufferDescriptor* descriptor) override;
-
- MaybeError TickImpl() override;
-
- ResultOrError<std::unique_ptr<StagingBufferBase>> CreateStagingBuffer(size_t size) override;
- MaybeError CopyFromStagingToBuffer(StagingBufferBase* source,
- uint64_t sourceOffset,
- BufferBase* destination,
- uint64_t destinationOffset,
- uint64_t size) override;
- MaybeError CopyFromStagingToTexture(const StagingBufferBase* source,
- const TextureDataLayout& src,
- TextureCopy* dst,
- const Extent3D& copySizePixels) override;
-
- // Return the fixed subgroup size to use for compute shaders on this device or 0 if none
- // needs to be set.
- uint32_t GetComputeSubgroupSize() const;
-
- uint32_t GetOptimalBytesPerRowAlignment() const override;
- uint64_t GetOptimalBufferToTextureCopyOffsetAlignment() const override;
-
- float GetTimestampPeriodInNS() const override;
-
- void SetLabelImpl() override;
-
- private:
- Device(Adapter* adapter, const DeviceDescriptor* descriptor);
-
- ResultOrError<Ref<BindGroupBase>> CreateBindGroupImpl(
- const BindGroupDescriptor* descriptor) override;
- ResultOrError<Ref<BindGroupLayoutBase>> CreateBindGroupLayoutImpl(
- const BindGroupLayoutDescriptor* descriptor,
- PipelineCompatibilityToken pipelineCompatibilityToken) override;
- ResultOrError<Ref<BufferBase>> CreateBufferImpl(
- const BufferDescriptor* descriptor) override;
- ResultOrError<Ref<PipelineLayoutBase>> CreatePipelineLayoutImpl(
- const PipelineLayoutDescriptor* descriptor) override;
- ResultOrError<Ref<QuerySetBase>> CreateQuerySetImpl(
- const QuerySetDescriptor* descriptor) override;
- ResultOrError<Ref<SamplerBase>> CreateSamplerImpl(
- const SamplerDescriptor* descriptor) override;
- ResultOrError<Ref<ShaderModuleBase>> CreateShaderModuleImpl(
- const ShaderModuleDescriptor* descriptor,
- ShaderModuleParseResult* parseResult) override;
- ResultOrError<Ref<SwapChainBase>> CreateSwapChainImpl(
- const SwapChainDescriptor* descriptor) override;
- ResultOrError<Ref<NewSwapChainBase>> CreateSwapChainImpl(
- Surface* surface,
- NewSwapChainBase* previousSwapChain,
- const SwapChainDescriptor* descriptor) override;
- ResultOrError<Ref<TextureBase>> CreateTextureImpl(
- const TextureDescriptor* descriptor) override;
- ResultOrError<Ref<TextureViewBase>> CreateTextureViewImpl(
- TextureBase* texture,
- const TextureViewDescriptor* descriptor) override;
- Ref<ComputePipelineBase> CreateUninitializedComputePipelineImpl(
- const ComputePipelineDescriptor* descriptor) override;
- Ref<RenderPipelineBase> CreateUninitializedRenderPipelineImpl(
- const RenderPipelineDescriptor* descriptor) override;
- void InitializeComputePipelineAsyncImpl(Ref<ComputePipelineBase> computePipeline,
- WGPUCreateComputePipelineAsyncCallback callback,
- void* userdata) override;
- void InitializeRenderPipelineAsyncImpl(Ref<RenderPipelineBase> renderPipeline,
- WGPUCreateRenderPipelineAsyncCallback callback,
- void* userdata) override;
-
- ResultOrError<VulkanDeviceKnobs> CreateDevice(VkPhysicalDevice physicalDevice);
- void GatherQueueFromDevice();
-
- uint32_t FindComputeSubgroupSize() const;
- void InitTogglesFromDriver();
- void ApplyDepthStencilFormatToggles();
- void ApplyUseZeroInitializeWorkgroupMemoryExtensionToggle();
-
- void DestroyImpl() override;
- MaybeError WaitForIdleForDestruction() override;
-
- // To make it easier to use fn it is a public const member. However
- // the Device is allowed to mutate them through these private methods.
- VulkanFunctions* GetMutableFunctions();
-
- VulkanDeviceInfo mDeviceInfo = {};
- VkDevice mVkDevice = VK_NULL_HANDLE;
- uint32_t mQueueFamily = 0;
- VkQueue mQueue = VK_NULL_HANDLE;
- uint32_t mComputeSubgroupSize = 0;
-
- SerialQueue<ExecutionSerial, Ref<DescriptorSetAllocator>>
- mDescriptorAllocatorsPendingDeallocation;
- std::unique_ptr<FencedDeleter> mDeleter;
- std::unique_ptr<ResourceMemoryAllocator> mResourceMemoryAllocator;
- std::unique_ptr<RenderPassCache> mRenderPassCache;
-
- std::unique_ptr<external_memory::Service> mExternalMemoryService;
- std::unique_ptr<external_semaphore::Service> mExternalSemaphoreService;
-
- ResultOrError<VkFence> GetUnusedFence();
- ResultOrError<ExecutionSerial> CheckAndUpdateCompletedSerials() override;
-
- // We track which operations are in flight on the GPU with an increasing serial.
- // This works only because we have a single queue. Each submit to a queue is associated
- // to a serial and a fence, such that when the fence is "ready" we know the operations
- // have finished.
- std::queue<std::pair<VkFence, ExecutionSerial>> mFencesInFlight;
- // Fences in the unused list aren't reset yet.
- std::vector<VkFence> mUnusedFences;
-
- MaybeError PrepareRecordingContext();
- void RecycleCompletedCommands();
-
- struct CommandPoolAndBuffer {
- VkCommandPool pool = VK_NULL_HANDLE;
- VkCommandBuffer commandBuffer = VK_NULL_HANDLE;
- };
- SerialQueue<ExecutionSerial, CommandPoolAndBuffer> mCommandsInFlight;
- // Command pools in the unused list haven't been reset yet.
- std::vector<CommandPoolAndBuffer> mUnusedCommands;
- // There is always a valid recording context stored in mRecordingContext
- CommandRecordingContext mRecordingContext;
-
- MaybeError ImportExternalImage(const ExternalImageDescriptorVk* descriptor,
- ExternalMemoryHandle memoryHandle,
- VkImage image,
- const std::vector<ExternalSemaphoreHandle>& waitHandles,
- VkSemaphore* outSignalSemaphore,
- VkDeviceMemory* outAllocation,
- std::vector<VkSemaphore>* outWaitSemaphores);
+class BufferUploader;
+class FencedDeleter;
+class RenderPassCache;
+class ResourceMemoryAllocator;
+
+class Device final : public DeviceBase {
+ public:
+ static ResultOrError<Ref<Device>> Create(Adapter* adapter, const DeviceDescriptor* descriptor);
+ ~Device() override;
+
+ MaybeError Initialize(const DeviceDescriptor* descriptor);
+
+ // Contains all the Vulkan entry points, vkDoFoo is called via device->fn.DoFoo.
+ const VulkanFunctions fn;
+
+ VkInstance GetVkInstance() const;
+ const VulkanDeviceInfo& GetDeviceInfo() const;
+ const VulkanGlobalInfo& GetGlobalInfo() const;
+ VkDevice GetVkDevice() const;
+ uint32_t GetGraphicsQueueFamily() const;
+ VkQueue GetQueue() const;
+
+ FencedDeleter* GetFencedDeleter() const;
+ RenderPassCache* GetRenderPassCache() const;
+ ResourceMemoryAllocator* GetResourceMemoryAllocator() const;
+
+ CommandRecordingContext* GetPendingRecordingContext();
+ MaybeError SubmitPendingCommands();
+
+ void EnqueueDeferredDeallocation(DescriptorSetAllocator* allocator);
+
+ // Dawn Native API
+
+ TextureBase* CreateTextureWrappingVulkanImage(
+ const ExternalImageDescriptorVk* descriptor,
+ ExternalMemoryHandle memoryHandle,
+ const std::vector<ExternalSemaphoreHandle>& waitHandles);
+ bool SignalAndExportExternalTexture(Texture* texture,
+ VkImageLayout desiredLayout,
+ ExternalImageExportInfoVk* info,
+ std::vector<ExternalSemaphoreHandle>* semaphoreHandle);
+
+ ResultOrError<Ref<CommandBufferBase>> CreateCommandBuffer(
+ CommandEncoder* encoder,
+ const CommandBufferDescriptor* descriptor) override;
+
+ MaybeError TickImpl() override;
+
+ ResultOrError<std::unique_ptr<StagingBufferBase>> CreateStagingBuffer(size_t size) override;
+ MaybeError CopyFromStagingToBuffer(StagingBufferBase* source,
+ uint64_t sourceOffset,
+ BufferBase* destination,
+ uint64_t destinationOffset,
+ uint64_t size) override;
+ MaybeError CopyFromStagingToTexture(const StagingBufferBase* source,
+ const TextureDataLayout& src,
+ TextureCopy* dst,
+ const Extent3D& copySizePixels) override;
+
+ // Return the fixed subgroup size to use for compute shaders on this device or 0 if none
+ // needs to be set.
+ uint32_t GetComputeSubgroupSize() const;
+
+ uint32_t GetOptimalBytesPerRowAlignment() const override;
+ uint64_t GetOptimalBufferToTextureCopyOffsetAlignment() const override;
+
+ float GetTimestampPeriodInNS() const override;
+
+ void SetLabelImpl() override;
+
+ void OnDebugMessage(std::string message);
+
+ // Used to associate this device with validation layer messages.
+ const char* GetDebugPrefix() { return mDebugPrefix.c_str(); }
+
+ private:
+ Device(Adapter* adapter, const DeviceDescriptor* descriptor);
+
+ ResultOrError<Ref<BindGroupBase>> CreateBindGroupImpl(
+ const BindGroupDescriptor* descriptor) override;
+ ResultOrError<Ref<BindGroupLayoutBase>> CreateBindGroupLayoutImpl(
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken) override;
+ ResultOrError<Ref<BufferBase>> CreateBufferImpl(const BufferDescriptor* descriptor) override;
+ ResultOrError<Ref<PipelineLayoutBase>> CreatePipelineLayoutImpl(
+ const PipelineLayoutDescriptor* descriptor) override;
+ ResultOrError<Ref<QuerySetBase>> CreateQuerySetImpl(
+ const QuerySetDescriptor* descriptor) override;
+ ResultOrError<Ref<SamplerBase>> CreateSamplerImpl(const SamplerDescriptor* descriptor) override;
+ ResultOrError<Ref<ShaderModuleBase>> CreateShaderModuleImpl(
+ const ShaderModuleDescriptor* descriptor,
+ ShaderModuleParseResult* parseResult,
+ OwnedCompilationMessages* compilationMessages) override;
+ ResultOrError<Ref<SwapChainBase>> CreateSwapChainImpl(
+ const SwapChainDescriptor* descriptor) override;
+ ResultOrError<Ref<NewSwapChainBase>> CreateSwapChainImpl(
+ Surface* surface,
+ NewSwapChainBase* previousSwapChain,
+ const SwapChainDescriptor* descriptor) override;
+ ResultOrError<Ref<TextureBase>> CreateTextureImpl(const TextureDescriptor* descriptor) override;
+ ResultOrError<Ref<TextureViewBase>> CreateTextureViewImpl(
+ TextureBase* texture,
+ const TextureViewDescriptor* descriptor) override;
+ Ref<ComputePipelineBase> CreateUninitializedComputePipelineImpl(
+ const ComputePipelineDescriptor* descriptor) override;
+ Ref<RenderPipelineBase> CreateUninitializedRenderPipelineImpl(
+ const RenderPipelineDescriptor* descriptor) override;
+ Ref<PipelineCacheBase> GetOrCreatePipelineCacheImpl(const CacheKey& key) override;
+ void InitializeComputePipelineAsyncImpl(Ref<ComputePipelineBase> computePipeline,
+ WGPUCreateComputePipelineAsyncCallback callback,
+ void* userdata) override;
+ void InitializeRenderPipelineAsyncImpl(Ref<RenderPipelineBase> renderPipeline,
+ WGPUCreateRenderPipelineAsyncCallback callback,
+ void* userdata) override;
+
+ ResultOrError<VulkanDeviceKnobs> CreateDevice(VkPhysicalDevice physicalDevice);
+ void GatherQueueFromDevice();
+
+ uint32_t FindComputeSubgroupSize() const;
+ void InitTogglesFromDriver();
+ void ApplyDepthStencilFormatToggles();
+ void ApplyUseZeroInitializeWorkgroupMemoryExtensionToggle();
+
+ MaybeError CheckDebugLayerAndGenerateErrors();
+ void AppendDebugLayerMessages(ErrorData* error) override;
+
+ void DestroyImpl() override;
+ MaybeError WaitForIdleForDestruction() override;
+
+    // To make it easier to use, fn is a public const member. However,
+    // the Device is allowed to mutate it through these private methods.
+ VulkanFunctions* GetMutableFunctions();
+
+ VulkanDeviceInfo mDeviceInfo = {};
+ VkDevice mVkDevice = VK_NULL_HANDLE;
+ uint32_t mQueueFamily = 0;
+ VkQueue mQueue = VK_NULL_HANDLE;
+ uint32_t mComputeSubgroupSize = 0;
+
+ SerialQueue<ExecutionSerial, Ref<DescriptorSetAllocator>>
+ mDescriptorAllocatorsPendingDeallocation;
+ std::unique_ptr<FencedDeleter> mDeleter;
+ std::unique_ptr<ResourceMemoryAllocator> mResourceMemoryAllocator;
+ std::unique_ptr<RenderPassCache> mRenderPassCache;
+
+ std::unique_ptr<external_memory::Service> mExternalMemoryService;
+ std::unique_ptr<external_semaphore::Service> mExternalSemaphoreService;
+
+ ResultOrError<VkFence> GetUnusedFence();
+ ResultOrError<ExecutionSerial> CheckAndUpdateCompletedSerials() override;
+
+ // We track which operations are in flight on the GPU with an increasing serial.
+    // This works only because we have a single queue. Each submit to a queue is associated
+    // with a serial and a fence, such that when the fence is "ready" we know the operations
+ // have finished.
+ std::queue<std::pair<VkFence, ExecutionSerial>> mFencesInFlight;
+ // Fences in the unused list aren't reset yet.
+ std::vector<VkFence> mUnusedFences;
+
+ // For capturing messages generated by the Vulkan debug layer.
+ const std::string mDebugPrefix;
+ std::vector<std::string> mDebugMessages;
+
+ MaybeError PrepareRecordingContext();
+ void RecycleCompletedCommands();
+
+ struct CommandPoolAndBuffer {
+ VkCommandPool pool = VK_NULL_HANDLE;
+ VkCommandBuffer commandBuffer = VK_NULL_HANDLE;
};
+ SerialQueue<ExecutionSerial, CommandPoolAndBuffer> mCommandsInFlight;
+ // Command pools in the unused list haven't been reset yet.
+ std::vector<CommandPoolAndBuffer> mUnusedCommands;
+ // There is always a valid recording context stored in mRecordingContext
+ CommandRecordingContext mRecordingContext;
+
+ MaybeError ImportExternalImage(const ExternalImageDescriptorVk* descriptor,
+ ExternalMemoryHandle memoryHandle,
+ VkImage image,
+ const std::vector<ExternalSemaphoreHandle>& waitHandles,
+ VkSemaphore* outSignalSemaphore,
+ VkDeviceMemory* outAllocation,
+ std::vector<VkSemaphore>* outWaitSemaphores);
+};
} // namespace dawn::native::vulkan
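
The mFencesInFlight queue declared above pairs each submitted fence with its execution serial so the device can learn how far the GPU has progressed without blocking. CheckAndUpdateCompletedSerials is not shown in this patch; the sketch below is one way such a check can be polled, assuming fences are enqueued in submission order, and is not a copy of Dawn's implementation:

#include <vulkan/vulkan.h>
#include <cstdint>
#include <queue>
#include <utility>
#include <vector>

// Poll the in-flight fences front-to-back; every signaled fence advances the completed
// serial and is recycled for reuse. Stops at the first fence that is not yet signaled.
uint64_t UpdateCompletedSerial(VkDevice device,
                               std::queue<std::pair<VkFence, uint64_t>>* fencesInFlight,
                               std::vector<VkFence>* unusedFences,
                               uint64_t completedSerial) {
    while (!fencesInFlight->empty()) {
        VkFence fence = fencesInFlight->front().first;
        uint64_t serial = fencesInFlight->front().second;

        if (vkGetFenceStatus(device, fence) != VK_SUCCESS) {
            // Still pending (or an error): later submits cannot have completed either.
            break;
        }

        completedSerial = serial;
        unusedFences->push_back(fence);  // not reset here; reset happens before reuse
        fencesInFlight->pop();
    }
    return completedSerial;
}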
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/ExternalHandle.h b/chromium/third_party/dawn/src/dawn/native/vulkan/ExternalHandle.h
index d5a607e5666..c2d1433e4ca 100644
--- a/chromium/third_party/dawn/src/dawn/native/vulkan/ExternalHandle.h
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/ExternalHandle.h
@@ -19,20 +19,20 @@
namespace dawn::native::vulkan {
-#if DAWN_PLATFORM_LINUX
- // File descriptor
- using ExternalMemoryHandle = int;
- // File descriptor
- using ExternalSemaphoreHandle = int;
-#elif DAWN_PLATFORM_FUCHSIA
- // Really a Zircon vmo handle.
- using ExternalMemoryHandle = zx_handle_t;
- // Really a Zircon event handle.
- using ExternalSemaphoreHandle = zx_handle_t;
+#if DAWN_PLATFORM_IS(LINUX)
+// File descriptor
+using ExternalMemoryHandle = int;
+// File descriptor
+using ExternalSemaphoreHandle = int;
+#elif DAWN_PLATFORM_IS(FUCHSIA)
+// Really a Zircon vmo handle.
+using ExternalMemoryHandle = zx_handle_t;
+// Really a Zircon event handle.
+using ExternalSemaphoreHandle = zx_handle_t;
#else
- // Generic types so that the Null service can compile, not used for real handles
- using ExternalMemoryHandle = void*;
- using ExternalSemaphoreHandle = void*;
+// Generic types so that the Null service can compile, not used for real handles
+using ExternalMemoryHandle = void*;
+using ExternalSemaphoreHandle = void*;
#endif
} // namespace dawn::native::vulkan
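
The hunk above replaces object-like DAWN_PLATFORM_LINUX checks with a function-like DAWN_PLATFORM_IS(LINUX) predicate. One common way to build that kind of macro is sketched below; this is an assumption about the general technique and the MY_PLATFORM_IS names are hypothetical, not a copy of Dawn's Platform header:

// Each platform expands to 0 or 1, so a misspelled platform name becomes an undefined
// function-like macro and fails to preprocess instead of silently evaluating to false.
#if defined(__linux__)
#define MY_PLATFORM_IS_LINUX() 1
#else
#define MY_PLATFORM_IS_LINUX() 0
#endif

#if defined(__Fuchsia__)
#define MY_PLATFORM_IS_FUCHSIA() 1
#else
#define MY_PLATFORM_IS_FUCHSIA() 0
#endif

#define MY_PLATFORM_IS(X) MY_PLATFORM_IS_##X()

#if MY_PLATFORM_IS(LINUX)
using ExternalMemoryHandleSketch = int;  // file descriptor
#else
using ExternalMemoryHandleSketch = void*;  // generic placeholder
#endif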
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/FencedDeleter.cpp b/chromium/third_party/dawn/src/dawn/native/vulkan/FencedDeleter.cpp
index 09c91b43c34..167e588549b 100644
--- a/chromium/third_party/dawn/src/dawn/native/vulkan/FencedDeleter.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/FencedDeleter.cpp
@@ -18,166 +18,165 @@
namespace dawn::native::vulkan {
- FencedDeleter::FencedDeleter(Device* device) : mDevice(device) {
- }
+FencedDeleter::FencedDeleter(Device* device) : mDevice(device) {}
+
+FencedDeleter::~FencedDeleter() {
+ ASSERT(mBuffersToDelete.Empty());
+ ASSERT(mDescriptorPoolsToDelete.Empty());
+ ASSERT(mFramebuffersToDelete.Empty());
+ ASSERT(mImagesToDelete.Empty());
+ ASSERT(mImageViewsToDelete.Empty());
+ ASSERT(mMemoriesToDelete.Empty());
+ ASSERT(mPipelinesToDelete.Empty());
+ ASSERT(mPipelineLayoutsToDelete.Empty());
+ ASSERT(mQueryPoolsToDelete.Empty());
+ ASSERT(mRenderPassesToDelete.Empty());
+ ASSERT(mSamplersToDelete.Empty());
+ ASSERT(mSemaphoresToDelete.Empty());
+ ASSERT(mShaderModulesToDelete.Empty());
+ ASSERT(mSurfacesToDelete.Empty());
+ ASSERT(mSwapChainsToDelete.Empty());
+}
- FencedDeleter::~FencedDeleter() {
- ASSERT(mBuffersToDelete.Empty());
- ASSERT(mDescriptorPoolsToDelete.Empty());
- ASSERT(mFramebuffersToDelete.Empty());
- ASSERT(mImagesToDelete.Empty());
- ASSERT(mImageViewsToDelete.Empty());
- ASSERT(mMemoriesToDelete.Empty());
- ASSERT(mPipelinesToDelete.Empty());
- ASSERT(mPipelineLayoutsToDelete.Empty());
- ASSERT(mQueryPoolsToDelete.Empty());
- ASSERT(mRenderPassesToDelete.Empty());
- ASSERT(mSamplersToDelete.Empty());
- ASSERT(mSemaphoresToDelete.Empty());
- ASSERT(mShaderModulesToDelete.Empty());
- ASSERT(mSurfacesToDelete.Empty());
- ASSERT(mSwapChainsToDelete.Empty());
- }
+void FencedDeleter::DeleteWhenUnused(VkBuffer buffer) {
+ mBuffersToDelete.Enqueue(buffer, mDevice->GetPendingCommandSerial());
+}
- void FencedDeleter::DeleteWhenUnused(VkBuffer buffer) {
- mBuffersToDelete.Enqueue(buffer, mDevice->GetPendingCommandSerial());
- }
+void FencedDeleter::DeleteWhenUnused(VkDescriptorPool pool) {
+ mDescriptorPoolsToDelete.Enqueue(pool, mDevice->GetPendingCommandSerial());
+}
- void FencedDeleter::DeleteWhenUnused(VkDescriptorPool pool) {
- mDescriptorPoolsToDelete.Enqueue(pool, mDevice->GetPendingCommandSerial());
- }
+void FencedDeleter::DeleteWhenUnused(VkDeviceMemory memory) {
+ mMemoriesToDelete.Enqueue(memory, mDevice->GetPendingCommandSerial());
+}
- void FencedDeleter::DeleteWhenUnused(VkDeviceMemory memory) {
- mMemoriesToDelete.Enqueue(memory, mDevice->GetPendingCommandSerial());
- }
+void FencedDeleter::DeleteWhenUnused(VkFramebuffer framebuffer) {
+ mFramebuffersToDelete.Enqueue(framebuffer, mDevice->GetPendingCommandSerial());
+}
+
+void FencedDeleter::DeleteWhenUnused(VkImage image) {
+ mImagesToDelete.Enqueue(image, mDevice->GetPendingCommandSerial());
+}
+
+void FencedDeleter::DeleteWhenUnused(VkImageView view) {
+ mImageViewsToDelete.Enqueue(view, mDevice->GetPendingCommandSerial());
+}
+
+void FencedDeleter::DeleteWhenUnused(VkPipeline pipeline) {
+ mPipelinesToDelete.Enqueue(pipeline, mDevice->GetPendingCommandSerial());
+}
+
+void FencedDeleter::DeleteWhenUnused(VkPipelineLayout layout) {
+ mPipelineLayoutsToDelete.Enqueue(layout, mDevice->GetPendingCommandSerial());
+}
+
+void FencedDeleter::DeleteWhenUnused(VkQueryPool querypool) {
+ mQueryPoolsToDelete.Enqueue(querypool, mDevice->GetPendingCommandSerial());
+}
- void FencedDeleter::DeleteWhenUnused(VkFramebuffer framebuffer) {
- mFramebuffersToDelete.Enqueue(framebuffer, mDevice->GetPendingCommandSerial());
+void FencedDeleter::DeleteWhenUnused(VkRenderPass renderPass) {
+ mRenderPassesToDelete.Enqueue(renderPass, mDevice->GetPendingCommandSerial());
+}
+
+void FencedDeleter::DeleteWhenUnused(VkSampler sampler) {
+ mSamplersToDelete.Enqueue(sampler, mDevice->GetPendingCommandSerial());
+}
+
+void FencedDeleter::DeleteWhenUnused(VkSemaphore semaphore) {
+ mSemaphoresToDelete.Enqueue(semaphore, mDevice->GetPendingCommandSerial());
+}
+
+void FencedDeleter::DeleteWhenUnused(VkShaderModule module) {
+ mShaderModulesToDelete.Enqueue(module, mDevice->GetPendingCommandSerial());
+}
+
+void FencedDeleter::DeleteWhenUnused(VkSurfaceKHR surface) {
+ mSurfacesToDelete.Enqueue(surface, mDevice->GetPendingCommandSerial());
+}
+
+void FencedDeleter::DeleteWhenUnused(VkSwapchainKHR swapChain) {
+ mSwapChainsToDelete.Enqueue(swapChain, mDevice->GetPendingCommandSerial());
+}
+
+void FencedDeleter::Tick(ExecutionSerial completedSerial) {
+ VkDevice vkDevice = mDevice->GetVkDevice();
+ VkInstance instance = mDevice->GetVkInstance();
+
+ // Buffers and images must be deleted before memories because it is invalid to free memory
+    // that still has resources bound to it.
+ for (VkBuffer buffer : mBuffersToDelete.IterateUpTo(completedSerial)) {
+ mDevice->fn.DestroyBuffer(vkDevice, buffer, nullptr);
}
+ mBuffersToDelete.ClearUpTo(completedSerial);
+ for (VkImage image : mImagesToDelete.IterateUpTo(completedSerial)) {
+ mDevice->fn.DestroyImage(vkDevice, image, nullptr);
+ }
+ mImagesToDelete.ClearUpTo(completedSerial);
- void FencedDeleter::DeleteWhenUnused(VkImage image) {
- mImagesToDelete.Enqueue(image, mDevice->GetPendingCommandSerial());
+ for (VkDeviceMemory memory : mMemoriesToDelete.IterateUpTo(completedSerial)) {
+ mDevice->fn.FreeMemory(vkDevice, memory, nullptr);
}
+ mMemoriesToDelete.ClearUpTo(completedSerial);
- void FencedDeleter::DeleteWhenUnused(VkImageView view) {
- mImageViewsToDelete.Enqueue(view, mDevice->GetPendingCommandSerial());
+ for (VkPipelineLayout layout : mPipelineLayoutsToDelete.IterateUpTo(completedSerial)) {
+ mDevice->fn.DestroyPipelineLayout(vkDevice, layout, nullptr);
}
+ mPipelineLayoutsToDelete.ClearUpTo(completedSerial);
- void FencedDeleter::DeleteWhenUnused(VkPipeline pipeline) {
- mPipelinesToDelete.Enqueue(pipeline, mDevice->GetPendingCommandSerial());
+ for (VkRenderPass renderPass : mRenderPassesToDelete.IterateUpTo(completedSerial)) {
+ mDevice->fn.DestroyRenderPass(vkDevice, renderPass, nullptr);
}
+ mRenderPassesToDelete.ClearUpTo(completedSerial);
- void FencedDeleter::DeleteWhenUnused(VkPipelineLayout layout) {
- mPipelineLayoutsToDelete.Enqueue(layout, mDevice->GetPendingCommandSerial());
+ for (VkFramebuffer framebuffer : mFramebuffersToDelete.IterateUpTo(completedSerial)) {
+ mDevice->fn.DestroyFramebuffer(vkDevice, framebuffer, nullptr);
}
+ mFramebuffersToDelete.ClearUpTo(completedSerial);
- void FencedDeleter::DeleteWhenUnused(VkQueryPool querypool) {
- mQueryPoolsToDelete.Enqueue(querypool, mDevice->GetPendingCommandSerial());
+ for (VkImageView view : mImageViewsToDelete.IterateUpTo(completedSerial)) {
+ mDevice->fn.DestroyImageView(vkDevice, view, nullptr);
}
+ mImageViewsToDelete.ClearUpTo(completedSerial);
- void FencedDeleter::DeleteWhenUnused(VkRenderPass renderPass) {
- mRenderPassesToDelete.Enqueue(renderPass, mDevice->GetPendingCommandSerial());
+ for (VkShaderModule module : mShaderModulesToDelete.IterateUpTo(completedSerial)) {
+ mDevice->fn.DestroyShaderModule(vkDevice, module, nullptr);
}
+ mShaderModulesToDelete.ClearUpTo(completedSerial);
- void FencedDeleter::DeleteWhenUnused(VkSampler sampler) {
- mSamplersToDelete.Enqueue(sampler, mDevice->GetPendingCommandSerial());
+ for (VkPipeline pipeline : mPipelinesToDelete.IterateUpTo(completedSerial)) {
+ mDevice->fn.DestroyPipeline(vkDevice, pipeline, nullptr);
}
+ mPipelinesToDelete.ClearUpTo(completedSerial);
- void FencedDeleter::DeleteWhenUnused(VkSemaphore semaphore) {
- mSemaphoresToDelete.Enqueue(semaphore, mDevice->GetPendingCommandSerial());
+ // Vulkan swapchains must be destroyed before their corresponding VkSurface
+ for (VkSwapchainKHR swapChain : mSwapChainsToDelete.IterateUpTo(completedSerial)) {
+ mDevice->fn.DestroySwapchainKHR(vkDevice, swapChain, nullptr);
+ }
+ mSwapChainsToDelete.ClearUpTo(completedSerial);
+ for (VkSurfaceKHR surface : mSurfacesToDelete.IterateUpTo(completedSerial)) {
+ mDevice->fn.DestroySurfaceKHR(instance, surface, nullptr);
}
+ mSurfacesToDelete.ClearUpTo(completedSerial);
- void FencedDeleter::DeleteWhenUnused(VkShaderModule module) {
- mShaderModulesToDelete.Enqueue(module, mDevice->GetPendingCommandSerial());
+ for (VkSemaphore semaphore : mSemaphoresToDelete.IterateUpTo(completedSerial)) {
+ mDevice->fn.DestroySemaphore(vkDevice, semaphore, nullptr);
}
+ mSemaphoresToDelete.ClearUpTo(completedSerial);
- void FencedDeleter::DeleteWhenUnused(VkSurfaceKHR surface) {
- mSurfacesToDelete.Enqueue(surface, mDevice->GetPendingCommandSerial());
+ for (VkDescriptorPool pool : mDescriptorPoolsToDelete.IterateUpTo(completedSerial)) {
+ mDevice->fn.DestroyDescriptorPool(vkDevice, pool, nullptr);
}
+ mDescriptorPoolsToDelete.ClearUpTo(completedSerial);
- void FencedDeleter::DeleteWhenUnused(VkSwapchainKHR swapChain) {
- mSwapChainsToDelete.Enqueue(swapChain, mDevice->GetPendingCommandSerial());
+ for (VkQueryPool pool : mQueryPoolsToDelete.IterateUpTo(completedSerial)) {
+ mDevice->fn.DestroyQueryPool(vkDevice, pool, nullptr);
}
+ mQueryPoolsToDelete.ClearUpTo(completedSerial);
- void FencedDeleter::Tick(ExecutionSerial completedSerial) {
- VkDevice vkDevice = mDevice->GetVkDevice();
- VkInstance instance = mDevice->GetVkInstance();
-
- // Buffers and images must be deleted before memories because it is invalid to free memory
- // that still have resources bound to it.
- for (VkBuffer buffer : mBuffersToDelete.IterateUpTo(completedSerial)) {
- mDevice->fn.DestroyBuffer(vkDevice, buffer, nullptr);
- }
- mBuffersToDelete.ClearUpTo(completedSerial);
- for (VkImage image : mImagesToDelete.IterateUpTo(completedSerial)) {
- mDevice->fn.DestroyImage(vkDevice, image, nullptr);
- }
- mImagesToDelete.ClearUpTo(completedSerial);
-
- for (VkDeviceMemory memory : mMemoriesToDelete.IterateUpTo(completedSerial)) {
- mDevice->fn.FreeMemory(vkDevice, memory, nullptr);
- }
- mMemoriesToDelete.ClearUpTo(completedSerial);
-
- for (VkPipelineLayout layout : mPipelineLayoutsToDelete.IterateUpTo(completedSerial)) {
- mDevice->fn.DestroyPipelineLayout(vkDevice, layout, nullptr);
- }
- mPipelineLayoutsToDelete.ClearUpTo(completedSerial);
-
- for (VkRenderPass renderPass : mRenderPassesToDelete.IterateUpTo(completedSerial)) {
- mDevice->fn.DestroyRenderPass(vkDevice, renderPass, nullptr);
- }
- mRenderPassesToDelete.ClearUpTo(completedSerial);
-
- for (VkFramebuffer framebuffer : mFramebuffersToDelete.IterateUpTo(completedSerial)) {
- mDevice->fn.DestroyFramebuffer(vkDevice, framebuffer, nullptr);
- }
- mFramebuffersToDelete.ClearUpTo(completedSerial);
-
- for (VkImageView view : mImageViewsToDelete.IterateUpTo(completedSerial)) {
- mDevice->fn.DestroyImageView(vkDevice, view, nullptr);
- }
- mImageViewsToDelete.ClearUpTo(completedSerial);
-
- for (VkShaderModule module : mShaderModulesToDelete.IterateUpTo(completedSerial)) {
- mDevice->fn.DestroyShaderModule(vkDevice, module, nullptr);
- }
- mShaderModulesToDelete.ClearUpTo(completedSerial);
-
- for (VkPipeline pipeline : mPipelinesToDelete.IterateUpTo(completedSerial)) {
- mDevice->fn.DestroyPipeline(vkDevice, pipeline, nullptr);
- }
- mPipelinesToDelete.ClearUpTo(completedSerial);
-
- // Vulkan swapchains must be destroyed before their corresponding VkSurface
- for (VkSwapchainKHR swapChain : mSwapChainsToDelete.IterateUpTo(completedSerial)) {
- mDevice->fn.DestroySwapchainKHR(vkDevice, swapChain, nullptr);
- }
- mSwapChainsToDelete.ClearUpTo(completedSerial);
- for (VkSurfaceKHR surface : mSurfacesToDelete.IterateUpTo(completedSerial)) {
- mDevice->fn.DestroySurfaceKHR(instance, surface, nullptr);
- }
- mSurfacesToDelete.ClearUpTo(completedSerial);
-
- for (VkSemaphore semaphore : mSemaphoresToDelete.IterateUpTo(completedSerial)) {
- mDevice->fn.DestroySemaphore(vkDevice, semaphore, nullptr);
- }
- mSemaphoresToDelete.ClearUpTo(completedSerial);
-
- for (VkDescriptorPool pool : mDescriptorPoolsToDelete.IterateUpTo(completedSerial)) {
- mDevice->fn.DestroyDescriptorPool(vkDevice, pool, nullptr);
- }
- mDescriptorPoolsToDelete.ClearUpTo(completedSerial);
-
- for (VkQueryPool pool : mQueryPoolsToDelete.IterateUpTo(completedSerial)) {
- mDevice->fn.DestroyQueryPool(vkDevice, pool, nullptr);
- }
- mQueryPoolsToDelete.ClearUpTo(completedSerial);
-
- for (VkSampler sampler : mSamplersToDelete.IterateUpTo(completedSerial)) {
- mDevice->fn.DestroySampler(vkDevice, sampler, nullptr);
- }
- mSamplersToDelete.ClearUpTo(completedSerial);
+ for (VkSampler sampler : mSamplersToDelete.IterateUpTo(completedSerial)) {
+ mDevice->fn.DestroySampler(vkDevice, sampler, nullptr);
}
+ mSamplersToDelete.ClearUpTo(completedSerial);
+}
} // namespace dawn::native::vulkan
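
FencedDeleter above never destroys a handle immediately: DeleteWhenUnused tags it with the pending command serial, and Tick() walks each queue and destroys everything whose serial has completed, observing ordering constraints such as buffers and images before their memory, and swapchains before their surface. A generic sketch of that serial-tagged queue, using std::deque in place of Dawn's SerialQueue:

#include <cstdint>
#include <deque>
#include <utility>

// Handles are enqueued with the serial of the last submit that may still use them and are
// handed back for destruction only once that serial has completed on the GPU.
template <typename Handle>
class DeferredDeletionQueue {
  public:
    void Enqueue(Handle handle, uint64_t pendingSerial) {
        mEntries.emplace_back(handle, pendingSerial);
    }

    // Invoke `destroy` on every handle whose serial is <= completedSerial.
    template <typename Destroy>
    void Tick(uint64_t completedSerial, Destroy&& destroy) {
        while (!mEntries.empty() && mEntries.front().second <= completedSerial) {
            destroy(mEntries.front().first);
            mEntries.pop_front();
        }
    }

    bool Empty() const { return mEntries.empty(); }

  private:
    // Serials arrive in non-decreasing order, so the front entry is always the oldest.
    std::deque<std::pair<Handle, uint64_t>> mEntries;
};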
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/FencedDeleter.h b/chromium/third_party/dawn/src/dawn/native/vulkan/FencedDeleter.h
index aefea7e38b8..4c90615b819 100644
--- a/chromium/third_party/dawn/src/dawn/native/vulkan/FencedDeleter.h
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/FencedDeleter.h
@@ -21,49 +21,49 @@
namespace dawn::native::vulkan {
- class Device;
+class Device;
- class FencedDeleter {
- public:
- explicit FencedDeleter(Device* device);
- ~FencedDeleter();
+class FencedDeleter {
+ public:
+ explicit FencedDeleter(Device* device);
+ ~FencedDeleter();
- void DeleteWhenUnused(VkBuffer buffer);
- void DeleteWhenUnused(VkDescriptorPool pool);
- void DeleteWhenUnused(VkDeviceMemory memory);
- void DeleteWhenUnused(VkFramebuffer framebuffer);
- void DeleteWhenUnused(VkImage image);
- void DeleteWhenUnused(VkImageView view);
- void DeleteWhenUnused(VkPipelineLayout layout);
- void DeleteWhenUnused(VkRenderPass renderPass);
- void DeleteWhenUnused(VkPipeline pipeline);
- void DeleteWhenUnused(VkQueryPool querypool);
- void DeleteWhenUnused(VkSampler sampler);
- void DeleteWhenUnused(VkSemaphore semaphore);
- void DeleteWhenUnused(VkShaderModule module);
- void DeleteWhenUnused(VkSurfaceKHR surface);
- void DeleteWhenUnused(VkSwapchainKHR swapChain);
+ void DeleteWhenUnused(VkBuffer buffer);
+ void DeleteWhenUnused(VkDescriptorPool pool);
+ void DeleteWhenUnused(VkDeviceMemory memory);
+ void DeleteWhenUnused(VkFramebuffer framebuffer);
+ void DeleteWhenUnused(VkImage image);
+ void DeleteWhenUnused(VkImageView view);
+ void DeleteWhenUnused(VkPipelineLayout layout);
+ void DeleteWhenUnused(VkRenderPass renderPass);
+ void DeleteWhenUnused(VkPipeline pipeline);
+ void DeleteWhenUnused(VkQueryPool querypool);
+ void DeleteWhenUnused(VkSampler sampler);
+ void DeleteWhenUnused(VkSemaphore semaphore);
+ void DeleteWhenUnused(VkShaderModule module);
+ void DeleteWhenUnused(VkSurfaceKHR surface);
+ void DeleteWhenUnused(VkSwapchainKHR swapChain);
- void Tick(ExecutionSerial completedSerial);
+ void Tick(ExecutionSerial completedSerial);
- private:
- Device* mDevice = nullptr;
- SerialQueue<ExecutionSerial, VkBuffer> mBuffersToDelete;
- SerialQueue<ExecutionSerial, VkDescriptorPool> mDescriptorPoolsToDelete;
- SerialQueue<ExecutionSerial, VkDeviceMemory> mMemoriesToDelete;
- SerialQueue<ExecutionSerial, VkFramebuffer> mFramebuffersToDelete;
- SerialQueue<ExecutionSerial, VkImage> mImagesToDelete;
- SerialQueue<ExecutionSerial, VkImageView> mImageViewsToDelete;
- SerialQueue<ExecutionSerial, VkPipeline> mPipelinesToDelete;
- SerialQueue<ExecutionSerial, VkPipelineLayout> mPipelineLayoutsToDelete;
- SerialQueue<ExecutionSerial, VkQueryPool> mQueryPoolsToDelete;
- SerialQueue<ExecutionSerial, VkRenderPass> mRenderPassesToDelete;
- SerialQueue<ExecutionSerial, VkSampler> mSamplersToDelete;
- SerialQueue<ExecutionSerial, VkSemaphore> mSemaphoresToDelete;
- SerialQueue<ExecutionSerial, VkShaderModule> mShaderModulesToDelete;
- SerialQueue<ExecutionSerial, VkSurfaceKHR> mSurfacesToDelete;
- SerialQueue<ExecutionSerial, VkSwapchainKHR> mSwapChainsToDelete;
- };
+ private:
+ Device* mDevice = nullptr;
+ SerialQueue<ExecutionSerial, VkBuffer> mBuffersToDelete;
+ SerialQueue<ExecutionSerial, VkDescriptorPool> mDescriptorPoolsToDelete;
+ SerialQueue<ExecutionSerial, VkDeviceMemory> mMemoriesToDelete;
+ SerialQueue<ExecutionSerial, VkFramebuffer> mFramebuffersToDelete;
+ SerialQueue<ExecutionSerial, VkImage> mImagesToDelete;
+ SerialQueue<ExecutionSerial, VkImageView> mImageViewsToDelete;
+ SerialQueue<ExecutionSerial, VkPipeline> mPipelinesToDelete;
+ SerialQueue<ExecutionSerial, VkPipelineLayout> mPipelineLayoutsToDelete;
+ SerialQueue<ExecutionSerial, VkQueryPool> mQueryPoolsToDelete;
+ SerialQueue<ExecutionSerial, VkRenderPass> mRenderPassesToDelete;
+ SerialQueue<ExecutionSerial, VkSampler> mSamplersToDelete;
+ SerialQueue<ExecutionSerial, VkSemaphore> mSemaphoresToDelete;
+ SerialQueue<ExecutionSerial, VkShaderModule> mShaderModulesToDelete;
+ SerialQueue<ExecutionSerial, VkSurfaceKHR> mSurfacesToDelete;
+ SerialQueue<ExecutionSerial, VkSwapchainKHR> mSwapChainsToDelete;
+};
} // namespace dawn::native::vulkan
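
At call sites the deleter replaces direct vkDestroy* calls: a handle that submitted work may still reference is passed to DeleteWhenUnused, and the device's Tick destroys it once the matching fence has signaled. A hypothetical single-type sketch of that call-site shape (the real class covers fifteen handle types and takes the pending serial from the device):

#include <vulkan/vulkan.h>
#include <cstdint>
#include <deque>
#include <utility>

// Minimal fenced deleter for one handle type.
struct BufferDeleter {
    std::deque<std::pair<VkBuffer, uint64_t>> pending;

    // Record the buffer together with the serial of the last submit that may reference it.
    void DeleteWhenUnused(VkBuffer buffer, uint64_t pendingSerial) {
        pending.emplace_back(buffer, pendingSerial);
    }

    // Called each tick with the last serial known to have finished on the GPU.
    void Tick(VkDevice device, uint64_t completedSerial) {
        while (!pending.empty() && pending.front().second <= completedSerial) {
            vkDestroyBuffer(device, pending.front().first, nullptr);
            pending.pop_front();
        }
    }
};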
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/Forward.h b/chromium/third_party/dawn/src/dawn/native/vulkan/Forward.h
index e6ac30be352..f541ebb3ab0 100644
--- a/chromium/third_party/dawn/src/dawn/native/vulkan/Forward.h
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/Forward.h
@@ -19,50 +19,52 @@
namespace dawn::native::vulkan {
- class Adapter;
- class BindGroup;
- class BindGroupLayout;
- class Buffer;
- class CommandBuffer;
- class ComputePipeline;
- class Device;
- class PipelineLayout;
- class QuerySet;
- class Queue;
- class RenderPipeline;
- class ResourceHeap;
- class Sampler;
- class ShaderModule;
- class StagingBuffer;
- class SwapChain;
- class Texture;
- class TextureView;
+class Adapter;
+class BindGroup;
+class BindGroupLayout;
+class Buffer;
+class CommandBuffer;
+class ComputePipeline;
+class Device;
+class PipelineCache;
+class PipelineLayout;
+class QuerySet;
+class Queue;
+class RenderPipeline;
+class ResourceHeap;
+class Sampler;
+class ShaderModule;
+class StagingBuffer;
+class SwapChain;
+class Texture;
+class TextureView;
- struct VulkanBackendTraits {
- using AdapterType = Adapter;
- using BindGroupType = BindGroup;
- using BindGroupLayoutType = BindGroupLayout;
- using BufferType = Buffer;
- using CommandBufferType = CommandBuffer;
- using ComputePipelineType = ComputePipeline;
- using DeviceType = Device;
- using PipelineLayoutType = PipelineLayout;
- using QuerySetType = QuerySet;
- using QueueType = Queue;
- using RenderPipelineType = RenderPipeline;
- using ResourceHeapType = ResourceHeap;
- using SamplerType = Sampler;
- using ShaderModuleType = ShaderModule;
- using StagingBufferType = StagingBuffer;
- using SwapChainType = SwapChain;
- using TextureType = Texture;
- using TextureViewType = TextureView;
- };
+struct VulkanBackendTraits {
+ using AdapterType = Adapter;
+ using BindGroupType = BindGroup;
+ using BindGroupLayoutType = BindGroupLayout;
+ using BufferType = Buffer;
+ using CommandBufferType = CommandBuffer;
+ using ComputePipelineType = ComputePipeline;
+ using DeviceType = Device;
+ using PipelineCacheType = PipelineCache;
+ using PipelineLayoutType = PipelineLayout;
+ using QuerySetType = QuerySet;
+ using QueueType = Queue;
+ using RenderPipelineType = RenderPipeline;
+ using ResourceHeapType = ResourceHeap;
+ using SamplerType = Sampler;
+ using ShaderModuleType = ShaderModule;
+ using StagingBufferType = StagingBuffer;
+ using SwapChainType = SwapChain;
+ using TextureType = Texture;
+ using TextureViewType = TextureView;
+};
- template <typename T>
- auto ToBackend(T&& common) -> decltype(ToBackendBase<VulkanBackendTraits>(common)) {
- return ToBackendBase<VulkanBackendTraits>(common);
- }
+template <typename T>
+auto ToBackend(T&& common) -> decltype(ToBackendBase<VulkanBackendTraits>(common)) {
+ return ToBackendBase<VulkanBackendTraits>(common);
+}
} // namespace dawn::native::vulkan
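
Forward.h above wires the Vulkan backend into Dawn's ToBackend() helper: VulkanBackendTraits maps each frontend base type to its Vulkan subclass and ToBackendBase performs the downcast. A minimal sketch of the same traits-based downcast with hypothetical types (not Dawn's actual ToBackendBase, which covers every object type):

// Hypothetical frontend/backend pair to illustrate the traits-driven downcast.
struct DeviceBaseSketch {
    virtual ~DeviceBaseSketch() = default;
};
struct VulkanDeviceSketch : DeviceBaseSketch {
    int vulkanOnlyState = 0;
};

// The traits struct names the backend type for each frontend type.
struct VulkanTraitsSketch {
    using DeviceType = VulkanDeviceSketch;
};

// Generic downcast: given a frontend pointer, return the backend type chosen by the traits.
template <typename Traits>
typename Traits::DeviceType* ToBackendSketch(DeviceBaseSketch* device) {
    return static_cast<typename Traits::DeviceType*>(device);
}

// Usage: backend code recovers its concrete type without dynamic_cast, relying on the
// invariant that the Vulkan backend only ever sees objects it created itself.
// VulkanDeviceSketch* vkDevice = ToBackendSketch<VulkanTraitsSketch>(frontendDevice);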
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/NativeSwapChainImplVk.cpp b/chromium/third_party/dawn/src/dawn/native/vulkan/NativeSwapChainImplVk.cpp
index e16ae2caeca..6a6f4e2dbc3 100644
--- a/chromium/third_party/dawn/src/dawn/native/vulkan/NativeSwapChainImplVk.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/NativeSwapChainImplVk.cpp
@@ -14,212 +14,212 @@
#include "dawn/native/vulkan/NativeSwapChainImplVk.h"
+#include <limits>
+
#include "dawn/native/vulkan/DeviceVk.h"
#include "dawn/native/vulkan/FencedDeleter.h"
#include "dawn/native/vulkan/TextureVk.h"
-#include <limits>
-
namespace dawn::native::vulkan {
- namespace {
-
- bool chooseSwapPresentMode(const std::vector<VkPresentModeKHR>& availablePresentModes,
- bool turnOffVsync,
- VkPresentModeKHR* presentMode) {
- if (turnOffVsync) {
- for (const auto& availablePresentMode : availablePresentModes) {
- if (availablePresentMode == VK_PRESENT_MODE_IMMEDIATE_KHR) {
- *presentMode = availablePresentMode;
- return true;
- }
- }
- return false;
- }
+namespace {
- *presentMode = VK_PRESENT_MODE_FIFO_KHR;
- return true;
- }
-
- bool ChooseSurfaceConfig(const VulkanSurfaceInfo& info,
- NativeSwapChainImpl::ChosenConfig* config,
- bool turnOffVsync) {
- VkPresentModeKHR presentMode;
- if (!chooseSwapPresentMode(info.presentModes, turnOffVsync, &presentMode)) {
- return false;
+bool chooseSwapPresentMode(const std::vector<VkPresentModeKHR>& availablePresentModes,
+ bool turnOffVsync,
+ VkPresentModeKHR* presentMode) {
+ if (turnOffVsync) {
+ for (const auto& availablePresentMode : availablePresentModes) {
+ if (availablePresentMode == VK_PRESENT_MODE_IMMEDIATE_KHR) {
+ *presentMode = availablePresentMode;
+ return true;
}
- // TODO(crbug.com/dawn/269): For now this is hardcoded to what works with one NVIDIA
- // driver. Need to generalize
- config->nativeFormat = VK_FORMAT_B8G8R8A8_UNORM;
- config->colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
- config->format = wgpu::TextureFormat::BGRA8Unorm;
- config->minImageCount = 3;
- // TODO(crbug.com/dawn/269): This is upside down compared to what we want, at least
- // on Linux
- config->preTransform = info.capabilities.currentTransform;
- config->presentMode = presentMode;
- config->compositeAlpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
-
- return true;
}
- } // anonymous namespace
-
- NativeSwapChainImpl::NativeSwapChainImpl(Device* device, VkSurfaceKHR surface)
- : mSurface(surface), mDevice(device) {
- // Call this immediately, so that BackendBinding::GetPreferredSwapChainTextureFormat
- // will return a correct result before a SwapChain is created.
- UpdateSurfaceConfig();
+ return false;
}
- NativeSwapChainImpl::~NativeSwapChainImpl() {
- if (mSwapChain != VK_NULL_HANDLE) {
- mDevice->GetFencedDeleter()->DeleteWhenUnused(mSwapChain);
- mSwapChain = VK_NULL_HANDLE;
- }
- if (mSurface != VK_NULL_HANDLE) {
- mDevice->GetFencedDeleter()->DeleteWhenUnused(mSurface);
- mSurface = VK_NULL_HANDLE;
- }
+ *presentMode = VK_PRESENT_MODE_FIFO_KHR;
+ return true;
+}
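
chooseSwapPresentMode above prefers VK_PRESENT_MODE_IMMEDIATE_KHR when vsync is turned off and otherwise falls back to VK_PRESENT_MODE_FIFO_KHR, which the Vulkan spec requires every surface to support. The list it searches comes from VulkanSurfaceInfo; a sketch of gathering that list directly from Vulkan instead (return codes ignored for brevity):

#include <vulkan/vulkan.h>
#include <vector>

// Query the present modes a surface supports. FIFO is always among them per the spec,
// so callers can safely fall back to it when a preferred mode is missing.
std::vector<VkPresentModeKHR> QueryPresentModes(VkPhysicalDevice physicalDevice,
                                                VkSurfaceKHR surface) {
    uint32_t count = 0;
    vkGetPhysicalDeviceSurfacePresentModesKHR(physicalDevice, surface, &count, nullptr);

    std::vector<VkPresentModeKHR> modes(count);
    vkGetPhysicalDeviceSurfacePresentModesKHR(physicalDevice, surface, &count, modes.data());
    return modes;
}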
+
+bool ChooseSurfaceConfig(const VulkanSurfaceInfo& info,
+ NativeSwapChainImpl::ChosenConfig* config,
+ bool turnOffVsync) {
+ VkPresentModeKHR presentMode;
+ if (!chooseSwapPresentMode(info.presentModes, turnOffVsync, &presentMode)) {
+ return false;
}
+ // TODO(crbug.com/dawn/269): For now this is hardcoded to what works with one NVIDIA
+ // driver. Need to generalize
+ config->nativeFormat = VK_FORMAT_B8G8R8A8_UNORM;
+ config->colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
+ config->format = wgpu::TextureFormat::BGRA8Unorm;
+ config->minImageCount = 3;
+ // TODO(crbug.com/dawn/269): This is upside down compared to what we want, at least
+ // on Linux
+ config->preTransform = info.capabilities.currentTransform;
+ config->presentMode = presentMode;
+ config->compositeAlpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
+
+ return true;
+}
+} // anonymous namespace
+
+NativeSwapChainImpl::NativeSwapChainImpl(Device* device, VkSurfaceKHR surface)
+ : mSurface(surface), mDevice(device) {
+ // Call this immediately, so that BackendBinding::GetPreferredSwapChainTextureFormat
+ // will return a correct result before a SwapChain is created.
+ UpdateSurfaceConfig();
+}
+
+NativeSwapChainImpl::~NativeSwapChainImpl() {
+ if (mSwapChain != VK_NULL_HANDLE) {
+ mDevice->GetFencedDeleter()->DeleteWhenUnused(mSwapChain);
+ mSwapChain = VK_NULL_HANDLE;
+ }
+ if (mSurface != VK_NULL_HANDLE) {
+ mDevice->GetFencedDeleter()->DeleteWhenUnused(mSurface);
+ mSurface = VK_NULL_HANDLE;
+ }
+}
- void NativeSwapChainImpl::UpdateSurfaceConfig() {
- if (mDevice->ConsumedError(GatherSurfaceInfo(*ToBackend(mDevice->GetAdapter()), mSurface),
- &mInfo)) {
- ASSERT(false);
- }
+void NativeSwapChainImpl::UpdateSurfaceConfig() {
+ if (mDevice->ConsumedError(GatherSurfaceInfo(*ToBackend(mDevice->GetAdapter()), mSurface),
+ &mInfo)) {
+ ASSERT(false);
+ }
- if (!ChooseSurfaceConfig(mInfo, &mConfig, mDevice->IsToggleEnabled(Toggle::TurnOffVsync))) {
- ASSERT(false);
- }
+ if (!ChooseSurfaceConfig(mInfo, &mConfig, mDevice->IsToggleEnabled(Toggle::TurnOffVsync))) {
+ ASSERT(false);
+ }
+}
+
+void NativeSwapChainImpl::Init(DawnWSIContextVulkan* /*context*/) {
+ UpdateSurfaceConfig();
+}
+
+DawnSwapChainError NativeSwapChainImpl::Configure(WGPUTextureFormat format,
+ WGPUTextureUsage usage,
+ uint32_t width,
+ uint32_t height) {
+ UpdateSurfaceConfig();
+
+ ASSERT(mInfo.capabilities.minImageExtent.width <= width);
+ ASSERT(mInfo.capabilities.maxImageExtent.width >= width);
+ ASSERT(mInfo.capabilities.minImageExtent.height <= height);
+ ASSERT(mInfo.capabilities.maxImageExtent.height >= height);
+
+ ASSERT(format == static_cast<WGPUTextureFormat>(GetPreferredFormat()));
+ // TODO(crbug.com/dawn/269): need to check usage works too
+
+ // Create the swapchain with the configuration we chose
+ VkSwapchainKHR oldSwapchain = mSwapChain;
+ VkSwapchainCreateInfoKHR createInfo;
+ createInfo.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
+ createInfo.pNext = nullptr;
+ createInfo.flags = 0;
+ createInfo.surface = mSurface;
+ createInfo.minImageCount = mConfig.minImageCount;
+ createInfo.imageFormat = mConfig.nativeFormat;
+ createInfo.imageColorSpace = mConfig.colorSpace;
+ createInfo.imageExtent.width = width;
+ createInfo.imageExtent.height = height;
+ createInfo.imageArrayLayers = 1;
+ createInfo.imageUsage = VulkanImageUsage(static_cast<wgpu::TextureUsage>(usage),
+ mDevice->GetValidInternalFormat(mConfig.format));
+ createInfo.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
+ createInfo.queueFamilyIndexCount = 0;
+ createInfo.pQueueFamilyIndices = nullptr;
+ createInfo.preTransform = mConfig.preTransform;
+ createInfo.compositeAlpha = mConfig.compositeAlpha;
+ createInfo.presentMode = mConfig.presentMode;
+ createInfo.clipped = false;
+ createInfo.oldSwapchain = oldSwapchain;
+
+ if (mDevice->fn.CreateSwapchainKHR(mDevice->GetVkDevice(), &createInfo, nullptr,
+ &*mSwapChain) != VK_SUCCESS) {
+ ASSERT(false);
}
- void NativeSwapChainImpl::Init(DawnWSIContextVulkan* /*context*/) {
- UpdateSurfaceConfig();
+ // Gather the swapchain's images. Implementations are allowed to return more images than the
+ // number we asked for.
+ uint32_t count = 0;
+ if (mDevice->fn.GetSwapchainImagesKHR(mDevice->GetVkDevice(), mSwapChain, &count, nullptr) !=
+ VK_SUCCESS) {
+ ASSERT(false);
}
- DawnSwapChainError NativeSwapChainImpl::Configure(WGPUTextureFormat format,
- WGPUTextureUsage usage,
- uint32_t width,
- uint32_t height) {
- UpdateSurfaceConfig();
+ ASSERT(count >= mConfig.minImageCount);
+ mSwapChainImages.resize(count);
+ if (mDevice->fn.GetSwapchainImagesKHR(mDevice->GetVkDevice(), mSwapChain, &count,
+ AsVkArray(mSwapChainImages.data())) != VK_SUCCESS) {
+ ASSERT(false);
+ }
- ASSERT(mInfo.capabilities.minImageExtent.width <= width);
- ASSERT(mInfo.capabilities.maxImageExtent.width >= width);
- ASSERT(mInfo.capabilities.minImageExtent.height <= height);
- ASSERT(mInfo.capabilities.maxImageExtent.height >= height);
+ if (oldSwapchain != VK_NULL_HANDLE) {
+ mDevice->GetFencedDeleter()->DeleteWhenUnused(oldSwapchain);
+ }
- ASSERT(format == static_cast<WGPUTextureFormat>(GetPreferredFormat()));
- // TODO(crbug.com/dawn/269): need to check usage works too
+ return DAWN_SWAP_CHAIN_NO_ERROR;
+}
- // Create the swapchain with the configuration we chose
- VkSwapchainKHR oldSwapchain = mSwapChain;
- VkSwapchainCreateInfoKHR createInfo;
- createInfo.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
+DawnSwapChainError NativeSwapChainImpl::GetNextTexture(DawnSwapChainNextTexture* nextTexture) {
+ // Transiently create a semaphore that will be signaled when the presentation engine is done
+ // with the swapchain image. Further operations on the image will wait for this semaphore.
+ VkSemaphore semaphore = VK_NULL_HANDLE;
+ {
+ VkSemaphoreCreateInfo createInfo;
+ createInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
createInfo.pNext = nullptr;
createInfo.flags = 0;
- createInfo.surface = mSurface;
- createInfo.minImageCount = mConfig.minImageCount;
- createInfo.imageFormat = mConfig.nativeFormat;
- createInfo.imageColorSpace = mConfig.colorSpace;
- createInfo.imageExtent.width = width;
- createInfo.imageExtent.height = height;
- createInfo.imageArrayLayers = 1;
- createInfo.imageUsage = VulkanImageUsage(static_cast<wgpu::TextureUsage>(usage),
- mDevice->GetValidInternalFormat(mConfig.format));
- createInfo.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
- createInfo.queueFamilyIndexCount = 0;
- createInfo.pQueueFamilyIndices = nullptr;
- createInfo.preTransform = mConfig.preTransform;
- createInfo.compositeAlpha = mConfig.compositeAlpha;
- createInfo.presentMode = mConfig.presentMode;
- createInfo.clipped = false;
- createInfo.oldSwapchain = oldSwapchain;
-
- if (mDevice->fn.CreateSwapchainKHR(mDevice->GetVkDevice(), &createInfo, nullptr,
- &*mSwapChain) != VK_SUCCESS) {
+ if (mDevice->fn.CreateSemaphore(mDevice->GetVkDevice(), &createInfo, nullptr,
+ &*semaphore) != VK_SUCCESS) {
ASSERT(false);
}
-
- // Gather the swapchain's images. Implementations are allowed to return more images than the
- // number we asked for.
- uint32_t count = 0;
- if (mDevice->fn.GetSwapchainImagesKHR(mDevice->GetVkDevice(), mSwapChain, &count,
- nullptr) != VK_SUCCESS) {
- ASSERT(false);
- }
-
- ASSERT(count >= mConfig.minImageCount);
- mSwapChainImages.resize(count);
- if (mDevice->fn.GetSwapchainImagesKHR(mDevice->GetVkDevice(), mSwapChain, &count,
- AsVkArray(mSwapChainImages.data())) != VK_SUCCESS) {
- ASSERT(false);
- }
-
- if (oldSwapchain != VK_NULL_HANDLE) {
- mDevice->GetFencedDeleter()->DeleteWhenUnused(oldSwapchain);
- }
-
- return DAWN_SWAP_CHAIN_NO_ERROR;
}
- DawnSwapChainError NativeSwapChainImpl::GetNextTexture(DawnSwapChainNextTexture* nextTexture) {
- // Transiently create a semaphore that will be signaled when the presentation engine is done
- // with the swapchain image. Further operations on the image will wait for this semaphore.
- VkSemaphore semaphore = VK_NULL_HANDLE;
- {
- VkSemaphoreCreateInfo createInfo;
- createInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
- createInfo.pNext = nullptr;
- createInfo.flags = 0;
- if (mDevice->fn.CreateSemaphore(mDevice->GetVkDevice(), &createInfo, nullptr,
- &*semaphore) != VK_SUCCESS) {
- ASSERT(false);
- }
- }
-
- if (mDevice->fn.AcquireNextImageKHR(mDevice->GetVkDevice(), mSwapChain,
- std::numeric_limits<uint64_t>::max(), semaphore,
- VkFence{}, &mLastImageIndex) != VK_SUCCESS) {
- ASSERT(false);
- }
+ if (mDevice->fn.AcquireNextImageKHR(mDevice->GetVkDevice(), mSwapChain,
+ std::numeric_limits<uint64_t>::max(), semaphore, VkFence{},
+ &mLastImageIndex) != VK_SUCCESS) {
+ ASSERT(false);
+ }
- nextTexture->texture.u64 =
-#if defined(DAWN_PLATFORM_64_BIT)
- reinterpret_cast<uint64_t>
+ nextTexture->texture.u64 =
+#if DAWN_PLATFORM_IS(64_BIT)
+ reinterpret_cast<uint64_t>
#endif
- (*mSwapChainImages[mLastImageIndex]);
- mDevice->GetPendingRecordingContext()->waitSemaphores.push_back(semaphore);
-
- return DAWN_SWAP_CHAIN_NO_ERROR;
+ (*mSwapChainImages[mLastImageIndex]);
+ mDevice->GetPendingRecordingContext()->waitSemaphores.push_back(semaphore);
+
+ return DAWN_SWAP_CHAIN_NO_ERROR;
+}
+
+DawnSwapChainError NativeSwapChainImpl::Present() {
+ // This assumes that the image has already been transitioned to the PRESENT layout and
+ // writes were made available to the stage.
+
+ // Assuming that the present queue is the same as the graphics queue, the proper
+ // synchronization has already been done on the queue so we don't need to wait on any
+ // semaphores.
+ VkPresentInfoKHR presentInfo;
+ presentInfo.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR;
+ presentInfo.pNext = nullptr;
+ presentInfo.waitSemaphoreCount = 0;
+ presentInfo.pWaitSemaphores = nullptr;
+ presentInfo.swapchainCount = 1;
+ presentInfo.pSwapchains = &*mSwapChain;
+ presentInfo.pImageIndices = &mLastImageIndex;
+ presentInfo.pResults = nullptr;
+
+ VkQueue queue = mDevice->GetQueue();
+ if (mDevice->fn.QueuePresentKHR(queue, &presentInfo) != VK_SUCCESS) {
+ ASSERT(false);
}
- DawnSwapChainError NativeSwapChainImpl::Present() {
- // This assumes that the image has already been transitioned to the PRESENT layout and
- // writes were made available to the stage.
-
- // Assuming that the present queue is the same as the graphics queue, the proper
- // synchronization has already been done on the queue so we don't need to wait on any
- // semaphores.
- VkPresentInfoKHR presentInfo;
- presentInfo.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR;
- presentInfo.pNext = nullptr;
- presentInfo.waitSemaphoreCount = 0;
- presentInfo.pWaitSemaphores = nullptr;
- presentInfo.swapchainCount = 1;
- presentInfo.pSwapchains = &*mSwapChain;
- presentInfo.pImageIndices = &mLastImageIndex;
- presentInfo.pResults = nullptr;
-
- VkQueue queue = mDevice->GetQueue();
- if (mDevice->fn.QueuePresentKHR(queue, &presentInfo) != VK_SUCCESS) {
- ASSERT(false);
- }
+ return DAWN_SWAP_CHAIN_NO_ERROR;
+}
- return DAWN_SWAP_CHAIN_NO_ERROR;
- }
-
- wgpu::TextureFormat NativeSwapChainImpl::GetPreferredFormat() const {
- return mConfig.format;
- }
+wgpu::TextureFormat NativeSwapChainImpl::GetPreferredFormat() const {
+ return mConfig.format;
+}
} // namespace dawn::native::vulkan
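
For reference, the image-gathering step above follows the standard Vulkan two-call pattern: query the count first, then fetch the handles. A minimal standalone sketch of that pattern, using raw Vulkan entry points rather than Dawn's fn dispatch table (error handling omitted; names are illustrative only):

    #include <vector>
    #include <vulkan/vulkan.h>

    // Returns the images backing a swapchain. The driver may create more images
    // than the minImageCount requested at swapchain creation time.
    std::vector<VkImage> GatherSwapchainImages(VkDevice device, VkSwapchainKHR swapchain) {
        uint32_t count = 0;
        vkGetSwapchainImagesKHR(device, swapchain, &count, nullptr);        // query the count
        std::vector<VkImage> images(count);
        vkGetSwapchainImagesKHR(device, swapchain, &count, images.data());  // fetch the handles
        return images;
    }
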
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/NativeSwapChainImplVk.h b/chromium/third_party/dawn/src/dawn/native/vulkan/NativeSwapChainImplVk.h
index 5dc309fd2ad..db2247206b4 100644
--- a/chromium/third_party/dawn/src/dawn/native/vulkan/NativeSwapChainImplVk.h
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/NativeSwapChainImplVk.h
@@ -15,56 +15,57 @@
#ifndef SRC_DAWN_NATIVE_VULKAN_NATIVESWAPCHAINIMPLVK_H_
#define SRC_DAWN_NATIVE_VULKAN_NATIVESWAPCHAINIMPLVK_H_
-#include "dawn/native/vulkan/VulkanInfo.h"
+#include <vector>
#include "dawn/dawn_wsi.h"
#include "dawn/native/dawn_platform.h"
+#include "dawn/native/vulkan/VulkanInfo.h"
namespace dawn::native::vulkan {
- class Device;
-
- class NativeSwapChainImpl {
- public:
- using WSIContext = DawnWSIContextVulkan;
-
- NativeSwapChainImpl(Device* device, VkSurfaceKHR surface);
- ~NativeSwapChainImpl();
-
- void Init(DawnWSIContextVulkan* context);
- DawnSwapChainError Configure(WGPUTextureFormat format,
- WGPUTextureUsage,
- uint32_t width,
- uint32_t height);
- DawnSwapChainError GetNextTexture(DawnSwapChainNextTexture* nextTexture);
- DawnSwapChainError Present();
-
- wgpu::TextureFormat GetPreferredFormat() const;
-
- struct ChosenConfig {
- VkFormat nativeFormat;
- wgpu::TextureFormat format;
- VkColorSpaceKHR colorSpace;
- VkSurfaceTransformFlagBitsKHR preTransform;
- uint32_t minImageCount;
- VkPresentModeKHR presentMode;
- VkCompositeAlphaFlagBitsKHR compositeAlpha;
- };
+class Device;
+
+class NativeSwapChainImpl {
+ public:
+ using WSIContext = DawnWSIContextVulkan;
+
+ NativeSwapChainImpl(Device* device, VkSurfaceKHR surface);
+ ~NativeSwapChainImpl();
+
+ void Init(DawnWSIContextVulkan* context);
+ DawnSwapChainError Configure(WGPUTextureFormat format,
+ WGPUTextureUsage,
+ uint32_t width,
+ uint32_t height);
+ DawnSwapChainError GetNextTexture(DawnSwapChainNextTexture* nextTexture);
+ DawnSwapChainError Present();
+
+ wgpu::TextureFormat GetPreferredFormat() const;
+
+ struct ChosenConfig {
+ VkFormat nativeFormat;
+ wgpu::TextureFormat format;
+ VkColorSpaceKHR colorSpace;
+ VkSurfaceTransformFlagBitsKHR preTransform;
+ uint32_t minImageCount;
+ VkPresentModeKHR presentMode;
+ VkCompositeAlphaFlagBitsKHR compositeAlpha;
+ };
- private:
- void UpdateSurfaceConfig();
+ private:
+ void UpdateSurfaceConfig();
- VkSurfaceKHR mSurface = VK_NULL_HANDLE;
- VkSwapchainKHR mSwapChain = VK_NULL_HANDLE;
- std::vector<VkImage> mSwapChainImages;
- uint32_t mLastImageIndex = 0;
+ VkSurfaceKHR mSurface = VK_NULL_HANDLE;
+ VkSwapchainKHR mSwapChain = VK_NULL_HANDLE;
+ std::vector<VkImage> mSwapChainImages;
+ uint32_t mLastImageIndex = 0;
- VulkanSurfaceInfo mInfo;
+ VulkanSurfaceInfo mInfo;
- ChosenConfig mConfig;
+ ChosenConfig mConfig;
- Device* mDevice = nullptr;
- };
+ Device* mDevice = nullptr;
+};
} // namespace dawn::native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/PipelineCacheVk.cpp b/chromium/third_party/dawn/src/dawn/native/vulkan/PipelineCacheVk.cpp
new file mode 100644
index 00000000000..28aedbcd860
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/PipelineCacheVk.cpp
@@ -0,0 +1,91 @@
+// Copyright 2022 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/vulkan/PipelineCacheVk.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/native/Device.h"
+#include "dawn/native/Error.h"
+#include "dawn/native/vulkan/DeviceVk.h"
+#include "dawn/native/vulkan/FencedDeleter.h"
+#include "dawn/native/vulkan/VulkanError.h"
+
+namespace dawn::native::vulkan {
+
+// static
+Ref<PipelineCache> PipelineCache::Create(DeviceBase* device, const CacheKey& key) {
+ Ref<PipelineCache> cache = AcquireRef(new PipelineCache(device, key));
+ cache->Initialize();
+ return cache;
+}
+
+PipelineCache::PipelineCache(DeviceBase* device, const CacheKey& key)
+ : PipelineCacheBase(device->GetBlobCache(), key), mDevice(device) {}
+
+PipelineCache::~PipelineCache() {
+ if (mHandle == VK_NULL_HANDLE) {
+ return;
+ }
+ Device* device = ToBackend(GetDevice());
+ device->fn.DestroyPipelineCache(device->GetVkDevice(), mHandle, nullptr);
+ mHandle = VK_NULL_HANDLE;
+}
+
+DeviceBase* PipelineCache::GetDevice() const {
+ return mDevice;
+}
+
+VkPipelineCache PipelineCache::GetHandle() const {
+ return mHandle;
+}
+
+MaybeError PipelineCache::SerializeToBlobImpl(Blob* blob) {
+ if (mHandle == VK_NULL_HANDLE) {
+ // The pipeline cache wasn't created successfully, so there is nothing to serialize.
+ return {};
+ }
+
+ size_t bufferSize;
+ Device* device = ToBackend(GetDevice());
+ DAWN_TRY(CheckVkSuccess(
+ device->fn.GetPipelineCacheData(device->GetVkDevice(), mHandle, &bufferSize, nullptr),
+ "GetPipelineCacheData"));
+ if (bufferSize == 0) {
+ return {};
+ }
+ *blob = CreateBlob(bufferSize);
+ DAWN_TRY(CheckVkSuccess(
+ device->fn.GetPipelineCacheData(device->GetVkDevice(), mHandle, &bufferSize, blob->Data()),
+ "GetPipelineCacheData"));
+ return {};
+}
+
+void PipelineCache::Initialize() {
+ Blob blob = PipelineCacheBase::Initialize();
+
+ VkPipelineCacheCreateInfo createInfo;
+ createInfo.flags = 0;
+ createInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
+ createInfo.pNext = nullptr;
+ createInfo.initialDataSize = blob.Size();
+ createInfo.pInitialData = blob.Data();
+
+ Device* device = ToBackend(GetDevice());
+ mHandle = VK_NULL_HANDLE;
+ GetDevice()->ConsumedError(CheckVkSuccess(
+ device->fn.CreatePipelineCache(device->GetVkDevice(), &createInfo, nullptr, &*mHandle),
+ "CreatePipelineCache"));
+}
+
+} // namespace dawn::native::vulkan
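
SerializeToBlobImpl above relies on the usual two-call vkGetPipelineCacheData pattern: ask for the size, then read the bytes into a buffer that can later seed VkPipelineCacheCreateInfo::pInitialData. A minimal sketch with raw Vulkan calls (Dawn's Blob and error types are not assumed; the function name is illustrative):

    #include <cstdint>
    #include <vector>
    #include <vulkan/vulkan.h>

    // Serializes a pipeline cache to a byte vector. The result can be fed back
    // through VkPipelineCacheCreateInfo::pInitialData to warm a new cache.
    std::vector<uint8_t> SerializePipelineCache(VkDevice device, VkPipelineCache cache) {
        size_t size = 0;
        vkGetPipelineCacheData(device, cache, &size, nullptr);      // query the required size
        std::vector<uint8_t> data(size);
        vkGetPipelineCacheData(device, cache, &size, data.data());  // read the data
        data.resize(size);  // the driver may report a smaller size on the second call
        return data;
    }
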
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/PipelineCacheVk.h b/chromium/third_party/dawn/src/dawn/native/vulkan/PipelineCacheVk.h
new file mode 100644
index 00000000000..605991f3b7d
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/PipelineCacheVk.h
@@ -0,0 +1,49 @@
+// Copyright 2022 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef SRC_DAWN_NATIVE_VULKAN_PIPELINECACHEVK_H_
+#define SRC_DAWN_NATIVE_VULKAN_PIPELINECACHEVK_H_
+
+#include "dawn/native/ObjectBase.h"
+#include "dawn/native/PipelineCache.h"
+
+#include "dawn/common/vulkan_platform.h"
+
+namespace dawn::native {
+class DeviceBase;
+}
+
+namespace dawn::native::vulkan {
+
+class PipelineCache final : public PipelineCacheBase {
+ public:
+ static Ref<PipelineCache> Create(DeviceBase* device, const CacheKey& key);
+
+ DeviceBase* GetDevice() const;
+ VkPipelineCache GetHandle() const;
+
+ private:
+ explicit PipelineCache(DeviceBase* device, const CacheKey& key);
+ ~PipelineCache() override;
+
+ void Initialize();
+ MaybeError SerializeToBlobImpl(Blob* blob) override;
+
+ DeviceBase* mDevice;
+ VkPipelineCache mHandle = VK_NULL_HANDLE;
+};
+
+} // namespace dawn::native::vulkan
+
+#endif // SRC_DAWN_NATIVE_VULKAN_PIPELINECACHEVK_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/PipelineLayoutVk.cpp b/chromium/third_party/dawn/src/dawn/native/vulkan/PipelineLayoutVk.cpp
index 560731b856b..48ffc0f0008 100644
--- a/chromium/third_party/dawn/src/dawn/native/vulkan/PipelineLayoutVk.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/PipelineLayoutVk.cpp
@@ -23,67 +23,67 @@
namespace dawn::native::vulkan {
- // static
- ResultOrError<Ref<PipelineLayout>> PipelineLayout::Create(
- Device* device,
- const PipelineLayoutDescriptor* descriptor) {
- Ref<PipelineLayout> layout = AcquireRef(new PipelineLayout(device, descriptor));
- DAWN_TRY(layout->Initialize());
- return layout;
- }
+// static
+ResultOrError<Ref<PipelineLayout>> PipelineLayout::Create(
+ Device* device,
+ const PipelineLayoutDescriptor* descriptor) {
+ Ref<PipelineLayout> layout = AcquireRef(new PipelineLayout(device, descriptor));
+ DAWN_TRY(layout->Initialize());
+ return layout;
+}
- MaybeError PipelineLayout::Initialize() {
- // Compute the array of VkDescriptorSetLayouts that will be chained in the create info.
- // TODO(crbug.com/dawn/277) Vulkan doesn't allow holes in this array, should we expose
- // this constraints at the Dawn level?
- uint32_t numSetLayouts = 0;
- std::array<VkDescriptorSetLayout, kMaxBindGroups> setLayouts;
- std::array<const CachedObject*, kMaxBindGroups> cachedObjects;
- for (BindGroupIndex setIndex : IterateBitSet(GetBindGroupLayoutsMask())) {
- const BindGroupLayoutBase* bindGroupLayout = GetBindGroupLayout(setIndex);
- setLayouts[numSetLayouts] = ToBackend(bindGroupLayout)->GetHandle();
- cachedObjects[numSetLayouts] = bindGroupLayout;
- numSetLayouts++;
- }
+MaybeError PipelineLayout::Initialize() {
+ // Compute the array of VkDescriptorSetLayouts that will be chained in the create info.
+ // TODO(crbug.com/dawn/277) Vulkan doesn't allow holes in this array, should we expose
+ // this constraint at the Dawn level?
+ uint32_t numSetLayouts = 0;
+ std::array<VkDescriptorSetLayout, kMaxBindGroups> setLayouts;
+ std::array<const CachedObject*, kMaxBindGroups> cachedObjects;
+ for (BindGroupIndex setIndex : IterateBitSet(GetBindGroupLayoutsMask())) {
+ const BindGroupLayoutBase* bindGroupLayout = GetBindGroupLayout(setIndex);
+ setLayouts[numSetLayouts] = ToBackend(bindGroupLayout)->GetHandle();
+ cachedObjects[numSetLayouts] = bindGroupLayout;
+ numSetLayouts++;
+ }
- VkPipelineLayoutCreateInfo createInfo;
- createInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
- createInfo.pNext = nullptr;
- createInfo.flags = 0;
- createInfo.setLayoutCount = numSetLayouts;
- createInfo.pSetLayouts = AsVkArray(setLayouts.data());
- createInfo.pushConstantRangeCount = 0;
- createInfo.pPushConstantRanges = nullptr;
+ VkPipelineLayoutCreateInfo createInfo;
+ createInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
+ createInfo.pNext = nullptr;
+ createInfo.flags = 0;
+ createInfo.setLayoutCount = numSetLayouts;
+ createInfo.pSetLayouts = AsVkArray(setLayouts.data());
+ createInfo.pushConstantRangeCount = 0;
+ createInfo.pPushConstantRanges = nullptr;
- // Record cache key information now since the createInfo is not stored.
- GetCacheKey()->RecordIterable(cachedObjects.data(), numSetLayouts).Record(createInfo);
+ // Record cache key information now since the createInfo is not stored.
+ mCacheKey.RecordIterable(cachedObjects.data(), numSetLayouts).Record(createInfo);
- Device* device = ToBackend(GetDevice());
- DAWN_TRY(CheckVkSuccess(
- device->fn.CreatePipelineLayout(device->GetVkDevice(), &createInfo, nullptr, &*mHandle),
- "CreatePipelineLayout"));
+ Device* device = ToBackend(GetDevice());
+ DAWN_TRY(CheckVkSuccess(
+ device->fn.CreatePipelineLayout(device->GetVkDevice(), &createInfo, nullptr, &*mHandle),
+ "CreatePipelineLayout"));
- SetLabelImpl();
+ SetLabelImpl();
- return {};
- }
+ return {};
+}
- PipelineLayout::~PipelineLayout() = default;
+PipelineLayout::~PipelineLayout() = default;
- void PipelineLayout::DestroyImpl() {
- PipelineLayoutBase::DestroyImpl();
- if (mHandle != VK_NULL_HANDLE) {
- ToBackend(GetDevice())->GetFencedDeleter()->DeleteWhenUnused(mHandle);
- mHandle = VK_NULL_HANDLE;
- }
+void PipelineLayout::DestroyImpl() {
+ PipelineLayoutBase::DestroyImpl();
+ if (mHandle != VK_NULL_HANDLE) {
+ ToBackend(GetDevice())->GetFencedDeleter()->DeleteWhenUnused(mHandle);
+ mHandle = VK_NULL_HANDLE;
}
+}
- VkPipelineLayout PipelineLayout::GetHandle() const {
- return mHandle;
- }
+VkPipelineLayout PipelineLayout::GetHandle() const {
+ return mHandle;
+}
- void PipelineLayout::SetLabelImpl() {
- SetDebugName(ToBackend(GetDevice()), mHandle, "Dawn_PipelineLayout", GetLabel());
- }
+void PipelineLayout::SetLabelImpl() {
+ SetDebugName(ToBackend(GetDevice()), mHandle, "Dawn_PipelineLayout", GetLabel());
+}
} // namespace dawn::native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/PipelineLayoutVk.h b/chromium/third_party/dawn/src/dawn/native/vulkan/PipelineLayoutVk.h
index 26f9f4e59d5..ca157f8d3d1 100644
--- a/chromium/third_party/dawn/src/dawn/native/vulkan/PipelineLayoutVk.h
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/PipelineLayoutVk.h
@@ -22,28 +22,27 @@
namespace dawn::native::vulkan {
- class Device;
+class Device;
- class PipelineLayout final : public PipelineLayoutBase {
- public:
- static ResultOrError<Ref<PipelineLayout>> Create(
- Device* device,
- const PipelineLayoutDescriptor* descriptor);
+class PipelineLayout final : public PipelineLayoutBase {
+ public:
+ static ResultOrError<Ref<PipelineLayout>> Create(Device* device,
+ const PipelineLayoutDescriptor* descriptor);
- VkPipelineLayout GetHandle() const;
+ VkPipelineLayout GetHandle() const;
- private:
- ~PipelineLayout() override;
- void DestroyImpl() override;
+ private:
+ ~PipelineLayout() override;
+ void DestroyImpl() override;
- using PipelineLayoutBase::PipelineLayoutBase;
- MaybeError Initialize();
+ using PipelineLayoutBase::PipelineLayoutBase;
+ MaybeError Initialize();
- // Dawn API
- void SetLabelImpl() override;
+ // Dawn API
+ void SetLabelImpl() override;
- VkPipelineLayout mHandle = VK_NULL_HANDLE;
- };
+ VkPipelineLayout mHandle = VK_NULL_HANDLE;
+};
} // namespace dawn::native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/QuerySetVk.cpp b/chromium/third_party/dawn/src/dawn/native/vulkan/QuerySetVk.cpp
index 04895f083d4..aa1aac2e7b2 100644
--- a/chromium/third_party/dawn/src/dawn/native/vulkan/QuerySetVk.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/QuerySetVk.cpp
@@ -14,6 +14,9 @@
#include "dawn/native/vulkan/QuerySetVk.h"
+#include <algorithm>
+#include <vector>
+
#include "dawn/native/vulkan/DeviceVk.h"
#include "dawn/native/vulkan/FencedDeleter.h"
#include "dawn/native/vulkan/UtilsVulkan.h"
@@ -22,95 +25,91 @@
namespace dawn::native::vulkan {
- namespace {
- VkQueryType VulkanQueryType(wgpu::QueryType type) {
- switch (type) {
- case wgpu::QueryType::Occlusion:
- return VK_QUERY_TYPE_OCCLUSION;
- case wgpu::QueryType::PipelineStatistics:
- return VK_QUERY_TYPE_PIPELINE_STATISTICS;
- case wgpu::QueryType::Timestamp:
- return VK_QUERY_TYPE_TIMESTAMP;
- }
- UNREACHABLE();
- }
-
- VkQueryPipelineStatisticFlags VulkanQueryPipelineStatisticFlags(
- std::vector<wgpu::PipelineStatisticName> pipelineStatisticsSet) {
- VkQueryPipelineStatisticFlags pipelineStatistics = 0;
- for (size_t i = 0; i < pipelineStatisticsSet.size(); ++i) {
- switch (pipelineStatisticsSet[i]) {
- case wgpu::PipelineStatisticName::ClipperInvocations:
- pipelineStatistics |= VK_QUERY_PIPELINE_STATISTIC_CLIPPING_INVOCATIONS_BIT;
- break;
- case wgpu::PipelineStatisticName::ClipperPrimitivesOut:
- pipelineStatistics |= VK_QUERY_PIPELINE_STATISTIC_CLIPPING_PRIMITIVES_BIT;
- break;
- case wgpu::PipelineStatisticName::ComputeShaderInvocations:
- pipelineStatistics |=
- VK_QUERY_PIPELINE_STATISTIC_COMPUTE_SHADER_INVOCATIONS_BIT;
- break;
- case wgpu::PipelineStatisticName::FragmentShaderInvocations:
- pipelineStatistics |=
- VK_QUERY_PIPELINE_STATISTIC_FRAGMENT_SHADER_INVOCATIONS_BIT;
- break;
- case wgpu::PipelineStatisticName::VertexShaderInvocations:
- pipelineStatistics |=
- VK_QUERY_PIPELINE_STATISTIC_VERTEX_SHADER_INVOCATIONS_BIT;
- break;
- }
- }
-
- return pipelineStatistics;
+namespace {
+VkQueryType VulkanQueryType(wgpu::QueryType type) {
+ switch (type) {
+ case wgpu::QueryType::Occlusion:
+ return VK_QUERY_TYPE_OCCLUSION;
+ case wgpu::QueryType::PipelineStatistics:
+ return VK_QUERY_TYPE_PIPELINE_STATISTICS;
+ case wgpu::QueryType::Timestamp:
+ return VK_QUERY_TYPE_TIMESTAMP;
+ }
+ UNREACHABLE();
+}
+
+VkQueryPipelineStatisticFlags VulkanQueryPipelineStatisticFlags(
+ std::vector<wgpu::PipelineStatisticName> pipelineStatisticsSet) {
+ VkQueryPipelineStatisticFlags pipelineStatistics = 0;
+ for (size_t i = 0; i < pipelineStatisticsSet.size(); ++i) {
+ switch (pipelineStatisticsSet[i]) {
+ case wgpu::PipelineStatisticName::ClipperInvocations:
+ pipelineStatistics |= VK_QUERY_PIPELINE_STATISTIC_CLIPPING_INVOCATIONS_BIT;
+ break;
+ case wgpu::PipelineStatisticName::ClipperPrimitivesOut:
+ pipelineStatistics |= VK_QUERY_PIPELINE_STATISTIC_CLIPPING_PRIMITIVES_BIT;
+ break;
+ case wgpu::PipelineStatisticName::ComputeShaderInvocations:
+ pipelineStatistics |= VK_QUERY_PIPELINE_STATISTIC_COMPUTE_SHADER_INVOCATIONS_BIT;
+ break;
+ case wgpu::PipelineStatisticName::FragmentShaderInvocations:
+ pipelineStatistics |= VK_QUERY_PIPELINE_STATISTIC_FRAGMENT_SHADER_INVOCATIONS_BIT;
+ break;
+ case wgpu::PipelineStatisticName::VertexShaderInvocations:
+ pipelineStatistics |= VK_QUERY_PIPELINE_STATISTIC_VERTEX_SHADER_INVOCATIONS_BIT;
+ break;
}
- } // anonymous namespace
-
- // static
- ResultOrError<Ref<QuerySet>> QuerySet::Create(Device* device,
- const QuerySetDescriptor* descriptor) {
- Ref<QuerySet> queryset = AcquireRef(new QuerySet(device, descriptor));
- DAWN_TRY(queryset->Initialize());
- return queryset;
}
- MaybeError QuerySet::Initialize() {
- VkQueryPoolCreateInfo createInfo;
- createInfo.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO;
- createInfo.pNext = NULL;
- createInfo.flags = 0;
- createInfo.queryType = VulkanQueryType(GetQueryType());
- createInfo.queryCount = std::max(GetQueryCount(), uint32_t(1u));
- if (GetQueryType() == wgpu::QueryType::PipelineStatistics) {
- createInfo.pipelineStatistics =
- VulkanQueryPipelineStatisticFlags(GetPipelineStatistics());
- }
+ return pipelineStatistics;
+}
+} // anonymous namespace
+
+// static
+ResultOrError<Ref<QuerySet>> QuerySet::Create(Device* device,
+ const QuerySetDescriptor* descriptor) {
+ Ref<QuerySet> queryset = AcquireRef(new QuerySet(device, descriptor));
+ DAWN_TRY(queryset->Initialize());
+ return queryset;
+}
+
+MaybeError QuerySet::Initialize() {
+ VkQueryPoolCreateInfo createInfo;
+ createInfo.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO;
+ createInfo.pNext = NULL;
+ createInfo.flags = 0;
+ createInfo.queryType = VulkanQueryType(GetQueryType());
+ createInfo.queryCount = std::max(GetQueryCount(), uint32_t(1u));
+ if (GetQueryType() == wgpu::QueryType::PipelineStatistics) {
+ createInfo.pipelineStatistics = VulkanQueryPipelineStatisticFlags(GetPipelineStatistics());
+ }
- Device* device = ToBackend(GetDevice());
- DAWN_TRY(CheckVkOOMThenSuccess(
- device->fn.CreateQueryPool(device->GetVkDevice(), &createInfo, nullptr, &*mHandle),
- "vkCreateQueryPool"));
+ Device* device = ToBackend(GetDevice());
+ DAWN_TRY(CheckVkOOMThenSuccess(
+ device->fn.CreateQueryPool(device->GetVkDevice(), &createInfo, nullptr, &*mHandle),
+ "vkCreateQueryPool"));
- SetLabelImpl();
+ SetLabelImpl();
- return {};
- }
+ return {};
+}
- VkQueryPool QuerySet::GetHandle() const {
- return mHandle;
- }
+VkQueryPool QuerySet::GetHandle() const {
+ return mHandle;
+}
- QuerySet::~QuerySet() = default;
+QuerySet::~QuerySet() = default;
- void QuerySet::DestroyImpl() {
- QuerySetBase::DestroyImpl();
- if (mHandle != VK_NULL_HANDLE) {
- ToBackend(GetDevice())->GetFencedDeleter()->DeleteWhenUnused(mHandle);
- mHandle = VK_NULL_HANDLE;
- }
+void QuerySet::DestroyImpl() {
+ QuerySetBase::DestroyImpl();
+ if (mHandle != VK_NULL_HANDLE) {
+ ToBackend(GetDevice())->GetFencedDeleter()->DeleteWhenUnused(mHandle);
+ mHandle = VK_NULL_HANDLE;
}
+}
- void QuerySet::SetLabelImpl() {
- SetDebugName(ToBackend(GetDevice()), mHandle, "Dawn_QuerySet", GetLabel());
- }
+void QuerySet::SetLabelImpl() {
+ SetDebugName(ToBackend(GetDevice()), mHandle, "Dawn_QuerySet", GetLabel());
+}
} // namespace dawn::native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/QuerySetVk.h b/chromium/third_party/dawn/src/dawn/native/vulkan/QuerySetVk.h
index d0a3c9379c3..dfee3515ae6 100644
--- a/chromium/third_party/dawn/src/dawn/native/vulkan/QuerySetVk.h
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/QuerySetVk.h
@@ -21,26 +21,26 @@
namespace dawn::native::vulkan {
- class Device;
+class Device;
- class QuerySet final : public QuerySetBase {
- public:
- static ResultOrError<Ref<QuerySet>> Create(Device* device,
- const QuerySetDescriptor* descriptor);
+class QuerySet final : public QuerySetBase {
+ public:
+ static ResultOrError<Ref<QuerySet>> Create(Device* device,
+ const QuerySetDescriptor* descriptor);
- VkQueryPool GetHandle() const;
+ VkQueryPool GetHandle() const;
- private:
- ~QuerySet() override;
- using QuerySetBase::QuerySetBase;
- MaybeError Initialize();
+ private:
+ ~QuerySet() override;
+ using QuerySetBase::QuerySetBase;
+ MaybeError Initialize();
- // Dawn API
- void DestroyImpl() override;
- void SetLabelImpl() override;
+ // Dawn API
+ void DestroyImpl() override;
+ void SetLabelImpl() override;
- VkQueryPool mHandle = VK_NULL_HANDLE;
- };
+ VkQueryPool mHandle = VK_NULL_HANDLE;
+};
} // namespace dawn::native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/QueueVk.cpp b/chromium/third_party/dawn/src/dawn/native/vulkan/QueueVk.cpp
index 2166be1f549..b0e40a7cfbd 100644
--- a/chromium/third_party/dawn/src/dawn/native/vulkan/QueueVk.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/QueueVk.cpp
@@ -28,47 +28,43 @@
namespace dawn::native::vulkan {
- // static
- Ref<Queue> Queue::Create(Device* device, const QueueDescriptor* descriptor) {
- Ref<Queue> queue = AcquireRef(new Queue(device, descriptor));
- queue->Initialize();
- return queue;
- }
+// static
+Ref<Queue> Queue::Create(Device* device, const QueueDescriptor* descriptor) {
+ Ref<Queue> queue = AcquireRef(new Queue(device, descriptor));
+ queue->Initialize();
+ return queue;
+}
- Queue::Queue(Device* device, const QueueDescriptor* descriptor)
- : QueueBase(device, descriptor) {
- }
+Queue::Queue(Device* device, const QueueDescriptor* descriptor) : QueueBase(device, descriptor) {}
- Queue::~Queue() {
- }
+Queue::~Queue() {}
- void Queue::Initialize() {
- SetLabelImpl();
- }
+void Queue::Initialize() {
+ SetLabelImpl();
+}
- MaybeError Queue::SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) {
- Device* device = ToBackend(GetDevice());
+MaybeError Queue::SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) {
+ Device* device = ToBackend(GetDevice());
- DAWN_TRY(device->Tick());
+ DAWN_TRY(device->Tick());
- TRACE_EVENT_BEGIN0(GetDevice()->GetPlatform(), Recording,
- "CommandBufferVk::RecordCommands");
- CommandRecordingContext* recordingContext = device->GetPendingRecordingContext();
- for (uint32_t i = 0; i < commandCount; ++i) {
- DAWN_TRY(ToBackend(commands[i])->RecordCommands(recordingContext));
- }
- TRACE_EVENT_END0(GetDevice()->GetPlatform(), Recording, "CommandBufferVk::RecordCommands");
+ TRACE_EVENT_BEGIN0(GetDevice()->GetPlatform(), Recording, "CommandBufferVk::RecordCommands");
+ CommandRecordingContext* recordingContext = device->GetPendingRecordingContext();
+ for (uint32_t i = 0; i < commandCount; ++i) {
+ DAWN_TRY(ToBackend(commands[i])->RecordCommands(recordingContext));
+ }
+ TRACE_EVENT_END0(GetDevice()->GetPlatform(), Recording, "CommandBufferVk::RecordCommands");
- DAWN_TRY(device->SubmitPendingCommands());
+ DAWN_TRY(device->SubmitPendingCommands());
- return {};
- }
+ return {};
+}
- void Queue::SetLabelImpl() {
- Device* device = ToBackend(GetDevice());
- // TODO(crbug.com/dawn/1344): When we start using multiple queues this needs to be adjusted
- // so it doesn't always change the default queue's label.
- SetDebugName(device, VK_OBJECT_TYPE_QUEUE, device->GetQueue(), "Dawn_Queue", GetLabel());
- }
+void Queue::SetLabelImpl() {
+ Device* device = ToBackend(GetDevice());
+ // TODO(crbug.com/dawn/1344): When we start using multiple queues this needs to be adjusted
+ // so it doesn't always change the default queue's label.
+ SetDebugName(device, VK_OBJECT_TYPE_QUEUE, device->GetQueue(), "Dawn_Queue", GetLabel());
+}
} // namespace dawn::native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/QueueVk.h b/chromium/third_party/dawn/src/dawn/native/vulkan/QueueVk.h
index 2bca3be76a6..470efd72a83 100644
--- a/chromium/third_party/dawn/src/dawn/native/vulkan/QueueVk.h
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/QueueVk.h
@@ -19,24 +19,24 @@
namespace dawn::native::vulkan {
- class Device;
+class Device;
- class Queue final : public QueueBase {
- public:
- static Ref<Queue> Create(Device* device, const QueueDescriptor* descriptor);
+class Queue final : public QueueBase {
+ public:
+ static Ref<Queue> Create(Device* device, const QueueDescriptor* descriptor);
- private:
- Queue(Device* device, const QueueDescriptor* descriptor);
- ~Queue() override;
- using QueueBase::QueueBase;
+ private:
+ Queue(Device* device, const QueueDescriptor* descriptor);
+ ~Queue() override;
+ using QueueBase::QueueBase;
- void Initialize();
+ void Initialize();
- MaybeError SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) override;
+ MaybeError SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) override;
- // Dawn API
- void SetLabelImpl() override;
- };
+ // Dawn API
+ void SetLabelImpl() override;
+};
} // namespace dawn::native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/RenderPassCache.cpp b/chromium/third_party/dawn/src/dawn/native/vulkan/RenderPassCache.cpp
index f1735ee8dc2..f948a4edf78 100644
--- a/chromium/third_party/dawn/src/dawn/native/vulkan/RenderPassCache.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/RenderPassCache.cpp
@@ -22,281 +22,279 @@
namespace dawn::native::vulkan {
- namespace {
- VkAttachmentLoadOp VulkanAttachmentLoadOp(wgpu::LoadOp op) {
- switch (op) {
- case wgpu::LoadOp::Load:
- return VK_ATTACHMENT_LOAD_OP_LOAD;
- case wgpu::LoadOp::Clear:
- return VK_ATTACHMENT_LOAD_OP_CLEAR;
- case wgpu::LoadOp::Undefined:
- UNREACHABLE();
- break;
- }
+namespace {
+VkAttachmentLoadOp VulkanAttachmentLoadOp(wgpu::LoadOp op) {
+ switch (op) {
+ case wgpu::LoadOp::Load:
+ return VK_ATTACHMENT_LOAD_OP_LOAD;
+ case wgpu::LoadOp::Clear:
+ return VK_ATTACHMENT_LOAD_OP_CLEAR;
+ case wgpu::LoadOp::Undefined:
UNREACHABLE();
- }
-
- VkAttachmentStoreOp VulkanAttachmentStoreOp(wgpu::StoreOp op) {
- // TODO(crbug.com/dawn/485): return STORE_OP_STORE_NONE_QCOM if the device has required
- // extension.
- switch (op) {
- case wgpu::StoreOp::Store:
- return VK_ATTACHMENT_STORE_OP_STORE;
- case wgpu::StoreOp::Discard:
- return VK_ATTACHMENT_STORE_OP_DONT_CARE;
- case wgpu::StoreOp::Undefined:
- UNREACHABLE();
- break;
- }
- UNREACHABLE();
- }
- } // anonymous namespace
-
- // RenderPassCacheQuery
-
- void RenderPassCacheQuery::SetColor(ColorAttachmentIndex index,
- wgpu::TextureFormat format,
- wgpu::LoadOp loadOp,
- wgpu::StoreOp storeOp,
- bool hasResolveTarget) {
- colorMask.set(index);
- colorFormats[index] = format;
- colorLoadOp[index] = loadOp;
- colorStoreOp[index] = storeOp;
- resolveTargetMask[index] = hasResolveTarget;
+ break;
}
-
- void RenderPassCacheQuery::SetDepthStencil(wgpu::TextureFormat format,
- wgpu::LoadOp depthLoadOpIn,
- wgpu::StoreOp depthStoreOpIn,
- wgpu::LoadOp stencilLoadOpIn,
- wgpu::StoreOp stencilStoreOpIn,
- bool readOnly) {
- hasDepthStencil = true;
- depthStencilFormat = format;
- depthLoadOp = depthLoadOpIn;
- depthStoreOp = depthStoreOpIn;
- stencilLoadOp = stencilLoadOpIn;
- stencilStoreOp = stencilStoreOpIn;
- readOnlyDepthStencil = readOnly;
+ UNREACHABLE();
+}
+
+VkAttachmentStoreOp VulkanAttachmentStoreOp(wgpu::StoreOp op) {
+ // TODO(crbug.com/dawn/485): return STORE_OP_STORE_NONE_QCOM if the device has the
+ // required extension.
+ switch (op) {
+ case wgpu::StoreOp::Store:
+ return VK_ATTACHMENT_STORE_OP_STORE;
+ case wgpu::StoreOp::Discard:
+ return VK_ATTACHMENT_STORE_OP_DONT_CARE;
+ case wgpu::StoreOp::Undefined:
+ UNREACHABLE();
+ break;
}
-
- void RenderPassCacheQuery::SetSampleCount(uint32_t sampleCount) {
- this->sampleCount = sampleCount;
+ UNREACHABLE();
+}
+} // anonymous namespace
+
+// RenderPassCacheQuery
+
+void RenderPassCacheQuery::SetColor(ColorAttachmentIndex index,
+ wgpu::TextureFormat format,
+ wgpu::LoadOp loadOp,
+ wgpu::StoreOp storeOp,
+ bool hasResolveTarget) {
+ colorMask.set(index);
+ colorFormats[index] = format;
+ colorLoadOp[index] = loadOp;
+ colorStoreOp[index] = storeOp;
+ resolveTargetMask[index] = hasResolveTarget;
+}
+
+void RenderPassCacheQuery::SetDepthStencil(wgpu::TextureFormat format,
+ wgpu::LoadOp depthLoadOpIn,
+ wgpu::StoreOp depthStoreOpIn,
+ wgpu::LoadOp stencilLoadOpIn,
+ wgpu::StoreOp stencilStoreOpIn,
+ bool readOnly) {
+ hasDepthStencil = true;
+ depthStencilFormat = format;
+ depthLoadOp = depthLoadOpIn;
+ depthStoreOp = depthStoreOpIn;
+ stencilLoadOp = stencilLoadOpIn;
+ stencilStoreOp = stencilStoreOpIn;
+ readOnlyDepthStencil = readOnly;
+}
+
+void RenderPassCacheQuery::SetSampleCount(uint32_t sampleCount) {
+ this->sampleCount = sampleCount;
+}
+
+// RenderPassCache
+
+RenderPassCache::RenderPassCache(Device* device) : mDevice(device) {}
+
+RenderPassCache::~RenderPassCache() {
+ std::lock_guard<std::mutex> lock(mMutex);
+ for (auto [_, renderPass] : mCache) {
+ mDevice->fn.DestroyRenderPass(mDevice->GetVkDevice(), renderPass, nullptr);
}
- // RenderPassCache
+ mCache.clear();
+}
- RenderPassCache::RenderPassCache(Device* device) : mDevice(device) {
+ResultOrError<VkRenderPass> RenderPassCache::GetRenderPass(const RenderPassCacheQuery& query) {
+ std::lock_guard<std::mutex> lock(mMutex);
+ auto it = mCache.find(query);
+ if (it != mCache.end()) {
+ return VkRenderPass(it->second);
}
- RenderPassCache::~RenderPassCache() {
- std::lock_guard<std::mutex> lock(mMutex);
- for (auto [_, renderPass] : mCache) {
- mDevice->fn.DestroyRenderPass(mDevice->GetVkDevice(), renderPass, nullptr);
- }
-
- mCache.clear();
+ VkRenderPass renderPass;
+ DAWN_TRY_ASSIGN(renderPass, CreateRenderPassForQuery(query));
+ mCache.emplace(query, renderPass);
+ return renderPass;
+}
+
+ResultOrError<VkRenderPass> RenderPassCache::CreateRenderPassForQuery(
+ const RenderPassCacheQuery& query) const {
+ // The Vulkan subpasses want to know the layout of the attachments with VkAttachmentRef.
+ // Precompute them as they must be pointer-chained in VkSubpassDescription.
+ // Note that both colorAttachmentRefs and resolveAttachmentRefs can be sparse with holes
+ // filled with VK_ATTACHMENT_UNUSED.
+ ityp::array<ColorAttachmentIndex, VkAttachmentReference, kMaxColorAttachments>
+ colorAttachmentRefs;
+ ityp::array<ColorAttachmentIndex, VkAttachmentReference, kMaxColorAttachments>
+ resolveAttachmentRefs;
+ VkAttachmentReference depthStencilAttachmentRef;
+
+ for (ColorAttachmentIndex i(uint8_t(0)); i < kMaxColorAttachmentsTyped; i++) {
+ colorAttachmentRefs[i].attachment = VK_ATTACHMENT_UNUSED;
+ resolveAttachmentRefs[i].attachment = VK_ATTACHMENT_UNUSED;
+ // The Khronos Vulkan validation layer will complain if not set
+ colorAttachmentRefs[i].layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+ resolveAttachmentRefs[i].layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
}
- ResultOrError<VkRenderPass> RenderPassCache::GetRenderPass(const RenderPassCacheQuery& query) {
- std::lock_guard<std::mutex> lock(mMutex);
- auto it = mCache.find(query);
- if (it != mCache.end()) {
- return VkRenderPass(it->second);
- }
-
- VkRenderPass renderPass;
- DAWN_TRY_ASSIGN(renderPass, CreateRenderPassForQuery(query));
- mCache.emplace(query, renderPass);
- return renderPass;
+ // Contains the attachment description that will be chained in the create info
+ // The order of all attachments in attachmentDescs is "color-depthstencil-resolve".
+ constexpr uint8_t kMaxAttachmentCount = kMaxColorAttachments * 2 + 1;
+ std::array<VkAttachmentDescription, kMaxAttachmentCount> attachmentDescs = {};
+
+ VkSampleCountFlagBits vkSampleCount = VulkanSampleCount(query.sampleCount);
+
+ uint32_t attachmentCount = 0;
+ ColorAttachmentIndex highestColorAttachmentIndexPlusOne(static_cast<uint8_t>(0));
+ for (ColorAttachmentIndex i : IterateBitSet(query.colorMask)) {
+ auto& attachmentRef = colorAttachmentRefs[i];
+ auto& attachmentDesc = attachmentDescs[attachmentCount];
+
+ attachmentRef.attachment = attachmentCount;
+ attachmentRef.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+
+ attachmentDesc.flags = 0;
+ attachmentDesc.format = VulkanImageFormat(mDevice, query.colorFormats[i]);
+ attachmentDesc.samples = vkSampleCount;
+ attachmentDesc.loadOp = VulkanAttachmentLoadOp(query.colorLoadOp[i]);
+ attachmentDesc.storeOp = VulkanAttachmentStoreOp(query.colorStoreOp[i]);
+ attachmentDesc.initialLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+ attachmentDesc.finalLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+
+ attachmentCount++;
+ highestColorAttachmentIndexPlusOne =
+ ColorAttachmentIndex(static_cast<uint8_t>(static_cast<uint8_t>(i) + 1u));
}
- ResultOrError<VkRenderPass> RenderPassCache::CreateRenderPassForQuery(
- const RenderPassCacheQuery& query) const {
- // The Vulkan subpasses want to know the layout of the attachments with VkAttachmentRef.
- // Precompute them as they must be pointer-chained in VkSubpassDescription.
- // Note that both colorAttachmentRefs and resolveAttachmentRefs can be sparse with holes
- // filled with VK_ATTACHMENT_UNUSED.
- ityp::array<ColorAttachmentIndex, VkAttachmentReference, kMaxColorAttachments>
- colorAttachmentRefs;
- ityp::array<ColorAttachmentIndex, VkAttachmentReference, kMaxColorAttachments>
- resolveAttachmentRefs;
- VkAttachmentReference depthStencilAttachmentRef;
-
- for (ColorAttachmentIndex i(uint8_t(0)); i < kMaxColorAttachmentsTyped; i++) {
- colorAttachmentRefs[i].attachment = VK_ATTACHMENT_UNUSED;
- resolveAttachmentRefs[i].attachment = VK_ATTACHMENT_UNUSED;
- // The Khronos Vulkan validation layer will complain if not set
- colorAttachmentRefs[i].layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
- resolveAttachmentRefs[i].layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
- }
-
- // Contains the attachment description that will be chained in the create info
- // The order of all attachments in attachmentDescs is "color-depthstencil-resolve".
- constexpr uint8_t kMaxAttachmentCount = kMaxColorAttachments * 2 + 1;
- std::array<VkAttachmentDescription, kMaxAttachmentCount> attachmentDescs = {};
-
- VkSampleCountFlagBits vkSampleCount = VulkanSampleCount(query.sampleCount);
-
- uint32_t attachmentCount = 0;
- ColorAttachmentIndex highestColorAttachmentIndexPlusOne(static_cast<uint8_t>(0));
- for (ColorAttachmentIndex i : IterateBitSet(query.colorMask)) {
- auto& attachmentRef = colorAttachmentRefs[i];
- auto& attachmentDesc = attachmentDescs[attachmentCount];
-
- attachmentRef.attachment = attachmentCount;
- attachmentRef.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
-
- attachmentDesc.flags = 0;
- attachmentDesc.format = VulkanImageFormat(mDevice, query.colorFormats[i]);
- attachmentDesc.samples = vkSampleCount;
- attachmentDesc.loadOp = VulkanAttachmentLoadOp(query.colorLoadOp[i]);
- attachmentDesc.storeOp = VulkanAttachmentStoreOp(query.colorStoreOp[i]);
- attachmentDesc.initialLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
- attachmentDesc.finalLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
-
- attachmentCount++;
- highestColorAttachmentIndexPlusOne =
- ColorAttachmentIndex(static_cast<uint8_t>(static_cast<uint8_t>(i) + 1u));
- }
-
- VkAttachmentReference* depthStencilAttachment = nullptr;
- if (query.hasDepthStencil) {
- auto& attachmentDesc = attachmentDescs[attachmentCount];
+ VkAttachmentReference* depthStencilAttachment = nullptr;
+ if (query.hasDepthStencil) {
+ auto& attachmentDesc = attachmentDescs[attachmentCount];
- depthStencilAttachment = &depthStencilAttachmentRef;
+ depthStencilAttachment = &depthStencilAttachmentRef;
- depthStencilAttachmentRef.attachment = attachmentCount;
- depthStencilAttachmentRef.layout =
- query.readOnlyDepthStencil ? VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL
- : VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
+ depthStencilAttachmentRef.attachment = attachmentCount;
+ depthStencilAttachmentRef.layout = query.readOnlyDepthStencil
+ ? VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL
+ : VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
- attachmentDesc.flags = 0;
- attachmentDesc.format = VulkanImageFormat(mDevice, query.depthStencilFormat);
- attachmentDesc.samples = vkSampleCount;
+ attachmentDesc.flags = 0;
+ attachmentDesc.format = VulkanImageFormat(mDevice, query.depthStencilFormat);
+ attachmentDesc.samples = vkSampleCount;
- attachmentDesc.loadOp = VulkanAttachmentLoadOp(query.depthLoadOp);
- attachmentDesc.storeOp = VulkanAttachmentStoreOp(query.depthStoreOp);
- attachmentDesc.stencilLoadOp = VulkanAttachmentLoadOp(query.stencilLoadOp);
- attachmentDesc.stencilStoreOp = VulkanAttachmentStoreOp(query.stencilStoreOp);
+ attachmentDesc.loadOp = VulkanAttachmentLoadOp(query.depthLoadOp);
+ attachmentDesc.storeOp = VulkanAttachmentStoreOp(query.depthStoreOp);
+ attachmentDesc.stencilLoadOp = VulkanAttachmentLoadOp(query.stencilLoadOp);
+ attachmentDesc.stencilStoreOp = VulkanAttachmentStoreOp(query.stencilStoreOp);
- // There is only one subpass, so it is safe to set both initialLayout and finalLayout to
- // the only subpass's layout.
- attachmentDesc.initialLayout = depthStencilAttachmentRef.layout;
- attachmentDesc.finalLayout = depthStencilAttachmentRef.layout;
+ // There is only one subpass, so it is safe to set both initialLayout and finalLayout to
+ // the only subpass's layout.
+ attachmentDesc.initialLayout = depthStencilAttachmentRef.layout;
+ attachmentDesc.finalLayout = depthStencilAttachmentRef.layout;
- attachmentCount++;
- }
-
- for (ColorAttachmentIndex i : IterateBitSet(query.resolveTargetMask)) {
- auto& attachmentRef = resolveAttachmentRefs[i];
- auto& attachmentDesc = attachmentDescs[attachmentCount];
+ attachmentCount++;
+ }
- attachmentRef.attachment = attachmentCount;
- attachmentRef.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+ for (ColorAttachmentIndex i : IterateBitSet(query.resolveTargetMask)) {
+ auto& attachmentRef = resolveAttachmentRefs[i];
+ auto& attachmentDesc = attachmentDescs[attachmentCount];
- attachmentDesc.flags = 0;
- attachmentDesc.format = VulkanImageFormat(mDevice, query.colorFormats[i]);
- attachmentDesc.samples = VK_SAMPLE_COUNT_1_BIT;
- attachmentDesc.loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
- attachmentDesc.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
- attachmentDesc.initialLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
- attachmentDesc.finalLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+ attachmentRef.attachment = attachmentCount;
+ attachmentRef.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
- attachmentCount++;
- }
+ attachmentDesc.flags = 0;
+ attachmentDesc.format = VulkanImageFormat(mDevice, query.colorFormats[i]);
+ attachmentDesc.samples = VK_SAMPLE_COUNT_1_BIT;
+ attachmentDesc.loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
+ attachmentDesc.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
+ attachmentDesc.initialLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+ attachmentDesc.finalLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
- // Create the VkSubpassDescription that will be chained in the VkRenderPassCreateInfo
- VkSubpassDescription subpassDesc;
- subpassDesc.flags = 0;
- subpassDesc.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
- subpassDesc.inputAttachmentCount = 0;
- subpassDesc.pInputAttachments = nullptr;
- subpassDesc.colorAttachmentCount = static_cast<uint8_t>(highestColorAttachmentIndexPlusOne);
- subpassDesc.pColorAttachments = colorAttachmentRefs.data();
- subpassDesc.pResolveAttachments = resolveAttachmentRefs.data();
- subpassDesc.pDepthStencilAttachment = depthStencilAttachment;
- subpassDesc.preserveAttachmentCount = 0;
- subpassDesc.pPreserveAttachments = nullptr;
-
- // Chain everything in VkRenderPassCreateInfo
- VkRenderPassCreateInfo createInfo;
- createInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
- createInfo.pNext = nullptr;
- createInfo.flags = 0;
- createInfo.attachmentCount = attachmentCount;
- createInfo.pAttachments = attachmentDescs.data();
- createInfo.subpassCount = 1;
- createInfo.pSubpasses = &subpassDesc;
- createInfo.dependencyCount = 0;
- createInfo.pDependencies = nullptr;
-
- // Create the render pass from the zillion parameters
- VkRenderPass renderPass;
- DAWN_TRY(CheckVkSuccess(mDevice->fn.CreateRenderPass(mDevice->GetVkDevice(), &createInfo,
- nullptr, &*renderPass),
- "CreateRenderPass"));
- return renderPass;
+ attachmentCount++;
}
- // RenderPassCache
-
- size_t RenderPassCache::CacheFuncs::operator()(const RenderPassCacheQuery& query) const {
- size_t hash = Hash(query.colorMask);
-
- HashCombine(&hash, Hash(query.resolveTargetMask));
+ // Create the VkSubpassDescription that will be chained in the VkRenderPassCreateInfo
+ VkSubpassDescription subpassDesc;
+ subpassDesc.flags = 0;
+ subpassDesc.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
+ subpassDesc.inputAttachmentCount = 0;
+ subpassDesc.pInputAttachments = nullptr;
+ subpassDesc.colorAttachmentCount = static_cast<uint8_t>(highestColorAttachmentIndexPlusOne);
+ subpassDesc.pColorAttachments = colorAttachmentRefs.data();
+ subpassDesc.pResolveAttachments = resolveAttachmentRefs.data();
+ subpassDesc.pDepthStencilAttachment = depthStencilAttachment;
+ subpassDesc.preserveAttachmentCount = 0;
+ subpassDesc.pPreserveAttachments = nullptr;
+
+ // Chain everything in VkRenderPassCreateInfo
+ VkRenderPassCreateInfo createInfo;
+ createInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
+ createInfo.pNext = nullptr;
+ createInfo.flags = 0;
+ createInfo.attachmentCount = attachmentCount;
+ createInfo.pAttachments = attachmentDescs.data();
+ createInfo.subpassCount = 1;
+ createInfo.pSubpasses = &subpassDesc;
+ createInfo.dependencyCount = 0;
+ createInfo.pDependencies = nullptr;
+
+ // Create the render pass from the zillion parameters
+ VkRenderPass renderPass;
+ DAWN_TRY(CheckVkSuccess(
+ mDevice->fn.CreateRenderPass(mDevice->GetVkDevice(), &createInfo, nullptr, &*renderPass),
+ "CreateRenderPass"));
+ return renderPass;
+}
+
+// RenderPassCache
+
+size_t RenderPassCache::CacheFuncs::operator()(const RenderPassCacheQuery& query) const {
+ size_t hash = Hash(query.colorMask);
+
+ HashCombine(&hash, Hash(query.resolveTargetMask));
+
+ for (ColorAttachmentIndex i : IterateBitSet(query.colorMask)) {
+ HashCombine(&hash, query.colorFormats[i], query.colorLoadOp[i], query.colorStoreOp[i]);
+ }
- for (ColorAttachmentIndex i : IterateBitSet(query.colorMask)) {
- HashCombine(&hash, query.colorFormats[i], query.colorLoadOp[i], query.colorStoreOp[i]);
- }
+ HashCombine(&hash, query.hasDepthStencil);
+ if (query.hasDepthStencil) {
+ HashCombine(&hash, query.depthStencilFormat, query.depthLoadOp, query.depthStoreOp,
+ query.stencilLoadOp, query.stencilStoreOp, query.readOnlyDepthStencil);
+ }
- HashCombine(&hash, query.hasDepthStencil);
- if (query.hasDepthStencil) {
- HashCombine(&hash, query.depthStencilFormat, query.depthLoadOp, query.depthStoreOp,
- query.stencilLoadOp, query.stencilStoreOp, query.readOnlyDepthStencil);
- }
+ HashCombine(&hash, query.sampleCount);
- HashCombine(&hash, query.sampleCount);
+ return hash;
+}
- return hash;
+bool RenderPassCache::CacheFuncs::operator()(const RenderPassCacheQuery& a,
+ const RenderPassCacheQuery& b) const {
+ if (a.colorMask != b.colorMask) {
+ return false;
}
- bool RenderPassCache::CacheFuncs::operator()(const RenderPassCacheQuery& a,
- const RenderPassCacheQuery& b) const {
- if (a.colorMask != b.colorMask) {
- return false;
- }
+ if (a.resolveTargetMask != b.resolveTargetMask) {
+ return false;
+ }
- if (a.resolveTargetMask != b.resolveTargetMask) {
- return false;
- }
+ if (a.sampleCount != b.sampleCount) {
+ return false;
+ }
- if (a.sampleCount != b.sampleCount) {
+ for (ColorAttachmentIndex i : IterateBitSet(a.colorMask)) {
+ if ((a.colorFormats[i] != b.colorFormats[i]) || (a.colorLoadOp[i] != b.colorLoadOp[i]) ||
+ (a.colorStoreOp[i] != b.colorStoreOp[i])) {
return false;
}
+ }
- for (ColorAttachmentIndex i : IterateBitSet(a.colorMask)) {
- if ((a.colorFormats[i] != b.colorFormats[i]) ||
- (a.colorLoadOp[i] != b.colorLoadOp[i]) ||
- (a.colorStoreOp[i] != b.colorStoreOp[i])) {
- return false;
- }
- }
+ if (a.hasDepthStencil != b.hasDepthStencil) {
+ return false;
+ }
- if (a.hasDepthStencil != b.hasDepthStencil) {
+ if (a.hasDepthStencil) {
+ if ((a.depthStencilFormat != b.depthStencilFormat) || (a.depthLoadOp != b.depthLoadOp) ||
+ (a.stencilLoadOp != b.stencilLoadOp) || (a.depthStoreOp != b.depthStoreOp) ||
+ (a.stencilStoreOp != b.stencilStoreOp) ||
+ (a.readOnlyDepthStencil != b.readOnlyDepthStencil)) {
return false;
}
-
- if (a.hasDepthStencil) {
- if ((a.depthStencilFormat != b.depthStencilFormat) ||
- (a.depthLoadOp != b.depthLoadOp) || (a.stencilLoadOp != b.stencilLoadOp) ||
- (a.depthStoreOp != b.depthStoreOp) || (a.stencilStoreOp != b.stencilStoreOp) ||
- (a.readOnlyDepthStencil != b.readOnlyDepthStencil)) {
- return false;
- }
- }
-
- return true;
}
+
+ return true;
+}
} // namespace dawn::native::vulkan
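
RenderPassCache keys its unordered_map with a single CacheFuncs struct that provides both the hash and the equality operators. A simplified sketch of that pattern is below; the Key type is a hypothetical stand-in for RenderPassCacheQuery and the hash mix is only an example:

    #include <cstddef>
    #include <cstdint>
    #include <functional>
    #include <unordered_map>
    #include <vulkan/vulkan.h>

    struct Key {
        uint32_t sampleCount = 1;
        bool hasDepthStencil = false;
    };

    struct KeyFuncs {
        // Hash functor: combine the hashes of the fields that define the key.
        size_t operator()(const Key& k) const {
            size_t h = std::hash<uint32_t>{}(k.sampleCount);
            h ^= std::hash<bool>{}(k.hasDepthStencil) + 0x9e3779b9 + (h << 6) + (h >> 2);
            return h;
        }
        // Equality functor: two keys are equal only if every field matches.
        bool operator()(const Key& a, const Key& b) const {
            return a.sampleCount == b.sampleCount && a.hasDepthStencil == b.hasDepthStencil;
        }
    };

    // The same functor type serves as both Hash and KeyEqual, mirroring CacheFuncs.
    using Cache = std::unordered_map<Key, VkRenderPass, KeyFuncs, KeyFuncs>;
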
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/RenderPassCache.h b/chromium/third_party/dawn/src/dawn/native/vulkan/RenderPassCache.h
index 9e46d408711..45a9de77fea 100644
--- a/chromium/third_party/dawn/src/dawn/native/vulkan/RenderPassCache.h
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/RenderPassCache.h
@@ -15,6 +15,11 @@
#ifndef SRC_DAWN_NATIVE_VULKAN_RENDERPASSCACHE_H_
#define SRC_DAWN_NATIVE_VULKAN_RENDERPASSCACHE_H_
+#include <array>
+#include <bitset>
+#include <mutex>
+#include <unordered_map>
+
#include "dawn/common/Constants.h"
#include "dawn/common/ityp_array.h"
#include "dawn/common/ityp_bitset.h"
@@ -23,83 +28,76 @@
#include "dawn/native/IntegerTypes.h"
#include "dawn/native/dawn_platform.h"
-#include <array>
-#include <bitset>
-#include <mutex>
-#include <unordered_map>
-
namespace dawn::native::vulkan {
- class Device;
-
- // This is a key to query the RenderPassCache, it can be sparse meaning that only the
- // information for bits set in colorMask or hasDepthStencil need to be provided and the rest can
- // be uninintialized.
- struct RenderPassCacheQuery {
- // Use these helpers to build the query, they make sure all relevant data is initialized and
- // masks set.
- void SetColor(ColorAttachmentIndex index,
- wgpu::TextureFormat format,
- wgpu::LoadOp loadOp,
- wgpu::StoreOp storeOp,
- bool hasResolveTarget);
- void SetDepthStencil(wgpu::TextureFormat format,
- wgpu::LoadOp depthLoadOp,
- wgpu::StoreOp depthStoreOp,
- wgpu::LoadOp stencilLoadOp,
- wgpu::StoreOp stencilStoreOp,
- bool readOnly);
- void SetSampleCount(uint32_t sampleCount);
-
- ityp::bitset<ColorAttachmentIndex, kMaxColorAttachments> colorMask;
- ityp::bitset<ColorAttachmentIndex, kMaxColorAttachments> resolveTargetMask;
- ityp::array<ColorAttachmentIndex, wgpu::TextureFormat, kMaxColorAttachments> colorFormats;
- ityp::array<ColorAttachmentIndex, wgpu::LoadOp, kMaxColorAttachments> colorLoadOp;
- ityp::array<ColorAttachmentIndex, wgpu::StoreOp, kMaxColorAttachments> colorStoreOp;
-
- bool hasDepthStencil = false;
- wgpu::TextureFormat depthStencilFormat;
- wgpu::LoadOp depthLoadOp;
- wgpu::StoreOp depthStoreOp;
- wgpu::LoadOp stencilLoadOp;
- wgpu::StoreOp stencilStoreOp;
- bool readOnlyDepthStencil;
-
- uint32_t sampleCount;
+class Device;
+
+// This is a key to query the RenderPassCache. It can be sparse, meaning that only the
+// information for bits set in colorMask or hasDepthStencil needs to be provided and the rest
+// can be uninitialized.
+struct RenderPassCacheQuery {
+ // Use these helpers to build the query; they make sure all relevant data is initialized and
+ // masks set.
+ void SetColor(ColorAttachmentIndex index,
+ wgpu::TextureFormat format,
+ wgpu::LoadOp loadOp,
+ wgpu::StoreOp storeOp,
+ bool hasResolveTarget);
+ void SetDepthStencil(wgpu::TextureFormat format,
+ wgpu::LoadOp depthLoadOp,
+ wgpu::StoreOp depthStoreOp,
+ wgpu::LoadOp stencilLoadOp,
+ wgpu::StoreOp stencilStoreOp,
+ bool readOnly);
+ void SetSampleCount(uint32_t sampleCount);
+
+ ityp::bitset<ColorAttachmentIndex, kMaxColorAttachments> colorMask;
+ ityp::bitset<ColorAttachmentIndex, kMaxColorAttachments> resolveTargetMask;
+ ityp::array<ColorAttachmentIndex, wgpu::TextureFormat, kMaxColorAttachments> colorFormats;
+ ityp::array<ColorAttachmentIndex, wgpu::LoadOp, kMaxColorAttachments> colorLoadOp;
+ ityp::array<ColorAttachmentIndex, wgpu::StoreOp, kMaxColorAttachments> colorStoreOp;
+
+ bool hasDepthStencil = false;
+ wgpu::TextureFormat depthStencilFormat;
+ wgpu::LoadOp depthLoadOp;
+ wgpu::StoreOp depthStoreOp;
+ wgpu::LoadOp stencilLoadOp;
+ wgpu::StoreOp stencilStoreOp;
+ bool readOnlyDepthStencil;
+
+ uint32_t sampleCount;
+};
+
+// Caches VkRenderPasses so that we don't create duplicate ones for every RenderPipeline or
+// render pass. Attachments are always arranged in "color-depthstencil-resolve" order when
+// creating the render pass and framebuffer, so the order of attachments in the render
+// pipeline always matches that of the framebuffer.
+// All the operations on RenderPassCache are guaranteed to be thread-safe.
+// TODO(cwallez@chromium.org): Make it an LRU cache somehow?
+class RenderPassCache {
+ public:
+ explicit RenderPassCache(Device* device);
+ ~RenderPassCache();
+
+ ResultOrError<VkRenderPass> GetRenderPass(const RenderPassCacheQuery& query);
+
+ private:
+ // Does the actual VkRenderPass creation on a cache miss.
+ ResultOrError<VkRenderPass> CreateRenderPassForQuery(const RenderPassCacheQuery& query) const;
+
+    // Implements the functors necessary to use RenderPassCacheQueries as unordered_map
+    // keys.
+ struct CacheFuncs {
+ size_t operator()(const RenderPassCacheQuery& query) const;
+ bool operator()(const RenderPassCacheQuery& a, const RenderPassCacheQuery& b) const;
};
+ using Cache = std::unordered_map<RenderPassCacheQuery, VkRenderPass, CacheFuncs, CacheFuncs>;
- // Caches VkRenderPasses so that we don't create duplicate ones for every RenderPipeline or
- // render pass. We always arrange the order of attachments in "color-depthstencil-resolve" order
- // when creating render pass and framebuffer so that we can always make sure the order of
- // attachments in the rendering pipeline matches the one of the framebuffer.
- // All the operations on RenderPassCache are guaranteed to be thread-safe.
- // TODO(cwallez@chromium.org): Make it an LRU cache somehow?
- class RenderPassCache {
- public:
- explicit RenderPassCache(Device* device);
- ~RenderPassCache();
-
- ResultOrError<VkRenderPass> GetRenderPass(const RenderPassCacheQuery& query);
-
- private:
- // Does the actual VkRenderPass creation on a cache miss.
- ResultOrError<VkRenderPass> CreateRenderPassForQuery(
- const RenderPassCacheQuery& query) const;
-
- // Implements the functors necessary for to use RenderPassCacheQueries as unordered_map
- // keys.
- struct CacheFuncs {
- size_t operator()(const RenderPassCacheQuery& query) const;
- bool operator()(const RenderPassCacheQuery& a, const RenderPassCacheQuery& b) const;
- };
- using Cache =
- std::unordered_map<RenderPassCacheQuery, VkRenderPass, CacheFuncs, CacheFuncs>;
-
- Device* mDevice = nullptr;
-
- std::mutex mMutex;
- Cache mCache;
- };
+ Device* mDevice = nullptr;
+
+ std::mutex mMutex;
+ Cache mCache;
+};
} // namespace dawn::native::vulkan
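A minimal usage sketch of the cache API above (illustrative only; the BGRA8Unorm format, the sample count, and the surrounding device plumbing are assumptions, not taken from this change):

// Sketch: build a sparse RenderPassCacheQuery and fetch a compatible VkRenderPass.
// Assumes a valid dawn::native::vulkan::Device* with a single, non-multisampled
// BGRA8Unorm color attachment.
#include "dawn/native/vulkan/RenderPassCache.h"

namespace dawn::native::vulkan {

ResultOrError<VkRenderPass> GetCompatibleRenderPassSketch(Device* device) {
    RenderPassCacheQuery query;
    // Only the state touched by the helpers needs to be initialized; the rest may stay unset.
    query.SetColor(ColorAttachmentIndex(uint8_t(0)), wgpu::TextureFormat::BGRA8Unorm,
                   wgpu::LoadOp::Load, wgpu::StoreOp::Store, /*hasResolveTarget=*/false);
    query.SetSampleCount(1);
    // Thread-safe lookup; the VkRenderPass is created and memoized on a cache miss.
    return device->GetRenderPassCache()->GetRenderPass(query);
}

}  // namespace dawn::native::vulkan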
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/RenderPipelineVk.cpp b/chromium/third_party/dawn/src/dawn/native/vulkan/RenderPipelineVk.cpp
index 9b5349dcea3..47c3c850769 100644
--- a/chromium/third_party/dawn/src/dawn/native/vulkan/RenderPipelineVk.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/RenderPipelineVk.cpp
@@ -14,9 +14,14 @@
#include "dawn/native/vulkan/RenderPipelineVk.h"
+#include <memory>
+#include <utility>
+#include <vector>
+
#include "dawn/native/CreatePipelineAsyncTask.h"
#include "dawn/native/vulkan/DeviceVk.h"
#include "dawn/native/vulkan/FencedDeleter.h"
+#include "dawn/native/vulkan/PipelineCacheVk.h"
#include "dawn/native/vulkan/PipelineLayoutVk.h"
#include "dawn/native/vulkan/RenderPassCache.h"
#include "dawn/native/vulkan/ShaderModuleVk.h"
@@ -26,620 +31,620 @@
namespace dawn::native::vulkan {
- namespace {
+namespace {
- VkVertexInputRate VulkanInputRate(wgpu::VertexStepMode stepMode) {
- switch (stepMode) {
- case wgpu::VertexStepMode::Vertex:
- return VK_VERTEX_INPUT_RATE_VERTEX;
- case wgpu::VertexStepMode::Instance:
- return VK_VERTEX_INPUT_RATE_INSTANCE;
- }
+VkVertexInputRate VulkanInputRate(wgpu::VertexStepMode stepMode) {
+ switch (stepMode) {
+ case wgpu::VertexStepMode::Vertex:
+ return VK_VERTEX_INPUT_RATE_VERTEX;
+ case wgpu::VertexStepMode::Instance:
+ return VK_VERTEX_INPUT_RATE_INSTANCE;
+ case wgpu::VertexStepMode::VertexBufferNotUsed:
UNREACHABLE();
- }
-
- VkFormat VulkanVertexFormat(wgpu::VertexFormat format) {
- switch (format) {
- case wgpu::VertexFormat::Uint8x2:
- return VK_FORMAT_R8G8_UINT;
- case wgpu::VertexFormat::Uint8x4:
- return VK_FORMAT_R8G8B8A8_UINT;
- case wgpu::VertexFormat::Sint8x2:
- return VK_FORMAT_R8G8_SINT;
- case wgpu::VertexFormat::Sint8x4:
- return VK_FORMAT_R8G8B8A8_SINT;
- case wgpu::VertexFormat::Unorm8x2:
- return VK_FORMAT_R8G8_UNORM;
- case wgpu::VertexFormat::Unorm8x4:
- return VK_FORMAT_R8G8B8A8_UNORM;
- case wgpu::VertexFormat::Snorm8x2:
- return VK_FORMAT_R8G8_SNORM;
- case wgpu::VertexFormat::Snorm8x4:
- return VK_FORMAT_R8G8B8A8_SNORM;
- case wgpu::VertexFormat::Uint16x2:
- return VK_FORMAT_R16G16_UINT;
- case wgpu::VertexFormat::Uint16x4:
- return VK_FORMAT_R16G16B16A16_UINT;
- case wgpu::VertexFormat::Sint16x2:
- return VK_FORMAT_R16G16_SINT;
- case wgpu::VertexFormat::Sint16x4:
- return VK_FORMAT_R16G16B16A16_SINT;
- case wgpu::VertexFormat::Unorm16x2:
- return VK_FORMAT_R16G16_UNORM;
- case wgpu::VertexFormat::Unorm16x4:
- return VK_FORMAT_R16G16B16A16_UNORM;
- case wgpu::VertexFormat::Snorm16x2:
- return VK_FORMAT_R16G16_SNORM;
- case wgpu::VertexFormat::Snorm16x4:
- return VK_FORMAT_R16G16B16A16_SNORM;
- case wgpu::VertexFormat::Float16x2:
- return VK_FORMAT_R16G16_SFLOAT;
- case wgpu::VertexFormat::Float16x4:
- return VK_FORMAT_R16G16B16A16_SFLOAT;
- case wgpu::VertexFormat::Float32:
- return VK_FORMAT_R32_SFLOAT;
- case wgpu::VertexFormat::Float32x2:
- return VK_FORMAT_R32G32_SFLOAT;
- case wgpu::VertexFormat::Float32x3:
- return VK_FORMAT_R32G32B32_SFLOAT;
- case wgpu::VertexFormat::Float32x4:
- return VK_FORMAT_R32G32B32A32_SFLOAT;
- case wgpu::VertexFormat::Uint32:
- return VK_FORMAT_R32_UINT;
- case wgpu::VertexFormat::Uint32x2:
- return VK_FORMAT_R32G32_UINT;
- case wgpu::VertexFormat::Uint32x3:
- return VK_FORMAT_R32G32B32_UINT;
- case wgpu::VertexFormat::Uint32x4:
- return VK_FORMAT_R32G32B32A32_UINT;
- case wgpu::VertexFormat::Sint32:
- return VK_FORMAT_R32_SINT;
- case wgpu::VertexFormat::Sint32x2:
- return VK_FORMAT_R32G32_SINT;
- case wgpu::VertexFormat::Sint32x3:
- return VK_FORMAT_R32G32B32_SINT;
- case wgpu::VertexFormat::Sint32x4:
- return VK_FORMAT_R32G32B32A32_SINT;
- default:
- UNREACHABLE();
- }
- }
-
- VkPrimitiveTopology VulkanPrimitiveTopology(wgpu::PrimitiveTopology topology) {
- switch (topology) {
- case wgpu::PrimitiveTopology::PointList:
- return VK_PRIMITIVE_TOPOLOGY_POINT_LIST;
- case wgpu::PrimitiveTopology::LineList:
- return VK_PRIMITIVE_TOPOLOGY_LINE_LIST;
- case wgpu::PrimitiveTopology::LineStrip:
- return VK_PRIMITIVE_TOPOLOGY_LINE_STRIP;
- case wgpu::PrimitiveTopology::TriangleList:
- return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST;
- case wgpu::PrimitiveTopology::TriangleStrip:
- return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP;
- }
+ }
+}
+
+VkFormat VulkanVertexFormat(wgpu::VertexFormat format) {
+ switch (format) {
+ case wgpu::VertexFormat::Uint8x2:
+ return VK_FORMAT_R8G8_UINT;
+ case wgpu::VertexFormat::Uint8x4:
+ return VK_FORMAT_R8G8B8A8_UINT;
+ case wgpu::VertexFormat::Sint8x2:
+ return VK_FORMAT_R8G8_SINT;
+ case wgpu::VertexFormat::Sint8x4:
+ return VK_FORMAT_R8G8B8A8_SINT;
+ case wgpu::VertexFormat::Unorm8x2:
+ return VK_FORMAT_R8G8_UNORM;
+ case wgpu::VertexFormat::Unorm8x4:
+ return VK_FORMAT_R8G8B8A8_UNORM;
+ case wgpu::VertexFormat::Snorm8x2:
+ return VK_FORMAT_R8G8_SNORM;
+ case wgpu::VertexFormat::Snorm8x4:
+ return VK_FORMAT_R8G8B8A8_SNORM;
+ case wgpu::VertexFormat::Uint16x2:
+ return VK_FORMAT_R16G16_UINT;
+ case wgpu::VertexFormat::Uint16x4:
+ return VK_FORMAT_R16G16B16A16_UINT;
+ case wgpu::VertexFormat::Sint16x2:
+ return VK_FORMAT_R16G16_SINT;
+ case wgpu::VertexFormat::Sint16x4:
+ return VK_FORMAT_R16G16B16A16_SINT;
+ case wgpu::VertexFormat::Unorm16x2:
+ return VK_FORMAT_R16G16_UNORM;
+ case wgpu::VertexFormat::Unorm16x4:
+ return VK_FORMAT_R16G16B16A16_UNORM;
+ case wgpu::VertexFormat::Snorm16x2:
+ return VK_FORMAT_R16G16_SNORM;
+ case wgpu::VertexFormat::Snorm16x4:
+ return VK_FORMAT_R16G16B16A16_SNORM;
+ case wgpu::VertexFormat::Float16x2:
+ return VK_FORMAT_R16G16_SFLOAT;
+ case wgpu::VertexFormat::Float16x4:
+ return VK_FORMAT_R16G16B16A16_SFLOAT;
+ case wgpu::VertexFormat::Float32:
+ return VK_FORMAT_R32_SFLOAT;
+ case wgpu::VertexFormat::Float32x2:
+ return VK_FORMAT_R32G32_SFLOAT;
+ case wgpu::VertexFormat::Float32x3:
+ return VK_FORMAT_R32G32B32_SFLOAT;
+ case wgpu::VertexFormat::Float32x4:
+ return VK_FORMAT_R32G32B32A32_SFLOAT;
+ case wgpu::VertexFormat::Uint32:
+ return VK_FORMAT_R32_UINT;
+ case wgpu::VertexFormat::Uint32x2:
+ return VK_FORMAT_R32G32_UINT;
+ case wgpu::VertexFormat::Uint32x3:
+ return VK_FORMAT_R32G32B32_UINT;
+ case wgpu::VertexFormat::Uint32x4:
+ return VK_FORMAT_R32G32B32A32_UINT;
+ case wgpu::VertexFormat::Sint32:
+ return VK_FORMAT_R32_SINT;
+ case wgpu::VertexFormat::Sint32x2:
+ return VK_FORMAT_R32G32_SINT;
+ case wgpu::VertexFormat::Sint32x3:
+ return VK_FORMAT_R32G32B32_SINT;
+ case wgpu::VertexFormat::Sint32x4:
+ return VK_FORMAT_R32G32B32A32_SINT;
+ default:
UNREACHABLE();
- }
-
- bool ShouldEnablePrimitiveRestart(wgpu::PrimitiveTopology topology) {
- // Primitive restart is always enabled in WebGPU but Vulkan validation rules ask that
- // primitive restart be only enabled on primitive topologies that support restarting.
- switch (topology) {
- case wgpu::PrimitiveTopology::PointList:
- case wgpu::PrimitiveTopology::LineList:
- case wgpu::PrimitiveTopology::TriangleList:
- return false;
- case wgpu::PrimitiveTopology::LineStrip:
- case wgpu::PrimitiveTopology::TriangleStrip:
- return true;
+ }
+}
+
+VkPrimitiveTopology VulkanPrimitiveTopology(wgpu::PrimitiveTopology topology) {
+ switch (topology) {
+ case wgpu::PrimitiveTopology::PointList:
+ return VK_PRIMITIVE_TOPOLOGY_POINT_LIST;
+ case wgpu::PrimitiveTopology::LineList:
+ return VK_PRIMITIVE_TOPOLOGY_LINE_LIST;
+ case wgpu::PrimitiveTopology::LineStrip:
+ return VK_PRIMITIVE_TOPOLOGY_LINE_STRIP;
+ case wgpu::PrimitiveTopology::TriangleList:
+ return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST;
+ case wgpu::PrimitiveTopology::TriangleStrip:
+ return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP;
+ }
+ UNREACHABLE();
+}
+
+bool ShouldEnablePrimitiveRestart(wgpu::PrimitiveTopology topology) {
+    // Primitive restart is always enabled in WebGPU, but Vulkan validation rules require that
+    // primitive restart only be enabled on primitive topologies that support restarting.
+ switch (topology) {
+ case wgpu::PrimitiveTopology::PointList:
+ case wgpu::PrimitiveTopology::LineList:
+ case wgpu::PrimitiveTopology::TriangleList:
+ return false;
+ case wgpu::PrimitiveTopology::LineStrip:
+ case wgpu::PrimitiveTopology::TriangleStrip:
+ return true;
+ }
+ UNREACHABLE();
+}
+
+VkFrontFace VulkanFrontFace(wgpu::FrontFace face) {
+ switch (face) {
+ case wgpu::FrontFace::CCW:
+ return VK_FRONT_FACE_COUNTER_CLOCKWISE;
+ case wgpu::FrontFace::CW:
+ return VK_FRONT_FACE_CLOCKWISE;
+ }
+ UNREACHABLE();
+}
+
+VkCullModeFlagBits VulkanCullMode(wgpu::CullMode mode) {
+ switch (mode) {
+ case wgpu::CullMode::None:
+ return VK_CULL_MODE_NONE;
+ case wgpu::CullMode::Front:
+ return VK_CULL_MODE_FRONT_BIT;
+ case wgpu::CullMode::Back:
+ return VK_CULL_MODE_BACK_BIT;
+ }
+ UNREACHABLE();
+}
+
+VkBlendFactor VulkanBlendFactor(wgpu::BlendFactor factor) {
+ switch (factor) {
+ case wgpu::BlendFactor::Zero:
+ return VK_BLEND_FACTOR_ZERO;
+ case wgpu::BlendFactor::One:
+ return VK_BLEND_FACTOR_ONE;
+ case wgpu::BlendFactor::Src:
+ return VK_BLEND_FACTOR_SRC_COLOR;
+ case wgpu::BlendFactor::OneMinusSrc:
+ return VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR;
+ case wgpu::BlendFactor::SrcAlpha:
+ return VK_BLEND_FACTOR_SRC_ALPHA;
+ case wgpu::BlendFactor::OneMinusSrcAlpha:
+ return VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA;
+ case wgpu::BlendFactor::Dst:
+ return VK_BLEND_FACTOR_DST_COLOR;
+ case wgpu::BlendFactor::OneMinusDst:
+ return VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR;
+ case wgpu::BlendFactor::DstAlpha:
+ return VK_BLEND_FACTOR_DST_ALPHA;
+ case wgpu::BlendFactor::OneMinusDstAlpha:
+ return VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA;
+ case wgpu::BlendFactor::SrcAlphaSaturated:
+ return VK_BLEND_FACTOR_SRC_ALPHA_SATURATE;
+ case wgpu::BlendFactor::Constant:
+ return VK_BLEND_FACTOR_CONSTANT_COLOR;
+ case wgpu::BlendFactor::OneMinusConstant:
+ return VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR;
+ }
+ UNREACHABLE();
+}
+
+VkBlendOp VulkanBlendOperation(wgpu::BlendOperation operation) {
+ switch (operation) {
+ case wgpu::BlendOperation::Add:
+ return VK_BLEND_OP_ADD;
+ case wgpu::BlendOperation::Subtract:
+ return VK_BLEND_OP_SUBTRACT;
+ case wgpu::BlendOperation::ReverseSubtract:
+ return VK_BLEND_OP_REVERSE_SUBTRACT;
+ case wgpu::BlendOperation::Min:
+ return VK_BLEND_OP_MIN;
+ case wgpu::BlendOperation::Max:
+ return VK_BLEND_OP_MAX;
+ }
+ UNREACHABLE();
+}
+
+VkColorComponentFlags VulkanColorWriteMask(wgpu::ColorWriteMask mask,
+ bool isDeclaredInFragmentShader) {
+ // Vulkan and Dawn color write masks match, static assert it and return the mask
+ static_assert(static_cast<VkColorComponentFlagBits>(wgpu::ColorWriteMask::Red) ==
+ VK_COLOR_COMPONENT_R_BIT);
+ static_assert(static_cast<VkColorComponentFlagBits>(wgpu::ColorWriteMask::Green) ==
+ VK_COLOR_COMPONENT_G_BIT);
+ static_assert(static_cast<VkColorComponentFlagBits>(wgpu::ColorWriteMask::Blue) ==
+ VK_COLOR_COMPONENT_B_BIT);
+ static_assert(static_cast<VkColorComponentFlagBits>(wgpu::ColorWriteMask::Alpha) ==
+ VK_COLOR_COMPONENT_A_BIT);
+
+    // According to the Vulkan spec (chapter 14.3), "The input values to blending or color
+    // attachment writes are undefined for components which do not correspond to a fragment
+    // shader outputs", so we set the color write mask to 0 to prevent such undefined values
+    // from being written into the color attachments.
+ return isDeclaredInFragmentShader ? static_cast<VkColorComponentFlags>(mask)
+ : static_cast<VkColorComponentFlags>(0);
+}
+
+VkPipelineColorBlendAttachmentState ComputeColorDesc(const ColorTargetState* state,
+ bool isDeclaredInFragmentShader) {
+ VkPipelineColorBlendAttachmentState attachment;
+ attachment.blendEnable = state->blend != nullptr ? VK_TRUE : VK_FALSE;
+ if (attachment.blendEnable) {
+ attachment.srcColorBlendFactor = VulkanBlendFactor(state->blend->color.srcFactor);
+ attachment.dstColorBlendFactor = VulkanBlendFactor(state->blend->color.dstFactor);
+ attachment.colorBlendOp = VulkanBlendOperation(state->blend->color.operation);
+ attachment.srcAlphaBlendFactor = VulkanBlendFactor(state->blend->alpha.srcFactor);
+ attachment.dstAlphaBlendFactor = VulkanBlendFactor(state->blend->alpha.dstFactor);
+ attachment.alphaBlendOp = VulkanBlendOperation(state->blend->alpha.operation);
+ } else {
+ // Swiftshader's Vulkan implementation appears to expect these values to be valid
+ // even when blending is not enabled.
+ attachment.srcColorBlendFactor = VK_BLEND_FACTOR_ONE;
+ attachment.dstColorBlendFactor = VK_BLEND_FACTOR_ZERO;
+ attachment.colorBlendOp = VK_BLEND_OP_ADD;
+ attachment.srcAlphaBlendFactor = VK_BLEND_FACTOR_ONE;
+ attachment.dstAlphaBlendFactor = VK_BLEND_FACTOR_ZERO;
+ attachment.alphaBlendOp = VK_BLEND_OP_ADD;
+ }
+ attachment.colorWriteMask = VulkanColorWriteMask(state->writeMask, isDeclaredInFragmentShader);
+ return attachment;
+}
+
+VkStencilOp VulkanStencilOp(wgpu::StencilOperation op) {
+ switch (op) {
+ case wgpu::StencilOperation::Keep:
+ return VK_STENCIL_OP_KEEP;
+ case wgpu::StencilOperation::Zero:
+ return VK_STENCIL_OP_ZERO;
+ case wgpu::StencilOperation::Replace:
+ return VK_STENCIL_OP_REPLACE;
+ case wgpu::StencilOperation::IncrementClamp:
+ return VK_STENCIL_OP_INCREMENT_AND_CLAMP;
+ case wgpu::StencilOperation::DecrementClamp:
+ return VK_STENCIL_OP_DECREMENT_AND_CLAMP;
+ case wgpu::StencilOperation::Invert:
+ return VK_STENCIL_OP_INVERT;
+ case wgpu::StencilOperation::IncrementWrap:
+ return VK_STENCIL_OP_INCREMENT_AND_WRAP;
+ case wgpu::StencilOperation::DecrementWrap:
+ return VK_STENCIL_OP_DECREMENT_AND_WRAP;
+ }
+ UNREACHABLE();
+}
+
+VkPipelineDepthStencilStateCreateInfo ComputeDepthStencilDesc(const DepthStencilState* descriptor) {
+ VkPipelineDepthStencilStateCreateInfo depthStencilState;
+ depthStencilState.sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO;
+ depthStencilState.pNext = nullptr;
+ depthStencilState.flags = 0;
+
+ // Depth writes only occur if depth is enabled
+ depthStencilState.depthTestEnable =
+ (descriptor->depthCompare == wgpu::CompareFunction::Always &&
+ !descriptor->depthWriteEnabled)
+ ? VK_FALSE
+ : VK_TRUE;
+ depthStencilState.depthWriteEnable = descriptor->depthWriteEnabled ? VK_TRUE : VK_FALSE;
+ depthStencilState.depthCompareOp = ToVulkanCompareOp(descriptor->depthCompare);
+ depthStencilState.depthBoundsTestEnable = false;
+ depthStencilState.minDepthBounds = 0.0f;
+ depthStencilState.maxDepthBounds = 1.0f;
+
+ depthStencilState.stencilTestEnable = StencilTestEnabled(descriptor) ? VK_TRUE : VK_FALSE;
+
+ depthStencilState.front.failOp = VulkanStencilOp(descriptor->stencilFront.failOp);
+ depthStencilState.front.passOp = VulkanStencilOp(descriptor->stencilFront.passOp);
+ depthStencilState.front.depthFailOp = VulkanStencilOp(descriptor->stencilFront.depthFailOp);
+ depthStencilState.front.compareOp = ToVulkanCompareOp(descriptor->stencilFront.compare);
+
+ depthStencilState.back.failOp = VulkanStencilOp(descriptor->stencilBack.failOp);
+ depthStencilState.back.passOp = VulkanStencilOp(descriptor->stencilBack.passOp);
+ depthStencilState.back.depthFailOp = VulkanStencilOp(descriptor->stencilBack.depthFailOp);
+ depthStencilState.back.compareOp = ToVulkanCompareOp(descriptor->stencilBack.compare);
+
+ // Dawn doesn't have separate front and back stencil masks.
+ depthStencilState.front.compareMask = descriptor->stencilReadMask;
+ depthStencilState.back.compareMask = descriptor->stencilReadMask;
+ depthStencilState.front.writeMask = descriptor->stencilWriteMask;
+ depthStencilState.back.writeMask = descriptor->stencilWriteMask;
+
+ // The stencil reference is always dynamic
+ depthStencilState.front.reference = 0;
+ depthStencilState.back.reference = 0;
+
+ return depthStencilState;
+}
+
+} // anonymous namespace
+
+// static
+Ref<RenderPipeline> RenderPipeline::CreateUninitialized(
+ Device* device,
+ const RenderPipelineDescriptor* descriptor) {
+ return AcquireRef(new RenderPipeline(device, descriptor));
+}
+
+MaybeError RenderPipeline::Initialize() {
+ Device* device = ToBackend(GetDevice());
+ const PipelineLayout* layout = ToBackend(GetLayout());
+
+    // Vulkan devices need the cache UUID field to be serialized into pipeline cache keys.
+ mCacheKey.Record(device->GetDeviceInfo().properties.pipelineCacheUUID);
+
+    // There are at most 2 shader stages in a render pipeline, i.e. vertex and fragment
+ std::array<VkPipelineShaderStageCreateInfo, 2> shaderStages;
+ std::array<std::vector<OverridableConstantScalar>, 2> specializationDataEntriesPerStages;
+ std::array<std::vector<VkSpecializationMapEntry>, 2> specializationMapEntriesPerStages;
+ std::array<VkSpecializationInfo, 2> specializationInfoPerStages;
+ uint32_t stageCount = 0;
+
+ for (auto stage : IterateStages(this->GetStageMask())) {
+ VkPipelineShaderStageCreateInfo shaderStage;
+
+ const ProgrammableStage& programmableStage = GetStage(stage);
+ ShaderModule* module = ToBackend(programmableStage.module.Get());
+ const ShaderModule::Spirv* spirv;
+ DAWN_TRY_ASSIGN(std::tie(shaderStage.module, spirv),
+ module->GetHandleAndSpirv(programmableStage.entryPoint.c_str(), layout));
+
+ shaderStage.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
+ shaderStage.pNext = nullptr;
+ shaderStage.flags = 0;
+ shaderStage.pSpecializationInfo = nullptr;
+ shaderStage.pName = programmableStage.entryPoint.c_str();
+
+ switch (stage) {
+ case dawn::native::SingleShaderStage::Vertex: {
+ shaderStage.stage = VK_SHADER_STAGE_VERTEX_BIT;
+ break;
}
- UNREACHABLE();
- }
-
- VkFrontFace VulkanFrontFace(wgpu::FrontFace face) {
- switch (face) {
- case wgpu::FrontFace::CCW:
- return VK_FRONT_FACE_COUNTER_CLOCKWISE;
- case wgpu::FrontFace::CW:
- return VK_FRONT_FACE_CLOCKWISE;
+ case dawn::native::SingleShaderStage::Fragment: {
+ shaderStage.stage = VK_SHADER_STAGE_FRAGMENT_BIT;
+ break;
}
- UNREACHABLE();
- }
-
- VkCullModeFlagBits VulkanCullMode(wgpu::CullMode mode) {
- switch (mode) {
- case wgpu::CullMode::None:
- return VK_CULL_MODE_NONE;
- case wgpu::CullMode::Front:
- return VK_CULL_MODE_FRONT_BIT;
- case wgpu::CullMode::Back:
- return VK_CULL_MODE_BACK_BIT;
+ default: {
+            // Only the Vertex and Fragment stages are possible in a render pipeline.
+ DAWN_UNREACHABLE();
+ break;
}
- UNREACHABLE();
}
- VkBlendFactor VulkanBlendFactor(wgpu::BlendFactor factor) {
- switch (factor) {
- case wgpu::BlendFactor::Zero:
- return VK_BLEND_FACTOR_ZERO;
- case wgpu::BlendFactor::One:
- return VK_BLEND_FACTOR_ONE;
- case wgpu::BlendFactor::Src:
- return VK_BLEND_FACTOR_SRC_COLOR;
- case wgpu::BlendFactor::OneMinusSrc:
- return VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR;
- case wgpu::BlendFactor::SrcAlpha:
- return VK_BLEND_FACTOR_SRC_ALPHA;
- case wgpu::BlendFactor::OneMinusSrcAlpha:
- return VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA;
- case wgpu::BlendFactor::Dst:
- return VK_BLEND_FACTOR_DST_COLOR;
- case wgpu::BlendFactor::OneMinusDst:
- return VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR;
- case wgpu::BlendFactor::DstAlpha:
- return VK_BLEND_FACTOR_DST_ALPHA;
- case wgpu::BlendFactor::OneMinusDstAlpha:
- return VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA;
- case wgpu::BlendFactor::SrcAlphaSaturated:
- return VK_BLEND_FACTOR_SRC_ALPHA_SATURATE;
- case wgpu::BlendFactor::Constant:
- return VK_BLEND_FACTOR_CONSTANT_COLOR;
- case wgpu::BlendFactor::OneMinusConstant:
- return VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR;
- }
- UNREACHABLE();
- }
+ shaderStage.pSpecializationInfo =
+ GetVkSpecializationInfo(programmableStage, &specializationInfoPerStages[stageCount],
+ &specializationDataEntriesPerStages[stageCount],
+ &specializationMapEntriesPerStages[stageCount]);
- VkBlendOp VulkanBlendOperation(wgpu::BlendOperation operation) {
- switch (operation) {
- case wgpu::BlendOperation::Add:
- return VK_BLEND_OP_ADD;
- case wgpu::BlendOperation::Subtract:
- return VK_BLEND_OP_SUBTRACT;
- case wgpu::BlendOperation::ReverseSubtract:
- return VK_BLEND_OP_REVERSE_SUBTRACT;
- case wgpu::BlendOperation::Min:
- return VK_BLEND_OP_MIN;
- case wgpu::BlendOperation::Max:
- return VK_BLEND_OP_MAX;
- }
- UNREACHABLE();
- }
+ DAWN_ASSERT(stageCount < 2);
+ shaderStages[stageCount] = shaderStage;
+ stageCount++;
- VkColorComponentFlags VulkanColorWriteMask(wgpu::ColorWriteMask mask,
- bool isDeclaredInFragmentShader) {
- // Vulkan and Dawn color write masks match, static assert it and return the mask
- static_assert(static_cast<VkColorComponentFlagBits>(wgpu::ColorWriteMask::Red) ==
- VK_COLOR_COMPONENT_R_BIT);
- static_assert(static_cast<VkColorComponentFlagBits>(wgpu::ColorWriteMask::Green) ==
- VK_COLOR_COMPONENT_G_BIT);
- static_assert(static_cast<VkColorComponentFlagBits>(wgpu::ColorWriteMask::Blue) ==
- VK_COLOR_COMPONENT_B_BIT);
- static_assert(static_cast<VkColorComponentFlagBits>(wgpu::ColorWriteMask::Alpha) ==
- VK_COLOR_COMPONENT_A_BIT);
-
- // According to Vulkan SPEC (Chapter 14.3): "The input values to blending or color
- // attachment writes are undefined for components which do not correspond to a fragment
- // shader outputs", we set the color write mask to 0 to prevent such undefined values
- // being written into the color attachments.
- return isDeclaredInFragmentShader ? static_cast<VkColorComponentFlags>(mask)
- : static_cast<VkColorComponentFlags>(0);
- }
+ // Record cache key for each shader since it will become inaccessible later on.
+ mCacheKey.Record(stage).RecordIterable(*spirv);
+ }
- VkPipelineColorBlendAttachmentState ComputeColorDesc(const ColorTargetState* state,
- bool isDeclaredInFragmentShader) {
- VkPipelineColorBlendAttachmentState attachment;
- attachment.blendEnable = state->blend != nullptr ? VK_TRUE : VK_FALSE;
- if (attachment.blendEnable) {
- attachment.srcColorBlendFactor = VulkanBlendFactor(state->blend->color.srcFactor);
- attachment.dstColorBlendFactor = VulkanBlendFactor(state->blend->color.dstFactor);
- attachment.colorBlendOp = VulkanBlendOperation(state->blend->color.operation);
- attachment.srcAlphaBlendFactor = VulkanBlendFactor(state->blend->alpha.srcFactor);
- attachment.dstAlphaBlendFactor = VulkanBlendFactor(state->blend->alpha.dstFactor);
- attachment.alphaBlendOp = VulkanBlendOperation(state->blend->alpha.operation);
- } else {
- // Swiftshader's Vulkan implementation appears to expect these values to be valid
- // even when blending is not enabled.
- attachment.srcColorBlendFactor = VK_BLEND_FACTOR_ONE;
- attachment.dstColorBlendFactor = VK_BLEND_FACTOR_ZERO;
- attachment.colorBlendOp = VK_BLEND_OP_ADD;
- attachment.srcAlphaBlendFactor = VK_BLEND_FACTOR_ONE;
- attachment.dstAlphaBlendFactor = VK_BLEND_FACTOR_ZERO;
- attachment.alphaBlendOp = VK_BLEND_OP_ADD;
- }
- attachment.colorWriteMask =
- VulkanColorWriteMask(state->writeMask, isDeclaredInFragmentShader);
- return attachment;
+ PipelineVertexInputStateCreateInfoTemporaryAllocations tempAllocations;
+ VkPipelineVertexInputStateCreateInfo vertexInputCreateInfo =
+ ComputeVertexInputDesc(&tempAllocations);
+
+ VkPipelineInputAssemblyStateCreateInfo inputAssembly;
+ inputAssembly.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
+ inputAssembly.pNext = nullptr;
+ inputAssembly.flags = 0;
+ inputAssembly.topology = VulkanPrimitiveTopology(GetPrimitiveTopology());
+ inputAssembly.primitiveRestartEnable = ShouldEnablePrimitiveRestart(GetPrimitiveTopology());
+
+    // A placeholder viewport/scissor info. The validation layers force us to provide at least
+    // one scissor and one viewport here, even if we choose to make them dynamic.
+ VkViewport viewportDesc;
+ viewportDesc.x = 0.0f;
+ viewportDesc.y = 0.0f;
+ viewportDesc.width = 1.0f;
+ viewportDesc.height = 1.0f;
+ viewportDesc.minDepth = 0.0f;
+ viewportDesc.maxDepth = 1.0f;
+ VkRect2D scissorRect;
+ scissorRect.offset.x = 0;
+ scissorRect.offset.y = 0;
+ scissorRect.extent.width = 1;
+ scissorRect.extent.height = 1;
+ VkPipelineViewportStateCreateInfo viewport;
+ viewport.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
+ viewport.pNext = nullptr;
+ viewport.flags = 0;
+ viewport.viewportCount = 1;
+ viewport.pViewports = &viewportDesc;
+ viewport.scissorCount = 1;
+ viewport.pScissors = &scissorRect;
+
+ VkPipelineRasterizationStateCreateInfo rasterization;
+ rasterization.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
+ rasterization.pNext = nullptr;
+ rasterization.flags = 0;
+ rasterization.depthClampEnable = ShouldClampDepth() ? VK_TRUE : VK_FALSE;
+ rasterization.rasterizerDiscardEnable = VK_FALSE;
+ rasterization.polygonMode = VK_POLYGON_MODE_FILL;
+ rasterization.cullMode = VulkanCullMode(GetCullMode());
+ rasterization.frontFace = VulkanFrontFace(GetFrontFace());
+ rasterization.depthBiasEnable = IsDepthBiasEnabled();
+ rasterization.depthBiasConstantFactor = GetDepthBias();
+ rasterization.depthBiasClamp = GetDepthBiasClamp();
+ rasterization.depthBiasSlopeFactor = GetDepthBiasSlopeScale();
+ rasterization.lineWidth = 1.0f;
+
+ VkPipelineMultisampleStateCreateInfo multisample;
+ multisample.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
+ multisample.pNext = nullptr;
+ multisample.flags = 0;
+ multisample.rasterizationSamples = VulkanSampleCount(GetSampleCount());
+ multisample.sampleShadingEnable = VK_FALSE;
+ multisample.minSampleShading = 0.0f;
+ // VkPipelineMultisampleStateCreateInfo.pSampleMask is an array of length
+ // ceil(rasterizationSamples / 32) and since we're passing a single uint32_t
+ // we have to assert that this length is indeed 1.
+ ASSERT(multisample.rasterizationSamples <= 32);
+ VkSampleMask sampleMask = GetSampleMask();
+ multisample.pSampleMask = &sampleMask;
+ multisample.alphaToCoverageEnable = IsAlphaToCoverageEnabled();
+ multisample.alphaToOneEnable = VK_FALSE;
+
+ VkPipelineDepthStencilStateCreateInfo depthStencilState =
+ ComputeDepthStencilDesc(GetDepthStencilState());
+
+ VkPipelineColorBlendStateCreateInfo colorBlend;
+    // colorBlend may hold pointers to elements in colorBlendAttachments, so
+    // colorBlendAttachments must remain in scope for at least as long as colorBlend.
+ ityp::array<ColorAttachmentIndex, VkPipelineColorBlendAttachmentState, kMaxColorAttachments>
+ colorBlendAttachments;
+ if (GetStageMask() & wgpu::ShaderStage::Fragment) {
+ // Initialize the "blend state info" that will be chained in the "create info" from the
+ // data pre-computed in the ColorState
+ for (auto& blend : colorBlendAttachments) {
+ blend.blendEnable = VK_FALSE;
+ blend.srcColorBlendFactor = VK_BLEND_FACTOR_ONE;
+ blend.dstColorBlendFactor = VK_BLEND_FACTOR_ZERO;
+ blend.colorBlendOp = VK_BLEND_OP_ADD;
+ blend.srcAlphaBlendFactor = VK_BLEND_FACTOR_ONE;
+ blend.dstAlphaBlendFactor = VK_BLEND_FACTOR_ZERO;
+ blend.alphaBlendOp = VK_BLEND_OP_ADD;
+ blend.colorWriteMask = 0;
}
- VkStencilOp VulkanStencilOp(wgpu::StencilOperation op) {
- switch (op) {
- case wgpu::StencilOperation::Keep:
- return VK_STENCIL_OP_KEEP;
- case wgpu::StencilOperation::Zero:
- return VK_STENCIL_OP_ZERO;
- case wgpu::StencilOperation::Replace:
- return VK_STENCIL_OP_REPLACE;
- case wgpu::StencilOperation::IncrementClamp:
- return VK_STENCIL_OP_INCREMENT_AND_CLAMP;
- case wgpu::StencilOperation::DecrementClamp:
- return VK_STENCIL_OP_DECREMENT_AND_CLAMP;
- case wgpu::StencilOperation::Invert:
- return VK_STENCIL_OP_INVERT;
- case wgpu::StencilOperation::IncrementWrap:
- return VK_STENCIL_OP_INCREMENT_AND_WRAP;
- case wgpu::StencilOperation::DecrementWrap:
- return VK_STENCIL_OP_DECREMENT_AND_WRAP;
- }
- UNREACHABLE();
+ const auto& fragmentOutputsWritten =
+ GetStage(SingleShaderStage::Fragment).metadata->fragmentOutputsWritten;
+ ColorAttachmentIndex highestColorAttachmentIndexPlusOne =
+ GetHighestBitIndexPlusOne(GetColorAttachmentsMask());
+ for (ColorAttachmentIndex i : IterateBitSet(GetColorAttachmentsMask())) {
+ const ColorTargetState* target = GetColorTargetState(i);
+ colorBlendAttachments[i] = ComputeColorDesc(target, fragmentOutputsWritten[i]);
}
- VkPipelineDepthStencilStateCreateInfo ComputeDepthStencilDesc(
- const DepthStencilState* descriptor) {
- VkPipelineDepthStencilStateCreateInfo depthStencilState;
- depthStencilState.sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO;
- depthStencilState.pNext = nullptr;
- depthStencilState.flags = 0;
-
- // Depth writes only occur if depth is enabled
- depthStencilState.depthTestEnable =
- (descriptor->depthCompare == wgpu::CompareFunction::Always &&
- !descriptor->depthWriteEnabled)
- ? VK_FALSE
- : VK_TRUE;
- depthStencilState.depthWriteEnable = descriptor->depthWriteEnabled ? VK_TRUE : VK_FALSE;
- depthStencilState.depthCompareOp = ToVulkanCompareOp(descriptor->depthCompare);
- depthStencilState.depthBoundsTestEnable = false;
- depthStencilState.minDepthBounds = 0.0f;
- depthStencilState.maxDepthBounds = 1.0f;
-
- depthStencilState.stencilTestEnable =
- StencilTestEnabled(descriptor) ? VK_TRUE : VK_FALSE;
-
- depthStencilState.front.failOp = VulkanStencilOp(descriptor->stencilFront.failOp);
- depthStencilState.front.passOp = VulkanStencilOp(descriptor->stencilFront.passOp);
- depthStencilState.front.depthFailOp =
- VulkanStencilOp(descriptor->stencilFront.depthFailOp);
- depthStencilState.front.compareOp = ToVulkanCompareOp(descriptor->stencilFront.compare);
-
- depthStencilState.back.failOp = VulkanStencilOp(descriptor->stencilBack.failOp);
- depthStencilState.back.passOp = VulkanStencilOp(descriptor->stencilBack.passOp);
- depthStencilState.back.depthFailOp =
- VulkanStencilOp(descriptor->stencilBack.depthFailOp);
- depthStencilState.back.compareOp = ToVulkanCompareOp(descriptor->stencilBack.compare);
-
- // Dawn doesn't have separate front and back stencil masks.
- depthStencilState.front.compareMask = descriptor->stencilReadMask;
- depthStencilState.back.compareMask = descriptor->stencilReadMask;
- depthStencilState.front.writeMask = descriptor->stencilWriteMask;
- depthStencilState.back.writeMask = descriptor->stencilWriteMask;
-
- // The stencil reference is always dynamic
- depthStencilState.front.reference = 0;
- depthStencilState.back.reference = 0;
-
- return depthStencilState;
- }
-
- } // anonymous namespace
-
- // static
- Ref<RenderPipeline> RenderPipeline::CreateUninitialized(
- Device* device,
- const RenderPipelineDescriptor* descriptor) {
- return AcquireRef(new RenderPipeline(device, descriptor));
+ colorBlend.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO;
+ colorBlend.pNext = nullptr;
+ colorBlend.flags = 0;
+ // LogicOp isn't supported so we disable it.
+ colorBlend.logicOpEnable = VK_FALSE;
+ colorBlend.logicOp = VK_LOGIC_OP_CLEAR;
+ colorBlend.attachmentCount = static_cast<uint8_t>(highestColorAttachmentIndexPlusOne);
+ colorBlend.pAttachments = colorBlendAttachments.data();
+ // The blend constant is always dynamic so we fill in a placeholder value
+ colorBlend.blendConstants[0] = 0.0f;
+ colorBlend.blendConstants[1] = 0.0f;
+ colorBlend.blendConstants[2] = 0.0f;
+ colorBlend.blendConstants[3] = 0.0f;
}
- MaybeError RenderPipeline::Initialize() {
- Device* device = ToBackend(GetDevice());
- PipelineLayout* layout = ToBackend(GetLayout());
-
- // There are at most 2 shader stages in render pipeline, i.e. vertex and fragment
- std::array<VkPipelineShaderStageCreateInfo, 2> shaderStages;
- std::array<std::vector<OverridableConstantScalar>, 2> specializationDataEntriesPerStages;
- std::array<std::vector<VkSpecializationMapEntry>, 2> specializationMapEntriesPerStages;
- std::array<VkSpecializationInfo, 2> specializationInfoPerStages;
- uint32_t stageCount = 0;
-
- for (auto stage : IterateStages(this->GetStageMask())) {
- VkPipelineShaderStageCreateInfo shaderStage;
-
- const ProgrammableStage& programmableStage = GetStage(stage);
- ShaderModule* module = ToBackend(programmableStage.module.Get());
- const ShaderModule::Spirv* spirv;
- DAWN_TRY_ASSIGN(
- std::tie(shaderStage.module, spirv),
- module->GetHandleAndSpirv(programmableStage.entryPoint.c_str(), layout));
-
- shaderStage.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
- shaderStage.pNext = nullptr;
- shaderStage.flags = 0;
- shaderStage.pSpecializationInfo = nullptr;
- shaderStage.pName = programmableStage.entryPoint.c_str();
-
- switch (stage) {
- case dawn::native::SingleShaderStage::Vertex: {
- shaderStage.stage = VK_SHADER_STAGE_VERTEX_BIT;
- break;
- }
- case dawn::native::SingleShaderStage::Fragment: {
- shaderStage.stage = VK_SHADER_STAGE_FRAGMENT_BIT;
- break;
- }
- default: {
- // For render pipeline only Vertex and Fragment stage is possible
- DAWN_UNREACHABLE();
- break;
- }
- }
-
- shaderStage.pSpecializationInfo =
- GetVkSpecializationInfo(programmableStage, &specializationInfoPerStages[stageCount],
- &specializationDataEntriesPerStages[stageCount],
- &specializationMapEntriesPerStages[stageCount]);
-
- DAWN_ASSERT(stageCount < 2);
- shaderStages[stageCount] = shaderStage;
- stageCount++;
-
- // Record cache key for each shader since it will become inaccessible later on.
- GetCacheKey()->Record(stage).RecordIterable(*spirv);
+    // Tag all state as dynamic except the stencil masks and depth bias.
+ VkDynamicState dynamicStates[] = {
+ VK_DYNAMIC_STATE_VIEWPORT, VK_DYNAMIC_STATE_SCISSOR,
+ VK_DYNAMIC_STATE_LINE_WIDTH, VK_DYNAMIC_STATE_BLEND_CONSTANTS,
+ VK_DYNAMIC_STATE_DEPTH_BOUNDS, VK_DYNAMIC_STATE_STENCIL_REFERENCE,
+ };
+ VkPipelineDynamicStateCreateInfo dynamic;
+ dynamic.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
+ dynamic.pNext = nullptr;
+ dynamic.flags = 0;
+ dynamic.dynamicStateCount = sizeof(dynamicStates) / sizeof(dynamicStates[0]);
+ dynamic.pDynamicStates = dynamicStates;
+
+    // Get a VkRenderPass that matches the attachment formats for this pipeline; the load/store
+    // ops don't matter, so set them all to LoadOp::Load / StoreOp::Store. Whether the render
+    // pass has a resolve target and whether the depth/stencil attachment is read-only also
+    // don't matter, so set them both to false.
+ VkRenderPass renderPass = VK_NULL_HANDLE;
+ {
+ RenderPassCacheQuery query;
+
+ for (ColorAttachmentIndex i : IterateBitSet(GetColorAttachmentsMask())) {
+ query.SetColor(i, GetColorAttachmentFormat(i), wgpu::LoadOp::Load, wgpu::StoreOp::Store,
+ false);
}
- PipelineVertexInputStateCreateInfoTemporaryAllocations tempAllocations;
- VkPipelineVertexInputStateCreateInfo vertexInputCreateInfo =
- ComputeVertexInputDesc(&tempAllocations);
-
- VkPipelineInputAssemblyStateCreateInfo inputAssembly;
- inputAssembly.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
- inputAssembly.pNext = nullptr;
- inputAssembly.flags = 0;
- inputAssembly.topology = VulkanPrimitiveTopology(GetPrimitiveTopology());
- inputAssembly.primitiveRestartEnable = ShouldEnablePrimitiveRestart(GetPrimitiveTopology());
-
- // A dummy viewport/scissor info. The validation layers force use to provide at least one
- // scissor and one viewport here, even if we choose to make them dynamic.
- VkViewport viewportDesc;
- viewportDesc.x = 0.0f;
- viewportDesc.y = 0.0f;
- viewportDesc.width = 1.0f;
- viewportDesc.height = 1.0f;
- viewportDesc.minDepth = 0.0f;
- viewportDesc.maxDepth = 1.0f;
- VkRect2D scissorRect;
- scissorRect.offset.x = 0;
- scissorRect.offset.y = 0;
- scissorRect.extent.width = 1;
- scissorRect.extent.height = 1;
- VkPipelineViewportStateCreateInfo viewport;
- viewport.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
- viewport.pNext = nullptr;
- viewport.flags = 0;
- viewport.viewportCount = 1;
- viewport.pViewports = &viewportDesc;
- viewport.scissorCount = 1;
- viewport.pScissors = &scissorRect;
-
- VkPipelineRasterizationStateCreateInfo rasterization;
- rasterization.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
- rasterization.pNext = nullptr;
- rasterization.flags = 0;
- rasterization.depthClampEnable = ShouldClampDepth() ? VK_TRUE : VK_FALSE;
- rasterization.rasterizerDiscardEnable = VK_FALSE;
- rasterization.polygonMode = VK_POLYGON_MODE_FILL;
- rasterization.cullMode = VulkanCullMode(GetCullMode());
- rasterization.frontFace = VulkanFrontFace(GetFrontFace());
- rasterization.depthBiasEnable = IsDepthBiasEnabled();
- rasterization.depthBiasConstantFactor = GetDepthBias();
- rasterization.depthBiasClamp = GetDepthBiasClamp();
- rasterization.depthBiasSlopeFactor = GetDepthBiasSlopeScale();
- rasterization.lineWidth = 1.0f;
-
- VkPipelineMultisampleStateCreateInfo multisample;
- multisample.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
- multisample.pNext = nullptr;
- multisample.flags = 0;
- multisample.rasterizationSamples = VulkanSampleCount(GetSampleCount());
- multisample.sampleShadingEnable = VK_FALSE;
- multisample.minSampleShading = 0.0f;
- // VkPipelineMultisampleStateCreateInfo.pSampleMask is an array of length
- // ceil(rasterizationSamples / 32) and since we're passing a single uint32_t
- // we have to assert that this length is indeed 1.
- ASSERT(multisample.rasterizationSamples <= 32);
- VkSampleMask sampleMask = GetSampleMask();
- multisample.pSampleMask = &sampleMask;
- multisample.alphaToCoverageEnable = IsAlphaToCoverageEnabled();
- multisample.alphaToOneEnable = VK_FALSE;
-
- VkPipelineDepthStencilStateCreateInfo depthStencilState =
- ComputeDepthStencilDesc(GetDepthStencilState());
-
- VkPipelineColorBlendStateCreateInfo colorBlend;
- // colorBlend may hold pointers to elements in colorBlendAttachments, so it must have a
- // definition scope as same as colorBlend
- ityp::array<ColorAttachmentIndex, VkPipelineColorBlendAttachmentState, kMaxColorAttachments>
- colorBlendAttachments;
- if (GetStageMask() & wgpu::ShaderStage::Fragment) {
- // Initialize the "blend state info" that will be chained in the "create info" from the
- // data pre-computed in the ColorState
- for (auto& blend : colorBlendAttachments) {
- blend.blendEnable = VK_FALSE;
- blend.srcColorBlendFactor = VK_BLEND_FACTOR_ONE;
- blend.dstColorBlendFactor = VK_BLEND_FACTOR_ZERO;
- blend.colorBlendOp = VK_BLEND_OP_ADD;
- blend.srcAlphaBlendFactor = VK_BLEND_FACTOR_ONE;
- blend.dstAlphaBlendFactor = VK_BLEND_FACTOR_ZERO;
- blend.alphaBlendOp = VK_BLEND_OP_ADD;
- blend.colorWriteMask = 0;
- }
-
- const auto& fragmentOutputsWritten =
- GetStage(SingleShaderStage::Fragment).metadata->fragmentOutputsWritten;
- ColorAttachmentIndex highestColorAttachmentIndexPlusOne =
- GetHighestBitIndexPlusOne(GetColorAttachmentsMask());
- for (ColorAttachmentIndex i : IterateBitSet(GetColorAttachmentsMask())) {
- const ColorTargetState* target = GetColorTargetState(i);
- colorBlendAttachments[i] = ComputeColorDesc(target, fragmentOutputsWritten[i]);
- }
-
- colorBlend.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO;
- colorBlend.pNext = nullptr;
- colorBlend.flags = 0;
- // LogicOp isn't supported so we disable it.
- colorBlend.logicOpEnable = VK_FALSE;
- colorBlend.logicOp = VK_LOGIC_OP_CLEAR;
- colorBlend.attachmentCount = static_cast<uint8_t>(highestColorAttachmentIndexPlusOne);
- colorBlend.pAttachments = colorBlendAttachments.data();
- // The blend constant is always dynamic so we fill in a dummy value
- colorBlend.blendConstants[0] = 0.0f;
- colorBlend.blendConstants[1] = 0.0f;
- colorBlend.blendConstants[2] = 0.0f;
- colorBlend.blendConstants[3] = 0.0f;
+ if (HasDepthStencilAttachment()) {
+ query.SetDepthStencil(GetDepthStencilFormat(), wgpu::LoadOp::Load, wgpu::StoreOp::Store,
+ wgpu::LoadOp::Load, wgpu::StoreOp::Store, false);
}
- // Tag all state as dynamic but stencil masks and depth bias.
- VkDynamicState dynamicStates[] = {
- VK_DYNAMIC_STATE_VIEWPORT, VK_DYNAMIC_STATE_SCISSOR,
- VK_DYNAMIC_STATE_LINE_WIDTH, VK_DYNAMIC_STATE_BLEND_CONSTANTS,
- VK_DYNAMIC_STATE_DEPTH_BOUNDS, VK_DYNAMIC_STATE_STENCIL_REFERENCE,
- };
- VkPipelineDynamicStateCreateInfo dynamic;
- dynamic.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
- dynamic.pNext = nullptr;
- dynamic.flags = 0;
- dynamic.dynamicStateCount = sizeof(dynamicStates) / sizeof(dynamicStates[0]);
- dynamic.pDynamicStates = dynamicStates;
-
- // Get a VkRenderPass that matches the attachment formats for this pipeline, load/store ops
- // don't matter so set them all to LoadOp::Load / StoreOp::Store. Whether the render pass
- // has resolve target and whether depth/stencil attachment is read-only also don't matter,
- // so set them both to false.
- VkRenderPass renderPass = VK_NULL_HANDLE;
- {
- RenderPassCacheQuery query;
-
- for (ColorAttachmentIndex i : IterateBitSet(GetColorAttachmentsMask())) {
- query.SetColor(i, GetColorAttachmentFormat(i), wgpu::LoadOp::Load,
- wgpu::StoreOp::Store, false);
- }
+ query.SetSampleCount(GetSampleCount());
- if (HasDepthStencilAttachment()) {
- query.SetDepthStencil(GetDepthStencilFormat(), wgpu::LoadOp::Load,
- wgpu::StoreOp::Store, wgpu::LoadOp::Load,
- wgpu::StoreOp::Store, false);
- }
-
- query.SetSampleCount(GetSampleCount());
-
- GetCacheKey()->Record(query);
- DAWN_TRY_ASSIGN(renderPass, device->GetRenderPassCache()->GetRenderPass(query));
- }
-
- // The create info chains in a bunch of things created on the stack here or inside state
- // objects.
- VkGraphicsPipelineCreateInfo createInfo;
- createInfo.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
- createInfo.pNext = nullptr;
- createInfo.flags = 0;
- createInfo.stageCount = stageCount;
- createInfo.pStages = shaderStages.data();
- createInfo.pVertexInputState = &vertexInputCreateInfo;
- createInfo.pInputAssemblyState = &inputAssembly;
- createInfo.pTessellationState = nullptr;
- createInfo.pViewportState = &viewport;
- createInfo.pRasterizationState = &rasterization;
- createInfo.pMultisampleState = &multisample;
- createInfo.pDepthStencilState = &depthStencilState;
- createInfo.pColorBlendState =
- (GetStageMask() & wgpu::ShaderStage::Fragment) ? &colorBlend : nullptr;
- createInfo.pDynamicState = &dynamic;
- createInfo.layout = ToBackend(GetLayout())->GetHandle();
- createInfo.renderPass = renderPass;
- createInfo.subpass = 0;
- createInfo.basePipelineHandle = VkPipeline{};
- createInfo.basePipelineIndex = -1;
-
- // Record cache key information now since createInfo is not stored.
- GetCacheKey()->Record(createInfo,
- static_cast<const RenderPipeline*>(this)->GetLayout()->GetCacheKey());
-
- DAWN_TRY(CheckVkSuccess(
- device->fn.CreateGraphicsPipelines(device->GetVkDevice(), VkPipelineCache{}, 1,
- &createInfo, nullptr, &*mHandle),
- "CreateGraphicsPipeline"));
-
- SetLabelImpl();
-
- return {};
+ mCacheKey.Record(query);
+ DAWN_TRY_ASSIGN(renderPass, device->GetRenderPassCache()->GetRenderPass(query));
}
- void RenderPipeline::SetLabelImpl() {
- SetDebugName(ToBackend(GetDevice()), mHandle, "Dawn_RenderPipeline", GetLabel());
- }
-
- VkPipelineVertexInputStateCreateInfo RenderPipeline::ComputeVertexInputDesc(
- PipelineVertexInputStateCreateInfoTemporaryAllocations* tempAllocations) {
- // Fill in the "binding info" that will be chained in the create info
- uint32_t bindingCount = 0;
- for (VertexBufferSlot slot : IterateBitSet(GetVertexBufferSlotsUsed())) {
- const VertexBufferInfo& bindingInfo = GetVertexBuffer(slot);
-
- VkVertexInputBindingDescription* bindingDesc = &tempAllocations->bindings[bindingCount];
- bindingDesc->binding = static_cast<uint8_t>(slot);
- bindingDesc->stride = bindingInfo.arrayStride;
- bindingDesc->inputRate = VulkanInputRate(bindingInfo.stepMode);
-
- bindingCount++;
- }
-
- // Fill in the "attribute info" that will be chained in the create info
- uint32_t attributeCount = 0;
- for (VertexAttributeLocation loc : IterateBitSet(GetAttributeLocationsUsed())) {
- const VertexAttributeInfo& attributeInfo = GetAttribute(loc);
-
- VkVertexInputAttributeDescription* attributeDesc =
- &tempAllocations->attributes[attributeCount];
- attributeDesc->location = static_cast<uint8_t>(loc);
- attributeDesc->binding = static_cast<uint8_t>(attributeInfo.vertexBufferSlot);
- attributeDesc->format = VulkanVertexFormat(attributeInfo.format);
- attributeDesc->offset = attributeInfo.offset;
-
- attributeCount++;
- }
-
- // Build the create info
- VkPipelineVertexInputStateCreateInfo createInfo;
- createInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
- createInfo.pNext = nullptr;
- createInfo.flags = 0;
- createInfo.vertexBindingDescriptionCount = bindingCount;
- createInfo.pVertexBindingDescriptions = tempAllocations->bindings.data();
- createInfo.vertexAttributeDescriptionCount = attributeCount;
- createInfo.pVertexAttributeDescriptions = tempAllocations->attributes.data();
- return createInfo;
+ // The create info chains in a bunch of things created on the stack here or inside state
+ // objects.
+ VkGraphicsPipelineCreateInfo createInfo;
+ createInfo.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
+ createInfo.pNext = nullptr;
+ createInfo.flags = 0;
+ createInfo.stageCount = stageCount;
+ createInfo.pStages = shaderStages.data();
+ createInfo.pVertexInputState = &vertexInputCreateInfo;
+ createInfo.pInputAssemblyState = &inputAssembly;
+ createInfo.pTessellationState = nullptr;
+ createInfo.pViewportState = &viewport;
+ createInfo.pRasterizationState = &rasterization;
+ createInfo.pMultisampleState = &multisample;
+ createInfo.pDepthStencilState = &depthStencilState;
+ createInfo.pColorBlendState =
+ (GetStageMask() & wgpu::ShaderStage::Fragment) ? &colorBlend : nullptr;
+ createInfo.pDynamicState = &dynamic;
+ createInfo.layout = ToBackend(GetLayout())->GetHandle();
+ createInfo.renderPass = renderPass;
+ createInfo.subpass = 0;
+ createInfo.basePipelineHandle = VkPipeline{};
+ createInfo.basePipelineIndex = -1;
+
+ // Record cache key information now since createInfo is not stored.
+ mCacheKey.Record(createInfo, layout->GetCacheKey());
+
+ // Try to see if we have anything in the blob cache.
+ Ref<PipelineCache> cache = ToBackend(GetDevice()->GetOrCreatePipelineCache(GetCacheKey()));
+ DAWN_TRY(
+ CheckVkSuccess(device->fn.CreateGraphicsPipelines(device->GetVkDevice(), cache->GetHandle(),
+ 1, &createInfo, nullptr, &*mHandle),
+ "CreateGraphicsPipelines"));
+ // TODO(dawn:549): Flush is currently in the same thread, but perhaps deferrable.
+ DAWN_TRY(cache->FlushIfNeeded());
+
+ SetLabelImpl();
+
+ return {};
+}
+
+void RenderPipeline::SetLabelImpl() {
+ SetDebugName(ToBackend(GetDevice()), mHandle, "Dawn_RenderPipeline", GetLabel());
+}
+
+VkPipelineVertexInputStateCreateInfo RenderPipeline::ComputeVertexInputDesc(
+ PipelineVertexInputStateCreateInfoTemporaryAllocations* tempAllocations) {
+ // Fill in the "binding info" that will be chained in the create info
+ uint32_t bindingCount = 0;
+ for (VertexBufferSlot slot : IterateBitSet(GetVertexBufferSlotsUsed())) {
+ const VertexBufferInfo& bindingInfo = GetVertexBuffer(slot);
+
+ VkVertexInputBindingDescription* bindingDesc = &tempAllocations->bindings[bindingCount];
+ bindingDesc->binding = static_cast<uint8_t>(slot);
+ bindingDesc->stride = bindingInfo.arrayStride;
+ bindingDesc->inputRate = VulkanInputRate(bindingInfo.stepMode);
+
+ bindingCount++;
}
- RenderPipeline::~RenderPipeline() = default;
+ // Fill in the "attribute info" that will be chained in the create info
+ uint32_t attributeCount = 0;
+ for (VertexAttributeLocation loc : IterateBitSet(GetAttributeLocationsUsed())) {
+ const VertexAttributeInfo& attributeInfo = GetAttribute(loc);
- void RenderPipeline::DestroyImpl() {
- RenderPipelineBase::DestroyImpl();
- if (mHandle != VK_NULL_HANDLE) {
- ToBackend(GetDevice())->GetFencedDeleter()->DeleteWhenUnused(mHandle);
- mHandle = VK_NULL_HANDLE;
- }
- }
+ VkVertexInputAttributeDescription* attributeDesc =
+ &tempAllocations->attributes[attributeCount];
+ attributeDesc->location = static_cast<uint8_t>(loc);
+ attributeDesc->binding = static_cast<uint8_t>(attributeInfo.vertexBufferSlot);
+ attributeDesc->format = VulkanVertexFormat(attributeInfo.format);
+ attributeDesc->offset = attributeInfo.offset;
- VkPipeline RenderPipeline::GetHandle() const {
- return mHandle;
+ attributeCount++;
}
- void RenderPipeline::InitializeAsync(Ref<RenderPipelineBase> renderPipeline,
- WGPUCreateRenderPipelineAsyncCallback callback,
- void* userdata) {
- std::unique_ptr<CreateRenderPipelineAsyncTask> asyncTask =
- std::make_unique<CreateRenderPipelineAsyncTask>(std::move(renderPipeline), callback,
- userdata);
- CreateRenderPipelineAsyncTask::RunAsync(std::move(asyncTask));
+ // Build the create info
+ VkPipelineVertexInputStateCreateInfo createInfo;
+ createInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
+ createInfo.pNext = nullptr;
+ createInfo.flags = 0;
+ createInfo.vertexBindingDescriptionCount = bindingCount;
+ createInfo.pVertexBindingDescriptions = tempAllocations->bindings.data();
+ createInfo.vertexAttributeDescriptionCount = attributeCount;
+ createInfo.pVertexAttributeDescriptions = tempAllocations->attributes.data();
+ return createInfo;
+}
+
+RenderPipeline::~RenderPipeline() = default;
+
+void RenderPipeline::DestroyImpl() {
+ RenderPipelineBase::DestroyImpl();
+ if (mHandle != VK_NULL_HANDLE) {
+ ToBackend(GetDevice())->GetFencedDeleter()->DeleteWhenUnused(mHandle);
+ mHandle = VK_NULL_HANDLE;
}
+}
+
+VkPipeline RenderPipeline::GetHandle() const {
+ return mHandle;
+}
+
+void RenderPipeline::InitializeAsync(Ref<RenderPipelineBase> renderPipeline,
+ WGPUCreateRenderPipelineAsyncCallback callback,
+ void* userdata) {
+ std::unique_ptr<CreateRenderPipelineAsyncTask> asyncTask =
+ std::make_unique<CreateRenderPipelineAsyncTask>(std::move(renderPipeline), callback,
+ userdata);
+ CreateRenderPipelineAsyncTask::RunAsync(std::move(asyncTask));
+}
} // namespace dawn::native::vulkan
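The write-mask zeroing that the VulkanColorWriteMask comment above justifies with the Vulkan spec is easiest to see in isolation; a minimal sketch of the effect (conceptual only — both helpers live in this file's anonymous namespace, and the zero-initialized ColorTargetState is an assumption):

// Sketch: the same color target yields a full or empty write mask depending on whether the
// fragment shader actually declares the corresponding output.
ColorTargetState target = {};
target.format = wgpu::TextureFormat::BGRA8Unorm;
target.writeMask = wgpu::ColorWriteMask::All;  // blend stays nullptr, so blending is disabled

VkPipelineColorBlendAttachmentState written =
    ComputeColorDesc(&target, /*isDeclaredInFragmentShader=*/true);
VkPipelineColorBlendAttachmentState skipped =
    ComputeColorDesc(&target, /*isDeclaredInFragmentShader=*/false);
// written.colorWriteMask covers R|G|B|A, while skipped.colorWriteMask is 0, so nothing
// undefined is ever written to an attachment the shader does not cover.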
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/RenderPipelineVk.h b/chromium/third_party/dawn/src/dawn/native/vulkan/RenderPipelineVk.h
index 2c99f7e3314..25462695900 100644
--- a/chromium/third_party/dawn/src/dawn/native/vulkan/RenderPipelineVk.h
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/RenderPipelineVk.h
@@ -22,37 +22,37 @@
namespace dawn::native::vulkan {
- class Device;
+class Device;
- class RenderPipeline final : public RenderPipelineBase {
- public:
- static Ref<RenderPipeline> CreateUninitialized(Device* device,
- const RenderPipelineDescriptor* descriptor);
- static void InitializeAsync(Ref<RenderPipelineBase> renderPipeline,
- WGPUCreateRenderPipelineAsyncCallback callback,
- void* userdata);
+class RenderPipeline final : public RenderPipelineBase {
+ public:
+ static Ref<RenderPipeline> CreateUninitialized(Device* device,
+ const RenderPipelineDescriptor* descriptor);
+ static void InitializeAsync(Ref<RenderPipelineBase> renderPipeline,
+ WGPUCreateRenderPipelineAsyncCallback callback,
+ void* userdata);
- VkPipeline GetHandle() const;
+ VkPipeline GetHandle() const;
- MaybeError Initialize() override;
+ MaybeError Initialize() override;
- // Dawn API
- void SetLabelImpl() override;
+ // Dawn API
+ void SetLabelImpl() override;
- private:
- ~RenderPipeline() override;
- void DestroyImpl() override;
- using RenderPipelineBase::RenderPipelineBase;
+ private:
+ ~RenderPipeline() override;
+ void DestroyImpl() override;
+ using RenderPipelineBase::RenderPipelineBase;
- struct PipelineVertexInputStateCreateInfoTemporaryAllocations {
- std::array<VkVertexInputBindingDescription, kMaxVertexBuffers> bindings;
- std::array<VkVertexInputAttributeDescription, kMaxVertexAttributes> attributes;
- };
- VkPipelineVertexInputStateCreateInfo ComputeVertexInputDesc(
- PipelineVertexInputStateCreateInfoTemporaryAllocations* temporaryAllocations);
-
- VkPipeline mHandle = VK_NULL_HANDLE;
+ struct PipelineVertexInputStateCreateInfoTemporaryAllocations {
+ std::array<VkVertexInputBindingDescription, kMaxVertexBuffers> bindings;
+ std::array<VkVertexInputAttributeDescription, kMaxVertexAttributes> attributes;
};
+ VkPipelineVertexInputStateCreateInfo ComputeVertexInputDesc(
+ PipelineVertexInputStateCreateInfoTemporaryAllocations* temporaryAllocations);
+
+ VkPipeline mHandle = VK_NULL_HANDLE;
+};
} // namespace dawn::native::vulkan
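For orientation, the two static entry points declared above are meant to be driven in two steps; a hedged sketch (the device, descriptor, and callback handling are assumptions, and the callback signature is taken from webgpu.h as an assumption, not from this header):

// Sketch: frontend allocation first, Vulkan work second (possibly on another thread).
Ref<RenderPipelineBase> pipeline = RenderPipeline::CreateUninitialized(device, descriptor);
RenderPipeline::InitializeAsync(
    std::move(pipeline),
    [](WGPUCreatePipelineAsyncStatus status, WGPURenderPipeline handle, char const* message,
       void* userdata) {
        // Hypothetical callback: check `status`/`message`, then hand `handle` back to the caller.
    },
    /*userdata=*/nullptr);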
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/ResourceHeapVk.cpp b/chromium/third_party/dawn/src/dawn/native/vulkan/ResourceHeapVk.cpp
index 94ce7fc9340..e1a5d19564d 100644
--- a/chromium/third_party/dawn/src/dawn/native/vulkan/ResourceHeapVk.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/ResourceHeapVk.cpp
@@ -16,16 +16,15 @@
namespace dawn::native::vulkan {
- ResourceHeap::ResourceHeap(VkDeviceMemory memory, size_t memoryType)
- : mMemory(memory), mMemoryType(memoryType) {
- }
+ResourceHeap::ResourceHeap(VkDeviceMemory memory, size_t memoryType)
+ : mMemory(memory), mMemoryType(memoryType) {}
- VkDeviceMemory ResourceHeap::GetMemory() const {
- return mMemory;
- }
+VkDeviceMemory ResourceHeap::GetMemory() const {
+ return mMemory;
+}
- size_t ResourceHeap::GetMemoryType() const {
- return mMemoryType;
- }
+size_t ResourceHeap::GetMemoryType() const {
+ return mMemoryType;
+}
} // namespace dawn::native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/ResourceHeapVk.h b/chromium/third_party/dawn/src/dawn/native/vulkan/ResourceHeapVk.h
index b7d80689205..d1c2d4aa124 100644
--- a/chromium/third_party/dawn/src/dawn/native/vulkan/ResourceHeapVk.h
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/ResourceHeapVk.h
@@ -20,19 +20,19 @@
namespace dawn::native::vulkan {
- // Wrapper for physical memory used with or without a resource object.
- class ResourceHeap : public ResourceHeapBase {
- public:
- ResourceHeap(VkDeviceMemory memory, size_t memoryType);
- ~ResourceHeap() = default;
-
- VkDeviceMemory GetMemory() const;
- size_t GetMemoryType() const;
-
- private:
- VkDeviceMemory mMemory = VK_NULL_HANDLE;
- size_t mMemoryType = 0;
- };
+// Wrapper for physical memory used with or without a resource object.
+class ResourceHeap : public ResourceHeapBase {
+ public:
+ ResourceHeap(VkDeviceMemory memory, size_t memoryType);
+ ~ResourceHeap() override = default;
+
+ VkDeviceMemory GetMemory() const;
+ size_t GetMemoryType() const;
+
+ private:
+ VkDeviceMemory mMemory = VK_NULL_HANDLE;
+ size_t mMemoryType = 0;
+};
} // namespace dawn::native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/ResourceMemoryAllocatorVk.cpp b/chromium/third_party/dawn/src/dawn/native/vulkan/ResourceMemoryAllocatorVk.cpp
index 783300f0d8f..390b3262cf3 100644
--- a/chromium/third_party/dawn/src/dawn/native/vulkan/ResourceMemoryAllocatorVk.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/ResourceMemoryAllocatorVk.cpp
@@ -14,6 +14,9 @@
#include "dawn/native/vulkan/ResourceMemoryAllocatorVk.h"
+#include <algorithm>
+#include <utility>
+
#include "dawn/common/Math.h"
#include "dawn/native/BuddyMemoryAllocator.h"
#include "dawn/native/ResourceHeapAllocator.h"
@@ -24,270 +27,263 @@
namespace dawn::native::vulkan {
- namespace {
-
- // TODO(crbug.com/dawn/849): This is a hardcoded heurstic to choose when to
- // suballocate but it should ideally depend on the size of the memory heaps and other
- // factors.
- constexpr uint64_t kMaxSizeForSubAllocation = 4ull * 1024ull * 1024ull; // 4MiB
-
- // Have each bucket of the buddy system allocate at least some resource of the maximum
- // size
- constexpr uint64_t kBuddyHeapsSize = 2 * kMaxSizeForSubAllocation;
-
- } // anonymous namespace
-
- // SingleTypeAllocator is a combination of a BuddyMemoryAllocator and its client and can
- // service suballocation requests, but for a single Vulkan memory type.
-
- class ResourceMemoryAllocator::SingleTypeAllocator : public ResourceHeapAllocator {
- public:
- SingleTypeAllocator(Device* device, size_t memoryTypeIndex, VkDeviceSize memoryHeapSize)
- : mDevice(device),
- mMemoryTypeIndex(memoryTypeIndex),
- mMemoryHeapSize(memoryHeapSize),
- mPooledMemoryAllocator(this),
- mBuddySystem(
- // Round down to a power of 2 that's <= mMemoryHeapSize. This will always
- // be a multiple of kBuddyHeapsSize because kBuddyHeapsSize is a power of 2.
- uint64_t(1) << Log2(mMemoryHeapSize),
- // Take the min in the very unlikely case the memory heap is tiny.
- std::min(uint64_t(1) << Log2(mMemoryHeapSize), kBuddyHeapsSize),
- &mPooledMemoryAllocator) {
- ASSERT(IsPowerOfTwo(kBuddyHeapsSize));
- }
- ~SingleTypeAllocator() override = default;
+namespace {
+
+// TODO(crbug.com/dawn/849): This is a hardcoded heuristic to choose when to
+// suballocate but it should ideally depend on the size of the memory heaps and other
+// factors.
+constexpr uint64_t kMaxSizeForSubAllocation = 4ull * 1024ull * 1024ull; // 4MiB
+
+// Have each bucket of the buddy system allocate at least some resource of the maximum
+// size
+constexpr uint64_t kBuddyHeapsSize = 2 * kMaxSizeForSubAllocation;
+
+} // anonymous namespace
+
+// SingleTypeAllocator is a combination of a BuddyMemoryAllocator and its client and can
+// service suballocation requests, but for a single Vulkan memory type.
+
+class ResourceMemoryAllocator::SingleTypeAllocator : public ResourceHeapAllocator {
+ public:
+ SingleTypeAllocator(Device* device, size_t memoryTypeIndex, VkDeviceSize memoryHeapSize)
+ : mDevice(device),
+ mMemoryTypeIndex(memoryTypeIndex),
+ mMemoryHeapSize(memoryHeapSize),
+ mPooledMemoryAllocator(this),
+ mBuddySystem(
+ // Round down to a power of 2 that's <= mMemoryHeapSize. This will always
+ // be a multiple of kBuddyHeapsSize because kBuddyHeapsSize is a power of 2.
+ uint64_t(1) << Log2(mMemoryHeapSize),
+ // Take the min in the very unlikely case the memory heap is tiny.
+ std::min(uint64_t(1) << Log2(mMemoryHeapSize), kBuddyHeapsSize),
+ &mPooledMemoryAllocator) {
+ ASSERT(IsPowerOfTwo(kBuddyHeapsSize));
+ }
+ ~SingleTypeAllocator() override = default;
- void DestroyPool() {
- mPooledMemoryAllocator.DestroyPool();
- }
+ void DestroyPool() { mPooledMemoryAllocator.DestroyPool(); }
- ResultOrError<ResourceMemoryAllocation> AllocateMemory(uint64_t size, uint64_t alignment) {
- return mBuddySystem.Allocate(size, alignment);
- }
+ ResultOrError<ResourceMemoryAllocation> AllocateMemory(uint64_t size, uint64_t alignment) {
+ return mBuddySystem.Allocate(size, alignment);
+ }
- void DeallocateMemory(const ResourceMemoryAllocation& allocation) {
- mBuddySystem.Deallocate(allocation);
- }
+ void DeallocateMemory(const ResourceMemoryAllocation& allocation) {
+ mBuddySystem.Deallocate(allocation);
+ }
- // Implementation of the MemoryAllocator interface to be a client of BuddyMemoryAllocator
+ // Implementation of the MemoryAllocator interface to be a client of BuddyMemoryAllocator
- ResultOrError<std::unique_ptr<ResourceHeapBase>> AllocateResourceHeap(
- uint64_t size) override {
- if (size > mMemoryHeapSize) {
- return DAWN_OUT_OF_MEMORY_ERROR("Allocation size too large");
- }
+ ResultOrError<std::unique_ptr<ResourceHeapBase>> AllocateResourceHeap(uint64_t size) override {
+ if (size > mMemoryHeapSize) {
+ return DAWN_OUT_OF_MEMORY_ERROR("Allocation size too large");
+ }
- VkMemoryAllocateInfo allocateInfo;
- allocateInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
- allocateInfo.pNext = nullptr;
- allocateInfo.allocationSize = size;
- allocateInfo.memoryTypeIndex = mMemoryTypeIndex;
+ VkMemoryAllocateInfo allocateInfo;
+ allocateInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
+ allocateInfo.pNext = nullptr;
+ allocateInfo.allocationSize = size;
+ allocateInfo.memoryTypeIndex = mMemoryTypeIndex;
- VkDeviceMemory allocatedMemory = VK_NULL_HANDLE;
+ VkDeviceMemory allocatedMemory = VK_NULL_HANDLE;
- // First check OOM that we want to surface to the application.
- DAWN_TRY(CheckVkOOMThenSuccess(
- mDevice->fn.AllocateMemory(mDevice->GetVkDevice(), &allocateInfo, nullptr,
- &*allocatedMemory),
- "vkAllocateMemory"));
+ // First check OOM that we want to surface to the application.
+ DAWN_TRY(
+ CheckVkOOMThenSuccess(mDevice->fn.AllocateMemory(mDevice->GetVkDevice(), &allocateInfo,
+ nullptr, &*allocatedMemory),
+ "vkAllocateMemory"));
- ASSERT(allocatedMemory != VK_NULL_HANDLE);
- return {std::make_unique<ResourceHeap>(allocatedMemory, mMemoryTypeIndex)};
- }
+ ASSERT(allocatedMemory != VK_NULL_HANDLE);
+ return {std::make_unique<ResourceHeap>(allocatedMemory, mMemoryTypeIndex)};
+ }
- void DeallocateResourceHeap(std::unique_ptr<ResourceHeapBase> allocation) override {
- mDevice->GetFencedDeleter()->DeleteWhenUnused(ToBackend(allocation.get())->GetMemory());
- }
+ void DeallocateResourceHeap(std::unique_ptr<ResourceHeapBase> allocation) override {
+ mDevice->GetFencedDeleter()->DeleteWhenUnused(ToBackend(allocation.get())->GetMemory());
+ }
- private:
- Device* mDevice;
- size_t mMemoryTypeIndex;
- VkDeviceSize mMemoryHeapSize;
- PooledResourceMemoryAllocator mPooledMemoryAllocator;
- BuddyMemoryAllocator mBuddySystem;
- };
+ private:
+ Device* mDevice;
+ size_t mMemoryTypeIndex;
+ VkDeviceSize mMemoryHeapSize;
+ PooledResourceMemoryAllocator mPooledMemoryAllocator;
+ BuddyMemoryAllocator mBuddySystem;
+};
- // Implementation of ResourceMemoryAllocator
+// Implementation of ResourceMemoryAllocator
- ResourceMemoryAllocator::ResourceMemoryAllocator(Device* device) : mDevice(device) {
- const VulkanDeviceInfo& info = mDevice->GetDeviceInfo();
- mAllocatorsPerType.reserve(info.memoryTypes.size());
+ResourceMemoryAllocator::ResourceMemoryAllocator(Device* device) : mDevice(device) {
+ const VulkanDeviceInfo& info = mDevice->GetDeviceInfo();
+ mAllocatorsPerType.reserve(info.memoryTypes.size());
- for (size_t i = 0; i < info.memoryTypes.size(); i++) {
- mAllocatorsPerType.emplace_back(std::make_unique<SingleTypeAllocator>(
- mDevice, i, info.memoryHeaps[info.memoryTypes[i].heapIndex].size));
- }
+ for (size_t i = 0; i < info.memoryTypes.size(); i++) {
+ mAllocatorsPerType.emplace_back(std::make_unique<SingleTypeAllocator>(
+ mDevice, i, info.memoryHeaps[info.memoryTypes[i].heapIndex].size));
}
-
- ResourceMemoryAllocator::~ResourceMemoryAllocator() = default;
-
- ResultOrError<ResourceMemoryAllocation> ResourceMemoryAllocator::Allocate(
- const VkMemoryRequirements& requirements,
- MemoryKind kind) {
- // The Vulkan spec guarantees at least on memory type is valid.
- int memoryType = FindBestTypeIndex(requirements, kind);
- ASSERT(memoryType >= 0);
-
- VkDeviceSize size = requirements.size;
-
- // Sub-allocate non-mappable resources because at the moment the mapped pointer
- // is part of the resource and not the heap, which doesn't match the Vulkan model.
- // TODO(crbug.com/dawn/849): allow sub-allocating mappable resources, maybe.
- if (requirements.size < kMaxSizeForSubAllocation && kind != MemoryKind::LinearMappable &&
- !mDevice->IsToggleEnabled(Toggle::DisableResourceSuballocation)) {
- // When sub-allocating, Vulkan requires that we respect bufferImageGranularity. Some
- // hardware puts information on the memory's page table entry and allocating a linear
- // resource in the same page as a non-linear (aka opaque) resource can cause issues.
- // Probably because some texture compression flags are stored on the page table entry,
- // and allocating a linear resource removes these flags.
- //
- // Anyway, just to be safe we ask that all sub-allocated resources are allocated with at
- // least this alignment. TODO(crbug.com/dawn/849): this is suboptimal because multiple
- // linear (resp. opaque) resources can coexist in the same page. In particular Nvidia
- // GPUs often use a granularity of 64k which will lead to a lot of wasted spec. Revisit
- // with a more efficient algorithm later.
- uint64_t alignment =
- std::max(requirements.alignment,
- mDevice->GetDeviceInfo().properties.limits.bufferImageGranularity);
-
- ResourceMemoryAllocation subAllocation;
- DAWN_TRY_ASSIGN(subAllocation, mAllocatorsPerType[memoryType]->AllocateMemory(
- requirements.size, alignment));
- if (subAllocation.GetInfo().mMethod != AllocationMethod::kInvalid) {
- return std::move(subAllocation);
- }
- }
-
- // If sub-allocation failed, allocate memory just for it.
- std::unique_ptr<ResourceHeapBase> resourceHeap;
- DAWN_TRY_ASSIGN(resourceHeap, mAllocatorsPerType[memoryType]->AllocateResourceHeap(size));
-
- void* mappedPointer = nullptr;
- if (kind == MemoryKind::LinearMappable) {
- DAWN_TRY_WITH_CLEANUP(
- CheckVkSuccess(mDevice->fn.MapMemory(mDevice->GetVkDevice(),
- ToBackend(resourceHeap.get())->GetMemory(), 0,
- size, 0, &mappedPointer),
- "vkMapMemory"),
- {
- mAllocatorsPerType[memoryType]->DeallocateResourceHeap(std::move(resourceHeap));
- });
+}
+
+ResourceMemoryAllocator::~ResourceMemoryAllocator() = default;
+
+ResultOrError<ResourceMemoryAllocation> ResourceMemoryAllocator::Allocate(
+ const VkMemoryRequirements& requirements,
+ MemoryKind kind) {
+    // The Vulkan spec guarantees at least one memory type is valid.
+ int memoryType = FindBestTypeIndex(requirements, kind);
+ ASSERT(memoryType >= 0);
+
+ VkDeviceSize size = requirements.size;
+
+ // Sub-allocate non-mappable resources because at the moment the mapped pointer
+ // is part of the resource and not the heap, which doesn't match the Vulkan model.
+ // TODO(crbug.com/dawn/849): allow sub-allocating mappable resources, maybe.
+ if (requirements.size < kMaxSizeForSubAllocation && kind != MemoryKind::LinearMappable &&
+ !mDevice->IsToggleEnabled(Toggle::DisableResourceSuballocation)) {
+ // When sub-allocating, Vulkan requires that we respect bufferImageGranularity. Some
+ // hardware puts information on the memory's page table entry and allocating a linear
+ // resource in the same page as a non-linear (aka opaque) resource can cause issues.
+ // Probably because some texture compression flags are stored on the page table entry,
+ // and allocating a linear resource removes these flags.
+ //
+ // Anyway, just to be safe we ask that all sub-allocated resources are allocated with at
+ // least this alignment. TODO(crbug.com/dawn/849): this is suboptimal because multiple
+ // linear (resp. opaque) resources can coexist in the same page. In particular Nvidia
+        // GPUs often use a granularity of 64k which will lead to a lot of wasted space. Revisit
+ // with a more efficient algorithm later.
+ uint64_t alignment =
+ std::max(requirements.alignment,
+ mDevice->GetDeviceInfo().properties.limits.bufferImageGranularity);
+
+ ResourceMemoryAllocation subAllocation;
+ DAWN_TRY_ASSIGN(subAllocation, mAllocatorsPerType[memoryType]->AllocateMemory(
+ requirements.size, alignment));
+ if (subAllocation.GetInfo().mMethod != AllocationMethod::kInvalid) {
+ return std::move(subAllocation);
}
-
- AllocationInfo info;
- info.mMethod = AllocationMethod::kDirect;
- return ResourceMemoryAllocation(info, /*offset*/ 0, resourceHeap.release(),
- static_cast<uint8_t*>(mappedPointer));
}
- void ResourceMemoryAllocator::Deallocate(ResourceMemoryAllocation* allocation) {
- switch (allocation->GetInfo().mMethod) {
- // Some memory allocation can never be initialized, for example when wrapping
- // swapchain VkImages with a Texture.
- case AllocationMethod::kInvalid:
- break;
-
- // For direct allocation we can put the memory for deletion immediately and the fence
- // deleter will make sure the resources are freed before the memory.
- case AllocationMethod::kDirect: {
- ResourceHeap* heap = ToBackend(allocation->GetResourceHeap());
- allocation->Invalidate();
- mDevice->GetFencedDeleter()->DeleteWhenUnused(heap->GetMemory());
- delete heap;
- break;
- }
+ // If sub-allocation failed, allocate memory just for it.
+ std::unique_ptr<ResourceHeapBase> resourceHeap;
+ DAWN_TRY_ASSIGN(resourceHeap, mAllocatorsPerType[memoryType]->AllocateResourceHeap(size));
+
+ void* mappedPointer = nullptr;
+ if (kind == MemoryKind::LinearMappable) {
+ DAWN_TRY_WITH_CLEANUP(
+ CheckVkSuccess(mDevice->fn.MapMemory(mDevice->GetVkDevice(),
+ ToBackend(resourceHeap.get())->GetMemory(), 0,
+ size, 0, &mappedPointer),
+ "vkMapMemory"),
+ { mAllocatorsPerType[memoryType]->DeallocateResourceHeap(std::move(resourceHeap)); });
+ }
- // Suballocations aren't freed immediately, otherwise another resource allocation could
- // happen just after that aliases the old one and would require a barrier.
- // TODO(crbug.com/dawn/851): Maybe we can produce the correct barriers to reduce the
- // latency to reclaim memory.
- case AllocationMethod::kSubAllocated:
- mSubAllocationsToDelete.Enqueue(*allocation, mDevice->GetPendingCommandSerial());
- break;
-
- default:
- UNREACHABLE();
- break;
+ AllocationInfo info;
+ info.mMethod = AllocationMethod::kDirect;
+ return ResourceMemoryAllocation(info, /*offset*/ 0, resourceHeap.release(),
+ static_cast<uint8_t*>(mappedPointer));
+}
+
+void ResourceMemoryAllocator::Deallocate(ResourceMemoryAllocation* allocation) {
+ switch (allocation->GetInfo().mMethod) {
+        // Some memory allocations can never be initialized, for example when wrapping
+ // swapchain VkImages with a Texture.
+ case AllocationMethod::kInvalid:
+ break;
+
+ // For direct allocation we can put the memory for deletion immediately and the fence
+ // deleter will make sure the resources are freed before the memory.
+ case AllocationMethod::kDirect: {
+ ResourceHeap* heap = ToBackend(allocation->GetResourceHeap());
+ allocation->Invalidate();
+ mDevice->GetFencedDeleter()->DeleteWhenUnused(heap->GetMemory());
+ delete heap;
+ break;
}
- // Invalidate the underlying resource heap in case the client accidentally
- // calls DeallocateMemory again using the same allocation.
- allocation->Invalidate();
+ // Suballocations aren't freed immediately, otherwise another resource allocation could
+ // happen just after that aliases the old one and would require a barrier.
+ // TODO(crbug.com/dawn/851): Maybe we can produce the correct barriers to reduce the
+ // latency to reclaim memory.
+ case AllocationMethod::kSubAllocated:
+ mSubAllocationsToDelete.Enqueue(*allocation, mDevice->GetPendingCommandSerial());
+ break;
+
+ default:
+ UNREACHABLE();
+ break;
}
- void ResourceMemoryAllocator::Tick(ExecutionSerial completedSerial) {
- for (const ResourceMemoryAllocation& allocation :
- mSubAllocationsToDelete.IterateUpTo(completedSerial)) {
- ASSERT(allocation.GetInfo().mMethod == AllocationMethod::kSubAllocated);
- size_t memoryType = ToBackend(allocation.GetResourceHeap())->GetMemoryType();
+ // Invalidate the underlying resource heap in case the client accidentally
+ // calls DeallocateMemory again using the same allocation.
+ allocation->Invalidate();
+}
- mAllocatorsPerType[memoryType]->DeallocateMemory(allocation);
- }
+void ResourceMemoryAllocator::Tick(ExecutionSerial completedSerial) {
+ for (const ResourceMemoryAllocation& allocation :
+ mSubAllocationsToDelete.IterateUpTo(completedSerial)) {
+ ASSERT(allocation.GetInfo().mMethod == AllocationMethod::kSubAllocated);
+ size_t memoryType = ToBackend(allocation.GetResourceHeap())->GetMemoryType();
- mSubAllocationsToDelete.ClearUpTo(completedSerial);
+ mAllocatorsPerType[memoryType]->DeallocateMemory(allocation);
}
- int ResourceMemoryAllocator::FindBestTypeIndex(VkMemoryRequirements requirements,
- MemoryKind kind) {
- const VulkanDeviceInfo& info = mDevice->GetDeviceInfo();
- bool mappable = kind == MemoryKind::LinearMappable;
-
- // Find a suitable memory type for this allocation
- int bestType = -1;
- for (size_t i = 0; i < info.memoryTypes.size(); ++i) {
- // Resource must support this memory type
- if ((requirements.memoryTypeBits & (1 << i)) == 0) {
- continue;
- }
+ mSubAllocationsToDelete.ClearUpTo(completedSerial);
+}
- // Mappable resource must be host visible
- if (mappable &&
- (info.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) {
- continue;
- }
+int ResourceMemoryAllocator::FindBestTypeIndex(VkMemoryRequirements requirements, MemoryKind kind) {
+ const VulkanDeviceInfo& info = mDevice->GetDeviceInfo();
+ bool mappable = kind == MemoryKind::LinearMappable;
- // Mappable must also be host coherent.
- if (mappable &&
- (info.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) == 0) {
- continue;
- }
+ // Find a suitable memory type for this allocation
+ int bestType = -1;
+ for (size_t i = 0; i < info.memoryTypes.size(); ++i) {
+ // Resource must support this memory type
+ if ((requirements.memoryTypeBits & (1 << i)) == 0) {
+ continue;
+ }
- // Found the first candidate memory type
- if (bestType == -1) {
- bestType = static_cast<int>(i);
- continue;
- }
+ // Mappable resource must be host visible
+ if (mappable &&
+ (info.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) {
+ continue;
+ }
- // For non-mappable resources, favor device local memory.
- bool currentDeviceLocal =
- info.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
- bool bestDeviceLocal =
- info.memoryTypes[bestType].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
- if (!mappable && (currentDeviceLocal != bestDeviceLocal)) {
- if (currentDeviceLocal) {
- bestType = static_cast<int>(i);
- }
- continue;
- }
+ // Mappable must also be host coherent.
+ if (mappable &&
+ (info.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) == 0) {
+ continue;
+ }
- // All things equal favor the memory in the biggest heap
- VkDeviceSize bestTypeHeapSize =
- info.memoryHeaps[info.memoryTypes[bestType].heapIndex].size;
- VkDeviceSize candidateHeapSize = info.memoryHeaps[info.memoryTypes[i].heapIndex].size;
- if (candidateHeapSize > bestTypeHeapSize) {
+ // Found the first candidate memory type
+ if (bestType == -1) {
+ bestType = static_cast<int>(i);
+ continue;
+ }
+
+ // For non-mappable resources, favor device local memory.
+ bool currentDeviceLocal =
+ info.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+ bool bestDeviceLocal =
+ info.memoryTypes[bestType].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+ if (!mappable && (currentDeviceLocal != bestDeviceLocal)) {
+ if (currentDeviceLocal) {
bestType = static_cast<int>(i);
- continue;
}
+ continue;
}
- return bestType;
+ // All things equal favor the memory in the biggest heap
+ VkDeviceSize bestTypeHeapSize = info.memoryHeaps[info.memoryTypes[bestType].heapIndex].size;
+ VkDeviceSize candidateHeapSize = info.memoryHeaps[info.memoryTypes[i].heapIndex].size;
+ if (candidateHeapSize > bestTypeHeapSize) {
+ bestType = static_cast<int>(i);
+ continue;
+ }
}
- void ResourceMemoryAllocator::DestroyPool() {
- for (auto& alloc : mAllocatorsPerType) {
- alloc->DestroyPool();
- }
+ return bestType;
+}
+
+void ResourceMemoryAllocator::DestroyPool() {
+ for (auto& alloc : mAllocatorsPerType) {
+ alloc->DestroyPool();
}
+}
} // namespace dawn::native::vulkan
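The allocator above hard-codes two tuning constants and derives its buddy-system block sizes from the reported heap size. The following standalone C++ sketch illustrates only that arithmetic: Log2 and IsPowerOfTwo are simplified stand-ins for the helpers in dawn/common/Math.h, and the heap size, alignment, and granularity values are invented for the example rather than taken from the patch.

#include <algorithm>
#include <cassert>
#include <cstdint>

// Illustrative stand-ins for dawn/common/Math.h helpers.
static uint32_t Log2(uint64_t v) {
    uint32_t log = 0;
    while (v >>= 1) {
        ++log;
    }
    return log;
}
static bool IsPowerOfTwo(uint64_t v) {
    return v != 0 && (v & (v - 1)) == 0;
}

int main() {
    constexpr uint64_t kMaxSizeForSubAllocation = 4ull * 1024ull * 1024ull;  // 4MiB
    constexpr uint64_t kBuddyHeapsSize = 2 * kMaxSizeForSubAllocation;       // 8MiB
    assert(IsPowerOfTwo(kBuddyHeapsSize));

    // Hypothetical heap size reported by the driver (not a power of two).
    uint64_t memoryHeapSize = 3ull * 1024ull * 1024ull * 1024ull;  // 3GiB

    // Round down to a power of two <= memoryHeapSize; the result is a multiple
    // of kBuddyHeapsSize because kBuddyHeapsSize is itself a power of two.
    uint64_t maxBlockSize = uint64_t(1) << Log2(memoryHeapSize);  // 2GiB here
    // Take the min in the unlikely case the heap is smaller than one buddy heap.
    uint64_t buddyHeapSize = std::min(maxBlockSize, kBuddyHeapsSize);

    // Sub-allocations must respect bufferImageGranularity, so the effective
    // alignment is the larger of the resource alignment and that device limit.
    uint64_t requirementAlignment = 256;      // hypothetical VkMemoryRequirements::alignment
    uint64_t bufferImageGranularity = 65536;  // hypothetical 64KiB device limit
    uint64_t alignment = std::max(requirementAlignment, bufferImageGranularity);

    (void)buddyHeapSize;
    (void)alignment;
    return 0;
}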
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/ResourceMemoryAllocatorVk.h b/chromium/third_party/dawn/src/dawn/native/vulkan/ResourceMemoryAllocatorVk.h
index c846ed9f425..1ece6d7592b 100644
--- a/chromium/third_party/dawn/src/dawn/native/vulkan/ResourceMemoryAllocatorVk.h
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/ResourceMemoryAllocatorVk.h
@@ -15,6 +15,9 @@
#ifndef SRC_DAWN_NATIVE_VULKAN_RESOURCEMEMORYALLOCATORVK_H_
#define SRC_DAWN_NATIVE_VULKAN_RESOURCEMEMORYALLOCATORVK_H_
+#include <memory>
+#include <vector>
+
#include "dawn/common/SerialQueue.h"
#include "dawn/common/vulkan_platform.h"
#include "dawn/native/Error.h"
@@ -22,44 +25,41 @@
#include "dawn/native/PooledResourceMemoryAllocator.h"
#include "dawn/native/ResourceMemoryAllocation.h"
-#include <memory>
-#include <vector>
-
namespace dawn::native::vulkan {
- class Device;
+class Device;
- // Various kinds of memory that influence the result of the allocation. For example, to take
- // into account mappability and Vulkan's bufferImageGranularity.
- enum class MemoryKind {
- Linear,
- LinearMappable,
- Opaque,
- };
+// Various kinds of memory that influence the result of the allocation. For example, to take
+// into account mappability and Vulkan's bufferImageGranularity.
+enum class MemoryKind {
+ Linear,
+ LinearMappable,
+ Opaque,
+};
- class ResourceMemoryAllocator {
- public:
- explicit ResourceMemoryAllocator(Device* device);
- ~ResourceMemoryAllocator();
+class ResourceMemoryAllocator {
+ public:
+ explicit ResourceMemoryAllocator(Device* device);
+ ~ResourceMemoryAllocator();
- ResultOrError<ResourceMemoryAllocation> Allocate(const VkMemoryRequirements& requirements,
- MemoryKind kind);
- void Deallocate(ResourceMemoryAllocation* allocation);
+ ResultOrError<ResourceMemoryAllocation> Allocate(const VkMemoryRequirements& requirements,
+ MemoryKind kind);
+ void Deallocate(ResourceMemoryAllocation* allocation);
- void DestroyPool();
+ void DestroyPool();
- void Tick(ExecutionSerial completedSerial);
+ void Tick(ExecutionSerial completedSerial);
- int FindBestTypeIndex(VkMemoryRequirements requirements, MemoryKind kind);
+ int FindBestTypeIndex(VkMemoryRequirements requirements, MemoryKind kind);
- private:
- Device* mDevice;
+ private:
+ Device* mDevice;
- class SingleTypeAllocator;
- std::vector<std::unique_ptr<SingleTypeAllocator>> mAllocatorsPerType;
+ class SingleTypeAllocator;
+ std::vector<std::unique_ptr<SingleTypeAllocator>> mAllocatorsPerType;
- SerialQueue<ExecutionSerial, ResourceMemoryAllocation> mSubAllocationsToDelete;
- };
+ SerialQueue<ExecutionSerial, ResourceMemoryAllocation> mSubAllocationsToDelete;
+};
} // namespace dawn::native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/SamplerVk.cpp b/chromium/third_party/dawn/src/dawn/native/vulkan/SamplerVk.cpp
index 629fd1bd518..fb464915c57 100644
--- a/chromium/third_party/dawn/src/dawn/native/vulkan/SamplerVk.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/SamplerVk.cpp
@@ -14,6 +14,8 @@
#include "dawn/native/vulkan/SamplerVk.h"
+#include <algorithm>
+
#include "dawn/native/vulkan/DeviceVk.h"
#include "dawn/native/vulkan/FencedDeleter.h"
#include "dawn/native/vulkan/UtilsVulkan.h"
@@ -21,110 +23,109 @@
namespace dawn::native::vulkan {
- namespace {
- VkSamplerAddressMode VulkanSamplerAddressMode(wgpu::AddressMode mode) {
- switch (mode) {
- case wgpu::AddressMode::Repeat:
- return VK_SAMPLER_ADDRESS_MODE_REPEAT;
- case wgpu::AddressMode::MirrorRepeat:
- return VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT;
- case wgpu::AddressMode::ClampToEdge:
- return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
- }
- UNREACHABLE();
- }
-
- VkFilter VulkanSamplerFilter(wgpu::FilterMode filter) {
- switch (filter) {
- case wgpu::FilterMode::Linear:
- return VK_FILTER_LINEAR;
- case wgpu::FilterMode::Nearest:
- return VK_FILTER_NEAREST;
- }
- UNREACHABLE();
- }
-
- VkSamplerMipmapMode VulkanMipMapMode(wgpu::FilterMode filter) {
- switch (filter) {
- case wgpu::FilterMode::Linear:
- return VK_SAMPLER_MIPMAP_MODE_LINEAR;
- case wgpu::FilterMode::Nearest:
- return VK_SAMPLER_MIPMAP_MODE_NEAREST;
- }
- UNREACHABLE();
- }
- } // anonymous namespace
-
- // static
- ResultOrError<Ref<Sampler>> Sampler::Create(Device* device,
- const SamplerDescriptor* descriptor) {
- Ref<Sampler> sampler = AcquireRef(new Sampler(device, descriptor));
- DAWN_TRY(sampler->Initialize(descriptor));
- return sampler;
+namespace {
+VkSamplerAddressMode VulkanSamplerAddressMode(wgpu::AddressMode mode) {
+ switch (mode) {
+ case wgpu::AddressMode::Repeat:
+ return VK_SAMPLER_ADDRESS_MODE_REPEAT;
+ case wgpu::AddressMode::MirrorRepeat:
+ return VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT;
+ case wgpu::AddressMode::ClampToEdge:
+ return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
}
-
- MaybeError Sampler::Initialize(const SamplerDescriptor* descriptor) {
- VkSamplerCreateInfo createInfo = {};
- createInfo.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
- createInfo.pNext = nullptr;
- createInfo.flags = 0;
- createInfo.magFilter = VulkanSamplerFilter(descriptor->magFilter);
- createInfo.minFilter = VulkanSamplerFilter(descriptor->minFilter);
- createInfo.mipmapMode = VulkanMipMapMode(descriptor->mipmapFilter);
- createInfo.addressModeU = VulkanSamplerAddressMode(descriptor->addressModeU);
- createInfo.addressModeV = VulkanSamplerAddressMode(descriptor->addressModeV);
- createInfo.addressModeW = VulkanSamplerAddressMode(descriptor->addressModeW);
- createInfo.mipLodBias = 0.0f;
- if (descriptor->compare != wgpu::CompareFunction::Undefined) {
- createInfo.compareOp = ToVulkanCompareOp(descriptor->compare);
- createInfo.compareEnable = VK_TRUE;
- } else {
- // Still set the compareOp so it's not garbage.
- createInfo.compareOp = VK_COMPARE_OP_NEVER;
- createInfo.compareEnable = VK_FALSE;
- }
- createInfo.minLod = descriptor->lodMinClamp;
- createInfo.maxLod = descriptor->lodMaxClamp;
- createInfo.unnormalizedCoordinates = VK_FALSE;
-
- Device* device = ToBackend(GetDevice());
- uint16_t maxAnisotropy = GetMaxAnisotropy();
- if (device->GetDeviceInfo().features.samplerAnisotropy == VK_TRUE && maxAnisotropy > 1) {
- createInfo.anisotropyEnable = VK_TRUE;
- // https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/VkSamplerCreateInfo.html
- createInfo.maxAnisotropy =
- std::min(static_cast<float>(maxAnisotropy),
- device->GetDeviceInfo().properties.limits.maxSamplerAnisotropy);
- } else {
- createInfo.anisotropyEnable = VK_FALSE;
- createInfo.maxAnisotropy = 1;
- }
-
- DAWN_TRY(CheckVkSuccess(
- device->fn.CreateSampler(device->GetVkDevice(), &createInfo, nullptr, &*mHandle),
- "CreateSampler"));
-
- SetLabelImpl();
-
- return {};
+ UNREACHABLE();
+}
+
+VkFilter VulkanSamplerFilter(wgpu::FilterMode filter) {
+ switch (filter) {
+ case wgpu::FilterMode::Linear:
+ return VK_FILTER_LINEAR;
+ case wgpu::FilterMode::Nearest:
+ return VK_FILTER_NEAREST;
+ }
+ UNREACHABLE();
+}
+
+VkSamplerMipmapMode VulkanMipMapMode(wgpu::FilterMode filter) {
+ switch (filter) {
+ case wgpu::FilterMode::Linear:
+ return VK_SAMPLER_MIPMAP_MODE_LINEAR;
+ case wgpu::FilterMode::Nearest:
+ return VK_SAMPLER_MIPMAP_MODE_NEAREST;
+ }
+ UNREACHABLE();
+}
+} // anonymous namespace
+
+// static
+ResultOrError<Ref<Sampler>> Sampler::Create(Device* device, const SamplerDescriptor* descriptor) {
+ Ref<Sampler> sampler = AcquireRef(new Sampler(device, descriptor));
+ DAWN_TRY(sampler->Initialize(descriptor));
+ return sampler;
+}
+
+MaybeError Sampler::Initialize(const SamplerDescriptor* descriptor) {
+ VkSamplerCreateInfo createInfo = {};
+ createInfo.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
+ createInfo.pNext = nullptr;
+ createInfo.flags = 0;
+ createInfo.magFilter = VulkanSamplerFilter(descriptor->magFilter);
+ createInfo.minFilter = VulkanSamplerFilter(descriptor->minFilter);
+ createInfo.mipmapMode = VulkanMipMapMode(descriptor->mipmapFilter);
+ createInfo.addressModeU = VulkanSamplerAddressMode(descriptor->addressModeU);
+ createInfo.addressModeV = VulkanSamplerAddressMode(descriptor->addressModeV);
+ createInfo.addressModeW = VulkanSamplerAddressMode(descriptor->addressModeW);
+ createInfo.mipLodBias = 0.0f;
+ if (descriptor->compare != wgpu::CompareFunction::Undefined) {
+ createInfo.compareOp = ToVulkanCompareOp(descriptor->compare);
+ createInfo.compareEnable = VK_TRUE;
+ } else {
+ // Still set the compareOp so it's not garbage.
+ createInfo.compareOp = VK_COMPARE_OP_NEVER;
+ createInfo.compareEnable = VK_FALSE;
+ }
+ createInfo.minLod = descriptor->lodMinClamp;
+ createInfo.maxLod = descriptor->lodMaxClamp;
+ createInfo.unnormalizedCoordinates = VK_FALSE;
+
+ Device* device = ToBackend(GetDevice());
+ uint16_t maxAnisotropy = GetMaxAnisotropy();
+ if (device->GetDeviceInfo().features.samplerAnisotropy == VK_TRUE && maxAnisotropy > 1) {
+ createInfo.anisotropyEnable = VK_TRUE;
+ // https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/VkSamplerCreateInfo.html
+ createInfo.maxAnisotropy =
+ std::min(static_cast<float>(maxAnisotropy),
+ device->GetDeviceInfo().properties.limits.maxSamplerAnisotropy);
+ } else {
+ createInfo.anisotropyEnable = VK_FALSE;
+ createInfo.maxAnisotropy = 1;
}
- Sampler::~Sampler() = default;
+ DAWN_TRY(CheckVkSuccess(
+ device->fn.CreateSampler(device->GetVkDevice(), &createInfo, nullptr, &*mHandle),
+ "CreateSampler"));
- void Sampler::DestroyImpl() {
- SamplerBase::DestroyImpl();
- if (mHandle != VK_NULL_HANDLE) {
- ToBackend(GetDevice())->GetFencedDeleter()->DeleteWhenUnused(mHandle);
- mHandle = VK_NULL_HANDLE;
- }
- }
+ SetLabelImpl();
- VkSampler Sampler::GetHandle() const {
- return mHandle;
- }
+ return {};
+}
+
+Sampler::~Sampler() = default;
- void Sampler::SetLabelImpl() {
- SetDebugName(ToBackend(GetDevice()), mHandle, "Dawn_Sampler", GetLabel());
+void Sampler::DestroyImpl() {
+ SamplerBase::DestroyImpl();
+ if (mHandle != VK_NULL_HANDLE) {
+ ToBackend(GetDevice())->GetFencedDeleter()->DeleteWhenUnused(mHandle);
+ mHandle = VK_NULL_HANDLE;
}
+}
+
+VkSampler Sampler::GetHandle() const {
+ return mHandle;
+}
+
+void Sampler::SetLabelImpl() {
+ SetDebugName(ToBackend(GetDevice()), mHandle, "Dawn_Sampler", GetLabel());
+}
} // namespace dawn::native::vulkan
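One small decision buried in the sampler hunk above is how the requested anisotropy interacts with the device: anisotropic filtering is enabled only when the samplerAnisotropy feature is present and the requested value exceeds 1, and the value is then clamped to the device's maxSamplerAnisotropy limit. The sketch below shows just that decision in isolation; the struct and function names are invented for the example and are not Dawn or Vulkan API.

#include <algorithm>
#include <cstdint>

// Illustrative stand-ins for the feature bit and limit Dawn queries from Vulkan.
struct FakeDeviceInfo {
    bool samplerAnisotropy;      // mirrors VkPhysicalDeviceFeatures::samplerAnisotropy
    float maxSamplerAnisotropy;  // mirrors VkPhysicalDeviceLimits::maxSamplerAnisotropy
};

struct AnisotropySetting {
    bool enable;
    float maxAnisotropy;
};

AnisotropySetting ResolveAnisotropy(const FakeDeviceInfo& info, uint16_t requested) {
    if (info.samplerAnisotropy && requested > 1) {
        // Clamp the request to what the device advertises.
        return {true, std::min(static_cast<float>(requested), info.maxSamplerAnisotropy)};
    }
    // Anisotropy disabled; VkSamplerCreateInfo still wants a valid value here.
    return {false, 1.0f};
}

int main() {
    FakeDeviceInfo info{true, 16.0f};
    AnisotropySetting s = ResolveAnisotropy(info, 32);  // clamped to 16
    return s.enable ? 0 : 1;
}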
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/SamplerVk.h b/chromium/third_party/dawn/src/dawn/native/vulkan/SamplerVk.h
index 078e02d7184..038e1bc9d36 100644
--- a/chromium/third_party/dawn/src/dawn/native/vulkan/SamplerVk.h
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/SamplerVk.h
@@ -22,26 +22,25 @@
namespace dawn::native::vulkan {
- class Device;
+class Device;
- class Sampler final : public SamplerBase {
- public:
- static ResultOrError<Ref<Sampler>> Create(Device* device,
- const SamplerDescriptor* descriptor);
+class Sampler final : public SamplerBase {
+ public:
+ static ResultOrError<Ref<Sampler>> Create(Device* device, const SamplerDescriptor* descriptor);
- VkSampler GetHandle() const;
+ VkSampler GetHandle() const;
- private:
- ~Sampler() override;
- void DestroyImpl() override;
- using SamplerBase::SamplerBase;
- MaybeError Initialize(const SamplerDescriptor* descriptor);
+ private:
+ ~Sampler() override;
+ void DestroyImpl() override;
+ using SamplerBase::SamplerBase;
+ MaybeError Initialize(const SamplerDescriptor* descriptor);
- // Dawn API
- void SetLabelImpl() override;
+ // Dawn API
+ void SetLabelImpl() override;
- VkSampler mHandle = VK_NULL_HANDLE;
- };
+ VkSampler mHandle = VK_NULL_HANDLE;
+};
} // namespace dawn::native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/ShaderModuleVk.cpp b/chromium/third_party/dawn/src/dawn/native/vulkan/ShaderModuleVk.cpp
index 81e136845f6..28e295dfa88 100644
--- a/chromium/third_party/dawn/src/dawn/native/vulkan/ShaderModuleVk.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/ShaderModuleVk.cpp
@@ -14,6 +14,10 @@
#include "dawn/native/vulkan/ShaderModuleVk.h"
+#include <spirv-tools/libspirv.hpp>
+
+#include <map>
+
#include "dawn/native/SpirvValidation.h"
#include "dawn/native/TintUtils.h"
#include "dawn/native/vulkan/BindGroupLayoutVk.h"
@@ -24,233 +28,231 @@
#include "dawn/native/vulkan/VulkanError.h"
#include "dawn/platform/DawnPlatform.h"
#include "dawn/platform/tracing/TraceEvent.h"
-
-#include <tint/tint.h>
-#include <spirv-tools/libspirv.hpp>
+#include "tint/tint.h"
namespace dawn::native::vulkan {
- ShaderModule::ConcurrentTransformedShaderModuleCache::ConcurrentTransformedShaderModuleCache(
- Device* device)
- : mDevice(device) {
- }
+ShaderModule::ConcurrentTransformedShaderModuleCache::ConcurrentTransformedShaderModuleCache(
+ Device* device)
+ : mDevice(device) {}
- ShaderModule::ConcurrentTransformedShaderModuleCache::
- ~ConcurrentTransformedShaderModuleCache() {
- std::lock_guard<std::mutex> lock(mMutex);
- for (const auto& [_, moduleAndSpirv] : mTransformedShaderModuleCache) {
- mDevice->GetFencedDeleter()->DeleteWhenUnused(moduleAndSpirv.first);
- }
+ShaderModule::ConcurrentTransformedShaderModuleCache::~ConcurrentTransformedShaderModuleCache() {
+ std::lock_guard<std::mutex> lock(mMutex);
+ for (const auto& [_, moduleAndSpirv] : mTransformedShaderModuleCache) {
+ mDevice->GetFencedDeleter()->DeleteWhenUnused(moduleAndSpirv.first);
}
-
- std::optional<ShaderModule::ModuleAndSpirv>
- ShaderModule::ConcurrentTransformedShaderModuleCache::Find(
- const PipelineLayoutEntryPointPair& key) {
- std::lock_guard<std::mutex> lock(mMutex);
- auto iter = mTransformedShaderModuleCache.find(key);
- if (iter != mTransformedShaderModuleCache.end()) {
- return std::make_pair(iter->second.first, iter->second.second.get());
- }
- return {};
- }
-
- ShaderModule::ModuleAndSpirv ShaderModule::ConcurrentTransformedShaderModuleCache::AddOrGet(
- const PipelineLayoutEntryPointPair& key,
- VkShaderModule module,
- std::vector<uint32_t>&& spirv) {
- ASSERT(module != VK_NULL_HANDLE);
- std::lock_guard<std::mutex> lock(mMutex);
- auto iter = mTransformedShaderModuleCache.find(key);
- if (iter == mTransformedShaderModuleCache.end()) {
- mTransformedShaderModuleCache.emplace(
- key, std::make_pair(module, std::unique_ptr<Spirv>(new Spirv(spirv))));
- } else {
- mDevice->GetFencedDeleter()->DeleteWhenUnused(module);
- }
- // Now the key should exist in the map, so find it again and return it.
- iter = mTransformedShaderModuleCache.find(key);
+}
+
+std::optional<ShaderModule::ModuleAndSpirv>
+ShaderModule::ConcurrentTransformedShaderModuleCache::Find(
+ const PipelineLayoutEntryPointPair& key) {
+ std::lock_guard<std::mutex> lock(mMutex);
+ auto iter = mTransformedShaderModuleCache.find(key);
+ if (iter != mTransformedShaderModuleCache.end()) {
return std::make_pair(iter->second.first, iter->second.second.get());
}
-
- // static
- ResultOrError<Ref<ShaderModule>> ShaderModule::Create(Device* device,
- const ShaderModuleDescriptor* descriptor,
- ShaderModuleParseResult* parseResult) {
- Ref<ShaderModule> module = AcquireRef(new ShaderModule(device, descriptor));
- DAWN_TRY(module->Initialize(parseResult));
- return module;
- }
-
- ShaderModule::ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor)
- : ShaderModuleBase(device, descriptor),
- mTransformedShaderModuleCache(
- std::make_unique<ConcurrentTransformedShaderModuleCache>(device)) {
+ return {};
+}
+
+ShaderModule::ModuleAndSpirv ShaderModule::ConcurrentTransformedShaderModuleCache::AddOrGet(
+ const PipelineLayoutEntryPointPair& key,
+ VkShaderModule module,
+ std::vector<uint32_t>&& spirv) {
+ ASSERT(module != VK_NULL_HANDLE);
+ std::lock_guard<std::mutex> lock(mMutex);
+ auto iter = mTransformedShaderModuleCache.find(key);
+ if (iter == mTransformedShaderModuleCache.end()) {
+ mTransformedShaderModuleCache.emplace(
+ key, std::make_pair(module, std::unique_ptr<Spirv>(new Spirv(spirv))));
+ } else {
+ mDevice->GetFencedDeleter()->DeleteWhenUnused(module);
}
+ // Now the key should exist in the map, so find it again and return it.
+ iter = mTransformedShaderModuleCache.find(key);
+ return std::make_pair(iter->second.first, iter->second.second.get());
+}
+
+// static
+ResultOrError<Ref<ShaderModule>> ShaderModule::Create(
+ Device* device,
+ const ShaderModuleDescriptor* descriptor,
+ ShaderModuleParseResult* parseResult,
+ OwnedCompilationMessages* compilationMessages) {
+ Ref<ShaderModule> module = AcquireRef(new ShaderModule(device, descriptor));
+ DAWN_TRY(module->Initialize(parseResult, compilationMessages));
+ return module;
+}
+
+ShaderModule::ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor)
+ : ShaderModuleBase(device, descriptor),
+ mTransformedShaderModuleCache(
+ std::make_unique<ConcurrentTransformedShaderModuleCache>(device)) {}
+
+MaybeError ShaderModule::Initialize(ShaderModuleParseResult* parseResult,
+ OwnedCompilationMessages* compilationMessages) {
+ if (GetDevice()->IsRobustnessEnabled()) {
+ ScopedTintICEHandler scopedICEHandler(GetDevice());
- MaybeError ShaderModule::Initialize(ShaderModuleParseResult* parseResult) {
- if (GetDevice()->IsRobustnessEnabled()) {
- ScopedTintICEHandler scopedICEHandler(GetDevice());
-
- tint::transform::Robustness robustness;
- tint::transform::DataMap transformInputs;
-
- tint::Program program;
- DAWN_TRY_ASSIGN(program, RunTransforms(&robustness, parseResult->tintProgram.get(),
- transformInputs, nullptr, nullptr));
- // Rather than use a new ParseResult object, we just reuse the original parseResult
- parseResult->tintProgram = std::make_unique<tint::Program>(std::move(program));
- }
-
- return InitializeBase(parseResult);
- }
+ tint::transform::Robustness robustness;
+ tint::transform::DataMap transformInputs;
- void ShaderModule::DestroyImpl() {
- ShaderModuleBase::DestroyImpl();
- // Remove reference to internal cache to trigger cleanup.
- mTransformedShaderModuleCache = nullptr;
+ tint::Program program;
+ DAWN_TRY_ASSIGN(program, RunTransforms(&robustness, parseResult->tintProgram.get(),
+ transformInputs, nullptr, nullptr));
+ // Rather than use a new ParseResult object, we just reuse the original parseResult
+ parseResult->tintProgram = std::make_unique<tint::Program>(std::move(program));
}
- ShaderModule::~ShaderModule() = default;
+ return InitializeBase(parseResult, compilationMessages);
+}
- ResultOrError<ShaderModule::ModuleAndSpirv> ShaderModule::GetHandleAndSpirv(
- const char* entryPointName,
- PipelineLayout* layout) {
- TRACE_EVENT0(GetDevice()->GetPlatform(), General, "ShaderModuleVk::GetHandleAndSpirv");
+void ShaderModule::DestroyImpl() {
+ ShaderModuleBase::DestroyImpl();
+ // Remove reference to internal cache to trigger cleanup.
+ mTransformedShaderModuleCache = nullptr;
+}
- // If the shader was destroyed, we should never call this function.
- ASSERT(IsAlive());
+ShaderModule::~ShaderModule() = default;
- ScopedTintICEHandler scopedICEHandler(GetDevice());
+ResultOrError<ShaderModule::ModuleAndSpirv> ShaderModule::GetHandleAndSpirv(
+ const char* entryPointName,
+ const PipelineLayout* layout) {
+ TRACE_EVENT0(GetDevice()->GetPlatform(), General, "ShaderModuleVk::GetHandleAndSpirv");
- // Check to see if we have the handle and spirv cached already.
- auto cacheKey = std::make_pair(layout, entryPointName);
- auto handleAndSpirv = mTransformedShaderModuleCache->Find(cacheKey);
- if (handleAndSpirv.has_value()) {
- return std::move(*handleAndSpirv);
- }
+ // If the shader was destroyed, we should never call this function.
+ ASSERT(IsAlive());
- // Creation of module and spirv is deferred to this point when using tint generator
-
- // Remap BindingNumber to BindingIndex in WGSL shader
- using BindingRemapper = tint::transform::BindingRemapper;
- using BindingPoint = tint::transform::BindingPoint;
- BindingRemapper::BindingPoints bindingPoints;
- BindingRemapper::AccessControls accessControls;
-
- const BindingInfoArray& moduleBindingInfo = GetEntryPoint(entryPointName).bindings;
-
- for (BindGroupIndex group : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
- const BindGroupLayout* bgl = ToBackend(layout->GetBindGroupLayout(group));
- const auto& groupBindingInfo = moduleBindingInfo[group];
- for (const auto& it : groupBindingInfo) {
- BindingNumber binding = it.first;
- BindingIndex bindingIndex = bgl->GetBindingIndex(binding);
- BindingPoint srcBindingPoint{static_cast<uint32_t>(group),
- static_cast<uint32_t>(binding)};
-
- BindingPoint dstBindingPoint{static_cast<uint32_t>(group),
- static_cast<uint32_t>(bindingIndex)};
- if (srcBindingPoint != dstBindingPoint) {
- bindingPoints.emplace(srcBindingPoint, dstBindingPoint);
- }
- }
- }
+ ScopedTintICEHandler scopedICEHandler(GetDevice());
- tint::transform::Manager transformManager;
- transformManager.append(std::make_unique<tint::transform::BindingRemapper>());
- // Many Vulkan drivers can't handle multi-entrypoint shader modules.
- transformManager.append(std::make_unique<tint::transform::SingleEntryPoint>());
+ // Check to see if we have the handle and spirv cached already.
+ auto cacheKey = std::make_pair(layout, entryPointName);
+ auto handleAndSpirv = mTransformedShaderModuleCache->Find(cacheKey);
+ if (handleAndSpirv.has_value()) {
+ return std::move(*handleAndSpirv);
+ }
- tint::transform::DataMap transformInputs;
- transformInputs.Add<BindingRemapper::Remappings>(std::move(bindingPoints),
- std::move(accessControls),
- /* mayCollide */ false);
- transformInputs.Add<tint::transform::SingleEntryPoint::Config>(entryPointName);
-
- // Transform external textures into the binding locations specified in the bgl
- // TODO(dawn:1082): Replace this block with ShaderModuleBase::AddExternalTextureTransform.
- tint::transform::MultiplanarExternalTexture::BindingsMap newBindingsMap;
- for (BindGroupIndex i : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
- BindGroupLayoutBase* bgl = layout->GetBindGroupLayout(i);
-
- ExternalTextureBindingExpansionMap expansions =
- bgl->GetExternalTextureBindingExpansionMap();
-
- std::map<BindingNumber, dawn_native::ExternalTextureBindingExpansion>::iterator it =
- expansions.begin();
-
- while (it != expansions.end()) {
- newBindingsMap[{static_cast<uint32_t>(i),
- static_cast<uint32_t>(bgl->GetBindingIndex(it->second.plane0))}] = {
- {static_cast<uint32_t>(i),
- static_cast<uint32_t>(bgl->GetBindingIndex(it->second.plane1))},
- {static_cast<uint32_t>(i),
- static_cast<uint32_t>(bgl->GetBindingIndex(it->second.params))}};
- it++;
+ // Creation of module and spirv is deferred to this point when using tint generator
+
+ // Remap BindingNumber to BindingIndex in WGSL shader
+ using BindingRemapper = tint::transform::BindingRemapper;
+ using BindingPoint = tint::transform::BindingPoint;
+ BindingRemapper::BindingPoints bindingPoints;
+ BindingRemapper::AccessControls accessControls;
+
+ const BindingInfoArray& moduleBindingInfo = GetEntryPoint(entryPointName).bindings;
+
+ for (BindGroupIndex group : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
+ const BindGroupLayout* bgl = ToBackend(layout->GetBindGroupLayout(group));
+ const auto& groupBindingInfo = moduleBindingInfo[group];
+ for (const auto& it : groupBindingInfo) {
+ BindingNumber binding = it.first;
+ BindingIndex bindingIndex = bgl->GetBindingIndex(binding);
+ BindingPoint srcBindingPoint{static_cast<uint32_t>(group),
+ static_cast<uint32_t>(binding)};
+
+ BindingPoint dstBindingPoint{static_cast<uint32_t>(group),
+ static_cast<uint32_t>(bindingIndex)};
+ if (srcBindingPoint != dstBindingPoint) {
+ bindingPoints.emplace(srcBindingPoint, dstBindingPoint);
}
}
+ }
- if (!newBindingsMap.empty()) {
- transformManager.Add<tint::transform::MultiplanarExternalTexture>();
- transformInputs.Add<tint::transform::MultiplanarExternalTexture::NewBindingPoints>(
- newBindingsMap);
+ tint::transform::Manager transformManager;
+ // Many Vulkan drivers can't handle multi-entrypoint shader modules.
+ transformManager.append(std::make_unique<tint::transform::SingleEntryPoint>());
+ // Run the binding remapper after SingleEntryPoint to avoid collisions with unused entryPoints.
+ transformManager.append(std::make_unique<tint::transform::BindingRemapper>());
+
+ tint::transform::DataMap transformInputs;
+ transformInputs.Add<tint::transform::SingleEntryPoint::Config>(entryPointName);
+ transformInputs.Add<BindingRemapper::Remappings>(std::move(bindingPoints),
+ std::move(accessControls),
+ /* mayCollide */ false);
+
+ // Transform external textures into the binding locations specified in the bgl
+ // TODO(dawn:1082): Replace this block with ShaderModuleBase::AddExternalTextureTransform.
+ tint::transform::MultiplanarExternalTexture::BindingsMap newBindingsMap;
+ for (BindGroupIndex i : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
+ const BindGroupLayoutBase* bgl = layout->GetBindGroupLayout(i);
+
+ ExternalTextureBindingExpansionMap expansions =
+ bgl->GetExternalTextureBindingExpansionMap();
+
+ std::map<BindingNumber, dawn_native::ExternalTextureBindingExpansion>::iterator it =
+ expansions.begin();
+
+ while (it != expansions.end()) {
+ newBindingsMap[{static_cast<uint32_t>(i),
+ static_cast<uint32_t>(bgl->GetBindingIndex(it->second.plane0))}] = {
+ {static_cast<uint32_t>(i),
+ static_cast<uint32_t>(bgl->GetBindingIndex(it->second.plane1))},
+ {static_cast<uint32_t>(i),
+ static_cast<uint32_t>(bgl->GetBindingIndex(it->second.params))}};
+ it++;
}
+ }
- tint::Program program;
- {
- TRACE_EVENT0(GetDevice()->GetPlatform(), General, "RunTransforms");
- DAWN_TRY_ASSIGN(program, RunTransforms(&transformManager, GetTintProgram(),
- transformInputs, nullptr, nullptr));
- }
+ if (!newBindingsMap.empty()) {
+ transformManager.Add<tint::transform::MultiplanarExternalTexture>();
+ transformInputs.Add<tint::transform::MultiplanarExternalTexture::NewBindingPoints>(
+ newBindingsMap);
+ }
+
+ tint::Program program;
+ {
+ TRACE_EVENT0(GetDevice()->GetPlatform(), General, "RunTransforms");
+ DAWN_TRY_ASSIGN(program, RunTransforms(&transformManager, GetTintProgram(), transformInputs,
+ nullptr, nullptr));
+ }
#if TINT_BUILD_SPV_WRITER
- tint::writer::spirv::Options options;
- options.emit_vertex_point_size = true;
- options.disable_workgroup_init = GetDevice()->IsToggleEnabled(Toggle::DisableWorkgroupInit);
- options.use_zero_initialize_workgroup_memory_extension =
- GetDevice()->IsToggleEnabled(Toggle::VulkanUseZeroInitializeWorkgroupMemoryExtension);
-
- Spirv spirv;
- {
- TRACE_EVENT0(GetDevice()->GetPlatform(), General, "tint::writer::spirv::Generate()");
- auto result = tint::writer::spirv::Generate(&program, options);
- DAWN_INVALID_IF(!result.success, "An error occured while generating SPIR-V: %s.",
- result.error);
-
- spirv = std::move(result.spirv);
- }
+ tint::writer::spirv::Options options;
+ options.emit_vertex_point_size = true;
+ options.disable_workgroup_init = GetDevice()->IsToggleEnabled(Toggle::DisableWorkgroupInit);
+ options.use_zero_initialize_workgroup_memory_extension =
+ GetDevice()->IsToggleEnabled(Toggle::VulkanUseZeroInitializeWorkgroupMemoryExtension);
+
+ Spirv spirv;
+ {
+ TRACE_EVENT0(GetDevice()->GetPlatform(), General, "tint::writer::spirv::Generate()");
+ auto result = tint::writer::spirv::Generate(&program, options);
+        DAWN_INVALID_IF(!result.success, "An error occurred while generating SPIR-V: %s.",
+ result.error);
+
+ spirv = std::move(result.spirv);
+ }
- DAWN_TRY(
- ValidateSpirv(GetDevice(), spirv, GetDevice()->IsToggleEnabled(Toggle::DumpShaders)));
+ DAWN_TRY(ValidateSpirv(GetDevice(), spirv, GetDevice()->IsToggleEnabled(Toggle::DumpShaders)));
- VkShaderModuleCreateInfo createInfo;
- createInfo.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
- createInfo.pNext = nullptr;
- createInfo.flags = 0;
- createInfo.codeSize = spirv.size() * sizeof(uint32_t);
- createInfo.pCode = spirv.data();
+ VkShaderModuleCreateInfo createInfo;
+ createInfo.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
+ createInfo.pNext = nullptr;
+ createInfo.flags = 0;
+ createInfo.codeSize = spirv.size() * sizeof(uint32_t);
+ createInfo.pCode = spirv.data();
- Device* device = ToBackend(GetDevice());
+ Device* device = ToBackend(GetDevice());
- VkShaderModule newHandle = VK_NULL_HANDLE;
- {
- TRACE_EVENT0(GetDevice()->GetPlatform(), General, "vkCreateShaderModule");
- DAWN_TRY(CheckVkSuccess(device->fn.CreateShaderModule(
- device->GetVkDevice(), &createInfo, nullptr, &*newHandle),
- "CreateShaderModule"));
- }
- ModuleAndSpirv moduleAndSpirv;
- if (newHandle != VK_NULL_HANDLE) {
- moduleAndSpirv =
- mTransformedShaderModuleCache->AddOrGet(cacheKey, newHandle, std::move(spirv));
- }
+ VkShaderModule newHandle = VK_NULL_HANDLE;
+ {
+ TRACE_EVENT0(GetDevice()->GetPlatform(), General, "vkCreateShaderModule");
+ DAWN_TRY(CheckVkSuccess(
+ device->fn.CreateShaderModule(device->GetVkDevice(), &createInfo, nullptr, &*newHandle),
+ "CreateShaderModule"));
+ }
+ ModuleAndSpirv moduleAndSpirv;
+ if (newHandle != VK_NULL_HANDLE) {
+ moduleAndSpirv =
+ mTransformedShaderModuleCache->AddOrGet(cacheKey, newHandle, std::move(spirv));
+ }
- SetDebugName(ToBackend(GetDevice()), moduleAndSpirv.first, "Dawn_ShaderModule", GetLabel());
+ SetDebugName(ToBackend(GetDevice()), moduleAndSpirv.first, "Dawn_ShaderModule", GetLabel());
- return std::move(moduleAndSpirv);
+ return std::move(moduleAndSpirv);
#else
- return DAWN_INTERNAL_ERROR("TINT_BUILD_SPV_WRITER is not defined.");
+ return DAWN_INTERNAL_ERROR("TINT_BUILD_SPV_WRITER is not defined.");
#endif
- }
+}
} // namespace dawn::native::vulkan
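The ConcurrentTransformedShaderModuleCache reformatted above follows an insert-or-discard pattern: under the mutex, the first thread to produce a transformed module for a given (pipeline layout, entry point) key wins, and a later duplicate hands its freshly created VkShaderModule to the fenced deleter instead of replacing the cached entry. The sketch below shows only that pattern in isolation; the template, the string key, and the discard callback are invented stand-ins rather than Dawn's actual classes.

#include <cstdio>
#include <functional>
#include <mutex>
#include <string>
#include <unordered_map>
#include <utility>

// Illustrative insert-or-discard cache. THandle plays the role of VkShaderModule;
// the discard callback plays the role of FencedDeleter::DeleteWhenUnused.
template <typename THandle>
class InsertOrDiscardCache {
  public:
    explicit InsertOrDiscardCache(std::function<void(THandle)> discard)
        : mDiscard(std::move(discard)) {}

    THandle AddOrGet(const std::string& key, THandle handle) {
        std::lock_guard<std::mutex> lock(mMutex);
        auto [iter, inserted] = mCache.emplace(key, handle);
        if (!inserted) {
            // Another thread already populated this key; the duplicate handle is
            // handed to the deleter rather than leaked or swapped into the cache.
            mDiscard(handle);
        }
        return iter->second;
    }

  private:
    std::mutex mMutex;
    std::unordered_map<std::string, THandle> mCache;
    std::function<void(THandle)> mDiscard;
};

int main() {
    InsertOrDiscardCache<int> cache([](int handle) { std::printf("discard %d\n", handle); });
    int first = cache.AddOrGet("vs_main@layout0", 42);   // inserted
    int second = cache.AddOrGet("vs_main@layout0", 99);  // discarded, returns 42
    std::printf("%d %d\n", first, second);
    return 0;
}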
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/ShaderModuleVk.h b/chromium/third_party/dawn/src/dawn/native/vulkan/ShaderModuleVk.h
index 8688cccc610..bd1c686b8ba 100644
--- a/chromium/third_party/dawn/src/dawn/native/vulkan/ShaderModuleVk.h
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/ShaderModuleVk.h
@@ -15,63 +15,66 @@
#ifndef SRC_DAWN_NATIVE_VULKAN_SHADERMODULEVK_H_
#define SRC_DAWN_NATIVE_VULKAN_SHADERMODULEVK_H_
-#include "dawn/native/ShaderModule.h"
-
-#include "dawn/common/vulkan_platform.h"
-#include "dawn/native/Error.h"
-
#include <memory>
#include <mutex>
-#include <optional>
+// TODO(https://crbug.com/dawn/1379) Update cpplint and remove NOLINT
+#include <optional>  // NOLINT(build/include_order)
+#include <unordered_map>
#include <utility>
#include <vector>
+#include "dawn/native/ShaderModule.h"
+
+#include "dawn/common/vulkan_platform.h"
+#include "dawn/native/Error.h"
+
namespace dawn::native::vulkan {
- class Device;
- class PipelineLayout;
+class Device;
+class PipelineLayout;
- class ShaderModule final : public ShaderModuleBase {
- public:
- using Spirv = std::vector<uint32_t>;
- using ModuleAndSpirv = std::pair<VkShaderModule, const Spirv*>;
+class ShaderModule final : public ShaderModuleBase {
+ public:
+ using Spirv = std::vector<uint32_t>;
+ using ModuleAndSpirv = std::pair<VkShaderModule, const Spirv*>;
+
+ static ResultOrError<Ref<ShaderModule>> Create(Device* device,
+ const ShaderModuleDescriptor* descriptor,
+ ShaderModuleParseResult* parseResult,
+ OwnedCompilationMessages* compilationMessages);
- static ResultOrError<Ref<ShaderModule>> Create(Device* device,
- const ShaderModuleDescriptor* descriptor,
- ShaderModuleParseResult* parseResult);
+ ResultOrError<ModuleAndSpirv> GetHandleAndSpirv(const char* entryPointName,
+ const PipelineLayout* layout);
- ResultOrError<ModuleAndSpirv> GetHandleAndSpirv(const char* entryPointName,
- PipelineLayout* layout);
+ private:
+ ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor);
+ ~ShaderModule() override;
+ MaybeError Initialize(ShaderModuleParseResult* parseResult,
+ OwnedCompilationMessages* compilationMessages);
+ void DestroyImpl() override;
+
+ // New handles created by GetHandleAndSpirv at pipeline creation time.
+ class ConcurrentTransformedShaderModuleCache {
+ public:
+ explicit ConcurrentTransformedShaderModuleCache(Device* device);
+ ~ConcurrentTransformedShaderModuleCache();
+
+ std::optional<ModuleAndSpirv> Find(const PipelineLayoutEntryPointPair& key);
+ ModuleAndSpirv AddOrGet(const PipelineLayoutEntryPointPair& key,
+ VkShaderModule module,
+ std::vector<uint32_t>&& spirv);
private:
- ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor);
- ~ShaderModule() override;
- MaybeError Initialize(ShaderModuleParseResult* parseResult);
- void DestroyImpl() override;
-
- // New handles created by GetHandleAndSpirv at pipeline creation time.
- class ConcurrentTransformedShaderModuleCache {
- public:
- explicit ConcurrentTransformedShaderModuleCache(Device* device);
- ~ConcurrentTransformedShaderModuleCache();
-
- std::optional<ModuleAndSpirv> Find(const PipelineLayoutEntryPointPair& key);
- ModuleAndSpirv AddOrGet(const PipelineLayoutEntryPointPair& key,
- VkShaderModule module,
- std::vector<uint32_t>&& spirv);
-
- private:
- using Entry = std::pair<VkShaderModule, std::unique_ptr<Spirv>>;
-
- Device* mDevice;
- std::mutex mMutex;
- std::unordered_map<PipelineLayoutEntryPointPair,
- Entry,
- PipelineLayoutEntryPointPairHashFunc>
+ using Entry = std::pair<VkShaderModule, std::unique_ptr<Spirv>>;
+
+ Device* mDevice;
+ std::mutex mMutex;
+ std::
+ unordered_map<PipelineLayoutEntryPointPair, Entry, PipelineLayoutEntryPointPairHashFunc>
mTransformedShaderModuleCache;
- };
- std::unique_ptr<ConcurrentTransformedShaderModuleCache> mTransformedShaderModuleCache;
};
+ std::unique_ptr<ConcurrentTransformedShaderModuleCache> mTransformedShaderModuleCache;
+};
} // namespace dawn::native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/StagingBufferVk.cpp b/chromium/third_party/dawn/src/dawn/native/vulkan/StagingBufferVk.cpp
index 97b0c61dfc2..f5b35bd54d3 100644
--- a/chromium/third_party/dawn/src/dawn/native/vulkan/StagingBufferVk.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/StagingBufferVk.cpp
@@ -22,55 +22,54 @@
namespace dawn::native::vulkan {
- StagingBuffer::StagingBuffer(size_t size, Device* device)
- : StagingBufferBase(size), mDevice(device) {
- }
+StagingBuffer::StagingBuffer(size_t size, Device* device)
+ : StagingBufferBase(size), mDevice(device) {}
- MaybeError StagingBuffer::Initialize() {
- VkBufferCreateInfo createInfo;
- createInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
- createInfo.pNext = nullptr;
- createInfo.flags = 0;
- createInfo.size = GetSize();
- createInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
- createInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
- createInfo.queueFamilyIndexCount = 0;
- createInfo.pQueueFamilyIndices = 0;
+MaybeError StagingBuffer::Initialize() {
+ VkBufferCreateInfo createInfo;
+ createInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
+ createInfo.pNext = nullptr;
+ createInfo.flags = 0;
+ createInfo.size = GetSize();
+ createInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
+ createInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+ createInfo.queueFamilyIndexCount = 0;
+ createInfo.pQueueFamilyIndices = 0;
- DAWN_TRY(CheckVkSuccess(
- mDevice->fn.CreateBuffer(mDevice->GetVkDevice(), &createInfo, nullptr, &*mBuffer),
- "vkCreateBuffer"));
+ DAWN_TRY(CheckVkSuccess(
+ mDevice->fn.CreateBuffer(mDevice->GetVkDevice(), &createInfo, nullptr, &*mBuffer),
+ "vkCreateBuffer"));
- VkMemoryRequirements requirements;
- mDevice->fn.GetBufferMemoryRequirements(mDevice->GetVkDevice(), mBuffer, &requirements);
+ VkMemoryRequirements requirements;
+ mDevice->fn.GetBufferMemoryRequirements(mDevice->GetVkDevice(), mBuffer, &requirements);
- DAWN_TRY_ASSIGN(mAllocation, mDevice->GetResourceMemoryAllocator()->Allocate(
- requirements, MemoryKind::LinearMappable));
+ DAWN_TRY_ASSIGN(mAllocation, mDevice->GetResourceMemoryAllocator()->Allocate(
+ requirements, MemoryKind::LinearMappable));
- DAWN_TRY(CheckVkSuccess(
- mDevice->fn.BindBufferMemory(mDevice->GetVkDevice(), mBuffer,
- ToBackend(mAllocation.GetResourceHeap())->GetMemory(),
- mAllocation.GetOffset()),
- "vkBindBufferMemory"));
+ DAWN_TRY(CheckVkSuccess(
+ mDevice->fn.BindBufferMemory(mDevice->GetVkDevice(), mBuffer,
+ ToBackend(mAllocation.GetResourceHeap())->GetMemory(),
+ mAllocation.GetOffset()),
+ "vkBindBufferMemory"));
- mMappedPointer = mAllocation.GetMappedPointer();
- if (mMappedPointer == nullptr) {
- return DAWN_INTERNAL_ERROR("Unable to map staging buffer.");
- }
+ mMappedPointer = mAllocation.GetMappedPointer();
+ if (mMappedPointer == nullptr) {
+ return DAWN_INTERNAL_ERROR("Unable to map staging buffer.");
+ }
- SetDebugName(mDevice, mBuffer, "Dawn_StagingBuffer");
+ SetDebugName(mDevice, mBuffer, "Dawn_StagingBuffer");
- return {};
- }
+ return {};
+}
- StagingBuffer::~StagingBuffer() {
- mMappedPointer = nullptr;
- mDevice->GetFencedDeleter()->DeleteWhenUnused(mBuffer);
- mDevice->GetResourceMemoryAllocator()->Deallocate(&mAllocation);
- }
+StagingBuffer::~StagingBuffer() {
+ mMappedPointer = nullptr;
+ mDevice->GetFencedDeleter()->DeleteWhenUnused(mBuffer);
+ mDevice->GetResourceMemoryAllocator()->Deallocate(&mAllocation);
+}
- VkBuffer StagingBuffer::GetBufferHandle() const {
- return mBuffer;
- }
+VkBuffer StagingBuffer::GetBufferHandle() const {
+ return mBuffer;
+}
} // namespace dawn::native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/StagingBufferVk.h b/chromium/third_party/dawn/src/dawn/native/vulkan/StagingBufferVk.h
index e69634ccceb..dbd48ed2aa9 100644
--- a/chromium/third_party/dawn/src/dawn/native/vulkan/StagingBufferVk.h
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/StagingBufferVk.h
@@ -21,22 +21,22 @@
namespace dawn::native::vulkan {
- class Device;
+class Device;
- class StagingBuffer : public StagingBufferBase {
- public:
- StagingBuffer(size_t size, Device* device);
- ~StagingBuffer() override;
+class StagingBuffer : public StagingBufferBase {
+ public:
+ StagingBuffer(size_t size, Device* device);
+ ~StagingBuffer() override;
- VkBuffer GetBufferHandle() const;
+ VkBuffer GetBufferHandle() const;
- MaybeError Initialize() override;
+ MaybeError Initialize() override;
- private:
- Device* mDevice;
- VkBuffer mBuffer;
- ResourceMemoryAllocation mAllocation;
- };
+ private:
+ Device* mDevice;
+ VkBuffer mBuffer;
+ ResourceMemoryAllocation mAllocation;
+};
} // namespace dawn::native::vulkan
#endif // SRC_DAWN_NATIVE_VULKAN_STAGINGBUFFERVK_H_
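
As a usage note for the interface above: a staging buffer like this is typically filled through its persistently mapped pointer and then copied into a device-local buffer on the GPU timeline. The sketch below is an illustration with raw Vulkan, not Dawn's copy path; it assumes the command buffer is already recording, the destination buffer was created with TRANSFER_DST usage, and the staging memory is host-coherent.

// Minimal sketch, not Dawn code: flushing a mapped staging buffer into a GPU buffer.
#include <vulkan/vulkan.h>
#include <cstring>

void UploadThroughStaging(VkCommandBuffer commandBuffer, void* stagingMappedPointer,
                          VkBuffer stagingBuffer, VkBuffer deviceLocalBuffer,
                          const void* data, VkDeviceSize size) {
    // Write into the persistently mapped allocation (host-coherent memory assumed;
    // otherwise a vkFlushMappedMemoryRanges call would be needed here).
    std::memcpy(stagingMappedPointer, data, static_cast<size_t>(size));

    // Record the transfer; the staging buffer was created with TRANSFER_SRC usage.
    VkBufferCopy region = {};
    region.srcOffset = 0;
    region.dstOffset = 0;
    region.size = size;
    vkCmdCopyBuffer(commandBuffer, stagingBuffer, deviceLocalBuffer, 1, &region);
}
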
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/SwapChainVk.cpp b/chromium/third_party/dawn/src/dawn/native/vulkan/SwapChainVk.cpp
index cd68c9a2a03..0a17bafb945 100644
--- a/chromium/third_party/dawn/src/dawn/native/vulkan/SwapChainVk.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/SwapChainVk.cpp
@@ -14,6 +14,10 @@
#include "dawn/native/vulkan/SwapChainVk.h"
+#include <algorithm>
+#include <limits>
+#include <utility>
+
#include "dawn/common/Compiler.h"
#include "dawn/native/Instance.h"
#include "dawn/native/Surface.h"
@@ -24,678 +28,692 @@
#include "dawn/native/vulkan/TextureVk.h"
#include "dawn/native/vulkan/VulkanError.h"
-#include <algorithm>
-
#if defined(DAWN_USE_X11)
-# include "dawn/native/XlibXcbFunctions.h"
+#include "dawn/native/XlibXcbFunctions.h"
#endif // defined(DAWN_USE_X11)
namespace dawn::native::vulkan {
- // OldSwapChain
+// OldSwapChain
- // static
- Ref<OldSwapChain> OldSwapChain::Create(Device* device, const SwapChainDescriptor* descriptor) {
- return AcquireRef(new OldSwapChain(device, descriptor));
- }
+// static
+Ref<OldSwapChain> OldSwapChain::Create(Device* device, const SwapChainDescriptor* descriptor) {
+ return AcquireRef(new OldSwapChain(device, descriptor));
+}
- OldSwapChain::OldSwapChain(Device* device, const SwapChainDescriptor* descriptor)
- : OldSwapChainBase(device, descriptor) {
- const auto& im = GetImplementation();
- DawnWSIContextVulkan wsiContext = {};
- im.Init(im.userData, &wsiContext);
+OldSwapChain::OldSwapChain(Device* device, const SwapChainDescriptor* descriptor)
+ : OldSwapChainBase(device, descriptor) {
+ const auto& im = GetImplementation();
+ DawnWSIContextVulkan wsiContext = {};
+ im.Init(im.userData, &wsiContext);
- ASSERT(im.textureUsage != WGPUTextureUsage_None);
- mTextureUsage = static_cast<wgpu::TextureUsage>(im.textureUsage);
- }
+ ASSERT(im.textureUsage != WGPUTextureUsage_None);
+ mTextureUsage = static_cast<wgpu::TextureUsage>(im.textureUsage);
+}
- OldSwapChain::~OldSwapChain() {
- }
-
- TextureBase* OldSwapChain::GetNextTextureImpl(const TextureDescriptor* descriptor) {
- const auto& im = GetImplementation();
- DawnSwapChainNextTexture next = {};
- DawnSwapChainError error = im.GetNextTexture(im.userData, &next);
+OldSwapChain::~OldSwapChain() {}
- if (error) {
- GetDevice()->HandleError(InternalErrorType::Internal, error);
- return nullptr;
- }
+TextureBase* OldSwapChain::GetNextTextureImpl(const TextureDescriptor* descriptor) {
+ const auto& im = GetImplementation();
+ DawnSwapChainNextTexture next = {};
+ DawnSwapChainError error = im.GetNextTexture(im.userData, &next);
- ::VkImage image = NativeNonDispatachableHandleFromU64<::VkImage>(next.texture.u64);
- VkImage nativeTexture = VkImage::CreateFromHandle(image);
- return Texture::CreateForSwapChain(ToBackend(GetDevice()), descriptor, nativeTexture)
- .Detach();
+ if (error) {
+ GetDevice()->HandleError(InternalErrorType::Internal, error);
+ return nullptr;
}
- MaybeError OldSwapChain::OnBeforePresent(TextureViewBase* view) {
- Device* device = ToBackend(GetDevice());
+ ::VkImage image = NativeNonDispatachableHandleFromU64<::VkImage>(next.texture.u64);
+ VkImage nativeTexture = VkImage::CreateFromHandle(image);
+ return Texture::CreateForSwapChain(ToBackend(GetDevice()), descriptor, nativeTexture).Detach();
+}
- // Perform the necessary pipeline barriers for the texture to be used with the usage
- // requested by the implementation.
- CommandRecordingContext* recordingContext = device->GetPendingRecordingContext();
- ToBackend(view->GetTexture())
- ->TransitionUsageNow(recordingContext, mTextureUsage, view->GetSubresourceRange());
+MaybeError OldSwapChain::OnBeforePresent(TextureViewBase* view) {
+ Device* device = ToBackend(GetDevice());
- DAWN_TRY(device->SubmitPendingCommands());
+ // Perform the necessary pipeline barriers for the texture to be used with the usage
+ // requested by the implementation.
+ CommandRecordingContext* recordingContext = device->GetPendingRecordingContext();
+ ToBackend(view->GetTexture())
+ ->TransitionUsageNow(recordingContext, mTextureUsage, view->GetSubresourceRange());
- return {};
- }
+ DAWN_TRY(device->SubmitPendingCommands());
+
+ return {};
+}
- // SwapChain
+// SwapChain
- namespace {
+namespace {
- ResultOrError<VkSurfaceKHR> CreateVulkanSurface(Adapter* adapter, Surface* surface) {
- const VulkanGlobalInfo& info = adapter->GetVulkanInstance()->GetGlobalInfo();
- const VulkanFunctions& fn = adapter->GetVulkanInstance()->GetFunctions();
- VkInstance instance = adapter->GetVulkanInstance()->GetVkInstance();
+ResultOrError<VkSurfaceKHR> CreateVulkanSurface(Adapter* adapter, Surface* surface) {
+ const VulkanGlobalInfo& info = adapter->GetVulkanInstance()->GetGlobalInfo();
+ const VulkanFunctions& fn = adapter->GetVulkanInstance()->GetFunctions();
+ VkInstance instance = adapter->GetVulkanInstance()->GetVkInstance();
- // May not be used in the platform-specific switches below.
- DAWN_UNUSED(info);
- DAWN_UNUSED(fn);
- DAWN_UNUSED(instance);
+ // May not be used in the platform-specific switches below.
+ DAWN_UNUSED(info);
+ DAWN_UNUSED(fn);
+ DAWN_UNUSED(instance);
- switch (surface->GetType()) {
+ switch (surface->GetType()) {
#if defined(DAWN_ENABLE_BACKEND_METAL)
- case Surface::Type::MetalLayer:
- if (info.HasExt(InstanceExt::MetalSurface)) {
- VkMetalSurfaceCreateInfoEXT createInfo;
- createInfo.sType = VK_STRUCTURE_TYPE_METAL_SURFACE_CREATE_INFO_EXT;
- createInfo.pNext = nullptr;
- createInfo.flags = 0;
- createInfo.pLayer = surface->GetMetalLayer();
-
- VkSurfaceKHR vkSurface = VK_NULL_HANDLE;
- DAWN_TRY(CheckVkSuccess(
- fn.CreateMetalSurfaceEXT(instance, &createInfo, nullptr, &*vkSurface),
- "CreateMetalSurface"));
- return vkSurface;
- }
- break;
+ case Surface::Type::MetalLayer:
+ if (info.HasExt(InstanceExt::MetalSurface)) {
+ VkMetalSurfaceCreateInfoEXT createInfo;
+ createInfo.sType = VK_STRUCTURE_TYPE_METAL_SURFACE_CREATE_INFO_EXT;
+ createInfo.pNext = nullptr;
+ createInfo.flags = 0;
+ createInfo.pLayer = surface->GetMetalLayer();
+
+ VkSurfaceKHR vkSurface = VK_NULL_HANDLE;
+ DAWN_TRY(CheckVkSuccess(
+ fn.CreateMetalSurfaceEXT(instance, &createInfo, nullptr, &*vkSurface),
+ "CreateMetalSurface"));
+ return vkSurface;
+ }
+ break;
#endif // defined(DAWN_ENABLE_BACKEND_METAL)
-#if defined(DAWN_PLATFORM_WINDOWS)
- case Surface::Type::WindowsHWND:
- if (info.HasExt(InstanceExt::Win32Surface)) {
- VkWin32SurfaceCreateInfoKHR createInfo;
- createInfo.sType = VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR;
- createInfo.pNext = nullptr;
- createInfo.flags = 0;
- createInfo.hinstance = static_cast<HINSTANCE>(surface->GetHInstance());
- createInfo.hwnd = static_cast<HWND>(surface->GetHWND());
-
- VkSurfaceKHR vkSurface = VK_NULL_HANDLE;
- DAWN_TRY(CheckVkSuccess(
- fn.CreateWin32SurfaceKHR(instance, &createInfo, nullptr, &*vkSurface),
- "CreateWin32Surface"));
- return vkSurface;
- }
- break;
-#endif // defined(DAWN_PLATFORM_WINDOWS)
-
-#if defined(DAWN_PLATFORM_ANDROID)
- case Surface::Type::AndroidWindow: {
- if (info.HasExt(InstanceExt::AndroidSurface)) {
- ASSERT(surface->GetAndroidNativeWindow() != nullptr);
-
- VkAndroidSurfaceCreateInfoKHR createInfo;
- createInfo.sType = VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR;
- createInfo.pNext = nullptr;
- createInfo.flags = 0;
- createInfo.window =
- static_cast<struct ANativeWindow*>(surface->GetAndroidNativeWindow());
-
- VkSurfaceKHR vkSurface = VK_NULL_HANDLE;
- DAWN_TRY(CheckVkSuccess(
- fn.CreateAndroidSurfaceKHR(instance, &createInfo, nullptr, &*vkSurface),
- "CreateAndroidSurfaceKHR"));
- return vkSurface;
- }
-
- break;
- }
-
-#endif // defined(DAWN_PLATFORM_ANDROID)
-
-#if defined(DAWN_USE_X11)
- case Surface::Type::XlibWindow: {
- if (info.HasExt(InstanceExt::XlibSurface)) {
- VkXlibSurfaceCreateInfoKHR createInfo;
- createInfo.sType = VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR;
- createInfo.pNext = nullptr;
- createInfo.flags = 0;
- createInfo.dpy = static_cast<Display*>(surface->GetXDisplay());
- createInfo.window = surface->GetXWindow();
-
- VkSurfaceKHR vkSurface = VK_NULL_HANDLE;
- DAWN_TRY(CheckVkSuccess(
- fn.CreateXlibSurfaceKHR(instance, &createInfo, nullptr, &*vkSurface),
- "CreateXlibSurface"));
- return vkSurface;
- }
-
- // Fall back to using XCB surfaces if the Xlib extension isn't available.
- // See https://xcb.freedesktop.org/MixingCalls/ for more information about
- // interoperability between Xlib and XCB
- const XlibXcbFunctions* xlibXcb =
- adapter->GetInstance()->GetOrCreateXlibXcbFunctions();
- ASSERT(xlibXcb != nullptr);
-
- if (info.HasExt(InstanceExt::XcbSurface) && xlibXcb->IsLoaded()) {
- VkXcbSurfaceCreateInfoKHR createInfo;
- createInfo.sType = VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR;
- createInfo.pNext = nullptr;
- createInfo.flags = 0;
- // The XCB connection lives as long as the X11 display.
- createInfo.connection = xlibXcb->xGetXCBConnection(
- static_cast<Display*>(surface->GetXDisplay()));
- createInfo.window = surface->GetXWindow();
-
- VkSurfaceKHR vkSurface = VK_NULL_HANDLE;
- DAWN_TRY(CheckVkSuccess(
- fn.CreateXcbSurfaceKHR(instance, &createInfo, nullptr, &*vkSurface),
- "CreateXcbSurfaceKHR"));
- return vkSurface;
- }
- break;
- }
-#endif // defined(DAWN_USE_X11)
-
- default:
- break;
+#if DAWN_PLATFORM_IS(WINDOWS)
+ case Surface::Type::WindowsHWND:
+ if (info.HasExt(InstanceExt::Win32Surface)) {
+ VkWin32SurfaceCreateInfoKHR createInfo;
+ createInfo.sType = VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR;
+ createInfo.pNext = nullptr;
+ createInfo.flags = 0;
+ createInfo.hinstance = static_cast<HINSTANCE>(surface->GetHInstance());
+ createInfo.hwnd = static_cast<HWND>(surface->GetHWND());
+
+ VkSurfaceKHR vkSurface = VK_NULL_HANDLE;
+ DAWN_TRY(CheckVkSuccess(
+ fn.CreateWin32SurfaceKHR(instance, &createInfo, nullptr, &*vkSurface),
+ "CreateWin32Surface"));
+ return vkSurface;
+ }
+ break;
+#endif // DAWN_PLATFORM_IS(WINDOWS)
+
+#if DAWN_PLATFORM_IS(ANDROID)
+ case Surface::Type::AndroidWindow: {
+ if (info.HasExt(InstanceExt::AndroidSurface)) {
+ ASSERT(surface->GetAndroidNativeWindow() != nullptr);
+
+ VkAndroidSurfaceCreateInfoKHR createInfo;
+ createInfo.sType = VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR;
+ createInfo.pNext = nullptr;
+ createInfo.flags = 0;
+ createInfo.window =
+ static_cast<struct ANativeWindow*>(surface->GetAndroidNativeWindow());
+
+ VkSurfaceKHR vkSurface = VK_NULL_HANDLE;
+ DAWN_TRY(CheckVkSuccess(
+ fn.CreateAndroidSurfaceKHR(instance, &createInfo, nullptr, &*vkSurface),
+ "CreateAndroidSurfaceKHR"));
+ return vkSurface;
}
- return DAWN_FORMAT_VALIDATION_ERROR("Unsupported surface type (%s) for Vulkan.",
- surface->GetType());
+ break;
}
- VkPresentModeKHR ToVulkanPresentMode(wgpu::PresentMode mode) {
- switch (mode) {
- case wgpu::PresentMode::Fifo:
- return VK_PRESENT_MODE_FIFO_KHR;
- case wgpu::PresentMode::Immediate:
- return VK_PRESENT_MODE_IMMEDIATE_KHR;
- case wgpu::PresentMode::Mailbox:
- return VK_PRESENT_MODE_MAILBOX_KHR;
+#endif // DAWN_PLATFORM_IS(ANDROID)
+
+#if defined(DAWN_USE_WAYLAND)
+ case Surface::Type::WaylandSurface: {
+ if (info.HasExt(InstanceExt::XlibSurface)) {
+ VkWaylandSurfaceCreateInfoKHR createInfo;
+ createInfo.sType = VK_STRUCTURE_TYPE_WAYLAND_SURFACE_CREATE_INFO_KHR;
+ createInfo.pNext = nullptr;
+ createInfo.flags = 0;
+ createInfo.display = static_cast<struct wl_display*>(surface->GetWaylandDisplay());
+ createInfo.surface = static_cast<struct wl_surface*>(surface->GetWaylandSurface());
+
+ VkSurfaceKHR vkSurface = VK_NULL_HANDLE;
+ DAWN_TRY(CheckVkSuccess(
+ fn.CreateWaylandSurfaceKHR(instance, &createInfo, nullptr, &*vkSurface),
+ "CreateWaylandSurface"));
+ return vkSurface;
}
- UNREACHABLE();
+ break;
}
+#endif // defined(DAWN_USE_WAYLAND)
- uint32_t MinImageCountForPresentMode(VkPresentModeKHR mode) {
- switch (mode) {
- case VK_PRESENT_MODE_FIFO_KHR:
- case VK_PRESENT_MODE_IMMEDIATE_KHR:
- return 2;
- case VK_PRESENT_MODE_MAILBOX_KHR:
- return 3;
- default:
- break;
+#if defined(DAWN_USE_X11)
+ case Surface::Type::XlibWindow: {
+ if (info.HasExt(InstanceExt::XlibSurface)) {
+ VkXlibSurfaceCreateInfoKHR createInfo;
+ createInfo.sType = VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR;
+ createInfo.pNext = nullptr;
+ createInfo.flags = 0;
+ createInfo.dpy = static_cast<Display*>(surface->GetXDisplay());
+ createInfo.window = surface->GetXWindow();
+
+ VkSurfaceKHR vkSurface = VK_NULL_HANDLE;
+ DAWN_TRY(CheckVkSuccess(
+ fn.CreateXlibSurfaceKHR(instance, &createInfo, nullptr, &*vkSurface),
+ "CreateXlibSurface"));
+ return vkSurface;
}
- UNREACHABLE();
- }
- } // anonymous namespace
+ // Fall back to using XCB surfaces if the Xlib extension isn't available.
+ // See https://xcb.freedesktop.org/MixingCalls/ for more information about
+ // interoperability between Xlib and XCB
+ const XlibXcbFunctions* xlibXcb = adapter->GetInstance()->GetOrCreateXlibXcbFunctions();
+ ASSERT(xlibXcb != nullptr);
+
+ if (info.HasExt(InstanceExt::XcbSurface) && xlibXcb->IsLoaded()) {
+ VkXcbSurfaceCreateInfoKHR createInfo;
+ createInfo.sType = VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR;
+ createInfo.pNext = nullptr;
+ createInfo.flags = 0;
+ // The XCB connection lives as long as the X11 display.
+ createInfo.connection =
+ xlibXcb->xGetXCBConnection(static_cast<Display*>(surface->GetXDisplay()));
+ createInfo.window = surface->GetXWindow();
+
+ VkSurfaceKHR vkSurface = VK_NULL_HANDLE;
+ DAWN_TRY(CheckVkSuccess(
+ fn.CreateXcbSurfaceKHR(instance, &createInfo, nullptr, &*vkSurface),
+ "CreateXcbSurfaceKHR"));
+ return vkSurface;
+ }
+ break;
+ }
+#endif // defined(DAWN_USE_X11)
- // static
- ResultOrError<Ref<SwapChain>> SwapChain::Create(Device* device,
- Surface* surface,
- NewSwapChainBase* previousSwapChain,
- const SwapChainDescriptor* descriptor) {
- Ref<SwapChain> swapchain = AcquireRef(new SwapChain(device, surface, descriptor));
- DAWN_TRY(swapchain->Initialize(previousSwapChain));
- return swapchain;
+ default:
+ break;
}
- SwapChain::~SwapChain() = default;
-
- void SwapChain::DestroyImpl() {
- SwapChainBase::DestroyImpl();
- DetachFromSurface();
+ return DAWN_FORMAT_VALIDATION_ERROR("Unsupported surface type (%s) for Vulkan.",
+ surface->GetType());
+}
+
+VkPresentModeKHR ToVulkanPresentMode(wgpu::PresentMode mode) {
+ switch (mode) {
+ case wgpu::PresentMode::Fifo:
+ return VK_PRESENT_MODE_FIFO_KHR;
+ case wgpu::PresentMode::Immediate:
+ return VK_PRESENT_MODE_IMMEDIATE_KHR;
+ case wgpu::PresentMode::Mailbox:
+ return VK_PRESENT_MODE_MAILBOX_KHR;
+ }
+ UNREACHABLE();
+}
+
+uint32_t MinImageCountForPresentMode(VkPresentModeKHR mode) {
+ switch (mode) {
+ case VK_PRESENT_MODE_FIFO_KHR:
+ case VK_PRESENT_MODE_IMMEDIATE_KHR:
+ return 2;
+ case VK_PRESENT_MODE_MAILBOX_KHR:
+ return 3;
+ default:
+ break;
+ }
+ UNREACHABLE();
+}
+
+} // anonymous namespace
+
+// static
+ResultOrError<Ref<SwapChain>> SwapChain::Create(Device* device,
+ Surface* surface,
+ NewSwapChainBase* previousSwapChain,
+ const SwapChainDescriptor* descriptor) {
+ Ref<SwapChain> swapchain = AcquireRef(new SwapChain(device, surface, descriptor));
+ DAWN_TRY(swapchain->Initialize(previousSwapChain));
+ return swapchain;
+}
+
+SwapChain::~SwapChain() = default;
+
+void SwapChain::DestroyImpl() {
+ SwapChainBase::DestroyImpl();
+ DetachFromSurface();
+}
+
+// Note that when we need to re-create the swapchain because it is out of date,
+// previousSwapChain can be set to `this`.
+MaybeError SwapChain::Initialize(NewSwapChainBase* previousSwapChain) {
+ Device* device = ToBackend(GetDevice());
+ Adapter* adapter = ToBackend(GetDevice()->GetAdapter());
+
+ VkSwapchainKHR previousVkSwapChain = VK_NULL_HANDLE;
+
+ if (previousSwapChain != nullptr) {
+ // TODO(crbug.com/dawn/269): The first time a surface is used with a Device, check
+ // it is supported with vkGetPhysicalDeviceSurfaceSupportKHR.
+
+ // TODO(crbug.com/dawn/269): figure out what should happen when surfaces are used by
+ // multiple backends one after the other. It probably needs to block until the backend
+ // and GPU are completely finished with the previous swapchain.
+ DAWN_INVALID_IF(previousSwapChain->GetBackendType() != wgpu::BackendType::Vulkan,
+ "Vulkan SwapChain cannot switch backend types from %s to %s.",
+ previousSwapChain->GetBackendType(), wgpu::BackendType::Vulkan);
+
+ // TODO(crbug.com/dawn/269): use ToBackend once OldSwapChainBase is removed.
+ SwapChain* previousVulkanSwapChain = static_cast<SwapChain*>(previousSwapChain);
+
+ // TODO(crbug.com/dawn/269): Figure out switching a single surface between multiple
+ // Vulkan devices on different VkInstances. Probably needs to block too!
+ VkInstance previousInstance = ToBackend(previousSwapChain->GetDevice())->GetVkInstance();
+ DAWN_INVALID_IF(previousInstance != ToBackend(GetDevice())->GetVkInstance(),
+ "Vulkan SwapChain cannot switch between Vulkan instances.");
+
+ // The previous swapchain is a dawn::native::vulkan::SwapChain so we can reuse its
+        // VkSurfaceKHR since they are on the same instance.
+ std::swap(previousVulkanSwapChain->mVkSurface, mVkSurface);
+
+ // The previous swapchain was on the same Vulkan instance so we can use Vulkan's
+ // "oldSwapchain" mechanism to ensure a seamless transition. We track the previous
+ // swapchain for release immediately so it is not leaked in case of an error. (Vulkan
+ // allows destroying it immediately after the call to vkCreateSwapChainKHR but tracking
+ // using the fenced deleter makes the code simpler).
+ std::swap(previousVulkanSwapChain->mSwapChain, previousVkSwapChain);
+ ToBackend(previousSwapChain->GetDevice())
+ ->GetFencedDeleter()
+ ->DeleteWhenUnused(previousVkSwapChain);
}
- // Note that when we need to re-create the swapchain because it is out of date,
- // previousSwapChain can be set to `this`.
- MaybeError SwapChain::Initialize(NewSwapChainBase* previousSwapChain) {
- Device* device = ToBackend(GetDevice());
- Adapter* adapter = ToBackend(GetDevice()->GetAdapter());
-
- VkSwapchainKHR previousVkSwapChain = VK_NULL_HANDLE;
-
- if (previousSwapChain != nullptr) {
- // TODO(crbug.com/dawn/269): The first time a surface is used with a Device, check
- // it is supported with vkGetPhysicalDeviceSurfaceSupportKHR.
-
- // TODO(crbug.com/dawn/269): figure out what should happen when surfaces are used by
- // multiple backends one after the other. It probably needs to block until the backend
- // and GPU are completely finished with the previous swapchain.
- DAWN_INVALID_IF(previousSwapChain->GetBackendType() != wgpu::BackendType::Vulkan,
- "Vulkan SwapChain cannot switch backend types from %s to %s.",
- previousSwapChain->GetBackendType(), wgpu::BackendType::Vulkan);
-
- // TODO(crbug.com/dawn/269): use ToBackend once OldSwapChainBase is removed.
- SwapChain* previousVulkanSwapChain = static_cast<SwapChain*>(previousSwapChain);
-
- // TODO(crbug.com/dawn/269): Figure out switching a single surface between multiple
- // Vulkan devices on different VkInstances. Probably needs to block too!
- VkInstance previousInstance =
- ToBackend(previousSwapChain->GetDevice())->GetVkInstance();
- DAWN_INVALID_IF(previousInstance != ToBackend(GetDevice())->GetVkInstance(),
- "Vulkan SwapChain cannot switch between Vulkan instances.");
-
- // The previous swapchain is a dawn::native::vulkan::SwapChain so we can reuse its
- // VkSurfaceKHR provided since they are on the same instance.
- std::swap(previousVulkanSwapChain->mVkSurface, mVkSurface);
-
- // The previous swapchain was on the same Vulkan instance so we can use Vulkan's
- // "oldSwapchain" mechanism to ensure a seamless transition. We track the previous
- // swapchain for release immediately so it is not leaked in case of an error. (Vulkan
- // allows destroying it immediately after the call to vkCreateSwapChainKHR but tracking
- // using the fenced deleter makes the code simpler).
- std::swap(previousVulkanSwapChain->mSwapChain, previousVkSwapChain);
- ToBackend(previousSwapChain->GetDevice())
- ->GetFencedDeleter()
- ->DeleteWhenUnused(previousVkSwapChain);
- }
-
- if (mVkSurface == VK_NULL_HANDLE) {
- DAWN_TRY_ASSIGN(mVkSurface, CreateVulkanSurface(adapter, GetSurface()));
- }
-
- VulkanSurfaceInfo surfaceInfo;
- DAWN_TRY_ASSIGN(surfaceInfo, GatherSurfaceInfo(*adapter, mVkSurface));
-
- DAWN_TRY_ASSIGN(mConfig, ChooseConfig(surfaceInfo));
-
- // TODO Choose config instead of hardcoding
- VkSwapchainCreateInfoKHR createInfo;
- createInfo.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
- createInfo.pNext = nullptr;
- createInfo.flags = 0;
- createInfo.surface = mVkSurface;
- createInfo.minImageCount = mConfig.targetImageCount;
- createInfo.imageFormat = mConfig.format;
- createInfo.imageColorSpace = mConfig.colorSpace;
- createInfo.imageExtent = mConfig.extent;
- createInfo.imageArrayLayers = 1;
- createInfo.imageUsage = mConfig.usage;
- createInfo.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
- createInfo.queueFamilyIndexCount = 0;
- createInfo.pQueueFamilyIndices = nullptr;
- createInfo.preTransform = mConfig.transform;
- createInfo.compositeAlpha = mConfig.alphaMode;
- createInfo.presentMode = mConfig.presentMode;
- createInfo.clipped = false;
- createInfo.oldSwapchain = previousVkSwapChain;
-
- DAWN_TRY(CheckVkSuccess(device->fn.CreateSwapchainKHR(device->GetVkDevice(), &createInfo,
- nullptr, &*mSwapChain),
- "CreateSwapChain"));
-
- // Gather the swapchain's images. Implementations are allowed to return more images than the
- // number we asked for.
- uint32_t count = 0;
- DAWN_TRY(CheckVkSuccess(
- device->fn.GetSwapchainImagesKHR(device->GetVkDevice(), mSwapChain, &count, nullptr),
- "GetSwapChainImages1"));
-
- mSwapChainImages.resize(count);
- DAWN_TRY(CheckVkSuccess(
- device->fn.GetSwapchainImagesKHR(device->GetVkDevice(), mSwapChain, &count,
- AsVkArray(mSwapChainImages.data())),
- "GetSwapChainImages2"));
-
- return {};
+ if (mVkSurface == VK_NULL_HANDLE) {
+ DAWN_TRY_ASSIGN(mVkSurface, CreateVulkanSurface(adapter, GetSurface()));
}
- ResultOrError<SwapChain::Config> SwapChain::ChooseConfig(
- const VulkanSurfaceInfo& surfaceInfo) const {
- Config config;
-
- // Choose the present mode. The only guaranteed one is FIFO so it has to be the fallback for
- // all other present modes. IMMEDIATE has tearing which is generally undesirable so it can't
- // be the fallback for MAILBOX. So the fallback order is always IMMEDIATE -> MAILBOX ->
- // FIFO.
- {
- auto HasPresentMode = [](const std::vector<VkPresentModeKHR>& modes,
- VkPresentModeKHR target) -> bool {
- return std::find(modes.begin(), modes.end(), target) != modes.end();
- };
-
- VkPresentModeKHR targetMode = ToVulkanPresentMode(GetPresentMode());
- const std::array<VkPresentModeKHR, 3> kPresentModeFallbacks = {
- VK_PRESENT_MODE_IMMEDIATE_KHR,
- VK_PRESENT_MODE_MAILBOX_KHR,
- VK_PRESENT_MODE_FIFO_KHR,
- };
-
- // Go to the target mode.
- size_t modeIndex = 0;
- while (kPresentModeFallbacks[modeIndex] != targetMode) {
- modeIndex++;
- }
+ VulkanSurfaceInfo surfaceInfo;
+ DAWN_TRY_ASSIGN(surfaceInfo, GatherSurfaceInfo(*adapter, mVkSurface));
+
+ DAWN_TRY_ASSIGN(mConfig, ChooseConfig(surfaceInfo));
+
+ // TODO(dawn:269): Choose config instead of hardcoding
+ VkSwapchainCreateInfoKHR createInfo;
+ createInfo.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
+ createInfo.pNext = nullptr;
+ createInfo.flags = 0;
+ createInfo.surface = mVkSurface;
+ createInfo.minImageCount = mConfig.targetImageCount;
+ createInfo.imageFormat = mConfig.format;
+ createInfo.imageColorSpace = mConfig.colorSpace;
+ createInfo.imageExtent = mConfig.extent;
+ createInfo.imageArrayLayers = 1;
+ createInfo.imageUsage = mConfig.usage;
+ createInfo.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
+ createInfo.queueFamilyIndexCount = 0;
+ createInfo.pQueueFamilyIndices = nullptr;
+ createInfo.preTransform = mConfig.transform;
+ createInfo.compositeAlpha = mConfig.alphaMode;
+ createInfo.presentMode = mConfig.presentMode;
+ createInfo.clipped = false;
+ createInfo.oldSwapchain = previousVkSwapChain;
+
+ DAWN_TRY(CheckVkSuccess(
+ device->fn.CreateSwapchainKHR(device->GetVkDevice(), &createInfo, nullptr, &*mSwapChain),
+ "CreateSwapChain"));
+
+ // Gather the swapchain's images. Implementations are allowed to return more images than the
+ // number we asked for.
+ uint32_t count = 0;
+ DAWN_TRY(CheckVkSuccess(
+ device->fn.GetSwapchainImagesKHR(device->GetVkDevice(), mSwapChain, &count, nullptr),
+ "GetSwapChainImages1"));
+
+ mSwapChainImages.resize(count);
+ DAWN_TRY(
+ CheckVkSuccess(device->fn.GetSwapchainImagesKHR(device->GetVkDevice(), mSwapChain, &count,
+ AsVkArray(mSwapChainImages.data())),
+ "GetSwapChainImages2"));
+
+ return {};
+}
+
+ResultOrError<SwapChain::Config> SwapChain::ChooseConfig(
+ const VulkanSurfaceInfo& surfaceInfo) const {
+ Config config;
+
+ // Choose the present mode. The only guaranteed one is FIFO so it has to be the fallback for
+ // all other present modes. IMMEDIATE has tearing which is generally undesirable so it can't
+ // be the fallback for MAILBOX. So the fallback order is always IMMEDIATE -> MAILBOX ->
+ // FIFO.
+ {
+ auto HasPresentMode = [](const std::vector<VkPresentModeKHR>& modes,
+ VkPresentModeKHR target) -> bool {
+ return std::find(modes.begin(), modes.end(), target) != modes.end();
+ };
- // Find the first available fallback.
- while (!HasPresentMode(surfaceInfo.presentModes, kPresentModeFallbacks[modeIndex])) {
- modeIndex++;
- }
+ VkPresentModeKHR targetMode = ToVulkanPresentMode(GetPresentMode());
+ const std::array<VkPresentModeKHR, 3> kPresentModeFallbacks = {
+ VK_PRESENT_MODE_IMMEDIATE_KHR,
+ VK_PRESENT_MODE_MAILBOX_KHR,
+ VK_PRESENT_MODE_FIFO_KHR,
+ };
- ASSERT(modeIndex < kPresentModeFallbacks.size());
- config.presentMode = kPresentModeFallbacks[modeIndex];
+ // Go to the target mode.
+ size_t modeIndex = 0;
+ while (kPresentModeFallbacks[modeIndex] != targetMode) {
+ modeIndex++;
}
- // Choose the target width or do a blit.
- if (GetWidth() < surfaceInfo.capabilities.minImageExtent.width ||
- GetWidth() > surfaceInfo.capabilities.maxImageExtent.width ||
- GetHeight() < surfaceInfo.capabilities.minImageExtent.height ||
- GetHeight() > surfaceInfo.capabilities.maxImageExtent.height) {
- config.needsBlit = true;
- } else {
- config.extent.width = GetWidth();
- config.extent.height = GetHeight();
+ // Find the first available fallback.
+ while (!HasPresentMode(surfaceInfo.presentModes, kPresentModeFallbacks[modeIndex])) {
+ modeIndex++;
}
- // Choose the target usage or do a blit.
- VkImageUsageFlags targetUsages =
- VulkanImageUsage(GetUsage(), GetDevice()->GetValidInternalFormat(GetFormat()));
- VkImageUsageFlags supportedUsages = surfaceInfo.capabilities.supportedUsageFlags;
- if (!IsSubset(targetUsages, supportedUsages)) {
- config.needsBlit = true;
- } else {
- config.usage = targetUsages;
- config.wgpuUsage = GetUsage();
- }
+ ASSERT(modeIndex < kPresentModeFallbacks.size());
+ config.presentMode = kPresentModeFallbacks[modeIndex];
+ }
- // Only support BGRA8Unorm (and RGBA8Unorm on android) with SRGB color space for now.
- config.wgpuFormat = GetFormat();
- config.format = VulkanImageFormat(ToBackend(GetDevice()), config.wgpuFormat);
- config.colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
+ // Choose the target width or do a blit.
+ if (GetWidth() < surfaceInfo.capabilities.minImageExtent.width ||
+ GetWidth() > surfaceInfo.capabilities.maxImageExtent.width ||
+ GetHeight() < surfaceInfo.capabilities.minImageExtent.height ||
+ GetHeight() > surfaceInfo.capabilities.maxImageExtent.height) {
+ config.needsBlit = true;
+ } else {
+ config.extent.width = GetWidth();
+ config.extent.height = GetHeight();
+ }
- bool formatIsSupported = false;
- for (const VkSurfaceFormatKHR& format : surfaceInfo.formats) {
- if (format.format == config.format && format.colorSpace == config.colorSpace) {
- formatIsSupported = true;
- break;
- }
- }
- if (!formatIsSupported) {
- return DAWN_INTERNAL_ERROR(absl::StrFormat(
- "Vulkan SwapChain must support %s with sRGB colorspace.", config.wgpuFormat));
+ // Choose the target usage or do a blit.
+ VkImageUsageFlags targetUsages =
+ VulkanImageUsage(GetUsage(), GetDevice()->GetValidInternalFormat(GetFormat()));
+ VkImageUsageFlags supportedUsages = surfaceInfo.capabilities.supportedUsageFlags;
+ if (!IsSubset(targetUsages, supportedUsages)) {
+ config.needsBlit = true;
+ } else {
+ config.usage = targetUsages;
+ config.wgpuUsage = GetUsage();
+ }
+
+ // Only support BGRA8Unorm (and RGBA8Unorm on android) with SRGB color space for now.
+ config.wgpuFormat = GetFormat();
+ config.format = VulkanImageFormat(ToBackend(GetDevice()), config.wgpuFormat);
+ config.colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
+
+ bool formatIsSupported = false;
+ for (const VkSurfaceFormatKHR& format : surfaceInfo.formats) {
+ if (format.format == config.format && format.colorSpace == config.colorSpace) {
+ formatIsSupported = true;
+ break;
}
+ }
+ if (!formatIsSupported) {
+ return DAWN_INTERNAL_ERROR(absl::StrFormat(
+ "Vulkan SwapChain must support %s with sRGB colorspace.", config.wgpuFormat));
+ }
- // Only the identity transform with opaque alpha is supported for now.
- DAWN_INVALID_IF((surfaceInfo.capabilities.supportedTransforms &
- VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR) == 0,
- "Vulkan SwapChain must support the identity transform.");
+ // Only the identity transform with opaque alpha is supported for now.
+ DAWN_INVALID_IF(
+ (surfaceInfo.capabilities.supportedTransforms & VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR) == 0,
+ "Vulkan SwapChain must support the identity transform.");
- config.transform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
+ config.transform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
- config.alphaMode = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
-#if !defined(DAWN_PLATFORM_ANDROID)
- DAWN_INVALID_IF((surfaceInfo.capabilities.supportedCompositeAlpha &
- VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR) == 0,
- "Vulkan SwapChain must support opaque alpha.");
+ config.alphaMode = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
+#if !DAWN_PLATFORM_IS(ANDROID)
+ DAWN_INVALID_IF(
+ (surfaceInfo.capabilities.supportedCompositeAlpha & VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR) == 0,
+ "Vulkan SwapChain must support opaque alpha.");
#else
- // TODO(dawn:286): investigate composite alpha for WebGPU native
- VkCompositeAlphaFlagBitsKHR compositeAlphaFlags[4] = {
- VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR,
- VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR,
- VK_COMPOSITE_ALPHA_POST_MULTIPLIED_BIT_KHR,
- VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR,
- };
- for (uint32_t i = 0; i < 4; i++) {
- if (surfaceInfo.capabilities.supportedCompositeAlpha & compositeAlphaFlags[i]) {
- config.alphaMode = compositeAlphaFlags[i];
- break;
- }
+ // TODO(dawn:286): investigate composite alpha for WebGPU native
+ VkCompositeAlphaFlagBitsKHR compositeAlphaFlags[4] = {
+ VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR,
+ VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR,
+ VK_COMPOSITE_ALPHA_POST_MULTIPLIED_BIT_KHR,
+ VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR,
+ };
+ for (uint32_t i = 0; i < 4; i++) {
+ if (surfaceInfo.capabilities.supportedCompositeAlpha & compositeAlphaFlags[i]) {
+ config.alphaMode = compositeAlphaFlags[i];
+ break;
}
-#endif // #if !defined(DAWN_PLATFORM_ANDROID)
+ }
+#endif // #if !DAWN_PLATFORM_IS(ANDROID)
- // Choose the number of images for the swapchain= and clamp it to the min and max from the
- // surface capabilities. maxImageCount = 0 means there is no limit.
- ASSERT(surfaceInfo.capabilities.maxImageCount == 0 ||
- surfaceInfo.capabilities.minImageCount <= surfaceInfo.capabilities.maxImageCount);
- uint32_t targetCount = MinImageCountForPresentMode(config.presentMode);
+    // Choose the number of images for the swapchain and clamp it to the min and max from the
+ // surface capabilities. maxImageCount = 0 means there is no limit.
+ ASSERT(surfaceInfo.capabilities.maxImageCount == 0 ||
+ surfaceInfo.capabilities.minImageCount <= surfaceInfo.capabilities.maxImageCount);
+ uint32_t targetCount = MinImageCountForPresentMode(config.presentMode);
- targetCount = std::max(targetCount, surfaceInfo.capabilities.minImageCount);
- if (surfaceInfo.capabilities.maxImageCount != 0) {
- targetCount = std::min(targetCount, surfaceInfo.capabilities.maxImageCount);
- }
+ targetCount = std::max(targetCount, surfaceInfo.capabilities.minImageCount);
+ if (surfaceInfo.capabilities.maxImageCount != 0) {
+ targetCount = std::min(targetCount, surfaceInfo.capabilities.maxImageCount);
+ }
- config.targetImageCount = targetCount;
-
- // Choose a valid config for the swapchain texture that will receive the blit.
- if (config.needsBlit) {
- // Vulkan has provisions to have surfaces that adapt to the swapchain size. If that's
- // the case it is very likely that the target extent works, but clamp it just in case.
- // Using the target extent for the blit is better when possible so that texels don't
- // get stretched. This case is exposed by having the special "-1" value in both
- // dimensions of the extent.
- constexpr uint32_t kSpecialValue = 0xFFFF'FFFF;
- if (surfaceInfo.capabilities.currentExtent.width == kSpecialValue &&
- surfaceInfo.capabilities.currentExtent.height == kSpecialValue) {
- // extent = clamp(targetExtent, minExtent, maxExtent)
- config.extent.width = GetWidth();
- config.extent.width =
- std::min(config.extent.width, surfaceInfo.capabilities.maxImageExtent.width);
- config.extent.width =
- std::max(config.extent.width, surfaceInfo.capabilities.minImageExtent.width);
-
- config.extent.height = GetHeight();
- config.extent.height =
- std::min(config.extent.height, surfaceInfo.capabilities.maxImageExtent.height);
- config.extent.height =
- std::max(config.extent.height, surfaceInfo.capabilities.minImageExtent.height);
- } else {
- // If it is not an adaptable swapchain, just use the current extent for the blit
- // texture.
- config.extent = surfaceInfo.capabilities.currentExtent;
- }
+ config.targetImageCount = targetCount;
+
+ // Choose a valid config for the swapchain texture that will receive the blit.
+ if (config.needsBlit) {
+ // Vulkan has provisions to have surfaces that adapt to the swapchain size. If that's
+ // the case it is very likely that the target extent works, but clamp it just in case.
+ // Using the target extent for the blit is better when possible so that texels don't
+ // get stretched. This case is exposed by having the special "-1" value in both
+ // dimensions of the extent.
+ constexpr uint32_t kSpecialValue = 0xFFFF'FFFF;
+ if (surfaceInfo.capabilities.currentExtent.width == kSpecialValue &&
+ surfaceInfo.capabilities.currentExtent.height == kSpecialValue) {
+ // extent = clamp(targetExtent, minExtent, maxExtent)
+ config.extent.width = GetWidth();
+ config.extent.width =
+ std::min(config.extent.width, surfaceInfo.capabilities.maxImageExtent.width);
+ config.extent.width =
+ std::max(config.extent.width, surfaceInfo.capabilities.minImageExtent.width);
- // TODO(crbug.com/dawn/269): If the swapchain image doesn't support TRANSFER_DST
- // then we'll need to have a second fallback that uses a blit shader :(
- if ((supportedUsages & VK_IMAGE_USAGE_TRANSFER_DST_BIT) == 0) {
- return DAWN_INTERNAL_ERROR(
- "SwapChain cannot fallback to a blit because of a missing "
- "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
- }
- config.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT;
- config.wgpuUsage = wgpu::TextureUsage::CopyDst;
+ config.extent.height = GetHeight();
+ config.extent.height =
+ std::min(config.extent.height, surfaceInfo.capabilities.maxImageExtent.height);
+ config.extent.height =
+ std::max(config.extent.height, surfaceInfo.capabilities.minImageExtent.height);
+ } else {
+ // If it is not an adaptable swapchain, just use the current extent for the blit
+ // texture.
+ config.extent = surfaceInfo.capabilities.currentExtent;
}
- return config;
+ // TODO(crbug.com/dawn/269): If the swapchain image doesn't support TRANSFER_DST
+ // then we'll need to have a second fallback that uses a blit shader :(
+ if ((supportedUsages & VK_IMAGE_USAGE_TRANSFER_DST_BIT) == 0) {
+ return DAWN_INTERNAL_ERROR(
+ "SwapChain cannot fallback to a blit because of a missing "
+ "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
+ }
+ config.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT;
+ config.wgpuUsage = wgpu::TextureUsage::CopyDst;
}
- MaybeError SwapChain::PresentImpl() {
- Device* device = ToBackend(GetDevice());
-
- CommandRecordingContext* recordingContext = device->GetPendingRecordingContext();
-
- if (mConfig.needsBlit) {
- // TODO ditto same as present below: eagerly transition the blit texture to CopySrc.
- mBlitTexture->TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopySrc,
- mBlitTexture->GetAllSubresources());
- mTexture->TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopyDst,
- mTexture->GetAllSubresources());
-
- VkImageBlit region;
- region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
- region.srcSubresource.mipLevel = 0;
- region.srcSubresource.baseArrayLayer = 0;
- region.srcSubresource.layerCount = 1;
- region.srcOffsets[0] = {0, 0, 0};
- region.srcOffsets[1] = {static_cast<int32_t>(mBlitTexture->GetWidth()),
- static_cast<int32_t>(mBlitTexture->GetHeight()), 1};
-
- region.dstSubresource = region.srcSubresource;
- region.dstOffsets[0] = {0, 0, 0};
- region.dstOffsets[1] = {static_cast<int32_t>(mTexture->GetWidth()),
- static_cast<int32_t>(mTexture->GetHeight()), 1};
-
- device->fn.CmdBlitImage(recordingContext->commandBuffer, mBlitTexture->GetHandle(),
- mBlitTexture->GetCurrentLayoutForSwapChain(),
- mTexture->GetHandle(), mTexture->GetCurrentLayoutForSwapChain(),
- 1, &region, VK_FILTER_LINEAR);
-
- // TODO(crbug.com/dawn/269): Find a way to reuse the blit texture between frames
- // instead of creating a new one every time. This will involve "un-destroying" the
- // texture or making the blit texture "external".
- mBlitTexture->APIDestroy();
- mBlitTexture = nullptr;
- }
+ return config;
+}
- // TODO(crbug.com/dawn/269): Remove the need for this by eagerly transitioning the
- // presentable texture to present at the end of submits that use them and ideally even
- // folding that in the free layout transition at the end of render passes.
- mTexture->TransitionUsageNow(recordingContext, kPresentTextureUsage,
- mTexture->GetAllSubresources());
+MaybeError SwapChain::PresentImpl() {
+ Device* device = ToBackend(GetDevice());
- DAWN_TRY(device->SubmitPendingCommands());
-
- // Assuming that the present queue is the same as the graphics queue, the proper
- // synchronization has already been done on the queue so we don't need to wait on any
- // semaphores.
- // TODO(crbug.com/dawn/269): Support the present queue not being the main queue.
- VkPresentInfoKHR presentInfo;
- presentInfo.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR;
- presentInfo.pNext = nullptr;
- presentInfo.waitSemaphoreCount = 0;
- presentInfo.pWaitSemaphores = nullptr;
- presentInfo.swapchainCount = 1;
- presentInfo.pSwapchains = &*mSwapChain;
- presentInfo.pImageIndices = &mLastImageIndex;
- presentInfo.pResults = nullptr;
-
- // Free the texture before present so error handling doesn't skip that step.
- mTexture->APIDestroy();
- mTexture = nullptr;
+ CommandRecordingContext* recordingContext = device->GetPendingRecordingContext();
- VkResult result =
- VkResult::WrapUnsafe(device->fn.QueuePresentKHR(device->GetQueue(), &presentInfo));
-
- switch (result) {
- case VK_SUCCESS:
- // VK_SUBOPTIMAL_KHR means "a swapchain no longer matches the surface properties
- // exactly, but can still be used to present to the surface successfully", so we
- // can also treat it as a "success" error code of vkQueuePresentKHR().
- case VK_SUBOPTIMAL_KHR:
- return {};
-
- // This present cannot be recovered. Re-initialize the VkSwapchain so that future
- // presents work..
- case VK_ERROR_OUT_OF_DATE_KHR:
- return Initialize(this);
-
- // TODO(crbug.com/dawn/269): Allow losing the surface at Dawn's API level?
- case VK_ERROR_SURFACE_LOST_KHR:
- default:
- return CheckVkSuccess(::VkResult(result), "QueuePresent");
- }
- }
+ if (mConfig.needsBlit) {
+        // TODO(dawn:269): Same as the present path below: eagerly transition the blit texture to
+        // CopySrc.
+ mBlitTexture->TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopySrc,
+ mBlitTexture->GetAllSubresources());
+ mTexture->TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopyDst,
+ mTexture->GetAllSubresources());
- ResultOrError<Ref<TextureViewBase>> SwapChain::GetCurrentTextureViewImpl() {
- return GetCurrentTextureViewInternal();
+ VkImageBlit region;
+ region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+ region.srcSubresource.mipLevel = 0;
+ region.srcSubresource.baseArrayLayer = 0;
+ region.srcSubresource.layerCount = 1;
+ region.srcOffsets[0] = {0, 0, 0};
+ region.srcOffsets[1] = {static_cast<int32_t>(mBlitTexture->GetWidth()),
+ static_cast<int32_t>(mBlitTexture->GetHeight()), 1};
+
+ region.dstSubresource = region.srcSubresource;
+ region.dstOffsets[0] = {0, 0, 0};
+ region.dstOffsets[1] = {static_cast<int32_t>(mTexture->GetWidth()),
+ static_cast<int32_t>(mTexture->GetHeight()), 1};
+
+ device->fn.CmdBlitImage(recordingContext->commandBuffer, mBlitTexture->GetHandle(),
+ mBlitTexture->GetCurrentLayoutForSwapChain(), mTexture->GetHandle(),
+ mTexture->GetCurrentLayoutForSwapChain(), 1, &region,
+ VK_FILTER_LINEAR);
+
+ // TODO(crbug.com/dawn/269): Find a way to reuse the blit texture between frames
+ // instead of creating a new one every time. This will involve "un-destroying" the
+ // texture or making the blit texture "external".
+ mBlitTexture->APIDestroy();
+ mBlitTexture = nullptr;
}
- ResultOrError<Ref<TextureViewBase>> SwapChain::GetCurrentTextureViewInternal(bool isReentrant) {
- Device* device = ToBackend(GetDevice());
-
- // Transiently create a semaphore that will be signaled when the presentation engine is done
- // with the swapchain image. Further operations on the image will wait for this semaphore.
- VkSemaphoreCreateInfo createInfo;
- createInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
- createInfo.pNext = nullptr;
- createInfo.flags = 0;
-
- VkSemaphore semaphore = VK_NULL_HANDLE;
- DAWN_TRY(CheckVkSuccess(
- device->fn.CreateSemaphore(device->GetVkDevice(), &createInfo, nullptr, &*semaphore),
- "CreateSemaphore"));
-
- VkResult result = VkResult::WrapUnsafe(device->fn.AcquireNextImageKHR(
- device->GetVkDevice(), mSwapChain, std::numeric_limits<uint64_t>::max(), semaphore,
- VkFence{}, &mLastImageIndex));
-
- if (result == VK_SUCCESS) {
- // TODO(crbug.com/dawn/269) put the semaphore on the texture so it is waited on when
- // used instead of directly on the recording context?
- device->GetPendingRecordingContext()->waitSemaphores.push_back(semaphore);
- } else {
- // The semaphore wasn't actually used (? this is unclear in the spec). Delete it when
- // we get a chance.
- ToBackend(GetDevice())->GetFencedDeleter()->DeleteWhenUnused(semaphore);
- }
+ // TODO(crbug.com/dawn/269): Remove the need for this by eagerly transitioning the
+ // presentable texture to present at the end of submits that use them and ideally even
+    // folding that into the free layout transition at the end of render passes.
+ mTexture->TransitionUsageNow(recordingContext, kPresentTextureUsage,
+ mTexture->GetAllSubresources());
+
+ DAWN_TRY(device->SubmitPendingCommands());
+
+ // Assuming that the present queue is the same as the graphics queue, the proper
+ // synchronization has already been done on the queue so we don't need to wait on any
+ // semaphores.
+ // TODO(crbug.com/dawn/269): Support the present queue not being the main queue.
+ VkPresentInfoKHR presentInfo;
+ presentInfo.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR;
+ presentInfo.pNext = nullptr;
+ presentInfo.waitSemaphoreCount = 0;
+ presentInfo.pWaitSemaphores = nullptr;
+ presentInfo.swapchainCount = 1;
+ presentInfo.pSwapchains = &*mSwapChain;
+ presentInfo.pImageIndices = &mLastImageIndex;
+ presentInfo.pResults = nullptr;
+
+ // Free the texture before present so error handling doesn't skip that step.
+ mTexture->APIDestroy();
+ mTexture = nullptr;
+
+ VkResult result =
+ VkResult::WrapUnsafe(device->fn.QueuePresentKHR(device->GetQueue(), &presentInfo));
+
+ switch (result) {
+ case VK_SUCCESS:
+ // VK_SUBOPTIMAL_KHR means "a swapchain no longer matches the surface properties
+ // exactly, but can still be used to present to the surface successfully", so we
+ // can also treat it as a "success" error code of vkQueuePresentKHR().
+ case VK_SUBOPTIMAL_KHR:
+ return {};
+
+ // This present cannot be recovered. Re-initialize the VkSwapchain so that future
+        // presents work.
+ case VK_ERROR_OUT_OF_DATE_KHR:
+ return Initialize(this);
+
+ // TODO(crbug.com/dawn/269): Allow losing the surface at Dawn's API level?
+ case VK_ERROR_SURFACE_LOST_KHR:
+ default:
+ return CheckVkSuccess(::VkResult(result), "QueuePresent");
+ }
+}
+
+ResultOrError<Ref<TextureViewBase>> SwapChain::GetCurrentTextureViewImpl() {
+ return GetCurrentTextureViewInternal();
+}
+
+ResultOrError<Ref<TextureViewBase>> SwapChain::GetCurrentTextureViewInternal(bool isReentrant) {
+ Device* device = ToBackend(GetDevice());
+
+ // Transiently create a semaphore that will be signaled when the presentation engine is done
+ // with the swapchain image. Further operations on the image will wait for this semaphore.
+ VkSemaphoreCreateInfo createInfo;
+ createInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
+ createInfo.pNext = nullptr;
+ createInfo.flags = 0;
+
+ VkSemaphore semaphore = VK_NULL_HANDLE;
+ DAWN_TRY(CheckVkSuccess(
+ device->fn.CreateSemaphore(device->GetVkDevice(), &createInfo, nullptr, &*semaphore),
+ "CreateSemaphore"));
+
+ VkResult result = VkResult::WrapUnsafe(device->fn.AcquireNextImageKHR(
+ device->GetVkDevice(), mSwapChain, std::numeric_limits<uint64_t>::max(), semaphore,
+ VkFence{}, &mLastImageIndex));
+
+ if (result == VK_SUCCESS) {
+ // TODO(crbug.com/dawn/269) put the semaphore on the texture so it is waited on when
+ // used instead of directly on the recording context?
+ device->GetPendingRecordingContext()->waitSemaphores.push_back(semaphore);
+ } else {
+ // The semaphore wasn't actually used (? this is unclear in the spec). Delete it when
+ // we get a chance.
+ ToBackend(GetDevice())->GetFencedDeleter()->DeleteWhenUnused(semaphore);
+ }
- switch (result) {
- // TODO(crbug.com/dawn/269): Introduce a mechanism to notify the application that
- // the swapchain is in a suboptimal state?
- case VK_SUBOPTIMAL_KHR:
- case VK_SUCCESS:
- break;
-
- case VK_ERROR_OUT_OF_DATE_KHR: {
- // Prevent infinite recursive calls to GetCurrentTextureViewInternal when the
- // swapchains always return that they are out of date.
- if (isReentrant) {
- // TODO(crbug.com/dawn/269): Allow losing the surface instead?
- return DAWN_INTERNAL_ERROR(
- "Wasn't able to recuperate the surface after a VK_ERROR_OUT_OF_DATE_KHR");
- }
-
- // Re-initialize the VkSwapchain and try getting the texture again.
- DAWN_TRY(Initialize(this));
- return GetCurrentTextureViewInternal(true);
+ switch (result) {
+ // TODO(crbug.com/dawn/269): Introduce a mechanism to notify the application that
+ // the swapchain is in a suboptimal state?
+ case VK_SUBOPTIMAL_KHR:
+ case VK_SUCCESS:
+ break;
+
+ case VK_ERROR_OUT_OF_DATE_KHR: {
+ // Prevent infinite recursive calls to GetCurrentTextureViewInternal when the
+ // swapchains always return that they are out of date.
+ if (isReentrant) {
+ // TODO(crbug.com/dawn/269): Allow losing the surface instead?
+ return DAWN_INTERNAL_ERROR(
+ "Wasn't able to recuperate the surface after a VK_ERROR_OUT_OF_DATE_KHR");
}
- // TODO(crbug.com/dawn/269): Allow losing the surface at Dawn's API level?
- case VK_ERROR_SURFACE_LOST_KHR:
- default:
- DAWN_TRY(CheckVkSuccess(::VkResult(result), "AcquireNextImage"));
+ // Re-initialize the VkSwapchain and try getting the texture again.
+ DAWN_TRY(Initialize(this));
+ return GetCurrentTextureViewInternal(true);
}
- TextureDescriptor textureDesc;
- textureDesc.size.width = mConfig.extent.width;
- textureDesc.size.height = mConfig.extent.height;
- textureDesc.format = mConfig.wgpuFormat;
- textureDesc.usage = mConfig.wgpuUsage;
+ // TODO(crbug.com/dawn/269): Allow losing the surface at Dawn's API level?
+ case VK_ERROR_SURFACE_LOST_KHR:
+ default:
+ DAWN_TRY(CheckVkSuccess(::VkResult(result), "AcquireNextImage"));
+ }
- VkImage currentImage = mSwapChainImages[mLastImageIndex];
- mTexture = Texture::CreateForSwapChain(device, &textureDesc, currentImage);
+ TextureDescriptor textureDesc;
+ textureDesc.size.width = mConfig.extent.width;
+ textureDesc.size.height = mConfig.extent.height;
+ textureDesc.format = mConfig.wgpuFormat;
+ textureDesc.usage = mConfig.wgpuUsage;
- // In the happy path we can use the swapchain image directly.
- if (!mConfig.needsBlit) {
- return mTexture->CreateView();
- }
+ VkImage currentImage = mSwapChainImages[mLastImageIndex];
+ mTexture = Texture::CreateForSwapChain(device, &textureDesc, currentImage);
- // The blit texture always perfectly matches what the user requested for the swapchain.
- // We need to add the Vulkan TRANSFER_SRC flag for the vkCmdBlitImage call.
- TextureDescriptor desc = GetSwapChainBaseTextureDescriptor(this);
- DAWN_TRY_ASSIGN(mBlitTexture,
- Texture::Create(device, &desc, VK_IMAGE_USAGE_TRANSFER_SRC_BIT));
- return mBlitTexture->CreateView();
+ // In the happy path we can use the swapchain image directly.
+ if (!mConfig.needsBlit) {
+ return mTexture->CreateView();
}
- void SwapChain::DetachFromSurfaceImpl() {
- if (mTexture != nullptr) {
- mTexture->APIDestroy();
- mTexture = nullptr;
- }
+ // The blit texture always perfectly matches what the user requested for the swapchain.
+ // We need to add the Vulkan TRANSFER_SRC flag for the vkCmdBlitImage call.
+ TextureDescriptor desc = GetSwapChainBaseTextureDescriptor(this);
+ DAWN_TRY_ASSIGN(mBlitTexture, Texture::Create(device, &desc, VK_IMAGE_USAGE_TRANSFER_SRC_BIT));
+ return mBlitTexture->CreateView();
+}
- if (mBlitTexture != nullptr) {
- mBlitTexture->APIDestroy();
- mBlitTexture = nullptr;
- }
+void SwapChain::DetachFromSurfaceImpl() {
+ if (mTexture != nullptr) {
+ mTexture->APIDestroy();
+ mTexture = nullptr;
+ }
- // The swapchain images are destroyed with the swapchain.
- if (mSwapChain != VK_NULL_HANDLE) {
- ToBackend(GetDevice())->GetFencedDeleter()->DeleteWhenUnused(mSwapChain);
- mSwapChain = VK_NULL_HANDLE;
- }
+ if (mBlitTexture != nullptr) {
+ mBlitTexture->APIDestroy();
+ mBlitTexture = nullptr;
+ }
- if (mVkSurface != VK_NULL_HANDLE) {
- ToBackend(GetDevice())->GetFencedDeleter()->DeleteWhenUnused(mVkSurface);
- mVkSurface = VK_NULL_HANDLE;
- }
+ // The swapchain images are destroyed with the swapchain.
+ if (mSwapChain != VK_NULL_HANDLE) {
+ ToBackend(GetDevice())->GetFencedDeleter()->DeleteWhenUnused(mSwapChain);
+ mSwapChain = VK_NULL_HANDLE;
+ }
+
+ if (mVkSurface != VK_NULL_HANDLE) {
+ ToBackend(GetDevice())->GetFencedDeleter()->DeleteWhenUnused(mVkSurface);
+ mVkSurface = VK_NULL_HANDLE;
}
+}
} // namespace dawn::native::vulkan
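
The core of SwapChain::ChooseConfig above can be read in isolation: pick the requested present mode if the surface reports it, otherwise walk the IMMEDIATE -> MAILBOX -> FIFO fallback chain (FIFO is the only mode Vulkan guarantees), then clamp the image count to the surface capabilities, where maxImageCount == 0 means "no upper limit". A standalone sketch of just that selection logic, not Dawn code, assuming the target mode is one of the three modes WebGPU exposes:

// Minimal sketch, not Dawn code: present-mode fallback and image-count selection.
#include <vulkan/vulkan.h>
#include <algorithm>
#include <array>
#include <cstdint>
#include <vector>

VkPresentModeKHR ChoosePresentMode(VkPresentModeKHR target,
                                   const std::vector<VkPresentModeKHR>& supported) {
    // FIFO terminates the chain because the Vulkan spec guarantees its availability.
    const std::array<VkPresentModeKHR, 3> fallbacks = {
        VK_PRESENT_MODE_IMMEDIATE_KHR, VK_PRESENT_MODE_MAILBOX_KHR, VK_PRESENT_MODE_FIFO_KHR};
    size_t i = 0;
    while (fallbacks[i] != target) {
        i++;  // Skip modes "better" than the one requested; `target` must be in the array.
    }
    while (std::find(supported.begin(), supported.end(), fallbacks[i]) == supported.end()) {
        i++;  // Fall back until the surface reports support; FIFO always matches.
    }
    return fallbacks[i];
}

uint32_t ChooseImageCount(VkPresentModeKHR mode, const VkSurfaceCapabilitiesKHR& caps) {
    // Mailbox needs a third image to bounce between; FIFO and IMMEDIATE work with two.
    uint32_t count = (mode == VK_PRESENT_MODE_MAILBOX_KHR) ? 3u : 2u;
    count = std::max(count, caps.minImageCount);
    if (caps.maxImageCount != 0) {  // 0 means the surface imposes no upper limit.
        count = std::min(count, caps.maxImageCount);
    }
    return count;
}
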
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/SwapChainVk.h b/chromium/third_party/dawn/src/dawn/native/vulkan/SwapChainVk.h
index a3503ce4915..7163de71ef0 100644
--- a/chromium/third_party/dawn/src/dawn/native/vulkan/SwapChainVk.h
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/SwapChainVk.h
@@ -15,83 +15,83 @@
#ifndef SRC_DAWN_NATIVE_VULKAN_SWAPCHAINVK_H_
#define SRC_DAWN_NATIVE_VULKAN_SWAPCHAINVK_H_
+#include <vector>
+
#include "dawn/native/SwapChain.h"
#include "dawn/common/vulkan_platform.h"
-#include <vector>
-
namespace dawn::native::vulkan {
- class Device;
- class Texture;
- struct VulkanSurfaceInfo;
-
- class OldSwapChain : public OldSwapChainBase {
- public:
- static Ref<OldSwapChain> Create(Device* device, const SwapChainDescriptor* descriptor);
+class Device;
+class Texture;
+struct VulkanSurfaceInfo;
+
+class OldSwapChain : public OldSwapChainBase {
+ public:
+ static Ref<OldSwapChain> Create(Device* device, const SwapChainDescriptor* descriptor);
+
+ protected:
+ OldSwapChain(Device* device, const SwapChainDescriptor* descriptor);
+ ~OldSwapChain() override;
+
+ TextureBase* GetNextTextureImpl(const TextureDescriptor* descriptor) override;
+ MaybeError OnBeforePresent(TextureViewBase* texture) override;
+
+ private:
+ wgpu::TextureUsage mTextureUsage;
+};
+
+class SwapChain : public NewSwapChainBase {
+ public:
+ static ResultOrError<Ref<SwapChain>> Create(Device* device,
+ Surface* surface,
+ NewSwapChainBase* previousSwapChain,
+ const SwapChainDescriptor* descriptor);
+ ~SwapChain() override;
+
+ private:
+ using NewSwapChainBase::NewSwapChainBase;
+ MaybeError Initialize(NewSwapChainBase* previousSwapChain);
+ void DestroyImpl() override;
+
+ struct Config {
+ // Information that's passed to vulkan swapchain creation.
+ VkPresentModeKHR presentMode;
+ VkExtent2D extent;
+ VkImageUsageFlags usage;
+ VkFormat format;
+ VkColorSpaceKHR colorSpace;
+ uint32_t targetImageCount;
+ VkSurfaceTransformFlagBitsKHR transform;
+ VkCompositeAlphaFlagBitsKHR alphaMode;
+
+ // Redundant information but as WebGPU enums to create the wgpu::Texture that
+ // encapsulates the native swapchain texture.
+ wgpu::TextureUsage wgpuUsage;
+ wgpu::TextureFormat wgpuFormat;
+
+ // Information about the blit workarounds we need to do (if any)
+ bool needsBlit = false;
+ };
+ ResultOrError<Config> ChooseConfig(const VulkanSurfaceInfo& surfaceInfo) const;
+ ResultOrError<Ref<TextureViewBase>> GetCurrentTextureViewInternal(bool isReentrant = false);
- protected:
- OldSwapChain(Device* device, const SwapChainDescriptor* descriptor);
- ~OldSwapChain() override;
+ // NewSwapChainBase implementation
+ MaybeError PresentImpl() override;
+ ResultOrError<Ref<TextureViewBase>> GetCurrentTextureViewImpl() override;
+ void DetachFromSurfaceImpl() override;
- TextureBase* GetNextTextureImpl(const TextureDescriptor* descriptor) override;
- MaybeError OnBeforePresent(TextureViewBase* texture) override;
+ Config mConfig;
- private:
- wgpu::TextureUsage mTextureUsage;
- };
+ VkSurfaceKHR mVkSurface = VK_NULL_HANDLE;
+ VkSwapchainKHR mSwapChain = VK_NULL_HANDLE;
+ std::vector<VkImage> mSwapChainImages;
+ uint32_t mLastImageIndex = 0;
- class SwapChain : public NewSwapChainBase {
- public:
- static ResultOrError<Ref<SwapChain>> Create(Device* device,
- Surface* surface,
- NewSwapChainBase* previousSwapChain,
- const SwapChainDescriptor* descriptor);
- ~SwapChain() override;
-
- private:
- using NewSwapChainBase::NewSwapChainBase;
- MaybeError Initialize(NewSwapChainBase* previousSwapChain);
- void DestroyImpl() override;
-
- struct Config {
- // Information that's passed to vulkan swapchain creation.
- VkPresentModeKHR presentMode;
- VkExtent2D extent;
- VkImageUsageFlags usage;
- VkFormat format;
- VkColorSpaceKHR colorSpace;
- uint32_t targetImageCount;
- VkSurfaceTransformFlagBitsKHR transform;
- VkCompositeAlphaFlagBitsKHR alphaMode;
-
- // Redundant information but as WebGPU enums to create the wgpu::Texture that
- // encapsulates the native swapchain texture.
- wgpu::TextureUsage wgpuUsage;
- wgpu::TextureFormat wgpuFormat;
-
- // Information about the blit workarounds we need to do (if any)
- bool needsBlit = false;
- };
- ResultOrError<Config> ChooseConfig(const VulkanSurfaceInfo& surfaceInfo) const;
- ResultOrError<Ref<TextureViewBase>> GetCurrentTextureViewInternal(bool isReentrant = false);
-
- // NewSwapChainBase implementation
- MaybeError PresentImpl() override;
- ResultOrError<Ref<TextureViewBase>> GetCurrentTextureViewImpl() override;
- void DetachFromSurfaceImpl() override;
-
- Config mConfig;
-
- VkSurfaceKHR mVkSurface = VK_NULL_HANDLE;
- VkSwapchainKHR mSwapChain = VK_NULL_HANDLE;
- std::vector<VkImage> mSwapChainImages;
- uint32_t mLastImageIndex = 0;
-
- Ref<Texture> mBlitTexture;
- Ref<Texture> mTexture;
- };
+ Ref<Texture> mBlitTexture;
+ Ref<Texture> mTexture;
+};
} // namespace dawn::native::vulkan
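
The class layout above pairs with the recovery behaviour in SwapChainVk.cpp: both acquire and present treat VK_SUBOPTIMAL_KHR as success and rebuild the swapchain on VK_ERROR_OUT_OF_DATE_KHR. The condensed sketch below shows that control flow with raw Vulkan, not Dawn code; RecreateSwapchain and RecordAndSubmitFrame are hypothetical placeholders for what Initialize(this) and SubmitPendingCommands() do in the real implementation, and semaphore reuse and ownership are ignored.

// Minimal sketch, not Dawn code: out-of-date handling around acquire and present.
#include <vulkan/vulkan.h>
#include <cstdint>
#include <limits>

// Hypothetical helpers standing in for swapchain re-creation and command submission.
bool RecreateSwapchain();
void RecordAndSubmitFrame(uint32_t imageIndex, VkSemaphore imageReady);

bool DrawFrame(VkDevice device, VkQueue queue, VkSwapchainKHR* swapchain,
               VkSemaphore imageReady) {
    uint32_t imageIndex = 0;
    VkResult acquire = vkAcquireNextImageKHR(device, *swapchain,
                                             std::numeric_limits<uint64_t>::max(),
                                             imageReady, VK_NULL_HANDLE, &imageIndex);
    if (acquire == VK_ERROR_OUT_OF_DATE_KHR) {
        // The surface changed (e.g. a resize); rebuild and let the caller retry.
        return RecreateSwapchain();
    }
    if (acquire != VK_SUCCESS && acquire != VK_SUBOPTIMAL_KHR) {
        return false;  // Surface lost or another unrecoverable error.
    }

    RecordAndSubmitFrame(imageIndex, imageReady);

    VkPresentInfoKHR presentInfo = {};
    presentInfo.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR;
    presentInfo.swapchainCount = 1;
    presentInfo.pSwapchains = swapchain;
    presentInfo.pImageIndices = &imageIndex;
    VkResult present = vkQueuePresentKHR(queue, &presentInfo);
    if (present == VK_ERROR_OUT_OF_DATE_KHR) {
        return RecreateSwapchain();  // Same recovery path as acquire.
    }
    return present == VK_SUCCESS || present == VK_SUBOPTIMAL_KHR;
}
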
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/TextureVk.cpp b/chromium/third_party/dawn/src/dawn/native/vulkan/TextureVk.cpp
index 06dd6887b9c..404815dca5c 100644
--- a/chromium/third_party/dawn/src/dawn/native/vulkan/TextureVk.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/TextureVk.cpp
@@ -14,6 +14,8 @@
#include "dawn/native/vulkan/TextureVk.h"
+#include <utility>
+
#include "dawn/common/Assert.h"
#include "dawn/common/Math.h"
#include "dawn/native/DynamicUploader.h"
@@ -33,1396 +35,1380 @@
namespace dawn::native::vulkan {
- namespace {
- // Converts an Dawn texture dimension to a Vulkan image view type.
- // Contrary to image types, image view types include arrayness and cubemapness
- VkImageViewType VulkanImageViewType(wgpu::TextureViewDimension dimension) {
- switch (dimension) {
- case wgpu::TextureViewDimension::e1D:
- return VK_IMAGE_VIEW_TYPE_1D;
- case wgpu::TextureViewDimension::e2D:
- return VK_IMAGE_VIEW_TYPE_2D;
- case wgpu::TextureViewDimension::e2DArray:
- return VK_IMAGE_VIEW_TYPE_2D_ARRAY;
- case wgpu::TextureViewDimension::Cube:
- return VK_IMAGE_VIEW_TYPE_CUBE;
- case wgpu::TextureViewDimension::CubeArray:
- return VK_IMAGE_VIEW_TYPE_CUBE_ARRAY;
- case wgpu::TextureViewDimension::e3D:
- return VK_IMAGE_VIEW_TYPE_3D;
-
- case wgpu::TextureViewDimension::Undefined:
- UNREACHABLE();
- }
- }
-
- // Computes which vulkan access type could be required for the given Dawn usage.
- // TODO(crbug.com/dawn/269): We shouldn't need any access usages for srcAccessMask when
- // the previous usage is readonly because an execution dependency is sufficient.
- VkAccessFlags VulkanAccessFlags(wgpu::TextureUsage usage, const Format& format) {
- VkAccessFlags flags = 0;
-
- if (usage & wgpu::TextureUsage::CopySrc) {
- flags |= VK_ACCESS_TRANSFER_READ_BIT;
- }
- if (usage & wgpu::TextureUsage::CopyDst) {
- flags |= VK_ACCESS_TRANSFER_WRITE_BIT;
- }
- if (usage & wgpu::TextureUsage::TextureBinding) {
- flags |= VK_ACCESS_SHADER_READ_BIT;
- }
- if (usage & wgpu::TextureUsage::StorageBinding) {
- flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
- }
- if (usage & wgpu::TextureUsage::RenderAttachment) {
- if (format.HasDepthOrStencil()) {
- flags |= VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT |
- VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
- } else {
- flags |=
- VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
- }
- }
- if (usage & kReadOnlyRenderAttachment) {
- flags |= VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT;
- }
- if (usage & kPresentTextureUsage) {
- // The present usage is only used internally by the swapchain and is never used in
- // combination with other usages.
- ASSERT(usage == kPresentTextureUsage);
- // The Vulkan spec has the following note:
- //
- // When transitioning the image to VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR or
- // VK_IMAGE_LAYOUT_PRESENT_SRC_KHR, there is no need to delay subsequent
- // processing, or perform any visibility operations (as vkQueuePresentKHR performs
- // automatic visibility operations). To achieve this, the dstAccessMask member of
- // the VkImageMemoryBarrier should be set to 0, and the dstStageMask parameter
- // should be set to VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT.
- //
- // So on the transition to Present we don't need an access flag. The other
- // direction doesn't matter because swapchain textures always start a new frame
- // as uninitialized.
- flags |= 0;
- }
-
- return flags;
- }
-
- // Computes which Vulkan pipeline stage can access a texture in the given Dawn usage
- VkPipelineStageFlags VulkanPipelineStage(wgpu::TextureUsage usage, const Format& format) {
- VkPipelineStageFlags flags = 0;
-
- if (usage == wgpu::TextureUsage::None) {
- // This only happens when a texture is initially created (and for srcAccessMask) in
- // which case there is no need to wait on anything to stop accessing this texture.
- return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
- }
- if (usage & (wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::CopyDst)) {
- flags |= VK_PIPELINE_STAGE_TRANSFER_BIT;
- }
- if (usage & wgpu::TextureUsage::TextureBinding) {
- // TODO(crbug.com/dawn/851): Only transition to the usage we care about to avoid
- // introducing FS -> VS dependencies that would prevent parallelization on tiler
- // GPUs
- flags |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT |
- VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
- VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
- }
- if (usage & wgpu::TextureUsage::StorageBinding) {
- flags |=
- VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
- }
- if (usage & (wgpu::TextureUsage::RenderAttachment | kReadOnlyRenderAttachment)) {
- if (format.HasDepthOrStencil()) {
- flags |= VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT |
- VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
- } else {
- flags |= VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
- }
- }
- if (usage & kPresentTextureUsage) {
- // The present usage is only used internally by the swapchain and is never used in
- // combination with other usages.
- ASSERT(usage == kPresentTextureUsage);
- // The Vulkan spec has the following note:
- //
- // When transitioning the image to VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR or
- // VK_IMAGE_LAYOUT_PRESENT_SRC_KHR, there is no need to delay subsequent
- // processing, or perform any visibility operations (as vkQueuePresentKHR performs
- // automatic visibility operations). To achieve this, the dstAccessMask member of
- // the VkImageMemoryBarrier should be set to 0, and the dstStageMask parameter
- // should be set to VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT.
- //
- // So on the transition to Present we use the "bottom of pipe" stage. The other
- // direction doesn't matter because swapchain textures always start a new frame
- // as uninitialized.
- flags |= VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
- }
+namespace {
+// Converts a Dawn texture dimension to a Vulkan image view type.
+// Unlike image types, image view types include arrayness and cubemapness.
+VkImageViewType VulkanImageViewType(wgpu::TextureViewDimension dimension) {
+ switch (dimension) {
+ case wgpu::TextureViewDimension::e1D:
+ return VK_IMAGE_VIEW_TYPE_1D;
+ case wgpu::TextureViewDimension::e2D:
+ return VK_IMAGE_VIEW_TYPE_2D;
+ case wgpu::TextureViewDimension::e2DArray:
+ return VK_IMAGE_VIEW_TYPE_2D_ARRAY;
+ case wgpu::TextureViewDimension::Cube:
+ return VK_IMAGE_VIEW_TYPE_CUBE;
+ case wgpu::TextureViewDimension::CubeArray:
+ return VK_IMAGE_VIEW_TYPE_CUBE_ARRAY;
+ case wgpu::TextureViewDimension::e3D:
+ return VK_IMAGE_VIEW_TYPE_3D;
+
+ case wgpu::TextureViewDimension::Undefined:
+ break;
+ }
+ UNREACHABLE();
+}
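// Illustrative sketch (not from the Dawn sources): the re-indented function above also moves
// UNREACHABLE() from the Undefined case to after the switch. A minimal standalone example of
// the same pattern, using a hypothetical enum and std::abort() as a stand-in for Dawn's
// UNREACHABLE() macro from dawn/common/Assert.h.
#include <cstdlib>

enum class SketchDimension { e1D, e2D, e3D, Undefined };

int NumAxes(SketchDimension dim) {
    switch (dim) {
        case SketchDimension::e1D:
            return 1;
        case SketchDimension::e2D:
            return 2;
        case SketchDimension::e3D:
            return 3;
        case SketchDimension::Undefined:
            break;  // fall through to the marker below
    }
    // Keeping the marker after the switch leaves the switch exhaustive (so -Wswitch still
    // flags newly added enum values) while also covering the "control reaches end of a
    // non-void function" path for the Undefined case.
    std::abort();
}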
- // A zero value isn't a valid pipeline stage mask
- ASSERT(flags != 0);
- return flags;
- }
+// Computes which Vulkan access types could be required for the given Dawn usage.
+// TODO(crbug.com/dawn/269): We shouldn't need any access usages for srcAccessMask when
+// the previous usage is readonly because an execution dependency is sufficient.
+VkAccessFlags VulkanAccessFlags(wgpu::TextureUsage usage, const Format& format) {
+ VkAccessFlags flags = 0;
- VkImageMemoryBarrier BuildMemoryBarrier(const Texture* texture,
- wgpu::TextureUsage lastUsage,
- wgpu::TextureUsage usage,
- const SubresourceRange& range) {
- VkImageMemoryBarrier barrier;
- barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
- barrier.pNext = nullptr;
- barrier.srcAccessMask = VulkanAccessFlags(lastUsage, texture->GetFormat());
- barrier.dstAccessMask = VulkanAccessFlags(usage, texture->GetFormat());
- barrier.oldLayout = VulkanImageLayout(texture, lastUsage);
- barrier.newLayout = VulkanImageLayout(texture, usage);
- barrier.image = texture->GetHandle();
- barrier.subresourceRange.aspectMask = VulkanAspectMask(range.aspects);
- barrier.subresourceRange.baseMipLevel = range.baseMipLevel;
- barrier.subresourceRange.levelCount = range.levelCount;
- barrier.subresourceRange.baseArrayLayer = range.baseArrayLayer;
- barrier.subresourceRange.layerCount = range.layerCount;
-
- barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
- barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
- return barrier;
+ if (usage & wgpu::TextureUsage::CopySrc) {
+ flags |= VK_ACCESS_TRANSFER_READ_BIT;
+ }
+ if (usage & wgpu::TextureUsage::CopyDst) {
+ flags |= VK_ACCESS_TRANSFER_WRITE_BIT;
+ }
+ if (usage & wgpu::TextureUsage::TextureBinding) {
+ flags |= VK_ACCESS_SHADER_READ_BIT;
+ }
+ if (usage & wgpu::TextureUsage::StorageBinding) {
+ flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
+ }
+ if (usage & wgpu::TextureUsage::RenderAttachment) {
+ if (format.HasDepthOrStencil()) {
+ flags |= VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT |
+ VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
+ } else {
+ flags |= VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
}
+ }
+ if (usage & kReadOnlyRenderAttachment) {
+ flags |= VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT;
+ }
+ if (usage & kPresentTextureUsage) {
+ // The present usage is only used internally by the swapchain and is never used in
+ // combination with other usages.
+ ASSERT(usage == kPresentTextureUsage);
+ // The Vulkan spec has the following note:
+ //
+ // When transitioning the image to VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR or
+ // VK_IMAGE_LAYOUT_PRESENT_SRC_KHR, there is no need to delay subsequent
+ // processing, or perform any visibility operations (as vkQueuePresentKHR performs
+ // automatic visibility operations). To achieve this, the dstAccessMask member of
+ // the VkImageMemoryBarrier should be set to 0, and the dstStageMask parameter
+ // should be set to VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT.
+ //
+ // So on the transition to Present we don't need an access flag. The other
+ // direction doesn't matter because swapchain textures always start a new frame
+ // as uninitialized.
+ flags |= 0;
+ }
- void FillVulkanCreateInfoSizesAndType(const Texture& texture, VkImageCreateInfo* info) {
- const Extent3D& size = texture.GetSize();
-
- info->mipLevels = texture.GetNumMipLevels();
- info->samples = VulkanSampleCount(texture.GetSampleCount());
-
- // Fill in the image type, and paper over differences in how the array layer count is
- // specified between WebGPU and Vulkan.
- switch (texture.GetDimension()) {
- case wgpu::TextureDimension::e1D:
- info->imageType = VK_IMAGE_TYPE_1D;
- info->extent = {size.width, 1, 1};
- info->arrayLayers = 1;
- break;
-
- case wgpu::TextureDimension::e2D:
- info->imageType = VK_IMAGE_TYPE_2D;
- info->extent = {size.width, size.height, 1};
- info->arrayLayers = size.depthOrArrayLayers;
- break;
-
- case wgpu::TextureDimension::e3D:
- info->imageType = VK_IMAGE_TYPE_3D;
- info->extent = {size.width, size.height, size.depthOrArrayLayers};
- info->arrayLayers = 1;
- break;
- }
- }
+ return flags;
+}
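// Illustrative sketch (not from the Dawn sources): the TODO(crbug.com/dawn/269) above notes
// that srcAccessMask only has to make prior *writes* available, so a read-only previous usage
// could pass 0. A hypothetical helper expressing that idea; kSketchWriteAccessMask is an
// assumed constant, not a Dawn symbol.
#include <vulkan/vulkan.h>

static constexpr VkAccessFlags kSketchWriteAccessMask =
    VK_ACCESS_TRANSFER_WRITE_BIT | VK_ACCESS_SHADER_WRITE_BIT |
    VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;

// Keeps only the bits that correspond to writes; pure reads need no memory dependency,
// only the execution dependency already provided by the pipeline stage masks.
VkAccessFlags SketchSrcAccessMask(VkAccessFlags lastAccess) {
    return lastAccess & kSketchWriteAccessMask;
}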
- } // namespace
-
- // Converts Dawn texture format to Vulkan formats.
- VkFormat VulkanImageFormat(const Device* device, wgpu::TextureFormat format) {
- switch (format) {
- case wgpu::TextureFormat::R8Unorm:
- return VK_FORMAT_R8_UNORM;
- case wgpu::TextureFormat::R8Snorm:
- return VK_FORMAT_R8_SNORM;
- case wgpu::TextureFormat::R8Uint:
- return VK_FORMAT_R8_UINT;
- case wgpu::TextureFormat::R8Sint:
- return VK_FORMAT_R8_SINT;
-
- case wgpu::TextureFormat::R16Uint:
- return VK_FORMAT_R16_UINT;
- case wgpu::TextureFormat::R16Sint:
- return VK_FORMAT_R16_SINT;
- case wgpu::TextureFormat::R16Float:
- return VK_FORMAT_R16_SFLOAT;
- case wgpu::TextureFormat::RG8Unorm:
- return VK_FORMAT_R8G8_UNORM;
- case wgpu::TextureFormat::RG8Snorm:
- return VK_FORMAT_R8G8_SNORM;
- case wgpu::TextureFormat::RG8Uint:
- return VK_FORMAT_R8G8_UINT;
- case wgpu::TextureFormat::RG8Sint:
- return VK_FORMAT_R8G8_SINT;
-
- case wgpu::TextureFormat::R32Uint:
- return VK_FORMAT_R32_UINT;
- case wgpu::TextureFormat::R32Sint:
- return VK_FORMAT_R32_SINT;
- case wgpu::TextureFormat::R32Float:
- return VK_FORMAT_R32_SFLOAT;
- case wgpu::TextureFormat::RG16Uint:
- return VK_FORMAT_R16G16_UINT;
- case wgpu::TextureFormat::RG16Sint:
- return VK_FORMAT_R16G16_SINT;
- case wgpu::TextureFormat::RG16Float:
- return VK_FORMAT_R16G16_SFLOAT;
- case wgpu::TextureFormat::RGBA8Unorm:
- return VK_FORMAT_R8G8B8A8_UNORM;
- case wgpu::TextureFormat::RGBA8UnormSrgb:
- return VK_FORMAT_R8G8B8A8_SRGB;
- case wgpu::TextureFormat::RGBA8Snorm:
- return VK_FORMAT_R8G8B8A8_SNORM;
- case wgpu::TextureFormat::RGBA8Uint:
- return VK_FORMAT_R8G8B8A8_UINT;
- case wgpu::TextureFormat::RGBA8Sint:
- return VK_FORMAT_R8G8B8A8_SINT;
- case wgpu::TextureFormat::BGRA8Unorm:
- return VK_FORMAT_B8G8R8A8_UNORM;
- case wgpu::TextureFormat::BGRA8UnormSrgb:
- return VK_FORMAT_B8G8R8A8_SRGB;
- case wgpu::TextureFormat::RGB10A2Unorm:
- return VK_FORMAT_A2B10G10R10_UNORM_PACK32;
- case wgpu::TextureFormat::RG11B10Ufloat:
- return VK_FORMAT_B10G11R11_UFLOAT_PACK32;
- case wgpu::TextureFormat::RGB9E5Ufloat:
- return VK_FORMAT_E5B9G9R9_UFLOAT_PACK32;
-
- case wgpu::TextureFormat::RG32Uint:
- return VK_FORMAT_R32G32_UINT;
- case wgpu::TextureFormat::RG32Sint:
- return VK_FORMAT_R32G32_SINT;
- case wgpu::TextureFormat::RG32Float:
- return VK_FORMAT_R32G32_SFLOAT;
- case wgpu::TextureFormat::RGBA16Uint:
- return VK_FORMAT_R16G16B16A16_UINT;
- case wgpu::TextureFormat::RGBA16Sint:
- return VK_FORMAT_R16G16B16A16_SINT;
- case wgpu::TextureFormat::RGBA16Float:
- return VK_FORMAT_R16G16B16A16_SFLOAT;
-
- case wgpu::TextureFormat::RGBA32Uint:
- return VK_FORMAT_R32G32B32A32_UINT;
- case wgpu::TextureFormat::RGBA32Sint:
- return VK_FORMAT_R32G32B32A32_SINT;
- case wgpu::TextureFormat::RGBA32Float:
- return VK_FORMAT_R32G32B32A32_SFLOAT;
-
- case wgpu::TextureFormat::Depth16Unorm:
- return VK_FORMAT_D16_UNORM;
- case wgpu::TextureFormat::Depth32Float:
- return VK_FORMAT_D32_SFLOAT;
- case wgpu::TextureFormat::Depth24Plus:
- return VK_FORMAT_D32_SFLOAT;
- case wgpu::TextureFormat::Depth24PlusStencil8:
- // Depth24PlusStencil8 maps to either of these two formats because only requires
- // that one of the two be present. The VulkanUseD32S8 toggle combines the wish of
- // the environment, default to using D32S8, and availability information so we know
- // that the format is available.
- if (device->IsToggleEnabled(Toggle::VulkanUseD32S8)) {
- return VK_FORMAT_D32_SFLOAT_S8_UINT;
- } else {
- return VK_FORMAT_D24_UNORM_S8_UINT;
- }
- case wgpu::TextureFormat::Depth24UnormStencil8:
- return VK_FORMAT_D24_UNORM_S8_UINT;
- case wgpu::TextureFormat::Depth32FloatStencil8:
- return VK_FORMAT_D32_SFLOAT_S8_UINT;
- case wgpu::TextureFormat::Stencil8:
- // Try to use the stencil8 format if possible, otherwise use whatever format we can
- // use that contains a stencil8 component.
- if (device->IsToggleEnabled(Toggle::VulkanUseS8)) {
- return VK_FORMAT_S8_UINT;
- } else {
- return VulkanImageFormat(device, wgpu::TextureFormat::Depth24PlusStencil8);
- }
+// Computes which Vulkan pipeline stages can access a texture in the given Dawn usage.
+VkPipelineStageFlags VulkanPipelineStage(wgpu::TextureUsage usage, const Format& format) {
+ VkPipelineStageFlags flags = 0;
- case wgpu::TextureFormat::BC1RGBAUnorm:
- return VK_FORMAT_BC1_RGBA_UNORM_BLOCK;
- case wgpu::TextureFormat::BC1RGBAUnormSrgb:
- return VK_FORMAT_BC1_RGBA_SRGB_BLOCK;
- case wgpu::TextureFormat::BC2RGBAUnorm:
- return VK_FORMAT_BC2_UNORM_BLOCK;
- case wgpu::TextureFormat::BC2RGBAUnormSrgb:
- return VK_FORMAT_BC2_SRGB_BLOCK;
- case wgpu::TextureFormat::BC3RGBAUnorm:
- return VK_FORMAT_BC3_UNORM_BLOCK;
- case wgpu::TextureFormat::BC3RGBAUnormSrgb:
- return VK_FORMAT_BC3_SRGB_BLOCK;
- case wgpu::TextureFormat::BC4RSnorm:
- return VK_FORMAT_BC4_SNORM_BLOCK;
- case wgpu::TextureFormat::BC4RUnorm:
- return VK_FORMAT_BC4_UNORM_BLOCK;
- case wgpu::TextureFormat::BC5RGSnorm:
- return VK_FORMAT_BC5_SNORM_BLOCK;
- case wgpu::TextureFormat::BC5RGUnorm:
- return VK_FORMAT_BC5_UNORM_BLOCK;
- case wgpu::TextureFormat::BC6HRGBFloat:
- return VK_FORMAT_BC6H_SFLOAT_BLOCK;
- case wgpu::TextureFormat::BC6HRGBUfloat:
- return VK_FORMAT_BC6H_UFLOAT_BLOCK;
- case wgpu::TextureFormat::BC7RGBAUnorm:
- return VK_FORMAT_BC7_UNORM_BLOCK;
- case wgpu::TextureFormat::BC7RGBAUnormSrgb:
- return VK_FORMAT_BC7_SRGB_BLOCK;
-
- case wgpu::TextureFormat::ETC2RGB8Unorm:
- return VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK;
- case wgpu::TextureFormat::ETC2RGB8UnormSrgb:
- return VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK;
- case wgpu::TextureFormat::ETC2RGB8A1Unorm:
- return VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK;
- case wgpu::TextureFormat::ETC2RGB8A1UnormSrgb:
- return VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK;
- case wgpu::TextureFormat::ETC2RGBA8Unorm:
- return VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK;
- case wgpu::TextureFormat::ETC2RGBA8UnormSrgb:
- return VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK;
- case wgpu::TextureFormat::EACR11Unorm:
- return VK_FORMAT_EAC_R11_UNORM_BLOCK;
- case wgpu::TextureFormat::EACR11Snorm:
- return VK_FORMAT_EAC_R11_SNORM_BLOCK;
- case wgpu::TextureFormat::EACRG11Unorm:
- return VK_FORMAT_EAC_R11G11_UNORM_BLOCK;
- case wgpu::TextureFormat::EACRG11Snorm:
- return VK_FORMAT_EAC_R11G11_SNORM_BLOCK;
-
- case wgpu::TextureFormat::ASTC4x4Unorm:
- return VK_FORMAT_ASTC_4x4_UNORM_BLOCK;
- case wgpu::TextureFormat::ASTC4x4UnormSrgb:
- return VK_FORMAT_ASTC_4x4_SRGB_BLOCK;
- case wgpu::TextureFormat::ASTC5x4Unorm:
- return VK_FORMAT_ASTC_5x4_UNORM_BLOCK;
- case wgpu::TextureFormat::ASTC5x4UnormSrgb:
- return VK_FORMAT_ASTC_5x4_SRGB_BLOCK;
- case wgpu::TextureFormat::ASTC5x5Unorm:
- return VK_FORMAT_ASTC_5x5_UNORM_BLOCK;
- case wgpu::TextureFormat::ASTC5x5UnormSrgb:
- return VK_FORMAT_ASTC_5x5_SRGB_BLOCK;
- case wgpu::TextureFormat::ASTC6x5Unorm:
- return VK_FORMAT_ASTC_6x5_UNORM_BLOCK;
- case wgpu::TextureFormat::ASTC6x5UnormSrgb:
- return VK_FORMAT_ASTC_6x5_SRGB_BLOCK;
- case wgpu::TextureFormat::ASTC6x6Unorm:
- return VK_FORMAT_ASTC_6x6_UNORM_BLOCK;
- case wgpu::TextureFormat::ASTC6x6UnormSrgb:
- return VK_FORMAT_ASTC_6x6_SRGB_BLOCK;
- case wgpu::TextureFormat::ASTC8x5Unorm:
- return VK_FORMAT_ASTC_8x5_UNORM_BLOCK;
- case wgpu::TextureFormat::ASTC8x5UnormSrgb:
- return VK_FORMAT_ASTC_8x5_SRGB_BLOCK;
- case wgpu::TextureFormat::ASTC8x6Unorm:
- return VK_FORMAT_ASTC_8x6_UNORM_BLOCK;
- case wgpu::TextureFormat::ASTC8x6UnormSrgb:
- return VK_FORMAT_ASTC_8x6_SRGB_BLOCK;
- case wgpu::TextureFormat::ASTC8x8Unorm:
- return VK_FORMAT_ASTC_8x8_UNORM_BLOCK;
- case wgpu::TextureFormat::ASTC8x8UnormSrgb:
- return VK_FORMAT_ASTC_8x8_SRGB_BLOCK;
- case wgpu::TextureFormat::ASTC10x5Unorm:
- return VK_FORMAT_ASTC_10x5_UNORM_BLOCK;
- case wgpu::TextureFormat::ASTC10x5UnormSrgb:
- return VK_FORMAT_ASTC_10x5_SRGB_BLOCK;
- case wgpu::TextureFormat::ASTC10x6Unorm:
- return VK_FORMAT_ASTC_10x6_UNORM_BLOCK;
- case wgpu::TextureFormat::ASTC10x6UnormSrgb:
- return VK_FORMAT_ASTC_10x6_SRGB_BLOCK;
- case wgpu::TextureFormat::ASTC10x8Unorm:
- return VK_FORMAT_ASTC_10x8_UNORM_BLOCK;
- case wgpu::TextureFormat::ASTC10x8UnormSrgb:
- return VK_FORMAT_ASTC_10x8_SRGB_BLOCK;
- case wgpu::TextureFormat::ASTC10x10Unorm:
- return VK_FORMAT_ASTC_10x10_UNORM_BLOCK;
- case wgpu::TextureFormat::ASTC10x10UnormSrgb:
- return VK_FORMAT_ASTC_10x10_SRGB_BLOCK;
- case wgpu::TextureFormat::ASTC12x10Unorm:
- return VK_FORMAT_ASTC_12x10_UNORM_BLOCK;
- case wgpu::TextureFormat::ASTC12x10UnormSrgb:
- return VK_FORMAT_ASTC_12x10_SRGB_BLOCK;
- case wgpu::TextureFormat::ASTC12x12Unorm:
- return VK_FORMAT_ASTC_12x12_UNORM_BLOCK;
- case wgpu::TextureFormat::ASTC12x12UnormSrgb:
- return VK_FORMAT_ASTC_12x12_SRGB_BLOCK;
-
- case wgpu::TextureFormat::R8BG8Biplanar420Unorm:
- return VK_FORMAT_G8_B8R8_2PLANE_420_UNORM;
-
- case wgpu::TextureFormat::Undefined:
- break;
+ if (usage == wgpu::TextureUsage::None) {
+ // This only happens when a texture is initially created (and for srcAccessMask) in
+ // which case there is no need to wait on anything to stop accessing this texture.
+ return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
+ }
+ if (usage & (wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::CopyDst)) {
+ flags |= VK_PIPELINE_STAGE_TRANSFER_BIT;
+ }
+ if (usage & wgpu::TextureUsage::TextureBinding) {
+ // TODO(crbug.com/dawn/851): Only transition to the usage we care about to avoid
+ // introducing FS -> VS dependencies that would prevent parallelization on tiler
+ // GPUs
+ flags |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
+ VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
+ }
+ if (usage & wgpu::TextureUsage::StorageBinding) {
+ flags |= VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
+ }
+ if (usage & (wgpu::TextureUsage::RenderAttachment | kReadOnlyRenderAttachment)) {
+ if (format.HasDepthOrStencil()) {
+ flags |= VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT |
+ VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
+ } else {
+ flags |= VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
}
- UNREACHABLE();
+ }
+ if (usage & kPresentTextureUsage) {
+ // The present usage is only used internally by the swapchain and is never used in
+ // combination with other usages.
+ ASSERT(usage == kPresentTextureUsage);
+ // The Vulkan spec has the following note:
+ //
+ // When transitioning the image to VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR or
+ // VK_IMAGE_LAYOUT_PRESENT_SRC_KHR, there is no need to delay subsequent
+ // processing, or perform any visibility operations (as vkQueuePresentKHR performs
+ // automatic visibility operations). To achieve this, the dstAccessMask member of
+ // the VkImageMemoryBarrier should be set to 0, and the dstStageMask parameter
+ // should be set to VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT.
+ //
+ // So on the transition to Present we use the "bottom of pipe" stage. The other
+ // direction doesn't matter because swapchain textures always start a new frame
+ // as uninitialized.
+ flags |= VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
}
- // Converts the Dawn usage flags to Vulkan usage flags. Also needs the format to choose
- // between color and depth attachment usages.
- VkImageUsageFlags VulkanImageUsage(wgpu::TextureUsage usage, const Format& format) {
- VkImageUsageFlags flags = 0;
-
- if (usage & wgpu::TextureUsage::CopySrc) {
- flags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
- }
- if (usage & wgpu::TextureUsage::CopyDst) {
- flags |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;
- }
- if (usage & wgpu::TextureUsage::TextureBinding) {
- flags |= VK_IMAGE_USAGE_SAMPLED_BIT;
- // If the sampled texture is a depth/stencil texture, its image layout will be set
- // to DEPTH_STENCIL_READ_ONLY_OPTIMAL in order to support readonly depth/stencil
- // attachment. That layout requires DEPTH_STENCIL_ATTACHMENT_BIT image usage.
- if (format.HasDepthOrStencil() && format.isRenderable) {
- flags |= VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
+ // A zero value isn't a valid pipeline stage mask
+ ASSERT(flags != 0);
+ return flags;
+}
+
+VkImageMemoryBarrier BuildMemoryBarrier(const Texture* texture,
+ wgpu::TextureUsage lastUsage,
+ wgpu::TextureUsage usage,
+ const SubresourceRange& range) {
+ VkImageMemoryBarrier barrier;
+ barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
+ barrier.pNext = nullptr;
+ barrier.srcAccessMask = VulkanAccessFlags(lastUsage, texture->GetFormat());
+ barrier.dstAccessMask = VulkanAccessFlags(usage, texture->GetFormat());
+ barrier.oldLayout = VulkanImageLayout(texture, lastUsage);
+ barrier.newLayout = VulkanImageLayout(texture, usage);
+ barrier.image = texture->GetHandle();
+ barrier.subresourceRange.aspectMask = VulkanAspectMask(range.aspects);
+ barrier.subresourceRange.baseMipLevel = range.baseMipLevel;
+ barrier.subresourceRange.levelCount = range.levelCount;
+ barrier.subresourceRange.baseArrayLayer = range.baseArrayLayer;
+ barrier.subresourceRange.layerCount = range.layerCount;
+
+ barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ return barrier;
+}
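// Illustrative sketch (not from the Dawn sources): how a barrier built by the helper above is
// typically recorded. Dawn issues this through its own dispatch table and recording context;
// the plain loader entry point and the parameter names here are used only for illustration.
#include <vulkan/vulkan.h>

void SketchRecordBarrier(VkCommandBuffer commandBuffer,
                         VkPipelineStageFlags srcStages,
                         VkPipelineStageFlags dstStages,
                         const VkImageMemoryBarrier& barrier) {
    // One image barrier; no global memory barriers or buffer barriers.
    vkCmdPipelineBarrier(commandBuffer, srcStages, dstStages, /*dependencyFlags=*/0,
                         /*memoryBarrierCount=*/0, nullptr,
                         /*bufferMemoryBarrierCount=*/0, nullptr,
                         /*imageMemoryBarrierCount=*/1, &barrier);
}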
+
+void FillVulkanCreateInfoSizesAndType(const Texture& texture, VkImageCreateInfo* info) {
+ const Extent3D& size = texture.GetSize();
+
+ info->mipLevels = texture.GetNumMipLevels();
+ info->samples = VulkanSampleCount(texture.GetSampleCount());
+
+ // Fill in the image type, and paper over differences in how the array layer count is
+ // specified between WebGPU and Vulkan.
+ switch (texture.GetDimension()) {
+ case wgpu::TextureDimension::e1D:
+ info->imageType = VK_IMAGE_TYPE_1D;
+ info->extent = {size.width, 1, 1};
+ info->arrayLayers = 1;
+ break;
+
+ case wgpu::TextureDimension::e2D:
+ info->imageType = VK_IMAGE_TYPE_2D;
+ info->extent = {size.width, size.height, 1};
+ info->arrayLayers = size.depthOrArrayLayers;
+ break;
+
+ case wgpu::TextureDimension::e3D:
+ info->imageType = VK_IMAGE_TYPE_3D;
+ info->extent = {size.width, size.height, size.depthOrArrayLayers};
+ info->arrayLayers = 1;
+ break;
+ }
+}
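// Illustrative sketch (not from the Dawn sources): the mapping above, spelled out for a
// hypothetical texture with width 256, height 128 and depthOrArrayLayers 6 (a 1D texture in
// WebGPU must have height and depthOrArrayLayers of 1, so that row only uses the width):
//
//   WebGPU dimension | VkImageType      | extent        | arrayLayers
//   -----------------+------------------+---------------+------------
//   e1D              | VK_IMAGE_TYPE_1D | {256, 1, 1}   | 1
//   e2D              | VK_IMAGE_TYPE_2D | {256, 128, 1} | 6
//   e3D              | VK_IMAGE_TYPE_3D | {256, 128, 6} | 1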
+
+} // namespace
+
+// Converts a Dawn texture format to the corresponding Vulkan format.
+VkFormat VulkanImageFormat(const Device* device, wgpu::TextureFormat format) {
+ switch (format) {
+ case wgpu::TextureFormat::R8Unorm:
+ return VK_FORMAT_R8_UNORM;
+ case wgpu::TextureFormat::R8Snorm:
+ return VK_FORMAT_R8_SNORM;
+ case wgpu::TextureFormat::R8Uint:
+ return VK_FORMAT_R8_UINT;
+ case wgpu::TextureFormat::R8Sint:
+ return VK_FORMAT_R8_SINT;
+
+ case wgpu::TextureFormat::R16Uint:
+ return VK_FORMAT_R16_UINT;
+ case wgpu::TextureFormat::R16Sint:
+ return VK_FORMAT_R16_SINT;
+ case wgpu::TextureFormat::R16Float:
+ return VK_FORMAT_R16_SFLOAT;
+ case wgpu::TextureFormat::RG8Unorm:
+ return VK_FORMAT_R8G8_UNORM;
+ case wgpu::TextureFormat::RG8Snorm:
+ return VK_FORMAT_R8G8_SNORM;
+ case wgpu::TextureFormat::RG8Uint:
+ return VK_FORMAT_R8G8_UINT;
+ case wgpu::TextureFormat::RG8Sint:
+ return VK_FORMAT_R8G8_SINT;
+
+ case wgpu::TextureFormat::R32Uint:
+ return VK_FORMAT_R32_UINT;
+ case wgpu::TextureFormat::R32Sint:
+ return VK_FORMAT_R32_SINT;
+ case wgpu::TextureFormat::R32Float:
+ return VK_FORMAT_R32_SFLOAT;
+ case wgpu::TextureFormat::RG16Uint:
+ return VK_FORMAT_R16G16_UINT;
+ case wgpu::TextureFormat::RG16Sint:
+ return VK_FORMAT_R16G16_SINT;
+ case wgpu::TextureFormat::RG16Float:
+ return VK_FORMAT_R16G16_SFLOAT;
+ case wgpu::TextureFormat::RGBA8Unorm:
+ return VK_FORMAT_R8G8B8A8_UNORM;
+ case wgpu::TextureFormat::RGBA8UnormSrgb:
+ return VK_FORMAT_R8G8B8A8_SRGB;
+ case wgpu::TextureFormat::RGBA8Snorm:
+ return VK_FORMAT_R8G8B8A8_SNORM;
+ case wgpu::TextureFormat::RGBA8Uint:
+ return VK_FORMAT_R8G8B8A8_UINT;
+ case wgpu::TextureFormat::RGBA8Sint:
+ return VK_FORMAT_R8G8B8A8_SINT;
+ case wgpu::TextureFormat::BGRA8Unorm:
+ return VK_FORMAT_B8G8R8A8_UNORM;
+ case wgpu::TextureFormat::BGRA8UnormSrgb:
+ return VK_FORMAT_B8G8R8A8_SRGB;
+ case wgpu::TextureFormat::RGB10A2Unorm:
+ return VK_FORMAT_A2B10G10R10_UNORM_PACK32;
+ case wgpu::TextureFormat::RG11B10Ufloat:
+ return VK_FORMAT_B10G11R11_UFLOAT_PACK32;
+ case wgpu::TextureFormat::RGB9E5Ufloat:
+ return VK_FORMAT_E5B9G9R9_UFLOAT_PACK32;
+
+ case wgpu::TextureFormat::RG32Uint:
+ return VK_FORMAT_R32G32_UINT;
+ case wgpu::TextureFormat::RG32Sint:
+ return VK_FORMAT_R32G32_SINT;
+ case wgpu::TextureFormat::RG32Float:
+ return VK_FORMAT_R32G32_SFLOAT;
+ case wgpu::TextureFormat::RGBA16Uint:
+ return VK_FORMAT_R16G16B16A16_UINT;
+ case wgpu::TextureFormat::RGBA16Sint:
+ return VK_FORMAT_R16G16B16A16_SINT;
+ case wgpu::TextureFormat::RGBA16Float:
+ return VK_FORMAT_R16G16B16A16_SFLOAT;
+
+ case wgpu::TextureFormat::RGBA32Uint:
+ return VK_FORMAT_R32G32B32A32_UINT;
+ case wgpu::TextureFormat::RGBA32Sint:
+ return VK_FORMAT_R32G32B32A32_SINT;
+ case wgpu::TextureFormat::RGBA32Float:
+ return VK_FORMAT_R32G32B32A32_SFLOAT;
+
+ case wgpu::TextureFormat::Depth16Unorm:
+ return VK_FORMAT_D16_UNORM;
+ case wgpu::TextureFormat::Depth32Float:
+ return VK_FORMAT_D32_SFLOAT;
+ case wgpu::TextureFormat::Depth24Plus:
+ return VK_FORMAT_D32_SFLOAT;
+ case wgpu::TextureFormat::Depth24PlusStencil8:
+            // Depth24PlusStencil8 maps to either of these two formats because WebGPU only
+            // requires that one of the two be present. The VulkanUseD32S8 toggle combines the
+            // environment's preference (defaulting to D32S8) with availability information, so
+            // we know the chosen format is available.
+ if (device->IsToggleEnabled(Toggle::VulkanUseD32S8)) {
+ return VK_FORMAT_D32_SFLOAT_S8_UINT;
+ } else {
+ return VK_FORMAT_D24_UNORM_S8_UINT;
}
- }
- if (usage & wgpu::TextureUsage::StorageBinding) {
- flags |= VK_IMAGE_USAGE_STORAGE_BIT;
- }
- if (usage & wgpu::TextureUsage::RenderAttachment) {
- if (format.HasDepthOrStencil()) {
- flags |= VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
+ case wgpu::TextureFormat::Depth24UnormStencil8:
+ return VK_FORMAT_D24_UNORM_S8_UINT;
+ case wgpu::TextureFormat::Depth32FloatStencil8:
+ return VK_FORMAT_D32_SFLOAT_S8_UINT;
+ case wgpu::TextureFormat::Stencil8:
+ // Try to use the stencil8 format if possible, otherwise use whatever format we can
+ // use that contains a stencil8 component.
+ if (device->IsToggleEnabled(Toggle::VulkanUseS8)) {
+ return VK_FORMAT_S8_UINT;
} else {
- flags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
+ return VulkanImageFormat(device, wgpu::TextureFormat::Depth24PlusStencil8);
}
- }
- if (usage & kReadOnlyRenderAttachment) {
- flags |= VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
- }
- return flags;
+ case wgpu::TextureFormat::BC1RGBAUnorm:
+ return VK_FORMAT_BC1_RGBA_UNORM_BLOCK;
+ case wgpu::TextureFormat::BC1RGBAUnormSrgb:
+ return VK_FORMAT_BC1_RGBA_SRGB_BLOCK;
+ case wgpu::TextureFormat::BC2RGBAUnorm:
+ return VK_FORMAT_BC2_UNORM_BLOCK;
+ case wgpu::TextureFormat::BC2RGBAUnormSrgb:
+ return VK_FORMAT_BC2_SRGB_BLOCK;
+ case wgpu::TextureFormat::BC3RGBAUnorm:
+ return VK_FORMAT_BC3_UNORM_BLOCK;
+ case wgpu::TextureFormat::BC3RGBAUnormSrgb:
+ return VK_FORMAT_BC3_SRGB_BLOCK;
+ case wgpu::TextureFormat::BC4RSnorm:
+ return VK_FORMAT_BC4_SNORM_BLOCK;
+ case wgpu::TextureFormat::BC4RUnorm:
+ return VK_FORMAT_BC4_UNORM_BLOCK;
+ case wgpu::TextureFormat::BC5RGSnorm:
+ return VK_FORMAT_BC5_SNORM_BLOCK;
+ case wgpu::TextureFormat::BC5RGUnorm:
+ return VK_FORMAT_BC5_UNORM_BLOCK;
+ case wgpu::TextureFormat::BC6HRGBFloat:
+ return VK_FORMAT_BC6H_SFLOAT_BLOCK;
+ case wgpu::TextureFormat::BC6HRGBUfloat:
+ return VK_FORMAT_BC6H_UFLOAT_BLOCK;
+ case wgpu::TextureFormat::BC7RGBAUnorm:
+ return VK_FORMAT_BC7_UNORM_BLOCK;
+ case wgpu::TextureFormat::BC7RGBAUnormSrgb:
+ return VK_FORMAT_BC7_SRGB_BLOCK;
+
+ case wgpu::TextureFormat::ETC2RGB8Unorm:
+ return VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK;
+ case wgpu::TextureFormat::ETC2RGB8UnormSrgb:
+ return VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK;
+ case wgpu::TextureFormat::ETC2RGB8A1Unorm:
+ return VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK;
+ case wgpu::TextureFormat::ETC2RGB8A1UnormSrgb:
+ return VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK;
+ case wgpu::TextureFormat::ETC2RGBA8Unorm:
+ return VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK;
+ case wgpu::TextureFormat::ETC2RGBA8UnormSrgb:
+ return VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK;
+ case wgpu::TextureFormat::EACR11Unorm:
+ return VK_FORMAT_EAC_R11_UNORM_BLOCK;
+ case wgpu::TextureFormat::EACR11Snorm:
+ return VK_FORMAT_EAC_R11_SNORM_BLOCK;
+ case wgpu::TextureFormat::EACRG11Unorm:
+ return VK_FORMAT_EAC_R11G11_UNORM_BLOCK;
+ case wgpu::TextureFormat::EACRG11Snorm:
+ return VK_FORMAT_EAC_R11G11_SNORM_BLOCK;
+
+ case wgpu::TextureFormat::ASTC4x4Unorm:
+ return VK_FORMAT_ASTC_4x4_UNORM_BLOCK;
+ case wgpu::TextureFormat::ASTC4x4UnormSrgb:
+ return VK_FORMAT_ASTC_4x4_SRGB_BLOCK;
+ case wgpu::TextureFormat::ASTC5x4Unorm:
+ return VK_FORMAT_ASTC_5x4_UNORM_BLOCK;
+ case wgpu::TextureFormat::ASTC5x4UnormSrgb:
+ return VK_FORMAT_ASTC_5x4_SRGB_BLOCK;
+ case wgpu::TextureFormat::ASTC5x5Unorm:
+ return VK_FORMAT_ASTC_5x5_UNORM_BLOCK;
+ case wgpu::TextureFormat::ASTC5x5UnormSrgb:
+ return VK_FORMAT_ASTC_5x5_SRGB_BLOCK;
+ case wgpu::TextureFormat::ASTC6x5Unorm:
+ return VK_FORMAT_ASTC_6x5_UNORM_BLOCK;
+ case wgpu::TextureFormat::ASTC6x5UnormSrgb:
+ return VK_FORMAT_ASTC_6x5_SRGB_BLOCK;
+ case wgpu::TextureFormat::ASTC6x6Unorm:
+ return VK_FORMAT_ASTC_6x6_UNORM_BLOCK;
+ case wgpu::TextureFormat::ASTC6x6UnormSrgb:
+ return VK_FORMAT_ASTC_6x6_SRGB_BLOCK;
+ case wgpu::TextureFormat::ASTC8x5Unorm:
+ return VK_FORMAT_ASTC_8x5_UNORM_BLOCK;
+ case wgpu::TextureFormat::ASTC8x5UnormSrgb:
+ return VK_FORMAT_ASTC_8x5_SRGB_BLOCK;
+ case wgpu::TextureFormat::ASTC8x6Unorm:
+ return VK_FORMAT_ASTC_8x6_UNORM_BLOCK;
+ case wgpu::TextureFormat::ASTC8x6UnormSrgb:
+ return VK_FORMAT_ASTC_8x6_SRGB_BLOCK;
+ case wgpu::TextureFormat::ASTC8x8Unorm:
+ return VK_FORMAT_ASTC_8x8_UNORM_BLOCK;
+ case wgpu::TextureFormat::ASTC8x8UnormSrgb:
+ return VK_FORMAT_ASTC_8x8_SRGB_BLOCK;
+ case wgpu::TextureFormat::ASTC10x5Unorm:
+ return VK_FORMAT_ASTC_10x5_UNORM_BLOCK;
+ case wgpu::TextureFormat::ASTC10x5UnormSrgb:
+ return VK_FORMAT_ASTC_10x5_SRGB_BLOCK;
+ case wgpu::TextureFormat::ASTC10x6Unorm:
+ return VK_FORMAT_ASTC_10x6_UNORM_BLOCK;
+ case wgpu::TextureFormat::ASTC10x6UnormSrgb:
+ return VK_FORMAT_ASTC_10x6_SRGB_BLOCK;
+ case wgpu::TextureFormat::ASTC10x8Unorm:
+ return VK_FORMAT_ASTC_10x8_UNORM_BLOCK;
+ case wgpu::TextureFormat::ASTC10x8UnormSrgb:
+ return VK_FORMAT_ASTC_10x8_SRGB_BLOCK;
+ case wgpu::TextureFormat::ASTC10x10Unorm:
+ return VK_FORMAT_ASTC_10x10_UNORM_BLOCK;
+ case wgpu::TextureFormat::ASTC10x10UnormSrgb:
+ return VK_FORMAT_ASTC_10x10_SRGB_BLOCK;
+ case wgpu::TextureFormat::ASTC12x10Unorm:
+ return VK_FORMAT_ASTC_12x10_UNORM_BLOCK;
+ case wgpu::TextureFormat::ASTC12x10UnormSrgb:
+ return VK_FORMAT_ASTC_12x10_SRGB_BLOCK;
+ case wgpu::TextureFormat::ASTC12x12Unorm:
+ return VK_FORMAT_ASTC_12x12_UNORM_BLOCK;
+ case wgpu::TextureFormat::ASTC12x12UnormSrgb:
+ return VK_FORMAT_ASTC_12x12_SRGB_BLOCK;
+
+ case wgpu::TextureFormat::R8BG8Biplanar420Unorm:
+ return VK_FORMAT_G8_B8R8_2PLANE_420_UNORM;
+
+ case wgpu::TextureFormat::Undefined:
+ break;
}
+ UNREACHABLE();
+}
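// Illustrative sketch (not from the Dawn sources): the Depth24PlusStencil8 and Stencil8 cases
// above depend on toggles that encode format availability. A hypothetical probe of whether a
// candidate format can back a depth/stencil attachment with optimal tiling, using the plain
// loader entry point rather than Dawn's dispatch table:
#include <vulkan/vulkan.h>

bool SketchSupportsDepthStencilAttachment(VkPhysicalDevice physicalDevice, VkFormat format) {
    VkFormatProperties properties;
    vkGetPhysicalDeviceFormatProperties(physicalDevice, format, &properties);
    return (properties.optimalTilingFeatures &
            VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT) != 0;
}

// e.g. SketchSupportsDepthStencilAttachment(physicalDevice, VK_FORMAT_D24_UNORM_S8_UINT)
// is the kind of information that feeds the VulkanUseD32S8 / VulkanUseS8 decisions.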
- // Chooses which Vulkan image layout should be used for the given Dawn usage. Note that this
- // layout must match the layout given to various Vulkan operations as well as the layout given
- // to descriptor set writes.
- VkImageLayout VulkanImageLayout(const Texture* texture, wgpu::TextureUsage usage) {
- if (usage == wgpu::TextureUsage::None) {
- return VK_IMAGE_LAYOUT_UNDEFINED;
- }
+// Converts the Dawn usage flags to Vulkan usage flags. Also needs the format to choose
+// between color and depth attachment usages.
+VkImageUsageFlags VulkanImageUsage(wgpu::TextureUsage usage, const Format& format) {
+ VkImageUsageFlags flags = 0;
- if (!wgpu::HasZeroOrOneBits(usage)) {
- // Sampled | kReadOnlyRenderAttachment is the only possible multi-bit usage, if more
- // appear we might need additional special-casing.
- ASSERT(usage == (wgpu::TextureUsage::TextureBinding | kReadOnlyRenderAttachment));
-
- // WebGPU requires both aspects to be readonly if the attachment's format does have
- // both depth and stencil aspects. Vulkan 1.0 supports readonly for both aspects too
- // via DEPTH_STENCIL_READ_ONLY image layout. Vulkan 1.1 and above can support separate
- // readonly for a single aspect via DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL and
- // DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL layouts. But Vulkan 1.0 cannot support
- // it, and WebGPU doesn't need that currently.
- return VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL;
- }
-
- // Usage has a single bit so we can switch on its value directly.
- switch (usage) {
- case wgpu::TextureUsage::CopyDst:
- return VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
-
- // The layout returned here is the one that will be used at bindgroup creation time.
- // The bindgrpup's layout must match the runtime layout of the image when it is
- // used via the bindgroup, but we don't know exactly what it will be yet. So we
- // have to prepare for the pessimistic case.
- case wgpu::TextureUsage::TextureBinding:
- // Only VK_IMAGE_LAYOUT_GENERAL can do sampling and storage access of texture at the
- // same time.
- if (texture->GetInternalUsage() & wgpu::TextureUsage::StorageBinding) {
- return VK_IMAGE_LAYOUT_GENERAL;
- }
- // The sampled image can be used as a readonly depth/stencil attachment at the same
- // time if it is a depth/stencil renderable format, so the image layout need to be
- // VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL.
- if (texture->GetFormat().HasDepthOrStencil() && texture->GetFormat().isRenderable) {
- return VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL;
- }
- return VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
-
- // Vulkan texture copy functions require the image to be in _one_ known layout.
- // Depending on whether parts of the texture have been transitioned to only CopySrc
- // or a combination with something else, the texture could be in a combination of
- // GENERAL and TRANSFER_SRC_OPTIMAL. This would be a problem, so we make CopySrc use
- // GENERAL.
- // TODO(crbug.com/dawn/851): We no longer need to transition resources all at
- // once and can instead track subresources so we should lift this limitation.
- case wgpu::TextureUsage::CopySrc:
- // Read-only and write-only storage textures must use general layout because load
- // and store operations on storage images can only be done on the images in
- // VK_IMAGE_LAYOUT_GENERAL layout.
- case wgpu::TextureUsage::StorageBinding:
- return VK_IMAGE_LAYOUT_GENERAL;
-
- case wgpu::TextureUsage::RenderAttachment:
- if (texture->GetFormat().HasDepthOrStencil()) {
- return VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
- } else {
- return VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
- }
-
- case kReadOnlyRenderAttachment:
- return VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL;
-
- case kPresentTextureUsage:
- return VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
-
- case wgpu::TextureUsage::None:
- break;
- }
- UNREACHABLE();
+ if (usage & wgpu::TextureUsage::CopySrc) {
+ flags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
}
-
- VkSampleCountFlagBits VulkanSampleCount(uint32_t sampleCount) {
- switch (sampleCount) {
- case 1:
- return VK_SAMPLE_COUNT_1_BIT;
- case 4:
- return VK_SAMPLE_COUNT_4_BIT;
+ if (usage & wgpu::TextureUsage::CopyDst) {
+ flags |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;
+ }
+ if (usage & wgpu::TextureUsage::TextureBinding) {
+ flags |= VK_IMAGE_USAGE_SAMPLED_BIT;
+ // If the sampled texture is a depth/stencil texture, its image layout will be set
+        // to DEPTH_STENCIL_READ_ONLY_OPTIMAL in order to support read-only depth/stencil
+        // attachments. That layout requires the DEPTH_STENCIL_ATTACHMENT_BIT image usage.
+ if (format.HasDepthOrStencil() && format.isRenderable) {
+ flags |= VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
}
- UNREACHABLE();
}
-
- MaybeError ValidateVulkanImageCanBeWrapped(const DeviceBase*,
- const TextureDescriptor* descriptor) {
- DAWN_INVALID_IF(descriptor->dimension != wgpu::TextureDimension::e2D,
- "Texture dimension (%s) is not %s.", descriptor->dimension,
- wgpu::TextureDimension::e2D);
-
- DAWN_INVALID_IF(descriptor->mipLevelCount != 1, "Mip level count (%u) is not 1.",
- descriptor->mipLevelCount);
-
- DAWN_INVALID_IF(descriptor->size.depthOrArrayLayers != 1,
- "Array layer count (%u) is not 1.", descriptor->size.depthOrArrayLayers);
-
- DAWN_INVALID_IF(descriptor->sampleCount != 1, "Sample count (%u) is not 1.",
- descriptor->sampleCount);
-
- return {};
+ if (usage & wgpu::TextureUsage::StorageBinding) {
+ flags |= VK_IMAGE_USAGE_STORAGE_BIT;
}
-
- bool IsSampleCountSupported(const dawn::native::vulkan::Device* device,
- const VkImageCreateInfo& imageCreateInfo) {
- ASSERT(device);
-
- VkPhysicalDevice physicalDevice = ToBackend(device->GetAdapter())->GetPhysicalDevice();
- VkImageFormatProperties properties;
- if (device->fn.GetPhysicalDeviceImageFormatProperties(
- physicalDevice, imageCreateInfo.format, imageCreateInfo.imageType,
- imageCreateInfo.tiling, imageCreateInfo.usage, imageCreateInfo.flags,
- &properties) != VK_SUCCESS) {
- UNREACHABLE();
+ if (usage & wgpu::TextureUsage::RenderAttachment) {
+ if (format.HasDepthOrStencil()) {
+ flags |= VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
+ } else {
+ flags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
}
-
- return properties.sampleCounts & imageCreateInfo.samples;
}
-
- // static
- ResultOrError<Ref<Texture>> Texture::Create(Device* device,
- const TextureDescriptor* descriptor,
- VkImageUsageFlags extraUsages) {
- Ref<Texture> texture =
- AcquireRef(new Texture(device, descriptor, TextureState::OwnedInternal));
- DAWN_TRY(texture->InitializeAsInternalTexture(extraUsages));
- return std::move(texture);
+ if (usage & kReadOnlyRenderAttachment) {
+ flags |= VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
}
- // static
- ResultOrError<Texture*> Texture::CreateFromExternal(
- Device* device,
- const ExternalImageDescriptorVk* descriptor,
- const TextureDescriptor* textureDescriptor,
- external_memory::Service* externalMemoryService) {
- Ref<Texture> texture =
- AcquireRef(new Texture(device, textureDescriptor, TextureState::OwnedInternal));
- DAWN_TRY(texture->InitializeFromExternal(descriptor, externalMemoryService));
- return texture.Detach();
- }
+ return flags;
+}
- // static
- Ref<Texture> Texture::CreateForSwapChain(Device* device,
- const TextureDescriptor* descriptor,
- VkImage nativeImage) {
- Ref<Texture> texture =
- AcquireRef(new Texture(device, descriptor, TextureState::OwnedExternal));
- texture->InitializeForSwapChain(nativeImage);
- return texture;
+// Chooses which Vulkan image layout should be used for the given Dawn usage. Note that this
+// layout must match the layout given to various Vulkan operations as well as the layout given
+// to descriptor set writes.
+VkImageLayout VulkanImageLayout(const Texture* texture, wgpu::TextureUsage usage) {
+ if (usage == wgpu::TextureUsage::None) {
+ return VK_IMAGE_LAYOUT_UNDEFINED;
}
- Texture::Texture(Device* device, const TextureDescriptor* descriptor, TextureState state)
- : TextureBase(device, descriptor, state),
- // A usage of none will make sure the texture is transitioned before its first use as
- // required by the Vulkan spec.
- mSubresourceLastUsages(std::make_unique<SubresourceStorage<wgpu::TextureUsage>>(
- (ShouldCombineDepthStencilBarriers() ? Aspect::CombinedDepthStencil
- : GetFormat().aspects),
- GetArrayLayers(),
- GetNumMipLevels(),
- wgpu::TextureUsage::None)) {
+ if (!wgpu::HasZeroOrOneBits(usage)) {
+ // Sampled | kReadOnlyRenderAttachment is the only possible multi-bit usage, if more
+ // appear we might need additional special-casing.
+ ASSERT(usage == (wgpu::TextureUsage::TextureBinding | kReadOnlyRenderAttachment));
+
+ // WebGPU requires both aspects to be readonly if the attachment's format does have
+ // both depth and stencil aspects. Vulkan 1.0 supports readonly for both aspects too
+ // via DEPTH_STENCIL_READ_ONLY image layout. Vulkan 1.1 and above can support separate
+ // readonly for a single aspect via DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL and
+ // DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL layouts. But Vulkan 1.0 cannot support
+ // it, and WebGPU doesn't need that currently.
+ return VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL;
}
- MaybeError Texture::InitializeAsInternalTexture(VkImageUsageFlags extraUsages) {
- Device* device = ToBackend(GetDevice());
-
- // Create the Vulkan image "container". We don't need to check that the format supports the
- // combination of sample, usage etc. because validation should have been done in the Dawn
- // frontend already based on the minimum supported formats in the Vulkan spec
- VkImageCreateInfo createInfo = {};
- FillVulkanCreateInfoSizesAndType(*this, &createInfo);
-
- PNextChainBuilder createInfoChain(&createInfo);
-
- createInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
- createInfo.format = VulkanImageFormat(device, GetFormat().format);
- createInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
- createInfo.usage = VulkanImageUsage(GetInternalUsage(), GetFormat()) | extraUsages;
- createInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
- createInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
-
- VkImageFormatListCreateInfo imageFormatListInfo = {};
- std::vector<VkFormat> viewFormats;
- if (GetViewFormats().any()) {
- createInfo.flags |= VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT;
- if (device->GetDeviceInfo().HasExt(DeviceExt::ImageFormatList)) {
- createInfoChain.Add(&imageFormatListInfo,
- VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO);
- viewFormats.push_back(VulkanImageFormat(device, GetFormat().format));
- for (FormatIndex i : IterateBitSet(GetViewFormats())) {
- const Format& viewFormat = device->GetValidInternalFormat(i);
- viewFormats.push_back(VulkanImageFormat(device, viewFormat.format));
- }
-
- imageFormatListInfo.viewFormatCount = viewFormats.size();
- imageFormatListInfo.pViewFormats = viewFormats.data();
+ // Usage has a single bit so we can switch on its value directly.
+ switch (usage) {
+ case wgpu::TextureUsage::CopyDst:
+ return VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
+
+ // The layout returned here is the one that will be used at bindgroup creation time.
+        // The bindgroup's layout must match the runtime layout of the image when it is
+ // used via the bindgroup, but we don't know exactly what it will be yet. So we
+ // have to prepare for the pessimistic case.
+ case wgpu::TextureUsage::TextureBinding:
+ // Only VK_IMAGE_LAYOUT_GENERAL can do sampling and storage access of texture at the
+ // same time.
+ if (texture->GetInternalUsage() & wgpu::TextureUsage::StorageBinding) {
+ return VK_IMAGE_LAYOUT_GENERAL;
+ }
+ // The sampled image can be used as a readonly depth/stencil attachment at the same
+            // time if it is a depth/stencil renderable format, so the image layout needs to be
+ // VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL.
+ if (texture->GetFormat().HasDepthOrStencil() && texture->GetFormat().isRenderable) {
+ return VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL;
+ }
+ return VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
+
+ // Vulkan texture copy functions require the image to be in _one_ known layout.
+ // Depending on whether parts of the texture have been transitioned to only CopySrc
+ // or a combination with something else, the texture could be in a combination of
+ // GENERAL and TRANSFER_SRC_OPTIMAL. This would be a problem, so we make CopySrc use
+ // GENERAL.
+ // TODO(crbug.com/dawn/851): We no longer need to transition resources all at
+ // once and can instead track subresources so we should lift this limitation.
+ case wgpu::TextureUsage::CopySrc:
+ // Read-only and write-only storage textures must use general layout because load
+ // and store operations on storage images can only be done on the images in
+ // VK_IMAGE_LAYOUT_GENERAL layout.
+ case wgpu::TextureUsage::StorageBinding:
+ return VK_IMAGE_LAYOUT_GENERAL;
+
+ case wgpu::TextureUsage::RenderAttachment:
+ if (texture->GetFormat().HasDepthOrStencil()) {
+ return VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
+ } else {
+ return VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
}
- }
- ASSERT(IsSampleCountSupported(device, createInfo));
+ case kReadOnlyRenderAttachment:
+ return VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL;
- if (GetArrayLayers() >= 6 && GetWidth() == GetHeight()) {
- createInfo.flags |= VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
- }
+ case kPresentTextureUsage:
+ return VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
- // We always set VK_IMAGE_USAGE_TRANSFER_DST_BIT unconditionally beause the Vulkan images
- // that are used in vkCmdClearColorImage() must have been created with this flag, which is
- // also required for the implementation of robust resource initialization.
- createInfo.usage |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;
+ case wgpu::TextureUsage::None:
+ break;
+ }
+ UNREACHABLE();
+}
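// Illustrative sketch (not from the Dawn sources): the "single bit" check above relies on the
// classic power-of-two bit trick. A standalone equivalent of what a HasZeroOrOneBits-style
// helper computes (an illustration, not Dawn's implementation):
#include <cstdint>

constexpr bool SketchHasZeroOrOneBits(uint32_t bits) {
    return (bits & (bits - 1u)) == 0u;  // true for 0 and for exact powers of two
}

static_assert(SketchHasZeroOrOneBits(0u), "zero bits set");
static_assert(SketchHasZeroOrOneBits(0x10u), "exactly one bit set");
static_assert(!SketchHasZeroOrOneBits(0x11u), "two bits set");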
+
+VkSampleCountFlagBits VulkanSampleCount(uint32_t sampleCount) {
+ switch (sampleCount) {
+ case 1:
+ return VK_SAMPLE_COUNT_1_BIT;
+ case 4:
+ return VK_SAMPLE_COUNT_4_BIT;
+ }
+ UNREACHABLE();
+}
- DAWN_TRY(CheckVkSuccess(
- device->fn.CreateImage(device->GetVkDevice(), &createInfo, nullptr, &*mHandle),
- "CreateImage"));
+MaybeError ValidateVulkanImageCanBeWrapped(const DeviceBase*, const TextureDescriptor* descriptor) {
+ DAWN_INVALID_IF(descriptor->dimension != wgpu::TextureDimension::e2D,
+ "Texture dimension (%s) is not %s.", descriptor->dimension,
+ wgpu::TextureDimension::e2D);
- // Create the image memory and associate it with the container
- VkMemoryRequirements requirements;
- device->fn.GetImageMemoryRequirements(device->GetVkDevice(), mHandle, &requirements);
+ DAWN_INVALID_IF(descriptor->mipLevelCount != 1, "Mip level count (%u) is not 1.",
+ descriptor->mipLevelCount);
- DAWN_TRY_ASSIGN(mMemoryAllocation, device->GetResourceMemoryAllocator()->Allocate(
- requirements, MemoryKind::Opaque));
+ DAWN_INVALID_IF(descriptor->size.depthOrArrayLayers != 1, "Array layer count (%u) is not 1.",
+ descriptor->size.depthOrArrayLayers);
- DAWN_TRY(CheckVkSuccess(
- device->fn.BindImageMemory(device->GetVkDevice(), mHandle,
- ToBackend(mMemoryAllocation.GetResourceHeap())->GetMemory(),
- mMemoryAllocation.GetOffset()),
- "BindImageMemory"));
+ DAWN_INVALID_IF(descriptor->sampleCount != 1, "Sample count (%u) is not 1.",
+ descriptor->sampleCount);
- if (device->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting)) {
- DAWN_TRY(ClearTexture(ToBackend(GetDevice())->GetPendingRecordingContext(),
- GetAllSubresources(), TextureBase::ClearValue::NonZero));
- }
+ return {};
+}
- SetLabelImpl();
+bool IsSampleCountSupported(const dawn::native::vulkan::Device* device,
+ const VkImageCreateInfo& imageCreateInfo) {
+ ASSERT(device);
- return {};
+ VkPhysicalDevice physicalDevice = ToBackend(device->GetAdapter())->GetPhysicalDevice();
+ VkImageFormatProperties properties;
+ if (device->fn.GetPhysicalDeviceImageFormatProperties(
+ physicalDevice, imageCreateInfo.format, imageCreateInfo.imageType,
+ imageCreateInfo.tiling, imageCreateInfo.usage, imageCreateInfo.flags,
+ &properties) != VK_SUCCESS) {
+ UNREACHABLE();
}
- // Internally managed, but imported from external handle
- MaybeError Texture::InitializeFromExternal(const ExternalImageDescriptorVk* descriptor,
- external_memory::Service* externalMemoryService) {
- Device* device = ToBackend(GetDevice());
- VkFormat format = VulkanImageFormat(device, GetFormat().format);
- VkImageUsageFlags usage = VulkanImageUsage(GetInternalUsage(), GetFormat());
- DAWN_INVALID_IF(!externalMemoryService->SupportsCreateImage(descriptor, format, usage,
- &mSupportsDisjointVkImage),
- "Creating an image from external memory is not supported.");
- // mSubresourceLastUsages was initialized with Plane0/Plane1 in the constructor for
- // multiplanar formats, so we need to correct it to Color here.
- if (ShouldCombineMultiPlaneBarriers()) {
- mSubresourceLastUsages = std::make_unique<SubresourceStorage<wgpu::TextureUsage>>(
- ComputeAspectsForSubresourceStorage(), GetArrayLayers(), GetNumMipLevels(),
- wgpu::TextureUsage::None);
- }
-
- mExternalState = ExternalState::PendingAcquire;
-
- mPendingAcquireOldLayout = descriptor->releasedOldLayout;
- mPendingAcquireNewLayout = descriptor->releasedNewLayout;
-
- VkImageCreateInfo baseCreateInfo = {};
- FillVulkanCreateInfoSizesAndType(*this, &baseCreateInfo);
-
- PNextChainBuilder createInfoChain(&baseCreateInfo);
-
- baseCreateInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
- baseCreateInfo.format = format;
- baseCreateInfo.usage = usage;
- baseCreateInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
- baseCreateInfo.queueFamilyIndexCount = 0;
- baseCreateInfo.pQueueFamilyIndices = nullptr;
-
- // We always set VK_IMAGE_USAGE_TRANSFER_DST_BIT unconditionally beause the Vulkan images
- // that are used in vkCmdClearColorImage() must have been created with this flag, which is
- // also required for the implementation of robust resource initialization.
- baseCreateInfo.usage |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;
-
- VkImageFormatListCreateInfo imageFormatListInfo = {};
- std::vector<VkFormat> viewFormats;
- if (GetViewFormats().any()) {
- baseCreateInfo.flags |= VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT;
- if (device->GetDeviceInfo().HasExt(DeviceExt::ImageFormatList)) {
- createInfoChain.Add(&imageFormatListInfo,
- VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO);
- for (FormatIndex i : IterateBitSet(GetViewFormats())) {
- const Format& viewFormat = device->GetValidInternalFormat(i);
- viewFormats.push_back(VulkanImageFormat(device, viewFormat.format));
- }
-
- imageFormatListInfo.viewFormatCount = viewFormats.size();
- imageFormatListInfo.pViewFormats = viewFormats.data();
+ return properties.sampleCounts & imageCreateInfo.samples;
+}
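// Illustrative sketch (not from the Dawn sources): VkImageFormatProperties::sampleCounts is a
// bitmask, so the "& imageCreateInfo.samples" test above is a bit test rather than an equality
// check. Hypothetical values showing how that test behaves:
#include <vulkan/vulkan.h>

// A device reporting 1x, 2x, 4x and 8x support for some image configuration would expose:
constexpr VkSampleCountFlags kSketchSampleCounts = VK_SAMPLE_COUNT_1_BIT | VK_SAMPLE_COUNT_2_BIT |
                                                   VK_SAMPLE_COUNT_4_BIT | VK_SAMPLE_COUNT_8_BIT;

static_assert((kSketchSampleCounts & VK_SAMPLE_COUNT_4_BIT) != 0, "4x is supported");
static_assert((kSketchSampleCounts & VK_SAMPLE_COUNT_16_BIT) == 0, "16x is not supported");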
+
+// static
+ResultOrError<Ref<Texture>> Texture::Create(Device* device,
+ const TextureDescriptor* descriptor,
+ VkImageUsageFlags extraUsages) {
+ Ref<Texture> texture = AcquireRef(new Texture(device, descriptor, TextureState::OwnedInternal));
+ DAWN_TRY(texture->InitializeAsInternalTexture(extraUsages));
+ return std::move(texture);
+}
+
+// static
+ResultOrError<Texture*> Texture::CreateFromExternal(
+ Device* device,
+ const ExternalImageDescriptorVk* descriptor,
+ const TextureDescriptor* textureDescriptor,
+ external_memory::Service* externalMemoryService) {
+ Ref<Texture> texture =
+ AcquireRef(new Texture(device, textureDescriptor, TextureState::OwnedInternal));
+ DAWN_TRY(texture->InitializeFromExternal(descriptor, externalMemoryService));
+ return texture.Detach();
+}
+
+// static
+Ref<Texture> Texture::CreateForSwapChain(Device* device,
+ const TextureDescriptor* descriptor,
+ VkImage nativeImage) {
+ Ref<Texture> texture = AcquireRef(new Texture(device, descriptor, TextureState::OwnedExternal));
+ texture->InitializeForSwapChain(nativeImage);
+ return texture;
+}
+
+Texture::Texture(Device* device, const TextureDescriptor* descriptor, TextureState state)
+ : TextureBase(device, descriptor, state),
+ // A usage of none will make sure the texture is transitioned before its first use as
+ // required by the Vulkan spec.
+ mSubresourceLastUsages(std::make_unique<SubresourceStorage<wgpu::TextureUsage>>(
+ (ShouldCombineDepthStencilBarriers() ? Aspect::CombinedDepthStencil
+ : GetFormat().aspects),
+ GetArrayLayers(),
+ GetNumMipLevels(),
+ wgpu::TextureUsage::None)) {}
+
+MaybeError Texture::InitializeAsInternalTexture(VkImageUsageFlags extraUsages) {
+ Device* device = ToBackend(GetDevice());
+
+ // Create the Vulkan image "container". We don't need to check that the format supports the
+    // combination of sample count, usage, etc. because validation should have been done in the
+    // Dawn frontend already, based on the minimum supported formats in the Vulkan spec.
+ VkImageCreateInfo createInfo = {};
+ FillVulkanCreateInfoSizesAndType(*this, &createInfo);
+
+ PNextChainBuilder createInfoChain(&createInfo);
+
+ createInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
+ createInfo.format = VulkanImageFormat(device, GetFormat().format);
+ createInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
+ createInfo.usage = VulkanImageUsage(GetInternalUsage(), GetFormat()) | extraUsages;
+ createInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+ createInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+
+ VkImageFormatListCreateInfo imageFormatListInfo = {};
+ std::vector<VkFormat> viewFormats;
+ if (GetViewFormats().any()) {
+ createInfo.flags |= VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT;
+ if (device->GetDeviceInfo().HasExt(DeviceExt::ImageFormatList)) {
+ createInfoChain.Add(&imageFormatListInfo,
+ VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO);
+ viewFormats.push_back(VulkanImageFormat(device, GetFormat().format));
+ for (FormatIndex i : IterateBitSet(GetViewFormats())) {
+ const Format& viewFormat = device->GetValidInternalFormat(i);
+ viewFormats.push_back(VulkanImageFormat(device, viewFormat.format));
}
- }
- DAWN_TRY_ASSIGN(mHandle, externalMemoryService->CreateImage(descriptor, baseCreateInfo));
+ imageFormatListInfo.viewFormatCount = viewFormats.size();
+ imageFormatListInfo.pViewFormats = viewFormats.data();
+ }
+ }
- SetLabelHelper("Dawn_ExternalTexture");
+ ASSERT(IsSampleCountSupported(device, createInfo));
- return {};
+ if (GetArrayLayers() >= 6 && GetWidth() == GetHeight()) {
+ createInfo.flags |= VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
}
- void Texture::InitializeForSwapChain(VkImage nativeImage) {
- mHandle = nativeImage;
- SetLabelHelper("Dawn_SwapChainTexture");
- }
+    // We always set VK_IMAGE_USAGE_TRANSFER_DST_BIT unconditionally because the Vulkan images
+ // that are used in vkCmdClearColorImage() must have been created with this flag, which is
+ // also required for the implementation of robust resource initialization.
+ createInfo.usage |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;
- MaybeError Texture::BindExternalMemory(const ExternalImageDescriptorVk* descriptor,
- VkSemaphore signalSemaphore,
- VkDeviceMemory externalMemoryAllocation,
- std::vector<VkSemaphore> waitSemaphores) {
- Device* device = ToBackend(GetDevice());
- DAWN_TRY(CheckVkSuccess(
- device->fn.BindImageMemory(device->GetVkDevice(), mHandle, externalMemoryAllocation, 0),
- "BindImageMemory (external)"));
+ DAWN_TRY(CheckVkSuccess(
+ device->fn.CreateImage(device->GetVkDevice(), &createInfo, nullptr, &*mHandle),
+ "CreateImage"));
- // Don't clear imported texture if already initialized
- if (descriptor->isInitialized) {
- SetIsSubresourceContentInitialized(true, GetAllSubresources());
- }
+ // Create the image memory and associate it with the container
+ VkMemoryRequirements requirements;
+ device->fn.GetImageMemoryRequirements(device->GetVkDevice(), mHandle, &requirements);
- // Success, acquire all the external objects.
- mExternalAllocation = externalMemoryAllocation;
- mSignalSemaphore = signalSemaphore;
- mWaitRequirements = std::move(waitSemaphores);
- return {};
- }
+ DAWN_TRY_ASSIGN(mMemoryAllocation, device->GetResourceMemoryAllocator()->Allocate(
+ requirements, MemoryKind::Opaque));
- MaybeError Texture::ExportExternalTexture(VkImageLayout desiredLayout,
- VkSemaphore* signalSemaphore,
- VkImageLayout* releasedOldLayout,
- VkImageLayout* releasedNewLayout) {
- Device* device = ToBackend(GetDevice());
+ DAWN_TRY(CheckVkSuccess(
+ device->fn.BindImageMemory(device->GetVkDevice(), mHandle,
+ ToBackend(mMemoryAllocation.GetResourceHeap())->GetMemory(),
+ mMemoryAllocation.GetOffset()),
+ "BindImageMemory"));
- DAWN_INVALID_IF(mExternalState == ExternalState::Released,
- "Can't export a signal semaphore from signaled texture %s.", this);
-
- DAWN_INVALID_IF(
- mExternalAllocation == VK_NULL_HANDLE,
- "Can't export a signal semaphore from destroyed or non-external texture %s.", this);
-
- ASSERT(mSignalSemaphore != VK_NULL_HANDLE);
-
- // Release the texture
- mExternalState = ExternalState::Released;
-
- Aspect aspects = ComputeAspectsForSubresourceStorage();
- ASSERT(GetNumMipLevels() == 1 && GetArrayLayers() == 1);
- wgpu::TextureUsage usage = mSubresourceLastUsages->Get(aspects, 0, 0);
-
- VkImageMemoryBarrier barrier;
- barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
- barrier.pNext = nullptr;
- barrier.image = GetHandle();
- barrier.subresourceRange.aspectMask = VulkanAspectMask(aspects);
- barrier.subresourceRange.baseMipLevel = 0;
- barrier.subresourceRange.levelCount = 1;
- barrier.subresourceRange.baseArrayLayer = 0;
- barrier.subresourceRange.layerCount = 1;
-
- barrier.srcAccessMask = VulkanAccessFlags(usage, GetFormat());
- barrier.dstAccessMask = 0; // The barrier must be paired with another barrier that will
- // specify the dst access mask on the importing queue.
-
- barrier.oldLayout = VulkanImageLayout(this, usage);
- if (desiredLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
- // VK_IMAGE_LAYOUT_UNDEFINED is invalid here. We use it as a
- // special value to indicate no layout transition should be done.
- barrier.newLayout = barrier.oldLayout;
- } else {
- barrier.newLayout = desiredLayout;
- }
+ if (device->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting)) {
+ DAWN_TRY(ClearTexture(ToBackend(GetDevice())->GetPendingRecordingContext(),
+ GetAllSubresources(), TextureBase::ClearValue::NonZero));
+ }
- barrier.srcQueueFamilyIndex = device->GetGraphicsQueueFamily();
- barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_EXTERNAL_KHR;
+ SetLabelImpl();
+
+ return {};
+}
+
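For reference, a minimal raw-Vulkan sketch of the create/allocate/bind sequence that InitializeAsInternalTexture performs above through Dawn's wrappers (device->fn and the sub-allocating ResourceMemoryAllocator). The VkDevice/VkPhysicalDevice handles and the filled-in createInfo are assumed to come from the caller; this illustrates the generic Vulkan pattern, not Dawn's exact code.

#include <vulkan/vulkan.h>

// Sketch only: create a VkImage, pick a DEVICE_LOCAL memory type that satisfies its
// requirements, allocate dedicated memory, and bind it. Dawn instead sub-allocates and
// passes the allocation's offset to BindImageMemory.
VkResult CreateAndBindImage(VkPhysicalDevice physicalDevice,
                            VkDevice device,
                            const VkImageCreateInfo& createInfo,
                            VkImage* outImage,
                            VkDeviceMemory* outMemory) {
    VkResult result = vkCreateImage(device, &createInfo, nullptr, outImage);
    if (result != VK_SUCCESS) return result;

    VkMemoryRequirements requirements;
    vkGetImageMemoryRequirements(device, *outImage, &requirements);

    VkPhysicalDeviceMemoryProperties memProps;
    vkGetPhysicalDeviceMemoryProperties(physicalDevice, &memProps);

    // Find a memory type allowed by the image that is also device-local.
    uint32_t memoryTypeIndex = UINT32_MAX;
    for (uint32_t i = 0; i < memProps.memoryTypeCount; ++i) {
        const bool allowed = (requirements.memoryTypeBits & (1u << i)) != 0;
        const bool deviceLocal =
            (memProps.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0;
        if (allowed && deviceLocal) {
            memoryTypeIndex = i;
            break;
        }
    }
    if (memoryTypeIndex == UINT32_MAX) return VK_ERROR_OUT_OF_DEVICE_MEMORY;

    VkMemoryAllocateInfo allocateInfo = {};
    allocateInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
    allocateInfo.allocationSize = requirements.size;
    allocateInfo.memoryTypeIndex = memoryTypeIndex;
    result = vkAllocateMemory(device, &allocateInfo, nullptr, outMemory);
    if (result != VK_SUCCESS) return result;

    // Offset 0 because this sketch dedicates one allocation per image.
    return vkBindImageMemory(device, *outImage, *outMemory, 0);
}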
+// Internally managed, but imported from external handle
+MaybeError Texture::InitializeFromExternal(const ExternalImageDescriptorVk* descriptor,
+ external_memory::Service* externalMemoryService) {
+ Device* device = ToBackend(GetDevice());
+ VkFormat format = VulkanImageFormat(device, GetFormat().format);
+ VkImageUsageFlags usage = VulkanImageUsage(GetInternalUsage(), GetFormat());
+ DAWN_INVALID_IF(!externalMemoryService->SupportsCreateImage(descriptor, format, usage,
+ &mSupportsDisjointVkImage),
+ "Creating an image from external memory is not supported.");
+ // mSubresourceLastUsages was initialized with Plane0/Plane1 in the constructor for
+ // multiplanar formats, so we need to correct it to Color here.
+ if (ShouldCombineMultiPlaneBarriers()) {
+ mSubresourceLastUsages = std::make_unique<SubresourceStorage<wgpu::TextureUsage>>(
+ ComputeAspectsForSubresourceStorage(), GetArrayLayers(), GetNumMipLevels(),
+ wgpu::TextureUsage::None);
+ }
- VkPipelineStageFlags srcStages = VulkanPipelineStage(usage, GetFormat());
- VkPipelineStageFlags dstStages =
- VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT; // We don't know when the importing queue will need
- // the texture, so pass
- // VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT to ensure
- // the barrier happens-before any usage in the
- // importing queue.
+ mExternalState = ExternalState::PendingAcquire;
+
+ mPendingAcquireOldLayout = descriptor->releasedOldLayout;
+ mPendingAcquireNewLayout = descriptor->releasedNewLayout;
+
+ VkImageCreateInfo baseCreateInfo = {};
+ FillVulkanCreateInfoSizesAndType(*this, &baseCreateInfo);
+
+ PNextChainBuilder createInfoChain(&baseCreateInfo);
+
+ baseCreateInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
+ baseCreateInfo.format = format;
+ baseCreateInfo.usage = usage;
+ baseCreateInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+ baseCreateInfo.queueFamilyIndexCount = 0;
+ baseCreateInfo.pQueueFamilyIndices = nullptr;
+
+    // We always set VK_IMAGE_USAGE_TRANSFER_DST_BIT because the Vulkan images that are used
+    // in vkCmdClearColorImage() must have been created with this flag, which is also required
+    // for the implementation of robust resource initialization.
+ baseCreateInfo.usage |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;
+
+ VkImageFormatListCreateInfo imageFormatListInfo = {};
+ std::vector<VkFormat> viewFormats;
+ if (GetViewFormats().any()) {
+ baseCreateInfo.flags |= VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT;
+ if (device->GetDeviceInfo().HasExt(DeviceExt::ImageFormatList)) {
+ createInfoChain.Add(&imageFormatListInfo,
+ VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO);
+ for (FormatIndex i : IterateBitSet(GetViewFormats())) {
+ const Format& viewFormat = device->GetValidInternalFormat(i);
+ viewFormats.push_back(VulkanImageFormat(device, viewFormat.format));
+ }
- CommandRecordingContext* recordingContext = device->GetPendingRecordingContext();
- device->fn.CmdPipelineBarrier(recordingContext->commandBuffer, srcStages, dstStages, 0, 0,
- nullptr, 0, nullptr, 1, &barrier);
+ imageFormatListInfo.viewFormatCount = viewFormats.size();
+ imageFormatListInfo.pViewFormats = viewFormats.data();
+ }
+ }
- // Queue submit to signal we are done with the texture
- recordingContext->signalSemaphores.push_back(mSignalSemaphore);
- DAWN_TRY(device->SubmitPendingCommands());
+ DAWN_TRY_ASSIGN(mHandle, externalMemoryService->CreateImage(descriptor, baseCreateInfo));
- // Write out the layouts and signal semaphore
- *releasedOldLayout = barrier.oldLayout;
- *releasedNewLayout = barrier.newLayout;
- *signalSemaphore = mSignalSemaphore;
+ SetLabelHelper("Dawn_ExternalTexture");
- mSignalSemaphore = VK_NULL_HANDLE;
+ return {};
+}
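The actual VkImage creation here is delegated to the external_memory::Service. A common way such a service does this is by chaining a VkExternalMemoryImageCreateInfo onto the base create info built above; the sketch below shows that generic Vulkan pattern. The opaque-FD handle type, the plain VkDevice parameter, and the by-value copy of the create info are assumptions for illustration, not Dawn's service implementation.

#include <vulkan/vulkan.h>

// Sketch only: chain external-memory info onto a base VkImageCreateInfo before vkCreateImage.
// The handle type (opaque FD) is an example; a real service picks the type that matches the
// external image descriptor it was given. Requires Vulkan 1.1 or VK_KHR_external_memory.
VkResult CreateExternalImage(VkDevice vkDevice,
                             VkImageCreateInfo baseCreateInfo,  // copied so we can edit pNext
                             VkImage* outImage) {
    VkExternalMemoryImageCreateInfo externalInfo = {};
    externalInfo.sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO;
    externalInfo.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT;
    externalInfo.pNext = baseCreateInfo.pNext;  // preserve any existing chain (e.g. format list)

    baseCreateInfo.pNext = &externalInfo;
    return vkCreateImage(vkDevice, &baseCreateInfo, nullptr, outImage);
}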
- // Destroy the texture so it can't be used again
- Destroy();
- return {};
- }
+void Texture::InitializeForSwapChain(VkImage nativeImage) {
+ mHandle = nativeImage;
+ SetLabelHelper("Dawn_SwapChainTexture");
+}
- Texture::~Texture() {
- }
+MaybeError Texture::BindExternalMemory(const ExternalImageDescriptorVk* descriptor,
+ VkSemaphore signalSemaphore,
+ VkDeviceMemory externalMemoryAllocation,
+ std::vector<VkSemaphore> waitSemaphores) {
+ Device* device = ToBackend(GetDevice());
+ DAWN_TRY(CheckVkSuccess(
+ device->fn.BindImageMemory(device->GetVkDevice(), mHandle, externalMemoryAllocation, 0),
+ "BindImageMemory (external)"));
- void Texture::SetLabelHelper(const char* prefix) {
- SetDebugName(ToBackend(GetDevice()), mHandle, prefix, GetLabel());
+ // Don't clear imported texture if already initialized
+ if (descriptor->isInitialized) {
+ SetIsSubresourceContentInitialized(true, GetAllSubresources());
}
- void Texture::SetLabelImpl() {
- SetLabelHelper("Dawn_InternalTexture");
+ // Success, acquire all the external objects.
+ mExternalAllocation = externalMemoryAllocation;
+ mSignalSemaphore = signalSemaphore;
+ mWaitRequirements = std::move(waitSemaphores);
+ return {};
+}
+
+MaybeError Texture::ExportExternalTexture(VkImageLayout desiredLayout,
+ VkSemaphore* signalSemaphore,
+ VkImageLayout* releasedOldLayout,
+ VkImageLayout* releasedNewLayout) {
+ Device* device = ToBackend(GetDevice());
+
+ DAWN_INVALID_IF(mExternalState == ExternalState::Released,
+ "Can't export a signal semaphore from signaled texture %s.", this);
+
+ DAWN_INVALID_IF(mExternalAllocation == VK_NULL_HANDLE,
+ "Can't export a signal semaphore from destroyed or non-external texture %s.",
+ this);
+
+ ASSERT(mSignalSemaphore != VK_NULL_HANDLE);
+
+ // Release the texture
+ mExternalState = ExternalState::Released;
+
+ Aspect aspects = ComputeAspectsForSubresourceStorage();
+ ASSERT(GetNumMipLevels() == 1 && GetArrayLayers() == 1);
+ wgpu::TextureUsage usage = mSubresourceLastUsages->Get(aspects, 0, 0);
+
+ VkImageMemoryBarrier barrier;
+ barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
+ barrier.pNext = nullptr;
+ barrier.image = GetHandle();
+ barrier.subresourceRange.aspectMask = VulkanAspectMask(aspects);
+ barrier.subresourceRange.baseMipLevel = 0;
+ barrier.subresourceRange.levelCount = 1;
+ barrier.subresourceRange.baseArrayLayer = 0;
+ barrier.subresourceRange.layerCount = 1;
+
+ barrier.srcAccessMask = VulkanAccessFlags(usage, GetFormat());
+ barrier.dstAccessMask = 0; // The barrier must be paired with another barrier that will
+ // specify the dst access mask on the importing queue.
+
+ barrier.oldLayout = VulkanImageLayout(this, usage);
+ if (desiredLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
+ // VK_IMAGE_LAYOUT_UNDEFINED is invalid here. We use it as a
+ // special value to indicate no layout transition should be done.
+ barrier.newLayout = barrier.oldLayout;
+ } else {
+ barrier.newLayout = desiredLayout;
}
- void Texture::DestroyImpl() {
- if (GetTextureState() == TextureState::OwnedInternal) {
- Device* device = ToBackend(GetDevice());
+ barrier.srcQueueFamilyIndex = device->GetGraphicsQueueFamily();
+ barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_EXTERNAL_KHR;
- // For textures created from a VkImage, the allocation if kInvalid so the Device knows
- // to skip the deallocation of the (absence of) VkDeviceMemory.
- device->GetResourceMemoryAllocator()->Deallocate(&mMemoryAllocation);
+ VkPipelineStageFlags srcStages = VulkanPipelineStage(usage, GetFormat());
+ VkPipelineStageFlags dstStages =
+ VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT; // We don't know when the importing queue will need
+ // the texture, so pass
+ // VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT to ensure
+ // the barrier happens-before any usage in the
+ // importing queue.
- if (mHandle != VK_NULL_HANDLE) {
- device->GetFencedDeleter()->DeleteWhenUnused(mHandle);
- }
+ CommandRecordingContext* recordingContext = device->GetPendingRecordingContext();
+ device->fn.CmdPipelineBarrier(recordingContext->commandBuffer, srcStages, dstStages, 0, 0,
+ nullptr, 0, nullptr, 1, &barrier);
- if (mExternalAllocation != VK_NULL_HANDLE) {
- device->GetFencedDeleter()->DeleteWhenUnused(mExternalAllocation);
- }
+ // Queue submit to signal we are done with the texture
+ recordingContext->signalSemaphores.push_back(mSignalSemaphore);
+ DAWN_TRY(device->SubmitPendingCommands());
- mHandle = VK_NULL_HANDLE;
- mExternalAllocation = VK_NULL_HANDLE;
- // If a signal semaphore exists it should be requested before we delete the texture
- ASSERT(mSignalSemaphore == VK_NULL_HANDLE);
- }
- // For Vulkan, we currently run the base destruction code after the internal changes because
- // of the dependency on the texture state which the base code overwrites too early.
- TextureBase::DestroyImpl();
- }
+ // Write out the layouts and signal semaphore
+ *releasedOldLayout = barrier.oldLayout;
+ *releasedNewLayout = barrier.newLayout;
+ *signalSemaphore = mSignalSemaphore;
- VkImage Texture::GetHandle() const {
- return mHandle;
- }
+ mSignalSemaphore = VK_NULL_HANDLE;
- void Texture::TweakTransitionForExternalUsage(CommandRecordingContext* recordingContext,
- std::vector<VkImageMemoryBarrier>* barriers,
- size_t transitionBarrierStart) {
- ASSERT(GetNumMipLevels() == 1 && GetArrayLayers() == 1);
-
- // transitionBarrierStart specify the index where barriers for current transition start in
- // the vector. barriers->size() - transitionBarrierStart is the number of barriers that we
- // have already added into the vector during current transition.
- ASSERT(barriers->size() - transitionBarrierStart <= 1);
-
- if (mExternalState == ExternalState::PendingAcquire) {
- if (barriers->size() == transitionBarrierStart) {
- barriers->push_back(
- BuildMemoryBarrier(this, wgpu::TextureUsage::None, wgpu::TextureUsage::None,
- SubresourceRange::SingleMipAndLayer(
- 0, 0, ComputeAspectsForSubresourceStorage())));
- }
+ // Destroy the texture so it can't be used again
+ Destroy();
+ return {};
+}
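As the comments above note, this release barrier is only half of a Vulkan queue-family ownership transfer; the importing queue must record a matching acquire barrier (in Dawn this happens in TweakTransitionForExternalUsage below). A hedged sketch of the acquire half in plain Vulkan follows; the color aspect, the SHADER_READ destination access, and the stage masks are assumptions about the importer's first use, and the layouts are the ones returned through releasedOldLayout/releasedNewLayout.

#include <vulkan/vulkan.h>

// Sketch only: the acquire half of an external queue-family ownership transfer. The layout
// pair must match what the exporting queue released with. Requires Vulkan 1.1 or
// VK_KHR_external_memory for VK_QUEUE_FAMILY_EXTERNAL_KHR.
void RecordAcquireBarrier(VkCommandBuffer commandBuffer,
                          VkImage image,
                          uint32_t importQueueFamilyIndex,
                          VkImageLayout releasedOldLayout,
                          VkImageLayout releasedNewLayout) {
    VkImageMemoryBarrier barrier = {};
    barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
    barrier.srcAccessMask = 0;  // Ignored on acquire; the release barrier made writes available.
    barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;  // assumed first use: sampling
    barrier.oldLayout = releasedOldLayout;
    barrier.newLayout = releasedNewLayout;
    barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_EXTERNAL_KHR;
    barrier.dstQueueFamilyIndex = importQueueFamilyIndex;
    barrier.image = image;
    barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};

    vkCmdPipelineBarrier(commandBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
                         VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0, 0, nullptr, 0, nullptr, 1,
                         &barrier);
}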
- VkImageMemoryBarrier* barrier = &(*barriers)[transitionBarrierStart];
- // Transfer texture from external queue to graphics queue
- barrier->srcQueueFamilyIndex = VK_QUEUE_FAMILY_EXTERNAL_KHR;
- barrier->dstQueueFamilyIndex = ToBackend(GetDevice())->GetGraphicsQueueFamily();
-
- // srcAccessMask means nothing when importing. Queue transfers require a barrier on
- // both the importing and exporting queues. The exporting queue should have specified
- // this.
- barrier->srcAccessMask = 0;
-
- // This should be the first barrier after import.
- ASSERT(barrier->oldLayout == VK_IMAGE_LAYOUT_UNDEFINED);
-
- // Save the desired layout. We may need to transition through an intermediate
- // |mPendingAcquireLayout| first.
- VkImageLayout desiredLayout = barrier->newLayout;
-
- bool isInitialized = IsSubresourceContentInitialized(GetAllSubresources());
-
- // We don't care about the pending old layout if the texture is uninitialized. The
- // driver is free to discard it. Also it is invalid to transition to layout UNDEFINED or
- // PREINITIALIZED. If the embedder provided no new layout, or we don't care about the
- // previous contents, we can skip the layout transition.
- // https://www.khronos.org/registry/vulkan/specs/1.3-extensions/html/vkspec.html#VUID-VkImageMemoryBarrier-newLayout-01198
- if (!isInitialized || mPendingAcquireNewLayout == VK_IMAGE_LAYOUT_UNDEFINED ||
- mPendingAcquireNewLayout == VK_IMAGE_LAYOUT_PREINITIALIZED) {
- barrier->oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
- barrier->newLayout = desiredLayout;
- } else {
- barrier->oldLayout = mPendingAcquireOldLayout;
- barrier->newLayout = mPendingAcquireNewLayout;
- }
+Texture::~Texture() {}
- // If these are unequal, we need an another barrier to transition the layout.
- if (barrier->newLayout != desiredLayout) {
- VkImageMemoryBarrier layoutBarrier;
- layoutBarrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
- layoutBarrier.pNext = nullptr;
- layoutBarrier.image = GetHandle();
- layoutBarrier.subresourceRange = barrier->subresourceRange;
-
- // Transition from the acquired new layout to the desired layout.
- layoutBarrier.oldLayout = barrier->newLayout;
- layoutBarrier.newLayout = desiredLayout;
-
- // We already transitioned these.
- layoutBarrier.srcAccessMask = 0;
- layoutBarrier.dstAccessMask = 0;
- layoutBarrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
- layoutBarrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
-
- barriers->push_back(layoutBarrier);
- }
+void Texture::SetLabelHelper(const char* prefix) {
+ SetDebugName(ToBackend(GetDevice()), mHandle, prefix, GetLabel());
+}
- mExternalState = ExternalState::Acquired;
- }
+void Texture::SetLabelImpl() {
+ SetLabelHelper("Dawn_InternalTexture");
+}
- mLastExternalState = mExternalState;
+void Texture::DestroyImpl() {
+ if (GetTextureState() == TextureState::OwnedInternal) {
+ Device* device = ToBackend(GetDevice());
- recordingContext->waitSemaphores.insert(recordingContext->waitSemaphores.end(),
- mWaitRequirements.begin(), mWaitRequirements.end());
- mWaitRequirements.clear();
- }
+        // For textures created from a VkImage, the allocation is kInvalid so the Device knows
+        // to skip deallocating the (absent) VkDeviceMemory.
+ device->GetResourceMemoryAllocator()->Deallocate(&mMemoryAllocation);
- bool Texture::CanReuseWithoutBarrier(wgpu::TextureUsage lastUsage, wgpu::TextureUsage usage) {
- // Reuse the texture directly and avoid encoding barriers when it isn't needed.
- bool lastReadOnly = IsSubset(lastUsage, kReadOnlyTextureUsages);
- if (lastReadOnly && lastUsage == usage && mLastExternalState == mExternalState) {
- return true;
+ if (mHandle != VK_NULL_HANDLE) {
+ device->GetFencedDeleter()->DeleteWhenUnused(mHandle);
}
- return false;
- }
- // Base Vulkan doesn't support transitioning depth and stencil separately. We work around
- // this limitation by combining the usages in the two planes of `textureUsages` into a
- // single plane in a new SubresourceStorage<TextureUsage>. The barriers will be produced
- // for DEPTH | STENCIL since the SubresourceRange uses Aspect::CombinedDepthStencil.
- bool Texture::ShouldCombineDepthStencilBarriers() const {
- // If the Stencil8 format is being emulated then memory barriers also need to include
- // the depth aspect. (See: crbug.com/dawn/1331)
- if (GetFormat().format == wgpu::TextureFormat::Stencil8 &&
- !GetDevice()->IsToggleEnabled(Toggle::VulkanUseS8)) {
- return true;
+ if (mExternalAllocation != VK_NULL_HANDLE) {
+ device->GetFencedDeleter()->DeleteWhenUnused(mExternalAllocation);
}
- return GetFormat().aspects == (Aspect::Depth | Aspect::Stencil);
- }
- // The Vulkan spec requires:
- // "If image has a single-plane color format or is not disjoint, then the aspectMask member of
- // subresourceRange must be VK_IMAGE_ASPECT_COLOR_BIT.".
- // For multi-planar formats, we currently only support import them in non-disjoint way.
- bool Texture::ShouldCombineMultiPlaneBarriers() const {
- // TODO(chromium:1258986): Figure out how to support disjoint vkImage.
- ASSERT(!mSupportsDisjointVkImage);
- return GetFormat().aspects == (Aspect::Plane0 | Aspect::Plane1);
+ mHandle = VK_NULL_HANDLE;
+ mExternalAllocation = VK_NULL_HANDLE;
+        // If a signal semaphore exists, it should be requested before we delete the texture.
+ ASSERT(mSignalSemaphore == VK_NULL_HANDLE);
}
-
- Aspect Texture::ComputeAspectsForSubresourceStorage() const {
- if (ShouldCombineDepthStencilBarriers()) {
- return Aspect::CombinedDepthStencil;
+ // For Vulkan, we currently run the base destruction code after the internal changes because
+ // of the dependency on the texture state which the base code overwrites too early.
+ TextureBase::DestroyImpl();
+}
+
+VkImage Texture::GetHandle() const {
+ return mHandle;
+}
+
+void Texture::TweakTransitionForExternalUsage(CommandRecordingContext* recordingContext,
+ std::vector<VkImageMemoryBarrier>* barriers,
+ size_t transitionBarrierStart) {
+ ASSERT(GetNumMipLevels() == 1 && GetArrayLayers() == 1);
+
+    // transitionBarrierStart specifies the index where barriers for the current transition
+    // start in the vector. barriers->size() - transitionBarrierStart is the number of barriers
+    // that we have already added into the vector during the current transition.
+ ASSERT(barriers->size() - transitionBarrierStart <= 1);
+
+ if (mExternalState == ExternalState::PendingAcquire) {
+ if (barriers->size() == transitionBarrierStart) {
+ barriers->push_back(BuildMemoryBarrier(
+ this, wgpu::TextureUsage::None, wgpu::TextureUsage::None,
+ SubresourceRange::SingleMipAndLayer(0, 0, ComputeAspectsForSubresourceStorage())));
}
- // Force to use Aspect::Color for Aspect::Plane0/1.
- if (ShouldCombineMultiPlaneBarriers()) {
- return Aspect::Color;
- }
- return GetFormat().aspects;
- }
- void Texture::TransitionUsageForPass(CommandRecordingContext* recordingContext,
- const TextureSubresourceUsage& textureUsages,
- std::vector<VkImageMemoryBarrier>* imageBarriers,
- VkPipelineStageFlags* srcStages,
- VkPipelineStageFlags* dstStages) {
- if (ShouldCombineBarriers()) {
- Aspect combinedAspect = ComputeAspectsForSubresourceStorage();
- SubresourceStorage<wgpu::TextureUsage> combinedUsages(combinedAspect, GetArrayLayers(),
- GetNumMipLevels());
- textureUsages.Iterate([&](const SubresourceRange& range, wgpu::TextureUsage usage) {
- SubresourceRange updateRange = range;
- updateRange.aspects = combinedAspect;
-
- combinedUsages.Update(
- updateRange, [&](const SubresourceRange&, wgpu::TextureUsage* combinedUsage) {
- *combinedUsage |= usage;
- });
- });
-
- TransitionUsageForPassImpl(recordingContext, combinedUsages, imageBarriers, srcStages,
- dstStages);
+ VkImageMemoryBarrier* barrier = &(*barriers)[transitionBarrierStart];
+ // Transfer texture from external queue to graphics queue
+ barrier->srcQueueFamilyIndex = VK_QUEUE_FAMILY_EXTERNAL_KHR;
+ barrier->dstQueueFamilyIndex = ToBackend(GetDevice())->GetGraphicsQueueFamily();
+
+ // srcAccessMask means nothing when importing. Queue transfers require a barrier on
+ // both the importing and exporting queues. The exporting queue should have specified
+ // this.
+ barrier->srcAccessMask = 0;
+
+ // This should be the first barrier after import.
+ ASSERT(barrier->oldLayout == VK_IMAGE_LAYOUT_UNDEFINED);
+
+ // Save the desired layout. We may need to transition through an intermediate
+ // |mPendingAcquireLayout| first.
+ VkImageLayout desiredLayout = barrier->newLayout;
+
+ bool isInitialized = IsSubresourceContentInitialized(GetAllSubresources());
+
+ // We don't care about the pending old layout if the texture is uninitialized. The
+ // driver is free to discard it. Also it is invalid to transition to layout UNDEFINED or
+ // PREINITIALIZED. If the embedder provided no new layout, or we don't care about the
+ // previous contents, we can skip the layout transition.
+ // https://www.khronos.org/registry/vulkan/specs/1.3-extensions/html/vkspec.html#VUID-VkImageMemoryBarrier-newLayout-01198
+ if (!isInitialized || mPendingAcquireNewLayout == VK_IMAGE_LAYOUT_UNDEFINED ||
+ mPendingAcquireNewLayout == VK_IMAGE_LAYOUT_PREINITIALIZED) {
+ barrier->oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+ barrier->newLayout = desiredLayout;
} else {
- TransitionUsageForPassImpl(recordingContext, textureUsages, imageBarriers, srcStages,
- dstStages);
+ barrier->oldLayout = mPendingAcquireOldLayout;
+ barrier->newLayout = mPendingAcquireNewLayout;
}
- }
- void Texture::TransitionUsageForPassImpl(
- CommandRecordingContext* recordingContext,
- const SubresourceStorage<wgpu::TextureUsage>& subresourceUsages,
- std::vector<VkImageMemoryBarrier>* imageBarriers,
- VkPipelineStageFlags* srcStages,
- VkPipelineStageFlags* dstStages) {
- size_t transitionBarrierStart = imageBarriers->size();
- const Format& format = GetFormat();
-
- wgpu::TextureUsage allUsages = wgpu::TextureUsage::None;
- wgpu::TextureUsage allLastUsages = wgpu::TextureUsage::None;
-
- mSubresourceLastUsages->Merge(
- subresourceUsages, [&](const SubresourceRange& range, wgpu::TextureUsage* lastUsage,
- const wgpu::TextureUsage& newUsage) {
- if (newUsage == wgpu::TextureUsage::None ||
- CanReuseWithoutBarrier(*lastUsage, newUsage)) {
- return;
- }
+        // If these are unequal, we need another barrier to transition the layout.
+ if (barrier->newLayout != desiredLayout) {
+ VkImageMemoryBarrier layoutBarrier;
+ layoutBarrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
+ layoutBarrier.pNext = nullptr;
+ layoutBarrier.image = GetHandle();
+ layoutBarrier.subresourceRange = barrier->subresourceRange;
+
+ // Transition from the acquired new layout to the desired layout.
+ layoutBarrier.oldLayout = barrier->newLayout;
+ layoutBarrier.newLayout = desiredLayout;
+
+ // We already transitioned these.
+ layoutBarrier.srcAccessMask = 0;
+ layoutBarrier.dstAccessMask = 0;
+ layoutBarrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ layoutBarrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+
+ barriers->push_back(layoutBarrier);
+ }
- imageBarriers->push_back(BuildMemoryBarrier(this, *lastUsage, newUsage, range));
+ mExternalState = ExternalState::Acquired;
+ }
- allLastUsages |= *lastUsage;
- allUsages |= newUsage;
+ mLastExternalState = mExternalState;
- *lastUsage = newUsage;
- });
+ recordingContext->waitSemaphores.insert(recordingContext->waitSemaphores.end(),
+ mWaitRequirements.begin(), mWaitRequirements.end());
+ mWaitRequirements.clear();
+}
- if (mExternalState != ExternalState::InternalOnly) {
- TweakTransitionForExternalUsage(recordingContext, imageBarriers,
- transitionBarrierStart);
+bool Texture::CanReuseWithoutBarrier(wgpu::TextureUsage lastUsage, wgpu::TextureUsage usage) {
+    // Reuse the texture directly and avoid encoding barriers when none are needed.
+ bool lastReadOnly = IsSubset(lastUsage, kReadOnlyTextureUsages);
+ if (lastReadOnly && lastUsage == usage && mLastExternalState == mExternalState) {
+ return true;
+ }
+ return false;
+}
+
+// Base Vulkan doesn't support transitioning depth and stencil separately. We work around
+// this limitation by combining the usages in the two planes of `textureUsages` into a
+// single plane in a new SubresourceStorage<TextureUsage>. The barriers will be produced
+// for DEPTH | STENCIL since the SubresourceRange uses Aspect::CombinedDepthStencil.
+bool Texture::ShouldCombineDepthStencilBarriers() const {
+ // If the Stencil8 format is being emulated then memory barriers also need to include
+ // the depth aspect. (See: crbug.com/dawn/1331)
+ if (GetFormat().format == wgpu::TextureFormat::Stencil8 &&
+ !GetDevice()->IsToggleEnabled(Toggle::VulkanUseS8)) {
+ return true;
+ }
+ return GetFormat().aspects == (Aspect::Depth | Aspect::Stencil);
+}
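To illustrate the comment above, when depth and stencil usages are combined the resulting VkImageMemoryBarrier covers both aspects at once rather than one barrier per aspect. A minimal sketch follows; the layouts and access masks are placeholders, since BuildMemoryBarrier derives the real ones from the merged wgpu::TextureUsage values.

#include <vulkan/vulkan.h>

// Illustration only: a single barrier covering both aspects, as base Vulkan requires when
// depth and stencil cannot be transitioned separately.
VkImageMemoryBarrier MakeCombinedDepthStencilBarrier(VkImage image) {
    VkImageMemoryBarrier barrier = {};
    barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
    barrier.image = image;
    barrier.subresourceRange.aspectMask =
        VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;  // both aspects, one barrier
    barrier.subresourceRange.baseMipLevel = 0;
    barrier.subresourceRange.levelCount = VK_REMAINING_MIP_LEVELS;
    barrier.subresourceRange.baseArrayLayer = 0;
    barrier.subresourceRange.layerCount = VK_REMAINING_ARRAY_LAYERS;
    barrier.oldLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;  // placeholder
    barrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;          // placeholder
    barrier.srcAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;  // placeholder
    barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;                     // placeholder
    barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    return barrier;
}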
+
+// The Vulkan spec requires:
+// "If image has a single-plane color format or is not disjoint, then the aspectMask member of
+// subresourceRange must be VK_IMAGE_ASPECT_COLOR_BIT."
+// For multi-planar formats, we currently only support importing them in a non-disjoint way.
+bool Texture::ShouldCombineMultiPlaneBarriers() const {
+ // TODO(chromium:1258986): Figure out how to support disjoint vkImage.
+ ASSERT(!mSupportsDisjointVkImage);
+ return GetFormat().aspects == (Aspect::Plane0 | Aspect::Plane1);
+}
+
+Aspect Texture::ComputeAspectsForSubresourceStorage() const {
+ if (ShouldCombineDepthStencilBarriers()) {
+ return Aspect::CombinedDepthStencil;
+ }
+ // Force to use Aspect::Color for Aspect::Plane0/1.
+ if (ShouldCombineMultiPlaneBarriers()) {
+ return Aspect::Color;
+ }
+ return GetFormat().aspects;
+}
+
+void Texture::TransitionUsageForPass(CommandRecordingContext* recordingContext,
+ const TextureSubresourceUsage& textureUsages,
+ std::vector<VkImageMemoryBarrier>* imageBarriers,
+ VkPipelineStageFlags* srcStages,
+ VkPipelineStageFlags* dstStages) {
+ if (ShouldCombineBarriers()) {
+ Aspect combinedAspect = ComputeAspectsForSubresourceStorage();
+ SubresourceStorage<wgpu::TextureUsage> combinedUsages(combinedAspect, GetArrayLayers(),
+ GetNumMipLevels());
+ textureUsages.Iterate([&](const SubresourceRange& range, wgpu::TextureUsage usage) {
+ SubresourceRange updateRange = range;
+ updateRange.aspects = combinedAspect;
+
+ combinedUsages.Update(updateRange,
+ [&](const SubresourceRange&, wgpu::TextureUsage* combinedUsage) {
+ *combinedUsage |= usage;
+ });
+ });
+
+ TransitionUsageForPassImpl(recordingContext, combinedUsages, imageBarriers, srcStages,
+ dstStages);
+ } else {
+ TransitionUsageForPassImpl(recordingContext, textureUsages, imageBarriers, srcStages,
+ dstStages);
+ }
+}
+
+void Texture::TransitionUsageForPassImpl(
+ CommandRecordingContext* recordingContext,
+ const SubresourceStorage<wgpu::TextureUsage>& subresourceUsages,
+ std::vector<VkImageMemoryBarrier>* imageBarriers,
+ VkPipelineStageFlags* srcStages,
+ VkPipelineStageFlags* dstStages) {
+ size_t transitionBarrierStart = imageBarriers->size();
+ const Format& format = GetFormat();
+
+ wgpu::TextureUsage allUsages = wgpu::TextureUsage::None;
+ wgpu::TextureUsage allLastUsages = wgpu::TextureUsage::None;
+
+ mSubresourceLastUsages->Merge(subresourceUsages, [&](const SubresourceRange& range,
+ wgpu::TextureUsage* lastUsage,
+ const wgpu::TextureUsage& newUsage) {
+ if (newUsage == wgpu::TextureUsage::None || CanReuseWithoutBarrier(*lastUsage, newUsage)) {
+ return;
}
- *srcStages |= VulkanPipelineStage(allLastUsages, format);
- *dstStages |= VulkanPipelineStage(allUsages, format);
+ imageBarriers->push_back(BuildMemoryBarrier(this, *lastUsage, newUsage, range));
+
+ allLastUsages |= *lastUsage;
+ allUsages |= newUsage;
+
+ *lastUsage = newUsage;
+ });
+
+ if (mExternalState != ExternalState::InternalOnly) {
+ TweakTransitionForExternalUsage(recordingContext, imageBarriers, transitionBarrierStart);
}
- void Texture::TransitionUsageNow(CommandRecordingContext* recordingContext,
- wgpu::TextureUsage usage,
- const SubresourceRange& range) {
- std::vector<VkImageMemoryBarrier> barriers;
+ *srcStages |= VulkanPipelineStage(allLastUsages, format);
+ *dstStages |= VulkanPipelineStage(allUsages, format);
+}
- VkPipelineStageFlags srcStages = 0;
- VkPipelineStageFlags dstStages = 0;
+void Texture::TransitionUsageNow(CommandRecordingContext* recordingContext,
+ wgpu::TextureUsage usage,
+ const SubresourceRange& range) {
+ std::vector<VkImageMemoryBarrier> barriers;
- TransitionUsageAndGetResourceBarrier(usage, range, &barriers, &srcStages, &dstStages);
+ VkPipelineStageFlags srcStages = 0;
+ VkPipelineStageFlags dstStages = 0;
- if (mExternalState != ExternalState::InternalOnly) {
- TweakTransitionForExternalUsage(recordingContext, &barriers, 0);
- }
+ TransitionUsageAndGetResourceBarrier(usage, range, &barriers, &srcStages, &dstStages);
- if (!barriers.empty()) {
- ASSERT(srcStages != 0 && dstStages != 0);
- ToBackend(GetDevice())
- ->fn.CmdPipelineBarrier(recordingContext->commandBuffer, srcStages, dstStages, 0, 0,
- nullptr, 0, nullptr, barriers.size(), barriers.data());
- }
+ if (mExternalState != ExternalState::InternalOnly) {
+ TweakTransitionForExternalUsage(recordingContext, &barriers, 0);
}
- void Texture::TransitionUsageAndGetResourceBarrier(
- wgpu::TextureUsage usage,
- const SubresourceRange& range,
- std::vector<VkImageMemoryBarrier>* imageBarriers,
- VkPipelineStageFlags* srcStages,
- VkPipelineStageFlags* dstStages) {
- if (ShouldCombineBarriers()) {
- SubresourceRange updatedRange = range;
- updatedRange.aspects = ComputeAspectsForSubresourceStorage();
- TransitionUsageAndGetResourceBarrierImpl(usage, updatedRange, imageBarriers, srcStages,
- dstStages);
- } else {
- TransitionUsageAndGetResourceBarrierImpl(usage, range, imageBarriers, srcStages,
- dstStages);
- }
+ if (!barriers.empty()) {
+ ASSERT(srcStages != 0 && dstStages != 0);
+ ToBackend(GetDevice())
+ ->fn.CmdPipelineBarrier(recordingContext->commandBuffer, srcStages, dstStages, 0, 0,
+ nullptr, 0, nullptr, barriers.size(), barriers.data());
+ }
+}
+
+void Texture::TransitionUsageAndGetResourceBarrier(wgpu::TextureUsage usage,
+ const SubresourceRange& range,
+ std::vector<VkImageMemoryBarrier>* imageBarriers,
+ VkPipelineStageFlags* srcStages,
+ VkPipelineStageFlags* dstStages) {
+ if (ShouldCombineBarriers()) {
+ SubresourceRange updatedRange = range;
+ updatedRange.aspects = ComputeAspectsForSubresourceStorage();
+ TransitionUsageAndGetResourceBarrierImpl(usage, updatedRange, imageBarriers, srcStages,
+ dstStages);
+ } else {
+ TransitionUsageAndGetResourceBarrierImpl(usage, range, imageBarriers, srcStages, dstStages);
}
+}
+
+void Texture::TransitionUsageAndGetResourceBarrierImpl(
+ wgpu::TextureUsage usage,
+ const SubresourceRange& range,
+ std::vector<VkImageMemoryBarrier>* imageBarriers,
+ VkPipelineStageFlags* srcStages,
+ VkPipelineStageFlags* dstStages) {
+ ASSERT(imageBarriers != nullptr);
+ const Format& format = GetFormat();
+
+ wgpu::TextureUsage allLastUsages = wgpu::TextureUsage::None;
+ mSubresourceLastUsages->Update(
+ range, [&](const SubresourceRange& range, wgpu::TextureUsage* lastUsage) {
+ if (CanReuseWithoutBarrier(*lastUsage, usage)) {
+ return;
+ }
- void Texture::TransitionUsageAndGetResourceBarrierImpl(
- wgpu::TextureUsage usage,
- const SubresourceRange& range,
- std::vector<VkImageMemoryBarrier>* imageBarriers,
- VkPipelineStageFlags* srcStages,
- VkPipelineStageFlags* dstStages) {
- ASSERT(imageBarriers != nullptr);
- const Format& format = GetFormat();
-
- wgpu::TextureUsage allLastUsages = wgpu::TextureUsage::None;
- mSubresourceLastUsages->Update(
- range, [&](const SubresourceRange& range, wgpu::TextureUsage* lastUsage) {
- if (CanReuseWithoutBarrier(*lastUsage, usage)) {
- return;
- }
+ imageBarriers->push_back(BuildMemoryBarrier(this, *lastUsage, usage, range));
- imageBarriers->push_back(BuildMemoryBarrier(this, *lastUsage, usage, range));
+ allLastUsages |= *lastUsage;
+ *lastUsage = usage;
+ });
- allLastUsages |= *lastUsage;
- *lastUsage = usage;
- });
+ *srcStages |= VulkanPipelineStage(allLastUsages, format);
+ *dstStages |= VulkanPipelineStage(usage, format);
+}
- *srcStages |= VulkanPipelineStage(allLastUsages, format);
- *dstStages |= VulkanPipelineStage(usage, format);
- }
+MaybeError Texture::ClearTexture(CommandRecordingContext* recordingContext,
+ const SubresourceRange& range,
+ TextureBase::ClearValue clearValue) {
+ Device* device = ToBackend(GetDevice());
- MaybeError Texture::ClearTexture(CommandRecordingContext* recordingContext,
- const SubresourceRange& range,
- TextureBase::ClearValue clearValue) {
- Device* device = ToBackend(GetDevice());
+ const bool isZero = clearValue == TextureBase::ClearValue::Zero;
+ uint32_t uClearColor = isZero ? 0 : 1;
+ int32_t sClearColor = isZero ? 0 : 1;
+ float fClearColor = isZero ? 0.f : 1.f;
- const bool isZero = clearValue == TextureBase::ClearValue::Zero;
- uint32_t uClearColor = isZero ? 0 : 1;
- int32_t sClearColor = isZero ? 0 : 1;
- float fClearColor = isZero ? 0.f : 1.f;
+ TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopyDst, range);
- TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopyDst, range);
+ VkImageSubresourceRange imageRange = {};
+ imageRange.levelCount = 1;
+ imageRange.layerCount = 1;
- VkImageSubresourceRange imageRange = {};
- imageRange.levelCount = 1;
- imageRange.layerCount = 1;
+ if (GetFormat().isCompressed) {
+ if (range.aspects == Aspect::None) {
+ return {};
+ }
+        // Need to clear the texture with a copy from a buffer.
+ ASSERT(range.aspects == Aspect::Color);
+ const TexelBlockInfo& blockInfo = GetFormat().GetAspectInfo(range.aspects).block;
+
+ Extent3D largestMipSize = GetMipLevelSingleSubresourcePhysicalSize(range.baseMipLevel);
+
+ uint32_t bytesPerRow = Align((largestMipSize.width / blockInfo.width) * blockInfo.byteSize,
+ device->GetOptimalBytesPerRowAlignment());
+ uint64_t bufferSize = bytesPerRow * (largestMipSize.height / blockInfo.height) *
+ largestMipSize.depthOrArrayLayers;
+ DynamicUploader* uploader = device->GetDynamicUploader();
+ UploadHandle uploadHandle;
+ DAWN_TRY_ASSIGN(
+ uploadHandle,
+ uploader->Allocate(bufferSize, device->GetPendingCommandSerial(), blockInfo.byteSize));
+ memset(uploadHandle.mappedBuffer, uClearColor, bufferSize);
+
+ std::vector<VkBufferImageCopy> regions;
+ for (uint32_t level = range.baseMipLevel; level < range.baseMipLevel + range.levelCount;
+ ++level) {
+ Extent3D copySize = GetMipLevelSingleSubresourcePhysicalSize(level);
+ imageRange.baseMipLevel = level;
+ for (uint32_t layer = range.baseArrayLayer;
+ layer < range.baseArrayLayer + range.layerCount; ++layer) {
+ if (clearValue == TextureBase::ClearValue::Zero &&
+ IsSubresourceContentInitialized(
+ SubresourceRange::SingleMipAndLayer(level, layer, range.aspects))) {
+ // Skip lazy clears if already initialized.
+ continue;
+ }
- if (GetFormat().isCompressed) {
- if (range.aspects == Aspect::None) {
- return {};
+ TextureDataLayout dataLayout;
+ dataLayout.offset = uploadHandle.startOffset;
+ dataLayout.rowsPerImage = copySize.height / blockInfo.height;
+ dataLayout.bytesPerRow = bytesPerRow;
+ TextureCopy textureCopy;
+ textureCopy.aspect = range.aspects;
+ textureCopy.mipLevel = level;
+ textureCopy.origin = {0, 0, layer};
+ textureCopy.texture = this;
+
+ regions.push_back(ComputeBufferImageCopyRegion(dataLayout, textureCopy, copySize));
}
- // need to clear the texture with a copy from buffer
- ASSERT(range.aspects == Aspect::Color);
- const TexelBlockInfo& blockInfo = GetFormat().GetAspectInfo(range.aspects).block;
-
- Extent3D largestMipSize = GetMipLevelPhysicalSize(range.baseMipLevel);
-
- uint32_t bytesPerRow =
- Align((largestMipSize.width / blockInfo.width) * blockInfo.byteSize,
- device->GetOptimalBytesPerRowAlignment());
- uint64_t bufferSize = bytesPerRow * (largestMipSize.height / blockInfo.height) *
- largestMipSize.depthOrArrayLayers;
- DynamicUploader* uploader = device->GetDynamicUploader();
- UploadHandle uploadHandle;
- DAWN_TRY_ASSIGN(uploadHandle,
- uploader->Allocate(bufferSize, device->GetPendingCommandSerial(),
- blockInfo.byteSize));
- memset(uploadHandle.mappedBuffer, uClearColor, bufferSize);
-
- std::vector<VkBufferImageCopy> regions;
- for (uint32_t level = range.baseMipLevel; level < range.baseMipLevel + range.levelCount;
- ++level) {
- Extent3D copySize = GetMipLevelPhysicalSize(level);
- imageRange.baseMipLevel = level;
- for (uint32_t layer = range.baseArrayLayer;
- layer < range.baseArrayLayer + range.layerCount; ++layer) {
+ }
+ device->fn.CmdCopyBufferToImage(recordingContext->commandBuffer,
+ ToBackend(uploadHandle.stagingBuffer)->GetBufferHandle(),
+ GetHandle(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+ regions.size(), regions.data());
+ } else {
+ for (uint32_t level = range.baseMipLevel; level < range.baseMipLevel + range.levelCount;
+ ++level) {
+ imageRange.baseMipLevel = level;
+ for (uint32_t layer = range.baseArrayLayer;
+ layer < range.baseArrayLayer + range.layerCount; ++layer) {
+ Aspect aspects = Aspect::None;
+ for (Aspect aspect : IterateEnumMask(range.aspects)) {
if (clearValue == TextureBase::ClearValue::Zero &&
IsSubresourceContentInitialized(
- SubresourceRange::SingleMipAndLayer(level, layer, range.aspects))) {
+ SubresourceRange::SingleMipAndLayer(level, layer, aspect))) {
// Skip lazy clears if already initialized.
continue;
}
-
- TextureDataLayout dataLayout;
- dataLayout.offset = uploadHandle.startOffset;
- dataLayout.rowsPerImage = copySize.height / blockInfo.height;
- dataLayout.bytesPerRow = bytesPerRow;
- TextureCopy textureCopy;
- textureCopy.aspect = range.aspects;
- textureCopy.mipLevel = level;
- textureCopy.origin = {0, 0, layer};
- textureCopy.texture = this;
-
- regions.push_back(
- ComputeBufferImageCopyRegion(dataLayout, textureCopy, copySize));
+ aspects |= aspect;
}
- }
- device->fn.CmdCopyBufferToImage(
- recordingContext->commandBuffer,
- ToBackend(uploadHandle.stagingBuffer)->GetBufferHandle(), GetHandle(),
- VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, regions.size(), regions.data());
- } else {
- for (uint32_t level = range.baseMipLevel; level < range.baseMipLevel + range.levelCount;
- ++level) {
- imageRange.baseMipLevel = level;
- for (uint32_t layer = range.baseArrayLayer;
- layer < range.baseArrayLayer + range.layerCount; ++layer) {
- Aspect aspects = Aspect::None;
- for (Aspect aspect : IterateEnumMask(range.aspects)) {
- if (clearValue == TextureBase::ClearValue::Zero &&
- IsSubresourceContentInitialized(
- SubresourceRange::SingleMipAndLayer(level, layer, aspect))) {
- // Skip lazy clears if already initialized.
- continue;
- }
- aspects |= aspect;
- }
- if (aspects == Aspect::None) {
- continue;
- }
+ if (aspects == Aspect::None) {
+ continue;
+ }
- imageRange.aspectMask = VulkanAspectMask(aspects);
- imageRange.baseArrayLayer = layer;
-
- if (aspects &
- (Aspect::Depth | Aspect::Stencil | Aspect::CombinedDepthStencil)) {
- VkClearDepthStencilValue clearDepthStencilValue[1];
- clearDepthStencilValue[0].depth = fClearColor;
- clearDepthStencilValue[0].stencil = uClearColor;
- device->fn.CmdClearDepthStencilImage(
- recordingContext->commandBuffer, GetHandle(),
- VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, clearDepthStencilValue, 1,
- &imageRange);
- } else {
- ASSERT(aspects == Aspect::Color);
- VkClearColorValue clearColorValue;
- switch (GetFormat().GetAspectInfo(Aspect::Color).baseType) {
- case wgpu::TextureComponentType::Float:
- clearColorValue.float32[0] = fClearColor;
- clearColorValue.float32[1] = fClearColor;
- clearColorValue.float32[2] = fClearColor;
- clearColorValue.float32[3] = fClearColor;
- break;
- case wgpu::TextureComponentType::Sint:
- clearColorValue.int32[0] = sClearColor;
- clearColorValue.int32[1] = sClearColor;
- clearColorValue.int32[2] = sClearColor;
- clearColorValue.int32[3] = sClearColor;
- break;
- case wgpu::TextureComponentType::Uint:
- clearColorValue.uint32[0] = uClearColor;
- clearColorValue.uint32[1] = uClearColor;
- clearColorValue.uint32[2] = uClearColor;
- clearColorValue.uint32[3] = uClearColor;
- break;
- case wgpu::TextureComponentType::DepthComparison:
- UNREACHABLE();
- }
- device->fn.CmdClearColorImage(recordingContext->commandBuffer, GetHandle(),
- VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
- &clearColorValue, 1, &imageRange);
+ imageRange.aspectMask = VulkanAspectMask(aspects);
+ imageRange.baseArrayLayer = layer;
+
+ if (aspects & (Aspect::Depth | Aspect::Stencil | Aspect::CombinedDepthStencil)) {
+ VkClearDepthStencilValue clearDepthStencilValue[1];
+ clearDepthStencilValue[0].depth = fClearColor;
+ clearDepthStencilValue[0].stencil = uClearColor;
+ device->fn.CmdClearDepthStencilImage(recordingContext->commandBuffer,
+ GetHandle(),
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+ clearDepthStencilValue, 1, &imageRange);
+ } else {
+ ASSERT(aspects == Aspect::Color);
+ VkClearColorValue clearColorValue;
+ switch (GetFormat().GetAspectInfo(Aspect::Color).baseType) {
+ case wgpu::TextureComponentType::Float:
+ clearColorValue.float32[0] = fClearColor;
+ clearColorValue.float32[1] = fClearColor;
+ clearColorValue.float32[2] = fClearColor;
+ clearColorValue.float32[3] = fClearColor;
+ break;
+ case wgpu::TextureComponentType::Sint:
+ clearColorValue.int32[0] = sClearColor;
+ clearColorValue.int32[1] = sClearColor;
+ clearColorValue.int32[2] = sClearColor;
+ clearColorValue.int32[3] = sClearColor;
+ break;
+ case wgpu::TextureComponentType::Uint:
+ clearColorValue.uint32[0] = uClearColor;
+ clearColorValue.uint32[1] = uClearColor;
+ clearColorValue.uint32[2] = uClearColor;
+ clearColorValue.uint32[3] = uClearColor;
+ break;
+ case wgpu::TextureComponentType::DepthComparison:
+ UNREACHABLE();
}
+ device->fn.CmdClearColorImage(recordingContext->commandBuffer, GetHandle(),
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+ &clearColorValue, 1, &imageRange);
}
}
}
-
- if (clearValue == TextureBase::ClearValue::Zero) {
- SetIsSubresourceContentInitialized(true, range);
- device->IncrementLazyClearCountForTesting();
- }
- return {};
}
- void Texture::EnsureSubresourceContentInitialized(CommandRecordingContext* recordingContext,
- const SubresourceRange& range) {
- if (!GetDevice()->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse)) {
- return;
- }
- if (!IsSubresourceContentInitialized(range)) {
- // If subresource has not been initialized, clear it to black as it could contain dirty
- // bits from recycled memory
- GetDevice()->ConsumedError(
- ClearTexture(recordingContext, range, TextureBase::ClearValue::Zero));
- }
+ if (clearValue == TextureBase::ClearValue::Zero) {
+ SetIsSubresourceContentInitialized(true, range);
+ device->IncrementLazyClearCountForTesting();
}
+ return {};
+}
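The compressed-format branch above sizes its staging buffer from the block dimensions and the optimal bytes-per-row alignment. A short worked example of that arithmetic, assuming a BC1-style format (4x4 blocks of 8 bytes), a 256x256 mip level with one layer, and an assumed 256-byte value for GetOptimalBytesPerRowAlignment():

#include <cstdint>

// Worked example of the staging-buffer sizing used by the compressed clear path above.
// All the concrete values here are assumptions chosen for illustration.
constexpr uint32_t kBlockWidth = 4;
constexpr uint32_t kBlockHeight = 4;
constexpr uint32_t kBlockByteSize = 8;
constexpr uint32_t kAlignment = 256;  // assumed optimal bytes-per-row alignment
constexpr uint32_t kMipWidth = 256;
constexpr uint32_t kMipHeight = 256;
constexpr uint32_t kDepthOrArrayLayers = 1;

constexpr uint32_t kUnalignedBytesPerRow = (kMipWidth / kBlockWidth) * kBlockByteSize;   // 512
constexpr uint32_t kBytesPerRow =
    (kUnalignedBytesPerRow + kAlignment - 1) / kAlignment * kAlignment;                  // 512
constexpr uint64_t kBufferSize =
    uint64_t(kBytesPerRow) * (kMipHeight / kBlockHeight) * kDepthOrArrayLayers;          // 32768
static_assert(kBufferSize == 32 * 1024, "64 block rows of 512 bytes each");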
- VkImageLayout Texture::GetCurrentLayoutForSwapChain() const {
- ASSERT(GetFormat().aspects == Aspect::Color);
- return VulkanImageLayout(this, mSubresourceLastUsages->Get(Aspect::Color, 0, 0));
+void Texture::EnsureSubresourceContentInitialized(CommandRecordingContext* recordingContext,
+ const SubresourceRange& range) {
+ if (!GetDevice()->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse)) {
+ return;
}
-
- // static
- ResultOrError<Ref<TextureView>> TextureView::Create(TextureBase* texture,
- const TextureViewDescriptor* descriptor) {
- Ref<TextureView> view = AcquireRef(new TextureView(texture, descriptor));
- DAWN_TRY(view->Initialize(descriptor));
- return view;
+ if (!IsSubresourceContentInitialized(range)) {
+        // If the subresource has not been initialized, clear it to black as it could contain
+        // dirty bits from recycled memory.
+ GetDevice()->ConsumedError(
+ ClearTexture(recordingContext, range, TextureBase::ClearValue::Zero));
+ }
+}
+
+VkImageLayout Texture::GetCurrentLayoutForSwapChain() const {
+ ASSERT(GetFormat().aspects == Aspect::Color);
+ return VulkanImageLayout(this, mSubresourceLastUsages->Get(Aspect::Color, 0, 0));
+}
+
+// static
+ResultOrError<Ref<TextureView>> TextureView::Create(TextureBase* texture,
+ const TextureViewDescriptor* descriptor) {
+ Ref<TextureView> view = AcquireRef(new TextureView(texture, descriptor));
+ DAWN_TRY(view->Initialize(descriptor));
+ return view;
+}
+
+MaybeError TextureView::Initialize(const TextureViewDescriptor* descriptor) {
+ if ((GetTexture()->GetInternalUsage() &
+ ~(wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::CopyDst)) == 0) {
+ // If the texture view has no other usage than CopySrc and CopyDst, then it can't
+        // actually be used as a render pass attachment or a sampled/storage texture. The Vulkan
+        // validation layers warn if you create such a VkImageView, so return early.
+ return {};
}
- MaybeError TextureView::Initialize(const TextureViewDescriptor* descriptor) {
- if ((GetTexture()->GetInternalUsage() &
- ~(wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::CopyDst)) == 0) {
- // If the texture view has no other usage than CopySrc and CopyDst, then it can't
- // actually be used as a render pass attachment or sampled/storage texture. The Vulkan
- // validation errors warn if you create such a vkImageView, so return early.
- return {};
- }
-
- // Texture could be destroyed by the time we make a view.
- if (GetTexture()->GetTextureState() == Texture::TextureState::Destroyed) {
- return {};
- }
+ // Texture could be destroyed by the time we make a view.
+ if (GetTexture()->GetTextureState() == Texture::TextureState::Destroyed) {
+ return {};
+ }
- Device* device = ToBackend(GetTexture()->GetDevice());
-
- VkImageViewCreateInfo createInfo;
- createInfo.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
- createInfo.pNext = nullptr;
- createInfo.flags = 0;
- createInfo.image = ToBackend(GetTexture())->GetHandle();
- createInfo.viewType = VulkanImageViewType(descriptor->dimension);
-
- const Format& textureFormat = GetTexture()->GetFormat();
- if (textureFormat.HasStencil() &&
- (textureFormat.HasDepth() || !device->IsToggleEnabled(Toggle::VulkanUseS8))) {
- // Unlike multi-planar formats, depth-stencil formats have multiple aspects but are not
- // created with VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT.
- // https://www.khronos.org/registry/vulkan/specs/1.3-extensions/man/html/VkImageViewCreateInfo.html#VUID-VkImageViewCreateInfo-image-01762
- // Without, VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT, the view format must match the texture
- // format.
- createInfo.format = VulkanImageFormat(device, textureFormat.format);
- } else {
- createInfo.format = VulkanImageFormat(device, descriptor->format);
- }
+ Device* device = ToBackend(GetTexture()->GetDevice());
+
+ VkImageViewCreateInfo createInfo;
+ createInfo.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
+ createInfo.pNext = nullptr;
+ createInfo.flags = 0;
+ createInfo.image = ToBackend(GetTexture())->GetHandle();
+ createInfo.viewType = VulkanImageViewType(descriptor->dimension);
+
+ const Format& textureFormat = GetTexture()->GetFormat();
+ if (textureFormat.HasStencil() &&
+ (textureFormat.HasDepth() || !device->IsToggleEnabled(Toggle::VulkanUseS8))) {
+ // Unlike multi-planar formats, depth-stencil formats have multiple aspects but are not
+ // created with VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT.
+ // https://www.khronos.org/registry/vulkan/specs/1.3-extensions/man/html/VkImageViewCreateInfo.html#VUID-VkImageViewCreateInfo-image-01762
+        // Without VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT, the view format must match the texture
+        // format.
+ createInfo.format = VulkanImageFormat(device, textureFormat.format);
+ } else {
+ createInfo.format = VulkanImageFormat(device, descriptor->format);
+ }
- createInfo.components = VkComponentMapping{VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G,
- VK_COMPONENT_SWIZZLE_B, VK_COMPONENT_SWIZZLE_A};
+ createInfo.components = VkComponentMapping{VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G,
+ VK_COMPONENT_SWIZZLE_B, VK_COMPONENT_SWIZZLE_A};
- const SubresourceRange& subresources = GetSubresourceRange();
- createInfo.subresourceRange.baseMipLevel = subresources.baseMipLevel;
- createInfo.subresourceRange.levelCount = subresources.levelCount;
- createInfo.subresourceRange.baseArrayLayer = subresources.baseArrayLayer;
- createInfo.subresourceRange.layerCount = subresources.layerCount;
- createInfo.subresourceRange.aspectMask = VulkanAspectMask(subresources.aspects);
+ const SubresourceRange& subresources = GetSubresourceRange();
+ createInfo.subresourceRange.baseMipLevel = subresources.baseMipLevel;
+ createInfo.subresourceRange.levelCount = subresources.levelCount;
+ createInfo.subresourceRange.baseArrayLayer = subresources.baseArrayLayer;
+ createInfo.subresourceRange.layerCount = subresources.layerCount;
+ createInfo.subresourceRange.aspectMask = VulkanAspectMask(subresources.aspects);
- DAWN_TRY(CheckVkSuccess(
- device->fn.CreateImageView(device->GetVkDevice(), &createInfo, nullptr, &*mHandle),
- "CreateImageView"));
+ DAWN_TRY(CheckVkSuccess(
+ device->fn.CreateImageView(device->GetVkDevice(), &createInfo, nullptr, &*mHandle),
+ "CreateImageView"));
- SetLabelImpl();
+ SetLabelImpl();
- return {};
- }
+ return {};
+}
- TextureView::~TextureView() {
- }
+TextureView::~TextureView() {}
- void TextureView::DestroyImpl() {
- Device* device = ToBackend(GetTexture()->GetDevice());
+void TextureView::DestroyImpl() {
+ Device* device = ToBackend(GetTexture()->GetDevice());
- if (mHandle != VK_NULL_HANDLE) {
- device->GetFencedDeleter()->DeleteWhenUnused(mHandle);
- mHandle = VK_NULL_HANDLE;
- }
+ if (mHandle != VK_NULL_HANDLE) {
+ device->GetFencedDeleter()->DeleteWhenUnused(mHandle);
+ mHandle = VK_NULL_HANDLE;
}
+}
- VkImageView TextureView::GetHandle() const {
- return mHandle;
- }
+VkImageView TextureView::GetHandle() const {
+ return mHandle;
+}
- void TextureView::SetLabelImpl() {
- SetDebugName(ToBackend(GetDevice()), mHandle, "Dawn_TextureView", GetLabel());
- }
+void TextureView::SetLabelImpl() {
+ SetDebugName(ToBackend(GetDevice()), mHandle, "Dawn_TextureView", GetLabel());
+}
} // namespace dawn::native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/TextureVk.h b/chromium/third_party/dawn/src/dawn/native/vulkan/TextureVk.h
index c19d3b8df1c..4be64775c59 100644
--- a/chromium/third_party/dawn/src/dawn/native/vulkan/TextureVk.h
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/TextureVk.h
@@ -15,182 +15,182 @@
#ifndef SRC_DAWN_NATIVE_VULKAN_TEXTUREVK_H_
#define SRC_DAWN_NATIVE_VULKAN_TEXTUREVK_H_
-#include "dawn/native/Texture.h"
+#include <memory>
+#include <vector>
#include "dawn/common/vulkan_platform.h"
#include "dawn/native/PassResourceUsage.h"
#include "dawn/native/ResourceMemoryAllocation.h"
+#include "dawn/native/Texture.h"
#include "dawn/native/vulkan/ExternalHandle.h"
#include "dawn/native/vulkan/external_memory/MemoryService.h"
namespace dawn::native::vulkan {
- struct CommandRecordingContext;
- class Device;
- class Texture;
-
- VkFormat VulkanImageFormat(const Device* device, wgpu::TextureFormat format);
- VkImageUsageFlags VulkanImageUsage(wgpu::TextureUsage usage, const Format& format);
- VkImageLayout VulkanImageLayout(const Texture* texture, wgpu::TextureUsage usage);
- VkSampleCountFlagBits VulkanSampleCount(uint32_t sampleCount);
-
- MaybeError ValidateVulkanImageCanBeWrapped(const DeviceBase* device,
- const TextureDescriptor* descriptor);
-
- bool IsSampleCountSupported(const dawn::native::vulkan::Device* device,
- const VkImageCreateInfo& imageCreateInfo);
-
- class Texture final : public TextureBase {
- public:
- // Used to create a regular texture from a descriptor.
- static ResultOrError<Ref<Texture>> Create(Device* device,
- const TextureDescriptor* descriptor,
- VkImageUsageFlags extraUsages = 0);
-
- // Creates a texture and initializes it with a VkImage that references an external memory
- // object. Before the texture can be used, the VkDeviceMemory associated with the external
- // image must be bound via Texture::BindExternalMemory.
- static ResultOrError<Texture*> CreateFromExternal(
- Device* device,
- const ExternalImageDescriptorVk* descriptor,
- const TextureDescriptor* textureDescriptor,
- external_memory::Service* externalMemoryService);
-
- // Creates a texture that wraps a swapchain-allocated VkImage.
- static Ref<Texture> CreateForSwapChain(Device* device,
- const TextureDescriptor* descriptor,
- VkImage nativeImage);
-
- VkImage GetHandle() const;
-
- // Transitions the texture to be used as `usage`, recording any necessary barrier in
- // `commands`.
- // TODO(crbug.com/dawn/851): coalesce barriers and do them early when possible.
- void TransitionUsageNow(CommandRecordingContext* recordingContext,
- wgpu::TextureUsage usage,
- const SubresourceRange& range);
- void TransitionUsageForPass(CommandRecordingContext* recordingContext,
- const TextureSubresourceUsage& textureUsages,
+struct CommandRecordingContext;
+class Device;
+class Texture;
+
+VkFormat VulkanImageFormat(const Device* device, wgpu::TextureFormat format);
+VkImageUsageFlags VulkanImageUsage(wgpu::TextureUsage usage, const Format& format);
+VkImageLayout VulkanImageLayout(const Texture* texture, wgpu::TextureUsage usage);
+VkSampleCountFlagBits VulkanSampleCount(uint32_t sampleCount);
+
+MaybeError ValidateVulkanImageCanBeWrapped(const DeviceBase* device,
+ const TextureDescriptor* descriptor);
+
+bool IsSampleCountSupported(const dawn::native::vulkan::Device* device,
+ const VkImageCreateInfo& imageCreateInfo);
+
+class Texture final : public TextureBase {
+ public:
+ // Used to create a regular texture from a descriptor.
+ static ResultOrError<Ref<Texture>> Create(Device* device,
+ const TextureDescriptor* descriptor,
+ VkImageUsageFlags extraUsages = 0);
+
+ // Creates a texture and initializes it with a VkImage that references an external memory
+ // object. Before the texture can be used, the VkDeviceMemory associated with the external
+ // image must be bound via Texture::BindExternalMemory.
+ static ResultOrError<Texture*> CreateFromExternal(
+ Device* device,
+ const ExternalImageDescriptorVk* descriptor,
+ const TextureDescriptor* textureDescriptor,
+ external_memory::Service* externalMemoryService);
+
+ // Creates a texture that wraps a swapchain-allocated VkImage.
+ static Ref<Texture> CreateForSwapChain(Device* device,
+ const TextureDescriptor* descriptor,
+ VkImage nativeImage);
+
+ VkImage GetHandle() const;
+
+ // Transitions the texture to be used as `usage`, recording any necessary barrier in
+ // `commands`.
+ // TODO(crbug.com/dawn/851): coalesce barriers and do them early when possible.
+ void TransitionUsageNow(CommandRecordingContext* recordingContext,
+ wgpu::TextureUsage usage,
+ const SubresourceRange& range);
+ void TransitionUsageForPass(CommandRecordingContext* recordingContext,
+ const TextureSubresourceUsage& textureUsages,
+ std::vector<VkImageMemoryBarrier>* imageBarriers,
+ VkPipelineStageFlags* srcStages,
+ VkPipelineStageFlags* dstStages);
+
+ void EnsureSubresourceContentInitialized(CommandRecordingContext* recordingContext,
+ const SubresourceRange& range);
+
+ VkImageLayout GetCurrentLayoutForSwapChain() const;
+
+ // Binds externally allocated memory to the VkImage and on success, takes ownership of
+ // semaphores.
+ MaybeError BindExternalMemory(const ExternalImageDescriptorVk* descriptor,
+ VkSemaphore signalSemaphore,
+ VkDeviceMemory externalMemoryAllocation,
+ std::vector<VkSemaphore> waitSemaphores);
+
+ MaybeError ExportExternalTexture(VkImageLayout desiredLayout,
+ VkSemaphore* signalSemaphore,
+ VkImageLayout* releasedOldLayout,
+ VkImageLayout* releasedNewLayout);
+
+ void SetLabelHelper(const char* prefix);
+
+ // Dawn API
+ void SetLabelImpl() override;
+
+ private:
+ ~Texture() override;
+ Texture(Device* device, const TextureDescriptor* descriptor, TextureState state);
+
+ MaybeError InitializeAsInternalTexture(VkImageUsageFlags extraUsages);
+ MaybeError InitializeFromExternal(const ExternalImageDescriptorVk* descriptor,
+ external_memory::Service* externalMemoryService);
+ void InitializeForSwapChain(VkImage nativeImage);
+
+ void DestroyImpl() override;
+ MaybeError ClearTexture(CommandRecordingContext* recordingContext,
+ const SubresourceRange& range,
+ TextureBase::ClearValue);
+
+ // Implementation details of the barrier computations for the texture.
+ void TransitionUsageAndGetResourceBarrier(wgpu::TextureUsage usage,
+ const SubresourceRange& range,
+ std::vector<VkImageMemoryBarrier>* imageBarriers,
+ VkPipelineStageFlags* srcStages,
+ VkPipelineStageFlags* dstStages);
+ void TransitionUsageForPassImpl(CommandRecordingContext* recordingContext,
+ const SubresourceStorage<wgpu::TextureUsage>& subresourceUsages,
std::vector<VkImageMemoryBarrier>* imageBarriers,
VkPipelineStageFlags* srcStages,
VkPipelineStageFlags* dstStages);
-
- void EnsureSubresourceContentInitialized(CommandRecordingContext* recordingContext,
- const SubresourceRange& range);
-
- VkImageLayout GetCurrentLayoutForSwapChain() const;
-
- // Binds externally allocated memory to the VkImage and on success, takes ownership of
- // semaphores.
- MaybeError BindExternalMemory(const ExternalImageDescriptorVk* descriptor,
- VkSemaphore signalSemaphore,
- VkDeviceMemory externalMemoryAllocation,
- std::vector<VkSemaphore> waitSemaphores);
-
- MaybeError ExportExternalTexture(VkImageLayout desiredLayout,
- VkSemaphore* signalSemaphore,
- VkImageLayout* releasedOldLayout,
- VkImageLayout* releasedNewLayout);
-
- void SetLabelHelper(const char* prefix);
-
- // Dawn API
- void SetLabelImpl() override;
-
- private:
- ~Texture() override;
- Texture(Device* device, const TextureDescriptor* descriptor, TextureState state);
-
- MaybeError InitializeAsInternalTexture(VkImageUsageFlags extraUsages);
- MaybeError InitializeFromExternal(const ExternalImageDescriptorVk* descriptor,
- external_memory::Service* externalMemoryService);
- void InitializeForSwapChain(VkImage nativeImage);
-
- void DestroyImpl() override;
- MaybeError ClearTexture(CommandRecordingContext* recordingContext,
- const SubresourceRange& range,
- TextureBase::ClearValue);
-
- // Implementation details of the barrier computations for the texture.
- void TransitionUsageAndGetResourceBarrier(wgpu::TextureUsage usage,
+ void TransitionUsageAndGetResourceBarrierImpl(wgpu::TextureUsage usage,
const SubresourceRange& range,
std::vector<VkImageMemoryBarrier>* imageBarriers,
VkPipelineStageFlags* srcStages,
VkPipelineStageFlags* dstStages);
- void TransitionUsageForPassImpl(
- CommandRecordingContext* recordingContext,
- const SubresourceStorage<wgpu::TextureUsage>& subresourceUsages,
- std::vector<VkImageMemoryBarrier>* imageBarriers,
- VkPipelineStageFlags* srcStages,
- VkPipelineStageFlags* dstStages);
- void TransitionUsageAndGetResourceBarrierImpl(
- wgpu::TextureUsage usage,
- const SubresourceRange& range,
- std::vector<VkImageMemoryBarrier>* imageBarriers,
- VkPipelineStageFlags* srcStages,
- VkPipelineStageFlags* dstStages);
- void TweakTransitionForExternalUsage(CommandRecordingContext* recordingContext,
- std::vector<VkImageMemoryBarrier>* barriers,
- size_t transitionBarrierStart);
- bool CanReuseWithoutBarrier(wgpu::TextureUsage lastUsage, wgpu::TextureUsage usage);
-
- // In base Vulkan, Depth and stencil can only be transitioned together. This function
- // indicates whether we should combine depth and stencil barriers to accommodate this
- // limitation.
- bool ShouldCombineDepthStencilBarriers() const;
-
- // This indicates whether the VK_IMAGE_ASPECT_COLOR_BIT instead of
- // VK_IMAGE_ASPECT_PLANE_n_BIT must be used.
- bool ShouldCombineMultiPlaneBarriers() const;
-
- bool ShouldCombineBarriers() const {
- return ShouldCombineDepthStencilBarriers() || ShouldCombineMultiPlaneBarriers();
- }
-
- // Compute the Aspects of the SubresourceStoage for this texture depending on whether we're
- // doing the workaround for combined depth and stencil barriers, or combining multi-plane
- // barriers.
- Aspect ComputeAspectsForSubresourceStorage() const;
-
- VkImage mHandle = VK_NULL_HANDLE;
- ResourceMemoryAllocation mMemoryAllocation;
- VkDeviceMemory mExternalAllocation = VK_NULL_HANDLE;
-
- enum class ExternalState { InternalOnly, PendingAcquire, Acquired, Released };
- ExternalState mExternalState = ExternalState::InternalOnly;
- ExternalState mLastExternalState = ExternalState::InternalOnly;
-
- VkImageLayout mPendingAcquireOldLayout;
- VkImageLayout mPendingAcquireNewLayout;
-
- VkSemaphore mSignalSemaphore = VK_NULL_HANDLE;
- std::vector<VkSemaphore> mWaitRequirements;
-
- // Note that in early Vulkan versions it is not possible to transition depth and stencil
- // separately so textures with Depth|Stencil aspects will have a single Depth aspect in the
- // storage.
- std::unique_ptr<SubresourceStorage<wgpu::TextureUsage>> mSubresourceLastUsages;
-
- bool mSupportsDisjointVkImage = false;
- };
-
- class TextureView final : public TextureViewBase {
- public:
- static ResultOrError<Ref<TextureView>> Create(TextureBase* texture,
- const TextureViewDescriptor* descriptor);
- VkImageView GetHandle() const;
-
- private:
- ~TextureView() override;
- void DestroyImpl() override;
- using TextureViewBase::TextureViewBase;
- MaybeError Initialize(const TextureViewDescriptor* descriptor);
-
- // Dawn API
- void SetLabelImpl() override;
-
- VkImageView mHandle = VK_NULL_HANDLE;
- };
+ void TweakTransitionForExternalUsage(CommandRecordingContext* recordingContext,
+ std::vector<VkImageMemoryBarrier>* barriers,
+ size_t transitionBarrierStart);
+ bool CanReuseWithoutBarrier(wgpu::TextureUsage lastUsage, wgpu::TextureUsage usage);
+
+ // In base Vulkan, Depth and stencil can only be transitioned together. This function
+ // indicates whether we should combine depth and stencil barriers to accommodate this
+ // limitation.
+ bool ShouldCombineDepthStencilBarriers() const;
+
+    // This indicates whether VK_IMAGE_ASPECT_COLOR_BIT must be used instead of
+    // VK_IMAGE_ASPECT_PLANE_n_BIT.
+ bool ShouldCombineMultiPlaneBarriers() const;
+
+ bool ShouldCombineBarriers() const {
+ return ShouldCombineDepthStencilBarriers() || ShouldCombineMultiPlaneBarriers();
+ }
+
+    // Compute the Aspects of the SubresourceStorage for this texture depending on whether we're
+ // doing the workaround for combined depth and stencil barriers, or combining multi-plane
+ // barriers.
+ Aspect ComputeAspectsForSubresourceStorage() const;
+
+ VkImage mHandle = VK_NULL_HANDLE;
+ ResourceMemoryAllocation mMemoryAllocation;
+ VkDeviceMemory mExternalAllocation = VK_NULL_HANDLE;
+
+ enum class ExternalState { InternalOnly, PendingAcquire, Acquired, Released };
+ ExternalState mExternalState = ExternalState::InternalOnly;
+ ExternalState mLastExternalState = ExternalState::InternalOnly;
+
+ VkImageLayout mPendingAcquireOldLayout;
+ VkImageLayout mPendingAcquireNewLayout;
+
+ VkSemaphore mSignalSemaphore = VK_NULL_HANDLE;
+ std::vector<VkSemaphore> mWaitRequirements;
+
+ // Note that in early Vulkan versions it is not possible to transition depth and stencil
+    // separately, so textures with Depth|Stencil aspects will have a single Depth aspect in the
+ // storage.
+ std::unique_ptr<SubresourceStorage<wgpu::TextureUsage>> mSubresourceLastUsages;
+
+ bool mSupportsDisjointVkImage = false;
+};
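A rough sketch of the import/bind/export sequence described by the member comments above. The variable names (`device`, `descriptor`, `textureDescriptor`, `externalMemoryService`, `memory`, and the semaphores) are assumptions for illustration only, and DAWN_TRY/DAWN_TRY_ASSIGN are Dawn's usual error-propagation macros.

// Minimal sketch: import an externally allocated VkImage, bind its memory, use it,
// then export it back. All inputs are assumed to exist and be valid.
MaybeError UseExternalTexture(Device* device,
                              const ExternalImageDescriptorVk* descriptor,
                              const TextureDescriptor* textureDescriptor,
                              external_memory::Service* externalMemoryService,
                              VkSemaphore signalSemaphore,
                              VkDeviceMemory memory,
                              std::vector<VkSemaphore> waitSemaphores) {
    Texture* texture = nullptr;
    DAWN_TRY_ASSIGN(texture, Texture::CreateFromExternal(device, descriptor, textureDescriptor,
                                                         externalMemoryService));
    // The texture is not usable until the external allocation is bound.
    DAWN_TRY(texture->BindExternalMemory(descriptor, signalSemaphore, memory,
                                         std::move(waitSemaphores)));

    // ... record and submit work that uses the texture ...

    VkSemaphore exportSemaphore;
    VkImageLayout releasedOldLayout;
    VkImageLayout releasedNewLayout;
    DAWN_TRY(texture->ExportExternalTexture(VK_IMAGE_LAYOUT_GENERAL, &exportSemaphore,
                                            &releasedOldLayout, &releasedNewLayout));
    return {};
}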
+
+class TextureView final : public TextureViewBase {
+ public:
+ static ResultOrError<Ref<TextureView>> Create(TextureBase* texture,
+ const TextureViewDescriptor* descriptor);
+ VkImageView GetHandle() const;
+
+ private:
+ ~TextureView() override;
+ void DestroyImpl() override;
+ using TextureViewBase::TextureViewBase;
+ MaybeError Initialize(const TextureViewDescriptor* descriptor);
+
+ // Dawn API
+ void SetLabelImpl() override;
+
+ VkImageView mHandle = VK_NULL_HANDLE;
+};
} // namespace dawn::native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/UtilsVulkan.cpp b/chromium/third_party/dawn/src/dawn/native/vulkan/UtilsVulkan.cpp
index 0d0e86fa69b..2595112faa5 100644
--- a/chromium/third_party/dawn/src/dawn/native/vulkan/UtilsVulkan.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/UtilsVulkan.cpp
@@ -26,267 +26,294 @@
namespace dawn::native::vulkan {
+constexpr char kDeviceDebugPrefix[] = "DawnDbg=";
+constexpr char kDeviceDebugSeparator[] = ";";
+
#define VK_OBJECT_TYPE_GETTER(object, objectType) \
template <> \
VkObjectType GetVkObjectType<object>(object handle) { \
return objectType; \
}
- VK_OBJECT_TYPE_GETTER(VkBuffer, VK_OBJECT_TYPE_BUFFER)
- VK_OBJECT_TYPE_GETTER(VkDescriptorSetLayout, VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT)
- VK_OBJECT_TYPE_GETTER(VkDescriptorSet, VK_OBJECT_TYPE_DESCRIPTOR_SET)
- VK_OBJECT_TYPE_GETTER(VkPipeline, VK_OBJECT_TYPE_PIPELINE)
- VK_OBJECT_TYPE_GETTER(VkPipelineLayout, VK_OBJECT_TYPE_PIPELINE_LAYOUT)
- VK_OBJECT_TYPE_GETTER(VkQueryPool, VK_OBJECT_TYPE_QUERY_POOL)
- VK_OBJECT_TYPE_GETTER(VkSampler, VK_OBJECT_TYPE_SAMPLER)
- VK_OBJECT_TYPE_GETTER(VkShaderModule, VK_OBJECT_TYPE_SHADER_MODULE)
- VK_OBJECT_TYPE_GETTER(VkImage, VK_OBJECT_TYPE_IMAGE)
- VK_OBJECT_TYPE_GETTER(VkImageView, VK_OBJECT_TYPE_IMAGE_VIEW)
+VK_OBJECT_TYPE_GETTER(VkBuffer, VK_OBJECT_TYPE_BUFFER)
+VK_OBJECT_TYPE_GETTER(VkDescriptorSetLayout, VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT)
+VK_OBJECT_TYPE_GETTER(VkDescriptorSet, VK_OBJECT_TYPE_DESCRIPTOR_SET)
+VK_OBJECT_TYPE_GETTER(VkPipeline, VK_OBJECT_TYPE_PIPELINE)
+VK_OBJECT_TYPE_GETTER(VkPipelineLayout, VK_OBJECT_TYPE_PIPELINE_LAYOUT)
+VK_OBJECT_TYPE_GETTER(VkQueryPool, VK_OBJECT_TYPE_QUERY_POOL)
+VK_OBJECT_TYPE_GETTER(VkSampler, VK_OBJECT_TYPE_SAMPLER)
+VK_OBJECT_TYPE_GETTER(VkShaderModule, VK_OBJECT_TYPE_SHADER_MODULE)
+VK_OBJECT_TYPE_GETTER(VkImage, VK_OBJECT_TYPE_IMAGE)
+VK_OBJECT_TYPE_GETTER(VkImageView, VK_OBJECT_TYPE_IMAGE_VIEW)
#undef VK_OBJECT_TYPE_GETTER
- VkCompareOp ToVulkanCompareOp(wgpu::CompareFunction op) {
- switch (op) {
- case wgpu::CompareFunction::Never:
- return VK_COMPARE_OP_NEVER;
- case wgpu::CompareFunction::Less:
- return VK_COMPARE_OP_LESS;
- case wgpu::CompareFunction::LessEqual:
- return VK_COMPARE_OP_LESS_OR_EQUAL;
- case wgpu::CompareFunction::Greater:
- return VK_COMPARE_OP_GREATER;
- case wgpu::CompareFunction::GreaterEqual:
- return VK_COMPARE_OP_GREATER_OR_EQUAL;
- case wgpu::CompareFunction::Equal:
- return VK_COMPARE_OP_EQUAL;
- case wgpu::CompareFunction::NotEqual:
- return VK_COMPARE_OP_NOT_EQUAL;
- case wgpu::CompareFunction::Always:
- return VK_COMPARE_OP_ALWAYS;
-
- case wgpu::CompareFunction::Undefined:
- break;
- }
- UNREACHABLE();
+VkCompareOp ToVulkanCompareOp(wgpu::CompareFunction op) {
+ switch (op) {
+ case wgpu::CompareFunction::Never:
+ return VK_COMPARE_OP_NEVER;
+ case wgpu::CompareFunction::Less:
+ return VK_COMPARE_OP_LESS;
+ case wgpu::CompareFunction::LessEqual:
+ return VK_COMPARE_OP_LESS_OR_EQUAL;
+ case wgpu::CompareFunction::Greater:
+ return VK_COMPARE_OP_GREATER;
+ case wgpu::CompareFunction::GreaterEqual:
+ return VK_COMPARE_OP_GREATER_OR_EQUAL;
+ case wgpu::CompareFunction::Equal:
+ return VK_COMPARE_OP_EQUAL;
+ case wgpu::CompareFunction::NotEqual:
+ return VK_COMPARE_OP_NOT_EQUAL;
+ case wgpu::CompareFunction::Always:
+ return VK_COMPARE_OP_ALWAYS;
+
+ case wgpu::CompareFunction::Undefined:
+ break;
}
+ UNREACHABLE();
+}
+
+// Convert Dawn texture aspects to Vulkan texture aspect flags
+VkImageAspectFlags VulkanAspectMask(const Aspect& aspects) {
+ VkImageAspectFlags flags = 0;
+ for (Aspect aspect : IterateEnumMask(aspects)) {
+ switch (aspect) {
+ case Aspect::Color:
+ flags |= VK_IMAGE_ASPECT_COLOR_BIT;
+ break;
+ case Aspect::Depth:
+ flags |= VK_IMAGE_ASPECT_DEPTH_BIT;
+ break;
+ case Aspect::Stencil:
+ flags |= VK_IMAGE_ASPECT_STENCIL_BIT;
+ break;
+
+ case Aspect::CombinedDepthStencil:
+ flags |= VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
+ break;
+
+ case Aspect::Plane0:
+ flags |= VK_IMAGE_ASPECT_PLANE_0_BIT;
+ break;
+ case Aspect::Plane1:
+ flags |= VK_IMAGE_ASPECT_PLANE_1_BIT;
+ break;
- // Convert Dawn texture aspects to Vulkan texture aspect flags
- VkImageAspectFlags VulkanAspectMask(const Aspect& aspects) {
- VkImageAspectFlags flags = 0;
- for (Aspect aspect : IterateEnumMask(aspects)) {
- switch (aspect) {
- case Aspect::Color:
- flags |= VK_IMAGE_ASPECT_COLOR_BIT;
- break;
- case Aspect::Depth:
- flags |= VK_IMAGE_ASPECT_DEPTH_BIT;
- break;
- case Aspect::Stencil:
- flags |= VK_IMAGE_ASPECT_STENCIL_BIT;
- break;
-
- case Aspect::CombinedDepthStencil:
- flags |= VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
- break;
-
- case Aspect::Plane0:
- flags |= VK_IMAGE_ASPECT_PLANE_0_BIT;
- break;
- case Aspect::Plane1:
- flags |= VK_IMAGE_ASPECT_PLANE_1_BIT;
- break;
-
- case Aspect::None:
- UNREACHABLE();
- }
+ case Aspect::None:
+ UNREACHABLE();
}
- return flags;
+ }
+ return flags;
+}
+
+// The Vulkan spec requires that the source/destination region specified by each element of
+// pRegions be contained within srcImage/dstImage. Here the size of the image refers to the
+// virtual size, while Dawn validates the texture copy extent against the physical size, so we
+// need to re-compute the copy extent to ensure it fits in the virtual size of the subresource.
+Extent3D ComputeTextureCopyExtent(const TextureCopy& textureCopy, const Extent3D& copySize) {
+ Extent3D validTextureCopyExtent = copySize;
+ const TextureBase* texture = textureCopy.texture.Get();
+ Extent3D virtualSizeAtLevel =
+ texture->GetMipLevelSingleSubresourceVirtualSize(textureCopy.mipLevel);
+ ASSERT(textureCopy.origin.x <= virtualSizeAtLevel.width);
+ ASSERT(textureCopy.origin.y <= virtualSizeAtLevel.height);
+ if (copySize.width > virtualSizeAtLevel.width - textureCopy.origin.x) {
+ ASSERT(texture->GetFormat().isCompressed);
+ validTextureCopyExtent.width = virtualSizeAtLevel.width - textureCopy.origin.x;
+ }
+ if (copySize.height > virtualSizeAtLevel.height - textureCopy.origin.y) {
+ ASSERT(texture->GetFormat().isCompressed);
+ validTextureCopyExtent.height = virtualSizeAtLevel.height - textureCopy.origin.y;
}
- // Vulkan SPEC requires the source/destination region specified by each element of
- // pRegions must be a region that is contained within srcImage/dstImage. Here the size of
- // the image refers to the virtual size, while Dawn validates texture copy extent with the
- // physical size, so we need to re-calculate the texture copy extent to ensure it should fit
- // in the virtual size of the subresource.
- Extent3D ComputeTextureCopyExtent(const TextureCopy& textureCopy, const Extent3D& copySize) {
- Extent3D validTextureCopyExtent = copySize;
- const TextureBase* texture = textureCopy.texture.Get();
- Extent3D virtualSizeAtLevel = texture->GetMipLevelVirtualSize(textureCopy.mipLevel);
- ASSERT(textureCopy.origin.x <= virtualSizeAtLevel.width);
- ASSERT(textureCopy.origin.y <= virtualSizeAtLevel.height);
- if (copySize.width > virtualSizeAtLevel.width - textureCopy.origin.x) {
- ASSERT(texture->GetFormat().isCompressed);
- validTextureCopyExtent.width = virtualSizeAtLevel.width - textureCopy.origin.x;
- }
- if (copySize.height > virtualSizeAtLevel.height - textureCopy.origin.y) {
- ASSERT(texture->GetFormat().isCompressed);
- validTextureCopyExtent.height = virtualSizeAtLevel.height - textureCopy.origin.y;
+ return validTextureCopyExtent;
+}
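To make the clamp above concrete, here is a worked example with hypothetical values for a block-compressed format, where the physical size is rounded up to whole 4x4 blocks:

// Worked example (hypothetical values), assuming a BC-compressed format:
//   virtualSizeAtLevel = {10, 10, 1}, textureCopy.origin = (0, 0), copySize = {12, 12, 1}
//   (12x12 is the physical size that Dawn's validation accepted).
// The clamps above produce:
//   validTextureCopyExtent.width  = 10 - 0 = 10
//   validTextureCopyExtent.height = 10 - 0 = 10
// so the VkBufferImageCopy stays within the 10x10 virtual size of the subresource.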
+
+VkBufferImageCopy ComputeBufferImageCopyRegion(const BufferCopy& bufferCopy,
+ const TextureCopy& textureCopy,
+ const Extent3D& copySize) {
+ TextureDataLayout passDataLayout;
+ passDataLayout.offset = bufferCopy.offset;
+ passDataLayout.rowsPerImage = bufferCopy.rowsPerImage;
+ passDataLayout.bytesPerRow = bufferCopy.bytesPerRow;
+ return ComputeBufferImageCopyRegion(passDataLayout, textureCopy, copySize);
+}
+
+VkBufferImageCopy ComputeBufferImageCopyRegion(const TextureDataLayout& dataLayout,
+ const TextureCopy& textureCopy,
+ const Extent3D& copySize) {
+ const Texture* texture = ToBackend(textureCopy.texture.Get());
+
+ VkBufferImageCopy region;
+
+ region.bufferOffset = dataLayout.offset;
+ // In Vulkan the row length is in texels while it is in bytes for Dawn
+ const TexelBlockInfo& blockInfo = texture->GetFormat().GetAspectInfo(textureCopy.aspect).block;
+ ASSERT(dataLayout.bytesPerRow % blockInfo.byteSize == 0);
+ region.bufferRowLength = dataLayout.bytesPerRow / blockInfo.byteSize * blockInfo.width;
+ region.bufferImageHeight = dataLayout.rowsPerImage * blockInfo.height;
+
+ region.imageSubresource.aspectMask = VulkanAspectMask(textureCopy.aspect);
+ region.imageSubresource.mipLevel = textureCopy.mipLevel;
+
+ switch (textureCopy.texture->GetDimension()) {
+ case wgpu::TextureDimension::e1D:
+ ASSERT(textureCopy.origin.z == 0 && copySize.depthOrArrayLayers == 1);
+ region.imageOffset.x = textureCopy.origin.x;
+ region.imageOffset.y = 0;
+ region.imageOffset.z = 0;
+ region.imageSubresource.baseArrayLayer = 0;
+ region.imageSubresource.layerCount = 1;
+
+ ASSERT(!textureCopy.texture->GetFormat().isCompressed);
+ region.imageExtent.width = copySize.width;
+ region.imageExtent.height = 1;
+ region.imageExtent.depth = 1;
+ break;
+
+ case wgpu::TextureDimension::e2D: {
+ region.imageOffset.x = textureCopy.origin.x;
+ region.imageOffset.y = textureCopy.origin.y;
+ region.imageOffset.z = 0;
+ region.imageSubresource.baseArrayLayer = textureCopy.origin.z;
+ region.imageSubresource.layerCount = copySize.depthOrArrayLayers;
+
+ Extent3D imageExtent = ComputeTextureCopyExtent(textureCopy, copySize);
+ region.imageExtent.width = imageExtent.width;
+ region.imageExtent.height = imageExtent.height;
+ region.imageExtent.depth = 1;
+ break;
}
- return validTextureCopyExtent;
+ case wgpu::TextureDimension::e3D: {
+ region.imageOffset.x = textureCopy.origin.x;
+ region.imageOffset.y = textureCopy.origin.y;
+ region.imageOffset.z = textureCopy.origin.z;
+ region.imageSubresource.baseArrayLayer = 0;
+ region.imageSubresource.layerCount = 1;
+
+ ASSERT(!textureCopy.texture->GetFormat().isCompressed);
+ region.imageExtent.width = copySize.width;
+ region.imageExtent.height = copySize.height;
+ region.imageExtent.depth = copySize.depthOrArrayLayers;
+ break;
+ }
}
- VkBufferImageCopy ComputeBufferImageCopyRegion(const BufferCopy& bufferCopy,
- const TextureCopy& textureCopy,
- const Extent3D& copySize) {
- TextureDataLayout passDataLayout;
- passDataLayout.offset = bufferCopy.offset;
- passDataLayout.rowsPerImage = bufferCopy.rowsPerImage;
- passDataLayout.bytesPerRow = bufferCopy.bytesPerRow;
- return ComputeBufferImageCopyRegion(passDataLayout, textureCopy, copySize);
- }
+ return region;
+}
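To make the texels-vs-bytes conversion in the function above concrete, a worked example with hypothetical values for a BC1 aspect (4x4 texel blocks, 8 bytes per block):

// Worked example (hypothetical values):
//   dataLayout.bytesPerRow = 256, dataLayout.rowsPerImage = 16
//   region.bufferRowLength   = 256 / 8 * 4 = 128 texels per row, as Vulkan expects
//   region.bufferImageHeight = 16 * 4      = 64 texel rows per image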
- VkBufferImageCopy ComputeBufferImageCopyRegion(const TextureDataLayout& dataLayout,
- const TextureCopy& textureCopy,
- const Extent3D& copySize) {
- const Texture* texture = ToBackend(textureCopy.texture.Get());
-
- VkBufferImageCopy region;
-
- region.bufferOffset = dataLayout.offset;
- // In Vulkan the row length is in texels while it is in bytes for Dawn
- const TexelBlockInfo& blockInfo =
- texture->GetFormat().GetAspectInfo(textureCopy.aspect).block;
- ASSERT(dataLayout.bytesPerRow % blockInfo.byteSize == 0);
- region.bufferRowLength = dataLayout.bytesPerRow / blockInfo.byteSize * blockInfo.width;
- region.bufferImageHeight = dataLayout.rowsPerImage * blockInfo.height;
-
- region.imageSubresource.aspectMask = VulkanAspectMask(textureCopy.aspect);
- region.imageSubresource.mipLevel = textureCopy.mipLevel;
-
- switch (textureCopy.texture->GetDimension()) {
- case wgpu::TextureDimension::e1D:
- ASSERT(textureCopy.origin.z == 0 && copySize.depthOrArrayLayers == 1);
- region.imageOffset.x = textureCopy.origin.x;
- region.imageOffset.y = 0;
- region.imageOffset.z = 0;
- region.imageSubresource.baseArrayLayer = 0;
- region.imageSubresource.layerCount = 1;
-
- ASSERT(!textureCopy.texture->GetFormat().isCompressed);
- region.imageExtent.width = copySize.width;
- region.imageExtent.height = 1;
- region.imageExtent.depth = 1;
- break;
+void SetDebugNameInternal(Device* device,
+ VkObjectType objectType,
+ uint64_t objectHandle,
+ const char* prefix,
+ std::string label) {
+ if (!objectHandle) {
+ return;
+ }
- case wgpu::TextureDimension::e2D: {
- region.imageOffset.x = textureCopy.origin.x;
- region.imageOffset.y = textureCopy.origin.y;
- region.imageOffset.z = 0;
- region.imageSubresource.baseArrayLayer = textureCopy.origin.z;
- region.imageSubresource.layerCount = copySize.depthOrArrayLayers;
-
- Extent3D imageExtent = ComputeTextureCopyExtent(textureCopy, copySize);
- region.imageExtent.width = imageExtent.width;
- region.imageExtent.height = imageExtent.height;
- region.imageExtent.depth = 1;
- break;
- }
-
- case wgpu::TextureDimension::e3D: {
- region.imageOffset.x = textureCopy.origin.x;
- region.imageOffset.y = textureCopy.origin.y;
- region.imageOffset.z = textureCopy.origin.z;
- region.imageSubresource.baseArrayLayer = 0;
- region.imageSubresource.layerCount = 1;
-
- ASSERT(!textureCopy.texture->GetFormat().isCompressed);
- region.imageExtent.width = copySize.width;
- region.imageExtent.height = copySize.height;
- region.imageExtent.depth = copySize.depthOrArrayLayers;
- break;
- }
+ if (device->GetGlobalInfo().HasExt(InstanceExt::DebugUtils)) {
+ VkDebugUtilsObjectNameInfoEXT objectNameInfo;
+ objectNameInfo.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT;
+ objectNameInfo.pNext = nullptr;
+ objectNameInfo.objectType = objectType;
+ objectNameInfo.objectHandle = objectHandle;
+
+ std::ostringstream objectNameStream;
+ // Prefix with the device's message ID so that if this label appears in a validation
+ // message it can be parsed out and the message can be associated with the right device.
+ objectNameStream << device->GetDebugPrefix() << kDeviceDebugSeparator << prefix;
+ if (!label.empty() && device->IsToggleEnabled(Toggle::UseUserDefinedLabelsInBackend)) {
+ objectNameStream << "_" << label;
}
+ std::string objectName = objectNameStream.str();
+ objectNameInfo.pObjectName = objectName.c_str();
+ device->fn.SetDebugUtilsObjectNameEXT(device->GetVkDevice(), &objectNameInfo);
+ }
+}
+
+std::string GetNextDeviceDebugPrefix() {
+ static uint64_t nextDeviceDebugId = 1;
+ std::ostringstream objectName;
+ objectName << kDeviceDebugPrefix << nextDeviceDebugId++;
+ return objectName.str();
+}
+
+std::string GetDeviceDebugPrefixFromDebugName(const char* debugName) {
+ if (debugName == nullptr) {
+ return {};
+ }
- return region;
+ if (strncmp(debugName, kDeviceDebugPrefix, sizeof(kDeviceDebugPrefix) - 1) != 0) {
+ return {};
}
- void SetDebugNameInternal(Device* device,
- VkObjectType objectType,
- uint64_t objectHandle,
- const char* prefix,
- std::string label) {
- if (!objectHandle) {
- return;
- }
+ const char* separator = strstr(debugName + sizeof(kDeviceDebugPrefix), kDeviceDebugSeparator);
+ if (separator == nullptr) {
+ return {};
+ }
- if (device->GetGlobalInfo().HasExt(InstanceExt::DebugUtils)) {
- VkDebugUtilsObjectNameInfoEXT objectNameInfo;
- objectNameInfo.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT;
- objectNameInfo.pNext = nullptr;
- objectNameInfo.objectType = objectType;
- objectNameInfo.objectHandle = objectHandle;
-
- if (label.empty() || !device->IsToggleEnabled(Toggle::UseUserDefinedLabelsInBackend)) {
- objectNameInfo.pObjectName = prefix;
- device->fn.SetDebugUtilsObjectNameEXT(device->GetVkDevice(), &objectNameInfo);
- return;
- }
-
- std::string objectName = prefix;
- objectName += "_";
- objectName += label;
- objectNameInfo.pObjectName = objectName.c_str();
- device->fn.SetDebugUtilsObjectNameEXT(device->GetVkDevice(), &objectNameInfo);
- }
+ size_t length = separator - debugName;
+ return std::string(debugName, length);
+}
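A small sketch of how the debug prefix round-trips through an object label; the label text is made up for illustration:

std::string prefix = GetNextDeviceDebugPrefix();  // e.g. "DawnDbg=1"
std::string label = prefix + kDeviceDebugSeparator + "Texture_myTexture";
// When `label` later appears in a validation message, the owning device can be recovered:
std::string parsed = GetDeviceDebugPrefixFromDebugName(label.c_str());  // "DawnDbg=1"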
+
+VkSpecializationInfo* GetVkSpecializationInfo(
+ const ProgrammableStage& programmableStage,
+ VkSpecializationInfo* specializationInfo,
+ std::vector<OverridableConstantScalar>* specializationDataEntries,
+ std::vector<VkSpecializationMapEntry>* specializationMapEntries) {
+ ASSERT(specializationInfo);
+ ASSERT(specializationDataEntries);
+ ASSERT(specializationMapEntries);
+
+ if (programmableStage.constants.size() == 0) {
+ return nullptr;
}
- VkSpecializationInfo* GetVkSpecializationInfo(
- const ProgrammableStage& programmableStage,
- VkSpecializationInfo* specializationInfo,
- std::vector<OverridableConstantScalar>* specializationDataEntries,
- std::vector<VkSpecializationMapEntry>* specializationMapEntries) {
- ASSERT(specializationInfo);
- ASSERT(specializationDataEntries);
- ASSERT(specializationMapEntries);
-
- if (programmableStage.constants.size() == 0) {
- return nullptr;
- }
+ const EntryPointMetadata& entryPointMetaData =
+ programmableStage.module->GetEntryPoint(programmableStage.entryPoint);
- const EntryPointMetadata& entryPointMetaData =
- programmableStage.module->GetEntryPoint(programmableStage.entryPoint);
-
- for (const auto& pipelineConstant : programmableStage.constants) {
- const std::string& identifier = pipelineConstant.first;
- double value = pipelineConstant.second;
-
- // This is already validated so `identifier` must exist
- const auto& moduleConstant = entryPointMetaData.overridableConstants.at(identifier);
-
- specializationMapEntries->push_back(
- VkSpecializationMapEntry{moduleConstant.id,
- static_cast<uint32_t>(specializationDataEntries->size() *
- sizeof(OverridableConstantScalar)),
- sizeof(OverridableConstantScalar)});
-
- OverridableConstantScalar entry{};
- switch (moduleConstant.type) {
- case EntryPointMetadata::OverridableConstant::Type::Boolean:
- entry.b = static_cast<int32_t>(value);
- break;
- case EntryPointMetadata::OverridableConstant::Type::Float32:
- entry.f32 = static_cast<float>(value);
- break;
- case EntryPointMetadata::OverridableConstant::Type::Int32:
- entry.i32 = static_cast<int32_t>(value);
- break;
- case EntryPointMetadata::OverridableConstant::Type::Uint32:
- entry.u32 = static_cast<uint32_t>(value);
- break;
- default:
- UNREACHABLE();
- }
- specializationDataEntries->push_back(entry);
- }
+ for (const auto& pipelineConstant : programmableStage.constants) {
+ const std::string& identifier = pipelineConstant.first;
+ double value = pipelineConstant.second;
- specializationInfo->mapEntryCount = static_cast<uint32_t>(specializationMapEntries->size());
- specializationInfo->pMapEntries = specializationMapEntries->data();
- specializationInfo->dataSize =
- specializationDataEntries->size() * sizeof(OverridableConstantScalar);
- specializationInfo->pData = specializationDataEntries->data();
+ // This is already validated so `identifier` must exist
+ const auto& moduleConstant = entryPointMetaData.overridableConstants.at(identifier);
- return specializationInfo;
+ specializationMapEntries->push_back(
+ VkSpecializationMapEntry{moduleConstant.id,
+ static_cast<uint32_t>(specializationDataEntries->size() *
+ sizeof(OverridableConstantScalar)),
+ sizeof(OverridableConstantScalar)});
+
+ OverridableConstantScalar entry{};
+ switch (moduleConstant.type) {
+ case EntryPointMetadata::OverridableConstant::Type::Boolean:
+ entry.b = static_cast<int32_t>(value);
+ break;
+ case EntryPointMetadata::OverridableConstant::Type::Float32:
+ entry.f32 = static_cast<float>(value);
+ break;
+ case EntryPointMetadata::OverridableConstant::Type::Int32:
+ entry.i32 = static_cast<int32_t>(value);
+ break;
+ case EntryPointMetadata::OverridableConstant::Type::Uint32:
+ entry.u32 = static_cast<uint32_t>(value);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ specializationDataEntries->push_back(entry);
}
+ specializationInfo->mapEntryCount = static_cast<uint32_t>(specializationMapEntries->size());
+ specializationInfo->pMapEntries = specializationMapEntries->data();
+ specializationInfo->dataSize =
+ specializationDataEntries->size() * sizeof(OverridableConstantScalar);
+ specializationInfo->pData = specializationDataEntries->data();
+
+ return specializationInfo;
+}
+
} // namespace dawn::native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/UtilsVulkan.h b/chromium/third_party/dawn/src/dawn/native/vulkan/UtilsVulkan.h
index 650fe6fc889..7c63b1dfcdd 100644
--- a/chromium/third_party/dawn/src/dawn/native/vulkan/UtilsVulkan.h
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/UtilsVulkan.h
@@ -15,138 +15,143 @@
#ifndef SRC_DAWN_NATIVE_VULKAN_UTILSVULKAN_H_
#define SRC_DAWN_NATIVE_VULKAN_UTILSVULKAN_H_
+#include <string>
+#include <vector>
+
#include "dawn/common/vulkan_platform.h"
#include "dawn/native/Commands.h"
#include "dawn/native/dawn_platform.h"
namespace dawn::native {
- struct ProgrammableStage;
- union OverridableConstantScalar;
+struct ProgrammableStage;
+union OverridableConstantScalar;
} // namespace dawn::native
namespace dawn::native::vulkan {
- class Device;
+class Device;
- // A Helper type used to build a pNext chain of extension structs.
- // Usage is:
- // 1) Create instance, passing the address of the first struct in the chain. This requires
- // pNext to be nullptr. If you already have a chain you need to pass a pointer to the tail
- // of it.
- //
- // 2) Call Add(&vk_struct) every time a new struct needs to be appended to the chain.
- //
- // 3) Alternatively, call Add(&vk_struct, VK_STRUCTURE_TYPE_XXX) to initialize the struct
- // with a given VkStructureType value while appending it to the chain.
- //
- // Examples:
- // VkPhysicalFeatures2 features2 = {
- // .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2,
- // .pNext = nullptr,
- // };
- //
- // PNextChainBuilder featuresChain(&features2);
- //
- // featuresChain.Add(&featuresExtensions.subgroupSizeControl,
- // VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT);
+// A helper type used to build a pNext chain of extension structs.
+// Usage is:
+// 1) Create a builder instance, passing the address of the first struct in the chain. This
+//    requires pNext to be nullptr. If you already have a chain, pass a pointer to its tail
+//    instead.
+//
+// 2) Call Add(&vk_struct) every time a new struct needs to be appended to the chain.
+//
+// 3) Alternatively, call Add(&vk_struct, VK_STRUCTURE_TYPE_XXX) to initialize the struct
+// with a given VkStructureType value while appending it to the chain.
+//
+// Examples:
+// VkPhysicalFeatures2 features2 = {
+// .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2,
+// .pNext = nullptr,
+// };
+//
+// PNextChainBuilder featuresChain(&features2);
+//
+// featuresChain.Add(&featuresExtensions.subgroupSizeControl,
+// VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT);
+//
+struct PNextChainBuilder {
+ // Constructor takes the address of a Vulkan structure instance, and
+ // walks its pNext chain to record the current location of its tail.
//
- struct PNextChainBuilder {
- // Constructor takes the address of a Vulkan structure instance, and
- // walks its pNext chain to record the current location of its tail.
- //
- // NOTE: Some VK_STRUCT_TYPEs define their pNext field as a const void*
- // which is why the VkBaseOutStructure* casts below are necessary.
- template <typename VK_STRUCT_TYPE>
- explicit PNextChainBuilder(VK_STRUCT_TYPE* head)
- : mCurrent(reinterpret_cast<VkBaseOutStructure*>(head)) {
- while (mCurrent->pNext != nullptr) {
- mCurrent = mCurrent->pNext;
- }
- }
-
- // Add one item to the chain. |vk_struct| must be a Vulkan structure
- // that is already initialized.
- template <typename VK_STRUCT_TYPE>
- void Add(VK_STRUCT_TYPE* vkStruct) {
- // Sanity checks to ensure proper type safety.
- static_assert(
- offsetof(VK_STRUCT_TYPE, sType) == offsetof(VkBaseOutStructure, sType) &&
- offsetof(VK_STRUCT_TYPE, pNext) == offsetof(VkBaseOutStructure, pNext),
- "Argument type is not a proper Vulkan structure type");
- vkStruct->pNext = nullptr;
-
- mCurrent->pNext = reinterpret_cast<VkBaseOutStructure*>(vkStruct);
+ // NOTE: Some VK_STRUCT_TYPEs define their pNext field as a const void*
+ // which is why the VkBaseOutStructure* casts below are necessary.
+ template <typename VK_STRUCT_TYPE>
+ explicit PNextChainBuilder(VK_STRUCT_TYPE* head)
+ : mCurrent(reinterpret_cast<VkBaseOutStructure*>(head)) {
+ while (mCurrent->pNext != nullptr) {
mCurrent = mCurrent->pNext;
}
+ }
- // A variant of Add() above that also initializes the |sType| field in |vk_struct|.
- template <typename VK_STRUCT_TYPE>
- void Add(VK_STRUCT_TYPE* vkStruct, VkStructureType sType) {
- vkStruct->sType = sType;
- Add(vkStruct);
- }
-
- private:
- VkBaseOutStructure* mCurrent;
- };
-
- VkCompareOp ToVulkanCompareOp(wgpu::CompareFunction op);
-
- VkImageAspectFlags VulkanAspectMask(const Aspect& aspects);
-
- Extent3D ComputeTextureCopyExtent(const TextureCopy& textureCopy, const Extent3D& copySize);
-
- VkBufferImageCopy ComputeBufferImageCopyRegion(const BufferCopy& bufferCopy,
- const TextureCopy& textureCopy,
- const Extent3D& copySize);
- VkBufferImageCopy ComputeBufferImageCopyRegion(const TextureDataLayout& dataLayout,
- const TextureCopy& textureCopy,
- const Extent3D& copySize);
-
- // Gets the associated VkObjectType for any non-dispatchable handle
- template <class HandleType>
- VkObjectType GetVkObjectType(HandleType handle);
-
- void SetDebugNameInternal(Device* device,
- VkObjectType objectType,
- uint64_t objectHandle,
- const char* prefix,
- std::string label);
-
- // The majority of Vulkan handles are "non-dispatchable". Dawn wraps these by overriding
- // VK_DEFINE_NON_DISPATCHABLE_HANDLE to add some capabilities like making null comparisons
- // easier. In those cases we can make setting the debug name a bit easier by getting the
- // object type automatically and handling the indirection to the native handle.
- template <typename Tag, typename HandleType>
- void SetDebugName(Device* device,
- detail::VkHandle<Tag, HandleType> objectHandle,
- const char* prefix,
- std::string label = "") {
- SetDebugNameInternal(device, GetVkObjectType(objectHandle),
- reinterpret_cast<uint64_t>(objectHandle.GetHandle()), prefix, label);
+ // Add one item to the chain. |vk_struct| must be a Vulkan structure
+ // that is already initialized.
+ template <typename VK_STRUCT_TYPE>
+ void Add(VK_STRUCT_TYPE* vkStruct) {
+ // Checks to ensure proper type safety.
+ static_assert(offsetof(VK_STRUCT_TYPE, sType) == offsetof(VkBaseOutStructure, sType) &&
+ offsetof(VK_STRUCT_TYPE, pNext) == offsetof(VkBaseOutStructure, pNext),
+ "Argument type is not a proper Vulkan structure type");
+ vkStruct->pNext = nullptr;
+
+ mCurrent->pNext = reinterpret_cast<VkBaseOutStructure*>(vkStruct);
+ mCurrent = mCurrent->pNext;
}
- // Handles like VkQueue and VKDevice require a special path because they are dispatchable, so
- // they require an explicit VkObjectType and cast to a uint64_t directly rather than by getting
- // the non-dispatchable wrapper's underlying handle.
- template <typename HandleType>
- void SetDebugName(Device* device,
- VkObjectType objectType,
- HandleType objectHandle,
- const char* prefix,
- std::string label = "") {
- SetDebugNameInternal(device, objectType, reinterpret_cast<uint64_t>(objectHandle), prefix,
- label);
+ // A variant of Add() above that also initializes the |sType| field in |vk_struct|.
+ template <typename VK_STRUCT_TYPE>
+ void Add(VK_STRUCT_TYPE* vkStruct, VkStructureType sType) {
+ vkStruct->sType = sType;
+ Add(vkStruct);
}
- // Returns nullptr or &specializationInfo
- // specializationInfo, specializationDataEntries, specializationMapEntries needs to
- // be alive at least until VkSpecializationInfo is passed into Vulkan Create*Pipelines
- VkSpecializationInfo* GetVkSpecializationInfo(
- const ProgrammableStage& programmableStage,
- VkSpecializationInfo* specializationInfo,
- std::vector<OverridableConstantScalar>* specializationDataEntries,
- std::vector<VkSpecializationMapEntry>* specializationMapEntries);
+ private:
+ VkBaseOutStructure* mCurrent;
+};
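A self-contained sketch of the usage pattern described in the comment above. The choice of VkPhysicalDevice16BitStorageFeatures is only illustrative; any extension struct with sType/pNext members works the same way.

VkPhysicalDeviceFeatures2 features2 = {};
features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
features2.pNext = nullptr;

VkPhysicalDevice16BitStorageFeatures storage16BitFeatures = {};

PNextChainBuilder featuresChain(&features2);
featuresChain.Add(&storage16BitFeatures,
                  VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES);
// features2.pNext now points at storage16BitFeatures, ready to be passed to
// vkGetPhysicalDeviceFeatures2().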
+
+VkCompareOp ToVulkanCompareOp(wgpu::CompareFunction op);
+
+VkImageAspectFlags VulkanAspectMask(const Aspect& aspects);
+
+Extent3D ComputeTextureCopyExtent(const TextureCopy& textureCopy, const Extent3D& copySize);
+
+VkBufferImageCopy ComputeBufferImageCopyRegion(const BufferCopy& bufferCopy,
+ const TextureCopy& textureCopy,
+ const Extent3D& copySize);
+VkBufferImageCopy ComputeBufferImageCopyRegion(const TextureDataLayout& dataLayout,
+ const TextureCopy& textureCopy,
+ const Extent3D& copySize);
+
+// Gets the associated VkObjectType for any non-dispatchable handle
+template <class HandleType>
+VkObjectType GetVkObjectType(HandleType handle);
+
+void SetDebugNameInternal(Device* device,
+ VkObjectType objectType,
+ uint64_t objectHandle,
+ const char* prefix,
+ std::string label);
+
+// The majority of Vulkan handles are "non-dispatchable". Dawn wraps these by overriding
+// VK_DEFINE_NON_DISPATCHABLE_HANDLE to add some capabilities like making null comparisons
+// easier. In those cases we can make setting the debug name a bit easier by getting the
+// object type automatically and handling the indirection to the native handle.
+template <typename Tag, typename HandleType>
+void SetDebugName(Device* device,
+ detail::VkHandle<Tag, HandleType> objectHandle,
+ const char* prefix,
+ std::string label = "") {
+ SetDebugNameInternal(device, GetVkObjectType(objectHandle),
+ reinterpret_cast<uint64_t>(objectHandle.GetHandle()), prefix, label);
+}
+
+// Handles like VkQueue and VkDevice require a special path because they are dispatchable, so
+// they require an explicit VkObjectType and cast to a uint64_t directly rather than by getting
+// the non-dispatchable wrapper's underlying handle.
+template <typename HandleType>
+void SetDebugName(Device* device,
+ VkObjectType objectType,
+ HandleType objectHandle,
+ const char* prefix,
+ std::string label = "") {
+ SetDebugNameInternal(device, objectType, reinterpret_cast<uint64_t>(objectHandle), prefix,
+ label);
+}
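A brief sketch of both overloads, assuming `device` is a dawn::native::vulkan::Device*, `bufferHandle` is one of Dawn's wrapped non-dispatchable VkBuffer handles, and `queue` is a raw VkQueue:

// Non-dispatchable: the VkObjectType is deduced via GetVkObjectType() and the wrapped
// handle is unpacked automatically.
SetDebugName(device, bufferHandle, "Dawn_Buffer", "myVertexBuffer");

// Dispatchable: the object type is passed explicitly and the raw handle is cast directly.
SetDebugName(device, VK_OBJECT_TYPE_QUEUE, queue, "Dawn_Queue");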
+
+std::string GetNextDeviceDebugPrefix();
+std::string GetDeviceDebugPrefixFromDebugName(const char* debugName);
+
+// Returns nullptr or &specializationInfo.
+// specializationInfo, specializationDataEntries, and specializationMapEntries need to stay
+// alive at least until the VkSpecializationInfo is passed into the Vulkan Create*Pipelines call.
+VkSpecializationInfo* GetVkSpecializationInfo(
+ const ProgrammableStage& programmableStage,
+ VkSpecializationInfo* specializationInfo,
+ std::vector<OverridableConstantScalar>* specializationDataEntries,
+ std::vector<VkSpecializationMapEntry>* specializationMapEntries);
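A minimal caller sketch of the lifetime rule above; `programmableStage` and the later pipeline-creation call are assumed to exist elsewhere:

VkSpecializationInfo specializationInfo = {};
std::vector<OverridableConstantScalar> specializationDataEntries;
std::vector<VkSpecializationMapEntry> specializationMapEntries;

VkPipelineShaderStageCreateInfo stageInfo = {};
stageInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
stageInfo.pSpecializationInfo = GetVkSpecializationInfo(
    programmableStage, &specializationInfo, &specializationDataEntries, &specializationMapEntries);

// specializationInfo, specializationDataEntries, and specializationMapEntries must all stay
// in scope until vkCreateGraphicsPipelines/vkCreateComputePipelines has consumed stageInfo.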
} // namespace dawn::native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/VulkanBackend.cpp b/chromium/third_party/dawn/src/dawn/native/vulkan/VulkanBackend.cpp
index e8f630a824e..0f460abe309 100644
--- a/chromium/third_party/dawn/src/dawn/native/vulkan/VulkanBackend.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/VulkanBackend.cpp
@@ -28,102 +28,95 @@
namespace dawn::native::vulkan {
- VkInstance GetInstance(WGPUDevice device) {
- Device* backendDevice = ToBackend(FromAPI(device));
- return backendDevice->GetVkInstance();
- }
-
- DAWN_NATIVE_EXPORT PFN_vkVoidFunction GetInstanceProcAddr(WGPUDevice device,
- const char* pName) {
- Device* backendDevice = ToBackend(FromAPI(device));
- return (*backendDevice->fn.GetInstanceProcAddr)(backendDevice->GetVkInstance(), pName);
- }
-
- // Explicitly export this function because it uses the "native" type for surfaces while the
- // header as seen in this file uses the wrapped type.
- DAWN_NATIVE_EXPORT DawnSwapChainImplementation
- CreateNativeSwapChainImpl(WGPUDevice device, ::VkSurfaceKHR surfaceNative) {
- Device* backendDevice = ToBackend(FromAPI(device));
- VkSurfaceKHR surface = VkSurfaceKHR::CreateFromHandle(surfaceNative);
-
- DawnSwapChainImplementation impl;
- impl = CreateSwapChainImplementation(new NativeSwapChainImpl(backendDevice, surface));
- impl.textureUsage = WGPUTextureUsage_Present;
-
- return impl;
- }
-
- WGPUTextureFormat GetNativeSwapChainPreferredFormat(
- const DawnSwapChainImplementation* swapChain) {
- NativeSwapChainImpl* impl = reinterpret_cast<NativeSwapChainImpl*>(swapChain->userData);
- return static_cast<WGPUTextureFormat>(impl->GetPreferredFormat());
- }
-
- AdapterDiscoveryOptions::AdapterDiscoveryOptions()
- : AdapterDiscoveryOptionsBase(WGPUBackendType_Vulkan) {
- }
-
-#if defined(DAWN_PLATFORM_LINUX)
- ExternalImageDescriptorOpaqueFD::ExternalImageDescriptorOpaqueFD()
- : ExternalImageDescriptorFD(ExternalImageType::OpaqueFD) {
- }
-
- ExternalImageDescriptorDmaBuf::ExternalImageDescriptorDmaBuf()
- : ExternalImageDescriptorFD(ExternalImageType::DmaBuf) {
- }
-
- ExternalImageExportInfoOpaqueFD::ExternalImageExportInfoOpaqueFD()
- : ExternalImageExportInfoFD(ExternalImageType::OpaqueFD) {
- }
-
- ExternalImageExportInfoDmaBuf::ExternalImageExportInfoDmaBuf()
- : ExternalImageExportInfoFD(ExternalImageType::DmaBuf) {
- }
-#endif // DAWN_PLATFORM_LINUX
-
- WGPUTexture WrapVulkanImage(WGPUDevice device, const ExternalImageDescriptorVk* descriptor) {
-#if defined(DAWN_PLATFORM_LINUX)
- switch (descriptor->GetType()) {
- case ExternalImageType::OpaqueFD:
- case ExternalImageType::DmaBuf: {
- Device* backendDevice = ToBackend(FromAPI(device));
- const ExternalImageDescriptorFD* fdDescriptor =
- static_cast<const ExternalImageDescriptorFD*>(descriptor);
-
- return ToAPI(backendDevice->CreateTextureWrappingVulkanImage(
- fdDescriptor, fdDescriptor->memoryFD, fdDescriptor->waitFDs));
- }
- default:
- return nullptr;
+VkInstance GetInstance(WGPUDevice device) {
+ Device* backendDevice = ToBackend(FromAPI(device));
+ return backendDevice->GetVkInstance();
+}
+
+DAWN_NATIVE_EXPORT PFN_vkVoidFunction GetInstanceProcAddr(WGPUDevice device, const char* pName) {
+ Device* backendDevice = ToBackend(FromAPI(device));
+ return (*backendDevice->fn.GetInstanceProcAddr)(backendDevice->GetVkInstance(), pName);
+}
+
+// Explicitly export this function because it uses the "native" type for surfaces while the
+// header as seen in this file uses the wrapped type.
+DAWN_NATIVE_EXPORT DawnSwapChainImplementation
+CreateNativeSwapChainImpl(WGPUDevice device, ::VkSurfaceKHR surfaceNative) {
+ Device* backendDevice = ToBackend(FromAPI(device));
+ VkSurfaceKHR surface = VkSurfaceKHR::CreateFromHandle(surfaceNative);
+
+ DawnSwapChainImplementation impl;
+ impl = CreateSwapChainImplementation(new NativeSwapChainImpl(backendDevice, surface));
+ impl.textureUsage = WGPUTextureUsage_Present;
+
+ return impl;
+}
+
+WGPUTextureFormat GetNativeSwapChainPreferredFormat(const DawnSwapChainImplementation* swapChain) {
+ NativeSwapChainImpl* impl = reinterpret_cast<NativeSwapChainImpl*>(swapChain->userData);
+ return static_cast<WGPUTextureFormat>(impl->GetPreferredFormat());
+}
+
+AdapterDiscoveryOptions::AdapterDiscoveryOptions()
+ : AdapterDiscoveryOptionsBase(WGPUBackendType_Vulkan) {}
+
+#if DAWN_PLATFORM_IS(LINUX)
+ExternalImageDescriptorOpaqueFD::ExternalImageDescriptorOpaqueFD()
+ : ExternalImageDescriptorFD(ExternalImageType::OpaqueFD) {}
+
+ExternalImageDescriptorDmaBuf::ExternalImageDescriptorDmaBuf()
+ : ExternalImageDescriptorFD(ExternalImageType::DmaBuf) {}
+
+ExternalImageExportInfoOpaqueFD::ExternalImageExportInfoOpaqueFD()
+ : ExternalImageExportInfoFD(ExternalImageType::OpaqueFD) {}
+
+ExternalImageExportInfoDmaBuf::ExternalImageExportInfoDmaBuf()
+ : ExternalImageExportInfoFD(ExternalImageType::DmaBuf) {}
+#endif // DAWN_PLATFORM_IS(LINUX)
+
+WGPUTexture WrapVulkanImage(WGPUDevice device, const ExternalImageDescriptorVk* descriptor) {
+#if DAWN_PLATFORM_IS(LINUX)
+ switch (descriptor->GetType()) {
+ case ExternalImageType::OpaqueFD:
+ case ExternalImageType::DmaBuf: {
+ Device* backendDevice = ToBackend(FromAPI(device));
+ const ExternalImageDescriptorFD* fdDescriptor =
+ static_cast<const ExternalImageDescriptorFD*>(descriptor);
+
+ return ToAPI(backendDevice->CreateTextureWrappingVulkanImage(
+ fdDescriptor, fdDescriptor->memoryFD, fdDescriptor->waitFDs));
}
-#else
- return nullptr;
-#endif // DAWN_PLATFORM_LINUX
+ default:
+ return nullptr;
}
-
- bool ExportVulkanImage(WGPUTexture texture,
- VkImageLayout desiredLayout,
- ExternalImageExportInfoVk* info) {
- if (texture == nullptr) {
- return false;
- }
-#if defined(DAWN_PLATFORM_LINUX)
- switch (info->GetType()) {
- case ExternalImageType::OpaqueFD:
- case ExternalImageType::DmaBuf: {
- Texture* backendTexture = ToBackend(FromAPI(texture));
- Device* device = ToBackend(backendTexture->GetDevice());
- ExternalImageExportInfoFD* fdInfo = static_cast<ExternalImageExportInfoFD*>(info);
-
- return device->SignalAndExportExternalTexture(backendTexture, desiredLayout, fdInfo,
- &fdInfo->semaphoreHandles);
- }
- default:
- return false;
- }
#else
+ return nullptr;
+#endif // DAWN_PLATFORM_IS(LINUX)
+}
+
+bool ExportVulkanImage(WGPUTexture texture,
+ VkImageLayout desiredLayout,
+ ExternalImageExportInfoVk* info) {
+ if (texture == nullptr) {
return false;
-#endif // DAWN_PLATFORM_LINUX
}
+#if DAWN_PLATFORM_IS(LINUX)
+ switch (info->GetType()) {
+ case ExternalImageType::OpaqueFD:
+ case ExternalImageType::DmaBuf: {
+ Texture* backendTexture = ToBackend(FromAPI(texture));
+ Device* device = ToBackend(backendTexture->GetDevice());
+ ExternalImageExportInfoFD* fdInfo = static_cast<ExternalImageExportInfoFD*>(info);
+
+ return device->SignalAndExportExternalTexture(backendTexture, desiredLayout, fdInfo,
+ &fdInfo->semaphoreHandles);
+ }
+ default:
+ return false;
+ }
+#else
+ return false;
+#endif // DAWN_PLATFORM_IS(LINUX)
+}
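A hedged sketch of the import/export entry points on Linux. Only the fields visible in this file (memoryFD, waitFDs, semaphoreHandles) are shown; the remaining required descriptor fields are omitted, and `device`, `importedFD`, and `acquireFD` are assumed to exist:

ExternalImageDescriptorOpaqueFD descriptor;
descriptor.memoryFD = importedFD;   // fd backing the external VkDeviceMemory
descriptor.waitFDs = {acquireFD};   // semaphore fds to wait on before first use
// ... other descriptor fields (texture descriptor, allocation info) omitted ...
WGPUTexture texture = WrapVulkanImage(device, &descriptor);

ExternalImageExportInfoOpaqueFD exportInfo;
if (ExportVulkanImage(texture, VK_IMAGE_LAYOUT_GENERAL, &exportInfo)) {
    // On success, exportInfo.semaphoreHandles holds fds to hand to the external consumer.
}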
} // namespace dawn::native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/VulkanError.cpp b/chromium/third_party/dawn/src/dawn/native/vulkan/VulkanError.cpp
index 49416b9c1d9..3b7b37cde30 100644
--- a/chromium/third_party/dawn/src/dawn/native/vulkan/VulkanError.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/VulkanError.cpp
@@ -18,92 +18,92 @@
namespace dawn::native::vulkan {
- const char* VkResultAsString(::VkResult result) {
- // Convert to a int32_t to silence and MSVC warning that the fake errors don't appear in
- // the original VkResult enum.
- int32_t code = static_cast<int32_t>(result);
+const char* VkResultAsString(::VkResult result) {
+    // Convert to an int32_t to silence an MSVC warning that the fake errors don't appear in
+    // the original VkResult enum.
+ int32_t code = static_cast<int32_t>(result);
- switch (code) {
- case VK_SUCCESS:
- return "VK_SUCCESS";
- case VK_NOT_READY:
- return "VK_NOT_READY";
- case VK_TIMEOUT:
- return "VK_TIMEOUT";
- case VK_EVENT_SET:
- return "VK_EVENT_SET";
- case VK_EVENT_RESET:
- return "VK_EVENT_RESET";
- case VK_INCOMPLETE:
- return "VK_INCOMPLETE";
- case VK_ERROR_OUT_OF_HOST_MEMORY:
- return "VK_ERROR_OUT_OF_HOST_MEMORY";
- case VK_ERROR_OUT_OF_DEVICE_MEMORY:
- return "VK_ERROR_OUT_OF_DEVICE_MEMORY";
- case VK_ERROR_INITIALIZATION_FAILED:
- return "VK_ERROR_INITIALIZATION_FAILED";
- case VK_ERROR_DEVICE_LOST:
- return "VK_ERROR_DEVICE_LOST";
- case VK_ERROR_MEMORY_MAP_FAILED:
- return "VK_ERROR_MEMORY_MAP_FAILED";
- case VK_ERROR_LAYER_NOT_PRESENT:
- return "VK_ERROR_LAYER_NOT_PRESENT";
- case VK_ERROR_EXTENSION_NOT_PRESENT:
- return "VK_ERROR_EXTENSION_NOT_PRESENT";
- case VK_ERROR_FEATURE_NOT_PRESENT:
- return "VK_ERROR_FEATURE_NOT_PRESENT";
- case VK_ERROR_INCOMPATIBLE_DRIVER:
- return "VK_ERROR_INCOMPATIBLE_DRIVER";
- case VK_ERROR_TOO_MANY_OBJECTS:
- return "VK_ERROR_TOO_MANY_OBJECTS";
- case VK_ERROR_FORMAT_NOT_SUPPORTED:
- return "VK_ERROR_FORMAT_NOT_SUPPORTED";
- case VK_ERROR_FRAGMENTED_POOL:
- return "VK_ERROR_FRAGMENTED_POOL";
+ switch (code) {
+ case VK_SUCCESS:
+ return "VK_SUCCESS";
+ case VK_NOT_READY:
+ return "VK_NOT_READY";
+ case VK_TIMEOUT:
+ return "VK_TIMEOUT";
+ case VK_EVENT_SET:
+ return "VK_EVENT_SET";
+ case VK_EVENT_RESET:
+ return "VK_EVENT_RESET";
+ case VK_INCOMPLETE:
+ return "VK_INCOMPLETE";
+ case VK_ERROR_OUT_OF_HOST_MEMORY:
+ return "VK_ERROR_OUT_OF_HOST_MEMORY";
+ case VK_ERROR_OUT_OF_DEVICE_MEMORY:
+ return "VK_ERROR_OUT_OF_DEVICE_MEMORY";
+ case VK_ERROR_INITIALIZATION_FAILED:
+ return "VK_ERROR_INITIALIZATION_FAILED";
+ case VK_ERROR_DEVICE_LOST:
+ return "VK_ERROR_DEVICE_LOST";
+ case VK_ERROR_MEMORY_MAP_FAILED:
+ return "VK_ERROR_MEMORY_MAP_FAILED";
+ case VK_ERROR_LAYER_NOT_PRESENT:
+ return "VK_ERROR_LAYER_NOT_PRESENT";
+ case VK_ERROR_EXTENSION_NOT_PRESENT:
+ return "VK_ERROR_EXTENSION_NOT_PRESENT";
+ case VK_ERROR_FEATURE_NOT_PRESENT:
+ return "VK_ERROR_FEATURE_NOT_PRESENT";
+ case VK_ERROR_INCOMPATIBLE_DRIVER:
+ return "VK_ERROR_INCOMPATIBLE_DRIVER";
+ case VK_ERROR_TOO_MANY_OBJECTS:
+ return "VK_ERROR_TOO_MANY_OBJECTS";
+ case VK_ERROR_FORMAT_NOT_SUPPORTED:
+ return "VK_ERROR_FORMAT_NOT_SUPPORTED";
+ case VK_ERROR_FRAGMENTED_POOL:
+ return "VK_ERROR_FRAGMENTED_POOL";
- case VK_ERROR_SURFACE_LOST_KHR:
- return "VK_ERROR_SURFACE_LOST_KHR";
- case VK_ERROR_NATIVE_WINDOW_IN_USE_KHR:
- return "VK_ERROR_NATIVE_WINDOW_IN_USE_KHR";
+ case VK_ERROR_SURFACE_LOST_KHR:
+ return "VK_ERROR_SURFACE_LOST_KHR";
+ case VK_ERROR_NATIVE_WINDOW_IN_USE_KHR:
+ return "VK_ERROR_NATIVE_WINDOW_IN_USE_KHR";
- case VK_FAKE_DEVICE_OOM_FOR_TESTING:
- return "VK_FAKE_DEVICE_OOM_FOR_TESTING";
- case VK_FAKE_ERROR_FOR_TESTING:
- return "VK_FAKE_ERROR_FOR_TESTING";
- default:
- return "<Unknown VkResult>";
- }
+ case VK_FAKE_DEVICE_OOM_FOR_TESTING:
+ return "VK_FAKE_DEVICE_OOM_FOR_TESTING";
+ case VK_FAKE_ERROR_FOR_TESTING:
+ return "VK_FAKE_ERROR_FOR_TESTING";
+ default:
+ return "<Unknown VkResult>";
}
+}
- MaybeError CheckVkSuccessImpl(VkResult result, const char* context) {
- if (DAWN_LIKELY(result == VK_SUCCESS)) {
- return {};
- }
+MaybeError CheckVkSuccessImpl(VkResult result, const char* context) {
+ if (DAWN_LIKELY(result == VK_SUCCESS)) {
+ return {};
+ }
- std::string message = std::string(context) + " failed with " + VkResultAsString(result);
+ std::string message = std::string(context) + " failed with " + VkResultAsString(result);
- if (result == VK_ERROR_DEVICE_LOST) {
- return DAWN_DEVICE_LOST_ERROR(message);
- } else {
- return DAWN_INTERNAL_ERROR(message);
- }
+ if (result == VK_ERROR_DEVICE_LOST) {
+ return DAWN_DEVICE_LOST_ERROR(message);
+ } else {
+ return DAWN_INTERNAL_ERROR(message);
}
+}
- MaybeError CheckVkOOMThenSuccessImpl(VkResult result, const char* context) {
- if (DAWN_LIKELY(result == VK_SUCCESS)) {
- return {};
- }
+MaybeError CheckVkOOMThenSuccessImpl(VkResult result, const char* context) {
+ if (DAWN_LIKELY(result == VK_SUCCESS)) {
+ return {};
+ }
- std::string message = std::string(context) + " failed with " + VkResultAsString(result);
+ std::string message = std::string(context) + " failed with " + VkResultAsString(result);
- if (result == VK_ERROR_OUT_OF_DEVICE_MEMORY || result == VK_ERROR_OUT_OF_HOST_MEMORY ||
- result == VK_FAKE_DEVICE_OOM_FOR_TESTING) {
- return DAWN_OUT_OF_MEMORY_ERROR(message);
- } else if (result == VK_ERROR_DEVICE_LOST) {
- return DAWN_DEVICE_LOST_ERROR(message);
- } else {
- return DAWN_INTERNAL_ERROR(message);
- }
+ if (result == VK_ERROR_OUT_OF_DEVICE_MEMORY || result == VK_ERROR_OUT_OF_HOST_MEMORY ||
+ result == VK_FAKE_DEVICE_OOM_FOR_TESTING) {
+ return DAWN_OUT_OF_MEMORY_ERROR(message);
+ } else if (result == VK_ERROR_DEVICE_LOST) {
+ return DAWN_DEVICE_LOST_ERROR(message);
+ } else {
+ return DAWN_INTERNAL_ERROR(message);
}
+}
} // namespace dawn::native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/VulkanError.h b/chromium/third_party/dawn/src/dawn/native/vulkan/VulkanError.h
index 90d1242b4dc..0651feea67c 100644
--- a/chromium/third_party/dawn/src/dawn/native/vulkan/VulkanError.h
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/VulkanError.h
@@ -23,11 +23,11 @@ constexpr VkResult VK_FAKE_DEVICE_OOM_FOR_TESTING = static_cast<VkResult>(VK_RES
namespace dawn::native::vulkan {
- // Returns a string version of the result.
- const char* VkResultAsString(::VkResult result);
+// Returns a string version of the result.
+const char* VkResultAsString(::VkResult result);
- MaybeError CheckVkSuccessImpl(VkResult result, const char* context);
- MaybeError CheckVkOOMThenSuccessImpl(VkResult result, const char* context);
+MaybeError CheckVkSuccessImpl(VkResult result, const char* context);
+MaybeError CheckVkOOMThenSuccessImpl(VkResult result, const char* context);
 // Returns a success only if result is VK_SUCCESS, an error with the context and stringified
// result value instead. Can be used like this:
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/VulkanExtensions.cpp b/chromium/third_party/dawn/src/dawn/native/vulkan/VulkanExtensions.cpp
index 3f54e546fc9..439e95d1291 100644
--- a/chromium/third_party/dawn/src/dawn/native/vulkan/VulkanExtensions.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/VulkanExtensions.cpp
@@ -14,323 +14,325 @@
#include "dawn/native/vulkan/VulkanExtensions.h"
-#include "dawn/common/Assert.h"
-#include "dawn/common/vulkan_platform.h"
-
#include <array>
#include <limits>
-namespace dawn::native::vulkan {
+#include "dawn/common/Assert.h"
+#include "dawn/common/vulkan_platform.h"
- static constexpr uint32_t VulkanVersion_1_1 = VK_MAKE_VERSION(1, 1, 0);
- static constexpr uint32_t VulkanVersion_1_2 = VK_MAKE_VERSION(1, 2, 0);
- static constexpr uint32_t VulkanVersion_1_3 = VK_MAKE_VERSION(1, 3, 0);
- static constexpr uint32_t NeverPromoted = std::numeric_limits<uint32_t>::max();
-
- // A static array for InstanceExtInfo that can be indexed with InstanceExts.
- // GetInstanceExtInfo checks that "index" matches the index used to access this array so an
- // assert will fire if it isn't in the correct order.
- static constexpr size_t kInstanceExtCount = static_cast<size_t>(InstanceExt::EnumCount);
- static constexpr std::array<InstanceExtInfo, kInstanceExtCount> sInstanceExtInfos{{
- //
- {InstanceExt::GetPhysicalDeviceProperties2, "VK_KHR_get_physical_device_properties2",
- VulkanVersion_1_1},
- {InstanceExt::ExternalMemoryCapabilities, "VK_KHR_external_memory_capabilities",
- VulkanVersion_1_1},
- {InstanceExt::ExternalSemaphoreCapabilities, "VK_KHR_external_semaphore_capabilities",
- VulkanVersion_1_1},
-
- {InstanceExt::Surface, "VK_KHR_surface", NeverPromoted},
- {InstanceExt::FuchsiaImagePipeSurface, "VK_FUCHSIA_imagepipe_surface", NeverPromoted},
- {InstanceExt::MetalSurface, "VK_EXT_metal_surface", NeverPromoted},
- {InstanceExt::WaylandSurface, "VK_KHR_wayland_surface", NeverPromoted},
- {InstanceExt::Win32Surface, "VK_KHR_win32_surface", NeverPromoted},
- {InstanceExt::XcbSurface, "VK_KHR_xcb_surface", NeverPromoted},
- {InstanceExt::XlibSurface, "VK_KHR_xlib_surface", NeverPromoted},
- {InstanceExt::AndroidSurface, "VK_KHR_android_surface", NeverPromoted},
-
- {InstanceExt::DebugUtils, "VK_EXT_debug_utils", NeverPromoted},
- {InstanceExt::ValidationFeatures, "VK_EXT_validation_features", NeverPromoted},
- //
- }};
-
- const InstanceExtInfo& GetInstanceExtInfo(InstanceExt ext) {
- uint32_t index = static_cast<uint32_t>(ext);
- ASSERT(index < sInstanceExtInfos.size());
- ASSERT(sInstanceExtInfos[index].index == ext);
- return sInstanceExtInfos[index];
- }
+namespace dawn::native::vulkan {
- std::unordered_map<std::string, InstanceExt> CreateInstanceExtNameMap() {
- std::unordered_map<std::string, InstanceExt> result;
- for (const InstanceExtInfo& info : sInstanceExtInfos) {
- result[info.name] = info.index;
- }
- return result;
+static constexpr uint32_t VulkanVersion_1_1 = VK_API_VERSION_1_1;
+static constexpr uint32_t VulkanVersion_1_2 = VK_API_VERSION_1_2;
+static constexpr uint32_t VulkanVersion_1_3 = VK_API_VERSION_1_3;
+static constexpr uint32_t NeverPromoted = std::numeric_limits<uint32_t>::max();
+
+// A static array for InstanceExtInfo that can be indexed with InstanceExts.
+// GetInstanceExtInfo checks that "index" matches the index used to access this array so an
+// assert will fire if it isn't in the correct order.
+static constexpr size_t kInstanceExtCount = static_cast<size_t>(InstanceExt::EnumCount);
+static constexpr std::array<InstanceExtInfo, kInstanceExtCount> sInstanceExtInfos{{
+ //
+ {InstanceExt::GetPhysicalDeviceProperties2, "VK_KHR_get_physical_device_properties2",
+ VulkanVersion_1_1},
+ {InstanceExt::ExternalMemoryCapabilities, "VK_KHR_external_memory_capabilities",
+ VulkanVersion_1_1},
+ {InstanceExt::ExternalSemaphoreCapabilities, "VK_KHR_external_semaphore_capabilities",
+ VulkanVersion_1_1},
+
+ {InstanceExt::Surface, "VK_KHR_surface", NeverPromoted},
+ {InstanceExt::FuchsiaImagePipeSurface, "VK_FUCHSIA_imagepipe_surface", NeverPromoted},
+ {InstanceExt::MetalSurface, "VK_EXT_metal_surface", NeverPromoted},
+ {InstanceExt::WaylandSurface, "VK_KHR_wayland_surface", NeverPromoted},
+ {InstanceExt::Win32Surface, "VK_KHR_win32_surface", NeverPromoted},
+ {InstanceExt::XcbSurface, "VK_KHR_xcb_surface", NeverPromoted},
+ {InstanceExt::XlibSurface, "VK_KHR_xlib_surface", NeverPromoted},
+ {InstanceExt::AndroidSurface, "VK_KHR_android_surface", NeverPromoted},
+
+ {InstanceExt::DebugUtils, "VK_EXT_debug_utils", NeverPromoted},
+ {InstanceExt::ValidationFeatures, "VK_EXT_validation_features", NeverPromoted},
+ //
+}};
+
+const InstanceExtInfo& GetInstanceExtInfo(InstanceExt ext) {
+ uint32_t index = static_cast<uint32_t>(ext);
+ ASSERT(index < sInstanceExtInfos.size());
+ ASSERT(sInstanceExtInfos[index].index == ext);
+ return sInstanceExtInfos[index];
+}
+
+std::unordered_map<std::string, InstanceExt> CreateInstanceExtNameMap() {
+ std::unordered_map<std::string, InstanceExt> result;
+ for (const InstanceExtInfo& info : sInstanceExtInfos) {
+ result[info.name] = info.index;
}
-
- InstanceExtSet EnsureDependencies(const InstanceExtSet& advertisedExts) {
- // We need to check that all transitive dependencies of extensions are advertised.
- // To do that in a single pass and no data structures, the extensions are topologically
- // sorted in the definition of InstanceExt.
- // To ensure the order is correct, we mark visited extensions in `visitedSet` and each
- // dependency check will first assert all its dependents have been visited.
- InstanceExtSet visitedSet;
- InstanceExtSet trimmedSet;
-
- auto HasDep = [&](InstanceExt ext) -> bool {
- ASSERT(visitedSet[ext]);
- return trimmedSet[ext];
- };
-
- for (uint32_t i = 0; i < sInstanceExtInfos.size(); i++) {
- InstanceExt ext = static_cast<InstanceExt>(i);
-
- bool hasDependencies = false;
- switch (ext) {
- case InstanceExt::GetPhysicalDeviceProperties2:
- case InstanceExt::Surface:
- case InstanceExt::DebugUtils:
- case InstanceExt::ValidationFeatures:
- hasDependencies = true;
- break;
-
- case InstanceExt::ExternalMemoryCapabilities:
- case InstanceExt::ExternalSemaphoreCapabilities:
- hasDependencies = HasDep(InstanceExt::GetPhysicalDeviceProperties2);
- break;
-
- case InstanceExt::AndroidSurface:
- case InstanceExt::FuchsiaImagePipeSurface:
- case InstanceExt::MetalSurface:
- case InstanceExt::WaylandSurface:
- case InstanceExt::Win32Surface:
- case InstanceExt::XcbSurface:
- case InstanceExt::XlibSurface:
- hasDependencies = HasDep(InstanceExt::Surface);
- break;
-
- case InstanceExt::EnumCount:
- UNREACHABLE();
- }
-
- trimmedSet.set(ext, hasDependencies && advertisedExts[ext]);
- visitedSet.set(ext, true);
+ return result;
+}
+
+InstanceExtSet EnsureDependencies(const InstanceExtSet& advertisedExts) {
+    // We need to check that all transitive dependencies of extensions are advertised.
+    // To do that in a single pass and with no extra data structures, the extensions are
+    // topologically sorted in the definition of InstanceExt.
+    // To ensure the order is correct, we mark visited extensions in `visitedSet` and each
+    // dependency check first asserts that all of its dependencies have been visited.
+ InstanceExtSet visitedSet;
+ InstanceExtSet trimmedSet;
+
+ auto HasDep = [&](InstanceExt ext) -> bool {
+ ASSERT(visitedSet[ext]);
+ return trimmedSet[ext];
+ };
+
+ for (uint32_t i = 0; i < sInstanceExtInfos.size(); i++) {
+ InstanceExt ext = static_cast<InstanceExt>(i);
+
+ bool hasDependencies = false;
+ switch (ext) {
+ case InstanceExt::GetPhysicalDeviceProperties2:
+ case InstanceExt::Surface:
+ case InstanceExt::DebugUtils:
+ case InstanceExt::ValidationFeatures:
+ hasDependencies = true;
+ break;
+
+ case InstanceExt::ExternalMemoryCapabilities:
+ case InstanceExt::ExternalSemaphoreCapabilities:
+ hasDependencies = HasDep(InstanceExt::GetPhysicalDeviceProperties2);
+ break;
+
+ case InstanceExt::AndroidSurface:
+ case InstanceExt::FuchsiaImagePipeSurface:
+ case InstanceExt::MetalSurface:
+ case InstanceExt::WaylandSurface:
+ case InstanceExt::Win32Surface:
+ case InstanceExt::XcbSurface:
+ case InstanceExt::XlibSurface:
+ hasDependencies = HasDep(InstanceExt::Surface);
+ break;
+
+ case InstanceExt::EnumCount:
+ UNREACHABLE();
}
- return trimmedSet;
+ trimmedSet.set(ext, hasDependencies && advertisedExts[ext]);
+ visitedSet.set(ext, true);
}
- void MarkPromotedExtensions(InstanceExtSet* extensions, uint32_t version) {
- for (const InstanceExtInfo& info : sInstanceExtInfos) {
- if (info.versionPromoted <= version) {
- extensions->set(info.index, true);
- }
- }
- }
-
- static constexpr size_t kDeviceExtCount = static_cast<size_t>(DeviceExt::EnumCount);
- static constexpr std::array<DeviceExtInfo, kDeviceExtCount> sDeviceExtInfos{{
- //
- {DeviceExt::BindMemory2, "VK_KHR_bind_memory2", VulkanVersion_1_1},
- {DeviceExt::Maintenance1, "VK_KHR_maintenance1", VulkanVersion_1_1},
- {DeviceExt::StorageBufferStorageClass, "VK_KHR_storage_buffer_storage_class",
- VulkanVersion_1_1},
- {DeviceExt::GetPhysicalDeviceProperties2, "VK_KHR_get_physical_device_properties2",
- VulkanVersion_1_1},
- {DeviceExt::GetMemoryRequirements2, "VK_KHR_get_memory_requirements2", VulkanVersion_1_1},
- {DeviceExt::ExternalMemoryCapabilities, "VK_KHR_external_memory_capabilities",
- VulkanVersion_1_1},
- {DeviceExt::ExternalSemaphoreCapabilities, "VK_KHR_external_semaphore_capabilities",
- VulkanVersion_1_1},
- {DeviceExt::ExternalMemory, "VK_KHR_external_memory", VulkanVersion_1_1},
- {DeviceExt::ExternalSemaphore, "VK_KHR_external_semaphore", VulkanVersion_1_1},
- {DeviceExt::_16BitStorage, "VK_KHR_16bit_storage", VulkanVersion_1_1},
- {DeviceExt::SamplerYCbCrConversion, "VK_KHR_sampler_ycbcr_conversion", VulkanVersion_1_1},
-
- {DeviceExt::DriverProperties, "VK_KHR_driver_properties", VulkanVersion_1_2},
- {DeviceExt::ImageFormatList, "VK_KHR_image_format_list", VulkanVersion_1_2},
- {DeviceExt::ShaderFloat16Int8, "VK_KHR_shader_float16_int8", VulkanVersion_1_2},
-
- {DeviceExt::ZeroInitializeWorkgroupMemory, "VK_KHR_zero_initialize_workgroup_memory",
- VulkanVersion_1_3},
-
- {DeviceExt::ExternalMemoryFD, "VK_KHR_external_memory_fd", NeverPromoted},
- {DeviceExt::ExternalMemoryDmaBuf, "VK_EXT_external_memory_dma_buf", NeverPromoted},
- {DeviceExt::ExternalMemoryZirconHandle, "VK_FUCHSIA_external_memory", NeverPromoted},
- {DeviceExt::ExternalSemaphoreFD, "VK_KHR_external_semaphore_fd", NeverPromoted},
- {DeviceExt::ExternalSemaphoreZirconHandle, "VK_FUCHSIA_external_semaphore", NeverPromoted},
-
- {DeviceExt::ImageDrmFormatModifier, "VK_EXT_image_drm_format_modifier", NeverPromoted},
- {DeviceExt::Swapchain, "VK_KHR_swapchain", NeverPromoted},
- {DeviceExt::SubgroupSizeControl, "VK_EXT_subgroup_size_control", NeverPromoted},
- //
- }};
-
- const DeviceExtInfo& GetDeviceExtInfo(DeviceExt ext) {
- uint32_t index = static_cast<uint32_t>(ext);
- ASSERT(index < sDeviceExtInfos.size());
- ASSERT(sDeviceExtInfos[index].index == ext);
- return sDeviceExtInfos[index];
- }
+ return trimmedSet;
+}
- std::unordered_map<std::string, DeviceExt> CreateDeviceExtNameMap() {
- std::unordered_map<std::string, DeviceExt> result;
- for (const DeviceExtInfo& info : sDeviceExtInfos) {
- result[info.name] = info.index;
+void MarkPromotedExtensions(InstanceExtSet* extensions, uint32_t version) {
+ for (const InstanceExtInfo& info : sInstanceExtInfos) {
+ if (info.versionPromoted <= version) {
+ extensions->set(info.index, true);
}
- return result;
}
-
- DeviceExtSet EnsureDependencies(const DeviceExtSet& advertisedExts,
- const InstanceExtSet& instanceExts,
- uint32_t icdVersion) {
- // This is very similar to EnsureDependencies for instanceExtSet. See comment there for
- // an explanation of what happens.
- DeviceExtSet visitedSet;
- DeviceExtSet trimmedSet;
-
- auto HasDep = [&](DeviceExt ext) -> bool {
- ASSERT(visitedSet[ext]);
- return trimmedSet[ext];
- };
-
- for (uint32_t i = 0; i < sDeviceExtInfos.size(); i++) {
- DeviceExt ext = static_cast<DeviceExt>(i);
-
- bool hasDependencies = false;
- switch (ext) {
- // Happy extensions don't need anybody else!
- case DeviceExt::BindMemory2:
- case DeviceExt::GetMemoryRequirements2:
- case DeviceExt::Maintenance1:
- case DeviceExt::ImageFormatList:
- case DeviceExt::StorageBufferStorageClass:
- hasDependencies = true;
- break;
-
- // Physical device extensions technically don't require the instance to support
- // them but VulkanFunctions only loads the function pointers if the instance
- // advertises the extension. So if we didn't have this check, we'd risk a calling
- // a nullptr.
- case DeviceExt::GetPhysicalDeviceProperties2:
- hasDependencies = instanceExts[InstanceExt::GetPhysicalDeviceProperties2];
- break;
- case DeviceExt::ExternalMemoryCapabilities:
- hasDependencies = instanceExts[InstanceExt::ExternalMemoryCapabilities] &&
- HasDep(DeviceExt::GetPhysicalDeviceProperties2);
- break;
- case DeviceExt::ExternalSemaphoreCapabilities:
- hasDependencies = instanceExts[InstanceExt::ExternalSemaphoreCapabilities] &&
- HasDep(DeviceExt::GetPhysicalDeviceProperties2);
- break;
-
- case DeviceExt::ImageDrmFormatModifier:
- hasDependencies = HasDep(DeviceExt::BindMemory2) &&
- HasDep(DeviceExt::GetPhysicalDeviceProperties2) &&
- HasDep(DeviceExt::ImageFormatList) &&
- HasDep(DeviceExt::SamplerYCbCrConversion);
- break;
-
- case DeviceExt::Swapchain:
- hasDependencies = instanceExts[InstanceExt::Surface];
- break;
-
- case DeviceExt::SamplerYCbCrConversion:
- hasDependencies = HasDep(DeviceExt::Maintenance1) &&
- HasDep(DeviceExt::BindMemory2) &&
- HasDep(DeviceExt::GetMemoryRequirements2) &&
- HasDep(DeviceExt::GetPhysicalDeviceProperties2);
- break;
-
- case DeviceExt::DriverProperties:
- case DeviceExt::ShaderFloat16Int8:
- hasDependencies = HasDep(DeviceExt::GetPhysicalDeviceProperties2);
- break;
-
- case DeviceExt::ExternalMemory:
- hasDependencies = HasDep(DeviceExt::ExternalMemoryCapabilities);
- break;
-
- case DeviceExt::ExternalSemaphore:
- hasDependencies = HasDep(DeviceExt::ExternalSemaphoreCapabilities);
- break;
-
- case DeviceExt::ExternalMemoryFD:
- case DeviceExt::ExternalMemoryZirconHandle:
- hasDependencies = HasDep(DeviceExt::ExternalMemory);
- break;
-
- case DeviceExt::ExternalMemoryDmaBuf:
- hasDependencies = HasDep(DeviceExt::ExternalMemoryFD);
- break;
-
- case DeviceExt::ExternalSemaphoreFD:
- case DeviceExt::ExternalSemaphoreZirconHandle:
- hasDependencies = HasDep(DeviceExt::ExternalSemaphore);
- break;
-
- case DeviceExt::_16BitStorage:
- hasDependencies = HasDep(DeviceExt::GetPhysicalDeviceProperties2) &&
- HasDep(DeviceExt::StorageBufferStorageClass);
- break;
-
- case DeviceExt::SubgroupSizeControl:
- // Using the extension requires DeviceExt::GetPhysicalDeviceProperties2, but we
- // don't need to check for it as it also requires Vulkan 1.1 in which
- // VK_KHR_get_physical_device_properties2 was promoted.
- hasDependencies = icdVersion >= VulkanVersion_1_1;
- break;
-
- case DeviceExt::ZeroInitializeWorkgroupMemory:
- hasDependencies = HasDep(DeviceExt::GetPhysicalDeviceProperties2);
- break;
-
- case DeviceExt::EnumCount:
- UNREACHABLE();
- }
-
- trimmedSet.set(ext, hasDependencies && advertisedExts[ext]);
- visitedSet.set(ext, true);
- }
-
- return trimmedSet;
+}
+
+static constexpr size_t kDeviceExtCount = static_cast<size_t>(DeviceExt::EnumCount);
+static constexpr std::array<DeviceExtInfo, kDeviceExtCount> sDeviceExtInfos{{
+ //
+ {DeviceExt::BindMemory2, "VK_KHR_bind_memory2", VulkanVersion_1_1},
+ {DeviceExt::Maintenance1, "VK_KHR_maintenance1", VulkanVersion_1_1},
+ {DeviceExt::StorageBufferStorageClass, "VK_KHR_storage_buffer_storage_class",
+ VulkanVersion_1_1},
+ {DeviceExt::GetPhysicalDeviceProperties2, "VK_KHR_get_physical_device_properties2",
+ VulkanVersion_1_1},
+ {DeviceExt::GetMemoryRequirements2, "VK_KHR_get_memory_requirements2", VulkanVersion_1_1},
+ {DeviceExt::ExternalMemoryCapabilities, "VK_KHR_external_memory_capabilities",
+ VulkanVersion_1_1},
+ {DeviceExt::ExternalSemaphoreCapabilities, "VK_KHR_external_semaphore_capabilities",
+ VulkanVersion_1_1},
+ {DeviceExt::ExternalMemory, "VK_KHR_external_memory", VulkanVersion_1_1},
+ {DeviceExt::ExternalSemaphore, "VK_KHR_external_semaphore", VulkanVersion_1_1},
+ {DeviceExt::_16BitStorage, "VK_KHR_16bit_storage", VulkanVersion_1_1},
+ {DeviceExt::SamplerYCbCrConversion, "VK_KHR_sampler_ycbcr_conversion", VulkanVersion_1_1},
+
+ {DeviceExt::DriverProperties, "VK_KHR_driver_properties", VulkanVersion_1_2},
+ {DeviceExt::ImageFormatList, "VK_KHR_image_format_list", VulkanVersion_1_2},
+ {DeviceExt::ShaderFloat16Int8, "VK_KHR_shader_float16_int8", VulkanVersion_1_2},
+
+ {DeviceExt::ShaderIntegerDotProduct, "VK_KHR_shader_integer_dot_product", VulkanVersion_1_3},
+ {DeviceExt::ZeroInitializeWorkgroupMemory, "VK_KHR_zero_initialize_workgroup_memory",
+ VulkanVersion_1_3},
+
+ {DeviceExt::ExternalMemoryFD, "VK_KHR_external_memory_fd", NeverPromoted},
+ {DeviceExt::ExternalMemoryDmaBuf, "VK_EXT_external_memory_dma_buf", NeverPromoted},
+ {DeviceExt::ExternalMemoryZirconHandle, "VK_FUCHSIA_external_memory", NeverPromoted},
+ {DeviceExt::ExternalSemaphoreFD, "VK_KHR_external_semaphore_fd", NeverPromoted},
+ {DeviceExt::ExternalSemaphoreZirconHandle, "VK_FUCHSIA_external_semaphore", NeverPromoted},
+
+ {DeviceExt::ImageDrmFormatModifier, "VK_EXT_image_drm_format_modifier", NeverPromoted},
+ {DeviceExt::Swapchain, "VK_KHR_swapchain", NeverPromoted},
+ {DeviceExt::SubgroupSizeControl, "VK_EXT_subgroup_size_control", NeverPromoted},
+ //
+}};
+
+const DeviceExtInfo& GetDeviceExtInfo(DeviceExt ext) {
+ uint32_t index = static_cast<uint32_t>(ext);
+ ASSERT(index < sDeviceExtInfos.size());
+ ASSERT(sDeviceExtInfos[index].index == ext);
+ return sDeviceExtInfos[index];
+}
+
+std::unordered_map<std::string, DeviceExt> CreateDeviceExtNameMap() {
+ std::unordered_map<std::string, DeviceExt> result;
+ for (const DeviceExtInfo& info : sDeviceExtInfos) {
+ result[info.name] = info.index;
}
-
- void MarkPromotedExtensions(DeviceExtSet* extensions, uint32_t version) {
- for (const DeviceExtInfo& info : sDeviceExtInfos) {
- if (info.versionPromoted <= version) {
- extensions->set(info.index, true);
- }
+ return result;
+}
+
+DeviceExtSet EnsureDependencies(const DeviceExtSet& advertisedExts,
+ const InstanceExtSet& instanceExts,
+ uint32_t icdVersion) {
+ // This is very similar to EnsureDependencies for instanceExtSet. See comment there for
+ // an explanation of what happens.
+ DeviceExtSet visitedSet;
+ DeviceExtSet trimmedSet;
+
+ auto HasDep = [&](DeviceExt ext) -> bool {
+ ASSERT(visitedSet[ext]);
+ return trimmedSet[ext];
+ };
+
+ for (uint32_t i = 0; i < sDeviceExtInfos.size(); i++) {
+ DeviceExt ext = static_cast<DeviceExt>(i);
+
+ bool hasDependencies = false;
+ switch (ext) {
+ // Happy extensions don't need anybody else!
+ case DeviceExt::BindMemory2:
+ case DeviceExt::GetMemoryRequirements2:
+ case DeviceExt::Maintenance1:
+ case DeviceExt::ImageFormatList:
+ case DeviceExt::StorageBufferStorageClass:
+ hasDependencies = true;
+ break;
+
+            // Physical device extensions technically don't require the instance to support
+            // them, but VulkanFunctions only loads the function pointers if the instance
+            // advertises the extension. So if we didn't have this check, we'd risk calling
+            // a nullptr.
+ case DeviceExt::GetPhysicalDeviceProperties2:
+ hasDependencies = instanceExts[InstanceExt::GetPhysicalDeviceProperties2];
+ break;
+ case DeviceExt::ExternalMemoryCapabilities:
+ hasDependencies = instanceExts[InstanceExt::ExternalMemoryCapabilities] &&
+ HasDep(DeviceExt::GetPhysicalDeviceProperties2);
+ break;
+ case DeviceExt::ExternalSemaphoreCapabilities:
+ hasDependencies = instanceExts[InstanceExt::ExternalSemaphoreCapabilities] &&
+ HasDep(DeviceExt::GetPhysicalDeviceProperties2);
+ break;
+
+ case DeviceExt::ImageDrmFormatModifier:
+ hasDependencies = HasDep(DeviceExt::BindMemory2) &&
+ HasDep(DeviceExt::GetPhysicalDeviceProperties2) &&
+ HasDep(DeviceExt::ImageFormatList) &&
+ HasDep(DeviceExt::SamplerYCbCrConversion);
+ break;
+
+ case DeviceExt::Swapchain:
+ hasDependencies = instanceExts[InstanceExt::Surface];
+ break;
+
+ case DeviceExt::SamplerYCbCrConversion:
+ hasDependencies = HasDep(DeviceExt::Maintenance1) &&
+ HasDep(DeviceExt::BindMemory2) &&
+ HasDep(DeviceExt::GetMemoryRequirements2) &&
+ HasDep(DeviceExt::GetPhysicalDeviceProperties2);
+ break;
+
+ case DeviceExt::DriverProperties:
+ case DeviceExt::ShaderFloat16Int8:
+ hasDependencies = HasDep(DeviceExt::GetPhysicalDeviceProperties2);
+ break;
+
+ case DeviceExt::ExternalMemory:
+ hasDependencies = HasDep(DeviceExt::ExternalMemoryCapabilities);
+ break;
+
+ case DeviceExt::ExternalSemaphore:
+ hasDependencies = HasDep(DeviceExt::ExternalSemaphoreCapabilities);
+ break;
+
+ case DeviceExt::ExternalMemoryFD:
+ case DeviceExt::ExternalMemoryZirconHandle:
+ hasDependencies = HasDep(DeviceExt::ExternalMemory);
+ break;
+
+ case DeviceExt::ExternalMemoryDmaBuf:
+ hasDependencies = HasDep(DeviceExt::ExternalMemoryFD);
+ break;
+
+ case DeviceExt::ExternalSemaphoreFD:
+ case DeviceExt::ExternalSemaphoreZirconHandle:
+ hasDependencies = HasDep(DeviceExt::ExternalSemaphore);
+ break;
+
+ case DeviceExt::_16BitStorage:
+ hasDependencies = HasDep(DeviceExt::GetPhysicalDeviceProperties2) &&
+ HasDep(DeviceExt::StorageBufferStorageClass);
+ break;
+
+ case DeviceExt::SubgroupSizeControl:
+ // Using the extension requires DeviceExt::GetPhysicalDeviceProperties2, but we
+ // don't need to check for it as it also requires Vulkan 1.1 in which
+ // VK_KHR_get_physical_device_properties2 was promoted.
+ hasDependencies = icdVersion >= VulkanVersion_1_1;
+ break;
+
+ case DeviceExt::ShaderIntegerDotProduct:
+ case DeviceExt::ZeroInitializeWorkgroupMemory:
+ hasDependencies = HasDep(DeviceExt::GetPhysicalDeviceProperties2);
+ break;
+
+ case DeviceExt::EnumCount:
+ UNREACHABLE();
}
- }
- // A static array for VulkanLayerInfo that can be indexed with VulkanLayers.
- // GetVulkanLayerInfo checks that "index" matches the index used to access this array so an
- // assert will fire if it isn't in the correct order.
- static constexpr size_t kVulkanLayerCount = static_cast<size_t>(VulkanLayer::EnumCount);
- static constexpr std::array<VulkanLayerInfo, kVulkanLayerCount> sVulkanLayerInfos{{
- //
- {VulkanLayer::Validation, "VK_LAYER_KHRONOS_validation"},
- {VulkanLayer::LunargVkTrace, "VK_LAYER_LUNARG_vktrace"},
- {VulkanLayer::RenderDocCapture, "VK_LAYER_RENDERDOC_Capture"},
- {VulkanLayer::FuchsiaImagePipeSwapchain, "VK_LAYER_FUCHSIA_imagepipe_swapchain"},
- //
- }};
-
- const VulkanLayerInfo& GetVulkanLayerInfo(VulkanLayer layer) {
- uint32_t index = static_cast<uint32_t>(layer);
- ASSERT(index < sVulkanLayerInfos.size());
- ASSERT(sVulkanLayerInfos[index].layer == layer);
- return sVulkanLayerInfos[index];
+ trimmedSet.set(ext, hasDependencies && advertisedExts[ext]);
+ visitedSet.set(ext, true);
}
- std::unordered_map<std::string, VulkanLayer> CreateVulkanLayerNameMap() {
- std::unordered_map<std::string, VulkanLayer> result;
- for (const VulkanLayerInfo& info : sVulkanLayerInfos) {
- result[info.name] = info.layer;
+ return trimmedSet;
+}
+
+void MarkPromotedExtensions(DeviceExtSet* extensions, uint32_t version) {
+ for (const DeviceExtInfo& info : sDeviceExtInfos) {
+ if (info.versionPromoted <= version) {
+ extensions->set(info.index, true);
}
- return result;
}
+}
+
+// A static array for VulkanLayerInfo that can be indexed with VulkanLayers.
+// GetVulkanLayerInfo checks that "layer" matches the index used to access this array so an
+// assert will fire if the entries aren't in the correct order.
+static constexpr size_t kVulkanLayerCount = static_cast<size_t>(VulkanLayer::EnumCount);
+static constexpr std::array<VulkanLayerInfo, kVulkanLayerCount> sVulkanLayerInfos{{
+ //
+ {VulkanLayer::Validation, "VK_LAYER_KHRONOS_validation"},
+ {VulkanLayer::LunargVkTrace, "VK_LAYER_LUNARG_vktrace"},
+ {VulkanLayer::RenderDocCapture, "VK_LAYER_RENDERDOC_Capture"},
+ {VulkanLayer::FuchsiaImagePipeSwapchain, "VK_LAYER_FUCHSIA_imagepipe_swapchain"},
+ //
+}};
+
+const VulkanLayerInfo& GetVulkanLayerInfo(VulkanLayer layer) {
+ uint32_t index = static_cast<uint32_t>(layer);
+ ASSERT(index < sVulkanLayerInfos.size());
+ ASSERT(sVulkanLayerInfos[index].layer == layer);
+ return sVulkanLayerInfos[index];
+}
+
+std::unordered_map<std::string, VulkanLayer> CreateVulkanLayerNameMap() {
+ std::unordered_map<std::string, VulkanLayer> result;
+ for (const VulkanLayerInfo& info : sVulkanLayerInfos) {
+ result[info.name] = info.layer;
+ }
+ return result;
+}
} // namespace dawn::native::vulkan
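As a rough illustration of how the instance-extension helpers in this file are meant to be combined, here is a hypothetical caller-side sketch. The wrapper name GatherInstanceExtensions and the way the advertised names are obtained are invented for the example; only CreateInstanceExtNameMap, MarkPromotedExtensions, EnsureDependencies, and InstanceExtSet come from the code above.

    // Hypothetical helper (not part of this patch); assumes the declarations from
    // dawn/native/vulkan/VulkanExtensions.h plus <string>, <vector>, <unordered_map>.
    InstanceExtSet GatherInstanceExtensions(const std::vector<std::string>& advertisedNames,
                                            uint32_t apiVersion) {
        // Map the raw extension name strings reported by the loader to InstanceExt values,
        // ignoring names Dawn does not know about.
        const std::unordered_map<std::string, InstanceExt> nameMap = CreateInstanceExtNameMap();
        InstanceExtSet advertised;
        for (const std::string& name : advertisedNames) {
            const auto it = nameMap.find(name);
            if (it != nameMap.end()) {
                advertised.set(it->second, true);
            }
        }
        // Extensions promoted to core in apiVersion count as advertised even if the loader
        // does not list them explicitly.
        MarkPromotedExtensions(&advertised, apiVersion);
        // Finally, drop any extension whose transitive dependencies are not all present.
        return EnsureDependencies(advertised);
    }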
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/VulkanExtensions.h b/chromium/third_party/dawn/src/dawn/native/vulkan/VulkanExtensions.h
index 1aea2bc5700..6912528404e 100644
--- a/chromium/third_party/dawn/src/dawn/native/vulkan/VulkanExtensions.h
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/VulkanExtensions.h
@@ -15,151 +15,153 @@
#ifndef SRC_DAWN_NATIVE_VULKAN_VULKANEXTENSIONS_H_
#define SRC_DAWN_NATIVE_VULKAN_VULKANEXTENSIONS_H_
-#include "dawn/common/ityp_bitset.h"
-
+#include <string>
#include <unordered_map>
+#include "dawn/common/ityp_bitset.h"
+
namespace dawn::native::vulkan {
- // The list of known instance extensions. They must be in dependency order (this is checked
- // inside EnsureDependencies)
- enum class InstanceExt {
- // Promoted to 1.1
- GetPhysicalDeviceProperties2,
- ExternalMemoryCapabilities,
- ExternalSemaphoreCapabilities,
-
- // Surface extensions
- Surface,
- FuchsiaImagePipeSurface,
- MetalSurface,
- WaylandSurface,
- Win32Surface,
- XcbSurface,
- XlibSurface,
- AndroidSurface,
-
- // Others
- DebugUtils,
- ValidationFeatures,
-
- EnumCount,
- };
-
- // A bitset that is indexed with InstanceExt.
- using InstanceExtSet = ityp::bitset<InstanceExt, static_cast<uint32_t>(InstanceExt::EnumCount)>;
-
- // Information about a known instance extension.
- struct InstanceExtInfo {
- InstanceExt index;
- const char* name;
- // The version in which this extension was promoted as built with VK_MAKE_VERSION,
- // or NeverPromoted if it was never promoted.
- uint32_t versionPromoted;
- };
-
- // Returns the information about a known InstanceExt
- const InstanceExtInfo& GetInstanceExtInfo(InstanceExt ext);
- // Returns a map that maps a Vulkan extension name to its InstanceExt.
- std::unordered_map<std::string, InstanceExt> CreateInstanceExtNameMap();
-
- // Sets entries in `extensions` to true if that entry was promoted in Vulkan version `version`
- void MarkPromotedExtensions(InstanceExtSet* extensions, uint32_t version);
- // From a set of extensions advertised as supported by the instance (or promoted), remove all
- // extensions that don't have all their transitive dependencies in advertisedExts.
- InstanceExtSet EnsureDependencies(const InstanceExtSet& advertisedExts);
-
- // The list of known device extensions. They must be in dependency order (this is checked
- // inside EnsureDependencies)
- enum class DeviceExt {
- // Promoted to 1.1
- BindMemory2,
- Maintenance1,
- StorageBufferStorageClass,
- GetPhysicalDeviceProperties2,
- GetMemoryRequirements2,
- ExternalMemoryCapabilities,
- ExternalSemaphoreCapabilities,
- ExternalMemory,
- ExternalSemaphore,
- _16BitStorage,
- SamplerYCbCrConversion,
-
- // Promoted to 1.2
- DriverProperties,
- ImageFormatList,
- ShaderFloat16Int8,
-
- // Promoted to 1.3
- ZeroInitializeWorkgroupMemory,
-
- // External* extensions
- ExternalMemoryFD,
- ExternalMemoryDmaBuf,
- ExternalMemoryZirconHandle,
- ExternalSemaphoreFD,
- ExternalSemaphoreZirconHandle,
-
- // Others
- ImageDrmFormatModifier,
- Swapchain,
- SubgroupSizeControl,
-
- EnumCount,
- };
-
- // A bitset that is indexed with DeviceExt.
- using DeviceExtSet = ityp::bitset<DeviceExt, static_cast<uint32_t>(DeviceExt::EnumCount)>;
-
- // Information about a known device extension.
- struct DeviceExtInfo {
- DeviceExt index;
- const char* name;
- // The version in which this extension was promoted as built with VK_MAKE_VERSION,
- // or NeverPromoted if it was never promoted.
- uint32_t versionPromoted;
- };
-
- // Returns the information about a known DeviceExt
- const DeviceExtInfo& GetDeviceExtInfo(DeviceExt ext);
- // Returns a map that maps a Vulkan extension name to its DeviceExt.
- std::unordered_map<std::string, DeviceExt> CreateDeviceExtNameMap();
-
- // Sets entries in `extensions` to true if that entry was promoted in Vulkan version `version`
- void MarkPromotedExtensions(DeviceExtSet* extensions, uint32_t version);
- // From a set of extensions advertised as supported by the device (or promoted), remove all
- // extensions that don't have all their transitive dependencies in advertisedExts or in
- // instanceExts.
- DeviceExtSet EnsureDependencies(const DeviceExtSet& advertisedExts,
- const InstanceExtSet& instanceExts,
- uint32_t icdVersion);
-
- // The list of all known Vulkan layers.
- enum class VulkanLayer {
- Validation,
- LunargVkTrace,
- RenderDocCapture,
-
- // Fuchsia implements the swapchain through a layer (VK_LAYER_FUCHSIA_image_pipe_swapchain),
- // which adds an instance extensions (VK_FUCHSIA_image_surface) to all ICDs.
- FuchsiaImagePipeSwapchain,
-
- EnumCount,
- };
-
- // A bitset that is indexed with VulkanLayer.
- using VulkanLayerSet = ityp::bitset<VulkanLayer, static_cast<uint32_t>(VulkanLayer::EnumCount)>;
-
- // Information about a known layer
- struct VulkanLayerInfo {
- VulkanLayer layer;
- const char* name;
- };
-
- // Returns the information about a known VulkanLayer
- const VulkanLayerInfo& GetVulkanLayerInfo(VulkanLayer layer);
- // Returns a map that maps a Vulkan layer name to its VulkanLayer.
- std::unordered_map<std::string, VulkanLayer> CreateVulkanLayerNameMap();
+// The list of known instance extensions. They must be in dependency order (this is checked
+// inside EnsureDependencies)
+enum class InstanceExt {
+ // Promoted to 1.1
+ GetPhysicalDeviceProperties2,
+ ExternalMemoryCapabilities,
+ ExternalSemaphoreCapabilities,
+
+ // Surface extensions
+ Surface,
+ FuchsiaImagePipeSurface,
+ MetalSurface,
+ WaylandSurface,
+ Win32Surface,
+ XcbSurface,
+ XlibSurface,
+ AndroidSurface,
+
+ // Others
+ DebugUtils,
+ ValidationFeatures,
+
+ EnumCount,
+};
+
+// A bitset that is indexed with InstanceExt.
+using InstanceExtSet = ityp::bitset<InstanceExt, static_cast<uint32_t>(InstanceExt::EnumCount)>;
+
+// Information about a known instance extension.
+struct InstanceExtInfo {
+ InstanceExt index;
+ const char* name;
+    // The Vulkan version in which this extension was promoted to core, expressed as
+    // VK_API_VERSION_1_x, or NeverPromoted if it was never promoted.
+ uint32_t versionPromoted;
+};
+
+// Returns the information about a known InstanceExt
+const InstanceExtInfo& GetInstanceExtInfo(InstanceExt ext);
+// Returns a map that maps a Vulkan extension name to its InstanceExt.
+std::unordered_map<std::string, InstanceExt> CreateInstanceExtNameMap();
+
+// Sets entries in `extensions` to true if that entry was promoted in Vulkan version `version`
+void MarkPromotedExtensions(InstanceExtSet* extensions, uint32_t version);
+// From a set of extensions advertised as supported by the instance (or promoted), remove all
+// extensions that don't have all their transitive dependencies in advertisedExts.
+InstanceExtSet EnsureDependencies(const InstanceExtSet& advertisedExts);
+
+// The list of known device extensions. They must be in dependency order (this is checked
+// inside EnsureDependencies)
+enum class DeviceExt {
+ // Promoted to 1.1
+ BindMemory2,
+ Maintenance1,
+ StorageBufferStorageClass,
+ GetPhysicalDeviceProperties2,
+ GetMemoryRequirements2,
+ ExternalMemoryCapabilities,
+ ExternalSemaphoreCapabilities,
+ ExternalMemory,
+ ExternalSemaphore,
+ _16BitStorage,
+ SamplerYCbCrConversion,
+
+ // Promoted to 1.2
+ DriverProperties,
+ ImageFormatList,
+ ShaderFloat16Int8,
+
+ // Promoted to 1.3
+ ShaderIntegerDotProduct,
+ ZeroInitializeWorkgroupMemory,
+
+ // External* extensions
+ ExternalMemoryFD,
+ ExternalMemoryDmaBuf,
+ ExternalMemoryZirconHandle,
+ ExternalSemaphoreFD,
+ ExternalSemaphoreZirconHandle,
+
+ // Others
+ ImageDrmFormatModifier,
+ Swapchain,
+ SubgroupSizeControl,
+
+ EnumCount,
+};
+
+// A bitset that is indexed with DeviceExt.
+using DeviceExtSet = ityp::bitset<DeviceExt, static_cast<uint32_t>(DeviceExt::EnumCount)>;
+
+// Information about a known device extension.
+struct DeviceExtInfo {
+ DeviceExt index;
+ const char* name;
+    // The Vulkan version in which this extension was promoted to core, expressed as
+    // VK_API_VERSION_1_x, or NeverPromoted if it was never promoted.
+ uint32_t versionPromoted;
+};
+
+// Returns the information about a known DeviceExt
+const DeviceExtInfo& GetDeviceExtInfo(DeviceExt ext);
+// Returns a map that maps a Vulkan extension name to its DeviceExt.
+std::unordered_map<std::string, DeviceExt> CreateDeviceExtNameMap();
+
+// Sets entries in `extensions` to true if that entry was promoted in Vulkan version `version`
+void MarkPromotedExtensions(DeviceExtSet* extensions, uint32_t version);
+// From a set of extensions advertised as supported by the device (or promoted), remove all
+// extensions that don't have all their transitive dependencies in advertisedExts or in
+// instanceExts.
+DeviceExtSet EnsureDependencies(const DeviceExtSet& advertisedExts,
+ const InstanceExtSet& instanceExts,
+ uint32_t icdVersion);
+
+// The list of all known Vulkan layers.
+enum class VulkanLayer {
+ Validation,
+ LunargVkTrace,
+ RenderDocCapture,
+
+    // Fuchsia implements the swapchain through a layer (VK_LAYER_FUCHSIA_imagepipe_swapchain),
+    // which adds an instance extension (VK_FUCHSIA_imagepipe_surface) to all ICDs.
+ FuchsiaImagePipeSwapchain,
+
+ EnumCount,
+};
+
+// A bitset that is indexed with VulkanLayer.
+using VulkanLayerSet = ityp::bitset<VulkanLayer, static_cast<uint32_t>(VulkanLayer::EnumCount)>;
+
+// Information about a known layer
+struct VulkanLayerInfo {
+ VulkanLayer layer;
+ const char* name;
+};
+
+// Returns the information about a known VulkanLayer
+const VulkanLayerInfo& GetVulkanLayerInfo(VulkanLayer layer);
+// Returns a map that maps a Vulkan layer name to its VulkanLayer.
+std::unordered_map<std::string, VulkanLayer> CreateVulkanLayerNameMap();
} // namespace dawn::native::vulkan
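The device-level declarations mirror the instance-level ones, but EnsureDependencies additionally takes the trimmed instance extension set and the ICD version, since some device extensions (VK_KHR_swapchain on VK_KHR_surface, for example) depend on instance extensions or on a minimum Vulkan version. A hypothetical usage sketch, with the wrapper name SelectDeviceExtensions invented for the example:

    // Hypothetical caller (not part of this patch); assumes the declarations above.
    DeviceExtSet SelectDeviceExtensions(const std::vector<std::string>& advertisedNames,
                                        const InstanceExtSet& instanceExts,
                                        uint32_t icdVersion) {
        const std::unordered_map<std::string, DeviceExt> nameMap = CreateDeviceExtNameMap();
        DeviceExtSet advertised;
        for (const std::string& name : advertisedNames) {
            const auto it = nameMap.find(name);
            if (it != nameMap.end()) {
                advertised.set(it->second, true);
            }
        }
        // Treat extensions promoted to core in icdVersion as advertised, then trim away
        // anything whose device-level or instance-level dependencies are missing.
        MarkPromotedExtensions(&advertised, icdVersion);
        return EnsureDependencies(advertised, instanceExts, icdVersion);
    }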
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/VulkanFunctions.cpp b/chromium/third_party/dawn/src/dawn/native/vulkan/VulkanFunctions.cpp
index 48e970994a4..1bb163727b3 100644
--- a/chromium/third_party/dawn/src/dawn/native/vulkan/VulkanFunctions.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/VulkanFunctions.cpp
@@ -14,325 +14,379 @@
#include "dawn/native/vulkan/VulkanFunctions.h"
+#include <string>
+#include <utility>
+
#include "dawn/common/DynamicLib.h"
#include "dawn/native/vulkan/VulkanInfo.h"
namespace dawn::native::vulkan {
-#define GET_GLOBAL_PROC(name) \
- do { \
- name = reinterpret_cast<decltype(name)>(GetInstanceProcAddr(nullptr, "vk" #name)); \
- if (name == nullptr) { \
- return DAWN_INTERNAL_ERROR(std::string("Couldn't get proc vk") + #name); \
- } \
- } while (0)
+namespace {
+
+#if DAWN_NO_SANITIZE_VK_FN
+
+template <typename F>
+struct AsVkNoSanitizeFn;
+
+// SwiftShader does not export function pointer type information.
+// So, when fuzzing with UBSAN, fuzzers break whenever a vk*
+// function is called, because UBSAN thinks the type of the function
+// pointer does not match. Context: crbug.com/1296934.
+
+// Work around this problem by proxying through a std::function
+// in UBSAN builds. The std::function delegates to a Call method
+// that performs the same cast of the function pointer type; however,
+// the Call method is tagged with
+// `__attribute__((no_sanitize("function")))` to silence the error.
+template <typename R, typename... Args>
+struct AsVkNoSanitizeFn<R(VKAPI_PTR*)(Args...)> {
+ auto operator()(void(VKAPI_PTR* addr)()) {
+ return [addr](Args&&... args) -> R { return Call(addr, std::forward<Args>(args)...); };
+ }
+
+ private:
+ __attribute__((no_sanitize("function"))) static R Call(void(VKAPI_PTR* addr)(),
+ Args&&... args) {
+ return reinterpret_cast<R(VKAPI_PTR*)(Args...)>(addr)(std::forward<Args>(args)...);
+ }
+};
+template <typename F>
+auto AsVkFn(void(VKAPI_PTR* addr)()) {
+ return AsVkNoSanitizeFn<F>{}(addr);
+}
+
+#else
+
+template <typename F>
+F AsVkFn(void(VKAPI_PTR* addr)()) {
+ return reinterpret_cast<F>(addr);
+}
- MaybeError VulkanFunctions::LoadGlobalProcs(const DynamicLib& vulkanLib) {
- if (!vulkanLib.GetProc(&GetInstanceProcAddr, "vkGetInstanceProcAddr")) {
- return DAWN_INTERNAL_ERROR("Couldn't get vkGetInstanceProcAddr");
- }
+#endif
- GET_GLOBAL_PROC(CreateInstance);
- GET_GLOBAL_PROC(EnumerateInstanceExtensionProperties);
- GET_GLOBAL_PROC(EnumerateInstanceLayerProperties);
+} // anonymous namespace
- // Is not available in Vulkan 1.0, so allow nullptr
- EnumerateInstanceVersion = reinterpret_cast<decltype(EnumerateInstanceVersion)>(
- GetInstanceProcAddr(nullptr, "vkEnumerateInstanceVersion"));
+#define GET_GLOBAL_PROC(name) \
+ do { \
+ name = AsVkFn<PFN_vk##name>(GetInstanceProcAddr(nullptr, "vk" #name)); \
+ if (name == nullptr) { \
+ return DAWN_INTERNAL_ERROR(std::string("Couldn't get proc vk") + #name); \
+ } \
+ } while (0)
- return {};
+MaybeError VulkanFunctions::LoadGlobalProcs(const DynamicLib& vulkanLib) {
+ if (!vulkanLib.GetProc(&GetInstanceProcAddr, "vkGetInstanceProcAddr")) {
+ return DAWN_INTERNAL_ERROR("Couldn't get vkGetInstanceProcAddr");
}
-#define GET_INSTANCE_PROC_BASE(name, procName) \
- do { \
- name = reinterpret_cast<decltype(name)>(GetInstanceProcAddr(instance, "vk" #procName)); \
- if (name == nullptr) { \
- return DAWN_INTERNAL_ERROR(std::string("Couldn't get proc vk") + #procName); \
- } \
+ GET_GLOBAL_PROC(CreateInstance);
+ GET_GLOBAL_PROC(EnumerateInstanceExtensionProperties);
+ GET_GLOBAL_PROC(EnumerateInstanceLayerProperties);
+
+    // vkEnumerateInstanceVersion is not available in Vulkan 1.0, so allow it to be nullptr.
+ EnumerateInstanceVersion = AsVkFn<PFN_vkEnumerateInstanceVersion>(
+ GetInstanceProcAddr(nullptr, "vkEnumerateInstanceVersion"));
+
+ return {};
+}
+
+#define GET_INSTANCE_PROC_BASE(name, procName) \
+ do { \
+ name = AsVkFn<PFN_vk##name>(GetInstanceProcAddr(instance, "vk" #procName)); \
+ if (name == nullptr) { \
+ return DAWN_INTERNAL_ERROR(std::string("Couldn't get proc vk") + #procName); \
+ } \
} while (0)
#define GET_INSTANCE_PROC(name) GET_INSTANCE_PROC_BASE(name, name)
#define GET_INSTANCE_PROC_VENDOR(name, vendor) GET_INSTANCE_PROC_BASE(name, name##vendor)
- MaybeError VulkanFunctions::LoadInstanceProcs(VkInstance instance,
- const VulkanGlobalInfo& globalInfo) {
- // Load this proc first so that we can destroy the instance even if some other
- // GET_INSTANCE_PROC fails
- GET_INSTANCE_PROC(DestroyInstance);
-
- GET_INSTANCE_PROC(CreateDevice);
- GET_INSTANCE_PROC(DestroyDevice);
- GET_INSTANCE_PROC(EnumerateDeviceExtensionProperties);
- GET_INSTANCE_PROC(EnumerateDeviceLayerProperties);
- GET_INSTANCE_PROC(EnumeratePhysicalDevices);
- GET_INSTANCE_PROC(GetDeviceProcAddr);
- GET_INSTANCE_PROC(GetPhysicalDeviceFeatures);
- GET_INSTANCE_PROC(GetPhysicalDeviceFormatProperties);
- GET_INSTANCE_PROC(GetPhysicalDeviceImageFormatProperties);
- GET_INSTANCE_PROC(GetPhysicalDeviceMemoryProperties);
- GET_INSTANCE_PROC(GetPhysicalDeviceProperties);
- GET_INSTANCE_PROC(GetPhysicalDeviceQueueFamilyProperties);
- GET_INSTANCE_PROC(GetPhysicalDeviceSparseImageFormatProperties);
-
- if (globalInfo.HasExt(InstanceExt::DebugUtils)) {
- GET_INSTANCE_PROC(CmdBeginDebugUtilsLabelEXT);
- GET_INSTANCE_PROC(CmdEndDebugUtilsLabelEXT);
- GET_INSTANCE_PROC(CmdInsertDebugUtilsLabelEXT);
- GET_INSTANCE_PROC(CreateDebugUtilsMessengerEXT);
- GET_INSTANCE_PROC(DestroyDebugUtilsMessengerEXT);
- GET_INSTANCE_PROC(QueueBeginDebugUtilsLabelEXT);
- GET_INSTANCE_PROC(QueueEndDebugUtilsLabelEXT);
- GET_INSTANCE_PROC(QueueInsertDebugUtilsLabelEXT);
- GET_INSTANCE_PROC(SetDebugUtilsObjectNameEXT);
- GET_INSTANCE_PROC(SetDebugUtilsObjectTagEXT);
- GET_INSTANCE_PROC(SubmitDebugUtilsMessageEXT);
- }
-
- // Vulkan 1.1 is not required to report promoted extensions from 1.0 and is not required to
- // support the vendor entrypoint in GetProcAddress.
- if (globalInfo.apiVersion >= VK_MAKE_VERSION(1, 1, 0)) {
- GET_INSTANCE_PROC(GetPhysicalDeviceExternalBufferProperties);
- } else if (globalInfo.HasExt(InstanceExt::ExternalMemoryCapabilities)) {
- GET_INSTANCE_PROC_VENDOR(GetPhysicalDeviceExternalBufferProperties, KHR);
- }
-
- if (globalInfo.apiVersion >= VK_MAKE_VERSION(1, 1, 0)) {
- GET_INSTANCE_PROC(GetPhysicalDeviceExternalSemaphoreProperties);
- } else if (globalInfo.HasExt(InstanceExt::ExternalSemaphoreCapabilities)) {
- GET_INSTANCE_PROC_VENDOR(GetPhysicalDeviceExternalSemaphoreProperties, KHR);
- }
-
- if (globalInfo.apiVersion >= VK_MAKE_VERSION(1, 1, 0)) {
- GET_INSTANCE_PROC(GetPhysicalDeviceFeatures2);
- GET_INSTANCE_PROC(GetPhysicalDeviceProperties2);
- GET_INSTANCE_PROC(GetPhysicalDeviceFormatProperties2);
- GET_INSTANCE_PROC(GetPhysicalDeviceImageFormatProperties2);
- GET_INSTANCE_PROC(GetPhysicalDeviceQueueFamilyProperties2);
- GET_INSTANCE_PROC(GetPhysicalDeviceMemoryProperties2);
- GET_INSTANCE_PROC(GetPhysicalDeviceSparseImageFormatProperties2);
- } else if (globalInfo.HasExt(InstanceExt::GetPhysicalDeviceProperties2)) {
- GET_INSTANCE_PROC_VENDOR(GetPhysicalDeviceFeatures2, KHR);
- GET_INSTANCE_PROC_VENDOR(GetPhysicalDeviceProperties2, KHR);
- GET_INSTANCE_PROC_VENDOR(GetPhysicalDeviceFormatProperties2, KHR);
- GET_INSTANCE_PROC_VENDOR(GetPhysicalDeviceImageFormatProperties2, KHR);
- GET_INSTANCE_PROC_VENDOR(GetPhysicalDeviceQueueFamilyProperties2, KHR);
- GET_INSTANCE_PROC_VENDOR(GetPhysicalDeviceMemoryProperties2, KHR);
- GET_INSTANCE_PROC_VENDOR(GetPhysicalDeviceSparseImageFormatProperties2, KHR);
- }
-
- if (globalInfo.HasExt(InstanceExt::Surface)) {
- GET_INSTANCE_PROC(DestroySurfaceKHR);
- GET_INSTANCE_PROC(GetPhysicalDeviceSurfaceSupportKHR);
- GET_INSTANCE_PROC(GetPhysicalDeviceSurfaceCapabilitiesKHR);
- GET_INSTANCE_PROC(GetPhysicalDeviceSurfaceFormatsKHR);
- GET_INSTANCE_PROC(GetPhysicalDeviceSurfacePresentModesKHR);
- }
+MaybeError VulkanFunctions::LoadInstanceProcs(VkInstance instance,
+ const VulkanGlobalInfo& globalInfo) {
+ // Load this proc first so that we can destroy the instance even if some other
+ // GET_INSTANCE_PROC fails
+ GET_INSTANCE_PROC(DestroyInstance);
+
+ GET_INSTANCE_PROC(CreateDevice);
+ GET_INSTANCE_PROC(DestroyDevice);
+ GET_INSTANCE_PROC(EnumerateDeviceExtensionProperties);
+ GET_INSTANCE_PROC(EnumerateDeviceLayerProperties);
+ GET_INSTANCE_PROC(EnumeratePhysicalDevices);
+ GET_INSTANCE_PROC(GetDeviceProcAddr);
+ GET_INSTANCE_PROC(GetPhysicalDeviceFeatures);
+ GET_INSTANCE_PROC(GetPhysicalDeviceFormatProperties);
+ GET_INSTANCE_PROC(GetPhysicalDeviceImageFormatProperties);
+ GET_INSTANCE_PROC(GetPhysicalDeviceMemoryProperties);
+ GET_INSTANCE_PROC(GetPhysicalDeviceProperties);
+ GET_INSTANCE_PROC(GetPhysicalDeviceQueueFamilyProperties);
+ GET_INSTANCE_PROC(GetPhysicalDeviceSparseImageFormatProperties);
+
+ if (globalInfo.HasExt(InstanceExt::DebugUtils)) {
+ GET_INSTANCE_PROC(CmdBeginDebugUtilsLabelEXT);
+ GET_INSTANCE_PROC(CmdEndDebugUtilsLabelEXT);
+ GET_INSTANCE_PROC(CmdInsertDebugUtilsLabelEXT);
+ GET_INSTANCE_PROC(CreateDebugUtilsMessengerEXT);
+ GET_INSTANCE_PROC(DestroyDebugUtilsMessengerEXT);
+ GET_INSTANCE_PROC(QueueBeginDebugUtilsLabelEXT);
+ GET_INSTANCE_PROC(QueueEndDebugUtilsLabelEXT);
+ GET_INSTANCE_PROC(QueueInsertDebugUtilsLabelEXT);
+ GET_INSTANCE_PROC(SetDebugUtilsObjectNameEXT);
+ GET_INSTANCE_PROC(SetDebugUtilsObjectTagEXT);
+ GET_INSTANCE_PROC(SubmitDebugUtilsMessageEXT);
+ }
+
+    // A Vulkan 1.1 instance is not required to report extensions promoted from 1.0, and is
+    // not required to expose the vendor-suffixed entry points via GetInstanceProcAddr.
+ if (globalInfo.apiVersion >= VK_API_VERSION_1_1) {
+ GET_INSTANCE_PROC(GetPhysicalDeviceExternalBufferProperties);
+ } else if (globalInfo.HasExt(InstanceExt::ExternalMemoryCapabilities)) {
+ GET_INSTANCE_PROC_VENDOR(GetPhysicalDeviceExternalBufferProperties, KHR);
+ }
+
+ if (globalInfo.apiVersion >= VK_API_VERSION_1_1) {
+ GET_INSTANCE_PROC(GetPhysicalDeviceExternalSemaphoreProperties);
+ } else if (globalInfo.HasExt(InstanceExt::ExternalSemaphoreCapabilities)) {
+ GET_INSTANCE_PROC_VENDOR(GetPhysicalDeviceExternalSemaphoreProperties, KHR);
+ }
+
+ if (globalInfo.apiVersion >= VK_API_VERSION_1_1) {
+ GET_INSTANCE_PROC(GetPhysicalDeviceFeatures2);
+ GET_INSTANCE_PROC(GetPhysicalDeviceProperties2);
+ GET_INSTANCE_PROC(GetPhysicalDeviceFormatProperties2);
+ GET_INSTANCE_PROC(GetPhysicalDeviceImageFormatProperties2);
+ GET_INSTANCE_PROC(GetPhysicalDeviceQueueFamilyProperties2);
+ GET_INSTANCE_PROC(GetPhysicalDeviceMemoryProperties2);
+ GET_INSTANCE_PROC(GetPhysicalDeviceSparseImageFormatProperties2);
+ } else if (globalInfo.HasExt(InstanceExt::GetPhysicalDeviceProperties2)) {
+ GET_INSTANCE_PROC_VENDOR(GetPhysicalDeviceFeatures2, KHR);
+ GET_INSTANCE_PROC_VENDOR(GetPhysicalDeviceProperties2, KHR);
+ GET_INSTANCE_PROC_VENDOR(GetPhysicalDeviceFormatProperties2, KHR);
+ GET_INSTANCE_PROC_VENDOR(GetPhysicalDeviceImageFormatProperties2, KHR);
+ GET_INSTANCE_PROC_VENDOR(GetPhysicalDeviceQueueFamilyProperties2, KHR);
+ GET_INSTANCE_PROC_VENDOR(GetPhysicalDeviceMemoryProperties2, KHR);
+ GET_INSTANCE_PROC_VENDOR(GetPhysicalDeviceSparseImageFormatProperties2, KHR);
+ }
+
+ if (globalInfo.HasExt(InstanceExt::Surface)) {
+ GET_INSTANCE_PROC(DestroySurfaceKHR);
+ GET_INSTANCE_PROC(GetPhysicalDeviceSurfaceSupportKHR);
+ GET_INSTANCE_PROC(GetPhysicalDeviceSurfaceCapabilitiesKHR);
+ GET_INSTANCE_PROC(GetPhysicalDeviceSurfaceFormatsKHR);
+ GET_INSTANCE_PROC(GetPhysicalDeviceSurfacePresentModesKHR);
+ }
#if defined(VK_USE_PLATFORM_FUCHSIA)
- if (globalInfo.HasExt(InstanceExt::FuchsiaImagePipeSurface)) {
- GET_INSTANCE_PROC(CreateImagePipeSurfaceFUCHSIA);
- }
+ if (globalInfo.HasExt(InstanceExt::FuchsiaImagePipeSurface)) {
+ GET_INSTANCE_PROC(CreateImagePipeSurfaceFUCHSIA);
+ }
#endif // defined(VK_USE_PLATFORM_FUCHSIA)
#if defined(DAWN_ENABLE_BACKEND_METAL)
- if (globalInfo.HasExt(InstanceExt::MetalSurface)) {
- GET_INSTANCE_PROC(CreateMetalSurfaceEXT);
- }
+ if (globalInfo.HasExt(InstanceExt::MetalSurface)) {
+ GET_INSTANCE_PROC(CreateMetalSurfaceEXT);
+ }
#endif // defined(DAWN_ENABLE_BACKEND_METAL)
-#if defined(DAWN_PLATFORM_WINDOWS)
- if (globalInfo.HasExt(InstanceExt::Win32Surface)) {
- GET_INSTANCE_PROC(CreateWin32SurfaceKHR);
- GET_INSTANCE_PROC(GetPhysicalDeviceWin32PresentationSupportKHR);
- }
-#endif // defined(DAWN_PLATFORM_WINDOWS)
+#if defined(DAWN_USE_WAYLAND)
+ if (globalInfo.HasExt(InstanceExt::WaylandSurface)) {
+ GET_INSTANCE_PROC(CreateWaylandSurfaceKHR);
+ GET_INSTANCE_PROC(GetPhysicalDeviceWaylandPresentationSupportKHR);
+ }
+#endif // defined(DAWN_USE_WAYLAND)
-#if defined(DAWN_PLATFORM_ANDROID)
- if (globalInfo.HasExt(InstanceExt::AndroidSurface)) {
- GET_INSTANCE_PROC(CreateAndroidSurfaceKHR);
- }
-#endif // defined(DAWN_PLATFORM_ANDROID)
+#if DAWN_PLATFORM_IS(WINDOWS)
+ if (globalInfo.HasExt(InstanceExt::Win32Surface)) {
+ GET_INSTANCE_PROC(CreateWin32SurfaceKHR);
+ GET_INSTANCE_PROC(GetPhysicalDeviceWin32PresentationSupportKHR);
+ }
+#endif // DAWN_PLATFORM_IS(WINDOWS)
+
+#if DAWN_PLATFORM_IS(ANDROID)
+ if (globalInfo.HasExt(InstanceExt::AndroidSurface)) {
+ GET_INSTANCE_PROC(CreateAndroidSurfaceKHR);
+ }
+#endif // DAWN_PLATFORM_IS(ANDROID)
#if defined(DAWN_USE_X11)
- if (globalInfo.HasExt(InstanceExt::XlibSurface)) {
- GET_INSTANCE_PROC(CreateXlibSurfaceKHR);
- GET_INSTANCE_PROC(GetPhysicalDeviceXlibPresentationSupportKHR);
- }
- if (globalInfo.HasExt(InstanceExt::XcbSurface)) {
- GET_INSTANCE_PROC(CreateXcbSurfaceKHR);
- GET_INSTANCE_PROC(GetPhysicalDeviceXcbPresentationSupportKHR);
- }
+ if (globalInfo.HasExt(InstanceExt::XlibSurface)) {
+ GET_INSTANCE_PROC(CreateXlibSurfaceKHR);
+ GET_INSTANCE_PROC(GetPhysicalDeviceXlibPresentationSupportKHR);
+ }
+ if (globalInfo.HasExt(InstanceExt::XcbSurface)) {
+ GET_INSTANCE_PROC(CreateXcbSurfaceKHR);
+ GET_INSTANCE_PROC(GetPhysicalDeviceXcbPresentationSupportKHR);
+ }
#endif // defined(DAWN_USE_X11)
- return {};
+ return {};
+}
+
+#define GET_DEVICE_PROC(name) \
+ do { \
+ name = AsVkFn<PFN_vk##name>(GetDeviceProcAddr(device, "vk" #name)); \
+ if (name == nullptr) { \
+ return DAWN_INTERNAL_ERROR(std::string("Couldn't get proc vk") + #name); \
+ } \
+ } while (0)
+
+MaybeError VulkanFunctions::LoadDeviceProcs(VkDevice device, const VulkanDeviceInfo& deviceInfo) {
+ GET_DEVICE_PROC(AllocateCommandBuffers);
+ GET_DEVICE_PROC(AllocateDescriptorSets);
+ GET_DEVICE_PROC(AllocateMemory);
+ GET_DEVICE_PROC(BeginCommandBuffer);
+ GET_DEVICE_PROC(BindBufferMemory);
+ GET_DEVICE_PROC(BindImageMemory);
+ GET_DEVICE_PROC(CmdBeginQuery);
+ GET_DEVICE_PROC(CmdBeginRenderPass);
+ GET_DEVICE_PROC(CmdBindDescriptorSets);
+ GET_DEVICE_PROC(CmdBindIndexBuffer);
+ GET_DEVICE_PROC(CmdBindPipeline);
+ GET_DEVICE_PROC(CmdBindVertexBuffers);
+ GET_DEVICE_PROC(CmdBlitImage);
+ GET_DEVICE_PROC(CmdClearAttachments);
+ GET_DEVICE_PROC(CmdClearColorImage);
+ GET_DEVICE_PROC(CmdClearDepthStencilImage);
+ GET_DEVICE_PROC(CmdCopyBuffer);
+ GET_DEVICE_PROC(CmdCopyBufferToImage);
+ GET_DEVICE_PROC(CmdCopyImage);
+ GET_DEVICE_PROC(CmdCopyImageToBuffer);
+ GET_DEVICE_PROC(CmdCopyQueryPoolResults);
+ GET_DEVICE_PROC(CmdDispatch);
+ GET_DEVICE_PROC(CmdDispatchIndirect);
+ GET_DEVICE_PROC(CmdDraw);
+ GET_DEVICE_PROC(CmdDrawIndexed);
+ GET_DEVICE_PROC(CmdDrawIndexedIndirect);
+ GET_DEVICE_PROC(CmdDrawIndirect);
+ GET_DEVICE_PROC(CmdEndQuery);
+ GET_DEVICE_PROC(CmdEndRenderPass);
+ GET_DEVICE_PROC(CmdExecuteCommands);
+ GET_DEVICE_PROC(CmdFillBuffer);
+ GET_DEVICE_PROC(CmdNextSubpass);
+ GET_DEVICE_PROC(CmdPipelineBarrier);
+ GET_DEVICE_PROC(CmdPushConstants);
+ GET_DEVICE_PROC(CmdResetEvent);
+ GET_DEVICE_PROC(CmdResetQueryPool);
+ GET_DEVICE_PROC(CmdResolveImage);
+ GET_DEVICE_PROC(CmdSetBlendConstants);
+ GET_DEVICE_PROC(CmdSetDepthBias);
+ GET_DEVICE_PROC(CmdSetDepthBounds);
+ GET_DEVICE_PROC(CmdSetEvent);
+ GET_DEVICE_PROC(CmdSetLineWidth);
+ GET_DEVICE_PROC(CmdSetScissor);
+ GET_DEVICE_PROC(CmdSetStencilCompareMask);
+ GET_DEVICE_PROC(CmdSetStencilReference);
+ GET_DEVICE_PROC(CmdSetStencilWriteMask);
+ GET_DEVICE_PROC(CmdSetViewport);
+ GET_DEVICE_PROC(CmdUpdateBuffer);
+ GET_DEVICE_PROC(CmdWaitEvents);
+ GET_DEVICE_PROC(CmdWriteTimestamp);
+ GET_DEVICE_PROC(CreateBuffer);
+ GET_DEVICE_PROC(CreateBufferView);
+ GET_DEVICE_PROC(CreateCommandPool);
+ GET_DEVICE_PROC(CreateComputePipelines);
+ GET_DEVICE_PROC(CreateDescriptorPool);
+ GET_DEVICE_PROC(CreateDescriptorSetLayout);
+ GET_DEVICE_PROC(CreateEvent);
+ GET_DEVICE_PROC(CreateFence);
+ GET_DEVICE_PROC(CreateFramebuffer);
+ GET_DEVICE_PROC(CreateGraphicsPipelines);
+ GET_DEVICE_PROC(CreateImage);
+ GET_DEVICE_PROC(CreateImageView);
+ GET_DEVICE_PROC(CreatePipelineCache);
+ GET_DEVICE_PROC(CreatePipelineLayout);
+ GET_DEVICE_PROC(CreateQueryPool);
+ GET_DEVICE_PROC(CreateRenderPass);
+ GET_DEVICE_PROC(CreateSampler);
+ GET_DEVICE_PROC(CreateSemaphore);
+ GET_DEVICE_PROC(CreateShaderModule);
+ GET_DEVICE_PROC(DestroyBuffer);
+ GET_DEVICE_PROC(DestroyBufferView);
+ GET_DEVICE_PROC(DestroyCommandPool);
+ GET_DEVICE_PROC(DestroyDescriptorPool);
+ GET_DEVICE_PROC(DestroyDescriptorSetLayout);
+ GET_DEVICE_PROC(DestroyEvent);
+ GET_DEVICE_PROC(DestroyFence);
+ GET_DEVICE_PROC(DestroyFramebuffer);
+ GET_DEVICE_PROC(DestroyImage);
+ GET_DEVICE_PROC(DestroyImageView);
+ GET_DEVICE_PROC(DestroyPipeline);
+ GET_DEVICE_PROC(DestroyPipelineCache);
+ GET_DEVICE_PROC(DestroyPipelineLayout);
+ GET_DEVICE_PROC(DestroyQueryPool);
+ GET_DEVICE_PROC(DestroyRenderPass);
+ GET_DEVICE_PROC(DestroySampler);
+ GET_DEVICE_PROC(DestroySemaphore);
+ GET_DEVICE_PROC(DestroyShaderModule);
+ GET_DEVICE_PROC(DeviceWaitIdle);
+ GET_DEVICE_PROC(EndCommandBuffer);
+ GET_DEVICE_PROC(FlushMappedMemoryRanges);
+ GET_DEVICE_PROC(FreeCommandBuffers);
+ GET_DEVICE_PROC(FreeDescriptorSets);
+ GET_DEVICE_PROC(FreeMemory);
+ GET_DEVICE_PROC(GetBufferMemoryRequirements);
+ GET_DEVICE_PROC(GetDeviceMemoryCommitment);
+ GET_DEVICE_PROC(GetDeviceQueue);
+ GET_DEVICE_PROC(GetEventStatus);
+ GET_DEVICE_PROC(GetFenceStatus);
+ GET_DEVICE_PROC(GetImageMemoryRequirements);
+ GET_DEVICE_PROC(GetImageSparseMemoryRequirements);
+ GET_DEVICE_PROC(GetImageSubresourceLayout);
+ GET_DEVICE_PROC(GetPipelineCacheData);
+ GET_DEVICE_PROC(GetQueryPoolResults);
+ GET_DEVICE_PROC(GetRenderAreaGranularity);
+ GET_DEVICE_PROC(InvalidateMappedMemoryRanges);
+ GET_DEVICE_PROC(MapMemory);
+ GET_DEVICE_PROC(MergePipelineCaches);
+ GET_DEVICE_PROC(QueueBindSparse);
+ GET_DEVICE_PROC(QueueSubmit);
+ GET_DEVICE_PROC(QueueWaitIdle);
+ GET_DEVICE_PROC(ResetCommandBuffer);
+ GET_DEVICE_PROC(ResetCommandPool);
+ GET_DEVICE_PROC(ResetDescriptorPool);
+ GET_DEVICE_PROC(ResetEvent);
+ GET_DEVICE_PROC(ResetFences);
+ GET_DEVICE_PROC(SetEvent);
+ GET_DEVICE_PROC(UnmapMemory);
+ GET_DEVICE_PROC(UpdateDescriptorSets);
+ GET_DEVICE_PROC(WaitForFences);
+
+ if (deviceInfo.HasExt(DeviceExt::ExternalMemoryFD)) {
+ GET_DEVICE_PROC(GetMemoryFdKHR);
+ GET_DEVICE_PROC(GetMemoryFdPropertiesKHR);
}
-#define GET_DEVICE_PROC(name) \
- do { \
- name = reinterpret_cast<decltype(name)>(GetDeviceProcAddr(device, "vk" #name)); \
- if (name == nullptr) { \
- return DAWN_INTERNAL_ERROR(std::string("Couldn't get proc vk") + #name); \
- } \
- } while (0)
+ if (deviceInfo.HasExt(DeviceExt::ExternalSemaphoreFD)) {
+ GET_DEVICE_PROC(ImportSemaphoreFdKHR);
+ GET_DEVICE_PROC(GetSemaphoreFdKHR);
+ }
- MaybeError VulkanFunctions::LoadDeviceProcs(VkDevice device,
- const VulkanDeviceInfo& deviceInfo) {
- GET_DEVICE_PROC(AllocateCommandBuffers);
- GET_DEVICE_PROC(AllocateDescriptorSets);
- GET_DEVICE_PROC(AllocateMemory);
- GET_DEVICE_PROC(BeginCommandBuffer);
- GET_DEVICE_PROC(BindBufferMemory);
- GET_DEVICE_PROC(BindImageMemory);
- GET_DEVICE_PROC(CmdBeginQuery);
- GET_DEVICE_PROC(CmdBeginRenderPass);
- GET_DEVICE_PROC(CmdBindDescriptorSets);
- GET_DEVICE_PROC(CmdBindIndexBuffer);
- GET_DEVICE_PROC(CmdBindPipeline);
- GET_DEVICE_PROC(CmdBindVertexBuffers);
- GET_DEVICE_PROC(CmdBlitImage);
- GET_DEVICE_PROC(CmdClearAttachments);
- GET_DEVICE_PROC(CmdClearColorImage);
- GET_DEVICE_PROC(CmdClearDepthStencilImage);
- GET_DEVICE_PROC(CmdCopyBuffer);
- GET_DEVICE_PROC(CmdCopyBufferToImage);
- GET_DEVICE_PROC(CmdCopyImage);
- GET_DEVICE_PROC(CmdCopyImageToBuffer);
- GET_DEVICE_PROC(CmdCopyQueryPoolResults);
- GET_DEVICE_PROC(CmdDispatch);
- GET_DEVICE_PROC(CmdDispatchIndirect);
- GET_DEVICE_PROC(CmdDraw);
- GET_DEVICE_PROC(CmdDrawIndexed);
- GET_DEVICE_PROC(CmdDrawIndexedIndirect);
- GET_DEVICE_PROC(CmdDrawIndirect);
- GET_DEVICE_PROC(CmdEndQuery);
- GET_DEVICE_PROC(CmdEndRenderPass);
- GET_DEVICE_PROC(CmdExecuteCommands);
- GET_DEVICE_PROC(CmdFillBuffer);
- GET_DEVICE_PROC(CmdNextSubpass);
- GET_DEVICE_PROC(CmdPipelineBarrier);
- GET_DEVICE_PROC(CmdPushConstants);
- GET_DEVICE_PROC(CmdResetEvent);
- GET_DEVICE_PROC(CmdResetQueryPool);
- GET_DEVICE_PROC(CmdResolveImage);
- GET_DEVICE_PROC(CmdSetBlendConstants);
- GET_DEVICE_PROC(CmdSetDepthBias);
- GET_DEVICE_PROC(CmdSetDepthBounds);
- GET_DEVICE_PROC(CmdSetEvent);
- GET_DEVICE_PROC(CmdSetLineWidth);
- GET_DEVICE_PROC(CmdSetScissor);
- GET_DEVICE_PROC(CmdSetStencilCompareMask);
- GET_DEVICE_PROC(CmdSetStencilReference);
- GET_DEVICE_PROC(CmdSetStencilWriteMask);
- GET_DEVICE_PROC(CmdSetViewport);
- GET_DEVICE_PROC(CmdUpdateBuffer);
- GET_DEVICE_PROC(CmdWaitEvents);
- GET_DEVICE_PROC(CmdWriteTimestamp);
- GET_DEVICE_PROC(CreateBuffer);
- GET_DEVICE_PROC(CreateBufferView);
- GET_DEVICE_PROC(CreateCommandPool);
- GET_DEVICE_PROC(CreateComputePipelines);
- GET_DEVICE_PROC(CreateDescriptorPool);
- GET_DEVICE_PROC(CreateDescriptorSetLayout);
- GET_DEVICE_PROC(CreateEvent);
- GET_DEVICE_PROC(CreateFence);
- GET_DEVICE_PROC(CreateFramebuffer);
- GET_DEVICE_PROC(CreateGraphicsPipelines);
- GET_DEVICE_PROC(CreateImage);
- GET_DEVICE_PROC(CreateImageView);
- GET_DEVICE_PROC(CreatePipelineCache);
- GET_DEVICE_PROC(CreatePipelineLayout);
- GET_DEVICE_PROC(CreateQueryPool);
- GET_DEVICE_PROC(CreateRenderPass);
- GET_DEVICE_PROC(CreateSampler);
- GET_DEVICE_PROC(CreateSemaphore);
- GET_DEVICE_PROC(CreateShaderModule);
- GET_DEVICE_PROC(DestroyBuffer);
- GET_DEVICE_PROC(DestroyBufferView);
- GET_DEVICE_PROC(DestroyCommandPool);
- GET_DEVICE_PROC(DestroyDescriptorPool);
- GET_DEVICE_PROC(DestroyDescriptorSetLayout);
- GET_DEVICE_PROC(DestroyEvent);
- GET_DEVICE_PROC(DestroyFence);
- GET_DEVICE_PROC(DestroyFramebuffer);
- GET_DEVICE_PROC(DestroyImage);
- GET_DEVICE_PROC(DestroyImageView);
- GET_DEVICE_PROC(DestroyPipeline);
- GET_DEVICE_PROC(DestroyPipelineCache);
- GET_DEVICE_PROC(DestroyPipelineLayout);
- GET_DEVICE_PROC(DestroyQueryPool);
- GET_DEVICE_PROC(DestroyRenderPass);
- GET_DEVICE_PROC(DestroySampler);
- GET_DEVICE_PROC(DestroySemaphore);
- GET_DEVICE_PROC(DestroyShaderModule);
- GET_DEVICE_PROC(DeviceWaitIdle);
- GET_DEVICE_PROC(EndCommandBuffer);
- GET_DEVICE_PROC(FlushMappedMemoryRanges);
- GET_DEVICE_PROC(FreeCommandBuffers);
- GET_DEVICE_PROC(FreeDescriptorSets);
- GET_DEVICE_PROC(FreeMemory);
- GET_DEVICE_PROC(GetBufferMemoryRequirements);
- GET_DEVICE_PROC(GetDeviceMemoryCommitment);
- GET_DEVICE_PROC(GetDeviceQueue);
- GET_DEVICE_PROC(GetEventStatus);
- GET_DEVICE_PROC(GetFenceStatus);
- GET_DEVICE_PROC(GetImageMemoryRequirements);
- GET_DEVICE_PROC(GetImageSparseMemoryRequirements);
- GET_DEVICE_PROC(GetImageSubresourceLayout);
- GET_DEVICE_PROC(GetPipelineCacheData);
- GET_DEVICE_PROC(GetQueryPoolResults);
- GET_DEVICE_PROC(GetRenderAreaGranularity);
- GET_DEVICE_PROC(InvalidateMappedMemoryRanges);
- GET_DEVICE_PROC(MapMemory);
- GET_DEVICE_PROC(MergePipelineCaches);
- GET_DEVICE_PROC(QueueBindSparse);
- GET_DEVICE_PROC(QueueSubmit);
- GET_DEVICE_PROC(QueueWaitIdle);
- GET_DEVICE_PROC(ResetCommandBuffer);
- GET_DEVICE_PROC(ResetCommandPool);
- GET_DEVICE_PROC(ResetDescriptorPool);
- GET_DEVICE_PROC(ResetEvent);
- GET_DEVICE_PROC(ResetFences);
- GET_DEVICE_PROC(SetEvent);
- GET_DEVICE_PROC(UnmapMemory);
- GET_DEVICE_PROC(UpdateDescriptorSets);
- GET_DEVICE_PROC(WaitForFences);
-
- if (deviceInfo.HasExt(DeviceExt::ExternalMemoryFD)) {
- GET_DEVICE_PROC(GetMemoryFdKHR);
- GET_DEVICE_PROC(GetMemoryFdPropertiesKHR);
- }
-
- if (deviceInfo.HasExt(DeviceExt::ExternalSemaphoreFD)) {
- GET_DEVICE_PROC(ImportSemaphoreFdKHR);
- GET_DEVICE_PROC(GetSemaphoreFdKHR);
- }
-
- if (deviceInfo.HasExt(DeviceExt::Swapchain)) {
- GET_DEVICE_PROC(CreateSwapchainKHR);
- GET_DEVICE_PROC(DestroySwapchainKHR);
- GET_DEVICE_PROC(GetSwapchainImagesKHR);
- GET_DEVICE_PROC(AcquireNextImageKHR);
- GET_DEVICE_PROC(QueuePresentKHR);
- }
-
- if (deviceInfo.HasExt(DeviceExt::GetMemoryRequirements2)) {
- GET_DEVICE_PROC(GetBufferMemoryRequirements2);
- GET_DEVICE_PROC(GetImageMemoryRequirements2);
- GET_DEVICE_PROC(GetImageSparseMemoryRequirements2);
- }
+ if (deviceInfo.HasExt(DeviceExt::Swapchain)) {
+ GET_DEVICE_PROC(CreateSwapchainKHR);
+ GET_DEVICE_PROC(DestroySwapchainKHR);
+ GET_DEVICE_PROC(GetSwapchainImagesKHR);
+ GET_DEVICE_PROC(AcquireNextImageKHR);
+ GET_DEVICE_PROC(QueuePresentKHR);
+ }
+
+ if (deviceInfo.HasExt(DeviceExt::GetMemoryRequirements2)) {
+ GET_DEVICE_PROC(GetBufferMemoryRequirements2);
+ GET_DEVICE_PROC(GetImageMemoryRequirements2);
+ GET_DEVICE_PROC(GetImageSparseMemoryRequirements2);
+ }
#if VK_USE_PLATFORM_FUCHSIA
- if (deviceInfo.HasExt(DeviceExt::ExternalMemoryZirconHandle)) {
- GET_DEVICE_PROC(GetMemoryZirconHandleFUCHSIA);
- GET_DEVICE_PROC(GetMemoryZirconHandlePropertiesFUCHSIA);
- }
-
- if (deviceInfo.HasExt(DeviceExt::ExternalSemaphoreZirconHandle)) {
- GET_DEVICE_PROC(ImportSemaphoreZirconHandleFUCHSIA);
- GET_DEVICE_PROC(GetSemaphoreZirconHandleFUCHSIA);
- }
-#endif
+ if (deviceInfo.HasExt(DeviceExt::ExternalMemoryZirconHandle)) {
+ GET_DEVICE_PROC(GetMemoryZirconHandleFUCHSIA);
+ GET_DEVICE_PROC(GetMemoryZirconHandlePropertiesFUCHSIA);
+ }
- return {};
+ if (deviceInfo.HasExt(DeviceExt::ExternalSemaphoreZirconHandle)) {
+ GET_DEVICE_PROC(ImportSemaphoreZirconHandleFUCHSIA);
+ GET_DEVICE_PROC(GetSemaphoreZirconHandleFUCHSIA);
}
+#endif
+
+ return {};
+}
} // namespace dawn::native::vulkan
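The loading pattern in this file boils down to: resolve vkGetInstanceProcAddr from the dynamic library, resolve each entry point through it, and cast the generic PFN_vkVoidFunction to the concrete prototype (AsVkFn, which is a plain reinterpret_cast outside UBSAN builds). A minimal standalone sketch of that pattern, independent of Dawn's DynamicLib and MaybeError types:

    // Hypothetical standalone example (not Dawn code); only the Vulkan loader API is real.
    #include <vulkan/vulkan.h>
    #include <cstdio>

    bool LoadCreateInstance(PFN_vkGetInstanceProcAddr getInstanceProcAddr,
                            PFN_vkCreateInstance* outCreateInstance) {
        // vkGetInstanceProcAddr returns a generic PFN_vkVoidFunction which must be cast to
        // the concrete prototype before it can be called.
        *outCreateInstance = reinterpret_cast<PFN_vkCreateInstance>(
            getInstanceProcAddr(nullptr, "vkCreateInstance"));
        if (*outCreateInstance == nullptr) {
            std::fprintf(stderr, "Couldn't get proc vkCreateInstance\n");
            return false;
        }
        return true;
    }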
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/VulkanFunctions.h b/chromium/third_party/dawn/src/dawn/native/vulkan/VulkanFunctions.h
index 447c98d1831..c4988e91eae 100644
--- a/chromium/third_party/dawn/src/dawn/native/vulkan/VulkanFunctions.h
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/VulkanFunctions.h
@@ -15,6 +15,7 @@
#ifndef SRC_DAWN_NATIVE_VULKAN_VULKANFUNCTIONS_H_
#define SRC_DAWN_NATIVE_VULKAN_VULKANFUNCTIONS_H_
+#include "dawn/common/Compiler.h"
#include "dawn/common/vulkan_platform.h"
#include "dawn/native/Error.h"
@@ -23,307 +24,333 @@ class DynamicLib;
namespace dawn::native::vulkan {
- struct VulkanGlobalInfo;
- struct VulkanDeviceInfo;
-
- // Stores the Vulkan entry points. Also loads them from the dynamic library
- // and the vkGet*ProcAddress entry points.
- struct VulkanFunctions {
- MaybeError LoadGlobalProcs(const DynamicLib& vulkanLib);
- MaybeError LoadInstanceProcs(VkInstance instance, const VulkanGlobalInfo& globalInfo);
- MaybeError LoadDeviceProcs(VkDevice device, const VulkanDeviceInfo& deviceInfo);
-
- // ---------- Global procs
-
- // Initial proc from which we can get all the others
- PFN_vkGetInstanceProcAddr GetInstanceProcAddr = nullptr;
-
- PFN_vkCreateInstance CreateInstance = nullptr;
- PFN_vkEnumerateInstanceExtensionProperties EnumerateInstanceExtensionProperties = nullptr;
- PFN_vkEnumerateInstanceLayerProperties EnumerateInstanceLayerProperties = nullptr;
- // DestroyInstance isn't technically a global proc but we want to be able to use it
- // before querying the instance procs in case we need to error out during initialization.
- PFN_vkDestroyInstance DestroyInstance = nullptr;
-
- // Core Vulkan 1.1
- PFN_vkEnumerateInstanceVersion EnumerateInstanceVersion = nullptr;
-
- // ---------- Instance procs
-
- // Core Vulkan 1.0
- PFN_vkCreateDevice CreateDevice = nullptr;
- PFN_vkEnumerateDeviceExtensionProperties EnumerateDeviceExtensionProperties = nullptr;
- PFN_vkEnumerateDeviceLayerProperties EnumerateDeviceLayerProperties = nullptr;
- PFN_vkEnumeratePhysicalDevices EnumeratePhysicalDevices = nullptr;
- PFN_vkGetDeviceProcAddr GetDeviceProcAddr = nullptr;
- PFN_vkGetPhysicalDeviceFeatures GetPhysicalDeviceFeatures = nullptr;
- PFN_vkGetPhysicalDeviceFormatProperties GetPhysicalDeviceFormatProperties = nullptr;
- PFN_vkGetPhysicalDeviceImageFormatProperties GetPhysicalDeviceImageFormatProperties =
- nullptr;
- PFN_vkGetPhysicalDeviceMemoryProperties GetPhysicalDeviceMemoryProperties = nullptr;
- PFN_vkGetPhysicalDeviceProperties GetPhysicalDeviceProperties = nullptr;
- PFN_vkGetPhysicalDeviceQueueFamilyProperties GetPhysicalDeviceQueueFamilyProperties =
- nullptr;
- PFN_vkGetPhysicalDeviceSparseImageFormatProperties
- GetPhysicalDeviceSparseImageFormatProperties = nullptr;
- // Not technically an instance proc but we want to be able to use it as soon as the
- // device is created.
- PFN_vkDestroyDevice DestroyDevice = nullptr;
-
- // VK_EXT_debug_utils
- PFN_vkCmdBeginDebugUtilsLabelEXT CmdBeginDebugUtilsLabelEXT = nullptr;
- PFN_vkCmdEndDebugUtilsLabelEXT CmdEndDebugUtilsLabelEXT = nullptr;
- PFN_vkCmdInsertDebugUtilsLabelEXT CmdInsertDebugUtilsLabelEXT = nullptr;
- PFN_vkCreateDebugUtilsMessengerEXT CreateDebugUtilsMessengerEXT = nullptr;
- PFN_vkDestroyDebugUtilsMessengerEXT DestroyDebugUtilsMessengerEXT = nullptr;
- PFN_vkQueueBeginDebugUtilsLabelEXT QueueBeginDebugUtilsLabelEXT = nullptr;
- PFN_vkQueueEndDebugUtilsLabelEXT QueueEndDebugUtilsLabelEXT = nullptr;
- PFN_vkQueueInsertDebugUtilsLabelEXT QueueInsertDebugUtilsLabelEXT = nullptr;
- PFN_vkSetDebugUtilsObjectNameEXT SetDebugUtilsObjectNameEXT = nullptr;
- PFN_vkSetDebugUtilsObjectTagEXT SetDebugUtilsObjectTagEXT = nullptr;
- PFN_vkSubmitDebugUtilsMessageEXT SubmitDebugUtilsMessageEXT = nullptr;
-
- // VK_KHR_surface
- PFN_vkDestroySurfaceKHR DestroySurfaceKHR = nullptr;
- PFN_vkGetPhysicalDeviceSurfaceSupportKHR GetPhysicalDeviceSurfaceSupportKHR = nullptr;
- PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR GetPhysicalDeviceSurfaceCapabilitiesKHR =
- nullptr;
- PFN_vkGetPhysicalDeviceSurfaceFormatsKHR GetPhysicalDeviceSurfaceFormatsKHR = nullptr;
- PFN_vkGetPhysicalDeviceSurfacePresentModesKHR GetPhysicalDeviceSurfacePresentModesKHR =
- nullptr;
-
- // Core Vulkan 1.1 promoted extensions, set if either the core version or the extension is
- // present.
-
- // VK_KHR_external_memory_capabilities
- PFN_vkGetPhysicalDeviceExternalBufferProperties GetPhysicalDeviceExternalBufferProperties =
- nullptr;
-
- // VK_KHR_external_semaphore_capabilities
- PFN_vkGetPhysicalDeviceExternalSemaphoreProperties
- GetPhysicalDeviceExternalSemaphoreProperties = nullptr;
-
- // VK_KHR_get_physical_device_properties2
- PFN_vkGetPhysicalDeviceFeatures2 GetPhysicalDeviceFeatures2 = nullptr;
- PFN_vkGetPhysicalDeviceProperties2 GetPhysicalDeviceProperties2 = nullptr;
- PFN_vkGetPhysicalDeviceFormatProperties2 GetPhysicalDeviceFormatProperties2 = nullptr;
- PFN_vkGetPhysicalDeviceImageFormatProperties2 GetPhysicalDeviceImageFormatProperties2 =
- nullptr;
- PFN_vkGetPhysicalDeviceQueueFamilyProperties2 GetPhysicalDeviceQueueFamilyProperties2 =
- nullptr;
- PFN_vkGetPhysicalDeviceMemoryProperties2 GetPhysicalDeviceMemoryProperties2 = nullptr;
- PFN_vkGetPhysicalDeviceSparseImageFormatProperties2
- GetPhysicalDeviceSparseImageFormatProperties2 = nullptr;
+struct VulkanGlobalInfo;
+struct VulkanDeviceInfo;
+
+#if defined(UNDEFINED_SANITIZER) && DAWN_COMPILER_IS(CLANG)
+#define DAWN_NO_SANITIZE_VK_FN 1
+#else
+#define DAWN_NO_SANITIZE_VK_FN 0
+#endif
+
+template <typename F>
+struct VkFnImpl;
+
+// Override the type of Vulkan functions to be a bound std::function if
+// DAWN_NO_SANITIZE_VK_FN is set. See comment at AsVkNoSanitizeFn in VulkanFunctions.cpp
+// for more information.
+template <typename R, typename... Args>
+struct VkFnImpl<R(VKAPI_PTR*)(Args...)> {
+#if DAWN_NO_SANITIZE_VK_FN
+ using type = std::function<R(Args...)>;
+#else
+ using type = R(VKAPI_PTR*)(Args...);
+#endif
+};
+
+template <typename F>
+using VkFn = typename VkFnImpl<F>::type;
+
+// Stores the Vulkan entry points. Also loads them from the dynamic library
+// and the vkGet*ProcAddress entry points.
+struct VulkanFunctions {
+ MaybeError LoadGlobalProcs(const DynamicLib& vulkanLib);
+ MaybeError LoadInstanceProcs(VkInstance instance, const VulkanGlobalInfo& globalInfo);
+ MaybeError LoadDeviceProcs(VkDevice device, const VulkanDeviceInfo& deviceInfo);
+
+ // ---------- Global procs
+
+ // Initial proc from which we can get all the others
+ PFN_vkGetInstanceProcAddr GetInstanceProcAddr = nullptr;
+
+ VkFn<PFN_vkCreateInstance> CreateInstance = nullptr;
+ VkFn<PFN_vkEnumerateInstanceExtensionProperties> EnumerateInstanceExtensionProperties = nullptr;
+ VkFn<PFN_vkEnumerateInstanceLayerProperties> EnumerateInstanceLayerProperties = nullptr;
+ // DestroyInstance isn't technically a global proc but we want to be able to use it
+ // before querying the instance procs in case we need to error out during initialization.
+ VkFn<PFN_vkDestroyInstance> DestroyInstance = nullptr;
+
+ // Core Vulkan 1.1
+ VkFn<PFN_vkEnumerateInstanceVersion> EnumerateInstanceVersion = nullptr;
+
+ // ---------- Instance procs
+
+ // Core Vulkan 1.0
+ VkFn<PFN_vkCreateDevice> CreateDevice = nullptr;
+ VkFn<PFN_vkEnumerateDeviceExtensionProperties> EnumerateDeviceExtensionProperties = nullptr;
+ VkFn<PFN_vkEnumerateDeviceLayerProperties> EnumerateDeviceLayerProperties = nullptr;
+ VkFn<PFN_vkEnumeratePhysicalDevices> EnumeratePhysicalDevices = nullptr;
+ VkFn<PFN_vkGetDeviceProcAddr> GetDeviceProcAddr = nullptr;
+ VkFn<PFN_vkGetPhysicalDeviceFeatures> GetPhysicalDeviceFeatures = nullptr;
+ VkFn<PFN_vkGetPhysicalDeviceFormatProperties> GetPhysicalDeviceFormatProperties = nullptr;
+ VkFn<PFN_vkGetPhysicalDeviceImageFormatProperties> GetPhysicalDeviceImageFormatProperties =
+ nullptr;
+ VkFn<PFN_vkGetPhysicalDeviceMemoryProperties> GetPhysicalDeviceMemoryProperties = nullptr;
+ VkFn<PFN_vkGetPhysicalDeviceProperties> GetPhysicalDeviceProperties = nullptr;
+ VkFn<PFN_vkGetPhysicalDeviceQueueFamilyProperties> GetPhysicalDeviceQueueFamilyProperties =
+ nullptr;
+ VkFn<PFN_vkGetPhysicalDeviceSparseImageFormatProperties>
+ GetPhysicalDeviceSparseImageFormatProperties = nullptr;
+ // Not technically an instance proc but we want to be able to use it as soon as the
+ // device is created.
+ VkFn<PFN_vkDestroyDevice> DestroyDevice = nullptr;
+
+ // VK_EXT_debug_utils
+ VkFn<PFN_vkCmdBeginDebugUtilsLabelEXT> CmdBeginDebugUtilsLabelEXT = nullptr;
+ VkFn<PFN_vkCmdEndDebugUtilsLabelEXT> CmdEndDebugUtilsLabelEXT = nullptr;
+ VkFn<PFN_vkCmdInsertDebugUtilsLabelEXT> CmdInsertDebugUtilsLabelEXT = nullptr;
+ VkFn<PFN_vkCreateDebugUtilsMessengerEXT> CreateDebugUtilsMessengerEXT = nullptr;
+ VkFn<PFN_vkDestroyDebugUtilsMessengerEXT> DestroyDebugUtilsMessengerEXT = nullptr;
+ VkFn<PFN_vkQueueBeginDebugUtilsLabelEXT> QueueBeginDebugUtilsLabelEXT = nullptr;
+ VkFn<PFN_vkQueueEndDebugUtilsLabelEXT> QueueEndDebugUtilsLabelEXT = nullptr;
+ VkFn<PFN_vkQueueInsertDebugUtilsLabelEXT> QueueInsertDebugUtilsLabelEXT = nullptr;
+ VkFn<PFN_vkSetDebugUtilsObjectNameEXT> SetDebugUtilsObjectNameEXT = nullptr;
+ VkFn<PFN_vkSetDebugUtilsObjectTagEXT> SetDebugUtilsObjectTagEXT = nullptr;
+ VkFn<PFN_vkSubmitDebugUtilsMessageEXT> SubmitDebugUtilsMessageEXT = nullptr;
+
+ // VK_KHR_surface
+ VkFn<PFN_vkDestroySurfaceKHR> DestroySurfaceKHR = nullptr;
+ VkFn<PFN_vkGetPhysicalDeviceSurfaceSupportKHR> GetPhysicalDeviceSurfaceSupportKHR = nullptr;
+ VkFn<PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR> GetPhysicalDeviceSurfaceCapabilitiesKHR =
+ nullptr;
+ VkFn<PFN_vkGetPhysicalDeviceSurfaceFormatsKHR> GetPhysicalDeviceSurfaceFormatsKHR = nullptr;
+ VkFn<PFN_vkGetPhysicalDeviceSurfacePresentModesKHR> GetPhysicalDeviceSurfacePresentModesKHR =
+ nullptr;
+
+ // Core Vulkan 1.1 promoted extensions, set if either the core version or the extension is
+ // present.
+
+ // VK_KHR_external_memory_capabilities
+ VkFn<PFN_vkGetPhysicalDeviceExternalBufferProperties>
+ GetPhysicalDeviceExternalBufferProperties = nullptr;
+
+ // VK_KHR_external_semaphore_capabilities
+ VkFn<PFN_vkGetPhysicalDeviceExternalSemaphoreProperties>
+ GetPhysicalDeviceExternalSemaphoreProperties = nullptr;
+
+ // VK_KHR_get_physical_device_properties2
+ VkFn<PFN_vkGetPhysicalDeviceFeatures2> GetPhysicalDeviceFeatures2 = nullptr;
+ VkFn<PFN_vkGetPhysicalDeviceProperties2> GetPhysicalDeviceProperties2 = nullptr;
+ VkFn<PFN_vkGetPhysicalDeviceFormatProperties2> GetPhysicalDeviceFormatProperties2 = nullptr;
+ VkFn<PFN_vkGetPhysicalDeviceImageFormatProperties2> GetPhysicalDeviceImageFormatProperties2 =
+ nullptr;
+ VkFn<PFN_vkGetPhysicalDeviceQueueFamilyProperties2> GetPhysicalDeviceQueueFamilyProperties2 =
+ nullptr;
+ VkFn<PFN_vkGetPhysicalDeviceMemoryProperties2> GetPhysicalDeviceMemoryProperties2 = nullptr;
+ VkFn<PFN_vkGetPhysicalDeviceSparseImageFormatProperties2>
+ GetPhysicalDeviceSparseImageFormatProperties2 = nullptr;
#if defined(VK_USE_PLATFORM_FUCHSIA)
- // FUCHSIA_image_pipe_surface
- PFN_vkCreateImagePipeSurfaceFUCHSIA CreateImagePipeSurfaceFUCHSIA = nullptr;
+ // FUCHSIA_image_pipe_surface
+ VkFn<PFN_vkCreateImagePipeSurfaceFUCHSIA> CreateImagePipeSurfaceFUCHSIA = nullptr;
#endif // defined(VK_USE_PLATFORM_FUCHSIA)
#if defined(DAWN_ENABLE_BACKEND_METAL)
- // EXT_metal_surface
- PFN_vkCreateMetalSurfaceEXT CreateMetalSurfaceEXT = nullptr;
+ // EXT_metal_surface
+ VkFn<PFN_vkCreateMetalSurfaceEXT> CreateMetalSurfaceEXT = nullptr;
#endif // defined(DAWN_ENABLE_BACKEND_METAL)
-#if defined(DAWN_PLATFORM_WINDOWS)
- // KHR_win32_surface
- PFN_vkCreateWin32SurfaceKHR CreateWin32SurfaceKHR = nullptr;
- PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR
- GetPhysicalDeviceWin32PresentationSupportKHR = nullptr;
-#endif // defined(DAWN_PLATFORM_WINDOWS)
+#if defined(DAWN_USE_WAYLAND)
+ // KHR_wayland_surface
+ PFN_vkCreateWaylandSurfaceKHR CreateWaylandSurfaceKHR = nullptr;
+ PFN_vkGetPhysicalDeviceWaylandPresentationSupportKHR
+ GetPhysicalDeviceWaylandPresentationSupportKHR = nullptr;
+#endif // defined(DAWN_USE_WAYLAND)
+
+#if DAWN_PLATFORM_IS(WINDOWS)
+ // KHR_win32_surface
+ VkFn<PFN_vkCreateWin32SurfaceKHR> CreateWin32SurfaceKHR = nullptr;
+ VkFn<PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR>
+ GetPhysicalDeviceWin32PresentationSupportKHR = nullptr;
+#endif // DAWN_PLATFORM_IS(WINDOWS)
-#if defined(DAWN_PLATFORM_ANDROID)
- PFN_vkCreateAndroidSurfaceKHR CreateAndroidSurfaceKHR = nullptr;
-#endif // defined(DAWN_PLATFORM_ANDROID)
+#if DAWN_PLATFORM_IS(ANDROID)
+ VkFn<PFN_vkCreateAndroidSurfaceKHR> CreateAndroidSurfaceKHR = nullptr;
+#endif // DAWN_PLATFORM_IS(ANDROID)
#if defined(DAWN_USE_X11)
- // KHR_xlib_surface
- PFN_vkCreateXlibSurfaceKHR CreateXlibSurfaceKHR = nullptr;
- PFN_vkGetPhysicalDeviceXlibPresentationSupportKHR
- GetPhysicalDeviceXlibPresentationSupportKHR = nullptr;
-
- // KHR_xcb_surface
- PFN_vkCreateXcbSurfaceKHR CreateXcbSurfaceKHR = nullptr;
- PFN_vkGetPhysicalDeviceXcbPresentationSupportKHR
- GetPhysicalDeviceXcbPresentationSupportKHR = nullptr;
+ // KHR_xlib_surface
+ VkFn<PFN_vkCreateXlibSurfaceKHR> CreateXlibSurfaceKHR = nullptr;
+ VkFn<PFN_vkGetPhysicalDeviceXlibPresentationSupportKHR>
+ GetPhysicalDeviceXlibPresentationSupportKHR = nullptr;
+
+ // KHR_xcb_surface
+ VkFn<PFN_vkCreateXcbSurfaceKHR> CreateXcbSurfaceKHR = nullptr;
+ VkFn<PFN_vkGetPhysicalDeviceXcbPresentationSupportKHR>
+ GetPhysicalDeviceXcbPresentationSupportKHR = nullptr;
#endif // defined(DAWN_USE_X11)
- // ---------- Device procs
-
- // Core Vulkan 1.0
- PFN_vkAllocateCommandBuffers AllocateCommandBuffers = nullptr;
- PFN_vkAllocateDescriptorSets AllocateDescriptorSets = nullptr;
- PFN_vkAllocateMemory AllocateMemory = nullptr;
- PFN_vkBeginCommandBuffer BeginCommandBuffer = nullptr;
- PFN_vkBindBufferMemory BindBufferMemory = nullptr;
- PFN_vkBindImageMemory BindImageMemory = nullptr;
- PFN_vkCmdBeginQuery CmdBeginQuery = nullptr;
- PFN_vkCmdBeginRenderPass CmdBeginRenderPass = nullptr;
- PFN_vkCmdBindDescriptorSets CmdBindDescriptorSets = nullptr;
- PFN_vkCmdBindIndexBuffer CmdBindIndexBuffer = nullptr;
- PFN_vkCmdBindPipeline CmdBindPipeline = nullptr;
- PFN_vkCmdBindVertexBuffers CmdBindVertexBuffers = nullptr;
- PFN_vkCmdBlitImage CmdBlitImage = nullptr;
- PFN_vkCmdClearAttachments CmdClearAttachments = nullptr;
- PFN_vkCmdClearColorImage CmdClearColorImage = nullptr;
- PFN_vkCmdClearDepthStencilImage CmdClearDepthStencilImage = nullptr;
- PFN_vkCmdCopyBuffer CmdCopyBuffer = nullptr;
- PFN_vkCmdCopyBufferToImage CmdCopyBufferToImage = nullptr;
- PFN_vkCmdCopyImage CmdCopyImage = nullptr;
- PFN_vkCmdCopyImageToBuffer CmdCopyImageToBuffer = nullptr;
- PFN_vkCmdCopyQueryPoolResults CmdCopyQueryPoolResults = nullptr;
- PFN_vkCmdDispatch CmdDispatch = nullptr;
- PFN_vkCmdDispatchIndirect CmdDispatchIndirect = nullptr;
- PFN_vkCmdDraw CmdDraw = nullptr;
- PFN_vkCmdDrawIndexed CmdDrawIndexed = nullptr;
- PFN_vkCmdDrawIndexedIndirect CmdDrawIndexedIndirect = nullptr;
- PFN_vkCmdDrawIndirect CmdDrawIndirect = nullptr;
- PFN_vkCmdEndQuery CmdEndQuery = nullptr;
- PFN_vkCmdEndRenderPass CmdEndRenderPass = nullptr;
- PFN_vkCmdExecuteCommands CmdExecuteCommands = nullptr;
- PFN_vkCmdFillBuffer CmdFillBuffer = nullptr;
- PFN_vkCmdNextSubpass CmdNextSubpass = nullptr;
- PFN_vkCmdPipelineBarrier CmdPipelineBarrier = nullptr;
- PFN_vkCmdPushConstants CmdPushConstants = nullptr;
- PFN_vkCmdResetEvent CmdResetEvent = nullptr;
- PFN_vkCmdResetQueryPool CmdResetQueryPool = nullptr;
- PFN_vkCmdResolveImage CmdResolveImage = nullptr;
- PFN_vkCmdSetBlendConstants CmdSetBlendConstants = nullptr;
- PFN_vkCmdSetDepthBias CmdSetDepthBias = nullptr;
- PFN_vkCmdSetDepthBounds CmdSetDepthBounds = nullptr;
- PFN_vkCmdSetEvent CmdSetEvent = nullptr;
- PFN_vkCmdSetLineWidth CmdSetLineWidth = nullptr;
- PFN_vkCmdSetScissor CmdSetScissor = nullptr;
- PFN_vkCmdSetStencilCompareMask CmdSetStencilCompareMask = nullptr;
- PFN_vkCmdSetStencilReference CmdSetStencilReference = nullptr;
- PFN_vkCmdSetStencilWriteMask CmdSetStencilWriteMask = nullptr;
- PFN_vkCmdSetViewport CmdSetViewport = nullptr;
- PFN_vkCmdUpdateBuffer CmdUpdateBuffer = nullptr;
- PFN_vkCmdWaitEvents CmdWaitEvents = nullptr;
- PFN_vkCmdWriteTimestamp CmdWriteTimestamp = nullptr;
- PFN_vkCreateBuffer CreateBuffer = nullptr;
- PFN_vkCreateBufferView CreateBufferView = nullptr;
- PFN_vkCreateCommandPool CreateCommandPool = nullptr;
- PFN_vkCreateComputePipelines CreateComputePipelines = nullptr;
- PFN_vkCreateDescriptorPool CreateDescriptorPool = nullptr;
- PFN_vkCreateDescriptorSetLayout CreateDescriptorSetLayout = nullptr;
- PFN_vkCreateEvent CreateEvent = nullptr;
- PFN_vkCreateFence CreateFence = nullptr;
- PFN_vkCreateFramebuffer CreateFramebuffer = nullptr;
- PFN_vkCreateGraphicsPipelines CreateGraphicsPipelines = nullptr;
- PFN_vkCreateImage CreateImage = nullptr;
- PFN_vkCreateImageView CreateImageView = nullptr;
- PFN_vkCreatePipelineCache CreatePipelineCache = nullptr;
- PFN_vkCreatePipelineLayout CreatePipelineLayout = nullptr;
- PFN_vkCreateQueryPool CreateQueryPool = nullptr;
- PFN_vkCreateRenderPass CreateRenderPass = nullptr;
- PFN_vkCreateSampler CreateSampler = nullptr;
- PFN_vkCreateSemaphore CreateSemaphore = nullptr;
- PFN_vkCreateShaderModule CreateShaderModule = nullptr;
- PFN_vkDestroyBuffer DestroyBuffer = nullptr;
- PFN_vkDestroyBufferView DestroyBufferView = nullptr;
- PFN_vkDestroyCommandPool DestroyCommandPool = nullptr;
- PFN_vkDestroyDescriptorPool DestroyDescriptorPool = nullptr;
- PFN_vkDestroyDescriptorSetLayout DestroyDescriptorSetLayout = nullptr;
- PFN_vkDestroyEvent DestroyEvent = nullptr;
- PFN_vkDestroyFence DestroyFence = nullptr;
- PFN_vkDestroyFramebuffer DestroyFramebuffer = nullptr;
- PFN_vkDestroyImage DestroyImage = nullptr;
- PFN_vkDestroyImageView DestroyImageView = nullptr;
- PFN_vkDestroyPipeline DestroyPipeline = nullptr;
- PFN_vkDestroyPipelineCache DestroyPipelineCache = nullptr;
- PFN_vkDestroyPipelineLayout DestroyPipelineLayout = nullptr;
- PFN_vkDestroyQueryPool DestroyQueryPool = nullptr;
- PFN_vkDestroyRenderPass DestroyRenderPass = nullptr;
- PFN_vkDestroySampler DestroySampler = nullptr;
- PFN_vkDestroySemaphore DestroySemaphore = nullptr;
- PFN_vkDestroyShaderModule DestroyShaderModule = nullptr;
- PFN_vkDeviceWaitIdle DeviceWaitIdle = nullptr;
- PFN_vkEndCommandBuffer EndCommandBuffer = nullptr;
- PFN_vkFlushMappedMemoryRanges FlushMappedMemoryRanges = nullptr;
- PFN_vkFreeCommandBuffers FreeCommandBuffers = nullptr;
- PFN_vkFreeDescriptorSets FreeDescriptorSets = nullptr;
- PFN_vkFreeMemory FreeMemory = nullptr;
- PFN_vkGetBufferMemoryRequirements GetBufferMemoryRequirements = nullptr;
- PFN_vkGetDeviceMemoryCommitment GetDeviceMemoryCommitment = nullptr;
- PFN_vkGetDeviceQueue GetDeviceQueue = nullptr;
- PFN_vkGetEventStatus GetEventStatus = nullptr;
- PFN_vkGetFenceStatus GetFenceStatus = nullptr;
- PFN_vkGetImageMemoryRequirements GetImageMemoryRequirements = nullptr;
- PFN_vkGetImageSparseMemoryRequirements GetImageSparseMemoryRequirements = nullptr;
- PFN_vkGetImageSubresourceLayout GetImageSubresourceLayout = nullptr;
- PFN_vkGetPipelineCacheData GetPipelineCacheData = nullptr;
- PFN_vkGetQueryPoolResults GetQueryPoolResults = nullptr;
- PFN_vkGetRenderAreaGranularity GetRenderAreaGranularity = nullptr;
- PFN_vkInvalidateMappedMemoryRanges InvalidateMappedMemoryRanges = nullptr;
- PFN_vkMapMemory MapMemory = nullptr;
- PFN_vkMergePipelineCaches MergePipelineCaches = nullptr;
- PFN_vkQueueBindSparse QueueBindSparse = nullptr;
- PFN_vkQueueSubmit QueueSubmit = nullptr;
- PFN_vkQueueWaitIdle QueueWaitIdle = nullptr;
- PFN_vkResetCommandBuffer ResetCommandBuffer = nullptr;
- PFN_vkResetCommandPool ResetCommandPool = nullptr;
- PFN_vkResetDescriptorPool ResetDescriptorPool = nullptr;
- PFN_vkResetEvent ResetEvent = nullptr;
- PFN_vkResetFences ResetFences = nullptr;
- PFN_vkSetEvent SetEvent = nullptr;
- PFN_vkUnmapMemory UnmapMemory = nullptr;
- PFN_vkUpdateDescriptorSets UpdateDescriptorSets = nullptr;
- PFN_vkWaitForFences WaitForFences = nullptr;
-
- // VK_KHR_external_memory_fd
- PFN_vkGetMemoryFdKHR GetMemoryFdKHR = nullptr;
- PFN_vkGetMemoryFdPropertiesKHR GetMemoryFdPropertiesKHR = nullptr;
-
- // VK_KHR_external_semaphore_fd
- PFN_vkImportSemaphoreFdKHR ImportSemaphoreFdKHR = nullptr;
- PFN_vkGetSemaphoreFdKHR GetSemaphoreFdKHR = nullptr;
-
- // VK_KHR_get_memory_requirements2
- PFN_vkGetBufferMemoryRequirements2KHR GetBufferMemoryRequirements2 = nullptr;
- PFN_vkGetImageMemoryRequirements2KHR GetImageMemoryRequirements2 = nullptr;
- PFN_vkGetImageSparseMemoryRequirements2KHR GetImageSparseMemoryRequirements2 = nullptr;
-
- // VK_KHR_swapchain
- PFN_vkCreateSwapchainKHR CreateSwapchainKHR = nullptr;
- PFN_vkDestroySwapchainKHR DestroySwapchainKHR = nullptr;
- PFN_vkGetSwapchainImagesKHR GetSwapchainImagesKHR = nullptr;
- PFN_vkAcquireNextImageKHR AcquireNextImageKHR = nullptr;
- PFN_vkQueuePresentKHR QueuePresentKHR = nullptr;
+ // ---------- Device procs
+
+ // Core Vulkan 1.0
+ VkFn<PFN_vkAllocateCommandBuffers> AllocateCommandBuffers = nullptr;
+ VkFn<PFN_vkAllocateDescriptorSets> AllocateDescriptorSets = nullptr;
+ VkFn<PFN_vkAllocateMemory> AllocateMemory = nullptr;
+ VkFn<PFN_vkBeginCommandBuffer> BeginCommandBuffer = nullptr;
+ VkFn<PFN_vkBindBufferMemory> BindBufferMemory = nullptr;
+ VkFn<PFN_vkBindImageMemory> BindImageMemory = nullptr;
+ VkFn<PFN_vkCmdBeginQuery> CmdBeginQuery = nullptr;
+ VkFn<PFN_vkCmdBeginRenderPass> CmdBeginRenderPass = nullptr;
+ VkFn<PFN_vkCmdBindDescriptorSets> CmdBindDescriptorSets = nullptr;
+ VkFn<PFN_vkCmdBindIndexBuffer> CmdBindIndexBuffer = nullptr;
+ VkFn<PFN_vkCmdBindPipeline> CmdBindPipeline = nullptr;
+ VkFn<PFN_vkCmdBindVertexBuffers> CmdBindVertexBuffers = nullptr;
+ VkFn<PFN_vkCmdBlitImage> CmdBlitImage = nullptr;
+ VkFn<PFN_vkCmdClearAttachments> CmdClearAttachments = nullptr;
+ VkFn<PFN_vkCmdClearColorImage> CmdClearColorImage = nullptr;
+ VkFn<PFN_vkCmdClearDepthStencilImage> CmdClearDepthStencilImage = nullptr;
+ VkFn<PFN_vkCmdCopyBuffer> CmdCopyBuffer = nullptr;
+ VkFn<PFN_vkCmdCopyBufferToImage> CmdCopyBufferToImage = nullptr;
+ VkFn<PFN_vkCmdCopyImage> CmdCopyImage = nullptr;
+ VkFn<PFN_vkCmdCopyImageToBuffer> CmdCopyImageToBuffer = nullptr;
+ VkFn<PFN_vkCmdCopyQueryPoolResults> CmdCopyQueryPoolResults = nullptr;
+ VkFn<PFN_vkCmdDispatch> CmdDispatch = nullptr;
+ VkFn<PFN_vkCmdDispatchIndirect> CmdDispatchIndirect = nullptr;
+ VkFn<PFN_vkCmdDraw> CmdDraw = nullptr;
+ VkFn<PFN_vkCmdDrawIndexed> CmdDrawIndexed = nullptr;
+ VkFn<PFN_vkCmdDrawIndexedIndirect> CmdDrawIndexedIndirect = nullptr;
+ VkFn<PFN_vkCmdDrawIndirect> CmdDrawIndirect = nullptr;
+ VkFn<PFN_vkCmdEndQuery> CmdEndQuery = nullptr;
+ VkFn<PFN_vkCmdEndRenderPass> CmdEndRenderPass = nullptr;
+ VkFn<PFN_vkCmdExecuteCommands> CmdExecuteCommands = nullptr;
+ VkFn<PFN_vkCmdFillBuffer> CmdFillBuffer = nullptr;
+ VkFn<PFN_vkCmdNextSubpass> CmdNextSubpass = nullptr;
+ VkFn<PFN_vkCmdPipelineBarrier> CmdPipelineBarrier = nullptr;
+ VkFn<PFN_vkCmdPushConstants> CmdPushConstants = nullptr;
+ VkFn<PFN_vkCmdResetEvent> CmdResetEvent = nullptr;
+ VkFn<PFN_vkCmdResetQueryPool> CmdResetQueryPool = nullptr;
+ VkFn<PFN_vkCmdResolveImage> CmdResolveImage = nullptr;
+ VkFn<PFN_vkCmdSetBlendConstants> CmdSetBlendConstants = nullptr;
+ VkFn<PFN_vkCmdSetDepthBias> CmdSetDepthBias = nullptr;
+ VkFn<PFN_vkCmdSetDepthBounds> CmdSetDepthBounds = nullptr;
+ VkFn<PFN_vkCmdSetEvent> CmdSetEvent = nullptr;
+ VkFn<PFN_vkCmdSetLineWidth> CmdSetLineWidth = nullptr;
+ VkFn<PFN_vkCmdSetScissor> CmdSetScissor = nullptr;
+ VkFn<PFN_vkCmdSetStencilCompareMask> CmdSetStencilCompareMask = nullptr;
+ VkFn<PFN_vkCmdSetStencilReference> CmdSetStencilReference = nullptr;
+ VkFn<PFN_vkCmdSetStencilWriteMask> CmdSetStencilWriteMask = nullptr;
+ VkFn<PFN_vkCmdSetViewport> CmdSetViewport = nullptr;
+ VkFn<PFN_vkCmdUpdateBuffer> CmdUpdateBuffer = nullptr;
+ VkFn<PFN_vkCmdWaitEvents> CmdWaitEvents = nullptr;
+ VkFn<PFN_vkCmdWriteTimestamp> CmdWriteTimestamp = nullptr;
+ VkFn<PFN_vkCreateBuffer> CreateBuffer = nullptr;
+ VkFn<PFN_vkCreateBufferView> CreateBufferView = nullptr;
+ VkFn<PFN_vkCreateCommandPool> CreateCommandPool = nullptr;
+ VkFn<PFN_vkCreateComputePipelines> CreateComputePipelines = nullptr;
+ VkFn<PFN_vkCreateDescriptorPool> CreateDescriptorPool = nullptr;
+ VkFn<PFN_vkCreateDescriptorSetLayout> CreateDescriptorSetLayout = nullptr;
+ VkFn<PFN_vkCreateEvent> CreateEvent = nullptr;
+ VkFn<PFN_vkCreateFence> CreateFence = nullptr;
+ VkFn<PFN_vkCreateFramebuffer> CreateFramebuffer = nullptr;
+ VkFn<PFN_vkCreateGraphicsPipelines> CreateGraphicsPipelines = nullptr;
+ VkFn<PFN_vkCreateImage> CreateImage = nullptr;
+ VkFn<PFN_vkCreateImageView> CreateImageView = nullptr;
+ VkFn<PFN_vkCreatePipelineCache> CreatePipelineCache = nullptr;
+ VkFn<PFN_vkCreatePipelineLayout> CreatePipelineLayout = nullptr;
+ VkFn<PFN_vkCreateQueryPool> CreateQueryPool = nullptr;
+ VkFn<PFN_vkCreateRenderPass> CreateRenderPass = nullptr;
+ VkFn<PFN_vkCreateSampler> CreateSampler = nullptr;
+ VkFn<PFN_vkCreateSemaphore> CreateSemaphore = nullptr;
+ VkFn<PFN_vkCreateShaderModule> CreateShaderModule = nullptr;
+ VkFn<PFN_vkDestroyBuffer> DestroyBuffer = nullptr;
+ VkFn<PFN_vkDestroyBufferView> DestroyBufferView = nullptr;
+ VkFn<PFN_vkDestroyCommandPool> DestroyCommandPool = nullptr;
+ VkFn<PFN_vkDestroyDescriptorPool> DestroyDescriptorPool = nullptr;
+ VkFn<PFN_vkDestroyDescriptorSetLayout> DestroyDescriptorSetLayout = nullptr;
+ VkFn<PFN_vkDestroyEvent> DestroyEvent = nullptr;
+ VkFn<PFN_vkDestroyFence> DestroyFence = nullptr;
+ VkFn<PFN_vkDestroyFramebuffer> DestroyFramebuffer = nullptr;
+ VkFn<PFN_vkDestroyImage> DestroyImage = nullptr;
+ VkFn<PFN_vkDestroyImageView> DestroyImageView = nullptr;
+ VkFn<PFN_vkDestroyPipeline> DestroyPipeline = nullptr;
+ VkFn<PFN_vkDestroyPipelineCache> DestroyPipelineCache = nullptr;
+ VkFn<PFN_vkDestroyPipelineLayout> DestroyPipelineLayout = nullptr;
+ VkFn<PFN_vkDestroyQueryPool> DestroyQueryPool = nullptr;
+ VkFn<PFN_vkDestroyRenderPass> DestroyRenderPass = nullptr;
+ VkFn<PFN_vkDestroySampler> DestroySampler = nullptr;
+ VkFn<PFN_vkDestroySemaphore> DestroySemaphore = nullptr;
+ VkFn<PFN_vkDestroyShaderModule> DestroyShaderModule = nullptr;
+ VkFn<PFN_vkDeviceWaitIdle> DeviceWaitIdle = nullptr;
+ VkFn<PFN_vkEndCommandBuffer> EndCommandBuffer = nullptr;
+ VkFn<PFN_vkFlushMappedMemoryRanges> FlushMappedMemoryRanges = nullptr;
+ VkFn<PFN_vkFreeCommandBuffers> FreeCommandBuffers = nullptr;
+ VkFn<PFN_vkFreeDescriptorSets> FreeDescriptorSets = nullptr;
+ VkFn<PFN_vkFreeMemory> FreeMemory = nullptr;
+ VkFn<PFN_vkGetBufferMemoryRequirements> GetBufferMemoryRequirements = nullptr;
+ VkFn<PFN_vkGetDeviceMemoryCommitment> GetDeviceMemoryCommitment = nullptr;
+ VkFn<PFN_vkGetDeviceQueue> GetDeviceQueue = nullptr;
+ VkFn<PFN_vkGetEventStatus> GetEventStatus = nullptr;
+ VkFn<PFN_vkGetFenceStatus> GetFenceStatus = nullptr;
+ VkFn<PFN_vkGetImageMemoryRequirements> GetImageMemoryRequirements = nullptr;
+ VkFn<PFN_vkGetImageSparseMemoryRequirements> GetImageSparseMemoryRequirements = nullptr;
+ VkFn<PFN_vkGetImageSubresourceLayout> GetImageSubresourceLayout = nullptr;
+ VkFn<PFN_vkGetPipelineCacheData> GetPipelineCacheData = nullptr;
+ VkFn<PFN_vkGetQueryPoolResults> GetQueryPoolResults = nullptr;
+ VkFn<PFN_vkGetRenderAreaGranularity> GetRenderAreaGranularity = nullptr;
+ VkFn<PFN_vkInvalidateMappedMemoryRanges> InvalidateMappedMemoryRanges = nullptr;
+ VkFn<PFN_vkMapMemory> MapMemory = nullptr;
+ VkFn<PFN_vkMergePipelineCaches> MergePipelineCaches = nullptr;
+ VkFn<PFN_vkQueueBindSparse> QueueBindSparse = nullptr;
+ VkFn<PFN_vkQueueSubmit> QueueSubmit = nullptr;
+ VkFn<PFN_vkQueueWaitIdle> QueueWaitIdle = nullptr;
+ VkFn<PFN_vkResetCommandBuffer> ResetCommandBuffer = nullptr;
+ VkFn<PFN_vkResetCommandPool> ResetCommandPool = nullptr;
+ VkFn<PFN_vkResetDescriptorPool> ResetDescriptorPool = nullptr;
+ VkFn<PFN_vkResetEvent> ResetEvent = nullptr;
+ VkFn<PFN_vkResetFences> ResetFences = nullptr;
+ VkFn<PFN_vkSetEvent> SetEvent = nullptr;
+ VkFn<PFN_vkUnmapMemory> UnmapMemory = nullptr;
+ VkFn<PFN_vkUpdateDescriptorSets> UpdateDescriptorSets = nullptr;
+ VkFn<PFN_vkWaitForFences> WaitForFences = nullptr;
+
+ // VK_KHR_external_memory_fd
+ VkFn<PFN_vkGetMemoryFdKHR> GetMemoryFdKHR = nullptr;
+ VkFn<PFN_vkGetMemoryFdPropertiesKHR> GetMemoryFdPropertiesKHR = nullptr;
+
+ // VK_KHR_external_semaphore_fd
+ VkFn<PFN_vkImportSemaphoreFdKHR> ImportSemaphoreFdKHR = nullptr;
+ VkFn<PFN_vkGetSemaphoreFdKHR> GetSemaphoreFdKHR = nullptr;
+
+ // VK_KHR_get_memory_requirements2
+ VkFn<PFN_vkGetBufferMemoryRequirements2KHR> GetBufferMemoryRequirements2 = nullptr;
+ VkFn<PFN_vkGetImageMemoryRequirements2KHR> GetImageMemoryRequirements2 = nullptr;
+ VkFn<PFN_vkGetImageSparseMemoryRequirements2KHR> GetImageSparseMemoryRequirements2 = nullptr;
+
+ // VK_KHR_swapchain
+ VkFn<PFN_vkCreateSwapchainKHR> CreateSwapchainKHR = nullptr;
+ VkFn<PFN_vkDestroySwapchainKHR> DestroySwapchainKHR = nullptr;
+ VkFn<PFN_vkGetSwapchainImagesKHR> GetSwapchainImagesKHR = nullptr;
+ VkFn<PFN_vkAcquireNextImageKHR> AcquireNextImageKHR = nullptr;
+ VkFn<PFN_vkQueuePresentKHR> QueuePresentKHR = nullptr;
#if VK_USE_PLATFORM_FUCHSIA
- // VK_FUCHSIA_external_memory
- PFN_vkGetMemoryZirconHandleFUCHSIA GetMemoryZirconHandleFUCHSIA = nullptr;
- PFN_vkGetMemoryZirconHandlePropertiesFUCHSIA GetMemoryZirconHandlePropertiesFUCHSIA =
- nullptr;
-
- // VK_FUCHSIA_external_semaphore
- PFN_vkImportSemaphoreZirconHandleFUCHSIA ImportSemaphoreZirconHandleFUCHSIA = nullptr;
- PFN_vkGetSemaphoreZirconHandleFUCHSIA GetSemaphoreZirconHandleFUCHSIA = nullptr;
+ // VK_FUCHSIA_external_memory
+ VkFn<PFN_vkGetMemoryZirconHandleFUCHSIA> GetMemoryZirconHandleFUCHSIA = nullptr;
+ VkFn<PFN_vkGetMemoryZirconHandlePropertiesFUCHSIA> GetMemoryZirconHandlePropertiesFUCHSIA =
+ nullptr;
+
+ // VK_FUCHSIA_external_semaphore
+ VkFn<PFN_vkImportSemaphoreZirconHandleFUCHSIA> ImportSemaphoreZirconHandleFUCHSIA = nullptr;
+ VkFn<PFN_vkGetSemaphoreZirconHandleFUCHSIA> GetSemaphoreZirconHandleFUCHSIA = nullptr;
#endif
- };
-
- // Create a wrapper around VkResult in the dawn::native::vulkan namespace. This shadows the
- // default VkResult (::VkResult). This ensures that assigning or creating a VkResult from a raw
- // ::VkResult uses WrapUnsafe. This makes it clear that users of VkResult must be intentional
- // about handling error cases.
- class VkResult {
- public:
- constexpr static VkResult WrapUnsafe(::VkResult value) {
- return VkResult(value);
- }
-
- constexpr operator ::VkResult() const {
- return mValue;
- }
-
- private:
- // Private. Use VkResult::WrapUnsafe instead.
- explicit constexpr VkResult(::VkResult value) : mValue(value) {
- }
-
- ::VkResult mValue;
- };
+};
+
+// Create a wrapper around VkResult in the dawn::native::vulkan namespace. This shadows the
+// default VkResult (::VkResult). This ensures that assigning or creating a VkResult from a raw
+// ::VkResult uses WrapUnsafe. This makes it clear that users of VkResult must be intentional
+// about handling error cases.
+class VkResult {
+ public:
+ constexpr static VkResult WrapUnsafe(::VkResult value) { return VkResult(value); }
+
+ constexpr operator ::VkResult() const { return mValue; }
+
+ private:
+ // Private. Use VkResult::WrapUnsafe instead.
+ explicit constexpr VkResult(::VkResult value) : mValue(value) {}
+
+ ::VkResult mValue;
+};
} // namespace dawn::native::vulkan
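
To illustrate the VkFn machinery introduced in this header: a partial specialization decomposes a Vulkan function-pointer type into its return and argument types and, when the sanitizer define is set, substitutes a std::function with the same signature so the member can hold an instrumented wrapper while still accepting a raw pointer. A compilable sketch under an assumed WRAP_FOR_SANITIZER define (not Dawn's macro name), with plain function pointers in place of VKAPI_PTR:

#include <functional>

#define WRAP_FOR_SANITIZER 1

template <typename F>
struct FnImpl;

// Decompose R(*)(Args...) and optionally rewrite it as std::function<R(Args...)>.
template <typename R, typename... Args>
struct FnImpl<R (*)(Args...)> {
#if WRAP_FOR_SANITIZER
    using type = std::function<R(Args...)>;
#else
    using type = R (*)(Args...);
#endif
};

template <typename F>
using Fn = typename FnImpl<F>::type;

struct Procs {
    Fn<int (*)(int)> Negate = nullptr;  // std::function<int(int)> when wrapping is on
};

int NegateImpl(int x) { return -x; }

int main() {
    Procs procs;
    procs.Negate = &NegateImpl;  // a raw function pointer still assigns and calls the same way
    return procs.Negate(5) == -5 ? 0 : 1;
}
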
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/VulkanInfo.cpp b/chromium/third_party/dawn/src/dawn/native/vulkan/VulkanInfo.cpp
index a734a9b6f45..116fd7222d6 100644
--- a/chromium/third_party/dawn/src/dawn/native/vulkan/VulkanInfo.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/VulkanInfo.cpp
@@ -14,321 +14,323 @@
#include "dawn/native/vulkan/VulkanInfo.h"
+#include <cstring>
+#include <string>
+#include <unordered_map>
+#include <utility>
+
#include "dawn/native/vulkan/AdapterVk.h"
#include "dawn/native/vulkan/BackendVk.h"
#include "dawn/native/vulkan/UtilsVulkan.h"
#include "dawn/native/vulkan/VulkanError.h"
-#include <cstring>
-
namespace dawn::native::vulkan {
- namespace {
- ResultOrError<InstanceExtSet> GatherInstanceExtensions(
- const char* layerName,
- const dawn::native::vulkan::VulkanFunctions& vkFunctions,
- const std::unordered_map<std::string, InstanceExt>& knownExts) {
- uint32_t count = 0;
- VkResult vkResult = VkResult::WrapUnsafe(
- vkFunctions.EnumerateInstanceExtensionProperties(layerName, &count, nullptr));
- if (vkResult != VK_SUCCESS && vkResult != VK_INCOMPLETE) {
- return DAWN_INTERNAL_ERROR("vkEnumerateInstanceExtensionProperties");
- }
+namespace {
+ResultOrError<InstanceExtSet> GatherInstanceExtensions(
+ const char* layerName,
+ const dawn::native::vulkan::VulkanFunctions& vkFunctions,
+ const std::unordered_map<std::string, InstanceExt>& knownExts) {
+ uint32_t count = 0;
+ VkResult vkResult = VkResult::WrapUnsafe(
+ vkFunctions.EnumerateInstanceExtensionProperties(layerName, &count, nullptr));
+ if (vkResult != VK_SUCCESS && vkResult != VK_INCOMPLETE) {
+ return DAWN_INTERNAL_ERROR("vkEnumerateInstanceExtensionProperties");
+ }
- std::vector<VkExtensionProperties> extensions(count);
- DAWN_TRY(CheckVkSuccess(vkFunctions.EnumerateInstanceExtensionProperties(
- layerName, &count, extensions.data()),
- "vkEnumerateInstanceExtensionProperties"));
-
- InstanceExtSet result;
- for (const VkExtensionProperties& extension : extensions) {
- auto it = knownExts.find(extension.extensionName);
- if (it != knownExts.end()) {
- result.set(it->second, true);
- }
- }
+ std::vector<VkExtensionProperties> extensions(count);
+ DAWN_TRY(CheckVkSuccess(
+ vkFunctions.EnumerateInstanceExtensionProperties(layerName, &count, extensions.data()),
+ "vkEnumerateInstanceExtensionProperties"));
- return result;
+ InstanceExtSet result;
+ for (const VkExtensionProperties& extension : extensions) {
+ auto it = knownExts.find(extension.extensionName);
+ if (it != knownExts.end()) {
+ result.set(it->second, true);
}
-
- } // namespace
-
- bool VulkanGlobalKnobs::HasExt(InstanceExt ext) const {
- return extensions[ext];
}
- bool VulkanDeviceKnobs::HasExt(DeviceExt ext) const {
- return extensions[ext];
- }
+ return result;
+}
- ResultOrError<VulkanGlobalInfo> GatherGlobalInfo(const VulkanFunctions& vkFunctions) {
- VulkanGlobalInfo info = {};
- // Gather info on available API version
- {
- info.apiVersion = VK_MAKE_VERSION(1, 0, 0);
- if (vkFunctions.EnumerateInstanceVersion != nullptr) {
- DAWN_TRY(CheckVkSuccess(vkFunctions.EnumerateInstanceVersion(&info.apiVersion),
- "vkEnumerateInstanceVersion"));
- }
- }
+} // namespace
- // Gather the info about the instance layers
- {
- uint32_t count = 0;
- VkResult result =
- VkResult::WrapUnsafe(vkFunctions.EnumerateInstanceLayerProperties(&count, nullptr));
- // From the Vulkan spec result should be success if there are 0 layers,
- // incomplete otherwise. This means that both values represent a success.
- // This is the same for all Enumarte functions
- if (result != VK_SUCCESS && result != VK_INCOMPLETE) {
- return DAWN_INTERNAL_ERROR("vkEnumerateInstanceLayerProperties");
- }
+bool VulkanGlobalKnobs::HasExt(InstanceExt ext) const {
+ return extensions[ext];
+}
- std::vector<VkLayerProperties> layersProperties(count);
- DAWN_TRY(CheckVkSuccess(
- vkFunctions.EnumerateInstanceLayerProperties(&count, layersProperties.data()),
- "vkEnumerateInstanceLayerProperties"));
-
- std::unordered_map<std::string, VulkanLayer> knownLayers = CreateVulkanLayerNameMap();
- for (const VkLayerProperties& layer : layersProperties) {
- auto it = knownLayers.find(layer.layerName);
- if (it != knownLayers.end()) {
- info.layers.set(it->second, true);
- }
- }
- }
+bool VulkanDeviceKnobs::HasExt(DeviceExt ext) const {
+ return extensions[ext];
+}
- // Gather the info about the instance extensions
- {
- std::unordered_map<std::string, InstanceExt> knownExts = CreateInstanceExtNameMap();
-
- DAWN_TRY_ASSIGN(info.extensions,
- GatherInstanceExtensions(nullptr, vkFunctions, knownExts));
- MarkPromotedExtensions(&info.extensions, info.apiVersion);
- info.extensions = EnsureDependencies(info.extensions);
-
- for (VulkanLayer layer : IterateBitSet(info.layers)) {
- DAWN_TRY_ASSIGN(info.layerExtensions[layer],
- GatherInstanceExtensions(GetVulkanLayerInfo(layer).name,
- vkFunctions, knownExts));
- MarkPromotedExtensions(&info.layerExtensions[layer], info.apiVersion);
- info.layerExtensions[layer] = EnsureDependencies(info.layerExtensions[layer]);
- }
+ResultOrError<VulkanGlobalInfo> GatherGlobalInfo(const VulkanFunctions& vkFunctions) {
+ VulkanGlobalInfo info = {};
+ // Gather info on available API version
+ {
+ info.apiVersion = VK_API_VERSION_1_0;
+ if (vkFunctions.EnumerateInstanceVersion != nullptr) {
+ DAWN_TRY(CheckVkSuccess(vkFunctions.EnumerateInstanceVersion(&info.apiVersion),
+ "vkEnumerateInstanceVersion"));
}
-
- return std::move(info);
}
- ResultOrError<std::vector<VkPhysicalDevice>> GatherPhysicalDevices(
- VkInstance instance,
- const VulkanFunctions& vkFunctions) {
+ // Gather the info about the instance layers
+ {
uint32_t count = 0;
VkResult result =
- VkResult::WrapUnsafe(vkFunctions.EnumeratePhysicalDevices(instance, &count, nullptr));
+ VkResult::WrapUnsafe(vkFunctions.EnumerateInstanceLayerProperties(&count, nullptr));
+        // Per the Vulkan spec, the result should be VK_SUCCESS if there are 0 layers
+        // and VK_INCOMPLETE otherwise, so both values indicate success.
+        // The same applies to all Enumerate functions.
if (result != VK_SUCCESS && result != VK_INCOMPLETE) {
- return DAWN_INTERNAL_ERROR("vkEnumeratePhysicalDevices");
+ return DAWN_INTERNAL_ERROR("vkEnumerateInstanceLayerProperties");
}
- std::vector<VkPhysicalDevice> physicalDevices(count);
+ std::vector<VkLayerProperties> layersProperties(count);
DAWN_TRY(CheckVkSuccess(
- vkFunctions.EnumeratePhysicalDevices(instance, &count, physicalDevices.data()),
- "vkEnumeratePhysicalDevices"));
-
- return std::move(physicalDevices);
+ vkFunctions.EnumerateInstanceLayerProperties(&count, layersProperties.data()),
+ "vkEnumerateInstanceLayerProperties"));
+
+ std::unordered_map<std::string, VulkanLayer> knownLayers = CreateVulkanLayerNameMap();
+ for (const VkLayerProperties& layer : layersProperties) {
+ auto it = knownLayers.find(layer.layerName);
+ if (it != knownLayers.end()) {
+ info.layers.set(it->second, true);
+ }
+ }
}
- ResultOrError<VulkanDeviceInfo> GatherDeviceInfo(const Adapter& adapter) {
- VulkanDeviceInfo info = {};
- VkPhysicalDevice physicalDevice = adapter.GetPhysicalDevice();
- const VulkanGlobalInfo& globalInfo = adapter.GetVulkanInstance()->GetGlobalInfo();
- const VulkanFunctions& vkFunctions = adapter.GetVulkanInstance()->GetFunctions();
-
- // Query the device properties first to get the ICD's `apiVersion`
- vkFunctions.GetPhysicalDeviceProperties(physicalDevice, &info.properties);
+ // Gather the info about the instance extensions
+ {
+ std::unordered_map<std::string, InstanceExt> knownExts = CreateInstanceExtNameMap();
- // Gather info about device memory.
- {
- VkPhysicalDeviceMemoryProperties memory;
- vkFunctions.GetPhysicalDeviceMemoryProperties(physicalDevice, &memory);
+ DAWN_TRY_ASSIGN(info.extensions, GatherInstanceExtensions(nullptr, vkFunctions, knownExts));
+ MarkPromotedExtensions(&info.extensions, info.apiVersion);
+ info.extensions = EnsureDependencies(info.extensions);
- info.memoryTypes.assign(memory.memoryTypes,
- memory.memoryTypes + memory.memoryTypeCount);
- info.memoryHeaps.assign(memory.memoryHeaps,
- memory.memoryHeaps + memory.memoryHeapCount);
+ for (VulkanLayer layer : IterateBitSet(info.layers)) {
+ DAWN_TRY_ASSIGN(
+ info.layerExtensions[layer],
+ GatherInstanceExtensions(GetVulkanLayerInfo(layer).name, vkFunctions, knownExts));
+ MarkPromotedExtensions(&info.layerExtensions[layer], info.apiVersion);
+ info.layerExtensions[layer] = EnsureDependencies(info.layerExtensions[layer]);
}
+ }
- // Gather info about device queue families
- {
- uint32_t count = 0;
- vkFunctions.GetPhysicalDeviceQueueFamilyProperties(physicalDevice, &count, nullptr);
+ return std::move(info);
+}
+
+ResultOrError<std::vector<VkPhysicalDevice>> GatherPhysicalDevices(
+ VkInstance instance,
+ const VulkanFunctions& vkFunctions) {
+ uint32_t count = 0;
+ VkResult result =
+ VkResult::WrapUnsafe(vkFunctions.EnumeratePhysicalDevices(instance, &count, nullptr));
+ if (result != VK_SUCCESS && result != VK_INCOMPLETE) {
+ return DAWN_INTERNAL_ERROR("vkEnumeratePhysicalDevices");
+ }
- info.queueFamilies.resize(count);
- vkFunctions.GetPhysicalDeviceQueueFamilyProperties(physicalDevice, &count,
- info.queueFamilies.data());
- }
+ std::vector<VkPhysicalDevice> physicalDevices(count);
+ DAWN_TRY(CheckVkSuccess(
+ vkFunctions.EnumeratePhysicalDevices(instance, &count, physicalDevices.data()),
+ "vkEnumeratePhysicalDevices"));
- // Gather the info about the device layers
- {
- uint32_t count = 0;
- VkResult result = VkResult::WrapUnsafe(
- vkFunctions.EnumerateDeviceLayerProperties(physicalDevice, &count, nullptr));
- if (result != VK_SUCCESS && result != VK_INCOMPLETE) {
- return DAWN_INTERNAL_ERROR("vkEnumerateDeviceLayerProperties");
- }
+ return std::move(physicalDevices);
+}
- info.layers.resize(count);
- DAWN_TRY(CheckVkSuccess(vkFunctions.EnumerateDeviceLayerProperties(
- physicalDevice, &count, info.layers.data()),
- "vkEnumerateDeviceLayerProperties"));
- }
+ResultOrError<VulkanDeviceInfo> GatherDeviceInfo(const Adapter& adapter) {
+ VulkanDeviceInfo info = {};
+ VkPhysicalDevice physicalDevice = adapter.GetPhysicalDevice();
+ const VulkanGlobalInfo& globalInfo = adapter.GetVulkanInstance()->GetGlobalInfo();
+ const VulkanFunctions& vkFunctions = adapter.GetVulkanInstance()->GetFunctions();
- // Gather the info about the device extensions
- {
- uint32_t count = 0;
- VkResult result = VkResult::WrapUnsafe(vkFunctions.EnumerateDeviceExtensionProperties(
- physicalDevice, nullptr, &count, nullptr));
- if (result != VK_SUCCESS && result != VK_INCOMPLETE) {
- return DAWN_INTERNAL_ERROR("vkEnumerateDeviceExtensionProperties");
- }
+ // Query the device properties first to get the ICD's `apiVersion`
+ vkFunctions.GetPhysicalDeviceProperties(physicalDevice, &info.properties);
- std::vector<VkExtensionProperties> extensionsProperties;
- extensionsProperties.resize(count);
- DAWN_TRY(
- CheckVkSuccess(vkFunctions.EnumerateDeviceExtensionProperties(
- physicalDevice, nullptr, &count, extensionsProperties.data()),
- "vkEnumerateDeviceExtensionProperties"));
+ // Gather info about device memory.
+ {
+ VkPhysicalDeviceMemoryProperties memory;
+ vkFunctions.GetPhysicalDeviceMemoryProperties(physicalDevice, &memory);
- std::unordered_map<std::string, DeviceExt> knownExts = CreateDeviceExtNameMap();
+ info.memoryTypes.assign(memory.memoryTypes, memory.memoryTypes + memory.memoryTypeCount);
+ info.memoryHeaps.assign(memory.memoryHeaps, memory.memoryHeaps + memory.memoryHeapCount);
+ }
- for (const VkExtensionProperties& extension : extensionsProperties) {
- auto it = knownExts.find(extension.extensionName);
- if (it != knownExts.end()) {
- info.extensions.set(it->second, true);
- }
- }
+ // Gather info about device queue families
+ {
+ uint32_t count = 0;
+ vkFunctions.GetPhysicalDeviceQueueFamilyProperties(physicalDevice, &count, nullptr);
- MarkPromotedExtensions(&info.extensions, info.properties.apiVersion);
- info.extensions = EnsureDependencies(info.extensions, globalInfo.extensions,
- info.properties.apiVersion);
- }
+ info.queueFamilies.resize(count);
+ vkFunctions.GetPhysicalDeviceQueueFamilyProperties(physicalDevice, &count,
+ info.queueFamilies.data());
+ }
- // Gather general and extension features and properties
- //
- // Use vkGetPhysicalDevice{Features,Properties}2 if required to gather information about
- // the extensions. DeviceExt::GetPhysicalDeviceProperties2 is guaranteed to be available
- // because these extensions (transitively) depend on it in `EnsureDependencies`
- VkPhysicalDeviceFeatures2 features2 = {};
- features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
- features2.pNext = nullptr;
- PNextChainBuilder featuresChain(&features2);
-
- VkPhysicalDeviceProperties2 properties2 = {};
- properties2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
- features2.pNext = nullptr;
- PNextChainBuilder propertiesChain(&properties2);
-
- if (info.extensions[DeviceExt::ShaderFloat16Int8]) {
- featuresChain.Add(&info.shaderFloat16Int8Features,
- VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES_KHR);
+ // Gather the info about the device layers
+ {
+ uint32_t count = 0;
+ VkResult result = VkResult::WrapUnsafe(
+ vkFunctions.EnumerateDeviceLayerProperties(physicalDevice, &count, nullptr));
+ if (result != VK_SUCCESS && result != VK_INCOMPLETE) {
+ return DAWN_INTERNAL_ERROR("vkEnumerateDeviceLayerProperties");
}
- if (info.extensions[DeviceExt::_16BitStorage]) {
- featuresChain.Add(&info._16BitStorageFeatures,
- VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES);
- }
+ info.layers.resize(count);
+ DAWN_TRY(CheckVkSuccess(
+ vkFunctions.EnumerateDeviceLayerProperties(physicalDevice, &count, info.layers.data()),
+ "vkEnumerateDeviceLayerProperties"));
+ }
- if (info.extensions[DeviceExt::SubgroupSizeControl]) {
- featuresChain.Add(&info.subgroupSizeControlFeatures,
- VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT);
- propertiesChain.Add(
- &info.subgroupSizeControlProperties,
- VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT);
+ // Gather the info about the device extensions
+ {
+ uint32_t count = 0;
+ VkResult result = VkResult::WrapUnsafe(vkFunctions.EnumerateDeviceExtensionProperties(
+ physicalDevice, nullptr, &count, nullptr));
+ if (result != VK_SUCCESS && result != VK_INCOMPLETE) {
+ return DAWN_INTERNAL_ERROR("vkEnumerateDeviceExtensionProperties");
}
- if (info.extensions[DeviceExt::DriverProperties]) {
- propertiesChain.Add(&info.driverProperties,
- VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES);
- }
+ std::vector<VkExtensionProperties> extensionsProperties;
+ extensionsProperties.resize(count);
+ DAWN_TRY(CheckVkSuccess(vkFunctions.EnumerateDeviceExtensionProperties(
+ physicalDevice, nullptr, &count, extensionsProperties.data()),
+ "vkEnumerateDeviceExtensionProperties"));
- // If we have DeviceExt::GetPhysicalDeviceProperties2, use features2 and properties2 so
- // that features no covered by VkPhysicalDevice{Features,Properties} can be queried.
- //
- // Note that info.properties has already been filled at the start of this function to get
- // `apiVersion`.
- ASSERT(info.properties.apiVersion != 0);
- if (info.extensions[DeviceExt::GetPhysicalDeviceProperties2]) {
- vkFunctions.GetPhysicalDeviceProperties2(physicalDevice, &properties2);
- vkFunctions.GetPhysicalDeviceFeatures2(physicalDevice, &features2);
- info.features = features2.features;
- } else {
- ASSERT(features2.pNext == nullptr && properties2.pNext == nullptr);
- vkFunctions.GetPhysicalDeviceFeatures(physicalDevice, &info.features);
+ std::unordered_map<std::string, DeviceExt> knownExts = CreateDeviceExtNameMap();
+
+ for (const VkExtensionProperties& extension : extensionsProperties) {
+ auto it = knownExts.find(extension.extensionName);
+ if (it != knownExts.end()) {
+ info.extensions.set(it->second, true);
+ }
}
- // TODO(cwallez@chromium.org): gather info about formats
+ MarkPromotedExtensions(&info.extensions, info.properties.apiVersion);
+ info.extensions =
+ EnsureDependencies(info.extensions, globalInfo.extensions, info.properties.apiVersion);
+ }
- return std::move(info);
+ // Gather general and extension features and properties
+ //
+ // Use vkGetPhysicalDevice{Features,Properties}2 if required to gather information about
+ // the extensions. DeviceExt::GetPhysicalDeviceProperties2 is guaranteed to be available
+ // because these extensions (transitively) depend on it in `EnsureDependencies`
+ VkPhysicalDeviceFeatures2 features2 = {};
+ features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
+ features2.pNext = nullptr;
+ PNextChainBuilder featuresChain(&features2);
+
+ VkPhysicalDeviceProperties2 properties2 = {};
+ properties2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
+    properties2.pNext = nullptr;
+ PNextChainBuilder propertiesChain(&properties2);
+
+ if (info.extensions[DeviceExt::ShaderFloat16Int8]) {
+ featuresChain.Add(&info.shaderFloat16Int8Features,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES_KHR);
}
- ResultOrError<VulkanSurfaceInfo> GatherSurfaceInfo(const Adapter& adapter,
- VkSurfaceKHR surface) {
- VulkanSurfaceInfo info = {};
+ if (info.extensions[DeviceExt::_16BitStorage]) {
+ featuresChain.Add(&info._16BitStorageFeatures,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES);
+ }
- VkPhysicalDevice physicalDevice = adapter.GetPhysicalDevice();
- const VulkanFunctions& vkFunctions = adapter.GetVulkanInstance()->GetFunctions();
+ if (info.extensions[DeviceExt::SubgroupSizeControl]) {
+ featuresChain.Add(&info.subgroupSizeControlFeatures,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT);
+ propertiesChain.Add(&info.subgroupSizeControlProperties,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT);
+ }
- // Get the surface capabilities
- DAWN_TRY(CheckVkSuccess(vkFunctions.GetPhysicalDeviceSurfaceCapabilitiesKHR(
- physicalDevice, surface, &info.capabilities),
- "vkGetPhysicalDeviceSurfaceCapabilitiesKHR"));
+ if (info.extensions[DeviceExt::DriverProperties]) {
+ propertiesChain.Add(&info.driverProperties,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES);
+ }
- // Query which queue families support presenting this surface
- {
- size_t nQueueFamilies = adapter.GetDeviceInfo().queueFamilies.size();
- info.supportedQueueFamilies.resize(nQueueFamilies, false);
+ if (info.extensions[DeviceExt::ShaderIntegerDotProduct]) {
+ propertiesChain.Add(
+ &info.shaderIntegerDotProductProperties,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_DOT_PRODUCT_PROPERTIES_KHR);
+ }
- for (uint32_t i = 0; i < nQueueFamilies; ++i) {
- VkBool32 supported = VK_FALSE;
- DAWN_TRY(CheckVkSuccess(vkFunctions.GetPhysicalDeviceSurfaceSupportKHR(
- physicalDevice, i, surface, &supported),
- "vkGetPhysicalDeviceSurfaceSupportKHR"));
+ // If we have DeviceExt::GetPhysicalDeviceProperties2, use features2 and properties2 so
+    // that features not covered by VkPhysicalDevice{Features,Properties} can be queried.
+ //
+ // Note that info.properties has already been filled at the start of this function to get
+ // `apiVersion`.
+ ASSERT(info.properties.apiVersion != 0);
+ if (info.extensions[DeviceExt::GetPhysicalDeviceProperties2]) {
+ vkFunctions.GetPhysicalDeviceProperties2(physicalDevice, &properties2);
+ vkFunctions.GetPhysicalDeviceFeatures2(physicalDevice, &features2);
+ info.features = features2.features;
+ } else {
+ ASSERT(features2.pNext == nullptr && properties2.pNext == nullptr);
+ vkFunctions.GetPhysicalDeviceFeatures(physicalDevice, &info.features);
+ }
- info.supportedQueueFamilies[i] = (supported == VK_TRUE);
- }
- }
+ // TODO(cwallez@chromium.org): gather info about formats
- // Gather supported formats
- {
- uint32_t count = 0;
- VkResult result = VkResult::WrapUnsafe(vkFunctions.GetPhysicalDeviceSurfaceFormatsKHR(
- physicalDevice, surface, &count, nullptr));
- if (result != VK_SUCCESS && result != VK_INCOMPLETE) {
- return DAWN_INTERNAL_ERROR("vkGetPhysicalDeviceSurfaceFormatsKHR");
- }
+ return std::move(info);
+}
+
+ResultOrError<VulkanSurfaceInfo> GatherSurfaceInfo(const Adapter& adapter, VkSurfaceKHR surface) {
+ VulkanSurfaceInfo info = {};
+
+ VkPhysicalDevice physicalDevice = adapter.GetPhysicalDevice();
+ const VulkanFunctions& vkFunctions = adapter.GetVulkanInstance()->GetFunctions();
+
+ // Get the surface capabilities
+ DAWN_TRY(CheckVkSuccess(vkFunctions.GetPhysicalDeviceSurfaceCapabilitiesKHR(
+ physicalDevice, surface, &info.capabilities),
+ "vkGetPhysicalDeviceSurfaceCapabilitiesKHR"));
- info.formats.resize(count);
- DAWN_TRY(CheckVkSuccess(vkFunctions.GetPhysicalDeviceSurfaceFormatsKHR(
- physicalDevice, surface, &count, info.formats.data()),
- "vkGetPhysicalDeviceSurfaceFormatsKHR"));
+ // Query which queue families support presenting this surface
+ {
+ size_t nQueueFamilies = adapter.GetDeviceInfo().queueFamilies.size();
+ info.supportedQueueFamilies.resize(nQueueFamilies, false);
+
+ for (uint32_t i = 0; i < nQueueFamilies; ++i) {
+ VkBool32 supported = VK_FALSE;
+ DAWN_TRY(CheckVkSuccess(vkFunctions.GetPhysicalDeviceSurfaceSupportKHR(
+ physicalDevice, i, surface, &supported),
+ "vkGetPhysicalDeviceSurfaceSupportKHR"));
+
+ info.supportedQueueFamilies[i] = (supported == VK_TRUE);
}
+ }
- // Gather supported presents modes
- {
- uint32_t count = 0;
- VkResult result =
- VkResult::WrapUnsafe(vkFunctions.GetPhysicalDeviceSurfacePresentModesKHR(
- physicalDevice, surface, &count, nullptr));
- if (result != VK_SUCCESS && result != VK_INCOMPLETE) {
- return DAWN_INTERNAL_ERROR("vkGetPhysicalDeviceSurfacePresentModesKHR");
- }
+ // Gather supported formats
+ {
+ uint32_t count = 0;
+ VkResult result = VkResult::WrapUnsafe(vkFunctions.GetPhysicalDeviceSurfaceFormatsKHR(
+ physicalDevice, surface, &count, nullptr));
+ if (result != VK_SUCCESS && result != VK_INCOMPLETE) {
+ return DAWN_INTERNAL_ERROR("vkGetPhysicalDeviceSurfaceFormatsKHR");
+ }
+
+ info.formats.resize(count);
+ DAWN_TRY(CheckVkSuccess(vkFunctions.GetPhysicalDeviceSurfaceFormatsKHR(
+ physicalDevice, surface, &count, info.formats.data()),
+ "vkGetPhysicalDeviceSurfaceFormatsKHR"));
+ }
- info.presentModes.resize(count);
- DAWN_TRY(CheckVkSuccess(vkFunctions.GetPhysicalDeviceSurfacePresentModesKHR(
- physicalDevice, surface, &count, info.presentModes.data()),
- "vkGetPhysicalDeviceSurfacePresentModesKHR"));
+    // Gather supported present modes
+ {
+ uint32_t count = 0;
+ VkResult result = VkResult::WrapUnsafe(vkFunctions.GetPhysicalDeviceSurfacePresentModesKHR(
+ physicalDevice, surface, &count, nullptr));
+ if (result != VK_SUCCESS && result != VK_INCOMPLETE) {
+ return DAWN_INTERNAL_ERROR("vkGetPhysicalDeviceSurfacePresentModesKHR");
}
- return std::move(info);
+ info.presentModes.resize(count);
+ DAWN_TRY(CheckVkSuccess(vkFunctions.GetPhysicalDeviceSurfacePresentModesKHR(
+ physicalDevice, surface, &count, info.presentModes.data()),
+ "vkGetPhysicalDeviceSurfacePresentModesKHR"));
}
+ return std::move(info);
+}
+
} // namespace dawn::native::vulkan
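
The gathering functions above all lean on the same two-call Vulkan enumeration idiom: the first call passes a null output array to retrieve the element count (where both VK_SUCCESS and VK_INCOMPLETE count as success), and the second call fills a vector sized to that count. A minimal sketch of the idiom with the raw loader entry point instead of Dawn's error-handling macros (EnumerateInstanceLayers is an illustrative name):

#include <vector>
#include <vulkan/vulkan.h>

// Two-call enumeration: query the count, then fill the vector.
bool EnumerateInstanceLayers(std::vector<VkLayerProperties>* out) {
    uint32_t count = 0;
    VkResult result = vkEnumerateInstanceLayerProperties(&count, nullptr);
    if (result != VK_SUCCESS && result != VK_INCOMPLETE) {
        return false;
    }
    out->resize(count);
    result = vkEnumerateInstanceLayerProperties(&count, out->data());
    return result == VK_SUCCESS;
}
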
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/VulkanInfo.h b/chromium/third_party/dawn/src/dawn/native/vulkan/VulkanInfo.h
index f75a11f406b..39dd12d66c3 100644
--- a/chromium/third_party/dawn/src/dawn/native/vulkan/VulkanInfo.h
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/VulkanInfo.h
@@ -15,77 +15,77 @@
#ifndef SRC_DAWN_NATIVE_VULKAN_VULKANINFO_H_
#define SRC_DAWN_NATIVE_VULKAN_VULKANINFO_H_
+#include <vector>
+
#include "dawn/common/ityp_array.h"
#include "dawn/common/vulkan_platform.h"
#include "dawn/native/Error.h"
#include "dawn/native/vulkan/VulkanExtensions.h"
-#include <vector>
-
namespace dawn::native::vulkan {
- class Adapter;
- class Backend;
- struct VulkanFunctions;
-
- // Global information - gathered before the instance is created
- struct VulkanGlobalKnobs {
- VulkanLayerSet layers;
- ityp::array<VulkanLayer, InstanceExtSet, static_cast<uint32_t>(VulkanLayer::EnumCount)>
- layerExtensions;
-
- // During information gathering `extensions` only contains the instance's extensions but
- // during the instance creation logic it becomes the OR of the instance's extensions and
- // the selected layers' extensions.
- InstanceExtSet extensions;
- bool HasExt(InstanceExt ext) const;
- };
-
- struct VulkanGlobalInfo : VulkanGlobalKnobs {
- uint32_t apiVersion;
- };
-
- // Device information - gathered before the device is created.
- struct VulkanDeviceKnobs {
- VkPhysicalDeviceFeatures features;
- VkPhysicalDeviceShaderFloat16Int8FeaturesKHR shaderFloat16Int8Features;
- VkPhysicalDevice16BitStorageFeaturesKHR _16BitStorageFeatures;
- VkPhysicalDeviceSubgroupSizeControlFeaturesEXT subgroupSizeControlFeatures;
- VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeaturesKHR
- zeroInitializeWorkgroupMemoryFeatures;
-
- bool HasExt(DeviceExt ext) const;
- DeviceExtSet extensions;
- };
-
- struct VulkanDeviceInfo : VulkanDeviceKnobs {
- VkPhysicalDeviceProperties properties;
- VkPhysicalDeviceDriverProperties driverProperties;
- VkPhysicalDeviceSubgroupSizeControlPropertiesEXT subgroupSizeControlProperties;
-
- std::vector<VkQueueFamilyProperties> queueFamilies;
-
- std::vector<VkMemoryType> memoryTypes;
- std::vector<VkMemoryHeap> memoryHeaps;
-
- std::vector<VkLayerProperties> layers;
- // TODO(cwallez@chromium.org): layer instance extensions
- };
-
- struct VulkanSurfaceInfo {
- VkSurfaceCapabilitiesKHR capabilities;
- std::vector<VkSurfaceFormatKHR> formats;
- std::vector<VkPresentModeKHR> presentModes;
- std::vector<bool> supportedQueueFamilies;
- };
-
- ResultOrError<VulkanGlobalInfo> GatherGlobalInfo(const VulkanFunctions& vkFunctions);
- ResultOrError<std::vector<VkPhysicalDevice>> GatherPhysicalDevices(
- VkInstance instance,
- const VulkanFunctions& vkFunctions);
- ResultOrError<VulkanDeviceInfo> GatherDeviceInfo(const Adapter& adapter);
- ResultOrError<VulkanSurfaceInfo> GatherSurfaceInfo(const Adapter& adapter,
- VkSurfaceKHR surface);
+class Adapter;
+class Backend;
+struct VulkanFunctions;
+
+// Global information - gathered before the instance is created
+struct VulkanGlobalKnobs {
+ VulkanLayerSet layers;
+ ityp::array<VulkanLayer, InstanceExtSet, static_cast<uint32_t>(VulkanLayer::EnumCount)>
+ layerExtensions;
+
+ // During information gathering `extensions` only contains the instance's extensions but
+ // during the instance creation logic it becomes the OR of the instance's extensions and
+ // the selected layers' extensions.
+ InstanceExtSet extensions;
+ bool HasExt(InstanceExt ext) const;
+};
+
+struct VulkanGlobalInfo : VulkanGlobalKnobs {
+ uint32_t apiVersion;
+};
+
+// Device information - gathered before the device is created.
+struct VulkanDeviceKnobs {
+ VkPhysicalDeviceFeatures features;
+ VkPhysicalDeviceShaderFloat16Int8FeaturesKHR shaderFloat16Int8Features;
+ VkPhysicalDevice16BitStorageFeaturesKHR _16BitStorageFeatures;
+ VkPhysicalDeviceSubgroupSizeControlFeaturesEXT subgroupSizeControlFeatures;
+ VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeaturesKHR zeroInitializeWorkgroupMemoryFeatures;
+ VkPhysicalDeviceShaderIntegerDotProductFeaturesKHR shaderIntegerDotProductFeatures;
+
+ bool HasExt(DeviceExt ext) const;
+ DeviceExtSet extensions;
+};
+
+struct VulkanDeviceInfo : VulkanDeviceKnobs {
+ VkPhysicalDeviceProperties properties;
+ VkPhysicalDeviceDriverProperties driverProperties;
+ VkPhysicalDeviceSubgroupSizeControlPropertiesEXT subgroupSizeControlProperties;
+ VkPhysicalDeviceShaderIntegerDotProductPropertiesKHR shaderIntegerDotProductProperties;
+
+ std::vector<VkQueueFamilyProperties> queueFamilies;
+
+ std::vector<VkMemoryType> memoryTypes;
+ std::vector<VkMemoryHeap> memoryHeaps;
+
+ std::vector<VkLayerProperties> layers;
+ // TODO(cwallez@chromium.org): layer instance extensions
+};
+
+struct VulkanSurfaceInfo {
+ VkSurfaceCapabilitiesKHR capabilities;
+ std::vector<VkSurfaceFormatKHR> formats;
+ std::vector<VkPresentModeKHR> presentModes;
+ std::vector<bool> supportedQueueFamilies;
+};
+
+ResultOrError<VulkanGlobalInfo> GatherGlobalInfo(const VulkanFunctions& vkFunctions);
+ResultOrError<std::vector<VkPhysicalDevice>> GatherPhysicalDevices(
+ VkInstance instance,
+ const VulkanFunctions& vkFunctions);
+ResultOrError<VulkanDeviceInfo> GatherDeviceInfo(const Adapter& adapter);
+ResultOrError<VulkanSurfaceInfo> GatherSurfaceInfo(const Adapter& adapter, VkSurfaceKHR surface);
} // namespace dawn::native::vulkan
#endif // SRC_DAWN_NATIVE_VULKAN_VULKANINFO_H_
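
The gathering entry points declared above are typically chained during backend initialization: gather global (pre-instance) info, enumerate physical devices, then gather per-adapter device info. The following is a minimal illustrative sketch, not code from this patch; the helper name GatherInfoForFirstAdapter and the assumption that the caller already holds a VulkanFunctions table, a VkInstance, and an Adapter are assumptions.

    // Hedged sketch of how the VulkanInfo.h entry points fit together (illustrative only).
    ResultOrError<VulkanDeviceInfo> GatherInfoForFirstAdapter(const VulkanFunctions& fn,
                                                              VkInstance instance,
                                                              const Adapter& adapter) {
        VulkanGlobalInfo globalInfo;
        DAWN_TRY_ASSIGN(globalInfo, GatherGlobalInfo(fn));

        std::vector<VkPhysicalDevice> physicalDevices;
        DAWN_TRY_ASSIGN(physicalDevices, GatherPhysicalDevices(instance, fn));

        VulkanDeviceInfo deviceInfo;
        DAWN_TRY_ASSIGN(deviceInfo, GatherDeviceInfo(adapter));

        // Extension availability is then queried through the *Knobs base structs, e.g.
        // deviceInfo.HasExt(DeviceExt::ExternalMemoryFD). Real code stores globalInfo and
        // the physical device list on the backend rather than discarding them as done here.
        return deviceInfo;
    }
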
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/external_memory/MemoryService.h b/chromium/third_party/dawn/src/dawn/native/vulkan/external_memory/MemoryService.h
index b03fb104995..a15309a9a96 100644
--- a/chromium/third_party/dawn/src/dawn/native/vulkan/external_memory/MemoryService.h
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/external_memory/MemoryService.h
@@ -21,58 +21,58 @@
#include "dawn/native/vulkan/ExternalHandle.h"
namespace dawn::native::vulkan {
- class Device;
- struct VulkanDeviceInfo;
+class Device;
+struct VulkanDeviceInfo;
} // namespace dawn::native::vulkan
-namespace dawn::native { namespace vulkan::external_memory {
+namespace dawn::native::vulkan::external_memory {
- struct MemoryImportParams {
- VkDeviceSize allocationSize;
- uint32_t memoryTypeIndex;
- };
+struct MemoryImportParams {
+ VkDeviceSize allocationSize;
+ uint32_t memoryTypeIndex;
+};
- class Service {
- public:
- explicit Service(Device* device);
- ~Service();
+class Service {
+ public:
+ explicit Service(Device* device);
+ ~Service();
- static bool CheckSupport(const VulkanDeviceInfo& deviceInfo);
+ static bool CheckSupport(const VulkanDeviceInfo& deviceInfo);
- // True if the device reports it supports importing external memory.
- bool SupportsImportMemory(VkFormat format,
- VkImageType type,
- VkImageTiling tiling,
- VkImageUsageFlags usage,
- VkImageCreateFlags flags);
+ // True if the device reports it supports importing external memory.
+ bool SupportsImportMemory(VkFormat format,
+ VkImageType type,
+ VkImageTiling tiling,
+ VkImageUsageFlags usage,
+ VkImageCreateFlags flags);
- // True if the device reports it supports creating VkImages from external memory.
- bool SupportsCreateImage(const ExternalImageDescriptor* descriptor,
- VkFormat format,
- VkImageUsageFlags usage,
- bool* supportsDisjoint);
+ // True if the device reports it supports creating VkImages from external memory.
+ bool SupportsCreateImage(const ExternalImageDescriptor* descriptor,
+ VkFormat format,
+ VkImageUsageFlags usage,
+ bool* supportsDisjoint);
- // Returns the parameters required for importing memory
- ResultOrError<MemoryImportParams> GetMemoryImportParams(
- const ExternalImageDescriptor* descriptor,
- VkImage image);
+ // Returns the parameters required for importing memory
+ ResultOrError<MemoryImportParams> GetMemoryImportParams(
+ const ExternalImageDescriptor* descriptor,
+ VkImage image);
- // Given an external handle pointing to memory, import it into a VkDeviceMemory
- ResultOrError<VkDeviceMemory> ImportMemory(ExternalMemoryHandle handle,
- const MemoryImportParams& importParams,
- VkImage image);
+ // Given an external handle pointing to memory, import it into a VkDeviceMemory
+ ResultOrError<VkDeviceMemory> ImportMemory(ExternalMemoryHandle handle,
+ const MemoryImportParams& importParams,
+ VkImage image);
- // Create a VkImage for the given handle type
- ResultOrError<VkImage> CreateImage(const ExternalImageDescriptor* descriptor,
- const VkImageCreateInfo& baseCreateInfo);
+ // Create a VkImage for the given handle type
+ ResultOrError<VkImage> CreateImage(const ExternalImageDescriptor* descriptor,
+ const VkImageCreateInfo& baseCreateInfo);
- private:
- Device* mDevice = nullptr;
+ private:
+ Device* mDevice = nullptr;
- // True if early checks pass that determine if the service is supported
- bool mSupported = false;
- };
+ // True if the early checks that determine whether the service is supported have passed
+ bool mSupported = false;
+};
-}} // namespace dawn::native::vulkan::external_memory
+} // namespace dawn::native::vulkan::external_memory
#endif // SRC_DAWN_NATIVE_VULKAN_EXTERNAL_MEMORY_MEMORYSERVICE_H_
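
The Service interface above reads as a three-step flow: check support, derive MemoryImportParams for an already-created VkImage, then import the external handle. A minimal sketch of a caller, assuming a hypothetical ImportForImage helper and that the caller already owns the Service, descriptor, handle, and image; the helper name and control flow are not part of this patch.

    // Illustrative only; error propagation relies on the ResultOrError/DAWN_TRY_ASSIGN
    // machinery used throughout the files below.
    ResultOrError<VkDeviceMemory> ImportForImage(Service* service,
                                                 const ExternalImageDescriptor* descriptor,
                                                 ExternalMemoryHandle handle,
                                                 VkImage image) {
        MemoryImportParams params;
        DAWN_TRY_ASSIGN(params, service->GetMemoryImportParams(descriptor, image));
        // ImportMemory wraps the external handle in a VkDeviceMemory; binding it to the
        // image is left to the caller.
        return service->ImportMemory(handle, params, image);
    }
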
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/external_memory/MemoryServiceDmaBuf.cpp b/chromium/third_party/dawn/src/dawn/native/vulkan/external_memory/MemoryServiceDmaBuf.cpp
index 675e7820595..0729d4713c0 100644
--- a/chromium/third_party/dawn/src/dawn/native/vulkan/external_memory/MemoryServiceDmaBuf.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/external_memory/MemoryServiceDmaBuf.cpp
@@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+#include <vector>
+
#include "dawn/common/Assert.h"
#include "dawn/native/vulkan/AdapterVk.h"
#include "dawn/native/vulkan/BackendVk.h"
@@ -21,337 +23,321 @@
#include "dawn/native/vulkan/VulkanError.h"
#include "dawn/native/vulkan/external_memory/MemoryService.h"
-namespace dawn::native { namespace vulkan::external_memory {
-
- namespace {
-
- bool GetFormatModifierProps(const VulkanFunctions& fn,
- VkPhysicalDevice physicalDevice,
- VkFormat format,
- uint64_t modifier,
- VkDrmFormatModifierPropertiesEXT* formatModifierProps) {
- std::vector<VkDrmFormatModifierPropertiesEXT> formatModifierPropsVector;
- VkFormatProperties2 formatProps = {};
- formatProps.sType = VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_2;
- PNextChainBuilder formatPropsChain(&formatProps);
-
- VkDrmFormatModifierPropertiesListEXT formatModifierPropsList = {};
- formatModifierPropsList.drmFormatModifierCount = 0;
- formatModifierPropsList.pDrmFormatModifierProperties = nullptr;
- formatPropsChain.Add(&formatModifierPropsList,
- VK_STRUCTURE_TYPE_DRM_FORMAT_MODIFIER_PROPERTIES_LIST_EXT);
-
- fn.GetPhysicalDeviceFormatProperties2(physicalDevice, format, &formatProps);
-
- uint32_t modifierCount = formatModifierPropsList.drmFormatModifierCount;
- formatModifierPropsVector.resize(modifierCount);
- formatModifierPropsList.pDrmFormatModifierProperties = formatModifierPropsVector.data();
-
- fn.GetPhysicalDeviceFormatProperties2(physicalDevice, format, &formatProps);
- for (const auto& props : formatModifierPropsVector) {
- if (props.drmFormatModifier == modifier) {
- *formatModifierProps = props;
- return true;
- }
- }
- return false;
- }
-
- // Some modifiers use multiple planes (for example, see the comment for
- // I915_FORMAT_MOD_Y_TILED_CCS in drm/drm_fourcc.h).
- ResultOrError<uint32_t> GetModifierPlaneCount(const VulkanFunctions& fn,
- VkPhysicalDevice physicalDevice,
- VkFormat format,
- uint64_t modifier) {
- VkDrmFormatModifierPropertiesEXT props;
- if (GetFormatModifierProps(fn, physicalDevice, format, modifier, &props)) {
- return static_cast<uint32_t>(props.drmFormatModifierPlaneCount);
- }
- return DAWN_FORMAT_VALIDATION_ERROR("DRM format modifier not supported.");
- }
-
- bool IsMultiPlanarVkFormat(VkFormat format) {
- switch (format) {
- case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
- case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
- case VK_FORMAT_G8_B8_R8_3PLANE_422_UNORM:
- case VK_FORMAT_G8_B8R8_2PLANE_422_UNORM:
- case VK_FORMAT_G8_B8_R8_3PLANE_444_UNORM:
- case VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16:
- case VK_FORMAT_G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16:
- case VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16:
- case VK_FORMAT_G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16:
- case VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16:
- case VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16:
- case VK_FORMAT_G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16:
- case VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16:
- case VK_FORMAT_G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16:
- case VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16:
- case VK_FORMAT_G16_B16_R16_3PLANE_420_UNORM:
- case VK_FORMAT_G16_B16R16_2PLANE_420_UNORM:
- case VK_FORMAT_G16_B16_R16_3PLANE_422_UNORM:
- case VK_FORMAT_G16_B16R16_2PLANE_422_UNORM:
- case VK_FORMAT_G16_B16_R16_3PLANE_444_UNORM:
- return true;
-
- default:
- return false;
- }
+namespace dawn::native::vulkan::external_memory {
+
+namespace {
+
+bool GetFormatModifierProps(const VulkanFunctions& fn,
+ VkPhysicalDevice physicalDevice,
+ VkFormat format,
+ uint64_t modifier,
+ VkDrmFormatModifierPropertiesEXT* formatModifierProps) {
+ std::vector<VkDrmFormatModifierPropertiesEXT> formatModifierPropsVector;
+ VkFormatProperties2 formatProps = {};
+ formatProps.sType = VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_2;
+ PNextChainBuilder formatPropsChain(&formatProps);
+
+ VkDrmFormatModifierPropertiesListEXT formatModifierPropsList = {};
+ formatModifierPropsList.drmFormatModifierCount = 0;
+ formatModifierPropsList.pDrmFormatModifierProperties = nullptr;
+ formatPropsChain.Add(&formatModifierPropsList,
+ VK_STRUCTURE_TYPE_DRM_FORMAT_MODIFIER_PROPERTIES_LIST_EXT);
+
+ fn.GetPhysicalDeviceFormatProperties2(physicalDevice, format, &formatProps);
+
+ uint32_t modifierCount = formatModifierPropsList.drmFormatModifierCount;
+ formatModifierPropsVector.resize(modifierCount);
+ formatModifierPropsList.pDrmFormatModifierProperties = formatModifierPropsVector.data();
+
+ fn.GetPhysicalDeviceFormatProperties2(physicalDevice, format, &formatProps);
+ for (const auto& props : formatModifierPropsVector) {
+ if (props.drmFormatModifier == modifier) {
+ *formatModifierProps = props;
+ return true;
}
-
- bool SupportsDisjoint(const VulkanFunctions& fn,
- VkPhysicalDevice physicalDevice,
- VkFormat format,
- uint64_t modifier) {
- if (IsMultiPlanarVkFormat(format)) {
- VkDrmFormatModifierPropertiesEXT props;
- return (GetFormatModifierProps(fn, physicalDevice, format, modifier, &props) &&
- (props.drmFormatModifierTilingFeatures & VK_FORMAT_FEATURE_DISJOINT_BIT));
- }
+ }
+ return false;
+}
+
+// Some modifiers use multiple planes (for example, see the comment for
+// I915_FORMAT_MOD_Y_TILED_CCS in drm/drm_fourcc.h).
+ResultOrError<uint32_t> GetModifierPlaneCount(const VulkanFunctions& fn,
+ VkPhysicalDevice physicalDevice,
+ VkFormat format,
+ uint64_t modifier) {
+ VkDrmFormatModifierPropertiesEXT props;
+ if (GetFormatModifierProps(fn, physicalDevice, format, modifier, &props)) {
+ return static_cast<uint32_t>(props.drmFormatModifierPlaneCount);
+ }
+ return DAWN_FORMAT_VALIDATION_ERROR("DRM format modifier not supported.");
+}
+
+bool IsMultiPlanarVkFormat(VkFormat format) {
+ switch (format) {
+ case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
+ case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
+ case VK_FORMAT_G8_B8_R8_3PLANE_422_UNORM:
+ case VK_FORMAT_G8_B8R8_2PLANE_422_UNORM:
+ case VK_FORMAT_G8_B8_R8_3PLANE_444_UNORM:
+ case VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16:
+ case VK_FORMAT_G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16:
+ case VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16:
+ case VK_FORMAT_G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16:
+ case VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16:
+ case VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16:
+ case VK_FORMAT_G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16:
+ case VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16:
+ case VK_FORMAT_G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16:
+ case VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16:
+ case VK_FORMAT_G16_B16_R16_3PLANE_420_UNORM:
+ case VK_FORMAT_G16_B16R16_2PLANE_420_UNORM:
+ case VK_FORMAT_G16_B16_R16_3PLANE_422_UNORM:
+ case VK_FORMAT_G16_B16R16_2PLANE_422_UNORM:
+ case VK_FORMAT_G16_B16_R16_3PLANE_444_UNORM:
+ return true;
+
+ default:
return false;
- }
-
- } // anonymous namespace
-
- Service::Service(Device* device)
- : mDevice(device), mSupported(CheckSupport(device->GetDeviceInfo())) {
}
-
- Service::~Service() = default;
-
- // static
- bool Service::CheckSupport(const VulkanDeviceInfo& deviceInfo) {
- return deviceInfo.HasExt(DeviceExt::ExternalMemoryFD) &&
- deviceInfo.HasExt(DeviceExt::ImageDrmFormatModifier);
+}
+
+bool SupportsDisjoint(const VulkanFunctions& fn,
+ VkPhysicalDevice physicalDevice,
+ VkFormat format,
+ uint64_t modifier) {
+ if (IsMultiPlanarVkFormat(format)) {
+ VkDrmFormatModifierPropertiesEXT props;
+ return (GetFormatModifierProps(fn, physicalDevice, format, modifier, &props) &&
+ (props.drmFormatModifierTilingFeatures & VK_FORMAT_FEATURE_DISJOINT_BIT));
}
-
- bool Service::SupportsImportMemory(VkFormat format,
- VkImageType type,
- VkImageTiling tiling,
- VkImageUsageFlags usage,
- VkImageCreateFlags flags) {
- return mSupported && (!IsMultiPlanarVkFormat(format) ||
- (format == VK_FORMAT_G8_B8R8_2PLANE_420_UNORM &&
- mDevice->GetDeviceInfo().HasExt(DeviceExt::ImageFormatList)));
+ return false;
+}
+
+} // namespace
+
+Service::Service(Device* device)
+ : mDevice(device), mSupported(CheckSupport(device->GetDeviceInfo())) {}
+
+Service::~Service() = default;
+
+// static
+bool Service::CheckSupport(const VulkanDeviceInfo& deviceInfo) {
+ return deviceInfo.HasExt(DeviceExt::ExternalMemoryFD) &&
+ deviceInfo.HasExt(DeviceExt::ImageDrmFormatModifier);
+}
+
+bool Service::SupportsImportMemory(VkFormat format,
+ VkImageType type,
+ VkImageTiling tiling,
+ VkImageUsageFlags usage,
+ VkImageCreateFlags flags) {
+ return mSupported && (!IsMultiPlanarVkFormat(format) ||
+ (format == VK_FORMAT_G8_B8R8_2PLANE_420_UNORM &&
+ mDevice->GetDeviceInfo().HasExt(DeviceExt::ImageFormatList)));
+}
+
+bool Service::SupportsCreateImage(const ExternalImageDescriptor* descriptor,
+ VkFormat format,
+ VkImageUsageFlags usage,
+ bool* supportsDisjoint) {
+ *supportsDisjoint = false;
+ // Early out before we try using extension functions
+ if (!mSupported) {
+ return false;
+ }
+ if (descriptor->GetType() != ExternalImageType::DmaBuf) {
+ return false;
+ }
+ const ExternalImageDescriptorDmaBuf* dmaBufDescriptor =
+ static_cast<const ExternalImageDescriptorDmaBuf*>(descriptor);
+
+ // Verify plane count for the modifier.
+ VkPhysicalDevice physicalDevice = ToBackend(mDevice->GetAdapter())->GetPhysicalDevice();
+ uint32_t planeCount = 0;
+ if (mDevice->ConsumedError(GetModifierPlaneCount(mDevice->fn, physicalDevice, format,
+ dmaBufDescriptor->drmModifier),
+ &planeCount)) {
+ return false;
+ }
+ if (planeCount == 0) {
+ return false;
+ }
+ // Only support the NV12 multi-planar format for now.
+ if (planeCount > 1 && format != VK_FORMAT_G8_B8R8_2PLANE_420_UNORM) {
+ return false;
+ }
+ *supportsDisjoint =
+ SupportsDisjoint(mDevice->fn, physicalDevice, format, dmaBufDescriptor->drmModifier);
+
+ // Verify that the format modifier of the external memory and the requested Vulkan format
+ // are actually supported together in a dma-buf import.
+ VkPhysicalDeviceImageFormatInfo2 imageFormatInfo = {};
+ imageFormatInfo.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2;
+ imageFormatInfo.format = format;
+ imageFormatInfo.type = VK_IMAGE_TYPE_2D;
+ imageFormatInfo.tiling = VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT;
+ imageFormatInfo.usage = usage;
+ imageFormatInfo.flags = 0;
+ PNextChainBuilder imageFormatInfoChain(&imageFormatInfo);
+
+ VkPhysicalDeviceExternalImageFormatInfo externalImageFormatInfo = {};
+ externalImageFormatInfo.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT;
+ imageFormatInfoChain.Add(&externalImageFormatInfo,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO);
+
+ VkPhysicalDeviceImageDrmFormatModifierInfoEXT drmModifierInfo = {};
+ drmModifierInfo.drmFormatModifier = dmaBufDescriptor->drmModifier;
+ drmModifierInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+ imageFormatInfoChain.Add(&drmModifierInfo,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_DRM_FORMAT_MODIFIER_INFO_EXT);
+
+ // For a mutable VkImage of a multi-planar format, we also need to make sure that each
+ // plane's view format can be supported.
+ std::array<VkFormat, 2> viewFormats;
+ VkImageFormatListCreateInfo imageFormatListInfo = {};
+
+ if (planeCount > 1) {
+ ASSERT(format == VK_FORMAT_G8_B8R8_2PLANE_420_UNORM);
+ viewFormats = {VK_FORMAT_R8_UNORM, VK_FORMAT_R8G8_UNORM};
+ imageFormatListInfo.viewFormatCount = 2;
+ imageFormatListInfo.pViewFormats = viewFormats.data();
+ imageFormatInfo.flags |= VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT;
+ imageFormatInfoChain.Add(&imageFormatListInfo,
+ VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO);
}
- bool Service::SupportsCreateImage(const ExternalImageDescriptor* descriptor,
- VkFormat format,
- VkImageUsageFlags usage,
- bool* supportsDisjoint) {
- *supportsDisjoint = false;
- // Early out before we try using extension functions
- if (!mSupported) {
- return false;
- }
- if (descriptor->GetType() != ExternalImageType::DmaBuf) {
- return false;
- }
- const ExternalImageDescriptorDmaBuf* dmaBufDescriptor =
- static_cast<const ExternalImageDescriptorDmaBuf*>(descriptor);
-
- // Verify plane count for the modifier.
- VkPhysicalDevice physicalDevice = ToBackend(mDevice->GetAdapter())->GetPhysicalDevice();
- uint32_t planeCount = 0;
- if (mDevice->ConsumedError(GetModifierPlaneCount(mDevice->fn, physicalDevice, format,
- dmaBufDescriptor->drmModifier),
- &planeCount)) {
- return false;
- }
- if (planeCount == 0) {
- return false;
- }
- // Only support the NV12 multi-planar format for now.
- if (planeCount > 1 && format != VK_FORMAT_G8_B8R8_2PLANE_420_UNORM) {
- return false;
- }
- *supportsDisjoint =
- SupportsDisjoint(mDevice->fn, physicalDevice, format, dmaBufDescriptor->drmModifier);
-
- // Verify that the format modifier of the external memory and the requested Vulkan format
- // are actually supported together in a dma-buf import.
- VkPhysicalDeviceImageFormatInfo2 imageFormatInfo = {};
- imageFormatInfo.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2;
- imageFormatInfo.format = format;
- imageFormatInfo.type = VK_IMAGE_TYPE_2D;
- imageFormatInfo.tiling = VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT;
- imageFormatInfo.usage = usage;
- imageFormatInfo.flags = 0;
- PNextChainBuilder imageFormatInfoChain(&imageFormatInfo);
-
- VkPhysicalDeviceExternalImageFormatInfo externalImageFormatInfo = {};
- externalImageFormatInfo.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT;
- imageFormatInfoChain.Add(&externalImageFormatInfo,
- VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO);
-
- VkPhysicalDeviceImageDrmFormatModifierInfoEXT drmModifierInfo = {};
- drmModifierInfo.drmFormatModifier = dmaBufDescriptor->drmModifier;
- drmModifierInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
- imageFormatInfoChain.Add(
- &drmModifierInfo, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_DRM_FORMAT_MODIFIER_INFO_EXT);
-
- // For mutable vkimage of multi-planar format, we also need to make sure the each
- // plane's view format can be supported.
- std::array<VkFormat, 2> viewFormats;
- VkImageFormatListCreateInfo imageFormatListInfo = {};
-
- if (planeCount > 1) {
- ASSERT(format == VK_FORMAT_G8_B8R8_2PLANE_420_UNORM);
- viewFormats = {VK_FORMAT_R8_UNORM, VK_FORMAT_R8G8_UNORM};
- imageFormatListInfo.viewFormatCount = 2;
- imageFormatListInfo.pViewFormats = viewFormats.data();
- imageFormatInfo.flags |= VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT;
- imageFormatInfoChain.Add(&imageFormatListInfo,
- VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO);
- }
-
- VkImageFormatProperties2 imageFormatProps = {};
- imageFormatProps.sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2;
- PNextChainBuilder imageFormatPropsChain(&imageFormatProps);
-
- VkExternalImageFormatProperties externalImageFormatProps = {};
- imageFormatPropsChain.Add(&externalImageFormatProps,
- VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES);
+ VkImageFormatProperties2 imageFormatProps = {};
+ imageFormatProps.sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2;
+ PNextChainBuilder imageFormatPropsChain(&imageFormatProps);
- VkResult result = VkResult::WrapUnsafe(mDevice->fn.GetPhysicalDeviceImageFormatProperties2(
- physicalDevice, &imageFormatInfo, &imageFormatProps));
- if (result != VK_SUCCESS) {
- return false;
- }
- VkExternalMemoryFeatureFlags featureFlags =
- externalImageFormatProps.externalMemoryProperties.externalMemoryFeatures;
- return featureFlags & VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT;
- }
+ VkExternalImageFormatProperties externalImageFormatProps = {};
+ imageFormatPropsChain.Add(&externalImageFormatProps,
+ VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES);
- ResultOrError<MemoryImportParams> Service::GetMemoryImportParams(
- const ExternalImageDescriptor* descriptor,
- VkImage image) {
- DAWN_INVALID_IF(descriptor->GetType() != ExternalImageType::DmaBuf,
- "ExternalImageDescriptor is not a ExternalImageDescriptorDmaBuf.");
-
- const ExternalImageDescriptorDmaBuf* dmaBufDescriptor =
- static_cast<const ExternalImageDescriptorDmaBuf*>(descriptor);
- VkDevice device = mDevice->GetVkDevice();
-
- // Get the valid memory types for the VkImage.
- VkMemoryRequirements memoryRequirements;
- mDevice->fn.GetImageMemoryRequirements(device, image, &memoryRequirements);
-
- VkMemoryFdPropertiesKHR fdProperties;
- fdProperties.sType = VK_STRUCTURE_TYPE_MEMORY_FD_PROPERTIES_KHR;
- fdProperties.pNext = nullptr;
-
- // Get the valid memory types that the external memory can be imported as.
- mDevice->fn.GetMemoryFdPropertiesKHR(device, VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT,
- dmaBufDescriptor->memoryFD, &fdProperties);
- // Choose the best memory type that satisfies both the image's constraint and the
- // import's constraint.
- memoryRequirements.memoryTypeBits &= fdProperties.memoryTypeBits;
- int memoryTypeIndex = mDevice->GetResourceMemoryAllocator()->FindBestTypeIndex(
- memoryRequirements, MemoryKind::Opaque);
- DAWN_INVALID_IF(memoryTypeIndex == -1,
- "Unable to find an appropriate memory type for import.");
-
- MemoryImportParams params = {memoryRequirements.size,
- static_cast<uint32_t>(memoryTypeIndex)};
- return params;
+ VkResult result = VkResult::WrapUnsafe(mDevice->fn.GetPhysicalDeviceImageFormatProperties2(
+ physicalDevice, &imageFormatInfo, &imageFormatProps));
+ if (result != VK_SUCCESS) {
+ return false;
}
-
- ResultOrError<VkDeviceMemory> Service::ImportMemory(ExternalMemoryHandle handle,
- const MemoryImportParams& importParams,
- VkImage image) {
- DAWN_INVALID_IF(handle < 0, "Importing memory with an invalid handle.");
-
- VkMemoryAllocateInfo memoryAllocateInfo = {};
- memoryAllocateInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
- memoryAllocateInfo.allocationSize = importParams.allocationSize;
- memoryAllocateInfo.memoryTypeIndex = importParams.memoryTypeIndex;
- PNextChainBuilder memoryAllocateInfoChain(&memoryAllocateInfo);
-
- VkImportMemoryFdInfoKHR importMemoryFdInfo;
- importMemoryFdInfo.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT,
- importMemoryFdInfo.fd = handle;
- memoryAllocateInfoChain.Add(&importMemoryFdInfo,
- VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR);
-
- VkMemoryDedicatedAllocateInfo memoryDedicatedAllocateInfo;
- memoryDedicatedAllocateInfo.image = image;
- memoryDedicatedAllocateInfo.buffer = VkBuffer{};
- memoryAllocateInfoChain.Add(&memoryDedicatedAllocateInfo,
- VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO);
-
- VkDeviceMemory allocatedMemory = VK_NULL_HANDLE;
- DAWN_TRY(
- CheckVkSuccess(mDevice->fn.AllocateMemory(mDevice->GetVkDevice(), &memoryAllocateInfo,
- nullptr, &*allocatedMemory),
- "vkAllocateMemory"));
- return allocatedMemory;
+ VkExternalMemoryFeatureFlags featureFlags =
+ externalImageFormatProps.externalMemoryProperties.externalMemoryFeatures;
+ return featureFlags & VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT;
+}
+
+ResultOrError<MemoryImportParams> Service::GetMemoryImportParams(
+ const ExternalImageDescriptor* descriptor,
+ VkImage image) {
+ DAWN_INVALID_IF(descriptor->GetType() != ExternalImageType::DmaBuf,
+ "ExternalImageDescriptor is not an ExternalImageDescriptorDmaBuf.");
+
+ const ExternalImageDescriptorDmaBuf* dmaBufDescriptor =
+ static_cast<const ExternalImageDescriptorDmaBuf*>(descriptor);
+ VkDevice device = mDevice->GetVkDevice();
+
+ // Get the valid memory types for the VkImage.
+ VkMemoryRequirements memoryRequirements;
+ mDevice->fn.GetImageMemoryRequirements(device, image, &memoryRequirements);
+
+ VkMemoryFdPropertiesKHR fdProperties;
+ fdProperties.sType = VK_STRUCTURE_TYPE_MEMORY_FD_PROPERTIES_KHR;
+ fdProperties.pNext = nullptr;
+
+ // Get the valid memory types that the external memory can be imported as.
+ mDevice->fn.GetMemoryFdPropertiesKHR(device, VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT,
+ dmaBufDescriptor->memoryFD, &fdProperties);
+ // Choose the best memory type that satisfies both the image's constraint and the
+ // import's constraint.
+ memoryRequirements.memoryTypeBits &= fdProperties.memoryTypeBits;
+ int memoryTypeIndex = mDevice->GetResourceMemoryAllocator()->FindBestTypeIndex(
+ memoryRequirements, MemoryKind::Opaque);
+ DAWN_INVALID_IF(memoryTypeIndex == -1, "Unable to find an appropriate memory type for import.");
+
+ MemoryImportParams params = {memoryRequirements.size, static_cast<uint32_t>(memoryTypeIndex)};
+ return params;
+}
+
+ResultOrError<VkDeviceMemory> Service::ImportMemory(ExternalMemoryHandle handle,
+ const MemoryImportParams& importParams,
+ VkImage image) {
+ DAWN_INVALID_IF(handle < 0, "Importing memory with an invalid handle.");
+
+ VkMemoryAllocateInfo memoryAllocateInfo = {};
+ memoryAllocateInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
+ memoryAllocateInfo.allocationSize = importParams.allocationSize;
+ memoryAllocateInfo.memoryTypeIndex = importParams.memoryTypeIndex;
+ PNextChainBuilder memoryAllocateInfoChain(&memoryAllocateInfo);
+
+ VkImportMemoryFdInfoKHR importMemoryFdInfo;
+ importMemoryFdInfo.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT;
+ importMemoryFdInfo.fd = handle;
+ memoryAllocateInfoChain.Add(&importMemoryFdInfo, VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR);
+
+ VkMemoryDedicatedAllocateInfo memoryDedicatedAllocateInfo;
+ memoryDedicatedAllocateInfo.image = image;
+ memoryDedicatedAllocateInfo.buffer = VkBuffer{};
+ memoryAllocateInfoChain.Add(&memoryDedicatedAllocateInfo,
+ VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO);
+
+ VkDeviceMemory allocatedMemory = VK_NULL_HANDLE;
+ DAWN_TRY(CheckVkSuccess(mDevice->fn.AllocateMemory(mDevice->GetVkDevice(), &memoryAllocateInfo,
+ nullptr, &*allocatedMemory),
+ "vkAllocateMemory"));
+ return allocatedMemory;
+}
+
+ResultOrError<VkImage> Service::CreateImage(const ExternalImageDescriptor* descriptor,
+ const VkImageCreateInfo& baseCreateInfo) {
+ DAWN_INVALID_IF(descriptor->GetType() != ExternalImageType::DmaBuf,
+ "ExternalImageDescriptor is not a dma-buf descriptor.");
+
+ const ExternalImageDescriptorDmaBuf* dmaBufDescriptor =
+ static_cast<const ExternalImageDescriptorDmaBuf*>(descriptor);
+ VkPhysicalDevice physicalDevice = ToBackend(mDevice->GetAdapter())->GetPhysicalDevice();
+ VkDevice device = mDevice->GetVkDevice();
+
+ uint32_t planeCount;
+ DAWN_TRY_ASSIGN(planeCount,
+ GetModifierPlaneCount(mDevice->fn, physicalDevice, baseCreateInfo.format,
+ dmaBufDescriptor->drmModifier));
+
+ VkImageCreateInfo createInfo = baseCreateInfo;
+ createInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+ createInfo.tiling = VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT;
+
+ PNextChainBuilder createInfoChain(&createInfo);
+
+ VkExternalMemoryImageCreateInfo externalMemoryImageCreateInfo = {};
+ externalMemoryImageCreateInfo.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT;
+ createInfoChain.Add(&externalMemoryImageCreateInfo,
+ VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO);
+
+ VkSubresourceLayout planeLayouts[ExternalImageDescriptorDmaBuf::kMaxPlanes];
+ for (uint32_t plane = 0u; plane < planeCount; ++plane) {
+ planeLayouts[plane].offset = dmaBufDescriptor->planeLayouts[plane].offset;
+ planeLayouts[plane].size = 0; // VK_EXT_image_drm_format_modifier mandates size = 0.
+ planeLayouts[plane].rowPitch = dmaBufDescriptor->planeLayouts[plane].stride;
+ planeLayouts[plane].arrayPitch = 0; // Not an array texture
+ planeLayouts[plane].depthPitch = 0; // Not a depth texture
}
- ResultOrError<VkImage> Service::CreateImage(const ExternalImageDescriptor* descriptor,
- const VkImageCreateInfo& baseCreateInfo) {
- DAWN_INVALID_IF(descriptor->GetType() != ExternalImageType::DmaBuf,
- "ExternalImageDescriptor is not a dma-buf descriptor.");
-
- const ExternalImageDescriptorDmaBuf* dmaBufDescriptor =
- static_cast<const ExternalImageDescriptorDmaBuf*>(descriptor);
- VkPhysicalDevice physicalDevice = ToBackend(mDevice->GetAdapter())->GetPhysicalDevice();
- VkDevice device = mDevice->GetVkDevice();
-
- uint32_t planeCount;
- DAWN_TRY_ASSIGN(planeCount,
- GetModifierPlaneCount(mDevice->fn, physicalDevice, baseCreateInfo.format,
- dmaBufDescriptor->drmModifier));
-
- VkImageCreateInfo createInfo = baseCreateInfo;
- createInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
- createInfo.tiling = VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT;
-
- PNextChainBuilder createInfoChain(&createInfo);
-
- VkExternalMemoryImageCreateInfo externalMemoryImageCreateInfo = {};
- externalMemoryImageCreateInfo.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT;
- createInfoChain.Add(&externalMemoryImageCreateInfo,
- VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO);
-
- // For single plane formats.
- VkSubresourceLayout planeLayout = {};
- planeLayout.offset = 0;
- planeLayout.size = 0; // VK_EXT_image_drm_format_modifier mandates size = 0.
- planeLayout.rowPitch = dmaBufDescriptor->stride;
- planeLayout.arrayPitch = 0; // Not an array texture
- planeLayout.depthPitch = 0; // Not a depth texture
-
- VkImageDrmFormatModifierExplicitCreateInfoEXT explicitCreateInfo = {};
- explicitCreateInfo.drmFormatModifier = dmaBufDescriptor->drmModifier;
- explicitCreateInfo.drmFormatModifierPlaneCount = 1;
- explicitCreateInfo.pPlaneLayouts = &planeLayout;
-
- // For multi-planar formats, we can't explicitly specify VkSubresourceLayout for each plane
- // due to the lack of knowledge about the required 'offset'. Alternatively
- // VkImageDrmFormatModifierListCreateInfoEXT can be used to create image with the DRM format
- // modifier.
- VkImageDrmFormatModifierListCreateInfoEXT listCreateInfo = {};
- listCreateInfo.drmFormatModifierCount = 1;
- listCreateInfo.pDrmFormatModifiers = &dmaBufDescriptor->drmModifier;
-
- if (planeCount > 1) {
- // For multi-planar formats, VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT specifies that a
- // VkImageView can be plane's format which might differ from the image's format.
- createInfo.flags |= VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT;
- createInfoChain.Add(&listCreateInfo,
- VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_LIST_CREATE_INFO_EXT);
- } else {
- createInfoChain.Add(
- &explicitCreateInfo,
- VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_EXPLICIT_CREATE_INFO_EXT);
- }
+ VkImageDrmFormatModifierExplicitCreateInfoEXT explicitCreateInfo = {};
+ explicitCreateInfo.drmFormatModifier = dmaBufDescriptor->drmModifier;
+ explicitCreateInfo.drmFormatModifierPlaneCount = planeCount;
+ explicitCreateInfo.pPlaneLayouts = &planeLayouts[0];
- // Create a new VkImage with tiling equal to the DRM format modifier.
- VkImage image;
- DAWN_TRY(CheckVkSuccess(mDevice->fn.CreateImage(device, &createInfo, nullptr, &*image),
- "CreateImage"));
- return image;
+ if (planeCount > 1) {
+ // For multi-planar formats, VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT specifies that a
+ // VkImageView can use a plane's format, which might differ from the image's format.
+ createInfo.flags |= VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT;
}
+ createInfoChain.Add(&explicitCreateInfo,
+ VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_EXPLICIT_CREATE_INFO_EXT);
+
+ // Create a new VkImage with tiling equal to the DRM format modifier.
+ VkImage image;
+ DAWN_TRY(CheckVkSuccess(mDevice->fn.CreateImage(device, &createInfo, nullptr, &*image),
+ "CreateImage"));
+ return image;
+}
-}} // namespace dawn::native::vulkan::external_memory
+} // namespace dawn::native::vulkan::external_memory
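
The functional change in this file is that CreateImage now feeds explicit per-plane layouts for every plane into VkImageDrmFormatModifierExplicitCreateInfoEXT, instead of falling back to the list-based path for multi-planar formats. A minimal sketch of the descriptor side for a two-plane NV12 import, assuming the descriptor is default-constructible; the fd, offsets, strides, and modifier value are placeholders, not values from this patch.

    ExternalImageDescriptorDmaBuf desc;
    desc.memoryFD = dmaBufFd;                  // assumed: an fd obtained from the producer
    desc.drmModifier = drmModifier;            // assumed: the producer's DRM format modifier
    desc.planeLayouts[0].offset = 0;           // Y plane
    desc.planeLayouts[0].stride = 1024;
    desc.planeLayouts[1].offset = 1024 * 768;  // interleaved CbCr plane
    desc.planeLayouts[1].stride = 1024;
    // Service::CreateImage() copies offset/stride into VkSubresourceLayout entries and, as
    // required by VK_EXT_image_drm_format_modifier, forces size, arrayPitch, and depthPitch
    // to zero.
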
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/external_memory/MemoryServiceNull.cpp b/chromium/third_party/dawn/src/dawn/native/vulkan/external_memory/MemoryServiceNull.cpp
index 7b3c2399162..5cb8c4d2954 100644
--- a/chromium/third_party/dawn/src/dawn/native/vulkan/external_memory/MemoryServiceNull.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/external_memory/MemoryServiceNull.cpp
@@ -15,51 +15,51 @@
#include "dawn/native/vulkan/DeviceVk.h"
#include "dawn/native/vulkan/external_memory/MemoryService.h"
-namespace dawn::native { namespace vulkan::external_memory {
+namespace dawn::native::vulkan::external_memory {
- Service::Service(Device* device) : mDevice(device) {
- DAWN_UNUSED(mDevice);
- DAWN_UNUSED(mSupported);
- }
+Service::Service(Device* device) : mDevice(device) {
+ DAWN_UNUSED(mDevice);
+ DAWN_UNUSED(mSupported);
+}
- Service::~Service() = default;
+Service::~Service() = default;
- // static
- bool Service::CheckSupport(const VulkanDeviceInfo& deviceInfo) {
- return false;
- }
+// static
+bool Service::CheckSupport(const VulkanDeviceInfo& deviceInfo) {
+ return false;
+}
- bool Service::SupportsImportMemory(VkFormat format,
- VkImageType type,
- VkImageTiling tiling,
- VkImageUsageFlags usage,
- VkImageCreateFlags flags) {
- return false;
- }
+bool Service::SupportsImportMemory(VkFormat format,
+ VkImageType type,
+ VkImageTiling tiling,
+ VkImageUsageFlags usage,
+ VkImageCreateFlags flags) {
+ return false;
+}
- bool Service::SupportsCreateImage(const ExternalImageDescriptor* descriptor,
- VkFormat format,
- VkImageUsageFlags usage,
- bool* supportsDisjoint) {
- *supportsDisjoint = false;
- return false;
- }
+bool Service::SupportsCreateImage(const ExternalImageDescriptor* descriptor,
+ VkFormat format,
+ VkImageUsageFlags usage,
+ bool* supportsDisjoint) {
+ *supportsDisjoint = false;
+ return false;
+}
- ResultOrError<MemoryImportParams> Service::GetMemoryImportParams(
- const ExternalImageDescriptor* descriptor,
- VkImage image) {
- return DAWN_UNIMPLEMENTED_ERROR("Using null memory service to interop inside Vulkan");
- }
+ResultOrError<MemoryImportParams> Service::GetMemoryImportParams(
+ const ExternalImageDescriptor* descriptor,
+ VkImage image) {
+ return DAWN_UNIMPLEMENTED_ERROR("Using null memory service to interop inside Vulkan");
+}
- ResultOrError<VkDeviceMemory> Service::ImportMemory(ExternalMemoryHandle handle,
- const MemoryImportParams& importParams,
- VkImage image) {
- return DAWN_UNIMPLEMENTED_ERROR("Using null memory service to interop inside Vulkan");
- }
+ResultOrError<VkDeviceMemory> Service::ImportMemory(ExternalMemoryHandle handle,
+ const MemoryImportParams& importParams,
+ VkImage image) {
+ return DAWN_UNIMPLEMENTED_ERROR("Using null memory service to interop inside Vulkan");
+}
- ResultOrError<VkImage> Service::CreateImage(const ExternalImageDescriptor* descriptor,
- const VkImageCreateInfo& baseCreateInfo) {
- return DAWN_UNIMPLEMENTED_ERROR("Using null memory service to interop inside Vulkan");
- }
+ResultOrError<VkImage> Service::CreateImage(const ExternalImageDescriptor* descriptor,
+ const VkImageCreateInfo& baseCreateInfo) {
+ return DAWN_UNIMPLEMENTED_ERROR("Using null memory service to interop inside Vulkan");
+}
-}} // namespace dawn::native::vulkan::external_memory
+} // namespace dawn::native::vulkan::external_memory
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/external_memory/MemoryServiceOpaqueFD.cpp b/chromium/third_party/dawn/src/dawn/native/vulkan/external_memory/MemoryServiceOpaqueFD.cpp
index ad5461726de..f6f1fe44be1 100644
--- a/chromium/third_party/dawn/src/dawn/native/vulkan/external_memory/MemoryServiceOpaqueFD.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/external_memory/MemoryServiceOpaqueFD.cpp
@@ -21,140 +21,138 @@
#include "dawn/native/vulkan/VulkanError.h"
#include "dawn/native/vulkan/external_memory/MemoryService.h"
-namespace dawn::native { namespace vulkan::external_memory {
-
- Service::Service(Device* device)
- : mDevice(device), mSupported(CheckSupport(device->GetDeviceInfo())) {
- }
-
- Service::~Service() = default;
-
- // static
- bool Service::CheckSupport(const VulkanDeviceInfo& deviceInfo) {
- return deviceInfo.HasExt(DeviceExt::ExternalMemoryFD);
- }
-
- bool Service::SupportsImportMemory(VkFormat format,
- VkImageType type,
- VkImageTiling tiling,
- VkImageUsageFlags usage,
- VkImageCreateFlags flags) {
- // Early out before we try using extension functions
- if (!mSupported) {
- return false;
- }
-
- VkPhysicalDeviceExternalImageFormatInfo externalFormatInfo;
- externalFormatInfo.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO_KHR;
- externalFormatInfo.pNext = nullptr;
- externalFormatInfo.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
-
- VkPhysicalDeviceImageFormatInfo2 formatInfo;
- formatInfo.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2_KHR;
- formatInfo.pNext = &externalFormatInfo;
- formatInfo.format = format;
- formatInfo.type = type;
- formatInfo.tiling = tiling;
- formatInfo.usage = usage;
- formatInfo.flags = flags;
-
- VkExternalImageFormatProperties externalFormatProperties;
- externalFormatProperties.sType = VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES_KHR;
- externalFormatProperties.pNext = nullptr;
-
- VkImageFormatProperties2 formatProperties;
- formatProperties.sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2_KHR;
- formatProperties.pNext = &externalFormatProperties;
-
- VkResult result = VkResult::WrapUnsafe(mDevice->fn.GetPhysicalDeviceImageFormatProperties2(
- ToBackend(mDevice->GetAdapter())->GetPhysicalDevice(), &formatInfo, &formatProperties));
-
- // If handle not supported, result == VK_ERROR_FORMAT_NOT_SUPPORTED
- if (result != VK_SUCCESS) {
- return false;
- }
-
- // TODO(http://crbug.com/dawn/206): Investigate dedicated only images
- VkFlags memoryFlags =
- externalFormatProperties.externalMemoryProperties.externalMemoryFeatures;
- return (memoryFlags & VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT_KHR) != 0;
- }
-
- bool Service::SupportsCreateImage(const ExternalImageDescriptor* descriptor,
- VkFormat format,
- VkImageUsageFlags usage,
- bool* supportsDisjoint) {
- *supportsDisjoint = false;
- return mSupported;
- }
-
- ResultOrError<MemoryImportParams> Service::GetMemoryImportParams(
- const ExternalImageDescriptor* descriptor,
- VkImage image) {
- DAWN_INVALID_IF(descriptor->GetType() != ExternalImageType::OpaqueFD,
- "ExternalImageDescriptor is not an OpaqueFD descriptor.");
-
- const ExternalImageDescriptorOpaqueFD* opaqueFDDescriptor =
- static_cast<const ExternalImageDescriptorOpaqueFD*>(descriptor);
-
- MemoryImportParams params = {opaqueFDDescriptor->allocationSize,
- opaqueFDDescriptor->memoryTypeIndex};
- return params;
- }
-
- ResultOrError<VkDeviceMemory> Service::ImportMemory(ExternalMemoryHandle handle,
- const MemoryImportParams& importParams,
- VkImage image) {
- DAWN_INVALID_IF(handle < 0, "Importing memory with an invalid handle.");
-
- VkMemoryRequirements requirements;
- mDevice->fn.GetImageMemoryRequirements(mDevice->GetVkDevice(), image, &requirements);
- DAWN_INVALID_IF(requirements.size > importParams.allocationSize,
- "Requested allocation size (%u) is smaller than the image requires (%u).",
- importParams.allocationSize, requirements.size);
-
- VkImportMemoryFdInfoKHR importMemoryFdInfo;
- importMemoryFdInfo.sType = VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR;
- importMemoryFdInfo.pNext = nullptr;
- importMemoryFdInfo.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT;
- importMemoryFdInfo.fd = handle;
-
- VkMemoryAllocateInfo allocateInfo;
- allocateInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
- allocateInfo.pNext = &importMemoryFdInfo;
- allocateInfo.allocationSize = importParams.allocationSize;
- allocateInfo.memoryTypeIndex = importParams.memoryTypeIndex;
-
- VkDeviceMemory allocatedMemory = VK_NULL_HANDLE;
- DAWN_TRY(CheckVkSuccess(mDevice->fn.AllocateMemory(mDevice->GetVkDevice(), &allocateInfo,
- nullptr, &*allocatedMemory),
- "vkAllocateMemory"));
- return allocatedMemory;
+namespace dawn::native::vulkan::external_memory {
+
+Service::Service(Device* device)
+ : mDevice(device), mSupported(CheckSupport(device->GetDeviceInfo())) {}
+
+Service::~Service() = default;
+
+// static
+bool Service::CheckSupport(const VulkanDeviceInfo& deviceInfo) {
+ return deviceInfo.HasExt(DeviceExt::ExternalMemoryFD);
+}
+
+bool Service::SupportsImportMemory(VkFormat format,
+ VkImageType type,
+ VkImageTiling tiling,
+ VkImageUsageFlags usage,
+ VkImageCreateFlags flags) {
+ // Early out before we try using extension functions
+ if (!mSupported) {
+ return false;
}
- ResultOrError<VkImage> Service::CreateImage(const ExternalImageDescriptor* descriptor,
- const VkImageCreateInfo& baseCreateInfo) {
- VkImageCreateInfo createInfo = baseCreateInfo;
- createInfo.flags |= VK_IMAGE_CREATE_ALIAS_BIT_KHR;
- createInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
- createInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
-
- VkExternalMemoryImageCreateInfo externalMemoryImageCreateInfo;
- externalMemoryImageCreateInfo.sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO;
- externalMemoryImageCreateInfo.pNext = nullptr;
- externalMemoryImageCreateInfo.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT;
-
- PNextChainBuilder createInfoChain(&createInfo);
- createInfoChain.Add(&externalMemoryImageCreateInfo,
- VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO);
-
- ASSERT(IsSampleCountSupported(mDevice, createInfo));
-
- VkImage image;
- DAWN_TRY(CheckVkSuccess(
- mDevice->fn.CreateImage(mDevice->GetVkDevice(), &createInfo, nullptr, &*image),
- "CreateImage"));
- return image;
+ VkPhysicalDeviceExternalImageFormatInfo externalFormatInfo;
+ externalFormatInfo.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO_KHR;
+ externalFormatInfo.pNext = nullptr;
+ externalFormatInfo.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
+
+ VkPhysicalDeviceImageFormatInfo2 formatInfo;
+ formatInfo.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2_KHR;
+ formatInfo.pNext = &externalFormatInfo;
+ formatInfo.format = format;
+ formatInfo.type = type;
+ formatInfo.tiling = tiling;
+ formatInfo.usage = usage;
+ formatInfo.flags = flags;
+
+ VkExternalImageFormatProperties externalFormatProperties;
+ externalFormatProperties.sType = VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES_KHR;
+ externalFormatProperties.pNext = nullptr;
+
+ VkImageFormatProperties2 formatProperties;
+ formatProperties.sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2_KHR;
+ formatProperties.pNext = &externalFormatProperties;
+
+ VkResult result = VkResult::WrapUnsafe(mDevice->fn.GetPhysicalDeviceImageFormatProperties2(
+ ToBackend(mDevice->GetAdapter())->GetPhysicalDevice(), &formatInfo, &formatProperties));
+
+ // If the handle type is not supported, result == VK_ERROR_FORMAT_NOT_SUPPORTED
+ if (result != VK_SUCCESS) {
+ return false;
}
-}} // namespace dawn::native::vulkan::external_memory
+ // TODO(http://crbug.com/dawn/206): Investigate dedicated only images
+ VkFlags memoryFlags = externalFormatProperties.externalMemoryProperties.externalMemoryFeatures;
+ return (memoryFlags & VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT_KHR) != 0;
+}
+
+bool Service::SupportsCreateImage(const ExternalImageDescriptor* descriptor,
+ VkFormat format,
+ VkImageUsageFlags usage,
+ bool* supportsDisjoint) {
+ *supportsDisjoint = false;
+ return mSupported;
+}
+
+ResultOrError<MemoryImportParams> Service::GetMemoryImportParams(
+ const ExternalImageDescriptor* descriptor,
+ VkImage image) {
+ DAWN_INVALID_IF(descriptor->GetType() != ExternalImageType::OpaqueFD,
+ "ExternalImageDescriptor is not an OpaqueFD descriptor.");
+
+ const ExternalImageDescriptorOpaqueFD* opaqueFDDescriptor =
+ static_cast<const ExternalImageDescriptorOpaqueFD*>(descriptor);
+
+ MemoryImportParams params = {opaqueFDDescriptor->allocationSize,
+ opaqueFDDescriptor->memoryTypeIndex};
+ return params;
+}
+
+ResultOrError<VkDeviceMemory> Service::ImportMemory(ExternalMemoryHandle handle,
+ const MemoryImportParams& importParams,
+ VkImage image) {
+ DAWN_INVALID_IF(handle < 0, "Importing memory with an invalid handle.");
+
+ VkMemoryRequirements requirements;
+ mDevice->fn.GetImageMemoryRequirements(mDevice->GetVkDevice(), image, &requirements);
+ DAWN_INVALID_IF(requirements.size > importParams.allocationSize,
+ "Requested allocation size (%u) is smaller than the image requires (%u).",
+ importParams.allocationSize, requirements.size);
+
+ VkImportMemoryFdInfoKHR importMemoryFdInfo;
+ importMemoryFdInfo.sType = VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR;
+ importMemoryFdInfo.pNext = nullptr;
+ importMemoryFdInfo.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT;
+ importMemoryFdInfo.fd = handle;
+
+ VkMemoryAllocateInfo allocateInfo;
+ allocateInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
+ allocateInfo.pNext = &importMemoryFdInfo;
+ allocateInfo.allocationSize = importParams.allocationSize;
+ allocateInfo.memoryTypeIndex = importParams.memoryTypeIndex;
+
+ VkDeviceMemory allocatedMemory = VK_NULL_HANDLE;
+ DAWN_TRY(CheckVkSuccess(mDevice->fn.AllocateMemory(mDevice->GetVkDevice(), &allocateInfo,
+ nullptr, &*allocatedMemory),
+ "vkAllocateMemory"));
+ return allocatedMemory;
+}
+
+ResultOrError<VkImage> Service::CreateImage(const ExternalImageDescriptor* descriptor,
+ const VkImageCreateInfo& baseCreateInfo) {
+ VkImageCreateInfo createInfo = baseCreateInfo;
+ createInfo.flags |= VK_IMAGE_CREATE_ALIAS_BIT_KHR;
+ createInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
+ createInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+
+ VkExternalMemoryImageCreateInfo externalMemoryImageCreateInfo;
+ externalMemoryImageCreateInfo.sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO;
+ externalMemoryImageCreateInfo.pNext = nullptr;
+ externalMemoryImageCreateInfo.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT;
+
+ PNextChainBuilder createInfoChain(&createInfo);
+ createInfoChain.Add(&externalMemoryImageCreateInfo,
+ VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO);
+
+ ASSERT(IsSampleCountSupported(mDevice, createInfo));
+
+ VkImage image;
+ DAWN_TRY(CheckVkSuccess(
+ mDevice->fn.CreateImage(mDevice->GetVkDevice(), &createInfo, nullptr, &*image),
+ "CreateImage"));
+ return image;
+}
+
+} // namespace dawn::native::vulkan::external_memory
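
Unlike the dma-buf service, the opaque-FD path above takes allocationSize and memoryTypeIndex directly from the descriptor rather than deriving them from the image. A minimal sketch of the descriptor a caller would fill in, assuming it is default-constructible and exposes a memoryFD field like its dma-buf counterpart; the numeric values are placeholders.

    ExternalImageDescriptorOpaqueFD desc;
    desc.memoryFD = opaqueFd;               // assumed: fd exported from another VkDeviceMemory
    desc.allocationSize = 4 * 1024 * 1024;  // must cover the image's VkMemoryRequirements::size
    desc.memoryTypeIndex = 0;               // must match the exporting allocation's memory type
    // GetMemoryImportParams() forwards these two fields unchanged, and ImportMemory() then
    // validates that the image's requirements fit within allocationSize.
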
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/external_memory/MemoryServiceZirconHandle.cpp b/chromium/third_party/dawn/src/dawn/native/vulkan/external_memory/MemoryServiceZirconHandle.cpp
index 96c04a7c39a..cec5cc4af70 100644
--- a/chromium/third_party/dawn/src/dawn/native/vulkan/external_memory/MemoryServiceZirconHandle.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/external_memory/MemoryServiceZirconHandle.cpp
@@ -21,142 +21,139 @@
#include "dawn/native/vulkan/VulkanError.h"
#include "dawn/native/vulkan/external_memory/MemoryService.h"
-namespace dawn::native { namespace vulkan::external_memory {
-
- Service::Service(Device* device)
- : mDevice(device), mSupported(CheckSupport(device->GetDeviceInfo())) {
- }
-
- Service::~Service() = default;
-
- // static
- bool Service::CheckSupport(const VulkanDeviceInfo& deviceInfo) {
- return deviceInfo.HasExt(DeviceExt::ExternalMemoryZirconHandle);
- }
-
- bool Service::SupportsImportMemory(VkFormat format,
- VkImageType type,
- VkImageTiling tiling,
- VkImageUsageFlags usage,
- VkImageCreateFlags flags) {
- // Early out before we try using extension functions
- if (!mSupported) {
- return false;
- }
-
- VkPhysicalDeviceExternalImageFormatInfo externalFormatInfo;
- externalFormatInfo.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO_KHR;
- externalFormatInfo.pNext = nullptr;
- externalFormatInfo.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA;
-
- VkPhysicalDeviceImageFormatInfo2 formatInfo;
- formatInfo.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2_KHR;
- formatInfo.pNext = &externalFormatInfo;
- formatInfo.format = format;
- formatInfo.type = type;
- formatInfo.tiling = tiling;
- formatInfo.usage = usage;
- formatInfo.flags = flags;
-
- VkExternalImageFormatProperties externalFormatProperties;
- externalFormatProperties.sType = VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES_KHR;
- externalFormatProperties.pNext = nullptr;
-
- VkImageFormatProperties2 formatProperties;
- formatProperties.sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2_KHR;
- formatProperties.pNext = &externalFormatProperties;
-
- VkResult result = mDevice->fn.GetPhysicalDeviceImageFormatProperties2(
- ToBackend(mDevice->GetAdapter())->GetPhysicalDevice(), &formatInfo, &formatProperties);
-
- // If handle not supported, result == VK_ERROR_FORMAT_NOT_SUPPORTED
- if (result != VK_SUCCESS) {
- return false;
- }
-
- // TODO(http://crbug.com/dawn/206): Investigate dedicated only images
- VkFlags memoryFlags =
- externalFormatProperties.externalMemoryProperties.externalMemoryFeatures;
- return (memoryFlags & VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT_KHR) != 0;
- }
-
- bool Service::SupportsCreateImage(const ExternalImageDescriptor* descriptor,
- VkFormat format,
- VkImageUsageFlags usage,
- bool* supportsDisjoint) {
- *supportsDisjoint = false;
- return mSupported;
- }
-
- ResultOrError<MemoryImportParams> Service::GetMemoryImportParams(
- const ExternalImageDescriptor* descriptor,
- VkImage image) {
- DAWN_INVALID_IF(descriptor->type != ExternalImageType::OpaqueFD,
- "ExternalImageDescriptor is not an OpaqueFD descriptor.");
-
- const ExternalImageDescriptorOpaqueFD* opaqueFDDescriptor =
- static_cast<const ExternalImageDescriptorOpaqueFD*>(descriptor);
-
- MemoryImportParams params = {opaqueFDDescriptor->allocationSize,
- opaqueFDDescriptor->memoryTypeIndex};
- return params;
- }
-
- ResultOrError<VkDeviceMemory> Service::ImportMemory(ExternalMemoryHandle handle,
- const MemoryImportParams& importParams,
- VkImage image) {
- DAWN_INVALID_IF(handle == ZX_HANDLE_INVALID, "Importing memory with an invalid handle.");
-
- VkMemoryRequirements requirements;
- mDevice->fn.GetImageMemoryRequirements(mDevice->GetVkDevice(), image, &requirements);
- DAWN_INVALID_IF(
- requirements.size > importParams.allocationSize,
- "Requested allocation size (%u) is smaller than the required image size (%u).",
- importParams.allocationSize, requirements.size);
-
- VkImportMemoryZirconHandleInfoFUCHSIA importMemoryHandleInfo;
- importMemoryHandleInfo.sType = VK_STRUCTURE_TYPE_IMPORT_MEMORY_ZIRCON_HANDLE_INFO_FUCHSIA;
- importMemoryHandleInfo.pNext = nullptr;
- importMemoryHandleInfo.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA;
- importMemoryHandleInfo.handle = handle;
-
- VkMemoryAllocateInfo allocateInfo;
- allocateInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
- allocateInfo.pNext = &importMemoryHandleInfo;
- allocateInfo.allocationSize = importParams.allocationSize;
- allocateInfo.memoryTypeIndex = importParams.memoryTypeIndex;
-
- VkDeviceMemory allocatedMemory = VK_NULL_HANDLE;
- DAWN_TRY(CheckVkSuccess(mDevice->fn.AllocateMemory(mDevice->GetVkDevice(), &allocateInfo,
- nullptr, &*allocatedMemory),
- "vkAllocateMemory"));
- return allocatedMemory;
+namespace dawn::native::vulkan::external_memory {
+
+Service::Service(Device* device)
+ : mDevice(device), mSupported(CheckSupport(device->GetDeviceInfo())) {}
+
+Service::~Service() = default;
+
+// static
+bool Service::CheckSupport(const VulkanDeviceInfo& deviceInfo) {
+ return deviceInfo.HasExt(DeviceExt::ExternalMemoryZirconHandle);
+}
+
+bool Service::SupportsImportMemory(VkFormat format,
+ VkImageType type,
+ VkImageTiling tiling,
+ VkImageUsageFlags usage,
+ VkImageCreateFlags flags) {
+ // Early out before we try using extension functions
+ if (!mSupported) {
+ return false;
}
- ResultOrError<VkImage> Service::CreateImage(const ExternalImageDescriptor* descriptor,
- const VkImageCreateInfo& baseCreateInfo) {
- VkImageCreateInfo createInfo = baseCreateInfo;
- createInfo.flags |= VK_IMAGE_CREATE_ALIAS_BIT_KHR;
- createInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
- createInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
-
- VkExternalMemoryImageCreateInfo externalMemoryImageCreateInfo;
- externalMemoryImageCreateInfo.sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO;
- externalMemoryImageCreateInfo.pNext = nullptr;
- externalMemoryImageCreateInfo.handleTypes =
- VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA;
-
- PNextChainBuilder createInfoChain(&createInfo);
- createInfoChain.Add(&externalMemoryImageCreateInfo,
- VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO);
-
- ASSERT(IsSampleCountSupported(mDevice, createInfo));
-
- VkImage image;
- DAWN_TRY(CheckVkSuccess(
- mDevice->fn.CreateImage(mDevice->GetVkDevice(), &createInfo, nullptr, &*image),
- "CreateImage"));
- return image;
+ VkPhysicalDeviceExternalImageFormatInfo externalFormatInfo;
+ externalFormatInfo.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO_KHR;
+ externalFormatInfo.pNext = nullptr;
+ externalFormatInfo.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA;
+
+ VkPhysicalDeviceImageFormatInfo2 formatInfo;
+ formatInfo.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2_KHR;
+ formatInfo.pNext = &externalFormatInfo;
+ formatInfo.format = format;
+ formatInfo.type = type;
+ formatInfo.tiling = tiling;
+ formatInfo.usage = usage;
+ formatInfo.flags = flags;
+
+ VkExternalImageFormatProperties externalFormatProperties;
+ externalFormatProperties.sType = VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES_KHR;
+ externalFormatProperties.pNext = nullptr;
+
+ VkImageFormatProperties2 formatProperties;
+ formatProperties.sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2_KHR;
+ formatProperties.pNext = &externalFormatProperties;
+
+ VkResult result = mDevice->fn.GetPhysicalDeviceImageFormatProperties2(
+ ToBackend(mDevice->GetAdapter())->GetPhysicalDevice(), &formatInfo, &formatProperties);
+
+ // If the handle type is not supported, result == VK_ERROR_FORMAT_NOT_SUPPORTED
+ if (result != VK_SUCCESS) {
+ return false;
}
-}} // namespace dawn::native::vulkan::external_memory
+ // TODO(http://crbug.com/dawn/206): Investigate dedicated only images
+ VkFlags memoryFlags = externalFormatProperties.externalMemoryProperties.externalMemoryFeatures;
+ return (memoryFlags & VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT_KHR) != 0;
+}
+
+bool Service::SupportsCreateImage(const ExternalImageDescriptor* descriptor,
+ VkFormat format,
+ VkImageUsageFlags usage,
+ bool* supportsDisjoint) {
+ *supportsDisjoint = false;
+ return mSupported;
+}
+
+ResultOrError<MemoryImportParams> Service::GetMemoryImportParams(
+ const ExternalImageDescriptor* descriptor,
+ VkImage image) {
+ DAWN_INVALID_IF(descriptor->type != ExternalImageType::OpaqueFD,
+ "ExternalImageDescriptor is not an OpaqueFD descriptor.");
+
+ const ExternalImageDescriptorOpaqueFD* opaqueFDDescriptor =
+ static_cast<const ExternalImageDescriptorOpaqueFD*>(descriptor);
+
+ MemoryImportParams params = {opaqueFDDescriptor->allocationSize,
+ opaqueFDDescriptor->memoryTypeIndex};
+ return params;
+}
+
+ResultOrError<VkDeviceMemory> Service::ImportMemory(ExternalMemoryHandle handle,
+ const MemoryImportParams& importParams,
+ VkImage image) {
+ DAWN_INVALID_IF(handle == ZX_HANDLE_INVALID, "Importing memory with an invalid handle.");
+
+ VkMemoryRequirements requirements;
+ mDevice->fn.GetImageMemoryRequirements(mDevice->GetVkDevice(), image, &requirements);
+ DAWN_INVALID_IF(requirements.size > importParams.allocationSize,
+ "Requested allocation size (%u) is smaller than the required image size (%u).",
+ importParams.allocationSize, requirements.size);
+
+ VkImportMemoryZirconHandleInfoFUCHSIA importMemoryHandleInfo;
+ importMemoryHandleInfo.sType = VK_STRUCTURE_TYPE_IMPORT_MEMORY_ZIRCON_HANDLE_INFO_FUCHSIA;
+ importMemoryHandleInfo.pNext = nullptr;
+ importMemoryHandleInfo.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA;
+ importMemoryHandleInfo.handle = handle;
+
+ VkMemoryAllocateInfo allocateInfo;
+ allocateInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
+ allocateInfo.pNext = &importMemoryHandleInfo;
+ allocateInfo.allocationSize = importParams.allocationSize;
+ allocateInfo.memoryTypeIndex = importParams.memoryTypeIndex;
+
+ VkDeviceMemory allocatedMemory = VK_NULL_HANDLE;
+ DAWN_TRY(CheckVkSuccess(mDevice->fn.AllocateMemory(mDevice->GetVkDevice(), &allocateInfo,
+ nullptr, &*allocatedMemory),
+ "vkAllocateMemory"));
+ return allocatedMemory;
+}
+
+ResultOrError<VkImage> Service::CreateImage(const ExternalImageDescriptor* descriptor,
+ const VkImageCreateInfo& baseCreateInfo) {
+ VkImageCreateInfo createInfo = baseCreateInfo;
+ createInfo.flags |= VK_IMAGE_CREATE_ALIAS_BIT_KHR;
+ createInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
+ createInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+
+ VkExternalMemoryImageCreateInfo externalMemoryImageCreateInfo;
+ externalMemoryImageCreateInfo.sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO;
+ externalMemoryImageCreateInfo.pNext = nullptr;
+ externalMemoryImageCreateInfo.handleTypes =
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA;
+
+ PNextChainBuilder createInfoChain(&createInfo);
+ createInfoChain.Add(&externalMemoryImageCreateInfo,
+ VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO);
+
+ ASSERT(IsSampleCountSupported(mDevice, createInfo));
+
+ VkImage image;
+ DAWN_TRY(CheckVkSuccess(
+ mDevice->fn.CreateImage(mDevice->GetVkDevice(), &createInfo, nullptr, &*image),
+ "CreateImage"));
+ return image;
+}
+
+} // namespace dawn::native::vulkan::external_memory
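
Most of the churn in the hunks above and below is mechanical: Dawn's doubly nested `namespace dawn::native { namespace vulkan::external_memory {` blocks are collapsed into single C++17 nested namespace definitions, and the enclosed code loses one level of indentation, which is why nearly every line is touched. A minimal sketch of the pattern, with placeholder names only:

```cpp
// Before: two nested namespace blocks, body indented an extra level, closed with "}}".
namespace outer { namespace inner::detail {
    int Answer() { return 42; }
}}  // namespace outer::inner::detail

// After: one C++17 nested namespace definition, body indented one level less.
namespace outer::inner::detail {
int Answer() { return 42; }
}  // namespace outer::inner::detail
```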
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/external_semaphore/SemaphoreService.h b/chromium/third_party/dawn/src/dawn/native/vulkan/external_semaphore/SemaphoreService.h
index 259afa8df44..e27689fbf4e 100644
--- a/chromium/third_party/dawn/src/dawn/native/vulkan/external_semaphore/SemaphoreService.h
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/external_semaphore/SemaphoreService.h
@@ -22,39 +22,39 @@
#include "dawn/native/vulkan/VulkanInfo.h"
namespace dawn::native::vulkan {
- class Device;
+class Device;
} // namespace dawn::native::vulkan
-namespace dawn::native { namespace vulkan::external_semaphore {
+namespace dawn::native::vulkan::external_semaphore {
- class Service {
- public:
- explicit Service(Device* device);
- ~Service();
+class Service {
+ public:
+ explicit Service(Device* device);
+ ~Service();
- static bool CheckSupport(const VulkanDeviceInfo& deviceInfo,
- VkPhysicalDevice physicalDevice,
- const VulkanFunctions& fn);
+ static bool CheckSupport(const VulkanDeviceInfo& deviceInfo,
+ VkPhysicalDevice physicalDevice,
+ const VulkanFunctions& fn);
- // True if the device reports it supports this feature
- bool Supported();
+ // True if the device reports it supports this feature
+ bool Supported();
- // Given an external handle, import it into a VkSemaphore
- ResultOrError<VkSemaphore> ImportSemaphore(ExternalSemaphoreHandle handle);
+ // Given an external handle, import it into a VkSemaphore
+ ResultOrError<VkSemaphore> ImportSemaphore(ExternalSemaphoreHandle handle);
- // Create a VkSemaphore that is exportable into an external handle later
- ResultOrError<VkSemaphore> CreateExportableSemaphore();
+ // Create a VkSemaphore that is exportable into an external handle later
+ ResultOrError<VkSemaphore> CreateExportableSemaphore();
- // Export a VkSemaphore into an external handle
- ResultOrError<ExternalSemaphoreHandle> ExportSemaphore(VkSemaphore semaphore);
+ // Export a VkSemaphore into an external handle
+ ResultOrError<ExternalSemaphoreHandle> ExportSemaphore(VkSemaphore semaphore);
- private:
- Device* mDevice = nullptr;
+ private:
+ Device* mDevice = nullptr;
- // True if early checks pass that determine if the service is supported
- bool mSupported = false;
- };
+ // True if early checks pass that determine if the service is supported
+ bool mSupported = false;
+};
-}} // namespace dawn::native::vulkan::external_semaphore
+} // namespace dawn::native::vulkan::external_semaphore
#endif // SRC_DAWN_NATIVE_VULKAN_EXTERNAL_SEMAPHORE_SEMAPHORESERVICE_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/external_semaphore/SemaphoreServiceFD.cpp b/chromium/third_party/dawn/src/dawn/native/vulkan/external_semaphore/SemaphoreServiceFD.cpp
index 7e2b619f5ca..9c1d923daec 100644
--- a/chromium/third_party/dawn/src/dawn/native/vulkan/external_semaphore/SemaphoreServiceFD.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/external_semaphore/SemaphoreServiceFD.cpp
@@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+#include <utility>
+
#include "dawn/native/vulkan/AdapterVk.h"
#include "dawn/native/vulkan/BackendVk.h"
#include "dawn/native/vulkan/DeviceVk.h"
@@ -25,113 +27,112 @@ static constexpr VkExternalSemaphoreHandleTypeFlagBits kHandleType =
VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
#endif // defined(DAWN_USE_SYNC_FDS)
-namespace dawn::native { namespace vulkan::external_semaphore {
+namespace dawn::native::vulkan::external_semaphore {
- Service::Service(Device* device)
- : mDevice(device),
- mSupported(CheckSupport(device->GetDeviceInfo(),
- ToBackend(device->GetAdapter())->GetPhysicalDevice(),
- device->fn)) {
- }
+Service::Service(Device* device)
+ : mDevice(device),
+ mSupported(CheckSupport(device->GetDeviceInfo(),
+ ToBackend(device->GetAdapter())->GetPhysicalDevice(),
+ device->fn)) {}
- Service::~Service() = default;
+Service::~Service() = default;
- // static
- bool Service::CheckSupport(const VulkanDeviceInfo& deviceInfo,
- VkPhysicalDevice physicalDevice,
- const VulkanFunctions& fn) {
- if (!deviceInfo.HasExt(DeviceExt::ExternalSemaphoreFD)) {
- return false;
- }
+// static
+bool Service::CheckSupport(const VulkanDeviceInfo& deviceInfo,
+ VkPhysicalDevice physicalDevice,
+ const VulkanFunctions& fn) {
+ if (!deviceInfo.HasExt(DeviceExt::ExternalSemaphoreFD)) {
+ return false;
+ }
- VkPhysicalDeviceExternalSemaphoreInfoKHR semaphoreInfo;
- semaphoreInfo.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO_KHR;
- semaphoreInfo.pNext = nullptr;
- semaphoreInfo.handleType = kHandleType;
+ VkPhysicalDeviceExternalSemaphoreInfoKHR semaphoreInfo;
+ semaphoreInfo.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO_KHR;
+ semaphoreInfo.pNext = nullptr;
+ semaphoreInfo.handleType = kHandleType;
- VkExternalSemaphorePropertiesKHR semaphoreProperties;
- semaphoreProperties.sType = VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES_KHR;
- semaphoreProperties.pNext = nullptr;
+ VkExternalSemaphorePropertiesKHR semaphoreProperties;
+ semaphoreProperties.sType = VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES_KHR;
+ semaphoreProperties.pNext = nullptr;
- fn.GetPhysicalDeviceExternalSemaphoreProperties(physicalDevice, &semaphoreInfo,
- &semaphoreProperties);
+ fn.GetPhysicalDeviceExternalSemaphoreProperties(physicalDevice, &semaphoreInfo,
+ &semaphoreProperties);
- VkFlags requiredFlags = VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT_KHR |
- VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT_KHR;
+ VkFlags requiredFlags = VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT_KHR |
+ VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT_KHR;
- return IsSubset(requiredFlags, semaphoreProperties.externalSemaphoreFeatures);
- }
+ return IsSubset(requiredFlags, semaphoreProperties.externalSemaphoreFeatures);
+}
- bool Service::Supported() {
- return mSupported;
- }
+bool Service::Supported() {
+ return mSupported;
+}
- ResultOrError<VkSemaphore> Service::ImportSemaphore(ExternalSemaphoreHandle handle) {
- DAWN_INVALID_IF(handle < 0, "Importing a semaphore with an invalid handle.");
-
- VkSemaphore semaphore = VK_NULL_HANDLE;
- VkSemaphoreCreateInfo info;
- info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
- info.pNext = nullptr;
- info.flags = 0;
-
- DAWN_TRY(CheckVkSuccess(
- mDevice->fn.CreateSemaphore(mDevice->GetVkDevice(), &info, nullptr, &*semaphore),
- "vkCreateSemaphore"));
-
- VkImportSemaphoreFdInfoKHR importSemaphoreFdInfo;
- importSemaphoreFdInfo.sType = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR;
- importSemaphoreFdInfo.pNext = nullptr;
- importSemaphoreFdInfo.semaphore = semaphore;
- importSemaphoreFdInfo.flags = 0;
- importSemaphoreFdInfo.handleType = kHandleType;
- importSemaphoreFdInfo.fd = handle;
-
- MaybeError status = CheckVkSuccess(
- mDevice->fn.ImportSemaphoreFdKHR(mDevice->GetVkDevice(), &importSemaphoreFdInfo),
- "vkImportSemaphoreFdKHR");
-
- if (status.IsError()) {
- mDevice->fn.DestroySemaphore(mDevice->GetVkDevice(), semaphore, nullptr);
- DAWN_TRY(std::move(status));
- }
-
- return semaphore;
- }
+ResultOrError<VkSemaphore> Service::ImportSemaphore(ExternalSemaphoreHandle handle) {
+ DAWN_INVALID_IF(handle < 0, "Importing a semaphore with an invalid handle.");
- ResultOrError<VkSemaphore> Service::CreateExportableSemaphore() {
- VkExportSemaphoreCreateInfoKHR exportSemaphoreInfo;
- exportSemaphoreInfo.sType = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO_KHR;
- exportSemaphoreInfo.pNext = nullptr;
- exportSemaphoreInfo.handleTypes = kHandleType;
-
- VkSemaphoreCreateInfo semaphoreCreateInfo;
- semaphoreCreateInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
- semaphoreCreateInfo.pNext = &exportSemaphoreInfo;
- semaphoreCreateInfo.flags = 0;
-
- VkSemaphore signalSemaphore;
- DAWN_TRY(
- CheckVkSuccess(mDevice->fn.CreateSemaphore(mDevice->GetVkDevice(), &semaphoreCreateInfo,
- nullptr, &*signalSemaphore),
- "vkCreateSemaphore"));
- return signalSemaphore;
- }
+ VkSemaphore semaphore = VK_NULL_HANDLE;
+ VkSemaphoreCreateInfo info;
+ info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
+ info.pNext = nullptr;
+ info.flags = 0;
+
+ DAWN_TRY(CheckVkSuccess(
+ mDevice->fn.CreateSemaphore(mDevice->GetVkDevice(), &info, nullptr, &*semaphore),
+ "vkCreateSemaphore"));
- ResultOrError<ExternalSemaphoreHandle> Service::ExportSemaphore(VkSemaphore semaphore) {
- VkSemaphoreGetFdInfoKHR semaphoreGetFdInfo;
- semaphoreGetFdInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR;
- semaphoreGetFdInfo.pNext = nullptr;
- semaphoreGetFdInfo.semaphore = semaphore;
- semaphoreGetFdInfo.handleType = kHandleType;
+ VkImportSemaphoreFdInfoKHR importSemaphoreFdInfo;
+ importSemaphoreFdInfo.sType = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR;
+ importSemaphoreFdInfo.pNext = nullptr;
+ importSemaphoreFdInfo.semaphore = semaphore;
+ importSemaphoreFdInfo.flags = 0;
+ importSemaphoreFdInfo.handleType = kHandleType;
+ importSemaphoreFdInfo.fd = handle;
- int fd = -1;
- DAWN_TRY(CheckVkSuccess(
- mDevice->fn.GetSemaphoreFdKHR(mDevice->GetVkDevice(), &semaphoreGetFdInfo, &fd),
- "vkGetSemaphoreFdKHR"));
+ MaybeError status = CheckVkSuccess(
+ mDevice->fn.ImportSemaphoreFdKHR(mDevice->GetVkDevice(), &importSemaphoreFdInfo),
+ "vkImportSemaphoreFdKHR");
- ASSERT(fd >= 0);
- return fd;
+ if (status.IsError()) {
+ mDevice->fn.DestroySemaphore(mDevice->GetVkDevice(), semaphore, nullptr);
+ DAWN_TRY(std::move(status));
}
-}} // namespace dawn::native::vulkan::external_semaphore
+ return semaphore;
+}
+
+ResultOrError<VkSemaphore> Service::CreateExportableSemaphore() {
+ VkExportSemaphoreCreateInfoKHR exportSemaphoreInfo;
+ exportSemaphoreInfo.sType = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO_KHR;
+ exportSemaphoreInfo.pNext = nullptr;
+ exportSemaphoreInfo.handleTypes = kHandleType;
+
+ VkSemaphoreCreateInfo semaphoreCreateInfo;
+ semaphoreCreateInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
+ semaphoreCreateInfo.pNext = &exportSemaphoreInfo;
+ semaphoreCreateInfo.flags = 0;
+
+ VkSemaphore signalSemaphore;
+ DAWN_TRY(
+ CheckVkSuccess(mDevice->fn.CreateSemaphore(mDevice->GetVkDevice(), &semaphoreCreateInfo,
+ nullptr, &*signalSemaphore),
+ "vkCreateSemaphore"));
+ return signalSemaphore;
+}
+
+ResultOrError<ExternalSemaphoreHandle> Service::ExportSemaphore(VkSemaphore semaphore) {
+ VkSemaphoreGetFdInfoKHR semaphoreGetFdInfo;
+ semaphoreGetFdInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR;
+ semaphoreGetFdInfo.pNext = nullptr;
+ semaphoreGetFdInfo.semaphore = semaphore;
+ semaphoreGetFdInfo.handleType = kHandleType;
+
+ int fd = -1;
+ DAWN_TRY(CheckVkSuccess(
+ mDevice->fn.GetSemaphoreFdKHR(mDevice->GetVkDevice(), &semaphoreGetFdInfo, &fd),
+ "vkGetSemaphoreFdKHR"));
+
+ ASSERT(fd >= 0);
+ return fd;
+}
+
+} // namespace dawn::native::vulkan::external_semaphore
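
For readers less familiar with VK_KHR_external_semaphore_fd, the import path above reduces to: create a plain VkSemaphore, hand the file descriptor to the driver via vkImportSemaphoreFdKHR, and destroy the semaphore again if the import fails. Below is a simplified sketch without Dawn's DAWN_TRY/CheckVkSuccess plumbing; it assumes the extension entry point has already been loaded and collapses error handling to a bool:

```cpp
#include <vulkan/vulkan.h>

// Illustrative only; Dawn's real implementation is Service::ImportSemaphore above.
bool ImportSemaphoreFromFd(VkDevice device, int fd, VkSemaphore* outSemaphore) {
    VkSemaphoreCreateInfo createInfo = {};
    createInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;

    VkSemaphore semaphore = VK_NULL_HANDLE;
    if (vkCreateSemaphore(device, &createInfo, nullptr, &semaphore) != VK_SUCCESS) {
        return false;
    }

    VkImportSemaphoreFdInfoKHR importInfo = {};
    importInfo.sType = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR;
    importInfo.semaphore = semaphore;
    importInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
    importInfo.fd = fd;  // On success, ownership of the fd passes to the driver.

    if (vkImportSemaphoreFdKHR(device, &importInfo) != VK_SUCCESS) {
        // Mirror the DestroySemaphore call in the error branch above.
        vkDestroySemaphore(device, semaphore, nullptr);
        return false;
    }

    *outSemaphore = semaphore;
    return true;
}
```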
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/external_semaphore/SemaphoreServiceNull.cpp b/chromium/third_party/dawn/src/dawn/native/vulkan/external_semaphore/SemaphoreServiceNull.cpp
index 3146e3771ad..1963524c820 100644
--- a/chromium/third_party/dawn/src/dawn/native/vulkan/external_semaphore/SemaphoreServiceNull.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/external_semaphore/SemaphoreServiceNull.cpp
@@ -15,36 +15,36 @@
#include "dawn/native/vulkan/DeviceVk.h"
#include "dawn/native/vulkan/external_semaphore/SemaphoreService.h"
-namespace dawn::native { namespace vulkan::external_semaphore {
+namespace dawn::native::vulkan::external_semaphore {
- Service::Service(Device* device) : mDevice(device) {
- DAWN_UNUSED(mDevice);
- DAWN_UNUSED(mSupported);
- }
+Service::Service(Device* device) : mDevice(device) {
+ DAWN_UNUSED(mDevice);
+ DAWN_UNUSED(mSupported);
+}
- Service::~Service() = default;
+Service::~Service() = default;
- // static
- bool Service::CheckSupport(const VulkanDeviceInfo& deviceInfo,
- VkPhysicalDevice physicalDevice,
- const VulkanFunctions& fn) {
- return false;
- }
+// static
+bool Service::CheckSupport(const VulkanDeviceInfo& deviceInfo,
+ VkPhysicalDevice physicalDevice,
+ const VulkanFunctions& fn) {
+ return false;
+}
- bool Service::Supported() {
- return false;
- }
+bool Service::Supported() {
+ return false;
+}
- ResultOrError<VkSemaphore> Service::ImportSemaphore(ExternalSemaphoreHandle handle) {
- return DAWN_UNIMPLEMENTED_ERROR("Using null semaphore service to interop inside Vulkan");
- }
+ResultOrError<VkSemaphore> Service::ImportSemaphore(ExternalSemaphoreHandle handle) {
+ return DAWN_UNIMPLEMENTED_ERROR("Using null semaphore service to interop inside Vulkan");
+}
- ResultOrError<VkSemaphore> Service::CreateExportableSemaphore() {
- return DAWN_UNIMPLEMENTED_ERROR("Using null semaphore service to interop inside Vulkan");
- }
+ResultOrError<VkSemaphore> Service::CreateExportableSemaphore() {
+ return DAWN_UNIMPLEMENTED_ERROR("Using null semaphore service to interop inside Vulkan");
+}
- ResultOrError<ExternalSemaphoreHandle> Service::ExportSemaphore(VkSemaphore semaphore) {
- return DAWN_UNIMPLEMENTED_ERROR("Using null semaphore service to interop inside Vulkan");
- }
+ResultOrError<ExternalSemaphoreHandle> Service::ExportSemaphore(VkSemaphore semaphore) {
+ return DAWN_UNIMPLEMENTED_ERROR("Using null semaphore service to interop inside Vulkan");
+}
-}} // namespace dawn::native::vulkan::external_semaphore
+} // namespace dawn::native::vulkan::external_semaphore
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/external_semaphore/SemaphoreServiceZirconHandle.cpp b/chromium/third_party/dawn/src/dawn/native/vulkan/external_semaphore/SemaphoreServiceZirconHandle.cpp
index 03fa79c65d2..af1f3f3e779 100644
--- a/chromium/third_party/dawn/src/dawn/native/vulkan/external_semaphore/SemaphoreServiceZirconHandle.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/external_semaphore/SemaphoreServiceZirconHandle.cpp
@@ -12,124 +12,121 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+#include <utility>
+
#include "dawn/native/vulkan/AdapterVk.h"
#include "dawn/native/vulkan/BackendVk.h"
#include "dawn/native/vulkan/DeviceVk.h"
#include "dawn/native/vulkan/VulkanError.h"
#include "dawn/native/vulkan/external_semaphore/SemaphoreService.h"
-namespace dawn::native { namespace vulkan::external_semaphore {
-
- Service::Service(Device* device)
- : mDevice(device),
- mSupported(CheckSupport(device->GetDeviceInfo(),
- ToBackend(device->GetAdapter())->GetPhysicalDevice(),
- device->fn)) {
- }
-
- Service::~Service() = default;
-
- // static
- bool Service::CheckSupport(const VulkanDeviceInfo& deviceInfo,
- VkPhysicalDevice physicalDevice,
- const VulkanFunctions& fn) {
- if (!deviceInfo.HasExt(DeviceExt::ExternalSemaphoreZirconHandle)) {
- return false;
- }
-
- VkPhysicalDeviceExternalSemaphoreInfoKHR semaphoreInfo;
- semaphoreInfo.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO_KHR;
- semaphoreInfo.pNext = nullptr;
- semaphoreInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_ZIRCON_EVENT_BIT_FUCHSIA;
-
- VkExternalSemaphorePropertiesKHR semaphoreProperties;
- semaphoreProperties.sType = VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES_KHR;
- semaphoreProperties.pNext = nullptr;
+namespace dawn::native::vulkan::external_semaphore {
- fn.GetPhysicalDeviceExternalSemaphoreProperties(physicalDevice, &semaphoreInfo,
- &semaphoreProperties);
+Service::Service(Device* device)
+ : mDevice(device),
+ mSupported(CheckSupport(device->GetDeviceInfo(),
+ ToBackend(device->GetAdapter())->GetPhysicalDevice(),
+ device->fn)) {}
- VkFlags requiredFlags = VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT_KHR |
- VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT_KHR;
-
- return IsSubset(requiredFlags, semaphoreProperties.externalSemaphoreFeatures);
- }
-
- bool Service::Supported() {
- return mSupported;
- }
-
- ResultOrError<VkSemaphore> Service::ImportSemaphore(ExternalSemaphoreHandle handle) {
- DAWN_INVALID_IF(handle == ZX_HANDLE_INVALID,
- "Importing a semaphore with an invalid handle.");
-
- VkSemaphore semaphore = VK_NULL_HANDLE;
- VkSemaphoreCreateInfo info;
- info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
- info.pNext = nullptr;
- info.flags = 0;
-
- DAWN_TRY(CheckVkSuccess(
- mDevice->fn.CreateSemaphore(mDevice->GetVkDevice(), &info, nullptr, &*semaphore),
- "vkCreateSemaphore"));
-
- VkImportSemaphoreZirconHandleInfoFUCHSIA importSemaphoreHandleInfo;
- importSemaphoreHandleInfo.sType =
- VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_ZIRCON_HANDLE_INFO_FUCHSIA;
- importSemaphoreHandleInfo.pNext = nullptr;
- importSemaphoreHandleInfo.semaphore = semaphore;
- importSemaphoreHandleInfo.flags = 0;
- importSemaphoreHandleInfo.handleType =
- VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_ZIRCON_EVENT_BIT_FUCHSIA;
- importSemaphoreHandleInfo.handle = handle;
-
- MaybeError status = CheckVkSuccess(mDevice->fn.ImportSemaphoreZirconHandleFUCHSIA(
- mDevice->GetVkDevice(), &importSemaphoreHandleInfo),
- "vkImportSemaphoreZirconHandleFUCHSIA");
-
- if (status.IsError()) {
- mDevice->fn.DestroySemaphore(mDevice->GetVkDevice(), semaphore, nullptr);
- DAWN_TRY(std::move(status));
- }
-
- return semaphore;
- }
+Service::~Service() = default;
- ResultOrError<VkSemaphore> Service::CreateExportableSemaphore() {
- VkExportSemaphoreCreateInfoKHR exportSemaphoreInfo;
- exportSemaphoreInfo.sType = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO_KHR;
- exportSemaphoreInfo.pNext = nullptr;
- exportSemaphoreInfo.handleTypes =
- VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_ZIRCON_EVENT_BIT_FUCHSIA;
-
- VkSemaphoreCreateInfo semaphoreCreateInfo;
- semaphoreCreateInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
- semaphoreCreateInfo.pNext = &exportSemaphoreInfo;
- semaphoreCreateInfo.flags = 0;
-
- VkSemaphore signalSemaphore;
- DAWN_TRY(
- CheckVkSuccess(mDevice->fn.CreateSemaphore(mDevice->GetVkDevice(), &semaphoreCreateInfo,
- nullptr, &*signalSemaphore),
- "vkCreateSemaphore"));
- return signalSemaphore;
+// static
+bool Service::CheckSupport(const VulkanDeviceInfo& deviceInfo,
+ VkPhysicalDevice physicalDevice,
+ const VulkanFunctions& fn) {
+ if (!deviceInfo.HasExt(DeviceExt::ExternalSemaphoreZirconHandle)) {
+ return false;
}
- ResultOrError<ExternalSemaphoreHandle> Service::ExportSemaphore(VkSemaphore semaphore) {
- VkSemaphoreGetZirconHandleInfoFUCHSIA semaphoreGetHandleInfo;
- semaphoreGetHandleInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_ZIRCON_HANDLE_INFO_FUCHSIA;
- semaphoreGetHandleInfo.pNext = nullptr;
- semaphoreGetHandleInfo.semaphore = semaphore;
- semaphoreGetHandleInfo.handleType =
- VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_ZIRCON_EVENT_BIT_FUCHSIA;
-
- zx_handle_t handle = ZX_HANDLE_INVALID;
- DAWN_TRY(CheckVkSuccess(mDevice->fn.GetSemaphoreZirconHandleFUCHSIA(
- mDevice->GetVkDevice(), &semaphoreGetHandleInfo, &handle),
- "VkSemaphoreGetZirconHandleInfoFUCHSIA"));
-
- ASSERT(handle != ZX_HANDLE_INVALID);
- return handle;
+ VkPhysicalDeviceExternalSemaphoreInfoKHR semaphoreInfo;
+ semaphoreInfo.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO_KHR;
+ semaphoreInfo.pNext = nullptr;
+ semaphoreInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_ZIRCON_EVENT_BIT_FUCHSIA;
+
+ VkExternalSemaphorePropertiesKHR semaphoreProperties;
+ semaphoreProperties.sType = VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES_KHR;
+ semaphoreProperties.pNext = nullptr;
+
+ fn.GetPhysicalDeviceExternalSemaphoreProperties(physicalDevice, &semaphoreInfo,
+ &semaphoreProperties);
+
+ VkFlags requiredFlags = VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT_KHR |
+ VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT_KHR;
+
+ return IsSubset(requiredFlags, semaphoreProperties.externalSemaphoreFeatures);
+}
+
+bool Service::Supported() {
+ return mSupported;
+}
+
+ResultOrError<VkSemaphore> Service::ImportSemaphore(ExternalSemaphoreHandle handle) {
+ DAWN_INVALID_IF(handle == ZX_HANDLE_INVALID, "Importing a semaphore with an invalid handle.");
+
+ VkSemaphore semaphore = VK_NULL_HANDLE;
+ VkSemaphoreCreateInfo info;
+ info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
+ info.pNext = nullptr;
+ info.flags = 0;
+
+ DAWN_TRY(CheckVkSuccess(
+ mDevice->fn.CreateSemaphore(mDevice->GetVkDevice(), &info, nullptr, &*semaphore),
+ "vkCreateSemaphore"));
+
+ VkImportSemaphoreZirconHandleInfoFUCHSIA importSemaphoreHandleInfo;
+ importSemaphoreHandleInfo.sType = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_ZIRCON_HANDLE_INFO_FUCHSIA;
+ importSemaphoreHandleInfo.pNext = nullptr;
+ importSemaphoreHandleInfo.semaphore = semaphore;
+ importSemaphoreHandleInfo.flags = 0;
+ importSemaphoreHandleInfo.handleType =
+ VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_ZIRCON_EVENT_BIT_FUCHSIA;
+ importSemaphoreHandleInfo.handle = handle;
+
+ MaybeError status = CheckVkSuccess(mDevice->fn.ImportSemaphoreZirconHandleFUCHSIA(
+ mDevice->GetVkDevice(), &importSemaphoreHandleInfo),
+ "vkImportSemaphoreZirconHandleFUCHSIA");
+
+ if (status.IsError()) {
+ mDevice->fn.DestroySemaphore(mDevice->GetVkDevice(), semaphore, nullptr);
+ DAWN_TRY(std::move(status));
}
-}} // namespace dawn::native::vulkan::external_semaphore
+ return semaphore;
+}
+
+ResultOrError<VkSemaphore> Service::CreateExportableSemaphore() {
+ VkExportSemaphoreCreateInfoKHR exportSemaphoreInfo;
+ exportSemaphoreInfo.sType = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO_KHR;
+ exportSemaphoreInfo.pNext = nullptr;
+ exportSemaphoreInfo.handleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_ZIRCON_EVENT_BIT_FUCHSIA;
+
+ VkSemaphoreCreateInfo semaphoreCreateInfo;
+ semaphoreCreateInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
+ semaphoreCreateInfo.pNext = &exportSemaphoreInfo;
+ semaphoreCreateInfo.flags = 0;
+
+ VkSemaphore signalSemaphore;
+ DAWN_TRY(
+ CheckVkSuccess(mDevice->fn.CreateSemaphore(mDevice->GetVkDevice(), &semaphoreCreateInfo,
+ nullptr, &*signalSemaphore),
+ "vkCreateSemaphore"));
+ return signalSemaphore;
+}
+
+ResultOrError<ExternalSemaphoreHandle> Service::ExportSemaphore(VkSemaphore semaphore) {
+ VkSemaphoreGetZirconHandleInfoFUCHSIA semaphoreGetHandleInfo;
+ semaphoreGetHandleInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_ZIRCON_HANDLE_INFO_FUCHSIA;
+ semaphoreGetHandleInfo.pNext = nullptr;
+ semaphoreGetHandleInfo.semaphore = semaphore;
+ semaphoreGetHandleInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_ZIRCON_EVENT_BIT_FUCHSIA;
+
+ zx_handle_t handle = ZX_HANDLE_INVALID;
+ DAWN_TRY(CheckVkSuccess(mDevice->fn.GetSemaphoreZirconHandleFUCHSIA(
+ mDevice->GetVkDevice(), &semaphoreGetHandleInfo, &handle),
+                            "vkGetSemaphoreZirconHandleFUCHSIA"));
+
+ ASSERT(handle != ZX_HANDLE_INVALID);
+ return handle;
+}
+
+} // namespace dawn::native::vulkan::external_semaphore
diff --git a/chromium/third_party/dawn/src/dawn/native/webgpu_absl_format.cpp b/chromium/third_party/dawn/src/dawn/native/webgpu_absl_format.cpp
index e42ec8977a5..2550f4b1f5b 100644
--- a/chromium/third_party/dawn/src/dawn/native/webgpu_absl_format.cpp
+++ b/chromium/third_party/dawn/src/dawn/native/webgpu_absl_format.cpp
@@ -14,6 +14,8 @@
#include "dawn/native/webgpu_absl_format.h"
+#include <string>
+
#include "dawn/native/AttachmentState.h"
#include "dawn/native/BindingInfo.h"
#include "dawn/native/Device.h"
@@ -28,414 +30,411 @@
namespace dawn::native {
- //
- // Structs
- //
-
- absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
- const Color* value,
- const absl::FormatConversionSpec& spec,
- absl::FormatSink* s) {
- if (value == nullptr) {
- s->Append("[null]");
- return {true};
- }
- s->Append(absl::StrFormat("[Color r:%f, g:%f, b:%f, a:%f]", value->r, value->g, value->b,
- value->a));
- return {true};
- }
+//
+// Structs
+//
- absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
- const Extent3D* value,
- const absl::FormatConversionSpec& spec,
- absl::FormatSink* s) {
- if (value == nullptr) {
- s->Append("[null]");
- return {true};
- }
- s->Append(absl::StrFormat("[Extent3D width:%u, height:%u, depthOrArrayLayers:%u]",
- value->width, value->height, value->depthOrArrayLayers));
+absl::FormatConvertResult<absl::FormatConversionCharSet::kString>
+AbslFormatConvert(const Color* value, const absl::FormatConversionSpec& spec, absl::FormatSink* s) {
+ if (value == nullptr) {
+ s->Append("[null]");
return {true};
}
-
- absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
- const Origin3D* value,
- const absl::FormatConversionSpec& spec,
- absl::FormatSink* s) {
- if (value == nullptr) {
- s->Append("[null]");
- return {true};
- }
- s->Append(absl::StrFormat("[Origin3D x:%u, y:%u, z:%u]", value->x, value->y, value->z));
+ s->Append(
+ absl::StrFormat("[Color r:%f, g:%f, b:%f, a:%f]", value->r, value->g, value->b, value->a));
+ return {true};
+}
+
+absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+ const Extent3D* value,
+ const absl::FormatConversionSpec& spec,
+ absl::FormatSink* s) {
+ if (value == nullptr) {
+ s->Append("[null]");
return {true};
}
-
- absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
- const BindingInfo& value,
- const absl::FormatConversionSpec& spec,
- absl::FormatSink* s) {
- static const auto* const fmt =
- new absl::ParsedFormat<'u', 's', 's', 's'>("{ binding: %u, visibility: %s, %s: %s }");
- switch (value.bindingType) {
- case BindingInfoType::Buffer:
- s->Append(absl::StrFormat(*fmt, static_cast<uint32_t>(value.binding),
- value.visibility, value.bindingType, value.buffer));
- break;
- case BindingInfoType::Sampler:
- s->Append(absl::StrFormat(*fmt, static_cast<uint32_t>(value.binding),
- value.visibility, value.bindingType, value.sampler));
- break;
- case BindingInfoType::Texture:
- s->Append(absl::StrFormat(*fmt, static_cast<uint32_t>(value.binding),
- value.visibility, value.bindingType, value.texture));
- break;
- case BindingInfoType::StorageTexture:
- s->Append(absl::StrFormat(*fmt, static_cast<uint32_t>(value.binding),
- value.visibility, value.bindingType,
- value.storageTexture));
- break;
- case BindingInfoType::ExternalTexture:
- break;
- }
+ s->Append(absl::StrFormat("[Extent3D width:%u, height:%u, depthOrArrayLayers:%u]", value->width,
+ value->height, value->depthOrArrayLayers));
+ return {true};
+}
+
+absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+ const Origin3D* value,
+ const absl::FormatConversionSpec& spec,
+ absl::FormatSink* s) {
+ if (value == nullptr) {
+ s->Append("[null]");
return {true};
}
+ s->Append(absl::StrFormat("[Origin3D x:%u, y:%u, z:%u]", value->x, value->y, value->z));
+ return {true};
+}
+
+absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+ const BindingInfo& value,
+ const absl::FormatConversionSpec& spec,
+ absl::FormatSink* s) {
+ static const auto* const fmt =
+ new absl::ParsedFormat<'u', 's', 's', 's'>("{ binding: %u, visibility: %s, %s: %s }");
+ switch (value.bindingType) {
+ case BindingInfoType::Buffer:
+ s->Append(absl::StrFormat(*fmt, static_cast<uint32_t>(value.binding), value.visibility,
+ value.bindingType, value.buffer));
+ break;
+ case BindingInfoType::Sampler:
+ s->Append(absl::StrFormat(*fmt, static_cast<uint32_t>(value.binding), value.visibility,
+ value.bindingType, value.sampler));
+ break;
+ case BindingInfoType::Texture:
+ s->Append(absl::StrFormat(*fmt, static_cast<uint32_t>(value.binding), value.visibility,
+ value.bindingType, value.texture));
+ break;
+ case BindingInfoType::StorageTexture:
+ s->Append(absl::StrFormat(*fmt, static_cast<uint32_t>(value.binding), value.visibility,
+ value.bindingType, value.storageTexture));
+ break;
+ case BindingInfoType::ExternalTexture:
+ break;
+ }
+ return {true};
+}
- //
- // Objects
- //
+//
+// Objects
+//
- absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
- const DeviceBase* value,
- const absl::FormatConversionSpec& spec,
- absl::FormatSink* s) {
- if (value == nullptr) {
- s->Append("[null]");
- return {true};
- }
- s->Append("[Device");
- const std::string& label = value->GetLabel();
- if (!label.empty()) {
- s->Append(absl::StrFormat(" \"%s\"", label));
- }
- s->Append("]");
+absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+ const DeviceBase* value,
+ const absl::FormatConversionSpec& spec,
+ absl::FormatSink* s) {
+ if (value == nullptr) {
+ s->Append("[null]");
return {true};
}
-
- absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
- const ApiObjectBase* value,
- const absl::FormatConversionSpec& spec,
- absl::FormatSink* s) {
- if (value == nullptr) {
- s->Append("[null]");
- return {true};
- }
- s->Append("[");
- if (value->IsError()) {
- s->Append("Invalid ");
- }
- s->Append(ObjectTypeAsString(value->GetType()));
- const std::string& label = value->GetLabel();
- if (!label.empty()) {
- s->Append(absl::StrFormat(" \"%s\"", label));
- }
- s->Append("]");
+ s->Append("[Device");
+ const std::string& label = value->GetLabel();
+ if (!label.empty()) {
+ s->Append(absl::StrFormat(" \"%s\"", label));
+ }
+ s->Append("]");
+ return {true};
+}
+
+absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+ const ApiObjectBase* value,
+ const absl::FormatConversionSpec& spec,
+ absl::FormatSink* s) {
+ if (value == nullptr) {
+ s->Append("[null]");
return {true};
}
-
- absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
- const TextureViewBase* value,
- const absl::FormatConversionSpec& spec,
- absl::FormatSink* s) {
- if (value == nullptr) {
- s->Append("[null]");
- return {true};
- }
- s->Append("[");
- if (value->IsError()) {
- s->Append("Invalid ");
- }
- s->Append(ObjectTypeAsString(value->GetType()));
- const std::string& label = value->GetLabel();
- if (!label.empty()) {
- s->Append(absl::StrFormat(" \"%s\"", label));
- }
- const std::string& textureLabel = value->GetTexture()->GetLabel();
- if (!textureLabel.empty()) {
- s->Append(absl::StrFormat(" of Texture \"%s\"", textureLabel));
- }
- s->Append("]");
+ s->Append("[");
+ if (value->IsError()) {
+ s->Append("Invalid ");
+ }
+ s->Append(ObjectTypeAsString(value->GetType()));
+ const std::string& label = value->GetLabel();
+ if (!label.empty()) {
+ s->Append(absl::StrFormat(" \"%s\"", label));
+ }
+ s->Append("]");
+ return {true};
+}
+
+absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+ const TextureViewBase* value,
+ const absl::FormatConversionSpec& spec,
+ absl::FormatSink* s) {
+ if (value == nullptr) {
+ s->Append("[null]");
+ return {true};
+ }
+ s->Append("[");
+ if (value->IsError()) {
+ s->Append("Invalid ");
+ }
+ s->Append(ObjectTypeAsString(value->GetType()));
+ const std::string& label = value->GetLabel();
+ if (!label.empty()) {
+ s->Append(absl::StrFormat(" \"%s\"", label));
+ }
+ const std::string& textureLabel = value->GetTexture()->GetLabel();
+ if (!textureLabel.empty()) {
+ s->Append(absl::StrFormat(" of Texture \"%s\"", textureLabel));
+ }
+ s->Append("]");
+ return {true};
+}
+
+absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+ const AttachmentState* value,
+ const absl::FormatConversionSpec& spec,
+ absl::FormatSink* s) {
+ if (value == nullptr) {
+ s->Append("[null]");
return {true};
}
- absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
- const AttachmentState* value,
- const absl::FormatConversionSpec& spec,
- absl::FormatSink* s) {
- if (value == nullptr) {
- s->Append("[null]");
- return {true};
- }
-
- s->Append("{ colorFormats: [");
-
- ColorAttachmentIndex nextColorIndex(uint8_t(0));
-
- bool needsComma = false;
- for (ColorAttachmentIndex i : IterateBitSet(value->GetColorAttachmentsMask())) {
- while (nextColorIndex < i) {
- s->Append(absl::StrFormat("%s, ", wgpu::TextureFormat::Undefined));
- nextColorIndex++;
- needsComma = false;
- }
-
- if (needsComma) {
- s->Append(", ");
- }
+ s->Append("{ colorFormats: [");
- s->Append(absl::StrFormat("%s", value->GetColorAttachmentFormat(i)));
+ ColorAttachmentIndex nextColorIndex(uint8_t(0));
+ bool needsComma = false;
+ for (ColorAttachmentIndex i : IterateBitSet(value->GetColorAttachmentsMask())) {
+ while (nextColorIndex < i) {
+ s->Append(absl::StrFormat("%s, ", wgpu::TextureFormat::Undefined));
nextColorIndex++;
- needsComma = true;
+ needsComma = false;
}
- s->Append("], ");
-
- if (value->HasDepthStencilAttachment()) {
- s->Append(absl::StrFormat("depthStencilFormat: %s, ", value->GetDepthStencilFormat()));
+ if (needsComma) {
+ s->Append(", ");
}
- s->Append(absl::StrFormat("sampleCount: %u }", value->GetSampleCount()));
+ s->Append(absl::StrFormat("%s", value->GetColorAttachmentFormat(i)));
- return {true};
+ nextColorIndex++;
+ needsComma = true;
}
- //
- // Enums
- //
-
- absl::FormatConvertResult<absl::FormatConversionCharSet::kString>
- AbslFormatConvert(Aspect value, const absl::FormatConversionSpec& spec, absl::FormatSink* s) {
- if (value == Aspect::None) {
- s->Append("None");
- return {true};
- }
+ s->Append("], ");
- bool first = true;
-
- if (value & Aspect::Color) {
- first = false;
- s->Append("Color");
- value &= ~Aspect::Color;
- }
+ if (value->HasDepthStencilAttachment()) {
+ s->Append(absl::StrFormat("depthStencilFormat: %s, ", value->GetDepthStencilFormat()));
+ }
- if (value & Aspect::Depth) {
- if (!first) {
- s->Append("|");
- }
- first = false;
- s->Append("Depth");
- value &= ~Aspect::Depth;
- }
+ s->Append(absl::StrFormat("sampleCount: %u }", value->GetSampleCount()));
- if (value & Aspect::Stencil) {
- if (!first) {
- s->Append("|");
- }
- first = false;
- s->Append("Stencil");
- value &= ~Aspect::Stencil;
- }
+ return {true};
+}
- // Output any remaining flags as a hex value
- if (static_cast<bool>(value)) {
- if (!first) {
- s->Append("|");
- }
- s->Append(absl::StrFormat("%x", static_cast<uint8_t>(value)));
- }
+//
+// Enums
+//
+absl::FormatConvertResult<absl::FormatConversionCharSet::kString>
+AbslFormatConvert(Aspect value, const absl::FormatConversionSpec& spec, absl::FormatSink* s) {
+ if (value == Aspect::None) {
+ s->Append("None");
return {true};
}
- absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
- SampleTypeBit value,
- const absl::FormatConversionSpec& spec,
- absl::FormatSink* s) {
- if (value == SampleTypeBit::None) {
- s->Append("None");
- return {true};
- }
+ bool first = true;
- bool first = true;
+ if (value & Aspect::Color) {
+ first = false;
+ s->Append("Color");
+ value &= ~Aspect::Color;
+ }
- if (value & SampleTypeBit::Float) {
- first = false;
- s->Append("Float");
- value &= ~SampleTypeBit::Float;
+ if (value & Aspect::Depth) {
+ if (!first) {
+ s->Append("|");
}
+ first = false;
+ s->Append("Depth");
+ value &= ~Aspect::Depth;
+ }
- if (value & SampleTypeBit::UnfilterableFloat) {
- if (!first) {
- s->Append("|");
- }
- first = false;
- s->Append("UnfilterableFloat");
- value &= ~SampleTypeBit::UnfilterableFloat;
+ if (value & Aspect::Stencil) {
+ if (!first) {
+ s->Append("|");
}
+ first = false;
+ s->Append("Stencil");
+ value &= ~Aspect::Stencil;
+ }
- if (value & SampleTypeBit::Depth) {
- if (!first) {
- s->Append("|");
- }
- first = false;
- s->Append("Depth");
- value &= ~SampleTypeBit::Depth;
+ // Output any remaining flags as a hex value
+ if (static_cast<bool>(value)) {
+ if (!first) {
+ s->Append("|");
}
+ s->Append(absl::StrFormat("%x", static_cast<uint8_t>(value)));
+ }
- if (value & SampleTypeBit::Sint) {
- if (!first) {
- s->Append("|");
- }
- first = false;
- s->Append("Sint");
- value &= ~SampleTypeBit::Sint;
- }
+ return {true};
+}
- if (value & SampleTypeBit::Uint) {
- if (!first) {
- s->Append("|");
- }
- first = false;
- s->Append("Uint");
- value &= ~SampleTypeBit::Uint;
- }
+absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+ SampleTypeBit value,
+ const absl::FormatConversionSpec& spec,
+ absl::FormatSink* s) {
+ if (value == SampleTypeBit::None) {
+ s->Append("None");
+ return {true};
+ }
- // Output any remaining flags as a hex value
- if (static_cast<bool>(value)) {
- if (!first) {
- s->Append("|");
- }
- s->Append(absl::StrFormat("%x", static_cast<uint8_t>(value)));
- }
+ bool first = true;
- return {true};
+ if (value & SampleTypeBit::Float) {
+ first = false;
+ s->Append("Float");
+ value &= ~SampleTypeBit::Float;
}
- absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
- BindingInfoType value,
- const absl::FormatConversionSpec& spec,
- absl::FormatSink* s) {
- switch (value) {
- case BindingInfoType::Buffer:
- s->Append("buffer");
- break;
- case BindingInfoType::Sampler:
- s->Append("sampler");
- break;
- case BindingInfoType::Texture:
- s->Append("texture");
- break;
- case BindingInfoType::StorageTexture:
- s->Append("storageTexture");
- break;
- case BindingInfoType::ExternalTexture:
- s->Append("externalTexture");
- break;
+ if (value & SampleTypeBit::UnfilterableFloat) {
+ if (!first) {
+ s->Append("|");
}
- return {true};
+ first = false;
+ s->Append("UnfilterableFloat");
+ value &= ~SampleTypeBit::UnfilterableFloat;
}
- absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
- SingleShaderStage value,
- const absl::FormatConversionSpec& spec,
- absl::FormatSink* s) {
- switch (value) {
- case SingleShaderStage::Compute:
- s->Append("Compute");
- break;
- case SingleShaderStage::Vertex:
- s->Append("Vertex");
- break;
- case SingleShaderStage::Fragment:
- s->Append("Fragment");
- break;
+ if (value & SampleTypeBit::Depth) {
+ if (!first) {
+ s->Append("|");
}
- return {true};
+ first = false;
+ s->Append("Depth");
+ value &= ~SampleTypeBit::Depth;
}
- absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
- VertexFormatBaseType value,
- const absl::FormatConversionSpec& spec,
- absl::FormatSink* s) {
- switch (value) {
- case VertexFormatBaseType::Float:
- s->Append("Float");
- break;
- case VertexFormatBaseType::Uint:
- s->Append("Uint");
- break;
- case VertexFormatBaseType::Sint:
- s->Append("Sint");
- break;
+ if (value & SampleTypeBit::Sint) {
+ if (!first) {
+ s->Append("|");
}
- return {true};
+ first = false;
+ s->Append("Sint");
+ value &= ~SampleTypeBit::Sint;
}
- absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
- InterStageComponentType value,
- const absl::FormatConversionSpec& spec,
- absl::FormatSink* s) {
- switch (value) {
- case InterStageComponentType::Float:
- s->Append("Float");
- break;
- case InterStageComponentType::Uint:
- s->Append("Uint");
- break;
- case InterStageComponentType::Sint:
- s->Append("Sint");
- break;
+ if (value & SampleTypeBit::Uint) {
+ if (!first) {
+ s->Append("|");
}
- return {true};
+ first = false;
+ s->Append("Uint");
+ value &= ~SampleTypeBit::Uint;
}
- absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
- InterpolationType value,
- const absl::FormatConversionSpec& spec,
- absl::FormatSink* s) {
- switch (value) {
- case InterpolationType::Perspective:
- s->Append("Perspective");
- break;
- case InterpolationType::Linear:
- s->Append("Linear");
- break;
- case InterpolationType::Flat:
- s->Append("Flat");
- break;
+ // Output any remaining flags as a hex value
+ if (static_cast<bool>(value)) {
+ if (!first) {
+ s->Append("|");
}
- return {true};
+ s->Append(absl::StrFormat("%x", static_cast<uint8_t>(value)));
}
- absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
- InterpolationSampling value,
- const absl::FormatConversionSpec& spec,
- absl::FormatSink* s) {
- switch (value) {
- case InterpolationSampling::None:
- s->Append("None");
- break;
- case InterpolationSampling::Center:
- s->Append("Center");
- break;
- case InterpolationSampling::Centroid:
- s->Append("Centroid");
- break;
- case InterpolationSampling::Sample:
- s->Append("Sample");
- break;
- }
- return {true};
+ return {true};
+}
+
+absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+ BindingInfoType value,
+ const absl::FormatConversionSpec& spec,
+ absl::FormatSink* s) {
+ switch (value) {
+ case BindingInfoType::Buffer:
+ s->Append("buffer");
+ break;
+ case BindingInfoType::Sampler:
+ s->Append("sampler");
+ break;
+ case BindingInfoType::Texture:
+ s->Append("texture");
+ break;
+ case BindingInfoType::StorageTexture:
+ s->Append("storageTexture");
+ break;
+ case BindingInfoType::ExternalTexture:
+ s->Append("externalTexture");
+ break;
+ }
+ return {true};
+}
+
+absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+ SingleShaderStage value,
+ const absl::FormatConversionSpec& spec,
+ absl::FormatSink* s) {
+ switch (value) {
+ case SingleShaderStage::Compute:
+ s->Append("Compute");
+ break;
+ case SingleShaderStage::Vertex:
+ s->Append("Vertex");
+ break;
+ case SingleShaderStage::Fragment:
+ s->Append("Fragment");
+ break;
+ }
+ return {true};
+}
+
+absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+ VertexFormatBaseType value,
+ const absl::FormatConversionSpec& spec,
+ absl::FormatSink* s) {
+ switch (value) {
+ case VertexFormatBaseType::Float:
+ s->Append("Float");
+ break;
+ case VertexFormatBaseType::Uint:
+ s->Append("Uint");
+ break;
+ case VertexFormatBaseType::Sint:
+ s->Append("Sint");
+ break;
+ }
+ return {true};
+}
+
+absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+ InterStageComponentType value,
+ const absl::FormatConversionSpec& spec,
+ absl::FormatSink* s) {
+ switch (value) {
+ case InterStageComponentType::Float:
+ s->Append("Float");
+ break;
+ case InterStageComponentType::Uint:
+ s->Append("Uint");
+ break;
+ case InterStageComponentType::Sint:
+ s->Append("Sint");
+ break;
+ }
+ return {true};
+}
+
+absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+ InterpolationType value,
+ const absl::FormatConversionSpec& spec,
+ absl::FormatSink* s) {
+ switch (value) {
+ case InterpolationType::Perspective:
+ s->Append("Perspective");
+ break;
+ case InterpolationType::Linear:
+ s->Append("Linear");
+ break;
+ case InterpolationType::Flat:
+ s->Append("Flat");
+ break;
+ }
+ return {true};
+}
+
+absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+ InterpolationSampling value,
+ const absl::FormatConversionSpec& spec,
+ absl::FormatSink* s) {
+ switch (value) {
+ case InterpolationSampling::None:
+ s->Append("None");
+ break;
+ case InterpolationSampling::Center:
+ s->Append("Center");
+ break;
+ case InterpolationSampling::Centroid:
+ s->Append("Centroid");
+ break;
+ case InterpolationSampling::Sample:
+ s->Append("Sample");
+ break;
}
+ return {true};
+}
} // namespace dawn::native
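
The AbslFormatConvert overloads above are Abseil str_format extension points: once an overload with this signature is visible via argument-dependent lookup, absl::StrFormat (and the formatted error messages built on it) can print the type directly with a %s conversion. A hypothetical call site, with the function name and message text made up for illustration:

```cpp
#include <string>

#include "absl/strings/str_format.h"
#include "dawn/native/webgpu_absl_format.h"

std::string DescribeClearColor(const dawn::native::Color& clearColor) {
    // The `const Color*` overload defined above is found via ADL, so the pointer
    // is rendered as "[Color r:..., g:..., b:..., a:...]".
    return absl::StrFormat("Unsupported clear color %s.", &clearColor);
}
```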
diff --git a/chromium/third_party/dawn/src/dawn/native/webgpu_absl_format.h b/chromium/third_party/dawn/src/dawn/native/webgpu_absl_format.h
index 0c141a3efb0..4c0c667f10f 100644
--- a/chromium/third_party/dawn/src/dawn/native/webgpu_absl_format.h
+++ b/chromium/third_party/dawn/src/dawn/native/webgpu_absl_format.h
@@ -21,113 +21,109 @@
namespace dawn::native {
- //
- // Structs
- //
-
- struct Color;
- absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
- const Color* value,
- const absl::FormatConversionSpec& spec,
- absl::FormatSink* s);
-
- struct Extent3D;
- absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
- const Extent3D* value,
- const absl::FormatConversionSpec& spec,
- absl::FormatSink* s);
-
- struct Origin3D;
- absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
- const Origin3D* value,
- const absl::FormatConversionSpec& spec,
- absl::FormatSink* s);
-
- struct BindingInfo;
- absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
- const BindingInfo& value,
- const absl::FormatConversionSpec& spec,
- absl::FormatSink* s);
-
- //
- // Objects
- //
-
- class DeviceBase;
- absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
- const DeviceBase* value,
- const absl::FormatConversionSpec& spec,
- absl::FormatSink* s);
-
- class ApiObjectBase;
- absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
- const ApiObjectBase* value,
- const absl::FormatConversionSpec& spec,
- absl::FormatSink* s);
-
- // Special case for TextureViews, since frequently the texture will be the
- // thing that's labeled.
- class TextureViewBase;
- absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
- const TextureViewBase* value,
- const absl::FormatConversionSpec& spec,
- absl::FormatSink* s);
-
- class AttachmentState;
- absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
- const AttachmentState* value,
- const absl::FormatConversionSpec& spec,
- absl::FormatSink* s);
-
- //
- // Enums
- //
-
- enum class Aspect : uint8_t;
- absl::FormatConvertResult<absl::FormatConversionCharSet::kString>
- AbslFormatConvert(Aspect value, const absl::FormatConversionSpec& spec, absl::FormatSink* s);
-
- enum class BindingInfoType;
- absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
- BindingInfoType value,
- const absl::FormatConversionSpec& spec,
- absl::FormatSink* s);
-
- enum class SampleTypeBit : uint8_t;
- absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
- SampleTypeBit value,
- const absl::FormatConversionSpec& spec,
- absl::FormatSink* s);
-
- enum class SingleShaderStage;
- absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
- SingleShaderStage value,
- const absl::FormatConversionSpec& spec,
- absl::FormatSink* s);
-
- enum class VertexFormatBaseType;
- absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
- VertexFormatBaseType value,
- const absl::FormatConversionSpec& spec,
- absl::FormatSink* s);
-
- enum class InterStageComponentType;
- absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
- InterStageComponentType value,
- const absl::FormatConversionSpec& spec,
- absl::FormatSink* s);
-
- enum class InterpolationType;
- absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
- InterpolationType value,
- const absl::FormatConversionSpec& spec,
- absl::FormatSink* s);
-
- enum class InterpolationSampling;
- absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
- InterpolationSampling value,
- const absl::FormatConversionSpec& spec,
- absl::FormatSink* s);
+//
+// Structs
+//
+
+struct Color;
+absl::FormatConvertResult<absl::FormatConversionCharSet::kString>
+AbslFormatConvert(const Color* value, const absl::FormatConversionSpec& spec, absl::FormatSink* s);
+
+struct Extent3D;
+absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+ const Extent3D* value,
+ const absl::FormatConversionSpec& spec,
+ absl::FormatSink* s);
+
+struct Origin3D;
+absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+ const Origin3D* value,
+ const absl::FormatConversionSpec& spec,
+ absl::FormatSink* s);
+
+struct BindingInfo;
+absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+ const BindingInfo& value,
+ const absl::FormatConversionSpec& spec,
+ absl::FormatSink* s);
+
+//
+// Objects
+//
+
+class DeviceBase;
+absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+ const DeviceBase* value,
+ const absl::FormatConversionSpec& spec,
+ absl::FormatSink* s);
+
+class ApiObjectBase;
+absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+ const ApiObjectBase* value,
+ const absl::FormatConversionSpec& spec,
+ absl::FormatSink* s);
+
+// Special case for TextureViews, since frequently the texture will be the
+// thing that's labeled.
+class TextureViewBase;
+absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+ const TextureViewBase* value,
+ const absl::FormatConversionSpec& spec,
+ absl::FormatSink* s);
+
+class AttachmentState;
+absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+ const AttachmentState* value,
+ const absl::FormatConversionSpec& spec,
+ absl::FormatSink* s);
+
+//
+// Enums
+//
+
+enum class Aspect : uint8_t;
+absl::FormatConvertResult<absl::FormatConversionCharSet::kString>
+AbslFormatConvert(Aspect value, const absl::FormatConversionSpec& spec, absl::FormatSink* s);
+
+enum class BindingInfoType;
+absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+ BindingInfoType value,
+ const absl::FormatConversionSpec& spec,
+ absl::FormatSink* s);
+
+enum class SampleTypeBit : uint8_t;
+absl::FormatConvertResult<absl::FormatConversionCharSet::kString>
+AbslFormatConvert(SampleTypeBit value, const absl::FormatConversionSpec& spec, absl::FormatSink* s);
+
+enum class SingleShaderStage;
+absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+ SingleShaderStage value,
+ const absl::FormatConversionSpec& spec,
+ absl::FormatSink* s);
+
+enum class VertexFormatBaseType;
+absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+ VertexFormatBaseType value,
+ const absl::FormatConversionSpec& spec,
+ absl::FormatSink* s);
+
+enum class InterStageComponentType;
+absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+ InterStageComponentType value,
+ const absl::FormatConversionSpec& spec,
+ absl::FormatSink* s);
+
+enum class InterpolationType;
+absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+ InterpolationType value,
+ const absl::FormatConversionSpec& spec,
+ absl::FormatSink* s);
+
+enum class InterpolationSampling;
+absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+ InterpolationSampling value,
+ const absl::FormatConversionSpec& spec,
+ absl::FormatSink* s);
} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/node/CMakeLists.txt b/chromium/third_party/dawn/src/dawn/node/CMakeLists.txt
index 558362a20bf..8c7cdb65549 100644
--- a/chromium/third_party/dawn/src/dawn/node/CMakeLists.txt
+++ b/chromium/third_party/dawn/src/dawn/node/CMakeLists.txt
@@ -12,8 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-set(GEN_DIR "${CMAKE_CURRENT_BINARY_DIR}/gen")
-set(IDLGEN_TOOL_DIR "${CMAKE_CURRENT_SOURCE_DIR}/tools/src/cmd/idlgen")
+set(DAWN_NODE_GEN_DIR "${DAWN_BUILD_GEN_DIR}/node")
+set(IDLGEN_TOOL_DIR "${CMAKE_CURRENT_SOURCE_DIR}/tools/src/cmd/idlgen")
# idlgen() is a function that uses the tools/cmd/idlgen/main.go tool to generate
# code from an IDL file and template.
@@ -75,7 +75,7 @@ target_include_directories(dawn_node PRIVATE
"${CMAKE_SOURCE_DIR}"
"${DAWN_THIRD_PARTY_DIR}"
"${NODE_API_HEADERS_DIR}/include"
- "${GEN_DIR}"
+ "${DAWN_NODE_GEN_DIR}"
)
# To reduce the build dependencies for compiling the dawn.node targets, we do
@@ -92,14 +92,14 @@ string(REGEX MATCHALL "napi_[a-z0-9_]*" NAPI_SYMBOLS "${NAPI_SYMBOLS_JS_CONTENT}
if (WIN32)
# Generate the NapiSymbols.def file from the Napi symbol list
- set(NAPI_SYMBOLS_DEF "${GEN_DIR}/NapiSymbols.def")
+ set(NAPI_SYMBOLS_DEF "${DAWN_NODE_GEN_DIR}/NapiSymbols.def")
list(TRANSFORM NAPI_SYMBOLS PREPEND " ")
list(TRANSFORM NAPI_SYMBOLS APPEND "\n")
string(REPLACE ";" "" NAPI_SYMBOLS "${NAPI_SYMBOLS}")
string(PREPEND NAPI_SYMBOLS "LIBRARY node.exe\nEXPORTS\n")
file(GENERATE OUTPUT "${NAPI_SYMBOLS_DEF}" CONTENT "${NAPI_SYMBOLS}")
# Generate the NapiSymbols.lib from the NapiSymbols.def file
- set(NAPI_SYMBOLS_LIB "${GEN_DIR}/NapiSymbols.lib")
+ set(NAPI_SYMBOLS_LIB "${DAWN_NODE_GEN_DIR}/NapiSymbols.lib")
# Resolve path to lib.exe
get_filename_component(VS_BIN_DIR "${CMAKE_LINKER}" DIRECTORY)
set(LIB_EXE "${VS_BIN_DIR}/lib.exe")
@@ -116,7 +116,7 @@ if (WIN32)
target_link_libraries(dawn_node "${NAPI_SYMBOLS_LIB}")
else()
# Generate the NapiSymbols.h file from the Napi symbol list
- set(NAPI_SYMBOLS_H "${GEN_DIR}/NapiSymbols.h")
+ set(NAPI_SYMBOLS_H "${DAWN_NODE_GEN_DIR}/NapiSymbols.h")
list(TRANSFORM NAPI_SYMBOLS PREPEND "NAPI_SYMBOL(")
list(TRANSFORM NAPI_SYMBOLS APPEND ")\n")
string(REPLACE ";" "" NAPI_SYMBOLS "${NAPI_SYMBOLS}")
diff --git a/chromium/third_party/dawn/src/dawn/node/Module.cpp b/chromium/third_party/dawn/src/dawn/node/Module.cpp
index f87631b0e55..ba2ad04cb35 100644
--- a/chromium/third_party/dawn/src/dawn/node/Module.cpp
+++ b/chromium/third_party/dawn/src/dawn/node/Module.cpp
@@ -12,38 +12,43 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+#include <string>
+#include <tuple>
+#include <utility>
+#include <vector>
+
#include "dawn/dawn_proc.h"
#include "src/dawn/node/binding/Flags.h"
#include "src/dawn/node/binding/GPU.h"
namespace {
- Napi::Value CreateGPU(const Napi::CallbackInfo& info) {
- const auto& env = info.Env();
+Napi::Value CreateGPU(const Napi::CallbackInfo& info) {
+ const auto& env = info.Env();
- std::tuple<std::vector<std::string>> args;
- auto res = wgpu::interop::FromJS(info, args);
- if (res != wgpu::interop::Success) {
- Napi::Error::New(env, res.error).ThrowAsJavaScriptException();
- return env.Undefined();
- }
+ std::tuple<std::vector<std::string>> args;
+ auto res = wgpu::interop::FromJS(info, args);
+ if (res != wgpu::interop::Success) {
+ Napi::Error::New(env, res.error).ThrowAsJavaScriptException();
+ return env.Undefined();
+ }
- wgpu::binding::Flags flags;
+ wgpu::binding::Flags flags;
- // Parse out the key=value flags out of the input args array
- for (const auto& arg : std::get<0>(args)) {
- const size_t sep_index = arg.find("=");
- if (sep_index == std::string::npos) {
- Napi::Error::New(env, "Flags expected argument format is <key>=<value>")
- .ThrowAsJavaScriptException();
- return env.Undefined();
- }
- flags.Set(arg.substr(0, sep_index), arg.substr(sep_index + 1));
+    // Parse the key=value flags out of the input args array
+ for (const auto& arg : std::get<0>(args)) {
+ const size_t sep_index = arg.find("=");
+ if (sep_index == std::string::npos) {
+ Napi::Error::New(env, "Flags expected argument format is <key>=<value>")
+ .ThrowAsJavaScriptException();
+ return env.Undefined();
}
-
- // Construct a wgpu::interop::GPU interface, implemented by wgpu::bindings::GPU.
- return wgpu::interop::GPU::Create<wgpu::binding::GPU>(env, std::move(flags));
+ flags.Set(arg.substr(0, sep_index), arg.substr(sep_index + 1));
}
+ // Construct a wgpu::interop::GPU interface, implemented by wgpu::bindings::GPU.
+ return wgpu::interop::GPU::Create<wgpu::binding::GPU>(env, std::move(flags));
+}
+
} // namespace
// Initialize() initializes the Dawn node module, registering all the WebGPU
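
The loop in CreateGPU() above rejects any argument that is not of the form <key>=<value> and stores the remaining pairs in Dawn's Flags object. The same splitting logic as a standalone sketch, using a std::unordered_map in place of the Flags class (names are illustrative):

```cpp
#include <string>
#include <unordered_map>
#include <vector>

// Returns false on the first argument that lacks an '=', matching the
// validation performed in CreateGPU() above.
bool ParseFlags(const std::vector<std::string>& args,
                std::unordered_map<std::string, std::string>* out) {
    for (const std::string& arg : args) {
        const size_t sep = arg.find('=');
        if (sep == std::string::npos) {
            return false;  // Expected format is <key>=<value>.
        }
        (*out)[arg.substr(0, sep)] = arg.substr(sep + 1);
    }
    return true;
}
```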
diff --git a/chromium/third_party/dawn/src/dawn/node/NapiSymbols.cpp b/chromium/third_party/dawn/src/dawn/node/NapiSymbols.cpp
index a557eca4d50..01a184f03bd 100644
--- a/chromium/third_party/dawn/src/dawn/node/NapiSymbols.cpp
+++ b/chromium/third_party/dawn/src/dawn/node/NapiSymbols.cpp
@@ -22,7 +22,7 @@
// which we can use to produce weak-symbol stubs.
#ifdef _WIN32
-# error "NapiSymbols.cpp is not used on Windows"
+#error "NapiSymbols.cpp is not used on Windows"
#endif
#define NAPI_SYMBOL(NAME) \
diff --git a/chromium/third_party/dawn/src/dawn/node/README.md b/chromium/third_party/dawn/src/dawn/node/README.md
index 1d317649649..8e492a7fbe1 100644
--- a/chromium/third_party/dawn/src/dawn/node/README.md
+++ b/chromium/third_party/dawn/src/dawn/node/README.md
@@ -77,7 +77,7 @@ The `--flag` parameter must be passed in multiple times, once for each flag begi
For example, on Windows, to use the d3dcompiler_47.dll from a Chromium checkout, and to dump shader output, we could run the following using Git Bash:
```sh
-./src/dawn/node/tools/run-cts --verbose --dawn-node=/c/src/dawn/build/Debug/dawn.node --cts=/c/src/gpuweb-cts --flag=dlldir="C:\src\chromium\src\out\Release" --flag=enable-dawn-features=dump_shaders 'webgpu:shader,execution,builtin,abs:integer_builtin_functions,abs_unsigned:storageClass="storage";storageMode="read_write";containerType="vector";isAtomic=false;baseType="u32";type="vec2%3Cu32%3E"'
+./src/dawn/node/tools/run-cts --verbose --dawn-node=/c/src/dawn/build/Debug/dawn.node --cts=/c/src/webgpu-cts --flag=dlldir="C:\src\chromium\src\out\Release" --flag=enable-dawn-features=dump_shaders 'webgpu:shader,execution,builtin,abs:integer_builtin_functions,abs_unsigned:storageClass="storage";storageMode="read_write";containerType="vector";isAtomic=false;baseType="u32";type="vec2%3Cu32%3E"'
```
Note that we pass `--verbose` above so that all test output, including the dumped shader, is written to stdout.
@@ -101,7 +101,7 @@ Open or create the `.vscode/launch.json` file, and add:
"outFiles": [ "./**/*.js" ],
"args": [
"-e", "require('./src/common/tools/setup-ts-in-node.js');require('./src/common/runtime/cmdline.ts');",
- "--", "dummy-arg",
+ "--", "placeholder-arg",
"--gpu-provider",
"[path-to-dawn.node]", // REPLACE: [path-to-dawn.node]
"[test-query]", // REPLACE: [test-query]
@@ -127,7 +127,7 @@ cd <cts-root-dir>
[path-to-node] \ # for example <dawn-root-dir>/third_party/node/<arch>/node
-e "require('./src/common/tools/setup-ts-in-node.js');require('./src/common/runtime/cmdline.ts');" \
-- \
- dummy-arg \
+ placeholder-arg \
--gpu-provider [path to dawn.node] \
[test-query]
```
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/AsyncRunner.cpp b/chromium/third_party/dawn/src/dawn/node/binding/AsyncRunner.cpp
index a978fa8e3b2..5926255bfc3 100644
--- a/chromium/third_party/dawn/src/dawn/node/binding/AsyncRunner.cpp
+++ b/chromium/third_party/dawn/src/dawn/node/binding/AsyncRunner.cpp
@@ -19,42 +19,41 @@
namespace wgpu::binding {
- AsyncRunner::AsyncRunner(Napi::Env env, wgpu::Device device) : env_(env), device_(device) {
- }
-
- void AsyncRunner::Begin() {
- assert(count_ != std::numeric_limits<decltype(count_)>::max());
- if (count_++ == 0) {
- QueueTick();
- }
- }
+AsyncRunner::AsyncRunner(Napi::Env env, wgpu::Device device) : env_(env), device_(device) {}
- void AsyncRunner::End() {
- assert(count_ > 0);
- count_--;
+void AsyncRunner::Begin() {
+ assert(count_ != std::numeric_limits<decltype(count_)>::max());
+ if (count_++ == 0) {
+ QueueTick();
}
-
- void AsyncRunner::QueueTick() {
- // TODO(crbug.com/dawn/1127): We probably want to reduce the frequency at which this gets
- // called.
- if (tick_queued_) {
- return;
- }
- tick_queued_ = true;
- env_.Global()
- .Get("setImmediate")
- .As<Napi::Function>()
- .Call({
- // TODO(crbug.com/dawn/1127): Create once, reuse.
- Napi::Function::New(env_,
- [this](const Napi::CallbackInfo&) {
- tick_queued_ = false;
- if (count_ > 0) {
- device_.Tick();
- QueueTick();
- }
- }),
- });
+}
+
+void AsyncRunner::End() {
+ assert(count_ > 0);
+ count_--;
+}
+
+void AsyncRunner::QueueTick() {
+ // TODO(crbug.com/dawn/1127): We probably want to reduce the frequency at which this gets
+ // called.
+ if (tick_queued_) {
+ return;
}
+ tick_queued_ = true;
+ env_.Global()
+ .Get("setImmediate")
+ .As<Napi::Function>()
+ .Call({
+ // TODO(crbug.com/dawn/1127): Create once, reuse.
+ Napi::Function::New(env_,
+ [this](const Napi::CallbackInfo&) {
+ tick_queued_ = false;
+ if (count_ > 0) {
+ device_.Tick();
+ QueueTick();
+ }
+ }),
+ });
+}
} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/AsyncRunner.h b/chromium/third_party/dawn/src/dawn/node/binding/AsyncRunner.h
index 53e75d7793c..0c733b977f7 100644
--- a/chromium/third_party/dawn/src/dawn/node/binding/AsyncRunner.h
+++ b/chromium/third_party/dawn/src/dawn/node/binding/AsyncRunner.h
@@ -17,61 +17,59 @@
#include <stdint.h>
#include <memory>
+#include <utility>
#include "dawn/webgpu_cpp.h"
-
#include "src/dawn/node/interop/Napi.h"
namespace wgpu::binding {
- // AsyncRunner is used to poll a wgpu::Device with calls to Tick() while there are asynchronous
- // tasks in flight.
- class AsyncRunner {
- public:
- AsyncRunner(Napi::Env env, wgpu::Device device);
+// AsyncRunner is used to poll a wgpu::Device with calls to Tick() while there are asynchronous
+// tasks in flight.
+class AsyncRunner {
+ public:
+ AsyncRunner(Napi::Env env, wgpu::Device device);
- // Begin() should be called when a new asynchronous task is started.
- // If the number of executing asynchronous tasks transitions from 0 to 1, then a function
- // will be scheduled on the main JavaScript thread to call wgpu::Device::Tick() whenever the
- // thread is idle. This will be repeatedly called until the number of executing asynchronous
- // tasks reaches 0 again.
- void Begin();
+ // Begin() should be called when a new asynchronous task is started.
+ // If the number of executing asynchronous tasks transitions from 0 to 1, then a function
+ // will be scheduled on the main JavaScript thread to call wgpu::Device::Tick() whenever the
+ // thread is idle. This will be repeatedly called until the number of executing asynchronous
+ // tasks reaches 0 again.
+ void Begin();
- // End() should be called once the asynchronous task has finished.
- // Every call to Begin() should eventually result in a call to End().
- void End();
+ // End() should be called once the asynchronous task has finished.
+ // Every call to Begin() should eventually result in a call to End().
+ void End();
- private:
- void QueueTick();
- Napi::Env env_;
- wgpu::Device const device_;
- uint64_t count_ = 0;
- bool tick_queued_ = false;
- };
+ private:
+ void QueueTick();
+ Napi::Env env_;
+ wgpu::Device const device_;
+ uint64_t count_ = 0;
+ bool tick_queued_ = false;
+};
- // AsyncTask is a RAII helper for calling AsyncRunner::Begin() on construction, and
- // AsyncRunner::End() on destruction.
- class AsyncTask {
- public:
- inline AsyncTask(AsyncTask&&) = default;
+// AsyncTask is a RAII helper for calling AsyncRunner::Begin() on construction, and
+// AsyncRunner::End() on destruction.
+class AsyncTask {
+ public:
+ inline AsyncTask(AsyncTask&&) = default;
- // Constructor.
- // Calls AsyncRunner::Begin()
- inline AsyncTask(std::shared_ptr<AsyncRunner> runner) : runner_(std::move(runner)) {
- runner_->Begin();
- }
+ // Constructor.
+ // Calls AsyncRunner::Begin()
+ explicit inline AsyncTask(std::shared_ptr<AsyncRunner> runner) : runner_(std::move(runner)) {
+ runner_->Begin();
+ }
- // Destructor.
- // Calls AsyncRunner::End()
- inline ~AsyncTask() {
- runner_->End();
- }
+ // Destructor.
+ // Calls AsyncRunner::End()
+ inline ~AsyncTask() { runner_->End(); }
- private:
- AsyncTask(const AsyncTask&) = delete;
- AsyncTask& operator=(const AsyncTask&) = delete;
- std::shared_ptr<AsyncRunner> runner_;
- };
+ private:
+ AsyncTask(const AsyncTask&) = delete;
+ AsyncTask& operator=(const AsyncTask&) = delete;
+ std::shared_ptr<AsyncRunner> runner_;
+};
} // namespace wgpu::binding
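
For context on how the two classes above fit together: AsyncTask is the RAII wrapper that pairs AsyncRunner::Begin() with AsyncRunner::End(), which in turn drives the setImmediate-based Device::Tick() loop shown in AsyncRunner.cpp. A minimal usage sketch under stated assumptions (the caller function and its name are hypothetical; only the Begin()/End() pairing comes from the header above):

```cpp
#include <memory>
#include <utility>

#include "src/dawn/node/binding/AsyncRunner.h"

// Hypothetical caller: keeps the device ticking while one async operation is in flight.
void StartSomeAsyncWork(std::shared_ptr<wgpu::binding::AsyncRunner> runner) {
    // Constructing the task calls runner->Begin(); if this is the first live task,
    // AsyncRunner queues setImmediate-driven wgpu::Device::Tick() calls.
    wgpu::binding::AsyncTask task(std::move(runner));

    // ... start the asynchronous wgpu call here, moving `task` into its completion
    // callback so it stays alive until the work finishes ...

    // When the (moved) task is eventually destroyed, End() runs and the tick loop
    // stops once the outstanding-task count drops back to zero.
}
```
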
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/CMakeLists.txt b/chromium/third_party/dawn/src/dawn/node/binding/CMakeLists.txt
index 2bf586b301a..cd3fb36db19 100644
--- a/chromium/third_party/dawn/src/dawn/node/binding/CMakeLists.txt
+++ b/chromium/third_party/dawn/src/dawn/node/binding/CMakeLists.txt
@@ -73,7 +73,7 @@ target_include_directories(dawn_node_binding
"${CMAKE_SOURCE_DIR}"
"${DAWN_THIRD_PARTY_DIR}"
"${NODE_API_HEADERS_DIR}/include"
- "${GEN_DIR}"
+ "${DAWN_NODE_GEN_DIR}"
)
target_link_libraries(dawn_node_binding
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/Converter.cpp b/chromium/third_party/dawn/src/dawn/node/binding/Converter.cpp
index 9c116f2276b..cad53f67e4b 100644
--- a/chromium/third_party/dawn/src/dawn/node/binding/Converter.cpp
+++ b/chromium/third_party/dawn/src/dawn/node/binding/Converter.cpp
@@ -24,1198 +24,1199 @@
namespace wgpu::binding {
- Converter::~Converter() {
- for (auto& free : free_) {
- free();
- }
- }
-
- bool Converter::Convert(wgpu::Extent3D& out, const interop::GPUExtent3D& in) {
- out = {};
- if (auto* dict = std::get_if<interop::GPUExtent3DDict>(&in)) {
- out.depthOrArrayLayers = dict->depthOrArrayLayers;
- out.width = dict->width;
- out.height = dict->height;
- return true;
- }
- if (auto* vec = std::get_if<std::vector<interop::GPUIntegerCoordinate>>(&in)) {
- switch (vec->size()) {
- default:
- case 3:
- out.depthOrArrayLayers = (*vec)[2];
- case 2: // fallthrough
- out.height = (*vec)[1];
- case 1: // fallthrough
- out.width = (*vec)[0];
- return true;
- case 0:
- break;
- }
- }
- Napi::Error::New(env, "invalid value for GPUExtent3D").ThrowAsJavaScriptException();
- return false;
+Converter::~Converter() {
+ for (auto& free : free_) {
+ free();
}
-
- bool Converter::Convert(wgpu::Origin3D& out, const interop::GPUOrigin3DDict& in) {
- out = {};
- out.x = in.x;
- out.y = in.y;
- out.z = in.z;
+}
+
+bool Converter::Convert(wgpu::Extent3D& out, const interop::GPUExtent3D& in) {
+ out = {};
+ if (auto* dict = std::get_if<interop::GPUExtent3DDict>(&in)) {
+ out.depthOrArrayLayers = dict->depthOrArrayLayers;
+ out.width = dict->width;
+ out.height = dict->height;
return true;
}
-
- bool Converter::Convert(wgpu::Color& out, const interop::GPUColor& in) {
- out = {};
- if (auto* dict = std::get_if<interop::GPUColorDict>(&in)) {
- out.r = dict->r;
- out.g = dict->g;
- out.b = dict->b;
- out.a = dict->a;
- return true;
- }
- if (auto* vec = std::get_if<std::vector<double>>(&in)) {
- switch (vec->size()) {
- default:
- case 4:
- out.a = (*vec)[3];
- case 3: // fallthrough
- out.b = (*vec)[2];
- case 2: // fallthrough
- out.g = (*vec)[1];
- case 1: // fallthrough
- out.r = (*vec)[0];
- return true;
- case 0:
- break;
- }
- }
- Napi::Error::New(env, "invalid value for GPUColor").ThrowAsJavaScriptException();
- return false;
- }
-
- bool Converter::Convert(wgpu::Origin3D& out,
- const std::vector<interop::GPUIntegerCoordinate>& in) {
- out = {};
- switch (in.size()) {
+ if (auto* vec = std::get_if<std::vector<interop::GPUIntegerCoordinate>>(&in)) {
+ switch (vec->size()) {
default:
case 3:
- out.z = in[2];
+ out.depthOrArrayLayers = (*vec)[2];
case 2: // fallthrough
- out.y = in[1];
+ out.height = (*vec)[1];
case 1: // fallthrough
- out.x = in[0];
+ out.width = (*vec)[0];
+ return true;
case 0:
break;
}
+ }
+ Napi::Error::New(env, "invalid value for GPUExtent3D").ThrowAsJavaScriptException();
+ return false;
+}
+
+bool Converter::Convert(wgpu::Origin3D& out, const interop::GPUOrigin3DDict& in) {
+ out = {};
+ out.x = in.x;
+ out.y = in.y;
+ out.z = in.z;
+ return true;
+}
+
+bool Converter::Convert(wgpu::Color& out, const interop::GPUColor& in) {
+ out = {};
+ if (auto* dict = std::get_if<interop::GPUColorDict>(&in)) {
+ out.r = dict->r;
+ out.g = dict->g;
+ out.b = dict->b;
+ out.a = dict->a;
return true;
}
-
- bool Converter::Convert(wgpu::TextureAspect& out, const interop::GPUTextureAspect& in) {
- out = wgpu::TextureAspect::All;
- switch (in) {
- case interop::GPUTextureAspect::kAll:
- out = wgpu::TextureAspect::All;
- return true;
- case interop::GPUTextureAspect::kStencilOnly:
- out = wgpu::TextureAspect::StencilOnly;
- return true;
- case interop::GPUTextureAspect::kDepthOnly:
- out = wgpu::TextureAspect::DepthOnly;
+ if (auto* vec = std::get_if<std::vector<double>>(&in)) {
+ switch (vec->size()) {
+ default:
+ case 4:
+ out.a = (*vec)[3];
+ case 3: // fallthrough
+ out.b = (*vec)[2];
+ case 2: // fallthrough
+ out.g = (*vec)[1];
+ case 1: // fallthrough
+ out.r = (*vec)[0];
return true;
+ case 0:
+ break;
}
- Napi::Error::New(env, "invalid value for GPUTextureAspect").ThrowAsJavaScriptException();
- return false;
- }
-
- bool Converter::Convert(wgpu::ImageCopyTexture& out, const interop::GPUImageCopyTexture& in) {
- out = {};
- return Convert(out.texture, in.texture) && Convert(out.mipLevel, in.mipLevel) &&
- Convert(out.origin, in.origin) && Convert(out.aspect, in.aspect);
}
-
- bool Converter::Convert(wgpu::ImageCopyBuffer& out, const interop::GPUImageCopyBuffer& in) {
- out = {};
- out.buffer = *in.buffer.As<GPUBuffer>();
- return Convert(out.layout.offset, in.offset) &&
- Convert(out.layout.bytesPerRow, in.bytesPerRow) &&
- Convert(out.layout.rowsPerImage, in.rowsPerImage);
+ Napi::Error::New(env, "invalid value for GPUColor").ThrowAsJavaScriptException();
+ return false;
+}
+
+bool Converter::Convert(wgpu::Origin3D& out, const std::vector<interop::GPUIntegerCoordinate>& in) {
+ out = {};
+ switch (in.size()) {
+ default:
+ case 3:
+ out.z = in[2];
+ case 2: // fallthrough
+ out.y = in[1];
+ case 1: // fallthrough
+ out.x = in[0];
+ case 0:
+ break;
}
-
- bool Converter::Convert(BufferSource& out, interop::BufferSource in) {
- out = {};
- if (auto* view = std::get_if<interop::ArrayBufferView>(&in)) {
- std::visit(
- [&](auto&& v) {
- auto arr = v.ArrayBuffer();
- out.data = arr.Data();
- out.size = arr.ByteLength();
- out.bytesPerElement = v.ElementSize();
- },
- *view);
+ return true;
+}
+
+bool Converter::Convert(wgpu::TextureAspect& out, const interop::GPUTextureAspect& in) {
+ out = wgpu::TextureAspect::All;
+ switch (in) {
+ case interop::GPUTextureAspect::kAll:
+ out = wgpu::TextureAspect::All;
return true;
- }
- if (auto* arr = std::get_if<interop::ArrayBuffer>(&in)) {
- out.data = arr->Data();
- out.size = arr->ByteLength();
- out.bytesPerElement = 1;
+ case interop::GPUTextureAspect::kStencilOnly:
+ out = wgpu::TextureAspect::StencilOnly;
+ return true;
+ case interop::GPUTextureAspect::kDepthOnly:
+ out = wgpu::TextureAspect::DepthOnly;
return true;
- }
- Napi::Error::New(env, "invalid value for BufferSource").ThrowAsJavaScriptException();
- return false;
- }
-
- bool Converter::Convert(wgpu::TextureDataLayout& out, const interop::GPUImageDataLayout& in) {
- out = {};
- return Convert(out.bytesPerRow, in.bytesPerRow) && Convert(out.offset, in.offset) &&
- Convert(out.rowsPerImage, in.rowsPerImage);
- }
-
- bool Converter::Convert(wgpu::TextureFormat& out, const interop::GPUTextureFormat& in) {
- out = wgpu::TextureFormat::Undefined;
- switch (in) {
- case interop::GPUTextureFormat::kR8Unorm:
- out = wgpu::TextureFormat::R8Unorm;
- return true;
- case interop::GPUTextureFormat::kR8Snorm:
- out = wgpu::TextureFormat::R8Snorm;
- return true;
- case interop::GPUTextureFormat::kR8Uint:
- out = wgpu::TextureFormat::R8Uint;
- return true;
- case interop::GPUTextureFormat::kR8Sint:
- out = wgpu::TextureFormat::R8Sint;
- return true;
- case interop::GPUTextureFormat::kR16Uint:
- out = wgpu::TextureFormat::R16Uint;
- return true;
- case interop::GPUTextureFormat::kR16Sint:
- out = wgpu::TextureFormat::R16Sint;
- return true;
- case interop::GPUTextureFormat::kR16Float:
- out = wgpu::TextureFormat::R16Float;
- return true;
- case interop::GPUTextureFormat::kRg8Unorm:
- out = wgpu::TextureFormat::RG8Unorm;
- return true;
- case interop::GPUTextureFormat::kRg8Snorm:
- out = wgpu::TextureFormat::RG8Snorm;
- return true;
- case interop::GPUTextureFormat::kRg8Uint:
- out = wgpu::TextureFormat::RG8Uint;
- return true;
- case interop::GPUTextureFormat::kRg8Sint:
- out = wgpu::TextureFormat::RG8Sint;
- return true;
- case interop::GPUTextureFormat::kR32Uint:
- out = wgpu::TextureFormat::R32Uint;
- return true;
- case interop::GPUTextureFormat::kR32Sint:
- out = wgpu::TextureFormat::R32Sint;
- return true;
- case interop::GPUTextureFormat::kR32Float:
- out = wgpu::TextureFormat::R32Float;
- return true;
- case interop::GPUTextureFormat::kRg16Uint:
- out = wgpu::TextureFormat::RG16Uint;
- return true;
- case interop::GPUTextureFormat::kRg16Sint:
- out = wgpu::TextureFormat::RG16Sint;
- return true;
- case interop::GPUTextureFormat::kRg16Float:
- out = wgpu::TextureFormat::RG16Float;
- return true;
- case interop::GPUTextureFormat::kRgba8Unorm:
- out = wgpu::TextureFormat::RGBA8Unorm;
- return true;
- case interop::GPUTextureFormat::kRgba8UnormSrgb:
- out = wgpu::TextureFormat::RGBA8UnormSrgb;
- return true;
- case interop::GPUTextureFormat::kRgba8Snorm:
- out = wgpu::TextureFormat::RGBA8Snorm;
- return true;
- case interop::GPUTextureFormat::kRgba8Uint:
- out = wgpu::TextureFormat::RGBA8Uint;
- return true;
- case interop::GPUTextureFormat::kRgba8Sint:
- out = wgpu::TextureFormat::RGBA8Sint;
- return true;
- case interop::GPUTextureFormat::kBgra8Unorm:
- out = wgpu::TextureFormat::BGRA8Unorm;
- return true;
- case interop::GPUTextureFormat::kBgra8UnormSrgb:
- out = wgpu::TextureFormat::BGRA8UnormSrgb;
- return true;
- case interop::GPUTextureFormat::kRgb9E5Ufloat:
- out = wgpu::TextureFormat::RGB9E5Ufloat;
- return true;
- case interop::GPUTextureFormat::kRgb10A2Unorm:
- out = wgpu::TextureFormat::RGB10A2Unorm;
- return true;
- case interop::GPUTextureFormat::kRg11B10Ufloat:
- out = wgpu::TextureFormat::RG11B10Ufloat;
- return true;
- case interop::GPUTextureFormat::kRg32Uint:
- out = wgpu::TextureFormat::RG32Uint;
- return true;
- case interop::GPUTextureFormat::kRg32Sint:
- out = wgpu::TextureFormat::RG32Sint;
- return true;
- case interop::GPUTextureFormat::kRg32Float:
- out = wgpu::TextureFormat::RG32Float;
- return true;
- case interop::GPUTextureFormat::kRgba16Uint:
- out = wgpu::TextureFormat::RGBA16Uint;
- return true;
- case interop::GPUTextureFormat::kRgba16Sint:
- out = wgpu::TextureFormat::RGBA16Sint;
- return true;
- case interop::GPUTextureFormat::kRgba16Float:
- out = wgpu::TextureFormat::RGBA16Float;
- return true;
- case interop::GPUTextureFormat::kRgba32Uint:
- out = wgpu::TextureFormat::RGBA32Uint;
- return true;
- case interop::GPUTextureFormat::kRgba32Sint:
- out = wgpu::TextureFormat::RGBA32Sint;
- return true;
- case interop::GPUTextureFormat::kRgba32Float:
- out = wgpu::TextureFormat::RGBA32Float;
- return true;
- case interop::GPUTextureFormat::kStencil8:
- out = wgpu::TextureFormat::Stencil8;
- return true;
- case interop::GPUTextureFormat::kDepth16Unorm:
- out = wgpu::TextureFormat::Depth16Unorm;
- return true;
- case interop::GPUTextureFormat::kDepth24Plus:
- out = wgpu::TextureFormat::Depth24Plus;
- return true;
- case interop::GPUTextureFormat::kDepth24PlusStencil8:
- out = wgpu::TextureFormat::Depth24PlusStencil8;
- return true;
- case interop::GPUTextureFormat::kDepth32Float:
- out = wgpu::TextureFormat::Depth32Float;
- return true;
- case interop::GPUTextureFormat::kDepth24UnormStencil8:
- out = wgpu::TextureFormat::Depth24UnormStencil8;
- return true;
- case interop::GPUTextureFormat::kDepth32FloatStencil8:
- out = wgpu::TextureFormat::Depth32FloatStencil8;
- return true;
- case interop::GPUTextureFormat::kBc1RgbaUnorm:
- out = wgpu::TextureFormat::BC1RGBAUnorm;
- return true;
- case interop::GPUTextureFormat::kBc1RgbaUnormSrgb:
- out = wgpu::TextureFormat::BC1RGBAUnormSrgb;
- return true;
- case interop::GPUTextureFormat::kBc2RgbaUnorm:
- out = wgpu::TextureFormat::BC2RGBAUnorm;
- return true;
- case interop::GPUTextureFormat::kBc2RgbaUnormSrgb:
- out = wgpu::TextureFormat::BC2RGBAUnormSrgb;
- return true;
- case interop::GPUTextureFormat::kBc3RgbaUnorm:
- out = wgpu::TextureFormat::BC3RGBAUnorm;
- return true;
- case interop::GPUTextureFormat::kBc3RgbaUnormSrgb:
- out = wgpu::TextureFormat::BC3RGBAUnormSrgb;
- return true;
- case interop::GPUTextureFormat::kBc4RUnorm:
- out = wgpu::TextureFormat::BC4RUnorm;
- return true;
- case interop::GPUTextureFormat::kBc4RSnorm:
- out = wgpu::TextureFormat::BC4RSnorm;
- return true;
- case interop::GPUTextureFormat::kBc5RgUnorm:
- out = wgpu::TextureFormat::BC5RGUnorm;
- return true;
- case interop::GPUTextureFormat::kBc5RgSnorm:
- out = wgpu::TextureFormat::BC5RGSnorm;
- return true;
- case interop::GPUTextureFormat::kBc6HRgbUfloat:
- out = wgpu::TextureFormat::BC6HRGBUfloat;
- return true;
- case interop::GPUTextureFormat::kBc6HRgbFloat:
- out = wgpu::TextureFormat::BC6HRGBFloat;
- return true;
- case interop::GPUTextureFormat::kBc7RgbaUnorm:
- out = wgpu::TextureFormat::BC7RGBAUnorm;
- return true;
- case interop::GPUTextureFormat::kBc7RgbaUnormSrgb:
- out = wgpu::TextureFormat::BC7RGBAUnormSrgb;
- return true;
- case interop::GPUTextureFormat::kEtc2Rgb8Unorm:
- out = wgpu::TextureFormat::ETC2RGB8Unorm;
- return true;
- case interop::GPUTextureFormat::kEtc2Rgb8UnormSrgb:
- out = wgpu::TextureFormat::ETC2RGB8UnormSrgb;
- return true;
- case interop::GPUTextureFormat::kEtc2Rgb8A1Unorm:
- out = wgpu::TextureFormat::ETC2RGB8A1Unorm;
- return true;
- case interop::GPUTextureFormat::kEtc2Rgb8A1UnormSrgb:
- out = wgpu::TextureFormat::ETC2RGB8A1UnormSrgb;
- return true;
- case interop::GPUTextureFormat::kEtc2Rgba8Unorm:
- out = wgpu::TextureFormat::ETC2RGBA8Unorm;
- return true;
- case interop::GPUTextureFormat::kEtc2Rgba8UnormSrgb:
- out = wgpu::TextureFormat::ETC2RGBA8UnormSrgb;
- return true;
- case interop::GPUTextureFormat::kEacR11Unorm:
- out = wgpu::TextureFormat::EACR11Unorm;
- return true;
- case interop::GPUTextureFormat::kEacR11Snorm:
- out = wgpu::TextureFormat::EACR11Snorm;
- return true;
- case interop::GPUTextureFormat::kEacRg11Unorm:
- out = wgpu::TextureFormat::EACRG11Unorm;
- return true;
- case interop::GPUTextureFormat::kEacRg11Snorm:
- out = wgpu::TextureFormat::EACRG11Snorm;
- return true;
- case interop::GPUTextureFormat::kAstc4X4Unorm:
- out = wgpu::TextureFormat::ASTC4x4Unorm;
- return true;
- case interop::GPUTextureFormat::kAstc4X4UnormSrgb:
- out = wgpu::TextureFormat::ASTC4x4UnormSrgb;
- return true;
- case interop::GPUTextureFormat::kAstc5X4Unorm:
- out = wgpu::TextureFormat::ASTC5x4Unorm;
- return true;
- case interop::GPUTextureFormat::kAstc5X4UnormSrgb:
- out = wgpu::TextureFormat::ASTC5x4UnormSrgb;
- return true;
- case interop::GPUTextureFormat::kAstc5X5Unorm:
- out = wgpu::TextureFormat::ASTC5x5Unorm;
- return true;
- case interop::GPUTextureFormat::kAstc5X5UnormSrgb:
- out = wgpu::TextureFormat::ASTC5x5UnormSrgb;
- return true;
- case interop::GPUTextureFormat::kAstc6X5Unorm:
- out = wgpu::TextureFormat::ASTC6x5Unorm;
- return true;
- case interop::GPUTextureFormat::kAstc6X5UnormSrgb:
- out = wgpu::TextureFormat::ASTC6x5UnormSrgb;
- return true;
- case interop::GPUTextureFormat::kAstc6X6Unorm:
- out = wgpu::TextureFormat::ASTC6x6Unorm;
- return true;
- case interop::GPUTextureFormat::kAstc6X6UnormSrgb:
- out = wgpu::TextureFormat::ASTC6x6UnormSrgb;
- return true;
- case interop::GPUTextureFormat::kAstc8X5Unorm:
- out = wgpu::TextureFormat::ASTC8x5Unorm;
- return true;
- case interop::GPUTextureFormat::kAstc8X5UnormSrgb:
- out = wgpu::TextureFormat::ASTC8x5UnormSrgb;
- return true;
- case interop::GPUTextureFormat::kAstc8X6Unorm:
- out = wgpu::TextureFormat::ASTC8x6Unorm;
- return true;
- case interop::GPUTextureFormat::kAstc8X6UnormSrgb:
- out = wgpu::TextureFormat::ASTC8x6UnormSrgb;
- return true;
- case interop::GPUTextureFormat::kAstc8X8Unorm:
- out = wgpu::TextureFormat::ASTC8x8Unorm;
- return true;
- case interop::GPUTextureFormat::kAstc8X8UnormSrgb:
- out = wgpu::TextureFormat::ASTC8x8UnormSrgb;
- return true;
- case interop::GPUTextureFormat::kAstc10X5Unorm:
- out = wgpu::TextureFormat::ASTC10x5Unorm;
- return true;
- case interop::GPUTextureFormat::kAstc10X5UnormSrgb:
- out = wgpu::TextureFormat::ASTC10x5UnormSrgb;
- return true;
- case interop::GPUTextureFormat::kAstc10X6Unorm:
- out = wgpu::TextureFormat::ASTC10x6Unorm;
- return true;
- case interop::GPUTextureFormat::kAstc10X6UnormSrgb:
- out = wgpu::TextureFormat::ASTC10x6UnormSrgb;
- return true;
- case interop::GPUTextureFormat::kAstc10X8Unorm:
- out = wgpu::TextureFormat::ASTC10x8Unorm;
- return true;
- case interop::GPUTextureFormat::kAstc10X8UnormSrgb:
- out = wgpu::TextureFormat::ASTC10x8UnormSrgb;
- return true;
- case interop::GPUTextureFormat::kAstc10X10Unorm:
- out = wgpu::TextureFormat::ASTC10x10Unorm;
- return true;
- case interop::GPUTextureFormat::kAstc10X10UnormSrgb:
- out = wgpu::TextureFormat::ASTC10x10UnormSrgb;
- return true;
- case interop::GPUTextureFormat::kAstc12X10Unorm:
- out = wgpu::TextureFormat::ASTC12x10Unorm;
- return true;
- case interop::GPUTextureFormat::kAstc12X10UnormSrgb:
- out = wgpu::TextureFormat::ASTC12x10UnormSrgb;
- return true;
- case interop::GPUTextureFormat::kAstc12X12Unorm:
- out = wgpu::TextureFormat::ASTC12x12Unorm;
- return true;
- case interop::GPUTextureFormat::kAstc12X12UnormSrgb:
- out = wgpu::TextureFormat::ASTC12x12UnormSrgb;
- return true;
- }
- Napi::Error::New(env, "invalid value for GPUTextureFormat").ThrowAsJavaScriptException();
- return false;
- }
-
- bool Converter::Convert(wgpu::TextureUsage& out, const interop::GPUTextureUsageFlags& in) {
- out = static_cast<wgpu::TextureUsage>(in.value);
- return true;
- }
-
- bool Converter::Convert(wgpu::ColorWriteMask& out, const interop::GPUColorWriteFlags& in) {
- out = static_cast<wgpu::ColorWriteMask>(in.value);
- return true;
- }
-
- bool Converter::Convert(wgpu::BufferUsage& out, const interop::GPUBufferUsageFlags& in) {
- out = static_cast<wgpu::BufferUsage>(in.value);
- return true;
}
-
- bool Converter::Convert(wgpu::MapMode& out, const interop::GPUMapModeFlags& in) {
- out = static_cast<wgpu::MapMode>(in.value);
+ Napi::Error::New(env, "invalid value for GPUTextureAspect").ThrowAsJavaScriptException();
+ return false;
+}
+
+bool Converter::Convert(wgpu::ImageCopyTexture& out, const interop::GPUImageCopyTexture& in) {
+ out = {};
+ return Convert(out.texture, in.texture) && Convert(out.mipLevel, in.mipLevel) &&
+ Convert(out.origin, in.origin) && Convert(out.aspect, in.aspect);
+}
+
+bool Converter::Convert(wgpu::ImageCopyBuffer& out, const interop::GPUImageCopyBuffer& in) {
+ out = {};
+ out.buffer = *in.buffer.As<GPUBuffer>();
+ return Convert(out.layout.offset, in.offset) &&
+ Convert(out.layout.bytesPerRow, in.bytesPerRow) &&
+ Convert(out.layout.rowsPerImage, in.rowsPerImage);
+}
+
+bool Converter::Convert(BufferSource& out, interop::BufferSource in) {
+ out = {};
+ if (auto* view = std::get_if<interop::ArrayBufferView>(&in)) {
+ std::visit(
+ [&](auto&& v) {
+ auto arr = v.ArrayBuffer();
+ out.data = arr.Data();
+ out.size = arr.ByteLength();
+ out.bytesPerElement = v.ElementSize();
+ },
+ *view);
return true;
}
-
- bool Converter::Convert(wgpu::ShaderStage& out, const interop::GPUShaderStageFlags& in) {
- out = static_cast<wgpu::ShaderStage>(in.value);
+ if (auto* arr = std::get_if<interop::ArrayBuffer>(&in)) {
+ out.data = arr->Data();
+ out.size = arr->ByteLength();
+ out.bytesPerElement = 1;
return true;
}
-
- bool Converter::Convert(wgpu::TextureDimension& out, const interop::GPUTextureDimension& in) {
- out = wgpu::TextureDimension::e1D;
- switch (in) {
- case interop::GPUTextureDimension::k1D:
- out = wgpu::TextureDimension::e1D;
- return true;
- case interop::GPUTextureDimension::k2D:
- out = wgpu::TextureDimension::e2D;
- return true;
- case interop::GPUTextureDimension::k3D:
- out = wgpu::TextureDimension::e3D;
- return true;
- }
- Napi::Error::New(env, "invalid value for GPUTextureDimension").ThrowAsJavaScriptException();
- return false;
- }
-
- bool Converter::Convert(wgpu::TextureViewDimension& out,
- const interop::GPUTextureViewDimension& in) {
- out = wgpu::TextureViewDimension::Undefined;
- switch (in) {
- case interop::GPUTextureViewDimension::k1D:
- out = wgpu::TextureViewDimension::e1D;
- return true;
- case interop::GPUTextureViewDimension::k2D:
- out = wgpu::TextureViewDimension::e2D;
- return true;
- case interop::GPUTextureViewDimension::k2DArray:
- out = wgpu::TextureViewDimension::e2DArray;
- return true;
- case interop::GPUTextureViewDimension::kCube:
- out = wgpu::TextureViewDimension::Cube;
- return true;
- case interop::GPUTextureViewDimension::kCubeArray:
- out = wgpu::TextureViewDimension::CubeArray;
- return true;
- case interop::GPUTextureViewDimension::k3D:
- out = wgpu::TextureViewDimension::e3D;
- return true;
- default:
- break;
- }
- Napi::Error::New(env, "invalid value for GPUTextureViewDimension")
- .ThrowAsJavaScriptException();
- return false;
- }
-
- bool Converter::Convert(wgpu::ProgrammableStageDescriptor& out,
- const interop::GPUProgrammableStage& in) {
- out = {};
- out.module = *in.module.As<GPUShaderModule>();
-
- // Replace nulls in the entryPoint name with another character that's disallowed in
- // identifiers. This is so that using "main\0" doesn't match an entryPoint named "main".
- // TODO(dawn:1345): Replace with a way to size strings explicitly in webgpu.h
- char* entryPoint = Allocate<char>(in.entryPoint.size() + 1);
- entryPoint[in.entryPoint.size()] = '\0';
- for (size_t i = 0; i < in.entryPoint.size(); i++) {
- if (in.entryPoint[i] == '\0') {
- entryPoint[i] = '#';
- } else {
- entryPoint[i] = in.entryPoint[i];
- }
- }
- out.entryPoint = entryPoint;
-
- return Convert(out.constants, out.constantCount, in.constants);
- }
-
- bool Converter::Convert(wgpu::ConstantEntry& out,
- const std::string& in_name,
- wgpu::interop::GPUPipelineConstantValue in_value) {
- out.key = in_name.c_str();
- out.value = in_value;
- return true;
+ Napi::Error::New(env, "invalid value for BufferSource").ThrowAsJavaScriptException();
+ return false;
+}
+
+bool Converter::Convert(wgpu::TextureDataLayout& out, const interop::GPUImageDataLayout& in) {
+ out = {};
+ return Convert(out.bytesPerRow, in.bytesPerRow) && Convert(out.offset, in.offset) &&
+ Convert(out.rowsPerImage, in.rowsPerImage);
+}
+
+bool Converter::Convert(wgpu::TextureFormat& out, const interop::GPUTextureFormat& in) {
+ out = wgpu::TextureFormat::Undefined;
+ switch (in) {
+ case interop::GPUTextureFormat::kR8Unorm:
+ out = wgpu::TextureFormat::R8Unorm;
+ return true;
+ case interop::GPUTextureFormat::kR8Snorm:
+ out = wgpu::TextureFormat::R8Snorm;
+ return true;
+ case interop::GPUTextureFormat::kR8Uint:
+ out = wgpu::TextureFormat::R8Uint;
+ return true;
+ case interop::GPUTextureFormat::kR8Sint:
+ out = wgpu::TextureFormat::R8Sint;
+ return true;
+ case interop::GPUTextureFormat::kR16Uint:
+ out = wgpu::TextureFormat::R16Uint;
+ return true;
+ case interop::GPUTextureFormat::kR16Sint:
+ out = wgpu::TextureFormat::R16Sint;
+ return true;
+ case interop::GPUTextureFormat::kR16Float:
+ out = wgpu::TextureFormat::R16Float;
+ return true;
+ case interop::GPUTextureFormat::kRg8Unorm:
+ out = wgpu::TextureFormat::RG8Unorm;
+ return true;
+ case interop::GPUTextureFormat::kRg8Snorm:
+ out = wgpu::TextureFormat::RG8Snorm;
+ return true;
+ case interop::GPUTextureFormat::kRg8Uint:
+ out = wgpu::TextureFormat::RG8Uint;
+ return true;
+ case interop::GPUTextureFormat::kRg8Sint:
+ out = wgpu::TextureFormat::RG8Sint;
+ return true;
+ case interop::GPUTextureFormat::kR32Uint:
+ out = wgpu::TextureFormat::R32Uint;
+ return true;
+ case interop::GPUTextureFormat::kR32Sint:
+ out = wgpu::TextureFormat::R32Sint;
+ return true;
+ case interop::GPUTextureFormat::kR32Float:
+ out = wgpu::TextureFormat::R32Float;
+ return true;
+ case interop::GPUTextureFormat::kRg16Uint:
+ out = wgpu::TextureFormat::RG16Uint;
+ return true;
+ case interop::GPUTextureFormat::kRg16Sint:
+ out = wgpu::TextureFormat::RG16Sint;
+ return true;
+ case interop::GPUTextureFormat::kRg16Float:
+ out = wgpu::TextureFormat::RG16Float;
+ return true;
+ case interop::GPUTextureFormat::kRgba8Unorm:
+ out = wgpu::TextureFormat::RGBA8Unorm;
+ return true;
+ case interop::GPUTextureFormat::kRgba8UnormSrgb:
+ out = wgpu::TextureFormat::RGBA8UnormSrgb;
+ return true;
+ case interop::GPUTextureFormat::kRgba8Snorm:
+ out = wgpu::TextureFormat::RGBA8Snorm;
+ return true;
+ case interop::GPUTextureFormat::kRgba8Uint:
+ out = wgpu::TextureFormat::RGBA8Uint;
+ return true;
+ case interop::GPUTextureFormat::kRgba8Sint:
+ out = wgpu::TextureFormat::RGBA8Sint;
+ return true;
+ case interop::GPUTextureFormat::kBgra8Unorm:
+ out = wgpu::TextureFormat::BGRA8Unorm;
+ return true;
+ case interop::GPUTextureFormat::kBgra8UnormSrgb:
+ out = wgpu::TextureFormat::BGRA8UnormSrgb;
+ return true;
+ case interop::GPUTextureFormat::kRgb9E5Ufloat:
+ out = wgpu::TextureFormat::RGB9E5Ufloat;
+ return true;
+ case interop::GPUTextureFormat::kRgb10A2Unorm:
+ out = wgpu::TextureFormat::RGB10A2Unorm;
+ return true;
+ case interop::GPUTextureFormat::kRg11B10Ufloat:
+ out = wgpu::TextureFormat::RG11B10Ufloat;
+ return true;
+ case interop::GPUTextureFormat::kRg32Uint:
+ out = wgpu::TextureFormat::RG32Uint;
+ return true;
+ case interop::GPUTextureFormat::kRg32Sint:
+ out = wgpu::TextureFormat::RG32Sint;
+ return true;
+ case interop::GPUTextureFormat::kRg32Float:
+ out = wgpu::TextureFormat::RG32Float;
+ return true;
+ case interop::GPUTextureFormat::kRgba16Uint:
+ out = wgpu::TextureFormat::RGBA16Uint;
+ return true;
+ case interop::GPUTextureFormat::kRgba16Sint:
+ out = wgpu::TextureFormat::RGBA16Sint;
+ return true;
+ case interop::GPUTextureFormat::kRgba16Float:
+ out = wgpu::TextureFormat::RGBA16Float;
+ return true;
+ case interop::GPUTextureFormat::kRgba32Uint:
+ out = wgpu::TextureFormat::RGBA32Uint;
+ return true;
+ case interop::GPUTextureFormat::kRgba32Sint:
+ out = wgpu::TextureFormat::RGBA32Sint;
+ return true;
+ case interop::GPUTextureFormat::kRgba32Float:
+ out = wgpu::TextureFormat::RGBA32Float;
+ return true;
+ case interop::GPUTextureFormat::kStencil8:
+ out = wgpu::TextureFormat::Stencil8;
+ return true;
+ case interop::GPUTextureFormat::kDepth16Unorm:
+ out = wgpu::TextureFormat::Depth16Unorm;
+ return true;
+ case interop::GPUTextureFormat::kDepth24Plus:
+ out = wgpu::TextureFormat::Depth24Plus;
+ return true;
+ case interop::GPUTextureFormat::kDepth24PlusStencil8:
+ out = wgpu::TextureFormat::Depth24PlusStencil8;
+ return true;
+ case interop::GPUTextureFormat::kDepth32Float:
+ out = wgpu::TextureFormat::Depth32Float;
+ return true;
+ case interop::GPUTextureFormat::kDepth32FloatStencil8:
+ out = wgpu::TextureFormat::Depth32FloatStencil8;
+ return true;
+ case interop::GPUTextureFormat::kBc1RgbaUnorm:
+ out = wgpu::TextureFormat::BC1RGBAUnorm;
+ return true;
+ case interop::GPUTextureFormat::kBc1RgbaUnormSrgb:
+ out = wgpu::TextureFormat::BC1RGBAUnormSrgb;
+ return true;
+ case interop::GPUTextureFormat::kBc2RgbaUnorm:
+ out = wgpu::TextureFormat::BC2RGBAUnorm;
+ return true;
+ case interop::GPUTextureFormat::kBc2RgbaUnormSrgb:
+ out = wgpu::TextureFormat::BC2RGBAUnormSrgb;
+ return true;
+ case interop::GPUTextureFormat::kBc3RgbaUnorm:
+ out = wgpu::TextureFormat::BC3RGBAUnorm;
+ return true;
+ case interop::GPUTextureFormat::kBc3RgbaUnormSrgb:
+ out = wgpu::TextureFormat::BC3RGBAUnormSrgb;
+ return true;
+ case interop::GPUTextureFormat::kBc4RUnorm:
+ out = wgpu::TextureFormat::BC4RUnorm;
+ return true;
+ case interop::GPUTextureFormat::kBc4RSnorm:
+ out = wgpu::TextureFormat::BC4RSnorm;
+ return true;
+ case interop::GPUTextureFormat::kBc5RgUnorm:
+ out = wgpu::TextureFormat::BC5RGUnorm;
+ return true;
+ case interop::GPUTextureFormat::kBc5RgSnorm:
+ out = wgpu::TextureFormat::BC5RGSnorm;
+ return true;
+ case interop::GPUTextureFormat::kBc6HRgbUfloat:
+ out = wgpu::TextureFormat::BC6HRGBUfloat;
+ return true;
+ case interop::GPUTextureFormat::kBc6HRgbFloat:
+ out = wgpu::TextureFormat::BC6HRGBFloat;
+ return true;
+ case interop::GPUTextureFormat::kBc7RgbaUnorm:
+ out = wgpu::TextureFormat::BC7RGBAUnorm;
+ return true;
+ case interop::GPUTextureFormat::kBc7RgbaUnormSrgb:
+ out = wgpu::TextureFormat::BC7RGBAUnormSrgb;
+ return true;
+ case interop::GPUTextureFormat::kEtc2Rgb8Unorm:
+ out = wgpu::TextureFormat::ETC2RGB8Unorm;
+ return true;
+ case interop::GPUTextureFormat::kEtc2Rgb8UnormSrgb:
+ out = wgpu::TextureFormat::ETC2RGB8UnormSrgb;
+ return true;
+ case interop::GPUTextureFormat::kEtc2Rgb8A1Unorm:
+ out = wgpu::TextureFormat::ETC2RGB8A1Unorm;
+ return true;
+ case interop::GPUTextureFormat::kEtc2Rgb8A1UnormSrgb:
+ out = wgpu::TextureFormat::ETC2RGB8A1UnormSrgb;
+ return true;
+ case interop::GPUTextureFormat::kEtc2Rgba8Unorm:
+ out = wgpu::TextureFormat::ETC2RGBA8Unorm;
+ return true;
+ case interop::GPUTextureFormat::kEtc2Rgba8UnormSrgb:
+ out = wgpu::TextureFormat::ETC2RGBA8UnormSrgb;
+ return true;
+ case interop::GPUTextureFormat::kEacR11Unorm:
+ out = wgpu::TextureFormat::EACR11Unorm;
+ return true;
+ case interop::GPUTextureFormat::kEacR11Snorm:
+ out = wgpu::TextureFormat::EACR11Snorm;
+ return true;
+ case interop::GPUTextureFormat::kEacRg11Unorm:
+ out = wgpu::TextureFormat::EACRG11Unorm;
+ return true;
+ case interop::GPUTextureFormat::kEacRg11Snorm:
+ out = wgpu::TextureFormat::EACRG11Snorm;
+ return true;
+ case interop::GPUTextureFormat::kAstc4X4Unorm:
+ out = wgpu::TextureFormat::ASTC4x4Unorm;
+ return true;
+ case interop::GPUTextureFormat::kAstc4X4UnormSrgb:
+ out = wgpu::TextureFormat::ASTC4x4UnormSrgb;
+ return true;
+ case interop::GPUTextureFormat::kAstc5X4Unorm:
+ out = wgpu::TextureFormat::ASTC5x4Unorm;
+ return true;
+ case interop::GPUTextureFormat::kAstc5X4UnormSrgb:
+ out = wgpu::TextureFormat::ASTC5x4UnormSrgb;
+ return true;
+ case interop::GPUTextureFormat::kAstc5X5Unorm:
+ out = wgpu::TextureFormat::ASTC5x5Unorm;
+ return true;
+ case interop::GPUTextureFormat::kAstc5X5UnormSrgb:
+ out = wgpu::TextureFormat::ASTC5x5UnormSrgb;
+ return true;
+ case interop::GPUTextureFormat::kAstc6X5Unorm:
+ out = wgpu::TextureFormat::ASTC6x5Unorm;
+ return true;
+ case interop::GPUTextureFormat::kAstc6X5UnormSrgb:
+ out = wgpu::TextureFormat::ASTC6x5UnormSrgb;
+ return true;
+ case interop::GPUTextureFormat::kAstc6X6Unorm:
+ out = wgpu::TextureFormat::ASTC6x6Unorm;
+ return true;
+ case interop::GPUTextureFormat::kAstc6X6UnormSrgb:
+ out = wgpu::TextureFormat::ASTC6x6UnormSrgb;
+ return true;
+ case interop::GPUTextureFormat::kAstc8X5Unorm:
+ out = wgpu::TextureFormat::ASTC8x5Unorm;
+ return true;
+ case interop::GPUTextureFormat::kAstc8X5UnormSrgb:
+ out = wgpu::TextureFormat::ASTC8x5UnormSrgb;
+ return true;
+ case interop::GPUTextureFormat::kAstc8X6Unorm:
+ out = wgpu::TextureFormat::ASTC8x6Unorm;
+ return true;
+ case interop::GPUTextureFormat::kAstc8X6UnormSrgb:
+ out = wgpu::TextureFormat::ASTC8x6UnormSrgb;
+ return true;
+ case interop::GPUTextureFormat::kAstc8X8Unorm:
+ out = wgpu::TextureFormat::ASTC8x8Unorm;
+ return true;
+ case interop::GPUTextureFormat::kAstc8X8UnormSrgb:
+ out = wgpu::TextureFormat::ASTC8x8UnormSrgb;
+ return true;
+ case interop::GPUTextureFormat::kAstc10X5Unorm:
+ out = wgpu::TextureFormat::ASTC10x5Unorm;
+ return true;
+ case interop::GPUTextureFormat::kAstc10X5UnormSrgb:
+ out = wgpu::TextureFormat::ASTC10x5UnormSrgb;
+ return true;
+ case interop::GPUTextureFormat::kAstc10X6Unorm:
+ out = wgpu::TextureFormat::ASTC10x6Unorm;
+ return true;
+ case interop::GPUTextureFormat::kAstc10X6UnormSrgb:
+ out = wgpu::TextureFormat::ASTC10x6UnormSrgb;
+ return true;
+ case interop::GPUTextureFormat::kAstc10X8Unorm:
+ out = wgpu::TextureFormat::ASTC10x8Unorm;
+ return true;
+ case interop::GPUTextureFormat::kAstc10X8UnormSrgb:
+ out = wgpu::TextureFormat::ASTC10x8UnormSrgb;
+ return true;
+ case interop::GPUTextureFormat::kAstc10X10Unorm:
+ out = wgpu::TextureFormat::ASTC10x10Unorm;
+ return true;
+ case interop::GPUTextureFormat::kAstc10X10UnormSrgb:
+ out = wgpu::TextureFormat::ASTC10x10UnormSrgb;
+ return true;
+ case interop::GPUTextureFormat::kAstc12X10Unorm:
+ out = wgpu::TextureFormat::ASTC12x10Unorm;
+ return true;
+ case interop::GPUTextureFormat::kAstc12X10UnormSrgb:
+ out = wgpu::TextureFormat::ASTC12x10UnormSrgb;
+ return true;
+ case interop::GPUTextureFormat::kAstc12X12Unorm:
+ out = wgpu::TextureFormat::ASTC12x12Unorm;
+ return true;
+ case interop::GPUTextureFormat::kAstc12X12UnormSrgb:
+ out = wgpu::TextureFormat::ASTC12x12UnormSrgb;
+ return true;
}
-
- bool Converter::Convert(wgpu::BlendComponent& out, const interop::GPUBlendComponent& in) {
- out = {};
- return Convert(out.operation, in.operation) && Convert(out.dstFactor, in.dstFactor) &&
- Convert(out.srcFactor, in.srcFactor);
+ Napi::Error::New(env, "invalid value for GPUTextureFormat").ThrowAsJavaScriptException();
+ return false;
+}
+
+bool Converter::Convert(wgpu::TextureUsage& out, const interop::GPUTextureUsageFlags& in) {
+ out = static_cast<wgpu::TextureUsage>(in.value);
+ return true;
+}
+
+bool Converter::Convert(wgpu::ColorWriteMask& out, const interop::GPUColorWriteFlags& in) {
+ out = static_cast<wgpu::ColorWriteMask>(in.value);
+ return true;
+}
+
+bool Converter::Convert(wgpu::BufferUsage& out, const interop::GPUBufferUsageFlags& in) {
+ out = static_cast<wgpu::BufferUsage>(in.value);
+ return true;
+}
+
+bool Converter::Convert(wgpu::MapMode& out, const interop::GPUMapModeFlags& in) {
+ out = static_cast<wgpu::MapMode>(in.value);
+ return true;
+}
+
+bool Converter::Convert(wgpu::ShaderStage& out, const interop::GPUShaderStageFlags& in) {
+ out = static_cast<wgpu::ShaderStage>(in.value);
+ return true;
+}
+
+bool Converter::Convert(wgpu::TextureDimension& out, const interop::GPUTextureDimension& in) {
+ out = wgpu::TextureDimension::e1D;
+ switch (in) {
+ case interop::GPUTextureDimension::k1D:
+ out = wgpu::TextureDimension::e1D;
+ return true;
+ case interop::GPUTextureDimension::k2D:
+ out = wgpu::TextureDimension::e2D;
+ return true;
+ case interop::GPUTextureDimension::k3D:
+ out = wgpu::TextureDimension::e3D;
+ return true;
}
-
- bool Converter::Convert(wgpu::BlendFactor& out, const interop::GPUBlendFactor& in) {
- out = wgpu::BlendFactor::Zero;
- switch (in) {
- case interop::GPUBlendFactor::kZero:
- out = wgpu::BlendFactor::Zero;
- return true;
- case interop::GPUBlendFactor::kOne:
- out = wgpu::BlendFactor::One;
- return true;
- case interop::GPUBlendFactor::kSrc:
- out = wgpu::BlendFactor::Src;
- return true;
- case interop::GPUBlendFactor::kOneMinusSrc:
- out = wgpu::BlendFactor::OneMinusSrc;
- return true;
- case interop::GPUBlendFactor::kSrcAlpha:
- out = wgpu::BlendFactor::SrcAlpha;
- return true;
- case interop::GPUBlendFactor::kOneMinusSrcAlpha:
- out = wgpu::BlendFactor::OneMinusSrcAlpha;
- return true;
- case interop::GPUBlendFactor::kDst:
- out = wgpu::BlendFactor::Dst;
- return true;
- case interop::GPUBlendFactor::kOneMinusDst:
- out = wgpu::BlendFactor::OneMinusDst;
- return true;
- case interop::GPUBlendFactor::kDstAlpha:
- out = wgpu::BlendFactor::DstAlpha;
- return true;
- case interop::GPUBlendFactor::kOneMinusDstAlpha:
- out = wgpu::BlendFactor::OneMinusDstAlpha;
- return true;
- case interop::GPUBlendFactor::kSrcAlphaSaturated:
- out = wgpu::BlendFactor::SrcAlphaSaturated;
- return true;
- case interop::GPUBlendFactor::kConstant:
- out = wgpu::BlendFactor::Constant;
- return true;
- case interop::GPUBlendFactor::kOneMinusConstant:
- out = wgpu::BlendFactor::OneMinusConstant;
- return true;
- default:
- break;
- }
- Napi::Error::New(env, "invalid value for GPUBlendFactor").ThrowAsJavaScriptException();
- return false;
+ Napi::Error::New(env, "invalid value for GPUTextureDimension").ThrowAsJavaScriptException();
+ return false;
+}
+
+bool Converter::Convert(wgpu::TextureViewDimension& out,
+ const interop::GPUTextureViewDimension& in) {
+ out = wgpu::TextureViewDimension::Undefined;
+ switch (in) {
+ case interop::GPUTextureViewDimension::k1D:
+ out = wgpu::TextureViewDimension::e1D;
+ return true;
+ case interop::GPUTextureViewDimension::k2D:
+ out = wgpu::TextureViewDimension::e2D;
+ return true;
+ case interop::GPUTextureViewDimension::k2DArray:
+ out = wgpu::TextureViewDimension::e2DArray;
+ return true;
+ case interop::GPUTextureViewDimension::kCube:
+ out = wgpu::TextureViewDimension::Cube;
+ return true;
+ case interop::GPUTextureViewDimension::kCubeArray:
+ out = wgpu::TextureViewDimension::CubeArray;
+ return true;
+ case interop::GPUTextureViewDimension::k3D:
+ out = wgpu::TextureViewDimension::e3D;
+ return true;
+ default:
+ break;
}
-
- bool Converter::Convert(wgpu::BlendOperation& out, const interop::GPUBlendOperation& in) {
- out = wgpu::BlendOperation::Add;
- switch (in) {
- case interop::GPUBlendOperation::kAdd:
- out = wgpu::BlendOperation::Add;
- return true;
- case interop::GPUBlendOperation::kSubtract:
- out = wgpu::BlendOperation::Subtract;
- return true;
- case interop::GPUBlendOperation::kReverseSubtract:
- out = wgpu::BlendOperation::ReverseSubtract;
- return true;
- case interop::GPUBlendOperation::kMin:
- out = wgpu::BlendOperation::Min;
- return true;
- case interop::GPUBlendOperation::kMax:
- out = wgpu::BlendOperation::Max;
- return true;
- default:
- break;
+ Napi::Error::New(env, "invalid value for GPUTextureViewDimension").ThrowAsJavaScriptException();
+ return false;
+}
+
+bool Converter::Convert(wgpu::ProgrammableStageDescriptor& out,
+ const interop::GPUProgrammableStage& in) {
+ out = {};
+ out.module = *in.module.As<GPUShaderModule>();
+
+ // Replace nulls in the entryPoint name with another character that's disallowed in
+ // identifiers. This is so that using "main\0" doesn't match an entryPoint named "main".
+ // TODO(dawn:1345): Replace with a way to size strings explicitly in webgpu.h
+ char* entryPoint = Allocate<char>(in.entryPoint.size() + 1);
+ entryPoint[in.entryPoint.size()] = '\0';
+ for (size_t i = 0; i < in.entryPoint.size(); i++) {
+ if (in.entryPoint[i] == '\0') {
+ entryPoint[i] = '#';
+ } else {
+ entryPoint[i] = in.entryPoint[i];
}
- Napi::Error::New(env, "invalid value for GPUBlendOperation").ThrowAsJavaScriptException();
- return false;
}
-
- bool Converter::Convert(wgpu::BlendState& out, const interop::GPUBlendState& in) {
- out = {};
- return Convert(out.alpha, in.alpha) && Convert(out.color, in.color);
- }
-
- bool Converter::Convert(wgpu::PrimitiveState& out, const interop::GPUPrimitiveState& in) {
- out = {};
- return Convert(out.topology, in.topology) &&
- Convert(out.stripIndexFormat, in.stripIndexFormat) &&
- Convert(out.frontFace, in.frontFace) && Convert(out.cullMode, in.cullMode);
- }
-
- bool Converter::Convert(wgpu::ColorTargetState& out, const interop::GPUColorTargetState& in) {
- out = {};
- return Convert(out.format, in.format) && Convert(out.blend, in.blend) &&
- Convert(out.writeMask, in.writeMask);
- }
-
- bool Converter::Convert(wgpu::DepthStencilState& out, const interop::GPUDepthStencilState& in) {
- out = {};
- return Convert(out.format, in.format) &&
- Convert(out.depthWriteEnabled, in.depthWriteEnabled) &&
- Convert(out.depthCompare, in.depthCompare) &&
- Convert(out.stencilFront, in.stencilFront) &&
- Convert(out.stencilBack, in.stencilBack) &&
- Convert(out.stencilReadMask, in.stencilReadMask) &&
- Convert(out.stencilWriteMask, in.stencilWriteMask) &&
- Convert(out.depthBias, in.depthBias) &&
- Convert(out.depthBiasSlopeScale, in.depthBiasSlopeScale) &&
- Convert(out.depthBiasClamp, in.depthBiasClamp);
- }
-
- bool Converter::Convert(wgpu::MultisampleState& out, const interop::GPUMultisampleState& in) {
- out = {};
- return Convert(out.count, in.count) && Convert(out.mask, in.mask) &&
- Convert(out.alphaToCoverageEnabled, in.alphaToCoverageEnabled);
- }
-
- bool Converter::Convert(wgpu::FragmentState& out, const interop::GPUFragmentState& in) {
- out = {};
- return Convert(out.targets, out.targetCount, in.targets) && //
- Convert(out.module, in.module) && //
- Convert(out.entryPoint, in.entryPoint) && //
- Convert(out.constants, out.constantCount, in.constants);
+ out.entryPoint = entryPoint;
+
+ return Convert(out.constants, out.constantCount, in.constants);
+}
+
+bool Converter::Convert(wgpu::ConstantEntry& out,
+ const std::string& in_name,
+ wgpu::interop::GPUPipelineConstantValue in_value) {
+ out.key = in_name.c_str();
+ out.value = in_value;
+ return true;
+}
+
+bool Converter::Convert(wgpu::BlendComponent& out, const interop::GPUBlendComponent& in) {
+ out = {};
+ return Convert(out.operation, in.operation) && Convert(out.dstFactor, in.dstFactor) &&
+ Convert(out.srcFactor, in.srcFactor);
+}
+
+bool Converter::Convert(wgpu::BlendFactor& out, const interop::GPUBlendFactor& in) {
+ out = wgpu::BlendFactor::Zero;
+ switch (in) {
+ case interop::GPUBlendFactor::kZero:
+ out = wgpu::BlendFactor::Zero;
+ return true;
+ case interop::GPUBlendFactor::kOne:
+ out = wgpu::BlendFactor::One;
+ return true;
+ case interop::GPUBlendFactor::kSrc:
+ out = wgpu::BlendFactor::Src;
+ return true;
+ case interop::GPUBlendFactor::kOneMinusSrc:
+ out = wgpu::BlendFactor::OneMinusSrc;
+ return true;
+ case interop::GPUBlendFactor::kSrcAlpha:
+ out = wgpu::BlendFactor::SrcAlpha;
+ return true;
+ case interop::GPUBlendFactor::kOneMinusSrcAlpha:
+ out = wgpu::BlendFactor::OneMinusSrcAlpha;
+ return true;
+ case interop::GPUBlendFactor::kDst:
+ out = wgpu::BlendFactor::Dst;
+ return true;
+ case interop::GPUBlendFactor::kOneMinusDst:
+ out = wgpu::BlendFactor::OneMinusDst;
+ return true;
+ case interop::GPUBlendFactor::kDstAlpha:
+ out = wgpu::BlendFactor::DstAlpha;
+ return true;
+ case interop::GPUBlendFactor::kOneMinusDstAlpha:
+ out = wgpu::BlendFactor::OneMinusDstAlpha;
+ return true;
+ case interop::GPUBlendFactor::kSrcAlphaSaturated:
+ out = wgpu::BlendFactor::SrcAlphaSaturated;
+ return true;
+ case interop::GPUBlendFactor::kConstant:
+ out = wgpu::BlendFactor::Constant;
+ return true;
+ case interop::GPUBlendFactor::kOneMinusConstant:
+ out = wgpu::BlendFactor::OneMinusConstant;
+ return true;
+ default:
+ break;
}
-
- bool Converter::Convert(wgpu::PrimitiveTopology& out, const interop::GPUPrimitiveTopology& in) {
- out = wgpu::PrimitiveTopology::LineList;
- switch (in) {
- case interop::GPUPrimitiveTopology::kPointList:
- out = wgpu::PrimitiveTopology::PointList;
- return true;
- case interop::GPUPrimitiveTopology::kLineList:
- out = wgpu::PrimitiveTopology::LineList;
- return true;
- case interop::GPUPrimitiveTopology::kLineStrip:
- out = wgpu::PrimitiveTopology::LineStrip;
- return true;
- case interop::GPUPrimitiveTopology::kTriangleList:
- out = wgpu::PrimitiveTopology::TriangleList;
- return true;
- case interop::GPUPrimitiveTopology::kTriangleStrip:
- out = wgpu::PrimitiveTopology::TriangleStrip;
- return true;
- }
- Napi::Error::New(env, "invalid value for GPUPrimitiveTopology")
- .ThrowAsJavaScriptException();
- return false;
+ Napi::Error::New(env, "invalid value for GPUBlendFactor").ThrowAsJavaScriptException();
+ return false;
+}
+
+bool Converter::Convert(wgpu::BlendOperation& out, const interop::GPUBlendOperation& in) {
+ out = wgpu::BlendOperation::Add;
+ switch (in) {
+ case interop::GPUBlendOperation::kAdd:
+ out = wgpu::BlendOperation::Add;
+ return true;
+ case interop::GPUBlendOperation::kSubtract:
+ out = wgpu::BlendOperation::Subtract;
+ return true;
+ case interop::GPUBlendOperation::kReverseSubtract:
+ out = wgpu::BlendOperation::ReverseSubtract;
+ return true;
+ case interop::GPUBlendOperation::kMin:
+ out = wgpu::BlendOperation::Min;
+ return true;
+ case interop::GPUBlendOperation::kMax:
+ out = wgpu::BlendOperation::Max;
+ return true;
+ default:
+ break;
}
-
- bool Converter::Convert(wgpu::FrontFace& out, const interop::GPUFrontFace& in) {
- out = wgpu::FrontFace::CW;
- switch (in) {
- case interop::GPUFrontFace::kCw:
- out = wgpu::FrontFace::CW;
- return true;
- case interop::GPUFrontFace::kCcw:
- out = wgpu::FrontFace::CCW;
- return true;
- }
- Napi::Error::New(env, "invalid value for GPUFrontFace").ThrowAsJavaScriptException();
- return false;
+ Napi::Error::New(env, "invalid value for GPUBlendOperation").ThrowAsJavaScriptException();
+ return false;
+}
+
+bool Converter::Convert(wgpu::BlendState& out, const interop::GPUBlendState& in) {
+ out = {};
+ return Convert(out.alpha, in.alpha) && Convert(out.color, in.color);
+}
+
+bool Converter::Convert(wgpu::PrimitiveState& out, const interop::GPUPrimitiveState& in) {
+ out = {};
+ return Convert(out.topology, in.topology) &&
+ Convert(out.stripIndexFormat, in.stripIndexFormat) &&
+ Convert(out.frontFace, in.frontFace) && Convert(out.cullMode, in.cullMode);
+}
+
+bool Converter::Convert(wgpu::ColorTargetState& out, const interop::GPUColorTargetState& in) {
+ out = {};
+ return Convert(out.format, in.format) && Convert(out.blend, in.blend) &&
+ Convert(out.writeMask, in.writeMask);
+}
+
+bool Converter::Convert(wgpu::DepthStencilState& out, const interop::GPUDepthStencilState& in) {
+ out = {};
+ return Convert(out.format, in.format) && Convert(out.depthWriteEnabled, in.depthWriteEnabled) &&
+ Convert(out.depthCompare, in.depthCompare) &&
+ Convert(out.stencilFront, in.stencilFront) && Convert(out.stencilBack, in.stencilBack) &&
+ Convert(out.stencilReadMask, in.stencilReadMask) &&
+ Convert(out.stencilWriteMask, in.stencilWriteMask) &&
+ Convert(out.depthBias, in.depthBias) &&
+ Convert(out.depthBiasSlopeScale, in.depthBiasSlopeScale) &&
+ Convert(out.depthBiasClamp, in.depthBiasClamp);
+}
+
+bool Converter::Convert(wgpu::MultisampleState& out, const interop::GPUMultisampleState& in) {
+ out = {};
+ return Convert(out.count, in.count) && Convert(out.mask, in.mask) &&
+ Convert(out.alphaToCoverageEnabled, in.alphaToCoverageEnabled);
+}
+
+bool Converter::Convert(wgpu::FragmentState& out, const interop::GPUFragmentState& in) {
+ out = {};
+ return Convert(out.targets, out.targetCount, in.targets) && //
+ Convert(out.module, in.module) && //
+ Convert(out.entryPoint, in.entryPoint) && //
+ Convert(out.constants, out.constantCount, in.constants);
+}
+
+bool Converter::Convert(wgpu::PrimitiveTopology& out, const interop::GPUPrimitiveTopology& in) {
+ out = wgpu::PrimitiveTopology::LineList;
+ switch (in) {
+ case interop::GPUPrimitiveTopology::kPointList:
+ out = wgpu::PrimitiveTopology::PointList;
+ return true;
+ case interop::GPUPrimitiveTopology::kLineList:
+ out = wgpu::PrimitiveTopology::LineList;
+ return true;
+ case interop::GPUPrimitiveTopology::kLineStrip:
+ out = wgpu::PrimitiveTopology::LineStrip;
+ return true;
+ case interop::GPUPrimitiveTopology::kTriangleList:
+ out = wgpu::PrimitiveTopology::TriangleList;
+ return true;
+ case interop::GPUPrimitiveTopology::kTriangleStrip:
+ out = wgpu::PrimitiveTopology::TriangleStrip;
+ return true;
}
-
- bool Converter::Convert(wgpu::CullMode& out, const interop::GPUCullMode& in) {
- out = wgpu::CullMode::None;
- switch (in) {
- case interop::GPUCullMode::kNone:
- out = wgpu::CullMode::None;
- return true;
- case interop::GPUCullMode::kFront:
- out = wgpu::CullMode::Front;
- return true;
- case interop::GPUCullMode::kBack:
- out = wgpu::CullMode::Back;
- return true;
- }
- Napi::Error::New(env, "invalid value for GPUCullMode").ThrowAsJavaScriptException();
- return false;
+ Napi::Error::New(env, "invalid value for GPUPrimitiveTopology").ThrowAsJavaScriptException();
+ return false;
+}
+
+bool Converter::Convert(wgpu::FrontFace& out, const interop::GPUFrontFace& in) {
+ out = wgpu::FrontFace::CW;
+ switch (in) {
+ case interop::GPUFrontFace::kCw:
+ out = wgpu::FrontFace::CW;
+ return true;
+ case interop::GPUFrontFace::kCcw:
+ out = wgpu::FrontFace::CCW;
+ return true;
}
-
- bool Converter::Convert(wgpu::CompareFunction& out, const interop::GPUCompareFunction& in) {
- out = wgpu::CompareFunction::Undefined;
- switch (in) {
- case interop::GPUCompareFunction::kNever:
- out = wgpu::CompareFunction::Never;
- return true;
- case interop::GPUCompareFunction::kLess:
- out = wgpu::CompareFunction::Less;
- return true;
- case interop::GPUCompareFunction::kLessEqual:
- out = wgpu::CompareFunction::LessEqual;
- return true;
- case interop::GPUCompareFunction::kGreater:
- out = wgpu::CompareFunction::Greater;
- return true;
- case interop::GPUCompareFunction::kGreaterEqual:
- out = wgpu::CompareFunction::GreaterEqual;
- return true;
- case interop::GPUCompareFunction::kEqual:
- out = wgpu::CompareFunction::Equal;
- return true;
- case interop::GPUCompareFunction::kNotEqual:
- out = wgpu::CompareFunction::NotEqual;
- return true;
- case interop::GPUCompareFunction::kAlways:
- out = wgpu::CompareFunction::Always;
- return true;
- }
- Napi::Error::New(env, "invalid value for GPUCompareFunction").ThrowAsJavaScriptException();
- return false;
+ Napi::Error::New(env, "invalid value for GPUFrontFace").ThrowAsJavaScriptException();
+ return false;
+}
+
+bool Converter::Convert(wgpu::CullMode& out, const interop::GPUCullMode& in) {
+ out = wgpu::CullMode::None;
+ switch (in) {
+ case interop::GPUCullMode::kNone:
+ out = wgpu::CullMode::None;
+ return true;
+ case interop::GPUCullMode::kFront:
+ out = wgpu::CullMode::Front;
+ return true;
+ case interop::GPUCullMode::kBack:
+ out = wgpu::CullMode::Back;
+ return true;
}
-
- bool Converter::Convert(wgpu::IndexFormat& out, const interop::GPUIndexFormat& in) {
- out = wgpu::IndexFormat::Undefined;
- switch (in) {
- case interop::GPUIndexFormat::kUint16:
- out = wgpu::IndexFormat::Uint16;
- return true;
- case interop::GPUIndexFormat::kUint32:
- out = wgpu::IndexFormat::Uint32;
- return true;
- }
- Napi::Error::New(env, "invalid value for GPUIndexFormat").ThrowAsJavaScriptException();
- return false;
+ Napi::Error::New(env, "invalid value for GPUCullMode").ThrowAsJavaScriptException();
+ return false;
+}
+
+bool Converter::Convert(wgpu::CompareFunction& out, const interop::GPUCompareFunction& in) {
+ out = wgpu::CompareFunction::Undefined;
+ switch (in) {
+ case interop::GPUCompareFunction::kNever:
+ out = wgpu::CompareFunction::Never;
+ return true;
+ case interop::GPUCompareFunction::kLess:
+ out = wgpu::CompareFunction::Less;
+ return true;
+ case interop::GPUCompareFunction::kLessEqual:
+ out = wgpu::CompareFunction::LessEqual;
+ return true;
+ case interop::GPUCompareFunction::kGreater:
+ out = wgpu::CompareFunction::Greater;
+ return true;
+ case interop::GPUCompareFunction::kGreaterEqual:
+ out = wgpu::CompareFunction::GreaterEqual;
+ return true;
+ case interop::GPUCompareFunction::kEqual:
+ out = wgpu::CompareFunction::Equal;
+ return true;
+ case interop::GPUCompareFunction::kNotEqual:
+ out = wgpu::CompareFunction::NotEqual;
+ return true;
+ case interop::GPUCompareFunction::kAlways:
+ out = wgpu::CompareFunction::Always;
+ return true;
}
-
- bool Converter::Convert(wgpu::StencilOperation& out, const interop::GPUStencilOperation& in) {
- out = wgpu::StencilOperation::Zero;
- switch (in) {
- case interop::GPUStencilOperation::kKeep:
- out = wgpu::StencilOperation::Keep;
- return true;
- case interop::GPUStencilOperation::kZero:
- out = wgpu::StencilOperation::Zero;
- return true;
- case interop::GPUStencilOperation::kReplace:
- out = wgpu::StencilOperation::Replace;
- return true;
- case interop::GPUStencilOperation::kInvert:
- out = wgpu::StencilOperation::Invert;
- return true;
- case interop::GPUStencilOperation::kIncrementClamp:
- out = wgpu::StencilOperation::IncrementClamp;
- return true;
- case interop::GPUStencilOperation::kDecrementClamp:
- out = wgpu::StencilOperation::DecrementClamp;
- return true;
- case interop::GPUStencilOperation::kIncrementWrap:
- out = wgpu::StencilOperation::IncrementWrap;
- return true;
- case interop::GPUStencilOperation::kDecrementWrap:
- out = wgpu::StencilOperation::DecrementWrap;
- return true;
- }
- Napi::Error::New(env, "invalid value for GPUStencilOperation").ThrowAsJavaScriptException();
- return false;
+ Napi::Error::New(env, "invalid value for GPUCompareFunction").ThrowAsJavaScriptException();
+ return false;
+}
+
+bool Converter::Convert(wgpu::IndexFormat& out, const interop::GPUIndexFormat& in) {
+ out = wgpu::IndexFormat::Undefined;
+ switch (in) {
+ case interop::GPUIndexFormat::kUint16:
+ out = wgpu::IndexFormat::Uint16;
+ return true;
+ case interop::GPUIndexFormat::kUint32:
+ out = wgpu::IndexFormat::Uint32;
+ return true;
}
-
- bool Converter::Convert(wgpu::StencilFaceState& out, const interop::GPUStencilFaceState& in) {
- return Convert(out.compare, in.compare) && Convert(out.failOp, in.failOp) &&
- Convert(out.depthFailOp, in.depthFailOp) && Convert(out.passOp, in.passOp);
+ Napi::Error::New(env, "invalid value for GPUIndexFormat").ThrowAsJavaScriptException();
+ return false;
+}
+
+bool Converter::Convert(wgpu::StencilOperation& out, const interop::GPUStencilOperation& in) {
+ out = wgpu::StencilOperation::Zero;
+ switch (in) {
+ case interop::GPUStencilOperation::kKeep:
+ out = wgpu::StencilOperation::Keep;
+ return true;
+ case interop::GPUStencilOperation::kZero:
+ out = wgpu::StencilOperation::Zero;
+ return true;
+ case interop::GPUStencilOperation::kReplace:
+ out = wgpu::StencilOperation::Replace;
+ return true;
+ case interop::GPUStencilOperation::kInvert:
+ out = wgpu::StencilOperation::Invert;
+ return true;
+ case interop::GPUStencilOperation::kIncrementClamp:
+ out = wgpu::StencilOperation::IncrementClamp;
+ return true;
+ case interop::GPUStencilOperation::kDecrementClamp:
+ out = wgpu::StencilOperation::DecrementClamp;
+ return true;
+ case interop::GPUStencilOperation::kIncrementWrap:
+ out = wgpu::StencilOperation::IncrementWrap;
+ return true;
+ case interop::GPUStencilOperation::kDecrementWrap:
+ out = wgpu::StencilOperation::DecrementWrap;
+ return true;
}
-
- bool Converter::Convert(wgpu::VertexBufferLayout& out,
- const interop::GPUVertexBufferLayout& in) {
- out = {};
- return Convert(out.attributes, out.attributeCount, in.attributes) &&
- Convert(out.arrayStride, in.arrayStride) && Convert(out.stepMode, in.stepMode);
+ Napi::Error::New(env, "invalid value for GPUStencilOperation").ThrowAsJavaScriptException();
+ return false;
+}
+
+bool Converter::Convert(wgpu::StencilFaceState& out, const interop::GPUStencilFaceState& in) {
+ return Convert(out.compare, in.compare) && Convert(out.failOp, in.failOp) &&
+ Convert(out.depthFailOp, in.depthFailOp) && Convert(out.passOp, in.passOp);
+}
+
+bool Converter::Convert(wgpu::VertexBufferLayout& out, const interop::GPUVertexBufferLayout& in) {
+ out = {};
+ return Convert(out.attributes, out.attributeCount, in.attributes) &&
+ Convert(out.arrayStride, in.arrayStride) && Convert(out.stepMode, in.stepMode);
+}
+
+bool Converter::Convert(wgpu::VertexState& out, const interop::GPUVertexState& in) {
+ out = {};
+ return Convert(out.module, in.module) && Convert(out.buffers, out.bufferCount, in.buffers) &&
+ Convert(out.entryPoint, in.entryPoint) &&
+ Convert(out.constants, out.constantCount, in.constants);
+}
+
+bool Converter::Convert(wgpu::VertexStepMode& out, const interop::GPUVertexStepMode& in) {
+ out = wgpu::VertexStepMode::Instance;
+ switch (in) {
+ case interop::GPUVertexStepMode::kInstance:
+ out = wgpu::VertexStepMode::Instance;
+ return true;
+ case interop::GPUVertexStepMode::kVertex:
+ out = wgpu::VertexStepMode::Vertex;
+ return true;
+ default:
+ break;
}
-
- bool Converter::Convert(wgpu::VertexState& out, const interop::GPUVertexState& in) {
- out = {};
- return Convert(out.module, in.module) &&
- Convert(out.buffers, out.bufferCount, in.buffers) &&
- Convert(out.entryPoint, in.entryPoint) &&
- Convert(out.constants, out.constantCount, in.constants);
+ Napi::Error::New(env, "invalid value for GPUVertexStepMode").ThrowAsJavaScriptException();
+ return false;
+}
+
+bool Converter::Convert(wgpu::VertexAttribute& out, const interop::GPUVertexAttribute& in) {
+ return Convert(out.format, in.format) && Convert(out.offset, in.offset) &&
+ Convert(out.shaderLocation, in.shaderLocation);
+}
+
+bool Converter::Convert(wgpu::VertexFormat& out, const interop::GPUVertexFormat& in) {
+ out = wgpu::VertexFormat::Undefined;
+ switch (in) {
+ case interop::GPUVertexFormat::kUint8X2:
+ out = wgpu::VertexFormat::Uint8x2;
+ return true;
+ case interop::GPUVertexFormat::kUint8X4:
+ out = wgpu::VertexFormat::Uint8x4;
+ return true;
+ case interop::GPUVertexFormat::kSint8X2:
+ out = wgpu::VertexFormat::Sint8x2;
+ return true;
+ case interop::GPUVertexFormat::kSint8X4:
+ out = wgpu::VertexFormat::Sint8x4;
+ return true;
+ case interop::GPUVertexFormat::kUnorm8X2:
+ out = wgpu::VertexFormat::Unorm8x2;
+ return true;
+ case interop::GPUVertexFormat::kUnorm8X4:
+ out = wgpu::VertexFormat::Unorm8x4;
+ return true;
+ case interop::GPUVertexFormat::kSnorm8X2:
+ out = wgpu::VertexFormat::Snorm8x2;
+ return true;
+ case interop::GPUVertexFormat::kSnorm8X4:
+ out = wgpu::VertexFormat::Snorm8x4;
+ return true;
+ case interop::GPUVertexFormat::kUint16X2:
+ out = wgpu::VertexFormat::Uint16x2;
+ return true;
+ case interop::GPUVertexFormat::kUint16X4:
+ out = wgpu::VertexFormat::Uint16x4;
+ return true;
+ case interop::GPUVertexFormat::kSint16X2:
+ out = wgpu::VertexFormat::Sint16x2;
+ return true;
+ case interop::GPUVertexFormat::kSint16X4:
+ out = wgpu::VertexFormat::Sint16x4;
+ return true;
+ case interop::GPUVertexFormat::kUnorm16X2:
+ out = wgpu::VertexFormat::Unorm16x2;
+ return true;
+ case interop::GPUVertexFormat::kUnorm16X4:
+ out = wgpu::VertexFormat::Unorm16x4;
+ return true;
+ case interop::GPUVertexFormat::kSnorm16X2:
+ out = wgpu::VertexFormat::Snorm16x2;
+ return true;
+ case interop::GPUVertexFormat::kSnorm16X4:
+ out = wgpu::VertexFormat::Snorm16x4;
+ return true;
+ case interop::GPUVertexFormat::kFloat16X2:
+ out = wgpu::VertexFormat::Float16x2;
+ return true;
+ case interop::GPUVertexFormat::kFloat16X4:
+ out = wgpu::VertexFormat::Float16x4;
+ return true;
+ case interop::GPUVertexFormat::kFloat32:
+ out = wgpu::VertexFormat::Float32;
+ return true;
+ case interop::GPUVertexFormat::kFloat32X2:
+ out = wgpu::VertexFormat::Float32x2;
+ return true;
+ case interop::GPUVertexFormat::kFloat32X3:
+ out = wgpu::VertexFormat::Float32x3;
+ return true;
+ case interop::GPUVertexFormat::kFloat32X4:
+ out = wgpu::VertexFormat::Float32x4;
+ return true;
+ case interop::GPUVertexFormat::kUint32:
+ out = wgpu::VertexFormat::Uint32;
+ return true;
+ case interop::GPUVertexFormat::kUint32X2:
+ out = wgpu::VertexFormat::Uint32x2;
+ return true;
+ case interop::GPUVertexFormat::kUint32X3:
+ out = wgpu::VertexFormat::Uint32x3;
+ return true;
+ case interop::GPUVertexFormat::kUint32X4:
+ out = wgpu::VertexFormat::Uint32x4;
+ return true;
+ case interop::GPUVertexFormat::kSint32:
+ out = wgpu::VertexFormat::Sint32;
+ return true;
+ case interop::GPUVertexFormat::kSint32X2:
+ out = wgpu::VertexFormat::Sint32x2;
+ return true;
+ case interop::GPUVertexFormat::kSint32X3:
+ out = wgpu::VertexFormat::Sint32x3;
+ return true;
+ case interop::GPUVertexFormat::kSint32X4:
+ out = wgpu::VertexFormat::Sint32x4;
+ return true;
+ default:
+ break;
}
-
- bool Converter::Convert(wgpu::VertexStepMode& out, const interop::GPUVertexStepMode& in) {
- out = wgpu::VertexStepMode::Instance;
- switch (in) {
- case interop::GPUVertexStepMode::kInstance:
- out = wgpu::VertexStepMode::Instance;
- return true;
- case interop::GPUVertexStepMode::kVertex:
- out = wgpu::VertexStepMode::Vertex;
- return true;
- default:
- break;
- }
- Napi::Error::New(env, "invalid value for GPUVertexStepMode").ThrowAsJavaScriptException();
- return false;
+ Napi::Error::New(env, "invalid value for GPUVertexFormat").ThrowAsJavaScriptException();
+ return false;
+}
+
+bool Converter::Convert(wgpu::RenderPassColorAttachment& out,
+ const interop::GPURenderPassColorAttachment& in) {
+ out = {};
+ return Convert(out.view, in.view) && //
+ Convert(out.resolveTarget, in.resolveTarget) && //
+ Convert(out.clearValue, in.clearValue) && //
+ Convert(out.loadOp, in.loadOp) && //
+ Convert(out.storeOp, in.storeOp);
+}
+
+bool Converter::Convert(wgpu::RenderPassDepthStencilAttachment& out,
+ const interop::GPURenderPassDepthStencilAttachment& in) {
+ out = {};
+ return Convert(out.view, in.view) && //
+ Convert(out.depthClearValue, in.depthClearValue) && //
+ Convert(out.depthLoadOp, in.depthLoadOp) && //
+ Convert(out.depthStoreOp, in.depthStoreOp) && //
+ Convert(out.depthReadOnly, in.depthReadOnly) && //
+ Convert(out.stencilClearValue, in.stencilClearValue) && //
+ Convert(out.stencilLoadOp, in.stencilLoadOp) && //
+ Convert(out.stencilStoreOp, in.stencilStoreOp) && //
+ Convert(out.stencilReadOnly, in.stencilReadOnly);
+}
+
+bool Converter::Convert(wgpu::LoadOp& out, const interop::GPULoadOp& in) {
+ out = wgpu::LoadOp::Clear;
+ switch (in) {
+ case interop::GPULoadOp::kLoad:
+ out = wgpu::LoadOp::Load;
+ return true;
+ case interop::GPULoadOp::kClear:
+ out = wgpu::LoadOp::Clear;
+ return true;
}
-
- bool Converter::Convert(wgpu::VertexAttribute& out, const interop::GPUVertexAttribute& in) {
- return Convert(out.format, in.format) && Convert(out.offset, in.offset) &&
- Convert(out.shaderLocation, in.shaderLocation);
+ Napi::Error::New(env, "invalid value for GPULoadOp").ThrowAsJavaScriptException();
+ return false;
+}
+
+bool Converter::Convert(wgpu::StoreOp& out, const interop::GPUStoreOp& in) {
+ out = wgpu::StoreOp::Store;
+ switch (in) {
+ case interop::GPUStoreOp::kStore:
+ out = wgpu::StoreOp::Store;
+ return true;
+ case interop::GPUStoreOp::kDiscard:
+ out = wgpu::StoreOp::Discard;
+ return true;
}
+ Napi::Error::New(env, "invalid value for GPUStoreOp").ThrowAsJavaScriptException();
+ return false;
+}
- bool Converter::Convert(wgpu::VertexFormat& out, const interop::GPUVertexFormat& in) {
- out = wgpu::VertexFormat::Undefined;
- switch (in) {
- case interop::GPUVertexFormat::kUint8X2:
- out = wgpu::VertexFormat::Uint8x2;
- return true;
- case interop::GPUVertexFormat::kUint8X4:
- out = wgpu::VertexFormat::Uint8x4;
- return true;
- case interop::GPUVertexFormat::kSint8X2:
- out = wgpu::VertexFormat::Sint8x2;
- return true;
- case interop::GPUVertexFormat::kSint8X4:
- out = wgpu::VertexFormat::Sint8x4;
- return true;
- case interop::GPUVertexFormat::kUnorm8X2:
- out = wgpu::VertexFormat::Unorm8x2;
- return true;
- case interop::GPUVertexFormat::kUnorm8X4:
- out = wgpu::VertexFormat::Unorm8x4;
- return true;
- case interop::GPUVertexFormat::kSnorm8X2:
- out = wgpu::VertexFormat::Snorm8x2;
- return true;
- case interop::GPUVertexFormat::kSnorm8X4:
- out = wgpu::VertexFormat::Snorm8x4;
- return true;
- case interop::GPUVertexFormat::kUint16X2:
- out = wgpu::VertexFormat::Uint16x2;
- return true;
- case interop::GPUVertexFormat::kUint16X4:
- out = wgpu::VertexFormat::Uint16x4;
- return true;
- case interop::GPUVertexFormat::kSint16X2:
- out = wgpu::VertexFormat::Sint16x2;
- return true;
- case interop::GPUVertexFormat::kSint16X4:
- out = wgpu::VertexFormat::Sint16x4;
- return true;
- case interop::GPUVertexFormat::kUnorm16X2:
- out = wgpu::VertexFormat::Unorm16x2;
- return true;
- case interop::GPUVertexFormat::kUnorm16X4:
- out = wgpu::VertexFormat::Unorm16x4;
- return true;
- case interop::GPUVertexFormat::kSnorm16X2:
- out = wgpu::VertexFormat::Snorm16x2;
- return true;
- case interop::GPUVertexFormat::kSnorm16X4:
- out = wgpu::VertexFormat::Snorm16x4;
- return true;
- case interop::GPUVertexFormat::kFloat16X2:
- out = wgpu::VertexFormat::Float16x2;
- return true;
- case interop::GPUVertexFormat::kFloat16X4:
- out = wgpu::VertexFormat::Float16x4;
- return true;
- case interop::GPUVertexFormat::kFloat32:
- out = wgpu::VertexFormat::Float32;
- return true;
- case interop::GPUVertexFormat::kFloat32X2:
- out = wgpu::VertexFormat::Float32x2;
- return true;
- case interop::GPUVertexFormat::kFloat32X3:
- out = wgpu::VertexFormat::Float32x3;
- return true;
- case interop::GPUVertexFormat::kFloat32X4:
- out = wgpu::VertexFormat::Float32x4;
- return true;
- case interop::GPUVertexFormat::kUint32:
- out = wgpu::VertexFormat::Uint32;
- return true;
- case interop::GPUVertexFormat::kUint32X2:
- out = wgpu::VertexFormat::Uint32x2;
- return true;
- case interop::GPUVertexFormat::kUint32X3:
- out = wgpu::VertexFormat::Uint32x3;
- return true;
- case interop::GPUVertexFormat::kUint32X4:
- out = wgpu::VertexFormat::Uint32x4;
- return true;
- case interop::GPUVertexFormat::kSint32:
- out = wgpu::VertexFormat::Sint32;
- return true;
- case interop::GPUVertexFormat::kSint32X2:
- out = wgpu::VertexFormat::Sint32x2;
- return true;
- case interop::GPUVertexFormat::kSint32X3:
- out = wgpu::VertexFormat::Sint32x3;
- return true;
- case interop::GPUVertexFormat::kSint32X4:
- out = wgpu::VertexFormat::Sint32x4;
- return true;
- default:
- break;
- }
- Napi::Error::New(env, "invalid value for GPUVertexFormat").ThrowAsJavaScriptException();
+bool Converter::Convert(wgpu::BindGroupEntry& out, const interop::GPUBindGroupEntry& in) {
+ out = {};
+ if (!Convert(out.binding, in.binding)) {
return false;
}
- bool Converter::Convert(wgpu::RenderPassColorAttachment& out,
- const interop::GPURenderPassColorAttachment& in) {
- out = {};
- return Convert(out.view, in.view) && //
- Convert(out.resolveTarget, in.resolveTarget) && //
- Convert(out.clearValue, in.clearValue) && //
- Convert(out.loadOp, in.loadOp) && //
- Convert(out.storeOp, in.storeOp);
- }
-
- bool Converter::Convert(wgpu::RenderPassDepthStencilAttachment& out,
- const interop::GPURenderPassDepthStencilAttachment& in) {
- out = {};
- return Convert(out.view, in.view) && //
- Convert(out.depthClearValue, in.depthClearValue) && //
- Convert(out.depthLoadOp, in.depthLoadOp) && //
- Convert(out.depthStoreOp, in.depthStoreOp) && //
- Convert(out.depthReadOnly, in.depthReadOnly) && //
- Convert(out.stencilClearValue, in.stencilClearValue) && //
- Convert(out.stencilLoadOp, in.stencilLoadOp) && //
- Convert(out.stencilStoreOp, in.stencilStoreOp) && //
- Convert(out.stencilReadOnly, in.stencilReadOnly);
- }
-
- bool Converter::Convert(wgpu::LoadOp& out, const interop::GPULoadOp& in) {
- out = wgpu::LoadOp::Clear;
- switch (in) {
- case interop::GPULoadOp::kLoad:
- out = wgpu::LoadOp::Load;
- return true;
- case interop::GPULoadOp::kClear:
- out = wgpu::LoadOp::Clear;
- return true;
- }
- Napi::Error::New(env, "invalid value for GPULoadOp").ThrowAsJavaScriptException();
- return false;
+ if (auto* res = std::get_if<interop::Interface<interop::GPUSampler>>(&in.resource)) {
+ return Convert(out.sampler, *res);
}
-
- bool Converter::Convert(wgpu::StoreOp& out, const interop::GPUStoreOp& in) {
- out = wgpu::StoreOp::Store;
- switch (in) {
- case interop::GPUStoreOp::kStore:
- out = wgpu::StoreOp::Store;
- return true;
- case interop::GPUStoreOp::kDiscard:
- out = wgpu::StoreOp::Discard;
- return true;
- }
- Napi::Error::New(env, "invalid value for GPUStoreOp").ThrowAsJavaScriptException();
- return false;
+ if (auto* res = std::get_if<interop::Interface<interop::GPUTextureView>>(&in.resource)) {
+ return Convert(out.textureView, *res);
}
-
- bool Converter::Convert(wgpu::BindGroupEntry& out, const interop::GPUBindGroupEntry& in) {
- out = {};
- if (!Convert(out.binding, in.binding)) {
+ if (auto* res = std::get_if<interop::GPUBufferBinding>(&in.resource)) {
+ auto buffer = res->buffer.As<GPUBuffer>();
+ out.size = wgpu::kWholeSize;
+ if (!buffer || !Convert(out.offset, res->offset) || !Convert(out.size, res->size)) {
return false;
}
-
- if (auto* res = std::get_if<interop::Interface<interop::GPUSampler>>(&in.resource)) {
- return Convert(out.sampler, *res);
- }
- if (auto* res = std::get_if<interop::Interface<interop::GPUTextureView>>(&in.resource)) {
- return Convert(out.textureView, *res);
- }
- if (auto* res = std::get_if<interop::GPUBufferBinding>(&in.resource)) {
- auto buffer = res->buffer.As<GPUBuffer>();
- out.size = wgpu::kWholeSize;
- if (!buffer || !Convert(out.offset, res->offset) || !Convert(out.size, res->size)) {
- return false;
- }
- out.buffer = *buffer;
- return true;
- }
- if (auto* res =
- std::get_if<interop::Interface<interop::GPUExternalTexture>>(&in.resource)) {
- // TODO(crbug.com/dawn/1129): External textures
- UNIMPLEMENTED();
- }
- Napi::Error::New(env, "invalid value for GPUBindGroupEntry.resource")
- .ThrowAsJavaScriptException();
- return false;
+ out.buffer = *buffer;
+ return true;
}
-
- bool Converter::Convert(wgpu::BindGroupLayoutEntry& out,
- const interop::GPUBindGroupLayoutEntry& in) {
+ if (auto* res = std::get_if<interop::Interface<interop::GPUExternalTexture>>(&in.resource)) {
// TODO(crbug.com/dawn/1129): External textures
- return Convert(out.binding, in.binding) && Convert(out.visibility, in.visibility) &&
- Convert(out.buffer, in.buffer) && Convert(out.sampler, in.sampler) &&
- Convert(out.texture, in.texture) && Convert(out.storageTexture, in.storageTexture);
+ UNIMPLEMENTED();
}
-
- bool Converter::Convert(wgpu::BufferBindingLayout& out,
- const interop::GPUBufferBindingLayout& in) {
- return Convert(out.type, in.type) && Convert(out.hasDynamicOffset, in.hasDynamicOffset) &&
- Convert(out.minBindingSize, in.minBindingSize);
- }
-
- bool Converter::Convert(wgpu::SamplerBindingLayout& out,
- const interop::GPUSamplerBindingLayout& in) {
- return Convert(out.type, in.type);
- }
-
- bool Converter::Convert(wgpu::TextureBindingLayout& out,
- const interop::GPUTextureBindingLayout& in) {
- return Convert(out.sampleType, in.sampleType) &&
- Convert(out.viewDimension, in.viewDimension) &&
- Convert(out.multisampled, in.multisampled);
- }
-
- bool Converter::Convert(wgpu::StorageTextureBindingLayout& out,
- const interop::GPUStorageTextureBindingLayout& in) {
- return Convert(out.access, in.access) && Convert(out.format, in.format) &&
- Convert(out.viewDimension, in.viewDimension);
- }
-
- bool Converter::Convert(wgpu::BufferBindingType& out, const interop::GPUBufferBindingType& in) {
- out = wgpu::BufferBindingType::Undefined;
- switch (in) {
- case interop::GPUBufferBindingType::kUniform:
- out = wgpu::BufferBindingType::Uniform;
- return true;
- case interop::GPUBufferBindingType::kStorage:
- out = wgpu::BufferBindingType::Storage;
- return true;
- case interop::GPUBufferBindingType::kReadOnlyStorage:
- out = wgpu::BufferBindingType::ReadOnlyStorage;
- return true;
- }
- Napi::Error::New(env, "invalid value for GPUBufferBindingType")
- .ThrowAsJavaScriptException();
- return false;
- }
-
- bool Converter::Convert(wgpu::TextureSampleType& out, const interop::GPUTextureSampleType& in) {
- out = wgpu::TextureSampleType::Undefined;
- switch (in) {
- case interop::GPUTextureSampleType::kFloat:
- out = wgpu::TextureSampleType::Float;
- return true;
- case interop::GPUTextureSampleType::kUnfilterableFloat:
- out = wgpu::TextureSampleType::UnfilterableFloat;
- return true;
- case interop::GPUTextureSampleType::kDepth:
- out = wgpu::TextureSampleType::Depth;
- return true;
- case interop::GPUTextureSampleType::kSint:
- out = wgpu::TextureSampleType::Sint;
- return true;
- case interop::GPUTextureSampleType::kUint:
- out = wgpu::TextureSampleType::Uint;
- return true;
- }
- Napi::Error::New(env, "invalid value for GPUTextureSampleType")
- .ThrowAsJavaScriptException();
- return false;
+ Napi::Error::New(env, "invalid value for GPUBindGroupEntry.resource")
+ .ThrowAsJavaScriptException();
+ return false;
+}
+
+bool Converter::Convert(wgpu::BindGroupLayoutEntry& out,
+ const interop::GPUBindGroupLayoutEntry& in) {
+ // TODO(crbug.com/dawn/1129): External textures
+ return Convert(out.binding, in.binding) && Convert(out.visibility, in.visibility) &&
+ Convert(out.buffer, in.buffer) && Convert(out.sampler, in.sampler) &&
+ Convert(out.texture, in.texture) && Convert(out.storageTexture, in.storageTexture);
+}
+
+bool Converter::Convert(wgpu::BufferBindingLayout& out, const interop::GPUBufferBindingLayout& in) {
+ return Convert(out.type, in.type) && Convert(out.hasDynamicOffset, in.hasDynamicOffset) &&
+ Convert(out.minBindingSize, in.minBindingSize);
+}
+
+bool Converter::Convert(wgpu::SamplerBindingLayout& out,
+ const interop::GPUSamplerBindingLayout& in) {
+ return Convert(out.type, in.type);
+}
+
+bool Converter::Convert(wgpu::TextureBindingLayout& out,
+ const interop::GPUTextureBindingLayout& in) {
+ return Convert(out.sampleType, in.sampleType) && Convert(out.viewDimension, in.viewDimension) &&
+ Convert(out.multisampled, in.multisampled);
+}
+
+bool Converter::Convert(wgpu::StorageTextureBindingLayout& out,
+ const interop::GPUStorageTextureBindingLayout& in) {
+ return Convert(out.access, in.access) && Convert(out.format, in.format) &&
+ Convert(out.viewDimension, in.viewDimension);
+}
+
+bool Converter::Convert(wgpu::BufferBindingType& out, const interop::GPUBufferBindingType& in) {
+ out = wgpu::BufferBindingType::Undefined;
+ switch (in) {
+ case interop::GPUBufferBindingType::kUniform:
+ out = wgpu::BufferBindingType::Uniform;
+ return true;
+ case interop::GPUBufferBindingType::kStorage:
+ out = wgpu::BufferBindingType::Storage;
+ return true;
+ case interop::GPUBufferBindingType::kReadOnlyStorage:
+ out = wgpu::BufferBindingType::ReadOnlyStorage;
+ return true;
}
-
- bool Converter::Convert(wgpu::SamplerBindingType& out,
- const interop::GPUSamplerBindingType& in) {
- out = wgpu::SamplerBindingType::Undefined;
- switch (in) {
- case interop::GPUSamplerBindingType::kFiltering:
- out = wgpu::SamplerBindingType::Filtering;
- return true;
- case interop::GPUSamplerBindingType::kNonFiltering:
- out = wgpu::SamplerBindingType::NonFiltering;
- return true;
- case interop::GPUSamplerBindingType::kComparison:
- out = wgpu::SamplerBindingType::Comparison;
- return true;
- }
- Napi::Error::New(env, "invalid value for GPUSamplerBindingType")
- .ThrowAsJavaScriptException();
- return false;
+ Napi::Error::New(env, "invalid value for GPUBufferBindingType").ThrowAsJavaScriptException();
+ return false;
+}
+
+bool Converter::Convert(wgpu::TextureSampleType& out, const interop::GPUTextureSampleType& in) {
+ out = wgpu::TextureSampleType::Undefined;
+ switch (in) {
+ case interop::GPUTextureSampleType::kFloat:
+ out = wgpu::TextureSampleType::Float;
+ return true;
+ case interop::GPUTextureSampleType::kUnfilterableFloat:
+ out = wgpu::TextureSampleType::UnfilterableFloat;
+ return true;
+ case interop::GPUTextureSampleType::kDepth:
+ out = wgpu::TextureSampleType::Depth;
+ return true;
+ case interop::GPUTextureSampleType::kSint:
+ out = wgpu::TextureSampleType::Sint;
+ return true;
+ case interop::GPUTextureSampleType::kUint:
+ out = wgpu::TextureSampleType::Uint;
+ return true;
}
-
- bool Converter::Convert(wgpu::StorageTextureAccess& out,
- const interop::GPUStorageTextureAccess& in) {
- out = wgpu::StorageTextureAccess::Undefined;
- switch (in) {
- case interop::GPUStorageTextureAccess::kWriteOnly:
- out = wgpu::StorageTextureAccess::WriteOnly;
- return true;
- }
- Napi::Error::New(env, "invalid value for GPUStorageTextureAccess")
- .ThrowAsJavaScriptException();
- return false;
+ Napi::Error::New(env, "invalid value for GPUTextureSampleType").ThrowAsJavaScriptException();
+ return false;
+}
+
+bool Converter::Convert(wgpu::SamplerBindingType& out, const interop::GPUSamplerBindingType& in) {
+ out = wgpu::SamplerBindingType::Undefined;
+ switch (in) {
+ case interop::GPUSamplerBindingType::kFiltering:
+ out = wgpu::SamplerBindingType::Filtering;
+ return true;
+ case interop::GPUSamplerBindingType::kNonFiltering:
+ out = wgpu::SamplerBindingType::NonFiltering;
+ return true;
+ case interop::GPUSamplerBindingType::kComparison:
+ out = wgpu::SamplerBindingType::Comparison;
+ return true;
}
-
- bool Converter::Convert(wgpu::QueryType& out, const interop::GPUQueryType& in) {
- out = wgpu::QueryType::Occlusion;
- switch (in) {
- case interop::GPUQueryType::kOcclusion:
- out = wgpu::QueryType::Occlusion;
- return true;
- case interop::GPUQueryType::kTimestamp:
- out = wgpu::QueryType::Timestamp;
- return true;
- }
- Napi::Error::New(env, "invalid value for GPUQueryType").ThrowAsJavaScriptException();
- return false;
+ Napi::Error::New(env, "invalid value for GPUSamplerBindingType").ThrowAsJavaScriptException();
+ return false;
+}
+
+bool Converter::Convert(wgpu::StorageTextureAccess& out,
+ const interop::GPUStorageTextureAccess& in) {
+ out = wgpu::StorageTextureAccess::Undefined;
+ switch (in) {
+ case interop::GPUStorageTextureAccess::kWriteOnly:
+ out = wgpu::StorageTextureAccess::WriteOnly;
+ return true;
}
-
- bool Converter::Convert(wgpu::AddressMode& out, const interop::GPUAddressMode& in) {
- out = wgpu::AddressMode::Repeat;
- switch (in) {
- case interop::GPUAddressMode::kClampToEdge:
- out = wgpu::AddressMode::ClampToEdge;
- return true;
- case interop::GPUAddressMode::kRepeat:
- out = wgpu::AddressMode::Repeat;
- return true;
- case interop::GPUAddressMode::kMirrorRepeat:
- out = wgpu::AddressMode::MirrorRepeat;
- return true;
- }
- Napi::Error::New(env, "invalid value for GPUAddressMode").ThrowAsJavaScriptException();
- return false;
+ Napi::Error::New(env, "invalid value for GPUStorageTextureAccess").ThrowAsJavaScriptException();
+ return false;
+}
+
+bool Converter::Convert(wgpu::QueryType& out, const interop::GPUQueryType& in) {
+ out = wgpu::QueryType::Occlusion;
+ switch (in) {
+ case interop::GPUQueryType::kOcclusion:
+ out = wgpu::QueryType::Occlusion;
+ return true;
+ case interop::GPUQueryType::kTimestamp:
+ out = wgpu::QueryType::Timestamp;
+ return true;
}
-
- bool Converter::Convert(wgpu::FilterMode& out, const interop::GPUFilterMode& in) {
- out = wgpu::FilterMode::Nearest;
- switch (in) {
- case interop::GPUFilterMode::kNearest:
- out = wgpu::FilterMode::Nearest;
- return true;
- case interop::GPUFilterMode::kLinear:
- out = wgpu::FilterMode::Linear;
- return true;
- }
- Napi::Error::New(env, "invalid value for GPUFilterMode").ThrowAsJavaScriptException();
- return false;
+ Napi::Error::New(env, "invalid value for GPUQueryType").ThrowAsJavaScriptException();
+ return false;
+}
+
+bool Converter::Convert(wgpu::AddressMode& out, const interop::GPUAddressMode& in) {
+ out = wgpu::AddressMode::Repeat;
+ switch (in) {
+ case interop::GPUAddressMode::kClampToEdge:
+ out = wgpu::AddressMode::ClampToEdge;
+ return true;
+ case interop::GPUAddressMode::kRepeat:
+ out = wgpu::AddressMode::Repeat;
+ return true;
+ case interop::GPUAddressMode::kMirrorRepeat:
+ out = wgpu::AddressMode::MirrorRepeat;
+ return true;
}
-
- bool Converter::Convert(wgpu::ComputePipelineDescriptor& out,
- const interop::GPUComputePipelineDescriptor& in) {
- return Convert(out.label, in.label) && //
- Convert(out.layout, in.layout) && //
- Convert(out.compute, in.compute);
+ Napi::Error::New(env, "invalid value for GPUAddressMode").ThrowAsJavaScriptException();
+ return false;
+}
+
+bool Converter::Convert(wgpu::FilterMode& out, const interop::GPUFilterMode& in) {
+ out = wgpu::FilterMode::Nearest;
+ switch (in) {
+ case interop::GPUFilterMode::kNearest:
+ out = wgpu::FilterMode::Nearest;
+ return true;
+ case interop::GPUFilterMode::kLinear:
+ out = wgpu::FilterMode::Linear;
+ return true;
}
-
- bool Converter::Convert(wgpu::RenderPipelineDescriptor& out,
- const interop::GPURenderPipelineDescriptor& in) {
- wgpu::RenderPipelineDescriptor desc{};
- return Convert(out.label, in.label) && //
- Convert(out.layout, in.layout) && //
- Convert(out.vertex, in.vertex) && //
- Convert(out.primitive, in.primitive) && //
- Convert(out.depthStencil, in.depthStencil) && //
- Convert(out.multisample, in.multisample) && //
- Convert(out.fragment, in.fragment);
+ Napi::Error::New(env, "invalid value for GPUFilterMode").ThrowAsJavaScriptException();
+ return false;
+}
+
+bool Converter::Convert(wgpu::FilterMode& out, const interop::GPUMipmapFilterMode& in) {
+ out = wgpu::FilterMode::Nearest;
+ switch (in) {
+ case interop::GPUMipmapFilterMode::kNearest:
+ out = wgpu::FilterMode::Nearest;
+ return true;
+ case interop::GPUMipmapFilterMode::kLinear:
+ out = wgpu::FilterMode::Linear;
+ return true;
}
+ Napi::Error::New(env, "invalid value for GPUFilterMode").ThrowAsJavaScriptException();
+ return false;
+}
+
+bool Converter::Convert(wgpu::ComputePipelineDescriptor& out,
+ const interop::GPUComputePipelineDescriptor& in) {
+ return Convert(out.label, in.label) && //
+ Convert(out.layout, in.layout) && //
+ Convert(out.compute, in.compute);
+}
+
+bool Converter::Convert(wgpu::RenderPipelineDescriptor& out,
+ const interop::GPURenderPipelineDescriptor& in) {
+ wgpu::RenderPipelineDescriptor desc{};
+ return Convert(out.label, in.label) && //
+ Convert(out.layout, in.layout) && //
+ Convert(out.vertex, in.vertex) && //
+ Convert(out.primitive, in.primitive) && //
+ Convert(out.depthStencil, in.depthStencil) && //
+ Convert(out.multisample, in.multisample) && //
+ Convert(out.fragment, in.fragment);
+}
+
+bool Converter::Convert(wgpu::PipelineLayout& out, const interop::GPUAutoLayoutMode& in) {
+ out = nullptr;
+ return true;
+}
} // namespace wgpu::binding
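
The Converter.cpp hunks above all follow one enum-mapping pattern: assign a defensive default to out, map each recognized interop value in a switch, and throw a JavaScript exception for anything unrecognized. What follows is an illustrative sketch only, not part of the diff: it reduces that pattern to a self-contained example using hypothetical JsCullMode/NativeCullMode enums and a plain error string in place of the Napi::Error / ThrowAsJavaScriptException() call.

// Sketch of the enum-conversion pattern used by the Convert() overloads above.
// All names here (JsCullMode, NativeCullMode, example::Convert) are hypothetical.
#include <iostream>
#include <string>

namespace example {

// Hypothetical stand-ins for an interop:: / wgpu:: enum pair.
enum class JsCullMode { kNone, kFront, kBack };
enum class NativeCullMode { None, Front, Back };

// Mirrors the shape of Converter::Convert(wgpu::CullMode&, const interop::GPUCullMode&):
// default the output, map known values, report anything else as an error.
[[nodiscard]] bool Convert(NativeCullMode& out, JsCullMode in, std::string& error) {
    out = NativeCullMode::None;  // defensive default, as in the diff
    switch (in) {
        case JsCullMode::kNone:
            out = NativeCullMode::None;
            return true;
        case JsCullMode::kFront:
            out = NativeCullMode::Front;
            return true;
        case JsCullMode::kBack:
            out = NativeCullMode::Back;
            return true;
    }
    // The real binding throws a JavaScript exception here instead of filling a string.
    error = "invalid value for cull mode";
    return false;
}

}  // namespace example

int main() {
    example::NativeCullMode mode;
    std::string error;
    if (example::Convert(mode, example::JsCullMode::kBack, error)) {
        std::cout << "converted ok\n";
    } else {
        std::cout << error << "\n";
    }
}

The short-circuiting of chained Convert(...) && Convert(...) calls in the struct overloads relies on each overload reporting its own error before returning false, which is why every enum overload ends with a throw-and-return-false tail.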
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/Converter.h b/chromium/third_party/dawn/src/dawn/node/binding/Converter.h
index befa1fa35ba..e0ef58c0175 100644
--- a/chromium/third_party/dawn/src/dawn/node/binding/Converter.h
+++ b/chromium/third_party/dawn/src/dawn/node/binding/Converter.h
@@ -16,24 +16,27 @@
#define SRC_DAWN_NODE_BINDING_CONVERTER_H_
#include <functional>
+#include <string>
#include <type_traits>
+#include <unordered_map>
+#include <utility>
+#include <vector>
#include "dawn/native/DawnNative.h"
#include "dawn/webgpu_cpp.h"
-
#include "src/dawn/node/binding/Errors.h"
#include "src/dawn/node/interop/Napi.h"
#include "src/dawn/node/interop/WebGPU.h"
namespace wgpu::binding {
- // ImplOfTraits is a traits helper that is used to associate the interop interface type to the
- // binding implementation type.
- template <typename T>
- struct ImplOfTraits {};
+// ImplOfTraits is a traits helper that is used to associate the interop interface type to the
+// binding implementation type.
+template <typename T>
+struct ImplOfTraits {};
- // DECLARE_IMPL() is a macro that declares a specialization of ImplOfTraits so that
- // `typename ImplOfTraits<interop::NAME>::type` is equivalent to `binding::NAME`.
+// DECLARE_IMPL() is a macro that declares a specialization of ImplOfTraits so that
+// `typename ImplOfTraits<interop::NAME>::type` is equivalent to `binding::NAME`.
#define DECLARE_IMPL(NAME) \
class NAME; \
template <> \
@@ -41,370 +44,359 @@ namespace wgpu::binding {
using type = binding::NAME; \
}
- // Declare the interop interface to binding implementations
- DECLARE_IMPL(GPUBindGroup);
- DECLARE_IMPL(GPUBindGroupLayout);
- DECLARE_IMPL(GPUBuffer);
- DECLARE_IMPL(GPUPipelineLayout);
- DECLARE_IMPL(GPUQuerySet);
- DECLARE_IMPL(GPURenderBundle);
- DECLARE_IMPL(GPURenderPipeline);
- DECLARE_IMPL(GPUSampler);
- DECLARE_IMPL(GPUShaderModule);
- DECLARE_IMPL(GPUTexture);
- DECLARE_IMPL(GPUTextureView);
+// Declare the interop interface to binding implementations
+DECLARE_IMPL(GPUBindGroup);
+DECLARE_IMPL(GPUBindGroupLayout);
+DECLARE_IMPL(GPUBuffer);
+DECLARE_IMPL(GPUPipelineLayout);
+DECLARE_IMPL(GPUQuerySet);
+DECLARE_IMPL(GPURenderBundle);
+DECLARE_IMPL(GPURenderPipeline);
+DECLARE_IMPL(GPUSampler);
+DECLARE_IMPL(GPUShaderModule);
+DECLARE_IMPL(GPUTexture);
+DECLARE_IMPL(GPUTextureView);
#undef DECLARE_IMPL
- // Helper for obtaining the binding implementation type from the interop interface type
- template <typename T>
- using ImplOf = typename ImplOfTraits<T>::type;
-
- // Converter is a utility class for converting IDL generated interop types into Dawn types.
- // As the Dawn C++ API uses raw C pointers for a number of its interfaces, Converter performs
- // heap allocations for conversions of vector or optional types. These pointers are
- // automatically freed when the Converter is destructed.
- class Converter {
- public:
- Converter(Napi::Env e) : env(e) {
- }
- ~Converter();
+// Helper for obtaining the binding implementation type from the interop interface type
+template <typename T>
+using ImplOf = typename ImplOfTraits<T>::type;
+
+// Converter is a utility class for converting IDL generated interop types into Dawn types.
+// As the Dawn C++ API uses raw C pointers for a number of its interfaces, Converter performs
+// heap allocations for conversions of vector or optional types. These pointers are
+// automatically freed when the Converter is destructed.
+class Converter {
+ public:
+ explicit Converter(Napi::Env e) : env(e) {}
+ ~Converter();
+
+ // Conversion function. Converts the interop type IN to the Dawn type OUT.
+ // Returns true on success, false on failure.
+ template <typename OUT, typename IN>
+ [[nodiscard]] inline bool operator()(OUT&& out, IN&& in) {
+ return Convert(std::forward<OUT>(out), std::forward<IN>(in));
+ }
- // Conversion function. Converts the interop type IN to the Dawn type OUT.
- // Returns true on success, false on failure.
- template <typename OUT, typename IN>
- [[nodiscard]] inline bool operator()(OUT&& out, IN&& in) {
- return Convert(std::forward<OUT>(out), std::forward<IN>(in));
- }
+ // Vector conversion function. Converts the vector of interop type IN to a pointer of
+ // elements of Dawn type OUT, which is assigned to 'out_els'.
+ // out_count is assigned the number of elements in 'in'.
+ // Returns true on success, false on failure.
+ // The pointer assigned to 'out_els' is valid until the Converter is destructed.
+ template <typename OUT, typename IN>
+ [[nodiscard]] inline bool operator()(OUT*& out_els,
+ uint32_t& out_count,
+ const std::vector<IN>& in) {
+ return Convert(out_els, out_count, in);
+ }
- // Vector conversion function. Converts the vector of interop type IN to a pointer of
- // elements of Dawn type OUT, which is assigned to 'out_els'.
- // out_count is assigned the number of elements in 'in'.
- // Returns true on success, false on failure.
- // The pointer assigned to 'out_els' is valid until the Converter is destructed.
- template <typename OUT, typename IN>
- [[nodiscard]] inline bool operator()(OUT*& out_els,
- uint32_t& out_count,
- const std::vector<IN>& in) {
- return Convert(out_els, out_count, in);
- }
+ // Returns the Env that this Converter was constructed with.
+ inline Napi::Env Env() const { return env; }
- // Returns the Env that this Converter was constructed with.
- inline Napi::Env Env() const {
- return env;
- }
+ // BufferSource is the converted type of interop::BufferSource.
+ struct BufferSource {
+ void* data;
+ size_t size; // in bytes
+ size_t bytesPerElement; // 1 for ArrayBuffers
+ };
- // BufferSource is the converted type of interop::BufferSource.
- struct BufferSource {
- void* data;
- size_t size; // in bytes
- size_t bytesPerElement; // 1 for ArrayBuffers
- };
+ private:
+ // Below are the various overloads of Convert() used to convert the interop -> Dawn types.
+ [[nodiscard]] bool Convert(wgpu::Extent3D& out, const interop::GPUExtent3D& in);
- private:
- // Below are the various overloads of Convert() used to convert the interop -> Dawn types.
- [[nodiscard]] bool Convert(wgpu::Extent3D& out, const interop::GPUExtent3D& in);
+ [[nodiscard]] bool Convert(wgpu::Origin3D& out, const interop::GPUOrigin3DDict& in);
- [[nodiscard]] bool Convert(wgpu::Origin3D& out, const interop::GPUOrigin3DDict& in);
+ [[nodiscard]] bool Convert(wgpu::Color& out, const interop::GPUColor& in);
- [[nodiscard]] bool Convert(wgpu::Color& out, const interop::GPUColor& in);
+ [[nodiscard]] bool Convert(wgpu::Origin3D& out,
+ const std::vector<interop::GPUIntegerCoordinate>& in);
- [[nodiscard]] bool Convert(wgpu::Origin3D& out,
- const std::vector<interop::GPUIntegerCoordinate>& in);
+ [[nodiscard]] bool Convert(wgpu::TextureAspect& out, const interop::GPUTextureAspect& in);
- [[nodiscard]] bool Convert(wgpu::TextureAspect& out, const interop::GPUTextureAspect& in);
+ [[nodiscard]] bool Convert(wgpu::ImageCopyTexture& out, const interop::GPUImageCopyTexture& in);
- [[nodiscard]] bool Convert(wgpu::ImageCopyTexture& out,
- const interop::GPUImageCopyTexture& in);
+ [[nodiscard]] bool Convert(wgpu::ImageCopyBuffer& out, const interop::GPUImageCopyBuffer& in);
- [[nodiscard]] bool Convert(wgpu::ImageCopyBuffer& out,
- const interop::GPUImageCopyBuffer& in);
+ [[nodiscard]] bool Convert(BufferSource& out, interop::BufferSource in);
- [[nodiscard]] bool Convert(BufferSource& out, interop::BufferSource in);
+ [[nodiscard]] bool Convert(wgpu::TextureDataLayout& out, const interop::GPUImageDataLayout& in);
- [[nodiscard]] bool Convert(wgpu::TextureDataLayout& out,
- const interop::GPUImageDataLayout& in);
+ [[nodiscard]] bool Convert(wgpu::TextureFormat& out, const interop::GPUTextureFormat& in);
- [[nodiscard]] bool Convert(wgpu::TextureFormat& out, const interop::GPUTextureFormat& in);
+ [[nodiscard]] bool Convert(wgpu::TextureUsage& out, const interop::GPUTextureUsageFlags& in);
- [[nodiscard]] bool Convert(wgpu::TextureUsage& out,
- const interop::GPUTextureUsageFlags& in);
+ [[nodiscard]] bool Convert(wgpu::ColorWriteMask& out, const interop::GPUColorWriteFlags& in);
- [[nodiscard]] bool Convert(wgpu::ColorWriteMask& out,
- const interop::GPUColorWriteFlags& in);
+ [[nodiscard]] bool Convert(wgpu::BufferUsage& out, const interop::GPUBufferUsageFlags& in);
- [[nodiscard]] bool Convert(wgpu::BufferUsage& out, const interop::GPUBufferUsageFlags& in);
+ [[nodiscard]] bool Convert(wgpu::MapMode& out, const interop::GPUMapModeFlags& in);
- [[nodiscard]] bool Convert(wgpu::MapMode& out, const interop::GPUMapModeFlags& in);
+ [[nodiscard]] bool Convert(wgpu::ShaderStage& out, const interop::GPUShaderStageFlags& in);
- [[nodiscard]] bool Convert(wgpu::ShaderStage& out, const interop::GPUShaderStageFlags& in);
+ [[nodiscard]] bool Convert(wgpu::TextureDimension& out, const interop::GPUTextureDimension& in);
- [[nodiscard]] bool Convert(wgpu::TextureDimension& out,
- const interop::GPUTextureDimension& in);
+ [[nodiscard]] bool Convert(wgpu::TextureViewDimension& out,
+ const interop::GPUTextureViewDimension& in);
- [[nodiscard]] bool Convert(wgpu::TextureViewDimension& out,
- const interop::GPUTextureViewDimension& in);
+ [[nodiscard]] bool Convert(wgpu::ProgrammableStageDescriptor& out,
+ const interop::GPUProgrammableStage& in);
- [[nodiscard]] bool Convert(wgpu::ProgrammableStageDescriptor& out,
- const interop::GPUProgrammableStage& in);
+ [[nodiscard]] bool Convert(wgpu::ConstantEntry& out,
+ const std::string& in_name,
+ wgpu::interop::GPUPipelineConstantValue in_value);
- [[nodiscard]] bool Convert(wgpu::ConstantEntry& out,
- const std::string& in_name,
- wgpu::interop::GPUPipelineConstantValue in_value);
+ [[nodiscard]] bool Convert(wgpu::BlendComponent& out, const interop::GPUBlendComponent& in);
- [[nodiscard]] bool Convert(wgpu::BlendComponent& out, const interop::GPUBlendComponent& in);
+ [[nodiscard]] bool Convert(wgpu::BlendFactor& out, const interop::GPUBlendFactor& in);
- [[nodiscard]] bool Convert(wgpu::BlendFactor& out, const interop::GPUBlendFactor& in);
+ [[nodiscard]] bool Convert(wgpu::BlendOperation& out, const interop::GPUBlendOperation& in);
- [[nodiscard]] bool Convert(wgpu::BlendOperation& out, const interop::GPUBlendOperation& in);
+ [[nodiscard]] bool Convert(wgpu::BlendState& out, const interop::GPUBlendState& in);
- [[nodiscard]] bool Convert(wgpu::BlendState& out, const interop::GPUBlendState& in);
+ [[nodiscard]] bool Convert(wgpu::PrimitiveState& out, const interop::GPUPrimitiveState& in);
- [[nodiscard]] bool Convert(wgpu::PrimitiveState& out, const interop::GPUPrimitiveState& in);
+ [[nodiscard]] bool Convert(wgpu::ColorTargetState& out, const interop::GPUColorTargetState& in);
- [[nodiscard]] bool Convert(wgpu::ColorTargetState& out,
- const interop::GPUColorTargetState& in);
+ [[nodiscard]] bool Convert(wgpu::DepthStencilState& out,
+ const interop::GPUDepthStencilState& in);
- [[nodiscard]] bool Convert(wgpu::DepthStencilState& out,
- const interop::GPUDepthStencilState& in);
+ [[nodiscard]] bool Convert(wgpu::MultisampleState& out, const interop::GPUMultisampleState& in);
- [[nodiscard]] bool Convert(wgpu::MultisampleState& out,
- const interop::GPUMultisampleState& in);
+ [[nodiscard]] bool Convert(wgpu::FragmentState& out, const interop::GPUFragmentState& in);
- [[nodiscard]] bool Convert(wgpu::FragmentState& out, const interop::GPUFragmentState& in);
+ [[nodiscard]] bool Convert(wgpu::PrimitiveTopology& out,
+ const interop::GPUPrimitiveTopology& in);
- [[nodiscard]] bool Convert(wgpu::PrimitiveTopology& out,
- const interop::GPUPrimitiveTopology& in);
+ [[nodiscard]] bool Convert(wgpu::FrontFace& out, const interop::GPUFrontFace& in);
- [[nodiscard]] bool Convert(wgpu::FrontFace& out, const interop::GPUFrontFace& in);
+ [[nodiscard]] bool Convert(wgpu::CullMode& out, const interop::GPUCullMode& in);
- [[nodiscard]] bool Convert(wgpu::CullMode& out, const interop::GPUCullMode& in);
+ [[nodiscard]] bool Convert(wgpu::CompareFunction& out, const interop::GPUCompareFunction& in);
- [[nodiscard]] bool Convert(wgpu::CompareFunction& out,
- const interop::GPUCompareFunction& in);
+ [[nodiscard]] bool Convert(wgpu::IndexFormat& out, const interop::GPUIndexFormat& in);
- [[nodiscard]] bool Convert(wgpu::IndexFormat& out, const interop::GPUIndexFormat& in);
+ [[nodiscard]] bool Convert(wgpu::StencilOperation& out, const interop::GPUStencilOperation& in);
- [[nodiscard]] bool Convert(wgpu::StencilOperation& out,
- const interop::GPUStencilOperation& in);
+ [[nodiscard]] bool Convert(wgpu::StencilFaceState& out, const interop::GPUStencilFaceState& in);
- [[nodiscard]] bool Convert(wgpu::StencilFaceState& out,
- const interop::GPUStencilFaceState& in);
+ [[nodiscard]] bool Convert(wgpu::VertexState& out, const interop::GPUVertexState& in);
- [[nodiscard]] bool Convert(wgpu::VertexState& out, const interop::GPUVertexState& in);
+ [[nodiscard]] bool Convert(wgpu::VertexBufferLayout& out,
+ const interop::GPUVertexBufferLayout& in);
- [[nodiscard]] bool Convert(wgpu::VertexBufferLayout& out,
- const interop::GPUVertexBufferLayout& in);
+ [[nodiscard]] bool Convert(wgpu::VertexStepMode& out, const interop::GPUVertexStepMode& in);
- [[nodiscard]] bool Convert(wgpu::VertexStepMode& out, const interop::GPUVertexStepMode& in);
+ [[nodiscard]] bool Convert(wgpu::VertexAttribute& out, const interop::GPUVertexAttribute& in);
- [[nodiscard]] bool Convert(wgpu::VertexAttribute& out,
- const interop::GPUVertexAttribute& in);
+ [[nodiscard]] bool Convert(wgpu::VertexFormat& out, const interop::GPUVertexFormat& in);
- [[nodiscard]] bool Convert(wgpu::VertexFormat& out, const interop::GPUVertexFormat& in);
+ [[nodiscard]] bool Convert(wgpu::RenderPassColorAttachment& out,
+ const interop::GPURenderPassColorAttachment& in);
- [[nodiscard]] bool Convert(wgpu::RenderPassColorAttachment& out,
- const interop::GPURenderPassColorAttachment& in);
+ [[nodiscard]] bool Convert(wgpu::RenderPassDepthStencilAttachment& out,
+ const interop::GPURenderPassDepthStencilAttachment& in);
- [[nodiscard]] bool Convert(wgpu::RenderPassDepthStencilAttachment& out,
- const interop::GPURenderPassDepthStencilAttachment& in);
+ [[nodiscard]] bool Convert(wgpu::LoadOp& out, const interop::GPULoadOp& in);
- [[nodiscard]] bool Convert(wgpu::LoadOp& out, const interop::GPULoadOp& in);
+ [[nodiscard]] bool Convert(wgpu::StoreOp& out, const interop::GPUStoreOp& in);
- [[nodiscard]] bool Convert(wgpu::StoreOp& out, const interop::GPUStoreOp& in);
+ [[nodiscard]] bool Convert(wgpu::BindGroupEntry& out, const interop::GPUBindGroupEntry& in);
- [[nodiscard]] bool Convert(wgpu::BindGroupEntry& out, const interop::GPUBindGroupEntry& in);
+ [[nodiscard]] bool Convert(wgpu::BindGroupLayoutEntry& out,
+ const interop::GPUBindGroupLayoutEntry& in);
- [[nodiscard]] bool Convert(wgpu::BindGroupLayoutEntry& out,
- const interop::GPUBindGroupLayoutEntry& in);
+ [[nodiscard]] bool Convert(wgpu::BufferBindingLayout& out,
+ const interop::GPUBufferBindingLayout& in);
- [[nodiscard]] bool Convert(wgpu::BufferBindingLayout& out,
- const interop::GPUBufferBindingLayout& in);
+ [[nodiscard]] bool Convert(wgpu::SamplerBindingLayout& out,
+ const interop::GPUSamplerBindingLayout& in);
- [[nodiscard]] bool Convert(wgpu::SamplerBindingLayout& out,
- const interop::GPUSamplerBindingLayout& in);
+ [[nodiscard]] bool Convert(wgpu::TextureBindingLayout& out,
+ const interop::GPUTextureBindingLayout& in);
- [[nodiscard]] bool Convert(wgpu::TextureBindingLayout& out,
- const interop::GPUTextureBindingLayout& in);
+ [[nodiscard]] bool Convert(wgpu::StorageTextureBindingLayout& out,
+ const interop::GPUStorageTextureBindingLayout& in);
- [[nodiscard]] bool Convert(wgpu::StorageTextureBindingLayout& out,
- const interop::GPUStorageTextureBindingLayout& in);
+ [[nodiscard]] bool Convert(wgpu::BufferBindingType& out,
+ const interop::GPUBufferBindingType& in);
- [[nodiscard]] bool Convert(wgpu::BufferBindingType& out,
- const interop::GPUBufferBindingType& in);
+ [[nodiscard]] bool Convert(wgpu::SamplerBindingType& out,
+ const interop::GPUSamplerBindingType& in);
- [[nodiscard]] bool Convert(wgpu::SamplerBindingType& out,
- const interop::GPUSamplerBindingType& in);
+ [[nodiscard]] bool Convert(wgpu::TextureSampleType& out,
+ const interop::GPUTextureSampleType& in);
- [[nodiscard]] bool Convert(wgpu::TextureSampleType& out,
- const interop::GPUTextureSampleType& in);
+ [[nodiscard]] bool Convert(wgpu::StorageTextureAccess& out,
+ const interop::GPUStorageTextureAccess& in);
- [[nodiscard]] bool Convert(wgpu::StorageTextureAccess& out,
- const interop::GPUStorageTextureAccess& in);
+ [[nodiscard]] bool Convert(wgpu::QueryType& out, const interop::GPUQueryType& in);
- [[nodiscard]] bool Convert(wgpu::QueryType& out, const interop::GPUQueryType& in);
+ [[nodiscard]] bool Convert(wgpu::AddressMode& out, const interop::GPUAddressMode& in);
- [[nodiscard]] bool Convert(wgpu::AddressMode& out, const interop::GPUAddressMode& in);
+ [[nodiscard]] bool Convert(wgpu::FilterMode& out, const interop::GPUFilterMode& in);
- [[nodiscard]] bool Convert(wgpu::FilterMode& out, const interop::GPUFilterMode& in);
+ [[nodiscard]] bool Convert(wgpu::FilterMode& out, const interop::GPUMipmapFilterMode& in);
- [[nodiscard]] bool Convert(wgpu::ComputePipelineDescriptor& out,
- const interop::GPUComputePipelineDescriptor& in);
+ [[nodiscard]] bool Convert(wgpu::ComputePipelineDescriptor& out,
+ const interop::GPUComputePipelineDescriptor& in);
- [[nodiscard]] bool Convert(wgpu::RenderPipelineDescriptor& out,
- const interop::GPURenderPipelineDescriptor& in);
+ [[nodiscard]] bool Convert(wgpu::RenderPipelineDescriptor& out,
+ const interop::GPURenderPipelineDescriptor& in);
- // std::string to C string
- inline bool Convert(const char*& out, const std::string& in) {
- out = in.c_str();
- return true;
- }
+ [[nodiscard]] bool Convert(wgpu::PipelineLayout& out, const interop::GPUAutoLayoutMode& in);
- // Pass-through (no conversion)
- template <typename T>
- inline bool Convert(T& out, const T& in) {
- out = in;
- return true;
- }
+ // std::string to C string
+ inline bool Convert(const char*& out, const std::string& in) {
+ out = in.c_str();
+ return true;
+ }
- // Integral number conversion, with dynamic limit checking
- template <typename OUT,
- typename IN,
- typename = std::enable_if_t<std::is_integral_v<IN> && std::is_integral_v<OUT>>>
- inline bool Convert(OUT& out, const IN& in) {
- out = static_cast<OUT>(in);
- if (static_cast<IN>(out) != in) {
- Napi::Error::New(env, "Integer value (" + std::to_string(in) +
- ") cannot be converted to the Dawn data type without "
- "truncation of the value")
- .ThrowAsJavaScriptException();
- return false;
- }
- return true;
- }
+ // Pass-through (no conversion)
+ template <typename T>
+ inline bool Convert(T& out, const T& in) {
+ out = in;
+ return true;
+ }
- // ClampedInteger<T>
- template <typename T>
- inline bool Convert(T& out, const interop::ClampedInteger<T>& in) {
- out = in;
- return true;
+ // Integral number conversion, with dynamic limit checking
+ template <typename OUT,
+ typename IN,
+ typename = std::enable_if_t<std::is_integral_v<IN> && std::is_integral_v<OUT>>>
+ inline bool Convert(OUT& out, const IN& in) {
+ out = static_cast<OUT>(in);
+ if (static_cast<IN>(out) != in) {
+ Napi::Error::New(env, "Integer value (" + std::to_string(in) +
+ ") cannot be converted to the Dawn data type without "
+ "truncation of the value")
+ .ThrowAsJavaScriptException();
+ return false;
}
+ return true;
+ }
- // EnforceRangeInteger<T>
- template <typename T>
- inline bool Convert(T& out, const interop::EnforceRangeInteger<T>& in) {
- out = in;
- return true;
- }
+ // ClampedInteger<T>
+ template <typename T>
+ inline bool Convert(T& out, const interop::ClampedInteger<T>& in) {
+ out = in;
+ return true;
+ }
- template <typename OUT, typename... IN_TYPES>
- inline bool Convert(OUT& out, const std::variant<IN_TYPES...>& in) {
- return std::visit([&](auto&& i) { return Convert(out, i); }, in);
+ // EnforceRangeInteger<T>
+ template <typename T>
+ inline bool Convert(T& out, const interop::EnforceRangeInteger<T>& in) {
+ out = in;
+ return true;
+ }
+
+ template <typename OUT, typename... IN_TYPES>
+ inline bool Convert(OUT& out, const std::variant<IN_TYPES...>& in) {
+ return std::visit([&](auto&& i) { return Convert(out, i); }, in);
+ }
+
+ // If the std::optional does not have a value, then Convert() simply returns true and 'out'
+ // is not assigned a new value.
+ template <typename OUT, typename IN>
+ inline bool Convert(OUT& out, const std::optional<IN>& in) {
+ if (in.has_value()) {
+ return Convert(out, in.value());
}
+ return true;
+ }
- // If the std::optional does not have a value, then Convert() simply returns true and 'out'
- // is not assigned a new value.
- template <typename OUT, typename IN>
- inline bool Convert(OUT& out, const std::optional<IN>& in) {
- if (in.has_value()) {
- return Convert(out, in.value());
+ // std::optional -> T*
+ // OUT* is assigned either a pointer to the converted value, or nullptr, depending on
+ // whether 'in' has a value.
+ template <typename OUT,
+ typename IN,
+ typename _ = std::enable_if_t<!std::is_same_v<IN, std::string>>>
+ inline bool Convert(OUT*& out, const std::optional<IN>& in) {
+ if (in.has_value()) {
+ auto* el = Allocate<std::remove_const_t<OUT>>();
+ if (!Convert(*el, in.value())) {
+ return false;
}
- return true;
+ out = el;
+ } else {
+ out = nullptr;
}
+ return true;
+ }
- // std::optional -> T*
- // OUT* is assigned either a pointer to the converted value, or nullptr, depending on
- // whether 'in' has a value.
- template <typename OUT,
- typename IN,
- typename _ = std::enable_if_t<!std::is_same_v<IN, std::string>>>
- inline bool Convert(OUT*& out, const std::optional<IN>& in) {
- if (in.has_value()) {
- auto* el = Allocate<std::remove_const_t<OUT>>();
- if (!Convert(*el, in.value())) {
- return false;
- }
- out = el;
- } else {
- out = nullptr;
- }
- return true;
+ // interop::Interface -> Dawn object
+ template <typename OUT, typename IN>
+ inline bool Convert(OUT& out, const interop::Interface<IN>& in) {
+ using Impl = ImplOf<IN>;
+ out = *in.template As<Impl>();
+ if (!out) {
+ LOG("Dawn object has been destroyed. This should not happen");
+ return false;
}
+ return true;
+ }
- // interop::Interface -> Dawn object
- template <typename OUT, typename IN>
- inline bool Convert(OUT& out, const interop::Interface<IN>& in) {
- using Impl = ImplOf<IN>;
- out = *in.template As<Impl>();
- if (!out) {
- LOG("Dawn object has been destroyed. This should not happen");
- return false;
- }
+ // vector -> raw pointer + count
+ template <typename OUT, typename IN>
+ inline bool Convert(OUT*& out_els, uint32_t& out_count, const std::vector<IN>& in) {
+ if (in.size() == 0) {
+ out_els = nullptr;
+ out_count = 0;
return true;
}
-
- // vector -> raw pointer + count
- template <typename OUT, typename IN>
- inline bool Convert(OUT*& out_els, uint32_t& out_count, const std::vector<IN>& in) {
- if (in.size() == 0) {
- out_els = nullptr;
- out_count = 0;
- return true;
- }
- auto* els = Allocate<std::remove_const_t<OUT>>(in.size());
- for (size_t i = 0; i < in.size(); i++) {
- if (!Convert(els[i], in[i])) {
- return false;
- }
+ auto* els = Allocate<std::remove_const_t<OUT>>(in.size());
+ for (size_t i = 0; i < in.size(); i++) {
+ if (!Convert(els[i], in[i])) {
+ return false;
}
- out_els = els;
- return Convert(out_count, in.size());
}
+ out_els = els;
+ return Convert(out_count, in.size());
+ }
- // unordered_map -> raw pointer + count
- template <typename OUT, typename IN_KEY, typename IN_VALUE>
- inline bool Convert(OUT*& out_els,
- uint32_t& out_count,
- const std::unordered_map<IN_KEY, IN_VALUE>& in) {
- if (in.size() == 0) {
- out_els = nullptr;
- out_count = 0;
- return true;
- }
- auto* els = Allocate<std::remove_const_t<OUT>>(in.size());
- size_t i = 0;
- for (auto& [key, value] : in) {
- if (!Convert(els[i++], key, value)) {
- return false;
- }
+ // unordered_map -> raw pointer + count
+ template <typename OUT, typename IN_KEY, typename IN_VALUE>
+ inline bool Convert(OUT*& out_els,
+ uint32_t& out_count,
+ const std::unordered_map<IN_KEY, IN_VALUE>& in) {
+ if (in.size() == 0) {
+ out_els = nullptr;
+ out_count = 0;
+ return true;
+ }
+ auto* els = Allocate<std::remove_const_t<OUT>>(in.size());
+ size_t i = 0;
+ for (auto& [key, value] : in) {
+ if (!Convert(els[i++], key, value)) {
+ return false;
}
- out_els = els;
- return Convert(out_count, in.size());
}
+ out_els = els;
+ return Convert(out_count, in.size());
+ }
- // std::optional<T> -> raw pointer + count
- template <typename OUT, typename IN>
- inline bool Convert(OUT*& out_els, uint32_t& out_count, const std::optional<IN>& in) {
- if (!in.has_value()) {
- out_els = nullptr;
- out_count = 0;
- return true;
- }
- return Convert(out_els, out_count, in.value());
+ // std::optional<T> -> raw pointer + count
+ template <typename OUT, typename IN>
+ inline bool Convert(OUT*& out_els, uint32_t& out_count, const std::optional<IN>& in) {
+ if (!in.has_value()) {
+ out_els = nullptr;
+ out_count = 0;
+ return true;
}
+ return Convert(out_els, out_count, in.value());
+ }
- Napi::Env env;
+ Napi::Env env;
- // Allocate() allocates and constructs an array of 'n' elements, and returns a pointer to
- // the first element. The array is freed when the Converter is destructed.
- template <typename T>
- T* Allocate(size_t n = 1) {
- auto* ptr = new T[n]{};
- free_.emplace_back([ptr] { delete[] ptr; });
- return ptr;
- }
+ // Allocate() allocates and constructs an array of 'n' elements, and returns a pointer to
+ // the first element. The array is freed when the Converter is destructed.
+ template <typename T>
+ T* Allocate(size_t n = 1) {
+ auto* ptr = new T[n]{};
+ free_.emplace_back([ptr] { delete[] ptr; });
+ return ptr;
+ }
- std::vector<std::function<void()>> free_;
- };
+ std::vector<std::function<void()>> free_;
+};
} // namespace wgpu::binding
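
The Converter.h hunks above describe how Converter heap-allocates scratch storage for vector and optional conversions and frees it when the Converter is destructed, so the Dawn C structs can hold raw pointer + count pairs. What follows is an illustrative sketch only, not part of the diff: a simplified, self-contained stand-in (here called ScratchConverter, a hypothetical name) showing how an Allocate()-style helper records a deleter so the returned pointer stays valid for the converter's lifetime.

// Sketch of the scratch-allocation pattern used by Converter::Allocate() above.
// ScratchConverter is a hypothetical, reduced stand-in for wgpu::binding::Converter.
#include <cstddef>
#include <cstdint>
#include <functional>
#include <vector>

namespace example {

class ScratchConverter {
  public:
    ~ScratchConverter() {
        // Free every scratch array allocated during conversion, as in the diff.
        for (auto& free : free_) {
            free();
        }
    }

    // vector<int> -> raw pointer + count, mirroring Convert(OUT*&, uint32_t&, const std::vector<IN>&).
    [[nodiscard]] bool Convert(const int*& out_els, uint32_t& out_count, const std::vector<int>& in) {
        if (in.empty()) {
            out_els = nullptr;
            out_count = 0;
            return true;
        }
        int* els = Allocate<int>(in.size());
        for (size_t i = 0; i < in.size(); i++) {
            els[i] = in[i];
        }
        out_els = els;
        out_count = static_cast<uint32_t>(in.size());
        return true;
    }

  private:
    // Allocates an array that is freed when the converter is destructed.
    template <typename T>
    T* Allocate(size_t n) {
        T* ptr = new T[n]{};
        free_.emplace_back([ptr] { delete[] ptr; });
        return ptr;
    }

    std::vector<std::function<void()>> free_;
};

}  // namespace example

int main() {
    example::ScratchConverter conv;
    const int* els = nullptr;
    uint32_t count = 0;
    if (conv.Convert(els, count, {1, 2, 3})) {
        // 'els' remains valid until 'conv' goes out of scope.
    }
}

This is the design choice the header comment calls out: because the Dawn C++ API takes raw pointers for array-valued descriptor fields, the converter owns all temporary storage and ties its lifetime to the Converter object rather than to each individual call.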
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/Errors.cpp b/chromium/third_party/dawn/src/dawn/node/binding/Errors.cpp
index 62def5d588e..4efa4898d3d 100644
--- a/chromium/third_party/dawn/src/dawn/node/binding/Errors.cpp
+++ b/chromium/third_party/dawn/src/dawn/node/binding/Errors.cpp
@@ -16,164 +16,161 @@
namespace wgpu::binding {
- namespace {
- constexpr char kHierarchyRequestError[] = "HierarchyRequestError";
- constexpr char kWrongDocumentError[] = "WrongDocumentError";
- constexpr char kInvalidCharacterError[] = "InvalidCharacterError";
- constexpr char kNoModificationAllowedError[] = "NoModificationAllowedError";
- constexpr char kNotFoundError[] = "NotFoundError";
- constexpr char kNotSupportedError[] = "NotSupportedError";
- constexpr char kInUseAttributeError[] = "InUseAttributeError";
- constexpr char kInvalidStateError[] = "InvalidStateError";
- constexpr char kSyntaxError[] = "SyntaxError";
- constexpr char kInvalidModificationError[] = "InvalidModificationError";
- constexpr char kNamespaceError[] = "NamespaceError";
- constexpr char kSecurityError[] = "SecurityError";
- constexpr char kNetworkError[] = "NetworkError";
- constexpr char kAbortError[] = "AbortError";
- constexpr char kURLMismatchError[] = "URLMismatchError";
- constexpr char kQuotaExceededError[] = "QuotaExceededError";
- constexpr char kTimeoutError[] = "TimeoutError";
- constexpr char kInvalidNodeTypeError[] = "InvalidNodeTypeError";
- constexpr char kDataCloneError[] = "DataCloneError";
- constexpr char kEncodingError[] = "EncodingError";
- constexpr char kNotReadableError[] = "NotReadableError";
- constexpr char kUnknownError[] = "UnknownError";
- constexpr char kConstraintError[] = "ConstraintError";
- constexpr char kDataError[] = "DataError";
- constexpr char kTransactionInactiveError[] = "TransactionInactiveError";
- constexpr char kReadOnlyError[] = "ReadOnlyError";
- constexpr char kVersionError[] = "VersionError";
- constexpr char kOperationError[] = "OperationError";
- constexpr char kNotAllowedError[] = "NotAllowedError";
-
- static Napi::Error New(Napi::Env env,
- std::string name,
- std::string message,
- unsigned short code = 0) {
- auto err = Napi::Error::New(env);
- err.Set("name", name);
- err.Set("message", message.empty() ? name : message);
- err.Set("code", static_cast<double>(code));
- return err;
- }
-
- } // namespace
-
- Napi::Error Errors::HierarchyRequestError(Napi::Env env, std::string message) {
- return New(env, kHierarchyRequestError, message);
- }
-
- Napi::Error Errors::WrongDocumentError(Napi::Env env, std::string message) {
- return New(env, kWrongDocumentError, message);
- }
-
- Napi::Error Errors::InvalidCharacterError(Napi::Env env, std::string message) {
- return New(env, kInvalidCharacterError, message);
- }
-
- Napi::Error Errors::NoModificationAllowedError(Napi::Env env, std::string message) {
- return New(env, kNoModificationAllowedError, message);
- }
-
- Napi::Error Errors::NotFoundError(Napi::Env env, std::string message) {
- return New(env, kNotFoundError, message);
- }
-
- Napi::Error Errors::NotSupportedError(Napi::Env env, std::string message) {
- return New(env, kNotSupportedError, message);
- }
-
- Napi::Error Errors::InUseAttributeError(Napi::Env env, std::string message) {
- return New(env, kInUseAttributeError, message);
- }
-
- Napi::Error Errors::InvalidStateError(Napi::Env env, std::string message) {
- return New(env, kInvalidStateError, message);
- }
-
- Napi::Error Errors::SyntaxError(Napi::Env env, std::string message) {
- return New(env, kSyntaxError, message);
- }
-
- Napi::Error Errors::InvalidModificationError(Napi::Env env, std::string message) {
- return New(env, kInvalidModificationError, message);
- }
-
- Napi::Error Errors::NamespaceError(Napi::Env env, std::string message) {
- return New(env, kNamespaceError, message);
- }
-
- Napi::Error Errors::SecurityError(Napi::Env env, std::string message) {
- return New(env, kSecurityError, message);
- }
-
- Napi::Error Errors::NetworkError(Napi::Env env, std::string message) {
- return New(env, kNetworkError, message);
- }
-
- Napi::Error Errors::AbortError(Napi::Env env, std::string message) {
- return New(env, kAbortError, message);
- }
-
- Napi::Error Errors::URLMismatchError(Napi::Env env, std::string message) {
- return New(env, kURLMismatchError, message);
- }
-
- Napi::Error Errors::QuotaExceededError(Napi::Env env, std::string message) {
- return New(env, kQuotaExceededError, message);
- }
-
- Napi::Error Errors::TimeoutError(Napi::Env env, std::string message) {
- return New(env, kTimeoutError, message);
- }
-
- Napi::Error Errors::InvalidNodeTypeError(Napi::Env env, std::string message) {
- return New(env, kInvalidNodeTypeError, message);
- }
-
- Napi::Error Errors::DataCloneError(Napi::Env env, std::string message) {
- return New(env, kDataCloneError, message);
- }
-
- Napi::Error Errors::EncodingError(Napi::Env env, std::string message) {
- return New(env, kEncodingError, message);
- }
-
- Napi::Error Errors::NotReadableError(Napi::Env env, std::string message) {
- return New(env, kNotReadableError, message);
- }
-
- Napi::Error Errors::UnknownError(Napi::Env env, std::string message) {
- return New(env, kUnknownError, message);
- }
-
- Napi::Error Errors::ConstraintError(Napi::Env env, std::string message) {
- return New(env, kConstraintError, message);
- }
-
- Napi::Error Errors::DataError(Napi::Env env, std::string message) {
- return New(env, kDataError, message);
- }
+namespace {
+constexpr char kHierarchyRequestError[] = "HierarchyRequestError";
+constexpr char kWrongDocumentError[] = "WrongDocumentError";
+constexpr char kInvalidCharacterError[] = "InvalidCharacterError";
+constexpr char kNoModificationAllowedError[] = "NoModificationAllowedError";
+constexpr char kNotFoundError[] = "NotFoundError";
+constexpr char kNotSupportedError[] = "NotSupportedError";
+constexpr char kInUseAttributeError[] = "InUseAttributeError";
+constexpr char kInvalidStateError[] = "InvalidStateError";
+constexpr char kSyntaxError[] = "SyntaxError";
+constexpr char kInvalidModificationError[] = "InvalidModificationError";
+constexpr char kNamespaceError[] = "NamespaceError";
+constexpr char kSecurityError[] = "SecurityError";
+constexpr char kNetworkError[] = "NetworkError";
+constexpr char kAbortError[] = "AbortError";
+constexpr char kURLMismatchError[] = "URLMismatchError";
+constexpr char kQuotaExceededError[] = "QuotaExceededError";
+constexpr char kTimeoutError[] = "TimeoutError";
+constexpr char kInvalidNodeTypeError[] = "InvalidNodeTypeError";
+constexpr char kDataCloneError[] = "DataCloneError";
+constexpr char kEncodingError[] = "EncodingError";
+constexpr char kNotReadableError[] = "NotReadableError";
+constexpr char kUnknownError[] = "UnknownError";
+constexpr char kConstraintError[] = "ConstraintError";
+constexpr char kDataError[] = "DataError";
+constexpr char kTransactionInactiveError[] = "TransactionInactiveError";
+constexpr char kReadOnlyError[] = "ReadOnlyError";
+constexpr char kVersionError[] = "VersionError";
+constexpr char kOperationError[] = "OperationError";
+constexpr char kNotAllowedError[] = "NotAllowedError";
+
+static Napi::Error New(Napi::Env env, std::string name, std::string message, uint16_t code = 0) {
+ auto err = Napi::Error::New(env);
+ err.Set("name", name);
+ err.Set("message", message.empty() ? name : message);
+ err.Set("code", static_cast<double>(code));
+ return err;
+}
+
+} // namespace
+
+Napi::Error Errors::HierarchyRequestError(Napi::Env env, std::string message) {
+ return New(env, kHierarchyRequestError, message);
+}
+
+Napi::Error Errors::WrongDocumentError(Napi::Env env, std::string message) {
+ return New(env, kWrongDocumentError, message);
+}
+
+Napi::Error Errors::InvalidCharacterError(Napi::Env env, std::string message) {
+ return New(env, kInvalidCharacterError, message);
+}
+
+Napi::Error Errors::NoModificationAllowedError(Napi::Env env, std::string message) {
+ return New(env, kNoModificationAllowedError, message);
+}
+
+Napi::Error Errors::NotFoundError(Napi::Env env, std::string message) {
+ return New(env, kNotFoundError, message);
+}
+
+Napi::Error Errors::NotSupportedError(Napi::Env env, std::string message) {
+ return New(env, kNotSupportedError, message);
+}
+
+Napi::Error Errors::InUseAttributeError(Napi::Env env, std::string message) {
+ return New(env, kInUseAttributeError, message);
+}
+
+Napi::Error Errors::InvalidStateError(Napi::Env env, std::string message) {
+ return New(env, kInvalidStateError, message);
+}
+
+Napi::Error Errors::SyntaxError(Napi::Env env, std::string message) {
+ return New(env, kSyntaxError, message);
+}
+
+Napi::Error Errors::InvalidModificationError(Napi::Env env, std::string message) {
+ return New(env, kInvalidModificationError, message);
+}
+
+Napi::Error Errors::NamespaceError(Napi::Env env, std::string message) {
+ return New(env, kNamespaceError, message);
+}
+
+Napi::Error Errors::SecurityError(Napi::Env env, std::string message) {
+ return New(env, kSecurityError, message);
+}
+
+Napi::Error Errors::NetworkError(Napi::Env env, std::string message) {
+ return New(env, kNetworkError, message);
+}
+
+Napi::Error Errors::AbortError(Napi::Env env, std::string message) {
+ return New(env, kAbortError, message);
+}
+
+Napi::Error Errors::URLMismatchError(Napi::Env env, std::string message) {
+ return New(env, kURLMismatchError, message);
+}
+
+Napi::Error Errors::QuotaExceededError(Napi::Env env, std::string message) {
+ return New(env, kQuotaExceededError, message);
+}
+
+Napi::Error Errors::TimeoutError(Napi::Env env, std::string message) {
+ return New(env, kTimeoutError, message);
+}
+
+Napi::Error Errors::InvalidNodeTypeError(Napi::Env env, std::string message) {
+ return New(env, kInvalidNodeTypeError, message);
+}
+
+Napi::Error Errors::DataCloneError(Napi::Env env, std::string message) {
+ return New(env, kDataCloneError, message);
+}
+
+Napi::Error Errors::EncodingError(Napi::Env env, std::string message) {
+ return New(env, kEncodingError, message);
+}
+
+Napi::Error Errors::NotReadableError(Napi::Env env, std::string message) {
+ return New(env, kNotReadableError, message);
+}
+
+Napi::Error Errors::UnknownError(Napi::Env env, std::string message) {
+ return New(env, kUnknownError, message);
+}
+
+Napi::Error Errors::ConstraintError(Napi::Env env, std::string message) {
+ return New(env, kConstraintError, message);
+}
+
+Napi::Error Errors::DataError(Napi::Env env, std::string message) {
+ return New(env, kDataError, message);
+}
- Napi::Error Errors::TransactionInactiveError(Napi::Env env, std::string message) {
- return New(env, kTransactionInactiveError, message);
- }
+Napi::Error Errors::TransactionInactiveError(Napi::Env env, std::string message) {
+ return New(env, kTransactionInactiveError, message);
+}
- Napi::Error Errors::ReadOnlyError(Napi::Env env, std::string message) {
- return New(env, kReadOnlyError, message);
- }
+Napi::Error Errors::ReadOnlyError(Napi::Env env, std::string message) {
+ return New(env, kReadOnlyError, message);
+}
- Napi::Error Errors::VersionError(Napi::Env env, std::string message) {
- return New(env, kVersionError, message);
- }
+Napi::Error Errors::VersionError(Napi::Env env, std::string message) {
+ return New(env, kVersionError, message);
+}
- Napi::Error Errors::OperationError(Napi::Env env, std::string message) {
- return New(env, kOperationError, message);
- }
+Napi::Error Errors::OperationError(Napi::Env env, std::string message) {
+ return New(env, kOperationError, message);
+}
- Napi::Error Errors::NotAllowedError(Napi::Env env, std::string message) {
- return New(env, kNotAllowedError, message);
- }
+Napi::Error Errors::NotAllowedError(Napi::Env env, std::string message) {
+ return New(env, kNotAllowedError, message);
+}
} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/Errors.h b/chromium/third_party/dawn/src/dawn/node/binding/Errors.h
index 640a7024184..9e30ef767a6 100644
--- a/chromium/third_party/dawn/src/dawn/node/binding/Errors.h
+++ b/chromium/third_party/dawn/src/dawn/node/binding/Errors.h
@@ -15,45 +15,47 @@
#ifndef SRC_DAWN_NODE_BINDING_ERRORS_H_
#define SRC_DAWN_NODE_BINDING_ERRORS_H_
+#include <string>
+
#include "src/dawn/node/interop/Napi.h"
namespace wgpu::binding {
- // Errors contains static helper methods for creating DOMException error
- // messages as documented at:
- // https://heycam.github.io/webidl/#idl-DOMException-error-names
- class Errors {
- public:
- static Napi::Error HierarchyRequestError(Napi::Env, std::string message = {});
- static Napi::Error WrongDocumentError(Napi::Env, std::string message = {});
- static Napi::Error InvalidCharacterError(Napi::Env, std::string message = {});
- static Napi::Error NoModificationAllowedError(Napi::Env, std::string message = {});
- static Napi::Error NotFoundError(Napi::Env, std::string message = {});
- static Napi::Error NotSupportedError(Napi::Env, std::string message = {});
- static Napi::Error InUseAttributeError(Napi::Env, std::string message = {});
- static Napi::Error InvalidStateError(Napi::Env, std::string message = {});
- static Napi::Error SyntaxError(Napi::Env, std::string message = {});
- static Napi::Error InvalidModificationError(Napi::Env, std::string message = {});
- static Napi::Error NamespaceError(Napi::Env, std::string message = {});
- static Napi::Error SecurityError(Napi::Env, std::string message = {});
- static Napi::Error NetworkError(Napi::Env, std::string message = {});
- static Napi::Error AbortError(Napi::Env, std::string message = {});
- static Napi::Error URLMismatchError(Napi::Env, std::string message = {});
- static Napi::Error QuotaExceededError(Napi::Env, std::string message = {});
- static Napi::Error TimeoutError(Napi::Env, std::string message = {});
- static Napi::Error InvalidNodeTypeError(Napi::Env, std::string message = {});
- static Napi::Error DataCloneError(Napi::Env, std::string message = {});
- static Napi::Error EncodingError(Napi::Env, std::string message = {});
- static Napi::Error NotReadableError(Napi::Env, std::string message = {});
- static Napi::Error UnknownError(Napi::Env, std::string message = {});
- static Napi::Error ConstraintError(Napi::Env, std::string message = {});
- static Napi::Error DataError(Napi::Env, std::string message = {});
- static Napi::Error TransactionInactiveError(Napi::Env, std::string message = {});
- static Napi::Error ReadOnlyError(Napi::Env, std::string message = {});
- static Napi::Error VersionError(Napi::Env, std::string message = {});
- static Napi::Error OperationError(Napi::Env, std::string message = {});
- static Napi::Error NotAllowedError(Napi::Env, std::string message = {});
- };
+// Errors contains static helper methods for creating DOMException error
+// messages as documented at:
+// https://heycam.github.io/webidl/#idl-DOMException-error-names
+class Errors {
+ public:
+ static Napi::Error HierarchyRequestError(Napi::Env, std::string message = {});
+ static Napi::Error WrongDocumentError(Napi::Env, std::string message = {});
+ static Napi::Error InvalidCharacterError(Napi::Env, std::string message = {});
+ static Napi::Error NoModificationAllowedError(Napi::Env, std::string message = {});
+ static Napi::Error NotFoundError(Napi::Env, std::string message = {});
+ static Napi::Error NotSupportedError(Napi::Env, std::string message = {});
+ static Napi::Error InUseAttributeError(Napi::Env, std::string message = {});
+ static Napi::Error InvalidStateError(Napi::Env, std::string message = {});
+ static Napi::Error SyntaxError(Napi::Env, std::string message = {});
+ static Napi::Error InvalidModificationError(Napi::Env, std::string message = {});
+ static Napi::Error NamespaceError(Napi::Env, std::string message = {});
+ static Napi::Error SecurityError(Napi::Env, std::string message = {});
+ static Napi::Error NetworkError(Napi::Env, std::string message = {});
+ static Napi::Error AbortError(Napi::Env, std::string message = {});
+ static Napi::Error URLMismatchError(Napi::Env, std::string message = {});
+ static Napi::Error QuotaExceededError(Napi::Env, std::string message = {});
+ static Napi::Error TimeoutError(Napi::Env, std::string message = {});
+ static Napi::Error InvalidNodeTypeError(Napi::Env, std::string message = {});
+ static Napi::Error DataCloneError(Napi::Env, std::string message = {});
+ static Napi::Error EncodingError(Napi::Env, std::string message = {});
+ static Napi::Error NotReadableError(Napi::Env, std::string message = {});
+ static Napi::Error UnknownError(Napi::Env, std::string message = {});
+ static Napi::Error ConstraintError(Napi::Env, std::string message = {});
+ static Napi::Error DataError(Napi::Env, std::string message = {});
+ static Napi::Error TransactionInactiveError(Napi::Env, std::string message = {});
+ static Napi::Error ReadOnlyError(Napi::Env, std::string message = {});
+ static Napi::Error VersionError(Napi::Env, std::string message = {});
+ static Napi::Error OperationError(Napi::Env, std::string message = {});
+ static Napi::Error NotAllowedError(Napi::Env, std::string message = {});
+};
} // namespace wgpu::binding
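
Errors.cpp and Errors.h implement one small pattern: every DOMException name gets a factory that builds an error object whose name is fixed, whose message falls back to the name when empty, and whose numeric code defaults to 0. The sketch below shows the same pattern without the N-API dependency; DomError, MakeError, and the standalone OperationError are simplified stand-ins for the Napi::Error-based helpers, not Dawn or N-API types.

// Simplified stand-in for the Errors helpers: a factory stamps out an error
// value carrying a DOMException name, a message, and a numeric code.
#include <cstdint>
#include <iostream>
#include <string>
#include <utility>

struct DomError {
    std::string name;
    std::string message;
    uint16_t code = 0;
};

static DomError MakeError(std::string name, std::string message, uint16_t code = 0) {
    DomError err;
    err.name = name;
    err.message = message.empty() ? name : message;  // fall back to the error name
    err.code = code;
    return err;
}

DomError OperationError(std::string message = {}) {
    return MakeError("OperationError", std::move(message));
}

int main() {
    auto err = OperationError("failed to create device");
    std::cout << err.name << ": " << err.message << " (code " << err.code << ")\n";
}
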
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/Flags.cpp b/chromium/third_party/dawn/src/dawn/node/binding/Flags.cpp
index 40b0560eeea..75fd2ce4b26 100644
--- a/chromium/third_party/dawn/src/dawn/node/binding/Flags.cpp
+++ b/chromium/third_party/dawn/src/dawn/node/binding/Flags.cpp
@@ -15,15 +15,15 @@
#include "src/dawn/node/binding/Flags.h"
namespace wgpu::binding {
- void Flags::Set(const std::string& key, const std::string& value) {
- flags_[key] = value;
- }
+void Flags::Set(const std::string& key, const std::string& value) {
+ flags_[key] = value;
+}
- std::optional<std::string> Flags::Get(const std::string& key) const {
- auto iter = flags_.find(key);
- if (iter != flags_.end()) {
- return iter->second;
- }
- return {};
+std::optional<std::string> Flags::Get(const std::string& key) const {
+ auto iter = flags_.find(key);
+ if (iter != flags_.end()) {
+ return iter->second;
}
+ return {};
+}
} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/Flags.h b/chromium/third_party/dawn/src/dawn/node/binding/Flags.h
index b4d4e292d4a..2989868608a 100644
--- a/chromium/third_party/dawn/src/dawn/node/binding/Flags.h
+++ b/chromium/third_party/dawn/src/dawn/node/binding/Flags.h
@@ -20,16 +20,16 @@
#include <unordered_map>
namespace wgpu::binding {
- // Flags maintains a key-value mapping of input flags passed into the module's create()
- // function, used to configure dawn_node.
- class Flags {
- public:
- void Set(const std::string& key, const std::string& value);
- std::optional<std::string> Get(const std::string& key) const;
+// Flags maintains a key-value mapping of input flags passed into the module's create()
+// function, used to configure dawn_node.
+class Flags {
+ public:
+ void Set(const std::string& key, const std::string& value);
+ std::optional<std::string> Get(const std::string& key) const;
- private:
- std::unordered_map<std::string, std::string> flags_;
- };
+ private:
+ std::unordered_map<std::string, std::string> flags_;
+};
} // namespace wgpu::binding
#endif // SRC_DAWN_NODE_BINDING_FLAGS_H_
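
Flags is the whole configuration surface of dawn_node: a string-to-string map written by the module's create() function and queried with Get(). The following self-contained sketch reproduces the class from this diff and shows the kind of lookups GPU.cpp performs; the flag names used ("dawn-backend", "dlldir") are the ones consumed later in this patch.

// Copy of the Flags class from this diff plus a small usage sketch.
#include <iostream>
#include <optional>
#include <string>
#include <unordered_map>

class Flags {
  public:
    void Set(const std::string& key, const std::string& value) { flags_[key] = value; }
    std::optional<std::string> Get(const std::string& key) const {
        auto iter = flags_.find(key);
        if (iter != flags_.end()) {
            return iter->second;
        }
        return {};
    }

  private:
    std::unordered_map<std::string, std::string> flags_;
};

int main() {
    Flags flags;
    flags.Set("dawn-backend", "vulkan");
    if (auto backend = flags.Get("dawn-backend")) {
        std::cout << "forcing backend: " << *backend << "\n";
    }
    // Unset keys return an empty optional.
    std::cout << std::boolalpha << "dlldir set: " << flags.Get("dlldir").has_value() << "\n";
}
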
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPU.cpp b/chromium/third_party/dawn/src/dawn/node/binding/GPU.cpp
index 43472ec5461..beeaa83863b 100644
--- a/chromium/third_party/dawn/src/dawn/node/binding/GPU.cpp
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPU.cpp
@@ -14,152 +14,159 @@
#include "src/dawn/node/binding/GPU.h"
-#include "src/dawn/node/binding/GPUAdapter.h"
-
+#include <algorithm>
#include <cstdlib>
+#include <string>
+#include <utility>
+
+#include "src/dawn/node/binding/GPUAdapter.h"
#if defined(_WIN32)
-# include <Windows.h>
+#include <Windows.h>
#endif
namespace {
- std::string GetEnvVar(const char* varName) {
+std::string GetEnvVar(const char* varName) {
#if defined(_WIN32)
- // Use _dupenv_s to avoid unsafe warnings about std::getenv
- char* value = nullptr;
- _dupenv_s(&value, nullptr, varName);
- if (value) {
- std::string result = value;
- free(value);
- return result;
- }
- return "";
+ // Use _dupenv_s to avoid unsafe warnings about std::getenv
+ char* value = nullptr;
+ _dupenv_s(&value, nullptr, varName);
+ if (value) {
+ std::string result = value;
+ free(value);
+ return result;
+ }
+ return "";
#else
- if (auto* val = std::getenv(varName)) {
- return val;
- }
- return "";
-#endif
+ if (auto* val = std::getenv(varName)) {
+ return val;
}
+ return "";
+#endif
+}
- void SetDllDir(const char* dir) {
- (void)dir;
+void SetDllDir(const char* dir) {
+ (void)dir;
#if defined(_WIN32)
- ::SetDllDirectory(dir);
+ ::SetDllDirectory(dir);
#endif
- }
+}
} // namespace
namespace wgpu::binding {
- ////////////////////////////////////////////////////////////////////////////////
- // wgpu::bindings::GPU
- ////////////////////////////////////////////////////////////////////////////////
- GPU::GPU(Flags flags) : flags_(std::move(flags)) {
- // TODO: Disable in 'release'
- instance_.EnableBackendValidation(true);
- instance_.SetBackendValidationLevel(dawn::native::BackendValidationLevel::Full);
-
- // Setting the DllDir changes where we load adapter DLLs from (e.g. d3dcompiler_47.dll)
- if (auto dir = flags_.Get("dlldir")) {
- SetDllDir(dir->c_str());
- }
- instance_.DiscoverDefaultAdapters();
+////////////////////////////////////////////////////////////////////////////////
+// wgpu::bindings::GPU
+////////////////////////////////////////////////////////////////////////////////
+GPU::GPU(Flags flags) : flags_(std::move(flags)) {
+ // TODO(dawn:1123): Disable in 'release'
+ instance_.EnableBackendValidation(true);
+ instance_.SetBackendValidationLevel(dawn::native::BackendValidationLevel::Full);
+
+ // Setting the DllDir changes where we load adapter DLLs from (e.g. d3dcompiler_47.dll)
+ if (auto dir = flags_.Get("dlldir")) {
+ SetDllDir(dir->c_str());
+ }
+ instance_.DiscoverDefaultAdapters();
+}
+
+interop::Promise<std::optional<interop::Interface<interop::GPUAdapter>>> GPU::requestAdapter(
+ Napi::Env env,
+ interop::GPURequestAdapterOptions options) {
+ auto promise =
+ interop::Promise<std::optional<interop::Interface<interop::GPUAdapter>>>(env, PROMISE_INFO);
+
+ if (options.forceFallbackAdapter) {
+ // Software adapters are not currently supported.
+ promise.Resolve({});
+ return promise;
}
- interop::Promise<std::optional<interop::Interface<interop::GPUAdapter>>> GPU::requestAdapter(
- Napi::Env env,
- interop::GPURequestAdapterOptions options) {
- auto promise = interop::Promise<std::optional<interop::Interface<interop::GPUAdapter>>>(
- env, PROMISE_INFO);
-
- if (options.forceFallbackAdapter) {
- // Software adapters are not currently supported.
- promise.Resolve({});
- return promise;
- }
-
- auto adapters = instance_.GetAdapters();
- if (adapters.empty()) {
- promise.Resolve({});
- return promise;
- }
+ auto adapters = instance_.GetAdapters();
+ if (adapters.empty()) {
+ promise.Resolve({});
+ return promise;
+ }
#if defined(_WIN32)
- constexpr auto defaultBackendType = wgpu::BackendType::D3D12;
+ constexpr auto defaultBackendType = wgpu::BackendType::D3D12;
#elif defined(__linux__)
- constexpr auto defaultBackendType = wgpu::BackendType::Vulkan;
+ constexpr auto defaultBackendType = wgpu::BackendType::Vulkan;
#elif defined(__APPLE__)
- constexpr auto defaultBackendType = wgpu::BackendType::Metal;
+ constexpr auto defaultBackendType = wgpu::BackendType::Metal;
#else
-# error "Unsupported platform"
+#error "Unsupported platform"
#endif
- auto targetBackendType = defaultBackendType;
- std::string forceBackend;
-
- // Check for override from env var
- if (std::string envVar = GetEnvVar("DAWNNODE_BACKEND"); !envVar.empty()) {
- forceBackend = envVar;
- }
+ auto targetBackendType = defaultBackendType;
+ std::string forceBackend;
- // Check for override from flag
- if (auto f = flags_.Get("dawn-backend")) {
- forceBackend = *f;
- }
-
- std::transform(forceBackend.begin(), forceBackend.end(), forceBackend.begin(),
- [](char c) { return std::tolower(c); });
+ // Check for override from env var
+ if (std::string envVar = GetEnvVar("DAWNNODE_BACKEND"); !envVar.empty()) {
+ forceBackend = envVar;
+ }
- // Default to first adapter if a backend is not specified
- size_t adapterIndex = 0;
+ // Check for override from flag
+ if (auto f = flags_.Get("dawn-backend")) {
+ forceBackend = *f;
+ }
- if (!forceBackend.empty()) {
- if (forceBackend == "null") {
- targetBackendType = wgpu::BackendType::Null;
- } else if (forceBackend == "webgpu") {
- targetBackendType = wgpu::BackendType::WebGPU;
- } else if (forceBackend == "d3d11") {
- targetBackendType = wgpu::BackendType::D3D11;
- } else if (forceBackend == "d3d12" || forceBackend == "d3d") {
- targetBackendType = wgpu::BackendType::D3D12;
- } else if (forceBackend == "metal") {
- targetBackendType = wgpu::BackendType::Metal;
- } else if (forceBackend == "vulkan" || forceBackend == "vk") {
- targetBackendType = wgpu::BackendType::Vulkan;
- } else if (forceBackend == "opengl" || forceBackend == "gl") {
- targetBackendType = wgpu::BackendType::OpenGL;
- } else if (forceBackend == "opengles" || forceBackend == "gles") {
- targetBackendType = wgpu::BackendType::OpenGLES;
- } else {
- promise.Reject("unknown backend '" + forceBackend + "'");
- return promise;
- }
+ std::transform(forceBackend.begin(), forceBackend.end(), forceBackend.begin(),
+ [](char c) { return std::tolower(c); });
+
+ // Default to first adapter if a backend is not specified
+ size_t adapterIndex = 0;
+
+ if (!forceBackend.empty()) {
+ if (forceBackend == "null") {
+ targetBackendType = wgpu::BackendType::Null;
+ } else if (forceBackend == "webgpu") {
+ targetBackendType = wgpu::BackendType::WebGPU;
+ } else if (forceBackend == "d3d11") {
+ targetBackendType = wgpu::BackendType::D3D11;
+ } else if (forceBackend == "d3d12" || forceBackend == "d3d") {
+ targetBackendType = wgpu::BackendType::D3D12;
+ } else if (forceBackend == "metal") {
+ targetBackendType = wgpu::BackendType::Metal;
+ } else if (forceBackend == "vulkan" || forceBackend == "vk") {
+ targetBackendType = wgpu::BackendType::Vulkan;
+ } else if (forceBackend == "opengl" || forceBackend == "gl") {
+ targetBackendType = wgpu::BackendType::OpenGL;
+ } else if (forceBackend == "opengles" || forceBackend == "gles") {
+ targetBackendType = wgpu::BackendType::OpenGLES;
+ } else {
+ promise.Reject("unknown backend '" + forceBackend + "'");
+ return promise;
}
+ }
- bool found = false;
- for (size_t i = 0; i < adapters.size(); ++i) {
- wgpu::AdapterProperties props;
- adapters[i].GetProperties(&props);
- if (props.backendType == targetBackendType) {
- adapterIndex = i;
- found = true;
- break;
- }
+ bool found = false;
+ for (size_t i = 0; i < adapters.size(); ++i) {
+ wgpu::AdapterProperties props;
+ adapters[i].GetProperties(&props);
+ if (props.backendType == targetBackendType) {
+ adapterIndex = i;
+ found = true;
+ break;
}
- if (!found) {
- if (!forceBackend.empty()) {
- promise.Reject("backend '" + forceBackend + "' not found");
- } else {
- promise.Reject("no suitable backends found");
- }
- return promise;
+ }
+ if (!found) {
+ if (!forceBackend.empty()) {
+ promise.Reject("backend '" + forceBackend + "' not found");
+ } else {
+ promise.Reject("no suitable backends found");
}
-
- auto adapter = GPUAdapter::Create<GPUAdapter>(env, adapters[adapterIndex], flags_);
- promise.Resolve(std::optional<interop::Interface<interop::GPUAdapter>>(adapter));
return promise;
}
+ auto adapter = GPUAdapter::Create<GPUAdapter>(env, adapters[adapterIndex], flags_);
+ promise.Resolve(std::optional<interop::Interface<interop::GPUAdapter>>(adapter));
+ return promise;
+}
+
+interop::GPUTextureFormat GPU::getPreferredCanvasFormat(Napi::Env) {
+ UNIMPLEMENTED();
+}
+
} // namespace wgpu::binding
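
GPU::requestAdapter picks a backend in two steps: the DAWNNODE_BACKEND environment variable is read first, then the "dawn-backend" flag overrides it when present; the resulting string is lowercased and matched against the accepted backend names, and unknown names reject the promise. A standalone sketch of that string-to-backend mapping, assuming a local Backend enum and ParseBackend helper rather than the wgpu types:

// Sketch of the backend-override parsing in GPU::requestAdapter.
#include <algorithm>
#include <cctype>
#include <iostream>
#include <optional>
#include <string>

enum class Backend { Null, WebGPU, D3D11, D3D12, Metal, Vulkan, OpenGL, OpenGLES };

std::optional<Backend> ParseBackend(std::string s) {
    // Case-insensitive match, as in requestAdapter().
    std::transform(s.begin(), s.end(), s.begin(),
                   [](unsigned char c) { return static_cast<char>(std::tolower(c)); });
    if (s == "null") return Backend::Null;
    if (s == "webgpu") return Backend::WebGPU;
    if (s == "d3d11") return Backend::D3D11;
    if (s == "d3d12" || s == "d3d") return Backend::D3D12;
    if (s == "metal") return Backend::Metal;
    if (s == "vulkan" || s == "vk") return Backend::Vulkan;
    if (s == "opengl" || s == "gl") return Backend::OpenGL;
    if (s == "opengles" || s == "gles") return Backend::OpenGLES;
    return std::nullopt;  // rejected as "unknown backend '<name>'"
}

int main() {
    std::cout << std::boolalpha;
    std::cout << ParseBackend("VK").has_value() << "\n";   // true: maps to Vulkan
    std::cout << ParseBackend("dx9").has_value() << "\n";  // false: would reject the promise
}
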
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPU.h b/chromium/third_party/dawn/src/dawn/node/binding/GPU.h
index b6ac9f36c83..aa6956e5a8f 100644
--- a/chromium/third_party/dawn/src/dawn/node/binding/GPU.h
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPU.h
@@ -23,20 +23,21 @@
#include "src/dawn/node/interop/WebGPU.h"
namespace wgpu::binding {
- // GPU is an implementation of interop::GPU that wraps a dawn::native::Instance.
- class GPU final : public interop::GPU {
- public:
- GPU(Flags flags);
-
- // interop::GPU interface compliance
- interop::Promise<std::optional<interop::Interface<interop::GPUAdapter>>> requestAdapter(
- Napi::Env env,
- interop::GPURequestAdapterOptions options) override;
-
- private:
- const Flags flags_;
- dawn::native::Instance instance_;
- };
+// GPU is an implementation of interop::GPU that wraps a dawn::native::Instance.
+class GPU final : public interop::GPU {
+ public:
+ GPU(Flags flags);
+
+ // interop::GPU interface compliance
+ interop::Promise<std::optional<interop::Interface<interop::GPUAdapter>>> requestAdapter(
+ Napi::Env env,
+ interop::GPURequestAdapterOptions options) override;
+ interop::GPUTextureFormat getPreferredCanvasFormat(Napi::Env) override;
+
+ private:
+ const Flags flags_;
+ dawn::native::Instance instance_;
+};
} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPUAdapter.cpp b/chromium/third_party/dawn/src/dawn/node/binding/GPUAdapter.cpp
index d604431236e..511225e2978 100644
--- a/chromium/third_party/dawn/src/dawn/node/binding/GPUAdapter.cpp
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPUAdapter.cpp
@@ -15,6 +15,7 @@
#include "src/dawn/node/binding/GPUAdapter.h"
#include <unordered_set>
+#include <vector>
#include "src/dawn/node/binding/Errors.h"
#include "src/dawn/node/binding/Flags.h"
@@ -22,31 +23,34 @@
#include "src/dawn/node/binding/GPUSupportedLimits.h"
namespace {
- // TODO(amaiorano): Move to utility header
- std::vector<std::string> Split(const std::string& s, char delim) {
- if (s.empty())
- return {};
-
- std::vector<std::string> result;
- const size_t lastIndex = s.length() - 1;
- size_t startIndex = 0;
- size_t i = startIndex;
-
- while (i <= lastIndex) {
- if (s[i] == delim) {
- auto token = s.substr(startIndex, i - startIndex);
- if (!token.empty()) // Discard empty tokens
- result.push_back(token);
- startIndex = i + 1;
- } else if (i == lastIndex) {
- auto token = s.substr(startIndex, i - startIndex + 1);
- if (!token.empty()) // Discard empty tokens
- result.push_back(token);
+// TODO(amaiorano): Move to utility header
+std::vector<std::string> Split(const std::string& s, char delim) {
+ if (s.empty()) {
+ return {};
+ }
+
+ std::vector<std::string> result;
+ const size_t lastIndex = s.length() - 1;
+ size_t startIndex = 0;
+ size_t i = startIndex;
+
+ while (i <= lastIndex) {
+ if (s[i] == delim) {
+ auto token = s.substr(startIndex, i - startIndex);
+ if (!token.empty()) { // Discard empty tokens
+ result.push_back(token);
+ }
+ startIndex = i + 1;
+ } else if (i == lastIndex) {
+ auto token = s.substr(startIndex, i - startIndex + 1);
+ if (!token.empty()) { // Discard empty tokens
+ result.push_back(token);
}
- ++i;
}
- return result;
+ ++i;
}
+ return result;
+}
} // namespace
#define FOR_EACH_LIMIT(X) \
@@ -79,193 +83,191 @@ namespace {
namespace wgpu::binding {
- namespace {
-
- ////////////////////////////////////////////////////////////////////////////////
- // wgpu::binding::<anon>::Features
- // Implements interop::GPUSupportedFeatures
- ////////////////////////////////////////////////////////////////////////////////
- class Features : public interop::GPUSupportedFeatures {
- public:
- Features(WGPUDeviceProperties properties) {
- if (properties.depth24UnormStencil8) {
- enabled_.emplace(interop::GPUFeatureName::kDepth24UnormStencil8);
- }
- if (properties.depth32FloatStencil8) {
- enabled_.emplace(interop::GPUFeatureName::kDepth32FloatStencil8);
- }
- if (properties.timestampQuery) {
- enabled_.emplace(interop::GPUFeatureName::kTimestampQuery);
- }
- if (properties.textureCompressionBC) {
- enabled_.emplace(interop::GPUFeatureName::kTextureCompressionBc);
- }
- if (properties.textureCompressionETC2) {
- enabled_.emplace(interop::GPUFeatureName::kTextureCompressionEtc2);
- }
- if (properties.textureCompressionASTC) {
- enabled_.emplace(interop::GPUFeatureName::kTextureCompressionAstc);
- }
- if (properties.timestampQuery) {
- enabled_.emplace(interop::GPUFeatureName::kTimestampQuery);
- }
-
- // TODO(dawn:1123) add support for these extensions when possible.
- // wgpu::interop::GPUFeatureName::kIndirectFirstInstance
- // wgpu::interop::GPUFeatureName::kDepthClipControl
- }
-
- bool has(interop::GPUFeatureName feature) {
- return enabled_.count(feature) != 0;
- }
+namespace {
- // interop::GPUSupportedFeatures compliance
- bool has(Napi::Env, std::string name) override {
- interop::GPUFeatureName feature;
- if (interop::Converter<interop::GPUFeatureName>::FromString(name, feature)) {
- return has(feature);
- }
- return false;
- }
- std::vector<std::string> keys(Napi::Env) override {
- std::vector<std::string> out;
- out.reserve(enabled_.size());
- for (auto feature : enabled_) {
- out.push_back(interop::Converter<interop::GPUFeatureName>::ToString(feature));
- }
- return out;
- }
+////////////////////////////////////////////////////////////////////////////////
+// wgpu::binding::<anon>::Features
+// Implements interop::GPUSupportedFeatures
+////////////////////////////////////////////////////////////////////////////////
+class Features : public interop::GPUSupportedFeatures {
+ public:
+ explicit Features(WGPUDeviceProperties properties) {
+ if (properties.depth32FloatStencil8) {
+ enabled_.emplace(interop::GPUFeatureName::kDepth32FloatStencil8);
+ }
+ if (properties.timestampQuery) {
+ enabled_.emplace(interop::GPUFeatureName::kTimestampQuery);
+ }
+ if (properties.textureCompressionBC) {
+ enabled_.emplace(interop::GPUFeatureName::kTextureCompressionBc);
+ }
+ if (properties.textureCompressionETC2) {
+ enabled_.emplace(interop::GPUFeatureName::kTextureCompressionEtc2);
+ }
+ if (properties.textureCompressionASTC) {
+ enabled_.emplace(interop::GPUFeatureName::kTextureCompressionAstc);
+ }
+ if (properties.timestampQuery) {
+ enabled_.emplace(interop::GPUFeatureName::kTimestampQuery);
+ }
- private:
- std::unordered_set<interop::GPUFeatureName> enabled_;
- };
+ // TODO(dawn:1123) add support for these extensions when possible.
+ // wgpu::interop::GPUFeatureName::kDepthClipControl
+ // wgpu::interop::GPUFeatureName::kIndirectFirstInstance
+ // wgpu::interop::GPUFeatureName::kShaderF16
+ // wgpu::interop::GPUFeatureName::kBgra8UnormStorage
+ }
- } // namespace
+ bool has(interop::GPUFeatureName feature) { return enabled_.count(feature) != 0; }
- ////////////////////////////////////////////////////////////////////////////////
- // wgpu::bindings::GPUAdapter
- // TODO(crbug.com/dawn/1133): This is a stub implementation. Properly implement.
- ////////////////////////////////////////////////////////////////////////////////
- GPUAdapter::GPUAdapter(dawn::native::Adapter a, const Flags& flags)
- : adapter_(a), flags_(flags) {
+ // interop::GPUSupportedFeatures compliance
+ bool has(Napi::Env, std::string name) override {
+ interop::GPUFeatureName feature;
+ if (interop::Converter<interop::GPUFeatureName>::FromString(name, feature)) {
+ return has(feature);
+ }
+ return false;
}
-
- std::string GPUAdapter::getName(Napi::Env) {
- return "dawn-adapter";
+ std::vector<std::string> keys(Napi::Env) override {
+ std::vector<std::string> out;
+ out.reserve(enabled_.size());
+ for (auto feature : enabled_) {
+ out.push_back(interop::Converter<interop::GPUFeatureName>::ToString(feature));
+ }
+ return out;
}
- interop::Interface<interop::GPUSupportedFeatures> GPUAdapter::getFeatures(Napi::Env env) {
- return interop::GPUSupportedFeatures::Create<Features>(env,
- adapter_.GetAdapterProperties());
- }
+ private:
+ std::unordered_set<interop::GPUFeatureName> enabled_;
+};
- interop::Interface<interop::GPUSupportedLimits> GPUAdapter::getLimits(Napi::Env env) {
- WGPUSupportedLimits limits{};
- if (!adapter_.GetLimits(&limits)) {
- Napi::Error::New(env, "failed to get adapter limits").ThrowAsJavaScriptException();
- }
+} // namespace
- wgpu::SupportedLimits wgpuLimits{};
+////////////////////////////////////////////////////////////////////////////////
+// wgpu::bindings::GPUAdapter
+// TODO(crbug.com/dawn/1133): This is a stub implementation. Properly implement.
+////////////////////////////////////////////////////////////////////////////////
+GPUAdapter::GPUAdapter(dawn::native::Adapter a, const Flags& flags) : adapter_(a), flags_(flags) {}
+
+// TODO(dawn:1133): Avoid the extra copy by making the generator make a virtual method with const
+// std::string&
+interop::Interface<interop::GPUSupportedFeatures> GPUAdapter::getFeatures(Napi::Env env) {
+ return interop::GPUSupportedFeatures::Create<Features>(env, adapter_.GetAdapterProperties());
+}
+
+interop::Interface<interop::GPUSupportedLimits> GPUAdapter::getLimits(Napi::Env env) {
+ WGPUSupportedLimits limits{};
+ if (!adapter_.GetLimits(&limits)) {
+ Napi::Error::New(env, "failed to get adapter limits").ThrowAsJavaScriptException();
+ }
+
+ wgpu::SupportedLimits wgpuLimits{};
#define COPY_LIMIT(LIMIT) wgpuLimits.limits.LIMIT = limits.limits.LIMIT;
- FOR_EACH_LIMIT(COPY_LIMIT)
+ FOR_EACH_LIMIT(COPY_LIMIT)
#undef COPY_LIMIT
- return interop::GPUSupportedLimits::Create<GPUSupportedLimits>(env, wgpuLimits);
- }
+ return interop::GPUSupportedLimits::Create<GPUSupportedLimits>(env, wgpuLimits);
+}
- bool GPUAdapter::getIsFallbackAdapter(Napi::Env) {
- UNIMPLEMENTED();
- }
+bool GPUAdapter::getIsFallbackAdapter(Napi::Env) {
+ UNIMPLEMENTED();
+}
- interop::Promise<interop::Interface<interop::GPUDevice>> GPUAdapter::requestDevice(
- Napi::Env env,
- interop::GPUDeviceDescriptor descriptor) {
- wgpu::DeviceDescriptor desc{}; // TODO(crbug.com/dawn/1133): Fill in.
- interop::Promise<interop::Interface<interop::GPUDevice>> promise(env, PROMISE_INFO);
-
- std::vector<wgpu::FeatureName> requiredFeatures;
- // See src/dawn/native/Features.cpp for enum <-> string mappings.
- for (auto required : descriptor.requiredFeatures) {
- switch (required) {
- case interop::GPUFeatureName::kTextureCompressionBc:
- requiredFeatures.emplace_back(wgpu::FeatureName::TextureCompressionBC);
- continue;
- case interop::GPUFeatureName::kTextureCompressionEtc2:
- requiredFeatures.emplace_back(wgpu::FeatureName::TextureCompressionETC2);
- continue;
- case interop::GPUFeatureName::kTextureCompressionAstc:
- requiredFeatures.emplace_back(wgpu::FeatureName::TextureCompressionASTC);
- continue;
- case interop::GPUFeatureName::kTimestampQuery:
- requiredFeatures.emplace_back(wgpu::FeatureName::TimestampQuery);
- continue;
- case interop::GPUFeatureName::kDepth24UnormStencil8:
- requiredFeatures.emplace_back(wgpu::FeatureName::Depth24UnormStencil8);
- continue;
- case interop::GPUFeatureName::kDepth32FloatStencil8:
- requiredFeatures.emplace_back(wgpu::FeatureName::Depth32FloatStencil8);
- continue;
- case interop::GPUFeatureName::kDepthClipControl:
- case interop::GPUFeatureName::kIndirectFirstInstance:
- // TODO(dawn:1123) Add support for these extensions when possible.
- continue;
- }
- UNIMPLEMENTED("required: ", required);
+interop::Promise<interop::Interface<interop::GPUDevice>> GPUAdapter::requestDevice(
+ Napi::Env env,
+ interop::GPUDeviceDescriptor descriptor) {
+ wgpu::DeviceDescriptor desc{}; // TODO(crbug.com/dawn/1133): Fill in.
+ interop::Promise<interop::Interface<interop::GPUDevice>> promise(env, PROMISE_INFO);
+
+ std::vector<wgpu::FeatureName> requiredFeatures;
+ // See src/dawn/native/Features.cpp for enum <-> string mappings.
+ for (auto required : descriptor.requiredFeatures) {
+ switch (required) {
+ case interop::GPUFeatureName::kTextureCompressionBc:
+ requiredFeatures.emplace_back(wgpu::FeatureName::TextureCompressionBC);
+ continue;
+ case interop::GPUFeatureName::kTextureCompressionEtc2:
+ requiredFeatures.emplace_back(wgpu::FeatureName::TextureCompressionETC2);
+ continue;
+ case interop::GPUFeatureName::kTextureCompressionAstc:
+ requiredFeatures.emplace_back(wgpu::FeatureName::TextureCompressionASTC);
+ continue;
+ case interop::GPUFeatureName::kTimestampQuery:
+ requiredFeatures.emplace_back(wgpu::FeatureName::TimestampQuery);
+ continue;
+ case interop::GPUFeatureName::kDepth32FloatStencil8:
+ requiredFeatures.emplace_back(wgpu::FeatureName::Depth32FloatStencil8);
+ continue;
+ case interop::GPUFeatureName::kDepthClipControl:
+ case interop::GPUFeatureName::kShaderF16:
+ case interop::GPUFeatureName::kIndirectFirstInstance:
+ case interop::GPUFeatureName::kBgra8UnormStorage:
+ // TODO(dawn:1123) Add support for these extensions when possible.
+ continue;
}
+ UNIMPLEMENTED("required: ", required);
+ }
- wgpu::RequiredLimits limits;
+ wgpu::RequiredLimits limits;
#define COPY_LIMIT(LIMIT) \
if (descriptor.requiredLimits.count(#LIMIT)) { \
limits.limits.LIMIT = descriptor.requiredLimits[#LIMIT]; \
descriptor.requiredLimits.erase(#LIMIT); \
}
- FOR_EACH_LIMIT(COPY_LIMIT)
+ FOR_EACH_LIMIT(COPY_LIMIT)
#undef COPY_LIMIT
- for (auto [key, _] : descriptor.requiredLimits) {
- promise.Reject(binding::Errors::OperationError(env, "Unknown limit \"" + key + "\""));
- return promise;
- }
+ for (auto [key, _] : descriptor.requiredLimits) {
+ promise.Reject(binding::Errors::OperationError(env, "Unknown limit \"" + key + "\""));
+ return promise;
+ }
- // Propogate enabled/disabled dawn features
- // Note: DawnDeviceTogglesDescriptor::forceEnabledToggles and forceDisabledToggles are
- // vectors of 'const char*', so we make sure the parsed strings survive the CreateDevice()
- // call by storing them on the stack.
- std::vector<std::string> enabledToggles;
- std::vector<std::string> disabledToggles;
- std::vector<const char*> forceEnabledToggles;
- std::vector<const char*> forceDisabledToggles;
- if (auto values = flags_.Get("enable-dawn-features")) {
- enabledToggles = Split(*values, ',');
- for (auto& t : enabledToggles) {
- forceEnabledToggles.emplace_back(t.c_str());
- }
+    // Propagate enabled/disabled dawn features
+    // Note: DawnTogglesDeviceDescriptor::forceEnabledToggles and forceDisabledToggles are
+ // vectors of 'const char*', so we make sure the parsed strings survive the CreateDevice()
+ // call by storing them on the stack.
+ std::vector<std::string> enabledToggles;
+ std::vector<std::string> disabledToggles;
+ std::vector<const char*> forceEnabledToggles;
+ std::vector<const char*> forceDisabledToggles;
+ if (auto values = flags_.Get("enable-dawn-features")) {
+ enabledToggles = Split(*values, ',');
+ for (auto& t : enabledToggles) {
+ forceEnabledToggles.emplace_back(t.c_str());
}
- if (auto values = flags_.Get("disable-dawn-features")) {
- disabledToggles = Split(*values, ',');
- for (auto& t : disabledToggles) {
- forceDisabledToggles.emplace_back(t.c_str());
- }
+ }
+ if (auto values = flags_.Get("disable-dawn-features")) {
+ disabledToggles = Split(*values, ',');
+ for (auto& t : disabledToggles) {
+ forceDisabledToggles.emplace_back(t.c_str());
}
+ }
- desc.requiredFeaturesCount = requiredFeatures.size();
- desc.requiredFeatures = requiredFeatures.data();
- desc.requiredLimits = &limits;
-
- DawnTogglesDeviceDescriptor togglesDesc = {};
- desc.nextInChain = &togglesDesc;
- togglesDesc.forceEnabledTogglesCount = forceEnabledToggles.size();
- togglesDesc.forceEnabledToggles = forceEnabledToggles.data();
- togglesDesc.forceDisabledTogglesCount = forceDisabledToggles.size();
- togglesDesc.forceDisabledToggles = forceDisabledToggles.data();
-
- auto wgpu_device = adapter_.CreateDevice(&desc);
- if (wgpu_device) {
- promise.Resolve(interop::GPUDevice::Create<GPUDevice>(env, env, wgpu_device));
- } else {
- promise.Reject(binding::Errors::OperationError(env, "failed to create device"));
- }
- return promise;
+ desc.requiredFeaturesCount = requiredFeatures.size();
+ desc.requiredFeatures = requiredFeatures.data();
+ desc.requiredLimits = &limits;
+
+ DawnTogglesDeviceDescriptor togglesDesc = {};
+ desc.nextInChain = &togglesDesc;
+ togglesDesc.forceEnabledTogglesCount = forceEnabledToggles.size();
+ togglesDesc.forceEnabledToggles = forceEnabledToggles.data();
+ togglesDesc.forceDisabledTogglesCount = forceDisabledToggles.size();
+ togglesDesc.forceDisabledToggles = forceDisabledToggles.data();
+
+ auto wgpu_device = adapter_.CreateDevice(&desc);
+ if (wgpu_device) {
+ promise.Resolve(interop::GPUDevice::Create<GPUDevice>(env, env, wgpu_device));
+ } else {
+ promise.Reject(binding::Errors::OperationError(env, "failed to create device"));
}
+ return promise;
+}
+
+interop::Promise<interop::Interface<interop::GPUAdapterInfo>> GPUAdapter::requestAdapterInfo(
+ Napi::Env,
+ std::vector<std::string> unmaskHints) {
+ UNIMPLEMENTED();
+}
+
} // namespace wgpu::binding
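
The toggle plumbing in GPUAdapter::requestDevice relies on the Split() helper at the top of the file: the "enable-dawn-features" and "disable-dawn-features" flag values are comma-separated lists, empty tokens are discarded, and the surviving strings are handed to Dawn as force-enabled/force-disabled toggles. The sketch below reuses Split() verbatim; the toggle names in main() are placeholders, not real Dawn toggles.

// Copy of the Split() helper from GPUAdapter.cpp plus a usage sketch.
#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

std::vector<std::string> Split(const std::string& s, char delim) {
    if (s.empty()) {
        return {};
    }
    std::vector<std::string> result;
    const size_t lastIndex = s.length() - 1;
    size_t startIndex = 0;
    size_t i = startIndex;
    while (i <= lastIndex) {
        if (s[i] == delim) {
            auto token = s.substr(startIndex, i - startIndex);
            if (!token.empty()) {  // Discard empty tokens
                result.push_back(token);
            }
            startIndex = i + 1;
        } else if (i == lastIndex) {
            auto token = s.substr(startIndex, i - startIndex + 1);
            if (!token.empty()) {  // Discard empty tokens
                result.push_back(token);
            }
        }
        ++i;
    }
    return result;
}

int main() {
    // The empty token between the two commas is dropped.
    for (auto& toggle : Split("example_toggle_a,,example_toggle_b", ',')) {
        std::cout << toggle << "\n";
    }
}
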
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPUAdapter.h b/chromium/third_party/dawn/src/dawn/node/binding/GPUAdapter.h
index 4a071399c9f..60b8436ab06 100644
--- a/chromium/third_party/dawn/src/dawn/node/binding/GPUAdapter.h
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPUAdapter.h
@@ -15,33 +15,37 @@
#ifndef SRC_DAWN_NODE_BINDING_GPUADAPTER_H_
#define SRC_DAWN_NODE_BINDING_GPUADAPTER_H_
+#include <string>
+#include <vector>
+
#include "dawn/native/DawnNative.h"
#include "dawn/webgpu_cpp.h"
-
#include "src/dawn/node/interop/Napi.h"
#include "src/dawn/node/interop/WebGPU.h"
namespace wgpu::binding {
- class Flags;
-
- // GPUAdapter is an implementation of interop::GPUAdapter that wraps a dawn::native::Adapter.
- class GPUAdapter final : public interop::GPUAdapter {
- public:
- GPUAdapter(dawn::native::Adapter a, const Flags& flags);
-
- // interop::GPUAdapter interface compliance
- std::string getName(Napi::Env) override;
- interop::Interface<interop::GPUSupportedFeatures> getFeatures(Napi::Env) override;
- interop::Interface<interop::GPUSupportedLimits> getLimits(Napi::Env) override;
- bool getIsFallbackAdapter(Napi::Env) override;
- interop::Promise<interop::Interface<interop::GPUDevice>> requestDevice(
- Napi::Env env,
- interop::GPUDeviceDescriptor descriptor) override;
-
- private:
- dawn::native::Adapter adapter_;
- const Flags& flags_;
- };
+class Flags;
+
+// GPUAdapter is an implementation of interop::GPUAdapter that wraps a dawn::native::Adapter.
+class GPUAdapter final : public interop::GPUAdapter {
+ public:
+ GPUAdapter(dawn::native::Adapter a, const Flags& flags);
+
+ // interop::GPUAdapter interface compliance
+ interop::Promise<interop::Interface<interop::GPUDevice>> requestDevice(
+ Napi::Env env,
+ interop::GPUDeviceDescriptor descriptor) override;
+ interop::Promise<interop::Interface<interop::GPUAdapterInfo>> requestAdapterInfo(
+ Napi::Env,
+ std::vector<std::string> unmaskHints) override;
+ interop::Interface<interop::GPUSupportedFeatures> getFeatures(Napi::Env) override;
+ interop::Interface<interop::GPUSupportedLimits> getLimits(Napi::Env) override;
+ bool getIsFallbackAdapter(Napi::Env) override;
+
+ private:
+ dawn::native::Adapter adapter_;
+ const Flags& flags_;
+};
} // namespace wgpu::binding
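
Both getLimits() and requestDevice() copy limit fields through the FOR_EACH_LIMIT X-macro, whose body is elided from this hunk. The sketch below illustrates the technique with a two-field placeholder list; maxBindGroups and maxBufferSize are examples only, and the real field list lives in the FOR_EACH_LIMIT definition in GPUAdapter.cpp.

// Sketch of the X-macro pattern behind FOR_EACH_LIMIT / COPY_LIMIT: one macro
// lists the fields, and each use-site expands it into per-field code.
#include <cstdint>
#include <iostream>

struct Limits {
    uint32_t maxBindGroups = 0;
    uint64_t maxBufferSize = 0;
};

#define FOR_EACH_EXAMPLE_LIMIT(X) \
    X(maxBindGroups)              \
    X(maxBufferSize)

int main() {
    Limits src{4, 1u << 28};
    Limits dst{};

// Expands to one assignment per listed field, like COPY_LIMIT in getLimits().
#define COPY_LIMIT(LIMIT) dst.LIMIT = src.LIMIT;
    FOR_EACH_EXAMPLE_LIMIT(COPY_LIMIT)
#undef COPY_LIMIT

    std::cout << dst.maxBindGroups << " " << dst.maxBufferSize << "\n";
}
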
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPUBindGroup.cpp b/chromium/third_party/dawn/src/dawn/node/binding/GPUBindGroup.cpp
index 1170cefa6f5..eef344fd8c2 100644
--- a/chromium/third_party/dawn/src/dawn/node/binding/GPUBindGroup.cpp
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPUBindGroup.cpp
@@ -14,23 +14,23 @@
#include "src/dawn/node/binding/GPUBindGroup.h"
+#include <utility>
+
#include "src/dawn/node/utils/Debug.h"
namespace wgpu::binding {
- ////////////////////////////////////////////////////////////////////////////////
- // wgpu::bindings::GPUBindGroup
- ////////////////////////////////////////////////////////////////////////////////
- GPUBindGroup::GPUBindGroup(wgpu::BindGroup group) : group_(std::move(group)) {
- }
+////////////////////////////////////////////////////////////////////////////////
+// wgpu::bindings::GPUBindGroup
+////////////////////////////////////////////////////////////////////////////////
+GPUBindGroup::GPUBindGroup(wgpu::BindGroup group) : group_(std::move(group)) {}
- std::variant<std::string, interop::UndefinedType> GPUBindGroup::getLabel(Napi::Env) {
- UNIMPLEMENTED();
- }
+std::string GPUBindGroup::getLabel(Napi::Env) {
+ UNIMPLEMENTED();
+}
- void GPUBindGroup::setLabel(Napi::Env,
- std::variant<std::string, interop::UndefinedType> value) {
- UNIMPLEMENTED();
- }
+void GPUBindGroup::setLabel(Napi::Env, std::string value) {
+ UNIMPLEMENTED();
+}
} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPUBindGroup.h b/chromium/third_party/dawn/src/dawn/node/binding/GPUBindGroup.h
index 5a2e93ec83a..92d9ed7ed1f 100644
--- a/chromium/third_party/dawn/src/dawn/node/binding/GPUBindGroup.h
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPUBindGroup.h
@@ -15,31 +15,30 @@
#ifndef SRC_DAWN_NODE_BINDING_GPUBINDGROUP_H_
#define SRC_DAWN_NODE_BINDING_GPUBINDGROUP_H_
+#include <string>
+
#include "dawn/native/DawnNative.h"
#include "dawn/webgpu_cpp.h"
-
#include "src/dawn/node/interop/Napi.h"
#include "src/dawn/node/interop/WebGPU.h"
namespace wgpu::binding {
- // GPUBindGroup is an implementation of interop::GPUBindGroup that wraps a wgpu::BindGroup.
- class GPUBindGroup final : public interop::GPUBindGroup {
- public:
- GPUBindGroup(wgpu::BindGroup group);
+// GPUBindGroup is an implementation of interop::GPUBindGroup that wraps a wgpu::BindGroup.
+class GPUBindGroup final : public interop::GPUBindGroup {
+ public:
+ explicit GPUBindGroup(wgpu::BindGroup group);
- // Implicit cast operator to Dawn GPU object
- inline operator const wgpu::BindGroup&() const {
- return group_;
- }
+ // Implicit cast operator to Dawn GPU object
+ inline operator const wgpu::BindGroup&() const { return group_; }
- // interop::GPUBindGroup interface compliance
- std::variant<std::string, interop::UndefinedType> getLabel(Napi::Env) override;
- void setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) override;
+ // interop::GPUBindGroup interface compliance
+ std::string getLabel(Napi::Env) override;
+ void setLabel(Napi::Env, std::string value) override;
- private:
- wgpu::BindGroup group_;
- };
+ private:
+ wgpu::BindGroup group_;
+};
} // namespace wgpu::binding
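
GPUBindGroup, like the other thin wrappers in this directory, exposes the underlying Dawn object through an implicit conversion operator so the binding can be passed directly wherever a wgpu handle is expected. A minimal sketch of that pattern, using a placeholder Handle/Wrapper pair instead of the wgpu types:

// Sketch of the wrapper-with-implicit-cast pattern used by the binding classes.
#include <iostream>
#include <string>
#include <utility>

struct Handle {
    std::string label;
};

class Wrapper {
  public:
    explicit Wrapper(Handle handle) : handle_(std::move(handle)) {}

    // Implicit cast operator to the underlying object, mirroring
    // `operator const wgpu::BindGroup&() const`.
    operator const Handle&() const { return handle_; }

  private:
    Handle handle_;
};

void Use(const Handle& h) {
    std::cout << "using " << h.label << "\n";
}

int main() {
    Wrapper w(Handle{"bind group 0"});
    Use(w);  // converts implicitly via the cast operator
}
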
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPUBindGroupLayout.cpp b/chromium/third_party/dawn/src/dawn/node/binding/GPUBindGroupLayout.cpp
index 56421503853..95387371375 100644
--- a/chromium/third_party/dawn/src/dawn/node/binding/GPUBindGroupLayout.cpp
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPUBindGroupLayout.cpp
@@ -14,24 +14,23 @@
#include "src/dawn/node/binding/GPUBindGroupLayout.h"
+#include <utility>
+
#include "src/dawn/node/utils/Debug.h"
namespace wgpu::binding {
- ////////////////////////////////////////////////////////////////////////////////
- // wgpu::bindings::GPUBindGroupLayout
- ////////////////////////////////////////////////////////////////////////////////
- GPUBindGroupLayout::GPUBindGroupLayout(wgpu::BindGroupLayout layout)
- : layout_(std::move(layout)) {
- }
-
- std::variant<std::string, interop::UndefinedType> GPUBindGroupLayout::getLabel(Napi::Env) {
- UNIMPLEMENTED();
- }
-
- void GPUBindGroupLayout::setLabel(Napi::Env,
- std::variant<std::string, interop::UndefinedType> value) {
- UNIMPLEMENTED();
- }
+////////////////////////////////////////////////////////////////////////////////
+// wgpu::bindings::GPUBindGroupLayout
+////////////////////////////////////////////////////////////////////////////////
+GPUBindGroupLayout::GPUBindGroupLayout(wgpu::BindGroupLayout layout) : layout_(std::move(layout)) {}
+
+std::string GPUBindGroupLayout::getLabel(Napi::Env) {
+ UNIMPLEMENTED();
+}
+
+void GPUBindGroupLayout::setLabel(Napi::Env, std::string value) {
+ UNIMPLEMENTED();
+}
} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPUBindGroupLayout.h b/chromium/third_party/dawn/src/dawn/node/binding/GPUBindGroupLayout.h
index 02cb0099c82..919acd1acc3 100644
--- a/chromium/third_party/dawn/src/dawn/node/binding/GPUBindGroupLayout.h
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPUBindGroupLayout.h
@@ -15,32 +15,31 @@
#ifndef SRC_DAWN_NODE_BINDING_GPUBINDGROUPLAYOUT_H_
#define SRC_DAWN_NODE_BINDING_GPUBINDGROUPLAYOUT_H_
+#include <string>
+
#include "dawn/native/DawnNative.h"
#include "dawn/webgpu_cpp.h"
-
#include "src/dawn/node/interop/Napi.h"
#include "src/dawn/node/interop/WebGPU.h"
namespace wgpu::binding {
- // GPUBindGroupLayout is an implementation of interop::GPUBindGroupLayout that wraps a
- // wgpu::BindGroupLayout.
- class GPUBindGroupLayout final : public interop::GPUBindGroupLayout {
- public:
- GPUBindGroupLayout(wgpu::BindGroupLayout layout);
+// GPUBindGroupLayout is an implementation of interop::GPUBindGroupLayout that wraps a
+// wgpu::BindGroupLayout.
+class GPUBindGroupLayout final : public interop::GPUBindGroupLayout {
+ public:
+ explicit GPUBindGroupLayout(wgpu::BindGroupLayout layout);
- // Implicit cast operator to Dawn GPU object
- inline operator const wgpu::BindGroupLayout&() const {
- return layout_;
- }
+ // Implicit cast operator to Dawn GPU object
+ inline operator const wgpu::BindGroupLayout&() const { return layout_; }
- // interop::GPUBindGroupLayout interface compliance
- std::variant<std::string, interop::UndefinedType> getLabel(Napi::Env) override;
- void setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) override;
+ // interop::GPUBindGroupLayout interface compliance
+ std::string getLabel(Napi::Env) override;
+ void setLabel(Napi::Env, std::string value) override;
- private:
- wgpu::BindGroupLayout layout_;
- };
+ private:
+ wgpu::BindGroupLayout layout_;
+};
} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPUBuffer.cpp b/chromium/third_party/dawn/src/dawn/node/binding/GPUBuffer.cpp
index 9224d944378..afe0735f68c 100644
--- a/chromium/third_party/dawn/src/dawn/node/binding/GPUBuffer.cpp
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPUBuffer.cpp
@@ -15,6 +15,7 @@
#include "src/dawn/node/binding/GPUBuffer.h"
#include <memory>
+#include <utility>
#include "src/dawn/node/binding/Converter.h"
#include "src/dawn/node/binding/Errors.h"
@@ -22,157 +23,166 @@
namespace wgpu::binding {
- ////////////////////////////////////////////////////////////////////////////////
- // wgpu::bindings::GPUBuffer
- // TODO(crbug.com/dawn/1134): We may be doing more validation here than necessary. Once CTS is
- // robustly passing, pull out validation and see what / if breaks.
- ////////////////////////////////////////////////////////////////////////////////
- GPUBuffer::GPUBuffer(wgpu::Buffer buffer,
- wgpu::BufferDescriptor desc,
- wgpu::Device device,
- std::shared_ptr<AsyncRunner> async)
- : buffer_(std::move(buffer)),
- desc_(desc),
- device_(std::move(device)),
- async_(std::move(async)) {
- if (desc.mappedAtCreation) {
- state_ = State::MappedAtCreation;
- }
+////////////////////////////////////////////////////////////////////////////////
+// wgpu::bindings::GPUBuffer
+// TODO(crbug.com/dawn/1134): We may be doing more validation here than necessary. Once CTS is
+// robustly passing, pull out validation and see what / if breaks.
+////////////////////////////////////////////////////////////////////////////////
+GPUBuffer::GPUBuffer(wgpu::Buffer buffer,
+ wgpu::BufferDescriptor desc,
+ wgpu::Device device,
+ std::shared_ptr<AsyncRunner> async)
+ : buffer_(std::move(buffer)),
+ desc_(desc),
+ device_(std::move(device)),
+ async_(std::move(async)) {
+ if (desc.mappedAtCreation) {
+ state_ = State::MappedAtCreation;
+ }
+}
+
+interop::Promise<void> GPUBuffer::mapAsync(Napi::Env env,
+ interop::GPUMapModeFlags mode,
+ interop::GPUSize64 offset,
+ std::optional<interop::GPUSize64> size) {
+ wgpu::MapMode md{};
+ Converter conv(env);
+ if (!conv(md, mode)) {
+ interop::Promise<void> promise(env, PROMISE_INFO);
+ promise.Reject(Errors::OperationError(env));
+ return promise;
}
- interop::Promise<void> GPUBuffer::mapAsync(Napi::Env env,
- interop::GPUMapModeFlags mode,
- interop::GPUSize64 offset,
- std::optional<interop::GPUSize64> size) {
- wgpu::MapMode md{};
- Converter conv(env);
- if (!conv(md, mode)) {
- interop::Promise<void> promise(env, PROMISE_INFO);
- promise.Reject(Errors::OperationError(env));
- return promise;
- }
-
- if (state_ != State::Unmapped) {
- interop::Promise<void> promise(env, PROMISE_INFO);
- promise.Reject(Errors::OperationError(env));
- device_.InjectError(wgpu::ErrorType::Validation,
- "mapAsync called on buffer that is not in the unmapped state");
- return promise;
- }
-
- struct Context {
- Napi::Env env;
- interop::Promise<void> promise;
- AsyncTask task;
- State& state;
- };
- auto ctx = new Context{env, interop::Promise<void>(env, PROMISE_INFO), async_, state_};
- auto promise = ctx->promise;
-
- uint64_t s = size.has_value() ? size.value().value : (desc_.size - offset);
-
- state_ = State::MappingPending;
-
- buffer_.MapAsync(
- md, offset, s,
- [](WGPUBufferMapAsyncStatus status, void* userdata) {
- auto c = std::unique_ptr<Context>(static_cast<Context*>(userdata));
- c->state = State::Unmapped;
- switch (status) {
- case WGPUBufferMapAsyncStatus_Force32:
- UNREACHABLE("WGPUBufferMapAsyncStatus_Force32");
- break;
- case WGPUBufferMapAsyncStatus_Success:
- c->promise.Resolve();
- c->state = State::Mapped;
- break;
- case WGPUBufferMapAsyncStatus_Error:
- c->promise.Reject(Errors::OperationError(c->env));
- break;
- case WGPUBufferMapAsyncStatus_UnmappedBeforeCallback:
- case WGPUBufferMapAsyncStatus_DestroyedBeforeCallback:
- c->promise.Reject(Errors::AbortError(c->env));
- break;
- case WGPUBufferMapAsyncStatus_Unknown:
- case WGPUBufferMapAsyncStatus_DeviceLost:
- // TODO: The spec is a bit vague around what the promise should do
- // here.
- c->promise.Reject(Errors::UnknownError(c->env));
- break;
- }
- },
- ctx);
-
+ if (state_ != State::Unmapped) {
+ interop::Promise<void> promise(env, PROMISE_INFO);
+ promise.Reject(Errors::OperationError(env));
+ device_.InjectError(wgpu::ErrorType::Validation,
+ "mapAsync called on buffer that is not in the unmapped state");
return promise;
}
- interop::ArrayBuffer GPUBuffer::getMappedRange(Napi::Env env,
- interop::GPUSize64 offset,
- std::optional<interop::GPUSize64> size) {
- if (state_ != State::Mapped && state_ != State::MappedAtCreation) {
- Errors::OperationError(env).ThrowAsJavaScriptException();
- return {};
- }
+ struct Context {
+ Napi::Env env;
+ interop::Promise<void> promise;
+ AsyncTask task;
+ State& state;
+ };
+ auto ctx =
+ new Context{env, interop::Promise<void>(env, PROMISE_INFO), AsyncTask(async_), state_};
+ auto promise = ctx->promise;
+
+ uint64_t s = size.has_value() ? size.value().value : (desc_.size - offset);
+
+ state_ = State::MappingPending;
+
+ buffer_.MapAsync(
+ md, offset, s,
+ [](WGPUBufferMapAsyncStatus status, void* userdata) {
+ auto c = std::unique_ptr<Context>(static_cast<Context*>(userdata));
+ c->state = State::Unmapped;
+ switch (status) {
+ case WGPUBufferMapAsyncStatus_Force32:
+ UNREACHABLE("WGPUBufferMapAsyncStatus_Force32");
+ break;
+ case WGPUBufferMapAsyncStatus_Success:
+ c->promise.Resolve();
+ c->state = State::Mapped;
+ break;
+ case WGPUBufferMapAsyncStatus_Error:
+ c->promise.Reject(Errors::OperationError(c->env));
+ break;
+ case WGPUBufferMapAsyncStatus_UnmappedBeforeCallback:
+ case WGPUBufferMapAsyncStatus_DestroyedBeforeCallback:
+ c->promise.Reject(Errors::AbortError(c->env));
+ break;
+ case WGPUBufferMapAsyncStatus_Unknown:
+ case WGPUBufferMapAsyncStatus_DeviceLost:
+ // TODO(dawn:1123): The spec is a bit vague around what the promise should
+ // do here.
+ c->promise.Reject(Errors::UnknownError(c->env));
+ break;
+ }
+ },
+ ctx);
- uint64_t s = size.has_value() ? size.value().value : (desc_.size - offset);
+ return promise;
+}
- uint64_t start = offset;
- uint64_t end = offset + s;
- for (auto& mapping : mapped_) {
- if (mapping.Intersects(start, end)) {
- Errors::OperationError(env).ThrowAsJavaScriptException();
- return {};
- }
- }
+interop::ArrayBuffer GPUBuffer::getMappedRange(Napi::Env env,
+ interop::GPUSize64 offset,
+ std::optional<interop::GPUSize64> size) {
+ if (state_ != State::Mapped && state_ != State::MappedAtCreation) {
+ Errors::OperationError(env).ThrowAsJavaScriptException();
+ return {};
+ }
+
+ uint64_t s = size.has_value() ? size.value().value : (desc_.size - offset);
- auto* ptr = (desc_.usage & wgpu::BufferUsage::MapWrite)
- ? buffer_.GetMappedRange(offset, s)
- : const_cast<void*>(buffer_.GetConstMappedRange(offset, s));
- if (!ptr) {
+ uint64_t start = offset;
+ uint64_t end = offset + s;
+ for (auto& mapping : mapped_) {
+ if (mapping.Intersects(start, end)) {
Errors::OperationError(env).ThrowAsJavaScriptException();
return {};
}
- auto array_buffer = Napi::ArrayBuffer::New(env, ptr, s);
- // TODO(crbug.com/dawn/1135): Ownership here is the wrong way around.
- mapped_.emplace_back(Mapping{start, end, Napi::Persistent(array_buffer)});
- return array_buffer;
}
- void GPUBuffer::unmap(Napi::Env env) {
- buffer_.Unmap();
+ auto* ptr = (desc_.usage & wgpu::BufferUsage::MapWrite)
+ ? buffer_.GetMappedRange(offset, s)
+ : const_cast<void*>(buffer_.GetConstMappedRange(offset, s));
+ if (!ptr) {
+ Errors::OperationError(env).ThrowAsJavaScriptException();
+ return {};
+ }
+ auto array_buffer = Napi::ArrayBuffer::New(env, ptr, s);
+ // TODO(crbug.com/dawn/1135): Ownership here is the wrong way around.
+ mapped_.emplace_back(Mapping{start, end, Napi::Persistent(array_buffer)});
+ return array_buffer;
+}
+
+void GPUBuffer::unmap(Napi::Env env) {
+ buffer_.Unmap();
+
+ if (state_ != State::Destroyed && state_ != State::Unmapped) {
+ DetachMappings();
+ state_ = State::Unmapped;
+ }
+}
- if (state_ != State::Destroyed && state_ != State::Unmapped) {
- DetachMappings();
- state_ = State::Unmapped;
- }
+void GPUBuffer::destroy(Napi::Env) {
+ if (state_ == State::Destroyed) {
+ return;
}
- void GPUBuffer::destroy(Napi::Env) {
- if (state_ == State::Destroyed) {
- return;
- }
+ if (state_ != State::Unmapped) {
+ DetachMappings();
+ }
- if (state_ != State::Unmapped) {
- DetachMappings();
- }
+ buffer_.Destroy();
+ state_ = State::Destroyed;
+}
- buffer_.Destroy();
- state_ = State::Destroyed;
- }
+interop::GPUSize64 GPUBuffer::getSize(Napi::Env) {
+ UNIMPLEMENTED();
+}
- void GPUBuffer::DetachMappings() {
- for (auto& mapping : mapped_) {
- mapping.buffer.Value().Detach();
- }
- mapped_.clear();
- }
+interop::GPUBufferUsageFlags GPUBuffer::getUsage(Napi::Env) {
+ UNIMPLEMENTED();
+}
- std::variant<std::string, interop::UndefinedType> GPUBuffer::getLabel(Napi::Env) {
- UNIMPLEMENTED();
+void GPUBuffer::DetachMappings() {
+ for (auto& mapping : mapped_) {
+ mapping.buffer.Value().Detach();
}
+ mapped_.clear();
+}
- void GPUBuffer::setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) {
- UNIMPLEMENTED();
- }
+std::string GPUBuffer::getLabel(Napi::Env) {
+ UNIMPLEMENTED();
+}
+
+void GPUBuffer::setLabel(Napi::Env, std::string value) {
+ UNIMPLEMENTED();
+}
} // namespace wgpu::binding
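
A note for readers following the mapAsync hunk above: it keeps the existing pattern of heap-allocating a small Context, handing it to a C-style callback as void* userdata, and reclaiming ownership inside the callback with std::unique_ptr so the Context is freed exactly once. The following is a minimal, standalone sketch of that ownership pattern only; all names are hypothetical and no Dawn types are involved.

#include <functional>
#include <iostream>
#include <memory>
#include <string>

// A C-style API that reports completion through a plain function pointer.
using DoneCallback = void (*)(int status, void* userdata);

void RunAsyncWork(DoneCallback cb, void* userdata) {
    // In a real API this would fire later; here it is invoked inline.
    cb(/*status=*/0, userdata);
}

struct Context {
    std::string label;
    std::function<void(int)> on_done;
};

int main() {
    // Ownership is transferred to the callback via the raw pointer...
    auto* ctx = new Context{"map request", [](int status) {
        std::cout << "completed with status " << status << "\n";
    }};

    RunAsyncWork(
        [](int status, void* userdata) {
            // ...and reclaimed here, so the Context is destroyed exactly once
            // on every exit path out of the callback.
            auto c = std::unique_ptr<Context>(static_cast<Context*>(userdata));
            c->on_done(status);
        },
        ctx);
}
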
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPUBuffer.h b/chromium/third_party/dawn/src/dawn/node/binding/GPUBuffer.h
index 953c7c158d9..250f41b7f55 100644
--- a/chromium/third_party/dawn/src/dawn/node/binding/GPUBuffer.h
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPUBuffer.h
@@ -15,75 +15,74 @@
#ifndef SRC_DAWN_NODE_BINDING_GPUBUFFER_H_
#define SRC_DAWN_NODE_BINDING_GPUBUFFER_H_
+#include <memory>
+#include <string>
+#include <vector>
+
#include "dawn/native/DawnNative.h"
#include "dawn/webgpu_cpp.h"
-
#include "src/dawn/node/binding/AsyncRunner.h"
#include "src/dawn/node/interop/Napi.h"
#include "src/dawn/node/interop/WebGPU.h"
namespace wgpu::binding {
- // GPUBuffer is an implementation of interop::GPUBuffer that wraps a wgpu::Buffer.
- class GPUBuffer final : public interop::GPUBuffer {
- public:
- GPUBuffer(wgpu::Buffer buffer,
- wgpu::BufferDescriptor desc,
- wgpu::Device device,
- std::shared_ptr<AsyncRunner> async);
+// GPUBuffer is an implementation of interop::GPUBuffer that wraps a wgpu::Buffer.
+class GPUBuffer final : public interop::GPUBuffer {
+ public:
+ GPUBuffer(wgpu::Buffer buffer,
+ wgpu::BufferDescriptor desc,
+ wgpu::Device device,
+ std::shared_ptr<AsyncRunner> async);
- // Desc() returns the wgpu::BufferDescriptor used to construct the buffer
- const wgpu::BufferDescriptor& Desc() const {
- return desc_;
- }
+ // Desc() returns the wgpu::BufferDescriptor used to construct the buffer
+ const wgpu::BufferDescriptor& Desc() const { return desc_; }
- // Implicit cast operator to Dawn GPU object
- inline operator const wgpu::Buffer&() const {
- return buffer_;
- }
+ // Implicit cast operator to Dawn GPU object
+ inline operator const wgpu::Buffer&() const { return buffer_; }
- // interop::GPUBuffer interface compliance
- interop::Promise<void> mapAsync(Napi::Env env,
- interop::GPUMapModeFlags mode,
+ // interop::GPUBuffer interface compliance
+ interop::Promise<void> mapAsync(Napi::Env env,
+ interop::GPUMapModeFlags mode,
+ interop::GPUSize64 offset,
+ std::optional<interop::GPUSize64> size) override;
+ interop::ArrayBuffer getMappedRange(Napi::Env env,
interop::GPUSize64 offset,
std::optional<interop::GPUSize64> size) override;
- interop::ArrayBuffer getMappedRange(Napi::Env env,
- interop::GPUSize64 offset,
- std::optional<interop::GPUSize64> size) override;
- void unmap(Napi::Env) override;
- void destroy(Napi::Env) override;
- std::variant<std::string, interop::UndefinedType> getLabel(Napi::Env) override;
- void setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) override;
-
- private:
- void DetachMappings();
+ void unmap(Napi::Env) override;
+ void destroy(Napi::Env) override;
+ interop::GPUSize64 getSize(Napi::Env) override;
+ interop::GPUBufferUsageFlags getUsage(Napi::Env) override;
+ std::string getLabel(Napi::Env) override;
+ void setLabel(Napi::Env, std::string value) override;
- struct Mapping {
- uint64_t start;
- uint64_t end;
- inline bool Intersects(uint64_t s, uint64_t e) const {
- return s < end && e > start;
- }
- Napi::Reference<interop::ArrayBuffer> buffer;
- };
+ private:
+ void DetachMappings();
- // https://www.w3.org/TR/webgpu/#buffer-interface
- enum class State {
- Unmapped,
- Mapped,
- MappedAtCreation,
- MappingPending,
- Destroyed,
- };
+ struct Mapping {
+ uint64_t start;
+ uint64_t end;
+ inline bool Intersects(uint64_t s, uint64_t e) const { return s < end && e > start; }
+ Napi::Reference<interop::ArrayBuffer> buffer;
+ };
- wgpu::Buffer buffer_;
- wgpu::BufferDescriptor const desc_;
- wgpu::Device const device_;
- std::shared_ptr<AsyncRunner> async_;
- State state_ = State::Unmapped;
- std::vector<Mapping> mapped_;
+ // https://www.w3.org/TR/webgpu/#buffer-interface
+ enum class State {
+ Unmapped,
+ Mapped,
+ MappedAtCreation,
+ MappingPending,
+ Destroyed,
};
+ wgpu::Buffer buffer_;
+ wgpu::BufferDescriptor const desc_;
+ wgpu::Device const device_;
+ std::shared_ptr<AsyncRunner> async_;
+ State state_ = State::Unmapped;
+ std::vector<Mapping> mapped_;
+};
+
} // namespace wgpu::binding
#endif // SRC_DAWN_NODE_BINDING_GPUBUFFER_H_
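
Mapping::Intersects in the header above treats each mapped region as a half-open interval [start, end), so two regions overlap exactly when each one starts before the other ends. A standalone sketch of the same overlap test, with hypothetical names and assuming well-formed ranges (start <= end):

#include <cassert>
#include <cstdint>

// Half-open ranges [start, end) overlap iff each starts before the other ends.
bool Overlaps(uint64_t a_start, uint64_t a_end, uint64_t b_start, uint64_t b_end) {
    return a_start < b_end && b_start < a_end;
}

int main() {
    assert(Overlaps(0, 8, 4, 12));   // partial overlap
    assert(!Overlaps(0, 8, 8, 16));  // ranges that only touch at 8 do not overlap
    assert(Overlaps(0, 16, 4, 8));   // containment counts as overlap
    assert(!Overlaps(0, 4, 8, 12));  // disjoint ranges
}
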
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPUCommandBuffer.cpp b/chromium/third_party/dawn/src/dawn/node/binding/GPUCommandBuffer.cpp
index ac86c1dccd2..4e7794edff8 100644
--- a/chromium/third_party/dawn/src/dawn/node/binding/GPUCommandBuffer.cpp
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPUCommandBuffer.cpp
@@ -14,24 +14,24 @@
#include "src/dawn/node/binding/GPUCommandBuffer.h"
+#include <utility>
+
#include "src/dawn/node/utils/Debug.h"
namespace wgpu::binding {
- ////////////////////////////////////////////////////////////////////////////////
- // wgpu::bindings::GPUCommandBuffer
- ////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
+// wgpu::bindings::GPUCommandBuffer
+////////////////////////////////////////////////////////////////////////////////
- GPUCommandBuffer::GPUCommandBuffer(wgpu::CommandBuffer cmd_buf) : cmd_buf_(std::move(cmd_buf)) {
- }
+GPUCommandBuffer::GPUCommandBuffer(wgpu::CommandBuffer cmd_buf) : cmd_buf_(std::move(cmd_buf)) {}
- std::variant<std::string, interop::UndefinedType> GPUCommandBuffer::getLabel(Napi::Env) {
- UNIMPLEMENTED();
- }
+std::string GPUCommandBuffer::getLabel(Napi::Env) {
+ UNIMPLEMENTED();
+}
- void GPUCommandBuffer::setLabel(Napi::Env,
- std::variant<std::string, interop::UndefinedType> value) {
- UNIMPLEMENTED();
- }
+void GPUCommandBuffer::setLabel(Napi::Env, std::string value) {
+ UNIMPLEMENTED();
+}
} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPUCommandBuffer.h b/chromium/third_party/dawn/src/dawn/node/binding/GPUCommandBuffer.h
index 5507ae0b5a7..b8ab08e0b0c 100644
--- a/chromium/third_party/dawn/src/dawn/node/binding/GPUCommandBuffer.h
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPUCommandBuffer.h
@@ -15,32 +15,31 @@
#ifndef SRC_DAWN_NODE_BINDING_GPUCOMMANDBUFFER_H_
#define SRC_DAWN_NODE_BINDING_GPUCOMMANDBUFFER_H_
+#include <string>
+
#include "dawn/native/DawnNative.h"
#include "dawn/webgpu_cpp.h"
-
#include "src/dawn/node/interop/Napi.h"
#include "src/dawn/node/interop/WebGPU.h"
namespace wgpu::binding {
- // GPUCommandBuffer is an implementation of interop::GPUCommandBuffer that wraps a
- // wgpu::CommandBuffer.
- class GPUCommandBuffer final : public interop::GPUCommandBuffer {
- public:
- GPUCommandBuffer(wgpu::CommandBuffer cmd_buf);
+// GPUCommandBuffer is an implementation of interop::GPUCommandBuffer that wraps a
+// wgpu::CommandBuffer.
+class GPUCommandBuffer final : public interop::GPUCommandBuffer {
+ public:
+ explicit GPUCommandBuffer(wgpu::CommandBuffer cmd_buf);
- // Implicit cast operator to Dawn GPU object
- inline operator const wgpu::CommandBuffer&() const {
- return cmd_buf_;
- }
+ // Implicit cast operator to Dawn GPU object
+ inline operator const wgpu::CommandBuffer&() const { return cmd_buf_; }
- // interop::GPUCommandBuffer interface compliance
- std::variant<std::string, interop::UndefinedType> getLabel(Napi::Env) override;
- void setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) override;
+ // interop::GPUCommandBuffer interface compliance
+ std::string getLabel(Napi::Env) override;
+ void setLabel(Napi::Env, std::string value) override;
- private:
- wgpu::CommandBuffer cmd_buf_;
- };
+ private:
+ wgpu::CommandBuffer cmd_buf_;
+};
} // namespace wgpu::binding
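
GPUCommandBuffer keeps the operator const wgpu::CommandBuffer&() member so the binding object can be passed wherever the underlying native handle is expected. Below is a standalone sketch of that accessor-by-conversion idiom; the types are hypothetical stand-ins, not the Dawn classes themselves.

#include <iostream>

struct NativeBuffer {
    int id = 42;
};

class Binding {
  public:
    explicit Binding(NativeBuffer buf) : buf_(buf) {}

    // Lets a Binding be handed to code that wants the wrapped native object.
    operator const NativeBuffer&() const { return buf_; }

  private:
    NativeBuffer buf_;
};

void Submit(const NativeBuffer& buf) {
    std::cout << "submitting buffer " << buf.id << "\n";
}

int main() {
    Binding b(NativeBuffer{});
    Submit(b);  // implicit conversion yields the wrapped NativeBuffer
}
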
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPUCommandEncoder.cpp b/chromium/third_party/dawn/src/dawn/node/binding/GPUCommandEncoder.cpp
index b707966280c..99ea8e07d74 100644
--- a/chromium/third_party/dawn/src/dawn/node/binding/GPUCommandEncoder.cpp
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPUCommandEncoder.cpp
@@ -14,6 +14,8 @@
#include "src/dawn/node/binding/GPUCommandEncoder.h"
+#include <utility>
+
#include "src/dawn/node/binding/Converter.h"
#include "src/dawn/node/binding/GPU.h"
#include "src/dawn/node/binding/GPUBuffer.h"
@@ -26,191 +28,189 @@
namespace wgpu::binding {
- ////////////////////////////////////////////////////////////////////////////////
- // wgpu::bindings::GPUCommandEncoder
- ////////////////////////////////////////////////////////////////////////////////
- GPUCommandEncoder::GPUCommandEncoder(wgpu::CommandEncoder enc) : enc_(std::move(enc)) {
- }
-
- interop::Interface<interop::GPURenderPassEncoder> GPUCommandEncoder::beginRenderPass(
- Napi::Env env,
- interop::GPURenderPassDescriptor descriptor) {
- Converter conv(env);
-
- wgpu::RenderPassDescriptor desc{};
- // TODO(dawn:1250) handle timestampWrites
- if (!conv(desc.colorAttachments, desc.colorAttachmentCount, descriptor.colorAttachments) ||
- !conv(desc.depthStencilAttachment, descriptor.depthStencilAttachment) ||
- !conv(desc.label, descriptor.label) ||
- !conv(desc.occlusionQuerySet, descriptor.occlusionQuerySet)) {
- return {};
- }
-
- return interop::GPURenderPassEncoder::Create<GPURenderPassEncoder>(
- env, enc_.BeginRenderPass(&desc));
- }
-
- interop::Interface<interop::GPUComputePassEncoder> GPUCommandEncoder::beginComputePass(
- Napi::Env env,
- interop::GPUComputePassDescriptor descriptor) {
- wgpu::ComputePassDescriptor desc{};
- // TODO(dawn:1250) handle timestampWrites
- return interop::GPUComputePassEncoder::Create<GPUComputePassEncoder>(
- env, enc_.BeginComputePass(&desc));
- }
-
- void GPUCommandEncoder::clearBuffer(Napi::Env env,
- interop::Interface<interop::GPUBuffer> buffer,
- interop::GPUSize64 offset,
- std::optional<interop::GPUSize64> size) {
- Converter conv(env);
-
- wgpu::Buffer b{};
- uint64_t s = wgpu::kWholeSize;
- if (!conv(b, buffer) || //
- !conv(s, size)) {
- return;
- }
-
- enc_.ClearBuffer(b, offset, s);
- }
-
- void GPUCommandEncoder::copyBufferToBuffer(Napi::Env env,
- interop::Interface<interop::GPUBuffer> source,
- interop::GPUSize64 sourceOffset,
- interop::Interface<interop::GPUBuffer> destination,
- interop::GPUSize64 destinationOffset,
- interop::GPUSize64 size) {
- Converter conv(env);
-
- wgpu::Buffer src{};
- wgpu::Buffer dst{};
- if (!conv(src, source) || //
- !conv(dst, destination)) {
- return;
- }
-
- enc_.CopyBufferToBuffer(src, sourceOffset, dst, destinationOffset, size);
- }
-
- void GPUCommandEncoder::copyBufferToTexture(Napi::Env env,
- interop::GPUImageCopyBuffer source,
- interop::GPUImageCopyTexture destination,
- interop::GPUExtent3D copySize) {
- Converter conv(env);
-
- wgpu::ImageCopyBuffer src{};
- wgpu::ImageCopyTexture dst{};
- wgpu::Extent3D size{};
- if (!conv(src, source) || //
- !conv(dst, destination) || //
- !conv(size, copySize)) {
- return;
- }
-
- enc_.CopyBufferToTexture(&src, &dst, &size);
- }
-
- void GPUCommandEncoder::copyTextureToBuffer(Napi::Env env,
- interop::GPUImageCopyTexture source,
- interop::GPUImageCopyBuffer destination,
- interop::GPUExtent3D copySize) {
- Converter conv(env);
-
- wgpu::ImageCopyTexture src{};
- wgpu::ImageCopyBuffer dst{};
- wgpu::Extent3D size{};
- if (!conv(src, source) || //
- !conv(dst, destination) || //
- !conv(size, copySize)) {
- return;
- }
-
- enc_.CopyTextureToBuffer(&src, &dst, &size);
- }
-
- void GPUCommandEncoder::copyTextureToTexture(Napi::Env env,
- interop::GPUImageCopyTexture source,
- interop::GPUImageCopyTexture destination,
- interop::GPUExtent3D copySize) {
- Converter conv(env);
-
- wgpu::ImageCopyTexture src{};
- wgpu::ImageCopyTexture dst{};
- wgpu::Extent3D size{};
- if (!conv(src, source) || //
- !conv(dst, destination) || //
- !conv(size, copySize)) {
- return;
- }
-
- enc_.CopyTextureToTexture(&src, &dst, &size);
- }
-
- void GPUCommandEncoder::pushDebugGroup(Napi::Env, std::string groupLabel) {
- enc_.PushDebugGroup(groupLabel.c_str());
- }
-
- void GPUCommandEncoder::popDebugGroup(Napi::Env) {
- enc_.PopDebugGroup();
- }
-
- void GPUCommandEncoder::insertDebugMarker(Napi::Env, std::string markerLabel) {
- enc_.InsertDebugMarker(markerLabel.c_str());
- }
-
- void GPUCommandEncoder::writeTimestamp(Napi::Env env,
- interop::Interface<interop::GPUQuerySet> querySet,
- interop::GPUSize32 queryIndex) {
- Converter conv(env);
-
- wgpu::QuerySet q{};
- if (!conv(q, querySet)) {
- return;
- }
-
- enc_.WriteTimestamp(q, queryIndex);
- }
-
- void GPUCommandEncoder::resolveQuerySet(Napi::Env env,
- interop::Interface<interop::GPUQuerySet> querySet,
- interop::GPUSize32 firstQuery,
- interop::GPUSize32 queryCount,
- interop::Interface<interop::GPUBuffer> destination,
- interop::GPUSize64 destinationOffset) {
- Converter conv(env);
-
- wgpu::QuerySet q{};
- uint32_t f = 0;
- uint32_t c = 0;
- wgpu::Buffer b{};
- uint64_t o = 0;
-
- if (!conv(q, querySet) || //
- !conv(f, firstQuery) || //
- !conv(c, queryCount) || //
- !conv(b, destination) || //
- !conv(o, destinationOffset)) {
- return;
- }
-
- enc_.ResolveQuerySet(q, f, c, b, o);
- }
-
- interop::Interface<interop::GPUCommandBuffer> GPUCommandEncoder::finish(
- Napi::Env env,
- interop::GPUCommandBufferDescriptor descriptor) {
- wgpu::CommandBufferDescriptor desc{};
- return interop::GPUCommandBuffer::Create<GPUCommandBuffer>(env, enc_.Finish(&desc));
- }
-
- std::variant<std::string, interop::UndefinedType> GPUCommandEncoder::getLabel(Napi::Env) {
- UNIMPLEMENTED();
- }
-
- void GPUCommandEncoder::setLabel(Napi::Env,
- std::variant<std::string, interop::UndefinedType> value) {
- UNIMPLEMENTED();
- }
+////////////////////////////////////////////////////////////////////////////////
+// wgpu::bindings::GPUCommandEncoder
+////////////////////////////////////////////////////////////////////////////////
+GPUCommandEncoder::GPUCommandEncoder(wgpu::CommandEncoder enc) : enc_(std::move(enc)) {}
+
+interop::Interface<interop::GPURenderPassEncoder> GPUCommandEncoder::beginRenderPass(
+ Napi::Env env,
+ interop::GPURenderPassDescriptor descriptor) {
+ Converter conv(env);
+
+ wgpu::RenderPassDescriptor desc{};
+ // TODO(dawn:1250) handle timestampWrites
+ if (!conv(desc.colorAttachments, desc.colorAttachmentCount, descriptor.colorAttachments) ||
+ !conv(desc.depthStencilAttachment, descriptor.depthStencilAttachment) ||
+ !conv(desc.label, descriptor.label) ||
+ !conv(desc.occlusionQuerySet, descriptor.occlusionQuerySet)) {
+ return {};
+ }
+
+ return interop::GPURenderPassEncoder::Create<GPURenderPassEncoder>(env,
+ enc_.BeginRenderPass(&desc));
+}
+
+interop::Interface<interop::GPUComputePassEncoder> GPUCommandEncoder::beginComputePass(
+ Napi::Env env,
+ interop::GPUComputePassDescriptor descriptor) {
+ wgpu::ComputePassDescriptor desc{};
+ // TODO(dawn:1250) handle timestampWrites
+ return interop::GPUComputePassEncoder::Create<GPUComputePassEncoder>(
+ env, enc_.BeginComputePass(&desc));
+}
+
+void GPUCommandEncoder::clearBuffer(Napi::Env env,
+ interop::Interface<interop::GPUBuffer> buffer,
+ interop::GPUSize64 offset,
+ std::optional<interop::GPUSize64> size) {
+ Converter conv(env);
+
+ wgpu::Buffer b{};
+ uint64_t s = wgpu::kWholeSize;
+ if (!conv(b, buffer) || //
+ !conv(s, size)) {
+ return;
+ }
+
+ enc_.ClearBuffer(b, offset, s);
+}
+
+void GPUCommandEncoder::copyBufferToBuffer(Napi::Env env,
+ interop::Interface<interop::GPUBuffer> source,
+ interop::GPUSize64 sourceOffset,
+ interop::Interface<interop::GPUBuffer> destination,
+ interop::GPUSize64 destinationOffset,
+ interop::GPUSize64 size) {
+ Converter conv(env);
+
+ wgpu::Buffer src{};
+ wgpu::Buffer dst{};
+ if (!conv(src, source) || //
+ !conv(dst, destination)) {
+ return;
+ }
+
+ enc_.CopyBufferToBuffer(src, sourceOffset, dst, destinationOffset, size);
+}
+
+void GPUCommandEncoder::copyBufferToTexture(Napi::Env env,
+ interop::GPUImageCopyBuffer source,
+ interop::GPUImageCopyTexture destination,
+ interop::GPUExtent3D copySize) {
+ Converter conv(env);
+
+ wgpu::ImageCopyBuffer src{};
+ wgpu::ImageCopyTexture dst{};
+ wgpu::Extent3D size{};
+ if (!conv(src, source) || //
+ !conv(dst, destination) || //
+ !conv(size, copySize)) {
+ return;
+ }
+
+ enc_.CopyBufferToTexture(&src, &dst, &size);
+}
+
+void GPUCommandEncoder::copyTextureToBuffer(Napi::Env env,
+ interop::GPUImageCopyTexture source,
+ interop::GPUImageCopyBuffer destination,
+ interop::GPUExtent3D copySize) {
+ Converter conv(env);
+
+ wgpu::ImageCopyTexture src{};
+ wgpu::ImageCopyBuffer dst{};
+ wgpu::Extent3D size{};
+ if (!conv(src, source) || //
+ !conv(dst, destination) || //
+ !conv(size, copySize)) {
+ return;
+ }
+
+ enc_.CopyTextureToBuffer(&src, &dst, &size);
+}
+
+void GPUCommandEncoder::copyTextureToTexture(Napi::Env env,
+ interop::GPUImageCopyTexture source,
+ interop::GPUImageCopyTexture destination,
+ interop::GPUExtent3D copySize) {
+ Converter conv(env);
+
+ wgpu::ImageCopyTexture src{};
+ wgpu::ImageCopyTexture dst{};
+ wgpu::Extent3D size{};
+ if (!conv(src, source) || //
+ !conv(dst, destination) || //
+ !conv(size, copySize)) {
+ return;
+ }
+
+ enc_.CopyTextureToTexture(&src, &dst, &size);
+}
+
+void GPUCommandEncoder::pushDebugGroup(Napi::Env, std::string groupLabel) {
+ enc_.PushDebugGroup(groupLabel.c_str());
+}
+
+void GPUCommandEncoder::popDebugGroup(Napi::Env) {
+ enc_.PopDebugGroup();
+}
+
+void GPUCommandEncoder::insertDebugMarker(Napi::Env, std::string markerLabel) {
+ enc_.InsertDebugMarker(markerLabel.c_str());
+}
+
+void GPUCommandEncoder::writeTimestamp(Napi::Env env,
+ interop::Interface<interop::GPUQuerySet> querySet,
+ interop::GPUSize32 queryIndex) {
+ Converter conv(env);
+
+ wgpu::QuerySet q{};
+ if (!conv(q, querySet)) {
+ return;
+ }
+
+ enc_.WriteTimestamp(q, queryIndex);
+}
+
+void GPUCommandEncoder::resolveQuerySet(Napi::Env env,
+ interop::Interface<interop::GPUQuerySet> querySet,
+ interop::GPUSize32 firstQuery,
+ interop::GPUSize32 queryCount,
+ interop::Interface<interop::GPUBuffer> destination,
+ interop::GPUSize64 destinationOffset) {
+ Converter conv(env);
+
+ wgpu::QuerySet q{};
+ uint32_t f = 0;
+ uint32_t c = 0;
+ wgpu::Buffer b{};
+ uint64_t o = 0;
+
+ if (!conv(q, querySet) || //
+ !conv(f, firstQuery) || //
+ !conv(c, queryCount) || //
+ !conv(b, destination) || //
+ !conv(o, destinationOffset)) {
+ return;
+ }
+
+ enc_.ResolveQuerySet(q, f, c, b, o);
+}
+
+interop::Interface<interop::GPUCommandBuffer> GPUCommandEncoder::finish(
+ Napi::Env env,
+ interop::GPUCommandBufferDescriptor descriptor) {
+ wgpu::CommandBufferDescriptor desc{};
+ return interop::GPUCommandBuffer::Create<GPUCommandBuffer>(env, enc_.Finish(&desc));
+}
+
+std::string GPUCommandEncoder::getLabel(Napi::Env) {
+ UNIMPLEMENTED();
+}
+
+void GPUCommandEncoder::setLabel(Napi::Env, std::string value) {
+ UNIMPLEMENTED();
+}
} // namespace wgpu::binding
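
The encoder methods above all share one shape: build a zero-initialized wgpu descriptor, run each field through Converter conv(env) in a short-circuiting if (!conv(...) || !conv(...)), and return early once any conversion fails. The following is a minimal sketch of that bool-returning converter idiom with hypothetical types and no N-API involved; it illustrates the control flow only.

#include <iostream>
#include <optional>
#include <string>

// Stand-ins for the interop-side and native-side descriptor types.
struct JsDescriptor {
    std::optional<int> size;
    std::string label;
};
struct NativeDescriptor {
    int size = 0;
    const char* label = nullptr;
};

class Converter {
  public:
    // Each overload returns false (after reporting an error) on failure,
    // so callers can chain conversions with || and bail out early.
    bool operator()(int& out, const std::optional<int>& in) {
        if (!in.has_value() || *in < 0) {
            std::cerr << "invalid size\n";
            return false;
        }
        out = *in;
        return true;
    }
    bool operator()(const char*& out, const std::string& in) {
        out = in.c_str();
        return true;
    }
};

bool BuildDescriptor(const JsDescriptor& js, NativeDescriptor& native) {
    Converter conv;
    if (!conv(native.size, js.size) ||  //
        !conv(native.label, js.label)) {
        return false;  // the first failing conversion stops the chain
    }
    return true;
}

int main() {
    JsDescriptor js{16, "my buffer"};
    NativeDescriptor native{};
    std::cout << (BuildDescriptor(js, native) ? "ok" : "failed") << "\n";
}
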
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPUCommandEncoder.h b/chromium/third_party/dawn/src/dawn/node/binding/GPUCommandEncoder.h
index f425d469c1d..a3878c62bc7 100644
--- a/chromium/third_party/dawn/src/dawn/node/binding/GPUCommandEncoder.h
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPUCommandEncoder.h
@@ -15,70 +15,71 @@
#ifndef SRC_DAWN_NODE_BINDING_GPUCOMMANDENCODER_H_
#define SRC_DAWN_NODE_BINDING_GPUCOMMANDENCODER_H_
+#include <string>
+
#include "dawn/native/DawnNative.h"
#include "dawn/webgpu_cpp.h"
-
#include "src/dawn/node/interop/Napi.h"
#include "src/dawn/node/interop/WebGPU.h"
namespace wgpu::binding {
- // GPUCommandEncoder is an implementation of interop::GPUCommandEncoder that wraps a
- // wgpu::CommandEncoder.
- class GPUCommandEncoder final : public interop::GPUCommandEncoder {
- public:
- GPUCommandEncoder(wgpu::CommandEncoder enc);
+// GPUCommandEncoder is an implementation of interop::GPUCommandEncoder that wraps a
+// wgpu::CommandEncoder.
+class GPUCommandEncoder final : public interop::GPUCommandEncoder {
+ public:
+ explicit GPUCommandEncoder(wgpu::CommandEncoder enc);
- // interop::GPUCommandEncoder interface compliance
- interop::Interface<interop::GPURenderPassEncoder> beginRenderPass(
- Napi::Env,
- interop::GPURenderPassDescriptor descriptor) override;
- interop::Interface<interop::GPUComputePassEncoder> beginComputePass(
- Napi::Env,
- interop::GPUComputePassDescriptor descriptor) override;
- void clearBuffer(Napi::Env,
- interop::Interface<interop::GPUBuffer> buffer,
- interop::GPUSize64 offset,
- std::optional<interop::GPUSize64> size) override;
- void copyBufferToBuffer(Napi::Env,
- interop::Interface<interop::GPUBuffer> source,
- interop::GPUSize64 sourceOffset,
- interop::Interface<interop::GPUBuffer> destination,
- interop::GPUSize64 destinationOffset,
- interop::GPUSize64 size) override;
- void copyBufferToTexture(Napi::Env,
- interop::GPUImageCopyBuffer source,
- interop::GPUImageCopyTexture destination,
- interop::GPUExtent3D copySize) override;
- void copyTextureToBuffer(Napi::Env,
- interop::GPUImageCopyTexture source,
- interop::GPUImageCopyBuffer destination,
- interop::GPUExtent3D copySize) override;
- void copyTextureToTexture(Napi::Env,
- interop::GPUImageCopyTexture source,
- interop::GPUImageCopyTexture destination,
- interop::GPUExtent3D copySize) override;
- void pushDebugGroup(Napi::Env, std::string groupLabel) override;
- void popDebugGroup(Napi::Env) override;
- void insertDebugMarker(Napi::Env, std::string markerLabel) override;
- void writeTimestamp(Napi::Env,
- interop::Interface<interop::GPUQuerySet> querySet,
- interop::GPUSize32 queryIndex) override;
- void resolveQuerySet(Napi::Env,
- interop::Interface<interop::GPUQuerySet> querySet,
- interop::GPUSize32 firstQuery,
- interop::GPUSize32 queryCount,
- interop::Interface<interop::GPUBuffer> destination,
- interop::GPUSize64 destinationOffset) override;
- interop::Interface<interop::GPUCommandBuffer> finish(
- Napi::Env env,
- interop::GPUCommandBufferDescriptor descriptor) override;
- std::variant<std::string, interop::UndefinedType> getLabel(Napi::Env) override;
- void setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) override;
+ // interop::GPUCommandEncoder interface compliance
+ interop::Interface<interop::GPURenderPassEncoder> beginRenderPass(
+ Napi::Env,
+ interop::GPURenderPassDescriptor descriptor) override;
+ interop::Interface<interop::GPUComputePassEncoder> beginComputePass(
+ Napi::Env,
+ interop::GPUComputePassDescriptor descriptor) override;
+ void clearBuffer(Napi::Env,
+ interop::Interface<interop::GPUBuffer> buffer,
+ interop::GPUSize64 offset,
+ std::optional<interop::GPUSize64> size) override;
+ void copyBufferToBuffer(Napi::Env,
+ interop::Interface<interop::GPUBuffer> source,
+ interop::GPUSize64 sourceOffset,
+ interop::Interface<interop::GPUBuffer> destination,
+ interop::GPUSize64 destinationOffset,
+ interop::GPUSize64 size) override;
+ void copyBufferToTexture(Napi::Env,
+ interop::GPUImageCopyBuffer source,
+ interop::GPUImageCopyTexture destination,
+ interop::GPUExtent3D copySize) override;
+ void copyTextureToBuffer(Napi::Env,
+ interop::GPUImageCopyTexture source,
+ interop::GPUImageCopyBuffer destination,
+ interop::GPUExtent3D copySize) override;
+ void copyTextureToTexture(Napi::Env,
+ interop::GPUImageCopyTexture source,
+ interop::GPUImageCopyTexture destination,
+ interop::GPUExtent3D copySize) override;
+ void pushDebugGroup(Napi::Env, std::string groupLabel) override;
+ void popDebugGroup(Napi::Env) override;
+ void insertDebugMarker(Napi::Env, std::string markerLabel) override;
+ void writeTimestamp(Napi::Env,
+ interop::Interface<interop::GPUQuerySet> querySet,
+ interop::GPUSize32 queryIndex) override;
+ void resolveQuerySet(Napi::Env,
+ interop::Interface<interop::GPUQuerySet> querySet,
+ interop::GPUSize32 firstQuery,
+ interop::GPUSize32 queryCount,
+ interop::Interface<interop::GPUBuffer> destination,
+ interop::GPUSize64 destinationOffset) override;
+ interop::Interface<interop::GPUCommandBuffer> finish(
+ Napi::Env env,
+ interop::GPUCommandBufferDescriptor descriptor) override;
+ std::string getLabel(Napi::Env) override;
+ void setLabel(Napi::Env, std::string value) override;
- private:
- wgpu::CommandEncoder enc_;
- };
+ private:
+ wgpu::CommandEncoder enc_;
+};
} // namespace wgpu::binding
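
Several single-argument wrapper constructors in these headers gain explicit (GPUCommandBuffer, GPUCommandEncoder, and the other bindings below). A minimal sketch of what that keyword prevents, using hypothetical types unrelated to Dawn:

#include <utility>

struct Handle {};  // stand-in for a wrapped native object

class Wrapper {
  public:
    explicit Wrapper(Handle h) : h_(std::move(h)) {}

  private:
    Handle h_;
};

void Submit(const Wrapper&) {}

int main() {
    Handle h;
    Submit(Wrapper(h));  // fine: the conversion is spelled out
    // Submit(h);        // would not compile: no implicit Handle -> Wrapper
}
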
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPUComputePassEncoder.cpp b/chromium/third_party/dawn/src/dawn/node/binding/GPUComputePassEncoder.cpp
index b08518ed72a..0fc30ae4695 100644
--- a/chromium/third_party/dawn/src/dawn/node/binding/GPUComputePassEncoder.cpp
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPUComputePassEncoder.cpp
@@ -14,6 +14,8 @@
#include "src/dawn/node/binding/GPUComputePassEncoder.h"
+#include <utility>
+
#include "src/dawn/node/binding/Converter.h"
#include "src/dawn/node/binding/GPUBindGroup.h"
#include "src/dawn/node/binding/GPUBuffer.h"
@@ -23,106 +25,100 @@
namespace wgpu::binding {
- ////////////////////////////////////////////////////////////////////////////////
- // wgpu::bindings::GPUComputePassEncoder
- ////////////////////////////////////////////////////////////////////////////////
- GPUComputePassEncoder::GPUComputePassEncoder(wgpu::ComputePassEncoder enc)
- : enc_(std::move(enc)) {
+////////////////////////////////////////////////////////////////////////////////
+// wgpu::bindings::GPUComputePassEncoder
+////////////////////////////////////////////////////////////////////////////////
+GPUComputePassEncoder::GPUComputePassEncoder(wgpu::ComputePassEncoder enc) : enc_(std::move(enc)) {}
+
+void GPUComputePassEncoder::setPipeline(Napi::Env,
+ interop::Interface<interop::GPUComputePipeline> pipeline) {
+ enc_.SetPipeline(*pipeline.As<GPUComputePipeline>());
+}
+
+void GPUComputePassEncoder::dispatchWorkgroups(Napi::Env,
+ interop::GPUSize32 workgroupCountX,
+ interop::GPUSize32 workgroupCountY,
+ interop::GPUSize32 workgroupCountZ) {
+ enc_.DispatchWorkgroups(workgroupCountX, workgroupCountY, workgroupCountZ);
+}
+
+void GPUComputePassEncoder::dispatchWorkgroupsIndirect(
+ Napi::Env,
+ interop::Interface<interop::GPUBuffer> indirectBuffer,
+ interop::GPUSize64 indirectOffset) {
+ enc_.DispatchWorkgroupsIndirect(*indirectBuffer.As<GPUBuffer>(), indirectOffset);
+}
+
+void GPUComputePassEncoder::end(Napi::Env) {
+ enc_.End();
+}
+
+void GPUComputePassEncoder::setBindGroup(
+ Napi::Env env,
+ interop::GPUIndex32 index,
+ interop::Interface<interop::GPUBindGroup> bindGroup,
+ std::vector<interop::GPUBufferDynamicOffset> dynamicOffsets) {
+ Converter conv(env);
+
+ wgpu::BindGroup bg{};
+ uint32_t* offsets = nullptr;
+ uint32_t num_offsets = 0;
+ if (!conv(bg, bindGroup) || !conv(offsets, num_offsets, dynamicOffsets)) {
+ return;
}
- void GPUComputePassEncoder::setPipeline(
- Napi::Env,
- interop::Interface<interop::GPUComputePipeline> pipeline) {
- enc_.SetPipeline(*pipeline.As<GPUComputePipeline>());
- }
+ enc_.SetBindGroup(index, bg, num_offsets, offsets);
+}
- void GPUComputePassEncoder::dispatch(Napi::Env,
- interop::GPUSize32 workgroupCountX,
- interop::GPUSize32 workgroupCountY,
- interop::GPUSize32 workgroupCountZ) {
- enc_.Dispatch(workgroupCountX, workgroupCountY, workgroupCountZ);
- }
+void GPUComputePassEncoder::setBindGroup(Napi::Env env,
+ interop::GPUIndex32 index,
+ interop::Interface<interop::GPUBindGroup> bindGroup,
+ interop::Uint32Array dynamicOffsetsData,
+ interop::GPUSize64 dynamicOffsetsDataStart,
+ interop::GPUSize32 dynamicOffsetsDataLength) {
+ Converter conv(env);
- void GPUComputePassEncoder::dispatchIndirect(
- Napi::Env,
- interop::Interface<interop::GPUBuffer> indirectBuffer,
- interop::GPUSize64 indirectOffset) {
- enc_.DispatchIndirect(*indirectBuffer.As<GPUBuffer>(), indirectOffset);
+ wgpu::BindGroup bg{};
+ if (!conv(bg, bindGroup)) {
+ return;
}
- void GPUComputePassEncoder::end(Napi::Env) {
- enc_.End();
+ if (dynamicOffsetsDataStart > dynamicOffsetsData.ElementLength()) {
+ Napi::RangeError::New(env, "dynamicOffsetsDataStart is out of bound of dynamicOffsetData")
+ .ThrowAsJavaScriptException();
+ return;
}
- void GPUComputePassEncoder::setBindGroup(
- Napi::Env env,
- interop::GPUIndex32 index,
- interop::Interface<interop::GPUBindGroup> bindGroup,
- std::vector<interop::GPUBufferDynamicOffset> dynamicOffsets) {
- Converter conv(env);
-
- wgpu::BindGroup bg{};
- uint32_t* offsets = nullptr;
- uint32_t num_offsets = 0;
- if (!conv(bg, bindGroup) || !conv(offsets, num_offsets, dynamicOffsets)) {
- return;
- }
-
- enc_.SetBindGroup(index, bg, num_offsets, offsets);
+ if (dynamicOffsetsDataLength > dynamicOffsetsData.ElementLength() - dynamicOffsetsDataStart) {
+ Napi::RangeError::New(env,
+ "dynamicOffsetsDataLength + dynamicOffsetsDataStart is out of "
+ "bound of dynamicOffsetData")
+ .ThrowAsJavaScriptException();
+ return;
}
- void GPUComputePassEncoder::setBindGroup(Napi::Env env,
- interop::GPUIndex32 index,
- interop::Interface<interop::GPUBindGroup> bindGroup,
- interop::Uint32Array dynamicOffsetsData,
- interop::GPUSize64 dynamicOffsetsDataStart,
- interop::GPUSize32 dynamicOffsetsDataLength) {
- Converter conv(env);
-
- wgpu::BindGroup bg{};
- if (!conv(bg, bindGroup)) {
- return;
- }
-
- if (dynamicOffsetsDataStart > dynamicOffsetsData.ElementLength()) {
- Napi::RangeError::New(env,
- "dynamicOffsetsDataStart is out of bound of dynamicOffsetData")
- .ThrowAsJavaScriptException();
- return;
- }
-
- if (dynamicOffsetsDataLength >
- dynamicOffsetsData.ElementLength() - dynamicOffsetsDataStart) {
- Napi::RangeError::New(env,
- "dynamicOffsetsDataLength + dynamicOffsetsDataStart is out of "
- "bound of dynamicOffsetData")
- .ThrowAsJavaScriptException();
- return;
- }
-
- enc_.SetBindGroup(index, bg, dynamicOffsetsDataLength,
- dynamicOffsetsData.Data() + dynamicOffsetsDataStart);
- }
+ enc_.SetBindGroup(index, bg, dynamicOffsetsDataLength,
+ dynamicOffsetsData.Data() + dynamicOffsetsDataStart);
+}
- void GPUComputePassEncoder::pushDebugGroup(Napi::Env, std::string groupLabel) {
- enc_.PushDebugGroup(groupLabel.c_str());
- }
+void GPUComputePassEncoder::pushDebugGroup(Napi::Env, std::string groupLabel) {
+ enc_.PushDebugGroup(groupLabel.c_str());
+}
- void GPUComputePassEncoder::popDebugGroup(Napi::Env) {
- enc_.PopDebugGroup();
- }
+void GPUComputePassEncoder::popDebugGroup(Napi::Env) {
+ enc_.PopDebugGroup();
+}
- void GPUComputePassEncoder::insertDebugMarker(Napi::Env, std::string markerLabel) {
- enc_.InsertDebugMarker(markerLabel.c_str());
- }
+void GPUComputePassEncoder::insertDebugMarker(Napi::Env, std::string markerLabel) {
+ enc_.InsertDebugMarker(markerLabel.c_str());
+}
- std::variant<std::string, interop::UndefinedType> GPUComputePassEncoder::getLabel(Napi::Env) {
- UNIMPLEMENTED();
- }
+std::string GPUComputePassEncoder::getLabel(Napi::Env) {
+ UNIMPLEMENTED();
+}
- void GPUComputePassEncoder::setLabel(Napi::Env,
- std::variant<std::string, interop::UndefinedType> value) {
- UNIMPLEMENTED();
- }
+void GPUComputePassEncoder::setLabel(Napi::Env, std::string value) {
+ UNIMPLEMENTED();
+}
} // namespace wgpu::binding
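
The second setBindGroup overload above validates dynamicOffsetsDataStart and dynamicOffsetsDataLength in two steps, checking the start against the typed-array length first and then comparing the length against ElementLength() - start, which avoids ever computing start + length and overflowing. A standalone sketch of that overflow-safe bounds check, with hypothetical names:

#include <cstdint>
#include <iostream>

// Returns true when [start, start + length) lies inside an array of
// element_count elements, without computing start + length (which could
// wrap around for large 64-bit values).
bool RangeInBounds(uint64_t start, uint32_t length, uint64_t element_count) {
    if (start > element_count) {
        return false;  // the start is already past the end
    }
    if (length > element_count - start) {
        return false;  // the remaining space is too small
    }
    return true;
}

int main() {
    std::cout << RangeInBounds(2, 3, 8) << "\n";           // 1: fits
    std::cout << RangeInBounds(8, 1, 8) << "\n";           // 0: past the end
    std::cout << RangeInBounds(UINT64_MAX, 1, 8) << "\n";  // 0: no wraparound
}
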
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPUComputePassEncoder.h b/chromium/third_party/dawn/src/dawn/node/binding/GPUComputePassEncoder.h
index 2e8cb1384f3..6f82a46115c 100644
--- a/chromium/third_party/dawn/src/dawn/node/binding/GPUComputePassEncoder.h
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPUComputePassEncoder.h
@@ -15,55 +15,54 @@
#ifndef SRC_DAWN_NODE_BINDING_GPUCOMPUTEPASSENCODER_H_
#define SRC_DAWN_NODE_BINDING_GPUCOMPUTEPASSENCODER_H_
+#include <string>
+#include <vector>
+
#include "dawn/native/DawnNative.h"
#include "dawn/webgpu_cpp.h"
-
#include "src/dawn/node/interop/Napi.h"
#include "src/dawn/node/interop/WebGPU.h"
namespace wgpu::binding {
- // GPUComputePassEncoder is an implementation of interop::GPUComputePassEncoder that wraps a
- // wgpu::ComputePassEncoder.
- class GPUComputePassEncoder final : public interop::GPUComputePassEncoder {
- public:
- GPUComputePassEncoder(wgpu::ComputePassEncoder enc);
+// GPUComputePassEncoder is an implementation of interop::GPUComputePassEncoder that wraps a
+// wgpu::ComputePassEncoder.
+class GPUComputePassEncoder final : public interop::GPUComputePassEncoder {
+ public:
+ explicit GPUComputePassEncoder(wgpu::ComputePassEncoder enc);
- // Implicit cast operator to Dawn GPU object
- inline operator const wgpu::ComputePassEncoder&() const {
- return enc_;
- }
+ // Implicit cast operator to Dawn GPU object
+ inline operator const wgpu::ComputePassEncoder&() const { return enc_; }
- // interop::GPUComputePassEncoder interface compliance
- void setPipeline(Napi::Env,
- interop::Interface<interop::GPUComputePipeline> pipeline) override;
- void dispatch(Napi::Env,
- interop::GPUSize32 workgroupCountX,
- interop::GPUSize32 workgroupCountY,
- interop::GPUSize32 workgroupCountZ) override;
- void dispatchIndirect(Napi::Env,
- interop::Interface<interop::GPUBuffer> indirectBuffer,
- interop::GPUSize64 indirectOffset) override;
- void end(Napi::Env) override;
- void setBindGroup(Napi::Env,
- interop::GPUIndex32 index,
- interop::Interface<interop::GPUBindGroup> bindGroup,
- std::vector<interop::GPUBufferDynamicOffset> dynamicOffsets) override;
- void setBindGroup(Napi::Env,
- interop::GPUIndex32 index,
- interop::Interface<interop::GPUBindGroup> bindGroup,
- interop::Uint32Array dynamicOffsetsData,
- interop::GPUSize64 dynamicOffsetsDataStart,
- interop::GPUSize32 dynamicOffsetsDataLength) override;
- void pushDebugGroup(Napi::Env, std::string groupLabel) override;
- void popDebugGroup(Napi::Env) override;
- void insertDebugMarker(Napi::Env, std::string markerLabel) override;
- std::variant<std::string, interop::UndefinedType> getLabel(Napi::Env) override;
- void setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) override;
+ // interop::GPUComputePassEncoder interface compliance
+ void setPipeline(Napi::Env, interop::Interface<interop::GPUComputePipeline> pipeline) override;
+ void dispatchWorkgroups(Napi::Env,
+ interop::GPUSize32 workgroupCountX,
+ interop::GPUSize32 workgroupCountY,
+ interop::GPUSize32 workgroupCountZ) override;
+ void dispatchWorkgroupsIndirect(Napi::Env,
+ interop::Interface<interop::GPUBuffer> indirectBuffer,
+ interop::GPUSize64 indirectOffset) override;
+ void end(Napi::Env) override;
+ void setBindGroup(Napi::Env,
+ interop::GPUIndex32 index,
+ interop::Interface<interop::GPUBindGroup> bindGroup,
+ std::vector<interop::GPUBufferDynamicOffset> dynamicOffsets) override;
+ void setBindGroup(Napi::Env,
+ interop::GPUIndex32 index,
+ interop::Interface<interop::GPUBindGroup> bindGroup,
+ interop::Uint32Array dynamicOffsetsData,
+ interop::GPUSize64 dynamicOffsetsDataStart,
+ interop::GPUSize32 dynamicOffsetsDataLength) override;
+ void pushDebugGroup(Napi::Env, std::string groupLabel) override;
+ void popDebugGroup(Napi::Env) override;
+ void insertDebugMarker(Napi::Env, std::string markerLabel) override;
+ std::string getLabel(Napi::Env) override;
+ void setLabel(Napi::Env, std::string value) override;
- private:
- wgpu::ComputePassEncoder enc_;
- };
+ private:
+ wgpu::ComputePassEncoder enc_;
+};
} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPUComputePipeline.cpp b/chromium/third_party/dawn/src/dawn/node/binding/GPUComputePipeline.cpp
index 9ae09241e65..c2bfedf1e71 100644
--- a/chromium/third_party/dawn/src/dawn/node/binding/GPUComputePipeline.cpp
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPUComputePipeline.cpp
@@ -14,33 +14,33 @@
#include "src/dawn/node/binding/GPUComputePipeline.h"
+#include <utility>
+
#include "src/dawn/node/binding/GPUBindGroupLayout.h"
#include "src/dawn/node/binding/GPUBuffer.h"
#include "src/dawn/node/utils/Debug.h"
namespace wgpu::binding {
- ////////////////////////////////////////////////////////////////////////////////
- // wgpu::bindings::GPUComputePipeline
- ////////////////////////////////////////////////////////////////////////////////
- GPUComputePipeline::GPUComputePipeline(wgpu::ComputePipeline pipeline)
- : pipeline_(std::move(pipeline)) {
- }
-
- interop::Interface<interop::GPUBindGroupLayout> GPUComputePipeline::getBindGroupLayout(
- Napi::Env env,
- uint32_t index) {
- return interop::GPUBindGroupLayout::Create<GPUBindGroupLayout>(
- env, pipeline_.GetBindGroupLayout(index));
- }
-
- std::variant<std::string, interop::UndefinedType> GPUComputePipeline::getLabel(Napi::Env) {
- UNIMPLEMENTED();
- }
-
- void GPUComputePipeline::setLabel(Napi::Env,
- std::variant<std::string, interop::UndefinedType> value) {
- UNIMPLEMENTED();
- }
+////////////////////////////////////////////////////////////////////////////////
+// wgpu::bindings::GPUComputePipeline
+////////////////////////////////////////////////////////////////////////////////
+GPUComputePipeline::GPUComputePipeline(wgpu::ComputePipeline pipeline)
+ : pipeline_(std::move(pipeline)) {}
+
+interop::Interface<interop::GPUBindGroupLayout> GPUComputePipeline::getBindGroupLayout(
+ Napi::Env env,
+ uint32_t index) {
+ return interop::GPUBindGroupLayout::Create<GPUBindGroupLayout>(
+ env, pipeline_.GetBindGroupLayout(index));
+}
+
+std::string GPUComputePipeline::getLabel(Napi::Env) {
+ UNIMPLEMENTED();
+}
+
+void GPUComputePipeline::setLabel(Napi::Env, std::string value) {
+ UNIMPLEMENTED();
+}
} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPUComputePipeline.h b/chromium/third_party/dawn/src/dawn/node/binding/GPUComputePipeline.h
index df8ccfabf51..45af9f787c3 100644
--- a/chromium/third_party/dawn/src/dawn/node/binding/GPUComputePipeline.h
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPUComputePipeline.h
@@ -15,34 +15,33 @@
#ifndef SRC_DAWN_NODE_BINDING_GPUCOMPUTEPIPELINE_H_
#define SRC_DAWN_NODE_BINDING_GPUCOMPUTEPIPELINE_H_
+#include <string>
+
#include "dawn/native/DawnNative.h"
#include "dawn/webgpu_cpp.h"
-
#include "src/dawn/node/interop/Napi.h"
#include "src/dawn/node/interop/WebGPU.h"
namespace wgpu::binding {
- // GPUComputePipeline is an implementation of interop::GPUComputePipeline that wraps a
- // wgpu::ComputePipeline.
- class GPUComputePipeline final : public interop::GPUComputePipeline {
- public:
- GPUComputePipeline(wgpu::ComputePipeline pipeline);
-
- // Implicit cast operator to Dawn GPU object
- inline operator const wgpu::ComputePipeline&() const {
- return pipeline_;
- }
-
- // interop::GPUComputePipeline interface compliance
- interop::Interface<interop::GPUBindGroupLayout> getBindGroupLayout(Napi::Env,
- uint32_t index) override;
- std::variant<std::string, interop::UndefinedType> getLabel(Napi::Env) override;
- void setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) override;
-
- private:
- wgpu::ComputePipeline pipeline_;
- };
+// GPUComputePipeline is an implementation of interop::GPUComputePipeline that wraps a
+// wgpu::ComputePipeline.
+class GPUComputePipeline final : public interop::GPUComputePipeline {
+ public:
+ explicit GPUComputePipeline(wgpu::ComputePipeline pipeline);
+
+ // Implicit cast operator to Dawn GPU object
+ inline operator const wgpu::ComputePipeline&() const { return pipeline_; }
+
+ // interop::GPUComputePipeline interface compliance
+ interop::Interface<interop::GPUBindGroupLayout> getBindGroupLayout(Napi::Env,
+ uint32_t index) override;
+ std::string getLabel(Napi::Env) override;
+ void setLabel(Napi::Env, std::string value) override;
+
+ private:
+ wgpu::ComputePipeline pipeline_;
+};
} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPUDevice.cpp b/chromium/third_party/dawn/src/dawn/node/binding/GPUDevice.cpp
index 05546948e5f..5239ce16e12 100644
--- a/chromium/third_party/dawn/src/dawn/node/binding/GPUDevice.cpp
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPUDevice.cpp
@@ -15,6 +15,8 @@
#include "src/dawn/node/binding/GPUDevice.h"
#include <memory>
+#include <utility>
+#include <vector>
#include "src/dawn/node/binding/Converter.h"
#include "src/dawn/node/binding/Errors.h"
@@ -37,494 +39,496 @@
namespace wgpu::binding {
- namespace {
+namespace {
- class DeviceLostInfo : public interop::GPUDeviceLostInfo {
- public:
- DeviceLostInfo(interop::GPUDeviceLostReason reason, std::string message)
- : reason_(reason), message_(message) {
- }
- std::variant<interop::GPUDeviceLostReason, interop::UndefinedType> getReason(
- Napi::Env env) override {
- return reason_;
- }
- std::string getMessage(Napi::Env) override {
- return message_;
- }
-
- private:
- interop::GPUDeviceLostReason reason_;
- std::string message_;
- };
-
- class OOMError : public interop::GPUOutOfMemoryError {};
- class ValidationError : public interop::GPUValidationError {
- public:
- ValidationError(std::string message) : message_(std::move(message)) {
- }
-
- std::string getMessage(Napi::Env) override {
- return message_;
- };
-
- private:
- std::string message_;
- };
-
- } // namespace
-
- ////////////////////////////////////////////////////////////////////////////////
- // wgpu::bindings::GPUDevice
- ////////////////////////////////////////////////////////////////////////////////
- GPUDevice::GPUDevice(Napi::Env env, wgpu::Device device)
- : env_(env),
- device_(device),
- async_(std::make_shared<AsyncRunner>(env, device)),
- lost_promise_(env, PROMISE_INFO) {
- device_.SetLoggingCallback(
- [](WGPULoggingType type, char const* message, void* userdata) {
- std::cout << type << ": " << message << std::endl;
- },
- nullptr);
- device_.SetUncapturedErrorCallback(
- [](WGPUErrorType type, char const* message, void* userdata) {
- std::cout << type << ": " << message << std::endl;
- },
- nullptr);
-
- device_.SetDeviceLostCallback(
- [](WGPUDeviceLostReason reason, char const* message, void* userdata) {
- auto r = interop::GPUDeviceLostReason::kDestroyed;
- switch (reason) {
- case WGPUDeviceLostReason_Force32:
- UNREACHABLE("WGPUDeviceLostReason_Force32");
- break;
- case WGPUDeviceLostReason_Destroyed:
- case WGPUDeviceLostReason_Undefined:
- r = interop::GPUDeviceLostReason::kDestroyed;
- break;
- }
- auto* self = static_cast<GPUDevice*>(userdata);
- if (self->lost_promise_.GetState() == interop::PromiseState::Pending) {
- self->lost_promise_.Resolve(
- interop::GPUDeviceLostInfo::Create<DeviceLostInfo>(self->env_, r, message));
- }
- },
- this);
+class DeviceLostInfo : public interop::GPUDeviceLostInfo {
+ public:
+ DeviceLostInfo(interop::GPUDeviceLostReason reason, std::string message)
+ : reason_(reason), message_(message) {}
+ std::variant<interop::GPUDeviceLostReason, interop::UndefinedType> getReason(
+ Napi::Env env) override {
+ return reason_;
}
-
- GPUDevice::~GPUDevice() {
- }
-
- interop::Interface<interop::GPUSupportedFeatures> GPUDevice::getFeatures(Napi::Env env) {
- class Features : public interop::GPUSupportedFeatures {
- public:
- bool has(Napi::Env, std::string feature) override {
- UNIMPLEMENTED();
+ std::string getMessage(Napi::Env) override { return message_; }
+
+ private:
+ interop::GPUDeviceLostReason reason_;
+ std::string message_;
+};
+
+class OOMError : public interop::GPUOutOfMemoryError {
+ public:
+ explicit OOMError(std::string message) : message_(std::move(message)) {}
+
+ std::string getMessage(Napi::Env) override { return message_; };
+
+ private:
+ std::string message_;
+};
+
+class ValidationError : public interop::GPUValidationError {
+ public:
+ explicit ValidationError(std::string message) : message_(std::move(message)) {}
+
+ std::string getMessage(Napi::Env) override { return message_; };
+
+ private:
+ std::string message_;
+};
+
+} // namespace
+
+////////////////////////////////////////////////////////////////////////////////
+// wgpu::bindings::GPUDevice
+////////////////////////////////////////////////////////////////////////////////
+GPUDevice::GPUDevice(Napi::Env env, wgpu::Device device)
+ : env_(env),
+ device_(device),
+ async_(std::make_shared<AsyncRunner>(env, device)),
+ lost_promise_(env, PROMISE_INFO) {
+ device_.SetLoggingCallback(
+ [](WGPULoggingType type, char const* message, void* userdata) {
+ std::cout << type << ": " << message << std::endl;
+ },
+ nullptr);
+ device_.SetUncapturedErrorCallback(
+ [](WGPUErrorType type, char const* message, void* userdata) {
+ std::cout << type << ": " << message << std::endl;
+ },
+ nullptr);
+
+ device_.SetDeviceLostCallback(
+ [](WGPUDeviceLostReason reason, char const* message, void* userdata) {
+ auto r = interop::GPUDeviceLostReason::kDestroyed;
+ switch (reason) {
+ case WGPUDeviceLostReason_Force32:
+ UNREACHABLE("WGPUDeviceLostReason_Force32");
+ break;
+ case WGPUDeviceLostReason_Destroyed:
+ case WGPUDeviceLostReason_Undefined:
+ r = interop::GPUDeviceLostReason::kDestroyed;
+ break;
}
- std::vector<std::string> keys(Napi::Env) override {
- UNIMPLEMENTED();
+ auto* self = static_cast<GPUDevice*>(userdata);
+ if (self->lost_promise_.GetState() == interop::PromiseState::Pending) {
+ self->lost_promise_.Resolve(
+ interop::GPUDeviceLostInfo::Create<DeviceLostInfo>(self->env_, r, message));
}
- };
- return interop::GPUSupportedFeatures::Create<Features>(env);
+ },
+ this);
+}
+
+GPUDevice::~GPUDevice() {}
+
+interop::Interface<interop::GPUSupportedFeatures> GPUDevice::getFeatures(Napi::Env env) {
+ class Features : public interop::GPUSupportedFeatures {
+ public:
+ bool has(Napi::Env, std::string feature) override { UNIMPLEMENTED(); }
+ std::vector<std::string> keys(Napi::Env) override { UNIMPLEMENTED(); }
+ };
+ return interop::GPUSupportedFeatures::Create<Features>(env);
+}
+
+interop::Interface<interop::GPUSupportedLimits> GPUDevice::getLimits(Napi::Env env) {
+ wgpu::SupportedLimits limits{};
+ if (!device_.GetLimits(&limits)) {
+ Napi::Error::New(env, "failed to get device limits").ThrowAsJavaScriptException();
}
+ return interop::GPUSupportedLimits::Create<GPUSupportedLimits>(env, limits);
+}
- interop::Interface<interop::GPUSupportedLimits> GPUDevice::getLimits(Napi::Env env) {
- wgpu::SupportedLimits limits{};
- if (!device_.GetLimits(&limits)) {
- Napi::Error::New(env, "failed to get device limits").ThrowAsJavaScriptException();
- }
- return interop::GPUSupportedLimits::Create<GPUSupportedLimits>(env, limits);
- }
-
- interop::Interface<interop::GPUQueue> GPUDevice::getQueue(Napi::Env env) {
- return interop::GPUQueue::Create<GPUQueue>(env, device_.GetQueue(), async_);
- }
+interop::Interface<interop::GPUQueue> GPUDevice::getQueue(Napi::Env env) {
+ return interop::GPUQueue::Create<GPUQueue>(env, device_.GetQueue(), async_);
+}
- void GPUDevice::destroy(Napi::Env env) {
- if (lost_promise_.GetState() == interop::PromiseState::Pending) {
- lost_promise_.Resolve(interop::GPUDeviceLostInfo::Create<DeviceLostInfo>(
- env_, interop::GPUDeviceLostReason::kDestroyed, "device was destroyed"));
- }
- device_.Destroy();
+void GPUDevice::destroy(Napi::Env env) {
+ if (lost_promise_.GetState() == interop::PromiseState::Pending) {
+ lost_promise_.Resolve(interop::GPUDeviceLostInfo::Create<DeviceLostInfo>(
+ env_, interop::GPUDeviceLostReason::kDestroyed, "device was destroyed"));
}
-
- interop::Interface<interop::GPUBuffer> GPUDevice::createBuffer(
- Napi::Env env,
- interop::GPUBufferDescriptor descriptor) {
- Converter conv(env);
-
- wgpu::BufferDescriptor desc{};
- if (!conv(desc.label, descriptor.label) ||
- !conv(desc.mappedAtCreation, descriptor.mappedAtCreation) ||
- !conv(desc.size, descriptor.size) || !conv(desc.usage, descriptor.usage)) {
- return {};
- }
- return interop::GPUBuffer::Create<GPUBuffer>(env, device_.CreateBuffer(&desc), desc,
- device_, async_);
- }
-
- interop::Interface<interop::GPUTexture> GPUDevice::createTexture(
- Napi::Env env,
- interop::GPUTextureDescriptor descriptor) {
- Converter conv(env);
-
- wgpu::TextureDescriptor desc{};
- if (!conv(desc.label, descriptor.label) || !conv(desc.usage, descriptor.usage) || //
- !conv(desc.size, descriptor.size) || //
- !conv(desc.dimension, descriptor.dimension) || //
- !conv(desc.mipLevelCount, descriptor.mipLevelCount) || //
- !conv(desc.sampleCount, descriptor.sampleCount) || //
- !conv(desc.format, descriptor.format)) {
- return {};
- }
- return interop::GPUTexture::Create<GPUTexture>(env, device_.CreateTexture(&desc));
+ device_.Destroy();
+}
+
+interop::Interface<interop::GPUBuffer> GPUDevice::createBuffer(
+ Napi::Env env,
+ interop::GPUBufferDescriptor descriptor) {
+ Converter conv(env);
+
+ wgpu::BufferDescriptor desc{};
+ if (!conv(desc.label, descriptor.label) ||
+ !conv(desc.mappedAtCreation, descriptor.mappedAtCreation) ||
+ !conv(desc.size, descriptor.size) || !conv(desc.usage, descriptor.usage)) {
+ return {};
}
-
- interop::Interface<interop::GPUSampler> GPUDevice::createSampler(
- Napi::Env env,
- interop::GPUSamplerDescriptor descriptor) {
- Converter conv(env);
-
- wgpu::SamplerDescriptor desc{};
- if (!conv(desc.label, descriptor.label) || //
- !conv(desc.addressModeU, descriptor.addressModeU) || //
- !conv(desc.addressModeV, descriptor.addressModeV) || //
- !conv(desc.addressModeW, descriptor.addressModeW) || //
- !conv(desc.magFilter, descriptor.magFilter) || //
- !conv(desc.minFilter, descriptor.minFilter) || //
- !conv(desc.mipmapFilter, descriptor.mipmapFilter) || //
- !conv(desc.lodMinClamp, descriptor.lodMinClamp) || //
- !conv(desc.lodMaxClamp, descriptor.lodMaxClamp) || //
- !conv(desc.compare, descriptor.compare) || //
- !conv(desc.maxAnisotropy, descriptor.maxAnisotropy)) {
- return {};
- }
- return interop::GPUSampler::Create<GPUSampler>(env, device_.CreateSampler(&desc));
+ return interop::GPUBuffer::Create<GPUBuffer>(env, device_.CreateBuffer(&desc), desc, device_,
+ async_);
+}
+
+interop::Interface<interop::GPUTexture> GPUDevice::createTexture(
+ Napi::Env env,
+ interop::GPUTextureDescriptor descriptor) {
+ Converter conv(env);
+
+ wgpu::TextureDescriptor desc{};
+ if (!conv(desc.label, descriptor.label) || !conv(desc.usage, descriptor.usage) || //
+ !conv(desc.size, descriptor.size) || //
+ !conv(desc.dimension, descriptor.dimension) || //
+ !conv(desc.mipLevelCount, descriptor.mipLevelCount) || //
+ !conv(desc.sampleCount, descriptor.sampleCount) || //
+ !conv(desc.format, descriptor.format) || //
+ !conv(desc.viewFormats, desc.viewFormatCount, descriptor.viewFormats)) {
+ return {};
}
-
- interop::Interface<interop::GPUExternalTexture> GPUDevice::importExternalTexture(
- Napi::Env,
- interop::GPUExternalTextureDescriptor descriptor) {
- UNIMPLEMENTED();
+ return interop::GPUTexture::Create<GPUTexture>(env, device_.CreateTexture(&desc));
+}
+
+interop::Interface<interop::GPUSampler> GPUDevice::createSampler(
+ Napi::Env env,
+ interop::GPUSamplerDescriptor descriptor) {
+ Converter conv(env);
+
+ wgpu::SamplerDescriptor desc{};
+ if (!conv(desc.label, descriptor.label) || //
+ !conv(desc.addressModeU, descriptor.addressModeU) || //
+ !conv(desc.addressModeV, descriptor.addressModeV) || //
+ !conv(desc.addressModeW, descriptor.addressModeW) || //
+ !conv(desc.magFilter, descriptor.magFilter) || //
+ !conv(desc.minFilter, descriptor.minFilter) || //
+ !conv(desc.mipmapFilter, descriptor.mipmapFilter) || //
+ !conv(desc.lodMinClamp, descriptor.lodMinClamp) || //
+ !conv(desc.lodMaxClamp, descriptor.lodMaxClamp) || //
+ !conv(desc.compare, descriptor.compare) || //
+ !conv(desc.maxAnisotropy, descriptor.maxAnisotropy)) {
+ return {};
}
-
- interop::Interface<interop::GPUBindGroupLayout> GPUDevice::createBindGroupLayout(
- Napi::Env env,
- interop::GPUBindGroupLayoutDescriptor descriptor) {
- Converter conv(env);
-
- wgpu::BindGroupLayoutDescriptor desc{};
- if (!conv(desc.label, descriptor.label) ||
- !conv(desc.entries, desc.entryCount, descriptor.entries)) {
- return {};
- }
-
- return interop::GPUBindGroupLayout::Create<GPUBindGroupLayout>(
- env, device_.CreateBindGroupLayout(&desc));
+ return interop::GPUSampler::Create<GPUSampler>(env, device_.CreateSampler(&desc));
+}
+
+interop::Interface<interop::GPUExternalTexture> GPUDevice::importExternalTexture(
+ Napi::Env,
+ interop::GPUExternalTextureDescriptor descriptor) {
+ UNIMPLEMENTED();
+}
+
+interop::Interface<interop::GPUBindGroupLayout> GPUDevice::createBindGroupLayout(
+ Napi::Env env,
+ interop::GPUBindGroupLayoutDescriptor descriptor) {
+ Converter conv(env);
+
+ wgpu::BindGroupLayoutDescriptor desc{};
+ if (!conv(desc.label, descriptor.label) ||
+ !conv(desc.entries, desc.entryCount, descriptor.entries)) {
+ return {};
}
- interop::Interface<interop::GPUPipelineLayout> GPUDevice::createPipelineLayout(
- Napi::Env env,
- interop::GPUPipelineLayoutDescriptor descriptor) {
- Converter conv(env);
+ return interop::GPUBindGroupLayout::Create<GPUBindGroupLayout>(
+ env, device_.CreateBindGroupLayout(&desc));
+}
- wgpu::PipelineLayoutDescriptor desc{};
- if (!conv(desc.label, descriptor.label) ||
- !conv(desc.bindGroupLayouts, desc.bindGroupLayoutCount, descriptor.bindGroupLayouts)) {
- return {};
- }
+interop::Interface<interop::GPUPipelineLayout> GPUDevice::createPipelineLayout(
+ Napi::Env env,
+ interop::GPUPipelineLayoutDescriptor descriptor) {
+ Converter conv(env);
- return interop::GPUPipelineLayout::Create<GPUPipelineLayout>(
- env, device_.CreatePipelineLayout(&desc));
+ wgpu::PipelineLayoutDescriptor desc{};
+ if (!conv(desc.label, descriptor.label) ||
+ !conv(desc.bindGroupLayouts, desc.bindGroupLayoutCount, descriptor.bindGroupLayouts)) {
+ return {};
}
- interop::Interface<interop::GPUBindGroup> GPUDevice::createBindGroup(
- Napi::Env env,
- interop::GPUBindGroupDescriptor descriptor) {
- Converter conv(env);
+ return interop::GPUPipelineLayout::Create<GPUPipelineLayout>(
+ env, device_.CreatePipelineLayout(&desc));
+}
- wgpu::BindGroupDescriptor desc{};
- if (!conv(desc.label, descriptor.label) || !conv(desc.layout, descriptor.layout) ||
- !conv(desc.entries, desc.entryCount, descriptor.entries)) {
- return {};
- }
+interop::Interface<interop::GPUBindGroup> GPUDevice::createBindGroup(
+ Napi::Env env,
+ interop::GPUBindGroupDescriptor descriptor) {
+ Converter conv(env);
- return interop::GPUBindGroup::Create<GPUBindGroup>(env, device_.CreateBindGroup(&desc));
+ wgpu::BindGroupDescriptor desc{};
+ if (!conv(desc.label, descriptor.label) || !conv(desc.layout, descriptor.layout) ||
+ !conv(desc.entries, desc.entryCount, descriptor.entries)) {
+ return {};
}
- interop::Interface<interop::GPUShaderModule> GPUDevice::createShaderModule(
- Napi::Env env,
- interop::GPUShaderModuleDescriptor descriptor) {
- Converter conv(env);
+ return interop::GPUBindGroup::Create<GPUBindGroup>(env, device_.CreateBindGroup(&desc));
+}
- wgpu::ShaderModuleWGSLDescriptor wgsl_desc{};
- wgpu::ShaderModuleDescriptor sm_desc{};
- if (!conv(wgsl_desc.source, descriptor.code) || !conv(sm_desc.label, descriptor.label)) {
- return {};
- }
- sm_desc.nextInChain = &wgsl_desc;
+interop::Interface<interop::GPUShaderModule> GPUDevice::createShaderModule(
+ Napi::Env env,
+ interop::GPUShaderModuleDescriptor descriptor) {
+ Converter conv(env);
- return interop::GPUShaderModule::Create<GPUShaderModule>(
- env, device_.CreateShaderModule(&sm_desc), async_);
+ wgpu::ShaderModuleWGSLDescriptor wgsl_desc{};
+ wgpu::ShaderModuleDescriptor sm_desc{};
+ if (!conv(wgsl_desc.source, descriptor.code) || !conv(sm_desc.label, descriptor.label)) {
+ return {};
}
+ sm_desc.nextInChain = &wgsl_desc;
- interop::Interface<interop::GPUComputePipeline> GPUDevice::createComputePipeline(
- Napi::Env env,
- interop::GPUComputePipelineDescriptor descriptor) {
- Converter conv(env);
+ return interop::GPUShaderModule::Create<GPUShaderModule>(
+ env, device_.CreateShaderModule(&sm_desc), async_);
+}
- wgpu::ComputePipelineDescriptor desc{};
- if (!conv(desc, descriptor)) {
- return {};
- }
+interop::Interface<interop::GPUComputePipeline> GPUDevice::createComputePipeline(
+ Napi::Env env,
+ interop::GPUComputePipelineDescriptor descriptor) {
+ Converter conv(env);
- return interop::GPUComputePipeline::Create<GPUComputePipeline>(
- env, device_.CreateComputePipeline(&desc));
+ wgpu::ComputePipelineDescriptor desc{};
+ if (!conv(desc, descriptor)) {
+ return {};
}
- interop::Interface<interop::GPURenderPipeline> GPUDevice::createRenderPipeline(
- Napi::Env env,
- interop::GPURenderPipelineDescriptor descriptor) {
- Converter conv(env);
+ return interop::GPUComputePipeline::Create<GPUComputePipeline>(
+ env, device_.CreateComputePipeline(&desc));
+}
- wgpu::RenderPipelineDescriptor desc{};
- if (!conv(desc, descriptor)) {
- return {};
- }
+interop::Interface<interop::GPURenderPipeline> GPUDevice::createRenderPipeline(
+ Napi::Env env,
+ interop::GPURenderPipelineDescriptor descriptor) {
+ Converter conv(env);
- return interop::GPURenderPipeline::Create<GPURenderPipeline>(
- env, device_.CreateRenderPipeline(&desc));
+ wgpu::RenderPipelineDescriptor desc{};
+ if (!conv(desc, descriptor)) {
+ return {};
}
- interop::Promise<interop::Interface<interop::GPUComputePipeline>>
- GPUDevice::createComputePipelineAsync(Napi::Env env,
- interop::GPUComputePipelineDescriptor descriptor) {
- using Promise = interop::Promise<interop::Interface<interop::GPUComputePipeline>>;
-
- Converter conv(env);
-
- wgpu::ComputePipelineDescriptor desc{};
- if (!conv(desc, descriptor)) {
- Promise promise(env, PROMISE_INFO);
- promise.Reject(Errors::OperationError(env));
- return promise;
- }
-
- struct Context {
- Napi::Env env;
- Promise promise;
- AsyncTask task;
- };
- auto ctx = new Context{env, Promise(env, PROMISE_INFO), async_};
- auto promise = ctx->promise;
-
- device_.CreateComputePipelineAsync(
- &desc,
- [](WGPUCreatePipelineAsyncStatus status, WGPUComputePipeline pipeline,
- char const* message, void* userdata) {
- auto c = std::unique_ptr<Context>(static_cast<Context*>(userdata));
-
- switch (status) {
- case WGPUCreatePipelineAsyncStatus::WGPUCreatePipelineAsyncStatus_Success:
- c->promise.Resolve(interop::GPUComputePipeline::Create<GPUComputePipeline>(
- c->env, pipeline));
- break;
- default:
- c->promise.Reject(Errors::OperationError(c->env));
- break;
- }
- },
- ctx);
+ return interop::GPURenderPipeline::Create<GPURenderPipeline>(
+ env, device_.CreateRenderPipeline(&desc));
+}
- return promise;
- }
+interop::Promise<interop::Interface<interop::GPUComputePipeline>>
+GPUDevice::createComputePipelineAsync(Napi::Env env,
+ interop::GPUComputePipelineDescriptor descriptor) {
+ using Promise = interop::Promise<interop::Interface<interop::GPUComputePipeline>>;
- interop::Promise<interop::Interface<interop::GPURenderPipeline>>
- GPUDevice::createRenderPipelineAsync(Napi::Env env,
- interop::GPURenderPipelineDescriptor descriptor) {
- using Promise = interop::Promise<interop::Interface<interop::GPURenderPipeline>>;
-
- Converter conv(env);
-
- wgpu::RenderPipelineDescriptor desc{};
- if (!conv(desc, descriptor)) {
- Promise promise(env, PROMISE_INFO);
- promise.Reject(Errors::OperationError(env));
- return promise;
- }
-
- struct Context {
- Napi::Env env;
- Promise promise;
- AsyncTask task;
- };
- auto ctx = new Context{env, Promise(env, PROMISE_INFO), async_};
- auto promise = ctx->promise;
-
- device_.CreateRenderPipelineAsync(
- &desc,
- [](WGPUCreatePipelineAsyncStatus status, WGPURenderPipeline pipeline,
- char const* message, void* userdata) {
- auto c = std::unique_ptr<Context>(static_cast<Context*>(userdata));
-
- switch (status) {
- case WGPUCreatePipelineAsyncStatus::WGPUCreatePipelineAsyncStatus_Success:
- c->promise.Resolve(interop::GPURenderPipeline::Create<GPURenderPipeline>(
- c->env, pipeline));
- break;
- default:
- c->promise.Reject(Errors::OperationError(c->env));
- break;
- }
- },
- ctx);
+ Converter conv(env);
+ wgpu::ComputePipelineDescriptor desc{};
+ if (!conv(desc, descriptor)) {
+ Promise promise(env, PROMISE_INFO);
+ promise.Reject(Errors::OperationError(env));
return promise;
}
- interop::Interface<interop::GPUCommandEncoder> GPUDevice::createCommandEncoder(
- Napi::Env env,
- interop::GPUCommandEncoderDescriptor descriptor) {
- wgpu::CommandEncoderDescriptor desc{};
- return interop::GPUCommandEncoder::Create<GPUCommandEncoder>(
- env, device_.CreateCommandEncoder(&desc));
- }
-
- interop::Interface<interop::GPURenderBundleEncoder> GPUDevice::createRenderBundleEncoder(
- Napi::Env env,
- interop::GPURenderBundleEncoderDescriptor descriptor) {
- Converter conv(env);
-
- wgpu::RenderBundleEncoderDescriptor desc{};
- if (!conv(desc.label, descriptor.label) ||
- !conv(desc.colorFormats, desc.colorFormatsCount, descriptor.colorFormats) ||
- !conv(desc.depthStencilFormat, descriptor.depthStencilFormat) ||
- !conv(desc.sampleCount, descriptor.sampleCount) ||
- !conv(desc.depthReadOnly, descriptor.depthReadOnly) ||
- !conv(desc.stencilReadOnly, descriptor.stencilReadOnly)) {
- return {};
- }
-
- return interop::GPURenderBundleEncoder::Create<GPURenderBundleEncoder>(
- env, device_.CreateRenderBundleEncoder(&desc));
- }
-
- interop::Interface<interop::GPUQuerySet> GPUDevice::createQuerySet(
- Napi::Env env,
- interop::GPUQuerySetDescriptor descriptor) {
- Converter conv(env);
-
- wgpu::QuerySetDescriptor desc{};
- if (!conv(desc.label, descriptor.label) || !conv(desc.type, descriptor.type) ||
- !conv(desc.count, descriptor.count)) {
- return {};
- }
-
- return interop::GPUQuerySet::Create<GPUQuerySet>(env, device_.CreateQuerySet(&desc));
- }
+ struct Context {
+ Napi::Env env;
+ Promise promise;
+ AsyncTask task;
+ };
+ auto ctx = new Context{env, Promise(env, PROMISE_INFO), AsyncTask(async_)};
+ auto promise = ctx->promise;
+
+ device_.CreateComputePipelineAsync(
+ &desc,
+ [](WGPUCreatePipelineAsyncStatus status, WGPUComputePipeline pipeline, char const* message,
+ void* userdata) {
+ auto c = std::unique_ptr<Context>(static_cast<Context*>(userdata));
+
+ switch (status) {
+ case WGPUCreatePipelineAsyncStatus::WGPUCreatePipelineAsyncStatus_Success:
+ c->promise.Resolve(
+ interop::GPUComputePipeline::Create<GPUComputePipeline>(c->env, pipeline));
+ break;
+ default:
+ c->promise.Reject(Errors::OperationError(c->env));
+ break;
+ }
+ },
+ ctx);
- interop::Promise<interop::Interface<interop::GPUDeviceLostInfo>> GPUDevice::getLost(
- Napi::Env env) {
- return lost_promise_;
- }
+ return promise;
+}
- void GPUDevice::pushErrorScope(Napi::Env env, interop::GPUErrorFilter filter) {
- wgpu::ErrorFilter f;
- switch (filter) {
- case interop::GPUErrorFilter::kOutOfMemory:
- f = wgpu::ErrorFilter::OutOfMemory;
- break;
- case interop::GPUErrorFilter::kValidation:
- f = wgpu::ErrorFilter::Validation;
- break;
- default:
- Napi::Error::New(env, "unhandled GPUErrorFilter value")
- .ThrowAsJavaScriptException();
- return;
- }
- device_.PushErrorScope(f);
- }
+interop::Promise<interop::Interface<interop::GPURenderPipeline>>
+GPUDevice::createRenderPipelineAsync(Napi::Env env,
+ interop::GPURenderPipelineDescriptor descriptor) {
+ using Promise = interop::Promise<interop::Interface<interop::GPURenderPipeline>>;
- interop::Promise<std::optional<interop::GPUError>> GPUDevice::popErrorScope(Napi::Env env) {
- using Promise = interop::Promise<std::optional<interop::GPUError>>;
- struct Context {
- Napi::Env env;
- Promise promise;
- AsyncTask task;
- };
- auto* ctx = new Context{env, Promise(env, PROMISE_INFO), async_};
- auto promise = ctx->promise;
-
- device_.PopErrorScope(
- [](WGPUErrorType type, char const* message, void* userdata) {
- auto c = std::unique_ptr<Context>(static_cast<Context*>(userdata));
- auto env = c->env;
- switch (type) {
- case WGPUErrorType::WGPUErrorType_NoError:
- c->promise.Resolve({});
- break;
- case WGPUErrorType::WGPUErrorType_OutOfMemory:
- c->promise.Resolve(interop::GPUOutOfMemoryError::Create<OOMError>(env));
- break;
- case WGPUErrorType::WGPUErrorType_Validation:
- c->promise.Resolve(
- interop::GPUValidationError::Create<ValidationError>(env, message));
- break;
- case WGPUErrorType::WGPUErrorType_Unknown:
- case WGPUErrorType::WGPUErrorType_DeviceLost:
- c->promise.Reject(Errors::OperationError(env, message));
- break;
- default:
- c->promise.Reject("unhandled error type");
- break;
- }
- },
- ctx);
+ Converter conv(env);
+ wgpu::RenderPipelineDescriptor desc{};
+ if (!conv(desc, descriptor)) {
+ Promise promise(env, PROMISE_INFO);
+ promise.Reject(Errors::OperationError(env));
return promise;
}
- std::variant<std::string, interop::UndefinedType> GPUDevice::getLabel(Napi::Env) {
- UNIMPLEMENTED();
- }
-
- void GPUDevice::setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) {
- UNIMPLEMENTED();
- }
-
- interop::Interface<interop::EventHandler> GPUDevice::getOnuncapturederror(Napi::Env) {
- // TODO(dawn:1348): Implement support for the "unhandlederror" event.
- UNIMPLEMENTED();
+ struct Context {
+ Napi::Env env;
+ Promise promise;
+ AsyncTask task;
+ };
+ auto ctx = new Context{env, Promise(env, PROMISE_INFO), AsyncTask(async_)};
+ auto promise = ctx->promise;
+
+ device_.CreateRenderPipelineAsync(
+ &desc,
+ [](WGPUCreatePipelineAsyncStatus status, WGPURenderPipeline pipeline, char const* message,
+ void* userdata) {
+ auto c = std::unique_ptr<Context>(static_cast<Context*>(userdata));
+
+ switch (status) {
+ case WGPUCreatePipelineAsyncStatus::WGPUCreatePipelineAsyncStatus_Success:
+ c->promise.Resolve(
+ interop::GPURenderPipeline::Create<GPURenderPipeline>(c->env, pipeline));
+ break;
+ default:
+ c->promise.Reject(Errors::OperationError(c->env));
+ break;
+ }
+ },
+ ctx);
+
+ return promise;
+}
+
+interop::Interface<interop::GPUCommandEncoder> GPUDevice::createCommandEncoder(
+ Napi::Env env,
+ interop::GPUCommandEncoderDescriptor descriptor) {
+ wgpu::CommandEncoderDescriptor desc{};
+ return interop::GPUCommandEncoder::Create<GPUCommandEncoder>(
+ env, device_.CreateCommandEncoder(&desc));
+}
+
+interop::Interface<interop::GPURenderBundleEncoder> GPUDevice::createRenderBundleEncoder(
+ Napi::Env env,
+ interop::GPURenderBundleEncoderDescriptor descriptor) {
+ Converter conv(env);
+
+ wgpu::RenderBundleEncoderDescriptor desc{};
+ if (!conv(desc.label, descriptor.label) ||
+ !conv(desc.colorFormats, desc.colorFormatsCount, descriptor.colorFormats) ||
+ !conv(desc.depthStencilFormat, descriptor.depthStencilFormat) ||
+ !conv(desc.sampleCount, descriptor.sampleCount) ||
+ !conv(desc.depthReadOnly, descriptor.depthReadOnly) ||
+ !conv(desc.stencilReadOnly, descriptor.stencilReadOnly)) {
+ return {};
}
- void GPUDevice::setOnuncapturederror(Napi::Env,
- interop::Interface<interop::EventHandler> value) {
- // TODO(dawn:1348): Implement support for the "unhandlederror" event.
- UNIMPLEMENTED();
- }
+ return interop::GPURenderBundleEncoder::Create<GPURenderBundleEncoder>(
+ env, device_.CreateRenderBundleEncoder(&desc));
+}
- void GPUDevice::addEventListener(
- Napi::Env,
- std::string type,
- std::optional<interop::Interface<interop::EventListener>> callback,
- std::optional<std::variant<interop::AddEventListenerOptions, bool>> options) {
- // TODO(dawn:1348): Implement support for the "unhandlederror" event.
- UNIMPLEMENTED();
- }
+interop::Interface<interop::GPUQuerySet> GPUDevice::createQuerySet(
+ Napi::Env env,
+ interop::GPUQuerySetDescriptor descriptor) {
+ Converter conv(env);
- void GPUDevice::removeEventListener(
- Napi::Env,
- std::string type,
- std::optional<interop::Interface<interop::EventListener>> callback,
- std::optional<std::variant<interop::EventListenerOptions, bool>> options) {
- // TODO(dawn:1348): Implement support for the "unhandlederror" event.
- UNIMPLEMENTED();
+ wgpu::QuerySetDescriptor desc{};
+ if (!conv(desc.label, descriptor.label) || !conv(desc.type, descriptor.type) ||
+ !conv(desc.count, descriptor.count)) {
+ return {};
}
- bool GPUDevice::dispatchEvent(Napi::Env, interop::Interface<interop::Event> event) {
- // TODO(dawn:1348): Implement support for the "unhandlederror" event.
- UNIMPLEMENTED();
+ return interop::GPUQuerySet::Create<GPUQuerySet>(env, device_.CreateQuerySet(&desc));
+}
+
+interop::Promise<interop::Interface<interop::GPUDeviceLostInfo>> GPUDevice::getLost(Napi::Env env) {
+ return lost_promise_;
+}
+
+void GPUDevice::pushErrorScope(Napi::Env env, interop::GPUErrorFilter filter) {
+ wgpu::ErrorFilter f;
+ switch (filter) {
+ case interop::GPUErrorFilter::kOutOfMemory:
+ f = wgpu::ErrorFilter::OutOfMemory;
+ break;
+ case interop::GPUErrorFilter::kValidation:
+ f = wgpu::ErrorFilter::Validation;
+ break;
+ default:
+ Napi::Error::New(env, "unhandled GPUErrorFilter value").ThrowAsJavaScriptException();
+ return;
}
+ device_.PushErrorScope(f);
+}
+
+interop::Promise<std::optional<interop::Interface<interop::GPUError>>> GPUDevice::popErrorScope(
+ Napi::Env env) {
+ using Promise = interop::Promise<std::optional<interop::Interface<interop::GPUError>>>;
+ struct Context {
+ Napi::Env env;
+ Promise promise;
+ AsyncTask task;
+ };
+ auto* ctx = new Context{env, Promise(env, PROMISE_INFO), AsyncTask(async_)};
+ auto promise = ctx->promise;
+
+ device_.PopErrorScope(
+ [](WGPUErrorType type, char const* message, void* userdata) {
+ auto c = std::unique_ptr<Context>(static_cast<Context*>(userdata));
+ auto env = c->env;
+ switch (type) {
+ case WGPUErrorType::WGPUErrorType_NoError:
+ c->promise.Resolve({});
+ break;
+ case WGPUErrorType::WGPUErrorType_OutOfMemory: {
+ interop::Interface<interop::GPUError> err{
+ interop::GPUOutOfMemoryError::Create<OOMError>(env, message)};
+ c->promise.Resolve(err);
+ break;
+ }
+ case WGPUErrorType::WGPUErrorType_Validation: {
+ interop::Interface<interop::GPUError> err{
+ interop::GPUValidationError::Create<ValidationError>(env, message)};
+ c->promise.Resolve(err);
+ break;
+ }
+ case WGPUErrorType::WGPUErrorType_Unknown:
+ case WGPUErrorType::WGPUErrorType_DeviceLost:
+ c->promise.Reject(Errors::OperationError(env, message));
+ break;
+ default:
+ c->promise.Reject("unhandled error type");
+ break;
+ }
+ },
+ ctx);
+
+ return promise;
+}
+
+std::string GPUDevice::getLabel(Napi::Env) {
+ UNIMPLEMENTED();
+}
+
+void GPUDevice::setLabel(Napi::Env, std::string value) {
+ UNIMPLEMENTED();
+}
+
+interop::Interface<interop::EventHandler> GPUDevice::getOnuncapturederror(Napi::Env) {
+ // TODO(dawn:1348): Implement support for the "unhandlederror" event.
+ UNIMPLEMENTED();
+}
+
+void GPUDevice::setOnuncapturederror(Napi::Env, interop::Interface<interop::EventHandler> value) {
+ // TODO(dawn:1348): Implement support for the "unhandlederror" event.
+ UNIMPLEMENTED();
+}
+
+void GPUDevice::addEventListener(
+ Napi::Env,
+ std::string type,
+ std::optional<interop::Interface<interop::EventListener>> callback,
+ std::optional<std::variant<interop::AddEventListenerOptions, bool>> options) {
+ // TODO(dawn:1348): Implement support for the "unhandlederror" event.
+ UNIMPLEMENTED();
+}
+
+void GPUDevice::removeEventListener(
+ Napi::Env,
+ std::string type,
+ std::optional<interop::Interface<interop::EventListener>> callback,
+ std::optional<std::variant<interop::EventListenerOptions, bool>> options) {
+ // TODO(dawn:1348): Implement support for the "unhandlederror" event.
+ UNIMPLEMENTED();
+}
+
+bool GPUDevice::dispatchEvent(Napi::Env, interop::Interface<interop::Event> event) {
+ // TODO(dawn:1348): Implement support for the "unhandlederror" event.
+ UNIMPLEMENTED();
+}
} // namespace wgpu::binding
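The async entry points in the GPUDevice.cpp hunk above (createComputePipelineAsync, createRenderPipelineAsync, popErrorScope) all rely on the same ownership pattern: a heap-allocated Context carries the Promise and an AsyncTask across the C callback boundary, and the callback reclaims it through a std::unique_ptr. The following is a minimal, self-contained sketch of that pattern only; Promise, AsyncTask, Context, StartAsyncWork and Example are illustrative stand-ins, not Dawn or dawn.node API.

#include <memory>

// Placeholder types standing in for interop::Promise<T> and binding::AsyncTask.
struct Promise {
    void Resolve() {}
    void Reject() {}
};
struct AsyncTask {};

struct Context {
    Promise promise;
    AsyncTask task;  // in the bindings, this keeps the async runner ticking until the callback fires
};

// Hypothetical C-style async API: callback + userdata, shaped like the WGPU C entry points.
using Callback = void (*)(int status, void* userdata);

void StartAsyncWork(Callback cb, void* userdata) {
    cb(/*status=*/0, userdata);  // fire synchronously here; real work would be deferred
}

void Example() {
    auto* ctx = new Context{};
    Promise promise = ctx->promise;  // copy the handle out before handing ownership to the callback
    StartAsyncWork(
        [](int status, void* userdata) {
            // Reclaim ownership; the Context (and its AsyncTask) is freed when this scope ends.
            auto c = std::unique_ptr<Context>(static_cast<Context*>(userdata));
            if (status == 0) {
                c->promise.Resolve();
            } else {
                c->promise.Reject();
            }
        },
        ctx);
    (void)promise;  // in the bindings this is the value returned to JavaScript
}

The raw new/unique_ptr hand-off is what lets a capture-less lambda (convertible to a plain C function pointer) own the state without any global registry.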
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPUDevice.h b/chromium/third_party/dawn/src/dawn/node/binding/GPUDevice.h
index 9dc249cb414..02a2ea80b13 100644
--- a/chromium/third_party/dawn/src/dawn/node/binding/GPUDevice.h
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPUDevice.h
@@ -15,101 +15,103 @@
#ifndef SRC_DAWN_NODE_BINDING_GPUDEVICE_H_
#define SRC_DAWN_NODE_BINDING_GPUDEVICE_H_
-#include "dawn/webgpu_cpp.h"
+#include <memory>
+#include <string>
+#include "dawn/webgpu_cpp.h"
#include "src/dawn/node/binding/AsyncRunner.h"
#include "src/dawn/node/interop/Napi.h"
#include "src/dawn/node/interop/WebGPU.h"
namespace wgpu::binding {
- // GPUDevice is an implementation of interop::GPUDevice that wraps a wgpu::Device.
- class GPUDevice final : public interop::GPUDevice {
- public:
- GPUDevice(Napi::Env env, wgpu::Device device);
- ~GPUDevice();
+// GPUDevice is an implementation of interop::GPUDevice that wraps a wgpu::Device.
+class GPUDevice final : public interop::GPUDevice {
+ public:
+ GPUDevice(Napi::Env env, wgpu::Device device);
+ ~GPUDevice();
- // interop::GPUDevice interface compliance
- interop::Interface<interop::GPUSupportedFeatures> getFeatures(Napi::Env) override;
- interop::Interface<interop::GPUSupportedLimits> getLimits(Napi::Env) override;
- interop::Interface<interop::GPUQueue> getQueue(Napi::Env env) override;
- void destroy(Napi::Env) override;
- interop::Interface<interop::GPUBuffer> createBuffer(
- Napi::Env env,
- interop::GPUBufferDescriptor descriptor) override;
- interop::Interface<interop::GPUTexture> createTexture(
- Napi::Env,
- interop::GPUTextureDescriptor descriptor) override;
- interop::Interface<interop::GPUSampler> createSampler(
- Napi::Env,
- interop::GPUSamplerDescriptor descriptor) override;
- interop::Interface<interop::GPUExternalTexture> importExternalTexture(
- Napi::Env,
- interop::GPUExternalTextureDescriptor descriptor) override;
- interop::Interface<interop::GPUBindGroupLayout> createBindGroupLayout(
- Napi::Env,
- interop::GPUBindGroupLayoutDescriptor descriptor) override;
- interop::Interface<interop::GPUPipelineLayout> createPipelineLayout(
- Napi::Env,
- interop::GPUPipelineLayoutDescriptor descriptor) override;
- interop::Interface<interop::GPUBindGroup> createBindGroup(
- Napi::Env,
- interop::GPUBindGroupDescriptor descriptor) override;
- interop::Interface<interop::GPUShaderModule> createShaderModule(
- Napi::Env,
- interop::GPUShaderModuleDescriptor descriptor) override;
- interop::Interface<interop::GPUComputePipeline> createComputePipeline(
- Napi::Env,
- interop::GPUComputePipelineDescriptor descriptor) override;
- interop::Interface<interop::GPURenderPipeline> createRenderPipeline(
- Napi::Env,
- interop::GPURenderPipelineDescriptor descriptor) override;
- interop::Promise<interop::Interface<interop::GPUComputePipeline>>
- createComputePipelineAsync(Napi::Env env,
- interop::GPUComputePipelineDescriptor descriptor) override;
- interop::Promise<interop::Interface<interop::GPURenderPipeline>> createRenderPipelineAsync(
- Napi::Env env,
- interop::GPURenderPipelineDescriptor descriptor) override;
- interop::Interface<interop::GPUCommandEncoder> createCommandEncoder(
- Napi::Env env,
- interop::GPUCommandEncoderDescriptor descriptor) override;
- interop::Interface<interop::GPURenderBundleEncoder> createRenderBundleEncoder(
- Napi::Env,
- interop::GPURenderBundleEncoderDescriptor descriptor) override;
- interop::Interface<interop::GPUQuerySet> createQuerySet(
- Napi::Env,
- interop::GPUQuerySetDescriptor descriptor) override;
- interop::Promise<interop::Interface<interop::GPUDeviceLostInfo>> getLost(
- Napi::Env env) override;
- void pushErrorScope(Napi::Env, interop::GPUErrorFilter filter) override;
- interop::Promise<std::optional<interop::GPUError>> popErrorScope(Napi::Env env) override;
- std::variant<std::string, interop::UndefinedType> getLabel(Napi::Env) override;
- void setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) override;
- interop::Interface<interop::EventHandler> getOnuncapturederror(Napi::Env) override;
- void setOnuncapturederror(Napi::Env,
- interop::Interface<interop::EventHandler> value) override;
- void addEventListener(
- Napi::Env,
- std::string type,
- std::optional<interop::Interface<interop::EventListener>> callback,
- std::optional<std::variant<interop::AddEventListenerOptions, bool>> options) override;
- void removeEventListener(
- Napi::Env,
- std::string type,
- std::optional<interop::Interface<interop::EventListener>> callback,
- std::optional<std::variant<interop::EventListenerOptions, bool>> options) override;
- bool dispatchEvent(Napi::Env, interop::Interface<interop::Event> event) override;
+ // interop::GPUDevice interface compliance
+ interop::Interface<interop::GPUSupportedFeatures> getFeatures(Napi::Env) override;
+ interop::Interface<interop::GPUSupportedLimits> getLimits(Napi::Env) override;
+ interop::Interface<interop::GPUQueue> getQueue(Napi::Env env) override;
+ void destroy(Napi::Env) override;
+ interop::Interface<interop::GPUBuffer> createBuffer(
+ Napi::Env env,
+ interop::GPUBufferDescriptor descriptor) override;
+ interop::Interface<interop::GPUTexture> createTexture(
+ Napi::Env,
+ interop::GPUTextureDescriptor descriptor) override;
+ interop::Interface<interop::GPUSampler> createSampler(
+ Napi::Env,
+ interop::GPUSamplerDescriptor descriptor) override;
+ interop::Interface<interop::GPUExternalTexture> importExternalTexture(
+ Napi::Env,
+ interop::GPUExternalTextureDescriptor descriptor) override;
+ interop::Interface<interop::GPUBindGroupLayout> createBindGroupLayout(
+ Napi::Env,
+ interop::GPUBindGroupLayoutDescriptor descriptor) override;
+ interop::Interface<interop::GPUPipelineLayout> createPipelineLayout(
+ Napi::Env,
+ interop::GPUPipelineLayoutDescriptor descriptor) override;
+ interop::Interface<interop::GPUBindGroup> createBindGroup(
+ Napi::Env,
+ interop::GPUBindGroupDescriptor descriptor) override;
+ interop::Interface<interop::GPUShaderModule> createShaderModule(
+ Napi::Env,
+ interop::GPUShaderModuleDescriptor descriptor) override;
+ interop::Interface<interop::GPUComputePipeline> createComputePipeline(
+ Napi::Env,
+ interop::GPUComputePipelineDescriptor descriptor) override;
+ interop::Interface<interop::GPURenderPipeline> createRenderPipeline(
+ Napi::Env,
+ interop::GPURenderPipelineDescriptor descriptor) override;
+ interop::Promise<interop::Interface<interop::GPUComputePipeline>> createComputePipelineAsync(
+ Napi::Env env,
+ interop::GPUComputePipelineDescriptor descriptor) override;
+ interop::Promise<interop::Interface<interop::GPURenderPipeline>> createRenderPipelineAsync(
+ Napi::Env env,
+ interop::GPURenderPipelineDescriptor descriptor) override;
+ interop::Interface<interop::GPUCommandEncoder> createCommandEncoder(
+ Napi::Env env,
+ interop::GPUCommandEncoderDescriptor descriptor) override;
+ interop::Interface<interop::GPURenderBundleEncoder> createRenderBundleEncoder(
+ Napi::Env,
+ interop::GPURenderBundleEncoderDescriptor descriptor) override;
+ interop::Interface<interop::GPUQuerySet> createQuerySet(
+ Napi::Env,
+ interop::GPUQuerySetDescriptor descriptor) override;
+ interop::Promise<interop::Interface<interop::GPUDeviceLostInfo>> getLost(
+ Napi::Env env) override;
+ void pushErrorScope(Napi::Env, interop::GPUErrorFilter filter) override;
+ interop::Promise<std::optional<interop::Interface<interop::GPUError>>> popErrorScope(
+ Napi::Env env) override;
+ std::string getLabel(Napi::Env) override;
+ void setLabel(Napi::Env, std::string value) override;
+ interop::Interface<interop::EventHandler> getOnuncapturederror(Napi::Env) override;
+ void setOnuncapturederror(Napi::Env, interop::Interface<interop::EventHandler> value) override;
+ void addEventListener(
+ Napi::Env,
+ std::string type,
+ std::optional<interop::Interface<interop::EventListener>> callback,
+ std::optional<std::variant<interop::AddEventListenerOptions, bool>> options) override;
+ void removeEventListener(
+ Napi::Env,
+ std::string type,
+ std::optional<interop::Interface<interop::EventListener>> callback,
+ std::optional<std::variant<interop::EventListenerOptions, bool>> options) override;
+ bool dispatchEvent(Napi::Env, interop::Interface<interop::Event> event) override;
- private:
- void QueueTick();
+ private:
+ void QueueTick();
- Napi::Env env_;
- wgpu::Device device_;
- std::shared_ptr<AsyncRunner> async_;
+ Napi::Env env_;
+ wgpu::Device device_;
+ std::shared_ptr<AsyncRunner> async_;
- // This promise's JS object lives as long as the device because it is stored in .lost
- // of the wrapper JS object.
- interop::Promise<interop::Interface<interop::GPUDeviceLostInfo>> lost_promise_;
- };
+ // This promise's JS object lives as long as the device because it is stored in .lost
+ // of the wrapper JS object.
+ interop::Promise<interop::Interface<interop::GPUDeviceLostInfo>> lost_promise_;
+};
} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPUPipelineLayout.cpp b/chromium/third_party/dawn/src/dawn/node/binding/GPUPipelineLayout.cpp
index 4e0b5a9ef28..e6f1c1a19bc 100644
--- a/chromium/third_party/dawn/src/dawn/node/binding/GPUPipelineLayout.cpp
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPUPipelineLayout.cpp
@@ -14,23 +14,23 @@
#include "src/dawn/node/binding/GPUPipelineLayout.h"
+#include <utility>
+
#include "src/dawn/node/utils/Debug.h"
namespace wgpu::binding {
- ////////////////////////////////////////////////////////////////////////////////
- // wgpu::bindings::GPUPipelineLayout
- ////////////////////////////////////////////////////////////////////////////////
- GPUPipelineLayout::GPUPipelineLayout(wgpu::PipelineLayout layout) : layout_(std::move(layout)) {
- }
+////////////////////////////////////////////////////////////////////////////////
+// wgpu::bindings::GPUPipelineLayout
+////////////////////////////////////////////////////////////////////////////////
+GPUPipelineLayout::GPUPipelineLayout(wgpu::PipelineLayout layout) : layout_(std::move(layout)) {}
- std::variant<std::string, interop::UndefinedType> GPUPipelineLayout::getLabel(Napi::Env) {
- UNIMPLEMENTED();
- }
+std::string GPUPipelineLayout::getLabel(Napi::Env) {
+ UNIMPLEMENTED();
+}
- void GPUPipelineLayout::setLabel(Napi::Env,
- std::variant<std::string, interop::UndefinedType> value) {
- UNIMPLEMENTED();
- }
+void GPUPipelineLayout::setLabel(Napi::Env, std::string value) {
+ UNIMPLEMENTED();
+}
} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPUPipelineLayout.h b/chromium/third_party/dawn/src/dawn/node/binding/GPUPipelineLayout.h
index f6cfa140594..f432e7afa6b 100644
--- a/chromium/third_party/dawn/src/dawn/node/binding/GPUPipelineLayout.h
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPUPipelineLayout.h
@@ -15,32 +15,31 @@
#ifndef SRC_DAWN_NODE_BINDING_GPUPIPELINELAYOUT_H_
#define SRC_DAWN_NODE_BINDING_GPUPIPELINELAYOUT_H_
+#include <string>
+
#include "dawn/native/DawnNative.h"
#include "dawn/webgpu_cpp.h"
-
#include "src/dawn/node/interop/Napi.h"
#include "src/dawn/node/interop/WebGPU.h"
namespace wgpu::binding {
- // GPUPipelineLayout is an implementation of interop::GPUPipelineLayout that wraps a
- // wgpu::PipelineLayout.
- class GPUPipelineLayout final : public interop::GPUPipelineLayout {
- public:
- GPUPipelineLayout(wgpu::PipelineLayout layout);
+// GPUPipelineLayout is an implementation of interop::GPUPipelineLayout that wraps a
+// wgpu::PipelineLayout.
+class GPUPipelineLayout final : public interop::GPUPipelineLayout {
+ public:
+ explicit GPUPipelineLayout(wgpu::PipelineLayout layout);
- // Implicit cast operator to Dawn GPU object
- inline operator const wgpu::PipelineLayout&() const {
- return layout_;
- }
+ // Implicit cast operator to Dawn GPU object
+ inline operator const wgpu::PipelineLayout&() const { return layout_; }
- // interop::GPUPipelineLayout interface compliance
- std::variant<std::string, interop::UndefinedType> getLabel(Napi::Env) override;
- void setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) override;
+ // interop::GPUPipelineLayout interface compliance
+ std::string getLabel(Napi::Env) override;
+ void setLabel(Napi::Env, std::string value) override;
- private:
- wgpu::PipelineLayout layout_;
- };
+ private:
+ wgpu::PipelineLayout layout_;
+};
} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPUQuerySet.cpp b/chromium/third_party/dawn/src/dawn/node/binding/GPUQuerySet.cpp
index e9f0e3f9796..08a8a4700bc 100644
--- a/chromium/third_party/dawn/src/dawn/node/binding/GPUQuerySet.cpp
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPUQuerySet.cpp
@@ -14,26 +14,35 @@
#include "src/dawn/node/binding/GPUQuerySet.h"
+#include <utility>
+
#include "src/dawn/node/utils/Debug.h"
namespace wgpu::binding {
- ////////////////////////////////////////////////////////////////////////////////
- // wgpu::bindings::GPUQuerySet
- ////////////////////////////////////////////////////////////////////////////////
- GPUQuerySet::GPUQuerySet(wgpu::QuerySet query_set) : query_set_(std::move(query_set)) {
- }
+////////////////////////////////////////////////////////////////////////////////
+// wgpu::bindings::GPUQuerySet
+////////////////////////////////////////////////////////////////////////////////
+GPUQuerySet::GPUQuerySet(wgpu::QuerySet query_set) : query_set_(std::move(query_set)) {}
+
+void GPUQuerySet::destroy(Napi::Env) {
+ query_set_.Destroy();
+}
+
+interop::GPUQueryType GPUQuerySet::getType(Napi::Env) {
+ UNIMPLEMENTED();
+}
- void GPUQuerySet::destroy(Napi::Env) {
- query_set_.Destroy();
- }
+interop::GPUSize32 GPUQuerySet::getCount(Napi::Env) {
+ UNIMPLEMENTED();
+}
- std::variant<std::string, interop::UndefinedType> GPUQuerySet::getLabel(Napi::Env) {
- UNIMPLEMENTED();
- }
+std::string GPUQuerySet::getLabel(Napi::Env) {
+ UNIMPLEMENTED();
+}
- void GPUQuerySet::setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) {
- UNIMPLEMENTED();
- }
+void GPUQuerySet::setLabel(Napi::Env, std::string value) {
+ UNIMPLEMENTED();
+}
} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPUQuerySet.h b/chromium/third_party/dawn/src/dawn/node/binding/GPUQuerySet.h
index ea01cf3f1f0..d2bced17fa1 100644
--- a/chromium/third_party/dawn/src/dawn/node/binding/GPUQuerySet.h
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPUQuerySet.h
@@ -15,32 +15,33 @@
#ifndef SRC_DAWN_NODE_BINDING_GPUQUERYSET_H_
#define SRC_DAWN_NODE_BINDING_GPUQUERYSET_H_
+#include <string>
+
#include "dawn/native/DawnNative.h"
#include "dawn/webgpu_cpp.h"
-
#include "src/dawn/node/interop/Napi.h"
#include "src/dawn/node/interop/WebGPU.h"
namespace wgpu::binding {
- // GPUQuerySet is an implementation of interop::GPUQuerySet that wraps a wgpu::QuerySet.
- class GPUQuerySet final : public interop::GPUQuerySet {
- public:
- GPUQuerySet(wgpu::QuerySet query_set);
+// GPUQuerySet is an implementation of interop::GPUQuerySet that wraps a wgpu::QuerySet.
+class GPUQuerySet final : public interop::GPUQuerySet {
+ public:
+ explicit GPUQuerySet(wgpu::QuerySet query_set);
- // Implicit cast operator to Dawn GPU object
- inline operator const wgpu::QuerySet&() const {
- return query_set_;
- }
+ // Implicit cast operator to Dawn GPU object
+ inline operator const wgpu::QuerySet&() const { return query_set_; }
- // interop::GPUQuerySet interface compliance
- void destroy(Napi::Env) override;
- std::variant<std::string, interop::UndefinedType> getLabel(Napi::Env) override;
- void setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) override;
+ // interop::GPUQuerySet interface compliance
+ void destroy(Napi::Env) override;
+ interop::GPUQueryType getType(Napi::Env) override;
+ interop::GPUSize32 getCount(Napi::Env) override;
+ std::string getLabel(Napi::Env) override;
+ void setLabel(Napi::Env, std::string value) override;
- private:
- wgpu::QuerySet query_set_;
- };
+ private:
+ wgpu::QuerySet query_set_;
+};
} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPUQueue.cpp b/chromium/third_party/dawn/src/dawn/node/binding/GPUQueue.cpp
index 0e3a0beec75..d2ab9126f52 100644
--- a/chromium/third_party/dawn/src/dawn/node/binding/GPUQueue.cpp
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPUQueue.cpp
@@ -17,6 +17,7 @@
#include <cassert>
#include <limits>
#include <memory>
+#include <utility>
#include "src/dawn/node/binding/Converter.h"
#include "src/dawn/node/binding/GPUBuffer.h"
@@ -25,137 +26,134 @@
namespace wgpu::binding {
- ////////////////////////////////////////////////////////////////////////////////
- // wgpu::bindings::GPUQueue
- ////////////////////////////////////////////////////////////////////////////////
- GPUQueue::GPUQueue(wgpu::Queue queue, std::shared_ptr<AsyncRunner> async)
- : queue_(std::move(queue)), async_(std::move(async)) {
+////////////////////////////////////////////////////////////////////////////////
+// wgpu::bindings::GPUQueue
+////////////////////////////////////////////////////////////////////////////////
+GPUQueue::GPUQueue(wgpu::Queue queue, std::shared_ptr<AsyncRunner> async)
+ : queue_(std::move(queue)), async_(std::move(async)) {}
+
+void GPUQueue::submit(Napi::Env env,
+ std::vector<interop::Interface<interop::GPUCommandBuffer>> commandBuffers) {
+ std::vector<wgpu::CommandBuffer> bufs(commandBuffers.size());
+ for (size_t i = 0; i < commandBuffers.size(); i++) {
+ bufs[i] = *commandBuffers[i].As<GPUCommandBuffer>();
}
-
- void GPUQueue::submit(
- Napi::Env env,
- std::vector<interop::Interface<interop::GPUCommandBuffer>> commandBuffers) {
- std::vector<wgpu::CommandBuffer> bufs(commandBuffers.size());
- for (size_t i = 0; i < commandBuffers.size(); i++) {
- bufs[i] = *commandBuffers[i].As<GPUCommandBuffer>();
- }
- Converter conv(env);
- uint32_t bufs_size;
- if (!conv(bufs_size, bufs.size())) {
- return;
- }
- queue_.Submit(bufs_size, bufs.data());
+ Converter conv(env);
+ uint32_t bufs_size;
+ if (!conv(bufs_size, bufs.size())) {
+ return;
}
-
- interop::Promise<void> GPUQueue::onSubmittedWorkDone(Napi::Env env) {
- struct Context {
- Napi::Env env;
- interop::Promise<void> promise;
- AsyncTask task;
- };
- auto ctx = new Context{env, interop::Promise<void>(env, PROMISE_INFO), async_};
- auto promise = ctx->promise;
-
- queue_.OnSubmittedWorkDone(
- 0,
- [](WGPUQueueWorkDoneStatus status, void* userdata) {
- auto c = std::unique_ptr<Context>(static_cast<Context*>(userdata));
- if (status != WGPUQueueWorkDoneStatus::WGPUQueueWorkDoneStatus_Success) {
- Napi::Error::New(c->env, "onSubmittedWorkDone() failed")
- .ThrowAsJavaScriptException();
- }
- c->promise.Resolve();
- },
- ctx);
-
- return promise;
- }
-
- void GPUQueue::writeBuffer(Napi::Env env,
- interop::Interface<interop::GPUBuffer> buffer,
- interop::GPUSize64 bufferOffset,
- interop::BufferSource data,
- interop::GPUSize64 dataOffsetElements,
- std::optional<interop::GPUSize64> sizeElements) {
- wgpu::Buffer buf = *buffer.As<GPUBuffer>();
- Converter::BufferSource src{};
- Converter conv(env);
- if (!conv(src, data)) {
- return;
- }
-
- // Note that in the JS semantics of WebGPU, writeBuffer works in number of elements of the
- // typed arrays.
- if (dataOffsetElements > uint64_t(src.size / src.bytesPerElement)) {
- binding::Errors::OperationError(env, "dataOffset is larger than data's size.")
- .ThrowAsJavaScriptException();
- return;
- }
- uint64_t dataOffset = dataOffsetElements * src.bytesPerElement;
- src.data = reinterpret_cast<uint8_t*>(src.data) + dataOffset;
- src.size -= dataOffset;
-
- // Size defaults to dataSize - dataOffset. Instead of computing in elements, we directly
- // use it in bytes, and convert the provided value, if any, in bytes.
- uint64_t size64 = uint64_t(src.size);
- if (sizeElements.has_value()) {
- if (sizeElements.value() > std::numeric_limits<uint64_t>::max() / src.bytesPerElement) {
- binding::Errors::OperationError(env, "size overflows.")
+ queue_.Submit(bufs_size, bufs.data());
+}
+
+interop::Promise<void> GPUQueue::onSubmittedWorkDone(Napi::Env env) {
+ struct Context {
+ Napi::Env env;
+ interop::Promise<void> promise;
+ AsyncTask task;
+ };
+ auto ctx = new Context{env, interop::Promise<void>(env, PROMISE_INFO), AsyncTask(async_)};
+ auto promise = ctx->promise;
+
+ queue_.OnSubmittedWorkDone(
+ 0,
+ [](WGPUQueueWorkDoneStatus status, void* userdata) {
+ auto c = std::unique_ptr<Context>(static_cast<Context*>(userdata));
+ if (status != WGPUQueueWorkDoneStatus::WGPUQueueWorkDoneStatus_Success) {
+ Napi::Error::New(c->env, "onSubmittedWorkDone() failed")
.ThrowAsJavaScriptException();
- return;
}
- size64 = sizeElements.value() * src.bytesPerElement;
- }
-
- if (size64 > uint64_t(src.size)) {
- binding::Errors::OperationError(env, "size + dataOffset is larger than data's size.")
- .ThrowAsJavaScriptException();
- return;
- }
-
- if (size64 % 4 != 0) {
- binding::Errors::OperationError(env, "size is not a multiple of 4 bytes.")
- .ThrowAsJavaScriptException();
- return;
- }
-
- assert(size64 <= std::numeric_limits<size_t>::max());
- queue_.WriteBuffer(buf, bufferOffset, src.data, static_cast<size_t>(size64));
+ c->promise.Resolve();
+ },
+ ctx);
+
+ return promise;
+}
+
+void GPUQueue::writeBuffer(Napi::Env env,
+ interop::Interface<interop::GPUBuffer> buffer,
+ interop::GPUSize64 bufferOffset,
+ interop::BufferSource data,
+ interop::GPUSize64 dataOffsetElements,
+ std::optional<interop::GPUSize64> sizeElements) {
+ wgpu::Buffer buf = *buffer.As<GPUBuffer>();
+ Converter::BufferSource src{};
+ Converter conv(env);
+ if (!conv(src, data)) {
+ return;
}
- void GPUQueue::writeTexture(Napi::Env env,
- interop::GPUImageCopyTexture destination,
- interop::BufferSource data,
- interop::GPUImageDataLayout dataLayout,
- interop::GPUExtent3D size) {
- wgpu::ImageCopyTexture dst{};
- Converter::BufferSource src{};
- wgpu::TextureDataLayout layout{};
- wgpu::Extent3D sz{};
- Converter conv(env);
- if (!conv(dst, destination) || //
- !conv(src, data) || //
- !conv(layout, dataLayout) || //
- !conv(sz, size)) {
+ // Note that in the JS semantics of WebGPU, writeBuffer works in number of elements of the
+ // typed arrays.
+ if (dataOffsetElements > uint64_t(src.size / src.bytesPerElement)) {
+ binding::Errors::OperationError(env, "dataOffset is larger than data's size.")
+ .ThrowAsJavaScriptException();
+ return;
+ }
+ uint64_t dataOffset = dataOffsetElements * src.bytesPerElement;
+ src.data = reinterpret_cast<uint8_t*>(src.data) + dataOffset;
+ src.size -= dataOffset;
+
+ // Size defaults to dataSize - dataOffset. Instead of computing in elements, we directly
+ // use it in bytes, and convert the provided value, if any, in bytes.
+ uint64_t size64 = uint64_t(src.size);
+ if (sizeElements.has_value()) {
+ if (sizeElements.value() > std::numeric_limits<uint64_t>::max() / src.bytesPerElement) {
+ binding::Errors::OperationError(env, "size overflows.").ThrowAsJavaScriptException();
return;
}
-
- queue_.WriteTexture(&dst, src.data, src.size, &layout, &sz);
+ size64 = sizeElements.value() * src.bytesPerElement;
}
- void GPUQueue::copyExternalImageToTexture(Napi::Env,
- interop::GPUImageCopyExternalImage source,
- interop::GPUImageCopyTextureTagged destination,
- interop::GPUExtent3D copySize) {
- UNIMPLEMENTED();
+ if (size64 > uint64_t(src.size)) {
+ binding::Errors::OperationError(env, "size + dataOffset is larger than data's size.")
+ .ThrowAsJavaScriptException();
+ return;
}
- std::variant<std::string, interop::UndefinedType> GPUQueue::getLabel(Napi::Env) {
- UNIMPLEMENTED();
+ if (size64 % 4 != 0) {
+ binding::Errors::OperationError(env, "size is not a multiple of 4 bytes.")
+ .ThrowAsJavaScriptException();
+ return;
}
- void GPUQueue::setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) {
- UNIMPLEMENTED();
+ assert(size64 <= std::numeric_limits<size_t>::max());
+ queue_.WriteBuffer(buf, bufferOffset, src.data, static_cast<size_t>(size64));
+}
+
+void GPUQueue::writeTexture(Napi::Env env,
+ interop::GPUImageCopyTexture destination,
+ interop::BufferSource data,
+ interop::GPUImageDataLayout dataLayout,
+ interop::GPUExtent3D size) {
+ wgpu::ImageCopyTexture dst{};
+ Converter::BufferSource src{};
+ wgpu::TextureDataLayout layout{};
+ wgpu::Extent3D sz{};
+ Converter conv(env);
+ if (!conv(dst, destination) || //
+ !conv(src, data) || //
+ !conv(layout, dataLayout) || //
+ !conv(sz, size)) {
+ return;
}
+ queue_.WriteTexture(&dst, src.data, src.size, &layout, &sz);
+}
+
+void GPUQueue::copyExternalImageToTexture(Napi::Env,
+ interop::GPUImageCopyExternalImage source,
+ interop::GPUImageCopyTextureTagged destination,
+ interop::GPUExtent3D copySize) {
+ UNIMPLEMENTED();
+}
+
+std::string GPUQueue::getLabel(Napi::Env) {
+ UNIMPLEMENTED();
+}
+
+void GPUQueue::setLabel(Napi::Env, std::string value) {
+ UNIMPLEMENTED();
+}
+
} // namespace wgpu::binding
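GPUQueue::writeBuffer in the hunk above converts element-based offsets and sizes from the JS typed-array view into bytes before calling wgpu::Queue::WriteBuffer, rejecting overflow, out-of-range, and non-4-byte-aligned requests. Below is a standalone sketch of just that bookkeeping; ByteRange and ComputeWriteRange are hypothetical names introduced for illustration and are not part of the binding.

#include <cstdint>
#include <limits>
#include <optional>
#include <stdexcept>

struct ByteRange {
    uint64_t offset;  // byte offset into the source view
    uint64_t size;    // byte count to copy
};

// srcBytes: total size of the source view in bytes
// bytesPerElement: element size of the typed array backing the view (assumed non-zero)
ByteRange ComputeWriteRange(uint64_t srcBytes,
                            uint64_t bytesPerElement,
                            uint64_t dataOffsetElements,
                            std::optional<uint64_t> sizeElements) {
    if (dataOffsetElements > srcBytes / bytesPerElement) {
        throw std::runtime_error("dataOffset is larger than data's size.");
    }
    uint64_t offset = dataOffsetElements * bytesPerElement;
    uint64_t remaining = srcBytes - offset;

    uint64_t size = remaining;  // default: everything after the offset
    if (sizeElements.has_value()) {
        if (*sizeElements > std::numeric_limits<uint64_t>::max() / bytesPerElement) {
            throw std::runtime_error("size overflows.");
        }
        size = *sizeElements * bytesPerElement;
    }
    if (size > remaining) {
        throw std::runtime_error("size + dataOffset is larger than data's size.");
    }
    if (size % 4 != 0) {
        throw std::runtime_error("size is not a multiple of 4 bytes.");
    }
    return {offset, size};
}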
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPUQueue.h b/chromium/third_party/dawn/src/dawn/node/binding/GPUQueue.h
index 3710405f7a0..58e52091095 100644
--- a/chromium/third_party/dawn/src/dawn/node/binding/GPUQueue.h
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPUQueue.h
@@ -15,47 +15,49 @@
#ifndef SRC_DAWN_NODE_BINDING_GPUQUEUE_H_
#define SRC_DAWN_NODE_BINDING_GPUQUEUE_H_
+#include <memory>
+#include <string>
+#include <vector>
+
#include "dawn/native/DawnNative.h"
#include "dawn/webgpu_cpp.h"
-
#include "src/dawn/node/binding/AsyncRunner.h"
#include "src/dawn/node/interop/Napi.h"
#include "src/dawn/node/interop/WebGPU.h"
namespace wgpu::binding {
- // GPUQueue is an implementation of interop::GPUQueue that wraps a wgpu::Queue.
- class GPUQueue final : public interop::GPUQueue {
- public:
- GPUQueue(wgpu::Queue queue, std::shared_ptr<AsyncRunner> async);
-
- // interop::GPUQueue interface compliance
- void submit(
- Napi::Env,
- std::vector<interop::Interface<interop::GPUCommandBuffer>> commandBuffers) override;
- interop::Promise<void> onSubmittedWorkDone(Napi::Env) override;
- void writeBuffer(Napi::Env,
- interop::Interface<interop::GPUBuffer> buffer,
- interop::GPUSize64 bufferOffset,
- interop::BufferSource data,
- interop::GPUSize64 dataOffset,
- std::optional<interop::GPUSize64> size) override;
- void writeTexture(Napi::Env,
- interop::GPUImageCopyTexture destination,
- interop::BufferSource data,
- interop::GPUImageDataLayout dataLayout,
- interop::GPUExtent3D size) override;
- void copyExternalImageToTexture(Napi::Env,
- interop::GPUImageCopyExternalImage source,
- interop::GPUImageCopyTextureTagged destination,
- interop::GPUExtent3D copySize) override;
- std::variant<std::string, interop::UndefinedType> getLabel(Napi::Env) override;
- void setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) override;
-
- private:
- wgpu::Queue queue_;
- std::shared_ptr<AsyncRunner> async_;
- };
+// GPUQueue is an implementation of interop::GPUQueue that wraps a wgpu::Queue.
+class GPUQueue final : public interop::GPUQueue {
+ public:
+ GPUQueue(wgpu::Queue queue, std::shared_ptr<AsyncRunner> async);
+
+ // interop::GPUQueue interface compliance
+ void submit(Napi::Env,
+ std::vector<interop::Interface<interop::GPUCommandBuffer>> commandBuffers) override;
+ interop::Promise<void> onSubmittedWorkDone(Napi::Env) override;
+ void writeBuffer(Napi::Env,
+ interop::Interface<interop::GPUBuffer> buffer,
+ interop::GPUSize64 bufferOffset,
+ interop::BufferSource data,
+ interop::GPUSize64 dataOffset,
+ std::optional<interop::GPUSize64> size) override;
+ void writeTexture(Napi::Env,
+ interop::GPUImageCopyTexture destination,
+ interop::BufferSource data,
+ interop::GPUImageDataLayout dataLayout,
+ interop::GPUExtent3D size) override;
+ void copyExternalImageToTexture(Napi::Env,
+ interop::GPUImageCopyExternalImage source,
+ interop::GPUImageCopyTextureTagged destination,
+ interop::GPUExtent3D copySize) override;
+ std::string getLabel(Napi::Env) override;
+ void setLabel(Napi::Env, std::string value) override;
+
+ private:
+ wgpu::Queue queue_;
+ std::shared_ptr<AsyncRunner> async_;
+};
} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPURenderBundle.cpp b/chromium/third_party/dawn/src/dawn/node/binding/GPURenderBundle.cpp
index e2c786ee193..5c0ff2edfe3 100644
--- a/chromium/third_party/dawn/src/dawn/node/binding/GPURenderBundle.cpp
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPURenderBundle.cpp
@@ -14,6 +14,8 @@
#include "src/dawn/node/binding/GPURenderBundle.h"
+#include <utility>
+
#include "src/dawn/node/binding/Converter.h"
#include "src/dawn/node/binding/GPUBuffer.h"
#include "src/dawn/node/binding/GPURenderPipeline.h"
@@ -21,19 +23,17 @@
namespace wgpu::binding {
- ////////////////////////////////////////////////////////////////////////////////
- // wgpu::bindings::GPURenderBundle
- ////////////////////////////////////////////////////////////////////////////////
- GPURenderBundle::GPURenderBundle(wgpu::RenderBundle bundle) : bundle_(std::move(bundle)) {
- }
+////////////////////////////////////////////////////////////////////////////////
+// wgpu::bindings::GPURenderBundle
+////////////////////////////////////////////////////////////////////////////////
+GPURenderBundle::GPURenderBundle(wgpu::RenderBundle bundle) : bundle_(std::move(bundle)) {}
- std::variant<std::string, interop::UndefinedType> GPURenderBundle::getLabel(Napi::Env) {
- UNIMPLEMENTED();
- }
+std::string GPURenderBundle::getLabel(Napi::Env) {
+ UNIMPLEMENTED();
+}
- void GPURenderBundle::setLabel(Napi::Env,
- std::variant<std::string, interop::UndefinedType> value) {
- UNIMPLEMENTED();
- }
+void GPURenderBundle::setLabel(Napi::Env, std::string value) {
+ UNIMPLEMENTED();
+}
} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPURenderBundle.h b/chromium/third_party/dawn/src/dawn/node/binding/GPURenderBundle.h
index 7eeabb22cb1..23d0ad267e0 100644
--- a/chromium/third_party/dawn/src/dawn/node/binding/GPURenderBundle.h
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPURenderBundle.h
@@ -15,32 +15,31 @@
#ifndef SRC_DAWN_NODE_BINDING_GPURENDERBUNDLE_H_
#define SRC_DAWN_NODE_BINDING_GPURENDERBUNDLE_H_
+#include <string>
+
#include "dawn/native/DawnNative.h"
#include "dawn/webgpu_cpp.h"
-
#include "src/dawn/node/interop/Napi.h"
#include "src/dawn/node/interop/WebGPU.h"
namespace wgpu::binding {
- // GPURenderBundle is an implementation of interop::GPURenderBundle that wraps a
- // wgpu::RenderBundle.
- class GPURenderBundle final : public interop::GPURenderBundle {
- public:
- GPURenderBundle(wgpu::RenderBundle bundle);
+// GPURenderBundle is an implementation of interop::GPURenderBundle that wraps a
+// wgpu::RenderBundle.
+class GPURenderBundle final : public interop::GPURenderBundle {
+ public:
+ explicit GPURenderBundle(wgpu::RenderBundle bundle);
- // Implicit cast operator to Dawn GPU object
- inline operator const wgpu::RenderBundle&() const {
- return bundle_;
- }
+ // Implicit cast operator to Dawn GPU object
+ inline operator const wgpu::RenderBundle&() const { return bundle_; }
- // interop::GPURenderBundle interface compliance
- std::variant<std::string, interop::UndefinedType> getLabel(Napi::Env) override;
- void setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) override;
+ // interop::GPURenderBundle interface compliance
+ std::string getLabel(Napi::Env) override;
+ void setLabel(Napi::Env, std::string value) override;
- private:
- wgpu::RenderBundle bundle_;
- };
+ private:
+ wgpu::RenderBundle bundle_;
+};
} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPURenderBundleEncoder.cpp b/chromium/third_party/dawn/src/dawn/node/binding/GPURenderBundleEncoder.cpp
index 87ecd347456..801eaa2c96f 100644
--- a/chromium/third_party/dawn/src/dawn/node/binding/GPURenderBundleEncoder.cpp
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPURenderBundleEncoder.cpp
@@ -14,6 +14,8 @@
#include "src/dawn/node/binding/GPURenderBundleEncoder.h"
+#include <utility>
+
#include "src/dawn/node/binding/Converter.h"
#include "src/dawn/node/binding/GPUBindGroup.h"
#include "src/dawn/node/binding/GPUBuffer.h"
@@ -23,171 +25,168 @@
namespace wgpu::binding {
- ////////////////////////////////////////////////////////////////////////////////
- // wgpu::bindings::GPURenderBundleEncoder
- ////////////////////////////////////////////////////////////////////////////////
- GPURenderBundleEncoder::GPURenderBundleEncoder(wgpu::RenderBundleEncoder enc)
- : enc_(std::move(enc)) {
- }
-
- interop::Interface<interop::GPURenderBundle> GPURenderBundleEncoder::finish(
- Napi::Env env,
- interop::GPURenderBundleDescriptor descriptor) {
- wgpu::RenderBundleDescriptor desc{};
-
- return interop::GPURenderBundle::Create<GPURenderBundle>(env, enc_.Finish(&desc));
- }
-
- void GPURenderBundleEncoder::setBindGroup(
- Napi::Env env,
- interop::GPUIndex32 index,
- interop::Interface<interop::GPUBindGroup> bindGroup,
- std::vector<interop::GPUBufferDynamicOffset> dynamicOffsets) {
- Converter conv(env);
-
- wgpu::BindGroup bg{};
- uint32_t* offsets = nullptr;
- uint32_t num_offsets = 0;
- if (!conv(bg, bindGroup) || !conv(offsets, num_offsets, dynamicOffsets)) {
- return;
- }
-
- enc_.SetBindGroup(index, bg, num_offsets, offsets);
- }
-
- void GPURenderBundleEncoder::setBindGroup(Napi::Env env,
- interop::GPUIndex32 index,
- interop::Interface<interop::GPUBindGroup> bindGroup,
- interop::Uint32Array dynamicOffsetsData,
- interop::GPUSize64 dynamicOffsetsDataStart,
- interop::GPUSize32 dynamicOffsetsDataLength) {
- Converter conv(env);
-
- wgpu::BindGroup bg{};
- if (!conv(bg, bindGroup)) {
- return;
- }
-
- enc_.SetBindGroup(index, bg, dynamicOffsetsDataLength,
- dynamicOffsetsData.Data() + dynamicOffsetsDataStart);
- }
-
- void GPURenderBundleEncoder::pushDebugGroup(Napi::Env, std::string groupLabel) {
- enc_.PushDebugGroup(groupLabel.c_str());
- }
-
- void GPURenderBundleEncoder::popDebugGroup(Napi::Env) {
- enc_.PopDebugGroup();
- }
-
- void GPURenderBundleEncoder::insertDebugMarker(Napi::Env, std::string markerLabel) {
- enc_.InsertDebugMarker(markerLabel.c_str());
- }
-
- void GPURenderBundleEncoder::setPipeline(
- Napi::Env env,
- interop::Interface<interop::GPURenderPipeline> pipeline) {
- Converter conv(env);
-
- wgpu::RenderPipeline p{};
- if (!conv(p, pipeline)) {
- return;
- }
-
- enc_.SetPipeline(p);
- }
-
- void GPURenderBundleEncoder::setIndexBuffer(Napi::Env env,
- interop::Interface<interop::GPUBuffer> buffer,
- interop::GPUIndexFormat indexFormat,
- interop::GPUSize64 offset,
- std::optional<interop::GPUSize64> size) {
- Converter conv(env);
-
- wgpu::Buffer b{};
- wgpu::IndexFormat f{};
- uint64_t o = 0;
- uint64_t s = wgpu::kWholeSize;
- if (!conv(b, buffer) || //
- !conv(f, indexFormat) || //
- !conv(o, offset) || //
- !conv(s, size)) {
- return;
- }
-
- enc_.SetIndexBuffer(b, f, o, s);
- }
-
- void GPURenderBundleEncoder::setVertexBuffer(Napi::Env env,
- interop::GPUIndex32 slot,
- interop::Interface<interop::GPUBuffer> buffer,
- interop::GPUSize64 offset,
- std::optional<interop::GPUSize64> size) {
- Converter conv(env);
-
- wgpu::Buffer b{};
- uint64_t s = wgpu::kWholeSize;
- if (!conv(b, buffer) || !conv(s, size)) {
- return;
- }
- enc_.SetVertexBuffer(slot, b, offset, s);
- }
-
- void GPURenderBundleEncoder::draw(Napi::Env env,
- interop::GPUSize32 vertexCount,
- interop::GPUSize32 instanceCount,
- interop::GPUSize32 firstVertex,
- interop::GPUSize32 firstInstance) {
- enc_.Draw(vertexCount, instanceCount, firstVertex, firstInstance);
- }
-
- void GPURenderBundleEncoder::drawIndexed(Napi::Env env,
- interop::GPUSize32 indexCount,
- interop::GPUSize32 instanceCount,
- interop::GPUSize32 firstIndex,
- interop::GPUSignedOffset32 baseVertex,
- interop::GPUSize32 firstInstance) {
- enc_.DrawIndexed(indexCount, instanceCount, firstIndex, baseVertex, firstInstance);
- }
-
- void GPURenderBundleEncoder::drawIndirect(Napi::Env env,
- interop::Interface<interop::GPUBuffer> indirectBuffer,
- interop::GPUSize64 indirectOffset) {
- Converter conv(env);
-
- wgpu::Buffer b{};
- uint64_t o = 0;
-
- if (!conv(b, indirectBuffer) || //
- !conv(o, indirectOffset)) {
- return;
- }
- enc_.DrawIndirect(b, o);
- }
-
- void GPURenderBundleEncoder::drawIndexedIndirect(
- Napi::Env env,
- interop::Interface<interop::GPUBuffer> indirectBuffer,
- interop::GPUSize64 indirectOffset) {
- Converter conv(env);
-
- wgpu::Buffer b{};
- uint64_t o = 0;
-
- if (!conv(b, indirectBuffer) || //
- !conv(o, indirectOffset)) {
- return;
- }
- enc_.DrawIndexedIndirect(b, o);
- }
-
- std::variant<std::string, interop::UndefinedType> GPURenderBundleEncoder::getLabel(Napi::Env) {
- UNIMPLEMENTED();
- }
-
- void GPURenderBundleEncoder::setLabel(Napi::Env,
- std::variant<std::string, interop::UndefinedType> value) {
- UNIMPLEMENTED();
- }
+////////////////////////////////////////////////////////////////////////////////
+// wgpu::bindings::GPURenderBundleEncoder
+////////////////////////////////////////////////////////////////////////////////
+GPURenderBundleEncoder::GPURenderBundleEncoder(wgpu::RenderBundleEncoder enc)
+ : enc_(std::move(enc)) {}
+
+interop::Interface<interop::GPURenderBundle> GPURenderBundleEncoder::finish(
+ Napi::Env env,
+ interop::GPURenderBundleDescriptor descriptor) {
+ wgpu::RenderBundleDescriptor desc{};
+
+ return interop::GPURenderBundle::Create<GPURenderBundle>(env, enc_.Finish(&desc));
+}
+
+void GPURenderBundleEncoder::setBindGroup(
+ Napi::Env env,
+ interop::GPUIndex32 index,
+ interop::Interface<interop::GPUBindGroup> bindGroup,
+ std::vector<interop::GPUBufferDynamicOffset> dynamicOffsets) {
+ Converter conv(env);
+
+ wgpu::BindGroup bg{};
+ uint32_t* offsets = nullptr;
+ uint32_t num_offsets = 0;
+ if (!conv(bg, bindGroup) || !conv(offsets, num_offsets, dynamicOffsets)) {
+ return;
+ }
+
+ enc_.SetBindGroup(index, bg, num_offsets, offsets);
+}
+
+void GPURenderBundleEncoder::setBindGroup(Napi::Env env,
+ interop::GPUIndex32 index,
+ interop::Interface<interop::GPUBindGroup> bindGroup,
+ interop::Uint32Array dynamicOffsetsData,
+ interop::GPUSize64 dynamicOffsetsDataStart,
+ interop::GPUSize32 dynamicOffsetsDataLength) {
+ Converter conv(env);
+
+ wgpu::BindGroup bg{};
+ if (!conv(bg, bindGroup)) {
+ return;
+ }
+
+ enc_.SetBindGroup(index, bg, dynamicOffsetsDataLength,
+ dynamicOffsetsData.Data() + dynamicOffsetsDataStart);
+}
+
+void GPURenderBundleEncoder::pushDebugGroup(Napi::Env, std::string groupLabel) {
+ enc_.PushDebugGroup(groupLabel.c_str());
+}
+
+void GPURenderBundleEncoder::popDebugGroup(Napi::Env) {
+ enc_.PopDebugGroup();
+}
+
+void GPURenderBundleEncoder::insertDebugMarker(Napi::Env, std::string markerLabel) {
+ enc_.InsertDebugMarker(markerLabel.c_str());
+}
+
+void GPURenderBundleEncoder::setPipeline(Napi::Env env,
+ interop::Interface<interop::GPURenderPipeline> pipeline) {
+ Converter conv(env);
+
+ wgpu::RenderPipeline p{};
+ if (!conv(p, pipeline)) {
+ return;
+ }
+
+ enc_.SetPipeline(p);
+}
+
+void GPURenderBundleEncoder::setIndexBuffer(Napi::Env env,
+ interop::Interface<interop::GPUBuffer> buffer,
+ interop::GPUIndexFormat indexFormat,
+ interop::GPUSize64 offset,
+ std::optional<interop::GPUSize64> size) {
+ Converter conv(env);
+
+ wgpu::Buffer b{};
+ wgpu::IndexFormat f{};
+ uint64_t o = 0;
+ uint64_t s = wgpu::kWholeSize;
+ if (!conv(b, buffer) || //
+ !conv(f, indexFormat) || //
+ !conv(o, offset) || //
+ !conv(s, size)) {
+ return;
+ }
+
+ enc_.SetIndexBuffer(b, f, o, s);
+}
+
+void GPURenderBundleEncoder::setVertexBuffer(Napi::Env env,
+ interop::GPUIndex32 slot,
+ interop::Interface<interop::GPUBuffer> buffer,
+ interop::GPUSize64 offset,
+ std::optional<interop::GPUSize64> size) {
+ Converter conv(env);
+
+ wgpu::Buffer b{};
+ uint64_t s = wgpu::kWholeSize;
+ if (!conv(b, buffer) || !conv(s, size)) {
+ return;
+ }
+ enc_.SetVertexBuffer(slot, b, offset, s);
+}
+
+void GPURenderBundleEncoder::draw(Napi::Env env,
+ interop::GPUSize32 vertexCount,
+ interop::GPUSize32 instanceCount,
+ interop::GPUSize32 firstVertex,
+ interop::GPUSize32 firstInstance) {
+ enc_.Draw(vertexCount, instanceCount, firstVertex, firstInstance);
+}
+
+void GPURenderBundleEncoder::drawIndexed(Napi::Env env,
+ interop::GPUSize32 indexCount,
+ interop::GPUSize32 instanceCount,
+ interop::GPUSize32 firstIndex,
+ interop::GPUSignedOffset32 baseVertex,
+ interop::GPUSize32 firstInstance) {
+ enc_.DrawIndexed(indexCount, instanceCount, firstIndex, baseVertex, firstInstance);
+}
+
+void GPURenderBundleEncoder::drawIndirect(Napi::Env env,
+ interop::Interface<interop::GPUBuffer> indirectBuffer,
+ interop::GPUSize64 indirectOffset) {
+ Converter conv(env);
+
+ wgpu::Buffer b{};
+ uint64_t o = 0;
+
+ if (!conv(b, indirectBuffer) || //
+ !conv(o, indirectOffset)) {
+ return;
+ }
+ enc_.DrawIndirect(b, o);
+}
+
+void GPURenderBundleEncoder::drawIndexedIndirect(
+ Napi::Env env,
+ interop::Interface<interop::GPUBuffer> indirectBuffer,
+ interop::GPUSize64 indirectOffset) {
+ Converter conv(env);
+
+ wgpu::Buffer b{};
+ uint64_t o = 0;
+
+ if (!conv(b, indirectBuffer) || //
+ !conv(o, indirectOffset)) {
+ return;
+ }
+ enc_.DrawIndexedIndirect(b, o);
+}
+
+std::string GPURenderBundleEncoder::getLabel(Napi::Env) {
+ UNIMPLEMENTED();
+}
+
+void GPURenderBundleEncoder::setLabel(Napi::Env, std::string value) {
+ UNIMPLEMENTED();
+}
} // namespace wgpu::binding
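
The encoder methods in the hunk above all follow the same shape: construct a Converter bound to the Napi environment, convert each interop argument into its wgpu equivalent, and return early if any conversion fails (the Converter is expected to have already raised the JavaScript exception). The stand-alone sketch below only illustrates that control flow; Converter, wgpu::BindGroup and the interop wrappers are Dawn-specific, so everything named Fake* here is an illustrative placeholder, not part of the real API.

#include <cstdint>
#include <cstdio>
#include <optional>

// Stand-in for a Dawn handle produced by conversion.
struct FakeBuffer { int id = 0; };

// Minimal converter in the spirit of wgpu::binding::Converter: each overload
// returns false (after reporting an error) when the incoming value is unusable.
struct FakeConverter {
    bool operator()(FakeBuffer& out, int jsHandle) {
        if (jsHandle < 0) {
            std::fprintf(stderr, "invalid buffer handle\n");
            return false;  // the real Converter raises a JS exception here
        }
        out.id = jsHandle;
        return true;
    }
    bool operator()(uint64_t& out, std::optional<uint64_t> maybe) {
        if (maybe) out = *maybe;  // otherwise keep the caller's default (e.g. kWholeSize)
        return true;
    }
};

// Shape of a binding method: convert every argument, bail out on the first failure.
void SetVertexBufferLike(int jsBuffer, std::optional<uint64_t> size) {
    FakeConverter conv;
    FakeBuffer b{};
    uint64_t s = UINT64_MAX;  // stand-in for wgpu::kWholeSize
    if (!conv(b, jsBuffer) || !conv(s, size)) {
        return;  // an error is already pending on the JS side
    }
    std::printf("SetVertexBuffer(buffer=%d, size=%llu)\n", b.id,
                static_cast<unsigned long long>(s));
}

int main() {
    SetVertexBufferLike(3, std::nullopt);  // uses the whole-size default
    SetVertexBufferLike(-1, 16);           // conversion fails, the call is skipped
}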
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPURenderBundleEncoder.h b/chromium/third_party/dawn/src/dawn/node/binding/GPURenderBundleEncoder.h
index 8d8d5a36376..130a5bdd06c 100644
--- a/chromium/third_party/dawn/src/dawn/node/binding/GPURenderBundleEncoder.h
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPURenderBundleEncoder.h
@@ -15,72 +15,73 @@
#ifndef SRC_DAWN_NODE_BINDING_GPURENDERBUNDLEENCODER_H_
#define SRC_DAWN_NODE_BINDING_GPURENDERBUNDLEENCODER_H_
+#include <string>
+#include <vector>
+
#include "dawn/native/DawnNative.h"
#include "dawn/webgpu_cpp.h"
-
#include "src/dawn/node/interop/Napi.h"
#include "src/dawn/node/interop/WebGPU.h"
namespace wgpu::binding {
- // GPURenderBundleEncoder is an implementation of interop::GPURenderBundleEncoder that wraps a
- // wgpu::RenderBundleEncoder.
- class GPURenderBundleEncoder final : public interop::GPURenderBundleEncoder {
- public:
- GPURenderBundleEncoder(wgpu::RenderBundleEncoder enc);
+// GPURenderBundleEncoder is an implementation of interop::GPURenderBundleEncoder that wraps a
+// wgpu::RenderBundleEncoder.
+class GPURenderBundleEncoder final : public interop::GPURenderBundleEncoder {
+ public:
+ explicit GPURenderBundleEncoder(wgpu::RenderBundleEncoder enc);
- // interop::GPURenderBundleEncoder interface compliance
- interop::Interface<interop::GPURenderBundle> finish(
- Napi::Env,
- interop::GPURenderBundleDescriptor descriptor) override;
- void setBindGroup(Napi::Env,
- interop::GPUIndex32 index,
- interop::Interface<interop::GPUBindGroup> bindGroup,
- std::vector<interop::GPUBufferDynamicOffset> dynamicOffsets) override;
- void setBindGroup(Napi::Env,
- interop::GPUIndex32 index,
- interop::Interface<interop::GPUBindGroup> bindGroup,
- interop::Uint32Array dynamicOffsetsData,
- interop::GPUSize64 dynamicOffsetsDataStart,
- interop::GPUSize32 dynamicOffsetsDataLength) override;
- void pushDebugGroup(Napi::Env, std::string groupLabel) override;
- void popDebugGroup(Napi::Env) override;
- void insertDebugMarker(Napi::Env, std::string markerLabel) override;
- void setPipeline(Napi::Env,
- interop::Interface<interop::GPURenderPipeline> pipeline) override;
- void setIndexBuffer(Napi::Env,
- interop::Interface<interop::GPUBuffer> buffer,
- interop::GPUIndexFormat indexFormat,
- interop::GPUSize64 offset,
- std::optional<interop::GPUSize64> size) override;
- void setVertexBuffer(Napi::Env,
- interop::GPUIndex32 slot,
- interop::Interface<interop::GPUBuffer> buffer,
- interop::GPUSize64 offset,
- std::optional<interop::GPUSize64> size) override;
- void draw(Napi::Env,
- interop::GPUSize32 vertexCount,
- interop::GPUSize32 instanceCount,
- interop::GPUSize32 firstVertex,
- interop::GPUSize32 firstInstance) override;
- void drawIndexed(Napi::Env,
- interop::GPUSize32 indexCount,
- interop::GPUSize32 instanceCount,
- interop::GPUSize32 firstIndex,
- interop::GPUSignedOffset32 baseVertex,
- interop::GPUSize32 firstInstance) override;
- void drawIndirect(Napi::Env,
- interop::Interface<interop::GPUBuffer> indirectBuffer,
- interop::GPUSize64 indirectOffset) override;
- void drawIndexedIndirect(Napi::Env,
- interop::Interface<interop::GPUBuffer> indirectBuffer,
- interop::GPUSize64 indirectOffset) override;
- std::variant<std::string, interop::UndefinedType> getLabel(Napi::Env) override;
- void setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) override;
+ // interop::GPURenderBundleEncoder interface compliance
+ interop::Interface<interop::GPURenderBundle> finish(
+ Napi::Env,
+ interop::GPURenderBundleDescriptor descriptor) override;
+ void setBindGroup(Napi::Env,
+ interop::GPUIndex32 index,
+ interop::Interface<interop::GPUBindGroup> bindGroup,
+ std::vector<interop::GPUBufferDynamicOffset> dynamicOffsets) override;
+ void setBindGroup(Napi::Env,
+ interop::GPUIndex32 index,
+ interop::Interface<interop::GPUBindGroup> bindGroup,
+ interop::Uint32Array dynamicOffsetsData,
+ interop::GPUSize64 dynamicOffsetsDataStart,
+ interop::GPUSize32 dynamicOffsetsDataLength) override;
+ void pushDebugGroup(Napi::Env, std::string groupLabel) override;
+ void popDebugGroup(Napi::Env) override;
+ void insertDebugMarker(Napi::Env, std::string markerLabel) override;
+ void setPipeline(Napi::Env, interop::Interface<interop::GPURenderPipeline> pipeline) override;
+ void setIndexBuffer(Napi::Env,
+ interop::Interface<interop::GPUBuffer> buffer,
+ interop::GPUIndexFormat indexFormat,
+ interop::GPUSize64 offset,
+ std::optional<interop::GPUSize64> size) override;
+ void setVertexBuffer(Napi::Env,
+ interop::GPUIndex32 slot,
+ interop::Interface<interop::GPUBuffer> buffer,
+ interop::GPUSize64 offset,
+ std::optional<interop::GPUSize64> size) override;
+ void draw(Napi::Env,
+ interop::GPUSize32 vertexCount,
+ interop::GPUSize32 instanceCount,
+ interop::GPUSize32 firstVertex,
+ interop::GPUSize32 firstInstance) override;
+ void drawIndexed(Napi::Env,
+ interop::GPUSize32 indexCount,
+ interop::GPUSize32 instanceCount,
+ interop::GPUSize32 firstIndex,
+ interop::GPUSignedOffset32 baseVertex,
+ interop::GPUSize32 firstInstance) override;
+ void drawIndirect(Napi::Env,
+ interop::Interface<interop::GPUBuffer> indirectBuffer,
+ interop::GPUSize64 indirectOffset) override;
+ void drawIndexedIndirect(Napi::Env,
+ interop::Interface<interop::GPUBuffer> indirectBuffer,
+ interop::GPUSize64 indirectOffset) override;
+ std::string getLabel(Napi::Env) override;
+ void setLabel(Napi::Env, std::string value) override;
- private:
- wgpu::RenderBundleEncoder enc_;
- };
+ private:
+ wgpu::RenderBundleEncoder enc_;
+};
} // namespace wgpu::binding
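
One behavioural change in the header above is that the single-argument constructor is now marked explicit, so a raw wgpu::RenderBundleEncoder no longer converts silently into the binding wrapper. A minimal self-contained example of the difference (Handle and Wrapper are illustrative names, not Dawn types):

#include <utility>

struct Handle {};  // stands in for wgpu::RenderBundleEncoder

class Wrapper {    // stands in for binding::GPURenderBundleEncoder
  public:
    explicit Wrapper(Handle h) : h_(std::move(h)) {}

  private:
    Handle h_;
};

void Take(const Wrapper&) {}

int main() {
    Handle h;
    Take(Wrapper(h));  // OK: the conversion is spelled out
    // Take(h);        // would compile without 'explicit'; now rejected
}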
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPURenderPassEncoder.cpp b/chromium/third_party/dawn/src/dawn/node/binding/GPURenderPassEncoder.cpp
index 0297d65d66a..ec686bcb69d 100644
--- a/chromium/third_party/dawn/src/dawn/node/binding/GPURenderPassEncoder.cpp
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPURenderPassEncoder.cpp
@@ -14,6 +14,8 @@
#include "src/dawn/node/binding/GPURenderPassEncoder.h"
+#include <utility>
+
#include "src/dawn/node/binding/Converter.h"
#include "src/dawn/node/binding/GPUBindGroup.h"
#include "src/dawn/node/binding/GPUBuffer.h"
@@ -24,232 +26,227 @@
namespace wgpu::binding {
- ////////////////////////////////////////////////////////////////////////////////
- // wgpu::bindings::GPURenderPassEncoder
- ////////////////////////////////////////////////////////////////////////////////
- GPURenderPassEncoder::GPURenderPassEncoder(wgpu::RenderPassEncoder enc) : enc_(std::move(enc)) {
- }
-
- void GPURenderPassEncoder::setViewport(Napi::Env,
- float x,
- float y,
- float width,
- float height,
- float minDepth,
- float maxDepth) {
- enc_.SetViewport(x, y, width, height, minDepth, maxDepth);
- }
-
- void GPURenderPassEncoder::setScissorRect(Napi::Env,
- interop::GPUIntegerCoordinate x,
- interop::GPUIntegerCoordinate y,
- interop::GPUIntegerCoordinate width,
- interop::GPUIntegerCoordinate height) {
- enc_.SetScissorRect(x, y, width, height);
- }
-
- void GPURenderPassEncoder::setBlendConstant(Napi::Env env, interop::GPUColor color) {
- Converter conv(env);
-
- wgpu::Color c{};
- if (!conv(c, color)) {
- return;
- }
-
- enc_.SetBlendConstant(&c);
- }
-
- void GPURenderPassEncoder::setStencilReference(Napi::Env, interop::GPUStencilValue reference) {
- enc_.SetStencilReference(reference);
- }
-
- void GPURenderPassEncoder::beginOcclusionQuery(Napi::Env, interop::GPUSize32 queryIndex) {
- enc_.BeginOcclusionQuery(queryIndex);
- }
-
- void GPURenderPassEncoder::endOcclusionQuery(Napi::Env) {
- enc_.EndOcclusionQuery();
- }
-
- void GPURenderPassEncoder::executeBundles(
- Napi::Env env,
- std::vector<interop::Interface<interop::GPURenderBundle>> bundles_in) {
- Converter conv(env);
-
- wgpu::RenderBundle* bundles = nullptr;
- uint32_t bundleCount = 0;
- if (!conv(bundles, bundleCount, bundles_in)) {
- return;
- }
-
- enc_.ExecuteBundles(bundleCount, bundles);
- }
-
- void GPURenderPassEncoder::end(Napi::Env) {
- enc_.End();
- }
-
- void GPURenderPassEncoder::setBindGroup(
- Napi::Env env,
- interop::GPUIndex32 index,
- interop::Interface<interop::GPUBindGroup> bindGroup,
- std::vector<interop::GPUBufferDynamicOffset> dynamicOffsets) {
- Converter conv(env);
-
- wgpu::BindGroup bg{};
- uint32_t* offsets = nullptr;
- uint32_t num_offsets = 0;
- if (!conv(bg, bindGroup) || !conv(offsets, num_offsets, dynamicOffsets)) {
- return;
- }
-
- enc_.SetBindGroup(index, bg, num_offsets, offsets);
- }
-
- void GPURenderPassEncoder::setBindGroup(Napi::Env env,
- interop::GPUIndex32 index,
- interop::Interface<interop::GPUBindGroup> bindGroup,
- interop::Uint32Array dynamicOffsetsData,
- interop::GPUSize64 dynamicOffsetsDataStart,
- interop::GPUSize32 dynamicOffsetsDataLength) {
- Converter conv(env);
-
- wgpu::BindGroup bg{};
- if (!conv(bg, bindGroup)) {
- return;
- }
-
- if (dynamicOffsetsDataStart > dynamicOffsetsData.ElementLength()) {
- Napi::RangeError::New(env,
- "dynamicOffsetsDataStart is out of bound of dynamicOffsetData")
- .ThrowAsJavaScriptException();
- return;
- }
-
- if (dynamicOffsetsDataLength >
- dynamicOffsetsData.ElementLength() - dynamicOffsetsDataStart) {
- Napi::RangeError::New(env,
- "dynamicOffsetsDataLength + dynamicOffsetsDataStart is out of "
- "bound of dynamicOffsetData")
- .ThrowAsJavaScriptException();
- return;
- }
-
- enc_.SetBindGroup(index, bg, dynamicOffsetsDataLength,
- dynamicOffsetsData.Data() + dynamicOffsetsDataStart);
- }
-
- void GPURenderPassEncoder::pushDebugGroup(Napi::Env, std::string groupLabel) {
- enc_.PushDebugGroup(groupLabel.c_str());
- }
-
- void GPURenderPassEncoder::popDebugGroup(Napi::Env) {
- enc_.PopDebugGroup();
- }
-
- void GPURenderPassEncoder::insertDebugMarker(Napi::Env, std::string markerLabel) {
- enc_.InsertDebugMarker(markerLabel.c_str());
- }
-
- void GPURenderPassEncoder::setPipeline(
- Napi::Env env,
- interop::Interface<interop::GPURenderPipeline> pipeline) {
- Converter conv(env);
- wgpu::RenderPipeline rp{};
- if (!conv(rp, pipeline)) {
- return;
- }
- enc_.SetPipeline(rp);
- }
-
- void GPURenderPassEncoder::setIndexBuffer(Napi::Env env,
- interop::Interface<interop::GPUBuffer> buffer,
- interop::GPUIndexFormat indexFormat,
- interop::GPUSize64 offset,
- std::optional<interop::GPUSize64> size) {
- Converter conv(env);
-
- wgpu::Buffer b{};
- wgpu::IndexFormat f;
- uint64_t s = wgpu::kWholeSize;
- if (!conv(b, buffer) || //
- !conv(f, indexFormat) || //
- !conv(s, size)) {
- return;
- }
- enc_.SetIndexBuffer(b, f, offset, s);
- }
-
- void GPURenderPassEncoder::setVertexBuffer(Napi::Env env,
- interop::GPUIndex32 slot,
- interop::Interface<interop::GPUBuffer> buffer,
- interop::GPUSize64 offset,
- std::optional<interop::GPUSize64> size) {
- Converter conv(env);
-
- wgpu::Buffer b{};
- uint64_t s = wgpu::kWholeSize;
- if (!conv(b, buffer) || !conv(s, size)) {
- return;
- }
- enc_.SetVertexBuffer(slot, b, offset, s);
- }
-
- void GPURenderPassEncoder::draw(Napi::Env env,
- interop::GPUSize32 vertexCount,
- interop::GPUSize32 instanceCount,
- interop::GPUSize32 firstVertex,
- interop::GPUSize32 firstInstance) {
- enc_.Draw(vertexCount, instanceCount, firstVertex, firstInstance);
- }
-
- void GPURenderPassEncoder::drawIndexed(Napi::Env env,
- interop::GPUSize32 indexCount,
- interop::GPUSize32 instanceCount,
- interop::GPUSize32 firstIndex,
- interop::GPUSignedOffset32 baseVertex,
- interop::GPUSize32 firstInstance) {
- enc_.DrawIndexed(indexCount, instanceCount, firstIndex, baseVertex, firstInstance);
- }
-
- void GPURenderPassEncoder::drawIndirect(Napi::Env env,
- interop::Interface<interop::GPUBuffer> indirectBuffer,
- interop::GPUSize64 indirectOffset) {
- Converter conv(env);
-
- wgpu::Buffer b{};
- uint64_t o = 0;
-
- if (!conv(b, indirectBuffer) || //
- !conv(o, indirectOffset)) {
- return;
- }
- enc_.DrawIndirect(b, o);
- }
+////////////////////////////////////////////////////////////////////////////////
+// wgpu::bindings::GPURenderPassEncoder
+////////////////////////////////////////////////////////////////////////////////
+GPURenderPassEncoder::GPURenderPassEncoder(wgpu::RenderPassEncoder enc) : enc_(std::move(enc)) {}
+
+void GPURenderPassEncoder::setViewport(Napi::Env,
+ float x,
+ float y,
+ float width,
+ float height,
+ float minDepth,
+ float maxDepth) {
+ enc_.SetViewport(x, y, width, height, minDepth, maxDepth);
+}
+
+void GPURenderPassEncoder::setScissorRect(Napi::Env,
+ interop::GPUIntegerCoordinate x,
+ interop::GPUIntegerCoordinate y,
+ interop::GPUIntegerCoordinate width,
+ interop::GPUIntegerCoordinate height) {
+ enc_.SetScissorRect(x, y, width, height);
+}
+
+void GPURenderPassEncoder::setBlendConstant(Napi::Env env, interop::GPUColor color) {
+ Converter conv(env);
- void GPURenderPassEncoder::drawIndexedIndirect(
- Napi::Env env,
- interop::Interface<interop::GPUBuffer> indirectBuffer,
- interop::GPUSize64 indirectOffset) {
- Converter conv(env);
+ wgpu::Color c{};
+ if (!conv(c, color)) {
+ return;
+ }
+
+ enc_.SetBlendConstant(&c);
+}
- wgpu::Buffer b{};
- uint64_t o = 0;
-
- if (!conv(b, indirectBuffer) || //
- !conv(o, indirectOffset)) {
- return;
- }
- enc_.DrawIndexedIndirect(b, o);
- }
-
- std::variant<std::string, interop::UndefinedType> GPURenderPassEncoder::getLabel(Napi::Env) {
- UNIMPLEMENTED();
- }
-
- void GPURenderPassEncoder::setLabel(Napi::Env,
- std::variant<std::string, interop::UndefinedType> value) {
- UNIMPLEMENTED();
- }
+void GPURenderPassEncoder::setStencilReference(Napi::Env, interop::GPUStencilValue reference) {
+ enc_.SetStencilReference(reference);
+}
+
+void GPURenderPassEncoder::beginOcclusionQuery(Napi::Env, interop::GPUSize32 queryIndex) {
+ enc_.BeginOcclusionQuery(queryIndex);
+}
+
+void GPURenderPassEncoder::endOcclusionQuery(Napi::Env) {
+ enc_.EndOcclusionQuery();
+}
+
+void GPURenderPassEncoder::executeBundles(
+ Napi::Env env,
+ std::vector<interop::Interface<interop::GPURenderBundle>> bundles_in) {
+ Converter conv(env);
+
+ wgpu::RenderBundle* bundles = nullptr;
+ uint32_t bundleCount = 0;
+ if (!conv(bundles, bundleCount, bundles_in)) {
+ return;
+ }
+
+ enc_.ExecuteBundles(bundleCount, bundles);
+}
+
+void GPURenderPassEncoder::end(Napi::Env) {
+ enc_.End();
+}
+
+void GPURenderPassEncoder::setBindGroup(
+ Napi::Env env,
+ interop::GPUIndex32 index,
+ interop::Interface<interop::GPUBindGroup> bindGroup,
+ std::vector<interop::GPUBufferDynamicOffset> dynamicOffsets) {
+ Converter conv(env);
+
+ wgpu::BindGroup bg{};
+ uint32_t* offsets = nullptr;
+ uint32_t num_offsets = 0;
+ if (!conv(bg, bindGroup) || !conv(offsets, num_offsets, dynamicOffsets)) {
+ return;
+ }
+
+ enc_.SetBindGroup(index, bg, num_offsets, offsets);
+}
+
+void GPURenderPassEncoder::setBindGroup(Napi::Env env,
+ interop::GPUIndex32 index,
+ interop::Interface<interop::GPUBindGroup> bindGroup,
+ interop::Uint32Array dynamicOffsetsData,
+ interop::GPUSize64 dynamicOffsetsDataStart,
+ interop::GPUSize32 dynamicOffsetsDataLength) {
+ Converter conv(env);
+
+ wgpu::BindGroup bg{};
+ if (!conv(bg, bindGroup)) {
+ return;
+ }
+
+ if (dynamicOffsetsDataStart > dynamicOffsetsData.ElementLength()) {
+ Napi::RangeError::New(env, "dynamicOffsetsDataStart is out of bound of dynamicOffsetData")
+ .ThrowAsJavaScriptException();
+ return;
+ }
+
+ if (dynamicOffsetsDataLength > dynamicOffsetsData.ElementLength() - dynamicOffsetsDataStart) {
+ Napi::RangeError::New(env,
+ "dynamicOffsetsDataLength + dynamicOffsetsDataStart is out of "
+ "bound of dynamicOffsetData")
+ .ThrowAsJavaScriptException();
+ return;
+ }
+
+ enc_.SetBindGroup(index, bg, dynamicOffsetsDataLength,
+ dynamicOffsetsData.Data() + dynamicOffsetsDataStart);
+}
+
+void GPURenderPassEncoder::pushDebugGroup(Napi::Env, std::string groupLabel) {
+ enc_.PushDebugGroup(groupLabel.c_str());
+}
+
+void GPURenderPassEncoder::popDebugGroup(Napi::Env) {
+ enc_.PopDebugGroup();
+}
+
+void GPURenderPassEncoder::insertDebugMarker(Napi::Env, std::string markerLabel) {
+ enc_.InsertDebugMarker(markerLabel.c_str());
+}
+
+void GPURenderPassEncoder::setPipeline(Napi::Env env,
+ interop::Interface<interop::GPURenderPipeline> pipeline) {
+ Converter conv(env);
+ wgpu::RenderPipeline rp{};
+ if (!conv(rp, pipeline)) {
+ return;
+ }
+ enc_.SetPipeline(rp);
+}
+
+void GPURenderPassEncoder::setIndexBuffer(Napi::Env env,
+ interop::Interface<interop::GPUBuffer> buffer,
+ interop::GPUIndexFormat indexFormat,
+ interop::GPUSize64 offset,
+ std::optional<interop::GPUSize64> size) {
+ Converter conv(env);
+
+ wgpu::Buffer b{};
+ wgpu::IndexFormat f;
+ uint64_t s = wgpu::kWholeSize;
+ if (!conv(b, buffer) || //
+ !conv(f, indexFormat) || //
+ !conv(s, size)) {
+ return;
+ }
+ enc_.SetIndexBuffer(b, f, offset, s);
+}
+
+void GPURenderPassEncoder::setVertexBuffer(Napi::Env env,
+ interop::GPUIndex32 slot,
+ interop::Interface<interop::GPUBuffer> buffer,
+ interop::GPUSize64 offset,
+ std::optional<interop::GPUSize64> size) {
+ Converter conv(env);
+
+ wgpu::Buffer b{};
+ uint64_t s = wgpu::kWholeSize;
+ if (!conv(b, buffer) || !conv(s, size)) {
+ return;
+ }
+ enc_.SetVertexBuffer(slot, b, offset, s);
+}
+
+void GPURenderPassEncoder::draw(Napi::Env env,
+ interop::GPUSize32 vertexCount,
+ interop::GPUSize32 instanceCount,
+ interop::GPUSize32 firstVertex,
+ interop::GPUSize32 firstInstance) {
+ enc_.Draw(vertexCount, instanceCount, firstVertex, firstInstance);
+}
+
+void GPURenderPassEncoder::drawIndexed(Napi::Env env,
+ interop::GPUSize32 indexCount,
+ interop::GPUSize32 instanceCount,
+ interop::GPUSize32 firstIndex,
+ interop::GPUSignedOffset32 baseVertex,
+ interop::GPUSize32 firstInstance) {
+ enc_.DrawIndexed(indexCount, instanceCount, firstIndex, baseVertex, firstInstance);
+}
+
+void GPURenderPassEncoder::drawIndirect(Napi::Env env,
+ interop::Interface<interop::GPUBuffer> indirectBuffer,
+ interop::GPUSize64 indirectOffset) {
+ Converter conv(env);
+
+ wgpu::Buffer b{};
+ uint64_t o = 0;
+
+ if (!conv(b, indirectBuffer) || //
+ !conv(o, indirectOffset)) {
+ return;
+ }
+ enc_.DrawIndirect(b, o);
+}
+
+void GPURenderPassEncoder::drawIndexedIndirect(
+ Napi::Env env,
+ interop::Interface<interop::GPUBuffer> indirectBuffer,
+ interop::GPUSize64 indirectOffset) {
+ Converter conv(env);
+
+ wgpu::Buffer b{};
+ uint64_t o = 0;
+
+ if (!conv(b, indirectBuffer) || //
+ !conv(o, indirectOffset)) {
+ return;
+ }
+ enc_.DrawIndexedIndirect(b, o);
+}
+
+std::string GPURenderPassEncoder::getLabel(Napi::Env) {
+ UNIMPLEMENTED();
+}
+
+void GPURenderPassEncoder::setLabel(Napi::Env, std::string value) {
+ UNIMPLEMENTED();
+}
} // namespace wgpu::binding
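
The Uint32Array overload of setBindGroup above validates dynamicOffsetsDataStart and dynamicOffsetsDataLength before indexing into the typed array. Note the order of the two checks: the start is compared against the element count first, and the length is then compared against elementCount - start, a subtraction that cannot underflow once the first check has passed; an additive start + length test could wrap around for extreme unsigned inputs. A minimal, self-contained version of that validation (names are illustrative):

#include <cassert>
#include <cstdint>

// Returns true when [start, start + length) lies inside an array of
// elementCount entries, using the same two-step check as the binding above.
bool OffsetsRangeIsValid(uint64_t start, uint32_t length, uint64_t elementCount) {
    if (start > elementCount) {
        return false;  // start itself is past the end
    }
    if (length > elementCount - start) {
        return false;  // not enough elements left after 'start'
    }
    return true;
}

int main() {
    assert(OffsetsRangeIsValid(2, 3, 8));
    assert(!OffsetsRangeIsValid(9, 0, 8));
    // A naive 'start + length > count' test would wrap the 64-bit sum around
    // to a small value here and wrongly accept the range:
    assert(!OffsetsRangeIsValid(UINT64_MAX, 5, 8));
}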
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPURenderPassEncoder.h b/chromium/third_party/dawn/src/dawn/node/binding/GPURenderPassEncoder.h
index ca5d857b531..a7cfb8eb81d 100644
--- a/chromium/third_party/dawn/src/dawn/node/binding/GPURenderPassEncoder.h
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPURenderPassEncoder.h
@@ -15,94 +15,92 @@
#ifndef SRC_DAWN_NODE_BINDING_GPURENDERPASSENCODER_H_
#define SRC_DAWN_NODE_BINDING_GPURENDERPASSENCODER_H_
+#include <string>
+#include <vector>
+
#include "dawn/native/DawnNative.h"
#include "dawn/webgpu_cpp.h"
-
#include "src/dawn/node/interop/Napi.h"
#include "src/dawn/node/interop/WebGPU.h"
namespace wgpu::binding {
- // GPURenderPassEncoder is an implementation of interop::GPURenderPassEncoder that wraps a
- // wgpu::RenderPassEncoder.
- class GPURenderPassEncoder final : public interop::GPURenderPassEncoder {
- public:
- GPURenderPassEncoder(wgpu::RenderPassEncoder enc);
+// GPURenderPassEncoder is an implementation of interop::GPURenderPassEncoder that wraps a
+// wgpu::RenderPassEncoder.
+class GPURenderPassEncoder final : public interop::GPURenderPassEncoder {
+ public:
+ explicit GPURenderPassEncoder(wgpu::RenderPassEncoder enc);
- // Implicit cast operator to Dawn GPU object
- inline operator const wgpu::RenderPassEncoder&() const {
- return enc_;
- }
+ // Implicit cast operator to Dawn GPU object
+ inline operator const wgpu::RenderPassEncoder&() const { return enc_; }
- // interop::GPURenderPassEncoder interface compliance
- void setViewport(Napi::Env,
- float x,
- float y,
- float width,
- float height,
- float minDepth,
- float maxDepth) override;
- void setScissorRect(Napi::Env,
- interop::GPUIntegerCoordinate x,
- interop::GPUIntegerCoordinate y,
- interop::GPUIntegerCoordinate width,
- interop::GPUIntegerCoordinate height) override;
- void setBlendConstant(Napi::Env, interop::GPUColor color) override;
- void setStencilReference(Napi::Env, interop::GPUStencilValue reference) override;
- void beginOcclusionQuery(Napi::Env, interop::GPUSize32 queryIndex) override;
- void endOcclusionQuery(Napi::Env) override;
- void executeBundles(
- Napi::Env,
- std::vector<interop::Interface<interop::GPURenderBundle>> bundles) override;
- void end(Napi::Env) override;
- void setBindGroup(Napi::Env,
- interop::GPUIndex32 index,
- interop::Interface<interop::GPUBindGroup> bindGroup,
- std::vector<interop::GPUBufferDynamicOffset> dynamicOffsets) override;
- void setBindGroup(Napi::Env,
- interop::GPUIndex32 index,
- interop::Interface<interop::GPUBindGroup> bindGroup,
- interop::Uint32Array dynamicOffsetsData,
- interop::GPUSize64 dynamicOffsetsDataStart,
- interop::GPUSize32 dynamicOffsetsDataLength) override;
- void pushDebugGroup(Napi::Env, std::string groupLabel) override;
- void popDebugGroup(Napi::Env) override;
- void insertDebugMarker(Napi::Env, std::string markerLabel) override;
- void setPipeline(Napi::Env,
- interop::Interface<interop::GPURenderPipeline> pipeline) override;
- void setIndexBuffer(Napi::Env,
- interop::Interface<interop::GPUBuffer> buffer,
- interop::GPUIndexFormat indexFormat,
- interop::GPUSize64 offset,
- std::optional<interop::GPUSize64> size) override;
- void setVertexBuffer(Napi::Env,
- interop::GPUIndex32 slot,
- interop::Interface<interop::GPUBuffer> buffer,
- interop::GPUSize64 offset,
- std::optional<interop::GPUSize64> size) override;
- void draw(Napi::Env,
- interop::GPUSize32 vertexCount,
- interop::GPUSize32 instanceCount,
- interop::GPUSize32 firstVertex,
- interop::GPUSize32 firstInstance) override;
- void drawIndexed(Napi::Env,
- interop::GPUSize32 indexCount,
- interop::GPUSize32 instanceCount,
- interop::GPUSize32 firstIndex,
- interop::GPUSignedOffset32 baseVertex,
- interop::GPUSize32 firstInstance) override;
- void drawIndirect(Napi::Env,
- interop::Interface<interop::GPUBuffer> indirectBuffer,
- interop::GPUSize64 indirectOffset) override;
- void drawIndexedIndirect(Napi::Env,
- interop::Interface<interop::GPUBuffer> indirectBuffer,
- interop::GPUSize64 indirectOffset) override;
- std::variant<std::string, interop::UndefinedType> getLabel(Napi::Env) override;
- void setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) override;
+ // interop::GPURenderPassEncoder interface compliance
+ void setViewport(Napi::Env,
+ float x,
+ float y,
+ float width,
+ float height,
+ float minDepth,
+ float maxDepth) override;
+ void setScissorRect(Napi::Env,
+ interop::GPUIntegerCoordinate x,
+ interop::GPUIntegerCoordinate y,
+ interop::GPUIntegerCoordinate width,
+ interop::GPUIntegerCoordinate height) override;
+ void setBlendConstant(Napi::Env, interop::GPUColor color) override;
+ void setStencilReference(Napi::Env, interop::GPUStencilValue reference) override;
+ void beginOcclusionQuery(Napi::Env, interop::GPUSize32 queryIndex) override;
+ void endOcclusionQuery(Napi::Env) override;
+ void executeBundles(Napi::Env,
+ std::vector<interop::Interface<interop::GPURenderBundle>> bundles) override;
+ void end(Napi::Env) override;
+ void setBindGroup(Napi::Env,
+ interop::GPUIndex32 index,
+ interop::Interface<interop::GPUBindGroup> bindGroup,
+ std::vector<interop::GPUBufferDynamicOffset> dynamicOffsets) override;
+ void setBindGroup(Napi::Env,
+ interop::GPUIndex32 index,
+ interop::Interface<interop::GPUBindGroup> bindGroup,
+ interop::Uint32Array dynamicOffsetsData,
+ interop::GPUSize64 dynamicOffsetsDataStart,
+ interop::GPUSize32 dynamicOffsetsDataLength) override;
+ void pushDebugGroup(Napi::Env, std::string groupLabel) override;
+ void popDebugGroup(Napi::Env) override;
+ void insertDebugMarker(Napi::Env, std::string markerLabel) override;
+ void setPipeline(Napi::Env, interop::Interface<interop::GPURenderPipeline> pipeline) override;
+ void setIndexBuffer(Napi::Env,
+ interop::Interface<interop::GPUBuffer> buffer,
+ interop::GPUIndexFormat indexFormat,
+ interop::GPUSize64 offset,
+ std::optional<interop::GPUSize64> size) override;
+ void setVertexBuffer(Napi::Env,
+ interop::GPUIndex32 slot,
+ interop::Interface<interop::GPUBuffer> buffer,
+ interop::GPUSize64 offset,
+ std::optional<interop::GPUSize64> size) override;
+ void draw(Napi::Env,
+ interop::GPUSize32 vertexCount,
+ interop::GPUSize32 instanceCount,
+ interop::GPUSize32 firstVertex,
+ interop::GPUSize32 firstInstance) override;
+ void drawIndexed(Napi::Env,
+ interop::GPUSize32 indexCount,
+ interop::GPUSize32 instanceCount,
+ interop::GPUSize32 firstIndex,
+ interop::GPUSignedOffset32 baseVertex,
+ interop::GPUSize32 firstInstance) override;
+ void drawIndirect(Napi::Env,
+ interop::Interface<interop::GPUBuffer> indirectBuffer,
+ interop::GPUSize64 indirectOffset) override;
+ void drawIndexedIndirect(Napi::Env,
+ interop::Interface<interop::GPUBuffer> indirectBuffer,
+ interop::GPUSize64 indirectOffset) override;
+ std::string getLabel(Napi::Env) override;
+ void setLabel(Napi::Env, std::string value) override;
- private:
- wgpu::RenderPassEncoder enc_;
- };
+ private:
+ wgpu::RenderPassEncoder enc_;
+};
} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPURenderPipeline.cpp b/chromium/third_party/dawn/src/dawn/node/binding/GPURenderPipeline.cpp
index 8618f1fc3b0..cd236c08a50 100644
--- a/chromium/third_party/dawn/src/dawn/node/binding/GPURenderPipeline.cpp
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPURenderPipeline.cpp
@@ -14,33 +14,33 @@
#include "src/dawn/node/binding/GPURenderPipeline.h"
+#include <utility>
+
#include "src/dawn/node/binding/GPUBindGroupLayout.h"
#include "src/dawn/node/binding/GPUBuffer.h"
#include "src/dawn/node/utils/Debug.h"
namespace wgpu::binding {
- ////////////////////////////////////////////////////////////////////////////////
- // wgpu::bindings::GPURenderPipeline
- ////////////////////////////////////////////////////////////////////////////////
- GPURenderPipeline::GPURenderPipeline(wgpu::RenderPipeline pipeline)
- : pipeline_(std::move(pipeline)) {
- }
-
- interop::Interface<interop::GPUBindGroupLayout> GPURenderPipeline::getBindGroupLayout(
- Napi::Env env,
- uint32_t index) {
- return interop::GPUBindGroupLayout::Create<GPUBindGroupLayout>(
- env, pipeline_.GetBindGroupLayout(index));
- }
-
- std::variant<std::string, interop::UndefinedType> GPURenderPipeline::getLabel(Napi::Env) {
- UNIMPLEMENTED();
- }
-
- void GPURenderPipeline::setLabel(Napi::Env,
- std::variant<std::string, interop::UndefinedType> value) {
- UNIMPLEMENTED();
- }
+////////////////////////////////////////////////////////////////////////////////
+// wgpu::bindings::GPURenderPipeline
+////////////////////////////////////////////////////////////////////////////////
+GPURenderPipeline::GPURenderPipeline(wgpu::RenderPipeline pipeline)
+ : pipeline_(std::move(pipeline)) {}
+
+interop::Interface<interop::GPUBindGroupLayout> GPURenderPipeline::getBindGroupLayout(
+ Napi::Env env,
+ uint32_t index) {
+ return interop::GPUBindGroupLayout::Create<GPUBindGroupLayout>(
+ env, pipeline_.GetBindGroupLayout(index));
+}
+
+std::string GPURenderPipeline::getLabel(Napi::Env) {
+ UNIMPLEMENTED();
+}
+
+void GPURenderPipeline::setLabel(Napi::Env, std::string value) {
+ UNIMPLEMENTED();
+}
} // namespace wgpu::binding
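
getLabel and setLabel above are stubbed out with UNIMPLEMENTED(), presumably provided by the included src/dawn/node/utils/Debug.h. Its definition is not part of this diff, so the sketch below is only a plausible stand-in — a macro that reports the source location and aborts — which is enough to show how such stubs behave when JavaScript touches an unsupported attribute.

#include <cstdio>
#include <cstdlib>
#include <string>

// Illustrative stand-in only; the real UNIMPLEMENTED() in Dawn's
// src/dawn/node/utils/Debug.h may behave differently.
#define UNIMPLEMENTED_STUB()                                             \
    do {                                                                 \
        std::fprintf(stderr, "%s:%d: %s is not implemented\n", __FILE__, \
                     __LINE__, __func__);                                \
        std::abort();                                                    \
    } while (0)

class FakePipeline {
  public:
    std::string getLabel() { UNIMPLEMENTED_STUB(); }
    void setLabel(std::string) { UNIMPLEMENTED_STUB(); }
};

int main() {
    FakePipeline p;
    p.setLabel("triangle-pipeline");  // aborts with a file/line message
}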
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPURenderPipeline.h b/chromium/third_party/dawn/src/dawn/node/binding/GPURenderPipeline.h
index e35bb85d64f..26f8ad6e1fd 100644
--- a/chromium/third_party/dawn/src/dawn/node/binding/GPURenderPipeline.h
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPURenderPipeline.h
@@ -15,34 +15,33 @@
#ifndef SRC_DAWN_NODE_BINDING_GPURENDERPIPELINE_H_
#define SRC_DAWN_NODE_BINDING_GPURENDERPIPELINE_H_
+#include <string>
+
#include "dawn/native/DawnNative.h"
#include "dawn/webgpu_cpp.h"
-
#include "src/dawn/node/interop/Napi.h"
#include "src/dawn/node/interop/WebGPU.h"
namespace wgpu::binding {
- // GPURenderPipeline is an implementation of interop::GPURenderPipeline that wraps a
- // wgpu::RenderPipeline.
- class GPURenderPipeline final : public interop::GPURenderPipeline {
- public:
- GPURenderPipeline(wgpu::RenderPipeline pipeline);
-
- // Implicit cast operator to Dawn GPU object
- inline operator const wgpu::RenderPipeline&() const {
- return pipeline_;
- }
-
- // interop::GPURenderPipeline interface compliance
- interop::Interface<interop::GPUBindGroupLayout> getBindGroupLayout(Napi::Env,
- uint32_t index) override;
- std::variant<std::string, interop::UndefinedType> getLabel(Napi::Env) override;
- void setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) override;
-
- private:
- wgpu::RenderPipeline pipeline_;
- };
+// GPURenderPipeline is an implementation of interop::GPURenderPipeline that wraps a
+// wgpu::RenderPipeline.
+class GPURenderPipeline final : public interop::GPURenderPipeline {
+ public:
+ explicit GPURenderPipeline(wgpu::RenderPipeline pipeline);
+
+ // Implicit cast operator to Dawn GPU object
+ inline operator const wgpu::RenderPipeline&() const { return pipeline_; }
+
+ // interop::GPURenderPipeline interface compliance
+ interop::Interface<interop::GPUBindGroupLayout> getBindGroupLayout(Napi::Env,
+ uint32_t index) override;
+ std::string getLabel(Napi::Env) override;
+ void setLabel(Napi::Env, std::string value) override;
+
+ private:
+ wgpu::RenderPipeline pipeline_;
+};
} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPUSampler.cpp b/chromium/third_party/dawn/src/dawn/node/binding/GPUSampler.cpp
index c1076e961fd..77f395d6ae5 100644
--- a/chromium/third_party/dawn/src/dawn/node/binding/GPUSampler.cpp
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPUSampler.cpp
@@ -14,23 +14,24 @@
#include "src/dawn/node/binding/GPUSampler.h"
+#include <utility>
+
#include "src/dawn/node/binding/Converter.h"
#include "src/dawn/node/utils/Debug.h"
namespace wgpu::binding {
- ////////////////////////////////////////////////////////////////////////////////
- // wgpu::bindings::GPUSampler
- ////////////////////////////////////////////////////////////////////////////////
- GPUSampler::GPUSampler(wgpu::Sampler sampler) : sampler_(std::move(sampler)) {
- }
+////////////////////////////////////////////////////////////////////////////////
+// wgpu::bindings::GPUSampler
+////////////////////////////////////////////////////////////////////////////////
+GPUSampler::GPUSampler(wgpu::Sampler sampler) : sampler_(std::move(sampler)) {}
- std::variant<std::string, interop::UndefinedType> GPUSampler::getLabel(Napi::Env) {
- UNIMPLEMENTED();
- }
+std::string GPUSampler::getLabel(Napi::Env) {
+ UNIMPLEMENTED();
+}
- void GPUSampler::setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) {
- UNIMPLEMENTED();
- }
+void GPUSampler::setLabel(Napi::Env, std::string value) {
+ UNIMPLEMENTED();
+}
} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPUSampler.h b/chromium/third_party/dawn/src/dawn/node/binding/GPUSampler.h
index 0eddc8fc83f..578469d8053 100644
--- a/chromium/third_party/dawn/src/dawn/node/binding/GPUSampler.h
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPUSampler.h
@@ -15,30 +15,29 @@
#ifndef SRC_DAWN_NODE_BINDING_GPUSAMPLER_H_
#define SRC_DAWN_NODE_BINDING_GPUSAMPLER_H_
+#include <string>
+
#include "dawn/native/DawnNative.h"
#include "dawn/webgpu_cpp.h"
-
#include "src/dawn/node/interop/Napi.h"
#include "src/dawn/node/interop/WebGPU.h"
namespace wgpu::binding {
- // GPUSampler is an implementation of interop::GPUSampler that wraps a wgpu::Sampler.
- class GPUSampler final : public interop::GPUSampler {
- public:
- GPUSampler(wgpu::Sampler sampler);
-
- // Implicit cast operator to Dawn GPU object
- inline operator const wgpu::Sampler&() const {
- return sampler_;
- }
-
- // interop::GPUSampler interface compliance
- std::variant<std::string, interop::UndefinedType> getLabel(Napi::Env) override;
- void setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) override;
-
- private:
- wgpu::Sampler sampler_;
- };
+// GPUSampler is an implementation of interop::GPUSampler that wraps a wgpu::Sampler.
+class GPUSampler final : public interop::GPUSampler {
+ public:
+ explicit GPUSampler(wgpu::Sampler sampler);
+
+ // Implicit cast operator to Dawn GPU object
+ inline operator const wgpu::Sampler&() const { return sampler_; }
+
+ // interop::GPUSampler interface compliance
+ std::string getLabel(Napi::Env) override;
+ void setLabel(Napi::Env, std::string value) override;
+
+ private:
+ wgpu::Sampler sampler_;
+};
} // namespace wgpu::binding
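
GPUSampler.h keeps the implicit cast operator to const wgpu::Sampler&, which lets the binding wrapper be handed straight to code that expects the underlying Dawn handle. A self-contained illustration of that pattern (Sampler, GPUSamplerLike and DescribeSampler are placeholder names):

#include <cstdio>
#include <utility>

struct Sampler { int filter = 0; };  // stands in for wgpu::Sampler

class GPUSamplerLike {  // stands in for binding::GPUSampler
  public:
    explicit GPUSamplerLike(Sampler s) : sampler_(std::move(s)) {}

    // Implicit cast operator to the wrapped handle, as in the header above.
    operator const Sampler&() const { return sampler_; }

  private:
    Sampler sampler_;
};

// Code written against the raw handle...
void DescribeSampler(const Sampler& s) { std::printf("filter = %d\n", s.filter); }

int main() {
    GPUSamplerLike wrapper(Sampler{1});
    DescribeSampler(wrapper);  // ...accepts the wrapper via the cast operator
}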
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPUShaderModule.cpp b/chromium/third_party/dawn/src/dawn/node/binding/GPUShaderModule.cpp
index a599f05da1f..610f6c88e2d 100644
--- a/chromium/third_party/dawn/src/dawn/node/binding/GPUShaderModule.cpp
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPUShaderModule.cpp
@@ -15,112 +15,101 @@
#include "src/dawn/node/binding/GPUShaderModule.h"
#include <memory>
+#include <utility>
+#include <vector>
#include "src/dawn/node/utils/Debug.h"
namespace wgpu::binding {
- ////////////////////////////////////////////////////////////////////////////////
- // wgpu::bindings::GPUShaderModule
- ////////////////////////////////////////////////////////////////////////////////
- GPUShaderModule::GPUShaderModule(wgpu::ShaderModule shader, std::shared_ptr<AsyncRunner> async)
- : shader_(std::move(shader)), async_(std::move(async)) {
- }
-
- interop::Promise<interop::Interface<interop::GPUCompilationInfo>>
- GPUShaderModule::compilationInfo(Napi::Env env) {
- struct GPUCompilationMessage : public interop::GPUCompilationMessage {
- WGPUCompilationMessage message;
-
- GPUCompilationMessage(const WGPUCompilationMessage& m) : message(m) {
- }
- std::string getMessage(Napi::Env) override {
- return message.message;
- }
- interop::GPUCompilationMessageType getType(Napi::Env) override {
- switch (message.type) {
- case WGPUCompilationMessageType_Error:
- return interop::GPUCompilationMessageType::kError;
- case WGPUCompilationMessageType_Warning:
- return interop::GPUCompilationMessageType::kWarning;
- case WGPUCompilationMessageType_Info:
- return interop::GPUCompilationMessageType::kInfo;
- default:
- UNIMPLEMENTED();
- }
+////////////////////////////////////////////////////////////////////////////////
+// wgpu::bindings::GPUShaderModule
+////////////////////////////////////////////////////////////////////////////////
+GPUShaderModule::GPUShaderModule(wgpu::ShaderModule shader, std::shared_ptr<AsyncRunner> async)
+ : shader_(std::move(shader)), async_(std::move(async)) {}
+
+interop::Promise<interop::Interface<interop::GPUCompilationInfo>> GPUShaderModule::compilationInfo(
+ Napi::Env env) {
+ struct GPUCompilationMessage : public interop::GPUCompilationMessage {
+ WGPUCompilationMessage message;
+
+ explicit GPUCompilationMessage(const WGPUCompilationMessage& m) : message(m) {}
+ std::string getMessage(Napi::Env) override { return message.message; }
+ interop::GPUCompilationMessageType getType(Napi::Env) override {
+ switch (message.type) {
+ case WGPUCompilationMessageType_Error:
+ return interop::GPUCompilationMessageType::kError;
+ case WGPUCompilationMessageType_Warning:
+ return interop::GPUCompilationMessageType::kWarning;
+ case WGPUCompilationMessageType_Info:
+ return interop::GPUCompilationMessageType::kInfo;
+ default:
+ UNIMPLEMENTED();
}
- uint64_t getLineNum(Napi::Env) override {
- return message.lineNum;
+ }
+ uint64_t getLineNum(Napi::Env) override { return message.lineNum; }
+ uint64_t getLinePos(Napi::Env) override { return message.linePos; }
+ uint64_t getOffset(Napi::Env) override { return message.offset; }
+ uint64_t getLength(Napi::Env) override { return message.length; }
+ };
+
+ using Messages = std::vector<interop::Interface<interop::GPUCompilationMessage>>;
+
+ struct GPUCompilationInfo : public interop::GPUCompilationInfo {
+ std::vector<Napi::ObjectReference> messages;
+
+ GPUCompilationInfo(Napi::Env env, Messages msgs) {
+ messages.reserve(msgs.size());
+ for (auto& msg : msgs) {
+ messages.emplace_back(Napi::Persistent(Napi::Object(env, msg)));
}
- uint64_t getLinePos(Napi::Env) override {
- return message.linePos;
+ }
+ Messages getMessages(Napi::Env) override {
+ Messages out;
+ out.reserve(messages.size());
+ for (auto& msg : messages) {
+ out.emplace_back(msg.Value());
}
- uint64_t getOffset(Napi::Env) override {
- return message.offset;
+ return out;
+ }
+ };
+
+ using Promise = interop::Promise<interop::Interface<interop::GPUCompilationInfo>>;
+
+ struct Context {
+ Napi::Env env;
+ Promise promise;
+ AsyncTask task;
+ };
+ auto ctx = new Context{env, Promise(env, PROMISE_INFO), AsyncTask(async_)};
+ auto promise = ctx->promise;
+
+ shader_.GetCompilationInfo(
+ [](WGPUCompilationInfoRequestStatus status, WGPUCompilationInfo const* compilationInfo,
+ void* userdata) {
+ auto c = std::unique_ptr<Context>(static_cast<Context*>(userdata));
+
+ Messages messages(compilationInfo->messageCount);
+ for (uint32_t i = 0; i < compilationInfo->messageCount; i++) {
+ auto& msg = compilationInfo->messages[i];
+ messages[i] =
+ interop::GPUCompilationMessage::Create<GPUCompilationMessage>(c->env, msg);
}
- uint64_t getLength(Napi::Env) override {
- return message.length;
- }
- };
- using Messages = std::vector<interop::Interface<interop::GPUCompilationMessage>>;
+ c->promise.Resolve(interop::GPUCompilationInfo::Create<GPUCompilationInfo>(
+ c->env, c->env, std::move(messages)));
+ },
+ ctx);
- struct GPUCompilationInfo : public interop::GPUCompilationInfo {
- std::vector<Napi::ObjectReference> messages;
+ return promise;
+}
- GPUCompilationInfo(Napi::Env env, Messages msgs) {
- messages.reserve(msgs.size());
- for (auto& msg : msgs) {
- messages.emplace_back(Napi::Persistent(Napi::Object(env, msg)));
- }
- }
- Messages getMessages(Napi::Env) override {
- Messages out;
- out.reserve(messages.size());
- for (auto& msg : messages) {
- out.emplace_back(msg.Value());
- }
- return out;
- }
- };
-
- using Promise = interop::Promise<interop::Interface<interop::GPUCompilationInfo>>;
-
- struct Context {
- Napi::Env env;
- Promise promise;
- AsyncTask task;
- };
- auto ctx = new Context{env, Promise(env, PROMISE_INFO), async_};
- auto promise = ctx->promise;
-
- shader_.GetCompilationInfo(
- [](WGPUCompilationInfoRequestStatus status, WGPUCompilationInfo const* compilationInfo,
- void* userdata) {
- auto c = std::unique_ptr<Context>(static_cast<Context*>(userdata));
-
- Messages messages(compilationInfo->messageCount);
- for (uint32_t i = 0; i < compilationInfo->messageCount; i++) {
- auto& msg = compilationInfo->messages[i];
- messages[i] =
- interop::GPUCompilationMessage::Create<GPUCompilationMessage>(c->env, msg);
- }
-
- c->promise.Resolve(interop::GPUCompilationInfo::Create<GPUCompilationInfo>(
- c->env, c->env, std::move(messages)));
- },
- ctx);
-
- return promise;
- }
-
- std::variant<std::string, interop::UndefinedType> GPUShaderModule::getLabel(Napi::Env) {
- UNIMPLEMENTED();
- }
-
- void GPUShaderModule::setLabel(Napi::Env,
- std::variant<std::string, interop::UndefinedType> value) {
- UNIMPLEMENTED();
- }
+std::string GPUShaderModule::getLabel(Napi::Env) {
+ UNIMPLEMENTED();
+}
+
+void GPUShaderModule::setLabel(Napi::Env, std::string value) {
+ UNIMPLEMENTED();
+}
} // namespace wgpu::binding
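
compilationInfo above shows the callback pattern used throughout these bindings: a heap-allocated Context (environment, promise, async task) is handed to the C-style GetCompilationInfo callback as userdata, reclaimed inside the callback with std::unique_ptr, and used to resolve the promise. The sketch below reproduces that ownership handoff against a fake C-style API; everything named Fake* or RunFakeAsyncOp is invented for illustration and is not part of Dawn.

#include <cstdio>
#include <memory>
#include <string>

// A fake C-style async API: a function pointer plus a void* userdata,
// mirroring the shape of wgpu::ShaderModule::GetCompilationInfo.
using FakeCallback = void (*)(const char* info, void* userdata);
void RunFakeAsyncOp(FakeCallback cb, void* userdata) {
    cb("1 warning, 0 errors", userdata);  // a real API would invoke this later
}

struct FakePromise {
    void Resolve(const std::string& value) { std::printf("resolved: %s\n", value.c_str()); }
};

struct Context {
    FakePromise promise;
    // The real Context also holds the Napi::Env and an AsyncTask that keeps
    // the event loop alive until the callback fires.
};

FakePromise CompilationInfoLike() {
    auto* ctx = new Context{};           // ownership passes to the callback
    FakePromise promise = ctx->promise;  // handed back to the caller immediately

    RunFakeAsyncOp(
        [](const char* info, void* userdata) {
            // Reclaim ownership; the Context is destroyed when 'c' goes out of scope.
            auto c = std::unique_ptr<Context>(static_cast<Context*>(userdata));
            c->promise.Resolve(info);
        },
        ctx);

    return promise;
}

int main() {
    CompilationInfoLike();
}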
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPUShaderModule.h b/chromium/third_party/dawn/src/dawn/node/binding/GPUShaderModule.h
index 2f7396ae3fc..ccaf65c11ca 100644
--- a/chromium/third_party/dawn/src/dawn/node/binding/GPUShaderModule.h
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPUShaderModule.h
@@ -15,36 +15,36 @@
#ifndef SRC_DAWN_NODE_BINDING_GPUSHADERMODULE_H_
#define SRC_DAWN_NODE_BINDING_GPUSHADERMODULE_H_
+#include <memory>
+#include <string>
+
#include "dawn/native/DawnNative.h"
#include "dawn/webgpu_cpp.h"
-
#include "src/dawn/node/binding/AsyncRunner.h"
#include "src/dawn/node/interop/Napi.h"
#include "src/dawn/node/interop/WebGPU.h"
namespace wgpu::binding {
- // GPUShaderModule is an implementation of interop::GPUShaderModule that wraps a
- // wgpu::ShaderModule.
- class GPUShaderModule final : public interop::GPUShaderModule {
- public:
- GPUShaderModule(wgpu::ShaderModule shader, std::shared_ptr<AsyncRunner> async);
-
- // Implicit cast operator to Dawn GPU object
- inline operator const wgpu::ShaderModule&() const {
- return shader_;
- }
-
- // interop::GPUShaderModule interface compliance
- interop::Promise<interop::Interface<interop::GPUCompilationInfo>> compilationInfo(
- Napi::Env) override;
- std::variant<std::string, interop::UndefinedType> getLabel(Napi::Env) override;
- void setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) override;
-
- private:
- wgpu::ShaderModule shader_;
- std::shared_ptr<AsyncRunner> async_;
- };
+// GPUShaderModule is an implementation of interop::GPUShaderModule that wraps a
+// wgpu::ShaderModule.
+class GPUShaderModule final : public interop::GPUShaderModule {
+ public:
+ GPUShaderModule(wgpu::ShaderModule shader, std::shared_ptr<AsyncRunner> async);
+
+ // Implicit cast operator to Dawn GPU object
+ inline operator const wgpu::ShaderModule&() const { return shader_; }
+
+ // interop::GPUShaderModule interface compliance
+ interop::Promise<interop::Interface<interop::GPUCompilationInfo>> compilationInfo(
+ Napi::Env) override;
+ std::string getLabel(Napi::Env) override;
+ void setLabel(Napi::Env, std::string value) override;
+
+ private:
+ wgpu::ShaderModule shader_;
+ std::shared_ptr<AsyncRunner> async_;
+};
} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPUSupportedLimits.cpp b/chromium/third_party/dawn/src/dawn/node/binding/GPUSupportedLimits.cpp
index 23c19b21875..83a5cb2b294 100644
--- a/chromium/third_party/dawn/src/dawn/node/binding/GPUSupportedLimits.cpp
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPUSupportedLimits.cpp
@@ -14,118 +14,123 @@
#include "src/dawn/node/binding/GPUSupportedLimits.h"
-namespace wgpu::binding {
+#include <utility>
- ////////////////////////////////////////////////////////////////////////////////
- // wgpu::bindings::GPUSupportedLimits
- ////////////////////////////////////////////////////////////////////////////////
+namespace wgpu::binding {
- GPUSupportedLimits::GPUSupportedLimits(wgpu::SupportedLimits limits)
- : limits_(std::move(limits)) {
- }
+////////////////////////////////////////////////////////////////////////////////
+// wgpu::bindings::GPUSupportedLimits
+////////////////////////////////////////////////////////////////////////////////
- uint32_t GPUSupportedLimits::getMaxTextureDimension1D(Napi::Env) {
- return limits_.limits.maxTextureDimension1D;
- }
+GPUSupportedLimits::GPUSupportedLimits(wgpu::SupportedLimits limits) : limits_(std::move(limits)) {}
- uint32_t GPUSupportedLimits::getMaxTextureDimension2D(Napi::Env) {
- return limits_.limits.maxTextureDimension2D;
- }
+uint32_t GPUSupportedLimits::getMaxTextureDimension1D(Napi::Env) {
+ return limits_.limits.maxTextureDimension1D;
+}
- uint32_t GPUSupportedLimits::getMaxTextureDimension3D(Napi::Env) {
- return limits_.limits.maxTextureDimension3D;
- }
+uint32_t GPUSupportedLimits::getMaxTextureDimension2D(Napi::Env) {
+ return limits_.limits.maxTextureDimension2D;
+}
- uint32_t GPUSupportedLimits::getMaxTextureArrayLayers(Napi::Env) {
- return limits_.limits.maxTextureArrayLayers;
- }
+uint32_t GPUSupportedLimits::getMaxTextureDimension3D(Napi::Env) {
+ return limits_.limits.maxTextureDimension3D;
+}
- uint32_t GPUSupportedLimits::getMaxBindGroups(Napi::Env) {
- return limits_.limits.maxBindGroups;
- }
+uint32_t GPUSupportedLimits::getMaxTextureArrayLayers(Napi::Env) {
+ return limits_.limits.maxTextureArrayLayers;
+}
- uint32_t GPUSupportedLimits::getMaxDynamicUniformBuffersPerPipelineLayout(Napi::Env) {
- return limits_.limits.maxDynamicUniformBuffersPerPipelineLayout;
- }
+uint32_t GPUSupportedLimits::getMaxBindGroups(Napi::Env) {
+ return limits_.limits.maxBindGroups;
+}
- uint32_t GPUSupportedLimits::getMaxDynamicStorageBuffersPerPipelineLayout(Napi::Env) {
- return limits_.limits.maxDynamicStorageBuffersPerPipelineLayout;
- }
+uint32_t GPUSupportedLimits::getMaxDynamicUniformBuffersPerPipelineLayout(Napi::Env) {
+ return limits_.limits.maxDynamicUniformBuffersPerPipelineLayout;
+}
- uint32_t GPUSupportedLimits::getMaxSampledTexturesPerShaderStage(Napi::Env) {
- return limits_.limits.maxSampledTexturesPerShaderStage;
- }
+uint32_t GPUSupportedLimits::getMaxDynamicStorageBuffersPerPipelineLayout(Napi::Env) {
+ return limits_.limits.maxDynamicStorageBuffersPerPipelineLayout;
+}
- uint32_t GPUSupportedLimits::getMaxSamplersPerShaderStage(Napi::Env) {
- return limits_.limits.maxSamplersPerShaderStage;
- }
+uint32_t GPUSupportedLimits::getMaxSampledTexturesPerShaderStage(Napi::Env) {
+ return limits_.limits.maxSampledTexturesPerShaderStage;
+}
- uint32_t GPUSupportedLimits::getMaxStorageBuffersPerShaderStage(Napi::Env) {
- return limits_.limits.maxStorageBuffersPerShaderStage;
- }
+uint32_t GPUSupportedLimits::getMaxSamplersPerShaderStage(Napi::Env) {
+ return limits_.limits.maxSamplersPerShaderStage;
+}
- uint32_t GPUSupportedLimits::getMaxStorageTexturesPerShaderStage(Napi::Env) {
- return limits_.limits.maxStorageTexturesPerShaderStage;
- }
+uint32_t GPUSupportedLimits::getMaxStorageBuffersPerShaderStage(Napi::Env) {
+ return limits_.limits.maxStorageBuffersPerShaderStage;
+}
- uint32_t GPUSupportedLimits::getMaxUniformBuffersPerShaderStage(Napi::Env) {
- return limits_.limits.maxUniformBuffersPerShaderStage;
- }
+uint32_t GPUSupportedLimits::getMaxStorageTexturesPerShaderStage(Napi::Env) {
+ return limits_.limits.maxStorageTexturesPerShaderStage;
+}
- uint64_t GPUSupportedLimits::getMaxUniformBufferBindingSize(Napi::Env) {
- return limits_.limits.maxUniformBufferBindingSize;
- }
+uint32_t GPUSupportedLimits::getMaxUniformBuffersPerShaderStage(Napi::Env) {
+ return limits_.limits.maxUniformBuffersPerShaderStage;
+}
- uint64_t GPUSupportedLimits::getMaxStorageBufferBindingSize(Napi::Env) {
- return limits_.limits.maxStorageBufferBindingSize;
- }
+uint64_t GPUSupportedLimits::getMaxUniformBufferBindingSize(Napi::Env) {
+ return limits_.limits.maxUniformBufferBindingSize;
+}
- uint32_t GPUSupportedLimits::getMinUniformBufferOffsetAlignment(Napi::Env) {
- return limits_.limits.minUniformBufferOffsetAlignment;
- }
+uint64_t GPUSupportedLimits::getMaxStorageBufferBindingSize(Napi::Env) {
+ return limits_.limits.maxStorageBufferBindingSize;
+}
- uint32_t GPUSupportedLimits::getMinStorageBufferOffsetAlignment(Napi::Env) {
- return limits_.limits.minStorageBufferOffsetAlignment;
- }
+uint32_t GPUSupportedLimits::getMinUniformBufferOffsetAlignment(Napi::Env) {
+ return limits_.limits.minUniformBufferOffsetAlignment;
+}
- uint32_t GPUSupportedLimits::getMaxVertexBuffers(Napi::Env) {
- return limits_.limits.maxVertexBuffers;
- }
+uint32_t GPUSupportedLimits::getMinStorageBufferOffsetAlignment(Napi::Env) {
+ return limits_.limits.minStorageBufferOffsetAlignment;
+}
- uint32_t GPUSupportedLimits::getMaxVertexAttributes(Napi::Env) {
- return limits_.limits.maxVertexAttributes;
- }
+uint32_t GPUSupportedLimits::getMaxVertexBuffers(Napi::Env) {
+ return limits_.limits.maxVertexBuffers;
+}
- uint32_t GPUSupportedLimits::getMaxVertexBufferArrayStride(Napi::Env) {
- return limits_.limits.maxVertexBufferArrayStride;
- }
+uint32_t GPUSupportedLimits::getMaxVertexAttributes(Napi::Env) {
+ return limits_.limits.maxVertexAttributes;
+}
- uint32_t GPUSupportedLimits::getMaxInterStageShaderComponents(Napi::Env) {
- return limits_.limits.maxInterStageShaderComponents;
- }
+uint32_t GPUSupportedLimits::getMaxVertexBufferArrayStride(Napi::Env) {
+ return limits_.limits.maxVertexBufferArrayStride;
+}
- uint32_t GPUSupportedLimits::getMaxComputeWorkgroupStorageSize(Napi::Env) {
- return limits_.limits.maxComputeWorkgroupStorageSize;
- }
+uint32_t GPUSupportedLimits::getMaxInterStageShaderComponents(Napi::Env) {
+ return limits_.limits.maxInterStageShaderComponents;
+}
- uint32_t GPUSupportedLimits::getMaxComputeInvocationsPerWorkgroup(Napi::Env) {
- return limits_.limits.maxComputeInvocationsPerWorkgroup;
- }
+uint32_t GPUSupportedLimits::getMaxInterStageShaderVariables(Napi::Env) {
+ UNIMPLEMENTED();
+ // return limits_.limits.maxInterStageShaderVariables;
+}
- uint32_t GPUSupportedLimits::getMaxComputeWorkgroupSizeX(Napi::Env) {
- return limits_.limits.maxComputeWorkgroupSizeX;
- }
+uint32_t GPUSupportedLimits::getMaxComputeWorkgroupStorageSize(Napi::Env) {
+ return limits_.limits.maxComputeWorkgroupStorageSize;
+}
- uint32_t GPUSupportedLimits::getMaxComputeWorkgroupSizeY(Napi::Env) {
- return limits_.limits.maxComputeWorkgroupSizeY;
- }
+uint32_t GPUSupportedLimits::getMaxComputeInvocationsPerWorkgroup(Napi::Env) {
+ return limits_.limits.maxComputeInvocationsPerWorkgroup;
+}
- uint32_t GPUSupportedLimits::getMaxComputeWorkgroupSizeZ(Napi::Env) {
- return limits_.limits.maxComputeWorkgroupSizeZ;
- }
+uint32_t GPUSupportedLimits::getMaxComputeWorkgroupSizeX(Napi::Env) {
+ return limits_.limits.maxComputeWorkgroupSizeX;
+}
- uint32_t GPUSupportedLimits::getMaxComputeWorkgroupsPerDimension(Napi::Env) {
- return limits_.limits.maxComputeWorkgroupsPerDimension;
- }
+uint32_t GPUSupportedLimits::getMaxComputeWorkgroupSizeY(Napi::Env) {
+ return limits_.limits.maxComputeWorkgroupSizeY;
+}
+
+uint32_t GPUSupportedLimits::getMaxComputeWorkgroupSizeZ(Napi::Env) {
+ return limits_.limits.maxComputeWorkgroupSizeZ;
+}
+
+uint32_t GPUSupportedLimits::getMaxComputeWorkgroupsPerDimension(Napi::Env) {
+ return limits_.limits.maxComputeWorkgroupsPerDimension;
+}
} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPUSupportedLimits.h b/chromium/third_party/dawn/src/dawn/node/binding/GPUSupportedLimits.h
index 27ea31982fc..246da4d41fd 100644
--- a/chromium/third_party/dawn/src/dawn/node/binding/GPUSupportedLimits.h
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPUSupportedLimits.h
@@ -23,42 +23,43 @@
namespace wgpu::binding {
- // GPUSupportedLimits is an implementation of interop::GPUSupportedLimits.
- class GPUSupportedLimits final : public interop::GPUSupportedLimits {
- public:
- GPUSupportedLimits(wgpu::SupportedLimits);
+// GPUSupportedLimits is an implementation of interop::GPUSupportedLimits.
+class GPUSupportedLimits final : public interop::GPUSupportedLimits {
+ public:
+ explicit GPUSupportedLimits(wgpu::SupportedLimits);
- // interop::GPUSupportedLimits interface compliance
- uint32_t getMaxTextureDimension1D(Napi::Env) override;
- uint32_t getMaxTextureDimension2D(Napi::Env) override;
- uint32_t getMaxTextureDimension3D(Napi::Env) override;
- uint32_t getMaxTextureArrayLayers(Napi::Env) override;
- uint32_t getMaxBindGroups(Napi::Env) override;
- uint32_t getMaxDynamicUniformBuffersPerPipelineLayout(Napi::Env) override;
- uint32_t getMaxDynamicStorageBuffersPerPipelineLayout(Napi::Env) override;
- uint32_t getMaxSampledTexturesPerShaderStage(Napi::Env) override;
- uint32_t getMaxSamplersPerShaderStage(Napi::Env) override;
- uint32_t getMaxStorageBuffersPerShaderStage(Napi::Env) override;
- uint32_t getMaxStorageTexturesPerShaderStage(Napi::Env) override;
- uint32_t getMaxUniformBuffersPerShaderStage(Napi::Env) override;
- uint64_t getMaxUniformBufferBindingSize(Napi::Env) override;
- uint64_t getMaxStorageBufferBindingSize(Napi::Env) override;
- uint32_t getMinUniformBufferOffsetAlignment(Napi::Env) override;
- uint32_t getMinStorageBufferOffsetAlignment(Napi::Env) override;
- uint32_t getMaxVertexBuffers(Napi::Env) override;
- uint32_t getMaxVertexAttributes(Napi::Env) override;
- uint32_t getMaxVertexBufferArrayStride(Napi::Env) override;
- uint32_t getMaxInterStageShaderComponents(Napi::Env) override;
- uint32_t getMaxComputeWorkgroupStorageSize(Napi::Env) override;
- uint32_t getMaxComputeInvocationsPerWorkgroup(Napi::Env) override;
- uint32_t getMaxComputeWorkgroupSizeX(Napi::Env) override;
- uint32_t getMaxComputeWorkgroupSizeY(Napi::Env) override;
- uint32_t getMaxComputeWorkgroupSizeZ(Napi::Env) override;
- uint32_t getMaxComputeWorkgroupsPerDimension(Napi::Env) override;
+ // interop::GPUSupportedLimits interface compliance
+ uint32_t getMaxTextureDimension1D(Napi::Env) override;
+ uint32_t getMaxTextureDimension2D(Napi::Env) override;
+ uint32_t getMaxTextureDimension3D(Napi::Env) override;
+ uint32_t getMaxTextureArrayLayers(Napi::Env) override;
+ uint32_t getMaxBindGroups(Napi::Env) override;
+ uint32_t getMaxDynamicUniformBuffersPerPipelineLayout(Napi::Env) override;
+ uint32_t getMaxDynamicStorageBuffersPerPipelineLayout(Napi::Env) override;
+ uint32_t getMaxSampledTexturesPerShaderStage(Napi::Env) override;
+ uint32_t getMaxSamplersPerShaderStage(Napi::Env) override;
+ uint32_t getMaxStorageBuffersPerShaderStage(Napi::Env) override;
+ uint32_t getMaxStorageTexturesPerShaderStage(Napi::Env) override;
+ uint32_t getMaxUniformBuffersPerShaderStage(Napi::Env) override;
+ uint64_t getMaxUniformBufferBindingSize(Napi::Env) override;
+ uint64_t getMaxStorageBufferBindingSize(Napi::Env) override;
+ uint32_t getMinUniformBufferOffsetAlignment(Napi::Env) override;
+ uint32_t getMinStorageBufferOffsetAlignment(Napi::Env) override;
+ uint32_t getMaxVertexBuffers(Napi::Env) override;
+ uint32_t getMaxVertexAttributes(Napi::Env) override;
+ uint32_t getMaxVertexBufferArrayStride(Napi::Env) override;
+ uint32_t getMaxInterStageShaderComponents(Napi::Env) override;
+ uint32_t getMaxInterStageShaderVariables(Napi::Env) override;
+ uint32_t getMaxComputeWorkgroupStorageSize(Napi::Env) override;
+ uint32_t getMaxComputeInvocationsPerWorkgroup(Napi::Env) override;
+ uint32_t getMaxComputeWorkgroupSizeX(Napi::Env) override;
+ uint32_t getMaxComputeWorkgroupSizeY(Napi::Env) override;
+ uint32_t getMaxComputeWorkgroupSizeZ(Napi::Env) override;
+ uint32_t getMaxComputeWorkgroupsPerDimension(Napi::Env) override;
- private:
- wgpu::SupportedLimits limits_;
- };
+ private:
+ wgpu::SupportedLimits limits_;
+};
} // namespace wgpu::binding
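A recurring change in these headers is the `explicit` keyword on every single-argument binding constructor (the ClampedInteger/EnforceRangeInteger constructors further down instead carry NOLINTNEXTLINE(runtime/explicit) because their implicit conversions are intentional). A self-contained illustration of what the keyword buys, using hypothetical types rather than the Dawn classes:

#include <cstdint>

struct Limits {  // stand-in for wgpu::SupportedLimits
    uint32_t maxBindGroups = 4;
};

class LimitsWrapper {  // stand-in for binding::GPUSupportedLimits
  public:
    explicit LimitsWrapper(Limits l) : limits_(l) {}
  private:
    Limits limits_;
};

int main() {
    Limits raw{};
    // LimitsWrapper a = raw;  // no longer compiles: implicit conversion is rejected
    LimitsWrapper b{raw};      // OK: the wrapping has to be spelled out at the call site
    (void)b;
    return 0;
}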
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPUTexture.cpp b/chromium/third_party/dawn/src/dawn/node/binding/GPUTexture.cpp
index ac0465f8cc0..38aaac598dc 100644
--- a/chromium/third_party/dawn/src/dawn/node/binding/GPUTexture.cpp
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPUTexture.cpp
@@ -14,6 +14,8 @@
#include "src/dawn/node/binding/GPUTexture.h"
+#include <utility>
+
#include "src/dawn/node/binding/Converter.h"
#include "src/dawn/node/binding/Errors.h"
#include "src/dawn/node/binding/GPUTextureView.h"
@@ -21,44 +23,75 @@
namespace wgpu::binding {
- ////////////////////////////////////////////////////////////////////////////////
- // wgpu::bindings::GPUTexture
- ////////////////////////////////////////////////////////////////////////////////
- GPUTexture::GPUTexture(wgpu::Texture texture) : texture_(std::move(texture)) {
- }
+////////////////////////////////////////////////////////////////////////////////
+// wgpu::bindings::GPUTexture
+////////////////////////////////////////////////////////////////////////////////
+GPUTexture::GPUTexture(wgpu::Texture texture) : texture_(std::move(texture)) {}
- interop::Interface<interop::GPUTextureView> GPUTexture::createView(
- Napi::Env env,
- interop::GPUTextureViewDescriptor descriptor) {
- if (!texture_) {
- Errors::OperationError(env).ThrowAsJavaScriptException();
- return {};
- }
-
- wgpu::TextureViewDescriptor desc{};
- Converter conv(env);
- if (!conv(desc.baseMipLevel, descriptor.baseMipLevel) || //
- !conv(desc.mipLevelCount, descriptor.mipLevelCount) || //
- !conv(desc.baseArrayLayer, descriptor.baseArrayLayer) || //
- !conv(desc.arrayLayerCount, descriptor.arrayLayerCount) || //
- !conv(desc.format, descriptor.format) || //
- !conv(desc.dimension, descriptor.dimension) || //
- !conv(desc.aspect, descriptor.aspect)) {
- return {};
- }
- return interop::GPUTextureView::Create<GPUTextureView>(env, texture_.CreateView(&desc));
+interop::Interface<interop::GPUTextureView> GPUTexture::createView(
+ Napi::Env env,
+ interop::GPUTextureViewDescriptor descriptor) {
+ if (!texture_) {
+ Errors::OperationError(env).ThrowAsJavaScriptException();
+ return {};
}
- void GPUTexture::destroy(Napi::Env) {
- texture_.Destroy();
+ wgpu::TextureViewDescriptor desc{};
+ Converter conv(env);
+ if (!conv(desc.baseMipLevel, descriptor.baseMipLevel) || //
+ !conv(desc.mipLevelCount, descriptor.mipLevelCount) || //
+ !conv(desc.baseArrayLayer, descriptor.baseArrayLayer) || //
+ !conv(desc.arrayLayerCount, descriptor.arrayLayerCount) || //
+ !conv(desc.format, descriptor.format) || //
+ !conv(desc.dimension, descriptor.dimension) || //
+ !conv(desc.aspect, descriptor.aspect)) {
+ return {};
}
+ return interop::GPUTextureView::Create<GPUTextureView>(env, texture_.CreateView(&desc));
+}
- std::variant<std::string, interop::UndefinedType> GPUTexture::getLabel(Napi::Env) {
- UNIMPLEMENTED();
- }
+void GPUTexture::destroy(Napi::Env) {
+ texture_.Destroy();
+}
- void GPUTexture::setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) {
- UNIMPLEMENTED();
- }
+interop::GPUIntegerCoordinate GPUTexture::getWidth(Napi::Env) {
+ UNIMPLEMENTED();
+}
+
+interop::GPUIntegerCoordinate GPUTexture::getHeight(Napi::Env) {
+ UNIMPLEMENTED();
+}
+
+interop::GPUIntegerCoordinate GPUTexture::getDepthOrArrayLayers(Napi::Env) {
+ UNIMPLEMENTED();
+}
+
+interop::GPUIntegerCoordinate GPUTexture::getMipLevelCount(Napi::Env) {
+ UNIMPLEMENTED();
+}
+
+interop::GPUSize32 GPUTexture::getSampleCount(Napi::Env) {
+ UNIMPLEMENTED();
+}
+
+interop::GPUTextureDimension GPUTexture::getDimension(Napi::Env) {
+ UNIMPLEMENTED();
+}
+
+interop::GPUTextureFormat GPUTexture::getFormat(Napi::Env) {
+ UNIMPLEMENTED();
+}
+
+interop::GPUTextureUsageFlags GPUTexture::getUsage(Napi::Env) {
+ UNIMPLEMENTED();
+}
+
+std::string GPUTexture::getLabel(Napi::Env) {
+ UNIMPLEMENTED();
+}
+
+void GPUTexture::setLabel(Napi::Env, std::string value) {
+ UNIMPLEMENTED();
+}
} // namespace wgpu::binding
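createView() above shows the idiom these bindings use for descriptor conversion: one Converter is applied field by field and the || chain short-circuits on the first failed conversion, so the function returns an empty interface instead of a half-filled descriptor. A compact, self-contained model of that control flow (MockConv and ViewDesc are stand-ins, not the real binding::Converter):

#include <cstdint>
#include <optional>

// Stand-in for binding::Converter: reports failure by returning false so the
// caller can abandon the whole conversion.
struct MockConv {
    bool operator()(uint32_t& out, std::optional<uint32_t> in) const {
        if (!in) {
            return false;
        }
        out = *in;
        return true;
    }
};

struct ViewDesc {
    uint32_t baseMipLevel = 0;
    uint32_t mipLevelCount = 0;
};

// Mirrors the shape of createView(): stop at the first conversion that fails.
std::optional<ViewDesc> ToViewDesc(std::optional<uint32_t> base, std::optional<uint32_t> count) {
    ViewDesc desc{};
    MockConv conv;
    if (!conv(desc.baseMipLevel, base) ||  //
        !conv(desc.mipLevelCount, count)) {
        return std::nullopt;  // first failure aborts the whole conversion
    }
    return desc;
}

int main() {
    return ToViewDesc(2, 1).has_value() ? 0 : 1;
}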
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPUTexture.h b/chromium/third_party/dawn/src/dawn/node/binding/GPUTexture.h
index fc2582cf6ad..e108d781849 100644
--- a/chromium/third_party/dawn/src/dawn/node/binding/GPUTexture.h
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPUTexture.h
@@ -15,35 +15,42 @@
#ifndef SRC_DAWN_NODE_BINDING_GPUTEXTURE_H_
#define SRC_DAWN_NODE_BINDING_GPUTEXTURE_H_
+#include <string>
+
#include "dawn/native/DawnNative.h"
#include "dawn/webgpu_cpp.h"
-
#include "src/dawn/node/interop/Napi.h"
#include "src/dawn/node/interop/WebGPU.h"
namespace wgpu::binding {
- // GPUTexture is an implementation of interop::GPUTexture that wraps a wgpu::Texture.
- class GPUTexture final : public interop::GPUTexture {
- public:
- GPUTexture(wgpu::Texture texture);
-
- // Implicit cast operator to Dawn GPU object
- inline operator const wgpu::Texture&() const {
- return texture_;
- }
-
- // interop::GPUTexture interface compliance
- interop::Interface<interop::GPUTextureView> createView(
- Napi::Env,
- interop::GPUTextureViewDescriptor descriptor) override;
- void destroy(Napi::Env) override;
- std::variant<std::string, interop::UndefinedType> getLabel(Napi::Env) override;
- void setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) override;
-
- private:
- wgpu::Texture texture_;
- };
+// GPUTexture is an implementation of interop::GPUTexture that wraps a wgpu::Texture.
+class GPUTexture final : public interop::GPUTexture {
+ public:
+ explicit GPUTexture(wgpu::Texture texture);
+
+ // Implicit cast operator to Dawn GPU object
+ inline operator const wgpu::Texture&() const { return texture_; }
+
+ // interop::GPUTexture interface compliance
+ interop::Interface<interop::GPUTextureView> createView(
+ Napi::Env,
+ interop::GPUTextureViewDescriptor descriptor) override;
+ void destroy(Napi::Env) override;
+ interop::GPUIntegerCoordinate getWidth(Napi::Env) override;
+ interop::GPUIntegerCoordinate getHeight(Napi::Env) override;
+ interop::GPUIntegerCoordinate getDepthOrArrayLayers(Napi::Env) override;
+ interop::GPUIntegerCoordinate getMipLevelCount(Napi::Env) override;
+ interop::GPUSize32 getSampleCount(Napi::Env) override;
+ interop::GPUTextureDimension getDimension(Napi::Env) override;
+ interop::GPUTextureFormat getFormat(Napi::Env) override;
+ interop::GPUTextureUsageFlags getUsage(Napi::Env) override;
+ std::string getLabel(Napi::Env) override;
+ void setLabel(Napi::Env, std::string value) override;
+
+ private:
+ wgpu::Texture texture_;
+};
} // namespace wgpu::binding
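GPUTexture keeps its implicit `operator const wgpu::Texture&()`, so the wrapper can be passed directly wherever Dawn wants the raw handle. A small self-contained mock of that pattern (RawTexture and TextureWrapper are hypothetical names, not Dawn types):

#include <string>
#include <utility>

struct RawTexture {  // stands in for wgpu::Texture
    std::string label;
};

class TextureWrapper {  // stands in for binding::GPUTexture
  public:
    explicit TextureWrapper(RawTexture t) : texture_(std::move(t)) {}
    // Implicit cast operator to the underlying handle, as in the header above.
    operator const RawTexture&() const { return texture_; }
  private:
    RawTexture texture_;
};

// A helper taking the raw type accepts the wrapper without an explicit unwrap.
std::string Describe(const RawTexture& t) {
    return "texture: " + t.label;
}

int main() {
    TextureWrapper wrapper{RawTexture{"swapchain"}};
    return Describe(wrapper).empty() ? 1 : 0;  // the wrapper converts implicitly
}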
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPUTextureView.cpp b/chromium/third_party/dawn/src/dawn/node/binding/GPUTextureView.cpp
index 43c88f1f30c..98ce3a046dc 100644
--- a/chromium/third_party/dawn/src/dawn/node/binding/GPUTextureView.cpp
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPUTextureView.cpp
@@ -14,23 +14,23 @@
#include "src/dawn/node/binding/GPUTextureView.h"
+#include <utility>
+
#include "src/dawn/node/utils/Debug.h"
namespace wgpu::binding {
- ////////////////////////////////////////////////////////////////////////////////
- // wgpu::bindings::GPUTextureView
- ////////////////////////////////////////////////////////////////////////////////
- GPUTextureView::GPUTextureView(wgpu::TextureView view) : view_(std::move(view)) {
- }
+////////////////////////////////////////////////////////////////////////////////
+// wgpu::bindings::GPUTextureView
+////////////////////////////////////////////////////////////////////////////////
+GPUTextureView::GPUTextureView(wgpu::TextureView view) : view_(std::move(view)) {}
- std::variant<std::string, interop::UndefinedType> GPUTextureView::getLabel(Napi::Env) {
- UNIMPLEMENTED();
- }
+std::string GPUTextureView::getLabel(Napi::Env) {
+ UNIMPLEMENTED();
+}
- void GPUTextureView::setLabel(Napi::Env,
- std::variant<std::string, interop::UndefinedType> value) {
- UNIMPLEMENTED();
- }
+void GPUTextureView::setLabel(Napi::Env, std::string value) {
+ UNIMPLEMENTED();
+}
} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPUTextureView.h b/chromium/third_party/dawn/src/dawn/node/binding/GPUTextureView.h
index 5f4db27ef85..494c3074999 100644
--- a/chromium/third_party/dawn/src/dawn/node/binding/GPUTextureView.h
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPUTextureView.h
@@ -15,32 +15,31 @@
#ifndef SRC_DAWN_NODE_BINDING_GPUTEXTUREVIEW_H_
#define SRC_DAWN_NODE_BINDING_GPUTEXTUREVIEW_H_
+#include <string>
+
#include "dawn/native/DawnNative.h"
#include "dawn/webgpu_cpp.h"
-
#include "src/dawn/node/interop/Napi.h"
#include "src/dawn/node/interop/WebGPU.h"
namespace wgpu::binding {
- // GPUTextureView is an implementation of interop::GPUTextureView that wraps a
- // wgpu::TextureView.
- class GPUTextureView final : public interop::GPUTextureView {
- public:
- GPUTextureView(wgpu::TextureView view);
+// GPUTextureView is an implementation of interop::GPUTextureView that wraps a
+// wgpu::TextureView.
+class GPUTextureView final : public interop::GPUTextureView {
+ public:
+ explicit GPUTextureView(wgpu::TextureView view);
- // Implicit cast operator to Dawn GPU object
- inline operator const wgpu::TextureView&() const {
- return view_;
- }
+ // Implicit cast operator to Dawn GPU object
+ inline operator const wgpu::TextureView&() const { return view_; }
- // interop::GPUTextureView interface compliance
- std::variant<std::string, interop::UndefinedType> getLabel(Napi::Env) override;
- void setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) override;
+ // interop::GPUTextureView interface compliance
+ std::string getLabel(Napi::Env) override;
+ void setLabel(Napi::Env, std::string value) override;
- private:
- wgpu::TextureView view_;
- };
+ private:
+ wgpu::TextureView view_;
+};
} // namespace wgpu::binding
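Across these binding headers getLabel()/setLabel() switch from std::variant<std::string, interop::UndefinedType> to plain std::string, so callers no longer have to unpack a string-or-undefined value. A self-contained before/after sketch (UndefinedType here is a local stand-in for the interop type):

#include <string>
#include <variant>

struct UndefinedType {};  // local stand-in for interop::UndefinedType

// Old shape: the label could be a string or `undefined`, so every caller unpacked it.
std::string OldStyleLabel(const std::variant<std::string, UndefinedType>& label) {
    if (const auto* s = std::get_if<std::string>(&label)) {
        return *s;
    }
    return "";  // label was undefined
}

// New shape: the label is always a (possibly empty) string.
std::string NewStyleLabel(const std::string& label) {
    return label;
}

int main() {
    return OldStyleLabel(UndefinedType{}) == NewStyleLabel("") ? 0 : 1;
}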
diff --git a/chromium/third_party/dawn/src/dawn/node/interop/Browser.idl b/chromium/third_party/dawn/src/dawn/node/interop/Browser.idl
index b36c667b2d8..44638f62ab7 100644
--- a/chromium/third_party/dawn/src/dawn/node/interop/Browser.idl
+++ b/chromium/third_party/dawn/src/dawn/node/interop/Browser.idl
@@ -86,3 +86,5 @@ typedef(ArrayBufferView or ArrayBuffer) BufferSource;
};
[LegacyNoInterfaceObject] interface EventHandler{};
+
+enum PredefinedColorSpace { "srgb", "display-p3" };
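Browser.idl gains the PredefinedColorSpace enum; the C++ enum and its converter are emitted by the interop generator driven from this IDL (see the CMake rules in the next file), so the following is only a hypothetical illustration of the string mapping the two enumerators imply:

#include <string>

// Hypothetical names; the real binding is generated from Browser.idl.
enum class PredefinedColorSpace { kSrgb, kDisplayP3 };

inline bool PredefinedColorSpaceFromString(const std::string& s, PredefinedColorSpace& out) {
    if (s == "srgb") {
        out = PredefinedColorSpace::kSrgb;
        return true;
    }
    if (s == "display-p3") {
        out = PredefinedColorSpace::kDisplayP3;
        return true;
    }
    return false;  // unknown enum string: the conversion fails
}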
diff --git a/chromium/third_party/dawn/src/dawn/node/interop/CMakeLists.txt b/chromium/third_party/dawn/src/dawn/node/interop/CMakeLists.txt
index 1199636099b..f7742435533 100644
--- a/chromium/third_party/dawn/src/dawn/node/interop/CMakeLists.txt
+++ b/chromium/third_party/dawn/src/dawn/node/interop/CMakeLists.txt
@@ -13,7 +13,7 @@
# limitations under the License.
# Paths to generated files
-set(INTEROP_GEN_DIR "${GEN_DIR}/src/dawn/node/interop")
+set(INTEROP_GEN_DIR "${DAWN_NODE_GEN_DIR}/src/dawn/node/interop")
set(INTEROP_WEBGPU_H "${INTEROP_GEN_DIR}/WebGPU.h")
set(INTEROP_WEBGPU_CPP "${INTEROP_GEN_DIR}/WebGPU.cpp")
@@ -54,7 +54,7 @@ target_include_directories(dawn_node_interop
"${CMAKE_SOURCE_DIR}"
"${DAWN_THIRD_PARTY_DIR}"
"${NODE_API_HEADERS_DIR}/include"
- "${GEN_DIR}"
+ "${DAWN_NODE_GEN_DIR}"
)
target_link_libraries(dawn_node_interop
diff --git a/chromium/third_party/dawn/src/dawn/node/interop/Core.cpp b/chromium/third_party/dawn/src/dawn/node/interop/Core.cpp
index 151d852b8d7..830351673d7 100644
--- a/chromium/third_party/dawn/src/dawn/node/interop/Core.cpp
+++ b/chromium/third_party/dawn/src/dawn/node/interop/Core.cpp
@@ -16,155 +16,155 @@
namespace wgpu::interop {
- Result Success;
-
- Result Error(std::string msg) {
- return {msg};
- }
-
- Result Converter<bool>::FromJS(Napi::Env env, Napi::Value value, bool& out) {
- if (value.IsBoolean()) {
- out = value.ToBoolean();
- return Success;
- }
- return Error("value is not a boolean");
- }
- Napi::Value Converter<bool>::ToJS(Napi::Env env, bool value) {
- return Napi::Value::From(env, value);
- }
-
- Result Converter<std::string>::FromJS(Napi::Env env, Napi::Value value, std::string& out) {
- if (value.IsString()) {
- out = value.ToString();
- return Success;
- }
- return Error("value is not a string");
- }
- Napi::Value Converter<std::string>::ToJS(Napi::Env env, std::string value) {
- return Napi::Value::From(env, value);
- }
-
- Result Converter<int8_t>::FromJS(Napi::Env env, Napi::Value value, int8_t& out) {
- if (value.IsNumber()) {
- out = value.ToNumber().Int32Value();
- return Success;
- }
- return Error("value is not a number");
- }
- Napi::Value Converter<int8_t>::ToJS(Napi::Env env, int8_t value) {
- return Napi::Value::From(env, value);
- }
-
- Result Converter<uint8_t>::FromJS(Napi::Env env, Napi::Value value, uint8_t& out) {
- if (value.IsNumber()) {
- out = value.ToNumber().Uint32Value();
- return Success;
- }
- return Error("value is not a number");
- }
- Napi::Value Converter<uint8_t>::ToJS(Napi::Env env, uint8_t value) {
- return Napi::Value::From(env, value);
- }
-
- Result Converter<int16_t>::FromJS(Napi::Env env, Napi::Value value, int16_t& out) {
- if (value.IsNumber()) {
- out = value.ToNumber().Int32Value();
- return Success;
- }
- return Error("value is not a number");
- }
- Napi::Value Converter<int16_t>::ToJS(Napi::Env env, int16_t value) {
- return Napi::Value::From(env, value);
- }
-
- Result Converter<uint16_t>::FromJS(Napi::Env env, Napi::Value value, uint16_t& out) {
- if (value.IsNumber()) {
- out = value.ToNumber().Uint32Value();
- return Success;
- }
- return Error("value is not a number");
- }
- Napi::Value Converter<uint16_t>::ToJS(Napi::Env env, uint16_t value) {
- return Napi::Value::From(env, value);
- }
-
- Result Converter<int32_t>::FromJS(Napi::Env env, Napi::Value value, int32_t& out) {
- if (value.IsNumber()) {
- out = value.ToNumber().Int32Value();
- return Success;
- }
- return Error("value is not a number");
- }
- Napi::Value Converter<int32_t>::ToJS(Napi::Env env, int32_t value) {
- return Napi::Value::From(env, value);
- }
-
- Result Converter<uint32_t>::FromJS(Napi::Env env, Napi::Value value, uint32_t& out) {
- if (value.IsNumber()) {
- out = value.ToNumber().Uint32Value();
- return Success;
- }
- return Error("value is not a number");
- }
- Napi::Value Converter<uint32_t>::ToJS(Napi::Env env, uint32_t value) {
- return Napi::Value::From(env, value);
- }
-
- Result Converter<int64_t>::FromJS(Napi::Env env, Napi::Value value, int64_t& out) {
- if (value.IsNumber()) {
- out = value.ToNumber().Int64Value();
- return Success;
- }
- return Error("value is not a number");
- }
- Napi::Value Converter<int64_t>::ToJS(Napi::Env env, int64_t value) {
- return Napi::Value::From(env, value);
- }
-
- Result Converter<uint64_t>::FromJS(Napi::Env env, Napi::Value value, uint64_t& out) {
- if (value.IsNumber()) {
- // Note that the JS Number type only stores doubles, so the max integer
- // range of values without precision loss is -2^53 to 2^53 (52 bit mantissa
- // with 1 implicit bit). This is why there's no UInt64Value() function.
- out = static_cast<uint64_t>(value.ToNumber().Int64Value());
- return Success;
- }
- return Error("value is not a number");
- }
- Napi::Value Converter<uint64_t>::ToJS(Napi::Env env, uint64_t value) {
- return Napi::Value::From(env, value);
- }
-
- Result Converter<float>::FromJS(Napi::Env env, Napi::Value value, float& out) {
- if (value.IsNumber()) {
- out = value.ToNumber().FloatValue();
- return Success;
- }
- return Error("value is not a number");
- }
- Napi::Value Converter<float>::ToJS(Napi::Env env, float value) {
- return Napi::Value::From(env, value);
- }
-
- Result Converter<double>::FromJS(Napi::Env env, Napi::Value value, double& out) {
- if (value.IsNumber()) {
- out = value.ToNumber().DoubleValue();
- return Success;
- }
- return Error("value is not a number");
- }
- Napi::Value Converter<double>::ToJS(Napi::Env env, double value) {
- return Napi::Value::From(env, value);
- }
-
- Result Converter<UndefinedType>::FromJS(Napi::Env, Napi::Value value, UndefinedType&) {
- if (value.IsUndefined()) {
- return Success;
- }
- return Error("value is undefined");
- }
- Napi::Value Converter<UndefinedType>::ToJS(Napi::Env env, UndefinedType) {
- return env.Undefined();
- }
+Result Success;
+
+Result Error(std::string msg) {
+ return {msg};
+}
+
+Result Converter<bool>::FromJS(Napi::Env env, Napi::Value value, bool& out) {
+ if (value.IsBoolean()) {
+ out = value.ToBoolean();
+ return Success;
+ }
+ return Error("value is not a boolean");
+}
+Napi::Value Converter<bool>::ToJS(Napi::Env env, bool value) {
+ return Napi::Value::From(env, value);
+}
+
+Result Converter<std::string>::FromJS(Napi::Env env, Napi::Value value, std::string& out) {
+ if (value.IsString()) {
+ out = value.ToString();
+ return Success;
+ }
+ return Error("value is not a string");
+}
+Napi::Value Converter<std::string>::ToJS(Napi::Env env, std::string value) {
+ return Napi::Value::From(env, value);
+}
+
+Result Converter<int8_t>::FromJS(Napi::Env env, Napi::Value value, int8_t& out) {
+ if (value.IsNumber()) {
+ out = value.ToNumber().Int32Value();
+ return Success;
+ }
+ return Error("value is not a number");
+}
+Napi::Value Converter<int8_t>::ToJS(Napi::Env env, int8_t value) {
+ return Napi::Value::From(env, value);
+}
+
+Result Converter<uint8_t>::FromJS(Napi::Env env, Napi::Value value, uint8_t& out) {
+ if (value.IsNumber()) {
+ out = value.ToNumber().Uint32Value();
+ return Success;
+ }
+ return Error("value is not a number");
+}
+Napi::Value Converter<uint8_t>::ToJS(Napi::Env env, uint8_t value) {
+ return Napi::Value::From(env, value);
+}
+
+Result Converter<int16_t>::FromJS(Napi::Env env, Napi::Value value, int16_t& out) {
+ if (value.IsNumber()) {
+ out = value.ToNumber().Int32Value();
+ return Success;
+ }
+ return Error("value is not a number");
+}
+Napi::Value Converter<int16_t>::ToJS(Napi::Env env, int16_t value) {
+ return Napi::Value::From(env, value);
+}
+
+Result Converter<uint16_t>::FromJS(Napi::Env env, Napi::Value value, uint16_t& out) {
+ if (value.IsNumber()) {
+ out = value.ToNumber().Uint32Value();
+ return Success;
+ }
+ return Error("value is not a number");
+}
+Napi::Value Converter<uint16_t>::ToJS(Napi::Env env, uint16_t value) {
+ return Napi::Value::From(env, value);
+}
+
+Result Converter<int32_t>::FromJS(Napi::Env env, Napi::Value value, int32_t& out) {
+ if (value.IsNumber()) {
+ out = value.ToNumber().Int32Value();
+ return Success;
+ }
+ return Error("value is not a number");
+}
+Napi::Value Converter<int32_t>::ToJS(Napi::Env env, int32_t value) {
+ return Napi::Value::From(env, value);
+}
+
+Result Converter<uint32_t>::FromJS(Napi::Env env, Napi::Value value, uint32_t& out) {
+ if (value.IsNumber()) {
+ out = value.ToNumber().Uint32Value();
+ return Success;
+ }
+ return Error("value is not a number");
+}
+Napi::Value Converter<uint32_t>::ToJS(Napi::Env env, uint32_t value) {
+ return Napi::Value::From(env, value);
+}
+
+Result Converter<int64_t>::FromJS(Napi::Env env, Napi::Value value, int64_t& out) {
+ if (value.IsNumber()) {
+ out = value.ToNumber().Int64Value();
+ return Success;
+ }
+ return Error("value is not a number");
+}
+Napi::Value Converter<int64_t>::ToJS(Napi::Env env, int64_t value) {
+ return Napi::Value::From(env, value);
+}
+
+Result Converter<uint64_t>::FromJS(Napi::Env env, Napi::Value value, uint64_t& out) {
+ if (value.IsNumber()) {
+ // Note that the JS Number type only stores doubles, so the max integer
+ // range of values without precision loss is -2^53 to 2^53 (52 bit mantissa
+ // with 1 implicit bit). This is why there's no UInt64Value() function.
+ out = static_cast<uint64_t>(value.ToNumber().Int64Value());
+ return Success;
+ }
+ return Error("value is not a number");
+}
+Napi::Value Converter<uint64_t>::ToJS(Napi::Env env, uint64_t value) {
+ return Napi::Value::From(env, value);
+}
+
+Result Converter<float>::FromJS(Napi::Env env, Napi::Value value, float& out) {
+ if (value.IsNumber()) {
+ out = value.ToNumber().FloatValue();
+ return Success;
+ }
+ return Error("value is not a number");
+}
+Napi::Value Converter<float>::ToJS(Napi::Env env, float value) {
+ return Napi::Value::From(env, value);
+}
+
+Result Converter<double>::FromJS(Napi::Env env, Napi::Value value, double& out) {
+ if (value.IsNumber()) {
+ out = value.ToNumber().DoubleValue();
+ return Success;
+ }
+ return Error("value is not a number");
+}
+Napi::Value Converter<double>::ToJS(Napi::Env env, double value) {
+ return Napi::Value::From(env, value);
+}
+
+Result Converter<UndefinedType>::FromJS(Napi::Env, Napi::Value value, UndefinedType&) {
+ if (value.IsUndefined()) {
+ return Success;
+ }
+ return Error("value is undefined");
+}
+Napi::Value Converter<UndefinedType>::ToJS(Napi::Env env, UndefinedType) {
+ return env.Undefined();
+}
} // namespace wgpu::interop
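The uint64_t converter above goes through Int64Value() because, as its comment notes, a JS Number is a double and only integers of magnitude up to 2^53 survive the round trip exactly. A self-contained check of that boundary in plain C++:

#include <cstdint>
#include <cstdio>

int main() {
    const uint64_t exact = 1ull << 53;        // 9007199254740992: exactly representable as a double
    const uint64_t above = (1ull << 53) + 1;  // 9007199254740993: not representable as a double

    // Round-tripping through double, as a JS Number would, is lossless up to 2^53 ...
    std::printf("%llu -> %llu\n", static_cast<unsigned long long>(exact),
                static_cast<unsigned long long>(static_cast<uint64_t>(static_cast<double>(exact))));
    // ... but 2^53 + 1 collapses back onto 2^53.
    std::printf("%llu -> %llu\n", static_cast<unsigned long long>(above),
                static_cast<unsigned long long>(static_cast<uint64_t>(static_cast<double>(above))));
    return 0;
}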
diff --git a/chromium/third_party/dawn/src/dawn/node/interop/Core.h b/chromium/third_party/dawn/src/dawn/node/interop/Core.h
index 8408d157e8a..4bf9ee79654 100644
--- a/chromium/third_party/dawn/src/dawn/node/interop/Core.h
+++ b/chromium/third_party/dawn/src/dawn/node/interop/Core.h
@@ -19,352 +19,310 @@
#define SRC_DAWN_NODE_INTEROP_CORE_H_
#include <cstdint>
-#include <optional>
+#include <limits>
+// TODO(https://crbug.com/dawn/1379) Update cpplint and remove NOLINT
+#include <optional> // NOLINT(build/include_order)
#include <string>
#include <type_traits>
#include <unordered_map>
-#include <variant>
+#include <utility>
+// TODO(https://crbug.com/dawn/1379) Update cpplint and remove NOLINT
+#include <variant> // NOLINT(build/include_order)
#include <vector>
#include "src/dawn/node/interop/Napi.h"
-
#include "src/dawn/node/utils/Debug.h"
#define ENABLE_INTEROP_LOGGING 0 // Enable for verbose interop logging
#if ENABLE_INTEROP_LOGGING
-# define INTEROP_LOG(...) LOG(__VA_ARGS__)
+#define INTEROP_LOG(...) LOG(__VA_ARGS__)
#else
-# define INTEROP_LOG(...)
+#define INTEROP_LOG(...)
#endif
// A helper macro for constructing a PromiseInfo with the current file, function and line.
// See PromiseInfo
-#define PROMISE_INFO \
- ::wgpu::interop::PromiseInfo { \
- __FILE__, __FUNCTION__, __LINE__ \
- }
+#define PROMISE_INFO \
+ ::wgpu::interop::PromiseInfo { __FILE__, __FUNCTION__, __LINE__ }
namespace wgpu::interop {
- ////////////////////////////////////////////////////////////////////////////////
- // Primitive JavaScript types
- ////////////////////////////////////////////////////////////////////////////////
- using Object = Napi::Object;
- using ArrayBuffer = Napi::ArrayBuffer;
- using Int8Array = Napi::TypedArrayOf<int8_t>;
- using Int16Array = Napi::TypedArrayOf<int16_t>;
- using Int32Array = Napi::TypedArrayOf<int32_t>;
- using Uint8Array = Napi::TypedArrayOf<uint8_t>;
- using Uint16Array = Napi::TypedArrayOf<uint16_t>;
- using Uint32Array = Napi::TypedArrayOf<uint32_t>;
- using Float32Array = Napi::TypedArrayOf<float>;
- using Float64Array = Napi::TypedArrayOf<double>;
- using DataView = Napi::TypedArray;
-
- // Datatype used for undefined values.
- struct UndefinedType {};
- static constexpr UndefinedType Undefined;
-
- template <typename T>
- using FrozenArray = std::vector<T>;
-
- // A wrapper class for integers that's as transparent as possible and is used to distinguish
- // that the type is tagged with the [Clamp] WebIDL attribute.
- template <typename T>
- struct ClampedInteger {
- static_assert(std::is_integral_v<T>);
-
- using IntegerType = T;
- ClampedInteger() : value(0) {
- }
- ClampedInteger(T value) : value(value) {
- }
- operator T() const {
- return value;
- }
- T value;
- };
-
- // A wrapper class for integers that's as transparent as possible and is used to distinguish
- // that the type is tagged with the [EnforceRange] WebIDL attribute.
- template <typename T>
- struct EnforceRangeInteger {
- static_assert(std::is_integral_v<T>);
-
- using IntegerType = T;
- EnforceRangeInteger() : value(0) {
- }
- EnforceRangeInteger(T value) : value(value) {
- }
- operator T() const {
- return value;
- }
- T value;
- };
-
- ////////////////////////////////////////////////////////////////////////////////
- // Result
- ////////////////////////////////////////////////////////////////////////////////
-
- // Result is used to hold an success / error state by functions that perform JS <-> C++
- // conversion
- struct [[nodiscard]] Result {
- // Returns true if the operation succeeded, false if there was an error
- inline operator bool() const {
- return error.empty();
- }
-
- // If Result is an error, then a new Error is returned with the
- // stringified values append to the error message.
- // If Result is a success, then a success Result is returned.
- template <typename... VALUES>
- Result Append(VALUES&&... values) {
- if (*this) {
- return *this;
- }
- std::stringstream ss;
- ss << error << "\n";
- utils::Write(ss, std::forward<VALUES>(values)...);
- return {ss.str()};
- }
-
- // The error message, if the operation failed.
- std::string error;
- };
-
- // A successful result
- extern Result Success;
-
- // Returns a Result with the given error message
- Result Error(std::string msg);
-
- ////////////////////////////////////////////////////////////////////////////////
- // Interface<T>
- ////////////////////////////////////////////////////////////////////////////////
-
- // Interface<T> is a templated wrapper around a JavaScript object, which
- // implements the template-generated interface type T. Interfaces are returned
- // by either calling T::Bind() or T::Create().
- template <typename T>
- class Interface {
- public:
- // Constructs an Interface with no JS object.
- inline Interface() {
- }
-
- // Constructs an Interface wrapping the given JS object.
- // The JS object must have been created with a call to T::Bind().
- explicit inline Interface(Napi::Object o) : object(o) {
- }
-
- // Implicit conversion operators to Napi objects.
- inline operator napi_value() const {
- return object;
- }
- inline operator const Napi::Value&() const {
- return object;
- }
- inline operator const Napi::Object&() const {
- return object;
- }
-
- // Member and dereference operators
- inline T* operator->() const {
- return T::Unwrap(object);
- }
- inline T* operator*() const {
- return T::Unwrap(object);
- }
-
- // As<IMPL>() returns the unwrapped object cast to the implementation type.
- // The interface implementation *must* be of the template type IMPL.
- template <typename IMPL>
- inline IMPL* As() const {
- return static_cast<IMPL*>(T::Unwrap(object));
- }
-
- private:
- Napi::Object object;
- };
-
- ////////////////////////////////////////////////////////////////////////////////
- // Promise<T>
- ////////////////////////////////////////////////////////////////////////////////
-
- // Info holds details about where the promise was constructed.
- // Used for printing debug messages when a promise is finalized without being resolved
- // or rejected.
- // Use the PROMISE_INFO macro to populate this structure.
- struct PromiseInfo {
- const char* file = nullptr;
- const char* function = nullptr;
- int line = 0;
- };
-
- enum class PromiseState {
- Pending,
- Resolved,
- Rejected,
- };
-
- namespace detail {
- // Base class for Promise<T> specializations.
- class PromiseBase {
- public:
- // Implicit conversion operators to Napi promises.
- inline operator napi_value() const {
- return state_->deferred.Promise();
- }
- inline operator Napi::Value() const {
- return state_->deferred.Promise();
- }
- inline operator Napi::Promise() const {
- return state_->deferred.Promise();
- }
-
- // Reject() rejects the promise with the given failure value.
- void Reject(Napi::Value value) const {
- state_->deferred.Reject(value);
- state_->state = PromiseState::Rejected;
- }
- void Reject(Napi::Error err) const {
- Reject(err.Value());
- }
- void Reject(std::string err) const {
- Reject(Napi::Error::New(state_->deferred.Env(), err));
- }
-
- PromiseState GetState() const {
- return state_->state;
- }
-
- protected:
- void Resolve(Napi::Value value) const {
- state_->deferred.Resolve(value);
- state_->state = PromiseState::Resolved;
- }
+////////////////////////////////////////////////////////////////////////////////
+// Primitive JavaScript types
+////////////////////////////////////////////////////////////////////////////////
+using Object = Napi::Object;
+using ArrayBuffer = Napi::ArrayBuffer;
+using Int8Array = Napi::TypedArrayOf<int8_t>;
+using Int16Array = Napi::TypedArrayOf<int16_t>;
+using Int32Array = Napi::TypedArrayOf<int32_t>;
+using Uint8Array = Napi::TypedArrayOf<uint8_t>;
+using Uint16Array = Napi::TypedArrayOf<uint16_t>;
+using Uint32Array = Napi::TypedArrayOf<uint32_t>;
+using Float32Array = Napi::TypedArrayOf<float>;
+using Float64Array = Napi::TypedArrayOf<double>;
+using DataView = Napi::TypedArray;
+
+// Datatype used for undefined values.
+struct UndefinedType {};
+static constexpr UndefinedType Undefined;
+
+template <typename T>
+using FrozenArray = std::vector<T>;
+
+// A wrapper class for integers that's as transparent as possible and is used to indicate
+// that the type is tagged with the [Clamp] WebIDL attribute.
+template <typename T>
+struct ClampedInteger {
+ static_assert(std::is_integral_v<T>);
+
+ using IntegerType = T;
+ ClampedInteger() : value(0) {}
+ // NOLINTNEXTLINE(runtime/explicit)
+ ClampedInteger(T value) : value(value) {}
+ operator T() const { return value; }
+ T value;
+};
+
+// A wrapper class for integers that's as transparent as possible and is used to indicate
+// that the type is tagged with the [EnforceRange] WebIDL attribute.
+template <typename T>
+struct EnforceRangeInteger {
+ static_assert(std::is_integral_v<T>);
+
+ using IntegerType = T;
+ EnforceRangeInteger() : value(0) {}
+ // NOLINTNEXTLINE(runtime/explicit)
+ EnforceRangeInteger(T value) : value(value) {}
+ operator T() const { return value; }
+ T value;
+};
+
+////////////////////////////////////////////////////////////////////////////////
+// Result
+////////////////////////////////////////////////////////////////////////////////
+
+// Result is used to hold a success / error state by functions that perform JS <-> C++
+// conversion
+struct [[nodiscard]] Result {
+ // Returns true if the operation succeeded, false if there was an error
+ inline operator bool() const { return error.empty(); }
+
+ // If Result is an error, then a new Error is returned with the
+    // stringified values appended to the error message.
+ // If Result is a success, then a success Result is returned.
+ template <typename... VALUES>
+ Result Append(VALUES&&... values) {
+ if (*this) {
+ return *this;
+ }
+ std::stringstream ss;
+ ss << error << "\n";
+ utils::Write(ss, std::forward<VALUES>(values)...);
+ return {ss.str()};
+ }
- struct State {
- Napi::Promise::Deferred deferred;
- PromiseInfo info;
- PromiseState state = PromiseState::Pending;
- };
-
- PromiseBase(Napi::Env env, const PromiseInfo& info)
- : state_(new State{Napi::Promise::Deferred::New(env), info}) {
- state_->deferred.Promise().AddFinalizer(
- [](Napi::Env, State* state) {
- if (state->state == PromiseState::Pending) {
- ::wgpu::utils::Fatal("Promise not resolved or rejected",
- state->info.file, state->info.line,
- state->info.function);
- }
- delete state;
- },
- state_);
- }
+ // The error message, if the operation failed.
+ std::string error;
+};
+
+// A successful result
+extern Result Success;
+
+// Returns a Result with the given error message
+Result Error(std::string msg);
+
+////////////////////////////////////////////////////////////////////////////////
+// Interface<T>
+////////////////////////////////////////////////////////////////////////////////
+
+// Interface<T> is a templated wrapper around a JavaScript object, which
+// implements the template-generated interface type T. Interfaces are returned
+// by either calling T::Bind() or T::Create().
+template <typename T>
+class Interface {
+ public:
+ // Constructs an Interface with no JS object.
+ inline Interface() {}
+
+ // Constructs an Interface wrapping the given JS object.
+ // The JS object must have been created with a call to T::Bind().
+ explicit inline Interface(Napi::Object o) : object(o) {}
+
+ // Implicit conversion operators to Napi objects.
+ inline operator napi_value() const { return object; }
+ inline operator const Napi::Value&() const { return object; }
+ inline operator const Napi::Object&() const { return object; }
+
+ // Member and dereference operators
+ inline T* operator->() const { return T::Unwrap(object); }
+ inline T* operator*() const { return T::Unwrap(object); }
+
+ // As<IMPL>() returns the unwrapped object cast to the implementation type.
+ // The interface implementation *must* be of the template type IMPL.
+ template <typename IMPL>
+ inline IMPL* As() const {
+ return static_cast<IMPL*>(T::Unwrap(object));
+ }
- State* const state_;
- };
- } // namespace detail
-
- // Promise<T> is a templated wrapper around a JavaScript promise, which can
- // resolve to the template type T.
- template <typename T>
- class Promise : public detail::PromiseBase {
- public:
- // Constructor
- Promise(Napi::Env env, const PromiseInfo& info) : PromiseBase(env, info) {
- }
+ private:
+ Napi::Object object;
+};
+
+////////////////////////////////////////////////////////////////////////////////
+// Promise<T>
+////////////////////////////////////////////////////////////////////////////////
+
+// Info holds details about where the promise was constructed.
+// Used for printing debug messages when a promise is finalized without being resolved
+// or rejected.
+// Use the PROMISE_INFO macro to populate this structure.
+struct PromiseInfo {
+ const char* file = nullptr;
+ const char* function = nullptr;
+ int line = 0;
+};
+
+enum class PromiseState {
+ Pending,
+ Resolved,
+ Rejected,
+};
+
+namespace detail {
+// Base class for Promise<T> specializations.
+class PromiseBase {
+ public:
+ // Implicit conversion operators to Napi promises.
+ inline operator napi_value() const { return state_->deferred.Promise(); }
+ inline operator Napi::Value() const { return state_->deferred.Promise(); }
+ inline operator Napi::Promise() const { return state_->deferred.Promise(); }
+
+ // Reject() rejects the promise with the given failure value.
+ void Reject(Napi::Value value) const {
+ state_->deferred.Reject(value);
+ state_->state = PromiseState::Rejected;
+ }
+ void Reject(Napi::Error err) const { Reject(err.Value()); }
+ void Reject(std::string err) const { Reject(Napi::Error::New(state_->deferred.Env(), err)); }
- // Resolve() fulfills the promise with the given value.
- void Resolve(T&& value) const {
- PromiseBase::Resolve(ToJS(state_->deferred.Env(), std::forward<T>(value)));
- }
- };
+ PromiseState GetState() const { return state_->state; }
- // Specialization for Promises that resolve with no value
- template <>
- class Promise<void> : public detail::PromiseBase {
- public:
- // Constructor
- Promise(Napi::Env env, const PromiseInfo& info) : PromiseBase(env, info) {
- }
+ protected:
+ void Resolve(Napi::Value value) const {
+ state_->deferred.Resolve(value);
+ state_->state = PromiseState::Resolved;
+ }
- // Resolve() fulfills the promise.
- void Resolve() const {
- PromiseBase::Resolve(state_->deferred.Env().Undefined());
- }
+ struct State {
+ Napi::Promise::Deferred deferred;
+ PromiseInfo info;
+ PromiseState state = PromiseState::Pending;
};
- ////////////////////////////////////////////////////////////////////////////////
- // Converter<T>
- ////////////////////////////////////////////////////////////////////////////////
-
- // Converter<T> is specialized for each type T which can be converted from C++
- // to JavaScript, or JavaScript to C++.
- // Each specialization of Converter<T> is expected to have two static methods
- // with the signatures:
- //
- // // FromJS() converts the JavaScript value 'in' to the C++ value 'out'.
- // static Result FromJS(Napi::Env, Napi::Value in, T& out);
- //
- // // ToJS() converts the C++ value 'in' to a JavaScript value, and returns
- // // this value.
- // static Napi::Value ToJS(Napi::Env, T in);
- template <typename T>
- class Converter {};
-
- template <>
- class Converter<Napi::Object> {
- public:
- static inline Result FromJS(Napi::Env, Napi::Value value, Napi::Object& out) {
- if (value.IsObject()) {
- out = value.ToObject();
- return Success;
- }
- return Error("value is not an object");
- }
- static inline Napi::Value ToJS(Napi::Env, Napi::Object value) {
- return value;
- }
- };
+ PromiseBase(Napi::Env env, const PromiseInfo& info)
+ : state_(new State{Napi::Promise::Deferred::New(env), info}) {
+ state_->deferred.Promise().AddFinalizer(
+ [](Napi::Env, State* state) {
+ if (state->state == PromiseState::Pending) {
+ ::wgpu::utils::Fatal("Promise not resolved or rejected", state->info.file,
+ state->info.line, state->info.function);
+ }
+ delete state;
+ },
+ state_);
+ }
- template <>
- class Converter<ArrayBuffer> {
- public:
- static inline Result FromJS(Napi::Env, Napi::Value value, ArrayBuffer& out) {
- if (value.IsArrayBuffer()) {
- out = value.As<ArrayBuffer>();
- return Success;
- }
- return Error("value is not a ArrayBuffer");
- }
- static inline Napi::Value ToJS(Napi::Env, ArrayBuffer value) {
- return value;
+ State* const state_;
+};
+} // namespace detail
+
+// Promise<T> is a templated wrapper around a JavaScript promise, which can
+// resolve to the template type T.
+template <typename T>
+class Promise : public detail::PromiseBase {
+ public:
+ // Constructor
+ Promise(Napi::Env env, const PromiseInfo& info) : PromiseBase(env, info) {}
+
+ // Resolve() fulfills the promise with the given value.
+ void Resolve(T&& value) const {
+ PromiseBase::Resolve(ToJS(state_->deferred.Env(), std::forward<T>(value)));
+ }
+};
+
+// Specialization for Promises that resolve with no value
+template <>
+class Promise<void> : public detail::PromiseBase {
+ public:
+ // Constructor
+ Promise(Napi::Env env, const PromiseInfo& info) : PromiseBase(env, info) {}
+
+ // Resolve() fulfills the promise.
+ void Resolve() const { PromiseBase::Resolve(state_->deferred.Env().Undefined()); }
+};
+
+////////////////////////////////////////////////////////////////////////////////
+// Converter<T>
+////////////////////////////////////////////////////////////////////////////////
+
+// Converter<T> is specialized for each type T which can be converted from C++
+// to JavaScript, or JavaScript to C++.
+// Each specialization of Converter<T> is expected to have two static methods
+// with the signatures:
+//
+// // FromJS() converts the JavaScript value 'in' to the C++ value 'out'.
+// static Result FromJS(Napi::Env, Napi::Value in, T& out);
+//
+// // ToJS() converts the C++ value 'in' to a JavaScript value, and returns
+// // this value.
+// static Napi::Value ToJS(Napi::Env, T in);
+template <typename T>
+class Converter {};
+
+template <>
+class Converter<Napi::Object> {
+ public:
+ static inline Result FromJS(Napi::Env, Napi::Value value, Napi::Object& out) {
+ if (value.IsObject()) {
+ out = value.ToObject();
+ return Success;
}
- };
-
- template <>
- class Converter<Napi::TypedArray> {
- public:
- static inline Result FromJS(Napi::Env, Napi::Value value, Napi::TypedArray& out) {
- if (value.IsTypedArray()) {
- out = value.As<Napi::TypedArray>();
- return Success;
- }
- return Error("value is not a TypedArray");
+ return Error("value is not an object");
+ }
+ static inline Napi::Value ToJS(Napi::Env, Napi::Object value) { return value; }
+};
+
+template <>
+class Converter<ArrayBuffer> {
+ public:
+ static inline Result FromJS(Napi::Env, Napi::Value value, ArrayBuffer& out) {
+ if (value.IsArrayBuffer()) {
+ out = value.As<ArrayBuffer>();
+ return Success;
}
- static inline Napi::Value ToJS(Napi::Env, ArrayBuffer value) {
- return value;
+ return Error("value is not a ArrayBuffer");
+ }
+ static inline Napi::Value ToJS(Napi::Env, ArrayBuffer value) { return value; }
+};
+
+template <>
+class Converter<Napi::TypedArray> {
+ public:
+ static inline Result FromJS(Napi::Env, Napi::Value value, Napi::TypedArray& out) {
+ if (value.IsTypedArray()) {
+ out = value.As<Napi::TypedArray>();
+ return Success;
}
- };
+ return Error("value is not a TypedArray");
+ }
+ static inline Napi::Value ToJS(Napi::Env, ArrayBuffer value) { return value; }
+};
- template <typename T>
- class Converter<Napi::TypedArrayOf<T>> {
- public:
- // clang-format off
+template <typename T>
+class Converter<Napi::TypedArrayOf<T>> {
+ public:
+ // clang-format off
// The Napi element type of T
static constexpr napi_typedarray_type element_type =
std::is_same<T, int8_t>::value ? napi_int8_array
@@ -378,432 +336,416 @@ namespace wgpu::interop {
: std::is_same<T, int64_t>::value ? napi_bigint64_array
: std::is_same<T, uint64_t>::value ? napi_biguint64_array
: static_cast<napi_typedarray_type>(-1);
- // clang-format on
- static_assert(static_cast<int>(element_type) >= 0,
- "unsupported T type for Napi::TypedArrayOf<T>");
- static inline Result FromJS(Napi::Env, Napi::Value value, Napi::TypedArrayOf<T>& out) {
- if (value.IsTypedArray()) {
- auto arr = value.As<Napi::TypedArrayOf<T>>();
- if (arr.TypedArrayType() == element_type) {
- out = arr;
- return Success;
- }
- return Error("value is not a TypedArray of the correct element type");
- }
- return Error("value is not a TypedArray");
- }
- static inline Napi::Value ToJS(Napi::Env, ArrayBuffer value) {
- return value;
- }
- };
-
- template <>
- class Converter<std::string> {
- public:
- static Result FromJS(Napi::Env, Napi::Value, std::string&);
- static Napi::Value ToJS(Napi::Env, std::string);
- };
-
- template <>
- class Converter<bool> {
- public:
- static Result FromJS(Napi::Env, Napi::Value, bool&);
- static Napi::Value ToJS(Napi::Env, bool);
- };
-
- template <>
- class Converter<int8_t> {
- public:
- static Result FromJS(Napi::Env, Napi::Value, int8_t&);
- static Napi::Value ToJS(Napi::Env, int8_t);
- };
-
- template <>
- class Converter<uint8_t> {
- public:
- static Result FromJS(Napi::Env, Napi::Value, uint8_t&);
- static Napi::Value ToJS(Napi::Env, uint8_t);
- };
-
- template <>
- class Converter<int16_t> {
- public:
- static Result FromJS(Napi::Env, Napi::Value, int16_t&);
- static Napi::Value ToJS(Napi::Env, int16_t);
- };
-
- template <>
- class Converter<uint16_t> {
- public:
- static Result FromJS(Napi::Env, Napi::Value, uint16_t&);
- static Napi::Value ToJS(Napi::Env, uint16_t);
- };
-
- template <>
- class Converter<int32_t> {
- public:
- static Result FromJS(Napi::Env, Napi::Value, int32_t&);
- static Napi::Value ToJS(Napi::Env, int32_t);
- };
-
- template <>
- class Converter<uint32_t> {
- public:
- static Result FromJS(Napi::Env, Napi::Value, uint32_t&);
- static Napi::Value ToJS(Napi::Env, uint32_t);
- };
-
- template <>
- class Converter<int64_t> {
- public:
- static Result FromJS(Napi::Env, Napi::Value, int64_t&);
- static Napi::Value ToJS(Napi::Env, int64_t);
- };
-
- template <>
- class Converter<uint64_t> {
- public:
- static Result FromJS(Napi::Env, Napi::Value, uint64_t&);
- static Napi::Value ToJS(Napi::Env, uint64_t);
- };
-
- template <>
- class Converter<float> {
- public:
- static Result FromJS(Napi::Env, Napi::Value, float&);
- static Napi::Value ToJS(Napi::Env, float);
- };
-
- template <>
- class Converter<double> {
- public:
- static Result FromJS(Napi::Env, Napi::Value, double&);
- static Napi::Value ToJS(Napi::Env, double);
- };
-
- // [Clamp]ed integers must convert values outside of the integer range by clamping them.
- template <typename T>
- class Converter<ClampedInteger<T>> {
- public:
- static Result FromJS(Napi::Env env, Napi::Value value, ClampedInteger<T>& out) {
- double doubleValue;
- Result res = Converter<double>::FromJS(env, value, doubleValue);
- if (!res) {
- return res;
- }
-
- // Check for clamping first.
- constexpr T kMin = std::numeric_limits<T>::min();
- constexpr T kMax = std::numeric_limits<T>::max();
- if (doubleValue < kMin) {
- out = kMin;
- return Success;
- }
- if (doubleValue > kMax) {
- out = kMax;
+ // clang-format on
+ static_assert(static_cast<int>(element_type) >= 0,
+ "unsupported T type for Napi::TypedArrayOf<T>");
+ static inline Result FromJS(Napi::Env, Napi::Value value, Napi::TypedArrayOf<T>& out) {
+ if (value.IsTypedArray()) {
+ auto arr = value.As<Napi::TypedArrayOf<T>>();
+ if (arr.TypedArrayType() == element_type) {
+ out = arr;
return Success;
}
-
- // Yay, no clamping! We can convert the integer type as usual.
- T correctValue;
- res = Converter<T>::FromJS(env, value, correctValue);
- if (!res) {
- return res;
- }
- out = correctValue;
- return Success;
- }
- static Napi::Value ToJS(Napi::Env env, const ClampedInteger<T>& value) {
- return Converter<T>::ToJS(env, value.value);
+ return Error("value is not a TypedArray of the correct element type");
}
- };
-
- // [EnforceRange] integers cause a TypeError when converted from out of range values
- template <typename T>
- class Converter<EnforceRangeInteger<T>> {
- public:
- static Result FromJS(Napi::Env env, Napi::Value value, EnforceRangeInteger<T>& out) {
- double doubleValue;
- Result res = Converter<double>::FromJS(env, value, doubleValue);
- if (!res) {
- return res;
- }
-
- // Check for out of range and throw a type error.
- constexpr double kMin = static_cast<double>(std::numeric_limits<T>::min());
- constexpr double kMax = static_cast<double>(std::numeric_limits<T>::max());
- if (!(kMin <= doubleValue && doubleValue <= kMax)) {
- return Error("Values are out of the range of that integer.");
- }
-
- // Yay, no error! We can convert the integer type as usual.
- T correctValue;
- res = Converter<T>::FromJS(env, value, correctValue);
- if (!res) {
- return res;
- }
- out = correctValue;
+ return Error("value is not a TypedArray");
+ }
+ static inline Napi::Value ToJS(Napi::Env, ArrayBuffer value) { return value; }
+};
+
+template <>
+class Converter<std::string> {
+ public:
+ static Result FromJS(Napi::Env, Napi::Value, std::string&);
+ static Napi::Value ToJS(Napi::Env, std::string);
+};
+
+template <>
+class Converter<bool> {
+ public:
+ static Result FromJS(Napi::Env, Napi::Value, bool&);
+ static Napi::Value ToJS(Napi::Env, bool);
+};
+
+template <>
+class Converter<int8_t> {
+ public:
+ static Result FromJS(Napi::Env, Napi::Value, int8_t&);
+ static Napi::Value ToJS(Napi::Env, int8_t);
+};
+
+template <>
+class Converter<uint8_t> {
+ public:
+ static Result FromJS(Napi::Env, Napi::Value, uint8_t&);
+ static Napi::Value ToJS(Napi::Env, uint8_t);
+};
+
+template <>
+class Converter<int16_t> {
+ public:
+ static Result FromJS(Napi::Env, Napi::Value, int16_t&);
+ static Napi::Value ToJS(Napi::Env, int16_t);
+};
+
+template <>
+class Converter<uint16_t> {
+ public:
+ static Result FromJS(Napi::Env, Napi::Value, uint16_t&);
+ static Napi::Value ToJS(Napi::Env, uint16_t);
+};
+
+template <>
+class Converter<int32_t> {
+ public:
+ static Result FromJS(Napi::Env, Napi::Value, int32_t&);
+ static Napi::Value ToJS(Napi::Env, int32_t);
+};
+
+template <>
+class Converter<uint32_t> {
+ public:
+ static Result FromJS(Napi::Env, Napi::Value, uint32_t&);
+ static Napi::Value ToJS(Napi::Env, uint32_t);
+};
+
+template <>
+class Converter<int64_t> {
+ public:
+ static Result FromJS(Napi::Env, Napi::Value, int64_t&);
+ static Napi::Value ToJS(Napi::Env, int64_t);
+};
+
+template <>
+class Converter<uint64_t> {
+ public:
+ static Result FromJS(Napi::Env, Napi::Value, uint64_t&);
+ static Napi::Value ToJS(Napi::Env, uint64_t);
+};
+
+template <>
+class Converter<float> {
+ public:
+ static Result FromJS(Napi::Env, Napi::Value, float&);
+ static Napi::Value ToJS(Napi::Env, float);
+};
+
+template <>
+class Converter<double> {
+ public:
+ static Result FromJS(Napi::Env, Napi::Value, double&);
+ static Napi::Value ToJS(Napi::Env, double);
+};
+
+// [Clamp]ed integers must convert values outside of the integer range by clamping them.
+template <typename T>
+class Converter<ClampedInteger<T>> {
+ public:
+ static Result FromJS(Napi::Env env, Napi::Value value, ClampedInteger<T>& out) {
+ double doubleValue;
+ Result res = Converter<double>::FromJS(env, value, doubleValue);
+ if (!res) {
+ return res;
+ }
+
+ // Check for clamping first.
+ constexpr T kMin = std::numeric_limits<T>::min();
+ constexpr T kMax = std::numeric_limits<T>::max();
+ if (doubleValue < kMin) {
+ out = kMin;
return Success;
}
- static Napi::Value ToJS(Napi::Env env, const EnforceRangeInteger<T>& value) {
- return Converter<T>::ToJS(env, value.value);
- }
- };
-
- template <>
- class Converter<UndefinedType> {
- public:
- static Result FromJS(Napi::Env, Napi::Value, UndefinedType&);
- static Napi::Value ToJS(Napi::Env, UndefinedType);
- };
-
- template <typename T>
- class Converter<Interface<T>> {
- public:
- static Result FromJS(Napi::Env env, Napi::Value value, Interface<T>& out) {
- if (!value.IsObject()) {
- return Error("value is not object");
- }
- auto obj = value.As<Napi::Object>();
- if (!T::Unwrap(obj)) {
- return Error("object is not of the correct interface type");
- }
- out = Interface<T>(obj);
+ if (doubleValue > kMax) {
+ out = kMax;
return Success;
}
- static Napi::Value ToJS(Napi::Env env, const Interface<T>& value) {
- return {env, value};
- }
- };
- template <typename T>
- class Converter<std::optional<T>> {
- public:
- static Result FromJS(Napi::Env env, Napi::Value value, std::optional<T>& out) {
- if (value.IsNull() || value.IsUndefined()) {
- out.reset();
- return Success;
- }
- T v{};
- auto res = Converter<T>::FromJS(env, value, v);
- if (!res) {
- return res;
- }
- out = std::move(v);
- return Success;
+ // Yay, no clamping! We can convert the integer type as usual.
+ T correctValue;
+ res = Converter<T>::FromJS(env, value, correctValue);
+ if (!res) {
+ return res;
}
- static Napi::Value ToJS(Napi::Env env, std::optional<T> value) {
- if (value.has_value()) {
- return Converter<T>::ToJS(env, value.value());
- }
- return env.Null();
- }
- };
+ out = correctValue;
+ return Success;
+ }
+ static Napi::Value ToJS(Napi::Env env, const ClampedInteger<T>& value) {
+ return Converter<T>::ToJS(env, value.value);
+ }
+};
+
+// [EnforceRange] integers cause a TypeError when converted from out of range values
+template <typename T>
+class Converter<EnforceRangeInteger<T>> {
+ public:
+ static Result FromJS(Napi::Env env, Napi::Value value, EnforceRangeInteger<T>& out) {
+ double doubleValue;
+ Result res = Converter<double>::FromJS(env, value, doubleValue);
+ if (!res) {
+ return res;
+ }
+
+ // Check for out of range and throw a type error.
+ constexpr double kMin = static_cast<double>(std::numeric_limits<T>::min());
+ constexpr double kMax = static_cast<double>(std::numeric_limits<T>::max());
+ if (!(kMin <= doubleValue && doubleValue <= kMax)) {
+ return Error("Values are out of the range of that integer.");
+ }
+
+ // Yay, no error! We can convert the integer type as usual.
+ T correctValue;
+ res = Converter<T>::FromJS(env, value, correctValue);
+ if (!res) {
+ return res;
+ }
+ out = correctValue;
+ return Success;
+ }
+ static Napi::Value ToJS(Napi::Env env, const EnforceRangeInteger<T>& value) {
+ return Converter<T>::ToJS(env, value.value);
+ }
+};
+
+template <>
+class Converter<UndefinedType> {
+ public:
+ static Result FromJS(Napi::Env, Napi::Value, UndefinedType&);
+ static Napi::Value ToJS(Napi::Env, UndefinedType);
+};
+
+template <typename T>
+class Converter<Interface<T>> {
+ public:
+ static Result FromJS(Napi::Env env, Napi::Value value, Interface<T>& out) {
+ if (!value.IsObject()) {
+ return Error("value is not object");
+ }
+ auto obj = value.As<Napi::Object>();
+ if (!T::Unwrap(obj)) {
+ return Error("object is not of the correct interface type");
+ }
+ out = Interface<T>(obj);
+ return Success;
+ }
+ static Napi::Value ToJS(Napi::Env env, const Interface<T>& value) { return {env, value}; }
+};
- template <typename T>
- class Converter<std::vector<T>> {
- public:
- static inline Result FromJS(Napi::Env env, Napi::Value value, std::vector<T>& out) {
- if (!value.IsArray()) {
- return Error("value is not an array");
- }
- auto arr = value.As<Napi::Array>();
- std::vector<T> vec(arr.Length());
- for (size_t i = 0; i < vec.size(); i++) {
- auto res = Converter<T>::FromJS(env, arr[static_cast<uint32_t>(i)], vec[i]);
- if (!res) {
- return res.Append("for array element ", i);
- }
- }
- out = std::move(vec);
+template <typename T>
+class Converter<std::optional<T>> {
+ public:
+ static Result FromJS(Napi::Env env, Napi::Value value, std::optional<T>& out) {
+ if (value.IsNull() || value.IsUndefined()) {
+ out.reset();
return Success;
}
- static inline Napi::Value ToJS(Napi::Env env, const std::vector<T>& vec) {
- auto arr = Napi::Array::New(env, vec.size());
- for (size_t i = 0; i < vec.size(); i++) {
- arr.Set(static_cast<uint32_t>(i), Converter<T>::ToJS(env, vec[i]));
- }
- return arr;
+ T v{};
+ auto res = Converter<T>::FromJS(env, value, v);
+ if (!res) {
+ return res;
}
- };
-
- template <typename K, typename V>
- class Converter<std::unordered_map<K, V>> {
- public:
- static inline Result FromJS(Napi::Env env,
- Napi::Value value,
- std::unordered_map<K, V>& out) {
- if (!value.IsObject()) {
- return Error("value is not an object");
- }
- auto obj = value.ToObject();
- auto keys = obj.GetPropertyNames();
- std::unordered_map<K, V> map(keys.Length());
- for (uint32_t i = 0; i < static_cast<uint32_t>(keys.Length()); i++) {
- K key{};
- V value{};
- auto key_res = Converter<K>::FromJS(env, keys[i], key);
- if (!key_res) {
- return key_res.Append("for object key");
- }
- auto value_res = Converter<V>::FromJS(env, obj.Get(keys[i]), value);
- if (!value_res) {
- return value_res.Append("for object value of key: ", key);
- }
- map[key] = value;
- }
- out = std::move(map);
- return Success;
- }
- static inline Napi::Value ToJS(Napi::Env env, std::unordered_map<K, V> value) {
- auto obj = Napi::Object::New(env);
- for (auto it : value) {
- obj.Set(Converter<K>::ToJS(env, it.first), Converter<V>::ToJS(env, it.second));
- }
- return obj;
+ out = std::move(v);
+ return Success;
+ }
+ static Napi::Value ToJS(Napi::Env env, std::optional<T> value) {
+ if (value.has_value()) {
+ return Converter<T>::ToJS(env, value.value());
}
- };
-
- template <typename... TYPES>
- class Converter<std::variant<TYPES...>> {
- template <typename TY>
- static inline Result TryFromJS(Napi::Env env,
- Napi::Value value,
- std::variant<TYPES...>& out) {
- TY v{};
- auto res = Converter<TY>::FromJS(env, value, v);
+ return env.Null();
+ }
+};
+
+template <typename T>
+class Converter<std::vector<T>> {
+ public:
+ static inline Result FromJS(Napi::Env env, Napi::Value value, std::vector<T>& out) {
+ if (!value.IsArray()) {
+ return Error("value is not an array");
+ }
+ auto arr = value.As<Napi::Array>();
+ std::vector<T> vec(arr.Length());
+ for (size_t i = 0; i < vec.size(); i++) {
+ auto res = Converter<T>::FromJS(env, arr[static_cast<uint32_t>(i)], vec[i]);
if (!res) {
- return Error("no possible types matched");
+ return res.Append("for array element ", i);
}
- out = std::move(v);
- return Success;
}
-
- template <typename T0, typename T1, typename... TN>
- static inline Result TryFromJS(Napi::Env env,
- Napi::Value value,
- std::variant<TYPES...>& out) {
- if (TryFromJS<T0>(env, value, out)) {
- return Success;
- }
- return TryFromJS<T1, TN...>(env, value, out);
+ out = std::move(vec);
+ return Success;
+ }
+ static inline Napi::Value ToJS(Napi::Env env, const std::vector<T>& vec) {
+ auto arr = Napi::Array::New(env, vec.size());
+ for (size_t i = 0; i < vec.size(); i++) {
+ arr.Set(static_cast<uint32_t>(i), Converter<T>::ToJS(env, vec[i]));
}
+ return arr;
+ }
+};
- public:
- static inline Result FromJS(Napi::Env env, Napi::Value value, std::variant<TYPES...>& out) {
- return TryFromJS<TYPES...>(env, value, out);
- }
- static inline Napi::Value ToJS(Napi::Env env, std::variant<TYPES...> value) {
- return std::visit(
- [&](auto&& v) {
- using T = std::remove_cv_t<std::remove_reference_t<decltype(v)>>;
- return Converter<T>::ToJS(env, v);
- },
- value);
+template <typename K, typename V>
+class Converter<std::unordered_map<K, V>> {
+ public:
+ static inline Result FromJS(Napi::Env env, Napi::Value value, std::unordered_map<K, V>& out) {
+ if (!value.IsObject()) {
+ return Error("value is not an object");
}
- };
-
- template <typename T>
- class Converter<Promise<T>> {
- public:
- static inline Result FromJS(Napi::Env, Napi::Value, Promise<T>&) {
- UNIMPLEMENTED();
+ auto obj = value.ToObject();
+ auto keys = obj.GetPropertyNames();
+ std::unordered_map<K, V> map(keys.Length());
+ for (uint32_t i = 0; i < static_cast<uint32_t>(keys.Length()); i++) {
+ K key{};
+ V value{};
+ auto key_res = Converter<K>::FromJS(env, keys[i], key);
+ if (!key_res) {
+ return key_res.Append("for object key");
+ }
+ auto value_res = Converter<V>::FromJS(env, obj.Get(keys[i]), value);
+ if (!value_res) {
+ return value_res.Append("for object value of key: ", key);
+ }
+ map[key] = value;
}
- static inline Napi::Value ToJS(Napi::Env, Promise<T> promise) {
- return promise;
+ out = std::move(map);
+ return Success;
+ }
+ static inline Napi::Value ToJS(Napi::Env env, std::unordered_map<K, V> value) {
+ auto obj = Napi::Object::New(env);
+ for (auto it : value) {
+ obj.Set(Converter<K>::ToJS(env, it.first), Converter<V>::ToJS(env, it.second));
}
- };
-
- ////////////////////////////////////////////////////////////////////////////////
- // Helpers
- ////////////////////////////////////////////////////////////////////////////////
-
- // FromJS() is a helper function which delegates to
- // Converter<T>::FromJS()
- template <typename T>
- inline Result FromJS(Napi::Env env, Napi::Value value, T& out) {
- return Converter<T>::FromJS(env, value, out);
+ return obj;
+ }
+};
+
+template <typename... TYPES>
+class Converter<std::variant<TYPES...>> {
+ template <typename TY>
+ static inline Result TryFromJS(Napi::Env env, Napi::Value value, std::variant<TYPES...>& out) {
+ TY v{};
+ auto res = Converter<TY>::FromJS(env, value, v);
+ if (!res) {
+ return Error("no possible types matched");
+ }
+ out = std::move(v);
+ return Success;
}
- // FromJSOptional() is similar to FromJS(), but if 'value' is either null
- // or undefined then 'out' is left unassigned.
- template <typename T>
- inline Result FromJSOptional(Napi::Env env, Napi::Value value, T& out) {
- if (value.IsNull() || value.IsUndefined()) {
+ template <typename T0, typename T1, typename... TN>
+ static inline Result TryFromJS(Napi::Env env, Napi::Value value, std::variant<TYPES...>& out) {
+ if (TryFromJS<T0>(env, value, out)) {
return Success;
}
- return Converter<T>::FromJS(env, value, out);
+ return TryFromJS<T1, TN...>(env, value, out);
}
- // ToJS() is a helper function which delegates to Converter<T>::ToJS()
- template <typename T>
- inline Napi::Value ToJS(Napi::Env env, T&& value) {
- return Converter<std::remove_cv_t<std::remove_reference_t<T>>>::ToJS(
- env, std::forward<T>(value));
+ public:
+ static inline Result FromJS(Napi::Env env, Napi::Value value, std::variant<TYPES...>& out) {
+ return TryFromJS<TYPES...>(env, value, out);
}
-
- // DefaultedParameter can be used in the tuple parameter types passed to
- // FromJS(const Napi::CallbackInfo& info, PARAM_TYPES& args), for parameters
- // that have a default value. If the argument is omitted in the call, then
- // DefaultedParameter::default_value will be assigned to
- // DefaultedParameter::value.
- template <typename T>
- struct DefaultedParameter {
- T value; // The argument value assigned by FromJS()
- T default_value; // The default value if no argument supplied
-
- // Implicit conversion operator. Returns value.
- inline operator const T&() const {
- return value;
- }
- };
-
- // IsDefaultedParameter<T>::value is true iff T is of type DefaultedParameter.
- template <typename T>
- struct IsDefaultedParameter {
- static constexpr bool value = false;
- };
- template <typename T>
- struct IsDefaultedParameter<DefaultedParameter<T>> {
- static constexpr bool value = true;
- };
-
- // FromJS() is a helper function for bulk converting the arguments of 'info'.
- // PARAM_TYPES is a std::tuple<> describing the C++ function parameter types.
- // Parameters may be of the templated DefaultedParameter type, in which case
- // the parameter will default to the default-value if omitted.
- template <typename PARAM_TYPES, int BASE_INDEX = 0>
- inline Result FromJS(const Napi::CallbackInfo& info, PARAM_TYPES& args) {
- if constexpr (BASE_INDEX < std::tuple_size_v<PARAM_TYPES>) {
- using T = std::tuple_element_t<BASE_INDEX, PARAM_TYPES>;
- auto& value = info[BASE_INDEX];
- auto& out = std::get<BASE_INDEX>(args);
- if constexpr (IsDefaultedParameter<T>::value) {
- // Parameter has a default value.
- // Check whether the argument was provided.
- if (value.IsNull() || value.IsUndefined()) {
- // Use default value for this parameter
- out.value = out.default_value;
- } else {
- // Argument was provided
- auto res = FromJS(info.Env(), value, out.value);
- if (!res) {
- return res;
- }
- }
+ static inline Napi::Value ToJS(Napi::Env env, std::variant<TYPES...> value) {
+ return std::visit(
+ [&](auto&& v) {
+ using T = std::remove_cv_t<std::remove_reference_t<decltype(v)>>;
+ return Converter<T>::ToJS(env, v);
+ },
+ value);
+ }
+};
+
+template <typename T>
+class Converter<Promise<T>> {
+ public:
+ static inline Result FromJS(Napi::Env, Napi::Value, Promise<T>&) { UNIMPLEMENTED(); }
+ static inline Napi::Value ToJS(Napi::Env, Promise<T> promise) { return promise; }
+};
+
+////////////////////////////////////////////////////////////////////////////////
+// Helpers
+////////////////////////////////////////////////////////////////////////////////
+
+// FromJS() is a helper function which delegates to
+// Converter<T>::FromJS()
+template <typename T>
+inline Result FromJS(Napi::Env env, Napi::Value value, T& out) {
+ return Converter<T>::FromJS(env, value, out);
+}
+
+// FromJSOptional() is similar to FromJS(), but if 'value' is either null
+// or undefined then 'out' is left unassigned.
+template <typename T>
+inline Result FromJSOptional(Napi::Env env, Napi::Value value, T& out) {
+ if (value.IsNull() || value.IsUndefined()) {
+ return Success;
+ }
+ return Converter<T>::FromJS(env, value, out);
+}
+
+// ToJS() is a helper function which delegates to Converter<T>::ToJS()
+template <typename T>
+inline Napi::Value ToJS(Napi::Env env, T&& value) {
+ return Converter<std::remove_cv_t<std::remove_reference_t<T>>>::ToJS(env,
+ std::forward<T>(value));
+}
+
+// DefaultedParameter can be used in the tuple parameter types passed to
+// FromJS(const Napi::CallbackInfo& info, PARAM_TYPES& args), for parameters
+// that have a default value. If the argument is omitted in the call, then
+// DefaultedParameter::default_value will be assigned to
+// DefaultedParameter::value.
+template <typename T>
+struct DefaultedParameter {
+ T value; // The argument value assigned by FromJS()
+ T default_value; // The default value if no argument supplied
+
+ // Implicit conversion operator. Returns value.
+ inline operator const T&() const { return value; }
+};
+
+// IsDefaultedParameter<T>::value is true iff T is of type DefaultedParameter.
+template <typename T>
+struct IsDefaultedParameter {
+ static constexpr bool value = false;
+};
+template <typename T>
+struct IsDefaultedParameter<DefaultedParameter<T>> {
+ static constexpr bool value = true;
+};
+
+// FromJS() is a helper function for bulk converting the arguments of 'info'.
+// PARAM_TYPES is a std::tuple<> describing the C++ function parameter types.
+// Parameters may be of the templated DefaultedParameter type, in which case
+// the parameter will default to the default-value if omitted.
+template <typename PARAM_TYPES, int BASE_INDEX = 0>
+inline Result FromJS(const Napi::CallbackInfo& info, PARAM_TYPES& args) {
+ if constexpr (BASE_INDEX < std::tuple_size_v<PARAM_TYPES>) {
+ using T = std::tuple_element_t<BASE_INDEX, PARAM_TYPES>;
+ auto& value = info[BASE_INDEX];
+ auto& out = std::get<BASE_INDEX>(args);
+ if constexpr (IsDefaultedParameter<T>::value) {
+ // Parameter has a default value.
+ // Check whether the argument was provided.
+ if (value.IsNull() || value.IsUndefined()) {
+ // Use default value for this parameter
+ out.value = out.default_value;
} else {
- // Parameter does not have a default value.
- auto res = FromJS(info.Env(), value, out);
+ // Argument was provided
+ auto res = FromJS(info.Env(), value, out.value);
if (!res) {
return res;
}
}
- // Convert the rest of the arguments
- return FromJS<PARAM_TYPES, BASE_INDEX + 1>(info, args);
} else {
- return Success;
+ // Parameter does not have a default value.
+ auto res = FromJS(info.Env(), value, out);
+ if (!res) {
+ return res;
+ }
}
+ // Convert the rest of the arguments
+ return FromJS<PARAM_TYPES, BASE_INDEX + 1>(info, args);
+ } else {
+ return Success;
}
+}
} // namespace wgpu::interop
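
The converters above translate between Napi values and the C++ types used by the generated bindings, and the tuple-based FromJS() overload walks a parameter pack at compile time, honouring DefaultedParameter<> entries. A minimal sketch of how a binding could drive these helpers — the method name and parameters are hypothetical, and it assumes a Converter<std::string> specialization defined earlier in this header:

    // Sketch only: 'SetLabel' and its parameters are hypothetical; FromJS(), ToJS()
    // and DefaultedParameter<> are the helpers defined above.
    Napi::Value SetLabel(const Napi::CallbackInfo& info) {
        using Params = std::tuple<std::string, wgpu::interop::DefaultedParameter<bool>>;
        Params params;
        std::get<1>(params).default_value = false;  // used when the second argument is omitted
        if (auto res = wgpu::interop::FromJS(info, params); !res) {
            return info.Env().Undefined();  // conversion failed; error reporting elided
        }
        const std::string& label = std::get<0>(params);
        bool verbose = std::get<1>(params);  // implicit conversion to the wrapped value
        (void)verbose;
        return wgpu::interop::ToJS(info.Env(), label);  // echo the label back to JS
    }
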
diff --git a/chromium/third_party/dawn/src/dawn/node/tools/go.mod b/chromium/third_party/dawn/src/dawn/node/tools/go.mod
deleted file mode 100644
index b5eb8dfb508..00000000000
--- a/chromium/third_party/dawn/src/dawn/node/tools/go.mod
+++ /dev/null
@@ -1,9 +0,0 @@
-module dawn.googlesource.com/dawn/src/dawn/node/tools
-
-go 1.16
-
-require (
- github.com/ben-clayton/webidlparser v0.0.0-20210923100217-8ba896ded094
- github.com/mattn/go-colorable v0.1.9
- github.com/mattn/go-isatty v0.0.14 // indirect
-)
diff --git a/chromium/third_party/dawn/src/dawn/node/tools/go.sum b/chromium/third_party/dawn/src/dawn/node/tools/go.sum
deleted file mode 100644
index 42c01181c64..00000000000
--- a/chromium/third_party/dawn/src/dawn/node/tools/go.sum
+++ /dev/null
@@ -1,33 +0,0 @@
-github.com/ben-clayton/webidlparser v0.0.0-20210923100217-8ba896ded094 h1:CTVJdI6oUCRNucMEmoh3c2U88DesoPtefsxKhoZ1WuQ=
-github.com/ben-clayton/webidlparser v0.0.0-20210923100217-8ba896ded094/go.mod h1:bV550SPlMos7UhMprxlm14XTBTpKHSUZ8Q4Id5qQuyw=
-github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
-github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
-github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
-github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
-github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
-github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
-github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
-github.com/mattn/go-colorable v0.1.9 h1:sqDoxXbdeALODt0DAeJCVp38ps9ZogZEAXjus69YV3U=
-github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
-github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
-github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y=
-github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
-github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
-github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k=
-github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
-github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
-github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c h1:F1jZWGFhYfh0Ci55sIpILtKKK8p3i2/krTr0H1rg74I=
-golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
-gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
-gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/chromium/third_party/dawn/src/dawn/node/tools/src/cmd/idlgen/main.go b/chromium/third_party/dawn/src/dawn/node/tools/src/cmd/idlgen/main.go
index 9985203dff6..b681ad06e26 100644
--- a/chromium/third_party/dawn/src/dawn/node/tools/src/cmd/idlgen/main.go
+++ b/chromium/third_party/dawn/src/dawn/node/tools/src/cmd/idlgen/main.go
@@ -65,6 +65,10 @@ func run() error {
// Open up the output file
out := os.Stdout
if outputPath != "" {
+ dir := filepath.Dir(outputPath)
+ if err := os.MkdirAll(dir, 0777); err != nil {
+ return fmt.Errorf("failed to create output directory '%v'", dir)
+ }
file, err := os.Create(outputPath)
if err != nil {
return fmt.Errorf("failed to open output file '%v'", outputPath)
diff --git a/chromium/third_party/dawn/src/dawn/node/tools/src/cmd/run-cts/main.go b/chromium/third_party/dawn/src/dawn/node/tools/src/cmd/run-cts/main.go
index 28eb60dd1d4..afbe830045a 100644
--- a/chromium/third_party/dawn/src/dawn/node/tools/src/cmd/run-cts/main.go
+++ b/chromium/third_party/dawn/src/dawn/node/tools/src/cmd/run-cts/main.go
@@ -39,6 +39,7 @@ import (
"time"
"unicode/utf8"
+ "dawn.googlesource.com/dawn/tools/src/utils"
"github.com/mattn/go-colorable"
"github.com/mattn/go-isatty"
)
@@ -233,6 +234,7 @@ func run() error {
return fmt.Sprintf(`require('./src/common/tools/setup-ts-in-node.js');require('./src/common/runtime/%v.ts');`, main)
},
stdout: stdout,
+ colors: colors,
}
if logFilename != "" {
@@ -332,7 +334,7 @@ type logger struct {
// newLogger creates a new logger instance.
func newLogger(writer io.Writer) logger {
- return logger{writer, 0, map[int]result{}}
+ return logger{colorable.NewNonColorable(writer), 0, map[int]result{}}
}
// logResult writes the test results to the log file in sequential order.
@@ -347,7 +349,7 @@ func (l *logger) logResults(res result) {
if !ok {
break
}
- fmt.Fprintf(l.writer, "%v [%v]\n", logRes.testcase, logRes.status)
+ fmt.Fprintf(l.writer, "%v [%v]\n%v", logRes.testcase, logRes.status, logRes.message)
l.idx++
}
}
@@ -389,6 +391,7 @@ type runner struct {
results testcaseStatuses
log logger
stdout io.WriteCloser
+ colors bool // Colors enabled?
}
// scanSourceTimestamps scans all the .js and .ts files in all subdirectories of
@@ -457,8 +460,8 @@ func (r *runner) gatherTestCases(query string, verbose bool) error {
"--", // Start of arguments
// src/common/runtime/helper/sys.ts expects 'node file.js <args>'
// and slices away the first two arguments. When running with '-e', args
- // start at 1, so just inject a dummy argument.
- "dummy-arg",
+ // start at 1, so just inject a placeholder argument.
+ "placeholder-arg",
"--list",
}, query)
@@ -603,11 +606,14 @@ func (r *runner) runServer(id int, caseIndices <-chan int, results chan<- result
"--",
// src/common/runtime/helper/sys.ts expects 'node file.js <args>'
// and slices away the first two arguments. When running with '-e', args
- // start at 1, so just inject a dummy argument.
- "dummy-arg",
+ // start at 1, so just inject a placeholder argument.
+ "placeholder-arg",
// Actual arguments begin here
"--gpu-provider", r.dawnNode,
}
+ if r.colors {
+ args = append(args, "--colors")
+ }
for _, f := range r.flags {
args = append(args, "--gpu-provider-flag", f)
}
@@ -643,8 +649,6 @@ func (r *runner) runServer(id int, caseIndices <-chan int, results chan<- result
case <-ctx.Done(): // cancelled
return ctx.Err()
}
-
- return nil
}
stopServer = func() {
if port > 0 {
@@ -806,7 +810,9 @@ func (r *runner) streamResults(wg *sync.WaitGroup, results chan result) {
buf := &bytes.Buffer{}
fmt.Fprint(buf, statusColor[res.status])
if res.message != "" {
- fmt.Fprintf(buf, "%v - %v:\n%v", name, res.status, res.message)
+ fmt.Fprintf(buf, "%v - %v:\n", name, res.status)
+ fmt.Fprintf(buf, ansiReset)
+ fmt.Fprintf(buf, "%v", res.message)
} else {
fmt.Fprintf(buf, "%v - %v", name, res.status)
}
@@ -908,7 +914,7 @@ var statuses = []status{pass, warn, fail, skip, timeout}
var statusColor = map[status]string{
pass: green,
warn: yellow,
- skip: blue,
+ skip: cyan,
timeout: yellow,
fail: red,
}
@@ -940,11 +946,15 @@ func (r *runner) runTestcase(query string) result {
"--",
// src/common/runtime/helper/sys.ts expects 'node file.js <args>'
// and slices away the first two arguments. When running with '-e', args
- // start at 1, so just inject a dummy argument.
- "dummy-arg",
+ // start at 1, so just inject a placeholder argument.
+ "placeholder-arg",
// Actual arguments begin here
"--gpu-provider", r.dawnNode,
"--verbose",
+ "--quiet",
+ }
+ if r.colors {
+ args = append(args, "--colors")
}
for _, f := range r.flags {
args = append(args, "--gpu-provider-flag", f)
@@ -1157,24 +1167,22 @@ func saveExpectations(path string, ex testcaseStatuses) error {
// directory, falling back to PATH. This is used as the default for the --node
// command line flag.
func defaultNodePath() string {
- if dir := thisDir(); dir != "" {
- if dawnRoot := getDawnRoot(); dawnRoot != "" {
- node := filepath.Join(dawnRoot, "third_party/node")
- if info, err := os.Stat(node); err == nil && info.IsDir() {
- path := ""
- switch fmt.Sprintf("%v/%v", runtime.GOOS, runtime.GOARCH) { // See `go tool dist list`
- case "darwin/amd64":
- path = filepath.Join(node, "node-darwin-x64/bin/node")
- case "darwin/arm64":
- path = filepath.Join(node, "node-darwin-arm64/bin/node")
- case "linux/amd64":
- path = filepath.Join(node, "node-linux-x64/bin/node")
- case "windows/amd64":
- path = filepath.Join(node, "node.exe")
- }
- if _, err := os.Stat(path); err == nil {
- return path
- }
+ if dawnRoot := utils.DawnRoot(); dawnRoot != "" {
+ node := filepath.Join(dawnRoot, "third_party/node")
+ if info, err := os.Stat(node); err == nil && info.IsDir() {
+ path := ""
+ switch fmt.Sprintf("%v/%v", runtime.GOOS, runtime.GOARCH) { // See `go tool dist list`
+ case "darwin/amd64":
+ path = filepath.Join(node, "node-darwin-x64/bin/node")
+ case "darwin/arm64":
+ path = filepath.Join(node, "node-darwin-arm64/bin/node")
+ case "linux/amd64":
+ path = filepath.Join(node, "node-linux-x64/bin/node")
+ case "windows/amd64":
+ path = filepath.Join(node, "node.exe")
+ }
+ if _, err := os.Stat(path); err == nil {
+ return path
}
}
}
@@ -1189,52 +1197,15 @@ func defaultNodePath() string {
// defaultCtsPath looks for the webgpu-cts directory in dawn's third_party
// directory. This is used as the default for the --cts command line flag.
func defaultCtsPath() string {
- if dir := thisDir(); dir != "" {
- if dawnRoot := getDawnRoot(); dawnRoot != "" {
- cts := filepath.Join(dawnRoot, "third_party/webgpu-cts")
- if info, err := os.Stat(cts); err == nil && info.IsDir() {
- return cts
- }
- }
- }
-
- return ""
-}
-
-// getDawnRoot returns the path to the dawn project's root directory or empty
-// string if not found.
-func getDawnRoot() string {
- return getPathOfFileInParentDirs(thisDir(), "DEPS")
-}
-
-// getPathOfFileInParentDirs looks for file with `name` in paths starting from
-// `path`, and up into parent directories, returning the clean path in which the
-// file is found, or empty string if not found.
-func getPathOfFileInParentDirs(path string, name string) string {
- sep := string(filepath.Separator)
- path, _ = filepath.Abs(path)
- numDirs := strings.Count(path, sep) + 1
- for i := 0; i < numDirs; i++ {
- test := filepath.Join(path, name)
- if _, err := os.Stat(test); err == nil {
- return filepath.Clean(path)
+ if dawnRoot := utils.DawnRoot(); dawnRoot != "" {
+ cts := filepath.Join(dawnRoot, "third_party/webgpu-cts")
+ if info, err := os.Stat(cts); err == nil && info.IsDir() {
+ return cts
}
-
- path = path + sep + ".."
}
return ""
}
-// thisDir returns the path to the directory that holds the .go file of the
-// caller function
-func thisDir() string {
- _, file, _, ok := runtime.Caller(1)
- if !ok {
- return ""
- }
- return filepath.Dir(file)
-}
-
type muxWriter struct {
data chan []byte
err chan error
diff --git a/chromium/third_party/dawn/src/dawn/node/utils/Debug.h b/chromium/third_party/dawn/src/dawn/node/utils/Debug.h
index bd7f4e4b2f6..c68b858c694 100644
--- a/chromium/third_party/dawn/src/dawn/node/utils/Debug.h
+++ b/chromium/third_party/dawn/src/dawn/node/utils/Debug.h
@@ -16,109 +16,112 @@
#define SRC_DAWN_NODE_UTILS_DEBUG_H_
#include <iostream>
-#include <optional>
+// TODO(https://crbug.com/dawn/1379) Update cpplint and remove NOLINT
+#include <optional> // NOLINT(build/include_order)
#include <sstream>
#include <unordered_map>
-#include <variant>
+#include <utility>
+// TODO(https://crbug.com/dawn/1379) Update cpplint and remove NOLINT
+#include <variant> // NOLINT(build/include_order)
#include <vector>
#include "dawn/webgpu_cpp_print.h"
namespace wgpu::utils {
- // Write() is a helper for printing container types to the std::ostream.
- // Write() is used by the LOG() macro below.
-
- // Forward declarations
- inline std::ostream& Write(std::ostream& out) {
- return out;
+// Write() is a helper for printing container types to the std::ostream.
+// Write() is used by the LOG() macro below.
+
+// Forward declarations
+inline std::ostream& Write(std::ostream& out) {
+ return out;
+}
+template <typename T>
+inline std::ostream& Write(std::ostream& out, const std::optional<T>& value);
+template <typename T>
+inline std::ostream& Write(std::ostream& out, const std::vector<T>& value);
+template <typename K, typename V>
+inline std::ostream& Write(std::ostream& out, const std::unordered_map<K, V>& value);
+template <typename... TYS>
+inline std::ostream& Write(std::ostream& out, const std::variant<TYS...>& value);
+template <typename VALUE>
+std::ostream& Write(std::ostream& out, VALUE&& value);
+
+// Write() implementations
+template <typename T>
+std::ostream& Write(std::ostream& out, const std::optional<T>& value) {
+ if (value.has_value()) {
+ return Write(out, value.value());
}
- template <typename T>
- inline std::ostream& Write(std::ostream& out, const std::optional<T>& value);
- template <typename T>
- inline std::ostream& Write(std::ostream& out, const std::vector<T>& value);
- template <typename K, typename V>
- inline std::ostream& Write(std::ostream& out, const std::unordered_map<K, V>& value);
- template <typename... TYS>
- inline std::ostream& Write(std::ostream& out, const std::variant<TYS...>& value);
- template <typename VALUE>
- std::ostream& Write(std::ostream& out, VALUE&& value);
-
- // Write() implementations
- template <typename T>
- std::ostream& Write(std::ostream& out, const std::optional<T>& value) {
- if (value.has_value()) {
- return Write(out, value.value());
+ return out << "<undefined>";
+}
+
+template <typename T>
+std::ostream& Write(std::ostream& out, const std::vector<T>& value) {
+ out << "[";
+ bool first = true;
+ for (const auto& el : value) {
+ if (!first) {
+ out << ", ";
}
- return out << "<undefined>";
+ first = false;
+ Write(out, el);
}
-
- template <typename T>
- std::ostream& Write(std::ostream& out, const std::vector<T>& value) {
- out << "[";
- bool first = true;
- for (const auto& el : value) {
- if (!first) {
- out << ", ";
- }
- first = false;
- Write(out, el);
+ return out << "]";
+}
+
+template <typename K, typename V>
+std::ostream& Write(std::ostream& out, const std::unordered_map<K, V>& value) {
+ out << "{";
+ bool first = true;
+ for (auto& [key, value] : value) {
+ if (!first) {
+ out << ", ";
}
- return out << "]";
- }
-
- template <typename K, typename V>
- std::ostream& Write(std::ostream& out, const std::unordered_map<K, V>& value) {
- out << "{";
- bool first = true;
- for (auto& [key, value] : value) {
- if (!first) {
- out << ", ";
- }
- first = false;
- Write(out, key);
- out << ": ";
- Write(out, value);
- }
- return out << "}";
- }
-
- template <typename... TYS>
- std::ostream& Write(std::ostream& out, const std::variant<TYS...>& value) {
- std::visit([&](auto&& v) { Write(out, v); }, value);
- return out;
+ first = false;
+ Write(out, key);
+ out << ": ";
+ Write(out, value);
}
-
- template <typename VALUE>
- std::ostream& Write(std::ostream& out, VALUE&& value) {
- return out << std::forward<VALUE>(value);
- }
-
- template <typename FIRST, typename... REST>
- inline std::ostream& Write(std::ostream& out, FIRST&& first, REST&&... rest) {
- Write(out, std::forward<FIRST>(first));
- Write(out, std::forward<REST>(rest)...);
- return out;
- }
-
- // Fatal() prints a message to stdout with the given file, line, function and optional message,
- // then calls abort(). Fatal() is usually not called directly, but by the UNREACHABLE() and
- // UNIMPLEMENTED() macro below.
- template <typename... MSG_ARGS>
- [[noreturn]] inline void Fatal(const char* reason,
- const char* file,
- int line,
- const char* function,
- MSG_ARGS&&... msg_args) {
- std::stringstream msg;
- msg << file << ":" << line << ": " << reason << ": " << function << "()";
- if constexpr (sizeof...(msg_args) > 0) {
- msg << " ";
- Write(msg, std::forward<MSG_ARGS>(msg_args)...);
- }
- std::cout << msg.str() << std::endl;
- abort();
+ return out << "}";
+}
+
+template <typename... TYS>
+std::ostream& Write(std::ostream& out, const std::variant<TYS...>& value) {
+ std::visit([&](auto&& v) { Write(out, v); }, value);
+ return out;
+}
+
+template <typename VALUE>
+std::ostream& Write(std::ostream& out, VALUE&& value) {
+ return out << std::forward<VALUE>(value);
+}
+
+template <typename FIRST, typename... REST>
+inline std::ostream& Write(std::ostream& out, FIRST&& first, REST&&... rest) {
+ Write(out, std::forward<FIRST>(first));
+ Write(out, std::forward<REST>(rest)...);
+ return out;
+}
+
+// Fatal() prints a message to stdout with the given file, line, function and optional message,
+// then calls abort(). Fatal() is usually not called directly, but by the UNREACHABLE() and
+// UNIMPLEMENTED() macros below.
+template <typename... MSG_ARGS>
+[[noreturn]] inline void Fatal(const char* reason,
+ const char* file,
+ int line,
+ const char* function,
+ MSG_ARGS&&... msg_args) {
+ std::stringstream msg;
+ msg << file << ":" << line << ": " << reason << ": " << function << "()";
+ if constexpr (sizeof...(msg_args) > 0) {
+ msg << " ";
+ Write(msg, std::forward<MSG_ARGS>(msg_args)...);
}
+ std::cout << msg.str() << std::endl;
+ abort();
+}
// LOG() prints the current file, line and function to stdout, followed by a
// string representation of all the variadic arguments.
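
The Write() overloads above recurse through optional, vector, unordered_map and variant before falling back to operator<<, which is what lets LOG() and Fatal() print arbitrary argument lists. A small usage sketch, relying only on the headers Debug.h already includes (unordered_map iteration order is unspecified, so the output shown is one possibility):

    std::stringstream ss;
    std::unordered_map<std::string, std::vector<int>> sizes = {{"a", {1, 2}}, {"b", {3}}};
    std::optional<int> maybe;  // disengaged, so it prints as "<undefined>"
    wgpu::utils::Write(ss, "sizes: ", sizes, ", maybe: ", maybe);
    // Possible output: sizes: {a: [1, 2], b: [3]}, maybe: <undefined>
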
diff --git a/chromium/third_party/dawn/src/dawn/platform/BUILD.gn b/chromium/third_party/dawn/src/dawn/platform/BUILD.gn
index 2d1cb00900a..c2c81444dc0 100644
--- a/chromium/third_party/dawn/src/dawn/platform/BUILD.gn
+++ b/chromium/third_party/dawn/src/dawn/platform/BUILD.gn
@@ -35,7 +35,7 @@ dawn_component("platform") {
deps = [ "${dawn_root}/src/dawn/common" ]
public_deps = [
- # DawnPlatform.h has #include <dawn/webgpu.h>
+ # DawnPlatform.h has #include "dawn/webgpu.h"
"${dawn_root}/include/dawn:headers",
]
}
diff --git a/chromium/third_party/dawn/src/dawn/platform/CMakeLists.txt b/chromium/third_party/dawn/src/dawn/platform/CMakeLists.txt
index c1dfaf9c029..d9625a74115 100644
--- a/chromium/third_party/dawn/src/dawn/platform/CMakeLists.txt
+++ b/chromium/third_party/dawn/src/dawn/platform/CMakeLists.txt
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-add_library(dawn_platform ${DAWN_DUMMY_FILE})
+add_library(dawn_platform ${DAWN_PLACEHOLDER_FILE})
common_compile_options(dawn_platform)
target_compile_definitions(dawn_platform PRIVATE "DAWN_PLATFORM_IMPLEMENTATION")
diff --git a/chromium/third_party/dawn/src/dawn/platform/DawnPlatform.cpp b/chromium/third_party/dawn/src/dawn/platform/DawnPlatform.cpp
index 2706316acd2..0d52a33f4c9 100644
--- a/chromium/third_party/dawn/src/dawn/platform/DawnPlatform.cpp
+++ b/chromium/third_party/dawn/src/dawn/platform/DawnPlatform.cpp
@@ -13,51 +13,53 @@
// limitations under the License.
#include "dawn/platform/DawnPlatform.h"
-#include "dawn/platform/WorkerThread.h"
+
+#include <memory>
#include "dawn/common/Assert.h"
+#include "dawn/platform/WorkerThread.h"
namespace dawn::platform {
- CachingInterface::CachingInterface() = default;
-
- CachingInterface::~CachingInterface() = default;
-
- Platform::Platform() = default;
-
- Platform::~Platform() = default;
-
- const unsigned char* Platform::GetTraceCategoryEnabledFlag(TraceCategory category) {
- static unsigned char disabled = 0;
- return &disabled;
- }
-
- double Platform::MonotonicallyIncreasingTime() {
- return 0;
- }
-
- uint64_t Platform::AddTraceEvent(char phase,
- const unsigned char* categoryGroupEnabled,
- const char* name,
- uint64_t id,
- double timestamp,
- int numArgs,
- const char** argNames,
- const unsigned char* argTypes,
- const uint64_t* argValues,
- unsigned char flags) {
- // AddTraceEvent cannot be called if events are disabled.
- ASSERT(false);
- return 0;
- }
-
- dawn::platform::CachingInterface* Platform::GetCachingInterface(const void* fingerprint,
- size_t fingerprintSize) {
- return nullptr;
- }
-
- std::unique_ptr<dawn::platform::WorkerTaskPool> Platform::CreateWorkerTaskPool() {
- return std::make_unique<AsyncWorkerThreadPool>();
- }
+CachingInterface::CachingInterface() = default;
+
+CachingInterface::~CachingInterface() = default;
+
+Platform::Platform() = default;
+
+Platform::~Platform() = default;
+
+const unsigned char* Platform::GetTraceCategoryEnabledFlag(TraceCategory category) {
+ static unsigned char disabled = 0;
+ return &disabled;
+}
+
+double Platform::MonotonicallyIncreasingTime() {
+ return 0;
+}
+
+uint64_t Platform::AddTraceEvent(char phase,
+ const unsigned char* categoryGroupEnabled,
+ const char* name,
+ uint64_t id,
+ double timestamp,
+ int numArgs,
+ const char** argNames,
+ const unsigned char* argTypes,
+ const uint64_t* argValues,
+ unsigned char flags) {
+ // AddTraceEvent cannot be called if events are disabled.
+ ASSERT(false);
+ return 0;
+}
+
+dawn::platform::CachingInterface* Platform::GetCachingInterface(const void* fingerprint,
+ size_t fingerprintSize) {
+ return nullptr;
+}
+
+std::unique_ptr<dawn::platform::WorkerTaskPool> Platform::CreateWorkerTaskPool() {
+ return std::make_unique<AsyncWorkerThreadPool>();
+}
} // namespace dawn::platform
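
These defaults keep tracing disabled and timing inert (time is always 0), so trace events are effectively dropped unless an embedder overrides the hooks. A sketch of such an override, assuming the methods are declared virtual in DawnPlatform.h (which this override-based design implies) and that <chrono> is acceptable for the timestamp source:

    #include <chrono>

    class TimedPlatform : public dawn::platform::Platform {
      public:
        double MonotonicallyIncreasingTime() override {
            // Seconds on a monotonic clock, instead of the default constant 0.
            return std::chrono::duration<double>(
                       std::chrono::steady_clock::now().time_since_epoch())
                .count();
        }
    };
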
diff --git a/chromium/third_party/dawn/src/dawn/platform/WorkerThread.cpp b/chromium/third_party/dawn/src/dawn/platform/WorkerThread.cpp
index ea1c26eb7b6..bf71df76249 100644
--- a/chromium/third_party/dawn/src/dawn/platform/WorkerThread.cpp
+++ b/chromium/third_party/dawn/src/dawn/platform/WorkerThread.cpp
@@ -22,75 +22,69 @@
namespace {
- class AsyncWaitableEventImpl {
- public:
- AsyncWaitableEventImpl() : mIsComplete(false) {
- }
+class AsyncWaitableEventImpl {
+ public:
+ AsyncWaitableEventImpl() : mIsComplete(false) {}
- void Wait() {
- std::unique_lock<std::mutex> lock(mMutex);
- mCondition.wait(lock, [this] { return mIsComplete; });
- }
+ void Wait() {
+ std::unique_lock<std::mutex> lock(mMutex);
+ mCondition.wait(lock, [this] { return mIsComplete; });
+ }
+
+ bool IsComplete() {
+ std::lock_guard<std::mutex> lock(mMutex);
+ return mIsComplete;
+ }
- bool IsComplete() {
+ void MarkAsComplete() {
+ {
std::lock_guard<std::mutex> lock(mMutex);
- return mIsComplete;
+ mIsComplete = true;
}
+ mCondition.notify_all();
+ }
- void MarkAsComplete() {
- {
- std::lock_guard<std::mutex> lock(mMutex);
- mIsComplete = true;
- }
- mCondition.notify_all();
- }
+ private:
+ std::mutex mMutex;
+ std::condition_variable mCondition;
+ bool mIsComplete;
+};
- private:
- std::mutex mMutex;
- std::condition_variable mCondition;
- bool mIsComplete;
- };
+class AsyncWaitableEvent final : public dawn::platform::WaitableEvent {
+ public:
+ AsyncWaitableEvent() : mWaitableEventImpl(std::make_shared<AsyncWaitableEventImpl>()) {}
- class AsyncWaitableEvent final : public dawn::platform::WaitableEvent {
- public:
- AsyncWaitableEvent() : mWaitableEventImpl(std::make_shared<AsyncWaitableEventImpl>()) {
- }
+ void Wait() override { mWaitableEventImpl->Wait(); }
- void Wait() override {
- mWaitableEventImpl->Wait();
- }
-
- bool IsComplete() override {
- return mWaitableEventImpl->IsComplete();
- }
+ bool IsComplete() override { return mWaitableEventImpl->IsComplete(); }
- std::shared_ptr<AsyncWaitableEventImpl> GetWaitableEventImpl() const {
- return mWaitableEventImpl;
- }
+ std::shared_ptr<AsyncWaitableEventImpl> GetWaitableEventImpl() const {
+ return mWaitableEventImpl;
+ }
- private:
- std::shared_ptr<AsyncWaitableEventImpl> mWaitableEventImpl;
- };
+ private:
+ std::shared_ptr<AsyncWaitableEventImpl> mWaitableEventImpl;
+};
} // anonymous namespace
namespace dawn::platform {
- std::unique_ptr<dawn::platform::WaitableEvent> AsyncWorkerThreadPool::PostWorkerTask(
- dawn::platform::PostWorkerTaskCallback callback,
- void* userdata) {
- std::unique_ptr<AsyncWaitableEvent> waitableEvent = std::make_unique<AsyncWaitableEvent>();
+std::unique_ptr<dawn::platform::WaitableEvent> AsyncWorkerThreadPool::PostWorkerTask(
+ dawn::platform::PostWorkerTaskCallback callback,
+ void* userdata) {
+ std::unique_ptr<AsyncWaitableEvent> waitableEvent = std::make_unique<AsyncWaitableEvent>();
- std::function<void()> doTask =
- [callback, userdata, waitableEventImpl = waitableEvent->GetWaitableEventImpl()]() {
- callback(userdata);
- waitableEventImpl->MarkAsComplete();
- };
+ std::function<void()> doTask = [callback, userdata,
+ waitableEventImpl = waitableEvent->GetWaitableEventImpl()]() {
+ callback(userdata);
+ waitableEventImpl->MarkAsComplete();
+ };
- std::thread thread(doTask);
- thread.detach();
+ std::thread thread(doTask);
+ thread.detach();
- return waitableEvent;
- }
+ return waitableEvent;
+}
} // namespace dawn::platform
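
Each posted task runs on its own detached thread and signals the shared AsyncWaitableEventImpl when the callback returns, so Wait() on the returned event is the synchronization point. A usage sketch, assuming PostWorkerTaskCallback is a plain void(*)(void* userdata) function pointer:

    #include <atomic>

    void CountTask(void* userdata) {
        static_cast<std::atomic<int>*>(userdata)->fetch_add(1);
    }

    void RunOneTask() {
        std::atomic<int> counter{0};
        dawn::platform::AsyncWorkerThreadPool pool;
        auto event = pool.PostWorkerTask(CountTask, &counter);
        event->Wait();  // blocks until the worker thread calls MarkAsComplete()
        // counter == 1 here
    }
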
diff --git a/chromium/third_party/dawn/src/dawn/platform/WorkerThread.h b/chromium/third_party/dawn/src/dawn/platform/WorkerThread.h
index 949fdff4969..39932d5671b 100644
--- a/chromium/third_party/dawn/src/dawn/platform/WorkerThread.h
+++ b/chromium/third_party/dawn/src/dawn/platform/WorkerThread.h
@@ -15,17 +15,19 @@
#ifndef SRC_DAWN_PLATFORM_WORKERTHREAD_H_
#define SRC_DAWN_PLATFORM_WORKERTHREAD_H_
+#include <memory>
+
#include "dawn/common/NonCopyable.h"
#include "dawn/platform/DawnPlatform.h"
namespace dawn::platform {
- class AsyncWorkerThreadPool : public dawn::platform::WorkerTaskPool, public NonCopyable {
- public:
- std::unique_ptr<dawn::platform::WaitableEvent> PostWorkerTask(
- dawn::platform::PostWorkerTaskCallback callback,
- void* userdata) override;
- };
+class AsyncWorkerThreadPool : public dawn::platform::WorkerTaskPool, public NonCopyable {
+ public:
+ std::unique_ptr<dawn::platform::WaitableEvent> PostWorkerTask(
+ dawn::platform::PostWorkerTaskCallback callback,
+ void* userdata) override;
+};
} // namespace dawn::platform
diff --git a/chromium/third_party/dawn/src/dawn/platform/tracing/EventTracer.cpp b/chromium/third_party/dawn/src/dawn/platform/tracing/EventTracer.cpp
index 7445d98663a..03b266fa2e9 100644
--- a/chromium/third_party/dawn/src/dawn/platform/tracing/EventTracer.cpp
+++ b/chromium/third_party/dawn/src/dawn/platform/tracing/EventTracer.cpp
@@ -18,41 +18,41 @@
namespace dawn::platform::tracing {
- const unsigned char* GetTraceCategoryEnabledFlag(Platform* platform, TraceCategory category) {
- static unsigned char disabled = 0;
- if (platform == nullptr) {
- return &disabled;
- }
-
- const unsigned char* categoryEnabledFlag = platform->GetTraceCategoryEnabledFlag(category);
- if (categoryEnabledFlag != nullptr) {
- return categoryEnabledFlag;
- }
-
+const unsigned char* GetTraceCategoryEnabledFlag(Platform* platform, TraceCategory category) {
+ static unsigned char disabled = 0;
+ if (platform == nullptr) {
return &disabled;
}
- TraceEventHandle AddTraceEvent(Platform* platform,
- char phase,
- const unsigned char* categoryGroupEnabled,
- const char* name,
- uint64_t id,
- int numArgs,
- const char** argNames,
- const unsigned char* argTypes,
- const uint64_t* argValues,
- unsigned char flags) {
- ASSERT(platform != nullptr);
-
- double timestamp = platform->MonotonicallyIncreasingTime();
- if (timestamp != 0) {
- TraceEventHandle handle =
- platform->AddTraceEvent(phase, categoryGroupEnabled, name, id, timestamp, numArgs,
- argNames, argTypes, argValues, flags);
- return handle;
- }
-
- return static_cast<TraceEventHandle>(0);
+ const unsigned char* categoryEnabledFlag = platform->GetTraceCategoryEnabledFlag(category);
+ if (categoryEnabledFlag != nullptr) {
+ return categoryEnabledFlag;
}
+ return &disabled;
+}
+
+TraceEventHandle AddTraceEvent(Platform* platform,
+ char phase,
+ const unsigned char* categoryGroupEnabled,
+ const char* name,
+ uint64_t id,
+ int numArgs,
+ const char** argNames,
+ const unsigned char* argTypes,
+ const uint64_t* argValues,
+ unsigned char flags) {
+ ASSERT(platform != nullptr);
+
+ double timestamp = platform->MonotonicallyIncreasingTime();
+ if (timestamp != 0) {
+ TraceEventHandle handle =
+ platform->AddTraceEvent(phase, categoryGroupEnabled, name, id, timestamp, numArgs,
+ argNames, argTypes, argValues, flags);
+ return handle;
+ }
+
+ return static_cast<TraceEventHandle>(0);
+}
+
} // namespace dawn::platform::tracing
diff --git a/chromium/third_party/dawn/src/dawn/platform/tracing/EventTracer.h b/chromium/third_party/dawn/src/dawn/platform/tracing/EventTracer.h
index 3c97f542b0c..d1f7fe87bf5 100644
--- a/chromium/third_party/dawn/src/dawn/platform/tracing/EventTracer.h
+++ b/chromium/third_party/dawn/src/dawn/platform/tracing/EventTracer.h
@@ -15,37 +15,35 @@
#ifndef SRC_DAWN_PLATFORM_TRACING_EVENTTRACER_H_
#define SRC_DAWN_PLATFORM_TRACING_EVENTTRACER_H_
-#include "dawn/platform/dawn_platform_export.h"
-
#include <cstdint>
+#include "dawn/platform/dawn_platform_export.h"
+
namespace dawn::platform {
- class Platform;
- enum class TraceCategory;
+class Platform;
+enum class TraceCategory;
- namespace tracing {
+namespace tracing {
- using TraceEventHandle = uint64_t;
+using TraceEventHandle = uint64_t;
- DAWN_PLATFORM_EXPORT const unsigned char* GetTraceCategoryEnabledFlag(
- Platform* platform,
- TraceCategory category);
+DAWN_PLATFORM_EXPORT const unsigned char* GetTraceCategoryEnabledFlag(Platform* platform,
+ TraceCategory category);
- // TODO(enga): Simplify this API.
- DAWN_PLATFORM_EXPORT TraceEventHandle
- AddTraceEvent(Platform* platform,
- char phase,
- const unsigned char* categoryGroupEnabled,
- const char* name,
- uint64_t id,
- int numArgs,
- const char** argNames,
- const unsigned char* argTypes,
- const uint64_t* argValues,
- unsigned char flags);
+// TODO(enga): Simplify this API.
+DAWN_PLATFORM_EXPORT TraceEventHandle AddTraceEvent(Platform* platform,
+ char phase,
+ const unsigned char* categoryGroupEnabled,
+ const char* name,
+ uint64_t id,
+ int numArgs,
+ const char** argNames,
+ const unsigned char* argTypes,
+ const uint64_t* argValues,
+ unsigned char flags);
- } // namespace tracing
+} // namespace tracing
} // namespace dawn::platform
#endif // SRC_DAWN_PLATFORM_TRACING_EVENTTRACER_H_
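
Callers are expected to test the category flag before paying for AddTraceEvent, which is why GetTraceCategoryEnabledFlag hands back a pointer rather than a bool. A sketch of that pattern — the TraceCategory value and the 'I' instant-phase character are assumptions, not taken from this diff:

    void EmitInstantEvent(dawn::platform::Platform* platform) {
        using namespace dawn::platform;
        const unsigned char* enabled =
            tracing::GetTraceCategoryEnabledFlag(platform, TraceCategory::General);
        if (*enabled) {
            tracing::AddTraceEvent(platform, /*phase=*/'I', enabled, "MyEvent",
                                   /*id=*/0, /*numArgs=*/0, nullptr, nullptr, nullptr,
                                   /*flags=*/0);
        }
    }
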
diff --git a/chromium/third_party/dawn/src/dawn/platform/tracing/TraceEvent.h b/chromium/third_party/dawn/src/dawn/platform/tracing/TraceEvent.h
index 0baf98fb856..76cbc560377 100644
--- a/chromium/third_party/dawn/src/dawn/platform/tracing/TraceEvent.h
+++ b/chromium/third_party/dawn/src/dawn/platform/tracing/TraceEvent.h
@@ -758,86 +758,62 @@
namespace dawn::platform::TraceEvent {
- // Specify these values when the corresponding argument of addTraceEvent is not
- // used.
- const int zeroNumArgs = 0;
- const unsigned long long noEventId = 0;
-
- // TraceID encapsulates an ID that can either be an integer or pointer. Pointers
- // are mangled with the Process ID so that they are unlikely to collide when the
- // same pointer is used on different processes.
- class TraceID {
- public:
- explicit TraceID(const void* id, unsigned char* flags)
- : m_data(static_cast<unsigned long long>(reinterpret_cast<uintptr_t>(id))) {
- *flags |= TRACE_EVENT_FLAG_MANGLE_ID;
- }
- explicit TraceID(unsigned long long id, unsigned char* flags) : m_data(id) {
- (void)flags;
- }
- explicit TraceID(unsigned long id, unsigned char* flags) : m_data(id) {
- (void)flags;
- }
- explicit TraceID(unsigned int id, unsigned char* flags) : m_data(id) {
- (void)flags;
- }
- explicit TraceID(unsigned short id, unsigned char* flags) : m_data(id) {
- (void)flags;
- }
- explicit TraceID(unsigned char id, unsigned char* flags) : m_data(id) {
- (void)flags;
- }
- explicit TraceID(long long id, unsigned char* flags)
- : m_data(static_cast<unsigned long long>(id)) {
- (void)flags;
- }
- explicit TraceID(long id, unsigned char* flags)
- : m_data(static_cast<unsigned long long>(id)) {
- (void)flags;
- }
- explicit TraceID(int id, unsigned char* flags)
- : m_data(static_cast<unsigned long long>(id)) {
- (void)flags;
- }
- explicit TraceID(short id, unsigned char* flags)
- : m_data(static_cast<unsigned long long>(id)) {
- (void)flags;
- }
- explicit TraceID(signed char id, unsigned char* flags)
- : m_data(static_cast<unsigned long long>(id)) {
- (void)flags;
- }
+// Specify these values when the corresponding argument of addTraceEvent is not
+// used.
+const int zeroNumArgs = 0;
+const uint64_t noEventId = 0;
+
+// TraceID encapsulates an ID that can either be an integer or pointer. Pointers
+// are mangled with the Process ID so that they are unlikely to collide when the
+// same pointer is used on different processes.
+class TraceID {
+ public:
+ explicit TraceID(const void* id, unsigned char* flags)
+ : m_data(static_cast<uint64_t>(reinterpret_cast<uintptr_t>(id))) {
+ *flags |= TRACE_EVENT_FLAG_MANGLE_ID;
+ }
+ explicit TraceID(uint64_t id, unsigned char* flags) : m_data(id) { (void)flags; }
+ explicit TraceID(uint32_t id, unsigned char* flags) : m_data(id) { (void)flags; }
+ explicit TraceID(uint16_t id, unsigned char* flags) : m_data(id) { (void)flags; }
+ explicit TraceID(unsigned char id, unsigned char* flags) : m_data(id) { (void)flags; }
+ explicit TraceID(int64_t id, unsigned char* flags) : m_data(static_cast<uint64_t>(id)) {
+ (void)flags;
+ }
+ explicit TraceID(int32_t id, unsigned char* flags) : m_data(static_cast<uint64_t>(id)) {
+ (void)flags;
+ }
+ explicit TraceID(int16_t id, unsigned char* flags) : m_data(static_cast<uint64_t>(id)) {
+ (void)flags;
+ }
+ explicit TraceID(signed char id, unsigned char* flags) : m_data(static_cast<uint64_t>(id)) {
+ (void)flags;
+ }
- unsigned long long data() const {
- return m_data;
- }
+ uint64_t data() const { return m_data; }
- private:
- unsigned long long m_data;
- };
+ private:
+ uint64_t m_data;
+};
- // Simple union to store various types as unsigned long long.
- union TraceValueUnion {
- bool m_bool;
- unsigned long long m_uint;
- long long m_int;
- double m_double;
- const void* m_pointer;
- const char* m_string;
- };
+// Simple union to store various types as uint64_t.
+union TraceValueUnion {
+ bool m_bool;
+ uint64_t m_uint;
+ int64_t m_int;
+ double m_double;
+ const void* m_pointer;
+ const char* m_string;
+};
- // Simple container for const char* that should be copied instead of retained.
- class TraceStringWithCopy {
- public:
- explicit TraceStringWithCopy(const char* str) : m_str(str) {
- }
- operator const char*() const {
- return m_str;
- }
+// Simple container for const char* that should be copied instead of retained.
+class TraceStringWithCopy {
+ public:
+ explicit TraceStringWithCopy(const char* str) : m_str(str) {}
+ operator const char*() const { return m_str; }
- private:
- const char* m_str;
- };
+ private:
+ const char* m_str;
+};
// Define setTraceValue for each allowed type. It stores the type and
// value in the return arguments. This allows this API to avoid declaring any
@@ -853,138 +829,136 @@ namespace dawn::platform::TraceEvent {
#define INTERNAL_DECLARE_SET_TRACE_VALUE_INT(actual_type, value_type_id) \
static inline void setTraceValue(actual_type arg, unsigned char* type, uint64_t* value) { \
*type = value_type_id; \
- *value = static_cast<unsigned long long>(arg); \
+ *value = static_cast<uint64_t>(arg); \
}
- INTERNAL_DECLARE_SET_TRACE_VALUE_INT(unsigned long long, TRACE_VALUE_TYPE_UINT)
- INTERNAL_DECLARE_SET_TRACE_VALUE_INT(unsigned int, TRACE_VALUE_TYPE_UINT)
- INTERNAL_DECLARE_SET_TRACE_VALUE_INT(unsigned short, TRACE_VALUE_TYPE_UINT)
- INTERNAL_DECLARE_SET_TRACE_VALUE_INT(unsigned char, TRACE_VALUE_TYPE_UINT)
- INTERNAL_DECLARE_SET_TRACE_VALUE_INT(long long, TRACE_VALUE_TYPE_INT)
- INTERNAL_DECLARE_SET_TRACE_VALUE_INT(int, TRACE_VALUE_TYPE_INT)
- INTERNAL_DECLARE_SET_TRACE_VALUE_INT(short, TRACE_VALUE_TYPE_INT)
- INTERNAL_DECLARE_SET_TRACE_VALUE_INT(signed char, TRACE_VALUE_TYPE_INT)
- INTERNAL_DECLARE_SET_TRACE_VALUE(bool, m_bool, TRACE_VALUE_TYPE_BOOL)
- INTERNAL_DECLARE_SET_TRACE_VALUE(double, m_double, TRACE_VALUE_TYPE_DOUBLE)
- INTERNAL_DECLARE_SET_TRACE_VALUE(const void*, m_pointer, TRACE_VALUE_TYPE_POINTER)
- INTERNAL_DECLARE_SET_TRACE_VALUE(const char*, m_string, TRACE_VALUE_TYPE_STRING)
- INTERNAL_DECLARE_SET_TRACE_VALUE(const TraceStringWithCopy&,
- m_string,
- TRACE_VALUE_TYPE_COPY_STRING)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(uint64_t, TRACE_VALUE_TYPE_UINT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(uint32_t, TRACE_VALUE_TYPE_UINT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(uint16_t, TRACE_VALUE_TYPE_UINT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(unsigned char, TRACE_VALUE_TYPE_UINT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(int64_t, TRACE_VALUE_TYPE_INT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(int32_t, TRACE_VALUE_TYPE_INT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(int16_t, TRACE_VALUE_TYPE_INT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(signed char, TRACE_VALUE_TYPE_INT)
+INTERNAL_DECLARE_SET_TRACE_VALUE(bool, m_bool, TRACE_VALUE_TYPE_BOOL)
+INTERNAL_DECLARE_SET_TRACE_VALUE(double, m_double, TRACE_VALUE_TYPE_DOUBLE)
+INTERNAL_DECLARE_SET_TRACE_VALUE(const void*, m_pointer, TRACE_VALUE_TYPE_POINTER)
+INTERNAL_DECLARE_SET_TRACE_VALUE(const char*, m_string, TRACE_VALUE_TYPE_STRING)
+INTERNAL_DECLARE_SET_TRACE_VALUE(const TraceStringWithCopy&, m_string, TRACE_VALUE_TYPE_COPY_STRING)
#undef INTERNAL_DECLARE_SET_TRACE_VALUE
#undef INTERNAL_DECLARE_SET_TRACE_VALUE_INT
- static inline void setTraceValue(const std::string& arg, unsigned char* type, uint64_t* value) {
- TraceValueUnion typeValue;
- typeValue.m_string = arg.data();
- *type = TRACE_VALUE_TYPE_COPY_STRING;
- *value = typeValue.m_uint;
- }
-
- // These addTraceEvent template functions are defined here instead of in the
- // macro, because the arg values could be temporary string objects. In order to
- // store pointers to the internal c_str and pass through to the tracing API, the
- // arg values must live throughout these procedures.
-
- static inline dawn::platform::tracing::TraceEventHandle addTraceEvent(
- dawn::platform::Platform* platform,
- char phase,
- const unsigned char* categoryEnabled,
- const char* name,
- unsigned long long id,
- unsigned char flags,
- int /*unused, helps avoid empty __VA_ARGS__*/) {
- return TRACE_EVENT_API_ADD_TRACE_EVENT(platform, phase, categoryEnabled, name, id,
- zeroNumArgs, 0, 0, 0, flags);
- }
-
- template <class ARG1_TYPE>
- static inline dawn::platform::tracing::TraceEventHandle addTraceEvent(
- dawn::platform::Platform* platform,
- char phase,
- const unsigned char* categoryEnabled,
- const char* name,
- unsigned long long id,
- unsigned char flags,
- int /*unused, helps avoid empty __VA_ARGS__*/,
- const char* arg1Name,
- const ARG1_TYPE& arg1Val) {
- const int numArgs = 1;
- unsigned char argTypes[1];
- uint64_t argValues[1];
- setTraceValue(arg1Val, &argTypes[0], &argValues[0]);
- return TRACE_EVENT_API_ADD_TRACE_EVENT(platform, phase, categoryEnabled, name, id, numArgs,
- &arg1Name, argTypes, argValues, flags);
+static inline void setTraceValue(const std::string& arg, unsigned char* type, uint64_t* value) {
+ TraceValueUnion typeValue;
+ typeValue.m_string = arg.data();
+ *type = TRACE_VALUE_TYPE_COPY_STRING;
+ *value = typeValue.m_uint;
+}
+
+// These addTraceEvent template functions are defined here instead of in the
+// macro, because the arg values could be temporary string objects. In order to
+// store pointers to the internal c_str and pass through to the tracing API, the
+// arg values must live throughout these procedures.
+
+static inline dawn::platform::tracing::TraceEventHandle addTraceEvent(
+ dawn::platform::Platform* platform,
+ char phase,
+ const unsigned char* categoryEnabled,
+ const char* name,
+ uint64_t id,
+ unsigned char flags,
+ int /*unused, helps avoid empty __VA_ARGS__*/) {
+ return TRACE_EVENT_API_ADD_TRACE_EVENT(platform, phase, categoryEnabled, name, id, zeroNumArgs,
+ 0, 0, 0, flags);
+}
+
+template <class ARG1_TYPE>
+static inline dawn::platform::tracing::TraceEventHandle addTraceEvent(
+ dawn::platform::Platform* platform,
+ char phase,
+ const unsigned char* categoryEnabled,
+ const char* name,
+ uint64_t id,
+ unsigned char flags,
+ int /*unused, helps avoid empty __VA_ARGS__*/,
+ const char* arg1Name,
+ const ARG1_TYPE& arg1Val) {
+ const int numArgs = 1;
+ unsigned char argTypes[1];
+ uint64_t argValues[1];
+ setTraceValue(arg1Val, &argTypes[0], &argValues[0]);
+ return TRACE_EVENT_API_ADD_TRACE_EVENT(platform, phase, categoryEnabled, name, id, numArgs,
+ &arg1Name, argTypes, argValues, flags);
+}
+
+template <class ARG1_TYPE, class ARG2_TYPE>
+static inline dawn::platform::tracing::TraceEventHandle addTraceEvent(
+ dawn::platform::Platform* platform,
+ char phase,
+ const unsigned char* categoryEnabled,
+ const char* name,
+ uint64_t id,
+ unsigned char flags,
+ int /*unused, helps avoid empty __VA_ARGS__*/,
+ const char* arg1Name,
+ const ARG1_TYPE& arg1Val,
+ const char* arg2Name,
+ const ARG2_TYPE& arg2Val) {
+ const int numArgs = 2;
+ const char* argNames[2] = {arg1Name, arg2Name};
+ unsigned char argTypes[2];
+ uint64_t argValues[2];
+ setTraceValue(arg1Val, &argTypes[0], &argValues[0]);
+ setTraceValue(arg2Val, &argTypes[1], &argValues[1]);
+ return TRACE_EVENT_API_ADD_TRACE_EVENT(platform, phase, categoryEnabled, name, id, numArgs,
+ argNames, argTypes, argValues, flags);
+}
+
+// Used by TRACE_EVENTx macro. Do not use directly.
+class TraceEndOnScopeClose {
+ public:
+ // Note: members of m_data intentionally left uninitialized. See initialize.
+ TraceEndOnScopeClose() : m_pdata(0) {}
+ ~TraceEndOnScopeClose() {
+ if (m_pdata) {
+ addEventIfEnabled();
+ }
}
- template <class ARG1_TYPE, class ARG2_TYPE>
- static inline dawn::platform::tracing::TraceEventHandle addTraceEvent(
- dawn::platform::Platform* platform,
- char phase,
- const unsigned char* categoryEnabled,
- const char* name,
- unsigned long long id,
- unsigned char flags,
- int /*unused, helps avoid empty __VA_ARGS__*/,
- const char* arg1Name,
- const ARG1_TYPE& arg1Val,
- const char* arg2Name,
- const ARG2_TYPE& arg2Val) {
- const int numArgs = 2;
- const char* argNames[2] = {arg1Name, arg2Name};
- unsigned char argTypes[2];
- uint64_t argValues[2];
- setTraceValue(arg1Val, &argTypes[0], &argValues[0]);
- setTraceValue(arg2Val, &argTypes[1], &argValues[1]);
- return TRACE_EVENT_API_ADD_TRACE_EVENT(platform, phase, categoryEnabled, name, id, numArgs,
- argNames, argTypes, argValues, flags);
+ void initialize(dawn::platform::Platform* platform,
+ const unsigned char* categoryEnabled,
+ const char* name) {
+ m_data.platform = platform;
+ m_data.categoryEnabled = categoryEnabled;
+ m_data.name = name;
+ m_pdata = &m_data;
}
- // Used by TRACE_EVENTx macro. Do not use directly.
- class TraceEndOnScopeClose {
- public:
- // Note: members of m_data intentionally left uninitialized. See initialize.
- TraceEndOnScopeClose() : m_pdata(0) {
- }
- ~TraceEndOnScopeClose() {
- if (m_pdata)
- addEventIfEnabled();
- }
-
- void initialize(dawn::platform::Platform* platform,
- const unsigned char* categoryEnabled,
- const char* name) {
- m_data.platform = platform;
- m_data.categoryEnabled = categoryEnabled;
- m_data.name = name;
- m_pdata = &m_data;
- }
-
- private:
- // Add the end event if the category is still enabled.
- void addEventIfEnabled() {
- // Only called when m_pdata is non-null.
- if (*m_pdata->categoryEnabled) {
- TRACE_EVENT_API_ADD_TRACE_EVENT(m_pdata->platform, TRACE_EVENT_PHASE_END,
- m_pdata->categoryEnabled, m_pdata->name, noEventId,
- zeroNumArgs, 0, 0, 0, TRACE_EVENT_FLAG_NONE);
- }
+ private:
+ // Add the end event if the category is still enabled.
+ void addEventIfEnabled() {
+ // Only called when m_pdata is non-null.
+ if (*m_pdata->categoryEnabled) {
+ TRACE_EVENT_API_ADD_TRACE_EVENT(m_pdata->platform, TRACE_EVENT_PHASE_END,
+ m_pdata->categoryEnabled, m_pdata->name, noEventId,
+ zeroNumArgs, 0, 0, 0, TRACE_EVENT_FLAG_NONE);
}
+ }
- // This Data struct workaround is to avoid initializing all the members
- // in Data during construction of this object, since this object is always
- // constructed, even when tracing is disabled. If the members of Data were
- // members of this class instead, compiler warnings occur about potential
- // uninitialized accesses.
- struct Data {
- dawn::platform::Platform* platform;
- const unsigned char* categoryEnabled;
- const char* name;
- };
- Data* m_pdata;
- Data m_data;
+ // This Data struct workaround is to avoid initializing all the members
+ // in Data during construction of this object, since this object is always
+ // constructed, even when tracing is disabled. If the members of Data were
+    // members of this class instead, compiler warnings would occur about potential
+ // uninitialized accesses.
+ struct Data {
+ dawn::platform::Platform* platform;
+ const unsigned char* categoryEnabled;
+ const char* name;
};
+ Data* m_pdata;
+ Data m_data;
+};
} // namespace dawn::platform::TraceEvent
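
TraceEndOnScopeClose is what lets the scoped TRACE_EVENTx macros emit a matching END event at scope exit without initializing anything when tracing is off. A rough sketch of the expansion — TRACE_EVENT_PHASE_BEGIN and the surrounding macro plumbing are assumed from the full header, not shown in this hunk:

    void TracedScope(dawn::platform::Platform* platform, const unsigned char* categoryEnabled) {
        dawn::platform::TraceEvent::TraceEndOnScopeClose scopedEnd;
        if (*categoryEnabled) {
            dawn::platform::TraceEvent::addTraceEvent(
                platform, TRACE_EVENT_PHASE_BEGIN, categoryEnabled, "MyScope",
                dawn::platform::TraceEvent::noEventId, TRACE_EVENT_FLAG_NONE, 0);
            scopedEnd.initialize(platform, categoryEnabled, "MyScope");
        }
        // ... traced work ...
    }  // scopedEnd's destructor emits the matching END event if the category is still enabled
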
diff --git a/chromium/third_party/dawn/src/dawn/samples/Animometer.cpp b/chromium/third_party/dawn/src/dawn/samples/Animometer.cpp
index 273cbce4fee..2138bb9b625 100644
--- a/chromium/third_party/dawn/src/dawn/samples/Animometer.cpp
+++ b/chromium/third_party/dawn/src/dawn/samples/Animometer.cpp
@@ -12,6 +12,10 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+#include <cstdio>
+#include <cstdlib>
+#include <vector>
+
#include "dawn/samples/SampleUtils.h"
#include "dawn/utils/ComboRenderPipelineDescriptor.h"
@@ -19,10 +23,6 @@
#include "dawn/utils/SystemUtils.h"
#include "dawn/utils/WGPUHelpers.h"
-#include <cstdio>
-#include <cstdlib>
-#include <vector>
-
wgpu::Device device;
wgpu::Queue queue;
wgpu::SwapChain swapchain;
@@ -31,7 +31,8 @@ wgpu::BindGroup bindGroup;
wgpu::Buffer ubo;
float RandomFloat(float min, float max) {
- float zeroOne = rand() / float(RAND_MAX);
+ // NOLINTNEXTLINE(runtime/threadsafe_fn)
+ float zeroOne = rand() / static_cast<float>(RAND_MAX);
return zeroOne * (max - min) + min;
}
@@ -71,7 +72,7 @@ void init() {
@builtin(position) Position : vec4<f32>;
};
- @stage(vertex) fn main(@builtin(vertex_index) VertexIndex : u32) -> VertexOut {
+ @vertex fn main(@builtin(vertex_index) VertexIndex : u32) -> VertexOut {
var positions : array<vec4<f32>, 3> = array<vec4<f32>, 3>(
vec4<f32>( 0.0, 0.1, 0.0, 1.0),
vec4<f32>(-0.1, -0.1, 0.0, 1.0),
@@ -111,7 +112,7 @@ void init() {
})");
wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
- @stage(fragment) fn main(@location(0) v_color : vec4<f32>) -> @location(0) vec4<f32> {
+ @fragment fn main(@location(0) v_color : vec4<f32>) -> @location(0) vec4<f32> {
return v_color;
})");
diff --git a/chromium/third_party/dawn/src/dawn/samples/BUILD.gn b/chromium/third_party/dawn/src/dawn/samples/BUILD.gn
index 26067fed2cc..58874cf623e 100644
--- a/chromium/third_party/dawn/src/dawn/samples/BUILD.gn
+++ b/chromium/third_party/dawn/src/dawn/samples/BUILD.gn
@@ -34,6 +34,7 @@ static_library("utils") {
# Export all of these as public deps so that `gn check` allows includes
public_deps = [
"${dawn_root}/src/dawn:cpp",
+ "${dawn_root}/src/dawn:proc_shared",
"${dawn_root}/src/dawn/common",
"${dawn_root}/src/dawn/native",
"${dawn_root}/src/dawn/utils",
diff --git a/chromium/third_party/dawn/src/dawn/samples/CHelloTriangle.cpp b/chromium/third_party/dawn/src/dawn/samples/CHelloTriangle.cpp
index ed1507e4bb1..a912b00f1d8 100644
--- a/chromium/third_party/dawn/src/dawn/samples/CHelloTriangle.cpp
+++ b/chromium/third_party/dawn/src/dawn/samples/CHelloTriangle.cpp
@@ -32,7 +32,7 @@ void init() {
swapChainFormat = static_cast<WGPUTextureFormat>(GetPreferredSwapChainTextureFormat());
const char* vs = R"(
- @stage(vertex) fn main(
+ @vertex fn main(
@builtin(vertex_index) VertexIndex : u32
) -> @builtin(position) vec4<f32> {
var pos = array<vec2<f32>, 3>(
@@ -45,7 +45,7 @@ void init() {
WGPUShaderModule vsModule = utils::CreateShaderModule(device, vs).Release();
const char* fs = R"(
- @stage(fragment) fn main() -> @location(0) vec4<f32> {
+ @fragment fn main() -> @location(0) vec4<f32> {
return vec4<f32>(1.0, 0.0, 0.0, 1.0);
})";
WGPUShaderModule fsModule = utils::CreateShaderModule(device, fs).Release();
diff --git a/chromium/third_party/dawn/src/dawn/samples/CMakeLists.txt b/chromium/third_party/dawn/src/dawn/samples/CMakeLists.txt
index 07d596edba5..1a8e007dd9a 100644
--- a/chromium/third_party/dawn/src/dawn/samples/CMakeLists.txt
+++ b/chromium/third_party/dawn/src/dawn/samples/CMakeLists.txt
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-add_library(dawn_sample_utils STATIC ${DAWN_DUMMY_FILE})
+add_library(dawn_sample_utils STATIC ${DAWN_PLACEHOLDER_FILE})
common_compile_options(dawn_sample_utils)
target_sources(dawn_sample_utils PRIVATE
"SampleUtils.cpp"
diff --git a/chromium/third_party/dawn/src/dawn/samples/ComputeBoids.cpp b/chromium/third_party/dawn/src/dawn/samples/ComputeBoids.cpp
index d91b4a4bd89..8237db41e38 100644
--- a/chromium/third_party/dawn/src/dawn/samples/ComputeBoids.cpp
+++ b/chromium/third_party/dawn/src/dawn/samples/ComputeBoids.cpp
@@ -12,6 +12,11 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+#include <array>
+#include <cstring>
+#include <random>
+#include <vector>
+
#include "dawn/samples/SampleUtils.h"
#include "dawn/utils/ComboRenderPipelineDescriptor.h"
@@ -19,10 +24,6 @@
#include "dawn/utils/SystemUtils.h"
#include "dawn/utils/WGPUHelpers.h"
-#include <array>
-#include <cstring>
-#include <random>
-
wgpu::Device device;
wgpu::Queue queue;
wgpu::SwapChain swapchain;
@@ -101,7 +102,7 @@ void initRender() {
@location(2) a_pos : vec2<f32>;
};
- @stage(vertex)
+ @vertex
fn main(input : VertexIn) -> @builtin(position) vec4<f32> {
var angle : f32 = -atan2(input.a_particleVel.x, input.a_particleVel.y);
var pos : vec2<f32> = vec2<f32>(
@@ -112,7 +113,7 @@ void initRender() {
)");
wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
- @stage(fragment)
+ @fragment
fn main() -> @location(0) vec4<f32> {
return vec4<f32>(1.0, 1.0, 1.0, 1.0);
}
@@ -169,7 +170,7 @@ void initSim() {
@binding(2) @group(0) var<storage, read_write> particlesB : Particles;
// https://github.com/austinEng/Project6-Vulkan-Flocking/blob/master/data/shaders/computeparticles/particle.comp
- @stage(compute) @workgroup_size(1)
+ @compute @workgroup_size(1)
fn main(@builtin(global_invocation_id) GlobalInvocationID : vec3<u32>) {
var index : u32 = GlobalInvocationID.x;
if (index >= params.particleCount) {
@@ -275,7 +276,7 @@ wgpu::CommandBuffer createCommandBuffer(const wgpu::TextureView backbufferView,
wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
pass.SetPipeline(updatePipeline);
pass.SetBindGroup(0, updateBGs[i]);
- pass.Dispatch(kNumParticles);
+ pass.DispatchWorkgroups(kNumParticles);
pass.End();
}
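ComputeBoids.cpp also picks up the compute-side renames: @stage(compute) becomes @compute, and ComputePassEncoder::Dispatch becomes DispatchWorkgroups. A short sketch of the updated recording sequence, assuming device, updatePipeline and bindGroup exist as in the sample (variable names are illustrative):

wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
pass.SetPipeline(updatePipeline);
pass.SetBindGroup(0, bindGroup);
pass.DispatchWorkgroups(kNumParticles);  // Was pass.Dispatch(kNumParticles) before this change.
pass.End();
wgpu::CommandBuffer commands = encoder.Finish();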
diff --git a/chromium/third_party/dawn/src/dawn/samples/CppHelloTriangle.cpp b/chromium/third_party/dawn/src/dawn/samples/CppHelloTriangle.cpp
index 3c3ba51ace3..5202f0d473c 100644
--- a/chromium/third_party/dawn/src/dawn/samples/CppHelloTriangle.cpp
+++ b/chromium/third_party/dawn/src/dawn/samples/CppHelloTriangle.cpp
@@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+#include <vector>
+
#include "dawn/samples/SampleUtils.h"
#include "dawn/utils/ComboRenderPipelineDescriptor.h"
@@ -19,8 +21,6 @@
#include "dawn/utils/SystemUtils.h"
#include "dawn/utils/WGPUHelpers.h"
-#include <vector>
-
wgpu::Device device;
wgpu::Buffer indexBuffer;
@@ -95,7 +95,7 @@ void init() {
initTextures();
wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
- @stage(vertex) fn main(@location(0) pos : vec4<f32>)
+ @vertex fn main(@location(0) pos : vec4<f32>)
-> @builtin(position) vec4<f32> {
return pos;
})");
@@ -104,7 +104,7 @@ void init() {
@group(0) @binding(0) var mySampler: sampler;
@group(0) @binding(1) var myTexture : texture_2d<f32>;
- @stage(fragment) fn main(@builtin(position) FragCoord : vec4<f32>)
+ @fragment fn main(@builtin(position) FragCoord : vec4<f32>)
-> @location(0) vec4<f32> {
return textureSample(myTexture, mySampler, FragCoord.xy / vec2<f32>(640.0, 480.0));
})");
diff --git a/chromium/third_party/dawn/src/dawn/samples/ManualSwapChainTest.cpp b/chromium/third_party/dawn/src/dawn/samples/ManualSwapChainTest.cpp
index 9c6e757409b..c6e61cc1cec 100644
--- a/chromium/third_party/dawn/src/dawn/samples/ManualSwapChainTest.cpp
+++ b/chromium/third_party/dawn/src/dawn/samples/ManualSwapChainTest.cpp
@@ -51,20 +51,22 @@
// - TODO can't be tested yet: check cycling the same window over multiple devices.
// - TODO can't be tested yet: check cycling the same window over multiple formats.
+#include <algorithm>
+#include <memory>
+#include <unordered_map>
+#include <utility>
+#include <vector>
+
+#include "GLFW/glfw3.h"
#include "dawn/common/Assert.h"
#include "dawn/common/Log.h"
+#include "dawn/dawn_proc.h"
+#include "dawn/native/DawnNative.h"
#include "dawn/utils/ComboRenderPipelineDescriptor.h"
#include "dawn/utils/GLFWUtils.h"
#include "dawn/utils/ScopedAutoreleasePool.h"
#include "dawn/utils/WGPUHelpers.h"
-
-#include <dawn/dawn_proc.h>
-#include <dawn/native/DawnNative.h>
-#include <dawn/webgpu_cpp.h>
-#include "GLFW/glfw3.h"
-
-#include <memory>
-#include <unordered_map>
+#include "dawn/webgpu_cpp.h"
struct WindowData {
GLFWwindow* window = nullptr;
@@ -265,7 +267,7 @@ int main(int argc, const char* argv[]) {
}
// Choose an adapter we like.
- // TODO: allow switching the window between devices.
+ // TODO(dawn:269): allow switching the window between devices.
DawnProcTable procs = dawn::native::GetProcs();
dawnProcSetProcs(&procs);
@@ -314,7 +316,7 @@ int main(int argc, const char* argv[]) {
// The hacky pipeline to render a triangle.
utils::ComboRenderPipelineDescriptor pipelineDesc;
pipelineDesc.vertex.module = utils::CreateShaderModule(device, R"(
- @stage(vertex) fn main(@builtin(vertex_index) VertexIndex : u32)
+ @vertex fn main(@builtin(vertex_index) VertexIndex : u32)
-> @builtin(position) vec4<f32> {
var pos = array<vec2<f32>, 3>(
vec2<f32>( 0.0, 0.5),
@@ -324,7 +326,7 @@ int main(int argc, const char* argv[]) {
return vec4<f32>(pos[VertexIndex], 0.0, 1.0);
})");
pipelineDesc.cFragment.module = utils::CreateShaderModule(device, R"(
- @stage(fragment) fn main() -> @location(0) vec4<f32> {
+ @fragment fn main() -> @location(0) vec4<f32> {
return vec4<f32>(1.0, 0.0, 0.0, 1.0);
})");
// BGRA shouldn't be hardcoded. Consider having a map[format -> pipeline].
diff --git a/chromium/third_party/dawn/src/dawn/samples/SampleUtils.cpp b/chromium/third_party/dawn/src/dawn/samples/SampleUtils.cpp
index bce64bc308f..ac102a67d66 100644
--- a/chromium/third_party/dawn/src/dawn/samples/SampleUtils.cpp
+++ b/chromium/third_party/dawn/src/dawn/samples/SampleUtils.cpp
@@ -14,6 +14,12 @@
#include "dawn/samples/SampleUtils.h"
+#include <algorithm>
+#include <cstring>
+#include <memory>
+#include <string>
+#include <vector>
+
#include "GLFW/glfw3.h"
#include "dawn/common/Assert.h"
#include "dawn/common/Log.h"
@@ -27,9 +33,6 @@
#include "dawn/wire/WireClient.h"
#include "dawn/wire/WireServer.h"
-#include <algorithm>
-#include <cstring>
-
void PrintDeviceError(WGPUErrorType errorType, const char* message, void*) {
const char* errorTypeName = "";
switch (errorType) {
@@ -75,7 +78,7 @@ static wgpu::BackendType backendType = wgpu::BackendType::OpenGLES;
#elif defined(DAWN_ENABLE_BACKEND_DESKTOP_GL)
static wgpu::BackendType backendType = wgpu::BackendType::OpenGL;
#else
-# error
+#error
#endif
static CmdBufType cmdBufType = CmdBufType::Terrible;
diff --git a/chromium/third_party/dawn/src/dawn/samples/SampleUtils.h b/chromium/third_party/dawn/src/dawn/samples/SampleUtils.h
index d7f6d172e91..59528bd0932 100644
--- a/chromium/third_party/dawn/src/dawn/samples/SampleUtils.h
+++ b/chromium/third_party/dawn/src/dawn/samples/SampleUtils.h
@@ -15,8 +15,8 @@
#ifndef SRC_DAWN_SAMPLES_SAMPLEUTILS_H_
#define SRC_DAWN_SAMPLES_SAMPLEUTILS_H_
-#include <dawn/dawn_wsi.h>
-#include <dawn/webgpu_cpp.h>
+#include "dawn/dawn_wsi.h"
+#include "dawn/webgpu_cpp.h"
bool InitSample(int argc, const char** argv);
void DoFlush();
diff --git a/chromium/third_party/dawn/src/dawn/tests/BUILD.gn b/chromium/third_party/dawn/src/dawn/tests/BUILD.gn
index 0fee0dd776a..69c41a2e1a7 100644
--- a/chromium/third_party/dawn/src/dawn/tests/BUILD.gn
+++ b/chromium/third_party/dawn/src/dawn/tests/BUILD.gn
@@ -157,19 +157,32 @@ source_set("native_mocks_sources") {
configs += [ "${dawn_root}/src/dawn/native:internal" ]
sources = [
+ "unittests/native/mocks/BindGroupLayoutMock.cpp",
"unittests/native/mocks/BindGroupLayoutMock.h",
+ "unittests/native/mocks/BindGroupMock.cpp",
"unittests/native/mocks/BindGroupMock.h",
+ "unittests/native/mocks/BufferMock.cpp",
+ "unittests/native/mocks/BufferMock.h",
+ "unittests/native/mocks/CommandBufferMock.cpp",
"unittests/native/mocks/CommandBufferMock.h",
+ "unittests/native/mocks/ComputePipelineMock.cpp",
"unittests/native/mocks/ComputePipelineMock.h",
"unittests/native/mocks/DeviceMock.h",
+ "unittests/native/mocks/ExternalTextureMock.cpp",
"unittests/native/mocks/ExternalTextureMock.h",
+ "unittests/native/mocks/PipelineLayoutMock.cpp",
"unittests/native/mocks/PipelineLayoutMock.h",
+ "unittests/native/mocks/QuerySetMock.cpp",
"unittests/native/mocks/QuerySetMock.h",
+ "unittests/native/mocks/RenderPipelineMock.cpp",
"unittests/native/mocks/RenderPipelineMock.h",
+ "unittests/native/mocks/SamplerMock.cpp",
"unittests/native/mocks/SamplerMock.h",
"unittests/native/mocks/ShaderModuleMock.cpp",
"unittests/native/mocks/ShaderModuleMock.h",
+ "unittests/native/mocks/SwapChainMock.cpp",
"unittests/native/mocks/SwapChainMock.h",
+ "unittests/native/mocks/TextureMock.cpp",
"unittests/native/mocks/TextureMock.h",
]
}
@@ -239,6 +252,7 @@ dawn_test("dawn_unittests") {
"unittests/ToBackendTests.cpp",
"unittests/TypedIntegerTests.cpp",
"unittests/VersionTests.cpp",
+ "unittests/native/BlobTests.cpp",
"unittests/native/CacheKeyTests.cpp",
"unittests/native/CommandBufferEncodingTests.cpp",
"unittests/native/CreatePipelineAsyncTaskTests.cpp",
@@ -294,7 +308,6 @@ dawn_test("dawn_unittests") {
"unittests/wire/WireBasicTests.cpp",
"unittests/wire/WireBufferMappingTests.cpp",
"unittests/wire/WireCreatePipelineAsyncTests.cpp",
- "unittests/wire/WireDestroyObjectTests.cpp",
"unittests/wire/WireDisconnectTests.cpp",
"unittests/wire/WireErrorCallbackTests.cpp",
"unittests/wire/WireExtensionTests.cpp",
@@ -309,7 +322,6 @@ dawn_test("dawn_unittests") {
"unittests/wire/WireShaderModuleTests.cpp",
"unittests/wire/WireTest.cpp",
"unittests/wire/WireTest.h",
- "unittests/wire/WireWGPUDevicePropertiesTests.cpp",
]
if (is_win) {
@@ -330,31 +342,76 @@ dawn_test("dawn_unittests") {
}
###############################################################################
-# Dawn end2end tests targets
+# Dawn test infrastructure targets
###############################################################################
-source_set("end2end_tests_sources") {
- configs += [ "${dawn_root}/src/dawn/common:internal_config" ]
+source_set("test_infra_sources") {
+ configs += [ "${dawn_root}/src/dawn/native:internal" ]
testonly = true
deps = [
- ":gmock_and_gtest",
"${dawn_root}/src/dawn:cpp",
"${dawn_root}/src/dawn:proc",
"${dawn_root}/src/dawn/common",
-
- # Statically linked because the end2end white_box tests use Dawn internals.
+ "${dawn_root}/src/dawn/native:sources",
"${dawn_root}/src/dawn/native:static",
"${dawn_root}/src/dawn/utils",
"${dawn_root}/src/dawn/wire",
]
+ public_deps = [ ":gmock_and_gtest" ]
+
+ if (dawn_supports_glfw_for_windowing || dawn_enable_opengl) {
+ assert(dawn_supports_glfw_for_windowing)
+ public_deps += [ "${dawn_root}/src/dawn/utils:glfw" ]
+ }
+
sources = [
+ "DawnTest.cpp",
"DawnTest.h",
"MockCallback.h",
"ParamGenerator.h",
"ToggleParser.cpp",
"ToggleParser.h",
+ ]
+}
+
+###############################################################################
+# Dawn end2end tests targets
+###############################################################################
+
+# Source code for mocks used for end2end testing are separated from the rest of
+# sources so that they aren't included in non-test builds.
+source_set("end2end_mocks_sources") {
+ configs += [ "${dawn_root}/src/dawn/native:internal" ]
+ testonly = true
+
+ deps = [
+ ":gmock_and_gtest",
+ "${dawn_root}/src/dawn/platform",
+ ]
+
+ sources = [
+ "end2end/mocks/CachingInterfaceMock.cpp",
+ "end2end/mocks/CachingInterfaceMock.h",
+ ]
+}
+
+source_set("end2end_tests_sources") {
+ testonly = true
+
+ deps = [
+ ":end2end_mocks_sources",
+ ":test_infra_sources",
+ "${dawn_root}/src/dawn:cpp",
+ "${dawn_root}/src/dawn:proc",
+ "${dawn_root}/src/dawn/common",
+ "${dawn_root}/src/dawn/native:headers",
+ "${dawn_root}/src/dawn/utils",
+ "${dawn_root}/src/dawn/wire",
+ ]
+
+ sources = [
"end2end/AdapterDiscoveryTests.cpp",
"end2end/BasicTests.cpp",
"end2end/BindGroupTests.cpp",
@@ -382,6 +439,7 @@ source_set("end2end_tests_sources") {
"end2end/DepthStencilStateTests.cpp",
"end2end/DestroyTests.cpp",
"end2end/DeviceInitializationTests.cpp",
+ "end2end/DeviceLifetimeTests.cpp",
"end2end/DeviceLostTests.cpp",
"end2end/DrawIndexedIndirectTests.cpp",
"end2end/DrawIndexedTests.cpp",
@@ -389,6 +447,7 @@ source_set("end2end_tests_sources") {
"end2end/DrawTests.cpp",
"end2end/DynamicBufferOffsetTests.cpp",
"end2end/EntryPointTests.cpp",
+ "end2end/ExperimentalDP4aTests.cpp",
"end2end/ExternalTextureTests.cpp",
"end2end/FirstIndexOffsetTests.cpp",
"end2end/GpuMemorySynchronizationTests.cpp",
@@ -401,6 +460,7 @@ source_set("end2end_tests_sources") {
"end2end/NonzeroTextureCreationTests.cpp",
"end2end/ObjectCachingTests.cpp",
"end2end/OpArrayLengthTests.cpp",
+ "end2end/PipelineCachingTests.cpp",
"end2end/PipelineLayoutTests.cpp",
"end2end/PrimitiveStateTests.cpp",
"end2end/PrimitiveTopologyTests.cpp",
@@ -432,12 +492,6 @@ source_set("end2end_tests_sources") {
"end2end/ViewportTests.cpp",
]
- # Validation tests that need OS windows live in end2end tests.
- sources += [
- "unittests/validation/ValidationTest.cpp",
- "unittests/validation/ValidationTest.h",
- ]
-
libs = []
if (dawn_enable_d3d12) {
@@ -460,17 +514,12 @@ source_set("end2end_tests_sources") {
frameworks = [ "IOSurface.framework" ]
}
- if (dawn_enable_opengl) {
- assert(dawn_supports_glfw_for_windowing)
- }
-
if (dawn_supports_glfw_for_windowing) {
sources += [
"end2end/SwapChainTests.cpp",
"end2end/SwapChainValidationTests.cpp",
"end2end/WindowSurfaceTests.cpp",
]
- deps += [ "${dawn_root}/src/dawn/utils:glfw" ]
}
if (dawn_enable_d3d12 || (dawn_enable_vulkan && is_chromeos) ||
@@ -491,23 +540,19 @@ source_set("white_box_tests_sources") {
testonly = true
deps = [
- ":gmock_and_gtest",
+ ":test_infra_sources",
"${dawn_root}/src/dawn:cpp",
"${dawn_root}/src/dawn:proc",
"${dawn_root}/src/dawn/common",
- "${dawn_root}/src/dawn/native:sources",
- # Statically linked because the end2end white_box tests use Dawn internals.
+ # Statically linked and with sources because the end2end white_box tests use Dawn internals.
+ "${dawn_root}/src/dawn/native:sources",
"${dawn_root}/src/dawn/native:static",
"${dawn_root}/src/dawn/utils",
"${dawn_root}/src/dawn/wire",
]
- sources = [
- "DawnTest.h",
- "ParamGenerator.h",
- "ToggleParser.h",
- ]
+ sources = []
if (dawn_enable_vulkan) {
deps += [ "${dawn_vulkan_headers_dir}:vulkan_headers" ]
@@ -551,13 +596,9 @@ source_set("white_box_tests_sources") {
sources += [ "white_box/MetalAutoreleasePoolTests.mm" ]
}
- if (dawn_enable_opengl) {
- deps += [ "${dawn_root}/src/dawn/utils:glfw" ]
- }
-
if (dawn_enable_opengles && defined(dawn_angle_dir)) {
sources += [ "white_box/EGLImageWrappingTests.cpp" ]
- deps += [ "${dawn_angle_dir}:libEGL" ]
+ include_dirs = [ "${dawn_angle_dir}/include" ]
}
libs = []
@@ -566,21 +607,11 @@ source_set("white_box_tests_sources") {
dawn_test("dawn_end2end_tests") {
deps = [
":end2end_tests_sources",
- ":gmock_and_gtest",
+ ":test_infra_sources",
":white_box_tests_sources",
- "${dawn_root}/src/dawn:cpp",
- "${dawn_root}/src/dawn:proc",
- "${dawn_root}/src/dawn/common",
- "${dawn_root}/src/dawn/native:static",
- "${dawn_root}/src/dawn/utils",
- "${dawn_root}/src/dawn/wire",
- ]
-
- sources = [
- "DawnTest.cpp",
- "DawnTest.h",
]
+ sources = []
libs = []
# When building inside Chromium, use their gtest main function because it is
@@ -591,10 +622,6 @@ dawn_test("dawn_end2end_tests") {
sources += [ "End2EndTestsMain.cpp" ]
}
- if (dawn_enable_opengl) {
- deps += [ "${dawn_root}/src/dawn/utils:glfw" ]
- }
-
if (is_chromeos) {
libs += [ "gbm" ]
}
@@ -606,22 +633,16 @@ dawn_test("dawn_end2end_tests") {
dawn_test("dawn_perf_tests") {
deps = [
- ":gmock_and_gtest",
+ ":test_infra_sources",
"${dawn_root}/src/dawn:cpp",
"${dawn_root}/src/dawn:proc",
"${dawn_root}/src/dawn/common",
- "${dawn_root}/src/dawn/native",
"${dawn_root}/src/dawn/platform",
"${dawn_root}/src/dawn/utils",
"${dawn_root}/src/dawn/wire",
]
sources = [
- "DawnTest.cpp",
- "DawnTest.h",
- "ParamGenerator.h",
- "ToggleParser.cpp",
- "ToggleParser.h",
"perf_tests/BufferUploadPerf.cpp",
"perf_tests/DawnPerfTest.cpp",
"perf_tests/DawnPerfTest.h",
@@ -646,8 +667,4 @@ dawn_test("dawn_perf_tests") {
if (dawn_enable_metal) {
frameworks = [ "IOSurface.framework" ]
}
-
- if (dawn_enable_opengl) {
- deps += [ "${dawn_root}/src/dawn/utils:glfw" ]
- }
}
diff --git a/chromium/third_party/dawn/src/dawn/utils/BackendBinding.cpp b/chromium/third_party/dawn/src/dawn/utils/BackendBinding.cpp
index f97e6b94275..17bea5f9ff9 100644
--- a/chromium/third_party/dawn/src/dawn/utils/BackendBinding.cpp
+++ b/chromium/third_party/dawn/src/dawn/utils/BackendBinding.cpp
@@ -19,91 +19,88 @@
#include "GLFW/glfw3.h"
#if defined(DAWN_ENABLE_BACKEND_OPENGL)
-# include "dawn/native/OpenGLBackend.h"
+#include "dawn/native/OpenGLBackend.h"
#endif // defined(DAWN_ENABLE_BACKEND_OPENGL)
namespace utils {
#if defined(DAWN_ENABLE_BACKEND_D3D12)
- BackendBinding* CreateD3D12Binding(GLFWwindow* window, WGPUDevice device);
+BackendBinding* CreateD3D12Binding(GLFWwindow* window, WGPUDevice device);
#endif
#if defined(DAWN_ENABLE_BACKEND_METAL)
- BackendBinding* CreateMetalBinding(GLFWwindow* window, WGPUDevice device);
+BackendBinding* CreateMetalBinding(GLFWwindow* window, WGPUDevice device);
#endif
#if defined(DAWN_ENABLE_BACKEND_NULL)
- BackendBinding* CreateNullBinding(GLFWwindow* window, WGPUDevice device);
+BackendBinding* CreateNullBinding(GLFWwindow* window, WGPUDevice device);
#endif
#if defined(DAWN_ENABLE_BACKEND_OPENGL)
- BackendBinding* CreateOpenGLBinding(GLFWwindow* window, WGPUDevice device);
+BackendBinding* CreateOpenGLBinding(GLFWwindow* window, WGPUDevice device);
#endif
#if defined(DAWN_ENABLE_BACKEND_VULKAN)
- BackendBinding* CreateVulkanBinding(GLFWwindow* window, WGPUDevice device);
+BackendBinding* CreateVulkanBinding(GLFWwindow* window, WGPUDevice device);
#endif
- BackendBinding::BackendBinding(GLFWwindow* window, WGPUDevice device)
- : mWindow(window), mDevice(device) {
- }
+BackendBinding::BackendBinding(GLFWwindow* window, WGPUDevice device)
+ : mWindow(window), mDevice(device) {}
- void DiscoverAdapter(dawn::native::Instance* instance,
- GLFWwindow* window,
- wgpu::BackendType type) {
- DAWN_UNUSED(type);
- DAWN_UNUSED(window);
+void DiscoverAdapter(dawn::native::Instance* instance, GLFWwindow* window, wgpu::BackendType type) {
+ DAWN_UNUSED(type);
+ DAWN_UNUSED(window);
- if (type == wgpu::BackendType::OpenGL || type == wgpu::BackendType::OpenGLES) {
+ if (type == wgpu::BackendType::OpenGL || type == wgpu::BackendType::OpenGLES) {
#if defined(DAWN_ENABLE_BACKEND_OPENGL)
- glfwMakeContextCurrent(window);
- auto getProc = reinterpret_cast<void* (*)(const char*)>(glfwGetProcAddress);
- if (type == wgpu::BackendType::OpenGL) {
- dawn::native::opengl::AdapterDiscoveryOptions adapterOptions;
- adapterOptions.getProc = getProc;
- instance->DiscoverAdapters(&adapterOptions);
- } else {
- dawn::native::opengl::AdapterDiscoveryOptionsES adapterOptions;
- adapterOptions.getProc = getProc;
- instance->DiscoverAdapters(&adapterOptions);
- }
-#endif // defined(DAWN_ENABLE_BACKEND_OPENGL)
+ glfwMakeContextCurrent(window);
+ auto getProc = reinterpret_cast<void* (*)(const char*)>(glfwGetProcAddress);
+ if (type == wgpu::BackendType::OpenGL) {
+ dawn::native::opengl::AdapterDiscoveryOptions adapterOptions;
+ adapterOptions.getProc = getProc;
+ instance->DiscoverAdapters(&adapterOptions);
} else {
- instance->DiscoverDefaultAdapters();
+ dawn::native::opengl::AdapterDiscoveryOptionsES adapterOptions;
+ adapterOptions.getProc = getProc;
+ instance->DiscoverAdapters(&adapterOptions);
}
+#endif // defined(DAWN_ENABLE_BACKEND_OPENGL)
+ } else {
+ instance->DiscoverDefaultAdapters();
}
+}
- BackendBinding* CreateBinding(wgpu::BackendType type, GLFWwindow* window, WGPUDevice device) {
- switch (type) {
+BackendBinding* CreateBinding(wgpu::BackendType type, GLFWwindow* window, WGPUDevice device) {
+ switch (type) {
#if defined(DAWN_ENABLE_BACKEND_D3D12)
- case wgpu::BackendType::D3D12:
- return CreateD3D12Binding(window, device);
+ case wgpu::BackendType::D3D12:
+ return CreateD3D12Binding(window, device);
#endif
#if defined(DAWN_ENABLE_BACKEND_METAL)
- case wgpu::BackendType::Metal:
- return CreateMetalBinding(window, device);
+ case wgpu::BackendType::Metal:
+ return CreateMetalBinding(window, device);
#endif
#if defined(DAWN_ENABLE_BACKEND_NULL)
- case wgpu::BackendType::Null:
- return CreateNullBinding(window, device);
+ case wgpu::BackendType::Null:
+ return CreateNullBinding(window, device);
#endif
#if defined(DAWN_ENABLE_BACKEND_DESKTOP_GL)
- case wgpu::BackendType::OpenGL:
- return CreateOpenGLBinding(window, device);
+ case wgpu::BackendType::OpenGL:
+ return CreateOpenGLBinding(window, device);
#endif
#if defined(DAWN_ENABLE_BACKEND_OPENGLES)
- case wgpu::BackendType::OpenGLES:
- return CreateOpenGLBinding(window, device);
+ case wgpu::BackendType::OpenGLES:
+ return CreateOpenGLBinding(window, device);
#endif
#if defined(DAWN_ENABLE_BACKEND_VULKAN)
- case wgpu::BackendType::Vulkan:
- return CreateVulkanBinding(window, device);
+ case wgpu::BackendType::Vulkan:
+ return CreateVulkanBinding(window, device);
#endif
- default:
- return nullptr;
- }
+ default:
+ return nullptr;
}
+}
} // namespace utils
diff --git a/chromium/third_party/dawn/src/dawn/utils/BackendBinding.h b/chromium/third_party/dawn/src/dawn/utils/BackendBinding.h
index 7c988e7dd50..7871934ab06 100644
--- a/chromium/third_party/dawn/src/dawn/utils/BackendBinding.h
+++ b/chromium/third_party/dawn/src/dawn/utils/BackendBinding.h
@@ -22,24 +22,22 @@ struct GLFWwindow;
namespace utils {
- class BackendBinding {
- public:
- virtual ~BackendBinding() = default;
+class BackendBinding {
+ public:
+ virtual ~BackendBinding() = default;
- virtual uint64_t GetSwapChainImplementation() = 0;
- virtual WGPUTextureFormat GetPreferredSwapChainTextureFormat() = 0;
+ virtual uint64_t GetSwapChainImplementation() = 0;
+ virtual WGPUTextureFormat GetPreferredSwapChainTextureFormat() = 0;
- protected:
- BackendBinding(GLFWwindow* window, WGPUDevice device);
+ protected:
+ BackendBinding(GLFWwindow* window, WGPUDevice device);
- GLFWwindow* mWindow = nullptr;
- WGPUDevice mDevice = nullptr;
- };
+ GLFWwindow* mWindow = nullptr;
+ WGPUDevice mDevice = nullptr;
+};
- void DiscoverAdapter(dawn::native::Instance* instance,
- GLFWwindow* window,
- wgpu::BackendType type);
- BackendBinding* CreateBinding(wgpu::BackendType type, GLFWwindow* window, WGPUDevice device);
+void DiscoverAdapter(dawn::native::Instance* instance, GLFWwindow* window, wgpu::BackendType type);
+BackendBinding* CreateBinding(wgpu::BackendType type, GLFWwindow* window, WGPUDevice device);
} // namespace utils
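The BackendBinding changes above are formatting-only; the utils API is unchanged. A minimal caller-side sketch, assuming a GLFWwindow* window and a WGPUDevice device already exist (the backend choice here is arbitrary):

dawn::native::Instance instance;
utils::DiscoverAdapter(&instance, window, wgpu::BackendType::Vulkan);
utils::BackendBinding* binding = utils::CreateBinding(wgpu::BackendType::Vulkan, window, device);
if (binding != nullptr) {
    uint64_t swapChainImpl = binding->GetSwapChainImplementation();
    WGPUTextureFormat preferredFormat = binding->GetPreferredSwapChainTextureFormat();
    // These two values are what SampleUtils.cpp feeds into its swap chain setup.
}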
diff --git a/chromium/third_party/dawn/src/dawn/utils/CMakeLists.txt b/chromium/third_party/dawn/src/dawn/utils/CMakeLists.txt
index 60124835166..a05bcba97fb 100644
--- a/chromium/third_party/dawn/src/dawn/utils/CMakeLists.txt
+++ b/chromium/third_party/dawn/src/dawn/utils/CMakeLists.txt
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-add_library(dawn_utils STATIC ${DAWN_DUMMY_FILE})
+add_library(dawn_utils STATIC ${DAWN_PLACEHOLDER_FILE})
common_compile_options(dawn_utils)
target_sources(dawn_utils PRIVATE
"ComboRenderBundleEncoderDescriptor.cpp"
diff --git a/chromium/third_party/dawn/src/dawn/utils/ComboRenderBundleEncoderDescriptor.cpp b/chromium/third_party/dawn/src/dawn/utils/ComboRenderBundleEncoderDescriptor.cpp
index 9c413d218ec..4b21e3257b8 100644
--- a/chromium/third_party/dawn/src/dawn/utils/ComboRenderBundleEncoderDescriptor.cpp
+++ b/chromium/third_party/dawn/src/dawn/utils/ComboRenderBundleEncoderDescriptor.cpp
@@ -18,11 +18,11 @@
namespace utils {
- ComboRenderBundleEncoderDescriptor::ComboRenderBundleEncoderDescriptor() {
- wgpu::RenderBundleEncoderDescriptor* descriptor = this;
+ComboRenderBundleEncoderDescriptor::ComboRenderBundleEncoderDescriptor() {
+ wgpu::RenderBundleEncoderDescriptor* descriptor = this;
- descriptor->colorFormatsCount = 0;
- descriptor->colorFormats = &cColorFormats[0];
- }
+ descriptor->colorFormatsCount = 0;
+ descriptor->colorFormats = &cColorFormats[0];
+}
} // namespace utils
diff --git a/chromium/third_party/dawn/src/dawn/utils/ComboRenderBundleEncoderDescriptor.h b/chromium/third_party/dawn/src/dawn/utils/ComboRenderBundleEncoderDescriptor.h
index bd0abe37d1e..3f4db0a6da2 100644
--- a/chromium/third_party/dawn/src/dawn/utils/ComboRenderBundleEncoderDescriptor.h
+++ b/chromium/third_party/dawn/src/dawn/utils/ComboRenderBundleEncoderDescriptor.h
@@ -15,20 +15,19 @@
#ifndef SRC_DAWN_UTILS_COMBORENDERBUNDLEENCODERDESCRIPTOR_H_
#define SRC_DAWN_UTILS_COMBORENDERBUNDLEENCODERDESCRIPTOR_H_
-#include <dawn/webgpu_cpp.h>
+#include <array>
#include "dawn/common/Constants.h"
-
-#include <array>
+#include "dawn/webgpu_cpp.h"
namespace utils {
- class ComboRenderBundleEncoderDescriptor : public wgpu::RenderBundleEncoderDescriptor {
- public:
- ComboRenderBundleEncoderDescriptor();
+class ComboRenderBundleEncoderDescriptor : public wgpu::RenderBundleEncoderDescriptor {
+ public:
+ ComboRenderBundleEncoderDescriptor();
- std::array<wgpu::TextureFormat, kMaxColorAttachments> cColorFormats;
- };
+ std::array<wgpu::TextureFormat, kMaxColorAttachments> cColorFormats;
+};
} // namespace utils
diff --git a/chromium/third_party/dawn/src/dawn/utils/ComboRenderPipelineDescriptor.cpp b/chromium/third_party/dawn/src/dawn/utils/ComboRenderPipelineDescriptor.cpp
index 1114af433d1..78afd809656 100644
--- a/chromium/third_party/dawn/src/dawn/utils/ComboRenderPipelineDescriptor.cpp
+++ b/chromium/third_party/dawn/src/dawn/utils/ComboRenderPipelineDescriptor.cpp
@@ -18,128 +18,128 @@
namespace utils {
- ComboVertexState::ComboVertexState() {
- vertexBufferCount = 0;
+ComboVertexState::ComboVertexState() {
+ vertexBufferCount = 0;
+
+ // Fill the default values for vertexBuffers and vertexAttributes in buffers.
+ wgpu::VertexAttribute vertexAttribute;
+ vertexAttribute.shaderLocation = 0;
+ vertexAttribute.offset = 0;
+ vertexAttribute.format = wgpu::VertexFormat::Float32;
+ for (uint32_t i = 0; i < kMaxVertexAttributes; ++i) {
+ cAttributes[i] = vertexAttribute;
+ }
+ for (uint32_t i = 0; i < kMaxVertexBuffers; ++i) {
+ cVertexBuffers[i].arrayStride = 0;
+ cVertexBuffers[i].stepMode = wgpu::VertexStepMode::Vertex;
+ cVertexBuffers[i].attributeCount = 0;
+ cVertexBuffers[i].attributes = nullptr;
+ }
+ // cVertexBuffers[i].attributes points to somewhere in cAttributes.
+ // cVertexBuffers[0].attributes points to &cAttributes[0] by default. Assuming
+ // cVertexBuffers[0] has two attributes, then cVertexBuffers[1].attributes should point to
+ // &cAttributes[2]. Likewise, if cVertexBuffers[1] has 3 attributes, then
+ // cVertexBuffers[2].attributes should point to &cAttributes[5].
+ cVertexBuffers[0].attributes = &cAttributes[0];
+}
+
+ComboRenderPipelineDescriptor::ComboRenderPipelineDescriptor() {
+ wgpu::RenderPipelineDescriptor* descriptor = this;
+
+ // Set defaults for the vertex state.
+ {
+ wgpu::VertexState* vertex = &descriptor->vertex;
+ vertex->module = nullptr;
+ vertex->entryPoint = "main";
+ vertex->bufferCount = 0;
// Fill the default values for vertexBuffers and vertexAttributes in buffers.
- wgpu::VertexAttribute vertexAttribute;
- vertexAttribute.shaderLocation = 0;
- vertexAttribute.offset = 0;
- vertexAttribute.format = wgpu::VertexFormat::Float32;
for (uint32_t i = 0; i < kMaxVertexAttributes; ++i) {
- cAttributes[i] = vertexAttribute;
+ cAttributes[i].shaderLocation = 0;
+ cAttributes[i].offset = 0;
+ cAttributes[i].format = wgpu::VertexFormat::Float32;
}
for (uint32_t i = 0; i < kMaxVertexBuffers; ++i) {
- cVertexBuffers[i].arrayStride = 0;
- cVertexBuffers[i].stepMode = wgpu::VertexStepMode::Vertex;
- cVertexBuffers[i].attributeCount = 0;
- cVertexBuffers[i].attributes = nullptr;
+ cBuffers[i].arrayStride = 0;
+ cBuffers[i].stepMode = wgpu::VertexStepMode::Vertex;
+ cBuffers[i].attributeCount = 0;
+ cBuffers[i].attributes = nullptr;
}
- // cVertexBuffers[i].attributes points to somewhere in cAttributes.
- // cVertexBuffers[0].attributes points to &cAttributes[0] by default. Assuming
- // cVertexBuffers[0] has two attributes, then cVertexBuffers[1].attributes should point to
- // &cAttributes[2]. Likewise, if cVertexBuffers[1] has 3 attributes, then
- // cVertexBuffers[2].attributes should point to &cAttributes[5].
- cVertexBuffers[0].attributes = &cAttributes[0];
+ // cBuffers[i].attributes points to somewhere in cAttributes.
+ // cBuffers[0].attributes points to &cAttributes[0] by default. Assuming
+ // cBuffers[0] has two attributes, then cBuffers[1].attributes should point to
+ // &cAttributes[2]. Likewise, if cBuffers[1] has 3 attributes, then
+ // cBuffers[2].attributes should point to &cAttributes[5].
+ cBuffers[0].attributes = &cAttributes[0];
+ vertex->buffers = &cBuffers[0];
}
- ComboRenderPipelineDescriptor::ComboRenderPipelineDescriptor() {
- wgpu::RenderPipelineDescriptor* descriptor = this;
-
- // Set defaults for the vertex state.
- {
- wgpu::VertexState* vertex = &descriptor->vertex;
- vertex->module = nullptr;
- vertex->entryPoint = "main";
- vertex->bufferCount = 0;
-
- // Fill the default values for vertexBuffers and vertexAttributes in buffers.
- for (uint32_t i = 0; i < kMaxVertexAttributes; ++i) {
- cAttributes[i].shaderLocation = 0;
- cAttributes[i].offset = 0;
- cAttributes[i].format = wgpu::VertexFormat::Float32;
- }
- for (uint32_t i = 0; i < kMaxVertexBuffers; ++i) {
- cBuffers[i].arrayStride = 0;
- cBuffers[i].stepMode = wgpu::VertexStepMode::Vertex;
- cBuffers[i].attributeCount = 0;
- cBuffers[i].attributes = nullptr;
- }
- // cBuffers[i].attributes points to somewhere in cAttributes.
- // cBuffers[0].attributes points to &cAttributes[0] by default. Assuming
- // cBuffers[0] has two attributes, then cBuffers[1].attributes should point to
- // &cAttributes[2]. Likewise, if cBuffers[1] has 3 attributes, then
- // cBuffers[2].attributes should point to &cAttributes[5].
- cBuffers[0].attributes = &cAttributes[0];
- vertex->buffers = &cBuffers[0];
- }
-
- // Set the defaults for the primitive state
- {
- wgpu::PrimitiveState* primitive = &descriptor->primitive;
- primitive->topology = wgpu::PrimitiveTopology::TriangleList;
- primitive->stripIndexFormat = wgpu::IndexFormat::Undefined;
- primitive->frontFace = wgpu::FrontFace::CCW;
- primitive->cullMode = wgpu::CullMode::None;
- }
-
- // Set the defaults for the depth-stencil state
- {
- wgpu::StencilFaceState stencilFace;
- stencilFace.compare = wgpu::CompareFunction::Always;
- stencilFace.failOp = wgpu::StencilOperation::Keep;
- stencilFace.depthFailOp = wgpu::StencilOperation::Keep;
- stencilFace.passOp = wgpu::StencilOperation::Keep;
-
- cDepthStencil.format = wgpu::TextureFormat::Depth24PlusStencil8;
- cDepthStencil.depthWriteEnabled = false;
- cDepthStencil.depthCompare = wgpu::CompareFunction::Always;
- cDepthStencil.stencilBack = stencilFace;
- cDepthStencil.stencilFront = stencilFace;
- cDepthStencil.stencilReadMask = 0xff;
- cDepthStencil.stencilWriteMask = 0xff;
- cDepthStencil.depthBias = 0;
- cDepthStencil.depthBiasSlopeScale = 0.0;
- cDepthStencil.depthBiasClamp = 0.0;
- }
+ // Set the defaults for the primitive state
+ {
+ wgpu::PrimitiveState* primitive = &descriptor->primitive;
+ primitive->topology = wgpu::PrimitiveTopology::TriangleList;
+ primitive->stripIndexFormat = wgpu::IndexFormat::Undefined;
+ primitive->frontFace = wgpu::FrontFace::CCW;
+ primitive->cullMode = wgpu::CullMode::None;
+ }
- // Set the defaults for the multisample state
- {
- wgpu::MultisampleState* multisample = &descriptor->multisample;
- multisample->count = 1;
- multisample->mask = 0xFFFFFFFF;
- multisample->alphaToCoverageEnabled = false;
- }
+ // Set the defaults for the depth-stencil state
+ {
+ wgpu::StencilFaceState stencilFace;
+ stencilFace.compare = wgpu::CompareFunction::Always;
+ stencilFace.failOp = wgpu::StencilOperation::Keep;
+ stencilFace.depthFailOp = wgpu::StencilOperation::Keep;
+ stencilFace.passOp = wgpu::StencilOperation::Keep;
+
+ cDepthStencil.format = wgpu::TextureFormat::Depth24PlusStencil8;
+ cDepthStencil.depthWriteEnabled = false;
+ cDepthStencil.depthCompare = wgpu::CompareFunction::Always;
+ cDepthStencil.stencilBack = stencilFace;
+ cDepthStencil.stencilFront = stencilFace;
+ cDepthStencil.stencilReadMask = 0xff;
+ cDepthStencil.stencilWriteMask = 0xff;
+ cDepthStencil.depthBias = 0;
+ cDepthStencil.depthBiasSlopeScale = 0.0;
+ cDepthStencil.depthBiasClamp = 0.0;
+ }
- // Set the defaults for the fragment state
- {
- cFragment.module = nullptr;
- cFragment.entryPoint = "main";
- cFragment.targetCount = 1;
- cFragment.targets = &cTargets[0];
- descriptor->fragment = &cFragment;
-
- wgpu::BlendComponent blendComponent;
- blendComponent.srcFactor = wgpu::BlendFactor::One;
- blendComponent.dstFactor = wgpu::BlendFactor::Zero;
- blendComponent.operation = wgpu::BlendOperation::Add;
-
- for (uint32_t i = 0; i < kMaxColorAttachments; ++i) {
- cTargets[i].format = wgpu::TextureFormat::RGBA8Unorm;
- cTargets[i].blend = nullptr;
- cTargets[i].writeMask = wgpu::ColorWriteMask::All;
-
- cBlends[i].color = blendComponent;
- cBlends[i].alpha = blendComponent;
- }
- }
+ // Set the defaults for the multisample state
+ {
+ wgpu::MultisampleState* multisample = &descriptor->multisample;
+ multisample->count = 1;
+ multisample->mask = 0xFFFFFFFF;
+ multisample->alphaToCoverageEnabled = false;
}
- wgpu::DepthStencilState* ComboRenderPipelineDescriptor::EnableDepthStencil(
- wgpu::TextureFormat format) {
- this->depthStencil = &cDepthStencil;
- cDepthStencil.format = format;
- return &cDepthStencil;
+ // Set the defaults for the fragment state
+ {
+ cFragment.module = nullptr;
+ cFragment.entryPoint = "main";
+ cFragment.targetCount = 1;
+ cFragment.targets = &cTargets[0];
+ descriptor->fragment = &cFragment;
+
+ wgpu::BlendComponent blendComponent;
+ blendComponent.srcFactor = wgpu::BlendFactor::One;
+ blendComponent.dstFactor = wgpu::BlendFactor::Zero;
+ blendComponent.operation = wgpu::BlendOperation::Add;
+
+ for (uint32_t i = 0; i < kMaxColorAttachments; ++i) {
+ cTargets[i].format = wgpu::TextureFormat::RGBA8Unorm;
+ cTargets[i].blend = nullptr;
+ cTargets[i].writeMask = wgpu::ColorWriteMask::All;
+
+ cBlends[i].color = blendComponent;
+ cBlends[i].alpha = blendComponent;
+ }
}
+}
+
+wgpu::DepthStencilState* ComboRenderPipelineDescriptor::EnableDepthStencil(
+ wgpu::TextureFormat format) {
+ this->depthStencil = &cDepthStencil;
+ cDepthStencil.format = format;
+ return &cDepthStencil;
+}
} // namespace utils
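The ComboRenderPipelineDescriptor rewrite above is pure reindentation; the defaults it fills are unchanged. A hedged usage sketch in the samples' style, assuming device, vsModule and fsModule were created as elsewhere in this patch:

utils::ComboRenderPipelineDescriptor pipelineDesc;
pipelineDesc.vertex.module = vsModule;
pipelineDesc.cFragment.module = fsModule;
pipelineDesc.cTargets[0].format = wgpu::TextureFormat::BGRA8Unorm;
// EnableDepthStencil wires cDepthStencil into the base descriptor and returns it for tweaking.
wgpu::DepthStencilState* depthStencil =
    pipelineDesc.EnableDepthStencil(wgpu::TextureFormat::Depth24PlusStencil8);
depthStencil->depthWriteEnabled = true;
wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&pipelineDesc);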
diff --git a/chromium/third_party/dawn/src/dawn/utils/ComboRenderPipelineDescriptor.h b/chromium/third_party/dawn/src/dawn/utils/ComboRenderPipelineDescriptor.h
index 1de61f50c8a..b1e5c2a6606 100644
--- a/chromium/third_party/dawn/src/dawn/utils/ComboRenderPipelineDescriptor.h
+++ b/chromium/third_party/dawn/src/dawn/utils/ComboRenderPipelineDescriptor.h
@@ -15,49 +15,48 @@
#ifndef SRC_DAWN_UTILS_COMBORENDERPIPELINEDESCRIPTOR_H_
#define SRC_DAWN_UTILS_COMBORENDERPIPELINEDESCRIPTOR_H_
-#include <dawn/webgpu_cpp.h>
+#include <array>
#include "dawn/common/Constants.h"
-
-#include <array>
+#include "dawn/webgpu_cpp.h"
namespace utils {
- // Primarily used by tests to easily set up the vertex buffer state portion of a RenderPipeline.
- class ComboVertexState {
- public:
- ComboVertexState();
-
- ComboVertexState(const ComboVertexState&) = delete;
- ComboVertexState& operator=(const ComboVertexState&) = delete;
- ComboVertexState(ComboVertexState&&) = delete;
- ComboVertexState& operator=(ComboVertexState&&) = delete;
-
- uint32_t vertexBufferCount;
- std::array<wgpu::VertexBufferLayout, kMaxVertexBuffers> cVertexBuffers;
- std::array<wgpu::VertexAttribute, kMaxVertexAttributes> cAttributes;
- };
-
- class ComboRenderPipelineDescriptor : public wgpu::RenderPipelineDescriptor {
- public:
- ComboRenderPipelineDescriptor();
-
- ComboRenderPipelineDescriptor(const ComboRenderPipelineDescriptor&) = delete;
- ComboRenderPipelineDescriptor& operator=(const ComboRenderPipelineDescriptor&) = delete;
- ComboRenderPipelineDescriptor(ComboRenderPipelineDescriptor&&) = delete;
- ComboRenderPipelineDescriptor& operator=(ComboRenderPipelineDescriptor&&) = delete;
-
- wgpu::DepthStencilState* EnableDepthStencil(
- wgpu::TextureFormat format = wgpu::TextureFormat::Depth24PlusStencil8);
-
- std::array<wgpu::VertexBufferLayout, kMaxVertexBuffers> cBuffers;
- std::array<wgpu::VertexAttribute, kMaxVertexAttributes> cAttributes;
- std::array<wgpu::ColorTargetState, kMaxColorAttachments> cTargets;
- std::array<wgpu::BlendState, kMaxColorAttachments> cBlends;
-
- wgpu::FragmentState cFragment;
- wgpu::DepthStencilState cDepthStencil;
- };
+// Primarily used by tests to easily set up the vertex buffer state portion of a RenderPipeline.
+class ComboVertexState {
+ public:
+ ComboVertexState();
+
+ ComboVertexState(const ComboVertexState&) = delete;
+ ComboVertexState& operator=(const ComboVertexState&) = delete;
+ ComboVertexState(ComboVertexState&&) = delete;
+ ComboVertexState& operator=(ComboVertexState&&) = delete;
+
+ uint32_t vertexBufferCount;
+ std::array<wgpu::VertexBufferLayout, kMaxVertexBuffers> cVertexBuffers;
+ std::array<wgpu::VertexAttribute, kMaxVertexAttributes> cAttributes;
+};
+
+class ComboRenderPipelineDescriptor : public wgpu::RenderPipelineDescriptor {
+ public:
+ ComboRenderPipelineDescriptor();
+
+ ComboRenderPipelineDescriptor(const ComboRenderPipelineDescriptor&) = delete;
+ ComboRenderPipelineDescriptor& operator=(const ComboRenderPipelineDescriptor&) = delete;
+ ComboRenderPipelineDescriptor(ComboRenderPipelineDescriptor&&) = delete;
+ ComboRenderPipelineDescriptor& operator=(ComboRenderPipelineDescriptor&&) = delete;
+
+ wgpu::DepthStencilState* EnableDepthStencil(
+ wgpu::TextureFormat format = wgpu::TextureFormat::Depth24PlusStencil8);
+
+ std::array<wgpu::VertexBufferLayout, kMaxVertexBuffers> cBuffers;
+ std::array<wgpu::VertexAttribute, kMaxVertexAttributes> cAttributes;
+ std::array<wgpu::ColorTargetState, kMaxColorAttachments> cTargets;
+ std::array<wgpu::BlendState, kMaxColorAttachments> cBlends;
+
+ wgpu::FragmentState cFragment;
+ wgpu::DepthStencilState cDepthStencil;
+};
} // namespace utils
diff --git a/chromium/third_party/dawn/src/dawn/utils/D3D12Binding.cpp b/chromium/third_party/dawn/src/dawn/utils/D3D12Binding.cpp
index 9ed65b2b776..bdf44fec7da 100644
--- a/chromium/third_party/dawn/src/dawn/utils/D3D12Binding.cpp
+++ b/chromium/third_party/dawn/src/dawn/utils/D3D12Binding.cpp
@@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+#include <memory>
+
#include "dawn/utils/BackendBinding.h"
#include "dawn/common/Assert.h"
@@ -21,35 +23,31 @@
#define GLFW_EXPOSE_NATIVE_WIN32
#include "GLFW/glfw3native.h"
-#include <memory>
-
namespace utils {
- class D3D12Binding : public BackendBinding {
- public:
- D3D12Binding(GLFWwindow* window, WGPUDevice device) : BackendBinding(window, device) {
- }
+class D3D12Binding : public BackendBinding {
+ public:
+ D3D12Binding(GLFWwindow* window, WGPUDevice device) : BackendBinding(window, device) {}
- uint64_t GetSwapChainImplementation() override {
- if (mSwapchainImpl.userData == nullptr) {
- HWND win32Window = glfwGetWin32Window(mWindow);
- mSwapchainImpl =
- dawn::native::d3d12::CreateNativeSwapChainImpl(mDevice, win32Window);
- }
- return reinterpret_cast<uint64_t>(&mSwapchainImpl);
+ uint64_t GetSwapChainImplementation() override {
+ if (mSwapchainImpl.userData == nullptr) {
+ HWND win32Window = glfwGetWin32Window(mWindow);
+ mSwapchainImpl = dawn::native::d3d12::CreateNativeSwapChainImpl(mDevice, win32Window);
}
+ return reinterpret_cast<uint64_t>(&mSwapchainImpl);
+ }
- WGPUTextureFormat GetPreferredSwapChainTextureFormat() override {
- ASSERT(mSwapchainImpl.userData != nullptr);
- return dawn::native::d3d12::GetNativeSwapChainPreferredFormat(&mSwapchainImpl);
- }
+ WGPUTextureFormat GetPreferredSwapChainTextureFormat() override {
+ ASSERT(mSwapchainImpl.userData != nullptr);
+ return dawn::native::d3d12::GetNativeSwapChainPreferredFormat(&mSwapchainImpl);
+ }
- private:
- DawnSwapChainImplementation mSwapchainImpl = {};
- };
+ private:
+ DawnSwapChainImplementation mSwapchainImpl = {};
+};
- BackendBinding* CreateD3D12Binding(GLFWwindow* window, WGPUDevice device) {
- return new D3D12Binding(window, device);
- }
+BackendBinding* CreateD3D12Binding(GLFWwindow* window, WGPUDevice device) {
+ return new D3D12Binding(window, device);
+}
} // namespace utils
diff --git a/chromium/third_party/dawn/src/dawn/utils/EmptyDebugLogger.cpp b/chromium/third_party/dawn/src/dawn/utils/EmptyDebugLogger.cpp
index b52b38f3f8f..a8b2d240869 100644
--- a/chromium/third_party/dawn/src/dawn/utils/EmptyDebugLogger.cpp
+++ b/chromium/third_party/dawn/src/dawn/utils/EmptyDebugLogger.cpp
@@ -16,14 +16,14 @@
namespace utils {
- class EmptyDebugLogger : public PlatformDebugLogger {
- public:
- EmptyDebugLogger() = default;
- ~EmptyDebugLogger() override = default;
- };
+class EmptyDebugLogger : public PlatformDebugLogger {
+ public:
+ EmptyDebugLogger() = default;
+ ~EmptyDebugLogger() override = default;
+};
- PlatformDebugLogger* CreatePlatformDebugLogger() {
- return new EmptyDebugLogger();
- }
+PlatformDebugLogger* CreatePlatformDebugLogger() {
+ return new EmptyDebugLogger();
+}
} // namespace utils
diff --git a/chromium/third_party/dawn/src/dawn/utils/GLFWUtils.cpp b/chromium/third_party/dawn/src/dawn/utils/GLFWUtils.cpp
index d8c162c4637..74e8c73f5d8 100644
--- a/chromium/third_party/dawn/src/dawn/utils/GLFWUtils.cpp
+++ b/chromium/third_party/dawn/src/dawn/utils/GLFWUtils.cpp
@@ -12,75 +12,96 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-#include "dawn/utils/GLFWUtils.h"
+#include <cstdlib>
+#include <utility>
#include "GLFW/glfw3.h"
#include "dawn/common/Platform.h"
+#include "dawn/utils/GLFWUtils.h"
-#include <cstdlib>
-
-#if defined(DAWN_PLATFORM_WINDOWS)
-# define GLFW_EXPOSE_NATIVE_WIN32
-#elif defined(DAWN_USE_X11)
-# define GLFW_EXPOSE_NATIVE_X11
+#if DAWN_PLATFORM_IS(WINDOWS)
+#define GLFW_EXPOSE_NATIVE_WIN32
+#endif
+#if defined(DAWN_USE_X11)
+#define GLFW_EXPOSE_NATIVE_X11
+#endif
+#if defined(DAWN_USE_WAYLAND)
+#define GLFW_EXPOSE_NATIVE_WAYLAND
#endif
#include "GLFW/glfw3native.h"
namespace utils {
- void SetupGLFWWindowHintsForBackend(wgpu::BackendType type) {
- if (type == wgpu::BackendType::OpenGL) {
- // Ask for OpenGL 4.4 which is what the GL backend requires for compute shaders and
- // texture views.
- glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 4);
- glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 4);
- glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GLFW_TRUE);
- glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
- } else if (type == wgpu::BackendType::OpenGLES) {
- glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
- glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 1);
- glfwWindowHint(GLFW_CLIENT_API, GLFW_OPENGL_ES_API);
- glfwWindowHint(GLFW_CONTEXT_CREATION_API, GLFW_EGL_CONTEXT_API);
- } else {
- // Without this GLFW will initialize a GL context on the window, which prevents using
- // the window with other APIs (by crashing in weird ways).
- glfwWindowHint(GLFW_CLIENT_API, GLFW_NO_API);
- }
+void SetupGLFWWindowHintsForBackend(wgpu::BackendType type) {
+ if (type == wgpu::BackendType::OpenGL) {
+ // Ask for OpenGL 4.4 which is what the GL backend requires for compute shaders and
+ // texture views.
+ glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 4);
+ glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 4);
+ glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GLFW_TRUE);
+ glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
+ } else if (type == wgpu::BackendType::OpenGLES) {
+ glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
+ glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 1);
+ glfwWindowHint(GLFW_CLIENT_API, GLFW_OPENGL_ES_API);
+ glfwWindowHint(GLFW_CONTEXT_CREATION_API, GLFW_EGL_CONTEXT_API);
+ } else {
+ // Without this GLFW will initialize a GL context on the window, which prevents using
+ // the window with other APIs (by crashing in weird ways).
+ glfwWindowHint(GLFW_CLIENT_API, GLFW_NO_API);
}
+}
- wgpu::Surface CreateSurfaceForWindow(const wgpu::Instance& instance, GLFWwindow* window) {
- std::unique_ptr<wgpu::ChainedStruct> chainedDescriptor =
- SetupWindowAndGetSurfaceDescriptor(window);
+wgpu::Surface CreateSurfaceForWindow(const wgpu::Instance& instance, GLFWwindow* window) {
+ std::unique_ptr<wgpu::ChainedStruct> chainedDescriptor =
+ SetupWindowAndGetSurfaceDescriptor(window);
- wgpu::SurfaceDescriptor descriptor;
- descriptor.nextInChain = chainedDescriptor.get();
- wgpu::Surface surface = instance.CreateSurface(&descriptor);
+ wgpu::SurfaceDescriptor descriptor;
+ descriptor.nextInChain = chainedDescriptor.get();
+ wgpu::Surface surface = instance.CreateSurface(&descriptor);
- return surface;
- }
+ return surface;
+}
-#if defined(DAWN_PLATFORM_WINDOWS)
- std::unique_ptr<wgpu::ChainedStruct> SetupWindowAndGetSurfaceDescriptor(GLFWwindow* window) {
- std::unique_ptr<wgpu::SurfaceDescriptorFromWindowsHWND> desc =
- std::make_unique<wgpu::SurfaceDescriptorFromWindowsHWND>();
- desc->hwnd = glfwGetWin32Window(window);
- desc->hinstance = GetModuleHandle(nullptr);
- return std::move(desc);
- }
-#elif defined(DAWN_USE_X11)
- std::unique_ptr<wgpu::ChainedStruct> SetupWindowAndGetSurfaceDescriptor(GLFWwindow* window) {
- std::unique_ptr<wgpu::SurfaceDescriptorFromXlibWindow> desc =
- std::make_unique<wgpu::SurfaceDescriptorFromXlibWindow>();
- desc->display = glfwGetX11Display();
- desc->window = glfwGetX11Window(window);
- return std::move(desc);
- }
-#elif defined(DAWN_ENABLE_BACKEND_METAL)
- // SetupWindowAndGetSurfaceDescriptor defined in GLFWUtils_metal.mm
-#else
- std::unique_ptr<wgpu::ChainedStruct> SetupWindowAndGetSurfaceDescriptor(GLFWwindow*) {
- return nullptr;
- }
+// SetupWindowAndGetSurfaceDescriptorCocoa defined in GLFWUtils_metal.mm
+std::unique_ptr<wgpu::ChainedStruct> SetupWindowAndGetSurfaceDescriptorCocoa(GLFWwindow* window);
+
+std::unique_ptr<wgpu::ChainedStruct> SetupWindowAndGetSurfaceDescriptor(GLFWwindow* window) {
+ switch (glfwGetPlatform()) {
+#if DAWN_PLATFORM_IS(WINDOWS)
+ case GLFW_PLATFORM_WIN32: {
+ std::unique_ptr<wgpu::SurfaceDescriptorFromWindowsHWND> desc =
+ std::make_unique<wgpu::SurfaceDescriptorFromWindowsHWND>();
+ desc->hwnd = glfwGetWin32Window(window);
+ desc->hinstance = GetModuleHandle(nullptr);
+ return std::move(desc);
+ }
+#endif
+#if defined(DAWN_ENABLE_BACKEND_METAL)
+ case GLFW_PLATFORM_COCOA:
+ return SetupWindowAndGetSurfaceDescriptorCocoa(window);
#endif
+#if defined(DAWN_USE_WAYLAND)
+ case GLFW_PLATFORM_WAYLAND: {
+ std::unique_ptr<wgpu::SurfaceDescriptorFromWaylandSurface> desc =
+ std::make_unique<wgpu::SurfaceDescriptorFromWaylandSurface>();
+ desc->display = glfwGetWaylandDisplay();
+ desc->surface = glfwGetWaylandWindow(window);
+ return std::move(desc);
+ }
+#endif
+#if defined(DAWN_USE_X11)
+ case GLFW_PLATFORM_X11: {
+ std::unique_ptr<wgpu::SurfaceDescriptorFromXlibWindow> desc =
+ std::make_unique<wgpu::SurfaceDescriptorFromXlibWindow>();
+ desc->display = glfwGetX11Display();
+ desc->window = glfwGetX11Window(window);
+ return std::move(desc);
+ }
+#endif
+ default:
+ return nullptr;
+ }
+}
} // namespace utils
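GLFWUtils.cpp now selects the surface descriptor at runtime via glfwGetPlatform() instead of a compile-time #if chain, and gains a Wayland path. Callers are unaffected; a minimal sketch assuming backendType, instance (wgpu::Instance) and window (GLFWwindow*) are set up as in the samples:

utils::SetupGLFWWindowHintsForBackend(backendType);
// glfwCreateWindow(...) would be called here in a real sample.
wgpu::Surface surface = utils::CreateSurfaceForWindow(instance, window);
// Per GLFWUtils.h below, a null wgpu::Surface is returned when the current GLFW
// platform has no matching descriptor.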
diff --git a/chromium/third_party/dawn/src/dawn/utils/GLFWUtils.h b/chromium/third_party/dawn/src/dawn/utils/GLFWUtils.h
index 11ec67304a1..2fe5b10ffdd 100644
--- a/chromium/third_party/dawn/src/dawn/utils/GLFWUtils.h
+++ b/chromium/third_party/dawn/src/dawn/utils/GLFWUtils.h
@@ -15,27 +15,27 @@
#ifndef SRC_DAWN_UTILS_GLFWUTILS_H_
#define SRC_DAWN_UTILS_GLFWUTILS_H_
-#include "dawn/webgpu_cpp.h"
-
#include <memory>
+#include "dawn/webgpu_cpp.h"
+
struct GLFWwindow;
namespace utils {
- // Adds all the necessary glfwWindowHint calls for the next GLFWwindow created to be used with
- // the specified backend.
- void SetupGLFWWindowHintsForBackend(wgpu::BackendType type);
+// Adds all the necessary glfwWindowHint calls for the next GLFWwindow created to be used with
+// the specified backend.
+void SetupGLFWWindowHintsForBackend(wgpu::BackendType type);
- // Does the necessary setup on the GLFWwindow to allow creating a wgpu::Surface with it and
- // calls `instance.CreateSurface` with the correct descriptor for this window.
- // Returns a null wgpu::Surface on failure.
- wgpu::Surface CreateSurfaceForWindow(const wgpu::Instance& instance, GLFWwindow* window);
+// Does the necessary setup on the GLFWwindow to allow creating a wgpu::Surface with it and
+// calls `instance.CreateSurface` with the correct descriptor for this window.
+// Returns a null wgpu::Surface on failure.
+wgpu::Surface CreateSurfaceForWindow(const wgpu::Instance& instance, GLFWwindow* window);
- // Use for testing only. Does everything that CreateSurfaceForWindow does except the call to
- // CreateSurface. Useful to be able to modify the descriptor for testing, or when trying to
- // avoid using the global proc table.
- std::unique_ptr<wgpu::ChainedStruct> SetupWindowAndGetSurfaceDescriptor(GLFWwindow* window);
+// Use for testing only. Does everything that CreateSurfaceForWindow does except the call to
+// CreateSurface. Useful to be able to modify the descriptor for testing, or when trying to
+// avoid using the global proc table.
+std::unique_ptr<wgpu::ChainedStruct> SetupWindowAndGetSurfaceDescriptor(GLFWwindow* window);
} // namespace utils
diff --git a/chromium/third_party/dawn/src/dawn/utils/GLFWUtils_metal.mm b/chromium/third_party/dawn/src/dawn/utils/GLFWUtils_metal.mm
index 324988764c9..d07eecaf0ba 100644
--- a/chromium/third_party/dawn/src/dawn/utils/GLFWUtils_metal.mm
+++ b/chromium/third_party/dawn/src/dawn/utils/GLFWUtils_metal.mm
@@ -13,7 +13,7 @@
// limitations under the License.
#if !defined(DAWN_ENABLE_BACKEND_METAL)
-# error "GLFWUtils_metal.mm requires the Metal backend to be enabled."
+#error "GLFWUtils_metal.mm requires the Metal backend to be enabled."
#endif // !defined(DAWN_ENABLE_BACKEND_METAL)
#include "dawn/utils/GLFWUtils.h"
@@ -28,26 +28,26 @@
namespace utils {
- std::unique_ptr<wgpu::ChainedStruct> SetupWindowAndGetSurfaceDescriptor(GLFWwindow* window) {
- if (@available(macOS 10.11, *)) {
- NSWindow* nsWindow = glfwGetCocoaWindow(window);
- NSView* view = [nsWindow contentView];
+std::unique_ptr<wgpu::ChainedStruct> SetupWindowAndGetSurfaceDescriptorCocoa(GLFWwindow* window) {
+ if (@available(macOS 10.11, *)) {
+ NSWindow* nsWindow = glfwGetCocoaWindow(window);
+ NSView* view = [nsWindow contentView];
- // Create a CAMetalLayer that covers the whole window that will be passed to
- // CreateSurface.
- [view setWantsLayer:YES];
- [view setLayer:[CAMetalLayer layer]];
+ // Create a CAMetalLayer that covers the whole window that will be passed to
+ // CreateSurface.
+ [view setWantsLayer:YES];
+ [view setLayer:[CAMetalLayer layer]];
- // Use retina if the window was created with retina support.
- [[view layer] setContentsScale:[nsWindow backingScaleFactor]];
+ // Use retina if the window was created with retina support.
+ [[view layer] setContentsScale:[nsWindow backingScaleFactor]];
- std::unique_ptr<wgpu::SurfaceDescriptorFromMetalLayer> desc =
- std::make_unique<wgpu::SurfaceDescriptorFromMetalLayer>();
- desc->layer = [view layer];
- return std::move(desc);
- }
-
- return nullptr;
+ std::unique_ptr<wgpu::SurfaceDescriptorFromMetalLayer> desc =
+ std::make_unique<wgpu::SurfaceDescriptorFromMetalLayer>();
+ desc->layer = [view layer];
+ return std::move(desc);
}
+ return nullptr;
+}
+
} // namespace utils
diff --git a/chromium/third_party/dawn/src/dawn/utils/Glfw3Fuchsia.cpp b/chromium/third_party/dawn/src/dawn/utils/Glfw3Fuchsia.cpp
index 4caa7acc695..198a5fd2ec7 100644
--- a/chromium/third_party/dawn/src/dawn/utils/Glfw3Fuchsia.cpp
+++ b/chromium/third_party/dawn/src/dawn/utils/Glfw3Fuchsia.cpp
@@ -15,24 +15,23 @@
// A mock GLFW implementation that supports Fuchsia, but only implements
// the functions called from Dawn.
+#include <dlfcn.h>
+
// NOTE: This must be included before GLFW/glfw3.h because the latter will
// include <vulkan/vulkan.h> and "common/vulkan_platform.h" wants to be
-// the first header to do so for sanity reasons (e.g. undefining weird
+// the first header to do so for validity reasons (e.g. undefining weird
// macros on Windows and Linux).
// clang-format off
#include "dawn/common/vulkan_platform.h"
#include "dawn/common/Assert.h"
-#include <GLFW/glfw3.h>
+#include "GLFW/glfw3.h"
// clang-format on
-#include <dlfcn.h>
-
int glfwInit(void) {
return GLFW_TRUE;
}
-void glfwDefaultWindowHints(void) {
-}
+void glfwDefaultWindowHints(void) {}
void glfwWindowHint(int hint, int value) {
DAWN_UNUSED(hint);
diff --git a/chromium/third_party/dawn/src/dawn/utils/MetalBinding.mm b/chromium/third_party/dawn/src/dawn/utils/MetalBinding.mm
index b35245cf739..ef343617689 100644
--- a/chromium/third_party/dawn/src/dawn/utils/MetalBinding.mm
+++ b/chromium/third_party/dawn/src/dawn/utils/MetalBinding.mm
@@ -25,111 +25,109 @@
#import <QuartzCore/CAMetalLayer.h>
namespace utils {
- class SwapChainImplMTL {
- public:
- using WSIContext = DawnWSIContextMetal;
+class SwapChainImplMTL {
+ public:
+ using WSIContext = DawnWSIContextMetal;
- SwapChainImplMTL(id nsWindow) : mNsWindow(nsWindow) {
- }
+ SwapChainImplMTL(id nsWindow) : mNsWindow(nsWindow) {}
- ~SwapChainImplMTL() {
- [mCurrentTexture release];
- [mCurrentDrawable release];
- }
+ ~SwapChainImplMTL() {
+ [mCurrentTexture release];
+ [mCurrentDrawable release];
+ }
- void Init(DawnWSIContextMetal* ctx) {
- mMtlDevice = ctx->device;
- mCommandQueue = ctx->queue;
- }
+ void Init(DawnWSIContextMetal* ctx) {
+ mMtlDevice = ctx->device;
+ mCommandQueue = ctx->queue;
+ }
- DawnSwapChainError Configure(WGPUTextureFormat format,
- WGPUTextureUsage usage,
- uint32_t width,
- uint32_t height) {
- if (format != WGPUTextureFormat_BGRA8Unorm) {
- return "unsupported format";
- }
- ASSERT(width > 0);
- ASSERT(height > 0);
-
- NSView* contentView = [mNsWindow contentView];
- [contentView setWantsLayer:YES];
-
- CGSize size = {};
- size.width = width;
- size.height = height;
-
- mLayer = [CAMetalLayer layer];
- [mLayer setDevice:mMtlDevice];
- [mLayer setPixelFormat:MTLPixelFormatBGRA8Unorm];
- [mLayer setDrawableSize:size];
-
- constexpr uint32_t kFramebufferOnlyTextureUsages =
- WGPUTextureUsage_RenderAttachment | WGPUTextureUsage_Present;
- bool hasOnlyFramebufferUsages = !(usage & (~kFramebufferOnlyTextureUsages));
- if (hasOnlyFramebufferUsages) {
- [mLayer setFramebufferOnly:YES];
- }
-
- [contentView setLayer:mLayer];
-
- return DAWN_SWAP_CHAIN_NO_ERROR;
+ DawnSwapChainError Configure(WGPUTextureFormat format,
+ WGPUTextureUsage usage,
+ uint32_t width,
+ uint32_t height) {
+ if (format != WGPUTextureFormat_BGRA8Unorm) {
+ return "unsupported format";
+ }
+ ASSERT(width > 0);
+ ASSERT(height > 0);
+
+ NSView* contentView = [mNsWindow contentView];
+ [contentView setWantsLayer:YES];
+
+ CGSize size = {};
+ size.width = width;
+ size.height = height;
+
+ mLayer = [CAMetalLayer layer];
+ [mLayer setDevice:mMtlDevice];
+ [mLayer setPixelFormat:MTLPixelFormatBGRA8Unorm];
+ [mLayer setDrawableSize:size];
+
+ constexpr uint32_t kFramebufferOnlyTextureUsages =
+ WGPUTextureUsage_RenderAttachment | WGPUTextureUsage_Present;
+ bool hasOnlyFramebufferUsages = !(usage & (~kFramebufferOnlyTextureUsages));
+ if (hasOnlyFramebufferUsages) {
+ [mLayer setFramebufferOnly:YES];
}
- DawnSwapChainError GetNextTexture(DawnSwapChainNextTexture* nextTexture) {
- [mCurrentDrawable release];
- mCurrentDrawable = [mLayer nextDrawable];
- [mCurrentDrawable retain];
+ [contentView setLayer:mLayer];
- [mCurrentTexture release];
- mCurrentTexture = mCurrentDrawable.texture;
- [mCurrentTexture retain];
+ return DAWN_SWAP_CHAIN_NO_ERROR;
+ }
- nextTexture->texture.ptr = reinterpret_cast<void*>(mCurrentTexture);
+ DawnSwapChainError GetNextTexture(DawnSwapChainNextTexture* nextTexture) {
+ [mCurrentDrawable release];
+ mCurrentDrawable = [mLayer nextDrawable];
+ [mCurrentDrawable retain];
- return DAWN_SWAP_CHAIN_NO_ERROR;
- }
+ [mCurrentTexture release];
+ mCurrentTexture = mCurrentDrawable.texture;
+ [mCurrentTexture retain];
- DawnSwapChainError Present() {
- id<MTLCommandBuffer> commandBuffer = [mCommandQueue commandBuffer];
- [commandBuffer presentDrawable:mCurrentDrawable];
- [commandBuffer commit];
+ nextTexture->texture.ptr = reinterpret_cast<void*>(mCurrentTexture);
- return DAWN_SWAP_CHAIN_NO_ERROR;
- }
+ return DAWN_SWAP_CHAIN_NO_ERROR;
+ }
- private:
- id mNsWindow = nil;
- id<MTLDevice> mMtlDevice = nil;
- id<MTLCommandQueue> mCommandQueue = nil;
+ DawnSwapChainError Present() {
+ id<MTLCommandBuffer> commandBuffer = [mCommandQueue commandBuffer];
+ [commandBuffer presentDrawable:mCurrentDrawable];
+ [commandBuffer commit];
- CAMetalLayer* mLayer = nullptr;
- id<CAMetalDrawable> mCurrentDrawable = nil;
- id<MTLTexture> mCurrentTexture = nil;
- };
+ return DAWN_SWAP_CHAIN_NO_ERROR;
+ }
- class MetalBinding : public BackendBinding {
- public:
- MetalBinding(GLFWwindow* window, WGPUDevice device) : BackendBinding(window, device) {
- }
+ private:
+ id mNsWindow = nil;
+ id<MTLDevice> mMtlDevice = nil;
+ id<MTLCommandQueue> mCommandQueue = nil;
- uint64_t GetSwapChainImplementation() override {
- if (mSwapchainImpl.userData == nullptr) {
- mSwapchainImpl = CreateSwapChainImplementation(
- new SwapChainImplMTL(glfwGetCocoaWindow(mWindow)));
- }
- return reinterpret_cast<uint64_t>(&mSwapchainImpl);
- }
+ CAMetalLayer* mLayer = nullptr;
+ id<CAMetalDrawable> mCurrentDrawable = nil;
+ id<MTLTexture> mCurrentTexture = nil;
+};
- WGPUTextureFormat GetPreferredSwapChainTextureFormat() override {
- return WGPUTextureFormat_BGRA8Unorm;
- }
+class MetalBinding : public BackendBinding {
+ public:
+ MetalBinding(GLFWwindow* window, WGPUDevice device) : BackendBinding(window, device) {}
- private:
- DawnSwapChainImplementation mSwapchainImpl = {};
- };
+ uint64_t GetSwapChainImplementation() override {
+ if (mSwapchainImpl.userData == nullptr) {
+ mSwapchainImpl =
+ CreateSwapChainImplementation(new SwapChainImplMTL(glfwGetCocoaWindow(mWindow)));
+ }
+ return reinterpret_cast<uint64_t>(&mSwapchainImpl);
+ }
- BackendBinding* CreateMetalBinding(GLFWwindow* window, WGPUDevice device) {
- return new MetalBinding(window, device);
+ WGPUTextureFormat GetPreferredSwapChainTextureFormat() override {
+ return WGPUTextureFormat_BGRA8Unorm;
}
+
+ private:
+ DawnSwapChainImplementation mSwapchainImpl = {};
+};
+
+BackendBinding* CreateMetalBinding(GLFWwindow* window, WGPUDevice device) {
+ return new MetalBinding(window, device);
}
+} // namespace utils
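
For context, a sketch of how these bindings are consumed in Dawn samples of this era; the implementation-based wgpu::SwapChainDescriptor::implementation field and the SwapChain::Configure call are assumed from that usage and are not part of this patch. `window` and `device` are placeholders.

// Sketch only; `window` (GLFWwindow*) and `device` (wgpu::Device) are assumed to exist.
// #include "dawn/utils/BackendBinding.h"
utils::BackendBinding* binding = utils::CreateMetalBinding(window, device.Get());

wgpu::SwapChainDescriptor swapChainDesc;
swapChainDesc.implementation = binding->GetSwapChainImplementation();  // handle from above
wgpu::SwapChain swapChain = device.CreateSwapChain(nullptr, &swapChainDesc);
swapChain.Configure(binding->GetPreferredSwapChainTextureFormat(),
                    wgpu::TextureUsage::RenderAttachment, 640, 480);
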
diff --git a/chromium/third_party/dawn/src/dawn/utils/NullBinding.cpp b/chromium/third_party/dawn/src/dawn/utils/NullBinding.cpp
index c33b6dda40f..cddc0e66b36 100644
--- a/chromium/third_party/dawn/src/dawn/utils/NullBinding.cpp
+++ b/chromium/third_party/dawn/src/dawn/utils/NullBinding.cpp
@@ -12,36 +12,35 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+#include <memory>
+
#include "dawn/utils/BackendBinding.h"
#include "dawn/common/Assert.h"
#include "dawn/native/NullBackend.h"
-#include <memory>
-
namespace utils {
- class NullBinding : public BackendBinding {
- public:
- NullBinding(GLFWwindow* window, WGPUDevice device) : BackendBinding(window, device) {
- }
+class NullBinding : public BackendBinding {
+ public:
+ NullBinding(GLFWwindow* window, WGPUDevice device) : BackendBinding(window, device) {}
- uint64_t GetSwapChainImplementation() override {
- if (mSwapchainImpl.userData == nullptr) {
- mSwapchainImpl = dawn::native::null::CreateNativeSwapChainImpl();
- }
- return reinterpret_cast<uint64_t>(&mSwapchainImpl);
- }
- WGPUTextureFormat GetPreferredSwapChainTextureFormat() override {
- return WGPUTextureFormat_RGBA8Unorm;
+ uint64_t GetSwapChainImplementation() override {
+ if (mSwapchainImpl.userData == nullptr) {
+ mSwapchainImpl = dawn::native::null::CreateNativeSwapChainImpl();
}
+ return reinterpret_cast<uint64_t>(&mSwapchainImpl);
+ }
+ WGPUTextureFormat GetPreferredSwapChainTextureFormat() override {
+ return WGPUTextureFormat_RGBA8Unorm;
+ }
- private:
- DawnSwapChainImplementation mSwapchainImpl = {};
- };
+ private:
+ DawnSwapChainImplementation mSwapchainImpl = {};
+};
- BackendBinding* CreateNullBinding(GLFWwindow* window, WGPUDevice device) {
- return new NullBinding(window, device);
- }
+BackendBinding* CreateNullBinding(GLFWwindow* window, WGPUDevice device) {
+ return new NullBinding(window, device);
+}
} // namespace utils
diff --git a/chromium/third_party/dawn/src/dawn/utils/OSXTimer.cpp b/chromium/third_party/dawn/src/dawn/utils/OSXTimer.cpp
index 818b27bf717..ac749b15006 100644
--- a/chromium/third_party/dawn/src/dawn/utils/OSXTimer.cpp
+++ b/chromium/third_party/dawn/src/dawn/utils/OSXTimer.cpp
@@ -12,66 +12,63 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-#include "dawn/utils/Timer.h"
-
#include <CoreServices/CoreServices.h>
#include <mach/mach.h>
#include <mach/mach_time.h>
-namespace utils {
+#include "dawn/utils/Timer.h"
- class OSXTimer : public Timer {
- public:
- OSXTimer() : Timer(), mRunning(false), mSecondCoeff(0) {
- }
+namespace utils {
- ~OSXTimer() override = default;
+class OSXTimer : public Timer {
+ public:
+ OSXTimer() : Timer(), mRunning(false), mSecondCoeff(0) {}
- void Start() override {
- mStartTime = mach_absolute_time();
- // Cache secondCoeff
- GetSecondCoeff();
- mRunning = true;
- }
+ ~OSXTimer() override = default;
- void Stop() override {
- mStopTime = mach_absolute_time();
- mRunning = false;
- }
+ void Start() override {
+ mStartTime = mach_absolute_time();
+ // Cache secondCoeff
+ GetSecondCoeff();
+ mRunning = true;
+ }
- double GetElapsedTime() const override {
- if (mRunning) {
- return mSecondCoeff * (mach_absolute_time() - mStartTime);
- } else {
- return mSecondCoeff * (mStopTime - mStartTime);
- }
- }
+ void Stop() override {
+ mStopTime = mach_absolute_time();
+ mRunning = false;
+ }
- double GetAbsoluteTime() override {
- return GetSecondCoeff() * mach_absolute_time();
+ double GetElapsedTime() const override {
+ if (mRunning) {
+ return mSecondCoeff * (mach_absolute_time() - mStartTime);
+ } else {
+ return mSecondCoeff * (mStopTime - mStartTime);
}
+ }
- private:
- double GetSecondCoeff() {
- // If this is the first time we've run, get the timebase.
- if (mSecondCoeff == 0.0) {
- mach_timebase_info_data_t timebaseInfo;
- mach_timebase_info(&timebaseInfo);
+ double GetAbsoluteTime() override { return GetSecondCoeff() * mach_absolute_time(); }
- mSecondCoeff = timebaseInfo.numer * (1.0 / 1000000000) / timebaseInfo.denom;
- }
+ private:
+ double GetSecondCoeff() {
+ // If this is the first time we've run, get the timebase.
+ if (mSecondCoeff == 0.0) {
+ mach_timebase_info_data_t timebaseInfo;
+ mach_timebase_info(&timebaseInfo);
- return mSecondCoeff;
+ mSecondCoeff = timebaseInfo.numer * (1.0 / 1000000000) / timebaseInfo.denom;
}
- bool mRunning;
- uint64_t mStartTime;
- uint64_t mStopTime;
- double mSecondCoeff;
- };
-
- Timer* CreateTimer() {
- return new OSXTimer();
+ return mSecondCoeff;
}
+ bool mRunning;
+ uint64_t mStartTime;
+ uint64_t mStopTime;
+ double mSecondCoeff;
+};
+
+Timer* CreateTimer() {
+ return new OSXTimer();
+}
+
} // namespace utils
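
The mach timebase conversion above folds numer/denom into a single seconds-per-tick coefficient (for example, a 125/3 timebase, commonly reported on Apple Silicon, gives 125 / (3 * 1e9) ≈ 41.7 ns per tick). A short usage sketch of the Timer interface implemented here; `DoWork` is a placeholder.

// Sketch only; DoWork() stands in for whatever is being measured.
// #include "dawn/utils/Timer.h"
utils::Timer* timer = utils::CreateTimer();
timer->Start();
DoWork();
timer->Stop();
double seconds = timer->GetElapsedTime();  // (stop - start) ticks scaled by the coefficient
delete timer;
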
diff --git a/chromium/third_party/dawn/src/dawn/utils/ObjCUtils.h b/chromium/third_party/dawn/src/dawn/utils/ObjCUtils.h
index 543fb6f73ea..86318192b8c 100644
--- a/chromium/third_party/dawn/src/dawn/utils/ObjCUtils.h
+++ b/chromium/third_party/dawn/src/dawn/utils/ObjCUtils.h
@@ -21,8 +21,8 @@
namespace utils {
- // The returned CALayer is autoreleased.
- void* CreateDummyCALayer();
+// The returned CALayer is autoreleased.
+void* CreatePlaceholderCALayer();
} // namespace utils
diff --git a/chromium/third_party/dawn/src/dawn/utils/ObjCUtils.mm b/chromium/third_party/dawn/src/dawn/utils/ObjCUtils.mm
index c006976409f..171f5fcbf65 100644
--- a/chromium/third_party/dawn/src/dawn/utils/ObjCUtils.mm
+++ b/chromium/third_party/dawn/src/dawn/utils/ObjCUtils.mm
@@ -18,8 +18,8 @@
namespace utils {
- void* CreateDummyCALayer() {
- return [CALayer layer];
- }
+void* CreatePlaceholderCALayer() {
+ return [CALayer layer];
+}
} // namespace utils
diff --git a/chromium/third_party/dawn/src/dawn/utils/OpenGLBinding.cpp b/chromium/third_party/dawn/src/dawn/utils/OpenGLBinding.cpp
index 35972afc648..9d752552c21 100644
--- a/chromium/third_party/dawn/src/dawn/utils/OpenGLBinding.cpp
+++ b/chromium/third_party/dawn/src/dawn/utils/OpenGLBinding.cpp
@@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+#include <cstdio>
+
#include "dawn/utils/BackendBinding.h"
#include "dawn/common/Assert.h"
@@ -20,36 +22,34 @@
#include "dawn/dawn_wsi.h"
#include "dawn/native/OpenGLBackend.h"
-#include <cstdio>
#include "GLFW/glfw3.h"
namespace utils {
- class OpenGLBinding : public BackendBinding {
- public:
- OpenGLBinding(GLFWwindow* window, WGPUDevice device) : BackendBinding(window, device) {
- }
+class OpenGLBinding : public BackendBinding {
+ public:
+ OpenGLBinding(GLFWwindow* window, WGPUDevice device) : BackendBinding(window, device) {}
- uint64_t GetSwapChainImplementation() override {
- if (mSwapchainImpl.userData == nullptr) {
- mSwapchainImpl = dawn::native::opengl::CreateNativeSwapChainImpl(
- mDevice,
- [](void* userdata) { glfwSwapBuffers(static_cast<GLFWwindow*>(userdata)); },
- mWindow);
- }
- return reinterpret_cast<uint64_t>(&mSwapchainImpl);
+ uint64_t GetSwapChainImplementation() override {
+ if (mSwapchainImpl.userData == nullptr) {
+ mSwapchainImpl = dawn::native::opengl::CreateNativeSwapChainImpl(
+ mDevice,
+ [](void* userdata) { glfwSwapBuffers(static_cast<GLFWwindow*>(userdata)); },
+ mWindow);
}
+ return reinterpret_cast<uint64_t>(&mSwapchainImpl);
+ }
- WGPUTextureFormat GetPreferredSwapChainTextureFormat() override {
- return dawn::native::opengl::GetNativeSwapChainPreferredFormat(&mSwapchainImpl);
- }
+ WGPUTextureFormat GetPreferredSwapChainTextureFormat() override {
+ return dawn::native::opengl::GetNativeSwapChainPreferredFormat(&mSwapchainImpl);
+ }
- private:
- DawnSwapChainImplementation mSwapchainImpl = {};
- };
+ private:
+ DawnSwapChainImplementation mSwapchainImpl = {};
+};
- BackendBinding* CreateOpenGLBinding(GLFWwindow* window, WGPUDevice device) {
- return new OpenGLBinding(window, device);
- }
+BackendBinding* CreateOpenGLBinding(GLFWwindow* window, WGPUDevice device) {
+ return new OpenGLBinding(window, device);
+}
} // namespace utils
diff --git a/chromium/third_party/dawn/src/dawn/utils/PlatformDebugLogger.h b/chromium/third_party/dawn/src/dawn/utils/PlatformDebugLogger.h
index 23ff4c8fa07..a67af179ea1 100644
--- a/chromium/third_party/dawn/src/dawn/utils/PlatformDebugLogger.h
+++ b/chromium/third_party/dawn/src/dawn/utils/PlatformDebugLogger.h
@@ -17,12 +17,12 @@
namespace utils {
- class PlatformDebugLogger {
- public:
- virtual ~PlatformDebugLogger() = default;
- };
+class PlatformDebugLogger {
+ public:
+ virtual ~PlatformDebugLogger() = default;
+};
- PlatformDebugLogger* CreatePlatformDebugLogger();
+PlatformDebugLogger* CreatePlatformDebugLogger();
} // namespace utils
diff --git a/chromium/third_party/dawn/src/dawn/utils/PosixTimer.cpp b/chromium/third_party/dawn/src/dawn/utils/PosixTimer.cpp
index 18eb5e6bf95..9b43e185bbd 100644
--- a/chromium/third_party/dawn/src/dawn/utils/PosixTimer.cpp
+++ b/chromium/third_party/dawn/src/dawn/utils/PosixTimer.cpp
@@ -12,63 +12,60 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-#include "dawn/utils/Timer.h"
-
#include <stdint.h>
#include <time.h>
-namespace utils {
+#include "dawn/utils/Timer.h"
- namespace {
+namespace utils {
- uint64_t GetCurrentTimeNs() {
- struct timespec currentTime;
- clock_gettime(CLOCK_MONOTONIC, &currentTime);
- return currentTime.tv_sec * 1'000'000'000llu + currentTime.tv_nsec;
- }
+namespace {
- } // anonymous namespace
+uint64_t GetCurrentTimeNs() {
+ struct timespec currentTime;
+ clock_gettime(CLOCK_MONOTONIC, &currentTime);
+ return currentTime.tv_sec * 1'000'000'000llu + currentTime.tv_nsec;
+}
- class PosixTimer : public Timer {
- public:
- PosixTimer() : Timer(), mRunning(false) {
- }
+} // anonymous namespace
- ~PosixTimer() override = default;
+class PosixTimer : public Timer {
+ public:
+ PosixTimer() : Timer(), mRunning(false) {}
- void Start() override {
- mStartTimeNs = GetCurrentTimeNs();
- mRunning = true;
- }
+ ~PosixTimer() override = default;
- void Stop() override {
- mStopTimeNs = GetCurrentTimeNs();
- mRunning = false;
- }
+ void Start() override {
+ mStartTimeNs = GetCurrentTimeNs();
+ mRunning = true;
+ }
- double GetElapsedTime() const override {
- uint64_t endTimeNs;
- if (mRunning) {
- endTimeNs = GetCurrentTimeNs();
- } else {
- endTimeNs = mStopTimeNs;
- }
+ void Stop() override {
+ mStopTimeNs = GetCurrentTimeNs();
+ mRunning = false;
+ }
- return (endTimeNs - mStartTimeNs) * 1e-9;
+ double GetElapsedTime() const override {
+ uint64_t endTimeNs;
+ if (mRunning) {
+ endTimeNs = GetCurrentTimeNs();
+ } else {
+ endTimeNs = mStopTimeNs;
}
- double GetAbsoluteTime() override {
- return GetCurrentTimeNs() * 1e-9;
- }
+ return (endTimeNs - mStartTimeNs) * 1e-9;
+ }
- private:
- bool mRunning;
- uint64_t mStartTimeNs;
- uint64_t mStopTimeNs;
- };
+ double GetAbsoluteTime() override { return GetCurrentTimeNs() * 1e-9; }
- Timer* CreateTimer() {
- return new PosixTimer();
- }
+ private:
+ bool mRunning;
+ uint64_t mStartTimeNs;
+ uint64_t mStopTimeNs;
+};
+
+Timer* CreateTimer() {
+ return new PosixTimer();
+}
} // namespace utils
diff --git a/chromium/third_party/dawn/src/dawn/utils/ScopedAutoreleasePool.cpp b/chromium/third_party/dawn/src/dawn/utils/ScopedAutoreleasePool.cpp
index 2f5f050a44b..da5b098b3d1 100644
--- a/chromium/third_party/dawn/src/dawn/utils/ScopedAutoreleasePool.cpp
+++ b/chromium/third_party/dawn/src/dawn/utils/ScopedAutoreleasePool.cpp
@@ -18,17 +18,16 @@
namespace utils {
- ScopedAutoreleasePool::ScopedAutoreleasePool() : mPool(nullptr) {
- DAWN_UNUSED(mPool);
- }
+ScopedAutoreleasePool::ScopedAutoreleasePool() : mPool(nullptr) {
+ DAWN_UNUSED(mPool);
+}
- ScopedAutoreleasePool::~ScopedAutoreleasePool() = default;
+ScopedAutoreleasePool::~ScopedAutoreleasePool() = default;
- ScopedAutoreleasePool::ScopedAutoreleasePool(ScopedAutoreleasePool&& rhs) {
- }
+ScopedAutoreleasePool::ScopedAutoreleasePool(ScopedAutoreleasePool&& rhs) {}
- ScopedAutoreleasePool& ScopedAutoreleasePool::operator=(ScopedAutoreleasePool&& rhs) {
- return *this;
- }
+ScopedAutoreleasePool& ScopedAutoreleasePool::operator=(ScopedAutoreleasePool&& rhs) {
+ return *this;
+}
} // namespace utils
diff --git a/chromium/third_party/dawn/src/dawn/utils/ScopedAutoreleasePool.h b/chromium/third_party/dawn/src/dawn/utils/ScopedAutoreleasePool.h
index bad97041dd5..4f724ca43aa 100644
--- a/chromium/third_party/dawn/src/dawn/utils/ScopedAutoreleasePool.h
+++ b/chromium/third_party/dawn/src/dawn/utils/ScopedAutoreleasePool.h
@@ -15,46 +15,46 @@
#ifndef SRC_DAWN_UTILS_SCOPEDAUTORELEASEPOOL_H_
#define SRC_DAWN_UTILS_SCOPEDAUTORELEASEPOOL_H_
-#include "dawn/common/Compiler.h"
-
#include <cstddef>
+#include "dawn/common/Compiler.h"
+
namespace utils {
- /**
- * ScopedAutoreleasePool is a scoped class which initializes an NSAutoreleasePool on
- * creation, and drains it on destruction. On non-Apple platforms, ScopedAutoreleasePool
- * is a no-op.
- *
- * An autoreleasepool is needed when using protocol objects in Objective-C because Cocoa
- * expects a pool to always be available in each thread. If a pool is not available, then
- * autoreleased objects will never be released and will leak.
- *
- * In long-running blocks of code or loops, it is important to periodically create and drain
- * autorelease pools so that memory is recycled. In Dawn's tests, we have an autoreleasepool
- * per-test. In graphics applications it's advised to create an autoreleasepool around the
- * frame loop. Ex.)
- * void frame() {
- * // Any protocol objects will be reclaimed when this object falls out of scope.
- * utils::ScopedAutoreleasePool pool;
- *
- * // do rendering ...
- * }
- */
- class [[nodiscard]] ScopedAutoreleasePool {
- public:
- ScopedAutoreleasePool();
- ~ScopedAutoreleasePool();
-
- ScopedAutoreleasePool(const ScopedAutoreleasePool&) = delete;
- ScopedAutoreleasePool& operator=(const ScopedAutoreleasePool&) = delete;
-
- ScopedAutoreleasePool(ScopedAutoreleasePool &&);
- ScopedAutoreleasePool& operator=(ScopedAutoreleasePool&&);
-
- private:
- void* mPool = nullptr;
- };
+/**
+ * ScopedAutoreleasePool is a scoped class which initializes an NSAutoreleasePool on
+ * creation, and drains it on destruction. On non-Apple platforms, ScopedAutoreleasePool
+ * is a no-op.
+ *
+ * An autoreleasepool is needed when using protocol objects in Objective-C because Cocoa
+ * expects a pool to always be available in each thread. If a pool is not available, then
+ * autoreleased objects will never be released and will leak.
+ *
+ * In long-running blocks of code or loops, it is important to periodically create and drain
+ * autorelease pools so that memory is recycled. In Dawn's tests, we have an autoreleasepool
+ * per-test. In graphics applications it's advised to create an autoreleasepool around the
+ * frame loop. Ex.)
+ * void frame() {
+ * // Any protocol objects will be reclaimed when this object falls out of scope.
+ * utils::ScopedAutoreleasePool pool;
+ *
+ * // do rendering ...
+ * }
+ */
+class [[nodiscard]] ScopedAutoreleasePool {
+ public:
+ ScopedAutoreleasePool();
+ ~ScopedAutoreleasePool();
+
+ ScopedAutoreleasePool(const ScopedAutoreleasePool&) = delete;
+ ScopedAutoreleasePool& operator=(const ScopedAutoreleasePool&) = delete;
+
+ ScopedAutoreleasePool(ScopedAutoreleasePool&&);
+ ScopedAutoreleasePool& operator=(ScopedAutoreleasePool&&);
+
+ private:
+ void* mPool = nullptr;
+};
} // namespace utils
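
A short sketch expanding the frame-loop advice in the comment above; `ShouldQuit` and `RenderFrame` are placeholders.

// Sketch only; ShouldQuit() and RenderFrame() are placeholders.
// #include "dawn/utils/ScopedAutoreleasePool.h"
while (!ShouldQuit()) {
    utils::ScopedAutoreleasePool pool;  // pool opened at the top of the frame
    RenderFrame();                      // autoreleased ObjC objects accumulate here
}                                       // pool drained when it goes out of scope
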
diff --git a/chromium/third_party/dawn/src/dawn/utils/ScopedAutoreleasePool.mm b/chromium/third_party/dawn/src/dawn/utils/ScopedAutoreleasePool.mm
index c4cb9a285a2..7aa54724559 100644
--- a/chromium/third_party/dawn/src/dawn/utils/ScopedAutoreleasePool.mm
+++ b/chromium/third_party/dawn/src/dawn/utils/ScopedAutoreleasePool.mm
@@ -18,27 +18,26 @@
namespace utils {
- ScopedAutoreleasePool::ScopedAutoreleasePool() : mPool([[NSAutoreleasePool alloc] init]) {
- }
+ScopedAutoreleasePool::ScopedAutoreleasePool() : mPool([[NSAutoreleasePool alloc] init]) {}
- ScopedAutoreleasePool::~ScopedAutoreleasePool() {
- if (mPool != nullptr) {
- [static_cast<NSAutoreleasePool*>(mPool) release];
- mPool = nullptr;
- }
+ScopedAutoreleasePool::~ScopedAutoreleasePool() {
+ if (mPool != nullptr) {
+ [static_cast<NSAutoreleasePool*>(mPool) release];
+ mPool = nullptr;
}
+}
+
+ScopedAutoreleasePool::ScopedAutoreleasePool(ScopedAutoreleasePool&& rhs) {
+ mPool = rhs.mPool;
+ rhs.mPool = nullptr;
+}
- ScopedAutoreleasePool::ScopedAutoreleasePool(ScopedAutoreleasePool&& rhs) {
+ScopedAutoreleasePool& ScopedAutoreleasePool::operator=(ScopedAutoreleasePool&& rhs) {
+ if (&rhs != this) {
mPool = rhs.mPool;
rhs.mPool = nullptr;
}
-
- ScopedAutoreleasePool& ScopedAutoreleasePool::operator=(ScopedAutoreleasePool&& rhs) {
- if (&rhs != this) {
- mPool = rhs.mPool;
- rhs.mPool = nullptr;
- }
- return *this;
- }
+ return *this;
+}
} // namespace utils
diff --git a/chromium/third_party/dawn/src/dawn/utils/SystemUtils.cpp b/chromium/third_party/dawn/src/dawn/utils/SystemUtils.cpp
index 221fb711120..693a6ceb10b 100644
--- a/chromium/third_party/dawn/src/dawn/utils/SystemUtils.cpp
+++ b/chromium/third_party/dawn/src/dawn/utils/SystemUtils.cpp
@@ -16,26 +16,26 @@
#include "dawn/common/Platform.h"
-#if defined(DAWN_PLATFORM_WINDOWS)
-# include <Windows.h>
-#elif defined(DAWN_PLATFORM_POSIX)
-# include <unistd.h>
+#if DAWN_PLATFORM_IS(WINDOWS)
+#include <Windows.h>
+#elif DAWN_PLATFORM_IS(POSIX)
+#include <unistd.h>
#else
-# error "Unsupported platform."
+#error "Unsupported platform."
#endif
namespace utils {
-#if defined(DAWN_PLATFORM_WINDOWS)
- void USleep(unsigned int usecs) {
- Sleep(static_cast<DWORD>(usecs / 1000));
- }
-#elif defined(DAWN_PLATFORM_POSIX)
- void USleep(unsigned int usecs) {
- usleep(usecs);
- }
+#if DAWN_PLATFORM_IS(WINDOWS)
+void USleep(unsigned int usecs) {
+ Sleep(static_cast<DWORD>(usecs / 1000));
+}
+#elif DAWN_PLATFORM_IS(POSIX)
+void USleep(unsigned int usecs) {
+ usleep(usecs);
+}
#else
-# error "Implement USleep for your platform."
+#error "Implement USleep for your platform."
#endif
} // namespace utils
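
The substantive change in this file is the switch from defined(DAWN_PLATFORM_WINDOWS) to the function-like DAWN_PLATFORM_IS(WINDOWS) check. A hedged sketch of the general pattern such a macro follows; the names are made up for illustration and this is not Dawn's actual Platform.h, which is outside this diff.

// Illustration only (hypothetical names); the real definitions live in dawn/common/Platform.h.
#define MYLIB_PLATFORM_IS_WINDOWS 0
#define MYLIB_PLATFORM_IS_POSIX 1
// Token-paste the argument onto the prefix and compare against 1, so every check is a
// single expression of the form MYLIB_PLATFORM_IS(NAME).
#define MYLIB_PLATFORM_IS(X) (1 == MYLIB_PLATFORM_IS_##X)

#if MYLIB_PLATFORM_IS(POSIX)
// POSIX-specific code goes here.
#endif
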
diff --git a/chromium/third_party/dawn/src/dawn/utils/SystemUtils.h b/chromium/third_party/dawn/src/dawn/utils/SystemUtils.h
index 1be5439b607..bf14cdd1755 100644
--- a/chromium/third_party/dawn/src/dawn/utils/SystemUtils.h
+++ b/chromium/third_party/dawn/src/dawn/utils/SystemUtils.h
@@ -17,7 +17,7 @@
namespace utils {
- void USleep(unsigned int usecs);
+void USleep(unsigned int usecs);
}
#endif // SRC_DAWN_UTILS_SYSTEMUTILS_H_
diff --git a/chromium/third_party/dawn/src/dawn/utils/TerribleCommandBuffer.cpp b/chromium/third_party/dawn/src/dawn/utils/TerribleCommandBuffer.cpp
index b99243b435f..3e100d53ea1 100644
--- a/chromium/third_party/dawn/src/dawn/utils/TerribleCommandBuffer.cpp
+++ b/chromium/third_party/dawn/src/dawn/utils/TerribleCommandBuffer.cpp
@@ -18,42 +18,40 @@
namespace utils {
- TerribleCommandBuffer::TerribleCommandBuffer() {
- }
+TerribleCommandBuffer::TerribleCommandBuffer() {}
- TerribleCommandBuffer::TerribleCommandBuffer(dawn::wire::CommandHandler* handler)
- : mHandler(handler) {
- }
+TerribleCommandBuffer::TerribleCommandBuffer(dawn::wire::CommandHandler* handler)
+ : mHandler(handler) {}
- void TerribleCommandBuffer::SetHandler(dawn::wire::CommandHandler* handler) {
- mHandler = handler;
- }
+void TerribleCommandBuffer::SetHandler(dawn::wire::CommandHandler* handler) {
+ mHandler = handler;
+}
- size_t TerribleCommandBuffer::GetMaximumAllocationSize() const {
- return sizeof(mBuffer);
- }
+size_t TerribleCommandBuffer::GetMaximumAllocationSize() const {
+ return sizeof(mBuffer);
+}
- void* TerribleCommandBuffer::GetCmdSpace(size_t size) {
- // Note: This returns non-null even if size is zero.
- if (size > sizeof(mBuffer)) {
+void* TerribleCommandBuffer::GetCmdSpace(size_t size) {
+ // Note: This returns non-null even if size is zero.
+ if (size > sizeof(mBuffer)) {
+ return nullptr;
+ }
+ char* result = &mBuffer[mOffset];
+ if (sizeof(mBuffer) - size < mOffset) {
+ if (!Flush()) {
return nullptr;
}
- char* result = &mBuffer[mOffset];
- if (sizeof(mBuffer) - size < mOffset) {
- if (!Flush()) {
- return nullptr;
- }
- return GetCmdSpace(size);
- }
-
- mOffset += size;
- return result;
+ return GetCmdSpace(size);
}
- bool TerribleCommandBuffer::Flush() {
- bool success = mHandler->HandleCommands(mBuffer, mOffset) != nullptr;
- mOffset = 0;
- return success;
- }
+ mOffset += size;
+ return result;
+}
+
+bool TerribleCommandBuffer::Flush() {
+ bool success = mHandler->HandleCommands(mBuffer, mOffset) != nullptr;
+ mOffset = 0;
+ return success;
+}
} // namespace utils
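
A sketch of how the serializer above is driven; `handler` is any dawn::wire::CommandHandler and the payload is placeholder data.

// Sketch only; `handler` (dawn::wire::CommandHandler*) is assumed to exist.
// #include <cstring>
// #include "dawn/utils/TerribleCommandBuffer.h"
utils::TerribleCommandBuffer serializer(handler);

const char payload[] = "example bytes";
// GetCmdSpace returns nullptr if the request exceeds the 1,000,000-byte buffer or an
// intermediate Flush() fails; otherwise it may flush internally to make room.
void* space = serializer.GetCmdSpace(sizeof(payload));
if (space != nullptr) {
    memcpy(space, payload, sizeof(payload));
}
serializer.Flush();  // forwards the buffered bytes to handler->HandleCommands()
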
diff --git a/chromium/third_party/dawn/src/dawn/utils/TerribleCommandBuffer.h b/chromium/third_party/dawn/src/dawn/utils/TerribleCommandBuffer.h
index cf6520bd6e3..3fac52fb052 100644
--- a/chromium/third_party/dawn/src/dawn/utils/TerribleCommandBuffer.h
+++ b/chromium/third_party/dawn/src/dawn/utils/TerribleCommandBuffer.h
@@ -19,23 +19,23 @@
namespace utils {
- class TerribleCommandBuffer : public dawn::wire::CommandSerializer {
- public:
- TerribleCommandBuffer();
- explicit TerribleCommandBuffer(dawn::wire::CommandHandler* handler);
+class TerribleCommandBuffer : public dawn::wire::CommandSerializer {
+ public:
+ TerribleCommandBuffer();
+ explicit TerribleCommandBuffer(dawn::wire::CommandHandler* handler);
- void SetHandler(dawn::wire::CommandHandler* handler);
+ void SetHandler(dawn::wire::CommandHandler* handler);
- size_t GetMaximumAllocationSize() const override;
+ size_t GetMaximumAllocationSize() const override;
- void* GetCmdSpace(size_t size) override;
- bool Flush() override;
+ void* GetCmdSpace(size_t size) override;
+ bool Flush() override;
- private:
- dawn::wire::CommandHandler* mHandler = nullptr;
- size_t mOffset = 0;
- char mBuffer[1000000];
- };
+ private:
+ dawn::wire::CommandHandler* mHandler = nullptr;
+ size_t mOffset = 0;
+ char mBuffer[1000000];
+};
} // namespace utils
diff --git a/chromium/third_party/dawn/src/dawn/utils/TestUtils.cpp b/chromium/third_party/dawn/src/dawn/utils/TestUtils.cpp
index 31535f26440..e5b41cf0a9f 100644
--- a/chromium/third_party/dawn/src/dawn/utils/TestUtils.cpp
+++ b/chromium/third_party/dawn/src/dawn/utils/TestUtils.cpp
@@ -12,170 +12,168 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-#include "dawn/utils/TestUtils.h"
+#include <algorithm>
+#include <vector>
#include "dawn/common/Assert.h"
#include "dawn/common/Constants.h"
#include "dawn/common/Math.h"
+#include "dawn/utils/TestUtils.h"
#include "dawn/utils/TextureUtils.h"
#include "dawn/utils/WGPUHelpers.h"
-#include <vector>
-
namespace utils {
- uint32_t GetMinimumBytesPerRow(wgpu::TextureFormat format, uint32_t width) {
- const uint32_t bytesPerBlock = utils::GetTexelBlockSizeInBytes(format);
- const uint32_t blockWidth = utils::GetTextureFormatBlockWidth(format);
- ASSERT(width % blockWidth == 0);
- return Align(bytesPerBlock * (width / blockWidth), kTextureBytesPerRowAlignment);
- }
-
- TextureDataCopyLayout GetTextureDataCopyLayoutForTextureAtLevel(
- wgpu::TextureFormat format,
- wgpu::Extent3D textureSizeAtLevel0,
- uint32_t mipmapLevel,
- wgpu::TextureDimension dimension,
- uint32_t rowsPerImage) {
- // Compressed texture formats not supported in this function yet.
- ASSERT(utils::GetTextureFormatBlockWidth(format) == 1);
-
- TextureDataCopyLayout layout;
-
- layout.mipSize = {std::max(textureSizeAtLevel0.width >> mipmapLevel, 1u),
- std::max(textureSizeAtLevel0.height >> mipmapLevel, 1u),
- textureSizeAtLevel0.depthOrArrayLayers};
-
- if (dimension == wgpu::TextureDimension::e3D) {
- layout.mipSize.depthOrArrayLayers =
- std::max(textureSizeAtLevel0.depthOrArrayLayers >> mipmapLevel, 1u);
- }
-
- layout.bytesPerRow = GetMinimumBytesPerRow(format, layout.mipSize.width);
-
- if (rowsPerImage == wgpu::kCopyStrideUndefined) {
- rowsPerImage = layout.mipSize.height;
- }
- layout.rowsPerImage = rowsPerImage;
-
- uint32_t appliedRowsPerImage = rowsPerImage > 0 ? rowsPerImage : layout.mipSize.height;
- layout.bytesPerImage = layout.bytesPerRow * appliedRowsPerImage;
-
- layout.byteLength =
- RequiredBytesInCopy(layout.bytesPerRow, appliedRowsPerImage, layout.mipSize, format);
-
- const uint32_t bytesPerTexel = utils::GetTexelBlockSizeInBytes(format);
- layout.texelBlocksPerRow = layout.bytesPerRow / bytesPerTexel;
- layout.texelBlocksPerImage = layout.bytesPerImage / bytesPerTexel;
- layout.texelBlockCount = layout.byteLength / bytesPerTexel;
-
- return layout;
+uint32_t GetMinimumBytesPerRow(wgpu::TextureFormat format, uint32_t width) {
+ const uint32_t bytesPerBlock = utils::GetTexelBlockSizeInBytes(format);
+ const uint32_t blockWidth = utils::GetTextureFormatBlockWidth(format);
+ ASSERT(width % blockWidth == 0);
+ return Align(bytesPerBlock * (width / blockWidth), kTextureBytesPerRowAlignment);
+}
+
+TextureDataCopyLayout GetTextureDataCopyLayoutForTextureAtLevel(wgpu::TextureFormat format,
+ wgpu::Extent3D textureSizeAtLevel0,
+ uint32_t mipmapLevel,
+ wgpu::TextureDimension dimension,
+ uint32_t rowsPerImage) {
+ // Compressed texture formats not supported in this function yet.
+ ASSERT(utils::GetTextureFormatBlockWidth(format) == 1);
+
+ TextureDataCopyLayout layout;
+
+ layout.mipSize = {std::max(textureSizeAtLevel0.width >> mipmapLevel, 1u),
+ std::max(textureSizeAtLevel0.height >> mipmapLevel, 1u),
+ textureSizeAtLevel0.depthOrArrayLayers};
+
+ if (dimension == wgpu::TextureDimension::e3D) {
+ layout.mipSize.depthOrArrayLayers =
+ std::max(textureSizeAtLevel0.depthOrArrayLayers >> mipmapLevel, 1u);
}
- uint64_t RequiredBytesInCopy(uint64_t bytesPerRow,
- uint64_t rowsPerImage,
- wgpu::Extent3D copyExtent,
- wgpu::TextureFormat textureFormat) {
- uint32_t blockSize = utils::GetTexelBlockSizeInBytes(textureFormat);
- uint32_t blockWidth = utils::GetTextureFormatBlockWidth(textureFormat);
- uint32_t blockHeight = utils::GetTextureFormatBlockHeight(textureFormat);
- ASSERT(copyExtent.width % blockWidth == 0);
- uint32_t widthInBlocks = copyExtent.width / blockWidth;
- ASSERT(copyExtent.height % blockHeight == 0);
- uint32_t heightInBlocks = copyExtent.height / blockHeight;
- return RequiredBytesInCopy(bytesPerRow, rowsPerImage, widthInBlocks, heightInBlocks,
- copyExtent.depthOrArrayLayers, blockSize);
- }
+ layout.bytesPerRow = GetMinimumBytesPerRow(format, layout.mipSize.width);
- uint64_t RequiredBytesInCopy(uint64_t bytesPerRow,
- uint64_t rowsPerImage,
- uint64_t widthInBlocks,
- uint64_t heightInBlocks,
- uint64_t depth,
- uint64_t bytesPerBlock) {
- if (depth == 0) {
- return 0;
- }
-
- uint64_t bytesPerImage = bytesPerRow * rowsPerImage;
- uint64_t requiredBytesInCopy = bytesPerImage * (depth - 1);
- if (heightInBlocks != 0) {
- uint64_t lastRowBytes = widthInBlocks * bytesPerBlock;
- uint64_t lastImageBytes = bytesPerRow * (heightInBlocks - 1) + lastRowBytes;
- requiredBytesInCopy += lastImageBytes;
- }
- return requiredBytesInCopy;
+ if (rowsPerImage == wgpu::kCopyStrideUndefined) {
+ rowsPerImage = layout.mipSize.height;
}
-
- uint64_t GetTexelCountInCopyRegion(uint64_t bytesPerRow,
- uint64_t rowsPerImage,
- wgpu::Extent3D copyExtent,
- wgpu::TextureFormat textureFormat) {
- return RequiredBytesInCopy(bytesPerRow, rowsPerImage, copyExtent, textureFormat) /
- utils::GetTexelBlockSizeInBytes(textureFormat);
+ layout.rowsPerImage = rowsPerImage;
+
+ uint32_t appliedRowsPerImage = rowsPerImage > 0 ? rowsPerImage : layout.mipSize.height;
+ layout.bytesPerImage = layout.bytesPerRow * appliedRowsPerImage;
+
+ layout.byteLength =
+ RequiredBytesInCopy(layout.bytesPerRow, appliedRowsPerImage, layout.mipSize, format);
+
+ const uint32_t bytesPerTexel = utils::GetTexelBlockSizeInBytes(format);
+ layout.texelBlocksPerRow = layout.bytesPerRow / bytesPerTexel;
+ layout.texelBlocksPerImage = layout.bytesPerImage / bytesPerTexel;
+ layout.texelBlockCount = layout.byteLength / bytesPerTexel;
+
+ return layout;
+}
+
+uint64_t RequiredBytesInCopy(uint64_t bytesPerRow,
+ uint64_t rowsPerImage,
+ wgpu::Extent3D copyExtent,
+ wgpu::TextureFormat textureFormat) {
+ uint32_t blockSize = utils::GetTexelBlockSizeInBytes(textureFormat);
+ uint32_t blockWidth = utils::GetTextureFormatBlockWidth(textureFormat);
+ uint32_t blockHeight = utils::GetTextureFormatBlockHeight(textureFormat);
+ ASSERT(copyExtent.width % blockWidth == 0);
+ uint32_t widthInBlocks = copyExtent.width / blockWidth;
+ ASSERT(copyExtent.height % blockHeight == 0);
+ uint32_t heightInBlocks = copyExtent.height / blockHeight;
+ return RequiredBytesInCopy(bytesPerRow, rowsPerImage, widthInBlocks, heightInBlocks,
+ copyExtent.depthOrArrayLayers, blockSize);
+}
+
+uint64_t RequiredBytesInCopy(uint64_t bytesPerRow,
+ uint64_t rowsPerImage,
+ uint64_t widthInBlocks,
+ uint64_t heightInBlocks,
+ uint64_t depth,
+ uint64_t bytesPerBlock) {
+ if (depth == 0) {
+ return 0;
}
- void UnalignDynamicUploader(wgpu::Device device) {
- std::vector<uint8_t> data = {1};
-
- wgpu::TextureDescriptor descriptor = {};
- descriptor.size = {1, 1, 1};
- descriptor.format = wgpu::TextureFormat::R8Unorm;
- descriptor.usage = wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::CopySrc;
- wgpu::Texture texture = device.CreateTexture(&descriptor);
-
- wgpu::ImageCopyTexture imageCopyTexture =
- utils::CreateImageCopyTexture(texture, 0, {0, 0, 0});
- wgpu::TextureDataLayout textureDataLayout =
- utils::CreateTextureDataLayout(0, wgpu::kCopyStrideUndefined);
- wgpu::Extent3D copyExtent = {1, 1, 1};
-
- // WriteTexture with exactly 1 byte of data.
- device.GetQueue().WriteTexture(&imageCopyTexture, data.data(), 1, &textureDataLayout,
- &copyExtent);
+ uint64_t bytesPerImage = bytesPerRow * rowsPerImage;
+ uint64_t requiredBytesInCopy = bytesPerImage * (depth - 1);
+ if (heightInBlocks != 0) {
+ uint64_t lastRowBytes = widthInBlocks * bytesPerBlock;
+ uint64_t lastImageBytes = bytesPerRow * (heightInBlocks - 1) + lastRowBytes;
+ requiredBytesInCopy += lastImageBytes;
}
-
- uint32_t VertexFormatSize(wgpu::VertexFormat format) {
- switch (format) {
- case wgpu::VertexFormat::Uint8x2:
- case wgpu::VertexFormat::Sint8x2:
- case wgpu::VertexFormat::Unorm8x2:
- case wgpu::VertexFormat::Snorm8x2:
- return 2;
- case wgpu::VertexFormat::Uint8x4:
- case wgpu::VertexFormat::Sint8x4:
- case wgpu::VertexFormat::Unorm8x4:
- case wgpu::VertexFormat::Snorm8x4:
- case wgpu::VertexFormat::Uint16x2:
- case wgpu::VertexFormat::Sint16x2:
- case wgpu::VertexFormat::Unorm16x2:
- case wgpu::VertexFormat::Snorm16x2:
- case wgpu::VertexFormat::Float16x2:
- case wgpu::VertexFormat::Float32:
- case wgpu::VertexFormat::Uint32:
- case wgpu::VertexFormat::Sint32:
- return 4;
- case wgpu::VertexFormat::Uint16x4:
- case wgpu::VertexFormat::Sint16x4:
- case wgpu::VertexFormat::Unorm16x4:
- case wgpu::VertexFormat::Snorm16x4:
- case wgpu::VertexFormat::Float16x4:
- case wgpu::VertexFormat::Float32x2:
- case wgpu::VertexFormat::Uint32x2:
- case wgpu::VertexFormat::Sint32x2:
- return 8;
- case wgpu::VertexFormat::Float32x3:
- case wgpu::VertexFormat::Uint32x3:
- case wgpu::VertexFormat::Sint32x3:
- return 12;
- case wgpu::VertexFormat::Float32x4:
- case wgpu::VertexFormat::Uint32x4:
- case wgpu::VertexFormat::Sint32x4:
- return 16;
- case wgpu::VertexFormat::Undefined:
- break;
- }
- UNREACHABLE();
+ return requiredBytesInCopy;
+}
+
+uint64_t GetTexelCountInCopyRegion(uint64_t bytesPerRow,
+ uint64_t rowsPerImage,
+ wgpu::Extent3D copyExtent,
+ wgpu::TextureFormat textureFormat) {
+ return RequiredBytesInCopy(bytesPerRow, rowsPerImage, copyExtent, textureFormat) /
+ utils::GetTexelBlockSizeInBytes(textureFormat);
+}
+
+void UnalignDynamicUploader(wgpu::Device device) {
+ std::vector<uint8_t> data = {1};
+
+ wgpu::TextureDescriptor descriptor = {};
+ descriptor.size = {1, 1, 1};
+ descriptor.format = wgpu::TextureFormat::R8Unorm;
+ descriptor.usage = wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::CopySrc;
+ wgpu::Texture texture = device.CreateTexture(&descriptor);
+
+ wgpu::ImageCopyTexture imageCopyTexture = utils::CreateImageCopyTexture(texture, 0, {0, 0, 0});
+ wgpu::TextureDataLayout textureDataLayout =
+ utils::CreateTextureDataLayout(0, wgpu::kCopyStrideUndefined);
+ wgpu::Extent3D copyExtent = {1, 1, 1};
+
+ // WriteTexture with exactly 1 byte of data.
+ device.GetQueue().WriteTexture(&imageCopyTexture, data.data(), 1, &textureDataLayout,
+ &copyExtent);
+}
+
+uint32_t VertexFormatSize(wgpu::VertexFormat format) {
+ switch (format) {
+ case wgpu::VertexFormat::Uint8x2:
+ case wgpu::VertexFormat::Sint8x2:
+ case wgpu::VertexFormat::Unorm8x2:
+ case wgpu::VertexFormat::Snorm8x2:
+ return 2;
+ case wgpu::VertexFormat::Uint8x4:
+ case wgpu::VertexFormat::Sint8x4:
+ case wgpu::VertexFormat::Unorm8x4:
+ case wgpu::VertexFormat::Snorm8x4:
+ case wgpu::VertexFormat::Uint16x2:
+ case wgpu::VertexFormat::Sint16x2:
+ case wgpu::VertexFormat::Unorm16x2:
+ case wgpu::VertexFormat::Snorm16x2:
+ case wgpu::VertexFormat::Float16x2:
+ case wgpu::VertexFormat::Float32:
+ case wgpu::VertexFormat::Uint32:
+ case wgpu::VertexFormat::Sint32:
+ return 4;
+ case wgpu::VertexFormat::Uint16x4:
+ case wgpu::VertexFormat::Sint16x4:
+ case wgpu::VertexFormat::Unorm16x4:
+ case wgpu::VertexFormat::Snorm16x4:
+ case wgpu::VertexFormat::Float16x4:
+ case wgpu::VertexFormat::Float32x2:
+ case wgpu::VertexFormat::Uint32x2:
+ case wgpu::VertexFormat::Sint32x2:
+ return 8;
+ case wgpu::VertexFormat::Float32x3:
+ case wgpu::VertexFormat::Uint32x3:
+ case wgpu::VertexFormat::Sint32x3:
+ return 12;
+ case wgpu::VertexFormat::Float32x4:
+ case wgpu::VertexFormat::Uint32x4:
+ case wgpu::VertexFormat::Sint32x4:
+ return 16;
+ case wgpu::VertexFormat::Undefined:
+ break;
}
+ UNREACHABLE();
+}
} // namespace utils
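
A worked instance of the arithmetic above, using the 256-byte kTextureBytesPerRowAlignment that WebGPU requires for buffer-texture copies; the numbers are illustrative.

// Worked example (not part of the patch): RGBA8Unorm, 4 bytes per 1x1 block,
// copying a 100 x 100 x 1 region.
//   GetMinimumBytesPerRow: Align(4 * 100, 256) = Align(400, 256) = 512
//   RequiredBytesInCopy:   full image = 512 * 100 = 51200 bytes
//                          last image = 512 * (100 - 1) + 100 * 4 = 51088 bytes
//   depth == 1, so only the "last image" term applies.
uint64_t bytes = utils::RequiredBytesInCopy(/*bytesPerRow=*/512, /*rowsPerImage=*/100,
                                            /*widthInBlocks=*/100, /*heightInBlocks=*/100,
                                            /*depth=*/1, /*bytesPerBlock=*/4);
// bytes == 51088
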
diff --git a/chromium/third_party/dawn/src/dawn/utils/TestUtils.h b/chromium/third_party/dawn/src/dawn/utils/TestUtils.h
index e33cb1b5b7c..5e119cf38f3 100644
--- a/chromium/third_party/dawn/src/dawn/utils/TestUtils.h
+++ b/chromium/third_party/dawn/src/dawn/utils/TestUtils.h
@@ -15,52 +15,52 @@
#ifndef SRC_DAWN_UTILS_TESTUTILS_H_
#define SRC_DAWN_UTILS_TESTUTILS_H_
-#include <dawn/webgpu_cpp.h>
+#include "dawn/webgpu_cpp.h"
namespace utils {
- struct TextureDataCopyLayout {
- uint64_t byteLength;
- uint64_t texelBlockCount;
- uint32_t bytesPerRow;
- uint32_t rowsPerImage;
- uint32_t texelBlocksPerRow;
- uint32_t bytesPerImage;
- uint32_t texelBlocksPerImage;
- wgpu::Extent3D mipSize;
- };
+struct TextureDataCopyLayout {
+ uint64_t byteLength;
+ uint64_t texelBlockCount;
+ uint32_t bytesPerRow;
+ uint32_t rowsPerImage;
+ uint32_t texelBlocksPerRow;
+ uint32_t bytesPerImage;
+ uint32_t texelBlocksPerImage;
+ wgpu::Extent3D mipSize;
+};
- uint32_t GetMinimumBytesPerRow(wgpu::TextureFormat format, uint32_t width);
- TextureDataCopyLayout GetTextureDataCopyLayoutForTextureAtLevel(
- wgpu::TextureFormat format,
- wgpu::Extent3D textureSizeAtLevel0,
- uint32_t mipmapLevel,
- wgpu::TextureDimension dimension = wgpu::TextureDimension::e2D,
- uint32_t rowsPerImage = wgpu::kCopyStrideUndefined);
+uint32_t GetMinimumBytesPerRow(wgpu::TextureFormat format, uint32_t width);
+TextureDataCopyLayout GetTextureDataCopyLayoutForTextureAtLevel(
+ wgpu::TextureFormat format,
+ wgpu::Extent3D textureSizeAtLevel0,
+ uint32_t mipmapLevel,
+ wgpu::TextureDimension dimension = wgpu::TextureDimension::e2D,
+ uint32_t rowsPerImage = wgpu::kCopyStrideUndefined);
- uint64_t RequiredBytesInCopy(uint64_t bytesPerRow,
- uint64_t rowsPerImage,
- wgpu::Extent3D copyExtent,
- wgpu::TextureFormat textureFormat);
- uint64_t RequiredBytesInCopy(uint64_t bytesPerRow,
- uint64_t rowsPerImage,
- uint64_t widthInBlocks,
- uint64_t heightInBlocks,
- uint64_t depth,
- uint64_t bytesPerBlock);
+uint64_t RequiredBytesInCopy(uint64_t bytesPerRow,
+ uint64_t rowsPerImage,
+ wgpu::Extent3D copyExtent,
+ wgpu::TextureFormat textureFormat);
+uint64_t RequiredBytesInCopy(uint64_t bytesPerRow,
+ uint64_t rowsPerImage,
+ uint64_t widthInBlocks,
+ uint64_t heightInBlocks,
+ uint64_t depth,
+ uint64_t bytesPerBlock);
- uint64_t GetTexelCountInCopyRegion(uint64_t bytesPerRow,
- uint64_t rowsPerImage,
- wgpu::Extent3D copyExtent,
- wgpu::TextureFormat textureFormat);
+uint64_t GetTexelCountInCopyRegion(uint64_t bytesPerRow,
+ uint64_t rowsPerImage,
+ wgpu::Extent3D copyExtent,
+ wgpu::TextureFormat textureFormat);
- // A helper function used for testing DynamicUploader offset alignment.
- // A call of this function will do a Queue::WriteTexture with 1 byte of data,
- // so that assuming that WriteTexture uses DynamicUploader, the first RingBuffer
- // in it will contain 1 byte of data.
- void UnalignDynamicUploader(wgpu::Device device);
+// A helper function used for testing DynamicUploader offset alignment.
+// A call of this function will do a Queue::WriteTexture with 1 byte of data,
+// so that assuming that WriteTexture uses DynamicUploader, the first RingBuffer
+// in it will contain 1 byte of data.
+void UnalignDynamicUploader(wgpu::Device device);
- uint32_t VertexFormatSize(wgpu::VertexFormat format);
+uint32_t VertexFormatSize(wgpu::VertexFormat format);
} // namespace utils
diff --git a/chromium/third_party/dawn/src/dawn/utils/TextureUtils.cpp b/chromium/third_party/dawn/src/dawn/utils/TextureUtils.cpp
index ce5de07a0a1..f0c4505e6cd 100644
--- a/chromium/third_party/dawn/src/dawn/utils/TextureUtils.cpp
+++ b/chromium/third_party/dawn/src/dawn/utils/TextureUtils.cpp
@@ -15,756 +15,755 @@
#include "dawn/utils/TextureUtils.h"
namespace utils {
- bool TextureFormatSupportsStorageTexture(wgpu::TextureFormat format) {
- switch (format) {
- case wgpu::TextureFormat::R32Uint:
- case wgpu::TextureFormat::R32Sint:
- case wgpu::TextureFormat::R32Float:
- case wgpu::TextureFormat::RGBA8Unorm:
- case wgpu::TextureFormat::RGBA8Snorm:
- case wgpu::TextureFormat::RGBA8Uint:
- case wgpu::TextureFormat::RGBA8Sint:
- case wgpu::TextureFormat::RG32Uint:
- case wgpu::TextureFormat::RG32Sint:
- case wgpu::TextureFormat::RG32Float:
- case wgpu::TextureFormat::RGBA16Uint:
- case wgpu::TextureFormat::RGBA16Sint:
- case wgpu::TextureFormat::RGBA16Float:
- case wgpu::TextureFormat::RGBA32Uint:
- case wgpu::TextureFormat::RGBA32Sint:
- case wgpu::TextureFormat::RGBA32Float:
- return true;
-
- default:
- return false;
- }
+bool TextureFormatSupportsStorageTexture(wgpu::TextureFormat format) {
+ switch (format) {
+ case wgpu::TextureFormat::R32Uint:
+ case wgpu::TextureFormat::R32Sint:
+ case wgpu::TextureFormat::R32Float:
+ case wgpu::TextureFormat::RGBA8Unorm:
+ case wgpu::TextureFormat::RGBA8Snorm:
+ case wgpu::TextureFormat::RGBA8Uint:
+ case wgpu::TextureFormat::RGBA8Sint:
+ case wgpu::TextureFormat::RG32Uint:
+ case wgpu::TextureFormat::RG32Sint:
+ case wgpu::TextureFormat::RG32Float:
+ case wgpu::TextureFormat::RGBA16Uint:
+ case wgpu::TextureFormat::RGBA16Sint:
+ case wgpu::TextureFormat::RGBA16Float:
+ case wgpu::TextureFormat::RGBA32Uint:
+ case wgpu::TextureFormat::RGBA32Sint:
+ case wgpu::TextureFormat::RGBA32Float:
+ return true;
+
+ default:
+ return false;
}
-
- bool IsBCTextureFormat(wgpu::TextureFormat textureFormat) {
- switch (textureFormat) {
- case wgpu::TextureFormat::BC1RGBAUnorm:
- case wgpu::TextureFormat::BC1RGBAUnormSrgb:
- case wgpu::TextureFormat::BC4RUnorm:
- case wgpu::TextureFormat::BC4RSnorm:
- case wgpu::TextureFormat::BC2RGBAUnorm:
- case wgpu::TextureFormat::BC2RGBAUnormSrgb:
- case wgpu::TextureFormat::BC3RGBAUnorm:
- case wgpu::TextureFormat::BC3RGBAUnormSrgb:
- case wgpu::TextureFormat::BC5RGUnorm:
- case wgpu::TextureFormat::BC5RGSnorm:
- case wgpu::TextureFormat::BC6HRGBUfloat:
- case wgpu::TextureFormat::BC6HRGBFloat:
- case wgpu::TextureFormat::BC7RGBAUnorm:
- case wgpu::TextureFormat::BC7RGBAUnormSrgb:
- return true;
-
- default:
- return false;
- }
+}
+
+bool IsBCTextureFormat(wgpu::TextureFormat textureFormat) {
+ switch (textureFormat) {
+ case wgpu::TextureFormat::BC1RGBAUnorm:
+ case wgpu::TextureFormat::BC1RGBAUnormSrgb:
+ case wgpu::TextureFormat::BC4RUnorm:
+ case wgpu::TextureFormat::BC4RSnorm:
+ case wgpu::TextureFormat::BC2RGBAUnorm:
+ case wgpu::TextureFormat::BC2RGBAUnormSrgb:
+ case wgpu::TextureFormat::BC3RGBAUnorm:
+ case wgpu::TextureFormat::BC3RGBAUnormSrgb:
+ case wgpu::TextureFormat::BC5RGUnorm:
+ case wgpu::TextureFormat::BC5RGSnorm:
+ case wgpu::TextureFormat::BC6HRGBUfloat:
+ case wgpu::TextureFormat::BC6HRGBFloat:
+ case wgpu::TextureFormat::BC7RGBAUnorm:
+ case wgpu::TextureFormat::BC7RGBAUnormSrgb:
+ return true;
+
+ default:
+ return false;
}
-
- bool IsETC2TextureFormat(wgpu::TextureFormat textureFormat) {
- switch (textureFormat) {
- case wgpu::TextureFormat::ETC2RGB8Unorm:
- case wgpu::TextureFormat::ETC2RGB8UnormSrgb:
- case wgpu::TextureFormat::ETC2RGB8A1Unorm:
- case wgpu::TextureFormat::ETC2RGB8A1UnormSrgb:
- case wgpu::TextureFormat::EACR11Unorm:
- case wgpu::TextureFormat::EACR11Snorm:
- case wgpu::TextureFormat::ETC2RGBA8Unorm:
- case wgpu::TextureFormat::ETC2RGBA8UnormSrgb:
- case wgpu::TextureFormat::EACRG11Unorm:
- case wgpu::TextureFormat::EACRG11Snorm:
- return true;
-
- default:
- return false;
- }
+}
+
+bool IsETC2TextureFormat(wgpu::TextureFormat textureFormat) {
+ switch (textureFormat) {
+ case wgpu::TextureFormat::ETC2RGB8Unorm:
+ case wgpu::TextureFormat::ETC2RGB8UnormSrgb:
+ case wgpu::TextureFormat::ETC2RGB8A1Unorm:
+ case wgpu::TextureFormat::ETC2RGB8A1UnormSrgb:
+ case wgpu::TextureFormat::EACR11Unorm:
+ case wgpu::TextureFormat::EACR11Snorm:
+ case wgpu::TextureFormat::ETC2RGBA8Unorm:
+ case wgpu::TextureFormat::ETC2RGBA8UnormSrgb:
+ case wgpu::TextureFormat::EACRG11Unorm:
+ case wgpu::TextureFormat::EACRG11Snorm:
+ return true;
+
+ default:
+ return false;
}
-
- bool IsASTCTextureFormat(wgpu::TextureFormat textureFormat) {
- switch (textureFormat) {
- case wgpu::TextureFormat::ASTC4x4Unorm:
- case wgpu::TextureFormat::ASTC4x4UnormSrgb:
- case wgpu::TextureFormat::ASTC5x4Unorm:
- case wgpu::TextureFormat::ASTC5x4UnormSrgb:
- case wgpu::TextureFormat::ASTC5x5Unorm:
- case wgpu::TextureFormat::ASTC5x5UnormSrgb:
- case wgpu::TextureFormat::ASTC6x5Unorm:
- case wgpu::TextureFormat::ASTC6x5UnormSrgb:
- case wgpu::TextureFormat::ASTC6x6Unorm:
- case wgpu::TextureFormat::ASTC6x6UnormSrgb:
- case wgpu::TextureFormat::ASTC8x5Unorm:
- case wgpu::TextureFormat::ASTC8x5UnormSrgb:
- case wgpu::TextureFormat::ASTC8x6Unorm:
- case wgpu::TextureFormat::ASTC8x6UnormSrgb:
- case wgpu::TextureFormat::ASTC8x8Unorm:
- case wgpu::TextureFormat::ASTC8x8UnormSrgb:
- case wgpu::TextureFormat::ASTC10x5Unorm:
- case wgpu::TextureFormat::ASTC10x5UnormSrgb:
- case wgpu::TextureFormat::ASTC10x6Unorm:
- case wgpu::TextureFormat::ASTC10x6UnormSrgb:
- case wgpu::TextureFormat::ASTC10x8Unorm:
- case wgpu::TextureFormat::ASTC10x8UnormSrgb:
- case wgpu::TextureFormat::ASTC10x10Unorm:
- case wgpu::TextureFormat::ASTC10x10UnormSrgb:
- case wgpu::TextureFormat::ASTC12x10Unorm:
- case wgpu::TextureFormat::ASTC12x10UnormSrgb:
- case wgpu::TextureFormat::ASTC12x12Unorm:
- case wgpu::TextureFormat::ASTC12x12UnormSrgb:
- return true;
-
- default:
- return false;
- }
+}
+
+bool IsASTCTextureFormat(wgpu::TextureFormat textureFormat) {
+ switch (textureFormat) {
+ case wgpu::TextureFormat::ASTC4x4Unorm:
+ case wgpu::TextureFormat::ASTC4x4UnormSrgb:
+ case wgpu::TextureFormat::ASTC5x4Unorm:
+ case wgpu::TextureFormat::ASTC5x4UnormSrgb:
+ case wgpu::TextureFormat::ASTC5x5Unorm:
+ case wgpu::TextureFormat::ASTC5x5UnormSrgb:
+ case wgpu::TextureFormat::ASTC6x5Unorm:
+ case wgpu::TextureFormat::ASTC6x5UnormSrgb:
+ case wgpu::TextureFormat::ASTC6x6Unorm:
+ case wgpu::TextureFormat::ASTC6x6UnormSrgb:
+ case wgpu::TextureFormat::ASTC8x5Unorm:
+ case wgpu::TextureFormat::ASTC8x5UnormSrgb:
+ case wgpu::TextureFormat::ASTC8x6Unorm:
+ case wgpu::TextureFormat::ASTC8x6UnormSrgb:
+ case wgpu::TextureFormat::ASTC8x8Unorm:
+ case wgpu::TextureFormat::ASTC8x8UnormSrgb:
+ case wgpu::TextureFormat::ASTC10x5Unorm:
+ case wgpu::TextureFormat::ASTC10x5UnormSrgb:
+ case wgpu::TextureFormat::ASTC10x6Unorm:
+ case wgpu::TextureFormat::ASTC10x6UnormSrgb:
+ case wgpu::TextureFormat::ASTC10x8Unorm:
+ case wgpu::TextureFormat::ASTC10x8UnormSrgb:
+ case wgpu::TextureFormat::ASTC10x10Unorm:
+ case wgpu::TextureFormat::ASTC10x10UnormSrgb:
+ case wgpu::TextureFormat::ASTC12x10Unorm:
+ case wgpu::TextureFormat::ASTC12x10UnormSrgb:
+ case wgpu::TextureFormat::ASTC12x12Unorm:
+ case wgpu::TextureFormat::ASTC12x12UnormSrgb:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+bool IsDepthOnlyFormat(wgpu::TextureFormat textureFormat) {
+ switch (textureFormat) {
+ case wgpu::TextureFormat::Depth16Unorm:
+ case wgpu::TextureFormat::Depth24Plus:
+ case wgpu::TextureFormat::Depth32Float:
+ return true;
+ default:
+ return false;
}
+}
- bool IsDepthOnlyFormat(wgpu::TextureFormat textureFormat) {
- switch (textureFormat) {
- case wgpu::TextureFormat::Depth16Unorm:
- case wgpu::TextureFormat::Depth24Plus:
- case wgpu::TextureFormat::Depth32Float:
- return true;
- default:
- return false;
- }
+bool TextureFormatSupportsMultisampling(wgpu::TextureFormat textureFormat) {
+ if (IsBCTextureFormat(textureFormat) || IsETC2TextureFormat(textureFormat) ||
+ IsASTCTextureFormat(textureFormat)) {
+ return false;
}
- bool TextureFormatSupportsMultisampling(wgpu::TextureFormat textureFormat) {
- if (IsBCTextureFormat(textureFormat) || IsETC2TextureFormat(textureFormat) ||
- IsASTCTextureFormat(textureFormat)) {
+ switch (textureFormat) {
+ case wgpu::TextureFormat::R32Uint:
+ case wgpu::TextureFormat::R32Sint:
+ case wgpu::TextureFormat::RG32Uint:
+ case wgpu::TextureFormat::RG32Sint:
+ case wgpu::TextureFormat::RG32Float:
+ case wgpu::TextureFormat::RGBA32Uint:
+ case wgpu::TextureFormat::RGBA32Sint:
+ case wgpu::TextureFormat::RGBA32Float:
+ case wgpu::TextureFormat::RGB9E5Ufloat:
+ case wgpu::TextureFormat::R8Snorm:
+ case wgpu::TextureFormat::RG8Snorm:
+ case wgpu::TextureFormat::RGBA8Snorm:
+ case wgpu::TextureFormat::RG11B10Ufloat:
return false;
- }
-
- switch (textureFormat) {
- case wgpu::TextureFormat::R32Uint:
- case wgpu::TextureFormat::R32Sint:
- case wgpu::TextureFormat::RG32Uint:
- case wgpu::TextureFormat::RG32Sint:
- case wgpu::TextureFormat::RG32Float:
- case wgpu::TextureFormat::RGBA32Uint:
- case wgpu::TextureFormat::RGBA32Sint:
- case wgpu::TextureFormat::RGBA32Float:
- case wgpu::TextureFormat::RGB9E5Ufloat:
- case wgpu::TextureFormat::R8Snorm:
- case wgpu::TextureFormat::RG8Snorm:
- case wgpu::TextureFormat::RGBA8Snorm:
- case wgpu::TextureFormat::RG11B10Ufloat:
- return false;
-
- default:
- return true;
- }
- }
- bool TextureFormatSupportsRendering(wgpu::TextureFormat textureFormat) {
- switch (textureFormat) {
- case wgpu::TextureFormat::R8Unorm:
- case wgpu::TextureFormat::R8Uint:
- case wgpu::TextureFormat::R8Sint:
- case wgpu::TextureFormat::RG8Unorm:
- case wgpu::TextureFormat::RG8Uint:
- case wgpu::TextureFormat::RG8Sint:
- case wgpu::TextureFormat::RGBA8Unorm:
- case wgpu::TextureFormat::RGBA8Uint:
- case wgpu::TextureFormat::RGBA8Sint:
- case wgpu::TextureFormat::BGRA8Unorm:
- case wgpu::TextureFormat::BGRA8UnormSrgb:
- case wgpu::TextureFormat::R16Uint:
- case wgpu::TextureFormat::R16Sint:
- case wgpu::TextureFormat::R16Float:
- case wgpu::TextureFormat::RG16Uint:
- case wgpu::TextureFormat::RG16Sint:
- case wgpu::TextureFormat::RG16Float:
- case wgpu::TextureFormat::RGBA16Uint:
- case wgpu::TextureFormat::RGBA16Sint:
- case wgpu::TextureFormat::RGBA16Float:
- case wgpu::TextureFormat::R32Uint:
- case wgpu::TextureFormat::R32Sint:
- case wgpu::TextureFormat::R32Float:
- case wgpu::TextureFormat::RG32Uint:
- case wgpu::TextureFormat::RG32Sint:
- case wgpu::TextureFormat::RG32Float:
- case wgpu::TextureFormat::RGBA32Uint:
- case wgpu::TextureFormat::RGBA32Sint:
- case wgpu::TextureFormat::RGBA32Float:
- case wgpu::TextureFormat::RGB10A2Unorm:
- return true;
-
- default:
- return false;
- }
+ default:
+ return true;
}
-
- bool TextureFormatSupportsResolveTarget(wgpu::TextureFormat textureFormat) {
- switch (textureFormat) {
- case wgpu::TextureFormat::R8Unorm:
- case wgpu::TextureFormat::RG8Unorm:
- case wgpu::TextureFormat::RGBA8Unorm:
- case wgpu::TextureFormat::RGBA8UnormSrgb:
- case wgpu::TextureFormat::BGRA8Unorm:
- case wgpu::TextureFormat::BGRA8UnormSrgb:
- case wgpu::TextureFormat::R16Float:
- case wgpu::TextureFormat::RG16Float:
- case wgpu::TextureFormat::RGBA16Float:
- case wgpu::TextureFormat::RGB10A2Unorm:
- return true;
-
- default:
- return false;
- }
+}
+
+bool TextureFormatSupportsRendering(wgpu::TextureFormat textureFormat) {
+ switch (textureFormat) {
+ case wgpu::TextureFormat::R8Unorm:
+ case wgpu::TextureFormat::R8Uint:
+ case wgpu::TextureFormat::R8Sint:
+ case wgpu::TextureFormat::RG8Unorm:
+ case wgpu::TextureFormat::RG8Uint:
+ case wgpu::TextureFormat::RG8Sint:
+ case wgpu::TextureFormat::RGBA8Unorm:
+ case wgpu::TextureFormat::RGBA8Uint:
+ case wgpu::TextureFormat::RGBA8Sint:
+ case wgpu::TextureFormat::BGRA8Unorm:
+ case wgpu::TextureFormat::BGRA8UnormSrgb:
+ case wgpu::TextureFormat::R16Uint:
+ case wgpu::TextureFormat::R16Sint:
+ case wgpu::TextureFormat::R16Float:
+ case wgpu::TextureFormat::RG16Uint:
+ case wgpu::TextureFormat::RG16Sint:
+ case wgpu::TextureFormat::RG16Float:
+ case wgpu::TextureFormat::RGBA16Uint:
+ case wgpu::TextureFormat::RGBA16Sint:
+ case wgpu::TextureFormat::RGBA16Float:
+ case wgpu::TextureFormat::R32Uint:
+ case wgpu::TextureFormat::R32Sint:
+ case wgpu::TextureFormat::R32Float:
+ case wgpu::TextureFormat::RG32Uint:
+ case wgpu::TextureFormat::RG32Sint:
+ case wgpu::TextureFormat::RG32Float:
+ case wgpu::TextureFormat::RGBA32Uint:
+ case wgpu::TextureFormat::RGBA32Sint:
+ case wgpu::TextureFormat::RGBA32Float:
+ case wgpu::TextureFormat::RGB10A2Unorm:
+ return true;
+
+ default:
+ return false;
}
-
- bool IsStencilOnlyFormat(wgpu::TextureFormat textureFormat) {
- return textureFormat == wgpu::TextureFormat::Stencil8;
+}
+
+bool TextureFormatSupportsResolveTarget(wgpu::TextureFormat textureFormat) {
+ switch (textureFormat) {
+ case wgpu::TextureFormat::R8Unorm:
+ case wgpu::TextureFormat::RG8Unorm:
+ case wgpu::TextureFormat::RGBA8Unorm:
+ case wgpu::TextureFormat::RGBA8UnormSrgb:
+ case wgpu::TextureFormat::BGRA8Unorm:
+ case wgpu::TextureFormat::BGRA8UnormSrgb:
+ case wgpu::TextureFormat::R16Float:
+ case wgpu::TextureFormat::RG16Float:
+ case wgpu::TextureFormat::RGBA16Float:
+ case wgpu::TextureFormat::RGB10A2Unorm:
+ return true;
+
+ default:
+ return false;
}
-
- uint32_t GetTexelBlockSizeInBytes(wgpu::TextureFormat textureFormat) {
- switch (textureFormat) {
- case wgpu::TextureFormat::R8Unorm:
- case wgpu::TextureFormat::R8Snorm:
- case wgpu::TextureFormat::R8Uint:
- case wgpu::TextureFormat::R8Sint:
- case wgpu::TextureFormat::Stencil8:
- return 1u;
-
- case wgpu::TextureFormat::R16Uint:
- case wgpu::TextureFormat::R16Sint:
- case wgpu::TextureFormat::R16Float:
- case wgpu::TextureFormat::RG8Unorm:
- case wgpu::TextureFormat::RG8Snorm:
- case wgpu::TextureFormat::RG8Uint:
- case wgpu::TextureFormat::RG8Sint:
- return 2u;
-
- case wgpu::TextureFormat::R32Float:
- case wgpu::TextureFormat::R32Uint:
- case wgpu::TextureFormat::R32Sint:
- case wgpu::TextureFormat::RG16Uint:
- case wgpu::TextureFormat::RG16Sint:
- case wgpu::TextureFormat::RG16Float:
- case wgpu::TextureFormat::RGBA8Unorm:
- case wgpu::TextureFormat::RGBA8UnormSrgb:
- case wgpu::TextureFormat::RGBA8Snorm:
- case wgpu::TextureFormat::RGBA8Uint:
- case wgpu::TextureFormat::RGBA8Sint:
- case wgpu::TextureFormat::BGRA8Unorm:
- case wgpu::TextureFormat::BGRA8UnormSrgb:
- case wgpu::TextureFormat::RGB10A2Unorm:
- case wgpu::TextureFormat::RG11B10Ufloat:
- case wgpu::TextureFormat::RGB9E5Ufloat:
- return 4u;
-
- case wgpu::TextureFormat::RG32Float:
- case wgpu::TextureFormat::RG32Uint:
- case wgpu::TextureFormat::RG32Sint:
- case wgpu::TextureFormat::RGBA16Uint:
- case wgpu::TextureFormat::RGBA16Sint:
- case wgpu::TextureFormat::RGBA16Float:
- return 8u;
-
- case wgpu::TextureFormat::RGBA32Float:
- case wgpu::TextureFormat::RGBA32Uint:
- case wgpu::TextureFormat::RGBA32Sint:
- return 16u;
-
- case wgpu::TextureFormat::Depth16Unorm:
- return 2u;
-
- case wgpu::TextureFormat::Depth24Plus:
- case wgpu::TextureFormat::Depth24UnormStencil8:
- case wgpu::TextureFormat::Depth32Float:
- return 4u;
-
- case wgpu::TextureFormat::BC1RGBAUnorm:
- case wgpu::TextureFormat::BC1RGBAUnormSrgb:
- case wgpu::TextureFormat::BC4RUnorm:
- case wgpu::TextureFormat::BC4RSnorm:
- return 8u;
-
- case wgpu::TextureFormat::BC2RGBAUnorm:
- case wgpu::TextureFormat::BC2RGBAUnormSrgb:
- case wgpu::TextureFormat::BC3RGBAUnorm:
- case wgpu::TextureFormat::BC3RGBAUnormSrgb:
- case wgpu::TextureFormat::BC5RGUnorm:
- case wgpu::TextureFormat::BC5RGSnorm:
- case wgpu::TextureFormat::BC6HRGBUfloat:
- case wgpu::TextureFormat::BC6HRGBFloat:
- case wgpu::TextureFormat::BC7RGBAUnorm:
- case wgpu::TextureFormat::BC7RGBAUnormSrgb:
- return 16u;
-
- case wgpu::TextureFormat::ETC2RGB8Unorm:
- case wgpu::TextureFormat::ETC2RGB8UnormSrgb:
- case wgpu::TextureFormat::ETC2RGB8A1Unorm:
- case wgpu::TextureFormat::ETC2RGB8A1UnormSrgb:
- case wgpu::TextureFormat::EACR11Unorm:
- case wgpu::TextureFormat::EACR11Snorm:
- return 8u;
-
- case wgpu::TextureFormat::ETC2RGBA8Unorm:
- case wgpu::TextureFormat::ETC2RGBA8UnormSrgb:
- case wgpu::TextureFormat::EACRG11Unorm:
- case wgpu::TextureFormat::EACRG11Snorm:
- return 16u;
-
- case wgpu::TextureFormat::ASTC4x4Unorm:
- case wgpu::TextureFormat::ASTC4x4UnormSrgb:
- case wgpu::TextureFormat::ASTC5x4Unorm:
- case wgpu::TextureFormat::ASTC5x4UnormSrgb:
- case wgpu::TextureFormat::ASTC5x5Unorm:
- case wgpu::TextureFormat::ASTC5x5UnormSrgb:
- case wgpu::TextureFormat::ASTC6x5Unorm:
- case wgpu::TextureFormat::ASTC6x5UnormSrgb:
- case wgpu::TextureFormat::ASTC6x6Unorm:
- case wgpu::TextureFormat::ASTC6x6UnormSrgb:
- case wgpu::TextureFormat::ASTC8x5Unorm:
- case wgpu::TextureFormat::ASTC8x5UnormSrgb:
- case wgpu::TextureFormat::ASTC8x6Unorm:
- case wgpu::TextureFormat::ASTC8x6UnormSrgb:
- case wgpu::TextureFormat::ASTC8x8Unorm:
- case wgpu::TextureFormat::ASTC8x8UnormSrgb:
- case wgpu::TextureFormat::ASTC10x5Unorm:
- case wgpu::TextureFormat::ASTC10x5UnormSrgb:
- case wgpu::TextureFormat::ASTC10x6Unorm:
- case wgpu::TextureFormat::ASTC10x6UnormSrgb:
- case wgpu::TextureFormat::ASTC10x8Unorm:
- case wgpu::TextureFormat::ASTC10x8UnormSrgb:
- case wgpu::TextureFormat::ASTC10x10Unorm:
- case wgpu::TextureFormat::ASTC10x10UnormSrgb:
- case wgpu::TextureFormat::ASTC12x10Unorm:
- case wgpu::TextureFormat::ASTC12x10UnormSrgb:
- case wgpu::TextureFormat::ASTC12x12Unorm:
- case wgpu::TextureFormat::ASTC12x12UnormSrgb:
- return 16u;
-
- case wgpu::TextureFormat::Depth24PlusStencil8:
- case wgpu::TextureFormat::Depth32FloatStencil8:
-
- // Block size of a multi-planar format depends on aspect.
- case wgpu::TextureFormat::R8BG8Biplanar420Unorm:
-
- case wgpu::TextureFormat::Undefined:
- break;
- }
- UNREACHABLE();
+}
+
+bool IsStencilOnlyFormat(wgpu::TextureFormat textureFormat) {
+ return textureFormat == wgpu::TextureFormat::Stencil8;
+}
+
+uint32_t GetTexelBlockSizeInBytes(wgpu::TextureFormat textureFormat) {
+ switch (textureFormat) {
+ case wgpu::TextureFormat::R8Unorm:
+ case wgpu::TextureFormat::R8Snorm:
+ case wgpu::TextureFormat::R8Uint:
+ case wgpu::TextureFormat::R8Sint:
+ case wgpu::TextureFormat::Stencil8:
+ return 1u;
+
+ case wgpu::TextureFormat::R16Uint:
+ case wgpu::TextureFormat::R16Sint:
+ case wgpu::TextureFormat::R16Float:
+ case wgpu::TextureFormat::RG8Unorm:
+ case wgpu::TextureFormat::RG8Snorm:
+ case wgpu::TextureFormat::RG8Uint:
+ case wgpu::TextureFormat::RG8Sint:
+ return 2u;
+
+ case wgpu::TextureFormat::R32Float:
+ case wgpu::TextureFormat::R32Uint:
+ case wgpu::TextureFormat::R32Sint:
+ case wgpu::TextureFormat::RG16Uint:
+ case wgpu::TextureFormat::RG16Sint:
+ case wgpu::TextureFormat::RG16Float:
+ case wgpu::TextureFormat::RGBA8Unorm:
+ case wgpu::TextureFormat::RGBA8UnormSrgb:
+ case wgpu::TextureFormat::RGBA8Snorm:
+ case wgpu::TextureFormat::RGBA8Uint:
+ case wgpu::TextureFormat::RGBA8Sint:
+ case wgpu::TextureFormat::BGRA8Unorm:
+ case wgpu::TextureFormat::BGRA8UnormSrgb:
+ case wgpu::TextureFormat::RGB10A2Unorm:
+ case wgpu::TextureFormat::RG11B10Ufloat:
+ case wgpu::TextureFormat::RGB9E5Ufloat:
+ return 4u;
+
+ case wgpu::TextureFormat::RG32Float:
+ case wgpu::TextureFormat::RG32Uint:
+ case wgpu::TextureFormat::RG32Sint:
+ case wgpu::TextureFormat::RGBA16Uint:
+ case wgpu::TextureFormat::RGBA16Sint:
+ case wgpu::TextureFormat::RGBA16Float:
+ return 8u;
+
+ case wgpu::TextureFormat::RGBA32Float:
+ case wgpu::TextureFormat::RGBA32Uint:
+ case wgpu::TextureFormat::RGBA32Sint:
+ return 16u;
+
+ case wgpu::TextureFormat::Depth16Unorm:
+ return 2u;
+
+ case wgpu::TextureFormat::Depth24Plus:
+ case wgpu::TextureFormat::Depth24UnormStencil8:
+ case wgpu::TextureFormat::Depth32Float:
+ return 4u;
+
+ case wgpu::TextureFormat::BC1RGBAUnorm:
+ case wgpu::TextureFormat::BC1RGBAUnormSrgb:
+ case wgpu::TextureFormat::BC4RUnorm:
+ case wgpu::TextureFormat::BC4RSnorm:
+ return 8u;
+
+ case wgpu::TextureFormat::BC2RGBAUnorm:
+ case wgpu::TextureFormat::BC2RGBAUnormSrgb:
+ case wgpu::TextureFormat::BC3RGBAUnorm:
+ case wgpu::TextureFormat::BC3RGBAUnormSrgb:
+ case wgpu::TextureFormat::BC5RGUnorm:
+ case wgpu::TextureFormat::BC5RGSnorm:
+ case wgpu::TextureFormat::BC6HRGBUfloat:
+ case wgpu::TextureFormat::BC6HRGBFloat:
+ case wgpu::TextureFormat::BC7RGBAUnorm:
+ case wgpu::TextureFormat::BC7RGBAUnormSrgb:
+ return 16u;
+
+ case wgpu::TextureFormat::ETC2RGB8Unorm:
+ case wgpu::TextureFormat::ETC2RGB8UnormSrgb:
+ case wgpu::TextureFormat::ETC2RGB8A1Unorm:
+ case wgpu::TextureFormat::ETC2RGB8A1UnormSrgb:
+ case wgpu::TextureFormat::EACR11Unorm:
+ case wgpu::TextureFormat::EACR11Snorm:
+ return 8u;
+
+ case wgpu::TextureFormat::ETC2RGBA8Unorm:
+ case wgpu::TextureFormat::ETC2RGBA8UnormSrgb:
+ case wgpu::TextureFormat::EACRG11Unorm:
+ case wgpu::TextureFormat::EACRG11Snorm:
+ return 16u;
+
+ case wgpu::TextureFormat::ASTC4x4Unorm:
+ case wgpu::TextureFormat::ASTC4x4UnormSrgb:
+ case wgpu::TextureFormat::ASTC5x4Unorm:
+ case wgpu::TextureFormat::ASTC5x4UnormSrgb:
+ case wgpu::TextureFormat::ASTC5x5Unorm:
+ case wgpu::TextureFormat::ASTC5x5UnormSrgb:
+ case wgpu::TextureFormat::ASTC6x5Unorm:
+ case wgpu::TextureFormat::ASTC6x5UnormSrgb:
+ case wgpu::TextureFormat::ASTC6x6Unorm:
+ case wgpu::TextureFormat::ASTC6x6UnormSrgb:
+ case wgpu::TextureFormat::ASTC8x5Unorm:
+ case wgpu::TextureFormat::ASTC8x5UnormSrgb:
+ case wgpu::TextureFormat::ASTC8x6Unorm:
+ case wgpu::TextureFormat::ASTC8x6UnormSrgb:
+ case wgpu::TextureFormat::ASTC8x8Unorm:
+ case wgpu::TextureFormat::ASTC8x8UnormSrgb:
+ case wgpu::TextureFormat::ASTC10x5Unorm:
+ case wgpu::TextureFormat::ASTC10x5UnormSrgb:
+ case wgpu::TextureFormat::ASTC10x6Unorm:
+ case wgpu::TextureFormat::ASTC10x6UnormSrgb:
+ case wgpu::TextureFormat::ASTC10x8Unorm:
+ case wgpu::TextureFormat::ASTC10x8UnormSrgb:
+ case wgpu::TextureFormat::ASTC10x10Unorm:
+ case wgpu::TextureFormat::ASTC10x10UnormSrgb:
+ case wgpu::TextureFormat::ASTC12x10Unorm:
+ case wgpu::TextureFormat::ASTC12x10UnormSrgb:
+ case wgpu::TextureFormat::ASTC12x12Unorm:
+ case wgpu::TextureFormat::ASTC12x12UnormSrgb:
+ return 16u;
+
+ case wgpu::TextureFormat::Depth24PlusStencil8:
+ case wgpu::TextureFormat::Depth32FloatStencil8:
+
+ // Block size of a multi-planar format depends on aspect.
+ case wgpu::TextureFormat::R8BG8Biplanar420Unorm:
+
+ case wgpu::TextureFormat::Undefined:
+ break;
}
-
- uint32_t GetTextureFormatBlockWidth(wgpu::TextureFormat textureFormat) {
- switch (textureFormat) {
- case wgpu::TextureFormat::R8Unorm:
- case wgpu::TextureFormat::R8Snorm:
- case wgpu::TextureFormat::R8Uint:
- case wgpu::TextureFormat::R8Sint:
- case wgpu::TextureFormat::R16Uint:
- case wgpu::TextureFormat::R16Sint:
- case wgpu::TextureFormat::R16Float:
- case wgpu::TextureFormat::RG8Unorm:
- case wgpu::TextureFormat::RG8Snorm:
- case wgpu::TextureFormat::RG8Uint:
- case wgpu::TextureFormat::RG8Sint:
- case wgpu::TextureFormat::R32Float:
- case wgpu::TextureFormat::R32Uint:
- case wgpu::TextureFormat::R32Sint:
- case wgpu::TextureFormat::RG16Uint:
- case wgpu::TextureFormat::RG16Sint:
- case wgpu::TextureFormat::RG16Float:
- case wgpu::TextureFormat::RGBA8Unorm:
- case wgpu::TextureFormat::RGBA8UnormSrgb:
- case wgpu::TextureFormat::RGBA8Snorm:
- case wgpu::TextureFormat::RGBA8Uint:
- case wgpu::TextureFormat::RGBA8Sint:
- case wgpu::TextureFormat::BGRA8Unorm:
- case wgpu::TextureFormat::BGRA8UnormSrgb:
- case wgpu::TextureFormat::RGB10A2Unorm:
- case wgpu::TextureFormat::RG11B10Ufloat:
- case wgpu::TextureFormat::RGB9E5Ufloat:
- case wgpu::TextureFormat::RG32Float:
- case wgpu::TextureFormat::RG32Uint:
- case wgpu::TextureFormat::RG32Sint:
- case wgpu::TextureFormat::RGBA16Uint:
- case wgpu::TextureFormat::RGBA16Sint:
- case wgpu::TextureFormat::RGBA16Float:
- case wgpu::TextureFormat::RGBA32Float:
- case wgpu::TextureFormat::RGBA32Uint:
- case wgpu::TextureFormat::RGBA32Sint:
- case wgpu::TextureFormat::Depth32Float:
- case wgpu::TextureFormat::Depth24Plus:
- case wgpu::TextureFormat::Depth24PlusStencil8:
- case wgpu::TextureFormat::Depth16Unorm:
- case wgpu::TextureFormat::Depth24UnormStencil8:
- case wgpu::TextureFormat::Depth32FloatStencil8:
- case wgpu::TextureFormat::Stencil8:
- return 1u;
-
- case wgpu::TextureFormat::BC1RGBAUnorm:
- case wgpu::TextureFormat::BC1RGBAUnormSrgb:
- case wgpu::TextureFormat::BC4RUnorm:
- case wgpu::TextureFormat::BC4RSnorm:
- case wgpu::TextureFormat::BC2RGBAUnorm:
- case wgpu::TextureFormat::BC2RGBAUnormSrgb:
- case wgpu::TextureFormat::BC3RGBAUnorm:
- case wgpu::TextureFormat::BC3RGBAUnormSrgb:
- case wgpu::TextureFormat::BC5RGUnorm:
- case wgpu::TextureFormat::BC5RGSnorm:
- case wgpu::TextureFormat::BC6HRGBUfloat:
- case wgpu::TextureFormat::BC6HRGBFloat:
- case wgpu::TextureFormat::BC7RGBAUnorm:
- case wgpu::TextureFormat::BC7RGBAUnormSrgb:
- case wgpu::TextureFormat::ETC2RGB8Unorm:
- case wgpu::TextureFormat::ETC2RGB8UnormSrgb:
- case wgpu::TextureFormat::ETC2RGB8A1Unorm:
- case wgpu::TextureFormat::ETC2RGB8A1UnormSrgb:
- case wgpu::TextureFormat::ETC2RGBA8Unorm:
- case wgpu::TextureFormat::ETC2RGBA8UnormSrgb:
- case wgpu::TextureFormat::EACR11Unorm:
- case wgpu::TextureFormat::EACR11Snorm:
- case wgpu::TextureFormat::EACRG11Unorm:
- case wgpu::TextureFormat::EACRG11Snorm:
- return 4u;
-
- case wgpu::TextureFormat::ASTC4x4Unorm:
- case wgpu::TextureFormat::ASTC4x4UnormSrgb:
- return 4u;
- case wgpu::TextureFormat::ASTC5x4Unorm:
- case wgpu::TextureFormat::ASTC5x4UnormSrgb:
- case wgpu::TextureFormat::ASTC5x5Unorm:
- case wgpu::TextureFormat::ASTC5x5UnormSrgb:
- return 5u;
- case wgpu::TextureFormat::ASTC6x5Unorm:
- case wgpu::TextureFormat::ASTC6x5UnormSrgb:
- case wgpu::TextureFormat::ASTC6x6Unorm:
- case wgpu::TextureFormat::ASTC6x6UnormSrgb:
- return 6u;
- case wgpu::TextureFormat::ASTC8x5Unorm:
- case wgpu::TextureFormat::ASTC8x5UnormSrgb:
- case wgpu::TextureFormat::ASTC8x6Unorm:
- case wgpu::TextureFormat::ASTC8x6UnormSrgb:
- case wgpu::TextureFormat::ASTC8x8Unorm:
- case wgpu::TextureFormat::ASTC8x8UnormSrgb:
- return 8u;
- case wgpu::TextureFormat::ASTC10x5Unorm:
- case wgpu::TextureFormat::ASTC10x5UnormSrgb:
- case wgpu::TextureFormat::ASTC10x6Unorm:
- case wgpu::TextureFormat::ASTC10x6UnormSrgb:
- case wgpu::TextureFormat::ASTC10x8Unorm:
- case wgpu::TextureFormat::ASTC10x8UnormSrgb:
- case wgpu::TextureFormat::ASTC10x10Unorm:
- case wgpu::TextureFormat::ASTC10x10UnormSrgb:
- return 10u;
- case wgpu::TextureFormat::ASTC12x10Unorm:
- case wgpu::TextureFormat::ASTC12x10UnormSrgb:
- case wgpu::TextureFormat::ASTC12x12Unorm:
- case wgpu::TextureFormat::ASTC12x12UnormSrgb:
- return 12u;
-
- // Block size of a multi-planar format depends on aspect.
- case wgpu::TextureFormat::R8BG8Biplanar420Unorm:
-
- case wgpu::TextureFormat::Undefined:
- break;
- }
- UNREACHABLE();
+ UNREACHABLE();
+}
+
+uint32_t GetTextureFormatBlockWidth(wgpu::TextureFormat textureFormat) {
+ switch (textureFormat) {
+ case wgpu::TextureFormat::R8Unorm:
+ case wgpu::TextureFormat::R8Snorm:
+ case wgpu::TextureFormat::R8Uint:
+ case wgpu::TextureFormat::R8Sint:
+ case wgpu::TextureFormat::R16Uint:
+ case wgpu::TextureFormat::R16Sint:
+ case wgpu::TextureFormat::R16Float:
+ case wgpu::TextureFormat::RG8Unorm:
+ case wgpu::TextureFormat::RG8Snorm:
+ case wgpu::TextureFormat::RG8Uint:
+ case wgpu::TextureFormat::RG8Sint:
+ case wgpu::TextureFormat::R32Float:
+ case wgpu::TextureFormat::R32Uint:
+ case wgpu::TextureFormat::R32Sint:
+ case wgpu::TextureFormat::RG16Uint:
+ case wgpu::TextureFormat::RG16Sint:
+ case wgpu::TextureFormat::RG16Float:
+ case wgpu::TextureFormat::RGBA8Unorm:
+ case wgpu::TextureFormat::RGBA8UnormSrgb:
+ case wgpu::TextureFormat::RGBA8Snorm:
+ case wgpu::TextureFormat::RGBA8Uint:
+ case wgpu::TextureFormat::RGBA8Sint:
+ case wgpu::TextureFormat::BGRA8Unorm:
+ case wgpu::TextureFormat::BGRA8UnormSrgb:
+ case wgpu::TextureFormat::RGB10A2Unorm:
+ case wgpu::TextureFormat::RG11B10Ufloat:
+ case wgpu::TextureFormat::RGB9E5Ufloat:
+ case wgpu::TextureFormat::RG32Float:
+ case wgpu::TextureFormat::RG32Uint:
+ case wgpu::TextureFormat::RG32Sint:
+ case wgpu::TextureFormat::RGBA16Uint:
+ case wgpu::TextureFormat::RGBA16Sint:
+ case wgpu::TextureFormat::RGBA16Float:
+ case wgpu::TextureFormat::RGBA32Float:
+ case wgpu::TextureFormat::RGBA32Uint:
+ case wgpu::TextureFormat::RGBA32Sint:
+ case wgpu::TextureFormat::Depth32Float:
+ case wgpu::TextureFormat::Depth24Plus:
+ case wgpu::TextureFormat::Depth24PlusStencil8:
+ case wgpu::TextureFormat::Depth16Unorm:
+ case wgpu::TextureFormat::Depth24UnormStencil8:
+ case wgpu::TextureFormat::Depth32FloatStencil8:
+ case wgpu::TextureFormat::Stencil8:
+ return 1u;
+
+ case wgpu::TextureFormat::BC1RGBAUnorm:
+ case wgpu::TextureFormat::BC1RGBAUnormSrgb:
+ case wgpu::TextureFormat::BC4RUnorm:
+ case wgpu::TextureFormat::BC4RSnorm:
+ case wgpu::TextureFormat::BC2RGBAUnorm:
+ case wgpu::TextureFormat::BC2RGBAUnormSrgb:
+ case wgpu::TextureFormat::BC3RGBAUnorm:
+ case wgpu::TextureFormat::BC3RGBAUnormSrgb:
+ case wgpu::TextureFormat::BC5RGUnorm:
+ case wgpu::TextureFormat::BC5RGSnorm:
+ case wgpu::TextureFormat::BC6HRGBUfloat:
+ case wgpu::TextureFormat::BC6HRGBFloat:
+ case wgpu::TextureFormat::BC7RGBAUnorm:
+ case wgpu::TextureFormat::BC7RGBAUnormSrgb:
+ case wgpu::TextureFormat::ETC2RGB8Unorm:
+ case wgpu::TextureFormat::ETC2RGB8UnormSrgb:
+ case wgpu::TextureFormat::ETC2RGB8A1Unorm:
+ case wgpu::TextureFormat::ETC2RGB8A1UnormSrgb:
+ case wgpu::TextureFormat::ETC2RGBA8Unorm:
+ case wgpu::TextureFormat::ETC2RGBA8UnormSrgb:
+ case wgpu::TextureFormat::EACR11Unorm:
+ case wgpu::TextureFormat::EACR11Snorm:
+ case wgpu::TextureFormat::EACRG11Unorm:
+ case wgpu::TextureFormat::EACRG11Snorm:
+ return 4u;
+
+ case wgpu::TextureFormat::ASTC4x4Unorm:
+ case wgpu::TextureFormat::ASTC4x4UnormSrgb:
+ return 4u;
+ case wgpu::TextureFormat::ASTC5x4Unorm:
+ case wgpu::TextureFormat::ASTC5x4UnormSrgb:
+ case wgpu::TextureFormat::ASTC5x5Unorm:
+ case wgpu::TextureFormat::ASTC5x5UnormSrgb:
+ return 5u;
+ case wgpu::TextureFormat::ASTC6x5Unorm:
+ case wgpu::TextureFormat::ASTC6x5UnormSrgb:
+ case wgpu::TextureFormat::ASTC6x6Unorm:
+ case wgpu::TextureFormat::ASTC6x6UnormSrgb:
+ return 6u;
+ case wgpu::TextureFormat::ASTC8x5Unorm:
+ case wgpu::TextureFormat::ASTC8x5UnormSrgb:
+ case wgpu::TextureFormat::ASTC8x6Unorm:
+ case wgpu::TextureFormat::ASTC8x6UnormSrgb:
+ case wgpu::TextureFormat::ASTC8x8Unorm:
+ case wgpu::TextureFormat::ASTC8x8UnormSrgb:
+ return 8u;
+ case wgpu::TextureFormat::ASTC10x5Unorm:
+ case wgpu::TextureFormat::ASTC10x5UnormSrgb:
+ case wgpu::TextureFormat::ASTC10x6Unorm:
+ case wgpu::TextureFormat::ASTC10x6UnormSrgb:
+ case wgpu::TextureFormat::ASTC10x8Unorm:
+ case wgpu::TextureFormat::ASTC10x8UnormSrgb:
+ case wgpu::TextureFormat::ASTC10x10Unorm:
+ case wgpu::TextureFormat::ASTC10x10UnormSrgb:
+ return 10u;
+ case wgpu::TextureFormat::ASTC12x10Unorm:
+ case wgpu::TextureFormat::ASTC12x10UnormSrgb:
+ case wgpu::TextureFormat::ASTC12x12Unorm:
+ case wgpu::TextureFormat::ASTC12x12UnormSrgb:
+ return 12u;
+
+ // Block size of a multi-planar format depends on aspect.
+ case wgpu::TextureFormat::R8BG8Biplanar420Unorm:
+
+ case wgpu::TextureFormat::Undefined:
+ break;
}
-
- uint32_t GetTextureFormatBlockHeight(wgpu::TextureFormat textureFormat) {
- switch (textureFormat) {
- case wgpu::TextureFormat::R8Unorm:
- case wgpu::TextureFormat::R8Snorm:
- case wgpu::TextureFormat::R8Uint:
- case wgpu::TextureFormat::R8Sint:
- case wgpu::TextureFormat::R16Uint:
- case wgpu::TextureFormat::R16Sint:
- case wgpu::TextureFormat::R16Float:
- case wgpu::TextureFormat::RG8Unorm:
- case wgpu::TextureFormat::RG8Snorm:
- case wgpu::TextureFormat::RG8Uint:
- case wgpu::TextureFormat::RG8Sint:
- case wgpu::TextureFormat::R32Float:
- case wgpu::TextureFormat::R32Uint:
- case wgpu::TextureFormat::R32Sint:
- case wgpu::TextureFormat::RG16Uint:
- case wgpu::TextureFormat::RG16Sint:
- case wgpu::TextureFormat::RG16Float:
- case wgpu::TextureFormat::RGBA8Unorm:
- case wgpu::TextureFormat::RGBA8UnormSrgb:
- case wgpu::TextureFormat::RGBA8Snorm:
- case wgpu::TextureFormat::RGBA8Uint:
- case wgpu::TextureFormat::RGBA8Sint:
- case wgpu::TextureFormat::BGRA8Unorm:
- case wgpu::TextureFormat::BGRA8UnormSrgb:
- case wgpu::TextureFormat::RGB10A2Unorm:
- case wgpu::TextureFormat::RG11B10Ufloat:
- case wgpu::TextureFormat::RGB9E5Ufloat:
- case wgpu::TextureFormat::RG32Float:
- case wgpu::TextureFormat::RG32Uint:
- case wgpu::TextureFormat::RG32Sint:
- case wgpu::TextureFormat::RGBA16Uint:
- case wgpu::TextureFormat::RGBA16Sint:
- case wgpu::TextureFormat::RGBA16Float:
- case wgpu::TextureFormat::RGBA32Float:
- case wgpu::TextureFormat::RGBA32Uint:
- case wgpu::TextureFormat::RGBA32Sint:
- case wgpu::TextureFormat::Depth32Float:
- case wgpu::TextureFormat::Depth24Plus:
- case wgpu::TextureFormat::Depth24PlusStencil8:
- case wgpu::TextureFormat::Depth16Unorm:
- case wgpu::TextureFormat::Depth24UnormStencil8:
- case wgpu::TextureFormat::Depth32FloatStencil8:
- case wgpu::TextureFormat::Stencil8:
- return 1u;
-
- case wgpu::TextureFormat::BC1RGBAUnorm:
- case wgpu::TextureFormat::BC1RGBAUnormSrgb:
- case wgpu::TextureFormat::BC4RUnorm:
- case wgpu::TextureFormat::BC4RSnorm:
- case wgpu::TextureFormat::BC2RGBAUnorm:
- case wgpu::TextureFormat::BC2RGBAUnormSrgb:
- case wgpu::TextureFormat::BC3RGBAUnorm:
- case wgpu::TextureFormat::BC3RGBAUnormSrgb:
- case wgpu::TextureFormat::BC5RGUnorm:
- case wgpu::TextureFormat::BC5RGSnorm:
- case wgpu::TextureFormat::BC6HRGBUfloat:
- case wgpu::TextureFormat::BC6HRGBFloat:
- case wgpu::TextureFormat::BC7RGBAUnorm:
- case wgpu::TextureFormat::BC7RGBAUnormSrgb:
- case wgpu::TextureFormat::ETC2RGB8Unorm:
- case wgpu::TextureFormat::ETC2RGB8UnormSrgb:
- case wgpu::TextureFormat::ETC2RGB8A1Unorm:
- case wgpu::TextureFormat::ETC2RGB8A1UnormSrgb:
- case wgpu::TextureFormat::ETC2RGBA8Unorm:
- case wgpu::TextureFormat::ETC2RGBA8UnormSrgb:
- case wgpu::TextureFormat::EACR11Unorm:
- case wgpu::TextureFormat::EACR11Snorm:
- case wgpu::TextureFormat::EACRG11Unorm:
- case wgpu::TextureFormat::EACRG11Snorm:
- return 4u;
-
- case wgpu::TextureFormat::ASTC4x4Unorm:
- case wgpu::TextureFormat::ASTC4x4UnormSrgb:
- case wgpu::TextureFormat::ASTC5x4Unorm:
- case wgpu::TextureFormat::ASTC5x4UnormSrgb:
- return 4u;
- case wgpu::TextureFormat::ASTC5x5Unorm:
- case wgpu::TextureFormat::ASTC5x5UnormSrgb:
- case wgpu::TextureFormat::ASTC6x5Unorm:
- case wgpu::TextureFormat::ASTC6x5UnormSrgb:
- case wgpu::TextureFormat::ASTC8x5Unorm:
- case wgpu::TextureFormat::ASTC8x5UnormSrgb:
- case wgpu::TextureFormat::ASTC10x5Unorm:
- case wgpu::TextureFormat::ASTC10x5UnormSrgb:
- return 5u;
- case wgpu::TextureFormat::ASTC6x6Unorm:
- case wgpu::TextureFormat::ASTC6x6UnormSrgb:
- case wgpu::TextureFormat::ASTC8x6Unorm:
- case wgpu::TextureFormat::ASTC8x6UnormSrgb:
- case wgpu::TextureFormat::ASTC10x6Unorm:
- case wgpu::TextureFormat::ASTC10x6UnormSrgb:
- return 6u;
- case wgpu::TextureFormat::ASTC8x8Unorm:
- case wgpu::TextureFormat::ASTC8x8UnormSrgb:
- case wgpu::TextureFormat::ASTC10x8Unorm:
- case wgpu::TextureFormat::ASTC10x8UnormSrgb:
- return 8u;
- case wgpu::TextureFormat::ASTC10x10Unorm:
- case wgpu::TextureFormat::ASTC10x10UnormSrgb:
- case wgpu::TextureFormat::ASTC12x10Unorm:
- case wgpu::TextureFormat::ASTC12x10UnormSrgb:
- return 10u;
- case wgpu::TextureFormat::ASTC12x12Unorm:
- case wgpu::TextureFormat::ASTC12x12UnormSrgb:
- return 12u;
-
- // Block size of a multi-planar format depends on aspect.
- case wgpu::TextureFormat::R8BG8Biplanar420Unorm:
-
- case wgpu::TextureFormat::Undefined:
- break;
- }
- UNREACHABLE();
+ UNREACHABLE();
+}
+
+uint32_t GetTextureFormatBlockHeight(wgpu::TextureFormat textureFormat) {
+ switch (textureFormat) {
+ case wgpu::TextureFormat::R8Unorm:
+ case wgpu::TextureFormat::R8Snorm:
+ case wgpu::TextureFormat::R8Uint:
+ case wgpu::TextureFormat::R8Sint:
+ case wgpu::TextureFormat::R16Uint:
+ case wgpu::TextureFormat::R16Sint:
+ case wgpu::TextureFormat::R16Float:
+ case wgpu::TextureFormat::RG8Unorm:
+ case wgpu::TextureFormat::RG8Snorm:
+ case wgpu::TextureFormat::RG8Uint:
+ case wgpu::TextureFormat::RG8Sint:
+ case wgpu::TextureFormat::R32Float:
+ case wgpu::TextureFormat::R32Uint:
+ case wgpu::TextureFormat::R32Sint:
+ case wgpu::TextureFormat::RG16Uint:
+ case wgpu::TextureFormat::RG16Sint:
+ case wgpu::TextureFormat::RG16Float:
+ case wgpu::TextureFormat::RGBA8Unorm:
+ case wgpu::TextureFormat::RGBA8UnormSrgb:
+ case wgpu::TextureFormat::RGBA8Snorm:
+ case wgpu::TextureFormat::RGBA8Uint:
+ case wgpu::TextureFormat::RGBA8Sint:
+ case wgpu::TextureFormat::BGRA8Unorm:
+ case wgpu::TextureFormat::BGRA8UnormSrgb:
+ case wgpu::TextureFormat::RGB10A2Unorm:
+ case wgpu::TextureFormat::RG11B10Ufloat:
+ case wgpu::TextureFormat::RGB9E5Ufloat:
+ case wgpu::TextureFormat::RG32Float:
+ case wgpu::TextureFormat::RG32Uint:
+ case wgpu::TextureFormat::RG32Sint:
+ case wgpu::TextureFormat::RGBA16Uint:
+ case wgpu::TextureFormat::RGBA16Sint:
+ case wgpu::TextureFormat::RGBA16Float:
+ case wgpu::TextureFormat::RGBA32Float:
+ case wgpu::TextureFormat::RGBA32Uint:
+ case wgpu::TextureFormat::RGBA32Sint:
+ case wgpu::TextureFormat::Depth32Float:
+ case wgpu::TextureFormat::Depth24Plus:
+ case wgpu::TextureFormat::Depth24PlusStencil8:
+ case wgpu::TextureFormat::Depth16Unorm:
+ case wgpu::TextureFormat::Depth24UnormStencil8:
+ case wgpu::TextureFormat::Depth32FloatStencil8:
+ case wgpu::TextureFormat::Stencil8:
+ return 1u;
+
+ case wgpu::TextureFormat::BC1RGBAUnorm:
+ case wgpu::TextureFormat::BC1RGBAUnormSrgb:
+ case wgpu::TextureFormat::BC4RUnorm:
+ case wgpu::TextureFormat::BC4RSnorm:
+ case wgpu::TextureFormat::BC2RGBAUnorm:
+ case wgpu::TextureFormat::BC2RGBAUnormSrgb:
+ case wgpu::TextureFormat::BC3RGBAUnorm:
+ case wgpu::TextureFormat::BC3RGBAUnormSrgb:
+ case wgpu::TextureFormat::BC5RGUnorm:
+ case wgpu::TextureFormat::BC5RGSnorm:
+ case wgpu::TextureFormat::BC6HRGBUfloat:
+ case wgpu::TextureFormat::BC6HRGBFloat:
+ case wgpu::TextureFormat::BC7RGBAUnorm:
+ case wgpu::TextureFormat::BC7RGBAUnormSrgb:
+ case wgpu::TextureFormat::ETC2RGB8Unorm:
+ case wgpu::TextureFormat::ETC2RGB8UnormSrgb:
+ case wgpu::TextureFormat::ETC2RGB8A1Unorm:
+ case wgpu::TextureFormat::ETC2RGB8A1UnormSrgb:
+ case wgpu::TextureFormat::ETC2RGBA8Unorm:
+ case wgpu::TextureFormat::ETC2RGBA8UnormSrgb:
+ case wgpu::TextureFormat::EACR11Unorm:
+ case wgpu::TextureFormat::EACR11Snorm:
+ case wgpu::TextureFormat::EACRG11Unorm:
+ case wgpu::TextureFormat::EACRG11Snorm:
+ return 4u;
+
+ case wgpu::TextureFormat::ASTC4x4Unorm:
+ case wgpu::TextureFormat::ASTC4x4UnormSrgb:
+ case wgpu::TextureFormat::ASTC5x4Unorm:
+ case wgpu::TextureFormat::ASTC5x4UnormSrgb:
+ return 4u;
+ case wgpu::TextureFormat::ASTC5x5Unorm:
+ case wgpu::TextureFormat::ASTC5x5UnormSrgb:
+ case wgpu::TextureFormat::ASTC6x5Unorm:
+ case wgpu::TextureFormat::ASTC6x5UnormSrgb:
+ case wgpu::TextureFormat::ASTC8x5Unorm:
+ case wgpu::TextureFormat::ASTC8x5UnormSrgb:
+ case wgpu::TextureFormat::ASTC10x5Unorm:
+ case wgpu::TextureFormat::ASTC10x5UnormSrgb:
+ return 5u;
+ case wgpu::TextureFormat::ASTC6x6Unorm:
+ case wgpu::TextureFormat::ASTC6x6UnormSrgb:
+ case wgpu::TextureFormat::ASTC8x6Unorm:
+ case wgpu::TextureFormat::ASTC8x6UnormSrgb:
+ case wgpu::TextureFormat::ASTC10x6Unorm:
+ case wgpu::TextureFormat::ASTC10x6UnormSrgb:
+ return 6u;
+ case wgpu::TextureFormat::ASTC8x8Unorm:
+ case wgpu::TextureFormat::ASTC8x8UnormSrgb:
+ case wgpu::TextureFormat::ASTC10x8Unorm:
+ case wgpu::TextureFormat::ASTC10x8UnormSrgb:
+ return 8u;
+ case wgpu::TextureFormat::ASTC10x10Unorm:
+ case wgpu::TextureFormat::ASTC10x10UnormSrgb:
+ case wgpu::TextureFormat::ASTC12x10Unorm:
+ case wgpu::TextureFormat::ASTC12x10UnormSrgb:
+ return 10u;
+ case wgpu::TextureFormat::ASTC12x12Unorm:
+ case wgpu::TextureFormat::ASTC12x12UnormSrgb:
+ return 12u;
+
+ // Block size of a multi-planar format depends on aspect.
+ case wgpu::TextureFormat::R8BG8Biplanar420Unorm:
+
+ case wgpu::TextureFormat::Undefined:
+ break;
}
-
- const char* GetWGSLColorTextureComponentType(wgpu::TextureFormat textureFormat) {
- switch (textureFormat) {
- case wgpu::TextureFormat::R8Unorm:
- case wgpu::TextureFormat::R8Snorm:
- case wgpu::TextureFormat::R16Float:
- case wgpu::TextureFormat::RG8Unorm:
- case wgpu::TextureFormat::RG8Snorm:
- case wgpu::TextureFormat::R32Float:
- case wgpu::TextureFormat::RG16Float:
- case wgpu::TextureFormat::RGBA8Unorm:
- case wgpu::TextureFormat::RGBA8Snorm:
- case wgpu::TextureFormat::RGB10A2Unorm:
- case wgpu::TextureFormat::RG11B10Ufloat:
- case wgpu::TextureFormat::RGB9E5Ufloat:
- case wgpu::TextureFormat::RG32Float:
- case wgpu::TextureFormat::RGBA16Float:
- case wgpu::TextureFormat::RGBA32Float:
- case wgpu::TextureFormat::BGRA8Unorm:
- case wgpu::TextureFormat::BGRA8UnormSrgb:
- case wgpu::TextureFormat::RGBA8UnormSrgb:
- return "f32";
-
- case wgpu::TextureFormat::R8Uint:
- case wgpu::TextureFormat::R16Uint:
- case wgpu::TextureFormat::RG8Uint:
- case wgpu::TextureFormat::R32Uint:
- case wgpu::TextureFormat::RG16Uint:
- case wgpu::TextureFormat::RGBA8Uint:
- case wgpu::TextureFormat::RG32Uint:
- case wgpu::TextureFormat::RGBA16Uint:
- case wgpu::TextureFormat::RGBA32Uint:
- return "u32";
-
- case wgpu::TextureFormat::R8Sint:
- case wgpu::TextureFormat::R16Sint:
- case wgpu::TextureFormat::RG8Sint:
- case wgpu::TextureFormat::R32Sint:
- case wgpu::TextureFormat::RG16Sint:
- case wgpu::TextureFormat::RGBA8Sint:
- case wgpu::TextureFormat::RG32Sint:
- case wgpu::TextureFormat::RGBA16Sint:
- case wgpu::TextureFormat::RGBA32Sint:
- return "i32";
-
- default:
- UNREACHABLE();
- }
+ UNREACHABLE();
+}
+
+const char* GetWGSLColorTextureComponentType(wgpu::TextureFormat textureFormat) {
+ switch (textureFormat) {
+ case wgpu::TextureFormat::R8Unorm:
+ case wgpu::TextureFormat::R8Snorm:
+ case wgpu::TextureFormat::R16Float:
+ case wgpu::TextureFormat::RG8Unorm:
+ case wgpu::TextureFormat::RG8Snorm:
+ case wgpu::TextureFormat::R32Float:
+ case wgpu::TextureFormat::RG16Float:
+ case wgpu::TextureFormat::RGBA8Unorm:
+ case wgpu::TextureFormat::RGBA8Snorm:
+ case wgpu::TextureFormat::RGB10A2Unorm:
+ case wgpu::TextureFormat::RG11B10Ufloat:
+ case wgpu::TextureFormat::RGB9E5Ufloat:
+ case wgpu::TextureFormat::RG32Float:
+ case wgpu::TextureFormat::RGBA16Float:
+ case wgpu::TextureFormat::RGBA32Float:
+ case wgpu::TextureFormat::BGRA8Unorm:
+ case wgpu::TextureFormat::BGRA8UnormSrgb:
+ case wgpu::TextureFormat::RGBA8UnormSrgb:
+ return "f32";
+
+ case wgpu::TextureFormat::R8Uint:
+ case wgpu::TextureFormat::R16Uint:
+ case wgpu::TextureFormat::RG8Uint:
+ case wgpu::TextureFormat::R32Uint:
+ case wgpu::TextureFormat::RG16Uint:
+ case wgpu::TextureFormat::RGBA8Uint:
+ case wgpu::TextureFormat::RG32Uint:
+ case wgpu::TextureFormat::RGBA16Uint:
+ case wgpu::TextureFormat::RGBA32Uint:
+ return "u32";
+
+ case wgpu::TextureFormat::R8Sint:
+ case wgpu::TextureFormat::R16Sint:
+ case wgpu::TextureFormat::RG8Sint:
+ case wgpu::TextureFormat::R32Sint:
+ case wgpu::TextureFormat::RG16Sint:
+ case wgpu::TextureFormat::RGBA8Sint:
+ case wgpu::TextureFormat::RG32Sint:
+ case wgpu::TextureFormat::RGBA16Sint:
+ case wgpu::TextureFormat::RGBA32Sint:
+ return "i32";
+
+ default:
+ UNREACHABLE();
}
-
- uint32_t GetWGSLRenderableColorTextureComponentCount(wgpu::TextureFormat textureFormat) {
- switch (textureFormat) {
- case wgpu::TextureFormat::R8Unorm:
- case wgpu::TextureFormat::R8Uint:
- case wgpu::TextureFormat::R8Sint:
- case wgpu::TextureFormat::R16Uint:
- case wgpu::TextureFormat::R16Sint:
- case wgpu::TextureFormat::R16Float:
- case wgpu::TextureFormat::R32Float:
- case wgpu::TextureFormat::R32Uint:
- case wgpu::TextureFormat::R32Sint:
- return 1u;
- case wgpu::TextureFormat::RG8Unorm:
- case wgpu::TextureFormat::RG8Uint:
- case wgpu::TextureFormat::RG8Sint:
- case wgpu::TextureFormat::RG16Uint:
- case wgpu::TextureFormat::RG16Sint:
- case wgpu::TextureFormat::RG16Float:
- case wgpu::TextureFormat::RG32Float:
- case wgpu::TextureFormat::RG32Uint:
- case wgpu::TextureFormat::RG32Sint:
- return 2u;
- case wgpu::TextureFormat::RGBA8Unorm:
- case wgpu::TextureFormat::RGBA8UnormSrgb:
- case wgpu::TextureFormat::RGBA8Uint:
- case wgpu::TextureFormat::RGBA8Sint:
- case wgpu::TextureFormat::BGRA8Unorm:
- case wgpu::TextureFormat::BGRA8UnormSrgb:
- case wgpu::TextureFormat::RGB10A2Unorm:
- case wgpu::TextureFormat::RGBA16Uint:
- case wgpu::TextureFormat::RGBA16Sint:
- case wgpu::TextureFormat::RGBA16Float:
- case wgpu::TextureFormat::RGBA32Float:
- case wgpu::TextureFormat::RGBA32Uint:
- case wgpu::TextureFormat::RGBA32Sint:
- return 4u;
- default:
- UNREACHABLE();
- }
+}
+
+uint32_t GetWGSLRenderableColorTextureComponentCount(wgpu::TextureFormat textureFormat) {
+ switch (textureFormat) {
+ case wgpu::TextureFormat::R8Unorm:
+ case wgpu::TextureFormat::R8Uint:
+ case wgpu::TextureFormat::R8Sint:
+ case wgpu::TextureFormat::R16Uint:
+ case wgpu::TextureFormat::R16Sint:
+ case wgpu::TextureFormat::R16Float:
+ case wgpu::TextureFormat::R32Float:
+ case wgpu::TextureFormat::R32Uint:
+ case wgpu::TextureFormat::R32Sint:
+ return 1u;
+ case wgpu::TextureFormat::RG8Unorm:
+ case wgpu::TextureFormat::RG8Uint:
+ case wgpu::TextureFormat::RG8Sint:
+ case wgpu::TextureFormat::RG16Uint:
+ case wgpu::TextureFormat::RG16Sint:
+ case wgpu::TextureFormat::RG16Float:
+ case wgpu::TextureFormat::RG32Float:
+ case wgpu::TextureFormat::RG32Uint:
+ case wgpu::TextureFormat::RG32Sint:
+ return 2u;
+ case wgpu::TextureFormat::RGBA8Unorm:
+ case wgpu::TextureFormat::RGBA8UnormSrgb:
+ case wgpu::TextureFormat::RGBA8Uint:
+ case wgpu::TextureFormat::RGBA8Sint:
+ case wgpu::TextureFormat::BGRA8Unorm:
+ case wgpu::TextureFormat::BGRA8UnormSrgb:
+ case wgpu::TextureFormat::RGB10A2Unorm:
+ case wgpu::TextureFormat::RGBA16Uint:
+ case wgpu::TextureFormat::RGBA16Sint:
+ case wgpu::TextureFormat::RGBA16Float:
+ case wgpu::TextureFormat::RGBA32Float:
+ case wgpu::TextureFormat::RGBA32Uint:
+ case wgpu::TextureFormat::RGBA32Sint:
+ return 4u;
+ default:
+ UNREACHABLE();
}
-
- const char* GetWGSLImageFormatQualifier(wgpu::TextureFormat textureFormat) {
- switch (textureFormat) {
- case wgpu::TextureFormat::RGBA8Unorm:
- return "rgba8unorm";
- case wgpu::TextureFormat::RGBA8Snorm:
- return "rgba8snorm";
- case wgpu::TextureFormat::RGBA8Uint:
- return "rgba8uint";
- case wgpu::TextureFormat::RGBA8Sint:
- return "rgba8sint";
- case wgpu::TextureFormat::RGBA16Uint:
- return "rgba16uint";
- case wgpu::TextureFormat::RGBA16Sint:
- return "rgba16sint";
- case wgpu::TextureFormat::RGBA16Float:
- return "rgba16float";
- case wgpu::TextureFormat::R32Uint:
- return "r32uint";
- case wgpu::TextureFormat::R32Sint:
- return "r32sint";
- case wgpu::TextureFormat::R32Float:
- return "r32float";
- case wgpu::TextureFormat::RG32Uint:
- return "rg32uint";
- case wgpu::TextureFormat::RG32Sint:
- return "rg32sint";
- case wgpu::TextureFormat::RG32Float:
- return "rg32float";
- case wgpu::TextureFormat::RGBA32Uint:
- return "rgba32uint";
- case wgpu::TextureFormat::RGBA32Sint:
- return "rgba32sint";
- case wgpu::TextureFormat::RGBA32Float:
- return "rgba32float";
-
- // The below do not currently exist in the WGSL spec, but are used
- // for tests that expect compilation failure.
- case wgpu::TextureFormat::R8Unorm:
- return "r8unorm";
- case wgpu::TextureFormat::R8Snorm:
- return "r8snorm";
- case wgpu::TextureFormat::R8Uint:
- return "r8uint";
- case wgpu::TextureFormat::R8Sint:
- return "r8sint";
- case wgpu::TextureFormat::R16Uint:
- return "r16uint";
- case wgpu::TextureFormat::R16Sint:
- return "r16sint";
- case wgpu::TextureFormat::R16Float:
- return "r16float";
- case wgpu::TextureFormat::RG8Unorm:
- return "rg8unorm";
- case wgpu::TextureFormat::RG8Snorm:
- return "rg8snorm";
- case wgpu::TextureFormat::RG8Uint:
- return "rg8uint";
- case wgpu::TextureFormat::RG8Sint:
- return "rg8sint";
- case wgpu::TextureFormat::RG16Uint:
- return "rg16uint";
- case wgpu::TextureFormat::RG16Sint:
- return "rg16sint";
- case wgpu::TextureFormat::RG16Float:
- return "rg16float";
- case wgpu::TextureFormat::RGB10A2Unorm:
- return "rgb10a2unorm";
- case wgpu::TextureFormat::RG11B10Ufloat:
- return "rg11b10ufloat";
-
- default:
- UNREACHABLE();
- }
+}
+
+const char* GetWGSLImageFormatQualifier(wgpu::TextureFormat textureFormat) {
+ switch (textureFormat) {
+ case wgpu::TextureFormat::RGBA8Unorm:
+ return "rgba8unorm";
+ case wgpu::TextureFormat::RGBA8Snorm:
+ return "rgba8snorm";
+ case wgpu::TextureFormat::RGBA8Uint:
+ return "rgba8uint";
+ case wgpu::TextureFormat::RGBA8Sint:
+ return "rgba8sint";
+ case wgpu::TextureFormat::RGBA16Uint:
+ return "rgba16uint";
+ case wgpu::TextureFormat::RGBA16Sint:
+ return "rgba16sint";
+ case wgpu::TextureFormat::RGBA16Float:
+ return "rgba16float";
+ case wgpu::TextureFormat::R32Uint:
+ return "r32uint";
+ case wgpu::TextureFormat::R32Sint:
+ return "r32sint";
+ case wgpu::TextureFormat::R32Float:
+ return "r32float";
+ case wgpu::TextureFormat::RG32Uint:
+ return "rg32uint";
+ case wgpu::TextureFormat::RG32Sint:
+ return "rg32sint";
+ case wgpu::TextureFormat::RG32Float:
+ return "rg32float";
+ case wgpu::TextureFormat::RGBA32Uint:
+ return "rgba32uint";
+ case wgpu::TextureFormat::RGBA32Sint:
+ return "rgba32sint";
+ case wgpu::TextureFormat::RGBA32Float:
+ return "rgba32float";
+
+ // The below do not currently exist in the WGSL spec, but are used
+ // for tests that expect compilation failure.
+ case wgpu::TextureFormat::R8Unorm:
+ return "r8unorm";
+ case wgpu::TextureFormat::R8Snorm:
+ return "r8snorm";
+ case wgpu::TextureFormat::R8Uint:
+ return "r8uint";
+ case wgpu::TextureFormat::R8Sint:
+ return "r8sint";
+ case wgpu::TextureFormat::R16Uint:
+ return "r16uint";
+ case wgpu::TextureFormat::R16Sint:
+ return "r16sint";
+ case wgpu::TextureFormat::R16Float:
+ return "r16float";
+ case wgpu::TextureFormat::RG8Unorm:
+ return "rg8unorm";
+ case wgpu::TextureFormat::RG8Snorm:
+ return "rg8snorm";
+ case wgpu::TextureFormat::RG8Uint:
+ return "rg8uint";
+ case wgpu::TextureFormat::RG8Sint:
+ return "rg8sint";
+ case wgpu::TextureFormat::RG16Uint:
+ return "rg16uint";
+ case wgpu::TextureFormat::RG16Sint:
+ return "rg16sint";
+ case wgpu::TextureFormat::RG16Float:
+ return "rg16float";
+ case wgpu::TextureFormat::RGB10A2Unorm:
+ return "rgb10a2unorm";
+ case wgpu::TextureFormat::RG11B10Ufloat:
+ return "rg11b10ufloat";
+
+ default:
+ UNREACHABLE();
}
-
- wgpu::TextureDimension ViewDimensionToTextureDimension(
- const wgpu::TextureViewDimension dimension) {
- switch (dimension) {
- case wgpu::TextureViewDimension::e2D:
- case wgpu::TextureViewDimension::e2DArray:
- case wgpu::TextureViewDimension::Cube:
- case wgpu::TextureViewDimension::CubeArray:
- return wgpu::TextureDimension::e2D;
- case wgpu::TextureViewDimension::e3D:
- return wgpu::TextureDimension::e3D;
- // TODO(crbug.com/dawn/814): Implement for 1D texture.
- case wgpu::TextureViewDimension::e1D:
- default:
- UNREACHABLE();
- break;
- }
+}
+
+wgpu::TextureDimension ViewDimensionToTextureDimension(const wgpu::TextureViewDimension dimension) {
+ switch (dimension) {
+ case wgpu::TextureViewDimension::e2D:
+ case wgpu::TextureViewDimension::e2DArray:
+ case wgpu::TextureViewDimension::Cube:
+ case wgpu::TextureViewDimension::CubeArray:
+ return wgpu::TextureDimension::e2D;
+ case wgpu::TextureViewDimension::e3D:
+ return wgpu::TextureDimension::e3D;
+ // TODO(crbug.com/dawn/814): Implement for 1D texture.
+ case wgpu::TextureViewDimension::e1D:
+ default:
+ UNREACHABLE();
+ break;
}
+}
} // namespace utils
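
For illustration only (not part of this change): the block-geometry helpers above are typically combined when sizing a buffer-to-texture copy. A minimal sketch, assuming non-zero width and height; RequiredBytesForLevel is a hypothetical name rather than Dawn API, and the 256-byte row padding mirrors WebGPU's bytesPerRow alignment rule.

    // Sketch only, using the helpers declared in dawn/utils/TextureUtils.h.
    #include <cstdint>

    #include "dawn/utils/TextureUtils.h"

    // Hypothetical helper: bytes needed to stage one mip level of `format` with
    // extent width x height, padding every row except the last to 256 bytes as
    // buffer-to-texture copies require.
    uint64_t RequiredBytesForLevel(wgpu::TextureFormat format, uint32_t width, uint32_t height) {
        const uint32_t blockWidth = utils::GetTextureFormatBlockWidth(format);
        const uint32_t blockHeight = utils::GetTextureFormatBlockHeight(format);
        const uint32_t blockSize = utils::GetTexelBlockSizeInBytes(format);

        // Round the extent up to whole blocks (1x1 for uncompressed formats).
        const uint32_t blocksPerRow = (width + blockWidth - 1) / blockWidth;
        const uint32_t rowsPerImage = (height + blockHeight - 1) / blockHeight;

        const uint32_t unalignedBytesPerRow = blocksPerRow * blockSize;
        const uint32_t alignedBytesPerRow = ((unalignedBytesPerRow + 255) / 256) * 256;

        // Only the rows before the last one carry the alignment padding.
        return uint64_t(alignedBytesPerRow) * (rowsPerImage - 1) + unalignedBytesPerRow;
    }
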
diff --git a/chromium/third_party/dawn/src/dawn/utils/TextureUtils.h b/chromium/third_party/dawn/src/dawn/utils/TextureUtils.h
index 26e60bf2de4..f92b19b518f 100644
--- a/chromium/third_party/dawn/src/dawn/utils/TextureUtils.h
+++ b/chromium/third_party/dawn/src/dawn/utils/TextureUtils.h
@@ -17,235 +17,234 @@
#include <array>
-#include <dawn/webgpu_cpp.h>
+#include "dawn/webgpu_cpp.h"
#include "dawn/common/Assert.h"
namespace utils {
- static constexpr std::array<wgpu::TextureFormat, 95> kAllTextureFormats = {
- wgpu::TextureFormat::R8Unorm,
- wgpu::TextureFormat::R8Snorm,
- wgpu::TextureFormat::R8Uint,
- wgpu::TextureFormat::R8Sint,
- wgpu::TextureFormat::R16Uint,
- wgpu::TextureFormat::R16Sint,
- wgpu::TextureFormat::R16Float,
- wgpu::TextureFormat::RG8Unorm,
- wgpu::TextureFormat::RG8Snorm,
- wgpu::TextureFormat::RG8Uint,
- wgpu::TextureFormat::RG8Sint,
- wgpu::TextureFormat::R32Float,
- wgpu::TextureFormat::R32Uint,
- wgpu::TextureFormat::R32Sint,
- wgpu::TextureFormat::RG16Uint,
- wgpu::TextureFormat::RG16Sint,
- wgpu::TextureFormat::RG16Float,
- wgpu::TextureFormat::RGBA8Unorm,
- wgpu::TextureFormat::RGBA8UnormSrgb,
- wgpu::TextureFormat::RGBA8Snorm,
- wgpu::TextureFormat::RGBA8Uint,
- wgpu::TextureFormat::RGBA8Sint,
- wgpu::TextureFormat::BGRA8Unorm,
- wgpu::TextureFormat::BGRA8UnormSrgb,
- wgpu::TextureFormat::RGB10A2Unorm,
- wgpu::TextureFormat::RG11B10Ufloat,
- wgpu::TextureFormat::RGB9E5Ufloat,
- wgpu::TextureFormat::RG32Float,
- wgpu::TextureFormat::RG32Uint,
- wgpu::TextureFormat::RG32Sint,
- wgpu::TextureFormat::RGBA16Uint,
- wgpu::TextureFormat::RGBA16Sint,
- wgpu::TextureFormat::RGBA16Float,
- wgpu::TextureFormat::RGBA32Float,
- wgpu::TextureFormat::RGBA32Uint,
- wgpu::TextureFormat::RGBA32Sint,
- wgpu::TextureFormat::Depth16Unorm,
- wgpu::TextureFormat::Depth32Float,
- wgpu::TextureFormat::Depth24Plus,
- wgpu::TextureFormat::Depth24PlusStencil8,
- wgpu::TextureFormat::Depth24UnormStencil8,
- wgpu::TextureFormat::Depth32FloatStencil8,
- wgpu::TextureFormat::Stencil8,
- wgpu::TextureFormat::BC1RGBAUnorm,
- wgpu::TextureFormat::BC1RGBAUnormSrgb,
- wgpu::TextureFormat::BC2RGBAUnorm,
- wgpu::TextureFormat::BC2RGBAUnormSrgb,
- wgpu::TextureFormat::BC3RGBAUnorm,
- wgpu::TextureFormat::BC3RGBAUnormSrgb,
- wgpu::TextureFormat::BC4RUnorm,
- wgpu::TextureFormat::BC4RSnorm,
- wgpu::TextureFormat::BC5RGUnorm,
- wgpu::TextureFormat::BC5RGSnorm,
- wgpu::TextureFormat::BC6HRGBUfloat,
- wgpu::TextureFormat::BC6HRGBFloat,
- wgpu::TextureFormat::BC7RGBAUnorm,
- wgpu::TextureFormat::BC7RGBAUnormSrgb,
- wgpu::TextureFormat::ETC2RGB8Unorm,
- wgpu::TextureFormat::ETC2RGB8UnormSrgb,
- wgpu::TextureFormat::ETC2RGB8A1Unorm,
- wgpu::TextureFormat::ETC2RGB8A1UnormSrgb,
- wgpu::TextureFormat::ETC2RGBA8Unorm,
- wgpu::TextureFormat::ETC2RGBA8UnormSrgb,
- wgpu::TextureFormat::EACR11Unorm,
- wgpu::TextureFormat::EACR11Snorm,
- wgpu::TextureFormat::EACRG11Unorm,
- wgpu::TextureFormat::EACRG11Snorm,
- wgpu::TextureFormat::ASTC4x4Unorm,
- wgpu::TextureFormat::ASTC4x4UnormSrgb,
- wgpu::TextureFormat::ASTC5x4Unorm,
- wgpu::TextureFormat::ASTC5x4UnormSrgb,
- wgpu::TextureFormat::ASTC5x5Unorm,
- wgpu::TextureFormat::ASTC5x5UnormSrgb,
- wgpu::TextureFormat::ASTC6x5Unorm,
- wgpu::TextureFormat::ASTC6x5UnormSrgb,
- wgpu::TextureFormat::ASTC6x6Unorm,
- wgpu::TextureFormat::ASTC6x6UnormSrgb,
- wgpu::TextureFormat::ASTC8x5Unorm,
- wgpu::TextureFormat::ASTC8x5UnormSrgb,
- wgpu::TextureFormat::ASTC8x6Unorm,
- wgpu::TextureFormat::ASTC8x6UnormSrgb,
- wgpu::TextureFormat::ASTC8x8Unorm,
- wgpu::TextureFormat::ASTC8x8UnormSrgb,
- wgpu::TextureFormat::ASTC10x5Unorm,
- wgpu::TextureFormat::ASTC10x5UnormSrgb,
- wgpu::TextureFormat::ASTC10x6Unorm,
- wgpu::TextureFormat::ASTC10x6UnormSrgb,
- wgpu::TextureFormat::ASTC10x8Unorm,
- wgpu::TextureFormat::ASTC10x8UnormSrgb,
- wgpu::TextureFormat::ASTC10x10Unorm,
- wgpu::TextureFormat::ASTC10x10UnormSrgb,
- wgpu::TextureFormat::ASTC12x10Unorm,
- wgpu::TextureFormat::ASTC12x10UnormSrgb,
- wgpu::TextureFormat::ASTC12x12Unorm,
- wgpu::TextureFormat::ASTC12x12UnormSrgb};
-
- static constexpr std::array<wgpu::TextureFormat, 40> kFormatsInCoreSpec = {
- wgpu::TextureFormat::R8Unorm, wgpu::TextureFormat::R8Snorm,
- wgpu::TextureFormat::R8Uint, wgpu::TextureFormat::R8Sint,
- wgpu::TextureFormat::R16Uint, wgpu::TextureFormat::R16Sint,
- wgpu::TextureFormat::R16Float, wgpu::TextureFormat::RG8Unorm,
- wgpu::TextureFormat::RG8Snorm, wgpu::TextureFormat::RG8Uint,
- wgpu::TextureFormat::RG8Sint, wgpu::TextureFormat::R32Float,
- wgpu::TextureFormat::R32Uint, wgpu::TextureFormat::R32Sint,
- wgpu::TextureFormat::RG16Uint, wgpu::TextureFormat::RG16Sint,
- wgpu::TextureFormat::RG16Float, wgpu::TextureFormat::RGBA8Unorm,
- wgpu::TextureFormat::RGBA8UnormSrgb, wgpu::TextureFormat::RGBA8Snorm,
- wgpu::TextureFormat::RGBA8Uint, wgpu::TextureFormat::RGBA8Sint,
- wgpu::TextureFormat::BGRA8Unorm, wgpu::TextureFormat::BGRA8UnormSrgb,
- wgpu::TextureFormat::RGB10A2Unorm, wgpu::TextureFormat::RG11B10Ufloat,
- wgpu::TextureFormat::RGB9E5Ufloat, wgpu::TextureFormat::RG32Float,
- wgpu::TextureFormat::RG32Uint, wgpu::TextureFormat::RG32Sint,
- wgpu::TextureFormat::RGBA16Uint, wgpu::TextureFormat::RGBA16Sint,
- wgpu::TextureFormat::RGBA16Float, wgpu::TextureFormat::RGBA32Float,
- wgpu::TextureFormat::RGBA32Uint, wgpu::TextureFormat::RGBA32Sint,
- wgpu::TextureFormat::Depth16Unorm, wgpu::TextureFormat::Depth32Float,
- wgpu::TextureFormat::Depth24Plus, wgpu::TextureFormat::Depth24PlusStencil8,
- };
-
- static constexpr std::array<wgpu::TextureFormat, 14> kBCFormats = {
- wgpu::TextureFormat::BC1RGBAUnorm, wgpu::TextureFormat::BC1RGBAUnormSrgb,
- wgpu::TextureFormat::BC2RGBAUnorm, wgpu::TextureFormat::BC2RGBAUnormSrgb,
- wgpu::TextureFormat::BC3RGBAUnorm, wgpu::TextureFormat::BC3RGBAUnormSrgb,
- wgpu::TextureFormat::BC4RUnorm, wgpu::TextureFormat::BC4RSnorm,
- wgpu::TextureFormat::BC5RGUnorm, wgpu::TextureFormat::BC5RGSnorm,
- wgpu::TextureFormat::BC6HRGBUfloat, wgpu::TextureFormat::BC6HRGBFloat,
- wgpu::TextureFormat::BC7RGBAUnorm, wgpu::TextureFormat::BC7RGBAUnormSrgb};
-
- static constexpr std::array<wgpu::TextureFormat, 10> kETC2Formats = {
- wgpu::TextureFormat::ETC2RGB8Unorm, wgpu::TextureFormat::ETC2RGB8UnormSrgb,
- wgpu::TextureFormat::ETC2RGB8A1Unorm, wgpu::TextureFormat::ETC2RGB8A1UnormSrgb,
- wgpu::TextureFormat::ETC2RGBA8Unorm, wgpu::TextureFormat::ETC2RGBA8UnormSrgb,
- wgpu::TextureFormat::EACR11Unorm, wgpu::TextureFormat::EACR11Snorm,
- wgpu::TextureFormat::EACRG11Unorm, wgpu::TextureFormat::EACRG11Snorm};
-
- static constexpr std::array<wgpu::TextureFormat, 28> kASTCFormats = {
- wgpu::TextureFormat::ASTC4x4Unorm, wgpu::TextureFormat::ASTC4x4UnormSrgb,
- wgpu::TextureFormat::ASTC5x4Unorm, wgpu::TextureFormat::ASTC5x4UnormSrgb,
- wgpu::TextureFormat::ASTC5x5Unorm, wgpu::TextureFormat::ASTC5x5UnormSrgb,
- wgpu::TextureFormat::ASTC6x5Unorm, wgpu::TextureFormat::ASTC6x5UnormSrgb,
- wgpu::TextureFormat::ASTC6x6Unorm, wgpu::TextureFormat::ASTC6x6UnormSrgb,
- wgpu::TextureFormat::ASTC8x5Unorm, wgpu::TextureFormat::ASTC8x5UnormSrgb,
- wgpu::TextureFormat::ASTC8x6Unorm, wgpu::TextureFormat::ASTC8x6UnormSrgb,
- wgpu::TextureFormat::ASTC8x8Unorm, wgpu::TextureFormat::ASTC8x8UnormSrgb,
- wgpu::TextureFormat::ASTC10x5Unorm, wgpu::TextureFormat::ASTC10x5UnormSrgb,
- wgpu::TextureFormat::ASTC10x6Unorm, wgpu::TextureFormat::ASTC10x6UnormSrgb,
- wgpu::TextureFormat::ASTC10x8Unorm, wgpu::TextureFormat::ASTC10x8UnormSrgb,
- wgpu::TextureFormat::ASTC10x10Unorm, wgpu::TextureFormat::ASTC10x10UnormSrgb,
- wgpu::TextureFormat::ASTC12x10Unorm, wgpu::TextureFormat::ASTC12x10UnormSrgb,
- wgpu::TextureFormat::ASTC12x12Unorm, wgpu::TextureFormat::ASTC12x12UnormSrgb,
- };
-
- static constexpr std::array<wgpu::TextureFormat, 52> kCompressedFormats = {
- wgpu::TextureFormat::BC1RGBAUnorm, wgpu::TextureFormat::BC1RGBAUnormSrgb,
- wgpu::TextureFormat::BC2RGBAUnorm, wgpu::TextureFormat::BC2RGBAUnormSrgb,
- wgpu::TextureFormat::BC3RGBAUnorm, wgpu::TextureFormat::BC3RGBAUnormSrgb,
- wgpu::TextureFormat::BC4RUnorm, wgpu::TextureFormat::BC4RSnorm,
- wgpu::TextureFormat::BC5RGUnorm, wgpu::TextureFormat::BC5RGSnorm,
- wgpu::TextureFormat::BC6HRGBUfloat, wgpu::TextureFormat::BC6HRGBFloat,
- wgpu::TextureFormat::BC7RGBAUnorm, wgpu::TextureFormat::BC7RGBAUnormSrgb,
- wgpu::TextureFormat::ETC2RGB8Unorm, wgpu::TextureFormat::ETC2RGB8UnormSrgb,
- wgpu::TextureFormat::ETC2RGB8A1Unorm, wgpu::TextureFormat::ETC2RGB8A1UnormSrgb,
- wgpu::TextureFormat::ETC2RGBA8Unorm, wgpu::TextureFormat::ETC2RGBA8UnormSrgb,
- wgpu::TextureFormat::EACR11Unorm, wgpu::TextureFormat::EACR11Snorm,
- wgpu::TextureFormat::EACRG11Unorm, wgpu::TextureFormat::EACRG11Snorm,
- wgpu::TextureFormat::ASTC4x4Unorm, wgpu::TextureFormat::ASTC4x4UnormSrgb,
- wgpu::TextureFormat::ASTC5x4Unorm, wgpu::TextureFormat::ASTC5x4UnormSrgb,
- wgpu::TextureFormat::ASTC5x5Unorm, wgpu::TextureFormat::ASTC5x5UnormSrgb,
- wgpu::TextureFormat::ASTC6x5Unorm, wgpu::TextureFormat::ASTC6x5UnormSrgb,
- wgpu::TextureFormat::ASTC6x6Unorm, wgpu::TextureFormat::ASTC6x6UnormSrgb,
- wgpu::TextureFormat::ASTC8x5Unorm, wgpu::TextureFormat::ASTC8x5UnormSrgb,
- wgpu::TextureFormat::ASTC8x6Unorm, wgpu::TextureFormat::ASTC8x6UnormSrgb,
- wgpu::TextureFormat::ASTC8x8Unorm, wgpu::TextureFormat::ASTC8x8UnormSrgb,
- wgpu::TextureFormat::ASTC10x5Unorm, wgpu::TextureFormat::ASTC10x5UnormSrgb,
- wgpu::TextureFormat::ASTC10x6Unorm, wgpu::TextureFormat::ASTC10x6UnormSrgb,
- wgpu::TextureFormat::ASTC10x8Unorm, wgpu::TextureFormat::ASTC10x8UnormSrgb,
- wgpu::TextureFormat::ASTC10x10Unorm, wgpu::TextureFormat::ASTC10x10UnormSrgb,
- wgpu::TextureFormat::ASTC12x10Unorm, wgpu::TextureFormat::ASTC12x10UnormSrgb,
- wgpu::TextureFormat::ASTC12x12Unorm, wgpu::TextureFormat::ASTC12x12UnormSrgb};
- static_assert(kCompressedFormats.size() ==
- kBCFormats.size() + kETC2Formats.size() + kASTCFormats.size(),
- "Number of compressed format must equal number of BC, ETC2, and ASTC formats.");
-
- static constexpr std::array<wgpu::TextureFormat, 6> kDepthFormats = {
- wgpu::TextureFormat::Depth16Unorm, wgpu::TextureFormat::Depth32Float,
- wgpu::TextureFormat::Depth24Plus, wgpu::TextureFormat::Depth24PlusStencil8,
- wgpu::TextureFormat::Depth24UnormStencil8, wgpu::TextureFormat::Depth32FloatStencil8,
- };
- static constexpr std::array<wgpu::TextureFormat, 4> kStencilFormats = {
- wgpu::TextureFormat::Depth24PlusStencil8,
- wgpu::TextureFormat::Depth24UnormStencil8,
- wgpu::TextureFormat::Depth32FloatStencil8,
- wgpu::TextureFormat::Stencil8,
- };
- static constexpr std::array<wgpu::TextureFormat, 3> kDepthAndStencilFormats = {
- wgpu::TextureFormat::Depth24PlusStencil8,
- wgpu::TextureFormat::Depth24UnormStencil8,
- wgpu::TextureFormat::Depth32FloatStencil8,
- };
-
- bool TextureFormatSupportsStorageTexture(wgpu::TextureFormat format);
-
- bool IsBCTextureFormat(wgpu::TextureFormat textureFormat);
- bool IsETC2TextureFormat(wgpu::TextureFormat textureFormat);
- bool IsASTCTextureFormat(wgpu::TextureFormat textureFormat);
-
- bool IsDepthOnlyFormat(wgpu::TextureFormat textureFormat);
- bool IsStencilOnlyFormat(wgpu::TextureFormat textureFormat);
-
- bool TextureFormatSupportsMultisampling(wgpu::TextureFormat textureFormat);
- bool TextureFormatSupportsResolveTarget(wgpu::TextureFormat textureFormat);
- bool TextureFormatSupportsRendering(wgpu::TextureFormat textureFormat);
-
- uint32_t GetTexelBlockSizeInBytes(wgpu::TextureFormat textureFormat);
- uint32_t GetTextureFormatBlockWidth(wgpu::TextureFormat textureFormat);
- uint32_t GetTextureFormatBlockHeight(wgpu::TextureFormat textureFormat);
-
- const char* GetWGSLColorTextureComponentType(wgpu::TextureFormat textureFormat);
- const char* GetWGSLImageFormatQualifier(wgpu::TextureFormat textureFormat);
- uint32_t GetWGSLRenderableColorTextureComponentCount(wgpu::TextureFormat textureFormat);
-
- wgpu::TextureDimension ViewDimensionToTextureDimension(
- const wgpu::TextureViewDimension dimension);
+static constexpr std::array<wgpu::TextureFormat, 95> kAllTextureFormats = {
+ wgpu::TextureFormat::R8Unorm,
+ wgpu::TextureFormat::R8Snorm,
+ wgpu::TextureFormat::R8Uint,
+ wgpu::TextureFormat::R8Sint,
+ wgpu::TextureFormat::R16Uint,
+ wgpu::TextureFormat::R16Sint,
+ wgpu::TextureFormat::R16Float,
+ wgpu::TextureFormat::RG8Unorm,
+ wgpu::TextureFormat::RG8Snorm,
+ wgpu::TextureFormat::RG8Uint,
+ wgpu::TextureFormat::RG8Sint,
+ wgpu::TextureFormat::R32Float,
+ wgpu::TextureFormat::R32Uint,
+ wgpu::TextureFormat::R32Sint,
+ wgpu::TextureFormat::RG16Uint,
+ wgpu::TextureFormat::RG16Sint,
+ wgpu::TextureFormat::RG16Float,
+ wgpu::TextureFormat::RGBA8Unorm,
+ wgpu::TextureFormat::RGBA8UnormSrgb,
+ wgpu::TextureFormat::RGBA8Snorm,
+ wgpu::TextureFormat::RGBA8Uint,
+ wgpu::TextureFormat::RGBA8Sint,
+ wgpu::TextureFormat::BGRA8Unorm,
+ wgpu::TextureFormat::BGRA8UnormSrgb,
+ wgpu::TextureFormat::RGB10A2Unorm,
+ wgpu::TextureFormat::RG11B10Ufloat,
+ wgpu::TextureFormat::RGB9E5Ufloat,
+ wgpu::TextureFormat::RG32Float,
+ wgpu::TextureFormat::RG32Uint,
+ wgpu::TextureFormat::RG32Sint,
+ wgpu::TextureFormat::RGBA16Uint,
+ wgpu::TextureFormat::RGBA16Sint,
+ wgpu::TextureFormat::RGBA16Float,
+ wgpu::TextureFormat::RGBA32Float,
+ wgpu::TextureFormat::RGBA32Uint,
+ wgpu::TextureFormat::RGBA32Sint,
+ wgpu::TextureFormat::Depth16Unorm,
+ wgpu::TextureFormat::Depth32Float,
+ wgpu::TextureFormat::Depth24Plus,
+ wgpu::TextureFormat::Depth24PlusStencil8,
+ wgpu::TextureFormat::Depth24UnormStencil8,
+ wgpu::TextureFormat::Depth32FloatStencil8,
+ wgpu::TextureFormat::Stencil8,
+ wgpu::TextureFormat::BC1RGBAUnorm,
+ wgpu::TextureFormat::BC1RGBAUnormSrgb,
+ wgpu::TextureFormat::BC2RGBAUnorm,
+ wgpu::TextureFormat::BC2RGBAUnormSrgb,
+ wgpu::TextureFormat::BC3RGBAUnorm,
+ wgpu::TextureFormat::BC3RGBAUnormSrgb,
+ wgpu::TextureFormat::BC4RUnorm,
+ wgpu::TextureFormat::BC4RSnorm,
+ wgpu::TextureFormat::BC5RGUnorm,
+ wgpu::TextureFormat::BC5RGSnorm,
+ wgpu::TextureFormat::BC6HRGBUfloat,
+ wgpu::TextureFormat::BC6HRGBFloat,
+ wgpu::TextureFormat::BC7RGBAUnorm,
+ wgpu::TextureFormat::BC7RGBAUnormSrgb,
+ wgpu::TextureFormat::ETC2RGB8Unorm,
+ wgpu::TextureFormat::ETC2RGB8UnormSrgb,
+ wgpu::TextureFormat::ETC2RGB8A1Unorm,
+ wgpu::TextureFormat::ETC2RGB8A1UnormSrgb,
+ wgpu::TextureFormat::ETC2RGBA8Unorm,
+ wgpu::TextureFormat::ETC2RGBA8UnormSrgb,
+ wgpu::TextureFormat::EACR11Unorm,
+ wgpu::TextureFormat::EACR11Snorm,
+ wgpu::TextureFormat::EACRG11Unorm,
+ wgpu::TextureFormat::EACRG11Snorm,
+ wgpu::TextureFormat::ASTC4x4Unorm,
+ wgpu::TextureFormat::ASTC4x4UnormSrgb,
+ wgpu::TextureFormat::ASTC5x4Unorm,
+ wgpu::TextureFormat::ASTC5x4UnormSrgb,
+ wgpu::TextureFormat::ASTC5x5Unorm,
+ wgpu::TextureFormat::ASTC5x5UnormSrgb,
+ wgpu::TextureFormat::ASTC6x5Unorm,
+ wgpu::TextureFormat::ASTC6x5UnormSrgb,
+ wgpu::TextureFormat::ASTC6x6Unorm,
+ wgpu::TextureFormat::ASTC6x6UnormSrgb,
+ wgpu::TextureFormat::ASTC8x5Unorm,
+ wgpu::TextureFormat::ASTC8x5UnormSrgb,
+ wgpu::TextureFormat::ASTC8x6Unorm,
+ wgpu::TextureFormat::ASTC8x6UnormSrgb,
+ wgpu::TextureFormat::ASTC8x8Unorm,
+ wgpu::TextureFormat::ASTC8x8UnormSrgb,
+ wgpu::TextureFormat::ASTC10x5Unorm,
+ wgpu::TextureFormat::ASTC10x5UnormSrgb,
+ wgpu::TextureFormat::ASTC10x6Unorm,
+ wgpu::TextureFormat::ASTC10x6UnormSrgb,
+ wgpu::TextureFormat::ASTC10x8Unorm,
+ wgpu::TextureFormat::ASTC10x8UnormSrgb,
+ wgpu::TextureFormat::ASTC10x10Unorm,
+ wgpu::TextureFormat::ASTC10x10UnormSrgb,
+ wgpu::TextureFormat::ASTC12x10Unorm,
+ wgpu::TextureFormat::ASTC12x10UnormSrgb,
+ wgpu::TextureFormat::ASTC12x12Unorm,
+ wgpu::TextureFormat::ASTC12x12UnormSrgb};
+
+static constexpr std::array<wgpu::TextureFormat, 40> kFormatsInCoreSpec = {
+ wgpu::TextureFormat::R8Unorm, wgpu::TextureFormat::R8Snorm,
+ wgpu::TextureFormat::R8Uint, wgpu::TextureFormat::R8Sint,
+ wgpu::TextureFormat::R16Uint, wgpu::TextureFormat::R16Sint,
+ wgpu::TextureFormat::R16Float, wgpu::TextureFormat::RG8Unorm,
+ wgpu::TextureFormat::RG8Snorm, wgpu::TextureFormat::RG8Uint,
+ wgpu::TextureFormat::RG8Sint, wgpu::TextureFormat::R32Float,
+ wgpu::TextureFormat::R32Uint, wgpu::TextureFormat::R32Sint,
+ wgpu::TextureFormat::RG16Uint, wgpu::TextureFormat::RG16Sint,
+ wgpu::TextureFormat::RG16Float, wgpu::TextureFormat::RGBA8Unorm,
+ wgpu::TextureFormat::RGBA8UnormSrgb, wgpu::TextureFormat::RGBA8Snorm,
+ wgpu::TextureFormat::RGBA8Uint, wgpu::TextureFormat::RGBA8Sint,
+ wgpu::TextureFormat::BGRA8Unorm, wgpu::TextureFormat::BGRA8UnormSrgb,
+ wgpu::TextureFormat::RGB10A2Unorm, wgpu::TextureFormat::RG11B10Ufloat,
+ wgpu::TextureFormat::RGB9E5Ufloat, wgpu::TextureFormat::RG32Float,
+ wgpu::TextureFormat::RG32Uint, wgpu::TextureFormat::RG32Sint,
+ wgpu::TextureFormat::RGBA16Uint, wgpu::TextureFormat::RGBA16Sint,
+ wgpu::TextureFormat::RGBA16Float, wgpu::TextureFormat::RGBA32Float,
+ wgpu::TextureFormat::RGBA32Uint, wgpu::TextureFormat::RGBA32Sint,
+ wgpu::TextureFormat::Depth16Unorm, wgpu::TextureFormat::Depth32Float,
+ wgpu::TextureFormat::Depth24Plus, wgpu::TextureFormat::Depth24PlusStencil8,
+};
+
+static constexpr std::array<wgpu::TextureFormat, 14> kBCFormats = {
+ wgpu::TextureFormat::BC1RGBAUnorm, wgpu::TextureFormat::BC1RGBAUnormSrgb,
+ wgpu::TextureFormat::BC2RGBAUnorm, wgpu::TextureFormat::BC2RGBAUnormSrgb,
+ wgpu::TextureFormat::BC3RGBAUnorm, wgpu::TextureFormat::BC3RGBAUnormSrgb,
+ wgpu::TextureFormat::BC4RUnorm, wgpu::TextureFormat::BC4RSnorm,
+ wgpu::TextureFormat::BC5RGUnorm, wgpu::TextureFormat::BC5RGSnorm,
+ wgpu::TextureFormat::BC6HRGBUfloat, wgpu::TextureFormat::BC6HRGBFloat,
+ wgpu::TextureFormat::BC7RGBAUnorm, wgpu::TextureFormat::BC7RGBAUnormSrgb};
+
+static constexpr std::array<wgpu::TextureFormat, 10> kETC2Formats = {
+ wgpu::TextureFormat::ETC2RGB8Unorm, wgpu::TextureFormat::ETC2RGB8UnormSrgb,
+ wgpu::TextureFormat::ETC2RGB8A1Unorm, wgpu::TextureFormat::ETC2RGB8A1UnormSrgb,
+ wgpu::TextureFormat::ETC2RGBA8Unorm, wgpu::TextureFormat::ETC2RGBA8UnormSrgb,
+ wgpu::TextureFormat::EACR11Unorm, wgpu::TextureFormat::EACR11Snorm,
+ wgpu::TextureFormat::EACRG11Unorm, wgpu::TextureFormat::EACRG11Snorm};
+
+static constexpr std::array<wgpu::TextureFormat, 28> kASTCFormats = {
+ wgpu::TextureFormat::ASTC4x4Unorm, wgpu::TextureFormat::ASTC4x4UnormSrgb,
+ wgpu::TextureFormat::ASTC5x4Unorm, wgpu::TextureFormat::ASTC5x4UnormSrgb,
+ wgpu::TextureFormat::ASTC5x5Unorm, wgpu::TextureFormat::ASTC5x5UnormSrgb,
+ wgpu::TextureFormat::ASTC6x5Unorm, wgpu::TextureFormat::ASTC6x5UnormSrgb,
+ wgpu::TextureFormat::ASTC6x6Unorm, wgpu::TextureFormat::ASTC6x6UnormSrgb,
+ wgpu::TextureFormat::ASTC8x5Unorm, wgpu::TextureFormat::ASTC8x5UnormSrgb,
+ wgpu::TextureFormat::ASTC8x6Unorm, wgpu::TextureFormat::ASTC8x6UnormSrgb,
+ wgpu::TextureFormat::ASTC8x8Unorm, wgpu::TextureFormat::ASTC8x8UnormSrgb,
+ wgpu::TextureFormat::ASTC10x5Unorm, wgpu::TextureFormat::ASTC10x5UnormSrgb,
+ wgpu::TextureFormat::ASTC10x6Unorm, wgpu::TextureFormat::ASTC10x6UnormSrgb,
+ wgpu::TextureFormat::ASTC10x8Unorm, wgpu::TextureFormat::ASTC10x8UnormSrgb,
+ wgpu::TextureFormat::ASTC10x10Unorm, wgpu::TextureFormat::ASTC10x10UnormSrgb,
+ wgpu::TextureFormat::ASTC12x10Unorm, wgpu::TextureFormat::ASTC12x10UnormSrgb,
+ wgpu::TextureFormat::ASTC12x12Unorm, wgpu::TextureFormat::ASTC12x12UnormSrgb,
+};
+
+static constexpr std::array<wgpu::TextureFormat, 52> kCompressedFormats = {
+ wgpu::TextureFormat::BC1RGBAUnorm, wgpu::TextureFormat::BC1RGBAUnormSrgb,
+ wgpu::TextureFormat::BC2RGBAUnorm, wgpu::TextureFormat::BC2RGBAUnormSrgb,
+ wgpu::TextureFormat::BC3RGBAUnorm, wgpu::TextureFormat::BC3RGBAUnormSrgb,
+ wgpu::TextureFormat::BC4RUnorm, wgpu::TextureFormat::BC4RSnorm,
+ wgpu::TextureFormat::BC5RGUnorm, wgpu::TextureFormat::BC5RGSnorm,
+ wgpu::TextureFormat::BC6HRGBUfloat, wgpu::TextureFormat::BC6HRGBFloat,
+ wgpu::TextureFormat::BC7RGBAUnorm, wgpu::TextureFormat::BC7RGBAUnormSrgb,
+ wgpu::TextureFormat::ETC2RGB8Unorm, wgpu::TextureFormat::ETC2RGB8UnormSrgb,
+ wgpu::TextureFormat::ETC2RGB8A1Unorm, wgpu::TextureFormat::ETC2RGB8A1UnormSrgb,
+ wgpu::TextureFormat::ETC2RGBA8Unorm, wgpu::TextureFormat::ETC2RGBA8UnormSrgb,
+ wgpu::TextureFormat::EACR11Unorm, wgpu::TextureFormat::EACR11Snorm,
+ wgpu::TextureFormat::EACRG11Unorm, wgpu::TextureFormat::EACRG11Snorm,
+ wgpu::TextureFormat::ASTC4x4Unorm, wgpu::TextureFormat::ASTC4x4UnormSrgb,
+ wgpu::TextureFormat::ASTC5x4Unorm, wgpu::TextureFormat::ASTC5x4UnormSrgb,
+ wgpu::TextureFormat::ASTC5x5Unorm, wgpu::TextureFormat::ASTC5x5UnormSrgb,
+ wgpu::TextureFormat::ASTC6x5Unorm, wgpu::TextureFormat::ASTC6x5UnormSrgb,
+ wgpu::TextureFormat::ASTC6x6Unorm, wgpu::TextureFormat::ASTC6x6UnormSrgb,
+ wgpu::TextureFormat::ASTC8x5Unorm, wgpu::TextureFormat::ASTC8x5UnormSrgb,
+ wgpu::TextureFormat::ASTC8x6Unorm, wgpu::TextureFormat::ASTC8x6UnormSrgb,
+ wgpu::TextureFormat::ASTC8x8Unorm, wgpu::TextureFormat::ASTC8x8UnormSrgb,
+ wgpu::TextureFormat::ASTC10x5Unorm, wgpu::TextureFormat::ASTC10x5UnormSrgb,
+ wgpu::TextureFormat::ASTC10x6Unorm, wgpu::TextureFormat::ASTC10x6UnormSrgb,
+ wgpu::TextureFormat::ASTC10x8Unorm, wgpu::TextureFormat::ASTC10x8UnormSrgb,
+ wgpu::TextureFormat::ASTC10x10Unorm, wgpu::TextureFormat::ASTC10x10UnormSrgb,
+ wgpu::TextureFormat::ASTC12x10Unorm, wgpu::TextureFormat::ASTC12x10UnormSrgb,
+ wgpu::TextureFormat::ASTC12x12Unorm, wgpu::TextureFormat::ASTC12x12UnormSrgb};
+static_assert(kCompressedFormats.size() ==
+ kBCFormats.size() + kETC2Formats.size() + kASTCFormats.size(),
+              "Number of compressed formats must equal the number of BC, ETC2, and ASTC formats.");
+
+static constexpr std::array<wgpu::TextureFormat, 6> kDepthFormats = {
+ wgpu::TextureFormat::Depth16Unorm, wgpu::TextureFormat::Depth32Float,
+ wgpu::TextureFormat::Depth24Plus, wgpu::TextureFormat::Depth24PlusStencil8,
+ wgpu::TextureFormat::Depth24UnormStencil8, wgpu::TextureFormat::Depth32FloatStencil8,
+};
+static constexpr std::array<wgpu::TextureFormat, 4> kStencilFormats = {
+ wgpu::TextureFormat::Depth24PlusStencil8,
+ wgpu::TextureFormat::Depth24UnormStencil8,
+ wgpu::TextureFormat::Depth32FloatStencil8,
+ wgpu::TextureFormat::Stencil8,
+};
+static constexpr std::array<wgpu::TextureFormat, 3> kDepthAndStencilFormats = {
+ wgpu::TextureFormat::Depth24PlusStencil8,
+ wgpu::TextureFormat::Depth24UnormStencil8,
+ wgpu::TextureFormat::Depth32FloatStencil8,
+};
+
+bool TextureFormatSupportsStorageTexture(wgpu::TextureFormat format);
+
+bool IsBCTextureFormat(wgpu::TextureFormat textureFormat);
+bool IsETC2TextureFormat(wgpu::TextureFormat textureFormat);
+bool IsASTCTextureFormat(wgpu::TextureFormat textureFormat);
+
+bool IsDepthOnlyFormat(wgpu::TextureFormat textureFormat);
+bool IsStencilOnlyFormat(wgpu::TextureFormat textureFormat);
+
+bool TextureFormatSupportsMultisampling(wgpu::TextureFormat textureFormat);
+bool TextureFormatSupportsResolveTarget(wgpu::TextureFormat textureFormat);
+bool TextureFormatSupportsRendering(wgpu::TextureFormat textureFormat);
+
+uint32_t GetTexelBlockSizeInBytes(wgpu::TextureFormat textureFormat);
+uint32_t GetTextureFormatBlockWidth(wgpu::TextureFormat textureFormat);
+uint32_t GetTextureFormatBlockHeight(wgpu::TextureFormat textureFormat);
+
+const char* GetWGSLColorTextureComponentType(wgpu::TextureFormat textureFormat);
+const char* GetWGSLImageFormatQualifier(wgpu::TextureFormat textureFormat);
+uint32_t GetWGSLRenderableColorTextureComponentCount(wgpu::TextureFormat textureFormat);
+
+wgpu::TextureDimension ViewDimensionToTextureDimension(const wgpu::TextureViewDimension dimension);
} // namespace utils
#endif // SRC_DAWN_UTILS_TEXTUREUTILS_H_
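A minimal sketch of how the format tables and query helpers declared above are typically combined. The width/height parameters and the printf reporting are assumptions for the example, not part of this patch.

// Illustrative only: size one mip level for each compressed format in kCompressedFormats.
#include <cstdint>
#include <cstdio>

#include "dawn/utils/TextureUtils.h"

void PrintCompressedLevelSizes(uint32_t width, uint32_t height) {
    for (wgpu::TextureFormat format : utils::kCompressedFormats) {
        uint32_t blockWidth = utils::GetTextureFormatBlockWidth(format);
        uint32_t blockHeight = utils::GetTextureFormatBlockHeight(format);
        uint32_t bytesPerBlock = utils::GetTexelBlockSizeInBytes(format);
        // Round the extent up to whole compression blocks before sizing the level.
        uint32_t blocksWide = (width + blockWidth - 1) / blockWidth;
        uint32_t blocksHigh = (height + blockHeight - 1) / blockHeight;
        std::printf("format %u: %u bytes\n", static_cast<uint32_t>(format),
                    blocksWide * blocksHigh * bytesPerBlock);
    }
}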
diff --git a/chromium/third_party/dawn/src/dawn/utils/Timer.h b/chromium/third_party/dawn/src/dawn/utils/Timer.h
index cc6134399b1..a7438e78a88 100644
--- a/chromium/third_party/dawn/src/dawn/utils/Timer.h
+++ b/chromium/third_party/dawn/src/dawn/utils/Timer.h
@@ -17,24 +17,23 @@
namespace utils {
- class Timer {
- public:
- virtual ~Timer() {
- }
-
- // Timer functionality: Use start() and stop() to record the duration and use
- // getElapsedTime() to query that duration. If getElapsedTime() is called in between, it
- // will report the elapsed time since start().
- virtual void Start() = 0;
- virtual void Stop() = 0;
- virtual double GetElapsedTime() const = 0;
-
- // Timestamp functionality: Use getAbsoluteTime() to get an absolute time with an unknown
- // origin. This time moves forward regardless of start()/stop().
- virtual double GetAbsoluteTime() = 0;
- };
-
- Timer* CreateTimer();
+class Timer {
+ public:
+ virtual ~Timer() {}
+
+    // Timer functionality: Use Start() and Stop() to record the duration and use
+    // GetElapsedTime() to query that duration. If GetElapsedTime() is called before Stop(), it
+    // will report the elapsed time since Start().
+ virtual void Start() = 0;
+ virtual void Stop() = 0;
+ virtual double GetElapsedTime() const = 0;
+
+    // Timestamp functionality: Use GetAbsoluteTime() to get an absolute time with an unknown
+    // origin. This time moves forward regardless of Start()/Stop().
+ virtual double GetAbsoluteTime() = 0;
+};
+
+Timer* CreateTimer();
} // namespace utils
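A minimal usage sketch for the Timer interface declared above; the timed workload is a placeholder assumption.

#include <cstdio>
#include <memory>

#include "dawn/utils/Timer.h"

void MeasureWorkload() {
    std::unique_ptr<utils::Timer> timer(utils::CreateTimer());
    timer->Start();
    // ... run the workload being measured (placeholder) ...
    timer->Stop();
    // GetElapsedTime() reports the seconds between Start() and Stop().
    std::printf("elapsed: %f s\n", timer->GetElapsedTime());
}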
diff --git a/chromium/third_party/dawn/src/dawn/utils/VulkanBinding.cpp b/chromium/third_party/dawn/src/dawn/utils/VulkanBinding.cpp
index fc94090a0b2..1736de82320 100644
--- a/chromium/third_party/dawn/src/dawn/utils/VulkanBinding.cpp
+++ b/chromium/third_party/dawn/src/dawn/utils/VulkanBinding.cpp
@@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+#include <memory>
+
#include "dawn/utils/BackendBinding.h"
#include "dawn/common/Assert.h"
@@ -20,38 +22,35 @@
// Include GLFW after VulkanBackend so that it declares the Vulkan-specific functions
#include "GLFW/glfw3.h"
-#include <memory>
-
namespace utils {
- class VulkanBinding : public BackendBinding {
- public:
- VulkanBinding(GLFWwindow* window, WGPUDevice device) : BackendBinding(window, device) {
- }
-
- uint64_t GetSwapChainImplementation() override {
- if (mSwapchainImpl.userData == nullptr) {
- VkSurfaceKHR surface = VK_NULL_HANDLE;
- if (glfwCreateWindowSurface(dawn::native::vulkan::GetInstance(mDevice), mWindow,
- nullptr, &surface) != VK_SUCCESS) {
- ASSERT(false);
- }
+class VulkanBinding : public BackendBinding {
+ public:
+ VulkanBinding(GLFWwindow* window, WGPUDevice device) : BackendBinding(window, device) {}
- mSwapchainImpl = dawn::native::vulkan::CreateNativeSwapChainImpl(mDevice, surface);
+ uint64_t GetSwapChainImplementation() override {
+ if (mSwapchainImpl.userData == nullptr) {
+ VkSurfaceKHR surface = VK_NULL_HANDLE;
+ if (glfwCreateWindowSurface(dawn::native::vulkan::GetInstance(mDevice), mWindow,
+ nullptr, &surface) != VK_SUCCESS) {
+ ASSERT(false);
}
- return reinterpret_cast<uint64_t>(&mSwapchainImpl);
- }
- WGPUTextureFormat GetPreferredSwapChainTextureFormat() override {
- ASSERT(mSwapchainImpl.userData != nullptr);
- return dawn::native::vulkan::GetNativeSwapChainPreferredFormat(&mSwapchainImpl);
+
+ mSwapchainImpl = dawn::native::vulkan::CreateNativeSwapChainImpl(mDevice, surface);
}
+ return reinterpret_cast<uint64_t>(&mSwapchainImpl);
+ }
+ WGPUTextureFormat GetPreferredSwapChainTextureFormat() override {
+ ASSERT(mSwapchainImpl.userData != nullptr);
+ return dawn::native::vulkan::GetNativeSwapChainPreferredFormat(&mSwapchainImpl);
+ }
- private:
- DawnSwapChainImplementation mSwapchainImpl = {};
- };
+ private:
+ DawnSwapChainImplementation mSwapchainImpl = {};
+};
- BackendBinding* CreateVulkanBinding(GLFWwindow* window, WGPUDevice device) {
- return new VulkanBinding(window, device);
- }
+BackendBinding* CreateVulkanBinding(GLFWwindow* window, WGPUDevice device) {
+ return new VulkanBinding(window, device);
+}
} // namespace utils
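An illustrative sketch of how a sample might create the Vulkan binding. GLFW initialization and device creation happen elsewhere and are assumed here; the window size and title are arbitrary, and CreateVulkanBinding is assumed to be declared in BackendBinding.h.

#include "GLFW/glfw3.h"
#include "dawn/utils/BackendBinding.h"

utils::BackendBinding* SetUpVulkanBinding(WGPUDevice device) {
    // Ask GLFW for a window without an OpenGL context; the Vulkan surface is
    // created inside the binding via glfwCreateWindowSurface().
    glfwWindowHint(GLFW_CLIENT_API, GLFW_NO_API);
    GLFWwindow* window = glfwCreateWindow(640, 480, "dawn-sample", nullptr, nullptr);

    utils::BackendBinding* binding = utils::CreateVulkanBinding(window, device);
    // The swapchain implementation is created lazily on first use.
    binding->GetSwapChainImplementation();
    return binding;
}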
diff --git a/chromium/third_party/dawn/src/dawn/utils/WGPUHelpers.cpp b/chromium/third_party/dawn/src/dawn/utils/WGPUHelpers.cpp
index 537cdcbcdbd..dda2cabe2bb 100644
--- a/chromium/third_party/dawn/src/dawn/utils/WGPUHelpers.cpp
+++ b/chromium/third_party/dawn/src/dawn/utils/WGPUHelpers.cpp
@@ -14,378 +14,379 @@
#include "dawn/utils/WGPUHelpers.h"
-#include "dawn/common/Constants.h"
-#include "dawn/common/Log.h"
-
-#include "spirv-tools/optimizer.hpp"
-
#include <cstring>
#include <iomanip>
#include <limits>
#include <mutex>
#include <sstream>
+#include "dawn/common/Constants.h"
+#include "dawn/common/Log.h"
+#include "dawn/common/Numeric.h"
+
+#include "spirv-tools/optimizer.hpp"
+
namespace utils {
- wgpu::ShaderModule CreateShaderModuleFromASM(const wgpu::Device& device, const char* source) {
- // Use SPIRV-Tools's C API to assemble the SPIR-V assembly text to binary. Because the types
- // aren't RAII, we don't return directly on success and instead always go through the code
- // path that destroys the SPIRV-Tools objects.
- wgpu::ShaderModule result = nullptr;
-
- spv_context context = spvContextCreate(SPV_ENV_UNIVERSAL_1_3);
- ASSERT(context != nullptr);
-
- spv_binary spirv = nullptr;
- spv_diagnostic diagnostic = nullptr;
- if (spvTextToBinary(context, source, strlen(source), &spirv, &diagnostic) == SPV_SUCCESS) {
- ASSERT(spirv != nullptr);
- ASSERT(spirv->wordCount <= std::numeric_limits<uint32_t>::max());
-
- wgpu::ShaderModuleSPIRVDescriptor spirvDesc;
- spirvDesc.codeSize = static_cast<uint32_t>(spirv->wordCount);
- spirvDesc.code = spirv->code;
-
- wgpu::ShaderModuleDescriptor descriptor;
- descriptor.nextInChain = &spirvDesc;
- result = device.CreateShaderModule(&descriptor);
- } else {
- ASSERT(diagnostic != nullptr);
- dawn::WarningLog() << "CreateShaderModuleFromASM SPIRV assembly error:"
- << diagnostic->position.line + 1 << ":"
- << diagnostic->position.column + 1 << ": " << diagnostic->error;
- }
+wgpu::ShaderModule CreateShaderModuleFromASM(const wgpu::Device& device, const char* source) {
+ // Use SPIRV-Tools's C API to assemble the SPIR-V assembly text to binary. Because the types
+ // aren't RAII, we don't return directly on success and instead always go through the code
+ // path that destroys the SPIRV-Tools objects.
+ wgpu::ShaderModule result = nullptr;
- spvDiagnosticDestroy(diagnostic);
- spvBinaryDestroy(spirv);
- spvContextDestroy(context);
+ spv_context context = spvContextCreate(SPV_ENV_UNIVERSAL_1_3);
+ ASSERT(context != nullptr);
- return result;
- }
+ spv_binary spirv = nullptr;
+ spv_diagnostic diagnostic = nullptr;
+ if (spvTextToBinary(context, source, strlen(source), &spirv, &diagnostic) == SPV_SUCCESS) {
+ ASSERT(spirv != nullptr);
+ ASSERT(spirv->wordCount <= std::numeric_limits<uint32_t>::max());
+
+ wgpu::ShaderModuleSPIRVDescriptor spirvDesc;
+ spirvDesc.codeSize = static_cast<uint32_t>(spirv->wordCount);
+ spirvDesc.code = spirv->code;
- wgpu::ShaderModule CreateShaderModule(const wgpu::Device& device, const char* source) {
- wgpu::ShaderModuleWGSLDescriptor wgslDesc;
- wgslDesc.source = source;
wgpu::ShaderModuleDescriptor descriptor;
- descriptor.nextInChain = &wgslDesc;
- return device.CreateShaderModule(&descriptor);
+ descriptor.nextInChain = &spirvDesc;
+ result = device.CreateShaderModule(&descriptor);
+ } else {
+ ASSERT(diagnostic != nullptr);
+ dawn::WarningLog() << "CreateShaderModuleFromASM SPIRV assembly error:"
+ << diagnostic->position.line + 1 << ":"
+ << diagnostic->position.column + 1 << ": " << diagnostic->error;
}
- wgpu::Buffer CreateBufferFromData(const wgpu::Device& device,
- const void* data,
- uint64_t size,
- wgpu::BufferUsage usage) {
- wgpu::BufferDescriptor descriptor;
- descriptor.size = size;
- descriptor.usage = usage | wgpu::BufferUsage::CopyDst;
- wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
-
- device.GetQueue().WriteBuffer(buffer, 0, data, size);
- return buffer;
+ spvDiagnosticDestroy(diagnostic);
+ spvBinaryDestroy(spirv);
+ spvContextDestroy(context);
+
+ return result;
+}
+
+wgpu::ShaderModule CreateShaderModule(const wgpu::Device& device, const char* source) {
+ wgpu::ShaderModuleWGSLDescriptor wgslDesc;
+ wgslDesc.source = source;
+ wgpu::ShaderModuleDescriptor descriptor;
+ descriptor.nextInChain = &wgslDesc;
+ return device.CreateShaderModule(&descriptor);
+}
+
+wgpu::Buffer CreateBufferFromData(const wgpu::Device& device,
+ const void* data,
+ uint64_t size,
+ wgpu::BufferUsage usage) {
+ wgpu::BufferDescriptor descriptor;
+ descriptor.size = size;
+ descriptor.usage = usage | wgpu::BufferUsage::CopyDst;
+ wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
+
+ device.GetQueue().WriteBuffer(buffer, 0, data, size);
+ return buffer;
+}
+
+ComboRenderPassDescriptor::ComboRenderPassDescriptor(
+ std::initializer_list<wgpu::TextureView> colorAttachmentInfo,
+ wgpu::TextureView depthStencil) {
+ for (uint32_t i = 0; i < kMaxColorAttachments; ++i) {
+ cColorAttachments[i].loadOp = wgpu::LoadOp::Clear;
+ cColorAttachments[i].storeOp = wgpu::StoreOp::Store;
+ cColorAttachments[i].clearValue = {0.0f, 0.0f, 0.0f, 0.0f};
}
- ComboRenderPassDescriptor::ComboRenderPassDescriptor(
- std::initializer_list<wgpu::TextureView> colorAttachmentInfo,
- wgpu::TextureView depthStencil) {
- for (uint32_t i = 0; i < kMaxColorAttachments; ++i) {
- cColorAttachments[i].loadOp = wgpu::LoadOp::Clear;
- cColorAttachments[i].storeOp = wgpu::StoreOp::Store;
- cColorAttachments[i].clearValue = {0.0f, 0.0f, 0.0f, 0.0f};
- }
-
- cDepthStencilAttachmentInfo.depthClearValue = 1.0f;
- cDepthStencilAttachmentInfo.stencilClearValue = 0;
- cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Clear;
- cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Store;
- cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Clear;
- cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Store;
-
- colorAttachmentCount = static_cast<uint32_t>(colorAttachmentInfo.size());
- uint32_t colorAttachmentIndex = 0;
- for (const wgpu::TextureView& colorAttachment : colorAttachmentInfo) {
- if (colorAttachment.Get() != nullptr) {
- cColorAttachments[colorAttachmentIndex].view = colorAttachment;
- }
- ++colorAttachmentIndex;
- }
- colorAttachments = cColorAttachments.data();
-
- if (depthStencil.Get() != nullptr) {
- cDepthStencilAttachmentInfo.view = depthStencil;
- depthStencilAttachment = &cDepthStencilAttachmentInfo;
- } else {
- depthStencilAttachment = nullptr;
+ cDepthStencilAttachmentInfo.depthClearValue = 1.0f;
+ cDepthStencilAttachmentInfo.stencilClearValue = 0;
+ cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Clear;
+ cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Store;
+ cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Clear;
+ cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Store;
+
+ colorAttachmentCount = static_cast<uint32_t>(colorAttachmentInfo.size());
+ uint32_t colorAttachmentIndex = 0;
+ for (const wgpu::TextureView& colorAttachment : colorAttachmentInfo) {
+ if (colorAttachment.Get() != nullptr) {
+ cColorAttachments[colorAttachmentIndex].view = colorAttachment;
}
+ ++colorAttachmentIndex;
}
+ colorAttachments = cColorAttachments.data();
- ComboRenderPassDescriptor::ComboRenderPassDescriptor(const ComboRenderPassDescriptor& other) {
- *this = other;
+ if (depthStencil.Get() != nullptr) {
+ cDepthStencilAttachmentInfo.view = depthStencil;
+ depthStencilAttachment = &cDepthStencilAttachmentInfo;
+ } else {
+ depthStencilAttachment = nullptr;
}
+}
- const ComboRenderPassDescriptor& ComboRenderPassDescriptor::operator=(
- const ComboRenderPassDescriptor& otherRenderPass) {
- cDepthStencilAttachmentInfo = otherRenderPass.cDepthStencilAttachmentInfo;
- cColorAttachments = otherRenderPass.cColorAttachments;
- colorAttachmentCount = otherRenderPass.colorAttachmentCount;
+ComboRenderPassDescriptor::~ComboRenderPassDescriptor() = default;
- colorAttachments = cColorAttachments.data();
-
- if (otherRenderPass.depthStencilAttachment != nullptr) {
- // Assign desc.depthStencilAttachment to this->depthStencilAttachmentInfo;
- depthStencilAttachment = &cDepthStencilAttachmentInfo;
- } else {
- depthStencilAttachment = nullptr;
- }
+ComboRenderPassDescriptor::ComboRenderPassDescriptor(const ComboRenderPassDescriptor& other) {
+ *this = other;
+}
- return *this;
- }
- void ComboRenderPassDescriptor::UnsetDepthStencilLoadStoreOpsForFormat(
- wgpu::TextureFormat format) {
- switch (format) {
- case wgpu::TextureFormat::Depth24Plus:
- case wgpu::TextureFormat::Depth32Float:
- case wgpu::TextureFormat::Depth16Unorm:
- cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
- cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Undefined;
- break;
- case wgpu::TextureFormat::Stencil8:
- cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Undefined;
- cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Undefined;
- break;
- default:
- break;
- }
- }
+const ComboRenderPassDescriptor& ComboRenderPassDescriptor::operator=(
+ const ComboRenderPassDescriptor& otherRenderPass) {
+ cDepthStencilAttachmentInfo = otherRenderPass.cDepthStencilAttachmentInfo;
+ cColorAttachments = otherRenderPass.cColorAttachments;
+ colorAttachmentCount = otherRenderPass.colorAttachmentCount;
- BasicRenderPass::BasicRenderPass()
- : width(0),
- height(0),
- color(nullptr),
- colorFormat(wgpu::TextureFormat::RGBA8Unorm),
- renderPassInfo({}) {
- }
+ colorAttachments = cColorAttachments.data();
- BasicRenderPass::BasicRenderPass(uint32_t texWidth,
- uint32_t texHeight,
- wgpu::Texture colorAttachment,
- wgpu::TextureFormat textureFormat)
- : width(texWidth),
- height(texHeight),
- color(colorAttachment),
- colorFormat(textureFormat),
- renderPassInfo({colorAttachment.CreateView()}) {
+ if (otherRenderPass.depthStencilAttachment != nullptr) {
+        // Point depthStencilAttachment at this copy's cDepthStencilAttachmentInfo.
+ depthStencilAttachment = &cDepthStencilAttachmentInfo;
+ } else {
+ depthStencilAttachment = nullptr;
}
- BasicRenderPass CreateBasicRenderPass(const wgpu::Device& device,
- uint32_t width,
- uint32_t height,
- wgpu::TextureFormat format) {
- DAWN_ASSERT(width > 0 && height > 0);
-
- wgpu::TextureDescriptor descriptor;
- descriptor.dimension = wgpu::TextureDimension::e2D;
- descriptor.size.width = width;
- descriptor.size.height = height;
- descriptor.size.depthOrArrayLayers = 1;
- descriptor.sampleCount = 1;
- descriptor.format = format;
- descriptor.mipLevelCount = 1;
- descriptor.usage = wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc;
- wgpu::Texture color = device.CreateTexture(&descriptor);
-
- return BasicRenderPass(width, height, color);
+ return *this;
+}
+void ComboRenderPassDescriptor::UnsetDepthStencilLoadStoreOpsForFormat(wgpu::TextureFormat format) {
+ switch (format) {
+ case wgpu::TextureFormat::Depth24Plus:
+ case wgpu::TextureFormat::Depth32Float:
+ case wgpu::TextureFormat::Depth16Unorm:
+ cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
+ cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Undefined;
+ break;
+ case wgpu::TextureFormat::Stencil8:
+ cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Undefined;
+ cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Undefined;
+ break;
+ default:
+ break;
}
-
- wgpu::ImageCopyBuffer CreateImageCopyBuffer(wgpu::Buffer buffer,
- uint64_t offset,
+}
+
+BasicRenderPass::BasicRenderPass()
+ : width(0),
+ height(0),
+ color(nullptr),
+ colorFormat(wgpu::TextureFormat::RGBA8Unorm),
+ renderPassInfo({}) {}
+
+BasicRenderPass::BasicRenderPass(uint32_t texWidth,
+ uint32_t texHeight,
+ wgpu::Texture colorAttachment,
+ wgpu::TextureFormat textureFormat)
+ : width(texWidth),
+ height(texHeight),
+ color(colorAttachment),
+ colorFormat(textureFormat),
+ renderPassInfo({colorAttachment.CreateView()}) {}
+
+BasicRenderPass CreateBasicRenderPass(const wgpu::Device& device,
+ uint32_t width,
+ uint32_t height,
+ wgpu::TextureFormat format) {
+ DAWN_ASSERT(width > 0 && height > 0);
+
+ wgpu::TextureDescriptor descriptor;
+ descriptor.dimension = wgpu::TextureDimension::e2D;
+ descriptor.size.width = width;
+ descriptor.size.height = height;
+ descriptor.size.depthOrArrayLayers = 1;
+ descriptor.sampleCount = 1;
+ descriptor.format = format;
+ descriptor.mipLevelCount = 1;
+ descriptor.usage = wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc;
+ wgpu::Texture color = device.CreateTexture(&descriptor);
+
+ return BasicRenderPass(width, height, color);
+}
+
+wgpu::ImageCopyBuffer CreateImageCopyBuffer(wgpu::Buffer buffer,
+ uint64_t offset,
+ uint32_t bytesPerRow,
+ uint32_t rowsPerImage) {
+ wgpu::ImageCopyBuffer imageCopyBuffer = {};
+ imageCopyBuffer.buffer = buffer;
+ imageCopyBuffer.layout = CreateTextureDataLayout(offset, bytesPerRow, rowsPerImage);
+
+ return imageCopyBuffer;
+}
+
+wgpu::ImageCopyTexture CreateImageCopyTexture(wgpu::Texture texture,
+ uint32_t mipLevel,
+ wgpu::Origin3D origin,
+ wgpu::TextureAspect aspect) {
+ wgpu::ImageCopyTexture imageCopyTexture;
+ imageCopyTexture.texture = texture;
+ imageCopyTexture.mipLevel = mipLevel;
+ imageCopyTexture.origin = origin;
+ imageCopyTexture.aspect = aspect;
+
+ return imageCopyTexture;
+}
+
+wgpu::TextureDataLayout CreateTextureDataLayout(uint64_t offset,
uint32_t bytesPerRow,
uint32_t rowsPerImage) {
- wgpu::ImageCopyBuffer imageCopyBuffer = {};
- imageCopyBuffer.buffer = buffer;
- imageCopyBuffer.layout = CreateTextureDataLayout(offset, bytesPerRow, rowsPerImage);
-
- return imageCopyBuffer;
- }
-
- wgpu::ImageCopyTexture CreateImageCopyTexture(wgpu::Texture texture,
- uint32_t mipLevel,
- wgpu::Origin3D origin,
- wgpu::TextureAspect aspect) {
- wgpu::ImageCopyTexture imageCopyTexture;
- imageCopyTexture.texture = texture;
- imageCopyTexture.mipLevel = mipLevel;
- imageCopyTexture.origin = origin;
- imageCopyTexture.aspect = aspect;
-
- return imageCopyTexture;
- }
-
- wgpu::TextureDataLayout CreateTextureDataLayout(uint64_t offset,
- uint32_t bytesPerRow,
- uint32_t rowsPerImage) {
- wgpu::TextureDataLayout textureDataLayout;
- textureDataLayout.offset = offset;
- textureDataLayout.bytesPerRow = bytesPerRow;
- textureDataLayout.rowsPerImage = rowsPerImage;
-
- return textureDataLayout;
+ wgpu::TextureDataLayout textureDataLayout;
+ textureDataLayout.offset = offset;
+ textureDataLayout.bytesPerRow = bytesPerRow;
+ textureDataLayout.rowsPerImage = rowsPerImage;
+
+ return textureDataLayout;
+}
+
+wgpu::PipelineLayout MakeBasicPipelineLayout(const wgpu::Device& device,
+ const wgpu::BindGroupLayout* bindGroupLayout) {
+ wgpu::PipelineLayoutDescriptor descriptor;
+ if (bindGroupLayout != nullptr) {
+ descriptor.bindGroupLayoutCount = 1;
+ descriptor.bindGroupLayouts = bindGroupLayout;
+ } else {
+ descriptor.bindGroupLayoutCount = 0;
+ descriptor.bindGroupLayouts = nullptr;
}
-
- wgpu::PipelineLayout MakeBasicPipelineLayout(const wgpu::Device& device,
- const wgpu::BindGroupLayout* bindGroupLayout) {
- wgpu::PipelineLayoutDescriptor descriptor;
- if (bindGroupLayout != nullptr) {
- descriptor.bindGroupLayoutCount = 1;
- descriptor.bindGroupLayouts = bindGroupLayout;
- } else {
- descriptor.bindGroupLayoutCount = 0;
- descriptor.bindGroupLayouts = nullptr;
- }
- return device.CreatePipelineLayout(&descriptor);
- }
-
- wgpu::PipelineLayout MakePipelineLayout(const wgpu::Device& device,
- std::vector<wgpu::BindGroupLayout> bgls) {
- wgpu::PipelineLayoutDescriptor descriptor;
- descriptor.bindGroupLayoutCount = uint32_t(bgls.size());
- descriptor.bindGroupLayouts = bgls.data();
- return device.CreatePipelineLayout(&descriptor);
- }
-
- wgpu::BindGroupLayout MakeBindGroupLayout(
- const wgpu::Device& device,
- std::initializer_list<BindingLayoutEntryInitializationHelper> entriesInitializer) {
- std::vector<wgpu::BindGroupLayoutEntry> entries;
- for (const BindingLayoutEntryInitializationHelper& entry : entriesInitializer) {
- entries.push_back(entry);
- }
-
- wgpu::BindGroupLayoutDescriptor descriptor;
- descriptor.entryCount = static_cast<uint32_t>(entries.size());
- descriptor.entries = entries.data();
- return device.CreateBindGroupLayout(&descriptor);
- }
-
- BindingLayoutEntryInitializationHelper::BindingLayoutEntryInitializationHelper(
- uint32_t entryBinding,
- wgpu::ShaderStage entryVisibility,
- wgpu::BufferBindingType bufferType,
- bool bufferHasDynamicOffset,
- uint64_t bufferMinBindingSize) {
- binding = entryBinding;
- visibility = entryVisibility;
- buffer.type = bufferType;
- buffer.hasDynamicOffset = bufferHasDynamicOffset;
- buffer.minBindingSize = bufferMinBindingSize;
- }
-
- BindingLayoutEntryInitializationHelper::BindingLayoutEntryInitializationHelper(
- uint32_t entryBinding,
- wgpu::ShaderStage entryVisibility,
- wgpu::SamplerBindingType samplerType) {
- binding = entryBinding;
- visibility = entryVisibility;
- sampler.type = samplerType;
- }
-
- BindingLayoutEntryInitializationHelper::BindingLayoutEntryInitializationHelper(
- uint32_t entryBinding,
- wgpu::ShaderStage entryVisibility,
- wgpu::TextureSampleType textureSampleType,
- wgpu::TextureViewDimension textureViewDimension,
- bool textureMultisampled) {
- binding = entryBinding;
- visibility = entryVisibility;
- texture.sampleType = textureSampleType;
- texture.viewDimension = textureViewDimension;
- texture.multisampled = textureMultisampled;
- }
-
- BindingLayoutEntryInitializationHelper::BindingLayoutEntryInitializationHelper(
- uint32_t entryBinding,
- wgpu::ShaderStage entryVisibility,
- wgpu::StorageTextureAccess storageTextureAccess,
- wgpu::TextureFormat format,
- wgpu::TextureViewDimension textureViewDimension) {
- binding = entryBinding;
- visibility = entryVisibility;
- storageTexture.access = storageTextureAccess;
- storageTexture.format = format;
- storageTexture.viewDimension = textureViewDimension;
- }
-
- // ExternalTextureBindingLayout never contains data, so just make one that can be reused instead
- // of declaring a new one every time it's needed.
- wgpu::ExternalTextureBindingLayout kExternalTextureBindingLayout = {};
-
- BindingLayoutEntryInitializationHelper::BindingLayoutEntryInitializationHelper(
- uint32_t entryBinding,
- wgpu::ShaderStage entryVisibility,
- wgpu::ExternalTextureBindingLayout* bindingLayout) {
- binding = entryBinding;
- visibility = entryVisibility;
- nextInChain = bindingLayout;
- }
-
- BindingLayoutEntryInitializationHelper::BindingLayoutEntryInitializationHelper(
- const wgpu::BindGroupLayoutEntry& entry)
- : wgpu::BindGroupLayoutEntry(entry) {
+ return device.CreatePipelineLayout(&descriptor);
+}
+
+wgpu::PipelineLayout MakePipelineLayout(const wgpu::Device& device,
+ std::vector<wgpu::BindGroupLayout> bgls) {
+ wgpu::PipelineLayoutDescriptor descriptor;
+ descriptor.bindGroupLayoutCount = uint32_t(bgls.size());
+ descriptor.bindGroupLayouts = bgls.data();
+ return device.CreatePipelineLayout(&descriptor);
+}
+
+wgpu::BindGroupLayout MakeBindGroupLayout(
+ const wgpu::Device& device,
+ std::initializer_list<BindingLayoutEntryInitializationHelper> entriesInitializer) {
+ std::vector<wgpu::BindGroupLayoutEntry> entries;
+ for (const BindingLayoutEntryInitializationHelper& entry : entriesInitializer) {
+ entries.push_back(entry);
}
- BindingInitializationHelper::BindingInitializationHelper(uint32_t binding,
- const wgpu::Sampler& sampler)
- : binding(binding), sampler(sampler) {
+ wgpu::BindGroupLayoutDescriptor descriptor;
+ descriptor.entryCount = static_cast<uint32_t>(entries.size());
+ descriptor.entries = entries.data();
+ return device.CreateBindGroupLayout(&descriptor);
+}
+
+BindingLayoutEntryInitializationHelper::BindingLayoutEntryInitializationHelper(
+ uint32_t entryBinding,
+ wgpu::ShaderStage entryVisibility,
+ wgpu::BufferBindingType bufferType,
+ bool bufferHasDynamicOffset,
+ uint64_t bufferMinBindingSize) {
+ binding = entryBinding;
+ visibility = entryVisibility;
+ buffer.type = bufferType;
+ buffer.hasDynamicOffset = bufferHasDynamicOffset;
+ buffer.minBindingSize = bufferMinBindingSize;
+}
+
+BindingLayoutEntryInitializationHelper::BindingLayoutEntryInitializationHelper(
+ uint32_t entryBinding,
+ wgpu::ShaderStage entryVisibility,
+ wgpu::SamplerBindingType samplerType) {
+ binding = entryBinding;
+ visibility = entryVisibility;
+ sampler.type = samplerType;
+}
+
+BindingLayoutEntryInitializationHelper::BindingLayoutEntryInitializationHelper(
+ uint32_t entryBinding,
+ wgpu::ShaderStage entryVisibility,
+ wgpu::TextureSampleType textureSampleType,
+ wgpu::TextureViewDimension textureViewDimension,
+ bool textureMultisampled) {
+ binding = entryBinding;
+ visibility = entryVisibility;
+ texture.sampleType = textureSampleType;
+ texture.viewDimension = textureViewDimension;
+ texture.multisampled = textureMultisampled;
+}
+
+BindingLayoutEntryInitializationHelper::BindingLayoutEntryInitializationHelper(
+ uint32_t entryBinding,
+ wgpu::ShaderStage entryVisibility,
+ wgpu::StorageTextureAccess storageTextureAccess,
+ wgpu::TextureFormat format,
+ wgpu::TextureViewDimension textureViewDimension) {
+ binding = entryBinding;
+ visibility = entryVisibility;
+ storageTexture.access = storageTextureAccess;
+ storageTexture.format = format;
+ storageTexture.viewDimension = textureViewDimension;
+}
+
+// ExternalTextureBindingLayout never contains data, so just make one that can be reused instead
+// of declaring a new one every time it's needed.
+wgpu::ExternalTextureBindingLayout kExternalTextureBindingLayout = {};
+
+BindingLayoutEntryInitializationHelper::BindingLayoutEntryInitializationHelper(
+ uint32_t entryBinding,
+ wgpu::ShaderStage entryVisibility,
+ wgpu::ExternalTextureBindingLayout* bindingLayout) {
+ binding = entryBinding;
+ visibility = entryVisibility;
+ nextInChain = bindingLayout;
+}
+
+BindingLayoutEntryInitializationHelper::BindingLayoutEntryInitializationHelper(
+ const wgpu::BindGroupLayoutEntry& entry)
+ : wgpu::BindGroupLayoutEntry(entry) {}
+
+BindingInitializationHelper::BindingInitializationHelper(uint32_t binding,
+ const wgpu::Sampler& sampler)
+ : binding(binding), sampler(sampler) {}
+
+BindingInitializationHelper::BindingInitializationHelper(uint32_t binding,
+ const wgpu::TextureView& textureView)
+ : binding(binding), textureView(textureView) {}
+
+BindingInitializationHelper::BindingInitializationHelper(
+ uint32_t binding,
+ const wgpu::ExternalTexture& externalTexture)
+ : binding(binding) {
+ externalTextureBindingEntry.externalTexture = externalTexture;
+}
+
+BindingInitializationHelper::BindingInitializationHelper(uint32_t binding,
+ const wgpu::Buffer& buffer,
+ uint64_t offset,
+ uint64_t size)
+ : binding(binding), buffer(buffer), offset(offset), size(size) {}
+
+BindingInitializationHelper::BindingInitializationHelper(const BindingInitializationHelper&) =
+ default;
+
+BindingInitializationHelper::~BindingInitializationHelper() = default;
+
+wgpu::BindGroupEntry BindingInitializationHelper::GetAsBinding() const {
+ wgpu::BindGroupEntry result;
+
+ result.binding = binding;
+ result.sampler = sampler;
+ result.textureView = textureView;
+ result.buffer = buffer;
+ result.offset = offset;
+ result.size = size;
+ if (externalTextureBindingEntry.externalTexture != nullptr) {
+ result.nextInChain = &externalTextureBindingEntry;
}
- BindingInitializationHelper::BindingInitializationHelper(uint32_t binding,
- const wgpu::TextureView& textureView)
- : binding(binding), textureView(textureView) {
- }
+ return result;
+}
- BindingInitializationHelper::BindingInitializationHelper(
- uint32_t binding,
- const wgpu::ExternalTexture& externalTexture)
- : binding(binding) {
- externalTextureBindingEntry.externalTexture = externalTexture;
+wgpu::BindGroup MakeBindGroup(
+ const wgpu::Device& device,
+ const wgpu::BindGroupLayout& layout,
+ std::initializer_list<BindingInitializationHelper> entriesInitializer) {
+ std::vector<wgpu::BindGroupEntry> entries;
+ for (const BindingInitializationHelper& helper : entriesInitializer) {
+ entries.push_back(helper.GetAsBinding());
}
- BindingInitializationHelper::BindingInitializationHelper(uint32_t binding,
- const wgpu::Buffer& buffer,
- uint64_t offset,
- uint64_t size)
- : binding(binding), buffer(buffer), offset(offset), size(size) {
- }
+ wgpu::BindGroupDescriptor descriptor;
+ descriptor.layout = layout;
+ descriptor.entryCount = checked_cast<uint32_t>(entries.size());
+ descriptor.entries = entries.data();
- wgpu::BindGroupEntry BindingInitializationHelper::GetAsBinding() const {
- wgpu::BindGroupEntry result;
-
- result.binding = binding;
- result.sampler = sampler;
- result.textureView = textureView;
- result.buffer = buffer;
- result.offset = offset;
- result.size = size;
- if (externalTextureBindingEntry.externalTexture != nullptr) {
- result.nextInChain = &externalTextureBindingEntry;
- }
-
- return result;
- }
-
- wgpu::BindGroup MakeBindGroup(
- const wgpu::Device& device,
- const wgpu::BindGroupLayout& layout,
- std::initializer_list<BindingInitializationHelper> entriesInitializer) {
- std::vector<wgpu::BindGroupEntry> entries;
- for (const BindingInitializationHelper& helper : entriesInitializer) {
- entries.push_back(helper.GetAsBinding());
- }
-
- wgpu::BindGroupDescriptor descriptor;
- descriptor.layout = layout;
- descriptor.entryCount = entries.size();
- descriptor.entries = entries.data();
-
- return device.CreateBindGroup(&descriptor);
- }
+ return device.CreateBindGroup(&descriptor);
+}
} // namespace utils
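An illustrative sketch putting a few of the helpers above together. The wgpu::Device is assumed to come from elsewhere, and the vertex data is arbitrary example input.

#include "dawn/utils/WGPUHelpers.h"

void RecordClearPass(const wgpu::Device& device) {
    // A 64x64 RGBA8 render target plus a preconfigured ComboRenderPassDescriptor.
    utils::BasicRenderPass rp = utils::CreateBasicRenderPass(device, 64, 64);

    // CreateBufferFromData uploads through the queue and adds CopyDst to the usage.
    const float vertexData[] = {0.0f, 0.5f, -0.5f, -0.5f, 0.5f, -0.5f};
    wgpu::Buffer vertices = utils::CreateBufferFromData(device, vertexData, sizeof(vertexData),
                                                        wgpu::BufferUsage::Vertex);

    // Clear the attachment; the combo descriptor already defaults to LoadOp::Clear.
    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
    wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&rp.renderPassInfo);
    pass.End();
    wgpu::CommandBuffer commands = encoder.Finish();
    device.GetQueue().Submit(1, &commands);
}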
diff --git a/chromium/third_party/dawn/src/dawn/utils/WGPUHelpers.h b/chromium/third_party/dawn/src/dawn/utils/WGPUHelpers.h
index 8279def8211..24864e6265d 100644
--- a/chromium/third_party/dawn/src/dawn/utils/WGPUHelpers.h
+++ b/chromium/third_party/dawn/src/dawn/utils/WGPUHelpers.h
@@ -15,167 +15,168 @@
#ifndef SRC_DAWN_UTILS_WGPUHELPERS_H_
#define SRC_DAWN_UTILS_WGPUHELPERS_H_
-#include <dawn/webgpu_cpp.h>
-
#include <array>
#include <initializer_list>
#include <vector>
#include "dawn/common/Constants.h"
#include "dawn/utils/TextureUtils.h"
+#include "dawn/webgpu_cpp.h"
namespace utils {
- enum Expectation { Success, Failure };
+enum Expectation { Success, Failure };
+
+wgpu::ShaderModule CreateShaderModuleFromASM(const wgpu::Device& device, const char* source);
+wgpu::ShaderModule CreateShaderModule(const wgpu::Device& device, const char* source);
+
+wgpu::Buffer CreateBufferFromData(const wgpu::Device& device,
+ const void* data,
+ uint64_t size,
+ wgpu::BufferUsage usage);
+
+template <typename T>
+wgpu::Buffer CreateBufferFromData(const wgpu::Device& device,
+ wgpu::BufferUsage usage,
+ std::initializer_list<T> data) {
+ return CreateBufferFromData(device, data.begin(), uint32_t(sizeof(T) * data.size()), usage);
+}
+
+wgpu::ImageCopyBuffer CreateImageCopyBuffer(wgpu::Buffer buffer,
+ uint64_t offset = 0,
+ uint32_t bytesPerRow = wgpu::kCopyStrideUndefined,
+ uint32_t rowsPerImage = wgpu::kCopyStrideUndefined);
+wgpu::ImageCopyTexture CreateImageCopyTexture(
+ wgpu::Texture texture,
+ uint32_t level = 0,
+ wgpu::Origin3D origin = {0, 0, 0},
+ wgpu::TextureAspect aspect = wgpu::TextureAspect::All);
+wgpu::TextureDataLayout CreateTextureDataLayout(uint64_t offset,
+ uint32_t bytesPerRow,
+ uint32_t rowsPerImage = wgpu::kCopyStrideUndefined);
- wgpu::ShaderModule CreateShaderModuleFromASM(const wgpu::Device& device, const char* source);
- wgpu::ShaderModule CreateShaderModule(const wgpu::Device& device, const char* source);
+struct ComboRenderPassDescriptor : public wgpu::RenderPassDescriptor {
+ public:
+ ComboRenderPassDescriptor(std::initializer_list<wgpu::TextureView> colorAttachmentInfo,
+ wgpu::TextureView depthStencil = wgpu::TextureView());
+ ~ComboRenderPassDescriptor();
- wgpu::Buffer CreateBufferFromData(const wgpu::Device& device,
- const void* data,
- uint64_t size,
- wgpu::BufferUsage usage);
+ ComboRenderPassDescriptor(const ComboRenderPassDescriptor& otherRenderPass);
+ const ComboRenderPassDescriptor& operator=(const ComboRenderPassDescriptor& otherRenderPass);
- template <typename T>
- wgpu::Buffer CreateBufferFromData(const wgpu::Device& device,
- wgpu::BufferUsage usage,
- std::initializer_list<T> data) {
- return CreateBufferFromData(device, data.begin(), uint32_t(sizeof(T) * data.size()), usage);
- }
+ void UnsetDepthStencilLoadStoreOpsForFormat(wgpu::TextureFormat format);
- wgpu::ImageCopyBuffer CreateImageCopyBuffer(wgpu::Buffer buffer,
- uint64_t offset,
- uint32_t bytesPerRow,
- uint32_t rowsPerImage = wgpu::kCopyStrideUndefined);
- wgpu::ImageCopyTexture CreateImageCopyTexture(
- wgpu::Texture texture,
- uint32_t level,
- wgpu::Origin3D origin,
- wgpu::TextureAspect aspect = wgpu::TextureAspect::All);
- wgpu::TextureDataLayout CreateTextureDataLayout(
- uint64_t offset,
- uint32_t bytesPerRow,
- uint32_t rowsPerImage = wgpu::kCopyStrideUndefined);
-
- struct ComboRenderPassDescriptor : public wgpu::RenderPassDescriptor {
- public:
- ComboRenderPassDescriptor(std::initializer_list<wgpu::TextureView> colorAttachmentInfo,
- wgpu::TextureView depthStencil = wgpu::TextureView());
-
- ComboRenderPassDescriptor(const ComboRenderPassDescriptor& otherRenderPass);
- const ComboRenderPassDescriptor& operator=(
- const ComboRenderPassDescriptor& otherRenderPass);
-
- void UnsetDepthStencilLoadStoreOpsForFormat(wgpu::TextureFormat format);
-
- std::array<wgpu::RenderPassColorAttachment, kMaxColorAttachments> cColorAttachments;
- wgpu::RenderPassDepthStencilAttachment cDepthStencilAttachmentInfo = {};
- };
-
- struct BasicRenderPass {
- public:
- BasicRenderPass();
- BasicRenderPass(uint32_t width,
- uint32_t height,
- wgpu::Texture color,
- wgpu::TextureFormat texture = kDefaultColorFormat);
-
- static constexpr wgpu::TextureFormat kDefaultColorFormat = wgpu::TextureFormat::RGBA8Unorm;
-
- uint32_t width;
- uint32_t height;
- wgpu::Texture color;
- wgpu::TextureFormat colorFormat;
- utils::ComboRenderPassDescriptor renderPassInfo;
- };
- BasicRenderPass CreateBasicRenderPass(
- const wgpu::Device& device,
- uint32_t width,
- uint32_t height,
- wgpu::TextureFormat format = BasicRenderPass::kDefaultColorFormat);
-
- wgpu::PipelineLayout MakeBasicPipelineLayout(const wgpu::Device& device,
- const wgpu::BindGroupLayout* bindGroupLayout);
-
- wgpu::PipelineLayout MakePipelineLayout(const wgpu::Device& device,
- std::vector<wgpu::BindGroupLayout> bgls);
-
- extern wgpu::ExternalTextureBindingLayout kExternalTextureBindingLayout;
-
- // Helpers to make creating bind group layouts look nicer:
- //
- // utils::MakeBindGroupLayout(device, {
- // {0, wgpu::ShaderStage::Vertex, wgpu::BufferBindingType::Uniform},
- // {1, wgpu::ShaderStage::Fragment, wgpu::SamplerBindingType::Filtering},
- // {3, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Float}
- // });
-
- struct BindingLayoutEntryInitializationHelper : wgpu::BindGroupLayoutEntry {
- BindingLayoutEntryInitializationHelper(uint32_t entryBinding,
- wgpu::ShaderStage entryVisibility,
- wgpu::BufferBindingType bufferType,
- bool bufferHasDynamicOffset = false,
- uint64_t bufferMinBindingSize = 0);
- BindingLayoutEntryInitializationHelper(uint32_t entryBinding,
- wgpu::ShaderStage entryVisibility,
- wgpu::SamplerBindingType samplerType);
- BindingLayoutEntryInitializationHelper(
- uint32_t entryBinding,
- wgpu::ShaderStage entryVisibility,
- wgpu::TextureSampleType textureSampleType,
- wgpu::TextureViewDimension viewDimension = wgpu::TextureViewDimension::e2D,
- bool textureMultisampled = false);
- BindingLayoutEntryInitializationHelper(
- uint32_t entryBinding,
- wgpu::ShaderStage entryVisibility,
- wgpu::StorageTextureAccess storageTextureAccess,
- wgpu::TextureFormat format,
- wgpu::TextureViewDimension viewDimension = wgpu::TextureViewDimension::e2D);
- BindingLayoutEntryInitializationHelper(uint32_t entryBinding,
- wgpu::ShaderStage entryVisibility,
- wgpu::ExternalTextureBindingLayout* bindingLayout);
-
- BindingLayoutEntryInitializationHelper(const wgpu::BindGroupLayoutEntry& entry);
- };
-
- wgpu::BindGroupLayout MakeBindGroupLayout(
- const wgpu::Device& device,
- std::initializer_list<BindingLayoutEntryInitializationHelper> entriesInitializer);
-
- // Helpers to make creating bind groups look nicer:
- //
- // utils::MakeBindGroup(device, layout, {
- // {0, mySampler},
- // {1, myBuffer, offset, size},
- // {3, myTextureView}
- // });
-
- // Structure with one constructor per-type of bindings, so that the initializer_list accepts
- // bindings with the right type and no extra information.
- struct BindingInitializationHelper {
- BindingInitializationHelper(uint32_t binding, const wgpu::Sampler& sampler);
- BindingInitializationHelper(uint32_t binding, const wgpu::TextureView& textureView);
- BindingInitializationHelper(uint32_t binding, const wgpu::ExternalTexture& externalTexture);
- BindingInitializationHelper(uint32_t binding,
- const wgpu::Buffer& buffer,
- uint64_t offset = 0,
- uint64_t size = wgpu::kWholeSize);
-
- wgpu::BindGroupEntry GetAsBinding() const;
-
- uint32_t binding;
- wgpu::Sampler sampler;
- wgpu::TextureView textureView;
- wgpu::Buffer buffer;
- wgpu::ExternalTextureBindingEntry externalTextureBindingEntry;
- uint64_t offset = 0;
- uint64_t size = 0;
- };
-
- wgpu::BindGroup MakeBindGroup(
- const wgpu::Device& device,
- const wgpu::BindGroupLayout& layout,
- std::initializer_list<BindingInitializationHelper> entriesInitializer);
+ std::array<wgpu::RenderPassColorAttachment, kMaxColorAttachments> cColorAttachments;
+ wgpu::RenderPassDepthStencilAttachment cDepthStencilAttachmentInfo = {};
+};
+
+struct BasicRenderPass {
+ public:
+ BasicRenderPass();
+ BasicRenderPass(uint32_t width,
+ uint32_t height,
+ wgpu::Texture color,
+ wgpu::TextureFormat texture = kDefaultColorFormat);
+
+ static constexpr wgpu::TextureFormat kDefaultColorFormat = wgpu::TextureFormat::RGBA8Unorm;
+
+ uint32_t width;
+ uint32_t height;
+ wgpu::Texture color;
+ wgpu::TextureFormat colorFormat;
+ utils::ComboRenderPassDescriptor renderPassInfo;
+};
+BasicRenderPass CreateBasicRenderPass(
+ const wgpu::Device& device,
+ uint32_t width,
+ uint32_t height,
+ wgpu::TextureFormat format = BasicRenderPass::kDefaultColorFormat);
+
+wgpu::PipelineLayout MakeBasicPipelineLayout(const wgpu::Device& device,
+ const wgpu::BindGroupLayout* bindGroupLayout);
+
+wgpu::PipelineLayout MakePipelineLayout(const wgpu::Device& device,
+ std::vector<wgpu::BindGroupLayout> bgls);
+
+extern wgpu::ExternalTextureBindingLayout kExternalTextureBindingLayout;
+
+// Helpers to make creating bind group layouts look nicer:
+//
+// utils::MakeBindGroupLayout(device, {
+// {0, wgpu::ShaderStage::Vertex, wgpu::BufferBindingType::Uniform},
+// {1, wgpu::ShaderStage::Fragment, wgpu::SamplerBindingType::Filtering},
+// {3, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Float}
+// });
+
+struct BindingLayoutEntryInitializationHelper : wgpu::BindGroupLayoutEntry {
+ BindingLayoutEntryInitializationHelper(uint32_t entryBinding,
+ wgpu::ShaderStage entryVisibility,
+ wgpu::BufferBindingType bufferType,
+ bool bufferHasDynamicOffset = false,
+ uint64_t bufferMinBindingSize = 0);
+ BindingLayoutEntryInitializationHelper(uint32_t entryBinding,
+ wgpu::ShaderStage entryVisibility,
+ wgpu::SamplerBindingType samplerType);
+ BindingLayoutEntryInitializationHelper(
+ uint32_t entryBinding,
+ wgpu::ShaderStage entryVisibility,
+ wgpu::TextureSampleType textureSampleType,
+ wgpu::TextureViewDimension viewDimension = wgpu::TextureViewDimension::e2D,
+ bool textureMultisampled = false);
+ BindingLayoutEntryInitializationHelper(
+ uint32_t entryBinding,
+ wgpu::ShaderStage entryVisibility,
+ wgpu::StorageTextureAccess storageTextureAccess,
+ wgpu::TextureFormat format,
+ wgpu::TextureViewDimension viewDimension = wgpu::TextureViewDimension::e2D);
+ BindingLayoutEntryInitializationHelper(uint32_t entryBinding,
+ wgpu::ShaderStage entryVisibility,
+ wgpu::ExternalTextureBindingLayout* bindingLayout);
+
+ // NOLINTNEXTLINE(runtime/explicit)
+ BindingLayoutEntryInitializationHelper(const wgpu::BindGroupLayoutEntry& entry);
+};
+
+wgpu::BindGroupLayout MakeBindGroupLayout(
+ const wgpu::Device& device,
+ std::initializer_list<BindingLayoutEntryInitializationHelper> entriesInitializer);
+
+// Helpers to make creating bind groups look nicer:
+//
+// utils::MakeBindGroup(device, layout, {
+// {0, mySampler},
+// {1, myBuffer, offset, size},
+// {3, myTextureView}
+// });
+
+// Structure with one constructor per type of binding, so that the initializer_list accepts
+// bindings of the right type without extra information.
+struct BindingInitializationHelper {
+ BindingInitializationHelper(uint32_t binding, const wgpu::Sampler& sampler);
+ BindingInitializationHelper(uint32_t binding, const wgpu::TextureView& textureView);
+ BindingInitializationHelper(uint32_t binding, const wgpu::ExternalTexture& externalTexture);
+ BindingInitializationHelper(uint32_t binding,
+ const wgpu::Buffer& buffer,
+ uint64_t offset = 0,
+ uint64_t size = wgpu::kWholeSize);
+ BindingInitializationHelper(const BindingInitializationHelper&);
+ ~BindingInitializationHelper();
+
+ wgpu::BindGroupEntry GetAsBinding() const;
+
+ uint32_t binding;
+ wgpu::Sampler sampler;
+ wgpu::TextureView textureView;
+ wgpu::Buffer buffer;
+ wgpu::ExternalTextureBindingEntry externalTextureBindingEntry;
+ uint64_t offset = 0;
+ uint64_t size = 0;
+};
+
+wgpu::BindGroup MakeBindGroup(
+ const wgpu::Device& device,
+ const wgpu::BindGroupLayout& layout,
+ std::initializer_list<BindingInitializationHelper> entriesInitializer);
} // namespace utils
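A sketch mirroring the usage shown in the comments above: the device, uniform buffer, sampler, and texture view are assumed to exist already.

#include "dawn/utils/WGPUHelpers.h"

wgpu::BindGroup MakeUniformSamplerTextureGroup(const wgpu::Device& device,
                                               const wgpu::Buffer& uniforms,
                                               const wgpu::Sampler& sampler,
                                               const wgpu::TextureView& view) {
    // One entry per binding type, using the BindingLayoutEntryInitializationHelper constructors.
    wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
        device, {
                    {0, wgpu::ShaderStage::Vertex, wgpu::BufferBindingType::Uniform},
                    {1, wgpu::ShaderStage::Fragment, wgpu::SamplerBindingType::Filtering},
                    {2, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Float},
                });
    // BindingInitializationHelper picks the right constructor from the entry's type.
    return utils::MakeBindGroup(device, bgl,
                                {{0, uniforms, 0, wgpu::kWholeSize}, {1, sampler}, {2, view}});
}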
diff --git a/chromium/third_party/dawn/src/dawn/utils/WindowsDebugLogger.cpp b/chromium/third_party/dawn/src/dawn/utils/WindowsDebugLogger.cpp
index 159c71a940c..3f7fbed9c30 100644
--- a/chromium/third_party/dawn/src/dawn/utils/WindowsDebugLogger.cpp
+++ b/chromium/third_party/dawn/src/dawn/utils/WindowsDebugLogger.cpp
@@ -12,100 +12,99 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+#include <array>
+#include <thread>
+
#include "dawn/utils/PlatformDebugLogger.h"
#include "dawn/common/Assert.h"
#include "dawn/common/windows_with_undefs.h"
-#include <array>
-#include <thread>
-
namespace utils {
- class WindowsDebugLogger : public PlatformDebugLogger {
- public:
- WindowsDebugLogger() : PlatformDebugLogger() {
- if (IsDebuggerPresent()) {
- // This condition is true when running inside Visual Studio or some other debugger.
- // Messages are already printed there so we don't need to do anything.
- return;
- }
-
- mShouldExitHandle = CreateEventA(nullptr, TRUE, FALSE, nullptr);
- ASSERT(mShouldExitHandle != nullptr);
-
- mThread = std::thread(
- [](HANDLE shouldExit) {
- // https://blogs.msdn.microsoft.com/reiley/2011/07/29/a-debugging-approach-to-outputdebugstring/
- // for the layout of this struct.
- struct {
- DWORD process_id;
- char data[4096 - sizeof(DWORD)];
- }* dbWinBuffer = nullptr;
-
- HANDLE file = CreateFileMappingA(INVALID_HANDLE_VALUE, nullptr, PAGE_READWRITE,
- 0, sizeof(*dbWinBuffer), "DBWIN_BUFFER");
- ASSERT(file != nullptr);
- ASSERT(file != INVALID_HANDLE_VALUE);
-
- dbWinBuffer = static_cast<decltype(dbWinBuffer)>(
- MapViewOfFile(file, SECTION_MAP_READ, 0, 0, 0));
- ASSERT(dbWinBuffer != nullptr);
-
- HANDLE dbWinBufferReady =
- CreateEventA(nullptr, FALSE, FALSE, "DBWIN_BUFFER_READY");
- ASSERT(dbWinBufferReady != nullptr);
-
- HANDLE dbWinDataReady = CreateEventA(nullptr, FALSE, FALSE, "DBWIN_DATA_READY");
- ASSERT(dbWinDataReady != nullptr);
-
- std::array<HANDLE, 2> waitHandles = {shouldExit, dbWinDataReady};
- while (true) {
- SetEvent(dbWinBufferReady);
- DWORD wait = WaitForMultipleObjects(waitHandles.size(), waitHandles.data(),
- FALSE, INFINITE);
- if (wait == WAIT_OBJECT_0) {
- break;
- }
- ASSERT(wait == WAIT_OBJECT_0 + 1);
- fprintf(stderr, "%.*s\n", static_cast<int>(sizeof(dbWinBuffer->data)),
- dbWinBuffer->data);
- fflush(stderr);
+class WindowsDebugLogger : public PlatformDebugLogger {
+ public:
+ WindowsDebugLogger() : PlatformDebugLogger() {
+ if (IsDebuggerPresent()) {
+ // This condition is true when running inside Visual Studio or some other debugger.
+ // Messages are already printed there so we don't need to do anything.
+ return;
+ }
+
+ mShouldExitHandle = CreateEventA(nullptr, TRUE, FALSE, nullptr);
+ ASSERT(mShouldExitHandle != nullptr);
+
+ mThread = std::thread(
+ [](HANDLE shouldExit) {
+ // https://blogs.msdn.microsoft.com/reiley/2011/07/29/a-debugging-approach-to-outputdebugstring/
+ // for the layout of this struct.
+ struct {
+ DWORD process_id;
+ char data[4096 - sizeof(DWORD)];
+ }* dbWinBuffer = nullptr;
+
+ HANDLE file = CreateFileMappingA(INVALID_HANDLE_VALUE, nullptr, PAGE_READWRITE, 0,
+ sizeof(*dbWinBuffer), "DBWIN_BUFFER");
+ ASSERT(file != nullptr);
+ ASSERT(file != INVALID_HANDLE_VALUE);
+
+ dbWinBuffer = static_cast<decltype(dbWinBuffer)>(
+ MapViewOfFile(file, SECTION_MAP_READ, 0, 0, 0));
+ ASSERT(dbWinBuffer != nullptr);
+
+ HANDLE dbWinBufferReady = CreateEventA(nullptr, FALSE, FALSE, "DBWIN_BUFFER_READY");
+ ASSERT(dbWinBufferReady != nullptr);
+
+ HANDLE dbWinDataReady = CreateEventA(nullptr, FALSE, FALSE, "DBWIN_DATA_READY");
+ ASSERT(dbWinDataReady != nullptr);
+
+ std::array<HANDLE, 2> waitHandles = {shouldExit, dbWinDataReady};
+ while (true) {
+ SetEvent(dbWinBufferReady);
+ DWORD wait = WaitForMultipleObjects(waitHandles.size(), waitHandles.data(),
+ FALSE, INFINITE);
+ if (wait == WAIT_OBJECT_0) {
+ break;
}
+ ASSERT(wait == WAIT_OBJECT_0 + 1);
+ fprintf(stderr, "%.*s\n", static_cast<int>(sizeof(dbWinBuffer->data)),
+ dbWinBuffer->data);
+ fflush(stderr);
+ }
+
+ CloseHandle(dbWinDataReady);
+ CloseHandle(dbWinBufferReady);
+ UnmapViewOfFile(dbWinBuffer);
+ CloseHandle(file);
+ },
+ mShouldExitHandle);
+ }
- CloseHandle(dbWinDataReady);
- CloseHandle(dbWinBufferReady);
- UnmapViewOfFile(dbWinBuffer);
- CloseHandle(file);
- },
- mShouldExitHandle);
+ ~WindowsDebugLogger() override {
+ if (IsDebuggerPresent()) {
+ // This condition is true when running inside Visual Studio or some other debugger.
+ // Messages are already printed there so we don't need to do anything.
+ return;
}
- ~WindowsDebugLogger() override {
- if (IsDebuggerPresent()) {
- // This condition is true when running inside Visual Studio or some other debugger.
- // Messages are already printed there so we don't need to do anything.
- return;
- }
-
- if (mShouldExitHandle != nullptr) {
- BOOL result = SetEvent(mShouldExitHandle);
- ASSERT(result != 0);
- CloseHandle(mShouldExitHandle);
- }
-
- if (mThread.joinable()) {
- mThread.join();
- }
+ if (mShouldExitHandle != nullptr) {
+ BOOL result = SetEvent(mShouldExitHandle);
+ ASSERT(result != 0);
+ CloseHandle(mShouldExitHandle);
}
- private:
- std::thread mThread;
- HANDLE mShouldExitHandle = INVALID_HANDLE_VALUE;
- };
-
- PlatformDebugLogger* CreatePlatformDebugLogger() {
- return new WindowsDebugLogger();
+ if (mThread.joinable()) {
+ mThread.join();
+ }
}
+ private:
+ std::thread mThread;
+ HANDLE mShouldExitHandle = INVALID_HANDLE_VALUE;
+};
+
+PlatformDebugLogger* CreatePlatformDebugLogger() {
+ return new WindowsDebugLogger();
+}
+
} // namespace utils
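An illustrative sketch: with no debugger attached, the logger above relays anything written through the Win32 OutputDebugStringA() call to stderr. The main() harness and the message text are assumptions for the example.

#include <windows.h>

#include <memory>

#include "dawn/utils/PlatformDebugLogger.h"

int main() {
    std::unique_ptr<utils::PlatformDebugLogger> logger(utils::CreatePlatformDebugLogger());
    OutputDebugStringA("backend validation messages are captured like this\n");
    return 0;  // Destroying the logger signals mShouldExitHandle and joins the listener thread.
}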
diff --git a/chromium/third_party/dawn/src/dawn/utils/WindowsTimer.cpp b/chromium/third_party/dawn/src/dawn/utils/WindowsTimer.cpp
index ca165d02754..99e1d734d09 100644
--- a/chromium/third_party/dawn/src/dawn/utils/WindowsTimer.cpp
+++ b/chromium/third_party/dawn/src/dawn/utils/WindowsTimer.cpp
@@ -12,78 +12,77 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-#include "dawn/utils/Timer.h"
-
#include <windows.h>
+#include "dawn/utils/Timer.h"
+
namespace utils {
- class WindowsTimer : public Timer {
- public:
- WindowsTimer() : Timer(), mRunning(false), mFrequency(0) {
- }
+class WindowsTimer : public Timer {
+ public:
+ WindowsTimer() : Timer(), mRunning(false), mFrequency(0) {}
- ~WindowsTimer() override = default;
+ ~WindowsTimer() override = default;
- void Start() override {
- LARGE_INTEGER curTime;
- QueryPerformanceCounter(&curTime);
- mStartTime = curTime.QuadPart;
+ void Start() override {
+ LARGE_INTEGER curTime;
+ QueryPerformanceCounter(&curTime);
+ mStartTime = curTime.QuadPart;
- // Cache the frequency
- GetFrequency();
+ // Cache the frequency
+ GetFrequency();
- mRunning = true;
- }
+ mRunning = true;
+ }
- void Stop() override {
- LARGE_INTEGER curTime;
- QueryPerformanceCounter(&curTime);
- mStopTime = curTime.QuadPart;
+ void Stop() override {
+ LARGE_INTEGER curTime;
+ QueryPerformanceCounter(&curTime);
+ mStopTime = curTime.QuadPart;
- mRunning = false;
- }
-
- double GetElapsedTime() const override {
- LONGLONG endTime;
- if (mRunning) {
- LARGE_INTEGER curTime;
- QueryPerformanceCounter(&curTime);
- endTime = curTime.QuadPart;
- } else {
- endTime = mStopTime;
- }
-
- return static_cast<double>(endTime - mStartTime) / mFrequency;
- }
+ mRunning = false;
+ }
- double GetAbsoluteTime() override {
+ double GetElapsedTime() const override {
+ LONGLONG endTime;
+ if (mRunning) {
LARGE_INTEGER curTime;
QueryPerformanceCounter(&curTime);
-
- return static_cast<double>(curTime.QuadPart) / GetFrequency();
+ endTime = curTime.QuadPart;
+ } else {
+ endTime = mStopTime;
}
- private:
- LONGLONG GetFrequency() {
- if (mFrequency == 0) {
- LARGE_INTEGER frequency = {};
- QueryPerformanceFrequency(&frequency);
+ return static_cast<double>(endTime - mStartTime) / mFrequency;
+ }
- mFrequency = frequency.QuadPart;
- }
+ double GetAbsoluteTime() override {
+ LARGE_INTEGER curTime;
+ QueryPerformanceCounter(&curTime);
- return mFrequency;
- }
+ return static_cast<double>(curTime.QuadPart) / GetFrequency();
+ }
+
+ private:
+ LONGLONG GetFrequency() {
+ if (mFrequency == 0) {
+ LARGE_INTEGER frequency = {};
+ QueryPerformanceFrequency(&frequency);
- bool mRunning;
- LONGLONG mStartTime;
- LONGLONG mStopTime;
- LONGLONG mFrequency;
- };
+ mFrequency = frequency.QuadPart;
+ }
- Timer* CreateTimer() {
- return new WindowsTimer();
+ return mFrequency;
}
+ bool mRunning;
+ LONGLONG mStartTime;
+ LONGLONG mStopTime;
+ LONGLONG mFrequency;
+};
+
+Timer* CreateTimer() {
+ return new WindowsTimer();
+}
+
} // namespace utils
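For clarity, the conversion performed by GetElapsedTime() above is just a tick delta divided by the performance-counter frequency; a standalone equivalent is sketched below (illustrative only, the function name is an assumption).

#include <windows.h>

// Returns the number of seconds between two QueryPerformanceCounter samples.
double TicksToSeconds(LONGLONG startTicks, LONGLONG endTicks) {
    LARGE_INTEGER frequency = {};
    QueryPerformanceFrequency(&frequency);  // Ticks per second; fixed after boot.
    return static_cast<double>(endTicks - startTicks) / frequency.QuadPart;
}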
diff --git a/chromium/third_party/dawn/src/dawn/utils/WireHelper.cpp b/chromium/third_party/dawn/src/dawn/utils/WireHelper.cpp
index 73eed81899d..609e8ff9d32 100644
--- a/chromium/third_party/dawn/src/dawn/utils/WireHelper.cpp
+++ b/chromium/third_party/dawn/src/dawn/utils/WireHelper.cpp
@@ -12,7 +12,13 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-#include "dawn/utils/WireHelper.h"
+#include <algorithm>
+#include <cstring>
+#include <fstream>
+#include <iomanip>
+#include <set>
+#include <sstream>
+#include <string>
#include "dawn/common/Assert.h"
#include "dawn/common/Log.h"
@@ -20,159 +26,139 @@
#include "dawn/dawn_proc.h"
#include "dawn/native/DawnNative.h"
#include "dawn/utils/TerribleCommandBuffer.h"
+#include "dawn/utils/WireHelper.h"
#include "dawn/wire/WireClient.h"
#include "dawn/wire/WireServer.h"
-#include <algorithm>
-#include <cstring>
-#include <fstream>
-#include <iomanip>
-#include <set>
-#include <sstream>
-
namespace utils {
- namespace {
-
- class WireServerTraceLayer : public dawn::wire::CommandHandler {
- public:
- WireServerTraceLayer(const char* dir, dawn::wire::CommandHandler* handler)
- : dawn::wire::CommandHandler(), mDir(dir), mHandler(handler) {
- const char* sep = GetPathSeparator();
- if (mDir.size() > 0 && mDir.back() != *sep) {
- mDir += sep;
- }
- }
-
- void BeginWireTrace(const char* name) {
- std::string filename = name;
- // Replace slashes in gtest names with underscores so everything is in one
- // directory.
- std::replace(filename.begin(), filename.end(), '/', '_');
- std::replace(filename.begin(), filename.end(), '\\', '_');
-
- // Prepend the filename with the directory.
- filename = mDir + filename;
-
- ASSERT(!mFile.is_open());
- mFile.open(filename,
- std::ios_base::out | std::ios_base::binary | std::ios_base::trunc);
-
- // Write the initial 8 bytes. This means the fuzzer should never inject an
- // error.
- const uint64_t injectedErrorIndex = 0xFFFF'FFFF'FFFF'FFFF;
- mFile.write(reinterpret_cast<const char*>(&injectedErrorIndex),
- sizeof(injectedErrorIndex));
- }
-
- const volatile char* HandleCommands(const volatile char* commands,
- size_t size) override {
- if (mFile.is_open()) {
- mFile.write(const_cast<const char*>(commands), size);
- }
- return mHandler->HandleCommands(commands, size);
- }
-
- private:
- std::string mDir;
- dawn::wire::CommandHandler* mHandler;
- std::ofstream mFile;
- };
-
- class WireHelperDirect : public WireHelper {
- public:
- WireHelperDirect() {
- dawnProcSetProcs(&dawn::native::GetProcs());
- }
-
- std::pair<wgpu::Device, WGPUDevice> RegisterDevice(WGPUDevice backendDevice) override {
- ASSERT(backendDevice != nullptr);
- return std::make_pair(wgpu::Device::Acquire(backendDevice), backendDevice);
- }
-
- void BeginWireTrace(const char* name) override {
- }
-
- bool FlushClient() override {
- return true;
- }
-
- bool FlushServer() override {
- return true;
- }
- };
-
- class WireHelperProxy : public WireHelper {
- public:
- explicit WireHelperProxy(const char* wireTraceDir) {
- mC2sBuf = std::make_unique<utils::TerribleCommandBuffer>();
- mS2cBuf = std::make_unique<utils::TerribleCommandBuffer>();
-
- dawn::wire::WireServerDescriptor serverDesc = {};
- serverDesc.procs = &dawn::native::GetProcs();
- serverDesc.serializer = mS2cBuf.get();
-
- mWireServer.reset(new dawn::wire::WireServer(serverDesc));
- mC2sBuf->SetHandler(mWireServer.get());
-
- if (wireTraceDir != nullptr && strlen(wireTraceDir) > 0) {
- mWireServerTraceLayer.reset(
- new WireServerTraceLayer(wireTraceDir, mWireServer.get()));
- mC2sBuf->SetHandler(mWireServerTraceLayer.get());
- }
-
- dawn::wire::WireClientDescriptor clientDesc = {};
- clientDesc.serializer = mC2sBuf.get();
-
- mWireClient.reset(new dawn::wire::WireClient(clientDesc));
- mS2cBuf->SetHandler(mWireClient.get());
- dawnProcSetProcs(&dawn::wire::client::GetProcs());
- }
-
- std::pair<wgpu::Device, WGPUDevice> RegisterDevice(WGPUDevice backendDevice) override {
- ASSERT(backendDevice != nullptr);
-
- auto reservation = mWireClient->ReserveDevice();
- mWireServer->InjectDevice(backendDevice, reservation.id, reservation.generation);
- dawn::native::GetProcs().deviceRelease(backendDevice);
-
- return std::make_pair(wgpu::Device::Acquire(reservation.device), backendDevice);
- }
-
- void BeginWireTrace(const char* name) override {
- if (mWireServerTraceLayer) {
- return mWireServerTraceLayer->BeginWireTrace(name);
- }
- }
-
- bool FlushClient() override {
- return mC2sBuf->Flush();
- }
-
- bool FlushServer() override {
- return mS2cBuf->Flush();
- }
-
- private:
- std::unique_ptr<utils::TerribleCommandBuffer> mC2sBuf;
- std::unique_ptr<utils::TerribleCommandBuffer> mS2cBuf;
- std::unique_ptr<WireServerTraceLayer> mWireServerTraceLayer;
- std::unique_ptr<dawn::wire::WireServer> mWireServer;
- std::unique_ptr<dawn::wire::WireClient> mWireClient;
- };
-
- } // anonymous namespace
-
- std::unique_ptr<WireHelper> CreateWireHelper(bool useWire, const char* wireTraceDir) {
- if (useWire) {
- return std::unique_ptr<WireHelper>(new WireHelperProxy(wireTraceDir));
- } else {
- return std::unique_ptr<WireHelper>(new WireHelperDirect());
+namespace {
+
+class WireServerTraceLayer : public dawn::wire::CommandHandler {
+ public:
+ WireServerTraceLayer(const char* dir, dawn::wire::CommandHandler* handler)
+ : dawn::wire::CommandHandler(), mDir(dir), mHandler(handler) {
+ const char* sep = GetPathSeparator();
+ if (mDir.size() > 0 && mDir.back() != *sep) {
+ mDir += sep;
}
}
- WireHelper::~WireHelper() {
- dawnProcSetProcs(nullptr);
+ void BeginWireTrace(const char* name) {
+ std::string filename = name;
+ // Replace slashes in gtest names with underscores so everything is in one
+ // directory.
+ std::replace(filename.begin(), filename.end(), '/', '_');
+ std::replace(filename.begin(), filename.end(), '\\', '_');
+
+ // Prepend the filename with the directory.
+ filename = mDir + filename;
+
+ ASSERT(!mFile.is_open());
+ mFile.open(filename, std::ios_base::out | std::ios_base::binary | std::ios_base::trunc);
+
+ // Write the initial 8 bytes. This means the fuzzer should never inject an
+ // error.
+ const uint64_t injectedErrorIndex = 0xFFFF'FFFF'FFFF'FFFF;
+ mFile.write(reinterpret_cast<const char*>(&injectedErrorIndex), sizeof(injectedErrorIndex));
+ }
+
+ const volatile char* HandleCommands(const volatile char* commands, size_t size) override {
+ if (mFile.is_open()) {
+ mFile.write(const_cast<const char*>(commands), size);
+ }
+ return mHandler->HandleCommands(commands, size);
+ }
+
+ private:
+ std::string mDir;
+ dawn::wire::CommandHandler* mHandler;
+ std::ofstream mFile;
+};
+
+class WireHelperDirect : public WireHelper {
+ public:
+ explicit WireHelperDirect(const DawnProcTable& procs) { dawnProcSetProcs(&procs); }
+
+ wgpu::Instance RegisterInstance(WGPUInstance backendInstance) override {
+ ASSERT(backendInstance != nullptr);
+ return wgpu::Instance(backendInstance);
}
+ void BeginWireTrace(const char* name) override {}
+
+ bool FlushClient() override { return true; }
+
+ bool FlushServer() override { return true; }
+};
+
+class WireHelperProxy : public WireHelper {
+ public:
+ explicit WireHelperProxy(const char* wireTraceDir, const DawnProcTable& procs) {
+ mC2sBuf = std::make_unique<utils::TerribleCommandBuffer>();
+ mS2cBuf = std::make_unique<utils::TerribleCommandBuffer>();
+
+ dawn::wire::WireServerDescriptor serverDesc = {};
+ serverDesc.procs = &procs;
+ serverDesc.serializer = mS2cBuf.get();
+
+ mWireServer.reset(new dawn::wire::WireServer(serverDesc));
+ mC2sBuf->SetHandler(mWireServer.get());
+
+ if (wireTraceDir != nullptr && strlen(wireTraceDir) > 0) {
+ mWireServerTraceLayer.reset(new WireServerTraceLayer(wireTraceDir, mWireServer.get()));
+ mC2sBuf->SetHandler(mWireServerTraceLayer.get());
+ }
+
+ dawn::wire::WireClientDescriptor clientDesc = {};
+ clientDesc.serializer = mC2sBuf.get();
+
+ mWireClient.reset(new dawn::wire::WireClient(clientDesc));
+ mS2cBuf->SetHandler(mWireClient.get());
+ dawnProcSetProcs(&dawn::wire::client::GetProcs());
+ }
+
+ wgpu::Instance RegisterInstance(WGPUInstance backendInstance) override {
+ ASSERT(backendInstance != nullptr);
+
+ auto reservation = mWireClient->ReserveInstance();
+ mWireServer->InjectInstance(backendInstance, reservation.id, reservation.generation);
+
+ return wgpu::Instance::Acquire(reservation.instance);
+ }
+
+ void BeginWireTrace(const char* name) override {
+ if (mWireServerTraceLayer) {
+ return mWireServerTraceLayer->BeginWireTrace(name);
+ }
+ }
+
+ bool FlushClient() override { return mC2sBuf->Flush(); }
+
+ bool FlushServer() override { return mS2cBuf->Flush(); }
+
+ private:
+ std::unique_ptr<utils::TerribleCommandBuffer> mC2sBuf;
+ std::unique_ptr<utils::TerribleCommandBuffer> mS2cBuf;
+ std::unique_ptr<WireServerTraceLayer> mWireServerTraceLayer;
+ std::unique_ptr<dawn::wire::WireServer> mWireServer;
+ std::unique_ptr<dawn::wire::WireClient> mWireClient;
+};
+
+} // anonymous namespace
+
+std::unique_ptr<WireHelper> CreateWireHelper(const DawnProcTable& procs,
+ bool useWire,
+ const char* wireTraceDir) {
+ if (useWire) {
+ return std::unique_ptr<WireHelper>(new WireHelperProxy(wireTraceDir, procs));
+ } else {
+ return std::unique_ptr<WireHelper>(new WireHelperDirect(procs));
+ }
+}
+
+WireHelper::~WireHelper() {
+ dawnProcSetProcs(nullptr);
+}
+
} // namespace utils
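
For orientation, a minimal sketch of how a caller might drive the reworked helper. CreateWireHelper, RegisterInstance, FlushClient and FlushServer follow the signatures in this patch; the wrapper function, the backendInstance argument and the choice of dawn::native::GetProcs() as the proc table are illustrative assumptions, not part of the change.

#include <memory>

#include "dawn/native/DawnNative.h"
#include "dawn/utils/WireHelper.h"

// Sketch only: |backendInstance| is assumed to be created elsewhere and stays
// owned by the caller, matching the new "does not take ownership" contract.
void RunThroughWire(WGPUInstance backendInstance, bool useWire) {
    std::unique_ptr<utils::WireHelper> helper =
        utils::CreateWireHelper(dawn::native::GetProcs(), useWire, /*wireTraceDir=*/nullptr);

    wgpu::Instance instance = helper->RegisterInstance(backendInstance);

    // ... record WebGPU work against |instance| ...

    // With useWire == true, nothing reaches the server until both sides are flushed.
    helper->FlushClient();
    helper->FlushServer();
}

With the direct (non-wire) helper the flush calls are no-ops that simply return true, so the same test code runs unchanged in both configurations.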
diff --git a/chromium/third_party/dawn/src/dawn/utils/WireHelper.h b/chromium/third_party/dawn/src/dawn/utils/WireHelper.h
index 740c4c06245..a81b919e12b 100644
--- a/chromium/third_party/dawn/src/dawn/utils/WireHelper.h
+++ b/chromium/third_party/dawn/src/dawn/utils/WireHelper.h
@@ -15,29 +15,33 @@
#ifndef SRC_DAWN_UTILS_WIREHELPER_H_
#define SRC_DAWN_UTILS_WIREHELPER_H_
-#include "dawn/webgpu_cpp.h"
-
#include <cstdint>
#include <memory>
+#include <utility>
+
+#include "dawn/webgpu_cpp.h"
namespace utils {
- class WireHelper {
- public:
- virtual ~WireHelper();
+class WireHelper {
+ public:
+ virtual ~WireHelper();
- // Registers the device on the wire, if present.
- // Returns a pair of the client device and backend device.
- // The function should take ownership of |backendDevice|.
- virtual std::pair<wgpu::Device, WGPUDevice> RegisterDevice(WGPUDevice backendDevice) = 0;
+ // Registers the instance on the wire, if present.
+ // Returns the wgpu::Instance which is the client instance on the wire, and
+ // the backend instance without the wire.
+ // The function should not take ownership of |backendInstance|.
+ virtual wgpu::Instance RegisterInstance(WGPUInstance backendInstance) = 0;
- virtual void BeginWireTrace(const char* name) = 0;
+ virtual void BeginWireTrace(const char* name) = 0;
- virtual bool FlushClient() = 0;
- virtual bool FlushServer() = 0;
- };
+ virtual bool FlushClient() = 0;
+ virtual bool FlushServer() = 0;
+};
- std::unique_ptr<WireHelper> CreateWireHelper(bool useWire, const char* wireTraceDir = nullptr);
+std::unique_ptr<WireHelper> CreateWireHelper(const DawnProcTable& procs,
+ bool useWire,
+ const char* wireTraceDir = nullptr);
} // namespace utils
diff --git a/chromium/third_party/dawn/src/dawn/wire/BUILD.gn b/chromium/third_party/dawn/src/dawn/wire/BUILD.gn
index bff2136e0f7..7189bd5fd35 100644
--- a/chromium/third_party/dawn/src/dawn/wire/BUILD.gn
+++ b/chromium/third_party/dawn/src/dawn/wire/BUILD.gn
@@ -89,11 +89,17 @@ dawn_component("wire") {
"client/LimitsAndFeatures.cpp",
"client/LimitsAndFeatures.h",
"client/ObjectAllocator.h",
+ "client/ObjectBase.cpp",
+ "client/ObjectBase.h",
+ "client/QuerySet.cpp",
+ "client/QuerySet.h",
"client/Queue.cpp",
"client/Queue.h",
"client/RequestTracker.h",
"client/ShaderModule.cpp",
"client/ShaderModule.h",
+ "client/Texture.cpp",
+ "client/Texture.h",
"server/ObjectStorage.h",
"server/Server.cpp",
"server/Server.h",
diff --git a/chromium/third_party/dawn/src/dawn/wire/BufferConsumer.h b/chromium/third_party/dawn/src/dawn/wire/BufferConsumer.h
index 48f88f81a1d..1ae8451109b 100644
--- a/chromium/third_party/dawn/src/dawn/wire/BufferConsumer.h
+++ b/chromium/third_party/dawn/src/dawn/wire/BufferConsumer.h
@@ -15,70 +15,65 @@
#ifndef SRC_DAWN_WIRE_BUFFERCONSUMER_H_
#define SRC_DAWN_WIRE_BUFFERCONSUMER_H_
-#include "dawn/wire/WireResult.h"
-
#include <cstddef>
+#include "dawn/wire/WireResult.h"
+
namespace dawn::wire {
- // BufferConsumer is a utility class that allows reading bytes from a buffer
- // while simultaneously decrementing the amount of remaining space by exactly
- // the amount read. It helps prevent bugs where incrementing a pointer and
- // decrementing a size value are not kept in sync.
- // BufferConsumer also contains bounds checks to prevent reading out-of-bounds.
- template <typename BufferT>
- class BufferConsumer {
- static_assert(sizeof(BufferT) == 1,
- "BufferT must be 1-byte, but may have const/volatile qualifiers.");
-
- public:
- BufferConsumer(BufferT* buffer, size_t size) : mBuffer(buffer), mSize(size) {
- }
-
- BufferT* Buffer() const {
- return mBuffer;
- }
- size_t AvailableSize() const {
- return mSize;
- }
-
- protected:
- template <typename T, typename N>
- WireResult NextN(N count, T** data);
-
- template <typename T>
- WireResult Next(T** data);
-
- template <typename T>
- WireResult Peek(T** data);
-
- private:
- BufferT* mBuffer;
- size_t mSize;
- };
-
- class SerializeBuffer : public BufferConsumer<char> {
- public:
- using BufferConsumer::BufferConsumer;
- using BufferConsumer::Next;
- using BufferConsumer::NextN;
- };
-
- class DeserializeBuffer : public BufferConsumer<const volatile char> {
- public:
- using BufferConsumer::BufferConsumer;
- using BufferConsumer::Peek;
-
- template <typename T, typename N>
- WireResult ReadN(N count, const volatile T** data) {
- return NextN(count, data);
- }
-
- template <typename T>
- WireResult Read(const volatile T** data) {
- return Next(data);
- }
- };
+// BufferConsumer is a utility class that allows reading bytes from a buffer
+// while simultaneously decrementing the amount of remaining space by exactly
+// the amount read. It helps prevent bugs where incrementing a pointer and
+// decrementing a size value are not kept in sync.
+// BufferConsumer also contains bounds checks to prevent reading out-of-bounds.
+template <typename BufferT>
+class BufferConsumer {
+ static_assert(sizeof(BufferT) == 1,
+ "BufferT must be 1-byte, but may have const/volatile qualifiers.");
+
+ public:
+ BufferConsumer(BufferT* buffer, size_t size) : mBuffer(buffer), mSize(size) {}
+
+ BufferT* Buffer() const { return mBuffer; }
+ size_t AvailableSize() const { return mSize; }
+
+ protected:
+ template <typename T, typename N>
+ WireResult NextN(N count, T** data);
+
+ template <typename T>
+ WireResult Next(T** data);
+
+ template <typename T>
+ WireResult Peek(T** data);
+
+ private:
+ BufferT* mBuffer;
+ size_t mSize;
+};
+
+class SerializeBuffer : public BufferConsumer<char> {
+ public:
+ using BufferConsumer::BufferConsumer;
+ using BufferConsumer::Next;
+ using BufferConsumer::NextN;
+};
+
+class DeserializeBuffer : public BufferConsumer<const volatile char> {
+ public:
+ using BufferConsumer::BufferConsumer;
+ using BufferConsumer::Peek;
+
+ template <typename T, typename N>
+ WireResult ReadN(N count, const volatile T** data) {
+ return NextN(count, data);
+ }
+
+ template <typename T>
+ WireResult Read(const volatile T** data) {
+ return Next(data);
+ }
+};
} // namespace dawn::wire
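
A small sketch of the intended write/read pairing; ExampleHeader and the raw storage buffer are made up for illustration, while SerializeBuffer, DeserializeBuffer, Next and Read come from the header above (their template bodies live in BufferConsumer_impl.h).

#include <cstddef>
#include <cstdint>

#include "dawn/wire/BufferConsumer_impl.h"

// Hypothetical payload type; real wire commands are generated from dawn.json.
struct ExampleHeader {
    uint64_t commandSize;
};

dawn::wire::WireResult RoundTrip(char* storage, size_t storageSize) {
    // Writer: Next() hands out sizeof(T) bytes and shrinks AvailableSize() in lockstep,
    // so the pointer and the remaining-size bookkeeping cannot drift apart.
    dawn::wire::SerializeBuffer writer(storage, storageSize);
    ExampleHeader* out = nullptr;
    if (writer.Next(&out) != dawn::wire::WireResult::Success) {
        return dawn::wire::WireResult::FatalError;
    }
    out->commandSize = storageSize;

    // Reader: the const volatile element type mirrors data received over the wire.
    dawn::wire::DeserializeBuffer reader(storage, storageSize);
    const volatile ExampleHeader* in = nullptr;
    if (reader.Read(&in) != dawn::wire::WireResult::Success) {
        return dawn::wire::WireResult::FatalError;
    }
    return in->commandSize == storageSize ? dawn::wire::WireResult::Success
                                          : dawn::wire::WireResult::FatalError;
}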
diff --git a/chromium/third_party/dawn/src/dawn/wire/BufferConsumer_impl.h b/chromium/third_party/dawn/src/dawn/wire/BufferConsumer_impl.h
index 11c771b71dd..6b5d0a1323d 100644
--- a/chromium/third_party/dawn/src/dawn/wire/BufferConsumer_impl.h
+++ b/chromium/third_party/dawn/src/dawn/wire/BufferConsumer_impl.h
@@ -22,52 +22,52 @@
namespace dawn::wire {
- template <typename BufferT>
- template <typename T>
- WireResult BufferConsumer<BufferT>::Peek(T** data) {
- if (sizeof(T) > mSize) {
- return WireResult::FatalError;
- }
-
- *data = reinterpret_cast<T*>(mBuffer);
- return WireResult::Success;
+template <typename BufferT>
+template <typename T>
+WireResult BufferConsumer<BufferT>::Peek(T** data) {
+ if (sizeof(T) > mSize) {
+ return WireResult::FatalError;
}
- template <typename BufferT>
- template <typename T>
- WireResult BufferConsumer<BufferT>::Next(T** data) {
- if (sizeof(T) > mSize) {
- return WireResult::FatalError;
- }
+ *data = reinterpret_cast<T*>(mBuffer);
+ return WireResult::Success;
+}
- *data = reinterpret_cast<T*>(mBuffer);
- mBuffer += sizeof(T);
- mSize -= sizeof(T);
- return WireResult::Success;
+template <typename BufferT>
+template <typename T>
+WireResult BufferConsumer<BufferT>::Next(T** data) {
+ if (sizeof(T) > mSize) {
+ return WireResult::FatalError;
}
- template <typename BufferT>
- template <typename T, typename N>
- WireResult BufferConsumer<BufferT>::NextN(N count, T** data) {
- static_assert(std::is_unsigned<N>::value, "|count| argument of NextN must be unsigned.");
+ *data = reinterpret_cast<T*>(mBuffer);
+ mBuffer += sizeof(T);
+ mSize -= sizeof(T);
+ return WireResult::Success;
+}
- constexpr size_t kMaxCountWithoutOverflows = std::numeric_limits<size_t>::max() / sizeof(T);
- if (count > kMaxCountWithoutOverflows) {
- return WireResult::FatalError;
- }
+template <typename BufferT>
+template <typename T, typename N>
+WireResult BufferConsumer<BufferT>::NextN(N count, T** data) {
+ static_assert(std::is_unsigned<N>::value, "|count| argument of NextN must be unsigned.");
- // Cannot overflow because |count| is not greater than |kMaxCountWithoutOverflows|.
- size_t totalSize = sizeof(T) * count;
- if (totalSize > mSize) {
- return WireResult::FatalError;
- }
+ constexpr size_t kMaxCountWithoutOverflows = std::numeric_limits<size_t>::max() / sizeof(T);
+ if (count > kMaxCountWithoutOverflows) {
+ return WireResult::FatalError;
+ }
- *data = reinterpret_cast<T*>(mBuffer);
- mBuffer += totalSize;
- mSize -= totalSize;
- return WireResult::Success;
+ // Cannot overflow because |count| is not greater than |kMaxCountWithoutOverflows|.
+ size_t totalSize = sizeof(T) * count;
+ if (totalSize > mSize) {
+ return WireResult::FatalError;
}
+ *data = reinterpret_cast<T*>(mBuffer);
+ mBuffer += totalSize;
+ mSize -= totalSize;
+ return WireResult::Success;
+}
+
} // namespace dawn::wire
#endif // SRC_DAWN_WIRE_BUFFERCONSUMER_IMPL_H_
diff --git a/chromium/third_party/dawn/src/dawn/wire/CMakeLists.txt b/chromium/third_party/dawn/src/dawn/wire/CMakeLists.txt
index 4afb6bd6766..e389b43c9bc 100644
--- a/chromium/third_party/dawn/src/dawn/wire/CMakeLists.txt
+++ b/chromium/third_party/dawn/src/dawn/wire/CMakeLists.txt
@@ -18,7 +18,7 @@ DawnJSONGenerator(
RESULT_VARIABLE "DAWN_WIRE_GEN_SOURCES"
)
-add_library(dawn_wire ${DAWN_DUMMY_FILE})
+add_library(dawn_wire ${DAWN_PLACEHOLDER_FILE})
common_compile_options(dawn_wire)
target_compile_definitions(dawn_wire PRIVATE "DAWN_WIRE_IMPLEMENTATION")
@@ -62,11 +62,17 @@ target_sources(dawn_wire PRIVATE
"client/LimitsAndFeatures.cpp"
"client/LimitsAndFeatures.h"
"client/ObjectAllocator.h"
+ "client/ObjectBase.cpp"
+ "client/ObjectBase.h"
+ "client/QuerySet.cpp"
+ "client/QuerySet.h"
"client/Queue.cpp"
"client/Queue.h"
"client/RequestTracker.h"
"client/ShaderModule.cpp"
"client/ShaderModule.h"
+ "client/Texture.cpp"
+ "client/Texture.h"
"server/ObjectStorage.h"
"server/Server.cpp"
"server/Server.h"
diff --git a/chromium/third_party/dawn/src/dawn/wire/ChunkedCommandHandler.cpp b/chromium/third_party/dawn/src/dawn/wire/ChunkedCommandHandler.cpp
index 81136867a58..7dd1075ead1 100644
--- a/chromium/third_party/dawn/src/dawn/wire/ChunkedCommandHandler.cpp
+++ b/chromium/third_party/dawn/src/dawn/wire/ChunkedCommandHandler.cpp
@@ -14,66 +14,68 @@
#include "dawn/wire/ChunkedCommandHandler.h"
-#include "dawn/common/Alloc.h"
-
#include <algorithm>
#include <cstring>
+#include <utility>
+
+#include "dawn/common/Alloc.h"
namespace dawn::wire {
- ChunkedCommandHandler::~ChunkedCommandHandler() = default;
-
- const volatile char* ChunkedCommandHandler::HandleCommands(const volatile char* commands,
- size_t size) {
- if (mChunkedCommandRemainingSize > 0) {
- // If there is a chunked command in flight, append the command data.
- // We append at most |mChunkedCommandRemainingSize| which is enough to finish the
- // in-flight chunked command, and then pass the rest along to a second call to
- // |HandleCommandsImpl|.
- size_t chunkSize = std::min(size, mChunkedCommandRemainingSize);
-
- memcpy(mChunkedCommandData.get() + mChunkedCommandPutOffset,
- const_cast<const char*>(commands), chunkSize);
- mChunkedCommandPutOffset += chunkSize;
- mChunkedCommandRemainingSize -= chunkSize;
-
- commands += chunkSize;
- size -= chunkSize;
-
- if (mChunkedCommandRemainingSize == 0) {
- // Once the chunked command is complete, pass the data to the command handler
- // implemenation.
- auto chunkedCommandData = std::move(mChunkedCommandData);
- if (HandleCommandsImpl(chunkedCommandData.get(), mChunkedCommandPutOffset) ==
- nullptr) {
- // |HandleCommandsImpl| returns nullptr on error. Forward any errors
- // out.
- return nullptr;
- }
+ChunkedCommandHandler::ChunkedCommandHandler() = default;
+
+ChunkedCommandHandler::~ChunkedCommandHandler() = default;
+
+const volatile char* ChunkedCommandHandler::HandleCommands(const volatile char* commands,
+ size_t size) {
+ if (mChunkedCommandRemainingSize > 0) {
+ // If there is a chunked command in flight, append the command data.
+ // We append at most |mChunkedCommandRemainingSize| which is enough to finish the
+ // in-flight chunked command, and then pass the rest along to a second call to
+ // |HandleCommandsImpl|.
+ size_t chunkSize = std::min(size, mChunkedCommandRemainingSize);
+
+ memcpy(mChunkedCommandData.get() + mChunkedCommandPutOffset,
+ const_cast<const char*>(commands), chunkSize);
+ mChunkedCommandPutOffset += chunkSize;
+ mChunkedCommandRemainingSize -= chunkSize;
+
+ commands += chunkSize;
+ size -= chunkSize;
+
+ if (mChunkedCommandRemainingSize == 0) {
+ // Once the chunked command is complete, pass the data to the command handler
+            // implementation.
+ auto chunkedCommandData = std::move(mChunkedCommandData);
+ if (HandleCommandsImpl(chunkedCommandData.get(), mChunkedCommandPutOffset) == nullptr) {
+ // |HandleCommandsImpl| returns nullptr on error. Forward any errors
+ // out.
+ return nullptr;
}
}
-
- return HandleCommandsImpl(commands, size);
}
- ChunkedCommandHandler::ChunkedCommandsResult ChunkedCommandHandler::BeginChunkedCommandData(
- const volatile char* commands,
- size_t commandSize,
- size_t initialSize) {
- ASSERT(!mChunkedCommandData);
-
- // Reserve space for all the command data we're expecting, and copy the initial data
- // to the start of the memory.
- mChunkedCommandData.reset(AllocNoThrow<char>(commandSize));
- if (!mChunkedCommandData) {
- return ChunkedCommandsResult::Error;
- }
+ return HandleCommandsImpl(commands, size);
+}
- memcpy(mChunkedCommandData.get(), const_cast<const char*>(commands), initialSize);
- mChunkedCommandPutOffset = initialSize;
- mChunkedCommandRemainingSize = commandSize - initialSize;
+ChunkedCommandHandler::ChunkedCommandsResult ChunkedCommandHandler::BeginChunkedCommandData(
+ const volatile char* commands,
+ size_t commandSize,
+ size_t initialSize) {
+ ASSERT(!mChunkedCommandData);
- return ChunkedCommandsResult::Consumed;
+ // Reserve space for all the command data we're expecting, and copy the initial data
+ // to the start of the memory.
+ mChunkedCommandData.reset(AllocNoThrow<char>(commandSize));
+ if (!mChunkedCommandData) {
+ return ChunkedCommandsResult::Error;
}
+ memcpy(mChunkedCommandData.get(), const_cast<const char*>(commands), initialSize);
+ mChunkedCommandPutOffset = initialSize;
+ mChunkedCommandRemainingSize = commandSize - initialSize;
+
+ return ChunkedCommandsResult::Consumed;
+}
+
} // namespace dawn::wire
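
The reflowed HandleCommands above is easier to follow with the splitting rule spelled out in isolation: finish the in-flight chunk first, then hand whatever is left to the normal path. The helper below is a standalone illustration of that rule, not Dawn code.

#include <algorithm>
#include <cstddef>
#include <vector>

// Copy at most |*remaining| bytes of |data| into the pending chunk and return
// how many bytes are left over for ordinary (non-chunked) command handling.
size_t AppendToPendingChunk(std::vector<char>* pending,
                            size_t* remaining,
                            const char* data,
                            size_t size) {
    size_t chunkSize = std::min(size, *remaining);
    pending->insert(pending->end(), data, data + chunkSize);
    *remaining -= chunkSize;
    // Once *remaining hits zero the accumulated chunk is complete and can be
    // dispatched; the remaining size - chunkSize bytes follow the regular path.
    return size - chunkSize;
}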
diff --git a/chromium/third_party/dawn/src/dawn/wire/ChunkedCommandHandler.h b/chromium/third_party/dawn/src/dawn/wire/ChunkedCommandHandler.h
index a0aaf4e7f5c..713ed38c147 100644
--- a/chromium/third_party/dawn/src/dawn/wire/ChunkedCommandHandler.h
+++ b/chromium/third_party/dawn/src/dawn/wire/ChunkedCommandHandler.h
@@ -15,56 +15,57 @@
#ifndef SRC_DAWN_WIRE_CHUNKEDCOMMANDHANDLER_H_
#define SRC_DAWN_WIRE_CHUNKEDCOMMANDHANDLER_H_
+#include <cstdint>
+#include <limits>
+#include <memory>
+
#include "dawn/common/Assert.h"
#include "dawn/wire/Wire.h"
#include "dawn/wire/WireCmd_autogen.h"
-#include <cstdint>
-#include <memory>
-
namespace dawn::wire {
- class ChunkedCommandHandler : public CommandHandler {
- public:
- const volatile char* HandleCommands(const volatile char* commands, size_t size) override;
- ~ChunkedCommandHandler() override;
+class ChunkedCommandHandler : public CommandHandler {
+ public:
+ ChunkedCommandHandler();
+ ~ChunkedCommandHandler() override;
+
+ const volatile char* HandleCommands(const volatile char* commands, size_t size) override;
- protected:
- enum class ChunkedCommandsResult {
- Passthrough,
- Consumed,
- Error,
- };
+ protected:
+ enum class ChunkedCommandsResult {
+ Passthrough,
+ Consumed,
+ Error,
+ };
- // Returns |true| if the commands were entirely consumed into the chunked command vector
- // and should be handled later once we receive all the command data.
- // Returns |false| if commands should be handled now immediately.
- ChunkedCommandsResult HandleChunkedCommands(const volatile char* commands, size_t size) {
- uint64_t commandSize64 =
- reinterpret_cast<const volatile CmdHeader*>(commands)->commandSize;
+    // Returns Consumed if the commands were entirely consumed into the chunked command
+    // vector and should be handled later once we receive all the command data.
+    // Returns Passthrough if the commands should be handled immediately.
+ ChunkedCommandsResult HandleChunkedCommands(const volatile char* commands, size_t size) {
+ uint64_t commandSize64 = reinterpret_cast<const volatile CmdHeader*>(commands)->commandSize;
- if (commandSize64 > std::numeric_limits<size_t>::max()) {
- return ChunkedCommandsResult::Error;
- }
- size_t commandSize = static_cast<size_t>(commandSize64);
- if (size < commandSize) {
- return BeginChunkedCommandData(commands, commandSize, size);
- }
- return ChunkedCommandsResult::Passthrough;
+ if (commandSize64 > std::numeric_limits<size_t>::max()) {
+ return ChunkedCommandsResult::Error;
}
+ size_t commandSize = static_cast<size_t>(commandSize64);
+ if (size < commandSize) {
+ return BeginChunkedCommandData(commands, commandSize, size);
+ }
+ return ChunkedCommandsResult::Passthrough;
+ }
- private:
- virtual const volatile char* HandleCommandsImpl(const volatile char* commands,
- size_t size) = 0;
+ private:
+ virtual const volatile char* HandleCommandsImpl(const volatile char* commands, size_t size) = 0;
- ChunkedCommandsResult BeginChunkedCommandData(const volatile char* commands,
- size_t commandSize,
- size_t initialSize);
+ ChunkedCommandsResult BeginChunkedCommandData(const volatile char* commands,
+ size_t commandSize,
+ size_t initialSize);
- size_t mChunkedCommandRemainingSize = 0;
- size_t mChunkedCommandPutOffset = 0;
- std::unique_ptr<char[]> mChunkedCommandData;
- };
+ size_t mChunkedCommandRemainingSize = 0;
+ size_t mChunkedCommandPutOffset = 0;
+ std::unique_ptr<char[]> mChunkedCommandData;
+};
} // namespace dawn::wire
diff --git a/chromium/third_party/dawn/src/dawn/wire/ChunkedCommandSerializer.cpp b/chromium/third_party/dawn/src/dawn/wire/ChunkedCommandSerializer.cpp
index b2e4a56d9ad..380fae1575a 100644
--- a/chromium/third_party/dawn/src/dawn/wire/ChunkedCommandSerializer.cpp
+++ b/chromium/third_party/dawn/src/dawn/wire/ChunkedCommandSerializer.cpp
@@ -16,23 +16,22 @@
namespace dawn::wire {
- ChunkedCommandSerializer::ChunkedCommandSerializer(CommandSerializer* serializer)
- : mSerializer(serializer), mMaxAllocationSize(serializer->GetMaximumAllocationSize()) {
- }
-
- void ChunkedCommandSerializer::SerializeChunkedCommand(const char* allocatedBuffer,
- size_t remainingSize) {
- while (remainingSize > 0) {
- size_t chunkSize = std::min(remainingSize, mMaxAllocationSize);
- void* dst = mSerializer->GetCmdSpace(chunkSize);
- if (dst == nullptr) {
- return;
- }
- memcpy(dst, allocatedBuffer, chunkSize);
+ChunkedCommandSerializer::ChunkedCommandSerializer(CommandSerializer* serializer)
+ : mSerializer(serializer), mMaxAllocationSize(serializer->GetMaximumAllocationSize()) {}
- allocatedBuffer += chunkSize;
- remainingSize -= chunkSize;
+void ChunkedCommandSerializer::SerializeChunkedCommand(const char* allocatedBuffer,
+ size_t remainingSize) {
+ while (remainingSize > 0) {
+ size_t chunkSize = std::min(remainingSize, mMaxAllocationSize);
+ void* dst = mSerializer->GetCmdSpace(chunkSize);
+ if (dst == nullptr) {
+ return;
}
+ memcpy(dst, allocatedBuffer, chunkSize);
+
+ allocatedBuffer += chunkSize;
+ remainingSize -= chunkSize;
}
+}
} // namespace dawn::wire
diff --git a/chromium/third_party/dawn/src/dawn/wire/ChunkedCommandSerializer.h b/chromium/third_party/dawn/src/dawn/wire/ChunkedCommandSerializer.h
index 363d2990c2d..7ac72e5ddb1 100644
--- a/chromium/third_party/dawn/src/dawn/wire/ChunkedCommandSerializer.h
+++ b/chromium/third_party/dawn/src/dawn/wire/ChunkedCommandSerializer.h
@@ -15,99 +15,100 @@
#ifndef SRC_DAWN_WIRE_CHUNKEDCOMMANDSERIALIZER_H_
#define SRC_DAWN_WIRE_CHUNKEDCOMMANDSERIALIZER_H_
+#include <algorithm>
+#include <cstring>
+#include <memory>
+#include <utility>
+
#include "dawn/common/Alloc.h"
#include "dawn/common/Compiler.h"
#include "dawn/wire/Wire.h"
#include "dawn/wire/WireCmd_autogen.h"
-#include <algorithm>
-#include <cstring>
-#include <memory>
-
namespace dawn::wire {
- class ChunkedCommandSerializer {
- public:
- explicit ChunkedCommandSerializer(CommandSerializer* serializer);
-
- template <typename Cmd>
- void SerializeCommand(const Cmd& cmd) {
- SerializeCommand(cmd, 0, [](SerializeBuffer*) { return WireResult::Success; });
- }
-
- template <typename Cmd, typename ExtraSizeSerializeFn>
- void SerializeCommand(const Cmd& cmd,
+class ChunkedCommandSerializer {
+ public:
+ explicit ChunkedCommandSerializer(CommandSerializer* serializer);
+
+ template <typename Cmd>
+ void SerializeCommand(const Cmd& cmd) {
+ SerializeCommand(cmd, 0, [](SerializeBuffer*) { return WireResult::Success; });
+ }
+
+ template <typename Cmd, typename ExtraSizeSerializeFn>
+ void SerializeCommand(const Cmd& cmd,
+ size_t extraSize,
+ ExtraSizeSerializeFn&& SerializeExtraSize) {
+ SerializeCommandImpl(
+ cmd,
+ [](const Cmd& cmd, size_t requiredSize, SerializeBuffer* serializeBuffer) {
+ return cmd.Serialize(requiredSize, serializeBuffer);
+ },
+ extraSize, std::forward<ExtraSizeSerializeFn>(SerializeExtraSize));
+ }
+
+ template <typename Cmd>
+ void SerializeCommand(const Cmd& cmd, const ObjectIdProvider& objectIdProvider) {
+ SerializeCommand(cmd, objectIdProvider, 0,
+ [](SerializeBuffer*) { return WireResult::Success; });
+ }
+
+ template <typename Cmd, typename ExtraSizeSerializeFn>
+ void SerializeCommand(const Cmd& cmd,
+ const ObjectIdProvider& objectIdProvider,
+ size_t extraSize,
+ ExtraSizeSerializeFn&& SerializeExtraSize) {
+ SerializeCommandImpl(
+ cmd,
+ [&objectIdProvider](const Cmd& cmd, size_t requiredSize,
+ SerializeBuffer* serializeBuffer) {
+ return cmd.Serialize(requiredSize, serializeBuffer, objectIdProvider);
+ },
+ extraSize, std::forward<ExtraSizeSerializeFn>(SerializeExtraSize));
+ }
+
+ private:
+ template <typename Cmd, typename SerializeCmdFn, typename ExtraSizeSerializeFn>
+ void SerializeCommandImpl(const Cmd& cmd,
+ SerializeCmdFn&& SerializeCmd,
size_t extraSize,
ExtraSizeSerializeFn&& SerializeExtraSize) {
- SerializeCommandImpl(
- cmd,
- [](const Cmd& cmd, size_t requiredSize, SerializeBuffer* serializeBuffer) {
- return cmd.Serialize(requiredSize, serializeBuffer);
- },
- extraSize, std::forward<ExtraSizeSerializeFn>(SerializeExtraSize));
- }
-
- template <typename Cmd>
- void SerializeCommand(const Cmd& cmd, const ObjectIdProvider& objectIdProvider) {
- SerializeCommand(cmd, objectIdProvider, 0,
- [](SerializeBuffer*) { return WireResult::Success; });
- }
-
- template <typename Cmd, typename ExtraSizeSerializeFn>
- void SerializeCommand(const Cmd& cmd,
- const ObjectIdProvider& objectIdProvider,
- size_t extraSize,
- ExtraSizeSerializeFn&& SerializeExtraSize) {
- SerializeCommandImpl(
- cmd,
- [&objectIdProvider](const Cmd& cmd, size_t requiredSize,
- SerializeBuffer* serializeBuffer) {
- return cmd.Serialize(requiredSize, serializeBuffer, objectIdProvider);
- },
- extraSize, std::forward<ExtraSizeSerializeFn>(SerializeExtraSize));
- }
-
- private:
- template <typename Cmd, typename SerializeCmdFn, typename ExtraSizeSerializeFn>
- void SerializeCommandImpl(const Cmd& cmd,
- SerializeCmdFn&& SerializeCmd,
- size_t extraSize,
- ExtraSizeSerializeFn&& SerializeExtraSize) {
- size_t commandSize = cmd.GetRequiredSize();
- size_t requiredSize = commandSize + extraSize;
-
- if (requiredSize <= mMaxAllocationSize) {
- char* allocatedBuffer = static_cast<char*>(mSerializer->GetCmdSpace(requiredSize));
- if (allocatedBuffer != nullptr) {
- SerializeBuffer serializeBuffer(allocatedBuffer, requiredSize);
- WireResult r1 = SerializeCmd(cmd, requiredSize, &serializeBuffer);
- WireResult r2 = SerializeExtraSize(&serializeBuffer);
- if (DAWN_UNLIKELY(r1 != WireResult::Success || r2 != WireResult::Success)) {
- mSerializer->OnSerializeError();
- }
+ size_t commandSize = cmd.GetRequiredSize();
+ size_t requiredSize = commandSize + extraSize;
+
+ if (requiredSize <= mMaxAllocationSize) {
+ char* allocatedBuffer = static_cast<char*>(mSerializer->GetCmdSpace(requiredSize));
+ if (allocatedBuffer != nullptr) {
+ SerializeBuffer serializeBuffer(allocatedBuffer, requiredSize);
+ WireResult r1 = SerializeCmd(cmd, requiredSize, &serializeBuffer);
+ WireResult r2 = SerializeExtraSize(&serializeBuffer);
+ if (DAWN_UNLIKELY(r1 != WireResult::Success || r2 != WireResult::Success)) {
+ mSerializer->OnSerializeError();
}
- return;
}
+ return;
+ }
- auto cmdSpace = std::unique_ptr<char[]>(AllocNoThrow<char>(requiredSize));
- if (!cmdSpace) {
- return;
- }
- SerializeBuffer serializeBuffer(cmdSpace.get(), requiredSize);
- WireResult r1 = SerializeCmd(cmd, requiredSize, &serializeBuffer);
- WireResult r2 = SerializeExtraSize(&serializeBuffer);
- if (DAWN_UNLIKELY(r1 != WireResult::Success || r2 != WireResult::Success)) {
- mSerializer->OnSerializeError();
- return;
- }
- SerializeChunkedCommand(cmdSpace.get(), requiredSize);
+ auto cmdSpace = std::unique_ptr<char[]>(AllocNoThrow<char>(requiredSize));
+ if (!cmdSpace) {
+ return;
+ }
+ SerializeBuffer serializeBuffer(cmdSpace.get(), requiredSize);
+ WireResult r1 = SerializeCmd(cmd, requiredSize, &serializeBuffer);
+ WireResult r2 = SerializeExtraSize(&serializeBuffer);
+ if (DAWN_UNLIKELY(r1 != WireResult::Success || r2 != WireResult::Success)) {
+ mSerializer->OnSerializeError();
+ return;
}
+ SerializeChunkedCommand(cmdSpace.get(), requiredSize);
+ }
- void SerializeChunkedCommand(const char* allocatedBuffer, size_t remainingSize);
+ void SerializeChunkedCommand(const char* allocatedBuffer, size_t remainingSize);
- CommandSerializer* mSerializer;
- size_t mMaxAllocationSize;
- };
+ CommandSerializer* mSerializer;
+ size_t mMaxAllocationSize;
+};
} // namespace dawn::wire
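
The extraSize/SerializeExtraSize overloads above exist so a command plus a variable-length payload can be written into a single allocation (or chunked together when it exceeds the serializer's maximum). A sketch of the calling pattern; CmdT stands in for a generated *Cmd struct and |data|/|dataSize| for the trailing payload, both assumptions here.

#include <cstddef>
#include <cstring>

#include "dawn/wire/ChunkedCommandSerializer.h"

template <typename CmdT>
void SendWithPayload(dawn::wire::ChunkedCommandSerializer* serializer,
                     const CmdT& cmd,
                     const void* data,
                     size_t dataSize) {
    serializer->SerializeCommand(cmd, dataSize, [&](dawn::wire::SerializeBuffer* buffer) {
        char* dst = nullptr;
        // NextN reserves exactly |dataSize| bytes right after the serialized command.
        if (buffer->NextN(dataSize, &dst) != dawn::wire::WireResult::Success) {
            return dawn::wire::WireResult::FatalError;
        }
        memcpy(dst, data, dataSize);
        return dawn::wire::WireResult::Success;
    });
}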
diff --git a/chromium/third_party/dawn/src/dawn/wire/SupportedFeatures.cpp b/chromium/third_party/dawn/src/dawn/wire/SupportedFeatures.cpp
index 2d5a9f8125b..0e5688a1193 100644
--- a/chromium/third_party/dawn/src/dawn/wire/SupportedFeatures.cpp
+++ b/chromium/third_party/dawn/src/dawn/wire/SupportedFeatures.cpp
@@ -16,34 +16,35 @@
namespace dawn::wire {
- // Note: Upon updating this list, please also update serialization/deserialization
- // of limit structs on Adapter/Device initialization.
- bool IsFeatureSupported(WGPUFeatureName feature) {
- switch (feature) {
- case WGPUFeatureName_Undefined:
- case WGPUFeatureName_Force32:
- case WGPUFeatureName_DawnNative:
- return false;
- case WGPUFeatureName_Depth24UnormStencil8:
- case WGPUFeatureName_Depth32FloatStencil8:
- case WGPUFeatureName_TimestampQuery:
- case WGPUFeatureName_PipelineStatisticsQuery:
- case WGPUFeatureName_TextureCompressionBC:
- case WGPUFeatureName_TextureCompressionETC2:
- case WGPUFeatureName_TextureCompressionASTC:
- case WGPUFeatureName_IndirectFirstInstance:
- case WGPUFeatureName_DepthClamping:
- case WGPUFeatureName_DawnShaderFloat16:
- case WGPUFeatureName_DawnInternalUsages:
- case WGPUFeatureName_DawnMultiPlanarFormats:
- return true;
- }
-
- // Catch-all, for unsupported features.
- // "default:" is not used so we get compiler errors for
- // newly added, unhandled features, but still catch completely
- // unknown enums.
- return false;
+// Note: Upon updating this list, please also update serialization/deserialization
+// of limit structs on Adapter/Device initialization.
+bool IsFeatureSupported(WGPUFeatureName feature) {
+ switch (feature) {
+ case WGPUFeatureName_Undefined:
+ case WGPUFeatureName_Force32:
+ case WGPUFeatureName_DawnNative:
+ return false;
+ case WGPUFeatureName_Depth24UnormStencil8:
+ case WGPUFeatureName_Depth32FloatStencil8:
+ case WGPUFeatureName_TimestampQuery:
+ case WGPUFeatureName_PipelineStatisticsQuery:
+ case WGPUFeatureName_TextureCompressionBC:
+ case WGPUFeatureName_TextureCompressionETC2:
+ case WGPUFeatureName_TextureCompressionASTC:
+ case WGPUFeatureName_IndirectFirstInstance:
+ case WGPUFeatureName_DepthClamping:
+ case WGPUFeatureName_DawnShaderFloat16:
+ case WGPUFeatureName_DawnInternalUsages:
+ case WGPUFeatureName_DawnMultiPlanarFormats:
+ case WGPUFeatureName_ChromiumExperimentalDp4a:
+ return true;
}
+ // Catch-all, for unsupported features.
+ // "default:" is not used so we get compiler errors for
+ // newly added, unhandled features, but still catch completely
+ // unknown enums.
+ return false;
+}
+
} // namespace dawn::wire
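
The "no default:" comment above describes a pattern worth seeing on its own: with an exhaustive switch and no default label, the compiler's switch-coverage warning flags every newly added enum value, while the trailing return still catches values outside the declared enumerators. A minimal standalone sketch, not Dawn code:

enum class Fruit { Apple, Banana };

bool IsSweet(Fruit fruit) {
    switch (fruit) {
        case Fruit::Apple:
        case Fruit::Banana:
            return true;
    }
    // No "default:": adding Fruit::Lemon without a case becomes a compiler
    // diagnostic, yet an out-of-range value cast to Fruit still lands here.
    return false;
}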
diff --git a/chromium/third_party/dawn/src/dawn/wire/SupportedFeatures.h b/chromium/third_party/dawn/src/dawn/wire/SupportedFeatures.h
index 00451559188..26a82d9b034 100644
--- a/chromium/third_party/dawn/src/dawn/wire/SupportedFeatures.h
+++ b/chromium/third_party/dawn/src/dawn/wire/SupportedFeatures.h
@@ -15,11 +15,11 @@
#ifndef SRC_DAWN_WIRE_SUPPORTEDFEATURES_H_
#define SRC_DAWN_WIRE_SUPPORTEDFEATURES_H_
-#include <dawn/webgpu.h>
+#include "dawn/webgpu.h"
namespace dawn::wire {
- bool IsFeatureSupported(WGPUFeatureName feature);
+bool IsFeatureSupported(WGPUFeatureName feature);
} // namespace dawn::wire
diff --git a/chromium/third_party/dawn/src/dawn/wire/Wire.cpp b/chromium/third_party/dawn/src/dawn/wire/Wire.cpp
index af3e6be57a6..d7528518646 100644
--- a/chromium/third_party/dawn/src/dawn/wire/Wire.cpp
+++ b/chromium/third_party/dawn/src/dawn/wire/Wire.cpp
@@ -16,13 +16,12 @@
namespace dawn::wire {
- CommandSerializer::CommandSerializer() = default;
- CommandSerializer::~CommandSerializer() = default;
+CommandSerializer::CommandSerializer() = default;
+CommandSerializer::~CommandSerializer() = default;
- void CommandSerializer::OnSerializeError() {
- }
+void CommandSerializer::OnSerializeError() {}
- CommandHandler::CommandHandler() = default;
- CommandHandler::~CommandHandler() = default;
+CommandHandler::CommandHandler() = default;
+CommandHandler::~CommandHandler() = default;
} // namespace dawn::wire
diff --git a/chromium/third_party/dawn/src/dawn/wire/WireClient.cpp b/chromium/third_party/dawn/src/dawn/wire/WireClient.cpp
index 0446da89240..624cc032d85 100644
--- a/chromium/third_party/dawn/src/dawn/wire/WireClient.cpp
+++ b/chromium/third_party/dawn/src/dawn/wire/WireClient.cpp
@@ -17,66 +17,65 @@
namespace dawn::wire {
- WireClient::WireClient(const WireClientDescriptor& descriptor)
- : mImpl(new client::Client(descriptor.serializer, descriptor.memoryTransferService)) {
- }
+WireClient::WireClient(const WireClientDescriptor& descriptor)
+ : mImpl(new client::Client(descriptor.serializer, descriptor.memoryTransferService)) {}
- WireClient::~WireClient() {
- mImpl.reset();
- }
+WireClient::~WireClient() {
+ mImpl.reset();
+}
- const volatile char* WireClient::HandleCommands(const volatile char* commands, size_t size) {
- return mImpl->HandleCommands(commands, size);
- }
+const volatile char* WireClient::HandleCommands(const volatile char* commands, size_t size) {
+ return mImpl->HandleCommands(commands, size);
+}
- ReservedTexture WireClient::ReserveTexture(WGPUDevice device) {
- return mImpl->ReserveTexture(device);
- }
+ReservedTexture WireClient::ReserveTexture(WGPUDevice device) {
+ return mImpl->ReserveTexture(device);
+}
- ReservedSwapChain WireClient::ReserveSwapChain(WGPUDevice device) {
- return mImpl->ReserveSwapChain(device);
- }
+ReservedSwapChain WireClient::ReserveSwapChain(WGPUDevice device) {
+ return mImpl->ReserveSwapChain(device);
+}
- ReservedDevice WireClient::ReserveDevice() {
- return mImpl->ReserveDevice();
- }
+ReservedDevice WireClient::ReserveDevice() {
+ return mImpl->ReserveDevice();
+}
- ReservedInstance WireClient::ReserveInstance() {
- return mImpl->ReserveInstance();
- }
+ReservedInstance WireClient::ReserveInstance() {
+ return mImpl->ReserveInstance();
+}
- void WireClient::ReclaimTextureReservation(const ReservedTexture& reservation) {
- mImpl->ReclaimTextureReservation(reservation);
- }
+void WireClient::ReclaimTextureReservation(const ReservedTexture& reservation) {
+ mImpl->ReclaimTextureReservation(reservation);
+}
- void WireClient::ReclaimSwapChainReservation(const ReservedSwapChain& reservation) {
- mImpl->ReclaimSwapChainReservation(reservation);
- }
+void WireClient::ReclaimSwapChainReservation(const ReservedSwapChain& reservation) {
+ mImpl->ReclaimSwapChainReservation(reservation);
+}
- void WireClient::ReclaimDeviceReservation(const ReservedDevice& reservation) {
- mImpl->ReclaimDeviceReservation(reservation);
- }
+void WireClient::ReclaimDeviceReservation(const ReservedDevice& reservation) {
+ mImpl->ReclaimDeviceReservation(reservation);
+}
- void WireClient::ReclaimInstanceReservation(const ReservedInstance& reservation) {
- mImpl->ReclaimInstanceReservation(reservation);
- }
+void WireClient::ReclaimInstanceReservation(const ReservedInstance& reservation) {
+ mImpl->ReclaimInstanceReservation(reservation);
+}
- void WireClient::Disconnect() {
- mImpl->Disconnect();
- }
+void WireClient::Disconnect() {
+ mImpl->Disconnect();
+}
- namespace client {
- MemoryTransferService::MemoryTransferService() = default;
+namespace client {
+MemoryTransferService::MemoryTransferService() = default;
- MemoryTransferService::~MemoryTransferService() = default;
+MemoryTransferService::~MemoryTransferService() = default;
- MemoryTransferService::ReadHandle::ReadHandle() = default;
+MemoryTransferService::ReadHandle::ReadHandle() = default;
- MemoryTransferService::ReadHandle::~ReadHandle() = default;
+MemoryTransferService::ReadHandle::~ReadHandle() = default;
- MemoryTransferService::WriteHandle::WriteHandle() = default;
+MemoryTransferService::WriteHandle::WriteHandle() = default;
- MemoryTransferService::WriteHandle::~WriteHandle() = default;
- } // namespace client
+MemoryTransferService::WriteHandle::~WriteHandle() = default;
+} // namespace client
} // namespace dawn::wire
diff --git a/chromium/third_party/dawn/src/dawn/wire/WireDeserializeAllocator.cpp b/chromium/third_party/dawn/src/dawn/wire/WireDeserializeAllocator.cpp
index e0a3432867b..9e4fadc5126 100644
--- a/chromium/third_party/dawn/src/dawn/wire/WireDeserializeAllocator.cpp
+++ b/chromium/third_party/dawn/src/dawn/wire/WireDeserializeAllocator.cpp
@@ -17,44 +17,44 @@
#include <algorithm>
namespace dawn::wire {
- WireDeserializeAllocator::WireDeserializeAllocator() {
- Reset();
+WireDeserializeAllocator::WireDeserializeAllocator() {
+ Reset();
+}
+
+WireDeserializeAllocator::~WireDeserializeAllocator() {
+ Reset();
+}
+
+void* WireDeserializeAllocator::GetSpace(size_t size) {
+ // Return space in the current buffer if possible first.
+ if (mRemainingSize >= size) {
+ char* buffer = mCurrentBuffer;
+ mCurrentBuffer += size;
+ mRemainingSize -= size;
+ return buffer;
}
- WireDeserializeAllocator::~WireDeserializeAllocator() {
- Reset();
+ // Otherwise allocate a new buffer and try again.
+ size_t allocationSize = std::max(size, size_t(2048));
+ char* allocation = static_cast<char*>(malloc(allocationSize));
+ if (allocation == nullptr) {
+ return nullptr;
}
- void* WireDeserializeAllocator::GetSpace(size_t size) {
- // Return space in the current buffer if possible first.
- if (mRemainingSize >= size) {
- char* buffer = mCurrentBuffer;
- mCurrentBuffer += size;
- mRemainingSize -= size;
- return buffer;
- }
-
- // Otherwise allocate a new buffer and try again.
- size_t allocationSize = std::max(size, size_t(2048));
- char* allocation = static_cast<char*>(malloc(allocationSize));
- if (allocation == nullptr) {
- return nullptr;
- }
-
- mAllocations.push_back(allocation);
- mCurrentBuffer = allocation;
- mRemainingSize = allocationSize;
- return GetSpace(size);
- }
-
- void WireDeserializeAllocator::Reset() {
- for (auto allocation : mAllocations) {
- free(allocation);
- }
- mAllocations.clear();
+ mAllocations.push_back(allocation);
+ mCurrentBuffer = allocation;
+ mRemainingSize = allocationSize;
+ return GetSpace(size);
+}
- // The initial buffer is the inline buffer so that some allocations can be skipped
- mCurrentBuffer = mStaticBuffer;
- mRemainingSize = sizeof(mStaticBuffer);
+void WireDeserializeAllocator::Reset() {
+ for (auto* allocation : mAllocations) {
+ free(allocation);
}
+ mAllocations.clear();
+
+ // The initial buffer is the inline buffer so that some allocations can be skipped
+ mCurrentBuffer = mStaticBuffer;
+ mRemainingSize = sizeof(mStaticBuffer);
+}
} // namespace dawn::wire
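
A short usage sketch of the allocator's behavior as implemented above; the request sizes are illustrative.

#include "dawn/wire/WireDeserializeAllocator.h"

void AllocatorSketch() {
    dawn::wire::WireDeserializeAllocator allocator;

    // Small requests are served from the 2048-byte inline buffer: no heap traffic.
    void* small = allocator.GetSpace(64);

    // A request that does not fit the remaining inline space falls back to
    // malloc(max(size, 2048)) and makes that block the current buffer.
    void* large = allocator.GetSpace(4096);

    // Reset() frees every heap block and rewinds to the inline buffer.
    allocator.Reset();
    (void)small;
    (void)large;
}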
diff --git a/chromium/third_party/dawn/src/dawn/wire/WireDeserializeAllocator.h b/chromium/third_party/dawn/src/dawn/wire/WireDeserializeAllocator.h
index 372b237a63f..6293624e11d 100644
--- a/chromium/third_party/dawn/src/dawn/wire/WireDeserializeAllocator.h
+++ b/chromium/third_party/dawn/src/dawn/wire/WireDeserializeAllocator.h
@@ -15,29 +15,29 @@
#ifndef SRC_DAWN_WIRE_WIREDESERIALIZEALLOCATOR_H_
#define SRC_DAWN_WIRE_WIREDESERIALIZEALLOCATOR_H_
-#include "dawn/wire/WireCmd_autogen.h"
-
#include <vector>
+#include "dawn/wire/WireCmd_autogen.h"
+
namespace dawn::wire {
- // A really really simple implementation of the DeserializeAllocator. It's main feature
- // is that it has some inline storage so as to avoid allocations for the majority of
- // commands.
- class WireDeserializeAllocator : public DeserializeAllocator {
- public:
- WireDeserializeAllocator();
- virtual ~WireDeserializeAllocator();
-
- void* GetSpace(size_t size) override;
-
- void Reset();
-
- private:
- size_t mRemainingSize = 0;
- char* mCurrentBuffer = nullptr;
- char mStaticBuffer[2048];
- std::vector<char*> mAllocations;
- };
+// A really really simple implementation of the DeserializeAllocator. Its main feature
+// is that it has some inline storage so as to avoid allocations for the majority of
+// commands.
+class WireDeserializeAllocator : public DeserializeAllocator {
+ public:
+ WireDeserializeAllocator();
+ virtual ~WireDeserializeAllocator();
+
+ void* GetSpace(size_t size) override;
+
+ void Reset();
+
+ private:
+ size_t mRemainingSize = 0;
+ char* mCurrentBuffer = nullptr;
+ char mStaticBuffer[2048];
+ std::vector<char*> mAllocations;
+};
} // namespace dawn::wire
#endif // SRC_DAWN_WIRE_WIREDESERIALIZEALLOCATOR_H_
diff --git a/chromium/third_party/dawn/src/dawn/wire/WireResult.h b/chromium/third_party/dawn/src/dawn/wire/WireResult.h
index aeb1fdfdebf..41f8a56e528 100644
--- a/chromium/third_party/dawn/src/dawn/wire/WireResult.h
+++ b/chromium/third_party/dawn/src/dawn/wire/WireResult.h
@@ -19,10 +19,10 @@
namespace dawn::wire {
- enum class [[nodiscard]] WireResult{
- Success,
- FatalError,
- };
+enum class [[nodiscard]] WireResult{
+ Success,
+ FatalError,
+};
// Macro to simplify error handling, similar to DAWN_TRY but for WireResult.
#define WIRE_TRY(EXPR) \
diff --git a/chromium/third_party/dawn/src/dawn/wire/WireServer.cpp b/chromium/third_party/dawn/src/dawn/wire/WireServer.cpp
index bf9b0a11fd0..b864112769c 100644
--- a/chromium/third_party/dawn/src/dawn/wire/WireServer.cpp
+++ b/chromium/third_party/dawn/src/dawn/wire/WireServer.cpp
@@ -17,67 +17,70 @@
namespace dawn::wire {
- WireServer::WireServer(const WireServerDescriptor& descriptor)
- : mImpl(new server::Server(*descriptor.procs,
- descriptor.serializer,
- descriptor.memoryTransferService)) {
- }
-
- WireServer::~WireServer() {
- mImpl.reset();
- }
-
- const volatile char* WireServer::HandleCommands(const volatile char* commands, size_t size) {
- return mImpl->HandleCommands(commands, size);
- }
-
- bool WireServer::InjectTexture(WGPUTexture texture,
- uint32_t id,
- uint32_t generation,
- uint32_t deviceId,
- uint32_t deviceGeneration) {
- return mImpl->InjectTexture(texture, id, generation, deviceId, deviceGeneration);
- }
-
- bool WireServer::InjectSwapChain(WGPUSwapChain swapchain,
- uint32_t id,
- uint32_t generation,
- uint32_t deviceId,
- uint32_t deviceGeneration) {
- return mImpl->InjectSwapChain(swapchain, id, generation, deviceId, deviceGeneration);
- }
-
- bool WireServer::InjectDevice(WGPUDevice device, uint32_t id, uint32_t generation) {
- return mImpl->InjectDevice(device, id, generation);
- }
-
- bool WireServer::InjectInstance(WGPUInstance instance, uint32_t id, uint32_t generation) {
- return mImpl->InjectInstance(instance, id, generation);
- }
-
- WGPUDevice WireServer::GetDevice(uint32_t id, uint32_t generation) {
- return mImpl->GetDevice(id, generation);
- }
-
- namespace server {
- MemoryTransferService::MemoryTransferService() = default;
-
- MemoryTransferService::~MemoryTransferService() = default;
-
- MemoryTransferService::ReadHandle::ReadHandle() = default;
-
- MemoryTransferService::ReadHandle::~ReadHandle() = default;
-
- MemoryTransferService::WriteHandle::WriteHandle() = default;
-
- MemoryTransferService::WriteHandle::~WriteHandle() = default;
-
- void MemoryTransferService::WriteHandle::SetTarget(void* data) {
- mTargetData = data;
- }
- void MemoryTransferService::WriteHandle::SetDataLength(size_t dataLength) {
- mDataLength = dataLength;
- }
- } // namespace server
+WireServer::WireServer(const WireServerDescriptor& descriptor)
+ : mImpl(new server::Server(*descriptor.procs,
+ descriptor.serializer,
+ descriptor.memoryTransferService)) {}
+
+WireServer::~WireServer() {
+ mImpl.reset();
+}
+
+const volatile char* WireServer::HandleCommands(const volatile char* commands, size_t size) {
+ return mImpl->HandleCommands(commands, size);
+}
+
+bool WireServer::InjectTexture(WGPUTexture texture,
+ uint32_t id,
+ uint32_t generation,
+ uint32_t deviceId,
+ uint32_t deviceGeneration) {
+ return mImpl->InjectTexture(texture, id, generation, deviceId, deviceGeneration);
+}
+
+bool WireServer::InjectSwapChain(WGPUSwapChain swapchain,
+ uint32_t id,
+ uint32_t generation,
+ uint32_t deviceId,
+ uint32_t deviceGeneration) {
+ return mImpl->InjectSwapChain(swapchain, id, generation, deviceId, deviceGeneration);
+}
+
+bool WireServer::InjectDevice(WGPUDevice device, uint32_t id, uint32_t generation) {
+ return mImpl->InjectDevice(device, id, generation);
+}
+
+bool WireServer::InjectInstance(WGPUInstance instance, uint32_t id, uint32_t generation) {
+ return mImpl->InjectInstance(instance, id, generation);
+}
+
+WGPUDevice WireServer::GetDevice(uint32_t id, uint32_t generation) {
+ return mImpl->GetDevice(id, generation);
+}
+
+bool WireServer::IsDeviceKnown(WGPUDevice device) const {
+ return mImpl->IsDeviceKnown(device);
+}
+
+namespace server {
+MemoryTransferService::MemoryTransferService() = default;
+
+MemoryTransferService::~MemoryTransferService() = default;
+
+MemoryTransferService::ReadHandle::ReadHandle() = default;
+
+MemoryTransferService::ReadHandle::~ReadHandle() = default;
+
+MemoryTransferService::WriteHandle::WriteHandle() = default;
+
+MemoryTransferService::WriteHandle::~WriteHandle() = default;
+
+void MemoryTransferService::WriteHandle::SetTarget(void* data) {
+ mTargetData = data;
+}
+void MemoryTransferService::WriteHandle::SetDataLength(size_t dataLength) {
+ mDataLength = dataLength;
+}
+} // namespace server
} // namespace dawn::wire
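
For context on how the Reserve*/Inject* pairs (and the new IsDeviceKnown query) fit together, a sketch of the handshake; the out-of-band transport that carries the reservation id and generation between processes is assumed and not shown.

#include "dawn/wire/WireClient.h"
#include "dawn/wire/WireServer.h"

// Sketch only: |client| and |server| are assumed to be constructed already with
// their serializers, and |backendInstance| to come from dawn::native.
void InjectSketch(dawn::wire::WireClient* client,
                  dawn::wire::WireServer* server,
                  WGPUInstance backendInstance) {
    // Client side: reserve an id/generation slot for a future instance object.
    dawn::wire::ReservedInstance reservation = client->ReserveInstance();

    // (In a real embedder the id and generation travel over IPC to the server.)

    // Server side: bind the reservation to the actual backend instance.
    if (!server->InjectInstance(backendInstance, reservation.id, reservation.generation)) {
        // If injection never happens, the reservation can be handed back.
        client->ReclaimInstanceReservation(reservation);
    }
}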
diff --git a/chromium/third_party/dawn/src/dawn/wire/client/Adapter.cpp b/chromium/third_party/dawn/src/dawn/wire/client/Adapter.cpp
index b2dcc87f63d..3e9cbd2ac75 100644
--- a/chromium/third_party/dawn/src/dawn/wire/client/Adapter.cpp
+++ b/chromium/third_party/dawn/src/dawn/wire/client/Adapter.cpp
@@ -19,115 +19,117 @@
namespace dawn::wire::client {
- Adapter::~Adapter() {
- mRequestDeviceRequests.CloseAll([](RequestDeviceData* request) {
- request->callback(WGPURequestDeviceStatus_Unknown, nullptr,
- "Adapter destroyed before callback", request->userdata);
- });
+Adapter::Adapter(Client* c, uint32_t r, uint32_t i) : ObjectBase(c, r, i) {}
+
+Adapter::~Adapter() {
+ mRequestDeviceRequests.CloseAll([](RequestDeviceData* request) {
+ request->callback(WGPURequestDeviceStatus_Unknown, nullptr,
+ "Adapter destroyed before callback", request->userdata);
+ });
+}
+
+void Adapter::CancelCallbacksForDisconnect() {
+ mRequestDeviceRequests.CloseAll([](RequestDeviceData* request) {
+ request->callback(WGPURequestDeviceStatus_Unknown, nullptr, "GPU connection lost",
+ request->userdata);
+ });
+}
+
+bool Adapter::GetLimits(WGPUSupportedLimits* limits) const {
+ return mLimitsAndFeatures.GetLimits(limits);
+}
+
+bool Adapter::HasFeature(WGPUFeatureName feature) const {
+ return mLimitsAndFeatures.HasFeature(feature);
+}
+
+size_t Adapter::EnumerateFeatures(WGPUFeatureName* features) const {
+ return mLimitsAndFeatures.EnumerateFeatures(features);
+}
+
+void Adapter::SetLimits(const WGPUSupportedLimits* limits) {
+ return mLimitsAndFeatures.SetLimits(limits);
+}
+
+void Adapter::SetFeatures(const WGPUFeatureName* features, uint32_t featuresCount) {
+ return mLimitsAndFeatures.SetFeatures(features, featuresCount);
+}
+
+void Adapter::SetProperties(const WGPUAdapterProperties* properties) {
+ mProperties = *properties;
+ mProperties.nextInChain = nullptr;
+}
+
+void Adapter::GetProperties(WGPUAdapterProperties* properties) const {
+ *properties = mProperties;
+}
+
+void Adapter::RequestDevice(const WGPUDeviceDescriptor* descriptor,
+ WGPURequestDeviceCallback callback,
+ void* userdata) {
+ if (client->IsDisconnected()) {
+ callback(WGPURequestDeviceStatus_Error, nullptr, "GPU connection lost", userdata);
+ return;
}
- void Adapter::CancelCallbacksForDisconnect() {
- mRequestDeviceRequests.CloseAll([](RequestDeviceData* request) {
- request->callback(WGPURequestDeviceStatus_Unknown, nullptr, "GPU connection lost",
- request->userdata);
- });
- }
-
- bool Adapter::GetLimits(WGPUSupportedLimits* limits) const {
- return mLimitsAndFeatures.GetLimits(limits);
- }
-
- bool Adapter::HasFeature(WGPUFeatureName feature) const {
- return mLimitsAndFeatures.HasFeature(feature);
- }
-
- size_t Adapter::EnumerateFeatures(WGPUFeatureName* features) const {
- return mLimitsAndFeatures.EnumerateFeatures(features);
- }
-
- void Adapter::SetLimits(const WGPUSupportedLimits* limits) {
- return mLimitsAndFeatures.SetLimits(limits);
- }
-
- void Adapter::SetFeatures(const WGPUFeatureName* features, uint32_t featuresCount) {
- return mLimitsAndFeatures.SetFeatures(features, featuresCount);
- }
-
- void Adapter::SetProperties(const WGPUAdapterProperties* properties) {
- mProperties = *properties;
- mProperties.nextInChain = nullptr;
+ auto* allocation = client->DeviceAllocator().New(client);
+ uint64_t serial = mRequestDeviceRequests.Add({callback, allocation->object->id, userdata});
+
+ AdapterRequestDeviceCmd cmd;
+ cmd.adapterId = this->id;
+ cmd.requestSerial = serial;
+ cmd.deviceObjectHandle = ObjectHandle(allocation->object->id, allocation->generation);
+ cmd.descriptor = descriptor;
+
+ client->SerializeCommand(cmd);
+}
+
+bool Client::DoAdapterRequestDeviceCallback(Adapter* adapter,
+ uint64_t requestSerial,
+ WGPURequestDeviceStatus status,
+ const char* message,
+ const WGPUSupportedLimits* limits,
+ uint32_t featuresCount,
+ const WGPUFeatureName* features) {
+ // May have been deleted or recreated so this isn't an error.
+ if (adapter == nullptr) {
+ return true;
}
-
- void Adapter::GetProperties(WGPUAdapterProperties* properties) const {
- *properties = mProperties;
+ return adapter->OnRequestDeviceCallback(requestSerial, status, message, limits, featuresCount,
+ features);
+}
+
+bool Adapter::OnRequestDeviceCallback(uint64_t requestSerial,
+ WGPURequestDeviceStatus status,
+ const char* message,
+ const WGPUSupportedLimits* limits,
+ uint32_t featuresCount,
+ const WGPUFeatureName* features) {
+ RequestDeviceData request;
+ if (!mRequestDeviceRequests.Acquire(requestSerial, &request)) {
+ return false;
}
- void Adapter::RequestDevice(const WGPUDeviceDescriptor* descriptor,
- WGPURequestDeviceCallback callback,
- void* userdata) {
- if (client->IsDisconnected()) {
- callback(WGPURequestDeviceStatus_Error, nullptr, "GPU connection lost", userdata);
- return;
- }
-
- auto* allocation = client->DeviceAllocator().New(client);
- uint64_t serial = mRequestDeviceRequests.Add({callback, allocation->object->id, userdata});
-
- AdapterRequestDeviceCmd cmd;
- cmd.adapterId = this->id;
- cmd.requestSerial = serial;
- cmd.deviceObjectHandle = ObjectHandle(allocation->object->id, allocation->generation);
- cmd.descriptor = descriptor;
+ Device* device = client->DeviceAllocator().GetObject(request.deviceObjectId);
- client->SerializeCommand(cmd);
+ // If the return status is a failure we should give a null device to the callback and
+ // free the allocation.
+ if (status != WGPURequestDeviceStatus_Success) {
+ client->DeviceAllocator().Free(device);
+ request.callback(status, nullptr, message, request.userdata);
+ return true;
}
- bool Client::DoAdapterRequestDeviceCallback(Adapter* adapter,
- uint64_t requestSerial,
- WGPURequestDeviceStatus status,
- const char* message,
- const WGPUSupportedLimits* limits,
- uint32_t featuresCount,
- const WGPUFeatureName* features) {
- // May have been deleted or recreated so this isn't an error.
- if (adapter == nullptr) {
- return true;
- }
- return adapter->OnRequestDeviceCallback(requestSerial, status, message, limits,
- featuresCount, features);
- }
+ device->SetLimits(limits);
+ device->SetFeatures(features, featuresCount);
- bool Adapter::OnRequestDeviceCallback(uint64_t requestSerial,
- WGPURequestDeviceStatus status,
- const char* message,
- const WGPUSupportedLimits* limits,
- uint32_t featuresCount,
- const WGPUFeatureName* features) {
- RequestDeviceData request;
- if (!mRequestDeviceRequests.Acquire(requestSerial, &request)) {
- return false;
- }
-
- Device* device = client->DeviceAllocator().GetObject(request.deviceObjectId);
-
- // If the return status is a failure we should give a null device to the callback and
- // free the allocation.
- if (status != WGPURequestDeviceStatus_Success) {
- client->DeviceAllocator().Free(device);
- request.callback(status, nullptr, message, request.userdata);
- return true;
- }
-
- device->SetLimits(limits);
- device->SetFeatures(features, featuresCount);
-
- request.callback(status, ToAPI(device), message, request.userdata);
- return true;
- }
+ request.callback(status, ToAPI(device), message, request.userdata);
+ return true;
+}
- WGPUDevice Adapter::CreateDevice(const WGPUDeviceDescriptor*) {
- dawn::ErrorLog() << "adapter.CreateDevice not supported with dawn_wire.";
- return nullptr;
- }
+WGPUDevice Adapter::CreateDevice(const WGPUDeviceDescriptor*) {
+ dawn::ErrorLog() << "adapter.CreateDevice not supported with dawn_wire.";
+ return nullptr;
+}
} // namespace dawn::wire::client
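
The RequestDevice path above is normally reached through the C API; a sketch of the caller side, with the callback body and the userdata plumbing purely illustrative. wgpuAdapterRequestDevice routes to Adapter::RequestDevice once the wire client procs are installed.

#include <cstdio>

#include "dawn/webgpu.h"

// On failure |device| is null and |message| says why, mirroring the status
// handling in Adapter::OnRequestDeviceCallback above.
static void OnDeviceReady(WGPURequestDeviceStatus status,
                          WGPUDevice device,
                          const char* message,
                          void* userdata) {
    WGPUDevice* out = static_cast<WGPUDevice*>(userdata);
    if (status == WGPURequestDeviceStatus_Success) {
        *out = device;
    } else {
        std::printf("RequestDevice failed: %s\n", message ? message : "(no message)");
        *out = nullptr;
    }
}

void RequestDeviceSketch(WGPUAdapter adapter) {
    WGPUDevice device = nullptr;
    WGPUDeviceDescriptor desc = {};
    // Over the wire the callback fires only after the server's reply has been
    // flushed back to the client (see WireHelper::FlushServer earlier).
    wgpuAdapterRequestDevice(adapter, &desc, OnDeviceReady, &device);
}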
diff --git a/chromium/third_party/dawn/src/dawn/wire/client/Adapter.h b/chromium/third_party/dawn/src/dawn/wire/client/Adapter.h
index 4afc687a304..024da0b2496 100644
--- a/chromium/third_party/dawn/src/dawn/wire/client/Adapter.h
+++ b/chromium/third_party/dawn/src/dawn/wire/client/Adapter.h
@@ -15,7 +15,7 @@
#ifndef SRC_DAWN_WIRE_CLIENT_ADAPTER_H_
#define SRC_DAWN_WIRE_CLIENT_ADAPTER_H_
-#include <dawn/webgpu.h>
+#include "dawn/webgpu.h"
#include "dawn/wire/WireClient.h"
#include "dawn/wire/WireCmd_autogen.h"
@@ -25,45 +25,45 @@
namespace dawn::wire::client {
- class Adapter final : public ObjectBase {
- public:
- using ObjectBase::ObjectBase;
+class Adapter final : public ObjectBase {
+ public:
+ Adapter(Client* client, uint32_t refcount, uint32_t id);
+ ~Adapter();
- ~Adapter();
- void CancelCallbacksForDisconnect() override;
+ void CancelCallbacksForDisconnect() override;
- bool GetLimits(WGPUSupportedLimits* limits) const;
- bool HasFeature(WGPUFeatureName feature) const;
- size_t EnumerateFeatures(WGPUFeatureName* features) const;
- void SetLimits(const WGPUSupportedLimits* limits);
- void SetFeatures(const WGPUFeatureName* features, uint32_t featuresCount);
- void SetProperties(const WGPUAdapterProperties* properties);
- void GetProperties(WGPUAdapterProperties* properties) const;
- void RequestDevice(const WGPUDeviceDescriptor* descriptor,
- WGPURequestDeviceCallback callback,
- void* userdata);
+ bool GetLimits(WGPUSupportedLimits* limits) const;
+ bool HasFeature(WGPUFeatureName feature) const;
+ size_t EnumerateFeatures(WGPUFeatureName* features) const;
+ void SetLimits(const WGPUSupportedLimits* limits);
+ void SetFeatures(const WGPUFeatureName* features, uint32_t featuresCount);
+ void SetProperties(const WGPUAdapterProperties* properties);
+ void GetProperties(WGPUAdapterProperties* properties) const;
+ void RequestDevice(const WGPUDeviceDescriptor* descriptor,
+ WGPURequestDeviceCallback callback,
+ void* userdata);
- bool OnRequestDeviceCallback(uint64_t requestSerial,
- WGPURequestDeviceStatus status,
- const char* message,
- const WGPUSupportedLimits* limits,
- uint32_t featuresCount,
- const WGPUFeatureName* features);
+ bool OnRequestDeviceCallback(uint64_t requestSerial,
+ WGPURequestDeviceStatus status,
+ const char* message,
+ const WGPUSupportedLimits* limits,
+ uint32_t featuresCount,
+ const WGPUFeatureName* features);
- // Unimplementable. Only availale in dawn_native.
- WGPUDevice CreateDevice(const WGPUDeviceDescriptor*);
+ // Unimplementable. Only available in dawn_native.
+ WGPUDevice CreateDevice(const WGPUDeviceDescriptor*);
- private:
- LimitsAndFeatures mLimitsAndFeatures;
- WGPUAdapterProperties mProperties;
+ private:
+ LimitsAndFeatures mLimitsAndFeatures;
+ WGPUAdapterProperties mProperties;
- struct RequestDeviceData {
- WGPURequestDeviceCallback callback = nullptr;
- ObjectId deviceObjectId;
- void* userdata = nullptr;
- };
- RequestTracker<RequestDeviceData> mRequestDeviceRequests;
+ struct RequestDeviceData {
+ WGPURequestDeviceCallback callback = nullptr;
+ ObjectId deviceObjectId;
+ void* userdata = nullptr;
};
+ RequestTracker<RequestDeviceData> mRequestDeviceRequests;
+};
} // namespace dawn::wire::client
diff --git a/chromium/third_party/dawn/src/dawn/wire/client/ApiObjects.h b/chromium/third_party/dawn/src/dawn/wire/client/ApiObjects.h
index 1998d5493e6..672b80a699e 100644
--- a/chromium/third_party/dawn/src/dawn/wire/client/ApiObjects.h
+++ b/chromium/third_party/dawn/src/dawn/wire/client/ApiObjects.h
@@ -21,8 +21,10 @@
#include "dawn/wire/client/Buffer.h"
#include "dawn/wire/client/Device.h"
#include "dawn/wire/client/Instance.h"
+#include "dawn/wire/client/QuerySet.h"
#include "dawn/wire/client/Queue.h"
#include "dawn/wire/client/ShaderModule.h"
+#include "dawn/wire/client/Texture.h"
#include "dawn/wire/client/ApiObjects_autogen.h"
diff --git a/chromium/third_party/dawn/src/dawn/wire/client/Buffer.cpp b/chromium/third_party/dawn/src/dawn/wire/client/Buffer.cpp
index 21db737a088..eb9fa617d0d 100644
--- a/chromium/third_party/dawn/src/dawn/wire/client/Buffer.cpp
+++ b/chromium/third_party/dawn/src/dawn/wire/client/Buffer.cpp
@@ -14,6 +14,9 @@
#include "dawn/wire/client/Buffer.h"
+#include <limits>
+#include <utility>
+
#include "dawn/wire/BufferConsumer_impl.h"
#include "dawn/wire/WireCmd_autogen.h"
#include "dawn/wire/client/Client.h"
@@ -21,386 +24,396 @@
namespace dawn::wire::client {
- // static
- WGPUBuffer Buffer::Create(Device* device, const WGPUBufferDescriptor* descriptor) {
- Client* wireClient = device->client;
-
- bool mappable =
- (descriptor->usage & (WGPUBufferUsage_MapRead | WGPUBufferUsage_MapWrite)) != 0 ||
- descriptor->mappedAtCreation;
- if (mappable && descriptor->size >= std::numeric_limits<size_t>::max()) {
- device->InjectError(WGPUErrorType_OutOfMemory, "Buffer is too large for map usage");
- return device->CreateErrorBuffer();
- }
+// static
+WGPUBuffer Buffer::Create(Device* device, const WGPUBufferDescriptor* descriptor) {
+ Client* wireClient = device->client;
- std::unique_ptr<MemoryTransferService::ReadHandle> readHandle = nullptr;
- std::unique_ptr<MemoryTransferService::WriteHandle> writeHandle = nullptr;
-
- DeviceCreateBufferCmd cmd;
- cmd.deviceId = device->id;
- cmd.descriptor = descriptor;
- cmd.readHandleCreateInfoLength = 0;
- cmd.readHandleCreateInfo = nullptr;
- cmd.writeHandleCreateInfoLength = 0;
- cmd.writeHandleCreateInfo = nullptr;
-
- if (mappable) {
- if ((descriptor->usage & WGPUBufferUsage_MapRead) != 0) {
- // Create the read handle on buffer creation.
- readHandle.reset(
- wireClient->GetMemoryTransferService()->CreateReadHandle(descriptor->size));
- if (readHandle == nullptr) {
- device->InjectError(WGPUErrorType_OutOfMemory,
- "Failed to create buffer mapping");
- return device->CreateErrorBuffer();
- }
- cmd.readHandleCreateInfoLength = readHandle->SerializeCreateSize();
- }
+ bool mappable =
+ (descriptor->usage & (WGPUBufferUsage_MapRead | WGPUBufferUsage_MapWrite)) != 0 ||
+ descriptor->mappedAtCreation;
+ if (mappable && descriptor->size >= std::numeric_limits<size_t>::max()) {
+ device->InjectError(WGPUErrorType_OutOfMemory, "Buffer is too large for map usage");
+ return device->CreateErrorBuffer();
+ }
- if ((descriptor->usage & WGPUBufferUsage_MapWrite) != 0 ||
- descriptor->mappedAtCreation) {
- // Create the write handle on buffer creation.
- writeHandle.reset(
- wireClient->GetMemoryTransferService()->CreateWriteHandle(descriptor->size));
- if (writeHandle == nullptr) {
- device->InjectError(WGPUErrorType_OutOfMemory,
- "Failed to create buffer mapping");
- return device->CreateErrorBuffer();
- }
- cmd.writeHandleCreateInfoLength = writeHandle->SerializeCreateSize();
+ std::unique_ptr<MemoryTransferService::ReadHandle> readHandle = nullptr;
+ std::unique_ptr<MemoryTransferService::WriteHandle> writeHandle = nullptr;
+
+ DeviceCreateBufferCmd cmd;
+ cmd.deviceId = device->id;
+ cmd.descriptor = descriptor;
+ cmd.readHandleCreateInfoLength = 0;
+ cmd.readHandleCreateInfo = nullptr;
+ cmd.writeHandleCreateInfoLength = 0;
+ cmd.writeHandleCreateInfo = nullptr;
+
+ if (mappable) {
+ if ((descriptor->usage & WGPUBufferUsage_MapRead) != 0) {
+ // Create the read handle on buffer creation.
+ readHandle.reset(
+ wireClient->GetMemoryTransferService()->CreateReadHandle(descriptor->size));
+ if (readHandle == nullptr) {
+ device->InjectError(WGPUErrorType_OutOfMemory, "Failed to create buffer mapping");
+ return CreateError(device, descriptor);
}
+ cmd.readHandleCreateInfoLength = readHandle->SerializeCreateSize();
}
- // Create the buffer and send the creation command.
- // This must happen after any potential device->CreateErrorBuffer()
- // as server expects allocating ids to be monotonically increasing
- auto* bufferObjectAndSerial = wireClient->BufferAllocator().New(wireClient);
- Buffer* buffer = bufferObjectAndSerial->object.get();
- buffer->mDevice = device;
- buffer->mDeviceIsAlive = device->GetAliveWeakPtr();
- buffer->mSize = descriptor->size;
- buffer->mDestructWriteHandleOnUnmap = false;
-
- if (descriptor->mappedAtCreation) {
- // If the buffer is mapped at creation, a write handle is created and will be
- // destructed on unmap if the buffer doesn't have MapWrite usage
- // The buffer is mapped right now.
- buffer->mMapState = MapState::MappedAtCreation;
-
- // This flag is for write handle created by mappedAtCreation
- // instead of MapWrite usage. We don't have such a case for read handle
- buffer->mDestructWriteHandleOnUnmap =
- (descriptor->usage & WGPUBufferUsage_MapWrite) == 0;
-
- buffer->mMapOffset = 0;
- buffer->mMapSize = buffer->mSize;
- ASSERT(writeHandle != nullptr);
- buffer->mMappedData = writeHandle->GetData();
+ if ((descriptor->usage & WGPUBufferUsage_MapWrite) != 0 || descriptor->mappedAtCreation) {
+ // Create the write handle on buffer creation.
+ writeHandle.reset(
+ wireClient->GetMemoryTransferService()->CreateWriteHandle(descriptor->size));
+ if (writeHandle == nullptr) {
+ device->InjectError(WGPUErrorType_OutOfMemory, "Failed to create buffer mapping");
+ return CreateError(device, descriptor);
+ }
+ cmd.writeHandleCreateInfoLength = writeHandle->SerializeCreateSize();
}
-
- cmd.result = ObjectHandle{buffer->id, bufferObjectAndSerial->generation};
-
- wireClient->SerializeCommand(
- cmd, cmd.readHandleCreateInfoLength + cmd.writeHandleCreateInfoLength,
- [&](SerializeBuffer* serializeBuffer) {
- if (readHandle != nullptr) {
- char* readHandleBuffer;
- WIRE_TRY(
- serializeBuffer->NextN(cmd.readHandleCreateInfoLength, &readHandleBuffer));
- // Serialize the ReadHandle into the space after the command.
- readHandle->SerializeCreate(readHandleBuffer);
- buffer->mReadHandle = std::move(readHandle);
- }
- if (writeHandle != nullptr) {
- char* writeHandleBuffer;
- WIRE_TRY(serializeBuffer->NextN(cmd.writeHandleCreateInfoLength,
- &writeHandleBuffer));
- // Serialize the WriteHandle into the space after the command.
- writeHandle->SerializeCreate(writeHandleBuffer);
- buffer->mWriteHandle = std::move(writeHandle);
- }
-
- return WireResult::Success;
- });
- return ToAPI(buffer);
}
- // static
- WGPUBuffer Buffer::CreateError(Device* device) {
- auto* allocation = device->client->BufferAllocator().New(device->client);
- allocation->object->mDevice = device;
- allocation->object->mDeviceIsAlive = device->GetAliveWeakPtr();
+ // Create the buffer and send the creation command.
+ // This must happen after any potential device->CreateErrorBuffer()
+ // as the server expects allocated ids to be monotonically increasing
+ auto* bufferObjectAndSerial = wireClient->BufferAllocator().New(wireClient);
+ Buffer* buffer = bufferObjectAndSerial->object.get();
+ buffer->mDevice = device;
+ buffer->mDeviceIsAlive = device->GetAliveWeakPtr();
+ buffer->mSize = descriptor->size;
+ buffer->mUsage = static_cast<WGPUBufferUsage>(descriptor->usage);
+ buffer->mDestructWriteHandleOnUnmap = false;
+
+ if (descriptor->mappedAtCreation) {
+ // If the buffer is mapped at creation, a write handle is created and will be
+ // destructed on unmap if the buffer doesn't have MapWrite usage
+ // The buffer is mapped right now.
+ buffer->mMapState = MapState::MappedAtCreation;
+
+ // This flag is for write handle created by mappedAtCreation
+ // instead of MapWrite usage. We don't have such a case for read handle
+ buffer->mDestructWriteHandleOnUnmap = (descriptor->usage & WGPUBufferUsage_MapWrite) == 0;
+
+ buffer->mMapOffset = 0;
+ buffer->mMapSize = buffer->mSize;
+ ASSERT(writeHandle != nullptr);
+ buffer->mMappedData = writeHandle->GetData();
+ }
- DeviceCreateErrorBufferCmd cmd;
- cmd.self = ToAPI(device);
- cmd.result = ObjectHandle{allocation->object->id, allocation->generation};
- device->client->SerializeCommand(cmd);
+ cmd.result = ObjectHandle{buffer->id, bufferObjectAndSerial->generation};
+
+ wireClient->SerializeCommand(
+ cmd, cmd.readHandleCreateInfoLength + cmd.writeHandleCreateInfoLength,
+ [&](SerializeBuffer* serializeBuffer) {
+ if (readHandle != nullptr) {
+ char* readHandleBuffer;
+ WIRE_TRY(serializeBuffer->NextN(cmd.readHandleCreateInfoLength, &readHandleBuffer));
+ // Serialize the ReadHandle into the space after the command.
+ readHandle->SerializeCreate(readHandleBuffer);
+ buffer->mReadHandle = std::move(readHandle);
+ }
+ if (writeHandle != nullptr) {
+ char* writeHandleBuffer;
+ WIRE_TRY(
+ serializeBuffer->NextN(cmd.writeHandleCreateInfoLength, &writeHandleBuffer));
+ // Serialize the WriteHandle into the space after the command.
+ writeHandle->SerializeCreate(writeHandleBuffer);
+ buffer->mWriteHandle = std::move(writeHandle);
+ }
- return ToAPI(allocation->object.get());
+ return WireResult::Success;
+ });
+ return ToAPI(buffer);
+}
+
+// static
+WGPUBuffer Buffer::CreateError(Device* device, const WGPUBufferDescriptor* descriptor) {
+ auto* allocation = device->client->BufferAllocator().New(device->client);
+ allocation->object->mDevice = device;
+ allocation->object->mDeviceIsAlive = device->GetAliveWeakPtr();
+ allocation->object->mSize = descriptor->size;
+ allocation->object->mUsage = static_cast<WGPUBufferUsage>(descriptor->usage);
+
+ DeviceCreateErrorBufferCmd cmd;
+ cmd.self = ToAPI(device);
+ cmd.result = ObjectHandle{allocation->object->id, allocation->generation};
+ device->client->SerializeCommand(cmd);
+
+ return ToAPI(allocation->object.get());
+}
+
+Buffer::Buffer(Client* c, uint32_t r, uint32_t i) : ObjectBase(c, r, i) {}
+
+Buffer::~Buffer() {
+ ClearAllCallbacks(WGPUBufferMapAsyncStatus_DestroyedBeforeCallback);
+ FreeMappedData();
+}
+
+void Buffer::CancelCallbacksForDisconnect() {
+ ClearAllCallbacks(WGPUBufferMapAsyncStatus_DeviceLost);
+}
+
+void Buffer::ClearAllCallbacks(WGPUBufferMapAsyncStatus status) {
+ mRequests.CloseAll([status](MapRequestData* request) {
+ if (request->callback != nullptr) {
+ request->callback(status, request->userdata);
+ }
+ });
+}
+
+void Buffer::MapAsync(WGPUMapModeFlags mode,
+ size_t offset,
+ size_t size,
+ WGPUBufferMapCallback callback,
+ void* userdata) {
+ if (client->IsDisconnected()) {
+ return callback(WGPUBufferMapAsyncStatus_DeviceLost, userdata);
}
- Buffer::~Buffer() {
- ClearAllCallbacks(WGPUBufferMapAsyncStatus_DestroyedBeforeCallback);
- FreeMappedData();
+ // Handle the defaulting of size required by WebGPU.
+ if ((size == WGPU_WHOLE_MAP_SIZE) && (offset <= mSize)) {
+ size = mSize - offset;
}
- void Buffer::CancelCallbacksForDisconnect() {
- ClearAllCallbacks(WGPUBufferMapAsyncStatus_DeviceLost);
+ // Create the request structure that will hold information while this mapping is
+ // in flight.
+ MapRequestData request = {};
+ request.callback = callback;
+ request.userdata = userdata;
+ request.offset = offset;
+ request.size = size;
+ if (mode & WGPUMapMode_Read) {
+ request.type = MapRequestType::Read;
+ } else if (mode & WGPUMapMode_Write) {
+ request.type = MapRequestType::Write;
}
- void Buffer::ClearAllCallbacks(WGPUBufferMapAsyncStatus status) {
- mRequests.CloseAll([status](MapRequestData* request) {
- if (request->callback != nullptr) {
- request->callback(status, request->userdata);
- }
- });
+ uint64_t serial = mRequests.Add(std::move(request));
+
+ // Serialize the command to send to the server.
+ BufferMapAsyncCmd cmd;
+ cmd.bufferId = this->id;
+ cmd.requestSerial = serial;
+ cmd.mode = mode;
+ cmd.offset = offset;
+ cmd.size = size;
+
+ client->SerializeCommand(cmd);
+}
+
+bool Buffer::OnMapAsyncCallback(uint64_t requestSerial,
+ uint32_t status,
+ uint64_t readDataUpdateInfoLength,
+ const uint8_t* readDataUpdateInfo) {
+ MapRequestData request;
+ if (!mRequests.Acquire(requestSerial, &request)) {
+ return false;
}
- void Buffer::MapAsync(WGPUMapModeFlags mode,
- size_t offset,
- size_t size,
- WGPUBufferMapCallback callback,
- void* userdata) {
- if (client->IsDisconnected()) {
- return callback(WGPUBufferMapAsyncStatus_DeviceLost, userdata);
- }
-
- // Handle the defaulting of size required by WebGPU.
- if ((size == WGPU_WHOLE_MAP_SIZE) && (offset <= mSize)) {
- size = mSize - offset;
- }
-
- // Create the request structure that will hold information while this mapping is
- // in flight.
- MapRequestData request = {};
- request.callback = callback;
- request.userdata = userdata;
- request.offset = offset;
- request.size = size;
- if (mode & WGPUMapMode_Read) {
- request.type = MapRequestType::Read;
- } else if (mode & WGPUMapMode_Write) {
- request.type = MapRequestType::Write;
+ auto FailRequest = [&request]() -> bool {
+ if (request.callback != nullptr) {
+ request.callback(WGPUBufferMapAsyncStatus_DeviceLost, request.userdata);
}
+ return false;
+ };
- uint64_t serial = mRequests.Add(std::move(request));
-
- // Serialize the command to send to the server.
- BufferMapAsyncCmd cmd;
- cmd.bufferId = this->id;
- cmd.requestSerial = serial;
- cmd.mode = mode;
- cmd.offset = offset;
- cmd.size = size;
-
- client->SerializeCommand(cmd);
+ // Take into account the client-side status of the request if the server says it is a
+ // success.
+ if (status == WGPUBufferMapAsyncStatus_Success) {
+ status = request.clientStatus;
}
- bool Buffer::OnMapAsyncCallback(uint64_t requestSerial,
- uint32_t status,
- uint64_t readDataUpdateInfoLength,
- const uint8_t* readDataUpdateInfo) {
- MapRequestData request;
- if (!mRequests.Acquire(requestSerial, &request)) {
- return false;
- }
-
- auto FailRequest = [&request]() -> bool {
- if (request.callback != nullptr) {
- request.callback(WGPUBufferMapAsyncStatus_DeviceLost, request.userdata);
- }
- return false;
- };
-
- // Take into account the client-side status of the request if the server says it is a
- // success.
- if (status == WGPUBufferMapAsyncStatus_Success) {
- status = request.clientStatus;
- }
+ if (status == WGPUBufferMapAsyncStatus_Success) {
+ switch (request.type) {
+ case MapRequestType::Read: {
+ if (readDataUpdateInfoLength > std::numeric_limits<size_t>::max()) {
+ // This is the size of data deserialized from the command stream, which must
+ // be CPU-addressable.
+ return FailRequest();
+ }
- if (status == WGPUBufferMapAsyncStatus_Success) {
- switch (request.type) {
- case MapRequestType::Read: {
- if (readDataUpdateInfoLength > std::numeric_limits<size_t>::max()) {
- // This is the size of data deserialized from the command stream, which must
- // be CPU-addressable.
- return FailRequest();
- }
-
- // Validate to prevent bad map request; buffer destroyed during map request
- if (mReadHandle == nullptr) {
- return FailRequest();
- }
- // Update user map data with server returned data
- if (!mReadHandle->DeserializeDataUpdate(
- readDataUpdateInfo, static_cast<size_t>(readDataUpdateInfoLength),
- request.offset, request.size)) {
- return FailRequest();
- }
- mMapState = MapState::MappedForRead;
- mMappedData = const_cast<void*>(mReadHandle->GetData());
- break;
+ // Validate to prevent bad map request; buffer destroyed during map request
+ if (mReadHandle == nullptr) {
+ return FailRequest();
}
- case MapRequestType::Write: {
- if (mWriteHandle == nullptr) {
- return FailRequest();
- }
- mMapState = MapState::MappedForWrite;
- mMappedData = mWriteHandle->GetData();
- break;
+ // Update user map data with server returned data
+ if (!mReadHandle->DeserializeDataUpdate(
+ readDataUpdateInfo, static_cast<size_t>(readDataUpdateInfoLength),
+ request.offset, request.size)) {
+ return FailRequest();
}
- default:
- UNREACHABLE();
+ mMapState = MapState::MappedForRead;
+ mMappedData = const_cast<void*>(mReadHandle->GetData());
+ break;
}
-
- mMapOffset = request.offset;
- mMapSize = request.size;
+ case MapRequestType::Write: {
+ if (mWriteHandle == nullptr) {
+ return FailRequest();
+ }
+ mMapState = MapState::MappedForWrite;
+ mMappedData = mWriteHandle->GetData();
+ break;
+ }
+ default:
+ UNREACHABLE();
}
- if (request.callback) {
- request.callback(static_cast<WGPUBufferMapAsyncStatus>(status), request.userdata);
- }
+ mMapOffset = request.offset;
+ mMapSize = request.size;
+ }
- return true;
+ if (request.callback) {
+ request.callback(static_cast<WGPUBufferMapAsyncStatus>(status), request.userdata);
}
- void* Buffer::GetMappedRange(size_t offset, size_t size) {
- if (!IsMappedForWriting() || !CheckGetMappedRangeOffsetSize(offset, size)) {
- return nullptr;
- }
- return static_cast<uint8_t*>(mMappedData) + offset;
+ return true;
+}
+
+void* Buffer::GetMappedRange(size_t offset, size_t size) {
+ if (!IsMappedForWriting() || !CheckGetMappedRangeOffsetSize(offset, size)) {
+ return nullptr;
}
+ return static_cast<uint8_t*>(mMappedData) + offset;
+}
- const void* Buffer::GetConstMappedRange(size_t offset, size_t size) {
- if (!(IsMappedForWriting() || IsMappedForReading()) ||
- !CheckGetMappedRangeOffsetSize(offset, size)) {
- return nullptr;
- }
- return static_cast<uint8_t*>(mMappedData) + offset;
+const void* Buffer::GetConstMappedRange(size_t offset, size_t size) {
+ if (!(IsMappedForWriting() || IsMappedForReading()) ||
+ !CheckGetMappedRangeOffsetSize(offset, size)) {
+ return nullptr;
}
+ return static_cast<uint8_t*>(mMappedData) + offset;
+}
+
+void Buffer::Unmap() {
+ // Invalidate the local pointer, and cancel all other in-flight requests that would
+ // turn into errors anyway (you can't double map). This prevents a race when the following
+ // happens, where the application code would have unmapped a buffer but still receive a
+ // callback:
+ // - Client -> Server: MapRequest1, Unmap, MapRequest2
+ // - Server -> Client: Result of MapRequest1
+ // - Unmap locally on the client
+ // - Server -> Client: Result of MapRequest2
+
+ // mWriteHandle can still be nullptr if buffer has been destroyed before unmap
+ if ((mMapState == MapState::MappedForWrite || mMapState == MapState::MappedAtCreation) &&
+ mWriteHandle != nullptr) {
+ // Writes need to be flushed before Unmap is sent. Unmap calls all associated
+ // in-flight callbacks which may read the updated data.
+
+ // Get the serialization size of data update writes.
+ size_t writeDataUpdateInfoLength =
+ mWriteHandle->SizeOfSerializeDataUpdate(mMapOffset, mMapSize);
+
+ BufferUpdateMappedDataCmd cmd;
+ cmd.bufferId = id;
+ cmd.writeDataUpdateInfoLength = writeDataUpdateInfoLength;
+ cmd.writeDataUpdateInfo = nullptr;
+ cmd.offset = mMapOffset;
+ cmd.size = mMapSize;
+
+ client->SerializeCommand(
+ cmd, writeDataUpdateInfoLength, [&](SerializeBuffer* serializeBuffer) {
+ char* writeHandleBuffer;
+ WIRE_TRY(serializeBuffer->NextN(writeDataUpdateInfoLength, &writeHandleBuffer));
+
+ // Serialize flush metadata into the space after the command.
+ // This closes the handle for writing.
+ mWriteHandle->SerializeDataUpdate(writeHandleBuffer, cmd.offset, cmd.size);
- void Buffer::Unmap() {
- // Invalidate the local pointer, and cancel all other in-flight requests that would
- // turn into errors anyway (you can't double map). This prevents race when the following
- // happens, where the application code would have unmapped a buffer but still receive a
- // callback:
- // - Client -> Server: MapRequest1, Unmap, MapRequest2
- // - Server -> Client: Result of MapRequest1
- // - Unmap locally on the client
- // - Server -> Client: Result of MapRequest2
-
- // mWriteHandle can still be nullptr if buffer has been destroyed before unmap
- if ((mMapState == MapState::MappedForWrite || mMapState == MapState::MappedAtCreation) &&
- mWriteHandle != nullptr) {
- // Writes need to be flushed before Unmap is sent. Unmap calls all associated
- // in-flight callbacks which may read the updated data.
-
- // Get the serialization size of data update writes.
- size_t writeDataUpdateInfoLength =
- mWriteHandle->SizeOfSerializeDataUpdate(mMapOffset, mMapSize);
-
- BufferUpdateMappedDataCmd cmd;
- cmd.bufferId = id;
- cmd.writeDataUpdateInfoLength = writeDataUpdateInfoLength;
- cmd.writeDataUpdateInfo = nullptr;
- cmd.offset = mMapOffset;
- cmd.size = mMapSize;
-
- client->SerializeCommand(
- cmd, writeDataUpdateInfoLength, [&](SerializeBuffer* serializeBuffer) {
- char* writeHandleBuffer;
- WIRE_TRY(serializeBuffer->NextN(writeDataUpdateInfoLength, &writeHandleBuffer));
-
- // Serialize flush metadata into the space after the command.
- // This closes the handle for writing.
- mWriteHandle->SerializeDataUpdate(writeHandleBuffer, cmd.offset, cmd.size);
-
- return WireResult::Success;
- });
-
- // If mDestructWriteHandleOnUnmap is true, that means the write handle is merely
- // for mappedAtCreation usage. It is destroyed on unmap after flush to server
- // instead of at buffer destruction.
- if (mMapState == MapState::MappedAtCreation && mDestructWriteHandleOnUnmap) {
- mWriteHandle = nullptr;
- if (mReadHandle) {
- // If it's both mappedAtCreation and MapRead we need to reset
- // mMappedData to readHandle's GetData(). This could be changed to
- // merging read/write handle in future
- mMappedData = const_cast<void*>(mReadHandle->GetData());
- }
+ return WireResult::Success;
+ });
+
+ // If mDestructWriteHandleOnUnmap is true, that means the write handle is merely
+ // for mappedAtCreation usage. It is destroyed on unmap after flush to server
+ // instead of at buffer destruction.
+ if (mMapState == MapState::MappedAtCreation && mDestructWriteHandleOnUnmap) {
+ mWriteHandle = nullptr;
+ if (mReadHandle) {
+ // If it's both mappedAtCreation and MapRead we need to reset
+ // mMappedData to readHandle's GetData(). This could be changed to
+ // merging read/write handle in future
+ mMappedData = const_cast<void*>(mReadHandle->GetData());
}
}
+ }
- // Free map access tokens
- mMapState = MapState::Unmapped;
- mMapOffset = 0;
- mMapSize = 0;
+ // Free map access tokens
+ mMapState = MapState::Unmapped;
+ mMapOffset = 0;
+ mMapSize = 0;
- // Tag all mapping requests still in flight as unmapped before callback.
- mRequests.ForAll([](MapRequestData* request) {
- if (request->clientStatus == WGPUBufferMapAsyncStatus_Success) {
- request->clientStatus = WGPUBufferMapAsyncStatus_UnmappedBeforeCallback;
- }
- });
+ // Tag all mapping requests still in flight as unmapped before callback.
+ mRequests.ForAll([](MapRequestData* request) {
+ if (request->clientStatus == WGPUBufferMapAsyncStatus_Success) {
+ request->clientStatus = WGPUBufferMapAsyncStatus_UnmappedBeforeCallback;
+ }
+ });
- BufferUnmapCmd cmd;
- cmd.self = ToAPI(this);
- client->SerializeCommand(cmd);
- }
+ BufferUnmapCmd cmd;
+ cmd.self = ToAPI(this);
+ client->SerializeCommand(cmd);
+}
- void Buffer::Destroy() {
- // Remove the current mapping and destroy Read/WriteHandles.
- FreeMappedData();
+void Buffer::Destroy() {
+ // Remove the current mapping and destroy Read/WriteHandles.
+ FreeMappedData();
- // Tag all mapping requests still in flight as destroyed before callback.
- mRequests.ForAll([](MapRequestData* request) {
- if (request->clientStatus == WGPUBufferMapAsyncStatus_Success) {
- request->clientStatus = WGPUBufferMapAsyncStatus_DestroyedBeforeCallback;
- }
- });
+ // Tag all mapping requests still in flight as destroyed before callback.
+ mRequests.ForAll([](MapRequestData* request) {
+ if (request->clientStatus == WGPUBufferMapAsyncStatus_Success) {
+ request->clientStatus = WGPUBufferMapAsyncStatus_DestroyedBeforeCallback;
+ }
+ });
- BufferDestroyCmd cmd;
- cmd.self = ToAPI(this);
- client->SerializeCommand(cmd);
- }
+ BufferDestroyCmd cmd;
+ cmd.self = ToAPI(this);
+ client->SerializeCommand(cmd);
+}
- bool Buffer::IsMappedForReading() const {
- return mMapState == MapState::MappedForRead;
- }
+WGPUBufferUsage Buffer::GetUsage() const {
+ return mUsage;
+}
- bool Buffer::IsMappedForWriting() const {
- return mMapState == MapState::MappedForWrite || mMapState == MapState::MappedAtCreation;
- }
+uint64_t Buffer::GetSize() const {
+ return mSize;
+}
- bool Buffer::CheckGetMappedRangeOffsetSize(size_t offset, size_t size) const {
- if (offset % 8 != 0 || size % 4 != 0) {
- return false;
- }
+bool Buffer::IsMappedForReading() const {
+ return mMapState == MapState::MappedForRead;
+}
- if (size > mMapSize || offset < mMapOffset) {
- return false;
- }
+bool Buffer::IsMappedForWriting() const {
+ return mMapState == MapState::MappedForWrite || mMapState == MapState::MappedAtCreation;
+}
+
+bool Buffer::CheckGetMappedRangeOffsetSize(size_t offset, size_t size) const {
+ if (offset % 8 != 0 || offset < mMapOffset || offset > mSize) {
+ return false;
+ }
+
+ size_t rangeSize = size == WGPU_WHOLE_MAP_SIZE ? mSize - offset : size;
- size_t offsetInMappedRange = offset - mMapOffset;
- return offsetInMappedRange <= mMapSize - size;
+ if (rangeSize % 4 != 0 || rangeSize > mMapSize) {
+ return false;
}
- void Buffer::FreeMappedData() {
+ size_t offsetInMappedRange = offset - mMapOffset;
+ return offsetInMappedRange <= mMapSize - rangeSize;
+}
+
+void Buffer::FreeMappedData() {
#if defined(DAWN_ENABLE_ASSERTS)
- // When in "debug" mode, 0xCA-out the mapped data when we free it so that in we can detect
- // use-after-free of the mapped data. This is particularly useful for WebGPU test about the
- // interaction of mapping and GC.
- if (mMappedData) {
- memset(static_cast<uint8_t*>(mMappedData) + mMapOffset, 0xCA, mMapSize);
- }
+ // When in "debug" mode, 0xCA-out the mapped data when we free it so that in we can detect
+ // use-after-free of the mapped data. This is particularly useful for WebGPU test about the
+ // interaction of mapping and GC.
+ if (mMappedData) {
+ memset(static_cast<uint8_t*>(mMappedData) + mMapOffset, 0xCA, mMapSize);
+ }
#endif // defined(DAWN_ENABLE_ASSERTS)
- mMapOffset = 0;
- mMapSize = 0;
- mReadHandle = nullptr;
- mWriteHandle = nullptr;
- mMappedData = nullptr;
- }
+ mMapOffset = 0;
+ mMapSize = 0;
+ mReadHandle = nullptr;
+ mWriteHandle = nullptr;
+ mMappedData = nullptr;
+}
} // namespace dawn::wire::client
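
The mapping flow in Buffer.cpp is easiest to see from the application side. A minimal sketch (illustrative only, not part of this patch), assuming the caller flushes the wire and pumps server responses until the map callback has been delivered:

// Illustrative sketch: reading back a MapRead buffer through the C API. The wire
// client records the request in Buffer::MapAsync and resolves it in
// Buffer::OnMapAsyncCallback; offsets passed to GetConstMappedRange must be
// 8-byte aligned and sizes 4-byte aligned, per CheckGetMappedRangeOffsetSize.
#include <cstdint>
#include <cstdio>
#include "dawn/webgpu.h"

static void OnMapped(WGPUBufferMapAsyncStatus status, void* userdata) {
    *static_cast<WGPUBufferMapAsyncStatus*>(userdata) = status;
}

void ReadbackExample(WGPUBuffer buffer, size_t size) {
    WGPUBufferMapAsyncStatus mapStatus = WGPUBufferMapAsyncStatus_Unknown;
    wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, size, OnMapped, &mapStatus);

    // ... flush the wire and process server responses until OnMapped has run ...

    if (mapStatus == WGPUBufferMapAsyncStatus_Success) {
        const void* data = wgpuBufferGetConstMappedRange(buffer, 0, size);
        std::printf("first byte: %d\n", static_cast<const uint8_t*>(data)[0]);
        wgpuBufferUnmap(buffer);  // no write flush for MapRead; just clears map state
    }
}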
diff --git a/chromium/third_party/dawn/src/dawn/wire/client/Buffer.h b/chromium/third_party/dawn/src/dawn/wire/client/Buffer.h
index c1539c514ef..2644a4fe741 100644
--- a/chromium/third_party/dawn/src/dawn/wire/client/Buffer.h
+++ b/chromium/third_party/dawn/src/dawn/wire/client/Buffer.h
@@ -15,94 +15,99 @@
#ifndef SRC_DAWN_WIRE_CLIENT_BUFFER_H_
#define SRC_DAWN_WIRE_CLIENT_BUFFER_H_
-#include <dawn/webgpu.h>
+#include <memory>
+#include "dawn/webgpu.h"
#include "dawn/wire/WireClient.h"
#include "dawn/wire/client/ObjectBase.h"
#include "dawn/wire/client/RequestTracker.h"
namespace dawn::wire::client {
- class Device;
-
- class Buffer final : public ObjectBase {
- public:
- using ObjectBase::ObjectBase;
-
- static WGPUBuffer Create(Device* device, const WGPUBufferDescriptor* descriptor);
- static WGPUBuffer CreateError(Device* device);
-
- ~Buffer();
-
- bool OnMapAsyncCallback(uint64_t requestSerial,
- uint32_t status,
- uint64_t readDataUpdateInfoLength,
- const uint8_t* readDataUpdateInfo);
- void MapAsync(WGPUMapModeFlags mode,
- size_t offset,
- size_t size,
- WGPUBufferMapCallback callback,
- void* userdata);
- void* GetMappedRange(size_t offset, size_t size);
- const void* GetConstMappedRange(size_t offset, size_t size);
- void Unmap();
-
- void Destroy();
-
- private:
- void CancelCallbacksForDisconnect() override;
- void ClearAllCallbacks(WGPUBufferMapAsyncStatus status);
-
- bool IsMappedForReading() const;
- bool IsMappedForWriting() const;
- bool CheckGetMappedRangeOffsetSize(size_t offset, size_t size) const;
-
- void FreeMappedData();
-
- Device* mDevice;
-
- enum class MapRequestType { None, Read, Write };
-
- enum class MapState {
- Unmapped,
- MappedForRead,
- MappedForWrite,
- MappedAtCreation,
- };
-
- // We want to defer all the validation to the server, which means we could have multiple
- // map request in flight at a single time and need to track them separately.
- // On well-behaved applications, only one request should exist at a single time.
- struct MapRequestData {
- WGPUBufferMapCallback callback = nullptr;
- void* userdata = nullptr;
- size_t offset = 0;
- size_t size = 0;
-
- // When the buffer is destroyed or unmapped too early, the unmappedBeforeX status takes
- // precedence over the success value returned from the server. However Error statuses
- // from the server take precedence over the client-side status.
- WGPUBufferMapAsyncStatus clientStatus = WGPUBufferMapAsyncStatus_Success;
-
- MapRequestType type = MapRequestType::None;
- };
- RequestTracker<MapRequestData> mRequests;
- uint64_t mSize = 0;
-
- // Only one mapped pointer can be active at a time because Unmap clears all the in-flight
- // requests.
- // TODO(enga): Use a tagged pointer to save space.
- std::unique_ptr<MemoryTransferService::ReadHandle> mReadHandle = nullptr;
- std::unique_ptr<MemoryTransferService::WriteHandle> mWriteHandle = nullptr;
- MapState mMapState = MapState::Unmapped;
- bool mDestructWriteHandleOnUnmap = false;
-
- void* mMappedData = nullptr;
- size_t mMapOffset = 0;
- size_t mMapSize = 0;
-
- std::weak_ptr<bool> mDeviceIsAlive;
+class Device;
+
+class Buffer final : public ObjectBase {
+ public:
+ static WGPUBuffer Create(Device* device, const WGPUBufferDescriptor* descriptor);
+ static WGPUBuffer CreateError(Device* device, const WGPUBufferDescriptor* descriptor);
+
+ Buffer(Client* client, uint32_t refcount, uint32_t id);
+ ~Buffer();
+
+ bool OnMapAsyncCallback(uint64_t requestSerial,
+ uint32_t status,
+ uint64_t readDataUpdateInfoLength,
+ const uint8_t* readDataUpdateInfo);
+ void MapAsync(WGPUMapModeFlags mode,
+ size_t offset,
+ size_t size,
+ WGPUBufferMapCallback callback,
+ void* userdata);
+ void* GetMappedRange(size_t offset, size_t size);
+ const void* GetConstMappedRange(size_t offset, size_t size);
+ void Unmap();
+
+ void Destroy();
+
+ // Note that these values can be arbitrary since they aren't validated in the wire client.
+ WGPUBufferUsage GetUsage() const;
+ uint64_t GetSize() const;
+
+ private:
+ void CancelCallbacksForDisconnect() override;
+ void ClearAllCallbacks(WGPUBufferMapAsyncStatus status);
+
+ bool IsMappedForReading() const;
+ bool IsMappedForWriting() const;
+ bool CheckGetMappedRangeOffsetSize(size_t offset, size_t size) const;
+
+ void FreeMappedData();
+
+ Device* mDevice;
+
+ enum class MapRequestType { None, Read, Write };
+
+ enum class MapState {
+ Unmapped,
+ MappedForRead,
+ MappedForWrite,
+ MappedAtCreation,
+ };
+
+ // We want to defer all the validation to the server, which means we could have multiple
+ // map requests in flight at a time and need to track them separately.
+ // In well-behaved applications, only one request should exist at a time.
+ struct MapRequestData {
+ WGPUBufferMapCallback callback = nullptr;
+ void* userdata = nullptr;
+ size_t offset = 0;
+ size_t size = 0;
+
+ // When the buffer is destroyed or unmapped too early, the unmappedBeforeX status takes
+ // precedence over the success value returned from the server. However, error statuses
+ // from the server take precedence over the client-side status.
+ WGPUBufferMapAsyncStatus clientStatus = WGPUBufferMapAsyncStatus_Success;
+
+ MapRequestType type = MapRequestType::None;
};
+ RequestTracker<MapRequestData> mRequests;
+ uint64_t mSize = 0;
+ WGPUBufferUsage mUsage;
+
+ // Only one mapped pointer can be active at a time because Unmap clears all the in-flight
+ // requests.
+ // TODO(enga): Use a tagged pointer to save space.
+ std::unique_ptr<MemoryTransferService::ReadHandle> mReadHandle = nullptr;
+ std::unique_ptr<MemoryTransferService::WriteHandle> mWriteHandle = nullptr;
+ MapState mMapState = MapState::Unmapped;
+ bool mDestructWriteHandleOnUnmap = false;
+
+ void* mMappedData = nullptr;
+ size_t mMapOffset = 0;
+ size_t mMapSize = 0;
+
+ std::weak_ptr<bool> mDeviceIsAlive;
+};
} // namespace dawn::wire::client
diff --git a/chromium/third_party/dawn/src/dawn/wire/client/Client.cpp b/chromium/third_party/dawn/src/dawn/wire/client/Client.cpp
index 5db8444b271..f5fed4ddc18 100644
--- a/chromium/third_party/dawn/src/dawn/wire/client/Client.cpp
+++ b/chromium/third_party/dawn/src/dawn/wire/client/Client.cpp
@@ -19,153 +19,150 @@
namespace dawn::wire::client {
- namespace {
-
- class NoopCommandSerializer final : public CommandSerializer {
- public:
- static NoopCommandSerializer* GetInstance() {
- static NoopCommandSerializer gNoopCommandSerializer;
- return &gNoopCommandSerializer;
- }
-
- ~NoopCommandSerializer() = default;
-
- size_t GetMaximumAllocationSize() const final {
- return 0;
- }
- void* GetCmdSpace(size_t size) final {
- return nullptr;
- }
- bool Flush() final {
- return false;
- }
- };
-
- } // anonymous namespace
-
- Client::Client(CommandSerializer* serializer, MemoryTransferService* memoryTransferService)
- : ClientBase(), mSerializer(serializer), mMemoryTransferService(memoryTransferService) {
- if (mMemoryTransferService == nullptr) {
- // If a MemoryTransferService is not provided, fall back to inline memory.
- mOwnedMemoryTransferService = CreateInlineMemoryTransferService();
- mMemoryTransferService = mOwnedMemoryTransferService.get();
- }
- }
-
- Client::~Client() {
- DestroyAllObjects();
- }
-
- void Client::DestroyAllObjects() {
- for (auto& objectList : mObjects) {
- ObjectType objectType = static_cast<ObjectType>(&objectList - mObjects.data());
- if (objectType == ObjectType::Device) {
- continue;
- }
- while (!objectList.empty()) {
- ObjectBase* object = objectList.head()->value();
-
- DestroyObjectCmd cmd;
- cmd.objectType = objectType;
- cmd.objectId = object->id;
- SerializeCommand(cmd);
- FreeObject(objectType, object);
- }
- }
-
- while (!mObjects[ObjectType::Device].empty()) {
- ObjectBase* object = mObjects[ObjectType::Device].head()->value();
-
- DestroyObjectCmd cmd;
- cmd.objectType = ObjectType::Device;
- cmd.objectId = object->id;
- SerializeCommand(cmd);
- FreeObject(ObjectType::Device, object);
- }
- }
-
- ReservedTexture Client::ReserveTexture(WGPUDevice device) {
- auto* allocation = TextureAllocator().New(this);
-
- ReservedTexture result;
- result.texture = ToAPI(allocation->object.get());
- result.id = allocation->object->id;
- result.generation = allocation->generation;
- result.deviceId = FromAPI(device)->id;
- result.deviceGeneration = DeviceAllocator().GetGeneration(FromAPI(device)->id);
- return result;
- }
-
- ReservedSwapChain Client::ReserveSwapChain(WGPUDevice device) {
- auto* allocation = SwapChainAllocator().New(this);
+namespace {
- ReservedSwapChain result;
- result.swapchain = ToAPI(allocation->object.get());
- result.id = allocation->object->id;
- result.generation = allocation->generation;
- result.deviceId = FromAPI(device)->id;
- result.deviceGeneration = DeviceAllocator().GetGeneration(FromAPI(device)->id);
- return result;
+class NoopCommandSerializer final : public CommandSerializer {
+ public:
+ static NoopCommandSerializer* GetInstance() {
+ static NoopCommandSerializer gNoopCommandSerializer;
+ return &gNoopCommandSerializer;
}
- ReservedDevice Client::ReserveDevice() {
- auto* allocation = DeviceAllocator().New(this);
-
- ReservedDevice result;
- result.device = ToAPI(allocation->object.get());
- result.id = allocation->object->id;
- result.generation = allocation->generation;
- return result;
- }
+ ~NoopCommandSerializer() override = default;
- ReservedInstance Client::ReserveInstance() {
- auto* allocation = InstanceAllocator().New(this);
+ size_t GetMaximumAllocationSize() const final { return 0; }
+ void* GetCmdSpace(size_t size) final { return nullptr; }
+ bool Flush() final { return false; }
+};
- ReservedInstance result;
- result.instance = ToAPI(allocation->object.get());
- result.id = allocation->object->id;
- result.generation = allocation->generation;
- return result;
- }
+} // anonymous namespace
- void Client::ReclaimTextureReservation(const ReservedTexture& reservation) {
- TextureAllocator().Free(FromAPI(reservation.texture));
+Client::Client(CommandSerializer* serializer, MemoryTransferService* memoryTransferService)
+ : ClientBase(), mSerializer(serializer), mMemoryTransferService(memoryTransferService) {
+ if (mMemoryTransferService == nullptr) {
+ // If a MemoryTransferService is not provided, fall back to inline memory.
+ mOwnedMemoryTransferService = CreateInlineMemoryTransferService();
+ mMemoryTransferService = mOwnedMemoryTransferService.get();
}
-
- void Client::ReclaimSwapChainReservation(const ReservedSwapChain& reservation) {
- SwapChainAllocator().Free(FromAPI(reservation.swapchain));
+}
+
+Client::~Client() {
+ DestroyAllObjects();
+}
+
+void Client::DestroyAllObjects() {
+ // Free all devices first since they may hold references to other objects
+ // like the default queue. The Device destructor releases the default queue,
+ // which would be invalid if the queue was already freed.
+ while (!mObjects[ObjectType::Device].empty()) {
+ ObjectBase* object = mObjects[ObjectType::Device].head()->value();
+
+ DestroyObjectCmd cmd;
+ cmd.objectType = ObjectType::Device;
+ cmd.objectId = object->id;
+ SerializeCommand(cmd);
+ FreeObject(ObjectType::Device, object);
}
- void Client::ReclaimDeviceReservation(const ReservedDevice& reservation) {
- DeviceAllocator().Free(FromAPI(reservation.device));
- }
+ for (auto& objectList : mObjects) {
+ ObjectType objectType = static_cast<ObjectType>(&objectList - mObjects.data());
+ if (objectType == ObjectType::Device) {
+ continue;
+ }
+ while (!objectList.empty()) {
+ ObjectBase* object = objectList.head()->value();
- void Client::ReclaimInstanceReservation(const ReservedInstance& reservation) {
- InstanceAllocator().Free(FromAPI(reservation.instance));
+ DestroyObjectCmd cmd;
+ cmd.objectType = objectType;
+ cmd.objectId = object->id;
+ SerializeCommand(cmd);
+ FreeObject(objectType, object);
+ }
}
-
- void Client::Disconnect() {
- mDisconnected = true;
- mSerializer = ChunkedCommandSerializer(NoopCommandSerializer::GetInstance());
-
- auto& deviceList = mObjects[ObjectType::Device];
- {
- for (LinkNode<ObjectBase>* device = deviceList.head(); device != deviceList.end();
- device = device->next()) {
- static_cast<Device*>(device->value())
- ->HandleDeviceLost(WGPUDeviceLostReason_Undefined, "GPU connection lost");
- }
+}
+
+ReservedTexture Client::ReserveTexture(WGPUDevice device) {
+ auto* allocation = TextureAllocator().New(this);
+
+ ReservedTexture result;
+ result.texture = ToAPI(allocation->object.get());
+ result.id = allocation->object->id;
+ result.generation = allocation->generation;
+ result.deviceId = FromAPI(device)->id;
+ result.deviceGeneration = DeviceAllocator().GetGeneration(FromAPI(device)->id);
+ return result;
+}
+
+ReservedSwapChain Client::ReserveSwapChain(WGPUDevice device) {
+ auto* allocation = SwapChainAllocator().New(this);
+
+ ReservedSwapChain result;
+ result.swapchain = ToAPI(allocation->object.get());
+ result.id = allocation->object->id;
+ result.generation = allocation->generation;
+ result.deviceId = FromAPI(device)->id;
+ result.deviceGeneration = DeviceAllocator().GetGeneration(FromAPI(device)->id);
+ return result;
+}
+
+ReservedDevice Client::ReserveDevice() {
+ auto* allocation = DeviceAllocator().New(this);
+
+ ReservedDevice result;
+ result.device = ToAPI(allocation->object.get());
+ result.id = allocation->object->id;
+ result.generation = allocation->generation;
+ return result;
+}
+
+ReservedInstance Client::ReserveInstance() {
+ auto* allocation = InstanceAllocator().New(this);
+
+ ReservedInstance result;
+ result.instance = ToAPI(allocation->object.get());
+ result.id = allocation->object->id;
+ result.generation = allocation->generation;
+ return result;
+}
+
+void Client::ReclaimTextureReservation(const ReservedTexture& reservation) {
+ TextureAllocator().Free(FromAPI(reservation.texture));
+}
+
+void Client::ReclaimSwapChainReservation(const ReservedSwapChain& reservation) {
+ SwapChainAllocator().Free(FromAPI(reservation.swapchain));
+}
+
+void Client::ReclaimDeviceReservation(const ReservedDevice& reservation) {
+ DeviceAllocator().Free(FromAPI(reservation.device));
+}
+
+void Client::ReclaimInstanceReservation(const ReservedInstance& reservation) {
+ InstanceAllocator().Free(FromAPI(reservation.instance));
+}
+
+void Client::Disconnect() {
+ mDisconnected = true;
+ mSerializer = ChunkedCommandSerializer(NoopCommandSerializer::GetInstance());
+
+ auto& deviceList = mObjects[ObjectType::Device];
+ {
+ for (LinkNode<ObjectBase>* device = deviceList.head(); device != deviceList.end();
+ device = device->next()) {
+ static_cast<Device*>(device->value())
+ ->HandleDeviceLost(WGPUDeviceLostReason_Undefined, "GPU connection lost");
}
- for (auto& objectList : mObjects) {
- for (LinkNode<ObjectBase>* object = objectList.head(); object != objectList.end();
- object = object->next()) {
- object->value()->CancelCallbacksForDisconnect();
- }
+ }
+ for (auto& objectList : mObjects) {
+ for (LinkNode<ObjectBase>* object = objectList.head(); object != objectList.end();
+ object = object->next()) {
+ object->value()->CancelCallbacksForDisconnect();
}
}
+}
- bool Client::IsDisconnected() const {
- return mDisconnected;
- }
+bool Client::IsDisconnected() const {
+ return mDisconnected;
+}
} // namespace dawn::wire::client
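
For embedders, the Client above is driven through dawn::wire::WireClient. A minimal setup sketch (illustrative only, not part of this patch); the WireClientDescriptor field name `serializer` and the in-memory BufferedSerializer are assumptions for illustration, while the CommandSerializer virtuals match those overridden by NoopCommandSerializer above:

// Illustrative sketch: a toy serializer that buffers commands in memory. A real
// embedder would hand the bytes to an IPC channel and have the server side call
// HandleCommands on them.
#include <vector>
#include "dawn/wire/WireClient.h"

class BufferedSerializer final : public dawn::wire::CommandSerializer {
  public:
    size_t GetMaximumAllocationSize() const override { return 1024 * 1024; }
    void* GetCmdSpace(size_t size) override {
        size_t offset = mBytes.size();
        mBytes.resize(offset + size);
        return mBytes.data() + offset;
    }
    bool Flush() override { return true; }  // nothing to do for an in-memory buffer

  private:
    std::vector<char> mBytes;
};

void SetupWireClient() {
    BufferedSerializer serializer;
    dawn::wire::WireClientDescriptor clientDesc = {};
    clientDesc.serializer = &serializer;
    dawn::wire::WireClient wireClient(clientDesc);

    // Reserve a device id/generation pair to inject on the server side, as
    // Client::ReserveDevice does above.
    dawn::wire::ReservedDevice reservation = wireClient.ReserveDevice();
    (void)reservation;
}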
diff --git a/chromium/third_party/dawn/src/dawn/wire/client/Client.h b/chromium/third_party/dawn/src/dawn/wire/client/Client.h
index 47cc53c5d19..d045f0c06ed 100644
--- a/chromium/third_party/dawn/src/dawn/wire/client/Client.h
+++ b/chromium/third_party/dawn/src/dawn/wire/client/Client.h
@@ -15,12 +15,13 @@
#ifndef SRC_DAWN_WIRE_CLIENT_CLIENT_H_
#define SRC_DAWN_WIRE_CLIENT_CLIENT_H_
-#include <dawn/webgpu.h>
-#include <dawn/wire/Wire.h>
+#include <memory>
#include "dawn/common/LinkedList.h"
#include "dawn/common/NonCopyable.h"
+#include "dawn/webgpu.h"
#include "dawn/wire/ChunkedCommandSerializer.h"
+#include "dawn/wire/Wire.h"
#include "dawn/wire/WireClient.h"
#include "dawn/wire/WireCmd_autogen.h"
#include "dawn/wire/WireDeserializeAllocator.h"
@@ -28,67 +29,64 @@
namespace dawn::wire::client {
- class Device;
- class MemoryTransferService;
+class Device;
+class MemoryTransferService;
- class Client : public ClientBase {
- public:
- Client(CommandSerializer* serializer, MemoryTransferService* memoryTransferService);
- ~Client() override;
+class Client : public ClientBase {
+ public:
+ Client(CommandSerializer* serializer, MemoryTransferService* memoryTransferService);
+ ~Client() override;
- // ChunkedCommandHandler implementation
- const volatile char* HandleCommandsImpl(const volatile char* commands,
- size_t size) override;
+ // ChunkedCommandHandler implementation
+ const volatile char* HandleCommandsImpl(const volatile char* commands, size_t size) override;
- MemoryTransferService* GetMemoryTransferService() const {
- return mMemoryTransferService;
- }
+ MemoryTransferService* GetMemoryTransferService() const { return mMemoryTransferService; }
- ReservedTexture ReserveTexture(WGPUDevice device);
- ReservedSwapChain ReserveSwapChain(WGPUDevice device);
- ReservedDevice ReserveDevice();
- ReservedInstance ReserveInstance();
+ ReservedTexture ReserveTexture(WGPUDevice device);
+ ReservedSwapChain ReserveSwapChain(WGPUDevice device);
+ ReservedDevice ReserveDevice();
+ ReservedInstance ReserveInstance();
- void ReclaimTextureReservation(const ReservedTexture& reservation);
- void ReclaimSwapChainReservation(const ReservedSwapChain& reservation);
- void ReclaimDeviceReservation(const ReservedDevice& reservation);
- void ReclaimInstanceReservation(const ReservedInstance& reservation);
+ void ReclaimTextureReservation(const ReservedTexture& reservation);
+ void ReclaimSwapChainReservation(const ReservedSwapChain& reservation);
+ void ReclaimDeviceReservation(const ReservedDevice& reservation);
+ void ReclaimInstanceReservation(const ReservedInstance& reservation);
- template <typename Cmd>
- void SerializeCommand(const Cmd& cmd) {
- mSerializer.SerializeCommand(cmd, *this);
- }
+ template <typename Cmd>
+ void SerializeCommand(const Cmd& cmd) {
+ mSerializer.SerializeCommand(cmd, *this);
+ }
- template <typename Cmd, typename ExtraSizeSerializeFn>
- void SerializeCommand(const Cmd& cmd,
- size_t extraSize,
- ExtraSizeSerializeFn&& SerializeExtraSize) {
- mSerializer.SerializeCommand(cmd, *this, extraSize, SerializeExtraSize);
- }
+ template <typename Cmd, typename ExtraSizeSerializeFn>
+ void SerializeCommand(const Cmd& cmd,
+ size_t extraSize,
+ ExtraSizeSerializeFn&& SerializeExtraSize) {
+ mSerializer.SerializeCommand(cmd, *this, extraSize, SerializeExtraSize);
+ }
- void Disconnect();
- bool IsDisconnected() const;
+ void Disconnect();
+ bool IsDisconnected() const;
- template <typename T>
- void TrackObject(T* object) {
- mObjects[ObjectTypeToTypeEnum<T>::value].Append(object);
- }
+ template <typename T>
+ void TrackObject(T* object) {
+ mObjects[ObjectTypeToTypeEnum<T>::value].Append(object);
+ }
- private:
- void DestroyAllObjects();
+ private:
+ void DestroyAllObjects();
#include "dawn/wire/client/ClientPrototypes_autogen.inc"
- ChunkedCommandSerializer mSerializer;
- WireDeserializeAllocator mAllocator;
- MemoryTransferService* mMemoryTransferService = nullptr;
- std::unique_ptr<MemoryTransferService> mOwnedMemoryTransferService = nullptr;
+ ChunkedCommandSerializer mSerializer;
+ WireDeserializeAllocator mAllocator;
+ MemoryTransferService* mMemoryTransferService = nullptr;
+ std::unique_ptr<MemoryTransferService> mOwnedMemoryTransferService = nullptr;
- PerObjectType<LinkedList<ObjectBase>> mObjects;
- bool mDisconnected = false;
- };
+ PerObjectType<LinkedList<ObjectBase>> mObjects;
+ bool mDisconnected = false;
+};
- std::unique_ptr<MemoryTransferService> CreateInlineMemoryTransferService();
+std::unique_ptr<MemoryTransferService> CreateInlineMemoryTransferService();
} // namespace dawn::wire::client
diff --git a/chromium/third_party/dawn/src/dawn/wire/client/ClientDoers.cpp b/chromium/third_party/dawn/src/dawn/wire/client/ClientDoers.cpp
index 7b99dc60b47..9103854331d 100644
--- a/chromium/third_party/dawn/src/dawn/wire/client/ClientDoers.cpp
+++ b/chromium/third_party/dawn/src/dawn/wire/client/ClientDoers.cpp
@@ -12,122 +12,122 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+#include <limits>
+
#include "dawn/common/Assert.h"
#include "dawn/wire/client/Client.h"
#include "dawn/wire/client/Device.h"
-#include <limits>
-
namespace dawn::wire::client {
- bool Client::DoDeviceUncapturedErrorCallback(Device* device,
- WGPUErrorType errorType,
- const char* message) {
- switch (errorType) {
- case WGPUErrorType_NoError:
- case WGPUErrorType_Validation:
- case WGPUErrorType_OutOfMemory:
- case WGPUErrorType_Unknown:
- case WGPUErrorType_DeviceLost:
- break;
- default:
- return false;
- }
- if (device == nullptr) {
- // The device might have been deleted or recreated so this isn't an error.
- return true;
- }
- device->HandleError(errorType, message);
+bool Client::DoDeviceUncapturedErrorCallback(Device* device,
+ WGPUErrorType errorType,
+ const char* message) {
+ switch (errorType) {
+ case WGPUErrorType_NoError:
+ case WGPUErrorType_Validation:
+ case WGPUErrorType_OutOfMemory:
+ case WGPUErrorType_Unknown:
+ case WGPUErrorType_DeviceLost:
+ break;
+ default:
+ return false;
+ }
+ if (device == nullptr) {
+ // The device might have been deleted or recreated so this isn't an error.
return true;
}
+ device->HandleError(errorType, message);
+ return true;
+}
- bool Client::DoDeviceLoggingCallback(Device* device,
- WGPULoggingType loggingType,
- const char* message) {
- if (device == nullptr) {
- // The device might have been deleted or recreated so this isn't an error.
- return true;
- }
- device->HandleLogging(loggingType, message);
+bool Client::DoDeviceLoggingCallback(Device* device,
+ WGPULoggingType loggingType,
+ const char* message) {
+ if (device == nullptr) {
+ // The device might have been deleted or recreated so this isn't an error.
return true;
}
+ device->HandleLogging(loggingType, message);
+ return true;
+}
- bool Client::DoDeviceLostCallback(Device* device,
- WGPUDeviceLostReason reason,
- char const* message) {
- if (device == nullptr) {
- // The device might have been deleted or recreated so this isn't an error.
- return true;
- }
- device->HandleDeviceLost(reason, message);
+bool Client::DoDeviceLostCallback(Device* device,
+ WGPUDeviceLostReason reason,
+ char const* message) {
+ if (device == nullptr) {
+ // The device might have been deleted or recreated so this isn't an error.
return true;
}
+ device->HandleDeviceLost(reason, message);
+ return true;
+}
- bool Client::DoDevicePopErrorScopeCallback(Device* device,
- uint64_t requestSerial,
- WGPUErrorType errorType,
- const char* message) {
- if (device == nullptr) {
- // The device might have been deleted or recreated so this isn't an error.
- return true;
- }
- return device->OnPopErrorScopeCallback(requestSerial, errorType, message);
+bool Client::DoDevicePopErrorScopeCallback(Device* device,
+ uint64_t requestSerial,
+ WGPUErrorType errorType,
+ const char* message) {
+ if (device == nullptr) {
+ // The device might have been deleted or recreated so this isn't an error.
+ return true;
}
+ return device->OnPopErrorScopeCallback(requestSerial, errorType, message);
+}
- bool Client::DoBufferMapAsyncCallback(Buffer* buffer,
- uint64_t requestSerial,
- uint32_t status,
- uint64_t readDataUpdateInfoLength,
- const uint8_t* readDataUpdateInfo) {
- // The buffer might have been deleted or recreated so this isn't an error.
- if (buffer == nullptr) {
- return true;
- }
- return buffer->OnMapAsyncCallback(requestSerial, status, readDataUpdateInfoLength,
- readDataUpdateInfo);
+bool Client::DoBufferMapAsyncCallback(Buffer* buffer,
+ uint64_t requestSerial,
+ uint32_t status,
+ uint64_t readDataUpdateInfoLength,
+ const uint8_t* readDataUpdateInfo) {
+ // The buffer might have been deleted or recreated so this isn't an error.
+ if (buffer == nullptr) {
+ return true;
}
+ return buffer->OnMapAsyncCallback(requestSerial, status, readDataUpdateInfoLength,
+ readDataUpdateInfo);
+}
- bool Client::DoQueueWorkDoneCallback(Queue* queue,
- uint64_t requestSerial,
- WGPUQueueWorkDoneStatus status) {
- // The queue might have been deleted or recreated so this isn't an error.
- if (queue == nullptr) {
- return true;
- }
- return queue->OnWorkDoneCallback(requestSerial, status);
+bool Client::DoQueueWorkDoneCallback(Queue* queue,
+ uint64_t requestSerial,
+ WGPUQueueWorkDoneStatus status) {
+ // The queue might have been deleted or recreated so this isn't an error.
+ if (queue == nullptr) {
+ return true;
}
+ return queue->OnWorkDoneCallback(requestSerial, status);
+}
- bool Client::DoDeviceCreateComputePipelineAsyncCallback(Device* device,
- uint64_t requestSerial,
- WGPUCreatePipelineAsyncStatus status,
- const char* message) {
- // The device might have been deleted or recreated so this isn't an error.
- if (device == nullptr) {
- return true;
- }
- return device->OnCreateComputePipelineAsyncCallback(requestSerial, status, message);
+bool Client::DoDeviceCreateComputePipelineAsyncCallback(Device* device,
+ uint64_t requestSerial,
+ WGPUCreatePipelineAsyncStatus status,
+ const char* message) {
+ // The device might have been deleted or recreated so this isn't an error.
+ if (device == nullptr) {
+ return true;
}
+ return device->OnCreateComputePipelineAsyncCallback(requestSerial, status, message);
+}
- bool Client::DoDeviceCreateRenderPipelineAsyncCallback(Device* device,
- uint64_t requestSerial,
- WGPUCreatePipelineAsyncStatus status,
- const char* message) {
- // The device might have been deleted or recreated so this isn't an error.
- if (device == nullptr) {
- return true;
- }
- return device->OnCreateRenderPipelineAsyncCallback(requestSerial, status, message);
+bool Client::DoDeviceCreateRenderPipelineAsyncCallback(Device* device,
+ uint64_t requestSerial,
+ WGPUCreatePipelineAsyncStatus status,
+ const char* message) {
+ // The device might have been deleted or recreated so this isn't an error.
+ if (device == nullptr) {
+ return true;
}
+ return device->OnCreateRenderPipelineAsyncCallback(requestSerial, status, message);
+}
- bool Client::DoShaderModuleGetCompilationInfoCallback(ShaderModule* shaderModule,
- uint64_t requestSerial,
- WGPUCompilationInfoRequestStatus status,
- const WGPUCompilationInfo* info) {
- // The shader module might have been deleted or recreated so this isn't an error.
- if (shaderModule == nullptr) {
- return true;
- }
- return shaderModule->GetCompilationInfoCallback(requestSerial, status, info);
+bool Client::DoShaderModuleGetCompilationInfoCallback(ShaderModule* shaderModule,
+ uint64_t requestSerial,
+ WGPUCompilationInfoRequestStatus status,
+ const WGPUCompilationInfo* info) {
+ // The shader module might have been deleted or recreated so this isn't an error.
+ if (shaderModule == nullptr) {
+ return true;
}
+ return shaderModule->GetCompilationInfoCallback(requestSerial, status, info);
+}
} // namespace dawn::wire::client
diff --git a/chromium/third_party/dawn/src/dawn/wire/client/ClientInlineMemoryTransferService.cpp b/chromium/third_party/dawn/src/dawn/wire/client/ClientInlineMemoryTransferService.cpp
index e04ce803f1e..0174d01f6ca 100644
--- a/chromium/third_party/dawn/src/dawn/wire/client/ClientInlineMemoryTransferService.cpp
+++ b/chromium/third_party/dawn/src/dawn/wire/client/ClientInlineMemoryTransferService.cpp
@@ -12,120 +12,109 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+#include <cstring>
+#include <memory>
+#include <utility>
+
#include "dawn/common/Alloc.h"
#include "dawn/common/Assert.h"
#include "dawn/wire/WireClient.h"
#include "dawn/wire/client/Client.h"
-#include <cstring>
-
namespace dawn::wire::client {
- class InlineMemoryTransferService : public MemoryTransferService {
- class ReadHandleImpl : public ReadHandle {
- public:
- explicit ReadHandleImpl(std::unique_ptr<uint8_t[]> stagingData, size_t size)
- : mStagingData(std::move(stagingData)), mSize(size) {
- }
+class InlineMemoryTransferService : public MemoryTransferService {
+ class ReadHandleImpl : public ReadHandle {
+ public:
+ explicit ReadHandleImpl(std::unique_ptr<uint8_t[]> stagingData, size_t size)
+ : mStagingData(std::move(stagingData)), mSize(size) {}
- ~ReadHandleImpl() override = default;
+ ~ReadHandleImpl() override = default;
- size_t SerializeCreateSize() override {
- return 0;
- }
+ size_t SerializeCreateSize() override { return 0; }
- void SerializeCreate(void*) override {
- }
+ void SerializeCreate(void*) override {}
- const void* GetData() override {
- return mStagingData.get();
- }
+ const void* GetData() override { return mStagingData.get(); }
- bool DeserializeDataUpdate(const void* deserializePointer,
- size_t deserializeSize,
- size_t offset,
- size_t size) override {
- if (deserializeSize != size || deserializePointer == nullptr) {
- return false;
- }
-
- if (offset > mSize || size > mSize - offset) {
- return false;
- }
-
- void* start = static_cast<uint8_t*>(mStagingData.get()) + offset;
- memcpy(start, deserializePointer, size);
- return true;
+ bool DeserializeDataUpdate(const void* deserializePointer,
+ size_t deserializeSize,
+ size_t offset,
+ size_t size) override {
+ if (deserializeSize != size || deserializePointer == nullptr) {
+ return false;
}
- private:
- std::unique_ptr<uint8_t[]> mStagingData;
- size_t mSize;
- };
-
- class WriteHandleImpl : public WriteHandle {
- public:
- explicit WriteHandleImpl(std::unique_ptr<uint8_t[]> stagingData, size_t size)
- : mStagingData(std::move(stagingData)), mSize(size) {
+ if (offset > mSize || size > mSize - offset) {
+ return false;
}
- ~WriteHandleImpl() override = default;
+ void* start = static_cast<uint8_t*>(mStagingData.get()) + offset;
+ memcpy(start, deserializePointer, size);
+ return true;
+ }
- size_t SerializeCreateSize() override {
- return 0;
- }
+ private:
+ std::unique_ptr<uint8_t[]> mStagingData;
+ size_t mSize;
+ };
- void SerializeCreate(void*) override {
- }
+ class WriteHandleImpl : public WriteHandle {
+ public:
+ explicit WriteHandleImpl(std::unique_ptr<uint8_t[]> stagingData, size_t size)
+ : mStagingData(std::move(stagingData)), mSize(size) {}
- void* GetData() override {
- return mStagingData.get();
- }
+ ~WriteHandleImpl() override = default;
- size_t SizeOfSerializeDataUpdate(size_t offset, size_t size) override {
- ASSERT(offset <= mSize);
- ASSERT(size <= mSize - offset);
- return size;
- }
+ size_t SerializeCreateSize() override { return 0; }
- void SerializeDataUpdate(void* serializePointer, size_t offset, size_t size) override {
- ASSERT(mStagingData != nullptr);
- ASSERT(serializePointer != nullptr);
- ASSERT(offset <= mSize);
- ASSERT(size <= mSize - offset);
- memcpy(serializePointer, static_cast<uint8_t*>(mStagingData.get()) + offset, size);
- }
+ void SerializeCreate(void*) override {}
- private:
- std::unique_ptr<uint8_t[]> mStagingData;
- size_t mSize;
- };
+ void* GetData() override { return mStagingData.get(); }
- public:
- InlineMemoryTransferService() {
+ size_t SizeOfSerializeDataUpdate(size_t offset, size_t size) override {
+ ASSERT(offset <= mSize);
+ ASSERT(size <= mSize - offset);
+ return size;
}
- ~InlineMemoryTransferService() override = default;
- ReadHandle* CreateReadHandle(size_t size) override {
- auto stagingData = std::unique_ptr<uint8_t[]>(AllocNoThrow<uint8_t>(size));
- if (stagingData) {
- return new ReadHandleImpl(std::move(stagingData), size);
- }
- return nullptr;
+ void SerializeDataUpdate(void* serializePointer, size_t offset, size_t size) override {
+ ASSERT(mStagingData != nullptr);
+ ASSERT(serializePointer != nullptr);
+ ASSERT(offset <= mSize);
+ ASSERT(size <= mSize - offset);
+ memcpy(serializePointer, static_cast<uint8_t*>(mStagingData.get()) + offset, size);
}
- WriteHandle* CreateWriteHandle(size_t size) override {
- auto stagingData = std::unique_ptr<uint8_t[]>(AllocNoThrow<uint8_t>(size));
- if (stagingData) {
- memset(stagingData.get(), 0, size);
- return new WriteHandleImpl(std::move(stagingData), size);
- }
- return nullptr;
- }
+ private:
+ std::unique_ptr<uint8_t[]> mStagingData;
+ size_t mSize;
};
- std::unique_ptr<MemoryTransferService> CreateInlineMemoryTransferService() {
- return std::make_unique<InlineMemoryTransferService>();
+ public:
+ InlineMemoryTransferService() {}
+ ~InlineMemoryTransferService() override = default;
+
+ ReadHandle* CreateReadHandle(size_t size) override {
+ auto stagingData = std::unique_ptr<uint8_t[]>(AllocNoThrow<uint8_t>(size));
+ if (stagingData) {
+ return new ReadHandleImpl(std::move(stagingData), size);
+ }
+ return nullptr;
}
+ WriteHandle* CreateWriteHandle(size_t size) override {
+ auto stagingData = std::unique_ptr<uint8_t[]>(AllocNoThrow<uint8_t>(size));
+ if (stagingData) {
+ memset(stagingData.get(), 0, size);
+ return new WriteHandleImpl(std::move(stagingData), size);
+ }
+ return nullptr;
+ }
+};
+
+std::unique_ptr<MemoryTransferService> CreateInlineMemoryTransferService() {
+ return std::make_unique<InlineMemoryTransferService>();
+}
+
} // namespace dawn::wire::client
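
The inline transfer service rebuilt above stages everything in a plain heap allocation; the detail worth calling out is the bounds check in DeserializeDataUpdate, which compares offset and size against mSize separately so that offset + size is never computed and cannot overflow. A standalone sketch of just that staging-and-check pattern (hypothetical class, not the Dawn interface):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    #include <memory>

    class StagingReadHandle {
      public:
        explicit StagingReadHandle(size_t size)
            : mData(std::make_unique<uint8_t[]>(size)), mSize(size) {}

        const void* GetData() const { return mData.get(); }

        // Copies `size` bytes into the staging buffer at `offset`. The two
        // comparisons reject out-of-range updates without ever adding
        // offset + size, which could overflow size_t.
        bool DeserializeDataUpdate(const void* src, size_t size, size_t offset) {
            if (src == nullptr) {
                return false;
            }
            if (offset > mSize || size > mSize - offset) {
                return false;
            }
            std::memcpy(mData.get() + offset, src, size);
            return true;
        }

      private:
        std::unique_ptr<uint8_t[]> mData;
        size_t mSize;
    };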
diff --git a/chromium/third_party/dawn/src/dawn/wire/client/ClientMemoryTransferService_mock.cpp b/chromium/third_party/dawn/src/dawn/wire/client/ClientMemoryTransferService_mock.cpp
index 44ca3edc2d2..46016db211b 100644
--- a/chromium/third_party/dawn/src/dawn/wire/client/ClientMemoryTransferService_mock.cpp
+++ b/chromium/third_party/dawn/src/dawn/wire/client/ClientMemoryTransferService_mock.cpp
@@ -19,87 +19,82 @@
namespace dawn::wire::client {
- MockMemoryTransferService::MockReadHandle::MockReadHandle(MockMemoryTransferService* service)
- : ReadHandle(), mService(service) {
- }
-
- MockMemoryTransferService::MockReadHandle::~MockReadHandle() {
- mService->OnReadHandleDestroy(this);
- }
-
- size_t MockMemoryTransferService::MockReadHandle::SerializeCreateSize() {
- return mService->OnReadHandleSerializeCreateSize(this);
- }
-
- void MockMemoryTransferService::MockReadHandle::SerializeCreate(void* serializePointer) {
- mService->OnReadHandleSerializeCreate(this, serializePointer);
- }
-
- const void* MockMemoryTransferService::MockReadHandle::GetData() {
- return mService->OnReadHandleGetData(this);
- }
-
- bool MockMemoryTransferService::MockReadHandle::DeserializeDataUpdate(
- const void* deserializePointer,
- size_t deserializeSize,
- size_t offset,
- size_t size) {
- ASSERT(deserializeSize % sizeof(uint32_t) == 0);
- return mService->OnReadHandleDeserializeDataUpdate(
- this, reinterpret_cast<const uint32_t*>(deserializePointer), deserializeSize, offset,
- size);
- }
-
- MockMemoryTransferService::MockWriteHandle::MockWriteHandle(MockMemoryTransferService* service)
- : WriteHandle(), mService(service) {
- }
-
- MockMemoryTransferService::MockWriteHandle::~MockWriteHandle() {
- mService->OnWriteHandleDestroy(this);
- }
-
- size_t MockMemoryTransferService::MockWriteHandle::SerializeCreateSize() {
- return mService->OnWriteHandleSerializeCreateSize(this);
- }
-
- void MockMemoryTransferService::MockWriteHandle::SerializeCreate(void* serializePointer) {
- mService->OnWriteHandleSerializeCreate(this, serializePointer);
- }
-
- void* MockMemoryTransferService::MockWriteHandle::GetData() {
- return mService->OnWriteHandleGetData(this);
- }
-
- size_t MockMemoryTransferService::MockWriteHandle::SizeOfSerializeDataUpdate(size_t offset,
- size_t size) {
- return mService->OnWriteHandleSizeOfSerializeDataUpdate(this, offset, size);
- }
-
- void MockMemoryTransferService::MockWriteHandle::SerializeDataUpdate(void* serializePointer,
- size_t offset,
- size_t size) {
- mService->OnWriteHandleSerializeDataUpdate(this, serializePointer, offset, size);
- }
-
- MockMemoryTransferService::MockMemoryTransferService() = default;
- MockMemoryTransferService::~MockMemoryTransferService() = default;
-
- MockMemoryTransferService::ReadHandle* MockMemoryTransferService::CreateReadHandle(
- size_t size) {
- return OnCreateReadHandle(size);
- }
-
- MockMemoryTransferService::WriteHandle* MockMemoryTransferService::CreateWriteHandle(
- size_t size) {
- return OnCreateWriteHandle(size);
- }
-
- MockMemoryTransferService::MockReadHandle* MockMemoryTransferService::NewReadHandle() {
- return new MockReadHandle(this);
- }
-
- MockMemoryTransferService::MockWriteHandle* MockMemoryTransferService::NewWriteHandle() {
- return new MockWriteHandle(this);
- }
+MockMemoryTransferService::MockReadHandle::MockReadHandle(MockMemoryTransferService* service)
+ : ReadHandle(), mService(service) {}
+
+MockMemoryTransferService::MockReadHandle::~MockReadHandle() {
+ mService->OnReadHandleDestroy(this);
+}
+
+size_t MockMemoryTransferService::MockReadHandle::SerializeCreateSize() {
+ return mService->OnReadHandleSerializeCreateSize(this);
+}
+
+void MockMemoryTransferService::MockReadHandle::SerializeCreate(void* serializePointer) {
+ mService->OnReadHandleSerializeCreate(this, serializePointer);
+}
+
+const void* MockMemoryTransferService::MockReadHandle::GetData() {
+ return mService->OnReadHandleGetData(this);
+}
+
+bool MockMemoryTransferService::MockReadHandle::DeserializeDataUpdate(
+ const void* deserializePointer,
+ size_t deserializeSize,
+ size_t offset,
+ size_t size) {
+ ASSERT(deserializeSize % sizeof(uint32_t) == 0);
+ return mService->OnReadHandleDeserializeDataUpdate(
+ this, reinterpret_cast<const uint32_t*>(deserializePointer), deserializeSize, offset, size);
+}
+
+MockMemoryTransferService::MockWriteHandle::MockWriteHandle(MockMemoryTransferService* service)
+ : WriteHandle(), mService(service) {}
+
+MockMemoryTransferService::MockWriteHandle::~MockWriteHandle() {
+ mService->OnWriteHandleDestroy(this);
+}
+
+size_t MockMemoryTransferService::MockWriteHandle::SerializeCreateSize() {
+ return mService->OnWriteHandleSerializeCreateSize(this);
+}
+
+void MockMemoryTransferService::MockWriteHandle::SerializeCreate(void* serializePointer) {
+ mService->OnWriteHandleSerializeCreate(this, serializePointer);
+}
+
+void* MockMemoryTransferService::MockWriteHandle::GetData() {
+ return mService->OnWriteHandleGetData(this);
+}
+
+size_t MockMemoryTransferService::MockWriteHandle::SizeOfSerializeDataUpdate(size_t offset,
+ size_t size) {
+ return mService->OnWriteHandleSizeOfSerializeDataUpdate(this, offset, size);
+}
+
+void MockMemoryTransferService::MockWriteHandle::SerializeDataUpdate(void* serializePointer,
+ size_t offset,
+ size_t size) {
+ mService->OnWriteHandleSerializeDataUpdate(this, serializePointer, offset, size);
+}
+
+MockMemoryTransferService::MockMemoryTransferService() = default;
+MockMemoryTransferService::~MockMemoryTransferService() = default;
+
+MockMemoryTransferService::ReadHandle* MockMemoryTransferService::CreateReadHandle(size_t size) {
+ return OnCreateReadHandle(size);
+}
+
+MockMemoryTransferService::WriteHandle* MockMemoryTransferService::CreateWriteHandle(size_t size) {
+ return OnCreateWriteHandle(size);
+}
+
+MockMemoryTransferService::MockReadHandle* MockMemoryTransferService::NewReadHandle() {
+ return new MockReadHandle(this);
+}
+
+MockMemoryTransferService::MockWriteHandle* MockMemoryTransferService::NewWriteHandle() {
+ return new MockWriteHandle(this);
+}
} // namespace dawn::wire::client
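
The mock rebuilt above follows a common GoogleMock delegation pattern: the concrete MockReadHandle/MockWriteHandle objects carry no behaviour of their own and forward every virtual call to a MOCK_METHOD on the service that created them, so a single mock object can observe and control every handle in a test. A compressed sketch of the same pattern, assuming GoogleMock is available (hypothetical Handle/MockService names):

    #include <gmock/gmock.h>

    class Handle {
      public:
        virtual ~Handle() = default;
        virtual const void* GetData() = 0;
    };

    class MockService {
      public:
        MOCK_METHOD((const void*), OnHandleGetData, (const Handle*));
        MOCK_METHOD(void, OnHandleDestroy, (const Handle*));
    };

    // Every virtual call, including destruction, is routed to the owning service.
    class ForwardingHandle : public Handle {
      public:
        explicit ForwardingHandle(MockService* service) : mService(service) {}
        ~ForwardingHandle() override { mService->OnHandleDestroy(this); }
        const void* GetData() override { return mService->OnHandleGetData(this); }

      private:
        MockService* mService;
    };

    // In a test, expectations are placed on the service rather than on each handle:
    //   MockService service;
    //   ForwardingHandle handle(&service);
    //   EXPECT_CALL(service, OnHandleGetData(&handle)).WillOnce(testing::Return(nullptr));
    //   EXPECT_CALL(service, OnHandleDestroy(&handle));
    //   handle.GetData();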
diff --git a/chromium/third_party/dawn/src/dawn/wire/client/ClientMemoryTransferService_mock.h b/chromium/third_party/dawn/src/dawn/wire/client/ClientMemoryTransferService_mock.h
index 7e33afaa08a..1e67d0b2cf9 100644
--- a/chromium/third_party/dawn/src/dawn/wire/client/ClientMemoryTransferService_mock.h
+++ b/chromium/third_party/dawn/src/dawn/wire/client/ClientMemoryTransferService_mock.h
@@ -22,78 +22,78 @@
namespace dawn::wire::client {
- class MockMemoryTransferService : public MemoryTransferService {
+class MockMemoryTransferService : public MemoryTransferService {
+ public:
+ class MockReadHandle : public ReadHandle {
public:
- class MockReadHandle : public ReadHandle {
- public:
- explicit MockReadHandle(MockMemoryTransferService* service);
- ~MockReadHandle() override;
-
- size_t SerializeCreateSize() override;
- void SerializeCreate(void* serializePointer) override;
- const void* GetData() override;
- bool DeserializeDataUpdate(const void* deserializePointer,
- size_t deserializeSize,
- size_t offset,
- size_t size) override;
-
- private:
- MockMemoryTransferService* mService;
- };
-
- class MockWriteHandle : public WriteHandle {
- public:
- explicit MockWriteHandle(MockMemoryTransferService* service);
- ~MockWriteHandle() override;
-
- size_t SerializeCreateSize() override;
- void SerializeCreate(void* serializePointer) override;
- void* GetData() override;
- size_t SizeOfSerializeDataUpdate(size_t offset, size_t size) override;
- void SerializeDataUpdate(void* serializePointer, size_t offset, size_t size) override;
-
- private:
- MockMemoryTransferService* mService;
- };
-
- MockMemoryTransferService();
- ~MockMemoryTransferService() override;
-
- ReadHandle* CreateReadHandle(size_t) override;
- WriteHandle* CreateWriteHandle(size_t) override;
-
- MockReadHandle* NewReadHandle();
- MockWriteHandle* NewWriteHandle();
-
- MOCK_METHOD(ReadHandle*, OnCreateReadHandle, (size_t));
- MOCK_METHOD(WriteHandle*, OnCreateWriteHandle, (size_t));
-
- MOCK_METHOD(size_t, OnReadHandleSerializeCreateSize, (const ReadHandle*));
- MOCK_METHOD(void, OnReadHandleSerializeCreate, (const ReadHandle*, void* serializePointer));
- MOCK_METHOD((const void*), OnReadHandleGetData, (const ReadHandle*));
- MOCK_METHOD(bool,
- OnReadHandleDeserializeDataUpdate,
- (const ReadHandle*,
- const uint32_t* deserializePointer,
- size_t deserializeSize,
- size_t offset,
- size_t size));
- MOCK_METHOD(void, OnReadHandleDestroy, (const ReadHandle*));
-
- MOCK_METHOD(size_t, OnWriteHandleSerializeCreateSize, (const void* WriteHandle));
- MOCK_METHOD(void,
- OnWriteHandleSerializeCreate,
- (const void* WriteHandle, void* serializePointer));
- MOCK_METHOD((void*), OnWriteHandleGetData, (const void* WriteHandle));
- MOCK_METHOD(size_t,
- OnWriteHandleSizeOfSerializeDataUpdate,
- (const void* WriteHandle, size_t offset, size_t size));
- MOCK_METHOD(size_t,
- OnWriteHandleSerializeDataUpdate,
- (const void* WriteHandle, void* serializePointer, size_t offset, size_t size));
- MOCK_METHOD(void, OnWriteHandleDestroy, (const void* WriteHandle));
+ explicit MockReadHandle(MockMemoryTransferService* service);
+ ~MockReadHandle() override;
+
+ size_t SerializeCreateSize() override;
+ void SerializeCreate(void* serializePointer) override;
+ const void* GetData() override;
+ bool DeserializeDataUpdate(const void* deserializePointer,
+ size_t deserializeSize,
+ size_t offset,
+ size_t size) override;
+
+ private:
+ MockMemoryTransferService* mService;
};
+ class MockWriteHandle : public WriteHandle {
+ public:
+ explicit MockWriteHandle(MockMemoryTransferService* service);
+ ~MockWriteHandle() override;
+
+ size_t SerializeCreateSize() override;
+ void SerializeCreate(void* serializePointer) override;
+ void* GetData() override;
+ size_t SizeOfSerializeDataUpdate(size_t offset, size_t size) override;
+ void SerializeDataUpdate(void* serializePointer, size_t offset, size_t size) override;
+
+ private:
+ MockMemoryTransferService* mService;
+ };
+
+ MockMemoryTransferService();
+ ~MockMemoryTransferService() override;
+
+ ReadHandle* CreateReadHandle(size_t) override;
+ WriteHandle* CreateWriteHandle(size_t) override;
+
+ MockReadHandle* NewReadHandle();
+ MockWriteHandle* NewWriteHandle();
+
+ MOCK_METHOD(ReadHandle*, OnCreateReadHandle, (size_t));
+ MOCK_METHOD(WriteHandle*, OnCreateWriteHandle, (size_t));
+
+ MOCK_METHOD(size_t, OnReadHandleSerializeCreateSize, (const ReadHandle*));
+ MOCK_METHOD(void, OnReadHandleSerializeCreate, (const ReadHandle*, void* serializePointer));
+ MOCK_METHOD((const void*), OnReadHandleGetData, (const ReadHandle*));
+ MOCK_METHOD(bool,
+ OnReadHandleDeserializeDataUpdate,
+ (const ReadHandle*,
+ const uint32_t* deserializePointer,
+ size_t deserializeSize,
+ size_t offset,
+ size_t size));
+ MOCK_METHOD(void, OnReadHandleDestroy, (const ReadHandle*));
+
+ MOCK_METHOD(size_t, OnWriteHandleSerializeCreateSize, (const void* WriteHandle));
+ MOCK_METHOD(void,
+ OnWriteHandleSerializeCreate,
+ (const void* WriteHandle, void* serializePointer));
+ MOCK_METHOD((void*), OnWriteHandleGetData, (const void* WriteHandle));
+ MOCK_METHOD(size_t,
+ OnWriteHandleSizeOfSerializeDataUpdate,
+ (const void* WriteHandle, size_t offset, size_t size));
+ MOCK_METHOD(size_t,
+ OnWriteHandleSerializeDataUpdate,
+ (const void* WriteHandle, void* serializePointer, size_t offset, size_t size));
+ MOCK_METHOD(void, OnWriteHandleDestroy, (const void* WriteHandle));
+};
+
} // namespace dawn::wire::client
#endif // SRC_DAWN_WIRE_CLIENT_CLIENTMEMORYTRANSFERSERVICE_MOCK_H_
diff --git a/chromium/third_party/dawn/src/dawn/wire/client/Device.cpp b/chromium/third_party/dawn/src/dawn/wire/client/Device.cpp
index 9378bd5b45e..4b86888883f 100644
--- a/chromium/third_party/dawn/src/dawn/wire/client/Device.cpp
+++ b/chromium/third_party/dawn/src/dawn/wire/client/Device.cpp
@@ -14,6 +14,8 @@
#include "dawn/wire/client/Device.h"
+#include <utility>
+
#include "dawn/common/Assert.h"
#include "dawn/common/Log.h"
#include "dawn/wire/client/ApiObjects_autogen.h"
@@ -22,304 +24,315 @@
namespace dawn::wire::client {
- Device::Device(Client* clientIn, uint32_t initialRefcount, uint32_t initialId)
- : ObjectBase(clientIn, initialRefcount, initialId), mIsAlive(std::make_shared<bool>()) {
+Device::Device(Client* clientIn, uint32_t initialRefcount, uint32_t initialId)
+ : ObjectBase(clientIn, initialRefcount, initialId), mIsAlive(std::make_shared<bool>()) {
#if defined(DAWN_ENABLE_ASSERTS)
- mErrorCallback = [](WGPUErrorType, char const*, void*) {
- static bool calledOnce = false;
- if (!calledOnce) {
- calledOnce = true;
- dawn::WarningLog() << "No Dawn device uncaptured error callback was set. This is "
- "probably not intended. If you really want to ignore errors "
- "and suppress this message, set the callback to null.";
- }
- };
-
- mDeviceLostCallback = [](WGPUDeviceLostReason, char const*, void*) {
- static bool calledOnce = false;
- if (!calledOnce) {
- calledOnce = true;
- dawn::WarningLog() << "No Dawn device lost callback was set. This is probably not "
- "intended. If you really want to ignore device lost "
- "and suppress this message, set the callback to null.";
- }
- };
+ mErrorCallback = [](WGPUErrorType, char const*, void*) {
+ static bool calledOnce = false;
+ if (!calledOnce) {
+ calledOnce = true;
+ dawn::WarningLog() << "No Dawn device uncaptured error callback was set. This is "
+ "probably not intended. If you really want to ignore errors "
+ "and suppress this message, set the callback to null.";
+ }
+ };
+
+ mDeviceLostCallback = [](WGPUDeviceLostReason, char const*, void*) {
+ static bool calledOnce = false;
+ if (!calledOnce) {
+ calledOnce = true;
+ dawn::WarningLog() << "No Dawn device lost callback was set. This is probably not "
+ "intended. If you really want to ignore device lost "
+ "and suppress this message, set the callback to null.";
+ }
+ };
#endif // DAWN_ENABLE_ASSERTS
- }
-
- Device::~Device() {
- mErrorScopes.CloseAll([](ErrorScopeData* request) {
- request->callback(WGPUErrorType_Unknown, "Device destroyed before callback",
- request->userdata);
- });
-
- mCreatePipelineAsyncRequests.CloseAll([](CreatePipelineAsyncRequest* request) {
- if (request->createComputePipelineAsyncCallback != nullptr) {
- request->createComputePipelineAsyncCallback(
- WGPUCreatePipelineAsyncStatus_DeviceDestroyed, nullptr,
- "Device destroyed before callback", request->userdata);
- } else {
- ASSERT(request->createRenderPipelineAsyncCallback != nullptr);
- request->createRenderPipelineAsyncCallback(
- WGPUCreatePipelineAsyncStatus_DeviceDestroyed, nullptr,
- "Device destroyed before callback", request->userdata);
- }
- });
- }
-
- bool Device::GetLimits(WGPUSupportedLimits* limits) const {
- return mLimitsAndFeatures.GetLimits(limits);
- }
-
- bool Device::HasFeature(WGPUFeatureName feature) const {
- return mLimitsAndFeatures.HasFeature(feature);
- }
-
- size_t Device::EnumerateFeatures(WGPUFeatureName* features) const {
- return mLimitsAndFeatures.EnumerateFeatures(features);
- }
-
- void Device::SetLimits(const WGPUSupportedLimits* limits) {
- return mLimitsAndFeatures.SetLimits(limits);
- }
+}
+
+Device::~Device() {
+ mErrorScopes.CloseAll([](ErrorScopeData* request) {
+ request->callback(WGPUErrorType_Unknown, "Device destroyed before callback",
+ request->userdata);
+ });
+
+ mCreatePipelineAsyncRequests.CloseAll([](CreatePipelineAsyncRequest* request) {
+ if (request->createComputePipelineAsyncCallback != nullptr) {
+ request->createComputePipelineAsyncCallback(
+ WGPUCreatePipelineAsyncStatus_DeviceDestroyed, nullptr,
+ "Device destroyed before callback", request->userdata);
+ } else {
+ ASSERT(request->createRenderPipelineAsyncCallback != nullptr);
+ request->createRenderPipelineAsyncCallback(
+ WGPUCreatePipelineAsyncStatus_DeviceDestroyed, nullptr,
+ "Device destroyed before callback", request->userdata);
+ }
+ });
- void Device::SetFeatures(const WGPUFeatureName* features, uint32_t featuresCount) {
- return mLimitsAndFeatures.SetFeatures(features, featuresCount);
+ if (mQueue != nullptr) {
+ GetProcs().queueRelease(ToAPI(mQueue));
}
+}
- void Device::HandleError(WGPUErrorType errorType, const char* message) {
- if (mErrorCallback) {
- mErrorCallback(errorType, message, mErrorUserdata);
- }
- }
+bool Device::GetLimits(WGPUSupportedLimits* limits) const {
+ return mLimitsAndFeatures.GetLimits(limits);
+}
- void Device::HandleLogging(WGPULoggingType loggingType, const char* message) {
- if (mLoggingCallback) {
- // Since client always run in single thread, calling the callback directly is safe.
- mLoggingCallback(loggingType, message, mLoggingUserdata);
- }
- }
+bool Device::HasFeature(WGPUFeatureName feature) const {
+ return mLimitsAndFeatures.HasFeature(feature);
+}
- void Device::HandleDeviceLost(WGPUDeviceLostReason reason, const char* message) {
- if (mDeviceLostCallback && !mDidRunLostCallback) {
- mDidRunLostCallback = true;
- mDeviceLostCallback(reason, message, mDeviceLostUserdata);
- }
- }
+size_t Device::EnumerateFeatures(WGPUFeatureName* features) const {
+ return mLimitsAndFeatures.EnumerateFeatures(features);
+}
- void Device::CancelCallbacksForDisconnect() {
- mErrorScopes.CloseAll([](ErrorScopeData* request) {
- request->callback(WGPUErrorType_DeviceLost, "Device lost", request->userdata);
- });
-
- mCreatePipelineAsyncRequests.CloseAll([](CreatePipelineAsyncRequest* request) {
- if (request->createComputePipelineAsyncCallback != nullptr) {
- request->createComputePipelineAsyncCallback(
- WGPUCreatePipelineAsyncStatus_DeviceLost, nullptr, "Device lost",
- request->userdata);
- } else {
- ASSERT(request->createRenderPipelineAsyncCallback != nullptr);
- request->createRenderPipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_DeviceLost,
- nullptr, "Device lost",
- request->userdata);
- }
- });
- }
+void Device::SetLimits(const WGPUSupportedLimits* limits) {
+ return mLimitsAndFeatures.SetLimits(limits);
+}
- std::weak_ptr<bool> Device::GetAliveWeakPtr() {
- return mIsAlive;
- }
+void Device::SetFeatures(const WGPUFeatureName* features, uint32_t featuresCount) {
+ return mLimitsAndFeatures.SetFeatures(features, featuresCount);
+}
- void Device::SetUncapturedErrorCallback(WGPUErrorCallback errorCallback, void* errorUserdata) {
- mErrorCallback = errorCallback;
- mErrorUserdata = errorUserdata;
+void Device::HandleError(WGPUErrorType errorType, const char* message) {
+ if (mErrorCallback) {
+ mErrorCallback(errorType, message, mErrorUserdata);
}
+}
- void Device::SetLoggingCallback(WGPULoggingCallback callback, void* userdata) {
- mLoggingCallback = callback;
- mLoggingUserdata = userdata;
+void Device::HandleLogging(WGPULoggingType loggingType, const char* message) {
+ if (mLoggingCallback) {
+        // Since the client always runs in a single thread, calling the callback directly is safe.
+ mLoggingCallback(loggingType, message, mLoggingUserdata);
}
+}
- void Device::SetDeviceLostCallback(WGPUDeviceLostCallback callback, void* userdata) {
- mDeviceLostCallback = callback;
- mDeviceLostUserdata = userdata;
+void Device::HandleDeviceLost(WGPUDeviceLostReason reason, const char* message) {
+ if (mDeviceLostCallback && !mDidRunLostCallback) {
+ mDidRunLostCallback = true;
+ mDeviceLostCallback(reason, message, mDeviceLostUserdata);
}
-
- bool Device::PopErrorScope(WGPUErrorCallback callback, void* userdata) {
- // TODO(crbug.com/dawn/1324) Replace bool return with void when users are updated.
- if (client->IsDisconnected()) {
- callback(WGPUErrorType_DeviceLost, "GPU device disconnected", userdata);
- return true;
+}
+
+void Device::CancelCallbacksForDisconnect() {
+ mErrorScopes.CloseAll([](ErrorScopeData* request) {
+ request->callback(WGPUErrorType_DeviceLost, "Device lost", request->userdata);
+ });
+
+ mCreatePipelineAsyncRequests.CloseAll([](CreatePipelineAsyncRequest* request) {
+ if (request->createComputePipelineAsyncCallback != nullptr) {
+ request->createComputePipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_DeviceLost,
+ nullptr, "Device lost", request->userdata);
+ } else {
+ ASSERT(request->createRenderPipelineAsyncCallback != nullptr);
+ request->createRenderPipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_DeviceLost,
+ nullptr, "Device lost", request->userdata);
}
-
- uint64_t serial = mErrorScopes.Add({callback, userdata});
- DevicePopErrorScopeCmd cmd;
- cmd.deviceId = this->id;
- cmd.requestSerial = serial;
- client->SerializeCommand(cmd);
+ });
+}
+
+std::weak_ptr<bool> Device::GetAliveWeakPtr() {
+ return mIsAlive;
+}
+
+void Device::SetUncapturedErrorCallback(WGPUErrorCallback errorCallback, void* errorUserdata) {
+ mErrorCallback = errorCallback;
+ mErrorUserdata = errorUserdata;
+}
+
+void Device::SetLoggingCallback(WGPULoggingCallback callback, void* userdata) {
+ mLoggingCallback = callback;
+ mLoggingUserdata = userdata;
+}
+
+void Device::SetDeviceLostCallback(WGPUDeviceLostCallback callback, void* userdata) {
+ mDeviceLostCallback = callback;
+ mDeviceLostUserdata = userdata;
+}
+
+bool Device::PopErrorScope(WGPUErrorCallback callback, void* userdata) {
+ // TODO(crbug.com/dawn/1324) Replace bool return with void when users are updated.
+ if (client->IsDisconnected()) {
+ callback(WGPUErrorType_DeviceLost, "GPU device disconnected", userdata);
return true;
}
- bool Device::OnPopErrorScopeCallback(uint64_t requestSerial,
- WGPUErrorType type,
- const char* message) {
- switch (type) {
- case WGPUErrorType_NoError:
- case WGPUErrorType_Validation:
- case WGPUErrorType_OutOfMemory:
- case WGPUErrorType_Unknown:
- case WGPUErrorType_DeviceLost:
- break;
- default:
- return false;
- }
-
- ErrorScopeData request;
- if (!mErrorScopes.Acquire(requestSerial, &request)) {
+ uint64_t serial = mErrorScopes.Add({callback, userdata});
+ DevicePopErrorScopeCmd cmd;
+ cmd.deviceId = this->id;
+ cmd.requestSerial = serial;
+ client->SerializeCommand(cmd);
+ return true;
+}
+
+bool Device::OnPopErrorScopeCallback(uint64_t requestSerial,
+ WGPUErrorType type,
+ const char* message) {
+ switch (type) {
+ case WGPUErrorType_NoError:
+ case WGPUErrorType_Validation:
+ case WGPUErrorType_OutOfMemory:
+ case WGPUErrorType_Unknown:
+ case WGPUErrorType_DeviceLost:
+ break;
+ default:
return false;
- }
-
- request.callback(type, message, request.userdata);
- return true;
}
- void Device::InjectError(WGPUErrorType type, const char* message) {
- DeviceInjectErrorCmd cmd;
- cmd.self = ToAPI(this);
- cmd.type = type;
- cmd.message = message;
- client->SerializeCommand(cmd);
+ ErrorScopeData request;
+ if (!mErrorScopes.Acquire(requestSerial, &request)) {
+ return false;
}
- WGPUBuffer Device::CreateBuffer(const WGPUBufferDescriptor* descriptor) {
- return Buffer::Create(this, descriptor);
- }
+ request.callback(type, message, request.userdata);
+ return true;
+}
+
+void Device::InjectError(WGPUErrorType type, const char* message) {
+ DeviceInjectErrorCmd cmd;
+ cmd.self = ToAPI(this);
+ cmd.type = type;
+ cmd.message = message;
+ client->SerializeCommand(cmd);
+}
+
+WGPUBuffer Device::CreateBuffer(const WGPUBufferDescriptor* descriptor) {
+ return Buffer::Create(this, descriptor);
+}
+
+WGPUBuffer Device::CreateErrorBuffer() {
+ WGPUBufferDescriptor fakeDescriptor = {};
+ return Buffer::CreateError(this, &fakeDescriptor);
+}
+
+WGPUQuerySet Device::CreateQuerySet(const WGPUQuerySetDescriptor* descriptor) {
+ return QuerySet::Create(this, descriptor);
+}
+
+WGPUTexture Device::CreateTexture(const WGPUTextureDescriptor* descriptor) {
+ return Texture::Create(this, descriptor);
+}
+
+WGPUQueue Device::GetQueue() {
+ // The queue is lazily created because if a Device is created by
+ // Reserve/Inject, we cannot send the GetQueue message until
+ // it has been injected on the Server. It cannot happen immediately
+ // on construction.
+ if (mQueue == nullptr) {
+ // Get the primary queue for this device.
+ auto* allocation = client->QueueAllocator().New(client);
+ mQueue = allocation->object.get();
+
+ DeviceGetQueueCmd cmd;
+ cmd.self = ToAPI(this);
+ cmd.result = ObjectHandle{allocation->object->id, allocation->generation};
- WGPUBuffer Device::CreateErrorBuffer() {
- return Buffer::CreateError(this);
+ client->SerializeCommand(cmd);
}
- WGPUQueue Device::GetQueue() {
- // The queue is lazily created because if a Device is created by
- // Reserve/Inject, we cannot send the GetQueue message until
- // it has been injected on the Server. It cannot happen immediately
- // on construction.
- if (mQueue == nullptr) {
- // Get the primary queue for this device.
- auto* allocation = client->QueueAllocator().New(client);
- mQueue = allocation->object.get();
-
- DeviceGetQueueCmd cmd;
- cmd.self = ToAPI(this);
- cmd.result = ObjectHandle{allocation->object->id, allocation->generation};
-
- client->SerializeCommand(cmd);
- }
+ mQueue->refcount++;
+ return ToAPI(mQueue);
+}
- mQueue->refcount++;
- return ToAPI(mQueue);
+void Device::CreateComputePipelineAsync(WGPUComputePipelineDescriptor const* descriptor,
+ WGPUCreateComputePipelineAsyncCallback callback,
+ void* userdata) {
+ if (client->IsDisconnected()) {
+ return callback(WGPUCreatePipelineAsyncStatus_DeviceLost, nullptr,
+ "GPU device disconnected", userdata);
}
- void Device::CreateComputePipelineAsync(WGPUComputePipelineDescriptor const* descriptor,
- WGPUCreateComputePipelineAsyncCallback callback,
- void* userdata) {
- if (client->IsDisconnected()) {
- return callback(WGPUCreatePipelineAsyncStatus_DeviceLost, nullptr,
- "GPU device disconnected", userdata);
- }
+ auto* allocation = client->ComputePipelineAllocator().New(client);
- auto* allocation = client->ComputePipelineAllocator().New(client);
+ CreatePipelineAsyncRequest request = {};
+ request.createComputePipelineAsyncCallback = callback;
+ request.userdata = userdata;
+ request.pipelineObjectID = allocation->object->id;
- CreatePipelineAsyncRequest request = {};
- request.createComputePipelineAsyncCallback = callback;
- request.userdata = userdata;
- request.pipelineObjectID = allocation->object->id;
+ uint64_t serial = mCreatePipelineAsyncRequests.Add(std::move(request));
- uint64_t serial = mCreatePipelineAsyncRequests.Add(std::move(request));
+ DeviceCreateComputePipelineAsyncCmd cmd;
+ cmd.deviceId = this->id;
+ cmd.descriptor = descriptor;
+ cmd.requestSerial = serial;
+ cmd.pipelineObjectHandle = ObjectHandle{allocation->object->id, allocation->generation};
- DeviceCreateComputePipelineAsyncCmd cmd;
- cmd.deviceId = this->id;
- cmd.descriptor = descriptor;
- cmd.requestSerial = serial;
- cmd.pipelineObjectHandle = ObjectHandle{allocation->object->id, allocation->generation};
+ client->SerializeCommand(cmd);
+}
- client->SerializeCommand(cmd);
+bool Device::OnCreateComputePipelineAsyncCallback(uint64_t requestSerial,
+ WGPUCreatePipelineAsyncStatus status,
+ const char* message) {
+ CreatePipelineAsyncRequest request;
+ if (!mCreatePipelineAsyncRequests.Acquire(requestSerial, &request)) {
+ return false;
}
- bool Device::OnCreateComputePipelineAsyncCallback(uint64_t requestSerial,
- WGPUCreatePipelineAsyncStatus status,
- const char* message) {
- CreatePipelineAsyncRequest request;
- if (!mCreatePipelineAsyncRequests.Acquire(requestSerial, &request)) {
- return false;
- }
-
- auto pipelineAllocation =
- client->ComputePipelineAllocator().GetObject(request.pipelineObjectID);
-
- // If the return status is a failure we should give a null pipeline to the callback and
- // free the allocation.
- if (status != WGPUCreatePipelineAsyncStatus_Success) {
- client->ComputePipelineAllocator().Free(pipelineAllocation);
- request.createComputePipelineAsyncCallback(status, nullptr, message, request.userdata);
- return true;
- }
-
- WGPUComputePipeline pipeline = reinterpret_cast<WGPUComputePipeline>(pipelineAllocation);
- request.createComputePipelineAsyncCallback(status, pipeline, message, request.userdata);
+ auto* pipelineAllocation =
+ client->ComputePipelineAllocator().GetObject(request.pipelineObjectID);
+ // If the return status is a failure we should give a null pipeline to the callback and
+ // free the allocation.
+ if (status != WGPUCreatePipelineAsyncStatus_Success) {
+ client->ComputePipelineAllocator().Free(pipelineAllocation);
+ request.createComputePipelineAsyncCallback(status, nullptr, message, request.userdata);
return true;
}
- void Device::CreateRenderPipelineAsync(WGPURenderPipelineDescriptor const* descriptor,
- WGPUCreateRenderPipelineAsyncCallback callback,
- void* userdata) {
- if (client->IsDisconnected()) {
- return callback(WGPUCreatePipelineAsyncStatus_DeviceLost, nullptr,
- "GPU device disconnected", userdata);
- }
+ WGPUComputePipeline pipeline = reinterpret_cast<WGPUComputePipeline>(pipelineAllocation);
+ request.createComputePipelineAsyncCallback(status, pipeline, message, request.userdata);
- auto* allocation = client->RenderPipelineAllocator().New(client);
+ return true;
+}
- CreatePipelineAsyncRequest request = {};
- request.createRenderPipelineAsyncCallback = callback;
- request.userdata = userdata;
- request.pipelineObjectID = allocation->object->id;
+void Device::CreateRenderPipelineAsync(WGPURenderPipelineDescriptor const* descriptor,
+ WGPUCreateRenderPipelineAsyncCallback callback,
+ void* userdata) {
+ if (client->IsDisconnected()) {
+ return callback(WGPUCreatePipelineAsyncStatus_DeviceLost, nullptr,
+ "GPU device disconnected", userdata);
+ }
- uint64_t serial = mCreatePipelineAsyncRequests.Add(std::move(request));
+ auto* allocation = client->RenderPipelineAllocator().New(client);
- DeviceCreateRenderPipelineAsyncCmd cmd;
- cmd.deviceId = this->id;
- cmd.descriptor = descriptor;
- cmd.requestSerial = serial;
- cmd.pipelineObjectHandle = ObjectHandle(allocation->object->id, allocation->generation);
+ CreatePipelineAsyncRequest request = {};
+ request.createRenderPipelineAsyncCallback = callback;
+ request.userdata = userdata;
+ request.pipelineObjectID = allocation->object->id;
- client->SerializeCommand(cmd);
- }
+ uint64_t serial = mCreatePipelineAsyncRequests.Add(std::move(request));
- bool Device::OnCreateRenderPipelineAsyncCallback(uint64_t requestSerial,
- WGPUCreatePipelineAsyncStatus status,
- const char* message) {
- CreatePipelineAsyncRequest request;
- if (!mCreatePipelineAsyncRequests.Acquire(requestSerial, &request)) {
- return false;
- }
+ DeviceCreateRenderPipelineAsyncCmd cmd;
+ cmd.deviceId = this->id;
+ cmd.descriptor = descriptor;
+ cmd.requestSerial = serial;
+ cmd.pipelineObjectHandle = ObjectHandle(allocation->object->id, allocation->generation);
- auto pipelineAllocation =
- client->RenderPipelineAllocator().GetObject(request.pipelineObjectID);
+ client->SerializeCommand(cmd);
+}
- // If the return status is a failure we should give a null pipeline to the callback and
- // free the allocation.
- if (status != WGPUCreatePipelineAsyncStatus_Success) {
- client->RenderPipelineAllocator().Free(pipelineAllocation);
- request.createRenderPipelineAsyncCallback(status, nullptr, message, request.userdata);
- return true;
- }
+bool Device::OnCreateRenderPipelineAsyncCallback(uint64_t requestSerial,
+ WGPUCreatePipelineAsyncStatus status,
+ const char* message) {
+ CreatePipelineAsyncRequest request;
+ if (!mCreatePipelineAsyncRequests.Acquire(requestSerial, &request)) {
+ return false;
+ }
- WGPURenderPipeline pipeline = reinterpret_cast<WGPURenderPipeline>(pipelineAllocation);
- request.createRenderPipelineAsyncCallback(status, pipeline, message, request.userdata);
+ auto* pipelineAllocation =
+ client->RenderPipelineAllocator().GetObject(request.pipelineObjectID);
+ // If the return status is a failure we should give a null pipeline to the callback and
+ // free the allocation.
+ if (status != WGPUCreatePipelineAsyncStatus_Success) {
+ client->RenderPipelineAllocator().Free(pipelineAllocation);
+ request.createRenderPipelineAsyncCallback(status, nullptr, message, request.userdata);
return true;
}
+ WGPURenderPipeline pipeline = reinterpret_cast<WGPURenderPipeline>(pipelineAllocation);
+ request.createRenderPipelineAsyncCallback(status, pipeline, message, request.userdata);
+
+ return true;
+}
+
} // namespace dawn::wire::client
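
Most of Device.cpp above revolves around serial-keyed request tracking: PopErrorScope and the two CreateXPipelineAsync paths Add() a pending request, send its serial over the wire, Acquire() it when the reply comes back, and CloseAll() whatever is still pending on destruction or disconnect so no callback is silently dropped. A minimal sketch of that bookkeeping, assuming nothing about Dawn's actual RequestTracker beyond the call shape visible above:

    #include <cstdint>
    #include <map>
    #include <utility>

    template <typename Request>
    class SerialTracker {
      public:
        // Stores the request and hands out the serial that travels over the wire.
        uint64_t Add(Request request) {
            uint64_t serial = mNextSerial++;
            mRequests.emplace(serial, std::move(request));
            return serial;
        }

        // Pops the pending entry when the reply arrives. An unknown or
        // already-completed serial is reported to the caller as a wire error.
        bool Acquire(uint64_t serial, Request* out) {
            auto it = mRequests.find(serial);
            if (it == mRequests.end()) {
                return false;
            }
            *out = std::move(it->second);
            mRequests.erase(it);
            return true;
        }

        // Flushes everything still pending, e.g. on destruction or disconnect.
        template <typename F>
        void CloseAll(F&& f) {
            for (auto& entry : mRequests) {
                f(&entry.second);
            }
            mRequests.clear();
        }

      private:
        uint64_t mNextSerial = 1;
        std::map<uint64_t, Request> mRequests;
    };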
diff --git a/chromium/third_party/dawn/src/dawn/wire/client/Device.h b/chromium/third_party/dawn/src/dawn/wire/client/Device.h
index e254c123b94..f932607cdd6 100644
--- a/chromium/third_party/dawn/src/dawn/wire/client/Device.h
+++ b/chromium/third_party/dawn/src/dawn/wire/client/Device.h
@@ -15,96 +15,95 @@
#ifndef SRC_DAWN_WIRE_CLIENT_DEVICE_H_
#define SRC_DAWN_WIRE_CLIENT_DEVICE_H_
-#include <dawn/webgpu.h>
+#include <memory>
#include "dawn/common/LinkedList.h"
+#include "dawn/webgpu.h"
#include "dawn/wire/WireCmd_autogen.h"
#include "dawn/wire/client/ApiObjects_autogen.h"
#include "dawn/wire/client/LimitsAndFeatures.h"
#include "dawn/wire/client/ObjectBase.h"
#include "dawn/wire/client/RequestTracker.h"
-#include <memory>
-
namespace dawn::wire::client {
- class Client;
- class Queue;
-
- class Device final : public ObjectBase {
- public:
- Device(Client* client, uint32_t refcount, uint32_t id);
- ~Device();
-
- void SetUncapturedErrorCallback(WGPUErrorCallback errorCallback, void* errorUserdata);
- void SetLoggingCallback(WGPULoggingCallback errorCallback, void* errorUserdata);
- void SetDeviceLostCallback(WGPUDeviceLostCallback errorCallback, void* errorUserdata);
- void InjectError(WGPUErrorType type, const char* message);
- void PushErrorScope(WGPUErrorFilter filter);
- bool PopErrorScope(WGPUErrorCallback callback, void* userdata);
- WGPUBuffer CreateBuffer(const WGPUBufferDescriptor* descriptor);
- WGPUBuffer CreateErrorBuffer();
- WGPUComputePipeline CreateComputePipeline(WGPUComputePipelineDescriptor const* descriptor);
- void CreateComputePipelineAsync(WGPUComputePipelineDescriptor const* descriptor,
- WGPUCreateComputePipelineAsyncCallback callback,
- void* userdata);
- void CreateRenderPipelineAsync(WGPURenderPipelineDescriptor const* descriptor,
- WGPUCreateRenderPipelineAsyncCallback callback,
- void* userdata);
-
- void HandleError(WGPUErrorType errorType, const char* message);
- void HandleLogging(WGPULoggingType loggingType, const char* message);
- void HandleDeviceLost(WGPUDeviceLostReason reason, const char* message);
- bool OnPopErrorScopeCallback(uint64_t requestSerial,
- WGPUErrorType type,
- const char* message);
- bool OnCreateComputePipelineAsyncCallback(uint64_t requestSerial,
- WGPUCreatePipelineAsyncStatus status,
- const char* message);
- bool OnCreateRenderPipelineAsyncCallback(uint64_t requestSerial,
- WGPUCreatePipelineAsyncStatus status,
- const char* message);
-
- bool GetLimits(WGPUSupportedLimits* limits) const;
- bool HasFeature(WGPUFeatureName feature) const;
- size_t EnumerateFeatures(WGPUFeatureName* features) const;
- void SetLimits(const WGPUSupportedLimits* limits);
- void SetFeatures(const WGPUFeatureName* features, uint32_t featuresCount);
-
- WGPUQueue GetQueue();
-
- void CancelCallbacksForDisconnect() override;
-
- std::weak_ptr<bool> GetAliveWeakPtr();
-
- private:
- LimitsAndFeatures mLimitsAndFeatures;
- struct ErrorScopeData {
- WGPUErrorCallback callback = nullptr;
- void* userdata = nullptr;
- };
- RequestTracker<ErrorScopeData> mErrorScopes;
-
- struct CreatePipelineAsyncRequest {
- WGPUCreateComputePipelineAsyncCallback createComputePipelineAsyncCallback = nullptr;
- WGPUCreateRenderPipelineAsyncCallback createRenderPipelineAsyncCallback = nullptr;
- void* userdata = nullptr;
- ObjectId pipelineObjectID;
- };
- RequestTracker<CreatePipelineAsyncRequest> mCreatePipelineAsyncRequests;
-
- WGPUErrorCallback mErrorCallback = nullptr;
- WGPUDeviceLostCallback mDeviceLostCallback = nullptr;
- WGPULoggingCallback mLoggingCallback = nullptr;
- bool mDidRunLostCallback = false;
- void* mErrorUserdata = nullptr;
- void* mDeviceLostUserdata = nullptr;
- void* mLoggingUserdata = nullptr;
-
- Queue* mQueue = nullptr;
-
- std::shared_ptr<bool> mIsAlive;
+class Client;
+class Queue;
+
+class Device final : public ObjectBase {
+ public:
+ Device(Client* client, uint32_t refcount, uint32_t id);
+ ~Device();
+
+ void SetUncapturedErrorCallback(WGPUErrorCallback errorCallback, void* errorUserdata);
+ void SetLoggingCallback(WGPULoggingCallback errorCallback, void* errorUserdata);
+ void SetDeviceLostCallback(WGPUDeviceLostCallback errorCallback, void* errorUserdata);
+ void InjectError(WGPUErrorType type, const char* message);
+ void PushErrorScope(WGPUErrorFilter filter);
+ bool PopErrorScope(WGPUErrorCallback callback, void* userdata);
+ WGPUBuffer CreateBuffer(const WGPUBufferDescriptor* descriptor);
+ WGPUBuffer CreateErrorBuffer();
+ WGPUComputePipeline CreateComputePipeline(WGPUComputePipelineDescriptor const* descriptor);
+ void CreateComputePipelineAsync(WGPUComputePipelineDescriptor const* descriptor,
+ WGPUCreateComputePipelineAsyncCallback callback,
+ void* userdata);
+ void CreateRenderPipelineAsync(WGPURenderPipelineDescriptor const* descriptor,
+ WGPUCreateRenderPipelineAsyncCallback callback,
+ void* userdata);
+ WGPUQuerySet CreateQuerySet(const WGPUQuerySetDescriptor* descriptor);
+ WGPUTexture CreateTexture(const WGPUTextureDescriptor* descriptor);
+
+ void HandleError(WGPUErrorType errorType, const char* message);
+ void HandleLogging(WGPULoggingType loggingType, const char* message);
+ void HandleDeviceLost(WGPUDeviceLostReason reason, const char* message);
+ bool OnPopErrorScopeCallback(uint64_t requestSerial, WGPUErrorType type, const char* message);
+ bool OnCreateComputePipelineAsyncCallback(uint64_t requestSerial,
+ WGPUCreatePipelineAsyncStatus status,
+ const char* message);
+ bool OnCreateRenderPipelineAsyncCallback(uint64_t requestSerial,
+ WGPUCreatePipelineAsyncStatus status,
+ const char* message);
+
+ bool GetLimits(WGPUSupportedLimits* limits) const;
+ bool HasFeature(WGPUFeatureName feature) const;
+ size_t EnumerateFeatures(WGPUFeatureName* features) const;
+ void SetLimits(const WGPUSupportedLimits* limits);
+ void SetFeatures(const WGPUFeatureName* features, uint32_t featuresCount);
+
+ WGPUQueue GetQueue();
+
+ void CancelCallbacksForDisconnect() override;
+
+ std::weak_ptr<bool> GetAliveWeakPtr();
+
+ private:
+ LimitsAndFeatures mLimitsAndFeatures;
+ struct ErrorScopeData {
+ WGPUErrorCallback callback = nullptr;
+ void* userdata = nullptr;
+ };
+ RequestTracker<ErrorScopeData> mErrorScopes;
+
+ struct CreatePipelineAsyncRequest {
+ WGPUCreateComputePipelineAsyncCallback createComputePipelineAsyncCallback = nullptr;
+ WGPUCreateRenderPipelineAsyncCallback createRenderPipelineAsyncCallback = nullptr;
+ void* userdata = nullptr;
+ ObjectId pipelineObjectID;
};
+ RequestTracker<CreatePipelineAsyncRequest> mCreatePipelineAsyncRequests;
+
+ WGPUErrorCallback mErrorCallback = nullptr;
+ WGPUDeviceLostCallback mDeviceLostCallback = nullptr;
+ WGPULoggingCallback mLoggingCallback = nullptr;
+ bool mDidRunLostCallback = false;
+ void* mErrorUserdata = nullptr;
+ void* mDeviceLostUserdata = nullptr;
+ void* mLoggingUserdata = nullptr;
+
+ Queue* mQueue = nullptr;
+
+ std::shared_ptr<bool> mIsAlive;
+};
} // namespace dawn::wire::client
diff --git a/chromium/third_party/dawn/src/dawn/wire/client/Instance.cpp b/chromium/third_party/dawn/src/dawn/wire/client/Instance.cpp
index de27d4740e8..8e441d97276 100644
--- a/chromium/third_party/dawn/src/dawn/wire/client/Instance.cpp
+++ b/chromium/third_party/dawn/src/dawn/wire/client/Instance.cpp
@@ -18,84 +18,86 @@
namespace dawn::wire::client {
- Instance::~Instance() {
- mRequestAdapterRequests.CloseAll([](RequestAdapterData* request) {
- request->callback(WGPURequestAdapterStatus_Unknown, nullptr,
- "Instance destroyed before callback", request->userdata);
- });
+Instance::Instance(Client* c, uint32_t r, uint32_t i) : ObjectBase(c, r, i) {}
+
+Instance::~Instance() {
+ mRequestAdapterRequests.CloseAll([](RequestAdapterData* request) {
+ request->callback(WGPURequestAdapterStatus_Unknown, nullptr,
+ "Instance destroyed before callback", request->userdata);
+ });
+}
+
+void Instance::CancelCallbacksForDisconnect() {
+ mRequestAdapterRequests.CloseAll([](RequestAdapterData* request) {
+ request->callback(WGPURequestAdapterStatus_Unknown, nullptr, "GPU connection lost",
+ request->userdata);
+ });
+}
+
+void Instance::RequestAdapter(const WGPURequestAdapterOptions* options,
+ WGPURequestAdapterCallback callback,
+ void* userdata) {
+ if (client->IsDisconnected()) {
+ callback(WGPURequestAdapterStatus_Error, nullptr, "GPU connection lost", userdata);
+ return;
}
- void Instance::CancelCallbacksForDisconnect() {
- mRequestAdapterRequests.CloseAll([](RequestAdapterData* request) {
- request->callback(WGPURequestAdapterStatus_Unknown, nullptr, "GPU connection lost",
- request->userdata);
- });
+ auto* allocation = client->AdapterAllocator().New(client);
+ uint64_t serial = mRequestAdapterRequests.Add({callback, allocation->object->id, userdata});
+
+ InstanceRequestAdapterCmd cmd;
+ cmd.instanceId = this->id;
+ cmd.requestSerial = serial;
+ cmd.adapterObjectHandle = ObjectHandle(allocation->object->id, allocation->generation);
+ cmd.options = options;
+
+ client->SerializeCommand(cmd);
+}
+
+bool Client::DoInstanceRequestAdapterCallback(Instance* instance,
+ uint64_t requestSerial,
+ WGPURequestAdapterStatus status,
+ const char* message,
+ const WGPUAdapterProperties* properties,
+ const WGPUSupportedLimits* limits,
+ uint32_t featuresCount,
+ const WGPUFeatureName* features) {
+ // May have been deleted or recreated so this isn't an error.
+ if (instance == nullptr) {
+ return true;
}
-
- void Instance::RequestAdapter(const WGPURequestAdapterOptions* options,
- WGPURequestAdapterCallback callback,
- void* userdata) {
- if (client->IsDisconnected()) {
- callback(WGPURequestAdapterStatus_Error, nullptr, "GPU connection lost", userdata);
- return;
- }
-
- auto* allocation = client->AdapterAllocator().New(client);
- uint64_t serial = mRequestAdapterRequests.Add({callback, allocation->object->id, userdata});
-
- InstanceRequestAdapterCmd cmd;
- cmd.instanceId = this->id;
- cmd.requestSerial = serial;
- cmd.adapterObjectHandle = ObjectHandle(allocation->object->id, allocation->generation);
- cmd.options = options;
-
- client->SerializeCommand(cmd);
+ return instance->OnRequestAdapterCallback(requestSerial, status, message, properties, limits,
+ featuresCount, features);
+}
+
+bool Instance::OnRequestAdapterCallback(uint64_t requestSerial,
+ WGPURequestAdapterStatus status,
+ const char* message,
+ const WGPUAdapterProperties* properties,
+ const WGPUSupportedLimits* limits,
+ uint32_t featuresCount,
+ const WGPUFeatureName* features) {
+ RequestAdapterData request;
+ if (!mRequestAdapterRequests.Acquire(requestSerial, &request)) {
+ return false;
}
- bool Client::DoInstanceRequestAdapterCallback(Instance* instance,
- uint64_t requestSerial,
- WGPURequestAdapterStatus status,
- const char* message,
- const WGPUAdapterProperties* properties,
- const WGPUSupportedLimits* limits,
- uint32_t featuresCount,
- const WGPUFeatureName* features) {
- // May have been deleted or recreated so this isn't an error.
- if (instance == nullptr) {
- return true;
- }
- return instance->OnRequestAdapterCallback(requestSerial, status, message, properties,
- limits, featuresCount, features);
- }
+ Adapter* adapter = client->AdapterAllocator().GetObject(request.adapterObjectId);
- bool Instance::OnRequestAdapterCallback(uint64_t requestSerial,
- WGPURequestAdapterStatus status,
- const char* message,
- const WGPUAdapterProperties* properties,
- const WGPUSupportedLimits* limits,
- uint32_t featuresCount,
- const WGPUFeatureName* features) {
- RequestAdapterData request;
- if (!mRequestAdapterRequests.Acquire(requestSerial, &request)) {
- return false;
- }
-
- Adapter* adapter = client->AdapterAllocator().GetObject(request.adapterObjectId);
-
- // If the return status is a failure we should give a null adapter to the callback and
- // free the allocation.
- if (status != WGPURequestAdapterStatus_Success) {
- client->AdapterAllocator().Free(adapter);
- request.callback(status, nullptr, message, request.userdata);
- return true;
- }
-
- adapter->SetProperties(properties);
- adapter->SetLimits(limits);
- adapter->SetFeatures(features, featuresCount);
-
- request.callback(status, ToAPI(adapter), message, request.userdata);
+ // If the return status is a failure we should give a null adapter to the callback and
+ // free the allocation.
+ if (status != WGPURequestAdapterStatus_Success) {
+ client->AdapterAllocator().Free(adapter);
+ request.callback(status, nullptr, message, request.userdata);
return true;
}
+ adapter->SetProperties(properties);
+ adapter->SetLimits(limits);
+ adapter->SetFeatures(features, featuresCount);
+
+ request.callback(status, ToAPI(adapter), message, request.userdata);
+ return true;
+}
+
} // namespace dawn::wire::client
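
One more pattern visible in Instance::RequestAdapter above: the client-side Adapter is allocated before the command is sent, so the server can be told up front which object handle the result should bind to; if the request then fails, the callback receives nullptr and the reserved object is freed again. A rough sketch of that reserve-then-promote-or-free flow, with hypothetical types:

    #include <memory>
    #include <vector>

    struct Adapter {
        int id = 0;
    };

    class AdapterPool {
      public:
        Adapter* New() {
            mAdapters.push_back(std::make_unique<Adapter>());
            mAdapters.back()->id = mNextId++;
            return mAdapters.back().get();
        }
        void Free(Adapter* adapter) {
            for (auto& slot : mAdapters) {
                if (slot.get() == adapter) {
                    slot.reset();
                    return;
                }
            }
        }

      private:
        int mNextId = 1;
        std::vector<std::unique_ptr<Adapter>> mAdapters;
    };

    // The reply handler either promotes the pre-allocated object or frees it.
    void OnRequestAdapterReply(AdapterPool& pool,
                               Adapter* preallocated,
                               bool success,
                               void (*callback)(Adapter*, void*),
                               void* userdata) {
        if (!success) {
            pool.Free(preallocated);       // reclaim the slot reserved before the request
            callback(nullptr, userdata);   // the caller only ever sees a null adapter
            return;
        }
        callback(preallocated, userdata);  // success: the reserved object becomes live
    }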
diff --git a/chromium/third_party/dawn/src/dawn/wire/client/Instance.h b/chromium/third_party/dawn/src/dawn/wire/client/Instance.h
index b13d5079342..625bd96b576 100644
--- a/chromium/third_party/dawn/src/dawn/wire/client/Instance.h
+++ b/chromium/third_party/dawn/src/dawn/wire/client/Instance.h
@@ -15,7 +15,7 @@
#ifndef SRC_DAWN_WIRE_CLIENT_INSTANCE_H_
#define SRC_DAWN_WIRE_CLIENT_INSTANCE_H_
-#include <dawn/webgpu.h>
+#include "dawn/webgpu.h"
#include "dawn/wire/WireClient.h"
#include "dawn/wire/WireCmd_autogen.h"
@@ -24,32 +24,32 @@
namespace dawn::wire::client {
- class Instance final : public ObjectBase {
- public:
- using ObjectBase::ObjectBase;
-
- ~Instance();
- void CancelCallbacksForDisconnect() override;
-
- void RequestAdapter(const WGPURequestAdapterOptions* options,
- WGPURequestAdapterCallback callback,
- void* userdata);
- bool OnRequestAdapterCallback(uint64_t requestSerial,
- WGPURequestAdapterStatus status,
- const char* message,
- const WGPUAdapterProperties* properties,
- const WGPUSupportedLimits* limits,
- uint32_t featuresCount,
- const WGPUFeatureName* features);
-
- private:
- struct RequestAdapterData {
- WGPURequestAdapterCallback callback = nullptr;
- ObjectId adapterObjectId;
- void* userdata = nullptr;
- };
- RequestTracker<RequestAdapterData> mRequestAdapterRequests;
+class Instance final : public ObjectBase {
+ public:
+ Instance(Client* client, uint32_t refcount, uint32_t id);
+ ~Instance();
+
+ void CancelCallbacksForDisconnect() override;
+
+ void RequestAdapter(const WGPURequestAdapterOptions* options,
+ WGPURequestAdapterCallback callback,
+ void* userdata);
+ bool OnRequestAdapterCallback(uint64_t requestSerial,
+ WGPURequestAdapterStatus status,
+ const char* message,
+ const WGPUAdapterProperties* properties,
+ const WGPUSupportedLimits* limits,
+ uint32_t featuresCount,
+ const WGPUFeatureName* features);
+
+ private:
+ struct RequestAdapterData {
+ WGPURequestAdapterCallback callback = nullptr;
+ ObjectId adapterObjectId;
+ void* userdata = nullptr;
};
+ RequestTracker<RequestAdapterData> mRequestAdapterRequests;
+};
} // namespace dawn::wire::client
diff --git a/chromium/third_party/dawn/src/dawn/wire/client/LimitsAndFeatures.cpp b/chromium/third_party/dawn/src/dawn/wire/client/LimitsAndFeatures.cpp
index a2c753c9cc2..03a65004ec6 100644
--- a/chromium/third_party/dawn/src/dawn/wire/client/LimitsAndFeatures.cpp
+++ b/chromium/third_party/dawn/src/dawn/wire/client/LimitsAndFeatures.cpp
@@ -19,45 +19,49 @@
namespace dawn::wire::client {
- bool LimitsAndFeatures::GetLimits(WGPUSupportedLimits* limits) const {
- ASSERT(limits != nullptr);
- if (limits->nextInChain != nullptr) {
- return false;
- }
- *limits = mLimits;
- return true;
- }
+LimitsAndFeatures::LimitsAndFeatures() = default;
+
+LimitsAndFeatures::~LimitsAndFeatures() = default;
- bool LimitsAndFeatures::HasFeature(WGPUFeatureName feature) const {
- return mFeatures.count(feature) != 0;
+bool LimitsAndFeatures::GetLimits(WGPUSupportedLimits* limits) const {
+ ASSERT(limits != nullptr);
+ if (limits->nextInChain != nullptr) {
+ return false;
}
+ *limits = mLimits;
+ return true;
+}
- size_t LimitsAndFeatures::EnumerateFeatures(WGPUFeatureName* features) const {
- if (features != nullptr) {
- for (WGPUFeatureName f : mFeatures) {
- *features = f;
- ++features;
- }
+bool LimitsAndFeatures::HasFeature(WGPUFeatureName feature) const {
+ return mFeatures.count(feature) != 0;
+}
+
+size_t LimitsAndFeatures::EnumerateFeatures(WGPUFeatureName* features) const {
+ if (features != nullptr) {
+ for (WGPUFeatureName f : mFeatures) {
+ *features = f;
+ ++features;
}
- return mFeatures.size();
}
+ return mFeatures.size();
+}
- void LimitsAndFeatures::SetLimits(const WGPUSupportedLimits* limits) {
- ASSERT(limits != nullptr);
- mLimits = *limits;
- mLimits.nextInChain = nullptr;
- }
+void LimitsAndFeatures::SetLimits(const WGPUSupportedLimits* limits) {
+ ASSERT(limits != nullptr);
+ mLimits = *limits;
+ mLimits.nextInChain = nullptr;
+}
- void LimitsAndFeatures::SetFeatures(const WGPUFeatureName* features, uint32_t featuresCount) {
- ASSERT(features != nullptr || featuresCount == 0);
- for (uint32_t i = 0; i < featuresCount; ++i) {
- // Filter out features that the server supports, but the client does not.
- // (Could be different versions)
- if (!IsFeatureSupported(features[i])) {
- continue;
- }
- mFeatures.insert(features[i]);
+void LimitsAndFeatures::SetFeatures(const WGPUFeatureName* features, uint32_t featuresCount) {
+ ASSERT(features != nullptr || featuresCount == 0);
+ for (uint32_t i = 0; i < featuresCount; ++i) {
+ // Filter out features that the server supports, but the client does not.
+ // (Could be different versions)
+ if (!IsFeatureSupported(features[i])) {
+ continue;
}
+ mFeatures.insert(features[i]);
}
+}
} // namespace dawn::wire::client
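
EnumerateFeatures above uses the usual count-then-fill convention: call it once with a null pointer to learn how many entries there are, size a buffer, then call it again to have the entries written out. A self-contained illustration of that calling convention (hypothetical FeatureSet type, with int standing in for WGPUFeatureName):

    #include <cstddef>
    #include <cstdio>
    #include <unordered_set>
    #include <vector>

    class FeatureSet {
      public:
        void Add(int feature) { mFeatures.insert(feature); }

        // If `out` is null, only the count is returned; otherwise the entries
        // are written sequentially and the count is returned as well.
        size_t Enumerate(int* out) const {
            if (out != nullptr) {
                for (int f : mFeatures) {
                    *out++ = f;
                }
            }
            return mFeatures.size();
        }

      private:
        std::unordered_set<int> mFeatures;
    };

    int main() {
        FeatureSet set;
        set.Add(7);
        set.Add(42);

        size_t count = set.Enumerate(nullptr);  // first call: query the count
        std::vector<int> features(count);
        set.Enumerate(features.data());         // second call: fill the buffer
        std::printf("got %zu features\n", count);
    }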
diff --git a/chromium/third_party/dawn/src/dawn/wire/client/LimitsAndFeatures.h b/chromium/third_party/dawn/src/dawn/wire/client/LimitsAndFeatures.h
index bdc903b72be..cfe9353ff09 100644
--- a/chromium/third_party/dawn/src/dawn/wire/client/LimitsAndFeatures.h
+++ b/chromium/third_party/dawn/src/dawn/wire/client/LimitsAndFeatures.h
@@ -15,25 +15,28 @@
#ifndef SRC_DAWN_WIRE_CLIENT_LIMITSANDFEATURES_H_
#define SRC_DAWN_WIRE_CLIENT_LIMITSANDFEATURES_H_
-#include <dawn/webgpu.h>
-
#include <unordered_set>
+#include "dawn/webgpu.h"
+
namespace dawn::wire::client {
- class LimitsAndFeatures {
- public:
- bool GetLimits(WGPUSupportedLimits* limits) const;
- bool HasFeature(WGPUFeatureName feature) const;
- size_t EnumerateFeatures(WGPUFeatureName* features) const;
+class LimitsAndFeatures {
+ public:
+ LimitsAndFeatures();
+ ~LimitsAndFeatures();
+
+ bool GetLimits(WGPUSupportedLimits* limits) const;
+ bool HasFeature(WGPUFeatureName feature) const;
+ size_t EnumerateFeatures(WGPUFeatureName* features) const;
- void SetLimits(const WGPUSupportedLimits* limits);
- void SetFeatures(const WGPUFeatureName* features, uint32_t featuresCount);
+ void SetLimits(const WGPUSupportedLimits* limits);
+ void SetFeatures(const WGPUFeatureName* features, uint32_t featuresCount);
- private:
- WGPUSupportedLimits mLimits;
- std::unordered_set<WGPUFeatureName> mFeatures;
- };
+ private:
+ WGPUSupportedLimits mLimits;
+ std::unordered_set<WGPUFeatureName> mFeatures;
+};
} // namespace dawn::wire::client
diff --git a/chromium/third_party/dawn/src/dawn/wire/client/ObjectAllocator.h b/chromium/third_party/dawn/src/dawn/wire/client/ObjectAllocator.h
index 67ab46766cf..60b8fa875f5 100644
--- a/chromium/third_party/dawn/src/dawn/wire/client/ObjectAllocator.h
+++ b/chromium/third_party/dawn/src/dawn/wire/client/ObjectAllocator.h
@@ -15,96 +15,94 @@
#ifndef SRC_DAWN_WIRE_CLIENT_OBJECTALLOCATOR_H_
#define SRC_DAWN_WIRE_CLIENT_OBJECTALLOCATOR_H_
-#include "dawn/common/Assert.h"
-#include "dawn/common/Compiler.h"
-#include "dawn/wire/WireCmd_autogen.h"
-
#include <limits>
#include <memory>
+#include <utility>
#include <vector>
-namespace dawn::wire::client {
-
- template <typename T>
- class ObjectAllocator {
- public:
- struct ObjectAndSerial {
- ObjectAndSerial(std::unique_ptr<T> object, uint32_t generation)
- : object(std::move(object)), generation(generation) {
- }
- std::unique_ptr<T> object;
- uint32_t generation;
- };
-
- ObjectAllocator() {
- // ID 0 is nullptr
- mObjects.emplace_back(nullptr, 0);
- }
-
- template <typename Client>
- ObjectAndSerial* New(Client* client) {
- uint32_t id = GetNewId();
- auto object = std::make_unique<T>(client, 1, id);
- client->TrackObject(object.get());
-
- if (id >= mObjects.size()) {
- ASSERT(id == mObjects.size());
- mObjects.emplace_back(std::move(object), 0);
- } else {
- ASSERT(mObjects[id].object == nullptr);
+#include "dawn/common/Assert.h"
+#include "dawn/common/Compiler.h"
+#include "dawn/wire/WireCmd_autogen.h"
- mObjects[id].generation++;
- // The generation should never overflow. We don't recycle ObjectIds that would
- // overflow their next generation.
- ASSERT(mObjects[id].generation != 0);
+namespace dawn::wire::client {
- mObjects[id].object = std::move(object);
- }
+template <typename T>
+class ObjectAllocator {
+ public:
+ struct ObjectAndSerial {
+ ObjectAndSerial(std::unique_ptr<T> object, uint32_t generation)
+ : object(std::move(object)), generation(generation) {}
+ std::unique_ptr<T> object;
+ uint32_t generation;
+ };
- return &mObjects[id];
- }
- void Free(T* obj) {
- ASSERT(obj->IsInList());
- if (DAWN_LIKELY(mObjects[obj->id].generation != std::numeric_limits<uint32_t>::max())) {
- // Only recycle this ObjectId if the generation won't overflow on the next
- // allocation.
- FreeId(obj->id);
- }
- mObjects[obj->id].object = nullptr;
+ ObjectAllocator() {
+ // ID 0 is nullptr
+ mObjects.emplace_back(nullptr, 0);
+ }
+
+ template <typename Client>
+ ObjectAndSerial* New(Client* client) {
+ uint32_t id = GetNewId();
+ auto object = std::make_unique<T>(client, 1, id);
+ client->TrackObject(object.get());
+
+ if (id >= mObjects.size()) {
+ ASSERT(id == mObjects.size());
+ mObjects.emplace_back(std::move(object), 0);
+ } else {
+ ASSERT(mObjects[id].object == nullptr);
+
+ mObjects[id].generation++;
+ // The generation should never overflow. We don't recycle ObjectIds that would
+ // overflow their next generation.
+ ASSERT(mObjects[id].generation != 0);
+
+ mObjects[id].object = std::move(object);
}
- T* GetObject(uint32_t id) {
- if (id >= mObjects.size()) {
- return nullptr;
- }
- return mObjects[id].object.get();
+ return &mObjects[id];
+ }
+ void Free(T* obj) {
+ ASSERT(obj->IsInList());
+ if (DAWN_LIKELY(mObjects[obj->id].generation != std::numeric_limits<uint32_t>::max())) {
+ // Only recycle this ObjectId if the generation won't overflow on the next
+ // allocation.
+ FreeId(obj->id);
}
+ mObjects[obj->id].object = nullptr;
+ }
- uint32_t GetGeneration(uint32_t id) {
- if (id >= mObjects.size()) {
- return 0;
- }
- return mObjects[id].generation;
+ T* GetObject(uint32_t id) {
+ if (id >= mObjects.size()) {
+ return nullptr;
}
+ return mObjects[id].object.get();
+ }
- private:
- uint32_t GetNewId() {
- if (mFreeIds.empty()) {
- return mCurrentId++;
- }
- uint32_t id = mFreeIds.back();
- mFreeIds.pop_back();
- return id;
- }
- void FreeId(uint32_t id) {
- mFreeIds.push_back(id);
+ uint32_t GetGeneration(uint32_t id) {
+ if (id >= mObjects.size()) {
+ return 0;
}
+ return mObjects[id].generation;
+ }
- // 0 is an ID reserved to represent nullptr
- uint32_t mCurrentId = 1;
- std::vector<uint32_t> mFreeIds;
- std::vector<ObjectAndSerial> mObjects;
- };
+ private:
+ uint32_t GetNewId() {
+ if (mFreeIds.empty()) {
+ return mCurrentId++;
+ }
+ uint32_t id = mFreeIds.back();
+ mFreeIds.pop_back();
+ return id;
+ }
+ void FreeId(uint32_t id) { mFreeIds.push_back(id); }
+
+ // 0 is an ID reserved to represent nullptr
+ uint32_t mCurrentId = 1;
+ std::vector<uint32_t> mFreeIds;
+ std::vector<ObjectAndSerial> mObjects;
+};
} // namespace dawn::wire::client
#endif // SRC_DAWN_WIRE_CLIENT_OBJECTALLOCATOR_H_
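ObjectAllocator pairs every client-side ObjectId with a generation counter: a freed id is recycled only if its next generation cannot overflow, so a stale (id, generation) handle can never alias a freshly allocated object. A standalone sketch of just that bookkeeping, with hypothetical names and none of the wire plumbing:

    #include <cassert>
    #include <cstdint>
    #include <limits>
    #include <vector>

    struct Slot {
        bool live;
        uint32_t generation;
    };

    // Hypothetical allocator demonstrating the id/generation rules above.
    class IdAllocator {
      public:
        IdAllocator() : mSlots(1, Slot{false, 0}) {}  // slot 0 reserved to mean "null"

        uint32_t Allocate() {
            if (!mFreeIds.empty()) {
                uint32_t id = mFreeIds.back();
                mFreeIds.pop_back();
                mSlots[id].generation++;  // recycled id gets a new generation
                assert(mSlots[id].generation != 0);
                mSlots[id].live = true;
                return id;
            }
            mSlots.push_back(Slot{true, 0});
            return static_cast<uint32_t>(mSlots.size() - 1);
        }

        void Free(uint32_t id) {
            mSlots[id].live = false;
            // Only recycle ids whose next generation would not overflow.
            if (mSlots[id].generation != std::numeric_limits<uint32_t>::max()) {
                mFreeIds.push_back(id);
            }
        }

        uint32_t Generation(uint32_t id) const { return mSlots[id].generation; }

      private:
        std::vector<Slot> mSlots;
        std::vector<uint32_t> mFreeIds;
    };

    int main() {
        IdAllocator alloc;
        uint32_t a = alloc.Allocate();  // id 1, generation 0
        alloc.Free(a);
        uint32_t b = alloc.Allocate();  // id 1 reused, generation 1
        assert(a == b && alloc.Generation(b) == 1);
    }

The (id, generation) pair is what the wire serializes as an ObjectHandle, which is why the generation bump on reuse matters.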
diff --git a/chromium/third_party/dawn/src/tint/ast/uint_literal_expression_test.cc b/chromium/third_party/dawn/src/dawn/wire/client/ObjectBase.cpp
index f37816f438b..1d581ef58fa 100644
--- a/chromium/third_party/dawn/src/tint/ast/uint_literal_expression_test.cc
+++ b/chromium/third_party/dawn/src/dawn/wire/client/ObjectBase.cpp
@@ -1,4 +1,4 @@
-// Copyright 2020 The Tint Authors.
+// Copyright 2022 The Dawn Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -12,18 +12,15 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-#include "src/tint/ast/test_helper.h"
+#include "dawn/wire/client/ObjectBase.h"
-namespace tint::ast {
-namespace {
+namespace dawn::wire::client {
-using UintLiteralExpressionTest = TestHelper;
+ObjectBase::ObjectBase(Client* client, uint32_t refcount, uint32_t id)
+ : client(client), refcount(refcount), id(id) {}
-TEST_F(UintLiteralExpressionTest, Value) {
- auto* u = create<UintLiteralExpression>(47);
- ASSERT_TRUE(u->Is<UintLiteralExpression>());
- EXPECT_EQ(u->value, 47u);
+ObjectBase::~ObjectBase() {
+ RemoveFromList();
}
-} // namespace
-} // namespace tint::ast
+} // namespace dawn::wire::client
diff --git a/chromium/third_party/dawn/src/dawn/wire/client/ObjectBase.h b/chromium/third_party/dawn/src/dawn/wire/client/ObjectBase.h
index b56816c6d97..417c7c7b64a 100644
--- a/chromium/third_party/dawn/src/dawn/wire/client/ObjectBase.h
+++ b/chromium/third_party/dawn/src/dawn/wire/client/ObjectBase.h
@@ -15,36 +15,30 @@
#ifndef SRC_DAWN_WIRE_CLIENT_OBJECTBASE_H_
#define SRC_DAWN_WIRE_CLIENT_OBJECTBASE_H_
-#include <dawn/webgpu.h>
+#include "dawn/webgpu.h"
#include "dawn/common/LinkedList.h"
#include "dawn/wire/ObjectType_autogen.h"
namespace dawn::wire::client {
- class Client;
-
- // All objects on the client side have:
- // - A pointer to the Client to get where to serialize commands
- // - The external reference count
- // - An ID that is used to refer to this object when talking with the server side
- // - A next/prev pointer. They are part of a linked list of objects of the same type.
- struct ObjectBase : public LinkNode<ObjectBase> {
- ObjectBase(Client* client, uint32_t refcount, uint32_t id)
- : client(client), refcount(refcount), id(id) {
- }
-
- ~ObjectBase() {
- RemoveFromList();
- }
-
- virtual void CancelCallbacksForDisconnect() {
- }
-
- Client* const client;
- uint32_t refcount;
- const uint32_t id;
- };
+class Client;
+
+// All objects on the client side have:
+// - A pointer to the Client to get where to serialize commands
+// - The external reference count
+// - An ID that is used to refer to this object when talking with the server side
+// - A next/prev pointer. They are part of a linked list of objects of the same type.
+struct ObjectBase : public LinkNode<ObjectBase> {
+ ObjectBase(Client* client, uint32_t refcount, uint32_t id);
+ ~ObjectBase();
+
+ virtual void CancelCallbacksForDisconnect() {}
+
+ Client* const client;
+ uint32_t refcount;
+ const uint32_t id;
+};
} // namespace dawn::wire::client
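ObjectBase's constructor and destructor now live in ObjectBase.cpp, but the design is unchanged: every client object is a node in an intrusive per-type linked list owned by the Client, and the destructor unlinks itself so the list never holds a dangling pointer. A minimal standalone sketch of that intrusive-list idea (hypothetical Node/List, not dawn/common/LinkedList.h):

    #include <cstdio>

    // Intrusive node: the links live inside the object, so tracking needs no
    // extra allocation and unlinking is O(1).
    struct Node {
        Node* prev = nullptr;
        Node* next = nullptr;

        void RemoveFromList() {
            if (prev != nullptr) prev->next = next;
            if (next != nullptr) next->prev = prev;
            prev = next = nullptr;
        }
    };

    struct List {
        Node head;  // sentinel
        void Append(Node* n) {
            n->next = head.next;
            n->prev = &head;
            if (head.next != nullptr) head.next->prev = n;
            head.next = n;
        }
    };

    struct TrackedObject : Node {
        ~TrackedObject() { RemoveFromList(); }  // same idea as ObjectBase::~ObjectBase
    };

    int main() {
        List list;
        {
            TrackedObject obj;
            list.Append(&obj);
            std::printf("tracked while alive: %d\n", list.head.next == &obj);
        }  // obj unlinks itself here
        std::printf("still tracked after destruction: %d\n", list.head.next != nullptr);
    }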
diff --git a/chromium/third_party/dawn/src/dawn/wire/client/QuerySet.cpp b/chromium/third_party/dawn/src/dawn/wire/client/QuerySet.cpp
new file mode 100644
index 00000000000..c9b1a2dac2b
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/wire/client/QuerySet.cpp
@@ -0,0 +1,54 @@
+// Copyright 2022 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/wire/client/QuerySet.h"
+
+#include "dawn/wire/client/Client.h"
+#include "dawn/wire/client/Device.h"
+
+namespace dawn::wire::client {
+
+// static
+WGPUQuerySet QuerySet::Create(Device* device, const WGPUQuerySetDescriptor* descriptor) {
+ Client* wireClient = device->client;
+ auto* objectAndSerial = wireClient->QuerySetAllocator().New(wireClient);
+
+ // Copy over descriptor data for reflection.
+ QuerySet* querySet = objectAndSerial->object.get();
+ querySet->mType = descriptor->type;
+ querySet->mCount = descriptor->count;
+
+ // Send the Device::CreateQuerySet command without modifications.
+ DeviceCreateQuerySetCmd cmd;
+ cmd.self = ToAPI(device);
+ cmd.selfId = device->id;
+ cmd.descriptor = descriptor;
+ cmd.result = ObjectHandle{querySet->id, objectAndSerial->generation};
+ wireClient->SerializeCommand(cmd);
+
+ return ToAPI(querySet);
+}
+
+QuerySet::QuerySet(Client* c, uint32_t r, uint32_t i) : ObjectBase(c, r, i) {}
+QuerySet::~QuerySet() = default;
+
+WGPUQueryType QuerySet::GetType() const {
+ return mType;
+}
+
+uint32_t QuerySet::GetCount() const {
+ return mCount;
+}
+
+} // namespace dawn::wire::client
diff --git a/chromium/third_party/dawn/src/dawn/wire/client/QuerySet.h b/chromium/third_party/dawn/src/dawn/wire/client/QuerySet.h
new file mode 100644
index 00000000000..4afb9dc7c60
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/wire/client/QuerySet.h
@@ -0,0 +1,44 @@
+// Copyright 2022 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef SRC_DAWN_WIRE_CLIENT_QUERYSET_H_
+#define SRC_DAWN_WIRE_CLIENT_QUERYSET_H_
+
+#include "dawn/webgpu.h"
+
+#include "dawn/wire/client/ObjectBase.h"
+
+namespace dawn::wire::client {
+
+class Device;
+
+class QuerySet final : public ObjectBase {
+ public:
+ static WGPUQuerySet Create(Device* device, const WGPUQuerySetDescriptor* descriptor);
+
+ QuerySet(Client* client, uint32_t refcount, uint32_t id);
+ ~QuerySet();
+
+ // Note that these values can be arbitrary since they aren't validated in the wire client.
+ WGPUQueryType GetType() const;
+ uint32_t GetCount() const;
+
+ private:
+ WGPUQueryType mType;
+ uint32_t mCount;
+};
+
+} // namespace dawn::wire::client
+
+#endif // SRC_DAWN_WIRE_CLIENT_QUERYSET_H_
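QuerySet::Create (and Texture::Create later in this patch) snapshots descriptor fields on the client so the reflection getters answer locally instead of round-tripping to the server; as the header notes, those cached values are not validated in the wire client. A standalone sketch of that descriptor-caching pattern with hypothetical types:

    #include <cstdint>
    #include <cstdio>

    // Hypothetical descriptor and client-side proxy; not the Dawn types.
    enum class QueryType : uint32_t { Occlusion, Timestamp };
    struct QuerySetDescriptor {
        QueryType type;
        uint32_t count;
    };

    class ClientQuerySet {
      public:
        explicit ClientQuerySet(const QuerySetDescriptor& desc)
            : mType(desc.type), mCount(desc.count) {
            // A real wire client would also serialize a CreateQuerySet command here;
            // the cached fields exist purely for local reflection.
        }
        QueryType GetType() const { return mType; }
        uint32_t GetCount() const { return mCount; }

      private:
        QueryType mType;
        uint32_t mCount;
    };

    int main() {
        ClientQuerySet qs({QueryType::Timestamp, 8});
        std::printf("count = %u\n", qs.GetCount());  // answered locally, no server round trip
    }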
diff --git a/chromium/third_party/dawn/src/dawn/wire/client/Queue.cpp b/chromium/third_party/dawn/src/dawn/wire/client/Queue.cpp
index 37d97d7c501..618f99c65a1 100644
--- a/chromium/third_party/dawn/src/dawn/wire/client/Queue.cpp
+++ b/chromium/third_party/dawn/src/dawn/wire/client/Queue.cpp
@@ -19,80 +19,79 @@
namespace dawn::wire::client {
- Queue::~Queue() {
- ClearAllCallbacks(WGPUQueueWorkDoneStatus_Unknown);
- }
+Queue::Queue(Client* c, uint32_t r, uint32_t i) : ObjectBase(c, r, i) {}
- bool Queue::OnWorkDoneCallback(uint64_t requestSerial, WGPUQueueWorkDoneStatus status) {
- OnWorkDoneData request;
- if (!mOnWorkDoneRequests.Acquire(requestSerial, &request)) {
- return false;
- }
+Queue::~Queue() {
+ ClearAllCallbacks(WGPUQueueWorkDoneStatus_Unknown);
+}
- request.callback(status, request.userdata);
- return true;
+bool Queue::OnWorkDoneCallback(uint64_t requestSerial, WGPUQueueWorkDoneStatus status) {
+ OnWorkDoneData request;
+ if (!mOnWorkDoneRequests.Acquire(requestSerial, &request)) {
+ return false;
}
- void Queue::OnSubmittedWorkDone(uint64_t signalValue,
- WGPUQueueWorkDoneCallback callback,
- void* userdata) {
- if (client->IsDisconnected()) {
- callback(WGPUQueueWorkDoneStatus_DeviceLost, userdata);
- return;
- }
-
- uint64_t serial = mOnWorkDoneRequests.Add({callback, userdata});
+ request.callback(status, request.userdata);
+ return true;
+}
- QueueOnSubmittedWorkDoneCmd cmd;
- cmd.queueId = this->id;
- cmd.signalValue = signalValue;
- cmd.requestSerial = serial;
-
- client->SerializeCommand(cmd);
+void Queue::OnSubmittedWorkDone(uint64_t signalValue,
+ WGPUQueueWorkDoneCallback callback,
+ void* userdata) {
+ if (client->IsDisconnected()) {
+ callback(WGPUQueueWorkDoneStatus_DeviceLost, userdata);
+ return;
}
- void Queue::WriteBuffer(WGPUBuffer cBuffer,
- uint64_t bufferOffset,
- const void* data,
- size_t size) {
- Buffer* buffer = FromAPI(cBuffer);
-
- QueueWriteBufferCmd cmd;
- cmd.queueId = id;
- cmd.bufferId = buffer->id;
- cmd.bufferOffset = bufferOffset;
- cmd.data = static_cast<const uint8_t*>(data);
- cmd.size = size;
-
- client->SerializeCommand(cmd);
- }
-
- void Queue::WriteTexture(const WGPUImageCopyTexture* destination,
- const void* data,
- size_t dataSize,
- const WGPUTextureDataLayout* dataLayout,
- const WGPUExtent3D* writeSize) {
- QueueWriteTextureCmd cmd;
- cmd.queueId = id;
- cmd.destination = destination;
- cmd.data = static_cast<const uint8_t*>(data);
- cmd.dataSize = dataSize;
- cmd.dataLayout = dataLayout;
- cmd.writeSize = writeSize;
-
- client->SerializeCommand(cmd);
- }
-
- void Queue::CancelCallbacksForDisconnect() {
- ClearAllCallbacks(WGPUQueueWorkDoneStatus_DeviceLost);
- }
-
- void Queue::ClearAllCallbacks(WGPUQueueWorkDoneStatus status) {
- mOnWorkDoneRequests.CloseAll([status](OnWorkDoneData* request) {
- if (request->callback != nullptr) {
- request->callback(status, request->userdata);
- }
- });
- }
+ uint64_t serial = mOnWorkDoneRequests.Add({callback, userdata});
+
+ QueueOnSubmittedWorkDoneCmd cmd;
+ cmd.queueId = this->id;
+ cmd.signalValue = signalValue;
+ cmd.requestSerial = serial;
+
+ client->SerializeCommand(cmd);
+}
+
+void Queue::WriteBuffer(WGPUBuffer cBuffer, uint64_t bufferOffset, const void* data, size_t size) {
+ Buffer* buffer = FromAPI(cBuffer);
+
+ QueueWriteBufferCmd cmd;
+ cmd.queueId = id;
+ cmd.bufferId = buffer->id;
+ cmd.bufferOffset = bufferOffset;
+ cmd.data = static_cast<const uint8_t*>(data);
+ cmd.size = size;
+
+ client->SerializeCommand(cmd);
+}
+
+void Queue::WriteTexture(const WGPUImageCopyTexture* destination,
+ const void* data,
+ size_t dataSize,
+ const WGPUTextureDataLayout* dataLayout,
+ const WGPUExtent3D* writeSize) {
+ QueueWriteTextureCmd cmd;
+ cmd.queueId = id;
+ cmd.destination = destination;
+ cmd.data = static_cast<const uint8_t*>(data);
+ cmd.dataSize = dataSize;
+ cmd.dataLayout = dataLayout;
+ cmd.writeSize = writeSize;
+
+ client->SerializeCommand(cmd);
+}
+
+void Queue::CancelCallbacksForDisconnect() {
+ ClearAllCallbacks(WGPUQueueWorkDoneStatus_DeviceLost);
+}
+
+void Queue::ClearAllCallbacks(WGPUQueueWorkDoneStatus status) {
+ mOnWorkDoneRequests.CloseAll([status](OnWorkDoneData* request) {
+ if (request->callback != nullptr) {
+ request->callback(status, request->userdata);
+ }
+ });
+}
} // namespace dawn::wire::client
diff --git a/chromium/third_party/dawn/src/dawn/wire/client/Queue.h b/chromium/third_party/dawn/src/dawn/wire/client/Queue.h
index bd29a6f52dd..35e583ebe41 100644
--- a/chromium/third_party/dawn/src/dawn/wire/client/Queue.h
+++ b/chromium/third_party/dawn/src/dawn/wire/client/Queue.h
@@ -15,7 +15,7 @@
#ifndef SRC_DAWN_WIRE_CLIENT_QUEUE_H_
#define SRC_DAWN_WIRE_CLIENT_QUEUE_H_
-#include <dawn/webgpu.h>
+#include "dawn/webgpu.h"
#include "dawn/wire/WireClient.h"
#include "dawn/wire/client/ObjectBase.h"
@@ -23,34 +23,34 @@
namespace dawn::wire::client {
- class Queue final : public ObjectBase {
- public:
- using ObjectBase::ObjectBase;
- ~Queue();
-
- bool OnWorkDoneCallback(uint64_t requestSerial, WGPUQueueWorkDoneStatus status);
-
- // Dawn API
- void OnSubmittedWorkDone(uint64_t signalValue,
- WGPUQueueWorkDoneCallback callback,
- void* userdata);
- void WriteBuffer(WGPUBuffer cBuffer, uint64_t bufferOffset, const void* data, size_t size);
- void WriteTexture(const WGPUImageCopyTexture* destination,
- const void* data,
- size_t dataSize,
- const WGPUTextureDataLayout* dataLayout,
- const WGPUExtent3D* writeSize);
-
- private:
- void CancelCallbacksForDisconnect() override;
- void ClearAllCallbacks(WGPUQueueWorkDoneStatus status);
-
- struct OnWorkDoneData {
- WGPUQueueWorkDoneCallback callback = nullptr;
- void* userdata = nullptr;
- };
- RequestTracker<OnWorkDoneData> mOnWorkDoneRequests;
+class Queue final : public ObjectBase {
+ public:
+ Queue(Client* client, uint32_t refcount, uint32_t id);
+ ~Queue();
+
+ bool OnWorkDoneCallback(uint64_t requestSerial, WGPUQueueWorkDoneStatus status);
+
+ // Dawn API
+ void OnSubmittedWorkDone(uint64_t signalValue,
+ WGPUQueueWorkDoneCallback callback,
+ void* userdata);
+ void WriteBuffer(WGPUBuffer cBuffer, uint64_t bufferOffset, const void* data, size_t size);
+ void WriteTexture(const WGPUImageCopyTexture* destination,
+ const void* data,
+ size_t dataSize,
+ const WGPUTextureDataLayout* dataLayout,
+ const WGPUExtent3D* writeSize);
+
+ private:
+ void CancelCallbacksForDisconnect() override;
+ void ClearAllCallbacks(WGPUQueueWorkDoneStatus status);
+
+ struct OnWorkDoneData {
+ WGPUQueueWorkDoneCallback callback = nullptr;
+ void* userdata = nullptr;
};
+ RequestTracker<OnWorkDoneData> mOnWorkDoneRequests;
+};
} // namespace dawn::wire::client
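Queue::OnSubmittedWorkDone uses the wire's serial-tracked callback pattern (ShaderModule::GetCompilationInfo has the same shape): the callback is stored under a fresh serial, the serial travels inside the serialized command, and the stored callback fires exactly once when the server echoes that serial back to OnWorkDoneCallback. A standalone sketch with hypothetical types, omitting serialization and the disconnect short-circuit:

    #include <cstdint>
    #include <cstdio>
    #include <map>

    enum class WorkDoneStatus { Success, DeviceLost, Unknown };
    using WorkDoneCallback = void (*)(WorkDoneStatus, void*);

    struct Request {
        WorkDoneCallback callback;
        void* userdata;
    };

    class FakeQueue {
      public:
        // Client side: remember the callback under a fresh serial; a real client
        // would now serialize a command that carries this serial to the server.
        uint64_t OnSubmittedWorkDone(WorkDoneCallback cb, void* userdata) {
            uint64_t serial = ++mSerial;
            mRequests.emplace(serial, Request{cb, userdata});
            return serial;
        }

        // Reply path: look up the serial and fire the stored callback exactly once.
        bool OnWorkDoneReply(uint64_t serial, WorkDoneStatus status) {
            auto it = mRequests.find(serial);
            if (it == mRequests.end()) {
                return false;  // unknown or already-completed serial
            }
            Request request = it->second;
            mRequests.erase(it);
            request.callback(status, request.userdata);
            return true;
        }

      private:
        uint64_t mSerial = 0;
        std::map<uint64_t, Request> mRequests;
    };

    int main() {
        FakeQueue queue;
        uint64_t serial = queue.OnSubmittedWorkDone(
            [](WorkDoneStatus s, void*) { std::printf("work done, status %d\n", static_cast<int>(s)); },
            nullptr);
        queue.OnWorkDoneReply(serial, WorkDoneStatus::Success);
    }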
diff --git a/chromium/third_party/dawn/src/dawn/wire/client/RequestTracker.h b/chromium/third_party/dawn/src/dawn/wire/client/RequestTracker.h
index 31fe313bfaf..13b7294c851 100644
--- a/chromium/third_party/dawn/src/dawn/wire/client/RequestTracker.h
+++ b/chromium/third_party/dawn/src/dawn/wire/client/RequestTracker.h
@@ -15,67 +15,66 @@
#ifndef SRC_DAWN_WIRE_CLIENT_REQUESTTRACKER_H_
#define SRC_DAWN_WIRE_CLIENT_REQUESTTRACKER_H_
-#include "dawn/common/Assert.h"
-#include "dawn/common/NonCopyable.h"
-
#include <cstdint>
#include <map>
+#include <utility>
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/NonCopyable.h"
namespace dawn::wire::client {
- class Device;
- class MemoryTransferService;
+class Device;
+class MemoryTransferService;
- template <typename Request>
- class RequestTracker : NonCopyable {
- public:
- ~RequestTracker() {
- ASSERT(mRequests.empty());
- }
+template <typename Request>
+class RequestTracker : NonCopyable {
+ public:
+ ~RequestTracker() { ASSERT(mRequests.empty()); }
- uint64_t Add(Request&& request) {
- mSerial++;
- mRequests.emplace(mSerial, request);
- return mSerial;
- }
+ uint64_t Add(Request&& request) {
+ mSerial++;
+ mRequests.emplace(mSerial, request);
+ return mSerial;
+ }
- bool Acquire(uint64_t serial, Request* request) {
- auto it = mRequests.find(serial);
- if (it == mRequests.end()) {
- return false;
- }
- *request = std::move(it->second);
- mRequests.erase(it);
- return true;
+ bool Acquire(uint64_t serial, Request* request) {
+ auto it = mRequests.find(serial);
+ if (it == mRequests.end()) {
+ return false;
}
+ *request = std::move(it->second);
+ mRequests.erase(it);
+ return true;
+ }
- template <typename CloseFunc>
- void CloseAll(CloseFunc&& closeFunc) {
- // Call closeFunc on all requests while handling reentrancy where the callback of some
- // requests may add some additional requests. We guarantee all callbacks for requests
- // are called exactly onces, so keep closing new requests if the first batch added more.
- // It is fine to loop infinitely here if that's what the application makes use do.
- while (!mRequests.empty()) {
- // Move mRequests to a local variable so that further reentrant modifications of
- // mRequests don't invalidate the iterators.
- auto allRequests = std::move(mRequests);
- for (auto& [_, request] : allRequests) {
- closeFunc(&request);
- }
+ template <typename CloseFunc>
+ void CloseAll(CloseFunc&& closeFunc) {
+        // Call closeFunc on all requests while handling reentrancy, where the callback of some
+        // requests may add additional requests. We guarantee all callbacks for requests
+        // are called exactly once, so keep closing new requests if the first batch added more.
+        // It is fine to loop infinitely here if that's what the application makes us do.
+ while (!mRequests.empty()) {
+ // Move mRequests to a local variable so that further reentrant modifications of
+ // mRequests don't invalidate the iterators.
+ auto allRequests = std::move(mRequests);
+ for (auto& [_, request] : allRequests) {
+ closeFunc(&request);
}
}
+ }
- template <typename F>
- void ForAll(F&& f) {
- for (auto& [_, request] : mRequests) {
- f(&request);
- }
+ template <typename F>
+ void ForAll(F&& f) {
+ for (auto& [_, request] : mRequests) {
+ f(&request);
}
+ }
- private:
- uint64_t mSerial;
- std::map<uint64_t, Request> mRequests;
- };
+ private:
+ uint64_t mSerial = 0;
+ std::map<uint64_t, Request> mRequests;
+};
} // namespace dawn::wire::client
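CloseAll's drain loop is what backs the "called exactly once" guarantee in the comment above: each pass moves the map into a local, so a callback that re-enters Add cannot invalidate the iteration, and the outer while picks up whatever the callbacks added. A standalone sketch of that loop with hypothetical types:

    #include <cstdio>
    #include <functional>
    #include <map>

    class Tracker {
      public:
        uint64_t Add(std::function<void()> cb) {
            mRequests.emplace(++mSerial, std::move(cb));
            return mSerial;
        }

        void CloseAll() {
            // Keep draining until empty: callbacks may add new requests while we close.
            while (!mRequests.empty()) {
                auto batch = std::move(mRequests);  // reentrant Add() refills mRequests
                mRequests.clear();                  // leave the moved-from map defined-empty
                for (auto& [serial, cb] : batch) {
                    std::printf("closing request %llu\n",
                                static_cast<unsigned long long>(serial));
                    cb();
                }
            }
        }

      private:
        uint64_t mSerial = 0;
        std::map<uint64_t, std::function<void()>> mRequests;
    };

    int main() {
        Tracker tracker;
        tracker.Add([&tracker] {
            // This callback schedules more work while CloseAll is running.
            tracker.Add([] { std::puts("second-batch callback ran exactly once"); });
        });
        tracker.CloseAll();
    }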
diff --git a/chromium/third_party/dawn/src/dawn/wire/client/ShaderModule.cpp b/chromium/third_party/dawn/src/dawn/wire/client/ShaderModule.cpp
index ce25ef77767..63bc534b371 100644
--- a/chromium/third_party/dawn/src/dawn/wire/client/ShaderModule.cpp
+++ b/chromium/third_party/dawn/src/dawn/wire/client/ShaderModule.cpp
@@ -18,47 +18,49 @@
namespace dawn::wire::client {
- ShaderModule::~ShaderModule() {
- ClearAllCallbacks(WGPUCompilationInfoRequestStatus_Unknown);
- }
+ShaderModule::ShaderModule(Client* c, uint32_t r, uint32_t i) : ObjectBase(c, r, i) {}
- void ShaderModule::GetCompilationInfo(WGPUCompilationInfoCallback callback, void* userdata) {
- if (client->IsDisconnected()) {
- callback(WGPUCompilationInfoRequestStatus_DeviceLost, nullptr, userdata);
- return;
- }
+ShaderModule::~ShaderModule() {
+ ClearAllCallbacks(WGPUCompilationInfoRequestStatus_Unknown);
+}
- uint64_t serial = mCompilationInfoRequests.Add({callback, userdata});
+void ShaderModule::GetCompilationInfo(WGPUCompilationInfoCallback callback, void* userdata) {
+ if (client->IsDisconnected()) {
+ callback(WGPUCompilationInfoRequestStatus_DeviceLost, nullptr, userdata);
+ return;
+ }
- ShaderModuleGetCompilationInfoCmd cmd;
- cmd.shaderModuleId = this->id;
- cmd.requestSerial = serial;
+ uint64_t serial = mCompilationInfoRequests.Add({callback, userdata});
- client->SerializeCommand(cmd);
- }
+ ShaderModuleGetCompilationInfoCmd cmd;
+ cmd.shaderModuleId = this->id;
+ cmd.requestSerial = serial;
- bool ShaderModule::GetCompilationInfoCallback(uint64_t requestSerial,
- WGPUCompilationInfoRequestStatus status,
- const WGPUCompilationInfo* info) {
- CompilationInfoRequest request;
- if (!mCompilationInfoRequests.Acquire(requestSerial, &request)) {
- return false;
- }
+ client->SerializeCommand(cmd);
+}
- request.callback(status, info, request.userdata);
- return true;
+bool ShaderModule::GetCompilationInfoCallback(uint64_t requestSerial,
+ WGPUCompilationInfoRequestStatus status,
+ const WGPUCompilationInfo* info) {
+ CompilationInfoRequest request;
+ if (!mCompilationInfoRequests.Acquire(requestSerial, &request)) {
+ return false;
}
- void ShaderModule::CancelCallbacksForDisconnect() {
- ClearAllCallbacks(WGPUCompilationInfoRequestStatus_DeviceLost);
- }
+ request.callback(status, info, request.userdata);
+ return true;
+}
- void ShaderModule::ClearAllCallbacks(WGPUCompilationInfoRequestStatus status) {
- mCompilationInfoRequests.CloseAll([status](CompilationInfoRequest* request) {
- if (request->callback != nullptr) {
- request->callback(status, nullptr, request->userdata);
- }
- });
- }
+void ShaderModule::CancelCallbacksForDisconnect() {
+ ClearAllCallbacks(WGPUCompilationInfoRequestStatus_DeviceLost);
+}
+
+void ShaderModule::ClearAllCallbacks(WGPUCompilationInfoRequestStatus status) {
+ mCompilationInfoRequests.CloseAll([status](CompilationInfoRequest* request) {
+ if (request->callback != nullptr) {
+ request->callback(status, nullptr, request->userdata);
+ }
+ });
+}
} // namespace dawn::wire::client
diff --git a/chromium/third_party/dawn/src/dawn/wire/client/ShaderModule.h b/chromium/third_party/dawn/src/dawn/wire/client/ShaderModule.h
index c74c0d270c4..5ae920d8bc1 100644
--- a/chromium/third_party/dawn/src/dawn/wire/client/ShaderModule.h
+++ b/chromium/third_party/dawn/src/dawn/wire/client/ShaderModule.h
@@ -15,33 +15,33 @@
#ifndef SRC_DAWN_WIRE_CLIENT_SHADERMODULE_H_
#define SRC_DAWN_WIRE_CLIENT_SHADERMODULE_H_
-#include <dawn/webgpu.h>
+#include "dawn/webgpu.h"
#include "dawn/wire/client/ObjectBase.h"
#include "dawn/wire/client/RequestTracker.h"
namespace dawn::wire::client {
- class ShaderModule final : public ObjectBase {
- public:
- using ObjectBase::ObjectBase;
- ~ShaderModule();
-
- void GetCompilationInfo(WGPUCompilationInfoCallback callback, void* userdata);
- bool GetCompilationInfoCallback(uint64_t requestSerial,
- WGPUCompilationInfoRequestStatus status,
- const WGPUCompilationInfo* info);
-
- private:
- void CancelCallbacksForDisconnect() override;
- void ClearAllCallbacks(WGPUCompilationInfoRequestStatus status);
-
- struct CompilationInfoRequest {
- WGPUCompilationInfoCallback callback = nullptr;
- void* userdata = nullptr;
- };
- RequestTracker<CompilationInfoRequest> mCompilationInfoRequests;
+class ShaderModule final : public ObjectBase {
+ public:
+ ShaderModule(Client* client, uint32_t refcount, uint32_t id);
+ ~ShaderModule();
+
+ void GetCompilationInfo(WGPUCompilationInfoCallback callback, void* userdata);
+ bool GetCompilationInfoCallback(uint64_t requestSerial,
+ WGPUCompilationInfoRequestStatus status,
+ const WGPUCompilationInfo* info);
+
+ private:
+ void CancelCallbacksForDisconnect() override;
+ void ClearAllCallbacks(WGPUCompilationInfoRequestStatus status);
+
+ struct CompilationInfoRequest {
+ WGPUCompilationInfoCallback callback = nullptr;
+ void* userdata = nullptr;
};
+ RequestTracker<CompilationInfoRequest> mCompilationInfoRequests;
+};
} // namespace dawn::wire::client
diff --git a/chromium/third_party/dawn/src/dawn/wire/client/Texture.cpp b/chromium/third_party/dawn/src/dawn/wire/client/Texture.cpp
new file mode 100644
index 00000000000..f4c0b972365
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/wire/client/Texture.cpp
@@ -0,0 +1,82 @@
+// Copyright 2022 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/wire/client/Texture.h"
+
+#include "dawn/wire/client/Client.h"
+#include "dawn/wire/client/Device.h"
+
+namespace dawn::wire::client {
+
+// static
+WGPUTexture Texture::Create(Device* device, const WGPUTextureDescriptor* descriptor) {
+ Client* wireClient = device->client;
+ auto* textureObjectAndSerial = wireClient->TextureAllocator().New(wireClient);
+
+ // Copy over descriptor data for reflection.
+ Texture* texture = textureObjectAndSerial->object.get();
+ texture->mSize = descriptor->size;
+ texture->mMipLevelCount = descriptor->mipLevelCount;
+ texture->mSampleCount = descriptor->sampleCount;
+ texture->mDimension = descriptor->dimension;
+ texture->mFormat = descriptor->format;
+ texture->mUsage = static_cast<WGPUTextureUsage>(descriptor->usage);
+
+ // Send the Device::CreateTexture command without modifications.
+ DeviceCreateTextureCmd cmd;
+ cmd.self = ToAPI(device);
+ cmd.selfId = device->id;
+ cmd.descriptor = descriptor;
+ cmd.result = ObjectHandle{texture->id, textureObjectAndSerial->generation};
+ wireClient->SerializeCommand(cmd);
+
+ return ToAPI(texture);
+}
+
+Texture::Texture(Client* c, uint32_t r, uint32_t i) : ObjectBase(c, r, i) {}
+Texture::~Texture() = default;
+
+uint32_t Texture::GetWidth() const {
+ return mSize.width;
+}
+
+uint32_t Texture::GetHeight() const {
+ return mSize.height;
+}
+
+uint32_t Texture::GetDepthOrArrayLayers() const {
+ return mSize.depthOrArrayLayers;
+}
+
+uint32_t Texture::GetMipLevelCount() const {
+ return mMipLevelCount;
+}
+
+uint32_t Texture::GetSampleCount() const {
+ return mSampleCount;
+}
+
+WGPUTextureDimension Texture::GetDimension() const {
+ return mDimension;
+}
+
+WGPUTextureFormat Texture::GetFormat() const {
+ return mFormat;
+}
+
+WGPUTextureUsage Texture::GetUsage() const {
+ return mUsage;
+}
+
+} // namespace dawn::wire::client
diff --git a/chromium/third_party/dawn/src/dawn/wire/client/Texture.h b/chromium/third_party/dawn/src/dawn/wire/client/Texture.h
new file mode 100644
index 00000000000..0bce2ea66ca
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/wire/client/Texture.h
@@ -0,0 +1,54 @@
+// Copyright 2022 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef SRC_DAWN_WIRE_CLIENT_TEXTURE_H_
+#define SRC_DAWN_WIRE_CLIENT_TEXTURE_H_
+
+#include "dawn/webgpu.h"
+
+#include "dawn/wire/client/ObjectBase.h"
+
+namespace dawn::wire::client {
+
+class Device;
+
+class Texture final : public ObjectBase {
+ public:
+ static WGPUTexture Create(Device* device, const WGPUTextureDescriptor* descriptor);
+
+ Texture(Client* client, uint32_t refcount, uint32_t id);
+ ~Texture();
+
+ // Note that these values can be arbitrary since they aren't validated in the wire client.
+ uint32_t GetWidth() const;
+ uint32_t GetHeight() const;
+ uint32_t GetDepthOrArrayLayers() const;
+ uint32_t GetMipLevelCount() const;
+ uint32_t GetSampleCount() const;
+ WGPUTextureDimension GetDimension() const;
+ WGPUTextureFormat GetFormat() const;
+ WGPUTextureUsage GetUsage() const;
+
+ private:
+ WGPUExtent3D mSize;
+ uint32_t mMipLevelCount;
+ uint32_t mSampleCount;
+ WGPUTextureDimension mDimension;
+ WGPUTextureFormat mFormat;
+ WGPUTextureUsage mUsage;
+};
+
+} // namespace dawn::wire::client
+
+#endif // SRC_DAWN_WIRE_CLIENT_TEXTURE_H_
diff --git a/chromium/third_party/dawn/src/dawn/wire/server/ObjectStorage.h b/chromium/third_party/dawn/src/dawn/wire/server/ObjectStorage.h
index 5487735371e..1bef0ad844b 100644
--- a/chromium/third_party/dawn/src/dawn/wire/server/ObjectStorage.h
+++ b/chromium/third_party/dawn/src/dawn/wire/server/ObjectStorage.h
@@ -15,213 +15,240 @@
#ifndef SRC_DAWN_WIRE_SERVER_OBJECTSTORAGE_H_
#define SRC_DAWN_WIRE_SERVER_OBJECTSTORAGE_H_
-#include "dawn/wire/WireCmd_autogen.h"
-#include "dawn/wire/WireServer.h"
-
#include <algorithm>
#include <map>
+#include <memory>
#include <unordered_set>
+#include <utility>
+#include <vector>
-namespace dawn::wire::server {
+#include "dawn/wire/WireCmd_autogen.h"
+#include "dawn/wire/WireServer.h"
- struct DeviceInfo {
- std::unordered_set<uint64_t> childObjectTypesAndIds;
- Server* server;
- ObjectHandle self;
- };
-
- // Whether this object has been allocated, or reserved for async object creation.
- // Used by the KnownObjects queries
- enum class AllocationState : uint32_t {
- Free,
- Reserved,
- Allocated,
- };
-
- template <typename T>
- struct ObjectDataBase {
- // The backend-provided handle and generation to this object.
- T handle;
- uint32_t generation = 0;
-
- AllocationState state;
-
- // This points to an allocation that is owned by the device.
- DeviceInfo* deviceInfo = nullptr;
- };
-
- // Stores what the backend knows about the type.
- template <typename T>
- struct ObjectData : public ObjectDataBase<T> {};
-
- enum class BufferMapWriteState { Unmapped, Mapped, MapError };
-
- template <>
- struct ObjectData<WGPUBuffer> : public ObjectDataBase<WGPUBuffer> {
- // TODO(enga): Use a tagged pointer to save space.
- std::unique_ptr<MemoryTransferService::ReadHandle> readHandle;
- std::unique_ptr<MemoryTransferService::WriteHandle> writeHandle;
- BufferMapWriteState mapWriteState = BufferMapWriteState::Unmapped;
- WGPUBufferUsageFlags usage = WGPUBufferUsage_None;
- // Indicate if writeHandle needs to be destroyed on unmap
- bool mappedAtCreation = false;
- };
-
- // Pack the ObjectType and ObjectId as a single value for storage in
- // an std::unordered_set. This lets us avoid providing our own hash and
- // equality comparison operators.
- inline uint64_t PackObjectTypeAndId(ObjectType type, ObjectId id) {
- static_assert(sizeof(ObjectType) * 8 <= 32);
- static_assert(sizeof(ObjectId) * 8 <= 32);
- return (static_cast<uint64_t>(type) << 32) + id;
- }
+namespace dawn::wire::server {
- inline std::pair<ObjectType, ObjectId> UnpackObjectTypeAndId(uint64_t payload) {
- ObjectType type = static_cast<ObjectType>(payload >> 32);
- ObjectId id = payload & 0xFFFFFFFF;
- return std::make_pair(type, id);
+// Whether this object has been allocated, or reserved for async object creation.
+// Used by the KnownObjects queries
+enum class AllocationState : uint32_t {
+ Free,
+ Reserved,
+ Allocated,
+};
+
+template <typename T>
+struct ObjectDataBase {
+ // The backend-provided handle and generation to this object.
+ T handle;
+ uint32_t generation = 0;
+
+ AllocationState state;
+};
+
+// Stores what the backend knows about the type.
+template <typename T>
+struct ObjectData : public ObjectDataBase<T> {};
+
+enum class BufferMapWriteState { Unmapped, Mapped, MapError };
+
+template <>
+struct ObjectData<WGPUBuffer> : public ObjectDataBase<WGPUBuffer> {
+ // TODO(enga): Use a tagged pointer to save space.
+ std::unique_ptr<MemoryTransferService::ReadHandle> readHandle;
+ std::unique_ptr<MemoryTransferService::WriteHandle> writeHandle;
+ BufferMapWriteState mapWriteState = BufferMapWriteState::Unmapped;
+ WGPUBufferUsageFlags usage = WGPUBufferUsage_None;
+ // Indicate if writeHandle needs to be destroyed on unmap
+ bool mappedAtCreation = false;
+};
+
+struct DeviceInfo {
+ Server* server;
+ ObjectHandle self;
+};
+
+template <>
+struct ObjectData<WGPUDevice> : public ObjectDataBase<WGPUDevice> {
+ // Store |info| as a separate allocation so that its address does not move.
+    // The pointer to |info| is used as the userdata for the device callbacks.
+ std::unique_ptr<DeviceInfo> info = std::make_unique<DeviceInfo>();
+};
+
+// Keeps track of the mapping between client IDs and backend objects.
+template <typename T>
+class KnownObjectsBase {
+ public:
+ using Data = ObjectData<T>;
+
+ KnownObjectsBase() {
+ // Reserve ID 0 so that it can be used to represent nullptr for optional object values
+        // in the wire format. However, don't tag it as allocated so that it is an error to ask
+ // KnownObjects for ID 0.
+ Data reservation;
+ reservation.handle = nullptr;
+ reservation.state = AllocationState::Free;
+ mKnown.push_back(std::move(reservation));
}
- template <>
- struct ObjectData<WGPUDevice> : public ObjectDataBase<WGPUDevice> {
- // Store |info| as a separate allocation so that its address does not move.
- // The pointer to |info| is stored in device child objects.
- std::unique_ptr<DeviceInfo> info = std::make_unique<DeviceInfo>();
- };
-
- // Keeps track of the mapping between client IDs and backend objects.
- template <typename T>
- class KnownObjects {
- public:
- using Data = ObjectData<T>;
-
- KnownObjects() {
- // Reserve ID 0 so that it can be used to represent nullptr for optional object values
- // in the wire format. However don't tag it as allocated so that it is an error to ask
- // KnownObjects for ID 0.
- Data reservation;
- reservation.handle = nullptr;
- reservation.state = AllocationState::Free;
- mKnown.push_back(std::move(reservation));
+    // Get the backend object for a given client ID.
+ // Returns nullptr if the ID hasn't previously been allocated.
+ const Data* Get(uint32_t id) const {
+ if (id >= mKnown.size()) {
+ return nullptr;
}
- // Get a backend objects for a given client ID.
- // Returns nullptr if the ID hasn't previously been allocated.
- const Data* Get(uint32_t id, AllocationState expected = AllocationState::Allocated) const {
- if (id >= mKnown.size()) {
- return nullptr;
- }
+ const Data* data = &mKnown[id];
+ if (data->state != AllocationState::Allocated) {
+ return nullptr;
+ }
+ return data;
+ }
+ Data* Get(uint32_t id) {
+ if (id >= mKnown.size()) {
+ return nullptr;
+ }
- const Data* data = &mKnown[id];
+ Data* data = &mKnown[id];
+ if (data->state != AllocationState::Allocated) {
+ return nullptr;
+ }
+ return data;
+ }
- if (data->state != expected) {
- return nullptr;
- }
+ Data* FillReservation(uint32_t id, T handle) {
+ ASSERT(id < mKnown.size());
+ Data* data = &mKnown[id];
+ ASSERT(data->state == AllocationState::Reserved);
+ data->handle = handle;
+ data->state = AllocationState::Allocated;
+ return data;
+ }
- return data;
+ // Allocates the data for a given ID and returns it.
+ // Returns nullptr if the ID is already allocated, or too far ahead, or if ID is 0 (ID 0 is
+ // reserved for nullptr). Invalidates all the Data*
+ Data* Allocate(uint32_t id, AllocationState state = AllocationState::Allocated) {
+ if (id == 0 || id > mKnown.size()) {
+ return nullptr;
}
- Data* Get(uint32_t id, AllocationState expected = AllocationState::Allocated) {
- if (id >= mKnown.size()) {
- return nullptr;
- }
- Data* data = &mKnown[id];
+ Data data;
+ data.state = state;
+ data.handle = nullptr;
- if (data->state != expected) {
- return nullptr;
- }
+ if (id >= mKnown.size()) {
+ mKnown.push_back(std::move(data));
+ return &mKnown.back();
+ }
- return data;
+ if (mKnown[id].state != AllocationState::Free) {
+ return nullptr;
}
- // Allocates the data for a given ID and returns it.
- // Returns nullptr if the ID is already allocated, or too far ahead, or if ID is 0 (ID 0 is
- // reserved for nullptr). Invalidates all the Data*
- Data* Allocate(uint32_t id, AllocationState state = AllocationState::Allocated) {
- if (id == 0 || id > mKnown.size()) {
- return nullptr;
- }
+ mKnown[id] = std::move(data);
+ return &mKnown[id];
+ }
- Data data;
- data.state = state;
- data.handle = nullptr;
+ // Marks an ID as deallocated
+ void Free(uint32_t id) {
+ ASSERT(id < mKnown.size());
+ mKnown[id].state = AllocationState::Free;
+ }
- if (id >= mKnown.size()) {
- mKnown.push_back(std::move(data));
- return &mKnown.back();
+ std::vector<T> AcquireAllHandles() {
+ std::vector<T> objects;
+ for (Data& data : mKnown) {
+ if (data.state == AllocationState::Allocated && data.handle != nullptr) {
+ objects.push_back(data.handle);
+ data.state = AllocationState::Free;
+ data.handle = nullptr;
}
+ }
- if (mKnown[id].state != AllocationState::Free) {
- return nullptr;
- }
+ return objects;
+ }
- mKnown[id] = std::move(data);
- return &mKnown[id];
+ std::vector<T> GetAllHandles() const {
+ std::vector<T> objects;
+ for (const Data& data : mKnown) {
+ if (data.state == AllocationState::Allocated && data.handle != nullptr) {
+ objects.push_back(data.handle);
+ }
}
- // Marks an ID as deallocated
- void Free(uint32_t id) {
- ASSERT(id < mKnown.size());
- mKnown[id].state = AllocationState::Free;
- }
+ return objects;
+ }
- std::vector<T> AcquireAllHandles() {
- std::vector<T> objects;
- for (Data& data : mKnown) {
- if (data.state == AllocationState::Allocated && data.handle != nullptr) {
- objects.push_back(data.handle);
- data.state = AllocationState::Free;
- data.handle = nullptr;
- }
- }
+ protected:
+ std::vector<Data> mKnown;
+};
+
+template <typename T>
+class KnownObjects : public KnownObjectsBase<T> {
+ public:
+ KnownObjects() = default;
+};
+
+template <>
+class KnownObjects<WGPUDevice> : public KnownObjectsBase<WGPUDevice> {
+ public:
+ KnownObjects() = default;
+
+ Data* Allocate(uint32_t id, AllocationState state = AllocationState::Allocated) {
+ Data* data = KnownObjectsBase<WGPUDevice>::Allocate(id, state);
+ AddToKnownSet(data);
+ return data;
+ }
- return objects;
- }
+ Data* FillReservation(uint32_t id, WGPUDevice handle) {
+ Data* data = KnownObjectsBase<WGPUDevice>::FillReservation(id, handle);
+ AddToKnownSet(data);
+ return data;
+ }
- std::vector<T> GetAllHandles() {
- std::vector<T> objects;
- for (Data& data : mKnown) {
- if (data.state == AllocationState::Allocated && data.handle != nullptr) {
- objects.push_back(data.handle);
- }
- }
+ void Free(uint32_t id) {
+ mKnownSet.erase(mKnown[id].handle);
+ KnownObjectsBase<WGPUDevice>::Free(id);
+ }
- return objects;
- }
+ bool IsKnown(WGPUDevice device) const { return mKnownSet.count(device) != 0; }
- private:
- std::vector<Data> mKnown;
- };
-
- // ObjectIds are lost in deserialization. Store the ids of deserialized
- // objects here so they can be used in command handlers. This is useful
- // for creating ReturnWireCmds which contain client ids
- template <typename T>
- class ObjectIdLookupTable {
- public:
- void Store(T key, ObjectId id) {
- mTable[key] = id;
+ private:
+ void AddToKnownSet(Data* data) {
+ if (data != nullptr && data->state == AllocationState::Allocated &&
+ data->handle != nullptr) {
+ mKnownSet.insert(data->handle);
}
-
- // Return the cached ObjectId, or 0 (null handle)
- ObjectId Get(T key) const {
- const auto it = mTable.find(key);
- if (it != mTable.end()) {
- return it->second;
- }
- return 0;
+ }
+ std::unordered_set<WGPUDevice> mKnownSet;
+};
+
+// ObjectIds are lost in deserialization. Store the ids of deserialized
+// objects here so they can be used in command handlers. This is useful
+// for creating ReturnWireCmds which contain client ids
+template <typename T>
+class ObjectIdLookupTable {
+ public:
+ void Store(T key, ObjectId id) { mTable[key] = id; }
+
+ // Return the cached ObjectId, or 0 (null handle)
+ ObjectId Get(T key) const {
+ const auto it = mTable.find(key);
+ if (it != mTable.end()) {
+ return it->second;
}
+ return 0;
+ }
- void Remove(T key) {
- auto it = mTable.find(key);
- if (it != mTable.end()) {
- mTable.erase(it);
- }
+ void Remove(T key) {
+ auto it = mTable.find(key);
+ if (it != mTable.end()) {
+ mTable.erase(it);
}
+ }
- private:
- std::map<T, ObjectId> mTable;
- };
+ private:
+ std::map<T, ObjectId> mTable;
+};
} // namespace dawn::wire::server
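KnownObjectsBase keeps a dense slot table indexed by client id: slot 0 is reserved so id 0 can represent the null object on the wire, ids may not skip ahead of the table, and each slot is Free, Reserved (for async creation), or Allocated. A standalone sketch of those allocation rules with hypothetical types (the generation field and the WGPUDevice known-set specialization are left out):

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    enum class AllocationState { Free, Reserved, Allocated };

    struct Slot {
        void* handle = nullptr;
        AllocationState state = AllocationState::Free;
    };

    class KnownObjects {
      public:
        KnownObjects() { mKnown.resize(1); }  // reserve id 0, left in the Free state

        Slot* Allocate(uint32_t id) {
            if (id == 0 || id > mKnown.size()) {
                return nullptr;  // id 0 is reserved; ids may not skip ahead
            }
            if (id == mKnown.size()) {
                mKnown.emplace_back();
            } else if (mKnown[id].state != AllocationState::Free) {
                return nullptr;  // already reserved or allocated
            }
            mKnown[id].state = AllocationState::Allocated;
            return &mKnown[id];
        }

        const Slot* Get(uint32_t id) const {
            if (id >= mKnown.size() || mKnown[id].state != AllocationState::Allocated) {
                return nullptr;
            }
            return &mKnown[id];
        }

      private:
        std::vector<Slot> mKnown;
    };

    int main() {
        KnownObjects objects;
        std::printf("get id 0 -> %p\n", static_cast<const void*>(objects.Get(0)));           // null
        std::printf("allocate id 1 -> %p\n", static_cast<const void*>(objects.Allocate(1)));
        std::printf("allocate id 3 -> %p\n", static_cast<const void*>(objects.Allocate(3)));  // null, skipped ahead
    }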
diff --git a/chromium/third_party/dawn/src/dawn/wire/server/Server.cpp b/chromium/third_party/dawn/src/dawn/wire/server/Server.cpp
index b0d4ba2534f..2796966a4cd 100644
--- a/chromium/third_party/dawn/src/dawn/wire/server/Server.cpp
+++ b/chromium/third_party/dawn/src/dawn/wire/server/Server.cpp
@@ -17,197 +17,177 @@
namespace dawn::wire::server {
- Server::Server(const DawnProcTable& procs,
- CommandSerializer* serializer,
- MemoryTransferService* memoryTransferService)
- : mSerializer(serializer),
- mProcs(procs),
- mMemoryTransferService(memoryTransferService),
- mIsAlive(std::make_shared<bool>(true)) {
- if (mMemoryTransferService == nullptr) {
- // If a MemoryTransferService is not provided, fallback to inline memory.
- mOwnedMemoryTransferService = CreateInlineMemoryTransferService();
- mMemoryTransferService = mOwnedMemoryTransferService.get();
- }
+CallbackUserdata::CallbackUserdata(Server* server, const std::shared_ptr<bool>& serverIsAlive)
+ : server(server), serverIsAlive(serverIsAlive) {}
+
+Server::Server(const DawnProcTable& procs,
+ CommandSerializer* serializer,
+ MemoryTransferService* memoryTransferService)
+ : mSerializer(serializer),
+ mProcs(procs),
+ mMemoryTransferService(memoryTransferService),
+ mIsAlive(std::make_shared<bool>(true)) {
+ if (mMemoryTransferService == nullptr) {
+ // If a MemoryTransferService is not provided, fallback to inline memory.
+ mOwnedMemoryTransferService = CreateInlineMemoryTransferService();
+ mMemoryTransferService = mOwnedMemoryTransferService.get();
}
+}
- Server::~Server() {
- // Un-set the error and lost callbacks since we cannot forward them
- // after the server has been destroyed.
- for (WGPUDevice device : DeviceObjects().GetAllHandles()) {
- ClearDeviceCallbacks(device);
- }
- DestroyAllObjects(mProcs);
+Server::~Server() {
+ // Un-set the error and lost callbacks since we cannot forward them
+ // after the server has been destroyed.
+ for (WGPUDevice device : DeviceObjects().GetAllHandles()) {
+ ClearDeviceCallbacks(device);
+ }
+ DestroyAllObjects(mProcs);
+}
+
+bool Server::InjectTexture(WGPUTexture texture,
+ uint32_t id,
+ uint32_t generation,
+ uint32_t deviceId,
+ uint32_t deviceGeneration) {
+ ASSERT(texture != nullptr);
+ ObjectData<WGPUDevice>* device = DeviceObjects().Get(deviceId);
+ if (device == nullptr || device->generation != deviceGeneration) {
+ return false;
}
- bool Server::InjectTexture(WGPUTexture texture,
- uint32_t id,
- uint32_t generation,
- uint32_t deviceId,
- uint32_t deviceGeneration) {
- ASSERT(texture != nullptr);
- ObjectData<WGPUDevice>* device = DeviceObjects().Get(deviceId);
- if (device == nullptr || device->generation != deviceGeneration) {
- return false;
- }
-
- ObjectData<WGPUTexture>* data = TextureObjects().Allocate(id);
- if (data == nullptr) {
- return false;
- }
-
- data->handle = texture;
- data->generation = generation;
- data->state = AllocationState::Allocated;
- data->deviceInfo = device->info.get();
-
- if (!TrackDeviceChild(data->deviceInfo, ObjectType::Texture, id)) {
- return false;
- }
-
- // The texture is externally owned so it shouldn't be destroyed when we receive a destroy
- // message from the client. Add a reference to counterbalance the eventual release.
- mProcs.textureReference(texture);
-
- return true;
+ ObjectData<WGPUTexture>* data = TextureObjects().Allocate(id);
+ if (data == nullptr) {
+ return false;
}
- bool Server::InjectSwapChain(WGPUSwapChain swapchain,
- uint32_t id,
- uint32_t generation,
- uint32_t deviceId,
- uint32_t deviceGeneration) {
- ASSERT(swapchain != nullptr);
- ObjectData<WGPUDevice>* device = DeviceObjects().Get(deviceId);
- if (device == nullptr || device->generation != deviceGeneration) {
- return false;
- }
-
- ObjectData<WGPUSwapChain>* data = SwapChainObjects().Allocate(id);
- if (data == nullptr) {
- return false;
- }
-
- data->handle = swapchain;
- data->generation = generation;
- data->state = AllocationState::Allocated;
- data->deviceInfo = device->info.get();
-
- if (!TrackDeviceChild(data->deviceInfo, ObjectType::SwapChain, id)) {
- return false;
- }
-
- // The texture is externally owned so it shouldn't be destroyed when we receive a destroy
- // message from the client. Add a reference to counterbalance the eventual release.
- mProcs.swapChainReference(swapchain);
-
- return true;
+ data->handle = texture;
+ data->generation = generation;
+ data->state = AllocationState::Allocated;
+
+ // The texture is externally owned so it shouldn't be destroyed when we receive a destroy
+ // message from the client. Add a reference to counterbalance the eventual release.
+ mProcs.textureReference(texture);
+
+ return true;
+}
+
+bool Server::InjectSwapChain(WGPUSwapChain swapchain,
+ uint32_t id,
+ uint32_t generation,
+ uint32_t deviceId,
+ uint32_t deviceGeneration) {
+ ASSERT(swapchain != nullptr);
+ ObjectData<WGPUDevice>* device = DeviceObjects().Get(deviceId);
+ if (device == nullptr || device->generation != deviceGeneration) {
+ return false;
}
- bool Server::InjectDevice(WGPUDevice device, uint32_t id, uint32_t generation) {
- ASSERT(device != nullptr);
- ObjectData<WGPUDevice>* data = DeviceObjects().Allocate(id);
- if (data == nullptr) {
- return false;
- }
-
- data->handle = device;
- data->generation = generation;
- data->state = AllocationState::Allocated;
- data->info->server = this;
- data->info->self = ObjectHandle{id, generation};
-
- // The device is externally owned so it shouldn't be destroyed when we receive a destroy
- // message from the client. Add a reference to counterbalance the eventual release.
- mProcs.deviceReference(device);
-
- // Set callbacks to forward errors to the client.
- SetForwardingDeviceCallbacks(data);
- return true;
+ ObjectData<WGPUSwapChain>* data = SwapChainObjects().Allocate(id);
+ if (data == nullptr) {
+ return false;
}
- bool Server::InjectInstance(WGPUInstance instance, uint32_t id, uint32_t generation) {
- ASSERT(instance != nullptr);
- ObjectData<WGPUInstance>* data = InstanceObjects().Allocate(id);
- if (data == nullptr) {
- return false;
- }
+ data->handle = swapchain;
+ data->generation = generation;
+ data->state = AllocationState::Allocated;
- data->handle = instance;
- data->generation = generation;
- data->state = AllocationState::Allocated;
+ // The texture is externally owned so it shouldn't be destroyed when we receive a destroy
+ // message from the client. Add a reference to counterbalance the eventual release.
+ mProcs.swapChainReference(swapchain);
- // The instance is externally owned so it shouldn't be destroyed when we receive a destroy
- // message from the client. Add a reference to counterbalance the eventual release.
- mProcs.instanceReference(instance);
+ return true;
+}
- return true;
+bool Server::InjectDevice(WGPUDevice device, uint32_t id, uint32_t generation) {
+ ASSERT(device != nullptr);
+ ObjectData<WGPUDevice>* data = DeviceObjects().Allocate(id);
+ if (data == nullptr) {
+ return false;
}
- WGPUDevice Server::GetDevice(uint32_t id, uint32_t generation) {
- ObjectData<WGPUDevice>* data = DeviceObjects().Get(id);
- if (data == nullptr || data->generation != generation) {
- return nullptr;
- }
- return data->handle;
+ data->handle = device;
+ data->generation = generation;
+ data->state = AllocationState::Allocated;
+ data->info->server = this;
+ data->info->self = ObjectHandle{id, generation};
+
+ // The device is externally owned so it shouldn't be destroyed when we receive a destroy
+ // message from the client. Add a reference to counterbalance the eventual release.
+ mProcs.deviceReference(device);
+
+ // Set callbacks to forward errors to the client.
+ SetForwardingDeviceCallbacks(data);
+ return true;
+}
+
+bool Server::InjectInstance(WGPUInstance instance, uint32_t id, uint32_t generation) {
+ ASSERT(instance != nullptr);
+ ObjectData<WGPUInstance>* data = InstanceObjects().Allocate(id);
+ if (data == nullptr) {
+ return false;
}
- void Server::SetForwardingDeviceCallbacks(ObjectData<WGPUDevice>* deviceObject) {
- // Note: these callbacks are manually inlined here since they do not acquire and
- // free their userdata. Also unlike other callbacks, these are cleared and unset when
- // the server is destroyed, so we don't need to check if the server is still alive
- // inside them.
- mProcs.deviceSetUncapturedErrorCallback(
- deviceObject->handle,
- [](WGPUErrorType type, const char* message, void* userdata) {
- DeviceInfo* info = static_cast<DeviceInfo*>(userdata);
- info->server->OnUncapturedError(info->self, type, message);
- },
- deviceObject->info.get());
- // Set callback to post warning and other infomation to client.
- // Almost the same with UncapturedError.
- mProcs.deviceSetLoggingCallback(
- deviceObject->handle,
- [](WGPULoggingType type, const char* message, void* userdata) {
- DeviceInfo* info = static_cast<DeviceInfo*>(userdata);
- info->server->OnLogging(info->self, type, message);
- },
- deviceObject->info.get());
- mProcs.deviceSetDeviceLostCallback(
- deviceObject->handle,
- [](WGPUDeviceLostReason reason, const char* message, void* userdata) {
- DeviceInfo* info = static_cast<DeviceInfo*>(userdata);
- info->server->OnDeviceLost(info->self, reason, message);
- },
- deviceObject->info.get());
- }
+ data->handle = instance;
+ data->generation = generation;
+ data->state = AllocationState::Allocated;
- void Server::ClearDeviceCallbacks(WGPUDevice device) {
- // Un-set the error and lost callbacks since we cannot forward them
- // after the server has been destroyed.
- mProcs.deviceSetUncapturedErrorCallback(device, nullptr, nullptr);
- mProcs.deviceSetLoggingCallback(device, nullptr, nullptr);
- mProcs.deviceSetDeviceLostCallback(device, nullptr, nullptr);
- }
+ // The instance is externally owned so it shouldn't be destroyed when we receive a destroy
+ // message from the client. Add a reference to counterbalance the eventual release.
+ mProcs.instanceReference(instance);
- bool TrackDeviceChild(DeviceInfo* info, ObjectType type, ObjectId id) {
- auto [_, inserted] = info->childObjectTypesAndIds.insert(PackObjectTypeAndId(type, id));
- if (!inserted) {
- // An object of this type and id already exists.
- return false;
- }
- return true;
- }
+ return true;
+}
- bool UntrackDeviceChild(DeviceInfo* info, ObjectType type, ObjectId id) {
- auto& children = info->childObjectTypesAndIds;
- auto it = children.find(PackObjectTypeAndId(type, id));
- if (it == children.end()) {
- // An object of this type and id was already deleted.
- return false;
- }
- children.erase(it);
- return true;
+WGPUDevice Server::GetDevice(uint32_t id, uint32_t generation) {
+ ObjectData<WGPUDevice>* data = DeviceObjects().Get(id);
+ if (data == nullptr || data->generation != generation) {
+ return nullptr;
}
+ return data->handle;
+}
+
+bool Server::IsDeviceKnown(WGPUDevice device) const {
+ return DeviceObjects().IsKnown(device);
+}
+
+void Server::SetForwardingDeviceCallbacks(ObjectData<WGPUDevice>* deviceObject) {
+ // Note: these callbacks are manually inlined here since they do not acquire and
+ // free their userdata. Also unlike other callbacks, these are cleared and unset when
+ // the server is destroyed, so we don't need to check if the server is still alive
+ // inside them.
+ // Also, the device is special-cased in Server::DoDestroyObject to call
+ // ClearDeviceCallbacks. This ensures that callbacks will not fire after |deviceObject|
+ // is freed.
+ mProcs.deviceSetUncapturedErrorCallback(
+ deviceObject->handle,
+ [](WGPUErrorType type, const char* message, void* userdata) {
+ DeviceInfo* info = static_cast<DeviceInfo*>(userdata);
+ info->server->OnUncapturedError(info->self, type, message);
+ },
+ deviceObject->info.get());
+    // Set callback to post warnings and other information to the client.
+    // Almost the same as UncapturedError.
+ mProcs.deviceSetLoggingCallback(
+ deviceObject->handle,
+ [](WGPULoggingType type, const char* message, void* userdata) {
+ DeviceInfo* info = static_cast<DeviceInfo*>(userdata);
+ info->server->OnLogging(info->self, type, message);
+ },
+ deviceObject->info.get());
+ mProcs.deviceSetDeviceLostCallback(
+ deviceObject->handle,
+ [](WGPUDeviceLostReason reason, const char* message, void* userdata) {
+ DeviceInfo* info = static_cast<DeviceInfo*>(userdata);
+ info->server->OnDeviceLost(info->self, reason, message);
+ },
+ deviceObject->info.get());
+}
+
+void Server::ClearDeviceCallbacks(WGPUDevice device) {
+ // Un-set the error and lost callbacks since we cannot forward them
+ // after the server has been destroyed.
+ mProcs.deviceSetUncapturedErrorCallback(device, nullptr, nullptr);
+ mProcs.deviceSetLoggingCallback(device, nullptr, nullptr);
+ mProcs.deviceSetDeviceLostCallback(device, nullptr, nullptr);
+}
} // namespace dawn::wire::server
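The callback-lifetime story here rests on mIsAlive, the shared_ptr<bool> created in the constructor: callback userdata keeps a weak_ptr to it (see CallbackUserdata below), so a callback that fires after the Server is gone sees the expired pointer and returns without touching freed memory. A standalone sketch of that alive-flag pattern with hypothetical types:

    #include <cstdio>
    #include <functional>
    #include <memory>

    class FakeServer {
      public:
        FakeServer() : mIsAlive(std::make_shared<bool>(true)) {}

        // Builds a callback that may outlive the server but never dereferences
        // it once the alive flag has expired.
        std::function<void()> MakeCallback() {
            std::weak_ptr<bool> alive = mIsAlive;
            FakeServer* self = this;
            return [alive, self] {
                if (alive.expired()) {
                    std::puts("server already destroyed, dropping callback");
                    return;
                }
                self->HandleCallback();
            };
        }

        void HandleCallback() { std::puts("callback forwarded to live server"); }

      private:
        std::shared_ptr<bool> mIsAlive;
    };

    int main() {
        std::function<void()> pending;
        {
            FakeServer server;
            pending = server.MakeCallback();
            pending();  // server alive: forwarded
        }
        pending();      // server destroyed: dropped safely
    }

ForwardToServer in Server.h wraps exactly this expiry check generically around every Server:: callback handler.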
diff --git a/chromium/third_party/dawn/src/dawn/wire/server/Server.h b/chromium/third_party/dawn/src/dawn/wire/server/Server.h
index caa1bc78f15..281275685c0 100644
--- a/chromium/third_party/dawn/src/dawn/wire/server/Server.h
+++ b/chromium/third_party/dawn/src/dawn/wire/server/Server.h
@@ -15,228 +15,226 @@
#ifndef SRC_DAWN_WIRE_SERVER_SERVER_H_
#define SRC_DAWN_WIRE_SERVER_SERVER_H_
+#include <memory>
+#include <utility>
+
#include "dawn/wire/ChunkedCommandSerializer.h"
#include "dawn/wire/server/ServerBase_autogen.h"
namespace dawn::wire::server {
- class Server;
- class MemoryTransferService;
-
- // CallbackUserdata and its derived classes are intended to be created by
- // Server::MakeUserdata<T> and then passed as the userdata argument for Dawn
- // callbacks.
- // It contains a pointer back to the Server so that the callback can call the
- // Server to perform operations like serialization, and it contains a weak pointer
- // |serverIsAlive|. If the weak pointer has expired, it means the server has
- // been destroyed and the callback must not use the Server pointer.
- // To assist with checking |serverIsAlive| and lifetime management of the userdata,
- // |ForwardToServer| (defined later in this file) can be used to acquire the userdata,
- // return early if |serverIsAlive| has expired, and then forward the arguments
- // to userdata->server->MyCallbackHandler.
- //
- // Example Usage:
- //
- // struct MyUserdata : CallbackUserdata { uint32_t foo; };
- //
- // auto userdata = MakeUserdata<MyUserdata>();
- // userdata->foo = 2;
- //
- // callMyCallbackHandler(
- // ForwardToServer<&Server::MyCallbackHandler>,
- // userdata.release());
- //
- // void Server::MyCallbackHandler(MyUserdata* userdata, Other args) { }
- struct CallbackUserdata {
- Server* const server;
- std::weak_ptr<bool> const serverIsAlive;
-
- CallbackUserdata() = delete;
- CallbackUserdata(Server* server, const std::shared_ptr<bool>& serverIsAlive)
- : server(server), serverIsAlive(serverIsAlive) {
- }
- };
-
- template <auto F>
- struct ForwardToServerHelper {
- template <typename _>
- struct ExtractedTypes;
-
- // An internal structure used to unpack the various types that compose the type of F
- template <typename Return, typename Class, typename Userdata, typename... Args>
- struct ExtractedTypes<Return (Class::*)(Userdata*, Args...)> {
- using UntypedCallback = Return (*)(Args..., void*);
- static Return Callback(Args... args, void* userdata) {
- // Acquire the userdata, and cast it to UserdataT.
- std::unique_ptr<Userdata> data(static_cast<Userdata*>(userdata));
- if (data->serverIsAlive.expired()) {
- // Do nothing if the server has already been destroyed.
- return;
- }
- // Forward the arguments and the typed userdata to the Server:: member function.
- (data->server->*F)(data.get(), std::forward<decltype(args)>(args)...);
+class Server;
+class MemoryTransferService;
+
+// CallbackUserdata and its derived classes are intended to be created by
+// Server::MakeUserdata<T> and then passed as the userdata argument for Dawn
+// callbacks.
+// It contains a pointer back to the Server so that the callback can call the
+// Server to perform operations like serialization, and it contains a weak pointer
+// |serverIsAlive|. If the weak pointer has expired, it means the server has
+// been destroyed and the callback must not use the Server pointer.
+// To assist with checking |serverIsAlive| and lifetime management of the userdata,
+// |ForwardToServer| (defined later in this file) can be used to acquire the userdata,
+// return early if |serverIsAlive| has expired, and then forward the arguments
+// to userdata->server->MyCallbackHandler.
+//
+// Example Usage:
+//
+// struct MyUserdata : CallbackUserdata { uint32_t foo; };
+//
+// auto userdata = MakeUserdata<MyUserdata>();
+// userdata->foo = 2;
+//
+// callMyCallbackHandler(
+// ForwardToServer<&Server::MyCallbackHandler>,
+// userdata.release());
+//
+// void Server::MyCallbackHandler(MyUserdata* userdata, Other args) { }
+struct CallbackUserdata {
+ Server* const server;
+ std::weak_ptr<bool> const serverIsAlive;
+
+ CallbackUserdata() = delete;
+ CallbackUserdata(Server* server, const std::shared_ptr<bool>& serverIsAlive);
+};
+
+template <auto F>
+struct ForwardToServerHelper {
+ template <typename _>
+ struct ExtractedTypes;
+
+ // An internal structure used to unpack the various types that compose the type of F
+ template <typename Return, typename Class, typename Userdata, typename... Args>
+ struct ExtractedTypes<Return (Class::*)(Userdata*, Args...)> {
+ using UntypedCallback = Return (*)(Args..., void*);
+ static Return Callback(Args... args, void* userdata) {
+ // Acquire the userdata, and cast it to UserdataT.
+ std::unique_ptr<Userdata> data(static_cast<Userdata*>(userdata));
+ if (data->serverIsAlive.expired()) {
+ // Do nothing if the server has already been destroyed.
+ return;
}
- };
-
- static constexpr typename ExtractedTypes<decltype(F)>::UntypedCallback Create() {
- return ExtractedTypes<decltype(F)>::Callback;
+ // Forward the arguments and the typed userdata to the Server:: member function.
+ (data->server->*F)(data.get(), std::forward<decltype(args)>(args)...);
}
};
- template <auto F>
- constexpr auto ForwardToServer = ForwardToServerHelper<F>::Create();
-
- struct MapUserdata : CallbackUserdata {
- using CallbackUserdata::CallbackUserdata;
-
- ObjectHandle buffer;
- WGPUBuffer bufferObj;
- uint64_t requestSerial;
- uint64_t offset;
- uint64_t size;
- WGPUMapModeFlags mode;
- };
-
- struct ErrorScopeUserdata : CallbackUserdata {
- using CallbackUserdata::CallbackUserdata;
-
- ObjectHandle device;
- uint64_t requestSerial;
- };
-
- struct ShaderModuleGetCompilationInfoUserdata : CallbackUserdata {
- using CallbackUserdata::CallbackUserdata;
-
- ObjectHandle shaderModule;
- uint64_t requestSerial;
- };
-
- struct QueueWorkDoneUserdata : CallbackUserdata {
- using CallbackUserdata::CallbackUserdata;
-
- ObjectHandle queue;
- uint64_t requestSerial;
- };
-
- struct CreatePipelineAsyncUserData : CallbackUserdata {
- using CallbackUserdata::CallbackUserdata;
+ static constexpr typename ExtractedTypes<decltype(F)>::UntypedCallback Create() {
+ return ExtractedTypes<decltype(F)>::Callback;
+ }
+};
- ObjectHandle device;
- uint64_t requestSerial;
- ObjectId pipelineObjectID;
- };
-
- struct RequestAdapterUserdata : CallbackUserdata {
- using CallbackUserdata::CallbackUserdata;
-
- ObjectHandle instance;
- uint64_t requestSerial;
- ObjectId adapterObjectId;
- };
-
- struct RequestDeviceUserdata : CallbackUserdata {
- using CallbackUserdata::CallbackUserdata;
-
- ObjectHandle adapter;
- uint64_t requestSerial;
- ObjectId deviceObjectId;
- };
-
- class Server : public ServerBase {
- public:
- Server(const DawnProcTable& procs,
- CommandSerializer* serializer,
- MemoryTransferService* memoryTransferService);
- ~Server() override;
-
- // ChunkedCommandHandler implementation
- const volatile char* HandleCommandsImpl(const volatile char* commands,
- size_t size) override;
-
- bool InjectTexture(WGPUTexture texture,
- uint32_t id,
- uint32_t generation,
- uint32_t deviceId,
- uint32_t deviceGeneration);
-
- bool InjectSwapChain(WGPUSwapChain swapchain,
- uint32_t id,
- uint32_t generation,
- uint32_t deviceId,
- uint32_t deviceGeneration);
-
- bool InjectDevice(WGPUDevice device, uint32_t id, uint32_t generation);
-
- bool InjectInstance(WGPUInstance instance, uint32_t id, uint32_t generation);
-
- WGPUDevice GetDevice(uint32_t id, uint32_t generation);
-
- template <typename T,
- typename Enable = std::enable_if<std::is_base_of<CallbackUserdata, T>::value>>
- std::unique_ptr<T> MakeUserdata() {
- return std::unique_ptr<T>(new T(this, mIsAlive));
- }
-
- private:
- template <typename Cmd>
- void SerializeCommand(const Cmd& cmd) {
- mSerializer.SerializeCommand(cmd);
- }
-
- template <typename Cmd, typename ExtraSizeSerializeFn>
- void SerializeCommand(const Cmd& cmd,
- size_t extraSize,
- ExtraSizeSerializeFn&& SerializeExtraSize) {
- mSerializer.SerializeCommand(cmd, extraSize, SerializeExtraSize);
- }
-
- void SetForwardingDeviceCallbacks(ObjectData<WGPUDevice>* deviceObject);
- void ClearDeviceCallbacks(WGPUDevice device);
-
- // Error callbacks
- void OnUncapturedError(ObjectHandle device, WGPUErrorType type, const char* message);
- void OnDeviceLost(ObjectHandle device, WGPUDeviceLostReason reason, const char* message);
- void OnLogging(ObjectHandle device, WGPULoggingType type, const char* message);
- void OnDevicePopErrorScope(ErrorScopeUserdata* userdata,
- WGPUErrorType type,
- const char* message);
- void OnBufferMapAsyncCallback(MapUserdata* userdata, WGPUBufferMapAsyncStatus status);
- void OnQueueWorkDone(QueueWorkDoneUserdata* userdata, WGPUQueueWorkDoneStatus status);
- void OnCreateComputePipelineAsyncCallback(CreatePipelineAsyncUserData* userdata,
- WGPUCreatePipelineAsyncStatus status,
- WGPUComputePipeline pipeline,
- const char* message);
- void OnCreateRenderPipelineAsyncCallback(CreatePipelineAsyncUserData* userdata,
- WGPUCreatePipelineAsyncStatus status,
- WGPURenderPipeline pipeline,
- const char* message);
- void OnShaderModuleGetCompilationInfo(ShaderModuleGetCompilationInfoUserdata* userdata,
- WGPUCompilationInfoRequestStatus status,
- const WGPUCompilationInfo* info);
- void OnRequestAdapterCallback(RequestAdapterUserdata* userdata,
- WGPURequestAdapterStatus status,
- WGPUAdapter adapter,
- const char* message);
- void OnRequestDeviceCallback(RequestDeviceUserdata* userdata,
- WGPURequestDeviceStatus status,
- WGPUDevice device,
- const char* message);
+template <auto F>
+constexpr auto ForwardToServer = ForwardToServerHelper<F>::Create();
+
+struct MapUserdata : CallbackUserdata {
+ using CallbackUserdata::CallbackUserdata;
+
+ ObjectHandle buffer;
+ WGPUBuffer bufferObj;
+ uint64_t requestSerial;
+ uint64_t offset;
+ uint64_t size;
+ WGPUMapModeFlags mode;
+};
+
+struct ErrorScopeUserdata : CallbackUserdata {
+ using CallbackUserdata::CallbackUserdata;
+
+ ObjectHandle device;
+ uint64_t requestSerial;
+};
+
+struct ShaderModuleGetCompilationInfoUserdata : CallbackUserdata {
+ using CallbackUserdata::CallbackUserdata;
+
+ ObjectHandle shaderModule;
+ uint64_t requestSerial;
+};
+
+struct QueueWorkDoneUserdata : CallbackUserdata {
+ using CallbackUserdata::CallbackUserdata;
+
+ ObjectHandle queue;
+ uint64_t requestSerial;
+};
+
+struct CreatePipelineAsyncUserData : CallbackUserdata {
+ using CallbackUserdata::CallbackUserdata;
+
+ ObjectHandle device;
+ uint64_t requestSerial;
+ ObjectId pipelineObjectID;
+};
+
+struct RequestAdapterUserdata : CallbackUserdata {
+ using CallbackUserdata::CallbackUserdata;
+
+ ObjectHandle instance;
+ uint64_t requestSerial;
+ ObjectId adapterObjectId;
+};
+
+struct RequestDeviceUserdata : CallbackUserdata {
+ using CallbackUserdata::CallbackUserdata;
+
+ ObjectHandle adapter;
+ uint64_t requestSerial;
+ ObjectId deviceObjectId;
+};
+
+class Server : public ServerBase {
+ public:
+ Server(const DawnProcTable& procs,
+ CommandSerializer* serializer,
+ MemoryTransferService* memoryTransferService);
+ ~Server() override;
+
+ // ChunkedCommandHandler implementation
+ const volatile char* HandleCommandsImpl(const volatile char* commands, size_t size) override;
+
+ bool InjectTexture(WGPUTexture texture,
+ uint32_t id,
+ uint32_t generation,
+ uint32_t deviceId,
+ uint32_t deviceGeneration);
+
+ bool InjectSwapChain(WGPUSwapChain swapchain,
+ uint32_t id,
+ uint32_t generation,
+ uint32_t deviceId,
+ uint32_t deviceGeneration);
+
+ bool InjectDevice(WGPUDevice device, uint32_t id, uint32_t generation);
+
+ bool InjectInstance(WGPUInstance instance, uint32_t id, uint32_t generation);
+
+ WGPUDevice GetDevice(uint32_t id, uint32_t generation);
+ bool IsDeviceKnown(WGPUDevice device) const;
+
+ template <typename T,
+ typename Enable = std::enable_if<std::is_base_of<CallbackUserdata, T>::value>>
+ std::unique_ptr<T> MakeUserdata() {
+ return std::unique_ptr<T>(new T(this, mIsAlive));
+ }
+
+ private:
+ template <typename Cmd>
+ void SerializeCommand(const Cmd& cmd) {
+ mSerializer.SerializeCommand(cmd);
+ }
+
+ template <typename Cmd, typename ExtraSizeSerializeFn>
+ void SerializeCommand(const Cmd& cmd,
+ size_t extraSize,
+ ExtraSizeSerializeFn&& SerializeExtraSize) {
+ mSerializer.SerializeCommand(cmd, extraSize, SerializeExtraSize);
+ }
+
+ void SetForwardingDeviceCallbacks(ObjectData<WGPUDevice>* deviceObject);
+ void ClearDeviceCallbacks(WGPUDevice device);
+
+ // Error callbacks
+ void OnUncapturedError(ObjectHandle device, WGPUErrorType type, const char* message);
+ void OnDeviceLost(ObjectHandle device, WGPUDeviceLostReason reason, const char* message);
+ void OnLogging(ObjectHandle device, WGPULoggingType type, const char* message);
+ void OnDevicePopErrorScope(ErrorScopeUserdata* userdata,
+ WGPUErrorType type,
+ const char* message);
+ void OnBufferMapAsyncCallback(MapUserdata* userdata, WGPUBufferMapAsyncStatus status);
+ void OnQueueWorkDone(QueueWorkDoneUserdata* userdata, WGPUQueueWorkDoneStatus status);
+ void OnCreateComputePipelineAsyncCallback(CreatePipelineAsyncUserData* userdata,
+ WGPUCreatePipelineAsyncStatus status,
+ WGPUComputePipeline pipeline,
+ const char* message);
+ void OnCreateRenderPipelineAsyncCallback(CreatePipelineAsyncUserData* userdata,
+ WGPUCreatePipelineAsyncStatus status,
+ WGPURenderPipeline pipeline,
+ const char* message);
+ void OnShaderModuleGetCompilationInfo(ShaderModuleGetCompilationInfoUserdata* userdata,
+ WGPUCompilationInfoRequestStatus status,
+ const WGPUCompilationInfo* info);
+ void OnRequestAdapterCallback(RequestAdapterUserdata* userdata,
+ WGPURequestAdapterStatus status,
+ WGPUAdapter adapter,
+ const char* message);
+ void OnRequestDeviceCallback(RequestDeviceUserdata* userdata,
+ WGPURequestDeviceStatus status,
+ WGPUDevice device,
+ const char* message);
#include "dawn/wire/server/ServerPrototypes_autogen.inc"
- WireDeserializeAllocator mAllocator;
- ChunkedCommandSerializer mSerializer;
- DawnProcTable mProcs;
- std::unique_ptr<MemoryTransferService> mOwnedMemoryTransferService = nullptr;
- MemoryTransferService* mMemoryTransferService = nullptr;
-
- std::shared_ptr<bool> mIsAlive;
- };
+ WireDeserializeAllocator mAllocator;
+ ChunkedCommandSerializer mSerializer;
+ DawnProcTable mProcs;
+ std::unique_ptr<MemoryTransferService> mOwnedMemoryTransferService = nullptr;
+ MemoryTransferService* mMemoryTransferService = nullptr;
- bool TrackDeviceChild(DeviceInfo* device, ObjectType type, ObjectId id);
- bool UntrackDeviceChild(DeviceInfo* device, ObjectType type, ObjectId id);
+ std::shared_ptr<bool> mIsAlive;
+};
- std::unique_ptr<MemoryTransferService> CreateInlineMemoryTransferService();
+std::unique_ptr<MemoryTransferService> CreateInlineMemoryTransferService();
} // namespace dawn::wire::server
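
ForwardToServer combines two idioms: the callback takes ownership of its heap-allocated userdata via std::unique_ptr, and it checks a std::weak_ptr<bool> that expires once the server's mIsAlive shared_ptr is destroyed. A reduced sketch of that alive check, using hypothetical Owner and Userdata names rather than the real Server types:

#include <iostream>
#include <memory>

struct Owner {
    // Destroyed together with the Owner, which signals that callbacks must not run.
    std::shared_ptr<bool> isAlive = std::make_shared<bool>(true);
    void HandleResult(int value) { std::cout << "result: " << value << "\n"; }
};

struct Userdata {
    Owner* owner;
    std::weak_ptr<bool> ownerIsAlive;
};

// The kind of callback passed to a C API: it adopts the userdata pointer and
// drops the call on the floor if the owner has already been destroyed.
void Callback(int value, void* userdata) {
    std::unique_ptr<Userdata> data(static_cast<Userdata*>(userdata));
    if (data->ownerIsAlive.expired()) {
        return;  // Owner destroyed; do nothing.
    }
    data->owner->HandleResult(value);
}

int main() {
    auto owner = std::make_unique<Owner>();
    auto* userdata = new Userdata{owner.get(), owner->isAlive};
    Callback(42, userdata);  // Forwards to HandleResult.

    auto* lateUserdata = new Userdata{owner.get(), owner->isAlive};
    owner.reset();               // Destroys the shared_ptr, expiring the weak_ptr.
    Callback(43, lateUserdata);  // Safely ignored; userdata is still freed.
}
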
diff --git a/chromium/third_party/dawn/src/dawn/wire/server/ServerAdapter.cpp b/chromium/third_party/dawn/src/dawn/wire/server/ServerAdapter.cpp
index 05184552ef5..9735d26cd42 100644
--- a/chromium/third_party/dawn/src/dawn/wire/server/ServerAdapter.cpp
+++ b/chromium/third_party/dawn/src/dawn/wire/server/ServerAdapter.cpp
@@ -12,99 +12,97 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-#include "dawn/wire/server/Server.h"
+#include <vector>
#include "dawn/wire/SupportedFeatures.h"
+#include "dawn/wire/server/Server.h"
namespace dawn::wire::server {
- bool Server::DoAdapterRequestDevice(ObjectId adapterId,
- uint64_t requestSerial,
- ObjectHandle deviceHandle,
- const WGPUDeviceDescriptor* descriptor) {
- auto* adapter = AdapterObjects().Get(adapterId);
- if (adapter == nullptr) {
- return false;
- }
+bool Server::DoAdapterRequestDevice(ObjectId adapterId,
+ uint64_t requestSerial,
+ ObjectHandle deviceHandle,
+ const WGPUDeviceDescriptor* descriptor) {
+ auto* adapter = AdapterObjects().Get(adapterId);
+ if (adapter == nullptr) {
+ return false;
+ }
- auto* resultData = DeviceObjects().Allocate(deviceHandle.id, AllocationState::Reserved);
- if (resultData == nullptr) {
- return false;
- }
+ auto* resultData = DeviceObjects().Allocate(deviceHandle.id, AllocationState::Reserved);
+ if (resultData == nullptr) {
+ return false;
+ }
- resultData->generation = deviceHandle.generation;
+ resultData->generation = deviceHandle.generation;
+
+ auto userdata = MakeUserdata<RequestDeviceUserdata>();
+ userdata->adapter = ObjectHandle{adapterId, adapter->generation};
+ userdata->requestSerial = requestSerial;
+ userdata->deviceObjectId = deviceHandle.id;
+
+ mProcs.adapterRequestDevice(adapter->handle, descriptor,
+ ForwardToServer<&Server::OnRequestDeviceCallback>,
+ userdata.release());
+ return true;
+}
+
+void Server::OnRequestDeviceCallback(RequestDeviceUserdata* data,
+ WGPURequestDeviceStatus status,
+ WGPUDevice device,
+ const char* message) {
+ ReturnAdapterRequestDeviceCallbackCmd cmd = {};
+ cmd.adapter = data->adapter;
+ cmd.requestSerial = data->requestSerial;
+ cmd.status = status;
+ cmd.message = message;
+
+ if (status != WGPURequestDeviceStatus_Success) {
+ // Free the ObjectId which will make it unusable.
+ DeviceObjects().Free(data->deviceObjectId);
+ ASSERT(device == nullptr);
+ SerializeCommand(cmd);
+ return;
+ }
- auto userdata = MakeUserdata<RequestDeviceUserdata>();
- userdata->adapter = ObjectHandle{adapterId, adapter->generation};
- userdata->requestSerial = requestSerial;
- userdata->deviceObjectId = deviceHandle.id;
+ std::vector<WGPUFeatureName> features;
- mProcs.adapterRequestDevice(adapter->handle, descriptor,
- ForwardToServer<&Server::OnRequestDeviceCallback>,
- userdata.release());
- return true;
- }
+ size_t featuresCount = mProcs.deviceEnumerateFeatures(device, nullptr);
+ features.resize(featuresCount);
+ mProcs.deviceEnumerateFeatures(device, features.data());
- void Server::OnRequestDeviceCallback(RequestDeviceUserdata* data,
- WGPURequestDeviceStatus status,
- WGPUDevice device,
- const char* message) {
- auto* deviceObject = DeviceObjects().Get(data->deviceObjectId, AllocationState::Reserved);
- // Should be impossible to fail. ObjectIds can't be freed by a destroy command until
- // they move from Reserved to Allocated, or if they are destroyed here.
- ASSERT(deviceObject != nullptr);
-
- ReturnAdapterRequestDeviceCallbackCmd cmd = {};
- cmd.adapter = data->adapter;
- cmd.requestSerial = data->requestSerial;
- cmd.status = status;
- cmd.message = message;
-
- if (status != WGPURequestDeviceStatus_Success) {
+ // The client should only be able to request supported features, so all enumerated
+ // features that were enabled must also be supported by the wire.
+ // Note: We fail the callback here, instead of immediately upon receiving
+    // the request, to preserve callback ordering.
+ for (WGPUFeatureName f : features) {
+ if (!IsFeatureSupported(f)) {
+ // Release the device.
+ mProcs.deviceRelease(device);
// Free the ObjectId which will make it unusable.
DeviceObjects().Free(data->deviceObjectId);
- ASSERT(device == nullptr);
+
+ cmd.status = WGPURequestDeviceStatus_Error;
+ cmd.message = "Requested feature not supported.";
SerializeCommand(cmd);
return;
}
+ }
- std::vector<WGPUFeatureName> features;
-
- size_t featuresCount = mProcs.deviceEnumerateFeatures(device, nullptr);
- features.resize(featuresCount);
- mProcs.deviceEnumerateFeatures(device, features.data());
-
- // The client should only be able to request supported features, so all enumerated
- // features that were enabled must also be supported by the wire.
- // Note: We fail the callback here, instead of immediately upon receiving
- // the request to preserve callback ordering.
- for (WGPUFeatureName f : features) {
- if (!IsFeatureSupported(f)) {
- // Release the device.
- mProcs.deviceRelease(device);
- // Free the ObjectId which will make it unusable.
- DeviceObjects().Free(data->deviceObjectId);
-
- cmd.status = WGPURequestDeviceStatus_Error;
- cmd.message = "Requested feature not supported.";
- SerializeCommand(cmd);
- return;
- }
- }
-
- cmd.featuresCount = features.size();
- cmd.features = features.data();
+ cmd.featuresCount = static_cast<uint32_t>(features.size());
+ cmd.features = features.data();
- WGPUSupportedLimits limits = {};
- mProcs.deviceGetLimits(device, &limits);
- cmd.limits = &limits;
+ WGPUSupportedLimits limits = {};
+ mProcs.deviceGetLimits(device, &limits);
+ cmd.limits = &limits;
- // Assign the handle and allocated status if the device is created successfully.
- deviceObject->state = AllocationState::Allocated;
- deviceObject->handle = device;
- SetForwardingDeviceCallbacks(deviceObject);
+ // Assign the handle and allocated status if the device is created successfully.
+ auto* deviceObject = DeviceObjects().FillReservation(data->deviceObjectId, device);
+ ASSERT(deviceObject != nullptr);
+ deviceObject->info->server = this;
+ deviceObject->info->self = ObjectHandle{data->deviceObjectId, deviceObject->generation};
+ SetForwardingDeviceCallbacks(deviceObject);
- SerializeCommand(cmd);
- }
+ SerializeCommand(cmd);
+}
} // namespace dawn::wire::server
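
deviceEnumerateFeatures is used above in the usual count-then-fill style: a first call with a null output pointer returns only the count, then the caller sizes a buffer and calls again to fill it. A small sketch of that convention with a hypothetical EnumerateThings function:

#include <cstddef>
#include <vector>

// Hypothetical count-then-fill API: returns the number of items, and writes
// them into `out` only when `out` is non-null.
size_t EnumerateThings(int* out) {
    static const int kThings[] = {1, 2, 3};
    if (out != nullptr) {
        for (size_t i = 0; i < 3; ++i) {
            out[i] = kThings[i];
        }
    }
    return 3;
}

std::vector<int> CollectThings() {
    std::vector<int> things;
    size_t count = EnumerateThings(nullptr);  // First pass: query the count.
    things.resize(count);
    EnumerateThings(things.data());           // Second pass: fill the buffer.
    return things;
}
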
diff --git a/chromium/third_party/dawn/src/dawn/wire/server/ServerBuffer.cpp b/chromium/third_party/dawn/src/dawn/wire/server/ServerBuffer.cpp
index 44664da8193..61d5e15423c 100644
--- a/chromium/third_party/dawn/src/dawn/wire/server/ServerBuffer.cpp
+++ b/chromium/third_party/dawn/src/dawn/wire/server/ServerBuffer.cpp
@@ -12,271 +12,263 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+#include <limits>
+#include <memory>
+
#include "dawn/common/Assert.h"
#include "dawn/wire/BufferConsumer_impl.h"
#include "dawn/wire/WireCmd_autogen.h"
#include "dawn/wire/server/Server.h"
-#include <memory>
-
namespace dawn::wire::server {
- bool Server::PreHandleBufferUnmap(const BufferUnmapCmd& cmd) {
- auto* buffer = BufferObjects().Get(cmd.selfId);
- DAWN_ASSERT(buffer != nullptr);
+bool Server::PreHandleBufferUnmap(const BufferUnmapCmd& cmd) {
+ auto* buffer = BufferObjects().Get(cmd.selfId);
+ DAWN_ASSERT(buffer != nullptr);
- if (buffer->mappedAtCreation && !(buffer->usage & WGPUMapMode_Write)) {
- // This indicates the writeHandle is for mappedAtCreation only. Destroy on unmap
- // writeHandle could have possibly been deleted if buffer is already destroyed so we
- // don't assert it's non-null
- buffer->writeHandle = nullptr;
- }
+ if (buffer->mappedAtCreation && !(buffer->usage & WGPUMapMode_Write)) {
+        // This indicates the writeHandle is for mappedAtCreation only, so destroy it on unmap.
+        // The writeHandle may already have been deleted if the buffer was destroyed, so we
+        // don't assert that it is non-null.
+ buffer->writeHandle = nullptr;
+ }
- buffer->mapWriteState = BufferMapWriteState::Unmapped;
+ buffer->mapWriteState = BufferMapWriteState::Unmapped;
- return true;
- }
+ return true;
+}
- bool Server::PreHandleBufferDestroy(const BufferDestroyCmd& cmd) {
- // Destroying a buffer does an implicit unmapping.
- auto* buffer = BufferObjects().Get(cmd.selfId);
- DAWN_ASSERT(buffer != nullptr);
+bool Server::PreHandleBufferDestroy(const BufferDestroyCmd& cmd) {
+ // Destroying a buffer does an implicit unmapping.
+ auto* buffer = BufferObjects().Get(cmd.selfId);
+ DAWN_ASSERT(buffer != nullptr);
- // The buffer was destroyed. Clear the Read/WriteHandle.
- buffer->readHandle = nullptr;
- buffer->writeHandle = nullptr;
- buffer->mapWriteState = BufferMapWriteState::Unmapped;
+ // The buffer was destroyed. Clear the Read/WriteHandle.
+ buffer->readHandle = nullptr;
+ buffer->writeHandle = nullptr;
+ buffer->mapWriteState = BufferMapWriteState::Unmapped;
- return true;
+ return true;
+}
+
+bool Server::DoBufferMapAsync(ObjectId bufferId,
+ uint64_t requestSerial,
+ WGPUMapModeFlags mode,
+ uint64_t offset64,
+ uint64_t size64) {
+ // These requests are just forwarded to the buffer, with userdata containing what the
+ // client will require in the return command.
+
+ // The null object isn't valid as `self`
+ if (bufferId == 0) {
+ return false;
}
- bool Server::DoBufferMapAsync(ObjectId bufferId,
- uint64_t requestSerial,
- WGPUMapModeFlags mode,
- uint64_t offset64,
- uint64_t size64) {
- // These requests are just forwarded to the buffer, with userdata containing what the
- // client will require in the return command.
+ auto* buffer = BufferObjects().Get(bufferId);
+ if (buffer == nullptr) {
+ return false;
+ }
- // The null object isn't valid as `self`
- if (bufferId == 0) {
- return false;
- }
+ std::unique_ptr<MapUserdata> userdata = MakeUserdata<MapUserdata>();
+ userdata->buffer = ObjectHandle{bufferId, buffer->generation};
+ userdata->bufferObj = buffer->handle;
+ userdata->requestSerial = requestSerial;
+ userdata->mode = mode;
+
+ // Make sure that the deserialized offset and size are no larger than
+ // std::numeric_limits<size_t>::max() so that they are CPU-addressable, and size is not
+    // WGPU_WHOLE_MAP_SIZE, which is by definition std::numeric_limits<size_t>::max(). Since the
+    // client does the default size computation, we should always have a valid actual size here
+    // on the server. All other invalid sizes can be caught by dawn native side validation.
+ if (offset64 > std::numeric_limits<size_t>::max() || size64 >= WGPU_WHOLE_MAP_SIZE) {
+ OnBufferMapAsyncCallback(userdata.get(), WGPUBufferMapAsyncStatus_Error);
+ return true;
+ }
- auto* buffer = BufferObjects().Get(bufferId);
- if (buffer == nullptr) {
- return false;
- }
+ size_t offset = static_cast<size_t>(offset64);
+ size_t size = static_cast<size_t>(size64);
- std::unique_ptr<MapUserdata> userdata = MakeUserdata<MapUserdata>();
- userdata->buffer = ObjectHandle{bufferId, buffer->generation};
- userdata->bufferObj = buffer->handle;
- userdata->requestSerial = requestSerial;
- userdata->mode = mode;
-
- // Make sure that the deserialized offset and size are no larger than
- // std::numeric_limits<size_t>::max() so that they are CPU-addressable, and size is not
- // WGPU_WHOLE_MAP_SIZE, which is by definition std::numeric_limits<size_t>::max(). Since
- // client does the default size computation, we should always have a valid actual size here
- // in server. All other invalid actual size can be caught by dawn native side validation.
- if (offset64 > std::numeric_limits<size_t>::max() || size64 >= WGPU_WHOLE_MAP_SIZE) {
- OnBufferMapAsyncCallback(userdata.get(), WGPUBufferMapAsyncStatus_Error);
- return true;
- }
+ userdata->offset = offset;
+ userdata->size = size;
- size_t offset = static_cast<size_t>(offset64);
- size_t size = static_cast<size_t>(size64);
+ mProcs.bufferMapAsync(buffer->handle, mode, offset, size,
+ ForwardToServer<&Server::OnBufferMapAsyncCallback>, userdata.release());
- userdata->offset = offset;
- userdata->size = size;
+ return true;
+}
- mProcs.bufferMapAsync(buffer->handle, mode, offset, size,
- ForwardToServer<&Server::OnBufferMapAsyncCallback>,
- userdata.release());
+bool Server::DoDeviceCreateBuffer(ObjectId deviceId,
+ const WGPUBufferDescriptor* descriptor,
+ ObjectHandle bufferResult,
+ uint64_t readHandleCreateInfoLength,
+ const uint8_t* readHandleCreateInfo,
+ uint64_t writeHandleCreateInfoLength,
+ const uint8_t* writeHandleCreateInfo) {
+ auto* device = DeviceObjects().Get(deviceId);
+ if (device == nullptr) {
+ return false;
+ }
- return true;
+ // Create and register the buffer object.
+ auto* resultData = BufferObjects().Allocate(bufferResult.id);
+ if (resultData == nullptr) {
+ return false;
+ }
+ resultData->generation = bufferResult.generation;
+ resultData->handle = mProcs.deviceCreateBuffer(device->handle, descriptor);
+ resultData->usage = descriptor->usage;
+ resultData->mappedAtCreation = descriptor->mappedAtCreation;
+
+    // isReadMode and isWriteMode can both be true at the same time if usage contains
+    // WGPUMapMode_Read and the buffer is mappedAtCreation.
+ bool isReadMode = descriptor->usage & WGPUMapMode_Read;
+ bool isWriteMode = descriptor->usage & WGPUMapMode_Write || descriptor->mappedAtCreation;
+
+ // This is the size of data deserialized from the command stream to create the read/write
+ // handle, which must be CPU-addressable.
+ if (readHandleCreateInfoLength > std::numeric_limits<size_t>::max() ||
+ writeHandleCreateInfoLength > std::numeric_limits<size_t>::max() ||
+ readHandleCreateInfoLength >
+ std::numeric_limits<size_t>::max() - writeHandleCreateInfoLength) {
+ return false;
}
- bool Server::DoDeviceCreateBuffer(ObjectId deviceId,
- const WGPUBufferDescriptor* descriptor,
- ObjectHandle bufferResult,
- uint64_t readHandleCreateInfoLength,
- const uint8_t* readHandleCreateInfo,
- uint64_t writeHandleCreateInfoLength,
- const uint8_t* writeHandleCreateInfo) {
- auto* device = DeviceObjects().Get(deviceId);
- if (device == nullptr) {
+ if (isWriteMode) {
+ MemoryTransferService::WriteHandle* writeHandle = nullptr;
+ // Deserialize metadata produced from the client to create a companion server handle.
+ if (!mMemoryTransferService->DeserializeWriteHandle(
+ writeHandleCreateInfo, static_cast<size_t>(writeHandleCreateInfoLength),
+ &writeHandle)) {
return false;
}
+ ASSERT(writeHandle != nullptr);
+ resultData->writeHandle.reset(writeHandle);
+ writeHandle->SetDataLength(descriptor->size);
+
+ if (descriptor->mappedAtCreation) {
+ void* mapping = mProcs.bufferGetMappedRange(resultData->handle, 0, descriptor->size);
+ if (mapping == nullptr) {
+ // A zero mapping is used to indicate an allocation error of an error buffer.
+ // This is a valid case and isn't fatal. Remember the buffer is an error so as
+ // to skip subsequent mapping operations.
+ resultData->mapWriteState = BufferMapWriteState::MapError;
+ return true;
+ }
+ ASSERT(mapping != nullptr);
+ writeHandle->SetTarget(mapping);
- // Create and register the buffer object.
- auto* resultData = BufferObjects().Allocate(bufferResult.id);
- if (resultData == nullptr) {
- return false;
- }
- resultData->generation = bufferResult.generation;
- resultData->handle = mProcs.deviceCreateBuffer(device->handle, descriptor);
- resultData->deviceInfo = device->info.get();
- resultData->usage = descriptor->usage;
- resultData->mappedAtCreation = descriptor->mappedAtCreation;
- if (!TrackDeviceChild(resultData->deviceInfo, ObjectType::Buffer, bufferResult.id)) {
- return false;
+ resultData->mapWriteState = BufferMapWriteState::Mapped;
}
+ }
- // isReadMode and isWriteMode could be true at the same time if usage contains
- // WGPUMapMode_Read and buffer is mappedAtCreation
- bool isReadMode = descriptor->usage & WGPUMapMode_Read;
- bool isWriteMode = descriptor->usage & WGPUMapMode_Write || descriptor->mappedAtCreation;
-
- // This is the size of data deserialized from the command stream to create the read/write
- // handle, which must be CPU-addressable.
- if (readHandleCreateInfoLength > std::numeric_limits<size_t>::max() ||
- writeHandleCreateInfoLength > std::numeric_limits<size_t>::max() ||
- readHandleCreateInfoLength >
- std::numeric_limits<size_t>::max() - writeHandleCreateInfoLength) {
+ if (isReadMode) {
+ MemoryTransferService::ReadHandle* readHandle = nullptr;
+ // Deserialize metadata produced from the client to create a companion server handle.
+ if (!mMemoryTransferService->DeserializeReadHandle(
+ readHandleCreateInfo, static_cast<size_t>(readHandleCreateInfoLength),
+ &readHandle)) {
return false;
}
+ ASSERT(readHandle != nullptr);
- if (isWriteMode) {
- MemoryTransferService::WriteHandle* writeHandle = nullptr;
- // Deserialize metadata produced from the client to create a companion server handle.
- if (!mMemoryTransferService->DeserializeWriteHandle(
- writeHandleCreateInfo, static_cast<size_t>(writeHandleCreateInfoLength),
- &writeHandle)) {
- return false;
- }
- ASSERT(writeHandle != nullptr);
- resultData->writeHandle.reset(writeHandle);
- writeHandle->SetDataLength(descriptor->size);
-
- if (descriptor->mappedAtCreation) {
- void* mapping =
- mProcs.bufferGetMappedRange(resultData->handle, 0, descriptor->size);
- if (mapping == nullptr) {
- // A zero mapping is used to indicate an allocation error of an error buffer.
- // This is a valid case and isn't fatal. Remember the buffer is an error so as
- // to skip subsequent mapping operations.
- resultData->mapWriteState = BufferMapWriteState::MapError;
- return true;
- }
- ASSERT(mapping != nullptr);
- writeHandle->SetTarget(mapping);
-
- resultData->mapWriteState = BufferMapWriteState::Mapped;
- }
- }
-
- if (isReadMode) {
- MemoryTransferService::ReadHandle* readHandle = nullptr;
- // Deserialize metadata produced from the client to create a companion server handle.
- if (!mMemoryTransferService->DeserializeReadHandle(
- readHandleCreateInfo, static_cast<size_t>(readHandleCreateInfoLength),
- &readHandle)) {
- return false;
- }
- ASSERT(readHandle != nullptr);
-
- resultData->readHandle.reset(readHandle);
- }
-
- return true;
+ resultData->readHandle.reset(readHandle);
}
- bool Server::DoBufferUpdateMappedData(ObjectId bufferId,
- uint64_t writeDataUpdateInfoLength,
- const uint8_t* writeDataUpdateInfo,
- uint64_t offset,
- uint64_t size) {
- // The null object isn't valid as `self`
- if (bufferId == 0) {
- return false;
- }
+ return true;
+}
+
+bool Server::DoBufferUpdateMappedData(ObjectId bufferId,
+ uint64_t writeDataUpdateInfoLength,
+ const uint8_t* writeDataUpdateInfo,
+ uint64_t offset,
+ uint64_t size) {
+ // The null object isn't valid as `self`
+ if (bufferId == 0) {
+ return false;
+ }
- if (writeDataUpdateInfoLength > std::numeric_limits<size_t>::max() ||
- offset > std::numeric_limits<size_t>::max() ||
- size > std::numeric_limits<size_t>::max()) {
- return false;
- }
+ if (writeDataUpdateInfoLength > std::numeric_limits<size_t>::max() ||
+ offset > std::numeric_limits<size_t>::max() || size > std::numeric_limits<size_t>::max()) {
+ return false;
+ }
- auto* buffer = BufferObjects().Get(bufferId);
- if (buffer == nullptr) {
- return false;
- }
- switch (buffer->mapWriteState) {
- case BufferMapWriteState::Unmapped:
- return false;
- case BufferMapWriteState::MapError:
- // The buffer is mapped but there was an error allocating mapped data.
- // Do not perform the memcpy.
- return true;
- case BufferMapWriteState::Mapped:
- break;
- }
- if (!buffer->writeHandle) {
- // This check is performed after the check for the MapError state. It is permissible
- // to Unmap and attempt to update mapped data of an error buffer.
+ auto* buffer = BufferObjects().Get(bufferId);
+ if (buffer == nullptr) {
+ return false;
+ }
+ switch (buffer->mapWriteState) {
+ case BufferMapWriteState::Unmapped:
return false;
- }
+ case BufferMapWriteState::MapError:
+ // The buffer is mapped but there was an error allocating mapped data.
+ // Do not perform the memcpy.
+ return true;
+ case BufferMapWriteState::Mapped:
+ break;
+ }
+ if (!buffer->writeHandle) {
+ // This check is performed after the check for the MapError state. It is permissible
+ // to Unmap and attempt to update mapped data of an error buffer.
+ return false;
+ }
- // Deserialize the flush info and flush updated data from the handle into the target
- // of the handle. The target is set via WriteHandle::SetTarget.
- return buffer->writeHandle->DeserializeDataUpdate(
- writeDataUpdateInfo, static_cast<size_t>(writeDataUpdateInfoLength),
- static_cast<size_t>(offset), static_cast<size_t>(size));
+ // Deserialize the flush info and flush updated data from the handle into the target
+ // of the handle. The target is set via WriteHandle::SetTarget.
+ return buffer->writeHandle->DeserializeDataUpdate(
+ writeDataUpdateInfo, static_cast<size_t>(writeDataUpdateInfoLength),
+ static_cast<size_t>(offset), static_cast<size_t>(size));
+}
+
+void Server::OnBufferMapAsyncCallback(MapUserdata* data, WGPUBufferMapAsyncStatus status) {
+ // Skip sending the callback if the buffer has already been destroyed.
+ auto* bufferData = BufferObjects().Get(data->buffer.id);
+ if (bufferData == nullptr || bufferData->generation != data->buffer.generation) {
+ return;
}
- void Server::OnBufferMapAsyncCallback(MapUserdata* data, WGPUBufferMapAsyncStatus status) {
- // Skip sending the callback if the buffer has already been destroyed.
- auto* bufferData = BufferObjects().Get(data->buffer.id);
- if (bufferData == nullptr || bufferData->generation != data->buffer.generation) {
- return;
+ bool isRead = data->mode & WGPUMapMode_Read;
+ bool isSuccess = status == WGPUBufferMapAsyncStatus_Success;
+
+ ReturnBufferMapAsyncCallbackCmd cmd;
+ cmd.buffer = data->buffer;
+ cmd.requestSerial = data->requestSerial;
+ cmd.status = status;
+ cmd.readDataUpdateInfoLength = 0;
+ cmd.readDataUpdateInfo = nullptr;
+
+ const void* readData = nullptr;
+ if (isSuccess) {
+ if (isRead) {
+ // Get the serialization size of the message to initialize ReadHandle data.
+ readData = mProcs.bufferGetConstMappedRange(data->bufferObj, data->offset, data->size);
+ cmd.readDataUpdateInfoLength =
+ bufferData->readHandle->SizeOfSerializeDataUpdate(data->offset, data->size);
+ } else {
+ ASSERT(data->mode & WGPUMapMode_Write);
+ // The in-flight map request returned successfully.
+ bufferData->mapWriteState = BufferMapWriteState::Mapped;
+ // Set the target of the WriteHandle to the mapped buffer data.
+            // The writeHandle target always refers to the buffer base address,
+            // but we call getMappedRange with exactly the range of data that is potentially
+ // modified (i.e. we don't want getMappedRange(0, wholeBufferSize) if only a
+ // subset of the buffer is actually mapped) in case the implementation does some
+ // range tracking.
+ bufferData->writeHandle->SetTarget(static_cast<uint8_t*>(mProcs.bufferGetMappedRange(
+ data->bufferObj, data->offset, data->size)) -
+ data->offset);
}
+ }
- bool isRead = data->mode & WGPUMapMode_Read;
- bool isSuccess = status == WGPUBufferMapAsyncStatus_Success;
-
- ReturnBufferMapAsyncCallbackCmd cmd;
- cmd.buffer = data->buffer;
- cmd.requestSerial = data->requestSerial;
- cmd.status = status;
- cmd.readDataUpdateInfoLength = 0;
- cmd.readDataUpdateInfo = nullptr;
-
- const void* readData = nullptr;
- if (isSuccess) {
- if (isRead) {
- // Get the serialization size of the message to initialize ReadHandle data.
- readData =
- mProcs.bufferGetConstMappedRange(data->bufferObj, data->offset, data->size);
- cmd.readDataUpdateInfoLength =
- bufferData->readHandle->SizeOfSerializeDataUpdate(data->offset, data->size);
- } else {
- ASSERT(data->mode & WGPUMapMode_Write);
- // The in-flight map request returned successfully.
- bufferData->mapWriteState = BufferMapWriteState::Mapped;
- // Set the target of the WriteHandle to the mapped buffer data.
- // writeHandle Target always refers to the buffer base address.
- // but we call getMappedRange exactly with the range of data that is potentially
- // modified (i.e. we don't want getMappedRange(0, wholeBufferSize) if only a
- // subset of the buffer is actually mapped) in case the implementation does some
- // range tracking.
- bufferData->writeHandle->SetTarget(
- static_cast<uint8_t*>(
- mProcs.bufferGetMappedRange(data->bufferObj, data->offset, data->size)) -
- data->offset);
- }
+ SerializeCommand(cmd, cmd.readDataUpdateInfoLength, [&](SerializeBuffer* serializeBuffer) {
+ if (isSuccess && isRead) {
+ char* readHandleBuffer;
+ WIRE_TRY(serializeBuffer->NextN(cmd.readDataUpdateInfoLength, &readHandleBuffer));
+ // The in-flight map request returned successfully.
+ bufferData->readHandle->SerializeDataUpdate(readData, data->offset, data->size,
+ readHandleBuffer);
}
-
- SerializeCommand(cmd, cmd.readDataUpdateInfoLength, [&](SerializeBuffer* serializeBuffer) {
- if (isSuccess && isRead) {
- char* readHandleBuffer;
- WIRE_TRY(serializeBuffer->NextN(cmd.readDataUpdateInfoLength, &readHandleBuffer));
- // The in-flight map request returned successfully.
- bufferData->readHandle->SerializeDataUpdate(readData, data->offset, data->size,
- readHandleBuffer);
- }
- return WireResult::Success;
- });
- }
+ return WireResult::Success;
+ });
+}
} // namespace dawn::wire::server
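
The size validation in DoBufferMapAsync and DoDeviceCreateBuffer narrows 64-bit wire values to size_t only after checking that they fit, and checks a sum without overflowing by comparing against the maximum minus the other operand. A standalone sketch of those guards, assuming nothing beyond the standard library:

#include <cstddef>
#include <cstdint>
#include <limits>
#include <optional>

// Narrow a 64-bit wire value to size_t only if it is CPU-addressable.
std::optional<size_t> ToAddressableSize(uint64_t value) {
    if (value > std::numeric_limits<size_t>::max()) {
        return std::nullopt;
    }
    return static_cast<size_t>(value);
}

// Check that a + b fits in size_t without computing an overflowing sum,
// mirroring the `readLen > max - writeLen` style check in DoDeviceCreateBuffer.
bool SumFitsInSizeT(uint64_t a, uint64_t b) {
    constexpr uint64_t kMax = std::numeric_limits<size_t>::max();
    return a <= kMax && b <= kMax && a <= kMax - b;
}
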
diff --git a/chromium/third_party/dawn/src/dawn/wire/server/ServerDevice.cpp b/chromium/third_party/dawn/src/dawn/wire/server/ServerDevice.cpp
index 45fb6b8472f..27978155a35 100644
--- a/chromium/third_party/dawn/src/dawn/wire/server/ServerDevice.cpp
+++ b/chromium/third_party/dawn/src/dawn/wire/server/ServerDevice.cpp
@@ -16,185 +16,165 @@
namespace dawn::wire::server {
- namespace {
-
- template <ObjectType objectType, typename Pipeline>
- void HandleCreateRenderPipelineAsyncCallbackResult(KnownObjects<Pipeline>* knownObjects,
- WGPUCreatePipelineAsyncStatus status,
- Pipeline pipeline,
- CreatePipelineAsyncUserData* data) {
- // May be null if the device was destroyed. Device destruction destroys child
- // objects on the wire.
- auto* pipelineObject =
- knownObjects->Get(data->pipelineObjectID, AllocationState::Reserved);
- // Should be impossible to fail. ObjectIds can't be freed by a destroy command until
- // they move from Reserved to Allocated, or if they are destroyed here.
- ASSERT(pipelineObject != nullptr);
-
- if (status == WGPUCreatePipelineAsyncStatus_Success) {
- // Assign the handle and allocated status if the pipeline is created successfully.
- pipelineObject->state = AllocationState::Allocated;
- pipelineObject->handle = pipeline;
-
- // This should be impossible to fail. It would require a command to be sent that
- // creates a duplicate ObjectId, which would fail validation.
- bool success = TrackDeviceChild(pipelineObject->deviceInfo, objectType,
- data->pipelineObjectID);
- ASSERT(success);
- } else {
- // Otherwise, free the ObjectId which will make it unusable.
- knownObjects->Free(data->pipelineObjectID);
- ASSERT(pipeline == nullptr);
- }
- }
-
- } // anonymous namespace
-
- void Server::OnUncapturedError(ObjectHandle device, WGPUErrorType type, const char* message) {
- ReturnDeviceUncapturedErrorCallbackCmd cmd;
- cmd.device = device;
- cmd.type = type;
- cmd.message = message;
-
- SerializeCommand(cmd);
+namespace {
+
+template <ObjectType objectType, typename Pipeline>
+void HandleCreatePipelineAsyncCallback(KnownObjects<Pipeline>* knownObjects,
+ WGPUCreatePipelineAsyncStatus status,
+ Pipeline pipeline,
+ CreatePipelineAsyncUserData* data) {
+ if (status == WGPUCreatePipelineAsyncStatus_Success) {
+ auto* pipelineObject = knownObjects->FillReservation(data->pipelineObjectID, pipeline);
+ ASSERT(pipelineObject != nullptr);
+ } else {
+ // Otherwise, free the ObjectId which will make it unusable.
+ knownObjects->Free(data->pipelineObjectID);
+ ASSERT(pipeline == nullptr);
}
+}
- void Server::OnDeviceLost(ObjectHandle device,
- WGPUDeviceLostReason reason,
- const char* message) {
- ReturnDeviceLostCallbackCmd cmd;
- cmd.device = device;
- cmd.reason = reason;
- cmd.message = message;
+} // anonymous namespace
- SerializeCommand(cmd);
- }
+void Server::OnUncapturedError(ObjectHandle device, WGPUErrorType type, const char* message) {
+ ReturnDeviceUncapturedErrorCallbackCmd cmd;
+ cmd.device = device;
+ cmd.type = type;
+ cmd.message = message;
- void Server::OnLogging(ObjectHandle device, WGPULoggingType type, const char* message) {
- ReturnDeviceLoggingCallbackCmd cmd;
- cmd.device = device;
- cmd.type = type;
- cmd.message = message;
+ SerializeCommand(cmd);
+}
- SerializeCommand(cmd);
- }
+void Server::OnDeviceLost(ObjectHandle device, WGPUDeviceLostReason reason, const char* message) {
+ ReturnDeviceLostCallbackCmd cmd;
+ cmd.device = device;
+ cmd.reason = reason;
+ cmd.message = message;
- bool Server::DoDevicePopErrorScope(ObjectId deviceId, uint64_t requestSerial) {
- auto* device = DeviceObjects().Get(deviceId);
- if (device == nullptr) {
- return false;
- }
+ SerializeCommand(cmd);
+}
- auto userdata = MakeUserdata<ErrorScopeUserdata>();
- userdata->requestSerial = requestSerial;
- userdata->device = ObjectHandle{deviceId, device->generation};
+void Server::OnLogging(ObjectHandle device, WGPULoggingType type, const char* message) {
+ ReturnDeviceLoggingCallbackCmd cmd;
+ cmd.device = device;
+ cmd.type = type;
+ cmd.message = message;
- mProcs.devicePopErrorScope(device->handle, ForwardToServer<&Server::OnDevicePopErrorScope>,
- userdata.release());
- return true;
- }
+ SerializeCommand(cmd);
+}
- void Server::OnDevicePopErrorScope(ErrorScopeUserdata* userdata,
- WGPUErrorType type,
- const char* message) {
- ReturnDevicePopErrorScopeCallbackCmd cmd;
- cmd.device = userdata->device;
- cmd.requestSerial = userdata->requestSerial;
- cmd.type = type;
- cmd.message = message;
-
- SerializeCommand(cmd);
+bool Server::DoDevicePopErrorScope(ObjectId deviceId, uint64_t requestSerial) {
+ auto* device = DeviceObjects().Get(deviceId);
+ if (device == nullptr) {
+ return false;
}
- bool Server::DoDeviceCreateComputePipelineAsync(
- ObjectId deviceId,
- uint64_t requestSerial,
- ObjectHandle pipelineObjectHandle,
- const WGPUComputePipelineDescriptor* descriptor) {
- auto* device = DeviceObjects().Get(deviceId);
- if (device == nullptr) {
- return false;
- }
-
- auto* resultData =
- ComputePipelineObjects().Allocate(pipelineObjectHandle.id, AllocationState::Reserved);
- if (resultData == nullptr) {
- return false;
- }
-
- resultData->generation = pipelineObjectHandle.generation;
- resultData->deviceInfo = device->info.get();
-
- auto userdata = MakeUserdata<CreatePipelineAsyncUserData>();
- userdata->device = ObjectHandle{deviceId, device->generation};
- userdata->requestSerial = requestSerial;
- userdata->pipelineObjectID = pipelineObjectHandle.id;
-
- mProcs.deviceCreateComputePipelineAsync(
- device->handle, descriptor,
- ForwardToServer<&Server::OnCreateComputePipelineAsyncCallback>, userdata.release());
- return true;
+ auto userdata = MakeUserdata<ErrorScopeUserdata>();
+ userdata->requestSerial = requestSerial;
+ userdata->device = ObjectHandle{deviceId, device->generation};
+
+ mProcs.devicePopErrorScope(device->handle, ForwardToServer<&Server::OnDevicePopErrorScope>,
+ userdata.release());
+ return true;
+}
+
+void Server::OnDevicePopErrorScope(ErrorScopeUserdata* userdata,
+ WGPUErrorType type,
+ const char* message) {
+ ReturnDevicePopErrorScopeCallbackCmd cmd;
+ cmd.device = userdata->device;
+ cmd.requestSerial = userdata->requestSerial;
+ cmd.type = type;
+ cmd.message = message;
+
+ SerializeCommand(cmd);
+}
+
+bool Server::DoDeviceCreateComputePipelineAsync(ObjectId deviceId,
+ uint64_t requestSerial,
+ ObjectHandle pipelineObjectHandle,
+ const WGPUComputePipelineDescriptor* descriptor) {
+ auto* device = DeviceObjects().Get(deviceId);
+ if (device == nullptr) {
+ return false;
}
- void Server::OnCreateComputePipelineAsyncCallback(CreatePipelineAsyncUserData* data,
- WGPUCreatePipelineAsyncStatus status,
- WGPUComputePipeline pipeline,
- const char* message) {
- HandleCreateRenderPipelineAsyncCallbackResult<ObjectType::ComputePipeline>(
- &ComputePipelineObjects(), status, pipeline, data);
-
- ReturnDeviceCreateComputePipelineAsyncCallbackCmd cmd;
- cmd.device = data->device;
- cmd.status = status;
- cmd.requestSerial = data->requestSerial;
- cmd.message = message;
-
- SerializeCommand(cmd);
+ auto* resultData =
+ ComputePipelineObjects().Allocate(pipelineObjectHandle.id, AllocationState::Reserved);
+ if (resultData == nullptr) {
+ return false;
}
- bool Server::DoDeviceCreateRenderPipelineAsync(ObjectId deviceId,
- uint64_t requestSerial,
- ObjectHandle pipelineObjectHandle,
- const WGPURenderPipelineDescriptor* descriptor) {
- auto* device = DeviceObjects().Get(deviceId);
- if (device == nullptr) {
- return false;
- }
-
- auto* resultData =
- RenderPipelineObjects().Allocate(pipelineObjectHandle.id, AllocationState::Reserved);
- if (resultData == nullptr) {
- return false;
- }
-
- resultData->generation = pipelineObjectHandle.generation;
- resultData->deviceInfo = device->info.get();
-
- auto userdata = MakeUserdata<CreatePipelineAsyncUserData>();
- userdata->device = ObjectHandle{deviceId, device->generation};
- userdata->requestSerial = requestSerial;
- userdata->pipelineObjectID = pipelineObjectHandle.id;
-
- mProcs.deviceCreateRenderPipelineAsync(
- device->handle, descriptor,
- ForwardToServer<&Server::OnCreateRenderPipelineAsyncCallback>, userdata.release());
- return true;
+ resultData->generation = pipelineObjectHandle.generation;
+
+ auto userdata = MakeUserdata<CreatePipelineAsyncUserData>();
+ userdata->device = ObjectHandle{deviceId, device->generation};
+ userdata->requestSerial = requestSerial;
+ userdata->pipelineObjectID = pipelineObjectHandle.id;
+
+ mProcs.deviceCreateComputePipelineAsync(
+ device->handle, descriptor, ForwardToServer<&Server::OnCreateComputePipelineAsyncCallback>,
+ userdata.release());
+ return true;
+}
+
+void Server::OnCreateComputePipelineAsyncCallback(CreatePipelineAsyncUserData* data,
+ WGPUCreatePipelineAsyncStatus status,
+ WGPUComputePipeline pipeline,
+ const char* message) {
+ HandleCreatePipelineAsyncCallback<ObjectType::ComputePipeline>(&ComputePipelineObjects(),
+ status, pipeline, data);
+
+ ReturnDeviceCreateComputePipelineAsyncCallbackCmd cmd;
+ cmd.device = data->device;
+ cmd.status = status;
+ cmd.requestSerial = data->requestSerial;
+ cmd.message = message;
+
+ SerializeCommand(cmd);
+}
+
+bool Server::DoDeviceCreateRenderPipelineAsync(ObjectId deviceId,
+ uint64_t requestSerial,
+ ObjectHandle pipelineObjectHandle,
+ const WGPURenderPipelineDescriptor* descriptor) {
+ auto* device = DeviceObjects().Get(deviceId);
+ if (device == nullptr) {
+ return false;
}
- void Server::OnCreateRenderPipelineAsyncCallback(CreatePipelineAsyncUserData* data,
- WGPUCreatePipelineAsyncStatus status,
- WGPURenderPipeline pipeline,
- const char* message) {
- HandleCreateRenderPipelineAsyncCallbackResult<ObjectType::RenderPipeline>(
- &RenderPipelineObjects(), status, pipeline, data);
-
- ReturnDeviceCreateRenderPipelineAsyncCallbackCmd cmd;
- cmd.device = data->device;
- cmd.status = status;
- cmd.requestSerial = data->requestSerial;
- cmd.message = message;
-
- SerializeCommand(cmd);
+ auto* resultData =
+ RenderPipelineObjects().Allocate(pipelineObjectHandle.id, AllocationState::Reserved);
+ if (resultData == nullptr) {
+ return false;
}
+ resultData->generation = pipelineObjectHandle.generation;
+
+ auto userdata = MakeUserdata<CreatePipelineAsyncUserData>();
+ userdata->device = ObjectHandle{deviceId, device->generation};
+ userdata->requestSerial = requestSerial;
+ userdata->pipelineObjectID = pipelineObjectHandle.id;
+
+ mProcs.deviceCreateRenderPipelineAsync(
+ device->handle, descriptor, ForwardToServer<&Server::OnCreateRenderPipelineAsyncCallback>,
+ userdata.release());
+ return true;
+}
+
+void Server::OnCreateRenderPipelineAsyncCallback(CreatePipelineAsyncUserData* data,
+ WGPUCreatePipelineAsyncStatus status,
+ WGPURenderPipeline pipeline,
+ const char* message) {
+ HandleCreatePipelineAsyncCallback<ObjectType::RenderPipeline>(&RenderPipelineObjects(), status,
+ pipeline, data);
+
+ ReturnDeviceCreateRenderPipelineAsyncCallbackCmd cmd;
+ cmd.device = data->device;
+ cmd.status = status;
+ cmd.requestSerial = data->requestSerial;
+ cmd.message = message;
+
+ SerializeCommand(cmd);
+}
+
} // namespace dawn::wire::server
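
HandleCreatePipelineAsyncCallback resolves a previously reserved ObjectId in one of two ways: fill the reservation with the created handle on success, or free the id on failure so later references to it fail validation. A toy sketch of that reserve/fill-or-free lifecycle; the Reservations class is illustrative and not the KnownObjects API:

#include <cassert>
#include <cstdint>
#include <optional>
#include <unordered_map>

// A toy reservation table: an id is reserved before the async call, then either
// filled with the created handle on success or freed on failure.
template <typename Handle>
class Reservations {
  public:
    void Reserve(uint64_t id) { mEntries.emplace(id, std::nullopt); }

    // Returns false if the id was never reserved (or was already freed).
    bool Fill(uint64_t id, Handle handle) {
        auto it = mEntries.find(id);
        if (it == mEntries.end()) {
            return false;
        }
        it->second = handle;
        return true;
    }

    void Free(uint64_t id) { mEntries.erase(id); }

  private:
    std::unordered_map<uint64_t, std::optional<Handle>> mEntries;
};

void OnAsyncCreateDone(Reservations<int>* table, uint64_t id, bool success, int handle) {
    if (success) {
        [[maybe_unused]] bool filled = table->Fill(id, handle);
        assert(filled);
    } else {
        // Free the reserved id so later commands referring to it fail validation.
        table->Free(id);
    }
}
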
diff --git a/chromium/third_party/dawn/src/dawn/wire/server/ServerInlineMemoryTransferService.cpp b/chromium/third_party/dawn/src/dawn/wire/server/ServerInlineMemoryTransferService.cpp
index 0e6b30aba67..6f5884a7984 100644
--- a/chromium/third_party/dawn/src/dawn/wire/server/ServerInlineMemoryTransferService.cpp
+++ b/chromium/third_party/dawn/src/dawn/wire/server/ServerInlineMemoryTransferService.cpp
@@ -12,83 +12,79 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+#include <cstring>
+#include <memory>
+
#include "dawn/common/Assert.h"
#include "dawn/wire/WireServer.h"
#include "dawn/wire/server/Server.h"
-#include <cstring>
-
namespace dawn::wire::server {
- class InlineMemoryTransferService : public MemoryTransferService {
+class InlineMemoryTransferService : public MemoryTransferService {
+ public:
+ class ReadHandleImpl : public ReadHandle {
public:
- class ReadHandleImpl : public ReadHandle {
- public:
- ReadHandleImpl() {
- }
- ~ReadHandleImpl() override = default;
-
- size_t SizeOfSerializeDataUpdate(size_t offset, size_t size) override {
- return size;
- }
-
- void SerializeDataUpdate(const void* data,
- size_t offset,
- size_t size,
- void* serializePointer) override {
- if (size > 0) {
- ASSERT(data != nullptr);
- ASSERT(serializePointer != nullptr);
- memcpy(serializePointer, data, size);
- }
- }
- };
+ ReadHandleImpl() {}
+ ~ReadHandleImpl() override = default;
- class WriteHandleImpl : public WriteHandle {
- public:
- WriteHandleImpl() {
- }
- ~WriteHandleImpl() override = default;
+ size_t SizeOfSerializeDataUpdate(size_t offset, size_t size) override { return size; }
- bool DeserializeDataUpdate(const void* deserializePointer,
- size_t deserializeSize,
- size_t offset,
- size_t size) override {
- if (deserializeSize != size || mTargetData == nullptr ||
- deserializePointer == nullptr) {
- return false;
- }
- if ((offset >= mDataLength && offset > 0) || size > mDataLength - offset) {
- return false;
- }
- memcpy(static_cast<uint8_t*>(mTargetData) + offset, deserializePointer, size);
- return true;
+ void SerializeDataUpdate(const void* data,
+ size_t offset,
+ size_t size,
+ void* serializePointer) override {
+ if (size > 0) {
+ ASSERT(data != nullptr);
+ ASSERT(serializePointer != nullptr);
+ memcpy(serializePointer, data, size);
}
- };
-
- InlineMemoryTransferService() {
}
- ~InlineMemoryTransferService() override = default;
+ };
- bool DeserializeReadHandle(const void* deserializePointer,
- size_t deserializeSize,
- ReadHandle** readHandle) override {
- ASSERT(readHandle != nullptr);
- *readHandle = new ReadHandleImpl();
- return true;
- }
+ class WriteHandleImpl : public WriteHandle {
+ public:
+ WriteHandleImpl() {}
+ ~WriteHandleImpl() override = default;
- bool DeserializeWriteHandle(const void* deserializePointer,
- size_t deserializeSize,
- WriteHandle** writeHandle) override {
- ASSERT(writeHandle != nullptr);
- *writeHandle = new WriteHandleImpl();
+ bool DeserializeDataUpdate(const void* deserializePointer,
+ size_t deserializeSize,
+ size_t offset,
+ size_t size) override {
+ if (deserializeSize != size || mTargetData == nullptr ||
+ deserializePointer == nullptr) {
+ return false;
+ }
+ if ((offset >= mDataLength && offset > 0) || size > mDataLength - offset) {
+ return false;
+ }
+ memcpy(static_cast<uint8_t*>(mTargetData) + offset, deserializePointer, size);
return true;
}
};
- std::unique_ptr<MemoryTransferService> CreateInlineMemoryTransferService() {
- return std::make_unique<InlineMemoryTransferService>();
+ InlineMemoryTransferService() {}
+ ~InlineMemoryTransferService() override = default;
+
+ bool DeserializeReadHandle(const void* deserializePointer,
+ size_t deserializeSize,
+ ReadHandle** readHandle) override {
+ ASSERT(readHandle != nullptr);
+ *readHandle = new ReadHandleImpl();
+ return true;
}
+ bool DeserializeWriteHandle(const void* deserializePointer,
+ size_t deserializeSize,
+ WriteHandle** writeHandle) override {
+ ASSERT(writeHandle != nullptr);
+ *writeHandle = new WriteHandleImpl();
+ return true;
+ }
+};
+
+std::unique_ptr<MemoryTransferService> CreateInlineMemoryTransferService() {
+ return std::make_unique<InlineMemoryTransferService>();
+}
+
} // namespace dawn::wire::server
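
WriteHandleImpl::DeserializeDataUpdate above is essentially a bounds-checked memcpy into a previously registered target range, with the size check written as a subtraction to avoid overflow in offset + size. A reduced sketch of that check for a plain byte buffer:

#include <cstddef>
#include <cstdint>
#include <cstring>

// Copy `size` bytes from `src` into `dst + offset`, refusing any write that
// would step outside `dstLength`. The subtraction form avoids overflow in
// computing `offset + size`.
bool CopyIntoRange(uint8_t* dst, size_t dstLength, size_t offset, const void* src, size_t size) {
    if (dst == nullptr || src == nullptr) {
        return false;
    }
    if (offset > dstLength || size > dstLength - offset) {
        return false;
    }
    std::memcpy(dst + offset, src, size);
    return true;
}
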
diff --git a/chromium/third_party/dawn/src/dawn/wire/server/ServerInstance.cpp b/chromium/third_party/dawn/src/dawn/wire/server/ServerInstance.cpp
index d39dadec93d..dcdf62cc24e 100644
--- a/chromium/third_party/dawn/src/dawn/wire/server/ServerInstance.cpp
+++ b/chromium/third_party/dawn/src/dawn/wire/server/ServerInstance.cpp
@@ -12,89 +12,83 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-#include "dawn/wire/server/Server.h"
+#include <algorithm>
+#include <vector>
#include "dawn/wire/SupportedFeatures.h"
-
-#include <algorithm>
+#include "dawn/wire/server/Server.h"
namespace dawn::wire::server {
- bool Server::DoInstanceRequestAdapter(ObjectId instanceId,
- uint64_t requestSerial,
- ObjectHandle adapterHandle,
- const WGPURequestAdapterOptions* options) {
- auto* instance = InstanceObjects().Get(instanceId);
- if (instance == nullptr) {
- return false;
- }
-
- auto* resultData = AdapterObjects().Allocate(adapterHandle.id, AllocationState::Reserved);
- if (resultData == nullptr) {
- return false;
- }
-
- resultData->generation = adapterHandle.generation;
-
- auto userdata = MakeUserdata<RequestAdapterUserdata>();
- userdata->instance = ObjectHandle{instanceId, instance->generation};
- userdata->requestSerial = requestSerial;
- userdata->adapterObjectId = adapterHandle.id;
-
- mProcs.instanceRequestAdapter(instance->handle, options,
- ForwardToServer<&Server::OnRequestAdapterCallback>,
- userdata.release());
- return true;
+bool Server::DoInstanceRequestAdapter(ObjectId instanceId,
+ uint64_t requestSerial,
+ ObjectHandle adapterHandle,
+ const WGPURequestAdapterOptions* options) {
+ auto* instance = InstanceObjects().Get(instanceId);
+ if (instance == nullptr) {
+ return false;
}
- void Server::OnRequestAdapterCallback(RequestAdapterUserdata* data,
- WGPURequestAdapterStatus status,
- WGPUAdapter adapter,
- const char* message) {
- auto* adapterObject =
- AdapterObjects().Get(data->adapterObjectId, AllocationState::Reserved);
- // Should be impossible to fail. ObjectIds can't be freed by a destroy command until
- // they move from Reserved to Allocated, or if they are destroyed here.
- ASSERT(adapterObject != nullptr);
-
- ReturnInstanceRequestAdapterCallbackCmd cmd = {};
- cmd.instance = data->instance;
- cmd.requestSerial = data->requestSerial;
- cmd.status = status;
- cmd.message = message;
-
- if (status != WGPURequestAdapterStatus_Success) {
- // Free the ObjectId which will make it unusable.
- AdapterObjects().Free(data->adapterObjectId);
- ASSERT(adapter == nullptr);
- SerializeCommand(cmd);
- return;
- }
-
- WGPUAdapterProperties properties = {};
- WGPUSupportedLimits limits = {};
- std::vector<WGPUFeatureName> features;
-
- // Assign the handle and allocated status if the adapter is created successfully.
- adapterObject->state = AllocationState::Allocated;
- adapterObject->handle = adapter;
-
- size_t featuresCount = mProcs.adapterEnumerateFeatures(adapter, nullptr);
- features.resize(featuresCount);
- mProcs.adapterEnumerateFeatures(adapter, features.data());
-
- // Hide features the wire cannot support.
- auto it = std::partition(features.begin(), features.end(), IsFeatureSupported);
-
- cmd.featuresCount = std::distance(features.begin(), it);
- cmd.features = features.data();
-
- mProcs.adapterGetProperties(adapter, &properties);
- mProcs.adapterGetLimits(adapter, &limits);
- cmd.properties = &properties;
- cmd.limits = &limits;
+ auto* resultData = AdapterObjects().Allocate(adapterHandle.id, AllocationState::Reserved);
+ if (resultData == nullptr) {
+ return false;
+ }
+ resultData->generation = adapterHandle.generation;
+
+ auto userdata = MakeUserdata<RequestAdapterUserdata>();
+ userdata->instance = ObjectHandle{instanceId, instance->generation};
+ userdata->requestSerial = requestSerial;
+ userdata->adapterObjectId = adapterHandle.id;
+
+ mProcs.instanceRequestAdapter(instance->handle, options,
+ ForwardToServer<&Server::OnRequestAdapterCallback>,
+ userdata.release());
+ return true;
+}
+
+void Server::OnRequestAdapterCallback(RequestAdapterUserdata* data,
+ WGPURequestAdapterStatus status,
+ WGPUAdapter adapter,
+ const char* message) {
+ ReturnInstanceRequestAdapterCallbackCmd cmd = {};
+ cmd.instance = data->instance;
+ cmd.requestSerial = data->requestSerial;
+ cmd.status = status;
+ cmd.message = message;
+
+ if (status != WGPURequestAdapterStatus_Success) {
+ // Free the ObjectId which will make it unusable.
+ AdapterObjects().Free(data->adapterObjectId);
+ ASSERT(adapter == nullptr);
SerializeCommand(cmd);
+ return;
}
+ WGPUAdapterProperties properties = {};
+ WGPUSupportedLimits limits = {};
+ std::vector<WGPUFeatureName> features;
+
+ // Assign the handle and allocated status if the adapter is created successfully.
+ auto* adapterObject = AdapterObjects().FillReservation(data->adapterObjectId, adapter);
+ ASSERT(adapterObject != nullptr);
+
+ size_t featuresCount = mProcs.adapterEnumerateFeatures(adapter, nullptr);
+ features.resize(featuresCount);
+ mProcs.adapterEnumerateFeatures(adapter, features.data());
+
+ // Hide features the wire cannot support.
+ auto it = std::partition(features.begin(), features.end(), IsFeatureSupported);
+
+ cmd.featuresCount = static_cast<uint32_t>(std::distance(features.begin(), it));
+ cmd.features = features.data();
+
+ mProcs.adapterGetProperties(adapter, &properties);
+ mProcs.adapterGetLimits(adapter, &limits);
+ cmd.properties = &properties;
+ cmd.limits = &limits;
+
+ SerializeCommand(cmd);
+}
+
} // namespace dawn::wire::server
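
Note: the callback above hides features the wire cannot transport by partitioning the enumerated list and serializing only the supported prefix. A minimal standalone sketch of that std::partition pattern follows; the feature values and the IsFeatureSupported predicate here are made up for illustration and are not the Dawn helpers.

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    // Stand-in predicate; dawn::wire has its own IsFeatureSupported.
    static bool IsFeatureSupported(uint32_t feature) {
        return feature != 0xDEADu;  // pretend exactly one feature is unsupported
    }

    int main() {
        std::vector<uint32_t> features = {1u, 0xDEADu, 2u, 3u};
        // Supported features move to the front; the unsupported tail is left out of
        // the serialized command by counting only up to the partition point.
        auto it = std::partition(features.begin(), features.end(), IsFeatureSupported);
        uint32_t featuresCount = static_cast<uint32_t>(std::distance(features.begin(), it));
        return featuresCount == 3u ? 0 : 1;
    }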
diff --git a/chromium/third_party/dawn/src/dawn/wire/server/ServerMemoryTransferService_mock.cpp b/chromium/third_party/dawn/src/dawn/wire/server/ServerMemoryTransferService_mock.cpp
index 758c344e1ba..20fd8b6dd96 100644
--- a/chromium/third_party/dawn/src/dawn/wire/server/ServerMemoryTransferService_mock.cpp
+++ b/chromium/third_party/dawn/src/dawn/wire/server/ServerMemoryTransferService_mock.cpp
@@ -18,74 +18,71 @@
namespace dawn::wire::server {
- MockMemoryTransferService::MockReadHandle::MockReadHandle(MockMemoryTransferService* service)
- : ReadHandle(), mService(service) {
- }
-
- MockMemoryTransferService::MockReadHandle::~MockReadHandle() {
- mService->OnReadHandleDestroy(this);
- }
-
- size_t MockMemoryTransferService::MockReadHandle::SizeOfSerializeDataUpdate(size_t offset,
- size_t size) {
- return mService->OnReadHandleSizeOfSerializeDataUpdate(this, offset, size);
- }
-
- void MockMemoryTransferService::MockReadHandle::SerializeDataUpdate(const void* data,
- size_t offset,
- size_t size,
- void* serializePointer) {
- mService->OnReadHandleSerializeDataUpdate(this, data, offset, size, serializePointer);
- }
-
- MockMemoryTransferService::MockWriteHandle::MockWriteHandle(MockMemoryTransferService* service)
- : WriteHandle(), mService(service) {
- }
-
- MockMemoryTransferService::MockWriteHandle::~MockWriteHandle() {
- mService->OnWriteHandleDestroy(this);
- }
-
- const uint32_t* MockMemoryTransferService::MockWriteHandle::GetData() const {
- return reinterpret_cast<const uint32_t*>(mTargetData);
- }
-
- bool MockMemoryTransferService::MockWriteHandle::DeserializeDataUpdate(
- const void* deserializePointer,
- size_t deserializeSize,
- size_t offset,
- size_t size) {
- ASSERT(deserializeSize % sizeof(uint32_t) == 0);
- return mService->OnWriteHandleDeserializeDataUpdate(
- this, reinterpret_cast<const uint32_t*>(deserializePointer), deserializeSize, offset,
- size);
- }
-
- MockMemoryTransferService::MockMemoryTransferService() = default;
- MockMemoryTransferService::~MockMemoryTransferService() = default;
-
- bool MockMemoryTransferService::DeserializeReadHandle(const void* deserializePointer,
- size_t deserializeSize,
- ReadHandle** readHandle) {
- ASSERT(deserializeSize % sizeof(uint32_t) == 0);
- return OnDeserializeReadHandle(reinterpret_cast<const uint32_t*>(deserializePointer),
- deserializeSize, readHandle);
- }
-
- bool MockMemoryTransferService::DeserializeWriteHandle(const void* deserializePointer,
- size_t deserializeSize,
- WriteHandle** writeHandle) {
- ASSERT(deserializeSize % sizeof(uint32_t) == 0);
- return OnDeserializeWriteHandle(reinterpret_cast<const uint32_t*>(deserializePointer),
- deserializeSize, writeHandle);
- }
-
- MockMemoryTransferService::MockReadHandle* MockMemoryTransferService::NewReadHandle() {
- return new MockReadHandle(this);
- }
-
- MockMemoryTransferService::MockWriteHandle* MockMemoryTransferService::NewWriteHandle() {
- return new MockWriteHandle(this);
- }
+MockMemoryTransferService::MockReadHandle::MockReadHandle(MockMemoryTransferService* service)
+ : ReadHandle(), mService(service) {}
+
+MockMemoryTransferService::MockReadHandle::~MockReadHandle() {
+ mService->OnReadHandleDestroy(this);
+}
+
+size_t MockMemoryTransferService::MockReadHandle::SizeOfSerializeDataUpdate(size_t offset,
+ size_t size) {
+ return mService->OnReadHandleSizeOfSerializeDataUpdate(this, offset, size);
+}
+
+void MockMemoryTransferService::MockReadHandle::SerializeDataUpdate(const void* data,
+ size_t offset,
+ size_t size,
+ void* serializePointer) {
+ mService->OnReadHandleSerializeDataUpdate(this, data, offset, size, serializePointer);
+}
+
+MockMemoryTransferService::MockWriteHandle::MockWriteHandle(MockMemoryTransferService* service)
+ : WriteHandle(), mService(service) {}
+
+MockMemoryTransferService::MockWriteHandle::~MockWriteHandle() {
+ mService->OnWriteHandleDestroy(this);
+}
+
+const uint32_t* MockMemoryTransferService::MockWriteHandle::GetData() const {
+ return reinterpret_cast<const uint32_t*>(mTargetData);
+}
+
+bool MockMemoryTransferService::MockWriteHandle::DeserializeDataUpdate(
+ const void* deserializePointer,
+ size_t deserializeSize,
+ size_t offset,
+ size_t size) {
+ ASSERT(deserializeSize % sizeof(uint32_t) == 0);
+ return mService->OnWriteHandleDeserializeDataUpdate(
+ this, reinterpret_cast<const uint32_t*>(deserializePointer), deserializeSize, offset, size);
+}
+
+MockMemoryTransferService::MockMemoryTransferService() = default;
+MockMemoryTransferService::~MockMemoryTransferService() = default;
+
+bool MockMemoryTransferService::DeserializeReadHandle(const void* deserializePointer,
+ size_t deserializeSize,
+ ReadHandle** readHandle) {
+ ASSERT(deserializeSize % sizeof(uint32_t) == 0);
+ return OnDeserializeReadHandle(reinterpret_cast<const uint32_t*>(deserializePointer),
+ deserializeSize, readHandle);
+}
+
+bool MockMemoryTransferService::DeserializeWriteHandle(const void* deserializePointer,
+ size_t deserializeSize,
+ WriteHandle** writeHandle) {
+ ASSERT(deserializeSize % sizeof(uint32_t) == 0);
+ return OnDeserializeWriteHandle(reinterpret_cast<const uint32_t*>(deserializePointer),
+ deserializeSize, writeHandle);
+}
+
+MockMemoryTransferService::MockReadHandle* MockMemoryTransferService::NewReadHandle() {
+ return new MockReadHandle(this);
+}
+
+MockMemoryTransferService::MockWriteHandle* MockMemoryTransferService::NewWriteHandle() {
+ return new MockWriteHandle(this);
+}
} // namespace dawn::wire::server
diff --git a/chromium/third_party/dawn/src/dawn/wire/server/ServerMemoryTransferService_mock.h b/chromium/third_party/dawn/src/dawn/wire/server/ServerMemoryTransferService_mock.h
index 6cc72a695fe..1d02ccb280f 100644
--- a/chromium/third_party/dawn/src/dawn/wire/server/ServerMemoryTransferService_mock.h
+++ b/chromium/third_party/dawn/src/dawn/wire/server/ServerMemoryTransferService_mock.h
@@ -22,87 +22,87 @@
namespace dawn::wire::server {
- class MockMemoryTransferService : public MemoryTransferService {
+class MockMemoryTransferService : public MemoryTransferService {
+ public:
+ class MockReadHandle : public ReadHandle {
public:
- class MockReadHandle : public ReadHandle {
- public:
- explicit MockReadHandle(MockMemoryTransferService* service);
- ~MockReadHandle() override;
+ explicit MockReadHandle(MockMemoryTransferService* service);
+ ~MockReadHandle() override;
- size_t SizeOfSerializeDataUpdate(size_t offset, size_t size) override;
- void SerializeDataUpdate(const void* data,
- size_t offset,
- size_t size,
- void* serializePointer) override;
+ size_t SizeOfSerializeDataUpdate(size_t offset, size_t size) override;
+ void SerializeDataUpdate(const void* data,
+ size_t offset,
+ size_t size,
+ void* serializePointer) override;
- private:
- MockMemoryTransferService* mService;
- };
-
- class MockWriteHandle : public WriteHandle {
- public:
- explicit MockWriteHandle(MockMemoryTransferService* service);
- ~MockWriteHandle() override;
-
- bool DeserializeDataUpdate(const void* deserializePointer,
- size_t deserializeSize,
- size_t offset,
- size_t size) override;
+ private:
+ MockMemoryTransferService* mService;
+ };
- const uint32_t* GetData() const;
+ class MockWriteHandle : public WriteHandle {
+ public:
+ explicit MockWriteHandle(MockMemoryTransferService* service);
+ ~MockWriteHandle() override;
- private:
- MockMemoryTransferService* mService;
- };
+ bool DeserializeDataUpdate(const void* deserializePointer,
+ size_t deserializeSize,
+ size_t offset,
+ size_t size) override;
- MockMemoryTransferService();
- ~MockMemoryTransferService() override;
+ const uint32_t* GetData() const;
- bool DeserializeReadHandle(const void* deserializePointer,
- size_t deserializeSize,
- ReadHandle** readHandle) override;
-
- bool DeserializeWriteHandle(const void* deserializePointer,
- size_t deserializeSize,
- WriteHandle** writeHandle) override;
-
- MockReadHandle* NewReadHandle();
- MockWriteHandle* NewWriteHandle();
-
- MOCK_METHOD(bool,
- OnDeserializeReadHandle,
- (const uint32_t* deserializePointer,
- size_t deserializeSize,
- ReadHandle** readHandle));
-
- MOCK_METHOD(bool,
- OnDeserializeWriteHandle,
- (const uint32_t* deserializePointer,
- size_t deserializeSize,
- WriteHandle** writeHandle));
-
- MOCK_METHOD(size_t,
- OnReadHandleSizeOfSerializeDataUpdate,
- (const ReadHandle* readHandle, size_t offset, size_t size));
- MOCK_METHOD(void,
- OnReadHandleSerializeDataUpdate,
- (const ReadHandle* readHandle,
- const void* data,
- size_t offset,
- size_t size,
- void* serializePointer));
- MOCK_METHOD(void, OnReadHandleDestroy, (const ReadHandle* readHandle));
-
- MOCK_METHOD(bool,
- OnWriteHandleDeserializeDataUpdate,
- (const WriteHandle* writeHandle,
- const uint32_t* deserializePointer,
- size_t deserializeSize,
- size_t offset,
- size_t size));
- MOCK_METHOD(void, OnWriteHandleDestroy, (const WriteHandle* writeHandle));
+ private:
+ MockMemoryTransferService* mService;
};
+ MockMemoryTransferService();
+ ~MockMemoryTransferService() override;
+
+ bool DeserializeReadHandle(const void* deserializePointer,
+ size_t deserializeSize,
+ ReadHandle** readHandle) override;
+
+ bool DeserializeWriteHandle(const void* deserializePointer,
+ size_t deserializeSize,
+ WriteHandle** writeHandle) override;
+
+ MockReadHandle* NewReadHandle();
+ MockWriteHandle* NewWriteHandle();
+
+ MOCK_METHOD(bool,
+ OnDeserializeReadHandle,
+ (const uint32_t* deserializePointer,
+ size_t deserializeSize,
+ ReadHandle** readHandle));
+
+ MOCK_METHOD(bool,
+ OnDeserializeWriteHandle,
+ (const uint32_t* deserializePointer,
+ size_t deserializeSize,
+ WriteHandle** writeHandle));
+
+ MOCK_METHOD(size_t,
+ OnReadHandleSizeOfSerializeDataUpdate,
+ (const ReadHandle* readHandle, size_t offset, size_t size));
+ MOCK_METHOD(void,
+ OnReadHandleSerializeDataUpdate,
+ (const ReadHandle* readHandle,
+ const void* data,
+ size_t offset,
+ size_t size,
+ void* serializePointer));
+ MOCK_METHOD(void, OnReadHandleDestroy, (const ReadHandle* readHandle));
+
+ MOCK_METHOD(bool,
+ OnWriteHandleDeserializeDataUpdate,
+ (const WriteHandle* writeHandle,
+ const uint32_t* deserializePointer,
+ size_t deserializeSize,
+ size_t offset,
+ size_t size));
+ MOCK_METHOD(void, OnWriteHandleDestroy, (const WriteHandle* writeHandle));
+};
+
} // namespace dawn::wire::server
#endif // SRC_DAWN_WIRE_SERVER_SERVERMEMORYTRANSFERSERVICE_MOCK_H_
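
Note: the MOCK_METHOD declarations above wrap each parameter list in an extra set of parentheses so commas inside it are not treated as separate macro arguments. A minimal GoogleMock example of the same pattern, using a hypothetical interface unrelated to Dawn:

    #include <cstddef>
    #include "gmock/gmock.h"

    class Transfer {
      public:
        virtual ~Transfer() = default;
        virtual bool Deserialize(const void* data, size_t size) = 0;
    };

    class MockTransfer : public Transfer {
      public:
        // (const void* data, size_t size) is parenthesized as a whole so the
        // comma does not split the MOCK_METHOD arguments.
        MOCK_METHOD(bool, Deserialize, (const void* data, size_t size), (override));
    };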
diff --git a/chromium/third_party/dawn/src/dawn/wire/server/ServerQueue.cpp b/chromium/third_party/dawn/src/dawn/wire/server/ServerQueue.cpp
index 68e1ea82ee4..3e440082543 100644
--- a/chromium/third_party/dawn/src/dawn/wire/server/ServerQueue.cpp
+++ b/chromium/third_party/dawn/src/dawn/wire/server/ServerQueue.cpp
@@ -12,92 +12,81 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+#include <limits>
+
#include "dawn/common/Assert.h"
#include "dawn/wire/server/Server.h"
namespace dawn::wire::server {
- void Server::OnQueueWorkDone(QueueWorkDoneUserdata* data, WGPUQueueWorkDoneStatus status) {
- ReturnQueueWorkDoneCallbackCmd cmd;
- cmd.queue = data->queue;
- cmd.requestSerial = data->requestSerial;
- cmd.status = status;
+void Server::OnQueueWorkDone(QueueWorkDoneUserdata* data, WGPUQueueWorkDoneStatus status) {
+ ReturnQueueWorkDoneCallbackCmd cmd;
+ cmd.queue = data->queue;
+ cmd.requestSerial = data->requestSerial;
+ cmd.status = status;
+
+ SerializeCommand(cmd);
+}
- SerializeCommand(cmd);
+bool Server::DoQueueOnSubmittedWorkDone(ObjectId queueId,
+ uint64_t signalValue,
+ uint64_t requestSerial) {
+ auto* queue = QueueObjects().Get(queueId);
+ if (queue == nullptr) {
+ return false;
}
- bool Server::DoQueueOnSubmittedWorkDone(ObjectId queueId,
- uint64_t signalValue,
- uint64_t requestSerial) {
- auto* queue = QueueObjects().Get(queueId);
- if (queue == nullptr) {
- return false;
- }
+ auto userdata = MakeUserdata<QueueWorkDoneUserdata>();
+ userdata->queue = ObjectHandle{queueId, queue->generation};
+ userdata->requestSerial = requestSerial;
- auto userdata = MakeUserdata<QueueWorkDoneUserdata>();
- userdata->queue = ObjectHandle{queueId, queue->generation};
- userdata->requestSerial = requestSerial;
+ mProcs.queueOnSubmittedWorkDone(queue->handle, signalValue,
+ ForwardToServer<&Server::OnQueueWorkDone>, userdata.release());
+ return true;
+}
- mProcs.queueOnSubmittedWorkDone(queue->handle, signalValue,
- ForwardToServer<&Server::OnQueueWorkDone>,
- userdata.release());
- return true;
+bool Server::DoQueueWriteBuffer(ObjectId queueId,
+ ObjectId bufferId,
+ uint64_t bufferOffset,
+ const uint8_t* data,
+ uint64_t size) {
+ // The null object isn't valid as `self` or `buffer` so we can combine the check with the
+ // check that the ID is valid.
+ auto* queue = QueueObjects().Get(queueId);
+ auto* buffer = BufferObjects().Get(bufferId);
+ if (queue == nullptr || buffer == nullptr) {
+ return false;
}
- bool Server::DoQueueWriteBuffer(ObjectId queueId,
- ObjectId bufferId,
- uint64_t bufferOffset,
- const uint8_t* data,
- uint64_t size) {
- // The null object isn't valid as `self` or `buffer` so we can combine the check with the
- // check that the ID is valid.
- auto* queue = QueueObjects().Get(queueId);
- auto* buffer = BufferObjects().Get(bufferId);
- if (queue == nullptr || buffer == nullptr) {
- return false;
- }
-
- if (size > std::numeric_limits<size_t>::max()) {
- auto* device = DeviceObjects().Get(queue->deviceInfo->self.id);
- if (device == nullptr) {
- return false;
- }
- return DoDeviceInjectError(reinterpret_cast<WGPUDevice>(device),
- WGPUErrorType_OutOfMemory,
- "Data size too large for write texture.");
- }
-
- mProcs.queueWriteBuffer(queue->handle, buffer->handle, bufferOffset, data,
- static_cast<size_t>(size));
- return true;
+ if (size > std::numeric_limits<size_t>::max()) {
+ return false;
}
- bool Server::DoQueueWriteTexture(ObjectId queueId,
- const WGPUImageCopyTexture* destination,
- const uint8_t* data,
- uint64_t dataSize,
- const WGPUTextureDataLayout* dataLayout,
- const WGPUExtent3D* writeSize) {
- // The null object isn't valid as `self` so we can combine the check with the
- // check that the ID is valid.
- auto* queue = QueueObjects().Get(queueId);
- if (queue == nullptr) {
- return false;
- }
+ mProcs.queueWriteBuffer(queue->handle, buffer->handle, bufferOffset, data,
+ static_cast<size_t>(size));
+ return true;
+}
- if (dataSize > std::numeric_limits<size_t>::max()) {
- auto* device = DeviceObjects().Get(queue->deviceInfo->self.id);
- if (device == nullptr) {
- return false;
- }
- return DoDeviceInjectError(reinterpret_cast<WGPUDevice>(device),
- WGPUErrorType_OutOfMemory,
- "Data size too large for write texture.");
- }
+bool Server::DoQueueWriteTexture(ObjectId queueId,
+ const WGPUImageCopyTexture* destination,
+ const uint8_t* data,
+ uint64_t dataSize,
+ const WGPUTextureDataLayout* dataLayout,
+ const WGPUExtent3D* writeSize) {
+ // The null object isn't valid as `self` so we can combine the check with the
+ // check that the ID is valid.
+ auto* queue = QueueObjects().Get(queueId);
+ if (queue == nullptr) {
+ return false;
+ }
- mProcs.queueWriteTexture(queue->handle, destination, data, static_cast<size_t>(dataSize),
- dataLayout, writeSize);
- return true;
+ if (dataSize > std::numeric_limits<size_t>::max()) {
+ return false;
}
+ mProcs.queueWriteTexture(queue->handle, destination, data, static_cast<size_t>(dataSize),
+ dataLayout, writeSize);
+ return true;
+}
+
} // namespace dawn::wire::server
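
Note: DoQueueWriteBuffer and DoQueueWriteTexture now simply reject any transfer whose 64-bit wire size does not fit in size_t before narrowing it. A minimal sketch of that guard in isolation; WriteBytes is a hypothetical sink standing in for the queueWriteBuffer proc:

    #include <cstddef>
    #include <cstdint>
    #include <limits>

    // Hypothetical sink; the real code calls mProcs.queueWriteBuffer.
    static void WriteBytes(const uint8_t* /*data*/, size_t /*size*/) {}

    static bool WriteChecked(const uint8_t* data, uint64_t size) {
        // On 32-bit builds size_t is narrower than the uint64_t sent over the wire,
        // so refuse anything that would be truncated by the cast below.
        if (size > std::numeric_limits<size_t>::max()) {
            return false;
        }
        WriteBytes(data, static_cast<size_t>(size));
        return true;
    }

    int main() {
        uint8_t buf[4] = {};
        return WriteChecked(buf, sizeof(buf)) ? 0 : 1;
    }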
diff --git a/chromium/third_party/dawn/src/dawn/wire/server/ServerShaderModule.cpp b/chromium/third_party/dawn/src/dawn/wire/server/ServerShaderModule.cpp
index 8785e0d648e..9a3f3da7f38 100644
--- a/chromium/third_party/dawn/src/dawn/wire/server/ServerShaderModule.cpp
+++ b/chromium/third_party/dawn/src/dawn/wire/server/ServerShaderModule.cpp
@@ -12,38 +12,38 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-#include "dawn/wire/server/Server.h"
-
#include <memory>
-namespace dawn::wire::server {
-
- bool Server::DoShaderModuleGetCompilationInfo(ObjectId shaderModuleId, uint64_t requestSerial) {
- auto* shaderModule = ShaderModuleObjects().Get(shaderModuleId);
- if (shaderModule == nullptr) {
- return false;
- }
+#include "dawn/wire/server/Server.h"
- auto userdata = MakeUserdata<ShaderModuleGetCompilationInfoUserdata>();
- userdata->shaderModule = ObjectHandle{shaderModuleId, shaderModule->generation};
- userdata->requestSerial = requestSerial;
+namespace dawn::wire::server {
- mProcs.shaderModuleGetCompilationInfo(
- shaderModule->handle, ForwardToServer<&Server::OnShaderModuleGetCompilationInfo>,
- userdata.release());
- return true;
+bool Server::DoShaderModuleGetCompilationInfo(ObjectId shaderModuleId, uint64_t requestSerial) {
+ auto* shaderModule = ShaderModuleObjects().Get(shaderModuleId);
+ if (shaderModule == nullptr) {
+ return false;
}
- void Server::OnShaderModuleGetCompilationInfo(ShaderModuleGetCompilationInfoUserdata* data,
- WGPUCompilationInfoRequestStatus status,
- const WGPUCompilationInfo* info) {
- ReturnShaderModuleGetCompilationInfoCallbackCmd cmd;
- cmd.shaderModule = data->shaderModule;
- cmd.requestSerial = data->requestSerial;
- cmd.status = status;
- cmd.info = info;
-
- SerializeCommand(cmd);
- }
+ auto userdata = MakeUserdata<ShaderModuleGetCompilationInfoUserdata>();
+ userdata->shaderModule = ObjectHandle{shaderModuleId, shaderModule->generation};
+ userdata->requestSerial = requestSerial;
+
+ mProcs.shaderModuleGetCompilationInfo(
+ shaderModule->handle, ForwardToServer<&Server::OnShaderModuleGetCompilationInfo>,
+ userdata.release());
+ return true;
+}
+
+void Server::OnShaderModuleGetCompilationInfo(ShaderModuleGetCompilationInfoUserdata* data,
+ WGPUCompilationInfoRequestStatus status,
+ const WGPUCompilationInfo* info) {
+ ReturnShaderModuleGetCompilationInfoCallbackCmd cmd;
+ cmd.shaderModule = data->shaderModule;
+ cmd.requestSerial = data->requestSerial;
+ cmd.status = status;
+ cmd.info = info;
+
+ SerializeCommand(cmd);
+}
} // namespace dawn::wire::server
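
Note: each Do* entry point above heap-allocates userdata carrying the object handle and request serial, releases ownership into the C callback, and the ForwardToServer trampoline hands it back for cleanup. A simplified sketch of that ownership hand-off, with an illustrative callback signature rather than the WGPU one:

    #include <cstdint>
    #include <cstdio>
    #include <memory>

    struct Userdata {
        uint64_t requestSerial;
    };

    // C-style callback: ownership of `raw` returns to us here.
    static void OnDone(int status, void* raw) {
        std::unique_ptr<Userdata> data(static_cast<Userdata*>(raw));
        std::printf("request %llu finished with status %d\n",
                    static_cast<unsigned long long>(data->requestSerial), status);
    }  // `data` is destroyed here, mirroring the trampoline's cleanup.

    static void StartRequest(uint64_t requestSerial, void (*callback)(int, void*)) {
        auto userdata = std::make_unique<Userdata>();
        userdata->requestSerial = requestSerial;
        // Release ownership into the callback, like userdata.release() above.
        callback(/*status=*/0, userdata.release());
    }

    int main() {
        StartRequest(42, OnDone);
        return 0;
    }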
diff --git a/chromium/third_party/dawn/src/include/dawn/EnumClassBitmasks.h b/chromium/third_party/dawn/src/include/dawn/EnumClassBitmasks.h
index 6e6e37c7942..7d76b377923 100644
--- a/chromium/third_party/dawn/src/include/dawn/EnumClassBitmasks.h
+++ b/chromium/third_party/dawn/src/include/dawn/EnumClassBitmasks.h
@@ -15,6 +15,6 @@
#ifndef SRC_INCLUDE_DAWN_ENUMCLASSBITMASKS_H_
#define SRC_INCLUDE_DAWN_ENUMCLASSBITMASKS_H_
-#include <dawn/EnumClassBitmasks.h>
+#include "dawn/EnumClassBitmasks.h"
#endif // SRC_INCLUDE_DAWN_ENUMCLASSBITMASKS_H_
diff --git a/chromium/third_party/dawn/src/include/dawn/dawn_proc.h b/chromium/third_party/dawn/src/include/dawn/dawn_proc.h
index 2fd5e968485..2bea48b5eac 100644
--- a/chromium/third_party/dawn/src/include/dawn/dawn_proc.h
+++ b/chromium/third_party/dawn/src/include/dawn/dawn_proc.h
@@ -15,6 +15,6 @@
#ifndef SRC_INCLUDE_DAWN_DAWN_PROC_H_
#define SRC_INCLUDE_DAWN_DAWN_PROC_H_
-#include <dawn/dawn_proc.h>
+#include "dawn/dawn_proc.h"
#endif // SRC_INCLUDE_DAWN_DAWN_PROC_H_
diff --git a/chromium/third_party/dawn/src/include/dawn/dawn_thread_dispatch_proc.h b/chromium/third_party/dawn/src/include/dawn/dawn_thread_dispatch_proc.h
index 4028e66ff76..126e50cafd8 100644
--- a/chromium/third_party/dawn/src/include/dawn/dawn_thread_dispatch_proc.h
+++ b/chromium/third_party/dawn/src/include/dawn/dawn_thread_dispatch_proc.h
@@ -15,6 +15,6 @@
#ifndef SRC_INCLUDE_DAWN_DAWN_THREAD_DISPATCH_PROC_H_
#define SRC_INCLUDE_DAWN_DAWN_THREAD_DISPATCH_PROC_H_
-#include <dawn/dawn_thread_dispatch_proc.h>
+#include "dawn/dawn_thread_dispatch_proc.h"
#endif // SRC_INCLUDE_DAWN_DAWN_THREAD_DISPATCH_PROC_H_
diff --git a/chromium/third_party/dawn/src/include/dawn/dawn_wsi.h b/chromium/third_party/dawn/src/include/dawn/dawn_wsi.h
index 414ea3a9276..3803d3911d1 100644
--- a/chromium/third_party/dawn/src/include/dawn/dawn_wsi.h
+++ b/chromium/third_party/dawn/src/include/dawn/dawn_wsi.h
@@ -15,6 +15,6 @@
#ifndef SRC_INCLUDE_DAWN_DAWN_WSI_H_
#define SRC_INCLUDE_DAWN_DAWN_WSI_H_
-#include <dawn/dawn_wsi.h>
+#include "dawn/dawn_wsi.h"
#endif // SRC_INCLUDE_DAWN_DAWN_WSI_H_
diff --git a/chromium/third_party/dawn/src/include/dawn/webgpu.h b/chromium/third_party/dawn/src/include/dawn/webgpu.h
index 1efae373bb1..2fec78c3c27 100644
--- a/chromium/third_party/dawn/src/include/dawn/webgpu.h
+++ b/chromium/third_party/dawn/src/include/dawn/webgpu.h
@@ -15,6 +15,6 @@
#ifndef SRC_INCLUDE_DAWN_WEBGPU_H_
#define SRC_INCLUDE_DAWN_WEBGPU_H_
-#include <dawn/webgpu.h>
+#include "dawn/webgpu.h"
#endif // SRC_INCLUDE_DAWN_WEBGPU_H_
diff --git a/chromium/third_party/dawn/src/include/dawn_native/D3D12Backend.h b/chromium/third_party/dawn/src/include/dawn_native/D3D12Backend.h
index 8678ba44e34..c740c81b0d9 100644
--- a/chromium/third_party/dawn/src/include/dawn_native/D3D12Backend.h
+++ b/chromium/third_party/dawn/src/include/dawn_native/D3D12Backend.h
@@ -15,6 +15,6 @@
#ifndef SRC_INCLUDE_DAWN_NATIVE_D3D12BACKEND_H_
#define SRC_INCLUDE_DAWN_NATIVE_D3D12BACKEND_H_
-#include <dawn/native/D3D12Backend.h>
+#include "dawn/native/D3D12Backend.h"
#endif // SRC_INCLUDE_DAWN_NATIVE_D3D12BACKEND_H_
diff --git a/chromium/third_party/dawn/src/include/dawn_native/DawnNative.h b/chromium/third_party/dawn/src/include/dawn_native/DawnNative.h
index 75708b66c37..197187f21bf 100644
--- a/chromium/third_party/dawn/src/include/dawn_native/DawnNative.h
+++ b/chromium/third_party/dawn/src/include/dawn_native/DawnNative.h
@@ -15,6 +15,6 @@
#ifndef SRC_INCLUDE_DAWN_NATIVE_DAWNNATIVE_H_
#define SRC_INCLUDE_DAWN_NATIVE_DAWNNATIVE_H_
-#include <dawn/native/DawnNative.h>
+#include "dawn/native/DawnNative.h"
#endif // SRC_INCLUDE_DAWN_NATIVE_DAWNNATIVE_H_
diff --git a/chromium/third_party/dawn/src/include/dawn_native/MetalBackend.h b/chromium/third_party/dawn/src/include/dawn_native/MetalBackend.h
index 27a3c31041a..38ce8748f84 100644
--- a/chromium/third_party/dawn/src/include/dawn_native/MetalBackend.h
+++ b/chromium/third_party/dawn/src/include/dawn_native/MetalBackend.h
@@ -15,6 +15,6 @@
#ifndef SRC_INCLUDE_DAWN_NATIVE_METALBACKEND_H_
#define SRC_INCLUDE_DAWN_NATIVE_METALBACKEND_H_
-#include <dawn/native/MetalBackend.h>
+#include "dawn/native/MetalBackend.h"
#endif // SRC_INCLUDE_DAWN_NATIVE_METALBACKEND_H_
diff --git a/chromium/third_party/dawn/src/include/dawn_native/NullBackend.h b/chromium/third_party/dawn/src/include/dawn_native/NullBackend.h
index dbf6e0de1a0..e09f36b6514 100644
--- a/chromium/third_party/dawn/src/include/dawn_native/NullBackend.h
+++ b/chromium/third_party/dawn/src/include/dawn_native/NullBackend.h
@@ -15,6 +15,6 @@
#ifndef SRC_INCLUDE_DAWN_NATIVE_NULLBACKEND_H_
#define SRC_INCLUDE_DAWN_NATIVE_NULLBACKEND_H_
-#include <dawn/native/NullBackend.h>
+#include "dawn/native/NullBackend.h"
#endif // SRC_INCLUDE_DAWN_NATIVE_NULLBACKEND_H_
diff --git a/chromium/third_party/dawn/src/include/dawn_native/OpenGLBackend.h b/chromium/third_party/dawn/src/include/dawn_native/OpenGLBackend.h
index cc40eb629e6..73800a0d943 100644
--- a/chromium/third_party/dawn/src/include/dawn_native/OpenGLBackend.h
+++ b/chromium/third_party/dawn/src/include/dawn_native/OpenGLBackend.h
@@ -15,6 +15,6 @@
#ifndef SRC_INCLUDE_DAWN_NATIVE_OPENGLBACKEND_H_
#define SRC_INCLUDE_DAWN_NATIVE_OPENGLBACKEND_H_
-#include <dawn/native/OpenGLBackend.h>
+#include "dawn/native/OpenGLBackend.h"
#endif // SRC_INCLUDE_DAWN_NATIVE_OPENGLBACKEND_H_
diff --git a/chromium/third_party/dawn/src/include/dawn_native/VulkanBackend.h b/chromium/third_party/dawn/src/include/dawn_native/VulkanBackend.h
index 59250282308..82e2e5304df 100644
--- a/chromium/third_party/dawn/src/include/dawn_native/VulkanBackend.h
+++ b/chromium/third_party/dawn/src/include/dawn_native/VulkanBackend.h
@@ -15,6 +15,6 @@
#ifndef SRC_INCLUDE_DAWN_NATIVE_VULKANBACKEND_H_
#define SRC_INCLUDE_DAWN_NATIVE_VULKANBACKEND_H_
-#include <dawn/native/VulkanBackend.h>
+#include "dawn/native/VulkanBackend.h"
#endif // SRC_INCLUDE_DAWN_NATIVE_VULKANBACKEND_H_
diff --git a/chromium/third_party/dawn/src/include/dawn_native/dawn_native_export.h b/chromium/third_party/dawn/src/include/dawn_native/dawn_native_export.h
index d9d386ef853..7edd9bc4e77 100644
--- a/chromium/third_party/dawn/src/include/dawn_native/dawn_native_export.h
+++ b/chromium/third_party/dawn/src/include/dawn_native/dawn_native_export.h
@@ -15,6 +15,6 @@
#ifndef SRC_INCLUDE_DAWN_NATIVE_DAWN_NATIVE_EXPORT_H_
#define SRC_INCLUDE_DAWN_NATIVE_DAWN_NATIVE_EXPORT_H_
-#include <dawn/native/dawn_native_export.h>
+#include "dawn/native/dawn_native_export.h"
#endif // SRC_INCLUDE_DAWN_NATIVE_DAWN_NATIVE_EXPORT_H_
diff --git a/chromium/third_party/dawn/src/include/dawn_platform/DawnPlatform.h b/chromium/third_party/dawn/src/include/dawn_platform/DawnPlatform.h
index b364202592c..75faafc91af 100644
--- a/chromium/third_party/dawn/src/include/dawn_platform/DawnPlatform.h
+++ b/chromium/third_party/dawn/src/include/dawn_platform/DawnPlatform.h
@@ -15,6 +15,6 @@
#ifndef SRC_INCLUDE_DAWN_PLATFORM_DAWNPLATFORM_H_
#define SRC_INCLUDE_DAWN_PLATFORM_DAWNPLATFORM_H_
-#include <dawn/platform/DawnPlatform.h>
+#include "dawn/platform/DawnPlatform.h"
#endif // SRC_INCLUDE_DAWN_PLATFORM_DAWNPLATFORM_H_
diff --git a/chromium/third_party/dawn/src/include/dawn_wire/Wire.h b/chromium/third_party/dawn/src/include/dawn_wire/Wire.h
index 70a081b1e84..65924db21d3 100644
--- a/chromium/third_party/dawn/src/include/dawn_wire/Wire.h
+++ b/chromium/third_party/dawn/src/include/dawn_wire/Wire.h
@@ -15,6 +15,6 @@
#ifndef SRC_INCLUDE_DAWN_WIRE_WIRE_H_
#define SRC_INCLUDE_DAWN_WIRE_WIRE_H_
-#include <dawn/wire/Wire.h>
+#include "dawn/wire/Wire.h"
#endif // SRC_INCLUDE_DAWN_WIRE_WIRE_H_
diff --git a/chromium/third_party/dawn/src/include/dawn_wire/WireClient.h b/chromium/third_party/dawn/src/include/dawn_wire/WireClient.h
index 2cd7baa2c95..bf4b589f7ac 100644
--- a/chromium/third_party/dawn/src/include/dawn_wire/WireClient.h
+++ b/chromium/third_party/dawn/src/include/dawn_wire/WireClient.h
@@ -15,6 +15,6 @@
#ifndef SRC_INCLUDE_DAWN_WIRE_WIRECLIENT_H_
#define SRC_INCLUDE_DAWN_WIRE_WIRECLIENT_H_
-#include <dawn/wire/WireClient.h>
+#include "dawn/wire/WireClient.h"
#endif // SRC_INCLUDE_DAWN_WIRE_WIRECLIENT_H_
diff --git a/chromium/third_party/dawn/src/include/dawn_wire/WireServer.h b/chromium/third_party/dawn/src/include/dawn_wire/WireServer.h
index 1868aa81582..d332eaec88f 100644
--- a/chromium/third_party/dawn/src/include/dawn_wire/WireServer.h
+++ b/chromium/third_party/dawn/src/include/dawn_wire/WireServer.h
@@ -15,6 +15,6 @@
#ifndef SRC_INCLUDE_DAWN_WIRE_WIRESERVER_H_
#define SRC_INCLUDE_DAWN_WIRE_WIRESERVER_H_
-#include <dawn/wire/WireServer.h>
+#include "dawn/wire/WireServer.h"
#endif // SRC_INCLUDE_DAWN_WIRE_WIRESERVER_H_
diff --git a/chromium/third_party/dawn/src/include/dawn_wire/dawn_wire_export.h b/chromium/third_party/dawn/src/include/dawn_wire/dawn_wire_export.h
index d41e6b6cc76..b1a885e502d 100644
--- a/chromium/third_party/dawn/src/include/dawn_wire/dawn_wire_export.h
+++ b/chromium/third_party/dawn/src/include/dawn_wire/dawn_wire_export.h
@@ -15,6 +15,6 @@
#ifndef SRC_INCLUDE_DAWN_WIRE_DAWN_WIRE_EXPORT_H_
#define SRC_INCLUDE_DAWN_WIRE_DAWN_WIRE_EXPORT_H_
-#include <dawn/wire/dawn_wire_export.h>
+#include "dawn/wire/dawn_wire_export.h"
#endif // SRC_INCLUDE_DAWN_WIRE_DAWN_WIRE_EXPORT_H_
diff --git a/chromium/third_party/dawn/src/include/webgpu/webgpu_cpp.h b/chromium/third_party/dawn/src/include/webgpu/webgpu_cpp.h
index cd509aff4b5..adf4e4440db 100644
--- a/chromium/third_party/dawn/src/include/webgpu/webgpu_cpp.h
+++ b/chromium/third_party/dawn/src/include/webgpu/webgpu_cpp.h
@@ -15,6 +15,6 @@
#ifndef SRC_INCLUDE_WEBGPU_WEBGPU_CPP_H_
#define SRC_INCLUDE_WEBGPU_WEBGPU_CPP_H_
-#include <dawn/webgpu_cpp.h>
+#include "dawn/webgpu_cpp.h"
#endif // SRC_INCLUDE_WEBGPU_WEBGPU_CPP_H_
diff --git a/chromium/third_party/dawn/src/tint/.clang-format b/chromium/third_party/dawn/src/tint/.clang-format
deleted file mode 100644
index 2fb833a5df1..00000000000
--- a/chromium/third_party/dawn/src/tint/.clang-format
+++ /dev/null
@@ -1,2 +0,0 @@
-# http://clang.llvm.org/docs/ClangFormatStyleOptions.html
-BasedOnStyle: Chromium
diff --git a/chromium/third_party/dawn/src/tint/BUILD.gn b/chromium/third_party/dawn/src/tint/BUILD.gn
index 5594bf87dac..e7171a9e184 100644
--- a/chromium/third_party/dawn/src/tint/BUILD.gn
+++ b/chromium/third_party/dawn/src/tint/BUILD.gn
@@ -222,12 +222,16 @@ libtint_source_set("libtint_core_all_src") {
"ast/disable_validation_attribute.h",
"ast/discard_statement.cc",
"ast/discard_statement.h",
- "ast/else_statement.cc",
- "ast/else_statement.h",
+ "ast/enable.cc",
+ "ast/enable.h",
"ast/expression.cc",
"ast/expression.h",
+ "ast/extension.cc",
+ "ast/extension.h",
"ast/external_texture.cc",
"ast/external_texture.h",
+ "ast/f16.cc",
+ "ast/f16.h",
"ast/f32.cc",
"ast/f32.h",
"ast/fallthrough_statement.cc",
@@ -288,8 +292,6 @@ libtint_source_set("libtint_core_all_src") {
"ast/sampled_texture.h",
"ast/sampler.cc",
"ast/sampler.h",
- "ast/sint_literal_expression.cc",
- "ast/sint_literal_expression.h",
"ast/stage_attribute.cc",
"ast/stage_attribute.h",
"ast/statement.cc",
@@ -322,8 +324,6 @@ libtint_source_set("libtint_core_all_src") {
"ast/type_name.h",
"ast/u32.cc",
"ast/u32.h",
- "ast/uint_literal_expression.cc",
- "ast/uint_literal_expression.h",
"ast/unary_op.cc",
"ast/unary_op.h",
"ast/unary_op_expression.cc",
@@ -338,9 +338,6 @@ libtint_source_set("libtint_core_all_src") {
"ast/void.h",
"ast/workgroup_attribute.cc",
"ast/workgroup_attribute.h",
- "builtin_table.cc",
- "builtin_table.h",
- "builtin_table.inl",
"castable.cc",
"castable.h",
"clone_context.cc",
@@ -363,6 +360,8 @@ libtint_source_set("libtint_core_all_src") {
"inspector/resource_binding.h",
"inspector/scalar.cc",
"inspector/scalar.h",
+ "number.cc",
+ "number.h",
"program.cc",
"program.h",
"program_builder.cc",
@@ -371,55 +370,72 @@ libtint_source_set("libtint_core_all_src") {
"program_id.h",
"reader/reader.cc",
"reader/reader.h",
+ "resolver/const_eval.cc",
+ "resolver/const_eval.h",
+ "resolver/ctor_conv_intrinsic.cc",
+ "resolver/ctor_conv_intrinsic.h",
"resolver/dependency_graph.cc",
"resolver/dependency_graph.h",
+ "resolver/intrinsic_table.cc",
+ "resolver/intrinsic_table.h",
+ "resolver/intrinsic_table.inl",
"resolver/resolver.cc",
"resolver/resolver.h",
"resolver/resolver_constants.cc",
- "resolver/resolver_validation.cc",
+ "resolver/sem_helper.cc",
+ "resolver/sem_helper.h",
+ "resolver/uniformity.cc",
+ "resolver/uniformity.h",
+ "resolver/validator.cc",
+ "resolver/validator.h",
"scope_stack.h",
+ "sem/abstract_float.h",
+ "sem/abstract_int.h",
+ "sem/abstract_numeric.h",
"sem/array.h",
- "sem/atomic_type.h",
+ "sem/atomic.h",
"sem/behavior.h",
"sem/binding_point.h",
- "sem/bool_type.h",
+ "sem/bool.h",
"sem/builtin.h",
"sem/builtin_type.h",
"sem/call.h",
"sem/call_target.h",
"sem/constant.h",
- "sem/depth_multisampled_texture_type.h",
- "sem/depth_texture_type.h",
+ "sem/depth_multisampled_texture.h",
+ "sem/depth_texture.h",
"sem/expression.h",
- "sem/external_texture_type.h",
- "sem/f32_type.h",
+ "sem/external_texture.h",
+ "sem/f16.h",
+ "sem/f32.h",
"sem/for_loop_statement.h",
- "sem/i32_type.h",
+ "sem/i32.h",
"sem/if_statement.h",
"sem/info.h",
"sem/loop_statement.h",
- "sem/matrix_type.h",
+ "sem/materialize.h",
+ "sem/matrix.h",
"sem/module.h",
- "sem/multisampled_texture_type.h",
+ "sem/multisampled_texture.h",
"sem/node.h",
"sem/parameter_usage.h",
"sem/pipeline_stage_set.h",
- "sem/pointer_type.h",
- "sem/reference_type.h",
- "sem/sampled_texture_type.h",
+ "sem/pointer.h",
+ "sem/reference.h",
+ "sem/sampled_texture.h",
+ "sem/sampler.h",
"sem/sampler_texture_pair.h",
- "sem/sampler_type.h",
- "sem/storage_texture_type.h",
+ "sem/storage_texture.h",
"sem/switch_statement.h",
- "sem/texture_type.h",
+ "sem/texture.h",
"sem/type.h",
"sem/type_constructor.h",
"sem/type_conversion.h",
"sem/type_manager.h",
"sem/type_mappings.h",
- "sem/u32_type.h",
- "sem/vector_type.h",
- "sem/void_type.h",
+ "sem/u32.h",
+ "sem/vector.h",
+ "sem/void.h",
"source.cc",
"source.h",
"symbol.cc",
@@ -451,12 +467,12 @@ libtint_source_set("libtint_core_all_src") {
"transform/decompose_strided_array.h",
"transform/decompose_strided_matrix.cc",
"transform/decompose_strided_matrix.h",
+ "transform/disable_uniformity_analysis.cc",
+ "transform/disable_uniformity_analysis.h",
"transform/expand_compound_assignment.cc",
"transform/expand_compound_assignment.h",
"transform/first_index_offset.cc",
"transform/first_index_offset.h",
- "transform/fold_constants.cc",
- "transform/fold_constants.h",
"transform/fold_trivial_single_use_lets.cc",
"transform/fold_trivial_single_use_lets.h",
"transform/for_loop_to_loop.cc",
@@ -511,7 +527,10 @@ libtint_source_set("libtint_core_all_src") {
"transform/wrap_arrays_in_structs.h",
"transform/zero_init_workgroup_memory.cc",
"transform/zero_init_workgroup_memory.h",
+ "utils/bitcast.h",
"utils/block_allocator.h",
+ "utils/compiler_macros.h",
+ "utils/concat.h",
"utils/crc32.h",
"utils/debugger.cc",
"utils/debugger.h",
@@ -527,6 +546,8 @@ libtint_source_set("libtint_core_all_src") {
"writer/append_vector.h",
"writer/array_length_from_uniform_options.cc",
"writer/array_length_from_uniform_options.h",
+ "writer/flatten_bindings.cc",
+ "writer/flatten_bindings.h",
"writer/float_to_string.cc",
"writer/float_to_string.h",
"writer/generate_external_texture_bindings.cc",
@@ -550,16 +571,22 @@ libtint_source_set("libtint_core_all_src") {
libtint_source_set("libtint_sem_src") {
sources = [
+ "sem/abstract_float.cc",
+ "sem/abstract_float.h",
+ "sem/abstract_int.cc",
+ "sem/abstract_int.h",
+ "sem/abstract_numeric.cc",
+ "sem/abstract_numeric.h",
"sem/array.cc",
"sem/array.h",
- "sem/atomic_type.cc",
- "sem/atomic_type.h",
+ "sem/atomic.cc",
+ "sem/atomic.h",
"sem/behavior.cc",
"sem/behavior.h",
"sem/binding_point.h",
"sem/block_statement.cc",
- "sem/bool_type.cc",
- "sem/bool_type.h",
+ "sem/bool.cc",
+ "sem/bool.h",
"sem/builtin.cc",
"sem/builtin.h",
"sem/builtin_type.cc",
@@ -570,55 +597,59 @@ libtint_source_set("libtint_sem_src") {
"sem/call_target.h",
"sem/constant.cc",
"sem/constant.h",
- "sem/depth_multisampled_texture_type.cc",
- "sem/depth_multisampled_texture_type.h",
- "sem/depth_texture_type.cc",
- "sem/depth_texture_type.h",
+ "sem/depth_multisampled_texture.cc",
+ "sem/depth_multisampled_texture.h",
+ "sem/depth_texture.cc",
+ "sem/depth_texture.h",
"sem/expression.cc",
"sem/expression.h",
- "sem/external_texture_type.cc",
- "sem/external_texture_type.h",
- "sem/f32_type.cc",
- "sem/f32_type.h",
+ "sem/external_texture.cc",
+ "sem/external_texture.h",
+ "sem/f16.cc",
+ "sem/f16.h",
+ "sem/f32.cc",
+ "sem/f32.h",
"sem/for_loop_statement.cc",
"sem/for_loop_statement.h",
"sem/function.cc",
- "sem/i32_type.cc",
- "sem/i32_type.h",
+ "sem/i32.cc",
+ "sem/i32.h",
"sem/if_statement.cc",
"sem/if_statement.h",
"sem/info.cc",
"sem/info.h",
"sem/loop_statement.cc",
"sem/loop_statement.h",
- "sem/matrix_type.cc",
- "sem/matrix_type.h",
+ "sem/materialize.cc",
+ "sem/materialize.h",
+ "sem/matrix.cc",
+ "sem/matrix.h",
"sem/member_accessor_expression.cc",
"sem/module.cc",
"sem/module.h",
- "sem/multisampled_texture_type.cc",
- "sem/multisampled_texture_type.h",
+ "sem/multisampled_texture.cc",
+ "sem/multisampled_texture.h",
"sem/node.cc",
"sem/node.h",
"sem/parameter_usage.cc",
"sem/parameter_usage.h",
"sem/pipeline_stage_set.h",
- "sem/pointer_type.cc",
- "sem/pointer_type.h",
- "sem/reference_type.cc",
- "sem/reference_type.h",
- "sem/sampled_texture_type.cc",
- "sem/sampled_texture_type.h",
- "sem/sampler_type.cc",
- "sem/sampler_type.h",
+ "sem/pointer.cc",
+ "sem/pointer.h",
+ "sem/reference.cc",
+ "sem/reference.h",
+ "sem/sampled_texture.cc",
+ "sem/sampled_texture.h",
+ "sem/sampler.cc",
+ "sem/sampler.h",
"sem/statement.cc",
- "sem/storage_texture_type.cc",
- "sem/storage_texture_type.h",
+ "sem/storage_texture.cc",
+ "sem/storage_texture.h",
"sem/struct.cc",
"sem/switch_statement.cc",
"sem/switch_statement.h",
- "sem/texture_type.cc",
- "sem/texture_type.h",
+ "sem/texture.cc",
+ "sem/texture.h",
"sem/type.cc",
"sem/type.h",
"sem/type_constructor.cc",
@@ -628,13 +659,13 @@ libtint_source_set("libtint_sem_src") {
"sem/type_manager.cc",
"sem/type_manager.h",
"sem/type_mappings.h",
- "sem/u32_type.cc",
- "sem/u32_type.h",
+ "sem/u32.cc",
+ "sem/u32.h",
"sem/variable.cc",
- "sem/vector_type.cc",
- "sem/vector_type.h",
- "sem/void_type.cc",
- "sem/void_type.h",
+ "sem/vector.cc",
+ "sem/vector.h",
+ "sem/void.cc",
+ "sem/void.h",
]
public_deps = [ ":libtint_core_all_src" ]
diff --git a/chromium/third_party/dawn/src/tint/CMakeLists.txt b/chromium/third_party/dawn/src/tint/CMakeLists.txt
index 527eeafa426..6ffc36a9c0d 100644
--- a/chromium/third_party/dawn/src/tint/CMakeLists.txt
+++ b/chromium/third_party/dawn/src/tint/CMakeLists.txt
@@ -62,18 +62,17 @@ set(TINT_LIB_SRCS
../../include/tint/tint.h
ast/access.cc
ast/access.h
- ast/attribute.cc
- ast/attribute.h
ast/alias.cc
ast/alias.h
- ast/index_accessor_expression.cc
- ast/index_accessor_expression.h
ast/array.cc
ast/array.h
ast/assignment_statement.cc
ast/assignment_statement.h
+ ast/ast_type.cc # TODO(bclayton) - rename to type.cc
ast/atomic.cc
ast/atomic.h
+ ast/attribute.cc
+ ast/attribute.h
ast/binary_expression.cc
ast/binary_expression.h
ast/binding_attribute.cc
@@ -104,18 +103,22 @@ set(TINT_LIB_SRCS
ast/continue_statement.h
ast/depth_multisampled_texture.cc
ast/depth_multisampled_texture.h
- ast/disable_validation_attribute.cc
- ast/disable_validation_attribute.h
ast/depth_texture.cc
ast/depth_texture.h
+ ast/disable_validation_attribute.cc
+ ast/disable_validation_attribute.h
ast/discard_statement.cc
ast/discard_statement.h
- ast/else_statement.cc
- ast/else_statement.h
+ ast/enable.cc
+ ast/enable.h
ast/expression.cc
ast/expression.h
+ ast/extension.cc
+ ast/extension.h
ast/external_texture.cc
ast/external_texture.h
+ ast/f16.cc
+ ast/f16.h
ast/f32.cc
ast/f32.h
ast/fallthrough_statement.cc
@@ -138,6 +141,8 @@ set(TINT_LIB_SRCS
ast/if_statement.h
ast/increment_decrement_statement.cc
ast/increment_decrement_statement.h
+ ast/index_accessor_expression.cc
+ ast/index_accessor_expression.h
ast/int_literal_expression.cc
ast/int_literal_expression.h
ast/internal_attribute.cc
@@ -174,8 +179,6 @@ set(TINT_LIB_SRCS
ast/sampled_texture.h
ast/sampler.cc
ast/sampler.h
- ast/sint_literal_expression.cc
- ast/sint_literal_expression.h
ast/stage_attribute.cc
ast/stage_attribute.h
ast/statement.cc
@@ -201,18 +204,13 @@ set(TINT_LIB_SRCS
ast/texture.cc
ast/texture.h
ast/traverse_expressions.h
- ast/type_name.cc
- ast/type_name.h
- ast/ast_type.cc # TODO(bclayton) - rename to type.cc
- ast/type.h
ast/type_decl.cc
ast/type_decl.h
ast/type_name.cc
ast/type_name.h
+ ast/type.h
ast/u32.cc
ast/u32.h
- ast/uint_literal_expression.cc
- ast/uint_literal_expression.h
ast/unary_op_expression.cc
ast/unary_op_expression.h
ast/unary_op.cc
@@ -227,9 +225,6 @@ set(TINT_LIB_SRCS
ast/void.h
ast/workgroup_attribute.cc
ast/workgroup_attribute.h
- builtin_table.cc
- builtin_table.h
- builtin_table.inl
castable.cc
castable.h
clone_context.cc
@@ -244,6 +239,8 @@ set(TINT_LIB_SRCS
inspector/resource_binding.h
inspector/scalar.cc
inspector/scalar.h
+ number.cc
+ number.h
program_builder.cc
program_builder.h
program_id.cc
@@ -252,22 +249,42 @@ set(TINT_LIB_SRCS
program.h
reader/reader.cc
reader/reader.h
+ resolver/const_eval.cc
+ resolver/const_eval.h
+ resolver/ctor_conv_intrinsic.cc
+ resolver/ctor_conv_intrinsic.h
resolver/dependency_graph.cc
resolver/dependency_graph.h
- resolver/resolver.cc
+ resolver/intrinsic_table.cc
+ resolver/intrinsic_table.h
+ resolver/intrinsic_table.inl
resolver/resolver_constants.cc
- resolver/resolver_validation.cc
+ resolver/resolver.cc
resolver/resolver.h
+ resolver/sem_helper.cc
+ resolver/sem_helper.h
+ resolver/uniformity.cc
+ resolver/uniformity.h
+ resolver/validator.cc
+ resolver/validator.h
scope_stack.h
+ sem/abstract_float.cc
+ sem/abstract_float.h
+ sem/abstract_int.cc
+ sem/abstract_int.h
+ sem/abstract_numeric.cc
+ sem/abstract_numeric.h
sem/array.cc
sem/array.h
- sem/atomic_type.cc
- sem/atomic_type.h
+ sem/atomic.cc
+ sem/atomic.h
sem/behavior.cc
sem/behavior.h
sem/binding_point.h
sem/block_statement.cc
sem/block_statement.h
+ sem/bool.cc
+ sem/bool.h
sem/builtin_type.cc
sem/builtin_type.h
sem/builtin.cc
@@ -278,26 +295,76 @@ set(TINT_LIB_SRCS
sem/call.h
sem/constant.cc
sem/constant.h
- sem/depth_multisampled_texture_type.cc
- sem/depth_multisampled_texture_type.h
+ sem/depth_multisampled_texture.cc
+ sem/depth_multisampled_texture.h
+ sem/depth_texture.cc
+ sem/depth_texture.h
sem/expression.cc
sem/expression.h
+ sem/external_texture.cc
+ sem/external_texture.h
+ sem/f16.cc
+ sem/f16.h
+ sem/f32.cc
+ sem/f32.h
+ sem/for_loop_statement.cc
+ sem/for_loop_statement.h
sem/function.cc
+ sem/i32.cc
+ sem/i32.h
+ sem/if_statement.cc
+ sem/if_statement.h
sem/info.cc
sem/info.h
+ sem/loop_statement.cc
+ sem/loop_statement.h
+ sem/materialize.cc
+ sem/materialize.h
+ sem/matrix.cc
+ sem/matrix.h
sem/member_accessor_expression.cc
+ sem/module.cc
+ sem/module.h
+ sem/multisampled_texture.cc
+ sem/multisampled_texture.h
+ sem/node.cc
+ sem/node.h
sem/parameter_usage.cc
sem/parameter_usage.h
sem/pipeline_stage_set.h
- sem/node.cc
- sem/node.h
- sem/module.cc
- sem/module.h
+ sem/pointer.cc
+ sem/pointer.h
+ sem/reference.cc
+ sem/reference.h
+ sem/sampled_texture.cc
+ sem/sampled_texture.h
sem/sampler_texture_pair.h
+ sem/sampler.cc
+ sem/sampler.h
sem/statement.cc
+ sem/storage_texture.cc
+ sem/storage_texture.h
sem/struct.cc
+ sem/switch_statement.cc
+ sem/switch_statement.h
+ sem/texture.cc
+ sem/texture.h
+ sem/type_constructor.cc
+ sem/type_constructor.h
+ sem/type_conversion.cc
+ sem/type_conversion.h
+ sem/type_manager.cc
+ sem/type_manager.h
sem/type_mappings.h
+ sem/type.cc
+ sem/type.h
+ sem/u32.cc
+ sem/u32.h
sem/variable.cc
+ sem/vector.cc
+ sem/vector.h
+ sem/void.cc
+ sem/void.h
symbol_table.cc
symbol_table.h
symbol.cc
@@ -317,28 +384,28 @@ set(TINT_LIB_SRCS
transform/builtin_polyfill.h
transform/calculate_array_length.cc
transform/calculate_array_length.h
- transform/combine_samplers.cc
- transform/combine_samplers.h
transform/canonicalize_entry_point_io.cc
transform/canonicalize_entry_point_io.h
+ transform/combine_samplers.cc
+ transform/combine_samplers.h
transform/decompose_memory_access.cc
transform/decompose_memory_access.h
transform/decompose_strided_array.cc
transform/decompose_strided_array.h
transform/decompose_strided_matrix.cc
transform/decompose_strided_matrix.h
+ transform/disable_uniformity_analysis.cc
+ transform/disable_uniformity_analysis.h
+ transform/expand_compound_assignment.cc
+ transform/expand_compound_assignment.h
transform/first_index_offset.cc
transform/first_index_offset.h
- transform/fold_constants.cc
- transform/fold_constants.h
transform/fold_trivial_single_use_lets.cc
transform/fold_trivial_single_use_lets.h
- transform/localize_struct_array_assignment.cc
- transform/localize_struct_array_assignment.h
transform/for_loop_to_loop.cc
transform/for_loop_to_loop.h
- transform/expand_compound_assignment.cc
- transform/expand_compound_assignment.h
+ transform/localize_struct_array_assignment.cc
+ transform/localize_struct_array_assignment.h
transform/loop_to_for_loop.cc
transform/loop_to_for_loop.h
transform/manager.cc
@@ -353,10 +420,10 @@ set(TINT_LIB_SRCS
transform/promote_initializers_to_const_var.h
transform/promote_side_effects_to_decl.cc
transform/promote_side_effects_to_decl.h
- transform/remove_phonies.cc
- transform/remove_phonies.h
transform/remove_continue_in_switch.cc
transform/remove_continue_in_switch.h
+ transform/remove_phonies.cc
+ transform/remove_phonies.h
transform/remove_unreachable_statements.cc
transform/remove_unreachable_statements.h
transform/renamer.cc
@@ -373,69 +440,24 @@ set(TINT_LIB_SRCS
transform/unshadow.h
transform/unwind_discard_functions.cc
transform/unwind_discard_functions.h
- transform/vectorize_scalar_matrix_constructors.cc
- transform/vectorize_scalar_matrix_constructors.h
+ transform/utils/get_insertion_point.cc
+ transform/utils/get_insertion_point.h
+ transform/utils/hoist_to_decl_before.cc
+ transform/utils/hoist_to_decl_before.h
transform/var_for_dynamic_index.cc
transform/var_for_dynamic_index.h
+ transform/vectorize_scalar_matrix_constructors.cc
+ transform/vectorize_scalar_matrix_constructors.h
transform/vertex_pulling.cc
transform/vertex_pulling.h
transform/wrap_arrays_in_structs.cc
transform/wrap_arrays_in_structs.h
transform/zero_init_workgroup_memory.cc
transform/zero_init_workgroup_memory.h
- transform/utils/get_insertion_point.cc
- transform/utils/get_insertion_point.h
- transform/utils/hoist_to_decl_before.cc
- transform/utils/hoist_to_decl_before.h
- sem/bool_type.cc
- sem/bool_type.h
- sem/depth_texture_type.cc
- sem/depth_texture_type.h
- sem/external_texture_type.cc
- sem/external_texture_type.h
- sem/f32_type.cc
- sem/f32_type.h
- sem/for_loop_statement.cc
- sem/for_loop_statement.h
- sem/i32_type.cc
- sem/i32_type.h
- sem/if_statement.cc
- sem/if_statement.h
- sem/loop_statement.cc
- sem/loop_statement.h
- sem/matrix_type.cc
- sem/matrix_type.h
- sem/multisampled_texture_type.cc
- sem/multisampled_texture_type.h
- sem/pointer_type.cc
- sem/pointer_type.h
- sem/reference_type.cc
- sem/reference_type.h
- sem/sampled_texture_type.cc
- sem/sampled_texture_type.h
- sem/sampler_type.cc
- sem/sampler_type.h
- sem/storage_texture_type.cc
- sem/storage_texture_type.h
- sem/switch_statement.cc
- sem/switch_statement.h
- sem/texture_type.cc
- sem/texture_type.h
- sem/type_constructor.cc
- sem/type_constructor.h
- sem/type_conversion.cc
- sem/type_conversion.h
- sem/type.cc
- sem/type.h
- sem/type_manager.cc
- sem/type_manager.h
- sem/u32_type.cc
- sem/u32_type.h
- sem/vector_type.cc
- sem/vector_type.h
- sem/void_type.cc
- sem/void_type.h
+ utils/bitcast.h
utils/block_allocator.h
+ utils/compiler_macros.h
+ utils/concat.h
utils/crc32.h
utils/enum_set.h
utils/hash.h
@@ -449,6 +471,8 @@ set(TINT_LIB_SRCS
writer/append_vector.h
writer/array_length_from_uniform_options.cc
writer/array_length_from_uniform_options.h
+ writer/flatten_bindings.cc
+ writer/flatten_bindings.h
writer/float_to_string.cc
writer/float_to_string.h
writer/generate_external_texture_bindings.cc
@@ -671,8 +695,10 @@ if(TINT_BUILD_TESTS)
ast/depth_multisampled_texture_test.cc
ast/depth_texture_test.cc
ast/discard_statement_test.cc
- ast/else_statement_test.cc
+ ast/enable_test.cc
+ ast/extension_test.cc
ast/external_texture_test.cc
+ ast/f16_test.cc
ast/f32_test.cc
ast/fallthrough_statement_test.cc
ast/float_literal_expression_test.cc
@@ -700,7 +726,6 @@ if(TINT_BUILD_TESTS)
ast/return_statement_test.cc
ast/sampled_texture_test.cc
ast/sampler_test.cc
- ast/sint_literal_expression_test.cc
ast/stage_attribute_test.cc
ast/storage_texture_test.cc
ast/stride_attribute_test.cc
@@ -714,13 +739,11 @@ if(TINT_BUILD_TESTS)
ast/texture_test.cc
ast/traverse_expressions_test.cc
ast/u32_test.cc
- ast/uint_literal_expression_test.cc
ast/unary_op_expression_test.cc
ast/variable_decl_statement_test.cc
ast/variable_test.cc
ast/vector_test.cc
ast/workgroup_attribute_test.cc
- builtin_table_test.cc
castable_test.cc
clone_context_test.cc
debug_test.cc
@@ -728,29 +751,33 @@ if(TINT_BUILD_TESTS)
diagnostic/diagnostic_test.cc
diagnostic/formatter_test.cc
diagnostic/printer_test.cc
+ number_test.cc
+ program_builder_test.cc
program_test.cc
resolver/array_accessor_test.cc
resolver/assignment_validation_test.cc
resolver/atomics_test.cc
resolver/atomics_validation_test.cc
+ resolver/attribute_validation_test.cc
resolver/bitcast_validation_test.cc
- resolver/builtins_validation_test.cc
resolver/builtin_test.cc
resolver/builtin_validation_test.cc
+ resolver/builtins_validation_test.cc
resolver/call_test.cc
resolver/call_validation_test.cc
resolver/compound_assignment_validation_test.cc
resolver/compound_statement_test.cc
resolver/control_block_validation_test.cc
- resolver/attribute_validation_test.cc
resolver/dependency_graph_test.cc
resolver/entry_point_validation_test.cc
resolver/function_validation_test.cc
resolver/host_shareable_validation_test.cc
resolver/increment_decrement_validation_test.cc
resolver/inferred_type_test.cc
+ resolver/intrinsic_table_test.cc
resolver/is_host_shareable_test.cc
resolver/is_storeable_test.cc
+ resolver/materialize_test.cc
resolver/pipeline_overridable_constant_test.cc
resolver/ptr_ref_test.cc
resolver/ptr_ref_validation_test.cc
@@ -760,6 +787,7 @@ if(TINT_BUILD_TESTS)
resolver/resolver_test_helper.h
resolver/resolver_test.cc
resolver/side_effects_test.cc
+ resolver/source_variable_test.cc
resolver/storage_class_layout_validation_test.cc
resolver/storage_class_validation_test.cc
resolver/struct_layout_test.cc
@@ -768,30 +796,35 @@ if(TINT_BUILD_TESTS)
resolver/type_constructor_validation_test.cc
resolver/type_validation_test.cc
resolver/validation_test.cc
+ resolver/validator_is_storeable_test.cc
resolver/var_let_test.cc
resolver/var_let_validation_test.cc
scope_stack_test.cc
- sem/atomic_type_test.cc
- sem/bool_type_test.cc
+ sem/atomic.cc
+ sem/bool_test.cc
sem/builtin_test.cc
- sem/depth_multisampled_texture_type_test.cc
- sem/depth_texture_type_test.cc
- sem/external_texture_type_test.cc
- sem/f32_type_test.cc
- sem/i32_type_test.cc
- sem/matrix_type_test.cc
- sem/multisampled_texture_type_test.cc
- sem/pointer_type_test.cc
- sem/reference_type_test.cc
- sem/sampled_texture_type_test.cc
- sem/sampler_type_test.cc
+ sem/constant_test.cc
+ sem/depth_multisampled_texture_test.cc
+ sem/depth_texture_test.cc
+ sem/expression_test.cc
+ sem/external_texture_test.cc
+ sem/f16_test.cc
+ sem/f32_test.cc
+ sem/i32_test.cc
+ sem/matrix_test.cc
+ sem/multisampled_texture_test.cc
+ sem/pointer_test.cc
+ sem/reference_test.cc
+ sem/sampled_texture_test.cc
+ sem/sampler_test.cc
sem/sem_array_test.cc
sem/sem_struct_test.cc
- sem/storage_texture_type_test.cc
- sem/texture_type_test.cc
+ sem/storage_texture_test.cc
+ sem/texture_test.cc
sem/type_manager_test.cc
- sem/u32_type_test.cc
- sem/vector_type_test.cc
+ sem/type_test.cc
+ sem/u32_test.cc
+ sem/vector_test.cc
source_test.cc
symbol_table_test.cc
symbol_test.cc
@@ -799,6 +832,7 @@ if(TINT_BUILD_TESTS)
text/unicode_test.cc
traits_test.cc
transform/transform_test.cc
+ utils/bitcast_test.cc
utils/block_allocator_test.cc
utils/crc32_test.cc
utils/defer_test.cc
@@ -808,6 +842,7 @@ if(TINT_BUILD_TESTS)
utils/io/tmpfile_test.cc
utils/map_test.cc
utils/math_test.cc
+ utils/result_test.cc
utils/reverse_test.cc
utils/scoped_assignment_test.cc
utils/string_test.cc
@@ -815,11 +850,19 @@ if(TINT_BUILD_TESTS)
utils/unique_allocator_test.cc
utils/unique_vector_test.cc
writer/append_vector_test.cc
+ writer/flatten_bindings_test.cc
writer/float_to_string_test.cc
writer/generate_external_texture_bindings_test.cc
writer/text_generator_test.cc
)
+ # Uniformity analysis tests depend on WGSL reader
+ if(${TINT_BUILD_WGSL_READER})
+ list(APPEND TINT_TEST_SRCS
+ resolver/uniformity_test.cc
+ )
+ endif()
+
# Inspector tests depend on WGSL reader
if(${TINT_BUILD_WGSL_READER})
list(APPEND TINT_TEST_SRCS
@@ -886,9 +929,9 @@ if(TINT_BUILD_TESTS)
reader/wgsl/parser_impl_const_literal_test.cc
reader/wgsl/parser_impl_continue_stmt_test.cc
reader/wgsl/parser_impl_continuing_stmt_test.cc
- reader/wgsl/parser_impl_depth_texture_type_test.cc
- reader/wgsl/parser_impl_external_texture_type_test.cc
- reader/wgsl/parser_impl_elseif_stmt_test.cc
+ reader/wgsl/parser_impl_depth_texture_test.cc
+ reader/wgsl/parser_impl_enable_directive_test.cc
+ reader/wgsl/parser_impl_external_texture_test.cc
reader/wgsl/parser_impl_equality_expression_test.cc
reader/wgsl/parser_impl_error_msg_test.cc
reader/wgsl/parser_impl_error_resync_test.cc
@@ -914,14 +957,14 @@ if(TINT_BUILD_TESTS)
reader/wgsl/parser_impl_primary_expression_test.cc
reader/wgsl/parser_impl_relational_expression_test.cc
reader/wgsl/parser_impl_reserved_keyword_test.cc
- reader/wgsl/parser_impl_sampled_texture_type_test.cc
- reader/wgsl/parser_impl_sampler_type_test.cc
+ reader/wgsl/parser_impl_sampled_texture_test.cc
+ reader/wgsl/parser_impl_sampler_test.cc
reader/wgsl/parser_impl_shift_expression_test.cc
reader/wgsl/parser_impl_singular_expression_test.cc
reader/wgsl/parser_impl_statement_test.cc
reader/wgsl/parser_impl_statements_test.cc
reader/wgsl/parser_impl_storage_class_test.cc
- reader/wgsl/parser_impl_storage_texture_type_test.cc
+ reader/wgsl/parser_impl_storage_texture_test.cc
reader/wgsl/parser_impl_struct_body_decl_test.cc
reader/wgsl/parser_impl_struct_decl_test.cc
reader/wgsl/parser_impl_struct_attribute_decl_test.cc
@@ -934,7 +977,7 @@ if(TINT_BUILD_TESTS)
reader/wgsl/parser_impl_test_helper.cc
reader/wgsl/parser_impl_test_helper.h
reader/wgsl/parser_impl_texel_format_test.cc
- reader/wgsl/parser_impl_texture_sampler_types_test.cc
+ reader/wgsl/parser_impl_texture_sampler_test.cc
reader/wgsl/parser_impl_type_alias_test.cc
reader/wgsl/parser_impl_type_decl_test.cc
reader/wgsl/parser_impl_unary_expression_test.cc
@@ -1001,6 +1044,7 @@ if(TINT_BUILD_TESTS)
writer/wgsl/generator_impl_constructor_test.cc
writer/wgsl/generator_impl_continue_test.cc
writer/wgsl/generator_impl_discard_test.cc
+ writer/wgsl/generator_impl_enable_test.cc
writer/wgsl/generator_impl_fallthrough_test.cc
writer/wgsl/generator_impl_function_test.cc
writer/wgsl/generator_impl_global_decl_test.cc
@@ -1032,8 +1076,9 @@ if(TINT_BUILD_TESTS)
transform/decompose_memory_access_test.cc
transform/decompose_strided_array_test.cc
transform/decompose_strided_matrix_test.cc
+ transform/disable_uniformity_analysis_test.cc
+ transform/expand_compound_assignment_test.cc
transform/first_index_offset_test.cc
- transform/fold_constants_test.cc
transform/fold_trivial_single_use_lets_test.cc
transform/for_loop_to_loop_test.cc
transform/expand_compound_assignment.cc
diff --git a/chromium/third_party/dawn/src/tint/ast/access.cc b/chromium/third_party/dawn/src/tint/ast/access.cc
index 575463dfb3d..642d8521d08 100644
--- a/chromium/third_party/dawn/src/tint/ast/access.cc
+++ b/chromium/third_party/dawn/src/tint/ast/access.cc
@@ -17,25 +17,25 @@
namespace tint::ast {
std::ostream& operator<<(std::ostream& out, Access access) {
- switch (access) {
- case ast::Access::kUndefined: {
- out << "undefined";
- break;
+ switch (access) {
+ case ast::Access::kUndefined: {
+ out << "undefined";
+ break;
+ }
+ case ast::Access::kRead: {
+ out << "read";
+ break;
+ }
+ case ast::Access::kReadWrite: {
+ out << "read_write";
+ break;
+ }
+ case ast::Access::kWrite: {
+ out << "write";
+ break;
+ }
}
- case ast::Access::kRead: {
- out << "read";
- break;
- }
- case ast::Access::kReadWrite: {
- out << "read_write";
- break;
- }
- case ast::Access::kWrite: {
- out << "write";
- break;
- }
- }
- return out;
+ return out;
}
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/access.h b/chromium/third_party/dawn/src/tint/ast/access.h
index aa9f656957a..3714b708039 100644
--- a/chromium/third_party/dawn/src/tint/ast/access.h
+++ b/chromium/third_party/dawn/src/tint/ast/access.h
@@ -22,16 +22,16 @@ namespace tint::ast {
/// The access control settings
enum Access {
- /// Not declared in the source
- kUndefined = 0,
- /// Read only
- kRead,
- /// Write only
- kWrite,
- /// Read write
- kReadWrite,
- // Last valid access mode
- kLastValid = kReadWrite,
+ /// Not declared in the source
+ kUndefined = 0,
+ /// Read only
+ kRead,
+ /// Write only
+ kWrite,
+ /// Read write
+ kReadWrite,
+ // Last valid access mode
+ kLastValid = kReadWrite,
};
/// @param out the std::ostream to write to
diff --git a/chromium/third_party/dawn/src/tint/ast/alias.cc b/chromium/third_party/dawn/src/tint/ast/alias.cc
index 1f76749dcde..fa98cd486b7 100644
--- a/chromium/third_party/dawn/src/tint/ast/alias.cc
+++ b/chromium/third_party/dawn/src/tint/ast/alias.cc
@@ -20,12 +20,9 @@ TINT_INSTANTIATE_TYPEINFO(tint::ast::Alias);
namespace tint::ast {
-Alias::Alias(ProgramID pid,
- const Source& src,
- const Symbol& n,
- const Type* subtype)
+Alias::Alias(ProgramID pid, const Source& src, const Symbol& n, const Type* subtype)
: Base(pid, src, n), type(subtype) {
- TINT_ASSERT(AST, type);
+ TINT_ASSERT(AST, type);
}
Alias::Alias(Alias&&) = default;
@@ -33,11 +30,11 @@ Alias::Alias(Alias&&) = default;
Alias::~Alias() = default;
const Alias* Alias::Clone(CloneContext* ctx) const {
- // Clone arguments outside of create() call to have deterministic ordering
- auto src = ctx->Clone(source);
- auto sym = ctx->Clone(name);
- auto* ty = ctx->Clone(type);
- return ctx->dst->create<Alias>(src, sym, ty);
+ // Clone arguments outside of create() call to have deterministic ordering
+ auto src = ctx->Clone(source);
+ auto sym = ctx->Clone(name);
+ auto* ty = ctx->Clone(type);
+ return ctx->dst->create<Alias>(src, sym, ty);
}
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/alias.h b/chromium/third_party/dawn/src/tint/ast/alias.h
index f527d43f9db..87ce57874de 100644
--- a/chromium/third_party/dawn/src/tint/ast/alias.h
+++ b/chromium/third_party/dawn/src/tint/ast/alias.h
@@ -23,28 +23,25 @@ namespace tint::ast {
/// A type alias type. Holds a name and pointer to another type.
class Alias final : public Castable<Alias, TypeDecl> {
- public:
- /// Constructor
- /// @param pid the identifier of the program that owns this node
- /// @param src the source of this node
- /// @param name the symbol for the alias
- /// @param subtype the alias'd type
- Alias(ProgramID pid,
- const Source& src,
- const Symbol& name,
- const Type* subtype);
- /// Move constructor
- Alias(Alias&&);
- /// Destructor
- ~Alias() override;
-
- /// Clones this type and all transitive types using the `CloneContext` `ctx`.
- /// @param ctx the clone context
- /// @return the newly cloned type
- const Alias* Clone(CloneContext* ctx) const override;
-
- /// the alias type
- const Type* const type;
+ public:
+ /// Constructor
+ /// @param pid the identifier of the program that owns this node
+ /// @param src the source of this node
+ /// @param name the symbol for the alias
+ /// @param subtype the alias'd type
+ Alias(ProgramID pid, const Source& src, const Symbol& name, const Type* subtype);
+ /// Move constructor
+ Alias(Alias&&);
+ /// Destructor
+ ~Alias() override;
+
+ /// Clones this type and all transitive types using the `CloneContext` `ctx`.
+ /// @param ctx the clone context
+ /// @return the newly cloned type
+ const Alias* Clone(CloneContext* ctx) const override;
+
+ /// the alias type
+ const Type* const type;
};
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/alias_test.cc b/chromium/third_party/dawn/src/tint/ast/alias_test.cc
index 6669e525e1e..c73475168e7 100644
--- a/chromium/third_party/dawn/src/tint/ast/alias_test.cc
+++ b/chromium/third_party/dawn/src/tint/ast/alias_test.cc
@@ -33,10 +33,10 @@ namespace {
using AstAliasTest = TestHelper;
TEST_F(AstAliasTest, Create) {
- auto* u32 = create<U32>();
- auto* a = Alias("a_type", u32);
- EXPECT_EQ(a->name, Symbol(1, ID()));
- EXPECT_EQ(a->type, u32);
+ auto* u32 = create<U32>();
+ auto* a = Alias("a_type", u32);
+ EXPECT_EQ(a->name, Symbol(1, ID()));
+ EXPECT_EQ(a->type, u32);
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/ast/array.cc b/chromium/third_party/dawn/src/tint/ast/array.cc
index 58d8a45f83a..0389ed0d9c4 100644
--- a/chromium/third_party/dawn/src/tint/ast/array.cc
+++ b/chromium/third_party/dawn/src/tint/ast/array.cc
@@ -24,17 +24,16 @@ namespace tint::ast {
namespace {
// Returns the string representation of an array size expression.
-std::string SizeExprToString(const Expression* size,
- const SymbolTable& symbols) {
- if (auto* ident = size->As<IdentifierExpression>()) {
- return symbols.NameFor(ident->symbol);
- }
- if (auto* literal = size->As<IntLiteralExpression>()) {
- return std::to_string(literal->ValueAsU32());
- }
- // This will never be exposed to the user as the Resolver will reject this
- // expression for array size.
- return "<invalid>";
+std::string SizeExprToString(const Expression* size, const SymbolTable& symbols) {
+ if (auto* ident = size->As<IdentifierExpression>()) {
+ return symbols.NameFor(ident->symbol);
+ }
+ if (auto* literal = size->As<IntLiteralExpression>()) {
+ return std::to_string(literal->value);
+ }
+ // This will never be exposed to the user as the Resolver will reject this
+ // expression for array size.
+ return "<invalid>";
}
} // namespace
@@ -50,27 +49,27 @@ Array::Array(Array&&) = default;
Array::~Array() = default;
std::string Array::FriendlyName(const SymbolTable& symbols) const {
- std::ostringstream out;
- for (auto* attr : attributes) {
- if (auto* stride = attr->As<ast::StrideAttribute>()) {
- out << "@stride(" << stride->stride << ") ";
+ std::ostringstream out;
+ for (auto* attr : attributes) {
+ if (auto* stride = attr->As<ast::StrideAttribute>()) {
+ out << "@stride(" << stride->stride << ") ";
+ }
+ }
+ out << "array<" << type->FriendlyName(symbols);
+ if (!IsRuntimeArray()) {
+ out << ", " << SizeExprToString(count, symbols);
}
- }
- out << "array<" << type->FriendlyName(symbols);
- if (!IsRuntimeArray()) {
- out << ", " << SizeExprToString(count, symbols);
- }
- out << ">";
- return out.str();
+ out << ">";
+ return out.str();
}
const Array* Array::Clone(CloneContext* ctx) const {
- // Clone arguments outside of create() call to have deterministic ordering
- auto src = ctx->Clone(source);
- auto* ty = ctx->Clone(type);
- auto* cnt = ctx->Clone(count);
- auto attrs = ctx->Clone(attributes);
- return ctx->dst->create<Array>(src, ty, cnt, attrs);
+ // Clone arguments outside of create() call to have deterministic ordering
+ auto src = ctx->Clone(source);
+ auto* ty = ctx->Clone(type);
+ auto* cnt = ctx->Clone(count);
+ auto attrs = ctx->Clone(attributes);
+ return ctx->dst->create<Array>(src, ty, cnt, attrs);
}
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/array.h b/chromium/third_party/dawn/src/tint/ast/array.h
index 0867c6d35e8..e92902d4cb1 100644
--- a/chromium/third_party/dawn/src/tint/ast/array.h
+++ b/chromium/third_party/dawn/src/tint/ast/array.h
@@ -29,45 +29,45 @@ namespace tint::ast {
/// An array type. If size is zero then it is a runtime array.
class Array final : public Castable<Array, Type> {
- public:
- /// Constructor
- /// @param pid the identifier of the program that owns this node
- /// @param src the source of this node
- /// @param subtype the type of the array elements
- /// @param count the number of elements in the array. nullptr represents a
- /// runtime-sized array.
- /// @param attributes the array attributes
- Array(ProgramID pid,
- const Source& src,
- const Type* subtype,
- const Expression* count,
- AttributeList attributes);
- /// Move constructor
- Array(Array&&);
- ~Array() override;
+ public:
+ /// Constructor
+ /// @param pid the identifier of the program that owns this node
+ /// @param src the source of this node
+ /// @param subtype the type of the array elements
+ /// @param count the number of elements in the array. nullptr represents a
+ /// runtime-sized array.
+ /// @param attributes the array attributes
+ Array(ProgramID pid,
+ const Source& src,
+ const Type* subtype,
+ const Expression* count,
+ AttributeList attributes);
+ /// Move constructor
+ Array(Array&&);
+ ~Array() override;
- /// @returns true if this is a runtime array.
- /// i.e. the size is determined at runtime
- bool IsRuntimeArray() const { return count == nullptr; }
+ /// @returns true if this is a runtime array.
+ /// i.e. the size is determined at runtime
+ bool IsRuntimeArray() const { return count == nullptr; }
- /// @param symbols the program's symbol table
- /// @returns the name for this type that closely resembles how it would be
- /// declared in WGSL.
- std::string FriendlyName(const SymbolTable& symbols) const override;
+ /// @param symbols the program's symbol table
+ /// @returns the name for this type that closely resembles how it would be
+ /// declared in WGSL.
+ std::string FriendlyName(const SymbolTable& symbols) const override;
- /// Clones this type and all transitive types using the `CloneContext` `ctx`.
- /// @param ctx the clone context
- /// @return the newly cloned type
- const Array* Clone(CloneContext* ctx) const override;
+ /// Clones this type and all transitive types using the `CloneContext` `ctx`.
+ /// @param ctx the clone context
+ /// @return the newly cloned type
+ const Array* Clone(CloneContext* ctx) const override;
- /// the array element type
- const Type* const type;
+ /// the array element type
+ const Type* const type;
- /// the array size in elements, or nullptr for a runtime array
- const Expression* const count;
+ /// the array size in elements, or nullptr for a runtime array
+ const Expression* const count;
- /// the array attributes
- const AttributeList attributes;
+ /// the array attributes
+ const AttributeList attributes;
};
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/array_test.cc b/chromium/third_party/dawn/src/tint/ast/array_test.cc
index e255920761d..baed0791e71 100644
--- a/chromium/third_party/dawn/src/tint/ast/array_test.cc
+++ b/chromium/third_party/dawn/src/tint/ast/array_test.cc
@@ -16,53 +16,54 @@
#include "src/tint/ast/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::ast {
namespace {
using AstArrayTest = TestHelper;
TEST_F(AstArrayTest, CreateSizedArray) {
- auto* u32 = create<U32>();
- auto* count = Expr(3);
- auto* arr = create<Array>(u32, count, AttributeList{});
- EXPECT_EQ(arr->type, u32);
- EXPECT_EQ(arr->count, count);
- EXPECT_TRUE(arr->Is<Array>());
- EXPECT_FALSE(arr->IsRuntimeArray());
+ auto* u32 = create<U32>();
+ auto* count = Expr(3_u);
+ auto* arr = create<Array>(u32, count, AttributeList{});
+ EXPECT_EQ(arr->type, u32);
+ EXPECT_EQ(arr->count, count);
+ EXPECT_TRUE(arr->Is<Array>());
+ EXPECT_FALSE(arr->IsRuntimeArray());
}
TEST_F(AstArrayTest, CreateRuntimeArray) {
- auto* u32 = create<U32>();
- auto* arr = create<Array>(u32, nullptr, AttributeList{});
- EXPECT_EQ(arr->type, u32);
- EXPECT_EQ(arr->count, nullptr);
- EXPECT_TRUE(arr->Is<Array>());
- EXPECT_TRUE(arr->IsRuntimeArray());
+ auto* u32 = create<U32>();
+ auto* arr = create<Array>(u32, nullptr, AttributeList{});
+ EXPECT_EQ(arr->type, u32);
+ EXPECT_EQ(arr->count, nullptr);
+ EXPECT_TRUE(arr->Is<Array>());
+ EXPECT_TRUE(arr->IsRuntimeArray());
}
TEST_F(AstArrayTest, FriendlyName_RuntimeSized) {
- auto* i32 = create<I32>();
- auto* arr = create<Array>(i32, nullptr, AttributeList{});
- EXPECT_EQ(arr->FriendlyName(Symbols()), "array<i32>");
+ auto* i32 = create<I32>();
+ auto* arr = create<Array>(i32, nullptr, AttributeList{});
+ EXPECT_EQ(arr->FriendlyName(Symbols()), "array<i32>");
}
TEST_F(AstArrayTest, FriendlyName_LiteralSized) {
- auto* i32 = create<I32>();
- auto* arr = create<Array>(i32, Expr(5), AttributeList{});
- EXPECT_EQ(arr->FriendlyName(Symbols()), "array<i32, 5>");
+ auto* i32 = create<I32>();
+ auto* arr = create<Array>(i32, Expr(5_u), AttributeList{});
+ EXPECT_EQ(arr->FriendlyName(Symbols()), "array<i32, 5>");
}
TEST_F(AstArrayTest, FriendlyName_ConstantSized) {
- auto* i32 = create<I32>();
- auto* arr = create<Array>(i32, Expr("size"), AttributeList{});
- EXPECT_EQ(arr->FriendlyName(Symbols()), "array<i32, size>");
+ auto* i32 = create<I32>();
+ auto* arr = create<Array>(i32, Expr("size"), AttributeList{});
+ EXPECT_EQ(arr->FriendlyName(Symbols()), "array<i32, size>");
}
TEST_F(AstArrayTest, FriendlyName_WithStride) {
- auto* i32 = create<I32>();
- auto* arr =
- create<Array>(i32, Expr(5), AttributeList{create<StrideAttribute>(32)});
- EXPECT_EQ(arr->FriendlyName(Symbols()), "@stride(32) array<i32, 5>");
+ auto* i32 = create<I32>();
+ auto* arr = create<Array>(i32, Expr(5_u), AttributeList{create<StrideAttribute>(32)});
+ EXPECT_EQ(arr->FriendlyName(Symbols()), "@stride(32) array<i32, 5>");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/ast/assignment_statement.cc b/chromium/third_party/dawn/src/tint/ast/assignment_statement.cc
index 575e5d2a444..d7d7bc582a8 100644
--- a/chromium/third_party/dawn/src/tint/ast/assignment_statement.cc
+++ b/chromium/third_party/dawn/src/tint/ast/assignment_statement.cc
@@ -25,10 +25,10 @@ AssignmentStatement::AssignmentStatement(ProgramID pid,
const Expression* l,
const Expression* r)
: Base(pid, src), lhs(l), rhs(r) {
- TINT_ASSERT(AST, lhs);
- TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, lhs, program_id);
- TINT_ASSERT(AST, rhs);
- TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, rhs, program_id);
+ TINT_ASSERT(AST, lhs);
+ TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, lhs, program_id);
+ TINT_ASSERT(AST, rhs);
+ TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, rhs, program_id);
}
AssignmentStatement::AssignmentStatement(AssignmentStatement&&) = default;
@@ -36,11 +36,11 @@ AssignmentStatement::AssignmentStatement(AssignmentStatement&&) = default;
AssignmentStatement::~AssignmentStatement() = default;
const AssignmentStatement* AssignmentStatement::Clone(CloneContext* ctx) const {
- // Clone arguments outside of create() call to have deterministic ordering
- auto src = ctx->Clone(source);
- auto* l = ctx->Clone(lhs);
- auto* r = ctx->Clone(rhs);
- return ctx->dst->create<AssignmentStatement>(src, l, r);
+ // Clone arguments outside of create() call to have deterministic ordering
+ auto src = ctx->Clone(source);
+ auto* l = ctx->Clone(lhs);
+ auto* r = ctx->Clone(rhs);
+ return ctx->dst->create<AssignmentStatement>(src, l, r);
}
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/assignment_statement.h b/chromium/third_party/dawn/src/tint/ast/assignment_statement.h
index 5fb8e3a2243..9def075e76f 100644
--- a/chromium/third_party/dawn/src/tint/ast/assignment_statement.h
+++ b/chromium/third_party/dawn/src/tint/ast/assignment_statement.h
@@ -21,33 +21,32 @@
namespace tint::ast {
/// An assignment statement
-class AssignmentStatement final
- : public Castable<AssignmentStatement, Statement> {
- public:
- /// Constructor
- /// @param program_id the identifier of the program that owns this node
- /// @param source the assignment statement source
- /// @param lhs the left side of the expression
- /// @param rhs the right side of the expression
- AssignmentStatement(ProgramID program_id,
- const Source& source,
- const Expression* lhs,
- const Expression* rhs);
- /// Move constructor
- AssignmentStatement(AssignmentStatement&&);
- ~AssignmentStatement() override;
-
- /// Clones this node and all transitive child nodes using the `CloneContext`
- /// `ctx`.
- /// @param ctx the clone context
- /// @return the newly cloned node
- const AssignmentStatement* Clone(CloneContext* ctx) const override;
-
- /// left side expression
- const Expression* const lhs;
-
- /// right side expression
- const Expression* const rhs;
+class AssignmentStatement final : public Castable<AssignmentStatement, Statement> {
+ public:
+ /// Constructor
+ /// @param program_id the identifier of the program that owns this node
+ /// @param source the assignment statement source
+ /// @param lhs the left side of the expression
+ /// @param rhs the right side of the expression
+ AssignmentStatement(ProgramID program_id,
+ const Source& source,
+ const Expression* lhs,
+ const Expression* rhs);
+ /// Move constructor
+ AssignmentStatement(AssignmentStatement&&);
+ ~AssignmentStatement() override;
+
+ /// Clones this node and all transitive child nodes using the `CloneContext`
+ /// `ctx`.
+ /// @param ctx the clone context
+ /// @return the newly cloned node
+ const AssignmentStatement* Clone(CloneContext* ctx) const override;
+
+ /// left side expression
+ const Expression* const lhs;
+
+ /// right side expression
+ const Expression* const rhs;
};
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/assignment_statement_test.cc b/chromium/third_party/dawn/src/tint/ast/assignment_statement_test.cc
index 39f8b83c730..477501bb375 100644
--- a/chromium/third_party/dawn/src/tint/ast/assignment_statement_test.cc
+++ b/chromium/third_party/dawn/src/tint/ast/assignment_statement_test.cc
@@ -17,75 +17,76 @@
#include "gtest/gtest-spi.h"
#include "src/tint/ast/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::ast {
namespace {
using AssignmentStatementTest = TestHelper;
TEST_F(AssignmentStatementTest, Creation) {
- auto* lhs = Expr("lhs");
- auto* rhs = Expr("rhs");
+ auto* lhs = Expr("lhs");
+ auto* rhs = Expr("rhs");
- auto* stmt = create<AssignmentStatement>(lhs, rhs);
- EXPECT_EQ(stmt->lhs, lhs);
- EXPECT_EQ(stmt->rhs, rhs);
+ auto* stmt = create<AssignmentStatement>(lhs, rhs);
+ EXPECT_EQ(stmt->lhs, lhs);
+ EXPECT_EQ(stmt->rhs, rhs);
}
TEST_F(AssignmentStatementTest, CreationWithSource) {
- auto* lhs = Expr("lhs");
- auto* rhs = Expr("rhs");
-
- auto* stmt =
- create<AssignmentStatement>(Source{Source::Location{20, 2}}, lhs, rhs);
- auto src = stmt->source;
- EXPECT_EQ(src.range.begin.line, 20u);
- EXPECT_EQ(src.range.begin.column, 2u);
+ auto* lhs = Expr("lhs");
+ auto* rhs = Expr("rhs");
+
+ auto* stmt = create<AssignmentStatement>(Source{Source::Location{20, 2}}, lhs, rhs);
+ auto src = stmt->source;
+ EXPECT_EQ(src.range.begin.line, 20u);
+ EXPECT_EQ(src.range.begin.column, 2u);
}
TEST_F(AssignmentStatementTest, IsAssign) {
- auto* lhs = Expr("lhs");
- auto* rhs = Expr("rhs");
+ auto* lhs = Expr("lhs");
+ auto* rhs = Expr("rhs");
- auto* stmt = create<AssignmentStatement>(lhs, rhs);
- EXPECT_TRUE(stmt->Is<AssignmentStatement>());
+ auto* stmt = create<AssignmentStatement>(lhs, rhs);
+ EXPECT_TRUE(stmt->Is<AssignmentStatement>());
}
TEST_F(AssignmentStatementTest, Assert_Null_LHS) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b;
- b.create<AssignmentStatement>(nullptr, b.Expr(1));
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b;
+ b.create<AssignmentStatement>(nullptr, b.Expr(1_i));
+ },
+ "internal compiler error");
}
TEST_F(AssignmentStatementTest, Assert_Null_RHS) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b;
- b.create<AssignmentStatement>(b.Expr(1), nullptr);
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b;
+ b.create<AssignmentStatement>(b.Expr(1_i), nullptr);
+ },
+ "internal compiler error");
}
TEST_F(AssignmentStatementTest, Assert_DifferentProgramID_LHS) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b1;
- ProgramBuilder b2;
- b1.create<AssignmentStatement>(b2.Expr("lhs"), b1.Expr("rhs"));
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b1;
+ ProgramBuilder b2;
+ b1.create<AssignmentStatement>(b2.Expr("lhs"), b1.Expr("rhs"));
+ },
+ "internal compiler error");
}
TEST_F(AssignmentStatementTest, Assert_DifferentProgramID_RHS) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b1;
- ProgramBuilder b2;
- b1.create<AssignmentStatement>(b1.Expr("lhs"), b2.Expr("rhs"));
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b1;
+ ProgramBuilder b2;
+ b1.create<AssignmentStatement>(b1.Expr("lhs"), b2.Expr("rhs"));
+ },
+ "internal compiler error");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/ast/atomic.cc b/chromium/third_party/dawn/src/tint/ast/atomic.cc
index 714dda4b298..ce7019b8530 100644
--- a/chromium/third_party/dawn/src/tint/ast/atomic.cc
+++ b/chromium/third_party/dawn/src/tint/ast/atomic.cc
@@ -24,9 +24,9 @@ Atomic::Atomic(ProgramID pid, const Source& src, const Type* const subtype)
: Base(pid, src), type(subtype) {}
std::string Atomic::FriendlyName(const SymbolTable& symbols) const {
- std::ostringstream out;
- out << "atomic<" << type->FriendlyName(symbols) << ">";
- return out.str();
+ std::ostringstream out;
+ out << "atomic<" << type->FriendlyName(symbols) << ">";
+ return out.str();
}
Atomic::Atomic(Atomic&&) = default;
@@ -34,10 +34,10 @@ Atomic::Atomic(Atomic&&) = default;
Atomic::~Atomic() = default;
const Atomic* Atomic::Clone(CloneContext* ctx) const {
- // Clone arguments outside of create() call to have deterministic ordering
- auto src = ctx->Clone(source);
- auto* ty = ctx->Clone(type);
- return ctx->dst->create<Atomic>(src, ty);
+ // Clone arguments outside of create() call to have deterministic ordering
+ auto src = ctx->Clone(source);
+ auto* ty = ctx->Clone(type);
+ return ctx->dst->create<Atomic>(src, ty);
}
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/atomic.h b/chromium/third_party/dawn/src/tint/ast/atomic.h
index 734898f9415..5f634222f1e 100644
--- a/chromium/third_party/dawn/src/tint/ast/atomic.h
+++ b/chromium/third_party/dawn/src/tint/ast/atomic.h
@@ -23,28 +23,28 @@ namespace tint::ast {
/// An atomic type.
class Atomic final : public Castable<Atomic, Type> {
- public:
- /// Constructor
- /// @param pid the identifier of the program that owns this node
- /// @param src the source of this node
- /// @param subtype the pointee type
- Atomic(ProgramID pid, const Source& src, const Type* const subtype);
- /// Move constructor
- Atomic(Atomic&&);
- ~Atomic() override;
-
- /// @param symbols the program's symbol table
- /// @returns the name for this type that closely resembles how it would be
- /// declared in WGSL.
- std::string FriendlyName(const SymbolTable& symbols) const override;
-
- /// Clones this type and all transitive types using the `CloneContext` `ctx`.
- /// @param ctx the clone context
- /// @return the newly cloned type
- const Atomic* Clone(CloneContext* ctx) const override;
-
- /// the pointee type
- const Type* const type;
+ public:
+ /// Constructor
+ /// @param pid the identifier of the program that owns this node
+ /// @param src the source of this node
+ /// @param subtype the pointee type
+ Atomic(ProgramID pid, const Source& src, const Type* const subtype);
+ /// Move constructor
+ Atomic(Atomic&&);
+ ~Atomic() override;
+
+ /// @param symbols the program's symbol table
+ /// @returns the name for this type that closely resembles how it would be
+ /// declared in WGSL.
+ std::string FriendlyName(const SymbolTable& symbols) const override;
+
+ /// Clones this type and all transitive types using the `CloneContext` `ctx`.
+ /// @param ctx the clone context
+ /// @return the newly cloned type
+ const Atomic* Clone(CloneContext* ctx) const override;
+
+ /// the pointee type
+ const Type* const type;
};
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/atomic_test.cc b/chromium/third_party/dawn/src/tint/ast/atomic_test.cc
index cd192c16194..2b0ada948c5 100644
--- a/chromium/third_party/dawn/src/tint/ast/atomic_test.cc
+++ b/chromium/third_party/dawn/src/tint/ast/atomic_test.cc
@@ -23,15 +23,15 @@ namespace {
using AstAtomicTest = TestHelper;
TEST_F(AstAtomicTest, Creation) {
- auto* i32 = create<I32>();
- auto* p = create<Atomic>(i32);
- EXPECT_EQ(p->type, i32);
+ auto* i32 = create<I32>();
+ auto* p = create<Atomic>(i32);
+ EXPECT_EQ(p->type, i32);
}
TEST_F(AstAtomicTest, FriendlyName) {
- auto* i32 = create<I32>();
- auto* p = create<Atomic>(i32);
- EXPECT_EQ(p->FriendlyName(Symbols()), "atomic<i32>");
+ auto* i32 = create<I32>();
+ auto* p = create<Atomic>(i32);
+ EXPECT_EQ(p->FriendlyName(Symbols()), "atomic<i32>");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/ast/attribute.h b/chromium/third_party/dawn/src/tint/ast/attribute.h
index b5c0ffcf76b..cb9bf766d41 100644
--- a/chromium/third_party/dawn/src/tint/ast/attribute.h
+++ b/chromium/third_party/dawn/src/tint/ast/attribute.h
@@ -24,17 +24,17 @@ namespace tint::ast {
/// The base class for all attributes
class Attribute : public Castable<Attribute, Node> {
- public:
- ~Attribute() override;
+ public:
+ ~Attribute() override;
- /// @returns the WGSL name for the attribute
- virtual std::string Name() const = 0;
+ /// @returns the WGSL name for the attribute
+ virtual std::string Name() const = 0;
- protected:
- /// Constructor
- /// @param pid the identifier of the program that owns this node
- /// @param src the source of this node
- Attribute(ProgramID pid, const Source& src) : Base(pid, src) {}
+ protected:
+ /// Constructor
+ /// @param pid the identifier of the program that owns this node
+ /// @param src the source of this node
+ Attribute(ProgramID pid, const Source& src) : Base(pid, src) {}
};
/// A list of attributes
@@ -44,24 +44,24 @@ using AttributeList = std::vector<const Attribute*>;
/// @returns true if `attributes` includes an attribute of type `T`
template <typename T>
bool HasAttribute(const AttributeList& attributes) {
- for (auto* attr : attributes) {
- if (attr->Is<T>()) {
- return true;
+ for (auto* attr : attributes) {
+ if (attr->Is<T>()) {
+ return true;
+ }
}
- }
- return false;
+ return false;
}
/// @param attributes the list of attributes to search
/// @returns a pointer to `T` from `attributes` if found, otherwise nullptr.
template <typename T>
const T* GetAttribute(const AttributeList& attributes) {
- for (auto* attr : attributes) {
- if (attr->Is<T>()) {
- return attr->As<T>();
+ for (auto* attr : attributes) {
+ if (attr->Is<T>()) {
+ return attr->As<T>();
+ }
}
- }
- return nullptr;
+ return nullptr;
}
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/binary_expression.cc b/chromium/third_party/dawn/src/tint/ast/binary_expression.cc
index 6d67851238e..e3ccd8b8824 100644
--- a/chromium/third_party/dawn/src/tint/ast/binary_expression.cc
+++ b/chromium/third_party/dawn/src/tint/ast/binary_expression.cc
@@ -26,11 +26,11 @@ BinaryExpression::BinaryExpression(ProgramID pid,
const Expression* l,
const Expression* r)
: Base(pid, src), op(o), lhs(l), rhs(r) {
- TINT_ASSERT(AST, lhs);
- TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, lhs, program_id);
- TINT_ASSERT(AST, rhs);
- TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, rhs, program_id);
- TINT_ASSERT(AST, op != BinaryOp::kNone);
+ TINT_ASSERT(AST, lhs);
+ TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, lhs, program_id);
+ TINT_ASSERT(AST, rhs);
+ TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, rhs, program_id);
+ TINT_ASSERT(AST, op != BinaryOp::kNone);
}
BinaryExpression::BinaryExpression(BinaryExpression&&) = default;
@@ -38,11 +38,11 @@ BinaryExpression::BinaryExpression(BinaryExpression&&) = default;
BinaryExpression::~BinaryExpression() = default;
const BinaryExpression* BinaryExpression::Clone(CloneContext* ctx) const {
- // Clone arguments outside of create() call to have deterministic ordering
- auto src = ctx->Clone(source);
- auto* l = ctx->Clone(lhs);
- auto* r = ctx->Clone(rhs);
- return ctx->dst->create<BinaryExpression>(src, op, l, r);
+ // Clone arguments outside of create() call to have deterministic ordering
+ auto src = ctx->Clone(source);
+ auto* l = ctx->Clone(lhs);
+ auto* r = ctx->Clone(rhs);
+ return ctx->dst->create<BinaryExpression>(src, op, l, r);
}
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/binary_expression.h b/chromium/third_party/dawn/src/tint/ast/binary_expression.h
index ab3f6be56d0..ad59da401de 100644
--- a/chromium/third_party/dawn/src/tint/ast/binary_expression.h
+++ b/chromium/third_party/dawn/src/tint/ast/binary_expression.h
@@ -21,240 +21,240 @@ namespace tint::ast {
/// The operator type
enum class BinaryOp {
- kNone = 0,
- kAnd, // &
- kOr, // |
- kXor,
- kLogicalAnd, // &&
- kLogicalOr, // ||
- kEqual,
- kNotEqual,
- kLessThan,
- kGreaterThan,
- kLessThanEqual,
- kGreaterThanEqual,
- kShiftLeft,
- kShiftRight,
- kAdd,
- kSubtract,
- kMultiply,
- kDivide,
- kModulo,
+ kNone = 0,
+ kAnd, // &
+ kOr, // |
+ kXor,
+ kLogicalAnd, // &&
+ kLogicalOr, // ||
+ kEqual,
+ kNotEqual,
+ kLessThan,
+ kGreaterThan,
+ kLessThanEqual,
+ kGreaterThanEqual,
+ kShiftLeft,
+ kShiftRight,
+ kAdd,
+ kSubtract,
+ kMultiply,
+ kDivide,
+ kModulo,
};
/// A binary expression
class BinaryExpression final : public Castable<BinaryExpression, Expression> {
- public:
- /// Constructor
- /// @param program_id the identifier of the program that owns this node
- /// @param source the binary expression source
- /// @param op the operation type
- /// @param lhs the left side of the expression
- /// @param rhs the right side of the expression
- BinaryExpression(ProgramID program_id,
- const Source& source,
- BinaryOp op,
- const Expression* lhs,
- const Expression* rhs);
- /// Move constructor
- BinaryExpression(BinaryExpression&&);
- ~BinaryExpression() override;
+ public:
+ /// Constructor
+ /// @param program_id the identifier of the program that owns this node
+ /// @param source the binary expression source
+ /// @param op the operation type
+ /// @param lhs the left side of the expression
+ /// @param rhs the right side of the expression
+ BinaryExpression(ProgramID program_id,
+ const Source& source,
+ BinaryOp op,
+ const Expression* lhs,
+ const Expression* rhs);
+ /// Move constructor
+ BinaryExpression(BinaryExpression&&);
+ ~BinaryExpression() override;
- /// @returns true if the op is and
- bool IsAnd() const { return op == BinaryOp::kAnd; }
- /// @returns true if the op is or
- bool IsOr() const { return op == BinaryOp::kOr; }
- /// @returns true if the op is xor
- bool IsXor() const { return op == BinaryOp::kXor; }
- /// @returns true if the op is logical and
- bool IsLogicalAnd() const { return op == BinaryOp::kLogicalAnd; }
- /// @returns true if the op is logical or
- bool IsLogicalOr() const { return op == BinaryOp::kLogicalOr; }
- /// @returns true if the op is equal
- bool IsEqual() const { return op == BinaryOp::kEqual; }
- /// @returns true if the op is not equal
- bool IsNotEqual() const { return op == BinaryOp::kNotEqual; }
- /// @returns true if the op is less than
- bool IsLessThan() const { return op == BinaryOp::kLessThan; }
- /// @returns true if the op is greater than
- bool IsGreaterThan() const { return op == BinaryOp::kGreaterThan; }
- /// @returns true if the op is less than equal
- bool IsLessThanEqual() const { return op == BinaryOp::kLessThanEqual; }
- /// @returns true if the op is greater than equal
- bool IsGreaterThanEqual() const { return op == BinaryOp::kGreaterThanEqual; }
- /// @returns true if the op is shift left
- bool IsShiftLeft() const { return op == BinaryOp::kShiftLeft; }
- /// @returns true if the op is shift right
- bool IsShiftRight() const { return op == BinaryOp::kShiftRight; }
- /// @returns true if the op is add
- bool IsAdd() const { return op == BinaryOp::kAdd; }
- /// @returns true if the op is subtract
- bool IsSubtract() const { return op == BinaryOp::kSubtract; }
- /// @returns true if the op is multiply
- bool IsMultiply() const { return op == BinaryOp::kMultiply; }
- /// @returns true if the op is divide
- bool IsDivide() const { return op == BinaryOp::kDivide; }
- /// @returns true if the op is modulo
- bool IsModulo() const { return op == BinaryOp::kModulo; }
- /// @returns true if the op is an arithmetic operation
- bool IsArithmetic() const;
- /// @returns true if the op is a comparison operation
- bool IsComparison() const;
- /// @returns true if the op is a bitwise operation
- bool IsBitwise() const;
- /// @returns true if the op is a bit shift operation
- bool IsBitshift() const;
- /// @returns true if the op is a logical expression
- bool IsLogical() const;
+ /// @returns true if the op is and
+ bool IsAnd() const { return op == BinaryOp::kAnd; }
+ /// @returns true if the op is or
+ bool IsOr() const { return op == BinaryOp::kOr; }
+ /// @returns true if the op is xor
+ bool IsXor() const { return op == BinaryOp::kXor; }
+ /// @returns true if the op is logical and
+ bool IsLogicalAnd() const { return op == BinaryOp::kLogicalAnd; }
+ /// @returns true if the op is logical or
+ bool IsLogicalOr() const { return op == BinaryOp::kLogicalOr; }
+ /// @returns true if the op is equal
+ bool IsEqual() const { return op == BinaryOp::kEqual; }
+ /// @returns true if the op is not equal
+ bool IsNotEqual() const { return op == BinaryOp::kNotEqual; }
+ /// @returns true if the op is less than
+ bool IsLessThan() const { return op == BinaryOp::kLessThan; }
+ /// @returns true if the op is greater than
+ bool IsGreaterThan() const { return op == BinaryOp::kGreaterThan; }
+ /// @returns true if the op is less than equal
+ bool IsLessThanEqual() const { return op == BinaryOp::kLessThanEqual; }
+ /// @returns true if the op is greater than equal
+ bool IsGreaterThanEqual() const { return op == BinaryOp::kGreaterThanEqual; }
+ /// @returns true if the op is shift left
+ bool IsShiftLeft() const { return op == BinaryOp::kShiftLeft; }
+ /// @returns true if the op is shift right
+ bool IsShiftRight() const { return op == BinaryOp::kShiftRight; }
+ /// @returns true if the op is add
+ bool IsAdd() const { return op == BinaryOp::kAdd; }
+ /// @returns true if the op is subtract
+ bool IsSubtract() const { return op == BinaryOp::kSubtract; }
+ /// @returns true if the op is multiply
+ bool IsMultiply() const { return op == BinaryOp::kMultiply; }
+ /// @returns true if the op is divide
+ bool IsDivide() const { return op == BinaryOp::kDivide; }
+ /// @returns true if the op is modulo
+ bool IsModulo() const { return op == BinaryOp::kModulo; }
+ /// @returns true if the op is an arithmetic operation
+ bool IsArithmetic() const;
+ /// @returns true if the op is a comparison operation
+ bool IsComparison() const;
+ /// @returns true if the op is a bitwise operation
+ bool IsBitwise() const;
+ /// @returns true if the op is a bit shift operation
+ bool IsBitshift() const;
+ /// @returns true if the op is a logical expression
+ bool IsLogical() const;
- /// Clones this node and all transitive child nodes using the `CloneContext`
- /// `ctx`.
- /// @param ctx the clone context
- /// @return the newly cloned node
- const BinaryExpression* Clone(CloneContext* ctx) const override;
+ /// Clones this node and all transitive child nodes using the `CloneContext`
+ /// `ctx`.
+ /// @param ctx the clone context
+ /// @return the newly cloned node
+ const BinaryExpression* Clone(CloneContext* ctx) const override;
- /// the binary op type
- const BinaryOp op;
- /// the left side expression
- const Expression* const lhs;
- /// the right side expression
- const Expression* const rhs;
+ /// the binary op type
+ const BinaryOp op;
+ /// the left side expression
+ const Expression* const lhs;
+ /// the right side expression
+ const Expression* const rhs;
};
/// @param op the operator
/// @returns true if the op is an arithmetic operation
inline bool IsArithmetic(BinaryOp op) {
- switch (op) {
- case ast::BinaryOp::kAdd:
- case ast::BinaryOp::kSubtract:
- case ast::BinaryOp::kMultiply:
- case ast::BinaryOp::kDivide:
- case ast::BinaryOp::kModulo:
- return true;
- default:
- return false;
- }
+ switch (op) {
+ case ast::BinaryOp::kAdd:
+ case ast::BinaryOp::kSubtract:
+ case ast::BinaryOp::kMultiply:
+ case ast::BinaryOp::kDivide:
+ case ast::BinaryOp::kModulo:
+ return true;
+ default:
+ return false;
+ }
}
/// @param op the operator
/// @returns true if the op is a comparison operation
inline bool IsComparison(BinaryOp op) {
- switch (op) {
- case ast::BinaryOp::kEqual:
- case ast::BinaryOp::kNotEqual:
- case ast::BinaryOp::kLessThan:
- case ast::BinaryOp::kLessThanEqual:
- case ast::BinaryOp::kGreaterThan:
- case ast::BinaryOp::kGreaterThanEqual:
- return true;
- default:
- return false;
- }
+ switch (op) {
+ case ast::BinaryOp::kEqual:
+ case ast::BinaryOp::kNotEqual:
+ case ast::BinaryOp::kLessThan:
+ case ast::BinaryOp::kLessThanEqual:
+ case ast::BinaryOp::kGreaterThan:
+ case ast::BinaryOp::kGreaterThanEqual:
+ return true;
+ default:
+ return false;
+ }
}
/// @param op the operator
/// @returns true if the op is a bitwise operation
inline bool IsBitwise(BinaryOp op) {
- switch (op) {
- case ast::BinaryOp::kAnd:
- case ast::BinaryOp::kOr:
- case ast::BinaryOp::kXor:
- return true;
- default:
- return false;
- }
+ switch (op) {
+ case ast::BinaryOp::kAnd:
+ case ast::BinaryOp::kOr:
+ case ast::BinaryOp::kXor:
+ return true;
+ default:
+ return false;
+ }
}
/// @param op the operator
/// @returns true if the op is a bit shift operation
inline bool IsBitshift(BinaryOp op) {
- switch (op) {
- case ast::BinaryOp::kShiftLeft:
- case ast::BinaryOp::kShiftRight:
- return true;
- default:
- return false;
- }
+ switch (op) {
+ case ast::BinaryOp::kShiftLeft:
+ case ast::BinaryOp::kShiftRight:
+ return true;
+ default:
+ return false;
+ }
}
inline bool BinaryExpression::IsLogical() const {
- switch (op) {
- case ast::BinaryOp::kLogicalAnd:
- case ast::BinaryOp::kLogicalOr:
- return true;
- default:
- return false;
- }
+ switch (op) {
+ case ast::BinaryOp::kLogicalAnd:
+ case ast::BinaryOp::kLogicalOr:
+ return true;
+ default:
+ return false;
+ }
}
inline bool BinaryExpression::IsArithmetic() const {
- return ast::IsArithmetic(op);
+ return ast::IsArithmetic(op);
}
inline bool BinaryExpression::IsComparison() const {
- return ast::IsComparison(op);
+ return ast::IsComparison(op);
}
inline bool BinaryExpression::IsBitwise() const {
- return ast::IsBitwise(op);
+ return ast::IsBitwise(op);
}
inline bool BinaryExpression::IsBitshift() const {
- return ast::IsBitshift(op);
+ return ast::IsBitshift(op);
}
/// @returns the human readable name of the given BinaryOp
/// @param op the BinaryOp
constexpr const char* FriendlyName(BinaryOp op) {
- switch (op) {
- case BinaryOp::kNone:
- return "none";
- case BinaryOp::kAnd:
- return "and";
- case BinaryOp::kOr:
- return "or";
- case BinaryOp::kXor:
- return "xor";
- case BinaryOp::kLogicalAnd:
- return "logical_and";
- case BinaryOp::kLogicalOr:
- return "logical_or";
- case BinaryOp::kEqual:
- return "equal";
- case BinaryOp::kNotEqual:
- return "not_equal";
- case BinaryOp::kLessThan:
- return "less_than";
- case BinaryOp::kGreaterThan:
- return "greater_than";
- case BinaryOp::kLessThanEqual:
- return "less_than_equal";
- case BinaryOp::kGreaterThanEqual:
- return "greater_than_equal";
- case BinaryOp::kShiftLeft:
- return "shift_left";
- case BinaryOp::kShiftRight:
- return "shift_right";
- case BinaryOp::kAdd:
- return "add";
- case BinaryOp::kSubtract:
- return "subtract";
- case BinaryOp::kMultiply:
- return "multiply";
- case BinaryOp::kDivide:
- return "divide";
- case BinaryOp::kModulo:
- return "modulo";
- }
- return "INVALID";
+ switch (op) {
+ case BinaryOp::kNone:
+ return "none";
+ case BinaryOp::kAnd:
+ return "and";
+ case BinaryOp::kOr:
+ return "or";
+ case BinaryOp::kXor:
+ return "xor";
+ case BinaryOp::kLogicalAnd:
+ return "logical_and";
+ case BinaryOp::kLogicalOr:
+ return "logical_or";
+ case BinaryOp::kEqual:
+ return "equal";
+ case BinaryOp::kNotEqual:
+ return "not_equal";
+ case BinaryOp::kLessThan:
+ return "less_than";
+ case BinaryOp::kGreaterThan:
+ return "greater_than";
+ case BinaryOp::kLessThanEqual:
+ return "less_than_equal";
+ case BinaryOp::kGreaterThanEqual:
+ return "greater_than_equal";
+ case BinaryOp::kShiftLeft:
+ return "shift_left";
+ case BinaryOp::kShiftRight:
+ return "shift_right";
+ case BinaryOp::kAdd:
+ return "add";
+ case BinaryOp::kSubtract:
+ return "subtract";
+ case BinaryOp::kMultiply:
+ return "multiply";
+ case BinaryOp::kDivide:
+ return "divide";
+ case BinaryOp::kModulo:
+ return "modulo";
+ }
+ return "INVALID";
}
/// @param out the std::ostream to write to
/// @param op the BinaryOp
/// @return the std::ostream so calls can be chained
inline std::ostream& operator<<(std::ostream& out, BinaryOp op) {
- out << FriendlyName(op);
- return out;
+ out << FriendlyName(op);
+ return out;
}
} // namespace tint::ast
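
For reference, a minimal standalone sketch of the BinaryOp classification helpers that the binary_expression.h hunk above reformats; the enum is abbreviated to a handful of operators and all names below are local to the example rather than the real header.

#include <iostream>

// Local, abbreviated mirror of ast::BinaryOp and the IsArithmetic() helper shown above.
enum class BinaryOp { kEqual, kNotEqual, kAdd, kSubtract, kMultiply, kDivide, kModulo };

constexpr bool IsArithmetic(BinaryOp op) {
    switch (op) {
        case BinaryOp::kAdd:
        case BinaryOp::kSubtract:
        case BinaryOp::kMultiply:
        case BinaryOp::kDivide:
        case BinaryOp::kModulo:
            return true;
        default:
            return false;
    }
}

int main() {
    std::cout << std::boolalpha
              << IsArithmetic(BinaryOp::kMultiply) << "\n"   // true
              << IsArithmetic(BinaryOp::kEqual) << "\n";     // false
    return 0;
}
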
diff --git a/chromium/third_party/dawn/src/tint/ast/binary_expression_test.cc b/chromium/third_party/dawn/src/tint/ast/binary_expression_test.cc
index 5bec07b3b9a..18ded61d3e6 100644
--- a/chromium/third_party/dawn/src/tint/ast/binary_expression_test.cc
+++ b/chromium/third_party/dawn/src/tint/ast/binary_expression_test.cc
@@ -21,72 +21,69 @@ namespace {
using BinaryExpressionTest = TestHelper;
TEST_F(BinaryExpressionTest, Creation) {
- auto* lhs = Expr("lhs");
- auto* rhs = Expr("rhs");
+ auto* lhs = Expr("lhs");
+ auto* rhs = Expr("rhs");
- auto* r = create<BinaryExpression>(BinaryOp::kEqual, lhs, rhs);
- EXPECT_EQ(r->lhs, lhs);
- EXPECT_EQ(r->rhs, rhs);
- EXPECT_EQ(r->op, BinaryOp::kEqual);
+ auto* r = create<BinaryExpression>(BinaryOp::kEqual, lhs, rhs);
+ EXPECT_EQ(r->lhs, lhs);
+ EXPECT_EQ(r->rhs, rhs);
+ EXPECT_EQ(r->op, BinaryOp::kEqual);
}
TEST_F(BinaryExpressionTest, Creation_WithSource) {
- auto* lhs = Expr("lhs");
- auto* rhs = Expr("rhs");
+ auto* lhs = Expr("lhs");
+ auto* rhs = Expr("rhs");
- auto* r = create<BinaryExpression>(Source{Source::Location{20, 2}},
- BinaryOp::kEqual, lhs, rhs);
- auto src = r->source;
- EXPECT_EQ(src.range.begin.line, 20u);
- EXPECT_EQ(src.range.begin.column, 2u);
+ auto* r = create<BinaryExpression>(Source{Source::Location{20, 2}}, BinaryOp::kEqual, lhs, rhs);
+ auto src = r->source;
+ EXPECT_EQ(src.range.begin.line, 20u);
+ EXPECT_EQ(src.range.begin.column, 2u);
}
TEST_F(BinaryExpressionTest, IsBinary) {
- auto* lhs = Expr("lhs");
- auto* rhs = Expr("rhs");
+ auto* lhs = Expr("lhs");
+ auto* rhs = Expr("rhs");
- auto* r = create<BinaryExpression>(BinaryOp::kEqual, lhs, rhs);
- EXPECT_TRUE(r->Is<BinaryExpression>());
+ auto* r = create<BinaryExpression>(BinaryOp::kEqual, lhs, rhs);
+ EXPECT_TRUE(r->Is<BinaryExpression>());
}
TEST_F(BinaryExpressionTest, Assert_Null_LHS) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b;
- b.create<BinaryExpression>(BinaryOp::kEqual, nullptr, b.Expr("rhs"));
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b;
+ b.create<BinaryExpression>(BinaryOp::kEqual, nullptr, b.Expr("rhs"));
+ },
+ "internal compiler error");
}
TEST_F(BinaryExpressionTest, Assert_Null_RHS) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b;
- b.create<BinaryExpression>(BinaryOp::kEqual, b.Expr("lhs"), nullptr);
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b;
+ b.create<BinaryExpression>(BinaryOp::kEqual, b.Expr("lhs"), nullptr);
+ },
+ "internal compiler error");
}
TEST_F(BinaryExpressionTest, Assert_DifferentProgramID_LHS) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b1;
- ProgramBuilder b2;
- b1.create<BinaryExpression>(BinaryOp::kEqual, b2.Expr("lhs"),
- b1.Expr("rhs"));
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b1;
+ ProgramBuilder b2;
+ b1.create<BinaryExpression>(BinaryOp::kEqual, b2.Expr("lhs"), b1.Expr("rhs"));
+ },
+ "internal compiler error");
}
TEST_F(BinaryExpressionTest, Assert_DifferentProgramID_RHS) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b1;
- ProgramBuilder b2;
- b1.create<BinaryExpression>(BinaryOp::kEqual, b1.Expr("lhs"),
- b2.Expr("rhs"));
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b1;
+ ProgramBuilder b2;
+ b1.create<BinaryExpression>(BinaryOp::kEqual, b1.Expr("lhs"), b2.Expr("rhs"));
+ },
+ "internal compiler error");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/ast/binding_attribute.cc b/chromium/third_party/dawn/src/tint/ast/binding_attribute.cc
index 2fdfe205375..b9282f2aeea 100644
--- a/chromium/third_party/dawn/src/tint/ast/binding_attribute.cc
+++ b/chromium/third_party/dawn/src/tint/ast/binding_attribute.cc
@@ -22,21 +22,19 @@ TINT_INSTANTIATE_TYPEINFO(tint::ast::BindingAttribute);
namespace tint::ast {
-BindingAttribute::BindingAttribute(ProgramID pid,
- const Source& src,
- uint32_t val)
+BindingAttribute::BindingAttribute(ProgramID pid, const Source& src, uint32_t val)
: Base(pid, src), value(val) {}
BindingAttribute::~BindingAttribute() = default;
std::string BindingAttribute::Name() const {
- return "binding";
+ return "binding";
}
const BindingAttribute* BindingAttribute::Clone(CloneContext* ctx) const {
- // Clone arguments outside of create() call to have deterministic ordering
- auto src = ctx->Clone(source);
- return ctx->dst->create<BindingAttribute>(src, value);
+ // Clone arguments outside of create() call to have deterministic ordering
+ auto src = ctx->Clone(source);
+ return ctx->dst->create<BindingAttribute>(src, value);
}
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/binding_attribute.h b/chromium/third_party/dawn/src/tint/ast/binding_attribute.h
index 6ae6a0ec15e..33c5f695421 100644
--- a/chromium/third_party/dawn/src/tint/ast/binding_attribute.h
+++ b/chromium/third_party/dawn/src/tint/ast/binding_attribute.h
@@ -23,25 +23,25 @@ namespace tint::ast {
/// A binding attribute
class BindingAttribute final : public Castable<BindingAttribute, Attribute> {
- public:
- /// Constructor
- /// @param pid the identifier of the program that owns this node
- /// @param src the source of this node
- /// @param value the binding value
- BindingAttribute(ProgramID pid, const Source& src, uint32_t value);
- ~BindingAttribute() override;
-
- /// @returns the WGSL name for the attribute
- std::string Name() const override;
-
- /// Clones this node and all transitive child nodes using the `CloneContext`
- /// `ctx`.
- /// @param ctx the clone context
- /// @return the newly cloned node
- const BindingAttribute* Clone(CloneContext* ctx) const override;
-
- /// the binding value
- const uint32_t value;
+ public:
+ /// Constructor
+ /// @param pid the identifier of the program that owns this node
+ /// @param src the source of this node
+ /// @param value the binding value
+ BindingAttribute(ProgramID pid, const Source& src, uint32_t value);
+ ~BindingAttribute() override;
+
+ /// @returns the WGSL name for the attribute
+ std::string Name() const override;
+
+ /// Clones this node and all transitive child nodes using the `CloneContext`
+ /// `ctx`.
+ /// @param ctx the clone context
+ /// @return the newly cloned node
+ const BindingAttribute* Clone(CloneContext* ctx) const override;
+
+ /// the binding value
+ const uint32_t value;
};
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/binding_attribute_test.cc b/chromium/third_party/dawn/src/tint/ast/binding_attribute_test.cc
index 85156518404..f51fc25c270 100644
--- a/chromium/third_party/dawn/src/tint/ast/binding_attribute_test.cc
+++ b/chromium/third_party/dawn/src/tint/ast/binding_attribute_test.cc
@@ -20,8 +20,8 @@ namespace {
using BindingAttributeTest = TestHelper;
TEST_F(BindingAttributeTest, Creation) {
- auto* d = create<BindingAttribute>(2);
- EXPECT_EQ(2u, d->value);
+ auto* d = create<BindingAttribute>(2);
+ EXPECT_EQ(2u, d->value);
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/ast/bitcast_expression.cc b/chromium/third_party/dawn/src/tint/ast/bitcast_expression.cc
index 9626c47590f..a81c5dd47e3 100644
--- a/chromium/third_party/dawn/src/tint/ast/bitcast_expression.cc
+++ b/chromium/third_party/dawn/src/tint/ast/bitcast_expression.cc
@@ -25,20 +25,20 @@ BitcastExpression::BitcastExpression(ProgramID pid,
const Type* t,
const Expression* e)
: Base(pid, src), type(t), expr(e) {
- TINT_ASSERT(AST, type);
- TINT_ASSERT(AST, expr);
- TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, expr, program_id);
+ TINT_ASSERT(AST, type);
+ TINT_ASSERT(AST, expr);
+ TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, expr, program_id);
}
BitcastExpression::BitcastExpression(BitcastExpression&&) = default;
BitcastExpression::~BitcastExpression() = default;
const BitcastExpression* BitcastExpression::Clone(CloneContext* ctx) const {
- // Clone arguments outside of create() call to have deterministic ordering
- auto src = ctx->Clone(source);
- auto* t = ctx->Clone(type);
- auto* e = ctx->Clone(expr);
- return ctx->dst->create<BitcastExpression>(src, t, e);
+ // Clone arguments outside of create() call to have deterministic ordering
+ auto src = ctx->Clone(source);
+ auto* t = ctx->Clone(type);
+ auto* e = ctx->Clone(expr);
+ return ctx->dst->create<BitcastExpression>(src, t, e);
}
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/bitcast_expression.h b/chromium/third_party/dawn/src/tint/ast/bitcast_expression.h
index 8eacf7156ad..a231cd286c5 100644
--- a/chromium/third_party/dawn/src/tint/ast/bitcast_expression.h
+++ b/chromium/third_party/dawn/src/tint/ast/bitcast_expression.h
@@ -26,30 +26,30 @@ namespace tint::ast {
/// A bitcast expression
class BitcastExpression final : public Castable<BitcastExpression, Expression> {
- public:
- /// Constructor
- /// @param program_id the identifier of the program that owns this node
- /// @param source the bitcast expression source
- /// @param type the type
- /// @param expr the expr
- BitcastExpression(ProgramID program_id,
- const Source& source,
- const Type* type,
- const Expression* expr);
- /// Move constructor
- BitcastExpression(BitcastExpression&&);
- ~BitcastExpression() override;
-
- /// Clones this node and all transitive child nodes using the `CloneContext`
- /// `ctx`.
- /// @param ctx the clone context
- /// @return the newly cloned node
- const BitcastExpression* Clone(CloneContext* ctx) const override;
-
- /// the target cast type
- const Type* const type;
- /// the expression
- const Expression* const expr;
+ public:
+ /// Constructor
+ /// @param program_id the identifier of the program that owns this node
+ /// @param source the bitcast expression source
+ /// @param type the type
+ /// @param expr the expr
+ BitcastExpression(ProgramID program_id,
+ const Source& source,
+ const Type* type,
+ const Expression* expr);
+ /// Move constructor
+ BitcastExpression(BitcastExpression&&);
+ ~BitcastExpression() override;
+
+ /// Clones this node and all transitive child nodes using the `CloneContext`
+ /// `ctx`.
+ /// @param ctx the clone context
+ /// @return the newly cloned node
+ const BitcastExpression* Clone(CloneContext* ctx) const override;
+
+ /// the target cast type
+ const Type* const type;
+ /// the expression
+ const Expression* const expr;
};
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/bitcast_expression_test.cc b/chromium/third_party/dawn/src/tint/ast/bitcast_expression_test.cc
index 170d765ddb6..7478456897f 100644
--- a/chromium/third_party/dawn/src/tint/ast/bitcast_expression_test.cc
+++ b/chromium/third_party/dawn/src/tint/ast/bitcast_expression_test.cc
@@ -23,56 +23,55 @@ namespace {
using BitcastExpressionTest = TestHelper;
TEST_F(BitcastExpressionTest, Create) {
- auto* expr = Expr("expr");
+ auto* expr = Expr("expr");
- auto* exp = create<BitcastExpression>(ty.f32(), expr);
- EXPECT_TRUE(exp->type->Is<ast::F32>());
- ASSERT_EQ(exp->expr, expr);
+ auto* exp = create<BitcastExpression>(ty.f32(), expr);
+ EXPECT_TRUE(exp->type->Is<ast::F32>());
+ ASSERT_EQ(exp->expr, expr);
}
TEST_F(BitcastExpressionTest, CreateWithSource) {
- auto* expr = Expr("expr");
+ auto* expr = Expr("expr");
- auto* exp = create<BitcastExpression>(Source{Source::Location{20, 2}},
- ty.f32(), expr);
- auto src = exp->source;
- EXPECT_EQ(src.range.begin.line, 20u);
- EXPECT_EQ(src.range.begin.column, 2u);
+ auto* exp = create<BitcastExpression>(Source{Source::Location{20, 2}}, ty.f32(), expr);
+ auto src = exp->source;
+ EXPECT_EQ(src.range.begin.line, 20u);
+ EXPECT_EQ(src.range.begin.column, 2u);
}
TEST_F(BitcastExpressionTest, IsBitcast) {
- auto* expr = Expr("expr");
+ auto* expr = Expr("expr");
- auto* exp = create<BitcastExpression>(ty.f32(), expr);
- EXPECT_TRUE(exp->Is<BitcastExpression>());
+ auto* exp = create<BitcastExpression>(ty.f32(), expr);
+ EXPECT_TRUE(exp->Is<BitcastExpression>());
}
TEST_F(BitcastExpressionTest, Assert_Null_Type) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b;
- b.create<BitcastExpression>(nullptr, b.Expr("idx"));
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b;
+ b.create<BitcastExpression>(nullptr, b.Expr("idx"));
+ },
+ "internal compiler error");
}
TEST_F(BitcastExpressionTest, Assert_Null_Expr) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b;
- b.create<BitcastExpression>(b.ty.f32(), nullptr);
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b;
+ b.create<BitcastExpression>(b.ty.f32(), nullptr);
+ },
+ "internal compiler error");
}
TEST_F(BitcastExpressionTest, Assert_DifferentProgramID_Expr) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b1;
- ProgramBuilder b2;
- b1.create<BitcastExpression>(b1.ty.f32(), b2.Expr("idx"));
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b1;
+ ProgramBuilder b2;
+ b1.create<BitcastExpression>(b1.ty.f32(), b2.Expr("idx"));
+ },
+ "internal compiler error");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/ast/block_statement.cc b/chromium/third_party/dawn/src/tint/ast/block_statement.cc
index 4368e24a2da..7d4f49237ae 100644
--- a/chromium/third_party/dawn/src/tint/ast/block_statement.cc
+++ b/chromium/third_party/dawn/src/tint/ast/block_statement.cc
@@ -20,14 +20,12 @@ TINT_INSTANTIATE_TYPEINFO(tint::ast::BlockStatement);
namespace tint::ast {
-BlockStatement::BlockStatement(ProgramID pid,
- const Source& src,
- const StatementList& stmts)
+BlockStatement::BlockStatement(ProgramID pid, const Source& src, const StatementList& stmts)
: Base(pid, src), statements(std::move(stmts)) {
- for (auto* stmt : statements) {
- TINT_ASSERT(AST, stmt);
- TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, stmt, program_id);
- }
+ for (auto* stmt : statements) {
+ TINT_ASSERT(AST, stmt);
+ TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, stmt, program_id);
+ }
}
BlockStatement::BlockStatement(BlockStatement&&) = default;
@@ -35,10 +33,10 @@ BlockStatement::BlockStatement(BlockStatement&&) = default;
BlockStatement::~BlockStatement() = default;
const BlockStatement* BlockStatement::Clone(CloneContext* ctx) const {
- // Clone arguments outside of create() call to have deterministic ordering
- auto src = ctx->Clone(source);
- auto stmts = ctx->Clone(statements);
- return ctx->dst->create<BlockStatement>(src, stmts);
+ // Clone arguments outside of create() call to have deterministic ordering
+ auto src = ctx->Clone(source);
+ auto stmts = ctx->Clone(statements);
+ return ctx->dst->create<BlockStatement>(src, stmts);
}
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/block_statement.h b/chromium/third_party/dawn/src/tint/ast/block_statement.h
index 2afce5e535f..48ea35a0fed 100644
--- a/chromium/third_party/dawn/src/tint/ast/block_statement.h
+++ b/chromium/third_party/dawn/src/tint/ast/block_statement.h
@@ -23,34 +23,30 @@ namespace tint::ast {
/// A block statement
class BlockStatement final : public Castable<BlockStatement, Statement> {
- public:
- /// Constructor
- /// @param program_id the identifier of the program that owns this node
- /// @param source the block statement source
- /// @param statements the statements
- BlockStatement(ProgramID program_id,
- const Source& source,
- const StatementList& statements);
- /// Move constructor
- BlockStatement(BlockStatement&&);
- ~BlockStatement() override;
-
- /// @returns true if the block has no statements
- bool Empty() const { return statements.empty(); }
-
- /// @returns the last statement in the block or nullptr if block empty
- const Statement* Last() const {
- return statements.empty() ? nullptr : statements.back();
- }
-
- /// Clones this node and all transitive child nodes using the `CloneContext`
- /// `ctx`.
- /// @param ctx the clone context
- /// @return the newly cloned node
- const BlockStatement* Clone(CloneContext* ctx) const override;
-
- /// the statement list
- const StatementList statements;
+ public:
+ /// Constructor
+ /// @param program_id the identifier of the program that owns this node
+ /// @param source the block statement source
+ /// @param statements the statements
+ BlockStatement(ProgramID program_id, const Source& source, const StatementList& statements);
+ /// Move constructor
+ BlockStatement(BlockStatement&&);
+ ~BlockStatement() override;
+
+ /// @returns true if the block has no statements
+ bool Empty() const { return statements.empty(); }
+
+ /// @returns the last statement in the block or nullptr if block empty
+ const Statement* Last() const { return statements.empty() ? nullptr : statements.back(); }
+
+ /// Clones this node and all transitive child nodes using the `CloneContext`
+ /// `ctx`.
+ /// @param ctx the clone context
+ /// @return the newly cloned node
+ const BlockStatement* Clone(CloneContext* ctx) const override;
+
+ /// the statement list
+ const StatementList statements;
};
} // namespace tint::ast
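For reference, the Empty() and Last() accessors declared above behave as follows; a small illustrative sketch (not part of the patch), assuming the ProgramBuilder::create<>() overloads used in the adjacent tests:

    ProgramBuilder b;
    auto* discard = b.create<DiscardStatement>();
    auto* block = b.create<BlockStatement>(ast::StatementList{discard});
    // block->Empty() == false; block->Last() == discard
    auto* empty = b.create<BlockStatement>(ast::StatementList{});
    // empty->Empty() == true;  empty->Last() == nullptr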
diff --git a/chromium/third_party/dawn/src/tint/ast/block_statement_test.cc b/chromium/third_party/dawn/src/tint/ast/block_statement_test.cc
index 17ae585afb9..4097b209acd 100644
--- a/chromium/third_party/dawn/src/tint/ast/block_statement_test.cc
+++ b/chromium/third_party/dawn/src/tint/ast/block_statement_test.cc
@@ -23,46 +23,44 @@ namespace {
using BlockStatementTest = TestHelper;
TEST_F(BlockStatementTest, Creation) {
- auto* d = create<DiscardStatement>();
- auto* ptr = d;
+ auto* d = create<DiscardStatement>();
+ auto* ptr = d;
- auto* b = create<BlockStatement>(StatementList{d});
+ auto* b = create<BlockStatement>(StatementList{d});
- ASSERT_EQ(b->statements.size(), 1u);
- EXPECT_EQ(b->statements[0], ptr);
+ ASSERT_EQ(b->statements.size(), 1u);
+ EXPECT_EQ(b->statements[0], ptr);
}
TEST_F(BlockStatementTest, Creation_WithSource) {
- auto* b = create<BlockStatement>(Source{Source::Location{20, 2}},
- ast::StatementList{});
- auto src = b->source;
- EXPECT_EQ(src.range.begin.line, 20u);
- EXPECT_EQ(src.range.begin.column, 2u);
+ auto* b = create<BlockStatement>(Source{Source::Location{20, 2}}, ast::StatementList{});
+ auto src = b->source;
+ EXPECT_EQ(src.range.begin.line, 20u);
+ EXPECT_EQ(src.range.begin.column, 2u);
}
TEST_F(BlockStatementTest, IsBlock) {
- auto* b = create<BlockStatement>(ast::StatementList{});
- EXPECT_TRUE(b->Is<BlockStatement>());
+ auto* b = create<BlockStatement>(ast::StatementList{});
+ EXPECT_TRUE(b->Is<BlockStatement>());
}
TEST_F(BlockStatementTest, Assert_Null_Statement) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b;
- b.create<BlockStatement>(ast::StatementList{nullptr});
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b;
+ b.create<BlockStatement>(ast::StatementList{nullptr});
+ },
+ "internal compiler error");
}
TEST_F(BlockStatementTest, Assert_DifferentProgramID_Statement) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b1;
- ProgramBuilder b2;
- b1.create<BlockStatement>(
- ast::StatementList{b2.create<DiscardStatement>()});
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b1;
+ ProgramBuilder b2;
+ b1.create<BlockStatement>(ast::StatementList{b2.create<DiscardStatement>()});
+ },
+ "internal compiler error");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/ast/bool.cc b/chromium/third_party/dawn/src/tint/ast/bool.cc
index 3f126e6d017..af951e75381 100644
--- a/chromium/third_party/dawn/src/tint/ast/bool.cc
+++ b/chromium/third_party/dawn/src/tint/ast/bool.cc
@@ -27,12 +27,12 @@ Bool::Bool(Bool&&) = default;
Bool::~Bool() = default;
std::string Bool::FriendlyName(const SymbolTable&) const {
- return "bool";
+ return "bool";
}
const Bool* Bool::Clone(CloneContext* ctx) const {
- auto src = ctx->Clone(source);
- return ctx->dst->create<Bool>(src);
+ auto src = ctx->Clone(source);
+ return ctx->dst->create<Bool>(src);
}
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/bool.h b/chromium/third_party/dawn/src/tint/ast/bool.h
index 50a4fcb7e8c..bfe3b781233 100644
--- a/chromium/third_party/dawn/src/tint/ast/bool.h
+++ b/chromium/third_party/dawn/src/tint/ast/bool.h
@@ -29,24 +29,24 @@ namespace tint::ast {
/// A boolean type
class Bool final : public Castable<Bool, Type> {
- public:
- /// Constructor
- /// @param pid the identifier of the program that owns this node
- /// @param src the source of this node
- Bool(ProgramID pid, const Source& src);
- /// Move constructor
- Bool(Bool&&);
- ~Bool() override;
-
- /// @param symbols the program's symbol table
- /// @returns the name for this type that closely resembles how it would be
- /// declared in WGSL.
- std::string FriendlyName(const SymbolTable& symbols) const override;
-
- /// Clones this type and all transitive types using the `CloneContext` `ctx`.
- /// @param ctx the clone context
- /// @return the newly cloned type
- const Bool* Clone(CloneContext* ctx) const override;
+ public:
+ /// Constructor
+ /// @param pid the identifier of the program that owns this node
+ /// @param src the source of this node
+ Bool(ProgramID pid, const Source& src);
+ /// Move constructor
+ Bool(Bool&&);
+ ~Bool() override;
+
+ /// @param symbols the program's symbol table
+ /// @returns the name for this type that closely resembles how it would be
+ /// declared in WGSL.
+ std::string FriendlyName(const SymbolTable& symbols) const override;
+
+ /// Clones this type and all transitive types using the `CloneContext` `ctx`.
+ /// @param ctx the clone context
+ /// @return the newly cloned type
+ const Bool* Clone(CloneContext* ctx) const override;
};
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/bool_literal_expression.cc b/chromium/third_party/dawn/src/tint/ast/bool_literal_expression.cc
index c86036c9d8d..cfaacb95ae5 100644
--- a/chromium/third_party/dawn/src/tint/ast/bool_literal_expression.cc
+++ b/chromium/third_party/dawn/src/tint/ast/bool_literal_expression.cc
@@ -20,18 +20,15 @@ TINT_INSTANTIATE_TYPEINFO(tint::ast::BoolLiteralExpression);
namespace tint::ast {
-BoolLiteralExpression::BoolLiteralExpression(ProgramID pid,
- const Source& src,
- bool val)
+BoolLiteralExpression::BoolLiteralExpression(ProgramID pid, const Source& src, bool val)
: Base(pid, src), value(val) {}
BoolLiteralExpression::~BoolLiteralExpression() = default;
-const BoolLiteralExpression* BoolLiteralExpression::Clone(
- CloneContext* ctx) const {
- // Clone arguments outside of create() call to have deterministic ordering
- auto src = ctx->Clone(source);
- return ctx->dst->create<BoolLiteralExpression>(src, value);
+const BoolLiteralExpression* BoolLiteralExpression::Clone(CloneContext* ctx) const {
+ // Clone arguments outside of create() call to have deterministic ordering
+ auto src = ctx->Clone(source);
+ return ctx->dst->create<BoolLiteralExpression>(src, value);
}
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/bool_literal_expression.h b/chromium/third_party/dawn/src/tint/ast/bool_literal_expression.h
index 9fbd7758602..f2c4c3f84c5 100644
--- a/chromium/third_party/dawn/src/tint/ast/bool_literal_expression.h
+++ b/chromium/third_party/dawn/src/tint/ast/bool_literal_expression.h
@@ -22,24 +22,23 @@
namespace tint::ast {
/// A boolean literal
-class BoolLiteralExpression final
- : public Castable<BoolLiteralExpression, LiteralExpression> {
- public:
- /// Constructor
- /// @param pid the identifier of the program that owns this node
- /// @param src the source of this node
- /// @param value the bool literals value
- BoolLiteralExpression(ProgramID pid, const Source& src, bool value);
- ~BoolLiteralExpression() override;
-
- /// Clones this node and all transitive child nodes using the `CloneContext`
- /// `ctx`.
- /// @param ctx the clone context
- /// @return the newly cloned node
- const BoolLiteralExpression* Clone(CloneContext* ctx) const override;
-
- /// The boolean literal value
- const bool value;
+class BoolLiteralExpression final : public Castable<BoolLiteralExpression, LiteralExpression> {
+ public:
+ /// Constructor
+ /// @param pid the identifier of the program that owns this node
+ /// @param src the source of this node
+    /// @param value the bool literal's value
+ BoolLiteralExpression(ProgramID pid, const Source& src, bool value);
+ ~BoolLiteralExpression() override;
+
+ /// Clones this node and all transitive child nodes using the `CloneContext`
+ /// `ctx`.
+ /// @param ctx the clone context
+ /// @return the newly cloned node
+ const BoolLiteralExpression* Clone(CloneContext* ctx) const override;
+
+ /// The boolean literal value
+ const bool value;
};
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/bool_literal_expression_test.cc b/chromium/third_party/dawn/src/tint/ast/bool_literal_expression_test.cc
index ac7380281cf..efaa4eee39d 100644
--- a/chromium/third_party/dawn/src/tint/ast/bool_literal_expression_test.cc
+++ b/chromium/third_party/dawn/src/tint/ast/bool_literal_expression_test.cc
@@ -20,15 +20,15 @@ namespace {
using BoolLiteralExpressionTest = TestHelper;
TEST_F(BoolLiteralExpressionTest, True) {
- auto* b = create<BoolLiteralExpression>(true);
- ASSERT_TRUE(b->Is<BoolLiteralExpression>());
- ASSERT_TRUE(b->value);
+ auto* b = create<BoolLiteralExpression>(true);
+ ASSERT_TRUE(b->Is<BoolLiteralExpression>());
+ ASSERT_TRUE(b->value);
}
TEST_F(BoolLiteralExpressionTest, False) {
- auto* b = create<BoolLiteralExpression>(false);
- ASSERT_TRUE(b->Is<BoolLiteralExpression>());
- ASSERT_FALSE(b->value);
+ auto* b = create<BoolLiteralExpression>(false);
+ ASSERT_TRUE(b->Is<BoolLiteralExpression>());
+ ASSERT_FALSE(b->value);
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/ast/bool_test.cc b/chromium/third_party/dawn/src/tint/ast/bool_test.cc
index 665b23f7a52..d3842b93ca4 100644
--- a/chromium/third_party/dawn/src/tint/ast/bool_test.cc
+++ b/chromium/third_party/dawn/src/tint/ast/bool_test.cc
@@ -22,8 +22,8 @@ namespace {
using AstBoolTest = TestHelper;
TEST_F(AstBoolTest, FriendlyName) {
- auto* b = create<Bool>();
- EXPECT_EQ(b->FriendlyName(Symbols()), "bool");
+ auto* b = create<Bool>();
+ EXPECT_EQ(b->FriendlyName(Symbols()), "bool");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/ast/break_statement.cc b/chromium/third_party/dawn/src/tint/ast/break_statement.cc
index 57488335044..02900141ea6 100644
--- a/chromium/third_party/dawn/src/tint/ast/break_statement.cc
+++ b/chromium/third_party/dawn/src/tint/ast/break_statement.cc
@@ -20,17 +20,16 @@ TINT_INSTANTIATE_TYPEINFO(tint::ast::BreakStatement);
namespace tint::ast {
-BreakStatement::BreakStatement(ProgramID pid, const Source& src)
- : Base(pid, src) {}
+BreakStatement::BreakStatement(ProgramID pid, const Source& src) : Base(pid, src) {}
BreakStatement::BreakStatement(BreakStatement&&) = default;
BreakStatement::~BreakStatement() = default;
const BreakStatement* BreakStatement::Clone(CloneContext* ctx) const {
- // Clone arguments outside of create() call to have deterministic ordering
- auto src = ctx->Clone(source);
- return ctx->dst->create<BreakStatement>(src);
+ // Clone arguments outside of create() call to have deterministic ordering
+ auto src = ctx->Clone(source);
+ return ctx->dst->create<BreakStatement>(src);
}
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/break_statement.h b/chromium/third_party/dawn/src/tint/ast/break_statement.h
index bb5b00c99b2..29e5eeb012a 100644
--- a/chromium/third_party/dawn/src/tint/ast/break_statement.h
+++ b/chromium/third_party/dawn/src/tint/ast/break_statement.h
@@ -21,20 +21,20 @@ namespace tint::ast {
/// A break statement
class BreakStatement final : public Castable<BreakStatement, Statement> {
- public:
- /// Constructor
- /// @param pid the identifier of the program that owns this node
- /// @param src the source of this node
- BreakStatement(ProgramID pid, const Source& src);
- /// Move constructor
- BreakStatement(BreakStatement&&);
- ~BreakStatement() override;
+ public:
+ /// Constructor
+ /// @param pid the identifier of the program that owns this node
+ /// @param src the source of this node
+ BreakStatement(ProgramID pid, const Source& src);
+ /// Move constructor
+ BreakStatement(BreakStatement&&);
+ ~BreakStatement() override;
- /// Clones this node and all transitive child nodes using the `CloneContext`
- /// `ctx`.
- /// @param ctx the clone context
- /// @return the newly cloned node
- const BreakStatement* Clone(CloneContext* ctx) const override;
+ /// Clones this node and all transitive child nodes using the `CloneContext`
+ /// `ctx`.
+ /// @param ctx the clone context
+ /// @return the newly cloned node
+ const BreakStatement* Clone(CloneContext* ctx) const override;
};
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/break_statement_test.cc b/chromium/third_party/dawn/src/tint/ast/break_statement_test.cc
index dba28cf6370..d2a10435377 100644
--- a/chromium/third_party/dawn/src/tint/ast/break_statement_test.cc
+++ b/chromium/third_party/dawn/src/tint/ast/break_statement_test.cc
@@ -22,15 +22,15 @@ namespace {
using BreakStatementTest = TestHelper;
TEST_F(BreakStatementTest, Creation_WithSource) {
- auto* stmt = create<BreakStatement>(Source{Source::Location{20, 2}});
- auto src = stmt->source;
- EXPECT_EQ(src.range.begin.line, 20u);
- EXPECT_EQ(src.range.begin.column, 2u);
+ auto* stmt = create<BreakStatement>(Source{Source::Location{20, 2}});
+ auto src = stmt->source;
+ EXPECT_EQ(src.range.begin.line, 20u);
+ EXPECT_EQ(src.range.begin.column, 2u);
}
TEST_F(BreakStatementTest, IsBreak) {
- auto* stmt = create<BreakStatement>();
- EXPECT_TRUE(stmt->Is<BreakStatement>());
+ auto* stmt = create<BreakStatement>();
+ EXPECT_TRUE(stmt->Is<BreakStatement>());
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/ast/builtin.cc b/chromium/third_party/dawn/src/tint/ast/builtin.cc
index 364041177d5..d215a5c3eef 100644
--- a/chromium/third_party/dawn/src/tint/ast/builtin.cc
+++ b/chromium/third_party/dawn/src/tint/ast/builtin.cc
@@ -17,64 +17,64 @@
namespace tint::ast {
std::ostream& operator<<(std::ostream& out, Builtin builtin) {
- switch (builtin) {
- case Builtin::kNone: {
- out << "none";
- break;
+ switch (builtin) {
+ case Builtin::kNone: {
+ out << "none";
+ break;
+ }
+ case Builtin::kPosition: {
+ out << "position";
+ break;
+ }
+ case Builtin::kVertexIndex: {
+ out << "vertex_index";
+ break;
+ }
+ case Builtin::kInstanceIndex: {
+ out << "instance_index";
+ break;
+ }
+ case Builtin::kFrontFacing: {
+ out << "front_facing";
+ break;
+ }
+ case Builtin::kFragDepth: {
+ out << "frag_depth";
+ break;
+ }
+ case Builtin::kLocalInvocationId: {
+ out << "local_invocation_id";
+ break;
+ }
+ case Builtin::kLocalInvocationIndex: {
+ out << "local_invocation_index";
+ break;
+ }
+ case Builtin::kGlobalInvocationId: {
+ out << "global_invocation_id";
+ break;
+ }
+ case Builtin::kWorkgroupId: {
+ out << "workgroup_id";
+ break;
+ }
+ case Builtin::kNumWorkgroups: {
+ out << "num_workgroups";
+ break;
+ }
+ case Builtin::kSampleIndex: {
+ out << "sample_index";
+ break;
+ }
+ case Builtin::kSampleMask: {
+ out << "sample_mask";
+ break;
+ }
+ case Builtin::kPointSize: {
+ out << "pointsize";
+ }
}
- case Builtin::kPosition: {
- out << "position";
- break;
- }
- case Builtin::kVertexIndex: {
- out << "vertex_index";
- break;
- }
- case Builtin::kInstanceIndex: {
- out << "instance_index";
- break;
- }
- case Builtin::kFrontFacing: {
- out << "front_facing";
- break;
- }
- case Builtin::kFragDepth: {
- out << "frag_depth";
- break;
- }
- case Builtin::kLocalInvocationId: {
- out << "local_invocation_id";
- break;
- }
- case Builtin::kLocalInvocationIndex: {
- out << "local_invocation_index";
- break;
- }
- case Builtin::kGlobalInvocationId: {
- out << "global_invocation_id";
- break;
- }
- case Builtin::kWorkgroupId: {
- out << "workgroup_id";
- break;
- }
- case Builtin::kNumWorkgroups: {
- out << "num_workgroups";
- break;
- }
- case Builtin::kSampleIndex: {
- out << "sample_index";
- break;
- }
- case Builtin::kSampleMask: {
- out << "sample_mask";
- break;
- }
- case Builtin::kPointSize: {
- out << "pointsize";
- }
- }
- return out;
+ return out;
}
} // namespace tint::ast
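The reindented operator<< above maps each Builtin enumerator to its lower-case name string. A minimal usage sketch (assumed, not part of the diff):

    #include <sstream>
    std::ostringstream ss;
    ss << tint::ast::Builtin::kFragDepth;   // ss.str() == "frag_depth"
    ss.str("");
    ss << tint::ast::Builtin::kNumWorkgroups;  // ss.str() == "num_workgroups"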
diff --git a/chromium/third_party/dawn/src/tint/ast/builtin.h b/chromium/third_party/dawn/src/tint/ast/builtin.h
index a0b8d5dc28e..699632a0e99 100644
--- a/chromium/third_party/dawn/src/tint/ast/builtin.h
+++ b/chromium/third_party/dawn/src/tint/ast/builtin.h
@@ -21,23 +21,23 @@ namespace tint::ast {
/// The builtin identifiers
enum class Builtin {
- kNone = -1,
- kPosition,
- kVertexIndex,
- kInstanceIndex,
- kFrontFacing,
- kFragDepth,
- kLocalInvocationId,
- kLocalInvocationIndex,
- kGlobalInvocationId,
- kWorkgroupId,
- kNumWorkgroups,
- kSampleIndex,
- kSampleMask,
-
- // Below are not currently WGSL builtins, but are included in this enum as
- // they are used by certain backends.
- kPointSize,
+ kNone = -1,
+ kPosition,
+ kVertexIndex,
+ kInstanceIndex,
+ kFrontFacing,
+ kFragDepth,
+ kLocalInvocationId,
+ kLocalInvocationIndex,
+ kGlobalInvocationId,
+ kWorkgroupId,
+ kNumWorkgroups,
+ kSampleIndex,
+ kSampleMask,
+
+ // Below are not currently WGSL builtins, but are included in this enum as
+ // they are used by certain backends.
+ kPointSize,
};
/// @param out the std::ostream to write to
diff --git a/chromium/third_party/dawn/src/tint/ast/builtin_attribute.cc b/chromium/third_party/dawn/src/tint/ast/builtin_attribute.cc
index f3d013a59f7..03e47b60749 100644
--- a/chromium/third_party/dawn/src/tint/ast/builtin_attribute.cc
+++ b/chromium/third_party/dawn/src/tint/ast/builtin_attribute.cc
@@ -28,13 +28,13 @@ BuiltinAttribute::BuiltinAttribute(ProgramID pid, const Source& src, Builtin b)
BuiltinAttribute::~BuiltinAttribute() = default;
std::string BuiltinAttribute::Name() const {
- return "builtin";
+ return "builtin";
}
const BuiltinAttribute* BuiltinAttribute::Clone(CloneContext* ctx) const {
- // Clone arguments outside of create() call to have deterministic ordering
- auto src = ctx->Clone(source);
- return ctx->dst->create<BuiltinAttribute>(src, builtin);
+ // Clone arguments outside of create() call to have deterministic ordering
+ auto src = ctx->Clone(source);
+ return ctx->dst->create<BuiltinAttribute>(src, builtin);
}
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/builtin_attribute.h b/chromium/third_party/dawn/src/tint/ast/builtin_attribute.h
index cc562f44665..75898be5964 100644
--- a/chromium/third_party/dawn/src/tint/ast/builtin_attribute.h
+++ b/chromium/third_party/dawn/src/tint/ast/builtin_attribute.h
@@ -24,25 +24,25 @@ namespace tint::ast {
/// A builtin attribute
class BuiltinAttribute final : public Castable<BuiltinAttribute, Attribute> {
- public:
- /// constructor
- /// @param pid the identifier of the program that owns this node
- /// @param src the source of this node
- /// @param builtin the builtin value
- BuiltinAttribute(ProgramID pid, const Source& src, Builtin builtin);
- ~BuiltinAttribute() override;
-
- /// @returns the WGSL name for the attribute
- std::string Name() const override;
-
- /// Clones this node and all transitive child nodes using the `CloneContext`
- /// `ctx`.
- /// @param ctx the clone context
- /// @return the newly cloned node
- const BuiltinAttribute* Clone(CloneContext* ctx) const override;
-
- /// The builtin value
- const Builtin builtin;
+ public:
+ /// constructor
+ /// @param pid the identifier of the program that owns this node
+ /// @param src the source of this node
+ /// @param builtin the builtin value
+ BuiltinAttribute(ProgramID pid, const Source& src, Builtin builtin);
+ ~BuiltinAttribute() override;
+
+ /// @returns the WGSL name for the attribute
+ std::string Name() const override;
+
+ /// Clones this node and all transitive child nodes using the `CloneContext`
+ /// `ctx`.
+ /// @param ctx the clone context
+ /// @return the newly cloned node
+ const BuiltinAttribute* Clone(CloneContext* ctx) const override;
+
+ /// The builtin value
+ const Builtin builtin;
};
} // namespace tint::ast
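Putting the BuiltinAttribute interface above together with the test that follows, a short illustrative sketch (assuming the ProgramBuilder::create<>() overloads used in these tests):

    ProgramBuilder b;
    auto* attr = b.create<ast::BuiltinAttribute>(ast::Builtin::kFragDepth);
    // attr->Name()  == "builtin"                   (the WGSL attribute name)
    // attr->builtin == ast::Builtin::kFragDepth    (the stored builtin value)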
diff --git a/chromium/third_party/dawn/src/tint/ast/builtin_attribute_test.cc b/chromium/third_party/dawn/src/tint/ast/builtin_attribute_test.cc
index 82b681dc9be..a57f5b1f08e 100644
--- a/chromium/third_party/dawn/src/tint/ast/builtin_attribute_test.cc
+++ b/chromium/third_party/dawn/src/tint/ast/builtin_attribute_test.cc
@@ -20,8 +20,8 @@ namespace {
using BuiltinAttributeTest = TestHelper;
TEST_F(BuiltinAttributeTest, Creation) {
- auto* d = create<BuiltinAttribute>(Builtin::kFragDepth);
- EXPECT_EQ(Builtin::kFragDepth, d->builtin);
+ auto* d = create<BuiltinAttribute>(Builtin::kFragDepth);
+ EXPECT_EQ(Builtin::kFragDepth, d->builtin);
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/ast/builtin_texture_helper_test.cc b/chromium/third_party/dawn/src/tint/ast/builtin_texture_helper_test.cc
index 85567e08ca9..21cbd931d0c 100644
--- a/chromium/third_party/dawn/src/tint/ast/builtin_texture_helper_test.cc
+++ b/chromium/third_party/dawn/src/tint/ast/builtin_texture_helper_test.cc
@@ -14,25 +14,22 @@
#include "src/tint/ast/builtin_texture_helper_test.h"
-#include "src/tint/sem/depth_texture_type.h"
-#include "src/tint/sem/multisampled_texture_type.h"
-#include "src/tint/sem/sampled_texture_type.h"
+#include "src/tint/sem/depth_texture.h"
+#include "src/tint/sem/multisampled_texture.h"
+#include "src/tint/sem/sampled_texture.h"
-namespace tint::ast::builtin::test {
+using namespace tint::number_suffixes; // NOLINT
-using u32 = ProgramBuilder::u32;
-using i32 = ProgramBuilder::i32;
-using f32 = ProgramBuilder::f32;
+namespace tint::ast::builtin::test {
-TextureOverloadCase::TextureOverloadCase(
- ValidTextureOverload o,
- const char* desc,
- TextureKind tk,
- ast::SamplerKind sk,
- ast::TextureDimension dims,
- TextureDataType datatype,
- const char* f,
- std::function<ExpressionList(ProgramBuilder*)> a)
+TextureOverloadCase::TextureOverloadCase(ValidTextureOverload o,
+ const char* desc,
+ TextureKind tk,
+ ast::SamplerKind sk,
+ ast::TextureDimension dims,
+ TextureDataType datatype,
+ const char* f,
+ std::function<ExpressionList(ProgramBuilder*)> a)
: overload(o),
description(desc),
texture_kind(tk),
@@ -41,14 +38,13 @@ TextureOverloadCase::TextureOverloadCase(
texture_data_type(datatype),
function(f),
args(std::move(a)) {}
-TextureOverloadCase::TextureOverloadCase(
- ValidTextureOverload o,
- const char* desc,
- TextureKind tk,
- ast::TextureDimension dims,
- TextureDataType datatype,
- const char* f,
- std::function<ExpressionList(ProgramBuilder*)> a)
+TextureOverloadCase::TextureOverloadCase(ValidTextureOverload o,
+ const char* desc,
+ TextureKind tk,
+ ast::TextureDimension dims,
+ TextureDataType datatype,
+ const char* f,
+ std::function<ExpressionList(ProgramBuilder*)> a)
: overload(o),
description(desc),
texture_kind(tk),
@@ -56,15 +52,14 @@ TextureOverloadCase::TextureOverloadCase(
texture_data_type(datatype),
function(f),
args(std::move(a)) {}
-TextureOverloadCase::TextureOverloadCase(
- ValidTextureOverload o,
- const char* d,
- Access acc,
- ast::TexelFormat fmt,
- ast::TextureDimension dims,
- TextureDataType datatype,
- const char* f,
- std::function<ExpressionList(ProgramBuilder*)> a)
+TextureOverloadCase::TextureOverloadCase(ValidTextureOverload o,
+ const char* d,
+ Access acc,
+ ast::TexelFormat fmt,
+ ast::TextureDimension dims,
+ TextureDataType datatype,
+ const char* f,
+ std::function<ExpressionList(ProgramBuilder*)> a)
: overload(o),
description(d),
texture_kind(TextureKind::kStorage),
@@ -78,2203 +73,2195 @@ TextureOverloadCase::TextureOverloadCase(const TextureOverloadCase&) = default;
TextureOverloadCase::~TextureOverloadCase() = default;
std::ostream& operator<<(std::ostream& out, const TextureKind& kind) {
- switch (kind) {
- case TextureKind::kRegular:
- out << "regular";
- break;
- case TextureKind::kDepth:
- out << "depth";
- break;
- case TextureKind::kDepthMultisampled:
- out << "depth-multisampled";
- break;
- case TextureKind::kMultisampled:
- out << "multisampled";
- break;
- case TextureKind::kStorage:
- out << "storage";
- break;
- }
- return out;
+ switch (kind) {
+ case TextureKind::kRegular:
+ out << "regular";
+ break;
+ case TextureKind::kDepth:
+ out << "depth";
+ break;
+ case TextureKind::kDepthMultisampled:
+ out << "depth-multisampled";
+ break;
+ case TextureKind::kMultisampled:
+ out << "multisampled";
+ break;
+ case TextureKind::kStorage:
+ out << "storage";
+ break;
+ }
+ return out;
}
std::ostream& operator<<(std::ostream& out, const TextureDataType& ty) {
- switch (ty) {
- case TextureDataType::kF32:
- out << "f32";
- break;
- case TextureDataType::kU32:
- out << "u32";
- break;
- case TextureDataType::kI32:
- out << "i32";
- break;
- }
- return out;
+ switch (ty) {
+ case TextureDataType::kF32:
+ out << "f32";
+ break;
+ case TextureDataType::kU32:
+ out << "u32";
+ break;
+ case TextureDataType::kI32:
+ out << "i32";
+ break;
+ }
+ return out;
}
std::ostream& operator<<(std::ostream& out, const TextureOverloadCase& data) {
- out << "TextureOverloadCase " << static_cast<int>(data.overload) << "\n";
- out << data.description << "\n";
- out << "texture_kind: " << data.texture_kind << "\n";
- out << "sampler_kind: ";
- if (data.texture_kind != TextureKind::kStorage) {
- out << data.sampler_kind;
- } else {
- out << "<unused>";
- }
- out << "\n";
- out << "access: " << data.access << "\n";
- out << "texel_format: " << data.texel_format << "\n";
- out << "texture_dimension: " << data.texture_dimension << "\n";
- out << "texture_data_type: " << data.texture_data_type << "\n";
- return out;
+ out << "TextureOverloadCase " << static_cast<int>(data.overload) << "\n";
+ out << data.description << "\n";
+ out << "texture_kind: " << data.texture_kind << "\n";
+ out << "sampler_kind: ";
+ if (data.texture_kind != TextureKind::kStorage) {
+ out << data.sampler_kind;
+ } else {
+ out << "<unused>";
+ }
+ out << "\n";
+ out << "access: " << data.access << "\n";
+ out << "texel_format: " << data.texel_format << "\n";
+ out << "texture_dimension: " << data.texture_dimension << "\n";
+ out << "texture_data_type: " << data.texture_data_type << "\n";
+ return out;
}
-const ast::Type* TextureOverloadCase::BuildResultVectorComponentType(
- ProgramBuilder* b) const {
- switch (texture_data_type) {
- case ast::builtin::test::TextureDataType::kF32:
- return b->ty.f32();
- case ast::builtin::test::TextureDataType::kU32:
- return b->ty.u32();
- case ast::builtin::test::TextureDataType::kI32:
- return b->ty.i32();
- }
+const ast::Type* TextureOverloadCase::BuildResultVectorComponentType(ProgramBuilder* b) const {
+ switch (texture_data_type) {
+ case ast::builtin::test::TextureDataType::kF32:
+ return b->ty.f32();
+ case ast::builtin::test::TextureDataType::kU32:
+ return b->ty.u32();
+ case ast::builtin::test::TextureDataType::kI32:
+ return b->ty.i32();
+ }
- TINT_UNREACHABLE(AST, b->Diagnostics());
- return {};
+ TINT_UNREACHABLE(AST, b->Diagnostics());
+ return {};
}
-const ast::Variable* TextureOverloadCase::BuildTextureVariable(
- ProgramBuilder* b) const {
- AttributeList attrs = {
- b->create<ast::GroupAttribute>(0),
- b->create<ast::BindingAttribute>(0),
- };
- switch (texture_kind) {
- case ast::builtin::test::TextureKind::kRegular:
- return b->Global("texture",
- b->ty.sampled_texture(texture_dimension,
- BuildResultVectorComponentType(b)),
- attrs);
+const ast::Variable* TextureOverloadCase::BuildTextureVariable(ProgramBuilder* b) const {
+ AttributeList attrs = {
+ b->create<ast::GroupAttribute>(0),
+ b->create<ast::BindingAttribute>(0),
+ };
+ switch (texture_kind) {
+ case ast::builtin::test::TextureKind::kRegular:
+ return b->Global(
+ "texture",
+ b->ty.sampled_texture(texture_dimension, BuildResultVectorComponentType(b)), attrs);
- case ast::builtin::test::TextureKind::kDepth:
- return b->Global("texture", b->ty.depth_texture(texture_dimension),
- attrs);
+ case ast::builtin::test::TextureKind::kDepth:
+ return b->Global("texture", b->ty.depth_texture(texture_dimension), attrs);
- case ast::builtin::test::TextureKind::kDepthMultisampled:
- return b->Global("texture",
- b->ty.depth_multisampled_texture(texture_dimension),
- attrs);
+ case ast::builtin::test::TextureKind::kDepthMultisampled:
+ return b->Global("texture", b->ty.depth_multisampled_texture(texture_dimension), attrs);
- case ast::builtin::test::TextureKind::kMultisampled:
- return b->Global(
- "texture",
- b->ty.multisampled_texture(texture_dimension,
- BuildResultVectorComponentType(b)),
- attrs);
+ case ast::builtin::test::TextureKind::kMultisampled:
+ return b->Global(
+ "texture",
+ b->ty.multisampled_texture(texture_dimension, BuildResultVectorComponentType(b)),
+ attrs);
- case ast::builtin::test::TextureKind::kStorage: {
- auto* st = b->ty.storage_texture(texture_dimension, texel_format, access);
- return b->Global("texture", st, attrs);
+ case ast::builtin::test::TextureKind::kStorage: {
+ auto* st = b->ty.storage_texture(texture_dimension, texel_format, access);
+ return b->Global("texture", st, attrs);
+ }
}
- }
- TINT_UNREACHABLE(AST, b->Diagnostics());
- return nullptr;
+ TINT_UNREACHABLE(AST, b->Diagnostics());
+ return nullptr;
}
-const ast::Variable* TextureOverloadCase::BuildSamplerVariable(
- ProgramBuilder* b) const {
- AttributeList attrs = {
- b->create<ast::GroupAttribute>(0),
- b->create<ast::BindingAttribute>(1),
- };
- return b->Global("sampler", b->ty.sampler(sampler_kind), attrs);
+const ast::Variable* TextureOverloadCase::BuildSamplerVariable(ProgramBuilder* b) const {
+ AttributeList attrs = {
+ b->create<ast::GroupAttribute>(0),
+ b->create<ast::BindingAttribute>(1),
+ };
+ return b->Global("sampler", b->ty.sampler(sampler_kind), attrs);
}
std::vector<TextureOverloadCase> TextureOverloadCase::ValidCases() {
- return {
- {
- ValidTextureOverload::kDimensions1d,
- "textureDimensions(t : texture_1d<f32>) -> i32",
- TextureKind::kRegular,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::k1d,
- TextureDataType::kF32,
- "textureDimensions",
- [](ProgramBuilder* b) { return b->ExprList("texture"); },
- },
- {
- ValidTextureOverload::kDimensions2d,
- "textureDimensions(t : texture_2d<f32>) -> vec2<i32>",
- TextureKind::kRegular,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::k2d,
- TextureDataType::kF32,
- "textureDimensions",
- [](ProgramBuilder* b) { return b->ExprList("texture"); },
- },
- {
- ValidTextureOverload::kDimensions2dLevel,
- "textureDimensions(t : texture_2d<f32>,\n"
- " level : i32) -> vec2<i32>",
- TextureKind::kRegular,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::k2d,
- TextureDataType::kF32,
- "textureDimensions",
- [](ProgramBuilder* b) { return b->ExprList("texture", 1); },
- },
- {
- ValidTextureOverload::kDimensions2dArray,
- "textureDimensions(t : texture_2d_array<f32>) -> vec2<i32>",
- TextureKind::kRegular,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::k2dArray,
- TextureDataType::kF32,
- "textureDimensions",
- [](ProgramBuilder* b) { return b->ExprList("texture"); },
- },
- {
- ValidTextureOverload::kDimensions2dArrayLevel,
- "textureDimensions(t : texture_2d_array<f32>,\n"
- " level : i32) -> vec2<i32>",
- TextureKind::kRegular,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::k2dArray,
- TextureDataType::kF32,
- "textureDimensions",
- [](ProgramBuilder* b) { return b->ExprList("texture", 1); },
- },
- {
- ValidTextureOverload::kDimensions3d,
- "textureDimensions(t : texture_3d<f32>) -> vec3<i32>",
- TextureKind::kRegular,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::k3d,
- TextureDataType::kF32,
- "textureDimensions",
- [](ProgramBuilder* b) { return b->ExprList("texture"); },
- },
- {
- ValidTextureOverload::kDimensions3dLevel,
- "textureDimensions(t : texture_3d<f32>,\n"
- " level : i32) -> vec3<i32>",
- TextureKind::kRegular,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::k3d,
- TextureDataType::kF32,
- "textureDimensions",
- [](ProgramBuilder* b) { return b->ExprList("texture", 1); },
- },
- {
- ValidTextureOverload::kDimensionsCube,
- "textureDimensions(t : texture_cube<f32>) -> vec2<i32>",
- TextureKind::kRegular,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::kCube,
- TextureDataType::kF32,
- "textureDimensions",
- [](ProgramBuilder* b) { return b->ExprList("texture"); },
- },
- {
- ValidTextureOverload::kDimensionsCubeLevel,
- "textureDimensions(t : texture_cube<f32>,\n"
- " level : i32) -> vec2<i32>",
- TextureKind::kRegular,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::kCube,
- TextureDataType::kF32,
- "textureDimensions",
- [](ProgramBuilder* b) { return b->ExprList("texture", 1); },
- },
- {
- ValidTextureOverload::kDimensionsCubeArray,
- "textureDimensions(t : texture_cube_array<f32>) -> vec2<i32>",
- TextureKind::kRegular,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::kCubeArray,
- TextureDataType::kF32,
- "textureDimensions",
- [](ProgramBuilder* b) { return b->ExprList("texture"); },
- },
- {
- ValidTextureOverload::kDimensionsCubeArrayLevel,
- "textureDimensions(t : texture_cube_array<f32>,\n"
- " level : i32) -> vec2<i32>",
- TextureKind::kRegular,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::kCubeArray,
- TextureDataType::kF32,
- "textureDimensions",
- [](ProgramBuilder* b) { return b->ExprList("texture", 1); },
- },
- {
- ValidTextureOverload::kDimensionsMultisampled2d,
- "textureDimensions(t : texture_multisampled_2d<f32>)-> vec2<i32>",
- TextureKind::kMultisampled,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::k2d,
- TextureDataType::kF32,
- "textureDimensions",
- [](ProgramBuilder* b) { return b->ExprList("texture"); },
- },
- {
- ValidTextureOverload::kDimensionsDepth2d,
- "textureDimensions(t : texture_depth_2d) -> vec2<i32>",
- TextureKind::kDepth,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::k2d,
- TextureDataType::kF32,
- "textureDimensions",
- [](ProgramBuilder* b) { return b->ExprList("texture"); },
- },
- {
- ValidTextureOverload::kDimensionsDepth2dLevel,
- "textureDimensions(t : texture_depth_2d,\n"
- " level : i32) -> vec2<i32>",
- TextureKind::kDepth,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::k2d,
- TextureDataType::kF32,
- "textureDimensions",
- [](ProgramBuilder* b) { return b->ExprList("texture", 1); },
- },
- {
- ValidTextureOverload::kDimensionsDepth2dArray,
- "textureDimensions(t : texture_depth_2d_array) -> vec2<i32>",
- TextureKind::kDepth,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::k2dArray,
- TextureDataType::kF32,
- "textureDimensions",
- [](ProgramBuilder* b) { return b->ExprList("texture"); },
- },
- {
- ValidTextureOverload::kDimensionsDepth2dArrayLevel,
- "textureDimensions(t : texture_depth_2d_array,\n"
- " level : i32) -> vec2<i32>",
- TextureKind::kDepth,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::k2dArray,
- TextureDataType::kF32,
- "textureDimensions",
- [](ProgramBuilder* b) { return b->ExprList("texture", 1); },
- },
- {
- ValidTextureOverload::kDimensionsDepthCube,
- "textureDimensions(t : texture_depth_cube) -> vec2<i32>",
- TextureKind::kDepth,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::kCube,
- TextureDataType::kF32,
- "textureDimensions",
- [](ProgramBuilder* b) { return b->ExprList("texture"); },
- },
- {
- ValidTextureOverload::kDimensionsDepthCubeLevel,
- "textureDimensions(t : texture_depth_cube,\n"
- " level : i32) -> vec2<i32>",
- TextureKind::kDepth,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::kCube,
- TextureDataType::kF32,
- "textureDimensions",
- [](ProgramBuilder* b) { return b->ExprList("texture", 1); },
- },
- {
- ValidTextureOverload::kDimensionsDepthCubeArray,
- "textureDimensions(t : texture_depth_cube_array) -> vec2<i32>",
- TextureKind::kDepth,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::kCubeArray,
- TextureDataType::kF32,
- "textureDimensions",
- [](ProgramBuilder* b) { return b->ExprList("texture"); },
- },
- {
- ValidTextureOverload::kDimensionsDepthCubeArrayLevel,
- "textureDimensions(t : texture_depth_cube_array,\n"
- " level : i32) -> vec2<i32>",
- TextureKind::kDepth,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::kCubeArray,
- TextureDataType::kF32,
- "textureDimensions",
- [](ProgramBuilder* b) { return b->ExprList("texture", 1); },
- },
- {
- ValidTextureOverload::kDimensionsDepthMultisampled2d,
- "textureDimensions(t : texture_depth_multisampled_2d) -> vec2<i32>",
- TextureKind::kDepthMultisampled,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::k2d,
- TextureDataType::kF32,
- "textureDimensions",
- [](ProgramBuilder* b) { return b->ExprList("texture"); },
- },
- {
- ValidTextureOverload::kDimensionsStorageWO1d,
- "textureDimensions(t : texture_storage_1d<rgba32float>) -> i32",
- ast::Access::kWrite,
- ast::TexelFormat::kRgba32Float,
- ast::TextureDimension::k1d,
- TextureDataType::kF32,
- "textureDimensions",
- [](ProgramBuilder* b) { return b->ExprList("texture"); },
- },
- {
- ValidTextureOverload::kDimensionsStorageWO2d,
- "textureDimensions(t : texture_storage_2d<rgba32float>) -> "
- "vec2<i32>",
- ast::Access::kWrite,
- ast::TexelFormat::kRgba32Float,
- ast::TextureDimension::k2d,
- TextureDataType::kF32,
- "textureDimensions",
- [](ProgramBuilder* b) { return b->ExprList("texture"); },
- },
- {
- ValidTextureOverload::kDimensionsStorageWO2dArray,
- "textureDimensions(t : texture_storage_2d_array<rgba32float>) -> "
- "vec2<i32>",
- ast::Access::kWrite,
- ast::TexelFormat::kRgba32Float,
- ast::TextureDimension::k2dArray,
- TextureDataType::kF32,
- "textureDimensions",
- [](ProgramBuilder* b) { return b->ExprList("texture"); },
- },
- {
- ValidTextureOverload::kDimensionsStorageWO3d,
- "textureDimensions(t : texture_storage_3d<rgba32float>) -> "
- "vec3<i32>",
- ast::Access::kWrite,
- ast::TexelFormat::kRgba32Float,
- ast::TextureDimension::k3d,
- TextureDataType::kF32,
- "textureDimensions",
- [](ProgramBuilder* b) { return b->ExprList("texture"); },
- },
+ return {
+ {
+ ValidTextureOverload::kDimensions1d,
+ "textureDimensions(t : texture_1d<f32>) -> i32",
+ TextureKind::kRegular,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::k1d,
+ TextureDataType::kF32,
+ "textureDimensions",
+ [](ProgramBuilder* b) { return b->ExprList("texture"); },
+ },
+ {
+ ValidTextureOverload::kDimensions2d,
+ "textureDimensions(t : texture_2d<f32>) -> vec2<i32>",
+ TextureKind::kRegular,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::k2d,
+ TextureDataType::kF32,
+ "textureDimensions",
+ [](ProgramBuilder* b) { return b->ExprList("texture"); },
+ },
+ {
+ ValidTextureOverload::kDimensions2dLevel,
+ "textureDimensions(t : texture_2d<f32>,\n"
+ " level : i32) -> vec2<i32>",
+ TextureKind::kRegular,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::k2d,
+ TextureDataType::kF32,
+ "textureDimensions",
+ [](ProgramBuilder* b) { return b->ExprList("texture", 1_i); },
+ },
+ {
+ ValidTextureOverload::kDimensions2dArray,
+ "textureDimensions(t : texture_2d_array<f32>) -> vec2<i32>",
+ TextureKind::kRegular,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::k2dArray,
+ TextureDataType::kF32,
+ "textureDimensions",
+ [](ProgramBuilder* b) { return b->ExprList("texture"); },
+ },
+ {
+ ValidTextureOverload::kDimensions2dArrayLevel,
+ "textureDimensions(t : texture_2d_array<f32>,\n"
+ " level : i32) -> vec2<i32>",
+ TextureKind::kRegular,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::k2dArray,
+ TextureDataType::kF32,
+ "textureDimensions",
+ [](ProgramBuilder* b) { return b->ExprList("texture", 1_i); },
+ },
+ {
+ ValidTextureOverload::kDimensions3d,
+ "textureDimensions(t : texture_3d<f32>) -> vec3<i32>",
+ TextureKind::kRegular,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::k3d,
+ TextureDataType::kF32,
+ "textureDimensions",
+ [](ProgramBuilder* b) { return b->ExprList("texture"); },
+ },
+ {
+ ValidTextureOverload::kDimensions3dLevel,
+ "textureDimensions(t : texture_3d<f32>,\n"
+ " level : i32) -> vec3<i32>",
+ TextureKind::kRegular,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::k3d,
+ TextureDataType::kF32,
+ "textureDimensions",
+ [](ProgramBuilder* b) { return b->ExprList("texture", 1_i); },
+ },
+ {
+ ValidTextureOverload::kDimensionsCube,
+ "textureDimensions(t : texture_cube<f32>) -> vec2<i32>",
+ TextureKind::kRegular,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::kCube,
+ TextureDataType::kF32,
+ "textureDimensions",
+ [](ProgramBuilder* b) { return b->ExprList("texture"); },
+ },
+ {
+ ValidTextureOverload::kDimensionsCubeLevel,
+ "textureDimensions(t : texture_cube<f32>,\n"
+ " level : i32) -> vec2<i32>",
+ TextureKind::kRegular,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::kCube,
+ TextureDataType::kF32,
+ "textureDimensions",
+ [](ProgramBuilder* b) { return b->ExprList("texture", 1_i); },
+ },
+ {
+ ValidTextureOverload::kDimensionsCubeArray,
+ "textureDimensions(t : texture_cube_array<f32>) -> vec2<i32>",
+ TextureKind::kRegular,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::kCubeArray,
+ TextureDataType::kF32,
+ "textureDimensions",
+ [](ProgramBuilder* b) { return b->ExprList("texture"); },
+ },
+ {
+ ValidTextureOverload::kDimensionsCubeArrayLevel,
+ "textureDimensions(t : texture_cube_array<f32>,\n"
+ " level : i32) -> vec2<i32>",
+ TextureKind::kRegular,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::kCubeArray,
+ TextureDataType::kF32,
+ "textureDimensions",
+ [](ProgramBuilder* b) { return b->ExprList("texture", 1_i); },
+ },
+ {
+ ValidTextureOverload::kDimensionsMultisampled2d,
+ "textureDimensions(t : texture_multisampled_2d<f32>)-> vec2<i32>",
+ TextureKind::kMultisampled,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::k2d,
+ TextureDataType::kF32,
+ "textureDimensions",
+ [](ProgramBuilder* b) { return b->ExprList("texture"); },
+ },
+ {
+ ValidTextureOverload::kDimensionsDepth2d,
+ "textureDimensions(t : texture_depth_2d) -> vec2<i32>",
+ TextureKind::kDepth,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::k2d,
+ TextureDataType::kF32,
+ "textureDimensions",
+ [](ProgramBuilder* b) { return b->ExprList("texture"); },
+ },
+ {
+ ValidTextureOverload::kDimensionsDepth2dLevel,
+ "textureDimensions(t : texture_depth_2d,\n"
+ " level : i32) -> vec2<i32>",
+ TextureKind::kDepth,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::k2d,
+ TextureDataType::kF32,
+ "textureDimensions",
+ [](ProgramBuilder* b) { return b->ExprList("texture", 1_i); },
+ },
+ {
+ ValidTextureOverload::kDimensionsDepth2dArray,
+ "textureDimensions(t : texture_depth_2d_array) -> vec2<i32>",
+ TextureKind::kDepth,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::k2dArray,
+ TextureDataType::kF32,
+ "textureDimensions",
+ [](ProgramBuilder* b) { return b->ExprList("texture"); },
+ },
+ {
+ ValidTextureOverload::kDimensionsDepth2dArrayLevel,
+ "textureDimensions(t : texture_depth_2d_array,\n"
+ " level : i32) -> vec2<i32>",
+ TextureKind::kDepth,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::k2dArray,
+ TextureDataType::kF32,
+ "textureDimensions",
+ [](ProgramBuilder* b) { return b->ExprList("texture", 1_i); },
+ },
+ {
+ ValidTextureOverload::kDimensionsDepthCube,
+ "textureDimensions(t : texture_depth_cube) -> vec2<i32>",
+ TextureKind::kDepth,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::kCube,
+ TextureDataType::kF32,
+ "textureDimensions",
+ [](ProgramBuilder* b) { return b->ExprList("texture"); },
+ },
+ {
+ ValidTextureOverload::kDimensionsDepthCubeLevel,
+ "textureDimensions(t : texture_depth_cube,\n"
+ " level : i32) -> vec2<i32>",
+ TextureKind::kDepth,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::kCube,
+ TextureDataType::kF32,
+ "textureDimensions",
+ [](ProgramBuilder* b) { return b->ExprList("texture", 1_i); },
+ },
+ {
+ ValidTextureOverload::kDimensionsDepthCubeArray,
+ "textureDimensions(t : texture_depth_cube_array) -> vec2<i32>",
+ TextureKind::kDepth,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::kCubeArray,
+ TextureDataType::kF32,
+ "textureDimensions",
+ [](ProgramBuilder* b) { return b->ExprList("texture"); },
+ },
+ {
+ ValidTextureOverload::kDimensionsDepthCubeArrayLevel,
+ "textureDimensions(t : texture_depth_cube_array,\n"
+ " level : i32) -> vec2<i32>",
+ TextureKind::kDepth,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::kCubeArray,
+ TextureDataType::kF32,
+ "textureDimensions",
+ [](ProgramBuilder* b) { return b->ExprList("texture", 1_i); },
+ },
+ {
+ ValidTextureOverload::kDimensionsDepthMultisampled2d,
+ "textureDimensions(t : texture_depth_multisampled_2d) -> vec2<i32>",
+ TextureKind::kDepthMultisampled,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::k2d,
+ TextureDataType::kF32,
+ "textureDimensions",
+ [](ProgramBuilder* b) { return b->ExprList("texture"); },
+ },
+ {
+ ValidTextureOverload::kDimensionsStorageWO1d,
+ "textureDimensions(t : texture_storage_1d<rgba32float>) -> i32",
+ ast::Access::kWrite,
+ ast::TexelFormat::kRgba32Float,
+ ast::TextureDimension::k1d,
+ TextureDataType::kF32,
+ "textureDimensions",
+ [](ProgramBuilder* b) { return b->ExprList("texture"); },
+ },
+ {
+ ValidTextureOverload::kDimensionsStorageWO2d,
+ "textureDimensions(t : texture_storage_2d<rgba32float>) -> "
+ "vec2<i32>",
+ ast::Access::kWrite,
+ ast::TexelFormat::kRgba32Float,
+ ast::TextureDimension::k2d,
+ TextureDataType::kF32,
+ "textureDimensions",
+ [](ProgramBuilder* b) { return b->ExprList("texture"); },
+ },
+ {
+ ValidTextureOverload::kDimensionsStorageWO2dArray,
+ "textureDimensions(t : texture_storage_2d_array<rgba32float>) -> "
+ "vec2<i32>",
+ ast::Access::kWrite,
+ ast::TexelFormat::kRgba32Float,
+ ast::TextureDimension::k2dArray,
+ TextureDataType::kF32,
+ "textureDimensions",
+ [](ProgramBuilder* b) { return b->ExprList("texture"); },
+ },
+ {
+ ValidTextureOverload::kDimensionsStorageWO3d,
+ "textureDimensions(t : texture_storage_3d<rgba32float>) -> "
+ "vec3<i32>",
+ ast::Access::kWrite,
+ ast::TexelFormat::kRgba32Float,
+ ast::TextureDimension::k3d,
+ TextureDataType::kF32,
+ "textureDimensions",
+ [](ProgramBuilder* b) { return b->ExprList("texture"); },
+ },
- {
- ValidTextureOverload::kGather2dF32,
- "textureGather(component : i32,\n"
- " t : texture_2d<T>,\n"
- " s : sampler,\n"
- " coords : vec2<f32>) -> vec4<T>",
- TextureKind::kRegular,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::k2d,
- TextureDataType::kF32,
- "textureGather",
- [](ProgramBuilder* b) {
- return b->ExprList(0, // component
- "texture", // t
- "sampler", // s
- b->vec2<f32>(1.f, 2.f)); // coords
- },
- },
- {
- ValidTextureOverload::kGather2dOffsetF32,
- "textureGather(component : i32,\n"
- " t : texture_2d<T>,\n"
- " s : sampler,\n"
- " coords : vec2<f32>,\n"
- " offset : vec2<i32>) -> vec4<T>",
- TextureKind::kRegular,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::k2d,
- TextureDataType::kF32,
- "textureGather",
- [](ProgramBuilder* b) {
- return b->ExprList(0, // component
- "texture", // t
- "sampler", // s
- b->vec2<f32>(1.f, 2.f), // coords
- b->vec2<i32>(3, 4)); // offset
- },
- },
- {
- ValidTextureOverload::kGather2dArrayF32,
- "textureGather(component : i32,\n"
- " t : texture_2d_array<T>,\n"
- " s : sampler,\n"
- " coords : vec2<f32>,\n"
- " array_index : i32) -> vec4<T>",
- TextureKind::kRegular,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::k2dArray,
- TextureDataType::kF32,
- "textureGather",
- [](ProgramBuilder* b) {
- return b->ExprList(0, // component
- "texture", // t
- "sampler", // s
- b->vec2<f32>(1.f, 2.f), // coords
- 3); // array index
- },
- },
- {
- ValidTextureOverload::kGather2dArrayOffsetF32,
- "textureGather(component : i32,\n"
- " t : texture_2d_array<T>,\n"
- " s : sampler,\n"
- " coords : vec2<f32>,\n"
- " array_index : i32,\n"
- " offset : vec2<i32>) -> vec4<T>",
- TextureKind::kRegular,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::k2dArray,
- TextureDataType::kF32,
- "textureGather",
- [](ProgramBuilder* b) {
- return b->ExprList(0, // component
- "texture", // t
- "sampler", // s
- b->vec2<f32>(1.f, 2.f), // coords
- 3, // array_index
- b->vec2<i32>(4, 5)); // offset
- },
- },
- {
- ValidTextureOverload::kGatherCubeF32,
- "textureGather(component : i32,\n"
- " t : texture_cube<T>,\n"
- " s : sampler,\n"
- " coords : vec3<f32>) -> vec4<T>",
- TextureKind::kRegular,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::kCube,
- TextureDataType::kF32,
- "textureGather",
- [](ProgramBuilder* b) {
- return b->ExprList(0, // component
- "texture", // t
- "sampler", // s
- b->vec3<f32>(1.f, 2.f, 3.f)); // coords
- },
- },
- {
- ValidTextureOverload::kGatherCubeArrayF32,
- "textureGather(component : i32,\n"
- " t : texture_cube_array<T>,\n"
- " s : sampler,\n"
- " coords : vec3<f32>,\n"
- " array_index : i32) -> vec4<T>",
- TextureKind::kRegular,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::kCubeArray,
- TextureDataType::kF32,
- "textureGather",
- [](ProgramBuilder* b) {
- return b->ExprList(0, // component
- "texture", // t
- "sampler", // s
- b->vec3<f32>(1.f, 2.f, 3.f), // coords
- 4); // array_index
- },
- },
- {
- ValidTextureOverload::kGatherDepth2dF32,
- "textureGather(t : texture_depth_2d,\n"
- " s : sampler,\n"
- " coords : vec2<f32>) -> vec4<f32>",
- TextureKind::kDepth,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::k2d,
- TextureDataType::kF32,
- "textureGather",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- "sampler", // s
- b->vec2<f32>(1.f, 2.f)); // coords
- },
- },
- {
- ValidTextureOverload::kGatherDepth2dOffsetF32,
- "textureGather(t : texture_depth_2d,\n"
- " s : sampler,\n"
- " coords : vec2<f32>,\n"
- " offset : vec2<i32>) -> vec4<f32>",
- TextureKind::kDepth,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::k2d,
- TextureDataType::kF32,
- "textureGather",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- "sampler", // s
- b->vec2<f32>(1.f, 2.f), // coords
- b->vec2<i32>(3, 4)); // offset
- },
- },
- {
- ValidTextureOverload::kGatherDepth2dArrayF32,
- "textureGather(t : texture_depth_2d_array,\n"
- " s : sampler,\n"
- " coords : vec2<f32>,\n"
- " array_index : i32) -> vec4<f32>",
- TextureKind::kDepth,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::k2dArray,
- TextureDataType::kF32,
- "textureGather",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- "sampler", // s
- b->vec2<f32>(1.f, 2.f), // coords
- 3); // array_index
- },
- },
- {
- ValidTextureOverload::kGatherDepth2dArrayOffsetF32,
- "textureGather(t : texture_depth_2d_array,\n"
- " s : sampler,\n"
- " coords : vec2<f32>,\n"
- " array_index : i32,\n"
- " offset : vec2<i32>) -> vec4<f32>",
- TextureKind::kDepth,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::k2dArray,
- TextureDataType::kF32,
- "textureGather",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- "sampler", // s
- b->vec2<f32>(1.f, 2.f), // coords
- 3, // array_index
- b->vec2<i32>(4, 5)); // offset
- },
- },
- {
- ValidTextureOverload::kGatherDepthCubeF32,
- "textureGather(t : texture_depth_cube,\n"
- " s : sampler,\n"
- " coords : vec3<f32>) -> vec4<f32>",
- TextureKind::kDepth,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::kCube,
- TextureDataType::kF32,
- "textureGather",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- "sampler", // s
- b->vec3<f32>(1.f, 2.f, 3.f)); // coords
- },
- },
- {
- ValidTextureOverload::kGatherDepthCubeArrayF32,
- "textureGather(t : texture_depth_cube_array,\n"
- " s : sampler,\n"
- " coords : vec3<f32>,\n"
- " array_index : i32) -> vec4<f32>",
- TextureKind::kDepth,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::kCubeArray,
- TextureDataType::kF32,
- "textureGather",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- "sampler", // s
- b->vec3<f32>(1.f, 2.f, 3.f), // coords
- 4); // array_index
- },
- },
- {
- ValidTextureOverload::kGatherCompareDepth2dF32,
- "textureGatherCompare(t : texture_depth_2d,\n"
- " s : sampler_comparison,\n"
- " coords : vec2<f32>,\n"
- " depth_ref : f32) -> vec4<f32>",
- TextureKind::kDepth,
- ast::SamplerKind::kComparisonSampler,
- ast::TextureDimension::k2d,
- TextureDataType::kF32,
- "textureGatherCompare",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- "sampler", // s
- b->vec2<f32>(1.f, 2.f), // coords
- 3.f); // depth_ref
- },
- },
- {
- ValidTextureOverload::kGatherCompareDepth2dOffsetF32,
- "textureGatherCompare(t : texture_depth_2d,\n"
- " s : sampler_comparison,\n"
- " coords : vec2<f32>,\n"
- " depth_ref : f32,\n"
- " offset : vec2<i32>) -> vec4<f32>",
- TextureKind::kDepth,
- ast::SamplerKind::kComparisonSampler,
- ast::TextureDimension::k2d,
- TextureDataType::kF32,
- "textureGatherCompare",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- "sampler", // s
- b->vec2<f32>(1.f, 2.f), // coords
- 3.f, // depth_ref
- b->vec2<i32>(4, 5)); // offset
- },
- },
- {
- ValidTextureOverload::kGatherCompareDepth2dArrayF32,
- "textureGatherCompare(t : texture_depth_2d_array,\n"
- " s : sampler_comparison,\n"
- " coords : vec2<f32>,\n"
- " array_index : i32,\n"
- " depth_ref : f32) -> vec4<f32>",
- TextureKind::kDepth,
- ast::SamplerKind::kComparisonSampler,
- ast::TextureDimension::k2dArray,
- TextureDataType::kF32,
- "textureGatherCompare",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- "sampler", // s
- b->vec2<f32>(1.f, 2.f), // coords
- 3, // array_index
- 4.f); // depth_ref
- },
- },
- {
- ValidTextureOverload::kGatherCompareDepth2dArrayOffsetF32,
- "textureGatherCompare(t : texture_depth_2d_array,\n"
- " s : sampler_comparison,\n"
- " coords : vec2<f32>,\n"
- " array_index : i32,\n"
- " depth_ref : f32,\n"
- " offset : vec2<i32>) -> vec4<f32>",
- TextureKind::kDepth,
- ast::SamplerKind::kComparisonSampler,
- ast::TextureDimension::k2dArray,
- TextureDataType::kF32,
- "textureGatherCompare",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- "sampler", // s
- b->vec2<f32>(1.f, 2.f), // coords
- 3, // array_index
- 4.f, // depth_ref
- b->vec2<i32>(5, 6)); // offset
- },
- },
- {
- ValidTextureOverload::kGatherCompareDepthCubeF32,
- "textureGatherCompare(t : texture_depth_cube,\n"
- " s : sampler_comparison,\n"
- " coords : vec3<f32>,\n"
- " depth_ref : f32) -> vec4<f32>",
- TextureKind::kDepth,
- ast::SamplerKind::kComparisonSampler,
- ast::TextureDimension::kCube,
- TextureDataType::kF32,
- "textureGatherCompare",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- "sampler", // s
- b->vec3<f32>(1.f, 2.f, 3.f), // coords
- 4.f); // depth_ref
- },
- },
- {
- ValidTextureOverload::kGatherCompareDepthCubeArrayF32,
- "textureGatherCompare(t : texture_depth_cube_array,\n"
- " s : sampler_comparison,\n"
- " coords : vec3<f32>,\n"
- " array_index : i32,\n"
- " depth_ref : f32) -> vec4<f32>",
- TextureKind::kDepth,
- ast::SamplerKind::kComparisonSampler,
- ast::TextureDimension::kCubeArray,
- TextureDataType::kF32,
- "textureGatherCompare",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- "sampler", // s
- b->vec3<f32>(1.f, 2.f, 3.f), // coords
- 4, // array_index
- 5.f); // depth_ref
- },
- },
- {
- ValidTextureOverload::kNumLayers2dArray,
- "textureNumLayers(t : texture_2d_array<f32>) -> i32",
- TextureKind::kRegular,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::k2dArray,
- TextureDataType::kF32,
- "textureNumLayers",
- [](ProgramBuilder* b) { return b->ExprList("texture"); },
- },
- {
- ValidTextureOverload::kNumLayersCubeArray,
- "textureNumLayers(t : texture_cube_array<f32>) -> i32",
- TextureKind::kRegular,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::kCubeArray,
- TextureDataType::kF32,
- "textureNumLayers",
- [](ProgramBuilder* b) { return b->ExprList("texture"); },
- },
- {
- ValidTextureOverload::kNumLayersDepth2dArray,
- "textureNumLayers(t : texture_depth_2d_array) -> i32",
- TextureKind::kDepth,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::k2dArray,
- TextureDataType::kF32,
- "textureNumLayers",
- [](ProgramBuilder* b) { return b->ExprList("texture"); },
- },
- {
- ValidTextureOverload::kNumLayersDepthCubeArray,
- "textureNumLayers(t : texture_depth_cube_array) -> i32",
- TextureKind::kDepth,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::kCubeArray,
- TextureDataType::kF32,
- "textureNumLayers",
- [](ProgramBuilder* b) { return b->ExprList("texture"); },
- },
- {
- ValidTextureOverload::kNumLayersStorageWO2dArray,
- "textureNumLayers(t : texture_storage_2d_array<rgba32float>) -> i32",
- ast::Access::kWrite,
- ast::TexelFormat::kRgba32Float,
- ast::TextureDimension::k2dArray,
- TextureDataType::kF32,
- "textureNumLayers",
- [](ProgramBuilder* b) { return b->ExprList("texture"); },
- },
- {
- ValidTextureOverload::kNumLevels2d,
- "textureNumLevels(t : texture_2d<f32>) -> i32",
- TextureKind::kRegular,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::k2d,
- TextureDataType::kF32,
- "textureNumLevels",
- [](ProgramBuilder* b) { return b->ExprList("texture"); },
- },
- {
- ValidTextureOverload::kNumLevels2dArray,
- "textureNumLevels(t : texture_2d_array<f32>) -> i32",
- TextureKind::kRegular,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::k2dArray,
- TextureDataType::kF32,
- "textureNumLevels",
- [](ProgramBuilder* b) { return b->ExprList("texture"); },
- },
- {
- ValidTextureOverload::kNumLevels3d,
- "textureNumLevels(t : texture_3d<f32>) -> i32",
- TextureKind::kRegular,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::k3d,
- TextureDataType::kF32,
- "textureNumLevels",
- [](ProgramBuilder* b) { return b->ExprList("texture"); },
- },
- {
- ValidTextureOverload::kNumLevelsCube,
- "textureNumLevels(t : texture_cube<f32>) -> i32",
- TextureKind::kRegular,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::kCube,
- TextureDataType::kF32,
- "textureNumLevels",
- [](ProgramBuilder* b) { return b->ExprList("texture"); },
- },
- {
- ValidTextureOverload::kNumLevelsCubeArray,
- "textureNumLevels(t : texture_cube_array<f32>) -> i32",
- TextureKind::kRegular,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::kCubeArray,
- TextureDataType::kF32,
- "textureNumLevels",
- [](ProgramBuilder* b) { return b->ExprList("texture"); },
- },
- {
- ValidTextureOverload::kNumLevelsDepth2d,
- "textureNumLevels(t : texture_depth_2d) -> i32",
- TextureKind::kDepth,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::k2d,
- TextureDataType::kF32,
- "textureNumLevels",
- [](ProgramBuilder* b) { return b->ExprList("texture"); },
- },
- {
- ValidTextureOverload::kNumLevelsDepth2dArray,
- "textureNumLevels(t : texture_depth_2d_array) -> i32",
- TextureKind::kDepth,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::k2dArray,
- TextureDataType::kF32,
- "textureNumLevels",
- [](ProgramBuilder* b) { return b->ExprList("texture"); },
- },
- {
- ValidTextureOverload::kNumLevelsDepthCube,
- "textureNumLevels(t : texture_depth_cube) -> i32",
- TextureKind::kDepth,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::kCube,
- TextureDataType::kF32,
- "textureNumLevels",
- [](ProgramBuilder* b) { return b->ExprList("texture"); },
- },
- {
- ValidTextureOverload::kNumLevelsDepthCubeArray,
- "textureNumLevels(t : texture_depth_cube_array) -> i32",
- TextureKind::kDepth,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::kCubeArray,
- TextureDataType::kF32,
- "textureNumLevels",
- [](ProgramBuilder* b) { return b->ExprList("texture"); },
- },
- {
- ValidTextureOverload::kNumSamplesMultisampled2d,
- "textureNumSamples(t : texture_multisampled_2d<f32>) -> i32",
- TextureKind::kMultisampled,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::k2d,
- TextureDataType::kF32,
- "textureNumSamples",
- [](ProgramBuilder* b) { return b->ExprList("texture"); },
- },
- {
- ValidTextureOverload::kSample1dF32,
- "textureSample(t : texture_1d<f32>,\n"
- " s : sampler,\n"
- " coords : f32) -> vec4<f32>",
- TextureKind::kRegular,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::k1d,
- TextureDataType::kF32,
- "textureSample",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- "sampler", // s
- 1.0f); // coords
- },
- },
- {
- ValidTextureOverload::kSample2dF32,
- "textureSample(t : texture_2d<f32>,\n"
- " s : sampler,\n"
- " coords : vec2<f32>) -> vec4<f32>",
- TextureKind::kRegular,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::k2d,
- TextureDataType::kF32,
- "textureSample",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- "sampler", // s
- b->vec2<f32>(1.f, 2.f)); // coords
- },
- },
- {
- ValidTextureOverload::kSample2dOffsetF32,
- "textureSample(t : texture_2d<f32>,\n"
- " s : sampler,\n"
- " coords : vec2<f32>\n"
- " offset : vec2<i32>) -> vec4<f32>",
- TextureKind::kRegular,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::k2d,
- TextureDataType::kF32,
- "textureSample",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- "sampler", // s
- b->vec2<f32>(1.f, 2.f), // coords
- b->vec2<i32>(3, 4)); // offset
- },
- },
- {
- ValidTextureOverload::kSample2dArrayF32,
- "textureSample(t : texture_2d_array<f32>,\n"
- " s : sampler,\n"
- " coords : vec2<f32>,\n"
- " array_index : i32) -> vec4<f32>",
- TextureKind::kRegular,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::k2dArray,
- TextureDataType::kF32,
- "textureSample",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- "sampler", // s
- b->vec2<f32>(1.f, 2.f), // coords
- 3); // array_index
- },
- },
- {
- ValidTextureOverload::kSample2dArrayOffsetF32,
- "textureSample(t : texture_2d_array<f32>,\n"
- " s : sampler,\n"
- " coords : vec2<f32>,\n"
- " array_index : i32\n"
- " offset : vec2<i32>) -> vec4<f32>",
- TextureKind::kRegular,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::k2dArray,
- TextureDataType::kF32,
- "textureSample",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- "sampler", // s
- b->vec2<f32>(1.f, 2.f), // coords
- 3, // array_index
- b->vec2<i32>(4, 5)); // offset
- },
- },
- {
- ValidTextureOverload::kSample3dF32,
- "textureSample(t : texture_3d<f32>,\n"
- " s : sampler,\n"
- " coords : vec3<f32>) -> vec4<f32>",
- TextureKind::kRegular,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::k3d,
- TextureDataType::kF32,
- "textureSample",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- "sampler", // s
- b->vec3<f32>(1.f, 2.f, 3.f)); // coords
- },
- },
- {
- ValidTextureOverload::kSample3dOffsetF32,
- "textureSample(t : texture_3d<f32>,\n"
- " s : sampler,\n"
- " coords : vec3<f32>\n"
- " offset : vec3<i32>) -> vec4<f32>",
- TextureKind::kRegular,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::k3d,
- TextureDataType::kF32,
- "textureSample",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- "sampler", // s
- b->vec3<f32>(1.f, 2.f, 3.f), // coords
- b->vec3<i32>(4, 5, 6)); // offset
- },
- },
- {
- ValidTextureOverload::kSampleCubeF32,
- "textureSample(t : texture_cube<f32>,\n"
- " s : sampler,\n"
- " coords : vec3<f32>) -> vec4<f32>",
- TextureKind::kRegular,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::kCube,
- TextureDataType::kF32,
- "textureSample",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- "sampler", // s
- b->vec3<f32>(1.f, 2.f, 3.f)); // coords
- },
- },
- {
- ValidTextureOverload::kSampleCubeArrayF32,
- "textureSample(t : texture_cube_array<f32>,\n"
- " s : sampler,\n"
- " coords : vec3<f32>,\n"
- " array_index : i32) -> vec4<f32>",
- TextureKind::kRegular,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::kCubeArray,
- TextureDataType::kF32,
- "textureSample",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- "sampler", // s
- b->vec3<f32>(1.f, 2.f, 3.f), // coords
- 4); // array_index
- },
- },
- {
- ValidTextureOverload::kSampleDepth2dF32,
- "textureSample(t : texture_depth_2d,\n"
- " s : sampler,\n"
- " coords : vec2<f32>) -> f32",
- TextureKind::kDepth,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::k2d,
- TextureDataType::kF32,
- "textureSample",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- "sampler", // s
- b->vec2<f32>(1.f, 2.f)); // coords
- },
- },
- {
- ValidTextureOverload::kSampleDepth2dOffsetF32,
- "textureSample(t : texture_depth_2d,\n"
- " s : sampler,\n"
- " coords : vec2<f32>\n"
- " offset : vec2<i32>) -> f32",
- TextureKind::kDepth,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::k2d,
- TextureDataType::kF32,
- "textureSample",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- "sampler", // s
- b->vec2<f32>(1.f, 2.f), // coords
- b->vec2<i32>(3, 4)); // offset
- },
- },
- {
- ValidTextureOverload::kSampleDepth2dArrayF32,
- "textureSample(t : texture_depth_2d_array,\n"
- " s : sampler,\n"
- " coords : vec2<f32>,\n"
- " array_index : i32) -> f32",
- TextureKind::kDepth,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::k2dArray,
- TextureDataType::kF32,
- "textureSample",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- "sampler", // s
- b->vec2<f32>(1.f, 2.f), // coords
- 3); // array_index
- },
- },
- {
- ValidTextureOverload::kSampleDepth2dArrayOffsetF32,
- "textureSample(t : texture_depth_2d_array,\n"
- " s : sampler,\n"
- " coords : vec2<f32>,\n"
- " array_index : i32\n"
- " offset : vec2<i32>) -> f32",
- TextureKind::kDepth,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::k2dArray,
- TextureDataType::kF32,
- "textureSample",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- "sampler", // s
- b->vec2<f32>(1.f, 2.f), // coords
- 3, // array_index
- b->vec2<i32>(4, 5)); // offset
- },
- },
- {
- ValidTextureOverload::kSampleDepthCubeF32,
- "textureSample(t : texture_depth_cube,\n"
- " s : sampler,\n"
- " coords : vec3<f32>) -> f32",
- TextureKind::kDepth,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::kCube,
- TextureDataType::kF32,
- "textureSample",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- "sampler", // s
- b->vec3<f32>(1.f, 2.f, 3.f)); // coords
- },
- },
- {
- ValidTextureOverload::kSampleDepthCubeArrayF32,
- "textureSample(t : texture_depth_cube_array,\n"
- " s : sampler,\n"
- " coords : vec3<f32>,\n"
- " array_index : i32) -> f32",
- TextureKind::kDepth,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::kCubeArray,
- TextureDataType::kF32,
- "textureSample",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- "sampler", // s
- b->vec3<f32>(1.f, 2.f, 3.f), // coords
- 4); // array_index
- },
- },
- {
- ValidTextureOverload::kSampleBias2dF32,
- "textureSampleBias(t : texture_2d<f32>,\n"
- " s : sampler,\n"
- " coords : vec2<f32>,\n"
- " bias : f32) -> vec4<f32>",
- TextureKind::kRegular,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::k2d,
- TextureDataType::kF32,
- "textureSampleBias",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- "sampler", // s
- b->vec2<f32>(1.f, 2.f), // coords
- 3.f); // bias
- },
- },
- {
- ValidTextureOverload::kSampleBias2dOffsetF32,
- "textureSampleBias(t : texture_2d<f32>,\n"
- " s : sampler,\n"
- " coords : vec2<f32>,\n"
- " bias : f32,\n"
- " offset : vec2<i32>) -> vec4<f32>",
- TextureKind::kRegular,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::k2d,
- TextureDataType::kF32,
- "textureSampleBias",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- "sampler", // s
- b->vec2<f32>(1.f, 2.f), // coords
- 3.f, // bias
- b->vec2<i32>(4, 5)); // offset
- },
- },
- {
- ValidTextureOverload::kSampleBias2dArrayF32,
- "textureSampleBias(t : texture_2d_array<f32>,\n"
- " s : sampler,\n"
- " coords : vec2<f32>,\n"
- " array_index : i32,\n"
- " bias : f32) -> vec4<f32>",
- TextureKind::kRegular,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::k2dArray,
- TextureDataType::kF32,
- "textureSampleBias",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- "sampler", // s
- b->vec2<f32>(1.f, 2.f), // coords
- 4, // array_index
- 3.f); // bias
- },
- },
- {
- ValidTextureOverload::kSampleBias2dArrayOffsetF32,
- "textureSampleBias(t : texture_2d_array<f32>,\n"
- " s : sampler,\n"
- " coords : vec2<f32>,\n"
- " array_index : i32,\n"
- " bias : f32,\n"
- " offset : vec2<i32>) -> vec4<f32>",
- TextureKind::kRegular,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::k2dArray,
- TextureDataType::kF32,
- "textureSampleBias",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- "sampler", // s
- b->vec2<f32>(1.f, 2.f), // coords
- 3, // array_index
- 4.f, // bias
- b->vec2<i32>(5, 6)); // offset
- },
- },
- {
- ValidTextureOverload::kSampleBias3dF32,
- "textureSampleBias(t : texture_3d<f32>,\n"
- " s : sampler,\n"
- " coords : vec3<f32>,\n"
- " bias : f32) -> vec4<f32>",
- TextureKind::kRegular,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::k3d,
- TextureDataType::kF32,
- "textureSampleBias",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- "sampler", // s
- b->vec3<f32>(1.f, 2.f, 3.f), // coords
- 4.f); // bias
- },
- },
- {
- ValidTextureOverload::kSampleBias3dOffsetF32,
- "textureSampleBias(t : texture_3d<f32>,\n"
- " s : sampler,\n"
- " coords : vec3<f32>,\n"
- " bias : f32,\n"
- " offset : vec3<i32>) -> vec4<f32>",
- TextureKind::kRegular,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::k3d,
- TextureDataType::kF32,
- "textureSampleBias",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- "sampler", // s
- b->vec3<f32>(1.f, 2.f, 3.f), // coords
- 4.f, // bias
- b->vec3<i32>(5, 6, 7)); // offset
- },
- },
- {
- ValidTextureOverload::kSampleBiasCubeF32,
- "textureSampleBias(t : texture_cube<f32>,\n"
- " s : sampler,\n"
- " coords : vec3<f32>,\n"
- " bias : f32) -> vec4<f32>",
- TextureKind::kRegular,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::kCube,
- TextureDataType::kF32,
- "textureSampleBias",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- "sampler", // s
- b->vec3<f32>(1.f, 2.f, 3.f), // coords
- 4.f); // bias
- },
- },
- {
- ValidTextureOverload::kSampleBiasCubeArrayF32,
- "textureSampleBias(t : texture_cube_array<f32>,\n"
- " s : sampler,\n"
- " coords : vec3<f32>,\n"
- " array_index : i32,\n"
- " bias : f32) -> vec4<f32>",
- TextureKind::kRegular,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::kCubeArray,
- TextureDataType::kF32,
- "textureSampleBias",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- "sampler", // s
- b->vec3<f32>(1.f, 2.f, 3.f), // coords
- 3, // array_index
- 4.f); // bias
- },
- },
- {
- ValidTextureOverload::kSampleLevel2dF32,
- "textureSampleLevel(t : texture_2d<f32>,\n"
- " s : sampler,\n"
- " coords : vec2<f32>,\n"
- " level : f32) -> vec4<f32>",
- TextureKind::kRegular,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::k2d,
- TextureDataType::kF32,
- "textureSampleLevel",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- "sampler", // s
- b->vec2<f32>(1.f, 2.f), // coords
- 3.f); // level
- },
- },
- {
- ValidTextureOverload::kSampleLevel2dOffsetF32,
- "textureSampleLevel(t : texture_2d<f32>,\n"
- " s : sampler,\n"
- " coords : vec2<f32>,\n"
- " level : f32,\n"
- " offset : vec2<i32>) -> vec4<f32>",
- TextureKind::kRegular,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::k2d,
- TextureDataType::kF32,
- "textureSampleLevel",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- "sampler", // s
- b->vec2<f32>(1.f, 2.f), // coords
- 3.f, // level
- b->vec2<i32>(4, 5)); // offset
- },
- },
- {
- ValidTextureOverload::kSampleLevel2dArrayF32,
- "textureSampleLevel(t : texture_2d_array<f32>,\n"
- " s : sampler,\n"
- " coords : vec2<f32>,\n"
- " array_index : i32,\n"
- " level : f32) -> vec4<f32>",
- TextureKind::kRegular,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::k2dArray,
- TextureDataType::kF32,
- "textureSampleLevel",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- "sampler", // s
- b->vec2<f32>(1.f, 2.f), // coords
- 3, // array_index
- 4.f); // level
- },
- },
- {
- ValidTextureOverload::kSampleLevel2dArrayOffsetF32,
- "textureSampleLevel(t : texture_2d_array<f32>,\n"
- " s : sampler,\n"
- " coords : vec2<f32>,\n"
- " array_index : i32,\n"
- " level : f32,\n"
- " offset : vec2<i32>) -> vec4<f32>",
- TextureKind::kRegular,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::k2dArray,
- TextureDataType::kF32,
- "textureSampleLevel",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- "sampler", // s
- b->vec2<f32>(1.f, 2.f), // coords
- 3, // array_index
- 4.f, // level
- b->vec2<i32>(5, 6)); // offset
- },
- },
- {
- ValidTextureOverload::kSampleLevel3dF32,
- "textureSampleLevel(t : texture_3d<f32>,\n"
- " s : sampler,\n"
- " coords : vec3<f32>,\n"
- " level : f32) -> vec4<f32>",
- TextureKind::kRegular,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::k3d,
- TextureDataType::kF32,
- "textureSampleLevel",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- "sampler", // s
- b->vec3<f32>(1.f, 2.f, 3.f), // coords
- 4.f); // level
- },
- },
- {
- ValidTextureOverload::kSampleLevel3dOffsetF32,
- "textureSampleLevel(t : texture_3d<f32>,\n"
- " s : sampler,\n"
- " coords : vec3<f32>,\n"
- " level : f32,\n"
- " offset : vec3<i32>) -> vec4<f32>",
- TextureKind::kRegular,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::k3d,
- TextureDataType::kF32,
- "textureSampleLevel",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- "sampler", // s
- b->vec3<f32>(1.f, 2.f, 3.f), // coords
- 4.f, // level
- b->vec3<i32>(5, 6, 7)); // offset
- },
- },
- {
- ValidTextureOverload::kSampleLevelCubeF32,
- "textureSampleLevel(t : texture_cube<f32>,\n"
- " s : sampler,\n"
- " coords : vec3<f32>,\n"
- " level : f32) -> vec4<f32>",
- TextureKind::kRegular,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::kCube,
- TextureDataType::kF32,
- "textureSampleLevel",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- "sampler", // s
- b->vec3<f32>(1.f, 2.f, 3.f), // coords
- 4.f); // level
- },
- },
- {
- ValidTextureOverload::kSampleLevelCubeArrayF32,
- "textureSampleLevel(t : texture_cube_array<f32>,\n"
- " s : sampler,\n"
- " coords : vec3<f32>,\n"
- " array_index : i32,\n"
- " level : f32) -> vec4<f32>",
- TextureKind::kRegular,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::kCubeArray,
- TextureDataType::kF32,
- "textureSampleLevel",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- "sampler", // s
- b->vec3<f32>(1.f, 2.f, 3.f), // coords
- 4, // array_index
- 5.f); // level
- },
- },
- {
- ValidTextureOverload::kSampleLevelDepth2dF32,
- "textureSampleLevel(t : texture_depth_2d,\n"
- " s : sampler,\n"
- " coords : vec2<f32>,\n"
- " level : i32) -> f32",
- TextureKind::kDepth,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::k2d,
- TextureDataType::kF32,
- "textureSampleLevel",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- "sampler", // s
- b->vec2<f32>(1.f, 2.f), // coords
- 3); // level
- },
- },
- {
- ValidTextureOverload::kSampleLevelDepth2dOffsetF32,
- "textureSampleLevel(t : texture_depth_2d,\n"
- " s : sampler,\n"
- " coords : vec2<f32>,\n"
- " level : i32,\n"
- " offset : vec2<i32>) -> f32",
- TextureKind::kDepth,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::k2d,
- TextureDataType::kF32,
- "textureSampleLevel",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- "sampler", // s
- b->vec2<f32>(1.f, 2.f), // coords
- 3, // level
- b->vec2<i32>(4, 5)); // offset
- },
- },
- {
- ValidTextureOverload::kSampleLevelDepth2dArrayF32,
- "textureSampleLevel(t : texture_depth_2d_array,\n"
- " s : sampler,\n"
- " coords : vec2<f32>,\n"
- " array_index : i32,\n"
- " level : i32) -> f32",
- TextureKind::kDepth,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::k2dArray,
- TextureDataType::kF32,
- "textureSampleLevel",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- "sampler", // s
- b->vec2<f32>(1.f, 2.f), // coords
- 3, // array_index
- 4); // level
- },
- },
- {
- ValidTextureOverload::kSampleLevelDepth2dArrayOffsetF32,
- "textureSampleLevel(t : texture_depth_2d_array,\n"
- " s : sampler,\n"
- " coords : vec2<f32>,\n"
- " array_index : i32,\n"
- " level : i32,\n"
- " offset : vec2<i32>) -> f32",
- TextureKind::kDepth,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::k2dArray,
- TextureDataType::kF32,
- "textureSampleLevel",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- "sampler", // s
- b->vec2<f32>(1.f, 2.f), // coords
- 3, // array_index
- 4, // level
- b->vec2<i32>(5, 6)); // offset
- },
- },
- {
- ValidTextureOverload::kSampleLevelDepthCubeF32,
- "textureSampleLevel(t : texture_depth_cube,\n"
- " s : sampler,\n"
- " coords : vec3<f32>,\n"
- " level : i32) -> f32",
- TextureKind::kDepth,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::kCube,
- TextureDataType::kF32,
- "textureSampleLevel",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- "sampler", // s
- b->vec3<f32>(1.f, 2.f, 3.f), // coords
- 4); // level
- },
- },
- {
- ValidTextureOverload::kSampleLevelDepthCubeArrayF32,
- "textureSampleLevel(t : texture_depth_cube_array,\n"
- " s : sampler,\n"
- " coords : vec3<f32>,\n"
- " array_index : i32,\n"
- " level : i32) -> f32",
- TextureKind::kDepth,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::kCubeArray,
- TextureDataType::kF32,
- "textureSampleLevel",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- "sampler", // s
- b->vec3<f32>(1.f, 2.f, 3.f), // coords
- 4, // array_index
- 5); // level
- },
- },
- {
- ValidTextureOverload::kSampleGrad2dF32,
- "textureSampleGrad(t : texture_2d<f32>,\n"
- " s : sampler,\n"
- " coords : vec2<f32>\n"
- " ddx : vec2<f32>,\n"
- " ddy : vec2<f32>) -> vec4<f32>",
- TextureKind::kRegular,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::k2d,
- TextureDataType::kF32,
- "textureSampleGrad",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- "sampler", // s
- b->vec2<f32>(1.0f, 2.0f), // coords
- b->vec2<f32>(3.0f, 4.0f), // ddx
- b->vec2<f32>(5.0f, 6.0f)); // ddy
- },
- },
- {
- ValidTextureOverload::kSampleGrad2dOffsetF32,
- "textureSampleGrad(t : texture_2d<f32>,\n"
- " s : sampler,\n"
- " coords : vec2<f32>,\n"
- " ddx : vec2<f32>,\n"
- " ddy : vec2<f32>,\n"
- " offset : vec2<i32>) -> vec4<f32>",
- TextureKind::kRegular,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::k2d,
- TextureDataType::kF32,
- "textureSampleGrad",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- "sampler", // s
- b->vec2<f32>(1.f, 2.f), // coords
- b->vec2<f32>(3.f, 4.f), // ddx
- b->vec2<f32>(5.f, 6.f), // ddy
- b->vec2<i32>(7, 7)); // offset
- },
- },
- {
- ValidTextureOverload::kSampleGrad2dArrayF32,
- "textureSampleGrad(t : texture_2d_array<f32>,\n"
- " s : sampler,\n"
- " coords : vec2<f32>,\n"
- " array_index : i32,\n"
- " ddx : vec2<f32>,\n"
- " ddy : vec2<f32>) -> vec4<f32>",
- TextureKind::kRegular,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::k2dArray,
- TextureDataType::kF32,
- "textureSampleGrad",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- "sampler", // s
- b->vec2<f32>(1.f, 2.f), // coords
- 3, // array_index
- b->vec2<f32>(4.f, 5.f), // ddx
- b->vec2<f32>(6.f, 7.f)); // ddy
- },
- },
- {
- ValidTextureOverload::kSampleGrad2dArrayOffsetF32,
- "textureSampleGrad(t : texture_2d_array<f32>,\n"
- " s : sampler,\n"
- " coords : vec2<f32>,\n"
- " array_index : i32,\n"
- " ddx : vec2<f32>,\n"
- " ddy : vec2<f32>,\n"
- " offset : vec2<i32>) -> vec4<f32>",
- TextureKind::kRegular,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::k2dArray,
- TextureDataType::kF32,
- "textureSampleGrad",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- "sampler", // s
- b->vec2<f32>(1.f, 2.f), // coords
- 3, // array_index
- b->vec2<f32>(4.f, 5.f), // ddx
- b->vec2<f32>(6.f, 7.f), // ddy
- b->vec2<i32>(6, 7)); // offset
- },
- },
- {
- ValidTextureOverload::kSampleGrad3dF32,
- "textureSampleGrad(t : texture_3d<f32>,\n"
- " s : sampler,\n"
- " coords : vec3<f32>,\n"
- " ddx : vec3<f32>,\n"
- " ddy : vec3<f32>) -> vec4<f32>",
- TextureKind::kRegular,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::k3d,
- TextureDataType::kF32,
- "textureSampleGrad",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- "sampler", // s
- b->vec3<f32>(1.f, 2.f, 3.f), // coords
- b->vec3<f32>(4.f, 5.f, 6.f), // ddx
- b->vec3<f32>(7.f, 8.f, 9.f)); // ddy
- },
- },
- {
- ValidTextureOverload::kSampleGrad3dOffsetF32,
- "textureSampleGrad(t : texture_3d<f32>,\n"
- " s : sampler,\n"
- " coords : vec3<f32>,\n"
- " ddx : vec3<f32>,\n"
- " ddy : vec3<f32>,\n"
- " offset : vec3<i32>) -> vec4<f32>",
- TextureKind::kRegular,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::k3d,
- TextureDataType::kF32,
- "textureSampleGrad",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- "sampler", // s
- b->vec3<f32>(1.f, 2.f, 3.f), // coords
- b->vec3<f32>(4.f, 5.f, 6.f), // ddx
- b->vec3<f32>(7.f, 8.f, 9.f), // ddy
- b->vec3<i32>(0, 1, 2)); // offset
- },
- },
- {
- ValidTextureOverload::kSampleGradCubeF32,
- "textureSampleGrad(t : texture_cube<f32>,\n"
- " s : sampler,\n"
- " coords : vec3<f32>,\n"
- " ddx : vec3<f32>,\n"
- " ddy : vec3<f32>) -> vec4<f32>",
- TextureKind::kRegular,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::kCube,
- TextureDataType::kF32,
- "textureSampleGrad",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- "sampler", // s
- b->vec3<f32>(1.f, 2.f, 3.f), // coords
- b->vec3<f32>(4.f, 5.f, 6.f), // ddx
- b->vec3<f32>(7.f, 8.f, 9.f)); // ddy
- },
- },
- {
- ValidTextureOverload::kSampleGradCubeArrayF32,
- "textureSampleGrad(t : texture_cube_array<f32>,\n"
- " s : sampler,\n"
- " coords : vec3<f32>,\n"
- " array_index : i32,\n"
- " ddx : vec3<f32>,\n"
- " ddy : vec3<f32>) -> vec4<f32>",
- TextureKind::kRegular,
- ast::SamplerKind::kSampler,
- ast::TextureDimension::kCubeArray,
- TextureDataType::kF32,
- "textureSampleGrad",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- "sampler", // s
- b->vec3<f32>(1.f, 2.f, 3.f), // coords
- 4, // array_index
- b->vec3<f32>(5.f, 6.f, 7.f), // ddx
- b->vec3<f32>(8.f, 9.f, 10.f)); // ddy
- },
- },
- {
- ValidTextureOverload::kSampleCompareDepth2dF32,
- "textureSampleCompare(t : texture_depth_2d,\n"
- " s : sampler_comparison,\n"
- " coords : vec2<f32>,\n"
- " depth_ref : f32) -> f32",
- TextureKind::kDepth,
- ast::SamplerKind::kComparisonSampler,
- ast::TextureDimension::k2d,
- TextureDataType::kF32,
- "textureSampleCompare",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- "sampler", // s
- b->vec2<f32>(1.f, 2.f), // coords
- 3.f); // depth_ref
- },
- },
- {
- ValidTextureOverload::kSampleCompareDepth2dOffsetF32,
- "textureSampleCompare(t : texture_depth_2d,\n"
- " s : sampler_comparison,\n"
- " coords : vec2<f32>,\n"
- " depth_ref : f32,\n"
- " offset : vec2<i32>) -> f32",
- TextureKind::kDepth,
- ast::SamplerKind::kComparisonSampler,
- ast::TextureDimension::k2d,
- TextureDataType::kF32,
- "textureSampleCompare",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- "sampler", // s
- b->vec2<f32>(1.f, 2.f), // coords
- 3.f, // depth_ref
- b->vec2<i32>(4, 5)); // offset
- },
- },
- {
- ValidTextureOverload::kSampleCompareDepth2dArrayF32,
- "textureSampleCompare(t : texture_depth_2d_array,\n"
- " s : sampler_comparison,\n"
- " coords : vec2<f32>,\n"
- " array_index : i32,\n"
- " depth_ref : f32) -> f32",
- TextureKind::kDepth,
- ast::SamplerKind::kComparisonSampler,
- ast::TextureDimension::k2dArray,
- TextureDataType::kF32,
- "textureSampleCompare",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- "sampler", // s
- b->vec2<f32>(1.f, 2.f), // coords
- 4, // array_index
- 3.f); // depth_ref
- },
- },
- {
- ValidTextureOverload::kSampleCompareDepth2dArrayOffsetF32,
- "textureSampleCompare(t : texture_depth_2d_array,\n"
- " s : sampler_comparison,\n"
- " coords : vec2<f32>,\n"
- " array_index : i32,\n"
- " depth_ref : f32,\n"
- " offset : vec2<i32>) -> f32",
- TextureKind::kDepth,
- ast::SamplerKind::kComparisonSampler,
- ast::TextureDimension::k2dArray,
- TextureDataType::kF32,
- "textureSampleCompare",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- "sampler", // s
- b->vec2<f32>(1.f, 2.f), // coords
- 4, // array_index
- 3.f, // depth_ref
- b->vec2<i32>(5, 6)); // offset
- },
- },
- {
- ValidTextureOverload::kSampleCompareDepthCubeF32,
- "textureSampleCompare(t : texture_depth_cube,\n"
- " s : sampler_comparison,\n"
- " coords : vec3<f32>,\n"
- " depth_ref : f32) -> f32",
- TextureKind::kDepth,
- ast::SamplerKind::kComparisonSampler,
- ast::TextureDimension::kCube,
- TextureDataType::kF32,
- "textureSampleCompare",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- "sampler", // s
- b->vec3<f32>(1.f, 2.f, 3.f), // coords
- 4.f); // depth_ref
- },
- },
- {
- ValidTextureOverload::kSampleCompareDepthCubeArrayF32,
- "textureSampleCompare(t : texture_depth_cube_array,\n"
- " s : sampler_comparison,\n"
- " coords : vec3<f32>,\n"
- " array_index : i32,\n"
- " depth_ref : f32) -> f32",
- TextureKind::kDepth,
- ast::SamplerKind::kComparisonSampler,
- ast::TextureDimension::kCubeArray,
- TextureDataType::kF32,
- "textureSampleCompare",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- "sampler", // s
- b->vec3<f32>(1.f, 2.f, 3.f), // coords
- 4, // array_index
- 5.f); // depth_ref
- },
- },
- {
- ValidTextureOverload::kLoad1dLevelF32,
- "textureLoad(t : texture_1d<f32>,\n"
- " coords : i32,\n"
- " level : i32) -> vec4<f32>",
- TextureKind::kRegular,
- ast::TextureDimension::k1d,
- TextureDataType::kF32,
- "textureLoad",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- 1, // coords
- 3); // level
- },
- },
- {
- ValidTextureOverload::kLoad1dLevelU32,
- "textureLoad(t : texture_1d<u32>,\n"
- " coords : i32,\n"
- " level : i32) -> vec4<u32>",
- TextureKind::kRegular,
- ast::TextureDimension::k1d,
- TextureDataType::kU32,
- "textureLoad",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- 1, // coords
- 3); // level
- },
- },
- {
- ValidTextureOverload::kLoad1dLevelI32,
- "textureLoad(t : texture_1d<i32>,\n"
- " coords : i32,\n"
- " level : i32) -> vec4<i32>",
- TextureKind::kRegular,
- ast::TextureDimension::k1d,
- TextureDataType::kI32,
- "textureLoad",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- 1, // coords
- 3); // level
- },
- },
- {
- ValidTextureOverload::kLoad2dLevelF32,
- "textureLoad(t : texture_2d<f32>,\n"
- " coords : vec2<i32>,\n"
- " level : i32) -> vec4<f32>",
- TextureKind::kRegular,
- ast::TextureDimension::k2d,
- TextureDataType::kF32,
- "textureLoad",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- b->vec2<i32>(1, 2), // coords
- 3); // level
- },
- },
- {
- ValidTextureOverload::kLoad2dLevelU32,
- "textureLoad(t : texture_2d<u32>,\n"
- " coords : vec2<i32>,\n"
- " level : i32) -> vec4<u32>",
- TextureKind::kRegular,
- ast::TextureDimension::k2d,
- TextureDataType::kU32,
- "textureLoad",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- b->vec2<i32>(1, 2), // coords
- 3); // level
- },
- },
- {
- ValidTextureOverload::kLoad2dLevelI32,
- "textureLoad(t : texture_2d<i32>,\n"
- " coords : vec2<i32>,\n"
- " level : i32) -> vec4<i32>",
- TextureKind::kRegular,
- ast::TextureDimension::k2d,
- TextureDataType::kI32,
- "textureLoad",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- b->vec2<i32>(1, 2), // coords
- 3); // level
- },
- },
- {
- ValidTextureOverload::kLoad2dArrayLevelF32,
- "textureLoad(t : texture_2d_array<f32>,\n"
- " coords : vec2<i32>,\n"
- " array_index : i32,\n"
- " level : i32) -> vec4<f32>",
- TextureKind::kRegular,
- ast::TextureDimension::k2dArray,
- TextureDataType::kF32,
- "textureLoad",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- b->vec2<i32>(1, 2), // coords
- 3, // array_index
- 4); // level
- },
- },
- {
- ValidTextureOverload::kLoad2dArrayLevelU32,
- "textureLoad(t : texture_2d_array<u32>,\n"
- " coords : vec2<i32>,\n"
- " array_index : i32,\n"
- " level : i32) -> vec4<u32>",
- TextureKind::kRegular,
- ast::TextureDimension::k2dArray,
- TextureDataType::kU32,
- "textureLoad",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- b->vec2<i32>(1, 2), // coords
- 3, // array_index
- 4); // level
- },
- },
- {
- ValidTextureOverload::kLoad2dArrayLevelI32,
- "textureLoad(t : texture_2d_array<i32>,\n"
- " coords : vec2<i32>,\n"
- " array_index : i32,\n"
- " level : i32) -> vec4<i32>",
- TextureKind::kRegular,
- ast::TextureDimension::k2dArray,
- TextureDataType::kI32,
- "textureLoad",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- b->vec2<i32>(1, 2), // coords
- 3, // array_index
- 4); // level
- },
- },
- {
- ValidTextureOverload::kLoad3dLevelF32,
- "textureLoad(t : texture_3d<f32>,\n"
- " coords : vec3<i32>,\n"
- " level : i32) -> vec4<f32>",
- TextureKind::kRegular,
- ast::TextureDimension::k3d,
- TextureDataType::kF32,
- "textureLoad",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- b->vec3<i32>(1, 2, 3), // coords
- 4); // level
- },
- },
- {
- ValidTextureOverload::kLoad3dLevelU32,
- "textureLoad(t : texture_3d<u32>,\n"
- " coords : vec3<i32>,\n"
- " level : i32) -> vec4<u32>",
- TextureKind::kRegular,
- ast::TextureDimension::k3d,
- TextureDataType::kU32,
- "textureLoad",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- b->vec3<i32>(1, 2, 3), // coords
- 4); // level
- },
- },
- {
- ValidTextureOverload::kLoad3dLevelI32,
- "textureLoad(t : texture_3d<i32>,\n"
- " coords : vec3<i32>,\n"
- " level : i32) -> vec4<i32>",
- TextureKind::kRegular,
- ast::TextureDimension::k3d,
- TextureDataType::kI32,
- "textureLoad",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- b->vec3<i32>(1, 2, 3), // coords
- 4); // level
- },
- },
- {
- ValidTextureOverload::kLoadMultisampled2dF32,
- "textureLoad(t : texture_multisampled_2d<f32>,\n"
- " coords : vec2<i32>,\n"
- " sample_index : i32) -> vec4<f32>",
- TextureKind::kMultisampled,
- ast::TextureDimension::k2d,
- TextureDataType::kF32,
- "textureLoad",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- b->vec2<i32>(1, 2), // coords
- 3); // sample_index
- },
- },
- {
- ValidTextureOverload::kLoadMultisampled2dU32,
- "textureLoad(t : texture_multisampled_2d<u32>,\n"
- " coords : vec2<i32>,\n"
- " sample_index : i32) -> vec4<u32>",
- TextureKind::kMultisampled,
- ast::TextureDimension::k2d,
- TextureDataType::kU32,
- "textureLoad",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- b->vec2<i32>(1, 2), // coords
- 3); // sample_index
- },
- },
- {
- ValidTextureOverload::kLoadMultisampled2dI32,
- "textureLoad(t : texture_multisampled_2d<i32>,\n"
- " coords : vec2<i32>,\n"
- " sample_index : i32) -> vec4<i32>",
- TextureKind::kMultisampled,
- ast::TextureDimension::k2d,
- TextureDataType::kI32,
- "textureLoad",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- b->vec2<i32>(1, 2), // coords
- 3); // sample_index
- },
- },
- {
- ValidTextureOverload::kLoadDepth2dLevelF32,
- "textureLoad(t : texture_depth_2d,\n"
- " coords : vec2<i32>,\n"
- " level : i32) -> f32",
- TextureKind::kDepth,
- ast::TextureDimension::k2d,
- TextureDataType::kF32,
- "textureLoad",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- b->vec2<i32>(1, 2), // coords
- 3); // level
- },
- },
- {
- ValidTextureOverload::kLoadDepth2dArrayLevelF32,
- "textureLoad(t : texture_depth_2d_array,\n"
- " coords : vec2<i32>,\n"
- " array_index : i32,\n"
- " level : i32) -> f32",
- TextureKind::kDepth,
- ast::TextureDimension::k2dArray,
- TextureDataType::kF32,
- "textureLoad",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- b->vec2<i32>(1, 2), // coords
- 3, // array_index
- 4); // level
- },
- },
- {
- ValidTextureOverload::kStoreWO1dRgba32float,
- "textureStore(t : texture_storage_1d<rgba32float>,\n"
- " coords : i32,\n"
- " value : vec4<T>)",
- ast::Access::kWrite,
- ast::TexelFormat::kRgba32Float,
- ast::TextureDimension::k1d,
- TextureDataType::kF32,
- "textureStore",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- 1, // coords
- b->vec4<f32>(2.f, 3.f, 4.f, 5.f)); // value
- },
- },
- {
- ValidTextureOverload::kStoreWO2dRgba32float,
- "textureStore(t : texture_storage_2d<rgba32float>,\n"
- " coords : vec2<i32>,\n"
- " value : vec4<T>)",
- ast::Access::kWrite,
- ast::TexelFormat::kRgba32Float,
- ast::TextureDimension::k2d,
- TextureDataType::kF32,
- "textureStore",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- b->vec2<i32>(1, 2), // coords
- b->vec4<f32>(3.f, 4.f, 5.f, 6.f)); // value
- },
- },
- {
- ValidTextureOverload::kStoreWO2dArrayRgba32float,
- "textureStore(t : texture_storage_2d_array<rgba32float>,\n"
- " coords : vec2<i32>,\n"
- " array_index : i32,\n"
- " value : vec4<T>)",
- ast::Access::kWrite,
- ast::TexelFormat::kRgba32Float,
- ast::TextureDimension::k2dArray,
- TextureDataType::kF32,
- "textureStore",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- b->vec2<i32>(1, 2), // coords
- 3, // array_index
- b->vec4<f32>(4.f, 5.f, 6.f, 7.f)); // value
- },
- },
- {
- ValidTextureOverload::kStoreWO3dRgba32float,
- "textureStore(t : texture_storage_3d<rgba32float>,\n"
- " coords : vec3<i32>,\n"
- " value : vec4<T>)",
- ast::Access::kWrite,
- ast::TexelFormat::kRgba32Float,
- ast::TextureDimension::k3d,
- TextureDataType::kF32,
- "textureStore",
- [](ProgramBuilder* b) {
- return b->ExprList("texture", // t
- b->vec3<i32>(1, 2, 3), // coords
- b->vec4<f32>(4.f, 5.f, 6.f, 7.f)); // value
- },
- },
- };
+ {
+ ValidTextureOverload::kGather2dF32,
+ "textureGather(component : i32,\n"
+ " t : texture_2d<T>,\n"
+ " s : sampler,\n"
+ " coords : vec2<f32>) -> vec4<T>",
+ TextureKind::kRegular,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::k2d,
+ TextureDataType::kF32,
+ "textureGather",
+ [](ProgramBuilder* b) {
+ return b->ExprList(0_i, // component
+ "texture", // t
+ "sampler", // s
+ b->vec2<f32>(1_f, 2_f)); // coords
+ },
+ },
+ {
+ ValidTextureOverload::kGather2dOffsetF32,
+ "textureGather(component : i32,\n"
+ " t : texture_2d<T>,\n"
+ " s : sampler,\n"
+ " coords : vec2<f32>,\n"
+ " offset : vec2<i32>) -> vec4<T>",
+ TextureKind::kRegular,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::k2d,
+ TextureDataType::kF32,
+ "textureGather",
+ [](ProgramBuilder* b) {
+ return b->ExprList(0_i, // component
+ "texture", // t
+ "sampler", // s
+ b->vec2<f32>(1_f, 2_f), // coords
+ b->vec2<i32>(3_i, 4_i)); // offset
+ },
+ },
+ {
+ ValidTextureOverload::kGather2dArrayF32,
+ "textureGather(component : i32,\n"
+ " t : texture_2d_array<T>,\n"
+ " s : sampler,\n"
+ " coords : vec2<f32>,\n"
+ " array_index : i32) -> vec4<T>",
+ TextureKind::kRegular,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::k2dArray,
+ TextureDataType::kF32,
+ "textureGather",
+ [](ProgramBuilder* b) {
+ return b->ExprList(0_i, // component
+ "texture", // t
+ "sampler", // s
+ b->vec2<f32>(1_f, 2_f), // coords
+ 3_i); // array index
+ },
+ },
+ {
+ ValidTextureOverload::kGather2dArrayOffsetF32,
+ "textureGather(component : i32,\n"
+ " t : texture_2d_array<T>,\n"
+ " s : sampler,\n"
+ " coords : vec2<f32>,\n"
+ " array_index : i32,\n"
+ " offset : vec2<i32>) -> vec4<T>",
+ TextureKind::kRegular,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::k2dArray,
+ TextureDataType::kF32,
+ "textureGather",
+ [](ProgramBuilder* b) {
+ return b->ExprList(0_i, // component
+ "texture", // t
+ "sampler", // s
+ b->vec2<f32>(1_f, 2_f), // coords
+ 3_i, // array_index
+ b->vec2<i32>(4_i, 5_i)); // offset
+ },
+ },
+ {
+ ValidTextureOverload::kGatherCubeF32,
+ "textureGather(component : i32,\n"
+ " t : texture_cube<T>,\n"
+ " s : sampler,\n"
+ " coords : vec3<f32>) -> vec4<T>",
+ TextureKind::kRegular,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::kCube,
+ TextureDataType::kF32,
+ "textureGather",
+ [](ProgramBuilder* b) {
+ return b->ExprList(0_i, // component
+ "texture", // t
+ "sampler", // s
+ b->vec3<f32>(1_f, 2_f, 3_f)); // coords
+ },
+ },
+ {
+ ValidTextureOverload::kGatherCubeArrayF32,
+ "textureGather(component : i32,\n"
+ " t : texture_cube_array<T>,\n"
+ " s : sampler,\n"
+ " coords : vec3<f32>,\n"
+ " array_index : i32) -> vec4<T>",
+ TextureKind::kRegular,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::kCubeArray,
+ TextureDataType::kF32,
+ "textureGather",
+ [](ProgramBuilder* b) {
+ return b->ExprList(0_i, // component
+ "texture", // t
+ "sampler", // s
+ b->vec3<f32>(1_f, 2_f, 3_f), // coords
+ 4_i); // array_index
+ },
+ },
+ {
+ ValidTextureOverload::kGatherDepth2dF32,
+ "textureGather(t : texture_depth_2d,\n"
+ " s : sampler,\n"
+ " coords : vec2<f32>) -> vec4<f32>",
+ TextureKind::kDepth,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::k2d,
+ TextureDataType::kF32,
+ "textureGather",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ "sampler", // s
+ b->vec2<f32>(1_f, 2_f)); // coords
+ },
+ },
+ {
+ ValidTextureOverload::kGatherDepth2dOffsetF32,
+ "textureGather(t : texture_depth_2d,\n"
+ " s : sampler,\n"
+ " coords : vec2<f32>,\n"
+ " offset : vec2<i32>) -> vec4<f32>",
+ TextureKind::kDepth,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::k2d,
+ TextureDataType::kF32,
+ "textureGather",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ "sampler", // s
+ b->vec2<f32>(1_f, 2_f), // coords
+ b->vec2<i32>(3_i, 4_i)); // offset
+ },
+ },
+ {
+ ValidTextureOverload::kGatherDepth2dArrayF32,
+ "textureGather(t : texture_depth_2d_array,\n"
+ " s : sampler,\n"
+ " coords : vec2<f32>,\n"
+ " array_index : i32) -> vec4<f32>",
+ TextureKind::kDepth,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::k2dArray,
+ TextureDataType::kF32,
+ "textureGather",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ "sampler", // s
+ b->vec2<f32>(1_f, 2_f), // coords
+ 3_i); // array_index
+ },
+ },
+ {
+ ValidTextureOverload::kGatherDepth2dArrayOffsetF32,
+ "textureGather(t : texture_depth_2d_array,\n"
+ " s : sampler,\n"
+ " coords : vec2<f32>,\n"
+ " array_index : i32,\n"
+ " offset : vec2<i32>) -> vec4<f32>",
+ TextureKind::kDepth,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::k2dArray,
+ TextureDataType::kF32,
+ "textureGather",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ "sampler", // s
+ b->vec2<f32>(1_f, 2_f), // coords
+ 3_i, // array_index
+ b->vec2<i32>(4_i, 5_i)); // offset
+ },
+ },
+ {
+ ValidTextureOverload::kGatherDepthCubeF32,
+ "textureGather(t : texture_depth_cube,\n"
+ " s : sampler,\n"
+ " coords : vec3<f32>) -> vec4<f32>",
+ TextureKind::kDepth,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::kCube,
+ TextureDataType::kF32,
+ "textureGather",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ "sampler", // s
+ b->vec3<f32>(1_f, 2_f, 3_f)); // coords
+ },
+ },
+ {
+ ValidTextureOverload::kGatherDepthCubeArrayF32,
+ "textureGather(t : texture_depth_cube_array,\n"
+ " s : sampler,\n"
+ " coords : vec3<f32>,\n"
+ " array_index : i32) -> vec4<f32>",
+ TextureKind::kDepth,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::kCubeArray,
+ TextureDataType::kF32,
+ "textureGather",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ "sampler", // s
+ b->vec3<f32>(1_f, 2_f, 3_f), // coords
+ 4_i); // array_index
+ },
+ },
+ {
+ ValidTextureOverload::kGatherCompareDepth2dF32,
+ "textureGatherCompare(t : texture_depth_2d,\n"
+ " s : sampler_comparison,\n"
+ " coords : vec2<f32>,\n"
+ " depth_ref : f32) -> vec4<f32>",
+ TextureKind::kDepth,
+ ast::SamplerKind::kComparisonSampler,
+ ast::TextureDimension::k2d,
+ TextureDataType::kF32,
+ "textureGatherCompare",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ "sampler", // s
+ b->vec2<f32>(1_f, 2_f), // coords
+ 3_f); // depth_ref
+ },
+ },
+ {
+ ValidTextureOverload::kGatherCompareDepth2dOffsetF32,
+ "textureGatherCompare(t : texture_depth_2d,\n"
+ " s : sampler_comparison,\n"
+ " coords : vec2<f32>,\n"
+ " depth_ref : f32,\n"
+ " offset : vec2<i32>) -> vec4<f32>",
+ TextureKind::kDepth,
+ ast::SamplerKind::kComparisonSampler,
+ ast::TextureDimension::k2d,
+ TextureDataType::kF32,
+ "textureGatherCompare",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ "sampler", // s
+ b->vec2<f32>(1_f, 2_f), // coords
+ 3_f, // depth_ref
+ b->vec2<i32>(4_i, 5_i)); // offset
+ },
+ },
+ {
+ ValidTextureOverload::kGatherCompareDepth2dArrayF32,
+ "textureGatherCompare(t : texture_depth_2d_array,\n"
+ " s : sampler_comparison,\n"
+ " coords : vec2<f32>,\n"
+ " array_index : i32,\n"
+ " depth_ref : f32) -> vec4<f32>",
+ TextureKind::kDepth,
+ ast::SamplerKind::kComparisonSampler,
+ ast::TextureDimension::k2dArray,
+ TextureDataType::kF32,
+ "textureGatherCompare",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ "sampler", // s
+ b->vec2<f32>(1_f, 2_f), // coords
+ 3_i, // array_index
+ 4_f); // depth_ref
+ },
+ },
+ {
+ ValidTextureOverload::kGatherCompareDepth2dArrayOffsetF32,
+ "textureGatherCompare(t : texture_depth_2d_array,\n"
+ " s : sampler_comparison,\n"
+ " coords : vec2<f32>,\n"
+ " array_index : i32,\n"
+ " depth_ref : f32,\n"
+ " offset : vec2<i32>) -> vec4<f32>",
+ TextureKind::kDepth,
+ ast::SamplerKind::kComparisonSampler,
+ ast::TextureDimension::k2dArray,
+ TextureDataType::kF32,
+ "textureGatherCompare",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ "sampler", // s
+ b->vec2<f32>(1_f, 2_f), // coords
+ 3_i, // array_index
+ 4_f, // depth_ref
+ b->vec2<i32>(5_i, 6_i)); // offset
+ },
+ },
+ {
+ ValidTextureOverload::kGatherCompareDepthCubeF32,
+ "textureGatherCompare(t : texture_depth_cube,\n"
+ " s : sampler_comparison,\n"
+ " coords : vec3<f32>,\n"
+ " depth_ref : f32) -> vec4<f32>",
+ TextureKind::kDepth,
+ ast::SamplerKind::kComparisonSampler,
+ ast::TextureDimension::kCube,
+ TextureDataType::kF32,
+ "textureGatherCompare",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ "sampler", // s
+ b->vec3<f32>(1_f, 2_f, 3_f), // coords
+ 4_f); // depth_ref
+ },
+ },
+ {
+ ValidTextureOverload::kGatherCompareDepthCubeArrayF32,
+ "textureGatherCompare(t : texture_depth_cube_array,\n"
+ " s : sampler_comparison,\n"
+ " coords : vec3<f32>,\n"
+ " array_index : i32,\n"
+ " depth_ref : f32) -> vec4<f32>",
+ TextureKind::kDepth,
+ ast::SamplerKind::kComparisonSampler,
+ ast::TextureDimension::kCubeArray,
+ TextureDataType::kF32,
+ "textureGatherCompare",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ "sampler", // s
+ b->vec3<f32>(1_f, 2_f, 3_f), // coords
+ 4_i, // array_index
+ 5_f); // depth_ref
+ },
+ },
+ {
+ ValidTextureOverload::kNumLayers2dArray,
+ "textureNumLayers(t : texture_2d_array<f32>) -> i32",
+ TextureKind::kRegular,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::k2dArray,
+ TextureDataType::kF32,
+ "textureNumLayers",
+ [](ProgramBuilder* b) { return b->ExprList("texture"); },
+ },
+ {
+ ValidTextureOverload::kNumLayersCubeArray,
+ "textureNumLayers(t : texture_cube_array<f32>) -> i32",
+ TextureKind::kRegular,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::kCubeArray,
+ TextureDataType::kF32,
+ "textureNumLayers",
+ [](ProgramBuilder* b) { return b->ExprList("texture"); },
+ },
+ {
+ ValidTextureOverload::kNumLayersDepth2dArray,
+ "textureNumLayers(t : texture_depth_2d_array) -> i32",
+ TextureKind::kDepth,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::k2dArray,
+ TextureDataType::kF32,
+ "textureNumLayers",
+ [](ProgramBuilder* b) { return b->ExprList("texture"); },
+ },
+ {
+ ValidTextureOverload::kNumLayersDepthCubeArray,
+ "textureNumLayers(t : texture_depth_cube_array) -> i32",
+ TextureKind::kDepth,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::kCubeArray,
+ TextureDataType::kF32,
+ "textureNumLayers",
+ [](ProgramBuilder* b) { return b->ExprList("texture"); },
+ },
+ {
+ ValidTextureOverload::kNumLayersStorageWO2dArray,
+ "textureNumLayers(t : texture_storage_2d_array<rgba32float>) -> i32",
+ ast::Access::kWrite,
+ ast::TexelFormat::kRgba32Float,
+ ast::TextureDimension::k2dArray,
+ TextureDataType::kF32,
+ "textureNumLayers",
+ [](ProgramBuilder* b) { return b->ExprList("texture"); },
+ },
+ {
+ ValidTextureOverload::kNumLevels2d,
+ "textureNumLevels(t : texture_2d<f32>) -> i32",
+ TextureKind::kRegular,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::k2d,
+ TextureDataType::kF32,
+ "textureNumLevels",
+ [](ProgramBuilder* b) { return b->ExprList("texture"); },
+ },
+ {
+ ValidTextureOverload::kNumLevels2dArray,
+ "textureNumLevels(t : texture_2d_array<f32>) -> i32",
+ TextureKind::kRegular,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::k2dArray,
+ TextureDataType::kF32,
+ "textureNumLevels",
+ [](ProgramBuilder* b) { return b->ExprList("texture"); },
+ },
+ {
+ ValidTextureOverload::kNumLevels3d,
+ "textureNumLevels(t : texture_3d<f32>) -> i32",
+ TextureKind::kRegular,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::k3d,
+ TextureDataType::kF32,
+ "textureNumLevels",
+ [](ProgramBuilder* b) { return b->ExprList("texture"); },
+ },
+ {
+ ValidTextureOverload::kNumLevelsCube,
+ "textureNumLevels(t : texture_cube<f32>) -> i32",
+ TextureKind::kRegular,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::kCube,
+ TextureDataType::kF32,
+ "textureNumLevels",
+ [](ProgramBuilder* b) { return b->ExprList("texture"); },
+ },
+ {
+ ValidTextureOverload::kNumLevelsCubeArray,
+ "textureNumLevels(t : texture_cube_array<f32>) -> i32",
+ TextureKind::kRegular,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::kCubeArray,
+ TextureDataType::kF32,
+ "textureNumLevels",
+ [](ProgramBuilder* b) { return b->ExprList("texture"); },
+ },
+ {
+ ValidTextureOverload::kNumLevelsDepth2d,
+ "textureNumLevels(t : texture_depth_2d) -> i32",
+ TextureKind::kDepth,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::k2d,
+ TextureDataType::kF32,
+ "textureNumLevels",
+ [](ProgramBuilder* b) { return b->ExprList("texture"); },
+ },
+ {
+ ValidTextureOverload::kNumLevelsDepth2dArray,
+ "textureNumLevels(t : texture_depth_2d_array) -> i32",
+ TextureKind::kDepth,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::k2dArray,
+ TextureDataType::kF32,
+ "textureNumLevels",
+ [](ProgramBuilder* b) { return b->ExprList("texture"); },
+ },
+ {
+ ValidTextureOverload::kNumLevelsDepthCube,
+ "textureNumLevels(t : texture_depth_cube) -> i32",
+ TextureKind::kDepth,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::kCube,
+ TextureDataType::kF32,
+ "textureNumLevels",
+ [](ProgramBuilder* b) { return b->ExprList("texture"); },
+ },
+ {
+ ValidTextureOverload::kNumLevelsDepthCubeArray,
+ "textureNumLevels(t : texture_depth_cube_array) -> i32",
+ TextureKind::kDepth,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::kCubeArray,
+ TextureDataType::kF32,
+ "textureNumLevels",
+ [](ProgramBuilder* b) { return b->ExprList("texture"); },
+ },
+ {
+ ValidTextureOverload::kNumSamplesMultisampled2d,
+ "textureNumSamples(t : texture_multisampled_2d<f32>) -> i32",
+ TextureKind::kMultisampled,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::k2d,
+ TextureDataType::kF32,
+ "textureNumSamples",
+ [](ProgramBuilder* b) { return b->ExprList("texture"); },
+ },
+ {
+ ValidTextureOverload::kSample1dF32,
+ "textureSample(t : texture_1d<f32>,\n"
+ " s : sampler,\n"
+ " coords : f32) -> vec4<f32>",
+ TextureKind::kRegular,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::k1d,
+ TextureDataType::kF32,
+ "textureSample",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ "sampler", // s
+ 1_f); // coords
+ },
+ },
+ {
+ ValidTextureOverload::kSample2dF32,
+ "textureSample(t : texture_2d<f32>,\n"
+ " s : sampler,\n"
+ " coords : vec2<f32>) -> vec4<f32>",
+ TextureKind::kRegular,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::k2d,
+ TextureDataType::kF32,
+ "textureSample",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ "sampler", // s
+ b->vec2<f32>(1_f, 2_f)); // coords
+ },
+ },
+ {
+ ValidTextureOverload::kSample2dOffsetF32,
+ "textureSample(t : texture_2d<f32>,\n"
+ " s : sampler,\n"
+            "              coords : vec2<f32>,\n"
+ " offset : vec2<i32>) -> vec4<f32>",
+ TextureKind::kRegular,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::k2d,
+ TextureDataType::kF32,
+ "textureSample",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ "sampler", // s
+ b->vec2<f32>(1_f, 2_f), // coords
+ b->vec2<i32>(3_i, 4_i)); // offset
+ },
+ },
+ {
+ ValidTextureOverload::kSample2dArrayF32,
+ "textureSample(t : texture_2d_array<f32>,\n"
+ " s : sampler,\n"
+ " coords : vec2<f32>,\n"
+ " array_index : i32) -> vec4<f32>",
+ TextureKind::kRegular,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::k2dArray,
+ TextureDataType::kF32,
+ "textureSample",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ "sampler", // s
+ b->vec2<f32>(1_f, 2_f), // coords
+ 3_i); // array_index
+ },
+ },
+ {
+ ValidTextureOverload::kSample2dArrayOffsetF32,
+ "textureSample(t : texture_2d_array<f32>,\n"
+ " s : sampler,\n"
+ " coords : vec2<f32>,\n"
+            "              array_index : i32,\n"
+ " offset : vec2<i32>) -> vec4<f32>",
+ TextureKind::kRegular,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::k2dArray,
+ TextureDataType::kF32,
+ "textureSample",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ "sampler", // s
+ b->vec2<f32>(1_f, 2_f), // coords
+ 3_i, // array_index
+ b->vec2<i32>(4_i, 5_i)); // offset
+ },
+ },
+ {
+ ValidTextureOverload::kSample3dF32,
+ "textureSample(t : texture_3d<f32>,\n"
+ " s : sampler,\n"
+ " coords : vec3<f32>) -> vec4<f32>",
+ TextureKind::kRegular,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::k3d,
+ TextureDataType::kF32,
+ "textureSample",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ "sampler", // s
+ b->vec3<f32>(1_f, 2_f, 3_f)); // coords
+ },
+ },
+ {
+ ValidTextureOverload::kSample3dOffsetF32,
+ "textureSample(t : texture_3d<f32>,\n"
+ " s : sampler,\n"
+            "              coords : vec3<f32>,\n"
+ " offset : vec3<i32>) -> vec4<f32>",
+ TextureKind::kRegular,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::k3d,
+ TextureDataType::kF32,
+ "textureSample",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ "sampler", // s
+ b->vec3<f32>(1_f, 2_f, 3_f), // coords
+ b->vec3<i32>(4_i, 5_i, 6_i)); // offset
+ },
+ },
+ {
+ ValidTextureOverload::kSampleCubeF32,
+ "textureSample(t : texture_cube<f32>,\n"
+ " s : sampler,\n"
+ " coords : vec3<f32>) -> vec4<f32>",
+ TextureKind::kRegular,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::kCube,
+ TextureDataType::kF32,
+ "textureSample",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ "sampler", // s
+ b->vec3<f32>(1_f, 2_f, 3_f)); // coords
+ },
+ },
+ {
+ ValidTextureOverload::kSampleCubeArrayF32,
+ "textureSample(t : texture_cube_array<f32>,\n"
+ " s : sampler,\n"
+ " coords : vec3<f32>,\n"
+ " array_index : i32) -> vec4<f32>",
+ TextureKind::kRegular,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::kCubeArray,
+ TextureDataType::kF32,
+ "textureSample",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ "sampler", // s
+ b->vec3<f32>(1_f, 2_f, 3_f), // coords
+ 4_i); // array_index
+ },
+ },
+ {
+ ValidTextureOverload::kSampleDepth2dF32,
+ "textureSample(t : texture_depth_2d,\n"
+ " s : sampler,\n"
+ " coords : vec2<f32>) -> f32",
+ TextureKind::kDepth,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::k2d,
+ TextureDataType::kF32,
+ "textureSample",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ "sampler", // s
+ b->vec2<f32>(1_f, 2_f)); // coords
+ },
+ },
+ {
+ ValidTextureOverload::kSampleDepth2dOffsetF32,
+ "textureSample(t : texture_depth_2d,\n"
+ " s : sampler,\n"
+            "              coords : vec2<f32>,\n"
+ " offset : vec2<i32>) -> f32",
+ TextureKind::kDepth,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::k2d,
+ TextureDataType::kF32,
+ "textureSample",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ "sampler", // s
+ b->vec2<f32>(1_f, 2_f), // coords
+ b->vec2<i32>(3_i, 4_i)); // offset
+ },
+ },
+ {
+ ValidTextureOverload::kSampleDepth2dArrayF32,
+ "textureSample(t : texture_depth_2d_array,\n"
+ " s : sampler,\n"
+ " coords : vec2<f32>,\n"
+ " array_index : i32) -> f32",
+ TextureKind::kDepth,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::k2dArray,
+ TextureDataType::kF32,
+ "textureSample",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ "sampler", // s
+ b->vec2<f32>(1_f, 2_f), // coords
+ 3_i); // array_index
+ },
+ },
+ {
+ ValidTextureOverload::kSampleDepth2dArrayOffsetF32,
+ "textureSample(t : texture_depth_2d_array,\n"
+ " s : sampler,\n"
+ " coords : vec2<f32>,\n"
+            "              array_index : i32,\n"
+ " offset : vec2<i32>) -> f32",
+ TextureKind::kDepth,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::k2dArray,
+ TextureDataType::kF32,
+ "textureSample",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ "sampler", // s
+ b->vec2<f32>(1_f, 2_f), // coords
+ 3_i, // array_index
+ b->vec2<i32>(4_i, 5_i)); // offset
+ },
+ },
+ {
+ ValidTextureOverload::kSampleDepthCubeF32,
+ "textureSample(t : texture_depth_cube,\n"
+ " s : sampler,\n"
+ " coords : vec3<f32>) -> f32",
+ TextureKind::kDepth,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::kCube,
+ TextureDataType::kF32,
+ "textureSample",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ "sampler", // s
+ b->vec3<f32>(1_f, 2_f, 3_f)); // coords
+ },
+ },
+ {
+ ValidTextureOverload::kSampleDepthCubeArrayF32,
+ "textureSample(t : texture_depth_cube_array,\n"
+ " s : sampler,\n"
+ " coords : vec3<f32>,\n"
+ " array_index : i32) -> f32",
+ TextureKind::kDepth,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::kCubeArray,
+ TextureDataType::kF32,
+ "textureSample",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ "sampler", // s
+ b->vec3<f32>(1_f, 2_f, 3_f), // coords
+ 4_i); // array_index
+ },
+ },
+ {
+ ValidTextureOverload::kSampleBias2dF32,
+ "textureSampleBias(t : texture_2d<f32>,\n"
+ " s : sampler,\n"
+ " coords : vec2<f32>,\n"
+ " bias : f32) -> vec4<f32>",
+ TextureKind::kRegular,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::k2d,
+ TextureDataType::kF32,
+ "textureSampleBias",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ "sampler", // s
+ b->vec2<f32>(1_f, 2_f), // coords
+ 3_f); // bias
+ },
+ },
+ {
+ ValidTextureOverload::kSampleBias2dOffsetF32,
+ "textureSampleBias(t : texture_2d<f32>,\n"
+ " s : sampler,\n"
+ " coords : vec2<f32>,\n"
+ " bias : f32,\n"
+ " offset : vec2<i32>) -> vec4<f32>",
+ TextureKind::kRegular,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::k2d,
+ TextureDataType::kF32,
+ "textureSampleBias",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ "sampler", // s
+ b->vec2<f32>(1_f, 2_f), // coords
+ 3_f, // bias
+ b->vec2<i32>(4_i, 5_i)); // offset
+ },
+ },
+ {
+ ValidTextureOverload::kSampleBias2dArrayF32,
+ "textureSampleBias(t : texture_2d_array<f32>,\n"
+ " s : sampler,\n"
+ " coords : vec2<f32>,\n"
+ " array_index : i32,\n"
+ " bias : f32) -> vec4<f32>",
+ TextureKind::kRegular,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::k2dArray,
+ TextureDataType::kF32,
+ "textureSampleBias",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ "sampler", // s
+ b->vec2<f32>(1_f, 2_f), // coords
+ 4_i, // array_index
+ 3_f); // bias
+ },
+ },
+ {
+ ValidTextureOverload::kSampleBias2dArrayOffsetF32,
+ "textureSampleBias(t : texture_2d_array<f32>,\n"
+ " s : sampler,\n"
+ " coords : vec2<f32>,\n"
+ " array_index : i32,\n"
+ " bias : f32,\n"
+ " offset : vec2<i32>) -> vec4<f32>",
+ TextureKind::kRegular,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::k2dArray,
+ TextureDataType::kF32,
+ "textureSampleBias",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ "sampler", // s
+ b->vec2<f32>(1_f, 2_f), // coords
+ 3_i, // array_index
+ 4_f, // bias
+ b->vec2<i32>(5_i, 6_i)); // offset
+ },
+ },
+ {
+ ValidTextureOverload::kSampleBias3dF32,
+ "textureSampleBias(t : texture_3d<f32>,\n"
+ " s : sampler,\n"
+ " coords : vec3<f32>,\n"
+ " bias : f32) -> vec4<f32>",
+ TextureKind::kRegular,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::k3d,
+ TextureDataType::kF32,
+ "textureSampleBias",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ "sampler", // s
+ b->vec3<f32>(1_f, 2_f, 3_f), // coords
+ 4_f); // bias
+ },
+ },
+ {
+ ValidTextureOverload::kSampleBias3dOffsetF32,
+ "textureSampleBias(t : texture_3d<f32>,\n"
+ " s : sampler,\n"
+ " coords : vec3<f32>,\n"
+ " bias : f32,\n"
+ " offset : vec3<i32>) -> vec4<f32>",
+ TextureKind::kRegular,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::k3d,
+ TextureDataType::kF32,
+ "textureSampleBias",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ "sampler", // s
+ b->vec3<f32>(1_f, 2_f, 3_f), // coords
+ 4_f, // bias
+ b->vec3<i32>(5_i, 6_i, 7_i)); // offset
+ },
+ },
+ {
+ ValidTextureOverload::kSampleBiasCubeF32,
+ "textureSampleBias(t : texture_cube<f32>,\n"
+ " s : sampler,\n"
+ " coords : vec3<f32>,\n"
+ " bias : f32) -> vec4<f32>",
+ TextureKind::kRegular,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::kCube,
+ TextureDataType::kF32,
+ "textureSampleBias",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ "sampler", // s
+ b->vec3<f32>(1_f, 2_f, 3_f), // coords
+ 4_f); // bias
+ },
+ },
+ {
+ ValidTextureOverload::kSampleBiasCubeArrayF32,
+ "textureSampleBias(t : texture_cube_array<f32>,\n"
+ " s : sampler,\n"
+ " coords : vec3<f32>,\n"
+ " array_index : i32,\n"
+ " bias : f32) -> vec4<f32>",
+ TextureKind::kRegular,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::kCubeArray,
+ TextureDataType::kF32,
+ "textureSampleBias",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ "sampler", // s
+ b->vec3<f32>(1_f, 2_f, 3_f), // coords
+ 3_i, // array_index
+ 4_f); // bias
+ },
+ },
+ {
+ ValidTextureOverload::kSampleLevel2dF32,
+ "textureSampleLevel(t : texture_2d<f32>,\n"
+ " s : sampler,\n"
+ " coords : vec2<f32>,\n"
+ " level : f32) -> vec4<f32>",
+ TextureKind::kRegular,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::k2d,
+ TextureDataType::kF32,
+ "textureSampleLevel",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ "sampler", // s
+ b->vec2<f32>(1_f, 2_f), // coords
+ 3_f); // level
+ },
+ },
+ {
+ ValidTextureOverload::kSampleLevel2dOffsetF32,
+ "textureSampleLevel(t : texture_2d<f32>,\n"
+ " s : sampler,\n"
+ " coords : vec2<f32>,\n"
+ " level : f32,\n"
+ " offset : vec2<i32>) -> vec4<f32>",
+ TextureKind::kRegular,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::k2d,
+ TextureDataType::kF32,
+ "textureSampleLevel",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ "sampler", // s
+ b->vec2<f32>(1_f, 2_f), // coords
+ 3_f, // level
+ b->vec2<i32>(4_i, 5_i)); // offset
+ },
+ },
+ {
+ ValidTextureOverload::kSampleLevel2dArrayF32,
+ "textureSampleLevel(t : texture_2d_array<f32>,\n"
+ " s : sampler,\n"
+ " coords : vec2<f32>,\n"
+ " array_index : i32,\n"
+ " level : f32) -> vec4<f32>",
+ TextureKind::kRegular,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::k2dArray,
+ TextureDataType::kF32,
+ "textureSampleLevel",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ "sampler", // s
+ b->vec2<f32>(1_f, 2_f), // coords
+ 3_i, // array_index
+ 4_f); // level
+ },
+ },
+ {
+ ValidTextureOverload::kSampleLevel2dArrayOffsetF32,
+ "textureSampleLevel(t : texture_2d_array<f32>,\n"
+ " s : sampler,\n"
+ " coords : vec2<f32>,\n"
+ " array_index : i32,\n"
+ " level : f32,\n"
+ " offset : vec2<i32>) -> vec4<f32>",
+ TextureKind::kRegular,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::k2dArray,
+ TextureDataType::kF32,
+ "textureSampleLevel",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ "sampler", // s
+ b->vec2<f32>(1_f, 2_f), // coords
+ 3_i, // array_index
+ 4_f, // level
+ b->vec2<i32>(5_i, 6_i)); // offset
+ },
+ },
+ {
+ ValidTextureOverload::kSampleLevel3dF32,
+ "textureSampleLevel(t : texture_3d<f32>,\n"
+ " s : sampler,\n"
+ " coords : vec3<f32>,\n"
+ " level : f32) -> vec4<f32>",
+ TextureKind::kRegular,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::k3d,
+ TextureDataType::kF32,
+ "textureSampleLevel",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ "sampler", // s
+ b->vec3<f32>(1_f, 2_f, 3_f), // coords
+ 4_f); // level
+ },
+ },
+ {
+ ValidTextureOverload::kSampleLevel3dOffsetF32,
+ "textureSampleLevel(t : texture_3d<f32>,\n"
+ " s : sampler,\n"
+ " coords : vec3<f32>,\n"
+ " level : f32,\n"
+ " offset : vec3<i32>) -> vec4<f32>",
+ TextureKind::kRegular,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::k3d,
+ TextureDataType::kF32,
+ "textureSampleLevel",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ "sampler", // s
+ b->vec3<f32>(1_f, 2_f, 3_f), // coords
+ 4_f, // level
+ b->vec3<i32>(5_i, 6_i, 7_i)); // offset
+ },
+ },
+ {
+ ValidTextureOverload::kSampleLevelCubeF32,
+ "textureSampleLevel(t : texture_cube<f32>,\n"
+ " s : sampler,\n"
+ " coords : vec3<f32>,\n"
+ " level : f32) -> vec4<f32>",
+ TextureKind::kRegular,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::kCube,
+ TextureDataType::kF32,
+ "textureSampleLevel",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ "sampler", // s
+ b->vec3<f32>(1_f, 2_f, 3_f), // coords
+ 4_f); // level
+ },
+ },
+ {
+ ValidTextureOverload::kSampleLevelCubeArrayF32,
+ "textureSampleLevel(t : texture_cube_array<f32>,\n"
+ " s : sampler,\n"
+ " coords : vec3<f32>,\n"
+ " array_index : i32,\n"
+ " level : f32) -> vec4<f32>",
+ TextureKind::kRegular,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::kCubeArray,
+ TextureDataType::kF32,
+ "textureSampleLevel",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ "sampler", // s
+ b->vec3<f32>(1_f, 2_f, 3_f), // coords
+ 4_i, // array_index
+ 5_f); // level
+ },
+ },
+ {
+ ValidTextureOverload::kSampleLevelDepth2dF32,
+ "textureSampleLevel(t : texture_depth_2d,\n"
+ " s : sampler,\n"
+ " coords : vec2<f32>,\n"
+ " level : i32) -> f32",
+ TextureKind::kDepth,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::k2d,
+ TextureDataType::kF32,
+ "textureSampleLevel",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ "sampler", // s
+ b->vec2<f32>(1_f, 2_f), // coords
+ 3_i); // level
+ },
+ },
+ {
+ ValidTextureOverload::kSampleLevelDepth2dOffsetF32,
+ "textureSampleLevel(t : texture_depth_2d,\n"
+ " s : sampler,\n"
+ " coords : vec2<f32>,\n"
+ " level : i32,\n"
+ " offset : vec2<i32>) -> f32",
+ TextureKind::kDepth,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::k2d,
+ TextureDataType::kF32,
+ "textureSampleLevel",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ "sampler", // s
+ b->vec2<f32>(1_f, 2_f), // coords
+ 3_i, // level
+ b->vec2<i32>(4_i, 5_i)); // offset
+ },
+ },
+ {
+ ValidTextureOverload::kSampleLevelDepth2dArrayF32,
+ "textureSampleLevel(t : texture_depth_2d_array,\n"
+ " s : sampler,\n"
+ " coords : vec2<f32>,\n"
+ " array_index : i32,\n"
+ " level : i32) -> f32",
+ TextureKind::kDepth,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::k2dArray,
+ TextureDataType::kF32,
+ "textureSampleLevel",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ "sampler", // s
+ b->vec2<f32>(1_f, 2_f), // coords
+ 3_i, // array_index
+ 4_i); // level
+ },
+ },
+ {
+ ValidTextureOverload::kSampleLevelDepth2dArrayOffsetF32,
+ "textureSampleLevel(t : texture_depth_2d_array,\n"
+ " s : sampler,\n"
+ " coords : vec2<f32>,\n"
+ " array_index : i32,\n"
+ " level : i32,\n"
+ " offset : vec2<i32>) -> f32",
+ TextureKind::kDepth,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::k2dArray,
+ TextureDataType::kF32,
+ "textureSampleLevel",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ "sampler", // s
+ b->vec2<f32>(1_f, 2_f), // coords
+ 3_i, // array_index
+ 4_i, // level
+ b->vec2<i32>(5_i, 6_i)); // offset
+ },
+ },
+ {
+ ValidTextureOverload::kSampleLevelDepthCubeF32,
+ "textureSampleLevel(t : texture_depth_cube,\n"
+ " s : sampler,\n"
+ " coords : vec3<f32>,\n"
+ " level : i32) -> f32",
+ TextureKind::kDepth,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::kCube,
+ TextureDataType::kF32,
+ "textureSampleLevel",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ "sampler", // s
+ b->vec3<f32>(1_f, 2_f, 3_f), // coords
+ 4_i); // level
+ },
+ },
+ {
+ ValidTextureOverload::kSampleLevelDepthCubeArrayF32,
+ "textureSampleLevel(t : texture_depth_cube_array,\n"
+ " s : sampler,\n"
+ " coords : vec3<f32>,\n"
+ " array_index : i32,\n"
+ " level : i32) -> f32",
+ TextureKind::kDepth,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::kCubeArray,
+ TextureDataType::kF32,
+ "textureSampleLevel",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ "sampler", // s
+ b->vec3<f32>(1_f, 2_f, 3_f), // coords
+ 4_i, // array_index
+ 5_i); // level
+ },
+ },
+ {
+ ValidTextureOverload::kSampleGrad2dF32,
+ "textureSampleGrad(t : texture_2d<f32>,\n"
+ " s : sampler,\n"
+            "                  coords : vec2<f32>,\n"
+ " ddx : vec2<f32>,\n"
+ " ddy : vec2<f32>) -> vec4<f32>",
+ TextureKind::kRegular,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::k2d,
+ TextureDataType::kF32,
+ "textureSampleGrad",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ "sampler", // s
+ b->vec2<f32>(1_f, 2_f), // coords
+ b->vec2<f32>(3_f, 4_f), // ddx
+ b->vec2<f32>(5_f, 6_f)); // ddy
+ },
+ },
+ {
+ ValidTextureOverload::kSampleGrad2dOffsetF32,
+ "textureSampleGrad(t : texture_2d<f32>,\n"
+ " s : sampler,\n"
+ " coords : vec2<f32>,\n"
+ " ddx : vec2<f32>,\n"
+ " ddy : vec2<f32>,\n"
+ " offset : vec2<i32>) -> vec4<f32>",
+ TextureKind::kRegular,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::k2d,
+ TextureDataType::kF32,
+ "textureSampleGrad",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ "sampler", // s
+ b->vec2<f32>(1_f, 2_f), // coords
+ b->vec2<f32>(3_f, 4_f), // ddx
+ b->vec2<f32>(5_f, 6_f), // ddy
+ b->vec2<i32>(7_i, 7_i)); // offset
+ },
+ },
+ {
+ ValidTextureOverload::kSampleGrad2dArrayF32,
+ "textureSampleGrad(t : texture_2d_array<f32>,\n"
+ " s : sampler,\n"
+ " coords : vec2<f32>,\n"
+ " array_index : i32,\n"
+ " ddx : vec2<f32>,\n"
+ " ddy : vec2<f32>) -> vec4<f32>",
+ TextureKind::kRegular,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::k2dArray,
+ TextureDataType::kF32,
+ "textureSampleGrad",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ "sampler", // s
+ b->vec2<f32>(1_f, 2_f), // coords
+ 3_i, // array_index
+ b->vec2<f32>(4_f, 5_f), // ddx
+ b->vec2<f32>(6_f, 7_f)); // ddy
+ },
+ },
+ {
+ ValidTextureOverload::kSampleGrad2dArrayOffsetF32,
+ "textureSampleGrad(t : texture_2d_array<f32>,\n"
+ " s : sampler,\n"
+ " coords : vec2<f32>,\n"
+ " array_index : i32,\n"
+ " ddx : vec2<f32>,\n"
+ " ddy : vec2<f32>,\n"
+ " offset : vec2<i32>) -> vec4<f32>",
+ TextureKind::kRegular,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::k2dArray,
+ TextureDataType::kF32,
+ "textureSampleGrad",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ "sampler", // s
+ b->vec2<f32>(1_f, 2_f), // coords
+ 3_i, // array_index
+ b->vec2<f32>(4_f, 5_f), // ddx
+ b->vec2<f32>(6_f, 7_f), // ddy
+ b->vec2<i32>(6_i, 7_i)); // offset
+ },
+ },
+ {
+ ValidTextureOverload::kSampleGrad3dF32,
+ "textureSampleGrad(t : texture_3d<f32>,\n"
+ " s : sampler,\n"
+ " coords : vec3<f32>,\n"
+ " ddx : vec3<f32>,\n"
+ " ddy : vec3<f32>) -> vec4<f32>",
+ TextureKind::kRegular,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::k3d,
+ TextureDataType::kF32,
+ "textureSampleGrad",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ "sampler", // s
+ b->vec3<f32>(1_f, 2_f, 3_f), // coords
+ b->vec3<f32>(4_f, 5_f, 6_f), // ddx
+ b->vec3<f32>(7_f, 8_f, 9_f)); // ddy
+ },
+ },
+ {
+ ValidTextureOverload::kSampleGrad3dOffsetF32,
+ "textureSampleGrad(t : texture_3d<f32>,\n"
+ " s : sampler,\n"
+ " coords : vec3<f32>,\n"
+ " ddx : vec3<f32>,\n"
+ " ddy : vec3<f32>,\n"
+ " offset : vec3<i32>) -> vec4<f32>",
+ TextureKind::kRegular,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::k3d,
+ TextureDataType::kF32,
+ "textureSampleGrad",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ "sampler", // s
+ b->vec3<f32>(1_f, 2_f, 3_f), // coords
+ b->vec3<f32>(4_f, 5_f, 6_f), // ddx
+ b->vec3<f32>(7_f, 8_f, 9_f), // ddy
+ b->vec3<i32>(0_i, 1_i, 2_i)); // offset
+ },
+ },
+ {
+ ValidTextureOverload::kSampleGradCubeF32,
+ "textureSampleGrad(t : texture_cube<f32>,\n"
+ " s : sampler,\n"
+ " coords : vec3<f32>,\n"
+ " ddx : vec3<f32>,\n"
+ " ddy : vec3<f32>) -> vec4<f32>",
+ TextureKind::kRegular,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::kCube,
+ TextureDataType::kF32,
+ "textureSampleGrad",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ "sampler", // s
+ b->vec3<f32>(1_f, 2_f, 3_f), // coords
+ b->vec3<f32>(4_f, 5_f, 6_f), // ddx
+ b->vec3<f32>(7_f, 8_f, 9_f)); // ddy
+ },
+ },
+ {
+ ValidTextureOverload::kSampleGradCubeArrayF32,
+ "textureSampleGrad(t : texture_cube_array<f32>,\n"
+ " s : sampler,\n"
+ " coords : vec3<f32>,\n"
+ " array_index : i32,\n"
+ " ddx : vec3<f32>,\n"
+ " ddy : vec3<f32>) -> vec4<f32>",
+ TextureKind::kRegular,
+ ast::SamplerKind::kSampler,
+ ast::TextureDimension::kCubeArray,
+ TextureDataType::kF32,
+ "textureSampleGrad",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ "sampler", // s
+ b->vec3<f32>(1_f, 2_f, 3_f), // coords
+ 4_i, // array_index
+ b->vec3<f32>(5_f, 6_f, 7_f), // ddx
+ b->vec3<f32>(8_f, 9_f, 10_f)); // ddy
+ },
+ },
+ {
+ ValidTextureOverload::kSampleCompareDepth2dF32,
+ "textureSampleCompare(t : texture_depth_2d,\n"
+ " s : sampler_comparison,\n"
+ " coords : vec2<f32>,\n"
+ " depth_ref : f32) -> f32",
+ TextureKind::kDepth,
+ ast::SamplerKind::kComparisonSampler,
+ ast::TextureDimension::k2d,
+ TextureDataType::kF32,
+ "textureSampleCompare",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ "sampler", // s
+ b->vec2<f32>(1_f, 2_f), // coords
+ 3_f); // depth_ref
+ },
+ },
+ {
+ ValidTextureOverload::kSampleCompareDepth2dOffsetF32,
+ "textureSampleCompare(t : texture_depth_2d,\n"
+ " s : sampler_comparison,\n"
+ " coords : vec2<f32>,\n"
+ " depth_ref : f32,\n"
+ " offset : vec2<i32>) -> f32",
+ TextureKind::kDepth,
+ ast::SamplerKind::kComparisonSampler,
+ ast::TextureDimension::k2d,
+ TextureDataType::kF32,
+ "textureSampleCompare",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ "sampler", // s
+ b->vec2<f32>(1_f, 2_f), // coords
+ 3_f, // depth_ref
+ b->vec2<i32>(4_i, 5_i)); // offset
+ },
+ },
+ {
+ ValidTextureOverload::kSampleCompareDepth2dArrayF32,
+ "textureSampleCompare(t : texture_depth_2d_array,\n"
+ " s : sampler_comparison,\n"
+ " coords : vec2<f32>,\n"
+ " array_index : i32,\n"
+ " depth_ref : f32) -> f32",
+ TextureKind::kDepth,
+ ast::SamplerKind::kComparisonSampler,
+ ast::TextureDimension::k2dArray,
+ TextureDataType::kF32,
+ "textureSampleCompare",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ "sampler", // s
+ b->vec2<f32>(1_f, 2_f), // coords
+ 4_i, // array_index
+ 3_f); // depth_ref
+ },
+ },
+ {
+ ValidTextureOverload::kSampleCompareDepth2dArrayOffsetF32,
+ "textureSampleCompare(t : texture_depth_2d_array,\n"
+ " s : sampler_comparison,\n"
+ " coords : vec2<f32>,\n"
+ " array_index : i32,\n"
+ " depth_ref : f32,\n"
+ " offset : vec2<i32>) -> f32",
+ TextureKind::kDepth,
+ ast::SamplerKind::kComparisonSampler,
+ ast::TextureDimension::k2dArray,
+ TextureDataType::kF32,
+ "textureSampleCompare",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ "sampler", // s
+ b->vec2<f32>(1_f, 2_f), // coords
+ 4_i, // array_index
+ 3_f, // depth_ref
+ b->vec2<i32>(5_i, 6_i)); // offset
+ },
+ },
+ {
+ ValidTextureOverload::kSampleCompareDepthCubeF32,
+ "textureSampleCompare(t : texture_depth_cube,\n"
+ " s : sampler_comparison,\n"
+ " coords : vec3<f32>,\n"
+ " depth_ref : f32) -> f32",
+ TextureKind::kDepth,
+ ast::SamplerKind::kComparisonSampler,
+ ast::TextureDimension::kCube,
+ TextureDataType::kF32,
+ "textureSampleCompare",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ "sampler", // s
+ b->vec3<f32>(1_f, 2_f, 3_f), // coords
+ 4_f); // depth_ref
+ },
+ },
+ {
+ ValidTextureOverload::kSampleCompareDepthCubeArrayF32,
+ "textureSampleCompare(t : texture_depth_cube_array,\n"
+ " s : sampler_comparison,\n"
+ " coords : vec3<f32>,\n"
+ " array_index : i32,\n"
+ " depth_ref : f32) -> f32",
+ TextureKind::kDepth,
+ ast::SamplerKind::kComparisonSampler,
+ ast::TextureDimension::kCubeArray,
+ TextureDataType::kF32,
+ "textureSampleCompare",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ "sampler", // s
+ b->vec3<f32>(1_f, 2_f, 3_f), // coords
+ 4_i, // array_index
+ 5_f); // depth_ref
+ },
+ },
+ {
+ ValidTextureOverload::kLoad1dLevelF32,
+ "textureLoad(t : texture_1d<f32>,\n"
+ " coords : i32,\n"
+ " level : i32) -> vec4<f32>",
+ TextureKind::kRegular,
+ ast::TextureDimension::k1d,
+ TextureDataType::kF32,
+ "textureLoad",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ 1_i, // coords
+ 3_i); // level
+ },
+ },
+ {
+ ValidTextureOverload::kLoad1dLevelU32,
+ "textureLoad(t : texture_1d<u32>,\n"
+ " coords : i32,\n"
+ " level : i32) -> vec4<u32>",
+ TextureKind::kRegular,
+ ast::TextureDimension::k1d,
+ TextureDataType::kU32,
+ "textureLoad",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ 1_i, // coords
+ 3_i); // level
+ },
+ },
+ {
+ ValidTextureOverload::kLoad1dLevelI32,
+ "textureLoad(t : texture_1d<i32>,\n"
+ " coords : i32,\n"
+ " level : i32) -> vec4<i32>",
+ TextureKind::kRegular,
+ ast::TextureDimension::k1d,
+ TextureDataType::kI32,
+ "textureLoad",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ 1_i, // coords
+ 3_i); // level
+ },
+ },
+ {
+ ValidTextureOverload::kLoad2dLevelF32,
+ "textureLoad(t : texture_2d<f32>,\n"
+ " coords : vec2<i32>,\n"
+ " level : i32) -> vec4<f32>",
+ TextureKind::kRegular,
+ ast::TextureDimension::k2d,
+ TextureDataType::kF32,
+ "textureLoad",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ b->vec2<i32>(1_i, 2_i), // coords
+ 3_i); // level
+ },
+ },
+ {
+ ValidTextureOverload::kLoad2dLevelU32,
+ "textureLoad(t : texture_2d<u32>,\n"
+ " coords : vec2<i32>,\n"
+ " level : i32) -> vec4<u32>",
+ TextureKind::kRegular,
+ ast::TextureDimension::k2d,
+ TextureDataType::kU32,
+ "textureLoad",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ b->vec2<i32>(1_i, 2_i), // coords
+ 3_i); // level
+ },
+ },
+ {
+ ValidTextureOverload::kLoad2dLevelI32,
+ "textureLoad(t : texture_2d<i32>,\n"
+ " coords : vec2<i32>,\n"
+ " level : i32) -> vec4<i32>",
+ TextureKind::kRegular,
+ ast::TextureDimension::k2d,
+ TextureDataType::kI32,
+ "textureLoad",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ b->vec2<i32>(1_i, 2_i), // coords
+ 3_i); // level
+ },
+ },
+ {
+ ValidTextureOverload::kLoad2dArrayLevelF32,
+ "textureLoad(t : texture_2d_array<f32>,\n"
+ " coords : vec2<i32>,\n"
+ " array_index : i32,\n"
+ " level : i32) -> vec4<f32>",
+ TextureKind::kRegular,
+ ast::TextureDimension::k2dArray,
+ TextureDataType::kF32,
+ "textureLoad",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ b->vec2<i32>(1_i, 2_i), // coords
+ 3_i, // array_index
+ 4_i); // level
+ },
+ },
+ {
+ ValidTextureOverload::kLoad2dArrayLevelU32,
+ "textureLoad(t : texture_2d_array<u32>,\n"
+ " coords : vec2<i32>,\n"
+ " array_index : i32,\n"
+ " level : i32) -> vec4<u32>",
+ TextureKind::kRegular,
+ ast::TextureDimension::k2dArray,
+ TextureDataType::kU32,
+ "textureLoad",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ b->vec2<i32>(1_i, 2_i), // coords
+ 3_i, // array_index
+ 4_i); // level
+ },
+ },
+ {
+ ValidTextureOverload::kLoad2dArrayLevelI32,
+ "textureLoad(t : texture_2d_array<i32>,\n"
+ " coords : vec2<i32>,\n"
+ " array_index : i32,\n"
+ " level : i32) -> vec4<i32>",
+ TextureKind::kRegular,
+ ast::TextureDimension::k2dArray,
+ TextureDataType::kI32,
+ "textureLoad",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ b->vec2<i32>(1_i, 2_i), // coords
+ 3_i, // array_index
+ 4_i); // level
+ },
+ },
+ {
+ ValidTextureOverload::kLoad3dLevelF32,
+ "textureLoad(t : texture_3d<f32>,\n"
+ " coords : vec3<i32>,\n"
+ " level : i32) -> vec4<f32>",
+ TextureKind::kRegular,
+ ast::TextureDimension::k3d,
+ TextureDataType::kF32,
+ "textureLoad",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ b->vec3<i32>(1_i, 2_i, 3_i), // coords
+ 4_i); // level
+ },
+ },
+ {
+ ValidTextureOverload::kLoad3dLevelU32,
+ "textureLoad(t : texture_3d<u32>,\n"
+ " coords : vec3<i32>,\n"
+ " level : i32) -> vec4<u32>",
+ TextureKind::kRegular,
+ ast::TextureDimension::k3d,
+ TextureDataType::kU32,
+ "textureLoad",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ b->vec3<i32>(1_i, 2_i, 3_i), // coords
+ 4_i); // level
+ },
+ },
+ {
+ ValidTextureOverload::kLoad3dLevelI32,
+ "textureLoad(t : texture_3d<i32>,\n"
+ " coords : vec3<i32>,\n"
+ " level : i32) -> vec4<i32>",
+ TextureKind::kRegular,
+ ast::TextureDimension::k3d,
+ TextureDataType::kI32,
+ "textureLoad",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ b->vec3<i32>(1_i, 2_i, 3_i), // coords
+ 4_i); // level
+ },
+ },
+ {
+ ValidTextureOverload::kLoadMultisampled2dF32,
+ "textureLoad(t : texture_multisampled_2d<f32>,\n"
+ " coords : vec2<i32>,\n"
+ " sample_index : i32) -> vec4<f32>",
+ TextureKind::kMultisampled,
+ ast::TextureDimension::k2d,
+ TextureDataType::kF32,
+ "textureLoad",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ b->vec2<i32>(1_i, 2_i), // coords
+ 3_i); // sample_index
+ },
+ },
+ {
+ ValidTextureOverload::kLoadMultisampled2dU32,
+ "textureLoad(t : texture_multisampled_2d<u32>,\n"
+ " coords : vec2<i32>,\n"
+ " sample_index : i32) -> vec4<u32>",
+ TextureKind::kMultisampled,
+ ast::TextureDimension::k2d,
+ TextureDataType::kU32,
+ "textureLoad",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ b->vec2<i32>(1_i, 2_i), // coords
+ 3_i); // sample_index
+ },
+ },
+ {
+ ValidTextureOverload::kLoadMultisampled2dI32,
+ "textureLoad(t : texture_multisampled_2d<i32>,\n"
+ " coords : vec2<i32>,\n"
+ " sample_index : i32) -> vec4<i32>",
+ TextureKind::kMultisampled,
+ ast::TextureDimension::k2d,
+ TextureDataType::kI32,
+ "textureLoad",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ b->vec2<i32>(1_i, 2_i), // coords
+ 3_i); // sample_index
+ },
+ },
+ {
+ ValidTextureOverload::kLoadDepth2dLevelF32,
+ "textureLoad(t : texture_depth_2d,\n"
+ " coords : vec2<i32>,\n"
+ " level : i32) -> f32",
+ TextureKind::kDepth,
+ ast::TextureDimension::k2d,
+ TextureDataType::kF32,
+ "textureLoad",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ b->vec2<i32>(1_i, 2_i), // coords
+ 3_i); // level
+ },
+ },
+ {
+ ValidTextureOverload::kLoadDepth2dArrayLevelF32,
+ "textureLoad(t : texture_depth_2d_array,\n"
+ " coords : vec2<i32>,\n"
+ " array_index : i32,\n"
+ " level : i32) -> f32",
+ TextureKind::kDepth,
+ ast::TextureDimension::k2dArray,
+ TextureDataType::kF32,
+ "textureLoad",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ b->vec2<i32>(1_i, 2_i), // coords
+ 3_i, // array_index
+ 4_i); // level
+ },
+ },
+ {
+ ValidTextureOverload::kStoreWO1dRgba32float,
+ "textureStore(t : texture_storage_1d<rgba32float>,\n"
+ " coords : i32,\n"
+ " value : vec4<T>)",
+ ast::Access::kWrite,
+ ast::TexelFormat::kRgba32Float,
+ ast::TextureDimension::k1d,
+ TextureDataType::kF32,
+ "textureStore",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ 1_i, // coords
+ b->vec4<f32>(2_f, 3_f, 4_f, 5_f)); // value
+ },
+ },
+ {
+ ValidTextureOverload::kStoreWO2dRgba32float,
+ "textureStore(t : texture_storage_2d<rgba32float>,\n"
+ " coords : vec2<i32>,\n"
+ " value : vec4<T>)",
+ ast::Access::kWrite,
+ ast::TexelFormat::kRgba32Float,
+ ast::TextureDimension::k2d,
+ TextureDataType::kF32,
+ "textureStore",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ b->vec2<i32>(1_i, 2_i), // coords
+ b->vec4<f32>(3_f, 4_f, 5_f, 6_f)); // value
+ },
+ },
+ {
+ ValidTextureOverload::kStoreWO2dArrayRgba32float,
+ "textureStore(t : texture_storage_2d_array<rgba32float>,\n"
+ " coords : vec2<i32>,\n"
+ " array_index : i32,\n"
+ " value : vec4<T>)",
+ ast::Access::kWrite,
+ ast::TexelFormat::kRgba32Float,
+ ast::TextureDimension::k2dArray,
+ TextureDataType::kF32,
+ "textureStore",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ b->vec2<i32>(1_i, 2_i), // coords
+ 3_i, // array_index
+ b->vec4<f32>(4_f, 5_f, 6_f, 7_f)); // value
+ },
+ },
+ {
+ ValidTextureOverload::kStoreWO3dRgba32float,
+ "textureStore(t : texture_storage_3d<rgba32float>,\n"
+ " coords : vec3<i32>,\n"
+ " value : vec4<T>)",
+ ast::Access::kWrite,
+ ast::TexelFormat::kRgba32Float,
+ ast::TextureDimension::k3d,
+ TextureDataType::kF32,
+ "textureStore",
+ [](ProgramBuilder* b) {
+ return b->ExprList("texture", // t
+ b->vec3<i32>(1_i, 2_i, 3_i), // coords
+ b->vec4<f32>(4_f, 5_f, 6_f, 7_f)); // value
+ },
+ },
+ };
}
bool ReturnsVoid(ValidTextureOverload texture_overload) {
- switch (texture_overload) {
- case ValidTextureOverload::kStoreWO1dRgba32float:
- case ValidTextureOverload::kStoreWO2dRgba32float:
- case ValidTextureOverload::kStoreWO2dArrayRgba32float:
- case ValidTextureOverload::kStoreWO3dRgba32float:
- return true;
- default:
- return false;
- }
+ switch (texture_overload) {
+ case ValidTextureOverload::kStoreWO1dRgba32float:
+ case ValidTextureOverload::kStoreWO2dRgba32float:
+ case ValidTextureOverload::kStoreWO2dArrayRgba32float:
+ case ValidTextureOverload::kStoreWO3dRgba32float:
+ return true;
+ default:
+ return false;
+ }
}
} // namespace tint::ast::builtin::test
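
For reference, the ValidCases() table and ReturnsVoid() defined above are normally consumed by value-parameterized googletest suites elsewhere in Tint. A minimal sketch of that pattern follows; the fixture and test names are illustrative and not part of this change, and only the API declared in builtin_texture_helper_test.h plus standard googletest is assumed.

// Illustrative sketch only -- not present in the Dawn/Tint sources.
#include "gtest/gtest.h"

#include "src/tint/ast/builtin_texture_helper_test.h"

namespace tint::ast::builtin::test {
namespace {

class TextureOverloadSketchTest : public testing::TestWithParam<TextureOverloadCase> {};

TEST_P(TextureOverloadSketchTest, TableEntryIsSelfConsistent) {
    const TextureOverloadCase& c = GetParam();
    // Every entry carries a human-readable signature string, the builtin name,
    // and a callback that builds the call arguments for that overload.
    EXPECT_NE(c.description, nullptr);
    EXPECT_NE(c.function, nullptr);
    // Only the storage-texture store overloads return void.
    if (ReturnsVoid(c.overload)) {
        EXPECT_STREQ(c.function, "textureStore");
    }
}

INSTANTIATE_TEST_SUITE_P(AllValidCases,
                         TextureOverloadSketchTest,
                         testing::ValuesIn(TextureOverloadCase::ValidCases()));

}  // namespace
}  // namespace tint::ast::builtin::test
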
diff --git a/chromium/third_party/dawn/src/tint/ast/builtin_texture_helper_test.h b/chromium/third_party/dawn/src/tint/ast/builtin_texture_helper_test.h
index ad60757ecac..d2737e194cf 100644
--- a/chromium/third_party/dawn/src/tint/ast/builtin_texture_helper_test.h
+++ b/chromium/third_party/dawn/src/tint/ast/builtin_texture_helper_test.h
@@ -19,17 +19,11 @@
#include "src/tint/ast/access.h"
#include "src/tint/program_builder.h"
-#include "src/tint/sem/storage_texture_type.h"
+#include "src/tint/sem/storage_texture.h"
namespace tint::ast::builtin::test {
-enum class TextureKind {
- kRegular,
- kDepth,
- kDepthMultisampled,
- kMultisampled,
- kStorage
-};
+enum class TextureKind { kRegular, kDepth, kDepthMultisampled, kMultisampled, kStorage };
enum class TextureDataType { kF32, kU32, kI32 };
std::ostream& operator<<(std::ostream& out, const TextureKind& kind);
@@ -37,144 +31,144 @@ std::ostream& operator<<(std::ostream& out, const TextureDataType& ty);
/// Non-exhaustive list of valid texture overloads
enum class ValidTextureOverload {
- kDimensions1d,
- kDimensions2d,
- kDimensions2dLevel,
- kDimensions2dArray,
- kDimensions2dArrayLevel,
- kDimensions3d,
- kDimensions3dLevel,
- kDimensionsCube,
- kDimensionsCubeLevel,
- kDimensionsCubeArray,
- kDimensionsCubeArrayLevel,
- kDimensionsMultisampled2d,
- kDimensionsDepth2d,
- kDimensionsDepth2dLevel,
- kDimensionsDepth2dArray,
- kDimensionsDepth2dArrayLevel,
- kDimensionsDepthCube,
- kDimensionsDepthCubeLevel,
- kDimensionsDepthCubeArray,
- kDimensionsDepthCubeArrayLevel,
- kDimensionsDepthMultisampled2d,
- kDimensionsStorageWO1d,
- kDimensionsStorageWO2d,
- kDimensionsStorageWO2dArray,
- kDimensionsStorageWO3d,
- kGather2dF32,
- kGather2dOffsetF32,
- kGather2dArrayF32,
- kGather2dArrayOffsetF32,
- kGatherCubeF32,
- kGatherCubeArrayF32,
- kGatherDepth2dF32,
- kGatherDepth2dOffsetF32,
- kGatherDepth2dArrayF32,
- kGatherDepth2dArrayOffsetF32,
- kGatherDepthCubeF32,
- kGatherDepthCubeArrayF32,
- kGatherCompareDepth2dF32,
- kGatherCompareDepth2dOffsetF32,
- kGatherCompareDepth2dArrayF32,
- kGatherCompareDepth2dArrayOffsetF32,
- kGatherCompareDepthCubeF32,
- kGatherCompareDepthCubeArrayF32,
- kNumLayers2dArray,
- kNumLayersCubeArray,
- kNumLayersDepth2dArray,
- kNumLayersDepthCubeArray,
- kNumLayersStorageWO2dArray,
- kNumLevels2d,
- kNumLevels2dArray,
- kNumLevels3d,
- kNumLevelsCube,
- kNumLevelsCubeArray,
- kNumLevelsDepth2d,
- kNumLevelsDepth2dArray,
- kNumLevelsDepthCube,
- kNumLevelsDepthCubeArray,
- kNumSamplesMultisampled2d,
- kNumSamplesDepthMultisampled2d,
- kSample1dF32,
- kSample2dF32,
- kSample2dOffsetF32,
- kSample2dArrayF32,
- kSample2dArrayOffsetF32,
- kSample3dF32,
- kSample3dOffsetF32,
- kSampleCubeF32,
- kSampleCubeArrayF32,
- kSampleDepth2dF32,
- kSampleDepth2dOffsetF32,
- kSampleDepth2dArrayF32,
- kSampleDepth2dArrayOffsetF32,
- kSampleDepthCubeF32,
- kSampleDepthCubeArrayF32,
- kSampleBias2dF32,
- kSampleBias2dOffsetF32,
- kSampleBias2dArrayF32,
- kSampleBias2dArrayOffsetF32,
- kSampleBias3dF32,
- kSampleBias3dOffsetF32,
- kSampleBiasCubeF32,
- kSampleBiasCubeArrayF32,
- kSampleLevel2dF32,
- kSampleLevel2dOffsetF32,
- kSampleLevel2dArrayF32,
- kSampleLevel2dArrayOffsetF32,
- kSampleLevel3dF32,
- kSampleLevel3dOffsetF32,
- kSampleLevelCubeF32,
- kSampleLevelCubeArrayF32,
- kSampleLevelDepth2dF32,
- kSampleLevelDepth2dOffsetF32,
- kSampleLevelDepth2dArrayF32,
- kSampleLevelDepth2dArrayOffsetF32,
- kSampleLevelDepthCubeF32,
- kSampleLevelDepthCubeArrayF32,
- kSampleGrad2dF32,
- kSampleGrad2dOffsetF32,
- kSampleGrad2dArrayF32,
- kSampleGrad2dArrayOffsetF32,
- kSampleGrad3dF32,
- kSampleGrad3dOffsetF32,
- kSampleGradCubeF32,
- kSampleGradCubeArrayF32,
- kSampleCompareDepth2dF32,
- kSampleCompareDepth2dOffsetF32,
- kSampleCompareDepth2dArrayF32,
- kSampleCompareDepth2dArrayOffsetF32,
- kSampleCompareDepthCubeF32,
- kSampleCompareDepthCubeArrayF32,
- kSampleCompareLevelDepth2dF32,
- kSampleCompareLevelDepth2dOffsetF32,
- kSampleCompareLevelDepth2dArrayF32,
- kSampleCompareLevelDepth2dArrayOffsetF32,
- kSampleCompareLevelDepthCubeF32,
- kSampleCompareLevelDepthCubeArrayF32,
- kLoad1dLevelF32,
- kLoad1dLevelU32,
- kLoad1dLevelI32,
- kLoad2dLevelF32,
- kLoad2dLevelU32,
- kLoad2dLevelI32,
- kLoad2dArrayLevelF32,
- kLoad2dArrayLevelU32,
- kLoad2dArrayLevelI32,
- kLoad3dLevelF32,
- kLoad3dLevelU32,
- kLoad3dLevelI32,
- kLoadMultisampled2dF32,
- kLoadMultisampled2dU32,
- kLoadMultisampled2dI32,
- kLoadDepth2dLevelF32,
- kLoadDepth2dArrayLevelF32,
- kLoadDepthMultisampled2dF32,
- kStoreWO1dRgba32float, // Not permutated for all texel formats
- kStoreWO2dRgba32float, // Not permutated for all texel formats
- kStoreWO2dArrayRgba32float, // Not permutated for all texel formats
- kStoreWO3dRgba32float, // Not permutated for all texel formats
+ kDimensions1d,
+ kDimensions2d,
+ kDimensions2dLevel,
+ kDimensions2dArray,
+ kDimensions2dArrayLevel,
+ kDimensions3d,
+ kDimensions3dLevel,
+ kDimensionsCube,
+ kDimensionsCubeLevel,
+ kDimensionsCubeArray,
+ kDimensionsCubeArrayLevel,
+ kDimensionsMultisampled2d,
+ kDimensionsDepth2d,
+ kDimensionsDepth2dLevel,
+ kDimensionsDepth2dArray,
+ kDimensionsDepth2dArrayLevel,
+ kDimensionsDepthCube,
+ kDimensionsDepthCubeLevel,
+ kDimensionsDepthCubeArray,
+ kDimensionsDepthCubeArrayLevel,
+ kDimensionsDepthMultisampled2d,
+ kDimensionsStorageWO1d,
+ kDimensionsStorageWO2d,
+ kDimensionsStorageWO2dArray,
+ kDimensionsStorageWO3d,
+ kGather2dF32,
+ kGather2dOffsetF32,
+ kGather2dArrayF32,
+ kGather2dArrayOffsetF32,
+ kGatherCubeF32,
+ kGatherCubeArrayF32,
+ kGatherDepth2dF32,
+ kGatherDepth2dOffsetF32,
+ kGatherDepth2dArrayF32,
+ kGatherDepth2dArrayOffsetF32,
+ kGatherDepthCubeF32,
+ kGatherDepthCubeArrayF32,
+ kGatherCompareDepth2dF32,
+ kGatherCompareDepth2dOffsetF32,
+ kGatherCompareDepth2dArrayF32,
+ kGatherCompareDepth2dArrayOffsetF32,
+ kGatherCompareDepthCubeF32,
+ kGatherCompareDepthCubeArrayF32,
+ kNumLayers2dArray,
+ kNumLayersCubeArray,
+ kNumLayersDepth2dArray,
+ kNumLayersDepthCubeArray,
+ kNumLayersStorageWO2dArray,
+ kNumLevels2d,
+ kNumLevels2dArray,
+ kNumLevels3d,
+ kNumLevelsCube,
+ kNumLevelsCubeArray,
+ kNumLevelsDepth2d,
+ kNumLevelsDepth2dArray,
+ kNumLevelsDepthCube,
+ kNumLevelsDepthCubeArray,
+ kNumSamplesMultisampled2d,
+ kNumSamplesDepthMultisampled2d,
+ kSample1dF32,
+ kSample2dF32,
+ kSample2dOffsetF32,
+ kSample2dArrayF32,
+ kSample2dArrayOffsetF32,
+ kSample3dF32,
+ kSample3dOffsetF32,
+ kSampleCubeF32,
+ kSampleCubeArrayF32,
+ kSampleDepth2dF32,
+ kSampleDepth2dOffsetF32,
+ kSampleDepth2dArrayF32,
+ kSampleDepth2dArrayOffsetF32,
+ kSampleDepthCubeF32,
+ kSampleDepthCubeArrayF32,
+ kSampleBias2dF32,
+ kSampleBias2dOffsetF32,
+ kSampleBias2dArrayF32,
+ kSampleBias2dArrayOffsetF32,
+ kSampleBias3dF32,
+ kSampleBias3dOffsetF32,
+ kSampleBiasCubeF32,
+ kSampleBiasCubeArrayF32,
+ kSampleLevel2dF32,
+ kSampleLevel2dOffsetF32,
+ kSampleLevel2dArrayF32,
+ kSampleLevel2dArrayOffsetF32,
+ kSampleLevel3dF32,
+ kSampleLevel3dOffsetF32,
+ kSampleLevelCubeF32,
+ kSampleLevelCubeArrayF32,
+ kSampleLevelDepth2dF32,
+ kSampleLevelDepth2dOffsetF32,
+ kSampleLevelDepth2dArrayF32,
+ kSampleLevelDepth2dArrayOffsetF32,
+ kSampleLevelDepthCubeF32,
+ kSampleLevelDepthCubeArrayF32,
+ kSampleGrad2dF32,
+ kSampleGrad2dOffsetF32,
+ kSampleGrad2dArrayF32,
+ kSampleGrad2dArrayOffsetF32,
+ kSampleGrad3dF32,
+ kSampleGrad3dOffsetF32,
+ kSampleGradCubeF32,
+ kSampleGradCubeArrayF32,
+ kSampleCompareDepth2dF32,
+ kSampleCompareDepth2dOffsetF32,
+ kSampleCompareDepth2dArrayF32,
+ kSampleCompareDepth2dArrayOffsetF32,
+ kSampleCompareDepthCubeF32,
+ kSampleCompareDepthCubeArrayF32,
+ kSampleCompareLevelDepth2dF32,
+ kSampleCompareLevelDepth2dOffsetF32,
+ kSampleCompareLevelDepth2dArrayF32,
+ kSampleCompareLevelDepth2dArrayOffsetF32,
+ kSampleCompareLevelDepthCubeF32,
+ kSampleCompareLevelDepthCubeArrayF32,
+ kLoad1dLevelF32,
+ kLoad1dLevelU32,
+ kLoad1dLevelI32,
+ kLoad2dLevelF32,
+ kLoad2dLevelU32,
+ kLoad2dLevelI32,
+ kLoad2dArrayLevelF32,
+ kLoad2dArrayLevelU32,
+ kLoad2dArrayLevelI32,
+ kLoad3dLevelF32,
+ kLoad3dLevelU32,
+ kLoad3dLevelI32,
+ kLoadMultisampled2dF32,
+ kLoadMultisampled2dU32,
+ kLoadMultisampled2dI32,
+ kLoadDepth2dLevelF32,
+ kLoadDepth2dArrayLevelF32,
+ kLoadDepthMultisampled2dF32,
+ kStoreWO1dRgba32float, // Not permutated for all texel formats
+ kStoreWO2dRgba32float, // Not permutated for all texel formats
+ kStoreWO2dArrayRgba32float, // Not permutated for all texel formats
+ kStoreWO3dRgba32float, // Not permutated for all texel formats
};
/// @param texture_overload the ValidTextureOverload
@@ -183,77 +177,76 @@ bool ReturnsVoid(ValidTextureOverload texture_overload);
/// Describes a texture builtin overload
struct TextureOverloadCase {
- /// Constructor for textureSample...() functions
- TextureOverloadCase(ValidTextureOverload,
- const char*,
- TextureKind,
- ast::SamplerKind,
- ast::TextureDimension,
- TextureDataType,
- const char*,
- std::function<ExpressionList(ProgramBuilder*)>);
- /// Constructor for textureLoad() functions with non-storage textures
- TextureOverloadCase(ValidTextureOverload,
- const char*,
- TextureKind,
- ast::TextureDimension,
- TextureDataType,
- const char*,
- std::function<ExpressionList(ProgramBuilder*)>);
- /// Constructor for textureLoad() with storage textures
- TextureOverloadCase(ValidTextureOverload,
- const char*,
- Access,
- ast::TexelFormat,
- ast::TextureDimension,
- TextureDataType,
- const char*,
- std::function<ExpressionList(ProgramBuilder*)>);
- /// Copy constructor
- TextureOverloadCase(const TextureOverloadCase&);
- /// Destructor
- ~TextureOverloadCase();
+ /// Constructor for textureSample...() functions
+ TextureOverloadCase(ValidTextureOverload,
+ const char*,
+ TextureKind,
+ ast::SamplerKind,
+ ast::TextureDimension,
+ TextureDataType,
+ const char*,
+ std::function<ExpressionList(ProgramBuilder*)>);
+ /// Constructor for textureLoad() functions with non-storage textures
+ TextureOverloadCase(ValidTextureOverload,
+ const char*,
+ TextureKind,
+ ast::TextureDimension,
+ TextureDataType,
+ const char*,
+ std::function<ExpressionList(ProgramBuilder*)>);
+ /// Constructor for textureLoad() with storage textures
+ TextureOverloadCase(ValidTextureOverload,
+ const char*,
+ Access,
+ ast::TexelFormat,
+ ast::TextureDimension,
+ TextureDataType,
+ const char*,
+ std::function<ExpressionList(ProgramBuilder*)>);
+ /// Copy constructor
+ TextureOverloadCase(const TextureOverloadCase&);
+ /// Destructor
+ ~TextureOverloadCase();
- /// @return a vector containing a large number (non-exhaustive) of valid
- /// texture overloads.
- static std::vector<TextureOverloadCase> ValidCases();
+ /// @return a vector containing a large number (non-exhaustive) of valid
+ /// texture overloads.
+ static std::vector<TextureOverloadCase> ValidCases();
- /// @param builder the AST builder used for the test
- /// @returns the vector component type of the texture function return value
- const ast::Type* BuildResultVectorComponentType(
- ProgramBuilder* builder) const;
- /// @param builder the AST builder used for the test
- /// @returns a variable holding the test texture, automatically registered as
- /// a global variable.
- const ast::Variable* BuildTextureVariable(ProgramBuilder* builder) const;
- /// @param builder the AST builder used for the test
- /// @returns a Variable holding the test sampler, automatically registered as
- /// a global variable.
- const ast::Variable* BuildSamplerVariable(ProgramBuilder* builder) const;
+ /// @param builder the AST builder used for the test
+ /// @returns the vector component type of the texture function return value
+ const ast::Type* BuildResultVectorComponentType(ProgramBuilder* builder) const;
+ /// @param builder the AST builder used for the test
+ /// @returns a variable holding the test texture, automatically registered as
+ /// a global variable.
+ const ast::Variable* BuildTextureVariable(ProgramBuilder* builder) const;
+ /// @param builder the AST builder used for the test
+ /// @returns a Variable holding the test sampler, automatically registered as
+ /// a global variable.
+ const ast::Variable* BuildSamplerVariable(ProgramBuilder* builder) const;
- /// The enumerator for this overload
- const ValidTextureOverload overload;
- /// A human readable description of the overload
- const char* const description;
- /// The texture kind for the texture parameter
- const TextureKind texture_kind;
- /// The sampler kind for the sampler parameter
- /// Used only when texture_kind is not kStorage
- ast::SamplerKind const sampler_kind = ast::SamplerKind::kSampler;
- /// The access control for the storage texture
- /// Used only when texture_kind is kStorage
- Access const access = Access::kReadWrite;
- /// The image format for the storage texture
- /// Used only when texture_kind is kStorage
- ast::TexelFormat const texel_format = ast::TexelFormat::kNone;
- /// The dimensions of the texture parameter
- ast::TextureDimension const texture_dimension;
- /// The data type of the texture parameter
- const TextureDataType texture_data_type;
- /// Name of the function. e.g. `textureSample`, `textureSampleGrad`, etc
- const char* const function;
- /// A function that builds the AST arguments for the overload
- std::function<ExpressionList(ProgramBuilder*)> const args;
+ /// The enumerator for this overload
+ const ValidTextureOverload overload;
+ /// A human readable description of the overload
+ const char* const description;
+ /// The texture kind for the texture parameter
+ const TextureKind texture_kind;
+ /// The sampler kind for the sampler parameter
+ /// Used only when texture_kind is not kStorage
+ ast::SamplerKind const sampler_kind = ast::SamplerKind::kSampler;
+ /// The access control for the storage texture
+ /// Used only when texture_kind is kStorage
+ Access const access = Access::kReadWrite;
+ /// The image format for the storage texture
+ /// Used only when texture_kind is kStorage
+ ast::TexelFormat const texel_format = ast::TexelFormat::kNone;
+ /// The dimensions of the texture parameter
+ ast::TextureDimension const texture_dimension;
+ /// The data type of the texture parameter
+ const TextureDataType texture_data_type;
+ /// Name of the function. e.g. `textureSample`, `textureSampleGrad`, etc
+ const char* const function;
+ /// A function that builds the AST arguments for the overload
+ std::function<ExpressionList(ProgramBuilder*)> const args;
};
std::ostream& operator<<(std::ostream& out, const TextureOverloadCase& data);
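
As a usage note: the three constructors above correspond to the sampled, non-storage textureLoad, and storage-texture rows of the table in the .cc file, and the Build*Variable() helpers register the globals that every args callback refers to by name. A minimal sketch of wiring them together follows; SketchBuildCall and its comments are illustrative, not part of this change, and only declarations from this header are assumed.

// Illustrative sketch only -- not present in the Dawn/Tint sources.
#include "src/tint/ast/builtin_texture_helper_test.h"

namespace tint::ast::builtin::test {

const CallExpression* SketchBuildCall(ProgramBuilder* b, const TextureOverloadCase& c) {
    // Registers globals named "texture" and "sampler", matching the
    // identifiers used by the args callbacks in the ValidCases() table.
    c.BuildTextureVariable(b);
    c.BuildSamplerVariable(b);  // not referenced by textureLoad/textureStore cases
    // Assemble the call expression for the overload under test.
    return b->create<CallExpression>(b->Expr(c.function), c.args(b));
}

}  // namespace tint::ast::builtin::test
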
diff --git a/chromium/third_party/dawn/src/tint/ast/call_expression.cc b/chromium/third_party/dawn/src/tint/ast/call_expression.cc
index cfb6bd935c1..68b6dc344a3 100644
--- a/chromium/third_party/dawn/src/tint/ast/call_expression.cc
+++ b/chromium/third_party/dawn/src/tint/ast/call_expression.cc
@@ -22,14 +22,14 @@ namespace tint::ast {
namespace {
CallExpression::Target ToTarget(const IdentifierExpression* name) {
- CallExpression::Target target;
- target.name = name;
- return target;
+ CallExpression::Target target;
+ target.name = name;
+ return target;
}
CallExpression::Target ToTarget(const Type* type) {
- CallExpression::Target target;
- target.type = type;
- return target;
+ CallExpression::Target target;
+ target.type = type;
+ return target;
}
} // namespace
@@ -38,25 +38,22 @@ CallExpression::CallExpression(ProgramID pid,
const IdentifierExpression* name,
ExpressionList a)
: Base(pid, src), target(ToTarget(name)), args(a) {
- TINT_ASSERT(AST, name);
- TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, name, program_id);
- for (auto* arg : args) {
- TINT_ASSERT(AST, arg);
- TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, arg, program_id);
- }
+ TINT_ASSERT(AST, name);
+ TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, name, program_id);
+ for (auto* arg : args) {
+ TINT_ASSERT(AST, arg);
+ TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, arg, program_id);
+ }
}
-CallExpression::CallExpression(ProgramID pid,
- const Source& src,
- const Type* type,
- ExpressionList a)
+CallExpression::CallExpression(ProgramID pid, const Source& src, const Type* type, ExpressionList a)
: Base(pid, src), target(ToTarget(type)), args(a) {
- TINT_ASSERT(AST, type);
- TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, type, program_id);
- for (auto* arg : args) {
- TINT_ASSERT(AST, arg);
- TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, arg, program_id);
- }
+ TINT_ASSERT(AST, type);
+ TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, type, program_id);
+ for (auto* arg : args) {
+ TINT_ASSERT(AST, arg);
+ TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, arg, program_id);
+ }
}
CallExpression::CallExpression(CallExpression&&) = default;
@@ -64,13 +61,11 @@ CallExpression::CallExpression(CallExpression&&) = default;
CallExpression::~CallExpression() = default;
const CallExpression* CallExpression::Clone(CloneContext* ctx) const {
- // Clone arguments outside of create() call to have deterministic ordering
- auto src = ctx->Clone(source);
- auto p = ctx->Clone(args);
- return target.name
- ? ctx->dst->create<CallExpression>(src, ctx->Clone(target.name), p)
- : ctx->dst->create<CallExpression>(src, ctx->Clone(target.type),
- p);
+ // Clone arguments outside of create() call to have deterministic ordering
+ auto src = ctx->Clone(source);
+ auto p = ctx->Clone(args);
+ return target.name ? ctx->dst->create<CallExpression>(src, ctx->Clone(target.name), p)
+ : ctx->dst->create<CallExpression>(src, ctx->Clone(target.type), p);
}
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/call_expression.h b/chromium/third_party/dawn/src/tint/ast/call_expression.h
index aab2b812bd0..9f197114a56 100644
--- a/chromium/third_party/dawn/src/tint/ast/call_expression.h
+++ b/chromium/third_party/dawn/src/tint/ast/call_expression.h
@@ -31,52 +31,52 @@ namespace tint::ast {
/// * sem::TypeConstructor
/// * sem::TypeConversion
class CallExpression final : public Castable<CallExpression, Expression> {
- public:
- /// Constructor
- /// @param program_id the identifier of the program that owns this node
- /// @param source the call expression source
- /// @param name the function or type name
- /// @param args the arguments
- CallExpression(ProgramID program_id,
- const Source& source,
- const IdentifierExpression* name,
- ExpressionList args);
+ public:
+ /// Constructor
+ /// @param program_id the identifier of the program that owns this node
+ /// @param source the call expression source
+ /// @param name the function or type name
+ /// @param args the arguments
+ CallExpression(ProgramID program_id,
+ const Source& source,
+ const IdentifierExpression* name,
+ ExpressionList args);
- /// Constructor
- /// @param program_id the identifier of the program that owns this node
- /// @param source the call expression source
- /// @param type the type
- /// @param args the arguments
- CallExpression(ProgramID program_id,
- const Source& source,
- const Type* type,
- ExpressionList args);
+ /// Constructor
+ /// @param program_id the identifier of the program that owns this node
+ /// @param source the call expression source
+ /// @param type the type
+ /// @param args the arguments
+ CallExpression(ProgramID program_id,
+ const Source& source,
+ const Type* type,
+ ExpressionList args);
- /// Move constructor
- CallExpression(CallExpression&&);
- ~CallExpression() override;
+ /// Move constructor
+ CallExpression(CallExpression&&);
+ ~CallExpression() override;
- /// Clones this node and all transitive child nodes using the `CloneContext`
- /// `ctx`.
- /// @param ctx the clone context
- /// @return the newly cloned node
- const CallExpression* Clone(CloneContext* ctx) const override;
+ /// Clones this node and all transitive child nodes using the `CloneContext`
+ /// `ctx`.
+ /// @param ctx the clone context
+ /// @return the newly cloned node
+ const CallExpression* Clone(CloneContext* ctx) const override;
- /// Target is either an identifier, or a Type.
- /// One of these must be nullptr and the other a non-nullptr.
- struct Target {
- /// name is a function or builtin to call, or type name to construct or
- /// cast-to
- const IdentifierExpression* name = nullptr;
- /// type to construct or cast-to
- const Type* type = nullptr;
- };
+ /// Target is either an identifier, or a Type.
+ /// One of these must be nullptr and the other a non-nullptr.
+ struct Target {
+ /// name is a function or builtin to call, or type name to construct or
+ /// cast-to
+ const IdentifierExpression* name = nullptr;
+ /// type to construct or cast-to
+ const Type* type = nullptr;
+ };
- /// The target function
- const Target target;
+ /// The target function
+ const Target target;
- /// The arguments
- const ExpressionList args;
+ /// The arguments
+ const ExpressionList args;
};
} // namespace tint::ast
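
The Target comment above encodes the invariant that exactly one of name and type is non-null; the small illustration below mirrors how CallExpression::Clone() branches on it. SketchTargetKind is illustrative and not part of this change.

// Illustrative sketch only -- not present in the Dawn/Tint sources.
#include "src/tint/ast/call_expression.h"

namespace tint::ast {

const char* SketchTargetKind(const CallExpression* call) {
    // Identifier targets are function/builtin calls; type targets are
    // type constructors or conversions.
    return call->target.name ? "function or builtin call" : "type constructor or conversion";
}

}  // namespace tint::ast
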
diff --git a/chromium/third_party/dawn/src/tint/ast/call_expression_test.cc b/chromium/third_party/dawn/src/tint/ast/call_expression_test.cc
index 4da5048e864..5cb31cb95cd 100644
--- a/chromium/third_party/dawn/src/tint/ast/call_expression_test.cc
+++ b/chromium/third_party/dawn/src/tint/ast/call_expression_test.cc
@@ -21,126 +21,124 @@ namespace {
using CallExpressionTest = TestHelper;
TEST_F(CallExpressionTest, CreationIdentifier) {
- auto* func = Expr("func");
- ExpressionList params;
- params.push_back(Expr("param1"));
- params.push_back(Expr("param2"));
-
- auto* stmt = create<CallExpression>(func, params);
- EXPECT_EQ(stmt->target.name, func);
- EXPECT_EQ(stmt->target.type, nullptr);
-
- const auto& vec = stmt->args;
- ASSERT_EQ(vec.size(), 2u);
- EXPECT_EQ(vec[0], params[0]);
- EXPECT_EQ(vec[1], params[1]);
+ auto* func = Expr("func");
+ ExpressionList params;
+ params.push_back(Expr("param1"));
+ params.push_back(Expr("param2"));
+
+ auto* stmt = create<CallExpression>(func, params);
+ EXPECT_EQ(stmt->target.name, func);
+ EXPECT_EQ(stmt->target.type, nullptr);
+
+ const auto& vec = stmt->args;
+ ASSERT_EQ(vec.size(), 2u);
+ EXPECT_EQ(vec[0], params[0]);
+ EXPECT_EQ(vec[1], params[1]);
}
TEST_F(CallExpressionTest, CreationIdentifier_WithSource) {
- auto* func = Expr("func");
- auto* stmt = create<CallExpression>(Source{{20, 2}}, func, ExpressionList{});
- EXPECT_EQ(stmt->target.name, func);
- EXPECT_EQ(stmt->target.type, nullptr);
-
- auto src = stmt->source;
- EXPECT_EQ(src.range.begin.line, 20u);
- EXPECT_EQ(src.range.begin.column, 2u);
+ auto* func = Expr("func");
+ auto* stmt = create<CallExpression>(Source{{20, 2}}, func, ExpressionList{});
+ EXPECT_EQ(stmt->target.name, func);
+ EXPECT_EQ(stmt->target.type, nullptr);
+
+ auto src = stmt->source;
+ EXPECT_EQ(src.range.begin.line, 20u);
+ EXPECT_EQ(src.range.begin.column, 2u);
}
TEST_F(CallExpressionTest, CreationType) {
- auto* type = ty.f32();
- ExpressionList params;
- params.push_back(Expr("param1"));
- params.push_back(Expr("param2"));
-
- auto* stmt = create<CallExpression>(type, params);
- EXPECT_EQ(stmt->target.name, nullptr);
- EXPECT_EQ(stmt->target.type, type);
-
- const auto& vec = stmt->args;
- ASSERT_EQ(vec.size(), 2u);
- EXPECT_EQ(vec[0], params[0]);
- EXPECT_EQ(vec[1], params[1]);
+ auto* type = ty.f32();
+ ExpressionList params;
+ params.push_back(Expr("param1"));
+ params.push_back(Expr("param2"));
+
+ auto* stmt = create<CallExpression>(type, params);
+ EXPECT_EQ(stmt->target.name, nullptr);
+ EXPECT_EQ(stmt->target.type, type);
+
+ const auto& vec = stmt->args;
+ ASSERT_EQ(vec.size(), 2u);
+ EXPECT_EQ(vec[0], params[0]);
+ EXPECT_EQ(vec[1], params[1]);
}
TEST_F(CallExpressionTest, CreationType_WithSource) {
- auto* type = ty.f32();
- auto* stmt = create<CallExpression>(Source{{20, 2}}, type, ExpressionList{});
- EXPECT_EQ(stmt->target.name, nullptr);
- EXPECT_EQ(stmt->target.type, type);
-
- auto src = stmt->source;
- EXPECT_EQ(src.range.begin.line, 20u);
- EXPECT_EQ(src.range.begin.column, 2u);
+ auto* type = ty.f32();
+ auto* stmt = create<CallExpression>(Source{{20, 2}}, type, ExpressionList{});
+ EXPECT_EQ(stmt->target.name, nullptr);
+ EXPECT_EQ(stmt->target.type, type);
+
+ auto src = stmt->source;
+ EXPECT_EQ(src.range.begin.line, 20u);
+ EXPECT_EQ(src.range.begin.column, 2u);
}
TEST_F(CallExpressionTest, IsCall) {
- auto* func = Expr("func");
- auto* stmt = create<CallExpression>(func, ExpressionList{});
- EXPECT_TRUE(stmt->Is<CallExpression>());
+ auto* func = Expr("func");
+ auto* stmt = create<CallExpression>(func, ExpressionList{});
+ EXPECT_TRUE(stmt->Is<CallExpression>());
}
TEST_F(CallExpressionTest, Assert_Null_Identifier) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b;
- b.create<CallExpression>(static_cast<IdentifierExpression*>(nullptr),
- ExpressionList{});
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b;
+ b.create<CallExpression>(static_cast<IdentifierExpression*>(nullptr), ExpressionList{});
+ },
+ "internal compiler error");
}
TEST_F(CallExpressionTest, Assert_Null_Type) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b;
- b.create<CallExpression>(static_cast<Type*>(nullptr), ExpressionList{});
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b;
+ b.create<CallExpression>(static_cast<Type*>(nullptr), ExpressionList{});
+ },
+ "internal compiler error");
}
TEST_F(CallExpressionTest, Assert_Null_Param) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b;
- ExpressionList params;
- params.push_back(b.Expr("param1"));
- params.push_back(nullptr);
- params.push_back(b.Expr("param2"));
- b.create<CallExpression>(b.Expr("func"), params);
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b;
+ ExpressionList params;
+ params.push_back(b.Expr("param1"));
+ params.push_back(nullptr);
+ params.push_back(b.Expr("param2"));
+ b.create<CallExpression>(b.Expr("func"), params);
+ },
+ "internal compiler error");
}
TEST_F(CallExpressionTest, Assert_DifferentProgramID_Identifier) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b1;
- ProgramBuilder b2;
- b1.create<CallExpression>(b2.Expr("func"), ExpressionList{});
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b1;
+ ProgramBuilder b2;
+ b1.create<CallExpression>(b2.Expr("func"), ExpressionList{});
+ },
+ "internal compiler error");
}
TEST_F(CallExpressionTest, Assert_DifferentProgramID_Type) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b1;
- ProgramBuilder b2;
- b1.create<CallExpression>(b2.ty.f32(), ExpressionList{});
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b1;
+ ProgramBuilder b2;
+ b1.create<CallExpression>(b2.ty.f32(), ExpressionList{});
+ },
+ "internal compiler error");
}
TEST_F(CallExpressionTest, Assert_DifferentProgramID_Param) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b1;
- ProgramBuilder b2;
- b1.create<CallExpression>(b1.Expr("func"),
- ExpressionList{b2.Expr("param1")});
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b1;
+ ProgramBuilder b2;
+ b1.create<CallExpression>(b1.Expr("func"), ExpressionList{b2.Expr("param1")});
+ },
+ "internal compiler error");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/ast/call_statement.cc b/chromium/third_party/dawn/src/tint/ast/call_statement.cc
index 717b1a9a502..5e98fc9f333 100644
--- a/chromium/third_party/dawn/src/tint/ast/call_statement.cc
+++ b/chromium/third_party/dawn/src/tint/ast/call_statement.cc
@@ -20,12 +20,10 @@ TINT_INSTANTIATE_TYPEINFO(tint::ast::CallStatement);
namespace tint::ast {
-CallStatement::CallStatement(ProgramID pid,
- const Source& src,
- const CallExpression* call)
+CallStatement::CallStatement(ProgramID pid, const Source& src, const CallExpression* call)
: Base(pid, src), expr(call) {
- TINT_ASSERT(AST, expr);
- TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, expr, program_id);
+ TINT_ASSERT(AST, expr);
+ TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, expr, program_id);
}
CallStatement::CallStatement(CallStatement&&) = default;
@@ -33,10 +31,10 @@ CallStatement::CallStatement(CallStatement&&) = default;
CallStatement::~CallStatement() = default;
const CallStatement* CallStatement::Clone(CloneContext* ctx) const {
- // Clone arguments outside of create() call to have deterministic ordering
- auto src = ctx->Clone(source);
- auto* call = ctx->Clone(expr);
- return ctx->dst->create<CallStatement>(src, call);
+ // Clone arguments outside of create() call to have deterministic ordering
+ auto src = ctx->Clone(source);
+ auto* call = ctx->Clone(expr);
+ return ctx->dst->create<CallStatement>(src, call);
}
} // namespace tint::ast
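
The "Clone arguments outside of create() call to have deterministic ordering" comments in these Clone() implementations reflect that C++ leaves the evaluation order of function arguments unspecified, so cloning inline could register cloned nodes in a compiler-dependent order. The sketch below shows the pattern the comment guards against; SketchNonDeterministicClone is illustrative and not part of this change.

// Illustrative sketch only -- not present in the Dawn/Tint sources.
#include "src/tint/ast/call_statement.h"
#include "src/tint/program_builder.h"

namespace tint::ast {

const CallStatement* SketchNonDeterministicClone(CloneContext* ctx, const CallStatement* s) {
    // Whether the source or the call expression is cloned first is
    // unspecified here, so node creation order in the destination program
    // could differ between compilers; the real Clone() above avoids this by
    // cloning into locals before calling create<>().
    return ctx->dst->create<CallStatement>(ctx->Clone(s->source), ctx->Clone(s->expr));
}

}  // namespace tint::ast
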
diff --git a/chromium/third_party/dawn/src/tint/ast/call_statement.h b/chromium/third_party/dawn/src/tint/ast/call_statement.h
index 7b0677b8297..d0d9f5301b8 100644
--- a/chromium/third_party/dawn/src/tint/ast/call_statement.h
+++ b/chromium/third_party/dawn/src/tint/ast/call_statement.h
@@ -22,24 +22,24 @@ namespace tint::ast {
/// A call expression
class CallStatement final : public Castable<CallStatement, Statement> {
- public:
- /// Constructor
- /// @param pid the identifier of the program that owns this node
- /// @param src the source of this node for the statement
- /// @param call the function
- CallStatement(ProgramID pid, const Source& src, const CallExpression* call);
- /// Move constructor
- CallStatement(CallStatement&&);
- ~CallStatement() override;
-
- /// Clones this node and all transitive child nodes using the `CloneContext`
- /// `ctx`.
- /// @param ctx the clone context
- /// @return the newly cloned node
- const CallStatement* Clone(CloneContext* ctx) const override;
-
- /// The call expression
- const CallExpression* const expr;
+ public:
+ /// Constructor
+ /// @param pid the identifier of the program that owns this node
+ /// @param src the source of this node for the statement
+ /// @param call the function
+ CallStatement(ProgramID pid, const Source& src, const CallExpression* call);
+ /// Move constructor
+ CallStatement(CallStatement&&);
+ ~CallStatement() override;
+
+ /// Clones this node and all transitive child nodes using the `CloneContext`
+ /// `ctx`.
+ /// @param ctx the clone context
+ /// @return the newly cloned node
+ const CallStatement* Clone(CloneContext* ctx) const override;
+
+ /// The call expression
+ const CallExpression* const expr;
};
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/call_statement_test.cc b/chromium/third_party/dawn/src/tint/ast/call_statement_test.cc
index 1f06a99af2a..342659731f9 100644
--- a/chromium/third_party/dawn/src/tint/ast/call_statement_test.cc
+++ b/chromium/third_party/dawn/src/tint/ast/call_statement_test.cc
@@ -23,35 +23,34 @@ namespace {
using CallStatementTest = TestHelper;
TEST_F(CallStatementTest, Creation) {
- auto* expr = create<CallExpression>(Expr("func"), ExpressionList{});
+ auto* expr = create<CallExpression>(Expr("func"), ExpressionList{});
- auto* c = create<CallStatement>(expr);
- EXPECT_EQ(c->expr, expr);
+ auto* c = create<CallStatement>(expr);
+ EXPECT_EQ(c->expr, expr);
}
TEST_F(CallStatementTest, IsCall) {
- auto* c = create<CallStatement>(Call("f"));
- EXPECT_TRUE(c->Is<CallStatement>());
+ auto* c = create<CallStatement>(Call("f"));
+ EXPECT_TRUE(c->Is<CallStatement>());
}
TEST_F(CallStatementTest, Assert_Null_Call) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b;
- b.create<CallStatement>(nullptr);
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b;
+ b.create<CallStatement>(nullptr);
+ },
+ "internal compiler error");
}
TEST_F(CallStatementTest, Assert_DifferentProgramID_Call) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b1;
- ProgramBuilder b2;
- b1.create<CallStatement>(
- b2.create<CallExpression>(b2.Expr("func"), ExpressionList{}));
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b1;
+ ProgramBuilder b2;
+ b1.create<CallStatement>(b2.create<CallExpression>(b2.Expr("func"), ExpressionList{}));
+ },
+ "internal compiler error");
}
} // namespace
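
The call tests above exercise CallExpression and CallStatement together. A minimal sketch of building the statement `f();` with a ProgramBuilder, using only the create<>/Expr calls shown in this patch (BuildCallStmt is an illustrative name, not part of Tint):

    #include "src/tint/ast/call_expression.h"
    #include "src/tint/ast/call_statement.h"
    #include "src/tint/program_builder.h"

    // Builds the AST for the statement `f();`.
    const tint::ast::CallStatement* BuildCallStmt(tint::ProgramBuilder& b) {
        auto* call = b.create<tint::ast::CallExpression>(b.Expr("f"), tint::ast::ExpressionList{});
        return b.create<tint::ast::CallStatement>(call);
    }
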
diff --git a/chromium/third_party/dawn/src/tint/ast/case_statement.cc b/chromium/third_party/dawn/src/tint/ast/case_statement.cc
index d4a5559c0e1..bf1f0bf797b 100644
--- a/chromium/third_party/dawn/src/tint/ast/case_statement.cc
+++ b/chromium/third_party/dawn/src/tint/ast/case_statement.cc
@@ -25,12 +25,12 @@ CaseStatement::CaseStatement(ProgramID pid,
CaseSelectorList s,
const BlockStatement* b)
: Base(pid, src), selectors(s), body(b) {
- TINT_ASSERT(AST, body);
- TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, body, program_id);
- for (auto* selector : selectors) {
- TINT_ASSERT(AST, selector);
- TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, selector, program_id);
- }
+ TINT_ASSERT(AST, body);
+ TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, body, program_id);
+ for (auto* selector : selectors) {
+ TINT_ASSERT(AST, selector);
+ TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, selector, program_id);
+ }
}
CaseStatement::CaseStatement(CaseStatement&&) = default;
@@ -38,11 +38,11 @@ CaseStatement::CaseStatement(CaseStatement&&) = default;
CaseStatement::~CaseStatement() = default;
const CaseStatement* CaseStatement::Clone(CloneContext* ctx) const {
- // Clone arguments outside of create() call to have deterministic ordering
- auto src = ctx->Clone(source);
- auto sel = ctx->Clone(selectors);
- auto* b = ctx->Clone(body);
- return ctx->dst->create<CaseStatement>(src, sel, b);
+ // Clone arguments outside of create() call to have deterministic ordering
+ auto src = ctx->Clone(source);
+ auto sel = ctx->Clone(selectors);
+ auto* b = ctx->Clone(body);
+ return ctx->dst->create<CaseStatement>(src, sel, b);
}
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/case_statement.h b/chromium/third_party/dawn/src/tint/ast/case_statement.h
index d030e703296..19ca69308f6 100644
--- a/chromium/third_party/dawn/src/tint/ast/case_statement.h
+++ b/chromium/third_party/dawn/src/tint/ast/case_statement.h
@@ -27,34 +27,34 @@ using CaseSelectorList = std::vector<const IntLiteralExpression*>;
/// A case statement
class CaseStatement final : public Castable<CaseStatement, Statement> {
- public:
- /// Constructor
- /// @param pid the identifier of the program that owns this node
- /// @param src the source of this node
- /// @param selectors the case selectors
- /// @param body the case body
- CaseStatement(ProgramID pid,
- const Source& src,
- CaseSelectorList selectors,
- const BlockStatement* body);
- /// Move constructor
- CaseStatement(CaseStatement&&);
- ~CaseStatement() override;
+ public:
+ /// Constructor
+ /// @param pid the identifier of the program that owns this node
+ /// @param src the source of this node
+ /// @param selectors the case selectors
+ /// @param body the case body
+ CaseStatement(ProgramID pid,
+ const Source& src,
+ CaseSelectorList selectors,
+ const BlockStatement* body);
+ /// Move constructor
+ CaseStatement(CaseStatement&&);
+ ~CaseStatement() override;
- /// @returns true if this is a default statement
- bool IsDefault() const { return selectors.empty(); }
+ /// @returns true if this is a default statement
+ bool IsDefault() const { return selectors.empty(); }
- /// Clones this node and all transitive child nodes using the `CloneContext`
- /// `ctx`.
- /// @param ctx the clone context
- /// @return the newly cloned node
- const CaseStatement* Clone(CloneContext* ctx) const override;
+ /// Clones this node and all transitive child nodes using the `CloneContext`
+ /// `ctx`.
+ /// @param ctx the clone context
+ /// @return the newly cloned node
+ const CaseStatement* Clone(CloneContext* ctx) const override;
- /// The case selectors, empty if none set
- const CaseSelectorList selectors;
+ /// The case selectors, empty if none set
+ const CaseSelectorList selectors;
- /// The case body
- const BlockStatement* const body;
+ /// The case body
+ const BlockStatement* const body;
};
/// A list of case statements
diff --git a/chromium/third_party/dawn/src/tint/ast/case_statement_test.cc b/chromium/third_party/dawn/src/tint/ast/case_statement_test.cc
index d6b217f5164..12fcbda9261 100644
--- a/chromium/third_party/dawn/src/tint/ast/case_statement_test.cc
+++ b/chromium/third_party/dawn/src/tint/ast/case_statement_test.cc
@@ -19,116 +19,116 @@
#include "src/tint/ast/if_statement.h"
#include "src/tint/ast/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::ast {
namespace {
using CaseStatementTest = TestHelper;
TEST_F(CaseStatementTest, Creation_i32) {
- CaseSelectorList b;
- auto* selector = create<SintLiteralExpression>(2);
- b.push_back(selector);
-
- auto* discard = create<DiscardStatement>();
- auto* body = create<BlockStatement>(StatementList{discard});
-
- auto* c = create<CaseStatement>(b, body);
- ASSERT_EQ(c->selectors.size(), 1u);
- EXPECT_EQ(c->selectors[0], selector);
- ASSERT_EQ(c->body->statements.size(), 1u);
- EXPECT_EQ(c->body->statements[0], discard);
+ CaseSelectorList b;
+ auto* selector = Expr(2_i);
+ b.push_back(selector);
+
+ auto* discard = create<DiscardStatement>();
+ auto* body = create<BlockStatement>(StatementList{discard});
+
+ auto* c = create<CaseStatement>(b, body);
+ ASSERT_EQ(c->selectors.size(), 1u);
+ EXPECT_EQ(c->selectors[0], selector);
+ ASSERT_EQ(c->body->statements.size(), 1u);
+ EXPECT_EQ(c->body->statements[0], discard);
}
TEST_F(CaseStatementTest, Creation_u32) {
- CaseSelectorList b;
- auto* selector = create<UintLiteralExpression>(2u);
- b.push_back(selector);
-
- auto* discard = create<DiscardStatement>();
- auto* body = create<BlockStatement>(StatementList{discard});
-
- auto* c = create<CaseStatement>(b, body);
- ASSERT_EQ(c->selectors.size(), 1u);
- EXPECT_EQ(c->selectors[0], selector);
- ASSERT_EQ(c->body->statements.size(), 1u);
- EXPECT_EQ(c->body->statements[0], discard);
+ CaseSelectorList b;
+ auto* selector = Expr(2_u);
+ b.push_back(selector);
+
+ auto* discard = create<DiscardStatement>();
+ auto* body = create<BlockStatement>(StatementList{discard});
+
+ auto* c = create<CaseStatement>(b, body);
+ ASSERT_EQ(c->selectors.size(), 1u);
+ EXPECT_EQ(c->selectors[0], selector);
+ ASSERT_EQ(c->body->statements.size(), 1u);
+ EXPECT_EQ(c->body->statements[0], discard);
}
TEST_F(CaseStatementTest, Creation_WithSource) {
- CaseSelectorList b;
- b.push_back(create<SintLiteralExpression>(2));
-
- auto* body = create<BlockStatement>(StatementList{
- create<DiscardStatement>(),
- });
- auto* c = create<CaseStatement>(Source{Source::Location{20, 2}}, b, body);
- auto src = c->source;
- EXPECT_EQ(src.range.begin.line, 20u);
- EXPECT_EQ(src.range.begin.column, 2u);
+ CaseSelectorList b;
+ b.push_back(Expr(2_i));
+
+ auto* body = create<BlockStatement>(StatementList{
+ create<DiscardStatement>(),
+ });
+ auto* c = create<CaseStatement>(Source{Source::Location{20, 2}}, b, body);
+ auto src = c->source;
+ EXPECT_EQ(src.range.begin.line, 20u);
+ EXPECT_EQ(src.range.begin.column, 2u);
}
TEST_F(CaseStatementTest, IsDefault_WithoutSelectors) {
- auto* body = create<BlockStatement>(StatementList{
- create<DiscardStatement>(),
- });
- auto* c = create<CaseStatement>(CaseSelectorList{}, body);
- EXPECT_TRUE(c->IsDefault());
+ auto* body = create<BlockStatement>(StatementList{
+ create<DiscardStatement>(),
+ });
+ auto* c = create<CaseStatement>(CaseSelectorList{}, body);
+ EXPECT_TRUE(c->IsDefault());
}
TEST_F(CaseStatementTest, IsDefault_WithSelectors) {
- CaseSelectorList b;
- b.push_back(create<SintLiteralExpression>(2));
+ CaseSelectorList b;
+ b.push_back(Expr(2_i));
- auto* c = create<CaseStatement>(b, create<BlockStatement>(StatementList{}));
- EXPECT_FALSE(c->IsDefault());
+ auto* c = create<CaseStatement>(b, create<BlockStatement>(StatementList{}));
+ EXPECT_FALSE(c->IsDefault());
}
TEST_F(CaseStatementTest, IsCase) {
- auto* c = create<CaseStatement>(CaseSelectorList{},
- create<BlockStatement>(StatementList{}));
- EXPECT_TRUE(c->Is<CaseStatement>());
+ auto* c = create<CaseStatement>(CaseSelectorList{}, create<BlockStatement>(StatementList{}));
+ EXPECT_TRUE(c->Is<CaseStatement>());
}
TEST_F(CaseStatementTest, Assert_Null_Body) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b;
- b.create<CaseStatement>(CaseSelectorList{}, nullptr);
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b;
+ b.create<CaseStatement>(CaseSelectorList{}, nullptr);
+ },
+ "internal compiler error");
}
TEST_F(CaseStatementTest, Assert_Null_Selector) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b;
- b.create<CaseStatement>(CaseSelectorList{nullptr},
- b.create<BlockStatement>(StatementList{}));
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b;
+ b.create<CaseStatement>(CaseSelectorList{nullptr},
+ b.create<BlockStatement>(StatementList{}));
+ },
+ "internal compiler error");
}
TEST_F(CaseStatementTest, Assert_DifferentProgramID_Call) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b1;
- ProgramBuilder b2;
- b1.create<CaseStatement>(CaseSelectorList{},
- b2.create<BlockStatement>(StatementList{}));
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b1;
+ ProgramBuilder b2;
+ b1.create<CaseStatement>(CaseSelectorList{},
+ b2.create<BlockStatement>(StatementList{}));
+ },
+ "internal compiler error");
}
TEST_F(CaseStatementTest, Assert_DifferentProgramID_Selector) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b1;
- ProgramBuilder b2;
- b1.create<CaseStatement>(
- CaseSelectorList{b2.create<SintLiteralExpression>(2)},
- b1.create<BlockStatement>(StatementList{}));
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b1;
+ ProgramBuilder b2;
+ b1.create<CaseStatement>(CaseSelectorList{b2.Expr(2_i)},
+ b1.create<BlockStatement>(StatementList{}));
+ },
+ "internal compiler error");
}
} // namespace
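
The case-statement tests above now build selectors with the tint::number_suffixes literals (2_i, 2_u) instead of explicit SintLiteralExpression/UintLiteralExpression nodes. A minimal sketch of the new pattern, assuming only the headers touched by this patch (BuildCase is an illustrative name):

    #include "src/tint/ast/block_statement.h"
    #include "src/tint/ast/case_statement.h"
    #include "src/tint/ast/discard_statement.h"
    #include "src/tint/program_builder.h"

    using namespace tint::number_suffixes;  // NOLINT

    // Builds the AST for `case 2i: { discard; }`.
    const tint::ast::CaseStatement* BuildCase(tint::ProgramBuilder& b) {
        tint::ast::CaseSelectorList selectors;
        selectors.push_back(b.Expr(2_i));  // was create<SintLiteralExpression>(2)
        auto* body = b.create<tint::ast::BlockStatement>(tint::ast::StatementList{
            b.create<tint::ast::DiscardStatement>(),
        });
        return b.create<tint::ast::CaseStatement>(selectors, body);
    }
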
diff --git a/chromium/third_party/dawn/src/tint/ast/compound_assignment_statement.cc b/chromium/third_party/dawn/src/tint/ast/compound_assignment_statement.cc
index 2df37f78471..848d5003917 100644
--- a/chromium/third_party/dawn/src/tint/ast/compound_assignment_statement.cc
+++ b/chromium/third_party/dawn/src/tint/ast/compound_assignment_statement.cc
@@ -26,24 +26,22 @@ CompoundAssignmentStatement::CompoundAssignmentStatement(ProgramID pid,
const Expression* r,
BinaryOp o)
: Base(pid, src), lhs(l), rhs(r), op(o) {
- TINT_ASSERT(AST, lhs);
- TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, lhs, program_id);
- TINT_ASSERT(AST, rhs);
- TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, rhs, program_id);
+ TINT_ASSERT(AST, lhs);
+ TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, lhs, program_id);
+ TINT_ASSERT(AST, rhs);
+ TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, rhs, program_id);
}
-CompoundAssignmentStatement::CompoundAssignmentStatement(
- CompoundAssignmentStatement&&) = default;
+CompoundAssignmentStatement::CompoundAssignmentStatement(CompoundAssignmentStatement&&) = default;
CompoundAssignmentStatement::~CompoundAssignmentStatement() = default;
-const CompoundAssignmentStatement* CompoundAssignmentStatement::Clone(
- CloneContext* ctx) const {
- // Clone arguments outside of create() call to have deterministic ordering
- auto src = ctx->Clone(source);
- auto* l = ctx->Clone(lhs);
- auto* r = ctx->Clone(rhs);
- return ctx->dst->create<CompoundAssignmentStatement>(src, l, r, op);
+const CompoundAssignmentStatement* CompoundAssignmentStatement::Clone(CloneContext* ctx) const {
+ // Clone arguments outside of create() call to have deterministic ordering
+ auto src = ctx->Clone(source);
+ auto* l = ctx->Clone(lhs);
+ auto* r = ctx->Clone(rhs);
+ return ctx->dst->create<CompoundAssignmentStatement>(src, l, r, op);
}
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/compound_assignment_statement.h b/chromium/third_party/dawn/src/tint/ast/compound_assignment_statement.h
index 030efeaf511..ba9a558a6b2 100644
--- a/chromium/third_party/dawn/src/tint/ast/compound_assignment_statement.h
+++ b/chromium/third_party/dawn/src/tint/ast/compound_assignment_statement.h
@@ -22,38 +22,37 @@
namespace tint::ast {
/// A compound assignment statement
-class CompoundAssignmentStatement final
- : public Castable<CompoundAssignmentStatement, Statement> {
- public:
- /// Constructor
- /// @param program_id the identifier of the program that owns this node
- /// @param source the compound assignment statement source
- /// @param lhs the left side of the expression
- /// @param rhs the right side of the expression
- /// @param op the binary operator
- CompoundAssignmentStatement(ProgramID program_id,
- const Source& source,
- const Expression* lhs,
- const Expression* rhs,
- BinaryOp op);
- /// Move constructor
- CompoundAssignmentStatement(CompoundAssignmentStatement&&);
- ~CompoundAssignmentStatement() override;
-
- /// Clones this node and all transitive child nodes using the `CloneContext`
- /// `ctx`.
- /// @param ctx the clone context
- /// @return the newly cloned node
- const CompoundAssignmentStatement* Clone(CloneContext* ctx) const override;
-
- /// left side expression
- const Expression* const lhs;
-
- /// right side expression
- const Expression* const rhs;
-
- /// the binary operator
- const BinaryOp op;
+class CompoundAssignmentStatement final : public Castable<CompoundAssignmentStatement, Statement> {
+ public:
+ /// Constructor
+ /// @param program_id the identifier of the program that owns this node
+ /// @param source the compound assignment statement source
+ /// @param lhs the left side of the expression
+ /// @param rhs the right side of the expression
+ /// @param op the binary operator
+ CompoundAssignmentStatement(ProgramID program_id,
+ const Source& source,
+ const Expression* lhs,
+ const Expression* rhs,
+ BinaryOp op);
+ /// Move constructor
+ CompoundAssignmentStatement(CompoundAssignmentStatement&&);
+ ~CompoundAssignmentStatement() override;
+
+ /// Clones this node and all transitive child nodes using the `CloneContext`
+ /// `ctx`.
+ /// @param ctx the clone context
+ /// @return the newly cloned node
+ const CompoundAssignmentStatement* Clone(CloneContext* ctx) const override;
+
+ /// left side expression
+ const Expression* const lhs;
+
+ /// right side expression
+ const Expression* const rhs;
+
+ /// the binary operator
+ const BinaryOp op;
};
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/compound_assignment_statement_test.cc b/chromium/third_party/dawn/src/tint/ast/compound_assignment_statement_test.cc
index 080f93eda40..7560889b676 100644
--- a/chromium/third_party/dawn/src/tint/ast/compound_assignment_statement_test.cc
+++ b/chromium/third_party/dawn/src/tint/ast/compound_assignment_statement_test.cc
@@ -17,83 +17,80 @@
#include "gtest/gtest-spi.h"
#include "src/tint/ast/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::ast {
namespace {
using CompoundAssignmentStatementTest = TestHelper;
TEST_F(CompoundAssignmentStatementTest, Creation) {
- auto* lhs = Expr("lhs");
- auto* rhs = Expr("rhs");
- auto op = BinaryOp::kAdd;
-
- auto* stmt = create<CompoundAssignmentStatement>(lhs, rhs, op);
- EXPECT_EQ(stmt->lhs, lhs);
- EXPECT_EQ(stmt->rhs, rhs);
- EXPECT_EQ(stmt->op, op);
+ auto* lhs = Expr("lhs");
+ auto* rhs = Expr("rhs");
+ auto op = BinaryOp::kAdd;
+
+ auto* stmt = create<CompoundAssignmentStatement>(lhs, rhs, op);
+ EXPECT_EQ(stmt->lhs, lhs);
+ EXPECT_EQ(stmt->rhs, rhs);
+ EXPECT_EQ(stmt->op, op);
}
TEST_F(CompoundAssignmentStatementTest, CreationWithSource) {
- auto* lhs = Expr("lhs");
- auto* rhs = Expr("rhs");
- auto op = BinaryOp::kMultiply;
-
- auto* stmt = create<CompoundAssignmentStatement>(
- Source{Source::Location{20, 2}}, lhs, rhs, op);
- auto src = stmt->source;
- EXPECT_EQ(src.range.begin.line, 20u);
- EXPECT_EQ(src.range.begin.column, 2u);
+ auto* lhs = Expr("lhs");
+ auto* rhs = Expr("rhs");
+ auto op = BinaryOp::kMultiply;
+
+ auto* stmt = create<CompoundAssignmentStatement>(Source{Source::Location{20, 2}}, lhs, rhs, op);
+ auto src = stmt->source;
+ EXPECT_EQ(src.range.begin.line, 20u);
+ EXPECT_EQ(src.range.begin.column, 2u);
}
TEST_F(CompoundAssignmentStatementTest, IsCompoundAssign) {
- auto* lhs = Expr("lhs");
- auto* rhs = Expr("rhs");
- auto op = BinaryOp::kSubtract;
+ auto* lhs = Expr("lhs");
+ auto* rhs = Expr("rhs");
+ auto op = BinaryOp::kSubtract;
- auto* stmt = create<CompoundAssignmentStatement>(lhs, rhs, op);
- EXPECT_TRUE(stmt->Is<CompoundAssignmentStatement>());
+ auto* stmt = create<CompoundAssignmentStatement>(lhs, rhs, op);
+ EXPECT_TRUE(stmt->Is<CompoundAssignmentStatement>());
}
TEST_F(CompoundAssignmentStatementTest, Assert_Null_LHS) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b;
- b.create<CompoundAssignmentStatement>(nullptr, b.Expr(1),
- BinaryOp::kAdd);
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b;
+ b.create<CompoundAssignmentStatement>(nullptr, b.Expr(1_i), BinaryOp::kAdd);
+ },
+ "internal compiler error");
}
TEST_F(CompoundAssignmentStatementTest, Assert_Null_RHS) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b;
- b.create<CompoundAssignmentStatement>(b.Expr(1), nullptr,
- BinaryOp::kAdd);
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b;
+ b.create<CompoundAssignmentStatement>(b.Expr(1_i), nullptr, BinaryOp::kAdd);
+ },
+ "internal compiler error");
}
TEST_F(CompoundAssignmentStatementTest, Assert_DifferentProgramID_LHS) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b1;
- ProgramBuilder b2;
- b1.create<CompoundAssignmentStatement>(b2.Expr("lhs"), b1.Expr("rhs"),
- BinaryOp::kAdd);
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b1;
+ ProgramBuilder b2;
+ b1.create<CompoundAssignmentStatement>(b2.Expr("lhs"), b1.Expr("rhs"), BinaryOp::kAdd);
+ },
+ "internal compiler error");
}
TEST_F(CompoundAssignmentStatementTest, Assert_DifferentProgramID_RHS) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b1;
- ProgramBuilder b2;
- b1.create<CompoundAssignmentStatement>(b1.Expr("lhs"), b2.Expr("rhs"),
- BinaryOp::kAdd);
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b1;
+ ProgramBuilder b2;
+ b1.create<CompoundAssignmentStatement>(b1.Expr("lhs"), b2.Expr("rhs"), BinaryOp::kAdd);
+ },
+ "internal compiler error");
}
} // namespace
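
CompoundAssignmentStatement carries the lhs, the rhs, and the BinaryOp in a single node. A minimal sketch of constructing `v += 1i` with the builder calls shown above (BuildPlusEquals is an illustrative name):

    #include "src/tint/ast/compound_assignment_statement.h"
    #include "src/tint/program_builder.h"

    using namespace tint::number_suffixes;  // NOLINT

    // Builds the AST for `v += 1i`.
    const tint::ast::CompoundAssignmentStatement* BuildPlusEquals(tint::ProgramBuilder& b) {
        return b.create<tint::ast::CompoundAssignmentStatement>(b.Expr("v"), b.Expr(1_i),
                                                                tint::ast::BinaryOp::kAdd);
    }
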
diff --git a/chromium/third_party/dawn/src/tint/ast/continue_statement.cc b/chromium/third_party/dawn/src/tint/ast/continue_statement.cc
index e7d669f33c3..8ae4b9c1eee 100644
--- a/chromium/third_party/dawn/src/tint/ast/continue_statement.cc
+++ b/chromium/third_party/dawn/src/tint/ast/continue_statement.cc
@@ -20,17 +20,16 @@ TINT_INSTANTIATE_TYPEINFO(tint::ast::ContinueStatement);
namespace tint::ast {
-ContinueStatement::ContinueStatement(ProgramID pid, const Source& src)
- : Base(pid, src) {}
+ContinueStatement::ContinueStatement(ProgramID pid, const Source& src) : Base(pid, src) {}
ContinueStatement::ContinueStatement(ContinueStatement&&) = default;
ContinueStatement::~ContinueStatement() = default;
const ContinueStatement* ContinueStatement::Clone(CloneContext* ctx) const {
- // Clone arguments outside of create() call to have deterministic ordering
- auto src = ctx->Clone(source);
- return ctx->dst->create<ContinueStatement>(src);
+ // Clone arguments outside of create() call to have deterministic ordering
+ auto src = ctx->Clone(source);
+ return ctx->dst->create<ContinueStatement>(src);
}
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/continue_statement.h b/chromium/third_party/dawn/src/tint/ast/continue_statement.h
index 5761bfcd1bd..17d8586d900 100644
--- a/chromium/third_party/dawn/src/tint/ast/continue_statement.h
+++ b/chromium/third_party/dawn/src/tint/ast/continue_statement.h
@@ -21,20 +21,20 @@ namespace tint::ast {
/// An continue statement
class ContinueStatement final : public Castable<ContinueStatement, Statement> {
- public:
- /// Constructor
- /// @param pid the identifier of the program that owns this node
- /// @param src the source of this node
- ContinueStatement(ProgramID pid, const Source& src);
- /// Move constructor
- ContinueStatement(ContinueStatement&&);
- ~ContinueStatement() override;
+ public:
+ /// Constructor
+ /// @param pid the identifier of the program that owns this node
+ /// @param src the source of this node
+ ContinueStatement(ProgramID pid, const Source& src);
+ /// Move constructor
+ ContinueStatement(ContinueStatement&&);
+ ~ContinueStatement() override;
- /// Clones this node and all transitive child nodes using the `CloneContext`
- /// `ctx`.
- /// @param ctx the clone context
- /// @return the newly cloned node
- const ContinueStatement* Clone(CloneContext* ctx) const override;
+ /// Clones this node and all transitive child nodes using the `CloneContext`
+ /// `ctx`.
+ /// @param ctx the clone context
+ /// @return the newly cloned node
+ const ContinueStatement* Clone(CloneContext* ctx) const override;
};
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/continue_statement_test.cc b/chromium/third_party/dawn/src/tint/ast/continue_statement_test.cc
index 943cdee4b0a..31cabddf602 100644
--- a/chromium/third_party/dawn/src/tint/ast/continue_statement_test.cc
+++ b/chromium/third_party/dawn/src/tint/ast/continue_statement_test.cc
@@ -22,15 +22,15 @@ namespace {
using ContinueStatementTest = TestHelper;
TEST_F(ContinueStatementTest, Creation_WithSource) {
- auto* stmt = create<ContinueStatement>(Source{Source::Location{20, 2}});
- auto src = stmt->source;
- EXPECT_EQ(src.range.begin.line, 20u);
- EXPECT_EQ(src.range.begin.column, 2u);
+ auto* stmt = create<ContinueStatement>(Source{Source::Location{20, 2}});
+ auto src = stmt->source;
+ EXPECT_EQ(src.range.begin.line, 20u);
+ EXPECT_EQ(src.range.begin.column, 2u);
}
TEST_F(ContinueStatementTest, IsContinue) {
- auto* stmt = create<ContinueStatement>();
- EXPECT_TRUE(stmt->Is<ContinueStatement>());
+ auto* stmt = create<ContinueStatement>();
+ EXPECT_TRUE(stmt->Is<ContinueStatement>());
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/ast/depth_multisampled_texture.cc b/chromium/third_party/dawn/src/tint/ast/depth_multisampled_texture.cc
index 9b09864afe4..66c5a86c525 100644
--- a/chromium/third_party/dawn/src/tint/ast/depth_multisampled_texture.cc
+++ b/chromium/third_party/dawn/src/tint/ast/depth_multisampled_texture.cc
@@ -22,7 +22,7 @@ namespace tint::ast {
namespace {
bool IsValidDepthDimension(TextureDimension dim) {
- return dim == TextureDimension::k2d;
+ return dim == TextureDimension::k2d;
}
} // namespace
@@ -31,24 +31,22 @@ DepthMultisampledTexture::DepthMultisampledTexture(ProgramID pid,
const Source& src,
TextureDimension d)
: Base(pid, src, d) {
- TINT_ASSERT(AST, IsValidDepthDimension(dim));
+ TINT_ASSERT(AST, IsValidDepthDimension(dim));
}
-DepthMultisampledTexture::DepthMultisampledTexture(DepthMultisampledTexture&&) =
- default;
+DepthMultisampledTexture::DepthMultisampledTexture(DepthMultisampledTexture&&) = default;
DepthMultisampledTexture::~DepthMultisampledTexture() = default;
std::string DepthMultisampledTexture::FriendlyName(const SymbolTable&) const {
- std::ostringstream out;
- out << "texture_depth_multisampled_" << dim;
- return out.str();
+ std::ostringstream out;
+ out << "texture_depth_multisampled_" << dim;
+ return out.str();
}
-const DepthMultisampledTexture* DepthMultisampledTexture::Clone(
- CloneContext* ctx) const {
- auto src = ctx->Clone(source);
- return ctx->dst->create<DepthMultisampledTexture>(src, dim);
+const DepthMultisampledTexture* DepthMultisampledTexture::Clone(CloneContext* ctx) const {
+ auto src = ctx->Clone(source);
+ return ctx->dst->create<DepthMultisampledTexture>(src, dim);
}
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/depth_multisampled_texture.h b/chromium/third_party/dawn/src/tint/ast/depth_multisampled_texture.h
index 67f6ab0077e..d15ac7a7def 100644
--- a/chromium/third_party/dawn/src/tint/ast/depth_multisampled_texture.h
+++ b/chromium/third_party/dawn/src/tint/ast/depth_multisampled_texture.h
@@ -22,29 +22,26 @@
namespace tint::ast {
/// A multisampled depth texture type.
-class DepthMultisampledTexture final
- : public Castable<DepthMultisampledTexture, Texture> {
- public:
- /// Constructor
- /// @param pid the identifier of the program that owns this node
- /// @param src the source of this node
- /// @param dim the dimensionality of the texture
- DepthMultisampledTexture(ProgramID pid,
- const Source& src,
- TextureDimension dim);
- /// Move constructor
- DepthMultisampledTexture(DepthMultisampledTexture&&);
- ~DepthMultisampledTexture() override;
-
- /// @param symbols the program's symbol table
- /// @returns the name for this type that closely resembles how it would be
- /// declared in WGSL.
- std::string FriendlyName(const SymbolTable& symbols) const override;
-
- /// Clones this type and all transitive types using the `CloneContext` `ctx`.
- /// @param ctx the clone context
- /// @return the newly cloned type
- const DepthMultisampledTexture* Clone(CloneContext* ctx) const override;
+class DepthMultisampledTexture final : public Castable<DepthMultisampledTexture, Texture> {
+ public:
+ /// Constructor
+ /// @param pid the identifier of the program that owns this node
+ /// @param src the source of this node
+ /// @param dim the dimensionality of the texture
+ DepthMultisampledTexture(ProgramID pid, const Source& src, TextureDimension dim);
+ /// Move constructor
+ DepthMultisampledTexture(DepthMultisampledTexture&&);
+ ~DepthMultisampledTexture() override;
+
+ /// @param symbols the program's symbol table
+ /// @returns the name for this type that closely resembles how it would be
+ /// declared in WGSL.
+ std::string FriendlyName(const SymbolTable& symbols) const override;
+
+ /// Clones this type and all transitive types using the `CloneContext` `ctx`.
+ /// @param ctx the clone context
+ /// @return the newly cloned type
+ const DepthMultisampledTexture* Clone(CloneContext* ctx) const override;
};
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/depth_multisampled_texture_test.cc b/chromium/third_party/dawn/src/tint/ast/depth_multisampled_texture_test.cc
index 0ae0adac95a..1de335d674e 100644
--- a/chromium/third_party/dawn/src/tint/ast/depth_multisampled_texture_test.cc
+++ b/chromium/third_party/dawn/src/tint/ast/depth_multisampled_texture_test.cc
@@ -22,13 +22,13 @@ namespace {
using AstDepthMultisampledTextureTest = TestHelper;
TEST_F(AstDepthMultisampledTextureTest, Dim) {
- auto* d = create<DepthMultisampledTexture>(TextureDimension::k2d);
- EXPECT_EQ(d->dim, TextureDimension::k2d);
+ auto* d = create<DepthMultisampledTexture>(TextureDimension::k2d);
+ EXPECT_EQ(d->dim, TextureDimension::k2d);
}
TEST_F(AstDepthMultisampledTextureTest, FriendlyName) {
- auto* d = create<DepthMultisampledTexture>(TextureDimension::k2d);
- EXPECT_EQ(d->FriendlyName(Symbols()), "texture_depth_multisampled_2d");
+ auto* d = create<DepthMultisampledTexture>(TextureDimension::k2d);
+ EXPECT_EQ(d->FriendlyName(Symbols()), "texture_depth_multisampled_2d");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/ast/depth_texture.cc b/chromium/third_party/dawn/src/tint/ast/depth_texture.cc
index fc1c9239878..6c0858ffbb8 100644
--- a/chromium/third_party/dawn/src/tint/ast/depth_texture.cc
+++ b/chromium/third_party/dawn/src/tint/ast/depth_texture.cc
@@ -22,15 +22,15 @@ namespace tint::ast {
namespace {
bool IsValidDepthDimension(TextureDimension dim) {
- return dim == TextureDimension::k2d || dim == TextureDimension::k2dArray ||
- dim == TextureDimension::kCube || dim == TextureDimension::kCubeArray;
+ return dim == TextureDimension::k2d || dim == TextureDimension::k2dArray ||
+ dim == TextureDimension::kCube || dim == TextureDimension::kCubeArray;
}
} // namespace
DepthTexture::DepthTexture(ProgramID pid, const Source& src, TextureDimension d)
: Base(pid, src, d) {
- TINT_ASSERT(AST, IsValidDepthDimension(dim));
+ TINT_ASSERT(AST, IsValidDepthDimension(dim));
}
DepthTexture::DepthTexture(DepthTexture&&) = default;
@@ -38,14 +38,14 @@ DepthTexture::DepthTexture(DepthTexture&&) = default;
DepthTexture::~DepthTexture() = default;
std::string DepthTexture::FriendlyName(const SymbolTable&) const {
- std::ostringstream out;
- out << "texture_depth_" << dim;
- return out.str();
+ std::ostringstream out;
+ out << "texture_depth_" << dim;
+ return out.str();
}
const DepthTexture* DepthTexture::Clone(CloneContext* ctx) const {
- auto src = ctx->Clone(source);
- return ctx->dst->create<DepthTexture>(src, dim);
+ auto src = ctx->Clone(source);
+ return ctx->dst->create<DepthTexture>(src, dim);
}
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/depth_texture.h b/chromium/third_party/dawn/src/tint/ast/depth_texture.h
index ddf4e9589bf..42349e32e12 100644
--- a/chromium/third_party/dawn/src/tint/ast/depth_texture.h
+++ b/chromium/third_party/dawn/src/tint/ast/depth_texture.h
@@ -23,25 +23,25 @@ namespace tint::ast {
/// A depth texture type.
class DepthTexture final : public Castable<DepthTexture, Texture> {
- public:
- /// Constructor
- /// @param pid the identifier of the program that owns this node
- /// @param src the source of this node
- /// @param dim the dimensionality of the texture
- DepthTexture(ProgramID pid, const Source& src, TextureDimension dim);
- /// Move constructor
- DepthTexture(DepthTexture&&);
- ~DepthTexture() override;
-
- /// @param symbols the program's symbol table
- /// @returns the name for this type that closely resembles how it would be
- /// declared in WGSL.
- std::string FriendlyName(const SymbolTable& symbols) const override;
-
- /// Clones this type and all transitive types using the `CloneContext` `ctx`.
- /// @param ctx the clone context
- /// @return the newly cloned type
- const DepthTexture* Clone(CloneContext* ctx) const override;
+ public:
+ /// Constructor
+ /// @param pid the identifier of the program that owns this node
+ /// @param src the source of this node
+ /// @param dim the dimensionality of the texture
+ DepthTexture(ProgramID pid, const Source& src, TextureDimension dim);
+ /// Move constructor
+ DepthTexture(DepthTexture&&);
+ ~DepthTexture() override;
+
+ /// @param symbols the program's symbol table
+ /// @returns the name for this type that closely resembles how it would be
+ /// declared in WGSL.
+ std::string FriendlyName(const SymbolTable& symbols) const override;
+
+ /// Clones this type and all transitive types using the `CloneContext` `ctx`.
+ /// @param ctx the clone context
+ /// @return the newly cloned type
+ const DepthTexture* Clone(CloneContext* ctx) const override;
};
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/depth_texture_test.cc b/chromium/third_party/dawn/src/tint/ast/depth_texture_test.cc
index 4ec2aaa8246..15dc356e09c 100644
--- a/chromium/third_party/dawn/src/tint/ast/depth_texture_test.cc
+++ b/chromium/third_party/dawn/src/tint/ast/depth_texture_test.cc
@@ -22,20 +22,20 @@ namespace {
using AstDepthTextureTest = TestHelper;
TEST_F(AstDepthTextureTest, IsTexture) {
- Texture* ty = create<DepthTexture>(TextureDimension::kCube);
- EXPECT_TRUE(ty->Is<DepthTexture>());
- EXPECT_FALSE(ty->Is<SampledTexture>());
- EXPECT_FALSE(ty->Is<StorageTexture>());
+ Texture* ty = create<DepthTexture>(TextureDimension::kCube);
+ EXPECT_TRUE(ty->Is<DepthTexture>());
+ EXPECT_FALSE(ty->Is<SampledTexture>());
+ EXPECT_FALSE(ty->Is<StorageTexture>());
}
TEST_F(AstDepthTextureTest, Dim) {
- auto* d = create<DepthTexture>(TextureDimension::kCube);
- EXPECT_EQ(d->dim, TextureDimension::kCube);
+ auto* d = create<DepthTexture>(TextureDimension::kCube);
+ EXPECT_EQ(d->dim, TextureDimension::kCube);
}
TEST_F(AstDepthTextureTest, FriendlyName) {
- auto* d = create<DepthTexture>(TextureDimension::kCube);
- EXPECT_EQ(d->FriendlyName(Symbols()), "texture_depth_cube");
+ auto* d = create<DepthTexture>(TextureDimension::kCube);
+ EXPECT_EQ(d->FriendlyName(Symbols()), "texture_depth_cube");
}
} // namespace
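
DepthTexture::FriendlyName() formats the type as it would be written in WGSL, e.g. "texture_depth_cube" for a cube-dimension depth texture. A minimal sketch using the builder calls from the test above (DepthCubeName is an illustrative name):

    #include <string>

    #include "src/tint/ast/depth_texture.h"
    #include "src/tint/program_builder.h"

    // Returns "texture_depth_cube".
    std::string DepthCubeName(tint::ProgramBuilder& b) {
        auto* ty = b.create<tint::ast::DepthTexture>(tint::ast::TextureDimension::kCube);
        return ty->FriendlyName(b.Symbols());
    }
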
diff --git a/chromium/third_party/dawn/src/tint/ast/disable_validation_attribute.cc b/chromium/third_party/dawn/src/tint/ast/disable_validation_attribute.cc
index cd009b4349c..4bc9f74c650 100644
--- a/chromium/third_party/dawn/src/tint/ast/disable_validation_attribute.cc
+++ b/chromium/third_party/dawn/src/tint/ast/disable_validation_attribute.cc
@@ -20,36 +20,33 @@ TINT_INSTANTIATE_TYPEINFO(tint::ast::DisableValidationAttribute);
namespace tint::ast {
-DisableValidationAttribute::DisableValidationAttribute(ProgramID pid,
- DisabledValidation val)
+DisableValidationAttribute::DisableValidationAttribute(ProgramID pid, DisabledValidation val)
: Base(pid), validation(val) {}
DisableValidationAttribute::~DisableValidationAttribute() = default;
std::string DisableValidationAttribute::InternalName() const {
- switch (validation) {
- case DisabledValidation::kFunctionHasNoBody:
- return "disable_validation__function_has_no_body";
- case DisabledValidation::kBindingPointCollision:
- return "disable_validation__binding_point_collision";
- case DisabledValidation::kIgnoreStorageClass:
- return "disable_validation__ignore_storage_class";
- case DisabledValidation::kEntryPointParameter:
- return "disable_validation__entry_point_parameter";
- case DisabledValidation::kIgnoreConstructibleFunctionParameter:
- return "disable_validation__ignore_constructible_function_parameter";
- case DisabledValidation::kIgnoreStrideAttribute:
- return "disable_validation__ignore_stride";
- case DisabledValidation::kIgnoreInvalidPointerArgument:
- return "disable_validation__ignore_invalid_pointer_argument";
- }
- return "<invalid>";
+ switch (validation) {
+ case DisabledValidation::kFunctionHasNoBody:
+ return "disable_validation__function_has_no_body";
+ case DisabledValidation::kBindingPointCollision:
+ return "disable_validation__binding_point_collision";
+ case DisabledValidation::kIgnoreStorageClass:
+ return "disable_validation__ignore_storage_class";
+ case DisabledValidation::kEntryPointParameter:
+ return "disable_validation__entry_point_parameter";
+ case DisabledValidation::kIgnoreConstructibleFunctionParameter:
+ return "disable_validation__ignore_constructible_function_parameter";
+ case DisabledValidation::kIgnoreStrideAttribute:
+ return "disable_validation__ignore_stride";
+ case DisabledValidation::kIgnoreInvalidPointerArgument:
+ return "disable_validation__ignore_invalid_pointer_argument";
+ }
+ return "<invalid>";
}
-const DisableValidationAttribute* DisableValidationAttribute::Clone(
- CloneContext* ctx) const {
- return ctx->dst->ASTNodes().Create<DisableValidationAttribute>(ctx->dst->ID(),
- validation);
+const DisableValidationAttribute* DisableValidationAttribute::Clone(CloneContext* ctx) const {
+ return ctx->dst->ASTNodes().Create<DisableValidationAttribute>(ctx->dst->ID(), validation);
}
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/disable_validation_attribute.h b/chromium/third_party/dawn/src/tint/ast/disable_validation_attribute.h
index 5bc37ee3961..db70ad4c9ad 100644
--- a/chromium/third_party/dawn/src/tint/ast/disable_validation_attribute.h
+++ b/chromium/third_party/dawn/src/tint/ast/disable_validation_attribute.h
@@ -24,28 +24,28 @@ namespace tint::ast {
/// Enumerator of validation features that can be disabled with a
/// DisableValidationAttribute attribute.
enum class DisabledValidation {
- /// When applied to a function, the validator will not complain there is no
- /// body to a function.
- kFunctionHasNoBody,
- /// When applied to a module-scoped variable, the validator will not complain
- /// if two resource variables have the same binding points.
- kBindingPointCollision,
- /// When applied to a variable, the validator will not complain about the
- /// declared storage class.
- kIgnoreStorageClass,
- /// When applied to an entry-point function parameter, the validator will not
- /// check for entry IO attributes.
- kEntryPointParameter,
- /// When applied to a function parameter, the validator will not
- /// check if parameter type is constructible
- kIgnoreConstructibleFunctionParameter,
- /// When applied to a member attribute, a stride attribute may be applied to
- /// non-array types.
- kIgnoreStrideAttribute,
- /// When applied to a pointer function parameter, the validator will not
- /// require a function call argument passed for that parameter to have a
- /// certain form.
- kIgnoreInvalidPointerArgument,
+ /// When applied to a function, the validator will not complain there is no
+ /// body to a function.
+ kFunctionHasNoBody,
+ /// When applied to a module-scoped variable, the validator will not complain
+ /// if two resource variables have the same binding points.
+ kBindingPointCollision,
+ /// When applied to a variable, the validator will not complain about the
+ /// declared storage class.
+ kIgnoreStorageClass,
+ /// When applied to an entry-point function parameter, the validator will not
+ /// check for entry IO attributes.
+ kEntryPointParameter,
+ /// When applied to a function parameter, the validator will not
+ /// check if parameter type is constructible
+ kIgnoreConstructibleFunctionParameter,
+ /// When applied to a member attribute, a stride attribute may be applied to
+ /// non-array types.
+ kIgnoreStrideAttribute,
+ /// When applied to a pointer function parameter, the validator will not
+ /// require a function call argument passed for that parameter to have a
+ /// certain form.
+ kIgnoreInvalidPointerArgument,
};
/// An internal attribute used to tell the validator to ignore specific
@@ -53,27 +53,26 @@ enum class DisabledValidation {
/// would otherwise cause validation errors.
class DisableValidationAttribute final
: public Castable<DisableValidationAttribute, InternalAttribute> {
- public:
- /// Constructor
- /// @param program_id the identifier of the program that owns this node
- /// @param validation the validation to disable
- explicit DisableValidationAttribute(ProgramID program_id,
- DisabledValidation validation);
+ public:
+ /// Constructor
+ /// @param program_id the identifier of the program that owns this node
+ /// @param validation the validation to disable
+ explicit DisableValidationAttribute(ProgramID program_id, DisabledValidation validation);
- /// Destructor
- ~DisableValidationAttribute() override;
+ /// Destructor
+ ~DisableValidationAttribute() override;
- /// @return a short description of the internal attribute which will be
- /// displayed in WGSL as `@internal(<name>)` (but is not parsable).
- std::string InternalName() const override;
+ /// @return a short description of the internal attribute which will be
+ /// displayed in WGSL as `@internal(<name>)` (but is not parsable).
+ std::string InternalName() const override;
- /// Performs a deep clone of this object using the CloneContext `ctx`.
- /// @param ctx the clone context
- /// @return the newly cloned object
- const DisableValidationAttribute* Clone(CloneContext* ctx) const override;
+ /// Performs a deep clone of this object using the CloneContext `ctx`.
+ /// @param ctx the clone context
+ /// @return the newly cloned object
+ const DisableValidationAttribute* Clone(CloneContext* ctx) const override;
- /// The validation that this attribute disables
- const DisabledValidation validation;
+ /// The validation that this attribute disables
+ const DisabledValidation validation;
};
} // namespace tint::ast
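
The DisabledValidation enum above lists the individual checks this internal attribute can suppress, and the Clone() implementation earlier in the patch shows the attribute being allocated through the builder's AST-node list rather than create<>(). A minimal sketch under the same assumptions (MakeDisable is an illustrative name):

    #include "src/tint/ast/disable_validation_attribute.h"
    #include "src/tint/program_builder.h"

    // Allocates a disable_validation__ignore_storage_class attribute on `b`.
    const tint::ast::DisableValidationAttribute* MakeDisable(tint::ProgramBuilder& b) {
        return b.ASTNodes().Create<tint::ast::DisableValidationAttribute>(
            b.ID(), tint::ast::DisabledValidation::kIgnoreStorageClass);
    }
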
diff --git a/chromium/third_party/dawn/src/tint/ast/discard_statement.cc b/chromium/third_party/dawn/src/tint/ast/discard_statement.cc
index ba081ab906f..7ca673f6298 100644
--- a/chromium/third_party/dawn/src/tint/ast/discard_statement.cc
+++ b/chromium/third_party/dawn/src/tint/ast/discard_statement.cc
@@ -20,17 +20,16 @@ TINT_INSTANTIATE_TYPEINFO(tint::ast::DiscardStatement);
namespace tint::ast {
-DiscardStatement::DiscardStatement(ProgramID pid, const Source& src)
- : Base(pid, src) {}
+DiscardStatement::DiscardStatement(ProgramID pid, const Source& src) : Base(pid, src) {}
DiscardStatement::DiscardStatement(DiscardStatement&&) = default;
DiscardStatement::~DiscardStatement() = default;
const DiscardStatement* DiscardStatement::Clone(CloneContext* ctx) const {
- // Clone arguments outside of create() call to have deterministic ordering
- auto src = ctx->Clone(source);
- return ctx->dst->create<DiscardStatement>(src);
+ // Clone arguments outside of create() call to have deterministic ordering
+ auto src = ctx->Clone(source);
+ return ctx->dst->create<DiscardStatement>(src);
}
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/discard_statement.h b/chromium/third_party/dawn/src/tint/ast/discard_statement.h
index c17813edf7d..9d18c744545 100644
--- a/chromium/third_party/dawn/src/tint/ast/discard_statement.h
+++ b/chromium/third_party/dawn/src/tint/ast/discard_statement.h
@@ -21,20 +21,20 @@ namespace tint::ast {
/// A discard statement
class DiscardStatement final : public Castable<DiscardStatement, Statement> {
- public:
- /// Constructor
- /// @param pid the identifier of the program that owns this node
- /// @param src the source of this node
- DiscardStatement(ProgramID pid, const Source& src);
- /// Move constructor
- DiscardStatement(DiscardStatement&&);
- ~DiscardStatement() override;
+ public:
+ /// Constructor
+ /// @param pid the identifier of the program that owns this node
+ /// @param src the source of this node
+ DiscardStatement(ProgramID pid, const Source& src);
+ /// Move constructor
+ DiscardStatement(DiscardStatement&&);
+ ~DiscardStatement() override;
- /// Clones this node and all transitive child nodes using the `CloneContext`
- /// `ctx`.
- /// @param ctx the clone context
- /// @return the newly cloned node
- const DiscardStatement* Clone(CloneContext* ctx) const override;
+ /// Clones this node and all transitive child nodes using the `CloneContext`
+ /// `ctx`.
+ /// @param ctx the clone context
+ /// @return the newly cloned node
+ const DiscardStatement* Clone(CloneContext* ctx) const override;
};
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/discard_statement_test.cc b/chromium/third_party/dawn/src/tint/ast/discard_statement_test.cc
index 4101ef14993..ee16ed34dd5 100644
--- a/chromium/third_party/dawn/src/tint/ast/discard_statement_test.cc
+++ b/chromium/third_party/dawn/src/tint/ast/discard_statement_test.cc
@@ -22,25 +22,25 @@ namespace {
using DiscardStatementTest = TestHelper;
TEST_F(DiscardStatementTest, Creation) {
- auto* stmt = create<DiscardStatement>();
- EXPECT_EQ(stmt->source.range.begin.line, 0u);
- EXPECT_EQ(stmt->source.range.begin.column, 0u);
- EXPECT_EQ(stmt->source.range.end.line, 0u);
- EXPECT_EQ(stmt->source.range.end.column, 0u);
+ auto* stmt = create<DiscardStatement>();
+ EXPECT_EQ(stmt->source.range.begin.line, 0u);
+ EXPECT_EQ(stmt->source.range.begin.column, 0u);
+ EXPECT_EQ(stmt->source.range.end.line, 0u);
+ EXPECT_EQ(stmt->source.range.end.column, 0u);
}
TEST_F(DiscardStatementTest, Creation_WithSource) {
- auto* stmt = create<DiscardStatement>(
- Source{Source::Range{Source::Location{20, 2}, Source::Location{20, 5}}});
- EXPECT_EQ(stmt->source.range.begin.line, 20u);
- EXPECT_EQ(stmt->source.range.begin.column, 2u);
- EXPECT_EQ(stmt->source.range.end.line, 20u);
- EXPECT_EQ(stmt->source.range.end.column, 5u);
+ auto* stmt = create<DiscardStatement>(
+ Source{Source::Range{Source::Location{20, 2}, Source::Location{20, 5}}});
+ EXPECT_EQ(stmt->source.range.begin.line, 20u);
+ EXPECT_EQ(stmt->source.range.begin.column, 2u);
+ EXPECT_EQ(stmt->source.range.end.line, 20u);
+ EXPECT_EQ(stmt->source.range.end.column, 5u);
}
TEST_F(DiscardStatementTest, IsDiscard) {
- auto* stmt = create<DiscardStatement>();
- EXPECT_TRUE(stmt->Is<DiscardStatement>());
+ auto* stmt = create<DiscardStatement>();
+ EXPECT_TRUE(stmt->Is<DiscardStatement>());
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/ast/else_statement.cc b/chromium/third_party/dawn/src/tint/ast/else_statement.cc
deleted file mode 100644
index d047177a7fc..00000000000
--- a/chromium/third_party/dawn/src/tint/ast/else_statement.cc
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2020 The Tint Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "src/tint/ast/else_statement.h"
-
-#include "src/tint/program_builder.h"
-
-TINT_INSTANTIATE_TYPEINFO(tint::ast::ElseStatement);
-
-namespace tint::ast {
-
-ElseStatement::ElseStatement(ProgramID pid,
- const Source& src,
- const Expression* cond,
- const BlockStatement* b)
- : Base(pid, src), condition(cond), body(b) {
- TINT_ASSERT(AST, body);
- TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, body, program_id);
- TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, condition, program_id);
-}
-
-ElseStatement::ElseStatement(ElseStatement&&) = default;
-
-ElseStatement::~ElseStatement() = default;
-
-const ElseStatement* ElseStatement::Clone(CloneContext* ctx) const {
- // Clone arguments outside of create() call to have deterministic ordering
- auto src = ctx->Clone(source);
- auto* cond = ctx->Clone(condition);
- auto* b = ctx->Clone(body);
- return ctx->dst->create<ElseStatement>(src, cond, b);
-}
-
-} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/else_statement.h b/chromium/third_party/dawn/src/tint/ast/else_statement.h
deleted file mode 100644
index c8c6e51fda6..00000000000
--- a/chromium/third_party/dawn/src/tint/ast/else_statement.h
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright 2020 The Tint Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef SRC_TINT_AST_ELSE_STATEMENT_H_
-#define SRC_TINT_AST_ELSE_STATEMENT_H_
-
-#include <vector>
-
-#include "src/tint/ast/block_statement.h"
-#include "src/tint/ast/expression.h"
-
-namespace tint::ast {
-
-/// An else statement
-class ElseStatement final : public Castable<ElseStatement, Statement> {
- public:
- /// Constructor
- /// @param pid the identifier of the program that owns this node
- /// @param src the source of this node
- /// @param condition the else condition
- /// @param body the else body
- ElseStatement(ProgramID pid,
- const Source& src,
- const Expression* condition,
- const BlockStatement* body);
- /// Move constructor
- ElseStatement(ElseStatement&&);
- ~ElseStatement() override;
-
- /// Clones this node and all transitive child nodes using the `CloneContext`
- /// `ctx`.
- /// @param ctx the clone context
- /// @return the newly cloned node
- const ElseStatement* Clone(CloneContext* ctx) const override;
-
- /// The else condition or nullptr if none set
- const Expression* const condition;
-
- /// The else body
- const BlockStatement* const body;
-};
-
-/// A list of else statements
-using ElseStatementList = std::vector<const ElseStatement*>;
-
-} // namespace tint::ast
-
-#endif // SRC_TINT_AST_ELSE_STATEMENT_H_
diff --git a/chromium/third_party/dawn/src/tint/ast/else_statement_test.cc b/chromium/third_party/dawn/src/tint/ast/else_statement_test.cc
deleted file mode 100644
index a0884388228..00000000000
--- a/chromium/third_party/dawn/src/tint/ast/else_statement_test.cc
+++ /dev/null
@@ -1,92 +0,0 @@
-// Copyright 2020 The Tint Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "gtest/gtest-spi.h"
-#include "src/tint/ast/discard_statement.h"
-#include "src/tint/ast/if_statement.h"
-#include "src/tint/ast/test_helper.h"
-
-namespace tint::ast {
-namespace {
-
-using ElseStatementTest = TestHelper;
-
-TEST_F(ElseStatementTest, Creation) {
- auto* cond = Expr(true);
- auto* body = create<BlockStatement>(StatementList{
- create<DiscardStatement>(),
- });
- auto* discard = body->statements[0];
-
- auto* e = create<ElseStatement>(cond, body);
- EXPECT_EQ(e->condition, cond);
- ASSERT_EQ(e->body->statements.size(), 1u);
- EXPECT_EQ(e->body->statements[0], discard);
-}
-
-TEST_F(ElseStatementTest, Creation_WithSource) {
- auto* e = create<ElseStatement>(Source{Source::Location{20, 2}}, Expr(true),
- Block());
- auto src = e->source;
- EXPECT_EQ(src.range.begin.line, 20u);
- EXPECT_EQ(src.range.begin.column, 2u);
-}
-
-TEST_F(ElseStatementTest, IsElse) {
- auto* e = create<ElseStatement>(nullptr, Block());
- EXPECT_TRUE(e->Is<ElseStatement>());
-}
-
-TEST_F(ElseStatementTest, HasCondition) {
- auto* cond = Expr(true);
- auto* e = create<ElseStatement>(cond, Block());
- EXPECT_TRUE(e->condition);
-}
-
-TEST_F(ElseStatementTest, HasContition_NullCondition) {
- auto* e = create<ElseStatement>(nullptr, Block());
- EXPECT_FALSE(e->condition);
-}
-
-TEST_F(ElseStatementTest, Assert_Null_Body) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b;
- b.create<ElseStatement>(b.Expr(true), nullptr);
- },
- "internal compiler error");
-}
-
-TEST_F(ElseStatementTest, Assert_DifferentProgramID_Condition) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b1;
- ProgramBuilder b2;
- b1.create<ElseStatement>(b2.Expr(true), b1.Block());
- },
- "internal compiler error");
-}
-
-TEST_F(ElseStatementTest, Assert_DifferentProgramID_Body) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b1;
- ProgramBuilder b2;
- b1.create<ElseStatement>(b1.Expr(true), b2.Block());
- },
- "internal compiler error");
-}
-
-} // namespace
-} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/enable.cc b/chromium/third_party/dawn/src/tint/ast/enable.cc
new file mode 100644
index 00000000000..ef43200c052
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/ast/enable.cc
@@ -0,0 +1,34 @@
+// Copyright 2022 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/tint/ast/enable.h"
+
+#include "src/tint/program_builder.h"
+#include "src/tint/sem/variable.h"
+
+TINT_INSTANTIATE_TYPEINFO(tint::ast::Enable);
+
+namespace tint::ast {
+
+Enable::Enable(ProgramID pid, const Source& src, Extension ext) : Base(pid, src), extension(ext) {}
+
+Enable::Enable(Enable&&) = default;
+
+Enable::~Enable() = default;
+
+const Enable* Enable::Clone(CloneContext* ctx) const {
+ auto src = ctx->Clone(source);
+ return ctx->dst->create<Enable>(src, extension);
+}
+} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/enable.h b/chromium/third_party/dawn/src/tint/ast/enable.h
new file mode 100644
index 00000000000..674d9cb42f7
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/ast/enable.h
@@ -0,0 +1,59 @@
+// Copyright 2022 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef SRC_TINT_AST_ENABLE_H_
+#define SRC_TINT_AST_ENABLE_H_
+
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "src/tint/ast/extension.h"
+#include "src/tint/ast/node.h"
+
+namespace tint::ast {
+
+/// An "enable" directive. Example:
+/// ```
+/// // Enable an extension named "f16"
+/// enable f16;
+/// ```
+class Enable final : public Castable<Enable, Node> {
+ public:
+ /// Create an extension
+ /// @param pid the identifier of the program that owns this node
+ /// @param src the source of this node
+ /// @param ext the extension
+ Enable(ProgramID pid, const Source& src, Extension ext);
+ /// Move constructor
+ Enable(Enable&&);
+
+ ~Enable() override;
+
+ /// Clones this node and all transitive child nodes using the `CloneContext`
+ /// `ctx`.
+ /// @param ctx the clone context
+ /// @return the newly cloned node
+ const Enable* Clone(CloneContext* ctx) const override;
+
+ /// The extension name
+ const Extension extension;
+};
+
+/// A list of enables
+using EnableList = std::vector<const Enable*>;
+
+} // namespace tint::ast
+
+#endif // SRC_TINT_AST_ENABLE_H_
diff --git a/chromium/third_party/dawn/src/tint/ast/enable_test.cc b/chromium/third_party/dawn/src/tint/ast/enable_test.cc
new file mode 100644
index 00000000000..e8b6e5c20fc
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/ast/enable_test.cc
@@ -0,0 +1,34 @@
+// Copyright 2022 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/tint/ast/enable.h"
+
+#include "src/tint/ast/test_helper.h"
+
+namespace tint::ast {
+namespace {
+
+using EnableTest = TestHelper;
+
+TEST_F(EnableTest, Creation) {
+ auto* ext = create<ast::Enable>(Source{{{20, 2}, {20, 5}}}, Extension::kF16);
+ EXPECT_EQ(ext->source.range.begin.line, 20u);
+ EXPECT_EQ(ext->source.range.begin.column, 2u);
+ EXPECT_EQ(ext->source.range.end.line, 20u);
+ EXPECT_EQ(ext->source.range.end.column, 5u);
+ EXPECT_EQ(ext->extension, Extension::kF16);
+}
+
+} // namespace
+} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/expression.h b/chromium/third_party/dawn/src/tint/ast/expression.h
index 2338dfc3ab4..dc69ff8e4e6 100644
--- a/chromium/third_party/dawn/src/tint/ast/expression.h
+++ b/chromium/third_party/dawn/src/tint/ast/expression.h
@@ -25,16 +25,16 @@ namespace tint::ast {
/// Base expression class
class Expression : public Castable<Expression, Node> {
- public:
- ~Expression() override;
-
- protected:
- /// Constructor
- /// @param pid the identifier of the program that owns this node
- /// @param src the source of this node
- Expression(ProgramID pid, const Source& src);
- /// Move constructor
- Expression(Expression&&);
+ public:
+ ~Expression() override;
+
+ protected:
+ /// Constructor
+ /// @param pid the identifier of the program that owns this node
+ /// @param src the source of this node
+ Expression(ProgramID pid, const Source& src);
+ /// Move constructor
+ Expression(Expression&&);
};
/// A list of expressions
diff --git a/chromium/third_party/dawn/src/tint/ast/extension.cc b/chromium/third_party/dawn/src/tint/ast/extension.cc
new file mode 100644
index 00000000000..f03e3a02985
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/ast/extension.cc
@@ -0,0 +1,51 @@
+// Copyright 2022 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/tint/ast/extension.h"
+
+namespace tint::ast {
+
+Extension ParseExtension(const std::string& name) {
+ if (name == "chromium_experimental_dp4a") {
+ return Extension::kChromiumExperimentalDP4a;
+ }
+ if (name == "chromium_disable_uniformity_analysis") {
+ return Extension::kChromiumDisableUniformityAnalysis;
+ }
+ if (name == "f16") {
+ return Extension::kF16;
+ }
+ return Extension::kNone;
+}
+
+const char* str(Extension ext) {
+ switch (ext) {
+ case Extension::kChromiumExperimentalDP4a:
+ return "chromium_experimental_dp4a";
+ case Extension::kChromiumDisableUniformityAnalysis:
+ return "chromium_disable_uniformity_analysis";
+ case Extension::kF16:
+ return "f16";
+ case Extension::kNone:
+ return "<none>";
+ }
+ return "<unknown>";
+}
+
+std::ostream& operator<<(std::ostream& out, Extension i) {
+ out << str(i);
+ return out;
+}
+
+} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/extension.h b/chromium/third_party/dawn/src/tint/ast/extension.h
new file mode 100644
index 00000000000..21e9ac15b38
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/ast/extension.h
@@ -0,0 +1,68 @@
+// Copyright 2022 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef SRC_TINT_AST_EXTENSION_H_
+#define SRC_TINT_AST_EXTENSION_H_
+
+#include <sstream>
+#include <string>
+
+#include "src/tint/utils/unique_vector.h"
+
+namespace tint::ast {
+
+/// An enumerator of WGSL extensions
+enum class Extension {
+ /// WGSL Extension "f16"
+ kF16,
+
+ /// An extension for the experimental feature
+ /// "chromium_experimental_dp4a".
+ /// See crbug.com/tint/1497 for more details
+ kChromiumExperimentalDP4a,
+ /// A Chromium-specific extension for disabling uniformity analysis.
+ kChromiumDisableUniformityAnalysis,
+
+ /// Reserved for representing "No extension required" or "Not a valid extension".
+ kNone,
+};
+
+/// Converts an extension name string into the corresponding Extension enum value; the result is
+/// Extension::kNone if the name is not a known extension name. An extension node of kind
+/// kNone must not exist in the AST tree, and using an unknown extension name in WGSL code
+/// should result in a shader-creation error.
+/// @param name the extension name string
+/// @return the Extension enum value for the extension with the given name, or kNone if no known
+/// extension has the given name
+Extension ParseExtension(const std::string& name);
+
+/// Converts an Extension enum value to the corresponding extension name string.
+/// @param ext the Extension enum value
+/// @return the extension name string corresponding to the given value, or
+/// an empty string if the given enum value is kNone or has no
+/// known corresponding name
+const char* ExtensionName(Extension ext);
+
+/// @returns the name of the extension.
+const char* str(Extension i);
+
+/// Emits the name of the extension to the output stream.
+std::ostream& operator<<(std::ostream& out, Extension i);
+
+/// A unique vector of extensions
+using Extensions = utils::UniqueVector<Extension>;
+
+} // namespace tint::ast
+
+#endif // SRC_TINT_AST_EXTENSION_H_
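A short usage sketch (illustrative only) of the name/enum round trip declared above; the main() wrapper is ours, and the behaviour matches the expectations in extension_test.cc that follows:

    #include <cassert>
    #include <cstring>

    #include "src/tint/ast/extension.h"

    int main() {
        using tint::ast::Extension;
        // Known names parse to their enum value; anything else maps to kNone.
        assert(tint::ast::ParseExtension("f16") == Extension::kF16);
        assert(tint::ast::ParseExtension("no_such_extension") == Extension::kNone);
        // str() converts back to the WGSL spelling.
        assert(std::strcmp(tint::ast::str(Extension::kF16), "f16") == 0);
        return 0;
    }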
diff --git a/chromium/third_party/dawn/src/tint/ast/extension_test.cc b/chromium/third_party/dawn/src/tint/ast/extension_test.cc
new file mode 100644
index 00000000000..ed27674b9e0
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/ast/extension_test.cc
@@ -0,0 +1,36 @@
+
+// Copyright 2021 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/tint/ast/extension.h"
+
+#include "gtest/gtest.h"
+
+namespace tint::ast {
+namespace {
+
+TEST(ExtensionTest, NameToKind_InvalidName) {
+ EXPECT_EQ(ParseExtension("f16"), Extension::kF16);
+ EXPECT_EQ(ParseExtension(""), Extension::kNone);
+ EXPECT_EQ(ParseExtension("__ImpossibleExtensionName"), Extension::kNone);
+ EXPECT_EQ(ParseExtension("123"), Extension::kNone);
+}
+
+TEST(ExtensionTest, KindToName) {
+ EXPECT_EQ(std::string(str(Extension::kF16)), "f16");
+ EXPECT_EQ(std::string(str(Extension::kNone)), "<none>");
+}
+
+} // namespace
+} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/external_texture.cc b/chromium/third_party/dawn/src/tint/ast/external_texture.cc
index 01f66f1b49e..b88de9055ae 100644
--- a/chromium/third_party/dawn/src/tint/ast/external_texture.cc
+++ b/chromium/third_party/dawn/src/tint/ast/external_texture.cc
@@ -29,11 +29,11 @@ ExternalTexture::ExternalTexture(ExternalTexture&&) = default;
ExternalTexture::~ExternalTexture() = default;
std::string ExternalTexture::FriendlyName(const SymbolTable&) const {
- return "texture_external";
+ return "texture_external";
}
const ExternalTexture* ExternalTexture::Clone(CloneContext* ctx) const {
- return ctx->dst->create<ExternalTexture>();
+ return ctx->dst->create<ExternalTexture>();
}
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/external_texture.h b/chromium/third_party/dawn/src/tint/ast/external_texture.h
index 614bb1047f6..17224cffcfc 100644
--- a/chromium/third_party/dawn/src/tint/ast/external_texture.h
+++ b/chromium/third_party/dawn/src/tint/ast/external_texture.h
@@ -23,25 +23,25 @@ namespace tint::ast {
/// An external texture type
class ExternalTexture final : public Castable<ExternalTexture, Texture> {
- public:
- /// Constructor
- /// @param pid the identifier of the program that owns this node
- /// @param src the source of this node
- ExternalTexture(ProgramID pid, const Source& src);
-
- /// Move constructor
- ExternalTexture(ExternalTexture&&);
- ~ExternalTexture() override;
-
- /// @param symbols the program's symbol table
- /// @returns the name for this type that closely resembles how it would be
- /// declared in WGSL.
- std::string FriendlyName(const SymbolTable& symbols) const override;
-
- /// Clones this type and all transitive types using the `CloneContext` `ctx`.
- /// @param ctx the clone context
- /// @return the newly cloned type
- const ExternalTexture* Clone(CloneContext* ctx) const override;
+ public:
+ /// Constructor
+ /// @param pid the identifier of the program that owns this node
+ /// @param src the source of this node
+ ExternalTexture(ProgramID pid, const Source& src);
+
+ /// Move constructor
+ ExternalTexture(ExternalTexture&&);
+ ~ExternalTexture() override;
+
+ /// @param symbols the program's symbol table
+ /// @returns the name for this type that closely resembles how it would be
+ /// declared in WGSL.
+ std::string FriendlyName(const SymbolTable& symbols) const override;
+
+ /// Clones this type and all transitive types using the `CloneContext` `ctx`.
+ /// @param ctx the clone context
+ /// @return the newly cloned type
+ const ExternalTexture* Clone(CloneContext* ctx) const override;
};
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/external_texture_test.cc b/chromium/third_party/dawn/src/tint/ast/external_texture_test.cc
index b4a186ec7ce..dfe097e3dfe 100644
--- a/chromium/third_party/dawn/src/tint/ast/external_texture_test.cc
+++ b/chromium/third_party/dawn/src/tint/ast/external_texture_test.cc
@@ -22,22 +22,22 @@ namespace {
using AstExternalTextureTest = TestHelper;
TEST_F(AstExternalTextureTest, IsTexture) {
- Texture* ty = create<ExternalTexture>();
- EXPECT_FALSE(ty->Is<DepthTexture>());
- EXPECT_TRUE(ty->Is<ExternalTexture>());
- EXPECT_FALSE(ty->Is<MultisampledTexture>());
- EXPECT_FALSE(ty->Is<SampledTexture>());
- EXPECT_FALSE(ty->Is<StorageTexture>());
+ Texture* ty = create<ExternalTexture>();
+ EXPECT_FALSE(ty->Is<DepthTexture>());
+ EXPECT_TRUE(ty->Is<ExternalTexture>());
+ EXPECT_FALSE(ty->Is<MultisampledTexture>());
+ EXPECT_FALSE(ty->Is<SampledTexture>());
+ EXPECT_FALSE(ty->Is<StorageTexture>());
}
TEST_F(AstExternalTextureTest, Dim) {
- auto* ty = create<ExternalTexture>();
- EXPECT_EQ(ty->dim, ast::TextureDimension::k2d);
+ auto* ty = create<ExternalTexture>();
+ EXPECT_EQ(ty->dim, ast::TextureDimension::k2d);
}
TEST_F(AstExternalTextureTest, FriendlyName) {
- auto* ty = create<ExternalTexture>();
- EXPECT_EQ(ty->FriendlyName(Symbols()), "texture_external");
+ auto* ty = create<ExternalTexture>();
+ EXPECT_EQ(ty->FriendlyName(Symbols()), "texture_external");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/ast/f16.cc b/chromium/third_party/dawn/src/tint/ast/f16.cc
new file mode 100644
index 00000000000..0eb1be5fdd4
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/ast/f16.cc
@@ -0,0 +1,38 @@
+// Copyright 2022 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/tint/ast/f16.h"
+
+#include "src/tint/program_builder.h"
+
+TINT_INSTANTIATE_TYPEINFO(tint::ast::F16);
+
+namespace tint::ast {
+
+F16::F16(ProgramID pid, const Source& src) : Base(pid, src) {}
+
+F16::F16(F16&&) = default;
+
+F16::~F16() = default;
+
+std::string F16::FriendlyName(const SymbolTable&) const {
+ return "f16";
+}
+
+const F16* F16::Clone(CloneContext* ctx) const {
+ auto src = ctx->Clone(source);
+ return ctx->dst->create<F16>(src);
+}
+
+} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/f16.h b/chromium/third_party/dawn/src/tint/ast/f16.h
new file mode 100644
index 00000000000..1b84f09529e
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/ast/f16.h
@@ -0,0 +1,48 @@
+// Copyright 2022 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef SRC_TINT_AST_F16_H_
+#define SRC_TINT_AST_F16_H_
+
+#include <string>
+
+#include "src/tint/ast/type.h"
+
+namespace tint::ast {
+
+/// A float 16 type
+class F16 : public Castable<F16, Type> {
+ public:
+ /// Constructor
+ /// @param pid the identifier of the program that owns this node
+ /// @param src the source of this node
+ F16(ProgramID pid, const Source& src);
+ /// Move constructor
+ F16(F16&&);
+ ~F16() override;
+
+ /// @param symbols the program's symbol table
+ /// @returns the name for this type that closely resembles how it would be
+ /// declared in WGSL.
+ std::string FriendlyName(const SymbolTable& symbols) const override;
+
+ /// Clones this type and all transitive types using the `CloneContext` `ctx`.
+ /// @param ctx the clone context
+ /// @return the newly cloned type
+ const F16* Clone(CloneContext* ctx) const override;
+};
+
+} // namespace tint::ast
+
+#endif // SRC_TINT_AST_F16_H_
diff --git a/chromium/third_party/dawn/src/tint/ast/sint_literal_expression_test.cc b/chromium/third_party/dawn/src/tint/ast/f16_test.cc
index 79be9b2c622..48ab284f55b 100644
--- a/chromium/third_party/dawn/src/tint/ast/sint_literal_expression_test.cc
+++ b/chromium/third_party/dawn/src/tint/ast/f16_test.cc
@@ -1,4 +1,4 @@
-// Copyright 2020 The Tint Authors.
+// Copyright 2022 The Tint Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -12,17 +12,18 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+#include "src/tint/ast/f16.h"
+
#include "src/tint/ast/test_helper.h"
namespace tint::ast {
namespace {
-using SintLiteralExpressionTest = TestHelper;
+using AstF16Test = TestHelper;
-TEST_F(SintLiteralExpressionTest, Value) {
- auto* i = create<SintLiteralExpression>(47);
- ASSERT_TRUE(i->Is<SintLiteralExpression>());
- EXPECT_EQ(i->value, 47);
+TEST_F(AstF16Test, FriendlyName) {
+ auto* f = create<F16>();
+ EXPECT_EQ(f->FriendlyName(Symbols()), "f16");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/ast/f32.cc b/chromium/third_party/dawn/src/tint/ast/f32.cc
index 04126934aab..b731e65c9df 100644
--- a/chromium/third_party/dawn/src/tint/ast/f32.cc
+++ b/chromium/third_party/dawn/src/tint/ast/f32.cc
@@ -27,12 +27,12 @@ F32::F32(F32&&) = default;
F32::~F32() = default;
std::string F32::FriendlyName(const SymbolTable&) const {
- return "f32";
+ return "f32";
}
const F32* F32::Clone(CloneContext* ctx) const {
- auto src = ctx->Clone(source);
- return ctx->dst->create<F32>(src);
+ auto src = ctx->Clone(source);
+ return ctx->dst->create<F32>(src);
}
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/f32.h b/chromium/third_party/dawn/src/tint/ast/f32.h
index 321cf3eb4eb..db81491d815 100644
--- a/chromium/third_party/dawn/src/tint/ast/f32.h
+++ b/chromium/third_party/dawn/src/tint/ast/f32.h
@@ -23,24 +23,24 @@ namespace tint::ast {
/// A float 32 type
class F32 final : public Castable<F32, Type> {
- public:
- /// Constructor
- /// @param pid the identifier of the program that owns this node
- /// @param src the source of this node
- F32(ProgramID pid, const Source& src);
- /// Move constructor
- F32(F32&&);
- ~F32() override;
-
- /// @param symbols the program's symbol table
- /// @returns the name for this type that closely resembles how it would be
- /// declared in WGSL.
- std::string FriendlyName(const SymbolTable& symbols) const override;
-
- /// Clones this type and all transitive types using the `CloneContext` `ctx`.
- /// @param ctx the clone context
- /// @return the newly cloned type
- const F32* Clone(CloneContext* ctx) const override;
+ public:
+ /// Constructor
+ /// @param pid the identifier of the program that owns this node
+ /// @param src the source of this node
+ F32(ProgramID pid, const Source& src);
+ /// Move constructor
+ F32(F32&&);
+ ~F32() override;
+
+ /// @param symbols the program's symbol table
+ /// @returns the name for this type that closely resembles how it would be
+ /// declared in WGSL.
+ std::string FriendlyName(const SymbolTable& symbols) const override;
+
+ /// Clones this type and all transitive types using the `CloneContext` `ctx`.
+ /// @param ctx the clone context
+ /// @return the newly cloned type
+ const F32* Clone(CloneContext* ctx) const override;
};
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/f32_test.cc b/chromium/third_party/dawn/src/tint/ast/f32_test.cc
index 73791e9425d..793289fff8d 100644
--- a/chromium/third_party/dawn/src/tint/ast/f32_test.cc
+++ b/chromium/third_party/dawn/src/tint/ast/f32_test.cc
@@ -22,8 +22,8 @@ namespace {
using AstF32Test = TestHelper;
TEST_F(AstF32Test, FriendlyName) {
- auto* f = create<F32>();
- EXPECT_EQ(f->FriendlyName(Symbols()), "f32");
+ auto* f = create<F32>();
+ EXPECT_EQ(f->FriendlyName(Symbols()), "f32");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/ast/fallthrough_statement.cc b/chromium/third_party/dawn/src/tint/ast/fallthrough_statement.cc
index ff9b3b971c4..446534d354f 100644
--- a/chromium/third_party/dawn/src/tint/ast/fallthrough_statement.cc
+++ b/chromium/third_party/dawn/src/tint/ast/fallthrough_statement.cc
@@ -20,18 +20,16 @@ TINT_INSTANTIATE_TYPEINFO(tint::ast::FallthroughStatement);
namespace tint::ast {
-FallthroughStatement::FallthroughStatement(ProgramID pid, const Source& src)
- : Base(pid, src) {}
+FallthroughStatement::FallthroughStatement(ProgramID pid, const Source& src) : Base(pid, src) {}
FallthroughStatement::FallthroughStatement(FallthroughStatement&&) = default;
FallthroughStatement::~FallthroughStatement() = default;
-const FallthroughStatement* FallthroughStatement::Clone(
- CloneContext* ctx) const {
- // Clone arguments outside of create() call to have deterministic ordering
- auto src = ctx->Clone(source);
- return ctx->dst->create<FallthroughStatement>(src);
+const FallthroughStatement* FallthroughStatement::Clone(CloneContext* ctx) const {
+ // Clone arguments outside of create() call to have deterministic ordering
+ auto src = ctx->Clone(source);
+ return ctx->dst->create<FallthroughStatement>(src);
}
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/fallthrough_statement.h b/chromium/third_party/dawn/src/tint/ast/fallthrough_statement.h
index 262bd87ced6..b313efbd6dd 100644
--- a/chromium/third_party/dawn/src/tint/ast/fallthrough_statement.h
+++ b/chromium/third_party/dawn/src/tint/ast/fallthrough_statement.h
@@ -20,22 +20,21 @@
namespace tint::ast {
/// A fallthrough statement
-class FallthroughStatement final
- : public Castable<FallthroughStatement, Statement> {
- public:
- /// Constructor
- /// @param pid the identifier of the program that owns this node
- /// @param src the source of this node
- FallthroughStatement(ProgramID pid, const Source& src);
- /// Move constructor
- FallthroughStatement(FallthroughStatement&&);
- ~FallthroughStatement() override;
+class FallthroughStatement final : public Castable<FallthroughStatement, Statement> {
+ public:
+ /// Constructor
+ /// @param pid the identifier of the program that owns this node
+ /// @param src the source of this node
+ FallthroughStatement(ProgramID pid, const Source& src);
+ /// Move constructor
+ FallthroughStatement(FallthroughStatement&&);
+ ~FallthroughStatement() override;
- /// Clones this node and all transitive child nodes using the `CloneContext`
- /// `ctx`.
- /// @param ctx the clone context
- /// @return the newly cloned node
- const FallthroughStatement* Clone(CloneContext* ctx) const override;
+ /// Clones this node and all transitive child nodes using the `CloneContext`
+ /// `ctx`.
+ /// @param ctx the clone context
+ /// @return the newly cloned node
+ const FallthroughStatement* Clone(CloneContext* ctx) const override;
};
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/fallthrough_statement_test.cc b/chromium/third_party/dawn/src/tint/ast/fallthrough_statement_test.cc
index 3069f03a024..f823e91700b 100644
--- a/chromium/third_party/dawn/src/tint/ast/fallthrough_statement_test.cc
+++ b/chromium/third_party/dawn/src/tint/ast/fallthrough_statement_test.cc
@@ -22,23 +22,23 @@ namespace {
using FallthroughStatementTest = TestHelper;
TEST_F(FallthroughStatementTest, Creation) {
- auto* stmt = create<FallthroughStatement>();
- EXPECT_EQ(stmt->source.range.begin.line, 0u);
- EXPECT_EQ(stmt->source.range.begin.column, 0u);
- EXPECT_EQ(stmt->source.range.end.line, 0u);
- EXPECT_EQ(stmt->source.range.end.column, 0u);
+ auto* stmt = create<FallthroughStatement>();
+ EXPECT_EQ(stmt->source.range.begin.line, 0u);
+ EXPECT_EQ(stmt->source.range.begin.column, 0u);
+ EXPECT_EQ(stmt->source.range.end.line, 0u);
+ EXPECT_EQ(stmt->source.range.end.column, 0u);
}
TEST_F(FallthroughStatementTest, Creation_WithSource) {
- auto* stmt = create<FallthroughStatement>(Source{Source::Location{20, 2}});
- auto src = stmt->source;
- EXPECT_EQ(src.range.begin.line, 20u);
- EXPECT_EQ(src.range.begin.column, 2u);
+ auto* stmt = create<FallthroughStatement>(Source{Source::Location{20, 2}});
+ auto src = stmt->source;
+ EXPECT_EQ(src.range.begin.line, 20u);
+ EXPECT_EQ(src.range.begin.column, 2u);
}
TEST_F(FallthroughStatementTest, IsFallthrough) {
- auto* stmt = create<FallthroughStatement>();
- EXPECT_TRUE(stmt->Is<FallthroughStatement>());
+ auto* stmt = create<FallthroughStatement>();
+ EXPECT_TRUE(stmt->Is<FallthroughStatement>());
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/ast/float_literal_expression.cc b/chromium/third_party/dawn/src/tint/ast/float_literal_expression.cc
index d22cec9286a..36cb42a609e 100644
--- a/chromium/third_party/dawn/src/tint/ast/float_literal_expression.cc
+++ b/chromium/third_party/dawn/src/tint/ast/float_literal_expression.cc
@@ -24,16 +24,27 @@ namespace tint::ast {
FloatLiteralExpression::FloatLiteralExpression(ProgramID pid,
const Source& src,
- float val)
- : Base(pid, src), value(val) {}
+ double val,
+ Suffix suf)
+ : Base(pid, src), value(val), suffix(suf) {}
FloatLiteralExpression::~FloatLiteralExpression() = default;
-const FloatLiteralExpression* FloatLiteralExpression::Clone(
- CloneContext* ctx) const {
- // Clone arguments outside of create() call to have deterministic ordering
- auto src = ctx->Clone(source);
- return ctx->dst->create<FloatLiteralExpression>(src, value);
+const FloatLiteralExpression* FloatLiteralExpression::Clone(CloneContext* ctx) const {
+ // Clone arguments outside of create() call to have deterministic ordering
+ auto src = ctx->Clone(source);
+ return ctx->dst->create<FloatLiteralExpression>(src, value, suffix);
+}
+
+std::ostream& operator<<(std::ostream& out, FloatLiteralExpression::Suffix suffix) {
+ switch (suffix) {
+ default:
+ return out;
+ case FloatLiteralExpression::Suffix::kF:
+ return out << "f";
+ case FloatLiteralExpression::Suffix::kH:
+ return out << "h";
+ }
}
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/float_literal_expression.h b/chromium/third_party/dawn/src/tint/ast/float_literal_expression.h
index e2b0fa3dbd5..7f03caf692c 100644
--- a/chromium/third_party/dawn/src/tint/ast/float_literal_expression.h
+++ b/chromium/third_party/dawn/src/tint/ast/float_literal_expression.h
@@ -22,26 +22,45 @@
namespace tint::ast {
/// A float literal
-class FloatLiteralExpression final
- : public Castable<FloatLiteralExpression, LiteralExpression> {
- public:
- /// Constructor
- /// @param pid the identifier of the program that owns this node
- /// @param src the source of this node
- /// @param value the float literals value
- FloatLiteralExpression(ProgramID pid, const Source& src, float value);
- ~FloatLiteralExpression() override;
-
- /// Clones this node and all transitive child nodes using the `CloneContext`
- /// `ctx`.
- /// @param ctx the clone context
- /// @return the newly cloned node
- const FloatLiteralExpression* Clone(CloneContext* ctx) const override;
-
- /// The float literal value
- const float value;
+class FloatLiteralExpression final : public Castable<FloatLiteralExpression, LiteralExpression> {
+ public:
+ /// Literal suffix
+ enum class Suffix {
+ /// No suffix
+ kNone,
+ /// 'f' suffix (f32)
+ kF,
+ /// 'h' suffix (f16)
+ kH,
+ };
+
+ /// Constructor
+ /// @param pid the identifier of the program that owns this node
+ /// @param src the source of this node
+ /// @param val the literal value
+ /// @param suf the literal suffix
+ FloatLiteralExpression(ProgramID pid, const Source& src, double val, Suffix suf);
+ ~FloatLiteralExpression() override;
+
+ /// Clones this node and all transitive child nodes using the `CloneContext`
+ /// `ctx`.
+ /// @param ctx the clone context
+ /// @return the newly cloned node
+ const FloatLiteralExpression* Clone(CloneContext* ctx) const override;
+
+ /// The literal value
+ const double value;
+
+ /// The literal suffix
+ const Suffix suffix;
};
+/// Writes the float literal suffix to the std::ostream.
+/// @param out the std::ostream to write to
+/// @param suffix the suffix to write
+/// @returns out so calls can be chained
+std::ostream& operator<<(std::ostream& out, FloatLiteralExpression::Suffix suffix);
+
} // namespace tint::ast
#endif // SRC_TINT_AST_FLOAT_LITERAL_EXPRESSION_H_
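An illustrative sketch (not part of the patch) of the new Suffix enum and its stream operator: kNone prints nothing, kF prints "f" (f32) and kH prints "h" (f16), as the updated tests below verify; the main() wrapper is ours:

    #include <cassert>
    #include <sstream>

    #include "src/tint/ast/float_literal_expression.h"

    int main() {
        using Suffix = tint::ast::FloatLiteralExpression::Suffix;
        std::stringstream ss;
        ss << 42.0 << Suffix::kH;  // the textual form of a literal such as `42.0h`
        assert(ss.str() == "42h");
        return 0;
    }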
diff --git a/chromium/third_party/dawn/src/tint/ast/float_literal_expression_test.cc b/chromium/third_party/dawn/src/tint/ast/float_literal_expression_test.cc
index dfb912db103..5ec5f0fccd8 100644
--- a/chromium/third_party/dawn/src/tint/ast/float_literal_expression_test.cc
+++ b/chromium/third_party/dawn/src/tint/ast/float_literal_expression_test.cc
@@ -19,10 +19,37 @@ namespace {
using FloatLiteralExpressionTest = TestHelper;
-TEST_F(FloatLiteralExpressionTest, Value) {
- auto* f = create<FloatLiteralExpression>(47.2f);
- ASSERT_TRUE(f->Is<FloatLiteralExpression>());
- EXPECT_EQ(f->value, 47.2f);
+TEST_F(FloatLiteralExpressionTest, SuffixNone) {
+ auto* i = create<FloatLiteralExpression>(42.0, FloatLiteralExpression::Suffix::kNone);
+ ASSERT_TRUE(i->Is<FloatLiteralExpression>());
+ EXPECT_EQ(i->value, 42);
+ EXPECT_EQ(i->suffix, FloatLiteralExpression::Suffix::kNone);
+}
+
+TEST_F(FloatLiteralExpressionTest, SuffixF) {
+ auto* i = create<FloatLiteralExpression>(42.0, FloatLiteralExpression::Suffix::kF);
+ ASSERT_TRUE(i->Is<FloatLiteralExpression>());
+ EXPECT_EQ(i->value, 42);
+ EXPECT_EQ(i->suffix, FloatLiteralExpression::Suffix::kF);
+}
+
+TEST_F(FloatLiteralExpressionTest, SuffixH) {
+ auto* i = create<FloatLiteralExpression>(42.0, FloatLiteralExpression::Suffix::kH);
+ ASSERT_TRUE(i->Is<FloatLiteralExpression>());
+ EXPECT_EQ(i->value, 42);
+ EXPECT_EQ(i->suffix, FloatLiteralExpression::Suffix::kH);
+}
+
+TEST_F(FloatLiteralExpressionTest, SuffixStringStream) {
+ auto to_str = [](FloatLiteralExpression::Suffix suffix) {
+ std::stringstream ss;
+ ss << suffix;
+ return ss.str();
+ };
+
+ EXPECT_EQ("", to_str(FloatLiteralExpression::Suffix::kNone));
+ EXPECT_EQ("f", to_str(FloatLiteralExpression::Suffix::kF));
+ EXPECT_EQ("h", to_str(FloatLiteralExpression::Suffix::kH));
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/ast/for_loop_statement.cc b/chromium/third_party/dawn/src/tint/ast/for_loop_statement.cc
index 0a08893f40a..804389c8c50 100644
--- a/chromium/third_party/dawn/src/tint/ast/for_loop_statement.cc
+++ b/chromium/third_party/dawn/src/tint/ast/for_loop_statement.cc
@@ -26,17 +26,13 @@ ForLoopStatement::ForLoopStatement(ProgramID pid,
const Expression* cond,
const Statement* cont,
const BlockStatement* b)
- : Base(pid, src),
- initializer(init),
- condition(cond),
- continuing(cont),
- body(b) {
- TINT_ASSERT(AST, body);
-
- TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, initializer, program_id);
- TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, condition, program_id);
- TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, continuing, program_id);
- TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, body, program_id);
+ : Base(pid, src), initializer(init), condition(cond), continuing(cont), body(b) {
+ TINT_ASSERT(AST, body);
+
+ TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, initializer, program_id);
+ TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, condition, program_id);
+ TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, continuing, program_id);
+ TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, body, program_id);
}
ForLoopStatement::ForLoopStatement(ForLoopStatement&&) = default;
@@ -44,14 +40,14 @@ ForLoopStatement::ForLoopStatement(ForLoopStatement&&) = default;
ForLoopStatement::~ForLoopStatement() = default;
const ForLoopStatement* ForLoopStatement::Clone(CloneContext* ctx) const {
- // Clone arguments outside of create() call to have deterministic ordering
- auto src = ctx->Clone(source);
-
- auto* init = ctx->Clone(initializer);
- auto* cond = ctx->Clone(condition);
- auto* cont = ctx->Clone(continuing);
- auto* b = ctx->Clone(body);
- return ctx->dst->create<ForLoopStatement>(src, init, cond, cont, b);
+ // Clone arguments outside of create() call to have deterministic ordering
+ auto src = ctx->Clone(source);
+
+ auto* init = ctx->Clone(initializer);
+ auto* cond = ctx->Clone(condition);
+ auto* cont = ctx->Clone(continuing);
+ auto* b = ctx->Clone(body);
+ return ctx->dst->create<ForLoopStatement>(src, init, cond, cont, b);
}
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/for_loop_statement.h b/chromium/third_party/dawn/src/tint/ast/for_loop_statement.h
index 904d3276e79..464ea499e8f 100644
--- a/chromium/third_party/dawn/src/tint/ast/for_loop_statement.h
+++ b/chromium/third_party/dawn/src/tint/ast/for_loop_statement.h
@@ -23,41 +23,41 @@ class Expression;
/// A for loop statement
class ForLoopStatement final : public Castable<ForLoopStatement, Statement> {
- public:
- /// Constructor
- /// @param program_id the identifier of the program that owns this node
- /// @param source the for loop statement source
- /// @param initializer the optional loop initializer statement
- /// @param condition the optional loop condition expression
- /// @param continuing the optional continuing statement
- /// @param body the loop body
- ForLoopStatement(ProgramID program_id,
- Source const& source,
- const Statement* initializer,
- const Expression* condition,
- const Statement* continuing,
- const BlockStatement* body);
- /// Move constructor
- ForLoopStatement(ForLoopStatement&&);
- ~ForLoopStatement() override;
+ public:
+ /// Constructor
+ /// @param program_id the identifier of the program that owns this node
+ /// @param source the for loop statement source
+ /// @param initializer the optional loop initializer statement
+ /// @param condition the optional loop condition expression
+ /// @param continuing the optional continuing statement
+ /// @param body the loop body
+ ForLoopStatement(ProgramID program_id,
+ Source const& source,
+ const Statement* initializer,
+ const Expression* condition,
+ const Statement* continuing,
+ const BlockStatement* body);
+ /// Move constructor
+ ForLoopStatement(ForLoopStatement&&);
+ ~ForLoopStatement() override;
- /// Clones this node and all transitive child nodes using the `CloneContext`
- /// `ctx`.
- /// @param ctx the clone context
- /// @return the newly cloned node
- const ForLoopStatement* Clone(CloneContext* ctx) const override;
+ /// Clones this node and all transitive child nodes using the `CloneContext`
+ /// `ctx`.
+ /// @param ctx the clone context
+ /// @return the newly cloned node
+ const ForLoopStatement* Clone(CloneContext* ctx) const override;
- /// The initializer statement
- const Statement* const initializer;
+ /// The initializer statement
+ const Statement* const initializer;
- /// The condition expression
- const Expression* const condition;
+ /// The condition expression
+ const Expression* const condition;
- /// The continuing statement
- const Statement* const continuing;
+ /// The continuing statement
+ const Statement* const continuing;
- /// The loop body block
- const BlockStatement* const body;
+ /// The loop body block
+ const BlockStatement* const body;
};
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/for_loop_statement_test.cc b/chromium/third_party/dawn/src/tint/ast/for_loop_statement_test.cc
index 9c03a3071ef..5e2a0003c93 100644
--- a/chromium/third_party/dawn/src/tint/ast/for_loop_statement_test.cc
+++ b/chromium/third_party/dawn/src/tint/ast/for_loop_statement_test.cc
@@ -16,86 +16,87 @@
#include "src/tint/ast/binary_expression.h"
#include "src/tint/ast/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::ast {
namespace {
using ForLoopStatementTest = TestHelper;
TEST_F(ForLoopStatementTest, Creation) {
- auto* init = Decl(Var("i", ty.u32()));
- auto* cond =
- create<BinaryExpression>(BinaryOp::kLessThan, Expr("i"), Expr(5u));
- auto* cont = Assign("i", Add("i", 1));
- auto* body = Block(Return());
- auto* l = For(init, cond, cont, body);
+ auto* init = Decl(Var("i", ty.u32()));
+ auto* cond = create<BinaryExpression>(BinaryOp::kLessThan, Expr("i"), Expr(5_u));
+ auto* cont = Assign("i", Add("i", 1_u));
+ auto* body = Block(Return());
+ auto* l = For(init, cond, cont, body);
- EXPECT_EQ(l->initializer, init);
- EXPECT_EQ(l->condition, cond);
- EXPECT_EQ(l->continuing, cont);
- EXPECT_EQ(l->body, body);
+ EXPECT_EQ(l->initializer, init);
+ EXPECT_EQ(l->condition, cond);
+ EXPECT_EQ(l->continuing, cont);
+ EXPECT_EQ(l->body, body);
}
TEST_F(ForLoopStatementTest, Creation_WithSource) {
- auto* body = Block(Return());
- auto* l = For(Source{{20u, 2u}}, nullptr, nullptr, nullptr, body);
- auto src = l->source;
- EXPECT_EQ(src.range.begin.line, 20u);
- EXPECT_EQ(src.range.begin.column, 2u);
+ auto* body = Block(Return());
+ auto* l = For(Source{{20u, 2u}}, nullptr, nullptr, nullptr, body);
+ auto src = l->source;
+ EXPECT_EQ(src.range.begin.line, 20u);
+ EXPECT_EQ(src.range.begin.column, 2u);
}
TEST_F(ForLoopStatementTest, Creation_Null_InitCondCont) {
- auto* body = Block(Return());
- auto* l = For(nullptr, nullptr, nullptr, body);
- EXPECT_EQ(l->body, body);
+ auto* body = Block(Return());
+ auto* l = For(nullptr, nullptr, nullptr, body);
+ EXPECT_EQ(l->body, body);
}
TEST_F(ForLoopStatementTest, Assert_Null_Body) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b;
- b.For(nullptr, nullptr, nullptr, nullptr);
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b;
+ b.For(nullptr, nullptr, nullptr, nullptr);
+ },
+ "internal compiler error");
}
TEST_F(ForLoopStatementTest, Assert_DifferentProgramID_Initializer) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b1;
- ProgramBuilder b2;
- b1.For(b2.Block(), nullptr, nullptr, b1.Block());
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b1;
+ ProgramBuilder b2;
+ b1.For(b2.Block(), nullptr, nullptr, b1.Block());
+ },
+ "internal compiler error");
}
TEST_F(ForLoopStatementTest, Assert_DifferentProgramID_Condition) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b1;
- ProgramBuilder b2;
- b1.For(nullptr, b2.Expr(true), nullptr, b1.Block());
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b1;
+ ProgramBuilder b2;
+ b1.For(nullptr, b2.Expr(true), nullptr, b1.Block());
+ },
+ "internal compiler error");
}
TEST_F(ForLoopStatementTest, Assert_DifferentProgramID_Continuing) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b1;
- ProgramBuilder b2;
- b1.For(nullptr, nullptr, b2.Block(), b1.Block());
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b1;
+ ProgramBuilder b2;
+ b1.For(nullptr, nullptr, b2.Block(), b1.Block());
+ },
+ "internal compiler error");
}
TEST_F(ForLoopStatementTest, Assert_DifferentProgramID_Body) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b1;
- ProgramBuilder b2;
- b1.For(nullptr, nullptr, nullptr, b2.Block());
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b1;
+ ProgramBuilder b2;
+ b1.For(nullptr, nullptr, nullptr, b2.Block());
+ },
+ "internal compiler error");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/ast/function.cc b/chromium/third_party/dawn/src/tint/ast/function.cc
index 885ca5100dc..d8485d6cc9b 100644
--- a/chromium/third_party/dawn/src/tint/ast/function.cc
+++ b/chromium/third_party/dawn/src/tint/ast/function.cc
@@ -37,20 +37,20 @@ Function::Function(ProgramID pid,
body(b),
attributes(std::move(attrs)),
return_type_attributes(std::move(return_type_attrs)) {
- TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, symbol, program_id);
- TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, body, program_id);
- for (auto* param : params) {
- TINT_ASSERT(AST, param && param->is_const);
- TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, param, program_id);
- }
- TINT_ASSERT(AST, symbol.IsValid());
- TINT_ASSERT(AST, return_type);
- for (auto* attr : attributes) {
- TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, attr, program_id);
- }
- for (auto* attr : return_type_attributes) {
- TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, attr, program_id);
- }
+ TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, symbol, program_id);
+ TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, body, program_id);
+ for (auto* param : params) {
+ TINT_ASSERT(AST, param && param->is_const);
+ TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, param, program_id);
+ }
+ TINT_ASSERT(AST, symbol.IsValid());
+ TINT_ASSERT(AST, return_type);
+ for (auto* attr : attributes) {
+ TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, attr, program_id);
+ }
+ for (auto* attr : return_type_attributes) {
+ TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, attr, program_id);
+ }
}
Function::Function(Function&&) = default;
@@ -58,49 +58,49 @@ Function::Function(Function&&) = default;
Function::~Function() = default;
PipelineStage Function::PipelineStage() const {
- if (auto* stage = GetAttribute<StageAttribute>(attributes)) {
- return stage->stage;
- }
- return PipelineStage::kNone;
+ if (auto* stage = GetAttribute<StageAttribute>(attributes)) {
+ return stage->stage;
+ }
+ return PipelineStage::kNone;
}
const Function* Function::Clone(CloneContext* ctx) const {
- // Clone arguments outside of create() call to have deterministic ordering
- auto src = ctx->Clone(source);
- auto sym = ctx->Clone(symbol);
- auto p = ctx->Clone(params);
- auto* ret = ctx->Clone(return_type);
- auto* b = ctx->Clone(body);
- auto attrs = ctx->Clone(attributes);
- auto ret_attrs = ctx->Clone(return_type_attributes);
- return ctx->dst->create<Function>(src, sym, p, ret, b, attrs, ret_attrs);
+ // Clone arguments outside of create() call to have deterministic ordering
+ auto src = ctx->Clone(source);
+ auto sym = ctx->Clone(symbol);
+ auto p = ctx->Clone(params);
+ auto* ret = ctx->Clone(return_type);
+ auto* b = ctx->Clone(body);
+ auto attrs = ctx->Clone(attributes);
+ auto ret_attrs = ctx->Clone(return_type_attributes);
+ return ctx->dst->create<Function>(src, sym, p, ret, b, attrs, ret_attrs);
}
const Function* FunctionList::Find(Symbol sym) const {
- for (auto* func : *this) {
- if (func->symbol == sym) {
- return func;
+ for (auto* func : *this) {
+ if (func->symbol == sym) {
+ return func;
+ }
}
- }
- return nullptr;
+ return nullptr;
}
const Function* FunctionList::Find(Symbol sym, PipelineStage stage) const {
- for (auto* func : *this) {
- if (func->symbol == sym && func->PipelineStage() == stage) {
- return func;
+ for (auto* func : *this) {
+ if (func->symbol == sym && func->PipelineStage() == stage) {
+ return func;
+ }
}
- }
- return nullptr;
+ return nullptr;
}
bool FunctionList::HasStage(ast::PipelineStage stage) const {
- for (auto* func : *this) {
- if (func->PipelineStage() == stage) {
- return true;
+ for (auto* func : *this) {
+ if (func->PipelineStage() == stage) {
+ return true;
+ }
}
- }
- return false;
+ return false;
}
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/function.h b/chromium/third_party/dawn/src/tint/ast/function.h
index e23edf7952d..843877efde3 100644
--- a/chromium/third_party/dawn/src/tint/ast/function.h
+++ b/chromium/third_party/dawn/src/tint/ast/function.h
@@ -33,82 +33,82 @@ namespace tint::ast {
/// A Function statement.
class Function final : public Castable<Function, Node> {
- public:
- /// Create a function
- /// @param program_id the identifier of the program that owns this node
- /// @param source the variable source
- /// @param symbol the function symbol
- /// @param params the function parameters
- /// @param return_type the return type
- /// @param body the function body
- /// @param attributes the function attributes
- /// @param return_type_attributes the return type attributes
- Function(ProgramID program_id,
- const Source& source,
- Symbol symbol,
- VariableList params,
- const Type* return_type,
- const BlockStatement* body,
- AttributeList attributes,
- AttributeList return_type_attributes);
- /// Move constructor
- Function(Function&&);
-
- ~Function() override;
-
- /// @returns the functions pipeline stage or None if not set
- ast::PipelineStage PipelineStage() const;
-
- /// @returns true if this function is an entry point
- bool IsEntryPoint() const { return PipelineStage() != PipelineStage::kNone; }
-
- /// Clones this node and all transitive child nodes using the `CloneContext`
- /// `ctx`.
- /// @param ctx the clone context
- /// @return the newly cloned node
- const Function* Clone(CloneContext* ctx) const override;
-
- /// The function symbol
- const Symbol symbol;
-
- /// The function params
- const VariableList params;
-
- /// The function return type
- const Type* const return_type;
-
- /// The function body
- const BlockStatement* const body;
-
- /// The attributes attached to this function
- const AttributeList attributes;
-
- /// The attributes attached to the function return type.
- const AttributeList return_type_attributes;
+ public:
+ /// Create a function
+ /// @param program_id the identifier of the program that owns this node
+ /// @param source the variable source
+ /// @param symbol the function symbol
+ /// @param params the function parameters
+ /// @param return_type the return type
+ /// @param body the function body
+ /// @param attributes the function attributes
+ /// @param return_type_attributes the return type attributes
+ Function(ProgramID program_id,
+ const Source& source,
+ Symbol symbol,
+ VariableList params,
+ const Type* return_type,
+ const BlockStatement* body,
+ AttributeList attributes,
+ AttributeList return_type_attributes);
+ /// Move constructor
+ Function(Function&&);
+
+ ~Function() override;
+
+ /// @returns the functions pipeline stage or None if not set
+ ast::PipelineStage PipelineStage() const;
+
+ /// @returns true if this function is an entry point
+ bool IsEntryPoint() const { return PipelineStage() != PipelineStage::kNone; }
+
+ /// Clones this node and all transitive child nodes using the `CloneContext`
+ /// `ctx`.
+ /// @param ctx the clone context
+ /// @return the newly cloned node
+ const Function* Clone(CloneContext* ctx) const override;
+
+ /// The function symbol
+ const Symbol symbol;
+
+ /// The function params
+ const VariableList params;
+
+ /// The function return type
+ const Type* const return_type;
+
+ /// The function body
+ const BlockStatement* const body;
+
+ /// The attributes attached to this function
+ const AttributeList attributes;
+
+ /// The attributes attached to the function return type.
+ const AttributeList return_type_attributes;
};
/// A list of functions
class FunctionList : public std::vector<const Function*> {
- public:
- /// Appends f to the end of the list
- /// @param f the function to append to this list
- void Add(const Function* f) { this->emplace_back(f); }
-
- /// Returns the function with the given name
- /// @param sym the function symbol to search for
- /// @returns the associated function or nullptr if none exists
- const Function* Find(Symbol sym) const;
-
- /// Returns the function with the given name
- /// @param sym the function symbol to search for
- /// @param stage the pipeline stage
- /// @returns the associated function or nullptr if none exists
- const Function* Find(Symbol sym, PipelineStage stage) const;
-
- /// @param stage the pipeline stage
- /// @returns true if the Builder contains an entrypoint function with
- /// the given stage
- bool HasStage(PipelineStage stage) const;
+ public:
+ /// Appends f to the end of the list
+ /// @param f the function to append to this list
+ void Add(const Function* f) { this->emplace_back(f); }
+
+ /// Returns the function with the given name
+ /// @param sym the function symbol to search for
+ /// @returns the associated function or nullptr if none exists
+ const Function* Find(Symbol sym) const;
+
+ /// Returns the function with the given name
+ /// @param sym the function symbol to search for
+ /// @param stage the pipeline stage
+ /// @returns the associated function or nullptr if none exists
+ const Function* Find(Symbol sym, PipelineStage stage) const;
+
+ /// @param stage the pipeline stage
+ /// @returns true if the Builder contains an entrypoint function with
+ /// the given stage
+ bool HasStage(PipelineStage stage) const;
};
} // namespace tint::ast
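For reference, a short sketch (illustrative only) of the FunctionList lookup helpers declared above, assuming a ProgramBuilder `b` that owns the functions; the hypothetical QueryFunctions helper is ours:

    #include "src/tint/ast/function.h"
    #include "src/tint/program_builder.h"

    void QueryFunctions(tint::ProgramBuilder& b, const tint::ast::FunctionList& list) {
        auto main_sym = b.Symbols().Register("main");
        // First function named "main", regardless of pipeline stage.
        const tint::ast::Function* any_main = list.Find(main_sym);
        // "main" restricted to a specific entry-point stage.
        const tint::ast::Function* frag_main =
            list.Find(main_sym, tint::ast::PipelineStage::kFragment);
        // Whether any entry point with the given stage exists in the list.
        bool has_vertex = list.HasStage(tint::ast::PipelineStage::kVertex);
        (void)any_main;
        (void)frag_main;
        (void)has_vertex;
    }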
diff --git a/chromium/third_party/dawn/src/tint/ast/function_test.cc b/chromium/third_party/dawn/src/tint/ast/function_test.cc
index 7851ff99197..bd05fd827db 100644
--- a/chromium/third_party/dawn/src/tint/ast/function_test.cc
+++ b/chromium/third_party/dawn/src/tint/ast/function_test.cc
@@ -18,176 +18,173 @@
#include "src/tint/ast/test_helper.h"
#include "src/tint/ast/workgroup_attribute.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::ast {
namespace {
using FunctionTest = TestHelper;
TEST_F(FunctionTest, Creation) {
- VariableList params;
- params.push_back(Param("var", ty.i32()));
- auto* var = params[0];
-
- auto* f = Func("func", params, ty.void_(), StatementList{}, AttributeList{});
- EXPECT_EQ(f->symbol, Symbols().Get("func"));
- ASSERT_EQ(f->params.size(), 1u);
- EXPECT_TRUE(f->return_type->Is<ast::Void>());
- EXPECT_EQ(f->params[0], var);
+ VariableList params;
+ params.push_back(Param("var", ty.i32()));
+ auto* var = params[0];
+
+ auto* f = Func("func", params, ty.void_(), StatementList{}, AttributeList{});
+ EXPECT_EQ(f->symbol, Symbols().Get("func"));
+ ASSERT_EQ(f->params.size(), 1u);
+ EXPECT_TRUE(f->return_type->Is<ast::Void>());
+ EXPECT_EQ(f->params[0], var);
}
TEST_F(FunctionTest, Creation_WithSource) {
- VariableList params;
- params.push_back(Param("var", ty.i32()));
-
- auto* f = Func(Source{Source::Location{20, 2}}, "func", params, ty.void_(),
- StatementList{}, AttributeList{});
- auto src = f->source;
- EXPECT_EQ(src.range.begin.line, 20u);
- EXPECT_EQ(src.range.begin.column, 2u);
+ VariableList params;
+ params.push_back(Param("var", ty.i32()));
+
+ auto* f = Func(Source{Source::Location{20, 2}}, "func", params, ty.void_(), StatementList{},
+ AttributeList{});
+ auto src = f->source;
+ EXPECT_EQ(src.range.begin.line, 20u);
+ EXPECT_EQ(src.range.begin.column, 2u);
}
TEST_F(FunctionTest, Assert_InvalidName) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b;
- b.Func("", VariableList{}, b.ty.void_(), StatementList{},
- AttributeList{});
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b;
+ b.Func("", VariableList{}, b.ty.void_(), StatementList{}, AttributeList{});
+ },
+ "internal compiler error");
}
TEST_F(FunctionTest, Assert_Null_ReturnType) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b;
- b.Func("f", VariableList{}, nullptr, StatementList{}, AttributeList{});
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b;
+ b.Func("f", VariableList{}, nullptr, StatementList{}, AttributeList{});
+ },
+ "internal compiler error");
}
TEST_F(FunctionTest, Assert_Null_Param) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b;
- VariableList params;
- params.push_back(b.Param("var", b.ty.i32()));
- params.push_back(nullptr);
-
- b.Func("f", params, b.ty.void_(), StatementList{}, AttributeList{});
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b;
+ VariableList params;
+ params.push_back(b.Param("var", b.ty.i32()));
+ params.push_back(nullptr);
+
+ b.Func("f", params, b.ty.void_(), StatementList{}, AttributeList{});
+ },
+ "internal compiler error");
}
TEST_F(FunctionTest, Assert_DifferentProgramID_Symbol) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b1;
- ProgramBuilder b2;
- b1.Func(b2.Sym("func"), VariableList{}, b1.ty.void_(), StatementList{});
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b1;
+ ProgramBuilder b2;
+ b1.Func(b2.Sym("func"), VariableList{}, b1.ty.void_(), StatementList{});
+ },
+ "internal compiler error");
}
TEST_F(FunctionTest, Assert_DifferentProgramID_Param) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b1;
- ProgramBuilder b2;
- b1.Func("func", VariableList{b2.Param("var", b2.ty.i32())},
- b1.ty.void_(), StatementList{});
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b1;
+ ProgramBuilder b2;
+ b1.Func("func", VariableList{b2.Param("var", b2.ty.i32())}, b1.ty.void_(),
+ StatementList{});
+ },
+ "internal compiler error");
}
TEST_F(FunctionTest, Assert_DifferentProgramID_Attr) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b1;
- ProgramBuilder b2;
- b1.Func("func", VariableList{}, b1.ty.void_(), StatementList{},
- AttributeList{
- b2.WorkgroupSize(2, 4, 6),
- });
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b1;
+ ProgramBuilder b2;
+ b1.Func("func", VariableList{}, b1.ty.void_(), StatementList{},
+ AttributeList{
+ b2.WorkgroupSize(2_i, 4_i, 6_i),
+ });
+ },
+ "internal compiler error");
}
TEST_F(FunctionTest, Assert_DifferentProgramID_ReturnAttr) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b1;
- ProgramBuilder b2;
- b1.Func("func", VariableList{}, b1.ty.void_(), StatementList{},
- AttributeList{},
- AttributeList{
- b2.WorkgroupSize(2, 4, 6),
- });
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b1;
+ ProgramBuilder b2;
+ b1.Func("func", VariableList{}, b1.ty.void_(), StatementList{}, AttributeList{},
+ AttributeList{
+ b2.WorkgroupSize(2_i, 4_i, 6_i),
+ });
+ },
+ "internal compiler error");
}
TEST_F(FunctionTest, Assert_NonConstParam) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b;
- VariableList params;
- params.push_back(b.Var("var", b.ty.i32(), ast::StorageClass::kNone));
-
- b.Func("f", params, b.ty.void_(), StatementList{}, AttributeList{});
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b;
+ VariableList params;
+ params.push_back(b.Var("var", b.ty.i32(), ast::StorageClass::kNone));
+
+ b.Func("f", params, b.ty.void_(), StatementList{}, AttributeList{});
+ },
+ "internal compiler error");
}
using FunctionListTest = TestHelper;
TEST_F(FunctionListTest, FindSymbol) {
- auto* func = Func("main", VariableList{}, ty.f32(), StatementList{},
- ast::AttributeList{});
- FunctionList list;
- list.Add(func);
- EXPECT_EQ(func, list.Find(Symbols().Register("main")));
+ auto* func = Func("main", VariableList{}, ty.f32(), StatementList{}, ast::AttributeList{});
+ FunctionList list;
+ list.Add(func);
+ EXPECT_EQ(func, list.Find(Symbols().Register("main")));
}
TEST_F(FunctionListTest, FindSymbolMissing) {
- FunctionList list;
- EXPECT_EQ(nullptr, list.Find(Symbols().Register("Missing")));
+ FunctionList list;
+ EXPECT_EQ(nullptr, list.Find(Symbols().Register("Missing")));
}
TEST_F(FunctionListTest, FindSymbolStage) {
- auto* fs = Func("main", VariableList{}, ty.f32(), StatementList{},
- ast::AttributeList{
- Stage(PipelineStage::kFragment),
- });
- auto* vs = Func("main", VariableList{}, ty.f32(), StatementList{},
- ast::AttributeList{
- Stage(PipelineStage::kVertex),
- });
- FunctionList list;
- list.Add(fs);
- list.Add(vs);
- EXPECT_EQ(fs,
- list.Find(Symbols().Register("main"), PipelineStage::kFragment));
- EXPECT_EQ(vs, list.Find(Symbols().Register("main"), PipelineStage::kVertex));
+ auto* fs = Func("main", VariableList{}, ty.f32(), StatementList{},
+ ast::AttributeList{
+ Stage(PipelineStage::kFragment),
+ });
+ auto* vs = Func("main", VariableList{}, ty.f32(), StatementList{},
+ ast::AttributeList{
+ Stage(PipelineStage::kVertex),
+ });
+ FunctionList list;
+ list.Add(fs);
+ list.Add(vs);
+ EXPECT_EQ(fs, list.Find(Symbols().Register("main"), PipelineStage::kFragment));
+ EXPECT_EQ(vs, list.Find(Symbols().Register("main"), PipelineStage::kVertex));
}
TEST_F(FunctionListTest, FindSymbolStageMissing) {
- FunctionList list;
- list.Add(Func("main", VariableList{}, ty.f32(), StatementList{},
- ast::AttributeList{
- Stage(PipelineStage::kFragment),
- }));
- EXPECT_EQ(nullptr,
- list.Find(Symbols().Register("main"), PipelineStage::kVertex));
+ FunctionList list;
+ list.Add(Func("main", VariableList{}, ty.f32(), StatementList{},
+ ast::AttributeList{
+ Stage(PipelineStage::kFragment),
+ }));
+ EXPECT_EQ(nullptr, list.Find(Symbols().Register("main"), PipelineStage::kVertex));
}
TEST_F(FunctionListTest, HasStage) {
- FunctionList list;
- list.Add(Func("main", VariableList{}, ty.f32(), StatementList{},
- ast::AttributeList{
- Stage(PipelineStage::kFragment),
- }));
- EXPECT_TRUE(list.HasStage(PipelineStage::kFragment));
- EXPECT_FALSE(list.HasStage(PipelineStage::kVertex));
+ FunctionList list;
+ list.Add(Func("main", VariableList{}, ty.f32(), StatementList{},
+ ast::AttributeList{
+ Stage(PipelineStage::kFragment),
+ }));
+ EXPECT_TRUE(list.HasStage(PipelineStage::kFragment));
+ EXPECT_FALSE(list.HasStage(PipelineStage::kVertex));
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/ast/group_attribute.cc b/chromium/third_party/dawn/src/tint/ast/group_attribute.cc
index 9cd82b933da..394a6907718 100644
--- a/chromium/third_party/dawn/src/tint/ast/group_attribute.cc
+++ b/chromium/third_party/dawn/src/tint/ast/group_attribute.cc
@@ -28,13 +28,13 @@ GroupAttribute::GroupAttribute(ProgramID pid, const Source& src, uint32_t val)
GroupAttribute::~GroupAttribute() = default;
std::string GroupAttribute::Name() const {
- return "group";
+ return "group";
}
const GroupAttribute* GroupAttribute::Clone(CloneContext* ctx) const {
- // Clone arguments outside of create() call to have deterministic ordering
- auto src = ctx->Clone(source);
- return ctx->dst->create<GroupAttribute>(src, value);
+ // Clone arguments outside of create() call to have deterministic ordering
+ auto src = ctx->Clone(source);
+ return ctx->dst->create<GroupAttribute>(src, value);
}
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/group_attribute.h b/chromium/third_party/dawn/src/tint/ast/group_attribute.h
index de7015b2359..a55946177b5 100644
--- a/chromium/third_party/dawn/src/tint/ast/group_attribute.h
+++ b/chromium/third_party/dawn/src/tint/ast/group_attribute.h
@@ -23,25 +23,25 @@ namespace tint::ast {
/// A group attribute
class GroupAttribute final : public Castable<GroupAttribute, Attribute> {
- public:
- /// constructor
- /// @param pid the identifier of the program that owns this node
- /// @param src the source of this node
- /// @param value the group value
- GroupAttribute(ProgramID pid, const Source& src, uint32_t value);
- ~GroupAttribute() override;
-
- /// @returns the WGSL name for the attribute
- std::string Name() const override;
-
- /// Clones this node and all transitive child nodes using the `CloneContext`
- /// `ctx`.
- /// @param ctx the clone context
- /// @return the newly cloned node
- const GroupAttribute* Clone(CloneContext* ctx) const override;
-
- /// The group value
- const uint32_t value;
+ public:
+ /// constructor
+ /// @param pid the identifier of the program that owns this node
+ /// @param src the source of this node
+ /// @param value the group value
+ GroupAttribute(ProgramID pid, const Source& src, uint32_t value);
+ ~GroupAttribute() override;
+
+ /// @returns the WGSL name for the attribute
+ std::string Name() const override;
+
+ /// Clones this node and all transitive child nodes using the `CloneContext`
+ /// `ctx`.
+ /// @param ctx the clone context
+ /// @return the newly cloned node
+ const GroupAttribute* Clone(CloneContext* ctx) const override;
+
+ /// The group value
+ const uint32_t value;
};
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/group_attribute_test.cc b/chromium/third_party/dawn/src/tint/ast/group_attribute_test.cc
index 38cd30933b4..53167bbb7a1 100644
--- a/chromium/third_party/dawn/src/tint/ast/group_attribute_test.cc
+++ b/chromium/third_party/dawn/src/tint/ast/group_attribute_test.cc
@@ -20,8 +20,8 @@ namespace {
using GroupAttributeTest = TestHelper;
TEST_F(GroupAttributeTest, Creation) {
- auto* d = create<GroupAttribute>(2);
- EXPECT_EQ(2u, d->value);
+ auto* d = create<GroupAttribute>(2);
+ EXPECT_EQ(2u, d->value);
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/ast/i32.cc b/chromium/third_party/dawn/src/tint/ast/i32.cc
index 294699df72f..46fe75ed78a 100644
--- a/chromium/third_party/dawn/src/tint/ast/i32.cc
+++ b/chromium/third_party/dawn/src/tint/ast/i32.cc
@@ -27,12 +27,12 @@ I32::I32(I32&&) = default;
I32::~I32() = default;
std::string I32::FriendlyName(const SymbolTable&) const {
- return "i32";
+ return "i32";
}
const I32* I32::Clone(CloneContext* ctx) const {
- auto src = ctx->Clone(source);
- return ctx->dst->create<I32>(src);
+ auto src = ctx->Clone(source);
+ return ctx->dst->create<I32>(src);
}
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/i32.h b/chromium/third_party/dawn/src/tint/ast/i32.h
index 62043e67311..acafd37cfdb 100644
--- a/chromium/third_party/dawn/src/tint/ast/i32.h
+++ b/chromium/third_party/dawn/src/tint/ast/i32.h
@@ -23,24 +23,24 @@ namespace tint::ast {
/// A signed int 32 type.
class I32 final : public Castable<I32, Type> {
- public:
- /// Constructor
- /// @param pid the identifier of the program that owns this node
- /// @param src the source of this node
- I32(ProgramID pid, const Source& src);
- /// Move constructor
- I32(I32&&);
- ~I32() override;
-
- /// @param symbols the program's symbol table
- /// @returns the name for this type that closely resembles how it would be
- /// declared in WGSL.
- std::string FriendlyName(const SymbolTable& symbols) const override;
-
- /// Clones this type and all transitive types using the `CloneContext` `ctx`.
- /// @param ctx the clone context
- /// @return the newly cloned type
- const I32* Clone(CloneContext* ctx) const override;
+ public:
+ /// Constructor
+ /// @param pid the identifier of the program that owns this node
+ /// @param src the source of this node
+ I32(ProgramID pid, const Source& src);
+ /// Move constructor
+ I32(I32&&);
+ ~I32() override;
+
+ /// @param symbols the program's symbol table
+ /// @returns the name for this type that closely resembles how it would be
+ /// declared in WGSL.
+ std::string FriendlyName(const SymbolTable& symbols) const override;
+
+ /// Clones this type and all transitive types using the `CloneContext` `ctx`.
+ /// @param ctx the clone context
+ /// @return the newly cloned type
+ const I32* Clone(CloneContext* ctx) const override;
};
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/i32_test.cc b/chromium/third_party/dawn/src/tint/ast/i32_test.cc
index 7a632a77721..ee220cb91ac 100644
--- a/chromium/third_party/dawn/src/tint/ast/i32_test.cc
+++ b/chromium/third_party/dawn/src/tint/ast/i32_test.cc
@@ -22,8 +22,8 @@ namespace {
using AstI32Test = TestHelper;
TEST_F(AstI32Test, FriendlyName) {
- auto* i = create<I32>();
- EXPECT_EQ(i->FriendlyName(Symbols()), "i32");
+ auto* i = create<I32>();
+ EXPECT_EQ(i->FriendlyName(Symbols()), "i32");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/ast/id_attribute.cc b/chromium/third_party/dawn/src/tint/ast/id_attribute.cc
index ac5957d4b3a..b6e19574b36 100644
--- a/chromium/third_party/dawn/src/tint/ast/id_attribute.cc
+++ b/chromium/third_party/dawn/src/tint/ast/id_attribute.cc
@@ -28,13 +28,13 @@ IdAttribute::IdAttribute(ProgramID pid, const Source& src, uint32_t val)
IdAttribute::~IdAttribute() = default;
std::string IdAttribute::Name() const {
- return "id";
+ return "id";
}
const IdAttribute* IdAttribute::Clone(CloneContext* ctx) const {
- // Clone arguments outside of create() call to have deterministic ordering
- auto src = ctx->Clone(source);
- return ctx->dst->create<IdAttribute>(src, value);
+ // Clone arguments outside of create() call to have deterministic ordering
+ auto src = ctx->Clone(source);
+ return ctx->dst->create<IdAttribute>(src, value);
}
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/id_attribute.h b/chromium/third_party/dawn/src/tint/ast/id_attribute.h
index 4789e0d24ee..5e3ec12f782 100644
--- a/chromium/third_party/dawn/src/tint/ast/id_attribute.h
+++ b/chromium/third_party/dawn/src/tint/ast/id_attribute.h
@@ -23,25 +23,25 @@ namespace tint::ast {
/// An id attribute for pipeline-overridable constants
class IdAttribute final : public Castable<IdAttribute, Attribute> {
- public:
- /// Create an id attribute.
- /// @param pid the identifier of the program that owns this node
- /// @param src the source of this node
- /// @param val the numeric id value
- IdAttribute(ProgramID pid, const Source& src, uint32_t val);
- ~IdAttribute() override;
-
- /// @returns the WGSL name for the attribute
- std::string Name() const override;
-
- /// Clones this node and all transitive child nodes using the `CloneContext`
- /// `ctx`.
- /// @param ctx the clone context
- /// @return the newly cloned node
- const IdAttribute* Clone(CloneContext* ctx) const override;
-
- /// The id value
- const uint32_t value;
+ public:
+ /// Create an id attribute.
+ /// @param pid the identifier of the program that owns this node
+ /// @param src the source of this node
+ /// @param val the numeric id value
+ IdAttribute(ProgramID pid, const Source& src, uint32_t val);
+ ~IdAttribute() override;
+
+ /// @returns the WGSL name for the attribute
+ std::string Name() const override;
+
+ /// Clones this node and all transitive child nodes using the `CloneContext`
+ /// `ctx`.
+ /// @param ctx the clone context
+ /// @return the newly cloned node
+ const IdAttribute* Clone(CloneContext* ctx) const override;
+
+ /// The id value
+ const uint32_t value;
};
} // namespace tint::ast
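
As with the other attribute nodes in this patch, IdAttribute reports its WGSL keyword through Name(); a minimal illustrative sketch of constructing @id(12) follows (the wrapper function is hypothetical).

    #include "src/tint/program_builder.h"

    // Illustrative only: @id(12) as an ast::IdAttribute node.
    void BuildIdAttribute(tint::ProgramBuilder& b) {
        auto* attr = b.create<tint::ast::IdAttribute>(12u);
        // attr->value == 12u; attr->Name() returns "id" (see id_attribute.cc above).
        (void)attr;
    }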
diff --git a/chromium/third_party/dawn/src/tint/ast/id_attribute_test.cc b/chromium/third_party/dawn/src/tint/ast/id_attribute_test.cc
index af623ba60e7..6957d66b637 100644
--- a/chromium/third_party/dawn/src/tint/ast/id_attribute_test.cc
+++ b/chromium/third_party/dawn/src/tint/ast/id_attribute_test.cc
@@ -22,8 +22,8 @@ namespace {
using IdAttributeTest = TestHelper;
TEST_F(IdAttributeTest, Creation) {
- auto* d = create<IdAttribute>(12);
- EXPECT_EQ(12u, d->value);
+ auto* d = create<IdAttribute>(12);
+ EXPECT_EQ(12u, d->value);
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/ast/identifier_expression.cc b/chromium/third_party/dawn/src/tint/ast/identifier_expression.cc
index ff04f737b51..453ae696622 100644
--- a/chromium/third_party/dawn/src/tint/ast/identifier_expression.cc
+++ b/chromium/third_party/dawn/src/tint/ast/identifier_expression.cc
@@ -20,24 +20,21 @@ TINT_INSTANTIATE_TYPEINFO(tint::ast::IdentifierExpression);
namespace tint::ast {
-IdentifierExpression::IdentifierExpression(ProgramID pid,
- const Source& src,
- Symbol sym)
+IdentifierExpression::IdentifierExpression(ProgramID pid, const Source& src, Symbol sym)
: Base(pid, src), symbol(sym) {
- TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, symbol, program_id);
- TINT_ASSERT(AST, symbol.IsValid());
+ TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, symbol, program_id);
+ TINT_ASSERT(AST, symbol.IsValid());
}
IdentifierExpression::IdentifierExpression(IdentifierExpression&&) = default;
IdentifierExpression::~IdentifierExpression() = default;
-const IdentifierExpression* IdentifierExpression::Clone(
- CloneContext* ctx) const {
- // Clone arguments outside of create() call to have deterministic ordering
- auto src = ctx->Clone(source);
- auto sym = ctx->Clone(symbol);
- return ctx->dst->create<IdentifierExpression>(src, sym);
+const IdentifierExpression* IdentifierExpression::Clone(CloneContext* ctx) const {
+ // Clone arguments outside of create() call to have deterministic ordering
+ auto src = ctx->Clone(source);
+ auto sym = ctx->Clone(symbol);
+ return ctx->dst->create<IdentifierExpression>(src, sym);
}
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/identifier_expression.h b/chromium/third_party/dawn/src/tint/ast/identifier_expression.h
index 80e013e6f22..c3e1c30e544 100644
--- a/chromium/third_party/dawn/src/tint/ast/identifier_expression.h
+++ b/chromium/third_party/dawn/src/tint/ast/identifier_expression.h
@@ -20,26 +20,25 @@
namespace tint::ast {
/// An identifier expression
-class IdentifierExpression final
- : public Castable<IdentifierExpression, Expression> {
- public:
- /// Constructor
- /// @param pid the identifier of the program that owns this node
- /// @param src the source of this node
- /// @param sym the symbol for the identifier
- IdentifierExpression(ProgramID pid, const Source& src, Symbol sym);
- /// Move constructor
- IdentifierExpression(IdentifierExpression&&);
- ~IdentifierExpression() override;
-
- /// Clones this node and all transitive child nodes using the `CloneContext`
- /// `ctx`.
- /// @param ctx the clone context
- /// @return the newly cloned node
- const IdentifierExpression* Clone(CloneContext* ctx) const override;
-
- /// The symbol for the identifier
- const Symbol symbol;
+class IdentifierExpression final : public Castable<IdentifierExpression, Expression> {
+ public:
+ /// Constructor
+ /// @param pid the identifier of the program that owns this node
+ /// @param src the source of this node
+ /// @param sym the symbol for the identifier
+ IdentifierExpression(ProgramID pid, const Source& src, Symbol sym);
+ /// Move constructor
+ IdentifierExpression(IdentifierExpression&&);
+ ~IdentifierExpression() override;
+
+ /// Clones this node and all transitive child nodes using the `CloneContext`
+ /// `ctx`.
+ /// @param ctx the clone context
+ /// @return the newly cloned node
+ const IdentifierExpression* Clone(CloneContext* ctx) const override;
+
+ /// The symbol for the identifier
+ const Symbol symbol;
};
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/identifier_expression_test.cc b/chromium/third_party/dawn/src/tint/ast/identifier_expression_test.cc
index 0609eb11c3a..1f8033d0f66 100644
--- a/chromium/third_party/dawn/src/tint/ast/identifier_expression_test.cc
+++ b/chromium/third_party/dawn/src/tint/ast/identifier_expression_test.cc
@@ -21,41 +21,41 @@ namespace {
using IdentifierExpressionTest = TestHelper;
TEST_F(IdentifierExpressionTest, Creation) {
- auto* i = Expr("ident");
- EXPECT_EQ(i->symbol, Symbol(1, ID()));
+ auto* i = Expr("ident");
+ EXPECT_EQ(i->symbol, Symbol(1, ID()));
}
TEST_F(IdentifierExpressionTest, Creation_WithSource) {
- auto* i = Expr(Source{Source::Location{20, 2}}, "ident");
- EXPECT_EQ(i->symbol, Symbol(1, ID()));
+ auto* i = Expr(Source{Source::Location{20, 2}}, "ident");
+ EXPECT_EQ(i->symbol, Symbol(1, ID()));
- auto src = i->source;
- EXPECT_EQ(src.range.begin.line, 20u);
- EXPECT_EQ(src.range.begin.column, 2u);
+ auto src = i->source;
+ EXPECT_EQ(src.range.begin.line, 20u);
+ EXPECT_EQ(src.range.begin.column, 2u);
}
TEST_F(IdentifierExpressionTest, IsIdentifier) {
- auto* i = Expr("ident");
- EXPECT_TRUE(i->Is<IdentifierExpression>());
+ auto* i = Expr("ident");
+ EXPECT_TRUE(i->Is<IdentifierExpression>());
}
TEST_F(IdentifierExpressionTest, Assert_InvalidSymbol) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b;
- b.Expr("");
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b;
+ b.Expr("");
+ },
+ "internal compiler error");
}
TEST_F(IdentifierExpressionTest, Assert_DifferentProgramID_Symbol) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b1;
- ProgramBuilder b2;
- b1.Expr(b2.Sym("b2"));
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b1;
+ ProgramBuilder b2;
+ b1.Expr(b2.Sym("b2"));
+ },
+ "internal compiler error");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/ast/if_statement.cc b/chromium/third_party/dawn/src/tint/ast/if_statement.cc
index 4b78da32520..c8fd374042b 100644
--- a/chromium/third_party/dawn/src/tint/ast/if_statement.cc
+++ b/chromium/third_party/dawn/src/tint/ast/if_statement.cc
@@ -24,19 +24,16 @@ IfStatement::IfStatement(ProgramID pid,
const Source& src,
const Expression* cond,
const BlockStatement* b,
- ElseStatementList else_stmts)
- : Base(pid, src),
- condition(cond),
- body(b),
- else_statements(std::move(else_stmts)) {
- TINT_ASSERT(AST, condition);
- TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, condition, program_id);
- TINT_ASSERT(AST, body);
- TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, body, program_id);
- for (auto* el : else_statements) {
- TINT_ASSERT(AST, el);
- TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, el, program_id);
- }
+ const Statement* else_stmt)
+ : Base(pid, src), condition(cond), body(b), else_statement(else_stmt) {
+ TINT_ASSERT(AST, condition);
+ TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, condition, program_id);
+ TINT_ASSERT(AST, body);
+ TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, body, program_id);
+ if (else_statement) {
+ TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, else_statement, program_id);
+ TINT_ASSERT(AST, (else_statement->IsAnyOf<ast::IfStatement, ast::BlockStatement>()));
+ }
}
IfStatement::IfStatement(IfStatement&&) = default;
@@ -44,12 +41,12 @@ IfStatement::IfStatement(IfStatement&&) = default;
IfStatement::~IfStatement() = default;
const IfStatement* IfStatement::Clone(CloneContext* ctx) const {
- // Clone arguments outside of create() call to have deterministic ordering
- auto src = ctx->Clone(source);
- auto* cond = ctx->Clone(condition);
- auto* b = ctx->Clone(body);
- auto el = ctx->Clone(else_statements);
- return ctx->dst->create<IfStatement>(src, cond, b, el);
+ // Clone arguments outside of create() call to have deterministic ordering
+ auto src = ctx->Clone(source);
+ auto* cond = ctx->Clone(condition);
+ auto* b = ctx->Clone(body);
+ auto* el = ctx->Clone(else_statement);
+ return ctx->dst->create<IfStatement>(src, cond, b, el);
}
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/if_statement.h b/chromium/third_party/dawn/src/tint/ast/if_statement.h
index df843f87277..75e6eee1ef6 100644
--- a/chromium/third_party/dawn/src/tint/ast/if_statement.h
+++ b/chromium/third_party/dawn/src/tint/ast/if_statement.h
@@ -17,42 +17,43 @@
#include <utility>
-#include "src/tint/ast/else_statement.h"
+#include "src/tint/ast/block_statement.h"
+#include "src/tint/ast/expression.h"
namespace tint::ast {
/// An if statement
class IfStatement final : public Castable<IfStatement, Statement> {
- public:
- /// Constructor
- /// @param pid the identifier of the program that owns this node
- /// @param src the source of this node
- /// @param condition the if condition
- /// @param body the if body
- /// @param else_stmts the else statements
- IfStatement(ProgramID pid,
- const Source& src,
- const Expression* condition,
- const BlockStatement* body,
- ElseStatementList else_stmts);
- /// Move constructor
- IfStatement(IfStatement&&);
- ~IfStatement() override;
-
- /// Clones this node and all transitive child nodes using the `CloneContext`
- /// `ctx`.
- /// @param ctx the clone context
- /// @return the newly cloned node
- const IfStatement* Clone(CloneContext* ctx) const override;
-
- /// The if condition or nullptr if none set
- const Expression* const condition;
-
- /// The if body
- const BlockStatement* const body;
-
- /// The else statements
- const ElseStatementList else_statements;
+ public:
+ /// Constructor
+ /// @param pid the identifier of the program that owns this node
+ /// @param src the source of this node
+ /// @param condition the if condition
+ /// @param body the if body
+ /// @param else_stmt the else statement, or nullptr
+ IfStatement(ProgramID pid,
+ const Source& src,
+ const Expression* condition,
+ const BlockStatement* body,
+ const Statement* else_stmt);
+ /// Move constructor
+ IfStatement(IfStatement&&);
+ ~IfStatement() override;
+
+ /// Clones this node and all transitive child nodes using the `CloneContext`
+ /// `ctx`.
+ /// @param ctx the clone context
+ /// @return the newly cloned node
+ const IfStatement* Clone(CloneContext* ctx) const override;
+
+ /// The if condition or nullptr if none set
+ const Expression* const condition;
+
+ /// The if body
+ const BlockStatement* const body;
+
+ /// The optional else statement, or nullptr
+ const Statement* else_statement;
};
} // namespace tint::ast
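
Because IfStatement now carries a single optional else_statement (which must itself be an IfStatement or BlockStatement), else-if chains are built by nesting IfStatements rather than by filling an ElseStatementList. A hedged sketch using the ProgramBuilder If()/Else() helpers that the updated tests below rely on; the condition names are placeholders.

    #include "src/tint/program_builder.h"

    // Illustrative only: 'if (a) { } else if (c) { } else { }' as nested nodes.
    const tint::ast::IfStatement* BuildIfChain(tint::ProgramBuilder& b) {
        return b.If(b.Expr("a"), b.Block(),               // if (a) { }
                    b.Else(b.If(b.Expr("c"), b.Block(),   // else if (c) { }
                                b.Else(b.Block()))));     // else { }
    }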
diff --git a/chromium/third_party/dawn/src/tint/ast/if_statement_test.cc b/chromium/third_party/dawn/src/tint/ast/if_statement_test.cc
index 83f998ae15e..9115cb76e8e 100644
--- a/chromium/third_party/dawn/src/tint/ast/if_statement_test.cc
+++ b/chromium/third_party/dawn/src/tint/ast/if_statement_test.cc
@@ -24,80 +24,73 @@ namespace {
using IfStatementTest = TestHelper;
TEST_F(IfStatementTest, Creation) {
- auto* cond = Expr("cond");
- auto* stmt = create<IfStatement>(Source{Source::Location{20, 2}}, cond,
- Block(create<DiscardStatement>()),
- ElseStatementList{});
- auto src = stmt->source;
- EXPECT_EQ(src.range.begin.line, 20u);
- EXPECT_EQ(src.range.begin.column, 2u);
+ auto* cond = Expr("cond");
+ auto* stmt = If(Source{Source::Location{20, 2}}, cond, Block(create<DiscardStatement>()));
+ auto src = stmt->source;
+ EXPECT_EQ(src.range.begin.line, 20u);
+ EXPECT_EQ(src.range.begin.column, 2u);
}
TEST_F(IfStatementTest, IsIf) {
- auto* stmt = create<IfStatement>(Expr(true), Block(), ElseStatementList{});
- EXPECT_TRUE(stmt->Is<IfStatement>());
+ auto* stmt = If(Expr(true), Block());
+ EXPECT_TRUE(stmt->Is<IfStatement>());
}
TEST_F(IfStatementTest, Assert_Null_Condition) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b;
- b.create<IfStatement>(nullptr, b.Block(), ElseStatementList{});
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b;
+ b.If(nullptr, b.Block());
+ },
+ "internal compiler error");
}
TEST_F(IfStatementTest, Assert_Null_Body) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b;
- b.create<IfStatement>(b.Expr(true), nullptr, ElseStatementList{});
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b;
+ b.If(b.Expr(true), nullptr);
+ },
+ "internal compiler error");
}
-TEST_F(IfStatementTest, Assert_Null_ElseStatement) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b;
- auto* body = b.create<BlockStatement>(StatementList{});
- b.create<IfStatement>(b.Expr(true), body, ElseStatementList{nullptr});
- },
- "internal compiler error");
+TEST_F(IfStatementTest, Assert_InvalidElse) {
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b;
+ b.If(b.Expr(true), b.Block(), b.Else(b.CallStmt(b.Call("foo"))));
+ },
+ "internal compiler error");
}
TEST_F(IfStatementTest, Assert_DifferentProgramID_Cond) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b1;
- ProgramBuilder b2;
- b1.create<IfStatement>(b2.Expr(true), b1.Block(), ElseStatementList{});
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b1;
+ ProgramBuilder b2;
+ b1.If(b2.Expr(true), b1.Block());
+ },
+ "internal compiler error");
}
TEST_F(IfStatementTest, Assert_DifferentProgramID_Body) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b1;
- ProgramBuilder b2;
- b1.create<IfStatement>(b1.Expr(true), b2.Block(), ElseStatementList{});
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b1;
+ ProgramBuilder b2;
+ b1.If(b1.Expr(true), b2.Block());
+ },
+ "internal compiler error");
}
TEST_F(IfStatementTest, Assert_DifferentProgramID_ElseStatement) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b1;
- ProgramBuilder b2;
- b1.create<IfStatement>(
- b1.Expr(true), b1.Block(),
- ElseStatementList{
- b2.create<ElseStatement>(b2.Expr("ident"), b2.Block()),
- });
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b1;
+ ProgramBuilder b2;
+ b1.If(b1.Expr(true), b1.Block(), b2.Else(b2.If(b2.Expr("ident"), b2.Block())));
+ },
+ "internal compiler error");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/ast/increment_decrement_statement.cc b/chromium/third_party/dawn/src/tint/ast/increment_decrement_statement.cc
index 4ec7f87591f..99c65cb4f75 100644
--- a/chromium/third_party/dawn/src/tint/ast/increment_decrement_statement.cc
+++ b/chromium/third_party/dawn/src/tint/ast/increment_decrement_statement.cc
@@ -25,20 +25,18 @@ IncrementDecrementStatement::IncrementDecrementStatement(ProgramID pid,
const Expression* l,
bool inc)
: Base(pid, src), lhs(l), increment(inc) {
- TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, lhs, program_id);
+ TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, lhs, program_id);
}
-IncrementDecrementStatement::IncrementDecrementStatement(
- IncrementDecrementStatement&&) = default;
+IncrementDecrementStatement::IncrementDecrementStatement(IncrementDecrementStatement&&) = default;
IncrementDecrementStatement::~IncrementDecrementStatement() = default;
-const IncrementDecrementStatement* IncrementDecrementStatement::Clone(
- CloneContext* ctx) const {
- // Clone arguments outside of create() call to have deterministic ordering
- auto src = ctx->Clone(source);
- auto* l = ctx->Clone(lhs);
- return ctx->dst->create<IncrementDecrementStatement>(src, l, increment);
+const IncrementDecrementStatement* IncrementDecrementStatement::Clone(CloneContext* ctx) const {
+ // Clone arguments outside of create() call to have deterministic ordering
+ auto src = ctx->Clone(source);
+ auto* l = ctx->Clone(lhs);
+ return ctx->dst->create<IncrementDecrementStatement>(src, l, increment);
}
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/increment_decrement_statement.h b/chromium/third_party/dawn/src/tint/ast/increment_decrement_statement.h
index f7882da152d..05b8478e948 100644
--- a/chromium/third_party/dawn/src/tint/ast/increment_decrement_statement.h
+++ b/chromium/third_party/dawn/src/tint/ast/increment_decrement_statement.h
@@ -21,33 +21,29 @@
namespace tint::ast {
/// An increment or decrement statement
-class IncrementDecrementStatement final
- : public Castable<IncrementDecrementStatement, Statement> {
- public:
- /// Constructor
- /// @param pid the identifier of the program that owns this node
- /// @param src the source of this node
- /// @param lhs the LHS expression
- /// @param inc `true` for increment, `false` for decrement
- IncrementDecrementStatement(ProgramID pid,
- const Source& src,
- const Expression* lhs,
- bool inc);
- /// Move constructor
- IncrementDecrementStatement(IncrementDecrementStatement&&);
- ~IncrementDecrementStatement() override;
-
- /// Clones this node and all transitive child nodes using the `CloneContext`
- /// `ctx`.
- /// @param ctx the clone context
- /// @return the newly cloned node
- const IncrementDecrementStatement* Clone(CloneContext* ctx) const override;
-
- /// The LHS expression.
- const Expression* const lhs;
-
- /// `true` for increment, `false` for decrement.
- bool increment;
+class IncrementDecrementStatement final : public Castable<IncrementDecrementStatement, Statement> {
+ public:
+ /// Constructor
+ /// @param pid the identifier of the program that owns this node
+ /// @param src the source of this node
+ /// @param lhs the LHS expression
+ /// @param inc `true` for increment, `false` for decrement
+ IncrementDecrementStatement(ProgramID pid, const Source& src, const Expression* lhs, bool inc);
+ /// Move constructor
+ IncrementDecrementStatement(IncrementDecrementStatement&&);
+ ~IncrementDecrementStatement() override;
+
+ /// Clones this node and all transitive child nodes using the `CloneContext`
+ /// `ctx`.
+ /// @param ctx the clone context
+ /// @return the newly cloned node
+ const IncrementDecrementStatement* Clone(CloneContext* ctx) const override;
+
+ /// The LHS expression.
+ const Expression* const lhs;
+
+ /// `true` for increment, `false` for decrement.
+ bool increment;
};
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/increment_decrement_statement_test.cc b/chromium/third_party/dawn/src/tint/ast/increment_decrement_statement_test.cc
index ccb5f078528..6a5ae67df91 100644
--- a/chromium/third_party/dawn/src/tint/ast/increment_decrement_statement_test.cc
+++ b/chromium/third_party/dawn/src/tint/ast/increment_decrement_statement_test.cc
@@ -23,45 +23,44 @@ namespace {
using IncrementDecrementStatementTest = TestHelper;
TEST_F(IncrementDecrementStatementTest, Creation) {
- auto* expr = Expr("expr");
+ auto* expr = Expr("expr");
- auto* i = create<IncrementDecrementStatement>(expr, true);
- EXPECT_EQ(i->lhs, expr);
- EXPECT_TRUE(i->increment);
+ auto* i = create<IncrementDecrementStatement>(expr, true);
+ EXPECT_EQ(i->lhs, expr);
+ EXPECT_TRUE(i->increment);
}
TEST_F(IncrementDecrementStatementTest, Creation_WithSource) {
- auto* expr = Expr("expr");
- auto* i = create<IncrementDecrementStatement>(Source{Source::Location{20, 2}},
- expr, true);
- auto src = i->source;
- EXPECT_EQ(i->lhs, expr);
- EXPECT_TRUE(i->increment);
- EXPECT_EQ(src.range.begin.line, 20u);
- EXPECT_EQ(src.range.begin.column, 2u);
+ auto* expr = Expr("expr");
+ auto* i = create<IncrementDecrementStatement>(Source{Source::Location{20, 2}}, expr, true);
+ auto src = i->source;
+ EXPECT_EQ(i->lhs, expr);
+ EXPECT_TRUE(i->increment);
+ EXPECT_EQ(src.range.begin.line, 20u);
+ EXPECT_EQ(src.range.begin.column, 2u);
}
TEST_F(IncrementDecrementStatementTest, IsIncrementDecrement) {
- auto* expr = Expr("expr");
- auto* i = create<IncrementDecrementStatement>(expr, true);
- EXPECT_TRUE(i->Is<IncrementDecrementStatement>());
+ auto* expr = Expr("expr");
+ auto* i = create<IncrementDecrementStatement>(expr, true);
+ EXPECT_TRUE(i->Is<IncrementDecrementStatement>());
}
TEST_F(IncrementDecrementStatementTest, Decrement) {
- auto* expr = Expr("expr");
- auto* i = create<IncrementDecrementStatement>(expr, false);
- EXPECT_EQ(i->lhs, expr);
- EXPECT_FALSE(i->increment);
+ auto* expr = Expr("expr");
+ auto* i = create<IncrementDecrementStatement>(expr, false);
+ EXPECT_EQ(i->lhs, expr);
+ EXPECT_FALSE(i->increment);
}
TEST_F(IncrementDecrementStatementTest, Assert_DifferentProgramID_Expr) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b1;
- ProgramBuilder b2;
- b1.create<IncrementDecrementStatement>(b2.Expr(true), true);
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b1;
+ ProgramBuilder b2;
+ b1.create<IncrementDecrementStatement>(b2.Expr(true), true);
+ },
+ "internal compiler error");
}
} // namespace
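
For reference, a small illustrative sketch of the flag-based constructor tested above, building i++ and i-- statements; the variable name and wrapper function are not part of this patch.

    #include "src/tint/program_builder.h"

    // Illustrative only: i++ and i-- as IncrementDecrementStatement nodes.
    void BuildIncDec(tint::ProgramBuilder& b) {
        using tint::ast::IncrementDecrementStatement;
        auto* inc = b.create<IncrementDecrementStatement>(b.Expr("i"), /*increment*/ true);
        auto* dec = b.create<IncrementDecrementStatement>(b.Expr("i"), /*increment*/ false);
        (void)inc; (void)dec;
    }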
diff --git a/chromium/third_party/dawn/src/tint/ast/index_accessor_expression.cc b/chromium/third_party/dawn/src/tint/ast/index_accessor_expression.cc
index 6fed4eae5ac..232bc793c23 100644
--- a/chromium/third_party/dawn/src/tint/ast/index_accessor_expression.cc
+++ b/chromium/third_party/dawn/src/tint/ast/index_accessor_expression.cc
@@ -25,24 +25,22 @@ IndexAccessorExpression::IndexAccessorExpression(ProgramID pid,
const Expression* obj,
const Expression* idx)
: Base(pid, src), object(obj), index(idx) {
- TINT_ASSERT(AST, object);
- TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, object, program_id);
- TINT_ASSERT(AST, idx);
- TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, idx, program_id);
+ TINT_ASSERT(AST, object);
+ TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, object, program_id);
+ TINT_ASSERT(AST, idx);
+ TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, idx, program_id);
}
-IndexAccessorExpression::IndexAccessorExpression(IndexAccessorExpression&&) =
- default;
+IndexAccessorExpression::IndexAccessorExpression(IndexAccessorExpression&&) = default;
IndexAccessorExpression::~IndexAccessorExpression() = default;
-const IndexAccessorExpression* IndexAccessorExpression::Clone(
- CloneContext* ctx) const {
- // Clone arguments outside of create() call to have deterministic ordering
- auto src = ctx->Clone(source);
- auto* obj = ctx->Clone(object);
- auto* idx = ctx->Clone(index);
- return ctx->dst->create<IndexAccessorExpression>(src, obj, idx);
+const IndexAccessorExpression* IndexAccessorExpression::Clone(CloneContext* ctx) const {
+ // Clone arguments outside of create() call to have deterministic ordering
+ auto src = ctx->Clone(source);
+ auto* obj = ctx->Clone(object);
+ auto* idx = ctx->Clone(index);
+ return ctx->dst->create<IndexAccessorExpression>(src, obj, idx);
}
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/index_accessor_expression.h b/chromium/third_party/dawn/src/tint/ast/index_accessor_expression.h
index a5e48ccd3d4..c36f6b89115 100644
--- a/chromium/third_party/dawn/src/tint/ast/index_accessor_expression.h
+++ b/chromium/third_party/dawn/src/tint/ast/index_accessor_expression.h
@@ -20,33 +20,32 @@
namespace tint::ast {
/// An index accessor expression
-class IndexAccessorExpression final
- : public Castable<IndexAccessorExpression, Expression> {
- public:
- /// Constructor
- /// @param program_id the identifier of the program that owns this node
- /// @param source the index accessor source
- /// @param obj the object
- /// @param idx the index expression
- IndexAccessorExpression(ProgramID program_id,
- const Source& source,
- const Expression* obj,
- const Expression* idx);
- /// Move constructor
- IndexAccessorExpression(IndexAccessorExpression&&);
- ~IndexAccessorExpression() override;
-
- /// Clones this node and all transitive child nodes using the `CloneContext`
- /// `ctx`.
- /// @param ctx the clone context
- /// @return the newly cloned node
- const IndexAccessorExpression* Clone(CloneContext* ctx) const override;
-
- /// the array, vector or matrix
- const Expression* const object;
-
- /// the index expression
- const Expression* const index;
+class IndexAccessorExpression final : public Castable<IndexAccessorExpression, Expression> {
+ public:
+ /// Constructor
+ /// @param program_id the identifier of the program that owns this node
+ /// @param source the index accessor source
+ /// @param obj the object
+ /// @param idx the index expression
+ IndexAccessorExpression(ProgramID program_id,
+ const Source& source,
+ const Expression* obj,
+ const Expression* idx);
+ /// Move constructor
+ IndexAccessorExpression(IndexAccessorExpression&&);
+ ~IndexAccessorExpression() override;
+
+ /// Clones this node and all transitive child nodes using the `CloneContext`
+ /// `ctx`.
+ /// @param ctx the clone context
+ /// @return the newly cloned node
+ const IndexAccessorExpression* Clone(CloneContext* ctx) const override;
+
+ /// the array, vector or matrix
+ const Expression* const object;
+
+ /// the index expression
+ const Expression* const index;
};
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/index_accessor_expression_test.cc b/chromium/third_party/dawn/src/tint/ast/index_accessor_expression_test.cc
index 45fef2b889c..efdcecb55b3 100644
--- a/chromium/third_party/dawn/src/tint/ast/index_accessor_expression_test.cc
+++ b/chromium/third_party/dawn/src/tint/ast/index_accessor_expression_test.cc
@@ -21,68 +21,68 @@ namespace {
using IndexAccessorExpressionTest = TestHelper;
TEST_F(IndexAccessorExpressionTest, Create) {
- auto* obj = Expr("obj");
- auto* idx = Expr("idx");
+ auto* obj = Expr("obj");
+ auto* idx = Expr("idx");
- auto* exp = IndexAccessor(obj, idx);
- ASSERT_EQ(exp->object, obj);
- ASSERT_EQ(exp->index, idx);
+ auto* exp = IndexAccessor(obj, idx);
+ ASSERT_EQ(exp->object, obj);
+ ASSERT_EQ(exp->index, idx);
}
TEST_F(IndexAccessorExpressionTest, CreateWithSource) {
- auto* obj = Expr("obj");
- auto* idx = Expr("idx");
+ auto* obj = Expr("obj");
+ auto* idx = Expr("idx");
- auto* exp = IndexAccessor(Source{{20, 2}}, obj, idx);
- auto src = exp->source;
- EXPECT_EQ(src.range.begin.line, 20u);
- EXPECT_EQ(src.range.begin.column, 2u);
+ auto* exp = IndexAccessor(Source{{20, 2}}, obj, idx);
+ auto src = exp->source;
+ EXPECT_EQ(src.range.begin.line, 20u);
+ EXPECT_EQ(src.range.begin.column, 2u);
}
TEST_F(IndexAccessorExpressionTest, IsIndexAccessor) {
- auto* obj = Expr("obj");
- auto* idx = Expr("idx");
+ auto* obj = Expr("obj");
+ auto* idx = Expr("idx");
- auto* exp = IndexAccessor(obj, idx);
- EXPECT_TRUE(exp->Is<IndexAccessorExpression>());
+ auto* exp = IndexAccessor(obj, idx);
+ EXPECT_TRUE(exp->Is<IndexAccessorExpression>());
}
TEST_F(IndexAccessorExpressionTest, Assert_Null_Array) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b;
- b.IndexAccessor(nullptr, b.Expr("idx"));
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b;
+ b.IndexAccessor(nullptr, b.Expr("idx"));
+ },
+ "internal compiler error");
}
TEST_F(IndexAccessorExpressionTest, Assert_Null_Index) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b;
- b.IndexAccessor(b.Expr("arr"), nullptr);
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b;
+ b.IndexAccessor(b.Expr("arr"), nullptr);
+ },
+ "internal compiler error");
}
TEST_F(IndexAccessorExpressionTest, Assert_DifferentProgramID_Array) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b1;
- ProgramBuilder b2;
- b1.IndexAccessor(b2.Expr("arr"), b1.Expr("idx"));
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b1;
+ ProgramBuilder b2;
+ b1.IndexAccessor(b2.Expr("arr"), b1.Expr("idx"));
+ },
+ "internal compiler error");
}
TEST_F(IndexAccessorExpressionTest, Assert_DifferentProgramID_Index) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b1;
- ProgramBuilder b2;
- b1.IndexAccessor(b1.Expr("arr"), b2.Expr("idx"));
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b1;
+ ProgramBuilder b2;
+ b1.IndexAccessor(b1.Expr("arr"), b2.Expr("idx"));
+ },
+ "internal compiler error");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/ast/int_literal_expression.cc b/chromium/third_party/dawn/src/tint/ast/int_literal_expression.cc
index 8cd68c987d8..7e11f7ec5a4 100644
--- a/chromium/third_party/dawn/src/tint/ast/int_literal_expression.cc
+++ b/chromium/third_party/dawn/src/tint/ast/int_literal_expression.cc
@@ -14,13 +14,35 @@
#include "src/tint/ast/int_literal_expression.h"
+#include "src/tint/program_builder.h"
+
TINT_INSTANTIATE_TYPEINFO(tint::ast::IntLiteralExpression);
namespace tint::ast {
-IntLiteralExpression::IntLiteralExpression(ProgramID pid, const Source& src)
- : Base(pid, src) {}
+IntLiteralExpression::IntLiteralExpression(ProgramID pid,
+ const Source& src,
+ int64_t val,
+ Suffix suf)
+ : Base(pid, src), value(val), suffix(suf) {}
IntLiteralExpression::~IntLiteralExpression() = default;
+const IntLiteralExpression* IntLiteralExpression::Clone(CloneContext* ctx) const {
+ // Clone arguments outside of create() call to have deterministic ordering
+ auto src = ctx->Clone(source);
+ return ctx->dst->create<IntLiteralExpression>(src, value, suffix);
+}
+
+std::ostream& operator<<(std::ostream& out, IntLiteralExpression::Suffix suffix) {
+ switch (suffix) {
+ default:
+ return out;
+ case IntLiteralExpression::Suffix::kI:
+ return out << "i";
+ case IntLiteralExpression::Suffix::kU:
+ return out << "u";
+ }
+}
+
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/int_literal_expression.h b/chromium/third_party/dawn/src/tint/ast/int_literal_expression.h
index 9ca31053cec..8ff58eaf229 100644
--- a/chromium/third_party/dawn/src/tint/ast/int_literal_expression.h
+++ b/chromium/third_party/dawn/src/tint/ast/int_literal_expression.h
@@ -19,24 +19,46 @@
namespace tint::ast {
-/// An integer literal. This could be either signed or unsigned.
-class IntLiteralExpression
- : public Castable<IntLiteralExpression, LiteralExpression> {
- public:
- ~IntLiteralExpression() override;
-
- /// @returns the literal value as a u32
- virtual uint32_t ValueAsU32() const = 0;
-
- /// @returns the literal value as an i32
- int32_t ValueAsI32() const { return static_cast<int32_t>(ValueAsU32()); }
-
- protected:
- /// Constructor
- /// @param pid the identifier of the program that owns this node
- /// @param src the source of this node
- IntLiteralExpression(ProgramID pid, const Source& src);
-}; // namespace ast
+/// An integer literal. The literal may have an 'i', 'u' or no suffix.
+class IntLiteralExpression : public Castable<IntLiteralExpression, LiteralExpression> {
+ public:
+ /// Literal suffix
+ enum class Suffix {
+ /// No suffix
+ kNone,
+ /// 'i' suffix (i32)
+ kI,
+ /// 'u' suffix (u32)
+ kU,
+ };
+
+ /// Constructor
+ /// @param pid the identifier of the program that owns this node
+ /// @param src the source of this node
+ /// @param val the literal value
+ /// @param suf the literal suffix
+ IntLiteralExpression(ProgramID pid, const Source& src, int64_t val, Suffix suf);
+
+ ~IntLiteralExpression() override;
+
+ /// Clones this node and all transitive child nodes using the `CloneContext`
+ /// `ctx`.
+ /// @param ctx the clone context
+ /// @return the newly cloned node
+ const IntLiteralExpression* Clone(CloneContext* ctx) const override;
+
+ /// The literal value
+ const int64_t value;
+
+ /// The literal suffix
+ const Suffix suffix;
+};
+
+/// Writes the integer literal suffix to the std::ostream.
+/// @param out the std::ostream to write to
+/// @param suffix the suffix to write
+/// @returns out so calls can be chained
+std::ostream& operator<<(std::ostream& out, IntLiteralExpression::Suffix suffix);
} // namespace tint::ast
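
With the separate SintLiteralExpression/UintLiteralExpression nodes folded into one class, signedness is carried by the Suffix enum and the value is stored as int64_t. A hedged sketch mirroring the updated tests below; the wrapper function is illustrative.

    #include <sstream>

    #include "src/tint/program_builder.h"

    // Illustrative only: 42, 42i and 42u as one IntLiteralExpression node kind.
    void BuildIntLiterals(tint::ProgramBuilder& b) {
        using tint::ast::IntLiteralExpression;
        auto* abstract_lit = b.create<IntLiteralExpression>(42, IntLiteralExpression::Suffix::kNone);
        auto* i32_lit = b.create<IntLiteralExpression>(42, IntLiteralExpression::Suffix::kI);
        auto* u32_lit = b.create<IntLiteralExpression>(42, IntLiteralExpression::Suffix::kU);

        std::stringstream ss;
        ss << u32_lit->suffix;  // writes "u" via the new operator<<
        (void)abstract_lit; (void)i32_lit;
    }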
diff --git a/chromium/third_party/dawn/src/tint/ast/int_literal_expression_test.cc b/chromium/third_party/dawn/src/tint/ast/int_literal_expression_test.cc
index d04def1b912..969e1b94eaa 100644
--- a/chromium/third_party/dawn/src/tint/ast/int_literal_expression_test.cc
+++ b/chromium/third_party/dawn/src/tint/ast/int_literal_expression_test.cc
@@ -19,14 +19,37 @@ namespace {
using IntLiteralExpressionTest = TestHelper;
-TEST_F(IntLiteralExpressionTest, Sint_IsInt) {
- auto* i = create<SintLiteralExpression>(47);
- ASSERT_TRUE(i->Is<IntLiteralExpression>());
+TEST_F(IntLiteralExpressionTest, SuffixNone) {
+ auto* i = create<IntLiteralExpression>(42, IntLiteralExpression::Suffix::kNone);
+ ASSERT_TRUE(i->Is<IntLiteralExpression>());
+ EXPECT_EQ(i->value, 42);
+ EXPECT_EQ(i->suffix, IntLiteralExpression::Suffix::kNone);
}
-TEST_F(IntLiteralExpressionTest, Uint_IsInt) {
- auto* i = create<UintLiteralExpression>(42);
- EXPECT_TRUE(i->Is<IntLiteralExpression>());
+TEST_F(IntLiteralExpressionTest, SuffixI) {
+ auto* i = create<IntLiteralExpression>(42, IntLiteralExpression::Suffix::kI);
+ ASSERT_TRUE(i->Is<IntLiteralExpression>());
+ EXPECT_EQ(i->value, 42);
+ EXPECT_EQ(i->suffix, IntLiteralExpression::Suffix::kI);
+}
+
+TEST_F(IntLiteralExpressionTest, SuffixU) {
+ auto* i = create<IntLiteralExpression>(42, IntLiteralExpression::Suffix::kU);
+ ASSERT_TRUE(i->Is<IntLiteralExpression>());
+ EXPECT_EQ(i->value, 42);
+ EXPECT_EQ(i->suffix, IntLiteralExpression::Suffix::kU);
+}
+
+TEST_F(IntLiteralExpressionTest, SuffixStringStream) {
+ auto to_str = [](IntLiteralExpression::Suffix suffix) {
+ std::stringstream ss;
+ ss << suffix;
+ return ss.str();
+ };
+
+ EXPECT_EQ("", to_str(IntLiteralExpression::Suffix::kNone));
+ EXPECT_EQ("i", to_str(IntLiteralExpression::Suffix::kI));
+ EXPECT_EQ("u", to_str(IntLiteralExpression::Suffix::kU));
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/ast/internal_attribute.cc b/chromium/third_party/dawn/src/tint/ast/internal_attribute.cc
index b42af9cf85d..180e90983a9 100644
--- a/chromium/third_party/dawn/src/tint/ast/internal_attribute.cc
+++ b/chromium/third_party/dawn/src/tint/ast/internal_attribute.cc
@@ -23,7 +23,7 @@ InternalAttribute::InternalAttribute(ProgramID pid) : Base(pid, Source{}) {}
InternalAttribute::~InternalAttribute() = default;
std::string InternalAttribute::Name() const {
- return "internal";
+ return "internal";
}
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/internal_attribute.h b/chromium/third_party/dawn/src/tint/ast/internal_attribute.h
index 7e35dcb3f8b..bb135591841 100644
--- a/chromium/third_party/dawn/src/tint/ast/internal_attribute.h
+++ b/chromium/third_party/dawn/src/tint/ast/internal_attribute.h
@@ -25,20 +25,20 @@ namespace tint::ast {
/// These attributes are not produced by generators, but instead are usually
/// created by transforms for consumption by a particular backend.
class InternalAttribute : public Castable<InternalAttribute, Attribute> {
- public:
- /// Constructor
- /// @param program_id the identifier of the program that owns this node
- explicit InternalAttribute(ProgramID program_id);
+ public:
+ /// Constructor
+ /// @param program_id the identifier of the program that owns this node
+ explicit InternalAttribute(ProgramID program_id);
- /// Destructor
- ~InternalAttribute() override;
+ /// Destructor
+ ~InternalAttribute() override;
- /// @return a short description of the internal attribute which will be
- /// displayed in WGSL as `@internal(<name>)` (but is not parsable).
- virtual std::string InternalName() const = 0;
+ /// @return a short description of the internal attribute which will be
+ /// displayed in WGSL as `@internal(<name>)` (but is not parsable).
+ virtual std::string InternalName() const = 0;
- /// @returns the WGSL name for the attribute
- std::string Name() const override;
+ /// @returns the WGSL name for the attribute
+ std::string Name() const override;
};
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/interpolate_attribute.cc b/chromium/third_party/dawn/src/tint/ast/interpolate_attribute.cc
index 6f452a9cc59..909e827d59a 100644
--- a/chromium/third_party/dawn/src/tint/ast/interpolate_attribute.cc
+++ b/chromium/third_party/dawn/src/tint/ast/interpolate_attribute.cc
@@ -31,54 +31,53 @@ InterpolateAttribute::InterpolateAttribute(ProgramID pid,
InterpolateAttribute::~InterpolateAttribute() = default;
std::string InterpolateAttribute::Name() const {
- return "interpolate";
+ return "interpolate";
}
-const InterpolateAttribute* InterpolateAttribute::Clone(
- CloneContext* ctx) const {
- // Clone arguments outside of create() call to have deterministic ordering
- auto src = ctx->Clone(source);
- return ctx->dst->create<InterpolateAttribute>(src, type, sampling);
+const InterpolateAttribute* InterpolateAttribute::Clone(CloneContext* ctx) const {
+ // Clone arguments outside of create() call to have deterministic ordering
+ auto src = ctx->Clone(source);
+ return ctx->dst->create<InterpolateAttribute>(src, type, sampling);
}
std::ostream& operator<<(std::ostream& out, InterpolationType type) {
- switch (type) {
- case InterpolationType::kPerspective: {
- out << "perspective";
- break;
+ switch (type) {
+ case InterpolationType::kPerspective: {
+ out << "perspective";
+ break;
+ }
+ case InterpolationType::kLinear: {
+ out << "linear";
+ break;
+ }
+ case InterpolationType::kFlat: {
+ out << "flat";
+ break;
+ }
}
- case InterpolationType::kLinear: {
- out << "linear";
- break;
- }
- case InterpolationType::kFlat: {
- out << "flat";
- break;
- }
- }
- return out;
+ return out;
}
std::ostream& operator<<(std::ostream& out, InterpolationSampling sampling) {
- switch (sampling) {
- case InterpolationSampling::kNone: {
- out << "none";
- break;
- }
- case InterpolationSampling::kCenter: {
- out << "center";
- break;
- }
- case InterpolationSampling::kCentroid: {
- out << "centroid";
- break;
- }
- case InterpolationSampling::kSample: {
- out << "sample";
- break;
+ switch (sampling) {
+ case InterpolationSampling::kNone: {
+ out << "none";
+ break;
+ }
+ case InterpolationSampling::kCenter: {
+ out << "center";
+ break;
+ }
+ case InterpolationSampling::kCentroid: {
+ out << "centroid";
+ break;
+ }
+ case InterpolationSampling::kSample: {
+ out << "sample";
+ break;
+ }
}
- }
- return out;
+ return out;
}
} // namespace tint::ast
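
The stream operators reformatted above can be chained when formatting diagnostics; a minimal illustrative sketch (the helper function and message text are not part of this patch).

    #include <sstream>
    #include <string>

    #include "src/tint/ast/interpolate_attribute.h"

    // Illustrative only: render an interpolate attribute as WGSL-like text.
    std::string DescribeInterpolation(const tint::ast::InterpolateAttribute* attr) {
        std::stringstream ss;
        ss << "@interpolate(" << attr->type << ", " << attr->sampling << ")";
        return ss.str();  // e.g. "@interpolate(linear, center)"
    }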
diff --git a/chromium/third_party/dawn/src/tint/ast/interpolate_attribute.h b/chromium/third_party/dawn/src/tint/ast/interpolate_attribute.h
index a659ef9fc9f..4b2a2df787c 100644
--- a/chromium/third_party/dawn/src/tint/ast/interpolate_attribute.h
+++ b/chromium/third_party/dawn/src/tint/ast/interpolate_attribute.h
@@ -29,34 +29,33 @@ enum class InterpolationType { kPerspective, kLinear, kFlat };
enum class InterpolationSampling { kNone = -1, kCenter, kCentroid, kSample };
/// An interpolate attribute
-class InterpolateAttribute final
- : public Castable<InterpolateAttribute, Attribute> {
- public:
- /// Create an interpolate attribute.
- /// @param pid the identifier of the program that owns this node
- /// @param src the source of this node
- /// @param type the interpolation type
- /// @param sampling the interpolation sampling
- InterpolateAttribute(ProgramID pid,
- const Source& src,
- InterpolationType type,
- InterpolationSampling sampling);
- ~InterpolateAttribute() override;
+class InterpolateAttribute final : public Castable<InterpolateAttribute, Attribute> {
+ public:
+ /// Create an interpolate attribute.
+ /// @param pid the identifier of the program that owns this node
+ /// @param src the source of this node
+ /// @param type the interpolation type
+ /// @param sampling the interpolation sampling
+ InterpolateAttribute(ProgramID pid,
+ const Source& src,
+ InterpolationType type,
+ InterpolationSampling sampling);
+ ~InterpolateAttribute() override;
- /// @returns the WGSL name for the attribute
- std::string Name() const override;
+ /// @returns the WGSL name for the attribute
+ std::string Name() const override;
- /// Clones this node and all transitive child nodes using the `CloneContext`
- /// `ctx`.
- /// @param ctx the clone context
- /// @return the newly cloned node
- const InterpolateAttribute* Clone(CloneContext* ctx) const override;
+ /// Clones this node and all transitive child nodes using the `CloneContext`
+ /// `ctx`.
+ /// @param ctx the clone context
+ /// @return the newly cloned node
+ const InterpolateAttribute* Clone(CloneContext* ctx) const override;
- /// The interpolation type
- const InterpolationType type;
+ /// The interpolation type
+ const InterpolationType type;
- /// The interpolation sampling
- const InterpolationSampling sampling;
+ /// The interpolation sampling
+ const InterpolationSampling sampling;
};
/// @param out the std::ostream to write to
diff --git a/chromium/third_party/dawn/src/tint/ast/interpolate_attribute_test.cc b/chromium/third_party/dawn/src/tint/ast/interpolate_attribute_test.cc
index 8bf85e1db49..d8b66019709 100644
--- a/chromium/third_party/dawn/src/tint/ast/interpolate_attribute_test.cc
+++ b/chromium/third_party/dawn/src/tint/ast/interpolate_attribute_test.cc
@@ -22,10 +22,10 @@ namespace {
using InterpolateAttributeTest = TestHelper;
TEST_F(InterpolateAttributeTest, Creation) {
- auto* d = create<InterpolateAttribute>(InterpolationType::kLinear,
- InterpolationSampling::kCenter);
- EXPECT_EQ(InterpolationType::kLinear, d->type);
- EXPECT_EQ(InterpolationSampling::kCenter, d->sampling);
+ auto* d =
+ create<InterpolateAttribute>(InterpolationType::kLinear, InterpolationSampling::kCenter);
+ EXPECT_EQ(InterpolationType::kLinear, d->type);
+ EXPECT_EQ(InterpolationSampling::kCenter, d->sampling);
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/ast/invariant_attribute.cc b/chromium/third_party/dawn/src/tint/ast/invariant_attribute.cc
index 31dd4b7d2c7..1b0f126fd2d 100644
--- a/chromium/third_party/dawn/src/tint/ast/invariant_attribute.cc
+++ b/chromium/third_party/dawn/src/tint/ast/invariant_attribute.cc
@@ -20,19 +20,18 @@ TINT_INSTANTIATE_TYPEINFO(tint::ast::InvariantAttribute);
namespace tint::ast {
-InvariantAttribute::InvariantAttribute(ProgramID pid, const Source& src)
- : Base(pid, src) {}
+InvariantAttribute::InvariantAttribute(ProgramID pid, const Source& src) : Base(pid, src) {}
InvariantAttribute::~InvariantAttribute() = default;
std::string InvariantAttribute::Name() const {
- return "invariant";
+ return "invariant";
}
const InvariantAttribute* InvariantAttribute::Clone(CloneContext* ctx) const {
- // Clone arguments outside of create() call to have deterministic ordering
- auto src = ctx->Clone(source);
- return ctx->dst->create<InvariantAttribute>(src);
+ // Clone arguments outside of create() call to have deterministic ordering
+ auto src = ctx->Clone(source);
+ return ctx->dst->create<InvariantAttribute>(src);
}
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/invariant_attribute.h b/chromium/third_party/dawn/src/tint/ast/invariant_attribute.h
index 375eb3c0a2e..6bb42fc6b63 100644
--- a/chromium/third_party/dawn/src/tint/ast/invariant_attribute.h
+++ b/chromium/third_party/dawn/src/tint/ast/invariant_attribute.h
@@ -22,23 +22,22 @@
namespace tint::ast {
/// The invariant attribute
-class InvariantAttribute final
- : public Castable<InvariantAttribute, Attribute> {
- public:
- /// constructor
- /// @param pid the identifier of the program that owns this node
- /// @param src the source of this node
- InvariantAttribute(ProgramID pid, const Source& src);
- ~InvariantAttribute() override;
-
- /// @returns the WGSL name for the attribute
- std::string Name() const override;
-
- /// Clones this node and all transitive child nodes using the `CloneContext`
- /// `ctx`.
- /// @param ctx the clone context
- /// @return the newly cloned node
- const InvariantAttribute* Clone(CloneContext* ctx) const override;
+class InvariantAttribute final : public Castable<InvariantAttribute, Attribute> {
+ public:
+ /// constructor
+ /// @param pid the identifier of the program that owns this node
+ /// @param src the source of this node
+ InvariantAttribute(ProgramID pid, const Source& src);
+ ~InvariantAttribute() override;
+
+ /// @returns the WGSL name for the attribute
+ std::string Name() const override;
+
+ /// Clones this node and all transitive child nodes using the `CloneContext`
+ /// `ctx`.
+ /// @param ctx the clone context
+ /// @return the newly cloned node
+ const InvariantAttribute* Clone(CloneContext* ctx) const override;
};
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/literal_expression.cc b/chromium/third_party/dawn/src/tint/ast/literal_expression.cc
index 6863357a6ae..d05279d0f73 100644
--- a/chromium/third_party/dawn/src/tint/ast/literal_expression.cc
+++ b/chromium/third_party/dawn/src/tint/ast/literal_expression.cc
@@ -18,8 +18,7 @@ TINT_INSTANTIATE_TYPEINFO(tint::ast::LiteralExpression);
namespace tint::ast {
-LiteralExpression::LiteralExpression(ProgramID pid, const Source& src)
- : Base(pid, src) {}
+LiteralExpression::LiteralExpression(ProgramID pid, const Source& src) : Base(pid, src) {}
LiteralExpression::~LiteralExpression() = default;
diff --git a/chromium/third_party/dawn/src/tint/ast/literal_expression.h b/chromium/third_party/dawn/src/tint/ast/literal_expression.h
index e794f59bc5b..56fc1f0f802 100644
--- a/chromium/third_party/dawn/src/tint/ast/literal_expression.h
+++ b/chromium/third_party/dawn/src/tint/ast/literal_expression.h
@@ -23,14 +23,14 @@ namespace tint::ast {
/// Base class for literal value expressions
class LiteralExpression : public Castable<LiteralExpression, Expression> {
- public:
- ~LiteralExpression() override;
-
- protected:
- /// Constructor
- /// @param pid the identifier of the program that owns this node
- /// @param src the input source
- LiteralExpression(ProgramID pid, const Source& src);
+ public:
+ ~LiteralExpression() override;
+
+ protected:
+ /// Constructor
+ /// @param pid the identifier of the program that owns this node
+ /// @param src the input source
+ LiteralExpression(ProgramID pid, const Source& src);
};
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/location_attribute.cc b/chromium/third_party/dawn/src/tint/ast/location_attribute.cc
index e6d4f68dc16..1eae823adeb 100644
--- a/chromium/third_party/dawn/src/tint/ast/location_attribute.cc
+++ b/chromium/third_party/dawn/src/tint/ast/location_attribute.cc
@@ -22,21 +22,19 @@ TINT_INSTANTIATE_TYPEINFO(tint::ast::LocationAttribute);
namespace tint::ast {
-LocationAttribute::LocationAttribute(ProgramID pid,
- const Source& src,
- uint32_t val)
+LocationAttribute::LocationAttribute(ProgramID pid, const Source& src, uint32_t val)
: Base(pid, src), value(val) {}
LocationAttribute::~LocationAttribute() = default;
std::string LocationAttribute::Name() const {
- return "location";
+ return "location";
}
const LocationAttribute* LocationAttribute::Clone(CloneContext* ctx) const {
- // Clone arguments outside of create() call to have deterministic ordering
- auto src = ctx->Clone(source);
- return ctx->dst->create<LocationAttribute>(src, value);
+ // Clone arguments outside of create() call to have deterministic ordering
+ auto src = ctx->Clone(source);
+ return ctx->dst->create<LocationAttribute>(src, value);
}
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/location_attribute.h b/chromium/third_party/dawn/src/tint/ast/location_attribute.h
index eefd863e9e9..3646c544fc6 100644
--- a/chromium/third_party/dawn/src/tint/ast/location_attribute.h
+++ b/chromium/third_party/dawn/src/tint/ast/location_attribute.h
@@ -23,25 +23,25 @@ namespace tint::ast {
/// A location attribute
class LocationAttribute final : public Castable<LocationAttribute, Attribute> {
- public:
- /// constructor
- /// @param pid the identifier of the program that owns this node
- /// @param src the source of this node
- /// @param value the location value
- LocationAttribute(ProgramID pid, const Source& src, uint32_t value);
- ~LocationAttribute() override;
-
- /// @returns the WGSL name for the attribute
- std::string Name() const override;
-
- /// Clones this node and all transitive child nodes using the `CloneContext`
- /// `ctx`.
- /// @param ctx the clone context
- /// @return the newly cloned node
- const LocationAttribute* Clone(CloneContext* ctx) const override;
-
- /// The location value
- const uint32_t value;
+ public:
+ /// constructor
+ /// @param pid the identifier of the program that owns this node
+ /// @param src the source of this node
+ /// @param value the location value
+ LocationAttribute(ProgramID pid, const Source& src, uint32_t value);
+ ~LocationAttribute() override;
+
+ /// @returns the WGSL name for the attribute
+ std::string Name() const override;
+
+ /// Clones this node and all transitive child nodes using the `CloneContext`
+ /// `ctx`.
+ /// @param ctx the clone context
+ /// @return the newly cloned node
+ const LocationAttribute* Clone(CloneContext* ctx) const override;
+
+ /// The location value
+ const uint32_t value;
};
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/location_attribute_test.cc b/chromium/third_party/dawn/src/tint/ast/location_attribute_test.cc
index e71b7fb1566..a1562d561ab 100644
--- a/chromium/third_party/dawn/src/tint/ast/location_attribute_test.cc
+++ b/chromium/third_party/dawn/src/tint/ast/location_attribute_test.cc
@@ -20,8 +20,8 @@ namespace {
using LocationAttributeTest = TestHelper;
TEST_F(LocationAttributeTest, Creation) {
- auto* d = create<LocationAttribute>(2);
- EXPECT_EQ(2u, d->value);
+ auto* d = create<LocationAttribute>(2);
+ EXPECT_EQ(2u, d->value);
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/ast/loop_statement.cc b/chromium/third_party/dawn/src/tint/ast/loop_statement.cc
index 35f8d7937b8..9d14960c97f 100644
--- a/chromium/third_party/dawn/src/tint/ast/loop_statement.cc
+++ b/chromium/third_party/dawn/src/tint/ast/loop_statement.cc
@@ -25,9 +25,9 @@ LoopStatement::LoopStatement(ProgramID pid,
const BlockStatement* b,
const BlockStatement* cont)
: Base(pid, src), body(b), continuing(cont) {
- TINT_ASSERT(AST, body);
- TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, body, program_id);
- TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, continuing, program_id);
+ TINT_ASSERT(AST, body);
+ TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, body, program_id);
+ TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, continuing, program_id);
}
LoopStatement::LoopStatement(LoopStatement&&) = default;
@@ -35,11 +35,11 @@ LoopStatement::LoopStatement(LoopStatement&&) = default;
LoopStatement::~LoopStatement() = default;
const LoopStatement* LoopStatement::Clone(CloneContext* ctx) const {
- // Clone arguments outside of create() call to have deterministic ordering
- auto src = ctx->Clone(source);
- auto* b = ctx->Clone(body);
- auto* cont = ctx->Clone(continuing);
- return ctx->dst->create<LoopStatement>(src, b, cont);
+ // Clone arguments outside of create() call to have deterministic ordering
+ auto src = ctx->Clone(source);
+ auto* b = ctx->Clone(body);
+ auto* cont = ctx->Clone(continuing);
+ return ctx->dst->create<LoopStatement>(src, b, cont);
}
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/loop_statement.h b/chromium/third_party/dawn/src/tint/ast/loop_statement.h
index 921b68eabe8..5a044fead84 100644
--- a/chromium/third_party/dawn/src/tint/ast/loop_statement.h
+++ b/chromium/third_party/dawn/src/tint/ast/loop_statement.h
@@ -21,31 +21,31 @@ namespace tint::ast {
/// A loop statement
class LoopStatement final : public Castable<LoopStatement, Statement> {
- public:
- /// Constructor
- /// @param program_id the identifier of the program that owns this node
- /// @param source the loop statement source
- /// @param body the body statements
- /// @param continuing the continuing statements
- LoopStatement(ProgramID program_id,
- const Source& source,
- const BlockStatement* body,
- const BlockStatement* continuing);
- /// Move constructor
- LoopStatement(LoopStatement&&);
- ~LoopStatement() override;
-
- /// Clones this node and all transitive child nodes using the `CloneContext`
- /// `ctx`.
- /// @param ctx the clone context
- /// @return the newly cloned node
- const LoopStatement* Clone(CloneContext* ctx) const override;
-
- /// The loop body
- const BlockStatement* const body;
-
- /// The continuing statements
- const BlockStatement* const continuing;
+ public:
+ /// Constructor
+ /// @param program_id the identifier of the program that owns this node
+ /// @param source the loop statement source
+ /// @param body the body statements
+ /// @param continuing the continuing statements
+ LoopStatement(ProgramID program_id,
+ const Source& source,
+ const BlockStatement* body,
+ const BlockStatement* continuing);
+ /// Move constructor
+ LoopStatement(LoopStatement&&);
+ ~LoopStatement() override;
+
+ /// Clones this node and all transitive child nodes using the `CloneContext`
+ /// `ctx`.
+ /// @param ctx the clone context
+ /// @return the newly cloned node
+ const LoopStatement* Clone(CloneContext* ctx) const override;
+
+ /// The loop body
+ const BlockStatement* const body;
+
+ /// The continuing statements
+ const BlockStatement* const continuing;
};
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/loop_statement_test.cc b/chromium/third_party/dawn/src/tint/ast/loop_statement_test.cc
index 9c8e812be57..c28665b0b70 100644
--- a/chromium/third_party/dawn/src/tint/ast/loop_statement_test.cc
+++ b/chromium/third_party/dawn/src/tint/ast/loop_statement_test.cc
@@ -25,78 +25,77 @@ namespace {
using LoopStatementTest = TestHelper;
TEST_F(LoopStatementTest, Creation) {
- auto* body = Block(create<DiscardStatement>());
- auto* b = body->Last();
+ auto* body = Block(create<DiscardStatement>());
+ auto* b = body->Last();
- auto* continuing = Block(create<DiscardStatement>());
+ auto* continuing = Block(create<DiscardStatement>());
- auto* l = create<LoopStatement>(body, continuing);
- ASSERT_EQ(l->body->statements.size(), 1u);
- EXPECT_EQ(l->body->statements[0], b);
- ASSERT_EQ(l->continuing->statements.size(), 1u);
- EXPECT_EQ(l->continuing->statements[0], continuing->Last());
+ auto* l = create<LoopStatement>(body, continuing);
+ ASSERT_EQ(l->body->statements.size(), 1u);
+ EXPECT_EQ(l->body->statements[0], b);
+ ASSERT_EQ(l->continuing->statements.size(), 1u);
+ EXPECT_EQ(l->continuing->statements[0], continuing->Last());
}
TEST_F(LoopStatementTest, Creation_WithSource) {
- auto* body = Block(create<DiscardStatement>());
+ auto* body = Block(create<DiscardStatement>());
- auto* continuing = Block(create<DiscardStatement>());
+ auto* continuing = Block(create<DiscardStatement>());
- auto* l =
- create<LoopStatement>(Source{Source::Location{20, 2}}, body, continuing);
- auto src = l->source;
- EXPECT_EQ(src.range.begin.line, 20u);
- EXPECT_EQ(src.range.begin.column, 2u);
+ auto* l = create<LoopStatement>(Source{Source::Location{20, 2}}, body, continuing);
+ auto src = l->source;
+ EXPECT_EQ(src.range.begin.line, 20u);
+ EXPECT_EQ(src.range.begin.column, 2u);
}
TEST_F(LoopStatementTest, IsLoop) {
- auto* l = create<LoopStatement>(Block(), Block());
- EXPECT_TRUE(l->Is<LoopStatement>());
+ auto* l = create<LoopStatement>(Block(), Block());
+ EXPECT_TRUE(l->Is<LoopStatement>());
}
TEST_F(LoopStatementTest, HasContinuing_WithoutContinuing) {
- auto* body = Block(create<DiscardStatement>());
+ auto* body = Block(create<DiscardStatement>());
- auto* l = create<LoopStatement>(body, nullptr);
- EXPECT_FALSE(l->continuing);
+ auto* l = create<LoopStatement>(body, nullptr);
+ EXPECT_FALSE(l->continuing);
}
TEST_F(LoopStatementTest, HasContinuing_WithContinuing) {
- auto* body = Block(create<DiscardStatement>());
+ auto* body = Block(create<DiscardStatement>());
- auto* continuing = Block(create<DiscardStatement>());
+ auto* continuing = Block(create<DiscardStatement>());
- auto* l = create<LoopStatement>(body, continuing);
- EXPECT_TRUE(l->continuing);
+ auto* l = create<LoopStatement>(body, continuing);
+ EXPECT_TRUE(l->continuing);
}
TEST_F(LoopStatementTest, Assert_Null_Body) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b;
- b.create<LoopStatement>(nullptr, nullptr);
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b;
+ b.create<LoopStatement>(nullptr, nullptr);
+ },
+ "internal compiler error");
}
TEST_F(LoopStatementTest, Assert_DifferentProgramID_Body) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b1;
- ProgramBuilder b2;
- b1.create<LoopStatement>(b2.Block(), b1.Block());
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b1;
+ ProgramBuilder b2;
+ b1.create<LoopStatement>(b2.Block(), b1.Block());
+ },
+ "internal compiler error");
}
TEST_F(LoopStatementTest, Assert_DifferentProgramID_Continuing) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b1;
- ProgramBuilder b2;
- b1.create<LoopStatement>(b1.Block(), b2.Block());
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b1;
+ ProgramBuilder b2;
+ b1.create<LoopStatement>(b1.Block(), b2.Block());
+ },
+ "internal compiler error");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/ast/matrix.cc b/chromium/third_party/dawn/src/tint/ast/matrix.cc
index b9baa27693b..1f74a26ec05 100644
--- a/chromium/third_party/dawn/src/tint/ast/matrix.cc
+++ b/chromium/third_party/dawn/src/tint/ast/matrix.cc
@@ -20,17 +20,13 @@ TINT_INSTANTIATE_TYPEINFO(tint::ast::Matrix);
namespace tint::ast {
-Matrix::Matrix(ProgramID pid,
- const Source& src,
- const Type* subtype,
- uint32_t r,
- uint32_t c)
+Matrix::Matrix(ProgramID pid, const Source& src, const Type* subtype, uint32_t r, uint32_t c)
: Base(pid, src), type(subtype), rows(r), columns(c) {
- TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, subtype, program_id);
- TINT_ASSERT(AST, rows > 1);
- TINT_ASSERT(AST, rows < 5);
- TINT_ASSERT(AST, columns > 1);
- TINT_ASSERT(AST, columns < 5);
+ TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, subtype, program_id);
+ TINT_ASSERT(AST, rows > 1);
+ TINT_ASSERT(AST, rows < 5);
+ TINT_ASSERT(AST, columns > 1);
+ TINT_ASSERT(AST, columns < 5);
}
Matrix::Matrix(Matrix&&) = default;
@@ -38,17 +34,19 @@ Matrix::Matrix(Matrix&&) = default;
Matrix::~Matrix() = default;
std::string Matrix::FriendlyName(const SymbolTable& symbols) const {
- std::ostringstream out;
- out << "mat" << columns << "x" << rows << "<" << type->FriendlyName(symbols)
- << ">";
- return out.str();
+ std::ostringstream out;
+ out << "mat" << columns << "x" << rows;
+ if (type) {
+ out << "<" << type->FriendlyName(symbols) << ">";
+ }
+ return out.str();
}
const Matrix* Matrix::Clone(CloneContext* ctx) const {
- // Clone arguments outside of create() call to have deterministic ordering
- auto src = ctx->Clone(source);
- auto* ty = ctx->Clone(type);
- return ctx->dst->create<Matrix>(src, ty, rows, columns);
+ // Clone arguments outside of create() call to have deterministic ordering
+ auto src = ctx->Clone(source);
+ auto* ty = ctx->Clone(type);
+ return ctx->dst->create<Matrix>(src, ty, rows, columns);
}
} // namespace tint::ast
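
Editor's note: besides the reformat, this hunk changes behaviour. Matrix::FriendlyName() now guards against a null element type, which occurs for matrix constructors whose element type is inferred, and prints e.g. mat2x3 instead of dereferencing null (the new FriendlyName_WithoutType test further down exercises this). A rough standalone sketch of the guard, with simplified types standing in for Tint's:

    #include <cstdint>
    #include <sstream>
    #include <string>

    // Simplified stand-in: a matrix type whose element type may be null when it
    // is to be inferred from constructor arguments.
    struct ElemType { std::string name; };

    std::string MatrixFriendlyName(const ElemType* type, uint32_t rows, uint32_t columns) {
        std::ostringstream out;
        out << "mat" << columns << "x" << rows;   // WGSL spells it matCxR
        if (type) {                               // guard: omit <...> when inferred
            out << "<" << type->name << ">";
        }
        return out.str();
    }
    // MatrixFriendlyName(nullptr, 3, 2) == "mat2x3"
    // MatrixFriendlyName(&i32, 3, 2)    == "mat2x3<i32>"  (with ElemType i32{"i32"})
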
diff --git a/chromium/third_party/dawn/src/tint/ast/matrix.h b/chromium/third_party/dawn/src/tint/ast/matrix.h
index cb1f6d9d37d..620f28a38c9 100644
--- a/chromium/third_party/dawn/src/tint/ast/matrix.h
+++ b/chromium/third_party/dawn/src/tint/ast/matrix.h
@@ -23,44 +23,40 @@ namespace tint::ast {
/// A matrix type
class Matrix final : public Castable<Matrix, Type> {
- public:
- /// Constructor
- /// @param pid the identifier of the program that owns this node
- /// @param src the source of this node
- /// @param subtype the declared type of the matrix components. May be null for
- /// matrix constructors, where the element type will be inferred from
- /// the constructor arguments
- /// @param rows the number of rows in the matrix
- /// @param columns the number of columns in the matrix
- Matrix(ProgramID pid,
- const Source& src,
- const Type* subtype,
- uint32_t rows,
- uint32_t columns);
- /// Move constructor
- Matrix(Matrix&&);
- ~Matrix() override;
+ public:
+ /// Constructor
+ /// @param pid the identifier of the program that owns this node
+ /// @param src the source of this node
+ /// @param subtype the declared type of the matrix components. May be null for
+ /// matrix constructors, where the element type will be inferred from
+ /// the constructor arguments
+ /// @param rows the number of rows in the matrix
+ /// @param columns the number of columns in the matrix
+ Matrix(ProgramID pid, const Source& src, const Type* subtype, uint32_t rows, uint32_t columns);
+ /// Move constructor
+ Matrix(Matrix&&);
+ ~Matrix() override;
- /// @param symbols the program's symbol table
- /// @returns the name for this type that closely resembles how it would be
- /// declared in WGSL.
- std::string FriendlyName(const SymbolTable& symbols) const override;
+ /// @param symbols the program's symbol table
+ /// @returns the name for this type that closely resembles how it would be
+ /// declared in WGSL.
+ std::string FriendlyName(const SymbolTable& symbols) const override;
- /// Clones this type and all transitive types using the `CloneContext` `ctx`.
- /// @param ctx the clone context
- /// @return the newly cloned type
- const Matrix* Clone(CloneContext* ctx) const override;
+ /// Clones this type and all transitive types using the `CloneContext` `ctx`.
+ /// @param ctx the clone context
+ /// @return the newly cloned type
+ const Matrix* Clone(CloneContext* ctx) const override;
- /// The declared type of the matrix components. May be null for matrix
- /// constructors, where the element type will be inferred from the constructor
- /// arguments
- const Type* const type;
+ /// The declared type of the matrix components. May be null for matrix
+ /// constructors, where the element type will be inferred from the constructor
+ /// arguments
+ const Type* const type;
- /// The number of rows in the matrix
- const uint32_t rows;
+ /// The number of rows in the matrix
+ const uint32_t rows;
- /// The number of columns in the matrix
- const uint32_t columns;
+ /// The number of columns in the matrix
+ const uint32_t columns;
};
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/matrix_test.cc b/chromium/third_party/dawn/src/tint/ast/matrix_test.cc
index 54c11eb29df..66ea84de9f9 100644
--- a/chromium/third_party/dawn/src/tint/ast/matrix_test.cc
+++ b/chromium/third_party/dawn/src/tint/ast/matrix_test.cc
@@ -33,17 +33,22 @@ namespace {
using AstMatrixTest = TestHelper;
TEST_F(AstMatrixTest, Creation) {
- auto* i32 = create<I32>();
- auto* m = create<Matrix>(i32, 2, 4);
- EXPECT_EQ(m->type, i32);
- EXPECT_EQ(m->rows, 2u);
- EXPECT_EQ(m->columns, 4u);
+ auto* i32 = create<I32>();
+ auto* m = create<Matrix>(i32, 2, 4);
+ EXPECT_EQ(m->type, i32);
+ EXPECT_EQ(m->rows, 2u);
+ EXPECT_EQ(m->columns, 4u);
}
TEST_F(AstMatrixTest, FriendlyName) {
- auto* i32 = create<I32>();
- auto* m = create<Matrix>(i32, 3, 2);
- EXPECT_EQ(m->FriendlyName(Symbols()), "mat2x3<i32>");
+ auto* i32 = create<I32>();
+ auto* m = create<Matrix>(i32, 3, 2);
+ EXPECT_EQ(m->FriendlyName(Symbols()), "mat2x3<i32>");
+}
+
+TEST_F(AstMatrixTest, FriendlyName_WithoutType) {
+ auto* m = create<Matrix>(nullptr, 3, 2);
+ EXPECT_EQ(m->FriendlyName(Symbols()), "mat2x3");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/ast/member_accessor_expression.cc b/chromium/third_party/dawn/src/tint/ast/member_accessor_expression.cc
index 71f1991efe8..a087ea458cb 100644
--- a/chromium/third_party/dawn/src/tint/ast/member_accessor_expression.cc
+++ b/chromium/third_party/dawn/src/tint/ast/member_accessor_expression.cc
@@ -20,30 +20,27 @@ TINT_INSTANTIATE_TYPEINFO(tint::ast::MemberAccessorExpression);
namespace tint::ast {
-MemberAccessorExpression::MemberAccessorExpression(
- ProgramID pid,
- const Source& src,
- const Expression* str,
- const IdentifierExpression* mem)
+MemberAccessorExpression::MemberAccessorExpression(ProgramID pid,
+ const Source& src,
+ const Expression* str,
+ const IdentifierExpression* mem)
: Base(pid, src), structure(str), member(mem) {
- TINT_ASSERT(AST, structure);
- TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, structure, program_id);
- TINT_ASSERT(AST, member);
- TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, member, program_id);
+ TINT_ASSERT(AST, structure);
+ TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, structure, program_id);
+ TINT_ASSERT(AST, member);
+ TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, member, program_id);
}
-MemberAccessorExpression::MemberAccessorExpression(MemberAccessorExpression&&) =
- default;
+MemberAccessorExpression::MemberAccessorExpression(MemberAccessorExpression&&) = default;
MemberAccessorExpression::~MemberAccessorExpression() = default;
-const MemberAccessorExpression* MemberAccessorExpression::Clone(
- CloneContext* ctx) const {
- // Clone arguments outside of create() call to have deterministic ordering
- auto src = ctx->Clone(source);
- auto* str = ctx->Clone(structure);
- auto* mem = ctx->Clone(member);
- return ctx->dst->create<MemberAccessorExpression>(src, str, mem);
+const MemberAccessorExpression* MemberAccessorExpression::Clone(CloneContext* ctx) const {
+ // Clone arguments outside of create() call to have deterministic ordering
+ auto src = ctx->Clone(source);
+ auto* str = ctx->Clone(structure);
+ auto* mem = ctx->Clone(member);
+ return ctx->dst->create<MemberAccessorExpression>(src, str, mem);
}
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/member_accessor_expression.h b/chromium/third_party/dawn/src/tint/ast/member_accessor_expression.h
index 800d0180516..07054caddb7 100644
--- a/chromium/third_party/dawn/src/tint/ast/member_accessor_expression.h
+++ b/chromium/third_party/dawn/src/tint/ast/member_accessor_expression.h
@@ -20,33 +20,32 @@
namespace tint::ast {
/// A member accessor expression
-class MemberAccessorExpression final
- : public Castable<MemberAccessorExpression, Expression> {
- public:
- /// Constructor
- /// @param program_id the identifier of the program that owns this node
- /// @param source the member accessor expression source
- /// @param structure the structure
- /// @param member the member
- MemberAccessorExpression(ProgramID program_id,
- const Source& source,
- const Expression* structure,
- const IdentifierExpression* member);
- /// Move constructor
- MemberAccessorExpression(MemberAccessorExpression&&);
- ~MemberAccessorExpression() override;
-
- /// Clones this node and all transitive child nodes using the `CloneContext`
- /// `ctx`.
- /// @param ctx the clone context
- /// @return the newly cloned node
- const MemberAccessorExpression* Clone(CloneContext* ctx) const override;
-
- /// The structure
- const Expression* const structure;
-
- /// The member expression
- const IdentifierExpression* const member;
+class MemberAccessorExpression final : public Castable<MemberAccessorExpression, Expression> {
+ public:
+ /// Constructor
+ /// @param program_id the identifier of the program that owns this node
+ /// @param source the member accessor expression source
+ /// @param structure the structure
+ /// @param member the member
+ MemberAccessorExpression(ProgramID program_id,
+ const Source& source,
+ const Expression* structure,
+ const IdentifierExpression* member);
+ /// Move constructor
+ MemberAccessorExpression(MemberAccessorExpression&&);
+ ~MemberAccessorExpression() override;
+
+ /// Clones this node and all transitive child nodes using the `CloneContext`
+ /// `ctx`.
+ /// @param ctx the clone context
+ /// @return the newly cloned node
+ const MemberAccessorExpression* Clone(CloneContext* ctx) const override;
+
+ /// The structure
+ const Expression* const structure;
+
+ /// The member expression
+ const IdentifierExpression* const member;
};
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/member_accessor_expression_test.cc b/chromium/third_party/dawn/src/tint/ast/member_accessor_expression_test.cc
index ed134a040ec..d56b740df92 100644
--- a/chromium/third_party/dawn/src/tint/ast/member_accessor_expression_test.cc
+++ b/chromium/third_party/dawn/src/tint/ast/member_accessor_expression_test.cc
@@ -21,66 +21,63 @@ namespace {
using MemberAccessorExpressionTest = TestHelper;
TEST_F(MemberAccessorExpressionTest, Creation) {
- auto* str = Expr("structure");
- auto* mem = Expr("member");
+ auto* str = Expr("structure");
+ auto* mem = Expr("member");
- auto* stmt = create<MemberAccessorExpression>(str, mem);
- EXPECT_EQ(stmt->structure, str);
- EXPECT_EQ(stmt->member, mem);
+ auto* stmt = create<MemberAccessorExpression>(str, mem);
+ EXPECT_EQ(stmt->structure, str);
+ EXPECT_EQ(stmt->member, mem);
}
TEST_F(MemberAccessorExpressionTest, Creation_WithSource) {
- auto* stmt = create<MemberAccessorExpression>(
- Source{Source::Location{20, 2}}, Expr("structure"), Expr("member"));
- auto src = stmt->source;
- EXPECT_EQ(src.range.begin.line, 20u);
- EXPECT_EQ(src.range.begin.column, 2u);
+ auto* stmt = create<MemberAccessorExpression>(Source{Source::Location{20, 2}},
+ Expr("structure"), Expr("member"));
+ auto src = stmt->source;
+ EXPECT_EQ(src.range.begin.line, 20u);
+ EXPECT_EQ(src.range.begin.column, 2u);
}
TEST_F(MemberAccessorExpressionTest, IsMemberAccessor) {
- auto* stmt =
- create<MemberAccessorExpression>(Expr("structure"), Expr("member"));
- EXPECT_TRUE(stmt->Is<MemberAccessorExpression>());
+ auto* stmt = create<MemberAccessorExpression>(Expr("structure"), Expr("member"));
+ EXPECT_TRUE(stmt->Is<MemberAccessorExpression>());
}
TEST_F(MemberAccessorExpressionTest, Assert_Null_Struct) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b;
- b.create<MemberAccessorExpression>(nullptr, b.Expr("member"));
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b;
+ b.create<MemberAccessorExpression>(nullptr, b.Expr("member"));
+ },
+ "internal compiler error");
}
TEST_F(MemberAccessorExpressionTest, Assert_Null_Member) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b;
- b.create<MemberAccessorExpression>(b.Expr("struct"), nullptr);
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b;
+ b.create<MemberAccessorExpression>(b.Expr("struct"), nullptr);
+ },
+ "internal compiler error");
}
TEST_F(MemberAccessorExpressionTest, Assert_DifferentProgramID_Struct) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b1;
- ProgramBuilder b2;
- b1.create<MemberAccessorExpression>(b2.Expr("structure"),
- b1.Expr("member"));
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b1;
+ ProgramBuilder b2;
+ b1.create<MemberAccessorExpression>(b2.Expr("structure"), b1.Expr("member"));
+ },
+ "internal compiler error");
}
TEST_F(MemberAccessorExpressionTest, Assert_DifferentProgramID_Member) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b1;
- ProgramBuilder b2;
- b1.create<MemberAccessorExpression>(b1.Expr("structure"),
- b2.Expr("member"));
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b1;
+ ProgramBuilder b2;
+ b1.create<MemberAccessorExpression>(b1.Expr("structure"), b2.Expr("member"));
+ },
+ "internal compiler error");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/ast/module.cc b/chromium/third_party/dawn/src/tint/ast/module.cc
index 8fd5caef91e..40dff985997 100644
--- a/chromium/third_party/dawn/src/tint/ast/module.cc
+++ b/chromium/third_party/dawn/src/tint/ast/module.cc
@@ -25,101 +25,107 @@ namespace tint::ast {
Module::Module(ProgramID pid, const Source& src) : Base(pid, src) {}
-Module::Module(ProgramID pid,
- const Source& src,
- std::vector<const ast::Node*> global_decls)
+Module::Module(ProgramID pid, const Source& src, std::vector<const ast::Node*> global_decls)
: Base(pid, src), global_declarations_(std::move(global_decls)) {
- for (auto* decl : global_declarations_) {
- if (decl == nullptr) {
- continue;
+ for (auto* decl : global_declarations_) {
+ if (decl == nullptr) {
+ continue;
+ }
+ diag::List diags;
+ BinGlobalDeclaration(decl, diags);
}
- diag::List diags;
- BinGlobalDeclaration(decl, diags);
- }
}
Module::~Module() = default;
const ast::TypeDecl* Module::LookupType(Symbol name) const {
- for (auto* ty : TypeDecls()) {
- if (ty->name == name) {
- return ty;
+ for (auto* ty : TypeDecls()) {
+ if (ty->name == name) {
+ return ty;
+ }
}
- }
- return nullptr;
+ return nullptr;
}
void Module::AddGlobalDeclaration(const tint::ast::Node* decl) {
- diag::List diags;
- BinGlobalDeclaration(decl, diags);
- global_declarations_.emplace_back(decl);
+ diag::List diags;
+ BinGlobalDeclaration(decl, diags);
+ global_declarations_.emplace_back(decl);
+}
+
+void Module::BinGlobalDeclaration(const tint::ast::Node* decl, diag::List& diags) {
+ Switch(
+ decl, //
+ [&](const ast::TypeDecl* type) {
+ TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, type, program_id);
+ type_decls_.push_back(type);
+ },
+ [&](const Function* func) {
+ TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, func, program_id);
+ functions_.push_back(func);
+ },
+ [&](const Variable* var) {
+ TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, var, program_id);
+ global_variables_.push_back(var);
+ },
+ [&](const Enable* enable) {
+ TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, enable, program_id);
+ enables_.push_back(enable);
+ },
+ [&](Default) { TINT_ICE(AST, diags) << "Unknown global declaration type"; });
}
-void Module::BinGlobalDeclaration(const tint::ast::Node* decl,
- diag::List& diags) {
- Switch(
- decl, //
- [&](const ast::TypeDecl* type) {
- TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, type, program_id);
- type_decls_.push_back(type);
- },
- [&](const Function* func) {
- TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, func, program_id);
- functions_.push_back(func);
- },
- [&](const Variable* var) {
- TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, var, program_id);
- global_variables_.push_back(var);
- },
- [&](Default) {
- TINT_ICE(AST, diags) << "Unknown global declaration type";
- });
+void Module::AddEnable(const ast::Enable* enable) {
+ TINT_ASSERT(AST, enable);
+ TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, enable, program_id);
+ global_declarations_.push_back(enable);
+ enables_.push_back(enable);
}
void Module::AddGlobalVariable(const ast::Variable* var) {
- TINT_ASSERT(AST, var);
- TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, var, program_id);
- global_variables_.push_back(var);
- global_declarations_.push_back(var);
+ TINT_ASSERT(AST, var);
+ TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, var, program_id);
+ global_variables_.push_back(var);
+ global_declarations_.push_back(var);
}
void Module::AddTypeDecl(const ast::TypeDecl* type) {
- TINT_ASSERT(AST, type);
- TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, type, program_id);
- type_decls_.push_back(type);
- global_declarations_.push_back(type);
+ TINT_ASSERT(AST, type);
+ TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, type, program_id);
+ type_decls_.push_back(type);
+ global_declarations_.push_back(type);
}
void Module::AddFunction(const ast::Function* func) {
- TINT_ASSERT(AST, func);
- TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, func, program_id);
- functions_.push_back(func);
- global_declarations_.push_back(func);
+ TINT_ASSERT(AST, func);
+ TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, func, program_id);
+ functions_.push_back(func);
+ global_declarations_.push_back(func);
}
const Module* Module::Clone(CloneContext* ctx) const {
- auto* out = ctx->dst->create<Module>();
- out->Copy(ctx, this);
- return out;
+ auto* out = ctx->dst->create<Module>();
+ out->Copy(ctx, this);
+ return out;
}
void Module::Copy(CloneContext* ctx, const Module* src) {
- ctx->Clone(global_declarations_, src->global_declarations_);
-
- // During the clone, declarations may have been placed into the module.
- // Clear everything out, as we're about to re-bin the declarations.
- type_decls_.clear();
- functions_.clear();
- global_variables_.clear();
-
- for (auto* decl : global_declarations_) {
- if (!decl) {
- TINT_ICE(AST, ctx->dst->Diagnostics())
- << "src global declaration was nullptr";
- continue;
+ ctx->Clone(global_declarations_, src->global_declarations_);
+
+ // During the clone, declarations may have been placed into the module.
+ // Clear everything out, as we're about to re-bin the declarations.
+ type_decls_.clear();
+ functions_.clear();
+ global_variables_.clear();
+ enables_.clear();
+
+ for (auto* decl : global_declarations_) {
+ if (!decl) {
+ TINT_ICE(AST, ctx->dst->Diagnostics()) << "src global declaration was nullptr";
+ continue;
+ }
+ BinGlobalDeclaration(decl, ctx->dst->Diagnostics());
}
- BinGlobalDeclaration(decl, ctx->dst->Diagnostics());
- }
}
} // namespace tint::ast
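
Editor's note: the functional change in this hunk is that ast::Module now bins WGSL `enable` directives alongside types, functions and variables: BinGlobalDeclaration() gains an Enable case, AddEnable() appends to both the declaration list and enables_, and Copy() clears enables_ before re-binning. A rough standalone sketch of the binning idea, using plain virtual dispatch instead of Tint's Switch/Castable machinery:

    #include <vector>

    // Simplified global-declaration kinds; Tint uses its Castable hierarchy instead.
    struct Decl     { virtual ~Decl() = default; };
    struct TypeDecl : Decl {};
    struct Function : Decl {};
    struct Variable : Decl {};
    struct Enable   : Decl {};   // `enable <extension>;` directive

    struct Module {
        std::vector<const Decl*> global_declarations;
        std::vector<const TypeDecl*> type_decls;
        std::vector<const Function*> functions;
        std::vector<const Variable*> global_variables;
        std::vector<const Enable*> enables;   // new bin mirroring enables_

        // Place decl into exactly one typed bin; declaration order is kept
        // separately in global_declarations.
        void Bin(const Decl* decl) {
            if (auto* t = dynamic_cast<const TypeDecl*>(decl)) { type_decls.push_back(t); }
            else if (auto* f = dynamic_cast<const Function*>(decl)) { functions.push_back(f); }
            else if (auto* v = dynamic_cast<const Variable*>(decl)) { global_variables.push_back(v); }
            else if (auto* e = dynamic_cast<const Enable*>(decl)) { enables.push_back(e); }
        }

        void AddEnable(const Enable* e) {
            global_declarations.push_back(e);  // keeps source order
            enables.push_back(e);              // and fills the dedicated bin
        }
    };
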
diff --git a/chromium/third_party/dawn/src/tint/ast/module.h b/chromium/third_party/dawn/src/tint/ast/module.h
index 03e64d940f9..45b1ec6ebe4 100644
--- a/chromium/third_party/dawn/src/tint/ast/module.h
+++ b/chromium/third_party/dawn/src/tint/ast/module.h
@@ -18,6 +18,7 @@
#include <string>
#include <vector>
+#include "src/tint/ast/enable.h"
#include "src/tint/ast/function.h"
#include "src/tint/ast/type.h"
@@ -28,94 +29,98 @@ class TypeDecl;
/// Module holds the top-level AST types, functions and global variables used by
/// a Program.
class Module final : public Castable<Module, Node> {
- public:
- /// Constructor
- /// @param pid the identifier of the program that owns this node
- /// @param src the source of this node
- Module(ProgramID pid, const Source& src);
-
- /// Constructor
- /// @param pid the identifier of the program that owns this node
- /// @param src the source of this node
- /// @param global_decls the list of global types, functions, and variables, in
- /// the order they were declared in the source program
- Module(ProgramID pid,
- const Source& src,
- std::vector<const Node*> global_decls);
-
- /// Destructor
- ~Module() override;
-
- /// @returns the declaration-ordered global declarations for the module
- const std::vector<const Node*>& GlobalDeclarations() const {
- return global_declarations_;
- }
-
- /// Add a global variable to the Builder
- /// @param var the variable to add
- void AddGlobalVariable(const Variable* var);
-
- /// @returns true if the module has the global declaration `decl`
- /// @param decl the declaration to check
- bool HasGlobalDeclaration(Node* decl) const {
- for (auto* d : global_declarations_) {
- if (d == decl) {
- return true;
- }
+ public:
+ /// Constructor
+ /// @param pid the identifier of the program that owns this node
+ /// @param src the source of this node
+ Module(ProgramID pid, const Source& src);
+
+ /// Constructor
+ /// @param pid the identifier of the program that owns this node
+ /// @param src the source of this node
+ /// @param global_decls the list of global types, functions, and variables, in
+ /// the order they were declared in the source program
+ Module(ProgramID pid, const Source& src, std::vector<const Node*> global_decls);
+
+ /// Destructor
+ ~Module() override;
+
+ /// @returns the declaration-ordered global declarations for the module
+ const std::vector<const Node*>& GlobalDeclarations() const { return global_declarations_; }
+
+    /// Add an enable directive to the Builder
+ /// @param ext the enable directive to add
+ void AddEnable(const Enable* ext);
+
+ /// Add a global variable to the Builder
+ /// @param var the variable to add
+ void AddGlobalVariable(const Variable* var);
+
+ /// @returns true if the module has the global declaration `decl`
+ /// @param decl the declaration to check
+ bool HasGlobalDeclaration(Node* decl) const {
+ for (auto* d : global_declarations_) {
+ if (d == decl) {
+ return true;
+ }
+ }
+ return false;
}
- return false;
- }
-
- /// Adds a global declaration to the Builder.
- /// @param decl the declaration to add
- void AddGlobalDeclaration(const tint::ast::Node* decl);
-
- /// @returns the global variables for the module
- const VariableList& GlobalVariables() const { return global_variables_; }
-
- /// @returns the global variables for the module
- VariableList& GlobalVariables() { return global_variables_; }
-
- /// Adds a type declaration to the Builder.
- /// @param decl the type declaration to add
- void AddTypeDecl(const TypeDecl* decl);
-
- /// @returns the TypeDecl registered as a TypeDecl()
- /// @param name the name of the type to search for
- const TypeDecl* LookupType(Symbol name) const;
-
- /// @returns the declared types in the module
- const std::vector<const TypeDecl*>& TypeDecls() const { return type_decls_; }
-
- /// Add a function to the Builder
- /// @param func the function to add
- void AddFunction(const Function* func);
-
- /// @returns the functions declared in the module
- const FunctionList& Functions() const { return functions_; }
-
- /// Clones this node and all transitive child nodes using the `CloneContext`
- /// `ctx`.
- /// @param ctx the clone context
- /// @return the newly cloned node
- const Module* Clone(CloneContext* ctx) const override;
-
- /// Copy copies the content of the Module src into this module.
- /// @param ctx the clone context
- /// @param src the module to copy into this module
- void Copy(CloneContext* ctx, const Module* src);
-
- private:
- /// Adds `decl` to either:
- /// * #global_declarations_
- /// * #type_decls_
- /// * #functions_
- void BinGlobalDeclaration(const tint::ast::Node* decl, diag::List& diags);
-
- std::vector<const Node*> global_declarations_;
- std::vector<const TypeDecl*> type_decls_;
- FunctionList functions_;
- VariableList global_variables_;
+
+ /// Adds a global declaration to the Builder.
+ /// @param decl the declaration to add
+ void AddGlobalDeclaration(const tint::ast::Node* decl);
+
+ /// @returns the global variables for the module
+ const VariableList& GlobalVariables() const { return global_variables_; }
+
+ /// @returns the global variables for the module
+ VariableList& GlobalVariables() { return global_variables_; }
+
+ /// @returns the extension set for the module
+ const EnableList& Enables() const { return enables_; }
+
+ /// Adds a type declaration to the Builder.
+ /// @param decl the type declaration to add
+ void AddTypeDecl(const TypeDecl* decl);
+
+ /// @returns the TypeDecl registered as a TypeDecl()
+ /// @param name the name of the type to search for
+ const TypeDecl* LookupType(Symbol name) const;
+
+ /// @returns the declared types in the module
+ const std::vector<const TypeDecl*>& TypeDecls() const { return type_decls_; }
+
+ /// Add a function to the Builder
+ /// @param func the function to add
+ void AddFunction(const Function* func);
+
+ /// @returns the functions declared in the module
+ const FunctionList& Functions() const { return functions_; }
+
+ /// Clones this node and all transitive child nodes using the `CloneContext`
+ /// `ctx`.
+ /// @param ctx the clone context
+ /// @return the newly cloned node
+ const Module* Clone(CloneContext* ctx) const override;
+
+ /// Copy copies the content of the Module src into this module.
+ /// @param ctx the clone context
+ /// @param src the module to copy into this module
+ void Copy(CloneContext* ctx, const Module* src);
+
+ private:
+ /// Adds `decl` to either:
+ /// * #global_declarations_
+ /// * #type_decls_
+ /// * #functions_
+ void BinGlobalDeclaration(const tint::ast::Node* decl, diag::List& diags);
+
+ std::vector<const Node*> global_declarations_;
+ std::vector<const TypeDecl*> type_decls_;
+ FunctionList functions_;
+ VariableList global_variables_;
+ EnableList enables_;
};
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/module_clone_test.cc b/chromium/third_party/dawn/src/tint/ast/module_clone_test.cc
index 3cb900bf31e..bd96b2609f5 100644
--- a/chromium/third_party/dawn/src/tint/ast/module_clone_test.cc
+++ b/chromium/third_party/dawn/src/tint/ast/module_clone_test.cc
@@ -23,9 +23,9 @@ namespace {
TEST(ModuleCloneTest, Clone) {
#if TINT_BUILD_WGSL_READER && TINT_BUILD_WGSL_WRITER
- // Shader that exercises the bulk of the AST nodes and types.
- // See also fuzzers/tint_ast_clone_fuzzer.cc for further coverage of cloning.
- Source::File file("test.wgsl", R"(struct S0 {
+ // Shader that exercises the bulk of the AST nodes and types.
+ // See also fuzzers/tint_ast_clone_fuzzer.cc for further coverage of cloning.
+ Source::File file("test.wgsl", R"(struct S0 {
@size(4)
m0 : u32,
m1 : array<u32>,
@@ -99,7 +99,7 @@ fn f1(p0 : f32, p1 : i32) -> f32 {
return 1.0;
}
-@stage(fragment)
+@fragment
fn main() {
f1(1.0, 2);
}
@@ -116,62 +116,62 @@ let declaration_order_check_4 : i32 = 1;
)");
- // Parse the wgsl, create the src program
- auto src = reader::wgsl::Parse(&file);
+ // Parse the wgsl, create the src program
+ auto src = reader::wgsl::Parse(&file);
- ASSERT_TRUE(src.IsValid()) << diag::Formatter().format(src.Diagnostics());
+ ASSERT_TRUE(src.IsValid()) << diag::Formatter().format(src.Diagnostics());
- // Clone the src program to dst
- Program dst(src.Clone());
+ // Clone the src program to dst
+ Program dst(src.Clone());
- ASSERT_TRUE(dst.IsValid()) << diag::Formatter().format(dst.Diagnostics());
+ ASSERT_TRUE(dst.IsValid()) << diag::Formatter().format(dst.Diagnostics());
- // Expect the printed strings to match
- EXPECT_EQ(Program::printer(&src), Program::printer(&dst));
+ // Expect the printed strings to match
+ EXPECT_EQ(Program::printer(&src), Program::printer(&dst));
- // Check that none of the AST nodes or type pointers in dst are found in src
- std::unordered_set<const ast::Node*> src_nodes;
- for (auto* src_node : src.ASTNodes().Objects()) {
- src_nodes.emplace(src_node);
- }
- std::unordered_set<const sem::Type*> src_types;
- for (auto* src_type : src.Types()) {
- src_types.emplace(src_type);
- }
- for (auto* dst_node : dst.ASTNodes().Objects()) {
- ASSERT_EQ(src_nodes.count(dst_node), 0u);
- }
- for (auto* dst_type : dst.Types()) {
- ASSERT_EQ(src_types.count(dst_type), 0u);
- }
+ // Check that none of the AST nodes or type pointers in dst are found in src
+ std::unordered_set<const ast::Node*> src_nodes;
+ for (auto* src_node : src.ASTNodes().Objects()) {
+ src_nodes.emplace(src_node);
+ }
+ std::unordered_set<const sem::Type*> src_types;
+ for (auto* src_type : src.Types()) {
+ src_types.emplace(src_type);
+ }
+ for (auto* dst_node : dst.ASTNodes().Objects()) {
+ ASSERT_EQ(src_nodes.count(dst_node), 0u);
+ }
+ for (auto* dst_type : dst.Types()) {
+ ASSERT_EQ(src_types.count(dst_type), 0u);
+ }
- // Regenerate the wgsl for the src program. We use this instead of the
- // original source so that reformatting doesn't impact the final wgsl
- // comparison.
- writer::wgsl::Options options;
- std::string src_wgsl;
- {
- auto result = writer::wgsl::Generate(&src, options);
- ASSERT_TRUE(result.success) << result.error;
- src_wgsl = result.wgsl;
-
- // Move the src program to a temporary that'll be dropped, so that the src
- // program is released before we attempt to print the dst program. This
- // guarantee that all the source program nodes and types are destructed and
- // freed. ASAN should error if there's any remaining references in dst when
- // we try to reconstruct the WGSL.
- auto tmp = std::move(src);
- }
+ // Regenerate the wgsl for the src program. We use this instead of the
+ // original source so that reformatting doesn't impact the final wgsl
+ // comparison.
+ writer::wgsl::Options options;
+ std::string src_wgsl;
+ {
+ auto result = writer::wgsl::Generate(&src, options);
+ ASSERT_TRUE(result.success) << result.error;
+ src_wgsl = result.wgsl;
+
+ // Move the src program to a temporary that'll be dropped, so that the src
+ // program is released before we attempt to print the dst program. This
+        // guarantees that all the source program nodes and types are destructed and
+        // freed. ASAN should error if there are any remaining references in dst when
+ // we try to reconstruct the WGSL.
+ auto tmp = std::move(src);
+ }
- // Print the dst module, check it matches the original source
- auto result = writer::wgsl::Generate(&dst, options);
- ASSERT_TRUE(result.success);
- auto dst_wgsl = result.wgsl;
- ASSERT_EQ(src_wgsl, dst_wgsl);
+ // Print the dst module, check it matches the original source
+ auto result = writer::wgsl::Generate(&dst, options);
+ ASSERT_TRUE(result.success);
+ auto dst_wgsl = result.wgsl;
+ ASSERT_EQ(src_wgsl, dst_wgsl);
#else // #if TINT_BUILD_WGSL_READER && TINT_BUILD_WGSL_WRITER
- GTEST_SKIP() << "ModuleCloneTest requires TINT_BUILD_WGSL_READER and "
- "TINT_BUILD_WGSL_WRITER to be enabled";
+ GTEST_SKIP() << "ModuleCloneTest requires TINT_BUILD_WGSL_READER and "
+ "TINT_BUILD_WGSL_WRITER to be enabled";
#endif
}
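
Editor's note: apart from the indentation churn, the only substantive edit in module_clone_test.cc is to the embedded WGSL source: the entry point now uses the shorthand @fragment attribute instead of the older @stage(fragment) spelling. A tiny illustrative C++ snippet embedding such a shader (hypothetical, not the test's full source):

    #include <string>

    // Hypothetical snippet: a fragment entry point using the current shorthand
    // attribute; the pre-update spelling is kept only as a WGSL comment.
    const std::string kFragmentShader = R"(
    // @stage(fragment)   // old spelling, replaced by this patch
    @fragment
    fn main() {
    }
    )";
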
diff --git a/chromium/third_party/dawn/src/tint/ast/module_test.cc b/chromium/third_party/dawn/src/tint/ast/module_test.cc
index 0d655092d0c..6ac610ef2e6 100644
--- a/chromium/third_party/dawn/src/tint/ast/module_test.cc
+++ b/chromium/third_party/dawn/src/tint/ast/module_test.cc
@@ -22,119 +22,112 @@ namespace {
using ModuleTest = TestHelper;
TEST_F(ModuleTest, Creation) {
- EXPECT_EQ(Program(std::move(*this)).AST().Functions().size(), 0u);
+ EXPECT_EQ(Program(std::move(*this)).AST().Functions().size(), 0u);
}
TEST_F(ModuleTest, LookupFunction) {
- auto* func = Func("main", VariableList{}, ty.f32(), StatementList{},
- ast::AttributeList{});
+ auto* func = Func("main", VariableList{}, ty.f32(), StatementList{}, ast::AttributeList{});
- Program program(std::move(*this));
- EXPECT_EQ(func,
- program.AST().Functions().Find(program.Symbols().Get("main")));
+ Program program(std::move(*this));
+ EXPECT_EQ(func, program.AST().Functions().Find(program.Symbols().Get("main")));
}
TEST_F(ModuleTest, LookupFunctionMissing) {
- Program program(std::move(*this));
- EXPECT_EQ(nullptr,
- program.AST().Functions().Find(program.Symbols().Get("Missing")));
+ Program program(std::move(*this));
+ EXPECT_EQ(nullptr, program.AST().Functions().Find(program.Symbols().Get("Missing")));
}
TEST_F(ModuleTest, Assert_Null_GlobalVariable) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder builder;
- builder.AST().AddGlobalVariable(nullptr);
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder builder;
+ builder.AST().AddGlobalVariable(nullptr);
+ },
+ "internal compiler error");
}
TEST_F(ModuleTest, Assert_Null_TypeDecl) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder builder;
- builder.AST().AddTypeDecl(nullptr);
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder builder;
+ builder.AST().AddTypeDecl(nullptr);
+ },
+ "internal compiler error");
}
TEST_F(ModuleTest, Assert_DifferentProgramID_Function) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b1;
- ProgramBuilder b2;
- b1.AST().AddFunction(b2.create<ast::Function>(
- b2.Symbols().Register("func"), VariableList{}, b2.ty.f32(),
- b2.Block(), AttributeList{}, AttributeList{}));
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b1;
+ ProgramBuilder b2;
+ b1.AST().AddFunction(b2.create<ast::Function>(b2.Symbols().Register("func"),
+ VariableList{}, b2.ty.f32(), b2.Block(),
+ AttributeList{}, AttributeList{}));
+ },
+ "internal compiler error");
}
TEST_F(ModuleTest, Assert_DifferentProgramID_GlobalVariable) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b1;
- ProgramBuilder b2;
- b1.AST().AddGlobalVariable(
- b2.Var("var", b2.ty.i32(), ast::StorageClass::kPrivate));
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b1;
+ ProgramBuilder b2;
+ b1.AST().AddGlobalVariable(b2.Var("var", b2.ty.i32(), ast::StorageClass::kPrivate));
+ },
+ "internal compiler error");
}
TEST_F(ModuleTest, Assert_Null_Function) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder builder;
- builder.AST().AddFunction(nullptr);
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder builder;
+ builder.AST().AddFunction(nullptr);
+ },
+ "internal compiler error");
}
TEST_F(ModuleTest, CloneOrder) {
- // Create a program with a function, alias decl and var decl.
- Program p = [] {
- ProgramBuilder b;
- b.Func("F", {}, b.ty.void_(), {});
- b.Alias("A", b.ty.u32());
- b.Global("V", b.ty.i32(), ast::StorageClass::kPrivate);
- return Program(std::move(b));
- }();
-
- // Clone the program, using ReplaceAll() to create new module-scope
- // declarations. We want to test that these are added just before the
- // declaration that triggered the ReplaceAll().
- ProgramBuilder cloned;
- CloneContext ctx(&cloned, &p);
- ctx.ReplaceAll([&](const ast::Function*) -> const ast::Function* {
- ctx.dst->Alias("inserted_before_F", cloned.ty.u32());
- return nullptr;
- });
- ctx.ReplaceAll([&](const ast::Alias*) -> const ast::Alias* {
- ctx.dst->Alias("inserted_before_A", cloned.ty.u32());
- return nullptr;
- });
- ctx.ReplaceAll([&](const ast::Variable*) -> const ast::Variable* {
- ctx.dst->Alias("inserted_before_V", cloned.ty.u32());
- return nullptr;
- });
- ctx.Clone();
-
- auto& decls = cloned.AST().GlobalDeclarations();
- ASSERT_EQ(decls.size(), 6u);
- EXPECT_TRUE(decls[1]->Is<ast::Function>());
- EXPECT_TRUE(decls[3]->Is<ast::Alias>());
- EXPECT_TRUE(decls[5]->Is<ast::Variable>());
-
- ASSERT_TRUE(decls[0]->Is<ast::Alias>());
- ASSERT_TRUE(decls[2]->Is<ast::Alias>());
- ASSERT_TRUE(decls[4]->Is<ast::Alias>());
-
- ASSERT_EQ(cloned.Symbols().NameFor(decls[0]->As<ast::Alias>()->name),
- "inserted_before_F");
- ASSERT_EQ(cloned.Symbols().NameFor(decls[2]->As<ast::Alias>()->name),
- "inserted_before_A");
- ASSERT_EQ(cloned.Symbols().NameFor(decls[4]->As<ast::Alias>()->name),
- "inserted_before_V");
+ // Create a program with a function, alias decl and var decl.
+ Program p = [] {
+ ProgramBuilder b;
+ b.Func("F", {}, b.ty.void_(), {});
+ b.Alias("A", b.ty.u32());
+ b.Global("V", b.ty.i32(), ast::StorageClass::kPrivate);
+ return Program(std::move(b));
+ }();
+
+ // Clone the program, using ReplaceAll() to create new module-scope
+ // declarations. We want to test that these are added just before the
+ // declaration that triggered the ReplaceAll().
+ ProgramBuilder cloned;
+ CloneContext ctx(&cloned, &p);
+ ctx.ReplaceAll([&](const ast::Function*) -> const ast::Function* {
+ ctx.dst->Alias("inserted_before_F", cloned.ty.u32());
+ return nullptr;
+ });
+ ctx.ReplaceAll([&](const ast::Alias*) -> const ast::Alias* {
+ ctx.dst->Alias("inserted_before_A", cloned.ty.u32());
+ return nullptr;
+ });
+ ctx.ReplaceAll([&](const ast::Variable*) -> const ast::Variable* {
+ ctx.dst->Alias("inserted_before_V", cloned.ty.u32());
+ return nullptr;
+ });
+ ctx.Clone();
+
+ auto& decls = cloned.AST().GlobalDeclarations();
+ ASSERT_EQ(decls.size(), 6u);
+ EXPECT_TRUE(decls[1]->Is<ast::Function>());
+ EXPECT_TRUE(decls[3]->Is<ast::Alias>());
+ EXPECT_TRUE(decls[5]->Is<ast::Variable>());
+
+ ASSERT_TRUE(decls[0]->Is<ast::Alias>());
+ ASSERT_TRUE(decls[2]->Is<ast::Alias>());
+ ASSERT_TRUE(decls[4]->Is<ast::Alias>());
+
+ ASSERT_EQ(cloned.Symbols().NameFor(decls[0]->As<ast::Alias>()->name), "inserted_before_F");
+ ASSERT_EQ(cloned.Symbols().NameFor(decls[2]->As<ast::Alias>()->name), "inserted_before_A");
+ ASSERT_EQ(cloned.Symbols().NameFor(decls[4]->As<ast::Alias>()->name), "inserted_before_V");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/ast/multisampled_texture.cc b/chromium/third_party/dawn/src/tint/ast/multisampled_texture.cc
index fd5fc59f742..91f8edf5dd9 100644
--- a/chromium/third_party/dawn/src/tint/ast/multisampled_texture.cc
+++ b/chromium/third_party/dawn/src/tint/ast/multisampled_texture.cc
@@ -25,26 +25,24 @@ MultisampledTexture::MultisampledTexture(ProgramID pid,
TextureDimension d,
const Type* ty)
: Base(pid, src, d), type(ty) {
- TINT_ASSERT(AST, type);
+ TINT_ASSERT(AST, type);
}
MultisampledTexture::MultisampledTexture(MultisampledTexture&&) = default;
MultisampledTexture::~MultisampledTexture() = default;
-std::string MultisampledTexture::FriendlyName(
- const SymbolTable& symbols) const {
- std::ostringstream out;
- out << "texture_multisampled_" << dim << "<" << type->FriendlyName(symbols)
- << ">";
- return out.str();
+std::string MultisampledTexture::FriendlyName(const SymbolTable& symbols) const {
+ std::ostringstream out;
+ out << "texture_multisampled_" << dim << "<" << type->FriendlyName(symbols) << ">";
+ return out.str();
}
const MultisampledTexture* MultisampledTexture::Clone(CloneContext* ctx) const {
- // Clone arguments outside of create() call to have deterministic ordering
- auto src = ctx->Clone(source);
- auto* ty = ctx->Clone(type);
- return ctx->dst->create<MultisampledTexture>(src, dim, ty);
+ // Clone arguments outside of create() call to have deterministic ordering
+ auto src = ctx->Clone(source);
+ auto* ty = ctx->Clone(type);
+ return ctx->dst->create<MultisampledTexture>(src, dim, ty);
}
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/multisampled_texture.h b/chromium/third_party/dawn/src/tint/ast/multisampled_texture.h
index 92141241eec..1d955056149 100644
--- a/chromium/third_party/dawn/src/tint/ast/multisampled_texture.h
+++ b/chromium/third_party/dawn/src/tint/ast/multisampled_texture.h
@@ -22,34 +22,30 @@
namespace tint::ast {
/// A multisampled texture type.
-class MultisampledTexture final
- : public Castable<MultisampledTexture, Texture> {
- public:
- /// Constructor
- /// @param pid the identifier of the program that owns this node
- /// @param src the source of this node
- /// @param dim the dimensionality of the texture
- /// @param type the data type of the multisampled texture
- MultisampledTexture(ProgramID pid,
- const Source& src,
- TextureDimension dim,
- const Type* type);
- /// Move constructor
- MultisampledTexture(MultisampledTexture&&);
- ~MultisampledTexture() override;
-
- /// @param symbols the program's symbol table
- /// @returns the name for this type that closely resembles how it would be
- /// declared in WGSL.
- std::string FriendlyName(const SymbolTable& symbols) const override;
-
- /// Clones this type and all transitive types using the `CloneContext` `ctx`.
- /// @param ctx the clone context
- /// @return the newly cloned type
- const MultisampledTexture* Clone(CloneContext* ctx) const override;
-
- /// The subtype of the multisampled texture
- const Type* const type;
+class MultisampledTexture final : public Castable<MultisampledTexture, Texture> {
+ public:
+ /// Constructor
+ /// @param pid the identifier of the program that owns this node
+ /// @param src the source of this node
+ /// @param dim the dimensionality of the texture
+ /// @param type the data type of the multisampled texture
+ MultisampledTexture(ProgramID pid, const Source& src, TextureDimension dim, const Type* type);
+ /// Move constructor
+ MultisampledTexture(MultisampledTexture&&);
+ ~MultisampledTexture() override;
+
+ /// @param symbols the program's symbol table
+ /// @returns the name for this type that closely resembles how it would be
+ /// declared in WGSL.
+ std::string FriendlyName(const SymbolTable& symbols) const override;
+
+ /// Clones this type and all transitive types using the `CloneContext` `ctx`.
+ /// @param ctx the clone context
+ /// @return the newly cloned type
+ const MultisampledTexture* Clone(CloneContext* ctx) const override;
+
+ /// The subtype of the multisampled texture
+ const Type* const type;
};
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/multisampled_texture_test.cc b/chromium/third_party/dawn/src/tint/ast/multisampled_texture_test.cc
index 14b32527650..bc3b87f75bc 100644
--- a/chromium/third_party/dawn/src/tint/ast/multisampled_texture_test.cc
+++ b/chromium/third_party/dawn/src/tint/ast/multisampled_texture_test.cc
@@ -38,30 +38,30 @@ namespace {
using AstMultisampledTextureTest = TestHelper;
TEST_F(AstMultisampledTextureTest, IsTexture) {
- auto* f32 = create<F32>();
- Texture* ty = create<MultisampledTexture>(TextureDimension::kCube, f32);
- EXPECT_FALSE(ty->Is<DepthTexture>());
- EXPECT_TRUE(ty->Is<MultisampledTexture>());
- EXPECT_FALSE(ty->Is<SampledTexture>());
- EXPECT_FALSE(ty->Is<StorageTexture>());
+ auto* f32 = create<F32>();
+ Texture* ty = create<MultisampledTexture>(TextureDimension::kCube, f32);
+ EXPECT_FALSE(ty->Is<DepthTexture>());
+ EXPECT_TRUE(ty->Is<MultisampledTexture>());
+ EXPECT_FALSE(ty->Is<SampledTexture>());
+ EXPECT_FALSE(ty->Is<StorageTexture>());
}
TEST_F(AstMultisampledTextureTest, Dim) {
- auto* f32 = create<F32>();
- auto* s = create<MultisampledTexture>(TextureDimension::k3d, f32);
- EXPECT_EQ(s->dim, TextureDimension::k3d);
+ auto* f32 = create<F32>();
+ auto* s = create<MultisampledTexture>(TextureDimension::k3d, f32);
+ EXPECT_EQ(s->dim, TextureDimension::k3d);
}
TEST_F(AstMultisampledTextureTest, Type) {
- auto* f32 = create<F32>();
- auto* s = create<MultisampledTexture>(TextureDimension::k3d, f32);
- EXPECT_EQ(s->type, f32);
+ auto* f32 = create<F32>();
+ auto* s = create<MultisampledTexture>(TextureDimension::k3d, f32);
+ EXPECT_EQ(s->type, f32);
}
TEST_F(AstMultisampledTextureTest, FriendlyName) {
- auto* f32 = create<F32>();
- auto* s = create<MultisampledTexture>(TextureDimension::k3d, f32);
- EXPECT_EQ(s->FriendlyName(Symbols()), "texture_multisampled_3d<f32>");
+ auto* f32 = create<F32>();
+ auto* s = create<MultisampledTexture>(TextureDimension::k3d, f32);
+ EXPECT_EQ(s->FriendlyName(Symbols()), "texture_multisampled_3d<f32>");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/ast/node.h b/chromium/third_party/dawn/src/tint/ast/node.h
index 90699adb266..19d768228ae 100644
--- a/chromium/third_party/dawn/src/tint/ast/node.h
+++ b/chromium/third_party/dawn/src/tint/ast/node.h
@@ -19,38 +19,29 @@
#include "src/tint/clone_context.h"
-// Forward declarations
-namespace tint {
-class CloneContext;
-} // namespace tint
-namespace tint::sem {
-class Type;
-class Info;
-} // namespace tint::sem
-
namespace tint::ast {
/// AST base class node
class Node : public Castable<Node, Cloneable> {
- public:
- ~Node() override;
+ public:
+ ~Node() override;
- /// The identifier of the program that owns this node
- const ProgramID program_id;
+ /// The identifier of the program that owns this node
+ const ProgramID program_id;
- /// The node source data
- const Source source;
+ /// The node source data
+ const Source source;
- protected:
- /// Create a new node
- /// @param pid the identifier of the program that owns this node
- /// @param src the input source for the node
- Node(ProgramID pid, const Source& src);
- /// Move constructor
- Node(Node&&);
+ protected:
+ /// Create a new node
+ /// @param pid the identifier of the program that owns this node
+ /// @param src the input source for the node
+ Node(ProgramID pid, const Source& src);
+ /// Move constructor
+ Node(Node&&);
- private:
- Node(const Node&) = delete;
+ private:
+ Node(const Node&) = delete;
};
} // namespace tint::ast
@@ -60,7 +51,7 @@ namespace tint {
/// @param node a pointer to an AST node
/// @returns the ProgramID of the given AST node.
inline ProgramID ProgramIDOf(const ast::Node* node) {
- return node ? node->program_id : ProgramID();
+ return node ? node->program_id : ProgramID();
}
} // namespace tint
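
Editor's note: the hunk above drops stale forward declarations from node.h; the null-tolerant ProgramIDOf() helper it keeps is what the TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID checks seen throughout these files build on. A rough sketch of that pattern with hypothetical simplified types, not the real macros:

    #include <cassert>
    #include <cstdint>

    // Simplified stand-ins for ProgramID and an AST node.
    struct ProgramID {
        uint32_t value = 0;                       // 0 means "invalid / not set"
        explicit operator bool() const { return value != 0; }
        friend bool operator==(ProgramID a, ProgramID b) { return a.value == b.value; }
    };
    struct Node { ProgramID program_id; };

    // Null node -> default (invalid) ProgramID, so callers need no null checks.
    inline ProgramID ProgramIDOf(const Node* node) {
        return node ? node->program_id : ProgramID{};
    }

    // "Equal if valid": only assert when both IDs are actually set, letting
    // null pointers and nodes created outside a program pass through.
    inline void AssertSameProgram(const Node* node, ProgramID expected) {
        ProgramID got = ProgramIDOf(node);
        if (got && expected) {
            assert(got == expected && "node belongs to a different program");
        }
    }
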
diff --git a/chromium/third_party/dawn/src/tint/ast/phony_expression.cc b/chromium/third_party/dawn/src/tint/ast/phony_expression.cc
index eb52063f46b..a3fd4fd9465 100644
--- a/chromium/third_party/dawn/src/tint/ast/phony_expression.cc
+++ b/chromium/third_party/dawn/src/tint/ast/phony_expression.cc
@@ -20,17 +20,16 @@ TINT_INSTANTIATE_TYPEINFO(tint::ast::PhonyExpression);
namespace tint::ast {
-PhonyExpression::PhonyExpression(ProgramID pid, const Source& src)
- : Base(pid, src) {}
+PhonyExpression::PhonyExpression(ProgramID pid, const Source& src) : Base(pid, src) {}
PhonyExpression::PhonyExpression(PhonyExpression&&) = default;
PhonyExpression::~PhonyExpression() = default;
const PhonyExpression* PhonyExpression::Clone(CloneContext* ctx) const {
- // Clone arguments outside of create() call to have deterministic ordering
- auto src = ctx->Clone(source);
- return ctx->dst->create<PhonyExpression>(src);
+ // Clone arguments outside of create() call to have deterministic ordering
+ auto src = ctx->Clone(source);
+ return ctx->dst->create<PhonyExpression>(src);
}
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/phony_expression.h b/chromium/third_party/dawn/src/tint/ast/phony_expression.h
index b6012b0c1d3..4fc32dd53d2 100644
--- a/chromium/third_party/dawn/src/tint/ast/phony_expression.h
+++ b/chromium/third_party/dawn/src/tint/ast/phony_expression.h
@@ -22,20 +22,20 @@ namespace tint::ast {
/// Represents the `_` of a phony assignment `_ = <expr>`
/// @see https://www.w3.org/TR/WGSL/#phony-assignment-section
class PhonyExpression final : public Castable<PhonyExpression, Expression> {
- public:
- /// Constructor
- /// @param pid the identifier of the program that owns this node
- /// @param src the source of this node
- PhonyExpression(ProgramID pid, const Source& src);
- /// Move constructor
- PhonyExpression(PhonyExpression&&);
- ~PhonyExpression() override;
+ public:
+ /// Constructor
+ /// @param pid the identifier of the program that owns this node
+ /// @param src the source of this node
+ PhonyExpression(ProgramID pid, const Source& src);
+ /// Move constructor
+ PhonyExpression(PhonyExpression&&);
+ ~PhonyExpression() override;
- /// Clones this node and all transitive child nodes using the `CloneContext`
- /// `ctx`.
- /// @param ctx the clone context
- /// @return the newly cloned node
- const PhonyExpression* Clone(CloneContext* ctx) const override;
+ /// Clones this node and all transitive child nodes using the `CloneContext`
+ /// `ctx`.
+ /// @param ctx the clone context
+ /// @return the newly cloned node
+ const PhonyExpression* Clone(CloneContext* ctx) const override;
};
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/phony_expression_test.cc b/chromium/third_party/dawn/src/tint/ast/phony_expression_test.cc
index f15b2ffd11b..8a4dd231995 100644
--- a/chromium/third_party/dawn/src/tint/ast/phony_expression_test.cc
+++ b/chromium/third_party/dawn/src/tint/ast/phony_expression_test.cc
@@ -20,20 +20,20 @@ namespace {
using IdentifierExpressionTest = TestHelper;
TEST_F(IdentifierExpressionTest, Creation) {
- EXPECT_NE(Phony(), nullptr);
+ EXPECT_NE(Phony(), nullptr);
}
TEST_F(IdentifierExpressionTest, Creation_WithSource) {
- auto* p = Phony(Source{{20, 2}});
+ auto* p = Phony(Source{{20, 2}});
- auto src = p->source;
- EXPECT_EQ(src.range.begin.line, 20u);
- EXPECT_EQ(src.range.begin.column, 2u);
+ auto src = p->source;
+ EXPECT_EQ(src.range.begin.line, 20u);
+ EXPECT_EQ(src.range.begin.column, 2u);
}
TEST_F(IdentifierExpressionTest, IsPhony) {
- auto* p = Phony();
- EXPECT_TRUE(p->Is<PhonyExpression>());
+ auto* p = Phony();
+ EXPECT_TRUE(p->Is<PhonyExpression>());
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/ast/pipeline_stage.cc b/chromium/third_party/dawn/src/tint/ast/pipeline_stage.cc
index fc604a1a8da..79157dae8f1 100644
--- a/chromium/third_party/dawn/src/tint/ast/pipeline_stage.cc
+++ b/chromium/third_party/dawn/src/tint/ast/pipeline_stage.cc
@@ -17,25 +17,25 @@
namespace tint::ast {
std::ostream& operator<<(std::ostream& out, PipelineStage stage) {
- switch (stage) {
- case PipelineStage::kNone: {
- out << "none";
- break;
+ switch (stage) {
+ case PipelineStage::kNone: {
+ out << "none";
+ break;
+ }
+ case PipelineStage::kVertex: {
+ out << "vertex";
+ break;
+ }
+ case PipelineStage::kFragment: {
+ out << "fragment";
+ break;
+ }
+ case PipelineStage::kCompute: {
+ out << "compute";
+ break;
+ }
}
- case PipelineStage::kVertex: {
- out << "vertex";
- break;
- }
- case PipelineStage::kFragment: {
- out << "fragment";
- break;
- }
- case PipelineStage::kCompute: {
- out << "compute";
- break;
- }
- }
- return out;
+ return out;
}
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/pointer.cc b/chromium/third_party/dawn/src/tint/ast/pointer.cc
index 74b1da74004..42c3fa9e864 100644
--- a/chromium/third_party/dawn/src/tint/ast/pointer.cc
+++ b/chromium/third_party/dawn/src/tint/ast/pointer.cc
@@ -28,17 +28,17 @@ Pointer::Pointer(ProgramID pid,
: Base(pid, src), type(subtype), storage_class(sc), access(ac) {}
std::string Pointer::FriendlyName(const SymbolTable& symbols) const {
- std::ostringstream out;
- out << "ptr<";
- if (storage_class != ast::StorageClass::kNone) {
- out << storage_class << ", ";
- }
- out << type->FriendlyName(symbols);
- if (access != ast::Access::kUndefined) {
- out << ", " << access;
- }
- out << ">";
- return out.str();
+ std::ostringstream out;
+ out << "ptr<";
+ if (storage_class != ast::StorageClass::kNone) {
+ out << storage_class << ", ";
+ }
+ out << type->FriendlyName(symbols);
+ if (access != ast::Access::kUndefined) {
+ out << ", " << access;
+ }
+ out << ">";
+ return out.str();
}
Pointer::Pointer(Pointer&&) = default;
@@ -46,10 +46,10 @@ Pointer::Pointer(Pointer&&) = default;
Pointer::~Pointer() = default;
const Pointer* Pointer::Clone(CloneContext* ctx) const {
- // Clone arguments outside of create() call to have deterministic ordering
- auto src = ctx->Clone(source);
- auto* ty = ctx->Clone(type);
- return ctx->dst->create<Pointer>(src, ty, storage_class, access);
+ // Clone arguments outside of create() call to have deterministic ordering
+ auto src = ctx->Clone(source);
+ auto* ty = ctx->Clone(type);
+ return ctx->dst->create<Pointer>(src, ty, storage_class, access);
}
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/pointer.h b/chromium/third_party/dawn/src/tint/ast/pointer.h
index 9742e78c442..030e844eb35 100644
--- a/chromium/third_party/dawn/src/tint/ast/pointer.h
+++ b/chromium/third_party/dawn/src/tint/ast/pointer.h
@@ -25,40 +25,40 @@ namespace tint::ast {
/// A pointer type.
class Pointer final : public Castable<Pointer, Type> {
- public:
- /// Constructor
- /// @param pid the identifier of the program that owns this node
- /// @param src the source of this node
- /// @param subtype the pointee type
- /// @param storage_class the storage class of the pointer
- /// @param access the access control of the pointer
- Pointer(ProgramID pid,
- const Source& src,
- const Type* const subtype,
- ast::StorageClass storage_class,
- ast::Access access);
- /// Move constructor
- Pointer(Pointer&&);
- ~Pointer() override;
+ public:
+ /// Constructor
+ /// @param pid the identifier of the program that owns this node
+ /// @param src the source of this node
+ /// @param subtype the pointee type
+ /// @param storage_class the storage class of the pointer
+ /// @param access the access control of the pointer
+ Pointer(ProgramID pid,
+ const Source& src,
+ const Type* const subtype,
+ ast::StorageClass storage_class,
+ ast::Access access);
+ /// Move constructor
+ Pointer(Pointer&&);
+ ~Pointer() override;
- /// @param symbols the program's symbol table
- /// @returns the name for this type that closely resembles how it would be
- /// declared in WGSL.
- std::string FriendlyName(const SymbolTable& symbols) const override;
+ /// @param symbols the program's symbol table
+ /// @returns the name for this type that closely resembles how it would be
+ /// declared in WGSL.
+ std::string FriendlyName(const SymbolTable& symbols) const override;
- /// Clones this type and all transitive types using the `CloneContext` `ctx`.
- /// @param ctx the clone context
- /// @return the newly cloned type
- const Pointer* Clone(CloneContext* ctx) const override;
+ /// Clones this type and all transitive types using the `CloneContext` `ctx`.
+ /// @param ctx the clone context
+ /// @return the newly cloned type
+ const Pointer* Clone(CloneContext* ctx) const override;
- /// The pointee type
- const Type* const type;
+ /// The pointee type
+ const Type* const type;
- /// The storage class of the pointer
- ast::StorageClass const storage_class;
+ /// The storage class of the pointer
+ ast::StorageClass const storage_class;
- /// The access control of the pointer
- ast::Access const access;
+ /// The access control of the pointer
+ ast::Access const access;
};
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/pointer_test.cc b/chromium/third_party/dawn/src/tint/ast/pointer_test.cc
index c755e1e1179..dbfd3b097b9 100644
--- a/chromium/third_party/dawn/src/tint/ast/pointer_test.cc
+++ b/chromium/third_party/dawn/src/tint/ast/pointer_test.cc
@@ -23,25 +23,23 @@ namespace {
using AstPointerTest = TestHelper;
TEST_F(AstPointerTest, Creation) {
- auto* i32 = create<I32>();
- auto* p = create<Pointer>(i32, ast::StorageClass::kStorage, Access::kRead);
- EXPECT_EQ(p->type, i32);
- EXPECT_EQ(p->storage_class, ast::StorageClass::kStorage);
- EXPECT_EQ(p->access, Access::kRead);
+ auto* i32 = create<I32>();
+ auto* p = create<Pointer>(i32, ast::StorageClass::kStorage, Access::kRead);
+ EXPECT_EQ(p->type, i32);
+ EXPECT_EQ(p->storage_class, ast::StorageClass::kStorage);
+ EXPECT_EQ(p->access, Access::kRead);
}
TEST_F(AstPointerTest, FriendlyName) {
- auto* i32 = create<I32>();
- auto* p =
- create<Pointer>(i32, ast::StorageClass::kWorkgroup, Access::kUndefined);
- EXPECT_EQ(p->FriendlyName(Symbols()), "ptr<workgroup, i32>");
+ auto* i32 = create<I32>();
+ auto* p = create<Pointer>(i32, ast::StorageClass::kWorkgroup, Access::kUndefined);
+ EXPECT_EQ(p->FriendlyName(Symbols()), "ptr<workgroup, i32>");
}
TEST_F(AstPointerTest, FriendlyNameWithAccess) {
- auto* i32 = create<I32>();
- auto* p =
- create<Pointer>(i32, ast::StorageClass::kStorage, Access::kReadWrite);
- EXPECT_EQ(p->FriendlyName(Symbols()), "ptr<storage, i32, read_write>");
+ auto* i32 = create<I32>();
+ auto* p = create<Pointer>(i32, ast::StorageClass::kStorage, Access::kReadWrite);
+ EXPECT_EQ(p->FriendlyName(Symbols()), "ptr<storage, i32, read_write>");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/ast/return_statement.cc b/chromium/third_party/dawn/src/tint/ast/return_statement.cc
index 0adbecb0a92..976c063ca4a 100644
--- a/chromium/third_party/dawn/src/tint/ast/return_statement.cc
+++ b/chromium/third_party/dawn/src/tint/ast/return_statement.cc
@@ -23,11 +23,9 @@ namespace tint::ast {
ReturnStatement::ReturnStatement(ProgramID pid, const Source& src)
: Base(pid, src), value(nullptr) {}
-ReturnStatement::ReturnStatement(ProgramID pid,
- const Source& src,
- const Expression* val)
+ReturnStatement::ReturnStatement(ProgramID pid, const Source& src, const Expression* val)
: Base(pid, src), value(val) {
- TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, value, program_id);
+ TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, value, program_id);
}
ReturnStatement::ReturnStatement(ReturnStatement&&) = default;
@@ -35,10 +33,10 @@ ReturnStatement::ReturnStatement(ReturnStatement&&) = default;
ReturnStatement::~ReturnStatement() = default;
const ReturnStatement* ReturnStatement::Clone(CloneContext* ctx) const {
- // Clone arguments outside of create() call to have deterministic ordering
- auto src = ctx->Clone(source);
- auto* ret = ctx->Clone(value);
- return ctx->dst->create<ReturnStatement>(src, ret);
+ // Clone arguments outside of create() call to have deterministic ordering
+ auto src = ctx->Clone(source);
+ auto* ret = ctx->Clone(value);
+ return ctx->dst->create<ReturnStatement>(src, ret);
}
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/return_statement.h b/chromium/third_party/dawn/src/tint/ast/return_statement.h
index d43874cc842..34d86782367 100644
--- a/chromium/third_party/dawn/src/tint/ast/return_statement.h
+++ b/chromium/third_party/dawn/src/tint/ast/return_statement.h
@@ -22,29 +22,29 @@ namespace tint::ast {
/// A return statement
class ReturnStatement final : public Castable<ReturnStatement, Statement> {
- public:
- /// Constructor
- /// @param pid the identifier of the program that owns this node
- /// @param src the source of this node
- ReturnStatement(ProgramID pid, const Source& src);
-
- /// Constructor
- /// @param pid the identifier of the program that owns this node
- /// @param src the source of this node
- /// @param value the return value
- ReturnStatement(ProgramID pid, const Source& src, const Expression* value);
- /// Move constructor
- ReturnStatement(ReturnStatement&&);
- ~ReturnStatement() override;
-
- /// Clones this node and all transitive child nodes using the `CloneContext`
- /// `ctx`.
- /// @param ctx the clone context
- /// @return the newly cloned node
- const ReturnStatement* Clone(CloneContext* ctx) const override;
-
- /// The value returned. May be null.
- const Expression* const value;
+ public:
+ /// Constructor
+ /// @param pid the identifier of the program that owns this node
+ /// @param src the source of this node
+ ReturnStatement(ProgramID pid, const Source& src);
+
+ /// Constructor
+ /// @param pid the identifier of the program that owns this node
+ /// @param src the source of this node
+ /// @param value the return value
+ ReturnStatement(ProgramID pid, const Source& src, const Expression* value);
+ /// Move constructor
+ ReturnStatement(ReturnStatement&&);
+ ~ReturnStatement() override;
+
+ /// Clones this node and all transitive child nodes using the `CloneContext`
+ /// `ctx`.
+ /// @param ctx the clone context
+ /// @return the newly cloned node
+ const ReturnStatement* Clone(CloneContext* ctx) const override;
+
+ /// The value returned. May be null.
+ const Expression* const value;
};
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/return_statement_test.cc b/chromium/third_party/dawn/src/tint/ast/return_statement_test.cc
index d17a89d1a0d..bbe860d360a 100644
--- a/chromium/third_party/dawn/src/tint/ast/return_statement_test.cc
+++ b/chromium/third_party/dawn/src/tint/ast/return_statement_test.cc
@@ -23,43 +23,43 @@ namespace {
using ReturnStatementTest = TestHelper;
TEST_F(ReturnStatementTest, Creation) {
- auto* expr = Expr("expr");
+ auto* expr = Expr("expr");
- auto* r = create<ReturnStatement>(expr);
- EXPECT_EQ(r->value, expr);
+ auto* r = create<ReturnStatement>(expr);
+ EXPECT_EQ(r->value, expr);
}
TEST_F(ReturnStatementTest, Creation_WithSource) {
- auto* r = create<ReturnStatement>(Source{Source::Location{20, 2}});
- auto src = r->source;
- EXPECT_EQ(src.range.begin.line, 20u);
- EXPECT_EQ(src.range.begin.column, 2u);
+ auto* r = create<ReturnStatement>(Source{Source::Location{20, 2}});
+ auto src = r->source;
+ EXPECT_EQ(src.range.begin.line, 20u);
+ EXPECT_EQ(src.range.begin.column, 2u);
}
TEST_F(ReturnStatementTest, IsReturn) {
- auto* r = create<ReturnStatement>();
- EXPECT_TRUE(r->Is<ReturnStatement>());
+ auto* r = create<ReturnStatement>();
+ EXPECT_TRUE(r->Is<ReturnStatement>());
}
TEST_F(ReturnStatementTest, WithoutValue) {
- auto* r = create<ReturnStatement>();
- EXPECT_EQ(r->value, nullptr);
+ auto* r = create<ReturnStatement>();
+ EXPECT_EQ(r->value, nullptr);
}
TEST_F(ReturnStatementTest, WithValue) {
- auto* expr = Expr("expr");
- auto* r = create<ReturnStatement>(expr);
- EXPECT_NE(r->value, nullptr);
+ auto* expr = Expr("expr");
+ auto* r = create<ReturnStatement>(expr);
+ EXPECT_NE(r->value, nullptr);
}
TEST_F(ReturnStatementTest, Assert_DifferentProgramID_Expr) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b1;
- ProgramBuilder b2;
- b1.create<ReturnStatement>(b2.Expr(true));
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b1;
+ ProgramBuilder b2;
+ b1.create<ReturnStatement>(b2.Expr(true));
+ },
+ "internal compiler error");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/ast/sampled_texture.cc b/chromium/third_party/dawn/src/tint/ast/sampled_texture.cc
index 21cb0f59322..9c4cea631fd 100644
--- a/chromium/third_party/dawn/src/tint/ast/sampled_texture.cc
+++ b/chromium/third_party/dawn/src/tint/ast/sampled_texture.cc
@@ -20,12 +20,9 @@ TINT_INSTANTIATE_TYPEINFO(tint::ast::SampledTexture);
namespace tint::ast {
-SampledTexture::SampledTexture(ProgramID pid,
- const Source& src,
- TextureDimension d,
- const Type* ty)
+SampledTexture::SampledTexture(ProgramID pid, const Source& src, TextureDimension d, const Type* ty)
: Base(pid, src, d), type(ty) {
- TINT_ASSERT(AST, type);
+ TINT_ASSERT(AST, type);
}
SampledTexture::SampledTexture(SampledTexture&&) = default;
@@ -33,16 +30,16 @@ SampledTexture::SampledTexture(SampledTexture&&) = default;
SampledTexture::~SampledTexture() = default;
std::string SampledTexture::FriendlyName(const SymbolTable& symbols) const {
- std::ostringstream out;
- out << "texture_" << dim << "<" << type->FriendlyName(symbols) << ">";
- return out.str();
+ std::ostringstream out;
+ out << "texture_" << dim << "<" << type->FriendlyName(symbols) << ">";
+ return out.str();
}
const SampledTexture* SampledTexture::Clone(CloneContext* ctx) const {
- // Clone arguments outside of create() call to have deterministic ordering
- auto src = ctx->Clone(source);
- auto* ty = ctx->Clone(type);
- return ctx->dst->create<SampledTexture>(src, dim, ty);
+ // Clone arguments outside of create() call to have deterministic ordering
+ auto src = ctx->Clone(source);
+ auto* ty = ctx->Clone(type);
+ return ctx->dst->create<SampledTexture>(src, dim, ty);
}
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/sampled_texture.h b/chromium/third_party/dawn/src/tint/ast/sampled_texture.h
index d7c28f5df68..f68fccf57ab 100644
--- a/chromium/third_party/dawn/src/tint/ast/sampled_texture.h
+++ b/chromium/third_party/dawn/src/tint/ast/sampled_texture.h
@@ -23,32 +23,29 @@ namespace tint::ast {
/// A sampled texture type.
class SampledTexture final : public Castable<SampledTexture, Texture> {
- public:
- /// Constructor
- /// @param pid the identifier of the program that owns this node
- /// @param src the source of this node
- /// @param dim the dimensionality of the texture
- /// @param type the data type of the sampled texture
- SampledTexture(ProgramID pid,
- const Source& src,
- TextureDimension dim,
- const Type* type);
- /// Move constructor
- SampledTexture(SampledTexture&&);
- ~SampledTexture() override;
-
- /// @param symbols the program's symbol table
- /// @returns the name for this type that closely resembles how it would be
- /// declared in WGSL.
- std::string FriendlyName(const SymbolTable& symbols) const override;
-
- /// Clones this type and all transitive types using the `CloneContext` `ctx`.
- /// @param ctx the clone context
- /// @return the newly cloned type
- const SampledTexture* Clone(CloneContext* ctx) const override;
-
- /// The subtype of the sampled texture
- const Type* const type;
+ public:
+ /// Constructor
+ /// @param pid the identifier of the program that owns this node
+ /// @param src the source of this node
+ /// @param dim the dimensionality of the texture
+ /// @param type the data type of the sampled texture
+ SampledTexture(ProgramID pid, const Source& src, TextureDimension dim, const Type* type);
+ /// Move constructor
+ SampledTexture(SampledTexture&&);
+ ~SampledTexture() override;
+
+ /// @param symbols the program's symbol table
+ /// @returns the name for this type that closely resembles how it would be
+ /// declared in WGSL.
+ std::string FriendlyName(const SymbolTable& symbols) const override;
+
+ /// Clones this type and all transitive types using the `CloneContext` `ctx`.
+ /// @param ctx the clone context
+ /// @return the newly cloned type
+ const SampledTexture* Clone(CloneContext* ctx) const override;
+
+ /// The subtype of the sampled texture
+ const Type* const type;
};
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/sampled_texture_test.cc b/chromium/third_party/dawn/src/tint/ast/sampled_texture_test.cc
index d89ff96bd43..f85d2dff43d 100644
--- a/chromium/third_party/dawn/src/tint/ast/sampled_texture_test.cc
+++ b/chromium/third_party/dawn/src/tint/ast/sampled_texture_test.cc
@@ -23,29 +23,29 @@ namespace {
using AstSampledTextureTest = TestHelper;
TEST_F(AstSampledTextureTest, IsTexture) {
- auto* f32 = create<F32>();
- Texture* ty = create<SampledTexture>(TextureDimension::kCube, f32);
- EXPECT_FALSE(ty->Is<DepthTexture>());
- EXPECT_TRUE(ty->Is<SampledTexture>());
- EXPECT_FALSE(ty->Is<StorageTexture>());
+ auto* f32 = create<F32>();
+ Texture* ty = create<SampledTexture>(TextureDimension::kCube, f32);
+ EXPECT_FALSE(ty->Is<DepthTexture>());
+ EXPECT_TRUE(ty->Is<SampledTexture>());
+ EXPECT_FALSE(ty->Is<StorageTexture>());
}
TEST_F(AstSampledTextureTest, Dim) {
- auto* f32 = create<F32>();
- auto* s = create<SampledTexture>(TextureDimension::k3d, f32);
- EXPECT_EQ(s->dim, TextureDimension::k3d);
+ auto* f32 = create<F32>();
+ auto* s = create<SampledTexture>(TextureDimension::k3d, f32);
+ EXPECT_EQ(s->dim, TextureDimension::k3d);
}
TEST_F(AstSampledTextureTest, Type) {
- auto* f32 = create<F32>();
- auto* s = create<SampledTexture>(TextureDimension::k3d, f32);
- EXPECT_EQ(s->type, f32);
+ auto* f32 = create<F32>();
+ auto* s = create<SampledTexture>(TextureDimension::k3d, f32);
+ EXPECT_EQ(s->type, f32);
}
TEST_F(AstSampledTextureTest, FriendlyName) {
- auto* f32 = create<F32>();
- auto* s = create<SampledTexture>(TextureDimension::k3d, f32);
- EXPECT_EQ(s->FriendlyName(Symbols()), "texture_3d<f32>");
+ auto* f32 = create<F32>();
+ auto* s = create<SampledTexture>(TextureDimension::k3d, f32);
+ EXPECT_EQ(s->FriendlyName(Symbols()), "texture_3d<f32>");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/ast/sampler.cc b/chromium/third_party/dawn/src/tint/ast/sampler.cc
index 68eddaf0742..5d88bf86067 100644
--- a/chromium/third_party/dawn/src/tint/ast/sampler.cc
+++ b/chromium/third_party/dawn/src/tint/ast/sampler.cc
@@ -21,31 +21,30 @@ TINT_INSTANTIATE_TYPEINFO(tint::ast::Sampler);
namespace tint::ast {
std::ostream& operator<<(std::ostream& out, SamplerKind kind) {
- switch (kind) {
- case SamplerKind::kSampler:
- out << "sampler";
- break;
- case SamplerKind::kComparisonSampler:
- out << "comparison_sampler";
- break;
- }
- return out;
+ switch (kind) {
+ case SamplerKind::kSampler:
+ out << "sampler";
+ break;
+ case SamplerKind::kComparisonSampler:
+ out << "comparison_sampler";
+ break;
+ }
+ return out;
}
-Sampler::Sampler(ProgramID pid, const Source& src, SamplerKind k)
- : Base(pid, src), kind(k) {}
+Sampler::Sampler(ProgramID pid, const Source& src, SamplerKind k) : Base(pid, src), kind(k) {}
Sampler::Sampler(Sampler&&) = default;
Sampler::~Sampler() = default;
std::string Sampler::FriendlyName(const SymbolTable&) const {
- return kind == SamplerKind::kSampler ? "sampler" : "sampler_comparison";
+ return kind == SamplerKind::kSampler ? "sampler" : "sampler_comparison";
}
const Sampler* Sampler::Clone(CloneContext* ctx) const {
- auto src = ctx->Clone(source);
- return ctx->dst->create<Sampler>(src, kind);
+ auto src = ctx->Clone(source);
+ return ctx->dst->create<Sampler>(src, kind);
}
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/sampler.h b/chromium/third_party/dawn/src/tint/ast/sampler.h
index 18c6fa2c3ef..067fc386181 100644
--- a/chromium/third_party/dawn/src/tint/ast/sampler.h
+++ b/chromium/third_party/dawn/src/tint/ast/sampler.h
@@ -23,10 +23,10 @@ namespace tint::ast {
/// The different kinds of samplers
enum class SamplerKind {
- /// A regular sampler
- kSampler,
- /// A comparison sampler
- kComparisonSampler
+ /// A regular sampler
+ kSampler,
+ /// A comparison sampler
+ kComparisonSampler
};
/// @param out the std::ostream to write to
@@ -36,31 +36,31 @@ std::ostream& operator<<(std::ostream& out, SamplerKind kind);
/// A sampler type.
class Sampler final : public Castable<Sampler, Type> {
- public:
- /// Constructor
- /// @param pid the identifier of the program that owns this node
- /// @param src the source of this node
- /// @param kind the kind of sampler
- Sampler(ProgramID pid, const Source& src, SamplerKind kind);
- /// Move constructor
- Sampler(Sampler&&);
- ~Sampler() override;
+ public:
+ /// Constructor
+ /// @param pid the identifier of the program that owns this node
+ /// @param src the source of this node
+ /// @param kind the kind of sampler
+ Sampler(ProgramID pid, const Source& src, SamplerKind kind);
+ /// Move constructor
+ Sampler(Sampler&&);
+ ~Sampler() override;
- /// @returns true if this is a comparison sampler
- bool IsComparison() const { return kind == SamplerKind::kComparisonSampler; }
+ /// @returns true if this is a comparison sampler
+ bool IsComparison() const { return kind == SamplerKind::kComparisonSampler; }
- /// @param symbols the program's symbol table
- /// @returns the name for this type that closely resembles how it would be
- /// declared in WGSL.
- std::string FriendlyName(const SymbolTable& symbols) const override;
+ /// @param symbols the program's symbol table
+ /// @returns the name for this type that closely resembles how it would be
+ /// declared in WGSL.
+ std::string FriendlyName(const SymbolTable& symbols) const override;
- /// Clones this type and all transitive types using the `CloneContext` `ctx`.
- /// @param ctx the clone context
- /// @return the newly cloned type
- const Sampler* Clone(CloneContext* ctx) const override;
+ /// Clones this type and all transitive types using the `CloneContext` `ctx`.
+ /// @param ctx the clone context
+ /// @return the newly cloned type
+ const Sampler* Clone(CloneContext* ctx) const override;
- /// The sampler type
- const SamplerKind kind;
+ /// The sampler type
+ const SamplerKind kind;
};
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/sampler_test.cc b/chromium/third_party/dawn/src/tint/ast/sampler_test.cc
index 16e21290049..14500d95af0 100644
--- a/chromium/third_party/dawn/src/tint/ast/sampler_test.cc
+++ b/chromium/third_party/dawn/src/tint/ast/sampler_test.cc
@@ -22,24 +22,24 @@ namespace {
using AstSamplerTest = TestHelper;
TEST_F(AstSamplerTest, Creation) {
- auto* s = create<Sampler>(SamplerKind::kSampler);
- EXPECT_EQ(s->kind, SamplerKind::kSampler);
+ auto* s = create<Sampler>(SamplerKind::kSampler);
+ EXPECT_EQ(s->kind, SamplerKind::kSampler);
}
TEST_F(AstSamplerTest, Creation_ComparisonSampler) {
- auto* s = create<Sampler>(SamplerKind::kComparisonSampler);
- EXPECT_EQ(s->kind, SamplerKind::kComparisonSampler);
- EXPECT_TRUE(s->IsComparison());
+ auto* s = create<Sampler>(SamplerKind::kComparisonSampler);
+ EXPECT_EQ(s->kind, SamplerKind::kComparisonSampler);
+ EXPECT_TRUE(s->IsComparison());
}
TEST_F(AstSamplerTest, FriendlyNameSampler) {
- auto* s = create<Sampler>(SamplerKind::kSampler);
- EXPECT_EQ(s->FriendlyName(Symbols()), "sampler");
+ auto* s = create<Sampler>(SamplerKind::kSampler);
+ EXPECT_EQ(s->FriendlyName(Symbols()), "sampler");
}
TEST_F(AstSamplerTest, FriendlyNameComparisonSampler) {
- auto* s = create<Sampler>(SamplerKind::kComparisonSampler);
- EXPECT_EQ(s->FriendlyName(Symbols()), "sampler_comparison");
+ auto* s = create<Sampler>(SamplerKind::kComparisonSampler);
+ EXPECT_EQ(s->FriendlyName(Symbols()), "sampler_comparison");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/ast/sint_literal_expression.cc b/chromium/third_party/dawn/src/tint/ast/sint_literal_expression.cc
deleted file mode 100644
index 5a215ece801..00000000000
--- a/chromium/third_party/dawn/src/tint/ast/sint_literal_expression.cc
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2020 The Tint Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "src/tint/ast/sint_literal_expression.h"
-
-#include "src/tint/program_builder.h"
-
-TINT_INSTANTIATE_TYPEINFO(tint::ast::SintLiteralExpression);
-
-namespace tint::ast {
-
-SintLiteralExpression::SintLiteralExpression(ProgramID pid,
- const Source& src,
- int32_t val)
- : Base(pid, src), value(val) {}
-
-SintLiteralExpression::~SintLiteralExpression() = default;
-
-uint32_t SintLiteralExpression::ValueAsU32() const {
- return static_cast<uint32_t>(value);
-}
-
-const SintLiteralExpression* SintLiteralExpression::Clone(
- CloneContext* ctx) const {
- // Clone arguments outside of create() call to have deterministic ordering
- auto src = ctx->Clone(source);
- return ctx->dst->create<SintLiteralExpression>(src, value);
-}
-
-} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/sint_literal_expression.h b/chromium/third_party/dawn/src/tint/ast/sint_literal_expression.h
deleted file mode 100644
index 74d60fc250f..00000000000
--- a/chromium/third_party/dawn/src/tint/ast/sint_literal_expression.h
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2020 The Tint Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef SRC_TINT_AST_SINT_LITERAL_EXPRESSION_H_
-#define SRC_TINT_AST_SINT_LITERAL_EXPRESSION_H_
-
-#include <string>
-
-#include "src/tint/ast/int_literal_expression.h"
-
-namespace tint::ast {
-
-/// A signed int literal
-class SintLiteralExpression final
- : public Castable<SintLiteralExpression, IntLiteralExpression> {
- public:
- /// Constructor
- /// @param pid the identifier of the program that owns this node
- /// @param src the source of this node
- /// @param value the signed int literals value
- SintLiteralExpression(ProgramID pid, const Source& src, int32_t value);
- ~SintLiteralExpression() override;
-
- /// @returns the literal value as a u32
- uint32_t ValueAsU32() const override;
-
- /// Clones this node and all transitive child nodes using the `CloneContext`
- /// `ctx`.
- /// @param ctx the clone context
- /// @return the newly cloned node
- const SintLiteralExpression* Clone(CloneContext* ctx) const override;
-
- /// The int literal value
- const int32_t value;
-};
-
-} // namespace tint::ast
-
-#endif // SRC_TINT_AST_SINT_LITERAL_EXPRESSION_H_
diff --git a/chromium/third_party/dawn/src/tint/ast/stage_attribute.cc b/chromium/third_party/dawn/src/tint/ast/stage_attribute.cc
index 0b596336d81..51cfe8c2629 100644
--- a/chromium/third_party/dawn/src/tint/ast/stage_attribute.cc
+++ b/chromium/third_party/dawn/src/tint/ast/stage_attribute.cc
@@ -22,21 +22,19 @@ TINT_INSTANTIATE_TYPEINFO(tint::ast::StageAttribute);
namespace tint::ast {
-StageAttribute::StageAttribute(ProgramID pid,
- const Source& src,
- PipelineStage s)
+StageAttribute::StageAttribute(ProgramID pid, const Source& src, PipelineStage s)
: Base(pid, src), stage(s) {}
StageAttribute::~StageAttribute() = default;
std::string StageAttribute::Name() const {
- return "stage";
+ return "stage";
}
const StageAttribute* StageAttribute::Clone(CloneContext* ctx) const {
- // Clone arguments outside of create() call to have deterministic ordering
- auto src = ctx->Clone(source);
- return ctx->dst->create<StageAttribute>(src, stage);
+ // Clone arguments outside of create() call to have deterministic ordering
+ auto src = ctx->Clone(source);
+ return ctx->dst->create<StageAttribute>(src, stage);
}
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/stage_attribute.h b/chromium/third_party/dawn/src/tint/ast/stage_attribute.h
index 6195eaa9972..a447d1f78c8 100644
--- a/chromium/third_party/dawn/src/tint/ast/stage_attribute.h
+++ b/chromium/third_party/dawn/src/tint/ast/stage_attribute.h
@@ -24,27 +24,25 @@ namespace tint::ast {
/// A workgroup attribute
class StageAttribute final : public Castable<StageAttribute, Attribute> {
- public:
- /// constructor
- /// @param program_id the identifier of the program that owns this node
- /// @param stage the pipeline stage
- /// @param source the source of this attribute
- StageAttribute(ProgramID program_id,
- const Source& source,
- PipelineStage stage);
- ~StageAttribute() override;
-
- /// @returns the WGSL name for the attribute
- std::string Name() const override;
-
- /// Clones this node and all transitive child nodes using the `CloneContext`
- /// `ctx`.
- /// @param ctx the clone context
- /// @return the newly cloned node
- const StageAttribute* Clone(CloneContext* ctx) const override;
-
- /// The pipeline stage
- const PipelineStage stage;
+ public:
+ /// constructor
+ /// @param program_id the identifier of the program that owns this node
+ /// @param stage the pipeline stage
+ /// @param source the source of this attribute
+ StageAttribute(ProgramID program_id, const Source& source, PipelineStage stage);
+ ~StageAttribute() override;
+
+ /// @returns the WGSL name for the attribute
+ std::string Name() const override;
+
+ /// Clones this node and all transitive child nodes using the `CloneContext`
+ /// `ctx`.
+ /// @param ctx the clone context
+ /// @return the newly cloned node
+ const StageAttribute* Clone(CloneContext* ctx) const override;
+
+ /// The pipeline stage
+ const PipelineStage stage;
};
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/stage_attribute_test.cc b/chromium/third_party/dawn/src/tint/ast/stage_attribute_test.cc
index f1d8b5a4902..d4baaf2cbb1 100644
--- a/chromium/third_party/dawn/src/tint/ast/stage_attribute_test.cc
+++ b/chromium/third_party/dawn/src/tint/ast/stage_attribute_test.cc
@@ -23,8 +23,8 @@ namespace {
using StageAttributeTest = TestHelper;
TEST_F(StageAttributeTest, Creation_1param) {
- auto* d = create<StageAttribute>(PipelineStage::kFragment);
- EXPECT_EQ(d->stage, PipelineStage::kFragment);
+ auto* d = create<StageAttribute>(PipelineStage::kFragment);
+ EXPECT_EQ(d->stage, PipelineStage::kFragment);
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/ast/statement.cc b/chromium/third_party/dawn/src/tint/ast/statement.cc
index 0d8750dcefd..12a1cc9406e 100644
--- a/chromium/third_party/dawn/src/tint/ast/statement.cc
+++ b/chromium/third_party/dawn/src/tint/ast/statement.cc
@@ -37,49 +37,46 @@ Statement::Statement(Statement&&) = default;
Statement::~Statement() = default;
const char* Statement::Name() const {
- if (Is<AssignmentStatement>()) {
- return "assignment statement";
- }
- if (Is<BlockStatement>()) {
- return "block statement";
- }
- if (Is<BreakStatement>()) {
- return "break statement";
- }
- if (Is<CaseStatement>()) {
- return "case statement";
- }
- if (Is<CallStatement>()) {
- return "function call";
- }
- if (Is<ContinueStatement>()) {
- return "continue statement";
- }
- if (Is<DiscardStatement>()) {
- return "discard statement";
- }
- if (Is<ElseStatement>()) {
- return "else statement";
- }
- if (Is<FallthroughStatement>()) {
- return "fallthrough statement";
- }
- if (Is<IfStatement>()) {
- return "if statement";
- }
- if (Is<LoopStatement>()) {
- return "loop statement";
- }
- if (Is<ReturnStatement>()) {
- return "return statement";
- }
- if (Is<SwitchStatement>()) {
- return "switch statement";
- }
- if (Is<VariableDeclStatement>()) {
- return "variable declaration";
- }
- return "statement";
+ if (Is<AssignmentStatement>()) {
+ return "assignment statement";
+ }
+ if (Is<BlockStatement>()) {
+ return "block statement";
+ }
+ if (Is<BreakStatement>()) {
+ return "break statement";
+ }
+ if (Is<CaseStatement>()) {
+ return "case statement";
+ }
+ if (Is<CallStatement>()) {
+ return "function call";
+ }
+ if (Is<ContinueStatement>()) {
+ return "continue statement";
+ }
+ if (Is<DiscardStatement>()) {
+ return "discard statement";
+ }
+ if (Is<FallthroughStatement>()) {
+ return "fallthrough statement";
+ }
+ if (Is<IfStatement>()) {
+ return "if statement";
+ }
+ if (Is<LoopStatement>()) {
+ return "loop statement";
+ }
+ if (Is<ReturnStatement>()) {
+ return "return statement";
+ }
+ if (Is<SwitchStatement>()) {
+ return "switch statement";
+ }
+ if (Is<VariableDeclStatement>()) {
+ return "variable declaration";
+ }
+ return "statement";
}
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/statement.h b/chromium/third_party/dawn/src/tint/ast/statement.h
index 931e1b5ab6c..94de2472416 100644
--- a/chromium/third_party/dawn/src/tint/ast/statement.h
+++ b/chromium/third_party/dawn/src/tint/ast/statement.h
@@ -23,19 +23,19 @@ namespace tint::ast {
/// Base statement class
class Statement : public Castable<Statement, Node> {
- public:
- ~Statement() override;
-
- /// @returns the human readable name for the statement type.
- const char* Name() const;
-
- protected:
- /// Constructor
- /// @param pid the identifier of the program that owns this node
- /// @param src the source of the expression
- Statement(ProgramID pid, const Source& src);
- /// Move constructor
- Statement(Statement&&);
+ public:
+ ~Statement() override;
+
+ /// @returns the human readable name for the statement type.
+ const char* Name() const;
+
+ protected:
+ /// Constructor
+ /// @param pid the identifier of the program that owns this node
+ /// @param src the source of the expression
+ Statement(ProgramID pid, const Source& src);
+ /// Move constructor
+ Statement(Statement&&);
};
/// A list of statements
diff --git a/chromium/third_party/dawn/src/tint/ast/storage_class.cc b/chromium/third_party/dawn/src/tint/ast/storage_class.cc
index 00d6d92fe07..31612717d61 100644
--- a/chromium/third_party/dawn/src/tint/ast/storage_class.cc
+++ b/chromium/third_party/dawn/src/tint/ast/storage_class.cc
@@ -17,33 +17,33 @@
namespace tint::ast {
const char* ToString(StorageClass sc) {
- switch (sc) {
- case StorageClass::kInvalid:
- return "invalid";
- case StorageClass::kNone:
- return "none";
- case StorageClass::kInput:
- return "in";
- case StorageClass::kOutput:
- return "out";
- case StorageClass::kUniform:
- return "uniform";
- case StorageClass::kWorkgroup:
- return "workgroup";
- case StorageClass::kUniformConstant:
- return "uniform_constant";
- case StorageClass::kStorage:
- return "storage";
- case StorageClass::kPrivate:
- return "private";
- case StorageClass::kFunction:
- return "function";
- }
- return "<unknown>";
+ switch (sc) {
+ case StorageClass::kInvalid:
+ return "invalid";
+ case StorageClass::kNone:
+ return "none";
+ case StorageClass::kInput:
+ return "in";
+ case StorageClass::kOutput:
+ return "out";
+ case StorageClass::kUniform:
+ return "uniform";
+ case StorageClass::kWorkgroup:
+ return "workgroup";
+ case StorageClass::kHandle:
+ return "handle";
+ case StorageClass::kStorage:
+ return "storage";
+ case StorageClass::kPrivate:
+ return "private";
+ case StorageClass::kFunction:
+ return "function";
+ }
+ return "<unknown>";
}
std::ostream& operator<<(std::ostream& out, StorageClass sc) {
- out << ToString(sc);
- return out;
+ out << ToString(sc);
+ return out;
}
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/storage_class.h b/chromium/third_party/dawn/src/tint/ast/storage_class.h
index dc7a94a4dee..d4e64792d03 100644
--- a/chromium/third_party/dawn/src/tint/ast/storage_class.h
+++ b/chromium/third_party/dawn/src/tint/ast/storage_class.h
@@ -21,23 +21,23 @@ namespace tint::ast {
/// Storage class of a given pointer.
enum class StorageClass {
- kInvalid = -1,
- kNone,
- kInput,
- kOutput,
- kUniform,
- kWorkgroup,
- kUniformConstant,
- kStorage,
- kPrivate,
- kFunction
+ kInvalid = -1,
+ kNone,
+ kInput,
+ kOutput,
+ kUniform,
+ kWorkgroup,
+ kHandle,
+ kStorage,
+ kPrivate,
+ kFunction
};
/// @returns true if the StorageClass is host-shareable
/// @param sc the StorageClass
/// @see https://gpuweb.github.io/gpuweb/wgsl.html#host-shareable
inline bool IsHostShareable(StorageClass sc) {
- return sc == ast::StorageClass::kUniform || sc == ast::StorageClass::kStorage;
+ return sc == ast::StorageClass::kUniform || sc == ast::StorageClass::kStorage;
}
/// @param sc the StorageClass
diff --git a/chromium/third_party/dawn/src/tint/ast/storage_texture.cc b/chromium/third_party/dawn/src/tint/ast/storage_texture.cc
index 170b2ee7526..ccc250d9fbc 100644
--- a/chromium/third_party/dawn/src/tint/ast/storage_texture.cc
+++ b/chromium/third_party/dawn/src/tint/ast/storage_texture.cc
@@ -26,60 +26,60 @@ namespace tint::ast {
// Note, these names match the names in the WGSL spec. This behaviour is used
// in the WGSL writer to emit the texture format names.
std::ostream& operator<<(std::ostream& out, TexelFormat format) {
- switch (format) {
- case TexelFormat::kNone:
- out << "none";
- break;
- case TexelFormat::kR32Uint:
- out << "r32uint";
- break;
- case TexelFormat::kR32Sint:
- out << "r32sint";
- break;
- case TexelFormat::kR32Float:
- out << "r32float";
- break;
- case TexelFormat::kRgba8Unorm:
- out << "rgba8unorm";
- break;
- case TexelFormat::kRgba8Snorm:
- out << "rgba8snorm";
- break;
- case TexelFormat::kRgba8Uint:
- out << "rgba8uint";
- break;
- case TexelFormat::kRgba8Sint:
- out << "rgba8sint";
- break;
- case TexelFormat::kRg32Uint:
- out << "rg32uint";
- break;
- case TexelFormat::kRg32Sint:
- out << "rg32sint";
- break;
- case TexelFormat::kRg32Float:
- out << "rg32float";
- break;
- case TexelFormat::kRgba16Uint:
- out << "rgba16uint";
- break;
- case TexelFormat::kRgba16Sint:
- out << "rgba16sint";
- break;
- case TexelFormat::kRgba16Float:
- out << "rgba16float";
- break;
- case TexelFormat::kRgba32Uint:
- out << "rgba32uint";
- break;
- case TexelFormat::kRgba32Sint:
- out << "rgba32sint";
- break;
- case TexelFormat::kRgba32Float:
- out << "rgba32float";
- break;
- }
- return out;
+ switch (format) {
+ case TexelFormat::kNone:
+ out << "none";
+ break;
+ case TexelFormat::kR32Uint:
+ out << "r32uint";
+ break;
+ case TexelFormat::kR32Sint:
+ out << "r32sint";
+ break;
+ case TexelFormat::kR32Float:
+ out << "r32float";
+ break;
+ case TexelFormat::kRgba8Unorm:
+ out << "rgba8unorm";
+ break;
+ case TexelFormat::kRgba8Snorm:
+ out << "rgba8snorm";
+ break;
+ case TexelFormat::kRgba8Uint:
+ out << "rgba8uint";
+ break;
+ case TexelFormat::kRgba8Sint:
+ out << "rgba8sint";
+ break;
+ case TexelFormat::kRg32Uint:
+ out << "rg32uint";
+ break;
+ case TexelFormat::kRg32Sint:
+ out << "rg32sint";
+ break;
+ case TexelFormat::kRg32Float:
+ out << "rg32float";
+ break;
+ case TexelFormat::kRgba16Uint:
+ out << "rgba16uint";
+ break;
+ case TexelFormat::kRgba16Sint:
+ out << "rgba16sint";
+ break;
+ case TexelFormat::kRgba16Float:
+ out << "rgba16float";
+ break;
+ case TexelFormat::kRgba32Uint:
+ out << "rgba32uint";
+ break;
+ case TexelFormat::kRgba32Sint:
+ out << "rgba32sint";
+ break;
+ case TexelFormat::kRgba32Float:
+ out << "rgba32float";
+ break;
+ }
+ return out;
}
StorageTexture::StorageTexture(ProgramID pid,
@@ -95,50 +95,50 @@ StorageTexture::StorageTexture(StorageTexture&&) = default;
StorageTexture::~StorageTexture() = default;
std::string StorageTexture::FriendlyName(const SymbolTable&) const {
- std::ostringstream out;
- out << "texture_storage_" << dim << "<" << format << ", " << access << ">";
- return out.str();
+ std::ostringstream out;
+ out << "texture_storage_" << dim << "<" << format << ", " << access << ">";
+ return out.str();
}
const StorageTexture* StorageTexture::Clone(CloneContext* ctx) const {
- // Clone arguments outside of create() call to have deterministic ordering
- auto src = ctx->Clone(source);
- auto* ty = ctx->Clone(type);
- return ctx->dst->create<StorageTexture>(src, dim, format, ty, access);
+ // Clone arguments outside of create() call to have deterministic ordering
+ auto src = ctx->Clone(source);
+ auto* ty = ctx->Clone(type);
+ return ctx->dst->create<StorageTexture>(src, dim, format, ty, access);
}
Type* StorageTexture::SubtypeFor(TexelFormat format, ProgramBuilder& builder) {
- switch (format) {
- case TexelFormat::kR32Uint:
- case TexelFormat::kRgba8Uint:
- case TexelFormat::kRg32Uint:
- case TexelFormat::kRgba16Uint:
- case TexelFormat::kRgba32Uint: {
- return builder.create<U32>();
- }
-
- case TexelFormat::kR32Sint:
- case TexelFormat::kRgba8Sint:
- case TexelFormat::kRg32Sint:
- case TexelFormat::kRgba16Sint:
- case TexelFormat::kRgba32Sint: {
- return builder.create<I32>();
+ switch (format) {
+ case TexelFormat::kR32Uint:
+ case TexelFormat::kRgba8Uint:
+ case TexelFormat::kRg32Uint:
+ case TexelFormat::kRgba16Uint:
+ case TexelFormat::kRgba32Uint: {
+ return builder.create<U32>();
+ }
+
+ case TexelFormat::kR32Sint:
+ case TexelFormat::kRgba8Sint:
+ case TexelFormat::kRg32Sint:
+ case TexelFormat::kRgba16Sint:
+ case TexelFormat::kRgba32Sint: {
+ return builder.create<I32>();
+ }
+
+ case TexelFormat::kRgba8Unorm:
+ case TexelFormat::kRgba8Snorm:
+ case TexelFormat::kR32Float:
+ case TexelFormat::kRg32Float:
+ case TexelFormat::kRgba16Float:
+ case TexelFormat::kRgba32Float: {
+ return builder.create<F32>();
+ }
+
+ case TexelFormat::kNone:
+ break;
}
- case TexelFormat::kRgba8Unorm:
- case TexelFormat::kRgba8Snorm:
- case TexelFormat::kR32Float:
- case TexelFormat::kRg32Float:
- case TexelFormat::kRgba16Float:
- case TexelFormat::kRgba32Float: {
- return builder.create<F32>();
- }
-
- case TexelFormat::kNone:
- break;
- }
-
- return nullptr;
+ return nullptr;
}
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/storage_texture.h b/chromium/third_party/dawn/src/tint/ast/storage_texture.h
index ca962c1ebaf..3cf779e244d 100644
--- a/chromium/third_party/dawn/src/tint/ast/storage_texture.h
+++ b/chromium/third_party/dawn/src/tint/ast/storage_texture.h
@@ -24,23 +24,23 @@ namespace tint::ast {
/// The texel format in the storage texture
enum class TexelFormat {
- kNone = -1,
- kRgba8Unorm,
- kRgba8Snorm,
- kRgba8Uint,
- kRgba8Sint,
- kRgba16Uint,
- kRgba16Sint,
- kRgba16Float,
- kR32Uint,
- kR32Sint,
- kR32Float,
- kRg32Uint,
- kRg32Sint,
- kRg32Float,
- kRgba32Uint,
- kRgba32Sint,
- kRgba32Float,
+ kNone = -1,
+ kRgba8Unorm,
+ kRgba8Snorm,
+ kRgba8Uint,
+ kRgba8Sint,
+ kRgba16Uint,
+ kRgba16Sint,
+ kRgba16Float,
+ kR32Uint,
+ kR32Sint,
+ kR32Float,
+ kRg32Uint,
+ kRg32Sint,
+ kRg32Float,
+ kRgba32Uint,
+ kRgba32Sint,
+ kRgba32Float,
};
/// @param out the std::ostream to write to
@@ -50,48 +50,48 @@ std::ostream& operator<<(std::ostream& out, TexelFormat format);
/// A storage texture type.
class StorageTexture final : public Castable<StorageTexture, Texture> {
- public:
- /// Constructor
- /// @param pid the identifier of the program that owns this node
- /// @param src the source of this node
- /// @param dim the dimensionality of the texture
- /// @param format the image format of the texture
- /// @param subtype the storage subtype. Use SubtypeFor() to calculate this.
- /// @param access_control the access control for the texture.
- StorageTexture(ProgramID pid,
- const Source& src,
- TextureDimension dim,
- TexelFormat format,
- const Type* subtype,
- Access access_control);
-
- /// Move constructor
- StorageTexture(StorageTexture&&);
- ~StorageTexture() override;
-
- /// @param symbols the program's symbol table
- /// @returns the name for this type that closely resembles how it would be
- /// declared in WGSL.
- std::string FriendlyName(const SymbolTable& symbols) const override;
-
- /// Clones this type and all transitive types using the `CloneContext` `ctx`.
- /// @param ctx the clone context
- /// @return the newly cloned type
- const StorageTexture* Clone(CloneContext* ctx) const override;
-
- /// @param format the storage texture image format
- /// @param builder the ProgramBuilder used to build the returned type
- /// @returns the storage texture subtype for the given TexelFormat
- static Type* SubtypeFor(TexelFormat format, ProgramBuilder& builder);
-
- /// The image format
- const TexelFormat format;
-
- /// The storage subtype
- const Type* const type;
-
- /// The access control
- const Access access;
+ public:
+ /// Constructor
+ /// @param pid the identifier of the program that owns this node
+ /// @param src the source of this node
+ /// @param dim the dimensionality of the texture
+ /// @param format the image format of the texture
+ /// @param subtype the storage subtype. Use SubtypeFor() to calculate this.
+ /// @param access_control the access control for the texture.
+ StorageTexture(ProgramID pid,
+ const Source& src,
+ TextureDimension dim,
+ TexelFormat format,
+ const Type* subtype,
+ Access access_control);
+
+ /// Move constructor
+ StorageTexture(StorageTexture&&);
+ ~StorageTexture() override;
+
+ /// @param symbols the program's symbol table
+ /// @returns the name for this type that closely resembles how it would be
+ /// declared in WGSL.
+ std::string FriendlyName(const SymbolTable& symbols) const override;
+
+ /// Clones this type and all transitive types using the `CloneContext` `ctx`.
+ /// @param ctx the clone context
+ /// @return the newly cloned type
+ const StorageTexture* Clone(CloneContext* ctx) const override;
+
+ /// @param format the storage texture image format
+ /// @param builder the ProgramBuilder used to build the returned type
+ /// @returns the storage texture subtype for the given TexelFormat
+ static Type* SubtypeFor(TexelFormat format, ProgramBuilder& builder);
+
+ /// The image format
+ const TexelFormat format;
+
+ /// The storage subtype
+ const Type* const type;
+
+ /// The access control
+ const Access access;
};
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/storage_texture_test.cc b/chromium/third_party/dawn/src/tint/ast/storage_texture_test.cc
index 77447a8488c..8f1b221dd95 100644
--- a/chromium/third_party/dawn/src/tint/ast/storage_texture_test.cc
+++ b/chromium/third_party/dawn/src/tint/ast/storage_texture_test.cc
@@ -22,71 +22,63 @@ namespace {
using AstStorageTextureTest = TestHelper;
TEST_F(AstStorageTextureTest, IsTexture) {
- auto* subtype = StorageTexture::SubtypeFor(TexelFormat::kRgba32Float, *this);
- Texture* ty =
- create<StorageTexture>(TextureDimension::k2dArray,
- TexelFormat::kRgba32Float, subtype, Access::kRead);
- EXPECT_FALSE(ty->Is<DepthTexture>());
- EXPECT_FALSE(ty->Is<SampledTexture>());
- EXPECT_TRUE(ty->Is<StorageTexture>());
+ auto* subtype = StorageTexture::SubtypeFor(TexelFormat::kRgba32Float, *this);
+ Texture* ty = create<StorageTexture>(TextureDimension::k2dArray, TexelFormat::kRgba32Float,
+ subtype, Access::kRead);
+ EXPECT_FALSE(ty->Is<DepthTexture>());
+ EXPECT_FALSE(ty->Is<SampledTexture>());
+ EXPECT_TRUE(ty->Is<StorageTexture>());
}
TEST_F(AstStorageTextureTest, Dim) {
- auto* subtype = StorageTexture::SubtypeFor(TexelFormat::kRgba32Float, *this);
- auto* s =
- create<StorageTexture>(TextureDimension::k2dArray,
- TexelFormat::kRgba32Float, subtype, Access::kRead);
- EXPECT_EQ(s->dim, TextureDimension::k2dArray);
+ auto* subtype = StorageTexture::SubtypeFor(TexelFormat::kRgba32Float, *this);
+ auto* s = create<StorageTexture>(TextureDimension::k2dArray, TexelFormat::kRgba32Float, subtype,
+ Access::kRead);
+ EXPECT_EQ(s->dim, TextureDimension::k2dArray);
}
TEST_F(AstStorageTextureTest, Format) {
- auto* subtype = StorageTexture::SubtypeFor(TexelFormat::kRgba32Float, *this);
- auto* s =
- create<StorageTexture>(TextureDimension::k2dArray,
- TexelFormat::kRgba32Float, subtype, Access::kRead);
- EXPECT_EQ(s->format, TexelFormat::kRgba32Float);
+ auto* subtype = StorageTexture::SubtypeFor(TexelFormat::kRgba32Float, *this);
+ auto* s = create<StorageTexture>(TextureDimension::k2dArray, TexelFormat::kRgba32Float, subtype,
+ Access::kRead);
+ EXPECT_EQ(s->format, TexelFormat::kRgba32Float);
}
TEST_F(AstStorageTextureTest, FriendlyName) {
- auto* subtype = StorageTexture::SubtypeFor(TexelFormat::kRgba32Float, *this);
- auto* s =
- create<StorageTexture>(TextureDimension::k2dArray,
- TexelFormat::kRgba32Float, subtype, Access::kRead);
- EXPECT_EQ(s->FriendlyName(Symbols()),
- "texture_storage_2d_array<rgba32float, read>");
+ auto* subtype = StorageTexture::SubtypeFor(TexelFormat::kRgba32Float, *this);
+ auto* s = create<StorageTexture>(TextureDimension::k2dArray, TexelFormat::kRgba32Float, subtype,
+ Access::kRead);
+ EXPECT_EQ(s->FriendlyName(Symbols()), "texture_storage_2d_array<rgba32float, read>");
}
TEST_F(AstStorageTextureTest, F32) {
- auto* subtype = StorageTexture::SubtypeFor(TexelFormat::kRgba32Float, *this);
- Type* s =
- create<StorageTexture>(TextureDimension::k2dArray,
- TexelFormat::kRgba32Float, subtype, Access::kRead);
+ auto* subtype = StorageTexture::SubtypeFor(TexelFormat::kRgba32Float, *this);
+ Type* s = create<StorageTexture>(TextureDimension::k2dArray, TexelFormat::kRgba32Float, subtype,
+ Access::kRead);
- ASSERT_TRUE(s->Is<Texture>());
- ASSERT_TRUE(s->Is<StorageTexture>());
- EXPECT_TRUE(s->As<StorageTexture>()->type->Is<F32>());
+ ASSERT_TRUE(s->Is<Texture>());
+ ASSERT_TRUE(s->Is<StorageTexture>());
+ EXPECT_TRUE(s->As<StorageTexture>()->type->Is<F32>());
}
TEST_F(AstStorageTextureTest, U32) {
- auto* subtype = StorageTexture::SubtypeFor(TexelFormat::kRg32Uint, *this);
- Type* s =
- create<StorageTexture>(TextureDimension::k2dArray, TexelFormat::kRg32Uint,
- subtype, Access::kRead);
+ auto* subtype = StorageTexture::SubtypeFor(TexelFormat::kRg32Uint, *this);
+ Type* s = create<StorageTexture>(TextureDimension::k2dArray, TexelFormat::kRg32Uint, subtype,
+ Access::kRead);
- ASSERT_TRUE(s->Is<Texture>());
- ASSERT_TRUE(s->Is<StorageTexture>());
- EXPECT_TRUE(s->As<StorageTexture>()->type->Is<U32>());
+ ASSERT_TRUE(s->Is<Texture>());
+ ASSERT_TRUE(s->Is<StorageTexture>());
+ EXPECT_TRUE(s->As<StorageTexture>()->type->Is<U32>());
}
TEST_F(AstStorageTextureTest, I32) {
- auto* subtype = StorageTexture::SubtypeFor(TexelFormat::kRgba32Sint, *this);
- Type* s =
- create<StorageTexture>(TextureDimension::k2dArray,
- TexelFormat::kRgba32Sint, subtype, Access::kRead);
+ auto* subtype = StorageTexture::SubtypeFor(TexelFormat::kRgba32Sint, *this);
+ Type* s = create<StorageTexture>(TextureDimension::k2dArray, TexelFormat::kRgba32Sint, subtype,
+ Access::kRead);
- ASSERT_TRUE(s->Is<Texture>());
- ASSERT_TRUE(s->Is<StorageTexture>());
- EXPECT_TRUE(s->As<StorageTexture>()->type->Is<I32>());
+ ASSERT_TRUE(s->Is<Texture>());
+ ASSERT_TRUE(s->Is<StorageTexture>());
+ EXPECT_TRUE(s->As<StorageTexture>()->type->Is<I32>());
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/ast/stride_attribute.cc b/chromium/third_party/dawn/src/tint/ast/stride_attribute.cc
index 3eb1f6d6881..14a07333193 100644
--- a/chromium/third_party/dawn/src/tint/ast/stride_attribute.cc
+++ b/chromium/third_party/dawn/src/tint/ast/stride_attribute.cc
@@ -28,13 +28,13 @@ StrideAttribute::StrideAttribute(ProgramID pid, const Source& src, uint32_t s)
StrideAttribute::~StrideAttribute() = default;
std::string StrideAttribute::Name() const {
- return "stride";
+ return "stride";
}
const StrideAttribute* StrideAttribute::Clone(CloneContext* ctx) const {
- // Clone arguments outside of create() call to have deterministic ordering
- auto src = ctx->Clone(source);
- return ctx->dst->create<StrideAttribute>(src, stride);
+ // Clone arguments outside of create() call to have deterministic ordering
+ auto src = ctx->Clone(source);
+ return ctx->dst->create<StrideAttribute>(src, stride);
}
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/stride_attribute.h b/chromium/third_party/dawn/src/tint/ast/stride_attribute.h
index 232bae7823f..4315f217487 100644
--- a/chromium/third_party/dawn/src/tint/ast/stride_attribute.h
+++ b/chromium/third_party/dawn/src/tint/ast/stride_attribute.h
@@ -25,25 +25,25 @@ namespace tint::ast {
/// A stride attribute used by the SPIR-V reader for strided arrays and
/// matrices.
class StrideAttribute final : public Castable<StrideAttribute, Attribute> {
- public:
- /// constructor
- /// @param pid the identifier of the program that owns this node
- /// @param src the source of this node
- /// @param stride the stride value
- StrideAttribute(ProgramID pid, const Source& src, uint32_t stride);
- ~StrideAttribute() override;
-
- /// @returns the WGSL name for the attribute
- std::string Name() const override;
-
- /// Clones this node and all transitive child nodes using the `CloneContext`
- /// `ctx`.
- /// @param ctx the clone context
- /// @return the newly cloned node
- const StrideAttribute* Clone(CloneContext* ctx) const override;
-
- /// The stride value
- const uint32_t stride;
+ public:
+ /// constructor
+ /// @param pid the identifier of the program that owns this node
+ /// @param src the source of this node
+ /// @param stride the stride value
+ StrideAttribute(ProgramID pid, const Source& src, uint32_t stride);
+ ~StrideAttribute() override;
+
+ /// @returns the WGSL name for the attribute
+ std::string Name() const override;
+
+ /// Clones this node and all transitive child nodes using the `CloneContext`
+ /// `ctx`.
+ /// @param ctx the clone context
+ /// @return the newly cloned node
+ const StrideAttribute* Clone(CloneContext* ctx) const override;
+
+ /// The stride value
+ const uint32_t stride;
};
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/stride_attribute_test.cc b/chromium/third_party/dawn/src/tint/ast/stride_attribute_test.cc
index 1eba1e1b572..61c4fb5dadb 100644
--- a/chromium/third_party/dawn/src/tint/ast/stride_attribute_test.cc
+++ b/chromium/third_party/dawn/src/tint/ast/stride_attribute_test.cc
@@ -20,17 +20,17 @@ namespace {
using StrideAttributeTest = TestHelper;
TEST_F(StrideAttributeTest, Creation) {
- auto* d = create<StrideAttribute>(2);
- EXPECT_EQ(2u, d->stride);
+ auto* d = create<StrideAttribute>(2);
+ EXPECT_EQ(2u, d->stride);
}
TEST_F(StrideAttributeTest, Source) {
- auto* d = create<StrideAttribute>(
- Source{Source::Range{Source::Location{1, 2}, Source::Location{3, 4}}}, 2);
- EXPECT_EQ(d->source.range.begin.line, 1u);
- EXPECT_EQ(d->source.range.begin.column, 2u);
- EXPECT_EQ(d->source.range.end.line, 3u);
- EXPECT_EQ(d->source.range.end.column, 4u);
+ auto* d = create<StrideAttribute>(
+ Source{Source::Range{Source::Location{1, 2}, Source::Location{3, 4}}}, 2);
+ EXPECT_EQ(d->source.range.begin.line, 1u);
+ EXPECT_EQ(d->source.range.begin.column, 2u);
+ EXPECT_EQ(d->source.range.end.line, 3u);
+ EXPECT_EQ(d->source.range.end.column, 4u);
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/ast/struct.cc b/chromium/third_party/dawn/src/tint/ast/struct.cc
index 4f82c3ac241..19a30de9264 100644
--- a/chromium/third_party/dawn/src/tint/ast/struct.cc
+++ b/chromium/third_party/dawn/src/tint/ast/struct.cc
@@ -22,20 +22,16 @@ TINT_INSTANTIATE_TYPEINFO(tint::ast::Struct);
namespace tint::ast {
-Struct::Struct(ProgramID pid,
- const Source& src,
- Symbol n,
- StructMemberList m,
- AttributeList attrs)
+Struct::Struct(ProgramID pid, const Source& src, Symbol n, StructMemberList m, AttributeList attrs)
: Base(pid, src, n), members(std::move(m)), attributes(std::move(attrs)) {
- for (auto* mem : members) {
- TINT_ASSERT(AST, mem);
- TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, mem, program_id);
- }
- for (auto* attr : attributes) {
- TINT_ASSERT(AST, attr);
- TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, attr, program_id);
- }
+ for (auto* mem : members) {
+ TINT_ASSERT(AST, mem);
+ TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, mem, program_id);
+ }
+ for (auto* attr : attributes) {
+ TINT_ASSERT(AST, attr);
+ TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, attr, program_id);
+ }
}
Struct::Struct(Struct&&) = default;
@@ -43,12 +39,12 @@ Struct::Struct(Struct&&) = default;
Struct::~Struct() = default;
const Struct* Struct::Clone(CloneContext* ctx) const {
- // Clone arguments outside of create() call to have deterministic ordering
- auto src = ctx->Clone(source);
- auto n = ctx->Clone(name);
- auto mem = ctx->Clone(members);
- auto attrs = ctx->Clone(attributes);
- return ctx->dst->create<Struct>(src, n, mem, attrs);
+ // Clone arguments outside of create() call to have deterministic ordering
+ auto src = ctx->Clone(source);
+ auto n = ctx->Clone(name);
+ auto mem = ctx->Clone(members);
+ auto attrs = ctx->Clone(attributes);
+ return ctx->dst->create<Struct>(src, n, mem, attrs);
}
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/struct.h b/chromium/third_party/dawn/src/tint/ast/struct.h
index 7e44da40da2..5c28b4cd3a4 100644
--- a/chromium/third_party/dawn/src/tint/ast/struct.h
+++ b/chromium/third_party/dawn/src/tint/ast/struct.h
@@ -26,34 +26,34 @@ namespace tint::ast {
/// A struct statement.
class Struct final : public Castable<Struct, TypeDecl> {
- public:
- /// Create a new struct statement
- /// @param pid the identifier of the program that owns this node
- /// @param src the source of this node for the import statement
- /// @param name The name of the structure
- /// @param members The struct members
- /// @param attributes The struct attributes
- Struct(ProgramID pid,
- const Source& src,
- Symbol name,
- StructMemberList members,
- AttributeList attributes);
- /// Move constructor
- Struct(Struct&&);
-
- ~Struct() override;
-
- /// Clones this node and all transitive child nodes using the `CloneContext`
- /// `ctx`.
- /// @param ctx the clone context
- /// @return the newly cloned node
- const Struct* Clone(CloneContext* ctx) const override;
-
- /// The members
- const StructMemberList members;
-
- /// The struct attributes
- const AttributeList attributes;
+ public:
+ /// Create a new struct statement
+ /// @param pid the identifier of the program that owns this node
+ /// @param src the source of this node for the import statement
+ /// @param name The name of the structure
+ /// @param members The struct members
+ /// @param attributes The struct attributes
+ Struct(ProgramID pid,
+ const Source& src,
+ Symbol name,
+ StructMemberList members,
+ AttributeList attributes);
+ /// Move constructor
+ Struct(Struct&&);
+
+ ~Struct() override;
+
+ /// Clones this node and all transitive child nodes using the `CloneContext`
+ /// `ctx`.
+ /// @param ctx the clone context
+ /// @return the newly cloned node
+ const Struct* Clone(CloneContext* ctx) const override;
+
+ /// The members
+ const StructMemberList members;
+
+ /// The struct attributes
+ const AttributeList attributes;
};
} // namespace tint::ast
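A minimal sketch (not part of the patch) of building the reformatted Struct node through the ProgramBuilder helpers, in the style of the AstStructTest cases further below:

    // Sketch only: one member, no struct attributes.
    auto name = Sym("S");
    auto* s = create<Struct>(name, StructMemberList{Member("a", ty.i32())}, AttributeList{});
    EXPECT_EQ(s->name, name);
    EXPECT_EQ(s->members.size(), 1u);
    EXPECT_TRUE(s->attributes.empty());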
diff --git a/chromium/third_party/dawn/src/tint/ast/struct_member.cc b/chromium/third_party/dawn/src/tint/ast/struct_member.cc
index b1cff3b0edd..611348420d4 100644
--- a/chromium/third_party/dawn/src/tint/ast/struct_member.cc
+++ b/chromium/third_party/dawn/src/tint/ast/struct_member.cc
@@ -26,13 +26,13 @@ StructMember::StructMember(ProgramID pid,
const ast::Type* ty,
AttributeList attrs)
: Base(pid, src), symbol(sym), type(ty), attributes(std::move(attrs)) {
- TINT_ASSERT(AST, type);
- TINT_ASSERT(AST, symbol.IsValid());
- TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, symbol, program_id);
- for (auto* attr : attributes) {
- TINT_ASSERT(AST, attr);
- TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, attr, program_id);
- }
+ TINT_ASSERT(AST, type);
+ TINT_ASSERT(AST, symbol.IsValid());
+ TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, symbol, program_id);
+ for (auto* attr : attributes) {
+ TINT_ASSERT(AST, attr);
+ TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, attr, program_id);
+ }
}
StructMember::StructMember(StructMember&&) = default;
@@ -40,12 +40,12 @@ StructMember::StructMember(StructMember&&) = default;
StructMember::~StructMember() = default;
const StructMember* StructMember::Clone(CloneContext* ctx) const {
- // Clone arguments outside of create() call to have deterministic ordering
- auto src = ctx->Clone(source);
- auto sym = ctx->Clone(symbol);
- auto* ty = ctx->Clone(type);
- auto attrs = ctx->Clone(attributes);
- return ctx->dst->create<StructMember>(src, sym, ty, attrs);
+ // Clone arguments outside of create() call to have deterministic ordering
+ auto src = ctx->Clone(source);
+ auto sym = ctx->Clone(symbol);
+ auto* ty = ctx->Clone(type);
+ auto attrs = ctx->Clone(attributes);
+ return ctx->dst->create<StructMember>(src, sym, ty, attrs);
}
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/struct_member.h b/chromium/third_party/dawn/src/tint/ast/struct_member.h
index 7d21c9d323b..022a34cafe4 100644
--- a/chromium/third_party/dawn/src/tint/ast/struct_member.h
+++ b/chromium/third_party/dawn/src/tint/ast/struct_member.h
@@ -29,37 +29,37 @@ namespace tint::ast {
/// A struct member statement.
class StructMember final : public Castable<StructMember, Node> {
- public:
- /// Create a new struct member statement
- /// @param pid the identifier of the program that owns this node
- /// @param src the source of this node for the struct member statement
- /// @param sym The struct member symbol
- /// @param type The struct member type
- /// @param attributes The struct member attributes
- StructMember(ProgramID pid,
- const Source& src,
- const Symbol& sym,
- const ast::Type* type,
- AttributeList attributes);
- /// Move constructor
- StructMember(StructMember&&);
+ public:
+ /// Create a new struct member statement
+ /// @param pid the identifier of the program that owns this node
+ /// @param src the source of this node for the struct member statement
+ /// @param sym The struct member symbol
+ /// @param type The struct member type
+ /// @param attributes The struct member attributes
+ StructMember(ProgramID pid,
+ const Source& src,
+ const Symbol& sym,
+ const ast::Type* type,
+ AttributeList attributes);
+ /// Move constructor
+ StructMember(StructMember&&);
- ~StructMember() override;
+ ~StructMember() override;
- /// Clones this node and all transitive child nodes using the `CloneContext`
- /// `ctx`.
- /// @param ctx the clone context
- /// @return the newly cloned node
- const StructMember* Clone(CloneContext* ctx) const override;
+ /// Clones this node and all transitive child nodes using the `CloneContext`
+ /// `ctx`.
+ /// @param ctx the clone context
+ /// @return the newly cloned node
+ const StructMember* Clone(CloneContext* ctx) const override;
- /// The symbol
- const Symbol symbol;
+ /// The symbol
+ const Symbol symbol;
- /// The type
- const ast::Type* const type;
+ /// The type
+ const ast::Type* const type;
- /// The attributes
- const AttributeList attributes;
+ /// The attributes
+ const AttributeList attributes;
};
/// A list of struct members
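A minimal sketch (not part of the patch) of creating a StructMember with a layout attribute, matching the StructMemberTest cases later in this patch:

    // Sketch only: member `a : i32` carrying a @size(4) attribute.
    auto* st = Member("a", ty.i32(), {MemberSize(4)});
    EXPECT_TRUE(st->type->Is<ast::I32>());
    ASSERT_EQ(st->attributes.size(), 1u);
    EXPECT_TRUE(st->attributes[0]->Is<StructMemberSizeAttribute>());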
diff --git a/chromium/third_party/dawn/src/tint/ast/struct_member_align_attribute.cc b/chromium/third_party/dawn/src/tint/ast/struct_member_align_attribute.cc
index 8736dd102df..f586e7eea7d 100644
--- a/chromium/third_party/dawn/src/tint/ast/struct_member_align_attribute.cc
+++ b/chromium/third_party/dawn/src/tint/ast/struct_member_align_attribute.cc
@@ -23,22 +23,19 @@ TINT_INSTANTIATE_TYPEINFO(tint::ast::StructMemberAlignAttribute);
namespace tint::ast {
-StructMemberAlignAttribute::StructMemberAlignAttribute(ProgramID pid,
- const Source& src,
- uint32_t a)
+StructMemberAlignAttribute::StructMemberAlignAttribute(ProgramID pid, const Source& src, uint32_t a)
: Base(pid, src), align(a) {}
StructMemberAlignAttribute::~StructMemberAlignAttribute() = default;
std::string StructMemberAlignAttribute::Name() const {
- return "align";
+ return "align";
}
-const StructMemberAlignAttribute* StructMemberAlignAttribute::Clone(
- CloneContext* ctx) const {
- // Clone arguments outside of create() call to have deterministic ordering
- auto src = ctx->Clone(source);
- return ctx->dst->create<StructMemberAlignAttribute>(src, align);
+const StructMemberAlignAttribute* StructMemberAlignAttribute::Clone(CloneContext* ctx) const {
+ // Clone arguments outside of create() call to have deterministic ordering
+ auto src = ctx->Clone(source);
+ return ctx->dst->create<StructMemberAlignAttribute>(src, align);
}
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/struct_member_align_attribute.h b/chromium/third_party/dawn/src/tint/ast/struct_member_align_attribute.h
index d8e629ea18c..10a6507d47f 100644
--- a/chromium/third_party/dawn/src/tint/ast/struct_member_align_attribute.h
+++ b/chromium/third_party/dawn/src/tint/ast/struct_member_align_attribute.h
@@ -23,27 +23,26 @@
namespace tint::ast {
/// A struct member align attribute
-class StructMemberAlignAttribute final
- : public Castable<StructMemberAlignAttribute, Attribute> {
- public:
- /// constructor
- /// @param pid the identifier of the program that owns this node
- /// @param src the source of this node
- /// @param align the align value
- StructMemberAlignAttribute(ProgramID pid, const Source& src, uint32_t align);
- ~StructMemberAlignAttribute() override;
-
- /// @returns the WGSL name for the attribute
- std::string Name() const override;
-
- /// Clones this node and all transitive child nodes using the `CloneContext`
- /// `ctx`.
- /// @param ctx the clone context
- /// @return the newly cloned node
- const StructMemberAlignAttribute* Clone(CloneContext* ctx) const override;
-
- /// The align value
- const uint32_t align;
+class StructMemberAlignAttribute final : public Castable<StructMemberAlignAttribute, Attribute> {
+ public:
+ /// constructor
+ /// @param pid the identifier of the program that owns this node
+ /// @param src the source of this node
+ /// @param align the align value
+ StructMemberAlignAttribute(ProgramID pid, const Source& src, uint32_t align);
+ ~StructMemberAlignAttribute() override;
+
+ /// @returns the WGSL name for the attribute
+ std::string Name() const override;
+
+ /// Clones this node and all transitive child nodes using the `CloneContext`
+ /// `ctx`.
+ /// @param ctx the clone context
+ /// @return the newly cloned node
+ const StructMemberAlignAttribute* Clone(CloneContext* ctx) const override;
+
+ /// The align value
+ const uint32_t align;
};
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/struct_member_align_attribute_test.cc b/chromium/third_party/dawn/src/tint/ast/struct_member_align_attribute_test.cc
index 47b57aa403d..5b4ff487a30 100644
--- a/chromium/third_party/dawn/src/tint/ast/struct_member_align_attribute_test.cc
+++ b/chromium/third_party/dawn/src/tint/ast/struct_member_align_attribute_test.cc
@@ -22,8 +22,8 @@ namespace {
using StructMemberAlignAttributeTest = TestHelper;
TEST_F(StructMemberAlignAttributeTest, Creation) {
- auto* d = create<StructMemberAlignAttribute>(2);
- EXPECT_EQ(2u, d->align);
+ auto* d = create<StructMemberAlignAttribute>(2);
+ EXPECT_EQ(2u, d->align);
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/ast/struct_member_offset_attribute.cc b/chromium/third_party/dawn/src/tint/ast/struct_member_offset_attribute.cc
index 5d9ec16e20f..0a33127bf99 100644
--- a/chromium/third_party/dawn/src/tint/ast/struct_member_offset_attribute.cc
+++ b/chromium/third_party/dawn/src/tint/ast/struct_member_offset_attribute.cc
@@ -30,14 +30,13 @@ StructMemberOffsetAttribute::StructMemberOffsetAttribute(ProgramID pid,
StructMemberOffsetAttribute::~StructMemberOffsetAttribute() = default;
std::string StructMemberOffsetAttribute::Name() const {
- return "offset";
+ return "offset";
}
-const StructMemberOffsetAttribute* StructMemberOffsetAttribute::Clone(
- CloneContext* ctx) const {
- // Clone arguments outside of create() call to have deterministic ordering
- auto src = ctx->Clone(source);
- return ctx->dst->create<StructMemberOffsetAttribute>(src, offset);
+const StructMemberOffsetAttribute* StructMemberOffsetAttribute::Clone(CloneContext* ctx) const {
+ // Clone arguments outside of create() call to have deterministic ordering
+ auto src = ctx->Clone(source);
+ return ctx->dst->create<StructMemberOffsetAttribute>(src, offset);
}
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/struct_member_offset_attribute.h b/chromium/third_party/dawn/src/tint/ast/struct_member_offset_attribute.h
index b1a25e08fd7..92cc68ec1da 100644
--- a/chromium/third_party/dawn/src/tint/ast/struct_member_offset_attribute.h
+++ b/chromium/third_party/dawn/src/tint/ast/struct_member_offset_attribute.h
@@ -31,29 +31,26 @@ namespace tint::ast {
/// trivial for the Resolver to handle `@offset(n)` or `@size(n)` /
/// `@align(n)` attributes, so this is what we do, keeping all the layout
/// logic in one place.
-class StructMemberOffsetAttribute final
- : public Castable<StructMemberOffsetAttribute, Attribute> {
- public:
- /// constructor
- /// @param pid the identifier of the program that owns this node
- /// @param src the source of this node
- /// @param offset the offset value
- StructMemberOffsetAttribute(ProgramID pid,
- const Source& src,
- uint32_t offset);
- ~StructMemberOffsetAttribute() override;
-
- /// @returns the WGSL name for the attribute
- std::string Name() const override;
-
- /// Clones this node and all transitive child nodes using the `CloneContext`
- /// `ctx`.
- /// @param ctx the clone context
- /// @return the newly cloned node
- const StructMemberOffsetAttribute* Clone(CloneContext* ctx) const override;
-
- /// The offset value
- const uint32_t offset;
+class StructMemberOffsetAttribute final : public Castable<StructMemberOffsetAttribute, Attribute> {
+ public:
+ /// constructor
+ /// @param pid the identifier of the program that owns this node
+ /// @param src the source of this node
+ /// @param offset the offset value
+ StructMemberOffsetAttribute(ProgramID pid, const Source& src, uint32_t offset);
+ ~StructMemberOffsetAttribute() override;
+
+ /// @returns the WGSL name for the attribute
+ std::string Name() const override;
+
+ /// Clones this node and all transitive child nodes using the `CloneContext`
+ /// `ctx`.
+ /// @param ctx the clone context
+ /// @return the newly cloned node
+ const StructMemberOffsetAttribute* Clone(CloneContext* ctx) const override;
+
+ /// The offset value
+ const uint32_t offset;
};
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/struct_member_offset_attribute_test.cc b/chromium/third_party/dawn/src/tint/ast/struct_member_offset_attribute_test.cc
index 80399262342..3c0eb41c02f 100644
--- a/chromium/third_party/dawn/src/tint/ast/struct_member_offset_attribute_test.cc
+++ b/chromium/third_party/dawn/src/tint/ast/struct_member_offset_attribute_test.cc
@@ -20,8 +20,8 @@ namespace {
using StructMemberOffsetAttributeTest = TestHelper;
TEST_F(StructMemberOffsetAttributeTest, Creation) {
- auto* d = create<StructMemberOffsetAttribute>(2);
- EXPECT_EQ(2u, d->offset);
+ auto* d = create<StructMemberOffsetAttribute>(2);
+ EXPECT_EQ(2u, d->offset);
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/ast/struct_member_size_attribute.cc b/chromium/third_party/dawn/src/tint/ast/struct_member_size_attribute.cc
index 5e01c7d6cc3..a7f291b63a9 100644
--- a/chromium/third_party/dawn/src/tint/ast/struct_member_size_attribute.cc
+++ b/chromium/third_party/dawn/src/tint/ast/struct_member_size_attribute.cc
@@ -23,22 +23,19 @@ TINT_INSTANTIATE_TYPEINFO(tint::ast::StructMemberSizeAttribute);
namespace tint::ast {
-StructMemberSizeAttribute::StructMemberSizeAttribute(ProgramID pid,
- const Source& src,
- uint32_t sz)
+StructMemberSizeAttribute::StructMemberSizeAttribute(ProgramID pid, const Source& src, uint32_t sz)
: Base(pid, src), size(sz) {}
StructMemberSizeAttribute::~StructMemberSizeAttribute() = default;
std::string StructMemberSizeAttribute::Name() const {
- return "size";
+ return "size";
}
-const StructMemberSizeAttribute* StructMemberSizeAttribute::Clone(
- CloneContext* ctx) const {
- // Clone arguments outside of create() call to have deterministic ordering
- auto src = ctx->Clone(source);
- return ctx->dst->create<StructMemberSizeAttribute>(src, size);
+const StructMemberSizeAttribute* StructMemberSizeAttribute::Clone(CloneContext* ctx) const {
+ // Clone arguments outside of create() call to have deterministic ordering
+ auto src = ctx->Clone(source);
+ return ctx->dst->create<StructMemberSizeAttribute>(src, size);
}
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/struct_member_size_attribute.h b/chromium/third_party/dawn/src/tint/ast/struct_member_size_attribute.h
index 967e602b2ca..0c4ddd6c056 100644
--- a/chromium/third_party/dawn/src/tint/ast/struct_member_size_attribute.h
+++ b/chromium/third_party/dawn/src/tint/ast/struct_member_size_attribute.h
@@ -23,27 +23,26 @@
namespace tint::ast {
/// A struct member size attribute
-class StructMemberSizeAttribute final
- : public Castable<StructMemberSizeAttribute, Attribute> {
- public:
- /// constructor
- /// @param pid the identifier of the program that owns this node
- /// @param src the source of this node
- /// @param size the size value
- StructMemberSizeAttribute(ProgramID pid, const Source& src, uint32_t size);
- ~StructMemberSizeAttribute() override;
-
- /// @returns the WGSL name for the attribute
- std::string Name() const override;
-
- /// Clones this node and all transitive child nodes using the `CloneContext`
- /// `ctx`.
- /// @param ctx the clone context
- /// @return the newly cloned node
- const StructMemberSizeAttribute* Clone(CloneContext* ctx) const override;
-
- /// The size value
- const uint32_t size;
+class StructMemberSizeAttribute final : public Castable<StructMemberSizeAttribute, Attribute> {
+ public:
+ /// constructor
+ /// @param pid the identifier of the program that owns this node
+ /// @param src the source of this node
+ /// @param size the size value
+ StructMemberSizeAttribute(ProgramID pid, const Source& src, uint32_t size);
+ ~StructMemberSizeAttribute() override;
+
+ /// @returns the WGSL name for the attribute
+ std::string Name() const override;
+
+ /// Clones this node and all transitive child nodes using the `CloneContext`
+ /// `ctx`.
+ /// @param ctx the clone context
+ /// @return the newly cloned node
+ const StructMemberSizeAttribute* Clone(CloneContext* ctx) const override;
+
+ /// The size value
+ const uint32_t size;
};
} // namespace tint::ast
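The three struct-member layout attributes reformatted above can be created directly; a minimal sketch (not part of the patch), with example values, in the style of their unit tests:

    // Sketch only: create<>() injects program ID and source.
    auto* size_attr   = create<StructMemberSizeAttribute>(4);    // @size(4)
    auto* align_attr  = create<StructMemberAlignAttribute>(16);  // @align(16)
    auto* offset_attr = create<StructMemberOffsetAttribute>(8);  // offset is used internally for layout
    EXPECT_EQ(4u, size_attr->size);
    EXPECT_EQ(16u, align_attr->align);
    EXPECT_EQ(8u, offset_attr->offset);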
diff --git a/chromium/third_party/dawn/src/tint/ast/struct_member_size_attribute_test.cc b/chromium/third_party/dawn/src/tint/ast/struct_member_size_attribute_test.cc
index 8998d1d861d..a9d4637510c 100644
--- a/chromium/third_party/dawn/src/tint/ast/struct_member_size_attribute_test.cc
+++ b/chromium/third_party/dawn/src/tint/ast/struct_member_size_attribute_test.cc
@@ -22,8 +22,8 @@ namespace {
using StructMemberSizeAttributeTest = TestHelper;
TEST_F(StructMemberSizeAttributeTest, Creation) {
- auto* d = create<StructMemberSizeAttribute>(2);
- EXPECT_EQ(2u, d->size);
+ auto* d = create<StructMemberSizeAttribute>(2);
+ EXPECT_EQ(2u, d->size);
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/ast/struct_member_test.cc b/chromium/third_party/dawn/src/tint/ast/struct_member_test.cc
index 9fd4d7b129c..cec82ae03b0 100644
--- a/chromium/third_party/dawn/src/tint/ast/struct_member_test.cc
+++ b/chromium/third_party/dawn/src/tint/ast/struct_member_test.cc
@@ -21,75 +21,74 @@ namespace {
using StructMemberTest = TestHelper;
TEST_F(StructMemberTest, Creation) {
- auto* st = Member("a", ty.i32(), {MemberSize(4)});
- EXPECT_EQ(st->symbol, Symbol(1, ID()));
- EXPECT_TRUE(st->type->Is<ast::I32>());
- EXPECT_EQ(st->attributes.size(), 1u);
- EXPECT_TRUE(st->attributes[0]->Is<StructMemberSizeAttribute>());
- EXPECT_EQ(st->source.range.begin.line, 0u);
- EXPECT_EQ(st->source.range.begin.column, 0u);
- EXPECT_EQ(st->source.range.end.line, 0u);
- EXPECT_EQ(st->source.range.end.column, 0u);
+ auto* st = Member("a", ty.i32(), {MemberSize(4)});
+ EXPECT_EQ(st->symbol, Symbol(1, ID()));
+ EXPECT_TRUE(st->type->Is<ast::I32>());
+ EXPECT_EQ(st->attributes.size(), 1u);
+ EXPECT_TRUE(st->attributes[0]->Is<StructMemberSizeAttribute>());
+ EXPECT_EQ(st->source.range.begin.line, 0u);
+ EXPECT_EQ(st->source.range.begin.column, 0u);
+ EXPECT_EQ(st->source.range.end.line, 0u);
+ EXPECT_EQ(st->source.range.end.column, 0u);
}
TEST_F(StructMemberTest, CreationWithSource) {
- auto* st = Member(
- Source{Source::Range{Source::Location{27, 4}, Source::Location{27, 8}}},
- "a", ty.i32());
- EXPECT_EQ(st->symbol, Symbol(1, ID()));
- EXPECT_TRUE(st->type->Is<ast::I32>());
- EXPECT_EQ(st->attributes.size(), 0u);
- EXPECT_EQ(st->source.range.begin.line, 27u);
- EXPECT_EQ(st->source.range.begin.column, 4u);
- EXPECT_EQ(st->source.range.end.line, 27u);
- EXPECT_EQ(st->source.range.end.column, 8u);
+ auto* st = Member(Source{Source::Range{Source::Location{27, 4}, Source::Location{27, 8}}}, "a",
+ ty.i32());
+ EXPECT_EQ(st->symbol, Symbol(1, ID()));
+ EXPECT_TRUE(st->type->Is<ast::I32>());
+ EXPECT_EQ(st->attributes.size(), 0u);
+ EXPECT_EQ(st->source.range.begin.line, 27u);
+ EXPECT_EQ(st->source.range.begin.column, 4u);
+ EXPECT_EQ(st->source.range.end.line, 27u);
+ EXPECT_EQ(st->source.range.end.column, 8u);
}
TEST_F(StructMemberTest, Assert_Empty_Symbol) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b;
- b.Member("", b.ty.i32());
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b;
+ b.Member("", b.ty.i32());
+ },
+ "internal compiler error");
}
TEST_F(StructMemberTest, Assert_Null_Type) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b;
- b.Member("a", nullptr);
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b;
+ b.Member("a", nullptr);
+ },
+ "internal compiler error");
}
TEST_F(StructMemberTest, Assert_Null_Attribute) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b;
- b.Member("a", b.ty.i32(), {b.MemberSize(4), nullptr});
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b;
+ b.Member("a", b.ty.i32(), {b.MemberSize(4), nullptr});
+ },
+ "internal compiler error");
}
TEST_F(StructMemberTest, Assert_DifferentProgramID_Symbol) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b1;
- ProgramBuilder b2;
- b1.Member(b2.Sym("a"), b1.ty.i32(), {b1.MemberSize(4)});
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b1;
+ ProgramBuilder b2;
+ b1.Member(b2.Sym("a"), b1.ty.i32(), {b1.MemberSize(4)});
+ },
+ "internal compiler error");
}
TEST_F(StructMemberTest, Assert_DifferentProgramID_Attribute) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b1;
- ProgramBuilder b2;
- b1.Member("a", b1.ty.i32(), {b2.MemberSize(4)});
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b1;
+ ProgramBuilder b2;
+ b1.Member("a", b1.ty.i32(), {b2.MemberSize(4)});
+ },
+ "internal compiler error");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/ast/struct_test.cc b/chromium/third_party/dawn/src/tint/ast/struct_test.cc
index 2c629a2d25f..895d501e5db 100644
--- a/chromium/third_party/dawn/src/tint/ast/struct_test.cc
+++ b/chromium/third_party/dawn/src/tint/ast/struct_test.cc
@@ -32,99 +32,92 @@ namespace tint::ast {
namespace {
using AstStructTest = TestHelper;
-using SpirvBlockAttribute =
- transform::AddSpirvBlockAttribute::SpirvBlockAttribute;
+using SpirvBlockAttribute = transform::AddSpirvBlockAttribute::SpirvBlockAttribute;
TEST_F(AstStructTest, Creation) {
- auto name = Sym("s");
- auto* s = create<Struct>(name, StructMemberList{Member("a", ty.i32())},
- AttributeList{});
- EXPECT_EQ(s->name, name);
- EXPECT_EQ(s->members.size(), 1u);
- EXPECT_TRUE(s->attributes.empty());
- EXPECT_EQ(s->source.range.begin.line, 0u);
- EXPECT_EQ(s->source.range.begin.column, 0u);
- EXPECT_EQ(s->source.range.end.line, 0u);
- EXPECT_EQ(s->source.range.end.column, 0u);
+ auto name = Sym("s");
+ auto* s = create<Struct>(name, StructMemberList{Member("a", ty.i32())}, AttributeList{});
+ EXPECT_EQ(s->name, name);
+ EXPECT_EQ(s->members.size(), 1u);
+ EXPECT_TRUE(s->attributes.empty());
+ EXPECT_EQ(s->source.range.begin.line, 0u);
+ EXPECT_EQ(s->source.range.begin.column, 0u);
+ EXPECT_EQ(s->source.range.end.line, 0u);
+ EXPECT_EQ(s->source.range.end.column, 0u);
}
TEST_F(AstStructTest, Creation_WithAttributes) {
- auto name = Sym("s");
- AttributeList attrs;
- attrs.push_back(ASTNodes().Create<SpirvBlockAttribute>(ID()));
+ auto name = Sym("s");
+ AttributeList attrs;
+ attrs.push_back(ASTNodes().Create<SpirvBlockAttribute>(ID()));
- auto* s =
- create<Struct>(name, StructMemberList{Member("a", ty.i32())}, attrs);
- EXPECT_EQ(s->name, name);
- EXPECT_EQ(s->members.size(), 1u);
- ASSERT_EQ(s->attributes.size(), 1u);
- EXPECT_TRUE(s->attributes[0]->Is<SpirvBlockAttribute>());
- EXPECT_EQ(s->source.range.begin.line, 0u);
- EXPECT_EQ(s->source.range.begin.column, 0u);
- EXPECT_EQ(s->source.range.end.line, 0u);
- EXPECT_EQ(s->source.range.end.column, 0u);
+ auto* s = create<Struct>(name, StructMemberList{Member("a", ty.i32())}, attrs);
+ EXPECT_EQ(s->name, name);
+ EXPECT_EQ(s->members.size(), 1u);
+ ASSERT_EQ(s->attributes.size(), 1u);
+ EXPECT_TRUE(s->attributes[0]->Is<SpirvBlockAttribute>());
+ EXPECT_EQ(s->source.range.begin.line, 0u);
+ EXPECT_EQ(s->source.range.begin.column, 0u);
+ EXPECT_EQ(s->source.range.end.line, 0u);
+ EXPECT_EQ(s->source.range.end.column, 0u);
}
TEST_F(AstStructTest, CreationWithSourceAndAttributes) {
- auto name = Sym("s");
- auto* s = create<Struct>(
- Source{Source::Range{Source::Location{27, 4}, Source::Location{27, 8}}},
- name, StructMemberList{Member("a", ty.i32())},
- AttributeList{ASTNodes().Create<SpirvBlockAttribute>(ID())});
- EXPECT_EQ(s->name, name);
- EXPECT_EQ(s->members.size(), 1u);
- ASSERT_EQ(s->attributes.size(), 1u);
- EXPECT_TRUE(s->attributes[0]->Is<SpirvBlockAttribute>());
- EXPECT_EQ(s->source.range.begin.line, 27u);
- EXPECT_EQ(s->source.range.begin.column, 4u);
- EXPECT_EQ(s->source.range.end.line, 27u);
- EXPECT_EQ(s->source.range.end.column, 8u);
+ auto name = Sym("s");
+ auto* s =
+ create<Struct>(Source{Source::Range{Source::Location{27, 4}, Source::Location{27, 8}}},
+ name, StructMemberList{Member("a", ty.i32())},
+ AttributeList{ASTNodes().Create<SpirvBlockAttribute>(ID())});
+ EXPECT_EQ(s->name, name);
+ EXPECT_EQ(s->members.size(), 1u);
+ ASSERT_EQ(s->attributes.size(), 1u);
+ EXPECT_TRUE(s->attributes[0]->Is<SpirvBlockAttribute>());
+ EXPECT_EQ(s->source.range.begin.line, 27u);
+ EXPECT_EQ(s->source.range.begin.column, 4u);
+ EXPECT_EQ(s->source.range.end.line, 27u);
+ EXPECT_EQ(s->source.range.end.column, 8u);
}
TEST_F(AstStructTest, Assert_Null_StructMember) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b;
- b.create<Struct>(b.Sym("S"),
- StructMemberList{b.Member("a", b.ty.i32()), nullptr},
- AttributeList{});
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b;
+ b.create<Struct>(b.Sym("S"), StructMemberList{b.Member("a", b.ty.i32()), nullptr},
+ AttributeList{});
+ },
+ "internal compiler error");
}
TEST_F(AstStructTest, Assert_Null_Attribute) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b;
- b.create<Struct>(b.Sym("S"),
- StructMemberList{b.Member("a", b.ty.i32())},
- AttributeList{nullptr});
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b;
+ b.create<Struct>(b.Sym("S"), StructMemberList{b.Member("a", b.ty.i32())},
+ AttributeList{nullptr});
+ },
+ "internal compiler error");
}
TEST_F(AstStructTest, Assert_DifferentProgramID_StructMember) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b1;
- ProgramBuilder b2;
- b1.create<Struct>(b1.Sym("S"),
- StructMemberList{b2.Member("a", b2.ty.i32())},
- AttributeList{});
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b1;
+ ProgramBuilder b2;
+ b1.create<Struct>(b1.Sym("S"), StructMemberList{b2.Member("a", b2.ty.i32())},
+ AttributeList{});
+ },
+ "internal compiler error");
}
TEST_F(AstStructTest, Assert_DifferentProgramID_Attribute) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b1;
- ProgramBuilder b2;
- b1.create<Struct>(
- b1.Sym("S"), StructMemberList{b1.Member("a", b1.ty.i32())},
- AttributeList{b2.ASTNodes().Create<SpirvBlockAttribute>(b2.ID())});
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b1;
+ ProgramBuilder b2;
+ b1.create<Struct>(b1.Sym("S"), StructMemberList{b1.Member("a", b1.ty.i32())},
+ AttributeList{b2.ASTNodes().Create<SpirvBlockAttribute>(b2.ID())});
+ },
+ "internal compiler error");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/ast/switch_statement.cc b/chromium/third_party/dawn/src/tint/ast/switch_statement.cc
index 13c455a2ae5..08095a109c3 100644
--- a/chromium/third_party/dawn/src/tint/ast/switch_statement.cc
+++ b/chromium/third_party/dawn/src/tint/ast/switch_statement.cc
@@ -25,12 +25,12 @@ SwitchStatement::SwitchStatement(ProgramID pid,
const Expression* cond,
CaseStatementList b)
: Base(pid, src), condition(cond), body(b) {
- TINT_ASSERT(AST, condition);
- TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, condition, program_id);
- for (auto* stmt : body) {
- TINT_ASSERT(AST, stmt);
- TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, stmt, program_id);
- }
+ TINT_ASSERT(AST, condition);
+ TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, condition, program_id);
+ for (auto* stmt : body) {
+ TINT_ASSERT(AST, stmt);
+ TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, stmt, program_id);
+ }
}
SwitchStatement::SwitchStatement(SwitchStatement&&) = default;
@@ -38,11 +38,11 @@ SwitchStatement::SwitchStatement(SwitchStatement&&) = default;
SwitchStatement::~SwitchStatement() = default;
const SwitchStatement* SwitchStatement::Clone(CloneContext* ctx) const {
- // Clone arguments outside of create() call to have deterministic ordering
- auto src = ctx->Clone(source);
- auto* cond = ctx->Clone(condition);
- auto b = ctx->Clone(body);
- return ctx->dst->create<SwitchStatement>(src, cond, b);
+ // Clone arguments outside of create() call to have deterministic ordering
+ auto src = ctx->Clone(source);
+ auto* cond = ctx->Clone(condition);
+ auto b = ctx->Clone(body);
+ return ctx->dst->create<SwitchStatement>(src, cond, b);
}
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/switch_statement.h b/chromium/third_party/dawn/src/tint/ast/switch_statement.h
index 34394c83d18..5ac13b7b3ed 100644
--- a/chromium/third_party/dawn/src/tint/ast/switch_statement.h
+++ b/chromium/third_party/dawn/src/tint/ast/switch_statement.h
@@ -22,35 +22,35 @@ namespace tint::ast {
/// A switch statement
class SwitchStatement final : public Castable<SwitchStatement, Statement> {
- public:
- /// Constructor
- /// @param pid the identifier of the program that owns this node
- /// @param src the source of this node
- /// @param condition the switch condition
- /// @param body the switch body
- SwitchStatement(ProgramID pid,
- const Source& src,
- const Expression* condition,
- CaseStatementList body);
- /// Move constructor
- SwitchStatement(SwitchStatement&&);
- ~SwitchStatement() override;
-
- /// @returns true if this is a default statement
- bool IsDefault() const { return condition == nullptr; }
-
- /// Clones this node and all transitive child nodes using the `CloneContext`
- /// `ctx`.
- /// @param ctx the clone context
- /// @return the newly cloned node
- const SwitchStatement* Clone(CloneContext* ctx) const override;
-
- /// The switch condition or nullptr if none set
- const Expression* const condition;
-
- /// The Switch body
- const CaseStatementList body;
- SwitchStatement(const SwitchStatement&) = delete;
+ public:
+ /// Constructor
+ /// @param pid the identifier of the program that owns this node
+ /// @param src the source of this node
+ /// @param condition the switch condition
+ /// @param body the switch body
+ SwitchStatement(ProgramID pid,
+ const Source& src,
+ const Expression* condition,
+ CaseStatementList body);
+ /// Move constructor
+ SwitchStatement(SwitchStatement&&);
+ ~SwitchStatement() override;
+
+ /// @returns true if this is a default statement
+ bool IsDefault() const { return condition == nullptr; }
+
+ /// Clones this node and all transitive child nodes using the `CloneContext`
+ /// `ctx`.
+ /// @param ctx the clone context
+ /// @return the newly cloned node
+ const SwitchStatement* Clone(CloneContext* ctx) const override;
+
+ /// The switch condition or nullptr if none set
+ const Expression* const condition;
+
+ /// The Switch body
+ const CaseStatementList body;
+ SwitchStatement(const SwitchStatement&) = delete;
};
} // namespace tint::ast
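A minimal sketch (not part of the patch) of assembling a SwitchStatement, following the SwitchStatementTest cases below (which now use the typed number suffixes):

    // Sketch only: switch (ident) { case 1i: {} }
    CaseSelectorList lit;
    lit.push_back(Expr(1_i));
    CaseStatementList body;
    body.push_back(create<CaseStatement>(lit, Block()));
    auto* stmt = create<SwitchStatement>(Expr("ident"), body);
    EXPECT_FALSE(stmt->IsDefault());  // a condition is set, so not the default case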
diff --git a/chromium/third_party/dawn/src/tint/ast/switch_statement_test.cc b/chromium/third_party/dawn/src/tint/ast/switch_statement_test.cc
index 3b927ce39d6..19d1cfbaa9d 100644
--- a/chromium/third_party/dawn/src/tint/ast/switch_statement_test.cc
+++ b/chromium/third_party/dawn/src/tint/ast/switch_statement_test.cc
@@ -17,99 +17,100 @@
#include "gtest/gtest-spi.h"
#include "src/tint/ast/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::ast {
namespace {
using SwitchStatementTest = TestHelper;
TEST_F(SwitchStatementTest, Creation) {
- CaseSelectorList lit;
- lit.push_back(create<SintLiteralExpression>(1));
-
- auto* ident = Expr("ident");
- CaseStatementList body;
- auto* case_stmt = create<CaseStatement>(lit, Block());
- body.push_back(case_stmt);
-
- auto* stmt = create<SwitchStatement>(ident, body);
- EXPECT_EQ(stmt->condition, ident);
- ASSERT_EQ(stmt->body.size(), 1u);
- EXPECT_EQ(stmt->body[0], case_stmt);
+ CaseSelectorList lit;
+ lit.push_back(Expr(1_u));
+
+ auto* ident = Expr("ident");
+ CaseStatementList body;
+ auto* case_stmt = create<CaseStatement>(lit, Block());
+ body.push_back(case_stmt);
+
+ auto* stmt = create<SwitchStatement>(ident, body);
+ EXPECT_EQ(stmt->condition, ident);
+ ASSERT_EQ(stmt->body.size(), 1u);
+ EXPECT_EQ(stmt->body[0], case_stmt);
}
TEST_F(SwitchStatementTest, Creation_WithSource) {
- auto* ident = Expr("ident");
+ auto* ident = Expr("ident");
- auto* stmt = create<SwitchStatement>(Source{Source::Location{20, 2}}, ident,
- CaseStatementList());
- auto src = stmt->source;
- EXPECT_EQ(src.range.begin.line, 20u);
- EXPECT_EQ(src.range.begin.column, 2u);
+ auto* stmt =
+ create<SwitchStatement>(Source{Source::Location{20, 2}}, ident, CaseStatementList());
+ auto src = stmt->source;
+ EXPECT_EQ(src.range.begin.line, 20u);
+ EXPECT_EQ(src.range.begin.column, 2u);
}
TEST_F(SwitchStatementTest, IsSwitch) {
- CaseSelectorList lit;
- lit.push_back(create<SintLiteralExpression>(2));
+ CaseSelectorList lit;
+ lit.push_back(Expr(2_i));
- auto* ident = Expr("ident");
- CaseStatementList body;
- body.push_back(create<CaseStatement>(lit, Block()));
+ auto* ident = Expr("ident");
+ CaseStatementList body;
+ body.push_back(create<CaseStatement>(lit, Block()));
- auto* stmt = create<SwitchStatement>(ident, body);
- EXPECT_TRUE(stmt->Is<SwitchStatement>());
+ auto* stmt = create<SwitchStatement>(ident, body);
+ EXPECT_TRUE(stmt->Is<SwitchStatement>());
}
TEST_F(SwitchStatementTest, Assert_Null_Condition) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b;
- CaseStatementList cases;
- cases.push_back(
- b.create<CaseStatement>(CaseSelectorList{b.Expr(1)}, b.Block()));
- b.create<SwitchStatement>(nullptr, cases);
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b;
+ CaseStatementList cases;
+ cases.push_back(b.create<CaseStatement>(CaseSelectorList{b.Expr(1_i)}, b.Block()));
+ b.create<SwitchStatement>(nullptr, cases);
+ },
+ "internal compiler error");
}
TEST_F(SwitchStatementTest, Assert_Null_CaseStatement) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b;
- b.create<SwitchStatement>(b.Expr(true), CaseStatementList{nullptr});
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b;
+ b.create<SwitchStatement>(b.Expr(true), CaseStatementList{nullptr});
+ },
+ "internal compiler error");
}
TEST_F(SwitchStatementTest, Assert_DifferentProgramID_Condition) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b1;
- ProgramBuilder b2;
- b1.create<SwitchStatement>(b2.Expr(true), CaseStatementList{
- b1.create<CaseStatement>(
- CaseSelectorList{
- b1.Expr(1),
- },
- b1.Block()),
- });
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b1;
+ ProgramBuilder b2;
+ b1.create<SwitchStatement>(b2.Expr(true), CaseStatementList{
+ b1.create<CaseStatement>(
+ CaseSelectorList{
+ b1.Expr(1_i),
+ },
+ b1.Block()),
+ });
+ },
+ "internal compiler error");
}
TEST_F(SwitchStatementTest, Assert_DifferentProgramID_CaseStatement) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b1;
- ProgramBuilder b2;
- b1.create<SwitchStatement>(b1.Expr(true), CaseStatementList{
- b2.create<CaseStatement>(
- CaseSelectorList{
- b2.Expr(1),
- },
- b2.Block()),
- });
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b1;
+ ProgramBuilder b2;
+ b1.create<SwitchStatement>(b1.Expr(true), CaseStatementList{
+ b2.create<CaseStatement>(
+ CaseSelectorList{
+ b2.Expr(1_i),
+ },
+ b2.Block()),
+ });
+ },
+ "internal compiler error");
}
} // namespace
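The test updates above replace untyped literals with tint's typed number suffixes; a minimal sketch (not part of the patch) of that pattern:

    using namespace tint::number_suffixes;  // NOLINT (as in the updated tests)
    auto* as_i32 = Expr(1_i);  // i32 literal expression
    auto* as_u32 = Expr(2_u);  // u32 literal expression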
diff --git a/chromium/third_party/dawn/src/tint/ast/texture.cc b/chromium/third_party/dawn/src/tint/ast/texture.cc
index d88db6e4385..27eb094793d 100644
--- a/chromium/third_party/dawn/src/tint/ast/texture.cc
+++ b/chromium/third_party/dawn/src/tint/ast/texture.cc
@@ -19,66 +19,65 @@ TINT_INSTANTIATE_TYPEINFO(tint::ast::Texture);
namespace tint::ast {
std::ostream& operator<<(std::ostream& out, TextureDimension dim) {
- switch (dim) {
- case TextureDimension::kNone:
- out << "None";
- break;
- case TextureDimension::k1d:
- out << "1d";
- break;
- case TextureDimension::k2d:
- out << "2d";
- break;
- case TextureDimension::k2dArray:
- out << "2d_array";
- break;
- case TextureDimension::k3d:
- out << "3d";
- break;
- case TextureDimension::kCube:
- out << "cube";
- break;
- case TextureDimension::kCubeArray:
- out << "cube_array";
- break;
- }
- return out;
+ switch (dim) {
+ case TextureDimension::kNone:
+ out << "None";
+ break;
+ case TextureDimension::k1d:
+ out << "1d";
+ break;
+ case TextureDimension::k2d:
+ out << "2d";
+ break;
+ case TextureDimension::k2dArray:
+ out << "2d_array";
+ break;
+ case TextureDimension::k3d:
+ out << "3d";
+ break;
+ case TextureDimension::kCube:
+ out << "cube";
+ break;
+ case TextureDimension::kCubeArray:
+ out << "cube_array";
+ break;
+ }
+ return out;
}
bool IsTextureArray(TextureDimension dim) {
- switch (dim) {
- case TextureDimension::k2dArray:
- case TextureDimension::kCubeArray:
- return true;
- case TextureDimension::k2d:
- case TextureDimension::kNone:
- case TextureDimension::k1d:
- case TextureDimension::k3d:
- case TextureDimension::kCube:
- return false;
- }
- return false;
+ switch (dim) {
+ case TextureDimension::k2dArray:
+ case TextureDimension::kCubeArray:
+ return true;
+ case TextureDimension::k2d:
+ case TextureDimension::kNone:
+ case TextureDimension::k1d:
+ case TextureDimension::k3d:
+ case TextureDimension::kCube:
+ return false;
+ }
+ return false;
}
int NumCoordinateAxes(TextureDimension dim) {
- switch (dim) {
- case TextureDimension::kNone:
- return 0;
- case TextureDimension::k1d:
- return 1;
- case TextureDimension::k2d:
- case TextureDimension::k2dArray:
- return 2;
- case TextureDimension::k3d:
- case TextureDimension::kCube:
- case TextureDimension::kCubeArray:
- return 3;
- }
- return 0;
+ switch (dim) {
+ case TextureDimension::kNone:
+ return 0;
+ case TextureDimension::k1d:
+ return 1;
+ case TextureDimension::k2d:
+ case TextureDimension::k2dArray:
+ return 2;
+ case TextureDimension::k3d:
+ case TextureDimension::kCube:
+ case TextureDimension::kCubeArray:
+ return 3;
+ }
+ return 0;
}
-Texture::Texture(ProgramID pid, const Source& src, TextureDimension d)
- : Base(pid, src), dim(d) {}
+Texture::Texture(ProgramID pid, const Source& src, TextureDimension d) : Base(pid, src), dim(d) {}
Texture::Texture(Texture&&) = default;
diff --git a/chromium/third_party/dawn/src/tint/ast/texture.h b/chromium/third_party/dawn/src/tint/ast/texture.h
index 716cdf07a94..9a4199bcd78 100644
--- a/chromium/third_party/dawn/src/tint/ast/texture.h
+++ b/chromium/third_party/dawn/src/tint/ast/texture.h
@@ -21,20 +21,20 @@ namespace tint::ast {
/// The dimensionality of the texture
enum class TextureDimension {
- /// Invalid texture
- kNone = -1,
- /// 1 dimensional texture
- k1d,
- /// 2 dimensional texture
- k2d,
- /// 2 dimensional array texture
- k2dArray,
- /// 3 dimensional texture
- k3d,
- /// cube texture
- kCube,
- /// cube array texture
- kCubeArray,
+ /// Invalid texture
+ kNone = -1,
+ /// 1 dimensional texture
+ k1d,
+ /// 2 dimensional texture
+ k2d,
+ /// 2 dimensional array texture
+ k2dArray,
+ /// 3 dimensional texture
+ k3d,
+ /// cube texture
+ kCube,
+ /// cube array texture
+ kCubeArray,
};
/// @param out the std::ostream to write to
@@ -62,18 +62,18 @@ int NumCoordinateAxes(TextureDimension dim);
/// A texture type.
class Texture : public Castable<Texture, Type> {
- public:
- /// Constructor
- /// @param pid the identifier of the program that owns this node
- /// @param src the source of this node
- /// @param dim the dimensionality of the texture
- Texture(ProgramID pid, const Source& src, TextureDimension dim);
- /// Move constructor
- Texture(Texture&&);
- ~Texture() override;
+ public:
+ /// Constructor
+ /// @param pid the identifier of the program that owns this node
+ /// @param src the source of this node
+ /// @param dim the dimensionality of the texture
+ Texture(ProgramID pid, const Source& src, TextureDimension dim);
+ /// Move constructor
+ Texture(Texture&&);
+ ~Texture() override;
- /// The texture dimension
- const TextureDimension dim;
+ /// The texture dimension
+ const TextureDimension dim;
};
} // namespace tint::ast
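A minimal sketch (not part of the patch) of the free TextureDimension helpers reformatted above, exercised as in AstTextureTypeTest below:

    // Sketch only: array-ness and coordinate-axis queries.
    EXPECT_TRUE(IsTextureArray(TextureDimension::kCubeArray));
    EXPECT_FALSE(IsTextureArray(TextureDimension::k3d));
    EXPECT_EQ(2, NumCoordinateAxes(TextureDimension::k2dArray));  // the array layer is not an axis
    EXPECT_EQ(3, NumCoordinateAxes(TextureDimension::kCube));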
diff --git a/chromium/third_party/dawn/src/tint/ast/texture_test.cc b/chromium/third_party/dawn/src/tint/ast/texture_test.cc
index 84298b8981e..7b7c0b01eb3 100644
--- a/chromium/third_party/dawn/src/tint/ast/texture_test.cc
+++ b/chromium/third_party/dawn/src/tint/ast/texture_test.cc
@@ -33,23 +33,23 @@ namespace {
using AstTextureTypeTest = TestHelper;
TEST_F(AstTextureTypeTest, IsTextureArray) {
- EXPECT_EQ(false, IsTextureArray(TextureDimension::kNone));
- EXPECT_EQ(false, IsTextureArray(TextureDimension::k1d));
- EXPECT_EQ(false, IsTextureArray(TextureDimension::k2d));
- EXPECT_EQ(true, IsTextureArray(TextureDimension::k2dArray));
- EXPECT_EQ(false, IsTextureArray(TextureDimension::k3d));
- EXPECT_EQ(false, IsTextureArray(TextureDimension::kCube));
- EXPECT_EQ(true, IsTextureArray(TextureDimension::kCubeArray));
+ EXPECT_EQ(false, IsTextureArray(TextureDimension::kNone));
+ EXPECT_EQ(false, IsTextureArray(TextureDimension::k1d));
+ EXPECT_EQ(false, IsTextureArray(TextureDimension::k2d));
+ EXPECT_EQ(true, IsTextureArray(TextureDimension::k2dArray));
+ EXPECT_EQ(false, IsTextureArray(TextureDimension::k3d));
+ EXPECT_EQ(false, IsTextureArray(TextureDimension::kCube));
+ EXPECT_EQ(true, IsTextureArray(TextureDimension::kCubeArray));
}
TEST_F(AstTextureTypeTest, NumCoordinateAxes) {
- EXPECT_EQ(0, NumCoordinateAxes(TextureDimension::kNone));
- EXPECT_EQ(1, NumCoordinateAxes(TextureDimension::k1d));
- EXPECT_EQ(2, NumCoordinateAxes(TextureDimension::k2d));
- EXPECT_EQ(2, NumCoordinateAxes(TextureDimension::k2dArray));
- EXPECT_EQ(3, NumCoordinateAxes(TextureDimension::k3d));
- EXPECT_EQ(3, NumCoordinateAxes(TextureDimension::kCube));
- EXPECT_EQ(3, NumCoordinateAxes(TextureDimension::kCubeArray));
+ EXPECT_EQ(0, NumCoordinateAxes(TextureDimension::kNone));
+ EXPECT_EQ(1, NumCoordinateAxes(TextureDimension::k1d));
+ EXPECT_EQ(2, NumCoordinateAxes(TextureDimension::k2d));
+ EXPECT_EQ(2, NumCoordinateAxes(TextureDimension::k2dArray));
+ EXPECT_EQ(3, NumCoordinateAxes(TextureDimension::k3d));
+ EXPECT_EQ(3, NumCoordinateAxes(TextureDimension::kCube));
+ EXPECT_EQ(3, NumCoordinateAxes(TextureDimension::kCubeArray));
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/ast/traverse_expressions.h b/chromium/third_party/dawn/src/tint/ast/traverse_expressions.h
index 77f27a26736..59b00e98292 100644
--- a/chromium/third_party/dawn/src/tint/ast/traverse_expressions.h
+++ b/chromium/third_party/dawn/src/tint/ast/traverse_expressions.h
@@ -32,20 +32,20 @@ namespace tint::ast {
/// The action to perform after calling the TraverseExpressions() callback
/// function.
enum class TraverseAction {
- /// Stop traversal immediately.
- Stop,
- /// Descend into this expression.
- Descend,
- /// Do not descend into this expression.
- Skip,
+ /// Stop traversal immediately.
+ Stop,
+ /// Descend into this expression.
+ Descend,
+ /// Do not descend into this expression.
+ Skip,
};
/// The order TraverseExpressions() will traverse expressions
enum class TraverseOrder {
- /// Expressions will be traversed from left to right
- LeftToRight,
- /// Expressions will be traversed from right to left
- RightToLeft,
+ /// Expressions will be traversed from left to right
+ LeftToRight,
+ /// Expressions will be traversed from right to left
+ RightToLeft,
};
/// TraverseExpressions performs a depth-first traversal of the expression nodes
@@ -54,97 +54,110 @@ enum class TraverseOrder {
/// @param root the root expression node
/// @param diags the diagnostics used for error messages
/// @param callback the callback function. Must be of the signature:
-/// `TraverseAction(const T*)` where T is an ast::Expression type.
+/// `TraverseAction(const T* expr)` or `TraverseAction(const T* expr, size_t depth)` where T
+/// is an ast::Expression type.
/// @return true on success, false on error
template <TraverseOrder ORDER = TraverseOrder::LeftToRight, typename CALLBACK>
-bool TraverseExpressions(const ast::Expression* root,
- diag::List& diags,
- CALLBACK&& callback) {
- using EXPR_TYPE = std::remove_pointer_t<traits::ParameterType<CALLBACK, 0>>;
- std::vector<const ast::Expression*> to_visit{root};
-
- auto push_pair = [&](const ast::Expression* left,
- const ast::Expression* right) {
- if (ORDER == TraverseOrder::LeftToRight) {
- to_visit.push_back(right);
- to_visit.push_back(left);
- } else {
- to_visit.push_back(left);
- to_visit.push_back(right);
- }
- };
- auto push_list = [&](const std::vector<const ast::Expression*>& exprs) {
- if (ORDER == TraverseOrder::LeftToRight) {
- for (auto* expr : utils::Reverse(exprs)) {
- to_visit.push_back(expr);
- }
- } else {
- for (auto* expr : exprs) {
- to_visit.push_back(expr);
- }
- }
- };
-
- while (!to_visit.empty()) {
- auto* expr = to_visit.back();
- to_visit.pop_back();
-
- if (auto* filtered = expr->As<EXPR_TYPE>()) {
- switch (callback(filtered)) {
- case TraverseAction::Stop:
- return true;
- case TraverseAction::Skip:
- continue;
- case TraverseAction::Descend:
- break;
- }
- }
+bool TraverseExpressions(const ast::Expression* root, diag::List& diags, CALLBACK&& callback) {
+ using EXPR_TYPE = std::remove_pointer_t<traits::ParameterType<CALLBACK, 0>>;
+ constexpr static bool kHasDepthArg = traits::SignatureOfT<CALLBACK>::parameter_count == 2;
+
+ struct Pending {
+ const ast::Expression* expr;
+ size_t depth;
+ };
+
+ std::vector<Pending> to_visit{{root, 0}};
+
+ auto push_single = [&](const ast::Expression* expr, size_t depth) {
+ to_visit.push_back({expr, depth});
+ };
+ auto push_pair = [&](const ast::Expression* left, const ast::Expression* right, size_t depth) {
+ if (ORDER == TraverseOrder::LeftToRight) {
+ to_visit.push_back({right, depth});
+ to_visit.push_back({left, depth});
+ } else {
+ to_visit.push_back({left, depth});
+ to_visit.push_back({right, depth});
+ }
+ };
+ auto push_list = [&](const std::vector<const ast::Expression*>& exprs, size_t depth) {
+ if (ORDER == TraverseOrder::LeftToRight) {
+ for (auto* expr : utils::Reverse(exprs)) {
+ to_visit.push_back({expr, depth});
+ }
+ } else {
+ for (auto* expr : exprs) {
+ to_visit.push_back({expr, depth});
+ }
+ }
+ };
+
+ while (!to_visit.empty()) {
+ auto p = to_visit.back();
+ to_visit.pop_back();
+ const ast::Expression* expr = p.expr;
+
+ if (auto* filtered = expr->template As<EXPR_TYPE>()) {
+ TraverseAction result;
+ if constexpr (kHasDepthArg) {
+ result = callback(filtered, p.depth);
+ } else {
+ result = callback(filtered);
+ }
+
+ switch (result) {
+ case TraverseAction::Stop:
+ return true;
+ case TraverseAction::Skip:
+ continue;
+ case TraverseAction::Descend:
+ break;
+ }
+ }
- bool ok = Switch(
- expr,
- [&](const IndexAccessorExpression* idx) {
- push_pair(idx->object, idx->index);
- return true;
- },
- [&](const BinaryExpression* bin_op) {
- push_pair(bin_op->lhs, bin_op->rhs);
- return true;
- },
- [&](const BitcastExpression* bitcast) {
- to_visit.push_back(bitcast->expr);
- return true;
- },
- [&](const CallExpression* call) {
- // TODO(crbug.com/tint/1257): Resolver breaks if we actually include
- // the function name in the traversal. to_visit.push_back(call->func);
- push_list(call->args);
- return true;
- },
- [&](const MemberAccessorExpression* member) {
- // TODO(crbug.com/tint/1257): Resolver breaks if we actually include
- // the member name in the traversal. push_pair(member->structure,
- // member->member);
- to_visit.push_back(member->structure);
- return true;
- },
- [&](const UnaryOpExpression* unary) {
- to_visit.push_back(unary->expr);
- return true;
- },
- [&](Default) {
- if (expr->IsAnyOf<LiteralExpression, IdentifierExpression,
- PhonyExpression>()) {
- return true; // Leaf expression
- }
- TINT_ICE(AST, diags)
- << "unhandled expression type: " << expr->TypeInfo().name;
- return false;
- });
- if (!ok) {
- return false;
+ bool ok = Switch(
+ expr,
+ [&](const IndexAccessorExpression* idx) {
+ push_pair(idx->object, idx->index, p.depth + 1);
+ return true;
+ },
+ [&](const BinaryExpression* bin_op) {
+ push_pair(bin_op->lhs, bin_op->rhs, p.depth + 1);
+ return true;
+ },
+ [&](const BitcastExpression* bitcast) {
+ push_single(bitcast->expr, p.depth + 1);
+ return true;
+ },
+ [&](const CallExpression* call) {
+ // TODO(crbug.com/tint/1257): Resolver breaks if we actually include
+ // the function name in the traversal. push_single(call->func);
+ push_list(call->args, p.depth + 1);
+ return true;
+ },
+ [&](const MemberAccessorExpression* member) {
+ // TODO(crbug.com/tint/1257): Resolver breaks if we actually include
+ // the member name in the traversal. push_pair(member->member, p.depth + 1);
+ push_single(member->structure, p.depth + 1);
+ return true;
+ },
+ [&](const UnaryOpExpression* unary) {
+ push_single(unary->expr, p.depth + 1);
+ return true;
+ },
+ [&](Default) {
+ if (expr->IsAnyOf<LiteralExpression, IdentifierExpression, PhonyExpression>()) {
+ return true; // Leaf expression
+ }
+ TINT_ICE(AST, diags) << "unhandled expression type: " << expr->TypeInfo().name;
+ return false;
+ });
+ if (!ok) {
+ return false;
+ }
}
- }
- return true;
+ return true;
}
} // namespace tint::ast
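The rewrite above also lets the TraverseExpressions() callback optionally take a depth argument; a minimal sketch (not part of the patch), in the style of the new Depth test below:

    // Sketch only: depth is 0 for `root` and increases by one per nesting level.
    TraverseExpressions<TraverseOrder::LeftToRight>(
        root, Diagnostics(), [&](const ast::Expression* expr, size_t depth) {
            (void)expr;
            (void)depth;
            return ast::TraverseAction::Descend;
        });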
diff --git a/chromium/third_party/dawn/src/tint/ast/traverse_expressions_test.cc b/chromium/third_party/dawn/src/tint/ast/traverse_expressions_test.cc
index cacc93f875e..e79decb03df 100644
--- a/chromium/third_party/dawn/src/tint/ast/traverse_expressions_test.cc
+++ b/chromium/third_party/dawn/src/tint/ast/traverse_expressions_test.cc
@@ -16,219 +16,232 @@
#include "gmock/gmock.h"
#include "src/tint/ast/test_helper.h"
+using ::testing::ElementsAre;
+
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::ast {
namespace {
-using ::testing::ElementsAre;
-
using TraverseExpressionsTest = TestHelper;
TEST_F(TraverseExpressionsTest, DescendIndexAccessor) {
- std::vector<const ast::Expression*> e = {Expr(1), Expr(1), Expr(1), Expr(1)};
- std::vector<const ast::Expression*> i = {IndexAccessor(e[0], e[1]),
- IndexAccessor(e[2], e[3])};
- auto* root = IndexAccessor(i[0], i[1]);
- {
- std::vector<const ast::Expression*> l2r;
- TraverseExpressions<TraverseOrder::LeftToRight>(
- root, Diagnostics(), [&](const ast::Expression* expr) {
- l2r.push_back(expr);
- return ast::TraverseAction::Descend;
- });
- EXPECT_THAT(l2r, ElementsAre(root, i[0], e[0], e[1], i[1], e[2], e[3]));
- }
- {
- std::vector<const ast::Expression*> r2l;
- TraverseExpressions<TraverseOrder::RightToLeft>(
- root, Diagnostics(), [&](const ast::Expression* expr) {
- r2l.push_back(expr);
- return ast::TraverseAction::Descend;
- });
- EXPECT_THAT(r2l, ElementsAre(root, i[1], e[3], e[2], i[0], e[1], e[0]));
- }
+ std::vector<const ast::Expression*> e = {Expr(1_i), Expr(1_i), Expr(1_i), Expr(1_i)};
+ std::vector<const ast::Expression*> i = {IndexAccessor(e[0], e[1]), IndexAccessor(e[2], e[3])};
+ auto* root = IndexAccessor(i[0], i[1]);
+ {
+ std::vector<const ast::Expression*> l2r;
+ TraverseExpressions<TraverseOrder::LeftToRight>(root, Diagnostics(),
+ [&](const ast::Expression* expr) {
+ l2r.push_back(expr);
+ return ast::TraverseAction::Descend;
+ });
+ EXPECT_THAT(l2r, ElementsAre(root, i[0], e[0], e[1], i[1], e[2], e[3]));
+ }
+ {
+ std::vector<const ast::Expression*> r2l;
+ TraverseExpressions<TraverseOrder::RightToLeft>(root, Diagnostics(),
+ [&](const ast::Expression* expr) {
+ r2l.push_back(expr);
+ return ast::TraverseAction::Descend;
+ });
+ EXPECT_THAT(r2l, ElementsAre(root, i[1], e[3], e[2], i[0], e[1], e[0]));
+ }
}
TEST_F(TraverseExpressionsTest, DescendBinaryExpression) {
- std::vector<const ast::Expression*> e = {Expr(1), Expr(1), Expr(1), Expr(1)};
- std::vector<const ast::Expression*> i = {Add(e[0], e[1]), Sub(e[2], e[3])};
- auto* root = Mul(i[0], i[1]);
- {
- std::vector<const ast::Expression*> l2r;
- TraverseExpressions<TraverseOrder::LeftToRight>(
- root, Diagnostics(), [&](const ast::Expression* expr) {
- l2r.push_back(expr);
- return ast::TraverseAction::Descend;
- });
- EXPECT_THAT(l2r, ElementsAre(root, i[0], e[0], e[1], i[1], e[2], e[3]));
- }
- {
- std::vector<const ast::Expression*> r2l;
- TraverseExpressions<TraverseOrder::RightToLeft>(
- root, Diagnostics(), [&](const ast::Expression* expr) {
- r2l.push_back(expr);
- return ast::TraverseAction::Descend;
- });
- EXPECT_THAT(r2l, ElementsAre(root, i[1], e[3], e[2], i[0], e[1], e[0]));
- }
+ std::vector<const ast::Expression*> e = {Expr(1_i), Expr(1_i), Expr(1_i), Expr(1_i)};
+ std::vector<const ast::Expression*> i = {Add(e[0], e[1]), Sub(e[2], e[3])};
+ auto* root = Mul(i[0], i[1]);
+ {
+ std::vector<const ast::Expression*> l2r;
+ TraverseExpressions<TraverseOrder::LeftToRight>(root, Diagnostics(),
+ [&](const ast::Expression* expr) {
+ l2r.push_back(expr);
+ return ast::TraverseAction::Descend;
+ });
+ EXPECT_THAT(l2r, ElementsAre(root, i[0], e[0], e[1], i[1], e[2], e[3]));
+ }
+ {
+ std::vector<const ast::Expression*> r2l;
+ TraverseExpressions<TraverseOrder::RightToLeft>(root, Diagnostics(),
+ [&](const ast::Expression* expr) {
+ r2l.push_back(expr);
+ return ast::TraverseAction::Descend;
+ });
+ EXPECT_THAT(r2l, ElementsAre(root, i[1], e[3], e[2], i[0], e[1], e[0]));
+ }
+}
+
+TEST_F(TraverseExpressionsTest, Depth) {
+ std::vector<const ast::Expression*> e = {Expr(1_i), Expr(1_i), Expr(1_i), Expr(1_i)};
+ std::vector<const ast::Expression*> i = {Add(e[0], e[1]), Sub(e[2], e[3])};
+ auto* root = Mul(i[0], i[1]);
+
+ size_t j = 0;
+ size_t depths[] = {0, 1, 2, 2, 1, 2, 2};
+ {
+ TraverseExpressions<TraverseOrder::LeftToRight>( //
+ root, Diagnostics(), [&](const ast::Expression* expr, size_t depth) {
+ (void)expr;
+ EXPECT_THAT(depth, depths[j++]);
+ return ast::TraverseAction::Descend;
+ });
+ }
}
TEST_F(TraverseExpressionsTest, DescendBitcastExpression) {
- auto* e = Expr(1);
- auto* b0 = Bitcast<i32>(e);
- auto* b1 = Bitcast<i32>(b0);
- auto* b2 = Bitcast<i32>(b1);
- auto* root = Bitcast<i32>(b2);
- {
- std::vector<const ast::Expression*> l2r;
- TraverseExpressions<TraverseOrder::LeftToRight>(
- root, Diagnostics(), [&](const ast::Expression* expr) {
- l2r.push_back(expr);
- return ast::TraverseAction::Descend;
- });
- EXPECT_THAT(l2r, ElementsAre(root, b2, b1, b0, e));
- }
- {
- std::vector<const ast::Expression*> r2l;
- TraverseExpressions<TraverseOrder::RightToLeft>(
- root, Diagnostics(), [&](const ast::Expression* expr) {
- r2l.push_back(expr);
- return ast::TraverseAction::Descend;
- });
- EXPECT_THAT(r2l, ElementsAre(root, b2, b1, b0, e));
- }
+ auto* e = Expr(1_i);
+ auto* b0 = Bitcast<i32>(e);
+ auto* b1 = Bitcast<i32>(b0);
+ auto* b2 = Bitcast<i32>(b1);
+ auto* root = Bitcast<i32>(b2);
+ {
+ std::vector<const ast::Expression*> l2r;
+ TraverseExpressions<TraverseOrder::LeftToRight>(root, Diagnostics(),
+ [&](const ast::Expression* expr) {
+ l2r.push_back(expr);
+ return ast::TraverseAction::Descend;
+ });
+ EXPECT_THAT(l2r, ElementsAre(root, b2, b1, b0, e));
+ }
+ {
+ std::vector<const ast::Expression*> r2l;
+ TraverseExpressions<TraverseOrder::RightToLeft>(root, Diagnostics(),
+ [&](const ast::Expression* expr) {
+ r2l.push_back(expr);
+ return ast::TraverseAction::Descend;
+ });
+ EXPECT_THAT(r2l, ElementsAre(root, b2, b1, b0, e));
+ }
}
TEST_F(TraverseExpressionsTest, DescendCallExpression) {
- std::vector<const ast::Expression*> e = {Expr(1), Expr(1), Expr(1), Expr(1)};
- std::vector<const ast::Expression*> c = {Call("a", e[0], e[1]),
- Call("b", e[2], e[3])};
- auto* root = Call("c", c[0], c[1]);
- {
- std::vector<const ast::Expression*> l2r;
- TraverseExpressions<TraverseOrder::LeftToRight>(
- root, Diagnostics(), [&](const ast::Expression* expr) {
- l2r.push_back(expr);
- return ast::TraverseAction::Descend;
- });
- EXPECT_THAT(l2r, ElementsAre(root, c[0], e[0], e[1], c[1], e[2], e[3]));
- }
- {
- std::vector<const ast::Expression*> r2l;
- TraverseExpressions<TraverseOrder::RightToLeft>(
- root, Diagnostics(), [&](const ast::Expression* expr) {
- r2l.push_back(expr);
- return ast::TraverseAction::Descend;
- });
- EXPECT_THAT(r2l, ElementsAre(root, c[1], e[3], e[2], c[0], e[1], e[0]));
- }
+ std::vector<const ast::Expression*> e = {Expr(1_i), Expr(1_i), Expr(1_i), Expr(1_i)};
+ std::vector<const ast::Expression*> c = {Call("a", e[0], e[1]), Call("b", e[2], e[3])};
+ auto* root = Call("c", c[0], c[1]);
+ {
+ std::vector<const ast::Expression*> l2r;
+ TraverseExpressions<TraverseOrder::LeftToRight>(root, Diagnostics(),
+ [&](const ast::Expression* expr) {
+ l2r.push_back(expr);
+ return ast::TraverseAction::Descend;
+ });
+ EXPECT_THAT(l2r, ElementsAre(root, c[0], e[0], e[1], c[1], e[2], e[3]));
+ }
+ {
+ std::vector<const ast::Expression*> r2l;
+ TraverseExpressions<TraverseOrder::RightToLeft>(root, Diagnostics(),
+ [&](const ast::Expression* expr) {
+ r2l.push_back(expr);
+ return ast::TraverseAction::Descend;
+ });
+ EXPECT_THAT(r2l, ElementsAre(root, c[1], e[3], e[2], c[0], e[1], e[0]));
+ }
}
// TODO(crbug.com/tint/1257): Test ignores member accessor 'member' field.
// Replace with the test below when fixed.
TEST_F(TraverseExpressionsTest, DescendMemberIndexExpression) {
- auto* e = Expr(1);
- auto* m = MemberAccessor(e, Expr("a"));
- auto* root = MemberAccessor(m, Expr("b"));
- {
- std::vector<const ast::Expression*> l2r;
- TraverseExpressions<TraverseOrder::LeftToRight>(
- root, Diagnostics(), [&](const ast::Expression* expr) {
- l2r.push_back(expr);
- return ast::TraverseAction::Descend;
- });
- EXPECT_THAT(l2r, ElementsAre(root, m, e));
- }
- {
- std::vector<const ast::Expression*> r2l;
- TraverseExpressions<TraverseOrder::RightToLeft>(
- root, Diagnostics(), [&](const ast::Expression* expr) {
- r2l.push_back(expr);
- return ast::TraverseAction::Descend;
- });
- EXPECT_THAT(r2l, ElementsAre(root, m, e));
- }
+ auto* e = Expr(1_i);
+ auto* m = MemberAccessor(e, Expr("a"));
+ auto* root = MemberAccessor(m, Expr("b"));
+ {
+ std::vector<const ast::Expression*> l2r;
+ TraverseExpressions<TraverseOrder::LeftToRight>(root, Diagnostics(),
+ [&](const ast::Expression* expr) {
+ l2r.push_back(expr);
+ return ast::TraverseAction::Descend;
+ });
+ EXPECT_THAT(l2r, ElementsAre(root, m, e));
+ }
+ {
+ std::vector<const ast::Expression*> r2l;
+ TraverseExpressions<TraverseOrder::RightToLeft>(root, Diagnostics(),
+ [&](const ast::Expression* expr) {
+ r2l.push_back(expr);
+ return ast::TraverseAction::Descend;
+ });
+ EXPECT_THAT(r2l, ElementsAre(root, m, e));
+ }
}
// TODO(crbug.com/tint/1257): The correct test for DescendMemberIndexExpression.
TEST_F(TraverseExpressionsTest, DISABLED_DescendMemberIndexExpression) {
- auto* e = Expr(1);
- std::vector<const ast::IdentifierExpression*> i = {Expr("a"), Expr("b")};
- auto* m = MemberAccessor(e, i[0]);
- auto* root = MemberAccessor(m, i[1]);
- {
- std::vector<const ast::Expression*> l2r;
- TraverseExpressions<TraverseOrder::LeftToRight>(
- root, Diagnostics(), [&](const ast::Expression* expr) {
- l2r.push_back(expr);
- return ast::TraverseAction::Descend;
- });
- EXPECT_THAT(l2r, ElementsAre(root, m, e, i[0], i[1]));
- }
- {
- std::vector<const ast::Expression*> r2l;
- TraverseExpressions<TraverseOrder::RightToLeft>(
- root, Diagnostics(), [&](const ast::Expression* expr) {
- r2l.push_back(expr);
- return ast::TraverseAction::Descend;
- });
- EXPECT_THAT(r2l, ElementsAre(root, i[1], m, i[0], e));
- }
+ auto* e = Expr(1_i);
+ std::vector<const ast::IdentifierExpression*> i = {Expr("a"), Expr("b")};
+ auto* m = MemberAccessor(e, i[0]);
+ auto* root = MemberAccessor(m, i[1]);
+ {
+ std::vector<const ast::Expression*> l2r;
+ TraverseExpressions<TraverseOrder::LeftToRight>(root, Diagnostics(),
+ [&](const ast::Expression* expr) {
+ l2r.push_back(expr);
+ return ast::TraverseAction::Descend;
+ });
+ EXPECT_THAT(l2r, ElementsAre(root, m, e, i[0], i[1]));
+ }
+ {
+ std::vector<const ast::Expression*> r2l;
+ TraverseExpressions<TraverseOrder::RightToLeft>(root, Diagnostics(),
+ [&](const ast::Expression* expr) {
+ r2l.push_back(expr);
+ return ast::TraverseAction::Descend;
+ });
+ EXPECT_THAT(r2l, ElementsAre(root, i[1], m, i[0], e));
+ }
}
TEST_F(TraverseExpressionsTest, DescendUnaryExpression) {
- auto* e = Expr(1);
- auto* u0 = AddressOf(e);
- auto* u1 = Deref(u0);
- auto* u2 = AddressOf(u1);
- auto* root = Deref(u2);
- {
- std::vector<const ast::Expression*> l2r;
- TraverseExpressions<TraverseOrder::LeftToRight>(
- root, Diagnostics(), [&](const ast::Expression* expr) {
- l2r.push_back(expr);
- return ast::TraverseAction::Descend;
- });
- EXPECT_THAT(l2r, ElementsAre(root, u2, u1, u0, e));
- }
- {
- std::vector<const ast::Expression*> r2l;
- TraverseExpressions<TraverseOrder::RightToLeft>(
- root, Diagnostics(), [&](const ast::Expression* expr) {
- r2l.push_back(expr);
- return ast::TraverseAction::Descend;
- });
- EXPECT_THAT(r2l, ElementsAre(root, u2, u1, u0, e));
- }
+ auto* e = Expr(1_i);
+ auto* u0 = AddressOf(e);
+ auto* u1 = Deref(u0);
+ auto* u2 = AddressOf(u1);
+ auto* root = Deref(u2);
+ {
+ std::vector<const ast::Expression*> l2r;
+ TraverseExpressions<TraverseOrder::LeftToRight>(root, Diagnostics(),
+ [&](const ast::Expression* expr) {
+ l2r.push_back(expr);
+ return ast::TraverseAction::Descend;
+ });
+ EXPECT_THAT(l2r, ElementsAre(root, u2, u1, u0, e));
+ }
+ {
+ std::vector<const ast::Expression*> r2l;
+ TraverseExpressions<TraverseOrder::RightToLeft>(root, Diagnostics(),
+ [&](const ast::Expression* expr) {
+ r2l.push_back(expr);
+ return ast::TraverseAction::Descend;
+ });
+ EXPECT_THAT(r2l, ElementsAre(root, u2, u1, u0, e));
+ }
}
TEST_F(TraverseExpressionsTest, Skip) {
- std::vector<const ast::Expression*> e = {Expr(1), Expr(1), Expr(1), Expr(1)};
- std::vector<const ast::Expression*> i = {IndexAccessor(e[0], e[1]),
- IndexAccessor(e[2], e[3])};
- auto* root = IndexAccessor(i[0], i[1]);
- std::vector<const ast::Expression*> order;
- TraverseExpressions<TraverseOrder::LeftToRight>(
- root, Diagnostics(), [&](const ast::Expression* expr) {
- order.push_back(expr);
- return expr == i[0] ? ast::TraverseAction::Skip
- : ast::TraverseAction::Descend;
- });
- EXPECT_THAT(order, ElementsAre(root, i[0], i[1], e[2], e[3]));
+ std::vector<const ast::Expression*> e = {Expr(1_i), Expr(1_i), Expr(1_i), Expr(1_i)};
+ std::vector<const ast::Expression*> i = {IndexAccessor(e[0], e[1]), IndexAccessor(e[2], e[3])};
+ auto* root = IndexAccessor(i[0], i[1]);
+ std::vector<const ast::Expression*> order;
+ TraverseExpressions<TraverseOrder::LeftToRight>(
+ root, Diagnostics(), [&](const ast::Expression* expr) {
+ order.push_back(expr);
+ return expr == i[0] ? ast::TraverseAction::Skip : ast::TraverseAction::Descend;
+ });
+ EXPECT_THAT(order, ElementsAre(root, i[0], i[1], e[2], e[3]));
}
TEST_F(TraverseExpressionsTest, Stop) {
- std::vector<const ast::Expression*> e = {Expr(1), Expr(1), Expr(1), Expr(1)};
- std::vector<const ast::Expression*> i = {IndexAccessor(e[0], e[1]),
- IndexAccessor(e[2], e[3])};
- auto* root = IndexAccessor(i[0], i[1]);
- std::vector<const ast::Expression*> order;
- TraverseExpressions<TraverseOrder::LeftToRight>(
- root, Diagnostics(), [&](const ast::Expression* expr) {
- order.push_back(expr);
- return expr == i[0] ? ast::TraverseAction::Stop
- : ast::TraverseAction::Descend;
- });
- EXPECT_THAT(order, ElementsAre(root, i[0]));
+ std::vector<const ast::Expression*> e = {Expr(1_i), Expr(1_i), Expr(1_i), Expr(1_i)};
+ std::vector<const ast::Expression*> i = {IndexAccessor(e[0], e[1]), IndexAccessor(e[2], e[3])};
+ auto* root = IndexAccessor(i[0], i[1]);
+ std::vector<const ast::Expression*> order;
+ TraverseExpressions<TraverseOrder::LeftToRight>(
+ root, Diagnostics(), [&](const ast::Expression* expr) {
+ order.push_back(expr);
+ return expr == i[0] ? ast::TraverseAction::Stop : ast::TraverseAction::Descend;
+ });
+ EXPECT_THAT(order, ElementsAre(root, i[0]));
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/ast/type.h b/chromium/third_party/dawn/src/tint/ast/type.h
index f9c8e3146ab..4fee565e1a5 100644
--- a/chromium/third_party/dawn/src/tint/ast/type.h
+++ b/chromium/third_party/dawn/src/tint/ast/type.h
@@ -29,21 +29,21 @@ class SymbolTable;
namespace tint::ast {
/// Base class for a type in the system
class Type : public Castable<Type, Node> {
- public:
- /// Move constructor
- Type(Type&&);
- ~Type() override;
-
- /// @param symbols the program's symbol table
- /// @returns the name for this type that closely resembles how it would be
- /// declared in WGSL.
- virtual std::string FriendlyName(const SymbolTable& symbols) const = 0;
-
- protected:
- /// Constructor
- /// @param pid the identifier of the program that owns this node
- /// @param src the source of this node
- Type(ProgramID pid, const Source& src);
+ public:
+ /// Move constructor
+ Type(Type&&);
+ ~Type() override;
+
+ /// @param symbols the program's symbol table
+ /// @returns the name for this type that closely resembles how it would be
+ /// declared in WGSL.
+ virtual std::string FriendlyName(const SymbolTable& symbols) const = 0;
+
+ protected:
+ /// Constructor
+ /// @param pid the identifier of the program that owns this node
+ /// @param src the source of this node
+ Type(ProgramID pid, const Source& src);
};
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/type_decl.cc b/chromium/third_party/dawn/src/tint/ast/type_decl.cc
index 04c221c1dbf..a1a06058464 100644
--- a/chromium/third_party/dawn/src/tint/ast/type_decl.cc
+++ b/chromium/third_party/dawn/src/tint/ast/type_decl.cc
@@ -20,9 +20,8 @@ TINT_INSTANTIATE_TYPEINFO(tint::ast::TypeDecl);
namespace tint::ast {
-TypeDecl::TypeDecl(ProgramID pid, const Source& src, Symbol n)
- : Base(pid, src), name(n) {
- TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, name, program_id);
+TypeDecl::TypeDecl(ProgramID pid, const Source& src, Symbol n) : Base(pid, src), name(n) {
+ TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, name, program_id);
}
TypeDecl::TypeDecl(TypeDecl&&) = default;
diff --git a/chromium/third_party/dawn/src/tint/ast/type_decl.h b/chromium/third_party/dawn/src/tint/ast/type_decl.h
index de3bd0a1d87..2b8487a4cae 100644
--- a/chromium/third_party/dawn/src/tint/ast/type_decl.h
+++ b/chromium/third_party/dawn/src/tint/ast/type_decl.h
@@ -23,19 +23,19 @@ namespace tint::ast {
/// The base class for type declarations.
class TypeDecl : public Castable<TypeDecl, Node> {
- public:
- /// Create a new struct statement
- /// @param pid the identifier of the program that owns this node
- /// @param src the source of this node for the import statement
- /// @param name The name of the structure
- TypeDecl(ProgramID pid, const Source& src, Symbol name);
- /// Move constructor
- TypeDecl(TypeDecl&&);
-
- ~TypeDecl() override;
-
- /// The name of the type declaration
- const Symbol name;
+ public:
+ /// Create a new struct statement
+ /// @param pid the identifier of the program that owns this node
+ /// @param src the source of this node for the import statement
+ /// @param name The name of the structure
+ TypeDecl(ProgramID pid, const Source& src, Symbol name);
+ /// Move constructor
+ TypeDecl(TypeDecl&&);
+
+ ~TypeDecl() override;
+
+ /// The name of the type declaration
+ const Symbol name;
};
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/type_name.cc b/chromium/third_party/dawn/src/tint/ast/type_name.cc
index 8e84a555d0c..8eb7a1ad003 100644
--- a/chromium/third_party/dawn/src/tint/ast/type_name.cc
+++ b/chromium/third_party/dawn/src/tint/ast/type_name.cc
@@ -20,21 +20,20 @@ TINT_INSTANTIATE_TYPEINFO(tint::ast::TypeName);
namespace tint::ast {
-TypeName::TypeName(ProgramID pid, const Source& src, Symbol n)
- : Base(pid, src), name(n) {}
+TypeName::TypeName(ProgramID pid, const Source& src, Symbol n) : Base(pid, src), name(n) {}
TypeName::~TypeName() = default;
TypeName::TypeName(TypeName&&) = default;
std::string TypeName::FriendlyName(const SymbolTable& symbols) const {
- return symbols.NameFor(name);
+ return symbols.NameFor(name);
}
const TypeName* TypeName::Clone(CloneContext* ctx) const {
- auto src = ctx->Clone(source);
- auto n = ctx->Clone(name);
- return ctx->dst->create<TypeName>(src, n);
+ auto src = ctx->Clone(source);
+ auto n = ctx->Clone(name);
+ return ctx->dst->create<TypeName>(src, n);
}
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/type_name.h b/chromium/third_party/dawn/src/tint/ast/type_name.h
index bd3968aa69a..3bb556a9b90 100644
--- a/chromium/third_party/dawn/src/tint/ast/type_name.h
+++ b/chromium/third_party/dawn/src/tint/ast/type_name.h
@@ -23,29 +23,29 @@ namespace tint::ast {
/// A named type (i.e. struct or alias)
class TypeName final : public Castable<TypeName, Type> {
- public:
- /// Constructor
- /// @param pid the identifier of the program that owns this node
- /// @param src the source of this node
- /// @param name the type name
- TypeName(ProgramID pid, const Source& src, Symbol name);
- /// Move constructor
- TypeName(TypeName&&);
- /// Destructor
- ~TypeName() override;
-
- /// @param symbols the program's symbol table
- /// @returns the name for this type that closely resembles how it would be
- /// declared in WGSL.
- std::string FriendlyName(const SymbolTable& symbols) const override;
-
- /// Clones this type and all transitive types using the `CloneContext` `ctx`.
- /// @param ctx the clone context
- /// @return the newly cloned type
- const TypeName* Clone(CloneContext* ctx) const override;
-
- /// The type name
- Symbol name;
+ public:
+ /// Constructor
+ /// @param pid the identifier of the program that owns this node
+ /// @param src the source of this node
+ /// @param name the type name
+ TypeName(ProgramID pid, const Source& src, Symbol name);
+ /// Move constructor
+ TypeName(TypeName&&);
+ /// Destructor
+ ~TypeName() override;
+
+ /// @param symbols the program's symbol table
+ /// @returns the name for this type that closely resembles how it would be
+ /// declared in WGSL.
+ std::string FriendlyName(const SymbolTable& symbols) const override;
+
+ /// Clones this type and all transitive types using the `CloneContext` `ctx`.
+ /// @param ctx the clone context
+ /// @return the newly cloned type
+ const TypeName* Clone(CloneContext* ctx) const override;
+
+ /// The type name
+ Symbol name;
};
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/u32.cc b/chromium/third_party/dawn/src/tint/ast/u32.cc
index 6cabe5b6303..ac9c4908f06 100644
--- a/chromium/third_party/dawn/src/tint/ast/u32.cc
+++ b/chromium/third_party/dawn/src/tint/ast/u32.cc
@@ -27,12 +27,12 @@ U32::~U32() = default;
U32::U32(U32&&) = default;
std::string U32::FriendlyName(const SymbolTable&) const {
- return "u32";
+ return "u32";
}
const U32* U32::Clone(CloneContext* ctx) const {
- auto src = ctx->Clone(source);
- return ctx->dst->create<U32>(src);
+ auto src = ctx->Clone(source);
+ return ctx->dst->create<U32>(src);
}
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/u32.h b/chromium/third_party/dawn/src/tint/ast/u32.h
index ede477b42b8..8ede11cbb49 100644
--- a/chromium/third_party/dawn/src/tint/ast/u32.h
+++ b/chromium/third_party/dawn/src/tint/ast/u32.h
@@ -23,24 +23,24 @@ namespace tint::ast {
 /// An unsigned int 32 type.
class U32 final : public Castable<U32, Type> {
- public:
- /// Constructor
- /// @param pid the identifier of the program that owns this node
- /// @param src the source of this node
- U32(ProgramID pid, const Source& src);
- /// Move constructor
- U32(U32&&);
- ~U32() override;
-
- /// @param symbols the program's symbol table
- /// @returns the name for this type that closely resembles how it would be
- /// declared in WGSL.
- std::string FriendlyName(const SymbolTable& symbols) const override;
-
- /// Clones this type and all transitive types using the `CloneContext` `ctx`.
- /// @param ctx the clone context
- /// @return the newly cloned type
- const U32* Clone(CloneContext* ctx) const override;
+ public:
+ /// Constructor
+ /// @param pid the identifier of the program that owns this node
+ /// @param src the source of this node
+ U32(ProgramID pid, const Source& src);
+ /// Move constructor
+ U32(U32&&);
+ ~U32() override;
+
+ /// @param symbols the program's symbol table
+ /// @returns the name for this type that closely resembles how it would be
+ /// declared in WGSL.
+ std::string FriendlyName(const SymbolTable& symbols) const override;
+
+ /// Clones this type and all transitive types using the `CloneContext` `ctx`.
+ /// @param ctx the clone context
+ /// @return the newly cloned type
+ const U32* Clone(CloneContext* ctx) const override;
};
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/u32_test.cc b/chromium/third_party/dawn/src/tint/ast/u32_test.cc
index 30a8840b55a..a3a380b329c 100644
--- a/chromium/third_party/dawn/src/tint/ast/u32_test.cc
+++ b/chromium/third_party/dawn/src/tint/ast/u32_test.cc
@@ -22,8 +22,8 @@ namespace {
using AstU32Test = TestHelper;
TEST_F(AstU32Test, FriendlyName) {
- auto* u = create<U32>();
- EXPECT_EQ(u->FriendlyName(Symbols()), "u32");
+ auto* u = create<U32>();
+ EXPECT_EQ(u->FriendlyName(Symbols()), "u32");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/ast/uint_literal_expression.cc b/chromium/third_party/dawn/src/tint/ast/uint_literal_expression.cc
deleted file mode 100644
index 7af33251287..00000000000
--- a/chromium/third_party/dawn/src/tint/ast/uint_literal_expression.cc
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2020 The Tint Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "src/tint/ast/uint_literal_expression.h"
-
-#include "src/tint/program_builder.h"
-
-TINT_INSTANTIATE_TYPEINFO(tint::ast::UintLiteralExpression);
-
-namespace tint::ast {
-
-UintLiteralExpression::UintLiteralExpression(ProgramID pid,
- const Source& src,
- uint32_t val)
- : Base(pid, src), value(val) {}
-
-UintLiteralExpression::~UintLiteralExpression() = default;
-
-uint32_t UintLiteralExpression::ValueAsU32() const {
- return value;
-}
-
-const UintLiteralExpression* UintLiteralExpression::Clone(
- CloneContext* ctx) const {
- // Clone arguments outside of create() call to have deterministic ordering
- auto src = ctx->Clone(source);
- return ctx->dst->create<UintLiteralExpression>(src, value);
-}
-
-} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/uint_literal_expression.h b/chromium/third_party/dawn/src/tint/ast/uint_literal_expression.h
deleted file mode 100644
index 35ef5fa5a26..00000000000
--- a/chromium/third_party/dawn/src/tint/ast/uint_literal_expression.h
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2020 The Tint Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef SRC_TINT_AST_UINT_LITERAL_EXPRESSION_H_
-#define SRC_TINT_AST_UINT_LITERAL_EXPRESSION_H_
-
-#include <string>
-
-#include "src/tint/ast/int_literal_expression.h"
-
-namespace tint::ast {
-
-/// A uint literal
-class UintLiteralExpression final
- : public Castable<UintLiteralExpression, IntLiteralExpression> {
- public:
- /// Constructor
- /// @param pid the identifier of the program that owns this node
- /// @param src the source of this node
- /// @param value the uint literals value
- UintLiteralExpression(ProgramID pid, const Source& src, uint32_t value);
- ~UintLiteralExpression() override;
-
- /// @returns the literal value as a u32
- uint32_t ValueAsU32() const override;
-
- /// Clones this node and all transitive child nodes using the `CloneContext`
- /// `ctx`.
- /// @param ctx the clone context
- /// @return the newly cloned node
- const UintLiteralExpression* Clone(CloneContext* ctx) const override;
-
- /// The int literal value
- const uint32_t value;
-};
-
-} // namespace tint::ast
-
-#endif // SRC_TINT_AST_UINT_LITERAL_EXPRESSION_H_
diff --git a/chromium/third_party/dawn/src/tint/ast/unary_op.cc b/chromium/third_party/dawn/src/tint/ast/unary_op.cc
index b90c71dc9f8..e0afe8d120f 100644
--- a/chromium/third_party/dawn/src/tint/ast/unary_op.cc
+++ b/chromium/third_party/dawn/src/tint/ast/unary_op.cc
@@ -17,29 +17,29 @@
namespace tint::ast {
std::ostream& operator<<(std::ostream& out, UnaryOp mod) {
- switch (mod) {
- case UnaryOp::kAddressOf: {
- out << "address-of";
- break;
+ switch (mod) {
+ case UnaryOp::kAddressOf: {
+ out << "address-of";
+ break;
+ }
+ case UnaryOp::kComplement: {
+ out << "complement";
+ break;
+ }
+ case UnaryOp::kIndirection: {
+ out << "indirection";
+ break;
+ }
+ case UnaryOp::kNegation: {
+ out << "negation";
+ break;
+ }
+ case UnaryOp::kNot: {
+ out << "not";
+ break;
+ }
}
- case UnaryOp::kComplement: {
- out << "complement";
- break;
- }
- case UnaryOp::kIndirection: {
- out << "indirection";
- break;
- }
- case UnaryOp::kNegation: {
- out << "negation";
- break;
- }
- case UnaryOp::kNot: {
- out << "not";
- break;
- }
- }
- return out;
+ return out;
}
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/unary_op.h b/chromium/third_party/dawn/src/tint/ast/unary_op.h
index 93b67222050..a861af30378 100644
--- a/chromium/third_party/dawn/src/tint/ast/unary_op.h
+++ b/chromium/third_party/dawn/src/tint/ast/unary_op.h
@@ -21,11 +21,11 @@ namespace tint::ast {
/// The unary op
enum class UnaryOp {
- kAddressOf, // &EXPR
- kComplement, // ~EXPR
- kIndirection, // *EXPR
- kNegation, // -EXPR
- kNot, // !EXPR
+ kAddressOf, // &EXPR
+ kComplement, // ~EXPR
+ kIndirection, // *EXPR
+ kNegation, // -EXPR
+ kNot, // !EXPR
};
/// @param out the std::ostream to write to
diff --git a/chromium/third_party/dawn/src/tint/ast/unary_op_expression.cc b/chromium/third_party/dawn/src/tint/ast/unary_op_expression.cc
index 7636a5450a4..80e4e9061a2 100644
--- a/chromium/third_party/dawn/src/tint/ast/unary_op_expression.cc
+++ b/chromium/third_party/dawn/src/tint/ast/unary_op_expression.cc
@@ -25,8 +25,8 @@ UnaryOpExpression::UnaryOpExpression(ProgramID pid,
UnaryOp o,
const Expression* e)
: Base(pid, src), op(o), expr(e) {
- TINT_ASSERT(AST, expr);
- TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, expr, program_id);
+ TINT_ASSERT(AST, expr);
+ TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, expr, program_id);
}
UnaryOpExpression::UnaryOpExpression(UnaryOpExpression&&) = default;
@@ -34,10 +34,10 @@ UnaryOpExpression::UnaryOpExpression(UnaryOpExpression&&) = default;
UnaryOpExpression::~UnaryOpExpression() = default;
const UnaryOpExpression* UnaryOpExpression::Clone(CloneContext* ctx) const {
- // Clone arguments outside of create() call to have deterministic ordering
- auto src = ctx->Clone(source);
- auto* e = ctx->Clone(expr);
- return ctx->dst->create<UnaryOpExpression>(src, op, e);
+ // Clone arguments outside of create() call to have deterministic ordering
+ auto src = ctx->Clone(source);
+ auto* e = ctx->Clone(expr);
+ return ctx->dst->create<UnaryOpExpression>(src, op, e);
}
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/unary_op_expression.h b/chromium/third_party/dawn/src/tint/ast/unary_op_expression.h
index 11641084909..22093fbd1e2 100644
--- a/chromium/third_party/dawn/src/tint/ast/unary_op_expression.h
+++ b/chromium/third_party/dawn/src/tint/ast/unary_op_expression.h
@@ -22,31 +22,31 @@ namespace tint::ast {
/// A unary op expression
class UnaryOpExpression final : public Castable<UnaryOpExpression, Expression> {
- public:
- /// Constructor
- /// @param program_id the identifier of the program that owns this node
- /// @param source the unary op expression source
- /// @param op the op
- /// @param expr the expr
- UnaryOpExpression(ProgramID program_id,
- const Source& source,
- UnaryOp op,
- const Expression* expr);
- /// Move constructor
- UnaryOpExpression(UnaryOpExpression&&);
- ~UnaryOpExpression() override;
-
- /// Clones this node and all transitive child nodes using the `CloneContext`
- /// `ctx`.
- /// @param ctx the clone context
- /// @return the newly cloned node
- const UnaryOpExpression* Clone(CloneContext* ctx) const override;
-
- /// The op
- const UnaryOp op;
-
- /// The expression
- const Expression* const expr;
+ public:
+ /// Constructor
+ /// @param program_id the identifier of the program that owns this node
+ /// @param source the unary op expression source
+ /// @param op the op
+ /// @param expr the expr
+ UnaryOpExpression(ProgramID program_id,
+ const Source& source,
+ UnaryOp op,
+ const Expression* expr);
+ /// Move constructor
+ UnaryOpExpression(UnaryOpExpression&&);
+ ~UnaryOpExpression() override;
+
+ /// Clones this node and all transitive child nodes using the `CloneContext`
+ /// `ctx`.
+ /// @param ctx the clone context
+ /// @return the newly cloned node
+ const UnaryOpExpression* Clone(CloneContext* ctx) const override;
+
+ /// The op
+ const UnaryOp op;
+
+ /// The expression
+ const Expression* const expr;
};
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/unary_op_expression_test.cc b/chromium/third_party/dawn/src/tint/ast/unary_op_expression_test.cc
index 5baf5daf6ae..e1ec8dd6256 100644
--- a/chromium/third_party/dawn/src/tint/ast/unary_op_expression_test.cc
+++ b/chromium/third_party/dawn/src/tint/ast/unary_op_expression_test.cc
@@ -23,45 +23,44 @@ namespace {
using UnaryOpExpressionTest = TestHelper;
TEST_F(UnaryOpExpressionTest, Creation) {
- auto* ident = Expr("ident");
+ auto* ident = Expr("ident");
- auto* u = create<UnaryOpExpression>(UnaryOp::kNot, ident);
- EXPECT_EQ(u->op, UnaryOp::kNot);
- EXPECT_EQ(u->expr, ident);
+ auto* u = create<UnaryOpExpression>(UnaryOp::kNot, ident);
+ EXPECT_EQ(u->op, UnaryOp::kNot);
+ EXPECT_EQ(u->expr, ident);
}
TEST_F(UnaryOpExpressionTest, Creation_WithSource) {
- auto* ident = Expr("ident");
- auto* u = create<UnaryOpExpression>(Source{Source::Location{20, 2}},
- UnaryOp::kNot, ident);
- auto src = u->source;
- EXPECT_EQ(src.range.begin.line, 20u);
- EXPECT_EQ(src.range.begin.column, 2u);
+ auto* ident = Expr("ident");
+ auto* u = create<UnaryOpExpression>(Source{Source::Location{20, 2}}, UnaryOp::kNot, ident);
+ auto src = u->source;
+ EXPECT_EQ(src.range.begin.line, 20u);
+ EXPECT_EQ(src.range.begin.column, 2u);
}
TEST_F(UnaryOpExpressionTest, IsUnaryOp) {
- auto* ident = Expr("ident");
- auto* u = create<UnaryOpExpression>(UnaryOp::kNot, ident);
- EXPECT_TRUE(u->Is<UnaryOpExpression>());
+ auto* ident = Expr("ident");
+ auto* u = create<UnaryOpExpression>(UnaryOp::kNot, ident);
+ EXPECT_TRUE(u->Is<UnaryOpExpression>());
}
TEST_F(UnaryOpExpressionTest, Assert_Null_Expression) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b;
- b.create<UnaryOpExpression>(UnaryOp::kNot, nullptr);
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b;
+ b.create<UnaryOpExpression>(UnaryOp::kNot, nullptr);
+ },
+ "internal compiler error");
}
TEST_F(UnaryOpExpressionTest, Assert_DifferentProgramID_Expression) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b1;
- ProgramBuilder b2;
- b1.create<UnaryOpExpression>(UnaryOp::kNot, b2.Expr(true));
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b1;
+ ProgramBuilder b2;
+ b1.create<UnaryOpExpression>(UnaryOp::kNot, b2.Expr(true));
+ },
+ "internal compiler error");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/ast/variable.cc b/chromium/third_party/dawn/src/tint/ast/variable.cc
index cea9f847f5b..26991f252a2 100644
--- a/chromium/third_party/dawn/src/tint/ast/variable.cc
+++ b/chromium/third_party/dawn/src/tint/ast/variable.cc
@@ -40,10 +40,10 @@ Variable::Variable(ProgramID pid,
attributes(std::move(attrs)),
declared_storage_class(dsc),
declared_access(da) {
- TINT_ASSERT(AST, symbol.IsValid());
- TINT_ASSERT(AST, is_overridable ? is_const : true);
- TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, symbol, program_id);
- TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, constructor, program_id);
+ TINT_ASSERT(AST, symbol.IsValid());
+ TINT_ASSERT(AST, is_overridable ? is_const : true);
+ TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, symbol, program_id);
+ TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, constructor, program_id);
}
Variable::Variable(Variable&&) = default;
@@ -51,27 +51,26 @@ Variable::Variable(Variable&&) = default;
Variable::~Variable() = default;
VariableBindingPoint Variable::BindingPoint() const {
- const GroupAttribute* group = nullptr;
- const BindingAttribute* binding = nullptr;
- for (auto* attr : attributes) {
- if (auto* g = attr->As<GroupAttribute>()) {
- group = g;
- } else if (auto* b = attr->As<BindingAttribute>()) {
- binding = b;
+ const GroupAttribute* group = nullptr;
+ const BindingAttribute* binding = nullptr;
+ for (auto* attr : attributes) {
+ if (auto* g = attr->As<GroupAttribute>()) {
+ group = g;
+ } else if (auto* b = attr->As<BindingAttribute>()) {
+ binding = b;
+ }
}
- }
- return VariableBindingPoint{group, binding};
+ return VariableBindingPoint{group, binding};
}
const Variable* Variable::Clone(CloneContext* ctx) const {
- auto src = ctx->Clone(source);
- auto sym = ctx->Clone(symbol);
- auto* ty = ctx->Clone(type);
- auto* ctor = ctx->Clone(constructor);
- auto attrs = ctx->Clone(attributes);
- return ctx->dst->create<Variable>(src, sym, declared_storage_class,
- declared_access, ty, is_const,
- is_overridable, ctor, attrs);
+ auto src = ctx->Clone(source);
+ auto sym = ctx->Clone(symbol);
+ auto* ty = ctx->Clone(type);
+ auto* ctor = ctx->Clone(constructor);
+ auto attrs = ctx->Clone(attributes);
+ return ctx->dst->create<Variable>(src, sym, declared_storage_class, declared_access, ty,
+ is_const, is_overridable, ctor, attrs);
}
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/variable.h b/chromium/third_party/dawn/src/tint/ast/variable.h
index 208f0b138d2..58022558f51 100644
--- a/chromium/third_party/dawn/src/tint/ast/variable.h
+++ b/chromium/third_party/dawn/src/tint/ast/variable.h
@@ -35,14 +35,14 @@ namespace tint::ast {
/// VariableBindingPoint holds a group and binding attribute.
struct VariableBindingPoint {
- /// The `@group` part of the binding point
- const GroupAttribute* group = nullptr;
- /// The `@binding` part of the binding point
- const BindingAttribute* binding = nullptr;
-
- /// @returns true if the BindingPoint has a valid group and binding
- /// attribute.
- inline operator bool() const { return group && binding; }
+ /// The `@group` part of the binding point
+ const GroupAttribute* group = nullptr;
+ /// The `@binding` part of the binding point
+ const BindingAttribute* binding = nullptr;
+
+ /// @returns true if the BindingPoint has a valid group and binding
+ /// attribute.
+ inline operator bool() const { return group && binding; }
};
/// A Variable statement.
@@ -115,67 +115,67 @@ struct VariableBindingPoint {
/// - "let" is always StorageClass::kNone.
/// - formal parameter is always StorageClass::kNone.
class Variable final : public Castable<Variable, Node> {
- public:
- /// Create a variable
- /// @param program_id the identifier of the program that owns this node
- /// @param source the variable source
- /// @param sym the variable symbol
- /// @param declared_storage_class the declared storage class
- /// @param declared_access the declared access control
- /// @param type the declared variable type
- /// @param is_const true if the variable is const
- /// @param is_overridable true if the variable is pipeline-overridable
- /// @param constructor the constructor expression
- /// @param attributes the variable attributes
- Variable(ProgramID program_id,
- const Source& source,
- const Symbol& sym,
- StorageClass declared_storage_class,
- Access declared_access,
- const ast::Type* type,
- bool is_const,
- bool is_overridable,
- const Expression* constructor,
- AttributeList attributes);
- /// Move constructor
- Variable(Variable&&);
-
- ~Variable() override;
-
- /// @returns the binding point information for the variable
- VariableBindingPoint BindingPoint() const;
-
- /// Clones this node and all transitive child nodes using the `CloneContext`
- /// `ctx`.
- /// @param ctx the clone context
- /// @return the newly cloned node
- const Variable* Clone(CloneContext* ctx) const override;
-
- /// The variable symbol
- const Symbol symbol;
-
- /// The declared variable type. This is null if the type is inferred, e.g.:
- /// let f = 1.0;
- /// var i = 1;
- const ast::Type* const type;
-
- /// True if this is a constant, false otherwise
- const bool is_const;
-
- /// True if this is a pipeline-overridable constant, false otherwise
- const bool is_overridable;
-
- /// The constructor expression or nullptr if none set
- const Expression* const constructor;
-
- /// The attributes attached to this variable
- const AttributeList attributes;
-
- /// The declared storage class
- const StorageClass declared_storage_class;
-
- /// The declared access control
- const Access declared_access;
+ public:
+ /// Create a variable
+ /// @param program_id the identifier of the program that owns this node
+ /// @param source the variable source
+ /// @param sym the variable symbol
+ /// @param declared_storage_class the declared storage class
+ /// @param declared_access the declared access control
+ /// @param type the declared variable type
+ /// @param is_const true if the variable is const
+ /// @param is_overridable true if the variable is pipeline-overridable
+ /// @param constructor the constructor expression
+ /// @param attributes the variable attributes
+ Variable(ProgramID program_id,
+ const Source& source,
+ const Symbol& sym,
+ StorageClass declared_storage_class,
+ Access declared_access,
+ const ast::Type* type,
+ bool is_const,
+ bool is_overridable,
+ const Expression* constructor,
+ AttributeList attributes);
+ /// Move constructor
+ Variable(Variable&&);
+
+ ~Variable() override;
+
+ /// @returns the binding point information for the variable
+ VariableBindingPoint BindingPoint() const;
+
+ /// Clones this node and all transitive child nodes using the `CloneContext`
+ /// `ctx`.
+ /// @param ctx the clone context
+ /// @return the newly cloned node
+ const Variable* Clone(CloneContext* ctx) const override;
+
+ /// The variable symbol
+ const Symbol symbol;
+
+ /// The declared variable type. This is null if the type is inferred, e.g.:
+ /// let f = 1.0;
+ /// var i = 1;
+ const ast::Type* const type;
+
+ /// True if this is a constant, false otherwise
+ const bool is_const;
+
+ /// True if this is a pipeline-overridable constant, false otherwise
+ const bool is_overridable;
+
+ /// The constructor expression or nullptr if none set
+ const Expression* const constructor;
+
+ /// The attributes attached to this variable
+ const AttributeList attributes;
+
+ /// The declared storage class
+ const StorageClass declared_storage_class;
+
+ /// The declared access control
+ const Access declared_access;
};
/// A list of variables
diff --git a/chromium/third_party/dawn/src/tint/ast/variable_decl_statement.cc b/chromium/third_party/dawn/src/tint/ast/variable_decl_statement.cc
index 16d11068368..fdde149f033 100644
--- a/chromium/third_party/dawn/src/tint/ast/variable_decl_statement.cc
+++ b/chromium/third_party/dawn/src/tint/ast/variable_decl_statement.cc
@@ -20,24 +20,21 @@ TINT_INSTANTIATE_TYPEINFO(tint::ast::VariableDeclStatement);
namespace tint::ast {
-VariableDeclStatement::VariableDeclStatement(ProgramID pid,
- const Source& src,
- const Variable* var)
+VariableDeclStatement::VariableDeclStatement(ProgramID pid, const Source& src, const Variable* var)
: Base(pid, src), variable(var) {
- TINT_ASSERT(AST, variable);
- TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, variable, program_id);
+ TINT_ASSERT(AST, variable);
+ TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, variable, program_id);
}
VariableDeclStatement::VariableDeclStatement(VariableDeclStatement&&) = default;
VariableDeclStatement::~VariableDeclStatement() = default;
-const VariableDeclStatement* VariableDeclStatement::Clone(
- CloneContext* ctx) const {
- // Clone arguments outside of create() call to have deterministic ordering
- auto src = ctx->Clone(source);
- auto* var = ctx->Clone(variable);
- return ctx->dst->create<VariableDeclStatement>(src, var);
+const VariableDeclStatement* VariableDeclStatement::Clone(CloneContext* ctx) const {
+ // Clone arguments outside of create() call to have deterministic ordering
+ auto src = ctx->Clone(source);
+ auto* var = ctx->Clone(variable);
+ return ctx->dst->create<VariableDeclStatement>(src, var);
}
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/variable_decl_statement.h b/chromium/third_party/dawn/src/tint/ast/variable_decl_statement.h
index 2edee636379..3f3ae273e3f 100644
--- a/chromium/third_party/dawn/src/tint/ast/variable_decl_statement.h
+++ b/chromium/third_party/dawn/src/tint/ast/variable_decl_statement.h
@@ -21,28 +21,25 @@
namespace tint::ast {
/// A variable declaration statement
-class VariableDeclStatement final
- : public Castable<VariableDeclStatement, Statement> {
- public:
- /// Constructor
- /// @param program_id the identifier of the program that owns this node
- /// @param source the variable statement source
- /// @param variable the variable
- VariableDeclStatement(ProgramID program_id,
- const Source& source,
- const Variable* variable);
- /// Move constructor
- VariableDeclStatement(VariableDeclStatement&&);
- ~VariableDeclStatement() override;
-
- /// Clones this node and all transitive child nodes using the `CloneContext`
- /// `ctx`.
- /// @param ctx the clone context
- /// @return the newly cloned node
- const VariableDeclStatement* Clone(CloneContext* ctx) const override;
-
- /// The variable
- const Variable* const variable;
+class VariableDeclStatement final : public Castable<VariableDeclStatement, Statement> {
+ public:
+ /// Constructor
+ /// @param program_id the identifier of the program that owns this node
+ /// @param source the variable statement source
+ /// @param variable the variable
+ VariableDeclStatement(ProgramID program_id, const Source& source, const Variable* variable);
+ /// Move constructor
+ VariableDeclStatement(VariableDeclStatement&&);
+ ~VariableDeclStatement() override;
+
+ /// Clones this node and all transitive child nodes using the `CloneContext`
+ /// `ctx`.
+ /// @param ctx the clone context
+ /// @return the newly cloned node
+ const VariableDeclStatement* Clone(CloneContext* ctx) const override;
+
+ /// The variable
+ const Variable* const variable;
};
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/variable_decl_statement_test.cc b/chromium/third_party/dawn/src/tint/ast/variable_decl_statement_test.cc
index e628d96b837..2cd4d4dd758 100644
--- a/chromium/third_party/dawn/src/tint/ast/variable_decl_statement_test.cc
+++ b/chromium/third_party/dawn/src/tint/ast/variable_decl_statement_test.cc
@@ -23,47 +23,45 @@ namespace {
using VariableDeclStatementTest = TestHelper;
TEST_F(VariableDeclStatementTest, Creation) {
- auto* var = Var("a", ty.f32(), StorageClass::kNone);
+ auto* var = Var("a", ty.f32(), StorageClass::kNone);
- auto* stmt = create<VariableDeclStatement>(var);
- EXPECT_EQ(stmt->variable, var);
+ auto* stmt = create<VariableDeclStatement>(var);
+ EXPECT_EQ(stmt->variable, var);
}
TEST_F(VariableDeclStatementTest, Creation_WithSource) {
- auto* var = Var("a", ty.f32(), StorageClass::kNone);
+ auto* var = Var("a", ty.f32(), StorageClass::kNone);
- auto* stmt =
- create<VariableDeclStatement>(Source{Source::Location{20, 2}}, var);
- auto src = stmt->source;
- EXPECT_EQ(src.range.begin.line, 20u);
- EXPECT_EQ(src.range.begin.column, 2u);
+ auto* stmt = create<VariableDeclStatement>(Source{Source::Location{20, 2}}, var);
+ auto src = stmt->source;
+ EXPECT_EQ(src.range.begin.line, 20u);
+ EXPECT_EQ(src.range.begin.column, 2u);
}
TEST_F(VariableDeclStatementTest, IsVariableDecl) {
- auto* var = Var("a", ty.f32(), StorageClass::kNone);
+ auto* var = Var("a", ty.f32(), StorageClass::kNone);
- auto* stmt = create<VariableDeclStatement>(var);
- EXPECT_TRUE(stmt->Is<VariableDeclStatement>());
+ auto* stmt = create<VariableDeclStatement>(var);
+ EXPECT_TRUE(stmt->Is<VariableDeclStatement>());
}
TEST_F(VariableDeclStatementTest, Assert_Null_Variable) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b;
- b.create<VariableDeclStatement>(nullptr);
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b;
+ b.create<VariableDeclStatement>(nullptr);
+ },
+ "internal compiler error");
}
TEST_F(VariableDeclStatementTest, Assert_DifferentProgramID_Variable) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b1;
- ProgramBuilder b2;
- b1.create<VariableDeclStatement>(
- b2.Var("a", b2.ty.f32(), StorageClass::kNone));
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b1;
+ ProgramBuilder b2;
+ b1.create<VariableDeclStatement>(b2.Var("a", b2.ty.f32(), StorageClass::kNone));
+ },
+ "internal compiler error");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/ast/variable_test.cc b/chromium/third_party/dawn/src/tint/ast/variable_test.cc
index 334bcb30ae4..e62ec1eb053 100644
--- a/chromium/third_party/dawn/src/tint/ast/variable_test.cc
+++ b/chromium/third_party/dawn/src/tint/ast/variable_test.cc
@@ -17,137 +17,136 @@
#include "src/tint/ast/id_attribute.h"
#include "src/tint/ast/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::ast {
namespace {
using VariableTest = TestHelper;
TEST_F(VariableTest, Creation) {
- auto* v = Var("my_var", ty.i32(), StorageClass::kFunction);
-
- EXPECT_EQ(v->symbol, Symbol(1, ID()));
- EXPECT_EQ(v->declared_storage_class, StorageClass::kFunction);
- EXPECT_TRUE(v->type->Is<ast::I32>());
- EXPECT_EQ(v->source.range.begin.line, 0u);
- EXPECT_EQ(v->source.range.begin.column, 0u);
- EXPECT_EQ(v->source.range.end.line, 0u);
- EXPECT_EQ(v->source.range.end.column, 0u);
+ auto* v = Var("my_var", ty.i32(), StorageClass::kFunction);
+
+ EXPECT_EQ(v->symbol, Symbol(1, ID()));
+ EXPECT_EQ(v->declared_storage_class, StorageClass::kFunction);
+ EXPECT_TRUE(v->type->Is<ast::I32>());
+ EXPECT_EQ(v->source.range.begin.line, 0u);
+ EXPECT_EQ(v->source.range.begin.column, 0u);
+ EXPECT_EQ(v->source.range.end.line, 0u);
+ EXPECT_EQ(v->source.range.end.column, 0u);
}
TEST_F(VariableTest, CreationWithSource) {
- auto* v = Var(
- Source{Source::Range{Source::Location{27, 4}, Source::Location{27, 5}}},
- "i", ty.f32(), StorageClass::kPrivate, nullptr, AttributeList{});
-
- EXPECT_EQ(v->symbol, Symbol(1, ID()));
- EXPECT_EQ(v->declared_storage_class, StorageClass::kPrivate);
- EXPECT_TRUE(v->type->Is<ast::F32>());
- EXPECT_EQ(v->source.range.begin.line, 27u);
- EXPECT_EQ(v->source.range.begin.column, 4u);
- EXPECT_EQ(v->source.range.end.line, 27u);
- EXPECT_EQ(v->source.range.end.column, 5u);
+ auto* v = Var(Source{Source::Range{Source::Location{27, 4}, Source::Location{27, 5}}}, "i",
+ ty.f32(), StorageClass::kPrivate, nullptr, AttributeList{});
+
+ EXPECT_EQ(v->symbol, Symbol(1, ID()));
+ EXPECT_EQ(v->declared_storage_class, StorageClass::kPrivate);
+ EXPECT_TRUE(v->type->Is<ast::F32>());
+ EXPECT_EQ(v->source.range.begin.line, 27u);
+ EXPECT_EQ(v->source.range.begin.column, 4u);
+ EXPECT_EQ(v->source.range.end.line, 27u);
+ EXPECT_EQ(v->source.range.end.column, 5u);
}
TEST_F(VariableTest, CreationEmpty) {
- auto* v = Var(
- Source{Source::Range{Source::Location{27, 4}, Source::Location{27, 7}}},
- "a_var", ty.i32(), StorageClass::kWorkgroup, nullptr, AttributeList{});
-
- EXPECT_EQ(v->symbol, Symbol(1, ID()));
- EXPECT_EQ(v->declared_storage_class, StorageClass::kWorkgroup);
- EXPECT_TRUE(v->type->Is<ast::I32>());
- EXPECT_EQ(v->source.range.begin.line, 27u);
- EXPECT_EQ(v->source.range.begin.column, 4u);
- EXPECT_EQ(v->source.range.end.line, 27u);
- EXPECT_EQ(v->source.range.end.column, 7u);
+ auto* v = Var(Source{Source::Range{Source::Location{27, 4}, Source::Location{27, 7}}}, "a_var",
+ ty.i32(), StorageClass::kWorkgroup, nullptr, AttributeList{});
+
+ EXPECT_EQ(v->symbol, Symbol(1, ID()));
+ EXPECT_EQ(v->declared_storage_class, StorageClass::kWorkgroup);
+ EXPECT_TRUE(v->type->Is<ast::I32>());
+ EXPECT_EQ(v->source.range.begin.line, 27u);
+ EXPECT_EQ(v->source.range.begin.column, 4u);
+ EXPECT_EQ(v->source.range.end.line, 27u);
+ EXPECT_EQ(v->source.range.end.column, 7u);
}
TEST_F(VariableTest, Assert_MissingSymbol) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b;
- b.Var("", b.ty.i32(), StorageClass::kNone);
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b;
+ b.Var("", b.ty.i32(), StorageClass::kNone);
+ },
+ "internal compiler error");
}
TEST_F(VariableTest, Assert_DifferentProgramID_Symbol) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b1;
- ProgramBuilder b2;
- b1.Var(b2.Sym("x"), b1.ty.f32(), StorageClass::kNone);
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b1;
+ ProgramBuilder b2;
+ b1.Var(b2.Sym("x"), b1.ty.f32(), StorageClass::kNone);
+ },
+ "internal compiler error");
}
TEST_F(VariableTest, Assert_DifferentProgramID_Constructor) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b1;
- ProgramBuilder b2;
- b1.Var("x", b1.ty.f32(), StorageClass::kNone, b2.Expr(1.2f));
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b1;
+ ProgramBuilder b2;
+ b1.Var("x", b1.ty.f32(), StorageClass::kNone, b2.Expr(1.2_f));
+ },
+ "internal compiler error");
}
TEST_F(VariableTest, WithAttributes) {
- auto* var = Var("my_var", ty.i32(), StorageClass::kFunction, nullptr,
- AttributeList{
- create<LocationAttribute>(1),
- create<BuiltinAttribute>(Builtin::kPosition),
- create<IdAttribute>(1200),
- });
-
- auto& attributes = var->attributes;
- EXPECT_TRUE(ast::HasAttribute<ast::LocationAttribute>(attributes));
- EXPECT_TRUE(ast::HasAttribute<ast::BuiltinAttribute>(attributes));
- EXPECT_TRUE(ast::HasAttribute<ast::IdAttribute>(attributes));
-
- auto* location = ast::GetAttribute<ast::LocationAttribute>(attributes);
- ASSERT_NE(nullptr, location);
- EXPECT_EQ(1u, location->value);
+ auto* var = Var("my_var", ty.i32(), StorageClass::kFunction, nullptr,
+ AttributeList{
+ create<LocationAttribute>(1),
+ create<BuiltinAttribute>(Builtin::kPosition),
+ create<IdAttribute>(1200),
+ });
+
+ auto& attributes = var->attributes;
+ EXPECT_TRUE(ast::HasAttribute<ast::LocationAttribute>(attributes));
+ EXPECT_TRUE(ast::HasAttribute<ast::BuiltinAttribute>(attributes));
+ EXPECT_TRUE(ast::HasAttribute<ast::IdAttribute>(attributes));
+
+ auto* location = ast::GetAttribute<ast::LocationAttribute>(attributes);
+ ASSERT_NE(nullptr, location);
+ EXPECT_EQ(1u, location->value);
}
TEST_F(VariableTest, BindingPoint) {
- auto* var = Var("my_var", ty.i32(), StorageClass::kFunction, nullptr,
- AttributeList{
- create<BindingAttribute>(2),
- create<GroupAttribute>(1),
- });
- EXPECT_TRUE(var->BindingPoint());
- ASSERT_NE(var->BindingPoint().binding, nullptr);
- ASSERT_NE(var->BindingPoint().group, nullptr);
- EXPECT_EQ(var->BindingPoint().binding->value, 2u);
- EXPECT_EQ(var->BindingPoint().group->value, 1u);
+ auto* var = Var("my_var", ty.i32(), StorageClass::kFunction, nullptr,
+ AttributeList{
+ create<BindingAttribute>(2),
+ create<GroupAttribute>(1),
+ });
+ EXPECT_TRUE(var->BindingPoint());
+ ASSERT_NE(var->BindingPoint().binding, nullptr);
+ ASSERT_NE(var->BindingPoint().group, nullptr);
+ EXPECT_EQ(var->BindingPoint().binding->value, 2u);
+ EXPECT_EQ(var->BindingPoint().group->value, 1u);
}
TEST_F(VariableTest, BindingPointAttributes) {
- auto* var = Var("my_var", ty.i32(), StorageClass::kFunction, nullptr,
- AttributeList{});
- EXPECT_FALSE(var->BindingPoint());
- EXPECT_EQ(var->BindingPoint().group, nullptr);
- EXPECT_EQ(var->BindingPoint().binding, nullptr);
+ auto* var = Var("my_var", ty.i32(), StorageClass::kFunction, nullptr, AttributeList{});
+ EXPECT_FALSE(var->BindingPoint());
+ EXPECT_EQ(var->BindingPoint().group, nullptr);
+ EXPECT_EQ(var->BindingPoint().binding, nullptr);
}
TEST_F(VariableTest, BindingPointMissingGroupAttribute) {
- auto* var = Var("my_var", ty.i32(), StorageClass::kFunction, nullptr,
- AttributeList{
- create<BindingAttribute>(2),
- });
- EXPECT_FALSE(var->BindingPoint());
- ASSERT_NE(var->BindingPoint().binding, nullptr);
- EXPECT_EQ(var->BindingPoint().binding->value, 2u);
- EXPECT_EQ(var->BindingPoint().group, nullptr);
+ auto* var = Var("my_var", ty.i32(), StorageClass::kFunction, nullptr,
+ AttributeList{
+ create<BindingAttribute>(2),
+ });
+ EXPECT_FALSE(var->BindingPoint());
+ ASSERT_NE(var->BindingPoint().binding, nullptr);
+ EXPECT_EQ(var->BindingPoint().binding->value, 2u);
+ EXPECT_EQ(var->BindingPoint().group, nullptr);
}
TEST_F(VariableTest, BindingPointMissingBindingAttribute) {
- auto* var = Var("my_var", ty.i32(), StorageClass::kFunction, nullptr,
- AttributeList{create<GroupAttribute>(1)});
- EXPECT_FALSE(var->BindingPoint());
- ASSERT_NE(var->BindingPoint().group, nullptr);
- EXPECT_EQ(var->BindingPoint().group->value, 1u);
- EXPECT_EQ(var->BindingPoint().binding, nullptr);
+ auto* var = Var("my_var", ty.i32(), StorageClass::kFunction, nullptr,
+ AttributeList{create<GroupAttribute>(1)});
+ EXPECT_FALSE(var->BindingPoint());
+ ASSERT_NE(var->BindingPoint().group, nullptr);
+ EXPECT_EQ(var->BindingPoint().group->value, 1u);
+ EXPECT_EQ(var->BindingPoint().binding, nullptr);
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/ast/vector.cc b/chromium/third_party/dawn/src/tint/ast/vector.cc
index d47aad3a8c5..43478dfda42 100644
--- a/chromium/third_party/dawn/src/tint/ast/vector.cc
+++ b/chromium/third_party/dawn/src/tint/ast/vector.cc
@@ -20,14 +20,11 @@ TINT_INSTANTIATE_TYPEINFO(tint::ast::Vector);
namespace tint::ast {
-Vector::Vector(ProgramID pid,
- Source const& src,
- const Type* subtype,
- uint32_t w)
+Vector::Vector(ProgramID pid, Source const& src, const Type* subtype, uint32_t w)
: Base(pid, src), type(subtype), width(w) {
- TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, subtype, program_id);
- TINT_ASSERT(AST, width > 1);
- TINT_ASSERT(AST, width < 5);
+ TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(AST, subtype, program_id);
+ TINT_ASSERT(AST, width > 1);
+ TINT_ASSERT(AST, width < 5);
}
Vector::Vector(Vector&&) = default;
@@ -35,19 +32,19 @@ Vector::Vector(Vector&&) = default;
Vector::~Vector() = default;
std::string Vector::FriendlyName(const SymbolTable& symbols) const {
- std::ostringstream out;
- out << "vec" << width;
- if (type) {
- out << "<" << type->FriendlyName(symbols) << ">";
- }
- return out.str();
+ std::ostringstream out;
+ out << "vec" << width;
+ if (type) {
+ out << "<" << type->FriendlyName(symbols) << ">";
+ }
+ return out.str();
}
const Vector* Vector::Clone(CloneContext* ctx) const {
- // Clone arguments outside of create() call to have deterministic ordering
- auto src = ctx->Clone(source);
- auto* ty = ctx->Clone(type);
- return ctx->dst->create<Vector>(src, ty, width);
+ // Clone arguments outside of create() call to have deterministic ordering
+ auto src = ctx->Clone(source);
+ auto* ty = ctx->Clone(type);
+ return ctx->dst->create<Vector>(src, ty, width);
}
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/vector.h b/chromium/third_party/dawn/src/tint/ast/vector.h
index dfa2ac24b23..6b2d9141b88 100644
--- a/chromium/third_party/dawn/src/tint/ast/vector.h
+++ b/chromium/third_party/dawn/src/tint/ast/vector.h
@@ -23,36 +23,36 @@ namespace tint::ast {
/// A vector type.
class Vector final : public Castable<Vector, Type> {
- public:
- /// Constructor
- /// @param pid the identifier of the program that owns this node
- /// @param src the source of this node
- /// @param subtype the declared type of the vector components. May be null
- /// for vector constructors, where the element type will be inferred
- /// from the constructor arguments
- /// @param width the number of elements in the vector
- Vector(ProgramID pid, Source const& src, const Type* subtype, uint32_t width);
- /// Move constructor
- Vector(Vector&&);
- ~Vector() override;
-
- /// @param symbols the program's symbol table
- /// @returns the name for this type that closely resembles how it would be
- /// declared in WGSL.
- std::string FriendlyName(const SymbolTable& symbols) const override;
-
- /// Clones this type and all transitive types using the `CloneContext` `ctx`.
- /// @param ctx the clone context
- /// @return the newly cloned type
- const Vector* Clone(CloneContext* ctx) const override;
-
- /// The declared type of the vector components. May be null for vector
- /// constructors, where the element type will be inferred from the constructor
- /// arguments
- const Type* const type;
-
- /// The number of elements in the vector
- const uint32_t width;
+ public:
+ /// Constructor
+ /// @param pid the identifier of the program that owns this node
+ /// @param src the source of this node
+ /// @param subtype the declared type of the vector components. May be null
+ /// for vector constructors, where the element type will be inferred
+ /// from the constructor arguments
+ /// @param width the number of elements in the vector
+ Vector(ProgramID pid, Source const& src, const Type* subtype, uint32_t width);
+ /// Move constructor
+ Vector(Vector&&);
+ ~Vector() override;
+
+ /// @param symbols the program's symbol table
+ /// @returns the name for this type that closely resembles how it would be
+ /// declared in WGSL.
+ std::string FriendlyName(const SymbolTable& symbols) const override;
+
+ /// Clones this type and all transitive types using the `CloneContext` `ctx`.
+ /// @param ctx the clone context
+ /// @return the newly cloned type
+ const Vector* Clone(CloneContext* ctx) const override;
+
+ /// The declared type of the vector components. May be null for vector
+ /// constructors, where the element type will be inferred from the constructor
+ /// arguments
+ const Type* const type;
+
+ /// The number of elements in the vector
+ const uint32_t width;
};
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/vector_test.cc b/chromium/third_party/dawn/src/tint/ast/vector_test.cc
index 19c7c4ef86d..a701852df5d 100644
--- a/chromium/third_party/dawn/src/tint/ast/vector_test.cc
+++ b/chromium/third_party/dawn/src/tint/ast/vector_test.cc
@@ -23,16 +23,16 @@ namespace {
using AstVectorTest = TestHelper;
TEST_F(AstVectorTest, Creation) {
- auto* i32 = create<I32>();
- auto* v = create<Vector>(i32, 2);
- EXPECT_EQ(v->type, i32);
- EXPECT_EQ(v->width, 2u);
+ auto* i32 = create<I32>();
+ auto* v = create<Vector>(i32, 2);
+ EXPECT_EQ(v->type, i32);
+ EXPECT_EQ(v->width, 2u);
}
TEST_F(AstVectorTest, FriendlyName) {
- auto* f32 = create<F32>();
- auto* v = create<Vector>(f32, 3);
- EXPECT_EQ(v->FriendlyName(Symbols()), "vec3<f32>");
+ auto* f32 = create<F32>();
+ auto* v = create<Vector>(f32, 3);
+ EXPECT_EQ(v->FriendlyName(Symbols()), "vec3<f32>");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/ast/void.cc b/chromium/third_party/dawn/src/tint/ast/void.cc
index 34314ae5951..5cc89635a7b 100644
--- a/chromium/third_party/dawn/src/tint/ast/void.cc
+++ b/chromium/third_party/dawn/src/tint/ast/void.cc
@@ -27,12 +27,12 @@ Void::Void(Void&&) = default;
Void::~Void() = default;
std::string Void::FriendlyName(const SymbolTable&) const {
- return "void";
+ return "void";
}
const Void* Void::Clone(CloneContext* ctx) const {
- auto src = ctx->Clone(source);
- return ctx->dst->create<Void>(src);
+ auto src = ctx->Clone(source);
+ return ctx->dst->create<Void>(src);
}
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/void.h b/chromium/third_party/dawn/src/tint/ast/void.h
index 55ddb35c72f..33f5b5bd1dd 100644
--- a/chromium/third_party/dawn/src/tint/ast/void.h
+++ b/chromium/third_party/dawn/src/tint/ast/void.h
@@ -23,24 +23,24 @@ namespace tint::ast {
/// A void type
class Void final : public Castable<Void, Type> {
- public:
- /// Constructor
- /// @param pid the identifier of the program that owns this node
- /// @param src the source of this node
- Void(ProgramID pid, const Source& src);
- /// Move constructor
- Void(Void&&);
- ~Void() override;
-
- /// @param symbols the program's symbol table
- /// @returns the name for this type that closely resembles how it would be
- /// declared in WGSL.
- std::string FriendlyName(const SymbolTable& symbols) const override;
-
- /// Clones this type and all transitive types using the `CloneContext` `ctx`.
- /// @param ctx the clone context
- /// @return the newly cloned type
- const Void* Clone(CloneContext* ctx) const override;
+ public:
+ /// Constructor
+ /// @param pid the identifier of the program that owns this node
+ /// @param src the source of this node
+ Void(ProgramID pid, const Source& src);
+ /// Move constructor
+ Void(Void&&);
+ ~Void() override;
+
+ /// @param symbols the program's symbol table
+ /// @returns the name for this type that closely resembles how it would be
+ /// declared in WGSL.
+ std::string FriendlyName(const SymbolTable& symbols) const override;
+
+ /// Clones this type and all transitive types using the `CloneContext` `ctx`.
+ /// @param ctx the clone context
+ /// @return the newly cloned type
+ const Void* Clone(CloneContext* ctx) const override;
};
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/workgroup_attribute.cc b/chromium/third_party/dawn/src/tint/ast/workgroup_attribute.cc
index 1f7a112a036..74ecdbe019e 100644
--- a/chromium/third_party/dawn/src/tint/ast/workgroup_attribute.cc
+++ b/chromium/third_party/dawn/src/tint/ast/workgroup_attribute.cc
@@ -32,16 +32,16 @@ WorkgroupAttribute::WorkgroupAttribute(ProgramID pid,
WorkgroupAttribute::~WorkgroupAttribute() = default;
std::string WorkgroupAttribute::Name() const {
- return "workgroup_size";
+ return "workgroup_size";
}
const WorkgroupAttribute* WorkgroupAttribute::Clone(CloneContext* ctx) const {
- // Clone arguments outside of create() call to have deterministic ordering
- auto src = ctx->Clone(source);
- auto* x_ = ctx->Clone(x);
- auto* y_ = ctx->Clone(y);
- auto* z_ = ctx->Clone(z);
- return ctx->dst->create<WorkgroupAttribute>(src, x_, y_, z_);
+ // Clone arguments outside of create() call to have deterministic ordering
+ auto src = ctx->Clone(source);
+ auto* x_ = ctx->Clone(x);
+ auto* y_ = ctx->Clone(y);
+ auto* z_ = ctx->Clone(z);
+ return ctx->dst->create<WorkgroupAttribute>(src, x_, y_, z_);
}
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/workgroup_attribute.h b/chromium/third_party/dawn/src/tint/ast/workgroup_attribute.h
index 232201b8d83..536ce155fdc 100644
--- a/chromium/third_party/dawn/src/tint/ast/workgroup_attribute.h
+++ b/chromium/third_party/dawn/src/tint/ast/workgroup_attribute.h
@@ -28,41 +28,40 @@ class Expression;
namespace tint::ast {
/// A workgroup attribute
-class WorkgroupAttribute final
- : public Castable<WorkgroupAttribute, Attribute> {
- public:
- /// constructor
- /// @param pid the identifier of the program that owns this node
- /// @param src the source of this node
- /// @param x the workgroup x dimension expression
- /// @param y the optional workgroup y dimension expression
- /// @param z the optional workgroup z dimension expression
- WorkgroupAttribute(ProgramID pid,
- const Source& src,
- const ast::Expression* x,
- const ast::Expression* y = nullptr,
- const ast::Expression* z = nullptr);
+class WorkgroupAttribute final : public Castable<WorkgroupAttribute, Attribute> {
+ public:
+ /// constructor
+ /// @param pid the identifier of the program that owns this node
+ /// @param src the source of this node
+ /// @param x the workgroup x dimension expression
+ /// @param y the optional workgroup y dimension expression
+ /// @param z the optional workgroup z dimension expression
+ WorkgroupAttribute(ProgramID pid,
+ const Source& src,
+ const ast::Expression* x,
+ const ast::Expression* y = nullptr,
+ const ast::Expression* z = nullptr);
- ~WorkgroupAttribute() override;
+ ~WorkgroupAttribute() override;
- /// @returns the workgroup dimensions
- std::array<const ast::Expression*, 3> Values() const { return {x, y, z}; }
+ /// @returns the workgroup dimensions
+ std::array<const ast::Expression*, 3> Values() const { return {x, y, z}; }
- /// @returns the WGSL name for the attribute
- std::string Name() const override;
+ /// @returns the WGSL name for the attribute
+ std::string Name() const override;
- /// Clones this node and all transitive child nodes using the `CloneContext`
- /// `ctx`.
- /// @param ctx the clone context
- /// @return the newly cloned node
- const WorkgroupAttribute* Clone(CloneContext* ctx) const override;
+ /// Clones this node and all transitive child nodes using the `CloneContext`
+ /// `ctx`.
+ /// @param ctx the clone context
+ /// @return the newly cloned node
+ const WorkgroupAttribute* Clone(CloneContext* ctx) const override;
- /// The workgroup x dimension.
- const ast::Expression* const x;
- /// The optional workgroup y dimension. May be null.
- const ast::Expression* const y = nullptr;
- /// The optional workgroup z dimension. May be null.
- const ast::Expression* const z = nullptr;
+ /// The workgroup x dimension.
+ const ast::Expression* const x;
+ /// The optional workgroup y dimension. May be null.
+ const ast::Expression* const y = nullptr;
+ /// The optional workgroup z dimension. May be null.
+ const ast::Expression* const z = nullptr;
};
} // namespace tint::ast
diff --git a/chromium/third_party/dawn/src/tint/ast/workgroup_attribute_test.cc b/chromium/third_party/dawn/src/tint/ast/workgroup_attribute_test.cc
index 928b5fea2c4..d4fb6c7580d 100644
--- a/chromium/third_party/dawn/src/tint/ast/workgroup_attribute_test.cc
+++ b/chromium/third_party/dawn/src/tint/ast/workgroup_attribute_test.cc
@@ -17,61 +17,63 @@
#include "src/tint/ast/stage_attribute.h"
#include "src/tint/ast/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::ast {
namespace {
using WorkgroupAttributeTest = TestHelper;
TEST_F(WorkgroupAttributeTest, Creation_1param) {
- auto* d = WorkgroupSize(2);
- auto values = d->Values();
+ auto* d = WorkgroupSize(2_i);
+ auto values = d->Values();
- ASSERT_TRUE(values[0]->Is<ast::IntLiteralExpression>());
- EXPECT_EQ(values[0]->As<ast::IntLiteralExpression>()->ValueAsU32(), 2u);
+ ASSERT_TRUE(values[0]->Is<ast::IntLiteralExpression>());
+ EXPECT_EQ(values[0]->As<ast::IntLiteralExpression>()->value, 2);
- EXPECT_EQ(values[1], nullptr);
- EXPECT_EQ(values[2], nullptr);
+ EXPECT_EQ(values[1], nullptr);
+ EXPECT_EQ(values[2], nullptr);
}
TEST_F(WorkgroupAttributeTest, Creation_2param) {
- auto* d = WorkgroupSize(2, 4);
- auto values = d->Values();
+ auto* d = WorkgroupSize(2_i, 4_i);
+ auto values = d->Values();
- ASSERT_TRUE(values[0]->Is<ast::IntLiteralExpression>());
- EXPECT_EQ(values[0]->As<ast::IntLiteralExpression>()->ValueAsU32(), 2u);
+ ASSERT_TRUE(values[0]->Is<ast::IntLiteralExpression>());
+ EXPECT_EQ(values[0]->As<ast::IntLiteralExpression>()->value, 2);
- ASSERT_TRUE(values[1]->Is<ast::IntLiteralExpression>());
- EXPECT_EQ(values[1]->As<ast::IntLiteralExpression>()->ValueAsU32(), 4u);
+ ASSERT_TRUE(values[1]->Is<ast::IntLiteralExpression>());
+ EXPECT_EQ(values[1]->As<ast::IntLiteralExpression>()->value, 4);
- EXPECT_EQ(values[2], nullptr);
+ EXPECT_EQ(values[2], nullptr);
}
TEST_F(WorkgroupAttributeTest, Creation_3param) {
- auto* d = WorkgroupSize(2, 4, 6);
- auto values = d->Values();
+ auto* d = WorkgroupSize(2_i, 4_i, 6_i);
+ auto values = d->Values();
- ASSERT_TRUE(values[0]->Is<ast::IntLiteralExpression>());
- EXPECT_EQ(values[0]->As<ast::IntLiteralExpression>()->ValueAsU32(), 2u);
+ ASSERT_TRUE(values[0]->Is<ast::IntLiteralExpression>());
+ EXPECT_EQ(values[0]->As<ast::IntLiteralExpression>()->value, 2);
- ASSERT_TRUE(values[1]->Is<ast::IntLiteralExpression>());
- EXPECT_EQ(values[1]->As<ast::IntLiteralExpression>()->ValueAsU32(), 4u);
+ ASSERT_TRUE(values[1]->Is<ast::IntLiteralExpression>());
+ EXPECT_EQ(values[1]->As<ast::IntLiteralExpression>()->value, 4);
- ASSERT_TRUE(values[2]->Is<ast::IntLiteralExpression>());
- EXPECT_EQ(values[2]->As<ast::IntLiteralExpression>()->ValueAsU32(), 6u);
+ ASSERT_TRUE(values[2]->Is<ast::IntLiteralExpression>());
+ EXPECT_EQ(values[2]->As<ast::IntLiteralExpression>()->value, 6);
}
TEST_F(WorkgroupAttributeTest, Creation_WithIdentifier) {
- auto* d = WorkgroupSize(2, 4, "depth");
- auto values = d->Values();
+ auto* d = WorkgroupSize(2_i, 4_i, "depth");
+ auto values = d->Values();
- ASSERT_TRUE(values[0]->Is<ast::IntLiteralExpression>());
- EXPECT_EQ(values[0]->As<ast::IntLiteralExpression>()->ValueAsU32(), 2u);
+ ASSERT_TRUE(values[0]->Is<ast::IntLiteralExpression>());
+ EXPECT_EQ(values[0]->As<ast::IntLiteralExpression>()->value, 2);
- ASSERT_TRUE(values[1]->Is<ast::IntLiteralExpression>());
- EXPECT_EQ(values[1]->As<ast::IntLiteralExpression>()->ValueAsU32(), 4u);
+ ASSERT_TRUE(values[1]->Is<ast::IntLiteralExpression>());
+ EXPECT_EQ(values[1]->As<ast::IntLiteralExpression>()->value, 4);
- auto* z_ident = As<ast::IdentifierExpression>(values[2]);
- ASSERT_TRUE(z_ident);
- EXPECT_EQ(Symbols().NameFor(z_ident->symbol), "depth");
+ auto* z_ident = As<ast::IdentifierExpression>(values[2]);
+ ASSERT_TRUE(z_ident);
+ EXPECT_EQ(Symbols().NameFor(z_ident->symbol), "depth");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/bench/benchmark.cc b/chromium/third_party/dawn/src/tint/bench/benchmark.cc
index 4fd96e2c891..c00e51f9281 100644
--- a/chromium/third_party/dawn/src/tint/bench/benchmark.cc
+++ b/chromium/third_party/dawn/src/tint/bench/benchmark.cc
@@ -31,93 +31,93 @@ std::filesystem::path kInputFileDir;
/// @returns true if we successfully read the file.
template <typename T>
std::variant<std::vector<T>, Error> ReadFile(const std::string& input_file) {
- FILE* file = nullptr;
+ FILE* file = nullptr;
#if defined(_MSC_VER)
- fopen_s(&file, input_file.c_str(), "rb");
+ fopen_s(&file, input_file.c_str(), "rb");
#else
- file = fopen(input_file.c_str(), "rb");
+ file = fopen(input_file.c_str(), "rb");
#endif
- if (!file) {
- return Error{"Failed to open " + input_file};
- }
+ if (!file) {
+ return Error{"Failed to open " + input_file};
+ }
- fseek(file, 0, SEEK_END);
- const auto file_size = static_cast<size_t>(ftell(file));
- if (0 != (file_size % sizeof(T))) {
- std::stringstream err;
- err << "File " << input_file
- << " does not contain an integral number of objects: " << file_size
- << " bytes in the file, require " << sizeof(T) << " bytes per object";
- fclose(file);
- return Error{err.str()};
- }
- fseek(file, 0, SEEK_SET);
+ fseek(file, 0, SEEK_END);
+ const auto file_size = static_cast<size_t>(ftell(file));
+ if (0 != (file_size % sizeof(T))) {
+ std::stringstream err;
+ err << "File " << input_file
+ << " does not contain an integral number of objects: " << file_size
+ << " bytes in the file, require " << sizeof(T) << " bytes per object";
+ fclose(file);
+ return Error{err.str()};
+ }
+ fseek(file, 0, SEEK_SET);
- std::vector<T> buffer;
- buffer.resize(file_size / sizeof(T));
+ std::vector<T> buffer;
+ buffer.resize(file_size / sizeof(T));
- size_t bytes_read = fread(buffer.data(), 1, file_size, file);
- fclose(file);
- if (bytes_read != file_size) {
- return Error{"Failed to read " + input_file};
- }
+ size_t bytes_read = fread(buffer.data(), 1, file_size, file);
+ fclose(file);
+ if (bytes_read != file_size) {
+ return Error{"Failed to read " + input_file};
+ }
- return buffer;
+ return buffer;
}
bool FindBenchmarkInputDir() {
- // Attempt to find the benchmark input files by searching up from the current
- // working directory.
- auto path = std::filesystem::current_path();
- while (std::filesystem::is_directory(path)) {
- auto test = path / "test" / "tint" / "benchmark";
- if (std::filesystem::is_directory(test)) {
- kInputFileDir = test;
- return true;
- }
- auto parent = path.parent_path();
- if (path == parent) {
- break;
+ // Attempt to find the benchmark input files by searching up from the current
+ // working directory.
+ auto path = std::filesystem::current_path();
+ while (std::filesystem::is_directory(path)) {
+ auto test = path / "test" / "tint" / "benchmark";
+ if (std::filesystem::is_directory(test)) {
+ kInputFileDir = test;
+ return true;
+ }
+ auto parent = path.parent_path();
+ if (path == parent) {
+ break;
+ }
+ path = parent;
}
- path = parent;
- }
- return false;
+ return false;
}
} // namespace
std::variant<tint::Source::File, Error> LoadInputFile(std::string name) {
- auto path = (kInputFileDir / name).string();
- auto data = ReadFile<uint8_t>(path);
- if (auto* buf = std::get_if<std::vector<uint8_t>>(&data)) {
- return tint::Source::File(path, std::string(buf->begin(), buf->end()));
- }
- return std::get<Error>(data);
+ auto path = (kInputFileDir / name).string();
+ auto data = ReadFile<uint8_t>(path);
+ if (auto* buf = std::get_if<std::vector<uint8_t>>(&data)) {
+ return tint::Source::File(path, std::string(buf->begin(), buf->end()));
+ }
+ return std::get<Error>(data);
}
std::variant<ProgramAndFile, Error> LoadProgram(std::string name) {
- auto res = bench::LoadInputFile(name);
- if (auto err = std::get_if<bench::Error>(&res)) {
- return *err;
- }
- auto& file = std::get<Source::File>(res);
- auto program = reader::wgsl::Parse(&file);
- if (program.Diagnostics().contains_errors()) {
- return Error{program.Diagnostics().str()};
- }
- return ProgramAndFile{std::move(program), std::move(file)};
+ auto res = bench::LoadInputFile(name);
+ if (auto err = std::get_if<bench::Error>(&res)) {
+ return *err;
+ }
+ auto& file = std::get<Source::File>(res);
+ auto program = reader::wgsl::Parse(&file);
+ if (program.Diagnostics().contains_errors()) {
+ return Error{program.Diagnostics().str()};
+ }
+ return ProgramAndFile{std::move(program), std::move(file)};
}
} // namespace tint::bench
int main(int argc, char** argv) {
- benchmark::Initialize(&argc, argv);
- if (benchmark::ReportUnrecognizedArguments(argc, argv)) {
- return 1;
- }
- if (!tint::bench::FindBenchmarkInputDir()) {
- std::cerr << "failed to locate benchmark input files" << std::endl;
- return 1;
- }
- benchmark::RunSpecifiedBenchmarks();
+ benchmark::Initialize(&argc, argv);
+ if (benchmark::ReportUnrecognizedArguments(argc, argv)) {
+ return 1;
+ }
+ if (!tint::bench::FindBenchmarkInputDir()) {
+ std::cerr << "failed to locate benchmark input files" << std::endl;
+ return 1;
+ }
+ benchmark::RunSpecifiedBenchmarks();
}
diff --git a/chromium/third_party/dawn/src/tint/bench/benchmark.h b/chromium/third_party/dawn/src/tint/bench/benchmark.h
index 96a935d5eff..733b1a7bb20 100644
--- a/chromium/third_party/dawn/src/tint/bench/benchmark.h
+++ b/chromium/third_party/dawn/src/tint/bench/benchmark.h
@@ -17,6 +17,7 @@
#include <memory>
#include <string>
+// TODO(https://crbug.com/dawn/1379) Update cpplint and remove NOLINT
#include <variant> // NOLINT: Found C system header after C++ system header.
#include "benchmark/benchmark.h"
@@ -27,16 +28,16 @@ namespace tint::bench {
/// Error indicates an operation did not complete successfully.
struct Error {
- /// The error message.
- std::string msg;
+ /// The error message.
+ std::string msg;
};
/// ProgramAndFile holds a Program and a Source::File.
struct ProgramAndFile {
- /// The tint program parsed from file.
- Program program;
- /// The source file
- Source::File file;
+ /// The tint program parsed from file.
+ Program program;
+ /// The source file
+ Source::File file;
};
/// LoadInputFile attempts to load a benchmark input file with the given file
@@ -52,24 +53,23 @@ std::variant<Source::File, Error> LoadInputFile(std::string name);
std::variant<ProgramAndFile, Error> LoadProgram(std::string name);
/// Declares a benchmark with the given function and WGSL file name
-#define TINT_BENCHMARK_WGSL_PROGRAM(FUNC, WGSL_NAME) \
- BENCHMARK_CAPTURE(FUNC, WGSL_NAME, WGSL_NAME);
+#define TINT_BENCHMARK_WGSL_PROGRAM(FUNC, WGSL_NAME) BENCHMARK_CAPTURE(FUNC, WGSL_NAME, WGSL_NAME);
/// Declares a set of benchmarks for the given function using a list of WGSL
/// files in `<tint>/test/benchmark`.
-#define TINT_BENCHMARK_WGSL_PROGRAMS(FUNC) \
- TINT_BENCHMARK_WGSL_PROGRAM(FUNC, "animometer.wgsl"); \
- TINT_BENCHMARK_WGSL_PROGRAM(FUNC, "bloom-vertical-blur.wgsl"); \
- TINT_BENCHMARK_WGSL_PROGRAM(FUNC, "cluster-lights.wgsl"); \
- TINT_BENCHMARK_WGSL_PROGRAM(FUNC, "empty.wgsl"); \
- TINT_BENCHMARK_WGSL_PROGRAM(FUNC, "metaball-isosurface.wgsl"); \
- TINT_BENCHMARK_WGSL_PROGRAM(FUNC, "particles.wgsl"); \
- TINT_BENCHMARK_WGSL_PROGRAM(FUNC, "shadow-fragment.wgsl"); \
- TINT_BENCHMARK_WGSL_PROGRAM(FUNC, "simple-compute.wgsl"); \
- TINT_BENCHMARK_WGSL_PROGRAM(FUNC, "simple-fragment.wgsl"); \
- TINT_BENCHMARK_WGSL_PROGRAM(FUNC, "simple-vertex.wgsl"); \
- TINT_BENCHMARK_WGSL_PROGRAM(FUNC, "skinned-shadowed-pbr-fragment.wgsl"); \
- TINT_BENCHMARK_WGSL_PROGRAM(FUNC, "skinned-shadowed-pbr-vertex.wgsl");
+#define TINT_BENCHMARK_WGSL_PROGRAMS(FUNC) \
+ TINT_BENCHMARK_WGSL_PROGRAM(FUNC, "animometer.wgsl"); \
+ TINT_BENCHMARK_WGSL_PROGRAM(FUNC, "bloom-vertical-blur.wgsl"); \
+ TINT_BENCHMARK_WGSL_PROGRAM(FUNC, "cluster-lights.wgsl"); \
+ TINT_BENCHMARK_WGSL_PROGRAM(FUNC, "empty.wgsl"); \
+ TINT_BENCHMARK_WGSL_PROGRAM(FUNC, "metaball-isosurface.wgsl"); \
+ TINT_BENCHMARK_WGSL_PROGRAM(FUNC, "particles.wgsl"); \
+ TINT_BENCHMARK_WGSL_PROGRAM(FUNC, "shadow-fragment.wgsl"); \
+ TINT_BENCHMARK_WGSL_PROGRAM(FUNC, "simple-compute.wgsl"); \
+ TINT_BENCHMARK_WGSL_PROGRAM(FUNC, "simple-fragment.wgsl"); \
+ TINT_BENCHMARK_WGSL_PROGRAM(FUNC, "simple-vertex.wgsl"); \
+ TINT_BENCHMARK_WGSL_PROGRAM(FUNC, "skinned-shadowed-pbr-fragment.wgsl"); \
+ TINT_BENCHMARK_WGSL_PROGRAM(FUNC, "skinned-shadowed-pbr-vertex.wgsl");
} // namespace tint::bench
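
For readers skimming the patch, the two macros above are thin wrappers over google/benchmark's BENCHMARK_CAPTURE, so each listed WGSL file becomes a separately named benchmark case. Below is a minimal sketch of how such a registration is typically consumed; ParseWGSL is a hypothetical function name, not part of this patch, and the snippet assumes the BENCHMARK_CAPTURE convention of forwarding the captured name as an extra argument after benchmark::State (plus an include of src/tint/bench/benchmark.h).

    // Hypothetical benchmark body; the captured WGSL file name arrives as the
    // extra argument that BENCHMARK_CAPTURE appends after benchmark::State.
    void ParseWGSL(benchmark::State& state, std::string filename) {
        for (auto _ : state) {
            // Parse the WGSL input on every iteration; LoadProgram() is the
            // helper declared in benchmark.h above.
            auto res = tint::bench::LoadProgram(filename);
            benchmark::DoNotOptimize(res);
        }
    }
    // Registers one benchmark per file listed by the macro.
    TINT_BENCHMARK_WGSL_PROGRAMS(ParseWGSL);
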
diff --git a/chromium/third_party/dawn/src/tint/builtin_table.cc b/chromium/third_party/dawn/src/tint/builtin_table.cc
deleted file mode 100644
index 13e7c5feb7d..00000000000
--- a/chromium/third_party/dawn/src/tint/builtin_table.cc
+++ /dev/null
@@ -1,1169 +0,0 @@
-// Copyright 2021 The Tint Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "src/tint/builtin_table.h"
-
-#include <algorithm>
-#include <limits>
-#include <unordered_map>
-#include <utility>
-
-#include "src/tint/program_builder.h"
-#include "src/tint/sem/atomic_type.h"
-#include "src/tint/sem/depth_multisampled_texture_type.h"
-#include "src/tint/sem/depth_texture_type.h"
-#include "src/tint/sem/external_texture_type.h"
-#include "src/tint/sem/multisampled_texture_type.h"
-#include "src/tint/sem/pipeline_stage_set.h"
-#include "src/tint/sem/sampled_texture_type.h"
-#include "src/tint/sem/storage_texture_type.h"
-#include "src/tint/utils/hash.h"
-#include "src/tint/utils/map.h"
-#include "src/tint/utils/math.h"
-#include "src/tint/utils/scoped_assignment.h"
-
-namespace tint {
-namespace {
-
-// Forward declarations
-struct OverloadInfo;
-class Matchers;
-class NumberMatcher;
-class TypeMatcher;
-
-/// A special type that matches all TypeMatchers
-class Any final : public Castable<Any, sem::Type> {
- public:
- Any() = default;
- ~Any() override = default;
-
- // Stub implementations for sem::Type conformance.
- size_t Hash() const override { return 0; }
- bool Equals(const sem::Type&) const override { return false; }
- std::string FriendlyName(const SymbolTable&) const override {
- return "<any>";
- }
-};
-
-/// Number is a 32-bit unsigned integer, which can be in one of three states:
-/// * Invalid - Number has not been assigned a value
-/// * Valid - a fixed integer value
-/// * Any - matches any other non-invalid number
-struct Number {
- static const Number any;
- static const Number invalid;
-
- /// Constructed as a valid number with the value v
- explicit Number(uint32_t v) : value_(v), state_(kValid) {}
-
- /// @returns the value of the number
- inline uint32_t Value() const { return value_; }
-
-  /// @returns true if the number is valid
- inline bool IsValid() const { return state_ == kValid; }
-
-  /// @returns true if the number is any
- inline bool IsAny() const { return state_ == kAny; }
-
- /// Assignment operator.
- /// The number becomes valid, with the value n
- inline Number& operator=(uint32_t n) {
- value_ = n;
- state_ = kValid;
- return *this;
- }
-
- private:
- enum State {
- kInvalid,
- kValid,
- kAny,
- };
-
- constexpr explicit Number(State state) : state_(state) {}
-
- uint32_t value_ = 0;
- State state_ = kInvalid;
-};
-
-const Number Number::any{Number::kAny};
-const Number Number::invalid{Number::kInvalid};
-
-/// ClosedState holds the state of the open / closed numbers and types.
-/// Used by the MatchState.
-class ClosedState {
- public:
- explicit ClosedState(ProgramBuilder& b) : builder(b) {}
-
- /// If the type with index `idx` is open, then it is closed with type `ty` and
- /// Type() returns true. If the type is closed, then `Type()` returns true iff
- /// it is equal to `ty`.
- bool Type(uint32_t idx, const sem::Type* ty) {
- auto res = types_.emplace(idx, ty);
- return res.second || res.first->second == ty;
- }
-
- /// If the number with index `idx` is open, then it is closed with number
- /// `number` and Num() returns true. If the number is closed, then `Num()`
-  /// returns true iff it is equal to `number`.
- bool Num(uint32_t idx, Number number) {
- auto res = numbers_.emplace(idx, number.Value());
- return res.second || res.first->second == number.Value();
- }
-
- /// Type returns the closed type with index `idx`.
- /// An ICE is raised if the type is not closed.
- const sem::Type* Type(uint32_t idx) const {
- auto it = types_.find(idx);
- if (it == types_.end()) {
- TINT_ICE(Resolver, builder.Diagnostics())
- << "type with index " << idx << " is not closed";
- return nullptr;
- }
- TINT_ASSERT(Resolver, it != types_.end());
- return it->second;
- }
-
-  /// Num returns the closed number with index `idx`.
- /// An ICE is raised if the number is not closed.
- Number Num(uint32_t idx) const {
- auto it = numbers_.find(idx);
- if (it == numbers_.end()) {
- TINT_ICE(Resolver, builder.Diagnostics())
- << "number with index " << idx << " is not closed";
- return Number::invalid;
- }
- return Number(it->second);
- }
-
- private:
- ProgramBuilder& builder;
- std::unordered_map<uint32_t, const sem::Type*> types_;
- std::unordered_map<uint32_t, uint32_t> numbers_;
-};
-
-/// Index type used for matcher indices
-using MatcherIndex = uint8_t;
-
-/// Index value used for open types / numbers that do not have a constraint
-constexpr MatcherIndex kNoMatcher = std::numeric_limits<MatcherIndex>::max();
-
-/// MatchState holds the state used to match an overload.
-class MatchState {
- public:
- MatchState(ProgramBuilder& b,
- ClosedState& c,
- const Matchers& m,
- const OverloadInfo& o,
- MatcherIndex const* matcher_indices)
- : builder(b),
- closed(c),
- matchers(m),
- overload(o),
- matcher_indices_(matcher_indices) {}
-
- /// The program builder
- ProgramBuilder& builder;
- /// The open / closed types and numbers
- ClosedState& closed;
- /// The type and number matchers
- Matchers const& matchers;
- /// The current overload being evaluated
- OverloadInfo const& overload;
-
- /// Type uses the next TypeMatcher from the matcher indices to match the type
- /// `ty`. If the type matches, the canonical expected type is returned. If the
- /// type `ty` does not match, then nullptr is returned.
- /// @note: The matcher indices are progressed on calling.
- const sem::Type* Type(const sem::Type* ty);
-
- /// Num uses the next NumMatcher from the matcher indices to match the number
- /// `num`. If the number matches, the canonical expected number is returned.
- /// If the number `num` does not match, then an invalid number is returned.
- /// @note: The matcher indices are progressed on calling.
- Number Num(Number num);
-
- /// @returns a string representation of the next TypeMatcher from the matcher
- /// indices.
- /// @note: The matcher indices are progressed on calling.
- std::string TypeName();
-
- /// @returns a string representation of the next NumberMatcher from the
- /// matcher indices.
- /// @note: The matcher indices are progressed on calling.
- std::string NumName();
-
- private:
- MatcherIndex const* matcher_indices_ = nullptr;
-};
-
-/// A TypeMatcher is the interface used to match a type used as part of an
-/// overload's parameter or return type.
-class TypeMatcher {
- public:
- /// Destructor
- virtual ~TypeMatcher() = default;
-
- /// Checks whether the given type matches the matcher rules, and returns the
- /// expected, canonicalized type on success.
- /// Match may close open types and numbers in state.
- /// @param type the type to match
- /// @returns the canonicalized type on match, otherwise nullptr
- virtual const sem::Type* Match(MatchState& state,
- const sem::Type* type) const = 0;
-
- /// @return a string representation of the matcher. Used for printing error
- /// messages when no overload is found.
- virtual std::string String(MatchState& state) const = 0;
-};
-
-/// A NumberMatcher is the interface used to match a number or enumerator used
-/// as part of an overload's parameter or return type.
-class NumberMatcher {
- public:
- /// Destructor
- virtual ~NumberMatcher() = default;
-
- /// Checks whether the given number matches the matcher rules.
- /// Match may close open numbers in state.
- /// @param number the number to match
-  /// @returns the canonicalized number on match, otherwise an invalid Number.
- virtual Number Match(MatchState& state, Number number) const = 0;
-
- /// @return a string representation of the matcher. Used for printing error
- /// messages when no overload is found.
- virtual std::string String(MatchState& state) const = 0;
-};
-
-/// OpenTypeMatcher is a Matcher for an open type.
-/// The OpenTypeMatcher will match against any type (so long as it is consistent
-/// across all uses in the overload)
-class OpenTypeMatcher : public TypeMatcher {
- public:
- /// Constructor
- explicit OpenTypeMatcher(uint32_t index) : index_(index) {}
-
- const sem::Type* Match(MatchState& state,
- const sem::Type* type) const override {
- if (type->Is<Any>()) {
- return state.closed.Type(index_);
- }
- return state.closed.Type(index_, type) ? type : nullptr;
- }
-
- std::string String(MatchState& state) const override;
-
- private:
- uint32_t index_;
-};
-
-/// OpenNumberMatcher is a Matcher for an open number.
-/// The OpenNumberMatcher will match against any number (so long as it is
-/// consistent for the overload)
-class OpenNumberMatcher : public NumberMatcher {
- public:
- explicit OpenNumberMatcher(uint32_t index) : index_(index) {}
-
- Number Match(MatchState& state, Number number) const override {
- if (number.IsAny()) {
- return state.closed.Num(index_);
- }
- return state.closed.Num(index_, number) ? number : Number::invalid;
- }
-
- std::string String(MatchState& state) const override;
-
- private:
- uint32_t index_;
-};
-
-////////////////////////////////////////////////////////////////////////////////
-// Binding functions for use in the generated builtin_table.inl
-// TODO(bclayton): See if we can move more of this hand-rolled code to the
-// template
-////////////////////////////////////////////////////////////////////////////////
-using TexelFormat = ast::TexelFormat;
-using Access = ast::Access;
-using StorageClass = ast::StorageClass;
-using ParameterUsage = sem::ParameterUsage;
-using PipelineStageSet = sem::PipelineStageSet;
-using PipelineStage = ast::PipelineStage;
-
-bool match_bool(const sem::Type* ty) {
- return ty->IsAnyOf<Any, sem::Bool>();
-}
-
-const sem::Bool* build_bool(MatchState& state) {
- return state.builder.create<sem::Bool>();
-}
-
-bool match_f32(const sem::Type* ty) {
- return ty->IsAnyOf<Any, sem::F32>();
-}
-
-const sem::I32* build_i32(MatchState& state) {
- return state.builder.create<sem::I32>();
-}
-
-bool match_i32(const sem::Type* ty) {
- return ty->IsAnyOf<Any, sem::I32>();
-}
-
-const sem::U32* build_u32(MatchState& state) {
- return state.builder.create<sem::U32>();
-}
-
-bool match_u32(const sem::Type* ty) {
- return ty->IsAnyOf<Any, sem::U32>();
-}
-
-const sem::F32* build_f32(MatchState& state) {
- return state.builder.create<sem::F32>();
-}
-
-bool match_vec(const sem::Type* ty, Number& N, const sem::Type*& T) {
- if (ty->Is<Any>()) {
- N = Number::any;
- T = ty;
- return true;
- }
-
- if (auto* v = ty->As<sem::Vector>()) {
- N = v->Width();
- T = v->type();
- return true;
- }
- return false;
-}
-
-const sem::Vector* build_vec(MatchState& state, Number N, const sem::Type* el) {
- return state.builder.create<sem::Vector>(el, N.Value());
-}
-
-template <int N>
-bool match_vec(const sem::Type* ty, const sem::Type*& T) {
- if (ty->Is<Any>()) {
- T = ty;
- return true;
- }
-
- if (auto* v = ty->As<sem::Vector>()) {
- if (v->Width() == N) {
- T = v->type();
- return true;
- }
- }
- return false;
-}
-
-bool match_vec2(const sem::Type* ty, const sem::Type*& T) {
- return match_vec<2>(ty, T);
-}
-
-const sem::Vector* build_vec2(MatchState& state, const sem::Type* T) {
- return build_vec(state, Number(2), T);
-}
-
-bool match_vec3(const sem::Type* ty, const sem::Type*& T) {
- return match_vec<3>(ty, T);
-}
-
-const sem::Vector* build_vec3(MatchState& state, const sem::Type* T) {
- return build_vec(state, Number(3), T);
-}
-
-bool match_vec4(const sem::Type* ty, const sem::Type*& T) {
- return match_vec<4>(ty, T);
-}
-
-const sem::Vector* build_vec4(MatchState& state, const sem::Type* T) {
- return build_vec(state, Number(4), T);
-}
-
-bool match_mat(const sem::Type* ty, Number& M, Number& N, const sem::Type*& T) {
- if (ty->Is<Any>()) {
- M = Number::any;
- N = Number::any;
- T = ty;
- return true;
- }
- if (auto* m = ty->As<sem::Matrix>()) {
- M = m->columns();
- N = m->ColumnType()->Width();
- T = m->type();
- return true;
- }
- return false;
-}
-
-const sem::Matrix* build_mat(MatchState& state,
- Number N,
- Number M,
- const sem::Type* T) {
- auto* column_type = state.builder.create<sem::Vector>(T, M.Value());
- return state.builder.create<sem::Matrix>(column_type, N.Value());
-}
-
-bool match_array(const sem::Type* ty, const sem::Type*& T) {
- if (ty->Is<Any>()) {
- T = ty;
- return true;
- }
-
- if (auto* a = ty->As<sem::Array>()) {
- if (a->Count() == 0) {
- T = a->ElemType();
- return true;
- }
- }
- return false;
-}
-
-const sem::Array* build_array(MatchState& state, const sem::Type* el) {
- return state.builder.create<sem::Array>(el,
- /* count */ 0u,
- /* align */ 0u,
- /* size */ 0u,
- /* stride */ 0u,
- /* stride_implicit */ 0u);
-}
-
-bool match_ptr(const sem::Type* ty, Number& S, const sem::Type*& T, Number& A) {
- if (ty->Is<Any>()) {
- S = Number::any;
- T = ty;
- A = Number::any;
- return true;
- }
-
- if (auto* p = ty->As<sem::Pointer>()) {
- S = Number(static_cast<uint32_t>(p->StorageClass()));
- T = p->StoreType();
- A = Number(static_cast<uint32_t>(p->Access()));
- return true;
- }
- return false;
-}
-
-const sem::Pointer* build_ptr(MatchState& state,
- Number S,
- const sem::Type* T,
- Number& A) {
- return state.builder.create<sem::Pointer>(
- T, static_cast<ast::StorageClass>(S.Value()),
- static_cast<ast::Access>(A.Value()));
-}
-
-bool match_atomic(const sem::Type* ty, const sem::Type*& T) {
- if (ty->Is<Any>()) {
- T = ty;
- return true;
- }
-
- if (auto* a = ty->As<sem::Atomic>()) {
- T = a->Type();
- return true;
- }
- return false;
-}
-
-const sem::Atomic* build_atomic(MatchState& state, const sem::Type* T) {
- return state.builder.create<sem::Atomic>(T);
-}
-
-bool match_sampler(const sem::Type* ty) {
- if (ty->Is<Any>()) {
- return true;
- }
- return ty->Is([](const sem::Sampler* s) {
- return s->kind() == ast::SamplerKind::kSampler;
- });
-}
-
-const sem::Sampler* build_sampler(MatchState& state) {
- return state.builder.create<sem::Sampler>(ast::SamplerKind::kSampler);
-}
-
-bool match_sampler_comparison(const sem::Type* ty) {
- if (ty->Is<Any>()) {
- return true;
- }
- return ty->Is([](const sem::Sampler* s) {
- return s->kind() == ast::SamplerKind::kComparisonSampler;
- });
-}
-
-const sem::Sampler* build_sampler_comparison(MatchState& state) {
- return state.builder.create<sem::Sampler>(
- ast::SamplerKind::kComparisonSampler);
-}
-
-bool match_texture(const sem::Type* ty,
- ast::TextureDimension dim,
- const sem::Type*& T) {
- if (ty->Is<Any>()) {
- T = ty;
- return true;
- }
- if (auto* v = ty->As<sem::SampledTexture>()) {
- if (v->dim() == dim) {
- T = v->type();
- return true;
- }
- }
- return false;
-}
-
-#define JOIN(a, b) a##b
-
-#define DECLARE_SAMPLED_TEXTURE(suffix, dim) \
- bool JOIN(match_texture_, suffix)(const sem::Type* ty, \
- const sem::Type*& T) { \
- return match_texture(ty, dim, T); \
- } \
- const sem::SampledTexture* JOIN(build_texture_, suffix)( \
- MatchState & state, const sem::Type* T) { \
- return state.builder.create<sem::SampledTexture>(dim, T); \
- }
-
-DECLARE_SAMPLED_TEXTURE(1d, ast::TextureDimension::k1d)
-DECLARE_SAMPLED_TEXTURE(2d, ast::TextureDimension::k2d)
-DECLARE_SAMPLED_TEXTURE(2d_array, ast::TextureDimension::k2dArray)
-DECLARE_SAMPLED_TEXTURE(3d, ast::TextureDimension::k3d)
-DECLARE_SAMPLED_TEXTURE(cube, ast::TextureDimension::kCube)
-DECLARE_SAMPLED_TEXTURE(cube_array, ast::TextureDimension::kCubeArray)
-#undef DECLARE_SAMPLED_TEXTURE
-
-bool match_texture_multisampled(const sem::Type* ty,
- ast::TextureDimension dim,
- const sem::Type*& T) {
- if (ty->Is<Any>()) {
- T = ty;
- return true;
- }
- if (auto* v = ty->As<sem::MultisampledTexture>()) {
- if (v->dim() == dim) {
- T = v->type();
- return true;
- }
- }
- return false;
-}
-
-#define DECLARE_MULTISAMPLED_TEXTURE(suffix, dim) \
- bool JOIN(match_texture_multisampled_, suffix)(const sem::Type* ty, \
- const sem::Type*& T) { \
- return match_texture_multisampled(ty, dim, T); \
- } \
- const sem::MultisampledTexture* JOIN(build_texture_multisampled_, suffix)( \
- MatchState & state, const sem::Type* T) { \
- return state.builder.create<sem::MultisampledTexture>(dim, T); \
- }
-
-DECLARE_MULTISAMPLED_TEXTURE(2d, ast::TextureDimension::k2d)
-#undef DECLARE_MULTISAMPLED_TEXTURE
-
-bool match_texture_depth(const sem::Type* ty, ast::TextureDimension dim) {
- if (ty->Is<Any>()) {
- return true;
- }
- return ty->Is([&](const sem::DepthTexture* t) { return t->dim() == dim; });
-}
-
-#define DECLARE_DEPTH_TEXTURE(suffix, dim) \
- bool JOIN(match_texture_depth_, suffix)(const sem::Type* ty) { \
- return match_texture_depth(ty, dim); \
- } \
- const sem::DepthTexture* JOIN(build_texture_depth_, \
- suffix)(MatchState & state) { \
- return state.builder.create<sem::DepthTexture>(dim); \
- }
-
-DECLARE_DEPTH_TEXTURE(2d, ast::TextureDimension::k2d)
-DECLARE_DEPTH_TEXTURE(2d_array, ast::TextureDimension::k2dArray)
-DECLARE_DEPTH_TEXTURE(cube, ast::TextureDimension::kCube)
-DECLARE_DEPTH_TEXTURE(cube_array, ast::TextureDimension::kCubeArray)
-#undef DECLARE_DEPTH_TEXTURE
-
-bool match_texture_depth_multisampled_2d(const sem::Type* ty) {
- if (ty->Is<Any>()) {
- return true;
- }
- return ty->Is([&](const sem::DepthMultisampledTexture* t) {
- return t->dim() == ast::TextureDimension::k2d;
- });
-}
-
-sem::DepthMultisampledTexture* build_texture_depth_multisampled_2d(
- MatchState& state) {
- return state.builder.create<sem::DepthMultisampledTexture>(
- ast::TextureDimension::k2d);
-}
-
-bool match_texture_storage(const sem::Type* ty,
- ast::TextureDimension dim,
- Number& F,
- Number& A) {
- if (ty->Is<Any>()) {
- F = Number::any;
- A = Number::any;
- return true;
- }
- if (auto* v = ty->As<sem::StorageTexture>()) {
- if (v->dim() == dim) {
- F = Number(static_cast<uint32_t>(v->texel_format()));
- A = Number(static_cast<uint32_t>(v->access()));
- return true;
- }
- }
- return false;
-}
-
-#define DECLARE_STORAGE_TEXTURE(suffix, dim) \
- bool JOIN(match_texture_storage_, suffix)(const sem::Type* ty, Number& F, \
- Number& A) { \
- return match_texture_storage(ty, dim, F, A); \
- } \
- const sem::StorageTexture* JOIN(build_texture_storage_, suffix)( \
- MatchState & state, Number F, Number A) { \
- auto format = static_cast<TexelFormat>(F.Value()); \
- auto access = static_cast<Access>(A.Value()); \
- auto* T = sem::StorageTexture::SubtypeFor(format, state.builder.Types()); \
- return state.builder.create<sem::StorageTexture>(dim, format, access, T); \
- }
-
-DECLARE_STORAGE_TEXTURE(1d, ast::TextureDimension::k1d)
-DECLARE_STORAGE_TEXTURE(2d, ast::TextureDimension::k2d)
-DECLARE_STORAGE_TEXTURE(2d_array, ast::TextureDimension::k2dArray)
-DECLARE_STORAGE_TEXTURE(3d, ast::TextureDimension::k3d)
-#undef DECLARE_STORAGE_TEXTURE
-
-bool match_texture_external(const sem::Type* ty) {
- return ty->IsAnyOf<Any, sem::ExternalTexture>();
-}
-
-const sem::ExternalTexture* build_texture_external(MatchState& state) {
- return state.builder.create<sem::ExternalTexture>();
-}
-
-// Builtin types starting with a _ prefix cannot be declared in WGSL, so they
-// can only be used as return types. Because of this, they must only match Any,
-// which is used as the return type matcher.
-bool match_modf_result(const sem::Type* ty) {
- return ty->Is<Any>();
-}
-bool match_modf_result_vec(const sem::Type* ty, Number& N) {
- if (!ty->Is<Any>()) {
- return false;
- }
- N = Number::any;
- return true;
-}
-bool match_frexp_result(const sem::Type* ty) {
- return ty->Is<Any>();
-}
-bool match_frexp_result_vec(const sem::Type* ty, Number& N) {
- if (!ty->Is<Any>()) {
- return false;
- }
- N = Number::any;
- return true;
-}
-
-struct NameAndType {
- std::string name;
- sem::Type* type;
-};
-const sem::Struct* build_struct(
- MatchState& state,
- std::string name,
- std::initializer_list<NameAndType> member_names_and_types) {
- uint32_t offset = 0;
- uint32_t max_align = 0;
- sem::StructMemberList members;
- for (auto& m : member_names_and_types) {
- uint32_t align = m.type->Align();
- uint32_t size = m.type->Size();
- offset = utils::RoundUp(align, offset);
- max_align = std::max(max_align, align);
- members.emplace_back(state.builder.create<sem::StructMember>(
- /* declaration */ nullptr,
- /* name */ state.builder.Sym(m.name),
- /* type */ m.type,
- /* index */ static_cast<uint32_t>(members.size()),
- /* offset */ offset,
- /* align */ align,
- /* size */ size));
- offset += size;
- }
- uint32_t size_without_padding = offset;
- uint32_t size_with_padding = utils::RoundUp(max_align, offset);
- return state.builder.create<sem::Struct>(
- /* declaration */ nullptr,
- /* name */ state.builder.Sym(name),
- /* members */ members,
- /* align */ max_align,
- /* size */ size_with_padding,
- /* size_no_padding */ size_without_padding);
-}
-
-const sem::Struct* build_modf_result(MatchState& state) {
- auto* f32 = state.builder.create<sem::F32>();
- return build_struct(state, "__modf_result", {{"fract", f32}, {"whole", f32}});
-}
-const sem::Struct* build_modf_result_vec(MatchState& state, Number& n) {
- auto* vec_f32 = state.builder.create<sem::Vector>(
- state.builder.create<sem::F32>(), n.Value());
- return build_struct(state, "__modf_result_vec" + std::to_string(n.Value()),
- {{"fract", vec_f32}, {"whole", vec_f32}});
-}
-const sem::Struct* build_frexp_result(MatchState& state) {
- auto* f32 = state.builder.create<sem::F32>();
- auto* i32 = state.builder.create<sem::I32>();
- return build_struct(state, "__frexp_result", {{"sig", f32}, {"exp", i32}});
-}
-const sem::Struct* build_frexp_result_vec(MatchState& state, Number& n) {
- auto* vec_f32 = state.builder.create<sem::Vector>(
- state.builder.create<sem::F32>(), n.Value());
- auto* vec_i32 = state.builder.create<sem::Vector>(
- state.builder.create<sem::I32>(), n.Value());
- return build_struct(state, "__frexp_result_vec" + std::to_string(n.Value()),
- {{"sig", vec_f32}, {"exp", vec_i32}});
-}
-
-/// ParameterInfo describes a parameter
-struct ParameterInfo {
- /// The parameter usage (parameter name in definition file)
- const ParameterUsage usage;
-
- /// Pointer to a list of indices that are used to match the parameter type.
- /// The matcher indices index on Matchers::type and / or Matchers::number.
- /// These indices are consumed by the matchers themselves.
- /// The first index is always a TypeMatcher.
- MatcherIndex const* const matcher_indices;
-};
-
-/// OpenTypeInfo describes an open type
-struct OpenTypeInfo {
- /// Name of the open type (e.g. 'T')
- const char* name;
- /// Optional type matcher constraint.
- /// Either an index in Matchers::type, or kNoMatcher
- const MatcherIndex matcher_index;
-};
-
-/// OpenNumberInfo describes an open number
-struct OpenNumberInfo {
- /// Name of the open number (e.g. 'N')
- const char* name;
- /// Optional number matcher constraint.
- /// Either an index in Matchers::number, or kNoMatcher
- const MatcherIndex matcher_index;
-};
-
-/// OverloadInfo describes a single function overload
-struct OverloadInfo {
- /// Total number of parameters for the overload
- const uint8_t num_parameters;
- /// Total number of open types for the overload
- const uint8_t num_open_types;
- /// Total number of open numbers for the overload
- const uint8_t num_open_numbers;
- /// Pointer to the first open type
- OpenTypeInfo const* const open_types;
- /// Pointer to the first open number
- OpenNumberInfo const* const open_numbers;
- /// Pointer to the first parameter
- ParameterInfo const* const parameters;
- /// Pointer to a list of matcher indices that index on Matchers::type and
- /// Matchers::number, used to build the return type. If the function has no
- /// return type then this is null
- MatcherIndex const* const return_matcher_indices;
- /// The pipeline stages that this overload can be used in
- PipelineStageSet supported_stages;
- /// True if the overload is marked as deprecated
- bool is_deprecated;
-};
-
-/// BuiltinInfo describes a builtin function
-struct BuiltinInfo {
- /// Number of overloads of the builtin function
- const uint8_t num_overloads;
- /// Pointer to the start of the overloads for the function
- OverloadInfo const* const overloads;
-};
-
-#include "builtin_table.inl"
-
-/// BuiltinPrototype describes a fully matched builtin function, which is
-/// used as a lookup for building unique sem::Builtin instances.
-struct BuiltinPrototype {
- /// Parameter describes a single parameter
- struct Parameter {
- /// Parameter type
- const sem::Type* const type;
- /// Parameter usage
- ParameterUsage const usage = ParameterUsage::kNone;
- };
-
- /// Hasher provides a hash function for the BuiltinPrototype
- struct Hasher {
- /// @param i the BuiltinPrototype to create a hash for
- /// @return the hash value
- inline std::size_t operator()(const BuiltinPrototype& i) const {
- size_t hash = utils::Hash(i.parameters.size());
- for (auto& p : i.parameters) {
- utils::HashCombine(&hash, p.type, p.usage);
- }
- return utils::Hash(hash, i.type, i.return_type, i.supported_stages,
- i.is_deprecated);
- }
- };
-
- sem::BuiltinType type = sem::BuiltinType::kNone;
- std::vector<Parameter> parameters;
- sem::Type const* return_type = nullptr;
- PipelineStageSet supported_stages;
- bool is_deprecated = false;
-};
-
-/// Equality operator for BuiltinPrototype
-bool operator==(const BuiltinPrototype& a, const BuiltinPrototype& b) {
- if (a.type != b.type || a.supported_stages != b.supported_stages ||
- a.return_type != b.return_type || a.is_deprecated != b.is_deprecated ||
- a.parameters.size() != b.parameters.size()) {
- return false;
- }
- for (size_t i = 0; i < a.parameters.size(); i++) {
- auto& pa = a.parameters[i];
- auto& pb = b.parameters[i];
- if (pa.type != pb.type || pa.usage != pb.usage) {
- return false;
- }
- }
- return true;
-}
-
-/// Impl is the private implementation of the BuiltinTable interface.
-class Impl : public BuiltinTable {
- public:
- explicit Impl(ProgramBuilder& builder);
-
- const sem::Builtin* Lookup(sem::BuiltinType builtin_type,
- const std::vector<const sem::Type*>& args,
- const Source& source) override;
-
- private:
- const sem::Builtin* Match(sem::BuiltinType builtin_type,
- const OverloadInfo& overload,
- const std::vector<const sem::Type*>& args,
- int& match_score);
-
- MatchState Match(ClosedState& closed,
- const OverloadInfo& overload,
- MatcherIndex const* matcher_indices) const;
-
- void PrintOverload(std::ostream& ss,
- const OverloadInfo& overload,
- sem::BuiltinType builtin_type) const;
-
- ProgramBuilder& builder;
- Matchers matchers;
- std::unordered_map<BuiltinPrototype, sem::Builtin*, BuiltinPrototype::Hasher>
- builtins;
-};
-
-/// @return a string representing a call to a builtin with the given argument
-/// types.
-std::string CallSignature(ProgramBuilder& builder,
- sem::BuiltinType builtin_type,
- const std::vector<const sem::Type*>& args) {
- std::stringstream ss;
- ss << sem::str(builtin_type) << "(";
- {
- bool first = true;
- for (auto* arg : args) {
- if (!first) {
- ss << ", ";
- }
- first = false;
- ss << arg->UnwrapRef()->FriendlyName(builder.Symbols());
- }
- }
- ss << ")";
-
- return ss.str();
-}
-
-std::string OpenTypeMatcher::String(MatchState& state) const {
- return state.overload.open_types[index_].name;
-}
-
-std::string OpenNumberMatcher::String(MatchState& state) const {
- return state.overload.open_numbers[index_].name;
-}
-
-Impl::Impl(ProgramBuilder& b) : builder(b) {}
-
-const sem::Builtin* Impl::Lookup(sem::BuiltinType builtin_type,
- const std::vector<const sem::Type*>& args,
- const Source& source) {
- // Candidate holds information about a mismatched overload that could be what
- // the user intended to call.
- struct Candidate {
- const OverloadInfo* overload;
- int score;
- };
-
- // The list of failed matches that had promise.
- std::vector<Candidate> candidates;
-
- auto& builtin = kBuiltins[static_cast<uint32_t>(builtin_type)];
- for (uint32_t o = 0; o < builtin.num_overloads; o++) {
- int match_score = 1000;
- auto& overload = builtin.overloads[o];
- if (auto* match = Match(builtin_type, overload, args, match_score)) {
- return match;
- }
- if (match_score > 0) {
- candidates.emplace_back(Candidate{&overload, match_score});
- }
- }
-
- // Sort the candidates with the most promising first
- std::stable_sort(
- candidates.begin(), candidates.end(),
- [](const Candidate& a, const Candidate& b) { return a.score > b.score; });
-
- // Generate an error message
- std::stringstream ss;
- ss << "no matching call to " << CallSignature(builder, builtin_type, args)
- << std::endl;
- if (!candidates.empty()) {
- ss << std::endl;
- ss << candidates.size() << " candidate function"
- << (candidates.size() > 1 ? "s:" : ":") << std::endl;
- for (auto& candidate : candidates) {
- ss << " ";
- PrintOverload(ss, *candidate.overload, builtin_type);
- ss << std::endl;
- }
- }
- builder.Diagnostics().add_error(diag::System::Resolver, ss.str(), source);
- return nullptr;
-}
-
-const sem::Builtin* Impl::Match(sem::BuiltinType builtin_type,
- const OverloadInfo& overload,
- const std::vector<const sem::Type*>& args,
- int& match_score) {
-  // Score weight for argument <-> parameter count matches / mismatches
- constexpr int kScorePerParamArgMismatch = -1;
- constexpr int kScorePerMatchedParam = 2;
- constexpr int kScorePerMatchedOpenType = 1;
- constexpr int kScorePerMatchedOpenNumber = 1;
-
- auto num_parameters = overload.num_parameters;
- auto num_arguments = static_cast<decltype(num_parameters)>(args.size());
-
- bool overload_matched = true;
-
- if (num_parameters != num_arguments) {
- match_score +=
- kScorePerParamArgMismatch * (std::max(num_parameters, num_arguments) -
- std::min(num_parameters, num_arguments));
- overload_matched = false;
- }
-
- ClosedState closed(builder);
-
- std::vector<BuiltinPrototype::Parameter> parameters;
-
- auto num_params = std::min(num_parameters, num_arguments);
- for (uint32_t p = 0; p < num_params; p++) {
- auto& parameter = overload.parameters[p];
- auto* indices = parameter.matcher_indices;
- auto* type = Match(closed, overload, indices).Type(args[p]->UnwrapRef());
- if (type) {
- parameters.emplace_back(
- BuiltinPrototype::Parameter{type, parameter.usage});
- match_score += kScorePerMatchedParam;
- } else {
- overload_matched = false;
- }
- }
-
- if (overload_matched) {
- // Check all constrained open types matched
- for (uint32_t ot = 0; ot < overload.num_open_types; ot++) {
- auto& open_type = overload.open_types[ot];
- if (open_type.matcher_index != kNoMatcher) {
- auto* index = &open_type.matcher_index;
- if (Match(closed, overload, index).Type(closed.Type(ot))) {
- match_score += kScorePerMatchedOpenType;
- } else {
- overload_matched = false;
- }
- }
- }
- }
-
- if (overload_matched) {
- // Check all constrained open numbers matched
- for (uint32_t on = 0; on < overload.num_open_numbers; on++) {
- auto& open_number = overload.open_numbers[on];
- if (open_number.matcher_index != kNoMatcher) {
- auto* index = &open_number.matcher_index;
- if (Match(closed, overload, index).Num(closed.Num(on)).IsValid()) {
- match_score += kScorePerMatchedOpenNumber;
- } else {
- overload_matched = false;
- }
- }
- }
- }
-
- if (!overload_matched) {
- return nullptr;
- }
-
- // Build the return type
- const sem::Type* return_type = nullptr;
- if (auto* indices = overload.return_matcher_indices) {
- Any any;
- return_type = Match(closed, overload, indices).Type(&any);
- if (!return_type) {
- std::stringstream ss;
- PrintOverload(ss, overload, builtin_type);
- TINT_ICE(Resolver, builder.Diagnostics())
- << "MatchState.Match() returned null for " << ss.str();
- return nullptr;
- }
- } else {
- return_type = builder.create<sem::Void>();
- }
-
- BuiltinPrototype builtin;
- builtin.type = builtin_type;
- builtin.return_type = return_type;
- builtin.parameters = std::move(parameters);
- builtin.supported_stages = overload.supported_stages;
- builtin.is_deprecated = overload.is_deprecated;
-
- // De-duplicate builtins that are identical.
- return utils::GetOrCreate(builtins, builtin, [&] {
- std::vector<sem::Parameter*> params;
- params.reserve(builtin.parameters.size());
- for (auto& p : builtin.parameters) {
- params.emplace_back(builder.create<sem::Parameter>(
- nullptr, static_cast<uint32_t>(params.size()), p.type,
- ast::StorageClass::kNone, ast::Access::kUndefined, p.usage));
- }
- return builder.create<sem::Builtin>(
- builtin.type, builtin.return_type, std::move(params),
- builtin.supported_stages, builtin.is_deprecated);
- });
-}
-
-MatchState Impl::Match(ClosedState& closed,
- const OverloadInfo& overload,
- MatcherIndex const* matcher_indices) const {
- return MatchState(builder, closed, matchers, overload, matcher_indices);
-}
-
-void Impl::PrintOverload(std::ostream& ss,
- const OverloadInfo& overload,
- sem::BuiltinType builtin_type) const {
- ClosedState closed(builder);
-
- ss << builtin_type << "(";
- for (uint32_t p = 0; p < overload.num_parameters; p++) {
- auto& parameter = overload.parameters[p];
- if (p > 0) {
- ss << ", ";
- }
- if (parameter.usage != ParameterUsage::kNone) {
- ss << sem::str(parameter.usage) << ": ";
- }
- auto* indices = parameter.matcher_indices;
- ss << Match(closed, overload, indices).TypeName();
- }
- ss << ")";
- if (overload.return_matcher_indices) {
- ss << " -> ";
- auto* indices = overload.return_matcher_indices;
- ss << Match(closed, overload, indices).TypeName();
- }
-
- bool first = true;
- auto separator = [&] {
- ss << (first ? " where: " : ", ");
- first = false;
- };
- for (uint32_t i = 0; i < overload.num_open_types; i++) {
- auto& open_type = overload.open_types[i];
- if (open_type.matcher_index != kNoMatcher) {
- separator();
- ss << open_type.name;
- auto* index = &open_type.matcher_index;
- ss << " is " << Match(closed, overload, index).TypeName();
- }
- }
- for (uint32_t i = 0; i < overload.num_open_numbers; i++) {
- auto& open_number = overload.open_numbers[i];
- if (open_number.matcher_index != kNoMatcher) {
- separator();
- ss << open_number.name;
- auto* index = &open_number.matcher_index;
- ss << " is " << Match(closed, overload, index).NumName();
- }
- }
-}
-
-const sem::Type* MatchState::Type(const sem::Type* ty) {
- MatcherIndex matcher_index = *matcher_indices_++;
- auto* matcher = matchers.type[matcher_index];
- return matcher->Match(*this, ty);
-}
-
-Number MatchState::Num(Number number) {
- MatcherIndex matcher_index = *matcher_indices_++;
- auto* matcher = matchers.number[matcher_index];
- return matcher->Match(*this, number);
-}
-
-std::string MatchState::TypeName() {
- MatcherIndex matcher_index = *matcher_indices_++;
- auto* matcher = matchers.type[matcher_index];
- return matcher->String(*this);
-}
-
-std::string MatchState::NumName() {
- MatcherIndex matcher_index = *matcher_indices_++;
- auto* matcher = matchers.number[matcher_index];
- return matcher->String(*this);
-}
-
-} // namespace
-
-std::unique_ptr<BuiltinTable> BuiltinTable::Create(ProgramBuilder& builder) {
- return std::make_unique<Impl>(builder);
-}
-
-BuiltinTable::~BuiltinTable() = default;
-
-/// TypeInfo for the Any type declared in the anonymous namespace above
-TINT_INSTANTIATE_TYPEINFO(Any);
-
-} // namespace tint
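
The heart of the table deleted above is ClosedState: the first argument that closes an open type or number fixes its value, and every later use of that open type or number must agree, otherwise the overload is rejected. A toy, self-contained illustration of that rule (not Tint code; the map and helper names are placeholders) is:

    #include <string>
    #include <unordered_map>

    // First use closes the open type at index `idx`; later uses succeed only
    // if they name the same type, mirroring ClosedState::Type() above.
    bool CloseType(std::unordered_map<int, std::string>& closed, int idx, const std::string& ty) {
        auto res = closed.emplace(idx, ty);
        return res.second || res.first->second == ty;
    }

    // std::unordered_map<int, std::string> c;
    // CloseType(c, 0, "f32");  // true: T is now closed as f32
    // CloseType(c, 0, "f32");  // true: consistent reuse of T
    // CloseType(c, 0, "i32");  // false: conflicts with the already-closed T
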
diff --git a/chromium/third_party/dawn/src/tint/builtin_table.h b/chromium/third_party/dawn/src/tint/builtin_table.h
deleted file mode 100644
index 4b1b5dc9b49..00000000000
--- a/chromium/third_party/dawn/src/tint/builtin_table.h
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright 2021 The Tint Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef SRC_TINT_BUILTIN_TABLE_H_
-#define SRC_TINT_BUILTIN_TABLE_H_
-
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "src/tint/sem/builtin.h"
-
-// Forward declarations
-namespace tint {
-class ProgramBuilder;
-} // namespace tint
-
-namespace tint {
-
-/// BuiltinTable is a lookup table of all the WGSL builtin functions
-class BuiltinTable {
- public:
- /// @param builder the program builder
- /// @return a pointer to a newly created BuiltinTable
- static std::unique_ptr<BuiltinTable> Create(ProgramBuilder& builder);
-
- /// Destructor
- virtual ~BuiltinTable();
-
- /// Lookup looks for the builtin overload with the given signature, raising
- /// an error diagnostic if the builtin was not found.
- /// @param type the builtin type
- /// @param args the argument types passed to the builtin function
- /// @param source the source of the builtin call
- /// @return the semantic builtin if found, otherwise nullptr
- virtual const sem::Builtin* Lookup(sem::BuiltinType type,
- const std::vector<const sem::Type*>& args,
- const Source& source) = 0;
-};
-
-} // namespace tint
-
-#endif // SRC_TINT_BUILTIN_TABLE_H_
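
For reference, the interface removed above was driven roughly as sketched below. This is an assumption-laden sketch rather than the resolver's real call site: ResolveBuiltinCall is a hypothetical wrapper, kClamp is only an example enumerator, and the caller is expected to own the ProgramBuilder, argument types and Source.

    // Sketch only; assumes #include <vector> and the (now removed)
    // src/tint/builtin_table.h. On failure, Lookup() has already added a
    // "no matching call to ..." diagnostic to builder.Diagnostics().
    const tint::sem::Builtin* ResolveBuiltinCall(
        tint::ProgramBuilder& builder,
        const std::vector<const tint::sem::Type*>& arg_types,
        const tint::Source& call_source) {
      auto table = tint::BuiltinTable::Create(builder);
      return table->Lookup(tint::sem::BuiltinType::kClamp, arg_types, call_source);
    }

The real resolver would cache the table rather than recreating it per call; the sketch only shows the Create()/Lookup() shape of the removed API.
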
diff --git a/chromium/third_party/dawn/src/tint/builtin_table.inl b/chromium/third_party/dawn/src/tint/builtin_table.inl
deleted file mode 100644
index fa483597283..00000000000
--- a/chromium/third_party/dawn/src/tint/builtin_table.inl
+++ /dev/null
@@ -1,9638 +0,0 @@
-// Copyright 2021 The Tint Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-////////////////////////////////////////////////////////////////////////////////
-// File generated by tools/builtin-gen
-// using the template:
-// src/tint/builtin_table.inl.tmpl
-//   and the builtin definition file:
-// src/tint/builtins.def
-//
-// Do not modify this file directly
-////////////////////////////////////////////////////////////////////////////////
-
-// clang-format off
-
-/// TypeMatcher for 'type bool'
-/// @see src/tint/builtins.def:68:6
-class Bool : public TypeMatcher {
- public:
- /// Checks whether the given type matches the matcher rules.
- /// Match may close open types and numbers in state.
- /// @param state the MatchState
- /// @param type the type to match
- /// @returns the canonicalized type on match, otherwise nullptr
- const sem::Type* Match(MatchState& state,
- const sem::Type* type) const override;
- /// @param state the MatchState
- /// @return a string representation of the matcher.
- std::string String(MatchState& state) const override;
-};
-
-const sem::Type* Bool::Match(MatchState& state, const sem::Type* ty) const {
- if (!match_bool(ty)) {
- return nullptr;
- }
- return build_bool(state);
-}
-
-std::string Bool::String(MatchState&) const {
- return "bool";
-}
-
-/// TypeMatcher for 'type f32'
-/// @see src/tint/builtins.def:69:6
-class F32 : public TypeMatcher {
- public:
- /// Checks whether the given type matches the matcher rules.
- /// Match may close open types and numbers in state.
- /// @param state the MatchState
- /// @param type the type to match
- /// @returns the canonicalized type on match, otherwise nullptr
- const sem::Type* Match(MatchState& state,
- const sem::Type* type) const override;
- /// @param state the MatchState
- /// @return a string representation of the matcher.
- std::string String(MatchState& state) const override;
-};
-
-const sem::Type* F32::Match(MatchState& state, const sem::Type* ty) const {
- if (!match_f32(ty)) {
- return nullptr;
- }
- return build_f32(state);
-}
-
-std::string F32::String(MatchState&) const {
- return "f32";
-}
-
-/// TypeMatcher for 'type i32'
-/// @see src/tint/builtins.def:70:6
-class I32 : public TypeMatcher {
- public:
- /// Checks whether the given type matches the matcher rules.
- /// Match may close open types and numbers in state.
- /// @param state the MatchState
- /// @param type the type to match
- /// @returns the canonicalized type on match, otherwise nullptr
- const sem::Type* Match(MatchState& state,
- const sem::Type* type) const override;
- /// @param state the MatchState
- /// @return a string representation of the matcher.
- std::string String(MatchState& state) const override;
-};
-
-const sem::Type* I32::Match(MatchState& state, const sem::Type* ty) const {
- if (!match_i32(ty)) {
- return nullptr;
- }
- return build_i32(state);
-}
-
-std::string I32::String(MatchState&) const {
- return "i32";
-}
-
-/// TypeMatcher for 'type u32'
-/// @see src/tint/builtins.def:71:6
-class U32 : public TypeMatcher {
- public:
- /// Checks whether the given type matches the matcher rules.
- /// Match may close open types and numbers in state.
- /// @param state the MatchState
- /// @param type the type to match
- /// @returns the canonicalized type on match, otherwise nullptr
- const sem::Type* Match(MatchState& state,
- const sem::Type* type) const override;
- /// @param state the MatchState
- /// @return a string representation of the matcher.
- std::string String(MatchState& state) const override;
-};
-
-const sem::Type* U32::Match(MatchState& state, const sem::Type* ty) const {
- if (!match_u32(ty)) {
- return nullptr;
- }
- return build_u32(state);
-}
-
-std::string U32::String(MatchState&) const {
- return "u32";
-}
-
-/// TypeMatcher for 'type vec2'
-/// @see src/tint/builtins.def:72:6
-class Vec2 : public TypeMatcher {
- public:
- /// Checks whether the given type matches the matcher rules.
- /// Match may close open types and numbers in state.
- /// @param state the MatchState
- /// @param type the type to match
- /// @returns the canonicalized type on match, otherwise nullptr
- const sem::Type* Match(MatchState& state,
- const sem::Type* type) const override;
- /// @param state the MatchState
- /// @return a string representation of the matcher.
- std::string String(MatchState& state) const override;
-};
-
-const sem::Type* Vec2::Match(MatchState& state, const sem::Type* ty) const {
- const sem::Type* T = nullptr;
- if (!match_vec2(ty, T)) {
- return nullptr;
- }
- T = state.Type(T);
- if (T == nullptr) {
- return nullptr;
- }
- return build_vec2(state, T);
-}
-
-std::string Vec2::String(MatchState& state) const {
- const std::string T = state.TypeName();
- return "vec2<" + T + ">";
-}
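Every generated TypeMatcher below follows the pattern that Bool and Vec2 establish above: a match_* helper inspects the incoming sem::Type and binds any template arguments, the bound open types and numbers are then closed through MatchState, and a build_* helper returns the canonicalized result. Those helpers are defined elsewhere in the deleted sources, so the trace below only illustrates the control flow, with their behaviour assumed rather than shown:

// Rough walk-through of Vec2::Match for ty == vec2<f32> (match_vec2 and
// build_vec2 are assumed to behave as their names suggest):
//
//   match_vec2(ty, T)     // succeeds; T is bound to f32
//   state.Type(T)         // closes open type 0 to f32, or returns nullptr
//                         // if it was already closed to a different type
//   build_vec2(state, T)  // returns the canonical vec2<f32> semantic type
//
// A nullptr at any step means "this matcher does not accept ty", and the
// overload is skipped.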
-
-/// TypeMatcher for 'type vec3'
-/// @see src/tint/builtins.def:73:6
-class Vec3 : public TypeMatcher {
- public:
- /// Checks whether the given type matches the matcher rules.
- /// Match may close open types and numbers in state.
- /// @param state the MatchState
- /// @param type the type to match
- /// @returns the canonicalized type on match, otherwise nullptr
- const sem::Type* Match(MatchState& state,
- const sem::Type* type) const override;
- /// @param state the MatchState
- /// @return a string representation of the matcher.
- std::string String(MatchState& state) const override;
-};
-
-const sem::Type* Vec3::Match(MatchState& state, const sem::Type* ty) const {
- const sem::Type* T = nullptr;
- if (!match_vec3(ty, T)) {
- return nullptr;
- }
- T = state.Type(T);
- if (T == nullptr) {
- return nullptr;
- }
- return build_vec3(state, T);
-}
-
-std::string Vec3::String(MatchState& state) const {
- const std::string T = state.TypeName();
- return "vec3<" + T + ">";
-}
-
-/// TypeMatcher for 'type vec4'
-/// @see src/tint/builtins.def:74:6
-class Vec4 : public TypeMatcher {
- public:
- /// Checks whether the given type matches the matcher rules.
- /// Match may close open types and numbers in state.
- /// @param state the MatchState
- /// @param type the type to match
- /// @returns the canonicalized type on match, otherwise nullptr
- const sem::Type* Match(MatchState& state,
- const sem::Type* type) const override;
- /// @param state the MatchState
- /// @return a string representation of the matcher.
- std::string String(MatchState& state) const override;
-};
-
-const sem::Type* Vec4::Match(MatchState& state, const sem::Type* ty) const {
- const sem::Type* T = nullptr;
- if (!match_vec4(ty, T)) {
- return nullptr;
- }
- T = state.Type(T);
- if (T == nullptr) {
- return nullptr;
- }
- return build_vec4(state, T);
-}
-
-std::string Vec4::String(MatchState& state) const {
- const std::string T = state.TypeName();
- return "vec4<" + T + ">";
-}
-
-/// TypeMatcher for 'type vec'
-/// @see src/tint/builtins.def:75:37
-class Vec : public TypeMatcher {
- public:
- /// Checks whether the given type matches the matcher rules.
- /// Match may close open types and numbers in state.
- /// @param state the MatchState
- /// @param type the type to match
- /// @returns the canonicalized type on match, otherwise nullptr
- const sem::Type* Match(MatchState& state,
- const sem::Type* type) const override;
- /// @param state the MatchState
- /// @return a string representation of the matcher.
- std::string String(MatchState& state) const override;
-};
-
-const sem::Type* Vec::Match(MatchState& state, const sem::Type* ty) const {
- Number N = Number::invalid;
- const sem::Type* T = nullptr;
- if (!match_vec(ty, N, T)) {
- return nullptr;
- }
- N = state.Num(N);
- if (!N.IsValid()) {
- return nullptr;
- }
- T = state.Type(T);
- if (T == nullptr) {
- return nullptr;
- }
- return build_vec(state, N, T);
-}
-
-std::string Vec::String(MatchState& state) const {
- const std::string N = state.NumName();
- const std::string T = state.TypeName();
- std::stringstream ss;
- ss << "vec" << N << "<" << T << ">";
- return ss.str();
-}
-
-/// TypeMatcher for 'type mat'
-/// @see src/tint/builtins.def:76:37
-class Mat : public TypeMatcher {
- public:
- /// Checks whether the given type matches the matcher rules.
- /// Match may close open types and numbers in state.
- /// @param state the MatchState
- /// @param type the type to match
- /// @returns the canonicalized type on match, otherwise nullptr
- const sem::Type* Match(MatchState& state,
- const sem::Type* type) const override;
- /// @param state the MatchState
- /// @return a string representation of the matcher.
- std::string String(MatchState& state) const override;
-};
-
-const sem::Type* Mat::Match(MatchState& state, const sem::Type* ty) const {
- Number N = Number::invalid;
- Number M = Number::invalid;
- const sem::Type* T = nullptr;
- if (!match_mat(ty, N, M, T)) {
- return nullptr;
- }
- N = state.Num(N);
- if (!N.IsValid()) {
- return nullptr;
- }
- M = state.Num(M);
- if (!M.IsValid()) {
- return nullptr;
- }
- T = state.Type(T);
- if (T == nullptr) {
- return nullptr;
- }
- return build_mat(state, N, M, T);
-}
-
-std::string Mat::String(MatchState& state) const {
- const std::string N = state.NumName();
- const std::string M = state.NumName();
- const std::string T = state.TypeName();
- std::stringstream ss;
- ss << "mat" << N << "x" << M << "<" << T << ">";
- return ss.str();
-}
-
-/// TypeMatcher for 'type ptr'
-/// @see src/tint/builtins.def:77:6
-class Ptr : public TypeMatcher {
- public:
- /// Checks whether the given type matches the matcher rules.
- /// Match may close open types and numbers in state.
- /// @param state the MatchState
- /// @param type the type to match
- /// @returns the canonicalized type on match, otherwise nullptr
- const sem::Type* Match(MatchState& state,
- const sem::Type* type) const override;
- /// @param state the MatchState
- /// @return a string representation of the matcher.
- std::string String(MatchState& state) const override;
-};
-
-const sem::Type* Ptr::Match(MatchState& state, const sem::Type* ty) const {
- Number S = Number::invalid;
- const sem::Type* T = nullptr;
- Number A = Number::invalid;
- if (!match_ptr(ty, S, T, A)) {
- return nullptr;
- }
- S = state.Num(S);
- if (!S.IsValid()) {
- return nullptr;
- }
- T = state.Type(T);
- if (T == nullptr) {
- return nullptr;
- }
- A = state.Num(A);
- if (!A.IsValid()) {
- return nullptr;
- }
- return build_ptr(state, S, T, A);
-}
-
-std::string Ptr::String(MatchState& state) const {
- const std::string S = state.NumName();
- const std::string T = state.TypeName();
- const std::string A = state.NumName();
- return "ptr<" + S + ", " + T + ", " + A + ">";
-}
-
-/// TypeMatcher for 'type atomic'
-/// @see src/tint/builtins.def:78:6
-class Atomic : public TypeMatcher {
- public:
- /// Checks whether the given type matches the matcher rules.
- /// Match may close open types and numbers in state.
- /// @param state the MatchState
- /// @param type the type to match
- /// @returns the canonicalized type on match, otherwise nullptr
- const sem::Type* Match(MatchState& state,
- const sem::Type* type) const override;
- /// @param state the MatchState
- /// @return a string representation of the matcher.
- std::string String(MatchState& state) const override;
-};
-
-const sem::Type* Atomic::Match(MatchState& state, const sem::Type* ty) const {
- const sem::Type* T = nullptr;
- if (!match_atomic(ty, T)) {
- return nullptr;
- }
- T = state.Type(T);
- if (T == nullptr) {
- return nullptr;
- }
- return build_atomic(state, T);
-}
-
-std::string Atomic::String(MatchState& state) const {
- const std::string T = state.TypeName();
- return "atomic<" + T + ">";
-}
-
-/// TypeMatcher for 'type array'
-/// @see src/tint/builtins.def:79:6
-class Array : public TypeMatcher {
- public:
- /// Checks whether the given type matches the matcher rules.
- /// Match may close open types and numbers in state.
- /// @param state the MatchState
- /// @param type the type to match
- /// @returns the canonicalized type on match, otherwise nullptr
- const sem::Type* Match(MatchState& state,
- const sem::Type* type) const override;
- /// @param state the MatchState
- /// @return a string representation of the matcher.
- std::string String(MatchState& state) const override;
-};
-
-const sem::Type* Array::Match(MatchState& state, const sem::Type* ty) const {
- const sem::Type* T = nullptr;
- if (!match_array(ty, T)) {
- return nullptr;
- }
- T = state.Type(T);
- if (T == nullptr) {
- return nullptr;
- }
- return build_array(state, T);
-}
-
-std::string Array::String(MatchState& state) const {
- const std::string T = state.TypeName();
- return "array<" + T + ">";
-}
-
-/// TypeMatcher for 'type sampler'
-/// @see src/tint/builtins.def:80:6
-class Sampler : public TypeMatcher {
- public:
- /// Checks whether the given type matches the matcher rules.
- /// Match may close open types and numbers in state.
- /// @param state the MatchState
- /// @param type the type to match
- /// @returns the canonicalized type on match, otherwise nullptr
- const sem::Type* Match(MatchState& state,
- const sem::Type* type) const override;
- /// @param state the MatchState
- /// @return a string representation of the matcher.
- std::string String(MatchState& state) const override;
-};
-
-const sem::Type* Sampler::Match(MatchState& state, const sem::Type* ty) const {
- if (!match_sampler(ty)) {
- return nullptr;
- }
- return build_sampler(state);
-}
-
-std::string Sampler::String(MatchState&) const {
- return "sampler";
-}
-
-/// TypeMatcher for 'type sampler_comparison'
-/// @see src/tint/builtins.def:81:6
-class SamplerComparison : public TypeMatcher {
- public:
- /// Checks whether the given type matches the matcher rules.
- /// Match may close open types and numbers in state.
- /// @param state the MatchState
- /// @param type the type to match
- /// @returns the canonicalized type on match, otherwise nullptr
- const sem::Type* Match(MatchState& state,
- const sem::Type* type) const override;
- /// @param state the MatchState
- /// @return a string representation of the matcher.
- std::string String(MatchState& state) const override;
-};
-
-const sem::Type* SamplerComparison::Match(MatchState& state, const sem::Type* ty) const {
- if (!match_sampler_comparison(ty)) {
- return nullptr;
- }
- return build_sampler_comparison(state);
-}
-
-std::string SamplerComparison::String(MatchState&) const {
- return "sampler_comparison";
-}
-
-/// TypeMatcher for 'type texture_1d'
-/// @see src/tint/builtins.def:82:6
-class Texture1D : public TypeMatcher {
- public:
- /// Checks whether the given type matches the matcher rules.
- /// Match may close open types and numbers in state.
- /// @param state the MatchState
- /// @param type the type to match
- /// @returns the canonicalized type on match, otherwise nullptr
- const sem::Type* Match(MatchState& state,
- const sem::Type* type) const override;
- /// @param state the MatchState
- /// @return a string representation of the matcher.
- std::string String(MatchState& state) const override;
-};
-
-const sem::Type* Texture1D::Match(MatchState& state, const sem::Type* ty) const {
- const sem::Type* T = nullptr;
- if (!match_texture_1d(ty, T)) {
- return nullptr;
- }
- T = state.Type(T);
- if (T == nullptr) {
- return nullptr;
- }
- return build_texture_1d(state, T);
-}
-
-std::string Texture1D::String(MatchState& state) const {
- const std::string T = state.TypeName();
- return "texture_1d<" + T + ">";
-}
-
-/// TypeMatcher for 'type texture_2d'
-/// @see src/tint/builtins.def:83:6
-class Texture2D : public TypeMatcher {
- public:
- /// Checks whether the given type matches the matcher rules.
- /// Match may close open types and numbers in state.
- /// @param state the MatchState
- /// @param type the type to match
- /// @returns the canonicalized type on match, otherwise nullptr
- const sem::Type* Match(MatchState& state,
- const sem::Type* type) const override;
- /// @param state the MatchState
- /// @return a string representation of the matcher.
- std::string String(MatchState& state) const override;
-};
-
-const sem::Type* Texture2D::Match(MatchState& state, const sem::Type* ty) const {
- const sem::Type* T = nullptr;
- if (!match_texture_2d(ty, T)) {
- return nullptr;
- }
- T = state.Type(T);
- if (T == nullptr) {
- return nullptr;
- }
- return build_texture_2d(state, T);
-}
-
-std::string Texture2D::String(MatchState& state) const {
- const std::string T = state.TypeName();
- return "texture_2d<" + T + ">";
-}
-
-/// TypeMatcher for 'type texture_2d_array'
-/// @see src/tint/builtins.def:84:6
-class Texture2DArray : public TypeMatcher {
- public:
- /// Checks whether the given type matches the matcher rules.
- /// Match may close open types and numbers in state.
- /// @param state the MatchState
- /// @param type the type to match
- /// @returns the canonicalized type on match, otherwise nullptr
- const sem::Type* Match(MatchState& state,
- const sem::Type* type) const override;
- /// @param state the MatchState
- /// @return a string representation of the matcher.
- std::string String(MatchState& state) const override;
-};
-
-const sem::Type* Texture2DArray::Match(MatchState& state, const sem::Type* ty) const {
- const sem::Type* T = nullptr;
- if (!match_texture_2d_array(ty, T)) {
- return nullptr;
- }
- T = state.Type(T);
- if (T == nullptr) {
- return nullptr;
- }
- return build_texture_2d_array(state, T);
-}
-
-std::string Texture2DArray::String(MatchState& state) const {
- const std::string T = state.TypeName();
- return "texture_2d_array<" + T + ">";
-}
-
-/// TypeMatcher for 'type texture_3d'
-/// @see src/tint/builtins.def:85:6
-class Texture3D : public TypeMatcher {
- public:
- /// Checks whether the given type matches the matcher rules.
- /// Match may close open types and numbers in state.
- /// @param state the MatchState
- /// @param type the type to match
- /// @returns the canonicalized type on match, otherwise nullptr
- const sem::Type* Match(MatchState& state,
- const sem::Type* type) const override;
- /// @param state the MatchState
- /// @return a string representation of the matcher.
- std::string String(MatchState& state) const override;
-};
-
-const sem::Type* Texture3D::Match(MatchState& state, const sem::Type* ty) const {
- const sem::Type* T = nullptr;
- if (!match_texture_3d(ty, T)) {
- return nullptr;
- }
- T = state.Type(T);
- if (T == nullptr) {
- return nullptr;
- }
- return build_texture_3d(state, T);
-}
-
-std::string Texture3D::String(MatchState& state) const {
- const std::string T = state.TypeName();
- return "texture_3d<" + T + ">";
-}
-
-/// TypeMatcher for 'type texture_cube'
-/// @see src/tint/builtins.def:86:6
-class TextureCube : public TypeMatcher {
- public:
- /// Checks whether the given type matches the matcher rules.
- /// Match may close open types and numbers in state.
- /// @param state the MatchState
- /// @param type the type to match
- /// @returns the canonicalized type on match, otherwise nullptr
- const sem::Type* Match(MatchState& state,
- const sem::Type* type) const override;
- /// @param state the MatchState
- /// @return a string representation of the matcher.
- std::string String(MatchState& state) const override;
-};
-
-const sem::Type* TextureCube::Match(MatchState& state, const sem::Type* ty) const {
- const sem::Type* T = nullptr;
- if (!match_texture_cube(ty, T)) {
- return nullptr;
- }
- T = state.Type(T);
- if (T == nullptr) {
- return nullptr;
- }
- return build_texture_cube(state, T);
-}
-
-std::string TextureCube::String(MatchState& state) const {
- const std::string T = state.TypeName();
- return "texture_cube<" + T + ">";
-}
-
-/// TypeMatcher for 'type texture_cube_array'
-/// @see src/tint/builtins.def:87:6
-class TextureCubeArray : public TypeMatcher {
- public:
- /// Checks whether the given type matches the matcher rules.
- /// Match may close open types and numbers in state.
- /// @param state the MatchState
- /// @param type the type to match
- /// @returns the canonicalized type on match, otherwise nullptr
- const sem::Type* Match(MatchState& state,
- const sem::Type* type) const override;
- /// @param state the MatchState
- /// @return a string representation of the matcher.
- std::string String(MatchState& state) const override;
-};
-
-const sem::Type* TextureCubeArray::Match(MatchState& state, const sem::Type* ty) const {
- const sem::Type* T = nullptr;
- if (!match_texture_cube_array(ty, T)) {
- return nullptr;
- }
- T = state.Type(T);
- if (T == nullptr) {
- return nullptr;
- }
- return build_texture_cube_array(state, T);
-}
-
-std::string TextureCubeArray::String(MatchState& state) const {
- const std::string T = state.TypeName();
- return "texture_cube_array<" + T + ">";
-}
-
-/// TypeMatcher for 'type texture_multisampled_2d'
-/// @see src/tint/builtins.def:88:6
-class TextureMultisampled2D : public TypeMatcher {
- public:
- /// Checks whether the given type matches the matcher rules.
- /// Match may close open types and numbers in state.
- /// @param state the MatchState
- /// @param type the type to match
- /// @returns the canonicalized type on match, otherwise nullptr
- const sem::Type* Match(MatchState& state,
- const sem::Type* type) const override;
- /// @param state the MatchState
- /// @return a string representation of the matcher.
- std::string String(MatchState& state) const override;
-};
-
-const sem::Type* TextureMultisampled2D::Match(MatchState& state, const sem::Type* ty) const {
- const sem::Type* T = nullptr;
- if (!match_texture_multisampled_2d(ty, T)) {
- return nullptr;
- }
- T = state.Type(T);
- if (T == nullptr) {
- return nullptr;
- }
- return build_texture_multisampled_2d(state, T);
-}
-
-std::string TextureMultisampled2D::String(MatchState& state) const {
- const std::string T = state.TypeName();
- return "texture_multisampled_2d<" + T + ">";
-}
-
-/// TypeMatcher for 'type texture_depth_2d'
-/// @see src/tint/builtins.def:89:6
-class TextureDepth2D : public TypeMatcher {
- public:
- /// Checks whether the given type matches the matcher rules.
- /// Match may close open types and numbers in state.
- /// @param state the MatchState
- /// @param type the type to match
- /// @returns the canonicalized type on match, otherwise nullptr
- const sem::Type* Match(MatchState& state,
- const sem::Type* type) const override;
- /// @param state the MatchState
- /// @return a string representation of the matcher.
- std::string String(MatchState& state) const override;
-};
-
-const sem::Type* TextureDepth2D::Match(MatchState& state, const sem::Type* ty) const {
- if (!match_texture_depth_2d(ty)) {
- return nullptr;
- }
- return build_texture_depth_2d(state);
-}
-
-std::string TextureDepth2D::String(MatchState&) const {
- return "texture_depth_2d";
-}
-
-/// TypeMatcher for 'type texture_depth_2d_array'
-/// @see src/tint/builtins.def:90:6
-class TextureDepth2DArray : public TypeMatcher {
- public:
- /// Checks whether the given type matches the matcher rules.
- /// Match may close open types and numbers in state.
- /// @param state the MatchState
- /// @param type the type to match
- /// @returns the canonicalized type on match, otherwise nullptr
- const sem::Type* Match(MatchState& state,
- const sem::Type* type) const override;
- /// @param state the MatchState
- /// @return a string representation of the matcher.
- std::string String(MatchState& state) const override;
-};
-
-const sem::Type* TextureDepth2DArray::Match(MatchState& state, const sem::Type* ty) const {
- if (!match_texture_depth_2d_array(ty)) {
- return nullptr;
- }
- return build_texture_depth_2d_array(state);
-}
-
-std::string TextureDepth2DArray::String(MatchState&) const {
- return "texture_depth_2d_array";
-}
-
-/// TypeMatcher for 'type texture_depth_cube'
-/// @see src/tint/builtins.def:91:6
-class TextureDepthCube : public TypeMatcher {
- public:
- /// Checks whether the given type matches the matcher rules.
- /// Match may close open types and numbers in state.
- /// @param state the MatchState
- /// @param type the type to match
- /// @returns the canonicalized type on match, otherwise nullptr
- const sem::Type* Match(MatchState& state,
- const sem::Type* type) const override;
- /// @param state the MatchState
- /// @return a string representation of the matcher.
- std::string String(MatchState& state) const override;
-};
-
-const sem::Type* TextureDepthCube::Match(MatchState& state, const sem::Type* ty) const {
- if (!match_texture_depth_cube(ty)) {
- return nullptr;
- }
- return build_texture_depth_cube(state);
-}
-
-std::string TextureDepthCube::String(MatchState&) const {
- return "texture_depth_cube";
-}
-
-/// TypeMatcher for 'type texture_depth_cube_array'
-/// @see src/tint/builtins.def:92:6
-class TextureDepthCubeArray : public TypeMatcher {
- public:
- /// Checks whether the given type matches the matcher rules.
- /// Match may close open types and numbers in state.
- /// @param state the MatchState
- /// @param type the type to match
- /// @returns the canonicalized type on match, otherwise nullptr
- const sem::Type* Match(MatchState& state,
- const sem::Type* type) const override;
- /// @param state the MatchState
- /// @return a string representation of the matcher.
- std::string String(MatchState& state) const override;
-};
-
-const sem::Type* TextureDepthCubeArray::Match(MatchState& state, const sem::Type* ty) const {
- if (!match_texture_depth_cube_array(ty)) {
- return nullptr;
- }
- return build_texture_depth_cube_array(state);
-}
-
-std::string TextureDepthCubeArray::String(MatchState&) const {
- return "texture_depth_cube_array";
-}
-
-/// TypeMatcher for 'type texture_depth_multisampled_2d'
-/// @see src/tint/builtins.def:93:6
-class TextureDepthMultisampled2D : public TypeMatcher {
- public:
- /// Checks whether the given type matches the matcher rules.
- /// Match may close open types and numbers in state.
- /// @param state the MatchState
- /// @param type the type to match
- /// @returns the canonicalized type on match, otherwise nullptr
- const sem::Type* Match(MatchState& state,
- const sem::Type* type) const override;
- /// @param state the MatchState
- /// @return a string representation of the matcher.
- std::string String(MatchState& state) const override;
-};
-
-const sem::Type* TextureDepthMultisampled2D::Match(MatchState& state, const sem::Type* ty) const {
- if (!match_texture_depth_multisampled_2d(ty)) {
- return nullptr;
- }
- return build_texture_depth_multisampled_2d(state);
-}
-
-std::string TextureDepthMultisampled2D::String(MatchState&) const {
- return "texture_depth_multisampled_2d";
-}
-
-/// TypeMatcher for 'type texture_storage_1d'
-/// @see src/tint/builtins.def:94:6
-class TextureStorage1D : public TypeMatcher {
- public:
- /// Checks whether the given type matches the matcher rules.
- /// Match may close open types and numbers in state.
- /// @param state the MatchState
- /// @param type the type to match
- /// @returns the canonicalized type on match, otherwise nullptr
- const sem::Type* Match(MatchState& state,
- const sem::Type* type) const override;
- /// @param state the MatchState
- /// @return a string representation of the matcher.
- std::string String(MatchState& state) const override;
-};
-
-const sem::Type* TextureStorage1D::Match(MatchState& state, const sem::Type* ty) const {
- Number F = Number::invalid;
- Number A = Number::invalid;
- if (!match_texture_storage_1d(ty, F, A)) {
- return nullptr;
- }
- F = state.Num(F);
- if (!F.IsValid()) {
- return nullptr;
- }
- A = state.Num(A);
- if (!A.IsValid()) {
- return nullptr;
- }
- return build_texture_storage_1d(state, F, A);
-}
-
-std::string TextureStorage1D::String(MatchState& state) const {
- const std::string F = state.NumName();
- const std::string A = state.NumName();
- return "texture_storage_1d<" + F + ", " + A + ">";
-}
-
-/// TypeMatcher for 'type texture_storage_2d'
-/// @see src/tint/builtins.def:95:6
-class TextureStorage2D : public TypeMatcher {
- public:
- /// Checks whether the given type matches the matcher rules.
- /// Match may close open types and numbers in state.
- /// @param state the MatchState
- /// @param type the type to match
- /// @returns the canonicalized type on match, otherwise nullptr
- const sem::Type* Match(MatchState& state,
- const sem::Type* type) const override;
- /// @param state the MatchState
- /// @return a string representation of the matcher.
- std::string String(MatchState& state) const override;
-};
-
-const sem::Type* TextureStorage2D::Match(MatchState& state, const sem::Type* ty) const {
- Number F = Number::invalid;
- Number A = Number::invalid;
- if (!match_texture_storage_2d(ty, F, A)) {
- return nullptr;
- }
- F = state.Num(F);
- if (!F.IsValid()) {
- return nullptr;
- }
- A = state.Num(A);
- if (!A.IsValid()) {
- return nullptr;
- }
- return build_texture_storage_2d(state, F, A);
-}
-
-std::string TextureStorage2D::String(MatchState& state) const {
- const std::string F = state.NumName();
- const std::string A = state.NumName();
- return "texture_storage_2d<" + F + ", " + A + ">";
-}
-
-/// TypeMatcher for 'type texture_storage_2d_array'
-/// @see src/tint/builtins.def:96:6
-class TextureStorage2DArray : public TypeMatcher {
- public:
- /// Checks whether the given type matches the matcher rules.
- /// Match may close open types and numbers in state.
- /// @param state the MatchState
- /// @param type the type to match
- /// @returns the canonicalized type on match, otherwise nullptr
- const sem::Type* Match(MatchState& state,
- const sem::Type* type) const override;
- /// @param state the MatchState
- /// @return a string representation of the matcher.
- std::string String(MatchState& state) const override;
-};
-
-const sem::Type* TextureStorage2DArray::Match(MatchState& state, const sem::Type* ty) const {
- Number F = Number::invalid;
- Number A = Number::invalid;
- if (!match_texture_storage_2d_array(ty, F, A)) {
- return nullptr;
- }
- F = state.Num(F);
- if (!F.IsValid()) {
- return nullptr;
- }
- A = state.Num(A);
- if (!A.IsValid()) {
- return nullptr;
- }
- return build_texture_storage_2d_array(state, F, A);
-}
-
-std::string TextureStorage2DArray::String(MatchState& state) const {
- const std::string F = state.NumName();
- const std::string A = state.NumName();
- return "texture_storage_2d_array<" + F + ", " + A + ">";
-}
-
-/// TypeMatcher for 'type texture_storage_3d'
-/// @see src/tint/builtins.def:97:6
-class TextureStorage3D : public TypeMatcher {
- public:
- /// Checks whether the given type matches the matcher rules.
- /// Match may close open types and numbers in state.
- /// @param state the MatchState
- /// @param type the type to match
- /// @returns the canonicalized type on match, otherwise nullptr
- const sem::Type* Match(MatchState& state,
- const sem::Type* type) const override;
- /// @param state the MatchState
- /// @return a string representation of the matcher.
- std::string String(MatchState& state) const override;
-};
-
-const sem::Type* TextureStorage3D::Match(MatchState& state, const sem::Type* ty) const {
- Number F = Number::invalid;
- Number A = Number::invalid;
- if (!match_texture_storage_3d(ty, F, A)) {
- return nullptr;
- }
- F = state.Num(F);
- if (!F.IsValid()) {
- return nullptr;
- }
- A = state.Num(A);
- if (!A.IsValid()) {
- return nullptr;
- }
- return build_texture_storage_3d(state, F, A);
-}
-
-std::string TextureStorage3D::String(MatchState& state) const {
- const std::string F = state.NumName();
- const std::string A = state.NumName();
- return "texture_storage_3d<" + F + ", " + A + ">";
-}
-
-/// TypeMatcher for 'type texture_external'
-/// @see src/tint/builtins.def:98:6
-class TextureExternal : public TypeMatcher {
- public:
- /// Checks whether the given type matches the matcher rules.
- /// Match may close open types and numbers in state.
- /// @param state the MatchState
- /// @param type the type to match
- /// @returns the canonicalized type on match, otherwise nullptr
- const sem::Type* Match(MatchState& state,
- const sem::Type* type) const override;
- /// @param state the MatchState
- /// @return a string representation of the matcher.
- std::string String(MatchState& state) const override;
-};
-
-const sem::Type* TextureExternal::Match(MatchState& state, const sem::Type* ty) const {
- if (!match_texture_external(ty)) {
- return nullptr;
- }
- return build_texture_external(state);
-}
-
-std::string TextureExternal::String(MatchState&) const {
- return "texture_external";
-}
-
-/// TypeMatcher for 'type __modf_result'
-/// @see src/tint/builtins.def:100:6
-class ModfResult : public TypeMatcher {
- public:
- /// Checks whether the given type matches the matcher rules.
- /// Match may close open types and numbers in state.
- /// @param state the MatchState
- /// @param type the type to match
- /// @returns the canonicalized type on match, otherwise nullptr
- const sem::Type* Match(MatchState& state,
- const sem::Type* type) const override;
- /// @param state the MatchState
- /// @return a string representation of the matcher.
- std::string String(MatchState& state) const override;
-};
-
-const sem::Type* ModfResult::Match(MatchState& state, const sem::Type* ty) const {
- if (!match_modf_result(ty)) {
- return nullptr;
- }
- return build_modf_result(state);
-}
-
-std::string ModfResult::String(MatchState&) const {
- return "__modf_result";
-}
-
-/// TypeMatcher for 'type __modf_result_vec'
-/// @see src/tint/builtins.def:101:42
-class ModfResultVec : public TypeMatcher {
- public:
- /// Checks whether the given type matches the matcher rules.
- /// Match may close open types and numbers in state.
- /// @param state the MatchState
- /// @param type the type to match
- /// @returns the canonicalized type on match, otherwise nullptr
- const sem::Type* Match(MatchState& state,
- const sem::Type* type) const override;
- /// @param state the MatchState
- /// @return a string representation of the matcher.
- std::string String(MatchState& state) const override;
-};
-
-const sem::Type* ModfResultVec::Match(MatchState& state, const sem::Type* ty) const {
- Number N = Number::invalid;
- if (!match_modf_result_vec(ty, N)) {
- return nullptr;
- }
- N = state.Num(N);
- if (!N.IsValid()) {
- return nullptr;
- }
- return build_modf_result_vec(state, N);
-}
-
-std::string ModfResultVec::String(MatchState& state) const {
- const std::string N = state.NumName();
- std::stringstream ss;
- ss << "__modf_result_vec" << N;
- return ss.str();
-}
-
-/// TypeMatcher for 'type __frexp_result'
-/// @see src/tint/builtins.def:102:6
-class FrexpResult : public TypeMatcher {
- public:
- /// Checks whether the given type matches the matcher rules.
- /// Match may close open types and numbers in state.
- /// @param state the MatchState
- /// @param type the type to match
- /// @returns the canonicalized type on match, otherwise nullptr
- const sem::Type* Match(MatchState& state,
- const sem::Type* type) const override;
- /// @param state the MatchState
- /// @return a string representation of the matcher.
- std::string String(MatchState& state) const override;
-};
-
-const sem::Type* FrexpResult::Match(MatchState& state, const sem::Type* ty) const {
- if (!match_frexp_result(ty)) {
- return nullptr;
- }
- return build_frexp_result(state);
-}
-
-std::string FrexpResult::String(MatchState&) const {
- return "__frexp_result";
-}
-
-/// TypeMatcher for 'type __frexp_result_vec'
-/// @see src/tint/builtins.def:103:43
-class FrexpResultVec : public TypeMatcher {
- public:
- /// Checks whether the given type matches the matcher rules.
- /// Match may close open types and numbers in state.
- /// @param state the MatchState
- /// @param type the type to match
- /// @returns the canonicalized type on match, otherwise nullptr
- const sem::Type* Match(MatchState& state,
- const sem::Type* type) const override;
- /// @param state the MatchState
- /// @return a string representation of the matcher.
- std::string String(MatchState& state) const override;
-};
-
-const sem::Type* FrexpResultVec::Match(MatchState& state, const sem::Type* ty) const {
- Number N = Number::invalid;
- if (!match_frexp_result_vec(ty, N)) {
- return nullptr;
- }
- N = state.Num(N);
- if (!N.IsValid()) {
- return nullptr;
- }
- return build_frexp_result_vec(state, N);
-}
-
-std::string FrexpResultVec::String(MatchState& state) const {
- const std::string N = state.NumName();
- std::stringstream ss;
- ss << "__frexp_result_vec" << N;
- return ss.str();
-}
-
-/// TypeMatcher for 'match fiu32'
-/// @see src/tint/builtins.def:111:7
-class Fiu32 : public TypeMatcher {
- public:
- /// Checks whether the given type matches the matcher rules, and returns the
- /// expected, canonicalized type on success.
- /// Match may close open types and numbers in state.
- /// @param state the MatchState
- /// @param type the type to match
- /// @returns the canonicalized type on match, otherwise nullptr
- const sem::Type* Match(MatchState& state,
- const sem::Type* type) const override;
- /// @param state the MatchState
- /// @return a string representation of the matcher.
- std::string String(MatchState& state) const override;
-};
-
-const sem::Type* Fiu32::Match(MatchState& state, const sem::Type* ty) const {
- if (match_f32(ty)) {
- return build_f32(state);
- }
- if (match_i32(ty)) {
- return build_i32(state);
- }
- if (match_u32(ty)) {
- return build_u32(state);
- }
- return nullptr;
-}
-
-std::string Fiu32::String(MatchState&) const {
- return "f32, i32 or u32";
-}
-
-/// TypeMatcher for 'match iu32'
-/// @see src/tint/builtins.def:112:7
-class Iu32 : public TypeMatcher {
- public:
- /// Checks whether the given type matches the matcher rules, and returns the
- /// expected, canonicalized type on success.
- /// Match may close open types and numbers in state.
- /// @param state the MatchState
- /// @param type the type to match
- /// @returns the canonicalized type on match, otherwise nullptr
- const sem::Type* Match(MatchState& state,
- const sem::Type* type) const override;
- /// @param state the MatchState
- /// @return a string representation of the matcher.
- std::string String(MatchState& state) const override;
-};
-
-const sem::Type* Iu32::Match(MatchState& state, const sem::Type* ty) const {
- if (match_i32(ty)) {
- return build_i32(state);
- }
- if (match_u32(ty)) {
- return build_u32(state);
- }
- return nullptr;
-}
-
-std::string Iu32::String(MatchState&) const {
- return "i32 or u32";
-}
-
-/// TypeMatcher for 'match scalar'
-/// @see src/tint/builtins.def:113:7
-class Scalar : public TypeMatcher {
- public:
- /// Checks whether the given type matches the matcher rules, and returns the
- /// expected, canonicalized type on success.
- /// Match may close open types and numbers in state.
- /// @param state the MatchState
- /// @param type the type to match
- /// @returns the canonicalized type on match, otherwise nullptr
- const sem::Type* Match(MatchState& state,
- const sem::Type* type) const override;
- /// @param state the MatchState
- /// @return a string representation of the matcher.
- std::string String(MatchState& state) const override;
-};
-
-const sem::Type* Scalar::Match(MatchState& state, const sem::Type* ty) const {
- if (match_f32(ty)) {
- return build_f32(state);
- }
- if (match_i32(ty)) {
- return build_i32(state);
- }
- if (match_u32(ty)) {
- return build_u32(state);
- }
- if (match_bool(ty)) {
- return build_bool(state);
- }
- return nullptr;
-}
-
-std::string Scalar::String(MatchState&) const {
- return "f32, i32, u32 or bool";
-}
-
-/// EnumMatcher for 'match f32_texel_format'
-/// @see src/tint/builtins.def:124:7
-class F32TexelFormat : public NumberMatcher {
- public:
- /// Checks whether the given number matches the enum matcher rules.
- /// Match may close open types and numbers in state.
- /// @param state the MatchState
- /// @param number the enum value as a Number
- /// @return true if the enum value matches the set
- Number Match(MatchState& state, Number number) const override;
- /// @param state the MatchState
- /// @return a string representation of the matcher.
- std::string String(MatchState& state) const override;
-};
-
-Number F32TexelFormat::Match(MatchState&, Number number) const {
- switch (static_cast<TexelFormat>(number.Value())) {
- case TexelFormat::kRgba8Unorm:
- case TexelFormat::kRgba8Snorm:
- case TexelFormat::kRgba16Float:
- case TexelFormat::kR32Float:
- case TexelFormat::kRg32Float:
- case TexelFormat::kRgba32Float:
- return number;
- default:
- return Number::invalid;
- }
-}
-
-std::string F32TexelFormat::String(MatchState&) const {
- return "rgba8unorm, rgba8snorm, rgba16float, r32float, rg32float or rgba32float";
-}
-
-/// EnumMatcher for 'match i32_texel_format'
-/// @see src/tint/builtins.def:126:7
-class I32TexelFormat : public NumberMatcher {
- public:
- /// Checks whether the given number matches the enum matcher rules.
- /// Match may close open types and numbers in state.
- /// @param state the MatchState
- /// @param number the enum value as a Number
- /// @return true if the enum value matches the set
- Number Match(MatchState& state, Number number) const override;
- /// @param state the MatchState
- /// @return a string representation of the matcher.
- std::string String(MatchState& state) const override;
-};
-
-Number I32TexelFormat::Match(MatchState&, Number number) const {
- switch (static_cast<TexelFormat>(number.Value())) {
- case TexelFormat::kRgba8Sint:
- case TexelFormat::kRgba16Sint:
- case TexelFormat::kR32Sint:
- case TexelFormat::kRg32Sint:
- case TexelFormat::kRgba32Sint:
- return number;
- default:
- return Number::invalid;
- }
-}
-
-std::string I32TexelFormat::String(MatchState&) const {
- return "rgba8sint, rgba16sint, r32sint, rg32sint or rgba32sint";
-}
-
-/// EnumMatcher for 'match u32_texel_format'
-/// @see src/tint/builtins.def:128:7
-class U32TexelFormat : public NumberMatcher {
- public:
- /// Checks whether the given number matches the enum matcher rules.
- /// Match may close open types and numbers in state.
- /// @param state the MatchState
- /// @param number the enum value as a Number
- /// @return true if the enum value matches the set
- Number Match(MatchState& state, Number number) const override;
- /// @param state the MatchState
- /// @return a string representation of the matcher.
- std::string String(MatchState& state) const override;
-};
-
-Number U32TexelFormat::Match(MatchState&, Number number) const {
- switch (static_cast<TexelFormat>(number.Value())) {
- case TexelFormat::kRgba8Uint:
- case TexelFormat::kRgba16Uint:
- case TexelFormat::kR32Uint:
- case TexelFormat::kRg32Uint:
- case TexelFormat::kRgba32Uint:
- return number;
- default:
- return Number::invalid;
- }
-}
-
-std::string U32TexelFormat::String(MatchState&) const {
- return "rgba8uint, rgba16uint, r32uint, rg32uint or rgba32uint";
-}
-
-/// EnumMatcher for 'match write_only'
-/// @see src/tint/builtins.def:131:7
-class WriteOnly : public NumberMatcher {
- public:
- /// Checks whether the given number matches the enum matcher rules.
- /// Match may close open types and numbers in state.
- /// @param state the MatchState
- /// @param number the enum value as a Number
- /// @return true if the enum value matches the set
- Number Match(MatchState& state, Number number) const override;
- /// @param state the MatchState
- /// @return a string representation of the matcher.
- std::string String(MatchState& state) const override;
-};
-
-Number WriteOnly::Match(MatchState&, Number number) const {
- if (number.IsAny() || number.Value() == static_cast<uint32_t>(Access::kWrite)) {
- return Number(static_cast<uint32_t>(Access::kWrite));
- }
- return Number::invalid;
-}
-
-std::string WriteOnly::String(MatchState&) const {
- return "write";
-}
-
-/// EnumMatcher for 'match function_private_workgroup'
-/// @see src/tint/builtins.def:133:7
-class FunctionPrivateWorkgroup : public NumberMatcher {
- public:
- /// Checks whether the given number matches the enum matcher rules.
- /// Match may close open types and numbers in state.
- /// @param state the MatchState
- /// @param number the enum value as a Number
- /// @return true if the enum value matches the set
- Number Match(MatchState& state, Number number) const override;
- /// @param state the MatchState
- /// @return a string representation of the matcher.
- std::string String(MatchState& state) const override;
-};
-
-Number FunctionPrivateWorkgroup::Match(MatchState&, Number number) const {
- switch (static_cast<StorageClass>(number.Value())) {
- case StorageClass::kFunction:
- case StorageClass::kPrivate:
- case StorageClass::kWorkgroup:
- return number;
- default:
- return Number::invalid;
- }
-}
-
-std::string FunctionPrivateWorkgroup::String(MatchState&) const {
- return "function, private or workgroup";
-}
-
-/// EnumMatcher for 'match workgroup_or_storage'
-/// @see src/tint/builtins.def:134:7
-class WorkgroupOrStorage : public NumberMatcher {
- public:
- /// Checks whether the given number matches the enum matcher rules.
- /// Match may close open types and numbers in state.
- /// @param state the MatchState
- /// @param number the enum value as a Number
- /// @return true if the enum value matches the set
- Number Match(MatchState& state, Number number) const override;
- /// @param state the MatchState
- /// @return a string representation of the matcher.
- std::string String(MatchState& state) const override;
-};
-
-Number WorkgroupOrStorage::Match(MatchState&, Number number) const {
- switch (static_cast<StorageClass>(number.Value())) {
- case StorageClass::kWorkgroup:
- case StorageClass::kStorage:
- return number;
- default:
- return Number::invalid;
- }
-}
-
-std::string WorkgroupOrStorage::String(MatchState&) const {
- return "workgroup or storage";
-}
-
-/// EnumMatcher for 'match storage'
-class Storage : public NumberMatcher {
- public:
- /// Checks whether the given number matches the enum matcher rules.
- /// Match may close open types and numbers in state.
- /// @param state the MatchState
- /// @param number the enum value as a Number
- /// @return true if the enum value matches the set
- Number Match(MatchState& state, Number number) const override;
- /// @param state the MatchState
- /// @return a string representation of the matcher.
- std::string String(MatchState& state) const override;
-};
-
-Number Storage::Match(MatchState&, Number number) const {
- if (number.IsAny() || number.Value() == static_cast<uint32_t>(StorageClass::kStorage)) {
- return Number(static_cast<uint32_t>(StorageClass::kStorage));
- }
- return Number::invalid;
-}
-
-std::string Storage::String(MatchState&) const {
- return "storage";
-}
-
-/// EnumMatcher for 'match write'
-class Write : public NumberMatcher {
- public:
- /// Checks whether the given number matches the enum matcher rules.
- /// Match may close open types and numbers in state.
- /// @param state the MatchState
- /// @param number the enum value as a Number
- /// @return true if the enum value matches the set
- Number Match(MatchState& state, Number number) const override;
- /// @param state the MatchState
- /// @return a string representation of the matcher.
- std::string String(MatchState& state) const override;
-};
-
-Number Write::Match(MatchState&, Number number) const {
- if (number.IsAny() || number.Value() == static_cast<uint32_t>(Access::kWrite)) {
- return Number(static_cast<uint32_t>(Access::kWrite));
- }
- return Number::invalid;
-}
-
-std::string Write::String(MatchState&) const {
- return "write";
-}
-
-/// EnumMatcher for 'match read_write'
-class ReadWrite : public NumberMatcher {
- public:
- /// Checks whether the given number matches the enum matcher rules.
- /// Match may close open types and numbers in state.
- /// @param state the MatchState
- /// @param number the enum value as a Number
- /// @return true if the enum value matches the set
- Number Match(MatchState& state, Number number) const override;
- /// @param state the MatchState
- /// @return a string representation of the matcher.
- std::string String(MatchState& state) const override;
-};
-
-Number ReadWrite::Match(MatchState&, Number number) const {
- if (number.IsAny() || number.Value() == static_cast<uint32_t>(Access::kReadWrite)) {
- return Number(static_cast<uint32_t>(Access::kReadWrite));
- }
- return Number::invalid;
-}
-
-std::string ReadWrite::String(MatchState&) const {
- return "read_write";
-}
-
-/// Matchers holds type and number matchers
-class Matchers {
- private:
- OpenTypeMatcher open_type_0_{0};
- OpenNumberMatcher open_number_0_{0};
- OpenNumberMatcher open_number_1_{1};
- Bool Bool_;
- F32 F32_;
- I32 I32_;
- U32 U32_;
- Vec2 Vec2_;
- Vec3 Vec3_;
- Vec4 Vec4_;
- Vec Vec_;
- Mat Mat_;
- Ptr Ptr_;
- Atomic Atomic_;
- Array Array_;
- Sampler Sampler_;
- SamplerComparison SamplerComparison_;
- Texture1D Texture1D_;
- Texture2D Texture2D_;
- Texture2DArray Texture2DArray_;
- Texture3D Texture3D_;
- TextureCube TextureCube_;
- TextureCubeArray TextureCubeArray_;
- TextureMultisampled2D TextureMultisampled2D_;
- TextureDepth2D TextureDepth2D_;
- TextureDepth2DArray TextureDepth2DArray_;
- TextureDepthCube TextureDepthCube_;
- TextureDepthCubeArray TextureDepthCubeArray_;
- TextureDepthMultisampled2D TextureDepthMultisampled2D_;
- TextureStorage1D TextureStorage1D_;
- TextureStorage2D TextureStorage2D_;
- TextureStorage2DArray TextureStorage2DArray_;
- TextureStorage3D TextureStorage3D_;
- TextureExternal TextureExternal_;
- ModfResult ModfResult_;
- ModfResultVec ModfResultVec_;
- FrexpResult FrexpResult_;
- FrexpResultVec FrexpResultVec_;
- Fiu32 Fiu32_;
- Iu32 Iu32_;
- Scalar Scalar_;
- F32TexelFormat F32TexelFormat_;
- I32TexelFormat I32TexelFormat_;
- U32TexelFormat U32TexelFormat_;
- WriteOnly WriteOnly_;
- FunctionPrivateWorkgroup FunctionPrivateWorkgroup_;
- WorkgroupOrStorage WorkgroupOrStorage_;
- Storage Storage_;
- Write Write_;
- ReadWrite ReadWrite_;
-
- public:
- /// Constructor
- Matchers();
- /// Destructor
- ~Matchers();
-
- /// The open-types, types, and type matchers
- TypeMatcher const* const type[39] = {
- /* [0] */ &open_type_0_,
- /* [1] */ &Bool_,
- /* [2] */ &F32_,
- /* [3] */ &I32_,
- /* [4] */ &U32_,
- /* [5] */ &Vec2_,
- /* [6] */ &Vec3_,
- /* [7] */ &Vec4_,
- /* [8] */ &Vec_,
- /* [9] */ &Mat_,
- /* [10] */ &Ptr_,
- /* [11] */ &Atomic_,
- /* [12] */ &Array_,
- /* [13] */ &Sampler_,
- /* [14] */ &SamplerComparison_,
- /* [15] */ &Texture1D_,
- /* [16] */ &Texture2D_,
- /* [17] */ &Texture2DArray_,
- /* [18] */ &Texture3D_,
- /* [19] */ &TextureCube_,
- /* [20] */ &TextureCubeArray_,
- /* [21] */ &TextureMultisampled2D_,
- /* [22] */ &TextureDepth2D_,
- /* [23] */ &TextureDepth2DArray_,
- /* [24] */ &TextureDepthCube_,
- /* [25] */ &TextureDepthCubeArray_,
- /* [26] */ &TextureDepthMultisampled2D_,
- /* [27] */ &TextureStorage1D_,
- /* [28] */ &TextureStorage2D_,
- /* [29] */ &TextureStorage2DArray_,
- /* [30] */ &TextureStorage3D_,
- /* [31] */ &TextureExternal_,
- /* [32] */ &ModfResult_,
- /* [33] */ &ModfResultVec_,
- /* [34] */ &FrexpResult_,
- /* [35] */ &FrexpResultVec_,
- /* [36] */ &Fiu32_,
- /* [37] */ &Iu32_,
- /* [38] */ &Scalar_,
- };
-
- /// The open-numbers, and number matchers
- NumberMatcher const* const number[11] = {
- /* [0] */ &open_number_0_,
- /* [1] */ &open_number_1_,
- /* [2] */ &F32TexelFormat_,
- /* [3] */ &I32TexelFormat_,
- /* [4] */ &U32TexelFormat_,
- /* [5] */ &WriteOnly_,
- /* [6] */ &FunctionPrivateWorkgroup_,
- /* [7] */ &WorkgroupOrStorage_,
- /* [8] */ &Storage_,
- /* [9] */ &Write_,
- /* [10] */ &ReadWrite_,
- };
-};
-
-Matchers::Matchers() = default;
-Matchers::~Matchers() = default;
-
-constexpr MatcherIndex kMatcherIndices[] = {
- /* [0] */ 10,
- /* [1] */ 0,
- /* [2] */ 11,
- /* [3] */ 0,
- /* [4] */ 10,
- /* [5] */ 8,
- /* [6] */ 12,
- /* [7] */ 0,
- /* [8] */ 0,
- /* [9] */ 9,
- /* [10] */ 1,
- /* [11] */ 0,
- /* [12] */ 2,
- /* [13] */ 9,
- /* [14] */ 0,
- /* [15] */ 1,
- /* [16] */ 2,
- /* [17] */ 9,
- /* [18] */ 0,
- /* [19] */ 0,
- /* [20] */ 2,
- /* [21] */ 8,
- /* [22] */ 0,
- /* [23] */ 2,
- /* [24] */ 8,
- /* [25] */ 0,
- /* [26] */ 1,
- /* [27] */ 29,
- /* [28] */ 0,
- /* [29] */ 1,
- /* [30] */ 30,
- /* [31] */ 0,
- /* [32] */ 1,
- /* [33] */ 28,
- /* [34] */ 0,
- /* [35] */ 1,
- /* [36] */ 27,
- /* [37] */ 0,
- /* [38] */ 1,
- /* [39] */ 8,
- /* [40] */ 0,
- /* [41] */ 0,
- /* [42] */ 30,
- /* [43] */ 4,
- /* [44] */ 9,
- /* [45] */ 29,
- /* [46] */ 4,
- /* [47] */ 9,
- /* [48] */ 28,
- /* [49] */ 4,
- /* [50] */ 9,
- /* [51] */ 27,
- /* [52] */ 4,
- /* [53] */ 9,
- /* [54] */ 30,
- /* [55] */ 3,
- /* [56] */ 9,
- /* [57] */ 29,
- /* [58] */ 3,
- /* [59] */ 9,
- /* [60] */ 28,
- /* [61] */ 3,
- /* [62] */ 9,
- /* [63] */ 27,
- /* [64] */ 3,
- /* [65] */ 9,
- /* [66] */ 30,
- /* [67] */ 2,
- /* [68] */ 9,
- /* [69] */ 29,
- /* [70] */ 2,
- /* [71] */ 9,
- /* [72] */ 28,
- /* [73] */ 2,
- /* [74] */ 9,
- /* [75] */ 27,
- /* [76] */ 2,
- /* [77] */ 9,
- /* [78] */ 8,
- /* [79] */ 0,
- /* [80] */ 3,
- /* [81] */ 7,
- /* [82] */ 2,
- /* [83] */ 17,
- /* [84] */ 2,
- /* [85] */ 5,
- /* [86] */ 3,
- /* [87] */ 5,
- /* [88] */ 2,
- /* [89] */ 16,
- /* [90] */ 2,
- /* [91] */ 6,
- /* [92] */ 2,
- /* [93] */ 18,
- /* [94] */ 2,
- /* [95] */ 20,
- /* [96] */ 2,
- /* [97] */ 19,
- /* [98] */ 2,
- /* [99] */ 6,
- /* [100] */ 3,
- /* [101] */ 35,
- /* [102] */ 0,
- /* [103] */ 33,
- /* [104] */ 0,
- /* [105] */ 5,
- /* [106] */ 0,
- /* [107] */ 7,
- /* [108] */ 3,
- /* [109] */ 7,
- /* [110] */ 4,
- /* [111] */ 15,
- /* [112] */ 0,
- /* [113] */ 7,
- /* [114] */ 0,
- /* [115] */ 16,
- /* [116] */ 0,
- /* [117] */ 17,
- /* [118] */ 0,
- /* [119] */ 18,
- /* [120] */ 0,
- /* [121] */ 21,
- /* [122] */ 0,
- /* [123] */ 19,
- /* [124] */ 0,
- /* [125] */ 20,
- /* [126] */ 0,
- /* [127] */ 15,
- /* [128] */ 2,
- /* [129] */ 14,
- /* [130] */ 24,
- /* [131] */ 23,
- /* [132] */ 25,
- /* [133] */ 22,
- /* [134] */ 26,
- /* [135] */ 13,
- /* [136] */ 31,
- /* [137] */ 32,
- /* [138] */ 34,
-};
-
-// Assert that the MatcherIndex is big enough to index all the matchers, plus
-// kNoMatcher.
-static_assert(static_cast<int>(sizeof(kMatcherIndices) / sizeof(kMatcherIndices[0])) <
- static_cast<int>(std::numeric_limits<MatcherIndex>::max() - 1),
- "MatcherIndex is not large enough to index kMatcherIndices");
-
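The tables above are easiest to read with one concrete decode in hand. Each ParameterInfo below carries a pointer into kMatcherIndices, and matchers consume the indices left to right, exactly as MatchState::TypeName() and NumName() do in builtin_table.cc (deleted above). Taking kParameters[0], which points at &kMatcherIndices[83]:

// kMatcherIndices[83] == 17  ->  Matchers::type[17] is Texture2DArray_
// kMatcherIndices[84] == 2   ->  Matchers::type[2]  is F32_
//
// Texture2DArray::String() emits "texture_2d_array<" + TypeName() + ">" and
// TypeName() pulls the next index (2 -> "f32"), so this parameter reads back
// as texture_2d_array<f32>, which matches its kTexture usage below.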
-constexpr ParameterInfo kParameters[] = {
- {
- /* [0] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[83],
- },
- {
- /* [1] */
- /* usage */ ParameterUsage::kSampler,
- /* matcher indices */ &kMatcherIndices[135],
- },
- {
- /* [2] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[87],
- },
- {
- /* [3] */
- /* usage */ ParameterUsage::kArrayIndex,
- /* matcher indices */ &kMatcherIndices[55],
- },
- {
- /* [4] */
- /* usage */ ParameterUsage::kDdx,
- /* matcher indices */ &kMatcherIndices[87],
- },
- {
- /* [5] */
- /* usage */ ParameterUsage::kDdy,
- /* matcher indices */ &kMatcherIndices[87],
- },
- {
- /* [6] */
- /* usage */ ParameterUsage::kOffset,
- /* matcher indices */ &kMatcherIndices[85],
- },
- {
- /* [7] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[131],
- },
- {
- /* [8] */
- /* usage */ ParameterUsage::kSampler,
- /* matcher indices */ &kMatcherIndices[129],
- },
- {
- /* [9] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[87],
- },
- {
- /* [10] */
- /* usage */ ParameterUsage::kArrayIndex,
- /* matcher indices */ &kMatcherIndices[55],
- },
- {
- /* [11] */
- /* usage */ ParameterUsage::kDepthRef,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [12] */
- /* usage */ ParameterUsage::kOffset,
- /* matcher indices */ &kMatcherIndices[85],
- },
- {
- /* [13] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[95],
- },
- {
- /* [14] */
- /* usage */ ParameterUsage::kSampler,
- /* matcher indices */ &kMatcherIndices[135],
- },
- {
- /* [15] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[91],
- },
- {
- /* [16] */
- /* usage */ ParameterUsage::kArrayIndex,
- /* matcher indices */ &kMatcherIndices[55],
- },
- {
- /* [17] */
- /* usage */ ParameterUsage::kDdx,
- /* matcher indices */ &kMatcherIndices[91],
- },
- {
- /* [18] */
- /* usage */ ParameterUsage::kDdy,
- /* matcher indices */ &kMatcherIndices[91],
- },
- {
- /* [19] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[93],
- },
- {
- /* [20] */
- /* usage */ ParameterUsage::kSampler,
- /* matcher indices */ &kMatcherIndices[135],
- },
- {
- /* [21] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[91],
- },
- {
- /* [22] */
- /* usage */ ParameterUsage::kDdx,
- /* matcher indices */ &kMatcherIndices[91],
- },
- {
- /* [23] */
- /* usage */ ParameterUsage::kDdy,
- /* matcher indices */ &kMatcherIndices[91],
- },
- {
- /* [24] */
- /* usage */ ParameterUsage::kOffset,
- /* matcher indices */ &kMatcherIndices[99],
- },
- {
- /* [25] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[83],
- },
- {
- /* [26] */
- /* usage */ ParameterUsage::kSampler,
- /* matcher indices */ &kMatcherIndices[135],
- },
- {
- /* [27] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[87],
- },
- {
- /* [28] */
- /* usage */ ParameterUsage::kArrayIndex,
- /* matcher indices */ &kMatcherIndices[55],
- },
- {
- /* [29] */
- /* usage */ ParameterUsage::kLevel,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [30] */
- /* usage */ ParameterUsage::kOffset,
- /* matcher indices */ &kMatcherIndices[85],
- },
- {
- /* [31] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[83],
- },
- {
- /* [32] */
- /* usage */ ParameterUsage::kSampler,
- /* matcher indices */ &kMatcherIndices[135],
- },
- {
- /* [33] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[87],
- },
- {
- /* [34] */
- /* usage */ ParameterUsage::kArrayIndex,
- /* matcher indices */ &kMatcherIndices[55],
- },
- {
- /* [35] */
- /* usage */ ParameterUsage::kDdx,
- /* matcher indices */ &kMatcherIndices[87],
- },
- {
- /* [36] */
- /* usage */ ParameterUsage::kDdy,
- /* matcher indices */ &kMatcherIndices[87],
- },
- {
- /* [37] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[83],
- },
- {
- /* [38] */
- /* usage */ ParameterUsage::kSampler,
- /* matcher indices */ &kMatcherIndices[135],
- },
- {
- /* [39] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[87],
- },
- {
- /* [40] */
- /* usage */ ParameterUsage::kArrayIndex,
- /* matcher indices */ &kMatcherIndices[55],
- },
- {
- /* [41] */
- /* usage */ ParameterUsage::kBias,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [42] */
- /* usage */ ParameterUsage::kOffset,
- /* matcher indices */ &kMatcherIndices[85],
- },
- {
- /* [43] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[89],
- },
- {
- /* [44] */
- /* usage */ ParameterUsage::kSampler,
- /* matcher indices */ &kMatcherIndices[135],
- },
- {
- /* [45] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[87],
- },
- {
- /* [46] */
- /* usage */ ParameterUsage::kDdx,
- /* matcher indices */ &kMatcherIndices[87],
- },
- {
- /* [47] */
- /* usage */ ParameterUsage::kDdy,
- /* matcher indices */ &kMatcherIndices[87],
- },
- {
- /* [48] */
- /* usage */ ParameterUsage::kOffset,
- /* matcher indices */ &kMatcherIndices[85],
- },
- {
- /* [49] */
- /* usage */ ParameterUsage::kComponent,
- /* matcher indices */ &kMatcherIndices[55],
- },
- {
- /* [50] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[117],
- },
- {
- /* [51] */
- /* usage */ ParameterUsage::kSampler,
- /* matcher indices */ &kMatcherIndices[135],
- },
- {
- /* [52] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[87],
- },
- {
- /* [53] */
- /* usage */ ParameterUsage::kArrayIndex,
- /* matcher indices */ &kMatcherIndices[55],
- },
- {
- /* [54] */
- /* usage */ ParameterUsage::kOffset,
- /* matcher indices */ &kMatcherIndices[85],
- },
- {
- /* [55] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[131],
- },
- {
- /* [56] */
- /* usage */ ParameterUsage::kSampler,
- /* matcher indices */ &kMatcherIndices[135],
- },
- {
- /* [57] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[87],
- },
- {
- /* [58] */
- /* usage */ ParameterUsage::kArrayIndex,
- /* matcher indices */ &kMatcherIndices[55],
- },
- {
- /* [59] */
- /* usage */ ParameterUsage::kLevel,
- /* matcher indices */ &kMatcherIndices[55],
- },
- {
- /* [60] */
- /* usage */ ParameterUsage::kOffset,
- /* matcher indices */ &kMatcherIndices[85],
- },
- {
- /* [61] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[131],
- },
- {
- /* [62] */
- /* usage */ ParameterUsage::kSampler,
- /* matcher indices */ &kMatcherIndices[129],
- },
- {
- /* [63] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[87],
- },
- {
- /* [64] */
- /* usage */ ParameterUsage::kArrayIndex,
- /* matcher indices */ &kMatcherIndices[55],
- },
- {
- /* [65] */
- /* usage */ ParameterUsage::kDepthRef,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [66] */
- /* usage */ ParameterUsage::kOffset,
- /* matcher indices */ &kMatcherIndices[85],
- },
- {
- /* [67] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[131],
- },
- {
- /* [68] */
- /* usage */ ParameterUsage::kSampler,
- /* matcher indices */ &kMatcherIndices[129],
- },
- {
- /* [69] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[87],
- },
- {
- /* [70] */
- /* usage */ ParameterUsage::kArrayIndex,
- /* matcher indices */ &kMatcherIndices[55],
- },
- {
- /* [71] */
- /* usage */ ParameterUsage::kDepthRef,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [72] */
- /* usage */ ParameterUsage::kOffset,
- /* matcher indices */ &kMatcherIndices[85],
- },
- {
- /* [73] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[93],
- },
- {
- /* [74] */
- /* usage */ ParameterUsage::kSampler,
- /* matcher indices */ &kMatcherIndices[135],
- },
- {
- /* [75] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[91],
- },
- {
- /* [76] */
- /* usage */ ParameterUsage::kDdx,
- /* matcher indices */ &kMatcherIndices[91],
- },
- {
- /* [77] */
- /* usage */ ParameterUsage::kDdy,
- /* matcher indices */ &kMatcherIndices[91],
- },
- {
- /* [78] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[133],
- },
- {
- /* [79] */
- /* usage */ ParameterUsage::kSampler,
- /* matcher indices */ &kMatcherIndices[129],
- },
- {
- /* [80] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[87],
- },
- {
- /* [81] */
- /* usage */ ParameterUsage::kDepthRef,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [82] */
- /* usage */ ParameterUsage::kOffset,
- /* matcher indices */ &kMatcherIndices[85],
- },
- {
- /* [83] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[133],
- },
- {
- /* [84] */
- /* usage */ ParameterUsage::kSampler,
- /* matcher indices */ &kMatcherIndices[135],
- },
- {
- /* [85] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[87],
- },
- {
- /* [86] */
- /* usage */ ParameterUsage::kLevel,
- /* matcher indices */ &kMatcherIndices[55],
- },
- {
- /* [87] */
- /* usage */ ParameterUsage::kOffset,
- /* matcher indices */ &kMatcherIndices[85],
- },
- {
- /* [88] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[95],
- },
- {
- /* [89] */
- /* usage */ ParameterUsage::kSampler,
- /* matcher indices */ &kMatcherIndices[135],
- },
- {
- /* [90] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[91],
- },
- {
- /* [91] */
- /* usage */ ParameterUsage::kArrayIndex,
- /* matcher indices */ &kMatcherIndices[55],
- },
- {
- /* [92] */
- /* usage */ ParameterUsage::kBias,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [93] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[131],
- },
- {
- /* [94] */
- /* usage */ ParameterUsage::kSampler,
- /* matcher indices */ &kMatcherIndices[129],
- },
- {
- /* [95] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[87],
- },
- {
- /* [96] */
- /* usage */ ParameterUsage::kArrayIndex,
- /* matcher indices */ &kMatcherIndices[55],
- },
- {
- /* [97] */
- /* usage */ ParameterUsage::kDepthRef,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [98] */
- /* usage */ ParameterUsage::kComponent,
- /* matcher indices */ &kMatcherIndices[55],
- },
- {
- /* [99] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[125],
- },
- {
- /* [100] */
- /* usage */ ParameterUsage::kSampler,
- /* matcher indices */ &kMatcherIndices[135],
- },
- {
- /* [101] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[91],
- },
- {
- /* [102] */
- /* usage */ ParameterUsage::kArrayIndex,
- /* matcher indices */ &kMatcherIndices[55],
- },
- {
- /* [103] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[132],
- },
- {
- /* [104] */
- /* usage */ ParameterUsage::kSampler,
- /* matcher indices */ &kMatcherIndices[129],
- },
- {
- /* [105] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[91],
- },
- {
- /* [106] */
- /* usage */ ParameterUsage::kArrayIndex,
- /* matcher indices */ &kMatcherIndices[55],
- },
- {
- /* [107] */
- /* usage */ ParameterUsage::kDepthRef,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [108] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[93],
- },
- {
- /* [109] */
- /* usage */ ParameterUsage::kSampler,
- /* matcher indices */ &kMatcherIndices[135],
- },
- {
- /* [110] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[91],
- },
- {
- /* [111] */
- /* usage */ ParameterUsage::kBias,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [112] */
- /* usage */ ParameterUsage::kOffset,
- /* matcher indices */ &kMatcherIndices[99],
- },
- {
- /* [113] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[131],
- },
- {
- /* [114] */
- /* usage */ ParameterUsage::kSampler,
- /* matcher indices */ &kMatcherIndices[135],
- },
- {
- /* [115] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[87],
- },
- {
- /* [116] */
- /* usage */ ParameterUsage::kArrayIndex,
- /* matcher indices */ &kMatcherIndices[55],
- },
- {
- /* [117] */
- /* usage */ ParameterUsage::kOffset,
- /* matcher indices */ &kMatcherIndices[85],
- },
- {
- /* [118] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[133],
- },
- {
- /* [119] */
- /* usage */ ParameterUsage::kSampler,
- /* matcher indices */ &kMatcherIndices[129],
- },
- {
- /* [120] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[87],
- },
- {
- /* [121] */
- /* usage */ ParameterUsage::kDepthRef,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [122] */
- /* usage */ ParameterUsage::kOffset,
- /* matcher indices */ &kMatcherIndices[85],
- },
- {
- /* [123] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[132],
- },
- {
- /* [124] */
- /* usage */ ParameterUsage::kSampler,
- /* matcher indices */ &kMatcherIndices[129],
- },
- {
- /* [125] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[91],
- },
- {
- /* [126] */
- /* usage */ ParameterUsage::kArrayIndex,
- /* matcher indices */ &kMatcherIndices[55],
- },
- {
- /* [127] */
- /* usage */ ParameterUsage::kDepthRef,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [128] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[131],
- },
- {
- /* [129] */
- /* usage */ ParameterUsage::kSampler,
- /* matcher indices */ &kMatcherIndices[129],
- },
- {
- /* [130] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[87],
- },
- {
- /* [131] */
- /* usage */ ParameterUsage::kArrayIndex,
- /* matcher indices */ &kMatcherIndices[55],
- },
- {
- /* [132] */
- /* usage */ ParameterUsage::kDepthRef,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [133] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[132],
- },
- {
- /* [134] */
- /* usage */ ParameterUsage::kSampler,
- /* matcher indices */ &kMatcherIndices[135],
- },
- {
- /* [135] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[91],
- },
- {
- /* [136] */
- /* usage */ ParameterUsage::kArrayIndex,
- /* matcher indices */ &kMatcherIndices[55],
- },
- {
- /* [137] */
- /* usage */ ParameterUsage::kLevel,
- /* matcher indices */ &kMatcherIndices[55],
- },
- {
- /* [138] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[83],
- },
- {
- /* [139] */
- /* usage */ ParameterUsage::kSampler,
- /* matcher indices */ &kMatcherIndices[135],
- },
- {
- /* [140] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[87],
- },
- {
- /* [141] */
- /* usage */ ParameterUsage::kArrayIndex,
- /* matcher indices */ &kMatcherIndices[55],
- },
- {
- /* [142] */
- /* usage */ ParameterUsage::kOffset,
- /* matcher indices */ &kMatcherIndices[85],
- },
- {
- /* [143] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[133],
- },
- {
- /* [144] */
- /* usage */ ParameterUsage::kSampler,
- /* matcher indices */ &kMatcherIndices[129],
- },
- {
- /* [145] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[87],
- },
- {
- /* [146] */
- /* usage */ ParameterUsage::kDepthRef,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [147] */
- /* usage */ ParameterUsage::kOffset,
- /* matcher indices */ &kMatcherIndices[85],
- },
- {
- /* [148] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[131],
- },
- {
- /* [149] */
- /* usage */ ParameterUsage::kSampler,
- /* matcher indices */ &kMatcherIndices[135],
- },
- {
- /* [150] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[87],
- },
- {
- /* [151] */
- /* usage */ ParameterUsage::kArrayIndex,
- /* matcher indices */ &kMatcherIndices[55],
- },
- {
- /* [152] */
- /* usage */ ParameterUsage::kLevel,
- /* matcher indices */ &kMatcherIndices[55],
- },
- {
- /* [153] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[132],
- },
- {
- /* [154] */
- /* usage */ ParameterUsage::kSampler,
- /* matcher indices */ &kMatcherIndices[129],
- },
- {
- /* [155] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[91],
- },
- {
- /* [156] */
- /* usage */ ParameterUsage::kArrayIndex,
- /* matcher indices */ &kMatcherIndices[55],
- },
- {
- /* [157] */
- /* usage */ ParameterUsage::kDepthRef,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [158] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[89],
- },
- {
- /* [159] */
- /* usage */ ParameterUsage::kSampler,
- /* matcher indices */ &kMatcherIndices[135],
- },
- {
- /* [160] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[87],
- },
- {
- /* [161] */
- /* usage */ ParameterUsage::kDdx,
- /* matcher indices */ &kMatcherIndices[87],
- },
- {
- /* [162] */
- /* usage */ ParameterUsage::kDdy,
- /* matcher indices */ &kMatcherIndices[87],
- },
- {
- /* [163] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[95],
- },
- {
- /* [164] */
- /* usage */ ParameterUsage::kSampler,
- /* matcher indices */ &kMatcherIndices[135],
- },
- {
- /* [165] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[91],
- },
- {
- /* [166] */
- /* usage */ ParameterUsage::kArrayIndex,
- /* matcher indices */ &kMatcherIndices[55],
- },
- {
- /* [167] */
- /* usage */ ParameterUsage::kLevel,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [168] */
- /* usage */ ParameterUsage::kComponent,
- /* matcher indices */ &kMatcherIndices[55],
- },
- {
- /* [169] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[117],
- },
- {
- /* [170] */
- /* usage */ ParameterUsage::kSampler,
- /* matcher indices */ &kMatcherIndices[135],
- },
- {
- /* [171] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[87],
- },
- {
- /* [172] */
- /* usage */ ParameterUsage::kArrayIndex,
- /* matcher indices */ &kMatcherIndices[55],
- },
- {
- /* [173] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[93],
- },
- {
- /* [174] */
- /* usage */ ParameterUsage::kSampler,
- /* matcher indices */ &kMatcherIndices[135],
- },
- {
- /* [175] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[91],
- },
- {
- /* [176] */
- /* usage */ ParameterUsage::kLevel,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [177] */
- /* usage */ ParameterUsage::kOffset,
- /* matcher indices */ &kMatcherIndices[99],
- },
- {
- /* [178] */
- /* usage */ ParameterUsage::kComponent,
- /* matcher indices */ &kMatcherIndices[55],
- },
- {
- /* [179] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[115],
- },
- {
- /* [180] */
- /* usage */ ParameterUsage::kSampler,
- /* matcher indices */ &kMatcherIndices[135],
- },
- {
- /* [181] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[87],
- },
- {
- /* [182] */
- /* usage */ ParameterUsage::kOffset,
- /* matcher indices */ &kMatcherIndices[85],
- },
- {
- /* [183] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[131],
- },
- {
- /* [184] */
- /* usage */ ParameterUsage::kSampler,
- /* matcher indices */ &kMatcherIndices[135],
- },
- {
- /* [185] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[87],
- },
- {
- /* [186] */
- /* usage */ ParameterUsage::kArrayIndex,
- /* matcher indices */ &kMatcherIndices[55],
- },
- {
- /* [187] */
- /* usage */ ParameterUsage::kOffset,
- /* matcher indices */ &kMatcherIndices[85],
- },
- {
- /* [188] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[83],
- },
- {
- /* [189] */
- /* usage */ ParameterUsage::kSampler,
- /* matcher indices */ &kMatcherIndices[135],
- },
- {
- /* [190] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[87],
- },
- {
- /* [191] */
- /* usage */ ParameterUsage::kArrayIndex,
- /* matcher indices */ &kMatcherIndices[55],
- },
- {
- /* [192] */
- /* usage */ ParameterUsage::kBias,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [193] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[89],
- },
- {
- /* [194] */
- /* usage */ ParameterUsage::kSampler,
- /* matcher indices */ &kMatcherIndices[135],
- },
- {
- /* [195] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[87],
- },
- {
- /* [196] */
- /* usage */ ParameterUsage::kLevel,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [197] */
- /* usage */ ParameterUsage::kOffset,
- /* matcher indices */ &kMatcherIndices[85],
- },
- {
- /* [198] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[89],
- },
- {
- /* [199] */
- /* usage */ ParameterUsage::kSampler,
- /* matcher indices */ &kMatcherIndices[135],
- },
- {
- /* [200] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[87],
- },
- {
- /* [201] */
- /* usage */ ParameterUsage::kBias,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [202] */
- /* usage */ ParameterUsage::kOffset,
- /* matcher indices */ &kMatcherIndices[85],
- },
- {
- /* [203] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[131],
- },
- {
- /* [204] */
- /* usage */ ParameterUsage::kSampler,
- /* matcher indices */ &kMatcherIndices[129],
- },
- {
- /* [205] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[87],
- },
- {
- /* [206] */
- /* usage */ ParameterUsage::kArrayIndex,
- /* matcher indices */ &kMatcherIndices[55],
- },
- {
- /* [207] */
- /* usage */ ParameterUsage::kDepthRef,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [208] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[97],
- },
- {
- /* [209] */
- /* usage */ ParameterUsage::kSampler,
- /* matcher indices */ &kMatcherIndices[135],
- },
- {
- /* [210] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[91],
- },
- {
- /* [211] */
- /* usage */ ParameterUsage::kDdx,
- /* matcher indices */ &kMatcherIndices[91],
- },
- {
- /* [212] */
- /* usage */ ParameterUsage::kDdy,
- /* matcher indices */ &kMatcherIndices[91],
- },
- {
- /* [213] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[83],
- },
- {
- /* [214] */
- /* usage */ ParameterUsage::kSampler,
- /* matcher indices */ &kMatcherIndices[135],
- },
- {
- /* [215] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[87],
- },
- {
- /* [216] */
- /* usage */ ParameterUsage::kArrayIndex,
- /* matcher indices */ &kMatcherIndices[55],
- },
- {
- /* [217] */
- /* usage */ ParameterUsage::kLevel,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [218] */
- /* usage */ ParameterUsage::kComponent,
- /* matcher indices */ &kMatcherIndices[55],
- },
- {
- /* [219] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[123],
- },
- {
- /* [220] */
- /* usage */ ParameterUsage::kSampler,
- /* matcher indices */ &kMatcherIndices[135],
- },
- {
- /* [221] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[91],
- },
- {
- /* [222] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[89],
- },
- {
- /* [223] */
- /* usage */ ParameterUsage::kSampler,
- /* matcher indices */ &kMatcherIndices[135],
- },
- {
- /* [224] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[87],
- },
- {
- /* [225] */
- /* usage */ ParameterUsage::kLevel,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [226] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[93],
- },
- {
- /* [227] */
- /* usage */ ParameterUsage::kSampler,
- /* matcher indices */ &kMatcherIndices[135],
- },
- {
- /* [228] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[91],
- },
- {
- /* [229] */
- /* usage */ ParameterUsage::kLevel,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [230] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[97],
- },
- {
- /* [231] */
- /* usage */ ParameterUsage::kSampler,
- /* matcher indices */ &kMatcherIndices[135],
- },
- {
- /* [232] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[91],
- },
- {
- /* [233] */
- /* usage */ ParameterUsage::kLevel,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [234] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[133],
- },
- {
- /* [235] */
- /* usage */ ParameterUsage::kSampler,
- /* matcher indices */ &kMatcherIndices[135],
- },
- {
- /* [236] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[87],
- },
- {
- /* [237] */
- /* usage */ ParameterUsage::kLevel,
- /* matcher indices */ &kMatcherIndices[55],
- },
- {
- /* [238] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[83],
- },
- {
- /* [239] */
- /* usage */ ParameterUsage::kSampler,
- /* matcher indices */ &kMatcherIndices[135],
- },
- {
- /* [240] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[87],
- },
- {
- /* [241] */
- /* usage */ ParameterUsage::kArrayIndex,
- /* matcher indices */ &kMatcherIndices[55],
- },
- {
- /* [242] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[130],
- },
- {
- /* [243] */
- /* usage */ ParameterUsage::kSampler,
- /* matcher indices */ &kMatcherIndices[129],
- },
- {
- /* [244] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[91],
- },
- {
- /* [245] */
- /* usage */ ParameterUsage::kDepthRef,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [246] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[130],
- },
- {
- /* [247] */
- /* usage */ ParameterUsage::kSampler,
- /* matcher indices */ &kMatcherIndices[135],
- },
- {
- /* [248] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[91],
- },
- {
- /* [249] */
- /* usage */ ParameterUsage::kLevel,
- /* matcher indices */ &kMatcherIndices[55],
- },
- {
- /* [250] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[89],
- },
- {
- /* [251] */
- /* usage */ ParameterUsage::kSampler,
- /* matcher indices */ &kMatcherIndices[135],
- },
- {
- /* [252] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[87],
- },
- {
- /* [253] */
- /* usage */ ParameterUsage::kOffset,
- /* matcher indices */ &kMatcherIndices[85],
- },
- {
- /* [254] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[130],
- },
- {
- /* [255] */
- /* usage */ ParameterUsage::kSampler,
- /* matcher indices */ &kMatcherIndices[129],
- },
- {
- /* [256] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[91],
- },
- {
- /* [257] */
- /* usage */ ParameterUsage::kDepthRef,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [258] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[133],
- },
- {
- /* [259] */
- /* usage */ ParameterUsage::kSampler,
- /* matcher indices */ &kMatcherIndices[129],
- },
- {
- /* [260] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[87],
- },
- {
- /* [261] */
- /* usage */ ParameterUsage::kDepthRef,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [262] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[69],
- },
- {
- /* [263] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[85],
- },
- {
- /* [264] */
- /* usage */ ParameterUsage::kArrayIndex,
- /* matcher indices */ &kMatcherIndices[55],
- },
- {
- /* [265] */
- /* usage */ ParameterUsage::kValue,
- /* matcher indices */ &kMatcherIndices[81],
- },
- {
- /* [266] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[93],
- },
- {
- /* [267] */
- /* usage */ ParameterUsage::kSampler,
- /* matcher indices */ &kMatcherIndices[135],
- },
- {
- /* [268] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[91],
- },
- {
- /* [269] */
- /* usage */ ParameterUsage::kOffset,
- /* matcher indices */ &kMatcherIndices[99],
- },
- {
- /* [270] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[95],
- },
- {
- /* [271] */
- /* usage */ ParameterUsage::kSampler,
- /* matcher indices */ &kMatcherIndices[135],
- },
- {
- /* [272] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[91],
- },
- {
- /* [273] */
- /* usage */ ParameterUsage::kArrayIndex,
- /* matcher indices */ &kMatcherIndices[55],
- },
- {
- /* [274] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[1],
- },
- {
- /* [275] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[1],
- },
- {
- /* [276] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[43],
- },
- {
- /* [277] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[43],
- },
- {
- /* [278] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[130],
- },
- {
- /* [279] */
- /* usage */ ParameterUsage::kSampler,
- /* matcher indices */ &kMatcherIndices[129],
- },
- {
- /* [280] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[91],
- },
- {
- /* [281] */
- /* usage */ ParameterUsage::kDepthRef,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [282] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[133],
- },
- {
- /* [283] */
- /* usage */ ParameterUsage::kSampler,
- /* matcher indices */ &kMatcherIndices[129],
- },
- {
- /* [284] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[87],
- },
- {
- /* [285] */
- /* usage */ ParameterUsage::kDepthRef,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [286] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[132],
- },
- {
- /* [287] */
- /* usage */ ParameterUsage::kSampler,
- /* matcher indices */ &kMatcherIndices[135],
- },
- {
- /* [288] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[91],
- },
- {
- /* [289] */
- /* usage */ ParameterUsage::kArrayIndex,
- /* matcher indices */ &kMatcherIndices[55],
- },
- {
- /* [290] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[131],
- },
- {
- /* [291] */
- /* usage */ ParameterUsage::kSampler,
- /* matcher indices */ &kMatcherIndices[135],
- },
- {
- /* [292] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[87],
- },
- {
- /* [293] */
- /* usage */ ParameterUsage::kArrayIndex,
- /* matcher indices */ &kMatcherIndices[55],
- },
- {
- /* [294] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[133],
- },
- {
- /* [295] */
- /* usage */ ParameterUsage::kSampler,
- /* matcher indices */ &kMatcherIndices[129],
- },
- {
- /* [296] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[87],
- },
- {
- /* [297] */
- /* usage */ ParameterUsage::kDepthRef,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [298] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[133],
- },
- {
- /* [299] */
- /* usage */ ParameterUsage::kSampler,
- /* matcher indices */ &kMatcherIndices[135],
- },
- {
- /* [300] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[87],
- },
- {
- /* [301] */
- /* usage */ ParameterUsage::kOffset,
- /* matcher indices */ &kMatcherIndices[85],
- },
- {
- /* [302] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[131],
- },
- {
- /* [303] */
- /* usage */ ParameterUsage::kSampler,
- /* matcher indices */ &kMatcherIndices[135],
- },
- {
- /* [304] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[87],
- },
- {
- /* [305] */
- /* usage */ ParameterUsage::kArrayIndex,
- /* matcher indices */ &kMatcherIndices[55],
- },
- {
- /* [306] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[133],
- },
- {
- /* [307] */
- /* usage */ ParameterUsage::kSampler,
- /* matcher indices */ &kMatcherIndices[135],
- },
- {
- /* [308] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[87],
- },
- {
- /* [309] */
- /* usage */ ParameterUsage::kOffset,
- /* matcher indices */ &kMatcherIndices[85],
- },
- {
- /* [310] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[97],
- },
- {
- /* [311] */
- /* usage */ ParameterUsage::kSampler,
- /* matcher indices */ &kMatcherIndices[135],
- },
- {
- /* [312] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[91],
- },
- {
- /* [313] */
- /* usage */ ParameterUsage::kBias,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [314] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[45],
- },
- {
- /* [315] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[85],
- },
- {
- /* [316] */
- /* usage */ ParameterUsage::kArrayIndex,
- /* matcher indices */ &kMatcherIndices[55],
- },
- {
- /* [317] */
- /* usage */ ParameterUsage::kValue,
- /* matcher indices */ &kMatcherIndices[109],
- },
- {
- /* [318] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[93],
- },
- {
- /* [319] */
- /* usage */ ParameterUsage::kSampler,
- /* matcher indices */ &kMatcherIndices[135],
- },
- {
- /* [320] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[91],
- },
- {
- /* [321] */
- /* usage */ ParameterUsage::kBias,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [322] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[117],
- },
- {
- /* [323] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[85],
- },
- {
- /* [324] */
- /* usage */ ParameterUsage::kArrayIndex,
- /* matcher indices */ &kMatcherIndices[55],
- },
- {
- /* [325] */
- /* usage */ ParameterUsage::kLevel,
- /* matcher indices */ &kMatcherIndices[55],
- },
- {
- /* [326] */
- /* usage */ ParameterUsage::kComponent,
- /* matcher indices */ &kMatcherIndices[55],
- },
- {
- /* [327] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[115],
- },
- {
- /* [328] */
- /* usage */ ParameterUsage::kSampler,
- /* matcher indices */ &kMatcherIndices[135],
- },
- {
- /* [329] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[87],
- },
- {
- /* [330] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[131],
- },
- {
- /* [331] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[85],
- },
- {
- /* [332] */
- /* usage */ ParameterUsage::kArrayIndex,
- /* matcher indices */ &kMatcherIndices[55],
- },
- {
- /* [333] */
- /* usage */ ParameterUsage::kLevel,
- /* matcher indices */ &kMatcherIndices[55],
- },
- {
- /* [334] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[89],
- },
- {
- /* [335] */
- /* usage */ ParameterUsage::kSampler,
- /* matcher indices */ &kMatcherIndices[135],
- },
- {
- /* [336] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[87],
- },
- {
- /* [337] */
- /* usage */ ParameterUsage::kBias,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [338] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[132],
- },
- {
- /* [339] */
- /* usage */ ParameterUsage::kSampler,
- /* matcher indices */ &kMatcherIndices[135],
- },
- {
- /* [340] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[91],
- },
- {
- /* [341] */
- /* usage */ ParameterUsage::kArrayIndex,
- /* matcher indices */ &kMatcherIndices[55],
- },
- {
- /* [342] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[39],
- },
- {
- /* [343] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[39],
- },
- {
- /* [344] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[43],
- },
- {
- /* [345] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[43],
- },
- {
- /* [346] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[57],
- },
- {
- /* [347] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[85],
- },
- {
- /* [348] */
- /* usage */ ParameterUsage::kArrayIndex,
- /* matcher indices */ &kMatcherIndices[55],
- },
- {
- /* [349] */
- /* usage */ ParameterUsage::kValue,
- /* matcher indices */ &kMatcherIndices[107],
- },
- {
- /* [350] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[130],
- },
- {
- /* [351] */
- /* usage */ ParameterUsage::kSampler,
- /* matcher indices */ &kMatcherIndices[135],
- },
- {
- /* [352] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[91],
- },
- {
- /* [353] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[0],
- },
- {
- /* [354] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[1],
- },
- {
- /* [355] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[1],
- },
- {
- /* [356] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[21],
- },
- {
- /* [357] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[21],
- },
- {
- /* [358] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[21],
- },
- {
- /* [359] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [360] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [361] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [362] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[133],
- },
- {
- /* [363] */
- /* usage */ ParameterUsage::kSampler,
- /* matcher indices */ &kMatcherIndices[135],
- },
- {
- /* [364] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[87],
- },
- {
- /* [365] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[21],
- },
- {
- /* [366] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[21],
- },
- {
- /* [367] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[21],
- },
- {
- /* [368] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[97],
- },
- {
- /* [369] */
- /* usage */ ParameterUsage::kSampler,
- /* matcher indices */ &kMatcherIndices[135],
- },
- {
- /* [370] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[91],
- },
- {
- /* [371] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[39],
- },
- {
- /* [372] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[43],
- },
- {
- /* [373] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[43],
- },
- {
- /* [374] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[93],
- },
- {
- /* [375] */
- /* usage */ ParameterUsage::kSampler,
- /* matcher indices */ &kMatcherIndices[135],
- },
- {
- /* [376] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[91],
- },
- {
- /* [377] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[1],
- },
- {
- /* [378] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[43],
- },
- {
- /* [379] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[43],
- },
- {
- /* [380] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [381] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [382] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [383] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[136],
- },
- {
- /* [384] */
- /* usage */ ParameterUsage::kSampler,
- /* matcher indices */ &kMatcherIndices[135],
- },
- {
- /* [385] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[87],
- },
- {
- /* [386] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[89],
- },
- {
- /* [387] */
- /* usage */ ParameterUsage::kSampler,
- /* matcher indices */ &kMatcherIndices[135],
- },
- {
- /* [388] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[87],
- },
- {
- /* [389] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[127],
- },
- {
- /* [390] */
- /* usage */ ParameterUsage::kSampler,
- /* matcher indices */ &kMatcherIndices[135],
- },
- {
- /* [391] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [392] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[75],
- },
- {
- /* [393] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[55],
- },
- {
- /* [394] */
- /* usage */ ParameterUsage::kValue,
- /* matcher indices */ &kMatcherIndices[81],
- },
- {
- /* [395] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [396] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [397] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [398] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[21],
- },
- {
- /* [399] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[21],
- },
- {
- /* [400] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[21],
- },
- {
- /* [401] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[21],
- },
- {
- /* [402] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[21],
- },
- {
- /* [403] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [404] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[72],
- },
- {
- /* [405] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[85],
- },
- {
- /* [406] */
- /* usage */ ParameterUsage::kValue,
- /* matcher indices */ &kMatcherIndices[81],
- },
- {
- /* [407] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[66],
- },
- {
- /* [408] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[99],
- },
- {
- /* [409] */
- /* usage */ ParameterUsage::kValue,
- /* matcher indices */ &kMatcherIndices[81],
- },
- {
- /* [410] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[39],
- },
- {
- /* [411] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[39],
- },
- {
- /* [412] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[39],
- },
- {
- /* [413] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[1],
- },
- {
- /* [414] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[1],
- },
- {
- /* [415] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[1],
- },
- {
- /* [416] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[63],
- },
- {
- /* [417] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[55],
- },
- {
- /* [418] */
- /* usage */ ParameterUsage::kValue,
- /* matcher indices */ &kMatcherIndices[107],
- },
- {
- /* [419] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[130],
- },
- {
- /* [420] */
- /* usage */ ParameterUsage::kSampler,
- /* matcher indices */ &kMatcherIndices[135],
- },
- {
- /* [421] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[91],
- },
- {
- /* [422] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[60],
- },
- {
- /* [423] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[85],
- },
- {
- /* [424] */
- /* usage */ ParameterUsage::kValue,
- /* matcher indices */ &kMatcherIndices[107],
- },
- {
- /* [425] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[54],
- },
- {
- /* [426] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[99],
- },
- {
- /* [427] */
- /* usage */ ParameterUsage::kValue,
- /* matcher indices */ &kMatcherIndices[107],
- },
- {
- /* [428] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[51],
- },
- {
- /* [429] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[55],
- },
- {
- /* [430] */
- /* usage */ ParameterUsage::kValue,
- /* matcher indices */ &kMatcherIndices[109],
- },
- {
- /* [431] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[133],
- },
- {
- /* [432] */
- /* usage */ ParameterUsage::kSampler,
- /* matcher indices */ &kMatcherIndices[135],
- },
- {
- /* [433] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[87],
- },
- {
- /* [434] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[48],
- },
- {
- /* [435] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[85],
- },
- {
- /* [436] */
- /* usage */ ParameterUsage::kValue,
- /* matcher indices */ &kMatcherIndices[109],
- },
- {
- /* [437] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[42],
- },
- {
- /* [438] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[99],
- },
- {
- /* [439] */
- /* usage */ ParameterUsage::kValue,
- /* matcher indices */ &kMatcherIndices[109],
- },
- {
- /* [440] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[111],
- },
- {
- /* [441] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[55],
- },
- {
- /* [442] */
- /* usage */ ParameterUsage::kLevel,
- /* matcher indices */ &kMatcherIndices[55],
- },
- {
- /* [443] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[21],
- },
- {
- /* [444] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[21],
- },
- {
- /* [445] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [446] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[115],
- },
- {
- /* [447] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[85],
- },
- {
- /* [448] */
- /* usage */ ParameterUsage::kLevel,
- /* matcher indices */ &kMatcherIndices[55],
- },
- {
- /* [449] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[119],
- },
- {
- /* [450] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[99],
- },
- {
- /* [451] */
- /* usage */ ParameterUsage::kLevel,
- /* matcher indices */ &kMatcherIndices[55],
- },
- {
- /* [452] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[121],
- },
- {
- /* [453] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[85],
- },
- {
- /* [454] */
- /* usage */ ParameterUsage::kSampleIndex,
- /* matcher indices */ &kMatcherIndices[55],
- },
- {
- /* [455] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[133],
- },
- {
- /* [456] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[85],
- },
- {
- /* [457] */
- /* usage */ ParameterUsage::kLevel,
- /* matcher indices */ &kMatcherIndices[55],
- },
- {
- /* [458] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[134],
- },
- {
- /* [459] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[85],
- },
- {
- /* [460] */
- /* usage */ ParameterUsage::kSampleIndex,
- /* matcher indices */ &kMatcherIndices[55],
- },
- {
- /* [461] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[39],
- },
- {
- /* [462] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[39],
- },
- {
- /* [463] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[10],
- },
- {
- /* [464] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[39],
- },
- {
- /* [465] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[39],
- },
- {
- /* [466] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[24],
- },
- {
- /* [467] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[21],
- },
- {
- /* [468] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[21],
- },
- {
- /* [469] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[21],
- },
- {
- /* [470] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [471] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [472] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [473] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[21],
- },
- {
- /* [474] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[21],
- },
- {
- /* [475] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[21],
- },
- {
- /* [476] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[1],
- },
- {
- /* [477] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[1],
- },
- {
- /* [478] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[10],
- },
- {
- /* [479] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[133],
- },
- {
- /* [480] */
- /* usage */ ParameterUsage::kLevel,
- /* matcher indices */ &kMatcherIndices[55],
- },
- {
- /* [481] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[0],
- },
- {
- /* [482] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[1],
- },
- {
- /* [483] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[0],
- },
- {
- /* [484] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[1],
- },
- {
- /* [485] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[0],
- },
- {
- /* [486] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[1],
- },
- {
- /* [487] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[0],
- },
- {
- /* [488] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[1],
- },
- {
- /* [489] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[0],
- },
- {
- /* [490] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[1],
- },
- {
- /* [491] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[0],
- },
- {
- /* [492] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[1],
- },
- {
- /* [493] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[0],
- },
- {
- /* [494] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[1],
- },
- {
- /* [495] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [496] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [497] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[21],
- },
- {
- /* [498] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[21],
- },
- {
- /* [499] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[0],
- },
- {
- /* [500] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[1],
- },
- {
- /* [501] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[136],
- },
- {
- /* [502] */
- /* usage */ ParameterUsage::kCoords,
- /* matcher indices */ &kMatcherIndices[85],
- },
- {
- /* [503] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[39],
- },
- {
- /* [504] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[39],
- },
- {
- /* [505] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [506] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [507] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[21],
- },
- {
- /* [508] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[21],
- },
- {
- /* [509] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[91],
- },
- {
- /* [510] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[91],
- },
- {
- /* [511] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [512] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [513] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[21],
- },
- {
- /* [514] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[21],
- },
- {
- /* [515] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [516] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[55],
- },
- {
- /* [517] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[21],
- },
- {
- /* [518] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[78],
- },
- {
- /* [519] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[39],
- },
- {
- /* [520] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[39],
- },
- {
- /* [521] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[1],
- },
- {
- /* [522] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[1],
- },
- {
- /* [523] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[132],
- },
- {
- /* [524] */
- /* usage */ ParameterUsage::kLevel,
- /* matcher indices */ &kMatcherIndices[55],
- },
- {
- /* [525] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[0],
- },
- {
- /* [526] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[1],
- },
- {
- /* [527] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[39],
- },
- {
- /* [528] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[39],
- },
- {
- /* [529] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[111],
- },
- {
- /* [530] */
- /* usage */ ParameterUsage::kLevel,
- /* matcher indices */ &kMatcherIndices[55],
- },
- {
- /* [531] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[1],
- },
- {
- /* [532] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[1],
- },
- {
- /* [533] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[115],
- },
- {
- /* [534] */
- /* usage */ ParameterUsage::kLevel,
- /* matcher indices */ &kMatcherIndices[55],
- },
- {
- /* [535] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[117],
- },
- {
- /* [536] */
- /* usage */ ParameterUsage::kLevel,
- /* matcher indices */ &kMatcherIndices[55],
- },
- {
- /* [537] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [538] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [539] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[21],
- },
- {
- /* [540] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[21],
- },
- {
- /* [541] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[119],
- },
- {
- /* [542] */
- /* usage */ ParameterUsage::kLevel,
- /* matcher indices */ &kMatcherIndices[55],
- },
- {
- /* [543] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[21],
- },
- {
- /* [544] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[21],
- },
- {
- /* [545] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[123],
- },
- {
- /* [546] */
- /* usage */ ParameterUsage::kLevel,
- /* matcher indices */ &kMatcherIndices[55],
- },
- {
- /* [547] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[130],
- },
- {
- /* [548] */
- /* usage */ ParameterUsage::kLevel,
- /* matcher indices */ &kMatcherIndices[55],
- },
- {
- /* [549] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[125],
- },
- {
- /* [550] */
- /* usage */ ParameterUsage::kLevel,
- /* matcher indices */ &kMatcherIndices[55],
- },
- {
- /* [551] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[131],
- },
- {
- /* [552] */
- /* usage */ ParameterUsage::kLevel,
- /* matcher indices */ &kMatcherIndices[55],
- },
- {
- /* [553] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[43],
- },
- {
- /* [554] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[133],
- },
- {
- /* [555] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[131],
- },
- {
- /* [556] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[121],
- },
- {
- /* [557] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[130],
- },
- {
- /* [558] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[125],
- },
- {
- /* [559] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[132],
- },
- {
- /* [560] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [561] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[134],
- },
- {
- /* [562] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[36],
- },
- {
- /* [563] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[33],
- },
- {
- /* [564] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[27],
- },
- {
- /* [565] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[30],
- },
- {
- /* [566] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[136],
- },
- {
- /* [567] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [568] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[39],
- },
- {
- /* [569] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[1],
- },
- {
- /* [570] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[123],
- },
- {
- /* [571] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[21],
- },
- {
- /* [572] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [573] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[119],
- },
- {
- /* [574] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[21],
- },
- {
- /* [575] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[81],
- },
- {
- /* [576] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[81],
- },
- {
- /* [577] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[87],
- },
- {
- /* [578] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[87],
- },
- {
- /* [579] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[87],
- },
- {
- /* [580] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[21],
- },
- {
- /* [581] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[117],
- },
- {
- /* [582] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[115],
- },
- {
- /* [583] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [584] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[111],
- },
- {
- /* [585] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[117],
- },
- {
- /* [586] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[125],
- },
- {
- /* [587] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[131],
- },
- {
- /* [588] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[132],
- },
- {
- /* [589] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[27],
- },
- {
- /* [590] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[111],
- },
- {
- /* [591] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[115],
- },
- {
- /* [592] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[117],
- },
- {
- /* [593] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[119],
- },
- {
- /* [594] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[123],
- },
- {
- /* [595] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[125],
- },
- {
- /* [596] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[133],
- },
- {
- /* [597] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[131],
- },
- {
- /* [598] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[130],
- },
- {
- /* [599] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[132],
- },
- {
- /* [600] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[121],
- },
- {
- /* [601] */
- /* usage */ ParameterUsage::kTexture,
- /* matcher indices */ &kMatcherIndices[134],
- },
- {
- /* [602] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[43],
- },
- {
- /* [603] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[43],
- },
- {
- /* [604] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[21],
- },
- {
- /* [605] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[21],
- },
- {
- /* [606] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [607] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[21],
- },
- {
- /* [608] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [609] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[21],
- },
- {
- /* [610] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [611] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[43],
- },
- {
- /* [612] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[43],
- },
- {
- /* [613] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[21],
- },
- {
- /* [614] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [615] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[21],
- },
- {
- /* [616] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [617] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[21],
- },
- {
- /* [618] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [619] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[21],
- },
- {
- /* [620] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [621] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[21],
- },
- {
- /* [622] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [623] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[21],
- },
- {
- /* [624] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [625] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[21],
- },
- {
- /* [626] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [627] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[39],
- },
- {
- /* [628] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[1],
- },
- {
- /* [629] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[39],
- },
- {
- /* [630] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[1],
- },
- {
- /* [631] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[21],
- },
- {
- /* [632] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [633] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[21],
- },
- {
- /* [634] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [635] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[21],
- },
- {
- /* [636] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [637] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[21],
- },
- {
- /* [638] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [639] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[21],
- },
- {
- /* [640] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [641] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[21],
- },
- {
- /* [642] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [643] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[21],
- },
- {
- /* [644] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [645] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[21],
- },
- {
- /* [646] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [647] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[1],
- },
- {
- /* [648] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[21],
- },
- {
- /* [649] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [650] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[17],
- },
- {
- /* [651] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[21],
- },
- {
- /* [652] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [653] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[13],
- },
- {
- /* [654] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[39],
- },
- {
- /* [655] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[1],
- },
- {
- /* [656] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[39],
- },
- {
- /* [657] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[1],
- },
- {
- /* [658] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[39],
- },
- {
- /* [659] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[1],
- },
- {
- /* [660] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[21],
- },
- {
- /* [661] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [662] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[21],
- },
- {
- /* [663] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [664] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[21],
- },
- {
- /* [665] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [666] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[21],
- },
- {
- /* [667] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [668] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[21],
- },
- {
- /* [669] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[21],
- },
- {
- /* [670] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[21],
- },
- {
- /* [671] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [672] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[4],
- },
- {
- /* [673] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[24],
- },
- {
- /* [674] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[10],
- },
- {
- /* [675] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[24],
- },
- {
- /* [676] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[10],
- },
- {
- /* [677] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[21],
- },
- {
- /* [678] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [679] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[39],
- },
- {
- /* [680] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [681] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[0],
- },
- {
- /* [682] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[21],
- },
- {
- /* [683] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [684] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [685] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[21],
- },
- {
- /* [686] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[12],
- },
- {
- /* [687] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[21],
- },
- {
- /* [688] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[21],
- },
- {
- /* [689] */
- /* usage */ ParameterUsage::kNone,
- /* matcher indices */ &kMatcherIndices[12],
- },
-};
-
-constexpr OpenTypeInfo kOpenTypes[] = {
- {
- /* [0] */
- /* name */ "T",
- /* matcher index */ 37,
- },
- {
- /* [1] */
- /* name */ "T",
- /* matcher index */ 36,
- },
- {
- /* [2] */
- /* name */ "T",
- /* matcher index */ kNoMatcher,
- },
- {
- /* [3] */
- /* name */ "T",
- /* matcher index */ 38,
- },
-};
-
-constexpr OpenNumberInfo kOpenNumbers[] = {
- {
- /* [0] */
- /* name */ "F",
- /* matcher index */ kNoMatcher,
- },
- {
- /* [1] */
- /* name */ "A",
- /* matcher index */ 5,
- },
- {
- /* [2] */
- /* name */ "M",
- /* matcher index */ kNoMatcher,
- },
- {
- /* [3] */
- /* name */ "N",
- /* matcher index */ kNoMatcher,
- },
- {
- /* [4] */
- /* name */ "A",
- /* matcher index */ kNoMatcher,
- },
- {
- /* [5] */
- /* name */ "S",
- /* matcher index */ 7,
- },
-};
-
-constexpr OverloadInfo kOverloads[] = {
- {
- /* [0] */
- /* num parameters */ 1,
- /* num open types */ 1,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[1],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[584],
- /* return matcher indices */ &kMatcherIndices[55],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [1] */
- /* num parameters */ 2,
- /* num open types */ 1,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[1],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[529],
- /* return matcher indices */ &kMatcherIndices[55],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [2] */
- /* num parameters */ 1,
- /* num open types */ 1,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[1],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[582],
- /* return matcher indices */ &kMatcherIndices[85],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [3] */
- /* num parameters */ 2,
- /* num open types */ 1,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[1],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[533],
- /* return matcher indices */ &kMatcherIndices[85],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [4] */
- /* num parameters */ 1,
- /* num open types */ 1,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[1],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[581],
- /* return matcher indices */ &kMatcherIndices[85],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [5] */
- /* num parameters */ 2,
- /* num open types */ 1,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[1],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[535],
- /* return matcher indices */ &kMatcherIndices[85],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [6] */
- /* num parameters */ 1,
- /* num open types */ 1,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[1],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[573],
- /* return matcher indices */ &kMatcherIndices[99],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [7] */
- /* num parameters */ 2,
- /* num open types */ 1,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[1],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[541],
- /* return matcher indices */ &kMatcherIndices[99],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [8] */
- /* num parameters */ 1,
- /* num open types */ 1,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[1],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[570],
- /* return matcher indices */ &kMatcherIndices[85],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [9] */
- /* num parameters */ 2,
- /* num open types */ 1,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[1],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[545],
- /* return matcher indices */ &kMatcherIndices[85],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [10] */
- /* num parameters */ 1,
- /* num open types */ 1,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[1],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[558],
- /* return matcher indices */ &kMatcherIndices[85],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [11] */
- /* num parameters */ 2,
- /* num open types */ 1,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[1],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[549],
- /* return matcher indices */ &kMatcherIndices[85],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [12] */
- /* num parameters */ 1,
- /* num open types */ 1,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[1],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[556],
- /* return matcher indices */ &kMatcherIndices[85],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [13] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[554],
- /* return matcher indices */ &kMatcherIndices[85],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [14] */
- /* num parameters */ 2,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[479],
- /* return matcher indices */ &kMatcherIndices[85],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [15] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[555],
- /* return matcher indices */ &kMatcherIndices[85],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [16] */
- /* num parameters */ 2,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[551],
- /* return matcher indices */ &kMatcherIndices[85],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [17] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[557],
- /* return matcher indices */ &kMatcherIndices[85],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [18] */
- /* num parameters */ 2,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[547],
- /* return matcher indices */ &kMatcherIndices[85],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [19] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[559],
- /* return matcher indices */ &kMatcherIndices[85],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [20] */
- /* num parameters */ 2,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[523],
- /* return matcher indices */ &kMatcherIndices[85],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [21] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[561],
- /* return matcher indices */ &kMatcherIndices[85],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [22] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 2,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[0],
- /* parameters */ &kParameters[562],
- /* return matcher indices */ &kMatcherIndices[55],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [23] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 2,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[0],
- /* parameters */ &kParameters[563],
- /* return matcher indices */ &kMatcherIndices[85],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [24] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 2,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[0],
- /* parameters */ &kParameters[564],
- /* return matcher indices */ &kMatcherIndices[85],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [25] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 2,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[0],
- /* parameters */ &kParameters[565],
- /* return matcher indices */ &kMatcherIndices[99],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [26] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[566],
- /* return matcher indices */ &kMatcherIndices[85],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [27] */
- /* num parameters */ 4,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[222],
- /* return matcher indices */ &kMatcherIndices[81],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [28] */
- /* num parameters */ 5,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[193],
- /* return matcher indices */ &kMatcherIndices[81],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [29] */
- /* num parameters */ 5,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[213],
- /* return matcher indices */ &kMatcherIndices[81],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [30] */
- /* num parameters */ 6,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[25],
- /* return matcher indices */ &kMatcherIndices[81],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [31] */
- /* num parameters */ 4,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[226],
- /* return matcher indices */ &kMatcherIndices[81],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [32] */
- /* num parameters */ 5,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[173],
- /* return matcher indices */ &kMatcherIndices[81],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [33] */
- /* num parameters */ 4,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[230],
- /* return matcher indices */ &kMatcherIndices[81],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [34] */
- /* num parameters */ 5,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[163],
- /* return matcher indices */ &kMatcherIndices[81],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [35] */
- /* num parameters */ 4,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[234],
- /* return matcher indices */ &kMatcherIndices[12],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [36] */
- /* num parameters */ 5,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[83],
- /* return matcher indices */ &kMatcherIndices[12],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [37] */
- /* num parameters */ 5,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[148],
- /* return matcher indices */ &kMatcherIndices[12],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [38] */
- /* num parameters */ 6,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[55],
- /* return matcher indices */ &kMatcherIndices[12],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [39] */
- /* num parameters */ 4,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[246],
- /* return matcher indices */ &kMatcherIndices[12],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [40] */
- /* num parameters */ 5,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[133],
- /* return matcher indices */ &kMatcherIndices[12],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [41] */
- /* num parameters */ 3,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[383],
- /* return matcher indices */ &kMatcherIndices[81],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [42] */
- /* num parameters */ 3,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[389],
- /* return matcher indices */ &kMatcherIndices[81],
- /* supported_stages */ PipelineStageSet(PipelineStage::kFragment),
- /* is_deprecated */ false,
- },
- {
- /* [43] */
- /* num parameters */ 3,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[386],
- /* return matcher indices */ &kMatcherIndices[81],
- /* supported_stages */ PipelineStageSet(PipelineStage::kFragment),
- /* is_deprecated */ false,
- },
- {
- /* [44] */
- /* num parameters */ 4,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[250],
- /* return matcher indices */ &kMatcherIndices[81],
- /* supported_stages */ PipelineStageSet(PipelineStage::kFragment),
- /* is_deprecated */ false,
- },
- {
- /* [45] */
- /* num parameters */ 4,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[238],
- /* return matcher indices */ &kMatcherIndices[81],
- /* supported_stages */ PipelineStageSet(PipelineStage::kFragment),
- /* is_deprecated */ false,
- },
- {
- /* [46] */
- /* num parameters */ 5,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[138],
- /* return matcher indices */ &kMatcherIndices[81],
- /* supported_stages */ PipelineStageSet(PipelineStage::kFragment),
- /* is_deprecated */ false,
- },
- {
- /* [47] */
- /* num parameters */ 3,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[374],
- /* return matcher indices */ &kMatcherIndices[81],
- /* supported_stages */ PipelineStageSet(PipelineStage::kFragment),
- /* is_deprecated */ false,
- },
- {
- /* [48] */
- /* num parameters */ 4,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[266],
- /* return matcher indices */ &kMatcherIndices[81],
- /* supported_stages */ PipelineStageSet(PipelineStage::kFragment),
- /* is_deprecated */ false,
- },
- {
- /* [49] */
- /* num parameters */ 3,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[368],
- /* return matcher indices */ &kMatcherIndices[81],
- /* supported_stages */ PipelineStageSet(PipelineStage::kFragment),
- /* is_deprecated */ false,
- },
- {
- /* [50] */
- /* num parameters */ 4,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[270],
- /* return matcher indices */ &kMatcherIndices[81],
- /* supported_stages */ PipelineStageSet(PipelineStage::kFragment),
- /* is_deprecated */ false,
- },
- {
- /* [51] */
- /* num parameters */ 3,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[362],
- /* return matcher indices */ &kMatcherIndices[12],
- /* supported_stages */ PipelineStageSet(PipelineStage::kFragment),
- /* is_deprecated */ false,
- },
- {
- /* [52] */
- /* num parameters */ 4,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[298],
- /* return matcher indices */ &kMatcherIndices[12],
- /* supported_stages */ PipelineStageSet(PipelineStage::kFragment),
- /* is_deprecated */ false,
- },
- {
- /* [53] */
- /* num parameters */ 4,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[302],
- /* return matcher indices */ &kMatcherIndices[12],
- /* supported_stages */ PipelineStageSet(PipelineStage::kFragment),
- /* is_deprecated */ false,
- },
- {
- /* [54] */
- /* num parameters */ 5,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[183],
- /* return matcher indices */ &kMatcherIndices[12],
- /* supported_stages */ PipelineStageSet(PipelineStage::kFragment),
- /* is_deprecated */ false,
- },
- {
- /* [55] */
- /* num parameters */ 3,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[350],
- /* return matcher indices */ &kMatcherIndices[12],
- /* supported_stages */ PipelineStageSet(PipelineStage::kFragment),
- /* is_deprecated */ false,
- },
- {
- /* [56] */
- /* num parameters */ 4,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[338],
- /* return matcher indices */ &kMatcherIndices[12],
- /* supported_stages */ PipelineStageSet(PipelineStage::kFragment),
- /* is_deprecated */ false,
- },
- {
- /* [57] */
- /* num parameters */ 4,
- /* num open types */ 1,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[1],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[326],
- /* return matcher indices */ &kMatcherIndices[113],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [58] */
- /* num parameters */ 5,
- /* num open types */ 1,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[1],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[178],
- /* return matcher indices */ &kMatcherIndices[113],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [59] */
- /* num parameters */ 5,
- /* num open types */ 1,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[1],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[168],
- /* return matcher indices */ &kMatcherIndices[113],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [60] */
- /* num parameters */ 6,
- /* num open types */ 1,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[1],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[49],
- /* return matcher indices */ &kMatcherIndices[113],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [61] */
- /* num parameters */ 4,
- /* num open types */ 1,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[1],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[218],
- /* return matcher indices */ &kMatcherIndices[113],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [62] */
- /* num parameters */ 5,
- /* num open types */ 1,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[1],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[98],
- /* return matcher indices */ &kMatcherIndices[113],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [63] */
- /* num parameters */ 3,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[431],
- /* return matcher indices */ &kMatcherIndices[81],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [64] */
- /* num parameters */ 4,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[306],
- /* return matcher indices */ &kMatcherIndices[81],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [65] */
- /* num parameters */ 4,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[290],
- /* return matcher indices */ &kMatcherIndices[81],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [66] */
- /* num parameters */ 5,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[113],
- /* return matcher indices */ &kMatcherIndices[81],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [67] */
- /* num parameters */ 3,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[419],
- /* return matcher indices */ &kMatcherIndices[81],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [68] */
- /* num parameters */ 4,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[286],
- /* return matcher indices */ &kMatcherIndices[81],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [69] */
- /* num parameters */ 3,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[392],
- /* return matcher indices */ nullptr,
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [70] */
- /* num parameters */ 3,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[404],
- /* return matcher indices */ nullptr,
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [71] */
- /* num parameters */ 4,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[262],
- /* return matcher indices */ nullptr,
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [72] */
- /* num parameters */ 3,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[407],
- /* return matcher indices */ nullptr,
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [73] */
- /* num parameters */ 3,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[416],
- /* return matcher indices */ nullptr,
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [74] */
- /* num parameters */ 3,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[422],
- /* return matcher indices */ nullptr,
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [75] */
- /* num parameters */ 4,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[346],
- /* return matcher indices */ nullptr,
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [76] */
- /* num parameters */ 3,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[425],
- /* return matcher indices */ nullptr,
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [77] */
- /* num parameters */ 3,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[428],
- /* return matcher indices */ nullptr,
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [78] */
- /* num parameters */ 3,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[434],
- /* return matcher indices */ nullptr,
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [79] */
- /* num parameters */ 4,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[314],
- /* return matcher indices */ nullptr,
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [80] */
- /* num parameters */ 3,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[437],
- /* return matcher indices */ nullptr,
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [81] */
- /* num parameters */ 1,
- /* num open types */ 1,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[1],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[590],
- /* return matcher indices */ &kMatcherIndices[55],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [82] */
- /* num parameters */ 1,
- /* num open types */ 1,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[1],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[591],
- /* return matcher indices */ &kMatcherIndices[55],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [83] */
- /* num parameters */ 1,
- /* num open types */ 1,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[1],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[592],
- /* return matcher indices */ &kMatcherIndices[55],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [84] */
- /* num parameters */ 1,
- /* num open types */ 1,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[1],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[593],
- /* return matcher indices */ &kMatcherIndices[55],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [85] */
- /* num parameters */ 1,
- /* num open types */ 1,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[1],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[594],
- /* return matcher indices */ &kMatcherIndices[55],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [86] */
- /* num parameters */ 1,
- /* num open types */ 1,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[1],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[595],
- /* return matcher indices */ &kMatcherIndices[55],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [87] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[596],
- /* return matcher indices */ &kMatcherIndices[55],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [88] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[597],
- /* return matcher indices */ &kMatcherIndices[55],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [89] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[598],
- /* return matcher indices */ &kMatcherIndices[55],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [90] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[599],
- /* return matcher indices */ &kMatcherIndices[55],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [91] */
- /* num parameters */ 3,
- /* num open types */ 1,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[1],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[440],
- /* return matcher indices */ &kMatcherIndices[113],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [92] */
- /* num parameters */ 3,
- /* num open types */ 1,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[1],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[446],
- /* return matcher indices */ &kMatcherIndices[113],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [93] */
- /* num parameters */ 4,
- /* num open types */ 1,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[1],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[322],
- /* return matcher indices */ &kMatcherIndices[113],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [94] */
- /* num parameters */ 3,
- /* num open types */ 1,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[1],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[449],
- /* return matcher indices */ &kMatcherIndices[113],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [95] */
- /* num parameters */ 3,
- /* num open types */ 1,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[1],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[452],
- /* return matcher indices */ &kMatcherIndices[113],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [96] */
- /* num parameters */ 3,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[455],
- /* return matcher indices */ &kMatcherIndices[12],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [97] */
- /* num parameters */ 4,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[330],
- /* return matcher indices */ &kMatcherIndices[12],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [98] */
- /* num parameters */ 3,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[458],
- /* return matcher indices */ &kMatcherIndices[12],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [99] */
- /* num parameters */ 2,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[501],
- /* return matcher indices */ &kMatcherIndices[81],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [100] */
- /* num parameters */ 4,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[334],
- /* return matcher indices */ &kMatcherIndices[81],
- /* supported_stages */ PipelineStageSet(PipelineStage::kFragment),
- /* is_deprecated */ false,
- },
- {
- /* [101] */
- /* num parameters */ 5,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[198],
- /* return matcher indices */ &kMatcherIndices[81],
- /* supported_stages */ PipelineStageSet(PipelineStage::kFragment),
- /* is_deprecated */ false,
- },
- {
- /* [102] */
- /* num parameters */ 5,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[188],
- /* return matcher indices */ &kMatcherIndices[81],
- /* supported_stages */ PipelineStageSet(PipelineStage::kFragment),
- /* is_deprecated */ false,
- },
- {
- /* [103] */
- /* num parameters */ 6,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[37],
- /* return matcher indices */ &kMatcherIndices[81],
- /* supported_stages */ PipelineStageSet(PipelineStage::kFragment),
- /* is_deprecated */ false,
- },
- {
- /* [104] */
- /* num parameters */ 4,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[318],
- /* return matcher indices */ &kMatcherIndices[81],
- /* supported_stages */ PipelineStageSet(PipelineStage::kFragment),
- /* is_deprecated */ false,
- },
- {
- /* [105] */
- /* num parameters */ 5,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[108],
- /* return matcher indices */ &kMatcherIndices[81],
- /* supported_stages */ PipelineStageSet(PipelineStage::kFragment),
- /* is_deprecated */ false,
- },
- {
- /* [106] */
- /* num parameters */ 4,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[310],
- /* return matcher indices */ &kMatcherIndices[81],
- /* supported_stages */ PipelineStageSet(PipelineStage::kFragment),
- /* is_deprecated */ false,
- },
- {
- /* [107] */
- /* num parameters */ 5,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[88],
- /* return matcher indices */ &kMatcherIndices[81],
- /* supported_stages */ PipelineStageSet(PipelineStage::kFragment),
- /* is_deprecated */ false,
- },
- {
- /* [108] */
- /* num parameters */ 5,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[158],
- /* return matcher indices */ &kMatcherIndices[81],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [109] */
- /* num parameters */ 6,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[43],
- /* return matcher indices */ &kMatcherIndices[81],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [110] */
- /* num parameters */ 6,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[31],
- /* return matcher indices */ &kMatcherIndices[81],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [111] */
- /* num parameters */ 7,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[0],
- /* return matcher indices */ &kMatcherIndices[81],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [112] */
- /* num parameters */ 5,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[73],
- /* return matcher indices */ &kMatcherIndices[81],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [113] */
- /* num parameters */ 6,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[19],
- /* return matcher indices */ &kMatcherIndices[81],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [114] */
- /* num parameters */ 5,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[208],
- /* return matcher indices */ &kMatcherIndices[81],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [115] */
- /* num parameters */ 6,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[13],
- /* return matcher indices */ &kMatcherIndices[81],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [116] */
- /* num parameters */ 4,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[282],
- /* return matcher indices */ &kMatcherIndices[81],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [117] */
- /* num parameters */ 5,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[143],
- /* return matcher indices */ &kMatcherIndices[81],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [118] */
- /* num parameters */ 5,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[203],
- /* return matcher indices */ &kMatcherIndices[81],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [119] */
- /* num parameters */ 6,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[61],
- /* return matcher indices */ &kMatcherIndices[81],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [120] */
- /* num parameters */ 4,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[254],
- /* return matcher indices */ &kMatcherIndices[81],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [121] */
- /* num parameters */ 5,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[123],
- /* return matcher indices */ &kMatcherIndices[81],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [122] */
- /* num parameters */ 4,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[258],
- /* return matcher indices */ &kMatcherIndices[12],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [123] */
- /* num parameters */ 5,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[118],
- /* return matcher indices */ &kMatcherIndices[12],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [124] */
- /* num parameters */ 5,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[128],
- /* return matcher indices */ &kMatcherIndices[12],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [125] */
- /* num parameters */ 6,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[7],
- /* return matcher indices */ &kMatcherIndices[12],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [126] */
- /* num parameters */ 4,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[242],
- /* return matcher indices */ &kMatcherIndices[12],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [127] */
- /* num parameters */ 5,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[153],
- /* return matcher indices */ &kMatcherIndices[12],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [128] */
- /* num parameters */ 4,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[294],
- /* return matcher indices */ &kMatcherIndices[12],
- /* supported_stages */ PipelineStageSet(PipelineStage::kFragment),
- /* is_deprecated */ false,
- },
- {
- /* [129] */
- /* num parameters */ 5,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[78],
- /* return matcher indices */ &kMatcherIndices[12],
- /* supported_stages */ PipelineStageSet(PipelineStage::kFragment),
- /* is_deprecated */ false,
- },
- {
- /* [130] */
- /* num parameters */ 5,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[93],
- /* return matcher indices */ &kMatcherIndices[12],
- /* supported_stages */ PipelineStageSet(PipelineStage::kFragment),
- /* is_deprecated */ false,
- },
- {
- /* [131] */
- /* num parameters */ 6,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[67],
- /* return matcher indices */ &kMatcherIndices[12],
- /* supported_stages */ PipelineStageSet(PipelineStage::kFragment),
- /* is_deprecated */ false,
- },
- {
- /* [132] */
- /* num parameters */ 4,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[278],
- /* return matcher indices */ &kMatcherIndices[12],
- /* supported_stages */ PipelineStageSet(PipelineStage::kFragment),
- /* is_deprecated */ false,
- },
- {
- /* [133] */
- /* num parameters */ 5,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[103],
- /* return matcher indices */ &kMatcherIndices[12],
- /* supported_stages */ PipelineStageSet(PipelineStage::kFragment),
- /* is_deprecated */ false,
- },
- {
- /* [134] */
- /* num parameters */ 1,
- /* num open types */ 1,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[1],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[585],
- /* return matcher indices */ &kMatcherIndices[55],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [135] */
- /* num parameters */ 1,
- /* num open types */ 1,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[1],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[586],
- /* return matcher indices */ &kMatcherIndices[55],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [136] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[587],
- /* return matcher indices */ &kMatcherIndices[55],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [137] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[588],
- /* return matcher indices */ &kMatcherIndices[55],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [138] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 2,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[0],
- /* parameters */ &kParameters[589],
- /* return matcher indices */ &kMatcherIndices[55],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [139] */
- /* num parameters */ 3,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[395],
- /* return matcher indices */ &kMatcherIndices[12],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [140] */
- /* num parameters */ 3,
- /* num open types */ 0,
- /* num open numbers */ 1,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[3],
- /* parameters */ &kParameters[398],
- /* return matcher indices */ &kMatcherIndices[21],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [141] */
- /* num parameters */ 3,
- /* num open types */ 0,
- /* num open numbers */ 1,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[3],
- /* parameters */ &kParameters[401],
- /* return matcher indices */ &kMatcherIndices[21],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [142] */
- /* num parameters */ 3,
- /* num open types */ 1,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[3],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[476],
- /* return matcher indices */ &kMatcherIndices[1],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [143] */
- /* num parameters */ 3,
- /* num open types */ 1,
- /* num open numbers */ 1,
- /* open types */ &kOpenTypes[3],
- /* open numbers */ &kOpenNumbers[3],
- /* parameters */ &kParameters[461],
- /* return matcher indices */ &kMatcherIndices[39],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [144] */
- /* num parameters */ 3,
- /* num open types */ 1,
- /* num open numbers */ 1,
- /* open types */ &kOpenTypes[3],
- /* open numbers */ &kOpenNumbers[3],
- /* parameters */ &kParameters[464],
- /* return matcher indices */ &kMatcherIndices[39],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [145] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[649],
- /* return matcher indices */ &kMatcherIndices[12],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [146] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 1,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[3],
- /* parameters */ &kParameters[648],
- /* return matcher indices */ &kMatcherIndices[21],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [147] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[652],
- /* return matcher indices */ &kMatcherIndices[12],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [148] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 1,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[3],
- /* parameters */ &kParameters[651],
- /* return matcher indices */ &kMatcherIndices[21],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [149] */
- /* num parameters */ 1,
- /* num open types */ 1,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[0],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[655],
- /* return matcher indices */ &kMatcherIndices[1],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [150] */
- /* num parameters */ 1,
- /* num open types */ 1,
- /* num open numbers */ 1,
- /* open types */ &kOpenTypes[0],
- /* open numbers */ &kOpenNumbers[3],
- /* parameters */ &kParameters[654],
- /* return matcher indices */ &kMatcherIndices[39],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [151] */
- /* num parameters */ 2,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[511],
- /* return matcher indices */ &kMatcherIndices[12],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [152] */
- /* num parameters */ 2,
- /* num open types */ 0,
- /* num open numbers */ 1,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[3],
- /* parameters */ &kParameters[513],
- /* return matcher indices */ &kMatcherIndices[12],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [153] */
- /* num parameters */ 1,
- /* num open types */ 1,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[0],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[657],
- /* return matcher indices */ &kMatcherIndices[1],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [154] */
- /* num parameters */ 1,
- /* num open types */ 1,
- /* num open numbers */ 1,
- /* open types */ &kOpenTypes[0],
- /* open numbers */ &kOpenNumbers[3],
- /* parameters */ &kParameters[656],
- /* return matcher indices */ &kMatcherIndices[39],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [155] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[646],
- /* return matcher indices */ &kMatcherIndices[12],
- /* supported_stages */ PipelineStageSet(PipelineStage::kFragment),
- /* is_deprecated */ false,
- },
- {
- /* [156] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 1,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[3],
- /* parameters */ &kParameters[645],
- /* return matcher indices */ &kMatcherIndices[21],
- /* supported_stages */ PipelineStageSet(PipelineStage::kFragment),
- /* is_deprecated */ false,
- },
- {
- /* [157] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[644],
- /* return matcher indices */ &kMatcherIndices[12],
- /* supported_stages */ PipelineStageSet(PipelineStage::kFragment),
- /* is_deprecated */ false,
- },
- {
- /* [158] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 1,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[3],
- /* parameters */ &kParameters[643],
- /* return matcher indices */ &kMatcherIndices[21],
- /* supported_stages */ PipelineStageSet(PipelineStage::kFragment),
- /* is_deprecated */ false,
- },
- {
- /* [159] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[642],
- /* return matcher indices */ &kMatcherIndices[12],
- /* supported_stages */ PipelineStageSet(PipelineStage::kFragment),
- /* is_deprecated */ false,
- },
- {
- /* [160] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 1,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[3],
- /* parameters */ &kParameters[641],
- /* return matcher indices */ &kMatcherIndices[21],
- /* supported_stages */ PipelineStageSet(PipelineStage::kFragment),
- /* is_deprecated */ false,
- },
- {
- /* [161] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[640],
- /* return matcher indices */ &kMatcherIndices[12],
- /* supported_stages */ PipelineStageSet(PipelineStage::kFragment),
- /* is_deprecated */ false,
- },
- {
- /* [162] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 1,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[3],
- /* parameters */ &kParameters[639],
- /* return matcher indices */ &kMatcherIndices[21],
- /* supported_stages */ PipelineStageSet(PipelineStage::kFragment),
- /* is_deprecated */ false,
- },
- {
- /* [163] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[638],
- /* return matcher indices */ &kMatcherIndices[12],
- /* supported_stages */ PipelineStageSet(PipelineStage::kFragment),
- /* is_deprecated */ false,
- },
- {
- /* [164] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 1,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[3],
- /* parameters */ &kParameters[637],
- /* return matcher indices */ &kMatcherIndices[21],
- /* supported_stages */ PipelineStageSet(PipelineStage::kFragment),
- /* is_deprecated */ false,
- },
- {
- /* [165] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[636],
- /* return matcher indices */ &kMatcherIndices[12],
- /* supported_stages */ PipelineStageSet(PipelineStage::kFragment),
- /* is_deprecated */ false,
- },
- {
- /* [166] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 1,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[3],
- /* parameters */ &kParameters[635],
- /* return matcher indices */ &kMatcherIndices[21],
- /* supported_stages */ PipelineStageSet(PipelineStage::kFragment),
- /* is_deprecated */ false,
- },
- {
- /* [167] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[634],
- /* return matcher indices */ &kMatcherIndices[12],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [168] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 1,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[3],
- /* parameters */ &kParameters[633],
- /* return matcher indices */ &kMatcherIndices[21],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [169] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[632],
- /* return matcher indices */ &kMatcherIndices[12],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [170] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 1,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[3],
- /* parameters */ &kParameters[631],
- /* return matcher indices */ &kMatcherIndices[21],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [171] */
- /* num parameters */ 3,
- /* num open types */ 1,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[0],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[377],
- /* return matcher indices */ &kMatcherIndices[1],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [172] */
- /* num parameters */ 3,
- /* num open types */ 1,
- /* num open numbers */ 1,
- /* open types */ &kOpenTypes[0],
- /* open numbers */ &kOpenNumbers[3],
- /* parameters */ &kParameters[371],
- /* return matcher indices */ &kMatcherIndices[39],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [173] */
- /* num parameters */ 1,
- /* num open types */ 1,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[0],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[659],
- /* return matcher indices */ &kMatcherIndices[1],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [174] */
- /* num parameters */ 1,
- /* num open types */ 1,
- /* num open numbers */ 1,
- /* open types */ &kOpenTypes[0],
- /* open numbers */ &kOpenNumbers[3],
- /* parameters */ &kParameters[658],
- /* return matcher indices */ &kMatcherIndices[39],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [175] */
- /* num parameters */ 1,
- /* num open types */ 1,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[0],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[630],
- /* return matcher indices */ &kMatcherIndices[1],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [176] */
- /* num parameters */ 1,
- /* num open types */ 1,
- /* num open numbers */ 1,
- /* open types */ &kOpenTypes[0],
- /* open numbers */ &kOpenNumbers[3],
- /* parameters */ &kParameters[629],
- /* return matcher indices */ &kMatcherIndices[39],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [177] */
- /* num parameters */ 1,
- /* num open types */ 1,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[0],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[628],
- /* return matcher indices */ &kMatcherIndices[1],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [178] */
- /* num parameters */ 1,
- /* num open types */ 1,
- /* num open numbers */ 1,
- /* open types */ &kOpenTypes[0],
- /* open numbers */ &kOpenNumbers[3],
- /* parameters */ &kParameters[627],
- /* return matcher indices */ &kMatcherIndices[39],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [179] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[626],
- /* return matcher indices */ &kMatcherIndices[12],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [180] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 1,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[3],
- /* parameters */ &kParameters[625],
- /* return matcher indices */ &kMatcherIndices[21],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [181] */
- /* num parameters */ 3,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[359],
- /* return matcher indices */ &kMatcherIndices[12],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [182] */
- /* num parameters */ 3,
- /* num open types */ 0,
- /* num open numbers */ 1,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[3],
- /* parameters */ &kParameters[356],
- /* return matcher indices */ &kMatcherIndices[21],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [183] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[624],
- /* return matcher indices */ &kMatcherIndices[12],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [184] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 1,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[3],
- /* parameters */ &kParameters[623],
- /* return matcher indices */ &kMatcherIndices[21],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [185] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[622],
- /* return matcher indices */ &kMatcherIndices[138],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [186] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 1,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[3],
- /* parameters */ &kParameters[621],
- /* return matcher indices */ &kMatcherIndices[101],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [187] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[620],
- /* return matcher indices */ &kMatcherIndices[12],
- /* supported_stages */ PipelineStageSet(PipelineStage::kFragment),
- /* is_deprecated */ false,
- },
- {
- /* [188] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 1,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[3],
- /* parameters */ &kParameters[619],
- /* return matcher indices */ &kMatcherIndices[21],
- /* supported_stages */ PipelineStageSet(PipelineStage::kFragment),
- /* is_deprecated */ false,
- },
- {
- /* [189] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[618],
- /* return matcher indices */ &kMatcherIndices[12],
- /* supported_stages */ PipelineStageSet(PipelineStage::kFragment),
- /* is_deprecated */ false,
- },
- {
- /* [190] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 1,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[3],
- /* parameters */ &kParameters[617],
- /* return matcher indices */ &kMatcherIndices[21],
- /* supported_stages */ PipelineStageSet(PipelineStage::kFragment),
- /* is_deprecated */ false,
- },
- {
- /* [191] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[616],
- /* return matcher indices */ &kMatcherIndices[12],
- /* supported_stages */ PipelineStageSet(PipelineStage::kFragment),
- /* is_deprecated */ false,
- },
- {
- /* [192] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 1,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[3],
- /* parameters */ &kParameters[615],
- /* return matcher indices */ &kMatcherIndices[21],
- /* supported_stages */ PipelineStageSet(PipelineStage::kFragment),
- /* is_deprecated */ false,
- },
- {
- /* [193] */
- /* num parameters */ 4,
- /* num open types */ 1,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[0],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[274],
- /* return matcher indices */ &kMatcherIndices[1],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [194] */
- /* num parameters */ 4,
- /* num open types */ 1,
- /* num open numbers */ 1,
- /* open types */ &kOpenTypes[0],
- /* open numbers */ &kOpenNumbers[3],
- /* parameters */ &kParameters[342],
- /* return matcher indices */ &kMatcherIndices[39],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [195] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[614],
- /* return matcher indices */ &kMatcherIndices[12],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [196] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 1,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[3],
- /* parameters */ &kParameters[613],
- /* return matcher indices */ &kMatcherIndices[21],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [197] */
- /* num parameters */ 2,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[515],
- /* return matcher indices */ &kMatcherIndices[12],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [198] */
- /* num parameters */ 2,
- /* num open types */ 0,
- /* num open numbers */ 1,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[3],
- /* parameters */ &kParameters[517],
- /* return matcher indices */ &kMatcherIndices[21],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [199] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[610],
- /* return matcher indices */ &kMatcherIndices[12],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [200] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 1,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[3],
- /* parameters */ &kParameters[609],
- /* return matcher indices */ &kMatcherIndices[12],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [201] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[608],
- /* return matcher indices */ &kMatcherIndices[12],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [202] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 1,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[3],
- /* parameters */ &kParameters[607],
- /* return matcher indices */ &kMatcherIndices[21],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [203] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[606],
- /* return matcher indices */ &kMatcherIndices[12],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [204] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 1,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[3],
- /* parameters */ &kParameters[605],
- /* return matcher indices */ &kMatcherIndices[21],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [205] */
- /* num parameters */ 2,
- /* num open types */ 1,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[1],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[531],
- /* return matcher indices */ &kMatcherIndices[1],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [206] */
- /* num parameters */ 2,
- /* num open types */ 1,
- /* num open numbers */ 1,
- /* open types */ &kOpenTypes[1],
- /* open numbers */ &kOpenNumbers[3],
- /* parameters */ &kParameters[519],
- /* return matcher indices */ &kMatcherIndices[39],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [207] */
- /* num parameters */ 2,
- /* num open types */ 1,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[1],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[521],
- /* return matcher indices */ &kMatcherIndices[1],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [208] */
- /* num parameters */ 2,
- /* num open types */ 1,
- /* num open numbers */ 1,
- /* open types */ &kOpenTypes[1],
- /* open numbers */ &kOpenNumbers[3],
- /* parameters */ &kParameters[527],
- /* return matcher indices */ &kMatcherIndices[39],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [209] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[661],
- /* return matcher indices */ &kMatcherIndices[12],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [210] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 1,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[3],
- /* parameters */ &kParameters[660],
- /* return matcher indices */ &kMatcherIndices[21],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [211] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[583],
- /* return matcher indices */ &kMatcherIndices[137],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [212] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 1,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[3],
- /* parameters */ &kParameters[604],
- /* return matcher indices */ &kMatcherIndices[103],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [213] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[663],
- /* return matcher indices */ &kMatcherIndices[12],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [214] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 1,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[3],
- /* parameters */ &kParameters[662],
- /* return matcher indices */ &kMatcherIndices[21],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [215] */
- /* num parameters */ 3,
- /* num open types */ 1,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[1],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[413],
- /* return matcher indices */ &kMatcherIndices[1],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [216] */
- /* num parameters */ 3,
- /* num open types */ 1,
- /* num open numbers */ 1,
- /* open types */ &kOpenTypes[1],
- /* open numbers */ &kOpenNumbers[3],
- /* parameters */ &kParameters[410],
- /* return matcher indices */ &kMatcherIndices[39],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [217] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[665],
- /* return matcher indices */ &kMatcherIndices[12],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [218] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 1,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[3],
- /* parameters */ &kParameters[664],
- /* return matcher indices */ &kMatcherIndices[21],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [219] */
- /* num parameters */ 3,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[380],
- /* return matcher indices */ &kMatcherIndices[12],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [220] */
- /* num parameters */ 3,
- /* num open types */ 0,
- /* num open numbers */ 1,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[3],
- /* parameters */ &kParameters[473],
- /* return matcher indices */ &kMatcherIndices[21],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [221] */
- /* num parameters */ 1,
- /* num open types */ 1,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[1],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[600],
- /* return matcher indices */ &kMatcherIndices[55],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [222] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[601],
- /* return matcher indices */ &kMatcherIndices[55],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [223] */
- /* num parameters */ 2,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[505],
- /* return matcher indices */ &kMatcherIndices[12],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [224] */
- /* num parameters */ 2,
- /* num open types */ 0,
- /* num open numbers */ 1,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[3],
- /* parameters */ &kParameters[507],
- /* return matcher indices */ &kMatcherIndices[21],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [225] */
- /* num parameters */ 2,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[537],
- /* return matcher indices */ &kMatcherIndices[12],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [226] */
- /* num parameters */ 2,
- /* num open types */ 0,
- /* num open numbers */ 1,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[3],
- /* parameters */ &kParameters[539],
- /* return matcher indices */ &kMatcherIndices[21],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [227] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[572],
- /* return matcher indices */ &kMatcherIndices[12],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [228] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 1,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[3],
- /* parameters */ &kParameters[571],
- /* return matcher indices */ &kMatcherIndices[21],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [229] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[689],
- /* return matcher indices */ &kMatcherIndices[12],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [230] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 1,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[3],
- /* parameters */ &kParameters[668],
- /* return matcher indices */ &kMatcherIndices[21],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [231] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[671],
- /* return matcher indices */ &kMatcherIndices[12],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [232] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 1,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[3],
- /* parameters */ &kParameters[670],
- /* return matcher indices */ &kMatcherIndices[21],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [233] */
- /* num parameters */ 1,
- /* num open types */ 1,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[0],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[569],
- /* return matcher indices */ &kMatcherIndices[1],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [234] */
- /* num parameters */ 1,
- /* num open types */ 1,
- /* num open numbers */ 1,
- /* open types */ &kOpenTypes[0],
- /* open numbers */ &kOpenNumbers[3],
- /* parameters */ &kParameters[568],
- /* return matcher indices */ &kMatcherIndices[39],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [235] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[567],
- /* return matcher indices */ &kMatcherIndices[12],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [236] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 1,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[3],
- /* parameters */ &kParameters[574],
- /* return matcher indices */ &kMatcherIndices[21],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [237] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[674],
- /* return matcher indices */ &kMatcherIndices[10],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [238] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 1,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[3],
- /* parameters */ &kParameters[673],
- /* return matcher indices */ &kMatcherIndices[10],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [239] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[684],
- /* return matcher indices */ &kMatcherIndices[12],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [240] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 1,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[3],
- /* parameters */ &kParameters[685],
- /* return matcher indices */ &kMatcherIndices[21],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [241] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[686],
- /* return matcher indices */ &kMatcherIndices[12],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [242] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 1,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[3],
- /* parameters */ &kParameters[687],
- /* return matcher indices */ &kMatcherIndices[21],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [243] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[560],
- /* return matcher indices */ &kMatcherIndices[12],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [244] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 1,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[3],
- /* parameters */ &kParameters[688],
- /* return matcher indices */ &kMatcherIndices[21],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [245] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[676],
- /* return matcher indices */ &kMatcherIndices[10],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [246] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 1,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[3],
- /* parameters */ &kParameters[675],
- /* return matcher indices */ &kMatcherIndices[10],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [247] */
- /* num parameters */ 3,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[470],
- /* return matcher indices */ &kMatcherIndices[12],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ true,
- },
- {
- /* [248] */
- /* num parameters */ 3,
- /* num open types */ 0,
- /* num open numbers */ 1,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[3],
- /* parameters */ &kParameters[467],
- /* return matcher indices */ &kMatcherIndices[21],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ true,
- },
- {
- /* [249] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[683],
- /* return matcher indices */ &kMatcherIndices[12],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [250] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 1,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[3],
- /* parameters */ &kParameters[682],
- /* return matcher indices */ &kMatcherIndices[21],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [251] */
- /* num parameters */ 2,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[495],
- /* return matcher indices */ &kMatcherIndices[12],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [252] */
- /* num parameters */ 2,
- /* num open types */ 0,
- /* num open numbers */ 1,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[3],
- /* parameters */ &kParameters[497],
- /* return matcher indices */ &kMatcherIndices[21],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [253] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[678],
- /* return matcher indices */ &kMatcherIndices[12],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [254] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 1,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[3],
- /* parameters */ &kParameters[677],
- /* return matcher indices */ &kMatcherIndices[21],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [255] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[680],
- /* return matcher indices */ &kMatcherIndices[12],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [256] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 1,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[3],
- /* parameters */ &kParameters[669],
- /* return matcher indices */ &kMatcherIndices[21],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [257] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[667],
- /* return matcher indices */ &kMatcherIndices[12],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [258] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 1,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[3],
- /* parameters */ &kParameters[666],
- /* return matcher indices */ &kMatcherIndices[21],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [259] */
- /* num parameters */ 1,
- /* num open types */ 1,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[1],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[647],
- /* return matcher indices */ &kMatcherIndices[1],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [260] */
- /* num parameters */ 1,
- /* num open types */ 1,
- /* num open numbers */ 1,
- /* open types */ &kOpenTypes[1],
- /* open numbers */ &kOpenNumbers[3],
- /* parameters */ &kParameters[679],
- /* return matcher indices */ &kMatcherIndices[39],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [261] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 2,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[2],
- /* parameters */ &kParameters[653],
- /* return matcher indices */ &kMatcherIndices[9],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [262] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[612],
- /* return matcher indices */ &kMatcherIndices[87],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [263] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[611],
- /* return matcher indices */ &kMatcherIndices[87],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [264] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[603],
- /* return matcher indices */ &kMatcherIndices[87],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [265] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[602],
- /* return matcher indices */ &kMatcherIndices[81],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [266] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[553],
- /* return matcher indices */ &kMatcherIndices[81],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [267] */
- /* num parameters */ 0,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[690],
- /* return matcher indices */ nullptr,
- /* supported_stages */ PipelineStageSet(PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [268] */
- /* num parameters */ 0,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[690],
- /* return matcher indices */ nullptr,
- /* supported_stages */ PipelineStageSet(PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [269] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[577],
- /* return matcher indices */ &kMatcherIndices[43],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [270] */
- /* num parameters */ 3,
- /* num open types */ 0,
- /* num open numbers */ 1,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[3],
- /* parameters */ &kParameters[443],
- /* return matcher indices */ &kMatcherIndices[21],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [271] */
- /* num parameters */ 2,
- /* num open types */ 0,
- /* num open numbers */ 1,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[3],
- /* parameters */ &kParameters[543],
- /* return matcher indices */ &kMatcherIndices[21],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [272] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[575],
- /* return matcher indices */ &kMatcherIndices[43],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [273] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[576],
- /* return matcher indices */ &kMatcherIndices[43],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [274] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[578],
- /* return matcher indices */ &kMatcherIndices[43],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [275] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[579],
- /* return matcher indices */ &kMatcherIndices[43],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [276] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 1,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[3],
- /* parameters */ &kParameters[580],
- /* return matcher indices */ &kMatcherIndices[21],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [277] */
- /* num parameters */ 3,
- /* num open types */ 0,
- /* num open numbers */ 1,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[3],
- /* parameters */ &kParameters[365],
- /* return matcher indices */ &kMatcherIndices[21],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [278] */
- /* num parameters */ 2,
- /* num open types */ 1,
- /* num open numbers */ 1,
- /* open types */ &kOpenTypes[1],
- /* open numbers */ &kOpenNumbers[3],
- /* parameters */ &kParameters[503],
- /* return matcher indices */ &kMatcherIndices[1],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [279] */
- /* num parameters */ 1,
- /* num open types */ 0,
- /* num open numbers */ 1,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[3],
- /* parameters */ &kParameters[650],
- /* return matcher indices */ &kMatcherIndices[12],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [280] */
- /* num parameters */ 2,
- /* num open types */ 0,
- /* num open numbers */ 0,
- /* open types */ &kOpenTypes[4],
- /* open numbers */ &kOpenNumbers[6],
- /* parameters */ &kParameters[509],
- /* return matcher indices */ &kMatcherIndices[91],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [281] */
- /* num parameters */ 1,
- /* num open types */ 1,
- /* num open numbers */ 1,
- /* open types */ &kOpenTypes[2],
- /* open numbers */ &kOpenNumbers[4],
- /* parameters */ &kParameters[672],
- /* return matcher indices */ &kMatcherIndices[43],
- /* supported_stages */ PipelineStageSet(PipelineStage::kVertex, PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [282] */
- /* num parameters */ 1,
- /* num open types */ 1,
- /* num open numbers */ 1,
- /* open types */ &kOpenTypes[0],
- /* open numbers */ &kOpenNumbers[5],
- /* parameters */ &kParameters[681],
- /* return matcher indices */ &kMatcherIndices[1],
- /* supported_stages */ PipelineStageSet(PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [283] */
- /* num parameters */ 2,
- /* num open types */ 1,
- /* num open numbers */ 1,
- /* open types */ &kOpenTypes[0],
- /* open numbers */ &kOpenNumbers[5],
- /* parameters */ &kParameters[493],
- /* return matcher indices */ nullptr,
- /* supported_stages */ PipelineStageSet(PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [284] */
- /* num parameters */ 2,
- /* num open types */ 1,
- /* num open numbers */ 1,
- /* open types */ &kOpenTypes[0],
- /* open numbers */ &kOpenNumbers[5],
- /* parameters */ &kParameters[491],
- /* return matcher indices */ &kMatcherIndices[1],
- /* supported_stages */ PipelineStageSet(PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [285] */
- /* num parameters */ 2,
- /* num open types */ 1,
- /* num open numbers */ 1,
- /* open types */ &kOpenTypes[0],
- /* open numbers */ &kOpenNumbers[5],
- /* parameters */ &kParameters[489],
- /* return matcher indices */ &kMatcherIndices[1],
- /* supported_stages */ PipelineStageSet(PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [286] */
- /* num parameters */ 2,
- /* num open types */ 1,
- /* num open numbers */ 1,
- /* open types */ &kOpenTypes[0],
- /* open numbers */ &kOpenNumbers[5],
- /* parameters */ &kParameters[487],
- /* return matcher indices */ &kMatcherIndices[1],
- /* supported_stages */ PipelineStageSet(PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [287] */
- /* num parameters */ 2,
- /* num open types */ 1,
- /* num open numbers */ 1,
- /* open types */ &kOpenTypes[0],
- /* open numbers */ &kOpenNumbers[5],
- /* parameters */ &kParameters[485],
- /* return matcher indices */ &kMatcherIndices[1],
- /* supported_stages */ PipelineStageSet(PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [288] */
- /* num parameters */ 2,
- /* num open types */ 1,
- /* num open numbers */ 1,
- /* open types */ &kOpenTypes[0],
- /* open numbers */ &kOpenNumbers[5],
- /* parameters */ &kParameters[483],
- /* return matcher indices */ &kMatcherIndices[1],
- /* supported_stages */ PipelineStageSet(PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [289] */
- /* num parameters */ 2,
- /* num open types */ 1,
- /* num open numbers */ 1,
- /* open types */ &kOpenTypes[0],
- /* open numbers */ &kOpenNumbers[5],
- /* parameters */ &kParameters[481],
- /* return matcher indices */ &kMatcherIndices[1],
- /* supported_stages */ PipelineStageSet(PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [290] */
- /* num parameters */ 2,
- /* num open types */ 1,
- /* num open numbers */ 1,
- /* open types */ &kOpenTypes[0],
- /* open numbers */ &kOpenNumbers[5],
- /* parameters */ &kParameters[525],
- /* return matcher indices */ &kMatcherIndices[1],
- /* supported_stages */ PipelineStageSet(PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [291] */
- /* num parameters */ 2,
- /* num open types */ 1,
- /* num open numbers */ 1,
- /* open types */ &kOpenTypes[0],
- /* open numbers */ &kOpenNumbers[5],
- /* parameters */ &kParameters[499],
- /* return matcher indices */ &kMatcherIndices[1],
- /* supported_stages */ PipelineStageSet(PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
- {
- /* [292] */
- /* num parameters */ 3,
- /* num open types */ 1,
- /* num open numbers */ 1,
- /* open types */ &kOpenTypes[0],
- /* open numbers */ &kOpenNumbers[5],
- /* parameters */ &kParameters[353],
- /* return matcher indices */ &kMatcherIndices[105],
- /* supported_stages */ PipelineStageSet(PipelineStage::kFragment, PipelineStage::kCompute),
- /* is_deprecated */ false,
- },
-};
-
-constexpr BuiltinInfo kBuiltins[] = {
- {
- /* [0] */
- /* fn abs<T : fiu32>(T) -> T */
- /* fn abs<N : num, T : fiu32>(vec<N, T>) -> vec<N, T> */
- /* num overloads */ 2,
- /* overloads */ &kOverloads[259],
- },
- {
- /* [1] */
- /* fn acos(f32) -> f32 */
- /* fn acos<N : num>(vec<N, f32>) -> vec<N, f32> */
- /* num overloads */ 2,
- /* overloads */ &kOverloads[253],
- },
- {
- /* [2] */
- /* fn all(bool) -> bool */
- /* fn all<N : num>(vec<N, bool>) -> bool */
- /* num overloads */ 2,
- /* overloads */ &kOverloads[245],
- },
- {
- /* [3] */
- /* fn any(bool) -> bool */
- /* fn any<N : num>(vec<N, bool>) -> bool */
- /* num overloads */ 2,
- /* overloads */ &kOverloads[237],
- },
- {
- /* [4] */
- /* fn arrayLength<T, A : access>(ptr<storage, array<T>, A>) -> u32 */
- /* num overloads */ 1,
- /* overloads */ &kOverloads[281],
- },
- {
- /* [5] */
- /* fn asin(f32) -> f32 */
- /* fn asin<N : num>(vec<N, f32>) -> vec<N, f32> */
- /* num overloads */ 2,
- /* overloads */ &kOverloads[231],
- },
- {
- /* [6] */
- /* fn atan(f32) -> f32 */
- /* fn atan<N : num>(vec<N, f32>) -> vec<N, f32> */
- /* num overloads */ 2,
- /* overloads */ &kOverloads[229],
- },
- {
- /* [7] */
- /* fn atan2(f32, f32) -> f32 */
- /* fn atan2<N : num>(vec<N, f32>, vec<N, f32>) -> vec<N, f32> */
- /* num overloads */ 2,
- /* overloads */ &kOverloads[223],
- },
- {
- /* [8] */
- /* fn ceil(f32) -> f32 */
- /* fn ceil<N : num>(vec<N, f32>) -> vec<N, f32> */
- /* num overloads */ 2,
- /* overloads */ &kOverloads[217],
- },
- {
- /* [9] */
- /* fn clamp<T : fiu32>(T, T, T) -> T */
- /* fn clamp<N : num, T : fiu32>(vec<N, T>, vec<N, T>, vec<N, T>) -> vec<N, T> */
- /* num overloads */ 2,
- /* overloads */ &kOverloads[215],
- },
- {
- /* [10] */
- /* fn cos(f32) -> f32 */
- /* fn cos<N : num>(vec<N, f32>) -> vec<N, f32> */
- /* num overloads */ 2,
- /* overloads */ &kOverloads[213],
- },
- {
- /* [11] */
- /* fn cosh(f32) -> f32 */
- /* fn cosh<N : num>(vec<N, f32>) -> vec<N, f32> */
- /* num overloads */ 2,
- /* overloads */ &kOverloads[209],
- },
- {
- /* [12] */
- /* fn countLeadingZeros<T : iu32>(T) -> T */
- /* fn countLeadingZeros<N : num, T : iu32>(vec<N, T>) -> vec<N, T> */
- /* num overloads */ 2,
- /* overloads */ &kOverloads[173],
- },
- {
- /* [13] */
- /* fn countOneBits<T : iu32>(T) -> T */
- /* fn countOneBits<N : num, T : iu32>(vec<N, T>) -> vec<N, T> */
- /* num overloads */ 2,
- /* overloads */ &kOverloads[153],
- },
- {
- /* [14] */
- /* fn countTrailingZeros<T : iu32>(T) -> T */
- /* fn countTrailingZeros<N : num, T : iu32>(vec<N, T>) -> vec<N, T> */
- /* num overloads */ 2,
- /* overloads */ &kOverloads[149],
- },
- {
- /* [15] */
- /* fn cross(vec3<f32>, vec3<f32>) -> vec3<f32> */
- /* num overloads */ 1,
- /* overloads */ &kOverloads[280],
- },
- {
- /* [16] */
- /* fn degrees(f32) -> f32 */
- /* fn degrees<N : num>(vec<N, f32>) -> vec<N, f32> */
- /* num overloads */ 2,
- /* overloads */ &kOverloads[147],
- },
- {
- /* [17] */
- /* fn determinant<N : num>(mat<N, N, f32>) -> f32 */
- /* num overloads */ 1,
- /* overloads */ &kOverloads[279],
- },
- {
- /* [18] */
- /* fn distance(f32, f32) -> f32 */
- /* fn distance<N : num>(vec<N, f32>, vec<N, f32>) -> f32 */
- /* num overloads */ 2,
- /* overloads */ &kOverloads[151],
- },
- {
- /* [19] */
- /* fn dot<N : num, T : fiu32>(vec<N, T>, vec<N, T>) -> T */
- /* num overloads */ 1,
- /* overloads */ &kOverloads[278],
- },
- {
- /* [20] */
- /* fn dpdx(f32) -> f32 */
- /* fn dpdx<N : num>(vec<N, f32>) -> vec<N, f32> */
- /* num overloads */ 2,
- /* overloads */ &kOverloads[155],
- },
- {
- /* [21] */
- /* fn dpdxCoarse(f32) -> f32 */
- /* fn dpdxCoarse<N : num>(vec<N, f32>) -> vec<N, f32> */
- /* num overloads */ 2,
- /* overloads */ &kOverloads[157],
- },
- {
- /* [22] */
- /* fn dpdxFine(f32) -> f32 */
- /* fn dpdxFine<N : num>(vec<N, f32>) -> vec<N, f32> */
- /* num overloads */ 2,
- /* overloads */ &kOverloads[159],
- },
- {
- /* [23] */
- /* fn dpdy(f32) -> f32 */
- /* fn dpdy<N : num>(vec<N, f32>) -> vec<N, f32> */
- /* num overloads */ 2,
- /* overloads */ &kOverloads[161],
- },
- {
- /* [24] */
- /* fn dpdyCoarse(f32) -> f32 */
- /* fn dpdyCoarse<N : num>(vec<N, f32>) -> vec<N, f32> */
- /* num overloads */ 2,
- /* overloads */ &kOverloads[163],
- },
- {
- /* [25] */
- /* fn dpdyFine(f32) -> f32 */
- /* fn dpdyFine<N : num>(vec<N, f32>) -> vec<N, f32> */
- /* num overloads */ 2,
- /* overloads */ &kOverloads[165],
- },
- {
- /* [26] */
- /* fn exp(f32) -> f32 */
- /* fn exp<N : num>(vec<N, f32>) -> vec<N, f32> */
- /* num overloads */ 2,
- /* overloads */ &kOverloads[167],
- },
- {
- /* [27] */
- /* fn exp2(f32) -> f32 */
- /* fn exp2<N : num>(vec<N, f32>) -> vec<N, f32> */
- /* num overloads */ 2,
- /* overloads */ &kOverloads[169],
- },
- {
- /* [28] */
- /* fn extractBits<T : iu32>(T, u32, u32) -> T */
- /* fn extractBits<N : num, T : iu32>(vec<N, T>, u32, u32) -> vec<N, T> */
- /* num overloads */ 2,
- /* overloads */ &kOverloads[171],
- },
- {
- /* [29] */
- /* fn faceForward<N : num>(vec<N, f32>, vec<N, f32>, vec<N, f32>) -> vec<N, f32> */
- /* num overloads */ 1,
- /* overloads */ &kOverloads[277],
- },
- {
- /* [30] */
- /* fn firstLeadingBit<T : iu32>(T) -> T */
- /* fn firstLeadingBit<N : num, T : iu32>(vec<N, T>) -> vec<N, T> */
- /* num overloads */ 2,
- /* overloads */ &kOverloads[175],
- },
- {
- /* [31] */
- /* fn firstTrailingBit<T : iu32>(T) -> T */
- /* fn firstTrailingBit<N : num, T : iu32>(vec<N, T>) -> vec<N, T> */
- /* num overloads */ 2,
- /* overloads */ &kOverloads[177],
- },
- {
- /* [32] */
- /* fn floor(f32) -> f32 */
- /* fn floor<N : num>(vec<N, f32>) -> vec<N, f32> */
- /* num overloads */ 2,
- /* overloads */ &kOverloads[179],
- },
- {
- /* [33] */
- /* fn fma(f32, f32, f32) -> f32 */
- /* fn fma<N : num>(vec<N, f32>, vec<N, f32>, vec<N, f32>) -> vec<N, f32> */
- /* num overloads */ 2,
- /* overloads */ &kOverloads[181],
- },
- {
- /* [34] */
- /* fn fract(f32) -> f32 */
- /* fn fract<N : num>(vec<N, f32>) -> vec<N, f32> */
- /* num overloads */ 2,
- /* overloads */ &kOverloads[183],
- },
- {
- /* [35] */
- /* fn frexp(f32) -> __frexp_result */
- /* fn frexp<N : num>(vec<N, f32>) -> __frexp_result_vec<N> */
- /* num overloads */ 2,
- /* overloads */ &kOverloads[185],
- },
- {
- /* [36] */
- /* fn fwidth(f32) -> f32 */
- /* fn fwidth<N : num>(vec<N, f32>) -> vec<N, f32> */
- /* num overloads */ 2,
- /* overloads */ &kOverloads[187],
- },
- {
- /* [37] */
- /* fn fwidthCoarse(f32) -> f32 */
- /* fn fwidthCoarse<N : num>(vec<N, f32>) -> vec<N, f32> */
- /* num overloads */ 2,
- /* overloads */ &kOverloads[189],
- },
- {
- /* [38] */
- /* fn fwidthFine(f32) -> f32 */
- /* fn fwidthFine<N : num>(vec<N, f32>) -> vec<N, f32> */
- /* num overloads */ 2,
- /* overloads */ &kOverloads[191],
- },
- {
- /* [39] */
- /* fn insertBits<T : iu32>(T, T, u32, u32) -> T */
- /* fn insertBits<N : num, T : iu32>(vec<N, T>, vec<N, T>, u32, u32) -> vec<N, T> */
- /* num overloads */ 2,
- /* overloads */ &kOverloads[193],
- },
- {
- /* [40] */
- /* fn inverseSqrt(f32) -> f32 */
- /* fn inverseSqrt<N : num>(vec<N, f32>) -> vec<N, f32> */
- /* num overloads */ 2,
- /* overloads */ &kOverloads[195],
- },
- {
- /* [41] */
- /* fn ldexp(f32, i32) -> f32 */
- /* fn ldexp<N : num>(vec<N, f32>, vec<N, i32>) -> vec<N, f32> */
- /* num overloads */ 2,
- /* overloads */ &kOverloads[197],
- },
- {
- /* [42] */
- /* fn length(f32) -> f32 */
- /* fn length<N : num>(vec<N, f32>) -> f32 */
- /* num overloads */ 2,
- /* overloads */ &kOverloads[199],
- },
- {
- /* [43] */
- /* fn log(f32) -> f32 */
- /* fn log<N : num>(vec<N, f32>) -> vec<N, f32> */
- /* num overloads */ 2,
- /* overloads */ &kOverloads[201],
- },
- {
- /* [44] */
- /* fn log2(f32) -> f32 */
- /* fn log2<N : num>(vec<N, f32>) -> vec<N, f32> */
- /* num overloads */ 2,
- /* overloads */ &kOverloads[203],
- },
- {
- /* [45] */
- /* fn max<T : fiu32>(T, T) -> T */
- /* fn max<N : num, T : fiu32>(vec<N, T>, vec<N, T>) -> vec<N, T> */
- /* num overloads */ 2,
- /* overloads */ &kOverloads[205],
- },
- {
- /* [46] */
- /* fn min<T : fiu32>(T, T) -> T */
- /* fn min<N : num, T : fiu32>(vec<N, T>, vec<N, T>) -> vec<N, T> */
- /* num overloads */ 2,
- /* overloads */ &kOverloads[207],
- },
- {
- /* [47] */
- /* fn mix(f32, f32, f32) -> f32 */
- /* fn mix<N : num>(vec<N, f32>, vec<N, f32>, vec<N, f32>) -> vec<N, f32> */
- /* fn mix<N : num>(vec<N, f32>, vec<N, f32>, f32) -> vec<N, f32> */
- /* num overloads */ 3,
- /* overloads */ &kOverloads[139],
- },
- {
- /* [48] */
- /* fn modf(f32) -> __modf_result */
- /* fn modf<N : num>(vec<N, f32>) -> __modf_result_vec<N> */
- /* num overloads */ 2,
- /* overloads */ &kOverloads[211],
- },
- {
- /* [49] */
- /* fn normalize<N : num>(vec<N, f32>) -> vec<N, f32> */
- /* num overloads */ 1,
- /* overloads */ &kOverloads[276],
- },
- {
- /* [50] */
- /* fn pack2x16float(vec2<f32>) -> u32 */
- /* num overloads */ 1,
- /* overloads */ &kOverloads[275],
- },
- {
- /* [51] */
- /* fn pack2x16snorm(vec2<f32>) -> u32 */
- /* num overloads */ 1,
- /* overloads */ &kOverloads[274],
- },
- {
- /* [52] */
- /* fn pack2x16unorm(vec2<f32>) -> u32 */
- /* num overloads */ 1,
- /* overloads */ &kOverloads[269],
- },
- {
- /* [53] */
- /* fn pack4x8snorm(vec4<f32>) -> u32 */
- /* num overloads */ 1,
- /* overloads */ &kOverloads[273],
- },
- {
- /* [54] */
- /* fn pack4x8unorm(vec4<f32>) -> u32 */
- /* num overloads */ 1,
- /* overloads */ &kOverloads[272],
- },
- {
- /* [55] */
- /* fn pow(f32, f32) -> f32 */
- /* fn pow<N : num>(vec<N, f32>, vec<N, f32>) -> vec<N, f32> */
- /* num overloads */ 2,
- /* overloads */ &kOverloads[225],
- },
- {
- /* [56] */
- /* fn radians(f32) -> f32 */
- /* fn radians<N : num>(vec<N, f32>) -> vec<N, f32> */
- /* num overloads */ 2,
- /* overloads */ &kOverloads[227],
- },
- {
- /* [57] */
- /* fn reflect<N : num>(vec<N, f32>, vec<N, f32>) -> vec<N, f32> */
- /* num overloads */ 1,
- /* overloads */ &kOverloads[271],
- },
- {
- /* [58] */
- /* fn refract<N : num>(vec<N, f32>, vec<N, f32>, f32) -> vec<N, f32> */
- /* num overloads */ 1,
- /* overloads */ &kOverloads[270],
- },
- {
- /* [59] */
- /* fn reverseBits<T : iu32>(T) -> T */
- /* fn reverseBits<N : num, T : iu32>(vec<N, T>) -> vec<N, T> */
- /* num overloads */ 2,
- /* overloads */ &kOverloads[233],
- },
- {
- /* [60] */
- /* fn round(f32) -> f32 */
- /* fn round<N : num>(vec<N, f32>) -> vec<N, f32> */
- /* num overloads */ 2,
- /* overloads */ &kOverloads[235],
- },
- {
- /* [61] */
- /* fn select<T : scalar>(T, T, bool) -> T */
- /* fn select<T : scalar, N : num>(vec<N, T>, vec<N, T>, bool) -> vec<N, T> */
- /* fn select<N : num, T : scalar>(vec<N, T>, vec<N, T>, vec<N, bool>) -> vec<N, T> */
- /* num overloads */ 3,
- /* overloads */ &kOverloads[142],
- },
- {
- /* [62] */
- /* fn sign(f32) -> f32 */
- /* fn sign<N : num>(vec<N, f32>) -> vec<N, f32> */
- /* num overloads */ 2,
- /* overloads */ &kOverloads[239],
- },
- {
- /* [63] */
- /* fn sin(f32) -> f32 */
- /* fn sin<N : num>(vec<N, f32>) -> vec<N, f32> */
- /* num overloads */ 2,
- /* overloads */ &kOverloads[241],
- },
- {
- /* [64] */
- /* fn sinh(f32) -> f32 */
- /* fn sinh<N : num>(vec<N, f32>) -> vec<N, f32> */
- /* num overloads */ 2,
- /* overloads */ &kOverloads[243],
- },
- {
- /* [65] */
- /* fn smoothstep(f32, f32, f32) -> f32 */
- /* fn smoothstep<N : num>(vec<N, f32>, vec<N, f32>, vec<N, f32>) -> vec<N, f32> */
- /* num overloads */ 2,
- /* overloads */ &kOverloads[219],
- },
- {
- /* [66] */
- /* fn smoothStep(f32, f32, f32) -> f32 */
- /* fn smoothStep<N : num>(vec<N, f32>, vec<N, f32>, vec<N, f32>) -> vec<N, f32> */
- /* num overloads */ 2,
- /* overloads */ &kOverloads[247],
- },
- {
- /* [67] */
- /* fn sqrt(f32) -> f32 */
- /* fn sqrt<N : num>(vec<N, f32>) -> vec<N, f32> */
- /* num overloads */ 2,
- /* overloads */ &kOverloads[249],
- },
- {
- /* [68] */
- /* fn step(f32, f32) -> f32 */
- /* fn step<N : num>(vec<N, f32>, vec<N, f32>) -> vec<N, f32> */
- /* num overloads */ 2,
- /* overloads */ &kOverloads[251],
- },
- {
- /* [69] */
- /* fn storageBarrier() */
- /* num overloads */ 1,
- /* overloads */ &kOverloads[268],
- },
- {
- /* [70] */
- /* fn tan(f32) -> f32 */
- /* fn tan<N : num>(vec<N, f32>) -> vec<N, f32> */
- /* num overloads */ 2,
- /* overloads */ &kOverloads[255],
- },
- {
- /* [71] */
- /* fn tanh(f32) -> f32 */
- /* fn tanh<N : num>(vec<N, f32>) -> vec<N, f32> */
- /* num overloads */ 2,
- /* overloads */ &kOverloads[257],
- },
- {
- /* [72] */
- /* fn transpose<M : num, N : num>(mat<M, N, f32>) -> mat<N, M, f32> */
- /* num overloads */ 1,
- /* overloads */ &kOverloads[261],
- },
- {
- /* [73] */
- /* fn trunc(f32) -> f32 */
- /* fn trunc<N : num>(vec<N, f32>) -> vec<N, f32> */
- /* num overloads */ 2,
- /* overloads */ &kOverloads[145],
- },
- {
- /* [74] */
- /* fn unpack2x16float(u32) -> vec2<f32> */
- /* num overloads */ 1,
- /* overloads */ &kOverloads[262],
- },
- {
- /* [75] */
- /* fn unpack2x16snorm(u32) -> vec2<f32> */
- /* num overloads */ 1,
- /* overloads */ &kOverloads[263],
- },
- {
- /* [76] */
- /* fn unpack2x16unorm(u32) -> vec2<f32> */
- /* num overloads */ 1,
- /* overloads */ &kOverloads[264],
- },
- {
- /* [77] */
- /* fn unpack4x8snorm(u32) -> vec4<f32> */
- /* num overloads */ 1,
- /* overloads */ &kOverloads[265],
- },
- {
- /* [78] */
- /* fn unpack4x8unorm(u32) -> vec4<f32> */
- /* num overloads */ 1,
- /* overloads */ &kOverloads[266],
- },
- {
- /* [79] */
- /* fn workgroupBarrier() */
- /* num overloads */ 1,
- /* overloads */ &kOverloads[267],
- },
- {
- /* [80] */
- /* fn textureDimensions<T : fiu32>(texture: texture_1d<T>) -> i32 */
- /* fn textureDimensions<T : fiu32>(texture: texture_1d<T>, level: i32) -> i32 */
- /* fn textureDimensions<T : fiu32>(texture: texture_2d<T>) -> vec2<i32> */
- /* fn textureDimensions<T : fiu32>(texture: texture_2d<T>, level: i32) -> vec2<i32> */
- /* fn textureDimensions<T : fiu32>(texture: texture_2d_array<T>) -> vec2<i32> */
- /* fn textureDimensions<T : fiu32>(texture: texture_2d_array<T>, level: i32) -> vec2<i32> */
- /* fn textureDimensions<T : fiu32>(texture: texture_3d<T>) -> vec3<i32> */
- /* fn textureDimensions<T : fiu32>(texture: texture_3d<T>, level: i32) -> vec3<i32> */
- /* fn textureDimensions<T : fiu32>(texture: texture_cube<T>) -> vec2<i32> */
- /* fn textureDimensions<T : fiu32>(texture: texture_cube<T>, level: i32) -> vec2<i32> */
- /* fn textureDimensions<T : fiu32>(texture: texture_cube_array<T>) -> vec2<i32> */
- /* fn textureDimensions<T : fiu32>(texture: texture_cube_array<T>, level: i32) -> vec2<i32> */
- /* fn textureDimensions<T : fiu32>(texture: texture_multisampled_2d<T>) -> vec2<i32> */
- /* fn textureDimensions(texture: texture_depth_2d) -> vec2<i32> */
- /* fn textureDimensions(texture: texture_depth_2d, level: i32) -> vec2<i32> */
- /* fn textureDimensions(texture: texture_depth_2d_array) -> vec2<i32> */
- /* fn textureDimensions(texture: texture_depth_2d_array, level: i32) -> vec2<i32> */
- /* fn textureDimensions(texture: texture_depth_cube) -> vec2<i32> */
- /* fn textureDimensions(texture: texture_depth_cube, level: i32) -> vec2<i32> */
- /* fn textureDimensions(texture: texture_depth_cube_array) -> vec2<i32> */
- /* fn textureDimensions(texture: texture_depth_cube_array, level: i32) -> vec2<i32> */
- /* fn textureDimensions(texture: texture_depth_multisampled_2d) -> vec2<i32> */
- /* fn textureDimensions<F : texel_format, A : write_only>(texture: texture_storage_1d<F, A>) -> i32 */
- /* fn textureDimensions<F : texel_format, A : write_only>(texture: texture_storage_2d<F, A>) -> vec2<i32> */
- /* fn textureDimensions<F : texel_format, A : write_only>(texture: texture_storage_2d_array<F, A>) -> vec2<i32> */
- /* fn textureDimensions<F : texel_format, A : write_only>(texture: texture_storage_3d<F, A>) -> vec3<i32> */
- /* fn textureDimensions(texture: texture_external) -> vec2<i32> */
- /* num overloads */ 27,
- /* overloads */ &kOverloads[0],
- },
- {
- /* [81] */
- /* fn textureGather<T : fiu32>(component: i32, texture: texture_2d<T>, sampler: sampler, coords: vec2<f32>) -> vec4<T> */
- /* fn textureGather<T : fiu32>(component: i32, texture: texture_2d<T>, sampler: sampler, coords: vec2<f32>, offset: vec2<i32>) -> vec4<T> */
- /* fn textureGather<T : fiu32>(component: i32, texture: texture_2d_array<T>, sampler: sampler, coords: vec2<f32>, array_index: i32) -> vec4<T> */
- /* fn textureGather<T : fiu32>(component: i32, texture: texture_2d_array<T>, sampler: sampler, coords: vec2<f32>, array_index: i32, offset: vec2<i32>) -> vec4<T> */
- /* fn textureGather<T : fiu32>(component: i32, texture: texture_cube<T>, sampler: sampler, coords: vec3<f32>) -> vec4<T> */
- /* fn textureGather<T : fiu32>(component: i32, texture: texture_cube_array<T>, sampler: sampler, coords: vec3<f32>, array_index: i32) -> vec4<T> */
- /* fn textureGather(texture: texture_depth_2d, sampler: sampler, coords: vec2<f32>) -> vec4<f32> */
- /* fn textureGather(texture: texture_depth_2d, sampler: sampler, coords: vec2<f32>, offset: vec2<i32>) -> vec4<f32> */
- /* fn textureGather(texture: texture_depth_2d_array, sampler: sampler, coords: vec2<f32>, array_index: i32) -> vec4<f32> */
- /* fn textureGather(texture: texture_depth_2d_array, sampler: sampler, coords: vec2<f32>, array_index: i32, offset: vec2<i32>) -> vec4<f32> */
- /* fn textureGather(texture: texture_depth_cube, sampler: sampler, coords: vec3<f32>) -> vec4<f32> */
- /* fn textureGather(texture: texture_depth_cube_array, sampler: sampler, coords: vec3<f32>, array_index: i32) -> vec4<f32> */
- /* num overloads */ 12,
- /* overloads */ &kOverloads[57],
- },
- {
- /* [82] */
- /* fn textureGatherCompare(texture: texture_depth_2d, sampler: sampler_comparison, coords: vec2<f32>, depth_ref: f32) -> vec4<f32> */
- /* fn textureGatherCompare(texture: texture_depth_2d, sampler: sampler_comparison, coords: vec2<f32>, depth_ref: f32, offset: vec2<i32>) -> vec4<f32> */
- /* fn textureGatherCompare(texture: texture_depth_2d_array, sampler: sampler_comparison, coords: vec2<f32>, array_index: i32, depth_ref: f32) -> vec4<f32> */
- /* fn textureGatherCompare(texture: texture_depth_2d_array, sampler: sampler_comparison, coords: vec2<f32>, array_index: i32, depth_ref: f32, offset: vec2<i32>) -> vec4<f32> */
- /* fn textureGatherCompare(texture: texture_depth_cube, sampler: sampler_comparison, coords: vec3<f32>, depth_ref: f32) -> vec4<f32> */
- /* fn textureGatherCompare(texture: texture_depth_cube_array, sampler: sampler_comparison, coords: vec3<f32>, array_index: i32, depth_ref: f32) -> vec4<f32> */
- /* num overloads */ 6,
- /* overloads */ &kOverloads[116],
- },
- {
- /* [83] */
- /* fn textureNumLayers<T : fiu32>(texture: texture_2d_array<T>) -> i32 */
- /* fn textureNumLayers<T : fiu32>(texture: texture_cube_array<T>) -> i32 */
- /* fn textureNumLayers(texture: texture_depth_2d_array) -> i32 */
- /* fn textureNumLayers(texture: texture_depth_cube_array) -> i32 */
- /* fn textureNumLayers<F : texel_format, A : write_only>(texture: texture_storage_2d_array<F, A>) -> i32 */
- /* num overloads */ 5,
- /* overloads */ &kOverloads[134],
- },
- {
- /* [84] */
- /* fn textureNumLevels<T : fiu32>(texture: texture_1d<T>) -> i32 */
- /* fn textureNumLevels<T : fiu32>(texture: texture_2d<T>) -> i32 */
- /* fn textureNumLevels<T : fiu32>(texture: texture_2d_array<T>) -> i32 */
- /* fn textureNumLevels<T : fiu32>(texture: texture_3d<T>) -> i32 */
- /* fn textureNumLevels<T : fiu32>(texture: texture_cube<T>) -> i32 */
- /* fn textureNumLevels<T : fiu32>(texture: texture_cube_array<T>) -> i32 */
- /* fn textureNumLevels(texture: texture_depth_2d) -> i32 */
- /* fn textureNumLevels(texture: texture_depth_2d_array) -> i32 */
- /* fn textureNumLevels(texture: texture_depth_cube) -> i32 */
- /* fn textureNumLevels(texture: texture_depth_cube_array) -> i32 */
- /* num overloads */ 10,
- /* overloads */ &kOverloads[81],
- },
- {
- /* [85] */
- /* fn textureNumSamples<T : fiu32>(texture: texture_multisampled_2d<T>) -> i32 */
- /* fn textureNumSamples(texture: texture_depth_multisampled_2d) -> i32 */
- /* num overloads */ 2,
- /* overloads */ &kOverloads[221],
- },
- {
- /* [86] */
- /* fn textureSample(texture: texture_1d<f32>, sampler: sampler, coords: f32) -> vec4<f32> */
- /* fn textureSample(texture: texture_2d<f32>, sampler: sampler, coords: vec2<f32>) -> vec4<f32> */
- /* fn textureSample(texture: texture_2d<f32>, sampler: sampler, coords: vec2<f32>, offset: vec2<i32>) -> vec4<f32> */
- /* fn textureSample(texture: texture_2d_array<f32>, sampler: sampler, coords: vec2<f32>, array_index: i32) -> vec4<f32> */
- /* fn textureSample(texture: texture_2d_array<f32>, sampler: sampler, coords: vec2<f32>, array_index: i32, offset: vec2<i32>) -> vec4<f32> */
- /* fn textureSample(texture: texture_3d<f32>, sampler: sampler, coords: vec3<f32>) -> vec4<f32> */
- /* fn textureSample(texture: texture_3d<f32>, sampler: sampler, coords: vec3<f32>, offset: vec3<i32>) -> vec4<f32> */
- /* fn textureSample(texture: texture_cube<f32>, sampler: sampler, coords: vec3<f32>) -> vec4<f32> */
- /* fn textureSample(texture: texture_cube_array<f32>, sampler: sampler, coords: vec3<f32>, array_index: i32) -> vec4<f32> */
- /* fn textureSample(texture: texture_depth_2d, sampler: sampler, coords: vec2<f32>) -> f32 */
- /* fn textureSample(texture: texture_depth_2d, sampler: sampler, coords: vec2<f32>, offset: vec2<i32>) -> f32 */
- /* fn textureSample(texture: texture_depth_2d_array, sampler: sampler, coords: vec2<f32>, array_index: i32) -> f32 */
- /* fn textureSample(texture: texture_depth_2d_array, sampler: sampler, coords: vec2<f32>, array_index: i32, offset: vec2<i32>) -> f32 */
- /* fn textureSample(texture: texture_depth_cube, sampler: sampler, coords: vec3<f32>) -> f32 */
- /* fn textureSample(texture: texture_depth_cube_array, sampler: sampler, coords: vec3<f32>, array_index: i32) -> f32 */
- /* num overloads */ 15,
- /* overloads */ &kOverloads[42],
- },
- {
- /* [87] */
- /* fn textureSampleBias(texture: texture_2d<f32>, sampler: sampler, coords: vec2<f32>, bias: f32) -> vec4<f32> */
- /* fn textureSampleBias(texture: texture_2d<f32>, sampler: sampler, coords: vec2<f32>, bias: f32, offset: vec2<i32>) -> vec4<f32> */
- /* fn textureSampleBias(texture: texture_2d_array<f32>, sampler: sampler, coords: vec2<f32>, array_index: i32, bias: f32) -> vec4<f32> */
- /* fn textureSampleBias(texture: texture_2d_array<f32>, sampler: sampler, coords: vec2<f32>, array_index: i32, bias: f32, offset: vec2<i32>) -> vec4<f32> */
- /* fn textureSampleBias(texture: texture_3d<f32>, sampler: sampler, coords: vec3<f32>, bias: f32) -> vec4<f32> */
- /* fn textureSampleBias(texture: texture_3d<f32>, sampler: sampler, coords: vec3<f32>, bias: f32, offset: vec3<i32>) -> vec4<f32> */
- /* fn textureSampleBias(texture: texture_cube<f32>, sampler: sampler, coords: vec3<f32>, bias: f32) -> vec4<f32> */
- /* fn textureSampleBias(texture: texture_cube_array<f32>, sampler: sampler, coords: vec3<f32>, array_index: i32, bias: f32) -> vec4<f32> */
- /* num overloads */ 8,
- /* overloads */ &kOverloads[100],
- },
- {
- /* [88] */
- /* fn textureSampleCompare(texture: texture_depth_2d, sampler: sampler_comparison, coords: vec2<f32>, depth_ref: f32) -> f32 */
- /* fn textureSampleCompare(texture: texture_depth_2d, sampler: sampler_comparison, coords: vec2<f32>, depth_ref: f32, offset: vec2<i32>) -> f32 */
- /* fn textureSampleCompare(texture: texture_depth_2d_array, sampler: sampler_comparison, coords: vec2<f32>, array_index: i32, depth_ref: f32) -> f32 */
- /* fn textureSampleCompare(texture: texture_depth_2d_array, sampler: sampler_comparison, coords: vec2<f32>, array_index: i32, depth_ref: f32, offset: vec2<i32>) -> f32 */
- /* fn textureSampleCompare(texture: texture_depth_cube, sampler: sampler_comparison, coords: vec3<f32>, depth_ref: f32) -> f32 */
- /* fn textureSampleCompare(texture: texture_depth_cube_array, sampler: sampler_comparison, coords: vec3<f32>, array_index: i32, depth_ref: f32) -> f32 */
- /* num overloads */ 6,
- /* overloads */ &kOverloads[128],
- },
- {
- /* [89] */
- /* fn textureSampleCompareLevel(texture: texture_depth_2d, sampler: sampler_comparison, coords: vec2<f32>, depth_ref: f32) -> f32 */
- /* fn textureSampleCompareLevel(texture: texture_depth_2d, sampler: sampler_comparison, coords: vec2<f32>, depth_ref: f32, offset: vec2<i32>) -> f32 */
- /* fn textureSampleCompareLevel(texture: texture_depth_2d_array, sampler: sampler_comparison, coords: vec2<f32>, array_index: i32, depth_ref: f32) -> f32 */
- /* fn textureSampleCompareLevel(texture: texture_depth_2d_array, sampler: sampler_comparison, coords: vec2<f32>, array_index: i32, depth_ref: f32, offset: vec2<i32>) -> f32 */
- /* fn textureSampleCompareLevel(texture: texture_depth_cube, sampler: sampler_comparison, coords: vec3<f32>, depth_ref: f32) -> f32 */
- /* fn textureSampleCompareLevel(texture: texture_depth_cube_array, sampler: sampler_comparison, coords: vec3<f32>, array_index: i32, depth_ref: f32) -> f32 */
- /* num overloads */ 6,
- /* overloads */ &kOverloads[122],
- },
- {
- /* [90] */
- /* fn textureSampleGrad(texture: texture_2d<f32>, sampler: sampler, coords: vec2<f32>, ddx: vec2<f32>, ddy: vec2<f32>) -> vec4<f32> */
- /* fn textureSampleGrad(texture: texture_2d<f32>, sampler: sampler, coords: vec2<f32>, ddx: vec2<f32>, ddy: vec2<f32>, offset: vec2<i32>) -> vec4<f32> */
- /* fn textureSampleGrad(texture: texture_2d_array<f32>, sampler: sampler, coords: vec2<f32>, array_index: i32, ddx: vec2<f32>, ddy: vec2<f32>) -> vec4<f32> */
- /* fn textureSampleGrad(texture: texture_2d_array<f32>, sampler: sampler, coords: vec2<f32>, array_index: i32, ddx: vec2<f32>, ddy: vec2<f32>, offset: vec2<i32>) -> vec4<f32> */
- /* fn textureSampleGrad(texture: texture_3d<f32>, sampler: sampler, coords: vec3<f32>, ddx: vec3<f32>, ddy: vec3<f32>) -> vec4<f32> */
- /* fn textureSampleGrad(texture: texture_3d<f32>, sampler: sampler, coords: vec3<f32>, ddx: vec3<f32>, ddy: vec3<f32>, offset: vec3<i32>) -> vec4<f32> */
- /* fn textureSampleGrad(texture: texture_cube<f32>, sampler: sampler, coords: vec3<f32>, ddx: vec3<f32>, ddy: vec3<f32>) -> vec4<f32> */
- /* fn textureSampleGrad(texture: texture_cube_array<f32>, sampler: sampler, coords: vec3<f32>, array_index: i32, ddx: vec3<f32>, ddy: vec3<f32>) -> vec4<f32> */
- /* num overloads */ 8,
- /* overloads */ &kOverloads[108],
- },
- {
- /* [91] */
- /* fn textureSampleLevel(texture: texture_2d<f32>, sampler: sampler, coords: vec2<f32>, level: f32) -> vec4<f32> */
- /* fn textureSampleLevel(texture: texture_2d<f32>, sampler: sampler, coords: vec2<f32>, level: f32, offset: vec2<i32>) -> vec4<f32> */
- /* fn textureSampleLevel(texture: texture_2d_array<f32>, sampler: sampler, coords: vec2<f32>, array_index: i32, level: f32) -> vec4<f32> */
- /* fn textureSampleLevel(texture: texture_2d_array<f32>, sampler: sampler, coords: vec2<f32>, array_index: i32, level: f32, offset: vec2<i32>) -> vec4<f32> */
- /* fn textureSampleLevel(texture: texture_3d<f32>, sampler: sampler, coords: vec3<f32>, level: f32) -> vec4<f32> */
- /* fn textureSampleLevel(texture: texture_3d<f32>, sampler: sampler, coords: vec3<f32>, level: f32, offset: vec3<i32>) -> vec4<f32> */
- /* fn textureSampleLevel(texture: texture_cube<f32>, sampler: sampler, coords: vec3<f32>, level: f32) -> vec4<f32> */
- /* fn textureSampleLevel(texture: texture_cube_array<f32>, sampler: sampler, coords: vec3<f32>, array_index: i32, level: f32) -> vec4<f32> */
- /* fn textureSampleLevel(texture: texture_depth_2d, sampler: sampler, coords: vec2<f32>, level: i32) -> f32 */
- /* fn textureSampleLevel(texture: texture_depth_2d, sampler: sampler, coords: vec2<f32>, level: i32, offset: vec2<i32>) -> f32 */
- /* fn textureSampleLevel(texture: texture_depth_2d_array, sampler: sampler, coords: vec2<f32>, array_index: i32, level: i32) -> f32 */
- /* fn textureSampleLevel(texture: texture_depth_2d_array, sampler: sampler, coords: vec2<f32>, array_index: i32, level: i32, offset: vec2<i32>) -> f32 */
- /* fn textureSampleLevel(texture: texture_depth_cube, sampler: sampler, coords: vec3<f32>, level: i32) -> f32 */
- /* fn textureSampleLevel(texture: texture_depth_cube_array, sampler: sampler, coords: vec3<f32>, array_index: i32, level: i32) -> f32 */
- /* fn textureSampleLevel(texture: texture_external, sampler: sampler, coords: vec2<f32>) -> vec4<f32> */
- /* num overloads */ 15,
- /* overloads */ &kOverloads[27],
- },
- {
- /* [92] */
- /* fn textureStore(texture: texture_storage_1d<f32_texel_format, write>, coords: i32, value: vec4<f32>) */
- /* fn textureStore(texture: texture_storage_2d<f32_texel_format, write>, coords: vec2<i32>, value: vec4<f32>) */
- /* fn textureStore(texture: texture_storage_2d_array<f32_texel_format, write>, coords: vec2<i32>, array_index: i32, value: vec4<f32>) */
- /* fn textureStore(texture: texture_storage_3d<f32_texel_format, write>, coords: vec3<i32>, value: vec4<f32>) */
- /* fn textureStore(texture: texture_storage_1d<i32_texel_format, write>, coords: i32, value: vec4<i32>) */
- /* fn textureStore(texture: texture_storage_2d<i32_texel_format, write>, coords: vec2<i32>, value: vec4<i32>) */
- /* fn textureStore(texture: texture_storage_2d_array<i32_texel_format, write>, coords: vec2<i32>, array_index: i32, value: vec4<i32>) */
- /* fn textureStore(texture: texture_storage_3d<i32_texel_format, write>, coords: vec3<i32>, value: vec4<i32>) */
- /* fn textureStore(texture: texture_storage_1d<u32_texel_format, write>, coords: i32, value: vec4<u32>) */
- /* fn textureStore(texture: texture_storage_2d<u32_texel_format, write>, coords: vec2<i32>, value: vec4<u32>) */
- /* fn textureStore(texture: texture_storage_2d_array<u32_texel_format, write>, coords: vec2<i32>, array_index: i32, value: vec4<u32>) */
- /* fn textureStore(texture: texture_storage_3d<u32_texel_format, write>, coords: vec3<i32>, value: vec4<u32>) */
- /* num overloads */ 12,
- /* overloads */ &kOverloads[69],
- },
- {
- /* [93] */
- /* fn textureLoad<T : fiu32>(texture: texture_1d<T>, coords: i32, level: i32) -> vec4<T> */
- /* fn textureLoad<T : fiu32>(texture: texture_2d<T>, coords: vec2<i32>, level: i32) -> vec4<T> */
- /* fn textureLoad<T : fiu32>(texture: texture_2d_array<T>, coords: vec2<i32>, array_index: i32, level: i32) -> vec4<T> */
- /* fn textureLoad<T : fiu32>(texture: texture_3d<T>, coords: vec3<i32>, level: i32) -> vec4<T> */
- /* fn textureLoad<T : fiu32>(texture: texture_multisampled_2d<T>, coords: vec2<i32>, sample_index: i32) -> vec4<T> */
- /* fn textureLoad(texture: texture_depth_2d, coords: vec2<i32>, level: i32) -> f32 */
- /* fn textureLoad(texture: texture_depth_2d_array, coords: vec2<i32>, array_index: i32, level: i32) -> f32 */
- /* fn textureLoad(texture: texture_depth_multisampled_2d, coords: vec2<i32>, sample_index: i32) -> f32 */
- /* fn textureLoad(texture: texture_external, coords: vec2<i32>) -> vec4<f32> */
- /* num overloads */ 9,
- /* overloads */ &kOverloads[91],
- },
- {
- /* [94] */
- /* fn atomicLoad<T : iu32, S : workgroup_or_storage>(ptr<S, atomic<T>, read_write>) -> T */
- /* num overloads */ 1,
- /* overloads */ &kOverloads[282],
- },
- {
- /* [95] */
- /* fn atomicStore<T : iu32, S : workgroup_or_storage>(ptr<S, atomic<T>, read_write>, T) */
- /* num overloads */ 1,
- /* overloads */ &kOverloads[283],
- },
- {
- /* [96] */
- /* fn atomicAdd<T : iu32, S : workgroup_or_storage>(ptr<S, atomic<T>, read_write>, T) -> T */
- /* num overloads */ 1,
- /* overloads */ &kOverloads[284],
- },
- {
- /* [97] */
- /* fn atomicSub<T : iu32, S : workgroup_or_storage>(ptr<S, atomic<T>, read_write>, T) -> T */
- /* num overloads */ 1,
- /* overloads */ &kOverloads[285],
- },
- {
- /* [98] */
- /* fn atomicMax<T : iu32, S : workgroup_or_storage>(ptr<S, atomic<T>, read_write>, T) -> T */
- /* num overloads */ 1,
- /* overloads */ &kOverloads[286],
- },
- {
- /* [99] */
- /* fn atomicMin<T : iu32, S : workgroup_or_storage>(ptr<S, atomic<T>, read_write>, T) -> T */
- /* num overloads */ 1,
- /* overloads */ &kOverloads[287],
- },
- {
- /* [100] */
- /* fn atomicAnd<T : iu32, S : workgroup_or_storage>(ptr<S, atomic<T>, read_write>, T) -> T */
- /* num overloads */ 1,
- /* overloads */ &kOverloads[288],
- },
- {
- /* [101] */
- /* fn atomicOr<T : iu32, S : workgroup_or_storage>(ptr<S, atomic<T>, read_write>, T) -> T */
- /* num overloads */ 1,
- /* overloads */ &kOverloads[289],
- },
- {
- /* [102] */
- /* fn atomicXor<T : iu32, S : workgroup_or_storage>(ptr<S, atomic<T>, read_write>, T) -> T */
- /* num overloads */ 1,
- /* overloads */ &kOverloads[290],
- },
- {
- /* [103] */
- /* fn atomicExchange<T : iu32, S : workgroup_or_storage>(ptr<S, atomic<T>, read_write>, T) -> T */
- /* num overloads */ 1,
- /* overloads */ &kOverloads[291],
- },
- {
- /* [104] */
- /* fn atomicCompareExchangeWeak<T : iu32, S : workgroup_or_storage>(ptr<S, atomic<T>, read_write>, T, T) -> vec2<T> */
- /* num overloads */ 1,
- /* overloads */ &kOverloads[292],
- },
-};
-
-// clang-format on
diff --git a/chromium/third_party/dawn/src/tint/builtin_table_test.cc b/chromium/third_party/dawn/src/tint/builtin_table_test.cc
deleted file mode 100644
index b8454d100f8..00000000000
--- a/chromium/third_party/dawn/src/tint/builtin_table_test.cc
+++ /dev/null
@@ -1,601 +0,0 @@
-// Copyright 2021 The Tint Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "src/tint/builtin_table.h"
-
-#include "gmock/gmock.h"
-#include "src/tint/program_builder.h"
-#include "src/tint/sem/atomic_type.h"
-#include "src/tint/sem/depth_multisampled_texture_type.h"
-#include "src/tint/sem/depth_texture_type.h"
-#include "src/tint/sem/external_texture_type.h"
-#include "src/tint/sem/multisampled_texture_type.h"
-#include "src/tint/sem/reference_type.h"
-#include "src/tint/sem/sampled_texture_type.h"
-#include "src/tint/sem/storage_texture_type.h"
-
-namespace tint {
-namespace {
-
-using ::testing::HasSubstr;
-
-using BuiltinType = sem::BuiltinType;
-using Parameter = sem::Parameter;
-using ParameterUsage = sem::ParameterUsage;
-
-class BuiltinTableTest : public testing::Test, public ProgramBuilder {
- public:
- std::unique_ptr<BuiltinTable> table = BuiltinTable::Create(*this);
-};
-
-TEST_F(BuiltinTableTest, MatchF32) {
- auto* f32 = create<sem::F32>();
- auto* result = table->Lookup(BuiltinType::kCos, {f32}, Source{});
- ASSERT_NE(result, nullptr) << Diagnostics().str();
- ASSERT_EQ(Diagnostics().str(), "");
- EXPECT_EQ(result->Type(), BuiltinType::kCos);
- EXPECT_EQ(result->ReturnType(), f32);
- ASSERT_EQ(result->Parameters().size(), 1u);
- EXPECT_EQ(result->Parameters()[0]->Type(), f32);
-}
-
-TEST_F(BuiltinTableTest, MismatchF32) {
- auto* i32 = create<sem::I32>();
- auto* result = table->Lookup(BuiltinType::kCos, {i32}, Source{});
- ASSERT_EQ(result, nullptr);
- ASSERT_THAT(Diagnostics().str(), HasSubstr("no matching call"));
-}
-
-TEST_F(BuiltinTableTest, MatchU32) {
- auto* f32 = create<sem::F32>();
- auto* u32 = create<sem::U32>();
- auto* vec2_f32 = create<sem::Vector>(f32, 2u);
- auto* result = table->Lookup(BuiltinType::kUnpack2x16float, {u32}, Source{});
- ASSERT_NE(result, nullptr) << Diagnostics().str();
- ASSERT_EQ(Diagnostics().str(), "");
- EXPECT_EQ(result->Type(), BuiltinType::kUnpack2x16float);
- EXPECT_EQ(result->ReturnType(), vec2_f32);
- ASSERT_EQ(result->Parameters().size(), 1u);
- EXPECT_EQ(result->Parameters()[0]->Type(), u32);
-}
-
-TEST_F(BuiltinTableTest, MismatchU32) {
- auto* f32 = create<sem::F32>();
- auto* result = table->Lookup(BuiltinType::kUnpack2x16float, {f32}, Source{});
- ASSERT_EQ(result, nullptr);
- ASSERT_THAT(Diagnostics().str(), HasSubstr("no matching call"));
-}
-
-TEST_F(BuiltinTableTest, MatchI32) {
- auto* f32 = create<sem::F32>();
- auto* i32 = create<sem::I32>();
- auto* vec4_f32 = create<sem::Vector>(f32, 4u);
- auto* tex = create<sem::SampledTexture>(ast::TextureDimension::k1d, f32);
- auto* result =
- table->Lookup(BuiltinType::kTextureLoad, {tex, i32, i32}, Source{});
- ASSERT_NE(result, nullptr) << Diagnostics().str();
- ASSERT_EQ(Diagnostics().str(), "");
- EXPECT_EQ(result->Type(), BuiltinType::kTextureLoad);
- EXPECT_EQ(result->ReturnType(), vec4_f32);
- ASSERT_EQ(result->Parameters().size(), 3u);
- EXPECT_EQ(result->Parameters()[0]->Type(), tex);
- EXPECT_EQ(result->Parameters()[0]->Usage(), ParameterUsage::kTexture);
- EXPECT_EQ(result->Parameters()[1]->Type(), i32);
- EXPECT_EQ(result->Parameters()[1]->Usage(), ParameterUsage::kCoords);
- EXPECT_EQ(result->Parameters()[2]->Type(), i32);
- EXPECT_EQ(result->Parameters()[2]->Usage(), ParameterUsage::kLevel);
-}
-
-TEST_F(BuiltinTableTest, MismatchI32) {
- auto* f32 = create<sem::F32>();
- auto* tex = create<sem::SampledTexture>(ast::TextureDimension::k1d, f32);
- auto* result = table->Lookup(BuiltinType::kTextureLoad, {tex, f32}, Source{});
- ASSERT_EQ(result, nullptr);
- ASSERT_THAT(Diagnostics().str(), HasSubstr("no matching call"));
-}
-
-TEST_F(BuiltinTableTest, MatchIU32AsI32) {
- auto* i32 = create<sem::I32>();
- auto* result = table->Lookup(BuiltinType::kCountOneBits, {i32}, Source{});
- ASSERT_NE(result, nullptr) << Diagnostics().str();
- ASSERT_EQ(Diagnostics().str(), "");
- EXPECT_EQ(result->Type(), BuiltinType::kCountOneBits);
- EXPECT_EQ(result->ReturnType(), i32);
- ASSERT_EQ(result->Parameters().size(), 1u);
- EXPECT_EQ(result->Parameters()[0]->Type(), i32);
-}
-
-TEST_F(BuiltinTableTest, MatchIU32AsU32) {
- auto* u32 = create<sem::U32>();
- auto* result = table->Lookup(BuiltinType::kCountOneBits, {u32}, Source{});
- ASSERT_NE(result, nullptr) << Diagnostics().str();
- ASSERT_EQ(Diagnostics().str(), "");
- EXPECT_EQ(result->Type(), BuiltinType::kCountOneBits);
- EXPECT_EQ(result->ReturnType(), u32);
- ASSERT_EQ(result->Parameters().size(), 1u);
- EXPECT_EQ(result->Parameters()[0]->Type(), u32);
-}
-
-TEST_F(BuiltinTableTest, MismatchIU32) {
- auto* f32 = create<sem::F32>();
- auto* result = table->Lookup(BuiltinType::kCountOneBits, {f32}, Source{});
- ASSERT_EQ(result, nullptr);
- ASSERT_THAT(Diagnostics().str(), HasSubstr("no matching call"));
-}
-
-TEST_F(BuiltinTableTest, MatchFIU32AsI32) {
- auto* i32 = create<sem::I32>();
- auto* result = table->Lookup(BuiltinType::kClamp, {i32, i32, i32}, Source{});
- ASSERT_NE(result, nullptr) << Diagnostics().str();
- ASSERT_EQ(Diagnostics().str(), "");
- EXPECT_EQ(result->Type(), BuiltinType::kClamp);
- EXPECT_EQ(result->ReturnType(), i32);
- ASSERT_EQ(result->Parameters().size(), 3u);
- EXPECT_EQ(result->Parameters()[0]->Type(), i32);
- EXPECT_EQ(result->Parameters()[1]->Type(), i32);
- EXPECT_EQ(result->Parameters()[2]->Type(), i32);
-}
-
-TEST_F(BuiltinTableTest, MatchFIU32AsU32) {
- auto* u32 = create<sem::U32>();
- auto* result = table->Lookup(BuiltinType::kClamp, {u32, u32, u32}, Source{});
- ASSERT_NE(result, nullptr) << Diagnostics().str();
- ASSERT_EQ(Diagnostics().str(), "");
- EXPECT_EQ(result->Type(), BuiltinType::kClamp);
- EXPECT_EQ(result->ReturnType(), u32);
- ASSERT_EQ(result->Parameters().size(), 3u);
- EXPECT_EQ(result->Parameters()[0]->Type(), u32);
- EXPECT_EQ(result->Parameters()[1]->Type(), u32);
- EXPECT_EQ(result->Parameters()[2]->Type(), u32);
-}
-
-TEST_F(BuiltinTableTest, MatchFIU32AsF32) {
- auto* f32 = create<sem::F32>();
- auto* result = table->Lookup(BuiltinType::kClamp, {f32, f32, f32}, Source{});
- ASSERT_NE(result, nullptr) << Diagnostics().str();
- ASSERT_EQ(Diagnostics().str(), "");
- EXPECT_EQ(result->Type(), BuiltinType::kClamp);
- EXPECT_EQ(result->ReturnType(), f32);
- ASSERT_EQ(result->Parameters().size(), 3u);
- EXPECT_EQ(result->Parameters()[0]->Type(), f32);
- EXPECT_EQ(result->Parameters()[1]->Type(), f32);
- EXPECT_EQ(result->Parameters()[2]->Type(), f32);
-}
-
-TEST_F(BuiltinTableTest, MismatchFIU32) {
- auto* bool_ = create<sem::Bool>();
- auto* result =
- table->Lookup(BuiltinType::kClamp, {bool_, bool_, bool_}, Source{});
- ASSERT_EQ(result, nullptr);
- ASSERT_THAT(Diagnostics().str(), HasSubstr("no matching call"));
-}
-
-TEST_F(BuiltinTableTest, MatchBool) {
- auto* f32 = create<sem::F32>();
- auto* bool_ = create<sem::Bool>();
- auto* result =
- table->Lookup(BuiltinType::kSelect, {f32, f32, bool_}, Source{});
- ASSERT_NE(result, nullptr) << Diagnostics().str();
- ASSERT_EQ(Diagnostics().str(), "");
- EXPECT_EQ(result->Type(), BuiltinType::kSelect);
- EXPECT_EQ(result->ReturnType(), f32);
- ASSERT_EQ(result->Parameters().size(), 3u);
- EXPECT_EQ(result->Parameters()[0]->Type(), f32);
- EXPECT_EQ(result->Parameters()[1]->Type(), f32);
- EXPECT_EQ(result->Parameters()[2]->Type(), bool_);
-}
-
-TEST_F(BuiltinTableTest, MismatchBool) {
- auto* f32 = create<sem::F32>();
- auto* result = table->Lookup(BuiltinType::kSelect, {f32, f32, f32}, Source{});
- ASSERT_EQ(result, nullptr);
- ASSERT_THAT(Diagnostics().str(), HasSubstr("no matching call"));
-}
-
-TEST_F(BuiltinTableTest, MatchPointer) {
- auto* i32 = create<sem::I32>();
- auto* atomicI32 = create<sem::Atomic>(i32);
- auto* ptr = create<sem::Pointer>(atomicI32, ast::StorageClass::kWorkgroup,
- ast::Access::kReadWrite);
- auto* result = table->Lookup(BuiltinType::kAtomicLoad, {ptr}, Source{});
- ASSERT_NE(result, nullptr) << Diagnostics().str();
- ASSERT_EQ(Diagnostics().str(), "");
- EXPECT_EQ(result->Type(), BuiltinType::kAtomicLoad);
- EXPECT_EQ(result->ReturnType(), i32);
- ASSERT_EQ(result->Parameters().size(), 1u);
- EXPECT_EQ(result->Parameters()[0]->Type(), ptr);
-}
-
-TEST_F(BuiltinTableTest, MismatchPointer) {
- auto* i32 = create<sem::I32>();
- auto* atomicI32 = create<sem::Atomic>(i32);
- auto* result = table->Lookup(BuiltinType::kAtomicLoad, {atomicI32}, Source{});
- ASSERT_EQ(result, nullptr);
- ASSERT_THAT(Diagnostics().str(), HasSubstr("no matching call"));
-}
-
-TEST_F(BuiltinTableTest, MatchArray) {
- auto* arr = create<sem::Array>(create<sem::U32>(), 0u, 4u, 4u, 4u, 4u);
- auto* arr_ptr = create<sem::Pointer>(arr, ast::StorageClass::kStorage,
- ast::Access::kReadWrite);
- auto* result = table->Lookup(BuiltinType::kArrayLength, {arr_ptr}, Source{});
- ASSERT_NE(result, nullptr) << Diagnostics().str();
- ASSERT_EQ(Diagnostics().str(), "");
- EXPECT_EQ(result->Type(), BuiltinType::kArrayLength);
- EXPECT_TRUE(result->ReturnType()->Is<sem::U32>());
- ASSERT_EQ(result->Parameters().size(), 1u);
- auto* param_type = result->Parameters()[0]->Type();
- ASSERT_TRUE(param_type->Is<sem::Pointer>());
- EXPECT_TRUE(param_type->As<sem::Pointer>()->StoreType()->Is<sem::Array>());
-}
-
-TEST_F(BuiltinTableTest, MismatchArray) {
- auto* f32 = create<sem::F32>();
- auto* result = table->Lookup(BuiltinType::kArrayLength, {f32}, Source{});
- ASSERT_EQ(result, nullptr);
- ASSERT_THAT(Diagnostics().str(), HasSubstr("no matching call"));
-}
-
-TEST_F(BuiltinTableTest, MatchSampler) {
- auto* f32 = create<sem::F32>();
- auto* vec2_f32 = create<sem::Vector>(f32, 2u);
- auto* vec4_f32 = create<sem::Vector>(f32, 4u);
- auto* tex = create<sem::SampledTexture>(ast::TextureDimension::k2d, f32);
- auto* sampler = create<sem::Sampler>(ast::SamplerKind::kSampler);
- auto* result = table->Lookup(BuiltinType::kTextureSample,
- {tex, sampler, vec2_f32}, Source{});
- ASSERT_NE(result, nullptr) << Diagnostics().str();
- ASSERT_EQ(Diagnostics().str(), "");
- EXPECT_EQ(result->Type(), BuiltinType::kTextureSample);
- EXPECT_EQ(result->ReturnType(), vec4_f32);
- ASSERT_EQ(result->Parameters().size(), 3u);
- EXPECT_EQ(result->Parameters()[0]->Type(), tex);
- EXPECT_EQ(result->Parameters()[0]->Usage(), ParameterUsage::kTexture);
- EXPECT_EQ(result->Parameters()[1]->Type(), sampler);
- EXPECT_EQ(result->Parameters()[1]->Usage(), ParameterUsage::kSampler);
- EXPECT_EQ(result->Parameters()[2]->Type(), vec2_f32);
- EXPECT_EQ(result->Parameters()[2]->Usage(), ParameterUsage::kCoords);
-}
-
-TEST_F(BuiltinTableTest, MismatchSampler) {
- auto* f32 = create<sem::F32>();
- auto* vec2_f32 = create<sem::Vector>(f32, 2u);
- auto* tex = create<sem::SampledTexture>(ast::TextureDimension::k2d, f32);
- auto* result = table->Lookup(BuiltinType::kTextureSample,
- {tex, f32, vec2_f32}, Source{});
- ASSERT_EQ(result, nullptr);
- ASSERT_THAT(Diagnostics().str(), HasSubstr("no matching call"));
-}
-
-TEST_F(BuiltinTableTest, MatchSampledTexture) {
- auto* i32 = create<sem::I32>();
- auto* f32 = create<sem::F32>();
- auto* vec2_i32 = create<sem::Vector>(i32, 2u);
- auto* vec4_f32 = create<sem::Vector>(f32, 4u);
- auto* tex = create<sem::SampledTexture>(ast::TextureDimension::k2d, f32);
- auto* result =
- table->Lookup(BuiltinType::kTextureLoad, {tex, vec2_i32, i32}, Source{});
- ASSERT_NE(result, nullptr) << Diagnostics().str();
- ASSERT_EQ(Diagnostics().str(), "");
- EXPECT_EQ(result->Type(), BuiltinType::kTextureLoad);
- EXPECT_EQ(result->ReturnType(), vec4_f32);
- ASSERT_EQ(result->Parameters().size(), 3u);
- EXPECT_EQ(result->Parameters()[0]->Type(), tex);
- EXPECT_EQ(result->Parameters()[0]->Usage(), ParameterUsage::kTexture);
- EXPECT_EQ(result->Parameters()[1]->Type(), vec2_i32);
- EXPECT_EQ(result->Parameters()[1]->Usage(), ParameterUsage::kCoords);
- EXPECT_EQ(result->Parameters()[2]->Type(), i32);
- EXPECT_EQ(result->Parameters()[2]->Usage(), ParameterUsage::kLevel);
-}
-
-TEST_F(BuiltinTableTest, MatchMultisampledTexture) {
- auto* i32 = create<sem::I32>();
- auto* f32 = create<sem::F32>();
- auto* vec2_i32 = create<sem::Vector>(i32, 2u);
- auto* vec4_f32 = create<sem::Vector>(f32, 4u);
- auto* tex = create<sem::MultisampledTexture>(ast::TextureDimension::k2d, f32);
- auto* result =
- table->Lookup(BuiltinType::kTextureLoad, {tex, vec2_i32, i32}, Source{});
- ASSERT_NE(result, nullptr) << Diagnostics().str();
- ASSERT_EQ(Diagnostics().str(), "");
- EXPECT_EQ(result->Type(), BuiltinType::kTextureLoad);
- EXPECT_EQ(result->ReturnType(), vec4_f32);
- ASSERT_EQ(result->Parameters().size(), 3u);
- EXPECT_EQ(result->Parameters()[0]->Type(), tex);
- EXPECT_EQ(result->Parameters()[0]->Usage(), ParameterUsage::kTexture);
- EXPECT_EQ(result->Parameters()[1]->Type(), vec2_i32);
- EXPECT_EQ(result->Parameters()[1]->Usage(), ParameterUsage::kCoords);
- EXPECT_EQ(result->Parameters()[2]->Type(), i32);
- EXPECT_EQ(result->Parameters()[2]->Usage(), ParameterUsage::kSampleIndex);
-}
-
-TEST_F(BuiltinTableTest, MatchDepthTexture) {
- auto* f32 = create<sem::F32>();
- auto* i32 = create<sem::I32>();
- auto* vec2_i32 = create<sem::Vector>(i32, 2u);
- auto* tex = create<sem::DepthTexture>(ast::TextureDimension::k2d);
- auto* result =
- table->Lookup(BuiltinType::kTextureLoad, {tex, vec2_i32, i32}, Source{});
- ASSERT_NE(result, nullptr) << Diagnostics().str();
- ASSERT_EQ(Diagnostics().str(), "");
- EXPECT_EQ(result->Type(), BuiltinType::kTextureLoad);
- EXPECT_EQ(result->ReturnType(), f32);
- ASSERT_EQ(result->Parameters().size(), 3u);
- EXPECT_EQ(result->Parameters()[0]->Type(), tex);
- EXPECT_EQ(result->Parameters()[0]->Usage(), ParameterUsage::kTexture);
- EXPECT_EQ(result->Parameters()[1]->Type(), vec2_i32);
- EXPECT_EQ(result->Parameters()[1]->Usage(), ParameterUsage::kCoords);
- EXPECT_EQ(result->Parameters()[2]->Type(), i32);
- EXPECT_EQ(result->Parameters()[2]->Usage(), ParameterUsage::kLevel);
-}
-
-TEST_F(BuiltinTableTest, MatchDepthMultisampledTexture) {
- auto* f32 = create<sem::F32>();
- auto* i32 = create<sem::I32>();
- auto* vec2_i32 = create<sem::Vector>(i32, 2u);
- auto* tex = create<sem::DepthMultisampledTexture>(ast::TextureDimension::k2d);
- auto* result =
- table->Lookup(BuiltinType::kTextureLoad, {tex, vec2_i32, i32}, Source{});
- ASSERT_NE(result, nullptr) << Diagnostics().str();
- ASSERT_EQ(Diagnostics().str(), "");
- EXPECT_EQ(result->Type(), BuiltinType::kTextureLoad);
- EXPECT_EQ(result->ReturnType(), f32);
- ASSERT_EQ(result->Parameters().size(), 3u);
- EXPECT_EQ(result->Parameters()[0]->Type(), tex);
- EXPECT_EQ(result->Parameters()[0]->Usage(), ParameterUsage::kTexture);
- EXPECT_EQ(result->Parameters()[1]->Type(), vec2_i32);
- EXPECT_EQ(result->Parameters()[1]->Usage(), ParameterUsage::kCoords);
- EXPECT_EQ(result->Parameters()[2]->Type(), i32);
- EXPECT_EQ(result->Parameters()[2]->Usage(), ParameterUsage::kSampleIndex);
-}
-
-TEST_F(BuiltinTableTest, MatchExternalTexture) {
- auto* f32 = create<sem::F32>();
- auto* i32 = create<sem::I32>();
- auto* vec2_i32 = create<sem::Vector>(i32, 2u);
- auto* vec4_f32 = create<sem::Vector>(f32, 4u);
- auto* tex = create<sem::ExternalTexture>();
- auto* result =
- table->Lookup(BuiltinType::kTextureLoad, {tex, vec2_i32}, Source{});
- ASSERT_NE(result, nullptr) << Diagnostics().str();
- ASSERT_EQ(Diagnostics().str(), "");
- EXPECT_EQ(result->Type(), BuiltinType::kTextureLoad);
- EXPECT_EQ(result->ReturnType(), vec4_f32);
- ASSERT_EQ(result->Parameters().size(), 2u);
- EXPECT_EQ(result->Parameters()[0]->Type(), tex);
- EXPECT_EQ(result->Parameters()[0]->Usage(), ParameterUsage::kTexture);
- EXPECT_EQ(result->Parameters()[1]->Type(), vec2_i32);
- EXPECT_EQ(result->Parameters()[1]->Usage(), ParameterUsage::kCoords);
-}
-
-TEST_F(BuiltinTableTest, MatchWOStorageTexture) {
- auto* f32 = create<sem::F32>();
- auto* i32 = create<sem::I32>();
- auto* vec2_i32 = create<sem::Vector>(i32, 2u);
- auto* vec4_f32 = create<sem::Vector>(f32, 4u);
- auto* subtype =
- sem::StorageTexture::SubtypeFor(ast::TexelFormat::kR32Float, Types());
- auto* tex = create<sem::StorageTexture>(ast::TextureDimension::k2d,
- ast::TexelFormat::kR32Float,
- ast::Access::kWrite, subtype);
-
- auto* result = table->Lookup(BuiltinType::kTextureStore,
- {tex, vec2_i32, vec4_f32}, Source{});
- ASSERT_NE(result, nullptr) << Diagnostics().str();
- ASSERT_EQ(Diagnostics().str(), "");
- EXPECT_EQ(result->Type(), BuiltinType::kTextureStore);
- EXPECT_TRUE(result->ReturnType()->Is<sem::Void>());
- ASSERT_EQ(result->Parameters().size(), 3u);
- EXPECT_EQ(result->Parameters()[0]->Type(), tex);
- EXPECT_EQ(result->Parameters()[0]->Usage(), ParameterUsage::kTexture);
- EXPECT_EQ(result->Parameters()[1]->Type(), vec2_i32);
- EXPECT_EQ(result->Parameters()[1]->Usage(), ParameterUsage::kCoords);
- EXPECT_EQ(result->Parameters()[2]->Type(), vec4_f32);
- EXPECT_EQ(result->Parameters()[2]->Usage(), ParameterUsage::kValue);
-}
-
-TEST_F(BuiltinTableTest, MismatchTexture) {
- auto* f32 = create<sem::F32>();
- auto* i32 = create<sem::I32>();
- auto* vec2_i32 = create<sem::Vector>(i32, 2u);
- auto* result =
- table->Lookup(BuiltinType::kTextureLoad, {f32, vec2_i32}, Source{});
- ASSERT_EQ(result, nullptr);
- ASSERT_THAT(Diagnostics().str(), HasSubstr("no matching call"));
-}
-
-TEST_F(BuiltinTableTest, ImplicitLoadOnReference) {
- auto* f32 = create<sem::F32>();
- auto* result =
- table->Lookup(BuiltinType::kCos,
- {create<sem::Reference>(f32, ast::StorageClass::kFunction,
- ast::Access::kReadWrite)},
- Source{});
- ASSERT_NE(result, nullptr) << Diagnostics().str();
- ASSERT_EQ(Diagnostics().str(), "");
- EXPECT_EQ(result->Type(), BuiltinType::kCos);
- EXPECT_EQ(result->ReturnType(), f32);
- ASSERT_EQ(result->Parameters().size(), 1u);
- EXPECT_EQ(result->Parameters()[0]->Type(), f32);
-}
-
-TEST_F(BuiltinTableTest, MatchOpenType) {
- auto* f32 = create<sem::F32>();
- auto* result = table->Lookup(BuiltinType::kClamp, {f32, f32, f32}, Source{});
- ASSERT_NE(result, nullptr) << Diagnostics().str();
- ASSERT_EQ(Diagnostics().str(), "");
- EXPECT_EQ(result->Type(), BuiltinType::kClamp);
- EXPECT_EQ(result->ReturnType(), f32);
- EXPECT_EQ(result->Parameters()[0]->Type(), f32);
- EXPECT_EQ(result->Parameters()[1]->Type(), f32);
- EXPECT_EQ(result->Parameters()[2]->Type(), f32);
-}
-
-TEST_F(BuiltinTableTest, MismatchOpenType) {
- auto* f32 = create<sem::F32>();
- auto* u32 = create<sem::U32>();
- auto* result = table->Lookup(BuiltinType::kClamp, {f32, u32, f32}, Source{});
- ASSERT_EQ(result, nullptr);
- ASSERT_THAT(Diagnostics().str(), HasSubstr("no matching call"));
-}
-
-TEST_F(BuiltinTableTest, MatchOpenSizeVector) {
- auto* f32 = create<sem::F32>();
- auto* vec2_f32 = create<sem::Vector>(f32, 2u);
- auto* result = table->Lookup(BuiltinType::kClamp,
- {vec2_f32, vec2_f32, vec2_f32}, Source{});
- ASSERT_NE(result, nullptr) << Diagnostics().str();
- ASSERT_EQ(Diagnostics().str(), "");
- EXPECT_EQ(result->Type(), BuiltinType::kClamp);
- EXPECT_EQ(result->ReturnType(), vec2_f32);
- ASSERT_EQ(result->Parameters().size(), 3u);
- EXPECT_EQ(result->Parameters()[0]->Type(), vec2_f32);
- EXPECT_EQ(result->Parameters()[1]->Type(), vec2_f32);
- EXPECT_EQ(result->Parameters()[2]->Type(), vec2_f32);
-}
-
-TEST_F(BuiltinTableTest, MismatchOpenSizeVector) {
- auto* f32 = create<sem::F32>();
- auto* u32 = create<sem::U32>();
- auto* vec2_f32 = create<sem::Vector>(f32, 2u);
- auto* result =
- table->Lookup(BuiltinType::kClamp, {vec2_f32, u32, vec2_f32}, Source{});
- ASSERT_EQ(result, nullptr);
- ASSERT_THAT(Diagnostics().str(), HasSubstr("no matching call"));
-}
-
-TEST_F(BuiltinTableTest, MatchOpenSizeMatrix) {
- auto* f32 = create<sem::F32>();
- auto* vec3_f32 = create<sem::Vector>(f32, 3u);
- auto* mat3_f32 = create<sem::Matrix>(vec3_f32, 3u);
- auto* result = table->Lookup(BuiltinType::kDeterminant, {mat3_f32}, Source{});
- ASSERT_NE(result, nullptr) << Diagnostics().str();
- ASSERT_EQ(Diagnostics().str(), "");
- EXPECT_EQ(result->Type(), BuiltinType::kDeterminant);
- EXPECT_EQ(result->ReturnType(), f32);
- ASSERT_EQ(result->Parameters().size(), 1u);
- EXPECT_EQ(result->Parameters()[0]->Type(), mat3_f32);
-}
-
-TEST_F(BuiltinTableTest, MismatchOpenSizeMatrix) {
- auto* f32 = create<sem::F32>();
- auto* vec2_f32 = create<sem::Vector>(f32, 2u);
- auto* mat3x2_f32 = create<sem::Matrix>(vec2_f32, 3u);
- auto* result =
- table->Lookup(BuiltinType::kDeterminant, {mat3x2_f32}, Source{});
- ASSERT_EQ(result, nullptr);
- ASSERT_THAT(Diagnostics().str(), HasSubstr("no matching call"));
-}
-
-TEST_F(BuiltinTableTest, OverloadOrderByNumberOfParameters) {
- // None of the arguments match, so expect the overloads with 2 parameters to
- // come first
- auto* bool_ = create<sem::Bool>();
- table->Lookup(BuiltinType::kTextureDimensions, {bool_, bool_}, Source{});
- ASSERT_EQ(Diagnostics().str(),
- R"(error: no matching call to textureDimensions(bool, bool)
-
-27 candidate functions:
- textureDimensions(texture: texture_1d<T>, level: i32) -> i32 where: T is f32, i32 or u32
- textureDimensions(texture: texture_2d<T>, level: i32) -> vec2<i32> where: T is f32, i32 or u32
- textureDimensions(texture: texture_2d_array<T>, level: i32) -> vec2<i32> where: T is f32, i32 or u32
- textureDimensions(texture: texture_3d<T>, level: i32) -> vec3<i32> where: T is f32, i32 or u32
- textureDimensions(texture: texture_cube<T>, level: i32) -> vec2<i32> where: T is f32, i32 or u32
- textureDimensions(texture: texture_cube_array<T>, level: i32) -> vec2<i32> where: T is f32, i32 or u32
- textureDimensions(texture: texture_depth_2d, level: i32) -> vec2<i32>
- textureDimensions(texture: texture_depth_2d_array, level: i32) -> vec2<i32>
- textureDimensions(texture: texture_depth_cube, level: i32) -> vec2<i32>
- textureDimensions(texture: texture_depth_cube_array, level: i32) -> vec2<i32>
- textureDimensions(texture: texture_1d<T>) -> i32 where: T is f32, i32 or u32
- textureDimensions(texture: texture_2d<T>) -> vec2<i32> where: T is f32, i32 or u32
- textureDimensions(texture: texture_2d_array<T>) -> vec2<i32> where: T is f32, i32 or u32
- textureDimensions(texture: texture_3d<T>) -> vec3<i32> where: T is f32, i32 or u32
- textureDimensions(texture: texture_cube<T>) -> vec2<i32> where: T is f32, i32 or u32
- textureDimensions(texture: texture_cube_array<T>) -> vec2<i32> where: T is f32, i32 or u32
- textureDimensions(texture: texture_multisampled_2d<T>) -> vec2<i32> where: T is f32, i32 or u32
- textureDimensions(texture: texture_depth_2d) -> vec2<i32>
- textureDimensions(texture: texture_depth_2d_array) -> vec2<i32>
- textureDimensions(texture: texture_depth_cube) -> vec2<i32>
- textureDimensions(texture: texture_depth_cube_array) -> vec2<i32>
- textureDimensions(texture: texture_depth_multisampled_2d) -> vec2<i32>
- textureDimensions(texture: texture_storage_1d<F, A>) -> i32 where: A is write
- textureDimensions(texture: texture_storage_2d<F, A>) -> vec2<i32> where: A is write
- textureDimensions(texture: texture_storage_2d_array<F, A>) -> vec2<i32> where: A is write
- textureDimensions(texture: texture_storage_3d<F, A>) -> vec3<i32> where: A is write
- textureDimensions(texture: texture_external) -> vec2<i32>
-)");
-}
-
-TEST_F(BuiltinTableTest, OverloadOrderByMatchingParameter) {
- auto* tex = create<sem::DepthTexture>(ast::TextureDimension::k2d);
- auto* bool_ = create<sem::Bool>();
- table->Lookup(BuiltinType::kTextureDimensions, {tex, bool_}, Source{});
- ASSERT_EQ(
- Diagnostics().str(),
- R"(error: no matching call to textureDimensions(texture_depth_2d, bool)
-
-27 candidate functions:
- textureDimensions(texture: texture_depth_2d, level: i32) -> vec2<i32>
- textureDimensions(texture: texture_depth_2d) -> vec2<i32>
- textureDimensions(texture: texture_1d<T>, level: i32) -> i32 where: T is f32, i32 or u32
- textureDimensions(texture: texture_2d<T>, level: i32) -> vec2<i32> where: T is f32, i32 or u32
- textureDimensions(texture: texture_2d_array<T>, level: i32) -> vec2<i32> where: T is f32, i32 or u32
- textureDimensions(texture: texture_3d<T>, level: i32) -> vec3<i32> where: T is f32, i32 or u32
- textureDimensions(texture: texture_cube<T>, level: i32) -> vec2<i32> where: T is f32, i32 or u32
- textureDimensions(texture: texture_cube_array<T>, level: i32) -> vec2<i32> where: T is f32, i32 or u32
- textureDimensions(texture: texture_depth_2d_array, level: i32) -> vec2<i32>
- textureDimensions(texture: texture_depth_cube, level: i32) -> vec2<i32>
- textureDimensions(texture: texture_depth_cube_array, level: i32) -> vec2<i32>
- textureDimensions(texture: texture_1d<T>) -> i32 where: T is f32, i32 or u32
- textureDimensions(texture: texture_2d<T>) -> vec2<i32> where: T is f32, i32 or u32
- textureDimensions(texture: texture_2d_array<T>) -> vec2<i32> where: T is f32, i32 or u32
- textureDimensions(texture: texture_3d<T>) -> vec3<i32> where: T is f32, i32 or u32
- textureDimensions(texture: texture_cube<T>) -> vec2<i32> where: T is f32, i32 or u32
- textureDimensions(texture: texture_cube_array<T>) -> vec2<i32> where: T is f32, i32 or u32
- textureDimensions(texture: texture_multisampled_2d<T>) -> vec2<i32> where: T is f32, i32 or u32
- textureDimensions(texture: texture_depth_2d_array) -> vec2<i32>
- textureDimensions(texture: texture_depth_cube) -> vec2<i32>
- textureDimensions(texture: texture_depth_cube_array) -> vec2<i32>
- textureDimensions(texture: texture_depth_multisampled_2d) -> vec2<i32>
- textureDimensions(texture: texture_storage_1d<F, A>) -> i32 where: A is write
- textureDimensions(texture: texture_storage_2d<F, A>) -> vec2<i32> where: A is write
- textureDimensions(texture: texture_storage_2d_array<F, A>) -> vec2<i32> where: A is write
- textureDimensions(texture: texture_storage_3d<F, A>) -> vec3<i32> where: A is write
- textureDimensions(texture: texture_external) -> vec2<i32>
-)");
-}
-
-TEST_F(BuiltinTableTest, SameOverloadReturnsSameBuiltinPointer) {
- auto* f32 = create<sem::F32>();
- auto* vec2_f32 = create<sem::Vector>(create<sem::F32>(), 2u);
- auto* bool_ = create<sem::Bool>();
- auto* a = table->Lookup(BuiltinType::kSelect, {f32, f32, bool_}, Source{});
- ASSERT_NE(a, nullptr) << Diagnostics().str();
-
- auto* b = table->Lookup(BuiltinType::kSelect, {f32, f32, bool_}, Source{});
- ASSERT_NE(b, nullptr) << Diagnostics().str();
- ASSERT_EQ(Diagnostics().str(), "");
-
- auto* c = table->Lookup(BuiltinType::kSelect, {vec2_f32, vec2_f32, bool_},
- Source{});
- ASSERT_NE(c, nullptr) << Diagnostics().str();
- ASSERT_EQ(Diagnostics().str(), "");
-
- EXPECT_EQ(a, b);
- EXPECT_NE(a, c);
- EXPECT_NE(b, c);
-}
-
-} // namespace
-} // namespace tint
diff --git a/chromium/third_party/dawn/src/tint/castable.h b/chromium/third_party/dawn/src/tint/castable.h
index 211f2fb8d6e..048d1e52589 100644
--- a/chromium/third_party/dawn/src/tint/castable.h
+++ b/chromium/third_party/dawn/src/tint/castable.h
@@ -21,24 +21,24 @@
#include <utility>
#include "src/tint/traits.h"
+#include "src/tint/utils/bitcast.h"
#include "src/tint/utils/crc32.h"
+#include "src/tint/utils/defer.h"
#if defined(__clang__)
/// Temporarily disable certain warnings when using Castable API
-#define TINT_CASTABLE_PUSH_DISABLE_WARNINGS() \
- _Pragma("clang diagnostic push") /**/ \
- _Pragma("clang diagnostic ignored \"-Wundefined-var-template\"") /**/ \
- static_assert(true, "require extra semicolon")
+#define TINT_CASTABLE_PUSH_DISABLE_WARNINGS() \
+ _Pragma("clang diagnostic push") /**/ \
+ _Pragma("clang diagnostic ignored \"-Wundefined-var-template\"") /**/ \
+ static_assert(true, "require extra semicolon")
/// Restore disabled warnings
#define TINT_CASTABLE_POP_DISABLE_WARNINGS() \
- _Pragma("clang diagnostic pop") /**/ \
- static_assert(true, "require extra semicolon")
+ _Pragma("clang diagnostic pop") /**/ \
+ static_assert(true, "require extra semicolon")
#else
-#define TINT_CASTABLE_PUSH_DISABLE_WARNINGS() \
- static_assert(true, "require extra semicolon")
-#define TINT_CASTABLE_POP_DISABLE_WARNINGS() \
- static_assert(true, "require extra semicolon")
+#define TINT_CASTABLE_PUSH_DISABLE_WARNINGS() static_assert(true, "require extra semicolon")
+#define TINT_CASTABLE_POP_DISABLE_WARNINGS() static_assert(true, "require extra semicolon")
#endif
TINT_CASTABLE_PUSH_DISABLE_WARNINGS();
@@ -62,190 +62,184 @@ namespace tint {
/// True if all template types that are not Ignore derive from CastableBase
template <typename... TYPES>
static constexpr bool IsCastable =
- ((traits::IsTypeOrDerived<TYPES, CastableBase> ||
- std::is_same_v<TYPES, Ignore>)&&...) &&
+ ((traits::IsTypeOrDerived<TYPES, CastableBase> || std::is_same_v<TYPES, Ignore>)&&...) &&
!(std::is_same_v<TYPES, Ignore> && ...);
/// Helper macro to instantiate the TypeInfo<T> template for `CLASS`.
-#define TINT_INSTANTIATE_TYPEINFO(CLASS) \
- TINT_CASTABLE_PUSH_DISABLE_WARNINGS(); \
- template <> \
- const tint::TypeInfo tint::detail::TypeInfoOf<CLASS>::info{ \
- &tint::detail::TypeInfoOf<CLASS::TrueBase>::info, \
- #CLASS, \
- tint::TypeInfo::HashCodeOf<CLASS>(), \
- tint::TypeInfo::FullHashCodeOf<CLASS>(), \
- }; \
- TINT_CASTABLE_POP_DISABLE_WARNINGS()
+#define TINT_INSTANTIATE_TYPEINFO(CLASS) \
+ TINT_CASTABLE_PUSH_DISABLE_WARNINGS(); \
+ template <> \
+ const tint::TypeInfo tint::detail::TypeInfoOf<CLASS>::info{ \
+ &tint::detail::TypeInfoOf<CLASS::TrueBase>::info, \
+ #CLASS, \
+ tint::TypeInfo::HashCodeOf<CLASS>(), \
+ tint::TypeInfo::FullHashCodeOf<CLASS>(), \
+ }; \
+ TINT_CASTABLE_POP_DISABLE_WARNINGS()
/// Bit flags that can be passed to the template parameter `FLAGS` of Is() and
/// As().
enum CastFlags {
- /// Disables the static_assert() inside Is(), that compile-time-verifies that
- /// the cast is possible. This flag may be useful for highly-generic template
- /// code that needs to compile for template permutations that generate
- /// impossible casts.
- kDontErrorOnImpossibleCast = 1,
+ /// Disables the static_assert() inside Is(), that compile-time-verifies that
+ /// the cast is possible. This flag may be useful for highly-generic template
+ /// code that needs to compile for template permutations that generate
+ /// impossible casts.
+ kDontErrorOnImpossibleCast = 1,
};
/// TypeInfo holds type information for a Castable type.
struct TypeInfo {
- /// The type of a hash code
- using HashCode = uint64_t;
-
- /// The base class of this type
- const TypeInfo* base;
- /// The type name
- const char* name;
- /// The type hash code
- const HashCode hashcode;
- /// The type hash code bitwise-or'd with all ancestor's hashcodes.
- const HashCode full_hashcode;
-
- /// @param type the test type info
- /// @returns true if the class with this TypeInfo is of, or derives from the
- /// class with the given TypeInfo.
- inline bool Is(const tint::TypeInfo* type) const {
- // Optimization: Check whether the all the bits of the type's hashcode can
- // be found in the full_hashcode. If a single bit is missing, then we
- // can quickly tell that that this TypeInfo does not derive from `type`.
- if ((full_hashcode & type->hashcode) != type->hashcode) {
- return false;
+ /// The type of a hash code
+ using HashCode = uint64_t;
+
+ /// The base class of this type
+ const TypeInfo* base;
+ /// The type name
+ const char* name;
+ /// The type hash code
+ const HashCode hashcode;
+    /// The type hash code bitwise-or'd with all ancestors' hashcodes.
+ const HashCode full_hashcode;
+
+ /// @param type the test type info
+ /// @returns true if the class with this TypeInfo is of, or derives from the
+ /// class with the given TypeInfo.
+ inline bool Is(const tint::TypeInfo* type) const {
+        // Optimization: Check whether all the bits of the type's hashcode can
+        // be found in the full_hashcode. If a single bit is missing, then we
+        // can quickly tell that this TypeInfo does not derive from `type`.
+ if ((full_hashcode & type->hashcode) != type->hashcode) {
+ return false;
+ }
+
+ // Walk the base types, starting with this TypeInfo, to see if any of the
+ // pointers match `type`.
+ for (auto* ti = this; ti != nullptr; ti = ti->base) {
+ if (ti == type) {
+ return true;
+ }
+ }
+ return false;
}
- // Walk the base types, starting with this TypeInfo, to see if any of the
- // pointers match `type`.
- for (auto* ti = this; ti != nullptr; ti = ti->base) {
- if (ti == type) {
- return true;
- }
+ /// @returns true if `type` derives from the class `TO`
+ /// @param type the object type to test from, which must be, or derive from
+ /// type `FROM`.
+ /// @see CastFlags
+ template <typename TO, typename FROM, int FLAGS = 0>
+ static inline bool Is(const tint::TypeInfo* type) {
+ constexpr const bool downcast = std::is_base_of<FROM, TO>::value;
+ constexpr const bool upcast = std::is_base_of<TO, FROM>::value;
+ constexpr const bool nocast = std::is_same<FROM, TO>::value;
+ constexpr const bool assert_is_castable = (FLAGS & kDontErrorOnImpossibleCast) == 0;
+
+ static_assert(upcast || downcast || nocast || !assert_is_castable, "impossible cast");
+
+ if (upcast || nocast) {
+ return true;
+ }
+
+ return type->Is(&Of<std::remove_cv_t<TO>>());
}
- return false;
- }
-
- /// @returns true if `type` derives from the class `TO`
- /// @param type the object type to test from, which must be, or derive from
- /// type `FROM`.
- /// @see CastFlags
- template <typename TO, typename FROM, int FLAGS = 0>
- static inline bool Is(const tint::TypeInfo* type) {
- constexpr const bool downcast = std::is_base_of<FROM, TO>::value;
- constexpr const bool upcast = std::is_base_of<TO, FROM>::value;
- constexpr const bool nocast = std::is_same<FROM, TO>::value;
- constexpr const bool assert_is_castable =
- (FLAGS & kDontErrorOnImpossibleCast) == 0;
-
- static_assert(upcast || downcast || nocast || !assert_is_castable,
- "impossible cast");
-
- if (upcast || nocast) {
- return true;
+
+ /// @returns the static TypeInfo for the type T
+ template <typename T>
+ static const TypeInfo& Of() {
+ return detail::TypeInfoOf<std::remove_cv_t<T>>::info;
}
- return type->Is(&Of<std::remove_cv_t<TO>>());
- }
-
- /// @returns the static TypeInfo for the type T
- template <typename T>
- static const TypeInfo& Of() {
- return detail::TypeInfoOf<std::remove_cv_t<T>>::info;
- }
-
- /// @returns a compile-time hashcode for the type `T`.
- /// @note the returned hashcode will have at most 2 bits set, as the hashes
- /// are expected to be used in bloom-filters which will quickly saturate when
- /// multiple hashcodes are bitwise-or'd together.
- template <typename T>
- static constexpr HashCode HashCodeOf() {
- static_assert(IsCastable<T>, "T is not Castable");
- static_assert(
- std::is_same_v<T, std::remove_cv_t<T>>,
- "Strip const / volatile decorations before calling HashCodeOf");
- /// Use the compiler's "pretty" function name, which includes the template
- /// type, to obtain a unique hash value.
+ /// @returns a compile-time hashcode for the type `T`.
+ /// @note the returned hashcode will have at most 2 bits set, as the hashes
+ /// are expected to be used in bloom-filters which will quickly saturate when
+ /// multiple hashcodes are bitwise-or'd together.
+ template <typename T>
+ static constexpr HashCode HashCodeOf() {
+ static_assert(IsCastable<T>, "T is not Castable");
+ static_assert(std::is_same_v<T, std::remove_cv_t<T>>,
+ "Strip const / volatile decorations before calling HashCodeOf");
+ /// Use the compiler's "pretty" function name, which includes the template
+ /// type, to obtain a unique hash value.
#ifdef _MSC_VER
- constexpr uint32_t crc = utils::CRC32(__FUNCSIG__);
+ constexpr uint32_t crc = utils::CRC32(__FUNCSIG__);
#else
- constexpr uint32_t crc = utils::CRC32(__PRETTY_FUNCTION__);
+ constexpr uint32_t crc = utils::CRC32(__PRETTY_FUNCTION__);
#endif
- constexpr uint32_t bit_a = (crc & 63);
- constexpr uint32_t bit_b = ((crc >> 6) & 63);
- return (static_cast<HashCode>(1) << bit_a) |
- (static_cast<HashCode>(1) << bit_b);
- }
-
- /// @returns the hashcode of the given type, bitwise-or'd with the hashcodes
- /// of all base classes.
- template <typename T>
- static constexpr HashCode FullHashCodeOf() {
- if constexpr (std::is_same_v<T, CastableBase>) {
- return HashCodeOf<CastableBase>();
- } else {
- return HashCodeOf<T>() | FullHashCodeOf<typename T::TrueBase>();
+ constexpr uint32_t bit_a = (crc & 63);
+ constexpr uint32_t bit_b = ((crc >> 6) & 63);
+ return (static_cast<HashCode>(1) << bit_a) | (static_cast<HashCode>(1) << bit_b);
}
- }
-
- /// @returns the bitwise-or'd hashcodes of all the types of the tuple `TUPLE`.
- /// @see HashCodeOf
- template <typename TUPLE>
- static constexpr HashCode CombinedHashCodeOfTuple() {
- constexpr auto kCount = std::tuple_size_v<TUPLE>;
- if constexpr (kCount == 0) {
- return 0;
- } else if constexpr (kCount == 1) {
- return HashCodeOf<std::remove_cv_t<std::tuple_element_t<0, TUPLE>>>();
- } else {
- constexpr auto kMid = kCount / 2;
- return CombinedHashCodeOfTuple<traits::SliceTuple<0, kMid, TUPLE>>() |
- CombinedHashCodeOfTuple<
- traits::SliceTuple<kMid, kCount - kMid, TUPLE>>();
+
+ /// @returns the hashcode of the given type, bitwise-or'd with the hashcodes
+ /// of all base classes.
+ template <typename T>
+ static constexpr HashCode FullHashCodeOf() {
+ if constexpr (std::is_same_v<T, CastableBase>) {
+ return HashCodeOf<CastableBase>();
+ } else {
+ return HashCodeOf<T>() | FullHashCodeOf<typename T::TrueBase>();
+ }
}
- }
-
- /// @returns the bitwise-or'd hashcodes of all the template parameter types.
- /// @see HashCodeOf
- template <typename... TYPES>
- static constexpr HashCode CombinedHashCodeOf() {
- return CombinedHashCodeOfTuple<std::tuple<TYPES...>>();
- }
-
- /// @returns true if this TypeInfo is of, or derives from any of the types in
- /// `TUPLE`.
- template <typename TUPLE>
- inline bool IsAnyOfTuple() const {
- constexpr auto kCount = std::tuple_size_v<TUPLE>;
- if constexpr (kCount == 0) {
- return false;
- } else if constexpr (kCount == 1) {
- return Is(&Of<std::tuple_element_t<0, TUPLE>>());
- } else if constexpr (kCount == 2) {
- return Is(&Of<std::tuple_element_t<0, TUPLE>>()) ||
- Is(&Of<std::tuple_element_t<1, TUPLE>>());
- } else if constexpr (kCount == 3) {
- return Is(&Of<std::tuple_element_t<0, TUPLE>>()) ||
- Is(&Of<std::tuple_element_t<1, TUPLE>>()) ||
- Is(&Of<std::tuple_element_t<2, TUPLE>>());
- } else {
- // Optimization: Compare the object's hashcode to the bitwise-or of all
- // the tested type's hashcodes. If there's no intersection of bits in
- // the two masks, then we can guarantee that the type is not in `TO`.
- if (full_hashcode & TypeInfo::CombinedHashCodeOfTuple<TUPLE>()) {
- // Possibly one of the types in `TUPLE`.
- // Split the search in two, and scan each block.
- static constexpr auto kMid = kCount / 2;
- return IsAnyOfTuple<traits::SliceTuple<0, kMid, TUPLE>>() ||
- IsAnyOfTuple<traits::SliceTuple<kMid, kCount - kMid, TUPLE>>();
- }
- return false;
+
+ /// @returns the bitwise-or'd hashcodes of all the types of the tuple `TUPLE`.
+ /// @see HashCodeOf
+ template <typename TUPLE>
+ static constexpr HashCode CombinedHashCodeOfTuple() {
+ constexpr auto kCount = std::tuple_size_v<TUPLE>;
+ if constexpr (kCount == 0) {
+ return 0;
+ } else if constexpr (kCount == 1) {
+ return HashCodeOf<std::remove_cv_t<std::tuple_element_t<0, TUPLE>>>();
+ } else {
+ constexpr auto kMid = kCount / 2;
+ return CombinedHashCodeOfTuple<traits::SliceTuple<0, kMid, TUPLE>>() |
+ CombinedHashCodeOfTuple<traits::SliceTuple<kMid, kCount - kMid, TUPLE>>();
+ }
+ }
+
+ /// @returns the bitwise-or'd hashcodes of all the template parameter types.
+ /// @see HashCodeOf
+ template <typename... TYPES>
+ static constexpr HashCode CombinedHashCodeOf() {
+ return CombinedHashCodeOfTuple<std::tuple<TYPES...>>();
+ }
+
+ /// @returns true if this TypeInfo is of, or derives from any of the types in
+ /// `TUPLE`.
+ template <typename TUPLE>
+ inline bool IsAnyOfTuple() const {
+ constexpr auto kCount = std::tuple_size_v<TUPLE>;
+ if constexpr (kCount == 0) {
+ return false;
+ } else if constexpr (kCount == 1) {
+ return Is(&Of<std::tuple_element_t<0, TUPLE>>());
+ } else if constexpr (kCount == 2) {
+ return Is(&Of<std::tuple_element_t<0, TUPLE>>()) ||
+ Is(&Of<std::tuple_element_t<1, TUPLE>>());
+ } else if constexpr (kCount == 3) {
+ return Is(&Of<std::tuple_element_t<0, TUPLE>>()) ||
+ Is(&Of<std::tuple_element_t<1, TUPLE>>()) ||
+ Is(&Of<std::tuple_element_t<2, TUPLE>>());
+ } else {
+ // Optimization: Compare the object's hashcode to the bitwise-or of all
+ // the tested type's hashcodes. If there's no intersection of bits in
+            // the two masks, then we can guarantee that the type is not in `TUPLE`.
+ if (full_hashcode & TypeInfo::CombinedHashCodeOfTuple<TUPLE>()) {
+ // Possibly one of the types in `TUPLE`.
+ // Split the search in two, and scan each block.
+ static constexpr auto kMid = kCount / 2;
+ return IsAnyOfTuple<traits::SliceTuple<0, kMid, TUPLE>>() ||
+ IsAnyOfTuple<traits::SliceTuple<kMid, kCount - kMid, TUPLE>>();
+ }
+ return false;
+ }
+ }
+
+ /// @returns true if this TypeInfo is of, or derives from any of the types in
+ /// `TYPES`.
+ template <typename... TYPES>
+ inline bool IsAnyOf() const {
+ return IsAnyOfTuple<std::tuple<TYPES...>>();
}
- }
-
- /// @returns true if this TypeInfo is of, or derives from any of the types in
- /// `TYPES`.
- template <typename... TYPES>
- inline bool IsAnyOf() const {
- return IsAnyOfTuple<std::tuple<TYPES...>>();
- }
};
namespace detail {
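
The hashcode fields above behave like a small per-type Bloom filter: HashCodeOf<T>() derives at most two set bits from a compile-time CRC32 of the compiler's "pretty" function name, and full_hashcode ORs in every ancestor's bits, so Is() can reject most mismatches with a single AND before walking the base-class chain. The following is a minimal, self-contained sketch of that quick-reject test; the bit patterns are invented for illustration and are not the values the real HashCodeOf<T>() would produce.

#include <cstdint>

int main() {
    using HashCode = uint64_t;

    // Hypothetical hashcodes: at most two bits set per type, as in HashCodeOf<T>().
    constexpr HashCode base_hash    = (1ull << 3) | (1ull << 41);  // e.g. a root class
    constexpr HashCode derived_hash = (1ull << 7) | (1ull << 22);  // e.g. a leaf class
    constexpr HashCode derived_full = derived_hash | base_hash;    // OR of all ancestors

    // Quick reject: if any bit of the queried type's hashcode is missing from
    // full_hashcode, the object cannot be of (or derive from) that type.
    constexpr HashCode unrelated_hash = (1ull << 5) | (1ull << 50);
    static_assert((derived_full & unrelated_hash) != unrelated_hash, "cheaply rejected");

    // All bits present only means "possible match": unrelated types can share
    // bits, so Is() still walks the base-class pointers to confirm.
    static_assert((derived_full & base_hash) == base_hash, "candidate; verify via base walk");
    return 0;
}
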
@@ -255,8 +249,8 @@ namespace detail {
/// `T`.
template <typename T>
struct TypeInfoOf {
- /// The unique TypeInfo for the type T.
- static const TypeInfo info;
+ /// The unique TypeInfo for the type T.
+ static const TypeInfo info;
};
/// A placeholder structure used for template parameters that need a default
@@ -271,10 +265,10 @@ struct Infer;
/// @see CastFlags
template <typename TO, int FLAGS = 0, typename FROM = detail::Infer>
inline bool Is(FROM* obj) {
- if (obj == nullptr) {
- return false;
- }
- return TypeInfo::Is<TO, FROM, FLAGS>(&obj->TypeInfo());
+ if (obj == nullptr) {
+ return false;
+ }
+ return TypeInfo::Is<TO, FROM, FLAGS>(&obj->TypeInfo());
}
/// @returns true if `obj` is a valid pointer, and is of, or derives from the
@@ -283,13 +277,9 @@ inline bool Is(FROM* obj) {
/// @param pred predicate function with signature `bool(const TYPE*)` called iff
/// object is of, or derives from the class `TYPE`.
/// @see CastFlags
-template <typename TYPE,
- int FLAGS = 0,
- typename OBJ = detail::Infer,
- typename Pred = detail::Infer>
+template <typename TYPE, int FLAGS = 0, typename OBJ = detail::Infer, typename Pred = detail::Infer>
inline bool Is(OBJ* obj, Pred&& pred) {
- return Is<TYPE, FLAGS, OBJ>(obj) &&
- pred(static_cast<std::add_const_t<TYPE>*>(obj));
+ return Is<TYPE, FLAGS, OBJ>(obj) && pred(static_cast<std::add_const_t<TYPE>*>(obj));
}
/// @returns true if `obj` is a valid pointer, and is of, or derives from any of
@@ -297,10 +287,10 @@ inline bool Is(OBJ* obj, Pred&& pred) {
/// @param obj the object to query.
template <typename... TYPES, typename OBJ>
inline bool IsAnyOf(OBJ* obj) {
- if (!obj) {
- return false;
- }
- return obj->TypeInfo().template IsAnyOf<TYPES...>();
+ if (!obj) {
+ return false;
+ }
+ return obj->TypeInfo().template IsAnyOf<TYPES...>();
}
/// @returns obj dynamically cast to the type `TO` or `nullptr` if
@@ -309,8 +299,8 @@ inline bool IsAnyOf(OBJ* obj) {
/// @see CastFlags
template <typename TO, int FLAGS = 0, typename FROM = detail::Infer>
inline TO* As(FROM* obj) {
- auto* as_castable = static_cast<CastableBase*>(obj);
- return Is<TO, FLAGS>(obj) ? static_cast<TO*>(as_castable) : nullptr;
+ auto* as_castable = static_cast<CastableBase*>(obj);
+ return Is<TO, FLAGS>(obj) ? static_cast<TO*>(as_castable) : nullptr;
}
/// @returns obj dynamically cast to the type `TO` or `nullptr` if
@@ -319,8 +309,8 @@ inline TO* As(FROM* obj) {
/// @see CastFlags
template <typename TO, int FLAGS = 0, typename FROM = detail::Infer>
inline const TO* As(const FROM* obj) {
- auto* as_castable = static_cast<const CastableBase*>(obj);
- return Is<TO, FLAGS>(obj) ? static_cast<const TO*>(as_castable) : nullptr;
+ auto* as_castable = static_cast<const CastableBase*>(obj);
+ return Is<TO, FLAGS>(obj) ? static_cast<const TO*>(as_castable) : nullptr;
}
/// CastableBase is the base class for all Castable objects.
@@ -328,61 +318,61 @@ inline const TO* As(const FROM* obj) {
/// Castable helper template.
/// @see Castable
class CastableBase {
- public:
- /// Copy constructor
- CastableBase(const CastableBase&) = default;
-
- /// Destructor
- virtual ~CastableBase() = default;
-
- /// Copy assignment
- /// @param other the CastableBase to copy
- /// @returns the new CastableBase
- CastableBase& operator=(const CastableBase& other) = default;
-
- /// @returns the TypeInfo of the object
- virtual const tint::TypeInfo& TypeInfo() const = 0;
-
- /// @returns true if this object is of, or derives from the class `TO`
- template <typename TO>
- inline bool Is() const {
- return tint::Is<TO>(this);
- }
-
- /// @returns true if this object is of, or derives from the class `TO` and
- /// pred(const TO*) returns true
- /// @param pred predicate function with signature `bool(const TO*)` called iff
- /// object is of, or derives from the class `TO`.
- template <typename TO, int FLAGS = 0, typename Pred = detail::Infer>
- inline bool Is(Pred&& pred) const {
- return tint::Is<TO, FLAGS>(this, std::forward<Pred>(pred));
- }
-
- /// @returns true if this object is of, or derives from any of the `TO`
- /// classes.
- template <typename... TO>
- inline bool IsAnyOf() const {
- return tint::IsAnyOf<TO...>(this);
- }
-
- /// @returns this object dynamically cast to the type `TO` or `nullptr` if
- /// this object does not derive from `TO`.
- /// @see CastFlags
- template <typename TO, int FLAGS = 0>
- inline TO* As() {
- return tint::As<TO, FLAGS>(this);
- }
-
- /// @returns this object dynamically cast to the type `TO` or `nullptr` if
- /// this object does not derive from `TO`.
- /// @see CastFlags
- template <typename TO, int FLAGS = 0>
- inline const TO* As() const {
- return tint::As<const TO, FLAGS>(this);
- }
-
- protected:
- CastableBase() = default;
+ public:
+ /// Copy constructor
+ CastableBase(const CastableBase&) = default;
+
+ /// Destructor
+ virtual ~CastableBase() = default;
+
+ /// Copy assignment
+ /// @param other the CastableBase to copy
+ /// @returns the new CastableBase
+ CastableBase& operator=(const CastableBase& other) = default;
+
+ /// @returns the TypeInfo of the object
+ virtual const tint::TypeInfo& TypeInfo() const = 0;
+
+ /// @returns true if this object is of, or derives from the class `TO`
+ template <typename TO>
+ inline bool Is() const {
+ return tint::Is<TO>(this);
+ }
+
+ /// @returns true if this object is of, or derives from the class `TO` and
+ /// pred(const TO*) returns true
+ /// @param pred predicate function with signature `bool(const TO*)` called iff
+ /// object is of, or derives from the class `TO`.
+ template <typename TO, int FLAGS = 0, typename Pred = detail::Infer>
+ inline bool Is(Pred&& pred) const {
+ return tint::Is<TO, FLAGS>(this, std::forward<Pred>(pred));
+ }
+
+ /// @returns true if this object is of, or derives from any of the `TO`
+ /// classes.
+ template <typename... TO>
+ inline bool IsAnyOf() const {
+ return tint::IsAnyOf<TO...>(this);
+ }
+
+ /// @returns this object dynamically cast to the type `TO` or `nullptr` if
+ /// this object does not derive from `TO`.
+ /// @see CastFlags
+ template <typename TO, int FLAGS = 0>
+ inline TO* As() {
+ return tint::As<TO, FLAGS>(this);
+ }
+
+ /// @returns this object dynamically cast to the type `TO` or `nullptr` if
+ /// this object does not derive from `TO`.
+ /// @see CastFlags
+ template <typename TO, int FLAGS = 0>
+ inline const TO* As() const {
+ return tint::As<const TO, FLAGS>(this);
+ }
+
+ protected:
+ CastableBase() = default;
};
/// Castable is a helper to derive `CLASS` from `BASE`, automatically
@@ -406,64 +396,60 @@ class CastableBase {
/// ```
template <typename CLASS, typename BASE = CastableBase>
class Castable : public BASE {
- public:
- // Inherit the `BASE` class constructors.
- using BASE::BASE;
-
- /// A type alias for `CLASS` to easily access the `BASE` class members.
- /// Base actually aliases to the Castable instead of `BASE` so that you can
- /// use Base in the `CLASS` constructor.
- using Base = Castable;
-
- /// A type alias for `BASE`.
- using TrueBase = BASE;
-
- /// @returns the TypeInfo of the object
- const tint::TypeInfo& TypeInfo() const override {
- return TypeInfo::Of<CLASS>();
- }
-
- /// @returns true if this object is of, or derives from the class `TO`
- /// @see CastFlags
- template <typename TO, int FLAGS = 0>
- inline bool Is() const {
- return tint::Is<TO, FLAGS>(static_cast<const CLASS*>(this));
- }
-
- /// @returns true if this object is of, or derives from the class `TO` and
- /// pred(const TO*) returns true
- /// @param pred predicate function with signature `bool(const TO*)` called iff
- /// object is of, or derives from the class `TO`.
- template <int FLAGS = 0, typename Pred = detail::Infer>
- inline bool Is(Pred&& pred) const {
- using TO =
- typename std::remove_pointer<traits::ParameterType<Pred, 0>>::type;
- return tint::Is<TO, FLAGS>(static_cast<const CLASS*>(this),
- std::forward<Pred>(pred));
- }
-
- /// @returns true if this object is of, or derives from any of the `TO`
- /// classes.
- template <typename... TO>
- inline bool IsAnyOf() const {
- return tint::IsAnyOf<TO...>(static_cast<const CLASS*>(this));
- }
-
- /// @returns this object dynamically cast to the type `TO` or `nullptr` if
- /// this object does not derive from `TO`.
- /// @see CastFlags
- template <typename TO, int FLAGS = 0>
- inline TO* As() {
- return tint::As<TO, FLAGS>(this);
- }
-
- /// @returns this object dynamically cast to the type `TO` or `nullptr` if
- /// this object does not derive from `TO`.
- /// @see CastFlags
- template <typename TO, int FLAGS = 0>
- inline const TO* As() const {
- return tint::As<const TO, FLAGS>(this);
- }
+ public:
+ // Inherit the `BASE` class constructors.
+ using BASE::BASE;
+
+ /// A type alias for `CLASS` to easily access the `BASE` class members.
+ /// Base actually aliases to the Castable instead of `BASE` so that you can
+ /// use Base in the `CLASS` constructor.
+ using Base = Castable;
+
+ /// A type alias for `BASE`.
+ using TrueBase = BASE;
+
+ /// @returns the TypeInfo of the object
+ const tint::TypeInfo& TypeInfo() const override { return TypeInfo::Of<CLASS>(); }
+
+ /// @returns true if this object is of, or derives from the class `TO`
+ /// @see CastFlags
+ template <typename TO, int FLAGS = 0>
+ inline bool Is() const {
+ return tint::Is<TO, FLAGS>(static_cast<const CLASS*>(this));
+ }
+
+ /// @returns true if this object is of, or derives from the class `TO` and
+ /// pred(const TO*) returns true
+ /// @param pred predicate function with signature `bool(const TO*)` called iff
+ /// object is of, or derives from the class `TO`.
+ template <int FLAGS = 0, typename Pred = detail::Infer>
+ inline bool Is(Pred&& pred) const {
+ using TO = typename std::remove_pointer<traits::ParameterType<Pred, 0>>::type;
+ return tint::Is<TO, FLAGS>(static_cast<const CLASS*>(this), std::forward<Pred>(pred));
+ }
+
+ /// @returns true if this object is of, or derives from any of the `TO`
+ /// classes.
+ template <typename... TO>
+ inline bool IsAnyOf() const {
+ return tint::IsAnyOf<TO...>(static_cast<const CLASS*>(this));
+ }
+
+ /// @returns this object dynamically cast to the type `TO` or `nullptr` if
+ /// this object does not derive from `TO`.
+ /// @see CastFlags
+ template <typename TO, int FLAGS = 0>
+ inline TO* As() {
+ return tint::As<TO, FLAGS>(this);
+ }
+
+ /// @returns this object dynamically cast to the type `TO` or `nullptr` if
+ /// this object does not derive from `TO`.
+ /// @see CastFlags
+ template <typename TO, int FLAGS = 0>
+ inline const TO* As() const {
+ return tint::As<const TO, FLAGS>(this);
+ }
};
namespace detail {
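
As a rough usage sketch of the helper defined above (the Animal/Cat hierarchy is hypothetical, not a real Tint type): derive each class through Castable, instantiate its TypeInfo once in the matching .cc file, then query it with Is()/As()/IsAnyOf().

#include "src/tint/castable.h"

// Placeholder hierarchy for illustration only.
class Animal : public tint::Castable<Animal, tint::CastableBase> {};
class Cat : public tint::Castable<Cat, Animal> {};

// Normally placed in the corresponding .cc files, one per class.
TINT_INSTANTIATE_TYPEINFO(Animal);
TINT_INSTANTIATE_TYPEINFO(Cat);

void Inspect(Animal* a) {
    if (a->Is<Cat>()) {                      // hash quick-check, then base-chain walk
        Cat* c = a->As<Cat>();               // nullptr if the object is not a Cat
        (void)c;
    }
    bool known = a->IsAnyOf<Cat, Animal>();  // always true here, since Animal is listed
    (void)known;
}
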
@@ -474,51 +460,50 @@ struct CastableCommonBaseImpl {};
/// Alias to typename CastableCommonBaseImpl<TYPES>::type
template <typename... TYPES>
-using CastableCommonBase =
- typename detail::CastableCommonBaseImpl<TYPES...>::type;
+using CastableCommonBase = typename detail::CastableCommonBaseImpl<TYPES...>::type;
/// CastableCommonBaseImpl template specialization for a single type
template <typename T>
struct CastableCommonBaseImpl<T> {
- /// Common base class of a single type is itself
- using type = T;
+ /// Common base class of a single type is itself
+ using type = T;
};
/// CastableCommonBaseImpl A <-> CastableBase specialization
template <typename A>
struct CastableCommonBaseImpl<A, CastableBase> {
- /// Common base class for A and CastableBase is CastableBase
- using type = CastableBase;
+ /// Common base class for A and CastableBase is CastableBase
+ using type = CastableBase;
};
/// CastableCommonBaseImpl T <-> Ignore specialization
template <typename T>
struct CastableCommonBaseImpl<T, Ignore> {
- /// Resolves to T as the other type is ignored
- using type = T;
+ /// Resolves to T as the other type is ignored
+ using type = T;
};
/// CastableCommonBaseImpl Ignore <-> T specialization
template <typename T>
struct CastableCommonBaseImpl<Ignore, T> {
- /// Resolves to T as the other type is ignored
- using type = T;
+ /// Resolves to T as the other type is ignored
+ using type = T;
};
/// CastableCommonBaseImpl A <-> B specialization
template <typename A, typename B>
struct CastableCommonBaseImpl<A, B> {
- /// The common base class for A, B and OTHERS
- using type = std::conditional_t<traits::IsTypeOrDerived<A, B>,
- B, // A derives from B
- CastableCommonBase<A, typename B::TrueBase>>;
+ /// The common base class for A, B and OTHERS
+ using type = std::conditional_t<traits::IsTypeOrDerived<A, B>,
+ B, // A derives from B
+ CastableCommonBase<A, typename B::TrueBase>>;
};
/// CastableCommonBaseImpl 3+ types specialization
template <typename A, typename B, typename... OTHERS>
struct CastableCommonBaseImpl<A, B, OTHERS...> {
- /// The common base class for A, B and OTHERS
- using type = CastableCommonBase<CastableCommonBase<A, B>, OTHERS...>;
+ /// The common base class for A, B and OTHERS
+ using type = CastableCommonBase<CastableCommonBase<A, B>, OTHERS...>;
};
} // namespace detail
@@ -547,29 +532,27 @@ namespace detail {
/// @note does not handle the Default case
/// @see Switch().
template <typename FN>
-using SwitchCaseType = std::remove_pointer_t<
- traits::ParameterType<std::remove_reference_t<FN>, 0>>;
+using SwitchCaseType = std::remove_pointer_t<traits::ParameterType<std::remove_reference_t<FN>, 0>>;
/// Evaluates to true if the function `FN` has the signature of a Default case
/// in a Switch().
/// @see Switch().
template <typename FN>
inline constexpr bool IsDefaultCase =
- std::is_same_v<traits::ParameterType<std::remove_reference_t<FN>, 0>,
- Default>;
+ std::is_same_v<traits::ParameterType<std::remove_reference_t<FN>, 0>, Default>;
/// Searches the list of Switch cases for a Default case, returning the index of
/// the Default case. If a Default case is not found in the tuple, then -1
/// is returned.
template <typename TUPLE, std::size_t START_IDX = 0>
constexpr int IndexOfDefaultCase() {
- if constexpr (START_IDX < std::tuple_size_v<TUPLE>) {
- return IsDefaultCase<std::tuple_element_t<START_IDX, TUPLE>>
- ? static_cast<int>(START_IDX)
- : IndexOfDefaultCase<TUPLE, START_IDX + 1>();
- } else {
- return -1;
- }
+ if constexpr (START_IDX < std::tuple_size_v<TUPLE>) {
+ return IsDefaultCase<std::tuple_element_t<START_IDX, TUPLE>>
+ ? static_cast<int>(START_IDX)
+ : IndexOfDefaultCase<TUPLE, START_IDX + 1>();
+ } else {
+ return -1;
+ }
}
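
These helpers (SwitchCaseType, IsDefaultCase, IndexOfDefaultCase) back the Switch() utility defined further down in this header. A hedged usage sketch follows, again with placeholder node types rather than real Tint AST/sem classes: the first case whose type matches the object wins, so more-derived cases should be listed before their bases, and a Default case catches everything else.

#include "src/tint/castable.h"

// Placeholder hierarchy for illustration only.
class Node : public tint::Castable<Node, tint::CastableBase> {};
class Literal : public tint::Castable<Literal, Node> {};
TINT_INSTANTIATE_TYPEINFO(Node);
TINT_INSTANTIATE_TYPEINFO(Literal);

const char* Describe(const Node* node) {
    return tint::Switch(
        node,
        [](const Literal*) { return "a literal"; },    // more-derived case first
        [](const Node*) { return "some other node"; },
        [](tint::Default) { return "nullptr"; });      // only reached for null here
}
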
/// The implementation of Switch() for non-Default cases.
@@ -586,102 +569,119 @@ inline bool NonDefaultCases(T* object,
const TypeInfo* type,
RETURN_TYPE* result,
std::tuple<CASES...>&& cases) {
- using Cases = std::tuple<CASES...>;
-
- (void)result; // Not always used, avoid warning.
-
- static constexpr bool kHasReturnType = !std::is_same_v<RETURN_TYPE, void>;
- static constexpr size_t kNumCases = sizeof...(CASES);
-
- if constexpr (kNumCases == 0) {
- // No cases. Nothing to do.
- return false;
- } else if constexpr (kNumCases == 1) { // NOLINT: cpplint doesn't understand
- // `else if constexpr`
- // Single case.
- using CaseFunc = std::tuple_element_t<0, Cases>;
- static_assert(!IsDefaultCase<CaseFunc>,
- "NonDefaultCases called with a Default case");
- // Attempt to dynamically cast the object to the handler type. If that
- // succeeds, call the case handler with the cast object.
- using CaseType = SwitchCaseType<CaseFunc>;
- if (type->Is(&TypeInfo::Of<CaseType>())) {
- auto* ptr = static_cast<CaseType*>(object);
- if constexpr (kHasReturnType) {
- *result = static_cast<RETURN_TYPE>(std::get<0>(cases)(ptr));
- } else {
- std::get<0>(cases)(ptr);
- }
- return true;
- }
- return false;
- } else {
- // Multiple cases.
- // Check the hashcode bits to see if there's any possibility of a case
- // matching in these cases. If there isn't, we can skip all these cases.
- if (type->full_hashcode &
- TypeInfo::CombinedHashCodeOf<SwitchCaseType<CASES>...>()) {
- // There's a possibility. We need to scan further.
- // Split the cases into two, and recurse.
- constexpr size_t kMid = kNumCases / 2;
- return NonDefaultCases(object, type, result,
- traits::Slice<0, kMid>(cases)) ||
- NonDefaultCases(object, type, result,
- traits::Slice<kMid, kNumCases - kMid>(cases));
+ using Cases = std::tuple<CASES...>;
+
+ (void)result; // Not always used, avoid warning.
+
+ static constexpr bool kHasReturnType = !std::is_same_v<RETURN_TYPE, void>;
+ static constexpr size_t kNumCases = sizeof...(CASES);
+
+ if constexpr (kNumCases == 0) {
+ // No cases. Nothing to do.
+ return false;
+ } else if constexpr (kNumCases == 1) { // NOLINT: cpplint doesn't understand
+ // `else if constexpr`
+ // Single case.
+ using CaseFunc = std::tuple_element_t<0, Cases>;
+ static_assert(!IsDefaultCase<CaseFunc>, "NonDefaultCases called with a Default case");
+ // Attempt to dynamically cast the object to the handler type. If that
+ // succeeds, call the case handler with the cast object.
+ using CaseType = SwitchCaseType<CaseFunc>;
+ if (type->Is(&TypeInfo::Of<CaseType>())) {
+ auto* ptr = static_cast<CaseType*>(object);
+ if constexpr (kHasReturnType) {
+ new (result) RETURN_TYPE(static_cast<RETURN_TYPE>(std::get<0>(cases)(ptr)));
+ } else {
+ std::get<0>(cases)(ptr);
+ }
+ return true;
+ }
+ return false;
} else {
- return false;
+ // Multiple cases.
+ // Check the hashcode bits to see if there's any possibility of a case
+ // matching in these cases. If there isn't, we can skip all these cases.
+ if (type->full_hashcode & TypeInfo::CombinedHashCodeOf<SwitchCaseType<CASES>...>()) {
+ // There's a possibility. We need to scan further.
+ // Split the cases into two, and recurse.
+ constexpr size_t kMid = kNumCases / 2;
+ return NonDefaultCases(object, type, result, traits::Slice<0, kMid>(cases)) ||
+ NonDefaultCases(object, type, result,
+ traits::Slice<kMid, kNumCases - kMid>(cases));
+ } else {
+ return false;
+ }
}
- }
}
/// The implementation of Switch() for all cases.
/// @see NonDefaultCases
template <typename T, typename RETURN_TYPE, typename... CASES>
-inline void SwitchCases(T* object,
- RETURN_TYPE* result,
- std::tuple<CASES...>&& cases) {
- using Cases = std::tuple<CASES...>;
- static constexpr int kDefaultIndex = detail::IndexOfDefaultCase<Cases>();
- static_assert(
- kDefaultIndex == -1 || kDefaultIndex == std::tuple_size_v<Cases> - 1,
- "Default case must be last in Switch()");
- static constexpr bool kHasDefaultCase = kDefaultIndex >= 0;
- static constexpr bool kHasReturnType = !std::is_same_v<RETURN_TYPE, void>;
-
- if (object) {
- auto* type = &object->TypeInfo();
- if constexpr (kHasDefaultCase) {
- // Evaluate non-default cases.
- if (!detail::NonDefaultCases<T>(object, type, result,
- traits::Slice<0, kDefaultIndex>(cases))) {
- // Nothing matched. Evaluate default case.
- if constexpr (kHasReturnType) {
- *result =
- static_cast<RETURN_TYPE>(std::get<kDefaultIndex>(cases)({}));
+inline void SwitchCases(T* object, RETURN_TYPE* result, std::tuple<CASES...>&& cases) {
+ using Cases = std::tuple<CASES...>;
+
+ static constexpr int kDefaultIndex = detail::IndexOfDefaultCase<Cases>();
+ static constexpr bool kHasDefaultCase = kDefaultIndex >= 0;
+ static constexpr bool kHasReturnType = !std::is_same_v<RETURN_TYPE, void>;
+
+ // Static assertions
+ static constexpr bool kDefaultIsOK =
+ kDefaultIndex == -1 || kDefaultIndex == std::tuple_size_v<Cases> - 1;
+ static constexpr bool kReturnIsOK =
+ kHasDefaultCase || !kHasReturnType || std::is_constructible_v<RETURN_TYPE>;
+ static_assert(kDefaultIsOK, "Default case must be last in Switch()");
+ static_assert(kReturnIsOK,
+ "Switch() requires either a Default case or a return type that is either void or "
+                  "default-constructible");
+
+ // If the static asserts have fired, don't bother spewing more errors below
+ static constexpr bool kAllOK = kDefaultIsOK && kReturnIsOK;
+ if constexpr (kAllOK) {
+ if (object) {
+ auto* type = &object->TypeInfo();
+ if constexpr (kHasDefaultCase) {
+ // Evaluate non-default cases.
+ if (!detail::NonDefaultCases<T>(object, type, result,
+ traits::Slice<0, kDefaultIndex>(cases))) {
+ // Nothing matched. Evaluate default case.
+ if constexpr (kHasReturnType) {
+ new (result) RETURN_TYPE(
+ static_cast<RETURN_TYPE>(std::get<kDefaultIndex>(cases)({})));
+ } else {
+ std::get<kDefaultIndex>(cases)({});
+ }
+ }
+ } else {
+ if (!detail::NonDefaultCases<T>(object, type, result, std::move(cases))) {
+ // Nothing matched. No default case.
+ if constexpr (kHasReturnType) {
+ new (result) RETURN_TYPE();
+ }
+ }
+ }
} else {
- std::get<kDefaultIndex>(cases)({});
+ // Object is nullptr, so no cases can match
+ if constexpr (kHasDefaultCase) {
+ // Evaluate default case.
+ if constexpr (kHasReturnType) {
+ new (result)
+ RETURN_TYPE(static_cast<RETURN_TYPE>(std::get<kDefaultIndex>(cases)({})));
+ } else {
+ std::get<kDefaultIndex>(cases)({});
+ }
+ } else {
+ // No default case, no case can match.
+ if constexpr (kHasReturnType) {
+ new (result) RETURN_TYPE();
+ }
+ }
}
- }
- } else {
- detail::NonDefaultCases<T>(object, type, result, std::move(cases));
}
- } else {
- // Object is nullptr, so no cases can match
- if constexpr (kHasDefaultCase) {
- // Evaluate default case.
- if constexpr (kHasReturnType) {
- *result = static_cast<RETURN_TYPE>(std::get<kDefaultIndex>(cases)({}));
- } else {
- std::get<kDefaultIndex>(cases)({});
- }
- }
- }
}
/// Resolves to T if T is not nullptr_t, otherwise resolves to Ignore.
template <typename T>
-using NullptrToIgnore =
- std::conditional_t<std::is_same_v<T, std::nullptr_t>, Ignore, T>;
+using NullptrToIgnore = std::conditional_t<std::is_same_v<T, std::nullptr_t>, Ignore, T>;
/// Resolves to `const TYPE` if any of `CASE_RETURN_TYPES` are const or
/// pointer-to-const, otherwise resolves to TYPE.
@@ -693,55 +693,46 @@ using PropagateReturnConst = std::conditional_t<
TYPE>; // No: Passthrough
/// SwitchReturnTypeImpl is the implementation of SwitchReturnType
-template <bool IS_CASTABLE,
- typename REQUESTED_TYPE,
- typename... CASE_RETURN_TYPES>
+template <bool IS_CASTABLE, typename REQUESTED_TYPE, typename... CASE_RETURN_TYPES>
struct SwitchReturnTypeImpl;
/// SwitchReturnTypeImpl specialization for non-castable case types and an
/// explicitly specified return type.
template <typename REQUESTED_TYPE, typename... CASE_RETURN_TYPES>
-struct SwitchReturnTypeImpl</*IS_CASTABLE*/ false,
- REQUESTED_TYPE,
- CASE_RETURN_TYPES...> {
- /// Resolves to `REQUESTED_TYPE`
- using type = REQUESTED_TYPE;
+struct SwitchReturnTypeImpl</*IS_CASTABLE*/ false, REQUESTED_TYPE, CASE_RETURN_TYPES...> {
+ /// Resolves to `REQUESTED_TYPE`
+ using type = REQUESTED_TYPE;
};
/// SwitchReturnTypeImpl specialization for non-castable case types and an
/// inferred return type.
template <typename... CASE_RETURN_TYPES>
-struct SwitchReturnTypeImpl</*IS_CASTABLE*/ false,
- Infer,
- CASE_RETURN_TYPES...> {
- /// Resolves to the common type for all the cases return types.
- using type = std::common_type_t<CASE_RETURN_TYPES...>;
+struct SwitchReturnTypeImpl</*IS_CASTABLE*/ false, Infer, CASE_RETURN_TYPES...> {
+ /// Resolves to the common type for all the cases return types.
+ using type = std::common_type_t<CASE_RETURN_TYPES...>;
};
/// SwitchReturnTypeImpl specialization for castable case types and an
/// explicitly specified return type.
template <typename REQUESTED_TYPE, typename... CASE_RETURN_TYPES>
-struct SwitchReturnTypeImpl</*IS_CASTABLE*/ true,
- REQUESTED_TYPE,
- CASE_RETURN_TYPES...> {
- public:
- /// Resolves to `const REQUESTED_TYPE*` or `REQUESTED_TYPE*`
- using type = PropagateReturnConst<std::remove_pointer_t<REQUESTED_TYPE>,
- CASE_RETURN_TYPES...>*;
+struct SwitchReturnTypeImpl</*IS_CASTABLE*/ true, REQUESTED_TYPE, CASE_RETURN_TYPES...> {
+ public:
+ /// Resolves to `const REQUESTED_TYPE*` or `REQUESTED_TYPE*`
+ using type = PropagateReturnConst<std::remove_pointer_t<REQUESTED_TYPE>, CASE_RETURN_TYPES...>*;
};
/// SwitchReturnTypeImpl specialization for castable case types and an inferred
/// return type.
template <typename... CASE_RETURN_TYPES>
struct SwitchReturnTypeImpl</*IS_CASTABLE*/ true, Infer, CASE_RETURN_TYPES...> {
- private:
- using InferredType = CastableCommonBase<
- detail::NullptrToIgnore<std::remove_pointer_t<CASE_RETURN_TYPES>>...>;
-
- public:
- /// `const T*` or `T*`, where T is the common base type for all the castable
- /// case types.
- using type = PropagateReturnConst<InferredType, CASE_RETURN_TYPES...>*;
+ private:
+ using InferredType =
+ CastableCommonBase<detail::NullptrToIgnore<std::remove_pointer_t<CASE_RETURN_TYPES>>...>;
+
+ public:
+ /// `const T*` or `T*`, where T is the common base type for all the castable
+ /// case types.
+ using type = PropagateReturnConst<InferredType, CASE_RETURN_TYPES...>*;
};
/// Resolves to the return type for a Switch() with the requested return type
@@ -790,23 +781,25 @@ using SwitchReturnType = typename SwitchReturnTypeImpl<
/// @param cases the switch cases
/// @return the value returned by the called case. If no cases matched, then the
/// zero value for the consistent case type.
-template <typename RETURN_TYPE = detail::Infer,
- typename T = CastableBase,
- typename... CASES>
+template <typename RETURN_TYPE = detail::Infer, typename T = CastableBase, typename... CASES>
inline auto Switch(T* object, CASES&&... cases) {
- using ReturnType =
- detail::SwitchReturnType<RETURN_TYPE, traits::ReturnType<CASES>...>;
- static constexpr bool kHasReturnType = !std::is_same_v<ReturnType, void>;
-
- if constexpr (kHasReturnType) {
- ReturnType res = {};
- detail::SwitchCases(object, &res,
- std::forward_as_tuple(std::forward<CASES>(cases)...));
- return res;
- } else {
- detail::SwitchCases<T, void>(
- object, nullptr, std::forward_as_tuple(std::forward<CASES>(cases)...));
- }
+ using ReturnType = detail::SwitchReturnType<RETURN_TYPE, traits::ReturnType<CASES>...>;
+ static constexpr bool kHasReturnType = !std::is_same_v<ReturnType, void>;
+
+ if constexpr (kHasReturnType) {
+ // Replacement for std::aligned_storage as this is broken on earlier versions of MSVC.
+ struct alignas(alignof(ReturnType)) ReturnStorage {
+ uint8_t data[sizeof(ReturnType)];
+ };
+ ReturnStorage storage;
+ auto* res = utils::Bitcast<ReturnType*>(&storage);
+ TINT_DEFER(res->~ReturnType());
+ detail::SwitchCases(object, res, std::forward_as_tuple(std::forward<CASES>(cases)...));
+ return *res;
+ } else {
+ detail::SwitchCases<T, void>(object, nullptr,
+ std::forward_as_tuple(std::forward<CASES>(cases)...));
+ }
}
} // namespace tint
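
For orientation, the sketch below shows how the reworked Switch() is typically invoked once this change lands. It is illustrative only and not part of the patch: the Animal/Mammal/Amphibian/Frog hierarchy approximates the fixtures declared in castable_test.cc further down in this diff, and the include path, the explicit tint::CastableBase base, and the omitted TINT_INSTANTIATE_TYPEINFO() registrations are assumptions rather than verbatim Tint code.

#include "src/tint/castable.h"

namespace example {

// Minimal castable hierarchy, modelled on the test fixtures below.
// (TINT_INSTANTIATE_TYPEINFO() definitions would normally live in a .cc file.)
struct Animal : public tint::Castable<Animal, tint::CastableBase> {};
struct Mammal : public tint::Castable<Mammal, Animal> {};
struct Amphibian : public tint::Castable<Amphibian, Animal> {};
struct Frog : public tint::Castable<Frog, Amphibian> {};

const char* Describe(Animal* animal) {
    // The return type is inferred as the common type of the case return types
    // (const char* here). With this change the result is constructed in place
    // in the caller's ReturnStorage instead of being assigned, so a return
    // type only needs to be default-constructible when no Default case is
    // provided.
    return tint::Switch(
        animal,  //
        [](Mammal*) { return "mammal"; },
        [](Amphibian*) { return "amphibian"; },
        [](tint::Default) { return "unknown"; });
}

}  // namespace example

With no Default case and an unmatched object, the new placement-new path default-constructs the result, so an inferred pointer return type comes back as nullptr; SwitchReturnValueWithoutDefault in castable_test.cc below exercises exactly that behaviour.
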
diff --git a/chromium/third_party/dawn/src/tint/castable_bench.cc b/chromium/third_party/dawn/src/tint/castable_bench.cc
index 839a932f5cf..7c7e0ef34bf 100644
--- a/chromium/third_party/dawn/src/tint/castable_bench.cc
+++ b/chromium/third_party/dawn/src/tint/castable_bench.cc
@@ -100,127 +100,127 @@ using AllTypes = std::tuple<Base,
CCC>;
std::vector<std::unique_ptr<Base>> MakeObjects() {
- std::vector<std::unique_ptr<Base>> out;
- out.emplace_back(std::make_unique<Base>());
- out.emplace_back(std::make_unique<A>());
- out.emplace_back(std::make_unique<AA>());
- out.emplace_back(std::make_unique<AAA>());
- out.emplace_back(std::make_unique<AAB>());
- out.emplace_back(std::make_unique<AAC>());
- out.emplace_back(std::make_unique<AB>());
- out.emplace_back(std::make_unique<ABA>());
- out.emplace_back(std::make_unique<ABB>());
- out.emplace_back(std::make_unique<ABC>());
- out.emplace_back(std::make_unique<AC>());
- out.emplace_back(std::make_unique<ACA>());
- out.emplace_back(std::make_unique<ACB>());
- out.emplace_back(std::make_unique<ACC>());
- out.emplace_back(std::make_unique<B>());
- out.emplace_back(std::make_unique<BA>());
- out.emplace_back(std::make_unique<BAA>());
- out.emplace_back(std::make_unique<BAB>());
- out.emplace_back(std::make_unique<BAC>());
- out.emplace_back(std::make_unique<BB>());
- out.emplace_back(std::make_unique<BBA>());
- out.emplace_back(std::make_unique<BBB>());
- out.emplace_back(std::make_unique<BBC>());
- out.emplace_back(std::make_unique<BC>());
- out.emplace_back(std::make_unique<BCA>());
- out.emplace_back(std::make_unique<BCB>());
- out.emplace_back(std::make_unique<BCC>());
- out.emplace_back(std::make_unique<C>());
- out.emplace_back(std::make_unique<CA>());
- out.emplace_back(std::make_unique<CAA>());
- out.emplace_back(std::make_unique<CAB>());
- out.emplace_back(std::make_unique<CAC>());
- out.emplace_back(std::make_unique<CB>());
- out.emplace_back(std::make_unique<CBA>());
- out.emplace_back(std::make_unique<CBB>());
- out.emplace_back(std::make_unique<CBC>());
- out.emplace_back(std::make_unique<CC>());
- out.emplace_back(std::make_unique<CCA>());
- out.emplace_back(std::make_unique<CCB>());
- out.emplace_back(std::make_unique<CCC>());
- return out;
+ std::vector<std::unique_ptr<Base>> out;
+ out.emplace_back(std::make_unique<Base>());
+ out.emplace_back(std::make_unique<A>());
+ out.emplace_back(std::make_unique<AA>());
+ out.emplace_back(std::make_unique<AAA>());
+ out.emplace_back(std::make_unique<AAB>());
+ out.emplace_back(std::make_unique<AAC>());
+ out.emplace_back(std::make_unique<AB>());
+ out.emplace_back(std::make_unique<ABA>());
+ out.emplace_back(std::make_unique<ABB>());
+ out.emplace_back(std::make_unique<ABC>());
+ out.emplace_back(std::make_unique<AC>());
+ out.emplace_back(std::make_unique<ACA>());
+ out.emplace_back(std::make_unique<ACB>());
+ out.emplace_back(std::make_unique<ACC>());
+ out.emplace_back(std::make_unique<B>());
+ out.emplace_back(std::make_unique<BA>());
+ out.emplace_back(std::make_unique<BAA>());
+ out.emplace_back(std::make_unique<BAB>());
+ out.emplace_back(std::make_unique<BAC>());
+ out.emplace_back(std::make_unique<BB>());
+ out.emplace_back(std::make_unique<BBA>());
+ out.emplace_back(std::make_unique<BBB>());
+ out.emplace_back(std::make_unique<BBC>());
+ out.emplace_back(std::make_unique<BC>());
+ out.emplace_back(std::make_unique<BCA>());
+ out.emplace_back(std::make_unique<BCB>());
+ out.emplace_back(std::make_unique<BCC>());
+ out.emplace_back(std::make_unique<C>());
+ out.emplace_back(std::make_unique<CA>());
+ out.emplace_back(std::make_unique<CAA>());
+ out.emplace_back(std::make_unique<CAB>());
+ out.emplace_back(std::make_unique<CAC>());
+ out.emplace_back(std::make_unique<CB>());
+ out.emplace_back(std::make_unique<CBA>());
+ out.emplace_back(std::make_unique<CBB>());
+ out.emplace_back(std::make_unique<CBC>());
+ out.emplace_back(std::make_unique<CC>());
+ out.emplace_back(std::make_unique<CCA>());
+ out.emplace_back(std::make_unique<CCB>());
+ out.emplace_back(std::make_unique<CCC>());
+ return out;
}
void CastableLargeSwitch(::benchmark::State& state) {
- auto objects = MakeObjects();
- size_t i = 0;
- for (auto _ : state) {
- auto* object = objects[i % objects.size()].get();
- Switch(
- object, //
- [&](const AAA*) { ::benchmark::DoNotOptimize(i += 40); },
- [&](const AAB*) { ::benchmark::DoNotOptimize(i += 50); },
- [&](const AAC*) { ::benchmark::DoNotOptimize(i += 60); },
- [&](const ABA*) { ::benchmark::DoNotOptimize(i += 80); },
- [&](const ABB*) { ::benchmark::DoNotOptimize(i += 90); },
- [&](const ABC*) { ::benchmark::DoNotOptimize(i += 100); },
- [&](const ACA*) { ::benchmark::DoNotOptimize(i += 120); },
- [&](const ACB*) { ::benchmark::DoNotOptimize(i += 130); },
- [&](const ACC*) { ::benchmark::DoNotOptimize(i += 140); },
- [&](const BAA*) { ::benchmark::DoNotOptimize(i += 170); },
- [&](const BAB*) { ::benchmark::DoNotOptimize(i += 180); },
- [&](const BAC*) { ::benchmark::DoNotOptimize(i += 190); },
- [&](const BBA*) { ::benchmark::DoNotOptimize(i += 210); },
- [&](const BBB*) { ::benchmark::DoNotOptimize(i += 220); },
- [&](const BBC*) { ::benchmark::DoNotOptimize(i += 230); },
- [&](const BCA*) { ::benchmark::DoNotOptimize(i += 250); },
- [&](const BCB*) { ::benchmark::DoNotOptimize(i += 260); },
- [&](const BCC*) { ::benchmark::DoNotOptimize(i += 270); },
- [&](const CA*) { ::benchmark::DoNotOptimize(i += 290); },
- [&](const CAA*) { ::benchmark::DoNotOptimize(i += 300); },
- [&](const CAB*) { ::benchmark::DoNotOptimize(i += 310); },
- [&](const CAC*) { ::benchmark::DoNotOptimize(i += 320); },
- [&](const CBA*) { ::benchmark::DoNotOptimize(i += 340); },
- [&](const CBB*) { ::benchmark::DoNotOptimize(i += 350); },
- [&](const CBC*) { ::benchmark::DoNotOptimize(i += 360); },
- [&](const CCA*) { ::benchmark::DoNotOptimize(i += 380); },
- [&](const CCB*) { ::benchmark::DoNotOptimize(i += 390); },
- [&](const CCC*) { ::benchmark::DoNotOptimize(i += 400); },
- [&](Default) { ::benchmark::DoNotOptimize(i += 123); });
- i = (i * 31) ^ (i << 5);
- }
+ auto objects = MakeObjects();
+ size_t i = 0;
+ for (auto _ : state) {
+ auto* object = objects[i % objects.size()].get();
+ Switch(
+ object, //
+ [&](const AAA*) { ::benchmark::DoNotOptimize(i += 40); },
+ [&](const AAB*) { ::benchmark::DoNotOptimize(i += 50); },
+ [&](const AAC*) { ::benchmark::DoNotOptimize(i += 60); },
+ [&](const ABA*) { ::benchmark::DoNotOptimize(i += 80); },
+ [&](const ABB*) { ::benchmark::DoNotOptimize(i += 90); },
+ [&](const ABC*) { ::benchmark::DoNotOptimize(i += 100); },
+ [&](const ACA*) { ::benchmark::DoNotOptimize(i += 120); },
+ [&](const ACB*) { ::benchmark::DoNotOptimize(i += 130); },
+ [&](const ACC*) { ::benchmark::DoNotOptimize(i += 140); },
+ [&](const BAA*) { ::benchmark::DoNotOptimize(i += 170); },
+ [&](const BAB*) { ::benchmark::DoNotOptimize(i += 180); },
+ [&](const BAC*) { ::benchmark::DoNotOptimize(i += 190); },
+ [&](const BBA*) { ::benchmark::DoNotOptimize(i += 210); },
+ [&](const BBB*) { ::benchmark::DoNotOptimize(i += 220); },
+ [&](const BBC*) { ::benchmark::DoNotOptimize(i += 230); },
+ [&](const BCA*) { ::benchmark::DoNotOptimize(i += 250); },
+ [&](const BCB*) { ::benchmark::DoNotOptimize(i += 260); },
+ [&](const BCC*) { ::benchmark::DoNotOptimize(i += 270); },
+ [&](const CA*) { ::benchmark::DoNotOptimize(i += 290); },
+ [&](const CAA*) { ::benchmark::DoNotOptimize(i += 300); },
+ [&](const CAB*) { ::benchmark::DoNotOptimize(i += 310); },
+ [&](const CAC*) { ::benchmark::DoNotOptimize(i += 320); },
+ [&](const CBA*) { ::benchmark::DoNotOptimize(i += 340); },
+ [&](const CBB*) { ::benchmark::DoNotOptimize(i += 350); },
+ [&](const CBC*) { ::benchmark::DoNotOptimize(i += 360); },
+ [&](const CCA*) { ::benchmark::DoNotOptimize(i += 380); },
+ [&](const CCB*) { ::benchmark::DoNotOptimize(i += 390); },
+ [&](const CCC*) { ::benchmark::DoNotOptimize(i += 400); },
+ [&](Default) { ::benchmark::DoNotOptimize(i += 123); });
+ i = (i * 31) ^ (i << 5);
+ }
}
BENCHMARK(CastableLargeSwitch);
void CastableMediumSwitch(::benchmark::State& state) {
- auto objects = MakeObjects();
- size_t i = 0;
- for (auto _ : state) {
- auto* object = objects[i % objects.size()].get();
- Switch(
- object, //
- [&](const ACB*) { ::benchmark::DoNotOptimize(i += 130); },
- [&](const BAA*) { ::benchmark::DoNotOptimize(i += 170); },
- [&](const BAB*) { ::benchmark::DoNotOptimize(i += 180); },
- [&](const BBA*) { ::benchmark::DoNotOptimize(i += 210); },
- [&](const BBB*) { ::benchmark::DoNotOptimize(i += 220); },
- [&](const CAA*) { ::benchmark::DoNotOptimize(i += 300); },
- [&](const CCA*) { ::benchmark::DoNotOptimize(i += 380); },
- [&](const CCB*) { ::benchmark::DoNotOptimize(i += 390); },
- [&](const CCC*) { ::benchmark::DoNotOptimize(i += 400); },
- [&](Default) { ::benchmark::DoNotOptimize(i += 123); });
- i = (i * 31) ^ (i << 5);
- }
+ auto objects = MakeObjects();
+ size_t i = 0;
+ for (auto _ : state) {
+ auto* object = objects[i % objects.size()].get();
+ Switch(
+ object, //
+ [&](const ACB*) { ::benchmark::DoNotOptimize(i += 130); },
+ [&](const BAA*) { ::benchmark::DoNotOptimize(i += 170); },
+ [&](const BAB*) { ::benchmark::DoNotOptimize(i += 180); },
+ [&](const BBA*) { ::benchmark::DoNotOptimize(i += 210); },
+ [&](const BBB*) { ::benchmark::DoNotOptimize(i += 220); },
+ [&](const CAA*) { ::benchmark::DoNotOptimize(i += 300); },
+ [&](const CCA*) { ::benchmark::DoNotOptimize(i += 380); },
+ [&](const CCB*) { ::benchmark::DoNotOptimize(i += 390); },
+ [&](const CCC*) { ::benchmark::DoNotOptimize(i += 400); },
+ [&](Default) { ::benchmark::DoNotOptimize(i += 123); });
+ i = (i * 31) ^ (i << 5);
+ }
}
BENCHMARK(CastableMediumSwitch);
void CastableSmallSwitch(::benchmark::State& state) {
- auto objects = MakeObjects();
- size_t i = 0;
- for (auto _ : state) {
- auto* object = objects[i % objects.size()].get();
- Switch(
- object, //
- [&](const AAB*) { ::benchmark::DoNotOptimize(i += 30); },
- [&](const CAC*) { ::benchmark::DoNotOptimize(i += 290); },
- [&](const CAA*) { ::benchmark::DoNotOptimize(i += 300); });
- i = (i * 31) ^ (i << 5);
- }
+ auto objects = MakeObjects();
+ size_t i = 0;
+ for (auto _ : state) {
+ auto* object = objects[i % objects.size()].get();
+ Switch(
+ object, //
+ [&](const AAB*) { ::benchmark::DoNotOptimize(i += 30); },
+ [&](const CAC*) { ::benchmark::DoNotOptimize(i += 290); },
+ [&](const CAA*) { ::benchmark::DoNotOptimize(i += 300); });
+ i = (i * 31) ^ (i << 5);
+ }
}
BENCHMARK(CastableSmallSwitch);
diff --git a/chromium/third_party/dawn/src/tint/castable_test.cc b/chromium/third_party/dawn/src/tint/castable_test.cc
index 7ed66cbfa9d..52efd80e9dc 100644
--- a/chromium/third_party/dawn/src/tint/castable_test.cc
+++ b/chromium/third_party/dawn/src/tint/castable_test.cc
@@ -34,684 +34,706 @@ struct Iguana : public tint::Castable<Iguana, Lizard> {};
namespace {
TEST(CastableBase, Is) {
- std::unique_ptr<CastableBase> frog = std::make_unique<Frog>();
- std::unique_ptr<CastableBase> bear = std::make_unique<Bear>();
- std::unique_ptr<CastableBase> gecko = std::make_unique<Gecko>();
+ std::unique_ptr<CastableBase> frog = std::make_unique<Frog>();
+ std::unique_ptr<CastableBase> bear = std::make_unique<Bear>();
+ std::unique_ptr<CastableBase> gecko = std::make_unique<Gecko>();
- ASSERT_TRUE(frog->Is<Animal>());
- ASSERT_TRUE(bear->Is<Animal>());
- ASSERT_TRUE(gecko->Is<Animal>());
+ ASSERT_TRUE(frog->Is<Animal>());
+ ASSERT_TRUE(bear->Is<Animal>());
+ ASSERT_TRUE(gecko->Is<Animal>());
- ASSERT_TRUE(frog->Is<Amphibian>());
- ASSERT_FALSE(bear->Is<Amphibian>());
- ASSERT_FALSE(gecko->Is<Amphibian>());
+ ASSERT_TRUE(frog->Is<Amphibian>());
+ ASSERT_FALSE(bear->Is<Amphibian>());
+ ASSERT_FALSE(gecko->Is<Amphibian>());
- ASSERT_FALSE(frog->Is<Mammal>());
- ASSERT_TRUE(bear->Is<Mammal>());
- ASSERT_FALSE(gecko->Is<Mammal>());
+ ASSERT_FALSE(frog->Is<Mammal>());
+ ASSERT_TRUE(bear->Is<Mammal>());
+ ASSERT_FALSE(gecko->Is<Mammal>());
- ASSERT_FALSE(frog->Is<Reptile>());
- ASSERT_FALSE(bear->Is<Reptile>());
- ASSERT_TRUE(gecko->Is<Reptile>());
+ ASSERT_FALSE(frog->Is<Reptile>());
+ ASSERT_FALSE(bear->Is<Reptile>());
+ ASSERT_TRUE(gecko->Is<Reptile>());
}
TEST(CastableBase, Is_kDontErrorOnImpossibleCast) {
- // Unlike TEST(CastableBase, Is), we're dynamically querying [A -> B] without
- // going via CastableBase.
- auto frog = std::make_unique<Frog>();
- auto bear = std::make_unique<Bear>();
- auto gecko = std::make_unique<Gecko>();
-
- ASSERT_TRUE((frog->Is<Animal, kDontErrorOnImpossibleCast>()));
- ASSERT_TRUE((bear->Is<Animal, kDontErrorOnImpossibleCast>()));
- ASSERT_TRUE((gecko->Is<Animal, kDontErrorOnImpossibleCast>()));
-
- ASSERT_TRUE((frog->Is<Amphibian, kDontErrorOnImpossibleCast>()));
- ASSERT_FALSE((bear->Is<Amphibian, kDontErrorOnImpossibleCast>()));
- ASSERT_FALSE((gecko->Is<Amphibian, kDontErrorOnImpossibleCast>()));
-
- ASSERT_FALSE((frog->Is<Mammal, kDontErrorOnImpossibleCast>()));
- ASSERT_TRUE((bear->Is<Mammal, kDontErrorOnImpossibleCast>()));
- ASSERT_FALSE((gecko->Is<Mammal, kDontErrorOnImpossibleCast>()));
-
- ASSERT_FALSE((frog->Is<Reptile, kDontErrorOnImpossibleCast>()));
- ASSERT_FALSE((bear->Is<Reptile, kDontErrorOnImpossibleCast>()));
- ASSERT_TRUE((gecko->Is<Reptile, kDontErrorOnImpossibleCast>()));
+ // Unlike TEST(CastableBase, Is), we're dynamically querying [A -> B] without
+ // going via CastableBase.
+ auto frog = std::make_unique<Frog>();
+ auto bear = std::make_unique<Bear>();
+ auto gecko = std::make_unique<Gecko>();
+
+ ASSERT_TRUE((frog->Is<Animal, kDontErrorOnImpossibleCast>()));
+ ASSERT_TRUE((bear->Is<Animal, kDontErrorOnImpossibleCast>()));
+ ASSERT_TRUE((gecko->Is<Animal, kDontErrorOnImpossibleCast>()));
+
+ ASSERT_TRUE((frog->Is<Amphibian, kDontErrorOnImpossibleCast>()));
+ ASSERT_FALSE((bear->Is<Amphibian, kDontErrorOnImpossibleCast>()));
+ ASSERT_FALSE((gecko->Is<Amphibian, kDontErrorOnImpossibleCast>()));
+
+ ASSERT_FALSE((frog->Is<Mammal, kDontErrorOnImpossibleCast>()));
+ ASSERT_TRUE((bear->Is<Mammal, kDontErrorOnImpossibleCast>()));
+ ASSERT_FALSE((gecko->Is<Mammal, kDontErrorOnImpossibleCast>()));
+
+ ASSERT_FALSE((frog->Is<Reptile, kDontErrorOnImpossibleCast>()));
+ ASSERT_FALSE((bear->Is<Reptile, kDontErrorOnImpossibleCast>()));
+ ASSERT_TRUE((gecko->Is<Reptile, kDontErrorOnImpossibleCast>()));
}
TEST(CastableBase, IsWithPredicate) {
- std::unique_ptr<CastableBase> frog = std::make_unique<Frog>();
-
- frog->Is<Animal>([&frog](const Animal* a) {
- EXPECT_EQ(a, frog.get());
- return true;
- });
-
- ASSERT_TRUE((frog->Is<Animal>([](const Animal*) { return true; })));
- ASSERT_FALSE((frog->Is<Animal>([](const Animal*) { return false; })));
-
- // Predicate not called if cast is invalid
- auto expect_not_called = [] { FAIL() << "Should not be called"; };
- ASSERT_FALSE((frog->Is<Bear>([&](const Animal*) {
- expect_not_called();
- return true;
- })));
+ std::unique_ptr<CastableBase> frog = std::make_unique<Frog>();
+
+ frog->Is<Animal>([&frog](const Animal* a) {
+ EXPECT_EQ(a, frog.get());
+ return true;
+ });
+
+ ASSERT_TRUE((frog->Is<Animal>([](const Animal*) { return true; })));
+ ASSERT_FALSE((frog->Is<Animal>([](const Animal*) { return false; })));
+
+ // Predicate not called if cast is invalid
+ auto expect_not_called = [] { FAIL() << "Should not be called"; };
+ ASSERT_FALSE((frog->Is<Bear>([&](const Animal*) {
+ expect_not_called();
+ return true;
+ })));
}
TEST(CastableBase, IsAnyOf) {
- std::unique_ptr<CastableBase> frog = std::make_unique<Frog>();
- std::unique_ptr<CastableBase> bear = std::make_unique<Bear>();
- std::unique_ptr<CastableBase> gecko = std::make_unique<Gecko>();
-
- ASSERT_TRUE((frog->IsAnyOf<Animal, Mammal, Amphibian, Reptile>()));
- ASSERT_TRUE((frog->IsAnyOf<Mammal, Amphibian>()));
- ASSERT_TRUE((frog->IsAnyOf<Amphibian, Reptile>()));
- ASSERT_FALSE((frog->IsAnyOf<Mammal, Reptile>()));
-
- ASSERT_TRUE((bear->IsAnyOf<Animal, Mammal, Amphibian, Reptile>()));
- ASSERT_TRUE((bear->IsAnyOf<Mammal, Amphibian>()));
- ASSERT_TRUE((bear->IsAnyOf<Mammal, Reptile>()));
- ASSERT_FALSE((bear->IsAnyOf<Amphibian, Reptile>()));
-
- ASSERT_TRUE((gecko->IsAnyOf<Animal, Mammal, Amphibian, Reptile>()));
- ASSERT_TRUE((gecko->IsAnyOf<Mammal, Reptile>()));
- ASSERT_TRUE((gecko->IsAnyOf<Amphibian, Reptile>()));
- ASSERT_FALSE((gecko->IsAnyOf<Mammal, Amphibian>()));
+ std::unique_ptr<CastableBase> frog = std::make_unique<Frog>();
+ std::unique_ptr<CastableBase> bear = std::make_unique<Bear>();
+ std::unique_ptr<CastableBase> gecko = std::make_unique<Gecko>();
+
+ ASSERT_TRUE((frog->IsAnyOf<Animal, Mammal, Amphibian, Reptile>()));
+ ASSERT_TRUE((frog->IsAnyOf<Mammal, Amphibian>()));
+ ASSERT_TRUE((frog->IsAnyOf<Amphibian, Reptile>()));
+ ASSERT_FALSE((frog->IsAnyOf<Mammal, Reptile>()));
+
+ ASSERT_TRUE((bear->IsAnyOf<Animal, Mammal, Amphibian, Reptile>()));
+ ASSERT_TRUE((bear->IsAnyOf<Mammal, Amphibian>()));
+ ASSERT_TRUE((bear->IsAnyOf<Mammal, Reptile>()));
+ ASSERT_FALSE((bear->IsAnyOf<Amphibian, Reptile>()));
+
+ ASSERT_TRUE((gecko->IsAnyOf<Animal, Mammal, Amphibian, Reptile>()));
+ ASSERT_TRUE((gecko->IsAnyOf<Mammal, Reptile>()));
+ ASSERT_TRUE((gecko->IsAnyOf<Amphibian, Reptile>()));
+ ASSERT_FALSE((gecko->IsAnyOf<Mammal, Amphibian>()));
}
TEST(CastableBase, As) {
- std::unique_ptr<CastableBase> frog = std::make_unique<Frog>();
- std::unique_ptr<CastableBase> bear = std::make_unique<Bear>();
- std::unique_ptr<CastableBase> gecko = std::make_unique<Gecko>();
+ std::unique_ptr<CastableBase> frog = std::make_unique<Frog>();
+ std::unique_ptr<CastableBase> bear = std::make_unique<Bear>();
+ std::unique_ptr<CastableBase> gecko = std::make_unique<Gecko>();
- ASSERT_EQ(frog->As<Animal>(), static_cast<Animal*>(frog.get()));
- ASSERT_EQ(bear->As<Animal>(), static_cast<Animal*>(bear.get()));
- ASSERT_EQ(gecko->As<Animal>(), static_cast<Animal*>(gecko.get()));
+ ASSERT_EQ(frog->As<Animal>(), static_cast<Animal*>(frog.get()));
+ ASSERT_EQ(bear->As<Animal>(), static_cast<Animal*>(bear.get()));
+ ASSERT_EQ(gecko->As<Animal>(), static_cast<Animal*>(gecko.get()));
- ASSERT_EQ(frog->As<Amphibian>(), static_cast<Amphibian*>(frog.get()));
- ASSERT_EQ(bear->As<Amphibian>(), nullptr);
- ASSERT_EQ(gecko->As<Amphibian>(), nullptr);
+ ASSERT_EQ(frog->As<Amphibian>(), static_cast<Amphibian*>(frog.get()));
+ ASSERT_EQ(bear->As<Amphibian>(), nullptr);
+ ASSERT_EQ(gecko->As<Amphibian>(), nullptr);
- ASSERT_EQ(frog->As<Mammal>(), nullptr);
- ASSERT_EQ(bear->As<Mammal>(), static_cast<Mammal*>(bear.get()));
- ASSERT_EQ(gecko->As<Mammal>(), nullptr);
+ ASSERT_EQ(frog->As<Mammal>(), nullptr);
+ ASSERT_EQ(bear->As<Mammal>(), static_cast<Mammal*>(bear.get()));
+ ASSERT_EQ(gecko->As<Mammal>(), nullptr);
- ASSERT_EQ(frog->As<Reptile>(), nullptr);
- ASSERT_EQ(bear->As<Reptile>(), nullptr);
- ASSERT_EQ(gecko->As<Reptile>(), static_cast<Reptile*>(gecko.get()));
+ ASSERT_EQ(frog->As<Reptile>(), nullptr);
+ ASSERT_EQ(bear->As<Reptile>(), nullptr);
+ ASSERT_EQ(gecko->As<Reptile>(), static_cast<Reptile*>(gecko.get()));
}
TEST(CastableBase, As_kDontErrorOnImpossibleCast) {
- // Unlike TEST(CastableBase, As), we're dynamically casting [A -> B] without
- // going via CastableBase.
- auto frog = std::make_unique<Frog>();
- auto bear = std::make_unique<Bear>();
- auto gecko = std::make_unique<Gecko>();
-
- ASSERT_EQ((frog->As<Animal, kDontErrorOnImpossibleCast>()),
- static_cast<Animal*>(frog.get()));
- ASSERT_EQ((bear->As<Animal, kDontErrorOnImpossibleCast>()),
- static_cast<Animal*>(bear.get()));
- ASSERT_EQ((gecko->As<Animal, kDontErrorOnImpossibleCast>()),
- static_cast<Animal*>(gecko.get()));
-
- ASSERT_EQ((frog->As<Amphibian, kDontErrorOnImpossibleCast>()),
- static_cast<Amphibian*>(frog.get()));
- ASSERT_EQ((bear->As<Amphibian, kDontErrorOnImpossibleCast>()), nullptr);
- ASSERT_EQ((gecko->As<Amphibian, kDontErrorOnImpossibleCast>()), nullptr);
-
- ASSERT_EQ((frog->As<Mammal, kDontErrorOnImpossibleCast>()), nullptr);
- ASSERT_EQ((bear->As<Mammal, kDontErrorOnImpossibleCast>()),
- static_cast<Mammal*>(bear.get()));
- ASSERT_EQ((gecko->As<Mammal, kDontErrorOnImpossibleCast>()), nullptr);
-
- ASSERT_EQ((frog->As<Reptile, kDontErrorOnImpossibleCast>()), nullptr);
- ASSERT_EQ((bear->As<Reptile, kDontErrorOnImpossibleCast>()), nullptr);
- ASSERT_EQ((gecko->As<Reptile, kDontErrorOnImpossibleCast>()),
- static_cast<Reptile*>(gecko.get()));
+ // Unlike TEST(CastableBase, As), we're dynamically casting [A -> B] without
+ // going via CastableBase.
+ auto frog = std::make_unique<Frog>();
+ auto bear = std::make_unique<Bear>();
+ auto gecko = std::make_unique<Gecko>();
+
+ ASSERT_EQ((frog->As<Animal, kDontErrorOnImpossibleCast>()), static_cast<Animal*>(frog.get()));
+ ASSERT_EQ((bear->As<Animal, kDontErrorOnImpossibleCast>()), static_cast<Animal*>(bear.get()));
+ ASSERT_EQ((gecko->As<Animal, kDontErrorOnImpossibleCast>()), static_cast<Animal*>(gecko.get()));
+
+ ASSERT_EQ((frog->As<Amphibian, kDontErrorOnImpossibleCast>()),
+ static_cast<Amphibian*>(frog.get()));
+ ASSERT_EQ((bear->As<Amphibian, kDontErrorOnImpossibleCast>()), nullptr);
+ ASSERT_EQ((gecko->As<Amphibian, kDontErrorOnImpossibleCast>()), nullptr);
+
+ ASSERT_EQ((frog->As<Mammal, kDontErrorOnImpossibleCast>()), nullptr);
+ ASSERT_EQ((bear->As<Mammal, kDontErrorOnImpossibleCast>()), static_cast<Mammal*>(bear.get()));
+ ASSERT_EQ((gecko->As<Mammal, kDontErrorOnImpossibleCast>()), nullptr);
+
+ ASSERT_EQ((frog->As<Reptile, kDontErrorOnImpossibleCast>()), nullptr);
+ ASSERT_EQ((bear->As<Reptile, kDontErrorOnImpossibleCast>()), nullptr);
+ ASSERT_EQ((gecko->As<Reptile, kDontErrorOnImpossibleCast>()),
+ static_cast<Reptile*>(gecko.get()));
}
TEST(Castable, Is) {
- std::unique_ptr<Animal> frog = std::make_unique<Frog>();
- std::unique_ptr<Animal> bear = std::make_unique<Bear>();
- std::unique_ptr<Animal> gecko = std::make_unique<Gecko>();
+ std::unique_ptr<Animal> frog = std::make_unique<Frog>();
+ std::unique_ptr<Animal> bear = std::make_unique<Bear>();
+ std::unique_ptr<Animal> gecko = std::make_unique<Gecko>();
- ASSERT_TRUE(frog->Is<Animal>());
- ASSERT_TRUE(bear->Is<Animal>());
- ASSERT_TRUE(gecko->Is<Animal>());
+ ASSERT_TRUE(frog->Is<Animal>());
+ ASSERT_TRUE(bear->Is<Animal>());
+ ASSERT_TRUE(gecko->Is<Animal>());
- ASSERT_TRUE(frog->Is<Amphibian>());
- ASSERT_FALSE(bear->Is<Amphibian>());
- ASSERT_FALSE(gecko->Is<Amphibian>());
+ ASSERT_TRUE(frog->Is<Amphibian>());
+ ASSERT_FALSE(bear->Is<Amphibian>());
+ ASSERT_FALSE(gecko->Is<Amphibian>());
- ASSERT_FALSE(frog->Is<Mammal>());
- ASSERT_TRUE(bear->Is<Mammal>());
- ASSERT_FALSE(gecko->Is<Mammal>());
+ ASSERT_FALSE(frog->Is<Mammal>());
+ ASSERT_TRUE(bear->Is<Mammal>());
+ ASSERT_FALSE(gecko->Is<Mammal>());
- ASSERT_FALSE(frog->Is<Reptile>());
- ASSERT_FALSE(bear->Is<Reptile>());
- ASSERT_TRUE(gecko->Is<Reptile>());
+ ASSERT_FALSE(frog->Is<Reptile>());
+ ASSERT_FALSE(bear->Is<Reptile>());
+ ASSERT_TRUE(gecko->Is<Reptile>());
}
TEST(Castable, IsWithPredicate) {
- std::unique_ptr<Animal> frog = std::make_unique<Frog>();
-
- frog->Is([&frog](const Animal* a) {
- EXPECT_EQ(a, frog.get());
- return true;
- });
-
- ASSERT_TRUE((frog->Is([](const Animal*) { return true; })));
- ASSERT_FALSE((frog->Is([](const Animal*) { return false; })));
-
- // Predicate not called if cast is invalid
- auto expect_not_called = [] { FAIL() << "Should not be called"; };
- ASSERT_FALSE((frog->Is([&](const Bear*) {
- expect_not_called();
- return true;
- })));
+ std::unique_ptr<Animal> frog = std::make_unique<Frog>();
+
+ frog->Is([&frog](const Animal* a) {
+ EXPECT_EQ(a, frog.get());
+ return true;
+ });
+
+ ASSERT_TRUE((frog->Is([](const Animal*) { return true; })));
+ ASSERT_FALSE((frog->Is([](const Animal*) { return false; })));
+
+ // Predicate not called if cast is invalid
+ auto expect_not_called = [] { FAIL() << "Should not be called"; };
+ ASSERT_FALSE((frog->Is([&](const Bear*) {
+ expect_not_called();
+ return true;
+ })));
}
TEST(Castable, As) {
- std::unique_ptr<Animal> frog = std::make_unique<Frog>();
- std::unique_ptr<Animal> bear = std::make_unique<Bear>();
- std::unique_ptr<Animal> gecko = std::make_unique<Gecko>();
+ std::unique_ptr<Animal> frog = std::make_unique<Frog>();
+ std::unique_ptr<Animal> bear = std::make_unique<Bear>();
+ std::unique_ptr<Animal> gecko = std::make_unique<Gecko>();
- ASSERT_EQ(frog->As<Animal>(), static_cast<Animal*>(frog.get()));
- ASSERT_EQ(bear->As<Animal>(), static_cast<Animal*>(bear.get()));
- ASSERT_EQ(gecko->As<Animal>(), static_cast<Animal*>(gecko.get()));
+ ASSERT_EQ(frog->As<Animal>(), static_cast<Animal*>(frog.get()));
+ ASSERT_EQ(bear->As<Animal>(), static_cast<Animal*>(bear.get()));
+ ASSERT_EQ(gecko->As<Animal>(), static_cast<Animal*>(gecko.get()));
- ASSERT_EQ(frog->As<Amphibian>(), static_cast<Amphibian*>(frog.get()));
- ASSERT_EQ(bear->As<Amphibian>(), nullptr);
- ASSERT_EQ(gecko->As<Amphibian>(), nullptr);
+ ASSERT_EQ(frog->As<Amphibian>(), static_cast<Amphibian*>(frog.get()));
+ ASSERT_EQ(bear->As<Amphibian>(), nullptr);
+ ASSERT_EQ(gecko->As<Amphibian>(), nullptr);
- ASSERT_EQ(frog->As<Mammal>(), nullptr);
- ASSERT_EQ(bear->As<Mammal>(), static_cast<Mammal*>(bear.get()));
- ASSERT_EQ(gecko->As<Mammal>(), nullptr);
+ ASSERT_EQ(frog->As<Mammal>(), nullptr);
+ ASSERT_EQ(bear->As<Mammal>(), static_cast<Mammal*>(bear.get()));
+ ASSERT_EQ(gecko->As<Mammal>(), nullptr);
- ASSERT_EQ(frog->As<Reptile>(), nullptr);
- ASSERT_EQ(bear->As<Reptile>(), nullptr);
- ASSERT_EQ(gecko->As<Reptile>(), static_cast<Reptile*>(gecko.get()));
+ ASSERT_EQ(frog->As<Reptile>(), nullptr);
+ ASSERT_EQ(bear->As<Reptile>(), nullptr);
+ ASSERT_EQ(gecko->As<Reptile>(), static_cast<Reptile*>(gecko.get()));
}
TEST(Castable, SwitchNoDefault) {
- std::unique_ptr<Animal> frog = std::make_unique<Frog>();
- std::unique_ptr<Animal> bear = std::make_unique<Bear>();
- std::unique_ptr<Animal> gecko = std::make_unique<Gecko>();
- {
- bool frog_matched_amphibian = false;
- Switch(
- frog.get(), //
- [&](Reptile*) { FAIL() << "frog is not reptile"; },
- [&](Mammal*) { FAIL() << "frog is not mammal"; },
- [&](Amphibian* amphibian) {
- EXPECT_EQ(amphibian, frog.get());
- frog_matched_amphibian = true;
- });
- EXPECT_TRUE(frog_matched_amphibian);
- }
- {
- bool bear_matched_mammal = false;
- Switch(
- bear.get(), //
- [&](Reptile*) { FAIL() << "bear is not reptile"; },
- [&](Amphibian*) { FAIL() << "bear is not amphibian"; },
- [&](Mammal* mammal) {
- EXPECT_EQ(mammal, bear.get());
- bear_matched_mammal = true;
- });
- EXPECT_TRUE(bear_matched_mammal);
- }
- {
- bool gecko_matched_reptile = false;
- Switch(
- gecko.get(), //
- [&](Mammal*) { FAIL() << "gecko is not mammal"; },
- [&](Amphibian*) { FAIL() << "gecko is not amphibian"; },
- [&](Reptile* reptile) {
- EXPECT_EQ(reptile, gecko.get());
- gecko_matched_reptile = true;
- });
- EXPECT_TRUE(gecko_matched_reptile);
- }
+ std::unique_ptr<Animal> frog = std::make_unique<Frog>();
+ std::unique_ptr<Animal> bear = std::make_unique<Bear>();
+ std::unique_ptr<Animal> gecko = std::make_unique<Gecko>();
+ {
+ bool frog_matched_amphibian = false;
+ Switch(
+ frog.get(), //
+ [&](Reptile*) { FAIL() << "frog is not reptile"; },
+ [&](Mammal*) { FAIL() << "frog is not mammal"; },
+ [&](Amphibian* amphibian) {
+ EXPECT_EQ(amphibian, frog.get());
+ frog_matched_amphibian = true;
+ });
+ EXPECT_TRUE(frog_matched_amphibian);
+ }
+ {
+ bool bear_matched_mammal = false;
+ Switch(
+ bear.get(), //
+ [&](Reptile*) { FAIL() << "bear is not reptile"; },
+ [&](Amphibian*) { FAIL() << "bear is not amphibian"; },
+ [&](Mammal* mammal) {
+ EXPECT_EQ(mammal, bear.get());
+ bear_matched_mammal = true;
+ });
+ EXPECT_TRUE(bear_matched_mammal);
+ }
+ {
+ bool gecko_matched_reptile = false;
+ Switch(
+ gecko.get(), //
+ [&](Mammal*) { FAIL() << "gecko is not mammal"; },
+ [&](Amphibian*) { FAIL() << "gecko is not amphibian"; },
+ [&](Reptile* reptile) {
+ EXPECT_EQ(reptile, gecko.get());
+ gecko_matched_reptile = true;
+ });
+ EXPECT_TRUE(gecko_matched_reptile);
+ }
}
TEST(Castable, SwitchWithUnusedDefault) {
- std::unique_ptr<Animal> frog = std::make_unique<Frog>();
- std::unique_ptr<Animal> bear = std::make_unique<Bear>();
- std::unique_ptr<Animal> gecko = std::make_unique<Gecko>();
- {
- bool frog_matched_amphibian = false;
- Switch(
- frog.get(), //
- [&](Reptile*) { FAIL() << "frog is not reptile"; },
- [&](Mammal*) { FAIL() << "frog is not mammal"; },
- [&](Amphibian* amphibian) {
- EXPECT_EQ(amphibian, frog.get());
- frog_matched_amphibian = true;
- },
- [&](Default) { FAIL() << "default should not have been selected"; });
- EXPECT_TRUE(frog_matched_amphibian);
- }
- {
- bool bear_matched_mammal = false;
- Switch(
- bear.get(), //
- [&](Reptile*) { FAIL() << "bear is not reptile"; },
- [&](Amphibian*) { FAIL() << "bear is not amphibian"; },
- [&](Mammal* mammal) {
- EXPECT_EQ(mammal, bear.get());
- bear_matched_mammal = true;
- },
- [&](Default) { FAIL() << "default should not have been selected"; });
- EXPECT_TRUE(bear_matched_mammal);
- }
- {
- bool gecko_matched_reptile = false;
- Switch(
- gecko.get(), //
- [&](Mammal*) { FAIL() << "gecko is not mammal"; },
- [&](Amphibian*) { FAIL() << "gecko is not amphibian"; },
- [&](Reptile* reptile) {
- EXPECT_EQ(reptile, gecko.get());
- gecko_matched_reptile = true;
- },
- [&](Default) { FAIL() << "default should not have been selected"; });
- EXPECT_TRUE(gecko_matched_reptile);
- }
+ std::unique_ptr<Animal> frog = std::make_unique<Frog>();
+ std::unique_ptr<Animal> bear = std::make_unique<Bear>();
+ std::unique_ptr<Animal> gecko = std::make_unique<Gecko>();
+ {
+ bool frog_matched_amphibian = false;
+ Switch(
+ frog.get(), //
+ [&](Reptile*) { FAIL() << "frog is not reptile"; },
+ [&](Mammal*) { FAIL() << "frog is not mammal"; },
+ [&](Amphibian* amphibian) {
+ EXPECT_EQ(amphibian, frog.get());
+ frog_matched_amphibian = true;
+ },
+ [&](Default) { FAIL() << "default should not have been selected"; });
+ EXPECT_TRUE(frog_matched_amphibian);
+ }
+ {
+ bool bear_matched_mammal = false;
+ Switch(
+ bear.get(), //
+ [&](Reptile*) { FAIL() << "bear is not reptile"; },
+ [&](Amphibian*) { FAIL() << "bear is not amphibian"; },
+ [&](Mammal* mammal) {
+ EXPECT_EQ(mammal, bear.get());
+ bear_matched_mammal = true;
+ },
+ [&](Default) { FAIL() << "default should not have been selected"; });
+ EXPECT_TRUE(bear_matched_mammal);
+ }
+ {
+ bool gecko_matched_reptile = false;
+ Switch(
+ gecko.get(), //
+ [&](Mammal*) { FAIL() << "gecko is not mammal"; },
+ [&](Amphibian*) { FAIL() << "gecko is not amphibian"; },
+ [&](Reptile* reptile) {
+ EXPECT_EQ(reptile, gecko.get());
+ gecko_matched_reptile = true;
+ },
+ [&](Default) { FAIL() << "default should not have been selected"; });
+ EXPECT_TRUE(gecko_matched_reptile);
+ }
}
TEST(Castable, SwitchDefault) {
- std::unique_ptr<Animal> frog = std::make_unique<Frog>();
- std::unique_ptr<Animal> bear = std::make_unique<Bear>();
- std::unique_ptr<Animal> gecko = std::make_unique<Gecko>();
- {
- bool frog_matched_default = false;
- Switch(
- frog.get(), //
- [&](Reptile*) { FAIL() << "frog is not reptile"; },
- [&](Mammal*) { FAIL() << "frog is not mammal"; },
- [&](Default) { frog_matched_default = true; });
- EXPECT_TRUE(frog_matched_default);
- }
- {
- bool bear_matched_default = false;
- Switch(
- bear.get(), //
- [&](Reptile*) { FAIL() << "bear is not reptile"; },
- [&](Amphibian*) { FAIL() << "bear is not amphibian"; },
- [&](Default) { bear_matched_default = true; });
- EXPECT_TRUE(bear_matched_default);
- }
- {
- bool gecko_matched_default = false;
- Switch(
- gecko.get(), //
- [&](Mammal*) { FAIL() << "gecko is not mammal"; },
- [&](Amphibian*) { FAIL() << "gecko is not amphibian"; },
- [&](Default) { gecko_matched_default = true; });
- EXPECT_TRUE(gecko_matched_default);
- }
+ std::unique_ptr<Animal> frog = std::make_unique<Frog>();
+ std::unique_ptr<Animal> bear = std::make_unique<Bear>();
+ std::unique_ptr<Animal> gecko = std::make_unique<Gecko>();
+ {
+ bool frog_matched_default = false;
+ Switch(
+ frog.get(), //
+ [&](Reptile*) { FAIL() << "frog is not reptile"; },
+ [&](Mammal*) { FAIL() << "frog is not mammal"; },
+ [&](Default) { frog_matched_default = true; });
+ EXPECT_TRUE(frog_matched_default);
+ }
+ {
+ bool bear_matched_default = false;
+ Switch(
+ bear.get(), //
+ [&](Reptile*) { FAIL() << "bear is not reptile"; },
+ [&](Amphibian*) { FAIL() << "bear is not amphibian"; },
+ [&](Default) { bear_matched_default = true; });
+ EXPECT_TRUE(bear_matched_default);
+ }
+ {
+ bool gecko_matched_default = false;
+ Switch(
+ gecko.get(), //
+ [&](Mammal*) { FAIL() << "gecko is not mammal"; },
+ [&](Amphibian*) { FAIL() << "gecko is not amphibian"; },
+ [&](Default) { gecko_matched_default = true; });
+ EXPECT_TRUE(gecko_matched_default);
+ }
}
TEST(Castable, SwitchMatchFirst) {
- std::unique_ptr<Animal> frog = std::make_unique<Frog>();
- {
- bool frog_matched_animal = false;
- Switch(
- frog.get(),
- [&](Animal* animal) {
- EXPECT_EQ(animal, frog.get());
- frog_matched_animal = true;
- },
- [&](Amphibian*) { FAIL() << "animal should have been matched first"; });
- EXPECT_TRUE(frog_matched_animal);
- }
- {
- bool frog_matched_amphibian = false;
- Switch(
- frog.get(),
- [&](Amphibian* amphibain) {
- EXPECT_EQ(amphibain, frog.get());
- frog_matched_amphibian = true;
- },
- [&](Animal*) { FAIL() << "amphibian should have been matched first"; });
- EXPECT_TRUE(frog_matched_amphibian);
- }
+ std::unique_ptr<Animal> frog = std::make_unique<Frog>();
+ {
+ bool frog_matched_animal = false;
+ Switch(
+ frog.get(),
+ [&](Animal* animal) {
+ EXPECT_EQ(animal, frog.get());
+ frog_matched_animal = true;
+ },
+ [&](Amphibian*) { FAIL() << "animal should have been matched first"; });
+ EXPECT_TRUE(frog_matched_animal);
+ }
+ {
+ bool frog_matched_amphibian = false;
+ Switch(
+ frog.get(),
+ [&](Amphibian* amphibain) {
+ EXPECT_EQ(amphibain, frog.get());
+ frog_matched_amphibian = true;
+ },
+ [&](Animal*) { FAIL() << "amphibian should have been matched first"; });
+ EXPECT_TRUE(frog_matched_amphibian);
+ }
}
TEST(Castable, SwitchReturnValueWithDefault) {
- std::unique_ptr<Animal> frog = std::make_unique<Frog>();
- std::unique_ptr<Animal> bear = std::make_unique<Bear>();
- std::unique_ptr<Animal> gecko = std::make_unique<Gecko>();
- {
- const char* result = Switch(
- frog.get(), //
- [](Mammal*) { return "mammal"; }, //
- [](Amphibian*) { return "amphibian"; }, //
- [](Default) { return "unknown"; });
- static_assert(std::is_same_v<decltype(result), const char*>);
- EXPECT_EQ(std::string(result), "amphibian");
- }
- {
- const char* result = Switch(
- bear.get(), //
- [](Mammal*) { return "mammal"; }, //
- [](Amphibian*) { return "amphibian"; }, //
- [](Default) { return "unknown"; });
- static_assert(std::is_same_v<decltype(result), const char*>);
- EXPECT_EQ(std::string(result), "mammal");
- }
- {
- const char* result = Switch(
- gecko.get(), //
- [](Mammal*) { return "mammal"; }, //
- [](Amphibian*) { return "amphibian"; }, //
- [](Default) { return "unknown"; });
- static_assert(std::is_same_v<decltype(result), const char*>);
- EXPECT_EQ(std::string(result), "unknown");
- }
+ std::unique_ptr<Animal> frog = std::make_unique<Frog>();
+ std::unique_ptr<Animal> bear = std::make_unique<Bear>();
+ std::unique_ptr<Animal> gecko = std::make_unique<Gecko>();
+ {
+ const char* result = Switch(
+ frog.get(), //
+ [](Mammal*) { return "mammal"; }, //
+ [](Amphibian*) { return "amphibian"; }, //
+ [](Default) { return "unknown"; });
+ static_assert(std::is_same_v<decltype(result), const char*>);
+ EXPECT_EQ(std::string(result), "amphibian");
+ }
+ {
+ const char* result = Switch(
+ bear.get(), //
+ [](Mammal*) { return "mammal"; }, //
+ [](Amphibian*) { return "amphibian"; }, //
+ [](Default) { return "unknown"; });
+ static_assert(std::is_same_v<decltype(result), const char*>);
+ EXPECT_EQ(std::string(result), "mammal");
+ }
+ {
+ const char* result = Switch(
+ gecko.get(), //
+ [](Mammal*) { return "mammal"; }, //
+ [](Amphibian*) { return "amphibian"; }, //
+ [](Default) { return "unknown"; });
+ static_assert(std::is_same_v<decltype(result), const char*>);
+ EXPECT_EQ(std::string(result), "unknown");
+ }
}
TEST(Castable, SwitchReturnValueWithoutDefault) {
- std::unique_ptr<Animal> frog = std::make_unique<Frog>();
- std::unique_ptr<Animal> bear = std::make_unique<Bear>();
- std::unique_ptr<Animal> gecko = std::make_unique<Gecko>();
- {
- const char* result = Switch(
- frog.get(), //
- [](Mammal*) { return "mammal"; }, //
- [](Amphibian*) { return "amphibian"; });
- static_assert(std::is_same_v<decltype(result), const char*>);
- EXPECT_EQ(std::string(result), "amphibian");
- }
- {
- const char* result = Switch(
- bear.get(), //
- [](Mammal*) { return "mammal"; }, //
- [](Amphibian*) { return "amphibian"; });
- static_assert(std::is_same_v<decltype(result), const char*>);
- EXPECT_EQ(std::string(result), "mammal");
- }
- {
- auto* result = Switch(
- gecko.get(), //
- [](Mammal*) { return "mammal"; }, //
- [](Amphibian*) { return "amphibian"; });
- static_assert(std::is_same_v<decltype(result), const char*>);
- EXPECT_EQ(result, nullptr);
- }
+ std::unique_ptr<Animal> frog = std::make_unique<Frog>();
+ std::unique_ptr<Animal> bear = std::make_unique<Bear>();
+ std::unique_ptr<Animal> gecko = std::make_unique<Gecko>();
+ {
+ const char* result = Switch(
+ frog.get(), //
+ [](Mammal*) { return "mammal"; }, //
+ [](Amphibian*) { return "amphibian"; });
+ static_assert(std::is_same_v<decltype(result), const char*>);
+ EXPECT_EQ(std::string(result), "amphibian");
+ }
+ {
+ const char* result = Switch(
+ bear.get(), //
+ [](Mammal*) { return "mammal"; }, //
+ [](Amphibian*) { return "amphibian"; });
+ static_assert(std::is_same_v<decltype(result), const char*>);
+ EXPECT_EQ(std::string(result), "mammal");
+ }
+ {
+ auto* result = Switch(
+ gecko.get(), //
+ [](Mammal*) { return "mammal"; }, //
+ [](Amphibian*) { return "amphibian"; });
+ static_assert(std::is_same_v<decltype(result), const char*>);
+ EXPECT_EQ(result, nullptr);
+ }
}
TEST(Castable, SwitchInferPODReturnTypeWithDefault) {
- std::unique_ptr<Animal> frog = std::make_unique<Frog>();
- std::unique_ptr<Animal> bear = std::make_unique<Bear>();
- std::unique_ptr<Animal> gecko = std::make_unique<Gecko>();
- {
- auto result = Switch(
- frog.get(), //
- [](Mammal*) { return 1; }, //
- [](Amphibian*) { return 2.0f; }, //
- [](Default) { return 3.0; });
- static_assert(std::is_same_v<decltype(result), double>);
- EXPECT_EQ(result, 2.0);
- }
- {
- auto result = Switch(
- bear.get(), //
- [](Mammal*) { return 1.0; }, //
- [](Amphibian*) { return 2.0f; }, //
- [](Default) { return 3; });
- static_assert(std::is_same_v<decltype(result), double>);
- EXPECT_EQ(result, 1.0);
- }
- {
- auto result = Switch(
- gecko.get(), //
- [](Mammal*) { return 1.0f; }, //
- [](Amphibian*) { return 2; }, //
- [](Default) { return 3.0; });
- static_assert(std::is_same_v<decltype(result), double>);
- EXPECT_EQ(result, 3.0);
- }
+ std::unique_ptr<Animal> frog = std::make_unique<Frog>();
+ std::unique_ptr<Animal> bear = std::make_unique<Bear>();
+ std::unique_ptr<Animal> gecko = std::make_unique<Gecko>();
+ {
+ auto result = Switch(
+ frog.get(), //
+ [](Mammal*) { return 1; }, //
+ [](Amphibian*) { return 2.0f; }, //
+ [](Default) { return 3.0; });
+ static_assert(std::is_same_v<decltype(result), double>);
+ EXPECT_EQ(result, 2.0);
+ }
+ {
+ auto result = Switch(
+ bear.get(), //
+ [](Mammal*) { return 1.0; }, //
+ [](Amphibian*) { return 2.0f; }, //
+ [](Default) { return 3; });
+ static_assert(std::is_same_v<decltype(result), double>);
+ EXPECT_EQ(result, 1.0);
+ }
+ {
+ auto result = Switch(
+ gecko.get(), //
+ [](Mammal*) { return 1.0f; }, //
+ [](Amphibian*) { return 2; }, //
+ [](Default) { return 3.0; });
+ static_assert(std::is_same_v<decltype(result), double>);
+ EXPECT_EQ(result, 3.0);
+ }
}
TEST(Castable, SwitchInferPODReturnTypeWithoutDefault) {
- std::unique_ptr<Animal> frog = std::make_unique<Frog>();
- std::unique_ptr<Animal> bear = std::make_unique<Bear>();
- std::unique_ptr<Animal> gecko = std::make_unique<Gecko>();
- {
- auto result = Switch(
- frog.get(), //
- [](Mammal*) { return 1; }, //
- [](Amphibian*) { return 2.0f; });
- static_assert(std::is_same_v<decltype(result), float>);
- EXPECT_EQ(result, 2.0f);
- }
- {
- auto result = Switch(
- bear.get(), //
- [](Mammal*) { return 1.0f; }, //
- [](Amphibian*) { return 2; });
- static_assert(std::is_same_v<decltype(result), float>);
- EXPECT_EQ(result, 1.0f);
- }
- {
- auto result = Switch(
- gecko.get(), //
- [](Mammal*) { return 1.0; }, //
- [](Amphibian*) { return 2.0f; });
- static_assert(std::is_same_v<decltype(result), double>);
- EXPECT_EQ(result, 0.0);
- }
+ std::unique_ptr<Animal> frog = std::make_unique<Frog>();
+ std::unique_ptr<Animal> bear = std::make_unique<Bear>();
+ std::unique_ptr<Animal> gecko = std::make_unique<Gecko>();
+ {
+ auto result = Switch(
+ frog.get(), //
+ [](Mammal*) { return 1; }, //
+ [](Amphibian*) { return 2.0f; });
+ static_assert(std::is_same_v<decltype(result), float>);
+ EXPECT_EQ(result, 2.0f);
+ }
+ {
+ auto result = Switch(
+ bear.get(), //
+ [](Mammal*) { return 1.0f; }, //
+ [](Amphibian*) { return 2; });
+ static_assert(std::is_same_v<decltype(result), float>);
+ EXPECT_EQ(result, 1.0f);
+ }
+ {
+ auto result = Switch(
+ gecko.get(), //
+ [](Mammal*) { return 1.0; }, //
+ [](Amphibian*) { return 2.0f; });
+ static_assert(std::is_same_v<decltype(result), double>);
+ EXPECT_EQ(result, 0.0);
+ }
}
TEST(Castable, SwitchInferCastableReturnTypeWithDefault) {
- std::unique_ptr<Animal> frog = std::make_unique<Frog>();
- std::unique_ptr<Animal> bear = std::make_unique<Bear>();
- std::unique_ptr<Animal> gecko = std::make_unique<Gecko>();
- {
- auto* result = Switch(
- frog.get(), //
- [](Mammal* p) { return p; }, //
- [](Amphibian*) { return nullptr; }, //
- [](Default) { return nullptr; });
- static_assert(std::is_same_v<decltype(result), Mammal*>);
- EXPECT_EQ(result, nullptr);
- }
- {
- auto* result = Switch(
- bear.get(), //
- [](Mammal* p) { return p; }, //
- [](Amphibian* p) { return const_cast<const Amphibian*>(p); },
- [](Default) { return nullptr; });
- static_assert(std::is_same_v<decltype(result), const Animal*>);
- EXPECT_EQ(result, bear.get());
- }
- {
- auto* result = Switch(
- gecko.get(), //
- [](Mammal* p) { return p; }, //
- [](Amphibian* p) { return p; }, //
- [](Default) -> CastableBase* { return nullptr; });
- static_assert(std::is_same_v<decltype(result), CastableBase*>);
- EXPECT_EQ(result, nullptr);
- }
+ std::unique_ptr<Animal> frog = std::make_unique<Frog>();
+ std::unique_ptr<Animal> bear = std::make_unique<Bear>();
+ std::unique_ptr<Animal> gecko = std::make_unique<Gecko>();
+ {
+ auto* result = Switch(
+ frog.get(), //
+ [](Mammal* p) { return p; }, //
+ [](Amphibian*) { return nullptr; }, //
+ [](Default) { return nullptr; });
+ static_assert(std::is_same_v<decltype(result), Mammal*>);
+ EXPECT_EQ(result, nullptr);
+ }
+ {
+ auto* result = Switch(
+ bear.get(), //
+ [](Mammal* p) { return p; }, //
+ [](Amphibian* p) { return const_cast<const Amphibian*>(p); },
+ [](Default) { return nullptr; });
+ static_assert(std::is_same_v<decltype(result), const Animal*>);
+ EXPECT_EQ(result, bear.get());
+ }
+ {
+ auto* result = Switch(
+ gecko.get(), //
+ [](Mammal* p) { return p; }, //
+ [](Amphibian* p) { return p; }, //
+ [](Default) -> CastableBase* { return nullptr; });
+ static_assert(std::is_same_v<decltype(result), CastableBase*>);
+ EXPECT_EQ(result, nullptr);
+ }
}
TEST(Castable, SwitchInferCastableReturnTypeWithoutDefault) {
- std::unique_ptr<Animal> frog = std::make_unique<Frog>();
- std::unique_ptr<Animal> bear = std::make_unique<Bear>();
- std::unique_ptr<Animal> gecko = std::make_unique<Gecko>();
- {
- auto* result = Switch(
- frog.get(), //
- [](Mammal* p) { return p; }, //
- [](Amphibian*) { return nullptr; });
- static_assert(std::is_same_v<decltype(result), Mammal*>);
- EXPECT_EQ(result, nullptr);
- }
- {
- auto* result = Switch(
- bear.get(), //
- [](Mammal* p) { return p; }, //
- [](Amphibian* p) { return const_cast<const Amphibian*>(p); }); //
- static_assert(std::is_same_v<decltype(result), const Animal*>);
- EXPECT_EQ(result, bear.get());
- }
- {
- auto* result = Switch(
- gecko.get(), //
- [](Mammal* p) { return p; }, //
- [](Amphibian* p) { return p; });
- static_assert(std::is_same_v<decltype(result), Animal*>);
- EXPECT_EQ(result, nullptr);
- }
+ std::unique_ptr<Animal> frog = std::make_unique<Frog>();
+ std::unique_ptr<Animal> bear = std::make_unique<Bear>();
+ std::unique_ptr<Animal> gecko = std::make_unique<Gecko>();
+ {
+ auto* result = Switch(
+ frog.get(), //
+ [](Mammal* p) { return p; }, //
+ [](Amphibian*) { return nullptr; });
+ static_assert(std::is_same_v<decltype(result), Mammal*>);
+ EXPECT_EQ(result, nullptr);
+ }
+ {
+ auto* result = Switch(
+ bear.get(), //
+ [](Mammal* p) { return p; }, //
+ [](Amphibian* p) { return const_cast<const Amphibian*>(p); }); //
+ static_assert(std::is_same_v<decltype(result), const Animal*>);
+ EXPECT_EQ(result, bear.get());
+ }
+ {
+ auto* result = Switch(
+ gecko.get(), //
+ [](Mammal* p) { return p; }, //
+ [](Amphibian* p) { return p; });
+ static_assert(std::is_same_v<decltype(result), Animal*>);
+ EXPECT_EQ(result, nullptr);
+ }
}
TEST(Castable, SwitchExplicitPODReturnTypeWithDefault) {
- std::unique_ptr<Animal> frog = std::make_unique<Frog>();
- std::unique_ptr<Animal> bear = std::make_unique<Bear>();
- std::unique_ptr<Animal> gecko = std::make_unique<Gecko>();
- {
- auto result = Switch<double>(
- frog.get(), //
- [](Mammal*) { return 1; }, //
- [](Amphibian*) { return 2.0f; }, //
- [](Default) { return 3.0; });
- static_assert(std::is_same_v<decltype(result), double>);
- EXPECT_EQ(result, 2.0f);
- }
- {
- auto result = Switch<double>(
- bear.get(), //
- [](Mammal*) { return 1; }, //
- [](Amphibian*) { return 2; }, //
- [](Default) { return 3; });
- static_assert(std::is_same_v<decltype(result), double>);
- EXPECT_EQ(result, 1.0f);
- }
- {
- auto result = Switch<double>(
- gecko.get(), //
- [](Mammal*) { return 1.0f; }, //
- [](Amphibian*) { return 2.0f; }, //
- [](Default) { return 3.0f; });
- static_assert(std::is_same_v<decltype(result), double>);
- EXPECT_EQ(result, 3.0f);
- }
+ std::unique_ptr<Animal> frog = std::make_unique<Frog>();
+ std::unique_ptr<Animal> bear = std::make_unique<Bear>();
+ std::unique_ptr<Animal> gecko = std::make_unique<Gecko>();
+ {
+ auto result = Switch<double>(
+ frog.get(), //
+ [](Mammal*) { return 1; }, //
+ [](Amphibian*) { return 2.0f; }, //
+ [](Default) { return 3.0; });
+ static_assert(std::is_same_v<decltype(result), double>);
+ EXPECT_EQ(result, 2.0f);
+ }
+ {
+ auto result = Switch<double>(
+ bear.get(), //
+ [](Mammal*) { return 1; }, //
+ [](Amphibian*) { return 2; }, //
+ [](Default) { return 3; });
+ static_assert(std::is_same_v<decltype(result), double>);
+ EXPECT_EQ(result, 1.0f);
+ }
+ {
+ auto result = Switch<double>(
+ gecko.get(), //
+ [](Mammal*) { return 1.0f; }, //
+ [](Amphibian*) { return 2.0f; }, //
+ [](Default) { return 3.0f; });
+ static_assert(std::is_same_v<decltype(result), double>);
+ EXPECT_EQ(result, 3.0f);
+ }
}
TEST(Castable, SwitchExplicitPODReturnTypeWithoutDefault) {
- std::unique_ptr<Animal> frog = std::make_unique<Frog>();
- std::unique_ptr<Animal> bear = std::make_unique<Bear>();
- std::unique_ptr<Animal> gecko = std::make_unique<Gecko>();
- {
- auto result = Switch<double>(
- frog.get(), //
- [](Mammal*) { return 1; }, //
- [](Amphibian*) { return 2.0f; });
- static_assert(std::is_same_v<decltype(result), double>);
- EXPECT_EQ(result, 2.0f);
- }
- {
- auto result = Switch<double>(
- bear.get(), //
- [](Mammal*) { return 1.0f; }, //
- [](Amphibian*) { return 2; });
- static_assert(std::is_same_v<decltype(result), double>);
- EXPECT_EQ(result, 1.0f);
- }
- {
- auto result = Switch<double>(
- gecko.get(), //
- [](Mammal*) { return 1.0; }, //
- [](Amphibian*) { return 2.0f; });
- static_assert(std::is_same_v<decltype(result), double>);
- EXPECT_EQ(result, 0.0);
- }
+ std::unique_ptr<Animal> frog = std::make_unique<Frog>();
+ std::unique_ptr<Animal> bear = std::make_unique<Bear>();
+ std::unique_ptr<Animal> gecko = std::make_unique<Gecko>();
+ {
+ auto result = Switch<double>(
+ frog.get(), //
+ [](Mammal*) { return 1; }, //
+ [](Amphibian*) { return 2.0f; });
+ static_assert(std::is_same_v<decltype(result), double>);
+ EXPECT_EQ(result, 2.0f);
+ }
+ {
+ auto result = Switch<double>(
+ bear.get(), //
+ [](Mammal*) { return 1.0f; }, //
+ [](Amphibian*) { return 2; });
+ static_assert(std::is_same_v<decltype(result), double>);
+ EXPECT_EQ(result, 1.0f);
+ }
+ {
+ auto result = Switch<double>(
+ gecko.get(), //
+ [](Mammal*) { return 1.0; }, //
+ [](Amphibian*) { return 2.0f; });
+ static_assert(std::is_same_v<decltype(result), double>);
+ EXPECT_EQ(result, 0.0);
+ }
}
TEST(Castable, SwitchExplicitCastableReturnTypeWithDefault) {
- std::unique_ptr<Animal> frog = std::make_unique<Frog>();
- std::unique_ptr<Animal> bear = std::make_unique<Bear>();
- std::unique_ptr<Animal> gecko = std::make_unique<Gecko>();
- {
- auto* result = Switch<Animal>(
- frog.get(), //
- [](Mammal* p) { return p; }, //
- [](Amphibian*) { return nullptr; }, //
- [](Default) { return nullptr; });
- static_assert(std::is_same_v<decltype(result), Animal*>);
- EXPECT_EQ(result, nullptr);
- }
- {
- auto* result = Switch<CastableBase>(
- bear.get(), //
- [](Mammal* p) { return p; }, //
- [](Amphibian* p) { return const_cast<const Amphibian*>(p); },
- [](Default) { return nullptr; });
- static_assert(std::is_same_v<decltype(result), const CastableBase*>);
- EXPECT_EQ(result, bear.get());
- }
- {
- auto* result = Switch<const Animal>(
- gecko.get(), //
- [](Mammal* p) { return p; }, //
- [](Amphibian* p) { return p; }, //
- [](Default) { return nullptr; });
- static_assert(std::is_same_v<decltype(result), const Animal*>);
- EXPECT_EQ(result, nullptr);
- }
+ std::unique_ptr<Animal> frog = std::make_unique<Frog>();
+ std::unique_ptr<Animal> bear = std::make_unique<Bear>();
+ std::unique_ptr<Animal> gecko = std::make_unique<Gecko>();
+ {
+ auto* result = Switch<Animal>(
+ frog.get(), //
+ [](Mammal* p) { return p; }, //
+ [](Amphibian*) { return nullptr; }, //
+ [](Default) { return nullptr; });
+ static_assert(std::is_same_v<decltype(result), Animal*>);
+ EXPECT_EQ(result, nullptr);
+ }
+ {
+ auto* result = Switch<CastableBase>(
+ bear.get(), //
+ [](Mammal* p) { return p; }, //
+ [](Amphibian* p) { return const_cast<const Amphibian*>(p); },
+ [](Default) { return nullptr; });
+ static_assert(std::is_same_v<decltype(result), const CastableBase*>);
+ EXPECT_EQ(result, bear.get());
+ }
+ {
+ auto* result = Switch<const Animal>(
+ gecko.get(), //
+ [](Mammal* p) { return p; }, //
+ [](Amphibian* p) { return p; }, //
+ [](Default) { return nullptr; });
+ static_assert(std::is_same_v<decltype(result), const Animal*>);
+ EXPECT_EQ(result, nullptr);
+ }
}
TEST(Castable, SwitchExplicitCastableReturnTypeWithoutDefault) {
- std::unique_ptr<Animal> frog = std::make_unique<Frog>();
- std::unique_ptr<Animal> bear = std::make_unique<Bear>();
- std::unique_ptr<Animal> gecko = std::make_unique<Gecko>();
- {
- auto* result = Switch<Animal>(
- frog.get(), //
- [](Mammal* p) { return p; }, //
- [](Amphibian*) { return nullptr; });
- static_assert(std::is_same_v<decltype(result), Animal*>);
- EXPECT_EQ(result, nullptr);
- }
- {
- auto* result = Switch<CastableBase>(
- bear.get(), //
- [](Mammal* p) { return p; }, //
- [](Amphibian* p) { return const_cast<const Amphibian*>(p); }); //
- static_assert(std::is_same_v<decltype(result), const CastableBase*>);
- EXPECT_EQ(result, bear.get());
- }
- {
- auto* result = Switch<const Animal*>(
- gecko.get(), //
- [](Mammal* p) { return p; }, //
- [](Amphibian* p) { return p; });
- static_assert(std::is_same_v<decltype(result), const Animal*>);
- EXPECT_EQ(result, nullptr);
- }
+ std::unique_ptr<Animal> frog = std::make_unique<Frog>();
+ std::unique_ptr<Animal> bear = std::make_unique<Bear>();
+ std::unique_ptr<Animal> gecko = std::make_unique<Gecko>();
+ {
+ auto* result = Switch<Animal>(
+ frog.get(), //
+ [](Mammal* p) { return p; }, //
+ [](Amphibian*) { return nullptr; });
+ static_assert(std::is_same_v<decltype(result), Animal*>);
+ EXPECT_EQ(result, nullptr);
+ }
+ {
+ auto* result = Switch<CastableBase>(
+ bear.get(), //
+ [](Mammal* p) { return p; }, //
+ [](Amphibian* p) { return const_cast<const Amphibian*>(p); }); //
+ static_assert(std::is_same_v<decltype(result), const CastableBase*>);
+ EXPECT_EQ(result, bear.get());
+ }
+ {
+ auto* result = Switch<const Animal*>(
+ gecko.get(), //
+ [](Mammal* p) { return p; }, //
+ [](Amphibian* p) { return p; });
+ static_assert(std::is_same_v<decltype(result), const Animal*>);
+ EXPECT_EQ(result, nullptr);
+ }
}
TEST(Castable, SwitchNull) {
- Animal* null = nullptr;
- Switch(
- null, //
- [&](Amphibian*) { FAIL() << "should not be called"; },
- [&](Animal*) { FAIL() << "should not be called"; });
+ Animal* null = nullptr;
+ Switch(
+ null, //
+ [&](Amphibian*) { FAIL() << "should not be called"; },
+ [&](Animal*) { FAIL() << "should not be called"; });
}
TEST(Castable, SwitchNullNoDefault) {
- Animal* null = nullptr;
- bool default_called = false;
- Switch(
- null, //
- [&](Amphibian*) { FAIL() << "should not be called"; },
- [&](Animal*) { FAIL() << "should not be called"; },
- [&](Default) { default_called = true; });
- EXPECT_TRUE(default_called);
+ Animal* null = nullptr;
+ bool default_called = false;
+ Switch(
+ null, //
+ [&](Amphibian*) { FAIL() << "should not be called"; },
+ [&](Animal*) { FAIL() << "should not be called"; },
+ [&](Default) { default_called = true; });
+ EXPECT_TRUE(default_called);
+}
+
+TEST(Castable, SwitchReturnNoDefaultConstructor) {
+ struct Object {
+ explicit Object(int v) : value(v) {}
+ int value;
+ };
+
+ std::unique_ptr<Animal> frog = std::make_unique<Frog>();
+ {
+ auto result = Switch(
+ frog.get(), //
+ [](Mammal*) { return Object(1); }, //
+ [](Amphibian*) { return Object(2); }, //
+ [](Default) { return Object(3); });
+ static_assert(std::is_same_v<decltype(result), Object>);
+ EXPECT_EQ(result.value, 2);
+ }
+ {
+ auto result = Switch(
+ frog.get(), //
+ [](Mammal*) { return Object(1); }, //
+ [](Default) { return Object(3); });
+ static_assert(std::is_same_v<decltype(result), Object>);
+ EXPECT_EQ(result.value, 3);
+ }
}
// IsCastable static tests
@@ -736,8 +758,7 @@ static_assert(std::is_same_v<Gecko, CastableCommonBase<Gecko>>);
static_assert(std::is_same_v<Iguana, CastableCommonBase<Iguana>>);
static_assert(std::is_same_v<Animal, CastableCommonBase<Animal, Animal>>);
-static_assert(
- std::is_same_v<Amphibian, CastableCommonBase<Amphibian, Amphibian>>);
+static_assert(std::is_same_v<Amphibian, CastableCommonBase<Amphibian, Amphibian>>);
static_assert(std::is_same_v<Mammal, CastableCommonBase<Mammal, Mammal>>);
static_assert(std::is_same_v<Reptile, CastableCommonBase<Reptile, Reptile>>);
static_assert(std::is_same_v<Frog, CastableCommonBase<Frog, Frog>>);
@@ -746,10 +767,8 @@ static_assert(std::is_same_v<Lizard, CastableCommonBase<Lizard, Lizard>>);
static_assert(std::is_same_v<Gecko, CastableCommonBase<Gecko, Gecko>>);
static_assert(std::is_same_v<Iguana, CastableCommonBase<Iguana, Iguana>>);
-static_assert(
- std::is_same_v<CastableBase, CastableCommonBase<CastableBase, Animal>>);
-static_assert(
- std::is_same_v<CastableBase, CastableCommonBase<Animal, CastableBase>>);
+static_assert(std::is_same_v<CastableBase, CastableCommonBase<CastableBase, Animal>>);
+static_assert(std::is_same_v<CastableBase, CastableCommonBase<Animal, CastableBase>>);
static_assert(std::is_same_v<Amphibian, CastableCommonBase<Amphibian, Frog>>);
static_assert(std::is_same_v<Amphibian, CastableCommonBase<Frog, Amphibian>>);
static_assert(std::is_same_v<Animal, CastableCommonBase<Reptile, Frog>>);
@@ -759,19 +778,14 @@ static_assert(std::is_same_v<Animal, CastableCommonBase<Frog, Bear>>);
static_assert(std::is_same_v<Lizard, CastableCommonBase<Gecko, Iguana>>);
static_assert(std::is_same_v<Animal, CastableCommonBase<Bear, Frog, Iguana>>);
-static_assert(
- std::is_same_v<Lizard, CastableCommonBase<Lizard, Gecko, Iguana>>);
-static_assert(
- std::is_same_v<Lizard, CastableCommonBase<Gecko, Iguana, Lizard>>);
-static_assert(
- std::is_same_v<Lizard, CastableCommonBase<Gecko, Lizard, Iguana>>);
+static_assert(std::is_same_v<Lizard, CastableCommonBase<Lizard, Gecko, Iguana>>);
+static_assert(std::is_same_v<Lizard, CastableCommonBase<Gecko, Iguana, Lizard>>);
+static_assert(std::is_same_v<Lizard, CastableCommonBase<Gecko, Lizard, Iguana>>);
static_assert(std::is_same_v<Animal, CastableCommonBase<Frog, Gecko, Iguana>>);
static_assert(std::is_same_v<Animal, CastableCommonBase<Gecko, Iguana, Frog>>);
static_assert(std::is_same_v<Animal, CastableCommonBase<Gecko, Frog, Iguana>>);
-static_assert(
- std::is_same_v<CastableBase,
- CastableCommonBase<Bear, Frog, Iguana, CastableBase>>);
+static_assert(std::is_same_v<CastableBase, CastableCommonBase<Bear, Frog, Iguana, CastableBase>>);
} // namespace
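
A minimal usage sketch of the Switch() helper that the tests above exercise. It assumes the tint::Switch()/tint::Default utilities from src/tint/castable.h and the Animal/Mammal/Amphibian test hierarchy defined in this castable_test.cc; the Classify() function and its return values are hypothetical and are not part of the patch.

    // Sketch only: dispatch on the dynamic type of a Castable-derived object.
    // The explicit <double> template argument forces every case result to be
    // converted to a common return type, as the SwitchExplicitPODReturnType
    // tests above verify.
    double Classify(Animal* animal) {
        return Switch<double>(
            animal,                          // object to dispatch on
            [](Mammal*) { return 1.0; },     // most derived matching case wins
            [](Amphibian*) { return 2.0; },  // e.g. a Frog lands here
            [](Default) { return 3.0; });    // fallback; also hit for nullptr,
                                             // as the null-switch tests show
    }
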
diff --git a/chromium/third_party/dawn/src/tint/clone_context.cc b/chromium/third_party/dawn/src/tint/clone_context.cc
index afdf488cf1e..0a9e606742e 100644
--- a/chromium/third_party/dawn/src/tint/clone_context.cc
+++ b/chromium/third_party/dawn/src/tint/clone_context.cc
@@ -26,91 +26,85 @@ namespace tint {
CloneContext::ListTransforms::ListTransforms() = default;
CloneContext::ListTransforms::~ListTransforms() = default;
-CloneContext::CloneContext(ProgramBuilder* to,
- Program const* from,
- bool auto_clone_symbols)
+CloneContext::CloneContext(ProgramBuilder* to, Program const* from, bool auto_clone_symbols)
: dst(to), src(from) {
- if (auto_clone_symbols) {
- // Almost all transforms will want to clone all symbols before doing any
- // work, to avoid any newly created symbols clashing with existing symbols
- // in the source program and causing them to be renamed.
- from->Symbols().Foreach([&](Symbol s, const std::string&) { Clone(s); });
- }
+ if (auto_clone_symbols) {
+ // Almost all transforms will want to clone all symbols before doing any
+ // work, to avoid any newly created symbols clashing with existing symbols
+ // in the source program and causing them to be renamed.
+ from->Symbols().Foreach([&](Symbol s, const std::string&) { Clone(s); });
+ }
}
-CloneContext::CloneContext(ProgramBuilder* builder)
- : CloneContext(builder, nullptr, false) {}
+CloneContext::CloneContext(ProgramBuilder* builder) : CloneContext(builder, nullptr, false) {}
CloneContext::~CloneContext() = default;
Symbol CloneContext::Clone(Symbol s) {
- if (!src) {
- return s; // In-place clone
- }
- return utils::GetOrCreate(cloned_symbols_, s, [&]() -> Symbol {
- if (symbol_transform_) {
- return symbol_transform_(s);
+ if (!src) {
+ return s; // In-place clone
}
- return dst->Symbols().New(src->Symbols().NameFor(s));
- });
+ return utils::GetOrCreate(cloned_symbols_, s, [&]() -> Symbol {
+ if (symbol_transform_) {
+ return symbol_transform_(s);
+ }
+ return dst->Symbols().New(src->Symbols().NameFor(s));
+ });
}
void CloneContext::Clone() {
- dst->AST().Copy(this, &src->AST());
+ dst->AST().Copy(this, &src->AST());
}
ast::FunctionList CloneContext::Clone(const ast::FunctionList& v) {
- ast::FunctionList out;
- out.reserve(v.size());
- for (const ast::Function* el : v) {
- out.Add(Clone(el));
- }
- return out;
+ ast::FunctionList out;
+ out.reserve(v.size());
+ for (const ast::Function* el : v) {
+ out.Add(Clone(el));
+ }
+ return out;
}
const tint::Cloneable* CloneContext::CloneCloneable(const Cloneable* object) {
- // If the input is nullptr, there's nothing to clone - just return nullptr.
- if (object == nullptr) {
- return nullptr;
- }
-
- // Was Replace() called for this object?
- auto it = replacements_.find(object);
- if (it != replacements_.end()) {
- return it->second();
- }
-
- // Attempt to clone using the registered replacer functions.
- auto& typeinfo = object->TypeInfo();
- for (auto& transform : transforms_) {
- if (typeinfo.Is(transform.typeinfo)) {
- if (auto* transformed = transform.function(object)) {
- return transformed;
- }
- break;
+ // If the input is nullptr, there's nothing to clone - just return nullptr.
+ if (object == nullptr) {
+ return nullptr;
+ }
+
+ // Was Replace() called for this object?
+ auto it = replacements_.find(object);
+ if (it != replacements_.end()) {
+ return it->second();
+ }
+
+ // Attempt to clone using the registered replacer functions.
+ auto& typeinfo = object->TypeInfo();
+ for (auto& transform : transforms_) {
+ if (typeinfo.Is(transform.typeinfo)) {
+ if (auto* transformed = transform.function(object)) {
+ return transformed;
+ }
+ break;
+ }
}
- }
- // No transform for this type, or the transform returned nullptr.
- // Clone with T::Clone().
- return object->Clone(this);
+ // No transform for this type, or the transform returned nullptr.
+ // Clone with T::Clone().
+ return object->Clone(this);
}
-void CloneContext::CheckedCastFailure(const Cloneable* got,
- const TypeInfo& expected) {
- TINT_ICE(Clone, Diagnostics())
- << "Cloned object was not of the expected type\n"
- << "got: " << got->TypeInfo().name << "\n"
- << "expected: " << expected.name;
+void CloneContext::CheckedCastFailure(const Cloneable* got, const TypeInfo& expected) {
+ TINT_ICE(Clone, Diagnostics()) << "Cloned object was not of the expected type\n"
+ << "got: " << got->TypeInfo().name << "\n"
+ << "expected: " << expected.name;
}
diag::List& CloneContext::Diagnostics() const {
- return dst->Diagnostics();
+ return dst->Diagnostics();
}
CloneContext::CloneableTransform::CloneableTransform() = default;
-CloneContext::CloneableTransform::CloneableTransform(
- const CloneableTransform&) = default;
+CloneContext::CloneableTransform::CloneableTransform(const CloneableTransform&) = default;
CloneContext::CloneableTransform::~CloneableTransform() = default;
} // namespace tint
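
The clone_context.cc hunk above only re-indents, but the flow it implements is worth spelling out. The following is a hedged sketch of the basic clone path using the tint CloneContext, ProgramBuilder and Program types touched by this diff; CloneProgram() is a hypothetical helper, not code from the patch.

    #include "src/tint/clone_context.h"
    #include "src/tint/program.h"
    #include "src/tint/program_builder.h"

    // Sketch only: clone an entire Program into a fresh ProgramBuilder.
    tint::Program CloneProgram(const tint::Program& in) {
        tint::ProgramBuilder out;
        // auto_clone_symbols defaults to true, so the constructor clones every
        // symbol of `in` up front, keeping newly created symbols from
        // colliding with existing ones (see the constructor above).
        tint::CloneContext ctx(&out, &in);
        ctx.Clone();  // copies the AST from #src into #dst
        return tint::Program(std::move(out));
    }
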
diff --git a/chromium/third_party/dawn/src/tint/clone_context.h b/chromium/third_party/dawn/src/tint/clone_context.h
index 35bb231afe3..e027565aa15 100644
--- a/chromium/third_party/dawn/src/tint/clone_context.h
+++ b/chromium/third_party/dawn/src/tint/clone_context.h
@@ -17,6 +17,7 @@
#include <algorithm>
#include <functional>
+#include <type_traits>
#include <unordered_map>
#include <unordered_set>
#include <utility>
@@ -46,539 +47,528 @@ ProgramID ProgramIDOf(const ProgramBuilder*);
/// Cloneable is the base class for all objects that can be cloned
class Cloneable : public Castable<Cloneable> {
- public:
- /// Performs a deep clone of this object using the CloneContext `ctx`.
- /// @param ctx the clone context
- /// @return the newly cloned object
- virtual const Cloneable* Clone(CloneContext* ctx) const = 0;
+ public:
+ /// Performs a deep clone of this object using the CloneContext `ctx`.
+ /// @param ctx the clone context
+ /// @return the newly cloned object
+ virtual const Cloneable* Clone(CloneContext* ctx) const = 0;
};
/// @returns an invalid ProgramID
inline ProgramID ProgramIDOf(const Cloneable*) {
- return ProgramID();
+ return ProgramID();
}
/// CloneContext holds the state used while cloning AST nodes.
class CloneContext {
- /// ParamTypeIsPtrOf<F, T> is true iff the first parameter of
- /// F is a pointer of (or derives from) type T.
- template <typename F, typename T>
- static constexpr bool ParamTypeIsPtrOf = traits::IsTypeOrDerived<
- typename std::remove_pointer<traits::ParameterType<F, 0>>::type,
- T>;
-
- public:
- /// SymbolTransform is a function that takes a symbol and returns a new
- /// symbol.
- using SymbolTransform = std::function<Symbol(Symbol)>;
-
- /// Constructor for cloning objects from `from` into `to`.
- /// @param to the target ProgramBuilder to clone into
- /// @param from the source Program to clone from
- /// @param auto_clone_symbols clone all symbols in `from` before returning
- CloneContext(ProgramBuilder* to,
- Program const* from,
- bool auto_clone_symbols = true);
-
- /// Constructor for cloning objects from and to the ProgramBuilder `builder`.
- /// @param builder the ProgramBuilder
- explicit CloneContext(ProgramBuilder* builder);
-
- /// Destructor
- ~CloneContext();
-
- /// Clones the Node or sem::Type `a` into the ProgramBuilder #dst if `a` is
- /// not null. If `a` is null, then Clone() returns null.
- ///
- /// Clone() may use a function registered with ReplaceAll() to create a
- /// transformed version of the object. See ReplaceAll() for more information.
- ///
- /// If the CloneContext is cloning from a Program to a ProgramBuilder, then
- /// the Node or sem::Type `a` must be owned by the Program #src.
- ///
- /// @param object the type deriving from Cloneable to clone
- /// @return the cloned node
- template <typename T>
- const T* Clone(const T* object) {
- if (src) {
- TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(Clone, src, object);
- }
- if (auto* cloned = CloneCloneable(object)) {
- auto* out = CheckedCast<T>(cloned);
- TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(Clone, dst, out);
- return out;
- }
- return nullptr;
- }
-
- /// Clones the Node or sem::Type `a` into the ProgramBuilder #dst if `a` is
- /// not null. If `a` is null, then Clone() returns null.
- ///
- /// Unlike Clone(), this method does not invoke or use any transformations
- /// registered by ReplaceAll().
- ///
- /// If the CloneContext is cloning from a Program to a ProgramBuilder, then
- /// the Node or sem::Type `a` must be owned by the Program #src.
- ///
- /// @param a the type deriving from Cloneable to clone
- /// @return the cloned node
- template <typename T>
- const T* CloneWithoutTransform(const T* a) {
- // If the input is nullptr, there's nothing to clone - just return nullptr.
- if (a == nullptr) {
- return nullptr;
- }
- if (src) {
- TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(Clone, src, a);
- }
- auto* c = a->Clone(this);
- return CheckedCast<T>(c);
- }
-
- /// Clones the Source `s` into #dst
- /// TODO(bclayton) - Currently this 'clone' is a shallow copy. If/when
- /// `Source.File`s are owned by the Program this should make a copy of the
- /// file.
- /// @param s the `Source` to clone
- /// @return the cloned source
- Source Clone(const Source& s) const { return s; }
-
- /// Clones the Symbol `s` into #dst
- ///
- /// The Symbol `s` must be owned by the Program #src.
- ///
- /// @param s the Symbol to clone
- /// @return the cloned source
- Symbol Clone(Symbol s);
-
- /// Clones each of the elements of the vector `v` into the ProgramBuilder
- /// #dst.
- ///
- /// All the elements of the vector `v` must be owned by the Program #src.
- ///
- /// @param v the vector to clone
- /// @return the cloned vector
- template <typename T>
- std::vector<T> Clone(const std::vector<T>& v) {
- std::vector<T> out;
- out.reserve(v.size());
- for (auto& el : v) {
- out.emplace_back(Clone(el));
+ /// ParamTypeIsPtrOf<F, T> is true iff the first parameter of
+ /// F is a pointer of (or derives from) type T.
+ template <typename F, typename T>
+ static constexpr bool ParamTypeIsPtrOf =
+ traits::IsTypeOrDerived<typename std::remove_pointer<traits::ParameterType<F, 0>>::type, T>;
+
+ public:
+ /// SymbolTransform is a function that takes a symbol and returns a new
+ /// symbol.
+ using SymbolTransform = std::function<Symbol(Symbol)>;
+
+ /// Constructor for cloning objects from `from` into `to`.
+ /// @param to the target ProgramBuilder to clone into
+ /// @param from the source Program to clone from
+ /// @param auto_clone_symbols clone all symbols in `from` before returning
+ CloneContext(ProgramBuilder* to, Program const* from, bool auto_clone_symbols = true);
+
+ /// Constructor for cloning objects from and to the ProgramBuilder `builder`.
+ /// @param builder the ProgramBuilder
+ explicit CloneContext(ProgramBuilder* builder);
+
+ /// Destructor
+ ~CloneContext();
+
+ /// Clones the Node or sem::Type `a` into the ProgramBuilder #dst if `a` is
+ /// not null. If `a` is null, then Clone() returns null.
+ ///
+ /// Clone() may use a function registered with ReplaceAll() to create a
+ /// transformed version of the object. See ReplaceAll() for more information.
+ ///
+ /// If the CloneContext is cloning from a Program to a ProgramBuilder, then
+ /// the Node or sem::Type `a` must be owned by the Program #src.
+ ///
+ /// @param object the type deriving from Cloneable to clone
+ /// @return the cloned node
+ template <typename T>
+ const T* Clone(const T* object) {
+ if (src) {
+ TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(Clone, src, object);
+ }
+ if (auto* cloned = CloneCloneable(object)) {
+ auto* out = CheckedCast<T>(cloned);
+ TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(Clone, dst, out);
+ return out;
+ }
+ return nullptr;
}
- return out;
- }
-
- /// Clones each of the elements of the vector `v` using the ProgramBuilder
- /// #dst, inserting any additional elements into the list that were registered
- /// with calls to InsertBefore().
- ///
- /// All the elements of the vector `v` must be owned by the Program #src.
- ///
- /// @param v the vector to clone
- /// @return the cloned vector
- template <typename T>
- std::vector<T*> Clone(const std::vector<T*>& v) {
- std::vector<T*> out;
- Clone(out, v);
- return out;
- }
-
- /// Clones each of the elements of the vector `from` into the vector `to`,
- /// inserting any additional elements into the list that were registered with
- /// calls to InsertBefore().
- ///
- /// All the elements of the vector `from` must be owned by the Program #src.
- ///
- /// @param from the vector to clone
- /// @param to the cloned result
- template <typename T>
- void Clone(std::vector<T*>& to, const std::vector<T*>& from) {
- to.reserve(from.size());
-
- auto list_transform_it = list_transforms_.find(&from);
- if (list_transform_it != list_transforms_.end()) {
- const auto& transforms = list_transform_it->second;
- for (auto* o : transforms.insert_front_) {
- to.emplace_back(CheckedCast<T>(o));
- }
- for (auto& el : from) {
- auto insert_before_it = transforms.insert_before_.find(el);
- if (insert_before_it != transforms.insert_before_.end()) {
- for (auto insert : insert_before_it->second) {
- to.emplace_back(CheckedCast<T>(insert));
- }
+
+ /// Clones the Node or sem::Type `a` into the ProgramBuilder #dst if `a` is
+ /// not null. If `a` is null, then Clone() returns null.
+ ///
+ /// Unlike Clone(), this method does not invoke or use any transformations
+ /// registered by ReplaceAll().
+ ///
+ /// If the CloneContext is cloning from a Program to a ProgramBuilder, then
+ /// the Node or sem::Type `a` must be owned by the Program #src.
+ ///
+ /// @param a the type deriving from Cloneable to clone
+ /// @return the cloned node
+ template <typename T>
+ const T* CloneWithoutTransform(const T* a) {
+ // If the input is nullptr, there's nothing to clone - just return nullptr.
+ if (a == nullptr) {
+ return nullptr;
}
- if (transforms.remove_.count(el) == 0) {
- to.emplace_back(Clone(el));
+ if (src) {
+ TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(Clone, src, a);
}
- auto insert_after_it = transforms.insert_after_.find(el);
- if (insert_after_it != transforms.insert_after_.end()) {
- for (auto insert : insert_after_it->second) {
- to.emplace_back(CheckedCast<T>(insert));
- }
+ auto* c = a->Clone(this);
+ return CheckedCast<T>(c);
+ }
+
+ /// Clones the Source `s` into #dst
+ /// TODO(bclayton) - Currently this 'clone' is a shallow copy. If/when
+ /// `Source.File`s are owned by the Program this should make a copy of the
+ /// file.
+ /// @param s the `Source` to clone
+ /// @return the cloned source
+ Source Clone(const Source& s) const { return s; }
+
+ /// Clones the Symbol `s` into #dst
+ ///
+ /// The Symbol `s` must be owned by the Program #src.
+ ///
+ /// @param s the Symbol to clone
+ /// @return the cloned symbol
+ Symbol Clone(Symbol s);
+
+ /// Clones each of the elements of the vector `v` into the ProgramBuilder
+ /// #dst.
+ ///
+ /// All the elements of the vector `v` must be owned by the Program #src.
+ ///
+ /// @param v the vector to clone
+ /// @return the cloned vector
+ template <typename T>
+ std::vector<T> Clone(const std::vector<T>& v) {
+ std::vector<T> out;
+ out.reserve(v.size());
+ for (auto& el : v) {
+ out.emplace_back(Clone(el));
}
- }
- for (auto* o : transforms.insert_back_) {
- to.emplace_back(CheckedCast<T>(o));
- }
- } else {
- for (auto& el : from) {
- to.emplace_back(Clone(el));
-
- // Clone(el) may have inserted after
- list_transform_it = list_transforms_.find(&from);
+ return out;
+ }
+
+ /// Clones each of the elements of the vector `v` using the ProgramBuilder
+ /// #dst, inserting any additional elements into the list that were registered
+ /// with calls to InsertBefore().
+ ///
+ /// All the elements of the vector `v` must be owned by the Program #src.
+ ///
+ /// @param v the vector to clone
+ /// @return the cloned vector
+ template <typename T>
+ std::vector<T*> Clone(const std::vector<T*>& v) {
+ std::vector<T*> out;
+ Clone(out, v);
+ return out;
+ }
+
+ /// Clones each of the elements of the vector `from` into the vector `to`,
+ /// inserting any additional elements into the list that were registered with
+ /// calls to InsertBefore().
+ ///
+ /// All the elements of the vector `from` must be owned by the Program #src.
+ ///
+ /// @param from the vector to clone
+ /// @param to the cloned result
+ template <typename T>
+ void Clone(std::vector<T*>& to, const std::vector<T*>& from) {
+ to.reserve(from.size());
+
+ auto list_transform_it = list_transforms_.find(&from);
if (list_transform_it != list_transforms_.end()) {
- const auto& transforms = list_transform_it->second;
+ const auto& transforms = list_transform_it->second;
+ for (auto* o : transforms.insert_front_) {
+ to.emplace_back(CheckedCast<T>(o));
+ }
+ for (auto& el : from) {
+ auto insert_before_it = transforms.insert_before_.find(el);
+ if (insert_before_it != transforms.insert_before_.end()) {
+ for (auto insert : insert_before_it->second) {
+ to.emplace_back(CheckedCast<T>(insert));
+ }
+ }
+ if (transforms.remove_.count(el) == 0) {
+ to.emplace_back(Clone(el));
+ }
+ auto insert_after_it = transforms.insert_after_.find(el);
+ if (insert_after_it != transforms.insert_after_.end()) {
+ for (auto insert : insert_after_it->second) {
+ to.emplace_back(CheckedCast<T>(insert));
+ }
+ }
+ }
+ for (auto* o : transforms.insert_back_) {
+ to.emplace_back(CheckedCast<T>(o));
+ }
+ } else {
+ for (auto& el : from) {
+ to.emplace_back(Clone(el));
+
+ // Clone(el) may have inserted after
+ list_transform_it = list_transforms_.find(&from);
+ if (list_transform_it != list_transforms_.end()) {
+ const auto& transforms = list_transform_it->second;
+
+ auto insert_after_it = transforms.insert_after_.find(el);
+ if (insert_after_it != transforms.insert_after_.end()) {
+ for (auto insert : insert_after_it->second) {
+ to.emplace_back(CheckedCast<T>(insert));
+ }
+ }
+ }
+ }
+
+ // Clone(el)s may have inserted back
+ list_transform_it = list_transforms_.find(&from);
+ if (list_transform_it != list_transforms_.end()) {
+ const auto& transforms = list_transform_it->second;
- auto insert_after_it = transforms.insert_after_.find(el);
- if (insert_after_it != transforms.insert_after_.end()) {
- for (auto insert : insert_after_it->second) {
- to.emplace_back(CheckedCast<T>(insert));
+ for (auto* o : transforms.insert_back_) {
+ to.emplace_back(CheckedCast<T>(o));
+ }
}
- }
}
- }
+ }
- // Clone(el)s may have inserted back
- list_transform_it = list_transforms_.find(&from);
- if (list_transform_it != list_transforms_.end()) {
- const auto& transforms = list_transform_it->second;
+ /// Clones each of the elements of the vector `v` into the ProgramBuilder
+ /// #dst.
+ ///
+ /// All the elements of the vector `v` must be owned by the Program #src.
+ ///
+ /// @param v the vector to clone
+ /// @return the cloned vector
+ ast::FunctionList Clone(const ast::FunctionList& v);
+
+ /// ReplaceAll() registers `replacer` to be called whenever the Clone() method
+ /// is called with a Cloneable type that matches (or derives from) the type of
+ /// the single parameter of `replacer`.
+ /// The returned Cloneable of `replacer` will be used as the replacement for
+ /// all references to the object that's being cloned. This returned Cloneable
+ /// must be owned by the Program #dst.
+ ///
+ /// `replacer` must be function-like with the signature: `T* (T*)`
+ /// where `T` is a type deriving from Cloneable.
+ ///
+ /// If `replacer` returns a nullptr then Clone() will call `T::Clone()` to
+ /// clone the object.
+ ///
+ /// Example:
+ ///
+ /// ```
+ /// // Replace all ast::UintLiteralExpressions with the number 42
+ /// CloneCtx ctx(&out, in);
+ /// ctx.ReplaceAll([&] (ast::UintLiteralExpression* l) {
+ /// return ctx->dst->create<ast::UintLiteralExpression>(
+ /// ctx->Clone(l->source),
+ /// ctx->Clone(l->type),
+ /// 42);
+ /// });
+ /// ctx.Clone();
+ /// ```
+ ///
+ /// @warning a single handler can only be registered for any given type.
+ /// Attempting to register two handlers for the same type will result in an
+ /// ICE.
+ /// @warning The replacement object must be of the correct type for all
+ /// references of the original object. A type mismatch will result in an
+ /// assertion in debug builds, and undefined behavior in release builds.
+ /// @param replacer a function or function-like object with the signature
+ /// `T* (T*)`, where `T` derives from Cloneable
+ /// @returns this CloneContext so calls can be chained
+ template <typename F>
+ traits::EnableIf<ParamTypeIsPtrOf<F, Cloneable>, CloneContext>& ReplaceAll(F&& replacer) {
+ using TPtr = traits::ParameterType<F, 0>;
+ using T = typename std::remove_pointer<TPtr>::type;
+ for (auto& transform : transforms_) {
+ if (transform.typeinfo->Is(&TypeInfo::Of<T>()) ||
+ TypeInfo::Of<T>().Is(transform.typeinfo)) {
+ TINT_ICE(Clone, Diagnostics())
+ << "ReplaceAll() called with a handler for type " << TypeInfo::Of<T>().name
+ << " that is already handled by a handler for type "
+ << transform.typeinfo->name;
+ return *this;
+ }
+ }
+ CloneableTransform transform;
+ transform.typeinfo = &TypeInfo::Of<T>();
+ transform.function = [=](const Cloneable* in) { return replacer(in->As<T>()); };
+ transforms_.emplace_back(std::move(transform));
+ return *this;
+ }
- for (auto* o : transforms.insert_back_) {
- to.emplace_back(CheckedCast<T>(o));
+ /// ReplaceAll() registers `replacer` to be called whenever the Clone() method
+ /// is called with a Symbol.
+ /// The returned symbol of `replacer` will be used as the replacement for
+ /// all references to the symbol that's being cloned. This returned Symbol
+ /// must be owned by the Program #dst.
+ /// @param replacer a function with the signature `Symbol(Symbol)`.
+ /// @warning a SymbolTransform can only be registered once. Attempting to
+ /// register a SymbolTransform more than once will result in an ICE.
+ /// @returns this CloneContext so calls can be chained
+ CloneContext& ReplaceAll(const SymbolTransform& replacer) {
+ if (symbol_transform_) {
+ TINT_ICE(Clone, Diagnostics()) << "ReplaceAll(const SymbolTransform&) called "
+ "multiple times on the same CloneContext";
+ return *this;
}
- }
+ symbol_transform_ = replacer;
+ return *this;
}
- }
-
- /// Clones each of the elements of the vector `v` into the ProgramBuilder
- /// #dst.
- ///
- /// All the elements of the vector `v` must be owned by the Program #src.
- ///
- /// @param v the vector to clone
- /// @return the cloned vector
- ast::FunctionList Clone(const ast::FunctionList& v);
-
- /// ReplaceAll() registers `replacer` to be called whenever the Clone() method
- /// is called with a Cloneable type that matches (or derives from) the type of
- /// the single parameter of `replacer`.
- /// The returned Cloneable of `replacer` will be used as the replacement for
- /// all references to the object that's being cloned. This returned Cloneable
- /// must be owned by the Program #dst.
- ///
- /// `replacer` must be function-like with the signature: `T* (T*)`
- /// where `T` is a type deriving from Cloneable.
- ///
- /// If `replacer` returns a nullptr then Clone() will call `T::Clone()` to
- /// clone the object.
- ///
- /// Example:
- ///
- /// ```
- /// // Replace all ast::UintLiteralExpressions with the number 42
- /// CloneCtx ctx(&out, in);
- /// ctx.ReplaceAll([&] (ast::UintLiteralExpression* l) {
- /// return ctx->dst->create<ast::UintLiteralExpression>(
- /// ctx->Clone(l->source),
- /// ctx->Clone(l->type),
- /// 42);
- /// });
- /// ctx.Clone();
- /// ```
- ///
- /// @warning a single handler can only be registered for any given type.
- /// Attempting to register two handlers for the same type will result in an
- /// ICE.
- /// @warning The replacement object must be of the correct type for all
- /// references of the original object. A type mismatch will result in an
- /// assertion in debug builds, and undefined behavior in release builds.
- /// @param replacer a function or function-like object with the signature
- /// `T* (T*)`, where `T` derives from Cloneable
- /// @returns this CloneContext so calls can be chained
- template <typename F>
- traits::EnableIf<ParamTypeIsPtrOf<F, Cloneable>, CloneContext>& ReplaceAll(
- F&& replacer) {
- using TPtr = traits::ParameterType<F, 0>;
- using T = typename std::remove_pointer<TPtr>::type;
- for (auto& transform : transforms_) {
- if (transform.typeinfo->Is(&TypeInfo::Of<T>()) ||
- TypeInfo::Of<T>().Is(transform.typeinfo)) {
- TINT_ICE(Clone, Diagnostics())
- << "ReplaceAll() called with a handler for type "
- << TypeInfo::Of<T>().name
- << " that is already handled by a handler for type "
- << transform.typeinfo->name;
+
+ /// Replace replaces all occurrences of `what` in #src with the pointer `with`
+ /// in #dst when calling Clone().
+ /// [DEPRECATED]: This function cannot handle nested replacements. Use the
+ /// overload of Replace() that take a function for the `WITH` argument.
+ /// @param what a pointer to the object in #src that will be replaced with
+ /// `with`
+ /// @param with a pointer to the replacement object owned by #dst that will be
+ /// used as a replacement for `what`
+ /// @warning The replacement object must be of the correct type for all
+ /// references of the original object. A type mismatch will result in an
+ /// assertion in debug builds, and undefined behavior in release builds.
+ /// @returns this CloneContext so calls can be chained
+ template <typename WHAT, typename WITH, typename = traits::EnableIfIsType<WITH, Cloneable>>
+ CloneContext& Replace(const WHAT* what, const WITH* with) {
+ TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(Clone, src, what);
+ TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(Clone, dst, with);
+ replacements_[what] = [with]() -> const Cloneable* { return with; };
return *this;
- }
}
- CloneableTransform transform;
- transform.typeinfo = &TypeInfo::Of<T>();
- transform.function = [=](const Cloneable* in) {
- return replacer(in->As<T>());
- };
- transforms_.emplace_back(std::move(transform));
- return *this;
- }
-
- /// ReplaceAll() registers `replacer` to be called whenever the Clone() method
- /// is called with a Symbol.
- /// The returned symbol of `replacer` will be used as the replacement for
- /// all references to the symbol that's being cloned. This returned Symbol
- /// must be owned by the Program #dst.
- /// @param replacer a function the signature `Symbol(Symbol)`.
- /// @warning a SymbolTransform can only be registered once. Attempting to
- /// register a SymbolTransform more than once will result in an ICE.
- /// @returns this CloneContext so calls can be chained
- CloneContext& ReplaceAll(const SymbolTransform& replacer) {
- if (symbol_transform_) {
- TINT_ICE(Clone, Diagnostics())
- << "ReplaceAll(const SymbolTransform&) called "
- "multiple times on the same CloneContext";
- return *this;
+
+ /// Replace replaces all occurrences of `what` in #src with the result of the
+ /// function `with` in #dst when calling Clone(). `with` will be called each
+ /// time `what` is cloned by this context. If `what` is not cloned, then
+ /// `with` may never be called.
+ /// @param what a pointer to the object in #src that will be replaced with
+ /// `with`
+ /// @param with a function that takes no arguments and returns a pointer to
+ /// the replacement object owned by #dst. The returned pointer will be used as
+ /// a replacement for `what`.
+ /// @warning The replacement object must be of the correct type for all
+ /// references of the original object. A type mismatch will result in an
+ /// assertion in debug builds, and undefined behavior in release builds.
+ /// @returns this CloneContext so calls can be chained
+ template <typename WHAT, typename WITH, typename = std::invoke_result_t<WITH>>
+ CloneContext& Replace(const WHAT* what, WITH&& with) {
+ TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(Clone, src, what);
+ replacements_[what] = with;
+ return *this;
}
- symbol_transform_ = replacer;
- return *this;
- }
-
- /// Replace replaces all occurrences of `what` in #src with the pointer `with`
- /// in #dst when calling Clone().
- /// [DEPRECATED]: This function cannot handle nested replacements. Use the
- /// overload of Replace() that take a function for the `WITH` argument.
- /// @param what a pointer to the object in #src that will be replaced with
- /// `with`
- /// @param with a pointer to the replacement object owned by #dst that will be
- /// used as a replacement for `what`
- /// @warning The replacement object must be of the correct type for all
- /// references of the original object. A type mismatch will result in an
- /// assertion in debug builds, and undefined behavior in release builds.
- /// @returns this CloneContext so calls can be chained
- template <typename WHAT,
- typename WITH,
- typename = traits::EnableIfIsType<WITH, Cloneable>>
- CloneContext& Replace(const WHAT* what, const WITH* with) {
- TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(Clone, src, what);
- TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(Clone, dst, with);
- replacements_[what] = [with]() -> const Cloneable* { return with; };
- return *this;
- }
-
- /// Replace replaces all occurrences of `what` in #src with the result of the
- /// function `with` in #dst when calling Clone(). `with` will be called each
- /// time `what` is cloned by this context. If `what` is not cloned, then
- /// `with` may never be called.
- /// @param what a pointer to the object in #src that will be replaced with
- /// `with`
- /// @param with a function that takes no arguments and returns a pointer to
- /// the replacement object owned by #dst. The returned pointer will be used as
- /// a replacement for `what`.
- /// @warning The replacement object must be of the correct type for all
- /// references of the original object. A type mismatch will result in an
- /// assertion in debug builds, and undefined behavior in release builds.
- /// @returns this CloneContext so calls can be chained
- template <typename WHAT, typename WITH, typename = std::result_of_t<WITH()>>
- CloneContext& Replace(const WHAT* what, WITH&& with) {
- TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(Clone, src, what);
- replacements_[what] = with;
- return *this;
- }
-
- /// Removes `object` from the cloned copy of `vector`.
- /// @param vector the vector in #src
- /// @param object a pointer to the object in #src that will be omitted from
- /// the cloned vector.
- /// @returns this CloneContext so calls can be chained
- template <typename T, typename OBJECT>
- CloneContext& Remove(const std::vector<T>& vector, OBJECT* object) {
- TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(Clone, src, object);
- if (std::find(vector.begin(), vector.end(), object) == vector.end()) {
- TINT_ICE(Clone, Diagnostics())
- << "CloneContext::Remove() vector does not contain object";
- return *this;
+
+ /// Removes `object` from the cloned copy of `vector`.
+ /// @param vector the vector in #src
+ /// @param object a pointer to the object in #src that will be omitted from
+ /// the cloned vector.
+ /// @returns this CloneContext so calls can be chained
+ template <typename T, typename OBJECT>
+ CloneContext& Remove(const std::vector<T>& vector, OBJECT* object) {
+ TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(Clone, src, object);
+ if (std::find(vector.begin(), vector.end(), object) == vector.end()) {
+ TINT_ICE(Clone, Diagnostics())
+ << "CloneContext::Remove() vector does not contain object";
+ return *this;
+ }
+
+ list_transforms_[&vector].remove_.emplace(object);
+ return *this;
}
- list_transforms_[&vector].remove_.emplace(object);
- return *this;
- }
-
- /// Inserts `object` before any other objects of `vector`, when it is cloned.
- /// @param vector the vector in #src
- /// @param object a pointer to the object in #dst that will be inserted at the
- /// front of the vector
- /// @returns this CloneContext so calls can be chained
- template <typename T, typename OBJECT>
- CloneContext& InsertFront(const std::vector<T>& vector, OBJECT* object) {
- TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(Clone, dst, object);
- auto& transforms = list_transforms_[&vector];
- auto& list = transforms.insert_front_;
- list.emplace_back(object);
- return *this;
- }
-
- /// Inserts `object` after any other objects of `vector`, when it is cloned.
- /// @param vector the vector in #src
- /// @param object a pointer to the object in #dst that will be inserted at the
- /// end of the vector
- /// @returns this CloneContext so calls can be chained
- template <typename T, typename OBJECT>
- CloneContext& InsertBack(const std::vector<T>& vector, OBJECT* object) {
- TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(Clone, dst, object);
- auto& transforms = list_transforms_[&vector];
- auto& list = transforms.insert_back_;
- list.emplace_back(object);
- return *this;
- }
-
- /// Inserts `object` before `before` whenever `vector` is cloned.
- /// @param vector the vector in #src
- /// @param before a pointer to the object in #src
- /// @param object a pointer to the object in #dst that will be inserted before
- /// any occurrence of the clone of `before`
- /// @returns this CloneContext so calls can be chained
- template <typename T, typename BEFORE, typename OBJECT>
- CloneContext& InsertBefore(const std::vector<T>& vector,
- const BEFORE* before,
- const OBJECT* object) {
- TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(Clone, src, before);
- TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(Clone, dst, object);
- if (std::find(vector.begin(), vector.end(), before) == vector.end()) {
- TINT_ICE(Clone, Diagnostics())
- << "CloneContext::InsertBefore() vector does not contain before";
- return *this;
+ /// Inserts `object` before any other objects of `vector`, when it is cloned.
+ /// @param vector the vector in #src
+ /// @param object a pointer to the object in #dst that will be inserted at the
+ /// front of the vector
+ /// @returns this CloneContext so calls can be chained
+ template <typename T, typename OBJECT>
+ CloneContext& InsertFront(const std::vector<T>& vector, OBJECT* object) {
+ TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(Clone, dst, object);
+ auto& transforms = list_transforms_[&vector];
+ auto& list = transforms.insert_front_;
+ list.emplace_back(object);
+ return *this;
}
- auto& transforms = list_transforms_[&vector];
- auto& list = transforms.insert_before_[before];
- list.emplace_back(object);
- return *this;
- }
-
- /// Inserts `object` after `after` whenever `vector` is cloned.
- /// @param vector the vector in #src
- /// @param after a pointer to the object in #src
- /// @param object a pointer to the object in #dst that will be inserted after
- /// any occurrence of the clone of `after`
- /// @returns this CloneContext so calls can be chained
- template <typename T, typename AFTER, typename OBJECT>
- CloneContext& InsertAfter(const std::vector<T>& vector,
- const AFTER* after,
- const OBJECT* object) {
- TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(Clone, src, after);
- TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(Clone, dst, object);
- if (std::find(vector.begin(), vector.end(), after) == vector.end()) {
- TINT_ICE(Clone, Diagnostics())
- << "CloneContext::InsertAfter() vector does not contain after";
- return *this;
+ /// Inserts `object` after any other objects of `vector`, when it is cloned.
+ /// @param vector the vector in #src
+ /// @param object a pointer to the object in #dst that will be inserted at the
+ /// end of the vector
+ /// @returns this CloneContext so calls can be chained
+ template <typename T, typename OBJECT>
+ CloneContext& InsertBack(const std::vector<T>& vector, OBJECT* object) {
+ TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(Clone, dst, object);
+ auto& transforms = list_transforms_[&vector];
+ auto& list = transforms.insert_back_;
+ list.emplace_back(object);
+ return *this;
}
- auto& transforms = list_transforms_[&vector];
- auto& list = transforms.insert_after_[after];
- list.emplace_back(object);
- return *this;
- }
-
- /// Clone performs the clone of the Program's AST nodes, types and symbols
- /// from #src to #dst. Semantic nodes are not cloned, as these will be rebuilt
- /// when the ProgramBuilder #dst builds its Program.
- void Clone();
-
- /// The target ProgramBuilder to clone into.
- ProgramBuilder* const dst;
-
- /// The source Program to clone from.
- Program const* const src;
-
- private:
- struct CloneableTransform {
- /// Constructor
- CloneableTransform();
- /// Copy constructor
- /// @param other the CloneableTransform to copy
- CloneableTransform(const CloneableTransform& other);
- /// Destructor
- ~CloneableTransform();
-
- // TypeInfo of the Cloneable that the transform operates on
- const TypeInfo* typeinfo;
- std::function<const Cloneable*(const Cloneable*)> function;
- };
-
- CloneContext(const CloneContext&) = delete;
- CloneContext& operator=(const CloneContext&) = delete;
-
- /// Cast `obj` from type `FROM` to type `TO`, returning the cast object.
- /// Reports an internal compiler error if the cast failed.
- template <typename TO, typename FROM>
- const TO* CheckedCast(const FROM* obj) {
- if (obj == nullptr) {
- return nullptr;
+ /// Inserts `object` before `before` whenever `vector` is cloned.
+ /// @param vector the vector in #src
+ /// @param before a pointer to the object in #src
+ /// @param object a pointer to the object in #dst that will be inserted before
+ /// any occurrence of the clone of `before`
+ /// @returns this CloneContext so calls can be chained
+ template <typename T, typename BEFORE, typename OBJECT>
+ CloneContext& InsertBefore(const std::vector<T>& vector,
+ const BEFORE* before,
+ const OBJECT* object) {
+ TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(Clone, src, before);
+ TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(Clone, dst, object);
+ if (std::find(vector.begin(), vector.end(), before) == vector.end()) {
+ TINT_ICE(Clone, Diagnostics())
+ << "CloneContext::InsertBefore() vector does not contain before";
+ return *this;
+ }
+
+ auto& transforms = list_transforms_[&vector];
+ auto& list = transforms.insert_before_[before];
+ list.emplace_back(object);
+ return *this;
}
- if (const TO* cast = obj->template As<TO>()) {
- return cast;
+
+ /// Inserts `object` after `after` whenever `vector` is cloned.
+ /// @param vector the vector in #src
+ /// @param after a pointer to the object in #src
+ /// @param object a pointer to the object in #dst that will be inserted after
+ /// any occurrence of the clone of `after`
+ /// @returns this CloneContext so calls can be chained
+ template <typename T, typename AFTER, typename OBJECT>
+ CloneContext& InsertAfter(const std::vector<T>& vector,
+ const AFTER* after,
+ const OBJECT* object) {
+ TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(Clone, src, after);
+ TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(Clone, dst, object);
+ if (std::find(vector.begin(), vector.end(), after) == vector.end()) {
+ TINT_ICE(Clone, Diagnostics())
+ << "CloneContext::InsertAfter() vector does not contain after";
+ return *this;
+ }
+
+ auto& transforms = list_transforms_[&vector];
+ auto& list = transforms.insert_after_[after];
+ list.emplace_back(object);
+ return *this;
}
- CheckedCastFailure(obj, TypeInfo::Of<TO>());
- return nullptr;
- }
- /// Clones a Cloneable object, using any replacements or transforms that have
- /// been configured.
- const Cloneable* CloneCloneable(const Cloneable* object);
+ /// Clone performs the clone of the Program's AST nodes, types and symbols
+ /// from #src to #dst. Semantic nodes are not cloned, as these will be rebuilt
+ /// when the ProgramBuilder #dst builds its Program.
+ void Clone();
+
+ /// The target ProgramBuilder to clone into.
+ ProgramBuilder* const dst;
+
+ /// The source Program to clone from.
+ Program const* const src;
+
+ private:
+ struct CloneableTransform {
+ /// Constructor
+ CloneableTransform();
+ /// Copy constructor
+ /// @param other the CloneableTransform to copy
+ CloneableTransform(const CloneableTransform& other);
+ /// Destructor
+ ~CloneableTransform();
+
+ // TypeInfo of the Cloneable that the transform operates on
+ const TypeInfo* typeinfo;
+ std::function<const Cloneable*(const Cloneable*)> function;
+ };
- /// Adds an error diagnostic to Diagnostics() that the cloned object was not
- /// of the expected type.
- void CheckedCastFailure(const Cloneable* got, const TypeInfo& expected);
+ CloneContext(const CloneContext&) = delete;
+ CloneContext& operator=(const CloneContext&) = delete;
- /// @returns the diagnostic list of #dst
- diag::List& Diagnostics() const;
+ /// Cast `obj` from type `FROM` to type `TO`, returning the cast object.
+ /// Reports an internal compiler error if the cast failed.
+ template <typename TO, typename FROM>
+ const TO* CheckedCast(const FROM* obj) {
+ if (obj == nullptr) {
+ return nullptr;
+ }
+ if (const TO* cast = obj->template As<TO>()) {
+ return cast;
+ }
+ CheckedCastFailure(obj, TypeInfo::Of<TO>());
+ return nullptr;
+ }
- /// A vector of const Cloneable*
- using CloneableList = std::vector<const Cloneable*>;
+ /// Clones a Cloneable object, using any replacements or transforms that have
+ /// been configured.
+ const Cloneable* CloneCloneable(const Cloneable* object);
- /// Transformations to be applied to a list (vector)
- struct ListTransforms {
- /// Constructor
- ListTransforms();
- /// Destructor
- ~ListTransforms();
+ /// Adds an error diagnostic to Diagnostics() that the cloned object was not
+ /// of the expected type.
+ void CheckedCastFailure(const Cloneable* got, const TypeInfo& expected);
+
+ /// @returns the diagnostic list of #dst
+ diag::List& Diagnostics() const;
- /// A map of object in #src to omit when cloned into #dst.
- std::unordered_set<const Cloneable*> remove_;
+ /// A vector of const Cloneable*
+ using CloneableList = std::vector<const Cloneable*>;
- /// A list of objects in #dst to insert before any others when the vector is
- /// cloned.
- CloneableList insert_front_;
+ /// Transformations to be applied to a list (vector)
+ struct ListTransforms {
+ /// Constructor
+ ListTransforms();
+ /// Destructor
+ ~ListTransforms();
- /// A list of objects in #dst to insert befor after any others when the
- /// vector is cloned.
- CloneableList insert_back_;
+ /// The set of objects in #src to omit when cloned into #dst.
+ std::unordered_set<const Cloneable*> remove_;
- /// A map of object in #src to the list of cloned objects in #dst.
- /// Clone(const std::vector<T*>& v) will use this to insert the map-value
- /// list into the target vector before cloning and inserting the map-key.
- std::unordered_map<const Cloneable*, CloneableList> insert_before_;
+ /// A list of objects in #dst to insert before any others when the vector is
+ /// cloned.
+ CloneableList insert_front_;
- /// A map of object in #src to the list of cloned objects in #dst.
- /// Clone(const std::vector<T*>& v) will use this to insert the map-value
- /// list into the target vector after cloning and inserting the map-key.
- std::unordered_map<const Cloneable*, CloneableList> insert_after_;
- };
+ /// A list of objects in #dst to insert after any others when the
+ /// vector is cloned.
+ CloneableList insert_back_;
+
+ /// A map of object in #src to the list of cloned objects in #dst.
+ /// Clone(const std::vector<T*>& v) will use this to insert the map-value
+ /// list into the target vector before cloning and inserting the map-key.
+ std::unordered_map<const Cloneable*, CloneableList> insert_before_;
+
+ /// A map of object in #src to the list of cloned objects in #dst.
+ /// Clone(const std::vector<T*>& v) will use this to insert the map-value
+ /// list into the target vector after cloning and inserting the map-key.
+ std::unordered_map<const Cloneable*, CloneableList> insert_after_;
+ };
- /// A map of object in #src to functions that create their replacement in
- /// #dst
- std::unordered_map<const Cloneable*, std::function<const Cloneable*()>>
- replacements_;
+ /// A map of object in #src to functions that create their replacement in
+ /// #dst
+ std::unordered_map<const Cloneable*, std::function<const Cloneable*()>> replacements_;
- /// A map of symbol in #src to their cloned equivalent in #dst
- std::unordered_map<Symbol, Symbol> cloned_symbols_;
+ /// A map of symbol in #src to their cloned equivalent in #dst
+ std::unordered_map<Symbol, Symbol> cloned_symbols_;
- /// Cloneable transform functions registered with ReplaceAll()
- std::vector<CloneableTransform> transforms_;
+ /// Cloneable transform functions registered with ReplaceAll()
+ std::vector<CloneableTransform> transforms_;
- /// Map of std::vector pointer to transforms for that list
- std::unordered_map<const void*, ListTransforms> list_transforms_;
+ /// Map of std::vector pointer to transforms for that list
+ std::unordered_map<const void*, ListTransforms> list_transforms_;
- /// Symbol transform registered with ReplaceAll()
- SymbolTransform symbol_transform_;
+ /// Symbol transform registered with ReplaceAll()
+ SymbolTransform symbol_transform_;
};
} // namespace tint
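
The ReplaceAll() and Insert*() documentation in clone_context.h above describes the transform hooks. A hedged sketch of how the symbol overload is typically wired up follows; RenameAllSymbols() and the "_copy" suffix are hypothetical and not taken from the patch.

    // Sketch only: rename every symbol while cloning.
    tint::Program RenameAllSymbols(const tint::Program& in) {
        tint::ProgramBuilder out;
        // Pass auto_clone_symbols = false so symbols are cloned lazily through
        // Clone(Symbol), which consults the SymbolTransform registered below.
        tint::CloneContext ctx(&out, &in, /* auto_clone_symbols */ false);
        ctx.ReplaceAll([&](tint::Symbol s) {
            return out.Symbols().New(in.Symbols().NameFor(s) + "_copy");
        });
        ctx.Clone();
        return tint::Program(std::move(out));
    }

The node-based ReplaceAll() and the InsertBefore()/InsertAfter()/Remove() list hooks documented above follow the same register-then-Clone() pattern.
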
diff --git a/chromium/third_party/dawn/src/tint/clone_context_test.cc b/chromium/third_party/dawn/src/tint/clone_context_test.cc
index 3a5a8c90968..46cc7208652 100644
--- a/chromium/third_party/dawn/src/tint/clone_context_test.cc
+++ b/chromium/third_party/dawn/src/tint/clone_context_test.cc
@@ -21,922 +21,904 @@ namespace tint {
namespace {
struct Allocator {
- template <typename T, typename... ARGS>
- T* Create(ARGS&&... args) {
- return alloc.Create<T>(this, std::forward<ARGS>(args)...);
- }
+ template <typename T, typename... ARGS>
+ T* Create(ARGS&&... args) {
+ return alloc.Create<T>(this, std::forward<ARGS>(args)...);
+ }
- private:
- utils::BlockAllocator<Cloneable> alloc;
+ private:
+ utils::BlockAllocator<Cloneable> alloc;
};
struct Node : public Castable<Node, Cloneable> {
- Node(Allocator* alloc,
- Symbol n,
- const Node* node_a = nullptr,
- const Node* node_b = nullptr,
- const Node* node_c = nullptr)
- : allocator(alloc), name(n), a(node_a), b(node_b), c(node_c) {}
- Allocator* const allocator;
- Symbol name;
- const Node* a = nullptr;
- const Node* b = nullptr;
- const Node* c = nullptr;
- std::vector<const Node*> vec;
-
- Node* Clone(CloneContext* ctx) const override {
- auto* out = allocator->Create<Node>(ctx->Clone(name));
- out->a = ctx->Clone(a);
- out->b = ctx->Clone(b);
- out->c = ctx->Clone(c);
- out->vec = ctx->Clone(vec);
- return out;
- }
+ Node(Allocator* alloc,
+ Symbol n,
+ const Node* node_a = nullptr,
+ const Node* node_b = nullptr,
+ const Node* node_c = nullptr)
+ : allocator(alloc), name(n), a(node_a), b(node_b), c(node_c) {}
+ Allocator* const allocator;
+ Symbol name;
+ const Node* a = nullptr;
+ const Node* b = nullptr;
+ const Node* c = nullptr;
+ std::vector<const Node*> vec;
+
+ Node* Clone(CloneContext* ctx) const override {
+ auto* out = allocator->Create<Node>(ctx->Clone(name));
+ out->a = ctx->Clone(a);
+ out->b = ctx->Clone(b);
+ out->c = ctx->Clone(c);
+ out->vec = ctx->Clone(vec);
+ return out;
+ }
};
struct Replaceable : public Castable<Replaceable, Node> {
- Replaceable(Allocator* alloc,
- Symbol n,
- const Node* node_a = nullptr,
- const Node* node_b = nullptr,
- const Node* node_c = nullptr)
- : Base(alloc, n, node_a, node_b, node_c) {}
+ Replaceable(Allocator* alloc,
+ Symbol n,
+ const Node* node_a = nullptr,
+ const Node* node_b = nullptr,
+ const Node* node_c = nullptr)
+ : Base(alloc, n, node_a, node_b, node_c) {}
};
struct Replacement : public Castable<Replacement, Replaceable> {
- Replacement(Allocator* alloc, Symbol n) : Base(alloc, n) {}
+ Replacement(Allocator* alloc, Symbol n) : Base(alloc, n) {}
};
struct NotANode : public Castable<NotANode, Cloneable> {
- explicit NotANode(Allocator* alloc) : allocator(alloc) {}
+ explicit NotANode(Allocator* alloc) : allocator(alloc) {}
- Allocator* const allocator;
- NotANode* Clone(CloneContext*) const override {
- return allocator->Create<NotANode>();
- }
+ Allocator* const allocator;
+ NotANode* Clone(CloneContext*) const override { return allocator->Create<NotANode>(); }
};
struct ProgramNode : public Castable<ProgramNode, Cloneable> {
- ProgramNode(Allocator* alloc, ProgramID id, ProgramID cloned_id)
- : allocator(alloc), program_id(id), cloned_program_id(cloned_id) {}
+ ProgramNode(Allocator* alloc, ProgramID id, ProgramID cloned_id)
+ : allocator(alloc), program_id(id), cloned_program_id(cloned_id) {}
- Allocator* const allocator;
- const ProgramID program_id;
- const ProgramID cloned_program_id;
+ Allocator* const allocator;
+ const ProgramID program_id;
+ const ProgramID cloned_program_id;
- ProgramNode* Clone(CloneContext*) const override {
- return allocator->Create<ProgramNode>(cloned_program_id, cloned_program_id);
- }
+ ProgramNode* Clone(CloneContext*) const override {
+ return allocator->Create<ProgramNode>(cloned_program_id, cloned_program_id);
+ }
};
ProgramID ProgramIDOf(const ProgramNode* node) {
- return node->program_id;
+ return node->program_id;
}
using CloneContextNodeTest = ::testing::Test;
TEST_F(CloneContextNodeTest, Clone) {
- Allocator alloc;
-
- ProgramBuilder builder;
- Node* original_root;
- {
- auto* a_b = alloc.Create<Node>(builder.Symbols().New("a->b"));
- auto* a = alloc.Create<Node>(builder.Symbols().New("a"), nullptr, a_b);
- auto* b_a = a; // Aliased
- auto* b_b = alloc.Create<Node>(builder.Symbols().New("b->b"));
- auto* b = alloc.Create<Node>(builder.Symbols().New("b"), b_a, b_b);
- auto* c = b; // Aliased
- original_root = alloc.Create<Node>(builder.Symbols().New("root"), a, b, c);
- }
- Program original(std::move(builder));
-
- // root
- // ╭──────────────────┼──────────────────╮
- // (a) (b) (c)
- // N <──────┐ N <───────────────┘
- // ╭────┼────╮ │ ╭────┼────╮
- // (a) (b) (c) │ (a) (b) (c)
- // N └───┘ N
- //
- // N: Node
-
- ProgramBuilder cloned;
- auto* cloned_root = CloneContext(&cloned, &original).Clone(original_root);
-
- EXPECT_NE(cloned_root->a, nullptr);
- EXPECT_EQ(cloned_root->a->a, nullptr);
- EXPECT_NE(cloned_root->a->b, nullptr);
- EXPECT_EQ(cloned_root->a->c, nullptr);
- EXPECT_NE(cloned_root->b, nullptr);
- EXPECT_NE(cloned_root->b->a, nullptr);
- EXPECT_NE(cloned_root->b->b, nullptr);
- EXPECT_EQ(cloned_root->b->c, nullptr);
- EXPECT_NE(cloned_root->c, nullptr);
-
- EXPECT_NE(cloned_root->a, original_root->a);
- EXPECT_NE(cloned_root->a->b, original_root->a->b);
- EXPECT_NE(cloned_root->b, original_root->b);
- EXPECT_NE(cloned_root->b->a, original_root->b->a);
- EXPECT_NE(cloned_root->b->b, original_root->b->b);
- EXPECT_NE(cloned_root->c, original_root->c);
-
- EXPECT_EQ(cloned_root->name, cloned.Symbols().Get("root"));
- EXPECT_EQ(cloned_root->a->name, cloned.Symbols().Get("a"));
- EXPECT_EQ(cloned_root->a->b->name, cloned.Symbols().Get("a->b"));
- EXPECT_EQ(cloned_root->b->name, cloned.Symbols().Get("b"));
- EXPECT_EQ(cloned_root->b->b->name, cloned.Symbols().Get("b->b"));
-
- EXPECT_NE(cloned_root->b->a, cloned_root->a); // De-aliased
- EXPECT_NE(cloned_root->c, cloned_root->b); // De-aliased
-
- EXPECT_EQ(cloned_root->b->a->name, cloned_root->a->name);
- EXPECT_EQ(cloned_root->c->name, cloned_root->b->name);
+ Allocator alloc;
+
+ ProgramBuilder builder;
+ Node* original_root;
+ {
+ auto* a_b = alloc.Create<Node>(builder.Symbols().New("a->b"));
+ auto* a = alloc.Create<Node>(builder.Symbols().New("a"), nullptr, a_b);
+ auto* b_a = a; // Aliased
+ auto* b_b = alloc.Create<Node>(builder.Symbols().New("b->b"));
+ auto* b = alloc.Create<Node>(builder.Symbols().New("b"), b_a, b_b);
+ auto* c = b; // Aliased
+ original_root = alloc.Create<Node>(builder.Symbols().New("root"), a, b, c);
+ }
+ Program original(std::move(builder));
+
+ // root
+ // ╭──────────────────┼──────────────────╮
+ // (a) (b) (c)
+ // N <──────┐ N <───────────────┘
+ // ╭────┼────╮ │ ╭────┼────╮
+ // (a) (b) (c) │ (a) (b) (c)
+ // N └───┘ N
+ //
+ // N: Node
+
+ ProgramBuilder cloned;
+ auto* cloned_root = CloneContext(&cloned, &original).Clone(original_root);
+
+ EXPECT_NE(cloned_root->a, nullptr);
+ EXPECT_EQ(cloned_root->a->a, nullptr);
+ EXPECT_NE(cloned_root->a->b, nullptr);
+ EXPECT_EQ(cloned_root->a->c, nullptr);
+ EXPECT_NE(cloned_root->b, nullptr);
+ EXPECT_NE(cloned_root->b->a, nullptr);
+ EXPECT_NE(cloned_root->b->b, nullptr);
+ EXPECT_EQ(cloned_root->b->c, nullptr);
+ EXPECT_NE(cloned_root->c, nullptr);
+
+ EXPECT_NE(cloned_root->a, original_root->a);
+ EXPECT_NE(cloned_root->a->b, original_root->a->b);
+ EXPECT_NE(cloned_root->b, original_root->b);
+ EXPECT_NE(cloned_root->b->a, original_root->b->a);
+ EXPECT_NE(cloned_root->b->b, original_root->b->b);
+ EXPECT_NE(cloned_root->c, original_root->c);
+
+ EXPECT_EQ(cloned_root->name, cloned.Symbols().Get("root"));
+ EXPECT_EQ(cloned_root->a->name, cloned.Symbols().Get("a"));
+ EXPECT_EQ(cloned_root->a->b->name, cloned.Symbols().Get("a->b"));
+ EXPECT_EQ(cloned_root->b->name, cloned.Symbols().Get("b"));
+ EXPECT_EQ(cloned_root->b->b->name, cloned.Symbols().Get("b->b"));
+
+ EXPECT_NE(cloned_root->b->a, cloned_root->a); // De-aliased
+ EXPECT_NE(cloned_root->c, cloned_root->b); // De-aliased
+
+ EXPECT_EQ(cloned_root->b->a->name, cloned_root->a->name);
+ EXPECT_EQ(cloned_root->c->name, cloned_root->b->name);
}
TEST_F(CloneContextNodeTest, CloneWithReplaceAll_Cloneable) {
- Allocator alloc;
-
- ProgramBuilder builder;
- Node* original_root;
- {
- auto* a_b = alloc.Create<Replaceable>(builder.Symbols().New("a->b"));
- auto* a = alloc.Create<Node>(builder.Symbols().New("a"), nullptr, a_b);
- auto* b_a = a; // Aliased
- auto* b =
- alloc.Create<Replaceable>(builder.Symbols().New("b"), b_a, nullptr);
- auto* c = b; // Aliased
- original_root = alloc.Create<Node>(builder.Symbols().New("root"), a, b, c);
- }
- Program original(std::move(builder));
-
- // root
- // ╭──────────────────┼──────────────────╮
- // (a) (b) (c)
- // N <──────┐ R <───────────────┘
- // ╭────┼────╮ │ ╭────┼────╮
- // (a) (b) (c) │ (a) (b) (c)
- // R └───┘
- //
- // N: Node
- // R: Replaceable
-
- ProgramBuilder cloned;
-
- CloneContext ctx(&cloned, &original);
- ctx.ReplaceAll([&](const Replaceable* in) {
- auto out_name = cloned.Symbols().Register(
- "replacement:" + original.Symbols().NameFor(in->name));
- auto b_name = cloned.Symbols().Register(
- "replacement-child:" + original.Symbols().NameFor(in->name));
- auto* out = alloc.Create<Replacement>(out_name);
- out->b = alloc.Create<Node>(b_name);
- out->c = ctx.Clone(in->a);
- return out;
- });
- auto* cloned_root = ctx.Clone(original_root);
-
- // root
- // ╭─────────────────┼──────────────────╮
- // (a) (b) (c)
- // N <──────┐ R <───────────────┘
- // ╭────┼────╮ │ ╭────┼────╮
- // (a) (b) (c) │ (a) (b) (c)
- // R │ N |
- // ╭────┼────╮ └────────────┘
- // (a) (b) (c)
- // N
- //
- // N: Node
- // R: Replacement
-
- EXPECT_NE(cloned_root->a, nullptr);
- EXPECT_EQ(cloned_root->a->a, nullptr);
- EXPECT_NE(cloned_root->a->b, nullptr); // Replaced
- EXPECT_EQ(cloned_root->a->b->a, nullptr); // From replacement
- EXPECT_NE(cloned_root->a->b->b, nullptr); // From replacement
- EXPECT_EQ(cloned_root->a->b->c, nullptr); // From replacement
- EXPECT_EQ(cloned_root->a->c, nullptr);
- EXPECT_NE(cloned_root->b, nullptr);
- EXPECT_EQ(cloned_root->b->a, nullptr); // From replacement
- EXPECT_NE(cloned_root->b->b, nullptr); // From replacement
- EXPECT_NE(cloned_root->b->c, nullptr); // From replacement
- EXPECT_NE(cloned_root->c, nullptr);
-
- EXPECT_NE(cloned_root->a, original_root->a);
- EXPECT_NE(cloned_root->a->b, original_root->a->b);
- EXPECT_NE(cloned_root->b, original_root->b);
- EXPECT_NE(cloned_root->b->a, original_root->b->a);
- EXPECT_NE(cloned_root->c, original_root->c);
-
- EXPECT_EQ(cloned_root->name, cloned.Symbols().Get("root"));
- EXPECT_EQ(cloned_root->a->name, cloned.Symbols().Get("a"));
- EXPECT_EQ(cloned_root->a->b->name, cloned.Symbols().Get("replacement:a->b"));
- EXPECT_EQ(cloned_root->a->b->b->name,
- cloned.Symbols().Get("replacement-child:a->b"));
- EXPECT_EQ(cloned_root->b->name, cloned.Symbols().Get("replacement:b"));
- EXPECT_EQ(cloned_root->b->b->name,
- cloned.Symbols().Get("replacement-child:b"));
-
- EXPECT_NE(cloned_root->b->c, cloned_root->a); // De-aliased
- EXPECT_NE(cloned_root->c, cloned_root->b); // De-aliased
-
- EXPECT_EQ(cloned_root->b->c->name, cloned_root->a->name);
- EXPECT_EQ(cloned_root->c->name, cloned_root->b->name);
-
- EXPECT_FALSE(Is<Replacement>(cloned_root->a));
- EXPECT_TRUE(Is<Replacement>(cloned_root->a->b));
- EXPECT_FALSE(Is<Replacement>(cloned_root->a->b->b));
- EXPECT_TRUE(Is<Replacement>(cloned_root->b));
- EXPECT_FALSE(Is<Replacement>(cloned_root->b->b));
+ Allocator alloc;
+
+ ProgramBuilder builder;
+ Node* original_root;
+ {
+ auto* a_b = alloc.Create<Replaceable>(builder.Symbols().New("a->b"));
+ auto* a = alloc.Create<Node>(builder.Symbols().New("a"), nullptr, a_b);
+ auto* b_a = a; // Aliased
+ auto* b = alloc.Create<Replaceable>(builder.Symbols().New("b"), b_a, nullptr);
+ auto* c = b; // Aliased
+ original_root = alloc.Create<Node>(builder.Symbols().New("root"), a, b, c);
+ }
+ Program original(std::move(builder));
+
+ // root
+ // ╭──────────────────┼──────────────────╮
+ // (a) (b) (c)
+ // N <──────┐ R <───────────────┘
+ // ╭────┼────╮ │ ╭────┼────╮
+ // (a) (b) (c) │ (a) (b) (c)
+ // R └───┘
+ //
+ // N: Node
+ // R: Replaceable
+
+ ProgramBuilder cloned;
+
+ CloneContext ctx(&cloned, &original);
+ ctx.ReplaceAll([&](const Replaceable* in) {
+ auto out_name =
+ cloned.Symbols().Register("replacement:" + original.Symbols().NameFor(in->name));
+ auto b_name =
+ cloned.Symbols().Register("replacement-child:" + original.Symbols().NameFor(in->name));
+ auto* out = alloc.Create<Replacement>(out_name);
+ out->b = alloc.Create<Node>(b_name);
+ out->c = ctx.Clone(in->a);
+ return out;
+ });
+ auto* cloned_root = ctx.Clone(original_root);
+
+ // root
+ // ╭─────────────────┼──────────────────╮
+ // (a) (b) (c)
+ // N <──────┐ R <───────────────┘
+ // ╭────┼────╮ │ ╭────┼────╮
+ // (a) (b) (c) │ (a) (b) (c)
+ // R │ N |
+ // ╭────┼────╮ └────────────┘
+ // (a) (b) (c)
+ // N
+ //
+ // N: Node
+ // R: Replacement
+
+ EXPECT_NE(cloned_root->a, nullptr);
+ EXPECT_EQ(cloned_root->a->a, nullptr);
+ EXPECT_NE(cloned_root->a->b, nullptr); // Replaced
+ EXPECT_EQ(cloned_root->a->b->a, nullptr); // From replacement
+ EXPECT_NE(cloned_root->a->b->b, nullptr); // From replacement
+ EXPECT_EQ(cloned_root->a->b->c, nullptr); // From replacement
+ EXPECT_EQ(cloned_root->a->c, nullptr);
+ EXPECT_NE(cloned_root->b, nullptr);
+ EXPECT_EQ(cloned_root->b->a, nullptr); // From replacement
+ EXPECT_NE(cloned_root->b->b, nullptr); // From replacement
+ EXPECT_NE(cloned_root->b->c, nullptr); // From replacement
+ EXPECT_NE(cloned_root->c, nullptr);
+
+ EXPECT_NE(cloned_root->a, original_root->a);
+ EXPECT_NE(cloned_root->a->b, original_root->a->b);
+ EXPECT_NE(cloned_root->b, original_root->b);
+ EXPECT_NE(cloned_root->b->a, original_root->b->a);
+ EXPECT_NE(cloned_root->c, original_root->c);
+
+ EXPECT_EQ(cloned_root->name, cloned.Symbols().Get("root"));
+ EXPECT_EQ(cloned_root->a->name, cloned.Symbols().Get("a"));
+ EXPECT_EQ(cloned_root->a->b->name, cloned.Symbols().Get("replacement:a->b"));
+ EXPECT_EQ(cloned_root->a->b->b->name, cloned.Symbols().Get("replacement-child:a->b"));
+ EXPECT_EQ(cloned_root->b->name, cloned.Symbols().Get("replacement:b"));
+ EXPECT_EQ(cloned_root->b->b->name, cloned.Symbols().Get("replacement-child:b"));
+
+ EXPECT_NE(cloned_root->b->c, cloned_root->a); // De-aliased
+ EXPECT_NE(cloned_root->c, cloned_root->b); // De-aliased
+
+ EXPECT_EQ(cloned_root->b->c->name, cloned_root->a->name);
+ EXPECT_EQ(cloned_root->c->name, cloned_root->b->name);
+
+ EXPECT_FALSE(Is<Replacement>(cloned_root->a));
+ EXPECT_TRUE(Is<Replacement>(cloned_root->a->b));
+ EXPECT_FALSE(Is<Replacement>(cloned_root->a->b->b));
+ EXPECT_TRUE(Is<Replacement>(cloned_root->b));
+ EXPECT_FALSE(Is<Replacement>(cloned_root->b->b));
}
TEST_F(CloneContextNodeTest, CloneWithReplaceAll_Symbols) {
- Allocator alloc;
-
- ProgramBuilder builder;
- Node* original_root;
- {
- auto* a_b = alloc.Create<Node>(builder.Symbols().New("a->b"));
- auto* a = alloc.Create<Node>(builder.Symbols().New("a"), nullptr, a_b);
- auto* b_a = a; // Aliased
- auto* b_b = alloc.Create<Node>(builder.Symbols().New("b->b"));
- auto* b = alloc.Create<Node>(builder.Symbols().New("b"), b_a, b_b);
- auto* c = b; // Aliased
- original_root = alloc.Create<Node>(builder.Symbols().New("root"), a, b, c);
- }
- Program original(std::move(builder));
-
- // root
- // ╭──────────────────┼──────────────────╮
- // (a) (b) (c)
- // N <──────┐ N <───────────────┘
- // ╭────┼────╮ │ ╭────┼────╮
- // (a) (b) (c) │ (a) (b) (c)
- // N └───┘ N
- //
- // N: Node
-
- ProgramBuilder cloned;
- auto* cloned_root = CloneContext(&cloned, &original, false)
- .ReplaceAll([&](Symbol sym) {
- auto in = original.Symbols().NameFor(sym);
- auto out = "transformed<" + in + ">";
- return cloned.Symbols().New(out);
- })
- .Clone(original_root);
-
- EXPECT_EQ(cloned_root->name, cloned.Symbols().Get("transformed<root>"));
- EXPECT_EQ(cloned_root->a->name, cloned.Symbols().Get("transformed<a>"));
- EXPECT_EQ(cloned_root->a->b->name, cloned.Symbols().Get("transformed<a->b>"));
- EXPECT_EQ(cloned_root->b->name, cloned.Symbols().Get("transformed<b>"));
- EXPECT_EQ(cloned_root->b->b->name, cloned.Symbols().Get("transformed<b->b>"));
+ Allocator alloc;
+
+ ProgramBuilder builder;
+ Node* original_root;
+ {
+ auto* a_b = alloc.Create<Node>(builder.Symbols().New("a->b"));
+ auto* a = alloc.Create<Node>(builder.Symbols().New("a"), nullptr, a_b);
+ auto* b_a = a; // Aliased
+ auto* b_b = alloc.Create<Node>(builder.Symbols().New("b->b"));
+ auto* b = alloc.Create<Node>(builder.Symbols().New("b"), b_a, b_b);
+ auto* c = b; // Aliased
+ original_root = alloc.Create<Node>(builder.Symbols().New("root"), a, b, c);
+ }
+ Program original(std::move(builder));
+
+ // root
+ // ╭──────────────────┼──────────────────╮
+ // (a) (b) (c)
+ // N <──────┐ N <───────────────┘
+ // ╭────┼────╮ │ ╭────┼────╮
+ // (a) (b) (c) │ (a) (b) (c)
+ // N └───┘ N
+ //
+ // N: Node
+
+ ProgramBuilder cloned;
+ auto* cloned_root = CloneContext(&cloned, &original, false)
+ .ReplaceAll([&](Symbol sym) {
+ auto in = original.Symbols().NameFor(sym);
+ auto out = "transformed<" + in + ">";
+ return cloned.Symbols().New(out);
+ })
+ .Clone(original_root);
+
+ EXPECT_EQ(cloned_root->name, cloned.Symbols().Get("transformed<root>"));
+ EXPECT_EQ(cloned_root->a->name, cloned.Symbols().Get("transformed<a>"));
+ EXPECT_EQ(cloned_root->a->b->name, cloned.Symbols().Get("transformed<a->b>"));
+ EXPECT_EQ(cloned_root->b->name, cloned.Symbols().Get("transformed<b>"));
+ EXPECT_EQ(cloned_root->b->b->name, cloned.Symbols().Get("transformed<b->b>"));
}
TEST_F(CloneContextNodeTest, CloneWithoutTransform) {
- Allocator a;
+ Allocator a;
- ProgramBuilder builder;
- auto* original_node = a.Create<Node>(builder.Symbols().New("root"));
- Program original(std::move(builder));
+ ProgramBuilder builder;
+ auto* original_node = a.Create<Node>(builder.Symbols().New("root"));
+ Program original(std::move(builder));
- ProgramBuilder cloned;
- CloneContext ctx(&cloned, &original);
- ctx.ReplaceAll([&](const Node*) {
- return a.Create<Replacement>(builder.Symbols().New("<unexpected-node>"));
- });
+ ProgramBuilder cloned;
+ CloneContext ctx(&cloned, &original);
+ ctx.ReplaceAll([&](const Node*) {
+ return a.Create<Replacement>(builder.Symbols().New("<unexpected-node>"));
+ });
- auto* cloned_node = ctx.CloneWithoutTransform(original_node);
- EXPECT_NE(cloned_node, original_node);
- EXPECT_EQ(cloned_node->name, cloned.Symbols().Get("root"));
+ auto* cloned_node = ctx.CloneWithoutTransform(original_node);
+ EXPECT_NE(cloned_node, original_node);
+ EXPECT_EQ(cloned_node->name, cloned.Symbols().Get("root"));
}
TEST_F(CloneContextNodeTest, CloneWithReplacePointer) {
- Allocator a;
-
- ProgramBuilder builder;
- auto* original_root = a.Create<Node>(builder.Symbols().New("root"));
- original_root->a = a.Create<Node>(builder.Symbols().New("a"));
- original_root->b = a.Create<Node>(builder.Symbols().New("b"));
- original_root->c = a.Create<Node>(builder.Symbols().New("c"));
- Program original(std::move(builder));
-
- // root
- // ╭──────────────────┼──────────────────╮
- // (a) (b) (c)
- // Replaced
-
- ProgramBuilder cloned;
- auto* replacement = a.Create<Node>(cloned.Symbols().New("replacement"));
-
- auto* cloned_root = CloneContext(&cloned, &original)
- .Replace(original_root->b, replacement)
- .Clone(original_root);
-
- EXPECT_NE(cloned_root->a, replacement);
- EXPECT_EQ(cloned_root->b, replacement);
- EXPECT_NE(cloned_root->c, replacement);
-
- EXPECT_EQ(cloned_root->name, cloned.Symbols().Get("root"));
- EXPECT_EQ(cloned_root->a->name, cloned.Symbols().Get("a"));
- EXPECT_EQ(cloned_root->b->name, cloned.Symbols().Get("replacement"));
- EXPECT_EQ(cloned_root->c->name, cloned.Symbols().Get("c"));
+ Allocator a;
+
+ ProgramBuilder builder;
+ auto* original_root = a.Create<Node>(builder.Symbols().New("root"));
+ original_root->a = a.Create<Node>(builder.Symbols().New("a"));
+ original_root->b = a.Create<Node>(builder.Symbols().New("b"));
+ original_root->c = a.Create<Node>(builder.Symbols().New("c"));
+ Program original(std::move(builder));
+
+ // root
+ // ╭──────────────────┼──────────────────╮
+ // (a) (b) (c)
+ // Replaced
+
+ ProgramBuilder cloned;
+ auto* replacement = a.Create<Node>(cloned.Symbols().New("replacement"));
+
+ auto* cloned_root = CloneContext(&cloned, &original)
+ .Replace(original_root->b, replacement)
+ .Clone(original_root);
+
+ EXPECT_NE(cloned_root->a, replacement);
+ EXPECT_EQ(cloned_root->b, replacement);
+ EXPECT_NE(cloned_root->c, replacement);
+
+ EXPECT_EQ(cloned_root->name, cloned.Symbols().Get("root"));
+ EXPECT_EQ(cloned_root->a->name, cloned.Symbols().Get("a"));
+ EXPECT_EQ(cloned_root->b->name, cloned.Symbols().Get("replacement"));
+ EXPECT_EQ(cloned_root->c->name, cloned.Symbols().Get("c"));
}
TEST_F(CloneContextNodeTest, CloneWithReplaceFunction) {
- Allocator a;
-
- ProgramBuilder builder;
- auto* original_root = a.Create<Node>(builder.Symbols().New("root"));
- original_root->a = a.Create<Node>(builder.Symbols().New("a"));
- original_root->b = a.Create<Node>(builder.Symbols().New("b"));
- original_root->c = a.Create<Node>(builder.Symbols().New("c"));
- Program original(std::move(builder));
-
- // root
- // ╭──────────────────┼──────────────────╮
- // (a) (b) (c)
- // Replaced
-
- ProgramBuilder cloned;
- auto* replacement = a.Create<Node>(cloned.Symbols().New("replacement"));
-
- auto* cloned_root =
- CloneContext(&cloned, &original)
- .Replace(original_root->b, [=] { return replacement; })
- .Clone(original_root);
-
- EXPECT_NE(cloned_root->a, replacement);
- EXPECT_EQ(cloned_root->b, replacement);
- EXPECT_NE(cloned_root->c, replacement);
-
- EXPECT_EQ(cloned_root->name, cloned.Symbols().Get("root"));
- EXPECT_EQ(cloned_root->a->name, cloned.Symbols().Get("a"));
- EXPECT_EQ(cloned_root->b->name, cloned.Symbols().Get("replacement"));
- EXPECT_EQ(cloned_root->c->name, cloned.Symbols().Get("c"));
+ Allocator a;
+
+ ProgramBuilder builder;
+ auto* original_root = a.Create<Node>(builder.Symbols().New("root"));
+ original_root->a = a.Create<Node>(builder.Symbols().New("a"));
+ original_root->b = a.Create<Node>(builder.Symbols().New("b"));
+ original_root->c = a.Create<Node>(builder.Symbols().New("c"));
+ Program original(std::move(builder));
+
+ // root
+ // ╭──────────────────┼──────────────────╮
+ // (a) (b) (c)
+ // Replaced
+
+ ProgramBuilder cloned;
+ auto* replacement = a.Create<Node>(cloned.Symbols().New("replacement"));
+
+ auto* cloned_root = CloneContext(&cloned, &original)
+ .Replace(original_root->b, [=] { return replacement; })
+ .Clone(original_root);
+
+ EXPECT_NE(cloned_root->a, replacement);
+ EXPECT_EQ(cloned_root->b, replacement);
+ EXPECT_NE(cloned_root->c, replacement);
+
+ EXPECT_EQ(cloned_root->name, cloned.Symbols().Get("root"));
+ EXPECT_EQ(cloned_root->a->name, cloned.Symbols().Get("a"));
+ EXPECT_EQ(cloned_root->b->name, cloned.Symbols().Get("replacement"));
+ EXPECT_EQ(cloned_root->c->name, cloned.Symbols().Get("c"));
}
TEST_F(CloneContextNodeTest, CloneWithRemove) {
- Allocator a;
-
- ProgramBuilder builder;
- auto* original_root = a.Create<Node>(builder.Symbols().Register("root"));
- original_root->vec = {
- a.Create<Node>(builder.Symbols().Register("a")),
- a.Create<Node>(builder.Symbols().Register("b")),
- a.Create<Node>(builder.Symbols().Register("c")),
- };
- Program original(std::move(builder));
-
- ProgramBuilder cloned;
- auto* cloned_root = CloneContext(&cloned, &original)
- .Remove(original_root->vec, original_root->vec[1])
- .Clone(original_root);
-
- EXPECT_EQ(cloned_root->vec.size(), 2u);
-
- EXPECT_NE(cloned_root->vec[0], cloned_root->a);
- EXPECT_NE(cloned_root->vec[1], cloned_root->c);
-
- EXPECT_EQ(cloned_root->name, cloned.Symbols().Get("root"));
- EXPECT_EQ(cloned_root->vec[0]->name, cloned.Symbols().Get("a"));
- EXPECT_EQ(cloned_root->vec[1]->name, cloned.Symbols().Get("c"));
+ Allocator a;
+
+ ProgramBuilder builder;
+ auto* original_root = a.Create<Node>(builder.Symbols().Register("root"));
+ original_root->vec = {
+ a.Create<Node>(builder.Symbols().Register("a")),
+ a.Create<Node>(builder.Symbols().Register("b")),
+ a.Create<Node>(builder.Symbols().Register("c")),
+ };
+ Program original(std::move(builder));
+
+ ProgramBuilder cloned;
+ auto* cloned_root = CloneContext(&cloned, &original)
+ .Remove(original_root->vec, original_root->vec[1])
+ .Clone(original_root);
+
+ EXPECT_EQ(cloned_root->vec.size(), 2u);
+
+ EXPECT_NE(cloned_root->vec[0], cloned_root->a);
+ EXPECT_NE(cloned_root->vec[1], cloned_root->c);
+
+ EXPECT_EQ(cloned_root->name, cloned.Symbols().Get("root"));
+ EXPECT_EQ(cloned_root->vec[0]->name, cloned.Symbols().Get("a"));
+ EXPECT_EQ(cloned_root->vec[1]->name, cloned.Symbols().Get("c"));
}
TEST_F(CloneContextNodeTest, CloneWithInsertFront) {
- Allocator a;
-
- ProgramBuilder builder;
- auto* original_root = a.Create<Node>(builder.Symbols().Register("root"));
- original_root->vec = {
- a.Create<Node>(builder.Symbols().Register("a")),
- a.Create<Node>(builder.Symbols().Register("b")),
- a.Create<Node>(builder.Symbols().Register("c")),
- };
- Program original(std::move(builder));
-
- ProgramBuilder cloned;
- auto* insertion = a.Create<Node>(cloned.Symbols().New("insertion"));
-
- auto* cloned_root = CloneContext(&cloned, &original)
- .InsertFront(original_root->vec, insertion)
- .Clone(original_root);
-
- EXPECT_EQ(cloned_root->vec.size(), 4u);
-
- EXPECT_NE(cloned_root->vec[0], cloned_root->a);
- EXPECT_NE(cloned_root->vec[1], cloned_root->b);
- EXPECT_NE(cloned_root->vec[2], cloned_root->c);
-
- EXPECT_EQ(cloned_root->name, cloned.Symbols().Get("root"));
- EXPECT_EQ(cloned_root->vec[0]->name, cloned.Symbols().Get("insertion"));
- EXPECT_EQ(cloned_root->vec[1]->name, cloned.Symbols().Get("a"));
- EXPECT_EQ(cloned_root->vec[2]->name, cloned.Symbols().Get("b"));
- EXPECT_EQ(cloned_root->vec[3]->name, cloned.Symbols().Get("c"));
+ Allocator a;
+
+ ProgramBuilder builder;
+ auto* original_root = a.Create<Node>(builder.Symbols().Register("root"));
+ original_root->vec = {
+ a.Create<Node>(builder.Symbols().Register("a")),
+ a.Create<Node>(builder.Symbols().Register("b")),
+ a.Create<Node>(builder.Symbols().Register("c")),
+ };
+ Program original(std::move(builder));
+
+ ProgramBuilder cloned;
+ auto* insertion = a.Create<Node>(cloned.Symbols().New("insertion"));
+
+ auto* cloned_root = CloneContext(&cloned, &original)
+ .InsertFront(original_root->vec, insertion)
+ .Clone(original_root);
+
+ EXPECT_EQ(cloned_root->vec.size(), 4u);
+
+ EXPECT_NE(cloned_root->vec[0], cloned_root->a);
+ EXPECT_NE(cloned_root->vec[1], cloned_root->b);
+ EXPECT_NE(cloned_root->vec[2], cloned_root->c);
+
+ EXPECT_EQ(cloned_root->name, cloned.Symbols().Get("root"));
+ EXPECT_EQ(cloned_root->vec[0]->name, cloned.Symbols().Get("insertion"));
+ EXPECT_EQ(cloned_root->vec[1]->name, cloned.Symbols().Get("a"));
+ EXPECT_EQ(cloned_root->vec[2]->name, cloned.Symbols().Get("b"));
+ EXPECT_EQ(cloned_root->vec[3]->name, cloned.Symbols().Get("c"));
}
TEST_F(CloneContextNodeTest, CloneWithInsertFront_Empty) {
- Allocator a;
+ Allocator a;
- ProgramBuilder builder;
- auto* original_root = a.Create<Node>(builder.Symbols().Register("root"));
- original_root->vec = {};
- Program original(std::move(builder));
+ ProgramBuilder builder;
+ auto* original_root = a.Create<Node>(builder.Symbols().Register("root"));
+ original_root->vec = {};
+ Program original(std::move(builder));
- ProgramBuilder cloned;
- auto* insertion = a.Create<Node>(cloned.Symbols().New("insertion"));
+ ProgramBuilder cloned;
+ auto* insertion = a.Create<Node>(cloned.Symbols().New("insertion"));
- auto* cloned_root = CloneContext(&cloned, &original)
- .InsertFront(original_root->vec, insertion)
- .Clone(original_root);
+ auto* cloned_root = CloneContext(&cloned, &original)
+ .InsertFront(original_root->vec, insertion)
+ .Clone(original_root);
- EXPECT_EQ(cloned_root->vec.size(), 1u);
+ EXPECT_EQ(cloned_root->vec.size(), 1u);
- EXPECT_EQ(cloned_root->name, cloned.Symbols().Get("root"));
- EXPECT_EQ(cloned_root->vec[0]->name, cloned.Symbols().Get("insertion"));
+ EXPECT_EQ(cloned_root->name, cloned.Symbols().Get("root"));
+ EXPECT_EQ(cloned_root->vec[0]->name, cloned.Symbols().Get("insertion"));
}
TEST_F(CloneContextNodeTest, CloneWithInsertBack) {
- Allocator a;
-
- ProgramBuilder builder;
- auto* original_root = a.Create<Node>(builder.Symbols().Register("root"));
- original_root->vec = {
- a.Create<Node>(builder.Symbols().Register("a")),
- a.Create<Node>(builder.Symbols().Register("b")),
- a.Create<Node>(builder.Symbols().Register("c")),
- };
- Program original(std::move(builder));
-
- ProgramBuilder cloned;
- auto* insertion = a.Create<Node>(cloned.Symbols().New("insertion"));
-
- auto* cloned_root = CloneContext(&cloned, &original)
- .InsertBack(original_root->vec, insertion)
- .Clone(original_root);
-
- EXPECT_EQ(cloned_root->vec.size(), 4u);
-
- EXPECT_EQ(cloned_root->name, cloned.Symbols().Get("root"));
- EXPECT_EQ(cloned_root->vec[0]->name, cloned.Symbols().Get("a"));
- EXPECT_EQ(cloned_root->vec[1]->name, cloned.Symbols().Get("b"));
- EXPECT_EQ(cloned_root->vec[2]->name, cloned.Symbols().Get("c"));
- EXPECT_EQ(cloned_root->vec[3]->name, cloned.Symbols().Get("insertion"));
+ Allocator a;
+
+ ProgramBuilder builder;
+ auto* original_root = a.Create<Node>(builder.Symbols().Register("root"));
+ original_root->vec = {
+ a.Create<Node>(builder.Symbols().Register("a")),
+ a.Create<Node>(builder.Symbols().Register("b")),
+ a.Create<Node>(builder.Symbols().Register("c")),
+ };
+ Program original(std::move(builder));
+
+ ProgramBuilder cloned;
+ auto* insertion = a.Create<Node>(cloned.Symbols().New("insertion"));
+
+ auto* cloned_root = CloneContext(&cloned, &original)
+ .InsertBack(original_root->vec, insertion)
+ .Clone(original_root);
+
+ EXPECT_EQ(cloned_root->vec.size(), 4u);
+
+ EXPECT_EQ(cloned_root->name, cloned.Symbols().Get("root"));
+ EXPECT_EQ(cloned_root->vec[0]->name, cloned.Symbols().Get("a"));
+ EXPECT_EQ(cloned_root->vec[1]->name, cloned.Symbols().Get("b"));
+ EXPECT_EQ(cloned_root->vec[2]->name, cloned.Symbols().Get("c"));
+ EXPECT_EQ(cloned_root->vec[3]->name, cloned.Symbols().Get("insertion"));
}
TEST_F(CloneContextNodeTest, CloneWithInsertBack_Empty) {
- Allocator a;
+ Allocator a;
- ProgramBuilder builder;
- auto* original_root = a.Create<Node>(builder.Symbols().Register("root"));
- original_root->vec = {};
- Program original(std::move(builder));
+ ProgramBuilder builder;
+ auto* original_root = a.Create<Node>(builder.Symbols().Register("root"));
+ original_root->vec = {};
+ Program original(std::move(builder));
- ProgramBuilder cloned;
- auto* insertion = a.Create<Node>(cloned.Symbols().New("insertion"));
+ ProgramBuilder cloned;
+ auto* insertion = a.Create<Node>(cloned.Symbols().New("insertion"));
- auto* cloned_root = CloneContext(&cloned, &original)
- .InsertBack(original_root->vec, insertion)
- .Clone(original_root);
+ auto* cloned_root = CloneContext(&cloned, &original)
+ .InsertBack(original_root->vec, insertion)
+ .Clone(original_root);
- EXPECT_EQ(cloned_root->vec.size(), 1u);
+ EXPECT_EQ(cloned_root->vec.size(), 1u);
- EXPECT_EQ(cloned_root->name, cloned.Symbols().Get("root"));
- EXPECT_EQ(cloned_root->vec[0]->name, cloned.Symbols().Get("insertion"));
+ EXPECT_EQ(cloned_root->name, cloned.Symbols().Get("root"));
+ EXPECT_EQ(cloned_root->vec[0]->name, cloned.Symbols().Get("insertion"));
}
TEST_F(CloneContextNodeTest, CloneWithInsertFrontAndBack_Empty) {
- Allocator a;
+ Allocator a;
- ProgramBuilder builder;
- auto* original_root = a.Create<Node>(builder.Symbols().Register("root"));
- original_root->vec = {};
- Program original(std::move(builder));
+ ProgramBuilder builder;
+ auto* original_root = a.Create<Node>(builder.Symbols().Register("root"));
+ original_root->vec = {};
+ Program original(std::move(builder));
- ProgramBuilder cloned;
- auto* insertion_front =
- a.Create<Node>(cloned.Symbols().New("insertion_front"));
- auto* insertion_back = a.Create<Node>(cloned.Symbols().New("insertion_back"));
+ ProgramBuilder cloned;
+ auto* insertion_front = a.Create<Node>(cloned.Symbols().New("insertion_front"));
+ auto* insertion_back = a.Create<Node>(cloned.Symbols().New("insertion_back"));
- auto* cloned_root = CloneContext(&cloned, &original)
- .InsertBack(original_root->vec, insertion_back)
- .InsertFront(original_root->vec, insertion_front)
- .Clone(original_root);
+ auto* cloned_root = CloneContext(&cloned, &original)
+ .InsertBack(original_root->vec, insertion_back)
+ .InsertFront(original_root->vec, insertion_front)
+ .Clone(original_root);
- EXPECT_EQ(cloned_root->vec.size(), 2u);
+ EXPECT_EQ(cloned_root->vec.size(), 2u);
- EXPECT_EQ(cloned_root->name, cloned.Symbols().Get("root"));
- EXPECT_EQ(cloned_root->vec[0]->name, cloned.Symbols().Get("insertion_front"));
- EXPECT_EQ(cloned_root->vec[1]->name, cloned.Symbols().Get("insertion_back"));
+ EXPECT_EQ(cloned_root->name, cloned.Symbols().Get("root"));
+ EXPECT_EQ(cloned_root->vec[0]->name, cloned.Symbols().Get("insertion_front"));
+ EXPECT_EQ(cloned_root->vec[1]->name, cloned.Symbols().Get("insertion_back"));
}
TEST_F(CloneContextNodeTest, CloneWithInsertBefore) {
- Allocator a;
-
- ProgramBuilder builder;
- auto* original_root = a.Create<Node>(builder.Symbols().Register("root"));
- original_root->vec = {
- a.Create<Node>(builder.Symbols().Register("a")),
- a.Create<Node>(builder.Symbols().Register("b")),
- a.Create<Node>(builder.Symbols().Register("c")),
- };
- Program original(std::move(builder));
-
- ProgramBuilder cloned;
- auto* insertion = a.Create<Node>(cloned.Symbols().New("insertion"));
-
- auto* cloned_root =
- CloneContext(&cloned, &original)
- .InsertBefore(original_root->vec, original_root->vec[1], insertion)
- .Clone(original_root);
-
- EXPECT_EQ(cloned_root->vec.size(), 4u);
-
- EXPECT_EQ(cloned_root->name, cloned.Symbols().Get("root"));
- EXPECT_EQ(cloned_root->vec[0]->name, cloned.Symbols().Get("a"));
- EXPECT_EQ(cloned_root->vec[1]->name, cloned.Symbols().Get("insertion"));
- EXPECT_EQ(cloned_root->vec[2]->name, cloned.Symbols().Get("b"));
- EXPECT_EQ(cloned_root->vec[3]->name, cloned.Symbols().Get("c"));
-}
-
-TEST_F(CloneContextNodeTest, CloneWithInsertAfter) {
- Allocator a;
-
- ProgramBuilder builder;
- auto* original_root = a.Create<Node>(builder.Symbols().Register("root"));
- original_root->vec = {
- a.Create<Node>(builder.Symbols().Register("a")),
- a.Create<Node>(builder.Symbols().Register("b")),
- a.Create<Node>(builder.Symbols().Register("c")),
- };
- Program original(std::move(builder));
-
- ProgramBuilder cloned;
- auto* insertion = a.Create<Node>(cloned.Symbols().New("insertion"));
-
- auto* cloned_root =
- CloneContext(&cloned, &original)
- .InsertAfter(original_root->vec, original_root->vec[1], insertion)
- .Clone(original_root);
-
- EXPECT_EQ(cloned_root->vec.size(), 4u);
-
- EXPECT_EQ(cloned_root->name, cloned.Symbols().Get("root"));
- EXPECT_EQ(cloned_root->vec[0]->name, cloned.Symbols().Get("a"));
- EXPECT_EQ(cloned_root->vec[1]->name, cloned.Symbols().Get("b"));
- EXPECT_EQ(cloned_root->vec[2]->name, cloned.Symbols().Get("insertion"));
- EXPECT_EQ(cloned_root->vec[3]->name, cloned.Symbols().Get("c"));
-}
-
-TEST_F(CloneContextNodeTest, CloneWithInsertAfterInVectorNodeClone) {
- Allocator a;
-
- ProgramBuilder builder;
- auto* original_root = a.Create<Node>(builder.Symbols().Register("root"));
- original_root->vec = {
- a.Create<Node>(builder.Symbols().Register("a")),
- a.Create<Replaceable>(builder.Symbols().Register("b")),
- a.Create<Node>(builder.Symbols().Register("c")),
- };
-
- Program original(std::move(builder));
-
- ProgramBuilder cloned;
- CloneContext ctx(&cloned, &original);
- ctx.ReplaceAll([&](const Replaceable* r) {
+ Allocator a;
+
+ ProgramBuilder builder;
+ auto* original_root = a.Create<Node>(builder.Symbols().Register("root"));
+ original_root->vec = {
+ a.Create<Node>(builder.Symbols().Register("a")),
+ a.Create<Node>(builder.Symbols().Register("b")),
+ a.Create<Node>(builder.Symbols().Register("c")),
+ };
+ Program original(std::move(builder));
+
+ ProgramBuilder cloned;
auto* insertion = a.Create<Node>(cloned.Symbols().New("insertion"));
- ctx.InsertAfter(original_root->vec, r, insertion);
- return nullptr;
- });
- auto* cloned_root = ctx.Clone(original_root);
+ auto* cloned_root = CloneContext(&cloned, &original)
+ .InsertBefore(original_root->vec, original_root->vec[1], insertion)
+ .Clone(original_root);
- EXPECT_EQ(cloned_root->vec.size(), 4u);
+ EXPECT_EQ(cloned_root->vec.size(), 4u);
- EXPECT_EQ(cloned_root->name, cloned.Symbols().Get("root"));
- EXPECT_EQ(cloned_root->vec[0]->name, cloned.Symbols().Get("a"));
- EXPECT_EQ(cloned_root->vec[1]->name, cloned.Symbols().Get("b"));
- EXPECT_EQ(cloned_root->vec[2]->name, cloned.Symbols().Get("insertion"));
- EXPECT_EQ(cloned_root->vec[3]->name, cloned.Symbols().Get("c"));
+ EXPECT_EQ(cloned_root->name, cloned.Symbols().Get("root"));
+ EXPECT_EQ(cloned_root->vec[0]->name, cloned.Symbols().Get("a"));
+ EXPECT_EQ(cloned_root->vec[1]->name, cloned.Symbols().Get("insertion"));
+ EXPECT_EQ(cloned_root->vec[2]->name, cloned.Symbols().Get("b"));
+ EXPECT_EQ(cloned_root->vec[3]->name, cloned.Symbols().Get("c"));
}
-TEST_F(CloneContextNodeTest, CloneWithInsertBackInVectorNodeClone) {
- Allocator a;
-
- ProgramBuilder builder;
- auto* original_root = a.Create<Node>(builder.Symbols().Register("root"));
- original_root->vec = {
- a.Create<Node>(builder.Symbols().Register("a")),
- a.Create<Replaceable>(builder.Symbols().Register("b")),
- a.Create<Node>(builder.Symbols().Register("c")),
- };
+TEST_F(CloneContextNodeTest, CloneWithInsertAfter) {
+ Allocator a;
+
+ ProgramBuilder builder;
+ auto* original_root = a.Create<Node>(builder.Symbols().Register("root"));
+ original_root->vec = {
+ a.Create<Node>(builder.Symbols().Register("a")),
+ a.Create<Node>(builder.Symbols().Register("b")),
+ a.Create<Node>(builder.Symbols().Register("c")),
+ };
+ Program original(std::move(builder));
+
+ ProgramBuilder cloned;
+ auto* insertion = a.Create<Node>(cloned.Symbols().New("insertion"));
- Program original(std::move(builder));
+ auto* cloned_root = CloneContext(&cloned, &original)
+ .InsertAfter(original_root->vec, original_root->vec[1], insertion)
+ .Clone(original_root);
- ProgramBuilder cloned;
- CloneContext ctx(&cloned, &original);
- ctx.ReplaceAll([&](const Replaceable* /*r*/) {
- auto* insertion = a.Create<Node>(cloned.Symbols().New("insertion"));
- ctx.InsertBack(original_root->vec, insertion);
- return nullptr;
- });
+ EXPECT_EQ(cloned_root->vec.size(), 4u);
- auto* cloned_root = ctx.Clone(original_root);
+ EXPECT_EQ(cloned_root->name, cloned.Symbols().Get("root"));
+ EXPECT_EQ(cloned_root->vec[0]->name, cloned.Symbols().Get("a"));
+ EXPECT_EQ(cloned_root->vec[1]->name, cloned.Symbols().Get("b"));
+ EXPECT_EQ(cloned_root->vec[2]->name, cloned.Symbols().Get("insertion"));
+ EXPECT_EQ(cloned_root->vec[3]->name, cloned.Symbols().Get("c"));
+}
- EXPECT_EQ(cloned_root->vec.size(), 4u);
+TEST_F(CloneContextNodeTest, CloneWithInsertAfterInVectorNodeClone) {
+ Allocator a;
+
+ ProgramBuilder builder;
+ auto* original_root = a.Create<Node>(builder.Symbols().Register("root"));
+ original_root->vec = {
+ a.Create<Node>(builder.Symbols().Register("a")),
+ a.Create<Replaceable>(builder.Symbols().Register("b")),
+ a.Create<Node>(builder.Symbols().Register("c")),
+ };
+
+ Program original(std::move(builder));
+
+ ProgramBuilder cloned;
+ CloneContext ctx(&cloned, &original);
+ ctx.ReplaceAll([&](const Replaceable* r) {
+ auto* insertion = a.Create<Node>(cloned.Symbols().New("insertion"));
+ ctx.InsertAfter(original_root->vec, r, insertion);
+ return nullptr;
+ });
+
+ auto* cloned_root = ctx.Clone(original_root);
+
+ EXPECT_EQ(cloned_root->vec.size(), 4u);
+
+ EXPECT_EQ(cloned_root->name, cloned.Symbols().Get("root"));
+ EXPECT_EQ(cloned_root->vec[0]->name, cloned.Symbols().Get("a"));
+ EXPECT_EQ(cloned_root->vec[1]->name, cloned.Symbols().Get("b"));
+ EXPECT_EQ(cloned_root->vec[2]->name, cloned.Symbols().Get("insertion"));
+ EXPECT_EQ(cloned_root->vec[3]->name, cloned.Symbols().Get("c"));
+}
- EXPECT_EQ(cloned_root->name, cloned.Symbols().Get("root"));
- EXPECT_EQ(cloned_root->vec[0]->name, cloned.Symbols().Get("a"));
- EXPECT_EQ(cloned_root->vec[1]->name, cloned.Symbols().Get("b"));
- EXPECT_EQ(cloned_root->vec[2]->name, cloned.Symbols().Get("c"));
- EXPECT_EQ(cloned_root->vec[3]->name, cloned.Symbols().Get("insertion"));
+TEST_F(CloneContextNodeTest, CloneWithInsertBackInVectorNodeClone) {
+ Allocator a;
+
+ ProgramBuilder builder;
+ auto* original_root = a.Create<Node>(builder.Symbols().Register("root"));
+ original_root->vec = {
+ a.Create<Node>(builder.Symbols().Register("a")),
+ a.Create<Replaceable>(builder.Symbols().Register("b")),
+ a.Create<Node>(builder.Symbols().Register("c")),
+ };
+
+ Program original(std::move(builder));
+
+ ProgramBuilder cloned;
+ CloneContext ctx(&cloned, &original);
+ ctx.ReplaceAll([&](const Replaceable* /*r*/) {
+ auto* insertion = a.Create<Node>(cloned.Symbols().New("insertion"));
+ ctx.InsertBack(original_root->vec, insertion);
+ return nullptr;
+ });
+
+ auto* cloned_root = ctx.Clone(original_root);
+
+ EXPECT_EQ(cloned_root->vec.size(), 4u);
+
+ EXPECT_EQ(cloned_root->name, cloned.Symbols().Get("root"));
+ EXPECT_EQ(cloned_root->vec[0]->name, cloned.Symbols().Get("a"));
+ EXPECT_EQ(cloned_root->vec[1]->name, cloned.Symbols().Get("b"));
+ EXPECT_EQ(cloned_root->vec[2]->name, cloned.Symbols().Get("c"));
+ EXPECT_EQ(cloned_root->vec[3]->name, cloned.Symbols().Get("insertion"));
}
TEST_F(CloneContextNodeTest, CloneWithInsertBeforeAndAfterRemoved) {
- Allocator a;
-
- ProgramBuilder builder;
- auto* original_root = a.Create<Node>(builder.Symbols().Register("root"));
- original_root->vec = {
- a.Create<Node>(builder.Symbols().Register("a")),
- a.Create<Node>(builder.Symbols().Register("b")),
- a.Create<Node>(builder.Symbols().Register("c")),
- };
- Program original(std::move(builder));
-
- ProgramBuilder cloned;
- auto* insertion_before =
- a.Create<Node>(cloned.Symbols().New("insertion_before"));
- auto* insertion_after =
- a.Create<Node>(cloned.Symbols().New("insertion_after"));
-
- auto* cloned_root = CloneContext(&cloned, &original)
- .InsertBefore(original_root->vec,
- original_root->vec[1], insertion_before)
- .InsertAfter(original_root->vec,
- original_root->vec[1], insertion_after)
- .Remove(original_root->vec, original_root->vec[1])
- .Clone(original_root);
-
- EXPECT_EQ(cloned_root->vec.size(), 4u);
-
- EXPECT_EQ(cloned_root->name, cloned.Symbols().Get("root"));
- EXPECT_EQ(cloned_root->vec[0]->name, cloned.Symbols().Get("a"));
- EXPECT_EQ(cloned_root->vec[1]->name,
- cloned.Symbols().Get("insertion_before"));
- EXPECT_EQ(cloned_root->vec[2]->name, cloned.Symbols().Get("insertion_after"));
- EXPECT_EQ(cloned_root->vec[3]->name, cloned.Symbols().Get("c"));
+ Allocator a;
+
+ ProgramBuilder builder;
+ auto* original_root = a.Create<Node>(builder.Symbols().Register("root"));
+ original_root->vec = {
+ a.Create<Node>(builder.Symbols().Register("a")),
+ a.Create<Node>(builder.Symbols().Register("b")),
+ a.Create<Node>(builder.Symbols().Register("c")),
+ };
+ Program original(std::move(builder));
+
+ ProgramBuilder cloned;
+ auto* insertion_before = a.Create<Node>(cloned.Symbols().New("insertion_before"));
+ auto* insertion_after = a.Create<Node>(cloned.Symbols().New("insertion_after"));
+
+ auto* cloned_root =
+ CloneContext(&cloned, &original)
+ .InsertBefore(original_root->vec, original_root->vec[1], insertion_before)
+ .InsertAfter(original_root->vec, original_root->vec[1], insertion_after)
+ .Remove(original_root->vec, original_root->vec[1])
+ .Clone(original_root);
+
+ EXPECT_EQ(cloned_root->vec.size(), 4u);
+
+ EXPECT_EQ(cloned_root->name, cloned.Symbols().Get("root"));
+ EXPECT_EQ(cloned_root->vec[0]->name, cloned.Symbols().Get("a"));
+ EXPECT_EQ(cloned_root->vec[1]->name, cloned.Symbols().Get("insertion_before"));
+ EXPECT_EQ(cloned_root->vec[2]->name, cloned.Symbols().Get("insertion_after"));
+ EXPECT_EQ(cloned_root->vec[3]->name, cloned.Symbols().Get("c"));
}
TEST_F(CloneContextNodeTest, CloneIntoSameBuilder) {
- ProgramBuilder builder;
- CloneContext ctx(&builder);
- Allocator allocator;
- auto* original = allocator.Create<Node>(builder.Symbols().New());
- auto* cloned_a = ctx.Clone(original);
- auto* cloned_b = ctx.Clone(original);
- EXPECT_NE(original, cloned_a);
- EXPECT_NE(original, cloned_b);
-
- EXPECT_NE(cloned_a, cloned_b);
+ ProgramBuilder builder;
+ CloneContext ctx(&builder);
+ Allocator allocator;
+ auto* original = allocator.Create<Node>(builder.Symbols().New());
+ auto* cloned_a = ctx.Clone(original);
+ auto* cloned_b = ctx.Clone(original);
+ EXPECT_NE(original, cloned_a);
+ EXPECT_NE(original, cloned_b);
+
+ EXPECT_NE(cloned_a, cloned_b);
}
TEST_F(CloneContextNodeTest, CloneWithReplaceAll_SameTypeTwice) {
- std::string node_name = TypeInfo::Of<Node>().name;
-
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder cloned;
- Program original;
- CloneContext ctx(&cloned, &original);
- ctx.ReplaceAll([](const Node*) { return nullptr; });
- ctx.ReplaceAll([](const Node*) { return nullptr; });
- },
- "internal compiler error: ReplaceAll() called with a handler for type " +
- node_name + " that is already handled by a handler for type " +
- node_name);
+ std::string node_name = TypeInfo::Of<Node>().name;
+
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder cloned;
+ Program original;
+ CloneContext ctx(&cloned, &original);
+ ctx.ReplaceAll([](const Node*) { return nullptr; });
+ ctx.ReplaceAll([](const Node*) { return nullptr; });
+ },
+ "internal compiler error: ReplaceAll() called with a handler for type " + node_name +
+ " that is already handled by a handler for type " + node_name);
}
TEST_F(CloneContextNodeTest, CloneWithReplaceAll_BaseThenDerived) {
- std::string node_name = TypeInfo::Of<Node>().name;
- std::string replaceable_name = TypeInfo::Of<Replaceable>().name;
-
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder cloned;
- Program original;
- CloneContext ctx(&cloned, &original);
- ctx.ReplaceAll([](const Node*) { return nullptr; });
- ctx.ReplaceAll([](const Replaceable*) { return nullptr; });
- },
- "internal compiler error: ReplaceAll() called with a handler for type " +
- replaceable_name + " that is already handled by a handler for type " +
- node_name);
+ std::string node_name = TypeInfo::Of<Node>().name;
+ std::string replaceable_name = TypeInfo::Of<Replaceable>().name;
+
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder cloned;
+ Program original;
+ CloneContext ctx(&cloned, &original);
+ ctx.ReplaceAll([](const Node*) { return nullptr; });
+ ctx.ReplaceAll([](const Replaceable*) { return nullptr; });
+ },
+ "internal compiler error: ReplaceAll() called with a handler for type " + replaceable_name +
+ " that is already handled by a handler for type " + node_name);
}
TEST_F(CloneContextNodeTest, CloneWithReplaceAll_DerivedThenBase) {
- std::string node_name = TypeInfo::Of<Node>().name;
- std::string replaceable_name = TypeInfo::Of<Replaceable>().name;
-
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder cloned;
- Program original;
- CloneContext ctx(&cloned, &original);
- ctx.ReplaceAll([](const Replaceable*) { return nullptr; });
- ctx.ReplaceAll([](const Node*) { return nullptr; });
- },
- "internal compiler error: ReplaceAll() called with a handler for type " +
- node_name + " that is already handled by a handler for type " +
- replaceable_name);
+ std::string node_name = TypeInfo::Of<Node>().name;
+ std::string replaceable_name = TypeInfo::Of<Replaceable>().name;
+
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder cloned;
+ Program original;
+ CloneContext ctx(&cloned, &original);
+ ctx.ReplaceAll([](const Replaceable*) { return nullptr; });
+ ctx.ReplaceAll([](const Node*) { return nullptr; });
+ },
+ "internal compiler error: ReplaceAll() called with a handler for type " + node_name +
+ " that is already handled by a handler for type " + replaceable_name);
}
TEST_F(CloneContextNodeTest, CloneWithReplacePointer_WithNotANode) {
- EXPECT_FATAL_FAILURE(
- {
- Allocator allocator;
- ProgramBuilder builder;
- auto* original_root =
- allocator.Create<Node>(builder.Symbols().New("root"));
- original_root->a = allocator.Create<Node>(builder.Symbols().New("a"));
- original_root->b = allocator.Create<Node>(builder.Symbols().New("b"));
- original_root->c = allocator.Create<Node>(builder.Symbols().New("c"));
- Program original(std::move(builder));
-
- // root
- // ╭──────────────────┼──────────────────╮
- // (a) (b) (c)
- // Replaced
-
- ProgramBuilder cloned;
- auto* replacement = allocator.Create<NotANode>();
-
- CloneContext ctx(&cloned, &original);
- ctx.Replace(original_root->b, replacement);
-
- ctx.Clone(original_root);
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ Allocator allocator;
+ ProgramBuilder builder;
+ auto* original_root = allocator.Create<Node>(builder.Symbols().New("root"));
+ original_root->a = allocator.Create<Node>(builder.Symbols().New("a"));
+ original_root->b = allocator.Create<Node>(builder.Symbols().New("b"));
+ original_root->c = allocator.Create<Node>(builder.Symbols().New("c"));
+ Program original(std::move(builder));
+
+ // root
+ // ╭──────────────────┼──────────────────╮
+ // (a) (b) (c)
+ // Replaced
+
+ ProgramBuilder cloned;
+ auto* replacement = allocator.Create<NotANode>();
+
+ CloneContext ctx(&cloned, &original);
+ ctx.Replace(original_root->b, replacement);
+
+ ctx.Clone(original_root);
+ },
+ "internal compiler error");
}
TEST_F(CloneContextNodeTest, CloneWithReplaceFunction_WithNotANode) {
- EXPECT_FATAL_FAILURE(
- {
- Allocator allocator;
- ProgramBuilder builder;
- auto* original_root =
- allocator.Create<Node>(builder.Symbols().New("root"));
- original_root->a = allocator.Create<Node>(builder.Symbols().New("a"));
- original_root->b = allocator.Create<Node>(builder.Symbols().New("b"));
- original_root->c = allocator.Create<Node>(builder.Symbols().New("c"));
- Program original(std::move(builder));
-
- // root
- // ╭──────────────────┼──────────────────╮
- // (a) (b) (c)
- // Replaced
-
- ProgramBuilder cloned;
- auto* replacement = allocator.Create<NotANode>();
-
- CloneContext ctx(&cloned, &original);
- ctx.Replace(original_root->b, [=] { return replacement; });
-
- ctx.Clone(original_root);
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ Allocator allocator;
+ ProgramBuilder builder;
+ auto* original_root = allocator.Create<Node>(builder.Symbols().New("root"));
+ original_root->a = allocator.Create<Node>(builder.Symbols().New("a"));
+ original_root->b = allocator.Create<Node>(builder.Symbols().New("b"));
+ original_root->c = allocator.Create<Node>(builder.Symbols().New("c"));
+ Program original(std::move(builder));
+
+ // root
+ // ╭──────────────────┼──────────────────╮
+ // (a) (b) (c)
+ // Replaced
+
+ ProgramBuilder cloned;
+ auto* replacement = allocator.Create<NotANode>();
+
+ CloneContext ctx(&cloned, &original);
+ ctx.Replace(original_root->b, [=] { return replacement; });
+
+ ctx.Clone(original_root);
+ },
+ "internal compiler error");
}
using CloneContextTest = ::testing::Test;
TEST_F(CloneContextTest, CloneWithReplaceAll_SymbolsTwice) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder cloned;
- Program original;
- CloneContext ctx(&cloned, &original);
- ctx.ReplaceAll([](const Symbol s) { return s; });
- ctx.ReplaceAll([](const Symbol s) { return s; });
- },
- "internal compiler error: ReplaceAll(const SymbolTransform&) called "
- "multiple times on the same CloneContext");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder cloned;
+ Program original;
+ CloneContext ctx(&cloned, &original);
+ ctx.ReplaceAll([](const Symbol s) { return s; });
+ ctx.ReplaceAll([](const Symbol s) { return s; });
+ },
+ "internal compiler error: ReplaceAll(const SymbolTransform&) called "
+ "multiple times on the same CloneContext");
}
TEST_F(CloneContextTest, CloneNewUnnamedSymbols) {
- ProgramBuilder builder;
- Symbol old_a = builder.Symbols().New();
- Symbol old_b = builder.Symbols().New();
- Symbol old_c = builder.Symbols().New();
- EXPECT_EQ(builder.Symbols().NameFor(old_a), "tint_symbol");
- EXPECT_EQ(builder.Symbols().NameFor(old_b), "tint_symbol_1");
- EXPECT_EQ(builder.Symbols().NameFor(old_c), "tint_symbol_2");
-
- Program original(std::move(builder));
-
- ProgramBuilder cloned;
- CloneContext ctx(&cloned, &original, false);
- Symbol new_x = cloned.Symbols().New();
- Symbol new_a = ctx.Clone(old_a);
- Symbol new_y = cloned.Symbols().New();
- Symbol new_b = ctx.Clone(old_b);
- Symbol new_z = cloned.Symbols().New();
- Symbol new_c = ctx.Clone(old_c);
-
- EXPECT_EQ(cloned.Symbols().NameFor(new_x), "tint_symbol");
- EXPECT_EQ(cloned.Symbols().NameFor(new_a), "tint_symbol_1");
- EXPECT_EQ(cloned.Symbols().NameFor(new_y), "tint_symbol_2");
- EXPECT_EQ(cloned.Symbols().NameFor(new_b), "tint_symbol_1_1");
- EXPECT_EQ(cloned.Symbols().NameFor(new_z), "tint_symbol_3");
- EXPECT_EQ(cloned.Symbols().NameFor(new_c), "tint_symbol_2_1");
+ ProgramBuilder builder;
+ Symbol old_a = builder.Symbols().New();
+ Symbol old_b = builder.Symbols().New();
+ Symbol old_c = builder.Symbols().New();
+ EXPECT_EQ(builder.Symbols().NameFor(old_a), "tint_symbol");
+ EXPECT_EQ(builder.Symbols().NameFor(old_b), "tint_symbol_1");
+ EXPECT_EQ(builder.Symbols().NameFor(old_c), "tint_symbol_2");
+
+ Program original(std::move(builder));
+
+ ProgramBuilder cloned;
+ CloneContext ctx(&cloned, &original, false);
+ Symbol new_x = cloned.Symbols().New();
+ Symbol new_a = ctx.Clone(old_a);
+ Symbol new_y = cloned.Symbols().New();
+ Symbol new_b = ctx.Clone(old_b);
+ Symbol new_z = cloned.Symbols().New();
+ Symbol new_c = ctx.Clone(old_c);
+
+ EXPECT_EQ(cloned.Symbols().NameFor(new_x), "tint_symbol");
+ EXPECT_EQ(cloned.Symbols().NameFor(new_a), "tint_symbol_1");
+ EXPECT_EQ(cloned.Symbols().NameFor(new_y), "tint_symbol_2");
+ EXPECT_EQ(cloned.Symbols().NameFor(new_b), "tint_symbol_1_1");
+ EXPECT_EQ(cloned.Symbols().NameFor(new_z), "tint_symbol_3");
+ EXPECT_EQ(cloned.Symbols().NameFor(new_c), "tint_symbol_2_1");
}
TEST_F(CloneContextTest, CloneNewSymbols) {
- ProgramBuilder builder;
- Symbol old_a = builder.Symbols().New("a");
- Symbol old_b = builder.Symbols().New("b");
- Symbol old_c = builder.Symbols().New("c");
- EXPECT_EQ(builder.Symbols().NameFor(old_a), "a");
- EXPECT_EQ(builder.Symbols().NameFor(old_b), "b");
- EXPECT_EQ(builder.Symbols().NameFor(old_c), "c");
-
- Program original(std::move(builder));
-
- ProgramBuilder cloned;
- CloneContext ctx(&cloned, &original, false);
- Symbol new_x = cloned.Symbols().New("a");
- Symbol new_a = ctx.Clone(old_a);
- Symbol new_y = cloned.Symbols().New("b");
- Symbol new_b = ctx.Clone(old_b);
- Symbol new_z = cloned.Symbols().New("c");
- Symbol new_c = ctx.Clone(old_c);
-
- EXPECT_EQ(cloned.Symbols().NameFor(new_x), "a");
- EXPECT_EQ(cloned.Symbols().NameFor(new_a), "a_1");
- EXPECT_EQ(cloned.Symbols().NameFor(new_y), "b");
- EXPECT_EQ(cloned.Symbols().NameFor(new_b), "b_1");
- EXPECT_EQ(cloned.Symbols().NameFor(new_z), "c");
- EXPECT_EQ(cloned.Symbols().NameFor(new_c), "c_1");
+ ProgramBuilder builder;
+ Symbol old_a = builder.Symbols().New("a");
+ Symbol old_b = builder.Symbols().New("b");
+ Symbol old_c = builder.Symbols().New("c");
+ EXPECT_EQ(builder.Symbols().NameFor(old_a), "a");
+ EXPECT_EQ(builder.Symbols().NameFor(old_b), "b");
+ EXPECT_EQ(builder.Symbols().NameFor(old_c), "c");
+
+ Program original(std::move(builder));
+
+ ProgramBuilder cloned;
+ CloneContext ctx(&cloned, &original, false);
+ Symbol new_x = cloned.Symbols().New("a");
+ Symbol new_a = ctx.Clone(old_a);
+ Symbol new_y = cloned.Symbols().New("b");
+ Symbol new_b = ctx.Clone(old_b);
+ Symbol new_z = cloned.Symbols().New("c");
+ Symbol new_c = ctx.Clone(old_c);
+
+ EXPECT_EQ(cloned.Symbols().NameFor(new_x), "a");
+ EXPECT_EQ(cloned.Symbols().NameFor(new_a), "a_1");
+ EXPECT_EQ(cloned.Symbols().NameFor(new_y), "b");
+ EXPECT_EQ(cloned.Symbols().NameFor(new_b), "b_1");
+ EXPECT_EQ(cloned.Symbols().NameFor(new_z), "c");
+ EXPECT_EQ(cloned.Symbols().NameFor(new_c), "c_1");
}
TEST_F(CloneContextTest, CloneNewSymbols_AfterCloneSymbols) {
- ProgramBuilder builder;
- Symbol old_a = builder.Symbols().New("a");
- Symbol old_b = builder.Symbols().New("b");
- Symbol old_c = builder.Symbols().New("c");
- EXPECT_EQ(builder.Symbols().NameFor(old_a), "a");
- EXPECT_EQ(builder.Symbols().NameFor(old_b), "b");
- EXPECT_EQ(builder.Symbols().NameFor(old_c), "c");
-
- Program original(std::move(builder));
-
- ProgramBuilder cloned;
- CloneContext ctx(&cloned, &original);
- Symbol new_x = cloned.Symbols().New("a");
- Symbol new_a = ctx.Clone(old_a);
- Symbol new_y = cloned.Symbols().New("b");
- Symbol new_b = ctx.Clone(old_b);
- Symbol new_z = cloned.Symbols().New("c");
- Symbol new_c = ctx.Clone(old_c);
-
- EXPECT_EQ(cloned.Symbols().NameFor(new_x), "a_1");
- EXPECT_EQ(cloned.Symbols().NameFor(new_a), "a");
- EXPECT_EQ(cloned.Symbols().NameFor(new_y), "b_1");
- EXPECT_EQ(cloned.Symbols().NameFor(new_b), "b");
- EXPECT_EQ(cloned.Symbols().NameFor(new_z), "c_1");
- EXPECT_EQ(cloned.Symbols().NameFor(new_c), "c");
+ ProgramBuilder builder;
+ Symbol old_a = builder.Symbols().New("a");
+ Symbol old_b = builder.Symbols().New("b");
+ Symbol old_c = builder.Symbols().New("c");
+ EXPECT_EQ(builder.Symbols().NameFor(old_a), "a");
+ EXPECT_EQ(builder.Symbols().NameFor(old_b), "b");
+ EXPECT_EQ(builder.Symbols().NameFor(old_c), "c");
+
+ Program original(std::move(builder));
+
+ ProgramBuilder cloned;
+ CloneContext ctx(&cloned, &original);
+ Symbol new_x = cloned.Symbols().New("a");
+ Symbol new_a = ctx.Clone(old_a);
+ Symbol new_y = cloned.Symbols().New("b");
+ Symbol new_b = ctx.Clone(old_b);
+ Symbol new_z = cloned.Symbols().New("c");
+ Symbol new_c = ctx.Clone(old_c);
+
+ EXPECT_EQ(cloned.Symbols().NameFor(new_x), "a_1");
+ EXPECT_EQ(cloned.Symbols().NameFor(new_a), "a");
+ EXPECT_EQ(cloned.Symbols().NameFor(new_y), "b_1");
+ EXPECT_EQ(cloned.Symbols().NameFor(new_b), "b");
+ EXPECT_EQ(cloned.Symbols().NameFor(new_z), "c_1");
+ EXPECT_EQ(cloned.Symbols().NameFor(new_c), "c");
}
TEST_F(CloneContextTest, ProgramIDs) {
- ProgramBuilder dst;
- Program src(ProgramBuilder{});
- CloneContext ctx(&dst, &src);
- Allocator allocator;
- auto* cloned = ctx.Clone(allocator.Create<ProgramNode>(src.ID(), dst.ID()));
- EXPECT_EQ(cloned->program_id, dst.ID());
+ ProgramBuilder dst;
+ Program src(ProgramBuilder{});
+ CloneContext ctx(&dst, &src);
+ Allocator allocator;
+ auto* cloned = ctx.Clone(allocator.Create<ProgramNode>(src.ID(), dst.ID()));
+ EXPECT_EQ(cloned->program_id, dst.ID());
}
TEST_F(CloneContextTest, ProgramIDs_Clone_ObjectNotOwnedBySrc) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder dst;
- Program src(ProgramBuilder{});
- CloneContext ctx(&dst, &src);
- Allocator allocator;
- ctx.Clone(allocator.Create<ProgramNode>(ProgramID::New(), dst.ID()));
- },
- R"(internal compiler error: TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(Clone, src, object))");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder dst;
+ Program src(ProgramBuilder{});
+ CloneContext ctx(&dst, &src);
+ Allocator allocator;
+ ctx.Clone(allocator.Create<ProgramNode>(ProgramID::New(), dst.ID()));
+ },
+ R"(internal compiler error: TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(Clone, src, object))");
}
TEST_F(CloneContextTest, ProgramIDs_Clone_ObjectNotOwnedByDst) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder dst;
- Program src(ProgramBuilder{});
- CloneContext ctx(&dst, &src);
- Allocator allocator;
- ctx.Clone(allocator.Create<ProgramNode>(src.ID(), ProgramID::New()));
- },
- R"(internal compiler error: TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(Clone, dst, out))");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder dst;
+ Program src(ProgramBuilder{});
+ CloneContext ctx(&dst, &src);
+ Allocator allocator;
+ ctx.Clone(allocator.Create<ProgramNode>(src.ID(), ProgramID::New()));
+ },
+ R"(internal compiler error: TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(Clone, dst, out))");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/cmd/main.cc b/chromium/third_party/dawn/src/tint/cmd/main.cc
index 8a0d2935d0f..438e4a28d15 100644
--- a/chromium/third_party/dawn/src/tint/cmd/main.cc
+++ b/chromium/third_party/dawn/src/tint/cmd/main.cc
@@ -15,7 +15,9 @@
#include <cstdio>
#include <fstream>
#include <iostream>
+#include <limits>
#include <memory>
+#include <optional> // NOLINT(build/include_order)
#include <sstream>
#include <string>
#include <vector>
@@ -36,12 +38,11 @@
namespace {
-[[noreturn]] void TintInternalCompilerErrorReporter(
- const tint::diag::List& diagnostics) {
- auto printer = tint::diag::Printer::create(stderr, true);
- tint::diag::Formatter{}.format(diagnostics, printer.get());
- tint::diag::Style bold_red{tint::diag::Color::kRed, true};
- constexpr const char* please_file_bug = R"(
+[[noreturn]] void TintInternalCompilerErrorReporter(const tint::diag::List& diagnostics) {
+ auto printer = tint::diag::Printer::create(stderr, true);
+ tint::diag::Formatter{}.format(diagnostics, printer.get());
+ tint::diag::Style bold_red{tint::diag::Color::kRed, true};
+ constexpr const char* please_file_bug = R"(
********************************************************************
* The tint shader compiler has encountered an unexpected error. *
* *
@@ -49,42 +50,44 @@ namespace {
* crbug.com/tint with the source program that triggered the bug. *
********************************************************************
)";
- printer->write(please_file_bug, bold_red);
- exit(1);
+ printer->write(please_file_bug, bold_red);
+ exit(1);
}
enum class Format {
- kNone = -1,
- kSpirv,
- kSpvAsm,
- kWgsl,
- kMsl,
- kHlsl,
- kGlsl,
+ kNone = -1,
+ kSpirv,
+ kSpvAsm,
+ kWgsl,
+ kMsl,
+ kHlsl,
+ kGlsl,
};
struct Options {
- bool show_help = false;
+ bool show_help = false;
- std::string input_filename;
- std::string output_file = "-"; // Default to stdout
+ std::string input_filename;
+ std::string output_file = "-"; // Default to stdout
- bool parse_only = false;
- bool disable_workgroup_init = false;
- bool validate = false;
- bool demangle = false;
- bool dump_inspector_bindings = false;
+ bool parse_only = false;
+ bool disable_workgroup_init = false;
+ bool validate = false;
+ bool demangle = false;
+ bool dump_inspector_bindings = false;
- Format format = Format::kNone;
+ Format format = Format::kNone;
- bool emit_single_entry_point = false;
- std::string ep_name;
+ bool emit_single_entry_point = false;
+ std::string ep_name;
- std::vector<std::string> transforms;
+ std::vector<std::string> transforms;
- bool use_fxc = false;
- std::string dxc_path;
- std::string xcrun_path;
+ bool use_fxc = false;
+ std::string dxc_path;
+ std::string xcrun_path;
+ std::vector<std::string> overrides;
+ std::optional<tint::sem::BindingPoint> hlsl_root_constant_binding_point;
};
const char kUsage[] = R"(Usage: tint [options] <input-file>
@@ -111,304 +114,361 @@ ${transforms}
Affects AST dumping, and text-based output languages.
   --dump-inspector-bindings -- Dump reflection data about bindings to stdout.
-h -- This help text
+ --hlsl-root-constant-binding-point <group>,<binding> -- Binding point for root constant.
+ Specify the binding point for generated uniform buffer
+ used for num_workgroups in HLSL. If not specified, then
+ default to binding 0 of the largest used group plus 1,
+ or group 0 if no resource bound.
--validate -- Validates the generated shader
--fxc -- Ask to validate HLSL output using FXC instead of DXC.
When specified, automatically enables --validate
--dxc -- Path to DXC executable, used to validate HLSL output.
When specified, automatically enables --validate
--xcrun -- Path to xcrun executable, used to validate MSL output.
- When specified, automatically enables --validate)";
+ When specified, automatically enables --validate
+ --overrides -- Pipeline overrides as NAME=VALUE, comma-separated.)";
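
For illustration (file and override names here are hypothetical, not taken from the patch), an invocation exercising the two new flags could look like: tint shader.wgsl --format hlsl --hlsl-root-constant-binding-point 0,4 --overrides WIDTH=256,HEIGHT=256 -o out.hlsl. The 0,4 pair selects group 0, binding 4 for the uniform buffer that backs num_workgroups, and the NAME=VALUE overrides are forwarded to the FXC/DXC validation paths changed later in this file.
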
Format parse_format(const std::string& fmt) {
- (void)fmt;
+ (void)fmt;
#if TINT_BUILD_SPV_WRITER
- if (fmt == "spirv")
- return Format::kSpirv;
- if (fmt == "spvasm")
- return Format::kSpvAsm;
+ if (fmt == "spirv") {
+ return Format::kSpirv;
+ }
+ if (fmt == "spvasm") {
+ return Format::kSpvAsm;
+ }
#endif // TINT_BUILD_SPV_WRITER
#if TINT_BUILD_WGSL_WRITER
- if (fmt == "wgsl")
- return Format::kWgsl;
+ if (fmt == "wgsl") {
+ return Format::kWgsl;
+ }
#endif // TINT_BUILD_WGSL_WRITER
#if TINT_BUILD_MSL_WRITER
- if (fmt == "msl")
- return Format::kMsl;
+ if (fmt == "msl") {
+ return Format::kMsl;
+ }
#endif // TINT_BUILD_MSL_WRITER
#if TINT_BUILD_HLSL_WRITER
- if (fmt == "hlsl")
- return Format::kHlsl;
+ if (fmt == "hlsl") {
+ return Format::kHlsl;
+ }
#endif // TINT_BUILD_HLSL_WRITER
#if TINT_BUILD_GLSL_WRITER
- if (fmt == "glsl")
- return Format::kGlsl;
+ if (fmt == "glsl") {
+ return Format::kGlsl;
+ }
#endif // TINT_BUILD_GLSL_WRITER
- return Format::kNone;
+ return Format::kNone;
}
-#if TINT_BUILD_SPV_WRITER || TINT_BUILD_WGSL_WRITER || \
- TINT_BUILD_MSL_WRITER || TINT_BUILD_HLSL_WRITER
+#if TINT_BUILD_SPV_WRITER || TINT_BUILD_WGSL_WRITER || TINT_BUILD_MSL_WRITER || \
+ TINT_BUILD_HLSL_WRITER
/// @param input input string
/// @param suffix potential suffix string
/// @returns true if input ends with the given suffix.
bool ends_with(const std::string& input, const std::string& suffix) {
- const auto input_len = input.size();
- const auto suffix_len = suffix.size();
- // Avoid integer overflow.
- return (input_len >= suffix_len) &&
- (input_len - suffix_len == input.rfind(suffix));
+ const auto input_len = input.size();
+ const auto suffix_len = suffix.size();
+ // Avoid integer overflow.
+ return (input_len >= suffix_len) && (input_len - suffix_len == input.rfind(suffix));
}
#endif
/// @param filename the filename to inspect
/// @returns the inferred format for the filename suffix
Format infer_format(const std::string& filename) {
- (void)filename;
+ (void)filename;
#if TINT_BUILD_SPV_WRITER
- if (ends_with(filename, ".spv")) {
- return Format::kSpirv;
- }
- if (ends_with(filename, ".spvasm")) {
- return Format::kSpvAsm;
- }
+ if (ends_with(filename, ".spv")) {
+ return Format::kSpirv;
+ }
+ if (ends_with(filename, ".spvasm")) {
+ return Format::kSpvAsm;
+ }
#endif // TINT_BUILD_SPV_WRITER
#if TINT_BUILD_WGSL_WRITER
- if (ends_with(filename, ".wgsl")) {
- return Format::kWgsl;
- }
+ if (ends_with(filename, ".wgsl")) {
+ return Format::kWgsl;
+ }
#endif // TINT_BUILD_WGSL_WRITER
#if TINT_BUILD_MSL_WRITER
- if (ends_with(filename, ".metal")) {
- return Format::kMsl;
- }
+ if (ends_with(filename, ".metal")) {
+ return Format::kMsl;
+ }
#endif // TINT_BUILD_MSL_WRITER
#if TINT_BUILD_HLSL_WRITER
- if (ends_with(filename, ".hlsl")) {
- return Format::kHlsl;
- }
+ if (ends_with(filename, ".hlsl")) {
+ return Format::kHlsl;
+ }
#endif // TINT_BUILD_HLSL_WRITER
- return Format::kNone;
+ return Format::kNone;
}
-std::vector<std::string> split_transform_names(std::string list) {
- std::vector<std::string> res;
+std::vector<std::string> split_on_comma(std::string list) {
+ std::vector<std::string> res;
- std::stringstream str(list);
- while (str.good()) {
- std::string substr;
- getline(str, substr, ',');
- res.push_back(substr);
- }
- return res;
+ std::stringstream str(list);
+ while (str.good()) {
+ std::string substr;
+ getline(str, substr, ',');
+ res.push_back(substr);
+ }
+ return res;
}
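
As a point of reference, the comma-splitting behaviour relied on above can be reproduced with the following standalone sketch (the _sketch names are illustrative and not part of the patch); like the helper in the diff, it keeps empty fields as empty strings.

// Standalone sketch (not part of the patch) mirroring split_on_comma above.
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

std::vector<std::string> split_on_comma_sketch(const std::string& list) {
    std::vector<std::string> fields;
    std::stringstream stream(list);
    while (stream.good()) {
        std::string field;
        std::getline(stream, field, ',');  // empty fields come back as empty strings
        fields.push_back(field);
    }
    return fields;
}

int main() {
    for (const auto& field : split_on_comma_sketch("robustness,renamer")) {
        std::cout << field << "\n";  // prints "robustness", then "renamer"
    }
    return 0;
}
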
-std::string TextureDimensionToString(
- tint::inspector::ResourceBinding::TextureDimension dim) {
- switch (dim) {
- case tint::inspector::ResourceBinding::TextureDimension::kNone:
- return "None";
- case tint::inspector::ResourceBinding::TextureDimension::k1d:
- return "1d";
- case tint::inspector::ResourceBinding::TextureDimension::k2d:
- return "2d";
- case tint::inspector::ResourceBinding::TextureDimension::k2dArray:
- return "2dArray";
- case tint::inspector::ResourceBinding::TextureDimension::k3d:
- return "3d";
- case tint::inspector::ResourceBinding::TextureDimension::kCube:
- return "Cube";
- case tint::inspector::ResourceBinding::TextureDimension::kCubeArray:
- return "CubeArray";
- }
-
- return "Unknown";
+std::optional<uint64_t> parse_unsigned_number(std::string number) {
+ for (char c : number) {
+ if (!std::isdigit(c)) {
+            // Found a non-digit char, return nullopt
+ return std::nullopt;
+ }
+ }
+
+ errno = 0;
+ char* p_end;
+ uint64_t result;
+ // std::strtoull will not throw exception.
+ result = std::strtoull(number.c_str(), &p_end, 10);
+ if ((errno != 0) || (static_cast<size_t>(p_end - number.c_str()) != number.length())) {
+ // Unexpected conversion result
+ return std::nullopt;
+ }
+
+ return result;
}
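
A minimal, self-contained variant of the same parsing approach, for readers who want to see it in isolation (the _sketch name is illustrative): digits are checked up front, then strtoull performs the conversion, and ERANGE or a partial parse is reported as std::nullopt.

// Standalone sketch (not part of the patch) of the parse_unsigned_number approach.
#include <cctype>
#include <cerrno>
#include <cstdint>
#include <cstdlib>
#include <optional>
#include <string>

std::optional<uint64_t> parse_unsigned_sketch(const std::string& number) {
    for (char c : number) {
        if (!std::isdigit(static_cast<unsigned char>(c))) {
            return std::nullopt;  // reject signs, whitespace, hex prefixes, ...
        }
    }
    errno = 0;
    char* end = nullptr;
    uint64_t value = std::strtoull(number.c_str(), &end, 10);
    if (errno != 0 || end != number.c_str() + number.size()) {
        return std::nullopt;  // overflow (ERANGE) or an incomplete conversion
    }
    return value;
}

// parse_unsigned_sketch("42") yields 42; "1,2" and "-1" both yield std::nullopt.
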
-std::string SampledKindToString(
- tint::inspector::ResourceBinding::SampledKind kind) {
- switch (kind) {
- case tint::inspector::ResourceBinding::SampledKind::kFloat:
- return "Float";
- case tint::inspector::ResourceBinding::SampledKind::kUInt:
- return "UInt";
- case tint::inspector::ResourceBinding::SampledKind::kSInt:
- return "SInt";
- case tint::inspector::ResourceBinding::SampledKind::kUnknown:
- break;
- }
-
- return "Unknown";
+std::string TextureDimensionToString(tint::inspector::ResourceBinding::TextureDimension dim) {
+ switch (dim) {
+ case tint::inspector::ResourceBinding::TextureDimension::kNone:
+ return "None";
+ case tint::inspector::ResourceBinding::TextureDimension::k1d:
+ return "1d";
+ case tint::inspector::ResourceBinding::TextureDimension::k2d:
+ return "2d";
+ case tint::inspector::ResourceBinding::TextureDimension::k2dArray:
+ return "2dArray";
+ case tint::inspector::ResourceBinding::TextureDimension::k3d:
+ return "3d";
+ case tint::inspector::ResourceBinding::TextureDimension::kCube:
+ return "Cube";
+ case tint::inspector::ResourceBinding::TextureDimension::kCubeArray:
+ return "CubeArray";
+ }
+
+ return "Unknown";
}
-std::string TexelFormatToString(
- tint::inspector::ResourceBinding::TexelFormat format) {
- switch (format) {
- case tint::inspector::ResourceBinding::TexelFormat::kR32Uint:
- return "R32Uint";
- case tint::inspector::ResourceBinding::TexelFormat::kR32Sint:
- return "R32Sint";
- case tint::inspector::ResourceBinding::TexelFormat::kR32Float:
- return "R32Float";
- case tint::inspector::ResourceBinding::TexelFormat::kRgba8Unorm:
- return "Rgba8Unorm";
- case tint::inspector::ResourceBinding::TexelFormat::kRgba8Snorm:
- return "Rgba8Snorm";
- case tint::inspector::ResourceBinding::TexelFormat::kRgba8Uint:
- return "Rgba8Uint";
- case tint::inspector::ResourceBinding::TexelFormat::kRgba8Sint:
- return "Rgba8Sint";
- case tint::inspector::ResourceBinding::TexelFormat::kRg32Uint:
- return "Rg32Uint";
- case tint::inspector::ResourceBinding::TexelFormat::kRg32Sint:
- return "Rg32Sint";
- case tint::inspector::ResourceBinding::TexelFormat::kRg32Float:
- return "Rg32Float";
- case tint::inspector::ResourceBinding::TexelFormat::kRgba16Uint:
- return "Rgba16Uint";
- case tint::inspector::ResourceBinding::TexelFormat::kRgba16Sint:
- return "Rgba16Sint";
- case tint::inspector::ResourceBinding::TexelFormat::kRgba16Float:
- return "Rgba16Float";
- case tint::inspector::ResourceBinding::TexelFormat::kRgba32Uint:
- return "Rgba32Uint";
- case tint::inspector::ResourceBinding::TexelFormat::kRgba32Sint:
- return "Rgba32Sint";
- case tint::inspector::ResourceBinding::TexelFormat::kRgba32Float:
- return "Rgba32Float";
- case tint::inspector::ResourceBinding::TexelFormat::kNone:
- return "None";
- }
- return "Unknown";
+std::string SampledKindToString(tint::inspector::ResourceBinding::SampledKind kind) {
+ switch (kind) {
+ case tint::inspector::ResourceBinding::SampledKind::kFloat:
+ return "Float";
+ case tint::inspector::ResourceBinding::SampledKind::kUInt:
+ return "UInt";
+ case tint::inspector::ResourceBinding::SampledKind::kSInt:
+ return "SInt";
+ case tint::inspector::ResourceBinding::SampledKind::kUnknown:
+ break;
+ }
+
+ return "Unknown";
}
-std::string ResourceTypeToString(
- tint::inspector::ResourceBinding::ResourceType type) {
- switch (type) {
- case tint::inspector::ResourceBinding::ResourceType::kUniformBuffer:
- return "UniformBuffer";
- case tint::inspector::ResourceBinding::ResourceType::kStorageBuffer:
- return "StorageBuffer";
- case tint::inspector::ResourceBinding::ResourceType::kReadOnlyStorageBuffer:
- return "ReadOnlyStorageBuffer";
- case tint::inspector::ResourceBinding::ResourceType::kSampler:
- return "Sampler";
- case tint::inspector::ResourceBinding::ResourceType::kComparisonSampler:
- return "ComparisonSampler";
- case tint::inspector::ResourceBinding::ResourceType::kSampledTexture:
- return "SampledTexture";
- case tint::inspector::ResourceBinding::ResourceType::kMultisampledTexture:
- return "MultisampledTexture";
- case tint::inspector::ResourceBinding::ResourceType::
- kWriteOnlyStorageTexture:
- return "WriteOnlyStorageTexture";
- case tint::inspector::ResourceBinding::ResourceType::kDepthTexture:
- return "DepthTexture";
- case tint::inspector::ResourceBinding::ResourceType::
- kDepthMultisampledTexture:
- return "DepthMultisampledTexture";
- case tint::inspector::ResourceBinding::ResourceType::kExternalTexture:
- return "ExternalTexture";
- }
-
- return "Unknown";
+std::string TexelFormatToString(tint::inspector::ResourceBinding::TexelFormat format) {
+ switch (format) {
+ case tint::inspector::ResourceBinding::TexelFormat::kR32Uint:
+ return "R32Uint";
+ case tint::inspector::ResourceBinding::TexelFormat::kR32Sint:
+ return "R32Sint";
+ case tint::inspector::ResourceBinding::TexelFormat::kR32Float:
+ return "R32Float";
+ case tint::inspector::ResourceBinding::TexelFormat::kRgba8Unorm:
+ return "Rgba8Unorm";
+ case tint::inspector::ResourceBinding::TexelFormat::kRgba8Snorm:
+ return "Rgba8Snorm";
+ case tint::inspector::ResourceBinding::TexelFormat::kRgba8Uint:
+ return "Rgba8Uint";
+ case tint::inspector::ResourceBinding::TexelFormat::kRgba8Sint:
+ return "Rgba8Sint";
+ case tint::inspector::ResourceBinding::TexelFormat::kRg32Uint:
+ return "Rg32Uint";
+ case tint::inspector::ResourceBinding::TexelFormat::kRg32Sint:
+ return "Rg32Sint";
+ case tint::inspector::ResourceBinding::TexelFormat::kRg32Float:
+ return "Rg32Float";
+ case tint::inspector::ResourceBinding::TexelFormat::kRgba16Uint:
+ return "Rgba16Uint";
+ case tint::inspector::ResourceBinding::TexelFormat::kRgba16Sint:
+ return "Rgba16Sint";
+ case tint::inspector::ResourceBinding::TexelFormat::kRgba16Float:
+ return "Rgba16Float";
+ case tint::inspector::ResourceBinding::TexelFormat::kRgba32Uint:
+ return "Rgba32Uint";
+ case tint::inspector::ResourceBinding::TexelFormat::kRgba32Sint:
+ return "Rgba32Sint";
+ case tint::inspector::ResourceBinding::TexelFormat::kRgba32Float:
+ return "Rgba32Float";
+ case tint::inspector::ResourceBinding::TexelFormat::kNone:
+ return "None";
+ }
+ return "Unknown";
}
-bool ParseArgs(const std::vector<std::string>& args, Options* opts) {
- for (size_t i = 1; i < args.size(); ++i) {
- const std::string& arg = args[i];
- if (arg == "--format") {
- ++i;
- if (i >= args.size()) {
- std::cerr << "Missing value for --format argument." << std::endl;
- return false;
- }
- opts->format = parse_format(args[i]);
+std::string ResourceTypeToString(tint::inspector::ResourceBinding::ResourceType type) {
+ switch (type) {
+ case tint::inspector::ResourceBinding::ResourceType::kUniformBuffer:
+ return "UniformBuffer";
+ case tint::inspector::ResourceBinding::ResourceType::kStorageBuffer:
+ return "StorageBuffer";
+ case tint::inspector::ResourceBinding::ResourceType::kReadOnlyStorageBuffer:
+ return "ReadOnlyStorageBuffer";
+ case tint::inspector::ResourceBinding::ResourceType::kSampler:
+ return "Sampler";
+ case tint::inspector::ResourceBinding::ResourceType::kComparisonSampler:
+ return "ComparisonSampler";
+ case tint::inspector::ResourceBinding::ResourceType::kSampledTexture:
+ return "SampledTexture";
+ case tint::inspector::ResourceBinding::ResourceType::kMultisampledTexture:
+ return "MultisampledTexture";
+ case tint::inspector::ResourceBinding::ResourceType::kWriteOnlyStorageTexture:
+ return "WriteOnlyStorageTexture";
+ case tint::inspector::ResourceBinding::ResourceType::kDepthTexture:
+ return "DepthTexture";
+ case tint::inspector::ResourceBinding::ResourceType::kDepthMultisampledTexture:
+ return "DepthMultisampledTexture";
+ case tint::inspector::ResourceBinding::ResourceType::kExternalTexture:
+ return "ExternalTexture";
+ }
- if (opts->format == Format::kNone) {
- std::cerr << "Unknown output format: " << args[i] << std::endl;
- return false;
- }
- } else if (arg == "-ep") {
- if (i + 1 >= args.size()) {
- std::cerr << "Missing value for -ep" << std::endl;
- return false;
- }
- i++;
- opts->ep_name = args[i];
- opts->emit_single_entry_point = true;
-
- } else if (arg == "-o" || arg == "--output-name") {
- ++i;
- if (i >= args.size()) {
- std::cerr << "Missing value for " << arg << std::endl;
- return false;
- }
- opts->output_file = args[i];
-
- } else if (arg == "-h" || arg == "--help") {
- opts->show_help = true;
- } else if (arg == "--transform") {
- ++i;
- if (i >= args.size()) {
- std::cerr << "Missing value for " << arg << std::endl;
- return false;
- }
- opts->transforms = split_transform_names(args[i]);
- } else if (arg == "--parse-only") {
- opts->parse_only = true;
- } else if (arg == "--disable-workgroup-init") {
- opts->disable_workgroup_init = true;
- } else if (arg == "--demangle") {
- opts->demangle = true;
- } else if (arg == "--dump-inspector-bindings") {
- opts->dump_inspector_bindings = true;
- } else if (arg == "--validate") {
- opts->validate = true;
- } else if (arg == "--fxc") {
- opts->validate = true;
- opts->use_fxc = true;
- } else if (arg == "--dxc") {
- ++i;
- if (i >= args.size()) {
- std::cerr << "Missing value for " << arg << std::endl;
- return false;
- }
- opts->dxc_path = args[i];
- opts->validate = true;
- } else if (arg == "--xcrun") {
- ++i;
- if (i >= args.size()) {
- std::cerr << "Missing value for " << arg << std::endl;
- return false;
- }
- opts->xcrun_path = args[i];
- opts->validate = true;
- } else if (!arg.empty()) {
- if (arg[0] == '-') {
- std::cerr << "Unrecognized option: " << arg << std::endl;
- return false;
- }
- if (!opts->input_filename.empty()) {
- std::cerr << "More than one input file specified: '"
- << opts->input_filename << "' and '" << arg << "'"
- << std::endl;
- return false;
- }
- opts->input_filename = arg;
+ return "Unknown";
+}
+
+bool ParseArgs(const std::vector<std::string>& args, Options* opts) {
+ for (size_t i = 1; i < args.size(); ++i) {
+ const std::string& arg = args[i];
+ if (arg == "--format") {
+ ++i;
+ if (i >= args.size()) {
+ std::cerr << "Missing value for --format argument." << std::endl;
+ return false;
+ }
+ opts->format = parse_format(args[i]);
+
+ if (opts->format == Format::kNone) {
+ std::cerr << "Unknown output format: " << args[i] << std::endl;
+ return false;
+ }
+ } else if (arg == "-ep") {
+ if (i + 1 >= args.size()) {
+ std::cerr << "Missing value for -ep" << std::endl;
+ return false;
+ }
+ i++;
+ opts->ep_name = args[i];
+ opts->emit_single_entry_point = true;
+
+ } else if (arg == "-o" || arg == "--output-name") {
+ ++i;
+ if (i >= args.size()) {
+ std::cerr << "Missing value for " << arg << std::endl;
+ return false;
+ }
+ opts->output_file = args[i];
+
+ } else if (arg == "-h" || arg == "--help") {
+ opts->show_help = true;
+ } else if (arg == "--transform") {
+ ++i;
+ if (i >= args.size()) {
+ std::cerr << "Missing value for " << arg << std::endl;
+ return false;
+ }
+ opts->transforms = split_on_comma(args[i]);
+ } else if (arg == "--parse-only") {
+ opts->parse_only = true;
+ } else if (arg == "--disable-workgroup-init") {
+ opts->disable_workgroup_init = true;
+ } else if (arg == "--demangle") {
+ opts->demangle = true;
+ } else if (arg == "--dump-inspector-bindings") {
+ opts->dump_inspector_bindings = true;
+ } else if (arg == "--validate") {
+ opts->validate = true;
+ } else if (arg == "--fxc") {
+ opts->validate = true;
+ opts->use_fxc = true;
+ } else if (arg == "--dxc") {
+ ++i;
+ if (i >= args.size()) {
+ std::cerr << "Missing value for " << arg << std::endl;
+ return false;
+ }
+ opts->dxc_path = args[i];
+ opts->validate = true;
+ } else if (arg == "--xcrun") {
+ ++i;
+ if (i >= args.size()) {
+ std::cerr << "Missing value for " << arg << std::endl;
+ return false;
+ }
+ opts->xcrun_path = args[i];
+ opts->validate = true;
+ } else if (arg == "--overrides") {
+ ++i;
+ if (i >= args.size()) {
+ std::cerr << "Missing value for " << arg << std::endl;
+ return false;
+ }
+ opts->overrides = split_on_comma(args[i]);
+ } else if (arg == "--hlsl-root-constant-binding-point") {
+ ++i;
+ if (i >= args.size()) {
+ std::cerr << "Missing value for " << arg << std::endl;
+ return false;
+ }
+ auto binding_points = split_on_comma(args[i]);
+ if (binding_points.size() != 2) {
+ std::cerr << "Invalid binding point for " << arg << ": " << args[i] << std::endl;
+ return false;
+ }
+ auto group = parse_unsigned_number(binding_points[0]);
+ if ((!group.has_value()) || (group.value() > std::numeric_limits<uint32_t>::max())) {
+ std::cerr << "Invalid group for " << arg << ": " << binding_points[0] << std::endl;
+ return false;
+ }
+ auto binding = parse_unsigned_number(binding_points[1]);
+ if ((!binding.has_value()) ||
+ (binding.value() > std::numeric_limits<uint32_t>::max())) {
+ std::cerr << "Invalid binding for " << arg << ": " << binding_points[1]
+ << std::endl;
+ return false;
+ }
+ opts->hlsl_root_constant_binding_point = tint::sem::BindingPoint{
+ static_cast<uint32_t>(group.value()), static_cast<uint32_t>(binding.value())};
+ } else if (!arg.empty()) {
+ if (arg[0] == '-') {
+ std::cerr << "Unrecognized option: " << arg << std::endl;
+ return false;
+ }
+ if (!opts->input_filename.empty()) {
+ std::cerr << "More than one input file specified: '" << opts->input_filename
+ << "' and '" << arg << "'" << std::endl;
+ return false;
+ }
+ opts->input_filename = arg;
+ }
}
- }
- return true;
+ return true;
}
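
Tying the pieces together, the --hlsl-root-constant-binding-point handling above boils down to: split on the comma, parse both halves as unsigned decimals, and range-check them into 32 bits. A hedged sketch of that flow, reusing the two standalone helpers shown earlier in this section (std::pair stands in for tint::sem::BindingPoint purely for illustration):

// Standalone sketch (not part of the patch); assumes split_on_comma_sketch and
// parse_unsigned_sketch from the sketches earlier in this section.
#include <cstdint>
#include <limits>
#include <optional>
#include <string>
#include <utility>
#include <vector>

std::vector<std::string> split_on_comma_sketch(const std::string& list);
std::optional<uint64_t> parse_unsigned_sketch(const std::string& number);

std::optional<std::pair<uint32_t, uint32_t>> parse_binding_point_sketch(const std::string& arg) {
    auto fields = split_on_comma_sketch(arg);
    if (fields.size() != 2) {
        return std::nullopt;  // exactly "<group>,<binding>" is expected
    }
    auto group = parse_unsigned_sketch(fields[0]);
    auto binding = parse_unsigned_sketch(fields[1]);
    if (!group || !binding || *group > std::numeric_limits<uint32_t>::max() ||
        *binding > std::numeric_limits<uint32_t>::max()) {
        return std::nullopt;  // non-numeric input or values that do not fit in 32 bits
    }
    return std::make_pair(static_cast<uint32_t>(*group), static_cast<uint32_t>(*binding));
}

// parse_binding_point_sketch("0,4") yields {0, 4}; "0;4" and "0,4,1" yield std::nullopt.
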
/// Copies the content from the file named `input_file` to `buffer`,
@@ -418,45 +478,45 @@ bool ParseArgs(const std::vector<std::string>& args, Options* opts) {
/// @returns true if we successfully read the file.
template <typename T>
bool ReadFile(const std::string& input_file, std::vector<T>* buffer) {
- if (!buffer) {
- std::cerr << "The buffer pointer was null" << std::endl;
- return false;
- }
+ if (!buffer) {
+ std::cerr << "The buffer pointer was null" << std::endl;
+ return false;
+ }
- FILE* file = nullptr;
+ FILE* file = nullptr;
#if defined(_MSC_VER)
- fopen_s(&file, input_file.c_str(), "rb");
+ fopen_s(&file, input_file.c_str(), "rb");
#else
- file = fopen(input_file.c_str(), "rb");
+ file = fopen(input_file.c_str(), "rb");
#endif
- if (!file) {
- std::cerr << "Failed to open " << input_file << std::endl;
- return false;
- }
-
- fseek(file, 0, SEEK_END);
- const auto file_size = static_cast<size_t>(ftell(file));
- if (0 != (file_size % sizeof(T))) {
- std::cerr << "File " << input_file
- << " does not contain an integral number of objects: "
- << file_size << " bytes in the file, require " << sizeof(T)
- << " bytes per object" << std::endl;
- fclose(file);
- return false;
- }
- fseek(file, 0, SEEK_SET);
+ if (!file) {
+ std::cerr << "Failed to open " << input_file << std::endl;
+ return false;
+ }
+
+ fseek(file, 0, SEEK_END);
+ const auto file_size = static_cast<size_t>(ftell(file));
+ if (0 != (file_size % sizeof(T))) {
+ std::cerr << "File " << input_file
+ << " does not contain an integral number of objects: " << file_size
+ << " bytes in the file, require " << sizeof(T) << " bytes per object"
+ << std::endl;
+ fclose(file);
+ return false;
+ }
+ fseek(file, 0, SEEK_SET);
- buffer->clear();
- buffer->resize(file_size / sizeof(T));
+ buffer->clear();
+ buffer->resize(file_size / sizeof(T));
- size_t bytes_read = fread(buffer->data(), 1, file_size, file);
- fclose(file);
- if (bytes_read != file_size) {
- std::cerr << "Failed to read " << input_file << std::endl;
- return false;
- }
+ size_t bytes_read = fread(buffer->data(), 1, file_size, file);
+ fclose(file);
+ if (bytes_read != file_size) {
+ std::cerr << "Failed to read " << input_file << std::endl;
+ return false;
+ }
- return true;
+ return true;
}
/// Writes the given `buffer` into the file named as `output_file` using the
@@ -466,82 +526,77 @@ bool ReadFile(const std::string& input_file, std::vector<T>* buffer) {
/// like `std::string` and `std::vector` do.
/// @returns true on success
template <typename ContainerT>
-bool WriteFile(const std::string& output_file,
- const std::string mode,
- const ContainerT& buffer) {
- const bool use_stdout = output_file.empty() || output_file == "-";
- FILE* file = stdout;
+bool WriteFile(const std::string& output_file, const std::string mode, const ContainerT& buffer) {
+ const bool use_stdout = output_file.empty() || output_file == "-";
+ FILE* file = stdout;
- if (!use_stdout) {
+ if (!use_stdout) {
#if defined(_MSC_VER)
- fopen_s(&file, output_file.c_str(), mode.c_str());
+ fopen_s(&file, output_file.c_str(), mode.c_str());
#else
- file = fopen(output_file.c_str(), mode.c_str());
+ file = fopen(output_file.c_str(), mode.c_str());
#endif
- if (!file) {
- std::cerr << "Could not open file " << output_file << " for writing"
- << std::endl;
- return false;
+ if (!file) {
+ std::cerr << "Could not open file " << output_file << " for writing" << std::endl;
+ return false;
+ }
}
- }
-
- size_t written =
- fwrite(buffer.data(), sizeof(typename ContainerT::value_type),
- buffer.size(), file);
- if (buffer.size() != written) {
- if (use_stdout) {
- std::cerr << "Could not write all output to standard output" << std::endl;
- } else {
- std::cerr << "Could not write to file " << output_file << std::endl;
- fclose(file);
+
+ size_t written =
+ fwrite(buffer.data(), sizeof(typename ContainerT::value_type), buffer.size(), file);
+ if (buffer.size() != written) {
+ if (use_stdout) {
+ std::cerr << "Could not write all output to standard output" << std::endl;
+ } else {
+ std::cerr << "Could not write to file " << output_file << std::endl;
+ fclose(file);
+ }
+ return false;
+ }
+ if (!use_stdout) {
+ fclose(file);
}
- return false;
- }
- if (!use_stdout) {
- fclose(file);
- }
- return true;
+ return true;
}
#if TINT_BUILD_SPV_WRITER
std::string Disassemble(const std::vector<uint32_t>& data) {
- std::string spv_errors;
- spv_target_env target_env = SPV_ENV_UNIVERSAL_1_0;
-
- auto msg_consumer = [&spv_errors](spv_message_level_t level, const char*,
- const spv_position_t& position,
- const char* message) {
- switch (level) {
- case SPV_MSG_FATAL:
- case SPV_MSG_INTERNAL_ERROR:
- case SPV_MSG_ERROR:
- spv_errors += "error: line " + std::to_string(position.index) + ": " +
- message + "\n";
- break;
- case SPV_MSG_WARNING:
- spv_errors += "warning: line " + std::to_string(position.index) + ": " +
- message + "\n";
- break;
- case SPV_MSG_INFO:
- spv_errors += "info: line " + std::to_string(position.index) + ": " +
- message + "\n";
- break;
- case SPV_MSG_DEBUG:
- break;
+ std::string spv_errors;
+ spv_target_env target_env = SPV_ENV_UNIVERSAL_1_0;
+
+ auto msg_consumer = [&spv_errors](spv_message_level_t level, const char*,
+ const spv_position_t& position, const char* message) {
+ switch (level) {
+ case SPV_MSG_FATAL:
+ case SPV_MSG_INTERNAL_ERROR:
+ case SPV_MSG_ERROR:
+ spv_errors +=
+ "error: line " + std::to_string(position.index) + ": " + message + "\n";
+ break;
+ case SPV_MSG_WARNING:
+ spv_errors +=
+ "warning: line " + std::to_string(position.index) + ": " + message + "\n";
+ break;
+ case SPV_MSG_INFO:
+ spv_errors +=
+ "info: line " + std::to_string(position.index) + ": " + message + "\n";
+ break;
+ case SPV_MSG_DEBUG:
+ break;
+ }
+ };
+
+ spvtools::SpirvTools tools(target_env);
+ tools.SetMessageConsumer(msg_consumer);
+
+ std::string result;
+ if (!tools.Disassemble(
+ data, &result,
+ SPV_BINARY_TO_TEXT_OPTION_INDENT | SPV_BINARY_TO_TEXT_OPTION_FRIENDLY_NAMES)) {
+ std::cerr << spv_errors << std::endl;
}
- };
-
- spvtools::SpirvTools tools(target_env);
- tools.SetMessageConsumer(msg_consumer);
-
- std::string result;
- if (!tools.Disassemble(data, &result,
- SPV_BINARY_TO_TEXT_OPTION_INDENT |
- SPV_BINARY_TO_TEXT_OPTION_FRIENDLY_NAMES)) {
- std::cerr << spv_errors << std::endl;
- }
- return result;
+ return result;
}
#endif // TINT_BUILD_SPV_WRITER
@@ -551,12 +606,12 @@ std::string Disassemble(const std::vector<uint32_t>& data) {
/// @param program the program
void PrintWGSL(std::ostream& out, const tint::Program& program) {
#if TINT_BUILD_WGSL_WRITER
- tint::writer::wgsl::Options options;
- auto result = tint::writer::wgsl::Generate(&program, options);
- out << std::endl << result.wgsl << std::endl;
+ tint::writer::wgsl::Options options;
+ auto result = tint::writer::wgsl::Generate(&program, options);
+ out << std::endl << result.wgsl << std::endl;
#else
- (void)out;
- (void)program;
+ (void)out;
+ (void)program;
#endif
}
@@ -566,47 +621,46 @@ void PrintWGSL(std::ostream& out, const tint::Program& program) {
/// @returns true on success
bool GenerateSpirv(const tint::Program* program, const Options& options) {
#if TINT_BUILD_SPV_WRITER
- // TODO(jrprice): Provide a way for the user to set non-default options.
- tint::writer::spirv::Options gen_options;
- gen_options.disable_workgroup_init = options.disable_workgroup_init;
- gen_options.generate_external_texture_bindings = true;
- auto result = tint::writer::spirv::Generate(program, gen_options);
- if (!result.success) {
- PrintWGSL(std::cerr, *program);
- std::cerr << "Failed to generate: " << result.error << std::endl;
- return false;
- }
-
- if (options.format == Format::kSpvAsm) {
- if (!WriteFile(options.output_file, "w", Disassemble(result.spirv))) {
- return false;
+ // TODO(jrprice): Provide a way for the user to set non-default options.
+ tint::writer::spirv::Options gen_options;
+ gen_options.disable_workgroup_init = options.disable_workgroup_init;
+ gen_options.generate_external_texture_bindings = true;
+ auto result = tint::writer::spirv::Generate(program, gen_options);
+ if (!result.success) {
+ PrintWGSL(std::cerr, *program);
+ std::cerr << "Failed to generate: " << result.error << std::endl;
+ return false;
}
- } else {
- if (!WriteFile(options.output_file, "wb", result.spirv)) {
- return false;
+
+ if (options.format == Format::kSpvAsm) {
+ if (!WriteFile(options.output_file, "w", Disassemble(result.spirv))) {
+ return false;
+ }
+ } else {
+ if (!WriteFile(options.output_file, "wb", result.spirv)) {
+ return false;
+ }
}
- }
-
- if (options.validate) {
- // Use Vulkan 1.1, since this is what Tint, internally, uses.
- spvtools::SpirvTools tools(SPV_ENV_VULKAN_1_1);
- tools.SetMessageConsumer([](spv_message_level_t, const char*,
- const spv_position_t& pos, const char* msg) {
- std::cerr << (pos.line + 1) << ":" << (pos.column + 1) << ": " << msg
- << std::endl;
- });
- if (!tools.Validate(result.spirv.data(), result.spirv.size(),
- spvtools::ValidatorOptions())) {
- return false;
+
+ if (options.validate) {
+ // Use Vulkan 1.1, since this is what Tint, internally, uses.
+ spvtools::SpirvTools tools(SPV_ENV_VULKAN_1_1);
+ tools.SetMessageConsumer(
+ [](spv_message_level_t, const char*, const spv_position_t& pos, const char* msg) {
+ std::cerr << (pos.line + 1) << ":" << (pos.column + 1) << ": " << msg << std::endl;
+ });
+ if (!tools.Validate(result.spirv.data(), result.spirv.size(),
+ spvtools::ValidatorOptions())) {
+ return false;
+ }
}
- }
- return true;
+ return true;
#else
- (void)program;
- (void)options;
- std::cerr << "SPIR-V writer not enabled in tint build" << std::endl;
- return false;
+ (void)program;
+ (void)options;
+ std::cerr << "SPIR-V writer not enabled in tint build" << std::endl;
+ return false;
#endif // TINT_BUILD_SPV_WRITER
}
@@ -616,37 +670,36 @@ bool GenerateSpirv(const tint::Program* program, const Options& options) {
/// @returns true on success
bool GenerateWgsl(const tint::Program* program, const Options& options) {
#if TINT_BUILD_WGSL_WRITER
- // TODO(jrprice): Provide a way for the user to set non-default options.
- tint::writer::wgsl::Options gen_options;
- auto result = tint::writer::wgsl::Generate(program, gen_options);
- if (!result.success) {
- std::cerr << "Failed to generate: " << result.error << std::endl;
- return false;
- }
+ // TODO(jrprice): Provide a way for the user to set non-default options.
+ tint::writer::wgsl::Options gen_options;
+ auto result = tint::writer::wgsl::Generate(program, gen_options);
+ if (!result.success) {
+ std::cerr << "Failed to generate: " << result.error << std::endl;
+ return false;
+ }
- if (!WriteFile(options.output_file, "w", result.wgsl)) {
- return false;
- }
-
- if (options.validate) {
- // Attempt to re-parse the output program with Tint's WGSL reader.
- auto source = std::make_unique<tint::Source::File>(options.input_filename,
- result.wgsl);
- auto reparsed_program = tint::reader::wgsl::Parse(source.get());
- if (!reparsed_program.IsValid()) {
- auto diag_printer = tint::diag::Printer::create(stderr, true);
- tint::diag::Formatter diag_formatter;
- diag_formatter.format(reparsed_program.Diagnostics(), diag_printer.get());
- return false;
+ if (!WriteFile(options.output_file, "w", result.wgsl)) {
+ return false;
}
- }
- return true;
+ if (options.validate) {
+ // Attempt to re-parse the output program with Tint's WGSL reader.
+ auto source = std::make_unique<tint::Source::File>(options.input_filename, result.wgsl);
+ auto reparsed_program = tint::reader::wgsl::Parse(source.get());
+ if (!reparsed_program.IsValid()) {
+ auto diag_printer = tint::diag::Printer::create(stderr, true);
+ tint::diag::Formatter diag_formatter;
+ diag_formatter.format(reparsed_program.Diagnostics(), diag_printer.get());
+ return false;
+ }
+ }
+
+ return true;
#else
- (void)program;
- (void)options;
- std::cerr << "WGSL writer not enabled in tint build" << std::endl;
- return false;
+ (void)program;
+ (void)options;
+ std::cerr << "WGSL writer not enabled in tint build" << std::endl;
+ return false;
#endif // TINT_BUILD_WGSL_WRITER
}
@@ -656,111 +709,60 @@ bool GenerateWgsl(const tint::Program* program, const Options& options) {
/// @returns true on success
bool GenerateMsl(const tint::Program* program, const Options& options) {
#if TINT_BUILD_MSL_WRITER
- const tint::Program* input_program = program;
-
- // Remap resource numbers to a flat namespace.
- // TODO(crbug.com/tint/1101): Make this more robust for multiple entry points.
- using BindingPoint = tint::transform::BindingPoint;
- tint::transform::BindingRemapper::BindingPoints binding_points;
- uint32_t next_buffer_idx = 0;
- uint32_t next_sampler_idx = 0;
- uint32_t next_texture_idx = 0;
-
- tint::inspector::Inspector inspector(program);
- auto entry_points = inspector.GetEntryPoints();
- for (auto& entry_point : entry_points) {
- auto bindings = inspector.GetResourceBindings(entry_point.name);
- for (auto& binding : bindings) {
- BindingPoint src = {binding.bind_group, binding.binding};
- if (binding_points.count(src)) {
- continue;
- }
- switch (binding.resource_type) {
- case tint::inspector::ResourceBinding::ResourceType::kUniformBuffer:
- case tint::inspector::ResourceBinding::ResourceType::kStorageBuffer:
- case tint::inspector::ResourceBinding::ResourceType::
- kReadOnlyStorageBuffer:
- binding_points.emplace(src, BindingPoint{0, next_buffer_idx++});
- break;
- case tint::inspector::ResourceBinding::ResourceType::kSampler:
- case tint::inspector::ResourceBinding::ResourceType::kComparisonSampler:
- binding_points.emplace(src, BindingPoint{0, next_sampler_idx++});
- break;
- case tint::inspector::ResourceBinding::ResourceType::kSampledTexture:
- case tint::inspector::ResourceBinding::ResourceType::
- kMultisampledTexture:
- case tint::inspector::ResourceBinding::ResourceType::
- kWriteOnlyStorageTexture:
- case tint::inspector::ResourceBinding::ResourceType::kDepthTexture:
- case tint::inspector::ResourceBinding::ResourceType::
- kDepthMultisampledTexture:
- case tint::inspector::ResourceBinding::ResourceType::kExternalTexture:
- binding_points.emplace(src, BindingPoint{0, next_texture_idx++});
- break;
- }
+ // Remap resource numbers to a flat namespace.
+ // TODO(crbug.com/tint/1501): Do this via Options::BindingMap.
+ const tint::Program* input_program = program;
+ auto flattened = tint::writer::FlattenBindings(program);
+ if (flattened) {
+ input_program = &*flattened;
}
- }
-
- // Run the binding remapper transform.
- tint::transform::Output transform_output;
- if (!binding_points.empty()) {
- tint::transform::Manager manager;
- tint::transform::DataMap inputs;
- inputs.Add<tint::transform::BindingRemapper::Remappings>(
- std::move(binding_points),
- tint::transform::BindingRemapper::AccessControls{},
- /* mayCollide */ true);
- manager.Add<tint::transform::BindingRemapper>();
- transform_output = manager.Run(program, inputs);
- input_program = &transform_output.program;
- }
-
- // TODO(jrprice): Provide a way for the user to set non-default options.
- tint::writer::msl::Options gen_options;
- gen_options.disable_workgroup_init = options.disable_workgroup_init;
- gen_options.generate_external_texture_bindings = true;
- auto result = tint::writer::msl::Generate(input_program, gen_options);
- if (!result.success) {
- PrintWGSL(std::cerr, *program);
- std::cerr << "Failed to generate: " << result.error << std::endl;
- return false;
- }
- if (!WriteFile(options.output_file, "w", result.msl)) {
- return false;
- }
+ // TODO(jrprice): Provide a way for the user to set non-default options.
+ tint::writer::msl::Options gen_options;
+ gen_options.disable_workgroup_init = options.disable_workgroup_init;
+ gen_options.generate_external_texture_bindings = true;
+ auto result = tint::writer::msl::Generate(input_program, gen_options);
+ if (!result.success) {
+ PrintWGSL(std::cerr, *program);
+ std::cerr << "Failed to generate: " << result.error << std::endl;
+ return false;
+ }
- if (options.validate) {
- tint::val::Result res;
+ if (!WriteFile(options.output_file, "w", result.msl)) {
+ return false;
+ }
+
+ if (options.validate) {
+ tint::val::Result res;
#ifdef TINT_ENABLE_MSL_VALIDATION_USING_METAL_API
- res = tint::val::MslUsingMetalAPI(result.msl);
+ res = tint::val::MslUsingMetalAPI(result.msl);
#else
#ifdef _WIN32
- const char* default_xcrun_exe = "metal.exe";
+ const char* default_xcrun_exe = "metal.exe";
#else
- const char* default_xcrun_exe = "xcrun";
+ const char* default_xcrun_exe = "xcrun";
#endif
- auto xcrun = tint::utils::Command::LookPath(
- options.xcrun_path.empty() ? default_xcrun_exe : options.xcrun_path);
- if (xcrun.Found()) {
- res = tint::val::Msl(xcrun.Path(), result.msl);
- } else {
- res.output = "xcrun executable not found. Cannot validate.";
- res.failed = true;
- }
+ auto xcrun = tint::utils::Command::LookPath(
+ options.xcrun_path.empty() ? default_xcrun_exe : options.xcrun_path);
+ if (xcrun.Found()) {
+ res = tint::val::Msl(xcrun.Path(), result.msl);
+ } else {
+ res.output = "xcrun executable not found. Cannot validate.";
+ res.failed = true;
+ }
#endif // TINT_ENABLE_MSL_VALIDATION_USING_METAL_API
- if (res.failed) {
- std::cerr << res.output << std::endl;
- return false;
+ if (res.failed) {
+ std::cerr << res.output << std::endl;
+ return false;
+ }
}
- }
- return true;
+ return true;
#else
- (void)program;
- (void)options;
- std::cerr << "MSL writer not enabled in tint build" << std::endl;
- return false;
+ (void)program;
+ (void)options;
+ std::cerr << "MSL writer not enabled in tint build" << std::endl;
+ return false;
#endif // TINT_BUILD_MSL_WRITER
}
@@ -770,69 +772,70 @@ bool GenerateMsl(const tint::Program* program, const Options& options) {
/// @returns true on success
bool GenerateHlsl(const tint::Program* program, const Options& options) {
#if TINT_BUILD_HLSL_WRITER
- // TODO(jrprice): Provide a way for the user to set non-default options.
- tint::writer::hlsl::Options gen_options;
- gen_options.disable_workgroup_init = options.disable_workgroup_init;
- gen_options.generate_external_texture_bindings = true;
- auto result = tint::writer::hlsl::Generate(program, gen_options);
- if (!result.success) {
- PrintWGSL(std::cerr, *program);
- std::cerr << "Failed to generate: " << result.error << std::endl;
- return false;
- }
+ // TODO(jrprice): Provide a way for the user to set non-default options.
+ tint::writer::hlsl::Options gen_options;
+ gen_options.disable_workgroup_init = options.disable_workgroup_init;
+ gen_options.generate_external_texture_bindings = true;
+ gen_options.root_constant_binding_point = options.hlsl_root_constant_binding_point;
+ auto result = tint::writer::hlsl::Generate(program, gen_options);
+ if (!result.success) {
+ PrintWGSL(std::cerr, *program);
+ std::cerr << "Failed to generate: " << result.error << std::endl;
+ return false;
+ }
- if (!WriteFile(options.output_file, "w", result.hlsl)) {
- return false;
- }
+ if (!WriteFile(options.output_file, "w", result.hlsl)) {
+ return false;
+ }
- if (options.validate) {
- tint::val::Result res;
- if (options.use_fxc) {
+ if (options.validate) {
+ tint::val::Result res;
+ if (options.use_fxc) {
#ifdef _WIN32
- res = tint::val::HlslUsingFXC(result.hlsl, result.entry_points);
+ res = tint::val::HlslUsingFXC(result.hlsl, result.entry_points, options.overrides);
#else
- res.failed = true;
- res.output = "FXC can only be used on Windows. Sorry :X";
+ res.failed = true;
+ res.output = "FXC can only be used on Windows. Sorry :X";
#endif // _WIN32
- } else {
- auto dxc = tint::utils::Command::LookPath(
- options.dxc_path.empty() ? "dxc" : options.dxc_path);
- if (dxc.Found()) {
- res = tint::val::HlslUsingDXC(dxc.Path(), result.hlsl,
- result.entry_points);
- } else {
- res.failed = true;
- res.output = "DXC executable not found. Cannot validate";
- }
- }
- if (res.failed) {
- std::cerr << res.output << std::endl;
- return false;
+ } else {
+ auto dxc =
+ tint::utils::Command::LookPath(options.dxc_path.empty() ? "dxc" : options.dxc_path);
+ if (dxc.Found()) {
+ res = tint::val::HlslUsingDXC(dxc.Path(), result.hlsl, result.entry_points,
+ options.overrides);
+ } else {
+ res.failed = true;
+ res.output = "DXC executable not found. Cannot validate";
+ }
+ }
+ if (res.failed) {
+ std::cerr << res.output << std::endl;
+ return false;
+ }
}
- }
- return true;
+ return true;
#else
- (void)program;
- (void)options;
- std::cerr << "HLSL writer not enabled in tint build" << std::endl;
- return false;
+ (void)program;
+ (void)options;
+ std::cerr << "HLSL writer not enabled in tint build" << std::endl;
+ return false;
#endif // TINT_BUILD_HLSL_WRITER
}
#if TINT_BUILD_GLSL_WRITER
EShLanguage pipeline_stage_to_esh_language(tint::ast::PipelineStage stage) {
- switch (stage) {
- case tint::ast::PipelineStage::kFragment:
- return EShLangFragment;
- case tint::ast::PipelineStage::kVertex:
- return EShLangVertex;
- case tint::ast::PipelineStage::kCompute:
- return EShLangCompute;
- default:
- TINT_ASSERT(AST, false);
- return EShLangVertex;
- }
+ switch (stage) {
+ case tint::ast::PipelineStage::kFragment:
+ return EShLangFragment;
+ case tint::ast::PipelineStage::kVertex:
+ return EShLangVertex;
+ case tint::ast::PipelineStage::kCompute:
+ return EShLangCompute;
+ default:
+ TINT_ASSERT(AST, false);
+ return EShLangVertex;
+ }
}
#endif
@@ -842,379 +845,359 @@ EShLanguage pipeline_stage_to_esh_language(tint::ast::PipelineStage stage) {
/// @returns true on success
bool GenerateGlsl(const tint::Program* program, const Options& options) {
#if TINT_BUILD_GLSL_WRITER
- if (options.validate) {
- glslang::InitializeProcess();
- }
-
- auto generate = [&](const tint::Program* prg,
- const std::string entry_point_name) -> bool {
- tint::writer::glsl::Options gen_options;
- gen_options.generate_external_texture_bindings = true;
- auto result =
- tint::writer::glsl::Generate(prg, gen_options, entry_point_name);
- if (!result.success) {
- PrintWGSL(std::cerr, *prg);
- std::cerr << "Failed to generate: " << result.error << std::endl;
- return false;
+ if (options.validate) {
+ glslang::InitializeProcess();
}
- if (!WriteFile(options.output_file, "w", result.glsl)) {
- return false;
- }
+ auto generate = [&](const tint::Program* prg, const std::string entry_point_name) -> bool {
+ tint::writer::glsl::Options gen_options;
+ gen_options.generate_external_texture_bindings = true;
+ auto result = tint::writer::glsl::Generate(prg, gen_options, entry_point_name);
+ if (!result.success) {
+ PrintWGSL(std::cerr, *prg);
+ std::cerr << "Failed to generate: " << result.error << std::endl;
+ return false;
+ }
- if (options.validate) {
- for (auto entry_pt : result.entry_points) {
- EShLanguage lang = pipeline_stage_to_esh_language(entry_pt.second);
- glslang::TShader shader(lang);
- const char* strings[1] = {result.glsl.c_str()};
- int lengths[1] = {static_cast<int>(result.glsl.length())};
- shader.setStringsWithLengths(strings, lengths, 1);
- shader.setEntryPoint("main");
- bool glslang_result =
- shader.parse(&glslang::DefaultTBuiltInResource, 310, EEsProfile,
- false, false, EShMsgDefault);
- if (!glslang_result) {
- std::cerr << "Error parsing GLSL shader:\n"
- << shader.getInfoLog() << "\n"
- << shader.getInfoDebugLog() << "\n";
- return false;
+ if (!WriteFile(options.output_file, "w", result.glsl)) {
+ return false;
}
- }
- }
- return true;
- };
- tint::inspector::Inspector inspector(program);
+ if (options.validate) {
+ for (auto entry_pt : result.entry_points) {
+ EShLanguage lang = pipeline_stage_to_esh_language(entry_pt.second);
+ glslang::TShader shader(lang);
+ const char* strings[1] = {result.glsl.c_str()};
+ int lengths[1] = {static_cast<int>(result.glsl.length())};
+ shader.setStringsWithLengths(strings, lengths, 1);
+ shader.setEntryPoint("main");
+ bool glslang_result = shader.parse(&glslang::DefaultTBuiltInResource, 310,
+ EEsProfile, false, false, EShMsgDefault);
+ if (!glslang_result) {
+ std::cerr << "Error parsing GLSL shader:\n"
+ << shader.getInfoLog() << "\n"
+ << shader.getInfoDebugLog() << "\n";
+ return false;
+ }
+ }
+ }
+ return true;
+ };
+
+ tint::inspector::Inspector inspector(program);
- if (inspector.GetEntryPoints().empty()) {
- // Pass empty string here so that the GLSL generator will generate
- // code for all functions, reachable or not.
- return generate(program, "");
- }
+ if (inspector.GetEntryPoints().empty()) {
+ // Pass empty string here so that the GLSL generator will generate
+ // code for all functions, reachable or not.
+ return generate(program, "");
+ }
- bool success = true;
- for (auto& entry_point : inspector.GetEntryPoints()) {
- success &= generate(program, entry_point.name);
- }
- return success;
+ bool success = true;
+ for (auto& entry_point : inspector.GetEntryPoints()) {
+ success &= generate(program, entry_point.name);
+ }
+ return success;
#else
- (void)program;
- (void)options;
- std::cerr << "GLSL writer not enabled in tint build" << std::endl;
- return false;
+ (void)program;
+ (void)options;
+ std::cerr << "GLSL writer not enabled in tint build" << std::endl;
+ return false;
#endif // TINT_BUILD_GLSL_WRITER
}
} // namespace
int main(int argc, const char** argv) {
- std::vector<std::string> args(argv, argv + argc);
- Options options;
+ std::vector<std::string> args(argv, argv + argc);
+ Options options;
- tint::SetInternalCompilerErrorReporter(&TintInternalCompilerErrorReporter);
+ tint::SetInternalCompilerErrorReporter(&TintInternalCompilerErrorReporter);
#if TINT_BUILD_WGSL_WRITER
- tint::Program::printer = [](const tint::Program* program) {
- auto result = tint::writer::wgsl::Generate(program, {});
- if (!result.error.empty()) {
- return "error: " + result.error;
- }
- return result.wgsl;
- };
+ tint::Program::printer = [](const tint::Program* program) {
+ auto result = tint::writer::wgsl::Generate(program, {});
+ if (!result.error.empty()) {
+ return "error: " + result.error;
+ }
+ return result.wgsl;
+ };
#endif // TINT_BUILD_WGSL_WRITER
- if (!ParseArgs(args, &options)) {
- std::cerr << "Failed to parse arguments." << std::endl;
- return 1;
- }
-
- struct TransformFactory {
- const char* name;
- std::function<void(tint::transform::Manager& manager,
- tint::transform::DataMap& inputs)>
- make;
- };
- std::vector<TransformFactory> transforms = {
- {"first_index_offset",
- [](tint::transform::Manager& m, tint::transform::DataMap& i) {
- i.Add<tint::transform::FirstIndexOffset::BindingPoint>(0, 0);
- m.Add<tint::transform::FirstIndexOffset>();
- }},
- {"fold_trivial_single_use_lets",
- [](tint::transform::Manager& m, tint::transform::DataMap&) {
- m.Add<tint::transform::FoldTrivialSingleUseLets>();
- }},
- {"renamer",
- [](tint::transform::Manager& m, tint::transform::DataMap&) {
- m.Add<tint::transform::Renamer>();
- }},
- {"robustness",
- [](tint::transform::Manager& m, tint::transform::DataMap&) {
- m.Add<tint::transform::Robustness>();
- }},
- };
- auto transform_names = [&] {
- std::stringstream names;
- for (auto& t : transforms) {
- names << " " << t.name << std::endl;
+ if (!ParseArgs(args, &options)) {
+ std::cerr << "Failed to parse arguments." << std::endl;
+ return 1;
}
- return names.str();
- };
- if (options.show_help) {
- std::string usage =
- tint::utils::ReplaceAll(kUsage, "${transforms}", transform_names());
- std::cout << usage << std::endl;
- return 0;
- }
-
- // Implement output format defaults.
- if (options.format == Format::kNone) {
- // Try inferring from filename.
- options.format = infer_format(options.output_file);
- }
- if (options.format == Format::kNone) {
- // Ultimately, default to SPIR-V assembly. That's nice for interactive use.
- options.format = Format::kSpvAsm;
- }
-
- auto diag_printer = tint::diag::Printer::create(stderr, true);
- tint::diag::Formatter diag_formatter;
-
- std::unique_ptr<tint::Program> program;
- std::unique_ptr<tint::Source::File> source_file;
-
- enum class InputFormat {
- kUnknown,
- kWgsl,
- kSpirvBin,
- kSpirvAsm,
- };
- auto input_format = InputFormat::kUnknown;
-
- if (options.input_filename.size() > 5 &&
- options.input_filename.substr(options.input_filename.size() - 5) ==
- ".wgsl") {
- input_format = InputFormat::kWgsl;
- } else if (options.input_filename.size() > 4 &&
- options.input_filename.substr(options.input_filename.size() - 4) ==
- ".spv") {
- input_format = InputFormat::kSpirvBin;
- } else if (options.input_filename.size() > 7 &&
- options.input_filename.substr(options.input_filename.size() - 7) ==
- ".spvasm") {
- input_format = InputFormat::kSpirvAsm;
- }
-
- switch (input_format) {
- case InputFormat::kUnknown: {
- std::cerr << "Unknown input format" << std::endl;
- return 1;
+ struct TransformFactory {
+ const char* name;
+ std::function<void(tint::transform::Manager& manager, tint::transform::DataMap& inputs)>
+ make;
+ };
+ std::vector<TransformFactory> transforms = {
+ {"first_index_offset",
+ [](tint::transform::Manager& m, tint::transform::DataMap& i) {
+ i.Add<tint::transform::FirstIndexOffset::BindingPoint>(0, 0);
+ m.Add<tint::transform::FirstIndexOffset>();
+ }},
+ {"fold_trivial_single_use_lets",
+ [](tint::transform::Manager& m, tint::transform::DataMap&) {
+ m.Add<tint::transform::FoldTrivialSingleUseLets>();
+ }},
+ {"renamer", [](tint::transform::Manager& m,
+ tint::transform::DataMap&) { m.Add<tint::transform::Renamer>(); }},
+ {"robustness", [](tint::transform::Manager& m,
+ tint::transform::DataMap&) { m.Add<tint::transform::Robustness>(); }},
+ };
+ auto transform_names = [&] {
+ std::stringstream names;
+ for (auto& t : transforms) {
+ names << " " << t.name << std::endl;
+ }
+ return names.str();
+ };
+
+ if (options.show_help) {
+ std::string usage = tint::utils::ReplaceAll(kUsage, "${transforms}", transform_names());
+ std::cout << usage << std::endl;
+ return 0;
}
- case InputFormat::kWgsl: {
+
+ // Implement output format defaults.
+ if (options.format == Format::kNone) {
+ // Try inferring from filename.
+ options.format = infer_format(options.output_file);
+ }
+ if (options.format == Format::kNone) {
+ // Ultimately, default to SPIR-V assembly. That's nice for interactive use.
+ options.format = Format::kSpvAsm;
+ }
+
+ auto diag_printer = tint::diag::Printer::create(stderr, true);
+ tint::diag::Formatter diag_formatter;
+
+ std::unique_ptr<tint::Program> program;
+ std::unique_ptr<tint::Source::File> source_file;
+
+ enum class InputFormat {
+ kUnknown,
+ kWgsl,
+ kSpirvBin,
+ kSpirvAsm,
+ };
+ auto input_format = InputFormat::kUnknown;
+
+ if (options.input_filename.size() > 5 &&
+ options.input_filename.substr(options.input_filename.size() - 5) == ".wgsl") {
+ input_format = InputFormat::kWgsl;
+ } else if (options.input_filename.size() > 4 &&
+ options.input_filename.substr(options.input_filename.size() - 4) == ".spv") {
+ input_format = InputFormat::kSpirvBin;
+ } else if (options.input_filename.size() > 7 &&
+ options.input_filename.substr(options.input_filename.size() - 7) == ".spvasm") {
+ input_format = InputFormat::kSpirvAsm;
+ }
+
+ switch (input_format) {
+ case InputFormat::kUnknown: {
+ std::cerr << "Unknown input format" << std::endl;
+ return 1;
+ }
+ case InputFormat::kWgsl: {
#if TINT_BUILD_WGSL_READER
- std::vector<uint8_t> data;
- if (!ReadFile<uint8_t>(options.input_filename, &data)) {
- return 1;
- }
- source_file = std::make_unique<tint::Source::File>(
- options.input_filename, std::string(data.begin(), data.end()));
- program = std::make_unique<tint::Program>(
- tint::reader::wgsl::Parse(source_file.get()));
- break;
+ std::vector<uint8_t> data;
+ if (!ReadFile<uint8_t>(options.input_filename, &data)) {
+ return 1;
+ }
+ source_file = std::make_unique<tint::Source::File>(
+ options.input_filename, std::string(data.begin(), data.end()));
+ program = std::make_unique<tint::Program>(tint::reader::wgsl::Parse(source_file.get()));
+ break;
#else
- std::cerr << "Tint not built with the WGSL reader enabled" << std::endl;
- return 1;
+ std::cerr << "Tint not built with the WGSL reader enabled" << std::endl;
+ return 1;
#endif // TINT_BUILD_WGSL_READER
- }
- case InputFormat::kSpirvBin: {
+ }
+ case InputFormat::kSpirvBin: {
#if TINT_BUILD_SPV_READER
- std::vector<uint32_t> data;
- if (!ReadFile<uint32_t>(options.input_filename, &data)) {
- return 1;
- }
- program =
- std::make_unique<tint::Program>(tint::reader::spirv::Parse(data));
- break;
+ std::vector<uint32_t> data;
+ if (!ReadFile<uint32_t>(options.input_filename, &data)) {
+ return 1;
+ }
+ program = std::make_unique<tint::Program>(tint::reader::spirv::Parse(data));
+ break;
#else
- std::cerr << "Tint not built with the SPIR-V reader enabled" << std::endl;
- return 1;
+ std::cerr << "Tint not built with the SPIR-V reader enabled" << std::endl;
+ return 1;
#endif // TINT_BUILD_SPV_READER
- }
- case InputFormat::kSpirvAsm: {
+ }
+ case InputFormat::kSpirvAsm: {
#if TINT_BUILD_SPV_READER
- std::vector<char> text;
- if (!ReadFile<char>(options.input_filename, &text)) {
- return 1;
- }
- // Use Vulkan 1.1, since this is what Tint, internally, is expecting.
- spvtools::SpirvTools tools(SPV_ENV_VULKAN_1_1);
- tools.SetMessageConsumer([](spv_message_level_t, const char*,
- const spv_position_t& pos, const char* msg) {
- std::cerr << (pos.line + 1) << ":" << (pos.column + 1) << ": " << msg
- << std::endl;
- });
- std::vector<uint32_t> data;
- if (!tools.Assemble(text.data(), text.size(), &data,
- SPV_TEXT_TO_BINARY_OPTION_PRESERVE_NUMERIC_IDS)) {
- return 1;
- }
- program =
- std::make_unique<tint::Program>(tint::reader::spirv::Parse(data));
- break;
+ std::vector<char> text;
+ if (!ReadFile<char>(options.input_filename, &text)) {
+ return 1;
+ }
+ // Use Vulkan 1.1, since this is what Tint, internally, is expecting.
+ spvtools::SpirvTools tools(SPV_ENV_VULKAN_1_1);
+ tools.SetMessageConsumer([](spv_message_level_t, const char*, const spv_position_t& pos,
+ const char* msg) {
+ std::cerr << (pos.line + 1) << ":" << (pos.column + 1) << ": " << msg << std::endl;
+ });
+ std::vector<uint32_t> data;
+ if (!tools.Assemble(text.data(), text.size(), &data,
+ SPV_TEXT_TO_BINARY_OPTION_PRESERVE_NUMERIC_IDS)) {
+ return 1;
+ }
+ program = std::make_unique<tint::Program>(tint::reader::spirv::Parse(data));
+ break;
#else
- std::cerr << "Tint not built with the SPIR-V reader enabled" << std::endl;
- return 1;
+ std::cerr << "Tint not built with the SPIR-V reader enabled" << std::endl;
+ return 1;
#endif // TINT_BUILD_SPV_READER
+ }
+ }
+
+ if (!program) {
+ std::cerr << "Failed to parse input file: " << options.input_filename << std::endl;
+ return 1;
}
- }
-
- if (!program) {
- std::cerr << "Failed to parse input file: " << options.input_filename
- << std::endl;
- return 1;
- }
- if (program->Diagnostics().count() > 0) {
- if (!program->IsValid() && input_format != InputFormat::kWgsl) {
- // Invalid program from a non-wgsl source. Print the WGSL, to help
- // understand the diagnostics.
- PrintWGSL(std::cout, *program);
+ if (program->Diagnostics().count() > 0) {
+ if (!program->IsValid() && input_format != InputFormat::kWgsl) {
+ // Invalid program from a non-wgsl source. Print the WGSL, to help
+ // understand the diagnostics.
+ PrintWGSL(std::cout, *program);
+ }
+ diag_formatter.format(program->Diagnostics(), diag_printer.get());
}
- diag_formatter.format(program->Diagnostics(), diag_printer.get());
- }
-
- if (!program->IsValid()) {
- return 1;
- }
- if (options.parse_only) {
- return 1;
- }
-
- tint::transform::Manager transform_manager;
- tint::transform::DataMap transform_inputs;
- for (const auto& name : options.transforms) {
- // TODO(dsinclair): The vertex pulling transform requires setup code to
- // be run that needs user input. Should we find a way to support that here
- // maybe through a provided file?
-
- bool found = false;
- for (auto& t : transforms) {
- if (t.name == name) {
- t.make(transform_manager, transform_inputs);
- found = true;
- break;
- }
+
+ if (!program->IsValid()) {
+ return 1;
}
- if (!found) {
- std::cerr << "Unknown transform: " << name << std::endl;
- std::cerr << "Available transforms: " << std::endl << transform_names();
- return 1;
+ if (options.parse_only) {
+ return 1;
}
- }
- if (options.emit_single_entry_point) {
- transform_manager.append(
- std::make_unique<tint::transform::SingleEntryPoint>());
- transform_inputs.Add<tint::transform::SingleEntryPoint::Config>(
- options.ep_name);
- }
+ tint::transform::Manager transform_manager;
+ tint::transform::DataMap transform_inputs;
+ for (const auto& name : options.transforms) {
+ // TODO(dsinclair): The vertex pulling transform requires setup code to
+ // be run that needs user input. Should we find a way to support that here
+ // maybe through a provided file?
+
+ bool found = false;
+ for (auto& t : transforms) {
+ if (t.name == name) {
+ t.make(transform_manager, transform_inputs);
+ found = true;
+ break;
+ }
+ }
+ if (!found) {
+ std::cerr << "Unknown transform: " << name << std::endl;
+ std::cerr << "Available transforms: " << std::endl << transform_names();
+ return 1;
+ }
+ }
- switch (options.format) {
- case Format::kMsl: {
+ if (options.emit_single_entry_point) {
+ transform_manager.append(std::make_unique<tint::transform::SingleEntryPoint>());
+ transform_inputs.Add<tint::transform::SingleEntryPoint::Config>(options.ep_name);
+ }
+
+ switch (options.format) {
+ case Format::kMsl: {
#if TINT_BUILD_MSL_WRITER
- transform_inputs.Add<tint::transform::Renamer::Config>(
- tint::transform::Renamer::Target::kMslKeywords,
- /* preserve_unicode */ false);
- transform_manager.Add<tint::transform::Renamer>();
+ transform_inputs.Add<tint::transform::Renamer::Config>(
+ tint::transform::Renamer::Target::kMslKeywords,
+ /* preserve_unicode */ false);
+ transform_manager.Add<tint::transform::Renamer>();
#endif // TINT_BUILD_MSL_WRITER
- break;
- }
+ break;
+ }
#if TINT_BUILD_GLSL_WRITER
- case Format::kGlsl: {
- break;
- }
+ case Format::kGlsl: {
+ break;
+ }
#endif // TINT_BUILD_GLSL_WRITER
- case Format::kHlsl: {
+ case Format::kHlsl: {
#if TINT_BUILD_HLSL_WRITER
- transform_inputs.Add<tint::transform::Renamer::Config>(
- tint::transform::Renamer::Target::kHlslKeywords,
- /* preserve_unicode */ false);
- transform_manager.Add<tint::transform::Renamer>();
+ transform_inputs.Add<tint::transform::Renamer::Config>(
+ tint::transform::Renamer::Target::kHlslKeywords,
+ /* preserve_unicode */ false);
+ transform_manager.Add<tint::transform::Renamer>();
#endif // TINT_BUILD_HLSL_WRITER
- break;
+ break;
+ }
+ default:
+ break;
}
- default:
- break;
- }
-
- auto out = transform_manager.Run(program.get(), std::move(transform_inputs));
- if (!out.program.IsValid()) {
- PrintWGSL(std::cerr, out.program);
- diag_formatter.format(out.program.Diagnostics(), diag_printer.get());
- return 1;
- }
-
- *program = std::move(out.program);
-
- if (options.dump_inspector_bindings) {
- std::cout << std::string(80, '-') << std::endl;
- tint::inspector::Inspector inspector(program.get());
- auto entry_points = inspector.GetEntryPoints();
- if (!inspector.error().empty()) {
- std::cerr << "Failed to get entry points from Inspector: "
- << inspector.error() << std::endl;
- return 1;
+
+ auto out = transform_manager.Run(program.get(), std::move(transform_inputs));
+ if (!out.program.IsValid()) {
+ PrintWGSL(std::cerr, out.program);
+ diag_formatter.format(out.program.Diagnostics(), diag_printer.get());
+ return 1;
}
- for (auto& entry_point : entry_points) {
- auto bindings = inspector.GetResourceBindings(entry_point.name);
- if (!inspector.error().empty()) {
- std::cerr << "Failed to get bindings from Inspector: "
- << inspector.error() << std::endl;
+ *program = std::move(out.program);
+
+ if (options.dump_inspector_bindings) {
+ std::cout << std::string(80, '-') << std::endl;
+ tint::inspector::Inspector inspector(program.get());
+ auto entry_points = inspector.GetEntryPoints();
+ if (!inspector.error().empty()) {
+ std::cerr << "Failed to get entry points from Inspector: " << inspector.error()
+ << std::endl;
+ return 1;
+ }
+
+ for (auto& entry_point : entry_points) {
+ auto bindings = inspector.GetResourceBindings(entry_point.name);
+ if (!inspector.error().empty()) {
+ std::cerr << "Failed to get bindings from Inspector: " << inspector.error()
+ << std::endl;
+ return 1;
+ }
+ std::cout << "Entry Point = " << entry_point.name << std::endl;
+ for (auto& binding : bindings) {
+ std::cout << "\t[" << binding.bind_group << "][" << binding.binding
+ << "]:" << std::endl;
+ std::cout << "\t\t resource_type = " << ResourceTypeToString(binding.resource_type)
+ << std::endl;
+ std::cout << "\t\t dim = " << TextureDimensionToString(binding.dim) << std::endl;
+ std::cout << "\t\t sampled_kind = " << SampledKindToString(binding.sampled_kind)
+ << std::endl;
+ std::cout << "\t\t image_format = " << TexelFormatToString(binding.image_format)
+ << std::endl;
+ }
+ }
+ std::cout << std::string(80, '-') << std::endl;
+ }
+
+ bool success = false;
+ switch (options.format) {
+ case Format::kSpirv:
+ case Format::kSpvAsm:
+ success = GenerateSpirv(program.get(), options);
+ break;
+ case Format::kWgsl:
+ success = GenerateWgsl(program.get(), options);
+ break;
+ case Format::kMsl:
+ success = GenerateMsl(program.get(), options);
+ break;
+ case Format::kHlsl:
+ success = GenerateHlsl(program.get(), options);
+ break;
+ case Format::kGlsl:
+ success = GenerateGlsl(program.get(), options);
+ break;
+ default:
+ std::cerr << "Unknown output format specified" << std::endl;
+ return 1;
+ }
+ if (!success) {
return 1;
- }
- std::cout << "Entry Point = " << entry_point.name << std::endl;
- for (auto& binding : bindings) {
- std::cout << "\t[" << binding.bind_group << "][" << binding.binding
- << "]:" << std::endl;
- std::cout << "\t\t resource_type = "
- << ResourceTypeToString(binding.resource_type) << std::endl;
- std::cout << "\t\t dim = " << TextureDimensionToString(binding.dim)
- << std::endl;
- std::cout << "\t\t sampled_kind = "
- << SampledKindToString(binding.sampled_kind) << std::endl;
- std::cout << "\t\t image_format = "
- << TexelFormatToString(binding.image_format) << std::endl;
- }
}
- std::cout << std::string(80, '-') << std::endl;
- }
-
- bool success = false;
- switch (options.format) {
- case Format::kSpirv:
- case Format::kSpvAsm:
- success = GenerateSpirv(program.get(), options);
- break;
- case Format::kWgsl:
- success = GenerateWgsl(program.get(), options);
- break;
- case Format::kMsl:
- success = GenerateMsl(program.get(), options);
- break;
- case Format::kHlsl:
- success = GenerateHlsl(program.get(), options);
- break;
- case Format::kGlsl:
- success = GenerateGlsl(program.get(), options);
- break;
- default:
- std::cerr << "Unknown output format specified" << std::endl;
- return 1;
- }
- if (!success) {
- return 1;
- }
-
- return 0;
+
+ return 0;
}
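The hunk above only re-indents the transform-pipeline section of tint's main(). For reference, a minimal sketch (not part of this patch) of driving the same SingleEntryPoint plumbing directly through the library API: the Manager, DataMap, Config and Run calls are lifted from the code above, while the include paths and the helper name KeepOnlyEntryPoint are assumptions.

    #include <memory>
    #include <string>
    #include <utility>

    #include "src/tint/program.h"
    #include "src/tint/transform/manager.h"
    #include "src/tint/transform/single_entry_point.h"

    // Strip a program down to one entry point, mirroring the flow in main() above.
    tint::Program KeepOnlyEntryPoint(const tint::Program* program, const std::string& ep_name) {
        tint::transform::Manager manager;
        tint::transform::DataMap inputs;
        manager.append(std::make_unique<tint::transform::SingleEntryPoint>());
        inputs.Add<tint::transform::SingleEntryPoint::Config>(ep_name);
        auto out = manager.Run(program, std::move(inputs));
        return std::move(out.program);  // callers should check out.program.IsValid()
    }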
diff --git a/chromium/third_party/dawn/src/tint/debug.cc b/chromium/third_party/dawn/src/tint/debug.cc
index c51cf3f390b..72da2935e07 100644
--- a/chromium/third_party/dawn/src/tint/debug.cc
+++ b/chromium/third_party/dawn/src/tint/debug.cc
@@ -26,7 +26,7 @@ InternalCompilerErrorReporter* ice_reporter = nullptr;
} // namespace
void SetInternalCompilerErrorReporter(InternalCompilerErrorReporter* reporter) {
- ice_reporter = reporter;
+ ice_reporter = reporter;
}
InternalCompilerError::InternalCompilerError(const char* file,
@@ -36,15 +36,15 @@ InternalCompilerError::InternalCompilerError(const char* file,
: file_(file), line_(line), system_(system), diagnostics_(diagnostics) {}
InternalCompilerError::~InternalCompilerError() {
- auto file = std::make_shared<Source::File>(file_, "");
- Source source{Source::Range{{line_}}, file.get()};
- diagnostics_.add_ice(system_, msg_.str(), source, std::move(file));
+ auto file = std::make_shared<Source::File>(file_, "");
+ Source source{Source::Range{{line_}}, file.get()};
+ diagnostics_.add_ice(system_, msg_.str(), source, std::move(file));
- if (ice_reporter) {
- ice_reporter(diagnostics_);
- }
+ if (ice_reporter) {
+ ice_reporter(diagnostics_);
+ }
- debugger::Break();
+ debugger::Break();
}
} // namespace tint
diff --git a/chromium/third_party/dawn/src/tint/debug.h b/chromium/third_party/dawn/src/tint/debug.h
index 90e6b66f7b1..86186acaff5 100644
--- a/chromium/third_party/dawn/src/tint/debug.h
+++ b/chromium/third_party/dawn/src/tint/debug.h
@@ -40,37 +40,37 @@ void SetInternalCompilerErrorReporter(InternalCompilerErrorReporter* reporter);
/// InternalCompilerErrorReporter is set, then it is called with the diagnostic
/// list.
class InternalCompilerError {
- public:
- /// Constructor
- /// @param file the file containing the ICE
- /// @param line the line containing the ICE
- /// @param system the Tint system that has raised the ICE
- /// @param diagnostics the list of diagnostics to append the ICE message to
- InternalCompilerError(const char* file,
- size_t line,
- diag::System system,
- diag::List& diagnostics);
-
- /// Destructor.
- /// Adds the internal compiler error message to the diagnostics list, and then
- /// calls the InternalCompilerErrorReporter if one is set.
- ~InternalCompilerError();
-
- /// Appends `arg` to the ICE message.
- /// @param arg the argument to append to the ICE message
- /// @returns this object so calls can be chained
- template <typename T>
- InternalCompilerError& operator<<(T&& arg) {
- msg_ << std::forward<T>(arg);
- return *this;
- }
-
- private:
- char const* const file_;
- const size_t line_;
- diag::System system_;
- diag::List& diagnostics_;
- std::stringstream msg_;
+ public:
+ /// Constructor
+ /// @param file the file containing the ICE
+ /// @param line the line containing the ICE
+ /// @param system the Tint system that has raised the ICE
+ /// @param diagnostics the list of diagnostics to append the ICE message to
+ InternalCompilerError(const char* file,
+ size_t line,
+ diag::System system,
+ diag::List& diagnostics);
+
+ /// Destructor.
+ /// Adds the internal compiler error message to the diagnostics list, and then
+ /// calls the InternalCompilerErrorReporter if one is set.
+ ~InternalCompilerError();
+
+ /// Appends `arg` to the ICE message.
+ /// @param arg the argument to append to the ICE message
+ /// @returns this object so calls can be chained
+ template <typename T>
+ InternalCompilerError& operator<<(T&& arg) {
+ msg_ << std::forward<T>(arg);
+ return *this;
+ }
+
+ private:
+ char const* const file_;
+ const size_t line_;
+ diag::System system_;
+ diag::List& diagnostics_;
+ std::stringstream msg_;
};
} // namespace tint
@@ -81,9 +81,8 @@ class InternalCompilerError {
/// set.
/// The ICE message contains the callsite's file and line.
/// Use the `<<` operator to append an error message to the ICE.
-#define TINT_ICE(system, diagnostics) \
- tint::InternalCompilerError(__FILE__, __LINE__, \
- ::tint::diag::System::system, diagnostics)
+#define TINT_ICE(system, diagnostics) \
+ tint::InternalCompilerError(__FILE__, __LINE__, ::tint::diag::System::system, diagnostics)
/// TINT_UNREACHABLE() is a macro for appending a "TINT_UNREACHABLE"
/// internal compiler error message to the diagnostics list `diagnostics`, and
@@ -91,8 +90,7 @@ class InternalCompilerError {
/// reporter is set.
/// The ICE message contains the callsite's file and line.
/// Use the `<<` operator to append an error message to the ICE.
-#define TINT_UNREACHABLE(system, diagnostics) \
- TINT_ICE(system, diagnostics) << "TINT_UNREACHABLE "
+#define TINT_UNREACHABLE(system, diagnostics) TINT_ICE(system, diagnostics) << "TINT_UNREACHABLE "
/// TINT_UNIMPLEMENTED() is a macro for appending a "TINT_UNIMPLEMENTED"
/// internal compiler error message to the diagnostics list `diagnostics`, and
@@ -101,7 +99,7 @@ class InternalCompilerError {
/// The ICE message contains the callsite's file and line.
/// Use the `<<` operator to append an error message to the ICE.
#define TINT_UNIMPLEMENTED(system, diagnostics) \
- TINT_ICE(system, diagnostics) << "TINT_UNIMPLEMENTED "
+ TINT_ICE(system, diagnostics) << "TINT_UNIMPLEMENTED "
/// TINT_ASSERT() is a macro for checking the expression is true, triggering a
/// TINT_ICE if it is not.
@@ -111,13 +109,12 @@ class InternalCompilerError {
/// may silently fail in builds where SetInternalCompilerErrorReporter() is not
/// called. Only use in places where there's no sensible place to put proper
/// error handling.
-#define TINT_ASSERT(system, condition) \
- do { \
- if (!(condition)) { \
- tint::diag::List diagnostics; \
- TINT_ICE(system, diagnostics) \
- << "TINT_ASSERT(" #system ", " #condition ")"; \
- } \
- } while (false)
+#define TINT_ASSERT(system, condition) \
+ do { \
+ if (!(condition)) { \
+ tint::diag::List diagnostics; \
+ TINT_ICE(system, diagnostics) << "TINT_ASSERT(" #system ", " #condition ")"; \
+ } \
+ } while (false)
#endif // SRC_TINT_DEBUG_H_
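A minimal sketch (not from this change) of how the TINT_ICE and TINT_ASSERT macros reformatted above are used: the first argument names a diag::System enumerator, the second a diag::List to append to. The function, the Writer system choice and the 256 limit are illustrative only.

    #include <cstdint>

    #include "src/tint/debug.h"

    void CheckWorkgroupSize(uint32_t size, tint::diag::List& diagnostics) {
        // A failing TINT_ASSERT raises an ICE into a local, throw-away diag::List,
        // so (as the header notes) it may be silent unless a reporter is installed.
        TINT_ASSERT(Writer, size != 0u);
        if (size > 256u) {  // illustrative limit, not a real Tint rule
            TINT_ICE(Writer, diagnostics) << "unsupported workgroup size: " << size;
        }
    }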
diff --git a/chromium/third_party/dawn/src/tint/debug_test.cc b/chromium/third_party/dawn/src/tint/debug_test.cc
index 257b3128935..2d21277abaa 100644
--- a/chromium/third_party/dawn/src/tint/debug_test.cc
+++ b/chromium/third_party/dawn/src/tint/debug_test.cc
@@ -20,21 +20,20 @@ namespace tint {
namespace {
TEST(DebugTest, Unreachable) {
- EXPECT_FATAL_FAILURE(
- {
- diag::List diagnostics;
- TINT_UNREACHABLE(Test, diagnostics);
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ diag::List diagnostics;
+ TINT_UNREACHABLE(Test, diagnostics);
+ },
+ "internal compiler error");
}
TEST(DebugTest, AssertTrue) {
- TINT_ASSERT(Test, true);
+ TINT_ASSERT(Test, true);
}
TEST(DebugTest, AssertFalse) {
- EXPECT_FATAL_FAILURE({ TINT_ASSERT(Test, false); },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE({ TINT_ASSERT(Test, false); }, "internal compiler error");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/demangler.cc b/chromium/third_party/dawn/src/tint/demangler.cc
index cf5e4d6289b..0116be0ad43 100644
--- a/chromium/third_party/dawn/src/tint/demangler.cc
+++ b/chromium/third_party/dawn/src/tint/demangler.cc
@@ -28,35 +28,34 @@ Demangler::Demangler() = default;
Demangler::~Demangler() = default;
-std::string Demangler::Demangle(const SymbolTable& symbols,
- const std::string& str) const {
- std::stringstream out;
-
- size_t pos = 0;
- for (;;) {
- auto idx = str.find(kSymbol, pos);
- if (idx == std::string::npos) {
- out << str.substr(pos);
- break;
+std::string Demangler::Demangle(const SymbolTable& symbols, const std::string& str) const {
+ std::stringstream out;
+
+ size_t pos = 0;
+ for (;;) {
+ auto idx = str.find(kSymbol, pos);
+ if (idx == std::string::npos) {
+ out << str.substr(pos);
+ break;
+ }
+
+ out << str.substr(pos, idx - pos);
+
+ auto start_idx = idx + kSymbolLen;
+ auto end_idx = start_idx;
+ while (str[end_idx] >= '0' && str[end_idx] <= '9') {
+ end_idx++;
+ }
+ auto len = end_idx - start_idx;
+
+ auto id = str.substr(start_idx, len);
+ Symbol sym(std::stoi(id), symbols.ProgramID());
+ out << symbols.NameFor(sym);
+
+ pos = end_idx;
}
- out << str.substr(pos, idx - pos);
-
- auto start_idx = idx + kSymbolLen;
- auto end_idx = start_idx;
- while (str[end_idx] >= '0' && str[end_idx] <= '9') {
- end_idx++;
- }
- auto len = end_idx - start_idx;
-
- auto id = str.substr(start_idx, len);
- Symbol sym(std::stoi(id), symbols.ProgramID());
- out << symbols.NameFor(sym);
-
- pos = end_idx;
- }
-
- return out.str();
+ return out.str();
}
} // namespace tint
diff --git a/chromium/third_party/dawn/src/tint/demangler.h b/chromium/third_party/dawn/src/tint/demangler.h
index 8c0c9640512..c187d4814de 100644
--- a/chromium/third_party/dawn/src/tint/demangler.h
+++ b/chromium/third_party/dawn/src/tint/demangler.h
@@ -23,18 +23,17 @@ class SymbolTable;
/// Helper to demangle strings and replace symbols with original names
class Demangler {
- public:
- /// Constructor
- Demangler();
- /// Destructor
- ~Demangler();
-
- /// Transforms given string and replaces any symbols with original names
- /// @param symbols the symbol table
- /// @param str the string to replace
- /// @returns the string with any symbol replacements performed.
- std::string Demangle(const SymbolTable& symbols,
- const std::string& str) const;
+ public:
+ /// Constructor
+ Demangler();
+ /// Destructor
+ ~Demangler();
+
+ /// Transforms given string and replaces any symbols with original names
+ /// @param symbols the symbol table
+ /// @param str the string to replace
+ /// @returns the string with any symbol replacements performed.
+ std::string Demangle(const SymbolTable& symbols, const std::string& str) const;
};
} // namespace tint
diff --git a/chromium/third_party/dawn/src/tint/demangler_test.cc b/chromium/third_party/dawn/src/tint/demangler_test.cc
index f2c765810c3..8f7627a03c5 100644
--- a/chromium/third_party/dawn/src/tint/demangler_test.cc
+++ b/chromium/third_party/dawn/src/tint/demangler_test.cc
@@ -23,28 +23,28 @@ namespace {
using DemanglerTest = testing::Test;
TEST_F(DemanglerTest, NoSymbols) {
- SymbolTable t{ProgramID::New()};
- t.Register("sym1");
+ SymbolTable t{ProgramID::New()};
+ t.Register("sym1");
- Demangler d;
- EXPECT_EQ("test str", d.Demangle(t, "test str"));
+ Demangler d;
+ EXPECT_EQ("test str", d.Demangle(t, "test str"));
}
TEST_F(DemanglerTest, Symbol) {
- SymbolTable t{ProgramID::New()};
- t.Register("sym1");
+ SymbolTable t{ProgramID::New()};
+ t.Register("sym1");
- Demangler d;
- EXPECT_EQ("test sym1 str", d.Demangle(t, "test $1 str"));
+ Demangler d;
+ EXPECT_EQ("test sym1 str", d.Demangle(t, "test $1 str"));
}
TEST_F(DemanglerTest, MultipleSymbols) {
- SymbolTable t{ProgramID::New()};
- t.Register("sym1");
- t.Register("sym2");
+ SymbolTable t{ProgramID::New()};
+ t.Register("sym1");
+ t.Register("sym2");
- Demangler d;
- EXPECT_EQ("test sym1 sym2 sym1 str", d.Demangle(t, "test $1 $2 $1 str"));
+ Demangler d;
+ EXPECT_EQ("test sym1 sym2 sym1 str", d.Demangle(t, "test $1 $2 $1 str"));
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/diagnostic/diagnostic.cc b/chromium/third_party/dawn/src/tint/diagnostic/diagnostic.cc
index bd3f297cf63..a87e43b7a3c 100644
--- a/chromium/third_party/dawn/src/tint/diagnostic/diagnostic.cc
+++ b/chromium/third_party/dawn/src/tint/diagnostic/diagnostic.cc
@@ -38,9 +38,9 @@ List& List::operator=(const List& rhs) = default;
List& List::operator=(List&& rhs) = default;
std::string List::str() const {
- diag::Formatter::Style style;
- style.print_newline_at_end = false;
- return Formatter{style}.format(*this);
+ diag::Formatter::Style style;
+ style.print_newline_at_end = false;
+ return Formatter{style}.format(*this);
}
} // namespace tint::diag
diff --git a/chromium/third_party/dawn/src/tint/diagnostic/diagnostic.h b/chromium/third_party/dawn/src/tint/diagnostic/diagnostic.h
index 95df55366a3..de57c992a7f 100644
--- a/chromium/third_party/dawn/src/tint/diagnostic/diagnostic.h
+++ b/chromium/third_party/dawn/src/tint/diagnostic/diagnostic.h
@@ -29,220 +29,213 @@ enum class Severity { Note, Warning, Error, InternalCompilerError, Fatal };
/// @return true iff `a` is more than, or of equal severity to `b`
inline bool operator>=(Severity a, Severity b) {
- return static_cast<int>(a) >= static_cast<int>(b);
+ return static_cast<int>(a) >= static_cast<int>(b);
}
/// System is an enumerator of Tint systems that can be the originator of a
/// diagnostic message.
enum class System {
- AST,
- Clone,
- Inspector,
- Program,
- ProgramBuilder,
- Reader,
- Resolver,
- Semantic,
- Symbol,
- Test,
- Transform,
- Utils,
- Writer,
+ AST,
+ Clone,
+ Inspector,
+ Program,
+ ProgramBuilder,
+ Reader,
+ Resolver,
+ Semantic,
+ Symbol,
+ Test,
+ Transform,
+ Utils,
+ Writer,
};
/// Diagnostic holds all the information for a single compiler diagnostic
/// message.
class Diagnostic {
- public:
- /// Constructor
- Diagnostic();
- /// Copy constructor
- Diagnostic(const Diagnostic&);
- /// Destructor
- ~Diagnostic();
-
- /// Copy assignment operator
- /// @return this diagnostic
- Diagnostic& operator=(const Diagnostic&);
-
- /// severity is the severity of the diagnostic message.
- Severity severity = Severity::Error;
- /// source is the location of the diagnostic.
- Source source;
- /// message is the text associated with the diagnostic.
- std::string message;
- /// system is the Tint system that raised the diagnostic.
- System system;
- /// code is the error code, for example a validation error might have the code
- /// `"v-0001"`.
- const char* code = nullptr;
- /// A shared pointer to a Source::File. Only used if the diagnostic Source
- /// points to a file that was created specifically for this diagnostic
- /// (usually an ICE).
- std::shared_ptr<Source::File> owned_file = nullptr;
+ public:
+ /// Constructor
+ Diagnostic();
+ /// Copy constructor
+ Diagnostic(const Diagnostic&);
+ /// Destructor
+ ~Diagnostic();
+
+ /// Copy assignment operator
+ /// @return this diagnostic
+ Diagnostic& operator=(const Diagnostic&);
+
+ /// severity is the severity of the diagnostic message.
+ Severity severity = Severity::Error;
+ /// source is the location of the diagnostic.
+ Source source;
+ /// message is the text associated with the diagnostic.
+ std::string message;
+ /// system is the Tint system that raised the diagnostic.
+ System system;
+ /// code is the error code, for example a validation error might have the code
+ /// `"v-0001"`.
+ const char* code = nullptr;
+ /// A shared pointer to a Source::File. Only used if the diagnostic Source
+ /// points to a file that was created specifically for this diagnostic
+ /// (usually an ICE).
+ std::shared_ptr<Source::File> owned_file = nullptr;
};
/// List is a container of Diagnostic messages.
class List {
- public:
- /// iterator is the type used for range based iteration.
- using iterator = std::vector<Diagnostic>::const_iterator;
-
- /// Constructs the list with no elements.
- List();
-
- /// Copy constructor. Copies the diagnostics from `list` into this list.
- /// @param list the list of diagnostics to copy into this list.
- List(std::initializer_list<Diagnostic> list);
-
- /// Copy constructor. Copies the diagnostics from `list` into this list.
- /// @param list the list of diagnostics to copy into this list.
- List(const List& list);
-
- /// Move constructor. Moves the diagnostics from `list` into this list.
- /// @param list the list of diagnostics to move into this list.
- List(List&& list);
-
- /// Destructor
- ~List();
-
- /// Assignment operator. Copies the diagnostics from `list` into this list.
- /// @param list the list to copy into this list.
- /// @return this list.
- List& operator=(const List& list);
-
- /// Assignment move operator. Moves the diagnostics from `list` into this
- /// list.
- /// @param list the list to move into this list.
- /// @return this list.
- List& operator=(List&& list);
-
- /// adds a diagnostic to the end of this list.
- /// @param diag the diagnostic to append to this list.
- void add(Diagnostic&& diag) {
- if (diag.severity >= Severity::Error) {
- error_count_++;
+ public:
+ /// iterator is the type used for range based iteration.
+ using iterator = std::vector<Diagnostic>::const_iterator;
+
+ /// Constructs the list with no elements.
+ List();
+
+ /// Copy constructor. Copies the diagnostics from `list` into this list.
+ /// @param list the list of diagnostics to copy into this list.
+ List(std::initializer_list<Diagnostic> list);
+
+ /// Copy constructor. Copies the diagnostics from `list` into this list.
+ /// @param list the list of diagnostics to copy into this list.
+ List(const List& list);
+
+ /// Move constructor. Moves the diagnostics from `list` into this list.
+ /// @param list the list of diagnostics to move into this list.
+ List(List&& list);
+
+ /// Destructor
+ ~List();
+
+ /// Assignment operator. Copies the diagnostics from `list` into this list.
+ /// @param list the list to copy into this list.
+ /// @return this list.
+ List& operator=(const List& list);
+
+ /// Assignment move operator. Moves the diagnostics from `list` into this
+ /// list.
+ /// @param list the list to move into this list.
+ /// @return this list.
+ List& operator=(List&& list);
+
+ /// adds a diagnostic to the end of this list.
+ /// @param diag the diagnostic to append to this list.
+ void add(Diagnostic&& diag) {
+ if (diag.severity >= Severity::Error) {
+ error_count_++;
+ }
+ entries_.emplace_back(std::move(diag));
}
- entries_.emplace_back(std::move(diag));
- }
-
- /// adds a list of diagnostics to the end of this list.
- /// @param list the diagnostic to append to this list.
- void add(const List& list) {
- for (auto diag : list) {
- add(std::move(diag));
+
+ /// adds a list of diagnostics to the end of this list.
+ /// @param list the diagnostic to append to this list.
+ void add(const List& list) {
+ for (auto diag : list) {
+ add(std::move(diag));
+ }
+ }
+
+ /// adds the note message with the given Source to the end of this list.
+ /// @param system the system raising the note message
+ /// @param note_msg the note message
+ /// @param source the source of the note diagnostic
+ void add_note(System system, const std::string& note_msg, const Source& source) {
+ diag::Diagnostic note{};
+ note.severity = diag::Severity::Note;
+ note.system = system;
+ note.source = source;
+ note.message = note_msg;
+ add(std::move(note));
+ }
+
+ /// adds the warning message with the given Source to the end of this list.
+ /// @param system the system raising the warning message
+ /// @param warning_msg the warning message
+ /// @param source the source of the warning diagnostic
+ void add_warning(System system, const std::string& warning_msg, const Source& source) {
+ diag::Diagnostic warning{};
+ warning.severity = diag::Severity::Warning;
+ warning.system = system;
+ warning.source = source;
+ warning.message = warning_msg;
+ add(std::move(warning));
+ }
+
+ /// adds the error message without a source to the end of this list.
+ /// @param system the system raising the error message
+ /// @param err_msg the error message
+ void add_error(System system, std::string err_msg) {
+ diag::Diagnostic error{};
+ error.severity = diag::Severity::Error;
+ error.system = system;
+ error.message = std::move(err_msg);
+ add(std::move(error));
}
- }
-
- /// adds the note message with the given Source to the end of this list.
- /// @param system the system raising the note message
- /// @param note_msg the note message
- /// @param source the source of the note diagnostic
- void add_note(System system,
- const std::string& note_msg,
- const Source& source) {
- diag::Diagnostic note{};
- note.severity = diag::Severity::Note;
- note.system = system;
- note.source = source;
- note.message = note_msg;
- add(std::move(note));
- }
-
- /// adds the warning message with the given Source to the end of this list.
- /// @param system the system raising the warning message
- /// @param warning_msg the warning message
- /// @param source the source of the warning diagnostic
- void add_warning(System system,
- const std::string& warning_msg,
- const Source& source) {
- diag::Diagnostic warning{};
- warning.severity = diag::Severity::Warning;
- warning.system = system;
- warning.source = source;
- warning.message = warning_msg;
- add(std::move(warning));
- }
-
- /// adds the error message without a source to the end of this list.
- /// @param system the system raising the error message
- /// @param err_msg the error message
- void add_error(System system, std::string err_msg) {
- diag::Diagnostic error{};
- error.severity = diag::Severity::Error;
- error.system = system;
- error.message = std::move(err_msg);
- add(std::move(error));
- }
-
- /// adds the error message with the given Source to the end of this list.
- /// @param system the system raising the error message
- /// @param err_msg the error message
- /// @param source the source of the error diagnostic
- void add_error(System system, std::string err_msg, const Source& source) {
- diag::Diagnostic error{};
- error.severity = diag::Severity::Error;
- error.system = system;
- error.source = source;
- error.message = std::move(err_msg);
- add(std::move(error));
- }
-
- /// adds the error message with the given code and Source to the end of this
- /// list.
- /// @param system the system raising the error message
- /// @param code the error code
- /// @param err_msg the error message
- /// @param source the source of the error diagnostic
- void add_error(System system,
- const char* code,
- std::string err_msg,
- const Source& source) {
- diag::Diagnostic error{};
- error.code = code;
- error.severity = diag::Severity::Error;
- error.system = system;
- error.source = source;
- error.message = std::move(err_msg);
- add(std::move(error));
- }
-
- /// adds an internal compiler error message to the end of this list.
- /// @param system the system raising the error message
- /// @param err_msg the error message
- /// @param source the source of the internal compiler error
- /// @param file the Source::File owned by this diagnostic
- void add_ice(System system,
- const std::string& err_msg,
- const Source& source,
- std::shared_ptr<Source::File> file) {
- diag::Diagnostic ice{};
- ice.severity = diag::Severity::InternalCompilerError;
- ice.system = system;
- ice.source = source;
- ice.message = err_msg;
- ice.owned_file = std::move(file);
- add(std::move(ice));
- }
-
- /// @returns true iff the diagnostic list contains errors diagnostics (or of
- /// higher severity).
- bool contains_errors() const { return error_count_ > 0; }
- /// @returns the number of error diagnostics (or of higher severity).
- size_t error_count() const { return error_count_; }
- /// @returns the number of entries in the list.
- size_t count() const { return entries_.size(); }
- /// @returns the first diagnostic in the list.
- iterator begin() const { return entries_.begin(); }
- /// @returns the last diagnostic in the list.
- iterator end() const { return entries_.end(); }
-
- /// @returns a formatted string of all the diagnostics in this list.
- std::string str() const;
-
- private:
- std::vector<Diagnostic> entries_;
- size_t error_count_ = 0;
+
+ /// adds the error message with the given Source to the end of this list.
+ /// @param system the system raising the error message
+ /// @param err_msg the error message
+ /// @param source the source of the error diagnostic
+ void add_error(System system, std::string err_msg, const Source& source) {
+ diag::Diagnostic error{};
+ error.severity = diag::Severity::Error;
+ error.system = system;
+ error.source = source;
+ error.message = std::move(err_msg);
+ add(std::move(error));
+ }
+
+ /// adds the error message with the given code and Source to the end of this
+ /// list.
+ /// @param system the system raising the error message
+ /// @param code the error code
+ /// @param err_msg the error message
+ /// @param source the source of the error diagnostic
+ void add_error(System system, const char* code, std::string err_msg, const Source& source) {
+ diag::Diagnostic error{};
+ error.code = code;
+ error.severity = diag::Severity::Error;
+ error.system = system;
+ error.source = source;
+ error.message = std::move(err_msg);
+ add(std::move(error));
+ }
+
+ /// adds an internal compiler error message to the end of this list.
+ /// @param system the system raising the error message
+ /// @param err_msg the error message
+ /// @param source the source of the internal compiler error
+ /// @param file the Source::File owned by this diagnostic
+ void add_ice(System system,
+ const std::string& err_msg,
+ const Source& source,
+ std::shared_ptr<Source::File> file) {
+ diag::Diagnostic ice{};
+ ice.severity = diag::Severity::InternalCompilerError;
+ ice.system = system;
+ ice.source = source;
+ ice.message = err_msg;
+ ice.owned_file = std::move(file);
+ add(std::move(ice));
+ }
+
+ /// @returns true iff the diagnostic list contains errors diagnostics (or of
+ /// higher severity).
+ bool contains_errors() const { return error_count_ > 0; }
+ /// @returns the number of error diagnostics (or of higher severity).
+ size_t error_count() const { return error_count_; }
+ /// @returns the number of entries in the list.
+ size_t count() const { return entries_.size(); }
+ /// @returns the first diagnostic in the list.
+ iterator begin() const { return entries_.begin(); }
+ /// @returns the last diagnostic in the list.
+ iterator end() const { return entries_.end(); }
+
+ /// @returns a formatted string of all the diagnostics in this list.
+ std::string str() const;
+
+ private:
+ std::vector<Diagnostic> entries_;
+ size_t error_count_ = 0;
};
} // namespace tint::diag
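A minimal sketch (not part of the patch) of the diag::List API shown above: append a warning and an error, then print them with List::str(), which, per diagnostic.cc earlier in this diff, formats with no trailing newline. The message texts and the Resolver system are illustrative.

    #include <iostream>

    #include "src/tint/diagnostic/diagnostic.h"

    int ReportExample(const tint::Source& bad_source) {
        tint::diag::List diagnostics;
        diagnostics.add_warning(tint::diag::System::Resolver, "unused variable", bad_source);
        diagnostics.add_error(tint::diag::System::Resolver, "type mismatch", bad_source);

        std::cout << diagnostics.str() << std::endl;
        return diagnostics.contains_errors() ? 1 : 0;  // 1: at least one Error or worse
    }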
diff --git a/chromium/third_party/dawn/src/tint/diagnostic/diagnostic_test.cc b/chromium/third_party/dawn/src/tint/diagnostic/diagnostic_test.cc
index 8bf65e61fac..0494e7660e5 100644
--- a/chromium/third_party/dawn/src/tint/diagnostic/diagnostic_test.cc
+++ b/chromium/third_party/dawn/src/tint/diagnostic/diagnostic_test.cc
@@ -21,19 +21,19 @@ namespace tint::diag {
namespace {
TEST(DiagListTest, OwnedFilesShared) {
- auto file = std::make_shared<Source::File>("path", "content");
+ auto file = std::make_shared<Source::File>("path", "content");
- diag::List list_a, list_b;
- {
- diag::Diagnostic diag{};
- diag.source = Source{Source::Range{{0, 0}}, file.get()};
- list_a.add(std::move(diag));
- }
+ diag::List list_a, list_b;
+ {
+ diag::Diagnostic diag{};
+ diag.source = Source{Source::Range{{0, 0}}, file.get()};
+ list_a.add(std::move(diag));
+ }
- list_b = list_a;
+ list_b = list_a;
- ASSERT_EQ(list_b.count(), list_a.count());
- EXPECT_EQ(list_b.begin()->source.file, file.get());
+ ASSERT_EQ(list_b.count(), list_a.count());
+ EXPECT_EQ(list_b.begin()->source.file, file.get());
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/diagnostic/formatter.cc b/chromium/third_party/dawn/src/tint/diagnostic/formatter.cc
index 262be862c58..db69397b532 100644
--- a/chromium/third_party/dawn/src/tint/diagnostic/formatter.cc
+++ b/chromium/third_party/dawn/src/tint/diagnostic/formatter.cc
@@ -25,243 +25,239 @@ namespace tint::diag {
namespace {
const char* to_str(Severity severity) {
- switch (severity) {
- case Severity::Note:
- return "note";
- case Severity::Warning:
- return "warning";
- case Severity::Error:
- return "error";
- case Severity::InternalCompilerError:
- return "internal compiler error";
- case Severity::Fatal:
- return "fatal";
- }
- return "";
+ switch (severity) {
+ case Severity::Note:
+ return "note";
+ case Severity::Warning:
+ return "warning";
+ case Severity::Error:
+ return "error";
+ case Severity::InternalCompilerError:
+ return "internal compiler error";
+ case Severity::Fatal:
+ return "fatal";
+ }
+ return "";
}
std::string to_str(const Source::Location& location) {
- std::stringstream ss;
- if (location.line > 0) {
- ss << location.line;
- if (location.column > 0) {
- ss << ":" << location.column;
+ std::stringstream ss;
+ if (location.line > 0) {
+ ss << location.line;
+ if (location.column > 0) {
+ ss << ":" << location.column;
+ }
}
- }
- return ss.str();
+ return ss.str();
}
} // namespace
/// State holds the internal formatter state for a format() call.
struct Formatter::State {
- /// Constructs a State associated with the given printer.
- /// @param p the printer to write formatted messages to.
- explicit State(Printer* p) : printer(p) {}
- ~State() { flush(); }
-
- /// set_style() sets the current style to new_style, flushing any pending
- /// messages to the printer if the style changed.
- /// @param new_style the new style to apply for future written messages.
- void set_style(const diag::Style& new_style) {
- if (style.color != new_style.color || style.bold != new_style.bold) {
- flush();
- style = new_style;
+ /// Constructs a State associated with the given printer.
+ /// @param p the printer to write formatted messages to.
+ explicit State(Printer* p) : printer(p) {}
+ ~State() { flush(); }
+
+ /// set_style() sets the current style to new_style, flushing any pending
+ /// messages to the printer if the style changed.
+ /// @param new_style the new style to apply for future written messages.
+ void set_style(const diag::Style& new_style) {
+ if (style.color != new_style.color || style.bold != new_style.bold) {
+ flush();
+ style = new_style;
+ }
}
- }
-
- /// flush writes any pending messages to the printer, clearing the buffer.
- void flush() {
- auto str = stream.str();
- if (str.length() > 0) {
- printer->write(str, style);
- std::stringstream reset;
- stream.swap(reset);
+
+ /// flush writes any pending messages to the printer, clearing the buffer.
+ void flush() {
+ auto str = stream.str();
+ if (str.length() > 0) {
+ printer->write(str, style);
+ std::stringstream reset;
+ stream.swap(reset);
+ }
+ }
+
+ /// operator<< queues msg to be written to the printer.
+ /// @param msg the value or string to write to the printer
+ /// @returns this State so that calls can be chained
+ template <typename T>
+ State& operator<<(const T& msg) {
+ stream << msg;
+ return *this;
}
- }
-
- /// operator<< queues msg to be written to the printer.
- /// @param msg the value or string to write to the printer
- /// @returns this State so that calls can be chained
- template <typename T>
- State& operator<<(const T& msg) {
- stream << msg;
- return *this;
- }
-
- /// newline queues a newline to be written to the printer.
- void newline() { stream << std::endl; }
-
- /// repeat queues the character c to be written to the printer n times.
- /// @param c the character to print `n` times
- /// @param n the number of times to print character `c`
- void repeat(char c, size_t n) {
- std::fill_n(std::ostream_iterator<char>(stream), n, c);
- }
-
- private:
- Printer* printer;
- diag::Style style;
- std::stringstream stream;
+
+ /// newline queues a newline to be written to the printer.
+ void newline() { stream << std::endl; }
+
+ /// repeat queues the character c to be written to the printer n times.
+ /// @param c the character to print `n` times
+ /// @param n the number of times to print character `c`
+ void repeat(char c, size_t n) { std::fill_n(std::ostream_iterator<char>(stream), n, c); }
+
+ private:
+ Printer* printer;
+ diag::Style style;
+ std::stringstream stream;
};
Formatter::Formatter() {}
Formatter::Formatter(const Style& style) : style_(style) {}
void Formatter::format(const List& list, Printer* printer) const {
- State state{printer};
+ State state{printer};
- bool first = true;
- for (auto diag : list) {
- state.set_style({});
- if (!first) {
- state.newline();
+ bool first = true;
+ for (auto diag : list) {
+ state.set_style({});
+ if (!first) {
+ state.newline();
+ }
+ format(diag, state);
+ first = false;
}
- format(diag, state);
- first = false;
- }
- if (style_.print_newline_at_end) {
- state.newline();
- }
+ if (style_.print_newline_at_end) {
+ state.newline();
+ }
}
void Formatter::format(const Diagnostic& diag, State& state) const {
- auto const& src = diag.source;
- auto const& rng = src.range;
- bool has_code = diag.code != nullptr && diag.code[0] != '\0';
-
- state.set_style({Color::kDefault, true});
-
- struct TextAndColor {
- std::string text;
- Color color;
- bool bold = false;
- };
- std::vector<TextAndColor> prefix;
- prefix.reserve(6);
-
- if (style_.print_file && src.file != nullptr) {
- if (rng.begin.line > 0) {
- prefix.emplace_back(TextAndColor{src.file->path + ":" + to_str(rng.begin),
- Color::kDefault});
- } else {
- prefix.emplace_back(TextAndColor{src.file->path, Color::kDefault});
- }
- } else if (rng.begin.line > 0) {
- prefix.emplace_back(TextAndColor{to_str(rng.begin), Color::kDefault});
- }
-
- Color severity_color = Color::kDefault;
- switch (diag.severity) {
- case Severity::Note:
- break;
- case Severity::Warning:
- severity_color = Color::kYellow;
- break;
- case Severity::Error:
- severity_color = Color::kRed;
- break;
- case Severity::Fatal:
- case Severity::InternalCompilerError:
- severity_color = Color::kMagenta;
- break;
- }
- if (style_.print_severity) {
- prefix.emplace_back(
- TextAndColor{to_str(diag.severity), severity_color, true});
- }
- if (has_code) {
- prefix.emplace_back(TextAndColor{diag.code, severity_color});
- }
-
- for (size_t i = 0; i < prefix.size(); i++) {
- if (i > 0) {
- state << " ";
- }
- state.set_style({prefix[i].color, prefix[i].bold});
- state << prefix[i].text;
- }
-
- state.set_style({Color::kDefault, true});
- if (!prefix.empty()) {
- state << ": ";
- }
- state << diag.message;
-
- if (style_.print_line && src.file && rng.begin.line > 0) {
- state.newline();
- state.set_style({Color::kDefault, false});
-
- for (size_t line_num = rng.begin.line;
- (line_num <= rng.end.line) &&
- (line_num <= src.file->content.lines.size());
- line_num++) {
- auto& line = src.file->content.lines[line_num - 1];
- auto line_len = line.size();
-
- bool is_ascii = true;
- for (auto c : line) {
- if (c == '\t') {
- state.repeat(' ', style_.tab_width);
+ auto const& src = diag.source;
+ auto const& rng = src.range;
+ bool has_code = diag.code != nullptr && diag.code[0] != '\0';
+
+ state.set_style({Color::kDefault, true});
+
+ struct TextAndColor {
+ std::string text;
+ Color color;
+ bool bold = false;
+ };
+ std::vector<TextAndColor> prefix;
+ prefix.reserve(6);
+
+ if (style_.print_file && src.file != nullptr) {
+ if (rng.begin.line > 0) {
+ prefix.emplace_back(
+ TextAndColor{src.file->path + ":" + to_str(rng.begin), Color::kDefault});
} else {
- state << c;
- }
- if (c & 0x80) {
- is_ascii = false;
+ prefix.emplace_back(TextAndColor{src.file->path, Color::kDefault});
}
- }
-
- state.newline();
-
- // If the line contains non-ascii characters, then we cannot assume that
- // a single utf8 code unit represents a single glyph, so don't attempt to
- // draw squiggles.
- if (!is_ascii) {
- continue;
- }
-
- state.set_style({Color::kCyan, false});
-
- // Count the number of glyphs in the line span.
- // start and end use 1-based indexing.
- auto num_glyphs = [&](size_t start, size_t end) {
- size_t count = 0;
- start = (start > 0) ? (start - 1) : 0;
- end = (end > 0) ? (end - 1) : 0;
- for (size_t i = start; (i < end) && (i < line_len); i++) {
- count += (line[i] == '\t') ? style_.tab_width : 1;
+ } else if (rng.begin.line > 0) {
+ prefix.emplace_back(TextAndColor{to_str(rng.begin), Color::kDefault});
+ }
+
+ Color severity_color = Color::kDefault;
+ switch (diag.severity) {
+ case Severity::Note:
+ break;
+ case Severity::Warning:
+ severity_color = Color::kYellow;
+ break;
+ case Severity::Error:
+ severity_color = Color::kRed;
+ break;
+ case Severity::Fatal:
+ case Severity::InternalCompilerError:
+ severity_color = Color::kMagenta;
+ break;
+ }
+ if (style_.print_severity) {
+ prefix.emplace_back(TextAndColor{to_str(diag.severity), severity_color, true});
+ }
+ if (has_code) {
+ prefix.emplace_back(TextAndColor{diag.code, severity_color});
+ }
+
+ for (size_t i = 0; i < prefix.size(); i++) {
+ if (i > 0) {
+ state << " ";
}
- return count;
- };
-
- if (line_num == rng.begin.line && line_num == rng.end.line) {
- // Single line
- state.repeat(' ', num_glyphs(1, rng.begin.column));
- state.repeat('^', std::max<size_t>(
- num_glyphs(rng.begin.column, rng.end.column), 1));
- } else if (line_num == rng.begin.line) {
- // Start of multi-line
- state.repeat(' ', num_glyphs(1, rng.begin.column));
- state.repeat('^', num_glyphs(rng.begin.column, line_len + 1));
- } else if (line_num == rng.end.line) {
- // End of multi-line
- state.repeat('^', num_glyphs(1, rng.end.column));
- } else {
- // Middle of multi-line
- state.repeat('^', num_glyphs(1, line_len + 1));
- }
- state.newline();
+ state.set_style({prefix[i].color, prefix[i].bold});
+ state << prefix[i].text;
+ }
+
+ state.set_style({Color::kDefault, true});
+ if (!prefix.empty()) {
+ state << ": ";
}
+ state << diag.message;
+
+ if (style_.print_line && src.file && rng.begin.line > 0) {
+ state.newline();
+ state.set_style({Color::kDefault, false});
+
+ for (size_t line_num = rng.begin.line;
+ (line_num <= rng.end.line) && (line_num <= src.file->content.lines.size());
+ line_num++) {
+ auto& line = src.file->content.lines[line_num - 1];
+ auto line_len = line.size();
+
+ bool is_ascii = true;
+ for (auto c : line) {
+ if (c == '\t') {
+ state.repeat(' ', style_.tab_width);
+ } else {
+ state << c;
+ }
+ if (c & 0x80) {
+ is_ascii = false;
+ }
+ }
+
+ state.newline();
+
+ // If the line contains non-ascii characters, then we cannot assume that
+ // a single utf8 code unit represents a single glyph, so don't attempt to
+ // draw squiggles.
+ if (!is_ascii) {
+ continue;
+ }
+
+ state.set_style({Color::kCyan, false});
+
+ // Count the number of glyphs in the line span.
+ // start and end use 1-based indexing.
+ auto num_glyphs = [&](size_t start, size_t end) {
+ size_t count = 0;
+ start = (start > 0) ? (start - 1) : 0;
+ end = (end > 0) ? (end - 1) : 0;
+ for (size_t i = start; (i < end) && (i < line_len); i++) {
+ count += (line[i] == '\t') ? style_.tab_width : 1;
+ }
+ return count;
+ };
+
+ if (line_num == rng.begin.line && line_num == rng.end.line) {
+ // Single line
+ state.repeat(' ', num_glyphs(1, rng.begin.column));
+ state.repeat('^',
+ std::max<size_t>(num_glyphs(rng.begin.column, rng.end.column), 1));
+ } else if (line_num == rng.begin.line) {
+ // Start of multi-line
+ state.repeat(' ', num_glyphs(1, rng.begin.column));
+ state.repeat('^', num_glyphs(rng.begin.column, line_len + 1));
+ } else if (line_num == rng.end.line) {
+ // End of multi-line
+ state.repeat('^', num_glyphs(1, rng.end.column));
+ } else {
+ // Middle of multi-line
+ state.repeat('^', num_glyphs(1, line_len + 1));
+ }
+ state.newline();
+ }
- state.set_style({});
- }
+ state.set_style({});
+ }
}
std::string Formatter::format(const List& list) const {
- StringPrinter printer;
- format(list, &printer);
- return printer.str();
+ StringPrinter printer;
+ format(list, &printer);
+ return printer.str();
}
Formatter::~Formatter() = default;
diff --git a/chromium/third_party/dawn/src/tint/diagnostic/formatter.h b/chromium/third_party/dawn/src/tint/diagnostic/formatter.h
index 179bb2dd95e..5810f0b31f5 100644
--- a/chromium/third_party/dawn/src/tint/diagnostic/formatter.h
+++ b/chromium/third_party/dawn/src/tint/diagnostic/formatter.h
@@ -25,44 +25,44 @@ class Printer;
/// Formatter are used to print a list of diagnostics messages.
class Formatter {
- public:
- /// Style controls the formatter's output style.
- struct Style {
- /// include the file path for each diagnostic
- bool print_file = true;
- /// include the severity for each diagnostic
- bool print_severity = true;
- /// include the source line(s) for the diagnostic
- bool print_line = true;
- /// print a newline at the end of a diagnostic list
- bool print_newline_at_end = true;
- /// width of a tab character
- size_t tab_width = 2u;
- };
+ public:
+ /// Style controls the formatter's output style.
+ struct Style {
+ /// include the file path for each diagnostic
+ bool print_file = true;
+ /// include the severity for each diagnostic
+ bool print_severity = true;
+ /// include the source line(s) for the diagnostic
+ bool print_line = true;
+ /// print a newline at the end of a diagnostic list
+ bool print_newline_at_end = true;
+ /// width of a tab character
+ size_t tab_width = 2u;
+ };
- /// Constructor for the formatter using a default style.
- Formatter();
+ /// Constructor for the formatter using a default style.
+ Formatter();
- /// Constructor for the formatter using the custom style.
- /// @param style the style used for the formatter.
- explicit Formatter(const Style& style);
+ /// Constructor for the formatter using the custom style.
+ /// @param style the style used for the formatter.
+ explicit Formatter(const Style& style);
- ~Formatter();
+ ~Formatter();
- /// @param list the list of diagnostic messages to format
- /// @param printer the printer used to display the formatted diagnostics
- void format(const List& list, Printer* printer) const;
+ /// @param list the list of diagnostic messages to format
+ /// @param printer the printer used to display the formatted diagnostics
+ void format(const List& list, Printer* printer) const;
- /// @return the list of diagnostics `list` formatted to a string.
- /// @param list the list of diagnostic messages to format
- std::string format(const List& list) const;
+ /// @return the list of diagnostics `list` formatted to a string.
+ /// @param list the list of diagnostic messages to format
+ std::string format(const List& list) const;
- private:
- struct State;
+ private:
+ struct State;
- void format(const Diagnostic& diag, State& state) const;
+ void format(const Diagnostic& diag, State& state) const;
- const Style style_;
+ const Style style_;
};
} // namespace tint::diag
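A minimal sketch (not from the patch) exercising the Style knobs declared above through the string-returning format() overload; everything here comes from the header, only the wrapper function name is an assumption.

    #include <iostream>

    #include "src/tint/diagnostic/diagnostic.h"
    #include "src/tint/diagnostic/formatter.h"

    void PrintWithoutPaths(const tint::diag::List& diagnostics) {
        tint::diag::Formatter::Style style;
        style.print_file = false;     // omit the "file.name:" prefix
        style.print_severity = true;  // keep the "warning:" / "error:" labels
        style.tab_width = 4u;         // tabs in quoted source lines expand to 4 spaces
        tint::diag::Formatter formatter{style};
        std::cout << formatter.format(diagnostics);
    }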
diff --git a/chromium/third_party/dawn/src/tint/diagnostic/formatter_test.cc b/chromium/third_party/dawn/src/tint/diagnostic/formatter_test.cc
index fc342fb6cc8..df11d74ca0f 100644
--- a/chromium/third_party/dawn/src/tint/diagnostic/formatter_test.cc
+++ b/chromium/third_party/dawn/src/tint/diagnostic/formatter_test.cc
@@ -27,13 +27,13 @@ Diagnostic Diag(Severity severity,
std::string message,
System system,
const char* code = nullptr) {
- Diagnostic d;
- d.severity = severity;
- d.source = source;
- d.message = std::move(message);
- d.system = system;
- d.code = code;
- return d;
+ Diagnostic d;
+ d.severity = severity;
+ d.source = source;
+ d.message = std::move(message);
+ d.system = system;
+ d.code = code;
+ return d;
}
constexpr const char* ascii_content = // Note: words are tab-delimited
@@ -43,120 +43,110 @@ the snake says quack
the snail says ???
)";
-constexpr const char* utf8_content = // Note: words are tab-delimited
+constexpr const char* utf8_content = // Note: words are tab-delimited
"the \xf0\x9f\x90\xb1 says meow\n" // NOLINT: tabs
"the \xf0\x9f\x90\x95 says woof\n" // NOLINT: tabs
"the \xf0\x9f\x90\x8d says quack\n" // NOLINT: tabs
"the \xf0\x9f\x90\x8c says ???\n"; // NOLINT: tabs
class DiagFormatterTest : public testing::Test {
- public:
- Source::File ascii_file{"file.name", ascii_content};
- Source::File utf8_file{"file.name", utf8_content};
- Diagnostic ascii_diag_note =
- Diag(Severity::Note,
- Source{Source::Range{Source::Location{1, 14}}, &ascii_file},
- "purr",
- System::Test);
- Diagnostic ascii_diag_warn =
- Diag(Severity::Warning,
- Source{Source::Range{{2, 14}, {2, 18}}, &ascii_file},
- "grrr",
- System::Test);
- Diagnostic ascii_diag_err =
- Diag(Severity::Error,
- Source{Source::Range{{3, 16}, {3, 21}}, &ascii_file},
- "hiss",
- System::Test,
- "abc123");
- Diagnostic ascii_diag_ice =
- Diag(Severity::InternalCompilerError,
- Source{Source::Range{{4, 16}, {4, 19}}, &ascii_file},
- "unreachable",
- System::Test);
- Diagnostic ascii_diag_fatal =
- Diag(Severity::Fatal,
- Source{Source::Range{{4, 16}, {4, 19}}, &ascii_file},
- "nothing",
- System::Test);
-
- Diagnostic utf8_diag_note =
- Diag(Severity::Note,
- Source{Source::Range{Source::Location{1, 15}}, &utf8_file},
- "purr",
- System::Test);
- Diagnostic utf8_diag_warn =
- Diag(Severity::Warning,
- Source{Source::Range{{2, 15}, {2, 19}}, &utf8_file},
- "grrr",
- System::Test);
- Diagnostic utf8_diag_err =
- Diag(Severity::Error,
- Source{Source::Range{{3, 15}, {3, 20}}, &utf8_file},
- "hiss",
- System::Test,
- "abc123");
- Diagnostic utf8_diag_ice =
- Diag(Severity::InternalCompilerError,
- Source{Source::Range{{4, 15}, {4, 18}}, &utf8_file},
- "unreachable",
- System::Test);
- Diagnostic utf8_diag_fatal =
- Diag(Severity::Fatal,
- Source{Source::Range{{4, 15}, {4, 18}}, &utf8_file},
- "nothing",
- System::Test);
+ public:
+ Source::File ascii_file{"file.name", ascii_content};
+ Source::File utf8_file{"file.name", utf8_content};
+ Diagnostic ascii_diag_note = Diag(Severity::Note,
+ Source{Source::Range{Source::Location{1, 14}}, &ascii_file},
+ "purr",
+ System::Test);
+ Diagnostic ascii_diag_warn = Diag(Severity::Warning,
+ Source{Source::Range{{2, 14}, {2, 18}}, &ascii_file},
+ "grrr",
+ System::Test);
+ Diagnostic ascii_diag_err = Diag(Severity::Error,
+ Source{Source::Range{{3, 16}, {3, 21}}, &ascii_file},
+ "hiss",
+ System::Test,
+ "abc123");
+ Diagnostic ascii_diag_ice = Diag(Severity::InternalCompilerError,
+ Source{Source::Range{{4, 16}, {4, 19}}, &ascii_file},
+ "unreachable",
+ System::Test);
+ Diagnostic ascii_diag_fatal = Diag(Severity::Fatal,
+ Source{Source::Range{{4, 16}, {4, 19}}, &ascii_file},
+ "nothing",
+ System::Test);
+
+ Diagnostic utf8_diag_note = Diag(Severity::Note,
+ Source{Source::Range{Source::Location{1, 15}}, &utf8_file},
+ "purr",
+ System::Test);
+ Diagnostic utf8_diag_warn = Diag(Severity::Warning,
+ Source{Source::Range{{2, 15}, {2, 19}}, &utf8_file},
+ "grrr",
+ System::Test);
+ Diagnostic utf8_diag_err = Diag(Severity::Error,
+ Source{Source::Range{{3, 15}, {3, 20}}, &utf8_file},
+ "hiss",
+ System::Test,
+ "abc123");
+ Diagnostic utf8_diag_ice = Diag(Severity::InternalCompilerError,
+ Source{Source::Range{{4, 15}, {4, 18}}, &utf8_file},
+ "unreachable",
+ System::Test);
+ Diagnostic utf8_diag_fatal = Diag(Severity::Fatal,
+ Source{Source::Range{{4, 15}, {4, 18}}, &utf8_file},
+ "nothing",
+ System::Test);
};
TEST_F(DiagFormatterTest, Simple) {
- Formatter fmt{{false, false, false, false}};
- auto got = fmt.format(List{ascii_diag_note, ascii_diag_warn, ascii_diag_err});
- auto* expect = R"(1:14: purr
+ Formatter fmt{{false, false, false, false}};
+ auto got = fmt.format(List{ascii_diag_note, ascii_diag_warn, ascii_diag_err});
+ auto* expect = R"(1:14: purr
2:14: grrr
3:16 abc123: hiss)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(DiagFormatterTest, SimpleNewlineAtEnd) {
- Formatter fmt{{false, false, false, true}};
- auto got = fmt.format(List{ascii_diag_note, ascii_diag_warn, ascii_diag_err});
- auto* expect = R"(1:14: purr
+ Formatter fmt{{false, false, false, true}};
+ auto got = fmt.format(List{ascii_diag_note, ascii_diag_warn, ascii_diag_err});
+ auto* expect = R"(1:14: purr
2:14: grrr
3:16 abc123: hiss
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(DiagFormatterTest, SimpleNoSource) {
- Formatter fmt{{false, false, false, false}};
- auto diag = Diag(Severity::Note, Source{}, "no source!", System::Test);
- auto got = fmt.format(List{diag});
- auto* expect = "no source!";
- ASSERT_EQ(expect, got);
+ Formatter fmt{{false, false, false, false}};
+ auto diag = Diag(Severity::Note, Source{}, "no source!", System::Test);
+ auto got = fmt.format(List{diag});
+ auto* expect = "no source!";
+ ASSERT_EQ(expect, got);
}
TEST_F(DiagFormatterTest, WithFile) {
- Formatter fmt{{true, false, false, false}};
- auto got = fmt.format(List{ascii_diag_note, ascii_diag_warn, ascii_diag_err});
- auto* expect = R"(file.name:1:14: purr
+ Formatter fmt{{true, false, false, false}};
+ auto got = fmt.format(List{ascii_diag_note, ascii_diag_warn, ascii_diag_err});
+ auto* expect = R"(file.name:1:14: purr
file.name:2:14: grrr
file.name:3:16 abc123: hiss)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(DiagFormatterTest, WithSeverity) {
- Formatter fmt{{false, true, false, false}};
- auto got = fmt.format(List{ascii_diag_note, ascii_diag_warn, ascii_diag_err});
- auto* expect = R"(1:14 note: purr
+ Formatter fmt{{false, true, false, false}};
+ auto got = fmt.format(List{ascii_diag_note, ascii_diag_warn, ascii_diag_err});
+ auto* expect = R"(1:14 note: purr
2:14 warning: grrr
3:16 error abc123: hiss)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(DiagFormatterTest, WithLine) {
- Formatter fmt{{false, false, true, false}};
- auto got = fmt.format(List{ascii_diag_note, ascii_diag_warn, ascii_diag_err});
- auto* expect = R"(1:14: purr
+ Formatter fmt{{false, false, true, false}};
+ auto got = fmt.format(List{ascii_diag_note, ascii_diag_warn, ascii_diag_err});
+ auto* expect = R"(1:14: purr
the cat says meow
^
@@ -168,28 +158,28 @@ the dog says woof
the snake says quack
^^^^^
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(DiagFormatterTest, UnicodeWithLine) {
- Formatter fmt{{false, false, true, false}};
- auto got = fmt.format(List{utf8_diag_note, utf8_diag_warn, utf8_diag_err});
- auto* expect =
- "1:15: purr\n"
- "the \xf0\x9f\x90\xb1 says meow\n"
- "\n"
- "2:15: grrr\n"
- "the \xf0\x9f\x90\x95 says woof\n"
- "\n"
- "3:15 abc123: hiss\n"
- "the \xf0\x9f\x90\x8d says quack\n";
- ASSERT_EQ(expect, got);
+ Formatter fmt{{false, false, true, false}};
+ auto got = fmt.format(List{utf8_diag_note, utf8_diag_warn, utf8_diag_err});
+ auto* expect =
+ "1:15: purr\n"
+ "the \xf0\x9f\x90\xb1 says meow\n"
+ "\n"
+ "2:15: grrr\n"
+ "the \xf0\x9f\x90\x95 says woof\n"
+ "\n"
+ "3:15 abc123: hiss\n"
+ "the \xf0\x9f\x90\x8d says quack\n";
+ ASSERT_EQ(expect, got);
}
TEST_F(DiagFormatterTest, BasicWithFileSeverityLine) {
- Formatter fmt{{true, true, true, false}};
- auto got = fmt.format(List{ascii_diag_note, ascii_diag_warn, ascii_diag_err});
- auto* expect = R"(file.name:1:14 note: purr
+ Formatter fmt{{true, true, true, false}};
+ auto got = fmt.format(List{ascii_diag_note, ascii_diag_warn, ascii_diag_err});
+ auto* expect = R"(file.name:1:14 note: purr
the cat says meow
^
@@ -201,16 +191,15 @@ file.name:3:16 error abc123: hiss
the snake says quack
^^^^^
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(DiagFormatterTest, BasicWithMultiLine) {
- auto multiline = Diag(Severity::Warning,
- Source{Source::Range{{2, 9}, {4, 15}}, &ascii_file},
- "multiline", System::Test);
- Formatter fmt{{false, false, true, false}};
- auto got = fmt.format(List{multiline});
- auto* expect = R"(2:9: multiline
+ auto multiline = Diag(Severity::Warning, Source{Source::Range{{2, 9}, {4, 15}}, &ascii_file},
+ "multiline", System::Test);
+ Formatter fmt{{false, false, true, false}};
+ auto got = fmt.format(List{multiline});
+ auto* expect = R"(2:9: multiline
the dog says woof
^^^^^^^^^^
the snake says quack
@@ -218,27 +207,26 @@ the snake says quack
the snail says ???
^^^^^^^^^^^^^^^^
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(DiagFormatterTest, UnicodeWithMultiLine) {
- auto multiline = Diag(Severity::Warning,
- Source{Source::Range{{2, 9}, {4, 15}}, &utf8_file},
- "multiline", System::Test);
- Formatter fmt{{false, false, true, false}};
- auto got = fmt.format(List{multiline});
- auto* expect =
- "2:9: multiline\n"
- "the \xf0\x9f\x90\x95 says woof\n"
- "the \xf0\x9f\x90\x8d says quack\n"
- "the \xf0\x9f\x90\x8c says ???\n";
- ASSERT_EQ(expect, got);
+ auto multiline = Diag(Severity::Warning, Source{Source::Range{{2, 9}, {4, 15}}, &utf8_file},
+ "multiline", System::Test);
+ Formatter fmt{{false, false, true, false}};
+ auto got = fmt.format(List{multiline});
+ auto* expect =
+ "2:9: multiline\n"
+ "the \xf0\x9f\x90\x95 says woof\n"
+ "the \xf0\x9f\x90\x8d says quack\n"
+ "the \xf0\x9f\x90\x8c says ???\n";
+ ASSERT_EQ(expect, got);
}
TEST_F(DiagFormatterTest, BasicWithFileSeverityLineTab4) {
- Formatter fmt{{true, true, true, false, 4u}};
- auto got = fmt.format(List{ascii_diag_note, ascii_diag_warn, ascii_diag_err});
- auto* expect = R"(file.name:1:14 note: purr
+ Formatter fmt{{true, true, true, false, 4u}};
+ auto got = fmt.format(List{ascii_diag_note, ascii_diag_warn, ascii_diag_err});
+ auto* expect = R"(file.name:1:14 note: purr
the cat says meow
^
@@ -250,16 +238,15 @@ file.name:3:16 error abc123: hiss
the snake says quack
^^^^^
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(DiagFormatterTest, BasicWithMultiLineTab4) {
- auto multiline = Diag(Severity::Warning,
- Source{Source::Range{{2, 9}, {4, 15}}, &ascii_file},
- "multiline", System::Test);
- Formatter fmt{{false, false, true, false, 4u}};
- auto got = fmt.format(List{multiline});
- auto* expect = R"(2:9: multiline
+ auto multiline = Diag(Severity::Warning, Source{Source::Range{{2, 9}, {4, 15}}, &ascii_file},
+ "multiline", System::Test);
+ Formatter fmt{{false, false, true, false, 4u}};
+ auto got = fmt.format(List{multiline});
+ auto* expect = R"(2:9: multiline
the dog says woof
^^^^^^^^^^^^
the snake says quack
@@ -267,41 +254,40 @@ the snake says quack
the snail says ???
^^^^^^^^^^^^^^^^^^^^
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(DiagFormatterTest, ICE) {
- Formatter fmt{{}};
- auto got = fmt.format(List{ascii_diag_ice});
- auto* expect = R"(file.name:4:16 internal compiler error: unreachable
+ Formatter fmt{{}};
+ auto got = fmt.format(List{ascii_diag_ice});
+ auto* expect = R"(file.name:4:16 internal compiler error: unreachable
the snail says ???
^^^
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(DiagFormatterTest, Fatal) {
- Formatter fmt{{}};
- auto got = fmt.format(List{ascii_diag_fatal});
- auto* expect = R"(file.name:4:16 fatal: nothing
+ Formatter fmt{{}};
+ auto got = fmt.format(List{ascii_diag_fatal});
+ auto* expect = R"(file.name:4:16 fatal: nothing
the snail says ???
^^^
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(DiagFormatterTest, RangeOOB) {
- Formatter fmt{{true, true, true, true}};
- diag::List list;
- list.add_error(System::Test, "oob",
- Source{{{10, 20}, {30, 20}}, &ascii_file});
- auto got = fmt.format(list);
- auto* expect = R"(file.name:10:20 error: oob
+ Formatter fmt{{true, true, true, true}};
+ diag::List list;
+ list.add_error(System::Test, "oob", Source{{{10, 20}, {30, 20}}, &ascii_file});
+ auto got = fmt.format(list);
+ auto* expect = R"(file.name:10:20 error: oob
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
} // namespace
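
Note: the formatter options exercised by these tests follow the flag order {show file, show severity, show source snippet, trailing newline, tab width}, as inferred from the test names above. A minimal usage sketch, assuming the tint::diag formatter and printer headers that this test file relies on (the ReportDiagnostics wrapper is hypothetical, not part of the patch):

    // Sketch only: flag order and APIs taken from the tests above.
    #include <cstdio>

    #include "src/tint/diagnostic/diagnostic.h"
    #include "src/tint/diagnostic/formatter.h"
    #include "src/tint/diagnostic/printer.h"

    void ReportDiagnostics(const tint::diag::List& diagnostics) {
        // {show file, show severity, show source snippet, trailing newline, tab width}
        tint::diag::Formatter formatter{{true, true, true, true, 4u}};
        auto printer = tint::diag::Printer::create(stderr, /* use_colors */ true);
        printer->write(formatter.format(diagnostics), {});
    }
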
diff --git a/chromium/third_party/dawn/src/tint/diagnostic/printer.cc b/chromium/third_party/dawn/src/tint/diagnostic/printer.cc
index a3e93bcffd0..a95d9f0b899 100644
--- a/chromium/third_party/dawn/src/tint/diagnostic/printer.cc
+++ b/chromium/third_party/dawn/src/tint/diagnostic/printer.cc
@@ -22,11 +22,11 @@ StringPrinter::StringPrinter() = default;
StringPrinter::~StringPrinter() = default;
std::string StringPrinter::str() const {
- return stream.str();
+ return stream.str();
}
void StringPrinter::write(const std::string& str, const Style&) {
- stream << str;
+ stream << str;
}
} // namespace tint::diag
diff --git a/chromium/third_party/dawn/src/tint/diagnostic/printer.h b/chromium/third_party/dawn/src/tint/diagnostic/printer.h
index cb38ac89e19..b2ac1051d68 100644
--- a/chromium/third_party/dawn/src/tint/diagnostic/printer.h
+++ b/chromium/third_party/dawn/src/tint/diagnostic/printer.h
@@ -25,55 +25,55 @@ class List;
/// Color is an enumerator of colors used by Style.
enum class Color {
- kDefault,
- kBlack,
- kRed,
- kGreen,
- kYellow,
- kBlue,
- kMagenta,
- kCyan,
- kWhite,
+ kDefault,
+ kBlack,
+ kRed,
+ kGreen,
+ kYellow,
+ kBlue,
+ kMagenta,
+ kCyan,
+ kWhite,
};
/// Style describes how a diagnostic message should be printed.
struct Style {
- /// The foreground text color
- Color color = Color::kDefault;
- /// If true the text will be displayed with a strong weight
- bool bold = false;
+ /// The foreground text color
+ Color color = Color::kDefault;
+ /// If true the text will be displayed with a strong weight
+ bool bold = false;
};
/// Printers are used to print formatted diagnostic messages to a terminal.
class Printer {
- public:
- /// @returns a diagnostic Printer
- /// @param out the file to print to.
- /// @param use_colors if true, the printer will use colors if `out` is a
- /// terminal and supports them.
- static std::unique_ptr<Printer> create(FILE* out, bool use_colors);
+ public:
+ /// @returns a diagnostic Printer
+ /// @param out the file to print to.
+ /// @param use_colors if true, the printer will use colors if `out` is a
+ /// terminal and supports them.
+ static std::unique_ptr<Printer> create(FILE* out, bool use_colors);
- virtual ~Printer();
+ virtual ~Printer();
- /// writes the string str to the printer with the given style.
- /// @param str the string to write to the printer
- /// @param style the style used to print `str`
- virtual void write(const std::string& str, const Style& style) = 0;
+ /// writes the string str to the printer with the given style.
+ /// @param str the string to write to the printer
+ /// @param style the style used to print `str`
+ virtual void write(const std::string& str, const Style& style) = 0;
};
/// StringPrinter is an implementation of Printer that writes to a std::string.
class StringPrinter : public Printer {
- public:
- StringPrinter();
- ~StringPrinter() override;
+ public:
+ StringPrinter();
+ ~StringPrinter() override;
- /// @returns the printed string.
- std::string str() const;
+ /// @returns the printed string.
+ std::string str() const;
- void write(const std::string& str, const Style&) override;
+ void write(const std::string& str, const Style&) override;
- private:
- std::stringstream stream;
+ private:
+ std::stringstream stream;
};
} // namespace tint::diag
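
As a quick illustration of the interface above, a hedged sketch that captures styled output through StringPrinter (the main() wrapper is illustrative; the Style aggregate is initialized with the color and bold members declared above):

    #include <iostream>

    #include "src/tint/diagnostic/printer.h"

    int main() {
        tint::diag::StringPrinter printer;
        printer.write("error: ", {tint::diag::Color::kRed, /* bold */ true});
        printer.write("something went wrong\n", {});
        // StringPrinter ignores styling and simply accumulates the text.
        std::cout << printer.str();
        return 0;
    }
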
diff --git a/chromium/third_party/dawn/src/tint/diagnostic/printer_linux.cc b/chromium/third_party/dawn/src/tint/diagnostic/printer_linux.cc
index 9d0e3156493..56d77b98532 100644
--- a/chromium/third_party/dawn/src/tint/diagnostic/printer_linux.cc
+++ b/chromium/third_party/dawn/src/tint/diagnostic/printer_linux.cc
@@ -22,77 +22,76 @@ namespace tint::diag {
namespace {
bool supports_colors(FILE* f) {
- if (!isatty(fileno(f))) {
- return false;
- }
-
- const char* cterm = getenv("TERM");
- if (cterm == nullptr) {
- return false;
- }
-
- std::string term = getenv("TERM");
- if (term != "cygwin" && term != "linux" && term != "rxvt-unicode-256color" &&
- term != "rxvt-unicode" && term != "screen-256color" && term != "screen" &&
- term != "tmux-256color" && term != "tmux" && term != "xterm-256color" &&
- term != "xterm-color" && term != "xterm") {
- return false;
- }
-
- return true;
+ if (!isatty(fileno(f))) {
+ return false;
+ }
+
+ const char* cterm = getenv("TERM");
+ if (cterm == nullptr) {
+ return false;
+ }
+
+ std::string term = getenv("TERM");
+ if (term != "cygwin" && term != "linux" && term != "rxvt-unicode-256color" &&
+ term != "rxvt-unicode" && term != "screen-256color" && term != "screen" &&
+ term != "tmux-256color" && term != "tmux" && term != "xterm-256color" &&
+ term != "xterm-color" && term != "xterm") {
+ return false;
+ }
+
+ return true;
}
class PrinterLinux : public Printer {
- public:
- PrinterLinux(FILE* f, bool colors)
- : file(f), use_colors(colors && supports_colors(f)) {}
-
- void write(const std::string& str, const Style& style) override {
- write_color(style.color, style.bold);
- fwrite(str.data(), 1, str.size(), file);
- write_color(Color::kDefault, false);
- }
-
- private:
- constexpr const char* color_code(Color color, bool bold) {
- switch (color) {
- case Color::kDefault:
- return bold ? "\u001b[1m" : "\u001b[0m";
- case Color::kBlack:
- return bold ? "\u001b[30;1m" : "\u001b[30m";
- case Color::kRed:
- return bold ? "\u001b[31;1m" : "\u001b[31m";
- case Color::kGreen:
- return bold ? "\u001b[32;1m" : "\u001b[32m";
- case Color::kYellow:
- return bold ? "\u001b[33;1m" : "\u001b[33m";
- case Color::kBlue:
- return bold ? "\u001b[34;1m" : "\u001b[34m";
- case Color::kMagenta:
- return bold ? "\u001b[35;1m" : "\u001b[35m";
- case Color::kCyan:
- return bold ? "\u001b[36;1m" : "\u001b[36m";
- case Color::kWhite:
- return bold ? "\u001b[37;1m" : "\u001b[37m";
+ public:
+ PrinterLinux(FILE* f, bool colors) : file(f), use_colors(colors && supports_colors(f)) {}
+
+ void write(const std::string& str, const Style& style) override {
+ write_color(style.color, style.bold);
+ fwrite(str.data(), 1, str.size(), file);
+ write_color(Color::kDefault, false);
+ }
+
+ private:
+ constexpr const char* color_code(Color color, bool bold) {
+ switch (color) {
+ case Color::kDefault:
+ return bold ? "\u001b[1m" : "\u001b[0m";
+ case Color::kBlack:
+ return bold ? "\u001b[30;1m" : "\u001b[30m";
+ case Color::kRed:
+ return bold ? "\u001b[31;1m" : "\u001b[31m";
+ case Color::kGreen:
+ return bold ? "\u001b[32;1m" : "\u001b[32m";
+ case Color::kYellow:
+ return bold ? "\u001b[33;1m" : "\u001b[33m";
+ case Color::kBlue:
+ return bold ? "\u001b[34;1m" : "\u001b[34m";
+ case Color::kMagenta:
+ return bold ? "\u001b[35;1m" : "\u001b[35m";
+ case Color::kCyan:
+ return bold ? "\u001b[36;1m" : "\u001b[36m";
+ case Color::kWhite:
+ return bold ? "\u001b[37;1m" : "\u001b[37m";
+ }
+ return ""; // unreachable
}
- return ""; // unreachable
- }
- void write_color(Color color, bool bold) {
- if (use_colors) {
- auto* code = color_code(color, bold);
- fwrite(code, 1, strlen(code), file);
+ void write_color(Color color, bool bold) {
+ if (use_colors) {
+ auto* code = color_code(color, bold);
+ fwrite(code, 1, strlen(code), file);
+ }
}
- }
- FILE* const file;
- const bool use_colors;
+ FILE* const file;
+ const bool use_colors;
};
} // namespace
std::unique_ptr<Printer> Printer::create(FILE* out, bool use_colors) {
- return std::make_unique<PrinterLinux>(out, use_colors);
+ return std::make_unique<PrinterLinux>(out, use_colors);
}
} // namespace tint::diag
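
For reference, the same isatty() + TERM allowlist test as above, written as a standalone helper (the function name is made up; the allowlist mirrors the patch and is illustrative rather than exhaustive):

    #include <unistd.h>

    #include <array>
    #include <cstdio>
    #include <cstdlib>
    #include <cstring>

    bool TerminalSupportsColors(FILE* f) {
        if (!isatty(fileno(f))) {
            return false;  // piped or redirected output: emit no escape codes
        }
        const char* term = std::getenv("TERM");
        if (term == nullptr) {
            return false;
        }
        static constexpr std::array<const char*, 11> kAllowlist = {
            "cygwin", "linux", "rxvt-unicode-256color", "rxvt-unicode",
            "screen-256color", "screen", "tmux-256color", "tmux",
            "xterm-256color", "xterm-color", "xterm"};
        for (const char* t : kAllowlist) {
            if (std::strcmp(term, t) == 0) {
                return true;
            }
        }
        return false;
    }
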
diff --git a/chromium/third_party/dawn/src/tint/diagnostic/printer_other.cc b/chromium/third_party/dawn/src/tint/diagnostic/printer_other.cc
index 65ef0ac94db..9a814fd30ae 100644
--- a/chromium/third_party/dawn/src/tint/diagnostic/printer_other.cc
+++ b/chromium/third_party/dawn/src/tint/diagnostic/printer_other.cc
@@ -20,21 +20,21 @@ namespace tint::diag {
namespace {
class PrinterOther : public Printer {
- public:
- explicit PrinterOther(FILE* f) : file(f) {}
+ public:
+ explicit PrinterOther(FILE* f) : file(f) {}
- void write(const std::string& str, const Style&) override {
- fwrite(str.data(), 1, str.size(), file);
- }
+ void write(const std::string& str, const Style&) override {
+ fwrite(str.data(), 1, str.size(), file);
+ }
- private:
- FILE* file;
+ private:
+ FILE* file;
};
} // namespace
std::unique_ptr<Printer> Printer::create(FILE* out, bool) {
- return std::make_unique<PrinterOther>(out);
+ return std::make_unique<PrinterOther>(out);
}
} // namespace tint::diag
diff --git a/chromium/third_party/dawn/src/tint/diagnostic/printer_test.cc b/chromium/third_party/dawn/src/tint/diagnostic/printer_test.cc
index bfc9d4339a4..8f117e9de12 100644
--- a/chromium/third_party/dawn/src/tint/diagnostic/printer_test.cc
+++ b/chromium/third_party/dawn/src/tint/diagnostic/printer_test.cc
@@ -36,59 +36,59 @@ namespace {
using PrinterTest = testing::Test;
TEST_F(PrinterTest, WithColors) {
- auto printer = Printer::create(stdout, true);
- printer->write("Default", Style{Color::kDefault, false});
- printer->write("Black", Style{Color::kBlack, false});
- printer->write("Red", Style{Color::kRed, false});
- printer->write("Green", Style{Color::kGreen, false});
- printer->write("Yellow", Style{Color::kYellow, false});
- printer->write("Blue", Style{Color::kBlue, false});
- printer->write("Magenta", Style{Color::kMagenta, false});
- printer->write("Cyan", Style{Color::kCyan, false});
- printer->write("White", Style{Color::kWhite, false});
- printf("\n");
+ auto printer = Printer::create(stdout, true);
+ printer->write("Default", Style{Color::kDefault, false});
+ printer->write("Black", Style{Color::kBlack, false});
+ printer->write("Red", Style{Color::kRed, false});
+ printer->write("Green", Style{Color::kGreen, false});
+ printer->write("Yellow", Style{Color::kYellow, false});
+ printer->write("Blue", Style{Color::kBlue, false});
+ printer->write("Magenta", Style{Color::kMagenta, false});
+ printer->write("Cyan", Style{Color::kCyan, false});
+ printer->write("White", Style{Color::kWhite, false});
+ printf("\n");
}
TEST_F(PrinterTest, BoldWithColors) {
- auto printer = Printer::create(stdout, true);
- printer->write("Default", Style{Color::kDefault, true});
- printer->write("Black", Style{Color::kBlack, true});
- printer->write("Red", Style{Color::kRed, true});
- printer->write("Green", Style{Color::kGreen, true});
- printer->write("Yellow", Style{Color::kYellow, true});
- printer->write("Blue", Style{Color::kBlue, true});
- printer->write("Magenta", Style{Color::kMagenta, true});
- printer->write("Cyan", Style{Color::kCyan, true});
- printer->write("White", Style{Color::kWhite, true});
- printf("\n");
+ auto printer = Printer::create(stdout, true);
+ printer->write("Default", Style{Color::kDefault, true});
+ printer->write("Black", Style{Color::kBlack, true});
+ printer->write("Red", Style{Color::kRed, true});
+ printer->write("Green", Style{Color::kGreen, true});
+ printer->write("Yellow", Style{Color::kYellow, true});
+ printer->write("Blue", Style{Color::kBlue, true});
+ printer->write("Magenta", Style{Color::kMagenta, true});
+ printer->write("Cyan", Style{Color::kCyan, true});
+ printer->write("White", Style{Color::kWhite, true});
+ printf("\n");
}
TEST_F(PrinterTest, WithoutColors) {
- auto printer = Printer::create(stdout, false);
- printer->write("Default", Style{Color::kDefault, false});
- printer->write("Black", Style{Color::kBlack, false});
- printer->write("Red", Style{Color::kRed, false});
- printer->write("Green", Style{Color::kGreen, false});
- printer->write("Yellow", Style{Color::kYellow, false});
- printer->write("Blue", Style{Color::kBlue, false});
- printer->write("Magenta", Style{Color::kMagenta, false});
- printer->write("Cyan", Style{Color::kCyan, false});
- printer->write("White", Style{Color::kWhite, false});
- printf("\n");
+ auto printer = Printer::create(stdout, false);
+ printer->write("Default", Style{Color::kDefault, false});
+ printer->write("Black", Style{Color::kBlack, false});
+ printer->write("Red", Style{Color::kRed, false});
+ printer->write("Green", Style{Color::kGreen, false});
+ printer->write("Yellow", Style{Color::kYellow, false});
+ printer->write("Blue", Style{Color::kBlue, false});
+ printer->write("Magenta", Style{Color::kMagenta, false});
+ printer->write("Cyan", Style{Color::kCyan, false});
+ printer->write("White", Style{Color::kWhite, false});
+ printf("\n");
}
TEST_F(PrinterTest, BoldWithoutColors) {
- auto printer = Printer::create(stdout, false);
- printer->write("Default", Style{Color::kDefault, true});
- printer->write("Black", Style{Color::kBlack, true});
- printer->write("Red", Style{Color::kRed, true});
- printer->write("Green", Style{Color::kGreen, true});
- printer->write("Yellow", Style{Color::kYellow, true});
- printer->write("Blue", Style{Color::kBlue, true});
- printer->write("Magenta", Style{Color::kMagenta, true});
- printer->write("Cyan", Style{Color::kCyan, true});
- printer->write("White", Style{Color::kWhite, true});
- printf("\n");
+ auto printer = Printer::create(stdout, false);
+ printer->write("Default", Style{Color::kDefault, true});
+ printer->write("Black", Style{Color::kBlack, true});
+ printer->write("Red", Style{Color::kRed, true});
+ printer->write("Green", Style{Color::kGreen, true});
+ printer->write("Yellow", Style{Color::kYellow, true});
+ printer->write("Blue", Style{Color::kBlue, true});
+ printer->write("Magenta", Style{Color::kMagenta, true});
+ printer->write("Cyan", Style{Color::kCyan, true});
+ printer->write("White", Style{Color::kWhite, true});
+ printf("\n");
}
#endif // ENABLE_PRINTER_TESTS
diff --git a/chromium/third_party/dawn/src/tint/diagnostic/printer_windows.cc b/chromium/third_party/dawn/src/tint/diagnostic/printer_windows.cc
index e8ebc1923b7..fff3db59cbc 100644
--- a/chromium/third_party/dawn/src/tint/diagnostic/printer_windows.cc
+++ b/chromium/third_party/dawn/src/tint/diagnostic/printer_windows.cc
@@ -23,89 +23,86 @@ namespace tint::diag {
namespace {
struct ConsoleInfo {
- HANDLE handle = INVALID_HANDLE_VALUE;
- WORD default_attributes = 0;
- operator bool() const { return handle != INVALID_HANDLE_VALUE; }
+ HANDLE handle = INVALID_HANDLE_VALUE;
+ WORD default_attributes = 0;
+ operator bool() const { return handle != INVALID_HANDLE_VALUE; }
};
ConsoleInfo console_info(FILE* file) {
- if (file == nullptr) {
- return {};
- }
-
- ConsoleInfo console{};
- if (file == stdout) {
- console.handle = GetStdHandle(STD_OUTPUT_HANDLE);
- } else if (file == stderr) {
- console.handle = GetStdHandle(STD_ERROR_HANDLE);
- } else {
- return {};
- }
-
- CONSOLE_SCREEN_BUFFER_INFO info{};
- if (GetConsoleScreenBufferInfo(console.handle, &info) == 0) {
- return {};
- }
-
- console.default_attributes = info.wAttributes;
- return console;
+ if (file == nullptr) {
+ return {};
+ }
+
+ ConsoleInfo console{};
+ if (file == stdout) {
+ console.handle = GetStdHandle(STD_OUTPUT_HANDLE);
+ } else if (file == stderr) {
+ console.handle = GetStdHandle(STD_ERROR_HANDLE);
+ } else {
+ return {};
+ }
+
+ CONSOLE_SCREEN_BUFFER_INFO info{};
+ if (GetConsoleScreenBufferInfo(console.handle, &info) == 0) {
+ return {};
+ }
+
+ console.default_attributes = info.wAttributes;
+ return console;
}
class PrinterWindows : public Printer {
- public:
- PrinterWindows(FILE* f, bool use_colors)
- : file(f), console(console_info(use_colors ? f : nullptr)) {}
-
- void write(const std::string& str, const Style& style) override {
- write_color(style.color, style.bold);
- fwrite(str.data(), 1, str.size(), file);
- write_color(Color::kDefault, false);
- }
-
- private:
- WORD attributes(Color color, bool bold) {
- switch (color) {
- case Color::kDefault:
- return console.default_attributes;
- case Color::kBlack:
- return 0;
- case Color::kRed:
- return FOREGROUND_RED | (bold ? FOREGROUND_INTENSITY : 0);
- case Color::kGreen:
- return FOREGROUND_GREEN | (bold ? FOREGROUND_INTENSITY : 0);
- case Color::kYellow:
- return FOREGROUND_RED | FOREGROUND_GREEN |
- (bold ? FOREGROUND_INTENSITY : 0);
- case Color::kBlue:
- return FOREGROUND_BLUE | (bold ? FOREGROUND_INTENSITY : 0);
- case Color::kMagenta:
- return FOREGROUND_RED | FOREGROUND_BLUE |
- (bold ? FOREGROUND_INTENSITY : 0);
- case Color::kCyan:
- return FOREGROUND_GREEN | FOREGROUND_BLUE |
- (bold ? FOREGROUND_INTENSITY : 0);
- case Color::kWhite:
- return FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE |
- (bold ? FOREGROUND_INTENSITY : 0);
+ public:
+ PrinterWindows(FILE* f, bool use_colors)
+ : file(f), console(console_info(use_colors ? f : nullptr)) {}
+
+ void write(const std::string& str, const Style& style) override {
+ write_color(style.color, style.bold);
+ fwrite(str.data(), 1, str.size(), file);
+ write_color(Color::kDefault, false);
+ }
+
+ private:
+ WORD attributes(Color color, bool bold) {
+ switch (color) {
+ case Color::kDefault:
+ return console.default_attributes;
+ case Color::kBlack:
+ return 0;
+ case Color::kRed:
+ return FOREGROUND_RED | (bold ? FOREGROUND_INTENSITY : 0);
+ case Color::kGreen:
+ return FOREGROUND_GREEN | (bold ? FOREGROUND_INTENSITY : 0);
+ case Color::kYellow:
+ return FOREGROUND_RED | FOREGROUND_GREEN | (bold ? FOREGROUND_INTENSITY : 0);
+ case Color::kBlue:
+ return FOREGROUND_BLUE | (bold ? FOREGROUND_INTENSITY : 0);
+ case Color::kMagenta:
+ return FOREGROUND_RED | FOREGROUND_BLUE | (bold ? FOREGROUND_INTENSITY : 0);
+ case Color::kCyan:
+ return FOREGROUND_GREEN | FOREGROUND_BLUE | (bold ? FOREGROUND_INTENSITY : 0);
+ case Color::kWhite:
+ return FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE |
+ (bold ? FOREGROUND_INTENSITY : 0);
+ }
+ return 0; // unreachable
}
- return 0; // unreachable
- }
- void write_color(Color color, bool bold) {
- if (console) {
- SetConsoleTextAttribute(console.handle, attributes(color, bold));
- fflush(file);
+ void write_color(Color color, bool bold) {
+ if (console) {
+ SetConsoleTextAttribute(console.handle, attributes(color, bold));
+ fflush(file);
+ }
}
- }
- FILE* const file;
- const ConsoleInfo console;
+ FILE* const file;
+ const ConsoleInfo console;
};
} // namespace
std::unique_ptr<Printer> Printer::create(FILE* out, bool use_colors) {
- return std::make_unique<PrinterWindows>(out, use_colors);
+ return std::make_unique<PrinterWindows>(out, use_colors);
}
} // namespace tint::diag
diff --git a/chromium/third_party/dawn/src/tint/fuzzers/BUILD.gn b/chromium/third_party/dawn/src/tint/fuzzers/BUILD.gn
index 0934ab25f72..2b477627bd5 100644
--- a/chromium/third_party/dawn/src/tint/fuzzers/BUILD.gn
+++ b/chromium/third_party/dawn/src/tint/fuzzers/BUILD.gn
@@ -20,6 +20,7 @@ import("../../../tint_overrides_with_defaults.gni")
if (build_with_chromium) {
import("//testing/libfuzzer/fuzzer_test.gni")
+ import("../../../scripts/dawn_overrides_with_defaults.gni")
fuzzer_corpus_wgsl_dir = "${target_gen_dir}/fuzzer_corpus_wgsl"
fuzzer_corpus_wgsl_stamp = "${fuzzer_corpus_wgsl_dir}.stamp"
@@ -32,6 +33,8 @@ if (build_with_chromium) {
rebase_path(fuzzer_corpus_wgsl_dir, root_build_dir),
]
outputs = [ fuzzer_corpus_wgsl_stamp ]
+
+ deps = [ "${dawn_root}/generator:remove_stale_autogen_files" ]
}
tint_fuzzer_common_libfuzzer_options = [
diff --git a/chromium/third_party/dawn/src/tint/fuzzers/tint_ast_fuzzer/BUILD.gn b/chromium/third_party/dawn/src/tint/fuzzers/tint_ast_fuzzer/BUILD.gn
index 4c63bab817a..69ef3fef911 100644
--- a/chromium/third_party/dawn/src/tint/fuzzers/tint_ast_fuzzer/BUILD.gn
+++ b/chromium/third_party/dawn/src/tint/fuzzers/tint_ast_fuzzer/BUILD.gn
@@ -17,11 +17,13 @@ import("../../../../tint_overrides_with_defaults.gni")
if (build_with_chromium) {
import("//third_party/protobuf/proto_library.gni")
+ import("../../../../scripts/dawn_overrides_with_defaults.gni")
proto_library("tint_ast_fuzzer_proto") {
sources = [ "protobufs/tint_ast_fuzzer.proto" ]
generate_python = false
use_protobuf_full = true
+ deps = [ "${dawn_root}/generator:remove_stale_autogen_files" ]
}
source_set("tint_ast_fuzzer") {
@@ -50,12 +52,16 @@ if (build_with_chromium) {
"mutation_finder.h",
"mutation_finders/change_binary_operators.cc",
"mutation_finders/change_binary_operators.h",
+ "mutation_finders/change_unary_operators.cc",
+ "mutation_finders/change_unary_operators.h",
"mutation_finders/replace_identifiers.cc",
"mutation_finders/replace_identifiers.h",
"mutation_finders/wrap_unary_operators.cc",
"mutation_finders/wrap_unary_operators.h",
"mutations/change_binary_operator.cc",
"mutations/change_binary_operator.h",
+ "mutations/change_unary_operator.cc",
+ "mutations/change_unary_operator.h",
"mutations/replace_identifier.cc",
"mutations/replace_identifier.h",
"mutations/wrap_unary_operator.cc",
diff --git a/chromium/third_party/dawn/src/tint/inspector/entry_point.cc b/chromium/third_party/dawn/src/tint/inspector/entry_point.cc
index 623d22a3bb1..6d3419f469b 100644
--- a/chromium/third_party/dawn/src/tint/inspector/entry_point.cc
+++ b/chromium/third_party/dawn/src/tint/inspector/entry_point.cc
@@ -35,34 +35,32 @@ EntryPoint::EntryPoint(EntryPoint&) = default;
EntryPoint::EntryPoint(EntryPoint&&) = default;
EntryPoint::~EntryPoint() = default;
-InterpolationType ASTToInspectorInterpolationType(
- ast::InterpolationType ast_type) {
- switch (ast_type) {
- case ast::InterpolationType::kPerspective:
- return InterpolationType::kPerspective;
- case ast::InterpolationType::kLinear:
- return InterpolationType::kLinear;
- case ast::InterpolationType::kFlat:
- return InterpolationType::kFlat;
- }
+InterpolationType ASTToInspectorInterpolationType(ast::InterpolationType ast_type) {
+ switch (ast_type) {
+ case ast::InterpolationType::kPerspective:
+ return InterpolationType::kPerspective;
+ case ast::InterpolationType::kLinear:
+ return InterpolationType::kLinear;
+ case ast::InterpolationType::kFlat:
+ return InterpolationType::kFlat;
+ }
- return InterpolationType::kUnknown;
+ return InterpolationType::kUnknown;
}
-InterpolationSampling ASTToInspectorInterpolationSampling(
- ast::InterpolationSampling sampling) {
- switch (sampling) {
- case ast::InterpolationSampling::kNone:
- return InterpolationSampling::kNone;
- case ast::InterpolationSampling::kCenter:
- return InterpolationSampling::kCenter;
- case ast::InterpolationSampling::kCentroid:
- return InterpolationSampling::kCentroid;
- case ast::InterpolationSampling::kSample:
- return InterpolationSampling::kSample;
- }
+InterpolationSampling ASTToInspectorInterpolationSampling(ast::InterpolationSampling sampling) {
+ switch (sampling) {
+ case ast::InterpolationSampling::kNone:
+ return InterpolationSampling::kNone;
+ case ast::InterpolationSampling::kCenter:
+ return InterpolationSampling::kCenter;
+ case ast::InterpolationSampling::kCentroid:
+ return InterpolationSampling::kCentroid;
+ case ast::InterpolationSampling::kSample:
+ return InterpolationSampling::kSample;
+ }
- return InterpolationSampling::kUnknown;
+ return InterpolationSampling::kUnknown;
}
} // namespace tint::inspector
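
As a small companion to the conversion helpers above, a hedged pretty-printer for the public InterpolationType enum (enumerator names match the header that follows; the ToString helper itself is hypothetical):

    #include "src/tint/inspector/entry_point.h"

    // Illustrative only: stringify the public reflection enum for logging.
    const char* ToString(tint::inspector::InterpolationType type) {
        using tint::inspector::InterpolationType;
        switch (type) {
            case InterpolationType::kPerspective:
                return "perspective";
            case InterpolationType::kLinear:
                return "linear";
            case InterpolationType::kFlat:
                return "flat";
            case InterpolationType::kUnknown:
                break;
        }
        return "unknown";
    }
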
diff --git a/chromium/third_party/dawn/src/tint/inspector/entry_point.h b/chromium/third_party/dawn/src/tint/inspector/entry_point.h
index d64a8eb8088..b9ac4e429e0 100644
--- a/chromium/third_party/dawn/src/tint/inspector/entry_point.h
+++ b/chromium/third_party/dawn/src/tint/inspector/entry_point.h
@@ -26,158 +26,149 @@ namespace tint::inspector {
/// Base component type of a stage variable.
enum class ComponentType {
- kUnknown = -1,
- kFloat,
- kUInt,
- kSInt,
+ kUnknown = -1,
+ kFloat,
+ kUInt,
+ kSInt,
};
/// Composition of components of a stage variable.
enum class CompositionType {
- kUnknown = -1,
- kScalar,
- kVec2,
- kVec3,
- kVec4,
+ kUnknown = -1,
+ kScalar,
+ kVec2,
+ kVec3,
+ kVec4,
};
/// Type of interpolation of a stage variable.
enum class InterpolationType { kUnknown = -1, kPerspective, kLinear, kFlat };
/// Type of interpolation sampling of a stage variable.
-enum class InterpolationSampling {
- kUnknown = -1,
- kNone,
- kCenter,
- kCentroid,
- kSample
-};
+enum class InterpolationSampling { kUnknown = -1, kNone, kCenter, kCentroid, kSample };
/// Reflection data about an entry point input or output.
struct StageVariable {
- /// Constructor
- StageVariable();
- /// Copy constructor
- /// @param other the StageVariable to copy
- StageVariable(const StageVariable& other);
- /// Destructor
- ~StageVariable();
-
- /// Name of the variable in the shader.
- std::string name;
- /// Is location attribute present
- bool has_location_attribute = false;
- /// Value of the location attribute, only valid if #has_location_attribute is
- /// true.
- uint32_t location_attribute;
- /// Is Location attribute present
- /// [DEPRECATED]: Use #has_location_attribute
- bool& has_location_decoration = has_location_attribute;
- /// Value of Location Decoration, only valid if #has_location_decoration is
- /// true.
- /// [DEPRECATED]: Use #location_attribute
- uint32_t& location_decoration = location_attribute;
- /// Scalar type that the variable is composed of.
- ComponentType component_type = ComponentType::kUnknown;
- /// How the scalars are composed for the variable.
- CompositionType composition_type = CompositionType::kUnknown;
- /// Interpolation type of the variable.
- InterpolationType interpolation_type = InterpolationType::kUnknown;
- /// Interpolation sampling of the variable.
- InterpolationSampling interpolation_sampling =
- InterpolationSampling::kUnknown;
+ /// Constructor
+ StageVariable();
+ /// Copy constructor
+ /// @param other the StageVariable to copy
+ StageVariable(const StageVariable& other);
+ /// Destructor
+ ~StageVariable();
+
+ /// Name of the variable in the shader.
+ std::string name;
+ /// Is location attribute present
+ bool has_location_attribute = false;
+ /// Value of the location attribute, only valid if #has_location_attribute is
+ /// true.
+ uint32_t location_attribute;
+ /// Is Location attribute present
+ /// [DEPRECATED]: Use #has_location_attribute
+ bool& has_location_decoration = has_location_attribute;
+ /// Value of Location Decoration, only valid if #has_location_decoration is
+ /// true.
+ /// [DEPRECATED]: Use #location_attribute
+ uint32_t& location_decoration = location_attribute;
+ /// Scalar type that the variable is composed of.
+ ComponentType component_type = ComponentType::kUnknown;
+ /// How the scalars are composed for the variable.
+ CompositionType composition_type = CompositionType::kUnknown;
+ /// Interpolation type of the variable.
+ InterpolationType interpolation_type = InterpolationType::kUnknown;
+ /// Interpolation sampling of the variable.
+ InterpolationSampling interpolation_sampling = InterpolationSampling::kUnknown;
};
/// Convert from internal ast::InterpolationType to public ::InterpolationType.
/// @param ast_type internal value to convert from
/// @returns the publicly visible equivalent
-InterpolationType ASTToInspectorInterpolationType(
- ast::InterpolationType ast_type);
+InterpolationType ASTToInspectorInterpolationType(ast::InterpolationType ast_type);
/// Convert from internal ast::InterpolationSampling to public
/// ::InterpolationSampling
/// @param sampling internal value to convert from
/// @returns the publicly visible equivalent
-InterpolationSampling ASTToInspectorInterpolationSampling(
- ast::InterpolationSampling sampling);
+InterpolationSampling ASTToInspectorInterpolationSampling(ast::InterpolationSampling sampling);
/// Reflection data about a pipeline overridable constant referenced by an entry
/// point
struct OverridableConstant {
- /// Name of the constant
- std::string name;
+ /// Name of the constant
+ std::string name;
- /// ID of the constant
- uint16_t numeric_id;
+ /// ID of the constant
+ uint16_t numeric_id;
- /// Type of the scalar
- enum class Type {
- kBool,
- kFloat32,
- kUint32,
- kInt32,
- };
+ /// Type of the scalar
+ enum class Type {
+ kBool,
+ kFloat32,
+ kUint32,
+ kInt32,
+ };
- /// Type of the scalar
- Type type;
+ /// Type of the scalar
+ Type type;
- /// Does this pipeline overridable constant have an initializer?
- bool is_initialized = false;
+ /// Does this pipeline overridable constant have an initializer?
+ bool is_initialized = false;
- /// Does this pipeline overridable constant have a numeric ID specified
- /// explicitly?
- bool is_numeric_id_specified = false;
+ /// Does this pipeline overridable constant have a numeric ID specified
+ /// explicitly?
+ bool is_numeric_id_specified = false;
};
/// Reflection data for an entry point in the shader.
struct EntryPoint {
- /// Constructors
- EntryPoint();
- /// Copy Constructor
- EntryPoint(EntryPoint&);
- /// Move Constructor
- EntryPoint(EntryPoint&&);
- ~EntryPoint();
-
- /// The entry point name
- std::string name;
- /// Remapped entry point name in the backend
- std::string remapped_name;
- /// The entry point stage
- ast::PipelineStage stage = ast::PipelineStage::kNone;
- /// The workgroup x size
- uint32_t workgroup_size_x = 0;
- /// The workgroup y size
- uint32_t workgroup_size_y = 0;
- /// The workgroup z size
- uint32_t workgroup_size_z = 0;
- /// List of the input variable accessed via this entry point.
- std::vector<StageVariable> input_variables;
- /// List of the output variable accessed via this entry point.
- std::vector<StageVariable> output_variables;
- /// List of the pipeline overridable constants accessed via this entry point.
- std::vector<OverridableConstant> overridable_constants;
- /// Does the entry point use the sample_mask builtin as an input builtin
- /// variable.
- bool input_sample_mask_used = false;
- /// Does the entry point use the sample_mask builtin as an output builtin
- /// variable.
- bool output_sample_mask_used = false;
- /// Does the entry point use the position builtin as an input builtin
- /// variable.
- bool input_position_used = false;
- /// Does the entry point use the front_facing builtin
- bool front_facing_used = false;
- /// Does the entry point use the sample_index builtin
- bool sample_index_used = false;
- /// Does the entry point use the num_workgroups builtin
- bool num_workgroups_used = false;
-
- /// @returns the size of the workgroup in {x,y,z} format
- std::tuple<uint32_t, uint32_t, uint32_t> workgroup_size() {
- return std::tuple<uint32_t, uint32_t, uint32_t>(
- workgroup_size_x, workgroup_size_y, workgroup_size_z);
- }
+ /// Constructors
+ EntryPoint();
+ /// Copy Constructor
+ EntryPoint(EntryPoint&);
+ /// Move Constructor
+ EntryPoint(EntryPoint&&);
+ ~EntryPoint();
+
+ /// The entry point name
+ std::string name;
+ /// Remapped entry point name in the backend
+ std::string remapped_name;
+ /// The entry point stage
+ ast::PipelineStage stage = ast::PipelineStage::kNone;
+ /// The workgroup x size
+ uint32_t workgroup_size_x = 0;
+ /// The workgroup y size
+ uint32_t workgroup_size_y = 0;
+ /// The workgroup z size
+ uint32_t workgroup_size_z = 0;
+ /// List of the input variable accessed via this entry point.
+ std::vector<StageVariable> input_variables;
+ /// List of the output variable accessed via this entry point.
+ std::vector<StageVariable> output_variables;
+ /// List of the pipeline overridable constants accessed via this entry point.
+ std::vector<OverridableConstant> overridable_constants;
+ /// Does the entry point use the sample_mask builtin as an input builtin
+ /// variable.
+ bool input_sample_mask_used = false;
+ /// Does the entry point use the sample_mask builtin as an output builtin
+ /// variable.
+ bool output_sample_mask_used = false;
+ /// Does the entry point use the position builtin as an input builtin
+ /// variable.
+ bool input_position_used = false;
+ /// Does the entry point use the front_facing builtin
+ bool front_facing_used = false;
+ /// Does the entry point use the sample_index builtin
+ bool sample_index_used = false;
+ /// Does the entry point use the num_workgroups builtin
+ bool num_workgroups_used = false;
+
+ /// @returns the size of the workgroup in {x,y,z} format
+ std::tuple<uint32_t, uint32_t, uint32_t> workgroup_size() {
+ return std::tuple<uint32_t, uint32_t, uint32_t>(workgroup_size_x, workgroup_size_y,
+ workgroup_size_z);
+ }
};
} // namespace tint::inspector
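
To show how the reflection structs above are typically consumed, a hedged sketch that walks Inspector::GetEntryPoints() (the Inspector class is implemented in the next file; its header path, the DumpEntryPoints function, and the output format are assumptions for illustration):

    #include <cstdio>

    #include "src/tint/inspector/inspector.h"

    // Illustrative consumer of the EntryPoint reflection data declared above.
    void DumpEntryPoints(tint::inspector::Inspector& inspector) {
        for (auto& ep : inspector.GetEntryPoints()) {
            auto [x, y, z] = ep.workgroup_size();
            std::printf("%s: %zu inputs, %zu outputs, workgroup (%u, %u, %u)\n",
                        ep.name.c_str(), ep.input_variables.size(),
                        ep.output_variables.size(), static_cast<unsigned>(x),
                        static_cast<unsigned>(y), static_cast<unsigned>(z));
        }
    }
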
diff --git a/chromium/third_party/dawn/src/tint/inspector/inspector.cc b/chromium/third_party/dawn/src/tint/inspector/inspector.cc
index fd2185cab2b..569a1042e6b 100644
--- a/chromium/third_party/dawn/src/tint/inspector/inspector.cc
+++ b/chromium/third_party/dawn/src/tint/inspector/inspector.cc
@@ -19,29 +19,30 @@
#include "src/tint/ast/bool_literal_expression.h"
#include "src/tint/ast/call_expression.h"
+#include "src/tint/ast/extension.h"
#include "src/tint/ast/float_literal_expression.h"
#include "src/tint/ast/id_attribute.h"
#include "src/tint/ast/interpolate_attribute.h"
#include "src/tint/ast/location_attribute.h"
#include "src/tint/ast/module.h"
-#include "src/tint/ast/sint_literal_expression.h"
-#include "src/tint/ast/uint_literal_expression.h"
#include "src/tint/sem/array.h"
#include "src/tint/sem/call.h"
-#include "src/tint/sem/depth_multisampled_texture_type.h"
-#include "src/tint/sem/f32_type.h"
+#include "src/tint/sem/depth_multisampled_texture.h"
+#include "src/tint/sem/f16.h"
+#include "src/tint/sem/f32.h"
#include "src/tint/sem/function.h"
-#include "src/tint/sem/i32_type.h"
-#include "src/tint/sem/matrix_type.h"
-#include "src/tint/sem/multisampled_texture_type.h"
-#include "src/tint/sem/sampled_texture_type.h"
+#include "src/tint/sem/i32.h"
+#include "src/tint/sem/matrix.h"
+#include "src/tint/sem/module.h"
+#include "src/tint/sem/multisampled_texture.h"
+#include "src/tint/sem/sampled_texture.h"
#include "src/tint/sem/statement.h"
-#include "src/tint/sem/storage_texture_type.h"
+#include "src/tint/sem/storage_texture.h"
#include "src/tint/sem/struct.h"
-#include "src/tint/sem/u32_type.h"
+#include "src/tint/sem/u32.h"
#include "src/tint/sem/variable.h"
-#include "src/tint/sem/vector_type.h"
-#include "src/tint/sem/void_type.h"
+#include "src/tint/sem/vector.h"
+#include "src/tint/sem/void.h"
#include "src/tint/utils/math.h"
#include "src/tint/utils/unique_vector.h"
@@ -51,75 +52,73 @@ namespace {
void AppendResourceBindings(std::vector<ResourceBinding>* dest,
const std::vector<ResourceBinding>& orig) {
- TINT_ASSERT(Inspector, dest);
- if (!dest) {
- return;
- }
-
- dest->reserve(dest->size() + orig.size());
- dest->insert(dest->end(), orig.begin(), orig.end());
-}
-
-std::tuple<ComponentType, CompositionType> CalculateComponentAndComposition(
- const sem::Type* type) {
- if (type->is_float_scalar()) {
- return {ComponentType::kFloat, CompositionType::kScalar};
- } else if (type->is_float_vector()) {
- auto* vec = type->As<sem::Vector>();
- if (vec->Width() == 2) {
- return {ComponentType::kFloat, CompositionType::kVec2};
- } else if (vec->Width() == 3) {
- return {ComponentType::kFloat, CompositionType::kVec3};
- } else if (vec->Width() == 4) {
- return {ComponentType::kFloat, CompositionType::kVec4};
- }
- } else if (type->is_unsigned_integer_scalar()) {
- return {ComponentType::kUInt, CompositionType::kScalar};
- } else if (type->is_unsigned_integer_vector()) {
- auto* vec = type->As<sem::Vector>();
- if (vec->Width() == 2) {
- return {ComponentType::kUInt, CompositionType::kVec2};
- } else if (vec->Width() == 3) {
- return {ComponentType::kUInt, CompositionType::kVec3};
- } else if (vec->Width() == 4) {
- return {ComponentType::kUInt, CompositionType::kVec4};
- }
- } else if (type->is_signed_integer_scalar()) {
- return {ComponentType::kSInt, CompositionType::kScalar};
- } else if (type->is_signed_integer_vector()) {
- auto* vec = type->As<sem::Vector>();
- if (vec->Width() == 2) {
- return {ComponentType::kSInt, CompositionType::kVec2};
- } else if (vec->Width() == 3) {
- return {ComponentType::kSInt, CompositionType::kVec3};
- } else if (vec->Width() == 4) {
- return {ComponentType::kSInt, CompositionType::kVec4};
- }
- }
- return {ComponentType::kUnknown, CompositionType::kUnknown};
+ TINT_ASSERT(Inspector, dest);
+ if (!dest) {
+ return;
+ }
+
+ dest->reserve(dest->size() + orig.size());
+ dest->insert(dest->end(), orig.begin(), orig.end());
+}
+
+std::tuple<ComponentType, CompositionType> CalculateComponentAndComposition(const sem::Type* type) {
+ if (type->is_float_scalar()) {
+ return {ComponentType::kFloat, CompositionType::kScalar};
+ } else if (type->is_float_vector()) {
+ auto* vec = type->As<sem::Vector>();
+ if (vec->Width() == 2) {
+ return {ComponentType::kFloat, CompositionType::kVec2};
+ } else if (vec->Width() == 3) {
+ return {ComponentType::kFloat, CompositionType::kVec3};
+ } else if (vec->Width() == 4) {
+ return {ComponentType::kFloat, CompositionType::kVec4};
+ }
+ } else if (type->is_unsigned_integer_scalar()) {
+ return {ComponentType::kUInt, CompositionType::kScalar};
+ } else if (type->is_unsigned_integer_vector()) {
+ auto* vec = type->As<sem::Vector>();
+ if (vec->Width() == 2) {
+ return {ComponentType::kUInt, CompositionType::kVec2};
+ } else if (vec->Width() == 3) {
+ return {ComponentType::kUInt, CompositionType::kVec3};
+ } else if (vec->Width() == 4) {
+ return {ComponentType::kUInt, CompositionType::kVec4};
+ }
+ } else if (type->is_signed_integer_scalar()) {
+ return {ComponentType::kSInt, CompositionType::kScalar};
+ } else if (type->is_signed_integer_vector()) {
+ auto* vec = type->As<sem::Vector>();
+ if (vec->Width() == 2) {
+ return {ComponentType::kSInt, CompositionType::kVec2};
+ } else if (vec->Width() == 3) {
+ return {ComponentType::kSInt, CompositionType::kVec3};
+ } else if (vec->Width() == 4) {
+ return {ComponentType::kSInt, CompositionType::kVec4};
+ }
+ }
+ return {ComponentType::kUnknown, CompositionType::kUnknown};
}
std::tuple<InterpolationType, InterpolationSampling> CalculateInterpolationData(
const sem::Type* type,
const ast::AttributeList& attributes) {
- auto* interpolation_attribute =
- ast::GetAttribute<ast::InterpolateAttribute>(attributes);
- if (type->is_integer_scalar_or_vector()) {
- return {InterpolationType::kFlat, InterpolationSampling::kNone};
- }
-
- if (!interpolation_attribute) {
- return {InterpolationType::kPerspective, InterpolationSampling::kCenter};
- }
-
- auto interpolation_type = interpolation_attribute->type;
- auto sampling = interpolation_attribute->sampling;
- if (interpolation_type != ast::InterpolationType::kFlat &&
- sampling == ast::InterpolationSampling::kNone) {
- sampling = ast::InterpolationSampling::kCenter;
- }
- return {ASTToInspectorInterpolationType(interpolation_type),
- ASTToInspectorInterpolationSampling(sampling)};
+ auto* interpolation_attribute = ast::GetAttribute<ast::InterpolateAttribute>(attributes);
+ if (type->is_integer_scalar_or_vector()) {
+ return {InterpolationType::kFlat, InterpolationSampling::kNone};
+ }
+
+ if (!interpolation_attribute) {
+ return {InterpolationType::kPerspective, InterpolationSampling::kCenter};
+ }
+
+ auto interpolation_type = interpolation_attribute->type;
+ auto sampling = interpolation_attribute->sampling;
+ if (interpolation_type != ast::InterpolationType::kFlat &&
+ sampling == ast::InterpolationSampling::kNone) {
+ sampling = ast::InterpolationSampling::kCenter;
+ }
+ return {ASTToInspectorInterpolationType(interpolation_type),
+ ASTToInspectorInterpolationSampling(sampling)};
}
} // namespace
@@ -129,826 +128,767 @@ Inspector::Inspector(const Program* program) : program_(program) {}
Inspector::~Inspector() = default;
std::vector<EntryPoint> Inspector::GetEntryPoints() {
- std::vector<EntryPoint> result;
+ std::vector<EntryPoint> result;
- for (auto* func : program_->AST().Functions()) {
- if (!func->IsEntryPoint()) {
- continue;
- }
-
- auto* sem = program_->Sem().Get(func);
-
- EntryPoint entry_point;
- entry_point.name = program_->Symbols().NameFor(func->symbol);
- entry_point.remapped_name = program_->Symbols().NameFor(func->symbol);
- entry_point.stage = func->PipelineStage();
-
- auto wgsize = sem->WorkgroupSize();
- entry_point.workgroup_size_x = wgsize[0].value;
- entry_point.workgroup_size_y = wgsize[1].value;
- entry_point.workgroup_size_z = wgsize[2].value;
- if (wgsize[0].overridable_const || wgsize[1].overridable_const ||
- wgsize[2].overridable_const) {
- // TODO(crbug.com/tint/713): Handle overridable constants.
- TINT_ASSERT(Inspector, false);
- }
-
- for (auto* param : sem->Parameters()) {
- AddEntryPointInOutVariables(
- program_->Symbols().NameFor(param->Declaration()->symbol),
- param->Type(), param->Declaration()->attributes,
- entry_point.input_variables);
-
- entry_point.input_position_used |=
- ContainsBuiltin(ast::Builtin::kPosition, param->Type(),
- param->Declaration()->attributes);
- entry_point.front_facing_used |=
- ContainsBuiltin(ast::Builtin::kFrontFacing, param->Type(),
- param->Declaration()->attributes);
- entry_point.sample_index_used |=
- ContainsBuiltin(ast::Builtin::kSampleIndex, param->Type(),
- param->Declaration()->attributes);
- entry_point.input_sample_mask_used |=
- ContainsBuiltin(ast::Builtin::kSampleMask, param->Type(),
- param->Declaration()->attributes);
- entry_point.num_workgroups_used |=
- ContainsBuiltin(ast::Builtin::kNumWorkgroups, param->Type(),
- param->Declaration()->attributes);
- }
-
- if (!sem->ReturnType()->Is<sem::Void>()) {
- AddEntryPointInOutVariables("<retval>", sem->ReturnType(),
- func->return_type_attributes,
- entry_point.output_variables);
-
- entry_point.output_sample_mask_used =
- ContainsBuiltin(ast::Builtin::kSampleMask, sem->ReturnType(),
- func->return_type_attributes);
- }
-
- for (auto* var : sem->TransitivelyReferencedGlobals()) {
- auto* decl = var->Declaration();
-
- auto name = program_->Symbols().NameFor(decl->symbol);
-
- auto* global = var->As<sem::GlobalVariable>();
- if (global && global->IsOverridable()) {
- OverridableConstant overridable_constant;
- overridable_constant.name = name;
- overridable_constant.numeric_id = global->ConstantId();
- auto* type = var->Type();
- TINT_ASSERT(Inspector, type->is_scalar());
- if (type->is_bool_scalar_or_vector()) {
- overridable_constant.type = OverridableConstant::Type::kBool;
- } else if (type->is_float_scalar()) {
- overridable_constant.type = OverridableConstant::Type::kFloat32;
- } else if (type->is_signed_integer_scalar()) {
- overridable_constant.type = OverridableConstant::Type::kInt32;
- } else if (type->is_unsigned_integer_scalar()) {
- overridable_constant.type = OverridableConstant::Type::kUint32;
- } else {
- TINT_UNREACHABLE(Inspector, diagnostics_);
+ for (auto* func : program_->AST().Functions()) {
+ if (!func->IsEntryPoint()) {
+ continue;
}
- overridable_constant.is_initialized =
- global->Declaration()->constructor;
- overridable_constant.is_numeric_id_specified =
- ast::HasAttribute<ast::IdAttribute>(
- global->Declaration()->attributes);
+ auto* sem = program_->Sem().Get(func);
+
+ EntryPoint entry_point;
+ entry_point.name = program_->Symbols().NameFor(func->symbol);
+ entry_point.remapped_name = program_->Symbols().NameFor(func->symbol);
+ entry_point.stage = func->PipelineStage();
+
+ auto wgsize = sem->WorkgroupSize();
+ entry_point.workgroup_size_x = wgsize[0].value;
+ entry_point.workgroup_size_y = wgsize[1].value;
+ entry_point.workgroup_size_z = wgsize[2].value;
+ if (wgsize[0].overridable_const || wgsize[1].overridable_const ||
+ wgsize[2].overridable_const) {
+ // TODO(crbug.com/tint/713): Handle overridable constants.
+ TINT_ASSERT(Inspector, false);
+ }
- entry_point.overridable_constants.push_back(overridable_constant);
- }
- }
+ for (auto* param : sem->Parameters()) {
+ AddEntryPointInOutVariables(program_->Symbols().NameFor(param->Declaration()->symbol),
+ param->Type(), param->Declaration()->attributes,
+ entry_point.input_variables);
+
+ entry_point.input_position_used |= ContainsBuiltin(
+ ast::Builtin::kPosition, param->Type(), param->Declaration()->attributes);
+ entry_point.front_facing_used |= ContainsBuiltin(
+ ast::Builtin::kFrontFacing, param->Type(), param->Declaration()->attributes);
+ entry_point.sample_index_used |= ContainsBuiltin(
+ ast::Builtin::kSampleIndex, param->Type(), param->Declaration()->attributes);
+ entry_point.input_sample_mask_used |= ContainsBuiltin(
+ ast::Builtin::kSampleMask, param->Type(), param->Declaration()->attributes);
+ entry_point.num_workgroups_used |= ContainsBuiltin(
+ ast::Builtin::kNumWorkgroups, param->Type(), param->Declaration()->attributes);
+ }
+
+ if (!sem->ReturnType()->Is<sem::Void>()) {
+ AddEntryPointInOutVariables("<retval>", sem->ReturnType(), func->return_type_attributes,
+ entry_point.output_variables);
- result.push_back(std::move(entry_point));
- }
+ entry_point.output_sample_mask_used = ContainsBuiltin(
+ ast::Builtin::kSampleMask, sem->ReturnType(), func->return_type_attributes);
+ }
+
+ for (auto* var : sem->TransitivelyReferencedGlobals()) {
+ auto* decl = var->Declaration();
+
+ auto name = program_->Symbols().NameFor(decl->symbol);
+
+ auto* global = var->As<sem::GlobalVariable>();
+ if (global && global->IsOverridable()) {
+ OverridableConstant overridable_constant;
+ overridable_constant.name = name;
+ overridable_constant.numeric_id = global->ConstantId();
+ auto* type = var->Type();
+ TINT_ASSERT(Inspector, type->is_scalar());
+ if (type->is_bool_scalar_or_vector()) {
+ overridable_constant.type = OverridableConstant::Type::kBool;
+ } else if (type->is_float_scalar()) {
+ overridable_constant.type = OverridableConstant::Type::kFloat32;
+ } else if (type->is_signed_integer_scalar()) {
+ overridable_constant.type = OverridableConstant::Type::kInt32;
+ } else if (type->is_unsigned_integer_scalar()) {
+ overridable_constant.type = OverridableConstant::Type::kUint32;
+ } else {
+ TINT_UNREACHABLE(Inspector, diagnostics_);
+ }
+
+ overridable_constant.is_initialized = global->Declaration()->constructor;
+ overridable_constant.is_numeric_id_specified =
+ ast::HasAttribute<ast::IdAttribute>(global->Declaration()->attributes);
+
+ entry_point.overridable_constants.push_back(overridable_constant);
+ }
+ }
- return result;
+ result.push_back(std::move(entry_point));
+ }
+
+ return result;
}
std::map<uint32_t, Scalar> Inspector::GetConstantIDs() {
- std::map<uint32_t, Scalar> result;
- for (auto* var : program_->AST().GlobalVariables()) {
- auto* global = program_->Sem().Get<sem::GlobalVariable>(var);
- if (!global || !global->IsOverridable()) {
- continue;
- }
+ std::map<uint32_t, Scalar> result;
+ for (auto* var : program_->AST().GlobalVariables()) {
+ auto* global = program_->Sem().Get<sem::GlobalVariable>(var);
+ if (!global || !global->IsOverridable()) {
+ continue;
+ }
- // If there are conflicting defintions for a constant id, that is invalid
- // WGSL, so the resolver should catch it. Thus here the inspector just
- // assumes all definitions of the constant id are the same, so only needs
- // to find the first reference to constant id.
- uint32_t constant_id = global->ConstantId();
- if (result.find(constant_id) != result.end()) {
- continue;
- }
+ // If there are conflicting defintions for a constant id, that is invalid
+ // WGSL, so the resolver should catch it. Thus here the inspector just
+ // assumes all definitions of the constant id are the same, so only needs
+ // to find the first reference to constant id.
+ uint32_t constant_id = global->ConstantId();
+ if (result.find(constant_id) != result.end()) {
+ continue;
+ }
- if (!var->constructor) {
- result[constant_id] = Scalar();
- continue;
- }
+ if (!var->constructor) {
+ result[constant_id] = Scalar();
+ continue;
+ }
- auto* literal = var->constructor->As<ast::LiteralExpression>();
- if (!literal) {
- // This is invalid WGSL, but handling gracefully.
- result[constant_id] = Scalar();
- continue;
- }
+ auto* literal = var->constructor->As<ast::LiteralExpression>();
+ if (!literal) {
+ // This is invalid WGSL, but handling gracefully.
+ result[constant_id] = Scalar();
+ continue;
+ }
- if (auto* l = literal->As<ast::BoolLiteralExpression>()) {
- result[constant_id] = Scalar(l->value);
- continue;
- }
+ if (auto* l = literal->As<ast::BoolLiteralExpression>()) {
+ result[constant_id] = Scalar(l->value);
+ continue;
+ }
- if (auto* l = literal->As<ast::UintLiteralExpression>()) {
- result[constant_id] = Scalar(l->value);
- continue;
- }
+ if (auto* l = literal->As<ast::IntLiteralExpression>()) {
+ switch (l->suffix) {
+ case ast::IntLiteralExpression::Suffix::kNone:
+ case ast::IntLiteralExpression::Suffix::kI:
+ result[constant_id] = Scalar(static_cast<int32_t>(l->value));
+ continue;
+ case ast::IntLiteralExpression::Suffix::kU:
+ result[constant_id] = Scalar(static_cast<uint32_t>(l->value));
+ continue;
+ }
+ }
- if (auto* l = literal->As<ast::SintLiteralExpression>()) {
- result[constant_id] = Scalar(l->value);
- continue;
- }
+ if (auto* l = literal->As<ast::FloatLiteralExpression>()) {
+ result[constant_id] = Scalar(static_cast<float>(l->value));
+ continue;
+ }
- if (auto* l = literal->As<ast::FloatLiteralExpression>()) {
- result[constant_id] = Scalar(l->value);
- continue;
+ result[constant_id] = Scalar();
}
- result[constant_id] = Scalar();
- }
-
- return result;
+ return result;
}
std::map<std::string, uint32_t> Inspector::GetConstantNameToIdMap() {
- std::map<std::string, uint32_t> result;
- for (auto* var : program_->AST().GlobalVariables()) {
- auto* global = program_->Sem().Get<sem::GlobalVariable>(var);
- if (global && global->IsOverridable()) {
- auto name = program_->Symbols().NameFor(var->symbol);
- result[name] = global->ConstantId();
+ std::map<std::string, uint32_t> result;
+ for (auto* var : program_->AST().GlobalVariables()) {
+ auto* global = program_->Sem().Get<sem::GlobalVariable>(var);
+ if (global && global->IsOverridable()) {
+ auto name = program_->Symbols().NameFor(var->symbol);
+ result[name] = global->ConstantId();
+ }
}
- }
- return result;
+ return result;
}
uint32_t Inspector::GetStorageSize(const std::string& entry_point) {
- auto* func = FindEntryPointByName(entry_point);
- if (!func) {
- return 0;
- }
-
- size_t size = 0;
- auto* func_sem = program_->Sem().Get(func);
- for (auto& ruv : func_sem->TransitivelyReferencedUniformVariables()) {
- size += ruv.first->Type()->UnwrapRef()->Size();
- }
- for (auto& rsv : func_sem->TransitivelyReferencedStorageBufferVariables()) {
- size += rsv.first->Type()->UnwrapRef()->Size();
- }
-
- if (static_cast<uint64_t>(size) >
- static_cast<uint64_t>(std::numeric_limits<uint32_t>::max())) {
- return std::numeric_limits<uint32_t>::max();
- }
- return static_cast<uint32_t>(size);
-}
-
-std::vector<ResourceBinding> Inspector::GetResourceBindings(
- const std::string& entry_point) {
- auto* func = FindEntryPointByName(entry_point);
- if (!func) {
- return {};
- }
-
- std::vector<ResourceBinding> result;
- for (auto fn : {
- &Inspector::GetUniformBufferResourceBindings,
- &Inspector::GetStorageBufferResourceBindings,
- &Inspector::GetReadOnlyStorageBufferResourceBindings,
- &Inspector::GetSamplerResourceBindings,
- &Inspector::GetComparisonSamplerResourceBindings,
- &Inspector::GetSampledTextureResourceBindings,
- &Inspector::GetMultisampledTextureResourceBindings,
- &Inspector::GetWriteOnlyStorageTextureResourceBindings,
- &Inspector::GetDepthTextureResourceBindings,
- &Inspector::GetDepthMultisampledTextureResourceBindings,
- &Inspector::GetExternalTextureResourceBindings,
- }) {
- AppendResourceBindings(&result, (this->*fn)(entry_point));
- }
- return result;
+ auto* func = FindEntryPointByName(entry_point);
+ if (!func) {
+ return 0;
+ }
+
+ size_t size = 0;
+ auto* func_sem = program_->Sem().Get(func);
+ for (auto& ruv : func_sem->TransitivelyReferencedUniformVariables()) {
+ size += ruv.first->Type()->UnwrapRef()->Size();
+ }
+ for (auto& rsv : func_sem->TransitivelyReferencedStorageBufferVariables()) {
+ size += rsv.first->Type()->UnwrapRef()->Size();
+ }
+
+ if (static_cast<uint64_t>(size) > static_cast<uint64_t>(std::numeric_limits<uint32_t>::max())) {
+ return std::numeric_limits<uint32_t>::max();
+ }
+ return static_cast<uint32_t>(size);
+}
+
+std::vector<ResourceBinding> Inspector::GetResourceBindings(const std::string& entry_point) {
+ auto* func = FindEntryPointByName(entry_point);
+ if (!func) {
+ return {};
+ }
+
+ std::vector<ResourceBinding> result;
+ for (auto fn : {
+ &Inspector::GetUniformBufferResourceBindings,
+ &Inspector::GetStorageBufferResourceBindings,
+ &Inspector::GetReadOnlyStorageBufferResourceBindings,
+ &Inspector::GetSamplerResourceBindings,
+ &Inspector::GetComparisonSamplerResourceBindings,
+ &Inspector::GetSampledTextureResourceBindings,
+ &Inspector::GetMultisampledTextureResourceBindings,
+ &Inspector::GetWriteOnlyStorageTextureResourceBindings,
+ &Inspector::GetDepthTextureResourceBindings,
+ &Inspector::GetDepthMultisampledTextureResourceBindings,
+ &Inspector::GetExternalTextureResourceBindings,
+ }) {
+ AppendResourceBindings(&result, (this->*fn)(entry_point));
+ }
+ return result;
}
std::vector<ResourceBinding> Inspector::GetUniformBufferResourceBindings(
const std::string& entry_point) {
- auto* func = FindEntryPointByName(entry_point);
- if (!func) {
- return {};
- }
-
- std::vector<ResourceBinding> result;
-
- auto* func_sem = program_->Sem().Get(func);
- for (auto& ruv : func_sem->TransitivelyReferencedUniformVariables()) {
- auto* var = ruv.first;
- auto binding_info = ruv.second;
-
- auto* unwrapped_type = var->Type()->UnwrapRef();
-
- ResourceBinding entry;
- entry.resource_type = ResourceBinding::ResourceType::kUniformBuffer;
- entry.bind_group = binding_info.group->value;
- entry.binding = binding_info.binding->value;
- entry.size = unwrapped_type->Size();
- entry.size_no_padding = entry.size;
- if (auto* str = unwrapped_type->As<sem::Struct>()) {
- entry.size_no_padding = str->SizeNoPadding();
- } else {
- entry.size_no_padding = entry.size;
+ auto* func = FindEntryPointByName(entry_point);
+ if (!func) {
+ return {};
}
- result.push_back(entry);
- }
+ std::vector<ResourceBinding> result;
+
+ auto* func_sem = program_->Sem().Get(func);
+ for (auto& ruv : func_sem->TransitivelyReferencedUniformVariables()) {
+ auto* var = ruv.first;
+ auto binding_info = ruv.second;
+
+ auto* unwrapped_type = var->Type()->UnwrapRef();
+
+ ResourceBinding entry;
+ entry.resource_type = ResourceBinding::ResourceType::kUniformBuffer;
+ entry.bind_group = binding_info.group->value;
+ entry.binding = binding_info.binding->value;
+ entry.size = unwrapped_type->Size();
+ entry.size_no_padding = entry.size;
+ if (auto* str = unwrapped_type->As<sem::Struct>()) {
+ entry.size_no_padding = str->SizeNoPadding();
+ } else {
+ entry.size_no_padding = entry.size;
+ }
+
+ result.push_back(entry);
+ }
- return result;
+ return result;
}
std::vector<ResourceBinding> Inspector::GetStorageBufferResourceBindings(
const std::string& entry_point) {
- return GetStorageBufferResourceBindingsImpl(entry_point, false);
+ return GetStorageBufferResourceBindingsImpl(entry_point, false);
}
-std::vector<ResourceBinding>
-Inspector::GetReadOnlyStorageBufferResourceBindings(
+std::vector<ResourceBinding> Inspector::GetReadOnlyStorageBufferResourceBindings(
const std::string& entry_point) {
- return GetStorageBufferResourceBindingsImpl(entry_point, true);
+ return GetStorageBufferResourceBindingsImpl(entry_point, true);
}
-std::vector<ResourceBinding> Inspector::GetSamplerResourceBindings(
- const std::string& entry_point) {
- auto* func = FindEntryPointByName(entry_point);
- if (!func) {
- return {};
- }
+std::vector<ResourceBinding> Inspector::GetSamplerResourceBindings(const std::string& entry_point) {
+ auto* func = FindEntryPointByName(entry_point);
+ if (!func) {
+ return {};
+ }
- std::vector<ResourceBinding> result;
+ std::vector<ResourceBinding> result;
- auto* func_sem = program_->Sem().Get(func);
- for (auto& rs : func_sem->TransitivelyReferencedSamplerVariables()) {
- auto binding_info = rs.second;
+ auto* func_sem = program_->Sem().Get(func);
+ for (auto& rs : func_sem->TransitivelyReferencedSamplerVariables()) {
+ auto binding_info = rs.second;
- ResourceBinding entry;
- entry.resource_type = ResourceBinding::ResourceType::kSampler;
- entry.bind_group = binding_info.group->value;
- entry.binding = binding_info.binding->value;
+ ResourceBinding entry;
+ entry.resource_type = ResourceBinding::ResourceType::kSampler;
+ entry.bind_group = binding_info.group->value;
+ entry.binding = binding_info.binding->value;
- result.push_back(entry);
- }
+ result.push_back(entry);
+ }
- return result;
+ return result;
}
std::vector<ResourceBinding> Inspector::GetComparisonSamplerResourceBindings(
const std::string& entry_point) {
- auto* func = FindEntryPointByName(entry_point);
- if (!func) {
- return {};
- }
+ auto* func = FindEntryPointByName(entry_point);
+ if (!func) {
+ return {};
+ }
- std::vector<ResourceBinding> result;
+ std::vector<ResourceBinding> result;
- auto* func_sem = program_->Sem().Get(func);
- for (auto& rcs :
- func_sem->TransitivelyReferencedComparisonSamplerVariables()) {
- auto binding_info = rcs.second;
+ auto* func_sem = program_->Sem().Get(func);
+ for (auto& rcs : func_sem->TransitivelyReferencedComparisonSamplerVariables()) {
+ auto binding_info = rcs.second;
- ResourceBinding entry;
- entry.resource_type = ResourceBinding::ResourceType::kComparisonSampler;
- entry.bind_group = binding_info.group->value;
- entry.binding = binding_info.binding->value;
+ ResourceBinding entry;
+ entry.resource_type = ResourceBinding::ResourceType::kComparisonSampler;
+ entry.bind_group = binding_info.group->value;
+ entry.binding = binding_info.binding->value;
- result.push_back(entry);
- }
+ result.push_back(entry);
+ }
- return result;
+ return result;
}
std::vector<ResourceBinding> Inspector::GetSampledTextureResourceBindings(
const std::string& entry_point) {
- return GetSampledTextureResourceBindingsImpl(entry_point, false);
+ return GetSampledTextureResourceBindingsImpl(entry_point, false);
}
std::vector<ResourceBinding> Inspector::GetMultisampledTextureResourceBindings(
const std::string& entry_point) {
- return GetSampledTextureResourceBindingsImpl(entry_point, true);
+ return GetSampledTextureResourceBindingsImpl(entry_point, true);
}
-std::vector<ResourceBinding>
-Inspector::GetWriteOnlyStorageTextureResourceBindings(
+std::vector<ResourceBinding> Inspector::GetWriteOnlyStorageTextureResourceBindings(
const std::string& entry_point) {
- return GetStorageTextureResourceBindingsImpl(entry_point);
+ return GetStorageTextureResourceBindingsImpl(entry_point);
}
std::vector<ResourceBinding> Inspector::GetTextureResourceBindings(
const std::string& entry_point,
const tint::TypeInfo* texture_type,
ResourceBinding::ResourceType resource_type) {
- auto* func = FindEntryPointByName(entry_point);
- if (!func) {
- return {};
- }
+ auto* func = FindEntryPointByName(entry_point);
+ if (!func) {
+ return {};
+ }
- std::vector<ResourceBinding> result;
- auto* func_sem = program_->Sem().Get(func);
- for (auto& ref :
- func_sem->TransitivelyReferencedVariablesOfType(texture_type)) {
- auto* var = ref.first;
- auto binding_info = ref.second;
+ std::vector<ResourceBinding> result;
+ auto* func_sem = program_->Sem().Get(func);
+ for (auto& ref : func_sem->TransitivelyReferencedVariablesOfType(texture_type)) {
+ auto* var = ref.first;
+ auto binding_info = ref.second;
- ResourceBinding entry;
- entry.resource_type = resource_type;
- entry.bind_group = binding_info.group->value;
- entry.binding = binding_info.binding->value;
+ ResourceBinding entry;
+ entry.resource_type = resource_type;
+ entry.bind_group = binding_info.group->value;
+ entry.binding = binding_info.binding->value;
- auto* tex = var->Type()->UnwrapRef()->As<sem::Texture>();
- entry.dim =
- TypeTextureDimensionToResourceBindingTextureDimension(tex->dim());
+ auto* tex = var->Type()->UnwrapRef()->As<sem::Texture>();
+ entry.dim = TypeTextureDimensionToResourceBindingTextureDimension(tex->dim());
- result.push_back(entry);
- }
+ result.push_back(entry);
+ }
- return result;
+ return result;
}
std::vector<ResourceBinding> Inspector::GetDepthTextureResourceBindings(
const std::string& entry_point) {
- return GetTextureResourceBindings(
- entry_point, &TypeInfo::Of<sem::DepthTexture>(),
- ResourceBinding::ResourceType::kDepthTexture);
+ return GetTextureResourceBindings(entry_point, &TypeInfo::Of<sem::DepthTexture>(),
+ ResourceBinding::ResourceType::kDepthTexture);
}
-std::vector<ResourceBinding>
-Inspector::GetDepthMultisampledTextureResourceBindings(
+std::vector<ResourceBinding> Inspector::GetDepthMultisampledTextureResourceBindings(
const std::string& entry_point) {
- return GetTextureResourceBindings(
- entry_point, &TypeInfo::Of<sem::DepthMultisampledTexture>(),
- ResourceBinding::ResourceType::kDepthMultisampledTexture);
+ return GetTextureResourceBindings(entry_point, &TypeInfo::Of<sem::DepthMultisampledTexture>(),
+ ResourceBinding::ResourceType::kDepthMultisampledTexture);
}
std::vector<ResourceBinding> Inspector::GetExternalTextureResourceBindings(
const std::string& entry_point) {
- return GetTextureResourceBindings(
- entry_point, &TypeInfo::Of<sem::ExternalTexture>(),
- ResourceBinding::ResourceType::kExternalTexture);
+ return GetTextureResourceBindings(entry_point, &TypeInfo::Of<sem::ExternalTexture>(),
+ ResourceBinding::ResourceType::kExternalTexture);
}
std::vector<sem::SamplerTexturePair> Inspector::GetSamplerTextureUses(
const std::string& entry_point) {
- auto* func = FindEntryPointByName(entry_point);
- if (!func) {
- return {};
- }
+ auto* func = FindEntryPointByName(entry_point);
+ if (!func) {
+ return {};
+ }
- GenerateSamplerTargets();
+ GenerateSamplerTargets();
- auto it = sampler_targets_->find(entry_point);
- if (it == sampler_targets_->end()) {
- return {};
- }
- return it->second;
+ auto it = sampler_targets_->find(entry_point);
+ if (it == sampler_targets_->end()) {
+ return {};
+ }
+ return it->second;
}
std::vector<sem::SamplerTexturePair> Inspector::GetSamplerTextureUses(
const std::string& entry_point,
const sem::BindingPoint& placeholder) {
- auto* func = FindEntryPointByName(entry_point);
- if (!func) {
- return {};
- }
- auto* func_sem = program_->Sem().Get(func);
-
- std::vector<sem::SamplerTexturePair> new_pairs;
- for (auto pair : func_sem->TextureSamplerPairs()) {
- auto* texture = pair.first->As<sem::GlobalVariable>();
- auto* sampler =
- pair.second ? pair.second->As<sem::GlobalVariable>() : nullptr;
- SamplerTexturePair new_pair;
- new_pair.sampler_binding_point =
- sampler ? sampler->BindingPoint() : placeholder;
- new_pair.texture_binding_point = texture->BindingPoint();
- new_pairs.push_back(new_pair);
- }
- return new_pairs;
+ auto* func = FindEntryPointByName(entry_point);
+ if (!func) {
+ return {};
+ }
+ auto* func_sem = program_->Sem().Get(func);
+
+ std::vector<sem::SamplerTexturePair> new_pairs;
+ for (auto pair : func_sem->TextureSamplerPairs()) {
+ auto* texture = pair.first->As<sem::GlobalVariable>();
+ auto* sampler = pair.second ? pair.second->As<sem::GlobalVariable>() : nullptr;
+ SamplerTexturePair new_pair;
+ new_pair.sampler_binding_point = sampler ? sampler->BindingPoint() : placeholder;
+ new_pair.texture_binding_point = texture->BindingPoint();
+ new_pairs.push_back(new_pair);
+ }
+ return new_pairs;
}
uint32_t Inspector::GetWorkgroupStorageSize(const std::string& entry_point) {
- auto* func = FindEntryPointByName(entry_point);
- if (!func) {
- return 0;
- }
+ auto* func = FindEntryPointByName(entry_point);
+ if (!func) {
+ return 0;
+ }
+
+ uint32_t total_size = 0;
+ auto* func_sem = program_->Sem().Get(func);
+ for (const sem::Variable* var : func_sem->TransitivelyReferencedGlobals()) {
+ if (var->StorageClass() == ast::StorageClass::kWorkgroup) {
+ auto* ty = var->Type()->UnwrapRef();
+ uint32_t align = ty->Align();
+ uint32_t size = ty->Size();
+
+ // This essentially matches std430 layout rules from GLSL, which are in
+ // turn specified as an upper bound for Vulkan layout sizing. Since D3D
+ // and Metal are even less specific, we assume Vulkan behavior as a
+ // good-enough approximation everywhere.
+ total_size += utils::RoundUp(align, size);
+ }
+ }
- uint32_t total_size = 0;
- auto* func_sem = program_->Sem().Get(func);
- for (const sem::Variable* var : func_sem->TransitivelyReferencedGlobals()) {
- if (var->StorageClass() == ast::StorageClass::kWorkgroup) {
- auto* ty = var->Type()->UnwrapRef();
- uint32_t align = ty->Align();
- uint32_t size = ty->Size();
+ return total_size;
+}
- // This essentially matches std430 layout rules from GLSL, which are in
- // turn specified as an upper bound for Vulkan layout sizing. Since D3D
- // and Metal are even less specific, we assume Vulkan behavior as a
- // good-enough approximation everywhere.
- total_size += utils::RoundUp(align, size);
+std::vector<std::string> Inspector::GetUsedExtensionNames() {
+ auto& extensions = program_->Sem().Module()->Extensions();
+ std::vector<std::string> out;
+ out.reserve(extensions.size());
+ for (auto ext : extensions) {
+ out.push_back(ast::str(ext));
+ }
+ return out;
+}
+
+std::vector<std::pair<std::string, Source>> Inspector::GetEnableDirectives() {
+ std::vector<std::pair<std::string, Source>> result;
+
+    // AST nodes for enable directives are stored in the global declarations list
+ auto global_decls = program_->AST().GlobalDeclarations();
+ for (auto* node : global_decls) {
+ if (auto* ext = node->As<ast::Enable>()) {
+ result.push_back({ast::str(ext->extension), ext->source});
+ }
}
- }
- return total_size;
+ return result;
}
const ast::Function* Inspector::FindEntryPointByName(const std::string& name) {
- auto* func = program_->AST().Functions().Find(program_->Symbols().Get(name));
- if (!func) {
- diagnostics_.add_error(diag::System::Inspector, name + " was not found!");
- return nullptr;
- }
+ auto* func = program_->AST().Functions().Find(program_->Symbols().Get(name));
+ if (!func) {
+ diagnostics_.add_error(diag::System::Inspector, name + " was not found!");
+ return nullptr;
+ }
- if (!func->IsEntryPoint()) {
- diagnostics_.add_error(diag::System::Inspector,
- name + " is not an entry point!");
- return nullptr;
- }
+ if (!func->IsEntryPoint()) {
+ diagnostics_.add_error(diag::System::Inspector, name + " is not an entry point!");
+ return nullptr;
+ }
- return func;
+ return func;
}
-void Inspector::AddEntryPointInOutVariables(
- std::string name,
- const sem::Type* type,
- const ast::AttributeList& attributes,
- std::vector<StageVariable>& variables) const {
- // Skip builtins.
- if (ast::HasAttribute<ast::BuiltinAttribute>(attributes)) {
- return;
- }
+void Inspector::AddEntryPointInOutVariables(std::string name,
+ const sem::Type* type,
+ const ast::AttributeList& attributes,
+ std::vector<StageVariable>& variables) const {
+ // Skip builtins.
+ if (ast::HasAttribute<ast::BuiltinAttribute>(attributes)) {
+ return;
+ }
- auto* unwrapped_type = type->UnwrapRef();
+ auto* unwrapped_type = type->UnwrapRef();
- if (auto* struct_ty = unwrapped_type->As<sem::Struct>()) {
- // Recurse into members.
- for (auto* member : struct_ty->Members()) {
- AddEntryPointInOutVariables(
- name + "." +
- program_->Symbols().NameFor(member->Declaration()->symbol),
- member->Type(), member->Declaration()->attributes, variables);
+ if (auto* struct_ty = unwrapped_type->As<sem::Struct>()) {
+ // Recurse into members.
+ for (auto* member : struct_ty->Members()) {
+ AddEntryPointInOutVariables(
+ name + "." + program_->Symbols().NameFor(member->Declaration()->symbol),
+ member->Type(), member->Declaration()->attributes, variables);
+ }
+ return;
}
- return;
- }
- // Base case: add the variable.
+ // Base case: add the variable.
- StageVariable stage_variable;
- stage_variable.name = name;
- std::tie(stage_variable.component_type, stage_variable.composition_type) =
- CalculateComponentAndComposition(type);
+ StageVariable stage_variable;
+ stage_variable.name = name;
+ std::tie(stage_variable.component_type, stage_variable.composition_type) =
+ CalculateComponentAndComposition(type);
- auto* location = ast::GetAttribute<ast::LocationAttribute>(attributes);
- TINT_ASSERT(Inspector, location != nullptr);
- stage_variable.has_location_attribute = true;
- stage_variable.location_attribute = location->value;
+ auto* location = ast::GetAttribute<ast::LocationAttribute>(attributes);
+ TINT_ASSERT(Inspector, location != nullptr);
+ stage_variable.has_location_attribute = true;
+ stage_variable.location_attribute = location->value;
- std::tie(stage_variable.interpolation_type,
- stage_variable.interpolation_sampling) =
- CalculateInterpolationData(type, attributes);
+ std::tie(stage_variable.interpolation_type, stage_variable.interpolation_sampling) =
+ CalculateInterpolationData(type, attributes);
- variables.push_back(stage_variable);
+ variables.push_back(stage_variable);
}
bool Inspector::ContainsBuiltin(ast::Builtin builtin,
const sem::Type* type,
const ast::AttributeList& attributes) const {
- auto* unwrapped_type = type->UnwrapRef();
-
- if (auto* struct_ty = unwrapped_type->As<sem::Struct>()) {
- // Recurse into members.
- for (auto* member : struct_ty->Members()) {
- if (ContainsBuiltin(builtin, member->Type(),
- member->Declaration()->attributes)) {
- return true;
- }
+ auto* unwrapped_type = type->UnwrapRef();
+
+ if (auto* struct_ty = unwrapped_type->As<sem::Struct>()) {
+ // Recurse into members.
+ for (auto* member : struct_ty->Members()) {
+ if (ContainsBuiltin(builtin, member->Type(), member->Declaration()->attributes)) {
+ return true;
+ }
+ }
+ return false;
}
- return false;
- }
- // Base case: check for builtin
- auto* builtin_declaration =
- ast::GetAttribute<ast::BuiltinAttribute>(attributes);
- if (!builtin_declaration || builtin_declaration->builtin != builtin) {
- return false;
- }
+ // Base case: check for builtin
+ auto* builtin_declaration = ast::GetAttribute<ast::BuiltinAttribute>(attributes);
+ if (!builtin_declaration || builtin_declaration->builtin != builtin) {
+ return false;
+ }
- return true;
+ return true;
}
std::vector<ResourceBinding> Inspector::GetStorageBufferResourceBindingsImpl(
const std::string& entry_point,
bool read_only) {
- auto* func = FindEntryPointByName(entry_point);
- if (!func) {
- return {};
- }
-
- auto* func_sem = program_->Sem().Get(func);
- std::vector<ResourceBinding> result;
- for (auto& rsv : func_sem->TransitivelyReferencedStorageBufferVariables()) {
- auto* var = rsv.first;
- auto binding_info = rsv.second;
-
- if (read_only != (var->Access() == ast::Access::kRead)) {
- continue;
- }
-
- auto* unwrapped_type = var->Type()->UnwrapRef();
-
- ResourceBinding entry;
- entry.resource_type =
- read_only ? ResourceBinding::ResourceType::kReadOnlyStorageBuffer
- : ResourceBinding::ResourceType::kStorageBuffer;
- entry.bind_group = binding_info.group->value;
- entry.binding = binding_info.binding->value;
- entry.size = unwrapped_type->Size();
- if (auto* str = unwrapped_type->As<sem::Struct>()) {
- entry.size_no_padding = str->SizeNoPadding();
- } else {
- entry.size_no_padding = entry.size;
+ auto* func = FindEntryPointByName(entry_point);
+ if (!func) {
+ return {};
}
- result.push_back(entry);
- }
+ auto* func_sem = program_->Sem().Get(func);
+ std::vector<ResourceBinding> result;
+ for (auto& rsv : func_sem->TransitivelyReferencedStorageBufferVariables()) {
+ auto* var = rsv.first;
+ auto binding_info = rsv.second;
+
+ if (read_only != (var->Access() == ast::Access::kRead)) {
+ continue;
+ }
+
+ auto* unwrapped_type = var->Type()->UnwrapRef();
+
+ ResourceBinding entry;
+ entry.resource_type = read_only ? ResourceBinding::ResourceType::kReadOnlyStorageBuffer
+ : ResourceBinding::ResourceType::kStorageBuffer;
+ entry.bind_group = binding_info.group->value;
+ entry.binding = binding_info.binding->value;
+ entry.size = unwrapped_type->Size();
+ if (auto* str = unwrapped_type->As<sem::Struct>()) {
+ entry.size_no_padding = str->SizeNoPadding();
+ } else {
+ entry.size_no_padding = entry.size;
+ }
+
+ result.push_back(entry);
+ }
- return result;
+ return result;
}
std::vector<ResourceBinding> Inspector::GetSampledTextureResourceBindingsImpl(
const std::string& entry_point,
bool multisampled_only) {
- auto* func = FindEntryPointByName(entry_point);
- if (!func) {
- return {};
- }
-
- std::vector<ResourceBinding> result;
- auto* func_sem = program_->Sem().Get(func);
- auto referenced_variables =
- multisampled_only
- ? func_sem->TransitivelyReferencedMultisampledTextureVariables()
- : func_sem->TransitivelyReferencedSampledTextureVariables();
- for (auto& ref : referenced_variables) {
- auto* var = ref.first;
- auto binding_info = ref.second;
-
- ResourceBinding entry;
- entry.resource_type =
- multisampled_only ? ResourceBinding::ResourceType::kMultisampledTexture
- : ResourceBinding::ResourceType::kSampledTexture;
- entry.bind_group = binding_info.group->value;
- entry.binding = binding_info.binding->value;
-
- auto* texture_type = var->Type()->UnwrapRef()->As<sem::Texture>();
- entry.dim = TypeTextureDimensionToResourceBindingTextureDimension(
- texture_type->dim());
-
- const sem::Type* base_type = nullptr;
- if (multisampled_only) {
- base_type = texture_type->As<sem::MultisampledTexture>()->type();
- } else {
- base_type = texture_type->As<sem::SampledTexture>()->type();
- }
- entry.sampled_kind = BaseTypeToSampledKind(base_type);
+ auto* func = FindEntryPointByName(entry_point);
+ if (!func) {
+ return {};
+ }
+
+ std::vector<ResourceBinding> result;
+ auto* func_sem = program_->Sem().Get(func);
+ auto referenced_variables = multisampled_only
+ ? func_sem->TransitivelyReferencedMultisampledTextureVariables()
+ : func_sem->TransitivelyReferencedSampledTextureVariables();
+ for (auto& ref : referenced_variables) {
+ auto* var = ref.first;
+ auto binding_info = ref.second;
+
+ ResourceBinding entry;
+ entry.resource_type = multisampled_only
+ ? ResourceBinding::ResourceType::kMultisampledTexture
+ : ResourceBinding::ResourceType::kSampledTexture;
+ entry.bind_group = binding_info.group->value;
+ entry.binding = binding_info.binding->value;
+
+ auto* texture_type = var->Type()->UnwrapRef()->As<sem::Texture>();
+ entry.dim = TypeTextureDimensionToResourceBindingTextureDimension(texture_type->dim());
+
+ const sem::Type* base_type = nullptr;
+ if (multisampled_only) {
+ base_type = texture_type->As<sem::MultisampledTexture>()->type();
+ } else {
+ base_type = texture_type->As<sem::SampledTexture>()->type();
+ }
+ entry.sampled_kind = BaseTypeToSampledKind(base_type);
- result.push_back(entry);
- }
+ result.push_back(entry);
+ }
- return result;
+ return result;
}
std::vector<ResourceBinding> Inspector::GetStorageTextureResourceBindingsImpl(
const std::string& entry_point) {
- auto* func = FindEntryPointByName(entry_point);
- if (!func) {
- return {};
- }
+ auto* func = FindEntryPointByName(entry_point);
+ if (!func) {
+ return {};
+ }
- auto* func_sem = program_->Sem().Get(func);
- std::vector<ResourceBinding> result;
- for (auto& ref :
- func_sem->TransitivelyReferencedVariablesOfType<sem::StorageTexture>()) {
- auto* var = ref.first;
- auto binding_info = ref.second;
+ auto* func_sem = program_->Sem().Get(func);
+ std::vector<ResourceBinding> result;
+ for (auto& ref : func_sem->TransitivelyReferencedVariablesOfType<sem::StorageTexture>()) {
+ auto* var = ref.first;
+ auto binding_info = ref.second;
- auto* texture_type = var->Type()->UnwrapRef()->As<sem::StorageTexture>();
+ auto* texture_type = var->Type()->UnwrapRef()->As<sem::StorageTexture>();
- ResourceBinding entry;
- entry.resource_type =
- ResourceBinding::ResourceType::kWriteOnlyStorageTexture;
- entry.bind_group = binding_info.group->value;
- entry.binding = binding_info.binding->value;
+ ResourceBinding entry;
+ entry.resource_type = ResourceBinding::ResourceType::kWriteOnlyStorageTexture;
+ entry.bind_group = binding_info.group->value;
+ entry.binding = binding_info.binding->value;
- entry.dim = TypeTextureDimensionToResourceBindingTextureDimension(
- texture_type->dim());
+ entry.dim = TypeTextureDimensionToResourceBindingTextureDimension(texture_type->dim());
- auto* base_type = texture_type->type();
- entry.sampled_kind = BaseTypeToSampledKind(base_type);
- entry.image_format = TypeTexelFormatToResourceBindingTexelFormat(
- texture_type->texel_format());
+ auto* base_type = texture_type->type();
+ entry.sampled_kind = BaseTypeToSampledKind(base_type);
+ entry.image_format =
+ TypeTexelFormatToResourceBindingTexelFormat(texture_type->texel_format());
- result.push_back(entry);
- }
+ result.push_back(entry);
+ }
- return result;
+ return result;
}
void Inspector::GenerateSamplerTargets() {
- // Do not re-generate, since |program_| should not change during the lifetime
- // of the inspector.
- if (sampler_targets_ != nullptr) {
- return;
- }
-
- sampler_targets_ = std::make_unique<std::unordered_map<
- std::string, utils::UniqueVector<sem::SamplerTexturePair>>>();
-
- auto& sem = program_->Sem();
-
- for (auto* node : program_->ASTNodes().Objects()) {
- auto* c = node->As<ast::CallExpression>();
- if (!c) {
- continue;
+ // Do not re-generate, since |program_| should not change during the lifetime
+ // of the inspector.
+ if (sampler_targets_ != nullptr) {
+ return;
}
- auto* call = sem.Get(c);
- if (!call) {
- continue;
- }
+ sampler_targets_ = std::make_unique<
+ std::unordered_map<std::string, utils::UniqueVector<sem::SamplerTexturePair>>>();
- auto* i = call->Target()->As<sem::Builtin>();
- if (!i) {
- continue;
- }
+ auto& sem = program_->Sem();
- const auto& signature = i->Signature();
- int sampler_index = signature.IndexOf(sem::ParameterUsage::kSampler);
- if (sampler_index == -1) {
- continue;
- }
+ for (auto* node : program_->ASTNodes().Objects()) {
+ auto* c = node->As<ast::CallExpression>();
+ if (!c) {
+ continue;
+ }
- int texture_index = signature.IndexOf(sem::ParameterUsage::kTexture);
- if (texture_index == -1) {
- continue;
- }
+ auto* call = sem.Get(c)->UnwrapMaterialize()->As<sem::Call>();
+ if (!call) {
+ continue;
+ }
- auto* call_func = call->Stmt()->Function();
- std::vector<const sem::Function*> entry_points;
- if (call_func->Declaration()->IsEntryPoint()) {
- entry_points = {call_func};
- } else {
- entry_points = call_func->AncestorEntryPoints();
- }
+ auto* i = call->Target()->As<sem::Builtin>();
+ if (!i) {
+ continue;
+ }
- if (entry_points.empty()) {
- continue;
- }
+ const auto& signature = i->Signature();
+ int sampler_index = signature.IndexOf(sem::ParameterUsage::kSampler);
+ if (sampler_index == -1) {
+ continue;
+ }
- auto* t = c->args[texture_index];
- auto* s = c->args[sampler_index];
+ int texture_index = signature.IndexOf(sem::ParameterUsage::kTexture);
+ if (texture_index == -1) {
+ continue;
+ }
- GetOriginatingResources(
- std::array<const ast::Expression*, 2>{t, s},
- [&](std::array<const sem::GlobalVariable*, 2> globals) {
- auto* texture = globals[0];
- sem::BindingPoint texture_binding_point = {
- texture->Declaration()->BindingPoint().group->value,
- texture->Declaration()->BindingPoint().binding->value};
+ auto* call_func = call->Stmt()->Function();
+ std::vector<const sem::Function*> entry_points;
+ if (call_func->Declaration()->IsEntryPoint()) {
+ entry_points = {call_func};
+ } else {
+ entry_points = call_func->AncestorEntryPoints();
+ }
- auto* sampler = globals[1];
- sem::BindingPoint sampler_binding_point = {
- sampler->Declaration()->BindingPoint().group->value,
- sampler->Declaration()->BindingPoint().binding->value};
+ if (entry_points.empty()) {
+ continue;
+ }
- for (auto* entry_point : entry_points) {
- const auto& ep_name =
- program_->Symbols().NameFor(entry_point->Declaration()->symbol);
- (*sampler_targets_)[ep_name].add(
- {sampler_binding_point, texture_binding_point});
- }
- });
- }
+ auto* t = c->args[texture_index];
+ auto* s = c->args[sampler_index];
+
+ GetOriginatingResources(std::array<const ast::Expression*, 2>{t, s},
+ [&](std::array<const sem::GlobalVariable*, 2> globals) {
+ auto* texture = globals[0];
+ sem::BindingPoint texture_binding_point = {
+ texture->Declaration()->BindingPoint().group->value,
+ texture->Declaration()->BindingPoint().binding->value};
+
+ auto* sampler = globals[1];
+ sem::BindingPoint sampler_binding_point = {
+ sampler->Declaration()->BindingPoint().group->value,
+ sampler->Declaration()->BindingPoint().binding->value};
+
+ for (auto* entry_point : entry_points) {
+ const auto& ep_name = program_->Symbols().NameFor(
+ entry_point->Declaration()->symbol);
+ (*sampler_targets_)[ep_name].add(
+ {sampler_binding_point, texture_binding_point});
+ }
+ });
+ }
}
template <size_t N, typename F>
-void Inspector::GetOriginatingResources(
- std::array<const ast::Expression*, N> exprs,
- F&& callback) {
- if (!program_->IsValid()) {
- TINT_ICE(Inspector, diagnostics_)
- << "attempting to get originating resources in invalid program";
- return;
- }
-
- auto& sem = program_->Sem();
-
- std::array<const sem::GlobalVariable*, N> globals{};
- std::array<const sem::Parameter*, N> parameters{};
- utils::UniqueVector<const ast::CallExpression*> callsites;
-
- for (size_t i = 0; i < N; i++) {
- auto*& expr = exprs[i];
- // Resolve each of the expressions
- while (true) {
- if (auto* user = sem.Get<sem::VariableUser>(expr)) {
- auto* var = user->Variable();
-
- if (auto* global = tint::As<sem::GlobalVariable>(var)) {
- // Found the global resource declaration.
- globals[i] = global;
- break; // Done with this expression.
- }
-
- if (auto* local = tint::As<sem::LocalVariable>(var)) {
- // Chase the variable
- expr = local->Declaration()->constructor;
- if (!expr) {
+void Inspector::GetOriginatingResources(std::array<const ast::Expression*, N> exprs, F&& callback) {
+ if (!program_->IsValid()) {
+ TINT_ICE(Inspector, diagnostics_)
+ << "attempting to get originating resources in invalid program";
+ return;
+ }
+
+ auto& sem = program_->Sem();
+
+ std::array<const sem::GlobalVariable*, N> globals{};
+ std::array<const sem::Parameter*, N> parameters{};
+ utils::UniqueVector<const ast::CallExpression*> callsites;
+
+ for (size_t i = 0; i < N; i++) {
+ const sem::Variable* source_var = sem.Get(exprs[i])->SourceVariable();
+ if (auto* global = source_var->As<sem::GlobalVariable>()) {
+ globals[i] = global;
+ } else if (auto* param = source_var->As<sem::Parameter>()) {
+ auto* func = tint::As<sem::Function>(param->Owner());
+ if (func->CallSites().empty()) {
+ // One or more of the expressions is a parameter, but this function
+ // is not called. Ignore.
+ return;
+ }
+ for (auto* call : func->CallSites()) {
+ callsites.add(call->Declaration());
+ }
+ parameters[i] = param;
+ } else {
TINT_ICE(Inspector, diagnostics_)
- << "resource variable had no initializer";
+ << "cannot resolve originating resource with expression type "
+ << exprs[i]->TypeInfo().name;
return;
- }
- continue; // Continue chasing the expression in this function
}
+ }
- if (auto* param = tint::As<sem::Parameter>(var)) {
- // Gather each of the callers of this function
- auto* func = tint::As<sem::Function>(param->Owner());
- if (func->CallSites().empty()) {
- // One or more of the expressions is a parameter, but this function
- // is not called. Ignore.
- return;
- }
- for (auto* call : func->CallSites()) {
- callsites.add(call->Declaration());
- }
- // Need to evaluate each function call with the group of
- // expressions, so move on to the next expression.
- parameters[i] = param;
- break;
+ if (callsites.size()) {
+ for (auto* call_expr : callsites) {
+ // Make a copy of the expressions for this callsite
+ std::array<const ast::Expression*, N> call_exprs = exprs;
+ // Patch all the parameter expressions with their argument
+ for (size_t i = 0; i < N; i++) {
+ if (auto* param = parameters[i]) {
+ call_exprs[i] = call_expr->args[param->Index()];
+ }
+ }
+            // Now call GetOriginatingResources() with the expressions from the callsite
+ GetOriginatingResources(call_exprs, callback);
}
-
- TINT_ICE(Inspector, diagnostics_)
- << "unexpected variable type " << var->TypeInfo().name;
- }
-
- if (auto* unary = tint::As<ast::UnaryOpExpression>(expr)) {
- switch (unary->op) {
- case ast::UnaryOp::kAddressOf:
- case ast::UnaryOp::kIndirection:
- // `*` and `&` are the only valid unary ops for a resource type,
- // and must be balanced in order for the program to have passed
- // validation. Just skip past these.
- expr = unary->expr;
- continue;
- default: {
- TINT_ICE(Inspector, diagnostics_)
- << "unexpected unary op on resource: " << unary->op;
- return;
- }
- }
- }
-
- TINT_ICE(Inspector, diagnostics_)
- << "cannot resolve originating resource with expression type "
- << expr->TypeInfo().name;
- return;
- }
- }
-
- if (callsites.size()) {
- for (auto* call_expr : callsites) {
- // Make a copy of the expressions for this callsite
- std::array<const ast::Expression*, N> call_exprs = exprs;
- // Patch all the parameter expressions with their argument
- for (size_t i = 0; i < N; i++) {
- if (auto* param = parameters[i]) {
- call_exprs[i] = call_expr->args[param->Index()];
- }
- }
- // Now call GetOriginatingResources() with from the callsite
- GetOriginatingResources(call_exprs, callback);
- }
- } else {
- // All the expressions resolved to globals
- callback(globals);
- }
+ } else {
+ // All the expressions resolved to globals
+ callback(globals);
+ }
}
} // namespace tint::inspector
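
The inspector.cc hunk above is dominated by the move to 4-space indentation, plus the new GetUsedExtensionNames()/GetEnableDirectives() queries and the SourceVariable()-based rewrite of GetOriginatingResources(). As a rough orientation aid, here is a minimal usage sketch of the queried API; it is not part of the patch, and it assumes the "tint/tint.h" umbrella header included by the tests further below, a Tint build with the WGSL reader enabled, and the @stage(compute) attribute spelling of this era, any of which may differ between revisions.

// Minimal sketch of the Inspector entry points exercised above. Assumptions:
// "tint/tint.h" exposes the WGSL reader (TINT_BUILD_WGSL_READER) and the
// @stage(compute) spelling is accepted by this Tint revision.
#include <iostream>

#include "tint/tint.h"

int main() {
    tint::Source::File file("example.wgsl", R"(
        struct Data { values : array<u32, 64> }
        @group(0) @binding(0) var<storage, read_write> data : Data;
        var<workgroup> scratch : array<u32, 64>;

        @stage(compute) @workgroup_size(64)
        fn main() { scratch[0] = data.values[0]; }
    )");

    tint::Program program = tint::reader::wgsl::Parse(&file);
    if (!program.IsValid()) {
        std::cerr << program.Diagnostics().str() << std::endl;
        return 1;
    }

    tint::inspector::Inspector inspector(&program);

    // GetResourceBindings() concatenates every per-category getter shown
    // above, so the storage buffer at @group(0) @binding(0) appears once.
    for (const auto& rb : inspector.GetResourceBindings("main")) {
        std::cout << "group=" << rb.bind_group << " binding=" << rb.binding
                  << " size=" << rb.size << std::endl;
    }

    // GetWorkgroupStorageSize() rounds each workgroup variable's size up to
    // its alignment: the 64 x u32 array is 256 bytes, 4-byte aligned -> 256.
    std::cout << "workgroup bytes: "
              << inspector.GetWorkgroupStorageSize("main") << std::endl;
    return 0;
}

For the shader in this sketch, GetWorkgroupStorageSize() applies utils::RoundUp(align, size) to the single workgroup variable, and 256 bytes rounded up to a 4-byte alignment stays 256.
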
diff --git a/chromium/third_party/dawn/src/tint/inspector/inspector.h b/chromium/third_party/dawn/src/tint/inspector/inspector.h
index 49896dfe9aa..a5aee178ea0 100644
--- a/chromium/third_party/dawn/src/tint/inspector/inspector.h
+++ b/chromium/third_party/dawn/src/tint/inspector/inspector.h
@@ -20,6 +20,7 @@
#include <string>
#include <tuple>
#include <unordered_map>
+#include <utility>
#include <vector>
#include "src/tint/inspector/entry_point.h"
@@ -36,195 +37,196 @@ using SamplerTexturePair = sem::SamplerTexturePair;
/// Extracts information from a program
class Inspector {
- public:
- /// Constructor
- /// @param program Shader program to extract information from.
- explicit Inspector(const Program* program);
-
- /// Destructor
- ~Inspector();
-
- /// @returns error messages from the Inspector
- std::string error() { return diagnostics_.str(); }
- /// @returns true if an error was encountered
- bool has_error() const { return diagnostics_.contains_errors(); }
-
- /// @returns vector of entry point information
- std::vector<EntryPoint> GetEntryPoints();
-
- /// @returns map of const_id to initial value
- std::map<uint32_t, Scalar> GetConstantIDs();
-
- /// @returns map of module-constant name to pipeline constant ID
- std::map<std::string, uint32_t> GetConstantNameToIdMap();
-
- /// @param entry_point name of the entry point to get information about.
- /// @returns the total size of shared storage required by an entry point,
- /// including all uniform storage buffers.
- uint32_t GetStorageSize(const std::string& entry_point);
-
- /// @param entry_point name of the entry point to get information about.
- /// @returns vector of all of the resource bindings.
- std::vector<ResourceBinding> GetResourceBindings(
- const std::string& entry_point);
-
- /// @param entry_point name of the entry point to get information about.
- /// @returns vector of all of the bindings for uniform buffers.
- std::vector<ResourceBinding> GetUniformBufferResourceBindings(
- const std::string& entry_point);
-
- /// @param entry_point name of the entry point to get information about.
- /// @returns vector of all of the bindings for storage buffers.
- std::vector<ResourceBinding> GetStorageBufferResourceBindings(
- const std::string& entry_point);
-
- /// @param entry_point name of the entry point to get information about.
- /// @returns vector of all of the bindings for read-only storage buffers.
- std::vector<ResourceBinding> GetReadOnlyStorageBufferResourceBindings(
- const std::string& entry_point);
-
- /// @param entry_point name of the entry point to get information about.
- /// @returns vector of all of the bindings for regular samplers.
- std::vector<ResourceBinding> GetSamplerResourceBindings(
- const std::string& entry_point);
-
- /// @param entry_point name of the entry point to get information about.
- /// @returns vector of all of the bindings for comparison samplers.
- std::vector<ResourceBinding> GetComparisonSamplerResourceBindings(
- const std::string& entry_point);
-
- /// @param entry_point name of the entry point to get information about.
- /// @returns vector of all of the bindings for sampled textures.
- std::vector<ResourceBinding> GetSampledTextureResourceBindings(
- const std::string& entry_point);
-
- /// @param entry_point name of the entry point to get information about.
- /// @returns vector of all of the bindings for multisampled textures.
- std::vector<ResourceBinding> GetMultisampledTextureResourceBindings(
- const std::string& entry_point);
-
- /// @param entry_point name of the entry point to get information about.
- /// @returns vector of all of the bindings for write-only storage textures.
- std::vector<ResourceBinding> GetWriteOnlyStorageTextureResourceBindings(
- const std::string& entry_point);
-
- /// @param entry_point name of the entry point to get information about.
- /// @returns vector of all of the bindings for depth textures.
- std::vector<ResourceBinding> GetDepthTextureResourceBindings(
- const std::string& entry_point);
-
- /// @param entry_point name of the entry point to get information about.
- /// @returns vector of all of the bindings for depth textures.
- std::vector<ResourceBinding> GetDepthMultisampledTextureResourceBindings(
- const std::string& entry_point);
-
- /// @param entry_point name of the entry point to get information about.
- /// @returns vector of all of the bindings for external textures.
- std::vector<ResourceBinding> GetExternalTextureResourceBindings(
- const std::string& entry_point);
-
- /// @param entry_point name of the entry point to get information about.
- /// @returns vector of all of the sampler/texture sampling pairs that are used
- /// by that entry point.
- std::vector<sem::SamplerTexturePair> GetSamplerTextureUses(
- const std::string& entry_point);
-
- /// @param entry_point name of the entry point to get information about.
- /// @param placeholder the sampler binding point to use for texture-only
- /// access (e.g., textureLoad)
- /// @returns vector of all of the sampler/texture sampling pairs that are used
- /// by that entry point.
- std::vector<sem::SamplerTexturePair> GetSamplerTextureUses(
- const std::string& entry_point,
- const sem::BindingPoint& placeholder);
-
- /// @param entry_point name of the entry point to get information about.
- /// @returns the total size in bytes of all Workgroup storage-class storage
- /// referenced transitively by the entry point.
- uint32_t GetWorkgroupStorageSize(const std::string& entry_point);
-
- private:
- const Program* program_;
- diag::List diagnostics_;
- std::unique_ptr<
- std::unordered_map<std::string,
- utils::UniqueVector<sem::SamplerTexturePair>>>
- sampler_targets_;
-
- /// @param name name of the entry point to find
- /// @returns a pointer to the entry point if it exists, otherwise returns
- /// nullptr and sets the error string.
- const ast::Function* FindEntryPointByName(const std::string& name);
-
- /// Recursively add entry point IO variables.
- /// If `type` is a struct, recurse into members, appending the member name.
- /// Otherwise, add the variable unless it is a builtin.
- /// @param name the name of the variable being added
- /// @param type the type of the variable
- /// @param attributes the variable attributes
- /// @param variables the list to add the variables to
- void AddEntryPointInOutVariables(std::string name,
- const sem::Type* type,
- const ast::AttributeList& attributes,
- std::vector<StageVariable>& variables) const;
-
- /// Recursively determine if the type contains builtin.
- /// If `type` is a struct, recurse into members to check for the attribute.
- /// Otherwise, check `attributes` for the attribute.
- bool ContainsBuiltin(ast::Builtin builtin,
- const sem::Type* type,
- const ast::AttributeList& attributes) const;
-
- /// Gathers all the texture resource bindings of the given type for the given
- /// entry point.
- /// @param entry_point name of the entry point to get information about.
- /// @param texture_type the type of the textures to gather.
- /// @param resource_type the ResourceBinding::ResourceType for the given
- /// texture type.
- /// @returns vector of all of the bindings for depth textures.
- std::vector<ResourceBinding> GetTextureResourceBindings(
- const std::string& entry_point,
- const tint::TypeInfo* texture_type,
- ResourceBinding::ResourceType resource_type);
-
- /// @param entry_point name of the entry point to get information about.
- /// @param read_only if true get only read-only bindings, if false get
- /// write-only bindings.
- /// @returns vector of all of the bindings for the requested storage buffers.
- std::vector<ResourceBinding> GetStorageBufferResourceBindingsImpl(
- const std::string& entry_point,
- bool read_only);
-
- /// @param entry_point name of the entry point to get information about.
- /// @param multisampled_only only get multisampled textures if true, otherwise
- /// only get sampled textures.
- /// @returns vector of all of the bindings for the request storage buffers.
- std::vector<ResourceBinding> GetSampledTextureResourceBindingsImpl(
- const std::string& entry_point,
- bool multisampled_only);
-
- /// @param entry_point name of the entry point to get information about.
- /// @returns vector of all of the bindings for the requested storage textures.
- std::vector<ResourceBinding> GetStorageTextureResourceBindingsImpl(
- const std::string& entry_point);
-
- /// Constructs |sampler_targets_| if it hasn't already been instantiated.
- void GenerateSamplerTargets();
-
- /// For a N-uple of expressions, resolve to the appropriate global resources
- /// and call 'cb'.
- /// 'cb' may be called multiple times.
- /// Assumes that not being able to resolve the resources is an error, so will
- /// invoke TINT_ICE when that occurs.
- /// @tparam N number of expressions in the n-uple
- /// @tparam F type of the callback provided.
- /// @param exprs N-uple of expressions to resolve.
- /// @param cb is a callback function with the signature:
- /// `void(std::array<const sem::GlobalVariable*, N>)`, which is invoked
- /// whenever a set of expressions are resolved to globals.
- template <size_t N, typename F>
- void GetOriginatingResources(std::array<const ast::Expression*, N> exprs,
- F&& cb);
+ public:
+ /// Constructor
+ /// @param program Shader program to extract information from.
+ explicit Inspector(const Program* program);
+
+ /// Destructor
+ ~Inspector();
+
+ /// @returns error messages from the Inspector
+ std::string error() { return diagnostics_.str(); }
+ /// @returns true if an error was encountered
+ bool has_error() const { return diagnostics_.contains_errors(); }
+
+ /// @returns vector of entry point information
+ std::vector<EntryPoint> GetEntryPoints();
+
+ /// @returns map of const_id to initial value
+ std::map<uint32_t, Scalar> GetConstantIDs();
+
+ /// @returns map of module-constant name to pipeline constant ID
+ std::map<std::string, uint32_t> GetConstantNameToIdMap();
+
+ /// @param entry_point name of the entry point to get information about.
+ /// @returns the total size of shared storage required by an entry point,
+    /// including all uniform and storage buffers.
+ uint32_t GetStorageSize(const std::string& entry_point);
+
+ /// @param entry_point name of the entry point to get information about.
+ /// @returns vector of all of the resource bindings.
+ std::vector<ResourceBinding> GetResourceBindings(const std::string& entry_point);
+
+ /// @param entry_point name of the entry point to get information about.
+ /// @returns vector of all of the bindings for uniform buffers.
+ std::vector<ResourceBinding> GetUniformBufferResourceBindings(const std::string& entry_point);
+
+ /// @param entry_point name of the entry point to get information about.
+ /// @returns vector of all of the bindings for storage buffers.
+ std::vector<ResourceBinding> GetStorageBufferResourceBindings(const std::string& entry_point);
+
+ /// @param entry_point name of the entry point to get information about.
+ /// @returns vector of all of the bindings for read-only storage buffers.
+ std::vector<ResourceBinding> GetReadOnlyStorageBufferResourceBindings(
+ const std::string& entry_point);
+
+ /// @param entry_point name of the entry point to get information about.
+ /// @returns vector of all of the bindings for regular samplers.
+ std::vector<ResourceBinding> GetSamplerResourceBindings(const std::string& entry_point);
+
+ /// @param entry_point name of the entry point to get information about.
+ /// @returns vector of all of the bindings for comparison samplers.
+ std::vector<ResourceBinding> GetComparisonSamplerResourceBindings(
+ const std::string& entry_point);
+
+ /// @param entry_point name of the entry point to get information about.
+ /// @returns vector of all of the bindings for sampled textures.
+ std::vector<ResourceBinding> GetSampledTextureResourceBindings(const std::string& entry_point);
+
+ /// @param entry_point name of the entry point to get information about.
+ /// @returns vector of all of the bindings for multisampled textures.
+ std::vector<ResourceBinding> GetMultisampledTextureResourceBindings(
+ const std::string& entry_point);
+
+ /// @param entry_point name of the entry point to get information about.
+ /// @returns vector of all of the bindings for write-only storage textures.
+ std::vector<ResourceBinding> GetWriteOnlyStorageTextureResourceBindings(
+ const std::string& entry_point);
+
+ /// @param entry_point name of the entry point to get information about.
+ /// @returns vector of all of the bindings for depth textures.
+ std::vector<ResourceBinding> GetDepthTextureResourceBindings(const std::string& entry_point);
+
+ /// @param entry_point name of the entry point to get information about.
+    /// @returns vector of all of the bindings for depth multisampled textures.
+ std::vector<ResourceBinding> GetDepthMultisampledTextureResourceBindings(
+ const std::string& entry_point);
+
+ /// @param entry_point name of the entry point to get information about.
+ /// @returns vector of all of the bindings for external textures.
+ std::vector<ResourceBinding> GetExternalTextureResourceBindings(const std::string& entry_point);
+
+ /// @param entry_point name of the entry point to get information about.
+ /// @returns vector of all of the sampler/texture sampling pairs that are used
+ /// by that entry point.
+ std::vector<sem::SamplerTexturePair> GetSamplerTextureUses(const std::string& entry_point);
+
+ /// @param entry_point name of the entry point to get information about.
+ /// @param placeholder the sampler binding point to use for texture-only
+ /// access (e.g., textureLoad)
+ /// @returns vector of all of the sampler/texture sampling pairs that are used
+ /// by that entry point.
+ std::vector<sem::SamplerTexturePair> GetSamplerTextureUses(
+ const std::string& entry_point,
+ const sem::BindingPoint& placeholder);
+
+ /// @param entry_point name of the entry point to get information about.
+ /// @returns the total size in bytes of all Workgroup storage-class storage
+ /// referenced transitively by the entry point.
+ uint32_t GetWorkgroupStorageSize(const std::string& entry_point);
+
+ /// @returns vector of all valid extension names used by the program. There
+ /// will be no duplicated names in the returned vector even if an extension
+ /// is enabled multiple times.
+ std::vector<std::string> GetUsedExtensionNames();
+
+    /// @returns vector of all enable directives used by the program, each
+    /// represented by a std::pair<std::string, Source> holding the extension
+    /// name and the source location of that name. There may be multiple
+    /// enable directives for the same extension.
+ std::vector<std::pair<std::string, Source>> GetEnableDirectives();
+
+ private:
+ const Program* program_;
+ diag::List diagnostics_;
+ std::unique_ptr<std::unordered_map<std::string, utils::UniqueVector<sem::SamplerTexturePair>>>
+ sampler_targets_;
+
+ /// @param name name of the entry point to find
+ /// @returns a pointer to the entry point if it exists, otherwise returns
+ /// nullptr and sets the error string.
+ const ast::Function* FindEntryPointByName(const std::string& name);
+
+ /// Recursively add entry point IO variables.
+ /// If `type` is a struct, recurse into members, appending the member name.
+ /// Otherwise, add the variable unless it is a builtin.
+ /// @param name the name of the variable being added
+ /// @param type the type of the variable
+ /// @param attributes the variable attributes
+ /// @param variables the list to add the variables to
+ void AddEntryPointInOutVariables(std::string name,
+ const sem::Type* type,
+ const ast::AttributeList& attributes,
+ std::vector<StageVariable>& variables) const;
+
+ /// Recursively determine if the type contains builtin.
+ /// If `type` is a struct, recurse into members to check for the attribute.
+ /// Otherwise, check `attributes` for the attribute.
+ bool ContainsBuiltin(ast::Builtin builtin,
+ const sem::Type* type,
+ const ast::AttributeList& attributes) const;
+
+ /// Gathers all the texture resource bindings of the given type for the given
+ /// entry point.
+ /// @param entry_point name of the entry point to get information about.
+ /// @param texture_type the type of the textures to gather.
+ /// @param resource_type the ResourceBinding::ResourceType for the given
+ /// texture type.
+    /// @returns vector of all of the bindings for the given texture type.
+ std::vector<ResourceBinding> GetTextureResourceBindings(
+ const std::string& entry_point,
+ const tint::TypeInfo* texture_type,
+ ResourceBinding::ResourceType resource_type);
+
+ /// @param entry_point name of the entry point to get information about.
+ /// @param read_only if true get only read-only bindings, if false get
+ /// write-only bindings.
+ /// @returns vector of all of the bindings for the requested storage buffers.
+ std::vector<ResourceBinding> GetStorageBufferResourceBindingsImpl(
+ const std::string& entry_point,
+ bool read_only);
+
+ /// @param entry_point name of the entry point to get information about.
+ /// @param multisampled_only only get multisampled textures if true, otherwise
+ /// only get sampled textures.
+    /// @returns vector of all of the bindings for the requested sampled textures.
+ std::vector<ResourceBinding> GetSampledTextureResourceBindingsImpl(
+ const std::string& entry_point,
+ bool multisampled_only);
+
+ /// @param entry_point name of the entry point to get information about.
+ /// @returns vector of all of the bindings for the requested storage textures.
+ std::vector<ResourceBinding> GetStorageTextureResourceBindingsImpl(
+ const std::string& entry_point);
+
+ /// Constructs |sampler_targets_| if it hasn't already been instantiated.
+ void GenerateSamplerTargets();
+
+    /// For an N-tuple of expressions, resolve to the appropriate global resources
+ /// and call 'cb'.
+ /// 'cb' may be called multiple times.
+    /// Assumes that not being able to resolve the resources is an error, so it
+    /// will invoke TINT_ICE when that occurs.
+    /// @tparam N number of expressions in the N-tuple
+ /// @tparam F type of the callback provided.
+ /// @param exprs N-uple of expressions to resolve.
+ /// @param cb is a callback function with the signature:
+ /// `void(std::array<const sem::GlobalVariable*, N>)`, which is invoked
+ /// whenever a set of expressions are resolved to globals.
+ template <size_t N, typename F>
+ void GetOriginatingResources(std::array<const ast::Expression*, N> exprs, F&& cb);
};
} // namespace tint::inspector
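
Alongside the reformat, the header gains declarations for the two extension queries implemented earlier in this patch. A hedged sketch of how a caller might consume them follows; the field access directive.second.range.begin.line is an assumption about tint::Source in this revision rather than something stated by the patch.

// Hypothetical helper, not part of the patch: prints both extension views of
// an already-parsed program. Assumes tint::Source carries a range with
// begin.line, which may differ between Tint revisions.
#include <iostream>

#include "tint/tint.h"

void DumpExtensionInfo(const tint::Program& program) {
    tint::inspector::Inspector inspector(&program);

    // De-duplicated view: one name per extension, however often it is enabled.
    for (const auto& name : inspector.GetUsedExtensionNames()) {
        std::cout << "uses extension: " << name << std::endl;
    }

    // Directive-level view: one entry per enable directive with the location
    // of its extension name, so duplicates can be reported back to the user.
    for (const auto& directive : inspector.GetEnableDirectives()) {
        std::cout << directive.first << " enabled at line "
                  << directive.second.range.begin.line << std::endl;
    }
}

The split mirrors the implementation above: GetUsedExtensionNames() reads the de-duplicated semantic module extension set, while GetEnableDirectives() walks the AST global declarations and therefore preserves duplicates.
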
diff --git a/chromium/third_party/dawn/src/tint/inspector/inspector_test.cc b/chromium/third_party/dawn/src/tint/inspector/inspector_test.cc
index 6e3ecb3d13a..986e1692aac 100644
--- a/chromium/third_party/dawn/src/tint/inspector/inspector_test.cc
+++ b/chromium/third_party/dawn/src/tint/inspector/inspector_test.cc
@@ -21,13 +21,15 @@
#include "src/tint/inspector/test_inspector_builder.h"
#include "src/tint/inspector/test_inspector_runner.h"
#include "src/tint/program_builder.h"
-#include "src/tint/sem/depth_texture_type.h"
-#include "src/tint/sem/external_texture_type.h"
-#include "src/tint/sem/multisampled_texture_type.h"
-#include "src/tint/sem/sampled_texture_type.h"
+#include "src/tint/sem/depth_texture.h"
+#include "src/tint/sem/external_texture.h"
+#include "src/tint/sem/multisampled_texture.h"
+#include "src/tint/sem/sampled_texture.h"
#include "src/tint/sem/variable.h"
#include "tint/tint.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::inspector {
namespace {
@@ -42,54 +44,43 @@ namespace {
// The returned Inspector from ::Initialize can then be used to test
// expectations.
-class InspectorGetEntryPointTest : public InspectorBuilder,
- public testing::Test {};
+class InspectorGetEntryPointTest : public InspectorBuilder, public testing::Test {};
typedef std::tuple<inspector::ComponentType, inspector::CompositionType>
InspectorGetEntryPointComponentAndCompositionTestParams;
class InspectorGetEntryPointComponentAndCompositionTest
: public InspectorBuilder,
- public testing::TestWithParam<
- InspectorGetEntryPointComponentAndCompositionTestParams> {};
+ public testing::TestWithParam<InspectorGetEntryPointComponentAndCompositionTestParams> {};
struct InspectorGetEntryPointInterpolateTestParams {
- ast::InterpolationType in_type;
- ast::InterpolationSampling in_sampling;
- inspector::InterpolationType out_type;
- inspector::InterpolationSampling out_sampling;
+ ast::InterpolationType in_type;
+ ast::InterpolationSampling in_sampling;
+ inspector::InterpolationType out_type;
+ inspector::InterpolationSampling out_sampling;
};
class InspectorGetEntryPointInterpolateTest
: public InspectorBuilder,
- public testing::TestWithParam<
- InspectorGetEntryPointInterpolateTestParams> {};
-class InspectorGetConstantIDsTest : public InspectorBuilder,
- public testing::Test {};
-class InspectorGetConstantNameToIdMapTest : public InspectorBuilder,
- public testing::Test {};
-class InspectorGetStorageSizeTest : public InspectorBuilder,
- public testing::Test {};
-class InspectorGetResourceBindingsTest : public InspectorBuilder,
- public testing::Test {};
+ public testing::TestWithParam<InspectorGetEntryPointInterpolateTestParams> {};
+class InspectorGetConstantIDsTest : public InspectorBuilder, public testing::Test {};
+class InspectorGetConstantNameToIdMapTest : public InspectorBuilder, public testing::Test {};
+class InspectorGetStorageSizeTest : public InspectorBuilder, public testing::Test {};
+class InspectorGetResourceBindingsTest : public InspectorBuilder, public testing::Test {};
class InspectorGetUniformBufferResourceBindingsTest : public InspectorBuilder,
public testing::Test {};
class InspectorGetStorageBufferResourceBindingsTest : public InspectorBuilder,
public testing::Test {};
-class InspectorGetReadOnlyStorageBufferResourceBindingsTest
- : public InspectorBuilder,
- public testing::Test {};
-class InspectorGetSamplerResourceBindingsTest : public InspectorBuilder,
- public testing::Test {};
-class InspectorGetComparisonSamplerResourceBindingsTest
- : public InspectorBuilder,
- public testing::Test {};
+class InspectorGetReadOnlyStorageBufferResourceBindingsTest : public InspectorBuilder,
+ public testing::Test {};
+class InspectorGetSamplerResourceBindingsTest : public InspectorBuilder, public testing::Test {};
+class InspectorGetComparisonSamplerResourceBindingsTest : public InspectorBuilder,
+ public testing::Test {};
class InspectorGetSampledTextureResourceBindingsTest : public InspectorBuilder,
public testing::Test {};
-class InspectorGetSampledArrayTextureResourceBindingsTest
- : public InspectorBuilder,
- public testing::Test {};
+class InspectorGetSampledArrayTextureResourceBindingsTest : public InspectorBuilder,
+ public testing::Test {};
struct GetSampledTextureTestParams {
- ast::TextureDimension type_dim;
- inspector::ResourceBinding::TextureDimension inspector_dim;
- inspector::ResourceBinding::SampledKind sampled_kind;
+ ast::TextureDimension type_dim;
+ inspector::ResourceBinding::TextureDimension inspector_dim;
+ inspector::ResourceBinding::SampledKind sampled_kind;
};
class InspectorGetSampledTextureResourceBindingsTestWithParam
: public InspectorBuilder,
@@ -97,12 +88,10 @@ class InspectorGetSampledTextureResourceBindingsTestWithParam
class InspectorGetSampledArrayTextureResourceBindingsTestWithParam
: public InspectorBuilder,
public testing::TestWithParam<GetSampledTextureTestParams> {};
-class InspectorGetMultisampledTextureResourceBindingsTest
- : public InspectorBuilder,
- public testing::Test {};
-class InspectorGetMultisampledArrayTextureResourceBindingsTest
- : public InspectorBuilder,
- public testing::Test {};
+class InspectorGetMultisampledTextureResourceBindingsTest : public InspectorBuilder,
+ public testing::Test {};
+class InspectorGetMultisampledArrayTextureResourceBindingsTest : public InspectorBuilder,
+ public testing::Test {};
typedef GetSampledTextureTestParams GetMultisampledTextureTestParams;
class InspectorGetMultisampledArrayTextureResourceBindingsTestWithParam
: public InspectorBuilder,
@@ -113,25 +102,20 @@ class InspectorGetMultisampledTextureResourceBindingsTestWithParam
class InspectorGetStorageTextureResourceBindingsTest : public InspectorBuilder,
public testing::Test {};
struct GetDepthTextureTestParams {
- ast::TextureDimension type_dim;
- inspector::ResourceBinding::TextureDimension inspector_dim;
+ ast::TextureDimension type_dim;
+ inspector::ResourceBinding::TextureDimension inspector_dim;
};
class InspectorGetDepthTextureResourceBindingsTestWithParam
: public InspectorBuilder,
public testing::TestWithParam<GetDepthTextureTestParams> {};
-class InspectorGetDepthMultisampledTextureResourceBindingsTest
- : public InspectorBuilder,
- public testing::Test {};
+class InspectorGetDepthMultisampledTextureResourceBindingsTest : public InspectorBuilder,
+ public testing::Test {};
-typedef std::tuple<ast::TextureDimension, ResourceBinding::TextureDimension>
- DimensionParams;
-typedef std::tuple<ast::TexelFormat,
- ResourceBinding::TexelFormat,
- ResourceBinding::SampledKind>
+typedef std::tuple<ast::TextureDimension, ResourceBinding::TextureDimension> DimensionParams;
+typedef std::tuple<ast::TexelFormat, ResourceBinding::TexelFormat, ResourceBinding::SampledKind>
TexelFormatParams;
-typedef std::tuple<DimensionParams, TexelFormatParams>
- GetStorageTextureTestParams;
+typedef std::tuple<DimensionParams, TexelFormatParams> GetStorageTextureTestParams;
class InspectorGetStorageTextureResourceBindingsTestWithParam
: public InspectorBuilder,
public testing::TestWithParam<GetStorageTextureTestParams> {};
@@ -139,846 +123,799 @@ class InspectorGetStorageTextureResourceBindingsTestWithParam
class InspectorGetExternalTextureResourceBindingsTest : public InspectorBuilder,
public testing::Test {};
-class InspectorGetSamplerTextureUsesTest : public InspectorRunner,
- public testing::Test {};
+class InspectorGetSamplerTextureUsesTest : public InspectorRunner, public testing::Test {};
+
+class InspectorGetWorkgroupStorageSizeTest : public InspectorBuilder, public testing::Test {};
-class InspectorGetWorkgroupStorageSizeTest : public InspectorBuilder,
- public testing::Test {};
+class InspectorGetUsedExtensionNamesTest : public InspectorRunner, public testing::Test {};
+
+class InspectorGetEnableDirectivesTest : public InspectorRunner, public testing::Test {};
// This is a catch all for shaders that have demonstrated regressions/crashes in
// the wild.
class InspectorRegressionTest : public InspectorRunner, public testing::Test {};
TEST_F(InspectorGetEntryPointTest, NoFunctions) {
- Inspector& inspector = Build();
+ Inspector& inspector = Build();
- auto result = inspector.GetEntryPoints();
- ASSERT_FALSE(inspector.has_error()) << inspector.error();
+ auto result = inspector.GetEntryPoints();
+ ASSERT_FALSE(inspector.has_error()) << inspector.error();
- EXPECT_EQ(0u, result.size());
+ EXPECT_EQ(0u, result.size());
}
TEST_F(InspectorGetEntryPointTest, NoEntryPoints) {
- Inspector& inspector = Build();
+ Inspector& inspector = Build();
- auto result = inspector.GetEntryPoints();
- ASSERT_FALSE(inspector.has_error()) << inspector.error();
+ auto result = inspector.GetEntryPoints();
+ ASSERT_FALSE(inspector.has_error()) << inspector.error();
- EXPECT_EQ(0u, result.size());
+ EXPECT_EQ(0u, result.size());
}
TEST_F(InspectorGetEntryPointTest, OneEntryPoint) {
- MakeEmptyBodyFunction("foo", ast::AttributeList{
- Stage(ast::PipelineStage::kFragment),
- });
+ MakeEmptyBodyFunction("foo", ast::AttributeList{
+ Stage(ast::PipelineStage::kFragment),
+ });
- // TODO(dsinclair): Update to run the namer transform when available.
+ // TODO(dsinclair): Update to run the namer transform when available.
- Inspector& inspector = Build();
+ Inspector& inspector = Build();
- auto result = inspector.GetEntryPoints();
- ASSERT_FALSE(inspector.has_error()) << inspector.error();
+ auto result = inspector.GetEntryPoints();
+ ASSERT_FALSE(inspector.has_error()) << inspector.error();
- ASSERT_EQ(1u, result.size());
- EXPECT_EQ("foo", result[0].name);
- EXPECT_EQ("foo", result[0].remapped_name);
- EXPECT_EQ(ast::PipelineStage::kFragment, result[0].stage);
+ ASSERT_EQ(1u, result.size());
+ EXPECT_EQ("foo", result[0].name);
+ EXPECT_EQ("foo", result[0].remapped_name);
+ EXPECT_EQ(ast::PipelineStage::kFragment, result[0].stage);
}
TEST_F(InspectorGetEntryPointTest, MultipleEntryPoints) {
- MakeEmptyBodyFunction("foo", ast::AttributeList{
- Stage(ast::PipelineStage::kFragment),
- });
+ MakeEmptyBodyFunction("foo", ast::AttributeList{
+ Stage(ast::PipelineStage::kFragment),
+ });
- MakeEmptyBodyFunction("bar",
- ast::AttributeList{Stage(ast::PipelineStage::kCompute),
- WorkgroupSize(1)});
+ MakeEmptyBodyFunction(
+ "bar", ast::AttributeList{Stage(ast::PipelineStage::kCompute), WorkgroupSize(1_i)});
- // TODO(dsinclair): Update to run the namer transform when available.
+ // TODO(dsinclair): Update to run the namer transform when available.
- Inspector& inspector = Build();
+ Inspector& inspector = Build();
- auto result = inspector.GetEntryPoints();
- ASSERT_FALSE(inspector.has_error()) << inspector.error();
+ auto result = inspector.GetEntryPoints();
+ ASSERT_FALSE(inspector.has_error()) << inspector.error();
- ASSERT_EQ(2u, result.size());
- EXPECT_EQ("foo", result[0].name);
- EXPECT_EQ("foo", result[0].remapped_name);
- EXPECT_EQ(ast::PipelineStage::kFragment, result[0].stage);
- EXPECT_EQ("bar", result[1].name);
- EXPECT_EQ("bar", result[1].remapped_name);
- EXPECT_EQ(ast::PipelineStage::kCompute, result[1].stage);
+ ASSERT_EQ(2u, result.size());
+ EXPECT_EQ("foo", result[0].name);
+ EXPECT_EQ("foo", result[0].remapped_name);
+ EXPECT_EQ(ast::PipelineStage::kFragment, result[0].stage);
+ EXPECT_EQ("bar", result[1].name);
+ EXPECT_EQ("bar", result[1].remapped_name);
+ EXPECT_EQ(ast::PipelineStage::kCompute, result[1].stage);
}
TEST_F(InspectorGetEntryPointTest, MixFunctionsAndEntryPoints) {
- MakeEmptyBodyFunction("func", {});
+ MakeEmptyBodyFunction("func", {});
- MakeCallerBodyFunction("foo", {"func"},
- ast::AttributeList{Stage(ast::PipelineStage::kCompute),
- WorkgroupSize(1)});
+ MakeCallerBodyFunction(
+ "foo", {"func"},
+ ast::AttributeList{Stage(ast::PipelineStage::kCompute), WorkgroupSize(1_i)});
- MakeCallerBodyFunction("bar", {"func"},
- ast::AttributeList{
- Stage(ast::PipelineStage::kFragment),
- });
+ MakeCallerBodyFunction("bar", {"func"},
+ ast::AttributeList{
+ Stage(ast::PipelineStage::kFragment),
+ });
- // TODO(dsinclair): Update to run the namer transform when available.
+ // TODO(dsinclair): Update to run the namer transform when available.
- Inspector& inspector = Build();
+ Inspector& inspector = Build();
- auto result = inspector.GetEntryPoints();
- EXPECT_FALSE(inspector.has_error());
+ auto result = inspector.GetEntryPoints();
+ EXPECT_FALSE(inspector.has_error());
- ASSERT_EQ(2u, result.size());
- EXPECT_EQ("foo", result[0].name);
- EXPECT_EQ("foo", result[0].remapped_name);
- EXPECT_EQ(ast::PipelineStage::kCompute, result[0].stage);
- EXPECT_EQ("bar", result[1].name);
- EXPECT_EQ("bar", result[1].remapped_name);
- EXPECT_EQ(ast::PipelineStage::kFragment, result[1].stage);
+ ASSERT_EQ(2u, result.size());
+ EXPECT_EQ("foo", result[0].name);
+ EXPECT_EQ("foo", result[0].remapped_name);
+ EXPECT_EQ(ast::PipelineStage::kCompute, result[0].stage);
+ EXPECT_EQ("bar", result[1].name);
+ EXPECT_EQ("bar", result[1].remapped_name);
+ EXPECT_EQ(ast::PipelineStage::kFragment, result[1].stage);
}
TEST_F(InspectorGetEntryPointTest, DefaultWorkgroupSize) {
- MakeEmptyBodyFunction("foo",
- ast::AttributeList{Stage(ast::PipelineStage::kCompute),
- WorkgroupSize(8, 2, 1)});
+ MakeEmptyBodyFunction("foo", ast::AttributeList{Stage(ast::PipelineStage::kCompute),
+ WorkgroupSize(8_i, 2_i, 1_i)});
- Inspector& inspector = Build();
+ Inspector& inspector = Build();
- auto result = inspector.GetEntryPoints();
- ASSERT_FALSE(inspector.has_error()) << inspector.error();
+ auto result = inspector.GetEntryPoints();
+ ASSERT_FALSE(inspector.has_error()) << inspector.error();
- ASSERT_EQ(1u, result.size());
- uint32_t x, y, z;
- std::tie(x, y, z) = result[0].workgroup_size();
- EXPECT_EQ(8u, x);
- EXPECT_EQ(2u, y);
- EXPECT_EQ(1u, z);
+ ASSERT_EQ(1u, result.size());
+ uint32_t x, y, z;
+ std::tie(x, y, z) = result[0].workgroup_size();
+ EXPECT_EQ(8u, x);
+ EXPECT_EQ(2u, y);
+ EXPECT_EQ(1u, z);
}
TEST_F(InspectorGetEntryPointTest, NonDefaultWorkgroupSize) {
- MakeEmptyBodyFunction(
- "foo", {Stage(ast::PipelineStage::kCompute), WorkgroupSize(8, 2, 1)});
+ MakeEmptyBodyFunction("foo",
+ {Stage(ast::PipelineStage::kCompute), WorkgroupSize(8_i, 2_i, 1_i)});
- Inspector& inspector = Build();
+ Inspector& inspector = Build();
- auto result = inspector.GetEntryPoints();
- ASSERT_FALSE(inspector.has_error()) << inspector.error();
+ auto result = inspector.GetEntryPoints();
+ ASSERT_FALSE(inspector.has_error()) << inspector.error();
- ASSERT_EQ(1u, result.size());
- uint32_t x, y, z;
- std::tie(x, y, z) = result[0].workgroup_size();
- EXPECT_EQ(8u, x);
- EXPECT_EQ(2u, y);
- EXPECT_EQ(1u, z);
+ ASSERT_EQ(1u, result.size());
+ uint32_t x, y, z;
+ std::tie(x, y, z) = result[0].workgroup_size();
+ EXPECT_EQ(8u, x);
+ EXPECT_EQ(2u, y);
+ EXPECT_EQ(1u, z);
}
TEST_F(InspectorGetEntryPointTest, NoInOutVariables) {
- MakeEmptyBodyFunction("func", {});
+ MakeEmptyBodyFunction("func", {});
- MakeCallerBodyFunction("foo", {"func"},
- ast::AttributeList{
- Stage(ast::PipelineStage::kFragment),
- });
+ MakeCallerBodyFunction("foo", {"func"},
+ ast::AttributeList{
+ Stage(ast::PipelineStage::kFragment),
+ });
- Inspector& inspector = Build();
+ Inspector& inspector = Build();
- auto result = inspector.GetEntryPoints();
- ASSERT_FALSE(inspector.has_error()) << inspector.error();
+ auto result = inspector.GetEntryPoints();
+ ASSERT_FALSE(inspector.has_error()) << inspector.error();
- ASSERT_EQ(1u, result.size());
- EXPECT_EQ(0u, result[0].input_variables.size());
- EXPECT_EQ(0u, result[0].output_variables.size());
+ ASSERT_EQ(1u, result.size());
+ EXPECT_EQ(0u, result[0].input_variables.size());
+ EXPECT_EQ(0u, result[0].output_variables.size());
}
TEST_P(InspectorGetEntryPointComponentAndCompositionTest, Test) {
- ComponentType component;
- CompositionType composition;
- std::tie(component, composition) = GetParam();
- std::function<const ast::Type*()> tint_type =
- GetTypeFunction(component, composition);
-
- auto* in_var = Param("in_var", tint_type(), {Location(0u), Flat()});
- Func("foo", {in_var}, tint_type(), {Return("in_var")},
- {Stage(ast::PipelineStage::kFragment)}, {Location(0u)});
- Inspector& inspector = Build();
-
- auto result = inspector.GetEntryPoints();
- ASSERT_FALSE(inspector.has_error()) << inspector.error();
-
- ASSERT_EQ(1u, result.size());
-
- ASSERT_EQ(1u, result[0].input_variables.size());
- EXPECT_EQ("in_var", result[0].input_variables[0].name);
- EXPECT_TRUE(result[0].input_variables[0].has_location_attribute);
- EXPECT_EQ(0u, result[0].input_variables[0].location_attribute);
- EXPECT_EQ(component, result[0].input_variables[0].component_type);
-
- ASSERT_EQ(1u, result[0].output_variables.size());
- EXPECT_EQ("<retval>", result[0].output_variables[0].name);
- EXPECT_TRUE(result[0].output_variables[0].has_location_attribute);
- EXPECT_EQ(0u, result[0].output_variables[0].location_attribute);
- EXPECT_EQ(component, result[0].output_variables[0].component_type);
-}
-INSTANTIATE_TEST_SUITE_P(
- InspectorGetEntryPointTest,
- InspectorGetEntryPointComponentAndCompositionTest,
- testing::Combine(testing::Values(ComponentType::kFloat,
- ComponentType::kSInt,
- ComponentType::kUInt),
- testing::Values(CompositionType::kScalar,
- CompositionType::kVec2,
- CompositionType::kVec3,
- CompositionType::kVec4)));
+ ComponentType component;
+ CompositionType composition;
+ std::tie(component, composition) = GetParam();
+ std::function<const ast::Type*()> tint_type = GetTypeFunction(component, composition);
+
+ auto* in_var = Param("in_var", tint_type(), {Location(0u), Flat()});
+ Func("foo", {in_var}, tint_type(), {Return("in_var")}, {Stage(ast::PipelineStage::kFragment)},
+ {Location(0u)});
+ Inspector& inspector = Build();
+
+ auto result = inspector.GetEntryPoints();
+ ASSERT_FALSE(inspector.has_error()) << inspector.error();
+
+ ASSERT_EQ(1u, result.size());
+
+ ASSERT_EQ(1u, result[0].input_variables.size());
+ EXPECT_EQ("in_var", result[0].input_variables[0].name);
+ EXPECT_TRUE(result[0].input_variables[0].has_location_attribute);
+ EXPECT_EQ(0u, result[0].input_variables[0].location_attribute);
+ EXPECT_EQ(component, result[0].input_variables[0].component_type);
+
+ ASSERT_EQ(1u, result[0].output_variables.size());
+ EXPECT_EQ("<retval>", result[0].output_variables[0].name);
+ EXPECT_TRUE(result[0].output_variables[0].has_location_attribute);
+ EXPECT_EQ(0u, result[0].output_variables[0].location_attribute);
+ EXPECT_EQ(component, result[0].output_variables[0].component_type);
+}
+INSTANTIATE_TEST_SUITE_P(InspectorGetEntryPointTest,
+ InspectorGetEntryPointComponentAndCompositionTest,
+ testing::Combine(testing::Values(ComponentType::kFloat,
+ ComponentType::kSInt,
+ ComponentType::kUInt),
+ testing::Values(CompositionType::kScalar,
+ CompositionType::kVec2,
+ CompositionType::kVec3,
+ CompositionType::kVec4)));
TEST_F(InspectorGetEntryPointTest, MultipleInOutVariables) {
- auto* in_var0 = Param("in_var0", ty.u32(), {Location(0u), Flat()});
- auto* in_var1 = Param("in_var1", ty.u32(), {Location(1u), Flat()});
- auto* in_var4 = Param("in_var4", ty.u32(), {Location(4u), Flat()});
- Func("foo", {in_var0, in_var1, in_var4}, ty.u32(), {Return("in_var0")},
- {Stage(ast::PipelineStage::kFragment)}, {Location(0u)});
- Inspector& inspector = Build();
-
- auto result = inspector.GetEntryPoints();
- ASSERT_FALSE(inspector.has_error()) << inspector.error();
-
- ASSERT_EQ(1u, result.size());
-
- ASSERT_EQ(3u, result[0].input_variables.size());
- EXPECT_EQ("in_var0", result[0].input_variables[0].name);
- EXPECT_TRUE(result[0].input_variables[0].has_location_attribute);
- EXPECT_EQ(0u, result[0].input_variables[0].location_attribute);
- EXPECT_EQ(InterpolationType::kFlat,
- result[0].input_variables[0].interpolation_type);
- EXPECT_EQ(ComponentType::kUInt, result[0].input_variables[0].component_type);
- EXPECT_EQ("in_var1", result[0].input_variables[1].name);
- EXPECT_TRUE(result[0].input_variables[1].has_location_attribute);
- EXPECT_EQ(1u, result[0].input_variables[1].location_attribute);
- EXPECT_EQ(InterpolationType::kFlat,
- result[0].input_variables[1].interpolation_type);
- EXPECT_EQ(ComponentType::kUInt, result[0].input_variables[1].component_type);
- EXPECT_EQ("in_var4", result[0].input_variables[2].name);
- EXPECT_TRUE(result[0].input_variables[2].has_location_attribute);
- EXPECT_EQ(4u, result[0].input_variables[2].location_attribute);
- EXPECT_EQ(InterpolationType::kFlat,
- result[0].input_variables[2].interpolation_type);
- EXPECT_EQ(ComponentType::kUInt, result[0].input_variables[2].component_type);
-
- ASSERT_EQ(1u, result[0].output_variables.size());
- EXPECT_EQ("<retval>", result[0].output_variables[0].name);
- EXPECT_TRUE(result[0].output_variables[0].has_location_attribute);
- EXPECT_EQ(0u, result[0].output_variables[0].location_attribute);
- EXPECT_EQ(ComponentType::kUInt, result[0].output_variables[0].component_type);
+ auto* in_var0 = Param("in_var0", ty.u32(), {Location(0u), Flat()});
+ auto* in_var1 = Param("in_var1", ty.u32(), {Location(1u), Flat()});
+ auto* in_var4 = Param("in_var4", ty.u32(), {Location(4u), Flat()});
+ Func("foo", {in_var0, in_var1, in_var4}, ty.u32(), {Return("in_var0")},
+ {Stage(ast::PipelineStage::kFragment)}, {Location(0u)});
+ Inspector& inspector = Build();
+
+ auto result = inspector.GetEntryPoints();
+ ASSERT_FALSE(inspector.has_error()) << inspector.error();
+
+ ASSERT_EQ(1u, result.size());
+
+ ASSERT_EQ(3u, result[0].input_variables.size());
+ EXPECT_EQ("in_var0", result[0].input_variables[0].name);
+ EXPECT_TRUE(result[0].input_variables[0].has_location_attribute);
+ EXPECT_EQ(0u, result[0].input_variables[0].location_attribute);
+ EXPECT_EQ(InterpolationType::kFlat, result[0].input_variables[0].interpolation_type);
+ EXPECT_EQ(ComponentType::kUInt, result[0].input_variables[0].component_type);
+ EXPECT_EQ("in_var1", result[0].input_variables[1].name);
+ EXPECT_TRUE(result[0].input_variables[1].has_location_attribute);
+ EXPECT_EQ(1u, result[0].input_variables[1].location_attribute);
+ EXPECT_EQ(InterpolationType::kFlat, result[0].input_variables[1].interpolation_type);
+ EXPECT_EQ(ComponentType::kUInt, result[0].input_variables[1].component_type);
+ EXPECT_EQ("in_var4", result[0].input_variables[2].name);
+ EXPECT_TRUE(result[0].input_variables[2].has_location_attribute);
+ EXPECT_EQ(4u, result[0].input_variables[2].location_attribute);
+ EXPECT_EQ(InterpolationType::kFlat, result[0].input_variables[2].interpolation_type);
+ EXPECT_EQ(ComponentType::kUInt, result[0].input_variables[2].component_type);
+
+ ASSERT_EQ(1u, result[0].output_variables.size());
+ EXPECT_EQ("<retval>", result[0].output_variables[0].name);
+ EXPECT_TRUE(result[0].output_variables[0].has_location_attribute);
+ EXPECT_EQ(0u, result[0].output_variables[0].location_attribute);
+ EXPECT_EQ(ComponentType::kUInt, result[0].output_variables[0].component_type);
}
TEST_F(InspectorGetEntryPointTest, MultipleEntryPointsInOutVariables) {
- auto* in_var_foo = Param("in_var_foo", ty.u32(), {Location(0u), Flat()});
- Func("foo", {in_var_foo}, ty.u32(), {Return("in_var_foo")},
- {Stage(ast::PipelineStage::kFragment)}, {Location(0u)});
-
- auto* in_var_bar = Param("in_var_bar", ty.u32(), {Location(0u), Flat()});
- Func("bar", {in_var_bar}, ty.u32(), {Return("in_var_bar")},
- {Stage(ast::PipelineStage::kFragment)}, {Location(1u)});
-
- Inspector& inspector = Build();
-
- auto result = inspector.GetEntryPoints();
- ASSERT_FALSE(inspector.has_error()) << inspector.error();
-
- ASSERT_EQ(2u, result.size());
-
- ASSERT_EQ(1u, result[0].input_variables.size());
- EXPECT_EQ("in_var_foo", result[0].input_variables[0].name);
- EXPECT_TRUE(result[0].input_variables[0].has_location_attribute);
- EXPECT_EQ(0u, result[0].input_variables[0].location_attribute);
- EXPECT_EQ(InterpolationType::kFlat,
- result[0].input_variables[0].interpolation_type);
- EXPECT_EQ(ComponentType::kUInt, result[0].input_variables[0].component_type);
-
- ASSERT_EQ(1u, result[0].output_variables.size());
- EXPECT_EQ("<retval>", result[0].output_variables[0].name);
- EXPECT_TRUE(result[0].output_variables[0].has_location_attribute);
- EXPECT_EQ(0u, result[0].output_variables[0].location_attribute);
- EXPECT_EQ(ComponentType::kUInt, result[0].output_variables[0].component_type);
-
- ASSERT_EQ(1u, result[1].input_variables.size());
- EXPECT_EQ("in_var_bar", result[1].input_variables[0].name);
- EXPECT_TRUE(result[1].input_variables[0].has_location_attribute);
- EXPECT_EQ(0u, result[1].input_variables[0].location_attribute);
- EXPECT_EQ(InterpolationType::kFlat,
- result[1].input_variables[0].interpolation_type);
- EXPECT_EQ(ComponentType::kUInt, result[1].input_variables[0].component_type);
-
- ASSERT_EQ(1u, result[1].output_variables.size());
- EXPECT_EQ("<retval>", result[1].output_variables[0].name);
- EXPECT_TRUE(result[1].output_variables[0].has_location_attribute);
- EXPECT_EQ(1u, result[1].output_variables[0].location_attribute);
- EXPECT_EQ(ComponentType::kUInt, result[1].output_variables[0].component_type);
+ auto* in_var_foo = Param("in_var_foo", ty.u32(), {Location(0u), Flat()});
+ Func("foo", {in_var_foo}, ty.u32(), {Return("in_var_foo")},
+ {Stage(ast::PipelineStage::kFragment)}, {Location(0u)});
+
+ auto* in_var_bar = Param("in_var_bar", ty.u32(), {Location(0u), Flat()});
+ Func("bar", {in_var_bar}, ty.u32(), {Return("in_var_bar")},
+ {Stage(ast::PipelineStage::kFragment)}, {Location(1u)});
+
+ Inspector& inspector = Build();
+
+ auto result = inspector.GetEntryPoints();
+ ASSERT_FALSE(inspector.has_error()) << inspector.error();
+
+ ASSERT_EQ(2u, result.size());
+
+ ASSERT_EQ(1u, result[0].input_variables.size());
+ EXPECT_EQ("in_var_foo", result[0].input_variables[0].name);
+ EXPECT_TRUE(result[0].input_variables[0].has_location_attribute);
+ EXPECT_EQ(0u, result[0].input_variables[0].location_attribute);
+ EXPECT_EQ(InterpolationType::kFlat, result[0].input_variables[0].interpolation_type);
+ EXPECT_EQ(ComponentType::kUInt, result[0].input_variables[0].component_type);
+
+ ASSERT_EQ(1u, result[0].output_variables.size());
+ EXPECT_EQ("<retval>", result[0].output_variables[0].name);
+ EXPECT_TRUE(result[0].output_variables[0].has_location_attribute);
+ EXPECT_EQ(0u, result[0].output_variables[0].location_attribute);
+ EXPECT_EQ(ComponentType::kUInt, result[0].output_variables[0].component_type);
+
+ ASSERT_EQ(1u, result[1].input_variables.size());
+ EXPECT_EQ("in_var_bar", result[1].input_variables[0].name);
+ EXPECT_TRUE(result[1].input_variables[0].has_location_attribute);
+ EXPECT_EQ(0u, result[1].input_variables[0].location_attribute);
+ EXPECT_EQ(InterpolationType::kFlat, result[1].input_variables[0].interpolation_type);
+ EXPECT_EQ(ComponentType::kUInt, result[1].input_variables[0].component_type);
+
+ ASSERT_EQ(1u, result[1].output_variables.size());
+ EXPECT_EQ("<retval>", result[1].output_variables[0].name);
+ EXPECT_TRUE(result[1].output_variables[0].has_location_attribute);
+ EXPECT_EQ(1u, result[1].output_variables[0].location_attribute);
+ EXPECT_EQ(ComponentType::kUInt, result[1].output_variables[0].component_type);
}
TEST_F(InspectorGetEntryPointTest, BuiltInsNotStageVariables) {
- auto* in_var0 =
- Param("in_var0", ty.u32(), {Builtin(ast::Builtin::kSampleIndex)});
- auto* in_var1 = Param("in_var1", ty.f32(), {Location(0u)});
- Func("foo", {in_var0, in_var1}, ty.f32(), {Return("in_var1")},
- {Stage(ast::PipelineStage::kFragment)},
- {Builtin(ast::Builtin::kFragDepth)});
- Inspector& inspector = Build();
+ auto* in_var0 = Param("in_var0", ty.u32(), {Builtin(ast::Builtin::kSampleIndex)});
+ auto* in_var1 = Param("in_var1", ty.f32(), {Location(0u)});
+ Func("foo", {in_var0, in_var1}, ty.f32(), {Return("in_var1")},
+ {Stage(ast::PipelineStage::kFragment)}, {Builtin(ast::Builtin::kFragDepth)});
+ Inspector& inspector = Build();
- auto result = inspector.GetEntryPoints();
- ASSERT_FALSE(inspector.has_error()) << inspector.error();
+ auto result = inspector.GetEntryPoints();
+ ASSERT_FALSE(inspector.has_error()) << inspector.error();
- ASSERT_EQ(1u, result.size());
+ ASSERT_EQ(1u, result.size());
- ASSERT_EQ(1u, result[0].input_variables.size());
- EXPECT_EQ("in_var1", result[0].input_variables[0].name);
- EXPECT_TRUE(result[0].input_variables[0].has_location_attribute);
- EXPECT_EQ(0u, result[0].input_variables[0].location_attribute);
- EXPECT_EQ(ComponentType::kFloat, result[0].input_variables[0].component_type);
+ ASSERT_EQ(1u, result[0].input_variables.size());
+ EXPECT_EQ("in_var1", result[0].input_variables[0].name);
+ EXPECT_TRUE(result[0].input_variables[0].has_location_attribute);
+ EXPECT_EQ(0u, result[0].input_variables[0].location_attribute);
+ EXPECT_EQ(ComponentType::kFloat, result[0].input_variables[0].component_type);
- ASSERT_EQ(0u, result[0].output_variables.size());
+ ASSERT_EQ(0u, result[0].output_variables.size());
}
TEST_F(InspectorGetEntryPointTest, InOutStruct) {
- auto* interface = MakeInOutStruct("interface", {{"a", 0u}, {"b", 1u}});
- Func("foo", {Param("param", ty.Of(interface))}, ty.Of(interface),
- {Return("param")}, {Stage(ast::PipelineStage::kFragment)});
- Inspector& inspector = Build();
-
- auto result = inspector.GetEntryPoints();
- ASSERT_FALSE(inspector.has_error()) << inspector.error();
-
- ASSERT_EQ(1u, result.size());
-
- ASSERT_EQ(2u, result[0].input_variables.size());
- EXPECT_EQ("param.a", result[0].input_variables[0].name);
- EXPECT_TRUE(result[0].input_variables[0].has_location_attribute);
- EXPECT_EQ(0u, result[0].input_variables[0].location_attribute);
- EXPECT_EQ(ComponentType::kUInt, result[0].input_variables[0].component_type);
- EXPECT_EQ("param.b", result[0].input_variables[1].name);
- EXPECT_TRUE(result[0].input_variables[1].has_location_attribute);
- EXPECT_EQ(1u, result[0].input_variables[1].location_attribute);
- EXPECT_EQ(ComponentType::kUInt, result[0].input_variables[1].component_type);
-
- ASSERT_EQ(2u, result[0].output_variables.size());
- EXPECT_EQ("<retval>.a", result[0].output_variables[0].name);
- EXPECT_TRUE(result[0].output_variables[0].has_location_attribute);
- EXPECT_EQ(0u, result[0].output_variables[0].location_attribute);
- EXPECT_EQ(ComponentType::kUInt, result[0].output_variables[0].component_type);
- EXPECT_EQ("<retval>.b", result[0].output_variables[1].name);
- EXPECT_TRUE(result[0].output_variables[1].has_location_attribute);
- EXPECT_EQ(1u, result[0].output_variables[1].location_attribute);
- EXPECT_EQ(ComponentType::kUInt, result[0].output_variables[1].component_type);
+ auto* interface = MakeInOutStruct("interface", {{"a", 0u}, {"b", 1u}});
+ Func("foo", {Param("param", ty.Of(interface))}, ty.Of(interface), {Return("param")},
+ {Stage(ast::PipelineStage::kFragment)});
+ Inspector& inspector = Build();
+
+ auto result = inspector.GetEntryPoints();
+ ASSERT_FALSE(inspector.has_error()) << inspector.error();
+
+ ASSERT_EQ(1u, result.size());
+
+ ASSERT_EQ(2u, result[0].input_variables.size());
+ EXPECT_EQ("param.a", result[0].input_variables[0].name);
+ EXPECT_TRUE(result[0].input_variables[0].has_location_attribute);
+ EXPECT_EQ(0u, result[0].input_variables[0].location_attribute);
+ EXPECT_EQ(ComponentType::kUInt, result[0].input_variables[0].component_type);
+ EXPECT_EQ("param.b", result[0].input_variables[1].name);
+ EXPECT_TRUE(result[0].input_variables[1].has_location_attribute);
+ EXPECT_EQ(1u, result[0].input_variables[1].location_attribute);
+ EXPECT_EQ(ComponentType::kUInt, result[0].input_variables[1].component_type);
+
+ ASSERT_EQ(2u, result[0].output_variables.size());
+ EXPECT_EQ("<retval>.a", result[0].output_variables[0].name);
+ EXPECT_TRUE(result[0].output_variables[0].has_location_attribute);
+ EXPECT_EQ(0u, result[0].output_variables[0].location_attribute);
+ EXPECT_EQ(ComponentType::kUInt, result[0].output_variables[0].component_type);
+ EXPECT_EQ("<retval>.b", result[0].output_variables[1].name);
+ EXPECT_TRUE(result[0].output_variables[1].has_location_attribute);
+ EXPECT_EQ(1u, result[0].output_variables[1].location_attribute);
+ EXPECT_EQ(ComponentType::kUInt, result[0].output_variables[1].component_type);
}
TEST_F(InspectorGetEntryPointTest, MultipleEntryPointsInOutSharedStruct) {
- auto* interface = MakeInOutStruct("interface", {{"a", 0u}, {"b", 1u}});
- Func("foo", {}, ty.Of(interface), {Return(Construct(ty.Of(interface)))},
- {Stage(ast::PipelineStage::kFragment)});
- Func("bar", {Param("param", ty.Of(interface))}, ty.void_(), {},
- {Stage(ast::PipelineStage::kFragment)});
- Inspector& inspector = Build();
-
- auto result = inspector.GetEntryPoints();
- ASSERT_FALSE(inspector.has_error()) << inspector.error();
-
- ASSERT_EQ(2u, result.size());
-
- ASSERT_EQ(0u, result[0].input_variables.size());
-
- ASSERT_EQ(2u, result[0].output_variables.size());
- EXPECT_EQ("<retval>.a", result[0].output_variables[0].name);
- EXPECT_TRUE(result[0].output_variables[0].has_location_attribute);
- EXPECT_EQ(0u, result[0].output_variables[0].location_attribute);
- EXPECT_EQ(ComponentType::kUInt, result[0].output_variables[0].component_type);
- EXPECT_EQ("<retval>.b", result[0].output_variables[1].name);
- EXPECT_TRUE(result[0].output_variables[1].has_location_attribute);
- EXPECT_EQ(1u, result[0].output_variables[1].location_attribute);
- EXPECT_EQ(ComponentType::kUInt, result[0].output_variables[1].component_type);
-
- ASSERT_EQ(2u, result[1].input_variables.size());
- EXPECT_EQ("param.a", result[1].input_variables[0].name);
- EXPECT_TRUE(result[1].input_variables[0].has_location_attribute);
- EXPECT_EQ(0u, result[1].input_variables[0].location_attribute);
- EXPECT_EQ(ComponentType::kUInt, result[1].input_variables[0].component_type);
- EXPECT_EQ("param.b", result[1].input_variables[1].name);
- EXPECT_TRUE(result[1].input_variables[1].has_location_attribute);
- EXPECT_EQ(1u, result[1].input_variables[1].location_attribute);
- EXPECT_EQ(ComponentType::kUInt, result[1].input_variables[1].component_type);
-
- ASSERT_EQ(0u, result[1].output_variables.size());
+ auto* interface = MakeInOutStruct("interface", {{"a", 0u}, {"b", 1u}});
+ Func("foo", {}, ty.Of(interface), {Return(Construct(ty.Of(interface)))},
+ {Stage(ast::PipelineStage::kFragment)});
+ Func("bar", {Param("param", ty.Of(interface))}, ty.void_(), {},
+ {Stage(ast::PipelineStage::kFragment)});
+ Inspector& inspector = Build();
+
+ auto result = inspector.GetEntryPoints();
+ ASSERT_FALSE(inspector.has_error()) << inspector.error();
+
+ ASSERT_EQ(2u, result.size());
+
+ ASSERT_EQ(0u, result[0].input_variables.size());
+
+ ASSERT_EQ(2u, result[0].output_variables.size());
+ EXPECT_EQ("<retval>.a", result[0].output_variables[0].name);
+ EXPECT_TRUE(result[0].output_variables[0].has_location_attribute);
+ EXPECT_EQ(0u, result[0].output_variables[0].location_attribute);
+ EXPECT_EQ(ComponentType::kUInt, result[0].output_variables[0].component_type);
+ EXPECT_EQ("<retval>.b", result[0].output_variables[1].name);
+ EXPECT_TRUE(result[0].output_variables[1].has_location_attribute);
+ EXPECT_EQ(1u, result[0].output_variables[1].location_attribute);
+ EXPECT_EQ(ComponentType::kUInt, result[0].output_variables[1].component_type);
+
+ ASSERT_EQ(2u, result[1].input_variables.size());
+ EXPECT_EQ("param.a", result[1].input_variables[0].name);
+ EXPECT_TRUE(result[1].input_variables[0].has_location_attribute);
+ EXPECT_EQ(0u, result[1].input_variables[0].location_attribute);
+ EXPECT_EQ(ComponentType::kUInt, result[1].input_variables[0].component_type);
+ EXPECT_EQ("param.b", result[1].input_variables[1].name);
+ EXPECT_TRUE(result[1].input_variables[1].has_location_attribute);
+ EXPECT_EQ(1u, result[1].input_variables[1].location_attribute);
+ EXPECT_EQ(ComponentType::kUInt, result[1].input_variables[1].component_type);
+
+ ASSERT_EQ(0u, result[1].output_variables.size());
}
TEST_F(InspectorGetEntryPointTest, MixInOutVariablesAndStruct) {
- auto* struct_a = MakeInOutStruct("struct_a", {{"a", 0u}, {"b", 1u}});
- auto* struct_b = MakeInOutStruct("struct_b", {{"a", 2u}});
- Func("foo",
- {Param("param_a", ty.Of(struct_a)), Param("param_b", ty.Of(struct_b)),
- Param("param_c", ty.f32(), {Location(3u)}),
- Param("param_d", ty.f32(), {Location(4u)})},
- ty.Of(struct_a), {Return("param_a")},
- {Stage(ast::PipelineStage::kFragment)});
- Inspector& inspector = Build();
-
- auto result = inspector.GetEntryPoints();
- ASSERT_FALSE(inspector.has_error()) << inspector.error();
-
- ASSERT_EQ(1u, result.size());
-
- ASSERT_EQ(5u, result[0].input_variables.size());
- EXPECT_EQ("param_a.a", result[0].input_variables[0].name);
- EXPECT_TRUE(result[0].input_variables[0].has_location_attribute);
- EXPECT_EQ(0u, result[0].input_variables[0].location_attribute);
- EXPECT_EQ(ComponentType::kUInt, result[0].input_variables[0].component_type);
- EXPECT_EQ("param_a.b", result[0].input_variables[1].name);
- EXPECT_TRUE(result[0].input_variables[1].has_location_attribute);
- EXPECT_EQ(1u, result[0].input_variables[1].location_attribute);
- EXPECT_EQ(ComponentType::kUInt, result[0].input_variables[1].component_type);
- EXPECT_EQ("param_b.a", result[0].input_variables[2].name);
- EXPECT_TRUE(result[0].input_variables[2].has_location_attribute);
- EXPECT_EQ(2u, result[0].input_variables[2].location_attribute);
- EXPECT_EQ(ComponentType::kUInt, result[0].input_variables[2].component_type);
- EXPECT_EQ("param_c", result[0].input_variables[3].name);
- EXPECT_TRUE(result[0].input_variables[3].has_location_attribute);
- EXPECT_EQ(3u, result[0].input_variables[3].location_attribute);
- EXPECT_EQ(ComponentType::kFloat, result[0].input_variables[3].component_type);
- EXPECT_EQ("param_d", result[0].input_variables[4].name);
- EXPECT_TRUE(result[0].input_variables[4].has_location_attribute);
- EXPECT_EQ(4u, result[0].input_variables[4].location_attribute);
- EXPECT_EQ(ComponentType::kFloat, result[0].input_variables[4].component_type);
-
- ASSERT_EQ(2u, result[0].output_variables.size());
- EXPECT_EQ("<retval>.a", result[0].output_variables[0].name);
- EXPECT_TRUE(result[0].output_variables[0].has_location_attribute);
- EXPECT_EQ(0u, result[0].output_variables[0].location_attribute);
- EXPECT_EQ(ComponentType::kUInt, result[0].output_variables[0].component_type);
- EXPECT_EQ("<retval>.b", result[0].output_variables[1].name);
- EXPECT_TRUE(result[0].output_variables[1].has_location_attribute);
- EXPECT_EQ(1u, result[0].output_variables[1].location_attribute);
- EXPECT_EQ(ComponentType::kUInt, result[0].output_variables[1].component_type);
+ auto* struct_a = MakeInOutStruct("struct_a", {{"a", 0u}, {"b", 1u}});
+ auto* struct_b = MakeInOutStruct("struct_b", {{"a", 2u}});
+ Func("foo",
+ {Param("param_a", ty.Of(struct_a)), Param("param_b", ty.Of(struct_b)),
+ Param("param_c", ty.f32(), {Location(3u)}), Param("param_d", ty.f32(), {Location(4u)})},
+ ty.Of(struct_a), {Return("param_a")}, {Stage(ast::PipelineStage::kFragment)});
+ Inspector& inspector = Build();
+
+ auto result = inspector.GetEntryPoints();
+ ASSERT_FALSE(inspector.has_error()) << inspector.error();
+
+ ASSERT_EQ(1u, result.size());
+
+ ASSERT_EQ(5u, result[0].input_variables.size());
+ EXPECT_EQ("param_a.a", result[0].input_variables[0].name);
+ EXPECT_TRUE(result[0].input_variables[0].has_location_attribute);
+ EXPECT_EQ(0u, result[0].input_variables[0].location_attribute);
+ EXPECT_EQ(ComponentType::kUInt, result[0].input_variables[0].component_type);
+ EXPECT_EQ("param_a.b", result[0].input_variables[1].name);
+ EXPECT_TRUE(result[0].input_variables[1].has_location_attribute);
+ EXPECT_EQ(1u, result[0].input_variables[1].location_attribute);
+ EXPECT_EQ(ComponentType::kUInt, result[0].input_variables[1].component_type);
+ EXPECT_EQ("param_b.a", result[0].input_variables[2].name);
+ EXPECT_TRUE(result[0].input_variables[2].has_location_attribute);
+ EXPECT_EQ(2u, result[0].input_variables[2].location_attribute);
+ EXPECT_EQ(ComponentType::kUInt, result[0].input_variables[2].component_type);
+ EXPECT_EQ("param_c", result[0].input_variables[3].name);
+ EXPECT_TRUE(result[0].input_variables[3].has_location_attribute);
+ EXPECT_EQ(3u, result[0].input_variables[3].location_attribute);
+ EXPECT_EQ(ComponentType::kFloat, result[0].input_variables[3].component_type);
+ EXPECT_EQ("param_d", result[0].input_variables[4].name);
+ EXPECT_TRUE(result[0].input_variables[4].has_location_attribute);
+ EXPECT_EQ(4u, result[0].input_variables[4].location_attribute);
+ EXPECT_EQ(ComponentType::kFloat, result[0].input_variables[4].component_type);
+
+ ASSERT_EQ(2u, result[0].output_variables.size());
+ EXPECT_EQ("<retval>.a", result[0].output_variables[0].name);
+ EXPECT_TRUE(result[0].output_variables[0].has_location_attribute);
+ EXPECT_EQ(0u, result[0].output_variables[0].location_attribute);
+ EXPECT_EQ(ComponentType::kUInt, result[0].output_variables[0].component_type);
+ EXPECT_EQ("<retval>.b", result[0].output_variables[1].name);
+ EXPECT_TRUE(result[0].output_variables[1].has_location_attribute);
+ EXPECT_EQ(1u, result[0].output_variables[1].location_attribute);
+ EXPECT_EQ(ComponentType::kUInt, result[0].output_variables[1].component_type);
}
TEST_F(InspectorGetEntryPointTest, OverridableConstantUnreferenced) {
- AddOverridableConstantWithoutID("foo", ty.f32(), nullptr);
- MakeEmptyBodyFunction(
- "ep_func", {Stage(ast::PipelineStage::kCompute), WorkgroupSize(1)});
+ AddOverridableConstantWithoutID("foo", ty.f32(), nullptr);
+ MakeEmptyBodyFunction("ep_func", {Stage(ast::PipelineStage::kCompute), WorkgroupSize(1_i)});
- Inspector& inspector = Build();
+ Inspector& inspector = Build();
- auto result = inspector.GetEntryPoints();
+ auto result = inspector.GetEntryPoints();
- ASSERT_EQ(1u, result.size());
- EXPECT_EQ(0u, result[0].overridable_constants.size());
+ ASSERT_EQ(1u, result.size());
+ EXPECT_EQ(0u, result[0].overridable_constants.size());
}
TEST_F(InspectorGetEntryPointTest, OverridableConstantReferencedByEntryPoint) {
- AddOverridableConstantWithoutID("foo", ty.f32(), nullptr);
- MakePlainGlobalReferenceBodyFunction(
- "ep_func", "foo", ty.f32(),
- {Stage(ast::PipelineStage::kCompute), WorkgroupSize(1)});
+ AddOverridableConstantWithoutID("foo", ty.f32(), nullptr);
+ MakePlainGlobalReferenceBodyFunction("ep_func", "foo", ty.f32(),
+ {Stage(ast::PipelineStage::kCompute), WorkgroupSize(1_i)});
- Inspector& inspector = Build();
+ Inspector& inspector = Build();
- auto result = inspector.GetEntryPoints();
+ auto result = inspector.GetEntryPoints();
- ASSERT_EQ(1u, result.size());
- ASSERT_EQ(1u, result[0].overridable_constants.size());
- EXPECT_EQ("foo", result[0].overridable_constants[0].name);
+ ASSERT_EQ(1u, result.size());
+ ASSERT_EQ(1u, result[0].overridable_constants.size());
+ EXPECT_EQ("foo", result[0].overridable_constants[0].name);
}
TEST_F(InspectorGetEntryPointTest, OverridableConstantReferencedByCallee) {
- AddOverridableConstantWithoutID("foo", ty.f32(), nullptr);
- MakePlainGlobalReferenceBodyFunction("callee_func", "foo", ty.f32(), {});
- MakeCallerBodyFunction(
- "ep_func", {"callee_func"},
- {Stage(ast::PipelineStage::kCompute), WorkgroupSize(1)});
+ AddOverridableConstantWithoutID("foo", ty.f32(), nullptr);
+ MakePlainGlobalReferenceBodyFunction("callee_func", "foo", ty.f32(), {});
+ MakeCallerBodyFunction("ep_func", {"callee_func"},
+ {Stage(ast::PipelineStage::kCompute), WorkgroupSize(1_i)});
- Inspector& inspector = Build();
+ Inspector& inspector = Build();
- auto result = inspector.GetEntryPoints();
+ auto result = inspector.GetEntryPoints();
- ASSERT_EQ(1u, result.size());
- ASSERT_EQ(1u, result[0].overridable_constants.size());
- EXPECT_EQ("foo", result[0].overridable_constants[0].name);
+ ASSERT_EQ(1u, result.size());
+ ASSERT_EQ(1u, result[0].overridable_constants.size());
+ EXPECT_EQ("foo", result[0].overridable_constants[0].name);
}
TEST_F(InspectorGetEntryPointTest, OverridableConstantSomeReferenced) {
- AddOverridableConstantWithID("foo", 1, ty.f32(), nullptr);
- AddOverridableConstantWithID("bar", 2, ty.f32(), nullptr);
- MakePlainGlobalReferenceBodyFunction("callee_func", "foo", ty.f32(), {});
- MakeCallerBodyFunction(
- "ep_func", {"callee_func"},
- {Stage(ast::PipelineStage::kCompute), WorkgroupSize(1)});
+ AddOverridableConstantWithID("foo", 1, ty.f32(), nullptr);
+ AddOverridableConstantWithID("bar", 2, ty.f32(), nullptr);
+ MakePlainGlobalReferenceBodyFunction("callee_func", "foo", ty.f32(), {});
+ MakeCallerBodyFunction("ep_func", {"callee_func"},
+ {Stage(ast::PipelineStage::kCompute), WorkgroupSize(1_i)});
- Inspector& inspector = Build();
+ Inspector& inspector = Build();
- auto result = inspector.GetEntryPoints();
+ auto result = inspector.GetEntryPoints();
- ASSERT_EQ(1u, result.size());
- ASSERT_EQ(1u, result[0].overridable_constants.size());
- EXPECT_EQ("foo", result[0].overridable_constants[0].name);
- EXPECT_EQ(1, result[0].overridable_constants[0].numeric_id);
+ ASSERT_EQ(1u, result.size());
+ ASSERT_EQ(1u, result[0].overridable_constants.size());
+ EXPECT_EQ("foo", result[0].overridable_constants[0].name);
+ EXPECT_EQ(1, result[0].overridable_constants[0].numeric_id);
}
TEST_F(InspectorGetEntryPointTest, OverridableConstantTypes) {
- AddOverridableConstantWithoutID("bool_var", ty.bool_(), nullptr);
- AddOverridableConstantWithoutID("float_var", ty.f32(), nullptr);
- AddOverridableConstantWithoutID("u32_var", ty.u32(), nullptr);
- AddOverridableConstantWithoutID("i32_var", ty.i32(), nullptr);
-
- MakePlainGlobalReferenceBodyFunction("bool_func", "bool_var", ty.bool_(), {});
- MakePlainGlobalReferenceBodyFunction("float_func", "float_var", ty.f32(), {});
- MakePlainGlobalReferenceBodyFunction("u32_func", "u32_var", ty.u32(), {});
- MakePlainGlobalReferenceBodyFunction("i32_func", "i32_var", ty.i32(), {});
-
- MakeCallerBodyFunction(
- "ep_func", {"bool_func", "float_func", "u32_func", "i32_func"},
- {Stage(ast::PipelineStage::kCompute), WorkgroupSize(1)});
-
- Inspector& inspector = Build();
-
- auto result = inspector.GetEntryPoints();
-
- ASSERT_EQ(1u, result.size());
- ASSERT_EQ(4u, result[0].overridable_constants.size());
- EXPECT_EQ("bool_var", result[0].overridable_constants[0].name);
- EXPECT_EQ(inspector::OverridableConstant::Type::kBool,
- result[0].overridable_constants[0].type);
- EXPECT_EQ("float_var", result[0].overridable_constants[1].name);
- EXPECT_EQ(inspector::OverridableConstant::Type::kFloat32,
- result[0].overridable_constants[1].type);
- EXPECT_EQ("u32_var", result[0].overridable_constants[2].name);
- EXPECT_EQ(inspector::OverridableConstant::Type::kUint32,
- result[0].overridable_constants[2].type);
- EXPECT_EQ("i32_var", result[0].overridable_constants[3].name);
- EXPECT_EQ(inspector::OverridableConstant::Type::kInt32,
- result[0].overridable_constants[3].type);
+ AddOverridableConstantWithoutID("bool_var", ty.bool_(), nullptr);
+ AddOverridableConstantWithoutID("float_var", ty.f32(), nullptr);
+ AddOverridableConstantWithoutID("u32_var", ty.u32(), nullptr);
+ AddOverridableConstantWithoutID("i32_var", ty.i32(), nullptr);
+
+ MakePlainGlobalReferenceBodyFunction("bool_func", "bool_var", ty.bool_(), {});
+ MakePlainGlobalReferenceBodyFunction("float_func", "float_var", ty.f32(), {});
+ MakePlainGlobalReferenceBodyFunction("u32_func", "u32_var", ty.u32(), {});
+ MakePlainGlobalReferenceBodyFunction("i32_func", "i32_var", ty.i32(), {});
+
+ MakeCallerBodyFunction("ep_func", {"bool_func", "float_func", "u32_func", "i32_func"},
+ {Stage(ast::PipelineStage::kCompute), WorkgroupSize(1_i)});
+
+ Inspector& inspector = Build();
+
+ auto result = inspector.GetEntryPoints();
+
+ ASSERT_EQ(1u, result.size());
+ ASSERT_EQ(4u, result[0].overridable_constants.size());
+ EXPECT_EQ("bool_var", result[0].overridable_constants[0].name);
+ EXPECT_EQ(inspector::OverridableConstant::Type::kBool, result[0].overridable_constants[0].type);
+ EXPECT_EQ("float_var", result[0].overridable_constants[1].name);
+ EXPECT_EQ(inspector::OverridableConstant::Type::kFloat32,
+ result[0].overridable_constants[1].type);
+ EXPECT_EQ("u32_var", result[0].overridable_constants[2].name);
+ EXPECT_EQ(inspector::OverridableConstant::Type::kUint32,
+ result[0].overridable_constants[2].type);
+ EXPECT_EQ("i32_var", result[0].overridable_constants[3].name);
+ EXPECT_EQ(inspector::OverridableConstant::Type::kInt32,
+ result[0].overridable_constants[3].type);
}
TEST_F(InspectorGetEntryPointTest, OverridableConstantInitialized) {
- AddOverridableConstantWithoutID("foo", ty.f32(), Expr(0.0f));
- MakePlainGlobalReferenceBodyFunction(
- "ep_func", "foo", ty.f32(),
- {Stage(ast::PipelineStage::kCompute), WorkgroupSize(1)});
+ AddOverridableConstantWithoutID("foo", ty.f32(), Expr(0_f));
+ MakePlainGlobalReferenceBodyFunction("ep_func", "foo", ty.f32(),
+ {Stage(ast::PipelineStage::kCompute), WorkgroupSize(1_i)});
- Inspector& inspector = Build();
+ Inspector& inspector = Build();
- auto result = inspector.GetEntryPoints();
+ auto result = inspector.GetEntryPoints();
- ASSERT_EQ(1u, result.size());
- ASSERT_EQ(1u, result[0].overridable_constants.size());
- EXPECT_EQ("foo", result[0].overridable_constants[0].name);
- EXPECT_TRUE(result[0].overridable_constants[0].is_initialized);
+ ASSERT_EQ(1u, result.size());
+ ASSERT_EQ(1u, result[0].overridable_constants.size());
+ EXPECT_EQ("foo", result[0].overridable_constants[0].name);
+ EXPECT_TRUE(result[0].overridable_constants[0].is_initialized);
}
TEST_F(InspectorGetEntryPointTest, OverridableConstantUninitialized) {
- AddOverridableConstantWithoutID("foo", ty.f32(), nullptr);
- MakePlainGlobalReferenceBodyFunction(
- "ep_func", "foo", ty.f32(),
- {Stage(ast::PipelineStage::kCompute), WorkgroupSize(1)});
+ AddOverridableConstantWithoutID("foo", ty.f32(), nullptr);
+ MakePlainGlobalReferenceBodyFunction("ep_func", "foo", ty.f32(),
+ {Stage(ast::PipelineStage::kCompute), WorkgroupSize(1_i)});
- Inspector& inspector = Build();
+ Inspector& inspector = Build();
- auto result = inspector.GetEntryPoints();
+ auto result = inspector.GetEntryPoints();
- ASSERT_EQ(1u, result.size());
- ASSERT_EQ(1u, result[0].overridable_constants.size());
- EXPECT_EQ("foo", result[0].overridable_constants[0].name);
+ ASSERT_EQ(1u, result.size());
+ ASSERT_EQ(1u, result[0].overridable_constants.size());
+ EXPECT_EQ("foo", result[0].overridable_constants[0].name);
- EXPECT_FALSE(result[0].overridable_constants[0].is_initialized);
+ EXPECT_FALSE(result[0].overridable_constants[0].is_initialized);
}
TEST_F(InspectorGetEntryPointTest, OverridableConstantNumericIDSpecified) {
- AddOverridableConstantWithoutID("foo_no_id", ty.f32(), nullptr);
- AddOverridableConstantWithID("foo_id", 1234, ty.f32(), nullptr);
+ AddOverridableConstantWithoutID("foo_no_id", ty.f32(), nullptr);
+ AddOverridableConstantWithID("foo_id", 1234, ty.f32(), nullptr);
- MakePlainGlobalReferenceBodyFunction("no_id_func", "foo_no_id", ty.f32(), {});
- MakePlainGlobalReferenceBodyFunction("id_func", "foo_id", ty.f32(), {});
+ MakePlainGlobalReferenceBodyFunction("no_id_func", "foo_no_id", ty.f32(), {});
+ MakePlainGlobalReferenceBodyFunction("id_func", "foo_id", ty.f32(), {});
- MakeCallerBodyFunction(
- "ep_func", {"no_id_func", "id_func"},
- {Stage(ast::PipelineStage::kCompute), WorkgroupSize(1)});
+ MakeCallerBodyFunction("ep_func", {"no_id_func", "id_func"},
+ {Stage(ast::PipelineStage::kCompute), WorkgroupSize(1_i)});
- Inspector& inspector = Build();
+ Inspector& inspector = Build();
- auto result = inspector.GetEntryPoints();
+ auto result = inspector.GetEntryPoints();
- ASSERT_EQ(1u, result.size());
- ASSERT_EQ(2u, result[0].overridable_constants.size());
- EXPECT_EQ("foo_no_id", result[0].overridable_constants[0].name);
- EXPECT_EQ("foo_id", result[0].overridable_constants[1].name);
- EXPECT_EQ(1234, result[0].overridable_constants[1].numeric_id);
+ ASSERT_EQ(1u, result.size());
+ ASSERT_EQ(2u, result[0].overridable_constants.size());
+ EXPECT_EQ("foo_no_id", result[0].overridable_constants[0].name);
+ EXPECT_EQ("foo_id", result[0].overridable_constants[1].name);
+ EXPECT_EQ(1234, result[0].overridable_constants[1].numeric_id);
- EXPECT_FALSE(result[0].overridable_constants[0].is_numeric_id_specified);
- EXPECT_TRUE(result[0].overridable_constants[1].is_numeric_id_specified);
+ EXPECT_FALSE(result[0].overridable_constants[0].is_numeric_id_specified);
+ EXPECT_TRUE(result[0].overridable_constants[1].is_numeric_id_specified);
}
TEST_F(InspectorGetEntryPointTest, NonOverridableConstantSkipped) {
- auto* foo_struct_type = MakeUniformBufferType("foo_type", {ty.i32()});
- AddUniformBuffer("foo_ub", ty.Of(foo_struct_type), 0, 0);
- MakeStructVariableReferenceBodyFunction("ub_func", "foo_ub", {{0, ty.i32()}});
- MakeCallerBodyFunction("ep_func", {"ub_func"},
- {Stage(ast::PipelineStage::kFragment)});
+ auto* foo_struct_type = MakeUniformBufferType("foo_type", {ty.i32()});
+ AddUniformBuffer("foo_ub", ty.Of(foo_struct_type), 0, 0);
+ MakeStructVariableReferenceBodyFunction("ub_func", "foo_ub", {{0, ty.i32()}});
+ MakeCallerBodyFunction("ep_func", {"ub_func"}, {Stage(ast::PipelineStage::kFragment)});
- Inspector& inspector = Build();
+ Inspector& inspector = Build();
- auto result = inspector.GetEntryPoints();
+ auto result = inspector.GetEntryPoints();
- ASSERT_EQ(1u, result.size());
- EXPECT_EQ(0u, result[0].overridable_constants.size());
+ ASSERT_EQ(1u, result.size());
+ EXPECT_EQ(0u, result[0].overridable_constants.size());
}
TEST_F(InspectorGetEntryPointTest, BuiltinNotReferenced) {
- MakeEmptyBodyFunction("ep_func", {Stage(ast::PipelineStage::kFragment)});
+ MakeEmptyBodyFunction("ep_func", {Stage(ast::PipelineStage::kFragment)});
- Inspector& inspector = Build();
+ Inspector& inspector = Build();
- auto result = inspector.GetEntryPoints();
+ auto result = inspector.GetEntryPoints();
- ASSERT_EQ(1u, result.size());
- EXPECT_FALSE(result[0].input_sample_mask_used);
- EXPECT_FALSE(result[0].output_sample_mask_used);
- EXPECT_FALSE(result[0].input_position_used);
- EXPECT_FALSE(result[0].front_facing_used);
- EXPECT_FALSE(result[0].sample_index_used);
- EXPECT_FALSE(result[0].num_workgroups_used);
+ ASSERT_EQ(1u, result.size());
+ EXPECT_FALSE(result[0].input_sample_mask_used);
+ EXPECT_FALSE(result[0].output_sample_mask_used);
+ EXPECT_FALSE(result[0].input_position_used);
+ EXPECT_FALSE(result[0].front_facing_used);
+ EXPECT_FALSE(result[0].sample_index_used);
+ EXPECT_FALSE(result[0].num_workgroups_used);
}
TEST_F(InspectorGetEntryPointTest, InputSampleMaskSimpleReferenced) {
- auto* in_var =
- Param("in_var", ty.u32(), {Builtin(ast::Builtin::kSampleMask)});
- Func("ep_func", {in_var}, ty.void_(), {Return()},
- {Stage(ast::PipelineStage::kFragment)}, {});
+ auto* in_var = Param("in_var", ty.u32(), {Builtin(ast::Builtin::kSampleMask)});
+ Func("ep_func", {in_var}, ty.void_(), {Return()}, {Stage(ast::PipelineStage::kFragment)}, {});
- Inspector& inspector = Build();
+ Inspector& inspector = Build();
- auto result = inspector.GetEntryPoints();
+ auto result = inspector.GetEntryPoints();
- ASSERT_EQ(1u, result.size());
- EXPECT_TRUE(result[0].input_sample_mask_used);
+ ASSERT_EQ(1u, result.size());
+ EXPECT_TRUE(result[0].input_sample_mask_used);
}
TEST_F(InspectorGetEntryPointTest, InputSampleMaskStructReferenced) {
- ast::StructMemberList members;
- members.push_back(
- Member("inner_position", ty.u32(), {Builtin(ast::Builtin::kSampleMask)}));
- Structure("in_struct", members);
- auto* in_var = Param("in_var", ty.type_name("in_struct"), {});
+ ast::StructMemberList members;
+ members.push_back(Member("inner_position", ty.u32(), {Builtin(ast::Builtin::kSampleMask)}));
+ Structure("in_struct", members);
+ auto* in_var = Param("in_var", ty.type_name("in_struct"), {});
- Func("ep_func", {in_var}, ty.void_(), {Return()},
- {Stage(ast::PipelineStage::kFragment)}, {});
+ Func("ep_func", {in_var}, ty.void_(), {Return()}, {Stage(ast::PipelineStage::kFragment)}, {});
- Inspector& inspector = Build();
+ Inspector& inspector = Build();
- auto result = inspector.GetEntryPoints();
+ auto result = inspector.GetEntryPoints();
- ASSERT_EQ(1u, result.size());
- EXPECT_TRUE(result[0].input_sample_mask_used);
+ ASSERT_EQ(1u, result.size());
+ EXPECT_TRUE(result[0].input_sample_mask_used);
}
TEST_F(InspectorGetEntryPointTest, OutputSampleMaskSimpleReferenced) {
- auto* in_var =
- Param("in_var", ty.u32(), {Builtin(ast::Builtin::kSampleMask)});
- Func("ep_func", {in_var}, ty.u32(), {Return("in_var")},
- {Stage(ast::PipelineStage::kFragment)},
- {Builtin(ast::Builtin::kSampleMask)});
+ auto* in_var = Param("in_var", ty.u32(), {Builtin(ast::Builtin::kSampleMask)});
+ Func("ep_func", {in_var}, ty.u32(), {Return("in_var")}, {Stage(ast::PipelineStage::kFragment)},
+ {Builtin(ast::Builtin::kSampleMask)});
- Inspector& inspector = Build();
+ Inspector& inspector = Build();
- auto result = inspector.GetEntryPoints();
+ auto result = inspector.GetEntryPoints();
- ASSERT_EQ(1u, result.size());
- EXPECT_TRUE(result[0].output_sample_mask_used);
+ ASSERT_EQ(1u, result.size());
+ EXPECT_TRUE(result[0].output_sample_mask_used);
}
TEST_F(InspectorGetEntryPointTest, OutputSampleMaskStructReferenced) {
- ast::StructMemberList members;
- members.push_back(Member("inner_sample_mask", ty.u32(),
- {Builtin(ast::Builtin::kSampleMask)}));
- Structure("out_struct", members);
+ ast::StructMemberList members;
+ members.push_back(Member("inner_sample_mask", ty.u32(), {Builtin(ast::Builtin::kSampleMask)}));
+ Structure("out_struct", members);
- Func("ep_func", {}, ty.type_name("out_struct"),
- {Decl(Var("out_var", ty.type_name("out_struct"))), Return("out_var")},
- {Stage(ast::PipelineStage::kFragment)}, {});
+ Func("ep_func", {}, ty.type_name("out_struct"),
+ {Decl(Var("out_var", ty.type_name("out_struct"))), Return("out_var")},
+ {Stage(ast::PipelineStage::kFragment)}, {});
- Inspector& inspector = Build();
+ Inspector& inspector = Build();
- auto result = inspector.GetEntryPoints();
+ auto result = inspector.GetEntryPoints();
- ASSERT_EQ(1u, result.size());
- EXPECT_TRUE(result[0].output_sample_mask_used);
+ ASSERT_EQ(1u, result.size());
+ EXPECT_TRUE(result[0].output_sample_mask_used);
}
TEST_F(InspectorGetEntryPointTest, InputPositionSimpleReferenced) {
- auto* in_var =
- Param("in_var", ty.vec4<f32>(), {Builtin(ast::Builtin::kPosition)});
- Func("ep_func", {in_var}, ty.void_(), {Return()},
- {Stage(ast::PipelineStage::kFragment)}, {});
+ auto* in_var = Param("in_var", ty.vec4<f32>(), {Builtin(ast::Builtin::kPosition)});
+ Func("ep_func", {in_var}, ty.void_(), {Return()}, {Stage(ast::PipelineStage::kFragment)}, {});
- Inspector& inspector = Build();
+ Inspector& inspector = Build();
- auto result = inspector.GetEntryPoints();
+ auto result = inspector.GetEntryPoints();
- ASSERT_EQ(1u, result.size());
- EXPECT_TRUE(result[0].input_position_used);
+ ASSERT_EQ(1u, result.size());
+ EXPECT_TRUE(result[0].input_position_used);
}
TEST_F(InspectorGetEntryPointTest, InputPositionStructReferenced) {
- ast::StructMemberList members;
- members.push_back(Member("inner_position", ty.vec4<f32>(),
- {Builtin(ast::Builtin::kPosition)}));
- Structure("in_struct", members);
- auto* in_var = Param("in_var", ty.type_name("in_struct"), {});
+ ast::StructMemberList members;
+ members.push_back(Member("inner_position", ty.vec4<f32>(), {Builtin(ast::Builtin::kPosition)}));
+ Structure("in_struct", members);
+ auto* in_var = Param("in_var", ty.type_name("in_struct"), {});
- Func("ep_func", {in_var}, ty.void_(), {Return()},
- {Stage(ast::PipelineStage::kFragment)}, {});
+ Func("ep_func", {in_var}, ty.void_(), {Return()}, {Stage(ast::PipelineStage::kFragment)}, {});
- Inspector& inspector = Build();
+ Inspector& inspector = Build();
- auto result = inspector.GetEntryPoints();
+ auto result = inspector.GetEntryPoints();
- ASSERT_EQ(1u, result.size());
- EXPECT_TRUE(result[0].input_position_used);
+ ASSERT_EQ(1u, result.size());
+ EXPECT_TRUE(result[0].input_position_used);
}
TEST_F(InspectorGetEntryPointTest, FrontFacingSimpleReferenced) {
- auto* in_var =
- Param("in_var", ty.bool_(), {Builtin(ast::Builtin::kFrontFacing)});
- Func("ep_func", {in_var}, ty.void_(), {Return()},
- {Stage(ast::PipelineStage::kFragment)}, {});
+ auto* in_var = Param("in_var", ty.bool_(), {Builtin(ast::Builtin::kFrontFacing)});
+ Func("ep_func", {in_var}, ty.void_(), {Return()}, {Stage(ast::PipelineStage::kFragment)}, {});
- Inspector& inspector = Build();
+ Inspector& inspector = Build();
- auto result = inspector.GetEntryPoints();
+ auto result = inspector.GetEntryPoints();
- ASSERT_EQ(1u, result.size());
- EXPECT_TRUE(result[0].front_facing_used);
+ ASSERT_EQ(1u, result.size());
+ EXPECT_TRUE(result[0].front_facing_used);
}
TEST_F(InspectorGetEntryPointTest, FrontFacingStructReferenced) {
- ast::StructMemberList members;
- members.push_back(Member("inner_position", ty.bool_(),
- {Builtin(ast::Builtin::kFrontFacing)}));
- Structure("in_struct", members);
- auto* in_var = Param("in_var", ty.type_name("in_struct"), {});
+ ast::StructMemberList members;
+ members.push_back(Member("inner_position", ty.bool_(), {Builtin(ast::Builtin::kFrontFacing)}));
+ Structure("in_struct", members);
+ auto* in_var = Param("in_var", ty.type_name("in_struct"), {});
- Func("ep_func", {in_var}, ty.void_(), {Return()},
- {Stage(ast::PipelineStage::kFragment)}, {});
+ Func("ep_func", {in_var}, ty.void_(), {Return()}, {Stage(ast::PipelineStage::kFragment)}, {});
- Inspector& inspector = Build();
+ Inspector& inspector = Build();
- auto result = inspector.GetEntryPoints();
+ auto result = inspector.GetEntryPoints();
- ASSERT_EQ(1u, result.size());
- EXPECT_TRUE(result[0].front_facing_used);
+ ASSERT_EQ(1u, result.size());
+ EXPECT_TRUE(result[0].front_facing_used);
}
TEST_F(InspectorGetEntryPointTest, SampleIndexSimpleReferenced) {
- auto* in_var =
- Param("in_var", ty.u32(), {Builtin(ast::Builtin::kSampleIndex)});
- Func("ep_func", {in_var}, ty.void_(), {Return()},
- {Stage(ast::PipelineStage::kFragment)}, {});
+ auto* in_var = Param("in_var", ty.u32(), {Builtin(ast::Builtin::kSampleIndex)});
+ Func("ep_func", {in_var}, ty.void_(), {Return()}, {Stage(ast::PipelineStage::kFragment)}, {});
- Inspector& inspector = Build();
+ Inspector& inspector = Build();
- auto result = inspector.GetEntryPoints();
+ auto result = inspector.GetEntryPoints();
- ASSERT_EQ(1u, result.size());
- EXPECT_TRUE(result[0].sample_index_used);
+ ASSERT_EQ(1u, result.size());
+ EXPECT_TRUE(result[0].sample_index_used);
}
TEST_F(InspectorGetEntryPointTest, SampleIndexStructReferenced) {
- ast::StructMemberList members;
- members.push_back(Member("inner_position", ty.u32(),
- {Builtin(ast::Builtin::kSampleIndex)}));
- Structure("in_struct", members);
- auto* in_var = Param("in_var", ty.type_name("in_struct"), {});
+ ast::StructMemberList members;
+ members.push_back(Member("inner_position", ty.u32(), {Builtin(ast::Builtin::kSampleIndex)}));
+ Structure("in_struct", members);
+ auto* in_var = Param("in_var", ty.type_name("in_struct"), {});
- Func("ep_func", {in_var}, ty.void_(), {Return()},
- {Stage(ast::PipelineStage::kFragment)}, {});
+ Func("ep_func", {in_var}, ty.void_(), {Return()}, {Stage(ast::PipelineStage::kFragment)}, {});
- Inspector& inspector = Build();
+ Inspector& inspector = Build();
- auto result = inspector.GetEntryPoints();
+ auto result = inspector.GetEntryPoints();
- ASSERT_EQ(1u, result.size());
- EXPECT_TRUE(result[0].sample_index_used);
+ ASSERT_EQ(1u, result.size());
+ EXPECT_TRUE(result[0].sample_index_used);
}
TEST_F(InspectorGetEntryPointTest, NumWorkgroupsSimpleReferenced) {
- auto* in_var =
- Param("in_var", ty.vec3<u32>(), {Builtin(ast::Builtin::kNumWorkgroups)});
- Func("ep_func", {in_var}, ty.void_(), {Return()},
- {Stage(ast::PipelineStage::kCompute), WorkgroupSize(1)}, {});
+ auto* in_var = Param("in_var", ty.vec3<u32>(), {Builtin(ast::Builtin::kNumWorkgroups)});
+ Func("ep_func", {in_var}, ty.void_(), {Return()},
+ {Stage(ast::PipelineStage::kCompute), WorkgroupSize(1_i)}, {});
- Inspector& inspector = Build();
+ Inspector& inspector = Build();
- auto result = inspector.GetEntryPoints();
+ auto result = inspector.GetEntryPoints();
- ASSERT_EQ(1u, result.size());
- EXPECT_TRUE(result[0].num_workgroups_used);
+ ASSERT_EQ(1u, result.size());
+ EXPECT_TRUE(result[0].num_workgroups_used);
}
TEST_F(InspectorGetEntryPointTest, NumWorkgroupsStructReferenced) {
- ast::StructMemberList members;
- members.push_back(Member("inner_position", ty.vec3<u32>(),
- {Builtin(ast::Builtin::kNumWorkgroups)}));
- Structure("in_struct", members);
- auto* in_var = Param("in_var", ty.type_name("in_struct"), {});
+ ast::StructMemberList members;
+ members.push_back(
+ Member("inner_position", ty.vec3<u32>(), {Builtin(ast::Builtin::kNumWorkgroups)}));
+ Structure("in_struct", members);
+ auto* in_var = Param("in_var", ty.type_name("in_struct"), {});
- Func("ep_func", {in_var}, ty.void_(), {Return()},
- {Stage(ast::PipelineStage::kCompute), WorkgroupSize(1)}, {});
+ Func("ep_func", {in_var}, ty.void_(), {Return()},
+ {Stage(ast::PipelineStage::kCompute), WorkgroupSize(1_i)}, {});
- Inspector& inspector = Build();
+ Inspector& inspector = Build();
- auto result = inspector.GetEntryPoints();
+ auto result = inspector.GetEntryPoints();
- ASSERT_EQ(1u, result.size());
- EXPECT_TRUE(result[0].num_workgroups_used);
+ ASSERT_EQ(1u, result.size());
+ EXPECT_TRUE(result[0].num_workgroups_used);
}
TEST_F(InspectorGetEntryPointTest, ImplicitInterpolate) {
- ast::StructMemberList members;
- members.push_back(Member("struct_inner", ty.f32(), {Location(0)}));
- Structure("in_struct", members);
- auto* in_var = Param("in_var", ty.type_name("in_struct"), {});
+ ast::StructMemberList members;
+ members.push_back(Member("struct_inner", ty.f32(), {Location(0)}));
+ Structure("in_struct", members);
+ auto* in_var = Param("in_var", ty.type_name("in_struct"), {});
- Func("ep_func", {in_var}, ty.void_(), {Return()},
- {Stage(ast::PipelineStage::kFragment)}, {});
+ Func("ep_func", {in_var}, ty.void_(), {Return()}, {Stage(ast::PipelineStage::kFragment)}, {});
- Inspector& inspector = Build();
+ Inspector& inspector = Build();
- auto result = inspector.GetEntryPoints();
+ auto result = inspector.GetEntryPoints();
- ASSERT_EQ(1u, result.size());
- ASSERT_EQ(1u, result[0].input_variables.size());
- EXPECT_EQ(InterpolationType::kPerspective,
- result[0].input_variables[0].interpolation_type);
- EXPECT_EQ(InterpolationSampling::kCenter,
- result[0].input_variables[0].interpolation_sampling);
+ ASSERT_EQ(1u, result.size());
+ ASSERT_EQ(1u, result[0].input_variables.size());
+ EXPECT_EQ(InterpolationType::kPerspective, result[0].input_variables[0].interpolation_type);
+ EXPECT_EQ(InterpolationSampling::kCenter, result[0].input_variables[0].interpolation_sampling);
}
TEST_P(InspectorGetEntryPointInterpolateTest, Test) {
- auto& params = GetParam();
- ast::StructMemberList members;
- members.push_back(
- Member("struct_inner", ty.f32(),
- {Interpolate(params.in_type, params.in_sampling), Location(0)}));
- Structure("in_struct", members);
- auto* in_var = Param("in_var", ty.type_name("in_struct"), {});
+ auto& params = GetParam();
+ ast::StructMemberList members;
+ members.push_back(Member("struct_inner", ty.f32(),
+ {Interpolate(params.in_type, params.in_sampling), Location(0)}));
+ Structure("in_struct", members);
+ auto* in_var = Param("in_var", ty.type_name("in_struct"), {});
- Func("ep_func", {in_var}, ty.void_(), {Return()},
- {Stage(ast::PipelineStage::kFragment)}, {});
+ Func("ep_func", {in_var}, ty.void_(), {Return()}, {Stage(ast::PipelineStage::kFragment)}, {});
- Inspector& inspector = Build();
+ Inspector& inspector = Build();
- auto result = inspector.GetEntryPoints();
+ auto result = inspector.GetEntryPoints();
- ASSERT_EQ(1u, result.size());
- ASSERT_EQ(1u, result[0].input_variables.size());
- EXPECT_EQ(params.out_type, result[0].input_variables[0].interpolation_type);
- EXPECT_EQ(params.out_sampling,
- result[0].input_variables[0].interpolation_sampling);
+ ASSERT_EQ(1u, result.size());
+ ASSERT_EQ(1u, result[0].input_variables.size());
+ EXPECT_EQ(params.out_type, result[0].input_variables[0].interpolation_type);
+ EXPECT_EQ(params.out_sampling, result[0].input_variables[0].interpolation_sampling);
}
INSTANTIATE_TEST_SUITE_P(
@@ -986,33 +923,26 @@ INSTANTIATE_TEST_SUITE_P(
InspectorGetEntryPointInterpolateTest,
testing::Values(
InspectorGetEntryPointInterpolateTestParams{
- ast::InterpolationType::kPerspective,
- ast::InterpolationSampling::kCenter,
+ ast::InterpolationType::kPerspective, ast::InterpolationSampling::kCenter,
InterpolationType::kPerspective, InterpolationSampling::kCenter},
InspectorGetEntryPointInterpolateTestParams{
- ast::InterpolationType::kPerspective,
- ast::InterpolationSampling::kCentroid,
+ ast::InterpolationType::kPerspective, ast::InterpolationSampling::kCentroid,
InterpolationType::kPerspective, InterpolationSampling::kCentroid},
InspectorGetEntryPointInterpolateTestParams{
- ast::InterpolationType::kPerspective,
- ast::InterpolationSampling::kSample,
+ ast::InterpolationType::kPerspective, ast::InterpolationSampling::kSample,
InterpolationType::kPerspective, InterpolationSampling::kSample},
InspectorGetEntryPointInterpolateTestParams{
- ast::InterpolationType::kPerspective,
- ast::InterpolationSampling::kNone, InterpolationType::kPerspective,
- InterpolationSampling::kCenter},
+ ast::InterpolationType::kPerspective, ast::InterpolationSampling::kNone,
+ InterpolationType::kPerspective, InterpolationSampling::kCenter},
InspectorGetEntryPointInterpolateTestParams{
- ast::InterpolationType::kLinear,
- ast::InterpolationSampling::kCenter, InterpolationType::kLinear,
- InterpolationSampling::kCenter},
+ ast::InterpolationType::kLinear, ast::InterpolationSampling::kCenter,
+ InterpolationType::kLinear, InterpolationSampling::kCenter},
InspectorGetEntryPointInterpolateTestParams{
- ast::InterpolationType::kLinear,
- ast::InterpolationSampling::kCentroid, InterpolationType::kLinear,
- InterpolationSampling::kCentroid},
+ ast::InterpolationType::kLinear, ast::InterpolationSampling::kCentroid,
+ InterpolationType::kLinear, InterpolationSampling::kCentroid},
InspectorGetEntryPointInterpolateTestParams{
- ast::InterpolationType::kLinear,
- ast::InterpolationSampling::kSample, InterpolationType::kLinear,
- InterpolationSampling::kSample},
+ ast::InterpolationType::kLinear, ast::InterpolationSampling::kSample,
+ InterpolationType::kLinear, InterpolationSampling::kSample},
InspectorGetEntryPointInterpolateTestParams{
ast::InterpolationType::kLinear, ast::InterpolationSampling::kNone,
InterpolationType::kLinear, InterpolationSampling::kCenter},
@@ -1021,1719 +951,1613 @@ INSTANTIATE_TEST_SUITE_P(
InterpolationType::kFlat, InterpolationSampling::kNone}));
TEST_F(InspectorGetConstantIDsTest, Bool) {
- AddOverridableConstantWithID("foo", 1, ty.bool_(), nullptr);
- AddOverridableConstantWithID("bar", 20, ty.bool_(), Expr(true));
- AddOverridableConstantWithID("baz", 300, ty.bool_(), Expr(false));
+ AddOverridableConstantWithID("foo", 1, ty.bool_(), nullptr);
+ AddOverridableConstantWithID("bar", 20, ty.bool_(), Expr(true));
+ AddOverridableConstantWithID("baz", 300, ty.bool_(), Expr(false));
- Inspector& inspector = Build();
+ Inspector& inspector = Build();
- auto result = inspector.GetConstantIDs();
- ASSERT_EQ(3u, result.size());
+ auto result = inspector.GetConstantIDs();
+ ASSERT_EQ(3u, result.size());
- ASSERT_TRUE(result.find(1) != result.end());
- EXPECT_TRUE(result[1].IsNull());
+ ASSERT_TRUE(result.find(1) != result.end());
+ EXPECT_TRUE(result[1].IsNull());
- ASSERT_TRUE(result.find(20) != result.end());
- EXPECT_TRUE(result[20].IsBool());
- EXPECT_TRUE(result[20].AsBool());
+ ASSERT_TRUE(result.find(20) != result.end());
+ EXPECT_TRUE(result[20].IsBool());
+ EXPECT_TRUE(result[20].AsBool());
- ASSERT_TRUE(result.find(300) != result.end());
- EXPECT_TRUE(result[300].IsBool());
- EXPECT_FALSE(result[300].AsBool());
+ ASSERT_TRUE(result.find(300) != result.end());
+ EXPECT_TRUE(result[300].IsBool());
+ EXPECT_FALSE(result[300].AsBool());
}
TEST_F(InspectorGetConstantIDsTest, U32) {
- AddOverridableConstantWithID("foo", 1, ty.u32(), nullptr);
- AddOverridableConstantWithID("bar", 20, ty.u32(), Expr(42u));
+ AddOverridableConstantWithID("foo", 1, ty.u32(), nullptr);
+ AddOverridableConstantWithID("bar", 20, ty.u32(), Expr(42_u));
- Inspector& inspector = Build();
+ Inspector& inspector = Build();
- auto result = inspector.GetConstantIDs();
- ASSERT_EQ(2u, result.size());
+ auto result = inspector.GetConstantIDs();
+ ASSERT_EQ(2u, result.size());
- ASSERT_TRUE(result.find(1) != result.end());
- EXPECT_TRUE(result[1].IsNull());
+ ASSERT_TRUE(result.find(1) != result.end());
+ EXPECT_TRUE(result[1].IsNull());
- ASSERT_TRUE(result.find(20) != result.end());
- EXPECT_TRUE(result[20].IsU32());
- EXPECT_EQ(42u, result[20].AsU32());
+ ASSERT_TRUE(result.find(20) != result.end());
+ EXPECT_TRUE(result[20].IsU32());
+ EXPECT_EQ(42u, result[20].AsU32());
}
TEST_F(InspectorGetConstantIDsTest, I32) {
- AddOverridableConstantWithID("foo", 1, ty.i32(), nullptr);
- AddOverridableConstantWithID("bar", 20, ty.i32(), Expr(-42));
- AddOverridableConstantWithID("baz", 300, ty.i32(), Expr(42));
+ AddOverridableConstantWithID("foo", 1, ty.i32(), nullptr);
+ AddOverridableConstantWithID("bar", 20, ty.i32(), Expr(-42_i));
+ AddOverridableConstantWithID("baz", 300, ty.i32(), Expr(42_i));
- Inspector& inspector = Build();
+ Inspector& inspector = Build();
- auto result = inspector.GetConstantIDs();
- ASSERT_EQ(3u, result.size());
+ auto result = inspector.GetConstantIDs();
+ ASSERT_EQ(3u, result.size());
- ASSERT_TRUE(result.find(1) != result.end());
- EXPECT_TRUE(result[1].IsNull());
+ ASSERT_TRUE(result.find(1) != result.end());
+ EXPECT_TRUE(result[1].IsNull());
- ASSERT_TRUE(result.find(20) != result.end());
- EXPECT_TRUE(result[20].IsI32());
- EXPECT_EQ(-42, result[20].AsI32());
+ ASSERT_TRUE(result.find(20) != result.end());
+ EXPECT_TRUE(result[20].IsI32());
+ EXPECT_EQ(-42, result[20].AsI32());
- ASSERT_TRUE(result.find(300) != result.end());
- EXPECT_TRUE(result[300].IsI32());
- EXPECT_EQ(42, result[300].AsI32());
+ ASSERT_TRUE(result.find(300) != result.end());
+ EXPECT_TRUE(result[300].IsI32());
+ EXPECT_EQ(42, result[300].AsI32());
}
TEST_F(InspectorGetConstantIDsTest, Float) {
- AddOverridableConstantWithID("foo", 1, ty.f32(), nullptr);
- AddOverridableConstantWithID("bar", 20, ty.f32(), Expr(0.0f));
- AddOverridableConstantWithID("baz", 300, ty.f32(), Expr(-10.0f));
- AddOverridableConstantWithID("x", 4000, ty.f32(), Expr(15.0f));
+ AddOverridableConstantWithID("foo", 1, ty.f32(), nullptr);
+ AddOverridableConstantWithID("bar", 20, ty.f32(), Expr(0_f));
+ AddOverridableConstantWithID("baz", 300, ty.f32(), Expr(-10_f));
+ AddOverridableConstantWithID("x", 4000, ty.f32(), Expr(15_f));
- Inspector& inspector = Build();
+ Inspector& inspector = Build();
- auto result = inspector.GetConstantIDs();
- ASSERT_EQ(4u, result.size());
+ auto result = inspector.GetConstantIDs();
+ ASSERT_EQ(4u, result.size());
- ASSERT_TRUE(result.find(1) != result.end());
- EXPECT_TRUE(result[1].IsNull());
+ ASSERT_TRUE(result.find(1) != result.end());
+ EXPECT_TRUE(result[1].IsNull());
- ASSERT_TRUE(result.find(20) != result.end());
- EXPECT_TRUE(result[20].IsFloat());
- EXPECT_FLOAT_EQ(0.0, result[20].AsFloat());
+ ASSERT_TRUE(result.find(20) != result.end());
+ EXPECT_TRUE(result[20].IsFloat());
+ EXPECT_FLOAT_EQ(0.0f, result[20].AsFloat());
- ASSERT_TRUE(result.find(300) != result.end());
- EXPECT_TRUE(result[300].IsFloat());
- EXPECT_FLOAT_EQ(-10.0, result[300].AsFloat());
+ ASSERT_TRUE(result.find(300) != result.end());
+ EXPECT_TRUE(result[300].IsFloat());
+ EXPECT_FLOAT_EQ(-10.0f, result[300].AsFloat());
- ASSERT_TRUE(result.find(4000) != result.end());
- EXPECT_TRUE(result[4000].IsFloat());
- EXPECT_FLOAT_EQ(15.0, result[4000].AsFloat());
+ ASSERT_TRUE(result.find(4000) != result.end());
+ EXPECT_TRUE(result[4000].IsFloat());
+ EXPECT_FLOAT_EQ(15.0f, result[4000].AsFloat());
}
TEST_F(InspectorGetConstantNameToIdMapTest, WithAndWithoutIds) {
- AddOverridableConstantWithID("v1", 1, ty.f32(), nullptr);
- AddOverridableConstantWithID("v20", 20, ty.f32(), nullptr);
- AddOverridableConstantWithID("v300", 300, ty.f32(), nullptr);
- auto* a = AddOverridableConstantWithoutID("a", ty.f32(), nullptr);
- auto* b = AddOverridableConstantWithoutID("b", ty.f32(), nullptr);
- auto* c = AddOverridableConstantWithoutID("c", ty.f32(), nullptr);
+ AddOverridableConstantWithID("v1", 1, ty.f32(), nullptr);
+ AddOverridableConstantWithID("v20", 20, ty.f32(), nullptr);
+ AddOverridableConstantWithID("v300", 300, ty.f32(), nullptr);
+ auto* a = AddOverridableConstantWithoutID("a", ty.f32(), nullptr);
+ auto* b = AddOverridableConstantWithoutID("b", ty.f32(), nullptr);
+ auto* c = AddOverridableConstantWithoutID("c", ty.f32(), nullptr);
- Inspector& inspector = Build();
+ Inspector& inspector = Build();
- auto result = inspector.GetConstantNameToIdMap();
- ASSERT_EQ(6u, result.size());
+ auto result = inspector.GetConstantNameToIdMap();
+ ASSERT_EQ(6u, result.size());
- ASSERT_TRUE(result.count("v1"));
- EXPECT_EQ(result["v1"], 1u);
+ ASSERT_TRUE(result.count("v1"));
+ EXPECT_EQ(result["v1"], 1u);
- ASSERT_TRUE(result.count("v20"));
- EXPECT_EQ(result["v20"], 20u);
+ ASSERT_TRUE(result.count("v20"));
+ EXPECT_EQ(result["v20"], 20u);
- ASSERT_TRUE(result.count("v300"));
- EXPECT_EQ(result["v300"], 300u);
+ ASSERT_TRUE(result.count("v300"));
+ EXPECT_EQ(result["v300"], 300u);
- ASSERT_TRUE(result.count("a"));
- ASSERT_TRUE(program_->Sem().Get<sem::GlobalVariable>(a));
- EXPECT_EQ(result["a"],
- program_->Sem().Get<sem::GlobalVariable>(a)->ConstantId());
+ ASSERT_TRUE(result.count("a"));
+ ASSERT_TRUE(program_->Sem().Get<sem::GlobalVariable>(a));
+ EXPECT_EQ(result["a"], program_->Sem().Get<sem::GlobalVariable>(a)->ConstantId());
- ASSERT_TRUE(result.count("b"));
- ASSERT_TRUE(program_->Sem().Get<sem::GlobalVariable>(b));
- EXPECT_EQ(result["b"],
- program_->Sem().Get<sem::GlobalVariable>(b)->ConstantId());
+ ASSERT_TRUE(result.count("b"));
+ ASSERT_TRUE(program_->Sem().Get<sem::GlobalVariable>(b));
+ EXPECT_EQ(result["b"], program_->Sem().Get<sem::GlobalVariable>(b)->ConstantId());
- ASSERT_TRUE(result.count("c"));
- ASSERT_TRUE(program_->Sem().Get<sem::GlobalVariable>(c));
- EXPECT_EQ(result["c"],
- program_->Sem().Get<sem::GlobalVariable>(c)->ConstantId());
+ ASSERT_TRUE(result.count("c"));
+ ASSERT_TRUE(program_->Sem().Get<sem::GlobalVariable>(c));
+ EXPECT_EQ(result["c"], program_->Sem().Get<sem::GlobalVariable>(c)->ConstantId());
}
TEST_F(InspectorGetStorageSizeTest, Empty) {
- MakeEmptyBodyFunction("ep_func",
- ast::AttributeList{Stage(ast::PipelineStage::kCompute),
- WorkgroupSize(1)});
- Inspector& inspector = Build();
- EXPECT_EQ(0u, inspector.GetStorageSize("ep_func"));
+ MakeEmptyBodyFunction(
+ "ep_func", ast::AttributeList{Stage(ast::PipelineStage::kCompute), WorkgroupSize(1_i)});
+ Inspector& inspector = Build();
+ EXPECT_EQ(0u, inspector.GetStorageSize("ep_func"));
}
TEST_F(InspectorGetStorageSizeTest, Simple_NonStruct) {
- AddUniformBuffer("ub_var", ty.i32(), 0, 0);
- AddStorageBuffer("sb_var", ty.i32(), ast::Access::kReadWrite, 1, 0);
- AddStorageBuffer("rosb_var", ty.i32(), ast::Access::kRead, 1, 1);
- Func("ep_func", {}, ty.void_(),
- {
- Decl(Const("ub", nullptr, Expr("ub_var"))),
- Decl(Const("sb", nullptr, Expr("sb_var"))),
- Decl(Const("rosb", nullptr, Expr("rosb_var"))),
- },
- {Stage(ast::PipelineStage::kCompute), WorkgroupSize(1)});
+ AddUniformBuffer("ub_var", ty.i32(), 0, 0);
+ AddStorageBuffer("sb_var", ty.i32(), ast::Access::kReadWrite, 1, 0);
+ AddStorageBuffer("rosb_var", ty.i32(), ast::Access::kRead, 1, 1);
+ Func("ep_func", {}, ty.void_(),
+ {
+ Decl(Let("ub", nullptr, Expr("ub_var"))),
+ Decl(Let("sb", nullptr, Expr("sb_var"))),
+ Decl(Let("rosb", nullptr, Expr("rosb_var"))),
+ },
+ {Stage(ast::PipelineStage::kCompute), WorkgroupSize(1_i)});
- Inspector& inspector = Build();
+ Inspector& inspector = Build();
- EXPECT_EQ(12u, inspector.GetStorageSize("ep_func"));
+ EXPECT_EQ(12u, inspector.GetStorageSize("ep_func"));
}
TEST_F(InspectorGetStorageSizeTest, Simple_Struct) {
- auto* ub_struct_type = MakeUniformBufferType("ub_type", {ty.i32(), ty.i32()});
- AddUniformBuffer("ub_var", ty.Of(ub_struct_type), 0, 0);
- MakeStructVariableReferenceBodyFunction("ub_func", "ub_var", {{0, ty.i32()}});
+ auto* ub_struct_type = MakeUniformBufferType("ub_type", {ty.i32(), ty.i32()});
+ AddUniformBuffer("ub_var", ty.Of(ub_struct_type), 0, 0);
+ MakeStructVariableReferenceBodyFunction("ub_func", "ub_var", {{0, ty.i32()}});
- auto sb = MakeStorageBufferTypes("sb_type", {ty.i32()});
- AddStorageBuffer("sb_var", sb(), ast::Access::kReadWrite, 1, 0);
- MakeStructVariableReferenceBodyFunction("sb_func", "sb_var", {{0, ty.i32()}});
+ auto sb = MakeStorageBufferTypes("sb_type", {ty.i32()});
+ AddStorageBuffer("sb_var", sb(), ast::Access::kReadWrite, 1, 0);
+ MakeStructVariableReferenceBodyFunction("sb_func", "sb_var", {{0, ty.i32()}});
- auto ro_sb = MakeStorageBufferTypes("rosb_type", {ty.i32()});
- AddStorageBuffer("rosb_var", ro_sb(), ast::Access::kRead, 1, 1);
- MakeStructVariableReferenceBodyFunction("rosb_func", "rosb_var",
- {{0, ty.i32()}});
+ auto ro_sb = MakeStorageBufferTypes("rosb_type", {ty.i32()});
+ AddStorageBuffer("rosb_var", ro_sb(), ast::Access::kRead, 1, 1);
+ MakeStructVariableReferenceBodyFunction("rosb_func", "rosb_var", {{0, ty.i32()}});
- MakeCallerBodyFunction("ep_func", {"ub_func", "sb_func", "rosb_func"},
- ast::AttributeList{
- Stage(ast::PipelineStage::kCompute),
- WorkgroupSize(1),
- });
+ MakeCallerBodyFunction("ep_func", {"ub_func", "sb_func", "rosb_func"},
+ ast::AttributeList{
+ Stage(ast::PipelineStage::kCompute),
+ WorkgroupSize(1_i),
+ });
- Inspector& inspector = Build();
+ Inspector& inspector = Build();
- EXPECT_EQ(16u, inspector.GetStorageSize("ep_func"));
+ EXPECT_EQ(16u, inspector.GetStorageSize("ep_func"));
}
TEST_F(InspectorGetStorageSizeTest, NonStructVec3) {
- AddUniformBuffer("ub_var", ty.vec3<f32>(), 0, 0);
- Func("ep_func", {}, ty.void_(),
- {
- Decl(Const("ub", nullptr, Expr("ub_var"))),
- },
- {Stage(ast::PipelineStage::kCompute), WorkgroupSize(1)});
+ AddUniformBuffer("ub_var", ty.vec3<f32>(), 0, 0);
+ Func("ep_func", {}, ty.void_(),
+ {
+ Decl(Let("ub", nullptr, Expr("ub_var"))),
+ },
+ {Stage(ast::PipelineStage::kCompute), WorkgroupSize(1_i)});
- Inspector& inspector = Build();
+ Inspector& inspector = Build();
- EXPECT_EQ(12u, inspector.GetStorageSize("ep_func"));
+ EXPECT_EQ(12u, inspector.GetStorageSize("ep_func"));
}
TEST_F(InspectorGetStorageSizeTest, StructVec3) {
- auto* ub_struct_type = MakeUniformBufferType("ub_type", {ty.vec3<f32>()});
- AddUniformBuffer("ub_var", ty.Of(ub_struct_type), 0, 0);
- Func("ep_func", {}, ty.void_(),
- {
- Decl(Const("ub", nullptr, Expr("ub_var"))),
- },
- {Stage(ast::PipelineStage::kCompute), WorkgroupSize(1)});
+ auto* ub_struct_type = MakeUniformBufferType("ub_type", {ty.vec3<f32>()});
+ AddUniformBuffer("ub_var", ty.Of(ub_struct_type), 0, 0);
+ Func("ep_func", {}, ty.void_(),
+ {
+ Decl(Let("ub", nullptr, Expr("ub_var"))),
+ },
+ {Stage(ast::PipelineStage::kCompute), WorkgroupSize(1_i)});
- Inspector& inspector = Build();
+ Inspector& inspector = Build();
- EXPECT_EQ(16u, inspector.GetStorageSize("ep_func"));
+ EXPECT_EQ(16u, inspector.GetStorageSize("ep_func"));
}
TEST_F(InspectorGetResourceBindingsTest, Empty) {
- MakeCallerBodyFunction("ep_func", {},
- ast::AttributeList{
- Stage(ast::PipelineStage::kFragment),
- });
+ MakeCallerBodyFunction("ep_func", {},
+ ast::AttributeList{
+ Stage(ast::PipelineStage::kFragment),
+ });
- Inspector& inspector = Build();
+ Inspector& inspector = Build();
- auto result = inspector.GetResourceBindings("ep_func");
- ASSERT_FALSE(inspector.has_error()) << inspector.error();
- ASSERT_EQ(0u, result.size());
+ auto result = inspector.GetResourceBindings("ep_func");
+ ASSERT_FALSE(inspector.has_error()) << inspector.error();
+ ASSERT_EQ(0u, result.size());
}
TEST_F(InspectorGetResourceBindingsTest, Simple) {
- auto* ub_struct_type = MakeUniformBufferType("ub_type", {ty.i32()});
- AddUniformBuffer("ub_var", ty.Of(ub_struct_type), 0, 0);
- MakeStructVariableReferenceBodyFunction("ub_func", "ub_var", {{0, ty.i32()}});
-
- auto sb = MakeStorageBufferTypes("sb_type", {ty.i32()});
- AddStorageBuffer("sb_var", sb(), ast::Access::kReadWrite, 1, 0);
- MakeStructVariableReferenceBodyFunction("sb_func", "sb_var", {{0, ty.i32()}});
-
- auto ro_sb = MakeStorageBufferTypes("rosb_type", {ty.i32()});
- AddStorageBuffer("rosb_var", ro_sb(), ast::Access::kRead, 1, 1);
- MakeStructVariableReferenceBodyFunction("rosb_func", "rosb_var",
- {{0, ty.i32()}});
-
- auto* s_texture_type =
- ty.sampled_texture(ast::TextureDimension::k1d, ty.f32());
- AddResource("s_texture", s_texture_type, 2, 0);
- AddSampler("s_var", 3, 0);
- AddGlobalVariable("s_coords", ty.f32());
- MakeSamplerReferenceBodyFunction("s_func", "s_texture", "s_var", "s_coords",
- ty.f32(), {});
-
- auto* cs_depth_texture_type = ty.depth_texture(ast::TextureDimension::k2d);
- AddResource("cs_texture", cs_depth_texture_type, 3, 1);
- AddComparisonSampler("cs_var", 3, 2);
- AddGlobalVariable("cs_coords", ty.vec2<f32>());
- AddGlobalVariable("cs_depth", ty.f32());
- MakeComparisonSamplerReferenceBodyFunction(
- "cs_func", "cs_texture", "cs_var", "cs_coords", "cs_depth", ty.f32(), {});
-
- auto* depth_ms_texture_type =
- ty.depth_multisampled_texture(ast::TextureDimension::k2d);
- AddResource("depth_ms_texture", depth_ms_texture_type, 3, 3);
- Func("depth_ms_func", {}, ty.void_(), {Ignore("depth_ms_texture")});
-
- auto* st_type = MakeStorageTextureTypes(ast::TextureDimension::k2d,
- ast::TexelFormat::kR32Uint);
- AddStorageTexture("st_var", st_type, 4, 0);
- MakeStorageTextureBodyFunction("st_func", "st_var", ty.vec2<i32>(), {});
-
- MakeCallerBodyFunction("ep_func",
- {"ub_func", "sb_func", "rosb_func", "s_func",
- "cs_func", "depth_ms_func", "st_func"},
- ast::AttributeList{
- Stage(ast::PipelineStage::kFragment),
- });
-
- Inspector& inspector = Build();
-
- auto result = inspector.GetResourceBindings("ep_func");
- ASSERT_FALSE(inspector.has_error()) << inspector.error();
- ASSERT_EQ(9u, result.size());
-
- EXPECT_EQ(ResourceBinding::ResourceType::kUniformBuffer,
- result[0].resource_type);
- EXPECT_EQ(0u, result[0].bind_group);
- EXPECT_EQ(0u, result[0].binding);
-
- EXPECT_EQ(ResourceBinding::ResourceType::kStorageBuffer,
- result[1].resource_type);
- EXPECT_EQ(1u, result[1].bind_group);
- EXPECT_EQ(0u, result[1].binding);
-
- EXPECT_EQ(ResourceBinding::ResourceType::kReadOnlyStorageBuffer,
- result[2].resource_type);
- EXPECT_EQ(1u, result[2].bind_group);
- EXPECT_EQ(1u, result[2].binding);
-
- EXPECT_EQ(ResourceBinding::ResourceType::kSampler, result[3].resource_type);
- EXPECT_EQ(3u, result[3].bind_group);
- EXPECT_EQ(0u, result[3].binding);
-
- EXPECT_EQ(ResourceBinding::ResourceType::kComparisonSampler,
- result[4].resource_type);
- EXPECT_EQ(3u, result[4].bind_group);
- EXPECT_EQ(2u, result[4].binding);
-
- EXPECT_EQ(ResourceBinding::ResourceType::kSampledTexture,
- result[5].resource_type);
- EXPECT_EQ(2u, result[5].bind_group);
- EXPECT_EQ(0u, result[5].binding);
-
- EXPECT_EQ(ResourceBinding::ResourceType::kWriteOnlyStorageTexture,
- result[6].resource_type);
- EXPECT_EQ(4u, result[6].bind_group);
- EXPECT_EQ(0u, result[6].binding);
-
- EXPECT_EQ(ResourceBinding::ResourceType::kDepthTexture,
- result[7].resource_type);
- EXPECT_EQ(3u, result[7].bind_group);
- EXPECT_EQ(1u, result[7].binding);
-
- EXPECT_EQ(ResourceBinding::ResourceType::kDepthMultisampledTexture,
- result[8].resource_type);
- EXPECT_EQ(3u, result[8].bind_group);
- EXPECT_EQ(3u, result[8].binding);
+ auto* ub_struct_type = MakeUniformBufferType("ub_type", {ty.i32()});
+ AddUniformBuffer("ub_var", ty.Of(ub_struct_type), 0, 0);
+ MakeStructVariableReferenceBodyFunction("ub_func", "ub_var", {{0, ty.i32()}});
+
+ auto sb = MakeStorageBufferTypes("sb_type", {ty.i32()});
+ AddStorageBuffer("sb_var", sb(), ast::Access::kReadWrite, 1, 0);
+ MakeStructVariableReferenceBodyFunction("sb_func", "sb_var", {{0, ty.i32()}});
+
+ auto ro_sb = MakeStorageBufferTypes("rosb_type", {ty.i32()});
+ AddStorageBuffer("rosb_var", ro_sb(), ast::Access::kRead, 1, 1);
+ MakeStructVariableReferenceBodyFunction("rosb_func", "rosb_var", {{0, ty.i32()}});
+
+ auto* s_texture_type = ty.sampled_texture(ast::TextureDimension::k1d, ty.f32());
+ AddResource("s_texture", s_texture_type, 2, 0);
+ AddSampler("s_var", 3, 0);
+ AddGlobalVariable("s_coords", ty.f32());
+ MakeSamplerReferenceBodyFunction("s_func", "s_texture", "s_var", "s_coords", ty.f32(), {});
+
+ auto* cs_depth_texture_type = ty.depth_texture(ast::TextureDimension::k2d);
+ AddResource("cs_texture", cs_depth_texture_type, 3, 1);
+ AddComparisonSampler("cs_var", 3, 2);
+ AddGlobalVariable("cs_coords", ty.vec2<f32>());
+ AddGlobalVariable("cs_depth", ty.f32());
+ MakeComparisonSamplerReferenceBodyFunction("cs_func", "cs_texture", "cs_var", "cs_coords",
+ "cs_depth", ty.f32(), {});
+
+ auto* depth_ms_texture_type = ty.depth_multisampled_texture(ast::TextureDimension::k2d);
+ AddResource("depth_ms_texture", depth_ms_texture_type, 3, 3);
+ Func("depth_ms_func", {}, ty.void_(), {Ignore("depth_ms_texture")});
+
+ auto* st_type = MakeStorageTextureTypes(ast::TextureDimension::k2d, ast::TexelFormat::kR32Uint);
+ AddStorageTexture("st_var", st_type, 4, 0);
+ MakeStorageTextureBodyFunction("st_func", "st_var", ty.vec2<i32>(), {});
+
+ MakeCallerBodyFunction(
+ "ep_func",
+ {"ub_func", "sb_func", "rosb_func", "s_func", "cs_func", "depth_ms_func", "st_func"},
+ ast::AttributeList{
+ Stage(ast::PipelineStage::kFragment),
+ });
+
+ Inspector& inspector = Build();
+
+ auto result = inspector.GetResourceBindings("ep_func");
+ ASSERT_FALSE(inspector.has_error()) << inspector.error();
+ ASSERT_EQ(9u, result.size());
+
+ EXPECT_EQ(ResourceBinding::ResourceType::kUniformBuffer, result[0].resource_type);
+ EXPECT_EQ(0u, result[0].bind_group);
+ EXPECT_EQ(0u, result[0].binding);
+
+ EXPECT_EQ(ResourceBinding::ResourceType::kStorageBuffer, result[1].resource_type);
+ EXPECT_EQ(1u, result[1].bind_group);
+ EXPECT_EQ(0u, result[1].binding);
+
+ EXPECT_EQ(ResourceBinding::ResourceType::kReadOnlyStorageBuffer, result[2].resource_type);
+ EXPECT_EQ(1u, result[2].bind_group);
+ EXPECT_EQ(1u, result[2].binding);
+
+ EXPECT_EQ(ResourceBinding::ResourceType::kSampler, result[3].resource_type);
+ EXPECT_EQ(3u, result[3].bind_group);
+ EXPECT_EQ(0u, result[3].binding);
+
+ EXPECT_EQ(ResourceBinding::ResourceType::kComparisonSampler, result[4].resource_type);
+ EXPECT_EQ(3u, result[4].bind_group);
+ EXPECT_EQ(2u, result[4].binding);
+
+ EXPECT_EQ(ResourceBinding::ResourceType::kSampledTexture, result[5].resource_type);
+ EXPECT_EQ(2u, result[5].bind_group);
+ EXPECT_EQ(0u, result[5].binding);
+
+ EXPECT_EQ(ResourceBinding::ResourceType::kWriteOnlyStorageTexture, result[6].resource_type);
+ EXPECT_EQ(4u, result[6].bind_group);
+ EXPECT_EQ(0u, result[6].binding);
+
+ EXPECT_EQ(ResourceBinding::ResourceType::kDepthTexture, result[7].resource_type);
+ EXPECT_EQ(3u, result[7].bind_group);
+ EXPECT_EQ(1u, result[7].binding);
+
+ EXPECT_EQ(ResourceBinding::ResourceType::kDepthMultisampledTexture, result[8].resource_type);
+ EXPECT_EQ(3u, result[8].bind_group);
+ EXPECT_EQ(3u, result[8].binding);
}
TEST_F(InspectorGetUniformBufferResourceBindingsTest, MissingEntryPoint) {
- Inspector& inspector = Build();
+ Inspector& inspector = Build();
- auto result = inspector.GetUniformBufferResourceBindings("ep_func");
- ASSERT_TRUE(inspector.has_error());
- std::string error = inspector.error();
- EXPECT_TRUE(error.find("not found") != std::string::npos);
+ auto result = inspector.GetUniformBufferResourceBindings("ep_func");
+ ASSERT_TRUE(inspector.has_error());
+ std::string error = inspector.error();
+ EXPECT_TRUE(error.find("not found") != std::string::npos);
}
TEST_F(InspectorGetUniformBufferResourceBindingsTest, NonEntryPointFunc) {
- auto* foo_struct_type = MakeUniformBufferType("foo_type", {ty.i32()});
- AddUniformBuffer("foo_ub", ty.Of(foo_struct_type), 0, 0);
+ auto* foo_struct_type = MakeUniformBufferType("foo_type", {ty.i32()});
+ AddUniformBuffer("foo_ub", ty.Of(foo_struct_type), 0, 0);
- MakeStructVariableReferenceBodyFunction("ub_func", "foo_ub", {{0, ty.i32()}});
+ MakeStructVariableReferenceBodyFunction("ub_func", "foo_ub", {{0, ty.i32()}});
- MakeCallerBodyFunction("ep_func", {"ub_func"},
- ast::AttributeList{
- Stage(ast::PipelineStage::kFragment),
- });
+ MakeCallerBodyFunction("ep_func", {"ub_func"},
+ ast::AttributeList{
+ Stage(ast::PipelineStage::kFragment),
+ });
- Inspector& inspector = Build();
+ Inspector& inspector = Build();
- auto result = inspector.GetUniformBufferResourceBindings("ub_func");
- std::string error = inspector.error();
- EXPECT_TRUE(error.find("not an entry point") != std::string::npos);
+ auto result = inspector.GetUniformBufferResourceBindings("ub_func");
+ std::string error = inspector.error();
+ EXPECT_TRUE(error.find("not an entry point") != std::string::npos);
}
TEST_F(InspectorGetUniformBufferResourceBindingsTest, Simple_NonStruct) {
- AddUniformBuffer("foo_ub", ty.i32(), 0, 0);
- MakePlainGlobalReferenceBodyFunction("ub_func", "foo_ub", ty.i32(), {});
+ AddUniformBuffer("foo_ub", ty.i32(), 0, 0);
+ MakePlainGlobalReferenceBodyFunction("ub_func", "foo_ub", ty.i32(), {});
- MakeCallerBodyFunction("ep_func", {"ub_func"},
- ast::AttributeList{
- Stage(ast::PipelineStage::kFragment),
- });
+ MakeCallerBodyFunction("ep_func", {"ub_func"},
+ ast::AttributeList{
+ Stage(ast::PipelineStage::kFragment),
+ });
- Inspector& inspector = Build();
+ Inspector& inspector = Build();
- auto result = inspector.GetUniformBufferResourceBindings("ep_func");
- ASSERT_FALSE(inspector.has_error()) << inspector.error();
- ASSERT_EQ(1u, result.size());
+ auto result = inspector.GetUniformBufferResourceBindings("ep_func");
+ ASSERT_FALSE(inspector.has_error()) << inspector.error();
+ ASSERT_EQ(1u, result.size());
- EXPECT_EQ(ResourceBinding::ResourceType::kUniformBuffer,
- result[0].resource_type);
- EXPECT_EQ(0u, result[0].bind_group);
- EXPECT_EQ(0u, result[0].binding);
- EXPECT_EQ(4u, result[0].size);
- EXPECT_EQ(4u, result[0].size_no_padding);
+ EXPECT_EQ(ResourceBinding::ResourceType::kUniformBuffer, result[0].resource_type);
+ EXPECT_EQ(0u, result[0].bind_group);
+ EXPECT_EQ(0u, result[0].binding);
+ EXPECT_EQ(4u, result[0].size);
+ EXPECT_EQ(4u, result[0].size_no_padding);
}
TEST_F(InspectorGetUniformBufferResourceBindingsTest, Simple_Struct) {
- auto* foo_struct_type = MakeUniformBufferType("foo_type", {ty.i32()});
- AddUniformBuffer("foo_ub", ty.Of(foo_struct_type), 0, 0);
+ auto* foo_struct_type = MakeUniformBufferType("foo_type", {ty.i32()});
+ AddUniformBuffer("foo_ub", ty.Of(foo_struct_type), 0, 0);
- MakeStructVariableReferenceBodyFunction("ub_func", "foo_ub", {{0, ty.i32()}});
+ MakeStructVariableReferenceBodyFunction("ub_func", "foo_ub", {{0, ty.i32()}});
- MakeCallerBodyFunction("ep_func", {"ub_func"},
- ast::AttributeList{
- Stage(ast::PipelineStage::kFragment),
- });
+ MakeCallerBodyFunction("ep_func", {"ub_func"},
+ ast::AttributeList{
+ Stage(ast::PipelineStage::kFragment),
+ });
- Inspector& inspector = Build();
+ Inspector& inspector = Build();
- auto result = inspector.GetUniformBufferResourceBindings("ep_func");
- ASSERT_FALSE(inspector.has_error()) << inspector.error();
- ASSERT_EQ(1u, result.size());
+ auto result = inspector.GetUniformBufferResourceBindings("ep_func");
+ ASSERT_FALSE(inspector.has_error()) << inspector.error();
+ ASSERT_EQ(1u, result.size());
- EXPECT_EQ(ResourceBinding::ResourceType::kUniformBuffer,
- result[0].resource_type);
- EXPECT_EQ(0u, result[0].bind_group);
- EXPECT_EQ(0u, result[0].binding);
- EXPECT_EQ(4u, result[0].size);
- EXPECT_EQ(4u, result[0].size_no_padding);
+ EXPECT_EQ(ResourceBinding::ResourceType::kUniformBuffer, result[0].resource_type);
+ EXPECT_EQ(0u, result[0].bind_group);
+ EXPECT_EQ(0u, result[0].binding);
+ EXPECT_EQ(4u, result[0].size);
+ EXPECT_EQ(4u, result[0].size_no_padding);
}
TEST_F(InspectorGetUniformBufferResourceBindingsTest, MultipleMembers) {
- auto* foo_struct_type =
- MakeUniformBufferType("foo_type", {ty.i32(), ty.u32(), ty.f32()});
- AddUniformBuffer("foo_ub", ty.Of(foo_struct_type), 0, 0);
+ auto* foo_struct_type = MakeUniformBufferType("foo_type", {ty.i32(), ty.u32(), ty.f32()});
+ AddUniformBuffer("foo_ub", ty.Of(foo_struct_type), 0, 0);
- MakeStructVariableReferenceBodyFunction(
- "ub_func", "foo_ub", {{0, ty.i32()}, {1, ty.u32()}, {2, ty.f32()}});
+ MakeStructVariableReferenceBodyFunction("ub_func", "foo_ub",
+ {{0, ty.i32()}, {1, ty.u32()}, {2, ty.f32()}});
- MakeCallerBodyFunction("ep_func", {"ub_func"},
- ast::AttributeList{
- Stage(ast::PipelineStage::kFragment),
- });
+ MakeCallerBodyFunction("ep_func", {"ub_func"},
+ ast::AttributeList{
+ Stage(ast::PipelineStage::kFragment),
+ });
- Inspector& inspector = Build();
+ Inspector& inspector = Build();
- auto result = inspector.GetUniformBufferResourceBindings("ep_func");
- ASSERT_FALSE(inspector.has_error()) << inspector.error();
- ASSERT_EQ(1u, result.size());
+ auto result = inspector.GetUniformBufferResourceBindings("ep_func");
+ ASSERT_FALSE(inspector.has_error()) << inspector.error();
+ ASSERT_EQ(1u, result.size());
- EXPECT_EQ(ResourceBinding::ResourceType::kUniformBuffer,
- result[0].resource_type);
- EXPECT_EQ(0u, result[0].bind_group);
- EXPECT_EQ(0u, result[0].binding);
- EXPECT_EQ(12u, result[0].size);
- EXPECT_EQ(12u, result[0].size_no_padding);
+ EXPECT_EQ(ResourceBinding::ResourceType::kUniformBuffer, result[0].resource_type);
+ EXPECT_EQ(0u, result[0].bind_group);
+ EXPECT_EQ(0u, result[0].binding);
+ EXPECT_EQ(12u, result[0].size);
+ EXPECT_EQ(12u, result[0].size_no_padding);
}
TEST_F(InspectorGetUniformBufferResourceBindingsTest, ContainingPadding) {
- auto* foo_struct_type = MakeUniformBufferType("foo_type", {ty.vec3<f32>()});
- AddUniformBuffer("foo_ub", ty.Of(foo_struct_type), 0, 0);
+ auto* foo_struct_type = MakeUniformBufferType("foo_type", {ty.vec3<f32>()});
+ AddUniformBuffer("foo_ub", ty.Of(foo_struct_type), 0, 0);
- MakeStructVariableReferenceBodyFunction("ub_func", "foo_ub",
- {{0, ty.vec3<f32>()}});
+ MakeStructVariableReferenceBodyFunction("ub_func", "foo_ub", {{0, ty.vec3<f32>()}});
- MakeCallerBodyFunction("ep_func", {"ub_func"},
- ast::AttributeList{
- Stage(ast::PipelineStage::kFragment),
- });
+ MakeCallerBodyFunction("ep_func", {"ub_func"},
+ ast::AttributeList{
+ Stage(ast::PipelineStage::kFragment),
+ });
- Inspector& inspector = Build();
+ Inspector& inspector = Build();
- auto result = inspector.GetUniformBufferResourceBindings("ep_func");
- ASSERT_FALSE(inspector.has_error()) << inspector.error();
- ASSERT_EQ(1u, result.size());
+ auto result = inspector.GetUniformBufferResourceBindings("ep_func");
+ ASSERT_FALSE(inspector.has_error()) << inspector.error();
+ ASSERT_EQ(1u, result.size());
- EXPECT_EQ(ResourceBinding::ResourceType::kUniformBuffer,
- result[0].resource_type);
- EXPECT_EQ(0u, result[0].bind_group);
- EXPECT_EQ(0u, result[0].binding);
- EXPECT_EQ(16u, result[0].size);
- EXPECT_EQ(12u, result[0].size_no_padding);
+ EXPECT_EQ(ResourceBinding::ResourceType::kUniformBuffer, result[0].resource_type);
+ EXPECT_EQ(0u, result[0].bind_group);
+ EXPECT_EQ(0u, result[0].binding);
+ EXPECT_EQ(16u, result[0].size);
+ EXPECT_EQ(12u, result[0].size_no_padding);
}
TEST_F(InspectorGetUniformBufferResourceBindingsTest, NonStructVec3) {
- AddUniformBuffer("foo_ub", ty.vec3<f32>(), 0, 0);
- MakePlainGlobalReferenceBodyFunction("ub_func", "foo_ub", ty.vec3<f32>(), {});
+ AddUniformBuffer("foo_ub", ty.vec3<f32>(), 0, 0);
+ MakePlainGlobalReferenceBodyFunction("ub_func", "foo_ub", ty.vec3<f32>(), {});
- MakeCallerBodyFunction("ep_func", {"ub_func"},
- ast::AttributeList{
- Stage(ast::PipelineStage::kFragment),
- });
+ MakeCallerBodyFunction("ep_func", {"ub_func"},
+ ast::AttributeList{
+ Stage(ast::PipelineStage::kFragment),
+ });
- Inspector& inspector = Build();
+ Inspector& inspector = Build();
- auto result = inspector.GetUniformBufferResourceBindings("ep_func");
- ASSERT_FALSE(inspector.has_error()) << inspector.error();
- ASSERT_EQ(1u, result.size());
+ auto result = inspector.GetUniformBufferResourceBindings("ep_func");
+ ASSERT_FALSE(inspector.has_error()) << inspector.error();
+ ASSERT_EQ(1u, result.size());
- EXPECT_EQ(ResourceBinding::ResourceType::kUniformBuffer,
- result[0].resource_type);
- EXPECT_EQ(0u, result[0].bind_group);
- EXPECT_EQ(0u, result[0].binding);
- EXPECT_EQ(12u, result[0].size);
- EXPECT_EQ(12u, result[0].size_no_padding);
+ EXPECT_EQ(ResourceBinding::ResourceType::kUniformBuffer, result[0].resource_type);
+ EXPECT_EQ(0u, result[0].bind_group);
+ EXPECT_EQ(0u, result[0].binding);
+ EXPECT_EQ(12u, result[0].size);
+ EXPECT_EQ(12u, result[0].size_no_padding);
}
TEST_F(InspectorGetUniformBufferResourceBindingsTest, MultipleUniformBuffers) {
- auto* ub_struct_type =
- MakeUniformBufferType("ub_type", {ty.i32(), ty.u32(), ty.f32()});
- AddUniformBuffer("ub_foo", ty.Of(ub_struct_type), 0, 0);
- AddUniformBuffer("ub_bar", ty.Of(ub_struct_type), 0, 1);
- AddUniformBuffer("ub_baz", ty.Of(ub_struct_type), 2, 0);
-
- auto AddReferenceFunc = [this](const std::string& func_name,
- const std::string& var_name) {
- MakeStructVariableReferenceBodyFunction(
- func_name, var_name, {{0, ty.i32()}, {1, ty.u32()}, {2, ty.f32()}});
- };
- AddReferenceFunc("ub_foo_func", "ub_foo");
- AddReferenceFunc("ub_bar_func", "ub_bar");
- AddReferenceFunc("ub_baz_func", "ub_baz");
-
- auto FuncCall = [&](const std::string& callee) {
- return create<ast::CallStatement>(Call(callee));
- };
-
- Func("ep_func", ast::VariableList(), ty.void_(),
- ast::StatementList{FuncCall("ub_foo_func"), FuncCall("ub_bar_func"),
- FuncCall("ub_baz_func"), Return()},
- ast::AttributeList{
- Stage(ast::PipelineStage::kFragment),
- });
-
- Inspector& inspector = Build();
-
- auto result = inspector.GetUniformBufferResourceBindings("ep_func");
- ASSERT_FALSE(inspector.has_error()) << inspector.error();
- ASSERT_EQ(3u, result.size());
-
- EXPECT_EQ(ResourceBinding::ResourceType::kUniformBuffer,
- result[0].resource_type);
- EXPECT_EQ(0u, result[0].bind_group);
- EXPECT_EQ(0u, result[0].binding);
- EXPECT_EQ(12u, result[0].size);
- EXPECT_EQ(12u, result[0].size_no_padding);
-
- EXPECT_EQ(ResourceBinding::ResourceType::kUniformBuffer,
- result[1].resource_type);
- EXPECT_EQ(0u, result[1].bind_group);
- EXPECT_EQ(1u, result[1].binding);
- EXPECT_EQ(12u, result[1].size);
- EXPECT_EQ(12u, result[1].size_no_padding);
-
- EXPECT_EQ(ResourceBinding::ResourceType::kUniformBuffer,
- result[2].resource_type);
- EXPECT_EQ(2u, result[2].bind_group);
- EXPECT_EQ(0u, result[2].binding);
- EXPECT_EQ(12u, result[2].size);
- EXPECT_EQ(12u, result[2].size_no_padding);
+ auto* ub_struct_type = MakeUniformBufferType("ub_type", {ty.i32(), ty.u32(), ty.f32()});
+ AddUniformBuffer("ub_foo", ty.Of(ub_struct_type), 0, 0);
+ AddUniformBuffer("ub_bar", ty.Of(ub_struct_type), 0, 1);
+ AddUniformBuffer("ub_baz", ty.Of(ub_struct_type), 2, 0);
+
+ auto AddReferenceFunc = [this](const std::string& func_name, const std::string& var_name) {
+ MakeStructVariableReferenceBodyFunction(func_name, var_name,
+ {{0, ty.i32()}, {1, ty.u32()}, {2, ty.f32()}});
+ };
+ AddReferenceFunc("ub_foo_func", "ub_foo");
+ AddReferenceFunc("ub_bar_func", "ub_bar");
+ AddReferenceFunc("ub_baz_func", "ub_baz");
+
+ auto FuncCall = [&](const std::string& callee) {
+ return create<ast::CallStatement>(Call(callee));
+ };
+
+ Func("ep_func", ast::VariableList(), ty.void_(),
+ ast::StatementList{FuncCall("ub_foo_func"), FuncCall("ub_bar_func"),
+ FuncCall("ub_baz_func"), Return()},
+ ast::AttributeList{
+ Stage(ast::PipelineStage::kFragment),
+ });
+
+ Inspector& inspector = Build();
+
+ auto result = inspector.GetUniformBufferResourceBindings("ep_func");
+ ASSERT_FALSE(inspector.has_error()) << inspector.error();
+ ASSERT_EQ(3u, result.size());
+
+ EXPECT_EQ(ResourceBinding::ResourceType::kUniformBuffer, result[0].resource_type);
+ EXPECT_EQ(0u, result[0].bind_group);
+ EXPECT_EQ(0u, result[0].binding);
+ EXPECT_EQ(12u, result[0].size);
+ EXPECT_EQ(12u, result[0].size_no_padding);
+
+ EXPECT_EQ(ResourceBinding::ResourceType::kUniformBuffer, result[1].resource_type);
+ EXPECT_EQ(0u, result[1].bind_group);
+ EXPECT_EQ(1u, result[1].binding);
+ EXPECT_EQ(12u, result[1].size);
+ EXPECT_EQ(12u, result[1].size_no_padding);
+
+ EXPECT_EQ(ResourceBinding::ResourceType::kUniformBuffer, result[2].resource_type);
+ EXPECT_EQ(2u, result[2].bind_group);
+ EXPECT_EQ(0u, result[2].binding);
+ EXPECT_EQ(12u, result[2].size);
+ EXPECT_EQ(12u, result[2].size_no_padding);
}
TEST_F(InspectorGetUniformBufferResourceBindingsTest, ContainingArray) {
-  // Manually create uniform buffer to make sure it has a valid layout (array
- // with elem stride of 16, and that is 16-byte aligned within the struct)
- auto* foo_struct_type = Structure(
- "foo_type",
- {Member("0i32", ty.i32()),
- Member("b", ty.array(ty.u32(), 4, /*stride*/ 16), {MemberAlign(16)})});
+    // Manually create uniform buffer to make sure it has a valid layout (array
+ // with elem stride of 16, and that is 16-byte aligned within the struct)
+ auto* foo_struct_type = Structure(
+ "foo_type", {Member("0i32", ty.i32()),
+ Member("b", ty.array(ty.u32(), 4_u, /*stride*/ 16), {MemberAlign(16)})});
- AddUniformBuffer("foo_ub", ty.Of(foo_struct_type), 0, 0);
+ AddUniformBuffer("foo_ub", ty.Of(foo_struct_type), 0, 0);
- MakeStructVariableReferenceBodyFunction("ub_func", "foo_ub", {{0, ty.i32()}});
+ MakeStructVariableReferenceBodyFunction("ub_func", "foo_ub", {{0, ty.i32()}});
- MakeCallerBodyFunction("ep_func", {"ub_func"},
- ast::AttributeList{
- Stage(ast::PipelineStage::kFragment),
- });
+ MakeCallerBodyFunction("ep_func", {"ub_func"},
+ ast::AttributeList{
+ Stage(ast::PipelineStage::kFragment),
+ });
- Inspector& inspector = Build();
+ Inspector& inspector = Build();
- auto result = inspector.GetUniformBufferResourceBindings("ep_func");
- ASSERT_FALSE(inspector.has_error()) << inspector.error();
- ASSERT_EQ(1u, result.size());
+ auto result = inspector.GetUniformBufferResourceBindings("ep_func");
+ ASSERT_FALSE(inspector.has_error()) << inspector.error();
+ ASSERT_EQ(1u, result.size());
- EXPECT_EQ(ResourceBinding::ResourceType::kUniformBuffer,
- result[0].resource_type);
- EXPECT_EQ(0u, result[0].bind_group);
- EXPECT_EQ(0u, result[0].binding);
- EXPECT_EQ(80u, result[0].size);
- EXPECT_EQ(80u, result[0].size_no_padding);
+ EXPECT_EQ(ResourceBinding::ResourceType::kUniformBuffer, result[0].resource_type);
+ EXPECT_EQ(0u, result[0].bind_group);
+ EXPECT_EQ(0u, result[0].binding);
+ EXPECT_EQ(80u, result[0].size);
+ EXPECT_EQ(80u, result[0].size_no_padding);
}
TEST_F(InspectorGetStorageBufferResourceBindingsTest, Simple_NonStruct) {
- AddStorageBuffer("foo_sb", ty.i32(), ast::Access::kReadWrite, 0, 0);
- MakePlainGlobalReferenceBodyFunction("sb_func", "foo_sb", ty.i32(), {});
+ AddStorageBuffer("foo_sb", ty.i32(), ast::Access::kReadWrite, 0, 0);
+ MakePlainGlobalReferenceBodyFunction("sb_func", "foo_sb", ty.i32(), {});
- MakeCallerBodyFunction("ep_func", {"sb_func"},
- ast::AttributeList{
- Stage(ast::PipelineStage::kFragment),
- });
+ MakeCallerBodyFunction("ep_func", {"sb_func"},
+ ast::AttributeList{
+ Stage(ast::PipelineStage::kFragment),
+ });
- Inspector& inspector = Build();
+ Inspector& inspector = Build();
- auto result = inspector.GetStorageBufferResourceBindings("ep_func");
- ASSERT_FALSE(inspector.has_error()) << inspector.error();
- ASSERT_EQ(1u, result.size());
+ auto result = inspector.GetStorageBufferResourceBindings("ep_func");
+ ASSERT_FALSE(inspector.has_error()) << inspector.error();
+ ASSERT_EQ(1u, result.size());
- EXPECT_EQ(ResourceBinding::ResourceType::kStorageBuffer,
- result[0].resource_type);
- EXPECT_EQ(0u, result[0].bind_group);
- EXPECT_EQ(0u, result[0].binding);
- EXPECT_EQ(4u, result[0].size);
- EXPECT_EQ(4u, result[0].size_no_padding);
+ EXPECT_EQ(ResourceBinding::ResourceType::kStorageBuffer, result[0].resource_type);
+ EXPECT_EQ(0u, result[0].bind_group);
+ EXPECT_EQ(0u, result[0].binding);
+ EXPECT_EQ(4u, result[0].size);
+ EXPECT_EQ(4u, result[0].size_no_padding);
}
TEST_F(InspectorGetStorageBufferResourceBindingsTest, Simple_Struct) {
- auto foo_struct_type = MakeStorageBufferTypes("foo_type", {ty.i32()});
- AddStorageBuffer("foo_sb", foo_struct_type(), ast::Access::kReadWrite, 0, 0);
+ auto foo_struct_type = MakeStorageBufferTypes("foo_type", {ty.i32()});
+ AddStorageBuffer("foo_sb", foo_struct_type(), ast::Access::kReadWrite, 0, 0);
- MakeStructVariableReferenceBodyFunction("sb_func", "foo_sb", {{0, ty.i32()}});
+ MakeStructVariableReferenceBodyFunction("sb_func", "foo_sb", {{0, ty.i32()}});
- MakeCallerBodyFunction("ep_func", {"sb_func"},
- ast::AttributeList{
- Stage(ast::PipelineStage::kFragment),
- });
+ MakeCallerBodyFunction("ep_func", {"sb_func"},
+ ast::AttributeList{
+ Stage(ast::PipelineStage::kFragment),
+ });
- Inspector& inspector = Build();
+ Inspector& inspector = Build();
- auto result = inspector.GetStorageBufferResourceBindings("ep_func");
- ASSERT_FALSE(inspector.has_error()) << inspector.error();
- ASSERT_EQ(1u, result.size());
+ auto result = inspector.GetStorageBufferResourceBindings("ep_func");
+ ASSERT_FALSE(inspector.has_error()) << inspector.error();
+ ASSERT_EQ(1u, result.size());
- EXPECT_EQ(ResourceBinding::ResourceType::kStorageBuffer,
- result[0].resource_type);
- EXPECT_EQ(0u, result[0].bind_group);
- EXPECT_EQ(0u, result[0].binding);
- EXPECT_EQ(4u, result[0].size);
- EXPECT_EQ(4u, result[0].size_no_padding);
+ EXPECT_EQ(ResourceBinding::ResourceType::kStorageBuffer, result[0].resource_type);
+ EXPECT_EQ(0u, result[0].bind_group);
+ EXPECT_EQ(0u, result[0].binding);
+ EXPECT_EQ(4u, result[0].size);
+ EXPECT_EQ(4u, result[0].size_no_padding);
}
TEST_F(InspectorGetStorageBufferResourceBindingsTest, MultipleMembers) {
- auto foo_struct_type = MakeStorageBufferTypes("foo_type", {
- ty.i32(),
- ty.u32(),
- ty.f32(),
- });
- AddStorageBuffer("foo_sb", foo_struct_type(), ast::Access::kReadWrite, 0, 0);
+ auto foo_struct_type = MakeStorageBufferTypes("foo_type", {
+ ty.i32(),
+ ty.u32(),
+ ty.f32(),
+ });
+ AddStorageBuffer("foo_sb", foo_struct_type(), ast::Access::kReadWrite, 0, 0);
- MakeStructVariableReferenceBodyFunction(
- "sb_func", "foo_sb", {{0, ty.i32()}, {1, ty.u32()}, {2, ty.f32()}});
+ MakeStructVariableReferenceBodyFunction("sb_func", "foo_sb",
+ {{0, ty.i32()}, {1, ty.u32()}, {2, ty.f32()}});
- MakeCallerBodyFunction("ep_func", {"sb_func"},
- ast::AttributeList{
- Stage(ast::PipelineStage::kFragment),
- });
+ MakeCallerBodyFunction("ep_func", {"sb_func"},
+ ast::AttributeList{
+ Stage(ast::PipelineStage::kFragment),
+ });
- Inspector& inspector = Build();
+ Inspector& inspector = Build();
- auto result = inspector.GetStorageBufferResourceBindings("ep_func");
- ASSERT_FALSE(inspector.has_error()) << inspector.error();
- ASSERT_EQ(1u, result.size());
+ auto result = inspector.GetStorageBufferResourceBindings("ep_func");
+ ASSERT_FALSE(inspector.has_error()) << inspector.error();
+ ASSERT_EQ(1u, result.size());
- EXPECT_EQ(ResourceBinding::ResourceType::kStorageBuffer,
- result[0].resource_type);
- EXPECT_EQ(0u, result[0].bind_group);
- EXPECT_EQ(0u, result[0].binding);
- EXPECT_EQ(12u, result[0].size);
- EXPECT_EQ(12u, result[0].size_no_padding);
+ EXPECT_EQ(ResourceBinding::ResourceType::kStorageBuffer, result[0].resource_type);
+ EXPECT_EQ(0u, result[0].bind_group);
+ EXPECT_EQ(0u, result[0].binding);
+ EXPECT_EQ(12u, result[0].size);
+ EXPECT_EQ(12u, result[0].size_no_padding);
}
TEST_F(InspectorGetStorageBufferResourceBindingsTest, MultipleStorageBuffers) {
- auto sb_struct_type = MakeStorageBufferTypes("sb_type", {
- ty.i32(),
- ty.u32(),
- ty.f32(),
- });
- AddStorageBuffer("sb_foo", sb_struct_type(), ast::Access::kReadWrite, 0, 0);
- AddStorageBuffer("sb_bar", sb_struct_type(), ast::Access::kReadWrite, 0, 1);
- AddStorageBuffer("sb_baz", sb_struct_type(), ast::Access::kReadWrite, 2, 0);
-
- auto AddReferenceFunc = [this](const std::string& func_name,
- const std::string& var_name) {
- MakeStructVariableReferenceBodyFunction(
- func_name, var_name, {{0, ty.i32()}, {1, ty.u32()}, {2, ty.f32()}});
- };
- AddReferenceFunc("sb_foo_func", "sb_foo");
- AddReferenceFunc("sb_bar_func", "sb_bar");
- AddReferenceFunc("sb_baz_func", "sb_baz");
-
- auto FuncCall = [&](const std::string& callee) {
- return create<ast::CallStatement>(Call(callee));
- };
-
- Func("ep_func", ast::VariableList(), ty.void_(),
- ast::StatementList{
- FuncCall("sb_foo_func"),
- FuncCall("sb_bar_func"),
- FuncCall("sb_baz_func"),
- Return(),
- },
- ast::AttributeList{
- Stage(ast::PipelineStage::kFragment),
- });
-
- Inspector& inspector = Build();
-
- auto result = inspector.GetStorageBufferResourceBindings("ep_func");
- ASSERT_FALSE(inspector.has_error()) << inspector.error();
- ASSERT_EQ(3u, result.size());
-
- EXPECT_EQ(ResourceBinding::ResourceType::kStorageBuffer,
- result[0].resource_type);
- EXPECT_EQ(0u, result[0].bind_group);
- EXPECT_EQ(0u, result[0].binding);
- EXPECT_EQ(12u, result[0].size);
- EXPECT_EQ(12u, result[0].size_no_padding);
-
- EXPECT_EQ(ResourceBinding::ResourceType::kStorageBuffer,
- result[1].resource_type);
- EXPECT_EQ(0u, result[1].bind_group);
- EXPECT_EQ(1u, result[1].binding);
- EXPECT_EQ(12u, result[1].size);
- EXPECT_EQ(12u, result[1].size_no_padding);
-
- EXPECT_EQ(ResourceBinding::ResourceType::kStorageBuffer,
- result[2].resource_type);
- EXPECT_EQ(2u, result[2].bind_group);
- EXPECT_EQ(0u, result[2].binding);
- EXPECT_EQ(12u, result[2].size);
- EXPECT_EQ(12u, result[2].size_no_padding);
+ auto sb_struct_type = MakeStorageBufferTypes("sb_type", {
+ ty.i32(),
+ ty.u32(),
+ ty.f32(),
+ });
+ AddStorageBuffer("sb_foo", sb_struct_type(), ast::Access::kReadWrite, 0, 0);
+ AddStorageBuffer("sb_bar", sb_struct_type(), ast::Access::kReadWrite, 0, 1);
+ AddStorageBuffer("sb_baz", sb_struct_type(), ast::Access::kReadWrite, 2, 0);
+
+ auto AddReferenceFunc = [this](const std::string& func_name, const std::string& var_name) {
+ MakeStructVariableReferenceBodyFunction(func_name, var_name,
+ {{0, ty.i32()}, {1, ty.u32()}, {2, ty.f32()}});
+ };
+ AddReferenceFunc("sb_foo_func", "sb_foo");
+ AddReferenceFunc("sb_bar_func", "sb_bar");
+ AddReferenceFunc("sb_baz_func", "sb_baz");
+
+ auto FuncCall = [&](const std::string& callee) {
+ return create<ast::CallStatement>(Call(callee));
+ };
+
+ Func("ep_func", ast::VariableList(), ty.void_(),
+ ast::StatementList{
+ FuncCall("sb_foo_func"),
+ FuncCall("sb_bar_func"),
+ FuncCall("sb_baz_func"),
+ Return(),
+ },
+ ast::AttributeList{
+ Stage(ast::PipelineStage::kFragment),
+ });
+
+ Inspector& inspector = Build();
+
+ auto result = inspector.GetStorageBufferResourceBindings("ep_func");
+ ASSERT_FALSE(inspector.has_error()) << inspector.error();
+ ASSERT_EQ(3u, result.size());
+
+ EXPECT_EQ(ResourceBinding::ResourceType::kStorageBuffer, result[0].resource_type);
+ EXPECT_EQ(0u, result[0].bind_group);
+ EXPECT_EQ(0u, result[0].binding);
+ EXPECT_EQ(12u, result[0].size);
+ EXPECT_EQ(12u, result[0].size_no_padding);
+
+ EXPECT_EQ(ResourceBinding::ResourceType::kStorageBuffer, result[1].resource_type);
+ EXPECT_EQ(0u, result[1].bind_group);
+ EXPECT_EQ(1u, result[1].binding);
+ EXPECT_EQ(12u, result[1].size);
+ EXPECT_EQ(12u, result[1].size_no_padding);
+
+ EXPECT_EQ(ResourceBinding::ResourceType::kStorageBuffer, result[2].resource_type);
+ EXPECT_EQ(2u, result[2].bind_group);
+ EXPECT_EQ(0u, result[2].binding);
+ EXPECT_EQ(12u, result[2].size);
+ EXPECT_EQ(12u, result[2].size_no_padding);
}
TEST_F(InspectorGetStorageBufferResourceBindingsTest, ContainingArray) {
- auto foo_struct_type =
- MakeStorageBufferTypes("foo_type", {ty.i32(), ty.array<u32, 4>()});
- AddStorageBuffer("foo_sb", foo_struct_type(), ast::Access::kReadWrite, 0, 0);
+ auto foo_struct_type = MakeStorageBufferTypes("foo_type", {ty.i32(), ty.array<u32, 4>()});
+ AddStorageBuffer("foo_sb", foo_struct_type(), ast::Access::kReadWrite, 0, 0);
- MakeStructVariableReferenceBodyFunction("sb_func", "foo_sb", {{0, ty.i32()}});
+ MakeStructVariableReferenceBodyFunction("sb_func", "foo_sb", {{0, ty.i32()}});
- MakeCallerBodyFunction("ep_func", {"sb_func"},
- ast::AttributeList{
- Stage(ast::PipelineStage::kFragment),
- });
+ MakeCallerBodyFunction("ep_func", {"sb_func"},
+ ast::AttributeList{
+ Stage(ast::PipelineStage::kFragment),
+ });
- Inspector& inspector = Build();
+ Inspector& inspector = Build();
- auto result = inspector.GetStorageBufferResourceBindings("ep_func");
- ASSERT_FALSE(inspector.has_error()) << inspector.error();
- ASSERT_EQ(1u, result.size());
+ auto result = inspector.GetStorageBufferResourceBindings("ep_func");
+ ASSERT_FALSE(inspector.has_error()) << inspector.error();
+ ASSERT_EQ(1u, result.size());
- EXPECT_EQ(ResourceBinding::ResourceType::kStorageBuffer,
- result[0].resource_type);
- EXPECT_EQ(0u, result[0].bind_group);
- EXPECT_EQ(0u, result[0].binding);
- EXPECT_EQ(20u, result[0].size);
- EXPECT_EQ(20u, result[0].size_no_padding);
+ EXPECT_EQ(ResourceBinding::ResourceType::kStorageBuffer, result[0].resource_type);
+ EXPECT_EQ(0u, result[0].bind_group);
+ EXPECT_EQ(0u, result[0].binding);
+ EXPECT_EQ(20u, result[0].size);
+ EXPECT_EQ(20u, result[0].size_no_padding);
}
TEST_F(InspectorGetStorageBufferResourceBindingsTest, ContainingRuntimeArray) {
- auto foo_struct_type = MakeStorageBufferTypes("foo_type", {
- ty.i32(),
- ty.array<u32>(),
- });
- AddStorageBuffer("foo_sb", foo_struct_type(), ast::Access::kReadWrite, 0, 0);
+ auto foo_struct_type = MakeStorageBufferTypes("foo_type", {
+ ty.i32(),
+ ty.array<u32>(),
+ });
+ AddStorageBuffer("foo_sb", foo_struct_type(), ast::Access::kReadWrite, 0, 0);
- MakeStructVariableReferenceBodyFunction("sb_func", "foo_sb", {{0, ty.i32()}});
+ MakeStructVariableReferenceBodyFunction("sb_func", "foo_sb", {{0, ty.i32()}});
- MakeCallerBodyFunction("ep_func", {"sb_func"},
- ast::AttributeList{
- Stage(ast::PipelineStage::kFragment),
- });
+ MakeCallerBodyFunction("ep_func", {"sb_func"},
+ ast::AttributeList{
+ Stage(ast::PipelineStage::kFragment),
+ });
- Inspector& inspector = Build();
+ Inspector& inspector = Build();
- auto result = inspector.GetStorageBufferResourceBindings("ep_func");
- ASSERT_FALSE(inspector.has_error()) << inspector.error();
- ASSERT_EQ(1u, result.size());
+ auto result = inspector.GetStorageBufferResourceBindings("ep_func");
+ ASSERT_FALSE(inspector.has_error()) << inspector.error();
+ ASSERT_EQ(1u, result.size());
- EXPECT_EQ(ResourceBinding::ResourceType::kStorageBuffer,
- result[0].resource_type);
- EXPECT_EQ(0u, result[0].bind_group);
- EXPECT_EQ(0u, result[0].binding);
- EXPECT_EQ(8u, result[0].size);
- EXPECT_EQ(8u, result[0].size_no_padding);
+ EXPECT_EQ(ResourceBinding::ResourceType::kStorageBuffer, result[0].resource_type);
+ EXPECT_EQ(0u, result[0].bind_group);
+ EXPECT_EQ(0u, result[0].binding);
+ EXPECT_EQ(8u, result[0].size);
+ EXPECT_EQ(8u, result[0].size_no_padding);
}
TEST_F(InspectorGetStorageBufferResourceBindingsTest, ContainingPadding) {
- auto foo_struct_type = MakeStorageBufferTypes("foo_type", {ty.vec3<f32>()});
- AddStorageBuffer("foo_sb", foo_struct_type(), ast::Access::kReadWrite, 0, 0);
+ auto foo_struct_type = MakeStorageBufferTypes("foo_type", {ty.vec3<f32>()});
+ AddStorageBuffer("foo_sb", foo_struct_type(), ast::Access::kReadWrite, 0, 0);
- MakeStructVariableReferenceBodyFunction("sb_func", "foo_sb",
- {{0, ty.vec3<f32>()}});
+ MakeStructVariableReferenceBodyFunction("sb_func", "foo_sb", {{0, ty.vec3<f32>()}});
- MakeCallerBodyFunction("ep_func", {"sb_func"},
- ast::AttributeList{
- Stage(ast::PipelineStage::kFragment),
- });
+ MakeCallerBodyFunction("ep_func", {"sb_func"},
+ ast::AttributeList{
+ Stage(ast::PipelineStage::kFragment),
+ });
- Inspector& inspector = Build();
+ Inspector& inspector = Build();
- auto result = inspector.GetStorageBufferResourceBindings("ep_func");
- ASSERT_FALSE(inspector.has_error()) << inspector.error();
- ASSERT_EQ(1u, result.size());
+ auto result = inspector.GetStorageBufferResourceBindings("ep_func");
+ ASSERT_FALSE(inspector.has_error()) << inspector.error();
+ ASSERT_EQ(1u, result.size());
- EXPECT_EQ(ResourceBinding::ResourceType::kStorageBuffer,
- result[0].resource_type);
- EXPECT_EQ(0u, result[0].bind_group);
- EXPECT_EQ(0u, result[0].binding);
- EXPECT_EQ(16u, result[0].size);
- EXPECT_EQ(12u, result[0].size_no_padding);
+ EXPECT_EQ(ResourceBinding::ResourceType::kStorageBuffer, result[0].resource_type);
+ EXPECT_EQ(0u, result[0].bind_group);
+ EXPECT_EQ(0u, result[0].binding);
+ EXPECT_EQ(16u, result[0].size);
+ EXPECT_EQ(12u, result[0].size_no_padding);
}
TEST_F(InspectorGetStorageBufferResourceBindingsTest, NonStructVec3) {
- AddStorageBuffer("foo_ub", ty.vec3<f32>(), ast::Access::kReadWrite, 0, 0);
- MakePlainGlobalReferenceBodyFunction("ub_func", "foo_ub", ty.vec3<f32>(), {});
+ AddStorageBuffer("foo_ub", ty.vec3<f32>(), ast::Access::kReadWrite, 0, 0);
+ MakePlainGlobalReferenceBodyFunction("ub_func", "foo_ub", ty.vec3<f32>(), {});
- MakeCallerBodyFunction("ep_func", {"ub_func"},
- ast::AttributeList{
- Stage(ast::PipelineStage::kFragment),
- });
+ MakeCallerBodyFunction("ep_func", {"ub_func"},
+ ast::AttributeList{
+ Stage(ast::PipelineStage::kFragment),
+ });
- Inspector& inspector = Build();
+ Inspector& inspector = Build();
- auto result = inspector.GetStorageBufferResourceBindings("ep_func");
- ASSERT_FALSE(inspector.has_error()) << inspector.error();
- ASSERT_EQ(1u, result.size());
+ auto result = inspector.GetStorageBufferResourceBindings("ep_func");
+ ASSERT_FALSE(inspector.has_error()) << inspector.error();
+ ASSERT_EQ(1u, result.size());
- EXPECT_EQ(ResourceBinding::ResourceType::kStorageBuffer,
- result[0].resource_type);
- EXPECT_EQ(0u, result[0].bind_group);
- EXPECT_EQ(0u, result[0].binding);
- EXPECT_EQ(12u, result[0].size);
- EXPECT_EQ(12u, result[0].size_no_padding);
+ EXPECT_EQ(ResourceBinding::ResourceType::kStorageBuffer, result[0].resource_type);
+ EXPECT_EQ(0u, result[0].bind_group);
+ EXPECT_EQ(0u, result[0].binding);
+ EXPECT_EQ(12u, result[0].size);
+ EXPECT_EQ(12u, result[0].size_no_padding);
}
TEST_F(InspectorGetStorageBufferResourceBindingsTest, SkipReadOnly) {
- auto foo_struct_type = MakeStorageBufferTypes("foo_type", {ty.i32()});
- AddStorageBuffer("foo_sb", foo_struct_type(), ast::Access::kRead, 0, 0);
+ auto foo_struct_type = MakeStorageBufferTypes("foo_type", {ty.i32()});
+ AddStorageBuffer("foo_sb", foo_struct_type(), ast::Access::kRead, 0, 0);
- MakeStructVariableReferenceBodyFunction("sb_func", "foo_sb", {{0, ty.i32()}});
+ MakeStructVariableReferenceBodyFunction("sb_func", "foo_sb", {{0, ty.i32()}});
- MakeCallerBodyFunction("ep_func", {"sb_func"},
- ast::AttributeList{
- Stage(ast::PipelineStage::kFragment),
- });
+ MakeCallerBodyFunction("ep_func", {"sb_func"},
+ ast::AttributeList{
+ Stage(ast::PipelineStage::kFragment),
+ });
- Inspector& inspector = Build();
+ Inspector& inspector = Build();
- auto result = inspector.GetStorageBufferResourceBindings("ep_func");
- ASSERT_FALSE(inspector.has_error()) << inspector.error();
- ASSERT_EQ(0u, result.size());
+ auto result = inspector.GetStorageBufferResourceBindings("ep_func");
+ ASSERT_FALSE(inspector.has_error()) << inspector.error();
+ ASSERT_EQ(0u, result.size());
}
TEST_F(InspectorGetReadOnlyStorageBufferResourceBindingsTest, Simple) {
- auto foo_struct_type = MakeStorageBufferTypes("foo_type", {ty.i32()});
- AddStorageBuffer("foo_sb", foo_struct_type(), ast::Access::kRead, 0, 0);
-
- MakeStructVariableReferenceBodyFunction("sb_func", "foo_sb", {{0, ty.i32()}});
-
- MakeCallerBodyFunction("ep_func", {"sb_func"},
- ast::AttributeList{
- Stage(ast::PipelineStage::kFragment),
- });
-
- Inspector& inspector = Build();
-
- auto result = inspector.GetReadOnlyStorageBufferResourceBindings("ep_func");
- ASSERT_FALSE(inspector.has_error()) << inspector.error();
- ASSERT_EQ(1u, result.size());
-
- EXPECT_EQ(ResourceBinding::ResourceType::kReadOnlyStorageBuffer,
- result[0].resource_type);
- EXPECT_EQ(0u, result[0].bind_group);
- EXPECT_EQ(0u, result[0].binding);
- EXPECT_EQ(4u, result[0].size);
- EXPECT_EQ(4u, result[0].size_no_padding);
-}
-
-TEST_F(InspectorGetReadOnlyStorageBufferResourceBindingsTest,
- MultipleStorageBuffers) {
- auto sb_struct_type = MakeStorageBufferTypes("sb_type", {
- ty.i32(),
- ty.u32(),
- ty.f32(),
- });
- AddStorageBuffer("sb_foo", sb_struct_type(), ast::Access::kRead, 0, 0);
- AddStorageBuffer("sb_bar", sb_struct_type(), ast::Access::kRead, 0, 1);
- AddStorageBuffer("sb_baz", sb_struct_type(), ast::Access::kRead, 2, 0);
-
- auto AddReferenceFunc = [this](const std::string& func_name,
- const std::string& var_name) {
- MakeStructVariableReferenceBodyFunction(
- func_name, var_name, {{0, ty.i32()}, {1, ty.u32()}, {2, ty.f32()}});
- };
- AddReferenceFunc("sb_foo_func", "sb_foo");
- AddReferenceFunc("sb_bar_func", "sb_bar");
- AddReferenceFunc("sb_baz_func", "sb_baz");
-
- auto FuncCall = [&](const std::string& callee) {
- return create<ast::CallStatement>(Call(callee));
- };
-
- Func("ep_func", ast::VariableList(), ty.void_(),
- ast::StatementList{
- FuncCall("sb_foo_func"),
- FuncCall("sb_bar_func"),
- FuncCall("sb_baz_func"),
- Return(),
- },
- ast::AttributeList{
- Stage(ast::PipelineStage::kFragment),
- });
-
- Inspector& inspector = Build();
-
- auto result = inspector.GetReadOnlyStorageBufferResourceBindings("ep_func");
- ASSERT_FALSE(inspector.has_error()) << inspector.error();
- ASSERT_EQ(3u, result.size());
-
- EXPECT_EQ(ResourceBinding::ResourceType::kReadOnlyStorageBuffer,
- result[0].resource_type);
- EXPECT_EQ(0u, result[0].bind_group);
- EXPECT_EQ(0u, result[0].binding);
- EXPECT_EQ(12u, result[0].size);
- EXPECT_EQ(12u, result[0].size_no_padding);
-
- EXPECT_EQ(ResourceBinding::ResourceType::kReadOnlyStorageBuffer,
- result[1].resource_type);
- EXPECT_EQ(0u, result[1].bind_group);
- EXPECT_EQ(1u, result[1].binding);
- EXPECT_EQ(12u, result[1].size);
- EXPECT_EQ(12u, result[1].size_no_padding);
-
- EXPECT_EQ(ResourceBinding::ResourceType::kReadOnlyStorageBuffer,
- result[2].resource_type);
- EXPECT_EQ(2u, result[2].bind_group);
- EXPECT_EQ(0u, result[2].binding);
- EXPECT_EQ(12u, result[2].size);
- EXPECT_EQ(12u, result[2].size_no_padding);
+ auto foo_struct_type = MakeStorageBufferTypes("foo_type", {ty.i32()});
+ AddStorageBuffer("foo_sb", foo_struct_type(), ast::Access::kRead, 0, 0);
+
+ MakeStructVariableReferenceBodyFunction("sb_func", "foo_sb", {{0, ty.i32()}});
+
+ MakeCallerBodyFunction("ep_func", {"sb_func"},
+ ast::AttributeList{
+ Stage(ast::PipelineStage::kFragment),
+ });
+
+ Inspector& inspector = Build();
+
+ auto result = inspector.GetReadOnlyStorageBufferResourceBindings("ep_func");
+ ASSERT_FALSE(inspector.has_error()) << inspector.error();
+ ASSERT_EQ(1u, result.size());
+
+ EXPECT_EQ(ResourceBinding::ResourceType::kReadOnlyStorageBuffer, result[0].resource_type);
+ EXPECT_EQ(0u, result[0].bind_group);
+ EXPECT_EQ(0u, result[0].binding);
+ EXPECT_EQ(4u, result[0].size);
+ EXPECT_EQ(4u, result[0].size_no_padding);
+}
+
+TEST_F(InspectorGetReadOnlyStorageBufferResourceBindingsTest, MultipleStorageBuffers) {
+ auto sb_struct_type = MakeStorageBufferTypes("sb_type", {
+ ty.i32(),
+ ty.u32(),
+ ty.f32(),
+ });
+ AddStorageBuffer("sb_foo", sb_struct_type(), ast::Access::kRead, 0, 0);
+ AddStorageBuffer("sb_bar", sb_struct_type(), ast::Access::kRead, 0, 1);
+ AddStorageBuffer("sb_baz", sb_struct_type(), ast::Access::kRead, 2, 0);
+
+ auto AddReferenceFunc = [this](const std::string& func_name, const std::string& var_name) {
+ MakeStructVariableReferenceBodyFunction(func_name, var_name,
+ {{0, ty.i32()}, {1, ty.u32()}, {2, ty.f32()}});
+ };
+ AddReferenceFunc("sb_foo_func", "sb_foo");
+ AddReferenceFunc("sb_bar_func", "sb_bar");
+ AddReferenceFunc("sb_baz_func", "sb_baz");
+
+ auto FuncCall = [&](const std::string& callee) {
+ return create<ast::CallStatement>(Call(callee));
+ };
+
+ Func("ep_func", ast::VariableList(), ty.void_(),
+ ast::StatementList{
+ FuncCall("sb_foo_func"),
+ FuncCall("sb_bar_func"),
+ FuncCall("sb_baz_func"),
+ Return(),
+ },
+ ast::AttributeList{
+ Stage(ast::PipelineStage::kFragment),
+ });
+
+ Inspector& inspector = Build();
+
+ auto result = inspector.GetReadOnlyStorageBufferResourceBindings("ep_func");
+ ASSERT_FALSE(inspector.has_error()) << inspector.error();
+ ASSERT_EQ(3u, result.size());
+
+ EXPECT_EQ(ResourceBinding::ResourceType::kReadOnlyStorageBuffer, result[0].resource_type);
+ EXPECT_EQ(0u, result[0].bind_group);
+ EXPECT_EQ(0u, result[0].binding);
+ EXPECT_EQ(12u, result[0].size);
+ EXPECT_EQ(12u, result[0].size_no_padding);
+
+ EXPECT_EQ(ResourceBinding::ResourceType::kReadOnlyStorageBuffer, result[1].resource_type);
+ EXPECT_EQ(0u, result[1].bind_group);
+ EXPECT_EQ(1u, result[1].binding);
+ EXPECT_EQ(12u, result[1].size);
+ EXPECT_EQ(12u, result[1].size_no_padding);
+
+ EXPECT_EQ(ResourceBinding::ResourceType::kReadOnlyStorageBuffer, result[2].resource_type);
+ EXPECT_EQ(2u, result[2].bind_group);
+ EXPECT_EQ(0u, result[2].binding);
+ EXPECT_EQ(12u, result[2].size);
+ EXPECT_EQ(12u, result[2].size_no_padding);
}
TEST_F(InspectorGetReadOnlyStorageBufferResourceBindingsTest, ContainingArray) {
- auto foo_struct_type =
- MakeStorageBufferTypes("foo_type", {
- ty.i32(),
- ty.array<u32, 4>(),
- });
- AddStorageBuffer("foo_sb", foo_struct_type(), ast::Access::kRead, 0, 0);
+ auto foo_struct_type = MakeStorageBufferTypes("foo_type", {
+ ty.i32(),
+ ty.array<u32, 4>(),
+ });
+ AddStorageBuffer("foo_sb", foo_struct_type(), ast::Access::kRead, 0, 0);
- MakeStructVariableReferenceBodyFunction("sb_func", "foo_sb", {{0, ty.i32()}});
+ MakeStructVariableReferenceBodyFunction("sb_func", "foo_sb", {{0, ty.i32()}});
- MakeCallerBodyFunction("ep_func", {"sb_func"},
- ast::AttributeList{
- Stage(ast::PipelineStage::kFragment),
- });
+ MakeCallerBodyFunction("ep_func", {"sb_func"},
+ ast::AttributeList{
+ Stage(ast::PipelineStage::kFragment),
+ });
- Inspector& inspector = Build();
+ Inspector& inspector = Build();
- auto result = inspector.GetReadOnlyStorageBufferResourceBindings("ep_func");
- ASSERT_FALSE(inspector.has_error()) << inspector.error();
- ASSERT_EQ(1u, result.size());
+ auto result = inspector.GetReadOnlyStorageBufferResourceBindings("ep_func");
+ ASSERT_FALSE(inspector.has_error()) << inspector.error();
+ ASSERT_EQ(1u, result.size());
- EXPECT_EQ(ResourceBinding::ResourceType::kReadOnlyStorageBuffer,
- result[0].resource_type);
- EXPECT_EQ(0u, result[0].bind_group);
- EXPECT_EQ(0u, result[0].binding);
- EXPECT_EQ(20u, result[0].size);
- EXPECT_EQ(20u, result[0].size_no_padding);
+ EXPECT_EQ(ResourceBinding::ResourceType::kReadOnlyStorageBuffer, result[0].resource_type);
+ EXPECT_EQ(0u, result[0].bind_group);
+ EXPECT_EQ(0u, result[0].binding);
+ EXPECT_EQ(20u, result[0].size);
+ EXPECT_EQ(20u, result[0].size_no_padding);
}
-TEST_F(InspectorGetReadOnlyStorageBufferResourceBindingsTest,
- ContainingRuntimeArray) {
- auto foo_struct_type = MakeStorageBufferTypes("foo_type", {
- ty.i32(),
- ty.array<u32>(),
- });
- AddStorageBuffer("foo_sb", foo_struct_type(), ast::Access::kRead, 0, 0);
+TEST_F(InspectorGetReadOnlyStorageBufferResourceBindingsTest, ContainingRuntimeArray) {
+ auto foo_struct_type = MakeStorageBufferTypes("foo_type", {
+ ty.i32(),
+ ty.array<u32>(),
+ });
+ AddStorageBuffer("foo_sb", foo_struct_type(), ast::Access::kRead, 0, 0);
- MakeStructVariableReferenceBodyFunction("sb_func", "foo_sb", {{0, ty.i32()}});
+ MakeStructVariableReferenceBodyFunction("sb_func", "foo_sb", {{0, ty.i32()}});
- MakeCallerBodyFunction("ep_func", {"sb_func"},
- ast::AttributeList{
- Stage(ast::PipelineStage::kFragment),
- });
+ MakeCallerBodyFunction("ep_func", {"sb_func"},
+ ast::AttributeList{
+ Stage(ast::PipelineStage::kFragment),
+ });
- Inspector& inspector = Build();
+ Inspector& inspector = Build();
- auto result = inspector.GetReadOnlyStorageBufferResourceBindings("ep_func");
- ASSERT_FALSE(inspector.has_error()) << inspector.error();
- ASSERT_EQ(1u, result.size());
+ auto result = inspector.GetReadOnlyStorageBufferResourceBindings("ep_func");
+ ASSERT_FALSE(inspector.has_error()) << inspector.error();
+ ASSERT_EQ(1u, result.size());
- EXPECT_EQ(ResourceBinding::ResourceType::kReadOnlyStorageBuffer,
- result[0].resource_type);
- EXPECT_EQ(0u, result[0].bind_group);
- EXPECT_EQ(0u, result[0].binding);
- EXPECT_EQ(8u, result[0].size);
- EXPECT_EQ(8u, result[0].size_no_padding);
+ EXPECT_EQ(ResourceBinding::ResourceType::kReadOnlyStorageBuffer, result[0].resource_type);
+ EXPECT_EQ(0u, result[0].bind_group);
+ EXPECT_EQ(0u, result[0].binding);
+ EXPECT_EQ(8u, result[0].size);
+ EXPECT_EQ(8u, result[0].size_no_padding);
}
TEST_F(InspectorGetReadOnlyStorageBufferResourceBindingsTest, SkipNonReadOnly) {
- auto foo_struct_type = MakeStorageBufferTypes("foo_type", {ty.i32()});
- AddStorageBuffer("foo_sb", foo_struct_type(), ast::Access::kReadWrite, 0, 0);
+ auto foo_struct_type = MakeStorageBufferTypes("foo_type", {ty.i32()});
+ AddStorageBuffer("foo_sb", foo_struct_type(), ast::Access::kReadWrite, 0, 0);
- MakeStructVariableReferenceBodyFunction("sb_func", "foo_sb", {{0, ty.i32()}});
+ MakeStructVariableReferenceBodyFunction("sb_func", "foo_sb", {{0, ty.i32()}});
- MakeCallerBodyFunction("ep_func", {"sb_func"},
- ast::AttributeList{
- Stage(ast::PipelineStage::kFragment),
- });
+ MakeCallerBodyFunction("ep_func", {"sb_func"},
+ ast::AttributeList{
+ Stage(ast::PipelineStage::kFragment),
+ });
- Inspector& inspector = Build();
+ Inspector& inspector = Build();
- auto result = inspector.GetReadOnlyStorageBufferResourceBindings("ep_func");
- ASSERT_FALSE(inspector.has_error()) << inspector.error();
- ASSERT_EQ(0u, result.size());
+ auto result = inspector.GetReadOnlyStorageBufferResourceBindings("ep_func");
+ ASSERT_FALSE(inspector.has_error()) << inspector.error();
+ ASSERT_EQ(0u, result.size());
}
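A minimal sketch of how a caller might consume the storage-buffer queries exercised by the tests above, assuming an Inspector already constructed for a valid tint::Program (the role the Build() helper plays in these tests). Only accessors that appear in this test file are used; the include path is an assumption and depends on how Tint is vendored.

#include <iostream>
#include <string>

#include "src/tint/inspector/inspector.h"  // assumed path; adjust to the local checkout

// Lists the storage buffers an entry point actually references.
// GetStorageBufferResourceBindings() skips read-only buffers and
// GetReadOnlyStorageBufferResourceBindings() returns only those, mirroring
// the SkipReadOnly / SkipNonReadOnly tests above.
void DumpStorageBuffers(tint::inspector::Inspector& inspector, const std::string& ep) {
    auto rw = inspector.GetStorageBufferResourceBindings(ep);
    auto ro = inspector.GetReadOnlyStorageBufferResourceBindings(ep);
    if (inspector.has_error()) {
        std::cerr << inspector.error() << std::endl;
        return;
    }
    for (const auto& rb : rw) {
        // size includes trailing struct padding (e.g. 16 for a lone vec3<f32>),
        // size_no_padding does not (12 in that case), as the ContainingPadding
        // test above pins down.
        std::cout << "rw storage buffer @group(" << rb.bind_group << ") @binding(" << rb.binding
                  << ") size=" << rb.size << " (" << rb.size_no_padding << " without padding)\n";
    }
    std::cout << ro.size() << " read-only storage buffer(s)\n";
}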
TEST_F(InspectorGetSamplerResourceBindingsTest, Simple) {
- auto* sampled_texture_type =
- ty.sampled_texture(ast::TextureDimension::k1d, ty.f32());
- AddResource("foo_texture", sampled_texture_type, 0, 0);
- AddSampler("foo_sampler", 0, 1);
- AddGlobalVariable("foo_coords", ty.f32());
+ auto* sampled_texture_type = ty.sampled_texture(ast::TextureDimension::k1d, ty.f32());
+ AddResource("foo_texture", sampled_texture_type, 0, 0);
+ AddSampler("foo_sampler", 0, 1);
+ AddGlobalVariable("foo_coords", ty.f32());
- MakeSamplerReferenceBodyFunction("ep", "foo_texture", "foo_sampler",
- "foo_coords", ty.f32(),
- ast::AttributeList{
- Stage(ast::PipelineStage::kFragment),
- });
+ MakeSamplerReferenceBodyFunction("ep", "foo_texture", "foo_sampler", "foo_coords", ty.f32(),
+ ast::AttributeList{
+ Stage(ast::PipelineStage::kFragment),
+ });
- Inspector& inspector = Build();
+ Inspector& inspector = Build();
- auto result = inspector.GetSamplerResourceBindings("ep");
- ASSERT_FALSE(inspector.has_error()) << inspector.error();
+ auto result = inspector.GetSamplerResourceBindings("ep");
+ ASSERT_FALSE(inspector.has_error()) << inspector.error();
- EXPECT_EQ(ResourceBinding::ResourceType::kSampler, result[0].resource_type);
- ASSERT_EQ(1u, result.size());
- EXPECT_EQ(0u, result[0].bind_group);
- EXPECT_EQ(1u, result[0].binding);
+ EXPECT_EQ(ResourceBinding::ResourceType::kSampler, result[0].resource_type);
+ ASSERT_EQ(1u, result.size());
+ EXPECT_EQ(0u, result[0].bind_group);
+ EXPECT_EQ(1u, result[0].binding);
}
TEST_F(InspectorGetSamplerResourceBindingsTest, NoSampler) {
- MakeEmptyBodyFunction("ep_func", ast::AttributeList{
- Stage(ast::PipelineStage::kFragment),
- });
+ MakeEmptyBodyFunction("ep_func", ast::AttributeList{
+ Stage(ast::PipelineStage::kFragment),
+ });
- Inspector& inspector = Build();
+ Inspector& inspector = Build();
- auto result = inspector.GetSamplerResourceBindings("ep_func");
- ASSERT_FALSE(inspector.has_error()) << inspector.error();
+ auto result = inspector.GetSamplerResourceBindings("ep_func");
+ ASSERT_FALSE(inspector.has_error()) << inspector.error();
- ASSERT_EQ(0u, result.size());
+ ASSERT_EQ(0u, result.size());
}
TEST_F(InspectorGetSamplerResourceBindingsTest, InFunction) {
- auto* sampled_texture_type =
- ty.sampled_texture(ast::TextureDimension::k1d, ty.f32());
- AddResource("foo_texture", sampled_texture_type, 0, 0);
- AddSampler("foo_sampler", 0, 1);
- AddGlobalVariable("foo_coords", ty.f32());
+ auto* sampled_texture_type = ty.sampled_texture(ast::TextureDimension::k1d, ty.f32());
+ AddResource("foo_texture", sampled_texture_type, 0, 0);
+ AddSampler("foo_sampler", 0, 1);
+ AddGlobalVariable("foo_coords", ty.f32());
- MakeSamplerReferenceBodyFunction("foo_func", "foo_texture", "foo_sampler",
- "foo_coords", ty.f32(), {});
+ MakeSamplerReferenceBodyFunction("foo_func", "foo_texture", "foo_sampler", "foo_coords",
+ ty.f32(), {});
- MakeCallerBodyFunction("ep_func", {"foo_func"},
- ast::AttributeList{
- Stage(ast::PipelineStage::kFragment),
- });
+ MakeCallerBodyFunction("ep_func", {"foo_func"},
+ ast::AttributeList{
+ Stage(ast::PipelineStage::kFragment),
+ });
- Inspector& inspector = Build();
+ Inspector& inspector = Build();
- auto result = inspector.GetSamplerResourceBindings("ep_func");
- ASSERT_FALSE(inspector.has_error()) << inspector.error();
+ auto result = inspector.GetSamplerResourceBindings("ep_func");
+ ASSERT_FALSE(inspector.has_error()) << inspector.error();
- EXPECT_EQ(ResourceBinding::ResourceType::kSampler, result[0].resource_type);
- ASSERT_EQ(1u, result.size());
- EXPECT_EQ(0u, result[0].bind_group);
- EXPECT_EQ(1u, result[0].binding);
+ EXPECT_EQ(ResourceBinding::ResourceType::kSampler, result[0].resource_type);
+ ASSERT_EQ(1u, result.size());
+ EXPECT_EQ(0u, result[0].bind_group);
+ EXPECT_EQ(1u, result[0].binding);
}
TEST_F(InspectorGetSamplerResourceBindingsTest, UnknownEntryPoint) {
- auto* sampled_texture_type =
- ty.sampled_texture(ast::TextureDimension::k1d, ty.f32());
- AddResource("foo_texture", sampled_texture_type, 0, 0);
- AddSampler("foo_sampler", 0, 1);
- AddGlobalVariable("foo_coords", ty.f32());
+ auto* sampled_texture_type = ty.sampled_texture(ast::TextureDimension::k1d, ty.f32());
+ AddResource("foo_texture", sampled_texture_type, 0, 0);
+ AddSampler("foo_sampler", 0, 1);
+ AddGlobalVariable("foo_coords", ty.f32());
- MakeSamplerReferenceBodyFunction("ep", "foo_texture", "foo_sampler",
- "foo_coords", ty.f32(),
- ast::AttributeList{
- Stage(ast::PipelineStage::kFragment),
- });
+ MakeSamplerReferenceBodyFunction("ep", "foo_texture", "foo_sampler", "foo_coords", ty.f32(),
+ ast::AttributeList{
+ Stage(ast::PipelineStage::kFragment),
+ });
- Inspector& inspector = Build();
+ Inspector& inspector = Build();
- auto result = inspector.GetSamplerResourceBindings("foo");
- ASSERT_TRUE(inspector.has_error()) << inspector.error();
+ auto result = inspector.GetSamplerResourceBindings("foo");
+ ASSERT_TRUE(inspector.has_error()) << inspector.error();
}
TEST_F(InspectorGetSamplerResourceBindingsTest, SkipsComparisonSamplers) {
- auto* depth_texture_type = ty.depth_texture(ast::TextureDimension::k2d);
- AddResource("foo_texture", depth_texture_type, 0, 0);
- AddComparisonSampler("foo_sampler", 0, 1);
- AddGlobalVariable("foo_coords", ty.vec2<f32>());
- AddGlobalVariable("foo_depth", ty.f32());
+ auto* depth_texture_type = ty.depth_texture(ast::TextureDimension::k2d);
+ AddResource("foo_texture", depth_texture_type, 0, 0);
+ AddComparisonSampler("foo_sampler", 0, 1);
+ AddGlobalVariable("foo_coords", ty.vec2<f32>());
+ AddGlobalVariable("foo_depth", ty.f32());
- MakeComparisonSamplerReferenceBodyFunction(
- "ep", "foo_texture", "foo_sampler", "foo_coords", "foo_depth", ty.f32(),
- ast::AttributeList{
- Stage(ast::PipelineStage::kFragment),
- });
+ MakeComparisonSamplerReferenceBodyFunction("ep", "foo_texture", "foo_sampler", "foo_coords",
+ "foo_depth", ty.f32(),
+ ast::AttributeList{
+ Stage(ast::PipelineStage::kFragment),
+ });
- Inspector& inspector = Build();
+ Inspector& inspector = Build();
- auto result = inspector.GetSamplerResourceBindings("ep");
- ASSERT_FALSE(inspector.has_error()) << inspector.error();
+ auto result = inspector.GetSamplerResourceBindings("ep");
+ ASSERT_FALSE(inspector.has_error()) << inspector.error();
- ASSERT_EQ(0u, result.size());
+ ASSERT_EQ(0u, result.size());
}
TEST_F(InspectorGetComparisonSamplerResourceBindingsTest, Simple) {
- auto* depth_texture_type = ty.depth_texture(ast::TextureDimension::k2d);
- AddResource("foo_texture", depth_texture_type, 0, 0);
- AddComparisonSampler("foo_sampler", 0, 1);
- AddGlobalVariable("foo_coords", ty.vec2<f32>());
- AddGlobalVariable("foo_depth", ty.f32());
+ auto* depth_texture_type = ty.depth_texture(ast::TextureDimension::k2d);
+ AddResource("foo_texture", depth_texture_type, 0, 0);
+ AddComparisonSampler("foo_sampler", 0, 1);
+ AddGlobalVariable("foo_coords", ty.vec2<f32>());
+ AddGlobalVariable("foo_depth", ty.f32());
- MakeComparisonSamplerReferenceBodyFunction(
- "ep", "foo_texture", "foo_sampler", "foo_coords", "foo_depth", ty.f32(),
- ast::AttributeList{
- Stage(ast::PipelineStage::kFragment),
- });
+ MakeComparisonSamplerReferenceBodyFunction("ep", "foo_texture", "foo_sampler", "foo_coords",
+ "foo_depth", ty.f32(),
+ ast::AttributeList{
+ Stage(ast::PipelineStage::kFragment),
+ });
- Inspector& inspector = Build();
+ Inspector& inspector = Build();
- auto result = inspector.GetComparisonSamplerResourceBindings("ep");
- ASSERT_FALSE(inspector.has_error()) << inspector.error();
+ auto result = inspector.GetComparisonSamplerResourceBindings("ep");
+ ASSERT_FALSE(inspector.has_error()) << inspector.error();
- EXPECT_EQ(ResourceBinding::ResourceType::kComparisonSampler,
- result[0].resource_type);
- ASSERT_EQ(1u, result.size());
- EXPECT_EQ(0u, result[0].bind_group);
- EXPECT_EQ(1u, result[0].binding);
+ EXPECT_EQ(ResourceBinding::ResourceType::kComparisonSampler, result[0].resource_type);
+ ASSERT_EQ(1u, result.size());
+ EXPECT_EQ(0u, result[0].bind_group);
+ EXPECT_EQ(1u, result[0].binding);
}
TEST_F(InspectorGetComparisonSamplerResourceBindingsTest, NoSampler) {
- MakeEmptyBodyFunction("ep_func", ast::AttributeList{
- Stage(ast::PipelineStage::kFragment),
- });
+ MakeEmptyBodyFunction("ep_func", ast::AttributeList{
+ Stage(ast::PipelineStage::kFragment),
+ });
- Inspector& inspector = Build();
+ Inspector& inspector = Build();
- auto result = inspector.GetComparisonSamplerResourceBindings("ep_func");
- ASSERT_FALSE(inspector.has_error()) << inspector.error();
+ auto result = inspector.GetComparisonSamplerResourceBindings("ep_func");
+ ASSERT_FALSE(inspector.has_error()) << inspector.error();
- ASSERT_EQ(0u, result.size());
+ ASSERT_EQ(0u, result.size());
}
TEST_F(InspectorGetComparisonSamplerResourceBindingsTest, InFunction) {
- auto* depth_texture_type = ty.depth_texture(ast::TextureDimension::k2d);
- AddResource("foo_texture", depth_texture_type, 0, 0);
- AddComparisonSampler("foo_sampler", 0, 1);
- AddGlobalVariable("foo_coords", ty.vec2<f32>());
- AddGlobalVariable("foo_depth", ty.f32());
+ auto* depth_texture_type = ty.depth_texture(ast::TextureDimension::k2d);
+ AddResource("foo_texture", depth_texture_type, 0, 0);
+ AddComparisonSampler("foo_sampler", 0, 1);
+ AddGlobalVariable("foo_coords", ty.vec2<f32>());
+ AddGlobalVariable("foo_depth", ty.f32());
- MakeComparisonSamplerReferenceBodyFunction("foo_func", "foo_texture",
- "foo_sampler", "foo_coords",
- "foo_depth", ty.f32(), {});
+ MakeComparisonSamplerReferenceBodyFunction("foo_func", "foo_texture", "foo_sampler",
+ "foo_coords", "foo_depth", ty.f32(), {});
- MakeCallerBodyFunction("ep_func", {"foo_func"},
- ast::AttributeList{
- Stage(ast::PipelineStage::kFragment),
- });
+ MakeCallerBodyFunction("ep_func", {"foo_func"},
+ ast::AttributeList{
+ Stage(ast::PipelineStage::kFragment),
+ });
- Inspector& inspector = Build();
+ Inspector& inspector = Build();
- auto result = inspector.GetComparisonSamplerResourceBindings("ep_func");
- ASSERT_FALSE(inspector.has_error()) << inspector.error();
+ auto result = inspector.GetComparisonSamplerResourceBindings("ep_func");
+ ASSERT_FALSE(inspector.has_error()) << inspector.error();
- EXPECT_EQ(ResourceBinding::ResourceType::kComparisonSampler,
- result[0].resource_type);
- ASSERT_EQ(1u, result.size());
- EXPECT_EQ(0u, result[0].bind_group);
- EXPECT_EQ(1u, result[0].binding);
+ EXPECT_EQ(ResourceBinding::ResourceType::kComparisonSampler, result[0].resource_type);
+ ASSERT_EQ(1u, result.size());
+ EXPECT_EQ(0u, result[0].bind_group);
+ EXPECT_EQ(1u, result[0].binding);
}
TEST_F(InspectorGetComparisonSamplerResourceBindingsTest, UnknownEntryPoint) {
- auto* depth_texture_type = ty.depth_texture(ast::TextureDimension::k2d);
- AddResource("foo_texture", depth_texture_type, 0, 0);
- AddComparisonSampler("foo_sampler", 0, 1);
- AddGlobalVariable("foo_coords", ty.vec2<f32>());
- AddGlobalVariable("foo_depth", ty.f32());
+ auto* depth_texture_type = ty.depth_texture(ast::TextureDimension::k2d);
+ AddResource("foo_texture", depth_texture_type, 0, 0);
+ AddComparisonSampler("foo_sampler", 0, 1);
+ AddGlobalVariable("foo_coords", ty.vec2<f32>());
+ AddGlobalVariable("foo_depth", ty.f32());
- MakeComparisonSamplerReferenceBodyFunction(
- "ep", "foo_texture", "foo_sampler", "foo_coords", "foo_depth", ty.f32(),
- ast::AttributeList{
- Stage(ast::PipelineStage::kFragment),
- });
+ MakeComparisonSamplerReferenceBodyFunction("ep", "foo_texture", "foo_sampler", "foo_coords",
+ "foo_depth", ty.f32(),
+ ast::AttributeList{
+ Stage(ast::PipelineStage::kFragment),
+ });
- Inspector& inspector = Build();
+ Inspector& inspector = Build();
- auto result = inspector.GetSamplerResourceBindings("foo");
- ASSERT_TRUE(inspector.has_error()) << inspector.error();
+ auto result = inspector.GetSamplerResourceBindings("foo");
+ ASSERT_TRUE(inspector.has_error()) << inspector.error();
}
TEST_F(InspectorGetComparisonSamplerResourceBindingsTest, SkipsSamplers) {
- auto* sampled_texture_type =
- ty.sampled_texture(ast::TextureDimension::k1d, ty.f32());
- AddResource("foo_texture", sampled_texture_type, 0, 0);
- AddSampler("foo_sampler", 0, 1);
- AddGlobalVariable("foo_coords", ty.f32());
+ auto* sampled_texture_type = ty.sampled_texture(ast::TextureDimension::k1d, ty.f32());
+ AddResource("foo_texture", sampled_texture_type, 0, 0);
+ AddSampler("foo_sampler", 0, 1);
+ AddGlobalVariable("foo_coords", ty.f32());
- MakeSamplerReferenceBodyFunction("ep", "foo_texture", "foo_sampler",
- "foo_coords", ty.f32(),
- ast::AttributeList{
- Stage(ast::PipelineStage::kFragment),
- });
+ MakeSamplerReferenceBodyFunction("ep", "foo_texture", "foo_sampler", "foo_coords", ty.f32(),
+ ast::AttributeList{
+ Stage(ast::PipelineStage::kFragment),
+ });
- Inspector& inspector = Build();
+ Inspector& inspector = Build();
- auto result = inspector.GetComparisonSamplerResourceBindings("ep");
- ASSERT_FALSE(inspector.has_error()) << inspector.error();
+ auto result = inspector.GetComparisonSamplerResourceBindings("ep");
+ ASSERT_FALSE(inspector.has_error()) << inspector.error();
- ASSERT_EQ(0u, result.size());
+ ASSERT_EQ(0u, result.size());
}
TEST_F(InspectorGetSampledTextureResourceBindingsTest, Empty) {
- MakeEmptyBodyFunction("foo", ast::AttributeList{
- Stage(ast::PipelineStage::kFragment),
- });
+ MakeEmptyBodyFunction("foo", ast::AttributeList{
+ Stage(ast::PipelineStage::kFragment),
+ });
- Inspector& inspector = Build();
+ Inspector& inspector = Build();
- auto result = inspector.GetSampledTextureResourceBindings("foo");
- ASSERT_FALSE(inspector.has_error()) << inspector.error();
+ auto result = inspector.GetSampledTextureResourceBindings("foo");
+ ASSERT_FALSE(inspector.has_error()) << inspector.error();
- EXPECT_EQ(0u, result.size());
+ EXPECT_EQ(0u, result.size());
}
TEST_P(InspectorGetSampledTextureResourceBindingsTestWithParam, textureSample) {
- auto* sampled_texture_type = ty.sampled_texture(
- GetParam().type_dim, GetBaseType(GetParam().sampled_kind));
- AddResource("foo_texture", sampled_texture_type, 0, 0);
- AddSampler("foo_sampler", 0, 1);
- auto* coord_type = GetCoordsType(GetParam().type_dim, ty.f32());
- AddGlobalVariable("foo_coords", coord_type);
-
- MakeSamplerReferenceBodyFunction("ep", "foo_texture", "foo_sampler",
- "foo_coords",
- GetBaseType(GetParam().sampled_kind),
- ast::AttributeList{
- Stage(ast::PipelineStage::kFragment),
- });
-
- Inspector& inspector = Build();
-
- auto result = inspector.GetSampledTextureResourceBindings("ep");
- ASSERT_FALSE(inspector.has_error()) << inspector.error();
-
- EXPECT_EQ(ResourceBinding::ResourceType::kSampledTexture,
- result[0].resource_type);
- ASSERT_EQ(1u, result.size());
- EXPECT_EQ(0u, result[0].bind_group);
- EXPECT_EQ(0u, result[0].binding);
- EXPECT_EQ(GetParam().inspector_dim, result[0].dim);
- EXPECT_EQ(GetParam().sampled_kind, result[0].sampled_kind);
-
- // Prove that sampled and multi-sampled bindings are accounted
- // for separately.
- auto multisampled_result =
- inspector.GetMultisampledTextureResourceBindings("ep");
- ASSERT_FALSE(inspector.has_error()) << inspector.error();
- ASSERT_TRUE(multisampled_result.empty());
+ auto* sampled_texture_type =
+ ty.sampled_texture(GetParam().type_dim, GetBaseType(GetParam().sampled_kind));
+ AddResource("foo_texture", sampled_texture_type, 0, 0);
+ AddSampler("foo_sampler", 0, 1);
+ auto* coord_type = GetCoordsType(GetParam().type_dim, ty.f32());
+ AddGlobalVariable("foo_coords", coord_type);
+
+ MakeSamplerReferenceBodyFunction("ep", "foo_texture", "foo_sampler", "foo_coords",
+ GetBaseType(GetParam().sampled_kind),
+ ast::AttributeList{
+ Stage(ast::PipelineStage::kFragment),
+ });
+
+ Inspector& inspector = Build();
+
+ auto result = inspector.GetSampledTextureResourceBindings("ep");
+ ASSERT_FALSE(inspector.has_error()) << inspector.error();
+
+ EXPECT_EQ(ResourceBinding::ResourceType::kSampledTexture, result[0].resource_type);
+ ASSERT_EQ(1u, result.size());
+ EXPECT_EQ(0u, result[0].bind_group);
+ EXPECT_EQ(0u, result[0].binding);
+ EXPECT_EQ(GetParam().inspector_dim, result[0].dim);
+ EXPECT_EQ(GetParam().sampled_kind, result[0].sampled_kind);
+
+ // Prove that sampled and multi-sampled bindings are accounted
+ // for separately.
+ auto multisampled_result = inspector.GetMultisampledTextureResourceBindings("ep");
+ ASSERT_FALSE(inspector.has_error()) << inspector.error();
+ ASSERT_TRUE(multisampled_result.empty());
}
INSTANTIATE_TEST_SUITE_P(
InspectorGetSampledTextureResourceBindingsTest,
InspectorGetSampledTextureResourceBindingsTestWithParam,
- testing::Values(
- GetSampledTextureTestParams{
- ast::TextureDimension::k1d,
- inspector::ResourceBinding::TextureDimension::k1d,
- inspector::ResourceBinding::SampledKind::kFloat},
- GetSampledTextureTestParams{
- ast::TextureDimension::k2d,
- inspector::ResourceBinding::TextureDimension::k2d,
- inspector::ResourceBinding::SampledKind::kFloat},
- GetSampledTextureTestParams{
- ast::TextureDimension::k3d,
- inspector::ResourceBinding::TextureDimension::k3d,
- inspector::ResourceBinding::SampledKind::kFloat},
- GetSampledTextureTestParams{
- ast::TextureDimension::kCube,
- inspector::ResourceBinding::TextureDimension::kCube,
- inspector::ResourceBinding::SampledKind::kFloat}));
-
-TEST_P(InspectorGetSampledArrayTextureResourceBindingsTestWithParam,
- textureSample) {
- auto* sampled_texture_type = ty.sampled_texture(
- GetParam().type_dim, GetBaseType(GetParam().sampled_kind));
- AddResource("foo_texture", sampled_texture_type, 0, 0);
- AddSampler("foo_sampler", 0, 1);
- auto* coord_type = GetCoordsType(GetParam().type_dim, ty.f32());
- AddGlobalVariable("foo_coords", coord_type);
- AddGlobalVariable("foo_array_index", ty.i32());
-
- MakeSamplerReferenceBodyFunction("ep", "foo_texture", "foo_sampler",
- "foo_coords", "foo_array_index",
- GetBaseType(GetParam().sampled_kind),
- ast::AttributeList{
- Stage(ast::PipelineStage::kFragment),
- });
-
- Inspector& inspector = Build();
-
- auto result = inspector.GetSampledTextureResourceBindings("ep");
- ASSERT_FALSE(inspector.has_error()) << inspector.error();
- ASSERT_EQ(1u, result.size());
-
- EXPECT_EQ(ResourceBinding::ResourceType::kSampledTexture,
- result[0].resource_type);
- EXPECT_EQ(0u, result[0].bind_group);
- EXPECT_EQ(0u, result[0].binding);
- EXPECT_EQ(GetParam().inspector_dim, result[0].dim);
- EXPECT_EQ(GetParam().sampled_kind, result[0].sampled_kind);
+ testing::Values(GetSampledTextureTestParams{ast::TextureDimension::k1d,
+ inspector::ResourceBinding::TextureDimension::k1d,
+ inspector::ResourceBinding::SampledKind::kFloat},
+ GetSampledTextureTestParams{ast::TextureDimension::k2d,
+ inspector::ResourceBinding::TextureDimension::k2d,
+ inspector::ResourceBinding::SampledKind::kFloat},
+ GetSampledTextureTestParams{ast::TextureDimension::k3d,
+ inspector::ResourceBinding::TextureDimension::k3d,
+ inspector::ResourceBinding::SampledKind::kFloat},
+ GetSampledTextureTestParams{ast::TextureDimension::kCube,
+ inspector::ResourceBinding::TextureDimension::kCube,
+ inspector::ResourceBinding::SampledKind::kFloat}));
+
+TEST_P(InspectorGetSampledArrayTextureResourceBindingsTestWithParam, textureSample) {
+ auto* sampled_texture_type =
+ ty.sampled_texture(GetParam().type_dim, GetBaseType(GetParam().sampled_kind));
+ AddResource("foo_texture", sampled_texture_type, 0, 0);
+ AddSampler("foo_sampler", 0, 1);
+ auto* coord_type = GetCoordsType(GetParam().type_dim, ty.f32());
+ AddGlobalVariable("foo_coords", coord_type);
+ AddGlobalVariable("foo_array_index", ty.i32());
+
+ MakeSamplerReferenceBodyFunction("ep", "foo_texture", "foo_sampler", "foo_coords",
+ "foo_array_index", GetBaseType(GetParam().sampled_kind),
+ ast::AttributeList{
+ Stage(ast::PipelineStage::kFragment),
+ });
+
+ Inspector& inspector = Build();
+
+ auto result = inspector.GetSampledTextureResourceBindings("ep");
+ ASSERT_FALSE(inspector.has_error()) << inspector.error();
+ ASSERT_EQ(1u, result.size());
+
+ EXPECT_EQ(ResourceBinding::ResourceType::kSampledTexture, result[0].resource_type);
+ EXPECT_EQ(0u, result[0].bind_group);
+ EXPECT_EQ(0u, result[0].binding);
+ EXPECT_EQ(GetParam().inspector_dim, result[0].dim);
+ EXPECT_EQ(GetParam().sampled_kind, result[0].sampled_kind);
}
INSTANTIATE_TEST_SUITE_P(
InspectorGetSampledArrayTextureResourceBindingsTest,
InspectorGetSampledArrayTextureResourceBindingsTestWithParam,
testing::Values(
- GetSampledTextureTestParams{
- ast::TextureDimension::k2dArray,
- inspector::ResourceBinding::TextureDimension::k2dArray,
- inspector::ResourceBinding::SampledKind::kFloat},
- GetSampledTextureTestParams{
- ast::TextureDimension::kCubeArray,
- inspector::ResourceBinding::TextureDimension::kCubeArray,
- inspector::ResourceBinding::SampledKind::kFloat}));
-
-TEST_P(InspectorGetMultisampledTextureResourceBindingsTestWithParam,
- textureLoad) {
- auto* multisampled_texture_type = ty.multisampled_texture(
- GetParam().type_dim, GetBaseType(GetParam().sampled_kind));
- AddResource("foo_texture", multisampled_texture_type, 0, 0);
- auto* coord_type = GetCoordsType(GetParam().type_dim, ty.i32());
- AddGlobalVariable("foo_coords", coord_type);
- AddGlobalVariable("foo_sample_index", ty.i32());
-
- Func("ep", ast::VariableList(), ty.void_(),
- ast::StatementList{
- CallStmt(Call("textureLoad", "foo_texture", "foo_coords",
- "foo_sample_index")),
- },
- ast::AttributeList{
- Stage(ast::PipelineStage::kFragment),
- });
-
- Inspector& inspector = Build();
-
- auto result = inspector.GetMultisampledTextureResourceBindings("ep");
- ASSERT_FALSE(inspector.has_error()) << inspector.error();
-
- ASSERT_EQ(1u, result.size());
- EXPECT_EQ(ResourceBinding::ResourceType::kMultisampledTexture,
- result[0].resource_type);
- EXPECT_EQ(0u, result[0].bind_group);
- EXPECT_EQ(0u, result[0].binding);
- EXPECT_EQ(GetParam().inspector_dim, result[0].dim);
- EXPECT_EQ(GetParam().sampled_kind, result[0].sampled_kind);
-
- // Prove that sampled and multi-sampled bindings are accounted
- // for separately.
- auto single_sampled_result =
- inspector.GetSampledTextureResourceBindings("ep");
- ASSERT_FALSE(inspector.has_error()) << inspector.error();
- ASSERT_TRUE(single_sampled_result.empty());
+ GetSampledTextureTestParams{ast::TextureDimension::k2dArray,
+ inspector::ResourceBinding::TextureDimension::k2dArray,
+ inspector::ResourceBinding::SampledKind::kFloat},
+ GetSampledTextureTestParams{ast::TextureDimension::kCubeArray,
+ inspector::ResourceBinding::TextureDimension::kCubeArray,
+ inspector::ResourceBinding::SampledKind::kFloat}));
+
+TEST_P(InspectorGetMultisampledTextureResourceBindingsTestWithParam, textureLoad) {
+ auto* multisampled_texture_type =
+ ty.multisampled_texture(GetParam().type_dim, GetBaseType(GetParam().sampled_kind));
+ AddResource("foo_texture", multisampled_texture_type, 0, 0);
+ auto* coord_type = GetCoordsType(GetParam().type_dim, ty.i32());
+ AddGlobalVariable("foo_coords", coord_type);
+ AddGlobalVariable("foo_sample_index", ty.i32());
+
+ Func("ep", ast::VariableList(), ty.void_(),
+ ast::StatementList{
+ CallStmt(Call("textureLoad", "foo_texture", "foo_coords", "foo_sample_index")),
+ },
+ ast::AttributeList{
+ Stage(ast::PipelineStage::kFragment),
+ });
+
+ Inspector& inspector = Build();
+
+ auto result = inspector.GetMultisampledTextureResourceBindings("ep");
+ ASSERT_FALSE(inspector.has_error()) << inspector.error();
+
+ ASSERT_EQ(1u, result.size());
+ EXPECT_EQ(ResourceBinding::ResourceType::kMultisampledTexture, result[0].resource_type);
+ EXPECT_EQ(0u, result[0].bind_group);
+ EXPECT_EQ(0u, result[0].binding);
+ EXPECT_EQ(GetParam().inspector_dim, result[0].dim);
+ EXPECT_EQ(GetParam().sampled_kind, result[0].sampled_kind);
+
+ // Prove that sampled and multi-sampled bindings are accounted
+ // for separately.
+ auto single_sampled_result = inspector.GetSampledTextureResourceBindings("ep");
+ ASSERT_FALSE(inspector.has_error()) << inspector.error();
+ ASSERT_TRUE(single_sampled_result.empty());
}
INSTANTIATE_TEST_SUITE_P(
InspectorGetMultisampledTextureResourceBindingsTest,
InspectorGetMultisampledTextureResourceBindingsTestWithParam,
testing::Values(
- GetMultisampledTextureTestParams{
- ast::TextureDimension::k2d,
- inspector::ResourceBinding::TextureDimension::k2d,
- inspector::ResourceBinding::SampledKind::kFloat},
- GetMultisampledTextureTestParams{
- ast::TextureDimension::k2d,
- inspector::ResourceBinding::TextureDimension::k2d,
- inspector::ResourceBinding::SampledKind::kSInt},
- GetMultisampledTextureTestParams{
- ast::TextureDimension::k2d,
- inspector::ResourceBinding::TextureDimension::k2d,
- inspector::ResourceBinding::SampledKind::kUInt}));
+ GetMultisampledTextureTestParams{ast::TextureDimension::k2d,
+ inspector::ResourceBinding::TextureDimension::k2d,
+ inspector::ResourceBinding::SampledKind::kFloat},
+ GetMultisampledTextureTestParams{ast::TextureDimension::k2d,
+ inspector::ResourceBinding::TextureDimension::k2d,
+ inspector::ResourceBinding::SampledKind::kSInt},
+ GetMultisampledTextureTestParams{ast::TextureDimension::k2d,
+ inspector::ResourceBinding::TextureDimension::k2d,
+ inspector::ResourceBinding::SampledKind::kUInt}));
TEST_F(InspectorGetMultisampledArrayTextureResourceBindingsTest, Empty) {
- MakeEmptyBodyFunction("foo", ast::AttributeList{
- Stage(ast::PipelineStage::kFragment),
- });
+ MakeEmptyBodyFunction("foo", ast::AttributeList{
+ Stage(ast::PipelineStage::kFragment),
+ });
- Inspector& inspector = Build();
+ Inspector& inspector = Build();
- auto result = inspector.GetSampledTextureResourceBindings("foo");
- ASSERT_FALSE(inspector.has_error()) << inspector.error();
+ auto result = inspector.GetSampledTextureResourceBindings("foo");
+ ASSERT_FALSE(inspector.has_error()) << inspector.error();
- EXPECT_EQ(0u, result.size());
+ EXPECT_EQ(0u, result.size());
}
-TEST_P(InspectorGetMultisampledArrayTextureResourceBindingsTestWithParam,
- DISABLED_textureSample) {
- auto* multisampled_texture_type = ty.multisampled_texture(
- GetParam().type_dim, GetBaseType(GetParam().sampled_kind));
- AddResource("foo_texture", multisampled_texture_type, 0, 0);
- AddSampler("foo_sampler", 0, 1);
- auto* coord_type = GetCoordsType(GetParam().type_dim, ty.f32());
- AddGlobalVariable("foo_coords", coord_type);
- AddGlobalVariable("foo_array_index", ty.i32());
+TEST_P(InspectorGetMultisampledArrayTextureResourceBindingsTestWithParam, DISABLED_textureSample) {
+ auto* multisampled_texture_type =
+ ty.multisampled_texture(GetParam().type_dim, GetBaseType(GetParam().sampled_kind));
+ AddResource("foo_texture", multisampled_texture_type, 0, 0);
+ AddSampler("foo_sampler", 0, 1);
+ auto* coord_type = GetCoordsType(GetParam().type_dim, ty.f32());
+ AddGlobalVariable("foo_coords", coord_type);
+ AddGlobalVariable("foo_array_index", ty.i32());
- MakeSamplerReferenceBodyFunction("ep", "foo_texture", "foo_sampler",
- "foo_coords", "foo_array_index",
- GetBaseType(GetParam().sampled_kind),
- ast::AttributeList{
- Stage(ast::PipelineStage::kFragment),
- });
+ MakeSamplerReferenceBodyFunction("ep", "foo_texture", "foo_sampler", "foo_coords",
+ "foo_array_index", GetBaseType(GetParam().sampled_kind),
+ ast::AttributeList{
+ Stage(ast::PipelineStage::kFragment),
+ });
- Inspector& inspector = Build();
+ Inspector& inspector = Build();
- auto result = inspector.GetMultisampledTextureResourceBindings("ep");
- ASSERT_FALSE(inspector.has_error()) << inspector.error();
- ASSERT_EQ(1u, result.size());
+ auto result = inspector.GetMultisampledTextureResourceBindings("ep");
+ ASSERT_FALSE(inspector.has_error()) << inspector.error();
+ ASSERT_EQ(1u, result.size());
- EXPECT_EQ(ResourceBinding::ResourceType::kMultisampledTexture,
- result[0].resource_type);
- EXPECT_EQ(0u, result[0].bind_group);
- EXPECT_EQ(0u, result[0].binding);
- EXPECT_EQ(GetParam().inspector_dim, result[0].dim);
- EXPECT_EQ(GetParam().sampled_kind, result[0].sampled_kind);
+ EXPECT_EQ(ResourceBinding::ResourceType::kMultisampledTexture, result[0].resource_type);
+ EXPECT_EQ(0u, result[0].bind_group);
+ EXPECT_EQ(0u, result[0].binding);
+ EXPECT_EQ(GetParam().inspector_dim, result[0].dim);
+ EXPECT_EQ(GetParam().sampled_kind, result[0].sampled_kind);
}
INSTANTIATE_TEST_SUITE_P(
InspectorGetMultisampledArrayTextureResourceBindingsTest,
InspectorGetMultisampledArrayTextureResourceBindingsTestWithParam,
testing::Values(
- GetMultisampledTextureTestParams{
- ast::TextureDimension::k2dArray,
- inspector::ResourceBinding::TextureDimension::k2dArray,
- inspector::ResourceBinding::SampledKind::kFloat},
- GetMultisampledTextureTestParams{
- ast::TextureDimension::k2dArray,
- inspector::ResourceBinding::TextureDimension::k2dArray,
- inspector::ResourceBinding::SampledKind::kSInt},
- GetMultisampledTextureTestParams{
- ast::TextureDimension::k2dArray,
- inspector::ResourceBinding::TextureDimension::k2dArray,
- inspector::ResourceBinding::SampledKind::kUInt}));
+ GetMultisampledTextureTestParams{ast::TextureDimension::k2dArray,
+ inspector::ResourceBinding::TextureDimension::k2dArray,
+ inspector::ResourceBinding::SampledKind::kFloat},
+ GetMultisampledTextureTestParams{ast::TextureDimension::k2dArray,
+ inspector::ResourceBinding::TextureDimension::k2dArray,
+ inspector::ResourceBinding::SampledKind::kSInt},
+ GetMultisampledTextureTestParams{ast::TextureDimension::k2dArray,
+ inspector::ResourceBinding::TextureDimension::k2dArray,
+ inspector::ResourceBinding::SampledKind::kUInt}));
TEST_F(InspectorGetStorageTextureResourceBindingsTest, Empty) {
- MakeEmptyBodyFunction("ep", ast::AttributeList{
- Stage(ast::PipelineStage::kFragment),
- });
+ MakeEmptyBodyFunction("ep", ast::AttributeList{
+ Stage(ast::PipelineStage::kFragment),
+ });
- Inspector& inspector = Build();
+ Inspector& inspector = Build();
- auto result = inspector.GetWriteOnlyStorageTextureResourceBindings("ep");
- ASSERT_FALSE(inspector.has_error()) << inspector.error();
- EXPECT_EQ(0u, result.size());
+ auto result = inspector.GetWriteOnlyStorageTextureResourceBindings("ep");
+ ASSERT_FALSE(inspector.has_error()) << inspector.error();
+ EXPECT_EQ(0u, result.size());
}
TEST_P(InspectorGetStorageTextureResourceBindingsTestWithParam, Simple) {
- DimensionParams dim_params;
- TexelFormatParams format_params;
- std::tie(dim_params, format_params) = GetParam();
-
- ast::TextureDimension dim;
- ResourceBinding::TextureDimension expected_dim;
- std::tie(dim, expected_dim) = dim_params;
-
- ast::TexelFormat format;
- ResourceBinding::TexelFormat expected_format;
- ResourceBinding::SampledKind expected_kind;
- std::tie(format, expected_format, expected_kind) = format_params;
-
- auto* st_type = MakeStorageTextureTypes(dim, format);
- AddStorageTexture("st_var", st_type, 0, 0);
-
- const ast::Type* dim_type = nullptr;
- switch (dim) {
- case ast::TextureDimension::k1d:
- dim_type = ty.i32();
- break;
- case ast::TextureDimension::k2d:
- case ast::TextureDimension::k2dArray:
- dim_type = ty.vec2<i32>();
- break;
- case ast::TextureDimension::k3d:
- dim_type = ty.vec3<i32>();
- break;
- default:
- break;
- }
-
- ASSERT_FALSE(dim_type == nullptr);
-
- MakeStorageTextureBodyFunction(
- "ep", "st_var", dim_type,
- ast::AttributeList{Stage(ast::PipelineStage::kFragment)});
-
- Inspector& inspector = Build();
-
- auto result = inspector.GetWriteOnlyStorageTextureResourceBindings("ep");
- ASSERT_FALSE(inspector.has_error()) << inspector.error();
- ASSERT_EQ(1u, result.size());
-
- EXPECT_EQ(ResourceBinding::ResourceType::kWriteOnlyStorageTexture,
- result[0].resource_type);
- EXPECT_EQ(0u, result[0].bind_group);
- EXPECT_EQ(0u, result[0].binding);
- EXPECT_EQ(expected_dim, result[0].dim);
- EXPECT_EQ(expected_format, result[0].image_format);
- EXPECT_EQ(expected_kind, result[0].sampled_kind);
+ DimensionParams dim_params;
+ TexelFormatParams format_params;
+ std::tie(dim_params, format_params) = GetParam();
+
+ ast::TextureDimension dim;
+ ResourceBinding::TextureDimension expected_dim;
+ std::tie(dim, expected_dim) = dim_params;
+
+ ast::TexelFormat format;
+ ResourceBinding::TexelFormat expected_format;
+ ResourceBinding::SampledKind expected_kind;
+ std::tie(format, expected_format, expected_kind) = format_params;
+
+ auto* st_type = MakeStorageTextureTypes(dim, format);
+ AddStorageTexture("st_var", st_type, 0, 0);
+
+ const ast::Type* dim_type = nullptr;
+ switch (dim) {
+ case ast::TextureDimension::k1d:
+ dim_type = ty.i32();
+ break;
+ case ast::TextureDimension::k2d:
+ case ast::TextureDimension::k2dArray:
+ dim_type = ty.vec2<i32>();
+ break;
+ case ast::TextureDimension::k3d:
+ dim_type = ty.vec3<i32>();
+ break;
+ default:
+ break;
+ }
+
+ ASSERT_FALSE(dim_type == nullptr);
+
+ MakeStorageTextureBodyFunction("ep", "st_var", dim_type,
+ ast::AttributeList{Stage(ast::PipelineStage::kFragment)});
+
+ Inspector& inspector = Build();
+
+ auto result = inspector.GetWriteOnlyStorageTextureResourceBindings("ep");
+ ASSERT_FALSE(inspector.has_error()) << inspector.error();
+ ASSERT_EQ(1u, result.size());
+
+ EXPECT_EQ(ResourceBinding::ResourceType::kWriteOnlyStorageTexture, result[0].resource_type);
+ EXPECT_EQ(0u, result[0].bind_group);
+ EXPECT_EQ(0u, result[0].binding);
+ EXPECT_EQ(expected_dim, result[0].dim);
+ EXPECT_EQ(expected_format, result[0].image_format);
+ EXPECT_EQ(expected_kind, result[0].sampled_kind);
}
INSTANTIATE_TEST_SUITE_P(
InspectorGetStorageTextureResourceBindingsTest,
InspectorGetStorageTextureResourceBindingsTestWithParam,
- testing::Combine(
- testing::Values(
- std::make_tuple(ast::TextureDimension::k1d,
- ResourceBinding::TextureDimension::k1d),
- std::make_tuple(ast::TextureDimension::k2d,
- ResourceBinding::TextureDimension::k2d),
- std::make_tuple(ast::TextureDimension::k2dArray,
- ResourceBinding::TextureDimension::k2dArray),
- std::make_tuple(ast::TextureDimension::k3d,
- ResourceBinding::TextureDimension::k3d)),
- testing::Values(
- std::make_tuple(ast::TexelFormat::kR32Float,
- ResourceBinding::TexelFormat::kR32Float,
- ResourceBinding::SampledKind::kFloat),
- std::make_tuple(ast::TexelFormat::kR32Sint,
- ResourceBinding::TexelFormat::kR32Sint,
- ResourceBinding::SampledKind::kSInt),
- std::make_tuple(ast::TexelFormat::kR32Uint,
- ResourceBinding::TexelFormat::kR32Uint,
- ResourceBinding::SampledKind::kUInt),
- std::make_tuple(ast::TexelFormat::kRg32Float,
- ResourceBinding::TexelFormat::kRg32Float,
- ResourceBinding::SampledKind::kFloat),
- std::make_tuple(ast::TexelFormat::kRg32Sint,
- ResourceBinding::TexelFormat::kRg32Sint,
- ResourceBinding::SampledKind::kSInt),
- std::make_tuple(ast::TexelFormat::kRg32Uint,
- ResourceBinding::TexelFormat::kRg32Uint,
- ResourceBinding::SampledKind::kUInt),
- std::make_tuple(ast::TexelFormat::kRgba16Float,
- ResourceBinding::TexelFormat::kRgba16Float,
- ResourceBinding::SampledKind::kFloat),
- std::make_tuple(ast::TexelFormat::kRgba16Sint,
- ResourceBinding::TexelFormat::kRgba16Sint,
- ResourceBinding::SampledKind::kSInt),
- std::make_tuple(ast::TexelFormat::kRgba16Uint,
- ResourceBinding::TexelFormat::kRgba16Uint,
- ResourceBinding::SampledKind::kUInt),
- std::make_tuple(ast::TexelFormat::kRgba32Float,
- ResourceBinding::TexelFormat::kRgba32Float,
- ResourceBinding::SampledKind::kFloat),
- std::make_tuple(ast::TexelFormat::kRgba32Sint,
- ResourceBinding::TexelFormat::kRgba32Sint,
- ResourceBinding::SampledKind::kSInt),
- std::make_tuple(ast::TexelFormat::kRgba32Uint,
- ResourceBinding::TexelFormat::kRgba32Uint,
- ResourceBinding::SampledKind::kUInt),
- std::make_tuple(ast::TexelFormat::kRgba8Sint,
- ResourceBinding::TexelFormat::kRgba8Sint,
- ResourceBinding::SampledKind::kSInt),
- std::make_tuple(ast::TexelFormat::kRgba8Snorm,
- ResourceBinding::TexelFormat::kRgba8Snorm,
- ResourceBinding::SampledKind::kFloat),
- std::make_tuple(ast::TexelFormat::kRgba8Uint,
- ResourceBinding::TexelFormat::kRgba8Uint,
- ResourceBinding::SampledKind::kUInt),
- std::make_tuple(ast::TexelFormat::kRgba8Unorm,
- ResourceBinding::TexelFormat::kRgba8Unorm,
- ResourceBinding::SampledKind::kFloat))));
-
-TEST_P(InspectorGetDepthTextureResourceBindingsTestWithParam,
- textureDimensions) {
- auto* depth_texture_type = ty.depth_texture(GetParam().type_dim);
- AddResource("dt", depth_texture_type, 0, 0);
-
- Func("ep", ast::VariableList(), ty.void_(),
- ast::StatementList{
- CallStmt(Call("textureDimensions", "dt")),
- },
- ast::AttributeList{
- Stage(ast::PipelineStage::kFragment),
- });
-
- Inspector& inspector = Build();
-
- auto result = inspector.GetDepthTextureResourceBindings("ep");
- ASSERT_FALSE(inspector.has_error()) << inspector.error();
-
- EXPECT_EQ(ResourceBinding::ResourceType::kDepthTexture,
- result[0].resource_type);
- ASSERT_EQ(1u, result.size());
- EXPECT_EQ(0u, result[0].bind_group);
- EXPECT_EQ(0u, result[0].binding);
- EXPECT_EQ(GetParam().inspector_dim, result[0].dim);
+ testing::Combine(testing::Values(std::make_tuple(ast::TextureDimension::k1d,
+ ResourceBinding::TextureDimension::k1d),
+ std::make_tuple(ast::TextureDimension::k2d,
+ ResourceBinding::TextureDimension::k2d),
+ std::make_tuple(ast::TextureDimension::k2dArray,
+ ResourceBinding::TextureDimension::k2dArray),
+ std::make_tuple(ast::TextureDimension::k3d,
+ ResourceBinding::TextureDimension::k3d)),
+ testing::Values(std::make_tuple(ast::TexelFormat::kR32Float,
+ ResourceBinding::TexelFormat::kR32Float,
+ ResourceBinding::SampledKind::kFloat),
+ std::make_tuple(ast::TexelFormat::kR32Sint,
+ ResourceBinding::TexelFormat::kR32Sint,
+ ResourceBinding::SampledKind::kSInt),
+ std::make_tuple(ast::TexelFormat::kR32Uint,
+ ResourceBinding::TexelFormat::kR32Uint,
+ ResourceBinding::SampledKind::kUInt),
+ std::make_tuple(ast::TexelFormat::kRg32Float,
+ ResourceBinding::TexelFormat::kRg32Float,
+ ResourceBinding::SampledKind::kFloat),
+ std::make_tuple(ast::TexelFormat::kRg32Sint,
+ ResourceBinding::TexelFormat::kRg32Sint,
+ ResourceBinding::SampledKind::kSInt),
+ std::make_tuple(ast::TexelFormat::kRg32Uint,
+ ResourceBinding::TexelFormat::kRg32Uint,
+ ResourceBinding::SampledKind::kUInt),
+ std::make_tuple(ast::TexelFormat::kRgba16Float,
+ ResourceBinding::TexelFormat::kRgba16Float,
+ ResourceBinding::SampledKind::kFloat),
+ std::make_tuple(ast::TexelFormat::kRgba16Sint,
+ ResourceBinding::TexelFormat::kRgba16Sint,
+ ResourceBinding::SampledKind::kSInt),
+ std::make_tuple(ast::TexelFormat::kRgba16Uint,
+ ResourceBinding::TexelFormat::kRgba16Uint,
+ ResourceBinding::SampledKind::kUInt),
+ std::make_tuple(ast::TexelFormat::kRgba32Float,
+ ResourceBinding::TexelFormat::kRgba32Float,
+ ResourceBinding::SampledKind::kFloat),
+ std::make_tuple(ast::TexelFormat::kRgba32Sint,
+ ResourceBinding::TexelFormat::kRgba32Sint,
+ ResourceBinding::SampledKind::kSInt),
+ std::make_tuple(ast::TexelFormat::kRgba32Uint,
+ ResourceBinding::TexelFormat::kRgba32Uint,
+ ResourceBinding::SampledKind::kUInt),
+ std::make_tuple(ast::TexelFormat::kRgba8Sint,
+ ResourceBinding::TexelFormat::kRgba8Sint,
+ ResourceBinding::SampledKind::kSInt),
+ std::make_tuple(ast::TexelFormat::kRgba8Snorm,
+ ResourceBinding::TexelFormat::kRgba8Snorm,
+ ResourceBinding::SampledKind::kFloat),
+ std::make_tuple(ast::TexelFormat::kRgba8Uint,
+ ResourceBinding::TexelFormat::kRgba8Uint,
+ ResourceBinding::SampledKind::kUInt),
+ std::make_tuple(ast::TexelFormat::kRgba8Unorm,
+ ResourceBinding::TexelFormat::kRgba8Unorm,
+ ResourceBinding::SampledKind::kFloat))));
+
+TEST_P(InspectorGetDepthTextureResourceBindingsTestWithParam, textureDimensions) {
+ auto* depth_texture_type = ty.depth_texture(GetParam().type_dim);
+ AddResource("dt", depth_texture_type, 0, 0);
+
+ Func("ep", ast::VariableList(), ty.void_(),
+ ast::StatementList{
+ CallStmt(Call("textureDimensions", "dt")),
+ },
+ ast::AttributeList{
+ Stage(ast::PipelineStage::kFragment),
+ });
+
+ Inspector& inspector = Build();
+
+ auto result = inspector.GetDepthTextureResourceBindings("ep");
+ ASSERT_FALSE(inspector.has_error()) << inspector.error();
+
+ EXPECT_EQ(ResourceBinding::ResourceType::kDepthTexture, result[0].resource_type);
+ ASSERT_EQ(1u, result.size());
+ EXPECT_EQ(0u, result[0].bind_group);
+ EXPECT_EQ(0u, result[0].binding);
+ EXPECT_EQ(GetParam().inspector_dim, result[0].dim);
}
INSTANTIATE_TEST_SUITE_P(
InspectorGetDepthTextureResourceBindingsTest,
InspectorGetDepthTextureResourceBindingsTestWithParam,
testing::Values(
- GetDepthTextureTestParams{
- ast::TextureDimension::k2d,
- inspector::ResourceBinding::TextureDimension::k2d},
- GetDepthTextureTestParams{
- ast::TextureDimension::k2dArray,
- inspector::ResourceBinding::TextureDimension::k2dArray},
- GetDepthTextureTestParams{
- ast::TextureDimension::kCube,
- inspector::ResourceBinding::TextureDimension::kCube},
- GetDepthTextureTestParams{
- ast::TextureDimension::kCubeArray,
- inspector::ResourceBinding::TextureDimension::kCubeArray}));
-
-TEST_F(InspectorGetDepthMultisampledTextureResourceBindingsTest,
- textureDimensions) {
- auto* depth_ms_texture_type =
- ty.depth_multisampled_texture(ast::TextureDimension::k2d);
- AddResource("tex", depth_ms_texture_type, 0, 0);
-
- Func("ep", ast::VariableList(), ty.void_(),
- ast::StatementList{
- CallStmt(Call("textureDimensions", "tex")),
- },
- ast::AttributeList{
- Stage(ast::PipelineStage::kFragment),
- });
-
- Inspector& inspector = Build();
-
- auto result = inspector.GetDepthMultisampledTextureResourceBindings("ep");
- ASSERT_FALSE(inspector.has_error()) << inspector.error();
-
- EXPECT_EQ(ResourceBinding::ResourceType::kDepthMultisampledTexture,
- result[0].resource_type);
- ASSERT_EQ(1u, result.size());
- EXPECT_EQ(0u, result[0].bind_group);
- EXPECT_EQ(0u, result[0].binding);
- EXPECT_EQ(ResourceBinding::TextureDimension::k2d, result[0].dim);
+ GetDepthTextureTestParams{ast::TextureDimension::k2d,
+ inspector::ResourceBinding::TextureDimension::k2d},
+ GetDepthTextureTestParams{ast::TextureDimension::k2dArray,
+ inspector::ResourceBinding::TextureDimension::k2dArray},
+ GetDepthTextureTestParams{ast::TextureDimension::kCube,
+ inspector::ResourceBinding::TextureDimension::kCube},
+ GetDepthTextureTestParams{ast::TextureDimension::kCubeArray,
+ inspector::ResourceBinding::TextureDimension::kCubeArray}));
+
+TEST_F(InspectorGetDepthMultisampledTextureResourceBindingsTest, textureDimensions) {
+ auto* depth_ms_texture_type = ty.depth_multisampled_texture(ast::TextureDimension::k2d);
+ AddResource("tex", depth_ms_texture_type, 0, 0);
+
+ Func("ep", ast::VariableList(), ty.void_(),
+ ast::StatementList{
+ CallStmt(Call("textureDimensions", "tex")),
+ },
+ ast::AttributeList{
+ Stage(ast::PipelineStage::kFragment),
+ });
+
+ Inspector& inspector = Build();
+
+ auto result = inspector.GetDepthMultisampledTextureResourceBindings("ep");
+ ASSERT_FALSE(inspector.has_error()) << inspector.error();
+
+    ASSERT_EQ(1u, result.size());
+    EXPECT_EQ(ResourceBinding::ResourceType::kDepthMultisampledTexture, result[0].resource_type);
+ EXPECT_EQ(0u, result[0].bind_group);
+ EXPECT_EQ(0u, result[0].binding);
+ EXPECT_EQ(ResourceBinding::TextureDimension::k2d, result[0].dim);
}
TEST_F(InspectorGetExternalTextureResourceBindingsTest, Simple) {
- auto* external_texture_type = ty.external_texture();
- AddResource("et", external_texture_type, 0, 0);
+ auto* external_texture_type = ty.external_texture();
+ AddResource("et", external_texture_type, 0, 0);
- Func("ep", ast::VariableList(), ty.void_(),
- ast::StatementList{
- CallStmt(Call("textureDimensions", "et")),
- },
- ast::AttributeList{
- Stage(ast::PipelineStage::kFragment),
- });
+ Func("ep", ast::VariableList(), ty.void_(),
+ ast::StatementList{
+ CallStmt(Call("textureDimensions", "et")),
+ },
+ ast::AttributeList{
+ Stage(ast::PipelineStage::kFragment),
+ });
- Inspector& inspector = Build();
+ Inspector& inspector = Build();
- auto result = inspector.GetExternalTextureResourceBindings("ep");
- ASSERT_FALSE(inspector.has_error()) << inspector.error();
- EXPECT_EQ(ResourceBinding::ResourceType::kExternalTexture,
- result[0].resource_type);
+ auto result = inspector.GetExternalTextureResourceBindings("ep");
+ ASSERT_FALSE(inspector.has_error()) << inspector.error();
+    ASSERT_EQ(1u, result.size());
- ASSERT_EQ(1u, result.size());
- EXPECT_EQ(0u, result[0].bind_group);
- EXPECT_EQ(0u, result[0].binding);
+    EXPECT_EQ(ResourceBinding::ResourceType::kExternalTexture, result[0].resource_type);
+ EXPECT_EQ(0u, result[0].bind_group);
+ EXPECT_EQ(0u, result[0].binding);
}
TEST_F(InspectorGetSamplerTextureUsesTest, None) {
- std::string shader = R"(
-@stage(fragment)
+ std::string shader = R"(
+@fragment
fn main() {
})";
- Inspector& inspector = Initialize(shader);
- auto result = inspector.GetSamplerTextureUses("main");
- ASSERT_FALSE(inspector.has_error()) << inspector.error();
+ Inspector& inspector = Initialize(shader);
+ auto result = inspector.GetSamplerTextureUses("main");
+ ASSERT_FALSE(inspector.has_error()) << inspector.error();
- ASSERT_EQ(0u, result.size());
+ ASSERT_EQ(0u, result.size());
}
TEST_F(InspectorGetSamplerTextureUsesTest, Simple) {
- std::string shader = R"(
+ std::string shader = R"(
@group(0) @binding(1) var mySampler: sampler;
@group(0) @binding(2) var myTexture: texture_2d<f32>;
-@stage(fragment)
+@fragment
fn main(@location(0) fragUV: vec2<f32>,
@location(1) fragPosition: vec4<f32>) -> @location(0) vec4<f32> {
return textureSample(myTexture, mySampler, fragUV) * fragPosition;
})";
- Inspector& inspector = Initialize(shader);
- auto result = inspector.GetSamplerTextureUses("main");
- ASSERT_FALSE(inspector.has_error()) << inspector.error();
+ Inspector& inspector = Initialize(shader);
+ auto result = inspector.GetSamplerTextureUses("main");
+ ASSERT_FALSE(inspector.has_error()) << inspector.error();
- ASSERT_EQ(1u, result.size());
+ ASSERT_EQ(1u, result.size());
- EXPECT_EQ(0u, result[0].sampler_binding_point.group);
- EXPECT_EQ(1u, result[0].sampler_binding_point.binding);
- EXPECT_EQ(0u, result[0].texture_binding_point.group);
- EXPECT_EQ(2u, result[0].texture_binding_point.binding);
+ EXPECT_EQ(0u, result[0].sampler_binding_point.group);
+ EXPECT_EQ(1u, result[0].sampler_binding_point.binding);
+ EXPECT_EQ(0u, result[0].texture_binding_point.group);
+ EXPECT_EQ(2u, result[0].texture_binding_point.binding);
}
TEST_F(InspectorGetSamplerTextureUsesTest, UnknownEntryPoint) {
- std::string shader = R"(
+ std::string shader = R"(
@group(0) @binding(1) var mySampler: sampler;
@group(0) @binding(2) var myTexture: texture_2d<f32>;
-@stage(fragment)
+@fragment
fn main(@location(0) fragUV: vec2<f32>,
@location(1) fragPosition: vec4<f32>) -> @location(0) vec4<f32> {
return textureSample(myTexture, mySampler, fragUV) * fragPosition;
})";
- Inspector& inspector = Initialize(shader);
- auto result = inspector.GetSamplerTextureUses("foo");
- ASSERT_TRUE(inspector.has_error()) << inspector.error();
+ Inspector& inspector = Initialize(shader);
+ auto result = inspector.GetSamplerTextureUses("foo");
+ ASSERT_TRUE(inspector.has_error()) << inspector.error();
}
TEST_F(InspectorGetSamplerTextureUsesTest, MultipleCalls) {
- std::string shader = R"(
+ std::string shader = R"(
@group(0) @binding(1) var mySampler: sampler;
@group(0) @binding(2) var myTexture: texture_2d<f32>;
-@stage(fragment)
+@fragment
fn main(@location(0) fragUV: vec2<f32>,
@location(1) fragPosition: vec4<f32>) -> @location(0) vec4<f32> {
return textureSample(myTexture, mySampler, fragUV) * fragPosition;
})";
- Inspector& inspector = Initialize(shader);
- auto result_0 = inspector.GetSamplerTextureUses("main");
- ASSERT_FALSE(inspector.has_error()) << inspector.error();
+ Inspector& inspector = Initialize(shader);
+ auto result_0 = inspector.GetSamplerTextureUses("main");
+ ASSERT_FALSE(inspector.has_error()) << inspector.error();
- auto result_1 = inspector.GetSamplerTextureUses("main");
- ASSERT_FALSE(inspector.has_error()) << inspector.error();
+ auto result_1 = inspector.GetSamplerTextureUses("main");
+ ASSERT_FALSE(inspector.has_error()) << inspector.error();
- EXPECT_EQ(result_0, result_1);
+ EXPECT_EQ(result_0, result_1);
}
TEST_F(InspectorGetSamplerTextureUsesTest, BothIndirect) {
- std::string shader = R"(
+ std::string shader = R"(
@group(0) @binding(1) var mySampler: sampler;
@group(0) @binding(2) var myTexture: texture_2d<f32>;
@@ -2741,26 +2565,26 @@ fn doSample(t: texture_2d<f32>, s: sampler, uv: vec2<f32>) -> vec4<f32> {
return textureSample(t, s, uv);
}
-@stage(fragment)
+@fragment
fn main(@location(0) fragUV: vec2<f32>,
@location(1) fragPosition: vec4<f32>) -> @location(0) vec4<f32> {
return doSample(myTexture, mySampler, fragUV) * fragPosition;
})";
- Inspector& inspector = Initialize(shader);
- auto result = inspector.GetSamplerTextureUses("main");
- ASSERT_FALSE(inspector.has_error()) << inspector.error();
+ Inspector& inspector = Initialize(shader);
+ auto result = inspector.GetSamplerTextureUses("main");
+ ASSERT_FALSE(inspector.has_error()) << inspector.error();
- ASSERT_EQ(1u, result.size());
+ ASSERT_EQ(1u, result.size());
- EXPECT_EQ(0u, result[0].sampler_binding_point.group);
- EXPECT_EQ(1u, result[0].sampler_binding_point.binding);
- EXPECT_EQ(0u, result[0].texture_binding_point.group);
- EXPECT_EQ(2u, result[0].texture_binding_point.binding);
+ EXPECT_EQ(0u, result[0].sampler_binding_point.group);
+ EXPECT_EQ(1u, result[0].sampler_binding_point.binding);
+ EXPECT_EQ(0u, result[0].texture_binding_point.group);
+ EXPECT_EQ(2u, result[0].texture_binding_point.binding);
}
TEST_F(InspectorGetSamplerTextureUsesTest, SamplerIndirect) {
- std::string shader = R"(
+ std::string shader = R"(
@group(0) @binding(1) var mySampler: sampler;
@group(0) @binding(2) var myTexture: texture_2d<f32>;
@@ -2768,26 +2592,26 @@ fn doSample(s: sampler, uv: vec2<f32>) -> vec4<f32> {
return textureSample(myTexture, s, uv);
}
-@stage(fragment)
+@fragment
fn main(@location(0) fragUV: vec2<f32>,
@location(1) fragPosition: vec4<f32>) -> @location(0) vec4<f32> {
return doSample(mySampler, fragUV) * fragPosition;
})";
- Inspector& inspector = Initialize(shader);
- auto result = inspector.GetSamplerTextureUses("main");
- ASSERT_FALSE(inspector.has_error()) << inspector.error();
+ Inspector& inspector = Initialize(shader);
+ auto result = inspector.GetSamplerTextureUses("main");
+ ASSERT_FALSE(inspector.has_error()) << inspector.error();
- ASSERT_EQ(1u, result.size());
+ ASSERT_EQ(1u, result.size());
- EXPECT_EQ(0u, result[0].sampler_binding_point.group);
- EXPECT_EQ(1u, result[0].sampler_binding_point.binding);
- EXPECT_EQ(0u, result[0].texture_binding_point.group);
- EXPECT_EQ(2u, result[0].texture_binding_point.binding);
+ EXPECT_EQ(0u, result[0].sampler_binding_point.group);
+ EXPECT_EQ(1u, result[0].sampler_binding_point.binding);
+ EXPECT_EQ(0u, result[0].texture_binding_point.group);
+ EXPECT_EQ(2u, result[0].texture_binding_point.binding);
}
TEST_F(InspectorGetSamplerTextureUsesTest, TextureIndirect) {
- std::string shader = R"(
+ std::string shader = R"(
@group(0) @binding(1) var mySampler: sampler;
@group(0) @binding(2) var myTexture: texture_2d<f32>;
@@ -2795,26 +2619,26 @@ fn doSample(t: texture_2d<f32>, uv: vec2<f32>) -> vec4<f32> {
return textureSample(t, mySampler, uv);
}
-@stage(fragment)
+@fragment
fn main(@location(0) fragUV: vec2<f32>,
@location(1) fragPosition: vec4<f32>) -> @location(0) vec4<f32> {
return doSample(myTexture, fragUV) * fragPosition;
})";
- Inspector& inspector = Initialize(shader);
- auto result = inspector.GetSamplerTextureUses("main");
- ASSERT_FALSE(inspector.has_error()) << inspector.error();
+ Inspector& inspector = Initialize(shader);
+ auto result = inspector.GetSamplerTextureUses("main");
+ ASSERT_FALSE(inspector.has_error()) << inspector.error();
- ASSERT_EQ(1u, result.size());
+ ASSERT_EQ(1u, result.size());
- EXPECT_EQ(0u, result[0].sampler_binding_point.group);
- EXPECT_EQ(1u, result[0].sampler_binding_point.binding);
- EXPECT_EQ(0u, result[0].texture_binding_point.group);
- EXPECT_EQ(2u, result[0].texture_binding_point.binding);
+ EXPECT_EQ(0u, result[0].sampler_binding_point.group);
+ EXPECT_EQ(1u, result[0].sampler_binding_point.binding);
+ EXPECT_EQ(0u, result[0].texture_binding_point.group);
+ EXPECT_EQ(2u, result[0].texture_binding_point.binding);
}
TEST_F(InspectorGetSamplerTextureUsesTest, NeitherIndirect) {
- std::string shader = R"(
+ std::string shader = R"(
@group(0) @binding(1) var mySampler: sampler;
@group(0) @binding(2) var myTexture: texture_2d<f32>;
@@ -2822,26 +2646,26 @@ fn doSample(uv: vec2<f32>) -> vec4<f32> {
return textureSample(myTexture, mySampler, uv);
}
-@stage(fragment)
+@fragment
fn main(@location(0) fragUV: vec2<f32>,
@location(1) fragPosition: vec4<f32>) -> @location(0) vec4<f32> {
return doSample(fragUV) * fragPosition;
})";
- Inspector& inspector = Initialize(shader);
- auto result = inspector.GetSamplerTextureUses("main");
- ASSERT_FALSE(inspector.has_error()) << inspector.error();
+ Inspector& inspector = Initialize(shader);
+ auto result = inspector.GetSamplerTextureUses("main");
+ ASSERT_FALSE(inspector.has_error()) << inspector.error();
- ASSERT_EQ(1u, result.size());
+ ASSERT_EQ(1u, result.size());
- EXPECT_EQ(0u, result[0].sampler_binding_point.group);
- EXPECT_EQ(1u, result[0].sampler_binding_point.binding);
- EXPECT_EQ(0u, result[0].texture_binding_point.group);
- EXPECT_EQ(2u, result[0].texture_binding_point.binding);
+ EXPECT_EQ(0u, result[0].sampler_binding_point.group);
+ EXPECT_EQ(1u, result[0].sampler_binding_point.binding);
+ EXPECT_EQ(0u, result[0].texture_binding_point.group);
+ EXPECT_EQ(2u, result[0].texture_binding_point.binding);
}
TEST_F(InspectorGetSamplerTextureUsesTest, Complex) {
- std::string shader = R"(
+ std::string shader = R"(
@group(0) @binding(1) var mySampler: sampler;
@group(0) @binding(2) var myTexture: texture_2d<f32>;
@@ -2862,152 +2686,265 @@ fn Z(t: texture_2d<f32>, s: sampler, uv: vec2<f32>) -> vec4<f32> {
return X(t, s, uv) + Y(t, s, uv);
}
-@stage(fragment)
+@fragment
fn via_call(@location(0) fragUV: vec2<f32>,
@location(1) fragPosition: vec4<f32>) -> @location(0) vec4<f32> {
return Z(myTexture, mySampler, fragUV) * fragPosition;
}
-@stage(fragment)
+@fragment
fn via_ptr(@location(0) fragUV: vec2<f32>,
@location(1) fragPosition: vec4<f32>) -> @location(0) vec4<f32> {
return textureSample(myTexture, mySampler, fragUV) + fragPosition;
}
-@stage(fragment)
+@fragment
fn direct(@location(0) fragUV: vec2<f32>,
@location(1) fragPosition: vec4<f32>) -> @location(0) vec4<f32> {
return textureSample(myTexture, mySampler, fragUV) + fragPosition;
})";
- Inspector& inspector = Initialize(shader);
+ Inspector& inspector = Initialize(shader);
- {
- auto result = inspector.GetSamplerTextureUses("via_call");
- ASSERT_FALSE(inspector.has_error()) << inspector.error();
+ {
+ auto result = inspector.GetSamplerTextureUses("via_call");
+ ASSERT_FALSE(inspector.has_error()) << inspector.error();
- ASSERT_EQ(1u, result.size());
+ ASSERT_EQ(1u, result.size());
- EXPECT_EQ(0u, result[0].sampler_binding_point.group);
- EXPECT_EQ(1u, result[0].sampler_binding_point.binding);
- EXPECT_EQ(0u, result[0].texture_binding_point.group);
- EXPECT_EQ(2u, result[0].texture_binding_point.binding);
- }
+ EXPECT_EQ(0u, result[0].sampler_binding_point.group);
+ EXPECT_EQ(1u, result[0].sampler_binding_point.binding);
+ EXPECT_EQ(0u, result[0].texture_binding_point.group);
+ EXPECT_EQ(2u, result[0].texture_binding_point.binding);
+ }
- {
- auto result = inspector.GetSamplerTextureUses("via_ptr");
- ASSERT_FALSE(inspector.has_error()) << inspector.error();
+ {
+ auto result = inspector.GetSamplerTextureUses("via_ptr");
+ ASSERT_FALSE(inspector.has_error()) << inspector.error();
- ASSERT_EQ(1u, result.size());
+ ASSERT_EQ(1u, result.size());
- EXPECT_EQ(0u, result[0].sampler_binding_point.group);
- EXPECT_EQ(1u, result[0].sampler_binding_point.binding);
- EXPECT_EQ(0u, result[0].texture_binding_point.group);
- EXPECT_EQ(2u, result[0].texture_binding_point.binding);
- }
+ EXPECT_EQ(0u, result[0].sampler_binding_point.group);
+ EXPECT_EQ(1u, result[0].sampler_binding_point.binding);
+ EXPECT_EQ(0u, result[0].texture_binding_point.group);
+ EXPECT_EQ(2u, result[0].texture_binding_point.binding);
+ }
- {
- auto result = inspector.GetSamplerTextureUses("direct");
- ASSERT_FALSE(inspector.has_error()) << inspector.error();
+ {
+ auto result = inspector.GetSamplerTextureUses("direct");
+ ASSERT_FALSE(inspector.has_error()) << inspector.error();
- ASSERT_EQ(1u, result.size());
+ ASSERT_EQ(1u, result.size());
- EXPECT_EQ(0u, result[0].sampler_binding_point.group);
- EXPECT_EQ(1u, result[0].sampler_binding_point.binding);
- EXPECT_EQ(0u, result[0].texture_binding_point.group);
- EXPECT_EQ(2u, result[0].texture_binding_point.binding);
- }
+ EXPECT_EQ(0u, result[0].sampler_binding_point.group);
+ EXPECT_EQ(1u, result[0].sampler_binding_point.binding);
+ EXPECT_EQ(0u, result[0].texture_binding_point.group);
+ EXPECT_EQ(2u, result[0].texture_binding_point.binding);
+ }
}
TEST_F(InspectorGetWorkgroupStorageSizeTest, Empty) {
- MakeEmptyBodyFunction("ep_func",
- ast::AttributeList{Stage(ast::PipelineStage::kCompute),
- WorkgroupSize(1)});
- Inspector& inspector = Build();
- EXPECT_EQ(0u, inspector.GetWorkgroupStorageSize("ep_func"));
+ MakeEmptyBodyFunction(
+ "ep_func", ast::AttributeList{Stage(ast::PipelineStage::kCompute), WorkgroupSize(1_i)});
+ Inspector& inspector = Build();
+ EXPECT_EQ(0u, inspector.GetWorkgroupStorageSize("ep_func"));
}
TEST_F(InspectorGetWorkgroupStorageSizeTest, Simple) {
- AddWorkgroupStorage("wg_f32", ty.f32());
- MakePlainGlobalReferenceBodyFunction("f32_func", "wg_f32", ty.f32(), {});
+ AddWorkgroupStorage("wg_f32", ty.f32());
+ MakePlainGlobalReferenceBodyFunction("f32_func", "wg_f32", ty.f32(), {});
- MakeCallerBodyFunction("ep_func", {"f32_func"},
- ast::AttributeList{
- Stage(ast::PipelineStage::kCompute),
- WorkgroupSize(1),
- });
+ MakeCallerBodyFunction("ep_func", {"f32_func"},
+ ast::AttributeList{
+ Stage(ast::PipelineStage::kCompute),
+ WorkgroupSize(1_i),
+ });
- Inspector& inspector = Build();
- EXPECT_EQ(4u, inspector.GetWorkgroupStorageSize("ep_func"));
+ Inspector& inspector = Build();
+ EXPECT_EQ(4u, inspector.GetWorkgroupStorageSize("ep_func"));
}
TEST_F(InspectorGetWorkgroupStorageSizeTest, CompoundTypes) {
- // This struct should occupy 68 bytes. 4 from the i32 field, and another 64
- // from the 4-element array with 16-byte stride.
- auto* wg_struct_type = MakeStructType(
- "WgStruct", {ty.i32(), ty.array(ty.i32(), 4, /*stride=*/16)});
- AddWorkgroupStorage("wg_struct_var", ty.Of(wg_struct_type));
- MakeStructVariableReferenceBodyFunction("wg_struct_func", "wg_struct_var",
- {{0, ty.i32()}});
+ // This struct should occupy 68 bytes. 4 from the i32 field, and another 64
+ // from the 4-element array with 16-byte stride.
+ auto* wg_struct_type =
+ MakeStructType("WgStruct", {ty.i32(), ty.array(ty.i32(), 4_u, /*stride=*/16)});
+ AddWorkgroupStorage("wg_struct_var", ty.Of(wg_struct_type));
+ MakeStructVariableReferenceBodyFunction("wg_struct_func", "wg_struct_var", {{0, ty.i32()}});
- // Plus another 4 bytes from this other workgroup-class f32.
- AddWorkgroupStorage("wg_f32", ty.f32());
- MakePlainGlobalReferenceBodyFunction("f32_func", "wg_f32", ty.f32(), {});
+ // Plus another 4 bytes from this other workgroup-class f32.
+ AddWorkgroupStorage("wg_f32", ty.f32());
+ MakePlainGlobalReferenceBodyFunction("f32_func", "wg_f32", ty.f32(), {});
- MakeCallerBodyFunction("ep_func", {"wg_struct_func", "f32_func"},
- ast::AttributeList{
- Stage(ast::PipelineStage::kCompute),
- WorkgroupSize(1),
- });
+ MakeCallerBodyFunction("ep_func", {"wg_struct_func", "f32_func"},
+ ast::AttributeList{
+ Stage(ast::PipelineStage::kCompute),
+ WorkgroupSize(1_i),
+ });
- Inspector& inspector = Build();
- EXPECT_EQ(72u, inspector.GetWorkgroupStorageSize("ep_func"));
+ Inspector& inspector = Build();
+ EXPECT_EQ(72u, inspector.GetWorkgroupStorageSize("ep_func"));
}
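For reference, the 72-byte expectation in the CompoundTypes test above can be reproduced by hand from the layout its comments describe. A minimal sketch, assuming the usual WGSL size/align rules (the RoundUp helper is illustrative and not part of the Tint API):

    #include <cstdint>

    // Illustrative only: recompute the expected workgroup storage size by hand.
    constexpr uint64_t RoundUp(uint64_t align, uint64_t size) {
        return ((size + align - 1) / align) * align;
    }
    // WgStruct: i32 at offset 0 (4 bytes) + array<i32, 4> with a 16-byte stride
    // (4 * 16 = 64 bytes) = 68 bytes; the separate workgroup f32 adds 4 more.
    static_assert(RoundUp(4, 4 + 4 * 16) + RoundUp(4, 4) == 72,
                  "matches the EXPECT_EQ(72u, ...) above");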
TEST_F(InspectorGetWorkgroupStorageSizeTest, AlignmentPadding) {
- // vec3<f32> has an alignment of 16 but a size of 12. We leverage this to test
- // that our padded size calculation for workgroup storage is accurate.
- AddWorkgroupStorage("wg_vec3", ty.vec3<f32>());
- MakePlainGlobalReferenceBodyFunction("wg_func", "wg_vec3", ty.vec3<f32>(),
- {});
+ // vec3<f32> has an alignment of 16 but a size of 12. We leverage this to test
+ // that our padded size calculation for workgroup storage is accurate.
+ AddWorkgroupStorage("wg_vec3", ty.vec3<f32>());
+ MakePlainGlobalReferenceBodyFunction("wg_func", "wg_vec3", ty.vec3<f32>(), {});
- MakeCallerBodyFunction("ep_func", {"wg_func"},
- ast::AttributeList{
- Stage(ast::PipelineStage::kCompute),
- WorkgroupSize(1),
- });
+ MakeCallerBodyFunction("ep_func", {"wg_func"},
+ ast::AttributeList{
+ Stage(ast::PipelineStage::kCompute),
+ WorkgroupSize(1_i),
+ });
- Inspector& inspector = Build();
- EXPECT_EQ(16u, inspector.GetWorkgroupStorageSize("ep_func"));
+ Inspector& inspector = Build();
+ EXPECT_EQ(16u, inspector.GetWorkgroupStorageSize("ep_func"));
}
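The 16-byte expectation in the AlignmentPadding test follows from the same rounding: vec3<f32> has a size of 12 but an alignment of 16, so its padded workgroup footprint is roundUp(16, 12). A one-line sketch (illustrative only):

    static_assert(((12 + 16 - 1) / 16) * 16 == 16, "vec3<f32> pads out to 16 bytes");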
TEST_F(InspectorGetWorkgroupStorageSizeTest, StructAlignment) {
- // Per WGSL spec, a struct's size is the offset its last member plus the size
- // of its last member, rounded up to the alignment of its largest member. So
- // here the struct is expected to occupy 1024 bytes of workgroup storage.
- const auto* wg_struct_type = MakeStructTypeFromMembers(
- "WgStruct",
- {MakeStructMember(0, ty.f32(),
- {create<ast::StructMemberAlignAttribute>(1024)})});
+    // Per the WGSL spec, a struct's size is the offset of its last member plus the size
+    // of its last member, rounded up to the struct's alignment (its largest member
+    // alignment). So here the struct is expected to occupy 1024 bytes of workgroup storage.
+ const auto* wg_struct_type = MakeStructTypeFromMembers(
+ "WgStruct",
+ {MakeStructMember(0, ty.f32(), {create<ast::StructMemberAlignAttribute>(1024)})});
+
+ AddWorkgroupStorage("wg_struct_var", ty.Of(wg_struct_type));
+ MakeStructVariableReferenceBodyFunction("wg_struct_func", "wg_struct_var", {{0, ty.f32()}});
- AddWorkgroupStorage("wg_struct_var", ty.Of(wg_struct_type));
- MakeStructVariableReferenceBodyFunction("wg_struct_func", "wg_struct_var",
- {{0, ty.f32()}});
+ MakeCallerBodyFunction("ep_func", {"wg_struct_func"},
+ ast::AttributeList{
+ Stage(ast::PipelineStage::kCompute),
+ WorkgroupSize(1_i),
+ });
+
+ Inspector& inspector = Build();
+ EXPECT_EQ(1024u, inspector.GetWorkgroupStorageSize("ep_func"));
+}
+
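The StructAlignment case works the same way: the struct's only member is an f32 at offset 0 with size 4, and the @align(1024) attribute raises the struct alignment to 1024, so the size rounds up to the 1024 bytes the test expects. A sketch (illustrative only):

    static_assert(((0 + 4 + 1024 - 1) / 1024) * 1024 == 1024, "roundUp(1024, 0 + 4) == 1024");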
+// Test calling GetUsedExtensionNames on an empty shader.
+TEST_F(InspectorGetUsedExtensionNamesTest, Empty) {
+ std::string shader = "";
+
+ Inspector& inspector = Initialize(shader);
+
+ auto result = inspector.GetUsedExtensionNames();
+ EXPECT_EQ(result.size(), 0u);
+}
+
+// Test calling GetUsedExtensionNames on a shader with no extension.
+TEST_F(InspectorGetUsedExtensionNamesTest, None) {
+ std::string shader = R"(
+@fragment
+fn main() {
+})";
+
+ Inspector& inspector = Initialize(shader);
+
+ auto result = inspector.GetUsedExtensionNames();
+ EXPECT_EQ(result.size(), 0u);
+}
+
+// Test calling GetUsedExtensionNames on a shader with a valid extension.
+TEST_F(InspectorGetUsedExtensionNamesTest, Simple) {
+ std::string shader = R"(
+enable f16;
+
+@fragment
+fn main() {
+})";
+
+ Inspector& inspector = Initialize(shader);
+
+ auto result = inspector.GetUsedExtensionNames();
+ EXPECT_EQ(result.size(), 1u);
+ EXPECT_EQ(result[0], "f16");
+}
+
+// Test calling GetUsedExtensionNames on a shader with an extension enabled
+// multiple times.
+TEST_F(InspectorGetUsedExtensionNamesTest, Duplicated) {
+ std::string shader = R"(
+enable f16;
+enable f16;
+
+@fragment
+fn main() {
+})";
+
+ Inspector& inspector = Initialize(shader);
+
+ auto result = inspector.GetUsedExtensionNames();
+ EXPECT_EQ(result.size(), 1u);
+ EXPECT_EQ(result[0], "f16");
+}
+
+// Test calling GetEnableDirectives on an empty shader.
+TEST_F(InspectorGetEnableDirectivesTest, Empty) {
+ std::string shader = "";
+
+ Inspector& inspector = Initialize(shader);
+
+ auto result = inspector.GetEnableDirectives();
+ EXPECT_EQ(result.size(), 0u);
+}
+
+// Test calling GetEnableDirectives on a shader with no extension.
+TEST_F(InspectorGetEnableDirectivesTest, None) {
+ std::string shader = R"(
+@fragment
+fn main() {
+})";
+
+ Inspector& inspector = Initialize(shader);
+
+ auto result = inspector.GetEnableDirectives();
+ EXPECT_EQ(result.size(), 0u);
+}
+
+// Test calling GetEnableDirectives on a shader with a valid extension.
+TEST_F(InspectorGetEnableDirectivesTest, Simple) {
+ std::string shader = R"(
+enable f16;
+
+@fragment
+fn main() {
+})";
+
+ Inspector& inspector = Initialize(shader);
+
+ auto result = inspector.GetEnableDirectives();
+ EXPECT_EQ(result.size(), 1u);
+ EXPECT_EQ(result[0].first, "f16");
+ EXPECT_EQ(result[0].second.range, (Source::Range{{2, 8}, {2, 11}}));
+}
+
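The range asserted in the Simple test above lines up with the shader text: the raw string literal opens with a newline, so "enable f16;" is source line 2 and the token "f16" starts at column 8. The end coordinate of {2, 11} is consistent with an end column one past the last character of the token (an inference from this expectation, not a documented guarantee):

    // "enable f16;" -> 'f' is the 8th character of line 2, and "f16" is 3
    // characters long, so 8 + 3 gives the end column seen in the test.
    static_assert(8 + sizeof("f16") - 1 == 11, "end column is one past the token");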
+// Test calling GetEnableDirectives on a shader with an extension enabled
+// multiple times.
+TEST_F(InspectorGetEnableDirectivesTest, Duplicated) {
+ std::string shader = R"(
+enable f16;
+
+enable f16;
+@fragment
+fn main() {
+})";
- MakeCallerBodyFunction("ep_func", {"wg_struct_func"},
- ast::AttributeList{
- Stage(ast::PipelineStage::kCompute),
- WorkgroupSize(1),
- });
+ Inspector& inspector = Initialize(shader);
- Inspector& inspector = Build();
- EXPECT_EQ(1024u, inspector.GetWorkgroupStorageSize("ep_func"));
+ auto result = inspector.GetEnableDirectives();
+ EXPECT_EQ(result.size(), 2u);
+ EXPECT_EQ(result[0].first, "f16");
+ EXPECT_EQ(result[0].second.range, (Source::Range{{2, 8}, {2, 11}}));
+ EXPECT_EQ(result[1].first, "f16");
+ EXPECT_EQ(result[1].second.range, (Source::Range{{4, 8}, {4, 11}}));
}
// Crash was occurring in ::GenerateSamplerTargets, when
// ::GetSamplerTextureUses was called.
TEST_F(InspectorRegressionTest, tint967) {
- std::string shader = R"(
+ std::string shader = R"(
@group(0) @binding(1) var mySampler: sampler;
@group(0) @binding(2) var myTexture: texture_2d<f32>;
@@ -3015,14 +2952,14 @@ fn doSample(t: texture_2d<f32>, s: sampler, uv: vec2<f32>) -> vec4<f32> {
return textureSample(t, s, uv);
}
-@stage(fragment)
+@fragment
fn main(@location(0) fragUV: vec2<f32>,
@location(1) fragPosition: vec4<f32>) -> @location(0) vec4<f32> {
return doSample(myTexture, mySampler, fragUV) * fragPosition;
})";
- Inspector& inspector = Initialize(shader);
- auto result = inspector.GetSamplerTextureUses("main");
+ Inspector& inspector = Initialize(shader);
+ auto result = inspector.GetSamplerTextureUses("main");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/inspector/resource_binding.cc b/chromium/third_party/dawn/src/tint/inspector/resource_binding.cc
index 5095e2d905a..3efb59e57b1 100644
--- a/chromium/third_party/dawn/src/tint/inspector/resource_binding.cc
+++ b/chromium/third_party/dawn/src/tint/inspector/resource_binding.cc
@@ -15,100 +15,99 @@
#include "src/tint/inspector/resource_binding.h"
#include "src/tint/sem/array.h"
-#include "src/tint/sem/f32_type.h"
-#include "src/tint/sem/i32_type.h"
-#include "src/tint/sem/matrix_type.h"
+#include "src/tint/sem/f32.h"
+#include "src/tint/sem/i32.h"
+#include "src/tint/sem/matrix.h"
#include "src/tint/sem/type.h"
-#include "src/tint/sem/u32_type.h"
-#include "src/tint/sem/vector_type.h"
+#include "src/tint/sem/u32.h"
+#include "src/tint/sem/vector.h"
namespace tint::inspector {
-ResourceBinding::TextureDimension
-TypeTextureDimensionToResourceBindingTextureDimension(
+ResourceBinding::TextureDimension TypeTextureDimensionToResourceBindingTextureDimension(
const ast::TextureDimension& type_dim) {
- switch (type_dim) {
- case ast::TextureDimension::k1d:
- return ResourceBinding::TextureDimension::k1d;
- case ast::TextureDimension::k2d:
- return ResourceBinding::TextureDimension::k2d;
- case ast::TextureDimension::k2dArray:
- return ResourceBinding::TextureDimension::k2dArray;
- case ast::TextureDimension::k3d:
- return ResourceBinding::TextureDimension::k3d;
- case ast::TextureDimension::kCube:
- return ResourceBinding::TextureDimension::kCube;
- case ast::TextureDimension::kCubeArray:
- return ResourceBinding::TextureDimension::kCubeArray;
- case ast::TextureDimension::kNone:
- return ResourceBinding::TextureDimension::kNone;
- }
- return ResourceBinding::TextureDimension::kNone;
+ switch (type_dim) {
+ case ast::TextureDimension::k1d:
+ return ResourceBinding::TextureDimension::k1d;
+ case ast::TextureDimension::k2d:
+ return ResourceBinding::TextureDimension::k2d;
+ case ast::TextureDimension::k2dArray:
+ return ResourceBinding::TextureDimension::k2dArray;
+ case ast::TextureDimension::k3d:
+ return ResourceBinding::TextureDimension::k3d;
+ case ast::TextureDimension::kCube:
+ return ResourceBinding::TextureDimension::kCube;
+ case ast::TextureDimension::kCubeArray:
+ return ResourceBinding::TextureDimension::kCubeArray;
+ case ast::TextureDimension::kNone:
+ return ResourceBinding::TextureDimension::kNone;
+ }
+ return ResourceBinding::TextureDimension::kNone;
}
ResourceBinding::SampledKind BaseTypeToSampledKind(const sem::Type* base_type) {
- if (!base_type) {
- return ResourceBinding::SampledKind::kUnknown;
- }
+ if (!base_type) {
+ return ResourceBinding::SampledKind::kUnknown;
+ }
- if (auto* at = base_type->As<sem::Array>()) {
- base_type = at->ElemType();
- } else if (auto* mt = base_type->As<sem::Matrix>()) {
- base_type = mt->type();
- } else if (auto* vt = base_type->As<sem::Vector>()) {
- base_type = vt->type();
- }
+ if (auto* at = base_type->As<sem::Array>()) {
+ base_type = at->ElemType();
+ } else if (auto* mt = base_type->As<sem::Matrix>()) {
+ base_type = mt->type();
+ } else if (auto* vt = base_type->As<sem::Vector>()) {
+ base_type = vt->type();
+ }
- if (base_type->Is<sem::F32>()) {
- return ResourceBinding::SampledKind::kFloat;
- } else if (base_type->Is<sem::U32>()) {
- return ResourceBinding::SampledKind::kUInt;
- } else if (base_type->Is<sem::I32>()) {
- return ResourceBinding::SampledKind::kSInt;
- } else {
- return ResourceBinding::SampledKind::kUnknown;
- }
+ if (base_type->Is<sem::F32>()) {
+ return ResourceBinding::SampledKind::kFloat;
+ } else if (base_type->Is<sem::U32>()) {
+ return ResourceBinding::SampledKind::kUInt;
+ } else if (base_type->Is<sem::I32>()) {
+ return ResourceBinding::SampledKind::kSInt;
+ } else {
+ return ResourceBinding::SampledKind::kUnknown;
+ }
}
ResourceBinding::TexelFormat TypeTexelFormatToResourceBindingTexelFormat(
const ast::TexelFormat& image_format) {
- switch (image_format) {
- case ast::TexelFormat::kR32Uint:
- return ResourceBinding::TexelFormat::kR32Uint;
- case ast::TexelFormat::kR32Sint:
- return ResourceBinding::TexelFormat::kR32Sint;
- case ast::TexelFormat::kR32Float:
- return ResourceBinding::TexelFormat::kR32Float;
- case ast::TexelFormat::kRgba8Unorm:
- return ResourceBinding::TexelFormat::kRgba8Unorm;
- case ast::TexelFormat::kRgba8Snorm:
- return ResourceBinding::TexelFormat::kRgba8Snorm;
- case ast::TexelFormat::kRgba8Uint:
- return ResourceBinding::TexelFormat::kRgba8Uint;
- case ast::TexelFormat::kRgba8Sint:
- return ResourceBinding::TexelFormat::kRgba8Sint;
- case ast::TexelFormat::kRg32Uint:
- return ResourceBinding::TexelFormat::kRg32Uint;
- case ast::TexelFormat::kRg32Sint:
- return ResourceBinding::TexelFormat::kRg32Sint;
- case ast::TexelFormat::kRg32Float:
- return ResourceBinding::TexelFormat::kRg32Float;
- case ast::TexelFormat::kRgba16Uint:
- return ResourceBinding::TexelFormat::kRgba16Uint;
- case ast::TexelFormat::kRgba16Sint:
- return ResourceBinding::TexelFormat::kRgba16Sint;
- case ast::TexelFormat::kRgba16Float:
- return ResourceBinding::TexelFormat::kRgba16Float;
- case ast::TexelFormat::kRgba32Uint:
- return ResourceBinding::TexelFormat::kRgba32Uint;
- case ast::TexelFormat::kRgba32Sint:
- return ResourceBinding::TexelFormat::kRgba32Sint;
- case ast::TexelFormat::kRgba32Float:
- return ResourceBinding::TexelFormat::kRgba32Float;
- case ast::TexelFormat::kNone:
- return ResourceBinding::TexelFormat::kNone;
- }
- return ResourceBinding::TexelFormat::kNone;
+ switch (image_format) {
+ case ast::TexelFormat::kR32Uint:
+ return ResourceBinding::TexelFormat::kR32Uint;
+ case ast::TexelFormat::kR32Sint:
+ return ResourceBinding::TexelFormat::kR32Sint;
+ case ast::TexelFormat::kR32Float:
+ return ResourceBinding::TexelFormat::kR32Float;
+ case ast::TexelFormat::kRgba8Unorm:
+ return ResourceBinding::TexelFormat::kRgba8Unorm;
+ case ast::TexelFormat::kRgba8Snorm:
+ return ResourceBinding::TexelFormat::kRgba8Snorm;
+ case ast::TexelFormat::kRgba8Uint:
+ return ResourceBinding::TexelFormat::kRgba8Uint;
+ case ast::TexelFormat::kRgba8Sint:
+ return ResourceBinding::TexelFormat::kRgba8Sint;
+ case ast::TexelFormat::kRg32Uint:
+ return ResourceBinding::TexelFormat::kRg32Uint;
+ case ast::TexelFormat::kRg32Sint:
+ return ResourceBinding::TexelFormat::kRg32Sint;
+ case ast::TexelFormat::kRg32Float:
+ return ResourceBinding::TexelFormat::kRg32Float;
+ case ast::TexelFormat::kRgba16Uint:
+ return ResourceBinding::TexelFormat::kRgba16Uint;
+ case ast::TexelFormat::kRgba16Sint:
+ return ResourceBinding::TexelFormat::kRgba16Sint;
+ case ast::TexelFormat::kRgba16Float:
+ return ResourceBinding::TexelFormat::kRgba16Float;
+ case ast::TexelFormat::kRgba32Uint:
+ return ResourceBinding::TexelFormat::kRgba32Uint;
+ case ast::TexelFormat::kRgba32Sint:
+ return ResourceBinding::TexelFormat::kRgba32Sint;
+ case ast::TexelFormat::kRgba32Float:
+ return ResourceBinding::TexelFormat::kRgba32Float;
+ case ast::TexelFormat::kNone:
+ return ResourceBinding::TexelFormat::kNone;
+ }
+ return ResourceBinding::TexelFormat::kNone;
}
} // namespace tint::inspector
diff --git a/chromium/third_party/dawn/src/tint/inspector/resource_binding.h b/chromium/third_party/dawn/src/tint/inspector/resource_binding.h
index 1801b857d4a..6adab5a7fc4 100644
--- a/chromium/third_party/dawn/src/tint/inspector/resource_binding.h
+++ b/chromium/third_party/dawn/src/tint/inspector/resource_binding.h
@@ -20,94 +20,98 @@
#include "src/tint/ast/storage_texture.h"
#include "src/tint/ast/texture.h"
+// Forward declarations
+namespace tint::sem {
+class Type;
+} // namespace tint::sem
+
namespace tint::inspector {
/// Container for information about how a resource is bound
struct ResourceBinding {
- /// The dimensionality of a texture
- enum class TextureDimension {
- /// Invalid texture
- kNone = -1,
- /// 1 dimensional texture
- k1d,
- /// 2 dimensional texture
- k2d,
- /// 2 dimensional array texture
- k2dArray,
- /// 3 dimensional texture
- k3d,
- /// cube texture
- kCube,
- /// cube array texture
- kCubeArray,
- };
-
- /// Component type of the texture's data. Same as the Sampled Type parameter
- /// in SPIR-V OpTypeImage.
- enum class SampledKind { kUnknown = -1, kFloat, kUInt, kSInt };
-
- /// Enumerator of texel image formats
- enum class TexelFormat {
- kNone = -1,
-
- kRgba8Unorm,
- kRgba8Snorm,
- kRgba8Uint,
- kRgba8Sint,
- kRgba16Uint,
- kRgba16Sint,
- kRgba16Float,
- kR32Uint,
- kR32Sint,
- kR32Float,
- kRg32Uint,
- kRg32Sint,
- kRg32Float,
- kRgba32Uint,
- kRgba32Sint,
- kRgba32Float,
- };
-
- /// kXXX maps to entries returned by GetXXXResourceBindings call.
- enum class ResourceType {
- kUniformBuffer,
- kStorageBuffer,
- kReadOnlyStorageBuffer,
- kSampler,
- kComparisonSampler,
- kSampledTexture,
- kMultisampledTexture,
- kWriteOnlyStorageTexture,
- kDepthTexture,
- kDepthMultisampledTexture,
- kExternalTexture
- };
-
- /// Type of resource that is bound.
- ResourceType resource_type;
- /// Bind group the binding belongs
- uint32_t bind_group;
- /// Identifier to identify this binding within the bind group
- uint32_t binding;
- /// Size for this binding, in bytes, if defined.
- uint64_t size;
- /// Size for this binding without trailing structure padding, in bytes, if
- /// defined.
- uint64_t size_no_padding;
- /// Dimensionality of this binding, if defined.
- TextureDimension dim;
- /// Kind of data being sampled, if defined.
- SampledKind sampled_kind;
- /// Format of data, if defined.
- TexelFormat image_format;
+ /// The dimensionality of a texture
+ enum class TextureDimension {
+ /// Invalid texture
+ kNone = -1,
+ /// 1 dimensional texture
+ k1d,
+ /// 2 dimensional texture
+ k2d,
+ /// 2 dimensional array texture
+ k2dArray,
+ /// 3 dimensional texture
+ k3d,
+ /// cube texture
+ kCube,
+ /// cube array texture
+ kCubeArray,
+ };
+
+ /// Component type of the texture's data. Same as the Sampled Type parameter
+ /// in SPIR-V OpTypeImage.
+ enum class SampledKind { kUnknown = -1, kFloat, kUInt, kSInt };
+
+ /// Enumerator of texel image formats
+ enum class TexelFormat {
+ kNone = -1,
+
+ kRgba8Unorm,
+ kRgba8Snorm,
+ kRgba8Uint,
+ kRgba8Sint,
+ kRgba16Uint,
+ kRgba16Sint,
+ kRgba16Float,
+ kR32Uint,
+ kR32Sint,
+ kR32Float,
+ kRg32Uint,
+ kRg32Sint,
+ kRg32Float,
+ kRgba32Uint,
+ kRgba32Sint,
+ kRgba32Float,
+ };
+
+ /// kXXX maps to entries returned by GetXXXResourceBindings call.
+ enum class ResourceType {
+ kUniformBuffer,
+ kStorageBuffer,
+ kReadOnlyStorageBuffer,
+ kSampler,
+ kComparisonSampler,
+ kSampledTexture,
+ kMultisampledTexture,
+ kWriteOnlyStorageTexture,
+ kDepthTexture,
+ kDepthMultisampledTexture,
+ kExternalTexture
+ };
+
+ /// Type of resource that is bound.
+ ResourceType resource_type;
+    /// Bind group the binding belongs to
+ uint32_t bind_group;
+ /// Identifier to identify this binding within the bind group
+ uint32_t binding;
+ /// Size for this binding, in bytes, if defined.
+ uint64_t size;
+ /// Size for this binding without trailing structure padding, in bytes, if
+ /// defined.
+ uint64_t size_no_padding;
+ /// Dimensionality of this binding, if defined.
+ TextureDimension dim;
+ /// Kind of data being sampled, if defined.
+ SampledKind sampled_kind;
+ /// Format of data, if defined.
+ TexelFormat image_format;
};
/// Convert from internal ast::TextureDimension to public
/// ResourceBinding::TextureDimension
/// @param type_dim internal value to convert from
/// @returns the publicly visible equivalent
-ResourceBinding::TextureDimension
-TypeTextureDimensionToResourceBindingTextureDimension(
+ResourceBinding::TextureDimension TypeTextureDimensionToResourceBindingTextureDimension(
const ast::TextureDimension& type_dim);
/// Infer ResourceBinding::SampledKind for a given sem::Type
diff --git a/chromium/third_party/dawn/src/tint/inspector/scalar.cc b/chromium/third_party/dawn/src/tint/inspector/scalar.cc
index a08ce76b947..b0b139f02bd 100644
--- a/chromium/third_party/dawn/src/tint/inspector/scalar.cc
+++ b/chromium/third_party/dawn/src/tint/inspector/scalar.cc
@@ -19,55 +19,55 @@ namespace tint::inspector {
Scalar::Scalar() : type_(kNull) {}
Scalar::Scalar(bool val) : type_(kBool) {
- value_.b = val;
+ value_.b = val;
}
Scalar::Scalar(uint32_t val) : type_(kU32) {
- value_.u = val;
+ value_.u = val;
}
Scalar::Scalar(int32_t val) : type_(kI32) {
- value_.i = val;
+ value_.i = val;
}
Scalar::Scalar(float val) : type_(kFloat) {
- value_.f = val;
+ value_.f = val;
}
bool Scalar::IsNull() const {
- return type_ == kNull;
+ return type_ == kNull;
}
bool Scalar::IsBool() const {
- return type_ == kBool;
+ return type_ == kBool;
}
bool Scalar::IsU32() const {
- return type_ == kU32;
+ return type_ == kU32;
}
bool Scalar::IsI32() const {
- return type_ == kI32;
+ return type_ == kI32;
}
bool Scalar::IsFloat() const {
- return type_ == kFloat;
+ return type_ == kFloat;
}
bool Scalar::AsBool() const {
- return value_.b;
+ return value_.b;
}
uint32_t Scalar::AsU32() const {
- return value_.u;
+ return value_.u;
}
int32_t Scalar::AsI32() const {
- return value_.i;
+ return value_.i;
}
float Scalar::AsFloat() const {
- return value_.f;
+ return value_.f;
}
} // namespace tint::inspector
diff --git a/chromium/third_party/dawn/src/tint/inspector/scalar.h b/chromium/third_party/dawn/src/tint/inspector/scalar.h
index 4c328eedb16..1470b30abad 100644
--- a/chromium/third_party/dawn/src/tint/inspector/scalar.h
+++ b/chromium/third_party/dawn/src/tint/inspector/scalar.h
@@ -21,56 +21,56 @@ namespace tint::inspector {
/// Contains a literal scalar value
class Scalar {
- public:
- /// Null Constructor
- Scalar();
- /// @param val literal scalar value to contain
- explicit Scalar(bool val);
- /// @param val literal scalar value to contain
- explicit Scalar(uint32_t val);
- /// @param val literal scalar value to contain
- explicit Scalar(int32_t val);
- /// @param val literal scalar value to contain
- explicit Scalar(float val);
+ public:
+ /// Null Constructor
+ Scalar();
+ /// @param val literal scalar value to contain
+ explicit Scalar(bool val);
+ /// @param val literal scalar value to contain
+ explicit Scalar(uint32_t val);
+ /// @param val literal scalar value to contain
+ explicit Scalar(int32_t val);
+ /// @param val literal scalar value to contain
+ explicit Scalar(float val);
- /// @returns true if this is a null
- bool IsNull() const;
- /// @returns true if this is a bool
- bool IsBool() const;
- /// @returns true if this is a unsigned integer.
- bool IsU32() const;
- /// @returns true if this is a signed integer.
- bool IsI32() const;
- /// @returns true if this is a float.
- bool IsFloat() const;
+ /// @returns true if this is a null
+ bool IsNull() const;
+ /// @returns true if this is a bool
+ bool IsBool() const;
+    /// @returns true if this is an unsigned integer.
+ bool IsU32() const;
+ /// @returns true if this is a signed integer.
+ bool IsI32() const;
+ /// @returns true if this is a float.
+ bool IsFloat() const;
- /// @returns scalar value if bool, otherwise undefined behaviour.
- bool AsBool() const;
- /// @returns scalar value if unsigned integer, otherwise undefined behaviour.
- uint32_t AsU32() const;
- /// @returns scalar value if signed integer, otherwise undefined behaviour.
- int32_t AsI32() const;
- /// @returns scalar value if float, otherwise undefined behaviour.
- float AsFloat() const;
+ /// @returns scalar value if bool, otherwise undefined behaviour.
+ bool AsBool() const;
+ /// @returns scalar value if unsigned integer, otherwise undefined behaviour.
+ uint32_t AsU32() const;
+ /// @returns scalar value if signed integer, otherwise undefined behaviour.
+ int32_t AsI32() const;
+ /// @returns scalar value if float, otherwise undefined behaviour.
+ float AsFloat() const;
- private:
- typedef enum {
- kNull,
- kBool,
- kU32,
- kI32,
- kFloat,
- } Type;
+ private:
+ typedef enum {
+ kNull,
+ kBool,
+ kU32,
+ kI32,
+ kFloat,
+ } Type;
- typedef union {
- bool b;
- uint32_t u;
- int32_t i;
- float f;
- } Value;
+ typedef union {
+ bool b;
+ uint32_t u;
+ int32_t i;
+ float f;
+ } Value;
- Type type_;
- Value value_;
+ Type type_;
+ Value value_;
};
} // namespace tint::inspector
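As context for the reformatted Scalar wrapper above, a minimal usage sketch (illustrative only; ScalarExample is a made-up name, and the body merely exercises the constructors and accessors declared in scalar.h):

    #include <cassert>

    #include "src/tint/inspector/scalar.h"

    void ScalarExample() {
        tint::inspector::Scalar s(42u);  // the uint32_t constructor sets the kU32 tag
        assert(s.IsU32());               // the tag chosen at construction drives the Is*() queries
        assert(s.AsU32() == 42u);        // As*() is only meaningful when the matching Is*() is true
        assert(!s.IsNull());             // only the default-constructed Scalar() is null
    }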
diff --git a/chromium/third_party/dawn/src/tint/inspector/test_inspector_builder.cc b/chromium/third_party/dawn/src/tint/inspector/test_inspector_builder.cc
index 978cc8ab1c6..074f29c8581 100644
--- a/chromium/third_party/dawn/src/tint/inspector/test_inspector_builder.cc
+++ b/chromium/third_party/dawn/src/tint/inspector/test_inspector_builder.cc
@@ -27,37 +27,34 @@ namespace tint::inspector {
InspectorBuilder::InspectorBuilder() = default;
InspectorBuilder::~InspectorBuilder() = default;
-void InspectorBuilder::MakeEmptyBodyFunction(std::string name,
- ast::AttributeList attributes) {
- Func(name, ast::VariableList(), ty.void_(), ast::StatementList{Return()},
- attributes);
+void InspectorBuilder::MakeEmptyBodyFunction(std::string name, ast::AttributeList attributes) {
+ Func(name, ast::VariableList(), ty.void_(), ast::StatementList{Return()}, attributes);
}
void InspectorBuilder::MakeCallerBodyFunction(std::string caller,
std::vector<std::string> callees,
ast::AttributeList attributes) {
- ast::StatementList body;
- body.reserve(callees.size() + 1);
- for (auto callee : callees) {
- body.push_back(CallStmt(Call(callee)));
- }
- body.push_back(Return());
-
- Func(caller, ast::VariableList(), ty.void_(), body, attributes);
+ ast::StatementList body;
+ body.reserve(callees.size() + 1);
+ for (auto callee : callees) {
+ body.push_back(CallStmt(Call(callee)));
+ }
+ body.push_back(Return());
+
+ Func(caller, ast::VariableList(), ty.void_(), body, attributes);
}
const ast::Struct* InspectorBuilder::MakeInOutStruct(
std::string name,
std::vector<std::tuple<std::string, uint32_t>> inout_vars) {
- ast::StructMemberList members;
- for (auto var : inout_vars) {
- std::string member_name;
- uint32_t location;
- std::tie(member_name, location) = var;
- members.push_back(
- Member(member_name, ty.u32(), {Location(location), Flat()}));
- }
- return Structure(name, members);
+ ast::StructMemberList members;
+ for (auto var : inout_vars) {
+ std::string member_name;
+ uint32_t location;
+ std::tie(member_name, location) = var;
+ members.push_back(Member(member_name, ty.u32(), {Location(location), Flat()}));
+ }
+ return Structure(name, members);
}
const ast::Function* InspectorBuilder::MakePlainGlobalReferenceBodyFunction(
@@ -65,79 +62,74 @@ const ast::Function* InspectorBuilder::MakePlainGlobalReferenceBodyFunction(
std::string var,
const ast::Type* type,
ast::AttributeList attributes) {
- ast::StatementList stmts;
- stmts.emplace_back(Decl(Var("local_" + var, type)));
- stmts.emplace_back(Assign("local_" + var, var));
- stmts.emplace_back(Return());
+ ast::StatementList stmts;
+ stmts.emplace_back(Decl(Var("local_" + var, type)));
+ stmts.emplace_back(Assign("local_" + var, var));
+ stmts.emplace_back(Return());
- return Func(func, ast::VariableList(), ty.void_(), stmts, attributes);
+ return Func(func, ast::VariableList(), ty.void_(), stmts, attributes);
}
bool InspectorBuilder::ContainsName(const std::vector<StageVariable>& vec,
const std::string& name) {
- for (auto& s : vec) {
- if (s.name == name) {
- return true;
+ for (auto& s : vec) {
+ if (s.name == name) {
+ return true;
+ }
}
- }
- return false;
+ return false;
}
-std::string InspectorBuilder::StructMemberName(size_t idx,
- const ast::Type* type) {
- return std::to_string(idx) + type->FriendlyName(Symbols());
+std::string InspectorBuilder::StructMemberName(size_t idx, const ast::Type* type) {
+ return std::to_string(idx) + type->FriendlyName(Symbols());
}
-const ast::Struct* InspectorBuilder::MakeStructType(
- const std::string& name,
- std::vector<const ast::Type*> member_types) {
- ast::StructMemberList members;
- for (auto* type : member_types) {
- members.push_back(MakeStructMember(members.size(), type, {}));
- }
- return MakeStructTypeFromMembers(name, std::move(members));
+const ast::Struct* InspectorBuilder::MakeStructType(const std::string& name,
+ std::vector<const ast::Type*> member_types) {
+ ast::StructMemberList members;
+ for (auto* type : member_types) {
+ members.push_back(MakeStructMember(members.size(), type, {}));
+ }
+ return MakeStructTypeFromMembers(name, std::move(members));
}
-const ast::Struct* InspectorBuilder::MakeStructTypeFromMembers(
- const std::string& name,
- ast::StructMemberList members) {
- return Structure(name, std::move(members));
+const ast::Struct* InspectorBuilder::MakeStructTypeFromMembers(const std::string& name,
+ ast::StructMemberList members) {
+ return Structure(name, std::move(members));
}
-const ast::StructMember* InspectorBuilder::MakeStructMember(
- size_t index,
- const ast::Type* type,
- ast::AttributeList attributes) {
- return Member(StructMemberName(index, type), type, std::move(attributes));
+const ast::StructMember* InspectorBuilder::MakeStructMember(size_t index,
+ const ast::Type* type,
+ ast::AttributeList attributes) {
+ return Member(StructMemberName(index, type), type, std::move(attributes));
}
const ast::Struct* InspectorBuilder::MakeUniformBufferType(
const std::string& name,
std::vector<const ast::Type*> member_types) {
- return MakeStructType(name, member_types);
+ return MakeStructType(name, member_types);
}
std::function<const ast::TypeName*()> InspectorBuilder::MakeStorageBufferTypes(
const std::string& name,
std::vector<const ast::Type*> member_types) {
- MakeStructType(name, member_types);
- return [this, name] { return ty.type_name(name); };
+ MakeStructType(name, member_types);
+ return [this, name] { return ty.type_name(name); };
}
void InspectorBuilder::AddUniformBuffer(const std::string& name,
const ast::Type* type,
uint32_t group,
uint32_t binding) {
- Global(name, type, ast::StorageClass::kUniform,
- ast::AttributeList{
- create<ast::BindingAttribute>(binding),
- create<ast::GroupAttribute>(group),
- });
+ Global(name, type, ast::StorageClass::kUniform,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(binding),
+ create<ast::GroupAttribute>(group),
+ });
}
-void InspectorBuilder::AddWorkgroupStorage(const std::string& name,
- const ast::Type* type) {
- Global(name, type, ast::StorageClass::kWorkgroup);
+void InspectorBuilder::AddWorkgroupStorage(const std::string& name, const ast::Type* type) {
+ Global(name, type, ast::StorageClass::kWorkgroup);
}
void InspectorBuilder::AddStorageBuffer(const std::string& name,
@@ -145,76 +137,72 @@ void InspectorBuilder::AddStorageBuffer(const std::string& name,
ast::Access access,
uint32_t group,
uint32_t binding) {
- Global(name, type, ast::StorageClass::kStorage, access,
- ast::AttributeList{
- create<ast::BindingAttribute>(binding),
- create<ast::GroupAttribute>(group),
- });
+ Global(name, type, ast::StorageClass::kStorage, access,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(binding),
+ create<ast::GroupAttribute>(group),
+ });
}
void InspectorBuilder::MakeStructVariableReferenceBodyFunction(
std::string func_name,
std::string struct_name,
std::vector<std::tuple<size_t, const ast::Type*>> members) {
- ast::StatementList stmts;
- for (auto member : members) {
- size_t member_idx;
- const ast::Type* member_type;
- std::tie(member_idx, member_type) = member;
- std::string member_name = StructMemberName(member_idx, member_type);
-
- stmts.emplace_back(Decl(Var("local" + member_name, member_type)));
- }
+ ast::StatementList stmts;
+ for (auto member : members) {
+ size_t member_idx;
+ const ast::Type* member_type;
+ std::tie(member_idx, member_type) = member;
+ std::string member_name = StructMemberName(member_idx, member_type);
+
+ stmts.emplace_back(Decl(Var("local" + member_name, member_type)));
+ }
- for (auto member : members) {
- size_t member_idx;
- const ast::Type* member_type;
- std::tie(member_idx, member_type) = member;
- std::string member_name = StructMemberName(member_idx, member_type);
+ for (auto member : members) {
+ size_t member_idx;
+ const ast::Type* member_type;
+ std::tie(member_idx, member_type) = member;
+ std::string member_name = StructMemberName(member_idx, member_type);
- stmts.emplace_back(Assign("local" + member_name,
- MemberAccessor(struct_name, member_name)));
- }
+ stmts.emplace_back(Assign("local" + member_name, MemberAccessor(struct_name, member_name)));
+ }
- stmts.emplace_back(Return());
+ stmts.emplace_back(Return());
- Func(func_name, ast::VariableList(), ty.void_(), stmts, ast::AttributeList{});
+ Func(func_name, ast::VariableList(), ty.void_(), stmts, ast::AttributeList{});
}
-void InspectorBuilder::AddSampler(const std::string& name,
- uint32_t group,
- uint32_t binding) {
- Global(name, sampler_type(),
- ast::AttributeList{
- create<ast::BindingAttribute>(binding),
- create<ast::GroupAttribute>(group),
- });
+void InspectorBuilder::AddSampler(const std::string& name, uint32_t group, uint32_t binding) {
+ Global(name, sampler_type(),
+ ast::AttributeList{
+ create<ast::BindingAttribute>(binding),
+ create<ast::GroupAttribute>(group),
+ });
}
void InspectorBuilder::AddComparisonSampler(const std::string& name,
uint32_t group,
uint32_t binding) {
- Global(name, comparison_sampler_type(),
- ast::AttributeList{
- create<ast::BindingAttribute>(binding),
- create<ast::GroupAttribute>(group),
- });
+ Global(name, comparison_sampler_type(),
+ ast::AttributeList{
+ create<ast::BindingAttribute>(binding),
+ create<ast::GroupAttribute>(group),
+ });
}
void InspectorBuilder::AddResource(const std::string& name,
const ast::Type* type,
uint32_t group,
uint32_t binding) {
- Global(name, type,
- ast::AttributeList{
- create<ast::BindingAttribute>(binding),
- create<ast::GroupAttribute>(group),
- });
+ Global(name, type,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(binding),
+ create<ast::GroupAttribute>(group),
+ });
}
-void InspectorBuilder::AddGlobalVariable(const std::string& name,
- const ast::Type* type) {
- Global(name, type, ast::StorageClass::kPrivate);
+void InspectorBuilder::AddGlobalVariable(const std::string& name, const ast::Type* type) {
+ Global(name, type, ast::StorageClass::kPrivate);
}
const ast::Function* InspectorBuilder::MakeSamplerReferenceBodyFunction(
@@ -224,16 +212,16 @@ const ast::Function* InspectorBuilder::MakeSamplerReferenceBodyFunction(
const std::string& coords_name,
const ast::Type* base_type,
ast::AttributeList attributes) {
- std::string result_name = "sampler_result";
+ std::string result_name = "sampler_result";
- ast::StatementList stmts;
- stmts.emplace_back(Decl(Var(result_name, ty.vec(base_type, 4))));
+ ast::StatementList stmts;
+ stmts.emplace_back(Decl(Var(result_name, ty.vec(base_type, 4))));
- stmts.emplace_back(Assign(result_name, Call("textureSample", texture_name,
- sampler_name, coords_name)));
- stmts.emplace_back(Return());
+ stmts.emplace_back(
+ Assign(result_name, Call("textureSample", texture_name, sampler_name, coords_name)));
+ stmts.emplace_back(Return());
- return Func(func_name, ast::VariableList(), ty.void_(), stmts, attributes);
+ return Func(func_name, ast::VariableList(), ty.void_(), stmts, attributes);
}
const ast::Function* InspectorBuilder::MakeSamplerReferenceBodyFunction(
@@ -244,22 +232,20 @@ const ast::Function* InspectorBuilder::MakeSamplerReferenceBodyFunction(
const std::string& array_index,
const ast::Type* base_type,
ast::AttributeList attributes) {
- std::string result_name = "sampler_result";
+ std::string result_name = "sampler_result";
- ast::StatementList stmts;
+ ast::StatementList stmts;
- stmts.emplace_back(Decl(Var("sampler_result", ty.vec(base_type, 4))));
+ stmts.emplace_back(Decl(Var("sampler_result", ty.vec(base_type, 4))));
- stmts.emplace_back(
- Assign("sampler_result", Call("textureSample", texture_name, sampler_name,
- coords_name, array_index)));
- stmts.emplace_back(Return());
+ stmts.emplace_back(Assign("sampler_result", Call("textureSample", texture_name, sampler_name,
+ coords_name, array_index)));
+ stmts.emplace_back(Return());
- return Func(func_name, ast::VariableList(), ty.void_(), stmts, attributes);
+ return Func(func_name, ast::VariableList(), ty.void_(), stmts, attributes);
}
-const ast::Function*
-InspectorBuilder::MakeComparisonSamplerReferenceBodyFunction(
+const ast::Function* InspectorBuilder::MakeComparisonSamplerReferenceBodyFunction(
const std::string& func_name,
const std::string& texture_name,
const std::string& sampler_name,
@@ -267,66 +253,63 @@ InspectorBuilder::MakeComparisonSamplerReferenceBodyFunction(
const std::string& depth_name,
const ast::Type* base_type,
ast::AttributeList attributes) {
- std::string result_name = "sampler_result";
+ std::string result_name = "sampler_result";
- ast::StatementList stmts;
+ ast::StatementList stmts;
- stmts.emplace_back(Decl(Var("sampler_result", base_type)));
- stmts.emplace_back(
- Assign("sampler_result", Call("textureSampleCompare", texture_name,
- sampler_name, coords_name, depth_name)));
- stmts.emplace_back(Return());
+ stmts.emplace_back(Decl(Var("sampler_result", base_type)));
+ stmts.emplace_back(Assign("sampler_result", Call("textureSampleCompare", texture_name,
+ sampler_name, coords_name, depth_name)));
+ stmts.emplace_back(Return());
- return Func(func_name, ast::VariableList(), ty.void_(), stmts, attributes);
+ return Func(func_name, ast::VariableList(), ty.void_(), stmts, attributes);
}
-const ast::Type* InspectorBuilder::GetBaseType(
- ResourceBinding::SampledKind sampled_kind) {
- switch (sampled_kind) {
- case ResourceBinding::SampledKind::kFloat:
- return ty.f32();
- case ResourceBinding::SampledKind::kSInt:
- return ty.i32();
- case ResourceBinding::SampledKind::kUInt:
- return ty.u32();
- default:
- return nullptr;
- }
+const ast::Type* InspectorBuilder::GetBaseType(ResourceBinding::SampledKind sampled_kind) {
+ switch (sampled_kind) {
+ case ResourceBinding::SampledKind::kFloat:
+ return ty.f32();
+ case ResourceBinding::SampledKind::kSInt:
+ return ty.i32();
+ case ResourceBinding::SampledKind::kUInt:
+ return ty.u32();
+ default:
+ return nullptr;
+ }
}
const ast::Type* InspectorBuilder::GetCoordsType(ast::TextureDimension dim,
const ast::Type* scalar) {
- switch (dim) {
- case ast::TextureDimension::k1d:
- return scalar;
- case ast::TextureDimension::k2d:
- case ast::TextureDimension::k2dArray:
- return create<ast::Vector>(scalar, 2);
- case ast::TextureDimension::k3d:
- case ast::TextureDimension::kCube:
- case ast::TextureDimension::kCubeArray:
- return create<ast::Vector>(scalar, 3);
- default:
- [=]() { FAIL() << "Unsupported texture dimension: " << dim; }();
- }
- return nullptr;
+ switch (dim) {
+ case ast::TextureDimension::k1d:
+ return scalar;
+ case ast::TextureDimension::k2d:
+ case ast::TextureDimension::k2dArray:
+ return create<ast::Vector>(scalar, 2);
+ case ast::TextureDimension::k3d:
+ case ast::TextureDimension::kCube:
+ case ast::TextureDimension::kCubeArray:
+ return create<ast::Vector>(scalar, 3);
+ default:
+ [=]() { FAIL() << "Unsupported texture dimension: " << dim; }();
+ }
+ return nullptr;
}
-const ast::Type* InspectorBuilder::MakeStorageTextureTypes(
- ast::TextureDimension dim,
- ast::TexelFormat format) {
- return ty.storage_texture(dim, format, ast::Access::kWrite);
+const ast::Type* InspectorBuilder::MakeStorageTextureTypes(ast::TextureDimension dim,
+ ast::TexelFormat format) {
+ return ty.storage_texture(dim, format, ast::Access::kWrite);
}
void InspectorBuilder::AddStorageTexture(const std::string& name,
const ast::Type* type,
uint32_t group,
uint32_t binding) {
- Global(name, type,
- ast::AttributeList{
- create<ast::BindingAttribute>(binding),
- create<ast::GroupAttribute>(group),
- });
+ Global(name, type,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(binding),
+ create<ast::GroupAttribute>(group),
+ });
}
const ast::Function* InspectorBuilder::MakeStorageTextureBodyFunction(
@@ -334,64 +317,62 @@ const ast::Function* InspectorBuilder::MakeStorageTextureBodyFunction(
const std::string& st_name,
const ast::Type* dim_type,
ast::AttributeList attributes) {
- ast::StatementList stmts;
+ ast::StatementList stmts;
- stmts.emplace_back(Decl(Var("dim", dim_type)));
- stmts.emplace_back(Assign("dim", Call("textureDimensions", st_name)));
- stmts.emplace_back(Return());
+ stmts.emplace_back(Decl(Var("dim", dim_type)));
+ stmts.emplace_back(Assign("dim", Call("textureDimensions", st_name)));
+ stmts.emplace_back(Return());
- return Func(func_name, ast::VariableList(), ty.void_(), stmts, attributes);
+ return Func(func_name, ast::VariableList(), ty.void_(), stmts, attributes);
}
-std::function<const ast::Type*()> InspectorBuilder::GetTypeFunction(
- ComponentType component,
- CompositionType composition) {
- std::function<const ast::Type*()> func;
- switch (component) {
- case ComponentType::kFloat:
- func = [this]() -> const ast::Type* { return ty.f32(); };
- break;
- case ComponentType::kSInt:
- func = [this]() -> const ast::Type* { return ty.i32(); };
- break;
- case ComponentType::kUInt:
- func = [this]() -> const ast::Type* { return ty.u32(); };
- break;
- case ComponentType::kUnknown:
- return []() -> const ast::Type* { return nullptr; };
- }
-
- uint32_t n;
- switch (composition) {
- case CompositionType::kScalar:
- return func;
- case CompositionType::kVec2:
- n = 2;
- break;
- case CompositionType::kVec3:
- n = 3;
- break;
- case CompositionType::kVec4:
- n = 4;
- break;
- default:
- return []() -> ast::Type* { return nullptr; };
- }
-
- return [this, func, n]() -> const ast::Type* { return ty.vec(func(), n); };
+std::function<const ast::Type*()> InspectorBuilder::GetTypeFunction(ComponentType component,
+ CompositionType composition) {
+ std::function<const ast::Type*()> func;
+ switch (component) {
+ case ComponentType::kFloat:
+ func = [this]() -> const ast::Type* { return ty.f32(); };
+ break;
+ case ComponentType::kSInt:
+ func = [this]() -> const ast::Type* { return ty.i32(); };
+ break;
+ case ComponentType::kUInt:
+ func = [this]() -> const ast::Type* { return ty.u32(); };
+ break;
+ case ComponentType::kUnknown:
+ return []() -> const ast::Type* { return nullptr; };
+ }
+
+ uint32_t n;
+ switch (composition) {
+ case CompositionType::kScalar:
+ return func;
+ case CompositionType::kVec2:
+ n = 2;
+ break;
+ case CompositionType::kVec3:
+ n = 3;
+ break;
+ case CompositionType::kVec4:
+ n = 4;
+ break;
+ default:
+ return []() -> ast::Type* { return nullptr; };
+ }
+
+ return [this, func, n]() -> const ast::Type* { return ty.vec(func(), n); };
}
Inspector& InspectorBuilder::Build() {
- if (inspector_) {
+ if (inspector_) {
+ return *inspector_;
+ }
+ program_ = std::make_unique<Program>(std::move(*this));
+ [&]() {
+ ASSERT_TRUE(program_->IsValid()) << diag::Formatter().format(program_->Diagnostics());
+ }();
+ inspector_ = std::make_unique<Inspector>(program_.get());
return *inspector_;
- }
- program_ = std::make_unique<Program>(std::move(*this));
- [&]() {
- ASSERT_TRUE(program_->IsValid())
- << diag::Formatter().format(program_->Diagnostics());
- }();
- inspector_ = std::make_unique<Inspector>(program_.get());
- return *inspector_;
}
} // namespace tint::inspector
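For reference, the generator returned by GetTypeFunction above defers AST type construction until it is invoked. A minimal sketch of its use (illustrative only, written as if inside namespace tint::inspector; the enum values are the ComponentType/CompositionType cases handled in the switch above):

    // Sketch: deduce a vec3<f32> AST type via the generator, as a test body would.
    const ast::Type* MakeVec3F32(InspectorBuilder& b) {
        auto make_type = b.GetTypeFunction(ComponentType::kFloat, CompositionType::kVec3);
        return make_type();  // Lazily builds b.ty.vec(b.ty.f32(), 3), i.e. vec3<f32>.
    }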
diff --git a/chromium/third_party/dawn/src/tint/inspector/test_inspector_builder.h b/chromium/third_party/dawn/src/tint/inspector/test_inspector_builder.h
index 97a2ae369af..391e1a1479c 100644
--- a/chromium/third_party/dawn/src/tint/inspector/test_inspector_builder.h
+++ b/chromium/third_party/dawn/src/tint/inspector/test_inspector_builder.h
@@ -26,10 +26,10 @@
#include "src/tint/ast/stage_attribute.h"
#include "src/tint/ast/workgroup_attribute.h"
#include "src/tint/program_builder.h"
-#include "src/tint/sem/depth_texture_type.h"
-#include "src/tint/sem/external_texture_type.h"
-#include "src/tint/sem/multisampled_texture_type.h"
-#include "src/tint/sem/sampled_texture_type.h"
+#include "src/tint/sem/depth_texture.h"
+#include "src/tint/sem/external_texture.h"
+#include "src/tint/sem/multisampled_texture.h"
+#include "src/tint/sem/sampled_texture.h"
#include "src/tint/sem/variable.h"
#include "tint/tint.h"
@@ -37,346 +37,327 @@ namespace tint::inspector {
/// Utility class for building programs in inspector tests
class InspectorBuilder : public ProgramBuilder {
- public:
- InspectorBuilder();
- ~InspectorBuilder() override;
-
- /// Generates an empty function
- /// @param name name of the function created
- /// @param attributes the function attributes
- void MakeEmptyBodyFunction(std::string name, ast::AttributeList attributes);
-
- /// Generates a function that calls other functions
- /// @param caller name of the function created
- /// @param callees names of the functions to be called
- /// @param attributes the function attributes
- void MakeCallerBodyFunction(std::string caller,
- std::vector<std::string> callees,
- ast::AttributeList attributes);
-
- /// Generates a struct that contains user-defined IO members
- /// @param name the name of the generated struct
- /// @param inout_vars tuples of {name, loc} that will be the struct members
- /// @returns a structure object
- const ast::Struct* MakeInOutStruct(
- std::string name,
- std::vector<std::tuple<std::string, uint32_t>> inout_vars);
-
- // TODO(crbug.com/tint/697): Remove this.
- /// Add In/Out variables to the global variables
- /// @param inout_vars tuples of {in, out} that will be added as entries to the
- /// global variables
- void AddInOutVariables(
- std::vector<std::tuple<std::string, std::string>> inout_vars);
-
- // TODO(crbug.com/tint/697): Remove this.
- /// Generates a function that references in/out variables
- /// @param name name of the function created
- /// @param inout_vars tuples of {in, out} that will be converted into out = in
- /// calls in the function body
- /// @param attributes the function attributes
- void MakeInOutVariableBodyFunction(
- std::string name,
- std::vector<std::tuple<std::string, std::string>> inout_vars,
- ast::AttributeList attributes);
-
- // TODO(crbug.com/tint/697): Remove this.
- /// Generates a function that references in/out variables and calls another
- /// function.
- /// @param caller name of the function created
- /// @param callee name of the function to be called
- /// @param inout_vars tuples of {in, out} that will be converted into out = in
- /// calls in the function body
- /// @param attributes the function attributes
- /// @returns a function object
- const ast::Function* MakeInOutVariableCallerBodyFunction(
- std::string caller,
- std::string callee,
- std::vector<std::tuple<std::string, std::string>> inout_vars,
- ast::AttributeList attributes);
-
- /// Add a pipeline constant to the global variables, with a specific ID.
- /// @param name name of the variable to add
- /// @param id id number for the constant id
- /// @param type type of the variable
- /// @param constructor val to initialize the constant with, if NULL no
- /// constructor will be added.
- /// @returns the constant that was created
- const ast::Variable* AddOverridableConstantWithID(
- std::string name,
- uint32_t id,
- const ast::Type* type,
- const ast::Expression* constructor) {
- return Override(name, type, constructor, {Id(id)});
- }
-
- /// Add a pipeline constant to the global variables, without a specific ID.
- /// @param name name of the variable to add
- /// @param type type of the variable
- /// @param constructor val to initialize the constant with, if NULL no
- /// constructor will be added.
- /// @returns the constant that was created
- const ast::Variable* AddOverridableConstantWithoutID(
- std::string name,
- const ast::Type* type,
- const ast::Expression* constructor) {
- return Override(name, type, constructor);
- }
-
- /// Generates a function that references module-scoped, plain-typed constant
- /// or variable.
- /// @param func name of the function created
- /// @param var name of the constant to be reference
- /// @param type type of the const being referenced
- /// @param attributes the function attributes
- /// @returns a function object
- const ast::Function* MakePlainGlobalReferenceBodyFunction(
- std::string func,
- std::string var,
- const ast::Type* type,
- ast::AttributeList attributes);
-
- /// @param vec Vector of StageVariable to be searched
- /// @param name Name to be searching for
- /// @returns true if name is in vec, otherwise false
- bool ContainsName(const std::vector<StageVariable>& vec,
- const std::string& name);
-
- /// Builds a string for accessing a member in a generated struct
- /// @param idx index of member
- /// @param type type of member
- /// @returns a string for the member
- std::string StructMemberName(size_t idx, const ast::Type* type);
-
- /// Generates a struct type
- /// @param name name for the type
- /// @param member_types a vector of member types
- /// @returns a struct type
- const ast::Struct* MakeStructType(const std::string& name,
- std::vector<const ast::Type*> member_types);
-
- /// Generates a struct type from a list of member nodes.
- /// @param name name for the struct type
- /// @param members a vector of members
- /// @returns a struct type
- const ast::Struct* MakeStructTypeFromMembers(const std::string& name,
- ast::StructMemberList members);
-
- /// Generates a struct member with a specified index and type.
- /// @param index index of the field within the struct
- /// @param type the type of the member field
- /// @param attributes a list of attributes to apply to the member field
- /// @returns a struct member
- const ast::StructMember* MakeStructMember(size_t index,
- const ast::Type* type,
- ast::AttributeList attributes);
-
- /// Generates types appropriate for using in an uniform buffer
- /// @param name name for the type
- /// @param member_types a vector of member types
- /// @returns a struct type that has the layout for an uniform buffer.
- const ast::Struct* MakeUniformBufferType(
- const std::string& name,
- std::vector<const ast::Type*> member_types);
-
- /// Generates types appropriate for using in a storage buffer
- /// @param name name for the type
- /// @param member_types a vector of member types
- /// @returns a function that returns the created structure.
- std::function<const ast::TypeName*()> MakeStorageBufferTypes(
- const std::string& name,
- std::vector<const ast::Type*> member_types);
-
- /// Adds an uniform buffer variable to the program
- /// @param name the name of the variable
- /// @param type the type to use
- /// @param group the binding/group/ to use for the uniform buffer
- /// @param binding the binding number to use for the uniform buffer
- void AddUniformBuffer(const std::string& name,
- const ast::Type* type,
- uint32_t group,
- uint32_t binding);
-
- /// Adds a workgroup storage variable to the program
- /// @param name the name of the variable
- /// @param type the type of the variable
- void AddWorkgroupStorage(const std::string& name, const ast::Type* type);
-
- /// Adds a storage buffer variable to the program
- /// @param name the name of the variable
- /// @param type the type to use
- /// @param access the storage buffer access control
- /// @param group the binding/group to use for the storage buffer
- /// @param binding the binding number to use for the storage buffer
- void AddStorageBuffer(const std::string& name,
- const ast::Type* type,
- ast::Access access,
- uint32_t group,
- uint32_t binding);
-
- /// Generates a function that references a specific struct variable
- /// @param func_name name of the function created
- /// @param struct_name name of the struct variabler to be accessed
- /// @param members list of members to access, by index and type
- void MakeStructVariableReferenceBodyFunction(
- std::string func_name,
- std::string struct_name,
- std::vector<std::tuple<size_t, const ast::Type*>> members);
-
- /// Adds a regular sampler variable to the program
- /// @param name the name of the variable
- /// @param group the binding/group to use for the storage buffer
- /// @param binding the binding number to use for the storage buffer
- void AddSampler(const std::string& name, uint32_t group, uint32_t binding);
-
- /// Adds a comparison sampler variable to the program
- /// @param name the name of the variable
- /// @param group the binding/group to use for the storage buffer
- /// @param binding the binding number to use for the storage buffer
- void AddComparisonSampler(const std::string& name,
- uint32_t group,
- uint32_t binding);
-
- /// Adds a sampler or texture variable to the program
- /// @param name the name of the variable
- /// @param type the type to use
- /// @param group the binding/group to use for the resource
- /// @param binding the binding number to use for the resource
- void AddResource(const std::string& name,
- const ast::Type* type,
- uint32_t group,
- uint32_t binding);
-
- /// Add a module scope private variable to the progames
- /// @param name the name of the variable
- /// @param type the type to use
- void AddGlobalVariable(const std::string& name, const ast::Type* type);
-
- /// Generates a function that references a specific sampler variable
- /// @param func_name name of the function created
- /// @param texture_name name of the texture to be sampled
- /// @param sampler_name name of the sampler to use
- /// @param coords_name name of the coords variable to use
- /// @param base_type sampler base type
- /// @param attributes the function attributes
- /// @returns a function that references all of the values specified
- const ast::Function* MakeSamplerReferenceBodyFunction(
- const std::string& func_name,
- const std::string& texture_name,
- const std::string& sampler_name,
- const std::string& coords_name,
- const ast::Type* base_type,
- ast::AttributeList attributes);
-
- /// Generates a function that references a specific sampler variable
- /// @param func_name name of the function created
- /// @param texture_name name of the texture to be sampled
- /// @param sampler_name name of the sampler to use
- /// @param coords_name name of the coords variable to use
- /// @param array_index name of the array index variable to use
- /// @param base_type sampler base type
- /// @param attributes the function attributes
- /// @returns a function that references all of the values specified
- const ast::Function* MakeSamplerReferenceBodyFunction(
- const std::string& func_name,
- const std::string& texture_name,
- const std::string& sampler_name,
- const std::string& coords_name,
- const std::string& array_index,
- const ast::Type* base_type,
- ast::AttributeList attributes);
-
- /// Generates a function that references a specific comparison sampler
- /// variable.
- /// @param func_name name of the function created
- /// @param texture_name name of the depth texture to use
- /// @param sampler_name name of the sampler to use
- /// @param coords_name name of the coords variable to use
- /// @param depth_name name of the depth reference to use
- /// @param base_type sampler base type
- /// @param attributes the function attributes
- /// @returns a function that references all of the values specified
- const ast::Function* MakeComparisonSamplerReferenceBodyFunction(
- const std::string& func_name,
- const std::string& texture_name,
- const std::string& sampler_name,
- const std::string& coords_name,
- const std::string& depth_name,
- const ast::Type* base_type,
- ast::AttributeList attributes);
-
- /// Gets an appropriate type for the data in a given texture type.
- /// @param sampled_kind type of in the texture
- /// @returns a pointer to a type appropriate for the coord param
- const ast::Type* GetBaseType(ResourceBinding::SampledKind sampled_kind);
-
- /// Gets an appropriate type for the coords parameter depending the the
- /// dimensionality of the texture being sampled.
- /// @param dim dimensionality of the texture being sampled
- /// @param scalar the scalar type
- /// @returns a pointer to a type appropriate for the coord param
- const ast::Type* GetCoordsType(ast::TextureDimension dim,
- const ast::Type* scalar);
-
- /// Generates appropriate types for a Read-Only StorageTexture
- /// @param dim the texture dimension of the storage texture
- /// @param format the texel format of the storage texture
- /// @returns the storage texture type
- const ast::Type* MakeStorageTextureTypes(ast::TextureDimension dim,
- ast::TexelFormat format);
-
- /// Adds a storage texture variable to the program
- /// @param name the name of the variable
- /// @param type the type to use
- /// @param group the binding/group to use for the sampled texture
- /// @param binding the binding57 number to use for the sampled texture
- void AddStorageTexture(const std::string& name,
- const ast::Type* type,
- uint32_t group,
- uint32_t binding);
-
- /// Generates a function that references a storage texture variable.
- /// @param func_name name of the function created
- /// @param st_name name of the storage texture to use
- /// @param dim_type type expected by textureDimensons to return
- /// @param attributes the function attributes
- /// @returns a function that references all of the values specified
- const ast::Function* MakeStorageTextureBodyFunction(
- const std::string& func_name,
- const std::string& st_name,
- const ast::Type* dim_type,
- ast::AttributeList attributes);
-
- /// Get a generator function that returns a type appropriate for a stage
- /// variable with the given combination of component and composition type.
- /// @param component component type of the stage variable
- /// @param composition composition type of the stage variable
- /// @returns a generator function for the stage variable's type.
- std::function<const ast::Type*()> GetTypeFunction(
- ComponentType component,
- CompositionType composition);
-
- /// Build the Program given all of the previous methods called and return an
- /// Inspector for it.
- /// Should only be called once per test.
- /// @returns a reference to the Inspector for the built Program.
- Inspector& Build();
-
- /// @returns the type for a SamplerKind::kSampler
- const ast::Sampler* sampler_type() {
- return ty.sampler(ast::SamplerKind::kSampler);
- }
-
- /// @returns the type for a SamplerKind::kComparison
- const ast::Sampler* comparison_sampler_type() {
- return ty.sampler(ast::SamplerKind::kComparisonSampler);
- }
-
- protected:
- /// Program built by this builder.
- std::unique_ptr<Program> program_;
- /// Inspector for |program_|
- std::unique_ptr<Inspector> inspector_;
+ public:
+ InspectorBuilder();
+ ~InspectorBuilder() override;
+
+ /// Generates an empty function
+ /// @param name name of the function created
+ /// @param attributes the function attributes
+ void MakeEmptyBodyFunction(std::string name, ast::AttributeList attributes);
+
+ /// Generates a function that calls other functions
+ /// @param caller name of the function created
+ /// @param callees names of the functions to be called
+ /// @param attributes the function attributes
+ void MakeCallerBodyFunction(std::string caller,
+ std::vector<std::string> callees,
+ ast::AttributeList attributes);
+
+ /// Generates a struct that contains user-defined IO members
+ /// @param name the name of the generated struct
+ /// @param inout_vars tuples of {name, loc} that will be the struct members
+ /// @returns a structure object
+ const ast::Struct* MakeInOutStruct(std::string name,
+ std::vector<std::tuple<std::string, uint32_t>> inout_vars);
+
+ // TODO(crbug.com/tint/697): Remove this.
+ /// Add In/Out variables to the global variables
+ /// @param inout_vars tuples of {in, out} that will be added as entries to the
+ /// global variables
+ void AddInOutVariables(std::vector<std::tuple<std::string, std::string>> inout_vars);
+
+ // TODO(crbug.com/tint/697): Remove this.
+ /// Generates a function that references in/out variables
+ /// @param name name of the function created
+ /// @param inout_vars tuples of {in, out} that will be converted into out = in
+ /// calls in the function body
+ /// @param attributes the function attributes
+ void MakeInOutVariableBodyFunction(std::string name,
+ std::vector<std::tuple<std::string, std::string>> inout_vars,
+ ast::AttributeList attributes);
+
+ // TODO(crbug.com/tint/697): Remove this.
+ /// Generates a function that references in/out variables and calls another
+ /// function.
+ /// @param caller name of the function created
+ /// @param callee name of the function to be called
+ /// @param inout_vars tuples of {in, out} that will be converted into out = in
+ /// calls in the function body
+ /// @param attributes the function attributes
+ /// @returns a function object
+ const ast::Function* MakeInOutVariableCallerBodyFunction(
+ std::string caller,
+ std::string callee,
+ std::vector<std::tuple<std::string, std::string>> inout_vars,
+ ast::AttributeList attributes);
+
+ /// Add a pipeline constant to the global variables, with a specific ID.
+ /// @param name name of the variable to add
+ /// @param id id number for the constant id
+ /// @param type type of the variable
+    /// @param constructor value to initialize the constant with; if null, no
+    /// constructor will be added.
+ /// @returns the constant that was created
+ const ast::Variable* AddOverridableConstantWithID(std::string name,
+ uint32_t id,
+ const ast::Type* type,
+ const ast::Expression* constructor) {
+ return Override(name, type, constructor, {Id(id)});
+ }
+
+ /// Add a pipeline constant to the global variables, without a specific ID.
+ /// @param name name of the variable to add
+ /// @param type type of the variable
+    /// @param constructor value to initialize the constant with; if null, no
+    /// constructor will be added.
+ /// @returns the constant that was created
+ const ast::Variable* AddOverridableConstantWithoutID(std::string name,
+ const ast::Type* type,
+ const ast::Expression* constructor) {
+ return Override(name, type, constructor);
+ }
+
+    /// Generates a function that references a module-scoped, plain-typed constant
+    /// or variable.
+    /// @param func name of the function created
+    /// @param var name of the constant to be referenced
+ /// @param type type of the const being referenced
+ /// @param attributes the function attributes
+ /// @returns a function object
+ const ast::Function* MakePlainGlobalReferenceBodyFunction(std::string func,
+ std::string var,
+ const ast::Type* type,
+ ast::AttributeList attributes);
+
+ /// @param vec Vector of StageVariable to be searched
+    /// @param name Name to search for
+ /// @returns true if name is in vec, otherwise false
+ bool ContainsName(const std::vector<StageVariable>& vec, const std::string& name);
+
+ /// Builds a string for accessing a member in a generated struct
+ /// @param idx index of member
+ /// @param type type of member
+ /// @returns a string for the member
+ std::string StructMemberName(size_t idx, const ast::Type* type);
+
+ /// Generates a struct type
+ /// @param name name for the type
+ /// @param member_types a vector of member types
+ /// @returns a struct type
+ const ast::Struct* MakeStructType(const std::string& name,
+ std::vector<const ast::Type*> member_types);
+
+ /// Generates a struct type from a list of member nodes.
+ /// @param name name for the struct type
+ /// @param members a vector of members
+ /// @returns a struct type
+ const ast::Struct* MakeStructTypeFromMembers(const std::string& name,
+ ast::StructMemberList members);
+
+ /// Generates a struct member with a specified index and type.
+ /// @param index index of the field within the struct
+ /// @param type the type of the member field
+ /// @param attributes a list of attributes to apply to the member field
+ /// @returns a struct member
+ const ast::StructMember* MakeStructMember(size_t index,
+ const ast::Type* type,
+ ast::AttributeList attributes);
+
+    /// Generates types appropriate for using in a uniform buffer
+ /// @param name name for the type
+ /// @param member_types a vector of member types
+    /// @returns a struct type that has the layout for a uniform buffer.
+ const ast::Struct* MakeUniformBufferType(const std::string& name,
+ std::vector<const ast::Type*> member_types);
+
+ /// Generates types appropriate for using in a storage buffer
+ /// @param name name for the type
+ /// @param member_types a vector of member types
+ /// @returns a function that returns the created structure.
+ std::function<const ast::TypeName*()> MakeStorageBufferTypes(
+ const std::string& name,
+ std::vector<const ast::Type*> member_types);
+
+    /// Adds a uniform buffer variable to the program
+ /// @param name the name of the variable
+ /// @param type the type to use
+    /// @param group the binding/group to use for the uniform buffer
+ /// @param binding the binding number to use for the uniform buffer
+ void AddUniformBuffer(const std::string& name,
+ const ast::Type* type,
+ uint32_t group,
+ uint32_t binding);
+
+ /// Adds a workgroup storage variable to the program
+ /// @param name the name of the variable
+ /// @param type the type of the variable
+ void AddWorkgroupStorage(const std::string& name, const ast::Type* type);
+
+ /// Adds a storage buffer variable to the program
+ /// @param name the name of the variable
+ /// @param type the type to use
+ /// @param access the storage buffer access control
+ /// @param group the binding/group to use for the storage buffer
+ /// @param binding the binding number to use for the storage buffer
+ void AddStorageBuffer(const std::string& name,
+ const ast::Type* type,
+ ast::Access access,
+ uint32_t group,
+ uint32_t binding);
+
+ /// Generates a function that references a specific struct variable
+ /// @param func_name name of the function created
+    /// @param struct_name name of the struct variable to be accessed
+ /// @param members list of members to access, by index and type
+ void MakeStructVariableReferenceBodyFunction(
+ std::string func_name,
+ std::string struct_name,
+ std::vector<std::tuple<size_t, const ast::Type*>> members);
+
+ /// Adds a regular sampler variable to the program
+ /// @param name the name of the variable
+    /// @param group the binding/group to use for the sampler
+    /// @param binding the binding number to use for the sampler
+ void AddSampler(const std::string& name, uint32_t group, uint32_t binding);
+
+ /// Adds a comparison sampler variable to the program
+ /// @param name the name of the variable
+    /// @param group the binding/group to use for the comparison sampler
+    /// @param binding the binding number to use for the comparison sampler
+ void AddComparisonSampler(const std::string& name, uint32_t group, uint32_t binding);
+
+ /// Adds a sampler or texture variable to the program
+ /// @param name the name of the variable
+ /// @param type the type to use
+ /// @param group the binding/group to use for the resource
+ /// @param binding the binding number to use for the resource
+ void AddResource(const std::string& name,
+ const ast::Type* type,
+ uint32_t group,
+ uint32_t binding);
+
+    /// Add a module scope private variable to the program
+ /// @param name the name of the variable
+ /// @param type the type to use
+ void AddGlobalVariable(const std::string& name, const ast::Type* type);
+
+ /// Generates a function that references a specific sampler variable
+ /// @param func_name name of the function created
+ /// @param texture_name name of the texture to be sampled
+ /// @param sampler_name name of the sampler to use
+ /// @param coords_name name of the coords variable to use
+ /// @param base_type sampler base type
+ /// @param attributes the function attributes
+ /// @returns a function that references all of the values specified
+ const ast::Function* MakeSamplerReferenceBodyFunction(const std::string& func_name,
+ const std::string& texture_name,
+ const std::string& sampler_name,
+ const std::string& coords_name,
+ const ast::Type* base_type,
+ ast::AttributeList attributes);
+
+ /// Generates a function that references a specific sampler variable
+ /// @param func_name name of the function created
+ /// @param texture_name name of the texture to be sampled
+ /// @param sampler_name name of the sampler to use
+ /// @param coords_name name of the coords variable to use
+ /// @param array_index name of the array index variable to use
+ /// @param base_type sampler base type
+ /// @param attributes the function attributes
+ /// @returns a function that references all of the values specified
+ const ast::Function* MakeSamplerReferenceBodyFunction(const std::string& func_name,
+ const std::string& texture_name,
+ const std::string& sampler_name,
+ const std::string& coords_name,
+ const std::string& array_index,
+ const ast::Type* base_type,
+ ast::AttributeList attributes);
+
+ /// Generates a function that references a specific comparison sampler
+ /// variable.
+ /// @param func_name name of the function created
+ /// @param texture_name name of the depth texture to use
+ /// @param sampler_name name of the sampler to use
+ /// @param coords_name name of the coords variable to use
+ /// @param depth_name name of the depth reference to use
+ /// @param base_type sampler base type
+ /// @param attributes the function attributes
+ /// @returns a function that references all of the values specified
+ const ast::Function* MakeComparisonSamplerReferenceBodyFunction(const std::string& func_name,
+ const std::string& texture_name,
+ const std::string& sampler_name,
+ const std::string& coords_name,
+ const std::string& depth_name,
+ const ast::Type* base_type,
+ ast::AttributeList attributes);
+
+ /// Gets an appropriate type for the data in a given texture type.
+    /// @param sampled_kind the sampled kind of data in the texture
+    /// @returns a pointer to a type appropriate for the texture data
+ const ast::Type* GetBaseType(ResourceBinding::SampledKind sampled_kind);
+
+    /// Gets an appropriate type for the coords parameter depending on the
+ /// dimensionality of the texture being sampled.
+ /// @param dim dimensionality of the texture being sampled
+ /// @param scalar the scalar type
+ /// @returns a pointer to a type appropriate for the coord param
+ const ast::Type* GetCoordsType(ast::TextureDimension dim, const ast::Type* scalar);
+
+    /// Generates appropriate types for a write-only StorageTexture
+ /// @param dim the texture dimension of the storage texture
+ /// @param format the texel format of the storage texture
+ /// @returns the storage texture type
+ const ast::Type* MakeStorageTextureTypes(ast::TextureDimension dim, ast::TexelFormat format);
+
+ /// Adds a storage texture variable to the program
+ /// @param name the name of the variable
+ /// @param type the type to use
+    /// @param group the binding/group to use for the storage texture
+    /// @param binding the binding number to use for the storage texture
+ void AddStorageTexture(const std::string& name,
+ const ast::Type* type,
+ uint32_t group,
+ uint32_t binding);
+
+ /// Generates a function that references a storage texture variable.
+ /// @param func_name name of the function created
+ /// @param st_name name of the storage texture to use
+    /// @param dim_type type expected to be returned by textureDimensions
+ /// @param attributes the function attributes
+ /// @returns a function that references all of the values specified
+ const ast::Function* MakeStorageTextureBodyFunction(const std::string& func_name,
+ const std::string& st_name,
+ const ast::Type* dim_type,
+ ast::AttributeList attributes);
+
+ /// Get a generator function that returns a type appropriate for a stage
+ /// variable with the given combination of component and composition type.
+ /// @param component component type of the stage variable
+ /// @param composition composition type of the stage variable
+ /// @returns a generator function for the stage variable's type.
+ std::function<const ast::Type*()> GetTypeFunction(ComponentType component,
+ CompositionType composition);
+
+ /// Build the Program given all of the previous methods called and return an
+ /// Inspector for it.
+ /// Should only be called once per test.
+ /// @returns a reference to the Inspector for the built Program.
+ Inspector& Build();
+
+ /// @returns the type for a SamplerKind::kSampler
+ const ast::Sampler* sampler_type() { return ty.sampler(ast::SamplerKind::kSampler); }
+
+ /// @returns the type for a SamplerKind::kComparison
+ const ast::Sampler* comparison_sampler_type() {
+ return ty.sampler(ast::SamplerKind::kComparisonSampler);
+ }
+
+ protected:
+ /// Program built by this builder.
+ std::unique_ptr<Program> program_;
+ /// Inspector for |program_|
+ std::unique_ptr<Inspector> inspector_;
};
} // namespace tint::inspector
diff --git a/chromium/third_party/dawn/src/tint/inspector/test_inspector_runner.cc b/chromium/third_party/dawn/src/tint/inspector/test_inspector_runner.cc
index 0bb5858cc5d..16e41965cb1 100644
--- a/chromium/third_party/dawn/src/tint/inspector/test_inspector_runner.cc
+++ b/chromium/third_party/dawn/src/tint/inspector/test_inspector_runner.cc
@@ -20,18 +20,17 @@ InspectorRunner::InspectorRunner() = default;
InspectorRunner::~InspectorRunner() = default;
Inspector& InspectorRunner::Initialize(std::string shader) {
- if (inspector_) {
- return *inspector_;
- }
+ if (inspector_) {
+ return *inspector_;
+ }
- file_ = std::make_unique<Source::File>("test", shader);
- program_ = std::make_unique<Program>(reader::wgsl::Parse(file_.get()));
- [&]() {
- ASSERT_TRUE(program_->IsValid())
- << diag::Formatter().format(program_->Diagnostics());
- }();
- inspector_ = std::make_unique<Inspector>(program_.get());
- return *inspector_;
+ file_ = std::make_unique<Source::File>("test", shader);
+ program_ = std::make_unique<Program>(reader::wgsl::Parse(file_.get()));
+ [&]() {
+ ASSERT_TRUE(program_->IsValid()) << diag::Formatter().format(program_->Diagnostics());
+ }();
+ inspector_ = std::make_unique<Inspector>(program_.get());
+ return *inspector_;
}
} // namespace tint::inspector
diff --git a/chromium/third_party/dawn/src/tint/inspector/test_inspector_runner.h b/chromium/third_party/dawn/src/tint/inspector/test_inspector_runner.h
index bac3bebdc55..6d46725833a 100644
--- a/chromium/third_party/dawn/src/tint/inspector/test_inspector_runner.h
+++ b/chromium/third_party/dawn/src/tint/inspector/test_inspector_runner.h
@@ -25,23 +25,23 @@ namespace tint::inspector {
/// Utility class for running shaders in inspector tests
class InspectorRunner {
- public:
- InspectorRunner();
- virtual ~InspectorRunner();
-
- /// Create a Program with Inspector from the provided WGSL shader.
- /// Should only be called once per test.
- /// @param shader a WGSL shader
- /// @returns a reference to the Inspector for the built Program.
- Inspector& Initialize(std::string shader);
-
- protected:
- /// File created from input shader and used to create Program.
- std::unique_ptr<Source::File> file_;
- /// Program created by this runner.
- std::unique_ptr<Program> program_;
- /// Inspector for |program_|
- std::unique_ptr<Inspector> inspector_;
+ public:
+ InspectorRunner();
+ virtual ~InspectorRunner();
+
+ /// Create a Program with Inspector from the provided WGSL shader.
+ /// Should only be called once per test.
+ /// @param shader a WGSL shader
+ /// @returns a reference to the Inspector for the built Program.
+ Inspector& Initialize(std::string shader);
+
+ protected:
+ /// File created from input shader and used to create Program.
+ std::unique_ptr<Source::File> file_;
+ /// Program created by this runner.
+ std::unique_ptr<Program> program_;
+ /// Inspector for |program_|
+ std::unique_ptr<Inspector> inspector_;
};
} // namespace tint::inspector
diff --git a/chromium/third_party/dawn/src/tint/builtins.def b/chromium/third_party/dawn/src/tint/intrinsics.def
index 443634d5b29..5ac57739a26 100644
--- a/chromium/third_party/dawn/src/tint/builtins.def
+++ b/chromium/third_party/dawn/src/tint/intrinsics.def
@@ -30,7 +30,7 @@ enum storage_class {
workgroup
uniform
storage
- [[internal]] handle
+ @internal handle
}
// https://gpuweb.github.io/gpuweb/wgsl/#memory-access-mode
@@ -62,18 +62,34 @@ enum texel_format {
////////////////////////////////////////////////////////////////////////////////
// WGSL primitive types //
+// Types may be decorated with @precedence(N) to prioritize which type //
+// will be picked when multiple types of a matcher match. //
+// This is used to ensure that abstract numerical types materialize to the //
+// concrete type with the lowest conversion rank. //
+// Types with higher precedence values will be matched first.                 //
////////////////////////////////////////////////////////////////////////////////
// https://gpuweb.github.io/gpuweb/wgsl/#plain-types-section
type bool
-type f32
-type i32
-type u32
+@precedence(4) @display("abstract-float") type af
+@precedence(3) @display("abstract-int") type ai
+@precedence(2) type i32
+@precedence(1) type u32
+@precedence(0) type f32
type vec2<T>
type vec3<T>
type vec4<T>
-[[display("vec{N}<{T}>")]] type vec<N: num, T>
-[[display("mat{N}x{M}<{T}>")]] type mat<N: num, M: num, T>
+type mat2x2<T>
+type mat2x3<T>
+type mat2x4<T>
+type mat3x2<T>
+type mat3x3<T>
+type mat3x4<T>
+type mat4x2<T>
+type mat4x3<T>
+type mat4x4<T>
+@display("vec{N}<{T}>") type vec<N: num, T>
+@display("mat{N}x{M}<{T}>") type mat<N: num, M: num, T>
type ptr<S: storage_class, T, A: access>
type atomic<T>
type array<T>
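The @precedence values above only come into play when one matcher could accept several candidate types for the same argument; the type carrying the highest value is chosen, which is how abstract-int ends up preferring i32 over u32 and f32. A small illustrative sketch of that tie-break (not Tint's implementation; the helper name is invented):

    #include <string>
    #include <utility>
    #include <vector>

    // Pick the candidate type with the highest @precedence value.
    std::string PickByPrecedence(const std::vector<std::pair<std::string, int>>& candidates) {
        std::string best;
        int best_precedence = -1;
        for (const auto& [name, precedence] : candidates) {
            if (precedence > best_precedence) {
                best = name;
                best_precedence = precedence;
            }
        }
        return best;
    }

    // PickByPrecedence({{"f32", 0}, {"u32", 1}, {"i32", 2}}) == "i32"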
@@ -98,9 +114,11 @@ type texture_storage_3d<F: texel_format, A: access>
type texture_external
type __modf_result
-[[display("__modf_result_vec{N}")]] type __modf_result_vec<N: num>
+@display("__modf_result_vec{N}") type __modf_result_vec<N: num>
type __frexp_result
-[[display("__frexp_result_vec{N}")]] type __frexp_result_vec<N: num>
+@display("__frexp_result_vec{N}") type __frexp_result_vec<N: num>
+
+type __atomic_compare_exchange_result<T>
////////////////////////////////////////////////////////////////////////////////
// Type matchers //
@@ -109,8 +127,15 @@ type __frexp_result
////////////////////////////////////////////////////////////////////////////////
match fiu32: f32 | i32 | u32
+match fi32: f32 | i32
match iu32: i32 | u32
match scalar: f32 | i32 | u32 | bool
+match abstract_or_scalar: ai | af | f32 | i32 | u32 | bool
+match af_f32: af | f32
+match scalar_no_f32: i32 | u32 | bool
+match scalar_no_i32: f32 | u32 | bool
+match scalar_no_u32: f32 | i32 | bool
+match scalar_no_bool: f32 | i32 | u32
////////////////////////////////////////////////////////////////////////////////
// Enum matchers //
@@ -140,7 +165,7 @@ match workgroup_or_storage: workgroup | storage
// functions supported by the WGSL language. This builtin definition //
// language supports simple static-type function declarations, as well as //
// single overload declarations that can match a number of different //
-// argument types via the use of 'open-types' and 'open-numbers'. //
+// argument types via the use of template types and template numbers //
// //
// * Basic example: //
// //
@@ -149,10 +174,9 @@ match workgroup_or_storage: workgroup | storage
// Declares an overload of the function 'isInf' that accepts a single //
// parameter of type 'f32' and returns a 'bool'. //
// //
-// An 'open-type' can be thought as a template type that is determined by the //
-// arguments to the builtin. //
+// A template type is a type determined by the arguments to the builtin. //
// //
-// * Open-type example without constraint: //
+// * Template type example without constraint: //
// //
// fn arrayLength<T>(array<T>) -> u32 //
// //
@@ -161,7 +185,7 @@ match workgroup_or_storage: workgroup | storage
// element type. This overload will always return a value of the same type //
// as its single argument. //
// //
-// * Open-type example with constraint: //
+// * Template type example with constraint: //
// //
// fn abs<T: fiu32>(T) -> T //
// //
@@ -169,10 +193,10 @@ match workgroup_or_storage: workgroup | storage
// argument of type 'f32', 'i32' or 'u32', which returns a value of the //
// same argument type. //
// //
-// Similarly an 'open-number' can be thought as a template number or //
-// enumerator that is determined by the arguments to the builtin. //
+// Similarly a template number is a number or enumerator that is determined //
+// by the arguments to the builtin. //
// //
-// * Open-number example: //
+// * Template number example: //
// //
// fn dpdx<N: num>(vec<N, f32>) -> vec<N, f32> //
// //
@@ -181,54 +205,92 @@ match workgroup_or_storage: workgroup | storage
// the same argument type. //
// //
// //
-// Matching algorithm: //
-// ------------------- //
+// Matching algorithm for a single overload: //
+// ----------------------------------------- //
+// //
+// The goal of matching is to compare a function call's arguments and any //
+// explicitly provided template types in the program source against an //
+// overload declaration in this file, and determine if the call satisfies //
+// the form and type constraints of the overload. If the call matches an //
+// overload, then the overload is added to the list of 'overload candidates' //
+// used for overload resolution (described below). //
// //
-// Prior to matching an overload, all open-types are undefined. //
+// Prior to matching an overload, all template types are undefined. //
// //
-// Open-types become closed-types (pinned to a fixed type) on the first //
-// attempt to match an argument to that open-type. //
-// Once open-types are closed, they remain that type for the rest of the //
-// overload evaluation. //
+// Template types are first defined with the type of the leftmost argument //
+// that matches against that template type name. Subsequent arguments that //
+// attempt to match against the template type name will either reject the //
+// overload or refine the template, in one of 3 ways: //
+// (a) Fail to match, causing the overload to be immediately rejected. //
+// (b) Match the existing template type, either exactly or via implicit //
+// conversion, and overload resolution continues. //
+// (c) Match via implicit conversion of the currently defined template type //
+// to the argument type. In this situation, the template type is refined //
+// with the more constrained argument type, and overload resolution //
+// continues. //
// //
// To better understand, let's consider the following hypothetical overload //
// declaration: //
// //
// fn foo<T: scalar>(T, T); //
// //
-// T - is the open-type //
+// T - is the template type name //
// scalar - is a matcher for the types 'f32', 'i32', 'u32' or 'bool' //
// (declared above) //
-// <T: scalar> - declares the open-type T, with the constraint that T must //
-// match one of 'f32', 'i32', 'u32' or 'bool'. //
+// <T: scalar> - declares the template type T, with the constraint that T //
+// must match one of 'f32', 'i32', 'u32' or 'bool'. //
// //
// The process for resolving this overload is as follows: //
// //
// (1) The overload resolver begins by attempting to match the argument //
// types from left to right. //
-// The first parameter type is compared against the argument type. //
-// As the open-type T has not been closed yet, T is closed as the type //
-// of the first argument. //
+// The first parameter type is compared against the argument type T. //
+// As the template type T has not been defined yet, T is defined as the //
+// type of the first argument. //
// There's no verification that the T type is a scalar at this stage. //
// (2) The second parameter is then compared against the second argument. //
-// As the open-type T is now closed, the argument type is compared //
-// against the value of the closed-type of T. If the types match, then //
-// the overload is still a candidate for matching, otherwise the //
-// overload is no longer considered. //
-// (3) If all the parameters matched, constraints on the open-types need //
-// to be checked next. If the closed-type does not match the 'match' //
-// constraint, then the overload is no longer considered. //
+// As the template type T is now defined the argument type is compared //
+// against the value of the defined type of T. Depending on the //
+// comparison of the argument type to the template type, either the //
+// actions of (a), (b) or (c) from above will occur. //
+// (3) If all the parameters matched, constraints on the template types //
+// need to be checked next. If the defined type does not match the //
+// 'match' constraint, then the overload is no longer considered. //
+// //
+// This algorithm for matching a single overload is less general than the //
+// algorithm described in the WGSL spec. But it makes the same decisions //
+// because the overloads defined by WGSL are monotonic in the sense that once //
+// a template parameter has been refined, there is never a need to backtrack //
+// and un-refine it to match a later argument. //
+// //
+// The algorithm for matching template numbers is similar to matching //
+// template types, except numbers need to exactly match across all uses - //
+// there is no implicit conversion. Template numbers may match integer //
+// numbers or enumerators. //
+// //
+// //
+// Overload resolution for candidate overloads //
+// ------------------------------------------- //
+// //
+// If multiple candidate overloads match a given set of arguments, then a //
+// final overload resolution pass needs to be performed. The arguments and //
+// overload parameter types for each candidate overload are compared, //
+// following the algorithm described at: //
+// https://www.w3.org/TR/WGSL/#overload-resolution-section //
+// //
+// If the candidate list contains a single entry, then that single candidate //
+// is picked, and no overload resolution needs to be performed. //
// //
-// The algorithm for matching open-numbers is almost identical to open-types, //
-// except of course, they match against integer numbers or enumerators //
-// instead of types. //
+// If the candidate list is empty, then the call fails to resolve and an //
+// error diagnostic is raised. //
// //
// //
-// * More examples: //
+// More examples //
+// ------------- //
// //
// fn F() //
// - Function called F. //
-// No open types or numbers, no parameters, no return value //
+// No template types or numbers, no parameters, no return value //
// //
// fn F() -> RETURN_TYPE //
// - Function with RETURN_TYPE as the return type value //
@@ -242,21 +304,21 @@ match workgroup_or_storage: workgroup | storage
// some builtin functions //
// //
// fn F<T>(T) //
-// - Single parameter of unconstrained open-type T (any type) //
+// - Single parameter of unconstrained template type T (any type) //
// //
// fn F<T: scalar>(T) //
-// - Single parameter of constrained open-type T (must be a scalar) //
+// - Single parameter of constrained template type T (must be a scalar) //
// //
// fn F<T: fiu32>(T) -> T //
-// - Single parameter of constrained open-type T (must be a one of fiu32) //
-// Return type matches parameter type //
+// - Single parameter of constrained template type T (must be one of          //
+//   fiu32). Return type matches parameter type                               //
// //
// fn F<T, N: num>(vec<N, T>) //
-// - Single parameter of vector type with open-number size N and element //
-// open-type T //
+// - Single parameter of vector type with template number size N and //
+// element template type T //
// //
// fn F<A: access>(texture_storage_1d<f32_texel_format, A>) //
-// - Single parameter of texture_storage_1d type with open-number //
+// - Single parameter of texture_storage_1d type with template number //
// access-control C, and of a texel format that is listed in //
// f32_texel_format //
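The single-overload matching rules in the comment block above (define T at first use; later arguments either match, refine, or reject) can be summarised in a short sketch. This is an illustrative pseudo-implementation only, not Tint's resolver; types are plain strings and the conversion table is a stand-in:

    #include <optional>
    #include <string>
    #include <unordered_set>
    #include <vector>

    // Stand-in for the implicit conversion rules; the real rules live in the resolver.
    bool ImplicitlyConvertible(const std::string& from, const std::string& to) {
        if (from == "abstract-int") {
            return to == "abstract-float" || to == "i32" || to == "u32" || to == "f32";
        }
        if (from == "abstract-float") {
            return to == "f32";
        }
        return false;
    }

    // Match call arguments against an overload of the form fn foo<T: matcher>(T, T, ...).
    // Returns the deduced T on success, or nullopt if the overload is rejected.
    std::optional<std::string> MatchTemplateType(const std::vector<std::string>& args,
                                                 const std::unordered_set<std::string>& matcher) {
        std::optional<std::string> t;  // The template type T, undefined until first use.
        for (const auto& arg : args) {
            if (!t) {
                t = arg;  // First use defines T; constraints are not checked yet.
            } else if (arg == *t || ImplicitlyConvertible(arg, *t)) {
                // (b) The argument matches the current T, exactly or via implicit conversion.
            } else if (ImplicitlyConvertible(*t, arg)) {
                t = arg;  // (c) Refine T to the more constrained argument type.
            } else {
                return std::nullopt;  // (a) Reject this overload.
            }
        }
        // (3) All parameters matched: now check the 'match' constraint on T.
        if (!t || matcher.count(*t) == 0) {
            return std::nullopt;
        }
        return t;
    }

    // e.g. MatchTemplateType({"abstract-int", "f32"}, {"f32", "i32", "u32", "bool"}) deduces "f32".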
// //
@@ -299,18 +361,20 @@ fn determinant<N: num>(mat<N, N, f32>) -> f32
fn distance(f32, f32) -> f32
fn distance<N: num>(vec<N, f32>, vec<N, f32>) -> f32
fn dot<N: num, T: fiu32>(vec<N, T>, vec<N, T>) -> T
-[[stage("fragment")]] fn dpdx(f32) -> f32
-[[stage("fragment")]] fn dpdx<N: num>(vec<N, f32>) -> vec<N, f32>
-[[stage("fragment")]] fn dpdxCoarse(f32) -> f32
-[[stage("fragment")]] fn dpdxCoarse<N: num>(vec<N, f32>) -> vec<N, f32>
-[[stage("fragment")]] fn dpdxFine(f32) -> f32
-[[stage("fragment")]] fn dpdxFine<N: num>(vec<N, f32>) -> vec<N, f32>
-[[stage("fragment")]] fn dpdy(f32) -> f32
-[[stage("fragment")]] fn dpdy<N: num>(vec<N, f32>) -> vec<N, f32>
-[[stage("fragment")]] fn dpdyCoarse(f32) -> f32
-[[stage("fragment")]] fn dpdyCoarse<N: num>(vec<N, f32>) -> vec<N, f32>
-[[stage("fragment")]] fn dpdyFine(f32) -> f32
-[[stage("fragment")]] fn dpdyFine<N: num>(vec<N, f32>) -> vec<N, f32>
+fn dot4I8Packed(u32, u32) -> i32
+fn dot4U8Packed(u32, u32) -> u32
+@stage("fragment") fn dpdx(f32) -> f32
+@stage("fragment") fn dpdx<N: num>(vec<N, f32>) -> vec<N, f32>
+@stage("fragment") fn dpdxCoarse(f32) -> f32
+@stage("fragment") fn dpdxCoarse<N: num>(vec<N, f32>) -> vec<N, f32>
+@stage("fragment") fn dpdxFine(f32) -> f32
+@stage("fragment") fn dpdxFine<N: num>(vec<N, f32>) -> vec<N, f32>
+@stage("fragment") fn dpdy(f32) -> f32
+@stage("fragment") fn dpdy<N: num>(vec<N, f32>) -> vec<N, f32>
+@stage("fragment") fn dpdyCoarse(f32) -> f32
+@stage("fragment") fn dpdyCoarse<N: num>(vec<N, f32>) -> vec<N, f32>
+@stage("fragment") fn dpdyFine(f32) -> f32
+@stage("fragment") fn dpdyFine<N: num>(vec<N, f32>) -> vec<N, f32>
fn exp(f32) -> f32
fn exp<N: num>(vec<N, f32>) -> vec<N, f32>
fn exp2(f32) -> f32
@@ -330,12 +394,12 @@ fn fract(f32) -> f32
fn fract<N: num>(vec<N, f32>) -> vec<N, f32>
fn frexp(f32) -> __frexp_result
fn frexp<N: num>(vec<N, f32>) -> __frexp_result_vec<N>
-[[stage("fragment")]] fn fwidth(f32) -> f32
-[[stage("fragment")]] fn fwidth<N: num>(vec<N, f32>) -> vec<N, f32>
-[[stage("fragment")]] fn fwidthCoarse(f32) -> f32
-[[stage("fragment")]] fn fwidthCoarse<N: num>(vec<N, f32>) -> vec<N, f32>
-[[stage("fragment")]] fn fwidthFine(f32) -> f32
-[[stage("fragment")]] fn fwidthFine<N: num>(vec<N, f32>) -> vec<N, f32>
+@stage("fragment") fn fwidth(f32) -> f32
+@stage("fragment") fn fwidth<N: num>(vec<N, f32>) -> vec<N, f32>
+@stage("fragment") fn fwidthCoarse(f32) -> f32
+@stage("fragment") fn fwidthCoarse<N: num>(vec<N, f32>) -> vec<N, f32>
+@stage("fragment") fn fwidthFine(f32) -> f32
+@stage("fragment") fn fwidthFine<N: num>(vec<N, f32>) -> vec<N, f32>
fn insertBits<T: iu32>(T, T, u32, u32) -> T
fn insertBits<N: num, T: iu32>(vec<N, T>, vec<N, T>, u32, u32) -> vec<N, T>
fn inverseSqrt(f32) -> f32
@@ -384,13 +448,13 @@ fn sinh(f32) -> f32
fn sinh<N: num>(vec<N, f32>) -> vec<N, f32>
fn smoothstep(f32, f32, f32) -> f32
fn smoothstep<N: num>(vec<N, f32>, vec<N, f32>, vec<N, f32>) -> vec<N, f32>
-[[deprecated]] fn smoothStep(f32, f32, f32) -> f32
-[[deprecated]] fn smoothStep<N: num>(vec<N, f32>, vec<N, f32>, vec<N, f32>) -> vec<N, f32>
+@deprecated fn smoothStep(f32, f32, f32) -> f32
+@deprecated fn smoothStep<N: num>(vec<N, f32>, vec<N, f32>, vec<N, f32>) -> vec<N, f32>
fn sqrt(f32) -> f32
fn sqrt<N: num>(vec<N, f32>) -> vec<N, f32>
fn step(f32, f32) -> f32
fn step<N: num>(vec<N, f32>, vec<N, f32>) -> vec<N, f32>
-[[stage("compute")]] fn storageBarrier()
+@stage("compute") fn storageBarrier()
fn tan(f32) -> f32
fn tan<N: num>(vec<N, f32>) -> vec<N, f32>
fn tanh(f32) -> f32
@@ -403,7 +467,7 @@ fn unpack2x16snorm(u32) -> vec2<f32>
fn unpack2x16unorm(u32) -> vec2<f32>
fn unpack4x8snorm(u32) -> vec4<f32>
fn unpack4x8unorm(u32) -> vec4<f32>
-[[stage("compute")]] fn workgroupBarrier()
+@stage("compute") fn workgroupBarrier()
fn textureDimensions<T: fiu32>(texture: texture_1d<T>) -> i32
fn textureDimensions<T: fiu32>(texture: texture_1d<T>, level: i32) -> i32
@@ -432,22 +496,22 @@ fn textureDimensions<F: texel_format, A: write_only>(texture: texture_storage_2d
fn textureDimensions<F: texel_format, A: write_only>(texture: texture_storage_2d_array<F, A>) -> vec2<i32>
fn textureDimensions<F: texel_format, A: write_only>(texture: texture_storage_3d<F, A>) -> vec3<i32>
fn textureDimensions(texture: texture_external) -> vec2<i32>
-fn textureGather<T: fiu32>(component: i32, texture: texture_2d<T>, sampler: sampler, coords: vec2<f32>) -> vec4<T>
-fn textureGather<T: fiu32>(component: i32, texture: texture_2d<T>, sampler: sampler, coords: vec2<f32>, offset: vec2<i32>) -> vec4<T>
-fn textureGather<T: fiu32>(component: i32, texture: texture_2d_array<T>, sampler: sampler, coords: vec2<f32>, array_index: i32) -> vec4<T>
-fn textureGather<T: fiu32>(component: i32, texture: texture_2d_array<T>, sampler: sampler, coords: vec2<f32>, array_index: i32, offset: vec2<i32>) -> vec4<T>
-fn textureGather<T: fiu32>(component: i32, texture: texture_cube<T>, sampler: sampler, coords: vec3<f32>) -> vec4<T>
-fn textureGather<T: fiu32>(component: i32, texture: texture_cube_array<T>, sampler: sampler, coords: vec3<f32>, array_index: i32) -> vec4<T>
+fn textureGather<T: fiu32>(@const component: i32, texture: texture_2d<T>, sampler: sampler, coords: vec2<f32>) -> vec4<T>
+fn textureGather<T: fiu32>(@const component: i32, texture: texture_2d<T>, sampler: sampler, coords: vec2<f32>, @const offset: vec2<i32>) -> vec4<T>
+fn textureGather<T: fiu32>(@const component: i32, texture: texture_2d_array<T>, sampler: sampler, coords: vec2<f32>, array_index: i32) -> vec4<T>
+fn textureGather<T: fiu32>(@const component: i32, texture: texture_2d_array<T>, sampler: sampler, coords: vec2<f32>, array_index: i32, @const offset: vec2<i32>) -> vec4<T>
+fn textureGather<T: fiu32>(@const component: i32, texture: texture_cube<T>, sampler: sampler, coords: vec3<f32>) -> vec4<T>
+fn textureGather<T: fiu32>(@const component: i32, texture: texture_cube_array<T>, sampler: sampler, coords: vec3<f32>, array_index: i32) -> vec4<T>
fn textureGather(texture: texture_depth_2d, sampler: sampler, coords: vec2<f32>) -> vec4<f32>
-fn textureGather(texture: texture_depth_2d, sampler: sampler, coords: vec2<f32>, offset: vec2<i32>) -> vec4<f32>
+fn textureGather(texture: texture_depth_2d, sampler: sampler, coords: vec2<f32>, @const offset: vec2<i32>) -> vec4<f32>
fn textureGather(texture: texture_depth_2d_array, sampler: sampler, coords: vec2<f32>, array_index: i32) -> vec4<f32>
-fn textureGather(texture: texture_depth_2d_array, sampler: sampler, coords: vec2<f32>, array_index: i32, offset: vec2<i32>) -> vec4<f32>
+fn textureGather(texture: texture_depth_2d_array, sampler: sampler, coords: vec2<f32>, array_index: i32, @const offset: vec2<i32>) -> vec4<f32>
fn textureGather(texture: texture_depth_cube, sampler: sampler, coords: vec3<f32>) -> vec4<f32>
fn textureGather(texture: texture_depth_cube_array, sampler: sampler, coords: vec3<f32>, array_index: i32) -> vec4<f32>
fn textureGatherCompare(texture: texture_depth_2d, sampler: sampler_comparison, coords: vec2<f32>, depth_ref: f32) -> vec4<f32>
-fn textureGatherCompare(texture: texture_depth_2d, sampler: sampler_comparison, coords: vec2<f32>, depth_ref: f32, offset: vec2<i32>) -> vec4<f32>
+fn textureGatherCompare(texture: texture_depth_2d, sampler: sampler_comparison, coords: vec2<f32>, depth_ref: f32, @const offset: vec2<i32>) -> vec4<f32>
fn textureGatherCompare(texture: texture_depth_2d_array, sampler: sampler_comparison, coords: vec2<f32>, array_index: i32, depth_ref: f32) -> vec4<f32>
-fn textureGatherCompare(texture: texture_depth_2d_array, sampler: sampler_comparison, coords: vec2<f32>, array_index: i32, depth_ref: f32, offset: vec2<i32>) -> vec4<f32>
+fn textureGatherCompare(texture: texture_depth_2d_array, sampler: sampler_comparison, coords: vec2<f32>, array_index: i32, depth_ref: f32, @const offset: vec2<i32>) -> vec4<f32>
fn textureGatherCompare(texture: texture_depth_cube, sampler: sampler_comparison, coords: vec3<f32>, depth_ref: f32) -> vec4<f32>
fn textureGatherCompare(texture: texture_depth_cube_array, sampler: sampler_comparison, coords: vec3<f32>, array_index: i32, depth_ref: f32) -> vec4<f32>
fn textureNumLayers<T: fiu32>(texture: texture_2d_array<T>) -> i32
@@ -467,61 +531,61 @@ fn textureNumLevels(texture: texture_depth_cube) -> i32
fn textureNumLevels(texture: texture_depth_cube_array) -> i32
fn textureNumSamples<T: fiu32>(texture: texture_multisampled_2d<T>) -> i32
fn textureNumSamples(texture: texture_depth_multisampled_2d) -> i32
-[[stage("fragment")]] fn textureSample(texture: texture_1d<f32>, sampler: sampler, coords: f32) -> vec4<f32>
-[[stage("fragment")]] fn textureSample(texture: texture_2d<f32>, sampler: sampler, coords: vec2<f32>) -> vec4<f32>
-[[stage("fragment")]] fn textureSample(texture: texture_2d<f32>, sampler: sampler, coords: vec2<f32>, offset: vec2<i32>) -> vec4<f32>
-[[stage("fragment")]] fn textureSample(texture: texture_2d_array<f32>, sampler: sampler, coords: vec2<f32>, array_index: i32) -> vec4<f32>
-[[stage("fragment")]] fn textureSample(texture: texture_2d_array<f32>, sampler: sampler, coords: vec2<f32>, array_index: i32, offset: vec2<i32>) -> vec4<f32>
-[[stage("fragment")]] fn textureSample(texture: texture_3d<f32>, sampler: sampler, coords: vec3<f32>) -> vec4<f32>
-[[stage("fragment")]] fn textureSample(texture: texture_3d<f32>, sampler: sampler, coords: vec3<f32>, offset: vec3<i32>) -> vec4<f32>
-[[stage("fragment")]] fn textureSample(texture: texture_cube<f32>, sampler: sampler, coords: vec3<f32>) -> vec4<f32>
-[[stage("fragment")]] fn textureSample(texture: texture_cube_array<f32>, sampler: sampler, coords: vec3<f32>, array_index: i32) -> vec4<f32>
-[[stage("fragment")]] fn textureSample(texture: texture_depth_2d, sampler: sampler, coords: vec2<f32>) -> f32
-[[stage("fragment")]] fn textureSample(texture: texture_depth_2d, sampler: sampler, coords: vec2<f32>, offset: vec2<i32>) -> f32
-[[stage("fragment")]] fn textureSample(texture: texture_depth_2d_array, sampler: sampler, coords: vec2<f32>, array_index: i32) -> f32
-[[stage("fragment")]] fn textureSample(texture: texture_depth_2d_array, sampler: sampler, coords: vec2<f32>, array_index: i32, offset: vec2<i32>) -> f32
-[[stage("fragment")]] fn textureSample(texture: texture_depth_cube, sampler: sampler, coords: vec3<f32>) -> f32
-[[stage("fragment")]] fn textureSample(texture: texture_depth_cube_array, sampler: sampler, coords: vec3<f32>, array_index: i32) -> f32
-[[stage("fragment")]] fn textureSampleBias(texture: texture_2d<f32>, sampler: sampler, coords: vec2<f32>, bias: f32) -> vec4<f32>
-[[stage("fragment")]] fn textureSampleBias(texture: texture_2d<f32>, sampler: sampler, coords: vec2<f32>, bias: f32, offset: vec2<i32>) -> vec4<f32>
-[[stage("fragment")]] fn textureSampleBias(texture: texture_2d_array<f32>, sampler: sampler, coords: vec2<f32>, array_index: i32, bias: f32) -> vec4<f32>
-[[stage("fragment")]] fn textureSampleBias(texture: texture_2d_array<f32>, sampler: sampler, coords: vec2<f32>, array_index: i32, bias: f32, offset: vec2<i32>) -> vec4<f32>
-[[stage("fragment")]] fn textureSampleBias(texture: texture_3d<f32>, sampler: sampler, coords: vec3<f32>, bias: f32) -> vec4<f32>
-[[stage("fragment")]] fn textureSampleBias(texture: texture_3d<f32>, sampler: sampler, coords: vec3<f32>, bias: f32, offset: vec3<i32>) -> vec4<f32>
-[[stage("fragment")]] fn textureSampleBias(texture: texture_cube<f32>, sampler: sampler, coords: vec3<f32>, bias: f32) -> vec4<f32>
-[[stage("fragment")]] fn textureSampleBias(texture: texture_cube_array<f32>, sampler: sampler, coords: vec3<f32>, array_index: i32, bias: f32) -> vec4<f32>
-[[stage("fragment")]] fn textureSampleCompare(texture: texture_depth_2d, sampler: sampler_comparison, coords: vec2<f32>, depth_ref: f32) -> f32
-[[stage("fragment")]] fn textureSampleCompare(texture: texture_depth_2d, sampler: sampler_comparison, coords: vec2<f32>, depth_ref: f32, offset: vec2<i32>) -> f32
-[[stage("fragment")]] fn textureSampleCompare(texture: texture_depth_2d_array, sampler: sampler_comparison, coords: vec2<f32>, array_index: i32, depth_ref: f32) -> f32
-[[stage("fragment")]] fn textureSampleCompare(texture: texture_depth_2d_array, sampler: sampler_comparison, coords: vec2<f32>, array_index: i32, depth_ref: f32, offset: vec2<i32>) -> f32
-[[stage("fragment")]] fn textureSampleCompare(texture: texture_depth_cube, sampler: sampler_comparison, coords: vec3<f32>, depth_ref: f32) -> f32
-[[stage("fragment")]] fn textureSampleCompare(texture: texture_depth_cube_array, sampler: sampler_comparison, coords: vec3<f32>, array_index: i32, depth_ref: f32) -> f32
+@stage("fragment") fn textureSample(texture: texture_1d<f32>, sampler: sampler, coords: f32) -> vec4<f32>
+@stage("fragment") fn textureSample(texture: texture_2d<f32>, sampler: sampler, coords: vec2<f32>) -> vec4<f32>
+@stage("fragment") fn textureSample(texture: texture_2d<f32>, sampler: sampler, coords: vec2<f32>, @const offset: vec2<i32>) -> vec4<f32>
+@stage("fragment") fn textureSample(texture: texture_2d_array<f32>, sampler: sampler, coords: vec2<f32>, array_index: i32) -> vec4<f32>
+@stage("fragment") fn textureSample(texture: texture_2d_array<f32>, sampler: sampler, coords: vec2<f32>, array_index: i32, @const offset: vec2<i32>) -> vec4<f32>
+@stage("fragment") fn textureSample(texture: texture_3d<f32>, sampler: sampler, coords: vec3<f32>) -> vec4<f32>
+@stage("fragment") fn textureSample(texture: texture_3d<f32>, sampler: sampler, coords: vec3<f32>, @const offset: vec3<i32>) -> vec4<f32>
+@stage("fragment") fn textureSample(texture: texture_cube<f32>, sampler: sampler, coords: vec3<f32>) -> vec4<f32>
+@stage("fragment") fn textureSample(texture: texture_cube_array<f32>, sampler: sampler, coords: vec3<f32>, array_index: i32) -> vec4<f32>
+@stage("fragment") fn textureSample(texture: texture_depth_2d, sampler: sampler, coords: vec2<f32>) -> f32
+@stage("fragment") fn textureSample(texture: texture_depth_2d, sampler: sampler, coords: vec2<f32>, @const offset: vec2<i32>) -> f32
+@stage("fragment") fn textureSample(texture: texture_depth_2d_array, sampler: sampler, coords: vec2<f32>, array_index: i32) -> f32
+@stage("fragment") fn textureSample(texture: texture_depth_2d_array, sampler: sampler, coords: vec2<f32>, array_index: i32, @const offset: vec2<i32>) -> f32
+@stage("fragment") fn textureSample(texture: texture_depth_cube, sampler: sampler, coords: vec3<f32>) -> f32
+@stage("fragment") fn textureSample(texture: texture_depth_cube_array, sampler: sampler, coords: vec3<f32>, array_index: i32) -> f32
+@stage("fragment") fn textureSampleBias(texture: texture_2d<f32>, sampler: sampler, coords: vec2<f32>, bias: f32) -> vec4<f32>
+@stage("fragment") fn textureSampleBias(texture: texture_2d<f32>, sampler: sampler, coords: vec2<f32>, bias: f32, @const offset: vec2<i32>) -> vec4<f32>
+@stage("fragment") fn textureSampleBias(texture: texture_2d_array<f32>, sampler: sampler, coords: vec2<f32>, array_index: i32, bias: f32) -> vec4<f32>
+@stage("fragment") fn textureSampleBias(texture: texture_2d_array<f32>, sampler: sampler, coords: vec2<f32>, array_index: i32, bias: f32, @const offset: vec2<i32>) -> vec4<f32>
+@stage("fragment") fn textureSampleBias(texture: texture_3d<f32>, sampler: sampler, coords: vec3<f32>, bias: f32) -> vec4<f32>
+@stage("fragment") fn textureSampleBias(texture: texture_3d<f32>, sampler: sampler, coords: vec3<f32>, bias: f32, @const offset: vec3<i32>) -> vec4<f32>
+@stage("fragment") fn textureSampleBias(texture: texture_cube<f32>, sampler: sampler, coords: vec3<f32>, bias: f32) -> vec4<f32>
+@stage("fragment") fn textureSampleBias(texture: texture_cube_array<f32>, sampler: sampler, coords: vec3<f32>, array_index: i32, bias: f32) -> vec4<f32>
+@stage("fragment") fn textureSampleCompare(texture: texture_depth_2d, sampler: sampler_comparison, coords: vec2<f32>, depth_ref: f32) -> f32
+@stage("fragment") fn textureSampleCompare(texture: texture_depth_2d, sampler: sampler_comparison, coords: vec2<f32>, depth_ref: f32, @const offset: vec2<i32>) -> f32
+@stage("fragment") fn textureSampleCompare(texture: texture_depth_2d_array, sampler: sampler_comparison, coords: vec2<f32>, array_index: i32, depth_ref: f32) -> f32
+@stage("fragment") fn textureSampleCompare(texture: texture_depth_2d_array, sampler: sampler_comparison, coords: vec2<f32>, array_index: i32, depth_ref: f32, @const offset: vec2<i32>) -> f32
+@stage("fragment") fn textureSampleCompare(texture: texture_depth_cube, sampler: sampler_comparison, coords: vec3<f32>, depth_ref: f32) -> f32
+@stage("fragment") fn textureSampleCompare(texture: texture_depth_cube_array, sampler: sampler_comparison, coords: vec3<f32>, array_index: i32, depth_ref: f32) -> f32
fn textureSampleCompareLevel(texture: texture_depth_2d, sampler: sampler_comparison, coords: vec2<f32>, depth_ref: f32) -> f32
-fn textureSampleCompareLevel(texture: texture_depth_2d, sampler: sampler_comparison, coords: vec2<f32>, depth_ref: f32, offset: vec2<i32>) -> f32
+fn textureSampleCompareLevel(texture: texture_depth_2d, sampler: sampler_comparison, coords: vec2<f32>, depth_ref: f32, @const offset: vec2<i32>) -> f32
fn textureSampleCompareLevel(texture: texture_depth_2d_array, sampler: sampler_comparison, coords: vec2<f32>, array_index: i32, depth_ref: f32) -> f32
-fn textureSampleCompareLevel(texture: texture_depth_2d_array, sampler: sampler_comparison, coords: vec2<f32>, array_index: i32, depth_ref: f32, offset: vec2<i32>) -> f32
+fn textureSampleCompareLevel(texture: texture_depth_2d_array, sampler: sampler_comparison, coords: vec2<f32>, array_index: i32, depth_ref: f32, @const offset: vec2<i32>) -> f32
fn textureSampleCompareLevel(texture: texture_depth_cube, sampler: sampler_comparison, coords: vec3<f32>, depth_ref: f32) -> f32
fn textureSampleCompareLevel(texture: texture_depth_cube_array, sampler: sampler_comparison, coords: vec3<f32>, array_index: i32, depth_ref: f32) -> f32
fn textureSampleGrad(texture: texture_2d<f32>, sampler: sampler, coords: vec2<f32>, ddx: vec2<f32>, ddy: vec2<f32>) -> vec4<f32>
-fn textureSampleGrad(texture: texture_2d<f32>, sampler: sampler, coords: vec2<f32>, ddx: vec2<f32>, ddy: vec2<f32>, offset: vec2<i32>) -> vec4<f32>
+fn textureSampleGrad(texture: texture_2d<f32>, sampler: sampler, coords: vec2<f32>, ddx: vec2<f32>, ddy: vec2<f32>, @const offset: vec2<i32>) -> vec4<f32>
fn textureSampleGrad(texture: texture_2d_array<f32>, sampler: sampler, coords: vec2<f32>, array_index: i32, ddx: vec2<f32>, ddy: vec2<f32>) -> vec4<f32>
-fn textureSampleGrad(texture: texture_2d_array<f32>, sampler: sampler, coords: vec2<f32>, array_index: i32, ddx: vec2<f32>, ddy: vec2<f32>, offset: vec2<i32>) -> vec4<f32>
+fn textureSampleGrad(texture: texture_2d_array<f32>, sampler: sampler, coords: vec2<f32>, array_index: i32, ddx: vec2<f32>, ddy: vec2<f32>, @const offset: vec2<i32>) -> vec4<f32>
fn textureSampleGrad(texture: texture_3d<f32>, sampler: sampler, coords: vec3<f32>, ddx: vec3<f32>, ddy: vec3<f32>) -> vec4<f32>
-fn textureSampleGrad(texture: texture_3d<f32>, sampler: sampler, coords: vec3<f32>, ddx: vec3<f32>, ddy: vec3<f32>, offset: vec3<i32>) -> vec4<f32>
+fn textureSampleGrad(texture: texture_3d<f32>, sampler: sampler, coords: vec3<f32>, ddx: vec3<f32>, ddy: vec3<f32>, @const offset: vec3<i32>) -> vec4<f32>
fn textureSampleGrad(texture: texture_cube<f32>, sampler: sampler, coords: vec3<f32>, ddx: vec3<f32>, ddy: vec3<f32>) -> vec4<f32>
fn textureSampleGrad(texture: texture_cube_array<f32>, sampler: sampler, coords: vec3<f32>, array_index: i32, ddx: vec3<f32>, ddy: vec3<f32>) -> vec4<f32>
fn textureSampleLevel(texture: texture_2d<f32>, sampler: sampler, coords: vec2<f32>, level: f32) -> vec4<f32>
-fn textureSampleLevel(texture: texture_2d<f32>, sampler: sampler, coords: vec2<f32>, level: f32, offset: vec2<i32>) -> vec4<f32>
+fn textureSampleLevel(texture: texture_2d<f32>, sampler: sampler, coords: vec2<f32>, level: f32, @const offset: vec2<i32>) -> vec4<f32>
fn textureSampleLevel(texture: texture_2d_array<f32>, sampler: sampler, coords: vec2<f32>, array_index: i32, level: f32) -> vec4<f32>
-fn textureSampleLevel(texture: texture_2d_array<f32>, sampler: sampler, coords: vec2<f32>, array_index: i32, level: f32, offset: vec2<i32>) -> vec4<f32>
+fn textureSampleLevel(texture: texture_2d_array<f32>, sampler: sampler, coords: vec2<f32>, array_index: i32, level: f32, @const offset: vec2<i32>) -> vec4<f32>
fn textureSampleLevel(texture: texture_3d<f32>, sampler: sampler, coords: vec3<f32>, level: f32) -> vec4<f32>
-fn textureSampleLevel(texture: texture_3d<f32>, sampler: sampler, coords: vec3<f32>, level: f32, offset: vec3<i32>) -> vec4<f32>
+fn textureSampleLevel(texture: texture_3d<f32>, sampler: sampler, coords: vec3<f32>, level: f32, @const offset: vec3<i32>) -> vec4<f32>
fn textureSampleLevel(texture: texture_cube<f32>, sampler: sampler, coords: vec3<f32>, level: f32) -> vec4<f32>
fn textureSampleLevel(texture: texture_cube_array<f32>, sampler: sampler, coords: vec3<f32>, array_index: i32, level: f32) -> vec4<f32>
fn textureSampleLevel(texture: texture_depth_2d, sampler: sampler, coords: vec2<f32>, level: i32) -> f32
-fn textureSampleLevel(texture: texture_depth_2d, sampler: sampler, coords: vec2<f32>, level: i32, offset: vec2<i32>) -> f32
+fn textureSampleLevel(texture: texture_depth_2d, sampler: sampler, coords: vec2<f32>, level: i32, @const offset: vec2<i32>) -> f32
fn textureSampleLevel(texture: texture_depth_2d_array, sampler: sampler, coords: vec2<f32>, array_index: i32, level: i32) -> f32
-fn textureSampleLevel(texture: texture_depth_2d_array, sampler: sampler, coords: vec2<f32>, array_index: i32, level: i32, offset: vec2<i32>) -> f32
+fn textureSampleLevel(texture: texture_depth_2d_array, sampler: sampler, coords: vec2<f32>, array_index: i32, level: i32, @const offset: vec2<i32>) -> f32
fn textureSampleLevel(texture: texture_depth_cube, sampler: sampler, coords: vec3<f32>, level: i32) -> f32
fn textureSampleLevel(texture: texture_depth_cube_array, sampler: sampler, coords: vec3<f32>, array_index: i32, level: i32) -> f32
fn textureSampleLevel(texture: texture_external, sampler: sampler, coords: vec2<f32>) -> vec4<f32>
@@ -547,14 +611,248 @@ fn textureLoad(texture: texture_depth_2d_array, coords: vec2<i32>, array_index:
fn textureLoad(texture: texture_depth_multisampled_2d, coords: vec2<i32>, sample_index: i32) -> f32
fn textureLoad(texture: texture_external, coords: vec2<i32>) -> vec4<f32>
-[[stage("fragment", "compute")]] fn atomicLoad<T: iu32, S: workgroup_or_storage>(ptr<S, atomic<T>, read_write>) -> T
-[[stage("fragment", "compute")]] fn atomicStore<T: iu32, S: workgroup_or_storage>(ptr<S, atomic<T>, read_write>, T)
-[[stage("fragment", "compute")]] fn atomicAdd<T: iu32, S: workgroup_or_storage>(ptr<S, atomic<T>, read_write>, T) -> T
-[[stage("fragment", "compute")]] fn atomicSub<T: iu32, S: workgroup_or_storage>(ptr<S, atomic<T>, read_write>, T) -> T
-[[stage("fragment", "compute")]] fn atomicMax<T: iu32, S: workgroup_or_storage>(ptr<S, atomic<T>, read_write>, T) -> T
-[[stage("fragment", "compute")]] fn atomicMin<T: iu32, S: workgroup_or_storage>(ptr<S, atomic<T>, read_write>, T) -> T
-[[stage("fragment", "compute")]] fn atomicAnd<T: iu32, S: workgroup_or_storage>(ptr<S, atomic<T>, read_write>, T) -> T
-[[stage("fragment", "compute")]] fn atomicOr<T: iu32, S: workgroup_or_storage>(ptr<S, atomic<T>, read_write>, T) -> T
-[[stage("fragment", "compute")]] fn atomicXor<T: iu32, S: workgroup_or_storage>(ptr<S, atomic<T>, read_write>, T) -> T
-[[stage("fragment", "compute")]] fn atomicExchange<T: iu32, S: workgroup_or_storage>(ptr<S, atomic<T>, read_write>, T) -> T
-[[stage("fragment", "compute")]] fn atomicCompareExchangeWeak<T: iu32, S: workgroup_or_storage>(ptr<S, atomic<T>, read_write>, T, T) -> vec2<T>
+@stage("fragment", "compute") fn atomicLoad<T: iu32, S: workgroup_or_storage>(ptr<S, atomic<T>, read_write>) -> T
+@stage("fragment", "compute") fn atomicStore<T: iu32, S: workgroup_or_storage>(ptr<S, atomic<T>, read_write>, T)
+@stage("fragment", "compute") fn atomicAdd<T: iu32, S: workgroup_or_storage>(ptr<S, atomic<T>, read_write>, T) -> T
+@stage("fragment", "compute") fn atomicSub<T: iu32, S: workgroup_or_storage>(ptr<S, atomic<T>, read_write>, T) -> T
+@stage("fragment", "compute") fn atomicMax<T: iu32, S: workgroup_or_storage>(ptr<S, atomic<T>, read_write>, T) -> T
+@stage("fragment", "compute") fn atomicMin<T: iu32, S: workgroup_or_storage>(ptr<S, atomic<T>, read_write>, T) -> T
+@stage("fragment", "compute") fn atomicAnd<T: iu32, S: workgroup_or_storage>(ptr<S, atomic<T>, read_write>, T) -> T
+@stage("fragment", "compute") fn atomicOr<T: iu32, S: workgroup_or_storage>(ptr<S, atomic<T>, read_write>, T) -> T
+@stage("fragment", "compute") fn atomicXor<T: iu32, S: workgroup_or_storage>(ptr<S, atomic<T>, read_write>, T) -> T
+@stage("fragment", "compute") fn atomicExchange<T: iu32, S: workgroup_or_storage>(ptr<S, atomic<T>, read_write>, T) -> T
+@stage("fragment", "compute") fn atomicCompareExchangeWeak<T: iu32, S: workgroup_or_storage>(ptr<S, atomic<T>, read_write>, T, T) -> __atomic_compare_exchange_result<T>
+
+////////////////////////////////////////////////////////////////////////////////
+// Type constructors //
+////////////////////////////////////////////////////////////////////////////////
+
+// Zero value constructors
+ctor i32() -> i32
+ctor u32() -> u32
+ctor f32() -> f32
+ctor bool() -> bool
+ctor vec2<T: scalar>() -> vec2<T>
+ctor vec3<T: scalar>() -> vec3<T>
+ctor vec4<T: scalar>() -> vec4<T>
+ctor mat2x2() -> mat2x2<f32>
+ctor mat2x3() -> mat2x3<f32>
+ctor mat2x4() -> mat2x4<f32>
+ctor mat3x2() -> mat3x2<f32>
+ctor mat3x3() -> mat3x3<f32>
+ctor mat3x4() -> mat3x4<f32>
+ctor mat4x2() -> mat4x2<f32>
+ctor mat4x3() -> mat4x3<f32>
+ctor mat4x4() -> mat4x4<f32>
+
+// Identity constructors
+ctor i32(i32) -> i32
+ctor u32(u32) -> u32
+ctor f32(f32) -> f32
+ctor bool(bool) -> bool
+ctor vec2<T: scalar>(vec2<T>) -> vec2<T>
+ctor vec3<T: scalar>(vec3<T>) -> vec3<T>
+ctor vec4<T: scalar>(vec4<T>) -> vec4<T>
+ctor mat2x2<f32>(mat2x2<f32>) -> mat2x2<f32>
+ctor mat2x3<f32>(mat2x3<f32>) -> mat2x3<f32>
+ctor mat2x4<f32>(mat2x4<f32>) -> mat2x4<f32>
+ctor mat3x2<f32>(mat3x2<f32>) -> mat3x2<f32>
+ctor mat3x3<f32>(mat3x3<f32>) -> mat3x3<f32>
+ctor mat3x4<f32>(mat3x4<f32>) -> mat3x4<f32>
+ctor mat4x2<f32>(mat4x2<f32>) -> mat4x2<f32>
+ctor mat4x3<f32>(mat4x3<f32>) -> mat4x3<f32>
+ctor mat4x4<f32>(mat4x4<f32>) -> mat4x4<f32>
+
+// Vector constructors
+ctor vec2<T: abstract_or_scalar>(T) -> vec2<T>
+ctor vec2<T: abstract_or_scalar>(x: T, y: T) -> vec2<T>
+ctor vec3<T: abstract_or_scalar>(T) -> vec3<T>
+ctor vec3<T: abstract_or_scalar>(x: T, y: T, z: T) -> vec3<T>
+ctor vec3<T: abstract_or_scalar>(xy: vec2<T>, z: T) -> vec3<T>
+ctor vec3<T: abstract_or_scalar>(x: T, yz: vec2<T>) -> vec3<T>
+ctor vec4<T: abstract_or_scalar>(T) -> vec4<T>
+ctor vec4<T: abstract_or_scalar>(x: T, y: T, z: T, w: T) -> vec4<T>
+ctor vec4<T: abstract_or_scalar>(xy: vec2<T>, z: T, w: T) -> vec4<T>
+ctor vec4<T: abstract_or_scalar>(x: T, yz: vec2<T>, w: T) -> vec4<T>
+ctor vec4<T: abstract_or_scalar>(x: T, y: T, zw: vec2<T>) -> vec4<T>
+ctor vec4<T: abstract_or_scalar>(xy: vec2<T>, zw: vec2<T>) -> vec4<T>
+ctor vec4<T: abstract_or_scalar>(xyz: vec3<T>, w: T) -> vec4<T>
+ctor vec4<T: abstract_or_scalar>(x: T, yzw: vec3<T>) -> vec4<T>
+
+// Matrix constructors
+ctor mat2x2<T: af_f32>(T) -> mat2x2<T>
+ctor mat2x2<T: af_f32>(T, T,
+ T, T) -> mat2x2<T>
+ctor mat2x2<T: af_f32>(vec2<T>, vec2<T>) -> mat2x2<T>
+
+ctor mat2x3<T: af_f32>(T) -> mat2x3<T>
+ctor mat2x3<T: af_f32>(T, T, T,
+ T, T, T) -> mat2x3<T>
+ctor mat2x3<T: af_f32>(vec3<T>, vec3<T>) -> mat2x3<T>
+
+ctor mat2x4<T: af_f32>(T) -> mat2x4<T>
+ctor mat2x4<T: af_f32>(T, T, T, T,
+ T, T, T, T) -> mat2x4<T>
+ctor mat2x4<T: af_f32>(vec4<T>, vec4<T>) -> mat2x4<T>
+
+ctor mat3x2<T: af_f32>(T) -> mat3x2<T>
+ctor mat3x2<T: af_f32>(T, T,
+ T, T,
+ T, T) -> mat3x2<T>
+ctor mat3x2<T: af_f32>(vec2<T>, vec2<T>, vec2<T>) -> mat3x2<T>
+
+ctor mat3x3<T: af_f32>(T) -> mat3x3<T>
+ctor mat3x3<T: af_f32>(T, T, T,
+ T, T, T,
+ T, T, T) -> mat3x3<T>
+ctor mat3x3<T: af_f32>(vec3<T>, vec3<T>, vec3<T>) -> mat3x3<T>
+
+ctor mat3x4<T: af_f32>(T) -> mat3x4<T>
+ctor mat3x4<T: af_f32>(T, T, T, T,
+ T, T, T, T,
+ T, T, T, T) -> mat3x4<T>
+ctor mat3x4<T: af_f32>(vec4<T>, vec4<T>, vec4<T>) -> mat3x4<T>
+
+ctor mat4x2<T: af_f32>(T) -> mat4x2<T>
+ctor mat4x2<T: af_f32>(T, T,
+ T, T,
+ T, T,
+ T, T) -> mat4x2<T>
+ctor mat4x2<T: af_f32>(vec2<T>, vec2<T>, vec2<T>, vec2<T>) -> mat4x2<T>
+
+ctor mat4x3<T: af_f32>(T) -> mat4x3<T>
+ctor mat4x3<T: af_f32>(T, T, T,
+ T, T, T,
+ T, T, T,
+ T, T, T) -> mat4x3<T>
+ctor mat4x3<T: af_f32>(vec3<T>, vec3<T>, vec3<T>, vec3<T>) -> mat4x3<T>
+
+ctor mat4x4<T: af_f32>(T) -> mat4x4<T>
+ctor mat4x4<T: af_f32>(T, T, T, T,
+ T, T, T, T,
+ T, T, T, T,
+ T, T, T, T) -> mat4x4<T>
+ctor mat4x4<T: af_f32>(vec4<T>, vec4<T>, vec4<T>, vec4<T>) -> mat4x4<T>
+
+////////////////////////////////////////////////////////////////////////////////
+// Type conversions //
+////////////////////////////////////////////////////////////////////////////////
+conv f32<T: scalar_no_f32>(T) -> f32
+conv i32<T: scalar_no_i32>(T) -> i32
+conv u32<T: scalar_no_u32>(T) -> u32
+conv bool<T: scalar_no_bool>(T) -> bool
+
+conv vec2<T: f32, U: scalar_no_f32>(vec2<U>) -> vec2<f32>
+conv vec2<T: i32, U: scalar_no_i32>(vec2<U>) -> vec2<i32>
+conv vec2<T: u32, U: scalar_no_u32>(vec2<U>) -> vec2<u32>
+conv vec2<T: bool, U: scalar_no_bool>(vec2<U>) -> vec2<bool>
+
+conv vec3<T: f32, U: scalar_no_f32>(vec3<U>) -> vec3<f32>
+conv vec3<T: i32, U: scalar_no_i32>(vec3<U>) -> vec3<i32>
+conv vec3<T: u32, U: scalar_no_u32>(vec3<U>) -> vec3<u32>
+conv vec3<T: bool, U: scalar_no_bool>(vec3<U>) -> vec3<bool>
+
+conv vec4<T: f32, U: scalar_no_f32>(vec4<U>) -> vec4<f32>
+conv vec4<T: i32, U: scalar_no_i32>(vec4<U>) -> vec4<i32>
+conv vec4<T: u32, U: scalar_no_u32>(vec4<U>) -> vec4<u32>
+conv vec4<T: bool, U: scalar_no_bool>(vec4<U>) -> vec4<bool>
+
+////////////////////////////////////////////////////////////////////////////////
+// Operators //
+// //
+// The operator declarations below declare all the unary and binary operators //
+// supported by the WGSL language (with the exception of the address-of and   //
+// dereference unary operators). //
+// //
+// The syntax is almost identical to builtin functions, except we use 'op' //
+// instead of 'fn'. The resolving rules are identical to builtins, which are  //
+// described in detail above. //
+// //
+////////////////////////////////////////////////////////////////////////////////
+
+////////////////////////////////////////////////////////////////////////////////
+// Unary Operators //
+////////////////////////////////////////////////////////////////////////////////
+op ! (bool) -> bool
+op ! <N: num> (vec<N, bool>) -> vec<N, bool>
+
+op ~ <T: iu32>(T) -> T
+op ~ <T: iu32, N: num> (vec<N, T>) -> vec<N, T>
+
+op - <T: fi32>(T) -> T
+op - <T: fi32, N: num> (vec<N, T>) -> vec<N, T>
+
+////////////////////////////////////////////////////////////////////////////////
+// Binary Operators //
+////////////////////////////////////////////////////////////////////////////////
+op + <T: fiu32>(T, T) -> T
+op + <T: fiu32, N: num> (vec<N, T>, vec<N, T>) -> vec<N, T>
+op + <T: fiu32, N: num> (vec<N, T>, T) -> vec<N, T>
+op + <T: fiu32, N: num> (T, vec<N, T>) -> vec<N, T>
+op + <N: num, M: num> (mat<N, M, f32>, mat<N, M, f32>) -> mat<N, M, f32>
+
+op - <T: fiu32>(T, T) -> T
+op - <T: fiu32, N: num> (vec<N, T>, vec<N, T>) -> vec<N, T>
+op - <T: fiu32, N: num> (vec<N, T>, T) -> vec<N, T>
+op - <T: fiu32, N: num> (T, vec<N, T>) -> vec<N, T>
+op - <N: num, M: num> (mat<N, M, f32>, mat<N, M, f32>) -> mat<N, M, f32>
+
+op * <T: fiu32>(T, T) -> T
+op * <T: fiu32, N: num> (vec<N, T>, vec<N, T>) -> vec<N, T>
+op * <T: fiu32, N: num> (vec<N, T>, T) -> vec<N, T>
+op * <T: fiu32, N: num> (T, vec<N, T>) -> vec<N, T>
+op * <N: num, M: num> (f32, mat<N, M, f32>) -> mat<N, M, f32>
+op * <N: num, M: num> (mat<N, M, f32>, f32) -> mat<N, M, f32>
+op * <C: num, R: num> (mat<C, R, f32>, vec<C, f32>) -> vec<R, f32>
+op * <C: num, R: num> (vec<R, f32>, mat<C, R, f32>) -> vec<C, f32>
+op * <K: num, C: num, R: num> (mat<K, R, f32>, mat<C, K, f32>) -> mat<C, R, f32>
+
+op / <T: fiu32>(T, T) -> T
+op / <T: fiu32, N: num> (vec<N, T>, vec<N, T>) -> vec<N, T>
+op / <T: fiu32, N: num> (vec<N, T>, T) -> vec<N, T>
+op / <T: fiu32, N: num> (T, vec<N, T>) -> vec<N, T>
+
+op % <T: fiu32>(T, T) -> T
+op % <T: fiu32, N: num> (vec<N, T>, vec<N, T>) -> vec<N, T>
+op % <T: fiu32, N: num> (vec<N, T>, T) -> vec<N, T>
+op % <T: fiu32, N: num> (T, vec<N, T>) -> vec<N, T>
+
+op ^ <T: iu32>(T, T) -> T
+op ^ <T: iu32, N: num> (vec<N, T>, vec<N, T>) -> vec<N, T>
+
+op & (bool, bool) -> bool
+op & <N: num> (vec<N, bool>, vec<N, bool>) -> vec<N, bool>
+op & <T: iu32>(T, T) -> T
+op & <T: iu32, N: num> (vec<N, T>, vec<N, T>) -> vec<N, T>
+
+op | (bool, bool) -> bool
+op | <N: num> (vec<N, bool>, vec<N, bool>) -> vec<N, bool>
+op | <T: iu32>(T, T) -> T
+op | <T: iu32, N: num> (vec<N, T>, vec<N, T>) -> vec<N, T>
+
+op && (bool, bool) -> bool
+op || (bool, bool) -> bool
+
+op == <T: scalar>(T, T) -> bool
+op == <T: scalar, N: num> (vec<N, T>, vec<N, T>) -> vec<N, bool>
+
+op != <T: scalar>(T, T) -> bool
+op != <T: scalar, N: num> (vec<N, T>, vec<N, T>) -> vec<N, bool>
+
+op < <T: fiu32>(T, T) -> bool
+op < <T: fiu32, N: num> (vec<N, T>, vec<N, T>) -> vec<N, bool>
+
+op > <T: fiu32>(T, T) -> bool
+op > <T: fiu32, N: num> (vec<N, T>, vec<N, T>) -> vec<N, bool>
+
+op <= <T: fiu32>(T, T) -> bool
+op <= <T: fiu32, N: num> (vec<N, T>, vec<N, T>) -> vec<N, bool>
+
+op >= <T: fiu32>(T, T) -> bool
+op >= <T: fiu32, N: num> (vec<N, T>, vec<N, T>) -> vec<N, bool>
+
+op << <T: iu32>(T, u32) -> T
+op << <T: iu32, N: num> (vec<N, T>, vec<N, u32>) -> vec<N, T>
+
+op >> <T: iu32>(T, u32) -> T
+op >> <T: iu32, N: num> (vec<N, T>, vec<N, u32>) -> vec<N, T>
diff --git a/chromium/third_party/dawn/src/tint/number.cc b/chromium/third_party/dawn/src/tint/number.cc
new file mode 100644
index 00000000000..3ead5b041ca
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/number.cc
@@ -0,0 +1,57 @@
+// Copyright 2022 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/tint/number.h"
+
+#include <algorithm>
+#include <cstring>
+#include <ostream>
+
+namespace tint {
+
+std::ostream& operator<<(std::ostream& out, ConversionFailure failure) {
+ switch (failure) {
+ case ConversionFailure::kExceedsPositiveLimit:
+ return out << "value exceeds positive limit for type";
+ case ConversionFailure::kExceedsNegativeLimit:
+ return out << "value exceeds negative limit for type";
+ }
+ return out << "<unknown>";
+}
+
+f16::type f16::Quantize(f16::type value) {
+ if (value > kHighest) {
+ return std::numeric_limits<f16::type>::infinity();
+ }
+ if (value < kLowest) {
+ return -std::numeric_limits<f16::type>::infinity();
+ }
+    // At this point the value must be within the finite range of an f16.
+ uint32_t u32;
+ memcpy(&u32, &value, 4);
+ if ((u32 & 0x7fffffffu) == 0) { // ~sign
+ return value; // +/- zero
+ }
+ if ((u32 & 0x7f800000) == 0x7f800000) { // exponent all 1's
+ return value; // inf or nan
+ }
+ // f32 bits : 1 sign, 8 exponent, 23 mantissa
+ // f16 bits : 1 sign, 5 exponent, 10 mantissa
+ // Mask the value to preserve the sign, exponent and most-significant 10 mantissa bits.
+ u32 = u32 & 0xffffe000u;
+ memcpy(&value, &u32, 4);
+ return value;
+}
+
+} // namespace tint
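The quantization above works directly on the IEEE 754 binary32 bit pattern: values outside the finite f16 range saturate to infinity, zero/inf/NaN pass through unchanged, and everything else has its 13 low mantissa bits cleared so that only the sign, the 8 exponent bits and the top 10 mantissa bits survive. A small standalone sketch of the same idea, independent of the Tint build (the helper name quantize_like_f16 is illustrative only, not part of Tint):

#include <cstdint>
#include <cstring>
#include <iostream>
#include <limits>

// Illustrative re-implementation of the truncation performed by f16::Quantize above.
static float quantize_like_f16(float value) {
    constexpr float kF16Highest = 65504.0f;  // largest finite f16, 2^15 * (1 + 1023/1024)
    if (value > kF16Highest) {
        return std::numeric_limits<float>::infinity();
    }
    if (value < -kF16Highest) {
        return -std::numeric_limits<float>::infinity();
    }
    uint32_t u32;
    std::memcpy(&u32, &value, sizeof(value));
    if ((u32 & 0x7fffffffu) == 0 ||            // +/- zero
        (u32 & 0x7f800000u) == 0x7f800000u) {  // inf or NaN
        return value;
    }
    u32 &= 0xffffe000u;  // keep sign, exponent and the 10 most-significant mantissa bits
    std::memcpy(&value, &u32, sizeof(value));
    return value;
}

int main() {
    std::cout << quantize_like_f16(1.0004883f) << "\n";  // 1 (the extra mantissa bits are dropped)
    std::cout << quantize_like_f16(65504.003f) << "\n";  // inf (just above the finite f16 range)
}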
diff --git a/chromium/third_party/dawn/src/tint/number.h b/chromium/third_party/dawn/src/tint/number.h
new file mode 100644
index 00000000000..b4c5ca41779
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/number.h
@@ -0,0 +1,415 @@
+// Copyright 2021 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef SRC_TINT_NUMBER_H_
+#define SRC_TINT_NUMBER_H_
+
+#include <stdint.h>
+#include <functional>
+#include <limits>
+#include <ostream>
+// TODO(https://crbug.com/dawn/1379) Update cpplint and remove NOLINT
+#include <optional>  // NOLINT(build/include_order)
+
+#include "src/tint/utils/compiler_macros.h"
+#include "src/tint/utils/result.h"
+
+// Forward declaration
+namespace tint {
+/// Number wraps an integer or floating-point number, enforcing explicit casting.
+template <typename T>
+struct Number;
+} // namespace tint
+
+namespace tint::detail {
+/// An empty structure used as a unique template type for Number when
+/// specializing for the f16 type.
+struct NumberKindF16 {};
+
+/// Helper for obtaining the underlying type for a Number.
+template <typename T>
+struct NumberUnwrapper {
+    /// When T is not a Number, type is defined to be T.
+ using type = T;
+};
+
+/// NumberUnwrapper specialization for Number<T>.
+template <typename T>
+struct NumberUnwrapper<Number<T>> {
+ /// The Number's underlying type.
+ using type = typename Number<T>::type;
+};
+
+} // namespace tint::detail
+
+namespace tint {
+
+/// Evaluates to true iff T is a floating-point type or is NumberKindF16.
+template <typename T>
+constexpr bool IsFloatingPoint =
+ std::is_floating_point_v<T> || std::is_same_v<T, detail::NumberKindF16>;
+
+/// Evaluates to true iff T is an integer type.
+template <typename T>
+constexpr bool IsInteger = std::is_integral_v<T>;
+
+/// Evaluates to true iff T is an integer type, a floating-point type, or NumberKindF16.
+template <typename T>
+constexpr bool IsNumeric = IsInteger<T> || IsFloatingPoint<T>;
+
+/// Number wraps an integer or floating-point number, enforcing explicit casting.
+template <typename T>
+struct Number {
+ static_assert(IsNumeric<T>, "Number<T> constructed with non-numeric type");
+
+ /// type is the underlying type of the Number
+ using type = T;
+
+ /// Highest finite representable value of this type.
+ static constexpr type kHighest = std::numeric_limits<type>::max();
+
+ /// Lowest finite representable value of this type.
+ static constexpr type kLowest = std::numeric_limits<type>::lowest();
+
+ /// Smallest positive normal value of this type.
+ static constexpr type kSmallest =
+ std::is_integral_v<type> ? 0 : std::numeric_limits<type>::min();
+
+ /// Constructor. The value is zero-initialized.
+ Number() = default;
+
+ /// Constructor.
+ /// @param v the value to initialize this Number to
+ template <typename U>
+ explicit Number(U v) : value(static_cast<T>(v)) {}
+
+ /// Constructor.
+ /// @param v the value to initialize this Number to
+ template <typename U>
+ explicit Number(Number<U> v) : value(static_cast<T>(v.value)) {}
+
+ /// Conversion operator
+ /// @returns the value as T
+ operator T() const { return value; }
+
+ /// Negation operator
+ /// @returns the negative value of the number
+ Number operator-() const { return Number(-value); }
+
+ /// Assignment operator
+ /// @param v the new value
+ /// @returns this Number so calls can be chained
+ Number& operator=(T v) {
+ value = v;
+ return *this;
+ }
+
+ /// The number value
+ type value = {};
+};
+
+/// Resolves to the underlying type for a Number.
+template <typename T>
+using UnwrapNumber = typename detail::NumberUnwrapper<T>::type;
+
+/// Writes the number to the ostream.
+/// @param out the std::ostream to write to
+/// @param num the Number
+/// @return the std::ostream so calls can be chained
+template <typename T>
+inline std::ostream& operator<<(std::ostream& out, Number<T> num) {
+ return out << num.value;
+}
+
+/// Equality operator.
+/// @param a the LHS number
+/// @param b the RHS number
+/// @returns true if the numbers `a` and `b` are exactly equal.
+template <typename A, typename B>
+bool operator==(Number<A> a, Number<B> b) {
+ using T = decltype(a.value + b.value);
+ return std::equal_to<T>()(static_cast<T>(a.value), static_cast<T>(b.value));
+}
+
+/// Inequality operator.
+/// @param a the LHS number
+/// @param b the RHS number
+/// @returns true if the numbers `a` and `b` are exactly unequal.
+template <typename A, typename B>
+bool operator!=(Number<A> a, Number<B> b) {
+ return !(a == b);
+}
+
+/// Equality operator.
+/// @param a the LHS number
+/// @param b the RHS number
+/// @returns true if the numbers `a` and `b` are exactly equal.
+template <typename A, typename B>
+std::enable_if_t<IsNumeric<B>, bool> operator==(Number<A> a, B b) {
+ return a == Number<B>(b);
+}
+
+/// Inequality operator.
+/// @param a the LHS number
+/// @param b the RHS number
+/// @returns true if the numbers `a` and `b` are exactly unequal.
+template <typename A, typename B>
+std::enable_if_t<IsNumeric<B>, bool> operator!=(Number<A> a, B b) {
+ return !(a == b);
+}
+
+/// Equality operator.
+/// @param a the LHS number
+/// @param b the RHS number
+/// @returns true if the numbers `a` and `b` are exactly equal.
+template <typename A, typename B>
+std::enable_if_t<IsNumeric<A>, bool> operator==(A a, Number<B> b) {
+ return Number<A>(a) == b;
+}
+
+/// Inequality operator.
+/// @param a the LHS number
+/// @param b the RHS number
+/// @returns true if the numbers `a` and `b` are exactly unequal.
+template <typename A, typename B>
+std::enable_if_t<IsNumeric<A>, bool> operator!=(A a, Number<B> b) {
+ return !(a == b);
+}
+
+/// The partial specialization of Number for the f16 type, storing the f16 value as a float,
+/// and enforcing proper explicit casting.
+template <>
+struct Number<detail::NumberKindF16> {
+ /// C++ does not have a native float16 type, so we use a 32-bit float instead.
+ using type = float;
+
+ /// Highest finite representable value of this type.
+ static constexpr type kHighest = 65504.0f; // 2¹⁵ × (1 + 1023/1024)
+
+ /// Lowest finite representable value of this type.
+ static constexpr type kLowest = -65504.0f;
+
+ /// Smallest positive normal value of this type.
+ static constexpr type kSmallest = 0.00006103515625f; // 2⁻¹⁴
+
+ /// Constructor. The value is zero-initialized.
+ Number() = default;
+
+ /// Constructor.
+ /// @param v the value to initialize this Number to
+ template <typename U>
+ explicit Number(U v) : value(Quantize(static_cast<type>(v))) {}
+
+ /// Constructor.
+ /// @param v the value to initialize this Number to
+ template <typename U>
+ explicit Number(Number<U> v) : value(Quantize(static_cast<type>(v.value))) {}
+
+ /// Conversion operator
+ /// @returns the value as the internal representation type of F16
+ operator float() const { return value; }
+
+ /// Negation operator
+ /// @returns the negative value of the number
+ Number operator-() const { return Number<detail::NumberKindF16>(-value); }
+
+ /// Assignment operator with parameter as native floating point type
+ /// @param v the new value
+ /// @returns this Number so calls can be chained
+ Number& operator=(type v) {
+ value = Quantize(v);
+ return *this;
+ }
+
+ /// @param value the input float32 value
+ /// @returns the float32 value quantized to the smaller float16 value, through truncation of the
+ /// mantissa bits (no rounding). If the float32 value is too large (positive or negative) to be
+ /// represented by a float16 value, then the returned value will be positive or negative
+ /// infinity.
+ static type Quantize(type value);
+
+ /// The number value, stored as float
+ type value = {};
+};
+
+/// `AInt` is a type alias to `Number<int64_t>`.
+using AInt = Number<int64_t>;
+/// `AFloat` is a type alias to `Number<double>`.
+using AFloat = Number<double>;
+
+/// `i32` is a type alias to `Number<int32_t>`.
+using i32 = Number<int32_t>;
+/// `u32` is a type alias to `Number<uint32_t>`.
+using u32 = Number<uint32_t>;
+/// `f32` is a type alias to `Number<float>`
+using f32 = Number<float>;
+/// `f16` is a type alias to `Number<detail::NumberKindF16>`, which should be IEEE 754 binary16.
+/// However, since C++ does not have a native binary16 type, the value is stored as a float.
+using f16 = Number<detail::NumberKindF16>;
+
+/// Enumerator of failure reasons when converting from one number to another.
+enum class ConversionFailure {
+ kExceedsPositiveLimit, // The value was too big (+'ve) to fit in the target type
+ kExceedsNegativeLimit, // The value was too big (-'ve) to fit in the target type
+};
+
+/// Writes the conversion failure message to the ostream.
+/// @param out the std::ostream to write to
+/// @param failure the ConversionFailure
+/// @return the std::ostream so calls can be chained
+std::ostream& operator<<(std::ostream& out, ConversionFailure failure);
+
+/// Converts a number from one type to another, checking that the value fits in the target type.
+/// @returns the resulting value of the conversion, or a failure reason.
+template <typename TO, typename FROM>
+utils::Result<TO, ConversionFailure> CheckedConvert(Number<FROM> num) {
+ using T = decltype(UnwrapNumber<TO>() + num.value);
+ const auto value = static_cast<T>(num.value);
+ if (value > static_cast<T>(TO::kHighest)) {
+ return ConversionFailure::kExceedsPositiveLimit;
+ }
+ if (value < static_cast<T>(TO::kLowest)) {
+ return ConversionFailure::kExceedsNegativeLimit;
+ }
+ return TO(value); // Success
+}
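For reference, a call-site sketch of the check above (hypothetical code, assuming the Tint source tree is on the include path; it relies only on the operator== behaviour exercised by the unit tests in number_test.cc further below):

#include <cstdint>

#include "src/tint/number.h"

void CheckedConvertExamples() {
    using namespace tint;
    // The value fits, so the result compares equal to the converted number.
    bool fits = CheckedConvert<i32>(AInt(42)) == i32(42);                      // true
    // The value does not fit, so the result compares equal to the failure reason.
    bool too_big = CheckedConvert<i32>(AInt(int64_t(1) << 40)) ==
                   ConversionFailure::kExceedsPositiveLimit;                   // true
    (void)fits;
    (void)too_big;
}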
+
+/// Defines 'TINT_HAS_OVERFLOW_BUILTINS' if the compiler provides overflow-checking builtins.
+/// If the compiler does not support these builtins, then they are emulated with the algorithms
+/// described in:
+/// https://wiki.sei.cmu.edu/confluence/display/c/INT32-C.+Ensure+that+operations+on+signed+integers+do+not+result+in+overflow
+#if defined(__GNUC__) && __GNUC__ >= 5
+#define TINT_HAS_OVERFLOW_BUILTINS
+#elif defined(__clang__)
+#if __has_builtin(__builtin_add_overflow) && __has_builtin(__builtin_mul_overflow)
+#define TINT_HAS_OVERFLOW_BUILTINS
+#endif
+#endif
+
+/// @returns a + b, or an empty optional if the resulting value overflowed the AInt
+inline std::optional<AInt> CheckedAdd(AInt a, AInt b) {
+ int64_t result;
+#ifdef TINT_HAS_OVERFLOW_BUILTINS
+ if (__builtin_add_overflow(a.value, b.value, &result)) {
+ return {};
+ }
+#else // TINT_HAS_OVERFLOW_BUILTINS
+ if (a.value >= 0) {
+ if (AInt::kHighest - a.value < b.value) {
+ return {};
+ }
+ } else {
+ if (b.value < AInt::kLowest - a.value) {
+ return {};
+ }
+ }
+ result = a.value + b.value;
+#endif // TINT_HAS_OVERFLOW_BUILTINS
+ return AInt(result);
+}
+
+/// @returns a * b, or an empty optional if the resulting value overflowed the AInt
+inline std::optional<AInt> CheckedMul(AInt a, AInt b) {
+ int64_t result;
+#ifdef TINT_HAS_OVERFLOW_BUILTINS
+ if (__builtin_mul_overflow(a.value, b.value, &result)) {
+ return {};
+ }
+#else // TINT_HAS_OVERFLOW_BUILTINS
+ if (a > 0) {
+ if (b > 0) {
+ if (a > (AInt::kHighest / b)) {
+ return {};
+ }
+ } else {
+ if (b < (AInt::kLowest / a)) {
+ return {};
+ }
+ }
+ } else {
+ if (b > 0) {
+ if (a < (AInt::kLowest / b)) {
+ return {};
+ }
+ } else {
+ if ((a != 0) && (b < (AInt::kHighest / a))) {
+ return {};
+ }
+ }
+ }
+ result = a.value * b.value;
+#endif // TINT_HAS_OVERFLOW_BUILTINS
+ return AInt(result);
+}
+
+/// @returns a * b + c, or an empty optional if the value overflowed the AInt
+inline std::optional<AInt> CheckedMadd(AInt a, AInt b, AInt c) {
+ // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=80635
+ TINT_BEGIN_DISABLE_WARNING(MAYBE_UNINITIALIZED);
+
+ if (auto mul = CheckedMul(a, b)) {
+ return CheckedAdd(mul.value(), c);
+ }
+ return {};
+
+ TINT_END_DISABLE_WARNING(MAYBE_UNINITIALIZED);
+}
+
+} // namespace tint
+
+namespace tint::number_suffixes {
+
+/// Literal suffix for abstract integer literals
+inline AInt operator""_a(unsigned long long int value) { // NOLINT
+ return AInt(static_cast<int64_t>(value));
+}
+
+/// Literal suffix for abstract float literals
+inline AFloat operator""_a(long double value) { // NOLINT
+ return AFloat(static_cast<double>(value));
+}
+
+/// Literal suffix for i32 literals
+inline i32 operator""_i(unsigned long long int value) { // NOLINT
+ return i32(static_cast<int32_t>(value));
+}
+
+/// Literal suffix for u32 literals
+inline u32 operator""_u(unsigned long long int value) { // NOLINT
+ return u32(static_cast<uint32_t>(value));
+}
+
+/// Literal suffix for f32 literals
+inline f32 operator""_f(long double value) { // NOLINT
+ return f32(static_cast<double>(value));
+}
+
+/// Literal suffix for f32 literals
+inline f32 operator""_f(unsigned long long int value) { // NOLINT
+ return f32(static_cast<double>(value));
+}
+
+/// Literal suffix for f16 literals
+inline f16 operator""_h(long double value) { // NOLINT
+ return f16(static_cast<double>(value));
+}
+
+/// Literal suffix for f16 literals
+inline f16 operator""_h(unsigned long long int value) { // NOLINT
+ return f16(static_cast<double>(value));
+}
+
+} // namespace tint::number_suffixes
+
+#endif // SRC_TINT_NUMBER_H_
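Taken together, a short usage sketch for this header (hypothetical code outside the Tint tree, assuming the Tint sources are on the include path; the aliases and literal suffixes are the ones declared above):

#include <iostream>

#include "src/tint/number.h"

using namespace tint;                   // AInt, i32, f16, CheckedAdd, ...
using namespace tint::number_suffixes;  // _a, _i, _u, _f, _h

int main() {
    AInt big = 9000000000_a;  // abstract integer, stored as int64_t
    i32 small = 42_i;         // 32-bit signed; construction is explicit
    f16 h = 1.0004883_h;      // quantized on construction (stores 1.0f)

    // Checked arithmetic returns std::optional<AInt>: empty on overflow.
    auto sum = CheckedAdd(AInt(AInt::kHighest), 1_a);  // no value (overflow)
    auto prod = CheckedMul(big, 2_a);                  // holds 18000000000
    auto fused = CheckedMadd(big, 2_a, 1_a);           // holds 18000000001

    std::cout << small << " " << h << " "
              << (sum ? "ok" : "overflow") << " "
              << (prod ? "ok" : "overflow") << " "
              << (fused ? "ok" : "overflow") << "\n";
}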
diff --git a/chromium/third_party/dawn/src/tint/number_test.cc b/chromium/third_party/dawn/src/tint/number_test.cc
new file mode 100644
index 00000000000..34b4d396243
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/number_test.cc
@@ -0,0 +1,308 @@
+// Copyright 2022 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <cmath>
+#include <tuple>
+#include <vector>
+
+#include "src/tint/program_builder.h"
+#include "src/tint/utils/compiler_macros.h"
+
+#include "gtest/gtest.h"
+
+using namespace tint::number_suffixes; // NOLINT
+
+namespace tint {
+namespace {
+
+constexpr int64_t kHighestI32 = static_cast<int64_t>(std::numeric_limits<int32_t>::max());
+constexpr int64_t kHighestU32 = static_cast<int64_t>(std::numeric_limits<uint32_t>::max());
+constexpr int64_t kLowestI32 = static_cast<int64_t>(std::numeric_limits<int32_t>::min());
+constexpr int64_t kLowestU32 = static_cast<int64_t>(std::numeric_limits<uint32_t>::min());
+
+// Highest float32 value.
+constexpr double kHighestF32 = 0x1.fffffep+127;
+
+// Next ULP up from kHighestF32 for a float64.
+constexpr double kHighestF32NextULP = 0x1.fffffe0000001p+127;
+
+// Smallest positive normal float32 value.
+constexpr double kSmallestF32 = 0x1p-126;
+
+// Highest subnormal value for a float32.
+constexpr double kHighestF32Subnormal = 0x0.fffffep-126;
+
+// Highest float16 value.
+constexpr double kHighestF16 = 0x1.ffcp+15;
+
+// Next ULP up from kHighestF16 for a float64.
+constexpr double kHighestF16NextULP = 0x1.ffc0000000001p+15;
+
+// Smallest positive normal float16 value.
+constexpr double kSmallestF16 = 0x1p-14;
+
+// Highest subnormal value for a float16.
+constexpr double kHighestF16Subnormal = 0x0.ffcp-14;
+
+constexpr double kLowestF32 = -kHighestF32;
+constexpr double kLowestF32NextULP = -kHighestF32NextULP;
+constexpr double kLowestF16 = -kHighestF16;
+constexpr double kLowestF16NextULP = -kHighestF16NextULP;
+
+// MSVC (only in release builds) can grumble about some of the inlined numerical overflow /
+// underflow that's done in this file. We like to think we know what we're doing, so silence the
+// warning.
+TINT_BEGIN_DISABLE_WARNING(CONSTANT_OVERFLOW);
+
+TEST(NumberTest, CheckedConvertIdentity) {
+ EXPECT_EQ(CheckedConvert<AInt>(0_a), 0_a);
+ EXPECT_EQ(CheckedConvert<AFloat>(0_a), 0.0_a);
+ EXPECT_EQ(CheckedConvert<i32>(0_i), 0_i);
+ EXPECT_EQ(CheckedConvert<u32>(0_u), 0_u);
+ EXPECT_EQ(CheckedConvert<f32>(0_f), 0_f);
+ EXPECT_EQ(CheckedConvert<f16>(0_h), 0_h);
+
+ EXPECT_EQ(CheckedConvert<AInt>(1_a), 1_a);
+ EXPECT_EQ(CheckedConvert<AFloat>(1_a), 1.0_a);
+ EXPECT_EQ(CheckedConvert<i32>(1_i), 1_i);
+ EXPECT_EQ(CheckedConvert<u32>(1_u), 1_u);
+ EXPECT_EQ(CheckedConvert<f32>(1_f), 1_f);
+ EXPECT_EQ(CheckedConvert<f16>(1_h), 1_h);
+}
+
+TEST(NumberTest, CheckedConvertLargestValue) {
+ EXPECT_EQ(CheckedConvert<i32>(AInt(kHighestI32)), i32(kHighestI32));
+ EXPECT_EQ(CheckedConvert<u32>(AInt(kHighestU32)), u32(kHighestU32));
+ EXPECT_EQ(CheckedConvert<f32>(AFloat(kHighestF32)), f32(kHighestF32));
+ EXPECT_EQ(CheckedConvert<f16>(AFloat(kHighestF16)), f16(kHighestF16));
+}
+
+TEST(NumberTest, CheckedConvertLowestValue) {
+ EXPECT_EQ(CheckedConvert<i32>(AInt(kLowestI32)), i32(kLowestI32));
+ EXPECT_EQ(CheckedConvert<u32>(AInt(kLowestU32)), u32(kLowestU32));
+ EXPECT_EQ(CheckedConvert<f32>(AFloat(kLowestF32)), f32(kLowestF32));
+ EXPECT_EQ(CheckedConvert<f16>(AFloat(kLowestF16)), f16(kLowestF16));
+}
+
+TEST(NumberTest, CheckedConvertSmallestValue) {
+ EXPECT_EQ(CheckedConvert<i32>(AInt(0)), i32(0));
+ EXPECT_EQ(CheckedConvert<u32>(AInt(0)), u32(0));
+ EXPECT_EQ(CheckedConvert<f32>(AFloat(kSmallestF32)), f32(kSmallestF32));
+ EXPECT_EQ(CheckedConvert<f16>(AFloat(kSmallestF16)), f16(kSmallestF16));
+}
+
+TEST(NumberTest, CheckedConvertExceedsPositiveLimit) {
+ EXPECT_EQ(CheckedConvert<i32>(AInt(kHighestI32 + 1)), ConversionFailure::kExceedsPositiveLimit);
+ EXPECT_EQ(CheckedConvert<u32>(AInt(kHighestU32 + 1)), ConversionFailure::kExceedsPositiveLimit);
+ EXPECT_EQ(CheckedConvert<f32>(AFloat(kHighestF32NextULP)),
+ ConversionFailure::kExceedsPositiveLimit);
+ EXPECT_EQ(CheckedConvert<f16>(AFloat(kHighestF16NextULP)),
+ ConversionFailure::kExceedsPositiveLimit);
+}
+
+TEST(NumberTest, CheckedConvertExceedsNegativeLimit) {
+ EXPECT_EQ(CheckedConvert<i32>(AInt(kLowestI32 - 1)), ConversionFailure::kExceedsNegativeLimit);
+ EXPECT_EQ(CheckedConvert<u32>(AInt(kLowestU32 - 1)), ConversionFailure::kExceedsNegativeLimit);
+ EXPECT_EQ(CheckedConvert<f32>(AFloat(kLowestF32NextULP)),
+ ConversionFailure::kExceedsNegativeLimit);
+ EXPECT_EQ(CheckedConvert<f16>(AFloat(kLowestF16NextULP)),
+ ConversionFailure::kExceedsNegativeLimit);
+}
+
+TEST(NumberTest, CheckedConvertSubnormals) {
+ EXPECT_EQ(CheckedConvert<f32>(AFloat(kHighestF32Subnormal)), f32(kHighestF32Subnormal));
+ EXPECT_EQ(CheckedConvert<f16>(AFloat(kHighestF16Subnormal)), f16(kHighestF16Subnormal));
+ EXPECT_EQ(CheckedConvert<f32>(AFloat(-kHighestF32Subnormal)), f32(-kHighestF32Subnormal));
+ EXPECT_EQ(CheckedConvert<f16>(AFloat(-kHighestF16Subnormal)), f16(-kHighestF16Subnormal));
+}
+
+TEST(NumberTest, QuantizeF16) {
+ constexpr float nan = std::numeric_limits<float>::quiet_NaN();
+ constexpr float inf = std::numeric_limits<float>::infinity();
+
+ EXPECT_EQ(f16(0.0), 0.0f);
+ EXPECT_EQ(f16(1.0), 1.0f);
+ EXPECT_EQ(f16(0.00006106496), 0.000061035156f);
+ EXPECT_EQ(f16(1.0004883), 1.0f);
+ EXPECT_EQ(f16(-8196), -8192.f);
+ EXPECT_EQ(f16(65504.003), inf);
+ EXPECT_EQ(f16(-65504.003), -inf);
+ EXPECT_EQ(f16(inf), inf);
+ EXPECT_EQ(f16(-inf), -inf);
+ EXPECT_TRUE(std::isnan(f16(nan)));
+}
+
+using BinaryCheckedCase = std::tuple<std::optional<AInt>, AInt, AInt>;
+
+#undef OVERFLOW // corecrt_math.h :(
+#define OVERFLOW \
+ {}
+
+using CheckedAddTest = testing::TestWithParam<BinaryCheckedCase>;
+TEST_P(CheckedAddTest, Test) {
+ auto expect = std::get<0>(GetParam());
+ auto a = std::get<1>(GetParam());
+ auto b = std::get<2>(GetParam());
+    EXPECT_EQ(CheckedAdd(a, b), expect) << std::hex << "0x" << a << " + 0x" << b;
+    EXPECT_EQ(CheckedAdd(b, a), expect) << std::hex << "0x" << a << " + 0x" << b;
+}
+INSTANTIATE_TEST_SUITE_P(
+ CheckedAddTest,
+ CheckedAddTest,
+ testing::ValuesIn(std::vector<BinaryCheckedCase>{
+ {AInt(0), AInt(0), AInt(0)},
+ {AInt(1), AInt(1), AInt(0)},
+ {AInt(2), AInt(1), AInt(1)},
+ {AInt(0), AInt(-1), AInt(1)},
+ {AInt(3), AInt(2), AInt(1)},
+ {AInt(-1), AInt(-2), AInt(1)},
+ {AInt(0x300), AInt(0x100), AInt(0x200)},
+ {AInt(0x100), AInt(-0x100), AInt(0x200)},
+ {AInt(AInt::kHighest), AInt(1), AInt(AInt::kHighest - 1)},
+ {AInt(AInt::kLowest), AInt(-1), AInt(AInt::kLowest + 1)},
+ {AInt(AInt::kHighest), AInt(0x7fffffff00000000ll), AInt(0x00000000ffffffffll)},
+ {AInt(AInt::kHighest), AInt(AInt::kHighest), AInt(0)},
+ {AInt(AInt::kLowest), AInt(AInt::kLowest), AInt(0)},
+ {OVERFLOW, AInt(1), AInt(AInt::kHighest)},
+ {OVERFLOW, AInt(-1), AInt(AInt::kLowest)},
+ {OVERFLOW, AInt(2), AInt(AInt::kHighest)},
+ {OVERFLOW, AInt(-2), AInt(AInt::kLowest)},
+ {OVERFLOW, AInt(10000), AInt(AInt::kHighest)},
+ {OVERFLOW, AInt(-10000), AInt(AInt::kLowest)},
+ {OVERFLOW, AInt(AInt::kHighest), AInt(AInt::kHighest)},
+ {OVERFLOW, AInt(AInt::kLowest), AInt(AInt::kLowest)},
+ ////////////////////////////////////////////////////////////////////////
+ }));
+
+using CheckedMulTest = testing::TestWithParam<BinaryCheckedCase>;
+TEST_P(CheckedMulTest, Test) {
+ auto expect = std::get<0>(GetParam());
+ auto a = std::get<1>(GetParam());
+ auto b = std::get<2>(GetParam());
+ EXPECT_EQ(CheckedMul(a, b), expect) << std::hex << "0x" << a << " * 0x" << b;
+ EXPECT_EQ(CheckedMul(b, a), expect) << std::hex << "0x" << a << " * 0x" << b;
+}
+INSTANTIATE_TEST_SUITE_P(
+ CheckedMulTest,
+ CheckedMulTest,
+ testing::ValuesIn(std::vector<BinaryCheckedCase>{
+ {AInt(0), AInt(0), AInt(0)},
+ {AInt(0), AInt(1), AInt(0)},
+ {AInt(1), AInt(1), AInt(1)},
+ {AInt(-1), AInt(-1), AInt(1)},
+ {AInt(2), AInt(2), AInt(1)},
+ {AInt(-2), AInt(-2), AInt(1)},
+ {AInt(0x20000), AInt(0x100), AInt(0x200)},
+ {AInt(-0x20000), AInt(-0x100), AInt(0x200)},
+ {AInt(0x4000000000000000ll), AInt(0x80000000ll), AInt(0x80000000ll)},
+ {AInt(0x4000000000000000ll), AInt(-0x80000000ll), AInt(-0x80000000ll)},
+ {AInt(0x1000000000000000ll), AInt(0x40000000ll), AInt(0x40000000ll)},
+ {AInt(-0x1000000000000000ll), AInt(-0x40000000ll), AInt(0x40000000ll)},
+ {AInt(0x100000000000000ll), AInt(0x1000000), AInt(0x100000000ll)},
+ {AInt(0x2000000000000000ll), AInt(0x1000000000000000ll), AInt(2)},
+ {AInt(-0x2000000000000000ll), AInt(0x1000000000000000ll), AInt(-2)},
+ {AInt(-0x2000000000000000ll), AInt(-0x1000000000000000ll), AInt(2)},
+ {AInt(-0x2000000000000000ll), AInt(0x1000000000000000ll), AInt(-2)},
+ {AInt(0x4000000000000000ll), AInt(0x1000000000000000ll), AInt(4)},
+ {AInt(-0x4000000000000000ll), AInt(0x1000000000000000ll), AInt(-4)},
+ {AInt(-0x4000000000000000ll), AInt(-0x1000000000000000ll), AInt(4)},
+ {AInt(-0x4000000000000000ll), AInt(0x1000000000000000ll), AInt(-4)},
+ {AInt(-0x8000000000000000ll), AInt(0x1000000000000000ll), AInt(-8)},
+ {AInt(-0x8000000000000000ll), AInt(-0x1000000000000000ll), AInt(8)},
+ {AInt(0), AInt(AInt::kHighest), AInt(0)},
+ {AInt(0), AInt(AInt::kLowest), AInt(0)},
+ {OVERFLOW, AInt(0x1000000000000000ll), AInt(8)},
+ {OVERFLOW, AInt(-0x1000000000000000ll), AInt(-8)},
+ {OVERFLOW, AInt(0x800000000000000ll), AInt(0x10)},
+ {OVERFLOW, AInt(0x80000000ll), AInt(0x100000000ll)},
+ {OVERFLOW, AInt(AInt::kHighest), AInt(AInt::kHighest)},
+ {OVERFLOW, AInt(AInt::kHighest), AInt(AInt::kLowest)},
+ ////////////////////////////////////////////////////////////////////////
+ }));
+
+using TernaryCheckedCase = std::tuple<std::optional<AInt>, AInt, AInt, AInt>;
+
+using CheckedMaddTest = testing::TestWithParam<TernaryCheckedCase>;
+TEST_P(CheckedMaddTest, Test) {
+ auto expect = std::get<0>(GetParam());
+ auto a = std::get<1>(GetParam());
+ auto b = std::get<2>(GetParam());
+ auto c = std::get<3>(GetParam());
+ EXPECT_EQ(CheckedMadd(a, b, c), expect)
+ << std::hex << "0x" << a << " * 0x" << b << " + 0x" << c;
+ EXPECT_EQ(CheckedMadd(b, a, c), expect)
+ << std::hex << "0x" << a << " * 0x" << b << " + 0x" << c;
+}
+INSTANTIATE_TEST_SUITE_P(
+ CheckedMaddTest,
+ CheckedMaddTest,
+ testing::ValuesIn(std::vector<TernaryCheckedCase>{
+ {AInt(0), AInt(0), AInt(0), AInt(0)},
+ {AInt(0), AInt(1), AInt(0), AInt(0)},
+ {AInt(1), AInt(1), AInt(1), AInt(0)},
+ {AInt(2), AInt(1), AInt(1), AInt(1)},
+ {AInt(0), AInt(1), AInt(-1), AInt(1)},
+ {AInt(-1), AInt(1), AInt(-2), AInt(1)},
+ {AInt(-1), AInt(-1), AInt(1), AInt(0)},
+ {AInt(2), AInt(2), AInt(1), AInt(0)},
+ {AInt(-2), AInt(-2), AInt(1), AInt(0)},
+ {AInt(0), AInt(AInt::kHighest), AInt(0), AInt(0)},
+ {AInt(0), AInt(AInt::kLowest), AInt(0), AInt(0)},
+ {AInt(3), AInt(1), AInt(2), AInt(1)},
+ {AInt(0x300), AInt(1), AInt(0x100), AInt(0x200)},
+ {AInt(0x100), AInt(1), AInt(-0x100), AInt(0x200)},
+ {AInt(0x20000), AInt(0x100), AInt(0x200), AInt(0)},
+ {AInt(-0x20000), AInt(-0x100), AInt(0x200), AInt(0)},
+ {AInt(0x4000000000000000ll), AInt(0x80000000ll), AInt(0x80000000ll), AInt(0)},
+ {AInt(0x4000000000000000ll), AInt(-0x80000000ll), AInt(-0x80000000ll), AInt(0)},
+ {AInt(0x1000000000000000ll), AInt(0x40000000ll), AInt(0x40000000ll), AInt(0)},
+ {AInt(-0x1000000000000000ll), AInt(-0x40000000ll), AInt(0x40000000ll), AInt(0)},
+ {AInt(0x100000000000000ll), AInt(0x1000000), AInt(0x100000000ll), AInt(0)},
+ {AInt(0x2000000000000000ll), AInt(0x1000000000000000ll), AInt(2), AInt(0)},
+ {AInt(-0x2000000000000000ll), AInt(0x1000000000000000ll), AInt(-2), AInt(0)},
+ {AInt(-0x2000000000000000ll), AInt(-0x1000000000000000ll), AInt(2), AInt(0)},
+ {AInt(-0x2000000000000000ll), AInt(0x1000000000000000ll), AInt(-2), AInt(0)},
+ {AInt(0x4000000000000000ll), AInt(0x1000000000000000ll), AInt(4), AInt(0)},
+ {AInt(-0x4000000000000000ll), AInt(0x1000000000000000ll), AInt(-4), AInt(0)},
+ {AInt(-0x4000000000000000ll), AInt(-0x1000000000000000ll), AInt(4), AInt(0)},
+ {AInt(-0x4000000000000000ll), AInt(0x1000000000000000ll), AInt(-4), AInt(0)},
+ {AInt(-0x8000000000000000ll), AInt(0x1000000000000000ll), AInt(-8), AInt(0)},
+ {AInt(-0x8000000000000000ll), AInt(-0x1000000000000000ll), AInt(8), AInt(0)},
+ {AInt(AInt::kHighest), AInt(1), AInt(1), AInt(AInt::kHighest - 1)},
+ {AInt(AInt::kLowest), AInt(1), AInt(-1), AInt(AInt::kLowest + 1)},
+ {AInt(AInt::kHighest), AInt(1), AInt(0x7fffffff00000000ll), AInt(0x00000000ffffffffll)},
+ {AInt(AInt::kHighest), AInt(1), AInt(AInt::kHighest), AInt(0)},
+ {AInt(AInt::kLowest), AInt(1), AInt(AInt::kLowest), AInt(0)},
+ {OVERFLOW, AInt(0x1000000000000000ll), AInt(8), AInt(0)},
+ {OVERFLOW, AInt(-0x1000000000000000ll), AInt(-8), AInt(0)},
+ {OVERFLOW, AInt(0x800000000000000ll), AInt(0x10), AInt(0)},
+ {OVERFLOW, AInt(0x80000000ll), AInt(0x100000000ll), AInt(0)},
+ {OVERFLOW, AInt(AInt::kHighest), AInt(AInt::kHighest), AInt(0)},
+ {OVERFLOW, AInt(AInt::kHighest), AInt(AInt::kLowest), AInt(0)},
+ {OVERFLOW, AInt(1), AInt(1), AInt(AInt::kHighest)},
+ {OVERFLOW, AInt(1), AInt(-1), AInt(AInt::kLowest)},
+ {OVERFLOW, AInt(1), AInt(2), AInt(AInt::kHighest)},
+ {OVERFLOW, AInt(1), AInt(-2), AInt(AInt::kLowest)},
+ {OVERFLOW, AInt(1), AInt(10000), AInt(AInt::kHighest)},
+ {OVERFLOW, AInt(1), AInt(-10000), AInt(AInt::kLowest)},
+ {OVERFLOW, AInt(1), AInt(AInt::kHighest), AInt(AInt::kHighest)},
+ {OVERFLOW, AInt(1), AInt(AInt::kLowest), AInt(AInt::kLowest)},
+ {OVERFLOW, AInt(1), AInt(AInt::kHighest), AInt(1)},
+ {OVERFLOW, AInt(1), AInt(AInt::kLowest), AInt(-1)},
+ }));
+
+TINT_END_DISABLE_WARNING(CONSTANT_OVERFLOW);
+
+} // namespace
+} // namespace tint
diff --git a/chromium/third_party/dawn/src/tint/program.cc b/chromium/third_party/dawn/src/tint/program.cc
index a6a6ab7a1e3..6722a09b0ee 100644
--- a/chromium/third_party/dawn/src/tint/program.cc
+++ b/chromium/third_party/dawn/src/tint/program.cc
@@ -24,7 +24,7 @@ namespace tint {
namespace {
std::string DefaultPrinter(const Program*) {
- return "<no program printer assigned>";
+ return "<no program printer assigned>";
}
} // namespace
@@ -43,89 +43,89 @@ Program::Program(Program&& program)
symbols_(std::move(program.symbols_)),
diagnostics_(std::move(program.diagnostics_)),
is_valid_(program.is_valid_) {
- program.AssertNotMoved();
- program.moved_ = true;
+ program.AssertNotMoved();
+ program.moved_ = true;
}
Program::Program(ProgramBuilder&& builder) {
- id_ = builder.ID();
+ id_ = builder.ID();
+
+ is_valid_ = builder.IsValid();
+ if (builder.ResolveOnBuild() && builder.IsValid()) {
+ resolver::Resolver resolver(&builder);
+ if (!resolver.Resolve()) {
+ is_valid_ = false;
+ }
+ }
- is_valid_ = builder.IsValid();
- if (builder.ResolveOnBuild() && builder.IsValid()) {
- resolver::Resolver resolver(&builder);
- if (!resolver.Resolve()) {
- is_valid_ = false;
+ // The above must be called *before* the calls to std::move() below
+ types_ = std::move(builder.Types());
+ ast_nodes_ = std::move(builder.ASTNodes());
+ sem_nodes_ = std::move(builder.SemNodes());
+ ast_ = &builder.AST(); // ast::Module is actually a heap allocation.
+ sem_ = std::move(builder.Sem());
+ symbols_ = std::move(builder.Symbols());
+ diagnostics_.add(std::move(builder.Diagnostics()));
+ builder.MarkAsMoved();
+
+ if (!is_valid_ && !diagnostics_.contains_errors()) {
+ // If the builder claims to be invalid, then we really should have an error
+ // message generated. If we find a situation where the program is not valid
+ // and there are no errors reported, add one here.
+ diagnostics_.add_error(diag::System::Program, "invalid program generated");
}
- }
-
- // The above must be called *before* the calls to std::move() below
- types_ = std::move(builder.Types());
- ast_nodes_ = std::move(builder.ASTNodes());
- sem_nodes_ = std::move(builder.SemNodes());
- ast_ = &builder.AST(); // ast::Module is actually a heap allocation.
- sem_ = std::move(builder.Sem());
- symbols_ = std::move(builder.Symbols());
- diagnostics_.add(std::move(builder.Diagnostics()));
- builder.MarkAsMoved();
-
- if (!is_valid_ && !diagnostics_.contains_errors()) {
- // If the builder claims to be invalid, then we really should have an error
- // message generated. If we find a situation where the program is not valid
- // and there are no errors reported, add one here.
- diagnostics_.add_error(diag::System::Program, "invalid program generated");
- }
}
Program::~Program() = default;
Program& Program::operator=(Program&& program) {
- program.AssertNotMoved();
- program.moved_ = true;
- moved_ = false;
- id_ = std::move(program.id_);
- types_ = std::move(program.types_);
- ast_nodes_ = std::move(program.ast_nodes_);
- sem_nodes_ = std::move(program.sem_nodes_);
- ast_ = std::move(program.ast_);
- sem_ = std::move(program.sem_);
- symbols_ = std::move(program.symbols_);
- diagnostics_ = std::move(program.diagnostics_);
- is_valid_ = program.is_valid_;
- return *this;
+ program.AssertNotMoved();
+ program.moved_ = true;
+ moved_ = false;
+ id_ = std::move(program.id_);
+ types_ = std::move(program.types_);
+ ast_nodes_ = std::move(program.ast_nodes_);
+ sem_nodes_ = std::move(program.sem_nodes_);
+ ast_ = std::move(program.ast_);
+ sem_ = std::move(program.sem_);
+ symbols_ = std::move(program.symbols_);
+ diagnostics_ = std::move(program.diagnostics_);
+ is_valid_ = program.is_valid_;
+ return *this;
}
Program Program::Clone() const {
- AssertNotMoved();
- return Program(CloneAsBuilder());
+ AssertNotMoved();
+ return Program(CloneAsBuilder());
}
ProgramBuilder Program::CloneAsBuilder() const {
- AssertNotMoved();
- ProgramBuilder out;
- CloneContext(&out, this).Clone();
- return out;
+ AssertNotMoved();
+ ProgramBuilder out;
+ CloneContext(&out, this).Clone();
+ return out;
}
bool Program::IsValid() const {
- AssertNotMoved();
- return is_valid_;
+ AssertNotMoved();
+ return is_valid_;
}
const sem::Type* Program::TypeOf(const ast::Expression* expr) const {
- auto* sem = Sem().Get(expr);
- return sem ? sem->Type() : nullptr;
+ auto* sem = Sem().Get(expr);
+ return sem ? sem->Type() : nullptr;
}
const sem::Type* Program::TypeOf(const ast::Type* type) const {
- return Sem().Get(type);
+ return Sem().Get(type);
}
const sem::Type* Program::TypeOf(const ast::TypeDecl* type_decl) const {
- return Sem().Get(type_decl);
+ return Sem().Get(type_decl);
}
void Program::AssertNotMoved() const {
- TINT_ASSERT(Program, !moved_);
+ TINT_ASSERT(Program, !moved_);
}
} // namespace tint
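
A minimal usage sketch (illustrative only, not part of this patch): the Program(ProgramBuilder&&) constructor above resolves the builder before moving its storage, marks the builder as moved, and guarantees that an invalid program carries at least one error diagnostic. The helper name BuildProgram below is invented; everything else uses only entry points visible in program.cc/program.h.

    #include <cassert>
    #include <utility>

    #include "src/tint/program.h"
    #include "src/tint/program_builder.h"

    tint::Program BuildProgram(tint::ProgramBuilder&& builder) {
        // Resolution runs inside the Program constructor when ResolveOnBuild() is true.
        tint::Program program(std::move(builder));
        // `builder` is now marked as moved and must not be used again.
        if (!program.IsValid()) {
            // The constructor adds "invalid program generated" if no error was reported.
            assert(program.Diagnostics().contains_errors());
        }
        return program;
    }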
diff --git a/chromium/third_party/dawn/src/tint/program.h b/chromium/third_party/dawn/src/tint/program.h
index 37b462e55fa..3230e7e786c 100644
--- a/chromium/third_party/dawn/src/tint/program.h
+++ b/chromium/third_party/dawn/src/tint/program.h
@@ -36,142 +36,142 @@ namespace tint {
/// Program holds the AST, Type information and SymbolTable for a tint program.
class Program {
- public:
- /// ASTNodeAllocator is an alias to BlockAllocator<ast::Node>
- using ASTNodeAllocator = utils::BlockAllocator<ast::Node>;
-
- /// SemNodeAllocator is an alias to BlockAllocator<sem::Node>
- using SemNodeAllocator = utils::BlockAllocator<sem::Node>;
-
- /// Constructor
- Program();
-
- /// Move constructor
- /// @param rhs the Program to move
- Program(Program&& rhs);
-
- /// Move constructor from builder
- /// @param builder the builder used to construct the program
- explicit Program(ProgramBuilder&& builder);
-
- /// Destructor
- ~Program();
-
- /// Move assignment operator
- /// @param rhs the Program to move
- /// @return this Program
- Program& operator=(Program&& rhs);
-
- /// @returns the unique identifier for this program
- ProgramID ID() const { return id_; }
-
- /// @returns a reference to the program's types
- const sem::Manager& Types() const {
- AssertNotMoved();
- return types_;
- }
-
- /// @returns a reference to the program's AST nodes storage
- const ASTNodeAllocator& ASTNodes() const {
- AssertNotMoved();
- return ast_nodes_;
- }
-
- /// @returns a reference to the program's semantic nodes storage
- const SemNodeAllocator& SemNodes() const {
- AssertNotMoved();
- return sem_nodes_;
- }
-
- /// @returns a reference to the program's AST root Module
- const ast::Module& AST() const {
- AssertNotMoved();
- return *ast_;
- }
-
- /// @returns a reference to the program's semantic info
- const sem::Info& Sem() const {
- AssertNotMoved();
- return sem_;
- }
-
- /// @returns a reference to the program's SymbolTable
- const SymbolTable& Symbols() const {
- AssertNotMoved();
- return symbols_;
- }
-
- /// @returns a reference to the program's diagnostics
- const diag::List& Diagnostics() const {
- AssertNotMoved();
- return diagnostics_;
- }
-
- /// Performs a deep clone of this program.
- /// The returned Program will contain no pointers to objects owned by this
- /// Program, and so after calling, this Program can be safely destructed.
- /// @return a new Program copied from this Program
- Program Clone() const;
-
- /// Performs a deep clone of this Program's AST nodes, types and symbols into
- /// a new ProgramBuilder. Semantic nodes are not cloned, as these will be
- /// rebuilt when the ProgramBuilder builds its Program.
- /// The returned ProgramBuilder will contain no pointers to objects owned by
- /// this Program, and so after calling, this Program can be safely destructed.
- /// @return a new ProgramBuilder copied from this Program
- ProgramBuilder CloneAsBuilder() const;
-
- /// @returns true if the program has no error diagnostics and is not missing
- /// information
- bool IsValid() const;
-
- /// Helper for returning the resolved semantic type of the expression `expr`.
- /// @param expr the AST expression
- /// @return the resolved semantic type for the expression, or nullptr if the
- /// expression has no resolved type.
- const sem::Type* TypeOf(const ast::Expression* expr) const;
-
- /// Helper for returning the resolved semantic type of the AST type `type`.
- /// @param type the AST type
- /// @return the resolved semantic type for the type, or nullptr if the type
- /// has no resolved type.
- const sem::Type* TypeOf(const ast::Type* type) const;
-
- /// Helper for returning the resolved semantic type of the AST type
- /// declaration `type_decl`.
- /// @param type_decl the AST type declaration
- /// @return the resolved semantic type for the type declaration, or nullptr if
- /// the type declaration has no resolved type.
- const sem::Type* TypeOf(const ast::TypeDecl* type_decl) const;
-
- /// A function that can be used to print a program
- using Printer = std::string (*)(const Program*);
-
- /// The Program printer used for testing and debugging.
- static Printer printer;
-
- private:
- Program(const Program&) = delete;
-
- /// Asserts that the program has not been moved.
- void AssertNotMoved() const;
-
- ProgramID id_;
- sem::Manager types_;
- ASTNodeAllocator ast_nodes_;
- SemNodeAllocator sem_nodes_;
- ast::Module* ast_ = nullptr;
- sem::Info sem_;
- SymbolTable symbols_{id_};
- diag::List diagnostics_;
- bool is_valid_ = false; // Not valid until it is built
- bool moved_ = false;
+ public:
+ /// ASTNodeAllocator is an alias to BlockAllocator<ast::Node>
+ using ASTNodeAllocator = utils::BlockAllocator<ast::Node>;
+
+ /// SemNodeAllocator is an alias to BlockAllocator<sem::Node>
+ using SemNodeAllocator = utils::BlockAllocator<sem::Node>;
+
+ /// Constructor
+ Program();
+
+ /// Move constructor
+ /// @param rhs the Program to move
+ Program(Program&& rhs);
+
+ /// Move constructor from builder
+ /// @param builder the builder used to construct the program
+ explicit Program(ProgramBuilder&& builder);
+
+ /// Destructor
+ ~Program();
+
+ /// Move assignment operator
+ /// @param rhs the Program to move
+ /// @return this Program
+ Program& operator=(Program&& rhs);
+
+ /// @returns the unique identifier for this program
+ ProgramID ID() const { return id_; }
+
+ /// @returns a reference to the program's types
+ const sem::Manager& Types() const {
+ AssertNotMoved();
+ return types_;
+ }
+
+ /// @returns a reference to the program's AST nodes storage
+ const ASTNodeAllocator& ASTNodes() const {
+ AssertNotMoved();
+ return ast_nodes_;
+ }
+
+ /// @returns a reference to the program's semantic nodes storage
+ const SemNodeAllocator& SemNodes() const {
+ AssertNotMoved();
+ return sem_nodes_;
+ }
+
+ /// @returns a reference to the program's AST root Module
+ const ast::Module& AST() const {
+ AssertNotMoved();
+ return *ast_;
+ }
+
+ /// @returns a reference to the program's semantic info
+ const sem::Info& Sem() const {
+ AssertNotMoved();
+ return sem_;
+ }
+
+ /// @returns a reference to the program's SymbolTable
+ const SymbolTable& Symbols() const {
+ AssertNotMoved();
+ return symbols_;
+ }
+
+ /// @returns a reference to the program's diagnostics
+ const diag::List& Diagnostics() const {
+ AssertNotMoved();
+ return diagnostics_;
+ }
+
+ /// Performs a deep clone of this program.
+ /// The returned Program will contain no pointers to objects owned by this
+ /// Program, and so after calling, this Program can be safely destructed.
+ /// @return a new Program copied from this Program
+ Program Clone() const;
+
+ /// Performs a deep clone of this Program's AST nodes, types and symbols into
+ /// a new ProgramBuilder. Semantic nodes are not cloned, as these will be
+ /// rebuilt when the ProgramBuilder builds its Program.
+ /// The returned ProgramBuilder will contain no pointers to objects owned by
+ /// this Program, and so after calling, this Program can be safely destructed.
+ /// @return a new ProgramBuilder copied from this Program
+ ProgramBuilder CloneAsBuilder() const;
+
+ /// @returns true if the program has no error diagnostics and is not missing
+ /// information
+ bool IsValid() const;
+
+ /// Helper for returning the resolved semantic type of the expression `expr`.
+ /// @param expr the AST expression
+ /// @return the resolved semantic type for the expression, or nullptr if the
+ /// expression has no resolved type.
+ const sem::Type* TypeOf(const ast::Expression* expr) const;
+
+ /// Helper for returning the resolved semantic type of the AST type `type`.
+ /// @param type the AST type
+ /// @return the resolved semantic type for the type, or nullptr if the type
+ /// has no resolved type.
+ const sem::Type* TypeOf(const ast::Type* type) const;
+
+ /// Helper for returning the resolved semantic type of the AST type
+ /// declaration `type_decl`.
+ /// @param type_decl the AST type declaration
+ /// @return the resolved semantic type for the type declaration, or nullptr if
+ /// the type declaration has no resolved type.
+ const sem::Type* TypeOf(const ast::TypeDecl* type_decl) const;
+
+ /// A function that can be used to print a program
+ using Printer = std::string (*)(const Program*);
+
+ /// The Program printer used for testing and debugging.
+ static Printer printer;
+
+ private:
+ Program(const Program&) = delete;
+
+ /// Asserts that the program has not been moved.
+ void AssertNotMoved() const;
+
+ ProgramID id_;
+ sem::Manager types_;
+ ASTNodeAllocator ast_nodes_;
+ SemNodeAllocator sem_nodes_;
+ ast::Module* ast_ = nullptr;
+ sem::Info sem_;
+ SymbolTable symbols_{id_};
+ diag::List diagnostics_;
+ bool is_valid_ = false; // Not valid until it is built
+ bool moved_ = false;
};
/// @param program the Program
/// @returns the ProgramID of the Program
inline ProgramID ProgramIDOf(const Program* program) {
- return program->ID();
+ return program->ID();
}
} // namespace tint
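
The Printer hook declared above is a plain function pointer (std::string (*)(const Program*)) that defaults to DefaultPrinter() in program.cc. Because a capture-less lambda converts to that pointer type, a test or debug build can install its own printer. A sketch, with an invented function name and a deliberately trivial lambda body:

    #include <string>

    #include "src/tint/program.h"

    void InstallDebugPrinter() {
        tint::Program::printer = [](const tint::Program* program) -> std::string {
            // A real printer would walk program->AST(); this stub only reports validity.
            return program->IsValid() ? "<valid program>" : "<invalid program>";
        };
    }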
diff --git a/chromium/third_party/dawn/src/tint/program_builder.cc b/chromium/third_party/dawn/src/tint/program_builder.cc
index c2f58ecb8b7..cd05fd41b2d 100644
--- a/chromium/third_party/dawn/src/tint/program_builder.cc
+++ b/chromium/third_party/dawn/src/tint/program_builder.cc
@@ -22,13 +22,14 @@
#include "src/tint/sem/expression.h"
#include "src/tint/sem/variable.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint {
ProgramBuilder::VarOptionals::~VarOptionals() = default;
ProgramBuilder::ProgramBuilder()
- : id_(ProgramID::New()),
- ast_(ast_nodes_.Create<ast::Module>(id_, Source{})) {}
+ : id_(ProgramID::New()), ast_(ast_nodes_.Create<ast::Module>(id_, Source{})) {}
ProgramBuilder::ProgramBuilder(ProgramBuilder&& rhs)
: id_(std::move(rhs.id_)),
@@ -39,100 +40,95 @@ ProgramBuilder::ProgramBuilder(ProgramBuilder&& rhs)
sem_(std::move(rhs.sem_)),
symbols_(std::move(rhs.symbols_)),
diagnostics_(std::move(rhs.diagnostics_)) {
- rhs.MarkAsMoved();
+ rhs.MarkAsMoved();
}
ProgramBuilder::~ProgramBuilder() = default;
ProgramBuilder& ProgramBuilder::operator=(ProgramBuilder&& rhs) {
- rhs.MarkAsMoved();
- AssertNotMoved();
- id_ = std::move(rhs.id_);
- types_ = std::move(rhs.types_);
- ast_nodes_ = std::move(rhs.ast_nodes_);
- sem_nodes_ = std::move(rhs.sem_nodes_);
- ast_ = rhs.ast_;
- sem_ = std::move(rhs.sem_);
- symbols_ = std::move(rhs.symbols_);
- diagnostics_ = std::move(rhs.diagnostics_);
-
- return *this;
+ rhs.MarkAsMoved();
+ AssertNotMoved();
+ id_ = std::move(rhs.id_);
+ types_ = std::move(rhs.types_);
+ ast_nodes_ = std::move(rhs.ast_nodes_);
+ sem_nodes_ = std::move(rhs.sem_nodes_);
+ ast_ = rhs.ast_;
+ sem_ = std::move(rhs.sem_);
+ symbols_ = std::move(rhs.symbols_);
+ diagnostics_ = std::move(rhs.diagnostics_);
+
+ return *this;
}
ProgramBuilder ProgramBuilder::Wrap(const Program* program) {
- ProgramBuilder builder;
- builder.id_ = program->ID();
- builder.types_ = sem::Manager::Wrap(program->Types());
- builder.ast_ = builder.create<ast::Module>(
- program->AST().source, program->AST().GlobalDeclarations());
- builder.sem_ = sem::Info::Wrap(program->Sem());
- builder.symbols_ = program->Symbols();
- builder.diagnostics_ = program->Diagnostics();
- return builder;
+ ProgramBuilder builder;
+ builder.id_ = program->ID();
+ builder.types_ = sem::Manager::Wrap(program->Types());
+ builder.ast_ =
+ builder.create<ast::Module>(program->AST().source, program->AST().GlobalDeclarations());
+ builder.sem_ = sem::Info::Wrap(program->Sem());
+ builder.symbols_ = program->Symbols();
+ builder.diagnostics_ = program->Diagnostics();
+ return builder;
}
bool ProgramBuilder::IsValid() const {
- return !diagnostics_.contains_errors();
+ return !diagnostics_.contains_errors();
}
void ProgramBuilder::MarkAsMoved() {
- AssertNotMoved();
- moved_ = true;
+ AssertNotMoved();
+ moved_ = true;
}
void ProgramBuilder::AssertNotMoved() const {
- if (moved_) {
- TINT_ICE(ProgramBuilder, const_cast<ProgramBuilder*>(this)->diagnostics_)
- << "Attempting to use ProgramBuilder after it has been moved";
- }
+ if (moved_) {
+ TINT_ICE(ProgramBuilder, const_cast<ProgramBuilder*>(this)->diagnostics_)
+ << "Attempting to use ProgramBuilder after it has been moved";
+ }
}
const sem::Type* ProgramBuilder::TypeOf(const ast::Expression* expr) const {
- auto* sem = Sem().Get(expr);
- return sem ? sem->Type() : nullptr;
+ auto* sem = Sem().Get(expr);
+ return sem ? sem->Type() : nullptr;
}
const sem::Type* ProgramBuilder::TypeOf(const ast::Variable* var) const {
- auto* sem = Sem().Get(var);
- return sem ? sem->Type() : nullptr;
+ auto* sem = Sem().Get(var);
+ return sem ? sem->Type() : nullptr;
}
const sem::Type* ProgramBuilder::TypeOf(const ast::Type* type) const {
- return Sem().Get(type);
+ return Sem().Get(type);
}
const sem::Type* ProgramBuilder::TypeOf(const ast::TypeDecl* type_decl) const {
- return Sem().Get(type_decl);
+ return Sem().Get(type_decl);
}
-const ast::TypeName* ProgramBuilder::TypesBuilder::Of(
- const ast::TypeDecl* decl) const {
- return type_name(decl->name);
+const ast::TypeName* ProgramBuilder::TypesBuilder::Of(const ast::TypeDecl* decl) const {
+ return type_name(decl->name);
}
ProgramBuilder::TypesBuilder::TypesBuilder(ProgramBuilder* pb) : builder(pb) {}
-const ast::Statement* ProgramBuilder::WrapInStatement(
- const ast::Expression* expr) {
- // Create a temporary variable of inferred type from expr.
- return Decl(Const(symbols_.New(), nullptr, expr));
+const ast::Statement* ProgramBuilder::WrapInStatement(const ast::Expression* expr) {
+ // Create a temporary variable of inferred type from expr.
+ return Decl(Let(symbols_.New(), nullptr, expr));
}
-const ast::VariableDeclStatement* ProgramBuilder::WrapInStatement(
- const ast::Variable* v) {
- return create<ast::VariableDeclStatement>(v);
+const ast::VariableDeclStatement* ProgramBuilder::WrapInStatement(const ast::Variable* v) {
+ return create<ast::VariableDeclStatement>(v);
}
-const ast::Statement* ProgramBuilder::WrapInStatement(
- const ast::Statement* stmt) {
- return stmt;
+const ast::Statement* ProgramBuilder::WrapInStatement(const ast::Statement* stmt) {
+ return stmt;
}
-const ast::Function* ProgramBuilder::WrapInFunction(
- const ast::StatementList stmts) {
- return Func("test_function", {}, ty.void_(), std::move(stmts),
- {create<ast::StageAttribute>(ast::PipelineStage::kCompute),
- WorkgroupSize(1, 1, 1)});
+const ast::Function* ProgramBuilder::WrapInFunction(const ast::StatementList stmts) {
+ return Func(
+ "test_function", {}, ty.void_(), std::move(stmts),
+ {create<ast::StageAttribute>(ast::PipelineStage::kCompute), WorkgroupSize(1_i, 1_i, 1_i)});
}
} // namespace tint
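
Beyond the reformatting, this file carries two behavioural tweaks: WrapInStatement() now wraps the expression in a `let` declaration rather than a `const`, and WrapInFunction() builds its workgroup size from the typed literal 1_i, made visible by the new `using namespace tint::number_suffixes` directive (the suffixes come with the src/tint/number.h include added to program_builder.h in the next file). The suffix is an ordinary C++ user-defined literal; a generic sketch of the mechanism, with invented names rather than tint's actual definition:

    #include <cstdint>

    // Hypothetical wrapper standing in for a typed 32-bit integer literal.
    struct LiteralI32 {
        int32_t value;
    };

    // User-defined literal: 1_li yields a LiteralI32 instead of a bare int.
    constexpr LiteralI32 operator""_li(unsigned long long v) {
        return LiteralI32{static_cast<int32_t>(v)};
    }

    static_assert((1_li).value == 1, "the suffix carries the value into the wrapper type");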
diff --git a/chromium/third_party/dawn/src/tint/program_builder.h b/chromium/third_party/dawn/src/tint/program_builder.h
index 2f7449ebaac..5f47efb0165 100644
--- a/chromium/third_party/dawn/src/tint/program_builder.h
+++ b/chromium/third_party/dawn/src/tint/program_builder.h
@@ -38,7 +38,10 @@
#include "src/tint/ast/depth_texture.h"
#include "src/tint/ast/disable_validation_attribute.h"
#include "src/tint/ast/discard_statement.h"
+#include "src/tint/ast/enable.h"
+#include "src/tint/ast/extension.h"
#include "src/tint/ast/external_texture.h"
+#include "src/tint/ast/f16.h"
#include "src/tint/ast/f32.h"
#include "src/tint/ast/fallthrough_statement.h"
#include "src/tint/ast/float_literal_expression.h"
@@ -60,7 +63,6 @@
#include "src/tint/ast/return_statement.h"
#include "src/tint/ast/sampled_texture.h"
#include "src/tint/ast/sampler.h"
-#include "src/tint/ast/sint_literal_expression.h"
#include "src/tint/ast/stage_attribute.h"
#include "src/tint/ast/storage_texture.h"
#include "src/tint/ast/stride_attribute.h"
@@ -70,29 +72,30 @@
#include "src/tint/ast/switch_statement.h"
#include "src/tint/ast/type_name.h"
#include "src/tint/ast/u32.h"
-#include "src/tint/ast/uint_literal_expression.h"
#include "src/tint/ast/unary_op_expression.h"
#include "src/tint/ast/variable_decl_statement.h"
#include "src/tint/ast/vector.h"
#include "src/tint/ast/void.h"
#include "src/tint/ast/workgroup_attribute.h"
+#include "src/tint/number.h"
#include "src/tint/program.h"
#include "src/tint/program_id.h"
#include "src/tint/sem/array.h"
-#include "src/tint/sem/bool_type.h"
-#include "src/tint/sem/depth_texture_type.h"
-#include "src/tint/sem/external_texture_type.h"
-#include "src/tint/sem/f32_type.h"
-#include "src/tint/sem/i32_type.h"
-#include "src/tint/sem/matrix_type.h"
-#include "src/tint/sem/multisampled_texture_type.h"
-#include "src/tint/sem/pointer_type.h"
-#include "src/tint/sem/sampled_texture_type.h"
-#include "src/tint/sem/storage_texture_type.h"
+#include "src/tint/sem/bool.h"
+#include "src/tint/sem/depth_texture.h"
+#include "src/tint/sem/external_texture.h"
+#include "src/tint/sem/f16.h"
+#include "src/tint/sem/f32.h"
+#include "src/tint/sem/i32.h"
+#include "src/tint/sem/matrix.h"
+#include "src/tint/sem/multisampled_texture.h"
+#include "src/tint/sem/pointer.h"
+#include "src/tint/sem/sampled_texture.h"
+#include "src/tint/sem/storage_texture.h"
#include "src/tint/sem/struct.h"
-#include "src/tint/sem/u32_type.h"
-#include "src/tint/sem/vector_type.h"
-#include "src/tint/sem/void_type.h"
+#include "src/tint/sem/u32.h"
+#include "src/tint/sem/vector.h"
+#include "src/tint/sem/void.h"
#ifdef INCLUDE_TINT_TINT_H_
#error "internal tint header being #included from tint.h"
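
The #ifdef INCLUDE_TINT_TINT_H_ / #error pair kept above is a build-time guard: tint.h defines that macro (presumably its own include guard), so any internal header that ends up being #included from tint.h fails the build with a clear message. A stripped-down sketch of the same technique, with invented file and macro names:

    // umbrella.h (hypothetical public header)
    //   #ifndef INCLUDE_UMBRELLA_H_
    //   #define INCLUDE_UMBRELLA_H_
    //   ...public includes only...
    //   #endif

    // internal.h (hypothetical internal header)
    #ifdef INCLUDE_UMBRELLA_H_
    #error "internal header must not be included from umbrella.h"
    #endif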
@@ -112,2707 +115,2648 @@ namespace tint {
/// To construct a Program, populate the builder and then `std::move` it to a
/// Program.
class ProgramBuilder {
- /// A helper used to disable overloads if the first type in `TYPES` is a
- /// Source. Used to avoid ambiguities in overloads that take a Source as the
- /// first parameter and those that perfectly-forward the first argument.
- template <typename... TYPES>
- using DisableIfSource = traits::EnableIfIsNotType<
- traits::Decay<traits::NthTypeOf<0, TYPES..., void>>,
- Source>;
-
- /// VarOptionals is a helper for accepting a number of optional, extra
- /// arguments for Var() and Global().
- struct VarOptionals {
- template <typename... ARGS>
- explicit VarOptionals(ARGS&&... args) {
- Apply(std::forward<ARGS>(args)...);
- }
- ~VarOptionals();
-
- ast::StorageClass storage = ast::StorageClass::kNone;
- ast::Access access = ast::Access::kUndefined;
- const ast::Expression* constructor = nullptr;
- ast::AttributeList attributes = {};
-
- private:
- void Set(ast::StorageClass sc) { storage = sc; }
- void Set(ast::Access ac) { access = ac; }
- void Set(const ast::Expression* c) { constructor = c; }
- void Set(const ast::AttributeList& l) { attributes = l; }
-
- template <typename FIRST, typename... ARGS>
- void Apply(FIRST&& first, ARGS&&... args) {
- Set(std::forward<FIRST>(first));
- Apply(std::forward<ARGS>(args)...);
- }
- void Apply() {}
- };
-
- public:
- /// ASTNodeAllocator is an alias to BlockAllocator<ast::Node>
- using ASTNodeAllocator = utils::BlockAllocator<ast::Node>;
-
- /// SemNodeAllocator is an alias to BlockAllocator<sem::Node>
- using SemNodeAllocator = utils::BlockAllocator<sem::Node>;
-
- /// `i32` is a type alias to `int`.
- /// Useful for passing to template methods such as `vec2<i32>()` to imitate
- /// WGSL syntax.
- /// Note: this is intentionally not aliased to uint32_t as we want integer
- /// literals passed to the builder to match WGSL's integer literal types.
- using i32 = decltype(1);
- /// `u32` is a type alias to `unsigned int`.
- /// Useful for passing to template methods such as `vec2<u32>()` to imitate
- /// WGSL syntax.
- /// Note: this is intentionally not aliased to uint32_t as we want integer
- /// literals passed to the builder to match WGSL's integer literal types.
- using u32 = decltype(1u);
- /// `f32` is a type alias to `float`
- /// Useful for passing to template methods such as `vec2<f32>()` to imitate
- /// WGSL syntax.
- using f32 = float;
-
- /// Constructor
- ProgramBuilder();
-
- /// Move constructor
- /// @param rhs the builder to move
- ProgramBuilder(ProgramBuilder&& rhs);
-
- /// Destructor
- virtual ~ProgramBuilder();
-
- /// Move assignment operator
- /// @param rhs the builder to move
- /// @return this builder
- ProgramBuilder& operator=(ProgramBuilder&& rhs);
-
- /// Wrap returns a new ProgramBuilder wrapping the Program `program` without
- /// making a deep clone of the Program contents.
- /// ProgramBuilder returned by Wrap() is intended to temporarily extend an
- /// existing immutable program.
- /// As the returned ProgramBuilder wraps `program`, `program` must not be
- /// destructed or assigned while using the returned ProgramBuilder.
- /// TODO(bclayton) - Evaluate whether there are safer alternatives to this
- /// function. See crbug.com/tint/460.
- /// @param program the immutable Program to wrap
- /// @return the ProgramBuilder that wraps `program`
- static ProgramBuilder Wrap(const Program* program);
-
- /// @returns the unique identifier for this program
- ProgramID ID() const { return id_; }
-
- /// @returns a reference to the program's types
- sem::Manager& Types() {
- AssertNotMoved();
- return types_;
- }
-
- /// @returns a reference to the program's types
- const sem::Manager& Types() const {
- AssertNotMoved();
- return types_;
- }
-
- /// @returns a reference to the program's AST nodes storage
- ASTNodeAllocator& ASTNodes() {
- AssertNotMoved();
- return ast_nodes_;
- }
-
- /// @returns a reference to the program's AST nodes storage
- const ASTNodeAllocator& ASTNodes() const {
- AssertNotMoved();
- return ast_nodes_;
- }
-
- /// @returns a reference to the program's semantic nodes storage
- SemNodeAllocator& SemNodes() {
- AssertNotMoved();
- return sem_nodes_;
- }
-
- /// @returns a reference to the program's semantic nodes storage
- const SemNodeAllocator& SemNodes() const {
- AssertNotMoved();
- return sem_nodes_;
- }
-
- /// @returns a reference to the program's AST root Module
- ast::Module& AST() {
- AssertNotMoved();
- return *ast_;
- }
-
- /// @returns a reference to the program's AST root Module
- const ast::Module& AST() const {
- AssertNotMoved();
- return *ast_;
- }
-
- /// @returns a reference to the program's semantic info
- sem::Info& Sem() {
- AssertNotMoved();
- return sem_;
- }
-
- /// @returns a reference to the program's semantic info
- const sem::Info& Sem() const {
- AssertNotMoved();
- return sem_;
- }
-
- /// @returns a reference to the program's SymbolTable
- SymbolTable& Symbols() {
- AssertNotMoved();
- return symbols_;
- }
-
- /// @returns a reference to the program's SymbolTable
- const SymbolTable& Symbols() const {
- AssertNotMoved();
- return symbols_;
- }
-
- /// @returns a reference to the program's diagnostics
- diag::List& Diagnostics() {
- AssertNotMoved();
- return diagnostics_;
- }
-
- /// @returns a reference to the program's diagnostics
- const diag::List& Diagnostics() const {
- AssertNotMoved();
- return diagnostics_;
- }
-
- /// Controls whether the Resolver will be run on the program when it is built.
- /// @param enable the new flag value (defaults to true)
- void SetResolveOnBuild(bool enable) { resolve_on_build_ = enable; }
-
- /// @return true if the Resolver will be run on the program when it is
- /// built.
- bool ResolveOnBuild() const { return resolve_on_build_; }
-
- /// @returns true if the program has no error diagnostics and is not missing
- /// information
- bool IsValid() const;
-
- /// Creates a new ast::Node owned by the ProgramBuilder. When the
- /// ProgramBuilder is destructed, the ast::Node will also be destructed.
- /// @param source the Source of the node
- /// @param args the arguments to pass to the type constructor
- /// @returns the node pointer
- template <typename T, typename... ARGS>
- traits::EnableIfIsType<T, ast::Node>* create(const Source& source,
- ARGS&&... args) {
- AssertNotMoved();
- return ast_nodes_.Create<T>(id_, source, std::forward<ARGS>(args)...);
- }
-
- /// Creates a new ast::Node owned by the ProgramBuilder, injecting the current
- /// Source as set by the last call to SetSource() as the only argument to the
- /// constructor.
- /// When the ProgramBuilder is destructed, the ast::Node will also be
- /// destructed.
- /// @returns the node pointer
- template <typename T>
- traits::EnableIfIsType<T, ast::Node>* create() {
- AssertNotMoved();
- return ast_nodes_.Create<T>(id_, source_);
- }
-
- /// Creates a new ast::Node owned by the ProgramBuilder, injecting the current
- /// Source as set by the last call to SetSource() as the first argument to the
- /// constructor.
- /// When the ProgramBuilder is destructed, the ast::Node will also be
- /// destructed.
- /// @param arg0 the first arguments to pass to the type constructor
- /// @param args the remaining arguments to pass to the type constructor
- /// @returns the node pointer
- template <typename T, typename ARG0, typename... ARGS>
- traits::EnableIf</* T is ast::Node and ARG0 is not Source */
- traits::IsTypeOrDerived<T, ast::Node> &&
- !traits::IsTypeOrDerived<ARG0, Source>,
- T>*
- create(ARG0&& arg0, ARGS&&... args) {
- AssertNotMoved();
- return ast_nodes_.Create<T>(id_, source_, std::forward<ARG0>(arg0),
- std::forward<ARGS>(args)...);
- }
-
- /// Creates a new sem::Node owned by the ProgramBuilder.
- /// When the ProgramBuilder is destructed, the sem::Node will also be
- /// destructed.
- /// @param args the arguments to pass to the type constructor
- /// @returns the node pointer
- template <typename T, typename... ARGS>
- traits::EnableIf<traits::IsTypeOrDerived<T, sem::Node> &&
- !traits::IsTypeOrDerived<T, sem::Type>,
- T>*
- create(ARGS&&... args) {
- AssertNotMoved();
- return sem_nodes_.Create<T>(std::forward<ARGS>(args)...);
- }
-
- /// Creates a new sem::Type owned by the ProgramBuilder.
- /// When the ProgramBuilder is destructed, owned ProgramBuilder and the
- /// returned`Type` will also be destructed.
- /// Types are unique (de-aliased), and so calling create() for the same `T`
- /// and arguments will return the same pointer.
- /// @warning Use this method to acquire a type only if all of its type
- /// information is provided in the constructor arguments `args`.<br>
- /// If the type requires additional configuration after construction that
- /// affect its fundamental type, build the type with `std::make_unique`, make
- /// any necessary alterations and then call unique_type() instead.
- /// @param args the arguments to pass to the type constructor
- /// @returns the de-aliased type pointer
- template <typename T, typename... ARGS>
- traits::EnableIfIsType<T, sem::Type>* create(ARGS&&... args) {
- static_assert(std::is_base_of<sem::Type, T>::value,
- "T does not derive from sem::Type");
- AssertNotMoved();
- return types_.Get<T>(std::forward<ARGS>(args)...);
- }
-
- /// Marks this builder as moved, preventing any further use of the builder.
- void MarkAsMoved();
-
- //////////////////////////////////////////////////////////////////////////////
- // TypesBuilder
- //////////////////////////////////////////////////////////////////////////////
-
- /// TypesBuilder holds basic `tint` types and methods for constructing
- /// complex types.
- class TypesBuilder {
- public:
+ /// A helper used to disable overloads if the first type in `TYPES` is a
+ /// Source. Used to avoid ambiguities in overloads that take a Source as the
+ /// first parameter and those that perfectly-forward the first argument.
+ template <typename... TYPES>
+ using DisableIfSource =
+ traits::EnableIfIsNotType<traits::Decay<traits::NthTypeOf<0, TYPES..., void>>, Source>;
+
+ /// VarOptionals is a helper for accepting a number of optional, extra
+ /// arguments for Var() and Global().
+ struct VarOptionals {
+ template <typename... ARGS>
+ explicit VarOptionals(ARGS&&... args) {
+ Apply(std::forward<ARGS>(args)...);
+ }
+ ~VarOptionals();
+
+ ast::StorageClass storage = ast::StorageClass::kNone;
+ ast::Access access = ast::Access::kUndefined;
+ const ast::Expression* constructor = nullptr;
+ ast::AttributeList attributes = {};
+
+ private:
+ void Set(ast::StorageClass sc) { storage = sc; }
+ void Set(ast::Access ac) { access = ac; }
+ void Set(const ast::Expression* c) { constructor = c; }
+ void Set(const ast::AttributeList& l) { attributes = l; }
+
+ template <typename FIRST, typename... ARGS>
+ void Apply(FIRST&& first, ARGS&&... args) {
+ Set(std::forward<FIRST>(first));
+ Apply(std::forward<ARGS>(args)...);
+ }
+ void Apply() {}
+ };
+
+ public:
+ /// ASTNodeAllocator is an alias to BlockAllocator<ast::Node>
+ using ASTNodeAllocator = utils::BlockAllocator<ast::Node>;
+
+ /// SemNodeAllocator is an alias to BlockAllocator<sem::Node>
+ using SemNodeAllocator = utils::BlockAllocator<sem::Node>;
+
/// Constructor
- /// @param builder the program builder
- explicit TypesBuilder(ProgramBuilder* builder);
+ ProgramBuilder();
- /// @return the tint AST type for the C type `T`.
- template <typename T>
- const ast::Type* Of() const {
- return CToAST<T>::get(this);
- }
+ /// Move constructor
+ /// @param rhs the builder to move
+ ProgramBuilder(ProgramBuilder&& rhs);
- /// @returns a boolean type
- const ast::Bool* bool_() const { return builder->create<ast::Bool>(); }
+ /// Destructor
+ virtual ~ProgramBuilder();
- /// @param source the Source of the node
- /// @returns a boolean type
- const ast::Bool* bool_(const Source& source) const {
- return builder->create<ast::Bool>(source);
+ /// Move assignment operator
+ /// @param rhs the builder to move
+ /// @return this builder
+ ProgramBuilder& operator=(ProgramBuilder&& rhs);
+
+ /// Wrap returns a new ProgramBuilder wrapping the Program `program` without
+ /// making a deep clone of the Program contents.
+ /// ProgramBuilder returned by Wrap() is intended to temporarily extend an
+ /// existing immutable program.
+ /// As the returned ProgramBuilder wraps `program`, `program` must not be
+ /// destructed or assigned while using the returned ProgramBuilder.
+ /// TODO(bclayton) - Evaluate whether there are safer alternatives to this
+ /// function. See crbug.com/tint/460.
+ /// @param program the immutable Program to wrap
+ /// @return the ProgramBuilder that wraps `program`
+ static ProgramBuilder Wrap(const Program* program);
+
+ /// @returns the unique identifier for this program
+ ProgramID ID() const { return id_; }
+
+ /// @returns a reference to the program's types
+ sem::Manager& Types() {
+ AssertNotMoved();
+ return types_;
}
- /// @returns a f32 type
- const ast::F32* f32() const { return builder->create<ast::F32>(); }
+ /// @returns a reference to the program's types
+ const sem::Manager& Types() const {
+ AssertNotMoved();
+ return types_;
+ }
- /// @param source the Source of the node
- /// @returns a f32 type
- const ast::F32* f32(const Source& source) const {
- return builder->create<ast::F32>(source);
+ /// @returns a reference to the program's AST nodes storage
+ ASTNodeAllocator& ASTNodes() {
+ AssertNotMoved();
+ return ast_nodes_;
}
- /// @returns a i32 type
- const ast::I32* i32() const { return builder->create<ast::I32>(); }
+ /// @returns a reference to the program's AST nodes storage
+ const ASTNodeAllocator& ASTNodes() const {
+ AssertNotMoved();
+ return ast_nodes_;
+ }
- /// @param source the Source of the node
- /// @returns a i32 type
- const ast::I32* i32(const Source& source) const {
- return builder->create<ast::I32>(source);
+ /// @returns a reference to the program's semantic nodes storage
+ SemNodeAllocator& SemNodes() {
+ AssertNotMoved();
+ return sem_nodes_;
}
- /// @returns a u32 type
- const ast::U32* u32() const { return builder->create<ast::U32>(); }
+ /// @returns a reference to the program's semantic nodes storage
+ const SemNodeAllocator& SemNodes() const {
+ AssertNotMoved();
+ return sem_nodes_;
+ }
- /// @param source the Source of the node
- /// @returns a u32 type
- const ast::U32* u32(const Source& source) const {
- return builder->create<ast::U32>(source);
+ /// @returns a reference to the program's AST root Module
+ ast::Module& AST() {
+ AssertNotMoved();
+ return *ast_;
}
- /// @returns a void type
- const ast::Void* void_() const { return builder->create<ast::Void>(); }
+ /// @returns a reference to the program's AST root Module
+ const ast::Module& AST() const {
+ AssertNotMoved();
+ return *ast_;
+ }
- /// @param source the Source of the node
- /// @returns a void type
- const ast::Void* void_(const Source& source) const {
- return builder->create<ast::Void>(source);
+ /// @returns a reference to the program's semantic info
+ sem::Info& Sem() {
+ AssertNotMoved();
+ return sem_;
}
- /// @param type vector subtype
- /// @param n vector width in elements
- /// @return the tint AST type for a `n`-element vector of `type`.
- const ast::Vector* vec(const ast::Type* type, uint32_t n) const {
- return builder->create<ast::Vector>(type, n);
+ /// @returns a reference to the program's semantic info
+ const sem::Info& Sem() const {
+ AssertNotMoved();
+ return sem_;
}
- /// @param source the Source of the node
- /// @param type vector subtype
- /// @param n vector width in elements
- /// @return the tint AST type for a `n`-element vector of `type`.
- const ast::Vector* vec(const Source& source,
- const ast::Type* type,
- uint32_t n) const {
- return builder->create<ast::Vector>(source, type, n);
+ /// @returns a reference to the program's SymbolTable
+ SymbolTable& Symbols() {
+ AssertNotMoved();
+ return symbols_;
}
- /// @param type vector subtype
- /// @return the tint AST type for a 2-element vector of `type`.
- const ast::Vector* vec2(const ast::Type* type) const {
- return vec(type, 2u);
+ /// @returns a reference to the program's SymbolTable
+ const SymbolTable& Symbols() const {
+ AssertNotMoved();
+ return symbols_;
}
- /// @param type vector subtype
- /// @return the tint AST type for a 3-element vector of `type`.
- const ast::Vector* vec3(const ast::Type* type) const {
- return vec(type, 3u);
+ /// @returns a reference to the program's diagnostics
+ diag::List& Diagnostics() {
+ AssertNotMoved();
+ return diagnostics_;
}
- /// @param type vector subtype
- /// @return the tint AST type for a 4-element vector of `type`.
- const ast::Vector* vec4(const ast::Type* type) const {
- return vec(type, 4u);
+ /// @returns a reference to the program's diagnostics
+ const diag::List& Diagnostics() const {
+ AssertNotMoved();
+ return diagnostics_;
}
- /// @param n vector width in elements
- /// @return the tint AST type for a `n`-element vector of `type`.
+ /// Controls whether the Resolver will be run on the program when it is built.
+ /// @param enable the new flag value (defaults to true)
+ void SetResolveOnBuild(bool enable) { resolve_on_build_ = enable; }
+
+ /// @return true if the Resolver will be run on the program when it is
+ /// built.
+ bool ResolveOnBuild() const { return resolve_on_build_; }
+
+ /// @returns true if the program has no error diagnostics and is not missing
+ /// information
+ bool IsValid() const;
+
+ /// Creates a new ast::Node owned by the ProgramBuilder. When the
+ /// ProgramBuilder is destructed, the ast::Node will also be destructed.
+ /// @param source the Source of the node
+ /// @param args the arguments to pass to the type constructor
+ /// @returns the node pointer
+ template <typename T, typename... ARGS>
+ traits::EnableIfIsType<T, ast::Node>* create(const Source& source, ARGS&&... args) {
+ AssertNotMoved();
+ return ast_nodes_.Create<T>(id_, source, std::forward<ARGS>(args)...);
+ }
+
+ /// Creates a new ast::Node owned by the ProgramBuilder, injecting the current
+ /// Source as set by the last call to SetSource() as the only argument to the
+ /// constructor.
+ /// When the ProgramBuilder is destructed, the ast::Node will also be
+ /// destructed.
+ /// @returns the node pointer
template <typename T>
- const ast::Vector* vec(uint32_t n) const {
- return vec(Of<T>(), n);
+ traits::EnableIfIsType<T, ast::Node>* create() {
+ AssertNotMoved();
+ return ast_nodes_.Create<T>(id_, source_);
+ }
+
+ /// Creates a new ast::Node owned by the ProgramBuilder, injecting the current
+ /// Source as set by the last call to SetSource() as the first argument to the
+ /// constructor.
+ /// When the ProgramBuilder is destructed, the ast::Node will also be
+ /// destructed.
+ /// @param arg0 the first arguments to pass to the type constructor
+ /// @param args the remaining arguments to pass to the type constructor
+ /// @returns the node pointer
+ template <typename T, typename ARG0, typename... ARGS>
+ traits::EnableIf</* T is ast::Node and ARG0 is not Source */
+ traits::IsTypeOrDerived<T, ast::Node> &&
+ !traits::IsTypeOrDerived<ARG0, Source>,
+ T>*
+ create(ARG0&& arg0, ARGS&&... args) {
+ AssertNotMoved();
+ return ast_nodes_.Create<T>(id_, source_, std::forward<ARG0>(arg0),
+ std::forward<ARGS>(args)...);
+ }
+
+ /// Creates a new sem::Node owned by the ProgramBuilder.
+ /// When the ProgramBuilder is destructed, the sem::Node will also be
+ /// destructed.
+ /// @param args the arguments to pass to the type constructor
+ /// @returns the node pointer
+ template <typename T, typename... ARGS>
+ traits::EnableIf<traits::IsTypeOrDerived<T, sem::Node> &&
+ !traits::IsTypeOrDerived<T, sem::Type>,
+ T>*
+ create(ARGS&&... args) {
+ AssertNotMoved();
+ return sem_nodes_.Create<T>(std::forward<ARGS>(args)...);
+ }
+
+ /// Creates a new sem::Type owned by the ProgramBuilder.
+    /// When the ProgramBuilder is destructed, the returned `Type` will also be
+    /// destructed.
+ /// Types are unique (de-aliased), and so calling create() for the same `T`
+ /// and arguments will return the same pointer.
+ /// @warning Use this method to acquire a type only if all of its type
+ /// information is provided in the constructor arguments `args`.<br>
+ /// If the type requires additional configuration after construction that
+ /// affect its fundamental type, build the type with `std::make_unique`, make
+ /// any necessary alterations and then call unique_type() instead.
+ /// @param args the arguments to pass to the type constructor
+ /// @returns the de-aliased type pointer
+ template <typename T, typename... ARGS>
+ traits::EnableIfIsType<T, sem::Type>* create(ARGS&&... args) {
+ static_assert(std::is_base_of<sem::Type, T>::value, "T does not derive from sem::Type");
+ AssertNotMoved();
+ return types_.Get<T>(std::forward<ARGS>(args)...);
+ }
+
+ /// Marks this builder as moved, preventing any further use of the builder.
+ void MarkAsMoved();
+
+ //////////////////////////////////////////////////////////////////////////////
+ // TypesBuilder
+ //////////////////////////////////////////////////////////////////////////////
+
+ /// TypesBuilder holds basic `tint` types and methods for constructing
+ /// complex types.
+ class TypesBuilder {
+ public:
+ /// Constructor
+ /// @param builder the program builder
+ explicit TypesBuilder(ProgramBuilder* builder);
+
+ /// @return the tint AST type for the C type `T`.
+ template <typename T>
+ const ast::Type* Of() const {
+ return CToAST<T>::get(this);
+ }
+
+ /// @returns a boolean type
+ const ast::Bool* bool_() const { return builder->create<ast::Bool>(); }
+
+ /// @param source the Source of the node
+ /// @returns a boolean type
+ const ast::Bool* bool_(const Source& source) const {
+ return builder->create<ast::Bool>(source);
+ }
+
+ /// @returns a f16 type
+ const ast::F16* f16() const { return builder->create<ast::F16>(); }
+
+ /// @param source the Source of the node
+ /// @returns a f16 type
+ const ast::F16* f16(const Source& source) const {
+ return builder->create<ast::F16>(source);
+ }
+
+ /// @returns a f32 type
+ const ast::F32* f32() const { return builder->create<ast::F32>(); }
+
+ /// @param source the Source of the node
+ /// @returns a f32 type
+ const ast::F32* f32(const Source& source) const {
+ return builder->create<ast::F32>(source);
+ }
+
+ /// @returns a i32 type
+ const ast::I32* i32() const { return builder->create<ast::I32>(); }
+
+ /// @param source the Source of the node
+ /// @returns a i32 type
+ const ast::I32* i32(const Source& source) const {
+ return builder->create<ast::I32>(source);
+ }
+
+ /// @returns a u32 type
+ const ast::U32* u32() const { return builder->create<ast::U32>(); }
+
+ /// @param source the Source of the node
+ /// @returns a u32 type
+ const ast::U32* u32(const Source& source) const {
+ return builder->create<ast::U32>(source);
+ }
+
+ /// @returns a void type
+ const ast::Void* void_() const { return builder->create<ast::Void>(); }
+
+ /// @param source the Source of the node
+ /// @returns a void type
+ const ast::Void* void_(const Source& source) const {
+ return builder->create<ast::Void>(source);
+ }
+
+ /// @param type vector subtype
+ /// @param n vector width in elements
+ /// @return the tint AST type for a `n`-element vector of `type`.
+ const ast::Vector* vec(const ast::Type* type, uint32_t n) const {
+ return builder->create<ast::Vector>(type, n);
+ }
+
+ /// @param source the Source of the node
+ /// @param type vector subtype
+ /// @param n vector width in elements
+ /// @return the tint AST type for a `n`-element vector of `type`.
+ const ast::Vector* vec(const Source& source, const ast::Type* type, uint32_t n) const {
+ return builder->create<ast::Vector>(source, type, n);
+ }
+
+ /// @param type vector subtype
+ /// @return the tint AST type for a 2-element vector of `type`.
+ const ast::Vector* vec2(const ast::Type* type) const { return vec(type, 2u); }
+
+ /// @param type vector subtype
+ /// @return the tint AST type for a 3-element vector of `type`.
+ const ast::Vector* vec3(const ast::Type* type) const { return vec(type, 3u); }
+
+ /// @param type vector subtype
+ /// @return the tint AST type for a 4-element vector of `type`.
+ const ast::Vector* vec4(const ast::Type* type) const { return vec(type, 4u); }
+
+ /// @param n vector width in elements
+ /// @return the tint AST type for a `n`-element vector of `type`.
+ template <typename T>
+ const ast::Vector* vec(uint32_t n) const {
+ return vec(Of<T>(), n);
+ }
+
+ /// @return the tint AST type for a 2-element vector of the C type `T`.
+ template <typename T>
+ const ast::Vector* vec2() const {
+ return vec2(Of<T>());
+ }
+
+ /// @return the tint AST type for a 3-element vector of the C type `T`.
+ template <typename T>
+ const ast::Vector* vec3() const {
+ return vec3(Of<T>());
+ }
+
+ /// @return the tint AST type for a 4-element vector of the C type `T`.
+ template <typename T>
+ const ast::Vector* vec4() const {
+ return vec4(Of<T>());
+ }
+
+ /// @param type matrix subtype
+ /// @param columns number of columns for the matrix
+ /// @param rows number of rows for the matrix
+ /// @return the tint AST type for a matrix of `type`
+ const ast::Matrix* mat(const ast::Type* type, uint32_t columns, uint32_t rows) const {
+ return builder->create<ast::Matrix>(type, rows, columns);
+ }
+
+ /// @param source the Source of the node
+ /// @param type matrix subtype
+ /// @param columns number of columns for the matrix
+ /// @param rows number of rows for the matrix
+ /// @return the tint AST type for a matrix of `type`
+ const ast::Matrix* mat(const Source& source,
+ const ast::Type* type,
+ uint32_t columns,
+ uint32_t rows) const {
+ return builder->create<ast::Matrix>(source, type, rows, columns);
+ }
+
+ /// @param type matrix subtype
+    /// @return the tint AST type for a 2x2 matrix of `type`.
+ const ast::Matrix* mat2x2(const ast::Type* type) const { return mat(type, 2u, 2u); }
+
+ /// @param type matrix subtype
+ /// @return the tint AST type for a 2x3 matrix of `type`.
+ const ast::Matrix* mat2x3(const ast::Type* type) const { return mat(type, 2u, 3u); }
+
+ /// @param type matrix subtype
+ /// @return the tint AST type for a 2x4 matrix of `type`.
+ const ast::Matrix* mat2x4(const ast::Type* type) const { return mat(type, 2u, 4u); }
+
+ /// @param type matrix subtype
+ /// @return the tint AST type for a 3x2 matrix of `type`.
+ const ast::Matrix* mat3x2(const ast::Type* type) const { return mat(type, 3u, 2u); }
+
+ /// @param type matrix subtype
+ /// @return the tint AST type for a 3x3 matrix of `type`.
+ const ast::Matrix* mat3x3(const ast::Type* type) const { return mat(type, 3u, 3u); }
+
+ /// @param type matrix subtype
+ /// @return the tint AST type for a 3x4 matrix of `type`.
+ const ast::Matrix* mat3x4(const ast::Type* type) const { return mat(type, 3u, 4u); }
+
+ /// @param type matrix subtype
+ /// @return the tint AST type for a 4x2 matrix of `type`.
+ const ast::Matrix* mat4x2(const ast::Type* type) const { return mat(type, 4u, 2u); }
+
+ /// @param type matrix subtype
+ /// @return the tint AST type for a 4x3 matrix of `type`.
+ const ast::Matrix* mat4x3(const ast::Type* type) const { return mat(type, 4u, 3u); }
+
+ /// @param type matrix subtype
+ /// @return the tint AST type for a 4x4 matrix of `type`.
+ const ast::Matrix* mat4x4(const ast::Type* type) const { return mat(type, 4u, 4u); }
+
+ /// @param columns number of columns for the matrix
+ /// @param rows number of rows for the matrix
+ /// @return the tint AST type for a matrix of `type`
+ template <typename T>
+ const ast::Matrix* mat(uint32_t columns, uint32_t rows) const {
+ return mat(Of<T>(), columns, rows);
+ }
+
+    /// @return the tint AST type for a 2x2 matrix of the C type `T`.
+ template <typename T>
+ const ast::Matrix* mat2x2() const {
+ return mat2x2(Of<T>());
+ }
+
+ /// @return the tint AST type for a 2x3 matrix of the C type `T`.
+ template <typename T>
+ const ast::Matrix* mat2x3() const {
+ return mat2x3(Of<T>());
+ }
+
+ /// @return the tint AST type for a 2x4 matrix of the C type `T`.
+ template <typename T>
+ const ast::Matrix* mat2x4() const {
+ return mat2x4(Of<T>());
+ }
+
+ /// @return the tint AST type for a 3x2 matrix of the C type `T`.
+ template <typename T>
+ const ast::Matrix* mat3x2() const {
+ return mat3x2(Of<T>());
+ }
+
+ /// @return the tint AST type for a 3x3 matrix of the C type `T`.
+ template <typename T>
+ const ast::Matrix* mat3x3() const {
+ return mat3x3(Of<T>());
+ }
+
+ /// @return the tint AST type for a 3x4 matrix of the C type `T`.
+ template <typename T>
+ const ast::Matrix* mat3x4() const {
+ return mat3x4(Of<T>());
+ }
+
+ /// @return the tint AST type for a 4x2 matrix of the C type `T`.
+ template <typename T>
+ const ast::Matrix* mat4x2() const {
+ return mat4x2(Of<T>());
+ }
+
+ /// @return the tint AST type for a 4x3 matrix of the C type `T`.
+ template <typename T>
+ const ast::Matrix* mat4x3() const {
+ return mat4x3(Of<T>());
+ }
+
+ /// @return the tint AST type for a 4x4 matrix of the C type `T`.
+ template <typename T>
+ const ast::Matrix* mat4x4() const {
+ return mat4x4(Of<T>());
+ }
+
+ /// @param subtype the array element type
+ /// @param n the array size. nullptr represents a runtime-array
+ /// @param attrs the optional attributes for the array
+ /// @return the tint AST type for a array of size `n` of type `T`
+ template <typename EXPR = ast::Expression*>
+ const ast::Array* array(const ast::Type* subtype,
+ EXPR&& n = nullptr,
+ ast::AttributeList attrs = {}) const {
+ return builder->create<ast::Array>(subtype, builder->Expr(std::forward<EXPR>(n)),
+ attrs);
+ }
+
+ /// @param source the Source of the node
+ /// @param subtype the array element type
+ /// @param n the array size. nullptr represents a runtime-array
+ /// @param attrs the optional attributes for the array
+ /// @return the tint AST type for a array of size `n` of type `T`
+ template <typename EXPR = ast::Expression*>
+ const ast::Array* array(const Source& source,
+ const ast::Type* subtype,
+ EXPR&& n = nullptr,
+ ast::AttributeList attrs = {}) const {
+ return builder->create<ast::Array>(source, subtype,
+ builder->Expr(std::forward<EXPR>(n)), attrs);
+ }
+
+ /// @param subtype the array element type
+ /// @param n the array size. nullptr represents a runtime-array
+ /// @param stride the array stride. 0 represents implicit stride
+ /// @return the tint AST type for a array of size `n` of type `T`
+ template <typename EXPR>
+ const ast::Array* array(const ast::Type* subtype, EXPR&& n, uint32_t stride) const {
+ ast::AttributeList attrs;
+ if (stride) {
+ attrs.emplace_back(builder->create<ast::StrideAttribute>(stride));
+ }
+ return array(subtype, std::forward<EXPR>(n), std::move(attrs));
+ }
+
+ /// @param source the Source of the node
+ /// @param subtype the array element type
+ /// @param n the array size. nullptr represents a runtime-array
+ /// @param stride the array stride. 0 represents implicit stride
+ /// @return the tint AST type for a array of size `n` of type `T`
+ template <typename EXPR>
+ const ast::Array* array(const Source& source,
+ const ast::Type* subtype,
+ EXPR&& n,
+ uint32_t stride) const {
+ ast::AttributeList attrs;
+ if (stride) {
+ attrs.emplace_back(builder->create<ast::StrideAttribute>(stride));
+ }
+ return array(source, subtype, std::forward<EXPR>(n), std::move(attrs));
+ }
+
+ /// @return the tint AST type for a runtime-sized array of type `T`
+ template <typename T>
+ const ast::Array* array() const {
+ return array(Of<T>(), nullptr);
+ }
+
+ /// @return the tint AST type for an array of size `N` of type `T`
+ template <typename T, int N>
+ const ast::Array* array() const {
+ return array(Of<T>(), builder->Expr(tint::u32(N)));
+ }
+
+ /// @param stride the array stride
+ /// @return the tint AST type for a runtime-sized array of type `T`
+ template <typename T>
+ const ast::Array* array(uint32_t stride) const {
+ return array(Of<T>(), nullptr, stride);
+ }
+
+ /// @param stride the array stride
+ /// @return the tint AST type for an array of size `N` of type `T`
+ template <typename T, int N>
+ const ast::Array* array(uint32_t stride) const {
+ return array(Of<T>(), builder->Expr(tint::u32(N)), stride);
+ }
+
+ /// Creates a type name
+ /// @param name the name
+ /// @returns the type name
+ template <typename NAME>
+ const ast::TypeName* type_name(NAME&& name) const {
+ return builder->create<ast::TypeName>(builder->Sym(std::forward<NAME>(name)));
+ }
+
+ /// Creates a type name
+ /// @param source the Source of the node
+ /// @param name the name
+ /// @returns the type name
+ template <typename NAME>
+ const ast::TypeName* type_name(const Source& source, NAME&& name) const {
+ return builder->create<ast::TypeName>(source, builder->Sym(std::forward<NAME>(name)));
+ }
+
+ /// Creates an alias type
+ /// @param name the alias name
+ /// @param type the alias type
+ /// @returns the alias pointer
+ template <typename NAME>
+ const ast::Alias* alias(NAME&& name, const ast::Type* type) const {
+ auto sym = builder->Sym(std::forward<NAME>(name));
+ return builder->create<ast::Alias>(sym, type);
+ }
+
+ /// Creates an alias type
+ /// @param source the Source of the node
+ /// @param name the alias name
+ /// @param type the alias type
+ /// @returns the alias pointer
+ template <typename NAME>
+ const ast::Alias* alias(const Source& source, NAME&& name, const ast::Type* type) const {
+ auto sym = builder->Sym(std::forward<NAME>(name));
+ return builder->create<ast::Alias>(source, sym, type);
+ }
+
+ /// @param type the type of the pointer
+ /// @param storage_class the storage class of the pointer
+ /// @param access the optional access control of the pointer
+ /// @return the pointer to `type` with the given ast::StorageClass
+ const ast::Pointer* pointer(const ast::Type* type,
+ ast::StorageClass storage_class,
+ ast::Access access = ast::Access::kUndefined) const {
+ return builder->create<ast::Pointer>(type, storage_class, access);
+ }
+
+ /// @param source the Source of the node
+ /// @param type the type of the pointer
+ /// @param storage_class the storage class of the pointer
+ /// @param access the optional access control of the pointer
+ /// @return the pointer to `type` with the given ast::StorageClass
+ const ast::Pointer* pointer(const Source& source,
+ const ast::Type* type,
+ ast::StorageClass storage_class,
+ ast::Access access = ast::Access::kUndefined) const {
+ return builder->create<ast::Pointer>(source, type, storage_class, access);
+ }
+
+ /// @param storage_class the storage class of the pointer
+ /// @param access the optional access control of the pointer
+ /// @return the pointer to type `T` with the given ast::StorageClass.
+ template <typename T>
+ const ast::Pointer* pointer(ast::StorageClass storage_class,
+ ast::Access access = ast::Access::kUndefined) const {
+ return pointer(Of<T>(), storage_class, access);
+ }
+
+ /// @param source the Source of the node
+ /// @param type the type of the atomic
+ /// @return the atomic to `type`
+ const ast::Atomic* atomic(const Source& source, const ast::Type* type) const {
+ return builder->create<ast::Atomic>(source, type);
+ }
+
+ /// @param type the type of the atomic
+ /// @return the atomic to `type`
+ const ast::Atomic* atomic(const ast::Type* type) const {
+ return builder->create<ast::Atomic>(type);
+ }
+
+ /// @return the atomic to type `T`
+ template <typename T>
+ const ast::Atomic* atomic() const {
+ return atomic(Of<T>());
+ }
+
+ /// @param kind the kind of sampler
+ /// @returns the sampler
+ const ast::Sampler* sampler(ast::SamplerKind kind) const {
+ return builder->create<ast::Sampler>(kind);
+ }
+
+ /// @param source the Source of the node
+ /// @param kind the kind of sampler
+ /// @returns the sampler
+ const ast::Sampler* sampler(const Source& source, ast::SamplerKind kind) const {
+ return builder->create<ast::Sampler>(source, kind);
+ }
+
+ /// @param dims the dimensionality of the texture
+ /// @returns the depth texture
+ const ast::DepthTexture* depth_texture(ast::TextureDimension dims) const {
+ return builder->create<ast::DepthTexture>(dims);
+ }
+
+ /// @param source the Source of the node
+ /// @param dims the dimensionality of the texture
+ /// @returns the depth texture
+ const ast::DepthTexture* depth_texture(const Source& source,
+ ast::TextureDimension dims) const {
+ return builder->create<ast::DepthTexture>(source, dims);
+ }
+
+ /// @param dims the dimensionality of the texture
+ /// @returns the multisampled depth texture
+ const ast::DepthMultisampledTexture* depth_multisampled_texture(
+ ast::TextureDimension dims) const {
+ return builder->create<ast::DepthMultisampledTexture>(dims);
+ }
+
+ /// @param source the Source of the node
+ /// @param dims the dimensionality of the texture
+ /// @returns the multisampled depth texture
+ const ast::DepthMultisampledTexture* depth_multisampled_texture(
+ const Source& source,
+ ast::TextureDimension dims) const {
+ return builder->create<ast::DepthMultisampledTexture>(source, dims);
+ }
+
+ /// @param dims the dimensionality of the texture
+ /// @param subtype the texture subtype.
+ /// @returns the sampled texture
+ const ast::SampledTexture* sampled_texture(ast::TextureDimension dims,
+ const ast::Type* subtype) const {
+ return builder->create<ast::SampledTexture>(dims, subtype);
+ }
+
+ /// @param source the Source of the node
+ /// @param dims the dimensionality of the texture
+ /// @param subtype the texture subtype.
+ /// @returns the sampled texture
+ const ast::SampledTexture* sampled_texture(const Source& source,
+ ast::TextureDimension dims,
+ const ast::Type* subtype) const {
+ return builder->create<ast::SampledTexture>(source, dims, subtype);
+ }
+
+ /// @param dims the dimensionality of the texture
+ /// @param subtype the texture subtype.
+ /// @returns the multisampled texture
+ const ast::MultisampledTexture* multisampled_texture(ast::TextureDimension dims,
+ const ast::Type* subtype) const {
+ return builder->create<ast::MultisampledTexture>(dims, subtype);
+ }
+
+ /// @param source the Source of the node
+ /// @param dims the dimensionality of the texture
+ /// @param subtype the texture subtype.
+ /// @returns the multisampled texture
+ const ast::MultisampledTexture* multisampled_texture(const Source& source,
+ ast::TextureDimension dims,
+ const ast::Type* subtype) const {
+ return builder->create<ast::MultisampledTexture>(source, dims, subtype);
+ }
+
+ /// @param dims the dimensionality of the texture
+ /// @param format the texel format of the texture
+ /// @param access the access control of the texture
+ /// @returns the storage texture
+ const ast::StorageTexture* storage_texture(ast::TextureDimension dims,
+ ast::TexelFormat format,
+ ast::Access access) const {
+ auto* subtype = ast::StorageTexture::SubtypeFor(format, *builder);
+ return builder->create<ast::StorageTexture>(dims, format, subtype, access);
+ }
+
+ /// @param source the Source of the node
+ /// @param dims the dimensionality of the texture
+ /// @param format the texel format of the texture
+ /// @param access the access control of the texture
+ /// @returns the storage texture
+ const ast::StorageTexture* storage_texture(const Source& source,
+ ast::TextureDimension dims,
+ ast::TexelFormat format,
+ ast::Access access) const {
+ auto* subtype = ast::StorageTexture::SubtypeFor(format, *builder);
+ return builder->create<ast::StorageTexture>(source, dims, format, subtype, access);
+ }
+
+ /// @returns the external texture
+ const ast::ExternalTexture* external_texture() const {
+ return builder->create<ast::ExternalTexture>();
+ }
+
+ /// @param source the Source of the node
+ /// @returns the external texture
+ const ast::ExternalTexture* external_texture(const Source& source) const {
+ return builder->create<ast::ExternalTexture>(source);
+ }
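+
+ // Editor's note: an illustrative usage sketch, not part of the original header.
+ // Assuming a ProgramBuilder `b` and the usual scalar helpers (ty.i32(), ty.f32()):
+ //   auto* a = b.ty.atomic<u32>();                                 // atomic<u32>
+ //   auto* t = b.ty.sampled_texture(ast::TextureDimension::k2d,    // texture_2d<f32>
+ //                                  b.ty.f32());
+ //   auto* s = b.ty.sampler(ast::SamplerKind::kComparisonSampler); // sampler_comparison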
+
+ /// Constructs a TypeName for the type declaration.
+ /// @param type the type
+ /// @return a new ast::TypeName referring to the type declaration's name
+ const ast::TypeName* Of(const ast::TypeDecl* type) const;
+
+ /// The ProgramBuilder
+ ProgramBuilder* const builder;
+
+ private:
+ /// CToAST<T> is specialized for various `T` types and each specialization
+ /// contains a single static `get()` method for obtaining the corresponding
+ /// AST type for the C type `T`.
+ /// `get()` has the signature:
+ /// `static const ast::Type* get(Types* t)`
+ template <typename T>
+ struct CToAST {};
+ };
+
+ //////////////////////////////////////////////////////////////////////////////
+ // AST helper methods
+ //////////////////////////////////////////////////////////////////////////////
+
+ /// @return a new unnamed symbol
+ Symbol Sym() { return Symbols().New(); }
+
+ /// @param name the symbol string
+ /// @return a Symbol with the given name
+ Symbol Sym(const std::string& name) { return Symbols().Register(name); }
+
+ /// @param sym the symbol
+ /// @return `sym`
+ Symbol Sym(Symbol sym) { return sym; }
+
+ /// @param expr the expression
+ /// @return expr
+ template <typename T>
+ traits::EnableIfIsType<T, ast::Expression>* Expr(T* expr) {
+ return expr;
}
- /// @return the tint AST type for a 2-element vector of the C type `T`.
- template <typename T>
- const ast::Vector* vec2() const {
- return vec2(Of<T>());
+ /// Passthrough for nullptr
+ /// @return nullptr
+ const ast::IdentifierExpression* Expr(std::nullptr_t) { return nullptr; }
+
+ /// @param source the source information
+ /// @param symbol the identifier symbol
+ /// @return an ast::IdentifierExpression with the given symbol
+ const ast::IdentifierExpression* Expr(const Source& source, Symbol symbol) {
+ return create<ast::IdentifierExpression>(source, symbol);
}
- /// @return the tint AST type for a 3-element vector of the C type `T`.
- template <typename T>
- const ast::Vector* vec3() const {
- return vec3(Of<T>());
+ /// @param symbol the identifier symbol
+ /// @return an ast::IdentifierExpression with the given symbol
+ const ast::IdentifierExpression* Expr(Symbol symbol) {
+ return create<ast::IdentifierExpression>(symbol);
}
- /// @return the tint AST type for a 4-element vector of the C type `T`.
- template <typename T>
- const ast::Vector* vec4() const {
- return vec4(Of<T>());
+ /// @param source the source information
+ /// @param variable the AST variable
+ /// @return an ast::IdentifierExpression with the variable's symbol
+ const ast::IdentifierExpression* Expr(const Source& source, const ast::Variable* variable) {
+ return create<ast::IdentifierExpression>(source, variable->symbol);
}
- /// @param type matrix subtype
- /// @param columns number of columns for the matrix
- /// @param rows number of rows for the matrix
- /// @return the tint AST type for a matrix of `type`
- const ast::Matrix* mat(const ast::Type* type,
- uint32_t columns,
- uint32_t rows) const {
- return builder->create<ast::Matrix>(type, rows, columns);
+ /// @param variable the AST variable
+ /// @return an ast::IdentifierExpression with the variable's symbol
+ const ast::IdentifierExpression* Expr(const ast::Variable* variable) {
+ return create<ast::IdentifierExpression>(variable->symbol);
}
- /// @param source the Source of the node
- /// @param type matrix subtype
- /// @param columns number of columns for the matrix
- /// @param rows number of rows for the matrix
- /// @return the tint AST type for a matrix of `type`
- const ast::Matrix* mat(const Source& source,
- const ast::Type* type,
- uint32_t columns,
- uint32_t rows) const {
- return builder->create<ast::Matrix>(source, type, rows, columns);
+ /// @param source the source information
+ /// @param name the identifier name
+ /// @return an ast::IdentifierExpression with the given name
+ const ast::IdentifierExpression* Expr(const Source& source, const char* name) {
+ return create<ast::IdentifierExpression>(source, Symbols().Register(name));
}
- /// @param type matrix subtype
- /// @return the tint AST type for a 2x3 matrix of `type`.
- const ast::Matrix* mat2x2(const ast::Type* type) const {
- return mat(type, 2u, 2u);
+ /// @param name the identifier name
+ /// @return an ast::IdentifierExpression with the given name
+ const ast::IdentifierExpression* Expr(const char* name) {
+ return create<ast::IdentifierExpression>(Symbols().Register(name));
}
- /// @param type matrix subtype
- /// @return the tint AST type for a 2x3 matrix of `type`.
- const ast::Matrix* mat2x3(const ast::Type* type) const {
- return mat(type, 2u, 3u);
+ /// @param source the source information
+ /// @param name the identifier name
+ /// @return an ast::IdentifierExpression with the given name
+ const ast::IdentifierExpression* Expr(const Source& source, const std::string& name) {
+ return create<ast::IdentifierExpression>(source, Symbols().Register(name));
}
- /// @param type matrix subtype
- /// @return the tint AST type for a 2x4 matrix of `type`.
- const ast::Matrix* mat2x4(const ast::Type* type) const {
- return mat(type, 2u, 4u);
+ /// @param name the identifier name
+ /// @return an ast::IdentifierExpression with the given name
+ const ast::IdentifierExpression* Expr(const std::string& name) {
+ return create<ast::IdentifierExpression>(Symbols().Register(name));
}
- /// @param type matrix subtype
- /// @return the tint AST type for a 3x2 matrix of `type`.
- const ast::Matrix* mat3x2(const ast::Type* type) const {
- return mat(type, 3u, 2u);
+ /// @param source the source information
+ /// @param value the boolean value
+ /// @return a Scalar constructor for the given value
+ template <typename BOOL>
+ std::enable_if_t<std::is_same_v<BOOL, bool>, const ast::BoolLiteralExpression*> Expr(
+ const Source& source,
+ BOOL value) {
+ return create<ast::BoolLiteralExpression>(source, value);
}
- /// @param type matrix subtype
- /// @return the tint AST type for a 3x3 matrix of `type`.
- const ast::Matrix* mat3x3(const ast::Type* type) const {
- return mat(type, 3u, 3u);
+ /// @param value the boolean value
+ /// @return a Scalar constructor for the given value
+ template <typename BOOL>
+ std::enable_if_t<std::is_same_v<BOOL, bool>, const ast::BoolLiteralExpression*> Expr(
+ BOOL value) {
+ return create<ast::BoolLiteralExpression>(value);
}
- /// @param type matrix subtype
- /// @return the tint AST type for a 3x4 matrix of `type`.
- const ast::Matrix* mat3x4(const ast::Type* type) const {
- return mat(type, 3u, 4u);
+ /// @param source the source information
+ /// @param value the float value
+ /// @return a 'f'-suffixed FloatLiteralExpression for the f32 value
+ const ast::FloatLiteralExpression* Expr(const Source& source, f32 value) {
+ return create<ast::FloatLiteralExpression>(source, static_cast<double>(value.value),
+ ast::FloatLiteralExpression::Suffix::kF);
}
- /// @param type matrix subtype
- /// @return the tint AST type for a 4x2 matrix of `type`.
- const ast::Matrix* mat4x2(const ast::Type* type) const {
- return mat(type, 4u, 2u);
+ /// @param value the float value
+ /// @return a 'f'-suffixed FloatLiteralExpression for the f32 value
+ const ast::FloatLiteralExpression* Expr(f32 value) {
+ return create<ast::FloatLiteralExpression>(static_cast<double>(value.value),
+ ast::FloatLiteralExpression::Suffix::kF);
}
- /// @param type matrix subtype
- /// @return the tint AST type for a 4x3 matrix of `type`.
- const ast::Matrix* mat4x3(const ast::Type* type) const {
- return mat(type, 4u, 3u);
+ /// @param source the source information
+ /// @param value the float value
+ /// @return a 'h'-suffixed FloatLiteralExpression for the f16 value
+ const ast::FloatLiteralExpression* Expr(const Source& source, f16 value) {
+ return create<ast::FloatLiteralExpression>(source, static_cast<double>(value.value),
+ ast::FloatLiteralExpression::Suffix::kH);
}
- /// @param type matrix subtype
- /// @return the tint AST type for a 4x4 matrix of `type`.
- const ast::Matrix* mat4x4(const ast::Type* type) const {
- return mat(type, 4u, 4u);
+ /// @param value the float value
+ /// @return a 'h'-suffixed FloatLiteralExpression for the f16 value
+ const ast::FloatLiteralExpression* Expr(f16 value) {
+ return create<ast::FloatLiteralExpression>(static_cast<double>(value.value),
+ ast::FloatLiteralExpression::Suffix::kH);
}
- /// @param columns number of columns for the matrix
- /// @param rows number of rows for the matrix
- /// @return the tint AST type for a matrix of `type`
- template <typename T>
- const ast::Matrix* mat(uint32_t columns, uint32_t rows) const {
- return mat(Of<T>(), columns, rows);
+ /// @param source the source information
+ /// @param value the integer value
+ /// @return an unsuffixed IntLiteralExpression for the AInt value
+ const ast::IntLiteralExpression* Expr(const Source& source, AInt value) {
+ return create<ast::IntLiteralExpression>(source, value,
+ ast::IntLiteralExpression::Suffix::kNone);
}
- /// @return the tint AST type for a 2x3 matrix of the C type `T`.
- template <typename T>
- const ast::Matrix* mat2x2() const {
- return mat2x2(Of<T>());
+ /// @param value the integer value
+ /// @return an unsuffixed IntLiteralExpression for the AInt value
+ const ast::IntLiteralExpression* Expr(AInt value) {
+ return create<ast::IntLiteralExpression>(value, ast::IntLiteralExpression::Suffix::kNone);
}
- /// @return the tint AST type for a 2x3 matrix of the C type `T`.
- template <typename T>
- const ast::Matrix* mat2x3() const {
- return mat2x3(Of<T>());
+ /// @param source the source information
+ /// @param value the abstract-float value
+ /// @return an unsuffixed FloatLiteralExpression for the AFloat value
+ const ast::FloatLiteralExpression* Expr(const Source& source, AFloat value) {
+ return create<ast::FloatLiteralExpression>(source, value.value,
+ ast::FloatLiteralExpression::Suffix::kNone);
}
- /// @return the tint AST type for a 2x4 matrix of the C type `T`.
- template <typename T>
- const ast::Matrix* mat2x4() const {
- return mat2x4(Of<T>());
+ /// @param value the abstract-float value
+ /// @return an unsuffixed FloatLiteralExpression for the AFloat value
+ const ast::FloatLiteralExpression* Expr(AFloat value) {
+ return create<ast::FloatLiteralExpression>(value.value,
+ ast::FloatLiteralExpression::Suffix::kNone);
}
- /// @return the tint AST type for a 3x2 matrix of the C type `T`.
- template <typename T>
- const ast::Matrix* mat3x2() const {
- return mat3x2(Of<T>());
+ /// @param source the source information
+ /// @param value the integer value
+ /// @return a signed 'i'-suffixed IntLiteralExpression for the i32 value
+ const ast::IntLiteralExpression* Expr(const Source& source, i32 value) {
+ return create<ast::IntLiteralExpression>(source, value,
+ ast::IntLiteralExpression::Suffix::kI);
}
- /// @return the tint AST type for a 3x3 matrix of the C type `T`.
- template <typename T>
- const ast::Matrix* mat3x3() const {
- return mat3x3(Of<T>());
+ /// @param value the integer value
+ /// @return a signed 'i'-suffixed IntLiteralExpression for the i32 value
+ const ast::IntLiteralExpression* Expr(i32 value) {
+ return create<ast::IntLiteralExpression>(value, ast::IntLiteralExpression::Suffix::kI);
}
- /// @return the tint AST type for a 3x4 matrix of the C type `T`.
- template <typename T>
- const ast::Matrix* mat3x4() const {
- return mat3x4(Of<T>());
+ /// @param source the source information
+ /// @param value the unsigned int value
+ /// @return an unsigned 'u'-suffixed IntLiteralExpression for the u32 value
+ const ast::IntLiteralExpression* Expr(const Source& source, u32 value) {
+ return create<ast::IntLiteralExpression>(source, value,
+ ast::IntLiteralExpression::Suffix::kU);
}
- /// @return the tint AST type for a 4x2 matrix of the C type `T`.
- template <typename T>
- const ast::Matrix* mat4x2() const {
- return mat4x2(Of<T>());
+ /// @param value the unsigned int value
+ /// @return an unsigned 'u'-suffixed IntLiteralExpression for the u32 value
+ const ast::IntLiteralExpression* Expr(u32 value) {
+ return create<ast::IntLiteralExpression>(value, ast::IntLiteralExpression::Suffix::kU);
}
- /// @return the tint AST type for a 4x3 matrix of the C type `T`.
- template <typename T>
- const ast::Matrix* mat4x3() const {
- return mat4x3(Of<T>());
+ /// Converts `arg` to an `ast::Expression` using `Expr()`, then appends it to
+ /// `list`.
+ /// @param list the list to append to
+ /// @param arg the argument to convert and append
+ template <typename ARG>
+ void Append(ast::ExpressionList& list, ARG&& arg) {
+ list.emplace_back(Expr(std::forward<ARG>(arg)));
}
- /// @return the tint AST type for a 4x4 matrix of the C type `T`.
- template <typename T>
- const ast::Matrix* mat4x4() const {
- return mat4x4(Of<T>());
+ /// Converts `arg0` and `args` to `ast::Expression`s using `Expr()`,
+ /// then appends them to `list`.
+ /// @param list the list to append to
+ /// @param arg0 the first argument
+ /// @param args the rest of the arguments
+ template <typename ARG0, typename... ARGS>
+ void Append(ast::ExpressionList& list, ARG0&& arg0, ARGS&&... args) {
+ Append(list, std::forward<ARG0>(arg0));
+ Append(list, std::forward<ARGS>(args)...);
}
- /// @param subtype the array element type
- /// @param n the array size. nullptr represents a runtime-array
- /// @param attrs the optional attributes for the array
- /// @return the tint AST type for a array of size `n` of type `T`
- template <typename EXPR = ast::Expression*>
- const ast::Array* array(const ast::Type* subtype,
- EXPR&& n = nullptr,
- ast::AttributeList attrs = {}) const {
- return builder->create<ast::Array>(
- subtype, builder->Expr(std::forward<EXPR>(n)), attrs);
+ /// @return an empty list of expressions
+ ast::ExpressionList ExprList() { return {}; }
+
+ /// @param args the list of expression arguments
+ /// @return the arguments converted to `ast::Expression`s using `Expr()`
+ template <typename... ARGS>
+ ast::ExpressionList ExprList(ARGS&&... args) {
+ ast::ExpressionList list;
+ list.reserve(sizeof...(args));
+ Append(list, std::forward<ARGS>(args)...);
+ return list;
}
- /// @param source the Source of the node
- /// @param subtype the array element type
- /// @param n the array size. nullptr represents a runtime-array
- /// @param attrs the optional attributes for the array
- /// @return the tint AST type for a array of size `n` of type `T`
- template <typename EXPR = ast::Expression*>
- const ast::Array* array(const Source& source,
- const ast::Type* subtype,
- EXPR&& n = nullptr,
- ast::AttributeList attrs = {}) const {
- return builder->create<ast::Array>(
- source, subtype, builder->Expr(std::forward<EXPR>(n)), attrs);
+ /// @param list the list of expressions
+ /// @return `list`
+ ast::ExpressionList ExprList(ast::ExpressionList list) { return list; }
+
+ /// @param args the arguments for the type constructor
+ /// @return an `ast::CallExpression` of type `T`, with the values
+ /// of `args` converted to `ast::Expression`s using `Expr()`
+ template <typename T, typename... ARGS>
+ const ast::CallExpression* Construct(ARGS&&... args) {
+ return Construct(ty.Of<T>(), std::forward<ARGS>(args)...);
}
- /// @param subtype the array element type
- /// @param n the array size. nullptr represents a runtime-array
- /// @param stride the array stride. 0 represents implicit stride
- /// @return the tint AST type for a array of size `n` of type `T`
+ /// @param type the type to construct
+ /// @param args the arguments for the constructor
+ /// @return an `ast::CallExpression` of `type` constructed with the
+ /// values `args`.
+ template <typename... ARGS>
+ const ast::CallExpression* Construct(const ast::Type* type, ARGS&&... args) {
+ return Construct(source_, type, std::forward<ARGS>(args)...);
+ }
+
+ /// @param source the source information
+ /// @param type the type to construct
+ /// @param args the arguments for the constructor
+ /// @return an `ast::CallExpression` of `type` constructed with the
+ /// values `args`.
+ template <typename... ARGS>
+ const ast::CallExpression* Construct(const Source& source,
+ const ast::Type* type,
+ ARGS&&... args) {
+ return create<ast::CallExpression>(source, type, ExprList(std::forward<ARGS>(args)...));
+ }
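+
+ // Editor's note: an illustrative sketch, not part of the original header.
+ // Assuming a ProgramBuilder `b` and the numeric literal suffixes:
+ //   auto* v = b.Construct(b.ty.vec3<f32>(), 1_f, 2_f, 3_f);  // vec3<f32>(1.0, 2.0, 3.0)
+ //   auto* z = b.Construct<i32>();                            // zero-valued i32()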
+
+ /// @param expr the expression for the bitcast
+ /// @return an `ast::BitcastExpression` of type `T`, with the value of
+ /// `expr` converted to an `ast::Expression` using `Expr()`
+ template <typename T, typename EXPR>
+ const ast::BitcastExpression* Bitcast(EXPR&& expr) {
+ return Bitcast(ty.Of<T>(), std::forward<EXPR>(expr));
+ }
+
+ /// @param type the type to cast to
+ /// @param expr the expression for the bitcast
+ /// @return an `ast::BitcastExpression` of `type` constructed with the values
+ /// `expr`.
template <typename EXPR>
- const ast::Array* array(const ast::Type* subtype,
- EXPR&& n,
- uint32_t stride) const {
- ast::AttributeList attrs;
- if (stride) {
- attrs.emplace_back(builder->create<ast::StrideAttribute>(stride));
- }
- return array(subtype, std::forward<EXPR>(n), std::move(attrs));
+ const ast::BitcastExpression* Bitcast(const ast::Type* type, EXPR&& expr) {
+ return create<ast::BitcastExpression>(type, Expr(std::forward<EXPR>(expr)));
}
- /// @param source the Source of the node
- /// @param subtype the array element type
- /// @param n the array size. nullptr represents a runtime-array
- /// @param stride the array stride. 0 represents implicit stride
- /// @return the tint AST type for a array of size `n` of type `T`
+ /// @param source the source information
+ /// @param type the type to cast to
+ /// @param expr the expression for the bitcast
+ /// @return an `ast::BitcastExpression` of `type` constructed with the values
+ /// `expr`.
template <typename EXPR>
- const ast::Array* array(const Source& source,
- const ast::Type* subtype,
- EXPR&& n,
- uint32_t stride) const {
- ast::AttributeList attrs;
- if (stride) {
- attrs.emplace_back(builder->create<ast::StrideAttribute>(stride));
- }
- return array(source, subtype, std::forward<EXPR>(n), std::move(attrs));
- }
-
- /// @return the tint AST type for a runtime-sized array of type `T`
- template <typename T>
- const ast::Array* array() const {
- return array(Of<T>(), nullptr);
+ const ast::BitcastExpression* Bitcast(const Source& source,
+ const ast::Type* type,
+ EXPR&& expr) {
+ return create<ast::BitcastExpression>(source, type, Expr(std::forward<EXPR>(expr)));
}
- /// @return the tint AST type for an array of size `N` of type `T`
- template <typename T, int N>
- const ast::Array* array() const {
- return array(Of<T>(), builder->Expr(N));
+ /// @param args the arguments for the vector constructor
+ /// @param type the vector type
+ /// @param size the vector size
+ /// @return an `ast::CallExpression` of a `size`-element vector of
+ /// type `type`, constructed with the values `args`.
+ template <typename... ARGS>
+ const ast::CallExpression* vec(const ast::Type* type, uint32_t size, ARGS&&... args) {
+ return Construct(ty.vec(type, size), std::forward<ARGS>(args)...);
}
- /// @param stride the array stride
- /// @return the tint AST type for a runtime-sized array of type `T`
- template <typename T>
- const ast::Array* array(uint32_t stride) const {
- return array(Of<T>(), nullptr, stride);
+ /// @param args the arguments for the vector constructor
+ /// @return an `ast::CallExpression` of a 2-element vector of type
+ /// `T`, constructed with the values `args`.
+ template <typename T, typename... ARGS>
+ const ast::CallExpression* vec2(ARGS&&... args) {
+ return Construct(ty.vec2<T>(), std::forward<ARGS>(args)...);
}
- /// @param stride the array stride
- /// @return the tint AST type for an array of size `N` of type `T`
- template <typename T, int N>
- const ast::Array* array(uint32_t stride) const {
- return array(Of<T>(), builder->Expr(N), stride);
+ /// @param args the arguments for the vector constructor
+ /// @return an `ast::CallExpression` of a 3-element vector of type
+ /// `T`, constructed with the values `args`.
+ template <typename T, typename... ARGS>
+ const ast::CallExpression* vec3(ARGS&&... args) {
+ return Construct(ty.vec3<T>(), std::forward<ARGS>(args)...);
}
- /// Creates a type name
- /// @param name the name
- /// @returns the type name
- template <typename NAME>
- const ast::TypeName* type_name(NAME&& name) const {
- return builder->create<ast::TypeName>(
- builder->Sym(std::forward<NAME>(name)));
+ /// @param args the arguments for the vector constructor
+ /// @return an `ast::CallExpression` of a 4-element vector of type
+ /// `T`, constructed with the values `args`.
+ template <typename T, typename... ARGS>
+ const ast::CallExpression* vec4(ARGS&&... args) {
+ return Construct(ty.vec4<T>(), std::forward<ARGS>(args)...);
}
- /// Creates a type name
- /// @param source the Source of the node
- /// @param name the name
- /// @returns the type name
+ /// @param args the arguments for the matrix constructor
+ /// @return an `ast::CallExpression` of a 2x2 matrix of type
+ /// `T`, constructed with the values `args`.
+ template <typename T, typename... ARGS>
+ const ast::CallExpression* mat2x2(ARGS&&... args) {
+ return Construct(ty.mat2x2<T>(), std::forward<ARGS>(args)...);
+ }
+
+ /// @param args the arguments for the matrix constructor
+ /// @return an `ast::CallExpression` of a 2x3 matrix of type
+ /// `T`, constructed with the values `args`.
+ template <typename T, typename... ARGS>
+ const ast::CallExpression* mat2x3(ARGS&&... args) {
+ return Construct(ty.mat2x3<T>(), std::forward<ARGS>(args)...);
+ }
+
+ /// @param args the arguments for the matrix constructor
+ /// @return an `ast::CallExpression` of a 2x4 matrix of type
+ /// `T`, constructed with the values `args`.
+ template <typename T, typename... ARGS>
+ const ast::CallExpression* mat2x4(ARGS&&... args) {
+ return Construct(ty.mat2x4<T>(), std::forward<ARGS>(args)...);
+ }
+
+ /// @param args the arguments for the matrix constructor
+ /// @return an `ast::CallExpression` of a 3x2 matrix of type
+ /// `T`, constructed with the values `args`.
+ template <typename T, typename... ARGS>
+ const ast::CallExpression* mat3x2(ARGS&&... args) {
+ return Construct(ty.mat3x2<T>(), std::forward<ARGS>(args)...);
+ }
+
+ /// @param args the arguments for the matrix constructor
+ /// @return an `ast::CallExpression` of a 3x3 matrix of type
+ /// `T`, constructed with the values `args`.
+ template <typename T, typename... ARGS>
+ const ast::CallExpression* mat3x3(ARGS&&... args) {
+ return Construct(ty.mat3x3<T>(), std::forward<ARGS>(args)...);
+ }
+
+ /// @param args the arguments for the matrix constructor
+ /// @return an `ast::CallExpression` of a 3x4 matrix of type
+ /// `T`, constructed with the values `args`.
+ template <typename T, typename... ARGS>
+ const ast::CallExpression* mat3x4(ARGS&&... args) {
+ return Construct(ty.mat3x4<T>(), std::forward<ARGS>(args)...);
+ }
+
+ /// @param args the arguments for the matrix constructor
+ /// @return an `ast::CallExpression` of a 4x2 matrix of type
+ /// `T`, constructed with the values `args`.
+ template <typename T, typename... ARGS>
+ const ast::CallExpression* mat4x2(ARGS&&... args) {
+ return Construct(ty.mat4x2<T>(), std::forward<ARGS>(args)...);
+ }
+
+ /// @param args the arguments for the matrix constructor
+ /// @return an `ast::CallExpression` of a 4x3 matrix of type
+ /// `T`, constructed with the values `args`.
+ template <typename T, typename... ARGS>
+ const ast::CallExpression* mat4x3(ARGS&&... args) {
+ return Construct(ty.mat4x3<T>(), std::forward<ARGS>(args)...);
+ }
+
+ /// @param args the arguments for the matrix constructor
+ /// @return an `ast::CallExpression` of a 4x4 matrix of type
+ /// `T`, constructed with the values `args`.
+ template <typename T, typename... ARGS>
+ const ast::CallExpression* mat4x4(ARGS&&... args) {
+ return Construct(ty.mat4x4<T>(), std::forward<ARGS>(args)...);
+ }
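+
+ // Editor's note: a usage sketch, not part of the original header. Assuming a
+ // ProgramBuilder `b` and the numeric literal suffixes:
+ //   auto* v = b.vec2<i32>(1_i, 2_i);                // vec2<i32>(1, 2)
+ //   auto* m = b.mat2x2<f32>(b.vec2<f32>(1_f, 2_f),  // mat2x2<f32>(...)
+ //                           b.vec2<f32>(3_f, 4_f));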
+
+ /// @param args the arguments for the array constructor
+ /// @return an `ast::CallExpression` of an array with element type
+ /// `T` and size `N`, constructed with the values `args`.
+ template <typename T, int N, typename... ARGS>
+ const ast::CallExpression* array(ARGS&&... args) {
+ return Construct(ty.array<T, N>(), std::forward<ARGS>(args)...);
+ }
+
+ /// @param subtype the array element type
+ /// @param n the array size. nullptr represents a runtime-array.
+ /// @param args the arguments for the array constructor
+ /// @return an `ast::CallExpression` of an array with element type
+ /// `subtype`, constructed with the values `args`.
+ template <typename EXPR, typename... ARGS>
+ const ast::CallExpression* array(const ast::Type* subtype, EXPR&& n, ARGS&&... args) {
+ return Construct(ty.array(subtype, std::forward<EXPR>(n)), std::forward<ARGS>(args)...);
+ }
+
+ /// Adds the extension to the list of enable directives at the top of the module.
+ /// @param ext the extension to enable
+ /// @return an `ast::Enable` enabling the given extension.
+ const ast::Enable* Enable(ast::Extension ext) {
+ auto* enable = create<ast::Enable>(ext);
+ AST().AddEnable(enable);
+ return enable;
+ }
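+
+ // Editor's note: an illustrative sketch, not part of the original header; the
+ // extension enumerator named here is an assumption about this Tint version:
+ //   b.Enable(ast::Extension::kF16);  // adds 'enable f16;' to the module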
+
+ /// @param name the variable name
+ /// @param type the variable type
+ /// @param optional the optional variable settings.
+ /// Can be any of the following, in any order:
+ /// * ast::StorageClass - specifies the variable storage class
+ /// * ast::Access - specifies the variable's access control
+ /// * ast::Expression* - specifies the variable's initializer expression
+ /// * ast::AttributeList - specifies the variable's attributes
+ /// Note that repeated arguments of the same type will use the last argument's
+ /// value.
+ /// @returns an `ast::Variable` with the given name, type and additional
+ /// options
+ template <typename NAME, typename... OPTIONAL>
+ const ast::Variable* Var(NAME&& name, const ast::Type* type, OPTIONAL&&... optional) {
+ VarOptionals opts(std::forward<OPTIONAL>(optional)...);
+ return create<ast::Variable>(Sym(std::forward<NAME>(name)), opts.storage, opts.access, type,
+ false /* is_const */, false /* is_overridable */,
+ opts.constructor, std::move(opts.attributes));
+ }
+
+ /// @param source the variable source
+ /// @param name the variable name
+ /// @param type the variable type
+ /// @param optional the optional variable settings.
+ /// Can be any of the following, in any order:
+ /// * ast::StorageClass - specifies the variable storage class
+ /// * ast::Access - specifies the variable's access control
+ /// * ast::Expression* - specifies the variable's initializer expression
+ /// * ast::AttributeList - specifies the variable's attributes
+ /// Note that repeated arguments of the same type will use the last argument's
+ /// value.
+ /// @returns an `ast::Variable` with the given name, type and additional options
+ template <typename NAME, typename... OPTIONAL>
+ const ast::Variable* Var(const Source& source,
+ NAME&& name,
+ const ast::Type* type,
+ OPTIONAL&&... optional) {
+ VarOptionals opts(std::forward<OPTIONAL>(optional)...);
+ return create<ast::Variable>(source, Sym(std::forward<NAME>(name)), opts.storage,
+ opts.access, type, false /* is_const */,
+ false /* is_overridable */, opts.constructor,
+ std::move(opts.attributes));
+ }
+
+ /// @param name the variable name
+ /// @param type the variable type
+ /// @param constructor constructor expression
+ /// @param attributes optional variable attributes
+ /// @returns an immutable `ast::Variable` with the given name and type
template <typename NAME>
- const ast::TypeName* type_name(const Source& source, NAME&& name) const {
- return builder->create<ast::TypeName>(
- source, builder->Sym(std::forward<NAME>(name)));
+ const ast::Variable* Let(NAME&& name,
+ const ast::Type* type,
+ const ast::Expression* constructor,
+ ast::AttributeList attributes = {}) {
+ return create<ast::Variable>(Sym(std::forward<NAME>(name)), ast::StorageClass::kNone,
+ ast::Access::kUndefined, type, true /* is_const */,
+ false /* is_overridable */, constructor, attributes);
}
- /// Creates an alias type
- /// @param name the alias name
- /// @param type the alias type
- /// @returns the alias pointer
+ /// @param source the variable source
+ /// @param name the variable name
+ /// @param type the variable type
+ /// @param constructor constructor expression
+ /// @param attributes optional variable attributes
+ /// @returns an immutable `ast::Variable` with the given name and type
template <typename NAME>
- const ast::Alias* alias(NAME&& name, const ast::Type* type) const {
- auto sym = builder->Sym(std::forward<NAME>(name));
- return builder->create<ast::Alias>(sym, type);
+ const ast::Variable* Let(const Source& source,
+ NAME&& name,
+ const ast::Type* type,
+ const ast::Expression* constructor,
+ ast::AttributeList attributes = {}) {
+ return create<ast::Variable>(source, Sym(std::forward<NAME>(name)),
+ ast::StorageClass::kNone, ast::Access::kUndefined, type,
+ true /* is_const */, false /* is_overridable */, constructor,
+ attributes);
}
- /// Creates an alias type
- /// @param source the Source of the node
- /// @param name the alias name
- /// @param type the alias type
- /// @returns the alias pointer
+ /// @param name the parameter name
+ /// @param type the parameter type
+ /// @param attributes optional parameter attributes
+ /// @returns an immutable `ast::Variable` with the given name and type
+ template <typename NAME>
+ const ast::Variable* Param(NAME&& name,
+ const ast::Type* type,
+ ast::AttributeList attributes = {}) {
+ return create<ast::Variable>(Sym(std::forward<NAME>(name)), ast::StorageClass::kNone,
+ ast::Access::kUndefined, type, true /* is_const */,
+ false /* is_overridable */, nullptr, attributes);
+ }
+
+ /// @param source the parameter source
+ /// @param name the parameter name
+ /// @param type the parameter type
+ /// @param attributes optional parameter attributes
+ /// @returns an immutable `ast::Variable` with the given name and type
+ template <typename NAME>
+ const ast::Variable* Param(const Source& source,
+ NAME&& name,
+ const ast::Type* type,
+ ast::AttributeList attributes = {}) {
+ return create<ast::Variable>(source, Sym(std::forward<NAME>(name)),
+ ast::StorageClass::kNone, ast::Access::kUndefined, type,
+ true /* is_const */, false /* is_overridable */, nullptr,
+ attributes);
+ }
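+
+ // Editor's note: a usage sketch, not part of the original header. Assuming a
+ // ProgramBuilder `b` and the numeric literal suffixes:
+ //   auto* v = b.Var("v", b.ty.i32(), ast::StorageClass::kPrivate, b.Expr(1_i));
+ //   auto* l = b.Let("l", b.ty.f32(), b.Expr(2.0_f));
+ //   auto* p = b.Param("p", b.ty.u32());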
+
+ /// @param name the variable name
+ /// @param type the variable type
+ /// @param optional the optional variable settings.
+ /// Can be any of the following, in any order:
+ /// * ast::StorageClass - specifies the variable storage class
+ /// * ast::Access - specifies the variable's access control
+ /// * ast::Expression* - specifies the variable's initializer expression
+ /// * ast::AttributeList - specifies the variable's attributes
+ /// Note that repeated arguments of the same type will use the last argument's
+ /// value.
+ /// @returns a new `ast::Variable`, which is automatically registered as a
+ /// global variable with the ast::Module.
+ template <typename NAME, typename... OPTIONAL, typename = DisableIfSource<NAME>>
+ const ast::Variable* Global(NAME&& name, const ast::Type* type, OPTIONAL&&... optional) {
+ auto* var = Var(std::forward<NAME>(name), type, std::forward<OPTIONAL>(optional)...);
+ AST().AddGlobalVariable(var);
+ return var;
+ }
+
+ /// @param source the variable source
+ /// @param name the variable name
+ /// @param type the variable type
+ /// @param optional the optional variable settings.
+ /// Can be any of the following, in any order:
+ /// * ast::StorageClass - specifies the variable storage class
+ /// * ast::Access - specifies the variable's access control
+ /// * ast::Expression* - specifies the variable's initializer expression
+ /// * ast::AttributeList - specifies the variable's attributes
+ /// Note that repeated arguments of the same type will use the last argument's
+ /// value.
+ /// @returns a new `ast::Variable`, which is automatically registered as a
+ /// global variable with the ast::Module.
+ template <typename NAME, typename... OPTIONAL>
+ const ast::Variable* Global(const Source& source,
+ NAME&& name,
+ const ast::Type* type,
+ OPTIONAL&&... optional) {
+ auto* var =
+ Var(source, std::forward<NAME>(name), type, std::forward<OPTIONAL>(optional)...);
+ AST().AddGlobalVariable(var);
+ return var;
+ }
+
+ /// @param name the variable name
+ /// @param type the variable type
+ /// @param constructor constructor expression
+ /// @param attributes optional variable attributes
+ /// @returns a const `ast::Variable` constructed by calling Let(), which is
+ /// automatically registered as a global variable with the ast::Module.
+ template <typename NAME>
+ const ast::Variable* GlobalConst(NAME&& name,
+ const ast::Type* type,
+ const ast::Expression* constructor,
+ ast::AttributeList attributes = {}) {
+ auto* var = Let(std::forward<NAME>(name), type, constructor, std::move(attributes));
+ AST().AddGlobalVariable(var);
+ return var;
+ }
+
+ /// @param source the variable source
+ /// @param name the variable name
+ /// @param type the variable type
+ /// @param constructor constructor expression
+ /// @param attributes optional variable attributes
+ /// @returns a const `ast::Variable` constructed by calling Let(), which is
+ /// automatically registered as a global variable with the ast::Module.
template <typename NAME>
- const ast::Alias* alias(const Source& source,
- NAME&& name,
- const ast::Type* type) const {
- auto sym = builder->Sym(std::forward<NAME>(name));
- return builder->create<ast::Alias>(source, sym, type);
+ const ast::Variable* GlobalConst(const Source& source,
+ NAME&& name,
+ const ast::Type* type,
+ const ast::Expression* constructor,
+ ast::AttributeList attributes = {}) {
+ auto* var = Let(source, std::forward<NAME>(name), type, constructor, std::move(attributes));
+ AST().AddGlobalVariable(var);
+ return var;
+ }
+
+ /// @param name the variable name
+ /// @param type the variable type
+ /// @param constructor optional constructor expression
+ /// @param attributes optional variable attributes
+ /// @returns an overridable const `ast::Variable` which is automatically
+ /// registered as a global variable with the ast::Module.
+ template <typename NAME>
+ const ast::Variable* Override(NAME&& name,
+ const ast::Type* type,
+ const ast::Expression* constructor,
+ ast::AttributeList attributes = {}) {
+ auto* var =
+ create<ast::Variable>(source_, Sym(std::forward<NAME>(name)), ast::StorageClass::kNone,
+ ast::Access::kUndefined, type, true /* is_const */,
+ true /* is_overridable */, constructor, std::move(attributes));
+ AST().AddGlobalVariable(var);
+ return var;
+ }
+
+ /// @param source the variable source
+ /// @param name the variable name
+ /// @param type the variable type
+ /// @param constructor constructor expression
+ /// @param attributes optional variable attributes
+ /// @returns an overridable const `ast::Variable` which is automatically
+ /// registered as a global variable with the ast::Module.
+ template <typename NAME>
+ const ast::Variable* Override(const Source& source,
+ NAME&& name,
+ const ast::Type* type,
+ const ast::Expression* constructor,
+ ast::AttributeList attributes = {}) {
+ auto* var =
+ create<ast::Variable>(source, Sym(std::forward<NAME>(name)), ast::StorageClass::kNone,
+ ast::Access::kUndefined, type, true /* is_const */,
+ true /* is_overridable */, constructor, std::move(attributes));
+ AST().AddGlobalVariable(var);
+ return var;
}
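+
+ // Editor's note: a usage sketch, not part of the original header. Assuming a
+ // ProgramBuilder `b` and the numeric literal suffixes:
+ //   b.Global("g", b.ty.f32(), ast::StorageClass::kPrivate);  // module-scope 'var'
+ //   b.GlobalConst("c", b.ty.i32(), b.Expr(1_i));             // module-scope 'let'
+ //   b.Override("o", b.ty.f32(), nullptr);                    // pipeline-overridable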
- /// @param type the type of the pointer
- /// @param storage_class the storage class of the pointer
- /// @param access the optional access control of the pointer
- /// @return the pointer to `type` with the given ast::StorageClass
- const ast::Pointer* pointer(
- const ast::Type* type,
- ast::StorageClass storage_class,
- ast::Access access = ast::Access::kUndefined) const {
- return builder->create<ast::Pointer>(type, storage_class, access);
+ /// @param source the source information
+ /// @param expr the expression to take the address of
+ /// @return an ast::UnaryOpExpression that takes the address of `expr`
+ template <typename EXPR>
+ const ast::UnaryOpExpression* AddressOf(const Source& source, EXPR&& expr) {
+ return create<ast::UnaryOpExpression>(source, ast::UnaryOp::kAddressOf,
+ Expr(std::forward<EXPR>(expr)));
}
- /// @param source the Source of the node
- /// @param type the type of the pointer
- /// @param storage_class the storage class of the pointer
- /// @param access the optional access control of the pointer
- /// @return the pointer to `type` with the given ast::StorageClass
- const ast::Pointer* pointer(
- const Source& source,
- const ast::Type* type,
- ast::StorageClass storage_class,
- ast::Access access = ast::Access::kUndefined) const {
- return builder->create<ast::Pointer>(source, type, storage_class, access);
+ /// @param expr the expression to take the address of
+ /// @return an ast::UnaryOpExpression that takes the address of `expr`
+ template <typename EXPR>
+ const ast::UnaryOpExpression* AddressOf(EXPR&& expr) {
+ return create<ast::UnaryOpExpression>(ast::UnaryOp::kAddressOf,
+ Expr(std::forward<EXPR>(expr)));
}
- /// @param storage_class the storage class of the pointer
- /// @param access the optional access control of the pointer
- /// @return the pointer to type `T` with the given ast::StorageClass.
- template <typename T>
- const ast::Pointer* pointer(
- ast::StorageClass storage_class,
- ast::Access access = ast::Access::kUndefined) const {
- return pointer(Of<T>(), storage_class, access);
+ /// @param source the source information
+ /// @param expr the expression to perform an indirection on
+ /// @return an ast::UnaryOpExpression that dereferences the pointer `expr`
+ template <typename EXPR>
+ const ast::UnaryOpExpression* Deref(const Source& source, EXPR&& expr) {
+ return create<ast::UnaryOpExpression>(source, ast::UnaryOp::kIndirection,
+ Expr(std::forward<EXPR>(expr)));
}
- /// @param source the Source of the node
- /// @param type the type of the atomic
- /// @return the atomic to `type`
- const ast::Atomic* atomic(const Source& source,
- const ast::Type* type) const {
- return builder->create<ast::Atomic>(source, type);
+ /// @param expr the expression to perform an indirection on
+ /// @return an ast::UnaryOpExpression that dereferences the pointer `expr`
+ template <typename EXPR>
+ const ast::UnaryOpExpression* Deref(EXPR&& expr) {
+ return create<ast::UnaryOpExpression>(ast::UnaryOp::kIndirection,
+ Expr(std::forward<EXPR>(expr)));
}
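+
+ // Editor's note: a minimal sketch, not part of the original header, assuming a
+ // ProgramBuilder `b`:
+ //   auto* p = b.AddressOf("v");  // &v
+ //   auto* d = b.Deref(p);        // *(&v)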
- /// @param type the type of the atomic
- /// @return the atomic to `type`
- const ast::Atomic* atomic(const ast::Type* type) const {
- return builder->create<ast::Atomic>(type);
+ /// @param expr the expression to perform a unary not on
+ /// @return an ast::UnaryOpExpression that is the unary not of the input
+ /// expression
+ template <typename EXPR>
+ const ast::UnaryOpExpression* Not(EXPR&& expr) {
+ return create<ast::UnaryOpExpression>(ast::UnaryOp::kNot, Expr(std::forward<EXPR>(expr)));
}
- /// @return the atomic to type `T`
- template <typename T>
- const ast::Atomic* atomic() const {
- return atomic(Of<T>());
+ /// @param expr the expression to perform a unary complement on
+ /// @return an ast::UnaryOpExpression that is the unary complement of the
+ /// input expression
+ template <typename EXPR>
+ const ast::UnaryOpExpression* Complement(EXPR&& expr) {
+ return create<ast::UnaryOpExpression>(ast::UnaryOp::kComplement,
+ Expr(std::forward<EXPR>(expr)));
}
- /// @param kind the kind of sampler
- /// @returns the sampler
- const ast::Sampler* sampler(ast::SamplerKind kind) const {
- return builder->create<ast::Sampler>(kind);
+ /// @param expr the expression to perform a unary negation on
+ /// @return an ast::UnaryOpExpression that is the unary negation of the
+ /// input expression
+ template <typename EXPR>
+ const ast::UnaryOpExpression* Negation(EXPR&& expr) {
+ return create<ast::UnaryOpExpression>(ast::UnaryOp::kNegation,
+ Expr(std::forward<EXPR>(expr)));
}
- /// @param source the Source of the node
- /// @param kind the kind of sampler
- /// @returns the sampler
- const ast::Sampler* sampler(const Source& source,
- ast::SamplerKind kind) const {
- return builder->create<ast::Sampler>(source, kind);
+ /// @param source the source information
+ /// @param func the function name
+ /// @param args the function call arguments
+ /// @returns a `ast::CallExpression` to the function `func`, with the
+ /// arguments of `args` converted to `ast::Expression`s using `Expr()`.
+ template <typename NAME, typename... ARGS>
+ const ast::CallExpression* Call(const Source& source, NAME&& func, ARGS&&... args) {
+ return create<ast::CallExpression>(source, Expr(func),
+ ExprList(std::forward<ARGS>(args)...));
}
- /// @param dims the dimensionality of the texture
- /// @returns the depth texture
- const ast::DepthTexture* depth_texture(ast::TextureDimension dims) const {
- return builder->create<ast::DepthTexture>(dims);
+ /// @param func the function name
+ /// @param args the function call arguments
+ /// @returns a `ast::CallExpression` to the function `func`, with the
+ /// arguments of `args` converted to `ast::Expression`s using `Expr()`.
+ template <typename NAME, typename... ARGS, typename = DisableIfSource<NAME>>
+ const ast::CallExpression* Call(NAME&& func, ARGS&&... args) {
+ return create<ast::CallExpression>(Expr(func), ExprList(std::forward<ARGS>(args)...));
}
- /// @param source the Source of the node
- /// @param dims the dimensionality of the texture
- /// @returns the depth texture
- const ast::DepthTexture* depth_texture(const Source& source,
- ast::TextureDimension dims) const {
- return builder->create<ast::DepthTexture>(source, dims);
+ /// @param source the source information
+ /// @param call the call expression to wrap in a call statement
+ /// @returns a `ast::CallStatement` for the given call expression
+ const ast::CallStatement* CallStmt(const Source& source, const ast::CallExpression* call) {
+ return create<ast::CallStatement>(source, call);
}
- /// @param dims the dimensionality of the texture
- /// @returns the multisampled depth texture
- const ast::DepthMultisampledTexture* depth_multisampled_texture(
- ast::TextureDimension dims) const {
- return builder->create<ast::DepthMultisampledTexture>(dims);
+ /// @param call the call expression to wrap in a call statement
+ /// @returns a `ast::CallStatement` for the given call expression
+ const ast::CallStatement* CallStmt(const ast::CallExpression* call) {
+ return create<ast::CallStatement>(call);
}
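+
+ // Editor's note: a usage sketch, not part of the original header. Assuming a
+ // ProgramBuilder `b` and the numeric literal suffixes:
+ //   auto* c = b.Call("min", "a", 1_i);  // min(a, 1)
+ //   auto* s = b.CallStmt(c);            // 'min(a, 1);' as a statement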
- /// @param source the Source of the node
- /// @param dims the dimensionality of the texture
- /// @returns the multisampled depth texture
- const ast::DepthMultisampledTexture* depth_multisampled_texture(
- const Source& source,
- ast::TextureDimension dims) const {
- return builder->create<ast::DepthMultisampledTexture>(source, dims);
+ /// @param source the source information
+ /// @returns a `ast::PhonyExpression`
+ const ast::PhonyExpression* Phony(const Source& source) {
+ return create<ast::PhonyExpression>(source);
}
- /// @param dims the dimensionality of the texture
- /// @param subtype the texture subtype.
- /// @returns the sampled texture
- const ast::SampledTexture* sampled_texture(ast::TextureDimension dims,
- const ast::Type* subtype) const {
- return builder->create<ast::SampledTexture>(dims, subtype);
+ /// @returns a `ast::PhonyExpression`
+ const ast::PhonyExpression* Phony() { return create<ast::PhonyExpression>(); }
+
+ /// @param expr the expression to ignore
+ /// @returns a `ast::AssignmentStatement` that assigns 'expr' to the phony
+ /// (underscore) variable.
+ template <typename EXPR>
+ const ast::AssignmentStatement* Ignore(EXPR&& expr) {
+ return create<ast::AssignmentStatement>(Phony(), Expr(expr));
+ }
+
+ /// @param lhs the left hand argument to the addition operation
+ /// @param rhs the right hand argument to the addition operation
+ /// @returns a `ast::BinaryExpression` summing the arguments `lhs` and `rhs`
+ template <typename LHS, typename RHS>
+ const ast::BinaryExpression* Add(LHS&& lhs, RHS&& rhs) {
+ return create<ast::BinaryExpression>(ast::BinaryOp::kAdd, Expr(std::forward<LHS>(lhs)),
+ Expr(std::forward<RHS>(rhs)));
+ }
+
+ /// @param lhs the left hand argument to the and operation
+ /// @param rhs the right hand argument to the and operation
+ /// @returns a `ast::BinaryExpression` bitwise anding `lhs` and `rhs`
+ template <typename LHS, typename RHS>
+ const ast::BinaryExpression* And(LHS&& lhs, RHS&& rhs) {
+ return create<ast::BinaryExpression>(ast::BinaryOp::kAnd, Expr(std::forward<LHS>(lhs)),
+ Expr(std::forward<RHS>(rhs)));
+ }
+
+ /// @param lhs the left hand argument to the or operation
+ /// @param rhs the right hand argument to the or operation
+ /// @returns a `ast::BinaryExpression` bitwise or-ing `lhs` and `rhs`
+ template <typename LHS, typename RHS>
+ const ast::BinaryExpression* Or(LHS&& lhs, RHS&& rhs) {
+ return create<ast::BinaryExpression>(ast::BinaryOp::kOr, Expr(std::forward<LHS>(lhs)),
+ Expr(std::forward<RHS>(rhs)));
+ }
+
+ /// @param lhs the left hand argument to the subtraction operation
+ /// @param rhs the right hand argument to the subtraction operation
+ /// @returns a `ast::BinaryExpression` subtracting `rhs` from `lhs`
+ template <typename LHS, typename RHS>
+ const ast::BinaryExpression* Sub(LHS&& lhs, RHS&& rhs) {
+ return create<ast::BinaryExpression>(ast::BinaryOp::kSubtract, Expr(std::forward<LHS>(lhs)),
+ Expr(std::forward<RHS>(rhs)));
+ }
+
+ /// @param lhs the left hand argument to the multiplication operation
+ /// @param rhs the right hand argument to the multiplication operation
+ /// @returns an `ast::BinaryExpression` multiplying `lhs` by `rhs`
+ template <typename LHS, typename RHS>
+ const ast::BinaryExpression* Mul(LHS&& lhs, RHS&& rhs) {
+ return create<ast::BinaryExpression>(ast::BinaryOp::kMultiply, Expr(std::forward<LHS>(lhs)),
+ Expr(std::forward<RHS>(rhs)));
+ }
+
+ /// @param source the source information
+ /// @param lhs the left hand argument to the multiplication operation
+ /// @param rhs the right hand argument to the multiplication operation
+ /// @returns an `ast::BinaryExpression` multiplying `lhs` by `rhs`
+ template <typename LHS, typename RHS>
+ const ast::BinaryExpression* Mul(const Source& source, LHS&& lhs, RHS&& rhs) {
+ return create<ast::BinaryExpression>(source, ast::BinaryOp::kMultiply,
+ Expr(std::forward<LHS>(lhs)),
+ Expr(std::forward<RHS>(rhs)));
+ }
+
+ /// @param lhs the left hand argument to the division operation
+ /// @param rhs the right hand argument to the division operation
+ /// @returns a `ast::BinaryExpression` dividing `lhs` by `rhs`
+ template <typename LHS, typename RHS>
+ const ast::BinaryExpression* Div(LHS&& lhs, RHS&& rhs) {
+ return create<ast::BinaryExpression>(ast::BinaryOp::kDivide, Expr(std::forward<LHS>(lhs)),
+ Expr(std::forward<RHS>(rhs)));
+ }
+
+ /// @param lhs the left hand argument to the modulo operation
+ /// @param rhs the right hand argument to the modulo operation
+ /// @returns a `ast::BinaryExpression` applying modulo of `lhs` by `rhs`
+ template <typename LHS, typename RHS>
+ const ast::BinaryExpression* Mod(LHS&& lhs, RHS&& rhs) {
+ return create<ast::BinaryExpression>(ast::BinaryOp::kModulo, Expr(std::forward<LHS>(lhs)),
+ Expr(std::forward<RHS>(rhs)));
+ }
+
+ /// @param lhs the left hand argument to the bit shift right operation
+ /// @param rhs the right hand argument to the bit shift right operation
+ /// @returns a `ast::BinaryExpression` bit shifting right `lhs` by `rhs`
+ template <typename LHS, typename RHS>
+ const ast::BinaryExpression* Shr(LHS&& lhs, RHS&& rhs) {
+ return create<ast::BinaryExpression>(
+ ast::BinaryOp::kShiftRight, Expr(std::forward<LHS>(lhs)), Expr(std::forward<RHS>(rhs)));
+ }
+
+ /// @param lhs the left hand argument to the bit shift left operation
+ /// @param rhs the right hand argument to the bit shift left operation
+ /// @returns a `ast::BinaryExpression` bit shifting left `lhs` by `rhs`
+ template <typename LHS, typename RHS>
+ const ast::BinaryExpression* Shl(LHS&& lhs, RHS&& rhs) {
+ return create<ast::BinaryExpression>(
+ ast::BinaryOp::kShiftLeft, Expr(std::forward<LHS>(lhs)), Expr(std::forward<RHS>(rhs)));
+ }
+
+ /// @param lhs the left hand argument to the xor operation
+ /// @param rhs the right hand argument to the xor operation
+ /// @returns a `ast::BinaryExpression` bitwise xor-ing `lhs` and `rhs`
+ template <typename LHS, typename RHS>
+ const ast::BinaryExpression* Xor(LHS&& lhs, RHS&& rhs) {
+ return create<ast::BinaryExpression>(ast::BinaryOp::kXor, Expr(std::forward<LHS>(lhs)),
+ Expr(std::forward<RHS>(rhs)));
+ }
+
+ /// @param lhs the left hand argument to the logical and operation
+ /// @param rhs the right hand argument to the logical and operation
+ /// @returns a `ast::BinaryExpression` of `lhs` && `rhs`
+ template <typename LHS, typename RHS>
+ const ast::BinaryExpression* LogicalAnd(LHS&& lhs, RHS&& rhs) {
+ return create<ast::BinaryExpression>(
+ ast::BinaryOp::kLogicalAnd, Expr(std::forward<LHS>(lhs)), Expr(std::forward<RHS>(rhs)));
+ }
+
+ /// @param lhs the left hand argument to the logical or operation
+ /// @param rhs the right hand argument to the logical or operation
+ /// @returns a `ast::BinaryExpression` of `lhs` || `rhs`
+ template <typename LHS, typename RHS>
+ const ast::BinaryExpression* LogicalOr(LHS&& lhs, RHS&& rhs) {
+ return create<ast::BinaryExpression>(
+ ast::BinaryOp::kLogicalOr, Expr(std::forward<LHS>(lhs)), Expr(std::forward<RHS>(rhs)));
+ }
+
+ /// @param lhs the left hand argument to the greater than operation
+ /// @param rhs the right hand argument to the greater than operation
+ /// @returns a `ast::BinaryExpression` of `lhs` > `rhs`
+ template <typename LHS, typename RHS>
+ const ast::BinaryExpression* GreaterThan(LHS&& lhs, RHS&& rhs) {
+ return create<ast::BinaryExpression>(ast::BinaryOp::kGreaterThan,
+ Expr(std::forward<LHS>(lhs)),
+ Expr(std::forward<RHS>(rhs)));
+ }
+
+ /// @param lhs the left hand argument to the greater than or equal operation
+ /// @param rhs the right hand argument to the greater than or equal operation
+ /// @returns a `ast::BinaryExpression` of `lhs` >= `rhs`
+ template <typename LHS, typename RHS>
+ const ast::BinaryExpression* GreaterThanEqual(LHS&& lhs, RHS&& rhs) {
+ return create<ast::BinaryExpression>(ast::BinaryOp::kGreaterThanEqual,
+ Expr(std::forward<LHS>(lhs)),
+ Expr(std::forward<RHS>(rhs)));
+ }
+
+ /// @param lhs the left hand argument to the less than operation
+ /// @param rhs the right hand argument to the less than operation
+ /// @returns a `ast::BinaryExpression` of `lhs` < `rhs`
+ template <typename LHS, typename RHS>
+ const ast::BinaryExpression* LessThan(LHS&& lhs, RHS&& rhs) {
+ return create<ast::BinaryExpression>(ast::BinaryOp::kLessThan, Expr(std::forward<LHS>(lhs)),
+ Expr(std::forward<RHS>(rhs)));
+ }
+
+ /// @param lhs the left hand argument to the less than or equal operation
+ /// @param rhs the right hand argument to the less than or equal operation
+ /// @returns a `ast::BinaryExpression` of `lhs` <= `rhs`
+ template <typename LHS, typename RHS>
+ const ast::BinaryExpression* LessThanEqual(LHS&& lhs, RHS&& rhs) {
+ return create<ast::BinaryExpression>(ast::BinaryOp::kLessThanEqual,
+ Expr(std::forward<LHS>(lhs)),
+ Expr(std::forward<RHS>(rhs)));
+ }
+
+ /// @param lhs the left hand argument to the equal expression
+ /// @param rhs the right hand argument to the equal expression
+ /// @returns an `ast::BinaryExpression` comparing `lhs` and `rhs` for equality
+ template <typename LHS, typename RHS>
+ const ast::BinaryExpression* Equal(LHS&& lhs, RHS&& rhs) {
+ return create<ast::BinaryExpression>(ast::BinaryOp::kEqual, Expr(std::forward<LHS>(lhs)),
+ Expr(std::forward<RHS>(rhs)));
+ }
+
+ /// @param lhs the left hand argument to the not-equal expression
+ /// @param rhs the right hand argument to the not-equal expression
+ /// @returns an `ast::BinaryExpression` comparing `lhs` and `rhs` for
+ /// inequality
+ template <typename LHS, typename RHS>
+ const ast::BinaryExpression* NotEqual(LHS&& lhs, RHS&& rhs) {
+ return create<ast::BinaryExpression>(ast::BinaryOp::kNotEqual, Expr(std::forward<LHS>(lhs)),
+ Expr(std::forward<RHS>(rhs)));
+ }
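+
+ // Editor's note: an illustrative sketch, not part of the original header.
+ // Assuming a ProgramBuilder `b` and the numeric literal suffixes:
+ //   auto* e = b.LessThan(b.Add("i", 1_i), b.Expr("count"));  // (i + 1) < count
+ //   auto* f = b.LogicalAnd(e, b.Expr(true));                 // e && true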
+
+ /// @param source the source information
+ /// @param obj the object for the index accessor expression
+ /// @param idx the index argument for the index accessor expression
+ /// @returns an `ast::IndexAccessorExpression` that indexes `obj` with `idx`
+ template <typename OBJ, typename IDX>
+ const ast::IndexAccessorExpression* IndexAccessor(const Source& source, OBJ&& obj, IDX&& idx) {
+ return create<ast::IndexAccessorExpression>(source, Expr(std::forward<OBJ>(obj)),
+ Expr(std::forward<IDX>(idx)));
+ }
+
+ /// @param obj the object for the index accessor expression
+ /// @param idx the index argument for the index accessor expression
+ /// @returns an `ast::IndexAccessorExpression` that indexes `obj` with `idx`
+ template <typename OBJ, typename IDX>
+ const ast::IndexAccessorExpression* IndexAccessor(OBJ&& obj, IDX&& idx) {
+ return create<ast::IndexAccessorExpression>(Expr(std::forward<OBJ>(obj)),
+ Expr(std::forward<IDX>(idx)));
+ }
+
+ /// @param source the source information
+ /// @param obj the object for the member accessor expression
+ /// @param idx the index argument for the member accessor expression
+ /// @returns an `ast::MemberAccessorExpression` that accesses the member `idx` of `obj`
+ template <typename OBJ, typename IDX>
+ const ast::MemberAccessorExpression* MemberAccessor(const Source& source,
+ OBJ&& obj,
+ IDX&& idx) {
+ return create<ast::MemberAccessorExpression>(source, Expr(std::forward<OBJ>(obj)),
+ Expr(std::forward<IDX>(idx)));
+ }
+
+ /// @param obj the object for the member accessor expression
+ /// @param idx the index argument for the member accessor expression
+ /// @returns an `ast::MemberAccessorExpression` that accesses the member `idx` of `obj`
+ template <typename OBJ, typename IDX>
+ const ast::MemberAccessorExpression* MemberAccessor(OBJ&& obj, IDX&& idx) {
+ return create<ast::MemberAccessorExpression>(Expr(std::forward<OBJ>(obj)),
+ Expr(std::forward<IDX>(idx)));
+ }
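+
+ // Editor's note: a usage sketch, not part of the original header. Assuming a
+ // ProgramBuilder `b` and the numeric literal suffixes:
+ //   auto* m = b.MemberAccessor("s", "inner");  // s.inner
+ //   auto* i = b.IndexAccessor("arr", 0_i);     // arr[0]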
+
+ /// Creates a ast::StructMemberOffsetAttribute
+ /// @param val the offset value
+ /// @returns the offset attribute pointer
+ const ast::StructMemberOffsetAttribute* MemberOffset(uint32_t val) {
+ return create<ast::StructMemberOffsetAttribute>(source_, val);
+ }
+
+ /// Creates a ast::StructMemberSizeAttribute
+ /// @param source the source information
+ /// @param val the size value
+ /// @returns the size attribute pointer
+ const ast::StructMemberSizeAttribute* MemberSize(const Source& source, uint32_t val) {
+ return create<ast::StructMemberSizeAttribute>(source, val);
+ }
+
+ /// Creates a ast::StructMemberSizeAttribute
+ /// @param val the size value
+ /// @returns the size attribute pointer
+ const ast::StructMemberSizeAttribute* MemberSize(uint32_t val) {
+ return create<ast::StructMemberSizeAttribute>(source_, val);
+ }
+
+ /// Creates a ast::StructMemberAlignAttribute
+ /// @param source the source information
+ /// @param val the align value
+ /// @returns the align attribute pointer
+ const ast::StructMemberAlignAttribute* MemberAlign(const Source& source, uint32_t val) {
+ return create<ast::StructMemberAlignAttribute>(source, val);
+ }
+
+ /// Creates a ast::StructMemberAlignAttribute
+ /// @param val the align value
+ /// @returns the align attribute pointer
+ const ast::StructMemberAlignAttribute* MemberAlign(uint32_t val) {
+ return create<ast::StructMemberAlignAttribute>(source_, val);
+ }
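
A brief sketch of how these layout attributes are attached to a struct member, assuming a ProgramBuilder `b`, the `Member()` helper declared elsewhere in this header, and the usual `ty.vec3<f32>()` type helper; the member name and values are illustrative.

    // A member carrying explicit @align(16) and @size(16) attributes
    auto* m = b.Member("color", b.ty.vec3<f32>(),
                       {b.MemberAlign(16), b.MemberSize(16)});
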
+
+ /// Creates the ast::GroupAttribute
+ /// @param value group attribute index
+ /// @returns the group attribute pointer
+ const ast::GroupAttribute* Group(uint32_t value) { return create<ast::GroupAttribute>(value); }
+
+ /// Creates the ast::BindingAttribute
+ /// @param value the binding index
+ /// @returns the binding attribute pointer
+ const ast::BindingAttribute* Binding(uint32_t value) {
+ return create<ast::BindingAttribute>(value);
+ }
+
+ /// Convenience function to create both a ast::GroupAttribute and
+ /// ast::BindingAttribute
+ /// @param group the group index
+ /// @param binding the binding index
+ /// @returns an attribute list with both the group and binding attributes
+ ast::AttributeList GroupAndBinding(uint32_t group, uint32_t binding) {
+ return {Group(group), Binding(binding)};
+ }
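
An illustrative sketch, assuming a ProgramBuilder `b`; the indices are arbitrary.

    // Attributes for `@group(0) @binding(1)`
    ast::AttributeList attrs = b.GroupAndBinding(0, 1);
    // ...which is shorthand for:
    ast::AttributeList same = {b.Group(0), b.Binding(1)};
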
+
+ /// Creates an ast::Function and registers it with the ast::Module.
+ /// @param source the source information
+ /// @param name the function name
+ /// @param params the function parameters
+ /// @param type the function return type
+ /// @param body the function body
+ /// @param attributes the optional function attributes
+ /// @param return_type_attributes the optional function return type
+ /// attributes
+ /// @returns the function pointer
+ template <typename NAME>
+ const ast::Function* Func(const Source& source,
+ NAME&& name,
+ ast::VariableList params,
+ const ast::Type* type,
+ ast::StatementList body,
+ ast::AttributeList attributes = {},
+ ast::AttributeList return_type_attributes = {}) {
+ auto* func = create<ast::Function>(source, Sym(std::forward<NAME>(name)), params, type,
+ create<ast::BlockStatement>(body), attributes,
+ return_type_attributes);
+ AST().AddFunction(func);
+ return func;
+ }
+
+ /// Creates an ast::Function and registers it with the ast::Module.
+ /// @param name the function name
+ /// @param params the function parameters
+ /// @param type the function return type
+ /// @param body the function body
+ /// @param attributes the optional function attributes
+ /// @param return_type_attributes the optional function return type
+ /// attributes
+ /// @returns the function pointer
+ template <typename NAME>
+ const ast::Function* Func(NAME&& name,
+ ast::VariableList params,
+ const ast::Type* type,
+ ast::StatementList body,
+ ast::AttributeList attributes = {},
+ ast::AttributeList return_type_attributes = {}) {
+ auto* func = create<ast::Function>(Sym(std::forward<NAME>(name)), params, type,
+ create<ast::BlockStatement>(body), attributes,
+ return_type_attributes);
+ AST().AddFunction(func);
+ return func;
}
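
A minimal sketch of declaring a function with the `Func()` overloads above, assuming a ProgramBuilder `b` plus the `Param()` and `ty.i32()` helpers declared elsewhere in this header; the function and parameter names are illustrative.

    // fn pick(a : i32, b : i32) -> i32 { return a; }
    auto* fn = b.Func("pick",
                      {b.Param("a", b.ty.i32()), b.Param("b", b.ty.i32())},
                      b.ty.i32(),
                      {b.Return("a")});
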
- /// @param source the Source of the node
- /// @param dims the dimensionality of the texture
- /// @param subtype the texture subtype.
- /// @returns the sampled texture
- const ast::SampledTexture* sampled_texture(const Source& source,
- ast::TextureDimension dims,
- const ast::Type* subtype) const {
- return builder->create<ast::SampledTexture>(source, dims, subtype);
+ /// Creates an ast::BreakStatement
+ /// @param source the source information
+ /// @returns the break statement pointer
+ const ast::BreakStatement* Break(const Source& source) {
+ return create<ast::BreakStatement>(source);
}
- /// @param dims the dimensionality of the texture
- /// @param subtype the texture subtype.
- /// @returns the multisampled texture
- const ast::MultisampledTexture* multisampled_texture(
- ast::TextureDimension dims,
- const ast::Type* subtype) const {
- return builder->create<ast::MultisampledTexture>(dims, subtype);
+ /// Creates an ast::BreakStatement
+ /// @returns the break statement pointer
+ const ast::BreakStatement* Break() { return create<ast::BreakStatement>(); }
+
+ /// Creates an ast::ContinueStatement
+ /// @param source the source information
+ /// @returns the continue statement pointer
+ const ast::ContinueStatement* Continue(const Source& source) {
+ return create<ast::ContinueStatement>(source);
}
- /// @param source the Source of the node
- /// @param dims the dimensionality of the texture
- /// @param subtype the texture subtype.
- /// @returns the multisampled texture
- const ast::MultisampledTexture* multisampled_texture(
- const Source& source,
- ast::TextureDimension dims,
- const ast::Type* subtype) const {
- return builder->create<ast::MultisampledTexture>(source, dims, subtype);
+ /// Creates an ast::ContinueStatement
+ /// @returns the continue statement pointer
+ const ast::ContinueStatement* Continue() { return create<ast::ContinueStatement>(); }
+
+ /// Creates an ast::ReturnStatement with no return value
+ /// @param source the source information
+ /// @returns the return statement pointer
+ const ast::ReturnStatement* Return(const Source& source) {
+ return create<ast::ReturnStatement>(source);
}
- /// @param dims the dimensionality of the texture
- /// @param format the texel format of the texture
- /// @param access the access control of the texture
- /// @returns the storage texture
- const ast::StorageTexture* storage_texture(ast::TextureDimension dims,
- ast::TexelFormat format,
- ast::Access access) const {
- auto* subtype = ast::StorageTexture::SubtypeFor(format, *builder);
- return builder->create<ast::StorageTexture>(dims, format, subtype,
- access);
+ /// Creates an ast::ReturnStatement with no return value
+ /// @returns the return statement pointer
+ const ast::ReturnStatement* Return() { return create<ast::ReturnStatement>(); }
+
+ /// Creates an ast::ReturnStatement with the given return value
+ /// @param source the source information
+ /// @param val the return value
+ /// @returns the return statement pointer
+ template <typename EXPR>
+ const ast::ReturnStatement* Return(const Source& source, EXPR&& val) {
+ return create<ast::ReturnStatement>(source, Expr(std::forward<EXPR>(val)));
}
- /// @param source the Source of the node
- /// @param dims the dimensionality of the texture
- /// @param format the texel format of the texture
- /// @param access the access control of the texture
- /// @returns the storage texture
- const ast::StorageTexture* storage_texture(const Source& source,
- ast::TextureDimension dims,
- ast::TexelFormat format,
- ast::Access access) const {
- auto* subtype = ast::StorageTexture::SubtypeFor(format, *builder);
- return builder->create<ast::StorageTexture>(source, dims, format, subtype,
- access);
+ /// Creates an ast::ReturnStatement with the given return value
+ /// @param val the return value
+ /// @returns the return statement pointer
+ template <typename EXPR, typename = DisableIfSource<EXPR>>
+ const ast::ReturnStatement* Return(EXPR&& val) {
+ return create<ast::ReturnStatement>(Expr(std::forward<EXPR>(val)));
}
- /// @returns the external texture
- const ast::ExternalTexture* external_texture() const {
- return builder->create<ast::ExternalTexture>();
+ /// Creates an ast::DiscardStatement
+ /// @param source the source information
+ /// @returns the discard statement pointer
+ const ast::DiscardStatement* Discard(const Source& source) {
+ return create<ast::DiscardStatement>(source);
}
- /// @param source the Source of the node
- /// @returns the external texture
- const ast::ExternalTexture* external_texture(const Source& source) const {
- return builder->create<ast::ExternalTexture>(source);
+ /// Creates an ast::DiscardStatement
+ /// @returns the discard statement pointer
+ const ast::DiscardStatement* Discard() { return create<ast::DiscardStatement>(); }
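
A small sketch of the statement helpers above, assuming a ProgramBuilder `b`; the Source coordinates are arbitrary.

    auto* brk  = b.Break();                  // `break;`
    auto* cont = b.Continue();               // `continue;`
    auto* ret  = b.Return(Source{{12, 34}}); // `return;` with a source location
    auto* disc = b.Discard();                // `discard;`
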
+
+ /// Creates an ast::Alias, registering it with AST().TypeDecls().
+ /// @param source the source information
+ /// @param name the alias name
+ /// @param type the alias target type
+ /// @returns the alias type
+ template <typename NAME>
+ const ast::Alias* Alias(const Source& source, NAME&& name, const ast::Type* type) {
+ auto* out = ty.alias(source, std::forward<NAME>(name), type);
+ AST().AddTypeDecl(out);
+ return out;
}
- /// Constructs a TypeName for the type declaration.
- /// @param type the type
- /// @return either type or a pointer to a new ast::TypeName
- const ast::TypeName* Of(const ast::TypeDecl* type) const;
+ /// Creates an ast::Alias, registering it with AST().TypeDecls().
+ /// @param name the alias name
+ /// @param type the alias target type
+ /// @returns the alias type
+ template <typename NAME>
+ const ast::Alias* Alias(NAME&& name, const ast::Type* type) {
+ auto* out = ty.alias(std::forward<NAME>(name), type);
+ AST().AddTypeDecl(out);
+ return out;
+ }
- /// The ProgramBuilder
- ProgramBuilder* const builder;
+ /// Creates an ast::Struct, registering it with AST().TypeDecls().
+ /// @param source the source information
+ /// @param name the struct name
+ /// @param members the struct members
+ /// @returns the struct type
+ template <typename NAME>
+ const ast::Struct* Structure(const Source& source, NAME&& name, ast::StructMemberList members) {
+ auto sym = Sym(std::forward<NAME>(name));
+ auto* type = create<ast::Struct>(source, sym, std::move(members), ast::AttributeList{});
+ AST().AddTypeDecl(type);
+ return type;
+ }
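
A brief sketch, assuming a ProgramBuilder `b`, the `Member()` helper declared elsewhere in this header, and the usual `ty.u32()` / `ty.f32()` type helpers; the struct and member names are illustrative.

    // struct Params { count : u32, scale : f32 }
    auto* s = b.Structure("Params", {b.Member("count", b.ty.u32()),
                                     b.Member("scale", b.ty.f32())});
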
- private:
- /// CToAST<T> is specialized for various `T` types and each specialization
- /// contains a single static `get()` method for obtaining the corresponding
- /// AST type for the C type `T`.
- /// `get()` has the signature:
- /// `static const ast::Type* get(Types* t)`
- template <typename T>
- struct CToAST {};
- };
-
- //////////////////////////////////////////////////////////////////////////////
- // AST helper methods
- //////////////////////////////////////////////////////////////////////////////
-
- /// @return a new unnamed symbol
- Symbol Sym() { return Symbols().New(); }
-
- /// @param name the symbol string
- /// @return a Symbol with the given name
- Symbol Sym(const std::string& name) { return Symbols().Register(name); }
-
- /// @param sym the symbol
- /// @return `sym`
- Symbol Sym(Symbol sym) { return sym; }
-
- /// @param expr the expression
- /// @return expr
- template <typename T>
- traits::EnableIfIsType<T, ast::Expression>* Expr(T* expr) {
- return expr;
- }
-
- /// Passthrough for nullptr
- /// @return nullptr
- const ast::IdentifierExpression* Expr(std::nullptr_t) { return nullptr; }
-
- /// @param source the source information
- /// @param symbol the identifier symbol
- /// @return an ast::IdentifierExpression with the given symbol
- const ast::IdentifierExpression* Expr(const Source& source, Symbol symbol) {
- return create<ast::IdentifierExpression>(source, symbol);
- }
-
- /// @param symbol the identifier symbol
- /// @return an ast::IdentifierExpression with the given symbol
- const ast::IdentifierExpression* Expr(Symbol symbol) {
- return create<ast::IdentifierExpression>(symbol);
- }
-
- /// @param source the source information
- /// @param variable the AST variable
- /// @return an ast::IdentifierExpression with the variable's symbol
- const ast::IdentifierExpression* Expr(const Source& source,
- const ast::Variable* variable) {
- return create<ast::IdentifierExpression>(source, variable->symbol);
- }
-
- /// @param variable the AST variable
- /// @return an ast::IdentifierExpression with the variable's symbol
- const ast::IdentifierExpression* Expr(const ast::Variable* variable) {
- return create<ast::IdentifierExpression>(variable->symbol);
- }
-
- /// @param source the source information
- /// @param name the identifier name
- /// @return an ast::IdentifierExpression with the given name
- const ast::IdentifierExpression* Expr(const Source& source,
- const char* name) {
- return create<ast::IdentifierExpression>(source, Symbols().Register(name));
- }
-
- /// @param name the identifier name
- /// @return an ast::IdentifierExpression with the given name
- const ast::IdentifierExpression* Expr(const char* name) {
- return create<ast::IdentifierExpression>(Symbols().Register(name));
- }
-
- /// @param source the source information
- /// @param name the identifier name
- /// @return an ast::IdentifierExpression with the given name
- const ast::IdentifierExpression* Expr(const Source& source,
- const std::string& name) {
- return create<ast::IdentifierExpression>(source, Symbols().Register(name));
- }
-
- /// @param name the identifier name
- /// @return an ast::IdentifierExpression with the given name
- const ast::IdentifierExpression* Expr(const std::string& name) {
- return create<ast::IdentifierExpression>(Symbols().Register(name));
- }
-
- /// @param source the source information
- /// @param value the boolean value
- /// @return a Scalar constructor for the given value
- const ast::BoolLiteralExpression* Expr(const Source& source, bool value) {
- return create<ast::BoolLiteralExpression>(source, value);
- }
-
- /// @param value the boolean value
- /// @return a Scalar constructor for the given value
- const ast::BoolLiteralExpression* Expr(bool value) {
- return create<ast::BoolLiteralExpression>(value);
- }
-
- /// @param source the source information
- /// @param value the float value
- /// @return a Scalar constructor for the given value
- const ast::FloatLiteralExpression* Expr(const Source& source, f32 value) {
- return create<ast::FloatLiteralExpression>(source, value);
- }
-
- /// @param value the float value
- /// @return a Scalar constructor for the given value
- const ast::FloatLiteralExpression* Expr(f32 value) {
- return create<ast::FloatLiteralExpression>(value);
- }
-
- /// @param source the source information
- /// @param value the integer value
- /// @return a Scalar constructor for the given value
- const ast::SintLiteralExpression* Expr(const Source& source, i32 value) {
- return create<ast::SintLiteralExpression>(source, value);
- }
-
- /// @param value the integer value
- /// @return a Scalar constructor for the given value
- const ast::SintLiteralExpression* Expr(i32 value) {
- return create<ast::SintLiteralExpression>(value);
- }
-
- /// @param source the source information
- /// @param value the unsigned int value
- /// @return a Scalar constructor for the given value
- const ast::UintLiteralExpression* Expr(const Source& source, u32 value) {
- return create<ast::UintLiteralExpression>(source, value);
- }
-
- /// @param value the unsigned int value
- /// @return a Scalar constructor for the given value
- const ast::UintLiteralExpression* Expr(u32 value) {
- return create<ast::UintLiteralExpression>(value);
- }
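
For reference, a sketch of how the `Expr()` overloads above dispatch, assuming a ProgramBuilder `b`; the values are arbitrary.

    auto* id = b.Expr("color"); // identifier -> ast::IdentifierExpression
    auto* bl = b.Expr(true);    // bool       -> ast::BoolLiteralExpression
    auto* fl = b.Expr(1.5f);    // f32        -> ast::FloatLiteralExpression
    auto* si = b.Expr(-3);      // i32        -> ast::SintLiteralExpression
    auto* ui = b.Expr(7u);      // u32        -> ast::UintLiteralExpression
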
-
- /// Converts `arg` to an `ast::Expression` using `Expr()`, then appends it to
- /// `list`.
- /// @param list the list to append to
- /// @param arg the arg to create
- template <typename ARG>
- void Append(ast::ExpressionList& list, ARG&& arg) {
- list.emplace_back(Expr(std::forward<ARG>(arg)));
- }
-
- /// Converts `arg0` and `args` to `ast::Expression`s using `Expr()`,
- /// then appends them to `list`.
- /// @param list the list to append to
- /// @param arg0 the first argument
- /// @param args the rest of the arguments
- template <typename ARG0, typename... ARGS>
- void Append(ast::ExpressionList& list, ARG0&& arg0, ARGS&&... args) {
- Append(list, std::forward<ARG0>(arg0));
- Append(list, std::forward<ARGS>(args)...);
- }
-
- /// @return an empty list of expressions
- ast::ExpressionList ExprList() { return {}; }
-
- /// @param args the list of expressions
- /// @return the list of expressions converted to `ast::Expression`s using
- /// `Expr()`,
- template <typename... ARGS>
- ast::ExpressionList ExprList(ARGS&&... args) {
- ast::ExpressionList list;
- list.reserve(sizeof...(args));
- Append(list, std::forward<ARGS>(args)...);
- return list;
- }
-
- /// @param list the list of expressions
- /// @return `list`
- ast::ExpressionList ExprList(ast::ExpressionList list) { return list; }
-
- /// @param args the arguments for the type constructor
- /// @return an `ast::CallExpression` of type `ty`, with the values
- /// of `args` converted to `ast::Expression`s using `Expr()`
- template <typename T, typename... ARGS>
- const ast::CallExpression* Construct(ARGS&&... args) {
- return Construct(ty.Of<T>(), std::forward<ARGS>(args)...);
- }
-
- /// @param type the type to construct
- /// @param args the arguments for the constructor
- /// @return an `ast::CallExpression` of `type` constructed with the
- /// values `args`.
- template <typename... ARGS>
- const ast::CallExpression* Construct(const ast::Type* type, ARGS&&... args) {
- return Construct(source_, type, std::forward<ARGS>(args)...);
- }
-
- /// @param source the source information
- /// @param type the type to construct
- /// @param args the arguments for the constructor
- /// @return an `ast::CallExpression` of `type` constructed with the
- /// values `args`.
- template <typename... ARGS>
- const ast::CallExpression* Construct(const Source& source,
- const ast::Type* type,
- ARGS&&... args) {
- return create<ast::CallExpression>(source, type,
- ExprList(std::forward<ARGS>(args)...));
- }
-
- /// @param expr the expression for the bitcast
- /// @return an `ast::BitcastExpression` of type `ty`, with the values of
- /// `expr` converted to `ast::Expression`s using `Expr()`
- template <typename T, typename EXPR>
- const ast::BitcastExpression* Bitcast(EXPR&& expr) {
- return Bitcast(ty.Of<T>(), std::forward<EXPR>(expr));
- }
-
- /// @param type the type to cast to
- /// @param expr the expression for the bitcast
- /// @return an `ast::BitcastExpression` of `type` constructed with the values
- /// `expr`.
- template <typename EXPR>
- const ast::BitcastExpression* Bitcast(const ast::Type* type, EXPR&& expr) {
- return create<ast::BitcastExpression>(type, Expr(std::forward<EXPR>(expr)));
- }
-
- /// @param source the source information
- /// @param type the type to cast to
- /// @param expr the expression for the bitcast
- /// @return an `ast::BitcastExpression` of `type` constructed with the values
- /// `expr`.
- template <typename EXPR>
- const ast::BitcastExpression* Bitcast(const Source& source,
- const ast::Type* type,
- EXPR&& expr) {
- return create<ast::BitcastExpression>(source, type,
- Expr(std::forward<EXPR>(expr)));
- }
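
A short sketch of the `Construct()` and `Bitcast()` helpers above, assuming a ProgramBuilder `b` and the `ty.vec2<f32>()` / `ty.u32()` type helpers; the identifiers are illustrative.

    // vec2<f32>(x, y) type constructor, and bitcast<u32>(f)
    auto* v  = b.Construct(b.ty.vec2<f32>(), "x", "y");
    auto* bc = b.Bitcast(b.ty.u32(), "f");
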
-
- /// @param args the arguments for the vector constructor
- /// @param type the vector type
- /// @param size the vector size
- /// @return an `ast::CallExpression` of a `size`-element vector of
- /// type `type`, constructed with the values `args`.
- template <typename... ARGS>
- const ast::CallExpression* vec(const ast::Type* type,
- uint32_t size,
- ARGS&&... args) {
- return Construct(ty.vec(type, size), std::forward<ARGS>(args)...);
- }
-
- /// @param args the arguments for the vector constructor
- /// @return an `ast::CallExpression` of a 2-element vector of type
- /// `T`, constructed with the values `args`.
- template <typename T, typename... ARGS>
- const ast::CallExpression* vec2(ARGS&&... args) {
- return Construct(ty.vec2<T>(), std::forward<ARGS>(args)...);
- }
-
- /// @param args the arguments for the vector constructor
- /// @return an `ast::CallExpression` of a 3-element vector of type
- /// `T`, constructed with the values `args`.
- template <typename T, typename... ARGS>
- const ast::CallExpression* vec3(ARGS&&... args) {
- return Construct(ty.vec3<T>(), std::forward<ARGS>(args)...);
- }
-
- /// @param args the arguments for the vector constructor
- /// @return an `ast::CallExpression` of a 4-element vector of type
- /// `T`, constructed with the values `args`.
- template <typename T, typename... ARGS>
- const ast::CallExpression* vec4(ARGS&&... args) {
- return Construct(ty.vec4<T>(), std::forward<ARGS>(args)...);
- }
-
- /// @param args the arguments for the matrix constructor
- /// @return an `ast::CallExpression` of a 2x2 matrix of type
- /// `T`, constructed with the values `args`.
- template <typename T, typename... ARGS>
- const ast::CallExpression* mat2x2(ARGS&&... args) {
- return Construct(ty.mat2x2<T>(), std::forward<ARGS>(args)...);
- }
-
- /// @param args the arguments for the matrix constructor
- /// @return an `ast::CallExpression` of a 2x3 matrix of type
- /// `T`, constructed with the values `args`.
- template <typename T, typename... ARGS>
- const ast::CallExpression* mat2x3(ARGS&&... args) {
- return Construct(ty.mat2x3<T>(), std::forward<ARGS>(args)...);
- }
-
- /// @param args the arguments for the matrix constructor
- /// @return an `ast::CallExpression` of a 2x4 matrix of type
- /// `T`, constructed with the values `args`.
- template <typename T, typename... ARGS>
- const ast::CallExpression* mat2x4(ARGS&&... args) {
- return Construct(ty.mat2x4<T>(), std::forward<ARGS>(args)...);
- }
-
- /// @param args the arguments for the matrix constructor
- /// @return an `ast::CallExpression` of a 3x2 matrix of type
- /// `T`, constructed with the values `args`.
- template <typename T, typename... ARGS>
- const ast::CallExpression* mat3x2(ARGS&&... args) {
- return Construct(ty.mat3x2<T>(), std::forward<ARGS>(args)...);
- }
-
- /// @param args the arguments for the matrix constructor
- /// @return an `ast::CallExpression` of a 3x3 matrix of type
- /// `T`, constructed with the values `args`.
- template <typename T, typename... ARGS>
- const ast::CallExpression* mat3x3(ARGS&&... args) {
- return Construct(ty.mat3x3<T>(), std::forward<ARGS>(args)...);
- }
-
- /// @param args the arguments for the matrix constructor
- /// @return an `ast::CallExpression` of a 3x4 matrix of type
- /// `T`, constructed with the values `args`.
- template <typename T, typename... ARGS>
- const ast::CallExpression* mat3x4(ARGS&&... args) {
- return Construct(ty.mat3x4<T>(), std::forward<ARGS>(args)...);
- }
-
- /// @param args the arguments for the matrix constructor
- /// @return an `ast::CallExpression` of a 4x2 matrix of type
- /// `T`, constructed with the values `args`.
- template <typename T, typename... ARGS>
- const ast::CallExpression* mat4x2(ARGS&&... args) {
- return Construct(ty.mat4x2<T>(), std::forward<ARGS>(args)...);
- }
-
- /// @param args the arguments for the matrix constructor
- /// @return an `ast::CallExpression` of a 4x3 matrix of type
- /// `T`, constructed with the values `args`.
- template <typename T, typename... ARGS>
- const ast::CallExpression* mat4x3(ARGS&&... args) {
- return Construct(ty.mat4x3<T>(), std::forward<ARGS>(args)...);
- }
-
- /// @param args the arguments for the matrix constructor
- /// @return an `ast::CallExpression` of a 4x4 matrix of type
- /// `T`, constructed with the values `args`.
- template <typename T, typename... ARGS>
- const ast::CallExpression* mat4x4(ARGS&&... args) {
- return Construct(ty.mat4x4<T>(), std::forward<ARGS>(args)...);
- }
-
- /// @param args the arguments for the array constructor
- /// @return an `ast::CallExpression` of an array with element type
- /// `T` and size `N`, constructed with the values `args`.
- template <typename T, int N, typename... ARGS>
- const ast::CallExpression* array(ARGS&&... args) {
- return Construct(ty.array<T, N>(), std::forward<ARGS>(args)...);
- }
-
- /// @param subtype the array element type
- /// @param n the array size. nullptr represents a runtime-array.
- /// @param args the arguments for the array constructor
- /// @return an `ast::CallExpression` of an array with element type
- /// `subtype`, constructed with the values `args`.
- template <typename EXPR, typename... ARGS>
- const ast::CallExpression* array(const ast::Type* subtype,
- EXPR&& n,
- ARGS&&... args) {
- return Construct(ty.array(subtype, std::forward<EXPR>(n)),
- std::forward<ARGS>(args)...);
- }
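
A short sketch of the vector/array shorthands above, assuming a ProgramBuilder `b` with the `i32` / `f32` aliases in scope (as in the test fixtures); the identifiers are illustrative.

    // vec3<f32>(x, y, z) and array<i32, 4>(a, b, c, d)
    auto* v3  = b.vec3<f32>("x", "y", "z");
    auto* arr = b.array<i32, 4>("a", "b", "c", "d");
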
-
- /// @param name the variable name
- /// @param type the variable type
- /// @param optional the optional variable settings.
- /// Can be any of the following, in any order:
- /// * ast::StorageClass - specifies the variable storage class
- /// * ast::Access - specifies the variable's access control
- /// * ast::Expression* - specifies the variable's initializer expression
- /// * ast::AttributeList - specifies the variable's attributes
- /// Note that repeated arguments of the same type will use the last argument's
- /// value.
- /// @returns a `ast::Variable` with the given name, type and additional
- /// options
- template <typename NAME, typename... OPTIONAL>
- const ast::Variable* Var(NAME&& name,
- const ast::Type* type,
- OPTIONAL&&... optional) {
- VarOptionals opts(std::forward<OPTIONAL>(optional)...);
- return create<ast::Variable>(Sym(std::forward<NAME>(name)), opts.storage,
- opts.access, type, false /* is_const */,
- false /* is_overridable */, opts.constructor,
- std::move(opts.attributes));
- }
-
- /// @param source the variable source
- /// @param name the variable name
- /// @param type the variable type
- /// @param optional the optional variable settings.
- /// Can be any of the following, in any order:
- /// * ast::StorageClass - specifies the variable storage class
- /// * ast::Access - specifies the variable's access control
- /// * ast::Expression* - specifies the variable's initializer expression
- /// * ast::AttributeList - specifies the variable's attributes
- /// Note that repeated arguments of the same type will use the last argument's
- /// value.
- /// @returns a `ast::Variable` with the given name, storage and type
- template <typename NAME, typename... OPTIONAL>
- const ast::Variable* Var(const Source& source,
- NAME&& name,
- const ast::Type* type,
- OPTIONAL&&... optional) {
- VarOptionals opts(std::forward<OPTIONAL>(optional)...);
- return create<ast::Variable>(
- source, Sym(std::forward<NAME>(name)), opts.storage, opts.access, type,
- false /* is_const */, false /* is_overridable */, opts.constructor,
- std::move(opts.attributes));
- }
-
- /// @param name the variable name
- /// @param type the variable type
- /// @param constructor constructor expression
- /// @param attributes optional variable attributes
- /// @returns a constant `ast::Variable` with the given name and type
- template <typename NAME>
- const ast::Variable* Const(NAME&& name,
- const ast::Type* type,
- const ast::Expression* constructor,
- ast::AttributeList attributes = {}) {
- return create<ast::Variable>(
- Sym(std::forward<NAME>(name)), ast::StorageClass::kNone,
- ast::Access::kUndefined, type, true /* is_const */,
- false /* is_overridable */, constructor, attributes);
- }
-
- /// @param source the variable source
- /// @param name the variable name
- /// @param type the variable type
- /// @param constructor constructor expression
- /// @param attributes optional variable attributes
- /// @returns a constant `ast::Variable` with the given name and type
- template <typename NAME>
- const ast::Variable* Const(const Source& source,
- NAME&& name,
- const ast::Type* type,
- const ast::Expression* constructor,
- ast::AttributeList attributes = {}) {
- return create<ast::Variable>(
- source, Sym(std::forward<NAME>(name)), ast::StorageClass::kNone,
- ast::Access::kUndefined, type, true /* is_const */,
- false /* is_overridable */, constructor, attributes);
- }
-
- /// @param name the parameter name
- /// @param type the parameter type
- /// @param attributes optional parameter attributes
- /// @returns a constant `ast::Variable` with the given name and type
- template <typename NAME>
- const ast::Variable* Param(NAME&& name,
- const ast::Type* type,
- ast::AttributeList attributes = {}) {
- return create<ast::Variable>(
- Sym(std::forward<NAME>(name)), ast::StorageClass::kNone,
- ast::Access::kUndefined, type, true /* is_const */,
- false /* is_overridable */, nullptr, attributes);
- }
-
- /// @param source the parameter source
- /// @param name the parameter name
- /// @param type the parameter type
- /// @param attributes optional parameter attributes
- /// @returns a constant `ast::Variable` with the given name and type
- template <typename NAME>
- const ast::Variable* Param(const Source& source,
- NAME&& name,
- const ast::Type* type,
- ast::AttributeList attributes = {}) {
- return create<ast::Variable>(
- source, Sym(std::forward<NAME>(name)), ast::StorageClass::kNone,
- ast::Access::kUndefined, type, true /* is_const */,
- false /* is_overridable */, nullptr, attributes);
- }
-
- /// @param name the variable name
- /// @param type the variable type
- /// @param optional the optional variable settings.
- /// Can be any of the following, in any order:
- /// * ast::StorageClass - specifies the variable storage class
- /// * ast::Access - specifies the variable's access control
- /// * ast::Expression* - specifies the variable's initializer expression
- /// * ast::AttributeList - specifies the variable's attributes
- /// Note that repeated arguments of the same type will use the last argument's
- /// value.
- /// @returns a new `ast::Variable`, which is automatically registered as a
- /// global variable with the ast::Module.
- template <typename NAME,
- typename... OPTIONAL,
- typename = DisableIfSource<NAME>>
- const ast::Variable* Global(NAME&& name,
- const ast::Type* type,
- OPTIONAL&&... optional) {
- auto* var = Var(std::forward<NAME>(name), type,
- std::forward<OPTIONAL>(optional)...);
- AST().AddGlobalVariable(var);
- return var;
- }
-
- /// @param source the variable source
- /// @param name the variable name
- /// @param type the variable type
- /// @param optional the optional variable settings.
- /// Can be any of the following, in any order:
- /// * ast::StorageClass - specifies the variable storage class
- /// * ast::Access - specifies the variable's access control
- /// * ast::Expression* - specifies the variable's initializer expression
- /// * ast::AttributeList - specifies the variable's attributes
- /// Note that repeated arguments of the same type will use the last argument's
- /// value.
- /// @returns a new `ast::Variable`, which is automatically registered as a
- /// global variable with the ast::Module.
- template <typename NAME, typename... OPTIONAL>
- const ast::Variable* Global(const Source& source,
- NAME&& name,
- const ast::Type* type,
- OPTIONAL&&... optional) {
- auto* var = Var(source, std::forward<NAME>(name), type,
- std::forward<OPTIONAL>(optional)...);
- AST().AddGlobalVariable(var);
- return var;
- }
-
- /// @param name the variable name
- /// @param type the variable type
- /// @param constructor constructor expression
- /// @param attributes optional variable attributes
- /// @returns a const `ast::Variable` constructed by calling Var() with the
- /// arguments of `args`, which is automatically registered as a global
- /// variable with the ast::Module.
- template <typename NAME>
- const ast::Variable* GlobalConst(NAME&& name,
- const ast::Type* type,
- const ast::Expression* constructor,
- ast::AttributeList attributes = {}) {
- auto* var = Const(std::forward<NAME>(name), type, constructor,
- std::move(attributes));
- AST().AddGlobalVariable(var);
- return var;
- }
-
- /// @param source the variable source
- /// @param name the variable name
- /// @param type the variable type
- /// @param constructor constructor expression
- /// @param attributes optional variable attributes
- /// @returns a const `ast::Variable` constructed by calling Var() with the
- /// arguments of `args`, which is automatically registered as a global
- /// variable with the ast::Module.
- template <typename NAME>
- const ast::Variable* GlobalConst(const Source& source,
- NAME&& name,
- const ast::Type* type,
- const ast::Expression* constructor,
- ast::AttributeList attributes = {}) {
- auto* var = Const(source, std::forward<NAME>(name), type, constructor,
- std::move(attributes));
- AST().AddGlobalVariable(var);
- return var;
- }
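
A minimal sketch of the variable helpers above, assuming a ProgramBuilder `b` and the usual `ty.f32()` / `ty.u32()` type helpers; the names and storage classes are illustrative.

    auto* v = b.Var("tmp", b.ty.f32(), ast::StorageClass::kFunction);        // function-scope `var`
    auto* g = b.Global("counter", b.ty.u32(), ast::StorageClass::kPrivate);  // module-scope `var`
    auto* c = b.GlobalConst("size", b.ty.u32(), b.Expr("n"));                // module-scope `let`
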
-
- /// @param name the variable name
- /// @param type the variable type
- /// @param constructor optional constructor expression
- /// @param attributes optional variable attributes
- /// @returns an overridable const `ast::Variable` which is automatically
- /// registered as a global variable with the ast::Module.
- template <typename NAME>
- const ast::Variable* Override(NAME&& name,
- const ast::Type* type,
- const ast::Expression* constructor,
- ast::AttributeList attributes = {}) {
- auto* var = create<ast::Variable>(
- source_, Sym(std::forward<NAME>(name)), ast::StorageClass::kNone,
- ast::Access::kUndefined, type, true /* is_const */,
- true /* is_overridable */, constructor, std::move(attributes));
- AST().AddGlobalVariable(var);
- return var;
- }
-
- /// @param source the variable source
- /// @param name the variable name
- /// @param type the variable type
- /// @param constructor constructor expression
- /// @param attributes optional variable attributes
- /// @returns a const `ast::Variable` constructed by calling Var() with the
- /// arguments of `args`, which is automatically registered as a global
- /// variable with the ast::Module.
- template <typename NAME>
- const ast::Variable* Override(const Source& source,
- NAME&& name,
- const ast::Type* type,
- const ast::Expression* constructor,
- ast::AttributeList attributes = {}) {
- auto* var = create<ast::Variable>(
- source, Sym(std::forward<NAME>(name)), ast::StorageClass::kNone,
- ast::Access::kUndefined, type, true /* is_const */,
- true /* is_overridable */, constructor, std::move(attributes));
- AST().AddGlobalVariable(var);
- return var;
- }
-
- /// @param source the source information
- /// @param expr the expression to take the address of
- /// @return an ast::UnaryOpExpression that takes the address of `expr`
- template <typename EXPR>
- const ast::UnaryOpExpression* AddressOf(const Source& source, EXPR&& expr) {
- return create<ast::UnaryOpExpression>(source, ast::UnaryOp::kAddressOf,
- Expr(std::forward<EXPR>(expr)));
- }
-
- /// @param expr the expression to take the address of
- /// @return an ast::UnaryOpExpression that takes the address of `expr`
- template <typename EXPR>
- const ast::UnaryOpExpression* AddressOf(EXPR&& expr) {
- return create<ast::UnaryOpExpression>(ast::UnaryOp::kAddressOf,
- Expr(std::forward<EXPR>(expr)));
- }
-
- /// @param source the source information
- /// @param expr the expression to perform an indirection on
- /// @return an ast::UnaryOpExpression that dereferences the pointer `expr`
- template <typename EXPR>
- const ast::UnaryOpExpression* Deref(const Source& source, EXPR&& expr) {
- return create<ast::UnaryOpExpression>(source, ast::UnaryOp::kIndirection,
- Expr(std::forward<EXPR>(expr)));
- }
-
- /// @param expr the expression to perform an indirection on
- /// @return an ast::UnaryOpExpression that dereferences the pointer `expr`
- template <typename EXPR>
- const ast::UnaryOpExpression* Deref(EXPR&& expr) {
- return create<ast::UnaryOpExpression>(ast::UnaryOp::kIndirection,
- Expr(std::forward<EXPR>(expr)));
- }
-
- /// @param expr the expression to perform a unary not on
- /// @return an ast::UnaryOpExpression that is the unary not of the input
- /// expression
- template <typename EXPR>
- const ast::UnaryOpExpression* Not(EXPR&& expr) {
- return create<ast::UnaryOpExpression>(ast::UnaryOp::kNot,
- Expr(std::forward<EXPR>(expr)));
- }
-
- /// @param expr the expression to perform a unary complement on
- /// @return an ast::UnaryOpExpression that is the unary complement of the
- /// input expression
- template <typename EXPR>
- const ast::UnaryOpExpression* Complement(EXPR&& expr) {
- return create<ast::UnaryOpExpression>(ast::UnaryOp::kComplement,
- Expr(std::forward<EXPR>(expr)));
- }
-
- /// @param source the source information
- /// @param func the function name
- /// @param args the function call arguments
- /// @returns a `ast::CallExpression` to the function `func`, with the
- /// arguments of `args` converted to `ast::Expression`s using `Expr()`.
- template <typename NAME, typename... ARGS>
- const ast::CallExpression* Call(const Source& source,
- NAME&& func,
- ARGS&&... args) {
- return create<ast::CallExpression>(source, Expr(func),
- ExprList(std::forward<ARGS>(args)...));
- }
-
- /// @param func the function name
- /// @param args the function call arguments
- /// @returns a `ast::CallExpression` to the function `func`, with the
- /// arguments of `args` converted to `ast::Expression`s using `Expr()`.
- template <typename NAME, typename... ARGS, typename = DisableIfSource<NAME>>
- const ast::CallExpression* Call(NAME&& func, ARGS&&... args) {
- return create<ast::CallExpression>(Expr(func),
- ExprList(std::forward<ARGS>(args)...));
- }
-
- /// @param source the source information
- /// @param call the call expression to wrap in a call statement
- /// @returns a `ast::CallStatement` for the given call expression
- const ast::CallStatement* CallStmt(const Source& source,
- const ast::CallExpression* call) {
- return create<ast::CallStatement>(source, call);
- }
-
- /// @param call the call expression to wrap in a call statement
- /// @returns a `ast::CallStatement` for the given call expression
- const ast::CallStatement* CallStmt(const ast::CallExpression* call) {
- return create<ast::CallStatement>(call);
- }
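
A short sketch of the unary and call helpers above, assuming a ProgramBuilder `b`; the function and identifier names are made up.

    auto* ptr  = b.AddressOf("v");            // &v
    auto* val  = b.Deref("p");                // *p
    auto* call = b.Call("my_func", "x", "y"); // my_func(x, y)
    auto* stmt = b.CallStmt(call);            // the call wrapped as a statement
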
-
- /// @param source the source information
- /// @returns a `ast::PhonyExpression`
- const ast::PhonyExpression* Phony(const Source& source) {
- return create<ast::PhonyExpression>(source);
- }
-
- /// @returns a `ast::PhonyExpression`
- const ast::PhonyExpression* Phony() { return create<ast::PhonyExpression>(); }
-
- /// @param expr the expression to ignore
- /// @returns a `ast::AssignmentStatement` that assigns 'expr' to the phony
- /// (underscore) variable.
- template <typename EXPR>
- const ast::AssignmentStatement* Ignore(EXPR&& expr) {
- return create<ast::AssignmentStatement>(Phony(), Expr(expr));
- }
-
- /// @param lhs the left hand argument to the addition operation
- /// @param rhs the right hand argument to the addition operation
- /// @returns a `ast::BinaryExpression` summing the arguments `lhs` and `rhs`
- template <typename LHS, typename RHS>
- const ast::BinaryExpression* Add(LHS&& lhs, RHS&& rhs) {
- return create<ast::BinaryExpression>(ast::BinaryOp::kAdd,
- Expr(std::forward<LHS>(lhs)),
- Expr(std::forward<RHS>(rhs)));
- }
-
- /// @param lhs the left hand argument to the and operation
- /// @param rhs the right hand argument to the and operation
- /// @returns a `ast::BinaryExpression` bitwise anding `lhs` and `rhs`
- template <typename LHS, typename RHS>
- const ast::BinaryExpression* And(LHS&& lhs, RHS&& rhs) {
- return create<ast::BinaryExpression>(ast::BinaryOp::kAnd,
- Expr(std::forward<LHS>(lhs)),
- Expr(std::forward<RHS>(rhs)));
- }
-
- /// @param lhs the left hand argument to the or operation
- /// @param rhs the right hand argument to the or operation
- /// @returns a `ast::BinaryExpression` bitwise or-ing `lhs` and `rhs`
- template <typename LHS, typename RHS>
- const ast::BinaryExpression* Or(LHS&& lhs, RHS&& rhs) {
- return create<ast::BinaryExpression>(ast::BinaryOp::kOr,
- Expr(std::forward<LHS>(lhs)),
- Expr(std::forward<RHS>(rhs)));
- }
-
- /// @param lhs the left hand argument to the subtraction operation
- /// @param rhs the right hand argument to the subtraction operation
- /// @returns a `ast::BinaryExpression` subtracting `rhs` from `lhs`
- template <typename LHS, typename RHS>
- const ast::BinaryExpression* Sub(LHS&& lhs, RHS&& rhs) {
- return create<ast::BinaryExpression>(ast::BinaryOp::kSubtract,
- Expr(std::forward<LHS>(lhs)),
- Expr(std::forward<RHS>(rhs)));
- }
-
- /// @param lhs the left hand argument to the multiplication operation
- /// @param rhs the right hand argument to the multiplication operation
- /// @returns an `ast::BinaryExpression` multiplying `lhs` by `rhs`
- template <typename LHS, typename RHS>
- const ast::BinaryExpression* Mul(LHS&& lhs, RHS&& rhs) {
- return create<ast::BinaryExpression>(ast::BinaryOp::kMultiply,
- Expr(std::forward<LHS>(lhs)),
- Expr(std::forward<RHS>(rhs)));
- }
-
- /// @param source the source information
- /// @param lhs the left hand argument to the multiplication operation
- /// @param rhs the right hand argument to the multiplication operation
- /// @returns an `ast::BinaryExpression` multiplying `lhs` by `rhs`
- template <typename LHS, typename RHS>
- const ast::BinaryExpression* Mul(const Source& source, LHS&& lhs, RHS&& rhs) {
- return create<ast::BinaryExpression>(source, ast::BinaryOp::kMultiply,
- Expr(std::forward<LHS>(lhs)),
- Expr(std::forward<RHS>(rhs)));
- }
-
- /// @param lhs the left hand argument to the division operation
- /// @param rhs the right hand argument to the division operation
- /// @returns a `ast::BinaryExpression` dividing `lhs` by `rhs`
- template <typename LHS, typename RHS>
- const ast::BinaryExpression* Div(LHS&& lhs, RHS&& rhs) {
- return create<ast::BinaryExpression>(ast::BinaryOp::kDivide,
- Expr(std::forward<LHS>(lhs)),
- Expr(std::forward<RHS>(rhs)));
- }
-
- /// @param lhs the left hand argument to the modulo operation
- /// @param rhs the right hand argument to the modulo operation
- /// @returns a `ast::BinaryExpression` applying modulo of `lhs` by `rhs`
- template <typename LHS, typename RHS>
- const ast::BinaryExpression* Mod(LHS&& lhs, RHS&& rhs) {
- return create<ast::BinaryExpression>(ast::BinaryOp::kModulo,
- Expr(std::forward<LHS>(lhs)),
- Expr(std::forward<RHS>(rhs)));
- }
-
- /// @param lhs the left hand argument to the bit shift right operation
- /// @param rhs the right hand argument to the bit shift right operation
- /// @returns a `ast::BinaryExpression` bit shifting right `lhs` by `rhs`
- template <typename LHS, typename RHS>
- const ast::BinaryExpression* Shr(LHS&& lhs, RHS&& rhs) {
- return create<ast::BinaryExpression>(ast::BinaryOp::kShiftRight,
- Expr(std::forward<LHS>(lhs)),
- Expr(std::forward<RHS>(rhs)));
- }
-
- /// @param lhs the left hand argument to the bit shift left operation
- /// @param rhs the right hand argument to the bit shift left operation
- /// @returns a `ast::BinaryExpression` bit shifting left `lhs` by `rhs`
- template <typename LHS, typename RHS>
- const ast::BinaryExpression* Shl(LHS&& lhs, RHS&& rhs) {
- return create<ast::BinaryExpression>(ast::BinaryOp::kShiftLeft,
- Expr(std::forward<LHS>(lhs)),
- Expr(std::forward<RHS>(rhs)));
- }
-
- /// @param lhs the left hand argument to the xor operation
- /// @param rhs the right hand argument to the xor operation
- /// @returns a `ast::BinaryExpression` bitwise xor-ing `lhs` and `rhs`
- template <typename LHS, typename RHS>
- const ast::BinaryExpression* Xor(LHS&& lhs, RHS&& rhs) {
- return create<ast::BinaryExpression>(ast::BinaryOp::kXor,
- Expr(std::forward<LHS>(lhs)),
- Expr(std::forward<RHS>(rhs)));
- }
-
- /// @param lhs the left hand argument to the logical and operation
- /// @param rhs the right hand argument to the logical and operation
- /// @returns a `ast::BinaryExpression` of `lhs` && `rhs`
- template <typename LHS, typename RHS>
- const ast::BinaryExpression* LogicalAnd(LHS&& lhs, RHS&& rhs) {
- return create<ast::BinaryExpression>(ast::BinaryOp::kLogicalAnd,
- Expr(std::forward<LHS>(lhs)),
- Expr(std::forward<RHS>(rhs)));
- }
-
- /// @param lhs the left hand argument to the logical or operation
- /// @param rhs the right hand argument to the logical or operation
- /// @returns a `ast::BinaryExpression` of `lhs` || `rhs`
- template <typename LHS, typename RHS>
- const ast::BinaryExpression* LogicalOr(LHS&& lhs, RHS&& rhs) {
- return create<ast::BinaryExpression>(ast::BinaryOp::kLogicalOr,
- Expr(std::forward<LHS>(lhs)),
- Expr(std::forward<RHS>(rhs)));
- }
-
- /// @param lhs the left hand argument to the greater than operation
- /// @param rhs the right hand argument to the greater than operation
- /// @returns a `ast::BinaryExpression` of `lhs` > `rhs`
- template <typename LHS, typename RHS>
- const ast::BinaryExpression* GreaterThan(LHS&& lhs, RHS&& rhs) {
- return create<ast::BinaryExpression>(ast::BinaryOp::kGreaterThan,
- Expr(std::forward<LHS>(lhs)),
- Expr(std::forward<RHS>(rhs)));
- }
-
- /// @param lhs the left hand argument to the greater than or equal operation
- /// @param rhs the right hand argument to the greater than or equal operation
- /// @returns a `ast::BinaryExpression` of `lhs` >= `rhs`
- template <typename LHS, typename RHS>
- const ast::BinaryExpression* GreaterThanEqual(LHS&& lhs, RHS&& rhs) {
- return create<ast::BinaryExpression>(ast::BinaryOp::kGreaterThanEqual,
- Expr(std::forward<LHS>(lhs)),
- Expr(std::forward<RHS>(rhs)));
- }
-
- /// @param lhs the left hand argument to the less than operation
- /// @param rhs the right hand argument to the less than operation
- /// @returns a `ast::BinaryExpression` of `lhs` < `rhs`
- template <typename LHS, typename RHS>
- const ast::BinaryExpression* LessThan(LHS&& lhs, RHS&& rhs) {
- return create<ast::BinaryExpression>(ast::BinaryOp::kLessThan,
- Expr(std::forward<LHS>(lhs)),
- Expr(std::forward<RHS>(rhs)));
- }
-
- /// @param lhs the left hand argument to the less than or equal operation
- /// @param rhs the right hand argument to the less than or equal operation
- /// @returns a `ast::BinaryExpression` of `lhs` <= `rhs`
- template <typename LHS, typename RHS>
- const ast::BinaryExpression* LessThanEqual(LHS&& lhs, RHS&& rhs) {
- return create<ast::BinaryExpression>(ast::BinaryOp::kLessThanEqual,
- Expr(std::forward<LHS>(lhs)),
- Expr(std::forward<RHS>(rhs)));
- }
-
- /// @param lhs the left hand argument to the equal expression
- /// @param rhs the right hand argument to the equal expression
- /// @returns a `ast::BinaryExpression` comparing `lhs` equal to `rhs`
- template <typename LHS, typename RHS>
- const ast::BinaryExpression* Equal(LHS&& lhs, RHS&& rhs) {
- return create<ast::BinaryExpression>(ast::BinaryOp::kEqual,
- Expr(std::forward<LHS>(lhs)),
- Expr(std::forward<RHS>(rhs)));
- }
-
- /// @param lhs the left hand argument to the not-equal expression
- /// @param rhs the right hand argument to the not-equal expression
- /// @returns an `ast::BinaryExpression` comparing `lhs` to `rhs` for
- /// inequality
- template <typename LHS, typename RHS>
- const ast::BinaryExpression* NotEqual(LHS&& lhs, RHS&& rhs) {
- return create<ast::BinaryExpression>(ast::BinaryOp::kNotEqual,
- Expr(std::forward<LHS>(lhs)),
- Expr(std::forward<RHS>(rhs)));
- }
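
A short sketch of composing the binary-expression helpers above, assuming a ProgramBuilder `b`; the identifiers are illustrative.

    // Builds the AST for `(a + b) * c < d`
    auto* cmp = b.LessThan(b.Mul(b.Add("a", "b"), "c"), "d");
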
-
- /// @param source the source information
- /// @param obj the object for the index accessor expression
- /// @param idx the index argument for the index accessor expression
- /// @returns an `ast::IndexAccessorExpression` that indexes `obj` with `idx`
- template <typename OBJ, typename IDX>
- const ast::IndexAccessorExpression* IndexAccessor(const Source& source,
- OBJ&& obj,
- IDX&& idx) {
- return create<ast::IndexAccessorExpression>(
- source, Expr(std::forward<OBJ>(obj)), Expr(std::forward<IDX>(idx)));
- }
-
- /// @param obj the object for the index accessor expression
- /// @param idx the index argument for the index accessor expression
- /// @returns an `ast::IndexAccessorExpression` that indexes `obj` with `idx`
- template <typename OBJ, typename IDX>
- const ast::IndexAccessorExpression* IndexAccessor(OBJ&& obj, IDX&& idx) {
- return create<ast::IndexAccessorExpression>(Expr(std::forward<OBJ>(obj)),
- Expr(std::forward<IDX>(idx)));
- }
-
- /// @param source the source information
- /// @param obj the object for the member accessor expression
- /// @param idx the index argument for the member accessor expression
- /// @returns a `ast::MemberAccessorExpression` that indexes `obj` with `idx`
- template <typename OBJ, typename IDX>
- const ast::MemberAccessorExpression* MemberAccessor(const Source& source,
- OBJ&& obj,
- IDX&& idx) {
- return create<ast::MemberAccessorExpression>(
- source, Expr(std::forward<OBJ>(obj)), Expr(std::forward<IDX>(idx)));
- }
-
- /// @param obj the object for the member accessor expression
- /// @param idx the index argument for the member accessor expression
- /// @returns a `ast::MemberAccessorExpression` that indexes `obj` with `idx`
- template <typename OBJ, typename IDX>
- const ast::MemberAccessorExpression* MemberAccessor(OBJ&& obj, IDX&& idx) {
- return create<ast::MemberAccessorExpression>(Expr(std::forward<OBJ>(obj)),
- Expr(std::forward<IDX>(idx)));
- }
-
- /// Creates a ast::StructMemberOffsetAttribute
- /// @param val the offset value
- /// @returns the offset attribute pointer
- const ast::StructMemberOffsetAttribute* MemberOffset(uint32_t val) {
- return create<ast::StructMemberOffsetAttribute>(source_, val);
- }
-
- /// Creates a ast::StructMemberSizeAttribute
- /// @param source the source information
- /// @param val the size value
- /// @returns the size attribute pointer
- const ast::StructMemberSizeAttribute* MemberSize(const Source& source,
- uint32_t val) {
- return create<ast::StructMemberSizeAttribute>(source, val);
- }
-
- /// Creates a ast::StructMemberSizeAttribute
- /// @param val the size value
- /// @returns the size attribute pointer
- const ast::StructMemberSizeAttribute* MemberSize(uint32_t val) {
- return create<ast::StructMemberSizeAttribute>(source_, val);
- }
-
- /// Creates a ast::StructMemberAlignAttribute
- /// @param source the source information
- /// @param val the align value
- /// @returns the align attribute pointer
- const ast::StructMemberAlignAttribute* MemberAlign(const Source& source,
- uint32_t val) {
- return create<ast::StructMemberAlignAttribute>(source, val);
- }
-
- /// Creates a ast::StructMemberAlignAttribute
- /// @param val the align value
- /// @returns the align attribute pointer
- const ast::StructMemberAlignAttribute* MemberAlign(uint32_t val) {
- return create<ast::StructMemberAlignAttribute>(source_, val);
- }
-
- /// Creates the ast::GroupAttribute
- /// @param value group attribute index
- /// @returns the group attribute pointer
- const ast::GroupAttribute* Group(uint32_t value) {
- return create<ast::GroupAttribute>(value);
- }
-
- /// Creates the ast::BindingAttribute
- /// @param value the binding index
- /// @returns the binding attribute pointer
- const ast::BindingAttribute* Binding(uint32_t value) {
- return create<ast::BindingAttribute>(value);
- }
-
- /// Convenience function to create both a ast::GroupAttribute and
- /// ast::BindingAttribute
- /// @param group the group index
- /// @param binding the binding index
- /// @returns an attribute list with both the group and binding attributes
- ast::AttributeList GroupAndBinding(uint32_t group, uint32_t binding) {
- return {Group(group), Binding(binding)};
- }
-
- /// Creates an ast::Function and registers it with the ast::Module.
- /// @param source the source information
- /// @param name the function name
- /// @param params the function parameters
- /// @param type the function return type
- /// @param body the function body
- /// @param attributes the optional function attributes
- /// @param return_type_attributes the optional function return type
- /// attributes
- /// @returns the function pointer
- template <typename NAME>
- const ast::Function* Func(const Source& source,
- NAME&& name,
- ast::VariableList params,
- const ast::Type* type,
- ast::StatementList body,
- ast::AttributeList attributes = {},
- ast::AttributeList return_type_attributes = {}) {
- auto* func = create<ast::Function>(
- source, Sym(std::forward<NAME>(name)), params, type,
- create<ast::BlockStatement>(body), attributes, return_type_attributes);
- AST().AddFunction(func);
- return func;
- }
-
- /// Creates an ast::Function and registers it with the ast::Module.
- /// @param name the function name
- /// @param params the function parameters
- /// @param type the function return type
- /// @param body the function body
- /// @param attributes the optional function attributes
- /// @param return_type_attributes the optional function return type
- /// attributes
- /// @returns the function pointer
- template <typename NAME>
- const ast::Function* Func(NAME&& name,
- ast::VariableList params,
- const ast::Type* type,
- ast::StatementList body,
- ast::AttributeList attributes = {},
- ast::AttributeList return_type_attributes = {}) {
- auto* func = create<ast::Function>(Sym(std::forward<NAME>(name)), params,
- type, create<ast::BlockStatement>(body),
- attributes, return_type_attributes);
- AST().AddFunction(func);
- return func;
- }
-
- /// Creates an ast::BreakStatement
- /// @param source the source information
- /// @returns the break statement pointer
- const ast::BreakStatement* Break(const Source& source) {
- return create<ast::BreakStatement>(source);
- }
-
- /// Creates an ast::BreakStatement
- /// @returns the break statement pointer
- const ast::BreakStatement* Break() { return create<ast::BreakStatement>(); }
-
- /// Creates an ast::ContinueStatement
- /// @param source the source information
- /// @returns the continue statement pointer
- const ast::ContinueStatement* Continue(const Source& source) {
- return create<ast::ContinueStatement>(source);
- }
-
- /// Creates an ast::ContinueStatement
- /// @returns the continue statement pointer
- const ast::ContinueStatement* Continue() {
- return create<ast::ContinueStatement>();
- }
-
- /// Creates an ast::ReturnStatement with no return value
- /// @param source the source information
- /// @returns the return statement pointer
- const ast::ReturnStatement* Return(const Source& source) {
- return create<ast::ReturnStatement>(source);
- }
-
- /// Creates an ast::ReturnStatement with no return value
- /// @returns the return statement pointer
- const ast::ReturnStatement* Return() {
- return create<ast::ReturnStatement>();
- }
-
- /// Creates an ast::ReturnStatement with the given return value
- /// @param source the source information
- /// @param val the return value
- /// @returns the return statement pointer
- template <typename EXPR>
- const ast::ReturnStatement* Return(const Source& source, EXPR&& val) {
- return create<ast::ReturnStatement>(source, Expr(std::forward<EXPR>(val)));
- }
-
- /// Creates an ast::ReturnStatement with the given return value
- /// @param val the return value
- /// @returns the return statement pointer
- template <typename EXPR, typename = DisableIfSource<EXPR>>
- const ast::ReturnStatement* Return(EXPR&& val) {
- return create<ast::ReturnStatement>(Expr(std::forward<EXPR>(val)));
- }
-
- /// Creates an ast::DiscardStatement
- /// @param source the source information
- /// @returns the discard statement pointer
- const ast::DiscardStatement* Discard(const Source& source) {
- return create<ast::DiscardStatement>(source);
- }
-
- /// Creates an ast::DiscardStatement
- /// @returns the discard statement pointer
- const ast::DiscardStatement* Discard() {
- return create<ast::DiscardStatement>();
- }
-
- /// Creates a ast::Alias registering it with the AST().TypeDecls().
- /// @param source the source information
- /// @param name the alias name
- /// @param type the alias target type
- /// @returns the alias type
- template <typename NAME>
- const ast::Alias* Alias(const Source& source,
- NAME&& name,
- const ast::Type* type) {
- auto* out = ty.alias(source, std::forward<NAME>(name), type);
- AST().AddTypeDecl(out);
- return out;
- }
-
- /// Creates a ast::Alias registering it with the AST().TypeDecls().
- /// @param name the alias name
- /// @param type the alias target type
- /// @returns the alias type
- template <typename NAME>
- const ast::Alias* Alias(NAME&& name, const ast::Type* type) {
- auto* out = ty.alias(std::forward<NAME>(name), type);
- AST().AddTypeDecl(out);
- return out;
- }
-
- /// Creates a ast::Struct registering it with the AST().TypeDecls().
- /// @param source the source information
- /// @param name the struct name
- /// @param members the struct members
- /// @returns the struct type
- template <typename NAME>
- const ast::Struct* Structure(const Source& source,
- NAME&& name,
- ast::StructMemberList members) {
- auto sym = Sym(std::forward<NAME>(name));
- auto* type = create<ast::Struct>(source, sym, std::move(members),
- ast::AttributeList{});
- AST().AddTypeDecl(type);
- return type;
- }
-
- /// Creates a ast::Struct registering it with the AST().TypeDecls().
- /// @param name the struct name
- /// @param members the struct members
- /// @returns the struct type
- template <typename NAME>
- const ast::Struct* Structure(NAME&& name, ast::StructMemberList members) {
- auto sym = Sym(std::forward<NAME>(name));
- auto* type =
- create<ast::Struct>(sym, std::move(members), ast::AttributeList{});
- AST().AddTypeDecl(type);
- return type;
- }
-
- /// Creates a ast::StructMember
- /// @param source the source information
- /// @param name the struct member name
- /// @param type the struct member type
- /// @param attributes the optional struct member attributes
- /// @returns the struct member pointer
- template <typename NAME>
- const ast::StructMember* Member(const Source& source,
- NAME&& name,
- const ast::Type* type,
- ast::AttributeList attributes = {}) {
- return create<ast::StructMember>(source, Sym(std::forward<NAME>(name)),
- type, std::move(attributes));
- }
-
- /// Creates a ast::StructMember
- /// @param name the struct member name
- /// @param type the struct member type
- /// @param attributes the optional struct member attributes
- /// @returns the struct member pointer
- template <typename NAME>
- const ast::StructMember* Member(NAME&& name,
- const ast::Type* type,
- ast::AttributeList attributes = {}) {
- return create<ast::StructMember>(source_, Sym(std::forward<NAME>(name)),
- type, std::move(attributes));
- }
-
- /// Creates a ast::StructMember with the given byte offset
- /// @param offset the offset to use in the StructMemberOffsetAttribute
- /// @param name the struct member name
- /// @param type the struct member type
- /// @returns the struct member pointer
- template <typename NAME>
- const ast::StructMember* Member(uint32_t offset,
- NAME&& name,
- const ast::Type* type) {
- return create<ast::StructMember>(
- source_, Sym(std::forward<NAME>(name)), type,
- ast::AttributeList{
- create<ast::StructMemberOffsetAttribute>(offset),
- });
- }
-
- /// Creates a ast::BlockStatement with input statements
- /// @param source the source information for the block
- /// @param statements statements of block
- /// @returns the block statement pointer
- template <typename... Statements>
- const ast::BlockStatement* Block(const Source& source,
- Statements&&... statements) {
- return create<ast::BlockStatement>(
- source, ast::StatementList{std::forward<Statements>(statements)...});
- }
-
- /// Creates a ast::BlockStatement with input statements
- /// @param statements statements of block
- /// @returns the block statement pointer
- template <typename... STATEMENTS, typename = DisableIfSource<STATEMENTS...>>
- const ast::BlockStatement* Block(STATEMENTS&&... statements) {
- return create<ast::BlockStatement>(
- ast::StatementList{std::forward<STATEMENTS>(statements)...});
- }
-
- /// Creates a ast::ElseStatement with input condition and body
- /// @param condition the else condition expression
- /// @param body the else body
- /// @returns the else statement pointer
- template <typename CONDITION>
- const ast::ElseStatement* Else(CONDITION&& condition,
- const ast::BlockStatement* body) {
- return create<ast::ElseStatement>(Expr(std::forward<CONDITION>(condition)),
- body);
- }
-
- /// Creates a ast::ElseStatement with no condition and body
- /// @param body the else body
- /// @returns the else statement pointer
- const ast::ElseStatement* Else(const ast::BlockStatement* body) {
- return create<ast::ElseStatement>(nullptr, body);
- }
-
- /// Creates a ast::IfStatement with input condition, body, and optional
- /// variadic else statements
- /// @param source the source information for the if statement
- /// @param condition the if statement condition expression
- /// @param body the if statement body
- /// @param elseStatements optional variadic else statements
- /// @returns the if statement pointer
- template <typename CONDITION, typename... ELSE_STATEMENTS>
- const ast::IfStatement* If(const Source& source,
- CONDITION&& condition,
- const ast::BlockStatement* body,
- ELSE_STATEMENTS&&... elseStatements) {
- return create<ast::IfStatement>(
- source, Expr(std::forward<CONDITION>(condition)), body,
- ast::ElseStatementList{
- std::forward<ELSE_STATEMENTS>(elseStatements)...});
- }
-
- /// Creates a ast::IfStatement with input condition, body, and optional
- /// variadic else statements
- /// @param condition the if statement condition expression
- /// @param body the if statement body
- /// @param elseStatements optional variadic else statements
- /// @returns the if statement pointer
- template <typename CONDITION, typename... ELSE_STATEMENTS>
- const ast::IfStatement* If(CONDITION&& condition,
- const ast::BlockStatement* body,
- ELSE_STATEMENTS&&... elseStatements) {
- return create<ast::IfStatement>(
- Expr(std::forward<CONDITION>(condition)), body,
- ast::ElseStatementList{
- std::forward<ELSE_STATEMENTS>(elseStatements)...});
- }
-
- /// Creates a ast::AssignmentStatement with input lhs and rhs expressions
- /// @param source the source information
- /// @param lhs the left hand side expression initializer
- /// @param rhs the right hand side expression initializer
- /// @returns the assignment statement pointer
- template <typename LhsExpressionInit, typename RhsExpressionInit>
- const ast::AssignmentStatement* Assign(const Source& source,
- LhsExpressionInit&& lhs,
- RhsExpressionInit&& rhs) {
- return create<ast::AssignmentStatement>(
- source, Expr(std::forward<LhsExpressionInit>(lhs)),
- Expr(std::forward<RhsExpressionInit>(rhs)));
- }
-
- /// Creates a ast::AssignmentStatement with input lhs and rhs expressions
- /// @param lhs the left hand side expression initializer
- /// @param rhs the right hand side expression initializer
- /// @returns the assignment statement pointer
- template <typename LhsExpressionInit, typename RhsExpressionInit>
- const ast::AssignmentStatement* Assign(LhsExpressionInit&& lhs,
- RhsExpressionInit&& rhs) {
- return create<ast::AssignmentStatement>(
- Expr(std::forward<LhsExpressionInit>(lhs)),
- Expr(std::forward<RhsExpressionInit>(rhs)));
- }
-
- /// Creates a ast::CompoundAssignmentStatement with input lhs and rhs
- /// expressions, and a binary operator.
- /// @param source the source information
- /// @param lhs the left hand side expression initializer
- /// @param rhs the right hand side expression initializer
- /// @param op the binary operator
- /// @returns the compound assignment statement pointer
- template <typename LhsExpressionInit, typename RhsExpressionInit>
- const ast::CompoundAssignmentStatement* CompoundAssign(
- const Source& source,
- LhsExpressionInit&& lhs,
- RhsExpressionInit&& rhs,
- ast::BinaryOp op) {
- return create<ast::CompoundAssignmentStatement>(
- source, Expr(std::forward<LhsExpressionInit>(lhs)),
- Expr(std::forward<RhsExpressionInit>(rhs)), op);
- }
-
- /// Creates a ast::CompoundAssignmentStatement with input lhs and rhs
- /// expressions, and a binary operator.
- /// @param lhs the left hand side expression initializer
- /// @param rhs the right hand side expression initializer
- /// @param op the binary operator
- /// @returns the compound assignment statement pointer
- template <typename LhsExpressionInit, typename RhsExpressionInit>
- const ast::CompoundAssignmentStatement* CompoundAssign(
- LhsExpressionInit&& lhs,
- RhsExpressionInit&& rhs,
- ast::BinaryOp op) {
- return create<ast::CompoundAssignmentStatement>(
- Expr(std::forward<LhsExpressionInit>(lhs)),
- Expr(std::forward<RhsExpressionInit>(rhs)), op);
- }
-
- /// Creates an ast::IncrementDecrementStatement with input lhs.
- /// @param source the source information
- /// @param lhs the left hand side expression initializer
- /// @returns the increment decrement statement pointer
- template <typename LhsExpressionInit>
- const ast::IncrementDecrementStatement* Increment(const Source& source,
- LhsExpressionInit&& lhs) {
- return create<ast::IncrementDecrementStatement>(
- source, Expr(std::forward<LhsExpressionInit>(lhs)), true);
- }
-
- /// Creates a ast::IncrementDecrementStatement with input lhs.
- /// @param lhs the left hand side expression initializer
- /// @returns the increment decrement statement pointer
- template <typename LhsExpressionInit>
- const ast::IncrementDecrementStatement* Increment(LhsExpressionInit&& lhs) {
- return create<ast::IncrementDecrementStatement>(
- Expr(std::forward<LhsExpressionInit>(lhs)), true);
- }
-
- /// Creates an ast::IncrementDecrementStatement with input lhs.
- /// @param source the source information
- /// @param lhs the left hand side expression initializer
- /// @returns the increment decrement statement pointer
- template <typename LhsExpressionInit>
- const ast::IncrementDecrementStatement* Decrement(const Source& source,
- LhsExpressionInit&& lhs) {
- return create<ast::IncrementDecrementStatement>(
- source, Expr(std::forward<LhsExpressionInit>(lhs)), false);
- }
-
- /// Creates a ast::IncrementDecrementStatement with input lhs.
- /// @param lhs the left hand side expression initializer
- /// @returns the increment decrement statement pointer
- template <typename LhsExpressionInit>
- const ast::IncrementDecrementStatement* Decrement(LhsExpressionInit&& lhs) {
- return create<ast::IncrementDecrementStatement>(
- Expr(std::forward<LhsExpressionInit>(lhs)), false);
- }
-
- /// Creates a ast::LoopStatement with input body and optional continuing
- /// @param source the source information
- /// @param body the loop body
- /// @param continuing the optional continuing block
- /// @returns the loop statement pointer
- const ast::LoopStatement* Loop(
- const Source& source,
- const ast::BlockStatement* body,
- const ast::BlockStatement* continuing = nullptr) {
- return create<ast::LoopStatement>(source, body, continuing);
- }
-
- /// Creates a ast::LoopStatement with input body and optional continuing
- /// @param body the loop body
- /// @param continuing the optional continuing block
- /// @returns the loop statement pointer
- const ast::LoopStatement* Loop(
- const ast::BlockStatement* body,
- const ast::BlockStatement* continuing = nullptr) {
- return create<ast::LoopStatement>(body, continuing);
- }
-
- /// Creates a ast::ForLoopStatement with input body and optional initializer,
- /// condition and continuing.
- /// @param source the source information
- /// @param init the optional loop initializer
- /// @param cond the optional loop condition
- /// @param cont the optional loop continuing
- /// @param body the loop body
- /// @returns the for loop statement pointer
- template <typename COND>
- const ast::ForLoopStatement* For(const Source& source,
- const ast::Statement* init,
- COND&& cond,
- const ast::Statement* cont,
- const ast::BlockStatement* body) {
- return create<ast::ForLoopStatement>(
- source, init, Expr(std::forward<COND>(cond)), cont, body);
- }
-
- /// Creates a ast::ForLoopStatement with input body and optional initializer,
- /// condition and continuing.
- /// @param init the optional loop initializer
- /// @param cond the optional loop condition
- /// @param cont the optional loop continuing
- /// @param body the loop body
- /// @returns the for loop statement pointer
- template <typename COND>
- const ast::ForLoopStatement* For(const ast::Statement* init,
- COND&& cond,
- const ast::Statement* cont,
- const ast::BlockStatement* body) {
- return create<ast::ForLoopStatement>(init, Expr(std::forward<COND>(cond)),
- cont, body);
- }
-
- /// Creates a ast::VariableDeclStatement for the input variable
- /// @param source the source information
- /// @param var the variable to wrap in a decl statement
- /// @returns the variable decl statement pointer
- const ast::VariableDeclStatement* Decl(const Source& source,
- const ast::Variable* var) {
- return create<ast::VariableDeclStatement>(source, var);
- }
-
- /// Creates a ast::VariableDeclStatement for the input variable
- /// @param var the variable to wrap in a decl statement
- /// @returns the variable decl statement pointer
- const ast::VariableDeclStatement* Decl(const ast::Variable* var) {
- return create<ast::VariableDeclStatement>(var);
- }
-
- /// Creates a ast::SwitchStatement with input expression and cases
- /// @param source the source information
- /// @param condition the condition expression initializer
- /// @param cases case statements
- /// @returns the switch statement pointer
- template <typename ExpressionInit, typename... Cases>
- const ast::SwitchStatement* Switch(const Source& source,
- ExpressionInit&& condition,
- Cases&&... cases) {
- return create<ast::SwitchStatement>(
- source, Expr(std::forward<ExpressionInit>(condition)),
- ast::CaseStatementList{std::forward<Cases>(cases)...});
- }
-
- /// Creates a ast::SwitchStatement with input expression and cases
- /// @param condition the condition expression initializer
- /// @param cases case statements
- /// @returns the switch statement pointer
- template <typename ExpressionInit,
- typename... Cases,
- typename = DisableIfSource<ExpressionInit>>
- const ast::SwitchStatement* Switch(ExpressionInit&& condition,
- Cases&&... cases) {
- return create<ast::SwitchStatement>(
- Expr(std::forward<ExpressionInit>(condition)),
- ast::CaseStatementList{std::forward<Cases>(cases)...});
- }
-
- /// Creates a ast::CaseStatement with input list of selectors, and body
- /// @param source the source information
- /// @param selectors list of selectors
- /// @param body the case body
- /// @returns the case statement pointer
- const ast::CaseStatement* Case(const Source& source,
- ast::CaseSelectorList selectors,
- const ast::BlockStatement* body = nullptr) {
- return create<ast::CaseStatement>(source, std::move(selectors),
- body ? body : Block());
- }
-
- /// Creates a ast::CaseStatement with input list of selectors, and body
- /// @param selectors list of selectors
- /// @param body the case body
- /// @returns the case statement pointer
- const ast::CaseStatement* Case(ast::CaseSelectorList selectors,
- const ast::BlockStatement* body = nullptr) {
- return create<ast::CaseStatement>(std::move(selectors),
- body ? body : Block());
- }
-
- /// Convenient overload that takes a single selector
- /// @param selector a single case selector
- /// @param body the case body
- /// @returns the case statement pointer
- const ast::CaseStatement* Case(const ast::IntLiteralExpression* selector,
- const ast::BlockStatement* body = nullptr) {
- return Case(ast::CaseSelectorList{selector}, body);
- }
-
- /// Convenience function that creates a 'default' ast::CaseStatement
- /// @param source the source information
- /// @param body the case body
- /// @returns the case statement pointer
- const ast::CaseStatement* DefaultCase(
- const Source& source,
- const ast::BlockStatement* body = nullptr) {
- return Case(source, ast::CaseSelectorList{}, body);
- }
-
- /// Convenience function that creates a 'default' ast::CaseStatement
- /// @param body the case body
- /// @returns the case statement pointer
- const ast::CaseStatement* DefaultCase(
- const ast::BlockStatement* body = nullptr) {
- return Case(ast::CaseSelectorList{}, body);
- }
-
- /// Creates an ast::FallthroughStatement
- /// @param source the source information
- /// @returns the fallthrough statement pointer
- const ast::FallthroughStatement* Fallthrough(const Source& source) {
- return create<ast::FallthroughStatement>(source);
- }
-
- /// Creates an ast::FallthroughStatement
- /// @returns the fallthrough statement pointer
- const ast::FallthroughStatement* Fallthrough() {
- return create<ast::FallthroughStatement>();
- }
-
- /// Creates an ast::BuiltinAttribute
- /// @param source the source information
- /// @param builtin the builtin value
- /// @returns the builtin attribute pointer
- const ast::BuiltinAttribute* Builtin(const Source& source,
- ast::Builtin builtin) {
- return create<ast::BuiltinAttribute>(source, builtin);
- }
-
- /// Creates an ast::BuiltinAttribute
- /// @param builtin the builtin value
- /// @returns the builtin attribute pointer
- const ast::BuiltinAttribute* Builtin(ast::Builtin builtin) {
- return create<ast::BuiltinAttribute>(source_, builtin);
- }
-
- /// Creates an ast::InterpolateAttribute
- /// @param source the source information
- /// @param type the interpolation type
- /// @param sampling the interpolation sampling
- /// @returns the interpolate attribute pointer
- const ast::InterpolateAttribute* Interpolate(
- const Source& source,
- ast::InterpolationType type,
- ast::InterpolationSampling sampling = ast::InterpolationSampling::kNone) {
- return create<ast::InterpolateAttribute>(source, type, sampling);
- }
-
- /// Creates an ast::InterpolateAttribute
- /// @param type the interpolation type
- /// @param sampling the interpolation sampling
- /// @returns the interpolate attribute pointer
- const ast::InterpolateAttribute* Interpolate(
- ast::InterpolationType type,
- ast::InterpolationSampling sampling = ast::InterpolationSampling::kNone) {
- return create<ast::InterpolateAttribute>(source_, type, sampling);
- }
-
- /// Creates an ast::InterpolateAttribute using flat interpolation
- /// @param source the source information
- /// @returns the interpolate attribute pointer
- const ast::InterpolateAttribute* Flat(const Source& source) {
- return Interpolate(source, ast::InterpolationType::kFlat);
- }
-
- /// Creates an ast::InterpolateAttribute using flat interpolation
- /// @returns the interpolate attribute pointer
- const ast::InterpolateAttribute* Flat() {
- return Interpolate(ast::InterpolationType::kFlat);
- }
-
- /// Creates an ast::InvariantAttribute
- /// @param source the source information
- /// @returns the invariant attribute pointer
- const ast::InvariantAttribute* Invariant(const Source& source) {
- return create<ast::InvariantAttribute>(source);
- }
-
- /// Creates an ast::InvariantAttribute
- /// @returns the invariant attribute pointer
- const ast::InvariantAttribute* Invariant() {
- return create<ast::InvariantAttribute>(source_);
- }
-
- /// Creates an ast::LocationAttribute
- /// @param source the source information
- /// @param location the location value
- /// @returns the location attribute pointer
- const ast::LocationAttribute* Location(const Source& source,
- uint32_t location) {
- return create<ast::LocationAttribute>(source, location);
- }
-
- /// Creates an ast::LocationAttribute
- /// @param location the location value
- /// @returns the location attribute pointer
- const ast::LocationAttribute* Location(uint32_t location) {
- return create<ast::LocationAttribute>(source_, location);
- }
-
- /// Creates an ast::IdAttribute
- /// @param source the source information
- /// @param id the id value
- /// @returns the override attribute pointer
- const ast::IdAttribute* Id(const Source& source, uint32_t id) {
- return create<ast::IdAttribute>(source, id);
- }
-
- /// Creates an ast::IdAttribute with a constant ID
- /// @param id the optional id value
- /// @returns the override attribute pointer
- const ast::IdAttribute* Id(uint32_t id) { return Id(source_, id); }
-
- /// Creates an ast::StageAttribute
- /// @param source the source information
- /// @param stage the pipeline stage
- /// @returns the stage attribute pointer
- const ast::StageAttribute* Stage(const Source& source,
- ast::PipelineStage stage) {
- return create<ast::StageAttribute>(source, stage);
- }
-
- /// Creates an ast::StageAttribute
- /// @param stage the pipeline stage
- /// @returns the stage attribute pointer
- const ast::StageAttribute* Stage(ast::PipelineStage stage) {
- return create<ast::StageAttribute>(source_, stage);
- }
-
- /// Creates an ast::WorkgroupAttribute
- /// @param x the x dimension expression
- /// @returns the workgroup attribute pointer
- template <typename EXPR_X>
- const ast::WorkgroupAttribute* WorkgroupSize(EXPR_X&& x) {
- return WorkgroupSize(std::forward<EXPR_X>(x), nullptr, nullptr);
- }
-
- /// Creates an ast::WorkgroupAttribute
- /// @param x the x dimension expression
- /// @param y the y dimension expression
- /// @returns the workgroup attribute pointer
- template <typename EXPR_X, typename EXPR_Y>
- const ast::WorkgroupAttribute* WorkgroupSize(EXPR_X&& x, EXPR_Y&& y) {
- return WorkgroupSize(std::forward<EXPR_X>(x), std::forward<EXPR_Y>(y),
- nullptr);
- }
-
- /// Creates an ast::WorkgroupAttribute
- /// @param source the source information
- /// @param x the x dimension expression
- /// @param y the y dimension expression
- /// @param z the z dimension expression
- /// @returns the workgroup attribute pointer
- template <typename EXPR_X, typename EXPR_Y, typename EXPR_Z>
- const ast::WorkgroupAttribute* WorkgroupSize(const Source& source,
- EXPR_X&& x,
- EXPR_Y&& y,
- EXPR_Z&& z) {
- return create<ast::WorkgroupAttribute>(
- source, Expr(std::forward<EXPR_X>(x)), Expr(std::forward<EXPR_Y>(y)),
- Expr(std::forward<EXPR_Z>(z)));
- }
-
- /// Creates an ast::WorkgroupAttribute
- /// @param x the x dimension expression
- /// @param y the y dimension expression
- /// @param z the z dimension expression
- /// @returns the workgroup attribute pointer
- template <typename EXPR_X, typename EXPR_Y, typename EXPR_Z>
- const ast::WorkgroupAttribute* WorkgroupSize(EXPR_X&& x,
- EXPR_Y&& y,
- EXPR_Z&& z) {
- return create<ast::WorkgroupAttribute>(
- source_, Expr(std::forward<EXPR_X>(x)), Expr(std::forward<EXPR_Y>(y)),
- Expr(std::forward<EXPR_Z>(z)));
- }
-
- /// Creates an ast::DisableValidationAttribute
- /// @param validation the validation to disable
- /// @returns the disable validation attribute pointer
- const ast::DisableValidationAttribute* Disable(
- ast::DisabledValidation validation) {
- return ASTNodes().Create<ast::DisableValidationAttribute>(ID(), validation);
- }
-
- /// Sets the current builder source to `src`
- /// @param src the Source used for future create() calls
- void SetSource(const Source& src) {
- AssertNotMoved();
- source_ = src;
- }
-
- /// Sets the current builder source to `loc`
- /// @param loc the Source used for future create() calls
- void SetSource(const Source::Location& loc) {
- AssertNotMoved();
- source_ = Source(loc);
- }
-
- /// Helper for returning the resolved semantic type of the expression `expr`.
- /// @note As the Resolver is run when the Program is built, this will only be
- /// useful for the Resolver itself and tests that use their own Resolver.
- /// @param expr the AST expression
- /// @return the resolved semantic type for the expression, or nullptr if the
- /// expression has no resolved type.
- const sem::Type* TypeOf(const ast::Expression* expr) const;
-
- /// Helper for returning the resolved semantic type of the variable `var`.
- /// @note As the Resolver is run when the Program is built, this will only be
- /// useful for the Resolver itself and tests that use their own Resolver.
- /// @param var the AST variable
- /// @return the resolved semantic type for the variable, or nullptr if the
- /// variable has no resolved type.
- const sem::Type* TypeOf(const ast::Variable* var) const;
-
- /// Helper for returning the resolved semantic type of the AST type `type`.
- /// @note As the Resolver is run when the Program is built, this will only be
- /// useful for the Resolver itself and tests that use their own Resolver.
- /// @param type the AST type
- /// @return the resolved semantic type for the type, or nullptr if the type
- /// has no resolved type.
- const sem::Type* TypeOf(const ast::Type* type) const;
-
- /// Helper for returning the resolved semantic type of the AST type
- /// declaration `type_decl`.
- /// @note As the Resolver is run when the Program is built, this will only be
- /// useful for the Resolver itself and tests that use their own Resolver.
- /// @param type_decl the AST type declaration
- /// @return the resolved semantic type for the type declaration, or nullptr if
- /// the type declaration has no resolved type.
- const sem::Type* TypeOf(const ast::TypeDecl* type_decl) const;
-
- /// Wraps the ast::Expression in a statement. This is used by tests that
- /// construct a partial AST and require the Resolver to reach these
- /// nodes.
- /// @param expr the ast::Expression to be wrapped by an ast::Statement
- /// @return the ast::Statement that wraps the ast::Expression
- const ast::Statement* WrapInStatement(const ast::Expression* expr);
- /// Wraps the ast::Variable in a ast::VariableDeclStatement. This is used by
- /// tests that construct a partial AST and require the Resolver to reach
- /// these nodes.
- /// @param v the ast::Variable to be wrapped by an ast::VariableDeclStatement
- /// @return the ast::VariableDeclStatement that wraps the ast::Variable
- const ast::VariableDeclStatement* WrapInStatement(const ast::Variable* v);
- /// Returns the statement argument. Used as a passthrough-overload by
- /// WrapInFunction().
- /// @param stmt the ast::Statement
- /// @return `stmt`
- const ast::Statement* WrapInStatement(const ast::Statement* stmt);
- /// Wraps the list of arguments in a simple function so that each is reachable
- /// by the Resolver.
- /// @param args a mix of ast::Expression, ast::Statement, ast::Variables.
- /// @returns the function
- template <typename... ARGS>
- const ast::Function* WrapInFunction(ARGS&&... args) {
- ast::StatementList stmts{WrapInStatement(std::forward<ARGS>(args))...};
- return WrapInFunction(std::move(stmts));
- }
- /// @param stmts a list of ast::Statement that will be wrapped by a function,
- /// so that each statement is reachable by the Resolver.
- /// @returns the function
- const ast::Function* WrapInFunction(ast::StatementList stmts);
-
- /// The builder types
- TypesBuilder const ty{this};
-
- protected:
- /// Asserts that the builder has not been moved.
- void AssertNotMoved() const;
-
- private:
- ProgramID id_;
- sem::Manager types_;
- ASTNodeAllocator ast_nodes_;
- SemNodeAllocator sem_nodes_;
- ast::Module* ast_;
- sem::Info sem_;
- SymbolTable symbols_{id_};
- diag::List diagnostics_;
-
- /// The source to use when creating AST nodes without providing a Source as
- /// the first argument.
- Source source_;
-
- /// Set by SetResolveOnBuild(). If set, the Resolver will be run on the
- /// program when built.
- bool resolve_on_build_ = true;
-
- /// Set by MarkAsMoved(). Once set, no methods may be called on this builder.
- bool moved_ = false;
+ /// Creates a ast::Struct registering it with the AST().TypeDecls().
+ /// @param name the struct name
+ /// @param members the struct members
+ /// @returns the struct type
+ template <typename NAME>
+ const ast::Struct* Structure(NAME&& name, ast::StructMemberList members) {
+ auto sym = Sym(std::forward<NAME>(name));
+ auto* type = create<ast::Struct>(sym, std::move(members), ast::AttributeList{});
+ AST().AddTypeDecl(type);
+ return type;
+ }
+
+ /// Creates a ast::StructMember
+ /// @param source the source information
+ /// @param name the struct member name
+ /// @param type the struct member type
+ /// @param attributes the optional struct member attributes
+ /// @returns the struct member pointer
+ template <typename NAME>
+ const ast::StructMember* Member(const Source& source,
+ NAME&& name,
+ const ast::Type* type,
+ ast::AttributeList attributes = {}) {
+ return create<ast::StructMember>(source, Sym(std::forward<NAME>(name)), type,
+ std::move(attributes));
+ }
+
+ /// Creates a ast::StructMember
+ /// @param name the struct member name
+ /// @param type the struct member type
+ /// @param attributes the optional struct member attributes
+ /// @returns the struct member pointer
+ template <typename NAME>
+ const ast::StructMember* Member(NAME&& name,
+ const ast::Type* type,
+ ast::AttributeList attributes = {}) {
+ return create<ast::StructMember>(source_, Sym(std::forward<NAME>(name)), type,
+ std::move(attributes));
+ }
+
+ /// Creates a ast::StructMember with the given byte offset
+ /// @param offset the offset to use in the StructMemberOffsetAttribute
+ /// @param name the struct member name
+ /// @param type the struct member type
+ /// @returns the struct member pointer
+ template <typename NAME>
+ const ast::StructMember* Member(uint32_t offset, NAME&& name, const ast::Type* type) {
+ return create<ast::StructMember>(source_, Sym(std::forward<NAME>(name)), type,
+ ast::AttributeList{
+ create<ast::StructMemberOffsetAttribute>(offset),
+ });
+ }
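As a usage sketch (the builder instance `b` and the member names are illustrative assumptions, not part of this patch), a struct with explicit byte offsets might be built like this:

    ProgramBuilder b;
    auto* s = b.Structure("S", ast::StructMemberList{
                                   b.Member(0u, "a", b.ty.i32()),  // member at byte offset 0
                                   b.Member(4u, "b", b.ty.f32()),  // member at byte offset 4
                               });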
+
+ /// Creates a ast::BlockStatement with input statements
+ /// @param source the source information for the block
+ /// @param statements statements of block
+ /// @returns the block statement pointer
+ template <typename... Statements>
+ const ast::BlockStatement* Block(const Source& source, Statements&&... statements) {
+ return create<ast::BlockStatement>(
+ source, ast::StatementList{std::forward<Statements>(statements)...});
+ }
+
+ /// Creates a ast::BlockStatement with input statements
+ /// @param statements statements of block
+ /// @returns the block statement pointer
+ template <typename... STATEMENTS, typename = DisableIfSource<STATEMENTS...>>
+ const ast::BlockStatement* Block(STATEMENTS&&... statements) {
+ return create<ast::BlockStatement>(
+ ast::StatementList{std::forward<STATEMENTS>(statements)...});
+ }
+
+ /// A wrapper type for the Else statement used to create If statements.
+ struct ElseStmt {
+ /// Default constructor - no else statement.
+ ElseStmt() : stmt(nullptr) {}
+ /// Constructor
+ /// @param s The else statement
+ explicit ElseStmt(const ast::Statement* s) : stmt(s) {}
+ /// The else statement, or nullptr.
+ const ast::Statement* stmt;
+ };
+
+ /// Creates a ast::IfStatement with input condition, body, and optional
+ /// else statement
+ /// @param source the source information for the if statement
+ /// @param condition the if statement condition expression
+ /// @param body the if statement body
+ /// @param else_stmt optional else statement
+ /// @returns the if statement pointer
+ template <typename CONDITION>
+ const ast::IfStatement* If(const Source& source,
+ CONDITION&& condition,
+ const ast::BlockStatement* body,
+ const ElseStmt else_stmt = ElseStmt()) {
+ return create<ast::IfStatement>(source, Expr(std::forward<CONDITION>(condition)), body,
+ else_stmt.stmt);
+ }
+
+ /// Creates a ast::IfStatement with input condition, body, and optional
+ /// else statement
+ /// @param condition the if statement condition expression
+ /// @param body the if statement body
+ /// @param else_stmt optional else statement
+ /// @returns the if statement pointer
+ template <typename CONDITION>
+ const ast::IfStatement* If(CONDITION&& condition,
+ const ast::BlockStatement* body,
+ const ElseStmt else_stmt = ElseStmt()) {
+ return create<ast::IfStatement>(Expr(std::forward<CONDITION>(condition)), body,
+ else_stmt.stmt);
+ }
+
+ /// Creates an Else object.
+ /// @param stmt else statement
+ /// @returns the Else object
+ ElseStmt Else(const ast::Statement* stmt) { return ElseStmt(stmt); }
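With the variadic else list replaced by the single ElseStmt wrapper above, an if/else could be built roughly as follows (a minimal sketch; `b` and the identifiers are assumed, and string arguments become identifier expressions via Expr()). An "else if" chain is expressed by wrapping a nested If() inside Else():

    ProgramBuilder b;
    auto* stmt = b.If("cond",
                      b.Block(b.Assign("v", "x")),           // then-branch
                      b.Else(b.Block(b.Assign("v", "y"))));  // else-branch takes one statement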
+
+ /// Creates a ast::AssignmentStatement with input lhs and rhs expressions
+ /// @param source the source information
+ /// @param lhs the left hand side expression initializer
+ /// @param rhs the right hand side expression initializer
+ /// @returns the assignment statement pointer
+ template <typename LhsExpressionInit, typename RhsExpressionInit>
+ const ast::AssignmentStatement* Assign(const Source& source,
+ LhsExpressionInit&& lhs,
+ RhsExpressionInit&& rhs) {
+ return create<ast::AssignmentStatement>(source, Expr(std::forward<LhsExpressionInit>(lhs)),
+ Expr(std::forward<RhsExpressionInit>(rhs)));
+ }
+
+ /// Creates a ast::AssignmentStatement with input lhs and rhs expressions
+ /// @param lhs the left hand side expression initializer
+ /// @param rhs the right hand side expression initializer
+ /// @returns the assignment statement pointer
+ template <typename LhsExpressionInit, typename RhsExpressionInit>
+ const ast::AssignmentStatement* Assign(LhsExpressionInit&& lhs, RhsExpressionInit&& rhs) {
+ return create<ast::AssignmentStatement>(Expr(std::forward<LhsExpressionInit>(lhs)),
+ Expr(std::forward<RhsExpressionInit>(rhs)));
+ }
+
+ /// Creates a ast::CompoundAssignmentStatement with input lhs and rhs
+ /// expressions, and a binary operator.
+ /// @param source the source information
+ /// @param lhs the left hand side expression initializer
+ /// @param rhs the right hand side expression initializer
+ /// @param op the binary operator
+ /// @returns the compound assignment statement pointer
+ template <typename LhsExpressionInit, typename RhsExpressionInit>
+ const ast::CompoundAssignmentStatement* CompoundAssign(const Source& source,
+ LhsExpressionInit&& lhs,
+ RhsExpressionInit&& rhs,
+ ast::BinaryOp op) {
+ return create<ast::CompoundAssignmentStatement>(
+ source, Expr(std::forward<LhsExpressionInit>(lhs)),
+ Expr(std::forward<RhsExpressionInit>(rhs)), op);
+ }
+
+ /// Creates a ast::CompoundAssignmentStatement with input lhs and rhs
+ /// expressions, and a binary operator.
+ /// @param lhs the left hand side expression initializer
+ /// @param rhs the right hand side expression initializer
+ /// @param op the binary operator
+ /// @returns the compound assignment statement pointer
+ template <typename LhsExpressionInit, typename RhsExpressionInit>
+ const ast::CompoundAssignmentStatement* CompoundAssign(LhsExpressionInit&& lhs,
+ RhsExpressionInit&& rhs,
+ ast::BinaryOp op) {
+ return create<ast::CompoundAssignmentStatement>(Expr(std::forward<LhsExpressionInit>(lhs)),
+ Expr(std::forward<RhsExpressionInit>(rhs)),
+ op);
+ }
+
+ /// Creates an ast::IncrementDecrementStatement with input lhs.
+ /// @param source the source information
+ /// @param lhs the left hand side expression initializer
+ /// @returns the increment decrement statement pointer
+ template <typename LhsExpressionInit>
+ const ast::IncrementDecrementStatement* Increment(const Source& source,
+ LhsExpressionInit&& lhs) {
+ return create<ast::IncrementDecrementStatement>(
+ source, Expr(std::forward<LhsExpressionInit>(lhs)), true);
+ }
+
+ /// Creates a ast::IncrementDecrementStatement with input lhs.
+ /// @param lhs the left hand side expression initializer
+ /// @returns the increment decrement statement pointer
+ template <typename LhsExpressionInit>
+ const ast::IncrementDecrementStatement* Increment(LhsExpressionInit&& lhs) {
+ return create<ast::IncrementDecrementStatement>(Expr(std::forward<LhsExpressionInit>(lhs)),
+ true);
+ }
+
+ /// Creates an ast::IncrementDecrementStatement with input lhs.
+ /// @param source the source information
+ /// @param lhs the left hand side expression initializer
+ /// @returns the increment decrement statement pointer
+ template <typename LhsExpressionInit>
+ const ast::IncrementDecrementStatement* Decrement(const Source& source,
+ LhsExpressionInit&& lhs) {
+ return create<ast::IncrementDecrementStatement>(
+ source, Expr(std::forward<LhsExpressionInit>(lhs)), false);
+ }
+
+ /// Creates a ast::IncrementDecrementStatement with input lhs.
+ /// @param lhs the left hand side expression initializer
+ /// @returns the increment decrement statement pointer
+ template <typename LhsExpressionInit>
+ const ast::IncrementDecrementStatement* Decrement(LhsExpressionInit&& lhs) {
+ return create<ast::IncrementDecrementStatement>(Expr(std::forward<LhsExpressionInit>(lhs)),
+ false);
+ }
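For reference, the assignment and increment/decrement helpers above compose like this (a sketch; `b` and the identifiers are assumed):

    ProgramBuilder b;
    auto* set = b.Assign("a", "b");                               // a = b
    auto* add = b.CompoundAssign("a", "b", ast::BinaryOp::kAdd);  // a += b
    auto* inc = b.Increment("i");                                 // i++
    auto* dec = b.Decrement("i");                                 // i--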
+
+ /// Creates a ast::LoopStatement with input body and optional continuing
+ /// @param source the source information
+ /// @param body the loop body
+ /// @param continuing the optional continuing block
+ /// @returns the loop statement pointer
+ const ast::LoopStatement* Loop(const Source& source,
+ const ast::BlockStatement* body,
+ const ast::BlockStatement* continuing = nullptr) {
+ return create<ast::LoopStatement>(source, body, continuing);
+ }
+
+ /// Creates a ast::LoopStatement with input body and optional continuing
+ /// @param body the loop body
+ /// @param continuing the optional continuing block
+ /// @returns the loop statement pointer
+ const ast::LoopStatement* Loop(const ast::BlockStatement* body,
+ const ast::BlockStatement* continuing = nullptr) {
+ return create<ast::LoopStatement>(body, continuing);
+ }
+
+ /// Creates a ast::ForLoopStatement with input body and optional initializer,
+ /// condition and continuing.
+ /// @param source the source information
+ /// @param init the optional loop initializer
+ /// @param cond the optional loop condition
+ /// @param cont the optional loop continuing
+ /// @param body the loop body
+ /// @returns the for loop statement pointer
+ template <typename COND>
+ const ast::ForLoopStatement* For(const Source& source,
+ const ast::Statement* init,
+ COND&& cond,
+ const ast::Statement* cont,
+ const ast::BlockStatement* body) {
+ return create<ast::ForLoopStatement>(source, init, Expr(std::forward<COND>(cond)), cont,
+ body);
+ }
+
+ /// Creates a ast::ForLoopStatement with input body and optional initializer,
+ /// condition and continuing.
+ /// @param init the optional loop initializer
+ /// @param cond the optional loop condition
+ /// @param cont the optional loop continuing
+ /// @param body the loop body
+ /// @returns the for loop statement pointer
+ template <typename COND>
+ const ast::ForLoopStatement* For(const ast::Statement* init,
+ COND&& cond,
+ const ast::Statement* cont,
+ const ast::BlockStatement* body) {
+ return create<ast::ForLoopStatement>(init, Expr(std::forward<COND>(cond)), cont, body);
+ }
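A for-loop can be built with any of the initializer, condition and continuing parts omitted; a minimal sketch (names assumed):

    ProgramBuilder b;
    auto* loop = b.For(/* init */ nullptr,
                       /* cond */ "keep_going",
                       /* cont */ nullptr,
                       b.Block(b.Increment("i")));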
+
+ /// Creates a ast::VariableDeclStatement for the input variable
+ /// @param source the source information
+ /// @param var the variable to wrap in a decl statement
+ /// @returns the variable decl statement pointer
+ const ast::VariableDeclStatement* Decl(const Source& source, const ast::Variable* var) {
+ return create<ast::VariableDeclStatement>(source, var);
+ }
+
+ /// Creates a ast::VariableDeclStatement for the input variable
+ /// @param var the variable to wrap in a decl statement
+ /// @returns the variable decl statement pointer
+ const ast::VariableDeclStatement* Decl(const ast::Variable* var) {
+ return create<ast::VariableDeclStatement>(var);
+ }
+
+ /// Creates a ast::SwitchStatement with input expression and cases
+ /// @param source the source information
+ /// @param condition the condition expression initializer
+ /// @param cases case statements
+ /// @returns the switch statement pointer
+ template <typename ExpressionInit, typename... Cases>
+ const ast::SwitchStatement* Switch(const Source& source,
+ ExpressionInit&& condition,
+ Cases&&... cases) {
+ return create<ast::SwitchStatement>(source, Expr(std::forward<ExpressionInit>(condition)),
+ ast::CaseStatementList{std::forward<Cases>(cases)...});
+ }
+
+ /// Creates a ast::SwitchStatement with input expression and cases
+ /// @param condition the condition expression initializer
+ /// @param cases case statements
+ /// @returns the switch statement pointer
+ template <typename ExpressionInit,
+ typename... Cases,
+ typename = DisableIfSource<ExpressionInit>>
+ const ast::SwitchStatement* Switch(ExpressionInit&& condition, Cases&&... cases) {
+ return create<ast::SwitchStatement>(Expr(std::forward<ExpressionInit>(condition)),
+ ast::CaseStatementList{std::forward<Cases>(cases)...});
+ }
+
+ /// Creates a ast::CaseStatement with input list of selectors, and body
+ /// @param source the source information
+ /// @param selectors list of selectors
+ /// @param body the case body
+ /// @returns the case statement pointer
+ const ast::CaseStatement* Case(const Source& source,
+ ast::CaseSelectorList selectors,
+ const ast::BlockStatement* body = nullptr) {
+ return create<ast::CaseStatement>(source, std::move(selectors), body ? body : Block());
+ }
+
+ /// Creates a ast::CaseStatement with input list of selectors, and body
+ /// @param selectors list of selectors
+ /// @param body the case body
+ /// @returns the case statement pointer
+ const ast::CaseStatement* Case(ast::CaseSelectorList selectors,
+ const ast::BlockStatement* body = nullptr) {
+ return create<ast::CaseStatement>(std::move(selectors), body ? body : Block());
+ }
+
+ /// Convenient overload that takes a single selector
+ /// @param selector a single case selector
+ /// @param body the case body
+ /// @returns the case statement pointer
+ const ast::CaseStatement* Case(const ast::IntLiteralExpression* selector,
+ const ast::BlockStatement* body = nullptr) {
+ return Case(ast::CaseSelectorList{selector}, body);
+ }
+
+ /// Convenience function that creates a 'default' ast::CaseStatement
+ /// @param source the source information
+ /// @param body the case body
+ /// @returns the case statement pointer
+ const ast::CaseStatement* DefaultCase(const Source& source,
+ const ast::BlockStatement* body = nullptr) {
+ return Case(source, ast::CaseSelectorList{}, body);
+ }
+
+ /// Convenience function that creates a 'default' ast::CaseStatement
+ /// @param body the case body
+ /// @returns the case statement pointer
+ const ast::CaseStatement* DefaultCase(const ast::BlockStatement* body = nullptr) {
+ return Case(ast::CaseSelectorList{}, body);
+ }
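A small switch built from these helpers might look like this (a sketch; `b` and the identifiers are assumed; value cases would be added with Case() and selectors):

    ProgramBuilder b;
    auto* sw = b.Switch("selector",
                        b.DefaultCase(b.Block(b.Increment("count"))));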
+
+ /// Creates an ast::FallthroughStatement
+ /// @param source the source information
+ /// @returns the fallthrough statement pointer
+ const ast::FallthroughStatement* Fallthrough(const Source& source) {
+ return create<ast::FallthroughStatement>(source);
+ }
+
+ /// Creates an ast::FallthroughStatement
+ /// @returns the fallthrough statement pointer
+ const ast::FallthroughStatement* Fallthrough() { return create<ast::FallthroughStatement>(); }
+
+ /// Creates an ast::BuiltinAttribute
+ /// @param source the source information
+ /// @param builtin the builtin value
+ /// @returns the builtin attribute pointer
+ const ast::BuiltinAttribute* Builtin(const Source& source, ast::Builtin builtin) {
+ return create<ast::BuiltinAttribute>(source, builtin);
+ }
+
+ /// Creates an ast::BuiltinAttribute
+ /// @param builtin the builtin value
+ /// @returns the builtin attribute pointer
+ const ast::BuiltinAttribute* Builtin(ast::Builtin builtin) {
+ return create<ast::BuiltinAttribute>(source_, builtin);
+ }
+
+ /// Creates an ast::InterpolateAttribute
+ /// @param source the source information
+ /// @param type the interpolation type
+ /// @param sampling the interpolation sampling
+ /// @returns the interpolate attribute pointer
+ const ast::InterpolateAttribute* Interpolate(
+ const Source& source,
+ ast::InterpolationType type,
+ ast::InterpolationSampling sampling = ast::InterpolationSampling::kNone) {
+ return create<ast::InterpolateAttribute>(source, type, sampling);
+ }
+
+ /// Creates an ast::InterpolateAttribute
+ /// @param type the interpolation type
+ /// @param sampling the interpolation sampling
+ /// @returns the interpolate attribute pointer
+ const ast::InterpolateAttribute* Interpolate(
+ ast::InterpolationType type,
+ ast::InterpolationSampling sampling = ast::InterpolationSampling::kNone) {
+ return create<ast::InterpolateAttribute>(source_, type, sampling);
+ }
+
+ /// Creates an ast::InterpolateAttribute using flat interpolation
+ /// @param source the source information
+ /// @returns the interpolate attribute pointer
+ const ast::InterpolateAttribute* Flat(const Source& source) {
+ return Interpolate(source, ast::InterpolationType::kFlat);
+ }
+
+ /// Creates an ast::InterpolateAttribute using flat interpolation
+ /// @returns the interpolate attribute pointer
+ const ast::InterpolateAttribute* Flat() { return Interpolate(ast::InterpolationType::kFlat); }
+
+ /// Creates an ast::InvariantAttribute
+ /// @param source the source information
+ /// @returns the invariant attribute pointer
+ const ast::InvariantAttribute* Invariant(const Source& source) {
+ return create<ast::InvariantAttribute>(source);
+ }
+
+ /// Creates an ast::InvariantAttribute
+ /// @returns the invariant attribute pointer
+ const ast::InvariantAttribute* Invariant() { return create<ast::InvariantAttribute>(source_); }
+
+ /// Creates an ast::LocationAttribute
+ /// @param source the source information
+ /// @param location the location value
+ /// @returns the location attribute pointer
+ const ast::LocationAttribute* Location(const Source& source, uint32_t location) {
+ return create<ast::LocationAttribute>(source, location);
+ }
+
+ /// Creates an ast::LocationAttribute
+ /// @param location the location value
+ /// @returns the location attribute pointer
+ const ast::LocationAttribute* Location(uint32_t location) {
+ return create<ast::LocationAttribute>(source_, location);
+ }
+
+ /// Creates an ast::IdAttribute
+ /// @param source the source information
+ /// @param id the id value
+ /// @returns the override attribute pointer
+ const ast::IdAttribute* Id(const Source& source, uint32_t id) {
+ return create<ast::IdAttribute>(source, id);
+ }
+
+ /// Creates an ast::IdAttribute with a constant ID
+ /// @param id the optional id value
+ /// @returns the override attribute pointer
+ const ast::IdAttribute* Id(uint32_t id) { return Id(source_, id); }
+
+ /// Creates an ast::StageAttribute
+ /// @param source the source information
+ /// @param stage the pipeline stage
+ /// @returns the stage attribute pointer
+ const ast::StageAttribute* Stage(const Source& source, ast::PipelineStage stage) {
+ return create<ast::StageAttribute>(source, stage);
+ }
+
+ /// Creates an ast::StageAttribute
+ /// @param stage the pipeline stage
+ /// @returns the stage attribute pointer
+ const ast::StageAttribute* Stage(ast::PipelineStage stage) {
+ return create<ast::StageAttribute>(source_, stage);
+ }
+
+ /// Creates an ast::WorkgroupAttribute
+ /// @param x the x dimension expression
+ /// @returns the workgroup attribute pointer
+ template <typename EXPR_X>
+ const ast::WorkgroupAttribute* WorkgroupSize(EXPR_X&& x) {
+ return WorkgroupSize(std::forward<EXPR_X>(x), nullptr, nullptr);
+ }
+
+ /// Creates an ast::WorkgroupAttribute
+ /// @param source the source information
+ /// @param x the x dimension expression
+ /// @param y the y dimension expression
+ /// @returns the workgroup attribute pointer
+ template <typename EXPR_X, typename EXPR_Y>
+ const ast::WorkgroupAttribute* WorkgroupSize(const Source& source, EXPR_X&& x, EXPR_Y&& y) {
+ return WorkgroupSize(source, std::forward<EXPR_X>(x), std::forward<EXPR_Y>(y), nullptr);
+ }
+
+ /// Creates an ast::WorkgroupAttribute
+ /// @param x the x dimension expression
+ /// @param y the y dimension expression
+ /// @returns the workgroup attribute pointer
+ template <typename EXPR_X, typename EXPR_Y, typename = DisableIfSource<EXPR_X>>
+ const ast::WorkgroupAttribute* WorkgroupSize(EXPR_X&& x, EXPR_Y&& y) {
+ return WorkgroupSize(std::forward<EXPR_X>(x), std::forward<EXPR_Y>(y), nullptr);
+ }
+
+ /// Creates an ast::WorkgroupAttribute
+ /// @param source the source information
+ /// @param x the x dimension expression
+ /// @param y the y dimension expression
+ /// @param z the z dimension expression
+ /// @returns the workgroup attribute pointer
+ template <typename EXPR_X, typename EXPR_Y, typename EXPR_Z>
+ const ast::WorkgroupAttribute* WorkgroupSize(const Source& source,
+ EXPR_X&& x,
+ EXPR_Y&& y,
+ EXPR_Z&& z) {
+ return create<ast::WorkgroupAttribute>(source, Expr(std::forward<EXPR_X>(x)),
+ Expr(std::forward<EXPR_Y>(y)),
+ Expr(std::forward<EXPR_Z>(z)));
+ }
+
+ /// Creates an ast::WorkgroupAttribute
+ /// @param x the x dimension expression
+ /// @param y the y dimension expression
+ /// @param z the z dimension expression
+ /// @returns the workgroup attribute pointer
+ template <typename EXPR_X, typename EXPR_Y, typename EXPR_Z, typename = DisableIfSource<EXPR_X>>
+ const ast::WorkgroupAttribute* WorkgroupSize(EXPR_X&& x, EXPR_Y&& y, EXPR_Z&& z) {
+ return create<ast::WorkgroupAttribute>(source_, Expr(std::forward<EXPR_X>(x)),
+ Expr(std::forward<EXPR_Y>(y)),
+ Expr(std::forward<EXPR_Z>(z)));
+ }
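Combined with Stage() and Func(), the workgroup-size overloads are typically attached to a compute entry point; a rough sketch (the entry-point name and the constants "wg_x"/"wg_y" are assumed):

    ProgramBuilder b;
    b.Func("main", {}, b.ty.void_(), {},
           {
               b.Stage(ast::PipelineStage::kCompute),
               b.WorkgroupSize("wg_x", "wg_y"),  // 2D overload; z is left null
           });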
+
+ /// Creates an ast::DisableValidationAttribute
+ /// @param validation the validation to disable
+ /// @returns the disable validation attribute pointer
+ const ast::DisableValidationAttribute* Disable(ast::DisabledValidation validation) {
+ return ASTNodes().Create<ast::DisableValidationAttribute>(ID(), validation);
+ }
+
+ /// Sets the current builder source to `src`
+ /// @param src the Source used for future create() calls
+ void SetSource(const Source& src) {
+ AssertNotMoved();
+ source_ = src;
+ }
+
+ /// Sets the current builder source to `loc`
+ /// @param loc the Source used for future create() calls
+ void SetSource(const Source::Location& loc) {
+ AssertNotMoved();
+ source_ = Source(loc);
+ }
+
+ /// Helper for returning the resolved semantic type of the expression `expr`.
+ /// @note As the Resolver is run when the Program is built, this will only be
+ /// useful for the Resolver itself and tests that use their own Resolver.
+ /// @param expr the AST expression
+ /// @return the resolved semantic type for the expression, or nullptr if the
+ /// expression has no resolved type.
+ const sem::Type* TypeOf(const ast::Expression* expr) const;
+
+ /// Helper for returning the resolved semantic type of the variable `var`.
+ /// @note As the Resolver is run when the Program is built, this will only be
+ /// useful for the Resolver itself and tests that use their own Resolver.
+ /// @param var the AST variable
+ /// @return the resolved semantic type for the variable, or nullptr if the
+ /// variable has no resolved type.
+ const sem::Type* TypeOf(const ast::Variable* var) const;
+
+ /// Helper for returning the resolved semantic type of the AST type `type`.
+ /// @note As the Resolver is run when the Program is built, this will only be
+ /// useful for the Resolver itself and tests that use their own Resolver.
+ /// @param type the AST type
+ /// @return the resolved semantic type for the type, or nullptr if the type
+ /// has no resolved type.
+ const sem::Type* TypeOf(const ast::Type* type) const;
+
+ /// Helper for returning the resolved semantic type of the AST type
+ /// declaration `type_decl`.
+ /// @note As the Resolver is run when the Program is built, this will only be
+ /// useful for the Resolver itself and tests that use their own Resolver.
+ /// @param type_decl the AST type declaration
+ /// @return the resolved semantic type for the type declaration, or nullptr if
+ /// the type declaration has no resolved type.
+ const sem::Type* TypeOf(const ast::TypeDecl* type_decl) const;
+
+ /// @param type a type
+ /// @returns the name for `type` that closely resembles how it would be
+ /// declared in WGSL.
+ std::string FriendlyName(const ast::Type* type) {
+ return type ? type->FriendlyName(Symbols()) : "<null>";
+ }
+
+ /// @param type a type
+ /// @returns the name for `type` that closely resembles how it would be
+ /// declared in WGSL.
+ std::string FriendlyName(const sem::Type* type) {
+ return type ? type->FriendlyName(Symbols()) : "<null>";
+ }
+
+ /// Overload of FriendlyName, which removes an ambiguity when passing nullptr.
+ /// Simplifies test code.
+ /// @returns "<null>"
+ std::string FriendlyName(std::nullptr_t) { return "<null>"; }
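The FriendlyName() helpers above are mainly for diagnostics and test output; for example (a sketch, with `b` assumed):

    ProgramBuilder b;
    std::string n1 = b.FriendlyName(b.ty.i32());  // "i32"
    std::string n2 = b.FriendlyName(nullptr);     // "<null>" via the nullptr_t overload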
+
+ /// Wraps the ast::Expression in a statement. This is used by tests that
+ /// construct a partial AST and require the Resolver to reach these
+ /// nodes.
+ /// @param expr the ast::Expression to be wrapped by an ast::Statement
+ /// @return the ast::Statement that wraps the ast::Expression
+ const ast::Statement* WrapInStatement(const ast::Expression* expr);
+ /// Wraps the ast::Variable in a ast::VariableDeclStatement. This is used by
+ /// tests that construct a partial AST and require the Resolver to reach
+ /// these nodes.
+ /// @param v the ast::Variable to be wrapped by an ast::VariableDeclStatement
+ /// @return the ast::VariableDeclStatement that wraps the ast::Variable
+ const ast::VariableDeclStatement* WrapInStatement(const ast::Variable* v);
+ /// Returns the statement argument. Used as a passthrough-overload by
+ /// WrapInFunction().
+ /// @param stmt the ast::Statement
+ /// @return `stmt`
+ const ast::Statement* WrapInStatement(const ast::Statement* stmt);
+ /// Wraps the list of arguments in a simple function so that each is reachable
+ /// by the Resolver.
+ /// @param args a mix of ast::Expression, ast::Statement, ast::Variables.
+ /// @returns the function
+ template <typename... ARGS>
+ const ast::Function* WrapInFunction(ARGS&&... args) {
+ ast::StatementList stmts{WrapInStatement(std::forward<ARGS>(args))...};
+ return WrapInFunction(std::move(stmts));
+ }
+ /// @param stmts a list of ast::Statement that will be wrapped by a function,
+ /// so that each statement is reachable by the Resolver.
+ /// @returns the function
+ const ast::Function* WrapInFunction(ast::StatementList stmts);
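A test might use the wrapping helpers like this (a sketch; `b` and the identifiers are assumed); each argument is wrapped in a statement and placed into a single generated function so the Resolver can reach it:

    ProgramBuilder b;
    auto* fn = b.WrapInFunction(b.Expr("unused"),     // expression wrapped in a statement
                                b.Assign("a", "b"));  // statements pass through unchanged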
+
+ /// The builder types
+ TypesBuilder const ty{this};
+
+ protected:
+ /// Asserts that the builder has not been moved.
+ void AssertNotMoved() const;
+
+ private:
+ ProgramID id_;
+ sem::Manager types_;
+ ASTNodeAllocator ast_nodes_;
+ SemNodeAllocator sem_nodes_;
+ ast::Module* ast_;
+ sem::Info sem_;
+ SymbolTable symbols_{id_};
+ diag::List diagnostics_;
+
+ /// The source to use when creating AST nodes without providing a Source as
+ /// the first argument.
+ Source source_;
+
+ /// Set by SetResolveOnBuild(). If set, the Resolver will be run on the
+ /// program when built.
+ bool resolve_on_build_ = true;
+
+ /// Set by MarkAsMoved(). Once set, no methods may be called on this builder.
+ bool moved_ = false;
};
//! @cond Doxygen_Suppress
// Various template specializations for ProgramBuilder::TypesBuilder::CToAST.
template <>
-struct ProgramBuilder::TypesBuilder::CToAST<ProgramBuilder::i32> {
- static const ast::Type* get(const ProgramBuilder::TypesBuilder* t) {
- return t->i32();
- }
+struct ProgramBuilder::TypesBuilder::CToAST<i32> {
+ static const ast::Type* get(const ProgramBuilder::TypesBuilder* t) { return t->i32(); }
+};
+template <>
+struct ProgramBuilder::TypesBuilder::CToAST<u32> {
+ static const ast::Type* get(const ProgramBuilder::TypesBuilder* t) { return t->u32(); }
};
template <>
-struct ProgramBuilder::TypesBuilder::CToAST<ProgramBuilder::u32> {
- static const ast::Type* get(const ProgramBuilder::TypesBuilder* t) {
- return t->u32();
- }
+struct ProgramBuilder::TypesBuilder::CToAST<f32> {
+ static const ast::Type* get(const ProgramBuilder::TypesBuilder* t) { return t->f32(); }
};
template <>
-struct ProgramBuilder::TypesBuilder::CToAST<ProgramBuilder::f32> {
- static const ast::Type* get(const ProgramBuilder::TypesBuilder* t) {
- return t->f32();
- }
+struct ProgramBuilder::TypesBuilder::CToAST<f16> {
+ static const ast::Type* get(const ProgramBuilder::TypesBuilder* t) { return t->f16(); }
};
template <>
struct ProgramBuilder::TypesBuilder::CToAST<bool> {
- static const ast::Type* get(const ProgramBuilder::TypesBuilder* t) {
- return t->bool_();
- }
+ static const ast::Type* get(const ProgramBuilder::TypesBuilder* t) { return t->bool_(); }
};
template <>
struct ProgramBuilder::TypesBuilder::CToAST<void> {
- static const ast::Type* get(const ProgramBuilder::TypesBuilder* t) {
- return t->void_();
- }
+ static const ast::Type* get(const ProgramBuilder::TypesBuilder* t) { return t->void_(); }
};
//! @endcond
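These specializations are what let the templated type helpers accept the C++-side number types; assuming the usual TypesBuilder vector helpers, the mapping can be exercised like this (a sketch, with `b` assumed):

    ProgramBuilder b;
    auto* v3f = b.ty.vec3<tint::f32>();  // element type mapped via the CToAST<f32> specialization (assumed)
    auto* h16 = b.ty.f16();              // f16 is newly covered by CToAST<f16>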
/// @param builder the ProgramBuilder
/// @returns the ProgramID of the ProgramBuilder
inline ProgramID ProgramIDOf(const ProgramBuilder* builder) {
- return builder->ID();
+ return builder->ID();
}
} // namespace tint
diff --git a/chromium/third_party/dawn/src/tint/program_builder_test.cc b/chromium/third_party/dawn/src/tint/program_builder_test.cc
index f18aa10c037..dd7e7a8c37a 100644
--- a/chromium/third_party/dawn/src/tint/program_builder_test.cc
+++ b/chromium/third_party/dawn/src/tint/program_builder_test.cc
@@ -22,50 +22,50 @@ namespace {
using ProgramBuilderTest = testing::Test;
TEST_F(ProgramBuilderTest, IDsAreUnique) {
- Program program_a(ProgramBuilder{});
- Program program_b(ProgramBuilder{});
- Program program_c(ProgramBuilder{});
- EXPECT_NE(program_a.ID(), program_b.ID());
- EXPECT_NE(program_b.ID(), program_c.ID());
- EXPECT_NE(program_c.ID(), program_a.ID());
+ Program program_a(ProgramBuilder{});
+ Program program_b(ProgramBuilder{});
+ Program program_c(ProgramBuilder{});
+ EXPECT_NE(program_a.ID(), program_b.ID());
+ EXPECT_NE(program_b.ID(), program_c.ID());
+ EXPECT_NE(program_c.ID(), program_a.ID());
}
TEST_F(ProgramBuilderTest, WrapDoesntAffectInner) {
- Program inner([] {
- ProgramBuilder builder;
- auto* ty = builder.ty.f32();
- builder.Func("a", {}, ty, {}, {});
- return builder;
- }());
+ Program inner([] {
+ ProgramBuilder builder;
+ auto* ty = builder.ty.f32();
+ builder.Func("a", {}, ty, {}, {});
+ return builder;
+ }());
- ASSERT_EQ(inner.AST().Functions().size(), 1u);
- ASSERT_TRUE(inner.Symbols().Get("a").IsValid());
- ASSERT_FALSE(inner.Symbols().Get("b").IsValid());
+ ASSERT_EQ(inner.AST().Functions().size(), 1u);
+ ASSERT_TRUE(inner.Symbols().Get("a").IsValid());
+ ASSERT_FALSE(inner.Symbols().Get("b").IsValid());
- ProgramBuilder outer = ProgramBuilder::Wrap(&inner);
+ ProgramBuilder outer = ProgramBuilder::Wrap(&inner);
- ASSERT_EQ(inner.AST().Functions().size(), 1u);
- ASSERT_EQ(outer.AST().Functions().size(), 1u);
- EXPECT_EQ(inner.AST().Functions()[0], outer.AST().Functions()[0]);
- EXPECT_TRUE(inner.Symbols().Get("a").IsValid());
- EXPECT_EQ(inner.Symbols().Get("a"), outer.Symbols().Get("a"));
- EXPECT_TRUE(inner.Symbols().Get("a").IsValid());
- EXPECT_TRUE(outer.Symbols().Get("a").IsValid());
- EXPECT_FALSE(inner.Symbols().Get("b").IsValid());
- EXPECT_FALSE(outer.Symbols().Get("b").IsValid());
+ ASSERT_EQ(inner.AST().Functions().size(), 1u);
+ ASSERT_EQ(outer.AST().Functions().size(), 1u);
+ EXPECT_EQ(inner.AST().Functions()[0], outer.AST().Functions()[0]);
+ EXPECT_TRUE(inner.Symbols().Get("a").IsValid());
+ EXPECT_EQ(inner.Symbols().Get("a"), outer.Symbols().Get("a"));
+ EXPECT_TRUE(inner.Symbols().Get("a").IsValid());
+ EXPECT_TRUE(outer.Symbols().Get("a").IsValid());
+ EXPECT_FALSE(inner.Symbols().Get("b").IsValid());
+ EXPECT_FALSE(outer.Symbols().Get("b").IsValid());
- auto* ty = outer.ty.f32();
- outer.Func("b", {}, ty, {}, {});
+ auto* ty = outer.ty.f32();
+ outer.Func("b", {}, ty, {}, {});
- ASSERT_EQ(inner.AST().Functions().size(), 1u);
- ASSERT_EQ(outer.AST().Functions().size(), 2u);
- EXPECT_EQ(inner.AST().Functions()[0], outer.AST().Functions()[0]);
- EXPECT_EQ(outer.AST().Functions()[1]->symbol, outer.Symbols().Get("b"));
- EXPECT_EQ(inner.Symbols().Get("a"), outer.Symbols().Get("a"));
- EXPECT_TRUE(inner.Symbols().Get("a").IsValid());
- EXPECT_TRUE(outer.Symbols().Get("a").IsValid());
- EXPECT_FALSE(inner.Symbols().Get("b").IsValid());
- EXPECT_TRUE(outer.Symbols().Get("b").IsValid());
+ ASSERT_EQ(inner.AST().Functions().size(), 1u);
+ ASSERT_EQ(outer.AST().Functions().size(), 2u);
+ EXPECT_EQ(inner.AST().Functions()[0], outer.AST().Functions()[0]);
+ EXPECT_EQ(outer.AST().Functions()[1]->symbol, outer.Symbols().Get("b"));
+ EXPECT_EQ(inner.Symbols().Get("a"), outer.Symbols().Get("a"));
+ EXPECT_TRUE(inner.Symbols().Get("a").IsValid());
+ EXPECT_TRUE(outer.Symbols().Get("a").IsValid());
+ EXPECT_FALSE(inner.Symbols().Get("b").IsValid());
+ EXPECT_TRUE(outer.Symbols().Get("b").IsValid());
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/program_id.cc b/chromium/third_party/dawn/src/tint/program_id.cc
index 5350de78535..7374df32ad8 100644
--- a/chromium/third_party/dawn/src/tint/program_id.cc
+++ b/chromium/third_party/dawn/src/tint/program_id.cc
@@ -29,7 +29,7 @@ ProgramID::ProgramID() = default;
ProgramID::ProgramID(uint32_t id) : val(id) {}
ProgramID ProgramID::New() {
- return ProgramID(next_program_id++);
+ return ProgramID(next_program_id++);
}
namespace detail {
@@ -44,14 +44,14 @@ void AssertProgramIDsEqual(ProgramID a,
const char* msg,
const char* file,
size_t line) {
- if (a == b) {
- return; // matched
- }
- if (if_valid && (!a || !b)) {
- return; // a or b were not valid
- }
- diag::List diagnostics;
- tint::InternalCompilerError(file, line, system, diagnostics) << msg;
+ if (a == b) {
+ return; // matched
+ }
+ if (if_valid && (!a || !b)) {
+ return; // a or b were not valid
+ }
+ diag::List diagnostics;
+ tint::InternalCompilerError(file, line, system, diagnostics) << msg;
}
} // namespace detail
diff --git a/chromium/third_party/dawn/src/tint/program_id.h b/chromium/third_party/dawn/src/tint/program_id.h
index 09e232fa0d2..c0185436d22 100644
--- a/chromium/third_party/dawn/src/tint/program_id.h
+++ b/chromium/third_party/dawn/src/tint/program_id.h
@@ -34,33 +34,33 @@ namespace tint {
/// owned exclusively by that Program and have accidentally not leaked from
/// another Program.
class ProgramID {
- public:
- /// Constructor
- ProgramID();
+ public:
+ /// Constructor
+ ProgramID();
- /// @returns a new. globally unique ProgramID
- static ProgramID New();
+    /// @returns a new, globally unique ProgramID
+ static ProgramID New();
- /// Equality operator
- /// @param rhs the other ProgramID
- /// @returns true if the ProgramIDs are equal
- bool operator==(const ProgramID& rhs) const { return val == rhs.val; }
+ /// Equality operator
+ /// @param rhs the other ProgramID
+ /// @returns true if the ProgramIDs are equal
+ bool operator==(const ProgramID& rhs) const { return val == rhs.val; }
- /// Inequality operator
- /// @param rhs the other ProgramID
- /// @returns true if the ProgramIDs are not equal
- bool operator!=(const ProgramID& rhs) const { return val != rhs.val; }
+ /// Inequality operator
+ /// @param rhs the other ProgramID
+ /// @returns true if the ProgramIDs are not equal
+ bool operator!=(const ProgramID& rhs) const { return val != rhs.val; }
- /// @returns the numerical identifier value
- uint32_t Value() const { return val; }
+ /// @returns the numerical identifier value
+ uint32_t Value() const { return val; }
- /// @returns true if this ProgramID is valid
- operator bool() const { return val != 0; }
+ /// @returns true if this ProgramID is valid
+ operator bool() const { return val != 0; }
- private:
- explicit ProgramID(uint32_t);
+ private:
+ explicit ProgramID(uint32_t);
- uint32_t val = 0;
+ uint32_t val = 0;
};
/// A simple pass-through function for ProgramID. Intended to be overloaded for
@@ -68,7 +68,7 @@ class ProgramID {
/// @param id a ProgramID
/// @returns id. Simple pass-through function
inline ProgramID ProgramIDOf(ProgramID id) {
- return id;
+ return id;
}
/// Writes the ProgramID to the std::ostream.
@@ -76,8 +76,8 @@ inline ProgramID ProgramIDOf(ProgramID id) {
/// @param id the program identifier to write
/// @returns out so calls can be chained
inline std::ostream& operator<<(std::ostream& out, ProgramID id) {
- out << "Program<" << id.Value() << ">";
- return out;
+ out << "Program<" << id.Value() << ">";
+ return out;
}
namespace detail {
@@ -102,23 +102,21 @@ void AssertProgramIDsEqual(ProgramID a,
/// that the program identifiers for A and B are equal, if both A and B have
/// valid program identifiers.
#if TINT_CHECK_FOR_CROSS_PROGRAM_LEAKS
-#define TINT_ASSERT_PROGRAM_IDS_EQUAL(system, a, b) \
- detail::AssertProgramIDsEqual( \
- ProgramIDOf(a), ProgramIDOf(b), false, tint::diag::System::system, \
- "TINT_ASSERT_PROGRAM_IDS_EQUAL(" #system "," #a ", " #b ")", __FILE__, \
- __LINE__)
-#define TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(system, a, b) \
- detail::AssertProgramIDsEqual( \
- ProgramIDOf(a), ProgramIDOf(b), true, tint::diag::System::system, \
- "TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(" #system ", " #a ", " #b ")", \
- __FILE__, __LINE__)
+#define TINT_ASSERT_PROGRAM_IDS_EQUAL(system, a, b) \
+ detail::AssertProgramIDsEqual( \
+ ProgramIDOf(a), ProgramIDOf(b), false, tint::diag::System::system, \
+ "TINT_ASSERT_PROGRAM_IDS_EQUAL(" #system "," #a ", " #b ")", __FILE__, __LINE__)
+#define TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(system, a, b) \
+ detail::AssertProgramIDsEqual( \
+ ProgramIDOf(a), ProgramIDOf(b), true, tint::diag::System::system, \
+ "TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(" #system ", " #a ", " #b ")", __FILE__, __LINE__)
#else
#define TINT_ASSERT_PROGRAM_IDS_EQUAL(a, b) \
- do { \
- } while (false)
+ do { \
+ } while (false)
#define TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(a, b) \
- do { \
- } while (false)
+ do { \
+ } while (false)
#endif
} // namespace tint
diff --git a/chromium/third_party/dawn/src/tint/program_test.cc b/chromium/third_party/dawn/src/tint/program_test.cc
index a161ecb4134..3bdf11ac92e 100644
--- a/chromium/third_party/dawn/src/tint/program_test.cc
+++ b/chromium/third_party/dawn/src/tint/program_test.cc
@@ -22,88 +22,88 @@ namespace {
using ProgramTest = ast::TestHelper;
TEST_F(ProgramTest, Unbuilt) {
- Program program;
- EXPECT_FALSE(program.IsValid());
+ Program program;
+ EXPECT_FALSE(program.IsValid());
}
TEST_F(ProgramTest, Creation) {
- Program program(std::move(*this));
- EXPECT_EQ(program.AST().Functions().size(), 0u);
+ Program program(std::move(*this));
+ EXPECT_EQ(program.AST().Functions().size(), 0u);
}
TEST_F(ProgramTest, EmptyIsValid) {
- Program program(std::move(*this));
- EXPECT_TRUE(program.IsValid());
+ Program program(std::move(*this));
+ EXPECT_TRUE(program.IsValid());
}
TEST_F(ProgramTest, IDsAreUnique) {
- Program program_a(ProgramBuilder{});
- Program program_b(ProgramBuilder{});
- Program program_c(ProgramBuilder{});
- EXPECT_NE(program_a.ID(), program_b.ID());
- EXPECT_NE(program_b.ID(), program_c.ID());
- EXPECT_NE(program_c.ID(), program_a.ID());
+ Program program_a(ProgramBuilder{});
+ Program program_b(ProgramBuilder{});
+ Program program_c(ProgramBuilder{});
+ EXPECT_NE(program_a.ID(), program_b.ID());
+ EXPECT_NE(program_b.ID(), program_c.ID());
+ EXPECT_NE(program_c.ID(), program_a.ID());
}
TEST_F(ProgramTest, Assert_GlobalVariable) {
- Global("var", ty.f32(), ast::StorageClass::kPrivate);
+ Global("var", ty.f32(), ast::StorageClass::kPrivate);
- Program program(std::move(*this));
- EXPECT_TRUE(program.IsValid());
+ Program program(std::move(*this));
+ EXPECT_TRUE(program.IsValid());
}
TEST_F(ProgramTest, Assert_NullGlobalVariable) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b;
- b.AST().AddGlobalVariable(nullptr);
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b;
+ b.AST().AddGlobalVariable(nullptr);
+ },
+ "internal compiler error");
}
TEST_F(ProgramTest, Assert_NullTypeDecl) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b;
- b.AST().AddTypeDecl(nullptr);
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b;
+ b.AST().AddTypeDecl(nullptr);
+ },
+ "internal compiler error");
}
TEST_F(ProgramTest, Assert_Null_Function) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b;
- b.AST().AddFunction(nullptr);
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b;
+ b.AST().AddFunction(nullptr);
+ },
+ "internal compiler error");
}
TEST_F(ProgramTest, DiagnosticsMove) {
- Diagnostics().add_error(diag::System::Program, "an error message");
-
- Program program_a(std::move(*this));
- EXPECT_FALSE(program_a.IsValid());
- EXPECT_EQ(program_a.Diagnostics().count(), 1u);
- EXPECT_EQ(program_a.Diagnostics().error_count(), 1u);
- EXPECT_EQ(program_a.Diagnostics().begin()->message, "an error message");
-
- Program program_b(std::move(program_a));
- EXPECT_FALSE(program_b.IsValid());
- EXPECT_EQ(program_b.Diagnostics().count(), 1u);
- EXPECT_EQ(program_b.Diagnostics().error_count(), 1u);
- EXPECT_EQ(program_b.Diagnostics().begin()->message, "an error message");
+ Diagnostics().add_error(diag::System::Program, "an error message");
+
+ Program program_a(std::move(*this));
+ EXPECT_FALSE(program_a.IsValid());
+ EXPECT_EQ(program_a.Diagnostics().count(), 1u);
+ EXPECT_EQ(program_a.Diagnostics().error_count(), 1u);
+ EXPECT_EQ(program_a.Diagnostics().begin()->message, "an error message");
+
+ Program program_b(std::move(program_a));
+ EXPECT_FALSE(program_b.IsValid());
+ EXPECT_EQ(program_b.Diagnostics().count(), 1u);
+ EXPECT_EQ(program_b.Diagnostics().error_count(), 1u);
+ EXPECT_EQ(program_b.Diagnostics().begin()->message, "an error message");
}
TEST_F(ProgramTest, ReuseMovedFromVariable) {
- Program a(std::move(*this));
- EXPECT_TRUE(a.IsValid());
+ Program a(std::move(*this));
+ EXPECT_TRUE(a.IsValid());
- Program b = std::move(a);
- EXPECT_TRUE(b.IsValid());
+ Program b = std::move(a);
+ EXPECT_TRUE(b.IsValid());
- a = std::move(b);
- EXPECT_TRUE(a.IsValid());
+ a = std::move(b);
+ EXPECT_TRUE(a.IsValid());
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/reader/reader.h b/chromium/third_party/dawn/src/tint/reader/reader.h
index 7d97821e35a..8ba0a892394 100644
--- a/chromium/third_party/dawn/src/tint/reader/reader.h
+++ b/chromium/third_party/dawn/src/tint/reader/reader.h
@@ -23,39 +23,39 @@ namespace tint::reader {
/// Base class for input readers
class Reader {
- public:
- virtual ~Reader();
+ public:
+ virtual ~Reader();
- /// Parses the input data
- /// @returns true if the parse was successful
- virtual bool Parse() = 0;
+ /// Parses the input data
+ /// @returns true if the parse was successful
+ virtual bool Parse() = 0;
- /// @returns true if an error was encountered.
- bool has_error() const { return diags_.contains_errors(); }
+ /// @returns true if an error was encountered.
+ bool has_error() const { return diags_.contains_errors(); }
- /// @returns the parser error string
- std::string error() const {
- diag::Formatter formatter{{false, false, false, false}};
- return formatter.format(diags_);
- }
+ /// @returns the parser error string
+ std::string error() const {
+ diag::Formatter formatter{{false, false, false, false}};
+ return formatter.format(diags_);
+ }
- /// @returns the full list of diagnostic messages.
- const diag::List& diagnostics() const { return diags_; }
+ /// @returns the full list of diagnostic messages.
+ const diag::List& diagnostics() const { return diags_; }
- /// @returns the program. The program builder in the parser will be reset
- /// after this.
- virtual Program program() = 0;
+ /// @returns the program. The program builder in the parser will be reset
+ /// after this.
+ virtual Program program() = 0;
- protected:
- /// Constructor
- Reader();
+ protected:
+ /// Constructor
+ Reader();
- /// Sets the diagnostic messages
- /// @param diags the list of diagnostic messages
- void set_diagnostics(const diag::List& diags) { diags_ = diags; }
+ /// Sets the diagnostic messages
+ /// @param diags the list of diagnostic messages
+ void set_diagnostics(const diag::List& diags) { diags_ = diags; }
- /// All diagnostic messages from the reader.
- diag::List diags_;
+ /// All diagnostic messages from the reader.
+ diag::List diags_;
};
} // namespace tint::reader
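As a sketch of the contract declared above (illustrative only, not part of the patch; NullReader is a hypothetical subclass):

#include "src/tint/program_builder.h"
#include "src/tint/reader/reader.h"

// Hypothetical reader that parses nothing, for illustration only.
class NullReader : public tint::reader::Reader {
  public:
    // A real reader would report problems through set_diagnostics(), which is
    // what makes has_error() and error() reflect the parse result.
    bool Parse() override { return true; }

    // program() hands out the built Program; the underlying builder is reset
    // afterwards, so it is effectively a one-shot call.
    tint::Program program() override { return tint::Program(tint::ProgramBuilder{}); }
};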
diff --git a/chromium/third_party/dawn/src/tint/reader/spirv/construct.cc b/chromium/third_party/dawn/src/tint/reader/spirv/construct.cc
index 2307c24d45b..9f24d23637e 100644
--- a/chromium/third_party/dawn/src/tint/reader/spirv/construct.cc
+++ b/chromium/third_party/dawn/src/tint/reader/spirv/construct.cc
@@ -32,25 +32,22 @@ Construct::Construct(const Construct* the_parent,
// it's incidental which will appear on the stack first.
the_kind == kLoop
? this
- : ((parent && parent->depth < the_depth) ? parent->enclosing_loop
- : nullptr)),
+ : ((parent && parent->depth < the_depth) ? parent->enclosing_loop : nullptr)),
enclosing_continue(
// Compute the enclosing continue construct. Doing this in the
// constructor member list lets us make the member const.
// Compare parent depth because loop and continue are siblings and
// it's incidental which will appear on the stack first.
- the_kind == kContinue ? this
- : ((parent && parent->depth < the_depth)
- ? parent->enclosing_continue
- : nullptr)),
+ the_kind == kContinue
+ ? this
+ : ((parent && parent->depth < the_depth) ? parent->enclosing_continue : nullptr)),
enclosing_loop_or_continue_or_switch(
// Compute the enclosing loop or continue or switch construct.
// Doing this in the constructor member list lets us make the
// member const.
// Compare parent depth because loop and continue are siblings and
// it's incidental which will appear on the stack first.
- (the_kind == kLoop || the_kind == kContinue ||
- the_kind == kSwitchSelection)
+ (the_kind == kLoop || the_kind == kContinue || the_kind == kSwitchSelection)
? this
: ((parent && parent->depth < the_depth)
? parent->enclosing_loop_or_continue_or_switch
diff --git a/chromium/third_party/dawn/src/tint/reader/spirv/construct.h b/chromium/third_party/dawn/src/tint/reader/spirv/construct.h
index eebb899e38f..de4e477a294 100644
--- a/chromium/third_party/dawn/src/tint/reader/spirv/construct.h
+++ b/chromium/third_party/dawn/src/tint/reader/spirv/construct.h
@@ -65,88 +65,84 @@ namespace tint::reader::spirv {
/// - switch-selection: where the header block ends in OpSwitch
///
struct Construct {
- /// Enumeration for the kinds of structured constructs.
- enum Kind {
- /// The whole function.
- kFunction,
- /// A SPIR-V selection construct, header basic block ending in
- /// OpBrancConditional.
- kIfSelection,
- /// A SPIR-V selection construct, header basic block ending in OpSwitch.
- kSwitchSelection,
- /// A SPIR-V loop construct.
- kLoop,
- /// A SPIR-V continue construct.
- kContinue,
- };
+ /// Enumeration for the kinds of structured constructs.
+ enum Kind {
+ /// The whole function.
+ kFunction,
+ /// A SPIR-V selection construct, header basic block ending in
+        /// OpBranchConditional.
+ kIfSelection,
+ /// A SPIR-V selection construct, header basic block ending in OpSwitch.
+ kSwitchSelection,
+ /// A SPIR-V loop construct.
+ kLoop,
+ /// A SPIR-V continue construct.
+ kContinue,
+ };
- /// Constructor
- /// @param the_parent parent construct
- /// @param the_depth construct nesting depth
- /// @param the_kind construct kind
- /// @param the_begin_id block id of the first block in the construct
- /// @param the_end_id block id of the first block after the construct, or 0
- /// @param the_begin_pos block order position of the_begin_id
- /// @param the_end_pos block order position of the_end_id or a too-large value
- /// @param the_scope_end_pos block position of the first block past the end of
- /// the WGSL scope
- Construct(const Construct* the_parent,
- int the_depth,
- Kind the_kind,
- uint32_t the_begin_id,
- uint32_t the_end_id,
- uint32_t the_begin_pos,
- uint32_t the_end_pos,
- uint32_t the_scope_end_pos);
+ /// Constructor
+ /// @param the_parent parent construct
+ /// @param the_depth construct nesting depth
+ /// @param the_kind construct kind
+ /// @param the_begin_id block id of the first block in the construct
+ /// @param the_end_id block id of the first block after the construct, or 0
+ /// @param the_begin_pos block order position of the_begin_id
+ /// @param the_end_pos block order position of the_end_id or a too-large value
+ /// @param the_scope_end_pos block position of the first block past the end of
+ /// the WGSL scope
+ Construct(const Construct* the_parent,
+ int the_depth,
+ Kind the_kind,
+ uint32_t the_begin_id,
+ uint32_t the_end_id,
+ uint32_t the_begin_pos,
+ uint32_t the_end_pos,
+ uint32_t the_scope_end_pos);
- /// @param pos a block position
- /// @returns true if the given block position is inside this construct.
- bool ContainsPos(uint32_t pos) const {
- return begin_pos <= pos && pos < end_pos;
- }
- /// Returns true if the given block position is inside the WGSL scope
- /// corresponding to this construct. A loop construct's WGSL scope encloses
- /// the associated continue construct. Otherwise the WGSL scope extent is the
- /// same as the block extent.
- /// @param pos a block position
- /// @returns true if the given block position is inside the WGSL scope.
- bool ScopeContainsPos(uint32_t pos) const {
- return begin_pos <= pos && pos < scope_end_pos;
- }
+ /// @param pos a block position
+ /// @returns true if the given block position is inside this construct.
+ bool ContainsPos(uint32_t pos) const { return begin_pos <= pos && pos < end_pos; }
+ /// Returns true if the given block position is inside the WGSL scope
+ /// corresponding to this construct. A loop construct's WGSL scope encloses
+ /// the associated continue construct. Otherwise the WGSL scope extent is the
+ /// same as the block extent.
+ /// @param pos a block position
+ /// @returns true if the given block position is inside the WGSL scope.
+ bool ScopeContainsPos(uint32_t pos) const { return begin_pos <= pos && pos < scope_end_pos; }
- /// The nearest enclosing construct other than itself, or nullptr if
- /// this construct represents the entire function.
- const Construct* const parent = nullptr;
- /// The nearest enclosing loop construct, if one exists. Points to `this`
- /// when this is a loop construct.
- const Construct* const enclosing_loop = nullptr;
- /// The nearest enclosing continue construct, if one exists. Points to
- /// `this` when this is a contnue construct.
- const Construct* const enclosing_continue = nullptr;
- /// The nearest enclosing loop construct or continue construct or
- /// switch-selection construct, if one exists. The signficance is
- /// that a high level language "break" will branch to the merge block
- /// of such an enclosing construct. Points to `this` when this is
- /// a loop construct, a continue construct, or a switch-selection construct.
- const Construct* const enclosing_loop_or_continue_or_switch = nullptr;
+ /// The nearest enclosing construct other than itself, or nullptr if
+ /// this construct represents the entire function.
+ const Construct* const parent = nullptr;
+ /// The nearest enclosing loop construct, if one exists. Points to `this`
+ /// when this is a loop construct.
+ const Construct* const enclosing_loop = nullptr;
+ /// The nearest enclosing continue construct, if one exists. Points to
+    /// `this` when this is a continue construct.
+ const Construct* const enclosing_continue = nullptr;
+ /// The nearest enclosing loop construct or continue construct or
+    /// switch-selection construct, if one exists. The significance is
+ /// that a high level language "break" will branch to the merge block
+ /// of such an enclosing construct. Points to `this` when this is
+ /// a loop construct, a continue construct, or a switch-selection construct.
+ const Construct* const enclosing_loop_or_continue_or_switch = nullptr;
- /// Control flow nesting depth. The entry block is at nesting depth 0.
- const int depth = 0;
- /// The construct kind
- const Kind kind = kFunction;
- /// The id of the first block in this structure.
- const uint32_t begin_id = 0;
- /// 0 for kFunction, or the id of the block immediately after this construct
- /// in the computed block order.
- const uint32_t end_id = 0;
- /// The position of block #begin_id in the computed block order.
- const uint32_t begin_pos = 0;
- /// The position of block #end_id in the block order, or the number of
- /// block order elements if #end_id is 0.
- const uint32_t end_pos = 0;
- /// The position of the first block after the WGSL scope corresponding to
- /// this construct.
- const uint32_t scope_end_pos = 0;
+ /// Control flow nesting depth. The entry block is at nesting depth 0.
+ const int depth = 0;
+ /// The construct kind
+ const Kind kind = kFunction;
+ /// The id of the first block in this structure.
+ const uint32_t begin_id = 0;
+ /// 0 for kFunction, or the id of the block immediately after this construct
+ /// in the computed block order.
+ const uint32_t end_id = 0;
+ /// The position of block #begin_id in the computed block order.
+ const uint32_t begin_pos = 0;
+ /// The position of block #end_id in the block order, or the number of
+ /// block order elements if #end_id is 0.
+ const uint32_t end_pos = 0;
+ /// The position of the first block after the WGSL scope corresponding to
+ /// this construct.
+ const uint32_t scope_end_pos = 0;
};
/// ConstructList is a list of Construct unique pointers.
@@ -156,31 +152,31 @@ using ConstructList = std::vector<std::unique_ptr<Construct>>;
/// @param kind the construct kind to convert
/// @returns the string representation
inline std::string ToString(Construct::Kind kind) {
- switch (kind) {
- case Construct::kFunction:
- return "Function";
- case Construct::kIfSelection:
- return "IfSelection";
- case Construct::kSwitchSelection:
- return "SwitchSelection";
- case Construct::kLoop:
- return "Loop";
- case Construct::kContinue:
- return "Continue";
- }
- return "NONE";
+ switch (kind) {
+ case Construct::kFunction:
+ return "Function";
+ case Construct::kIfSelection:
+ return "IfSelection";
+ case Construct::kSwitchSelection:
+ return "SwitchSelection";
+ case Construct::kLoop:
+ return "Loop";
+ case Construct::kContinue:
+ return "Continue";
+ }
+ return "NONE";
}
/// Converts a construct into a short summary string.
/// @param c the construct, which can be null
/// @returns a short summary string
inline std::string ToStringBrief(const Construct* c) {
- if (c) {
- std::stringstream ss;
- ss << ToString(c->kind) << "@" << c->begin_id;
- return ss.str();
- }
- return "null";
+ if (c) {
+ std::stringstream ss;
+ ss << ToString(c->kind) << "@" << c->begin_id;
+ return ss.str();
+ }
+ return "null";
}
/// Emits a construct to a stream.
@@ -188,64 +184,61 @@ inline std::string ToStringBrief(const Construct* c) {
/// @param c the structured construct
/// @returns the stream
inline std::ostream& operator<<(std::ostream& o, const Construct& c) {
- o << "Construct{ " << ToString(c.kind) << " [" << c.begin_pos << ","
- << c.end_pos << ")"
- << " begin_id:" << c.begin_id << " end_id:" << c.end_id
- << " depth:" << c.depth;
+ o << "Construct{ " << ToString(c.kind) << " [" << c.begin_pos << "," << c.end_pos << ")"
+ << " begin_id:" << c.begin_id << " end_id:" << c.end_id << " depth:" << c.depth;
- o << " parent:" << ToStringBrief(c.parent);
+ o << " parent:" << ToStringBrief(c.parent);
- if (c.scope_end_pos != c.end_pos) {
- o << " scope:[" << c.begin_pos << "," << c.scope_end_pos << ")";
- }
+ if (c.scope_end_pos != c.end_pos) {
+ o << " scope:[" << c.begin_pos << "," << c.scope_end_pos << ")";
+ }
- if (c.enclosing_loop) {
- o << " in-l:" << ToStringBrief(c.enclosing_loop);
- }
+ if (c.enclosing_loop) {
+ o << " in-l:" << ToStringBrief(c.enclosing_loop);
+ }
- if (c.enclosing_continue) {
- o << " in-c:" << ToStringBrief(c.enclosing_continue);
- }
+ if (c.enclosing_continue) {
+ o << " in-c:" << ToStringBrief(c.enclosing_continue);
+ }
- if ((c.enclosing_loop_or_continue_or_switch != c.enclosing_loop) &&
- (c.enclosing_loop_or_continue_or_switch != c.enclosing_continue)) {
- o << " in-c-l-s:" << ToStringBrief(c.enclosing_loop_or_continue_or_switch);
- }
+ if ((c.enclosing_loop_or_continue_or_switch != c.enclosing_loop) &&
+ (c.enclosing_loop_or_continue_or_switch != c.enclosing_continue)) {
+ o << " in-c-l-s:" << ToStringBrief(c.enclosing_loop_or_continue_or_switch);
+ }
- o << " }";
- return o;
+ o << " }";
+ return o;
}
/// Emits a construct to a stream.
/// @param o the stream
/// @param c the structured construct
/// @returns the stream
-inline std::ostream& operator<<(std::ostream& o,
- const std::unique_ptr<Construct>& c) {
- return o << *(c.get());
+inline std::ostream& operator<<(std::ostream& o, const std::unique_ptr<Construct>& c) {
+ return o << *(c.get());
}
/// Converts a construct to a string.
/// @param c the construct
/// @returns the string representation
inline std::string ToString(const Construct& c) {
- std::stringstream ss;
- ss << c;
- return ss.str();
+ std::stringstream ss;
+ ss << c;
+ return ss.str();
}
/// Converts a construct to a string.
/// @param c the construct
/// @returns the string representation
inline std::string ToString(const Construct* c) {
- return c ? ToString(*c) : ToStringBrief(c);
+ return c ? ToString(*c) : ToStringBrief(c);
}
/// Converts a unique pointer to a construct to a string.
/// @param c the construct
/// @returns the string representation
inline std::string ToString(const std::unique_ptr<Construct>& c) {
- return ToString(*(c.get()));
+ return ToString(*(c.get()));
}
/// Emits a construct list to a stream.
@@ -253,21 +246,21 @@ inline std::string ToString(const std::unique_ptr<Construct>& c) {
/// @param cl the construct list
/// @returns the stream
inline std::ostream& operator<<(std::ostream& o, const ConstructList& cl) {
- o << "ConstructList{\n";
- for (const auto& c : cl) {
- o << " " << c << "\n";
- }
- o << "}";
- return o;
+ o << "ConstructList{\n";
+ for (const auto& c : cl) {
+ o << " " << c << "\n";
+ }
+ o << "}";
+ return o;
}
/// Converts a construct list to a string.
/// @param cl the construct list
/// @returns the string representation
inline std::string ToString(const ConstructList& cl) {
- std::stringstream ss;
- ss << cl;
- return ss.str();
+ std::stringstream ss;
+ ss << cl;
+ return ss.str();
}
} // namespace tint::reader::spirv
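To make the half-open position ranges above concrete, a small sketch (illustrative only, not part of the patch; the IDs and positions are hypothetical):

#include "src/tint/reader/spirv/construct.h"

// Hypothetical values, for illustration only.
void ConstructSketch() {
    using tint::reader::spirv::Construct;
    // A loop spanning block positions [2, 5) whose WGSL scope also encloses the
    // associated continue construct, extending the scope to [2, 7).
    Construct loop(/*the_parent=*/nullptr, /*the_depth=*/1, Construct::kLoop,
                   /*the_begin_id=*/10, /*the_end_id=*/50,
                   /*the_begin_pos=*/2, /*the_end_pos=*/5, /*the_scope_end_pos=*/7);
    bool in_block = loop.ContainsPos(4);       // true: 2 <= 4 && 4 < 5
    bool in_scope = loop.ScopeContainsPos(6);  // true: 2 <= 6 && 6 < 7
    (void)in_block;
    (void)in_scope;
}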
diff --git a/chromium/third_party/dawn/src/tint/reader/spirv/entry_point_info.h b/chromium/third_party/dawn/src/tint/reader/spirv/entry_point_info.h
index 374bd8302a8..bc13759f008 100644
--- a/chromium/third_party/dawn/src/tint/reader/spirv/entry_point_info.h
+++ b/chromium/third_party/dawn/src/tint/reader/spirv/entry_point_info.h
@@ -24,66 +24,66 @@ namespace tint::reader::spirv {
/// The size of an integer-coordinate grid, in the x, y, and z dimensions.
struct GridSize {
- /// x value
- uint32_t x = 0;
- /// y value
- uint32_t y = 0;
- /// z value
- uint32_t z = 0;
+ /// x value
+ uint32_t x = 0;
+ /// y value
+ uint32_t y = 0;
+ /// z value
+ uint32_t z = 0;
};
/// Entry point information for a function
struct EntryPointInfo {
- /// Constructor.
- /// @param the_name the name of the entry point
- /// @param the_stage the pipeline stage
- /// @param the_owns_inner_implementation if true, this entry point is
- /// responsible for generating the inner implementation function.
- /// @param the_inner_name the name of the inner implementation function of the
- /// entry point
- /// @param the_inputs list of IDs for Input variables used by the shader
- /// @param the_outputs list of IDs for Output variables used by the shader
- /// @param the_wg_size the workgroup_size, for a compute shader
- EntryPointInfo(std::string the_name,
- ast::PipelineStage the_stage,
- bool the_owns_inner_implementation,
- std::string the_inner_name,
- std::vector<uint32_t>&& the_inputs,
- std::vector<uint32_t>&& the_outputs,
- GridSize the_wg_size);
- /// Copy constructor
- /// @param other the other entry point info to be built from
- EntryPointInfo(const EntryPointInfo& other);
- /// Destructor
- ~EntryPointInfo();
+ /// Constructor.
+ /// @param the_name the name of the entry point
+ /// @param the_stage the pipeline stage
+ /// @param the_owns_inner_implementation if true, this entry point is
+ /// responsible for generating the inner implementation function.
+ /// @param the_inner_name the name of the inner implementation function of the
+ /// entry point
+ /// @param the_inputs list of IDs for Input variables used by the shader
+ /// @param the_outputs list of IDs for Output variables used by the shader
+ /// @param the_wg_size the workgroup_size, for a compute shader
+ EntryPointInfo(std::string the_name,
+ ast::PipelineStage the_stage,
+ bool the_owns_inner_implementation,
+ std::string the_inner_name,
+ std::vector<uint32_t>&& the_inputs,
+ std::vector<uint32_t>&& the_outputs,
+ GridSize the_wg_size);
+ /// Copy constructor
+ /// @param other the other entry point info to be built from
+ EntryPointInfo(const EntryPointInfo& other);
+ /// Destructor
+ ~EntryPointInfo();
- /// The entry point name.
- /// In the WGSL output, this function will have pipeline inputs and outputs
- /// as parameters. This function will store them into Private variables,
- /// and then call the "inner" function, named by the next memeber.
- /// Then outputs are copied from the private variables to the return value.
- std::string name;
- /// The entry point stage
- ast::PipelineStage stage = ast::PipelineStage::kNone;
+ /// The entry point name.
+ /// In the WGSL output, this function will have pipeline inputs and outputs
+ /// as parameters. This function will store them into Private variables,
+ /// and then call the "inner" function, named by the next memeber.
+ /// Then outputs are copied from the private variables to the return value.
+ std::string name;
+ /// The entry point stage
+ ast::PipelineStage stage = ast::PipelineStage::kNone;
- /// True when this entry point is responsible for generating the
- /// inner implementation function. False when this is the second entry
- /// point encountered for the same function in SPIR-V. It's unusual, but
- /// possible for the same function to be the implementation for multiple
- /// entry points.
- bool owns_inner_implementation;
- /// The name of the inner implementation function of the entry point.
- std::string inner_name;
- /// IDs of pipeline input variables, sorted and without duplicates.
- std::vector<uint32_t> inputs;
- /// IDs of pipeline output variables, sorted and without duplicates.
- std::vector<uint32_t> outputs;
+ /// True when this entry point is responsible for generating the
+ /// inner implementation function. False when this is the second entry
+ /// point encountered for the same function in SPIR-V. It's unusual, but
+ /// possible for the same function to be the implementation for multiple
+ /// entry points.
+ bool owns_inner_implementation;
+ /// The name of the inner implementation function of the entry point.
+ std::string inner_name;
+ /// IDs of pipeline input variables, sorted and without duplicates.
+ std::vector<uint32_t> inputs;
+ /// IDs of pipeline output variables, sorted and without duplicates.
+ std::vector<uint32_t> outputs;
- /// If this is a compute shader, this is the workgroup size in the x, y,
- /// and z dimensions set via LocalSize, or via the composite value
- /// decorated as the WorkgroupSize BuiltIn. The WorkgroupSize builtin
- /// takes priority.
- GridSize workgroup_size;
+ /// If this is a compute shader, this is the workgroup size in the x, y,
+ /// and z dimensions set via LocalSize, or via the composite value
+ /// decorated as the WorkgroupSize BuiltIn. The WorkgroupSize builtin
+ /// takes priority.
+ GridSize workgroup_size;
};
} // namespace tint::reader::spirv
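For reference, how the constructor parameters above map onto the fields (illustrative only, not part of the patch; the names and variable IDs are hypothetical):

#include <cstdint>
#include <vector>

#include "src/tint/reader/spirv/entry_point_info.h"

// Hypothetical compute entry point, for illustration only.
tint::reader::spirv::EntryPointInfo MakeSketchEntryPoint() {
    using tint::reader::spirv::EntryPointInfo;
    using tint::reader::spirv::GridSize;
    // "main" is the wrapper that owns the inner implementation "main_inner",
    // reads Input variables %1 and %2, writes Output variable %3, and runs
    // with an 8x8x1 workgroup size.
    return EntryPointInfo("main", tint::ast::PipelineStage::kCompute,
                          /*the_owns_inner_implementation=*/true, "main_inner",
                          std::vector<uint32_t>{1, 2}, std::vector<uint32_t>{3},
                          GridSize{8, 8, 1});
}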
diff --git a/chromium/third_party/dawn/src/tint/reader/spirv/enum_converter.cc b/chromium/third_party/dawn/src/tint/reader/spirv/enum_converter.cc
index 2cd1daa883d..1d58eee8dcd 100644
--- a/chromium/third_party/dawn/src/tint/reader/spirv/enum_converter.cc
+++ b/chromium/third_party/dawn/src/tint/reader/spirv/enum_converter.cc
@@ -21,158 +21,158 @@ EnumConverter::EnumConverter(const FailStream& fs) : fail_stream_(fs) {}
EnumConverter::~EnumConverter() = default;
ast::PipelineStage EnumConverter::ToPipelineStage(SpvExecutionModel model) {
- switch (model) {
- case SpvExecutionModelVertex:
- return ast::PipelineStage::kVertex;
- case SpvExecutionModelFragment:
- return ast::PipelineStage::kFragment;
- case SpvExecutionModelGLCompute:
- return ast::PipelineStage::kCompute;
- default:
- break;
- }
-
- Fail() << "unknown SPIR-V execution model: " << uint32_t(model);
- return ast::PipelineStage::kNone;
+ switch (model) {
+ case SpvExecutionModelVertex:
+ return ast::PipelineStage::kVertex;
+ case SpvExecutionModelFragment:
+ return ast::PipelineStage::kFragment;
+ case SpvExecutionModelGLCompute:
+ return ast::PipelineStage::kCompute;
+ default:
+ break;
+ }
+
+ Fail() << "unknown SPIR-V execution model: " << uint32_t(model);
+ return ast::PipelineStage::kNone;
}
ast::StorageClass EnumConverter::ToStorageClass(const SpvStorageClass sc) {
- switch (sc) {
- case SpvStorageClassInput:
- return ast::StorageClass::kInput;
- case SpvStorageClassOutput:
- return ast::StorageClass::kOutput;
- case SpvStorageClassUniform:
- return ast::StorageClass::kUniform;
- case SpvStorageClassWorkgroup:
- return ast::StorageClass::kWorkgroup;
- case SpvStorageClassUniformConstant:
- return ast::StorageClass::kNone;
- case SpvStorageClassStorageBuffer:
- return ast::StorageClass::kStorage;
- case SpvStorageClassPrivate:
- return ast::StorageClass::kPrivate;
- case SpvStorageClassFunction:
- return ast::StorageClass::kFunction;
- default:
- break;
- }
-
- Fail() << "unknown SPIR-V storage class: " << uint32_t(sc);
- return ast::StorageClass::kInvalid;
+ switch (sc) {
+ case SpvStorageClassInput:
+ return ast::StorageClass::kInput;
+ case SpvStorageClassOutput:
+ return ast::StorageClass::kOutput;
+ case SpvStorageClassUniform:
+ return ast::StorageClass::kUniform;
+ case SpvStorageClassWorkgroup:
+ return ast::StorageClass::kWorkgroup;
+ case SpvStorageClassUniformConstant:
+ return ast::StorageClass::kNone;
+ case SpvStorageClassStorageBuffer:
+ return ast::StorageClass::kStorage;
+ case SpvStorageClassPrivate:
+ return ast::StorageClass::kPrivate;
+ case SpvStorageClassFunction:
+ return ast::StorageClass::kFunction;
+ default:
+ break;
+ }
+
+ Fail() << "unknown SPIR-V storage class: " << uint32_t(sc);
+ return ast::StorageClass::kInvalid;
}
ast::Builtin EnumConverter::ToBuiltin(SpvBuiltIn b) {
- switch (b) {
- case SpvBuiltInPosition:
- return ast::Builtin::kPosition;
- case SpvBuiltInVertexIndex:
- return ast::Builtin::kVertexIndex;
- case SpvBuiltInInstanceIndex:
- return ast::Builtin::kInstanceIndex;
- case SpvBuiltInFrontFacing:
- return ast::Builtin::kFrontFacing;
- case SpvBuiltInFragCoord:
- return ast::Builtin::kPosition;
- case SpvBuiltInFragDepth:
- return ast::Builtin::kFragDepth;
- case SpvBuiltInLocalInvocationId:
- return ast::Builtin::kLocalInvocationId;
- case SpvBuiltInLocalInvocationIndex:
- return ast::Builtin::kLocalInvocationIndex;
- case SpvBuiltInGlobalInvocationId:
- return ast::Builtin::kGlobalInvocationId;
- case SpvBuiltInWorkgroupId:
- return ast::Builtin::kWorkgroupId;
- case SpvBuiltInSampleId:
- return ast::Builtin::kSampleIndex;
- case SpvBuiltInSampleMask:
- return ast::Builtin::kSampleMask;
- default:
- break;
- }
-
- Fail() << "unknown SPIR-V builtin: " << uint32_t(b);
- return ast::Builtin::kNone;
+ switch (b) {
+ case SpvBuiltInPosition:
+ return ast::Builtin::kPosition;
+ case SpvBuiltInVertexIndex:
+ return ast::Builtin::kVertexIndex;
+ case SpvBuiltInInstanceIndex:
+ return ast::Builtin::kInstanceIndex;
+ case SpvBuiltInFrontFacing:
+ return ast::Builtin::kFrontFacing;
+ case SpvBuiltInFragCoord:
+ return ast::Builtin::kPosition;
+ case SpvBuiltInFragDepth:
+ return ast::Builtin::kFragDepth;
+ case SpvBuiltInLocalInvocationId:
+ return ast::Builtin::kLocalInvocationId;
+ case SpvBuiltInLocalInvocationIndex:
+ return ast::Builtin::kLocalInvocationIndex;
+ case SpvBuiltInGlobalInvocationId:
+ return ast::Builtin::kGlobalInvocationId;
+ case SpvBuiltInWorkgroupId:
+ return ast::Builtin::kWorkgroupId;
+ case SpvBuiltInSampleId:
+ return ast::Builtin::kSampleIndex;
+ case SpvBuiltInSampleMask:
+ return ast::Builtin::kSampleMask;
+ default:
+ break;
+ }
+
+ Fail() << "unknown SPIR-V builtin: " << uint32_t(b);
+ return ast::Builtin::kNone;
}
ast::TextureDimension EnumConverter::ToDim(SpvDim dim, bool arrayed) {
- if (arrayed) {
+ if (arrayed) {
+ switch (dim) {
+ case SpvDim2D:
+ return ast::TextureDimension::k2dArray;
+ case SpvDimCube:
+ return ast::TextureDimension::kCubeArray;
+ default:
+ break;
+ }
+ Fail() << "arrayed dimension must be 2D or Cube. Got " << int(dim);
+ return ast::TextureDimension::kNone;
+ }
+ // Assume non-arrayed
switch (dim) {
- case SpvDim2D:
- return ast::TextureDimension::k2dArray;
- case SpvDimCube:
- return ast::TextureDimension::kCubeArray;
- default:
- break;
+ case SpvDim1D:
+ return ast::TextureDimension::k1d;
+ case SpvDim2D:
+ return ast::TextureDimension::k2d;
+ case SpvDim3D:
+ return ast::TextureDimension::k3d;
+ case SpvDimCube:
+ return ast::TextureDimension::kCube;
+ default:
+ break;
}
- Fail() << "arrayed dimension must be 2D or Cube. Got " << int(dim);
+ Fail() << "invalid dimension: " << int(dim);
return ast::TextureDimension::kNone;
- }
- // Assume non-arrayed
- switch (dim) {
- case SpvDim1D:
- return ast::TextureDimension::k1d;
- case SpvDim2D:
- return ast::TextureDimension::k2d;
- case SpvDim3D:
- return ast::TextureDimension::k3d;
- case SpvDimCube:
- return ast::TextureDimension::kCube;
- default:
- break;
- }
- Fail() << "invalid dimension: " << int(dim);
- return ast::TextureDimension::kNone;
}
ast::TexelFormat EnumConverter::ToTexelFormat(SpvImageFormat fmt) {
- switch (fmt) {
- case SpvImageFormatUnknown:
- return ast::TexelFormat::kNone;
-
- // 8 bit channels
- case SpvImageFormatRgba8:
- return ast::TexelFormat::kRgba8Unorm;
- case SpvImageFormatRgba8Snorm:
- return ast::TexelFormat::kRgba8Snorm;
- case SpvImageFormatRgba8ui:
- return ast::TexelFormat::kRgba8Uint;
- case SpvImageFormatRgba8i:
- return ast::TexelFormat::kRgba8Sint;
-
- // 16 bit channels
- case SpvImageFormatRgba16ui:
- return ast::TexelFormat::kRgba16Uint;
- case SpvImageFormatRgba16i:
- return ast::TexelFormat::kRgba16Sint;
- case SpvImageFormatRgba16f:
- return ast::TexelFormat::kRgba16Float;
-
- // 32 bit channels
- case SpvImageFormatR32ui:
- return ast::TexelFormat::kR32Uint;
- case SpvImageFormatR32i:
- return ast::TexelFormat::kR32Sint;
- case SpvImageFormatR32f:
- return ast::TexelFormat::kR32Float;
- case SpvImageFormatRg32ui:
- return ast::TexelFormat::kRg32Uint;
- case SpvImageFormatRg32i:
- return ast::TexelFormat::kRg32Sint;
- case SpvImageFormatRg32f:
- return ast::TexelFormat::kRg32Float;
- case SpvImageFormatRgba32ui:
- return ast::TexelFormat::kRgba32Uint;
- case SpvImageFormatRgba32i:
- return ast::TexelFormat::kRgba32Sint;
- case SpvImageFormatRgba32f:
- return ast::TexelFormat::kRgba32Float;
- default:
- break;
- }
- Fail() << "invalid image format: " << int(fmt);
- return ast::TexelFormat::kNone;
+ switch (fmt) {
+ case SpvImageFormatUnknown:
+ return ast::TexelFormat::kNone;
+
+ // 8 bit channels
+ case SpvImageFormatRgba8:
+ return ast::TexelFormat::kRgba8Unorm;
+ case SpvImageFormatRgba8Snorm:
+ return ast::TexelFormat::kRgba8Snorm;
+ case SpvImageFormatRgba8ui:
+ return ast::TexelFormat::kRgba8Uint;
+ case SpvImageFormatRgba8i:
+ return ast::TexelFormat::kRgba8Sint;
+
+ // 16 bit channels
+ case SpvImageFormatRgba16ui:
+ return ast::TexelFormat::kRgba16Uint;
+ case SpvImageFormatRgba16i:
+ return ast::TexelFormat::kRgba16Sint;
+ case SpvImageFormatRgba16f:
+ return ast::TexelFormat::kRgba16Float;
+
+ // 32 bit channels
+ case SpvImageFormatR32ui:
+ return ast::TexelFormat::kR32Uint;
+ case SpvImageFormatR32i:
+ return ast::TexelFormat::kR32Sint;
+ case SpvImageFormatR32f:
+ return ast::TexelFormat::kR32Float;
+ case SpvImageFormatRg32ui:
+ return ast::TexelFormat::kRg32Uint;
+ case SpvImageFormatRg32i:
+ return ast::TexelFormat::kRg32Sint;
+ case SpvImageFormatRg32f:
+ return ast::TexelFormat::kRg32Float;
+ case SpvImageFormatRgba32ui:
+ return ast::TexelFormat::kRgba32Uint;
+ case SpvImageFormatRgba32i:
+ return ast::TexelFormat::kRgba32Sint;
+ case SpvImageFormatRgba32f:
+ return ast::TexelFormat::kRgba32Float;
+ default:
+ break;
+ }
+ Fail() << "invalid image format: " << int(fmt);
+ return ast::TexelFormat::kNone;
}
} // namespace tint::reader::spirv
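A short sketch of how this converter is driven, mirroring the test fixtures later in this change (illustrative only, not part of the patch; the function name is hypothetical):

#include <sstream>

#include "src/tint/reader/spirv/enum_converter.h"

// Hypothetical driver, for illustration only.
void EnumConverterSketch() {
    bool success = true;
    std::stringstream errors;
    tint::reader::spirv::FailStream fail_stream(&success, &errors);
    tint::reader::spirv::EnumConverter converter(fail_stream);

    // Supported values convert cleanly and leave `success` untouched.
    auto stage = converter.ToPipelineStage(SpvExecutionModelFragment);  // kFragment
    // Unsupported values flip `success` to false and append a diagnostic.
    auto dim = converter.ToDim(SpvDimRect, /*arrayed=*/false);          // kNone
    (void)stage;
    (void)dim;
}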
diff --git a/chromium/third_party/dawn/src/tint/reader/spirv/enum_converter.h b/chromium/third_party/dawn/src/tint/reader/spirv/enum_converter.h
index 4a31cfec11d..ac86f718961 100644
--- a/chromium/third_party/dawn/src/tint/reader/spirv/enum_converter.h
+++ b/chromium/third_party/dawn/src/tint/reader/spirv/enum_converter.h
@@ -20,56 +20,56 @@
#include "src/tint/ast/pipeline_stage.h"
#include "src/tint/ast/storage_class.h"
#include "src/tint/reader/spirv/fail_stream.h"
-#include "src/tint/sem/storage_texture_type.h"
+#include "src/tint/sem/storage_texture.h"
namespace tint::reader::spirv {
/// A converter from SPIR-V enums to Tint AST enums.
class EnumConverter {
- public:
- /// Creates a new enum converter.
- /// @param fail_stream the error reporting stream.
- explicit EnumConverter(const FailStream& fail_stream);
- /// Destructor
- ~EnumConverter();
+ public:
+ /// Creates a new enum converter.
+ /// @param fail_stream the error reporting stream.
+ explicit EnumConverter(const FailStream& fail_stream);
+ /// Destructor
+ ~EnumConverter();
- /// Converts a SPIR-V execution model to a Tint pipeline stage.
- /// On failure, logs an error and returns kNone
- /// @param model the SPIR-V entry point execution model
- /// @returns a Tint AST pipeline stage
- ast::PipelineStage ToPipelineStage(SpvExecutionModel model);
+ /// Converts a SPIR-V execution model to a Tint pipeline stage.
+ /// On failure, logs an error and returns kNone
+ /// @param model the SPIR-V entry point execution model
+ /// @returns a Tint AST pipeline stage
+ ast::PipelineStage ToPipelineStage(SpvExecutionModel model);
- /// Converts a SPIR-V storage class to a Tint storage class.
- /// On failure, logs an error and returns kNone
- /// @param sc the SPIR-V storage class
- /// @returns a Tint AST storage class
- ast::StorageClass ToStorageClass(const SpvStorageClass sc);
+ /// Converts a SPIR-V storage class to a Tint storage class.
+    /// On failure, logs an error and returns kInvalid
+ /// @param sc the SPIR-V storage class
+ /// @returns a Tint AST storage class
+ ast::StorageClass ToStorageClass(const SpvStorageClass sc);
- /// Converts a SPIR-V Builtin value a Tint Builtin.
- /// On failure, logs an error and returns kNone
- /// @param b the SPIR-V builtin
- /// @returns a Tint AST builtin
- ast::Builtin ToBuiltin(SpvBuiltIn b);
+    /// Converts a SPIR-V Builtin value to a Tint Builtin.
+ /// On failure, logs an error and returns kNone
+ /// @param b the SPIR-V builtin
+ /// @returns a Tint AST builtin
+ ast::Builtin ToBuiltin(SpvBuiltIn b);
- /// Converts a possibly arrayed SPIR-V Dim to a Tint texture dimension.
- /// On failure, logs an error and returns kNone
- /// @param dim the SPIR-V Dim value
- /// @param arrayed true if the texture is arrayed
- /// @returns a Tint AST texture dimension
- ast::TextureDimension ToDim(SpvDim dim, bool arrayed);
+ /// Converts a possibly arrayed SPIR-V Dim to a Tint texture dimension.
+ /// On failure, logs an error and returns kNone
+ /// @param dim the SPIR-V Dim value
+ /// @param arrayed true if the texture is arrayed
+ /// @returns a Tint AST texture dimension
+ ast::TextureDimension ToDim(SpvDim dim, bool arrayed);
- /// Converts a SPIR-V Image Format to a TexelFormat
- /// On failure, logs an error and returns kNone
- /// @param fmt the SPIR-V format
- /// @returns a Tint AST format
- ast::TexelFormat ToTexelFormat(SpvImageFormat fmt);
+ /// Converts a SPIR-V Image Format to a TexelFormat
+ /// On failure, logs an error and returns kNone
+ /// @param fmt the SPIR-V format
+ /// @returns a Tint AST format
+ ast::TexelFormat ToTexelFormat(SpvImageFormat fmt);
- private:
- /// Registers a failure and returns a stream for log diagnostics.
- /// @returns a failure stream
- FailStream Fail() { return fail_stream_.Fail(); }
+ private:
+ /// Registers a failure and returns a stream for log diagnostics.
+ /// @returns a failure stream
+ FailStream Fail() { return fail_stream_.Fail(); }
- FailStream fail_stream_;
+ FailStream fail_stream_;
};
} // namespace tint::reader::spirv
diff --git a/chromium/third_party/dawn/src/tint/reader/spirv/enum_converter_test.cc b/chromium/third_party/dawn/src/tint/reader/spirv/enum_converter_test.cc
index 6f9e2a17e56..791d7e352c4 100644
--- a/chromium/third_party/dawn/src/tint/reader/spirv/enum_converter_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/spirv/enum_converter_test.cc
@@ -24,179 +24,160 @@ namespace {
// Pipeline stage
struct PipelineStageCase {
- SpvExecutionModel model;
- bool expect_success;
- ast::PipelineStage expected;
+ SpvExecutionModel model;
+ bool expect_success;
+ ast::PipelineStage expected;
};
inline std::ostream& operator<<(std::ostream& out, PipelineStageCase psc) {
- out << "PipelineStageCase{ SpvExecutionModel:" << int(psc.model)
- << " expect_success?:" << int(psc.expect_success)
- << " expected:" << int(psc.expected) << "}";
- return out;
+ out << "PipelineStageCase{ SpvExecutionModel:" << int(psc.model)
+ << " expect_success?:" << int(psc.expect_success) << " expected:" << int(psc.expected)
+ << "}";
+ return out;
}
class SpvPipelineStageTest : public testing::TestWithParam<PipelineStageCase> {
- public:
- SpvPipelineStageTest()
- : success_(true),
- fail_stream_(&success_, &errors_),
- converter_(fail_stream_) {}
-
- std::string error() const { return errors_.str(); }
-
- protected:
- bool success_ = true;
- std::stringstream errors_;
- FailStream fail_stream_;
- EnumConverter converter_;
+ public:
+ SpvPipelineStageTest()
+ : success_(true), fail_stream_(&success_, &errors_), converter_(fail_stream_) {}
+
+ std::string error() const { return errors_.str(); }
+
+ protected:
+ bool success_ = true;
+ std::stringstream errors_;
+ FailStream fail_stream_;
+ EnumConverter converter_;
};
TEST_P(SpvPipelineStageTest, Samples) {
- const auto params = GetParam();
-
- const auto result = converter_.ToPipelineStage(params.model);
- EXPECT_EQ(success_, params.expect_success);
- if (params.expect_success) {
- EXPECT_EQ(result, params.expected);
- EXPECT_TRUE(error().empty());
- } else {
- EXPECT_EQ(result, params.expected);
- EXPECT_THAT(error(),
- ::testing::StartsWith("unknown SPIR-V execution model:"));
- }
+ const auto params = GetParam();
+
+ const auto result = converter_.ToPipelineStage(params.model);
+ EXPECT_EQ(success_, params.expect_success);
+ if (params.expect_success) {
+ EXPECT_EQ(result, params.expected);
+ EXPECT_TRUE(error().empty());
+ } else {
+ EXPECT_EQ(result, params.expected);
+ EXPECT_THAT(error(), ::testing::StartsWith("unknown SPIR-V execution model:"));
+ }
}
-INSTANTIATE_TEST_SUITE_P(
- EnumConverterGood,
- SpvPipelineStageTest,
- testing::Values(PipelineStageCase{SpvExecutionModelVertex, true,
- ast::PipelineStage::kVertex},
- PipelineStageCase{SpvExecutionModelFragment, true,
- ast::PipelineStage::kFragment},
- PipelineStageCase{SpvExecutionModelGLCompute, true,
- ast::PipelineStage::kCompute}));
+INSTANTIATE_TEST_SUITE_P(EnumConverterGood,
+ SpvPipelineStageTest,
+ testing::Values(PipelineStageCase{SpvExecutionModelVertex, true,
+ ast::PipelineStage::kVertex},
+ PipelineStageCase{SpvExecutionModelFragment, true,
+ ast::PipelineStage::kFragment},
+ PipelineStageCase{SpvExecutionModelGLCompute, true,
+ ast::PipelineStage::kCompute}));
-INSTANTIATE_TEST_SUITE_P(
- EnumConverterBad,
- SpvPipelineStageTest,
- testing::Values(PipelineStageCase{static_cast<SpvExecutionModel>(9999),
- false, ast::PipelineStage::kNone},
- PipelineStageCase{SpvExecutionModelTessellationControl,
- false, ast::PipelineStage::kNone}));
+INSTANTIATE_TEST_SUITE_P(EnumConverterBad,
+ SpvPipelineStageTest,
+ testing::Values(PipelineStageCase{static_cast<SpvExecutionModel>(9999),
+ false, ast::PipelineStage::kNone},
+ PipelineStageCase{SpvExecutionModelTessellationControl,
+ false, ast::PipelineStage::kNone}));
// Storage class
struct StorageClassCase {
- SpvStorageClass sc;
- bool expect_success;
- ast::StorageClass expected;
+ SpvStorageClass sc;
+ bool expect_success;
+ ast::StorageClass expected;
};
inline std::ostream& operator<<(std::ostream& out, StorageClassCase scc) {
- out << "StorageClassCase{ SpvStorageClass:" << int(scc.sc)
- << " expect_success?:" << int(scc.expect_success)
- << " expected:" << int(scc.expected) << "}";
- return out;
+ out << "StorageClassCase{ SpvStorageClass:" << int(scc.sc)
+ << " expect_success?:" << int(scc.expect_success) << " expected:" << int(scc.expected)
+ << "}";
+ return out;
}
class SpvStorageClassTest : public testing::TestWithParam<StorageClassCase> {
- public:
- SpvStorageClassTest()
- : success_(true),
- fail_stream_(&success_, &errors_),
- converter_(fail_stream_) {}
-
- std::string error() const { return errors_.str(); }
-
- protected:
- bool success_ = true;
- std::stringstream errors_;
- FailStream fail_stream_;
- EnumConverter converter_;
+ public:
+ SpvStorageClassTest()
+ : success_(true), fail_stream_(&success_, &errors_), converter_(fail_stream_) {}
+
+ std::string error() const { return errors_.str(); }
+
+ protected:
+ bool success_ = true;
+ std::stringstream errors_;
+ FailStream fail_stream_;
+ EnumConverter converter_;
};
TEST_P(SpvStorageClassTest, Samples) {
- const auto params = GetParam();
-
- const auto result = converter_.ToStorageClass(params.sc);
- EXPECT_EQ(success_, params.expect_success);
- if (params.expect_success) {
- EXPECT_EQ(result, params.expected);
- EXPECT_TRUE(error().empty());
- } else {
- EXPECT_EQ(result, params.expected);
- EXPECT_THAT(error(),
- ::testing::StartsWith("unknown SPIR-V storage class: "));
- }
+ const auto params = GetParam();
+
+ const auto result = converter_.ToStorageClass(params.sc);
+ EXPECT_EQ(success_, params.expect_success);
+ if (params.expect_success) {
+ EXPECT_EQ(result, params.expected);
+ EXPECT_TRUE(error().empty());
+ } else {
+ EXPECT_EQ(result, params.expected);
+ EXPECT_THAT(error(), ::testing::StartsWith("unknown SPIR-V storage class: "));
+ }
}
INSTANTIATE_TEST_SUITE_P(
EnumConverterGood,
SpvStorageClassTest,
- testing::Values(StorageClassCase{SpvStorageClassInput, true,
- ast::StorageClass::kInput},
- StorageClassCase{SpvStorageClassOutput, true,
- ast::StorageClass::kOutput},
- StorageClassCase{SpvStorageClassUniform, true,
- ast::StorageClass::kUniform},
- StorageClassCase{SpvStorageClassWorkgroup, true,
- ast::StorageClass::kWorkgroup},
- StorageClassCase{SpvStorageClassUniformConstant, true,
- ast::StorageClass::kNone},
- StorageClassCase{SpvStorageClassStorageBuffer, true,
- ast::StorageClass::kStorage},
- StorageClassCase{SpvStorageClassPrivate, true,
- ast::StorageClass::kPrivate},
- StorageClassCase{SpvStorageClassFunction, true,
- ast::StorageClass::kFunction}));
+ testing::Values(
+ StorageClassCase{SpvStorageClassInput, true, ast::StorageClass::kInput},
+ StorageClassCase{SpvStorageClassOutput, true, ast::StorageClass::kOutput},
+ StorageClassCase{SpvStorageClassUniform, true, ast::StorageClass::kUniform},
+ StorageClassCase{SpvStorageClassWorkgroup, true, ast::StorageClass::kWorkgroup},
+ StorageClassCase{SpvStorageClassUniformConstant, true, ast::StorageClass::kNone},
+ StorageClassCase{SpvStorageClassStorageBuffer, true, ast::StorageClass::kStorage},
+ StorageClassCase{SpvStorageClassPrivate, true, ast::StorageClass::kPrivate},
+ StorageClassCase{SpvStorageClassFunction, true, ast::StorageClass::kFunction}));
INSTANTIATE_TEST_SUITE_P(EnumConverterBad,
SpvStorageClassTest,
- testing::Values(StorageClassCase{
- static_cast<SpvStorageClass>(9999), false,
- ast::StorageClass::kInvalid}));
+ testing::Values(StorageClassCase{static_cast<SpvStorageClass>(9999), false,
+ ast::StorageClass::kInvalid}));
// Builtin
struct BuiltinCase {
- SpvBuiltIn builtin;
- bool expect_success;
- ast::Builtin expected;
+ SpvBuiltIn builtin;
+ bool expect_success;
+ ast::Builtin expected;
};
inline std::ostream& operator<<(std::ostream& out, BuiltinCase bc) {
- out << "BuiltinCase{ SpvBuiltIn:" << int(bc.builtin)
- << " expect_success?:" << int(bc.expect_success)
- << " expected:" << int(bc.expected) << "}";
- return out;
+ out << "BuiltinCase{ SpvBuiltIn:" << int(bc.builtin)
+ << " expect_success?:" << int(bc.expect_success) << " expected:" << int(bc.expected) << "}";
+ return out;
}
class SpvBuiltinTest : public testing::TestWithParam<BuiltinCase> {
- public:
- SpvBuiltinTest()
- : success_(true),
- fail_stream_(&success_, &errors_),
- converter_(fail_stream_) {}
-
- std::string error() const { return errors_.str(); }
-
- protected:
- bool success_ = true;
- std::stringstream errors_;
- FailStream fail_stream_;
- EnumConverter converter_;
+ public:
+ SpvBuiltinTest()
+ : success_(true), fail_stream_(&success_, &errors_), converter_(fail_stream_) {}
+
+ std::string error() const { return errors_.str(); }
+
+ protected:
+ bool success_ = true;
+ std::stringstream errors_;
+ FailStream fail_stream_;
+ EnumConverter converter_;
};
TEST_P(SpvBuiltinTest, Samples) {
- const auto params = GetParam();
-
- const auto result = converter_.ToBuiltin(params.builtin);
- EXPECT_EQ(success_, params.expect_success);
- if (params.expect_success) {
- EXPECT_EQ(result, params.expected);
- EXPECT_TRUE(error().empty());
- } else {
- EXPECT_EQ(result, params.expected);
- EXPECT_THAT(error(), ::testing::StartsWith("unknown SPIR-V builtin: "));
- }
+ const auto params = GetParam();
+
+ const auto result = converter_.ToBuiltin(params.builtin);
+ EXPECT_EQ(success_, params.expect_success);
+ if (params.expect_success) {
+ EXPECT_EQ(result, params.expected);
+ EXPECT_TRUE(error().empty());
+ } else {
+ EXPECT_EQ(result, params.expected);
+ EXPECT_THAT(error(), ::testing::StartsWith("unknown SPIR-V builtin: "));
+ }
}
INSTANTIATE_TEST_SUITE_P(
@@ -204,16 +185,12 @@ INSTANTIATE_TEST_SUITE_P(
SpvBuiltinTest,
testing::Values(
BuiltinCase{SpvBuiltInPosition, true, ast::Builtin::kPosition},
- BuiltinCase{SpvBuiltInInstanceIndex, true,
- ast::Builtin::kInstanceIndex},
+ BuiltinCase{SpvBuiltInInstanceIndex, true, ast::Builtin::kInstanceIndex},
BuiltinCase{SpvBuiltInFrontFacing, true, ast::Builtin::kFrontFacing},
BuiltinCase{SpvBuiltInFragCoord, true, ast::Builtin::kPosition},
- BuiltinCase{SpvBuiltInLocalInvocationId, true,
- ast::Builtin::kLocalInvocationId},
- BuiltinCase{SpvBuiltInLocalInvocationIndex, true,
- ast::Builtin::kLocalInvocationIndex},
- BuiltinCase{SpvBuiltInGlobalInvocationId, true,
- ast::Builtin::kGlobalInvocationId},
+ BuiltinCase{SpvBuiltInLocalInvocationId, true, ast::Builtin::kLocalInvocationId},
+ BuiltinCase{SpvBuiltInLocalInvocationIndex, true, ast::Builtin::kLocalInvocationIndex},
+ BuiltinCase{SpvBuiltInGlobalInvocationId, true, ast::Builtin::kGlobalInvocationId},
BuiltinCase{SpvBuiltInWorkgroupId, true, ast::Builtin::kWorkgroupId},
BuiltinCase{SpvBuiltInSampleId, true, ast::Builtin::kSampleIndex},
BuiltinCase{SpvBuiltInSampleMask, true, ast::Builtin::kSampleMask}));
@@ -221,136 +198,127 @@ INSTANTIATE_TEST_SUITE_P(
INSTANTIATE_TEST_SUITE_P(
EnumConverterGood_Output,
SpvBuiltinTest,
- testing::Values(
- BuiltinCase{SpvBuiltInPosition, true, ast::Builtin::kPosition},
- BuiltinCase{SpvBuiltInFragDepth, true, ast::Builtin::kFragDepth},
- BuiltinCase{SpvBuiltInSampleMask, true, ast::Builtin::kSampleMask}));
+ testing::Values(BuiltinCase{SpvBuiltInPosition, true, ast::Builtin::kPosition},
+ BuiltinCase{SpvBuiltInFragDepth, true, ast::Builtin::kFragDepth},
+ BuiltinCase{SpvBuiltInSampleMask, true, ast::Builtin::kSampleMask}));
INSTANTIATE_TEST_SUITE_P(
EnumConverterBad,
SpvBuiltinTest,
- testing::Values(
- BuiltinCase{static_cast<SpvBuiltIn>(9999), false, ast::Builtin::kNone},
- BuiltinCase{static_cast<SpvBuiltIn>(9999), false, ast::Builtin::kNone},
- BuiltinCase{SpvBuiltInNumWorkgroups, false, ast::Builtin::kNone}));
+ testing::Values(BuiltinCase{static_cast<SpvBuiltIn>(9999), false, ast::Builtin::kNone},
+ BuiltinCase{static_cast<SpvBuiltIn>(9999), false, ast::Builtin::kNone},
+ BuiltinCase{SpvBuiltInNumWorkgroups, false, ast::Builtin::kNone}));
// Dim
struct DimCase {
- SpvDim dim;
- bool arrayed;
- bool expect_success;
- ast::TextureDimension expected;
+ SpvDim dim;
+ bool arrayed;
+ bool expect_success;
+ ast::TextureDimension expected;
};
inline std::ostream& operator<<(std::ostream& out, DimCase dc) {
- out << "DimCase{ SpvDim:" << int(dc.dim) << " arrayed?:" << int(dc.arrayed)
- << " expect_success?:" << int(dc.expect_success)
- << " expected:" << int(dc.expected) << "}";
- return out;
+ out << "DimCase{ SpvDim:" << int(dc.dim) << " arrayed?:" << int(dc.arrayed)
+ << " expect_success?:" << int(dc.expect_success) << " expected:" << int(dc.expected) << "}";
+ return out;
}
class SpvDimTest : public testing::TestWithParam<DimCase> {
- public:
- SpvDimTest()
- : success_(true),
- fail_stream_(&success_, &errors_),
- converter_(fail_stream_) {}
-
- std::string error() const { return errors_.str(); }
-
- protected:
- bool success_ = true;
- std::stringstream errors_;
- FailStream fail_stream_;
- EnumConverter converter_;
+ public:
+ SpvDimTest() : success_(true), fail_stream_(&success_, &errors_), converter_(fail_stream_) {}
+
+ std::string error() const { return errors_.str(); }
+
+ protected:
+ bool success_ = true;
+ std::stringstream errors_;
+ FailStream fail_stream_;
+ EnumConverter converter_;
};
TEST_P(SpvDimTest, Samples) {
- const auto params = GetParam();
-
- const auto result = converter_.ToDim(params.dim, params.arrayed);
- EXPECT_EQ(success_, params.expect_success);
- if (params.expect_success) {
- EXPECT_EQ(result, params.expected);
- EXPECT_TRUE(error().empty());
- } else {
- EXPECT_EQ(result, params.expected);
- EXPECT_THAT(error(), ::testing::HasSubstr("dimension"));
- }
+ const auto params = GetParam();
+
+ const auto result = converter_.ToDim(params.dim, params.arrayed);
+ EXPECT_EQ(success_, params.expect_success);
+ if (params.expect_success) {
+ EXPECT_EQ(result, params.expected);
+ EXPECT_TRUE(error().empty());
+ } else {
+ EXPECT_EQ(result, params.expected);
+ EXPECT_THAT(error(), ::testing::HasSubstr("dimension"));
+ }
}
-INSTANTIATE_TEST_SUITE_P(
- EnumConverterGood,
- SpvDimTest,
- testing::Values(
- // Non-arrayed
- DimCase{SpvDim1D, false, true, ast::TextureDimension::k1d},
- DimCase{SpvDim2D, false, true, ast::TextureDimension::k2d},
- DimCase{SpvDim3D, false, true, ast::TextureDimension::k3d},
- DimCase{SpvDimCube, false, true, ast::TextureDimension::kCube},
- // Arrayed
- DimCase{SpvDim2D, true, true, ast::TextureDimension::k2dArray},
- DimCase{SpvDimCube, true, true, ast::TextureDimension::kCubeArray}));
+INSTANTIATE_TEST_SUITE_P(EnumConverterGood,
+ SpvDimTest,
+ testing::Values(
+ // Non-arrayed
+ DimCase{SpvDim1D, false, true, ast::TextureDimension::k1d},
+ DimCase{SpvDim2D, false, true, ast::TextureDimension::k2d},
+ DimCase{SpvDim3D, false, true, ast::TextureDimension::k3d},
+ DimCase{SpvDimCube, false, true, ast::TextureDimension::kCube},
+ // Arrayed
+ DimCase{SpvDim2D, true, true, ast::TextureDimension::k2dArray},
+ DimCase{SpvDimCube, true, true, ast::TextureDimension::kCubeArray}));
-INSTANTIATE_TEST_SUITE_P(
- EnumConverterBad,
- SpvDimTest,
- testing::Values(
- // Invalid SPIR-V dimensionality.
- DimCase{SpvDimMax, false, false, ast::TextureDimension::kNone},
- DimCase{SpvDimMax, true, false, ast::TextureDimension::kNone},
- // Vulkan non-arrayed dimensionalities not supported by WGSL.
- DimCase{SpvDimRect, false, false, ast::TextureDimension::kNone},
- DimCase{SpvDimBuffer, false, false, ast::TextureDimension::kNone},
- DimCase{SpvDimSubpassData, false, false, ast::TextureDimension::kNone},
- // Arrayed dimensionalities not supported by WGSL
- DimCase{SpvDim3D, true, false, ast::TextureDimension::kNone},
- DimCase{SpvDimRect, true, false, ast::TextureDimension::kNone},
- DimCase{SpvDimBuffer, true, false, ast::TextureDimension::kNone},
- DimCase{SpvDimSubpassData, true, false, ast::TextureDimension::kNone}));
+INSTANTIATE_TEST_SUITE_P(EnumConverterBad,
+ SpvDimTest,
+ testing::Values(
+ // Invalid SPIR-V dimensionality.
+ DimCase{SpvDimMax, false, false, ast::TextureDimension::kNone},
+ DimCase{SpvDimMax, true, false, ast::TextureDimension::kNone},
+ // Vulkan non-arrayed dimensionalities not supported by WGSL.
+ DimCase{SpvDimRect, false, false, ast::TextureDimension::kNone},
+ DimCase{SpvDimBuffer, false, false, ast::TextureDimension::kNone},
+ DimCase{SpvDimSubpassData, false, false, ast::TextureDimension::kNone},
+ // Arrayed dimensionalities not supported by WGSL
+ DimCase{SpvDim3D, true, false, ast::TextureDimension::kNone},
+ DimCase{SpvDimRect, true, false, ast::TextureDimension::kNone},
+ DimCase{SpvDimBuffer, true, false, ast::TextureDimension::kNone},
+ DimCase{SpvDimSubpassData, true, false,
+ ast::TextureDimension::kNone}));
// TexelFormat
struct TexelFormatCase {
- SpvImageFormat format;
- bool expect_success;
- ast::TexelFormat expected;
+ SpvImageFormat format;
+ bool expect_success;
+ ast::TexelFormat expected;
};
inline std::ostream& operator<<(std::ostream& out, TexelFormatCase ifc) {
- out << "TexelFormatCase{ SpvImageFormat:" << int(ifc.format)
- << " expect_success?:" << int(ifc.expect_success)
- << " expected:" << int(ifc.expected) << "}";
- return out;
+ out << "TexelFormatCase{ SpvImageFormat:" << int(ifc.format)
+ << " expect_success?:" << int(ifc.expect_success) << " expected:" << int(ifc.expected)
+ << "}";
+ return out;
}
class SpvImageFormatTest : public testing::TestWithParam<TexelFormatCase> {
- public:
- SpvImageFormatTest()
- : success_(true),
- fail_stream_(&success_, &errors_),
- converter_(fail_stream_) {}
-
- std::string error() const { return errors_.str(); }
-
- protected:
- bool success_ = true;
- std::stringstream errors_;
- FailStream fail_stream_;
- EnumConverter converter_;
+ public:
+ SpvImageFormatTest()
+ : success_(true), fail_stream_(&success_, &errors_), converter_(fail_stream_) {}
+
+ std::string error() const { return errors_.str(); }
+
+ protected:
+ bool success_ = true;
+ std::stringstream errors_;
+ FailStream fail_stream_;
+ EnumConverter converter_;
};
TEST_P(SpvImageFormatTest, Samples) {
- const auto params = GetParam();
-
- const auto result = converter_.ToTexelFormat(params.format);
- EXPECT_EQ(success_, params.expect_success) << params;
- if (params.expect_success) {
- EXPECT_EQ(result, params.expected);
- EXPECT_TRUE(error().empty());
- } else {
- EXPECT_EQ(result, params.expected);
- EXPECT_THAT(error(), ::testing::StartsWith("invalid image format: "));
- }
+ const auto params = GetParam();
+
+ const auto result = converter_.ToTexelFormat(params.format);
+ EXPECT_EQ(success_, params.expect_success) << params;
+ if (params.expect_success) {
+ EXPECT_EQ(result, params.expected);
+ EXPECT_TRUE(error().empty());
+ } else {
+ EXPECT_EQ(result, params.expected);
+ EXPECT_THAT(error(), ::testing::StartsWith("invalid image format: "));
+ }
}
INSTANTIATE_TEST_SUITE_P(
@@ -360,39 +328,27 @@ INSTANTIATE_TEST_SUITE_P(
// Unknown. This is used for sampled images.
TexelFormatCase{SpvImageFormatUnknown, true, ast::TexelFormat::kNone},
// 8 bit channels
- TexelFormatCase{SpvImageFormatRgba8, true,
- ast::TexelFormat::kRgba8Unorm},
- TexelFormatCase{SpvImageFormatRgba8Snorm, true,
- ast::TexelFormat::kRgba8Snorm},
- TexelFormatCase{SpvImageFormatRgba8ui, true,
- ast::TexelFormat::kRgba8Uint},
- TexelFormatCase{SpvImageFormatRgba8i, true,
- ast::TexelFormat::kRgba8Sint},
+ TexelFormatCase{SpvImageFormatRgba8, true, ast::TexelFormat::kRgba8Unorm},
+ TexelFormatCase{SpvImageFormatRgba8Snorm, true, ast::TexelFormat::kRgba8Snorm},
+ TexelFormatCase{SpvImageFormatRgba8ui, true, ast::TexelFormat::kRgba8Uint},
+ TexelFormatCase{SpvImageFormatRgba8i, true, ast::TexelFormat::kRgba8Sint},
// 16 bit channels
- TexelFormatCase{SpvImageFormatRgba16ui, true,
- ast::TexelFormat::kRgba16Uint},
- TexelFormatCase{SpvImageFormatRgba16i, true,
- ast::TexelFormat::kRgba16Sint},
- TexelFormatCase{SpvImageFormatRgba16f, true,
- ast::TexelFormat::kRgba16Float},
+ TexelFormatCase{SpvImageFormatRgba16ui, true, ast::TexelFormat::kRgba16Uint},
+ TexelFormatCase{SpvImageFormatRgba16i, true, ast::TexelFormat::kRgba16Sint},
+ TexelFormatCase{SpvImageFormatRgba16f, true, ast::TexelFormat::kRgba16Float},
// 32 bit channels
// ... 1 channel
TexelFormatCase{SpvImageFormatR32ui, true, ast::TexelFormat::kR32Uint},
TexelFormatCase{SpvImageFormatR32i, true, ast::TexelFormat::kR32Sint},
TexelFormatCase{SpvImageFormatR32f, true, ast::TexelFormat::kR32Float},
// ... 2 channels
- TexelFormatCase{SpvImageFormatRg32ui, true,
- ast::TexelFormat::kRg32Uint},
+ TexelFormatCase{SpvImageFormatRg32ui, true, ast::TexelFormat::kRg32Uint},
TexelFormatCase{SpvImageFormatRg32i, true, ast::TexelFormat::kRg32Sint},
- TexelFormatCase{SpvImageFormatRg32f, true,
- ast::TexelFormat::kRg32Float},
+ TexelFormatCase{SpvImageFormatRg32f, true, ast::TexelFormat::kRg32Float},
// ... 4 channels
- TexelFormatCase{SpvImageFormatRgba32ui, true,
- ast::TexelFormat::kRgba32Uint},
- TexelFormatCase{SpvImageFormatRgba32i, true,
- ast::TexelFormat::kRgba32Sint},
- TexelFormatCase{SpvImageFormatRgba32f, true,
- ast::TexelFormat::kRgba32Float}));
+ TexelFormatCase{SpvImageFormatRgba32ui, true, ast::TexelFormat::kRgba32Uint},
+ TexelFormatCase{SpvImageFormatRgba32i, true, ast::TexelFormat::kRgba32Sint},
+ TexelFormatCase{SpvImageFormatRgba32f, true, ast::TexelFormat::kRgba32Float}));
INSTANTIATE_TEST_SUITE_P(
EnumConverterBad,
@@ -400,24 +356,20 @@ INSTANTIATE_TEST_SUITE_P(
testing::Values(
// Scanning in order from the SPIR-V spec.
TexelFormatCase{SpvImageFormatRg16f, false, ast::TexelFormat::kNone},
- TexelFormatCase{SpvImageFormatR11fG11fB10f, false,
- ast::TexelFormat::kNone},
+ TexelFormatCase{SpvImageFormatR11fG11fB10f, false, ast::TexelFormat::kNone},
TexelFormatCase{SpvImageFormatR16f, false, ast::TexelFormat::kNone},
TexelFormatCase{SpvImageFormatRgb10A2, false, ast::TexelFormat::kNone},
TexelFormatCase{SpvImageFormatRg16, false, ast::TexelFormat::kNone},
TexelFormatCase{SpvImageFormatRg8, false, ast::TexelFormat::kNone},
TexelFormatCase{SpvImageFormatR16, false, ast::TexelFormat::kNone},
TexelFormatCase{SpvImageFormatR8, false, ast::TexelFormat::kNone},
- TexelFormatCase{SpvImageFormatRgba16Snorm, false,
- ast::TexelFormat::kNone},
- TexelFormatCase{SpvImageFormatRg16Snorm, false,
- ast::TexelFormat::kNone},
+ TexelFormatCase{SpvImageFormatRgba16Snorm, false, ast::TexelFormat::kNone},
+ TexelFormatCase{SpvImageFormatRg16Snorm, false, ast::TexelFormat::kNone},
TexelFormatCase{SpvImageFormatRg8Snorm, false, ast::TexelFormat::kNone},
TexelFormatCase{SpvImageFormatRg16i, false, ast::TexelFormat::kNone},
TexelFormatCase{SpvImageFormatRg8i, false, ast::TexelFormat::kNone},
TexelFormatCase{SpvImageFormatR8i, false, ast::TexelFormat::kNone},
- TexelFormatCase{SpvImageFormatRgb10a2ui, false,
- ast::TexelFormat::kNone},
+ TexelFormatCase{SpvImageFormatRgb10a2ui, false, ast::TexelFormat::kNone},
TexelFormatCase{SpvImageFormatRg16ui, false, ast::TexelFormat::kNone},
TexelFormatCase{SpvImageFormatRg8ui, false, ast::TexelFormat::kNone}));
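A compact sketch of driving EnumConverter by hand, mirroring the fixtures above; the include path and namespace are assumed from the surrounding file layout, and enum_converter.h is assumed to pull in FailStream and the SPIR-V enums, as the fixture implies:

#include <sstream>

#include "src/tint/reader/spirv/enum_converter.h"  // assumed path, alongside the test above

int main() {
    bool success = true;
    std::stringstream errors;
    tint::reader::spirv::FailStream fail_stream(&success, &errors);
    tint::reader::spirv::EnumConverter converter(fail_stream);

    // A recognised builtin maps straight onto the WGSL AST enum...
    auto good = converter.ToBuiltin(SpvBuiltInPosition);            // ast::Builtin::kPosition
    // ...while an unrecognised value flips `success` to false and records
    // a message starting with "unknown SPIR-V builtin: ".
    auto bad = converter.ToBuiltin(static_cast<SpvBuiltIn>(9999));  // ast::Builtin::kNone
    (void)good;
    (void)bad;

    return success ? 0 : 1;  // returns 1 here, since the second conversion failed
}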
diff --git a/chromium/third_party/dawn/src/tint/reader/spirv/fail_stream.h b/chromium/third_party/dawn/src/tint/reader/spirv/fail_stream.h
index 6160de8f00f..382fe5f260c 100644
--- a/chromium/third_party/dawn/src/tint/reader/spirv/fail_stream.h
+++ b/chromium/third_party/dawn/src/tint/reader/spirv/fail_stream.h
@@ -23,46 +23,45 @@ namespace tint::reader::spirv {
/// and can be used to record failure by writing the false value
/// to a given pointer-to-bool.
class FailStream {
- public:
- /// Creates a new fail stream
- /// @param status_ptr where we will write false to indicate failure. Assumed
- /// to be a valid pointer to bool.
- /// @param out output stream where a message should be written to explain
- /// the failure
- FailStream(bool* status_ptr, std::ostream* out)
- : status_ptr_(status_ptr), out_(out) {}
- /// Copy constructor
- /// @param other the fail stream to clone
- FailStream(const FailStream& other) = default;
+ public:
+ /// Creates a new fail stream
+ /// @param status_ptr where we will write false to indicate failure. Assumed
+ /// to be a valid pointer to bool.
+ /// @param out output stream where a message should be written to explain
+ /// the failure
+ FailStream(bool* status_ptr, std::ostream* out) : status_ptr_(status_ptr), out_(out) {}
+ /// Copy constructor
+ /// @param other the fail stream to clone
+ FailStream(const FailStream& other) = default;
- /// Converts to a boolean status. A true result indicates success,
- /// and a false result indicates failure.
- /// @returns the status
- operator bool() const { return *status_ptr_; }
- /// Returns the current status value. This can be more readable
- /// the conversion operator.
- /// @returns the status
- bool status() const { return *status_ptr_; }
+ /// Converts to a boolean status. A true result indicates success,
+ /// and a false result indicates failure.
+ /// @returns the status
+ operator bool() const { return *status_ptr_; }
+ /// Returns the current status value. This can be more readable
+    /// than the conversion operator.
+ /// @returns the status
+ bool status() const { return *status_ptr_; }
- /// Records failure.
- /// @returns a FailStream
- FailStream& Fail() {
- *status_ptr_ = false;
- return *this;
- }
+ /// Records failure.
+ /// @returns a FailStream
+ FailStream& Fail() {
+ *status_ptr_ = false;
+ return *this;
+ }
- /// Appends the given value to the message output stream.
- /// @param val the value to write to the output stream.
- /// @returns this object
- template <typename T>
- FailStream& operator<<(const T& val) {
- *out_ << val;
- return *this;
- }
+ /// Appends the given value to the message output stream.
+ /// @param val the value to write to the output stream.
+ /// @returns this object
+ template <typename T>
+ FailStream& operator<<(const T& val) {
+ *out_ << val;
+ return *this;
+ }
- private:
- bool* status_ptr_;
- std::ostream* out_;
+ private:
+ bool* status_ptr_;
+ std::ostream* out_;
};
} // namespace tint::reader::spirv
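A minimal usage sketch of the class above, assuming only the interface shown in this header; the standalone main(), the sample message text, and the printing are illustrative:

#include <iostream>
#include <sstream>

#include "src/tint/reader/spirv/fail_stream.h"

int main() {
    bool ok = true;
    std::stringstream errors;
    tint::reader::spirv::FailStream fs(&ok, &errors);

    // Fail() flips the caller-owned status flag to false and returns the stream,
    // so a diagnostic can be appended in the same expression.
    fs.Fail() << "unknown SPIR-V builtin: " << 9999;

    std::cout << "status:  " << fs.status() << "\n";    // prints "status:  0"
    std::cout << "message: " << errors.str() << "\n";   // prints the accumulated text
    return 0;
}

Keeping the status flag and the message sink as caller-owned pointers is what lets the test fixtures above share a single bool and stringstream between FailStream and EnumConverter.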
diff --git a/chromium/third_party/dawn/src/tint/reader/spirv/fail_stream_test.cc b/chromium/third_party/dawn/src/tint/reader/spirv/fail_stream_test.cc
index 13c35e563c7..5b8701d54ea 100644
--- a/chromium/third_party/dawn/src/tint/reader/spirv/fail_stream_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/spirv/fail_stream_test.cc
@@ -24,45 +24,45 @@ using ::testing::Eq;
using FailStreamTest = ::testing::Test;
TEST_F(FailStreamTest, ConversionToBoolIsSameAsStatusMethod) {
- bool flag = true;
- FailStream fs(&flag, nullptr);
+ bool flag = true;
+ FailStream fs(&flag, nullptr);
- EXPECT_TRUE(fs.status());
- EXPECT_TRUE(bool(fs)); // NOLINT
- flag = false;
- EXPECT_FALSE(fs.status());
- EXPECT_FALSE(bool(fs)); // NOLINT
- flag = true;
- EXPECT_TRUE(fs.status());
- EXPECT_TRUE(bool(fs)); // NOLINT
+ EXPECT_TRUE(fs.status());
+ EXPECT_TRUE(bool(fs)); // NOLINT
+ flag = false;
+ EXPECT_FALSE(fs.status());
+ EXPECT_FALSE(bool(fs)); // NOLINT
+ flag = true;
+ EXPECT_TRUE(fs.status());
+ EXPECT_TRUE(bool(fs)); // NOLINT
}
TEST_F(FailStreamTest, FailMethodChangesStatusToFalse) {
- bool flag = true;
- FailStream fs(&flag, nullptr);
- EXPECT_TRUE(flag);
- EXPECT_TRUE(bool(fs)); // NOLINT
- fs.Fail();
- EXPECT_FALSE(flag);
- EXPECT_FALSE(bool(fs)); // NOLINT
+ bool flag = true;
+ FailStream fs(&flag, nullptr);
+ EXPECT_TRUE(flag);
+ EXPECT_TRUE(bool(fs)); // NOLINT
+ fs.Fail();
+ EXPECT_FALSE(flag);
+ EXPECT_FALSE(bool(fs)); // NOLINT
}
TEST_F(FailStreamTest, FailMethodReturnsSelf) {
- bool flag = true;
- FailStream fs(&flag, nullptr);
- FailStream& result = fs.Fail();
- EXPECT_THAT(&result, Eq(&fs));
+ bool flag = true;
+ FailStream fs(&flag, nullptr);
+ FailStream& result = fs.Fail();
+ EXPECT_THAT(&result, Eq(&fs));
}
TEST_F(FailStreamTest, ShiftOperatorAccumulatesValues) {
- bool flag = true;
- std::stringstream ss;
- FailStream fs(&flag, &ss);
+ bool flag = true;
+ std::stringstream ss;
+ FailStream fs(&flag, &ss);
- ss << "prefix ";
- fs << "cat " << 42;
+ ss << "prefix ";
+ fs << "cat " << 42;
- EXPECT_THAT(ss.str(), Eq("prefix cat 42"));
+ EXPECT_THAT(ss.str(), Eq("prefix cat 42"));
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/reader/spirv/function.cc b/chromium/third_party/dawn/src/tint/reader/spirv/function.cc
index 3ee9159c80a..d88773e2ed4 100644
--- a/chromium/third_party/dawn/src/tint/reader/spirv/function.cc
+++ b/chromium/third_party/dawn/src/tint/reader/spirv/function.cc
@@ -34,8 +34,8 @@
#include "src/tint/ast/unary_op_expression.h"
#include "src/tint/ast/variable_decl_statement.h"
#include "src/tint/sem/builtin_type.h"
-#include "src/tint/sem/depth_texture_type.h"
-#include "src/tint/sem/sampled_texture_type.h"
+#include "src/tint/sem/depth_texture.h"
+#include "src/tint/sem/sampled_texture.h"
// Terms:
// CFG: the control flow graph of the function, where basic blocks are the
@@ -137,6 +137,8 @@
// constructs
//
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::reader::spirv {
namespace {
@@ -148,111 +150,111 @@ constexpr uint32_t kMaxVectorLen = 4;
// @param ast_unary_op return parameter
// @returns true if it was a unary operation
bool GetUnaryOp(SpvOp opcode, ast::UnaryOp* ast_unary_op) {
- switch (opcode) {
- case SpvOpSNegate:
- case SpvOpFNegate:
- *ast_unary_op = ast::UnaryOp::kNegation;
- return true;
- case SpvOpLogicalNot:
- *ast_unary_op = ast::UnaryOp::kNot;
- return true;
- case SpvOpNot:
- *ast_unary_op = ast::UnaryOp::kComplement;
- return true;
- default:
- break;
- }
- return false;
+ switch (opcode) {
+ case SpvOpSNegate:
+ case SpvOpFNegate:
+ *ast_unary_op = ast::UnaryOp::kNegation;
+ return true;
+ case SpvOpLogicalNot:
+ *ast_unary_op = ast::UnaryOp::kNot;
+ return true;
+ case SpvOpNot:
+ *ast_unary_op = ast::UnaryOp::kComplement;
+ return true;
+ default:
+ break;
+ }
+ return false;
}
/// Converts a SPIR-V opcode for a WGSL builtin function, if there is a
/// direct translation. Returns nullptr otherwise.
/// @returns the WGSL builtin function name for the given opcode, or nullptr.
const char* GetUnaryBuiltInFunctionName(SpvOp opcode) {
- switch (opcode) {
- case SpvOpAny:
- return "any";
- case SpvOpAll:
- return "all";
- case SpvOpIsNan:
- return "isNan";
- case SpvOpIsInf:
- return "isInf";
- case SpvOpTranspose:
- return "transpose";
- default:
- break;
- }
- return nullptr;
+ switch (opcode) {
+ case SpvOpAny:
+ return "any";
+ case SpvOpAll:
+ return "all";
+ case SpvOpIsNan:
+ return "isNan";
+ case SpvOpIsInf:
+ return "isInf";
+ case SpvOpTranspose:
+ return "transpose";
+ default:
+ break;
+ }
+ return nullptr;
}
// Converts a SPIR-V opcode to its corresponding AST binary opcode, if any
// @param opcode SPIR-V opcode
// @returns the AST binary op for the given opcode, or kNone
ast::BinaryOp ConvertBinaryOp(SpvOp opcode) {
- switch (opcode) {
- case SpvOpIAdd:
- case SpvOpFAdd:
- return ast::BinaryOp::kAdd;
- case SpvOpISub:
- case SpvOpFSub:
- return ast::BinaryOp::kSubtract;
- case SpvOpIMul:
- case SpvOpFMul:
- case SpvOpVectorTimesScalar:
- case SpvOpMatrixTimesScalar:
- case SpvOpVectorTimesMatrix:
- case SpvOpMatrixTimesVector:
- case SpvOpMatrixTimesMatrix:
- return ast::BinaryOp::kMultiply;
- case SpvOpUDiv:
- case SpvOpSDiv:
- case SpvOpFDiv:
- return ast::BinaryOp::kDivide;
- case SpvOpUMod:
- case SpvOpSMod:
- case SpvOpFRem:
- return ast::BinaryOp::kModulo;
- case SpvOpLogicalEqual:
- case SpvOpIEqual:
- case SpvOpFOrdEqual:
- return ast::BinaryOp::kEqual;
- case SpvOpLogicalNotEqual:
- case SpvOpINotEqual:
- case SpvOpFOrdNotEqual:
- return ast::BinaryOp::kNotEqual;
- case SpvOpBitwiseAnd:
- return ast::BinaryOp::kAnd;
- case SpvOpBitwiseOr:
- return ast::BinaryOp::kOr;
- case SpvOpBitwiseXor:
- return ast::BinaryOp::kXor;
- case SpvOpLogicalAnd:
- return ast::BinaryOp::kAnd;
- case SpvOpLogicalOr:
- return ast::BinaryOp::kOr;
- case SpvOpUGreaterThan:
- case SpvOpSGreaterThan:
- case SpvOpFOrdGreaterThan:
- return ast::BinaryOp::kGreaterThan;
- case SpvOpUGreaterThanEqual:
- case SpvOpSGreaterThanEqual:
- case SpvOpFOrdGreaterThanEqual:
- return ast::BinaryOp::kGreaterThanEqual;
- case SpvOpULessThan:
- case SpvOpSLessThan:
- case SpvOpFOrdLessThan:
- return ast::BinaryOp::kLessThan;
- case SpvOpULessThanEqual:
- case SpvOpSLessThanEqual:
- case SpvOpFOrdLessThanEqual:
- return ast::BinaryOp::kLessThanEqual;
- default:
- break;
- }
- // It's not clear what OpSMod should map to.
- // https://bugs.chromium.org/p/tint/issues/detail?id=52
- return ast::BinaryOp::kNone;
+ switch (opcode) {
+ case SpvOpIAdd:
+ case SpvOpFAdd:
+ return ast::BinaryOp::kAdd;
+ case SpvOpISub:
+ case SpvOpFSub:
+ return ast::BinaryOp::kSubtract;
+ case SpvOpIMul:
+ case SpvOpFMul:
+ case SpvOpVectorTimesScalar:
+ case SpvOpMatrixTimesScalar:
+ case SpvOpVectorTimesMatrix:
+ case SpvOpMatrixTimesVector:
+ case SpvOpMatrixTimesMatrix:
+ return ast::BinaryOp::kMultiply;
+ case SpvOpUDiv:
+ case SpvOpSDiv:
+ case SpvOpFDiv:
+ return ast::BinaryOp::kDivide;
+ case SpvOpUMod:
+ case SpvOpSMod:
+ case SpvOpFRem:
+ return ast::BinaryOp::kModulo;
+ case SpvOpLogicalEqual:
+ case SpvOpIEqual:
+ case SpvOpFOrdEqual:
+ return ast::BinaryOp::kEqual;
+ case SpvOpLogicalNotEqual:
+ case SpvOpINotEqual:
+ case SpvOpFOrdNotEqual:
+ return ast::BinaryOp::kNotEqual;
+ case SpvOpBitwiseAnd:
+ return ast::BinaryOp::kAnd;
+ case SpvOpBitwiseOr:
+ return ast::BinaryOp::kOr;
+ case SpvOpBitwiseXor:
+ return ast::BinaryOp::kXor;
+ case SpvOpLogicalAnd:
+ return ast::BinaryOp::kAnd;
+ case SpvOpLogicalOr:
+ return ast::BinaryOp::kOr;
+ case SpvOpUGreaterThan:
+ case SpvOpSGreaterThan:
+ case SpvOpFOrdGreaterThan:
+ return ast::BinaryOp::kGreaterThan;
+ case SpvOpUGreaterThanEqual:
+ case SpvOpSGreaterThanEqual:
+ case SpvOpFOrdGreaterThanEqual:
+ return ast::BinaryOp::kGreaterThanEqual;
+ case SpvOpULessThan:
+ case SpvOpSLessThan:
+ case SpvOpFOrdLessThan:
+ return ast::BinaryOp::kLessThan;
+ case SpvOpULessThanEqual:
+ case SpvOpSLessThanEqual:
+ case SpvOpFOrdLessThanEqual:
+ return ast::BinaryOp::kLessThanEqual;
+ default:
+ break;
+ }
+ // It's not clear what OpSMod should map to.
+ // https://bugs.chromium.org/p/tint/issues/detail?id=52
+ return ast::BinaryOp::kNone;
}
// If the given SPIR-V opcode is a floating point unordered comparison,
@@ -261,23 +263,23 @@ ast::BinaryOp ConvertBinaryOp(SpvOp opcode) {
// @param opcode SPIR-V opcode
// @returns operation corresponding to negated version of the SPIR-V opcode
ast::BinaryOp NegatedFloatCompare(SpvOp opcode) {
- switch (opcode) {
- case SpvOpFUnordEqual:
- return ast::BinaryOp::kNotEqual;
- case SpvOpFUnordNotEqual:
- return ast::BinaryOp::kEqual;
- case SpvOpFUnordLessThan:
- return ast::BinaryOp::kGreaterThanEqual;
- case SpvOpFUnordLessThanEqual:
- return ast::BinaryOp::kGreaterThan;
- case SpvOpFUnordGreaterThan:
- return ast::BinaryOp::kLessThanEqual;
- case SpvOpFUnordGreaterThanEqual:
- return ast::BinaryOp::kLessThan;
- default:
- break;
- }
- return ast::BinaryOp::kNone;
+ switch (opcode) {
+ case SpvOpFUnordEqual:
+ return ast::BinaryOp::kNotEqual;
+ case SpvOpFUnordNotEqual:
+ return ast::BinaryOp::kEqual;
+ case SpvOpFUnordLessThan:
+ return ast::BinaryOp::kGreaterThanEqual;
+ case SpvOpFUnordLessThanEqual:
+ return ast::BinaryOp::kGreaterThan;
+ case SpvOpFUnordGreaterThan:
+ return ast::BinaryOp::kLessThanEqual;
+ case SpvOpFUnordGreaterThanEqual:
+ return ast::BinaryOp::kLessThan;
+ default:
+ break;
+ }
+ return ast::BinaryOp::kNone;
}
// Returns the WGSL standard library function for the given
@@ -285,286 +287,286 @@ ast::BinaryOp NegatedFloatCompare(SpvOp opcode) {
// and invalid opcodes map to the empty string.
// @returns the WGSL standard function name, or an empty string.
std::string GetGlslStd450FuncName(uint32_t ext_opcode) {
- switch (ext_opcode) {
- case GLSLstd450FAbs:
- case GLSLstd450SAbs:
- return "abs";
- case GLSLstd450Acos:
- return "acos";
- case GLSLstd450Asin:
- return "asin";
- case GLSLstd450Atan:
- return "atan";
- case GLSLstd450Atan2:
- return "atan2";
- case GLSLstd450Ceil:
- return "ceil";
- case GLSLstd450UClamp:
- case GLSLstd450SClamp:
- case GLSLstd450NClamp:
- case GLSLstd450FClamp: // FClamp is less prescriptive about NaN operands
- return "clamp";
- case GLSLstd450Cos:
- return "cos";
- case GLSLstd450Cosh:
- return "cosh";
- case GLSLstd450Cross:
- return "cross";
- case GLSLstd450Degrees:
- return "degrees";
- case GLSLstd450Distance:
- return "distance";
- case GLSLstd450Exp:
- return "exp";
- case GLSLstd450Exp2:
- return "exp2";
- case GLSLstd450FaceForward:
- return "faceForward";
- case GLSLstd450Floor:
- return "floor";
- case GLSLstd450Fma:
- return "fma";
- case GLSLstd450Fract:
- return "fract";
- case GLSLstd450InverseSqrt:
- return "inverseSqrt";
- case GLSLstd450Ldexp:
- return "ldexp";
- case GLSLstd450Length:
- return "length";
- case GLSLstd450Log:
- return "log";
- case GLSLstd450Log2:
- return "log2";
- case GLSLstd450NMax:
- case GLSLstd450FMax: // FMax is less prescriptive about NaN operands
- case GLSLstd450UMax:
- case GLSLstd450SMax:
- return "max";
- case GLSLstd450NMin:
- case GLSLstd450FMin: // FMin is less prescriptive about NaN operands
- case GLSLstd450UMin:
- case GLSLstd450SMin:
- return "min";
- case GLSLstd450FMix:
- return "mix";
- case GLSLstd450Normalize:
- return "normalize";
- case GLSLstd450PackSnorm4x8:
- return "pack4x8snorm";
- case GLSLstd450PackUnorm4x8:
- return "pack4x8unorm";
- case GLSLstd450PackSnorm2x16:
- return "pack2x16snorm";
- case GLSLstd450PackUnorm2x16:
- return "pack2x16unorm";
- case GLSLstd450PackHalf2x16:
- return "pack2x16float";
- case GLSLstd450Pow:
- return "pow";
- case GLSLstd450FSign:
- return "sign";
- case GLSLstd450Radians:
- return "radians";
- case GLSLstd450Reflect:
- return "reflect";
- case GLSLstd450Refract:
- return "refract";
- case GLSLstd450Round:
- case GLSLstd450RoundEven:
- return "round";
- case GLSLstd450Sin:
- return "sin";
- case GLSLstd450Sinh:
- return "sinh";
- case GLSLstd450SmoothStep:
- return "smoothStep";
- case GLSLstd450Sqrt:
- return "sqrt";
- case GLSLstd450Step:
- return "step";
- case GLSLstd450Tan:
- return "tan";
- case GLSLstd450Tanh:
- return "tanh";
- case GLSLstd450Trunc:
- return "trunc";
- case GLSLstd450UnpackSnorm4x8:
- return "unpack4x8snorm";
- case GLSLstd450UnpackUnorm4x8:
- return "unpack4x8unorm";
- case GLSLstd450UnpackSnorm2x16:
- return "unpack2x16snorm";
- case GLSLstd450UnpackUnorm2x16:
- return "unpack2x16unorm";
- case GLSLstd450UnpackHalf2x16:
- return "unpack2x16float";
-
- default:
- // TODO(dneto) - The following are not implemented.
- // They are grouped semantically, as in GLSL.std.450.h.
-
- case GLSLstd450SSign:
-
- case GLSLstd450Asinh:
- case GLSLstd450Acosh:
- case GLSLstd450Atanh:
-
- case GLSLstd450Determinant:
- case GLSLstd450MatrixInverse:
-
- case GLSLstd450Modf:
- case GLSLstd450ModfStruct:
- case GLSLstd450IMix:
-
- case GLSLstd450Frexp:
- case GLSLstd450FrexpStruct:
-
- case GLSLstd450PackDouble2x32:
- case GLSLstd450UnpackDouble2x32:
-
- case GLSLstd450FindILsb:
- case GLSLstd450FindSMsb:
- case GLSLstd450FindUMsb:
-
- case GLSLstd450InterpolateAtCentroid:
- case GLSLstd450InterpolateAtSample:
- case GLSLstd450InterpolateAtOffset:
- break;
- }
- return "";
+ switch (ext_opcode) {
+ case GLSLstd450FAbs:
+ case GLSLstd450SAbs:
+ return "abs";
+ case GLSLstd450Acos:
+ return "acos";
+ case GLSLstd450Asin:
+ return "asin";
+ case GLSLstd450Atan:
+ return "atan";
+ case GLSLstd450Atan2:
+ return "atan2";
+ case GLSLstd450Ceil:
+ return "ceil";
+ case GLSLstd450UClamp:
+ case GLSLstd450SClamp:
+ case GLSLstd450NClamp:
+ case GLSLstd450FClamp: // FClamp is less prescriptive about NaN operands
+ return "clamp";
+ case GLSLstd450Cos:
+ return "cos";
+ case GLSLstd450Cosh:
+ return "cosh";
+ case GLSLstd450Cross:
+ return "cross";
+ case GLSLstd450Degrees:
+ return "degrees";
+ case GLSLstd450Distance:
+ return "distance";
+ case GLSLstd450Exp:
+ return "exp";
+ case GLSLstd450Exp2:
+ return "exp2";
+ case GLSLstd450FaceForward:
+ return "faceForward";
+ case GLSLstd450Floor:
+ return "floor";
+ case GLSLstd450Fma:
+ return "fma";
+ case GLSLstd450Fract:
+ return "fract";
+ case GLSLstd450InverseSqrt:
+ return "inverseSqrt";
+ case GLSLstd450Ldexp:
+ return "ldexp";
+ case GLSLstd450Length:
+ return "length";
+ case GLSLstd450Log:
+ return "log";
+ case GLSLstd450Log2:
+ return "log2";
+ case GLSLstd450NMax:
+ case GLSLstd450FMax: // FMax is less prescriptive about NaN operands
+ case GLSLstd450UMax:
+ case GLSLstd450SMax:
+ return "max";
+ case GLSLstd450NMin:
+ case GLSLstd450FMin: // FMin is less prescriptive about NaN operands
+ case GLSLstd450UMin:
+ case GLSLstd450SMin:
+ return "min";
+ case GLSLstd450FMix:
+ return "mix";
+ case GLSLstd450Normalize:
+ return "normalize";
+ case GLSLstd450PackSnorm4x8:
+ return "pack4x8snorm";
+ case GLSLstd450PackUnorm4x8:
+ return "pack4x8unorm";
+ case GLSLstd450PackSnorm2x16:
+ return "pack2x16snorm";
+ case GLSLstd450PackUnorm2x16:
+ return "pack2x16unorm";
+ case GLSLstd450PackHalf2x16:
+ return "pack2x16float";
+ case GLSLstd450Pow:
+ return "pow";
+ case GLSLstd450FSign:
+ return "sign";
+ case GLSLstd450Radians:
+ return "radians";
+ case GLSLstd450Reflect:
+ return "reflect";
+ case GLSLstd450Refract:
+ return "refract";
+ case GLSLstd450Round:
+ case GLSLstd450RoundEven:
+ return "round";
+ case GLSLstd450Sin:
+ return "sin";
+ case GLSLstd450Sinh:
+ return "sinh";
+ case GLSLstd450SmoothStep:
+ return "smoothstep";
+ case GLSLstd450Sqrt:
+ return "sqrt";
+ case GLSLstd450Step:
+ return "step";
+ case GLSLstd450Tan:
+ return "tan";
+ case GLSLstd450Tanh:
+ return "tanh";
+ case GLSLstd450Trunc:
+ return "trunc";
+ case GLSLstd450UnpackSnorm4x8:
+ return "unpack4x8snorm";
+ case GLSLstd450UnpackUnorm4x8:
+ return "unpack4x8unorm";
+ case GLSLstd450UnpackSnorm2x16:
+ return "unpack2x16snorm";
+ case GLSLstd450UnpackUnorm2x16:
+ return "unpack2x16unorm";
+ case GLSLstd450UnpackHalf2x16:
+ return "unpack2x16float";
+
+ default:
+ // TODO(dneto) - The following are not implemented.
+ // They are grouped semantically, as in GLSL.std.450.h.
+
+ case GLSLstd450SSign:
+
+ case GLSLstd450Asinh:
+ case GLSLstd450Acosh:
+ case GLSLstd450Atanh:
+
+ case GLSLstd450Determinant:
+ case GLSLstd450MatrixInverse:
+
+ case GLSLstd450Modf:
+ case GLSLstd450ModfStruct:
+ case GLSLstd450IMix:
+
+ case GLSLstd450Frexp:
+ case GLSLstd450FrexpStruct:
+
+ case GLSLstd450PackDouble2x32:
+ case GLSLstd450UnpackDouble2x32:
+
+ case GLSLstd450FindILsb:
+ case GLSLstd450FindSMsb:
+ case GLSLstd450FindUMsb:
+
+ case GLSLstd450InterpolateAtCentroid:
+ case GLSLstd450InterpolateAtSample:
+ case GLSLstd450InterpolateAtOffset:
+ break;
+ }
+ return "";
}
// Returns the WGSL standard library function builtin for the
// given instruction, or sem::BuiltinType::kNone
sem::BuiltinType GetBuiltin(SpvOp opcode) {
- switch (opcode) {
- case SpvOpBitCount:
- return sem::BuiltinType::kCountOneBits;
- case SpvOpBitFieldInsert:
- return sem::BuiltinType::kInsertBits;
- case SpvOpBitFieldSExtract:
- case SpvOpBitFieldUExtract:
- return sem::BuiltinType::kExtractBits;
- case SpvOpBitReverse:
- return sem::BuiltinType::kReverseBits;
- case SpvOpDot:
- return sem::BuiltinType::kDot;
- case SpvOpDPdx:
- return sem::BuiltinType::kDpdx;
- case SpvOpDPdy:
- return sem::BuiltinType::kDpdy;
- case SpvOpFwidth:
- return sem::BuiltinType::kFwidth;
- case SpvOpDPdxFine:
- return sem::BuiltinType::kDpdxFine;
- case SpvOpDPdyFine:
- return sem::BuiltinType::kDpdyFine;
- case SpvOpFwidthFine:
- return sem::BuiltinType::kFwidthFine;
- case SpvOpDPdxCoarse:
- return sem::BuiltinType::kDpdxCoarse;
- case SpvOpDPdyCoarse:
- return sem::BuiltinType::kDpdyCoarse;
- case SpvOpFwidthCoarse:
- return sem::BuiltinType::kFwidthCoarse;
- default:
- break;
- }
- return sem::BuiltinType::kNone;
+ switch (opcode) {
+ case SpvOpBitCount:
+ return sem::BuiltinType::kCountOneBits;
+ case SpvOpBitFieldInsert:
+ return sem::BuiltinType::kInsertBits;
+ case SpvOpBitFieldSExtract:
+ case SpvOpBitFieldUExtract:
+ return sem::BuiltinType::kExtractBits;
+ case SpvOpBitReverse:
+ return sem::BuiltinType::kReverseBits;
+ case SpvOpDot:
+ return sem::BuiltinType::kDot;
+ case SpvOpDPdx:
+ return sem::BuiltinType::kDpdx;
+ case SpvOpDPdy:
+ return sem::BuiltinType::kDpdy;
+ case SpvOpFwidth:
+ return sem::BuiltinType::kFwidth;
+ case SpvOpDPdxFine:
+ return sem::BuiltinType::kDpdxFine;
+ case SpvOpDPdyFine:
+ return sem::BuiltinType::kDpdyFine;
+ case SpvOpFwidthFine:
+ return sem::BuiltinType::kFwidthFine;
+ case SpvOpDPdxCoarse:
+ return sem::BuiltinType::kDpdxCoarse;
+ case SpvOpDPdyCoarse:
+ return sem::BuiltinType::kDpdyCoarse;
+ case SpvOpFwidthCoarse:
+ return sem::BuiltinType::kFwidthCoarse;
+ default:
+ break;
+ }
+ return sem::BuiltinType::kNone;
}
// @param opcode a SPIR-V opcode
// @returns true if the given instruction is an image access instruction
// whose first input operand is an OpSampledImage value.
bool IsSampledImageAccess(SpvOp opcode) {
- switch (opcode) {
- case SpvOpImageSampleImplicitLod:
- case SpvOpImageSampleExplicitLod:
- case SpvOpImageSampleDrefImplicitLod:
- case SpvOpImageSampleDrefExplicitLod:
- // WGSL doesn't have *Proj* texturing; spirv reader emulates it.
- case SpvOpImageSampleProjImplicitLod:
- case SpvOpImageSampleProjExplicitLod:
- case SpvOpImageSampleProjDrefImplicitLod:
- case SpvOpImageSampleProjDrefExplicitLod:
- case SpvOpImageGather:
- case SpvOpImageDrefGather:
- case SpvOpImageQueryLod:
- return true;
- default:
- break;
- }
- return false;
+ switch (opcode) {
+ case SpvOpImageSampleImplicitLod:
+ case SpvOpImageSampleExplicitLod:
+ case SpvOpImageSampleDrefImplicitLod:
+ case SpvOpImageSampleDrefExplicitLod:
+ // WGSL doesn't have *Proj* texturing; spirv reader emulates it.
+ case SpvOpImageSampleProjImplicitLod:
+ case SpvOpImageSampleProjExplicitLod:
+ case SpvOpImageSampleProjDrefImplicitLod:
+ case SpvOpImageSampleProjDrefExplicitLod:
+ case SpvOpImageGather:
+ case SpvOpImageDrefGather:
+ case SpvOpImageQueryLod:
+ return true;
+ default:
+ break;
+ }
+ return false;
}
// @param opcode a SPIR-V opcode
// @returns true if the given instruction is an image sampling, gather,
// or gather-compare operation.
bool IsImageSamplingOrGatherOrDrefGather(SpvOp opcode) {
- switch (opcode) {
- case SpvOpImageSampleImplicitLod:
- case SpvOpImageSampleExplicitLod:
- case SpvOpImageSampleDrefImplicitLod:
- case SpvOpImageSampleDrefExplicitLod:
- // WGSL doesn't have *Proj* texturing; spirv reader emulates it.
- case SpvOpImageSampleProjImplicitLod:
- case SpvOpImageSampleProjExplicitLod:
- case SpvOpImageSampleProjDrefImplicitLod:
- case SpvOpImageSampleProjDrefExplicitLod:
- case SpvOpImageGather:
- case SpvOpImageDrefGather:
- return true;
- default:
- break;
- }
- return false;
+ switch (opcode) {
+ case SpvOpImageSampleImplicitLod:
+ case SpvOpImageSampleExplicitLod:
+ case SpvOpImageSampleDrefImplicitLod:
+ case SpvOpImageSampleDrefExplicitLod:
+ // WGSL doesn't have *Proj* texturing; spirv reader emulates it.
+ case SpvOpImageSampleProjImplicitLod:
+ case SpvOpImageSampleProjExplicitLod:
+ case SpvOpImageSampleProjDrefImplicitLod:
+ case SpvOpImageSampleProjDrefExplicitLod:
+ case SpvOpImageGather:
+ case SpvOpImageDrefGather:
+ return true;
+ default:
+ break;
+ }
+ return false;
}
// @param opcode a SPIR-V opcode
// @returns true if the given instruction is an image access instruction
// whose first input operand is an OpImage value.
bool IsRawImageAccess(SpvOp opcode) {
- switch (opcode) {
- case SpvOpImageRead:
- case SpvOpImageWrite:
- case SpvOpImageFetch:
- return true;
- default:
- break;
- }
- return false;
+ switch (opcode) {
+ case SpvOpImageRead:
+ case SpvOpImageWrite:
+ case SpvOpImageFetch:
+ return true;
+ default:
+ break;
+ }
+ return false;
}
// @param opcode a SPIR-V opcode
// @returns true if the given instruction is an image query instruction
bool IsImageQuery(SpvOp opcode) {
- switch (opcode) {
- case SpvOpImageQuerySize:
- case SpvOpImageQuerySizeLod:
- case SpvOpImageQueryLevels:
- case SpvOpImageQuerySamples:
- case SpvOpImageQueryLod:
- return true;
- default:
- break;
- }
- return false;
+ switch (opcode) {
+ case SpvOpImageQuerySize:
+ case SpvOpImageQuerySizeLod:
+ case SpvOpImageQueryLevels:
+ case SpvOpImageQuerySamples:
+ case SpvOpImageQueryLod:
+ return true;
+ default:
+ break;
+ }
+ return false;
}
// @returns the merge block ID for the given basic block, or 0 if there is none.
uint32_t MergeFor(const spvtools::opt::BasicBlock& bb) {
- // Get the OpSelectionMerge or OpLoopMerge instruction, if any.
- auto* inst = bb.GetMergeInst();
- return inst == nullptr ? 0 : inst->GetSingleWordInOperand(0);
+ // Get the OpSelectionMerge or OpLoopMerge instruction, if any.
+ auto* inst = bb.GetMergeInst();
+ return inst == nullptr ? 0 : inst->GetSingleWordInOperand(0);
}
// @returns the continue target ID for the given basic block, or 0 if there
// is none.
uint32_t ContinueTargetFor(const spvtools::opt::BasicBlock& bb) {
- // Get the OpLoopMerge instruction, if any.
- auto* inst = bb.GetLoopMergeInst();
- return inst == nullptr ? 0 : inst->GetSingleWordInOperand(1);
+ // Get the OpLoopMerge instruction, if any.
+ auto* inst = bb.GetLoopMergeInst();
+ return inst == nullptr ? 0 : inst->GetSingleWordInOperand(1);
}
// A structured traverser produces the reverse structured post-order of the
@@ -575,157 +577,150 @@ uint32_t ContinueTargetFor(const spvtools::opt::BasicBlock& bb) {
// - a block mentioned as a merge block or continue target for a block in the
// set
class StructuredTraverser {
- public:
- explicit StructuredTraverser(const spvtools::opt::Function& function)
- : function_(function) {
- for (auto& block : function_) {
- id_to_block_[block.id()] = &block;
- }
- }
-
- // Returns the reverse postorder traversal of the CFG, where:
- // - a merge block always follows its associated constructs
- // - a continue target always follows the associated loop construct, if any
- // @returns the IDs of blocks in reverse structured post order
- std::vector<uint32_t> ReverseStructuredPostOrder() {
- visit_order_.clear();
- visited_.clear();
- VisitBackward(function_.entry()->id());
-
- std::vector<uint32_t> order(visit_order_.rbegin(), visit_order_.rend());
- return order;
- }
-
- private:
- // Executes a depth first search of the CFG, where right after we visit a
- // header, we will visit its merge block, then its continue target (if any).
- // Also records the post order ordering.
- void VisitBackward(uint32_t id) {
- if (id == 0)
- return;
- if (visited_.count(id))
- return;
- visited_.insert(id);
-
- const spvtools::opt::BasicBlock* bb =
- id_to_block_[id]; // non-null for valid modules
- VisitBackward(MergeFor(*bb));
- VisitBackward(ContinueTargetFor(*bb));
-
- // Visit successors. We will naturally skip the continue target and merge
- // blocks.
- auto* terminator = bb->terminator();
- auto opcode = terminator->opcode();
- if (opcode == SpvOpBranchConditional) {
- // Visit the false branch, then the true branch, to make them come
- // out in the natural order for an "if".
- VisitBackward(terminator->GetSingleWordInOperand(2));
- VisitBackward(terminator->GetSingleWordInOperand(1));
- } else if (opcode == SpvOpBranch) {
- VisitBackward(terminator->GetSingleWordInOperand(0));
- } else if (opcode == SpvOpSwitch) {
- // TODO(dneto): Consider visiting the labels in literal-value order.
- std::vector<uint32_t> successors;
- bb->ForEachSuccessorLabel([&successors](const uint32_t succ_id) {
- successors.push_back(succ_id);
- });
- for (auto succ_id : successors) {
- VisitBackward(succ_id);
- }
- }
-
- visit_order_.push_back(id);
- }
-
- const spvtools::opt::Function& function_;
- std::unordered_map<uint32_t, const spvtools::opt::BasicBlock*> id_to_block_;
- std::vector<uint32_t> visit_order_;
- std::unordered_set<uint32_t> visited_;
+ public:
+ explicit StructuredTraverser(const spvtools::opt::Function& function) : function_(function) {
+ for (auto& block : function_) {
+ id_to_block_[block.id()] = &block;
+ }
+ }
+
+ // Returns the reverse postorder traversal of the CFG, where:
+ // - a merge block always follows its associated constructs
+ // - a continue target always follows the associated loop construct, if any
+ // @returns the IDs of blocks in reverse structured post order
+ std::vector<uint32_t> ReverseStructuredPostOrder() {
+ visit_order_.clear();
+ visited_.clear();
+ VisitBackward(function_.entry()->id());
+
+ std::vector<uint32_t> order(visit_order_.rbegin(), visit_order_.rend());
+ return order;
+ }
+
+ private:
+ // Executes a depth first search of the CFG, where right after we visit a
+ // header, we will visit its merge block, then its continue target (if any).
+ // Also records the post order ordering.
+ void VisitBackward(uint32_t id) {
+ if (id == 0) {
+ return;
+ }
+ if (visited_.count(id)) {
+ return;
+ }
+ visited_.insert(id);
+
+ const spvtools::opt::BasicBlock* bb = id_to_block_[id]; // non-null for valid modules
+ VisitBackward(MergeFor(*bb));
+ VisitBackward(ContinueTargetFor(*bb));
+
+ // Visit successors. We will naturally skip the continue target and merge
+ // blocks.
+ auto* terminator = bb->terminator();
+ auto opcode = terminator->opcode();
+ if (opcode == SpvOpBranchConditional) {
+ // Visit the false branch, then the true branch, to make them come
+ // out in the natural order for an "if".
+ VisitBackward(terminator->GetSingleWordInOperand(2));
+ VisitBackward(terminator->GetSingleWordInOperand(1));
+ } else if (opcode == SpvOpBranch) {
+ VisitBackward(terminator->GetSingleWordInOperand(0));
+ } else if (opcode == SpvOpSwitch) {
+ // TODO(dneto): Consider visiting the labels in literal-value order.
+ std::vector<uint32_t> successors;
+ bb->ForEachSuccessorLabel(
+ [&successors](const uint32_t succ_id) { successors.push_back(succ_id); });
+ for (auto succ_id : successors) {
+ VisitBackward(succ_id);
+ }
+ }
+
+ visit_order_.push_back(id);
+ }
+
+ const spvtools::opt::Function& function_;
+ std::unordered_map<uint32_t, const spvtools::opt::BasicBlock*> id_to_block_;
+ std::vector<uint32_t> visit_order_;
+ std::unordered_set<uint32_t> visited_;
};
/// A StatementBuilder for ast::SwitchStatement
/// @see StatementBuilder
-struct SwitchStatementBuilder final
- : public Castable<SwitchStatementBuilder, StatementBuilder> {
- /// Constructor
- /// @param cond the switch statement condition
- explicit SwitchStatementBuilder(const ast::Expression* cond)
- : condition(cond) {}
-
- /// @param builder the program builder
- /// @returns the built ast::SwitchStatement
- const ast::SwitchStatement* Build(ProgramBuilder* builder) const override {
- // We've listed cases in reverse order in the switch statement.
- // Reorder them to match the presentation order in WGSL.
- auto reversed_cases = cases;
- std::reverse(reversed_cases.begin(), reversed_cases.end());
-
- return builder->create<ast::SwitchStatement>(Source{}, condition,
- reversed_cases);
- }
-
- /// Switch statement condition
- const ast::Expression* const condition;
- /// Switch statement cases
- ast::CaseStatementList cases;
+struct SwitchStatementBuilder final : public Castable<SwitchStatementBuilder, StatementBuilder> {
+ /// Constructor
+ /// @param cond the switch statement condition
+ explicit SwitchStatementBuilder(const ast::Expression* cond) : condition(cond) {}
+
+ /// @param builder the program builder
+ /// @returns the built ast::SwitchStatement
+ const ast::SwitchStatement* Build(ProgramBuilder* builder) const override {
+ // We've listed cases in reverse order in the switch statement.
+ // Reorder them to match the presentation order in WGSL.
+ auto reversed_cases = cases;
+ std::reverse(reversed_cases.begin(), reversed_cases.end());
+
+ return builder->create<ast::SwitchStatement>(Source{}, condition, reversed_cases);
+ }
+
+ /// Switch statement condition
+ const ast::Expression* const condition;
+ /// Switch statement cases
+ ast::CaseStatementList cases;
};
/// A StatementBuilder for ast::IfStatement
/// @see StatementBuilder
-struct IfStatementBuilder final
- : public Castable<IfStatementBuilder, StatementBuilder> {
- /// Constructor
- /// @param c the if-statement condition
- explicit IfStatementBuilder(const ast::Expression* c) : cond(c) {}
-
- /// @param builder the program builder
- /// @returns the built ast::IfStatement
- const ast::IfStatement* Build(ProgramBuilder* builder) const override {
- return builder->create<ast::IfStatement>(Source{}, cond, body, else_stmts);
- }
-
- /// If-statement condition
- const ast::Expression* const cond;
- /// If-statement block body
- const ast::BlockStatement* body = nullptr;
- /// Optional if-statement else statements
- ast::ElseStatementList else_stmts;
+struct IfStatementBuilder final : public Castable<IfStatementBuilder, StatementBuilder> {
+ /// Constructor
+ /// @param c the if-statement condition
+ explicit IfStatementBuilder(const ast::Expression* c) : cond(c) {}
+
+ /// @param builder the program builder
+ /// @returns the built ast::IfStatement
+ const ast::IfStatement* Build(ProgramBuilder* builder) const override {
+ return builder->create<ast::IfStatement>(Source{}, cond, body, else_stmt);
+ }
+
+ /// If-statement condition
+ const ast::Expression* const cond;
+ /// If-statement block body
+ const ast::BlockStatement* body = nullptr;
+ /// Optional if-statement else statement
+ const ast::Statement* else_stmt = nullptr;
};
/// A StatementBuilder for ast::LoopStatement
/// @see StatementBuilder
-struct LoopStatementBuilder final
- : public Castable<LoopStatementBuilder, StatementBuilder> {
- /// @param builder the program builder
- /// @returns the built ast::LoopStatement
- ast::LoopStatement* Build(ProgramBuilder* builder) const override {
- return builder->create<ast::LoopStatement>(Source{}, body, continuing);
- }
-
- /// Loop-statement block body
- const ast::BlockStatement* body = nullptr;
- /// Loop-statement continuing body
- /// @note the mutable keyword here is required as all non-StatementBuilders
- /// `ast::Node`s are immutable and are referenced with `const` pointers.
- /// StatementBuilders however exist to provide mutable state while the
- /// FunctionEmitter is building the function. All StatementBuilders are
- /// replaced with immutable AST nodes when Finalize() is called.
- mutable const ast::BlockStatement* continuing = nullptr;
+struct LoopStatementBuilder final : public Castable<LoopStatementBuilder, StatementBuilder> {
+ /// @param builder the program builder
+ /// @returns the built ast::LoopStatement
+ ast::LoopStatement* Build(ProgramBuilder* builder) const override {
+ return builder->create<ast::LoopStatement>(Source{}, body, continuing);
+ }
+
+ /// Loop-statement block body
+ const ast::BlockStatement* body = nullptr;
+ /// Loop-statement continuing body
+ /// @note the mutable keyword here is required as all non-StatementBuilders
+ /// `ast::Node`s are immutable and are referenced with `const` pointers.
+ /// StatementBuilders however exist to provide mutable state while the
+ /// FunctionEmitter is building the function. All StatementBuilders are
+ /// replaced with immutable AST nodes when Finalize() is called.
+ mutable const ast::BlockStatement* continuing = nullptr;
};
/// @param decos a list of parsed decorations
/// @returns true if the decorations include a SampleMask builtin
bool HasBuiltinSampleMask(const ast::AttributeList& decos) {
- if (auto* builtin = ast::GetAttribute<ast::BuiltinAttribute>(decos)) {
- return builtin->builtin == ast::Builtin::kSampleMask;
- }
- return false;
+ if (auto* builtin = ast::GetAttribute<ast::BuiltinAttribute>(decos)) {
+ return builtin->builtin == ast::Builtin::kSampleMask;
+ }
+ return false;
}
} // namespace
-BlockInfo::BlockInfo(const spvtools::opt::BasicBlock& bb)
- : basic_block(&bb), id(bb.id()) {}
+BlockInfo::BlockInfo(const spvtools::opt::BasicBlock& bb) : basic_block(&bb), id(bb.id()) {}
BlockInfo::~BlockInfo() = default;
@@ -737,7 +732,7 @@ DefInfo::DefInfo(const spvtools::opt::Instruction& def_inst,
DefInfo::~DefInfo() = default;
ast::Node* StatementBuilder::Clone(CloneContext*) const {
- return nullptr;
+ return nullptr;
}
FunctionEmitter::FunctionEmitter(ParserImpl* pi,
@@ -756,11 +751,10 @@ FunctionEmitter::FunctionEmitter(ParserImpl* pi,
sample_mask_in_id(0u),
sample_mask_out_id(0u),
ep_info_(ep_info) {
- PushNewStatementBlock(nullptr, 0, nullptr);
+ PushNewStatementBlock(nullptr, 0, nullptr);
}
-FunctionEmitter::FunctionEmitter(ParserImpl* pi,
- const spvtools::opt::Function& function)
+FunctionEmitter::FunctionEmitter(ParserImpl* pi, const spvtools::opt::Function& function)
: FunctionEmitter(pi, function, nullptr) {}
FunctionEmitter::FunctionEmitter(FunctionEmitter&& other)
@@ -777,181 +771,172 @@ FunctionEmitter::FunctionEmitter(FunctionEmitter&& other)
sample_mask_in_id(other.sample_mask_out_id),
sample_mask_out_id(other.sample_mask_in_id),
ep_info_(other.ep_info_) {
- other.statements_stack_.clear();
- PushNewStatementBlock(nullptr, 0, nullptr);
+ other.statements_stack_.clear();
+ PushNewStatementBlock(nullptr, 0, nullptr);
}
FunctionEmitter::~FunctionEmitter() = default;
-FunctionEmitter::StatementBlock::StatementBlock(
- const Construct* construct,
- uint32_t end_id,
- FunctionEmitter::CompletionAction completion_action)
- : construct_(construct),
- end_id_(end_id),
- completion_action_(completion_action) {}
+FunctionEmitter::StatementBlock::StatementBlock(const Construct* construct,
+ uint32_t end_id,
+ FunctionEmitter::CompletionAction completion_action)
+ : construct_(construct), end_id_(end_id), completion_action_(completion_action) {}
-FunctionEmitter::StatementBlock::StatementBlock(StatementBlock&& other) =
- default;
+FunctionEmitter::StatementBlock::StatementBlock(StatementBlock&& other) = default;
FunctionEmitter::StatementBlock::~StatementBlock() = default;
void FunctionEmitter::StatementBlock::Finalize(ProgramBuilder* pb) {
- TINT_ASSERT(Reader, !finalized_ /* Finalize() must only be called once */);
+ TINT_ASSERT(Reader, !finalized_ /* Finalize() must only be called once */);
- for (size_t i = 0; i < statements_.size(); i++) {
- if (auto* sb = statements_[i]->As<StatementBuilder>()) {
- statements_[i] = sb->Build(pb);
+ for (size_t i = 0; i < statements_.size(); i++) {
+ if (auto* sb = statements_[i]->As<StatementBuilder>()) {
+ statements_[i] = sb->Build(pb);
+ }
}
- }
- if (completion_action_ != nullptr) {
- completion_action_(statements_);
- }
+ if (completion_action_ != nullptr) {
+ completion_action_(statements_);
+ }
- finalized_ = true;
+ finalized_ = true;
}
void FunctionEmitter::StatementBlock::Add(const ast::Statement* statement) {
- TINT_ASSERT(Reader,
- !finalized_ /* Add() must not be called after Finalize() */);
- statements_.emplace_back(statement);
+ TINT_ASSERT(Reader, !finalized_ /* Add() must not be called after Finalize() */);
+ statements_.emplace_back(statement);
}
void FunctionEmitter::PushNewStatementBlock(const Construct* construct,
uint32_t end_id,
CompletionAction action) {
- statements_stack_.emplace_back(StatementBlock{construct, end_id, action});
+ statements_stack_.emplace_back(StatementBlock{construct, end_id, action});
}
-void FunctionEmitter::PushGuard(const std::string& guard_name,
- uint32_t end_id) {
- TINT_ASSERT(Reader, !statements_stack_.empty());
- TINT_ASSERT(Reader, !guard_name.empty());
- // Guard control flow by the guard variable. Introduce a new
- // if-selection with a then-clause ending at the same block
- // as the statement block at the top of the stack.
- const auto& top = statements_stack_.back();
-
- auto* cond = create<ast::IdentifierExpression>(
- Source{}, builder_.Symbols().Register(guard_name));
- auto* builder = AddStatementBuilder<IfStatementBuilder>(cond);
-
- PushNewStatementBlock(
- top.GetConstruct(), end_id, [=](const ast::StatementList& stmts) {
+void FunctionEmitter::PushGuard(const std::string& guard_name, uint32_t end_id) {
+ TINT_ASSERT(Reader, !statements_stack_.empty());
+ TINT_ASSERT(Reader, !guard_name.empty());
+ // Guard control flow by the guard variable. Introduce a new
+ // if-selection with a then-clause ending at the same block
+ // as the statement block at the top of the stack.
+ const auto& top = statements_stack_.back();
+
+ auto* cond =
+ create<ast::IdentifierExpression>(Source{}, builder_.Symbols().Register(guard_name));
+ auto* builder = AddStatementBuilder<IfStatementBuilder>(cond);
+
+ PushNewStatementBlock(top.GetConstruct(), end_id, [=](const ast::StatementList& stmts) {
builder->body = create<ast::BlockStatement>(Source{}, stmts);
- });
+ });
}
void FunctionEmitter::PushTrueGuard(uint32_t end_id) {
- TINT_ASSERT(Reader, !statements_stack_.empty());
- const auto& top = statements_stack_.back();
+ TINT_ASSERT(Reader, !statements_stack_.empty());
+ const auto& top = statements_stack_.back();
- auto* cond = MakeTrue(Source{});
- auto* builder = AddStatementBuilder<IfStatementBuilder>(cond);
+ auto* cond = MakeTrue(Source{});
+ auto* builder = AddStatementBuilder<IfStatementBuilder>(cond);
- PushNewStatementBlock(
- top.GetConstruct(), end_id, [=](const ast::StatementList& stmts) {
+ PushNewStatementBlock(top.GetConstruct(), end_id, [=](const ast::StatementList& stmts) {
builder->body = create<ast::BlockStatement>(Source{}, stmts);
- });
+ });
}
const ast::StatementList FunctionEmitter::ast_body() {
- TINT_ASSERT(Reader, !statements_stack_.empty());
- auto& entry = statements_stack_[0];
- entry.Finalize(&builder_);
- return entry.GetStatements();
+ TINT_ASSERT(Reader, !statements_stack_.empty());
+ auto& entry = statements_stack_[0];
+ entry.Finalize(&builder_);
+ return entry.GetStatements();
}
-const ast::Statement* FunctionEmitter::AddStatement(
- const ast::Statement* statement) {
- TINT_ASSERT(Reader, !statements_stack_.empty());
- if (statement != nullptr) {
- statements_stack_.back().Add(statement);
- }
- return statement;
+const ast::Statement* FunctionEmitter::AddStatement(const ast::Statement* statement) {
+ TINT_ASSERT(Reader, !statements_stack_.empty());
+ if (statement != nullptr) {
+ statements_stack_.back().Add(statement);
+ }
+ return statement;
}
const ast::Statement* FunctionEmitter::LastStatement() {
- TINT_ASSERT(Reader, !statements_stack_.empty());
- auto& statement_list = statements_stack_.back().GetStatements();
- TINT_ASSERT(Reader, !statement_list.empty());
- return statement_list.back();
+ TINT_ASSERT(Reader, !statements_stack_.empty());
+ auto& statement_list = statements_stack_.back().GetStatements();
+ TINT_ASSERT(Reader, !statement_list.empty());
+ return statement_list.back();
}
bool FunctionEmitter::Emit() {
- if (failed()) {
- return false;
- }
- // We only care about functions with bodies.
- if (function_.cbegin() == function_.cend()) {
- return true;
- }
+ if (failed()) {
+ return false;
+ }
+ // We only care about functions with bodies.
+ if (function_.cbegin() == function_.cend()) {
+ return true;
+ }
- // The function declaration, corresponding to how it's written in SPIR-V,
- // and without regard to whether it's an entry point.
- FunctionDeclaration decl;
- if (!ParseFunctionDeclaration(&decl)) {
- return false;
- }
-
- bool make_body_function = true;
- if (ep_info_) {
- TINT_ASSERT(Reader, !ep_info_->inner_name.empty());
- if (ep_info_->owns_inner_implementation) {
- // This is an entry point, and we want to emit it as a wrapper around
- // an implementation function.
- decl.name = ep_info_->inner_name;
- } else {
- // This is a second entry point that shares an inner implementation
- // function.
- make_body_function = false;
+ // The function declaration, corresponding to how it's written in SPIR-V,
+ // and without regard to whether it's an entry point.
+ FunctionDeclaration decl;
+ if (!ParseFunctionDeclaration(&decl)) {
+ return false;
}
- }
- if (make_body_function) {
- auto* body = MakeFunctionBody();
- if (!body) {
- return false;
+ bool make_body_function = true;
+ if (ep_info_) {
+ TINT_ASSERT(Reader, !ep_info_->inner_name.empty());
+ if (ep_info_->owns_inner_implementation) {
+ // This is an entry point, and we want to emit it as a wrapper around
+ // an implementation function.
+ decl.name = ep_info_->inner_name;
+ } else {
+ // This is a second entry point that shares an inner implementation
+ // function.
+ make_body_function = false;
+ }
}
- builder_.AST().AddFunction(create<ast::Function>(
- decl.source, builder_.Symbols().Register(decl.name),
- std::move(decl.params), decl.return_type->Build(builder_), body,
- std::move(decl.attributes), ast::AttributeList{}));
- }
+ if (make_body_function) {
+ auto* body = MakeFunctionBody();
+ if (!body) {
+ return false;
+ }
+
+ builder_.AST().AddFunction(
+ create<ast::Function>(decl.source, builder_.Symbols().Register(decl.name),
+ std::move(decl.params), decl.return_type->Build(builder_), body,
+ std::move(decl.attributes), ast::AttributeList{}));
+ }
- if (ep_info_ && !ep_info_->inner_name.empty()) {
- return EmitEntryPointAsWrapper();
- }
+ if (ep_info_ && !ep_info_->inner_name.empty()) {
+ return EmitEntryPointAsWrapper();
+ }
- return success();
+ return success();
}
const ast::BlockStatement* FunctionEmitter::MakeFunctionBody() {
- TINT_ASSERT(Reader, statements_stack_.size() == 1);
+ TINT_ASSERT(Reader, statements_stack_.size() == 1);
- if (!EmitBody()) {
- return nullptr;
- }
+ if (!EmitBody()) {
+ return nullptr;
+ }
- // Set the body of the AST function node.
- if (statements_stack_.size() != 1) {
- Fail() << "internal error: statement-list stack should have 1 "
- "element but has "
- << statements_stack_.size();
- return nullptr;
- }
+ // Set the body of the AST function node.
+ if (statements_stack_.size() != 1) {
+ Fail() << "internal error: statement-list stack should have 1 "
+ "element but has "
+ << statements_stack_.size();
+ return nullptr;
+ }
- statements_stack_[0].Finalize(&builder_);
- auto& statements = statements_stack_[0].GetStatements();
- auto* body = create<ast::BlockStatement>(Source{}, statements);
+ statements_stack_[0].Finalize(&builder_);
+ auto& statements = statements_stack_[0].GetStatements();
+ auto* body = create<ast::BlockStatement>(Source{}, statements);
- // Maintain the invariant by repopulating the one and only element.
- statements_stack_.clear();
- PushNewStatementBlock(constructs_[0].get(), 0, nullptr);
+ // Maintain the invariant by repopulating the one and only element.
+ statements_stack_.clear();
+ PushNewStatementBlock(constructs_[0].get(), 0, nullptr);
- return body;
+ return body;
}
bool FunctionEmitter::EmitPipelineInput(std::string var_name,
@@ -962,149 +947,139 @@ bool FunctionEmitter::EmitPipelineInput(std::string var_name,
const Type* forced_param_type,
ast::VariableList* params,
ast::StatementList* statements) {
- // TODO(dneto): Handle structs where the locations are annotated on members.
- tip_type = tip_type->UnwrapAlias();
- if (auto* ref_type = tip_type->As<Reference>()) {
- tip_type = ref_type->type;
- }
-
- // Recursively flatten matrices, arrays, and structures.
- return Switch(
- tip_type,
- [&](const Matrix* matrix_type) -> bool {
- index_prefix.push_back(0);
- const auto num_columns = static_cast<int>(matrix_type->columns);
- const Type* vec_ty = ty_.Vector(matrix_type->type, matrix_type->rows);
- for (int col = 0; col < num_columns; col++) {
- index_prefix.back() = col;
- if (!EmitPipelineInput(var_name, var_type, attrs, index_prefix,
- vec_ty, forced_param_type, params,
- statements)) {
- return false;
- }
- }
- return success();
- },
- [&](const Array* array_type) -> bool {
- if (array_type->size == 0) {
- return Fail() << "runtime-size array not allowed on pipeline IO";
- }
- index_prefix.push_back(0);
- const Type* elem_ty = array_type->type;
- for (int i = 0; i < static_cast<int>(array_type->size); i++) {
- index_prefix.back() = i;
- if (!EmitPipelineInput(var_name, var_type, attrs, index_prefix,
- elem_ty, forced_param_type, params,
- statements)) {
- return false;
- }
- }
- return success();
- },
- [&](const Struct* struct_type) -> bool {
- const auto& members = struct_type->members;
- index_prefix.push_back(0);
- for (int i = 0; i < static_cast<int>(members.size()); ++i) {
- index_prefix.back() = i;
- ast::AttributeList member_attrs(*attrs);
- if (!parser_impl_.ConvertPipelineDecorations(
- struct_type,
- parser_impl_.GetMemberPipelineDecorations(*struct_type, i),
- &member_attrs)) {
- return false;
- }
- if (!EmitPipelineInput(var_name, var_type, &member_attrs,
- index_prefix, members[i], forced_param_type,
- params, statements)) {
- return false;
- }
- // Copy the location as updated by nested expansion of the member.
- parser_impl_.SetLocation(attrs, GetLocation(member_attrs));
- }
- return success();
- },
- [&](Default) {
- const bool is_builtin =
- ast::HasAttribute<ast::BuiltinAttribute>(*attrs);
-
- const Type* param_type = is_builtin ? forced_param_type : tip_type;
-
- const auto param_name = namer_.MakeDerivedName(var_name + "_param");
- // Create the parameter.
- // TODO(dneto): Note: If the parameter has non-location decorations,
- // then those decoration AST nodes will be reused between multiple
- // elements of a matrix, array, or structure. Normally that's
- // disallowed but currently the SPIR-V reader will make duplicates when
- // the entire AST is cloned at the top level of the SPIR-V reader flow.
- // Consider rewriting this to avoid this node-sharing.
- params->push_back(
- builder_.Param(param_name, param_type->Build(builder_), *attrs));
-
- // Add a body statement to copy the parameter to the corresponding
- // private variable.
- const ast::Expression* param_value = builder_.Expr(param_name);
- const ast::Expression* store_dest = builder_.Expr(var_name);
-
- // Index into the LHS as needed.
- auto* current_type =
- var_type->UnwrapAlias()->UnwrapRef()->UnwrapAlias();
- for (auto index : index_prefix) {
- Switch(
- current_type,
- [&](const Matrix* matrix_type) {
- store_dest =
- builder_.IndexAccessor(store_dest, builder_.Expr(index));
- current_type = ty_.Vector(matrix_type->type, matrix_type->rows);
- },
- [&](const Array* array_type) {
- store_dest =
- builder_.IndexAccessor(store_dest, builder_.Expr(index));
- current_type = array_type->type->UnwrapAlias();
- },
- [&](const Struct* struct_type) {
- store_dest = builder_.MemberAccessor(
- store_dest, builder_.Expr(parser_impl_.GetMemberName(
- *struct_type, index)));
- current_type = struct_type->members[index];
- });
- }
-
- if (is_builtin && (tip_type != forced_param_type)) {
- // The parameter will have the WGSL type, but we need bitcast to
- // the variable store type.
- param_value = create<ast::BitcastExpression>(
- tip_type->Build(builder_), param_value);
- }
-
- statements->push_back(builder_.Assign(store_dest, param_value));
-
- // Increment the location attribute, in case more parameters will
- // follow.
- IncrementLocation(attrs);
+ // TODO(dneto): Handle structs where the locations are annotated on members.
+ tip_type = tip_type->UnwrapAlias();
+ if (auto* ref_type = tip_type->As<Reference>()) {
+ tip_type = ref_type->type;
+ }
+
+ // Recursively flatten matrices, arrays, and structures.
+ return Switch(
+ tip_type,
+ [&](const Matrix* matrix_type) -> bool {
+ index_prefix.push_back(0);
+ const auto num_columns = static_cast<int>(matrix_type->columns);
+ const Type* vec_ty = ty_.Vector(matrix_type->type, matrix_type->rows);
+ for (int col = 0; col < num_columns; col++) {
+ index_prefix.back() = col;
+ if (!EmitPipelineInput(var_name, var_type, attrs, index_prefix, vec_ty,
+ forced_param_type, params, statements)) {
+ return false;
+ }
+ }
+ return success();
+ },
+ [&](const Array* array_type) -> bool {
+ if (array_type->size == 0) {
+ return Fail() << "runtime-size array not allowed on pipeline IO";
+ }
+ index_prefix.push_back(0);
+ const Type* elem_ty = array_type->type;
+ for (int i = 0; i < static_cast<int>(array_type->size); i++) {
+ index_prefix.back() = i;
+ if (!EmitPipelineInput(var_name, var_type, attrs, index_prefix, elem_ty,
+ forced_param_type, params, statements)) {
+ return false;
+ }
+ }
+ return success();
+ },
+ [&](const Struct* struct_type) -> bool {
+ const auto& members = struct_type->members;
+ index_prefix.push_back(0);
+ for (int i = 0; i < static_cast<int>(members.size()); ++i) {
+ index_prefix.back() = i;
+ ast::AttributeList member_attrs(*attrs);
+ if (!parser_impl_.ConvertPipelineDecorations(
+ struct_type, parser_impl_.GetMemberPipelineDecorations(*struct_type, i),
+ &member_attrs)) {
+ return false;
+ }
+ if (!EmitPipelineInput(var_name, var_type, &member_attrs, index_prefix, members[i],
+ forced_param_type, params, statements)) {
+ return false;
+ }
+ // Copy the location as updated by nested expansion of the member.
+ parser_impl_.SetLocation(attrs, GetLocation(member_attrs));
+ }
+ return success();
+ },
+ [&](Default) {
+ const bool is_builtin = ast::HasAttribute<ast::BuiltinAttribute>(*attrs);
+
+ const Type* param_type = is_builtin ? forced_param_type : tip_type;
+
+ const auto param_name = namer_.MakeDerivedName(var_name + "_param");
+ // Create the parameter.
+ // TODO(dneto): Note: If the parameter has non-location decorations,
+ // then those decoration AST nodes will be reused between multiple
+ // elements of a matrix, array, or structure. Normally that's
+ // disallowed but currently the SPIR-V reader will make duplicates when
+ // the entire AST is cloned at the top level of the SPIR-V reader flow.
+ // Consider rewriting this to avoid this node-sharing.
+ params->push_back(builder_.Param(param_name, param_type->Build(builder_), *attrs));
+
+ // Add a body statement to copy the parameter to the corresponding
+ // private variable.
+ const ast::Expression* param_value = builder_.Expr(param_name);
+ const ast::Expression* store_dest = builder_.Expr(var_name);
+
+ // Index into the LHS as needed.
+ auto* current_type = var_type->UnwrapAlias()->UnwrapRef()->UnwrapAlias();
+ for (auto index : index_prefix) {
+ Switch(
+ current_type,
+ [&](const Matrix* matrix_type) {
+ store_dest = builder_.IndexAccessor(store_dest, builder_.Expr(i32(index)));
+ current_type = ty_.Vector(matrix_type->type, matrix_type->rows);
+ },
+ [&](const Array* array_type) {
+ store_dest = builder_.IndexAccessor(store_dest, builder_.Expr(i32(index)));
+ current_type = array_type->type->UnwrapAlias();
+ },
+ [&](const Struct* struct_type) {
+ store_dest = builder_.MemberAccessor(
+ store_dest,
+ builder_.Expr(parser_impl_.GetMemberName(*struct_type, index)));
+ current_type = struct_type->members[index];
+ });
+ }
- return success();
- });
+ if (is_builtin && (tip_type != forced_param_type)) {
+                // The parameter will have the WGSL type, but we need to bitcast to
+ // the variable store type.
+ param_value =
+ create<ast::BitcastExpression>(tip_type->Build(builder_), param_value);
+ }
+
+ statements->push_back(builder_.Assign(store_dest, param_value));
+
+ // Increment the location attribute, in case more parameters will
+ // follow.
+ IncrementLocation(attrs);
+
+ return success();
+ });
}
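
As an aside from the diff itself, the flattening performed by EmitPipelineInput can be pictured in isolation: each aggregate level (matrix, array, struct) appends one slot to index_prefix, and every scalar or vector leaf ends up with a complete index path into the original variable. The sketch below is a minimal, self-contained illustration of that recursion; Node, Kind, and Flatten are hypothetical stand-ins, not Tint types, and the real emitter builds AST parameters and accessor expressions rather than printing paths.

#include <cstdio>
#include <memory>
#include <vector>

// Hypothetical stand-in for the reader's Type hierarchy: a leaf, a fixed-size
// array, or a struct with members.
struct Node {
    enum class Kind { kLeaf, kArray, kStruct } kind = Kind::kLeaf;
    int count = 0;                               // element count, for kArray
    std::unique_ptr<Node> element;               // element type, for kArray
    std::vector<std::unique_ptr<Node>> members;  // member types, for kStruct
};

// Mirrors the index_prefix recursion: each aggregate level overwrites the last
// slot of the (by-value) prefix, and every leaf yields one complete index path.
void Flatten(const Node& n, std::vector<int> prefix, std::vector<std::vector<int>>* out) {
    switch (n.kind) {
        case Node::Kind::kLeaf:
            out->push_back(prefix);
            break;
        case Node::Kind::kArray:
            prefix.push_back(0);
            for (int i = 0; i < n.count; i++) {
                prefix.back() = i;
                Flatten(*n.element, prefix, out);
            }
            break;
        case Node::Kind::kStruct:
            prefix.push_back(0);
            for (int i = 0; i < static_cast<int>(n.members.size()); i++) {
                prefix.back() = i;
                Flatten(*n.members[i], prefix, out);
            }
            break;
    }
}

int main() {
    // array<struct{leaf, leaf}, 2> flattens to the paths 0 0, 0 1, 1 0, 1 1.
    Node strct;
    strct.kind = Node::Kind::kStruct;
    strct.members.push_back(std::make_unique<Node>());
    strct.members.push_back(std::make_unique<Node>());

    Node arr;
    arr.kind = Node::Kind::kArray;
    arr.count = 2;
    arr.element = std::make_unique<Node>(std::move(strct));

    std::vector<std::vector<int>> paths;
    Flatten(arr, {}, &paths);
    for (const auto& path : paths) {
        for (int i : path) {
            std::printf("%d ", i);
        }
        std::printf("\n");
    }
    return 0;
}
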
void FunctionEmitter::IncrementLocation(ast::AttributeList* attributes) {
- for (auto*& attr : *attributes) {
- if (auto* loc_attr = attr->As<ast::LocationAttribute>()) {
- // Replace this location attribute with a new one with one higher index.
- // The old one doesn't leak because it's kept in the builder's AST node
- // list.
- attr = builder_.Location(loc_attr->source, loc_attr->value + 1);
- }
- }
+ for (auto*& attr : *attributes) {
+ if (auto* loc_attr = attr->As<ast::LocationAttribute>()) {
+ // Replace this location attribute with a new one with one higher index.
+ // The old one doesn't leak because it's kept in the builder's AST node
+ // list.
+ attr = builder_.Location(loc_attr->source, loc_attr->value + 1);
+ }
+ }
}
-const ast::Attribute* FunctionEmitter::GetLocation(
- const ast::AttributeList& attributes) {
- for (auto* const& attr : attributes) {
- if (attr->Is<ast::LocationAttribute>()) {
- return attr;
+const ast::Attribute* FunctionEmitter::GetLocation(const ast::AttributeList& attributes) {
+ for (auto* const& attr : attributes) {
+ if (attr->Is<ast::LocationAttribute>()) {
+ return attr;
+ }
}
- }
- return nullptr;
+ return nullptr;
}
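
IncrementLocation and GetLocation together implement the "consecutive locations for flattened leaves" rule used above: after each leaf parameter or member is emitted, any location attribute in the working attribute list is replaced by one whose value is bumped by one, so the next leaf lands on the next location. A minimal, self-contained illustration follows; the Attr struct is a hypothetical stand-in, not Tint's ast::LocationAttribute.

#include <cstdio>
#include <vector>

struct Attr {
    bool is_location = false;
    int value = 0;  // meaningful only when is_location is true
};

// Analogue of IncrementLocation: bump every location attribute by one.
void IncrementLocation(std::vector<Attr>* attrs) {
    for (auto& a : *attrs) {
        if (a.is_location) {
            a.value += 1;
        }
    }
}

int main() {
    // A variable annotated @location(3), flattened into three leaves.
    std::vector<Attr> attrs = {{true, 3}};
    for (int leaf = 0; leaf < 3; ++leaf) {
        std::printf("leaf %d gets location %d\n", leaf, attrs[0].value);
        IncrementLocation(&attrs);  // the next leaf uses the next location
    }
    return 0;  // prints locations 3, 4, 5
}
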
bool FunctionEmitter::EmitPipelineOutput(std::string var_name,
@@ -1115,3345 +1090,3201 @@ bool FunctionEmitter::EmitPipelineOutput(std::string var_name,
const Type* forced_member_type,
ast::StructMemberList* return_members,
ast::ExpressionList* return_exprs) {
- tip_type = tip_type->UnwrapAlias();
- if (auto* ref_type = tip_type->As<Reference>()) {
- tip_type = ref_type->type;
- }
-
- // Recursively flatten matrices, arrays, and structures.
- return Switch(
- tip_type,
- [&](const Matrix* matrix_type) {
- index_prefix.push_back(0);
- const auto num_columns = static_cast<int>(matrix_type->columns);
- const Type* vec_ty = ty_.Vector(matrix_type->type, matrix_type->rows);
- for (int col = 0; col < num_columns; col++) {
- index_prefix.back() = col;
- if (!EmitPipelineOutput(var_name, var_type, decos, index_prefix,
- vec_ty, forced_member_type, return_members,
- return_exprs)) {
- return false;
- }
- }
- return success();
- },
- [&](const Array* array_type) -> bool {
- if (array_type->size == 0) {
- return Fail() << "runtime-size array not allowed on pipeline IO";
- }
- index_prefix.push_back(0);
- const Type* elem_ty = array_type->type;
- for (int i = 0; i < static_cast<int>(array_type->size); i++) {
- index_prefix.back() = i;
- if (!EmitPipelineOutput(var_name, var_type, decos, index_prefix,
- elem_ty, forced_member_type, return_members,
- return_exprs)) {
- return false;
- }
- }
- return success();
- },
- [&](const Struct* struct_type) -> bool {
- const auto& members = struct_type->members;
- index_prefix.push_back(0);
- for (int i = 0; i < static_cast<int>(members.size()); ++i) {
- index_prefix.back() = i;
- ast::AttributeList member_attrs(*decos);
- if (!parser_impl_.ConvertPipelineDecorations(
- struct_type,
- parser_impl_.GetMemberPipelineDecorations(*struct_type, i),
- &member_attrs)) {
- return false;
- }
- if (!EmitPipelineOutput(var_name, var_type, &member_attrs,
- index_prefix, members[i], forced_member_type,
- return_members, return_exprs)) {
- return false;
- }
- // Copy the location as updated by nested expansion of the member.
- parser_impl_.SetLocation(decos, GetLocation(member_attrs));
- }
- return success();
- },
- [&](Default) {
- const bool is_builtin =
- ast::HasAttribute<ast::BuiltinAttribute>(*decos);
-
- const Type* member_type = is_builtin ? forced_member_type : tip_type;
- // Derive the member name directly from the variable name. They can't
- // collide.
- const auto member_name = namer_.MakeDerivedName(var_name);
- // Create the member.
- // TODO(dneto): Note: If the parameter has non-location decorations,
- // then those decoration AST nodes will be reused between multiple
- // elements of a matrix, array, or structure. Normally that's
- // disallowed but currently the SPIR-V reader will make duplicates when
- // the entire AST is cloned at the top level of the SPIR-V reader flow.
- // Consider rewriting this to avoid this node-sharing.
- return_members->push_back(
- builder_.Member(member_name, member_type->Build(builder_), *decos));
-
- // Create an expression to evaluate the part of the variable indexed by
- // the index_prefix.
- const ast::Expression* load_source = builder_.Expr(var_name);
-
- // Index into the variable as needed to pick out the flattened member.
- auto* current_type =
- var_type->UnwrapAlias()->UnwrapRef()->UnwrapAlias();
- for (auto index : index_prefix) {
- Switch(
- current_type,
- [&](const Matrix* matrix_type) {
- load_source =
- builder_.IndexAccessor(load_source, builder_.Expr(index));
- current_type = ty_.Vector(matrix_type->type, matrix_type->rows);
- },
- [&](const Array* array_type) {
- load_source =
- builder_.IndexAccessor(load_source, builder_.Expr(index));
- current_type = array_type->type->UnwrapAlias();
- },
- [&](const Struct* struct_type) {
- load_source = builder_.MemberAccessor(
- load_source, builder_.Expr(parser_impl_.GetMemberName(
- *struct_type, index)));
- current_type = struct_type->members[index];
- });
- }
-
- if (is_builtin && (tip_type != forced_member_type)) {
- // The member will have the WGSL type, but we need bitcast to
- // the variable store type.
- load_source = create<ast::BitcastExpression>(
- forced_member_type->Build(builder_), load_source);
- }
- return_exprs->push_back(load_source);
-
- // Increment the location attribute, in case more parameters will
- // follow.
- IncrementLocation(decos);
+ tip_type = tip_type->UnwrapAlias();
+ if (auto* ref_type = tip_type->As<Reference>()) {
+ tip_type = ref_type->type;
+ }
+
+ // Recursively flatten matrices, arrays, and structures.
+ return Switch(
+ tip_type,
+ [&](const Matrix* matrix_type) {
+ index_prefix.push_back(0);
+ const auto num_columns = static_cast<int>(matrix_type->columns);
+ const Type* vec_ty = ty_.Vector(matrix_type->type, matrix_type->rows);
+ for (int col = 0; col < num_columns; col++) {
+ index_prefix.back() = col;
+ if (!EmitPipelineOutput(var_name, var_type, decos, index_prefix, vec_ty,
+ forced_member_type, return_members, return_exprs)) {
+ return false;
+ }
+ }
+ return success();
+ },
+ [&](const Array* array_type) -> bool {
+ if (array_type->size == 0) {
+ return Fail() << "runtime-size array not allowed on pipeline IO";
+ }
+ index_prefix.push_back(0);
+ const Type* elem_ty = array_type->type;
+ for (int i = 0; i < static_cast<int>(array_type->size); i++) {
+ index_prefix.back() = i;
+ if (!EmitPipelineOutput(var_name, var_type, decos, index_prefix, elem_ty,
+ forced_member_type, return_members, return_exprs)) {
+ return false;
+ }
+ }
+ return success();
+ },
+ [&](const Struct* struct_type) -> bool {
+ const auto& members = struct_type->members;
+ index_prefix.push_back(0);
+ for (int i = 0; i < static_cast<int>(members.size()); ++i) {
+ index_prefix.back() = i;
+ ast::AttributeList member_attrs(*decos);
+ if (!parser_impl_.ConvertPipelineDecorations(
+ struct_type, parser_impl_.GetMemberPipelineDecorations(*struct_type, i),
+ &member_attrs)) {
+ return false;
+ }
+ if (!EmitPipelineOutput(var_name, var_type, &member_attrs, index_prefix, members[i],
+ forced_member_type, return_members, return_exprs)) {
+ return false;
+ }
+ // Copy the location as updated by nested expansion of the member.
+ parser_impl_.SetLocation(decos, GetLocation(member_attrs));
+ }
+ return success();
+ },
+ [&](Default) {
+ const bool is_builtin = ast::HasAttribute<ast::BuiltinAttribute>(*decos);
+
+ const Type* member_type = is_builtin ? forced_member_type : tip_type;
+ // Derive the member name directly from the variable name. They can't
+ // collide.
+ const auto member_name = namer_.MakeDerivedName(var_name);
+ // Create the member.
+ // TODO(dneto): Note: If the parameter has non-location decorations,
+ // then those decoration AST nodes will be reused between multiple
+ // elements of a matrix, array, or structure. Normally that's
+ // disallowed but currently the SPIR-V reader will make duplicates when
+ // the entire AST is cloned at the top level of the SPIR-V reader flow.
+ // Consider rewriting this to avoid this node-sharing.
+ return_members->push_back(
+ builder_.Member(member_name, member_type->Build(builder_), *decos));
+
+ // Create an expression to evaluate the part of the variable indexed by
+ // the index_prefix.
+ const ast::Expression* load_source = builder_.Expr(var_name);
+
+ // Index into the variable as needed to pick out the flattened member.
+ auto* current_type = var_type->UnwrapAlias()->UnwrapRef()->UnwrapAlias();
+ for (auto index : index_prefix) {
+ Switch(
+ current_type,
+ [&](const Matrix* matrix_type) {
+ load_source =
+ builder_.IndexAccessor(load_source, builder_.Expr(i32(index)));
+ current_type = ty_.Vector(matrix_type->type, matrix_type->rows);
+ },
+ [&](const Array* array_type) {
+ load_source =
+ builder_.IndexAccessor(load_source, builder_.Expr(i32(index)));
+ current_type = array_type->type->UnwrapAlias();
+ },
+ [&](const Struct* struct_type) {
+ load_source = builder_.MemberAccessor(
+ load_source,
+ builder_.Expr(parser_impl_.GetMemberName(*struct_type, index)));
+ current_type = struct_type->members[index];
+ });
+ }
- return success();
- });
-}
+ if (is_builtin && (tip_type != forced_member_type)) {
+                // The member will have the WGSL type, but we need to bitcast to
+ // the variable store type.
+ load_source = create<ast::BitcastExpression>(forced_member_type->Build(builder_),
+ load_source);
+ }
+ return_exprs->push_back(load_source);
-bool FunctionEmitter::EmitEntryPointAsWrapper() {
- Source source;
-
- // The statements in the body.
- ast::StatementList stmts;
-
- FunctionDeclaration decl;
- decl.source = source;
- decl.name = ep_info_->name;
- const ast::Type* return_type = nullptr; // Populated below.
-
- // Pipeline inputs become parameters to the wrapper function, and
- // their values are saved into the corresponding private variables that
- // have already been created.
- for (uint32_t var_id : ep_info_->inputs) {
- const auto* var = def_use_mgr_->GetDef(var_id);
- TINT_ASSERT(Reader, var != nullptr);
- TINT_ASSERT(Reader, var->opcode() == SpvOpVariable);
- auto* store_type = GetVariableStoreType(*var);
- auto* forced_param_type = store_type;
- ast::AttributeList param_decos;
- if (!parser_impl_.ConvertDecorationsForVariable(var_id, &forced_param_type,
- &param_decos, true)) {
- // This occurs, and is not an error, for the PointSize builtin.
- if (!success()) {
- // But exit early if an error was logged.
- return false;
- }
- continue;
- }
+ // Increment the location attribute, in case more parameters will
+ // follow.
+ IncrementLocation(decos);
- // We don't have to handle initializers because in Vulkan SPIR-V, Input
- // variables must not have them.
+ return success();
+ });
+}
- const auto var_name = namer_.GetName(var_id);
+bool FunctionEmitter::EmitEntryPointAsWrapper() {
+ Source source;
- bool ok = true;
- if (HasBuiltinSampleMask(param_decos)) {
- // In Vulkan SPIR-V, the sample mask is an array. In WGSL it's a scalar.
- // Use the first element only.
- auto* sample_mask_array_type =
- store_type->UnwrapRef()->UnwrapAlias()->As<Array>();
- TINT_ASSERT(Reader, sample_mask_array_type);
- ok = EmitPipelineInput(var_name, store_type, &param_decos, {0},
- sample_mask_array_type->type, forced_param_type,
- &(decl.params), &stmts);
- } else {
- // The normal path.
- ok = EmitPipelineInput(var_name, store_type, &param_decos, {}, store_type,
- forced_param_type, &(decl.params), &stmts);
- }
- if (!ok) {
- return false;
- }
- }
-
- // Call the inner function. It has no parameters.
- stmts.push_back(create<ast::CallStatement>(
- source,
- create<ast::CallExpression>(
- source,
- create<ast::IdentifierExpression>(
- source, builder_.Symbols().Register(ep_info_->inner_name)),
- ast::ExpressionList{})));
-
- // Pipeline outputs are mapped to the return value.
- if (ep_info_->outputs.empty()) {
- // There is nothing to return.
- return_type = ty_.Void()->Build(builder_);
- } else {
- // Pipeline outputs are converted to a structure that is written
- // to just before returning.
-
- const auto return_struct_name =
- namer_.MakeDerivedName(ep_info_->name + "_out");
- const auto return_struct_sym =
- builder_.Symbols().Register(return_struct_name);
-
- // Define the structure.
- std::vector<const ast::StructMember*> return_members;
- ast::ExpressionList return_exprs;
-
- const auto& builtin_position_info = parser_impl_.GetBuiltInPositionInfo();
-
- for (uint32_t var_id : ep_info_->outputs) {
- if (var_id == builtin_position_info.per_vertex_var_id) {
- // The SPIR-V gl_PerVertex variable has already been remapped to
- // a gl_Position variable. Substitute the type.
- const Type* param_type = ty_.Vector(ty_.F32(), 4);
- ast::AttributeList out_decos{
- create<ast::BuiltinAttribute>(source, ast::Builtin::kPosition)};
+ // The statements in the body.
+ ast::StatementList stmts;
- const auto var_name = namer_.GetName(var_id);
- return_members.push_back(
- builder_.Member(var_name, param_type->Build(builder_), out_decos));
- return_exprs.push_back(builder_.Expr(var_name));
+ FunctionDeclaration decl;
+ decl.source = source;
+ decl.name = ep_info_->name;
+ const ast::Type* return_type = nullptr; // Populated below.
- } else {
+ // Pipeline inputs become parameters to the wrapper function, and
+ // their values are saved into the corresponding private variables that
+ // have already been created.
+ for (uint32_t var_id : ep_info_->inputs) {
const auto* var = def_use_mgr_->GetDef(var_id);
TINT_ASSERT(Reader, var != nullptr);
TINT_ASSERT(Reader, var->opcode() == SpvOpVariable);
- const Type* store_type = GetVariableStoreType(*var);
- const Type* forced_member_type = store_type;
- ast::AttributeList out_decos;
- if (!parser_impl_.ConvertDecorationsForVariable(
- var_id, &forced_member_type, &out_decos, true)) {
- // This occurs, and is not an error, for the PointSize builtin.
- if (!success()) {
- // But exit early if an error was logged.
- return false;
- }
- continue;
+ auto* store_type = GetVariableStoreType(*var);
+ auto* forced_param_type = store_type;
+ ast::AttributeList param_decos;
+ if (!parser_impl_.ConvertDecorationsForVariable(var_id, &forced_param_type, &param_decos,
+ true)) {
+ // This occurs, and is not an error, for the PointSize builtin.
+ if (!success()) {
+ // But exit early if an error was logged.
+ return false;
+ }
+ continue;
}
+ // We don't have to handle initializers because in Vulkan SPIR-V, Input
+ // variables must not have them.
+
const auto var_name = namer_.GetName(var_id);
+
bool ok = true;
- if (HasBuiltinSampleMask(out_decos)) {
- // In Vulkan SPIR-V, the sample mask is an array. In WGSL it's a
- // scalar. Use the first element only.
- auto* sample_mask_array_type =
- store_type->UnwrapRef()->UnwrapAlias()->As<Array>();
- TINT_ASSERT(Reader, sample_mask_array_type);
- ok = EmitPipelineOutput(var_name, store_type, &out_decos, {0},
- sample_mask_array_type->type,
- forced_member_type, &return_members,
- &return_exprs);
+ if (HasBuiltinSampleMask(param_decos)) {
+ // In Vulkan SPIR-V, the sample mask is an array. In WGSL it's a scalar.
+ // Use the first element only.
+ auto* sample_mask_array_type = store_type->UnwrapRef()->UnwrapAlias()->As<Array>();
+ TINT_ASSERT(Reader, sample_mask_array_type);
+ ok = EmitPipelineInput(var_name, store_type, &param_decos, {0},
+ sample_mask_array_type->type, forced_param_type, &(decl.params),
+ &stmts);
} else {
- // The normal path.
- ok = EmitPipelineOutput(var_name, store_type, &out_decos, {},
- store_type, forced_member_type,
- &return_members, &return_exprs);
+ // The normal path.
+ ok = EmitPipelineInput(var_name, store_type, &param_decos, {}, store_type,
+ forced_param_type, &(decl.params), &stmts);
}
if (!ok) {
- return false;
+ return false;
}
- }
}
- if (return_members.empty()) {
- // This can occur if only the PointSize member is accessed, because we
- // never emit it.
- return_type = ty_.Void()->Build(builder_);
+ // Call the inner function. It has no parameters.
+ stmts.push_back(create<ast::CallStatement>(
+ source,
+ create<ast::CallExpression>(source,
+ create<ast::IdentifierExpression>(
+ source, builder_.Symbols().Register(ep_info_->inner_name)),
+ ast::ExpressionList{})));
+
+ // Pipeline outputs are mapped to the return value.
+ if (ep_info_->outputs.empty()) {
+ // There is nothing to return.
+ return_type = ty_.Void()->Build(builder_);
} else {
- // Create and register the result type.
- auto* str = create<ast::Struct>(Source{}, return_struct_sym,
- return_members, ast::AttributeList{});
- parser_impl_.AddTypeDecl(return_struct_sym, str);
- return_type = builder_.ty.Of(str);
-
- // Add the return-value statement.
- stmts.push_back(create<ast::ReturnStatement>(
- source,
- builder_.Construct(source, return_type, std::move(return_exprs))));
- }
- }
-
- auto* body = create<ast::BlockStatement>(source, stmts);
- ast::AttributeList fn_attrs;
- fn_attrs.emplace_back(create<ast::StageAttribute>(source, ep_info_->stage));
-
- if (ep_info_->stage == ast::PipelineStage::kCompute) {
- auto& size = ep_info_->workgroup_size;
- if (size.x != 0 && size.y != 0 && size.z != 0) {
- const ast::Expression* x = builder_.Expr(static_cast<int>(size.x));
- const ast::Expression* y =
- size.y ? builder_.Expr(static_cast<int>(size.y)) : nullptr;
- const ast::Expression* z =
- size.z ? builder_.Expr(static_cast<int>(size.z)) : nullptr;
- fn_attrs.emplace_back(create<ast::WorkgroupAttribute>(Source{}, x, y, z));
- }
- }
-
- builder_.AST().AddFunction(
- create<ast::Function>(source, builder_.Symbols().Register(ep_info_->name),
- std::move(decl.params), return_type, body,
- std::move(fn_attrs), ast::AttributeList{}));
-
- return true;
+ // Pipeline outputs are converted to a structure that is written
+ // to just before returning.
+
+ const auto return_struct_name = namer_.MakeDerivedName(ep_info_->name + "_out");
+ const auto return_struct_sym = builder_.Symbols().Register(return_struct_name);
+
+ // Define the structure.
+ std::vector<const ast::StructMember*> return_members;
+ ast::ExpressionList return_exprs;
+
+ const auto& builtin_position_info = parser_impl_.GetBuiltInPositionInfo();
+
+ for (uint32_t var_id : ep_info_->outputs) {
+ if (var_id == builtin_position_info.per_vertex_var_id) {
+ // The SPIR-V gl_PerVertex variable has already been remapped to
+ // a gl_Position variable. Substitute the type.
+ const Type* param_type = ty_.Vector(ty_.F32(), 4);
+ ast::AttributeList out_decos{
+ create<ast::BuiltinAttribute>(source, ast::Builtin::kPosition)};
+
+ const auto var_name = namer_.GetName(var_id);
+ return_members.push_back(
+ builder_.Member(var_name, param_type->Build(builder_), out_decos));
+ return_exprs.push_back(builder_.Expr(var_name));
+
+ } else {
+ const auto* var = def_use_mgr_->GetDef(var_id);
+ TINT_ASSERT(Reader, var != nullptr);
+ TINT_ASSERT(Reader, var->opcode() == SpvOpVariable);
+ const Type* store_type = GetVariableStoreType(*var);
+ const Type* forced_member_type = store_type;
+ ast::AttributeList out_decos;
+ if (!parser_impl_.ConvertDecorationsForVariable(var_id, &forced_member_type,
+ &out_decos, true)) {
+ // This occurs, and is not an error, for the PointSize builtin.
+ if (!success()) {
+ // But exit early if an error was logged.
+ return false;
+ }
+ continue;
+ }
+
+ const auto var_name = namer_.GetName(var_id);
+ bool ok = true;
+ if (HasBuiltinSampleMask(out_decos)) {
+ // In Vulkan SPIR-V, the sample mask is an array. In WGSL it's a
+ // scalar. Use the first element only.
+ auto* sample_mask_array_type =
+ store_type->UnwrapRef()->UnwrapAlias()->As<Array>();
+ TINT_ASSERT(Reader, sample_mask_array_type);
+ ok = EmitPipelineOutput(var_name, store_type, &out_decos, {0},
+ sample_mask_array_type->type, forced_member_type,
+ &return_members, &return_exprs);
+ } else {
+ // The normal path.
+ ok = EmitPipelineOutput(var_name, store_type, &out_decos, {}, store_type,
+ forced_member_type, &return_members, &return_exprs);
+ }
+ if (!ok) {
+ return false;
+ }
+ }
+ }
+
+ if (return_members.empty()) {
+ // This can occur if only the PointSize member is accessed, because we
+ // never emit it.
+ return_type = ty_.Void()->Build(builder_);
+ } else {
+ // Create and register the result type.
+ auto* str = create<ast::Struct>(Source{}, return_struct_sym, return_members,
+ ast::AttributeList{});
+ parser_impl_.AddTypeDecl(return_struct_sym, str);
+ return_type = builder_.ty.Of(str);
+
+ // Add the return-value statement.
+ stmts.push_back(create<ast::ReturnStatement>(
+ source, builder_.Construct(source, return_type, std::move(return_exprs))));
+ }
+ }
+
+ auto* body = create<ast::BlockStatement>(source, stmts);
+ ast::AttributeList fn_attrs;
+ fn_attrs.emplace_back(create<ast::StageAttribute>(source, ep_info_->stage));
+
+ if (ep_info_->stage == ast::PipelineStage::kCompute) {
+ auto& size = ep_info_->workgroup_size;
+ if (size.x != 0 && size.y != 0 && size.z != 0) {
+ const ast::Expression* x = builder_.Expr(i32(size.x));
+ const ast::Expression* y = size.y ? builder_.Expr(i32(size.y)) : nullptr;
+ const ast::Expression* z = size.z ? builder_.Expr(i32(size.z)) : nullptr;
+ fn_attrs.emplace_back(create<ast::WorkgroupAttribute>(Source{}, x, y, z));
+ }
+ }
+
+ builder_.AST().AddFunction(create<ast::Function>(
+ source, builder_.Symbols().Register(ep_info_->name), std::move(decl.params), return_type,
+ body, std::move(fn_attrs), ast::AttributeList{}));
+
+ return true;
}
bool FunctionEmitter::ParseFunctionDeclaration(FunctionDeclaration* decl) {
- if (failed()) {
- return false;
- }
+ if (failed()) {
+ return false;
+ }
- const std::string name = namer_.Name(function_.result_id());
+ const std::string name = namer_.Name(function_.result_id());
- // Surprisingly, the "type id" on an OpFunction is the result type of the
- // function, not the type of the function. This is the one exceptional case
- // in SPIR-V where the type ID is not the type of the result ID.
- auto* ret_ty = parser_impl_.ConvertType(function_.type_id());
- if (failed()) {
- return false;
- }
- if (ret_ty == nullptr) {
- return Fail()
- << "internal error: unregistered return type for function with ID "
- << function_.result_id();
- }
-
- ast::VariableList ast_params;
- function_.ForEachParam(
- [this, &ast_params](const spvtools::opt::Instruction* param) {
+ // Surprisingly, the "type id" on an OpFunction is the result type of the
+ // function, not the type of the function. This is the one exceptional case
+ // in SPIR-V where the type ID is not the type of the result ID.
+ auto* ret_ty = parser_impl_.ConvertType(function_.type_id());
+ if (failed()) {
+ return false;
+ }
+ if (ret_ty == nullptr) {
+ return Fail() << "internal error: unregistered return type for function with ID "
+ << function_.result_id();
+ }
+
+ ast::VariableList ast_params;
+ function_.ForEachParam([this, &ast_params](const spvtools::opt::Instruction* param) {
auto* type = parser_impl_.ConvertType(param->type_id());
if (type != nullptr) {
- auto* ast_param = parser_impl_.MakeVariable(
- param->result_id(), ast::StorageClass::kNone, type, true, false,
- nullptr, ast::AttributeList{});
- // Parameters are treated as const declarations.
- ast_params.emplace_back(ast_param);
- // The value is accessible by name.
- identifier_types_.emplace(param->result_id(), type);
+ auto* ast_param =
+ parser_impl_.MakeVariable(param->result_id(), ast::StorageClass::kNone, type, true,
+ false, nullptr, ast::AttributeList{});
+ // Parameters are treated as const declarations.
+ ast_params.emplace_back(ast_param);
+ // The value is accessible by name.
+ identifier_types_.emplace(param->result_id(), type);
} else {
- // We've already logged an error and emitted a diagnostic. Do nothing
- // here.
+ // We've already logged an error and emitted a diagnostic. Do nothing
+ // here.
}
- });
- if (failed()) {
- return false;
- }
- decl->name = name;
- decl->params = std::move(ast_params);
- decl->return_type = ret_ty;
- decl->attributes.clear();
+ });
+ if (failed()) {
+ return false;
+ }
+ decl->name = name;
+ decl->params = std::move(ast_params);
+ decl->return_type = ret_ty;
+ decl->attributes.clear();
- return success();
+ return success();
}
-const Type* FunctionEmitter::GetVariableStoreType(
- const spvtools::opt::Instruction& var_decl_inst) {
- const auto type_id = var_decl_inst.type_id();
- // Normally we use the SPIRV-Tools optimizer to manage types.
- // But when two struct types have the same member types and decorations,
- // but differ only in member names, the two struct types will be
- // represented by a single common internal struct type.
- // So avoid the optimizer's representation and instead follow the
- // SPIR-V instructions themselves.
- const auto* ptr_ty = def_use_mgr_->GetDef(type_id);
- const auto store_ty_id = ptr_ty->GetSingleWordInOperand(1);
- const auto* result = parser_impl_.ConvertType(store_ty_id);
- return result;
+const Type* FunctionEmitter::GetVariableStoreType(const spvtools::opt::Instruction& var_decl_inst) {
+ const auto type_id = var_decl_inst.type_id();
+ // Normally we use the SPIRV-Tools optimizer to manage types.
+ // But when two struct types have the same member types and decorations,
+ // but differ only in member names, the two struct types will be
+ // represented by a single common internal struct type.
+ // So avoid the optimizer's representation and instead follow the
+ // SPIR-V instructions themselves.
+ const auto* ptr_ty = def_use_mgr_->GetDef(type_id);
+ const auto store_ty_id = ptr_ty->GetSingleWordInOperand(1);
+ const auto* result = parser_impl_.ConvertType(store_ty_id);
+ return result;
}
bool FunctionEmitter::EmitBody() {
- RegisterBasicBlocks();
+ RegisterBasicBlocks();
- if (!TerminatorsAreValid()) {
- return false;
- }
- if (!RegisterMerges()) {
- return false;
- }
+ if (!TerminatorsAreValid()) {
+ return false;
+ }
+ if (!RegisterMerges()) {
+ return false;
+ }
- ComputeBlockOrderAndPositions();
- if (!VerifyHeaderContinueMergeOrder()) {
- return false;
- }
- if (!LabelControlFlowConstructs()) {
- return false;
- }
- if (!FindSwitchCaseHeaders()) {
- return false;
- }
- if (!ClassifyCFGEdges()) {
- return false;
- }
- if (!FindIfSelectionInternalHeaders()) {
- return false;
- }
+ ComputeBlockOrderAndPositions();
+ if (!VerifyHeaderContinueMergeOrder()) {
+ return false;
+ }
+ if (!LabelControlFlowConstructs()) {
+ return false;
+ }
+ if (!FindSwitchCaseHeaders()) {
+ return false;
+ }
+ if (!ClassifyCFGEdges()) {
+ return false;
+ }
+ if (!FindIfSelectionInternalHeaders()) {
+ return false;
+ }
- if (!RegisterSpecialBuiltInVariables()) {
- return false;
- }
- if (!RegisterLocallyDefinedValues()) {
- return false;
- }
- FindValuesNeedingNamedOrHoistedDefinition();
+ if (!RegisterSpecialBuiltInVariables()) {
+ return false;
+ }
+ if (!RegisterLocallyDefinedValues()) {
+ return false;
+ }
+ FindValuesNeedingNamedOrHoistedDefinition();
- if (!EmitFunctionVariables()) {
- return false;
- }
- if (!EmitFunctionBodyStatements()) {
- return false;
- }
- return success();
+ if (!EmitFunctionVariables()) {
+ return false;
+ }
+ if (!EmitFunctionBodyStatements()) {
+ return false;
+ }
+ return success();
}
void FunctionEmitter::RegisterBasicBlocks() {
- for (auto& block : function_) {
- block_info_[block.id()] = std::make_unique<BlockInfo>(block);
- }
+ for (auto& block : function_) {
+ block_info_[block.id()] = std::make_unique<BlockInfo>(block);
+ }
}
bool FunctionEmitter::TerminatorsAreValid() {
- if (failed()) {
- return false;
- }
-
- const auto entry_id = function_.begin()->id();
- for (const auto& block : function_) {
- if (!block.terminator()) {
- return Fail() << "Block " << block.id() << " has no terminator";
- }
- }
- for (const auto& block : function_) {
- block.WhileEachSuccessorLabel(
- [this, &block, entry_id](const uint32_t succ_id) -> bool {
- if (succ_id == entry_id) {
- return Fail() << "Block " << block.id()
- << " branches to function entry block " << entry_id;
- }
- if (!GetBlockInfo(succ_id)) {
- return Fail() << "Block " << block.id() << " in function "
- << function_.DefInst().result_id() << " branches to "
- << succ_id << " which is not a block in the function";
- }
- return true;
+ if (failed()) {
+ return false;
+ }
+
+ const auto entry_id = function_.begin()->id();
+ for (const auto& block : function_) {
+ if (!block.terminator()) {
+ return Fail() << "Block " << block.id() << " has no terminator";
+ }
+ }
+ for (const auto& block : function_) {
+ block.WhileEachSuccessorLabel([this, &block, entry_id](const uint32_t succ_id) -> bool {
+ if (succ_id == entry_id) {
+ return Fail() << "Block " << block.id() << " branches to function entry block "
+ << entry_id;
+ }
+ if (!GetBlockInfo(succ_id)) {
+ return Fail() << "Block " << block.id() << " in function "
+ << function_.DefInst().result_id() << " branches to " << succ_id
+ << " which is not a block in the function";
+ }
+ return true;
});
- }
- return success();
+ }
+ return success();
}
bool FunctionEmitter::RegisterMerges() {
- if (failed()) {
- return false;
- }
-
- const auto entry_id = function_.begin()->id();
- for (const auto& block : function_) {
- const auto block_id = block.id();
- auto* block_info = GetBlockInfo(block_id);
- if (!block_info) {
- return Fail() << "internal error: block " << block_id
- << " missing; blocks should already "
- "have been registered";
- }
-
- if (const auto* inst = block.GetMergeInst()) {
- auto terminator_opcode = block.terminator()->opcode();
- switch (inst->opcode()) {
- case SpvOpSelectionMerge:
- if ((terminator_opcode != SpvOpBranchConditional) &&
- (terminator_opcode != SpvOpSwitch)) {
- return Fail() << "Selection header " << block_id
- << " does not end in an OpBranchConditional or "
- "OpSwitch instruction";
- }
- break;
- case SpvOpLoopMerge:
- if ((terminator_opcode != SpvOpBranchConditional) &&
- (terminator_opcode != SpvOpBranch)) {
- return Fail() << "Loop header " << block_id
- << " does not end in an OpBranch or "
- "OpBranchConditional instruction";
- }
- break;
- default:
- break;
- }
-
- const uint32_t header = block.id();
- auto* header_info = block_info;
- const uint32_t merge = inst->GetSingleWordInOperand(0);
- auto* merge_info = GetBlockInfo(merge);
- if (!merge_info) {
- return Fail() << "Structured header block " << header
- << " declares invalid merge block " << merge;
- }
- if (merge == header) {
- return Fail() << "Structured header block " << header
- << " cannot be its own merge block";
- }
- if (merge_info->header_for_merge) {
- return Fail() << "Block " << merge
- << " declared as merge block for more than one header: "
- << merge_info->header_for_merge << ", " << header;
- }
- merge_info->header_for_merge = header;
- header_info->merge_for_header = merge;
-
- if (inst->opcode() == SpvOpLoopMerge) {
- if (header == entry_id) {
- return Fail() << "Function entry block " << entry_id
- << " cannot be a loop header";
- }
- const uint32_t ct = inst->GetSingleWordInOperand(1);
- auto* ct_info = GetBlockInfo(ct);
- if (!ct_info) {
- return Fail() << "Structured header " << header
- << " declares invalid continue target " << ct;
- }
- if (ct == merge) {
- return Fail() << "Invalid structured header block " << header
- << ": declares block " << ct
- << " as both its merge block and continue target";
- }
- if (ct_info->header_for_continue) {
- return Fail()
- << "Block " << ct
- << " declared as continue target for more than one header: "
- << ct_info->header_for_continue << ", " << header;
- }
- ct_info->header_for_continue = header;
- header_info->continue_for_header = ct;
- }
- }
-
- // Check single-block loop cases.
- bool is_single_block_loop = false;
- block_info->basic_block->ForEachSuccessorLabel(
- [&is_single_block_loop, block_id](const uint32_t succ) {
- if (block_id == succ)
- is_single_block_loop = true;
- });
- const auto ct = block_info->continue_for_header;
- block_info->is_continue_entire_loop = ct == block_id;
- if (is_single_block_loop && !block_info->is_continue_entire_loop) {
- return Fail() << "Block " << block_id
- << " branches to itself but is not its own continue target";
- }
- // It's valid for a the header of a multi-block loop header to declare
- // itself as its own continue target.
- }
- return success();
+ if (failed()) {
+ return false;
+ }
+
+ const auto entry_id = function_.begin()->id();
+ for (const auto& block : function_) {
+ const auto block_id = block.id();
+ auto* block_info = GetBlockInfo(block_id);
+ if (!block_info) {
+ return Fail() << "internal error: block " << block_id
+ << " missing; blocks should already "
+ "have been registered";
+ }
+
+ if (const auto* inst = block.GetMergeInst()) {
+ auto terminator_opcode = block.terminator()->opcode();
+ switch (inst->opcode()) {
+ case SpvOpSelectionMerge:
+ if ((terminator_opcode != SpvOpBranchConditional) &&
+ (terminator_opcode != SpvOpSwitch)) {
+ return Fail() << "Selection header " << block_id
+ << " does not end in an OpBranchConditional or "
+ "OpSwitch instruction";
+ }
+ break;
+ case SpvOpLoopMerge:
+ if ((terminator_opcode != SpvOpBranchConditional) &&
+ (terminator_opcode != SpvOpBranch)) {
+ return Fail() << "Loop header " << block_id
+ << " does not end in an OpBranch or "
+ "OpBranchConditional instruction";
+ }
+ break;
+ default:
+ break;
+ }
+
+ const uint32_t header = block.id();
+ auto* header_info = block_info;
+ const uint32_t merge = inst->GetSingleWordInOperand(0);
+ auto* merge_info = GetBlockInfo(merge);
+ if (!merge_info) {
+ return Fail() << "Structured header block " << header
+ << " declares invalid merge block " << merge;
+ }
+ if (merge == header) {
+ return Fail() << "Structured header block " << header
+ << " cannot be its own merge block";
+ }
+ if (merge_info->header_for_merge) {
+ return Fail() << "Block " << merge
+ << " declared as merge block for more than one header: "
+ << merge_info->header_for_merge << ", " << header;
+ }
+ merge_info->header_for_merge = header;
+ header_info->merge_for_header = merge;
+
+ if (inst->opcode() == SpvOpLoopMerge) {
+ if (header == entry_id) {
+ return Fail() << "Function entry block " << entry_id
+ << " cannot be a loop header";
+ }
+ const uint32_t ct = inst->GetSingleWordInOperand(1);
+ auto* ct_info = GetBlockInfo(ct);
+ if (!ct_info) {
+ return Fail() << "Structured header " << header
+ << " declares invalid continue target " << ct;
+ }
+ if (ct == merge) {
+ return Fail() << "Invalid structured header block " << header
+ << ": declares block " << ct
+ << " as both its merge block and continue target";
+ }
+ if (ct_info->header_for_continue) {
+ return Fail() << "Block " << ct
+ << " declared as continue target for more than one header: "
+ << ct_info->header_for_continue << ", " << header;
+ }
+ ct_info->header_for_continue = header;
+ header_info->continue_for_header = ct;
+ }
+ }
+
+ // Check single-block loop cases.
+ bool is_single_block_loop = false;
+ block_info->basic_block->ForEachSuccessorLabel(
+ [&is_single_block_loop, block_id](const uint32_t succ) {
+ if (block_id == succ) {
+ is_single_block_loop = true;
+ }
+ });
+ const auto ct = block_info->continue_for_header;
+ block_info->is_continue_entire_loop = ct == block_id;
+ if (is_single_block_loop && !block_info->is_continue_entire_loop) {
+ return Fail() << "Block " << block_id
+ << " branches to itself but is not its own continue target";
+ }
+        // It's valid for the header of a multi-block loop to declare
+ // itself as its own continue target.
+ }
+ return success();
}
void FunctionEmitter::ComputeBlockOrderAndPositions() {
- block_order_ = StructuredTraverser(function_).ReverseStructuredPostOrder();
-
- for (uint32_t i = 0; i < block_order_.size(); ++i) {
- GetBlockInfo(block_order_[i])->pos = i;
- }
- // The invalid block position is not the position of any block that is in the
- // order.
- assert(block_order_.size() <= kInvalidBlockPos);
+ block_order_ = StructuredTraverser(function_).ReverseStructuredPostOrder();
+
+ for (uint32_t i = 0; i < block_order_.size(); ++i) {
+ GetBlockInfo(block_order_[i])->pos = i;
+ }
+ // The invalid block position is not the position of any block that is in the
+ // order.
+ assert(block_order_.size() <= kInvalidBlockPos);
}
bool FunctionEmitter::VerifyHeaderContinueMergeOrder() {
- // Verify interval rules for a structured header block:
- //
- // If the CFG satisfies structured control flow rules, then:
- // If header H is reachable, then the following "interval rules" hold,
- // where M(H) is H's merge block, and CT(H) is H's continue target:
- //
- // Pos(H) < Pos(M(H))
- //
- // If CT(H) exists, then:
- // Pos(H) <= Pos(CT(H))
- // Pos(CT(H)) < Pos(M)
- //
- for (auto block_id : block_order_) {
- const auto* block_info = GetBlockInfo(block_id);
- const auto merge = block_info->merge_for_header;
- if (merge == 0) {
- continue;
- }
- // This is a header.
- const auto header = block_id;
- const auto* header_info = block_info;
- const auto header_pos = header_info->pos;
- const auto merge_pos = GetBlockInfo(merge)->pos;
-
- // Pos(H) < Pos(M(H))
- // Note: When recording merges we made sure H != M(H)
- if (merge_pos <= header_pos) {
- return Fail() << "Header " << header
- << " does not strictly dominate its merge block " << merge;
- // TODO(dneto): Report a path from the entry block to the merge block
- // without going through the header block.
- }
-
- const auto ct = block_info->continue_for_header;
- if (ct == 0) {
- continue;
- }
- // Furthermore, this is a loop header.
- const auto* ct_info = GetBlockInfo(ct);
- const auto ct_pos = ct_info->pos;
- // Pos(H) <= Pos(CT(H))
- if (ct_pos < header_pos) {
- Fail() << "Loop header " << header
- << " does not dominate its continue target " << ct;
- }
- // Pos(CT(H)) < Pos(M(H))
- // Note: When recording merges we made sure CT(H) != M(H)
- if (merge_pos <= ct_pos) {
- return Fail() << "Merge block " << merge << " for loop headed at block "
- << header
- << " appears at or before the loop's continue "
- "construct headed by "
- "block "
- << ct;
- }
- }
- return success();
+ // Verify interval rules for a structured header block:
+ //
+ // If the CFG satisfies structured control flow rules, then:
+ // If header H is reachable, then the following "interval rules" hold,
+ // where M(H) is H's merge block, and CT(H) is H's continue target:
+ //
+ // Pos(H) < Pos(M(H))
+ //
+ // If CT(H) exists, then:
+ // Pos(H) <= Pos(CT(H))
+ // Pos(CT(H)) < Pos(M)
+ //
+ for (auto block_id : block_order_) {
+ const auto* block_info = GetBlockInfo(block_id);
+ const auto merge = block_info->merge_for_header;
+ if (merge == 0) {
+ continue;
+ }
+ // This is a header.
+ const auto header = block_id;
+ const auto* header_info = block_info;
+ const auto header_pos = header_info->pos;
+ const auto merge_pos = GetBlockInfo(merge)->pos;
+
+ // Pos(H) < Pos(M(H))
+ // Note: When recording merges we made sure H != M(H)
+ if (merge_pos <= header_pos) {
+ return Fail() << "Header " << header << " does not strictly dominate its merge block "
+ << merge;
+ // TODO(dneto): Report a path from the entry block to the merge block
+ // without going through the header block.
+ }
+
+ const auto ct = block_info->continue_for_header;
+ if (ct == 0) {
+ continue;
+ }
+ // Furthermore, this is a loop header.
+ const auto* ct_info = GetBlockInfo(ct);
+ const auto ct_pos = ct_info->pos;
+ // Pos(H) <= Pos(CT(H))
+ if (ct_pos < header_pos) {
+ Fail() << "Loop header " << header << " does not dominate its continue target " << ct;
+ }
+ // Pos(CT(H)) < Pos(M(H))
+ // Note: When recording merges we made sure CT(H) != M(H)
+ if (merge_pos <= ct_pos) {
+ return Fail() << "Merge block " << merge << " for loop headed at block " << header
+ << " appears at or before the loop's continue "
+ "construct headed by "
+ "block "
+ << ct;
+ }
+ }
+ return success();
}
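
The interval rules verified above can be exercised on bare position numbers, independent of any SPIR-V module. The sketch below is a hypothetical restatement (not Tint code): given the structured-order positions of a header, its merge block, and an optional continue target, it reports the same three violations this function diagnoses.

#include <cstdio>
#include <optional>

// True when the positions satisfy the structured-CFG interval rules:
//   Pos(H) < Pos(M(H)); and if CT(H) exists, Pos(H) <= Pos(CT(H)) < Pos(M(H)).
bool IntervalRulesHold(unsigned header_pos,
                       unsigned merge_pos,
                       std::optional<unsigned> continue_pos) {
    if (merge_pos <= header_pos) {
        std::printf("header does not strictly dominate its merge block\n");
        return false;
    }
    if (continue_pos) {
        if (*continue_pos < header_pos) {
            std::printf("loop header does not dominate its continue target\n");
            return false;
        }
        if (merge_pos <= *continue_pos) {
            std::printf("merge block appears at or before the continue construct\n");
            return false;
        }
    }
    return true;
}

int main() {
    // Positions taken from the reverse structured post-order.
    std::printf("%d\n", IntervalRulesHold(1, 5, 3));             // 1: rules hold
    std::printf("%d\n", IntervalRulesHold(2, 2, std::nullopt));  // 0: merge not after header
    return 0;
}
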
bool FunctionEmitter::LabelControlFlowConstructs() {
- // Label each block in the block order with its nearest enclosing structured
- // control flow construct. Populates the |construct| member of BlockInfo.
-
- // Keep a stack of enclosing structured control flow constructs. Start
- // with the synthetic construct representing the entire function.
- //
- // Scan from left to right in the block order, and check conditions
- // on each block in the following order:
- //
- // a. When you reach a merge block, the top of the stack should
- // be the associated header. Pop it off.
- // b. When you reach a header, push it on the stack.
- // c. When you reach a continue target, push it on the stack.
- // (A block can be both a header and a continue target.)
- // c. When you reach a block with an edge branching backward (in the
- // structured order) to block T:
- // T should be a loop header, and the top of the stack should be a
- // continue target associated with T.
- // This is the end of the continue construct. Pop the continue
- // target off the stack.
- //
- // Note: A loop header can declare itself as its own continue target.
- //
- // Note: For a single-block loop, that block is a header, its own
- // continue target, and its own backedge block.
- //
- // Note: We pop the merge off first because a merge block that marks
- // the end of one construct can be a single-block loop. So that block
- // is a merge, a header, a continue target, and a backedge block.
- // But we want to finish processing of the merge before dealing with
- // the loop.
- //
- // In the same scan, mark each basic block with the nearest enclosing
- // header: the most recent header for which we haven't reached its merge
- // block. Also mark the the most recent continue target for which we
- // haven't reached the backedge block.
-
- TINT_ASSERT(Reader, block_order_.size() > 0);
- constructs_.clear();
- const auto entry_id = block_order_[0];
-
- // The stack of enclosing constructs.
- std::vector<Construct*> enclosing;
-
- // Creates a control flow construct and pushes it onto the stack.
- // Its parent is the top of the stack, or nullptr if the stack is empty.
- // Returns the newly created construct.
- auto push_construct = [this, &enclosing](size_t depth, Construct::Kind k,
- uint32_t begin_id,
- uint32_t end_id) -> Construct* {
- const auto begin_pos = GetBlockInfo(begin_id)->pos;
- const auto end_pos =
- end_id == 0 ? uint32_t(block_order_.size()) : GetBlockInfo(end_id)->pos;
- const auto* parent = enclosing.empty() ? nullptr : enclosing.back();
- auto scope_end_pos = end_pos;
- // A loop construct is added right after its associated continue construct.
- // In that case, adjust the parent up.
- if (k == Construct::kLoop) {
- TINT_ASSERT(Reader, parent);
- TINT_ASSERT(Reader, parent->kind == Construct::kContinue);
- scope_end_pos = parent->end_pos;
- parent = parent->parent;
- }
- constructs_.push_back(std::make_unique<Construct>(
- parent, static_cast<int>(depth), k, begin_id, end_id, begin_pos,
- end_pos, scope_end_pos));
- Construct* result = constructs_.back().get();
- enclosing.push_back(result);
- return result;
- };
-
- // Make a synthetic kFunction construct to enclose all blocks in the function.
- push_construct(0, Construct::kFunction, entry_id, 0);
- // The entry block can be a selection construct, so be sure to process
- // it anyway.
-
- for (uint32_t i = 0; i < block_order_.size(); ++i) {
- const auto block_id = block_order_[i];
- TINT_ASSERT(Reader, block_id > 0);
- auto* block_info = GetBlockInfo(block_id);
- TINT_ASSERT(Reader, block_info);
-
- if (enclosing.empty()) {
- return Fail() << "internal error: too many merge blocks before block "
- << block_id;
- }
- const Construct* top = enclosing.back();
-
- while (block_id == top->end_id) {
- // We've reached a predeclared end of the construct. Pop it off the
- // stack.
- enclosing.pop_back();
- if (enclosing.empty()) {
- return Fail() << "internal error: too many merge blocks before block "
- << block_id;
- }
- top = enclosing.back();
- }
-
- const auto merge = block_info->merge_for_header;
- if (merge != 0) {
- // The current block is a header.
- const auto header = block_id;
- const auto* header_info = block_info;
- const auto depth = 1 + top->depth;
- const auto ct = header_info->continue_for_header;
- if (ct != 0) {
- // The current block is a loop header.
- // We should see the continue construct after the loop construct, so
- // push the loop construct last.
-
- // From the interval rule, the continue construct consists of blocks
- // in the block order, starting at the continue target, until just
- // before the merge block.
- top = push_construct(depth, Construct::kContinue, ct, merge);
- // A loop header that is its own continue target will have an
- // empty loop construct. Only create a loop construct when
- // the continue target is *not* the same as the loop header.
- if (header != ct) {
- // From the interval rule, the loop construct consists of blocks
- // in the block order, starting at the header, until just
- // before the continue target.
- top = push_construct(depth, Construct::kLoop, header, ct);
-
- // If the loop header branches to two different blocks inside the loop
- // construct, then the loop body should be modeled as an if-selection
- // construct
- std::vector<uint32_t> targets;
- header_info->basic_block->ForEachSuccessorLabel(
- [&targets](const uint32_t target) { targets.push_back(target); });
- if ((targets.size() == 2u) && targets[0] != targets[1]) {
- const auto target0_pos = GetBlockInfo(targets[0])->pos;
- const auto target1_pos = GetBlockInfo(targets[1])->pos;
- if (top->ContainsPos(target0_pos) &&
- top->ContainsPos(target1_pos)) {
- // Insert a synthetic if-selection
- top = push_construct(depth + 1, Construct::kIfSelection, header,
- ct);
+ // Label each block in the block order with its nearest enclosing structured
+ // control flow construct. Populates the |construct| member of BlockInfo.
+
+ // Keep a stack of enclosing structured control flow constructs. Start
+ // with the synthetic construct representing the entire function.
+ //
+ // Scan from left to right in the block order, and check conditions
+ // on each block in the following order:
+ //
+ // a. When you reach a merge block, the top of the stack should
+ // be the associated header. Pop it off.
+ // b. When you reach a header, push it on the stack.
+ // c. When you reach a continue target, push it on the stack.
+ // (A block can be both a header and a continue target.)
+    // d. When you reach a block with an edge branching backward (in the
+ // structured order) to block T:
+ // T should be a loop header, and the top of the stack should be a
+ // continue target associated with T.
+ // This is the end of the continue construct. Pop the continue
+ // target off the stack.
+ //
+ // Note: A loop header can declare itself as its own continue target.
+ //
+ // Note: For a single-block loop, that block is a header, its own
+ // continue target, and its own backedge block.
+ //
+ // Note: We pop the merge off first because a merge block that marks
+ // the end of one construct can be a single-block loop. So that block
+ // is a merge, a header, a continue target, and a backedge block.
+ // But we want to finish processing of the merge before dealing with
+ // the loop.
+ //
+ // In the same scan, mark each basic block with the nearest enclosing
+ // header: the most recent header for which we haven't reached its merge
+    // block. Also mark the most recent continue target for which we
+ // haven't reached the backedge block.
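+
+    // Illustrative walk-through (editorial sketch; the labels H, CT, and M are
+    // hypothetical, not taken from any real module): for a loop whose header H
+    // has OpLoopMerge with merge block M and continue target CT (H != CT), the
+    // scan pushes kContinue covering [CT, M) and then kLoop covering [H, CT).
+    // Blocks ordered in [H, CT) are labeled with the loop construct and blocks
+    // in [CT, M) with the continue construct; reaching CT pops the loop
+    // construct, and reaching M pops the continue construct.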
+
+ TINT_ASSERT(Reader, block_order_.size() > 0);
+ constructs_.clear();
+ const auto entry_id = block_order_[0];
+
+ // The stack of enclosing constructs.
+ std::vector<Construct*> enclosing;
+
+ // Creates a control flow construct and pushes it onto the stack.
+ // Its parent is the top of the stack, or nullptr if the stack is empty.
+ // Returns the newly created construct.
+ auto push_construct = [this, &enclosing](size_t depth, Construct::Kind k, uint32_t begin_id,
+ uint32_t end_id) -> Construct* {
+ const auto begin_pos = GetBlockInfo(begin_id)->pos;
+ const auto end_pos =
+ end_id == 0 ? uint32_t(block_order_.size()) : GetBlockInfo(end_id)->pos;
+ const auto* parent = enclosing.empty() ? nullptr : enclosing.back();
+ auto scope_end_pos = end_pos;
+ // A loop construct is added right after its associated continue construct.
+ // In that case, adjust the parent up.
+ if (k == Construct::kLoop) {
+ TINT_ASSERT(Reader, parent);
+ TINT_ASSERT(Reader, parent->kind == Construct::kContinue);
+ scope_end_pos = parent->end_pos;
+ parent = parent->parent;
+ }
+ constructs_.push_back(std::make_unique<Construct>(parent, static_cast<int>(depth), k,
+ begin_id, end_id, begin_pos, end_pos,
+ scope_end_pos));
+ Construct* result = constructs_.back().get();
+ enclosing.push_back(result);
+ return result;
+ };
+
+ // Make a synthetic kFunction construct to enclose all blocks in the function.
+ push_construct(0, Construct::kFunction, entry_id, 0);
+ // The entry block can be a selection construct, so be sure to process
+ // it anyway.
+
+ for (uint32_t i = 0; i < block_order_.size(); ++i) {
+ const auto block_id = block_order_[i];
+ TINT_ASSERT(Reader, block_id > 0);
+ auto* block_info = GetBlockInfo(block_id);
+ TINT_ASSERT(Reader, block_info);
+
+ if (enclosing.empty()) {
+ return Fail() << "internal error: too many merge blocks before block " << block_id;
+ }
+ const Construct* top = enclosing.back();
+
+ while (block_id == top->end_id) {
+ // We've reached a predeclared end of the construct. Pop it off the
+ // stack.
+ enclosing.pop_back();
+ if (enclosing.empty()) {
+ return Fail() << "internal error: too many merge blocks before block " << block_id;
+ }
+ top = enclosing.back();
+ }
+
+ const auto merge = block_info->merge_for_header;
+ if (merge != 0) {
+ // The current block is a header.
+ const auto header = block_id;
+ const auto* header_info = block_info;
+ const auto depth = 1 + top->depth;
+ const auto ct = header_info->continue_for_header;
+ if (ct != 0) {
+ // The current block is a loop header.
+ // We should see the continue construct after the loop construct, so
+ // push the loop construct last.
+
+ // From the interval rule, the continue construct consists of blocks
+ // in the block order, starting at the continue target, until just
+ // before the merge block.
+ top = push_construct(depth, Construct::kContinue, ct, merge);
+ // A loop header that is its own continue target will have an
+ // empty loop construct. Only create a loop construct when
+ // the continue target is *not* the same as the loop header.
+ if (header != ct) {
+ // From the interval rule, the loop construct consists of blocks
+ // in the block order, starting at the header, until just
+ // before the continue target.
+ top = push_construct(depth, Construct::kLoop, header, ct);
+
+ // If the loop header branches to two different blocks inside the loop
+ // construct, then the loop body should be modeled as an if-selection
+ // construct
+ std::vector<uint32_t> targets;
+ header_info->basic_block->ForEachSuccessorLabel(
+ [&targets](const uint32_t target) { targets.push_back(target); });
+ if ((targets.size() == 2u) && targets[0] != targets[1]) {
+ const auto target0_pos = GetBlockInfo(targets[0])->pos;
+ const auto target1_pos = GetBlockInfo(targets[1])->pos;
+ if (top->ContainsPos(target0_pos) && top->ContainsPos(target1_pos)) {
+ // Insert a synthetic if-selection
+ top = push_construct(depth + 1, Construct::kIfSelection, header, ct);
+ }
+ }
+ }
+ } else {
+ // From the interval rule, the selection construct consists of blocks
+ // in the block order, starting at the header, until just before the
+ // merge block.
+ const auto branch_opcode = header_info->basic_block->terminator()->opcode();
+ const auto kind = (branch_opcode == SpvOpBranchConditional)
+ ? Construct::kIfSelection
+ : Construct::kSwitchSelection;
+ top = push_construct(depth, kind, header, merge);
}
- }
- }
- } else {
- // From the interval rule, the selection construct consists of blocks
- // in the block order, starting at the header, until just before the
- // merge block.
- const auto branch_opcode =
- header_info->basic_block->terminator()->opcode();
- const auto kind = (branch_opcode == SpvOpBranchConditional)
- ? Construct::kIfSelection
- : Construct::kSwitchSelection;
- top = push_construct(depth, kind, header, merge);
- }
- }
-
- TINT_ASSERT(Reader, top);
- block_info->construct = top;
- }
-
- // At the end of the block list, we should only have the kFunction construct
- // left.
- if (enclosing.size() != 1) {
- return Fail() << "internal error: unbalanced structured constructs when "
- "labeling structured constructs: ended with "
- << enclosing.size() - 1 << " unterminated constructs";
- }
- const auto* top = enclosing[0];
- if (top->kind != Construct::kFunction || top->depth != 0) {
- return Fail() << "internal error: outermost construct is not a function?!";
- }
-
- return success();
+ }
+
+ TINT_ASSERT(Reader, top);
+ block_info->construct = top;
+ }
+
+ // At the end of the block list, we should only have the kFunction construct
+ // left.
+ if (enclosing.size() != 1) {
+ return Fail() << "internal error: unbalanced structured constructs when "
+ "labeling structured constructs: ended with "
+ << enclosing.size() - 1 << " unterminated constructs";
+ }
+ const auto* top = enclosing[0];
+ if (top->kind != Construct::kFunction || top->depth != 0) {
+ return Fail() << "internal error: outermost construct is not a function?!";
+ }
+
+ return success();
}
bool FunctionEmitter::FindSwitchCaseHeaders() {
- if (failed()) {
- return false;
- }
- for (auto& construct : constructs_) {
- if (construct->kind != Construct::kSwitchSelection) {
- continue;
+ if (failed()) {
+ return false;
}
- const auto* branch =
- GetBlockInfo(construct->begin_id)->basic_block->terminator();
+ for (auto& construct : constructs_) {
+ if (construct->kind != Construct::kSwitchSelection) {
+ continue;
+ }
+ const auto* branch = GetBlockInfo(construct->begin_id)->basic_block->terminator();
+
+ // Mark the default block
+ const auto default_id = branch->GetSingleWordInOperand(1);
+ auto* default_block = GetBlockInfo(default_id);
+ // A default target can't be a backedge.
+ if (construct->begin_pos >= default_block->pos) {
+ // An OpSwitch must dominate its cases. Also, it can't be a self-loop
+ // as that would be a backedge, and backedges can only target a loop,
+ // and loops use an OpLoopMerge instruction, which can't precede an
+ // OpSwitch.
+ return Fail() << "Switch branch from block " << construct->begin_id
+ << " to default target block " << default_id << " can't be a back-edge";
+ }
+ // A default target can be the merge block, but can't go past it.
+ if (construct->end_pos < default_block->pos) {
+ return Fail() << "Switch branch from block " << construct->begin_id
+ << " to default block " << default_id
+ << " escapes the selection construct";
+ }
+ if (default_block->default_head_for) {
+ // An OpSwitch must dominate its cases, including the default target.
+ return Fail() << "Block " << default_id
+ << " is declared as the default target for two OpSwitch "
+ "instructions, at blocks "
+ << default_block->default_head_for->begin_id << " and "
+ << construct->begin_id;
+ }
+ if ((default_block->header_for_merge != 0) &&
+ (default_block->header_for_merge != construct->begin_id)) {
+ // The switch instruction for this default block is an alternate path to
+ // the merge block, and hence the merge block is not dominated by its own
+ // (different) header.
+ return Fail() << "Block " << default_block->id
+ << " is the default block for switch-selection header "
+ << construct->begin_id << " and also the merge block for "
+ << default_block->header_for_merge << " (violates dominance rule)";
+ }
- // Mark the default block
- const auto default_id = branch->GetSingleWordInOperand(1);
- auto* default_block = GetBlockInfo(default_id);
- // A default target can't be a backedge.
- if (construct->begin_pos >= default_block->pos) {
- // An OpSwitch must dominate its cases. Also, it can't be a self-loop
- // as that would be a backedge, and backedges can only target a loop,
- // and loops use an OpLoopMerge instruction, which can't precede an
- // OpSwitch.
- return Fail() << "Switch branch from block " << construct->begin_id
- << " to default target block " << default_id
- << " can't be a back-edge";
- }
- // A default target can be the merge block, but can't go past it.
- if (construct->end_pos < default_block->pos) {
- return Fail() << "Switch branch from block " << construct->begin_id
- << " to default block " << default_id
- << " escapes the selection construct";
- }
- if (default_block->default_head_for) {
- // An OpSwitch must dominate its cases, including the default target.
- return Fail() << "Block " << default_id
- << " is declared as the default target for two OpSwitch "
- "instructions, at blocks "
- << default_block->default_head_for->begin_id << " and "
- << construct->begin_id;
- }
- if ((default_block->header_for_merge != 0) &&
- (default_block->header_for_merge != construct->begin_id)) {
- // The switch instruction for this default block is an alternate path to
- // the merge block, and hence the merge block is not dominated by its own
- // (different) header.
- return Fail() << "Block " << default_block->id
- << " is the default block for switch-selection header "
- << construct->begin_id << " and also the merge block for "
- << default_block->header_for_merge
- << " (violates dominance rule)";
- }
-
- default_block->default_head_for = construct.get();
- default_block->default_is_merge = default_block->pos == construct->end_pos;
-
- // Map a case target to the list of values selecting that case.
- std::unordered_map<uint32_t, std::vector<uint64_t>> block_to_values;
- std::vector<uint32_t> case_targets;
- std::unordered_set<uint64_t> case_values;
-
- // Process case targets.
- for (uint32_t iarg = 2; iarg + 1 < branch->NumInOperands(); iarg += 2) {
- const auto value = branch->GetInOperand(iarg).AsLiteralUint64();
- const auto case_target_id = branch->GetSingleWordInOperand(iarg + 1);
-
- if (case_values.count(value)) {
- return Fail() << "Duplicate case value " << value
- << " in OpSwitch in block " << construct->begin_id;
- }
- case_values.insert(value);
- if (block_to_values.count(case_target_id) == 0) {
- case_targets.push_back(case_target_id);
- }
- block_to_values[case_target_id].push_back(value);
- }
-
- for (uint32_t case_target_id : case_targets) {
- auto* case_block = GetBlockInfo(case_target_id);
-
- case_block->case_values = std::make_unique<std::vector<uint64_t>>(
- std::move(block_to_values[case_target_id]));
-
- // A case target can't be a back-edge.
- if (construct->begin_pos >= case_block->pos) {
- // An OpSwitch must dominate its cases. Also, it can't be a self-loop
- // as that would be a backedge, and backedges can only target a loop,
- // and loops use an OpLoopMerge instruction, which can't preceded an
- // OpSwitch.
- return Fail() << "Switch branch from block " << construct->begin_id
- << " to case target block " << case_target_id
- << " can't be a back-edge";
- }
- // A case target can be the merge block, but can't go past it.
- if (construct->end_pos < case_block->pos) {
- return Fail() << "Switch branch from block " << construct->begin_id
- << " to case target block " << case_target_id
- << " escapes the selection construct";
- }
- if (case_block->header_for_merge != 0 &&
- case_block->header_for_merge != construct->begin_id) {
- // The switch instruction for this case block is an alternate path to
- // the merge block, and hence the merge block is not dominated by its
- // own (different) header.
- return Fail() << "Block " << case_block->id
- << " is a case block for switch-selection header "
- << construct->begin_id << " and also the merge block for "
- << case_block->header_for_merge
- << " (violates dominance rule)";
- }
-
- // Mark the target as a case target.
- if (case_block->case_head_for) {
- // An OpSwitch must dominate its cases.
- return Fail()
- << "Block " << case_target_id
- << " is declared as the switch case target for two OpSwitch "
- "instructions, at blocks "
- << case_block->case_head_for->begin_id << " and "
- << construct->begin_id;
- }
- case_block->case_head_for = construct.get();
- }
- }
- return success();
-}
+ default_block->default_head_for = construct.get();
+ default_block->default_is_merge = default_block->pos == construct->end_pos;
-BlockInfo* FunctionEmitter::HeaderIfBreakable(const Construct* c) {
- if (c == nullptr) {
- return nullptr;
- }
- switch (c->kind) {
- case Construct::kLoop:
- case Construct::kSwitchSelection:
- return GetBlockInfo(c->begin_id);
- case Construct::kContinue: {
- const auto* continue_target = GetBlockInfo(c->begin_id);
- return GetBlockInfo(continue_target->header_for_continue);
- }
- default:
- break;
- }
- return nullptr;
-}
+ // Map a case target to the list of values selecting that case.
+ std::unordered_map<uint32_t, std::vector<uint64_t>> block_to_values;
+ std::vector<uint32_t> case_targets;
+ std::unordered_set<uint64_t> case_values;
-const Construct* FunctionEmitter::SiblingLoopConstruct(
- const Construct* c) const {
- if (c == nullptr || c->kind != Construct::kContinue) {
- return nullptr;
- }
- const uint32_t continue_target_id = c->begin_id;
- const auto* continue_target = GetBlockInfo(continue_target_id);
- const uint32_t header_id = continue_target->header_for_continue;
- if (continue_target_id == header_id) {
- // The continue target is the whole loop.
- return nullptr;
- }
- const auto* candidate = GetBlockInfo(header_id)->construct;
- // Walk up the construct tree until we hit the loop. In future
- // we might handle the corner case where the same block is both a
- // loop header and a selection header. For example, where the
- // loop header block has a conditional branch going to distinct
- // targets inside the loop body.
- while (candidate && candidate->kind != Construct::kLoop) {
- candidate = candidate->parent;
- }
- return candidate;
-}
+ // Process case targets.
+ for (uint32_t iarg = 2; iarg + 1 < branch->NumInOperands(); iarg += 2) {
+ const auto value = branch->GetInOperand(iarg).AsLiteralUint64();
+ const auto case_target_id = branch->GetSingleWordInOperand(iarg + 1);
-bool FunctionEmitter::ClassifyCFGEdges() {
- if (failed()) {
- return false;
- }
-
- // Checks validity of CFG edges leaving each basic block. This implicitly
- // checks dominance rules for headers and continue constructs.
- //
- // For each branch encountered, classify each edge (S,T) as:
- // - a back-edge
- // - a structured exit (specific ways of branching to enclosing construct)
- // - a normal (forward) edge, either natural control flow or a case
- // fallthrough
- //
- // If more than one block is targeted by a normal edge, then S must be a
- // structured header.
- //
- // Term: NEC(B) is the nearest enclosing construct for B.
- //
- // If edge (S,T) is a normal edge, and NEC(S) != NEC(T), then
- // T is the header block of its NEC(T), and
- // NEC(S) is the parent of NEC(T).
-
- for (const auto src : block_order_) {
- TINT_ASSERT(Reader, src > 0);
- auto* src_info = GetBlockInfo(src);
- TINT_ASSERT(Reader, src_info);
- const auto src_pos = src_info->pos;
- const auto& src_construct = *(src_info->construct);
-
- // Compute the ordered list of unique successors.
- std::vector<uint32_t> successors;
- {
- std::unordered_set<uint32_t> visited;
- src_info->basic_block->ForEachSuccessorLabel(
- [&successors, &visited](const uint32_t succ) {
- if (visited.count(succ) == 0) {
- successors.push_back(succ);
- visited.insert(succ);
+ if (case_values.count(value)) {
+ return Fail() << "Duplicate case value " << value << " in OpSwitch in block "
+ << construct->begin_id;
}
- });
- }
-
- // There should only be one backedge per backedge block.
- uint32_t num_backedges = 0;
-
- // Track destinations for normal forward edges, either kForward
- // or kCaseFallThrough. These count toward the need
- // to have a merge instruction. We also track kIfBreak edges
- // because when used with normal forward edges, we'll need
- // to generate a flow guard variable.
- std::vector<uint32_t> normal_forward_edges;
- std::vector<uint32_t> if_break_edges;
-
- if (successors.empty() && src_construct.enclosing_continue) {
- // Kill and return are not allowed in a continue construct.
- return Fail() << "Invalid function exit at block " << src
- << " from continue construct starting at "
- << src_construct.enclosing_continue->begin_id;
- }
-
- for (const auto dest : successors) {
- const auto* dest_info = GetBlockInfo(dest);
- // We've already checked terminators are valid.
- TINT_ASSERT(Reader, dest_info);
- const auto dest_pos = dest_info->pos;
-
- // Insert the edge kind entry and keep a handle to update
- // its classification.
- EdgeKind& edge_kind = src_info->succ_edge[dest];
-
- if (src_pos >= dest_pos) {
- // This is a backedge.
- edge_kind = EdgeKind::kBack;
- num_backedges++;
- const auto* continue_construct = src_construct.enclosing_continue;
- if (!continue_construct) {
- return Fail() << "Invalid backedge (" << src << "->" << dest
- << "): " << src << " is not in a continue construct";
- }
- if (src_pos != continue_construct->end_pos - 1) {
- return Fail() << "Invalid exit (" << src << "->" << dest
- << ") from continue construct: " << src
- << " is not the last block in the continue construct "
- "starting at "
- << src_construct.begin_id
- << " (violates post-dominance rule)";
- }
- const auto* ct_info = GetBlockInfo(continue_construct->begin_id);
- TINT_ASSERT(Reader, ct_info);
- if (ct_info->header_for_continue != dest) {
- return Fail()
- << "Invalid backedge (" << src << "->" << dest
- << "): does not branch to the corresponding loop header, "
- "expected "
- << ct_info->header_for_continue;
- }
- } else {
- // This is a forward edge.
- // For now, classify it that way, but we might update it.
- edge_kind = EdgeKind::kForward;
-
- // Exit from a continue construct can only be from the last block.
- const auto* continue_construct = src_construct.enclosing_continue;
- if (continue_construct != nullptr) {
- if (continue_construct->ContainsPos(src_pos) &&
- !continue_construct->ContainsPos(dest_pos) &&
- (src_pos != continue_construct->end_pos - 1)) {
- return Fail() << "Invalid exit (" << src << "->" << dest
- << ") from continue construct: " << src
- << " is not the last block in the continue construct "
- "starting at "
- << continue_construct->begin_id
- << " (violates post-dominance rule)";
- }
- }
-
- // Check valid structured exit cases.
-
- if (edge_kind == EdgeKind::kForward) {
- // Check for a 'break' from a loop or from a switch.
- const auto* breakable_header = HeaderIfBreakable(
- src_construct.enclosing_loop_or_continue_or_switch);
- if (breakable_header != nullptr) {
- if (dest == breakable_header->merge_for_header) {
- // It's a break.
- edge_kind = (breakable_header->construct->kind ==
- Construct::kSwitchSelection)
- ? EdgeKind::kSwitchBreak
- : EdgeKind::kLoopBreak;
+ case_values.insert(value);
+ if (block_to_values.count(case_target_id) == 0) {
+ case_targets.push_back(case_target_id);
}
- }
+ block_to_values[case_target_id].push_back(value);
}
- if (edge_kind == EdgeKind::kForward) {
- // Check for a 'continue' from within a loop.
- const auto* loop_header =
- HeaderIfBreakable(src_construct.enclosing_loop);
- if (loop_header != nullptr) {
- if (dest == loop_header->continue_for_header) {
- // It's a continue.
- edge_kind = EdgeKind::kLoopContinue;
+ for (uint32_t case_target_id : case_targets) {
+ auto* case_block = GetBlockInfo(case_target_id);
+
+ case_block->case_values =
+ std::make_unique<std::vector<uint64_t>>(std::move(block_to_values[case_target_id]));
+
+ // A case target can't be a back-edge.
+ if (construct->begin_pos >= case_block->pos) {
+ // An OpSwitch must dominate its cases. Also, it can't be a self-loop
+ // as that would be a backedge, and backedges can only target a loop,
+                // and loops use an OpLoopMerge instruction, which can't precede an
+ // OpSwitch.
+ return Fail() << "Switch branch from block " << construct->begin_id
+ << " to case target block " << case_target_id
+ << " can't be a back-edge";
}
- }
- }
-
- if (edge_kind == EdgeKind::kForward) {
- const auto& header_info = *GetBlockInfo(src_construct.begin_id);
- if (dest == header_info.merge_for_header) {
- // Branch to construct's merge block. The loop break and
- // switch break cases have already been covered.
- edge_kind = EdgeKind::kIfBreak;
- }
- }
-
- // A forward edge into a case construct that comes from something
- // other than the OpSwitch is actually a fallthrough.
- if (edge_kind == EdgeKind::kForward) {
- const auto* switch_construct =
- (dest_info->case_head_for ? dest_info->case_head_for
- : dest_info->default_head_for);
- if (switch_construct != nullptr) {
- if (src != switch_construct->begin_id) {
- edge_kind = EdgeKind::kCaseFallThrough;
+ // A case target can be the merge block, but can't go past it.
+ if (construct->end_pos < case_block->pos) {
+ return Fail() << "Switch branch from block " << construct->begin_id
+ << " to case target block " << case_target_id
+ << " escapes the selection construct";
}
- }
+ if (case_block->header_for_merge != 0 &&
+ case_block->header_for_merge != construct->begin_id) {
+ // The switch instruction for this case block is an alternate path to
+ // the merge block, and hence the merge block is not dominated by its
+ // own (different) header.
+ return Fail() << "Block " << case_block->id
+ << " is a case block for switch-selection header "
+ << construct->begin_id << " and also the merge block for "
+ << case_block->header_for_merge << " (violates dominance rule)";
+ }
+
+ // Mark the target as a case target.
+ if (case_block->case_head_for) {
+ // An OpSwitch must dominate its cases.
+ return Fail() << "Block " << case_target_id
+ << " is declared as the switch case target for two OpSwitch "
+ "instructions, at blocks "
+ << case_block->case_head_for->begin_id << " and "
+ << construct->begin_id;
+ }
+ case_block->case_head_for = construct.get();
}
+ }
+ return success();
+}
- // The edge-kind has been finalized.
+BlockInfo* FunctionEmitter::HeaderIfBreakable(const Construct* c) {
+ if (c == nullptr) {
+ return nullptr;
+ }
+ switch (c->kind) {
+ case Construct::kLoop:
+ case Construct::kSwitchSelection:
+ return GetBlockInfo(c->begin_id);
+ case Construct::kContinue: {
+ const auto* continue_target = GetBlockInfo(c->begin_id);
+ return GetBlockInfo(continue_target->header_for_continue);
+ }
+ default:
+ break;
+ }
+ return nullptr;
+}
+
+const Construct* FunctionEmitter::SiblingLoopConstruct(const Construct* c) const {
+ if (c == nullptr || c->kind != Construct::kContinue) {
+ return nullptr;
+ }
+ const uint32_t continue_target_id = c->begin_id;
+ const auto* continue_target = GetBlockInfo(continue_target_id);
+ const uint32_t header_id = continue_target->header_for_continue;
+ if (continue_target_id == header_id) {
+ // The continue target is the whole loop.
+ return nullptr;
+ }
+ const auto* candidate = GetBlockInfo(header_id)->construct;
+ // Walk up the construct tree until we hit the loop. In future
+ // we might handle the corner case where the same block is both a
+ // loop header and a selection header. For example, where the
+ // loop header block has a conditional branch going to distinct
+ // targets inside the loop body.
+ while (candidate && candidate->kind != Construct::kLoop) {
+ candidate = candidate->parent;
+ }
+ return candidate;
+}
+
+bool FunctionEmitter::ClassifyCFGEdges() {
+ if (failed()) {
+ return false;
+ }
- if ((edge_kind == EdgeKind::kForward) ||
- (edge_kind == EdgeKind::kCaseFallThrough)) {
- normal_forward_edges.push_back(dest);
+ // Checks validity of CFG edges leaving each basic block. This implicitly
+ // checks dominance rules for headers and continue constructs.
+ //
+ // For each branch encountered, classify each edge (S,T) as:
+ // - a back-edge
+ // - a structured exit (specific ways of branching to enclosing construct)
+ // - a normal (forward) edge, either natural control flow or a case
+ // fallthrough
+ //
+ // If more than one block is targeted by a normal edge, then S must be a
+ // structured header.
+ //
+ // Term: NEC(B) is the nearest enclosing construct for B.
+ //
+ // If edge (S,T) is a normal edge, and NEC(S) != NEC(T), then
+ // T is the header block of its NEC(T), and
+ // NEC(S) is the parent of NEC(T).
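+
+    // Classification sketch (editorial summary using hypothetical blocks rather
+    // than a real module): for a block S inside a loop construct, an edge from S
+    // to the loop's merge block is classified kLoopBreak (kSwitchBreak when the
+    // nearest breakable construct is a switch-selection), an edge to the loop's
+    // continue target is kLoopContinue, and an edge to the merge block of S's
+    // own construct header is kIfBreak. An in-order edge into a case (or
+    // default) block of an enclosing switch from anything other than the
+    // OpSwitch header becomes kCaseFallThrough; any other forward edge stays
+    // kForward, and an edge going backward in the block order is kBack.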
+
+ for (const auto src : block_order_) {
+ TINT_ASSERT(Reader, src > 0);
+ auto* src_info = GetBlockInfo(src);
+ TINT_ASSERT(Reader, src_info);
+ const auto src_pos = src_info->pos;
+ const auto& src_construct = *(src_info->construct);
+
+ // Compute the ordered list of unique successors.
+ std::vector<uint32_t> successors;
+ {
+ std::unordered_set<uint32_t> visited;
+ src_info->basic_block->ForEachSuccessorLabel(
+ [&successors, &visited](const uint32_t succ) {
+ if (visited.count(succ) == 0) {
+ successors.push_back(succ);
+ visited.insert(succ);
+ }
+ });
}
- if (edge_kind == EdgeKind::kIfBreak) {
- if_break_edges.push_back(dest);
+
+ // There should only be one backedge per backedge block.
+ uint32_t num_backedges = 0;
+
+ // Track destinations for normal forward edges, either kForward
+ // or kCaseFallThrough. These count toward the need
+ // to have a merge instruction. We also track kIfBreak edges
+ // because when used with normal forward edges, we'll need
+ // to generate a flow guard variable.
+ std::vector<uint32_t> normal_forward_edges;
+ std::vector<uint32_t> if_break_edges;
+
+ if (successors.empty() && src_construct.enclosing_continue) {
+ // Kill and return are not allowed in a continue construct.
+ return Fail() << "Invalid function exit at block " << src
+ << " from continue construct starting at "
+ << src_construct.enclosing_continue->begin_id;
}
- if ((edge_kind == EdgeKind::kForward) ||
- (edge_kind == EdgeKind::kCaseFallThrough)) {
- // Check for an invalid forward exit out of this construct.
- if (dest_info->pos > src_construct.end_pos) {
- // In most cases we're bypassing the merge block for the source
- // construct.
- auto end_block = src_construct.end_id;
- const char* end_block_desc = "merge block";
- if (src_construct.kind == Construct::kLoop) {
- // For a loop construct, we have two valid places to go: the
- // continue target or the merge for the loop header, which is
- // further down.
- const auto loop_merge =
- GetBlockInfo(src_construct.begin_id)->merge_for_header;
- if (dest_info->pos >= GetBlockInfo(loop_merge)->pos) {
- // We're bypassing the loop's merge block.
- end_block = loop_merge;
- } else {
- // We're bypassing the loop's continue target, and going into
- // the middle of the continue construct.
- end_block_desc = "continue target";
- }
+ for (const auto dest : successors) {
+ const auto* dest_info = GetBlockInfo(dest);
+ // We've already checked terminators are valid.
+ TINT_ASSERT(Reader, dest_info);
+ const auto dest_pos = dest_info->pos;
+
+ // Insert the edge kind entry and keep a handle to update
+ // its classification.
+ EdgeKind& edge_kind = src_info->succ_edge[dest];
+
+ if (src_pos >= dest_pos) {
+ // This is a backedge.
+ edge_kind = EdgeKind::kBack;
+ num_backedges++;
+ const auto* continue_construct = src_construct.enclosing_continue;
+ if (!continue_construct) {
+ return Fail() << "Invalid backedge (" << src << "->" << dest << "): " << src
+ << " is not in a continue construct";
+ }
+ if (src_pos != continue_construct->end_pos - 1) {
+ return Fail() << "Invalid exit (" << src << "->" << dest
+ << ") from continue construct: " << src
+ << " is not the last block in the continue construct "
+ "starting at "
+ << src_construct.begin_id << " (violates post-dominance rule)";
+ }
+ const auto* ct_info = GetBlockInfo(continue_construct->begin_id);
+ TINT_ASSERT(Reader, ct_info);
+ if (ct_info->header_for_continue != dest) {
+ return Fail() << "Invalid backedge (" << src << "->" << dest
+ << "): does not branch to the corresponding loop header, "
+ "expected "
+ << ct_info->header_for_continue;
+ }
+ } else {
+ // This is a forward edge.
+ // For now, classify it that way, but we might update it.
+ edge_kind = EdgeKind::kForward;
+
+ // Exit from a continue construct can only be from the last block.
+ const auto* continue_construct = src_construct.enclosing_continue;
+ if (continue_construct != nullptr) {
+ if (continue_construct->ContainsPos(src_pos) &&
+ !continue_construct->ContainsPos(dest_pos) &&
+ (src_pos != continue_construct->end_pos - 1)) {
+ return Fail()
+ << "Invalid exit (" << src << "->" << dest
+ << ") from continue construct: " << src
+ << " is not the last block in the continue construct "
+ "starting at "
+ << continue_construct->begin_id << " (violates post-dominance rule)";
+ }
+ }
+
+ // Check valid structured exit cases.
+
+ if (edge_kind == EdgeKind::kForward) {
+ // Check for a 'break' from a loop or from a switch.
+ const auto* breakable_header =
+ HeaderIfBreakable(src_construct.enclosing_loop_or_continue_or_switch);
+ if (breakable_header != nullptr) {
+ if (dest == breakable_header->merge_for_header) {
+ // It's a break.
+ edge_kind =
+ (breakable_header->construct->kind == Construct::kSwitchSelection)
+ ? EdgeKind::kSwitchBreak
+ : EdgeKind::kLoopBreak;
+ }
+ }
+ }
+
+ if (edge_kind == EdgeKind::kForward) {
+ // Check for a 'continue' from within a loop.
+ const auto* loop_header = HeaderIfBreakable(src_construct.enclosing_loop);
+ if (loop_header != nullptr) {
+ if (dest == loop_header->continue_for_header) {
+ // It's a continue.
+ edge_kind = EdgeKind::kLoopContinue;
+ }
+ }
+ }
+
+ if (edge_kind == EdgeKind::kForward) {
+ const auto& header_info = *GetBlockInfo(src_construct.begin_id);
+ if (dest == header_info.merge_for_header) {
+ // Branch to construct's merge block. The loop break and
+ // switch break cases have already been covered.
+ edge_kind = EdgeKind::kIfBreak;
+ }
+ }
+
+ // A forward edge into a case construct that comes from something
+ // other than the OpSwitch is actually a fallthrough.
+ if (edge_kind == EdgeKind::kForward) {
+ const auto* switch_construct =
+ (dest_info->case_head_for ? dest_info->case_head_for
+ : dest_info->default_head_for);
+ if (switch_construct != nullptr) {
+ if (src != switch_construct->begin_id) {
+ edge_kind = EdgeKind::kCaseFallThrough;
+ }
+ }
+ }
+
+ // The edge-kind has been finalized.
+
+ if ((edge_kind == EdgeKind::kForward) ||
+ (edge_kind == EdgeKind::kCaseFallThrough)) {
+ normal_forward_edges.push_back(dest);
+ }
+ if (edge_kind == EdgeKind::kIfBreak) {
+ if_break_edges.push_back(dest);
+ }
+
+ if ((edge_kind == EdgeKind::kForward) ||
+ (edge_kind == EdgeKind::kCaseFallThrough)) {
+ // Check for an invalid forward exit out of this construct.
+ if (dest_info->pos > src_construct.end_pos) {
+ // In most cases we're bypassing the merge block for the source
+ // construct.
+ auto end_block = src_construct.end_id;
+ const char* end_block_desc = "merge block";
+ if (src_construct.kind == Construct::kLoop) {
+ // For a loop construct, we have two valid places to go: the
+ // continue target or the merge for the loop header, which is
+ // further down.
+ const auto loop_merge =
+ GetBlockInfo(src_construct.begin_id)->merge_for_header;
+ if (dest_info->pos >= GetBlockInfo(loop_merge)->pos) {
+ // We're bypassing the loop's merge block.
+ end_block = loop_merge;
+ } else {
+ // We're bypassing the loop's continue target, and going into
+ // the middle of the continue construct.
+ end_block_desc = "continue target";
+ }
+ }
+ return Fail() << "Branch from block " << src << " to block " << dest
+ << " is an invalid exit from construct starting at block "
+ << src_construct.begin_id << "; branch bypasses "
+ << end_block_desc << " " << end_block;
+ }
+
+ // Check dominance.
+
+ // Look for edges that violate the dominance condition: a branch
+ // from X to Y where:
+ // If Y is in a nearest enclosing continue construct headed by
+ // CT:
+ // Y is not CT, and
+                //      In the structured order, X appears before CT or
+ // after CT's backedge block.
+ // Otherwise, if Y is in a nearest enclosing construct
+ // headed by H:
+ // Y is not H, and
+ // In the structured order, X appears before H or after H's
+ // merge block.
+
+ const auto& dest_construct = *(dest_info->construct);
+ if (dest != dest_construct.begin_id && !dest_construct.ContainsPos(src_pos)) {
+ return Fail()
+ << "Branch from " << src << " to " << dest << " bypasses "
+ << (dest_construct.kind == Construct::kContinue ? "continue target "
+ : "header ")
+ << dest_construct.begin_id << " (dominance rule violated)";
+ }
+ }
+ } // end forward edge
+ } // end successor
+
+ if (num_backedges > 1) {
+ return Fail() << "Block " << src << " has too many backedges: " << num_backedges;
+ }
+ if ((normal_forward_edges.size() > 1) && (src_info->merge_for_header == 0)) {
+ return Fail() << "Control flow diverges at block " << src << " (to "
+ << normal_forward_edges[0] << ", " << normal_forward_edges[1]
+ << ") but it is not a structured header (it has no merge "
+ "instruction)";
+ }
+ if ((normal_forward_edges.size() + if_break_edges.size() > 1) &&
+ (src_info->merge_for_header == 0)) {
+ // There is a branch to the merge of an if-selection combined
+            // with another normal forward branch. Control within the
+ // if-selection needs to be gated by a flow predicate.
+ for (auto if_break_dest : if_break_edges) {
+ auto* head_info = GetBlockInfo(GetBlockInfo(if_break_dest)->header_for_merge);
+ // Generate a guard name, but only once.
+ if (head_info->flow_guard_name.empty()) {
+ const std::string guard = "guard" + std::to_string(head_info->id);
+ head_info->flow_guard_name = namer_.MakeDerivedName(guard);
+ }
}
- return Fail()
- << "Branch from block " << src << " to block " << dest
- << " is an invalid exit from construct starting at block "
- << src_construct.begin_id << "; branch bypasses "
- << end_block_desc << " " << end_block;
- }
-
- // Check dominance.
-
- // Look for edges that violate the dominance condition: a branch
- // from X to Y where:
- // If Y is in a nearest enclosing continue construct headed by
- // CT:
- // Y is not CT, and
- // In the structured order, X appears before CT order or
- // after CT's backedge block.
- // Otherwise, if Y is in a nearest enclosing construct
- // headed by H:
- // Y is not H, and
- // In the structured order, X appears before H or after H's
- // merge block.
-
- const auto& dest_construct = *(dest_info->construct);
- if (dest != dest_construct.begin_id &&
- !dest_construct.ContainsPos(src_pos)) {
- return Fail() << "Branch from " << src << " to " << dest
- << " bypasses "
- << (dest_construct.kind == Construct::kContinue
- ? "continue target "
- : "header ")
- << dest_construct.begin_id
- << " (dominance rule violated)";
- }
- }
- } // end forward edge
- } // end successor
-
- if (num_backedges > 1) {
- return Fail() << "Block " << src
- << " has too many backedges: " << num_backedges;
- }
- if ((normal_forward_edges.size() > 1) &&
- (src_info->merge_for_header == 0)) {
- return Fail() << "Control flow diverges at block " << src << " (to "
- << normal_forward_edges[0] << ", "
- << normal_forward_edges[1]
- << ") but it is not a structured header (it has no merge "
- "instruction)";
- }
- if ((normal_forward_edges.size() + if_break_edges.size() > 1) &&
- (src_info->merge_for_header == 0)) {
- // There is a branch to the merge of an if-selection combined
- // with an other normal forward branch. Control within the
- // if-selection needs to be gated by a flow predicate.
- for (auto if_break_dest : if_break_edges) {
- auto* head_info =
- GetBlockInfo(GetBlockInfo(if_break_dest)->header_for_merge);
- // Generate a guard name, but only once.
- if (head_info->flow_guard_name.empty()) {
- const std::string guard = "guard" + std::to_string(head_info->id);
- head_info->flow_guard_name = namer_.MakeDerivedName(guard);
- }
- }
- }
- }
-
- return success();
+ }
+ }
+
+ return success();
}
bool FunctionEmitter::FindIfSelectionInternalHeaders() {
- if (failed()) {
- return false;
- }
- for (auto& construct : constructs_) {
- if (construct->kind != Construct::kIfSelection) {
- continue;
- }
- auto* if_header_info = GetBlockInfo(construct->begin_id);
- const auto* branch = if_header_info->basic_block->terminator();
- const auto true_head = branch->GetSingleWordInOperand(1);
- const auto false_head = branch->GetSingleWordInOperand(2);
-
- auto* true_head_info = GetBlockInfo(true_head);
- auto* false_head_info = GetBlockInfo(false_head);
- const auto true_head_pos = true_head_info->pos;
- const auto false_head_pos = false_head_info->pos;
-
- const bool contains_true = construct->ContainsPos(true_head_pos);
- const bool contains_false = construct->ContainsPos(false_head_pos);
-
- // The cases for each edge are:
- // - kBack: invalid because it's an invalid exit from the selection
- // - kSwitchBreak ; record this for later special processing
- // - kLoopBreak ; record this for later special processing
- // - kLoopContinue ; record this for later special processing
- // - kIfBreak; normal case, may require a guard variable.
- // - kFallThrough; invalid exit from the selection
- // - kForward; normal case
-
- if_header_info->true_kind = if_header_info->succ_edge[true_head];
- if_header_info->false_kind = if_header_info->succ_edge[false_head];
- if (contains_true) {
- if_header_info->true_head = true_head;
- }
- if (contains_false) {
- if_header_info->false_head = false_head;
- }
-
- if (contains_true && (true_head_info->header_for_merge != 0) &&
- (true_head_info->header_for_merge != construct->begin_id)) {
- // The OpBranchConditional instruction for the true head block is an
- // alternate path to the merge block of a construct nested inside the
- // selection, and hence the merge block is not dominated by its own
- // (different) header.
- return Fail() << "Block " << true_head
- << " is the true branch for if-selection header "
- << construct->begin_id
- << " and also the merge block for header block "
- << true_head_info->header_for_merge
- << " (violates dominance rule)";
- }
- if (contains_false && (false_head_info->header_for_merge != 0) &&
- (false_head_info->header_for_merge != construct->begin_id)) {
- // The OpBranchConditional instruction for the false head block is an
- // alternate path to the merge block of a construct nested inside the
- // selection, and hence the merge block is not dominated by its own
- // (different) header.
- return Fail() << "Block " << false_head
- << " is the false branch for if-selection header "
- << construct->begin_id
- << " and also the merge block for header block "
- << false_head_info->header_for_merge
- << " (violates dominance rule)";
- }
-
- if (contains_true && contains_false && (true_head_pos != false_head_pos)) {
- // This construct has both a "then" clause and an "else" clause.
- //
- // We have this structure:
- //
- // Option 1:
- //
- // * condbranch
- // * true-head (start of then-clause)
- // ...
- // * end-then-clause
- // * false-head (start of else-clause)
- // ...
- // * end-false-clause
- // * premerge-head
- // ...
- // * selection merge
- //
- // Option 2:
- //
- // * condbranch
- // * true-head (start of then-clause)
- // ...
- // * end-then-clause
- // * false-head (start of else-clause) and also premerge-head
- // ...
- // * end-false-clause
- // * selection merge
- //
- // Option 3:
- //
- // * condbranch
- // * false-head (start of else-clause)
- // ...
- // * end-else-clause
- // * true-head (start of then-clause) and also premerge-head
- // ...
- // * end-then-clause
- // * selection merge
- //
- // The premerge-head exists if there is a kForward branch from the end
- // of the first clause to a block within the surrounding selection.
- // The first clause might be a then-clause or an else-clause.
- const auto second_head = std::max(true_head_pos, false_head_pos);
- const auto end_first_clause_pos = second_head - 1;
- TINT_ASSERT(Reader, end_first_clause_pos < block_order_.size());
- const auto end_first_clause = block_order_[end_first_clause_pos];
- uint32_t premerge_id = 0;
- uint32_t if_break_id = 0;
- for (auto& then_succ_iter : GetBlockInfo(end_first_clause)->succ_edge) {
- const uint32_t dest_id = then_succ_iter.first;
- const auto edge_kind = then_succ_iter.second;
- switch (edge_kind) {
- case EdgeKind::kIfBreak:
- if_break_id = dest_id;
- break;
- case EdgeKind::kForward: {
- if (construct->ContainsPos(GetBlockInfo(dest_id)->pos)) {
- // It's a premerge.
- if (premerge_id != 0) {
- // TODO(dneto): I think this is impossible to trigger at this
- // point in the flow. It would require a merge instruction to
- // get past the check of "at-most-one-forward-edge".
- return Fail()
- << "invalid structure: then-clause headed by block "
- << true_head << " ending at block " << end_first_clause
- << " has two forward edges to within selection"
- << " going to " << premerge_id << " and " << dest_id;
- }
- premerge_id = dest_id;
- auto* dest_block_info = GetBlockInfo(dest_id);
- if_header_info->premerge_head = dest_id;
- if (dest_block_info->header_for_merge != 0) {
- // Premerge has two edges coming into it, from the then-clause
- // and the else-clause. It's also, by construction, not the
- // merge block of the if-selection. So it must not be a merge
- // block itself. The OpBranchConditional instruction for the
- // false head block is an alternate path to the merge block, and
- // hence the merge block is not dominated by its own (different)
- // header.
- return Fail()
- << "Block " << premerge_id << " is the merge block for "
- << dest_block_info->header_for_merge
- << " but has alternate paths reaching it, starting from"
- << " blocks " << true_head << " and " << false_head
- << " which are the true and false branches for the"
- << " if-selection header block " << construct->begin_id
- << " (violates dominance rule)";
- }
+ if (failed()) {
+ return false;
+ }
+ for (auto& construct : constructs_) {
+ if (construct->kind != Construct::kIfSelection) {
+ continue;
+ }
+ auto* if_header_info = GetBlockInfo(construct->begin_id);
+ const auto* branch = if_header_info->basic_block->terminator();
+ const auto true_head = branch->GetSingleWordInOperand(1);
+ const auto false_head = branch->GetSingleWordInOperand(2);
+
+ auto* true_head_info = GetBlockInfo(true_head);
+ auto* false_head_info = GetBlockInfo(false_head);
+ const auto true_head_pos = true_head_info->pos;
+ const auto false_head_pos = false_head_info->pos;
+
+ const bool contains_true = construct->ContainsPos(true_head_pos);
+ const bool contains_false = construct->ContainsPos(false_head_pos);
+
+ // The cases for each edge are:
+ // - kBack: invalid because it's an invalid exit from the selection
+ // - kSwitchBreak ; record this for later special processing
+ // - kLoopBreak ; record this for later special processing
+ // - kLoopContinue ; record this for later special processing
+ // - kIfBreak; normal case, may require a guard variable.
+ // - kFallThrough; invalid exit from the selection
+ // - kForward; normal case
+
+ if_header_info->true_kind = if_header_info->succ_edge[true_head];
+ if_header_info->false_kind = if_header_info->succ_edge[false_head];
+ if (contains_true) {
+ if_header_info->true_head = true_head;
+ }
+ if (contains_false) {
+ if_header_info->false_head = false_head;
+ }
+
+ if (contains_true && (true_head_info->header_for_merge != 0) &&
+ (true_head_info->header_for_merge != construct->begin_id)) {
+ // The OpBranchConditional instruction for the true head block is an
+ // alternate path to the merge block of a construct nested inside the
+ // selection, and hence the merge block is not dominated by its own
+ // (different) header.
+ return Fail() << "Block " << true_head << " is the true branch for if-selection header "
+ << construct->begin_id << " and also the merge block for header block "
+ << true_head_info->header_for_merge << " (violates dominance rule)";
+ }
+ if (contains_false && (false_head_info->header_for_merge != 0) &&
+ (false_head_info->header_for_merge != construct->begin_id)) {
+ // The OpBranchConditional instruction for the false head block is an
+ // alternate path to the merge block of a construct nested inside the
+ // selection, and hence the merge block is not dominated by its own
+ // (different) header.
+ return Fail() << "Block " << false_head
+ << " is the false branch for if-selection header " << construct->begin_id
+ << " and also the merge block for header block "
+ << false_head_info->header_for_merge << " (violates dominance rule)";
+ }
+
+ if (contains_true && contains_false && (true_head_pos != false_head_pos)) {
+ // This construct has both a "then" clause and an "else" clause.
+ //
+ // We have this structure:
+ //
+ // Option 1:
+ //
+ // * condbranch
+ // * true-head (start of then-clause)
+ // ...
+ // * end-then-clause
+ // * false-head (start of else-clause)
+ // ...
+ // * end-false-clause
+ // * premerge-head
+ // ...
+ // * selection merge
+ //
+ // Option 2:
+ //
+ // * condbranch
+ // * true-head (start of then-clause)
+ // ...
+ // * end-then-clause
+ // * false-head (start of else-clause) and also premerge-head
+ // ...
+ // * end-false-clause
+ // * selection merge
+ //
+ // Option 3:
+ //
+ // * condbranch
+ // * false-head (start of else-clause)
+ // ...
+ // * end-else-clause
+ // * true-head (start of then-clause) and also premerge-head
+ // ...
+ // * end-then-clause
+ // * selection merge
+ //
+ // The premerge-head exists if there is a kForward branch from the end
+ // of the first clause to a block within the surrounding selection.
+ // The first clause might be a then-clause or an else-clause.
+ const auto second_head = std::max(true_head_pos, false_head_pos);
+ const auto end_first_clause_pos = second_head - 1;
+ TINT_ASSERT(Reader, end_first_clause_pos < block_order_.size());
+ const auto end_first_clause = block_order_[end_first_clause_pos];
+ uint32_t premerge_id = 0;
+ uint32_t if_break_id = 0;
+ for (auto& then_succ_iter : GetBlockInfo(end_first_clause)->succ_edge) {
+ const uint32_t dest_id = then_succ_iter.first;
+ const auto edge_kind = then_succ_iter.second;
+ switch (edge_kind) {
+ case EdgeKind::kIfBreak:
+ if_break_id = dest_id;
+ break;
+ case EdgeKind::kForward: {
+ if (construct->ContainsPos(GetBlockInfo(dest_id)->pos)) {
+ // It's a premerge.
+ if (premerge_id != 0) {
+ // TODO(dneto): I think this is impossible to trigger at this
+ // point in the flow. It would require a merge instruction to
+ // get past the check of "at-most-one-forward-edge".
+ return Fail()
+ << "invalid structure: then-clause headed by block "
+ << true_head << " ending at block " << end_first_clause
+ << " has two forward edges to within selection"
+ << " going to " << premerge_id << " and " << dest_id;
+ }
+ premerge_id = dest_id;
+ auto* dest_block_info = GetBlockInfo(dest_id);
+ if_header_info->premerge_head = dest_id;
+ if (dest_block_info->header_for_merge != 0) {
+ // Premerge has two edges coming into it, from the then-clause
+ // and the else-clause. It's also, by construction, not the
+ // merge block of the if-selection. So it must not be a merge
+ // block itself. The OpBranchConditional instruction for the
+ // false head block is an alternate path to the merge block, and
+ // hence the merge block is not dominated by its own (different)
+ // header.
+ return Fail()
+ << "Block " << premerge_id << " is the merge block for "
+ << dest_block_info->header_for_merge
+ << " but has alternate paths reaching it, starting from"
+ << " blocks " << true_head << " and " << false_head
+ << " which are the true and false branches for the"
+ << " if-selection header block " << construct->begin_id
+ << " (violates dominance rule)";
+ }
+ }
+ break;
+ }
+ default:
+ break;
+ }
+ }
+ if (if_break_id != 0 && premerge_id != 0) {
+ return Fail() << "Block " << end_first_clause << " in if-selection headed at block "
+ << construct->begin_id << " branches to both the merge block "
+ << if_break_id << " and also to block " << premerge_id
+ << " later in the selection";
}
- break;
- }
- default:
- break;
}
- }
- if (if_break_id != 0 && premerge_id != 0) {
- return Fail() << "Block " << end_first_clause
- << " in if-selection headed at block "
- << construct->begin_id
- << " branches to both the merge block " << if_break_id
- << " and also to block " << premerge_id
- << " later in the selection";
- }
- }
- }
- return success();
+ }
+ return success();
}
bool FunctionEmitter::EmitFunctionVariables() {
- if (failed()) {
- return false;
- }
- for (auto& inst : *function_.entry()) {
- if (inst.opcode() != SpvOpVariable) {
- continue;
- }
- auto* var_store_type = GetVariableStoreType(inst);
if (failed()) {
- return false;
- }
- const ast::Expression* constructor = nullptr;
- if (inst.NumInOperands() > 1) {
- // SPIR-V initializers are always constants.
- // (OpenCL also allows the ID of an OpVariable, but we don't handle that
- // here.)
- constructor =
- parser_impl_.MakeConstantExpression(inst.GetSingleWordInOperand(1))
- .expr;
- if (!constructor) {
return false;
- }
- }
- auto* var = parser_impl_.MakeVariable(
- inst.result_id(), ast::StorageClass::kNone, var_store_type, false,
- false, constructor, ast::AttributeList{});
- auto* var_decl_stmt = create<ast::VariableDeclStatement>(Source{}, var);
- AddStatement(var_decl_stmt);
- auto* var_type = ty_.Reference(var_store_type, ast::StorageClass::kNone);
- identifier_types_.emplace(inst.result_id(), var_type);
- }
- return success();
+ }
+ for (auto& inst : *function_.entry()) {
+ if (inst.opcode() != SpvOpVariable) {
+ continue;
+ }
+ auto* var_store_type = GetVariableStoreType(inst);
+ if (failed()) {
+ return false;
+ }
+ const ast::Expression* constructor = nullptr;
+ if (inst.NumInOperands() > 1) {
+ // SPIR-V initializers are always constants.
+ // (OpenCL also allows the ID of an OpVariable, but we don't handle that
+ // here.)
+ constructor = parser_impl_.MakeConstantExpression(inst.GetSingleWordInOperand(1)).expr;
+ if (!constructor) {
+ return false;
+ }
+ }
+ auto* var =
+ parser_impl_.MakeVariable(inst.result_id(), ast::StorageClass::kNone, var_store_type,
+ false, false, constructor, ast::AttributeList{});
+ auto* var_decl_stmt = create<ast::VariableDeclStatement>(Source{}, var);
+ AddStatement(var_decl_stmt);
+ auto* var_type = ty_.Reference(var_store_type, ast::StorageClass::kNone);
+ identifier_types_.emplace(inst.result_id(), var_type);
+ }
+ return success();
}
-TypedExpression FunctionEmitter::AddressOfIfNeeded(
- TypedExpression expr,
- const spvtools::opt::Instruction* inst) {
- if (inst && expr) {
- if (auto* spirv_type = type_mgr_->GetType(inst->type_id())) {
- if (expr.type->Is<Reference>() && spirv_type->AsPointer()) {
- return AddressOf(expr);
- }
- }
- }
- return expr;
+TypedExpression FunctionEmitter::AddressOfIfNeeded(TypedExpression expr,
+ const spvtools::opt::Instruction* inst) {
+ if (inst && expr) {
+ if (auto* spirv_type = type_mgr_->GetType(inst->type_id())) {
+ if (expr.type->Is<Reference>() && spirv_type->AsPointer()) {
+ return AddressOf(expr);
+ }
+ }
+ }
+ return expr;
}
TypedExpression FunctionEmitter::MakeExpression(uint32_t id) {
- if (failed()) {
- return {};
- }
- switch (GetSkipReason(id)) {
- case SkipReason::kDontSkip:
- break;
- case SkipReason::kOpaqueObject:
- Fail() << "internal error: unhandled use of opaque object with ID: "
- << id;
- return {};
- case SkipReason::kSinkPointerIntoUse: {
- // Replace the pointer with its source reference expression.
- auto source_expr = GetDefInfo(id)->sink_pointer_source_expr;
- TINT_ASSERT(Reader, source_expr.type->Is<Reference>());
- return source_expr;
- }
- case SkipReason::kPointSizeBuiltinValue: {
- return {ty_.F32(), create<ast::FloatLiteralExpression>(Source{}, 1.0f)};
- }
- case SkipReason::kPointSizeBuiltinPointer:
- Fail() << "unhandled use of a pointer to the PointSize builtin, with ID: "
- << id;
- return {};
- case SkipReason::kSampleMaskInBuiltinPointer:
- Fail()
- << "unhandled use of a pointer to the SampleMask builtin, with ID: "
- << id;
- return {};
- case SkipReason::kSampleMaskOutBuiltinPointer: {
- // The result type is always u32.
- auto name = namer_.Name(sample_mask_out_id);
- return TypedExpression{ty_.U32(),
- create<ast::IdentifierExpression>(
- Source{}, builder_.Symbols().Register(name))};
- }
- }
- auto type_it = identifier_types_.find(id);
- if (type_it != identifier_types_.end()) {
- auto name = namer_.Name(id);
- auto* type = type_it->second;
- return TypedExpression{type,
- create<ast::IdentifierExpression>(
- Source{}, builder_.Symbols().Register(name))};
- }
- if (parser_impl_.IsScalarSpecConstant(id)) {
- auto name = namer_.Name(id);
- return TypedExpression{
- parser_impl_.ConvertType(def_use_mgr_->GetDef(id)->type_id()),
- create<ast::IdentifierExpression>(Source{},
- builder_.Symbols().Register(name))};
- }
- if (singly_used_values_.count(id)) {
- auto expr = std::move(singly_used_values_[id]);
- singly_used_values_.erase(id);
- return expr;
- }
- const auto* spirv_constant = constant_mgr_->FindDeclaredConstant(id);
- if (spirv_constant) {
- return parser_impl_.MakeConstantExpression(id);
- }
- const auto* inst = def_use_mgr_->GetDef(id);
- if (inst == nullptr) {
- Fail() << "ID " << id << " does not have a defining SPIR-V instruction";
+ if (failed()) {
+ return {};
+ }
+ switch (GetSkipReason(id)) {
+ case SkipReason::kDontSkip:
+ break;
+ case SkipReason::kOpaqueObject:
+ Fail() << "internal error: unhandled use of opaque object with ID: " << id;
+ return {};
+ case SkipReason::kSinkPointerIntoUse: {
+ // Replace the pointer with its source reference expression.
+ auto source_expr = GetDefInfo(id)->sink_pointer_source_expr;
+ TINT_ASSERT(Reader, source_expr.type->Is<Reference>());
+ return source_expr;
+ }
+ case SkipReason::kPointSizeBuiltinValue: {
+ return {ty_.F32(), create<ast::FloatLiteralExpression>(
+ Source{}, 1.0, ast::FloatLiteralExpression::Suffix::kF)};
+ }
+ case SkipReason::kPointSizeBuiltinPointer:
+ Fail() << "unhandled use of a pointer to the PointSize builtin, with ID: " << id;
+ return {};
+ case SkipReason::kSampleMaskInBuiltinPointer:
+ Fail() << "unhandled use of a pointer to the SampleMask builtin, with ID: " << id;
+ return {};
+ case SkipReason::kSampleMaskOutBuiltinPointer: {
+ // The result type is always u32.
+ auto name = namer_.Name(sample_mask_out_id);
+ return TypedExpression{ty_.U32(), create<ast::IdentifierExpression>(
+ Source{}, builder_.Symbols().Register(name))};
+ }
+ }
+ auto type_it = identifier_types_.find(id);
+ if (type_it != identifier_types_.end()) {
+ auto name = namer_.Name(id);
+ auto* type = type_it->second;
+ return TypedExpression{
+ type, create<ast::IdentifierExpression>(Source{}, builder_.Symbols().Register(name))};
+ }
+ if (parser_impl_.IsScalarSpecConstant(id)) {
+ auto name = namer_.Name(id);
+ return TypedExpression{
+ parser_impl_.ConvertType(def_use_mgr_->GetDef(id)->type_id()),
+ create<ast::IdentifierExpression>(Source{}, builder_.Symbols().Register(name))};
+ }
+ if (singly_used_values_.count(id)) {
+ auto expr = std::move(singly_used_values_[id]);
+ singly_used_values_.erase(id);
+ return expr;
+ }
+ const auto* spirv_constant = constant_mgr_->FindDeclaredConstant(id);
+ if (spirv_constant) {
+ return parser_impl_.MakeConstantExpression(id);
+ }
+ const auto* inst = def_use_mgr_->GetDef(id);
+ if (inst == nullptr) {
+ Fail() << "ID " << id << " does not have a defining SPIR-V instruction";
+ return {};
+ }
+ switch (inst->opcode()) {
+ case SpvOpVariable: {
+ // This occurs for module-scope variables.
+ auto name = namer_.Name(inst->result_id());
+ return TypedExpression{
+ parser_impl_.ConvertType(inst->type_id(), PtrAs::Ref),
+ create<ast::IdentifierExpression>(Source{}, builder_.Symbols().Register(name))};
+ }
+ case SpvOpUndef:
+ // Substitute a null value for undef.
+ // This case occurs when OpUndef appears at module scope, as if it were
+ // a constant.
+ return parser_impl_.MakeNullExpression(parser_impl_.ConvertType(inst->type_id()));
+
+ default:
+ break;
+ }
+ if (const spvtools::opt::BasicBlock* const bb = ir_context_.get_instr_block(id)) {
+ if (auto* block = GetBlockInfo(bb->id())) {
+ if (block->pos == kInvalidBlockPos) {
+ // The value came from a block not in the block order.
+ // Substitute a null value.
+ return parser_impl_.MakeNullExpression(parser_impl_.ConvertType(inst->type_id()));
+ }
+ }
+ }
+ Fail() << "unhandled expression for ID " << id << "\n" << inst->PrettyPrint();
return {};
- }
- switch (inst->opcode()) {
- case SpvOpVariable: {
- // This occurs for module-scope variables.
- auto name = namer_.Name(inst->result_id());
- return TypedExpression{
- parser_impl_.ConvertType(inst->type_id(), PtrAs::Ref),
- create<ast::IdentifierExpression>(Source{},
- builder_.Symbols().Register(name))};
- }
- case SpvOpUndef:
- // Substitute a null value for undef.
- // This case occurs when OpUndef appears at module scope, as if it were
- // a constant.
- return parser_impl_.MakeNullExpression(
- parser_impl_.ConvertType(inst->type_id()));
-
- default:
- break;
- }
- if (const spvtools::opt::BasicBlock* const bb =
- ir_context_.get_instr_block(id)) {
- if (auto* block = GetBlockInfo(bb->id())) {
- if (block->pos == kInvalidBlockPos) {
- // The value came from a block not in the block order.
- // Substitute a null value.
- return parser_impl_.MakeNullExpression(
- parser_impl_.ConvertType(inst->type_id()));
- }
- }
- }
- Fail() << "unhandled expression for ID " << id << "\n" << inst->PrettyPrint();
- return {};
}
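
MakeExpression above resolves a SPIR-V result ID by probing several sources in priority order: skip reasons, already-named identifiers, scalar spec constants, singly-used deferred expressions, declared constants, and finally the defining instruction. A minimal standalone sketch of that probe-in-order pattern, using hypothetical names and plain standard-library types rather than Tint's classes:

    #include <cstdint>
    #include <map>
    #include <optional>
    #include <string>
    #include <utility>

    struct MiniResolver {
        std::map<uint32_t, std::string> identifier_types;    // IDs already bound to named lets/vars
        std::map<uint32_t, std::string> singly_used_values;  // expressions deferred to their single use
        std::map<uint32_t, std::string> constants;           // module-scope constants

        // Probe each source in priority order; the first source that knows the ID wins.
        std::optional<std::string> Resolve(uint32_t id) {
            if (auto it = identifier_types.find(id); it != identifier_types.end()) {
                return "ident:" + it->second;
            }
            if (auto it = singly_used_values.find(id); it != singly_used_values.end()) {
                auto expr = std::move(it->second);  // consume: the value has exactly one use
                singly_used_values.erase(it);
                return expr;
            }
            if (auto it = constants.find(id); it != constants.end()) {
                return "const:" + it->second;
            }
            return std::nullopt;  // would fall back to the defining instruction, or fail
        }
    };

The move-and-erase on the singly-used map mirrors how a deferred expression is consumed exactly once, at its single use site.
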
bool FunctionEmitter::EmitFunctionBodyStatements() {
- // Dump the basic blocks in order, grouped by construct.
-
- // We maintain a stack of StatementBlock objects, where new statements
- // are always written to the topmost entry of the stack. By this point in
- // processing, we have already recorded the interesting control flow
- // boundaries in the BlockInfo and associated Construct objects. As we
- // enter a new statement grouping, we push onto the stack, and also schedule
- // the statement block's completion and removal at a future block's ID.
-
- // Upon entry, the statement stack has one entry representing the whole
- // function.
- TINT_ASSERT(Reader, !constructs_.empty());
- Construct* function_construct = constructs_[0].get();
- TINT_ASSERT(Reader, function_construct != nullptr);
- TINT_ASSERT(Reader, function_construct->kind == Construct::kFunction);
- // Make the first entry valid by filling in the construct field, which
- // had not been computed at the time the entry was first created.
- // TODO(dneto): refactor how the first construct is created vs.
- // this statements stack entry is populated.
- TINT_ASSERT(Reader, statements_stack_.size() == 1);
- statements_stack_[0].SetConstruct(function_construct);
-
- for (auto block_id : block_order()) {
- if (!EmitBasicBlock(*GetBlockInfo(block_id))) {
- return false;
- }
- }
- return success();
+ // Dump the basic blocks in order, grouped by construct.
+
+ // We maintain a stack of StatementBlock objects, where new statements
+ // are always written to the topmost entry of the stack. By this point in
+ // processing, we have already recorded the interesting control flow
+ // boundaries in the BlockInfo and associated Construct objects. As we
+ // enter a new statement grouping, we push onto the stack, and also schedule
+ // the statement block's completion and removal at a future block's ID.
+
+ // Upon entry, the statement stack has one entry representing the whole
+ // function.
+ TINT_ASSERT(Reader, !constructs_.empty());
+ Construct* function_construct = constructs_[0].get();
+ TINT_ASSERT(Reader, function_construct != nullptr);
+ TINT_ASSERT(Reader, function_construct->kind == Construct::kFunction);
+ // Make the first entry valid by filling in the construct field, which
+ // had not been computed at the time the entry was first created.
+ // TODO(dneto): refactor how the first construct is created vs.
+ // this statements stack entry is populated.
+ TINT_ASSERT(Reader, statements_stack_.size() == 1);
+ statements_stack_[0].SetConstruct(function_construct);
+
+ for (auto block_id : block_order()) {
+ if (!EmitBasicBlock(*GetBlockInfo(block_id))) {
+ return false;
+ }
+ }
+ return success();
}
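
The comments above describe a stack discipline: statements are always appended to the topmost StatementBlock, and each entry records the block ID at which it must be finalized and popped. A small standalone sketch of that discipline (illustrative names only, not Tint's types):

    #include <cstdint>
    #include <functional>
    #include <string>
    #include <utility>
    #include <vector>

    // One entry per open construct: collects statements until its end block is reached.
    struct MiniStatementBlock {
        uint32_t end_id;                                       // finalize when traversal reaches this block
        std::vector<std::string> stmts;                        // statements gathered so far
        std::function<void(std::vector<std::string>&&)> done;  // completion action (e.g. build an AST node)
    };

    class MiniStatementStack {
      public:
        void Push(uint32_t end_id, std::function<void(std::vector<std::string>&&)> done) {
            stack_.push_back({end_id, {}, std::move(done)});
        }
        void Add(std::string stmt) { stack_.back().stmts.push_back(std::move(stmt)); }
        // Called on entry to each block, mirroring the "close off previous constructs" loop.
        void CloseConstructsEndingAt(uint32_t block_id) {
            while (!stack_.empty() && stack_.back().end_id == block_id) {
                auto top = std::move(stack_.back());
                stack_.pop_back();
                if (top.done) {
                    top.done(std::move(top.stmts));
                }
            }
        }

      private:
        std::vector<MiniStatementBlock> stack_;
    };

EmitBasicBlock's opening loop ("Close off previous constructs") corresponds to CloseConstructsEndingAt here.
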
bool FunctionEmitter::EmitBasicBlock(const BlockInfo& block_info) {
- // Close off previous constructs.
- while (!statements_stack_.empty() &&
- (statements_stack_.back().GetEndId() == block_info.id)) {
- statements_stack_.back().Finalize(&builder_);
- statements_stack_.pop_back();
- }
- if (statements_stack_.empty()) {
- return Fail() << "internal error: statements stack empty at block "
- << block_info.id;
- }
-
- // Enter new constructs.
-
-  std::vector<const Construct*> entering_constructs;  // innermost comes first
- {
- auto* here = block_info.construct;
- auto* const top_construct = statements_stack_.back().GetConstruct();
- while (here != top_construct) {
- // Only enter a construct at its header block.
- if (here->begin_id == block_info.id) {
- entering_constructs.push_back(here);
- }
- here = here->parent;
- }
- }
- // What constructs can we have entered?
- // - It can't be kFunction, because there is only one of those, and it was
- // already on the stack at the outermost level.
- // - We have at most one of kSwitchSelection, or kLoop because each of those
- // is headed by a block with a merge instruction (OpLoopMerge for kLoop,
- // and OpSelectionMerge for kSwitchSelection).
- // - When there is a kIfSelection, it can't contain another construct,
- // because both would have to have their own distinct merge instructions
- // and distinct terminators.
- // - A kContinue can contain a kContinue
- // This is possible in Vulkan SPIR-V, but Tint disallows this by the rule
-  //   that a block can be a continue target for at most one header block. See
- // test DISABLED_BlockIsContinueForMoreThanOneHeader. If we generalize this,
- // then by a dominance argument, the inner loop continue target can only be
- // a single-block loop.
- // TODO(dneto): Handle this case.
- // - If a kLoop is on the outside, its terminator is either:
- // - an OpBranch, in which case there is no other construct.
-  //    - an OpBranchConditional, in which case there is either a kIfSelection
- // (when both branch targets are different and are inside the loop),
- // or no other construct (because the branch targets are the same,
- // or one of them is a break or continue).
- // - All that's left is a kContinue on the outside, and one of
- // kIfSelection, kSwitchSelection, kLoop on the inside.
- //
- // The kContinue can be the parent of the other. For example, a selection
- // starting at the first block of a continue construct.
- //
- // The kContinue can't be the child of the other because either:
- // - The other can't be kLoop because:
-  //     - If the kLoop is for a different loop than the kContinue, then
- // the kContinue must be its own loop header, and so the same
- // block is two different loops. That's a contradiction.
-  //   - If the kLoop is for the same loop, then this is a contradiction
- // because a kContinue and its kLoop have disjoint block sets.
- // - The other construct can't be a selection because:
- // - The kContinue construct is the entire loop, i.e. the continue
- // target is its own loop header block. But then the continue target
- // has an OpLoopMerge instruction, which contradicts this block being
- // a selection header.
-  //     - The kContinue is in a multi-block loop that has a non-empty
- // kLoop; and the selection contains the kContinue block but not the
- // loop block. That breaks dominance rules. That is, the continue
- // target is dominated by that loop header, and so gets found by the
- // block traversal on the outside before the selection is found. The
- // selection is inside the outer loop.
- //
- // So we fall into one of the following cases:
- // - We are entering 0 or 1 constructs, or
- // - We are entering 2 constructs, with the outer one being a kContinue or
- // kLoop, the inner one is not a continue.
- if (entering_constructs.size() > 2) {
- return Fail() << "internal error: bad construct nesting found";
- }
- if (entering_constructs.size() == 2) {
- auto inner_kind = entering_constructs[0]->kind;
- auto outer_kind = entering_constructs[1]->kind;
- if (outer_kind != Construct::kContinue && outer_kind != Construct::kLoop) {
- return Fail()
- << "internal error: bad construct nesting. Only a Continue "
- "or a Loop construct can be outer construct on same block. "
- "Got outer kind "
- << int(outer_kind) << " inner kind " << int(inner_kind);
- }
- if (inner_kind == Construct::kContinue) {
- return Fail() << "internal error: unsupported construct nesting: "
- "Continue around Continue";
- }
- if (inner_kind != Construct::kIfSelection &&
- inner_kind != Construct::kSwitchSelection &&
- inner_kind != Construct::kLoop) {
- return Fail() << "internal error: bad construct nesting. Continue around "
- "something other than if, switch, or loop";
- }
- }
-
- // Enter constructs from outermost to innermost.
- // kLoop and kContinue push a new statement-block onto the stack before
- // emitting statements in the block.
- // kIfSelection and kSwitchSelection emit statements in the block and then
-  // push a new statement-block. Only emit the statements in the block
- // once.
-
- // Have we emitted the statements for this block?
- bool emitted = false;
-
- // When entering an if-selection or switch-selection, we will emit the WGSL
- // construct to cause the divergent branching. But otherwise, we will
- // emit a "normal" block terminator, which occurs at the end of this method.
- bool has_normal_terminator = true;
-
- for (auto iter = entering_constructs.rbegin();
- iter != entering_constructs.rend(); ++iter) {
- const Construct* construct = *iter;
-
- switch (construct->kind) {
- case Construct::kFunction:
- return Fail() << "internal error: nested function construct";
-
- case Construct::kLoop:
- if (!EmitLoopStart(construct)) {
- return false;
- }
- if (!EmitStatementsInBasicBlock(block_info, &emitted)) {
- return false;
- }
- break;
-
- case Construct::kContinue:
- if (block_info.is_continue_entire_loop) {
- if (!EmitLoopStart(construct)) {
- return false;
- }
- if (!EmitStatementsInBasicBlock(block_info, &emitted)) {
- return false;
- }
- } else {
- if (!EmitContinuingStart(construct)) {
- return false;
- }
- }
- break;
+ // Close off previous constructs.
+ while (!statements_stack_.empty() && (statements_stack_.back().GetEndId() == block_info.id)) {
+ statements_stack_.back().Finalize(&builder_);
+ statements_stack_.pop_back();
+ }
+ if (statements_stack_.empty()) {
+ return Fail() << "internal error: statements stack empty at block " << block_info.id;
+ }
+
+ // Enter new constructs.
- case Construct::kIfSelection:
- if (!EmitStatementsInBasicBlock(block_info, &emitted)) {
- return false;
+    std::vector<const Construct*> entering_constructs;  // innermost comes first
+ {
+ auto* here = block_info.construct;
+ auto* const top_construct = statements_stack_.back().GetConstruct();
+ while (here != top_construct) {
+ // Only enter a construct at its header block.
+ if (here->begin_id == block_info.id) {
+ entering_constructs.push_back(here);
+ }
+ here = here->parent;
}
- if (!EmitIfStart(block_info)) {
- return false;
+ }
+ // What constructs can we have entered?
+ // - It can't be kFunction, because there is only one of those, and it was
+ // already on the stack at the outermost level.
+ // - We have at most one of kSwitchSelection, or kLoop because each of those
+ // is headed by a block with a merge instruction (OpLoopMerge for kLoop,
+ // and OpSelectionMerge for kSwitchSelection).
+ // - When there is a kIfSelection, it can't contain another construct,
+ // because both would have to have their own distinct merge instructions
+ // and distinct terminators.
+ // - A kContinue can contain a kContinue
+ // This is possible in Vulkan SPIR-V, but Tint disallows this by the rule
+    //   that a block can be a continue target for at most one header block. See
+ // test DISABLED_BlockIsContinueForMoreThanOneHeader. If we generalize this,
+ // then by a dominance argument, the inner loop continue target can only be
+ // a single-block loop.
+ // TODO(dneto): Handle this case.
+ // - If a kLoop is on the outside, its terminator is either:
+ // - an OpBranch, in which case there is no other construct.
+    //   - an OpBranchConditional, in which case there is either a kIfSelection
+ // (when both branch targets are different and are inside the loop),
+ // or no other construct (because the branch targets are the same,
+ // or one of them is a break or continue).
+ // - All that's left is a kContinue on the outside, and one of
+ // kIfSelection, kSwitchSelection, kLoop on the inside.
+ //
+ // The kContinue can be the parent of the other. For example, a selection
+ // starting at the first block of a continue construct.
+ //
+ // The kContinue can't be the child of the other because either:
+ // - The other can't be kLoop because:
+    //    - If the kLoop is for a different loop than the kContinue, then
+ // the kContinue must be its own loop header, and so the same
+ // block is two different loops. That's a contradiction.
+    //    - If the kLoop is for the same loop, then this is a contradiction
+ // because a kContinue and its kLoop have disjoint block sets.
+ // - The other construct can't be a selection because:
+ // - The kContinue construct is the entire loop, i.e. the continue
+ // target is its own loop header block. But then the continue target
+ // has an OpLoopMerge instruction, which contradicts this block being
+ // a selection header.
+    //    - The kContinue is in a multi-block loop that has a non-empty
+ // kLoop; and the selection contains the kContinue block but not the
+ // loop block. That breaks dominance rules. That is, the continue
+ // target is dominated by that loop header, and so gets found by the
+ // block traversal on the outside before the selection is found. The
+ // selection is inside the outer loop.
+ //
+ // So we fall into one of the following cases:
+ // - We are entering 0 or 1 constructs, or
+ // - We are entering 2 constructs, with the outer one being a kContinue or
+ // kLoop, the inner one is not a continue.
+ if (entering_constructs.size() > 2) {
+ return Fail() << "internal error: bad construct nesting found";
+ }
+ if (entering_constructs.size() == 2) {
+ auto inner_kind = entering_constructs[0]->kind;
+ auto outer_kind = entering_constructs[1]->kind;
+ if (outer_kind != Construct::kContinue && outer_kind != Construct::kLoop) {
+ return Fail() << "internal error: bad construct nesting. Only a Continue "
+ "or a Loop construct can be outer construct on same block. "
+ "Got outer kind "
+ << int(outer_kind) << " inner kind " << int(inner_kind);
}
- has_normal_terminator = false;
- break;
-
- case Construct::kSwitchSelection:
- if (!EmitStatementsInBasicBlock(block_info, &emitted)) {
- return false;
+ if (inner_kind == Construct::kContinue) {
+ return Fail() << "internal error: unsupported construct nesting: "
+ "Continue around Continue";
}
- if (!EmitSwitchStart(block_info)) {
- return false;
+ if (inner_kind != Construct::kIfSelection && inner_kind != Construct::kSwitchSelection &&
+ inner_kind != Construct::kLoop) {
+ return Fail() << "internal error: bad construct nesting. Continue around "
+ "something other than if, switch, or loop";
}
- has_normal_terminator = false;
- break;
}
- }
- // If we aren't starting or transitioning, then emit the normal
- // statements now.
- if (!EmitStatementsInBasicBlock(block_info, &emitted)) {
- return false;
- }
+ // Enter constructs from outermost to innermost.
+ // kLoop and kContinue push a new statement-block onto the stack before
+ // emitting statements in the block.
+ // kIfSelection and kSwitchSelection emit statements in the block and then
+    // push a new statement-block. Only emit the statements in the block
+ // once.
+
+ // Have we emitted the statements for this block?
+ bool emitted = false;
+
+ // When entering an if-selection or switch-selection, we will emit the WGSL
+ // construct to cause the divergent branching. But otherwise, we will
+ // emit a "normal" block terminator, which occurs at the end of this method.
+ bool has_normal_terminator = true;
+
+ for (auto iter = entering_constructs.rbegin(); iter != entering_constructs.rend(); ++iter) {
+ const Construct* construct = *iter;
+
+ switch (construct->kind) {
+ case Construct::kFunction:
+ return Fail() << "internal error: nested function construct";
+
+ case Construct::kLoop:
+ if (!EmitLoopStart(construct)) {
+ return false;
+ }
+ if (!EmitStatementsInBasicBlock(block_info, &emitted)) {
+ return false;
+ }
+ break;
+
+ case Construct::kContinue:
+ if (block_info.is_continue_entire_loop) {
+ if (!EmitLoopStart(construct)) {
+ return false;
+ }
+ if (!EmitStatementsInBasicBlock(block_info, &emitted)) {
+ return false;
+ }
+ } else {
+ if (!EmitContinuingStart(construct)) {
+ return false;
+ }
+ }
+ break;
+
+ case Construct::kIfSelection:
+ if (!EmitStatementsInBasicBlock(block_info, &emitted)) {
+ return false;
+ }
+ if (!EmitIfStart(block_info)) {
+ return false;
+ }
+ has_normal_terminator = false;
+ break;
+
+ case Construct::kSwitchSelection:
+ if (!EmitStatementsInBasicBlock(block_info, &emitted)) {
+ return false;
+ }
+ if (!EmitSwitchStart(block_info)) {
+ return false;
+ }
+ has_normal_terminator = false;
+ break;
+ }
+ }
- if (has_normal_terminator) {
- if (!EmitNormalTerminator(block_info)) {
- return false;
+ // If we aren't starting or transitioning, then emit the normal
+ // statements now.
+ if (!EmitStatementsInBasicBlock(block_info, &emitted)) {
+ return false;
}
- }
- return success();
+
+ if (has_normal_terminator) {
+ if (!EmitNormalTerminator(block_info)) {
+ return false;
+ }
+ }
+ return success();
}
bool FunctionEmitter::EmitIfStart(const BlockInfo& block_info) {
- // The block is the if-header block. So its construct is the if construct.
- auto* construct = block_info.construct;
- TINT_ASSERT(Reader, construct->kind == Construct::kIfSelection);
- TINT_ASSERT(Reader, construct->begin_id == block_info.id);
-
- const uint32_t true_head = block_info.true_head;
- const uint32_t false_head = block_info.false_head;
- const uint32_t premerge_head = block_info.premerge_head;
-
- const std::string guard_name = block_info.flow_guard_name;
- if (!guard_name.empty()) {
- // Declare the guard variable just before the "if", initialized to true.
- auto* guard_var =
- builder_.Var(guard_name, builder_.ty.bool_(), MakeTrue(Source{}));
- auto* guard_decl = create<ast::VariableDeclStatement>(Source{}, guard_var);
- AddStatement(guard_decl);
- }
-
- const auto condition_id =
- block_info.basic_block->terminator()->GetSingleWordInOperand(0);
- auto* cond = MakeExpression(condition_id).expr;
- if (!cond) {
- return false;
- }
- // Generate the code for the condition.
- auto* builder = AddStatementBuilder<IfStatementBuilder>(cond);
-
- // Compute the block IDs that should end the then-clause and the else-clause.
-
- // We need to know where the *emitted* selection should end, i.e. the intended
- // merge block id. That should be the current premerge block, if it exists,
- // or otherwise the declared merge block.
- //
- // This is another way to think about it:
- // If there is a premerge, then there are three cases:
- // - premerge_head is different from the true_head and false_head:
- // - Premerge comes last. In effect, move the selection merge up
- // to where the premerge begins.
- // - premerge_head is the same as the false_head
- // - This is really an if-then without an else clause.
- // Move the merge up to where the premerge is.
- // - premerge_head is the same as the true_head
-  //       - This is really an if-else without a then clause.
- // Emit it as: if (cond) {} else {....}
- // Move the merge up to where the premerge is.
- const uint32_t intended_merge =
- premerge_head ? premerge_head : construct->end_id;
-
- // then-clause:
- // If true_head exists:
- // spans from true head to the earlier of the false head (if it exists)
- // or the selection merge.
- // Otherwise:
-  //     ends at the false head (if it exists), otherwise at the selection
-  //     end.
- const uint32_t then_end = false_head ? false_head : intended_merge;
-
- // else-clause:
- // ends at the premerge head (if it exists) or at the selection end.
- const uint32_t else_end = premerge_head ? premerge_head : intended_merge;
-
- const bool true_is_break = (block_info.true_kind == EdgeKind::kSwitchBreak) ||
- (block_info.true_kind == EdgeKind::kLoopBreak);
- const bool false_is_break =
- (block_info.false_kind == EdgeKind::kSwitchBreak) ||
- (block_info.false_kind == EdgeKind::kLoopBreak);
- const bool true_is_continue = block_info.true_kind == EdgeKind::kLoopContinue;
- const bool false_is_continue =
- block_info.false_kind == EdgeKind::kLoopContinue;
-
- // Push statement blocks for the then-clause and the else-clause.
- // But make sure we do it in the right order.
- auto push_else = [this, builder, else_end, construct, false_is_break,
- false_is_continue]() {
- // Push the else clause onto the stack first.
- PushNewStatementBlock(
- construct, else_end, [=](const ast::StatementList& stmts) {
- // Only set the else-clause if there are statements to fill it.
- if (!stmts.empty()) {
- // The "else" consists of the statement list from the top of
-          // the statements stack, without an elseif condition.
- auto* else_body = create<ast::BlockStatement>(Source{}, stmts);
- builder->else_stmts.emplace_back(
- create<ast::ElseStatement>(Source{}, nullptr, else_body));
- }
+ // The block is the if-header block. So its construct is the if construct.
+ auto* construct = block_info.construct;
+ TINT_ASSERT(Reader, construct->kind == Construct::kIfSelection);
+ TINT_ASSERT(Reader, construct->begin_id == block_info.id);
+
+ const uint32_t true_head = block_info.true_head;
+ const uint32_t false_head = block_info.false_head;
+ const uint32_t premerge_head = block_info.premerge_head;
+
+ const std::string guard_name = block_info.flow_guard_name;
+ if (!guard_name.empty()) {
+ // Declare the guard variable just before the "if", initialized to true.
+ auto* guard_var = builder_.Var(guard_name, builder_.ty.bool_(), MakeTrue(Source{}));
+ auto* guard_decl = create<ast::VariableDeclStatement>(Source{}, guard_var);
+ AddStatement(guard_decl);
+ }
+
+ const auto condition_id = block_info.basic_block->terminator()->GetSingleWordInOperand(0);
+ auto* cond = MakeExpression(condition_id).expr;
+ if (!cond) {
+ return false;
+ }
+ // Generate the code for the condition.
+ auto* builder = AddStatementBuilder<IfStatementBuilder>(cond);
+
+ // Compute the block IDs that should end the then-clause and the else-clause.
+
+ // We need to know where the *emitted* selection should end, i.e. the intended
+ // merge block id. That should be the current premerge block, if it exists,
+ // or otherwise the declared merge block.
+ //
+ // This is another way to think about it:
+ // If there is a premerge, then there are three cases:
+ // - premerge_head is different from the true_head and false_head:
+ // - Premerge comes last. In effect, move the selection merge up
+ // to where the premerge begins.
+ // - premerge_head is the same as the false_head
+ // - This is really an if-then without an else clause.
+ // Move the merge up to where the premerge is.
+ // - premerge_head is the same as the true_head
+    //     - This is really an if-else without a then clause.
+ // Emit it as: if (cond) {} else {....}
+ // Move the merge up to where the premerge is.
+ const uint32_t intended_merge = premerge_head ? premerge_head : construct->end_id;
+
+ // then-clause:
+ // If true_head exists:
+ // spans from true head to the earlier of the false head (if it exists)
+ // or the selection merge.
+ // Otherwise:
+    //     ends at the false head (if it exists), otherwise at the selection
+    //     end.
+ const uint32_t then_end = false_head ? false_head : intended_merge;
+
+ // else-clause:
+ // ends at the premerge head (if it exists) or at the selection end.
+ const uint32_t else_end = premerge_head ? premerge_head : intended_merge;
+
+ const bool true_is_break = (block_info.true_kind == EdgeKind::kSwitchBreak) ||
+ (block_info.true_kind == EdgeKind::kLoopBreak);
+ const bool false_is_break = (block_info.false_kind == EdgeKind::kSwitchBreak) ||
+ (block_info.false_kind == EdgeKind::kLoopBreak);
+ const bool true_is_continue = block_info.true_kind == EdgeKind::kLoopContinue;
+ const bool false_is_continue = block_info.false_kind == EdgeKind::kLoopContinue;
+
+ // Push statement blocks for the then-clause and the else-clause.
+ // But make sure we do it in the right order.
+ auto push_else = [this, builder, else_end, construct, false_is_break, false_is_continue]() {
+ // Push the else clause onto the stack first.
+ PushNewStatementBlock(construct, else_end, [=](const ast::StatementList& stmts) {
+ // Only set the else-clause if there are statements to fill it.
+ if (!stmts.empty()) {
+ // The "else" consists of the statement list from the top of
+                // the statements stack, without an "else if" condition.
+ builder->else_stmt = create<ast::BlockStatement>(Source{}, stmts);
+ }
});
- if (false_is_break) {
- AddStatement(create<ast::BreakStatement>(Source{}));
- }
- if (false_is_continue) {
- AddStatement(create<ast::ContinueStatement>(Source{}));
- }
- };
-
- if (!true_is_break && !true_is_continue &&
- (GetBlockInfo(else_end)->pos < GetBlockInfo(then_end)->pos)) {
- // Process the else-clause first. The then-clause will be empty so avoid
- // pushing onto the stack at all.
- push_else();
- } else {
- // Blocks for the then-clause appear before blocks for the else-clause.
- // So push the else-clause handling onto the stack first. The else-clause
- // might be empty, but this works anyway.
-
- // Handle the premerge, if it exists.
- if (premerge_head) {
- // The top of the stack is the statement block that is the parent of the
- // if-statement. Adding statements now will place them after that 'if'.
- if (guard_name.empty()) {
- // We won't have a flow guard for the premerge.
- // Insert a trivial if(true) { ... } around the blocks from the
- // premerge head until the end of the if-selection. This is needed
- // to ensure uniform reconvergence occurs at the end of the if-selection
- // just like in the original SPIR-V.
- PushTrueGuard(construct->end_id);
- } else {
- // Add a flow guard around the blocks in the premerge area.
- PushGuard(guard_name, construct->end_id);
- }
- }
-
- push_else();
- if (true_head && false_head && !guard_name.empty()) {
- // There are non-trivial then and else clauses.
- // We have to guard the start of the else.
- PushGuard(guard_name, else_end);
- }
-
- // Push the then clause onto the stack.
- PushNewStatementBlock(
- construct, then_end, [=](const ast::StatementList& stmts) {
- builder->body = create<ast::BlockStatement>(Source{}, stmts);
+ if (false_is_break) {
+ AddStatement(create<ast::BreakStatement>(Source{}));
+ }
+ if (false_is_continue) {
+ AddStatement(create<ast::ContinueStatement>(Source{}));
+ }
+ };
+
+ if (!true_is_break && !true_is_continue &&
+ (GetBlockInfo(else_end)->pos < GetBlockInfo(then_end)->pos)) {
+ // Process the else-clause first. The then-clause will be empty so avoid
+ // pushing onto the stack at all.
+ push_else();
+ } else {
+ // Blocks for the then-clause appear before blocks for the else-clause.
+ // So push the else-clause handling onto the stack first. The else-clause
+ // might be empty, but this works anyway.
+
+ // Handle the premerge, if it exists.
+ if (premerge_head) {
+ // The top of the stack is the statement block that is the parent of the
+ // if-statement. Adding statements now will place them after that 'if'.
+ if (guard_name.empty()) {
+ // We won't have a flow guard for the premerge.
+ // Insert a trivial if(true) { ... } around the blocks from the
+ // premerge head until the end of the if-selection. This is needed
+ // to ensure uniform reconvergence occurs at the end of the if-selection
+ // just like in the original SPIR-V.
+ PushTrueGuard(construct->end_id);
+ } else {
+ // Add a flow guard around the blocks in the premerge area.
+ PushGuard(guard_name, construct->end_id);
+ }
+ }
+
+ push_else();
+ if (true_head && false_head && !guard_name.empty()) {
+ // There are non-trivial then and else clauses.
+ // We have to guard the start of the else.
+ PushGuard(guard_name, else_end);
+ }
+
+ // Push the then clause onto the stack.
+ PushNewStatementBlock(construct, then_end, [=](const ast::StatementList& stmts) {
+ builder->body = create<ast::BlockStatement>(Source{}, stmts);
});
- if (true_is_break) {
- AddStatement(create<ast::BreakStatement>(Source{}));
- }
- if (true_is_continue) {
- AddStatement(create<ast::ContinueStatement>(Source{}));
+ if (true_is_break) {
+ AddStatement(create<ast::BreakStatement>(Source{}));
+ }
+ if (true_is_continue) {
+ AddStatement(create<ast::ContinueStatement>(Source{}));
+ }
}
- }
- return success();
+ return success();
}
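
The flow-guard handling above produces a recognizable shape: a guard variable declared just before the emitted "if" and initialized to true, cleared on the early-exit path, and tested around the premerge blocks so that both paths reconverge at the original merge point. A plain C++ analogue of that shape (an illustration of the emitted control flow, not generated WGSL):

    #include <cstdio>

    void guarded_if(bool cond) {
        bool guard = true;  // declared just before the emitted "if", initialized to true
        if (cond) {
            std::printf("then-clause\n");
            guard = false;  // an if-break: signal an early exit from the selection
        } else {
            std::printf("else-clause\n");
        }
        if (guard) {  // premerge blocks are guarded so the early exit skips them
            std::printf("premerge blocks\n");
        }
        // merge point: both paths reconverge here, as in the original SPIR-V
    }

    int main() {
        guarded_if(true);
        guarded_if(false);
    }
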
bool FunctionEmitter::EmitSwitchStart(const BlockInfo& block_info) {
- // The block is the if-header block. So its construct is the if construct.
- auto* construct = block_info.construct;
- TINT_ASSERT(Reader, construct->kind == Construct::kSwitchSelection);
- TINT_ASSERT(Reader, construct->begin_id == block_info.id);
- const auto* branch = block_info.basic_block->terminator();
-
- const auto selector_id = branch->GetSingleWordInOperand(0);
- // Generate the code for the selector.
- auto selector = MakeExpression(selector_id);
- if (!selector) {
- return false;
- }
- // First, push the statement block for the entire switch.
- auto* swch = AddStatementBuilder<SwitchStatementBuilder>(selector.expr);
-
- // Grab a pointer to the case list. It will get buried in the statement block
- // stack.
- PushNewStatementBlock(construct, construct->end_id, nullptr);
-
- // We will push statement-blocks onto the stack to gather the statements in
-  // the default clause and the case clauses. Determine the list of blocks
- // that start each clause.
- std::vector<const BlockInfo*> clause_heads;
-
- // Collect the case clauses, even if they are just the merge block.
- // First the default clause.
- const auto default_id = branch->GetSingleWordInOperand(1);
- const auto* default_info = GetBlockInfo(default_id);
- clause_heads.push_back(default_info);
- // Now the case clauses.
- for (uint32_t iarg = 2; iarg + 1 < branch->NumInOperands(); iarg += 2) {
- const auto case_target_id = branch->GetSingleWordInOperand(iarg + 1);
- clause_heads.push_back(GetBlockInfo(case_target_id));
- }
-
- std::stable_sort(clause_heads.begin(), clause_heads.end(),
- [](const BlockInfo* lhs, const BlockInfo* rhs) {
- return lhs->pos < rhs->pos;
- });
- // Remove duplicates
- {
- // Use read index r, and write index w.
- // Invariant: w <= r;
- size_t w = 0;
- for (size_t r = 0; r < clause_heads.size(); ++r) {
- if (clause_heads[r] != clause_heads[w]) {
- ++w; // Advance the write cursor.
- }
- clause_heads[w] = clause_heads[r];
- }
- // We know it's not empty because it always has at least a default clause.
- TINT_ASSERT(Reader, !clause_heads.empty());
- clause_heads.resize(w + 1);
- }
-
- // Push them on in reverse order.
- const auto last_clause_index = clause_heads.size() - 1;
- for (size_t i = last_clause_index;; --i) {
- // Create a list of integer literals for the selector values leading to
- // this case clause.
- ast::CaseSelectorList selectors;
- const auto* values_ptr = clause_heads[i]->case_values.get();
- const bool has_selectors = (values_ptr && !values_ptr->empty());
- if (has_selectors) {
- std::vector<uint64_t> values(values_ptr->begin(), values_ptr->end());
- std::stable_sort(values.begin(), values.end());
- for (auto value : values) {
- // The rest of this module can handle up to 64 bit switch values.
- // The Tint AST handles 32-bit values.
- const uint32_t value32 = uint32_t(value & 0xFFFFFFFF);
- if (selector.type->IsUnsignedScalarOrVector()) {
- selectors.emplace_back(
- create<ast::UintLiteralExpression>(Source{}, value32));
- } else {
- selectors.emplace_back(
- create<ast::SintLiteralExpression>(Source{}, value32));
- }
- }
+ // The block is the if-header block. So its construct is the if construct.
+ auto* construct = block_info.construct;
+ TINT_ASSERT(Reader, construct->kind == Construct::kSwitchSelection);
+ TINT_ASSERT(Reader, construct->begin_id == block_info.id);
+ const auto* branch = block_info.basic_block->terminator();
+
+ const auto selector_id = branch->GetSingleWordInOperand(0);
+ // Generate the code for the selector.
+ auto selector = MakeExpression(selector_id);
+ if (!selector) {
+ return false;
}
+ // First, push the statement block for the entire switch.
+ auto* swch = AddStatementBuilder<SwitchStatementBuilder>(selector.expr);
- // Where does this clause end?
- const auto end_id = (i + 1 < clause_heads.size()) ? clause_heads[i + 1]->id
- : construct->end_id;
+ // Grab a pointer to the case list. It will get buried in the statement block
+ // stack.
+ PushNewStatementBlock(construct, construct->end_id, nullptr);
- // Reserve the case clause slot in swch->cases, push the new statement block
- // for the case, and fill the case clause once the block is generated.
- auto case_idx = swch->cases.size();
- swch->cases.emplace_back(nullptr);
- PushNewStatementBlock(
- construct, end_id, [=](const ast::StatementList& stmts) {
- auto* body = create<ast::BlockStatement>(Source{}, stmts);
- swch->cases[case_idx] =
- create<ast::CaseStatement>(Source{}, selectors, body);
- });
+ // We will push statement-blocks onto the stack to gather the statements in
+    // the default clause and the case clauses. Determine the list of blocks
+ // that start each clause.
+ std::vector<const BlockInfo*> clause_heads;
- if ((default_info == clause_heads[i]) && has_selectors &&
- construct->ContainsPos(default_info->pos)) {
-      // Generate a default clause with just a fallthrough.
- auto* stmts = create<ast::BlockStatement>(
- Source{}, ast::StatementList{
- create<ast::FallthroughStatement>(Source{}),
- });
- auto* case_stmt =
- create<ast::CaseStatement>(Source{}, ast::CaseSelectorList{}, stmts);
- swch->cases.emplace_back(case_stmt);
+ // Collect the case clauses, even if they are just the merge block.
+ // First the default clause.
+ const auto default_id = branch->GetSingleWordInOperand(1);
+ const auto* default_info = GetBlockInfo(default_id);
+ clause_heads.push_back(default_info);
+ // Now the case clauses.
+ for (uint32_t iarg = 2; iarg + 1 < branch->NumInOperands(); iarg += 2) {
+ const auto case_target_id = branch->GetSingleWordInOperand(iarg + 1);
+ clause_heads.push_back(GetBlockInfo(case_target_id));
}
- if (i == 0) {
- break;
+ std::stable_sort(
+ clause_heads.begin(), clause_heads.end(),
+ [](const BlockInfo* lhs, const BlockInfo* rhs) { return lhs->pos < rhs->pos; });
+ // Remove duplicates
+ {
+ // Use read index r, and write index w.
+ // Invariant: w <= r;
+ size_t w = 0;
+ for (size_t r = 0; r < clause_heads.size(); ++r) {
+ if (clause_heads[r] != clause_heads[w]) {
+ ++w; // Advance the write cursor.
+ }
+ clause_heads[w] = clause_heads[r];
+ }
+ // We know it's not empty because it always has at least a default clause.
+ TINT_ASSERT(Reader, !clause_heads.empty());
+ clause_heads.resize(w + 1);
+ }
+
+ // Push them on in reverse order.
+ const auto last_clause_index = clause_heads.size() - 1;
+ for (size_t i = last_clause_index;; --i) {
+ // Create a list of integer literals for the selector values leading to
+ // this case clause.
+ ast::CaseSelectorList selectors;
+ const auto* values_ptr = clause_heads[i]->case_values.get();
+ const bool has_selectors = (values_ptr && !values_ptr->empty());
+ if (has_selectors) {
+ std::vector<uint64_t> values(values_ptr->begin(), values_ptr->end());
+ std::stable_sort(values.begin(), values.end());
+ for (auto value : values) {
+ // The rest of this module can handle up to 64 bit switch values.
+ // The Tint AST handles 32-bit values.
+ const uint32_t value32 = uint32_t(value & 0xFFFFFFFF);
+ if (selector.type->IsUnsignedScalarOrVector()) {
+ selectors.emplace_back(create<ast::IntLiteralExpression>(
+ Source{}, value32, ast::IntLiteralExpression::Suffix::kU));
+ } else {
+ selectors.emplace_back(
+ create<ast::IntLiteralExpression>(Source{}, static_cast<int32_t>(value32),
+ ast::IntLiteralExpression::Suffix::kI));
+ }
+ }
+ }
+
+ // Where does this clause end?
+ const auto end_id =
+ (i + 1 < clause_heads.size()) ? clause_heads[i + 1]->id : construct->end_id;
+
+ // Reserve the case clause slot in swch->cases, push the new statement block
+ // for the case, and fill the case clause once the block is generated.
+ auto case_idx = swch->cases.size();
+ swch->cases.emplace_back(nullptr);
+ PushNewStatementBlock(construct, end_id, [=](const ast::StatementList& stmts) {
+ auto* body = create<ast::BlockStatement>(Source{}, stmts);
+ swch->cases[case_idx] = create<ast::CaseStatement>(Source{}, selectors, body);
+ });
+
+ if ((default_info == clause_heads[i]) && has_selectors &&
+ construct->ContainsPos(default_info->pos)) {
+            // Generate a default clause with just a fallthrough.
+ auto* stmts = create<ast::BlockStatement>(
+ Source{}, ast::StatementList{
+ create<ast::FallthroughStatement>(Source{}),
+ });
+ auto* case_stmt = create<ast::CaseStatement>(Source{}, ast::CaseSelectorList{}, stmts);
+ swch->cases.emplace_back(case_stmt);
+ }
+
+ if (i == 0) {
+ break;
+ }
}
- }
- return success();
+ return success();
}
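
The sort-then-dedup step above keeps one entry per distinct clause head: heads are stably ordered by block position, then duplicates are squeezed out in place using a read cursor r and a write cursor w (invariant: w <= r). A tiny standalone version of that loop, with made-up positions:

    #include <algorithm>
    #include <cassert>
    #include <cstdio>
    #include <vector>

    int main() {
        std::vector<int> heads = {7, 3, 7, 5, 3};  // hypothetical clause-head positions
        std::stable_sort(heads.begin(), heads.end());
        size_t w = 0;
        for (size_t r = 0; r < heads.size(); ++r) {
            if (heads[r] != heads[w]) {
                ++w;  // advance the write cursor only when a new value appears
            }
            heads[w] = heads[r];
        }
        assert(!heads.empty());  // always at least the default clause
        heads.resize(w + 1);
        for (int h : heads) {
            std::printf("%d ", h);  // prints: 3 5 7
        }
        std::printf("\n");
        return 0;
    }

Sorting first is what lets the single pass remove every duplicate, and the result is never empty because the default clause always contributes a head.
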
bool FunctionEmitter::EmitLoopStart(const Construct* construct) {
- auto* builder = AddStatementBuilder<LoopStatementBuilder>();
- PushNewStatementBlock(
- construct, construct->end_id, [=](const ast::StatementList& stmts) {
+ auto* builder = AddStatementBuilder<LoopStatementBuilder>();
+ PushNewStatementBlock(construct, construct->end_id, [=](const ast::StatementList& stmts) {
builder->body = create<ast::BlockStatement>(Source{}, stmts);
- });
- return success();
+ });
+ return success();
}
bool FunctionEmitter::EmitContinuingStart(const Construct* construct) {
- // A continue construct has the same depth as its associated loop
- // construct. Start a continue construct.
- auto* loop_candidate = LastStatement();
- auto* loop = loop_candidate->As<LoopStatementBuilder>();
- if (loop == nullptr) {
- return Fail() << "internal error: starting continue construct, "
- "expected loop on top of stack";
- }
- PushNewStatementBlock(
- construct, construct->end_id, [=](const ast::StatementList& stmts) {
+ // A continue construct has the same depth as its associated loop
+ // construct. Start a continue construct.
+ auto* loop_candidate = LastStatement();
+ auto* loop = loop_candidate->As<LoopStatementBuilder>();
+ if (loop == nullptr) {
+ return Fail() << "internal error: starting continue construct, "
+ "expected loop on top of stack";
+ }
+ PushNewStatementBlock(construct, construct->end_id, [=](const ast::StatementList& stmts) {
loop->continuing = create<ast::BlockStatement>(Source{}, stmts);
- });
+ });
- return success();
+ return success();
}
bool FunctionEmitter::EmitNormalTerminator(const BlockInfo& block_info) {
- const auto& terminator = *(block_info.basic_block->terminator());
- switch (terminator.opcode()) {
- case SpvOpReturn:
- AddStatement(create<ast::ReturnStatement>(Source{}));
- return true;
- case SpvOpReturnValue: {
- auto value = MakeExpression(terminator.GetSingleWordInOperand(0));
- if (!value) {
- return false;
- }
- AddStatement(create<ast::ReturnStatement>(Source{}, value.expr));
- }
- return true;
- case SpvOpKill:
- // For now, assume SPIR-V OpKill has same semantics as WGSL discard.
- // TODO(dneto): https://github.com/gpuweb/gpuweb/issues/676
- AddStatement(create<ast::DiscardStatement>(Source{}));
- return true;
- case SpvOpUnreachable:
- // Translate as if it's a return. This avoids the problem where WGSL
- // requires a return statement at the end of the function body.
- {
- const auto* result_type = type_mgr_->GetType(function_.type_id());
- if (result_type->AsVoid() != nullptr) {
- AddStatement(create<ast::ReturnStatement>(Source{}));
- } else {
- auto* ast_type = parser_impl_.ConvertType(function_.type_id());
- AddStatement(create<ast::ReturnStatement>(
- Source{}, parser_impl_.MakeNullValue(ast_type)));
- }
- }
- return true;
- case SpvOpBranch: {
- const auto dest_id = terminator.GetSingleWordInOperand(0);
- AddStatement(MakeBranch(block_info, *GetBlockInfo(dest_id)));
- return true;
- }
- case SpvOpBranchConditional: {
- // If both destinations are the same, then do the same as we would
- // for an unconditional branch (OpBranch).
- const auto true_dest = terminator.GetSingleWordInOperand(1);
- const auto false_dest = terminator.GetSingleWordInOperand(2);
- if (true_dest == false_dest) {
- // This is like an unconditional branch.
- AddStatement(MakeBranch(block_info, *GetBlockInfo(true_dest)));
- return true;
- }
-
- const EdgeKind true_kind = block_info.succ_edge.find(true_dest)->second;
- const EdgeKind false_kind = block_info.succ_edge.find(false_dest)->second;
- auto* const true_info = GetBlockInfo(true_dest);
- auto* const false_info = GetBlockInfo(false_dest);
- auto* cond = MakeExpression(terminator.GetSingleWordInOperand(0)).expr;
- if (!cond) {
- return false;
- }
-
- // We have two distinct destinations. But we only get here if this
- // is a normal terminator; in particular the source block is *not* the
- // start of an if-selection or a switch-selection. So at most one branch
- // is a kForward, kCaseFallThrough, or kIfBreak.
-
- // The fallthrough case is special because WGSL requires the fallthrough
- // statement to be last in the case clause.
- if (true_kind == EdgeKind::kCaseFallThrough) {
- return EmitConditionalCaseFallThrough(block_info, cond, false_kind,
- *false_info, true);
- } else if (false_kind == EdgeKind::kCaseFallThrough) {
- return EmitConditionalCaseFallThrough(block_info, cond, true_kind,
- *true_info, false);
- }
-
- // At this point, at most one edge is kForward or kIfBreak.
-
- // Emit an 'if' statement to express the *other* branch as a conditional
- // break or continue. Either or both of these could be nullptr.
- // (A nullptr is generated for kIfBreak, kForward, or kBack.)
- // Also if one of the branches is an if-break out of an if-selection
- // requiring a flow guard, then get that flow guard name too. It will
- // come from at most one of these two branches.
- std::string flow_guard;
- auto* true_branch =
- MakeBranchDetailed(block_info, *true_info, false, &flow_guard);
- auto* false_branch =
- MakeBranchDetailed(block_info, *false_info, false, &flow_guard);
-
- AddStatement(MakeSimpleIf(cond, true_branch, false_branch));
- if (!flow_guard.empty()) {
- PushGuard(flow_guard, statements_stack_.back().GetEndId());
- }
- return true;
- }
- case SpvOpSwitch:
- // An OpSelectionMerge must precede an OpSwitch. That is clarified
- // in the resolution to Khronos-internal SPIR-V issue 115.
- // A new enough version of the SPIR-V validator checks this case.
- // But issue an error in this case, as a defensive measure.
- return Fail() << "invalid structured control flow: found an OpSwitch "
- "that is not preceded by an "
- "OpSelectionMerge: "
- << terminator.PrettyPrint();
- default:
- break;
- }
- return success();
-}
+ const auto& terminator = *(block_info.basic_block->terminator());
+ switch (terminator.opcode()) {
+ case SpvOpReturn:
+ AddStatement(create<ast::ReturnStatement>(Source{}));
+ return true;
+ case SpvOpReturnValue: {
+ auto value = MakeExpression(terminator.GetSingleWordInOperand(0));
+ if (!value) {
+ return false;
+ }
+ AddStatement(create<ast::ReturnStatement>(Source{}, value.expr));
+ }
+ return true;
+ case SpvOpKill:
+ // For now, assume SPIR-V OpKill has same semantics as WGSL discard.
+ // TODO(dneto): https://github.com/gpuweb/gpuweb/issues/676
+ AddStatement(create<ast::DiscardStatement>(Source{}));
+ return true;
+ case SpvOpUnreachable:
+ // Translate as if it's a return. This avoids the problem where WGSL
+ // requires a return statement at the end of the function body.
+ {
+ const auto* result_type = type_mgr_->GetType(function_.type_id());
+ if (result_type->AsVoid() != nullptr) {
+ AddStatement(create<ast::ReturnStatement>(Source{}));
+ } else {
+ auto* ast_type = parser_impl_.ConvertType(function_.type_id());
+ AddStatement(create<ast::ReturnStatement>(
+ Source{}, parser_impl_.MakeNullValue(ast_type)));
+ }
+ }
+ return true;
+ case SpvOpBranch: {
+ const auto dest_id = terminator.GetSingleWordInOperand(0);
+ AddStatement(MakeBranch(block_info, *GetBlockInfo(dest_id)));
+ return true;
+ }
+ case SpvOpBranchConditional: {
+ // If both destinations are the same, then do the same as we would
+ // for an unconditional branch (OpBranch).
+ const auto true_dest = terminator.GetSingleWordInOperand(1);
+ const auto false_dest = terminator.GetSingleWordInOperand(2);
+ if (true_dest == false_dest) {
+ // This is like an unconditional branch.
+ AddStatement(MakeBranch(block_info, *GetBlockInfo(true_dest)));
+ return true;
+ }
-const ast::Statement* FunctionEmitter::MakeBranchDetailed(
- const BlockInfo& src_info,
- const BlockInfo& dest_info,
- bool forced,
- std::string* flow_guard_name_ptr) const {
- auto kind = src_info.succ_edge.find(dest_info.id)->second;
- switch (kind) {
- case EdgeKind::kBack:
- // Nothing to do. The loop backedge is implicit.
- break;
- case EdgeKind::kSwitchBreak: {
- if (forced) {
- return create<ast::BreakStatement>(Source{});
- }
- // Unless forced, don't bother with a break at the end of a case/default
- // clause.
- const auto header = dest_info.header_for_merge;
- TINT_ASSERT(Reader, header != 0);
- const auto* exiting_construct = GetBlockInfo(header)->construct;
- TINT_ASSERT(Reader,
- exiting_construct->kind == Construct::kSwitchSelection);
- const auto candidate_next_case_pos = src_info.pos + 1;
- // Leaving the last block from the last case?
- if (candidate_next_case_pos == dest_info.pos) {
- // No break needed.
- return nullptr;
- }
- // Leaving the last block from not-the-last-case?
- if (exiting_construct->ContainsPos(candidate_next_case_pos)) {
- const auto* candidate_next_case =
- GetBlockInfo(block_order_[candidate_next_case_pos]);
- if (candidate_next_case->case_head_for == exiting_construct ||
- candidate_next_case->default_head_for == exiting_construct) {
- // No break needed.
- return nullptr;
- }
- }
- // We need a break.
- return create<ast::BreakStatement>(Source{});
- }
- case EdgeKind::kLoopBreak:
- return create<ast::BreakStatement>(Source{});
- case EdgeKind::kLoopContinue:
- // An unconditional continue to the next block is redundant and ugly.
- // Skip it in that case.
- if (dest_info.pos == 1 + src_info.pos) {
- break;
- }
- // Otherwise, emit a regular continue statement.
- return create<ast::ContinueStatement>(Source{});
- case EdgeKind::kIfBreak: {
- const auto& flow_guard =
- GetBlockInfo(dest_info.header_for_merge)->flow_guard_name;
- if (!flow_guard.empty()) {
- if (flow_guard_name_ptr != nullptr) {
- *flow_guard_name_ptr = flow_guard;
- }
- // Signal an exit from the branch.
- return create<ast::AssignmentStatement>(
- Source{},
- create<ast::IdentifierExpression>(
- Source{}, builder_.Symbols().Register(flow_guard)),
- MakeFalse(Source{}));
- }
-
- // For an unconditional branch, the break out to an if-selection
- // merge block is implicit.
- break;
- }
- case EdgeKind::kCaseFallThrough:
- return create<ast::FallthroughStatement>(Source{});
- case EdgeKind::kForward:
- // Unconditional forward branch is implicit.
- break;
- }
- return nullptr;
+ const EdgeKind true_kind = block_info.succ_edge.find(true_dest)->second;
+ const EdgeKind false_kind = block_info.succ_edge.find(false_dest)->second;
+ auto* const true_info = GetBlockInfo(true_dest);
+ auto* const false_info = GetBlockInfo(false_dest);
+ auto* cond = MakeExpression(terminator.GetSingleWordInOperand(0)).expr;
+ if (!cond) {
+ return false;
+ }
+
+ // We have two distinct destinations. But we only get here if this
+ // is a normal terminator; in particular the source block is *not* the
+ // start of an if-selection or a switch-selection. So at most one branch
+ // is a kForward, kCaseFallThrough, or kIfBreak.
+
+ // The fallthrough case is special because WGSL requires the fallthrough
+ // statement to be last in the case clause.
+ if (true_kind == EdgeKind::kCaseFallThrough) {
+ return EmitConditionalCaseFallThrough(block_info, cond, false_kind, *false_info,
+ true);
+ } else if (false_kind == EdgeKind::kCaseFallThrough) {
+ return EmitConditionalCaseFallThrough(block_info, cond, true_kind, *true_info,
+ false);
+ }
+
+ // At this point, at most one edge is kForward or kIfBreak.
+
+ // Emit an 'if' statement to express the *other* branch as a conditional
+ // break or continue. Either or both of these could be nullptr.
+ // (A nullptr is generated for kIfBreak, kForward, or kBack.)
+ // Also if one of the branches is an if-break out of an if-selection
+ // requiring a flow guard, then get that flow guard name too. It will
+ // come from at most one of these two branches.
+ std::string flow_guard;
+ auto* true_branch = MakeBranchDetailed(block_info, *true_info, false, &flow_guard);
+ auto* false_branch = MakeBranchDetailed(block_info, *false_info, false, &flow_guard);
+
+ AddStatement(MakeSimpleIf(cond, true_branch, false_branch));
+ if (!flow_guard.empty()) {
+ PushGuard(flow_guard, statements_stack_.back().GetEndId());
+ }
+ return true;
+ }
+ case SpvOpSwitch:
+ // An OpSelectionMerge must precede an OpSwitch. That is clarified
+ // in the resolution to Khronos-internal SPIR-V issue 115.
+ // A new enough version of the SPIR-V validator checks this case.
+ // But issue an error in this case, as a defensive measure.
+ return Fail() << "invalid structured control flow: found an OpSwitch "
+ "that is not preceded by an "
+ "OpSelectionMerge: "
+ << terminator.PrettyPrint();
+ default:
+ break;
+ }
+ return success();
}
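
For the OpBranchConditional case above, when one edge is a break or continue and the other is a plain forward edge, the emitted shape is a small "if" guarding the exit followed by straight-line flow into the next block in order. A plain C++ analogue of that shape (illustrative only, not generated output):

    #include <cstdio>

    void lowered_loop() {
        for (int i = 0; i < 10; ++i) {
            // ... statements of the current basic block ...
            const bool cond = (i == 5);  // condition taken from the block terminator
            if (cond) {
                break;  // the "other" edge was a loop break (a continue works the same way)
            }
            // the remaining edge was kForward: control simply falls into the next block
            std::printf("next block in order, i=%d\n", i);
        }
    }

    int main() {
        lowered_loop();
    }
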
-const ast::Statement* FunctionEmitter::MakeSimpleIf(
- const ast::Expression* condition,
- const ast::Statement* then_stmt,
- const ast::Statement* else_stmt) const {
- if ((then_stmt == nullptr) && (else_stmt == nullptr)) {
+const ast::Statement* FunctionEmitter::MakeBranchDetailed(const BlockInfo& src_info,
+ const BlockInfo& dest_info,
+ bool forced,
+ std::string* flow_guard_name_ptr) const {
+ auto kind = src_info.succ_edge.find(dest_info.id)->second;
+ switch (kind) {
+ case EdgeKind::kBack:
+ // Nothing to do. The loop backedge is implicit.
+ break;
+ case EdgeKind::kSwitchBreak: {
+ if (forced) {
+ return create<ast::BreakStatement>(Source{});
+ }
+ // Unless forced, don't bother with a break at the end of a case/default
+ // clause.
+ const auto header = dest_info.header_for_merge;
+ TINT_ASSERT(Reader, header != 0);
+ const auto* exiting_construct = GetBlockInfo(header)->construct;
+ TINT_ASSERT(Reader, exiting_construct->kind == Construct::kSwitchSelection);
+ const auto candidate_next_case_pos = src_info.pos + 1;
+ // Leaving the last block from the last case?
+ if (candidate_next_case_pos == dest_info.pos) {
+ // No break needed.
+ return nullptr;
+ }
+ // Leaving the last block from not-the-last-case?
+ if (exiting_construct->ContainsPos(candidate_next_case_pos)) {
+ const auto* candidate_next_case =
+ GetBlockInfo(block_order_[candidate_next_case_pos]);
+ if (candidate_next_case->case_head_for == exiting_construct ||
+ candidate_next_case->default_head_for == exiting_construct) {
+ // No break needed.
+ return nullptr;
+ }
+ }
+ // We need a break.
+ return create<ast::BreakStatement>(Source{});
+ }
+ case EdgeKind::kLoopBreak:
+ return create<ast::BreakStatement>(Source{});
+ case EdgeKind::kLoopContinue:
+ // An unconditional continue to the next block is redundant and ugly.
+ // Skip it in that case.
+ if (dest_info.pos == 1 + src_info.pos) {
+ break;
+ }
+ // Otherwise, emit a regular continue statement.
+ return create<ast::ContinueStatement>(Source{});
+ case EdgeKind::kIfBreak: {
+ const auto& flow_guard = GetBlockInfo(dest_info.header_for_merge)->flow_guard_name;
+ if (!flow_guard.empty()) {
+ if (flow_guard_name_ptr != nullptr) {
+ *flow_guard_name_ptr = flow_guard;
+ }
+ // Signal an exit from the branch.
+ return create<ast::AssignmentStatement>(
+ Source{},
+ create<ast::IdentifierExpression>(Source{},
+ builder_.Symbols().Register(flow_guard)),
+ MakeFalse(Source{}));
+ }
+
+ // For an unconditional branch, the break out to an if-selection
+ // merge block is implicit.
+ break;
+ }
+ case EdgeKind::kCaseFallThrough:
+ return create<ast::FallthroughStatement>(Source{});
+ case EdgeKind::kForward:
+ // Unconditional forward branch is implicit.
+ break;
+ }
return nullptr;
- }
- ast::ElseStatementList else_stmts;
- if (else_stmt != nullptr) {
- ast::StatementList stmts{else_stmt};
- else_stmts.emplace_back(create<ast::ElseStatement>(
- Source{}, nullptr, create<ast::BlockStatement>(Source{}, stmts)));
- }
- ast::StatementList if_stmts;
- if (then_stmt != nullptr) {
- if_stmts.emplace_back(then_stmt);
- }
- auto* if_block = create<ast::BlockStatement>(Source{}, if_stmts);
- auto* if_stmt =
- create<ast::IfStatement>(Source{}, condition, if_block, else_stmts);
-
- return if_stmt;
}
-bool FunctionEmitter::EmitConditionalCaseFallThrough(
- const BlockInfo& src_info,
- const ast::Expression* cond,
- EdgeKind other_edge_kind,
- const BlockInfo& other_dest,
- bool fall_through_is_true_branch) {
- // In WGSL, the fallthrough statement must come last in the case clause.
- // So we'll emit an if statement for the other branch, and then emit
- // the fallthrough.
-
- // We have two distinct destinations. But we only get here if this
- // is a normal terminator; in particular the source block is *not* the
- // start of an if-selection. So at most one branch is a kForward or
- // kCaseFallThrough.
- if (other_edge_kind == EdgeKind::kForward) {
- return Fail()
- << "internal error: normal terminator OpBranchConditional has "
- "both forward and fallthrough edges";
- }
- if (other_edge_kind == EdgeKind::kIfBreak) {
- return Fail()
- << "internal error: normal terminator OpBranchConditional has "
- "both IfBreak and fallthrough edges. Violates nesting rule";
- }
- if (other_edge_kind == EdgeKind::kBack) {
- return Fail()
- << "internal error: normal terminator OpBranchConditional has "
- "both backedge and fallthrough edges. Violates nesting rule";
- }
- auto* other_branch = MakeForcedBranch(src_info, other_dest);
- if (other_branch == nullptr) {
- return Fail() << "internal error: expected a branch for edge-kind "
- << int(other_edge_kind);
- }
- if (fall_through_is_true_branch) {
- AddStatement(MakeSimpleIf(cond, nullptr, other_branch));
- } else {
- AddStatement(MakeSimpleIf(cond, other_branch, nullptr));
- }
- AddStatement(create<ast::FallthroughStatement>(Source{}));
-
- return success();
+const ast::Statement* FunctionEmitter::MakeSimpleIf(const ast::Expression* condition,
+ const ast::Statement* then_stmt,
+ const ast::Statement* else_stmt) const {
+ if ((then_stmt == nullptr) && (else_stmt == nullptr)) {
+ return nullptr;
+ }
+ ast::StatementList if_stmts;
+ if (then_stmt != nullptr) {
+ if_stmts.emplace_back(then_stmt);
+ }
+ auto* if_block = create<ast::BlockStatement>(Source{}, if_stmts);
+
+ const ast::Statement* else_block = nullptr;
+ if (else_stmt) {
+ else_block = create<ast::BlockStatement>(ast::StatementList{else_stmt});
+ }
+
+ auto* if_stmt = create<ast::IfStatement>(Source{}, condition, if_block, else_block);
+
+ return if_stmt;
+}
+
+bool FunctionEmitter::EmitConditionalCaseFallThrough(const BlockInfo& src_info,
+ const ast::Expression* cond,
+ EdgeKind other_edge_kind,
+ const BlockInfo& other_dest,
+ bool fall_through_is_true_branch) {
+ // In WGSL, the fallthrough statement must come last in the case clause.
+ // So we'll emit an if statement for the other branch, and then emit
+ // the fallthrough.
+
+ // We have two distinct destinations. But we only get here if this
+ // is a normal terminator; in particular the source block is *not* the
+ // start of an if-selection. So at most one branch is a kForward or
+ // kCaseFallThrough.
+ if (other_edge_kind == EdgeKind::kForward) {
+ return Fail() << "internal error: normal terminator OpBranchConditional has "
+ "both forward and fallthrough edges";
+ }
+ if (other_edge_kind == EdgeKind::kIfBreak) {
+ return Fail() << "internal error: normal terminator OpBranchConditional has "
+ "both IfBreak and fallthrough edges. Violates nesting rule";
+ }
+ if (other_edge_kind == EdgeKind::kBack) {
+ return Fail() << "internal error: normal terminator OpBranchConditional has "
+ "both backedge and fallthrough edges. Violates nesting rule";
+ }
+ auto* other_branch = MakeForcedBranch(src_info, other_dest);
+ if (other_branch == nullptr) {
+ return Fail() << "internal error: expected a branch for edge-kind " << int(other_edge_kind);
+ }
+ if (fall_through_is_true_branch) {
+ AddStatement(MakeSimpleIf(cond, nullptr, other_branch));
+ } else {
+ AddStatement(MakeSimpleIf(cond, other_branch, nullptr));
+ }
+ AddStatement(create<ast::FallthroughStatement>(Source{}));
+
+ return success();
}
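// A minimal sketch of the WGSL shape emitted above, assuming the forced branch
// for the other edge is a `break`: when fallthrough is the false branch this is
//
//   if (cond) { break; }
//   fallthrough;
//
// and when fallthrough is the true branch the forced branch lands in the else arm:
//
//   if (cond) { } else { break; }
//   fallthrough;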
bool FunctionEmitter::EmitStatementsInBasicBlock(const BlockInfo& block_info,
bool* already_emitted) {
- if (*already_emitted) {
- // Only emit this part of the basic block once.
- return true;
- }
- // Returns the given list of local definition IDs, sorted by their index.
- auto sorted_by_index = [this](const std::vector<uint32_t>& ids) {
- auto sorted = ids;
- std::stable_sort(sorted.begin(), sorted.end(),
- [this](const uint32_t lhs, const uint32_t rhs) {
- return GetDefInfo(lhs)->index < GetDefInfo(rhs)->index;
- });
- return sorted;
- };
-
- // Emit declarations of hoisted variables, in index order.
- for (auto id : sorted_by_index(block_info.hoisted_ids)) {
- const auto* def_inst = def_use_mgr_->GetDef(id);
- TINT_ASSERT(Reader, def_inst);
- auto* storage_type =
- RemapStorageClass(parser_impl_.ConvertType(def_inst->type_id()), id);
- AddStatement(create<ast::VariableDeclStatement>(
- Source{}, parser_impl_.MakeVariable(id, ast::StorageClass::kNone,
- storage_type, false, false, nullptr,
- ast::AttributeList{})));
- auto* type = ty_.Reference(storage_type, ast::StorageClass::kNone);
- identifier_types_.emplace(id, type);
- }
- // Emit declarations of phi state variables, in index order.
- for (auto id : sorted_by_index(block_info.phis_needing_state_vars)) {
- const auto* def_inst = def_use_mgr_->GetDef(id);
- TINT_ASSERT(Reader, def_inst);
- const auto phi_var_name = GetDefInfo(id)->phi_var;
- TINT_ASSERT(Reader, !phi_var_name.empty());
- auto* var = builder_.Var(
- phi_var_name,
- parser_impl_.ConvertType(def_inst->type_id())->Build(builder_));
- AddStatement(create<ast::VariableDeclStatement>(Source{}, var));
- }
-
- // Emit regular statements.
- const spvtools::opt::BasicBlock& bb = *(block_info.basic_block);
- const auto* terminator = bb.terminator();
- const auto* merge = bb.GetMergeInst(); // Might be nullptr
- for (auto& inst : bb) {
- if (&inst == terminator || &inst == merge || inst.opcode() == SpvOpLabel ||
- inst.opcode() == SpvOpVariable) {
- continue;
- }
- if (!EmitStatement(inst)) {
- return false;
- }
- }
-
- // Emit assignments to carry values to phi nodes in potential destinations.
- // Do it in index order.
- if (!block_info.phi_assignments.empty()) {
- auto sorted = block_info.phi_assignments;
- std::stable_sort(sorted.begin(), sorted.end(),
- [this](const BlockInfo::PhiAssignment& lhs,
- const BlockInfo::PhiAssignment& rhs) {
- return GetDefInfo(lhs.phi_id)->index <
- GetDefInfo(rhs.phi_id)->index;
- });
- for (auto assignment : block_info.phi_assignments) {
- const auto var_name = GetDefInfo(assignment.phi_id)->phi_var;
- auto expr = MakeExpression(assignment.value);
- if (!expr) {
- return false;
- }
- AddStatement(create<ast::AssignmentStatement>(
- Source{},
- create<ast::IdentifierExpression>(
- Source{}, builder_.Symbols().Register(var_name)),
- expr.expr));
+ if (*already_emitted) {
+ // Only emit this part of the basic block once.
+ return true;
+ }
+ // Returns the given list of local definition IDs, sorted by their index.
+ auto sorted_by_index = [this](const std::vector<uint32_t>& ids) {
+ auto sorted = ids;
+ std::stable_sort(sorted.begin(), sorted.end(),
+ [this](const uint32_t lhs, const uint32_t rhs) {
+ return GetDefInfo(lhs)->index < GetDefInfo(rhs)->index;
+ });
+ return sorted;
+ };
+
+ // Emit declarations of hoisted variables, in index order.
+ for (auto id : sorted_by_index(block_info.hoisted_ids)) {
+ const auto* def_inst = def_use_mgr_->GetDef(id);
+ TINT_ASSERT(Reader, def_inst);
+ auto* storage_type = RemapStorageClass(parser_impl_.ConvertType(def_inst->type_id()), id);
+ AddStatement(create<ast::VariableDeclStatement>(
+ Source{}, parser_impl_.MakeVariable(id, ast::StorageClass::kNone, storage_type, false,
+ false, nullptr, ast::AttributeList{})));
+ auto* type = ty_.Reference(storage_type, ast::StorageClass::kNone);
+ identifier_types_.emplace(id, type);
+ }
+ // Emit declarations of phi state variables, in index order.
+ for (auto id : sorted_by_index(block_info.phis_needing_state_vars)) {
+ const auto* def_inst = def_use_mgr_->GetDef(id);
+ TINT_ASSERT(Reader, def_inst);
+ const auto phi_var_name = GetDefInfo(id)->phi_var;
+ TINT_ASSERT(Reader, !phi_var_name.empty());
+ auto* var = builder_.Var(phi_var_name,
+ parser_impl_.ConvertType(def_inst->type_id())->Build(builder_));
+ AddStatement(create<ast::VariableDeclStatement>(Source{}, var));
+ }
+
+ // Emit regular statements.
+ const spvtools::opt::BasicBlock& bb = *(block_info.basic_block);
+ const auto* terminator = bb.terminator();
+ const auto* merge = bb.GetMergeInst(); // Might be nullptr
+ for (auto& inst : bb) {
+ if (&inst == terminator || &inst == merge || inst.opcode() == SpvOpLabel ||
+ inst.opcode() == SpvOpVariable) {
+ continue;
+ }
+ if (!EmitStatement(inst)) {
+ return false;
+ }
+ }
+
+ // Emit assignments to carry values to phi nodes in potential destinations.
+ // Do it in index order.
+ if (!block_info.phi_assignments.empty()) {
+ auto sorted = block_info.phi_assignments;
+ std::stable_sort(
+ sorted.begin(), sorted.end(),
+ [this](const BlockInfo::PhiAssignment& lhs, const BlockInfo::PhiAssignment& rhs) {
+ return GetDefInfo(lhs.phi_id)->index < GetDefInfo(rhs.phi_id)->index;
+ });
+ for (auto assignment : block_info.phi_assignments) {
+ const auto var_name = GetDefInfo(assignment.phi_id)->phi_var;
+ auto expr = MakeExpression(assignment.value);
+ if (!expr) {
+ return false;
+ }
+ AddStatement(create<ast::AssignmentStatement>(
+ Source{},
+ create<ast::IdentifierExpression>(Source{}, builder_.Symbols().Register(var_name)),
+ expr.expr));
+ }
}
- }
- *already_emitted = true;
- return true;
+ *already_emitted = true;
+ return true;
}
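// A rough sketch of the phi lowering above (identifiers are illustrative):
// a SPIR-V phi such as `%p = OpPhi %int %a %bb1 %b %bb2` is carried through a
// state variable declared here and assigned in each predecessor, then read back
// in the SpvOpPhi case of EmitStatement below:
//
//   var phi_p : i32;           // declared via phis_needing_state_vars
//   // in predecessor bb1:        phi_p = a;
//   // in predecessor bb2:        phi_p = b;
//   let x_p : i32 = phi_p;     // emitted when the OpPhi instruction is reached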
-bool FunctionEmitter::EmitConstDefinition(
- const spvtools::opt::Instruction& inst,
- TypedExpression expr) {
- if (!expr) {
- return false;
- }
+bool FunctionEmitter::EmitConstDefinition(const spvtools::opt::Instruction& inst,
+ TypedExpression expr) {
+ if (!expr) {
+ return false;
+ }
- // Do not generate pointers that we want to sink.
- if (GetDefInfo(inst.result_id())->skip == SkipReason::kSinkPointerIntoUse) {
- return true;
- }
+ // Do not generate pointers that we want to sink.
+ if (GetDefInfo(inst.result_id())->skip == SkipReason::kSinkPointerIntoUse) {
+ return true;
+ }
- expr = AddressOfIfNeeded(expr, &inst);
- auto* ast_const = parser_impl_.MakeVariable(
- inst.result_id(), ast::StorageClass::kNone, expr.type, true, false,
- expr.expr, ast::AttributeList{});
- if (!ast_const) {
- return false;
- }
- AddStatement(create<ast::VariableDeclStatement>(Source{}, ast_const));
- identifier_types_.emplace(inst.result_id(), expr.type);
- return success();
+ expr = AddressOfIfNeeded(expr, &inst);
+ auto* ast_const =
+ parser_impl_.MakeVariable(inst.result_id(), ast::StorageClass::kNone, expr.type, true,
+ false, expr.expr, ast::AttributeList{});
+ if (!ast_const) {
+ return false;
+ }
+ AddStatement(create<ast::VariableDeclStatement>(Source{}, ast_const));
+ identifier_types_.emplace(inst.result_id(), expr.type);
+ return success();
}
-bool FunctionEmitter::EmitConstDefOrWriteToHoistedVar(
- const spvtools::opt::Instruction& inst,
- TypedExpression expr) {
- return WriteIfHoistedVar(inst, expr) || EmitConstDefinition(inst, expr);
+bool FunctionEmitter::EmitConstDefOrWriteToHoistedVar(const spvtools::opt::Instruction& inst,
+ TypedExpression expr) {
+ return WriteIfHoistedVar(inst, expr) || EmitConstDefinition(inst, expr);
}
bool FunctionEmitter::WriteIfHoistedVar(const spvtools::opt::Instruction& inst,
TypedExpression expr) {
- const auto result_id = inst.result_id();
- const auto* def_info = GetDefInfo(result_id);
- if (def_info && def_info->requires_hoisted_def) {
- auto name = namer_.Name(result_id);
- // Emit an assignment of the expression to the hoisted variable.
- AddStatement(create<ast::AssignmentStatement>(
- Source{},
- create<ast::IdentifierExpression>(Source{},
- builder_.Symbols().Register(name)),
- expr.expr));
- return true;
- }
- return false;
+ const auto result_id = inst.result_id();
+ const auto* def_info = GetDefInfo(result_id);
+ if (def_info && def_info->requires_hoisted_def) {
+ auto name = namer_.Name(result_id);
+ // Emit an assignment of the expression to the hoisted variable.
+ AddStatement(create<ast::AssignmentStatement>(
+ Source{},
+ create<ast::IdentifierExpression>(Source{}, builder_.Symbols().Register(name)),
+ expr.expr));
+ return true;
+ }
+ return false;
}
bool FunctionEmitter::EmitStatement(const spvtools::opt::Instruction& inst) {
- if (failed()) {
- return false;
- }
- const auto result_id = inst.result_id();
- const auto type_id = inst.type_id();
-
- if (type_id != 0) {
- const auto& builtin_position_info = parser_impl_.GetBuiltInPositionInfo();
- if (type_id == builtin_position_info.struct_type_id) {
- return Fail() << "operations producing a per-vertex structure are not "
- "supported: "
- << inst.PrettyPrint();
- }
- if (type_id == builtin_position_info.pointer_type_id) {
- return Fail() << "operations producing a pointer to a per-vertex "
- "structure are not "
- "supported: "
- << inst.PrettyPrint();
- }
- }
-
- // Handle combinatorial instructions.
- const auto* def_info = GetDefInfo(result_id);
- if (def_info) {
- TypedExpression combinatorial_expr;
- if (def_info->skip == SkipReason::kDontSkip) {
- combinatorial_expr = MaybeEmitCombinatorialValue(inst);
- if (!success()) {
+ if (failed()) {
return false;
- }
- }
- // An access chain or OpCopyObject can generate a skip.
- if (def_info->skip != SkipReason::kDontSkip) {
- return true;
- }
-
- if (combinatorial_expr.expr != nullptr) {
- if (def_info->requires_hoisted_def ||
- def_info->requires_named_const_def || def_info->num_uses != 1) {
- // Generate a const definition or an assignment to a hoisted definition
- // now and later use the const or variable name at the uses of this
- // value.
- return EmitConstDefOrWriteToHoistedVar(inst, combinatorial_expr);
- }
- // It is harmless to defer emitting the expression until it's used.
- // Any supporting statements have already been emitted.
- singly_used_values_.insert(std::make_pair(result_id, combinatorial_expr));
- return success();
- }
- }
- if (failed()) {
- return false;
- }
+ }
+ const auto result_id = inst.result_id();
+ const auto type_id = inst.type_id();
- if (IsImageQuery(inst.opcode())) {
- return EmitImageQuery(inst);
- }
+ if (type_id != 0) {
+ const auto& builtin_position_info = parser_impl_.GetBuiltInPositionInfo();
+ if (type_id == builtin_position_info.struct_type_id) {
+ return Fail() << "operations producing a per-vertex structure are not "
+ "supported: "
+ << inst.PrettyPrint();
+ }
+ if (type_id == builtin_position_info.pointer_type_id) {
+ return Fail() << "operations producing a pointer to a per-vertex "
+ "structure are not "
+ "supported: "
+ << inst.PrettyPrint();
+ }
+ }
- if (IsSampledImageAccess(inst.opcode()) || IsRawImageAccess(inst.opcode())) {
- return EmitImageAccess(inst);
- }
+ // Handle combinatorial instructions.
+ const auto* def_info = GetDefInfo(result_id);
+ if (def_info) {
+ TypedExpression combinatorial_expr;
+ if (def_info->skip == SkipReason::kDontSkip) {
+ combinatorial_expr = MaybeEmitCombinatorialValue(inst);
+ if (!success()) {
+ return false;
+ }
+ }
+ // An access chain or OpCopyObject can generate a skip.
+ if (def_info->skip != SkipReason::kDontSkip) {
+ return true;
+ }
- switch (inst.opcode()) {
- case SpvOpNop:
- return true;
+ if (combinatorial_expr.expr != nullptr) {
+ if (def_info->requires_hoisted_def || def_info->requires_named_const_def ||
+ def_info->num_uses != 1) {
+ // Generate a const definition or an assignment to a hoisted definition
+ // now and later use the const or variable name at the uses of this
+ // value.
+ return EmitConstDefOrWriteToHoistedVar(inst, combinatorial_expr);
+ }
+ // It is harmless to defer emitting the expression until it's used.
+ // Any supporting statements have already been emitted.
+ singly_used_values_.insert(std::make_pair(result_id, combinatorial_expr));
+ return success();
+ }
+ }
+ if (failed()) {
+ return false;
+ }
- case SpvOpStore: {
- auto ptr_id = inst.GetSingleWordInOperand(0);
- const auto value_id = inst.GetSingleWordInOperand(1);
+ if (IsImageQuery(inst.opcode())) {
+ return EmitImageQuery(inst);
+ }
- const auto ptr_type_id = def_use_mgr_->GetDef(ptr_id)->type_id();
- const auto& builtin_position_info = parser_impl_.GetBuiltInPositionInfo();
- if (ptr_type_id == builtin_position_info.pointer_type_id) {
- return Fail()
- << "storing to the whole per-vertex structure is not supported: "
- << inst.PrettyPrint();
- }
+ if (IsSampledImageAccess(inst.opcode()) || IsRawImageAccess(inst.opcode())) {
+ return EmitImageAccess(inst);
+ }
- TypedExpression rhs = MakeExpression(value_id);
- if (!rhs) {
- return false;
- }
+ switch (inst.opcode()) {
+ case SpvOpNop:
+ return true;
- TypedExpression lhs;
+ case SpvOpStore: {
+ auto ptr_id = inst.GetSingleWordInOperand(0);
+ const auto value_id = inst.GetSingleWordInOperand(1);
+
+ const auto ptr_type_id = def_use_mgr_->GetDef(ptr_id)->type_id();
+ const auto& builtin_position_info = parser_impl_.GetBuiltInPositionInfo();
+ if (ptr_type_id == builtin_position_info.pointer_type_id) {
+ return Fail() << "storing to the whole per-vertex structure is not supported: "
+ << inst.PrettyPrint();
+ }
+
+ TypedExpression rhs = MakeExpression(value_id);
+ if (!rhs) {
+ return false;
+ }
+
+ TypedExpression lhs;
+
+ // Handle exceptional cases
+ switch (GetSkipReason(ptr_id)) {
+ case SkipReason::kPointSizeBuiltinPointer:
+ if (IsFloatOne(value_id)) {
+ // Don't store to PointSize
+ return true;
+ }
+ return Fail() << "cannot store a value other than constant 1.0 to "
+ "PointSize builtin: "
+ << inst.PrettyPrint();
+
+ case SkipReason::kSampleMaskOutBuiltinPointer:
+ lhs = MakeExpression(sample_mask_out_id);
+ if (lhs.type->Is<Pointer>()) {
+ // LHS of an assignment must be a reference type.
+ // Convert the LHS to a reference by dereferencing it.
+ lhs = Dereference(lhs);
+ }
+ // The private variable is an array whose element type is already of
+ // the same type as the value being stored into it. Form the
+ // reference into the first element.
+ lhs.expr = create<ast::IndexAccessorExpression>(
+ Source{}, lhs.expr, parser_impl_.MakeNullValue(ty_.I32()));
+ if (auto* ref = lhs.type->As<Reference>()) {
+ lhs.type = ref->type;
+ }
+ if (auto* arr = lhs.type->As<Array>()) {
+ lhs.type = arr->type;
+ }
+ TINT_ASSERT(Reader, lhs.type);
+ break;
+ default:
+ break;
+ }
+
+ // Handle an ordinary store as an assignment.
+ if (!lhs) {
+ lhs = MakeExpression(ptr_id);
+ }
+ if (!lhs) {
+ return false;
+ }
+
+ if (lhs.type->Is<Pointer>()) {
+ // LHS of an assignment must be a reference type.
+ // Convert the LHS to a reference by dereferencing it.
+ lhs = Dereference(lhs);
+ }
+
+ AddStatement(create<ast::AssignmentStatement>(Source{}, lhs.expr, rhs.expr));
+ return success();
+ }
+
+ case SpvOpLoad: {
+ // Memory accesses must be issued in SPIR-V program order.
+ // So represent a load by a new const definition.
+ const auto ptr_id = inst.GetSingleWordInOperand(0);
+ const auto skip_reason = GetSkipReason(ptr_id);
+
+ switch (skip_reason) {
+ case SkipReason::kPointSizeBuiltinPointer:
+ GetDefInfo(inst.result_id())->skip = SkipReason::kPointSizeBuiltinValue;
+ return true;
+ case SkipReason::kSampleMaskInBuiltinPointer: {
+ auto name = namer_.Name(sample_mask_in_id);
+ const ast::Expression* id_expr = create<ast::IdentifierExpression>(
+ Source{}, builder_.Symbols().Register(name));
+ // SampleMask is an array in Vulkan SPIR-V. Always access the first
+ // element.
+ id_expr = create<ast::IndexAccessorExpression>(
+ Source{}, id_expr, parser_impl_.MakeNullValue(ty_.I32()));
+
+ auto* loaded_type = parser_impl_.ConvertType(inst.type_id());
+
+ if (!loaded_type->IsIntegerScalar()) {
+ return Fail() << "loading the whole SampleMask input array is not "
+ "supported: "
+ << inst.PrettyPrint();
+ }
+
+ auto expr = TypedExpression{loaded_type, id_expr};
+ return EmitConstDefinition(inst, expr);
+ }
+ default:
+ break;
+ }
+ auto expr = MakeExpression(ptr_id);
+ if (!expr) {
+ return false;
+ }
+
+ // The load result type is the storage type of its operand.
+ if (expr.type->Is<Pointer>()) {
+ expr = Dereference(expr);
+ } else if (auto* ref = expr.type->As<Reference>()) {
+ expr.type = ref->type;
+ } else {
+ Fail() << "OpLoad expression is not a pointer or reference";
+ return false;
+ }
+
+ return EmitConstDefOrWriteToHoistedVar(inst, expr);
+ }
+
+ case SpvOpCopyMemory: {
+ // Generate an assignment.
+ auto lhs = MakeOperand(inst, 0);
+ auto rhs = MakeOperand(inst, 1);
+ // Ignore any potential memory operands. Currently they are all for
+ // concepts not in WGSL:
+ // Volatile
+ // Aligned
+ // Nontemporal
+ // MakePointerAvailable ; Vulkan memory model
+ // MakePointerVisible ; Vulkan memory model
+ // NonPrivatePointer ; Vulkan memory model
+
+ if (!success()) {
+ return false;
+ }
+
+ // LHS and RHS pointers must be reference types in WGSL.
+ if (lhs.type->Is<Pointer>()) {
+ lhs = Dereference(lhs);
+ }
+ if (rhs.type->Is<Pointer>()) {
+ rhs = Dereference(rhs);
+ }
+
+ AddStatement(create<ast::AssignmentStatement>(Source{}, lhs.expr, rhs.expr));
+ return success();
+ }
+
+ case SpvOpCopyObject: {
+ // Arguably, OpCopyObject is purely combinatorial. On the other hand,
+ // it exists to make a new name for something. So we choose to make
+ // a new named constant definition.
+ auto value_id = inst.GetSingleWordInOperand(0);
+ const auto skip = GetSkipReason(value_id);
+ if (skip != SkipReason::kDontSkip) {
+ GetDefInfo(inst.result_id())->skip = skip;
+ GetDefInfo(inst.result_id())->sink_pointer_source_expr =
+ GetDefInfo(value_id)->sink_pointer_source_expr;
+ return true;
+ }
+ auto expr = AddressOfIfNeeded(MakeExpression(value_id), &inst);
+ if (!expr) {
+ return false;
+ }
+ expr.type = RemapStorageClass(expr.type, result_id);
+ return EmitConstDefOrWriteToHoistedVar(inst, expr);
+ }
+
+ case SpvOpPhi: {
+ // Emit a read from the associated state variable.
+ TypedExpression expr{parser_impl_.ConvertType(inst.type_id()),
+ create<ast::IdentifierExpression>(
+ Source{}, builder_.Symbols().Register(def_info->phi_var))};
+ return EmitConstDefOrWriteToHoistedVar(inst, expr);
+ }
+
+ case SpvOpOuterProduct:
+ // Synthesize an outer product expression in its own statement.
+ return EmitConstDefOrWriteToHoistedVar(inst, MakeOuterProduct(inst));
+
+ case SpvOpVectorInsertDynamic:
+ // Synthesize a vector insertion in its own statements.
+ return MakeVectorInsertDynamic(inst);
+
+ case SpvOpCompositeInsert:
+ // Synthesize a composite insertion in its own statements.
+ return MakeCompositeInsert(inst);
+
+ case SpvOpFunctionCall:
+ return EmitFunctionCall(inst);
+
+ case SpvOpControlBarrier:
+ return EmitControlBarrier(inst);
+
+ case SpvOpExtInst:
+ if (parser_impl_.IsIgnoredExtendedInstruction(inst)) {
+ return true;
+ }
+ break;
+
+ case SpvOpIAddCarry:
+ case SpvOpISubBorrow:
+ case SpvOpUMulExtended:
+ case SpvOpSMulExtended:
+ return Fail() << "extended arithmetic is not finalized for WGSL: "
+ "https://github.com/gpuweb/gpuweb/issues/1565: "
+ << inst.PrettyPrint();
- // Handle exceptional cases
- switch (GetSkipReason(ptr_id)) {
- case SkipReason::kPointSizeBuiltinPointer:
- if (IsFloatOne(value_id)) {
- // Don't store to PointSize
- return true;
- }
- return Fail() << "cannot store a value other than constant 1.0 to "
- "PointSize builtin: "
- << inst.PrettyPrint();
-
- case SkipReason::kSampleMaskOutBuiltinPointer:
- lhs = MakeExpression(sample_mask_out_id);
- if (lhs.type->Is<Pointer>()) {
- // LHS of an assignment must be a reference type.
- // Convert the LHS to a reference by dereferencing it.
- lhs = Dereference(lhs);
- }
- // The private variable is an array whose element type is already of
- // the same type as the value being stored into it. Form the
- // reference into the first element.
- lhs.expr = create<ast::IndexAccessorExpression>(
- Source{}, lhs.expr, parser_impl_.MakeNullValue(ty_.I32()));
- if (auto* ref = lhs.type->As<Reference>()) {
- lhs.type = ref->type;
- }
- if (auto* arr = lhs.type->As<Array>()) {
- lhs.type = arr->type;
- }
- TINT_ASSERT(Reader, lhs.type);
- break;
default:
- break;
- }
-
- // Handle an ordinary store as an assignment.
- if (!lhs) {
- lhs = MakeExpression(ptr_id);
- }
- if (!lhs) {
- return false;
- }
+ break;
+ }
+ return Fail() << "unhandled instruction with opcode " << inst.opcode() << ": "
+ << inst.PrettyPrint();
+}
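// In rough terms (illustrative, ignoring the PointSize/SampleMask special cases
// handled above): an ordinary store becomes an assignment through a reference,
//
//   OpStore %ptr %val       -->   <ref to %ptr> = <value>;
//
// and each load is materialized as its own const definition so that memory
// accesses keep their SPIR-V program order,
//
//   %x = OpLoad %T %ptr     -->   let x_<id> : T = <dereferenced %ptr>;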
- if (lhs.type->Is<Pointer>()) {
- // LHS of an assignment must be a reference type.
- // Convert the LHS to a reference by dereferencing it.
- lhs = Dereference(lhs);
- }
+TypedExpression FunctionEmitter::MakeOperand(const spvtools::opt::Instruction& inst,
+ uint32_t operand_index) {
+ auto expr = MakeExpression(inst.GetSingleWordInOperand(operand_index));
+ if (!expr) {
+ return {};
+ }
+ return parser_impl_.RectifyOperandSignedness(inst, std::move(expr));
+}
- AddStatement(
- create<ast::AssignmentStatement>(Source{}, lhs.expr, rhs.expr));
- return success();
+TypedExpression FunctionEmitter::InferFunctionStorageClass(TypedExpression expr) {
+ TypedExpression result(expr);
+ if (const auto* ref = expr.type->UnwrapAlias()->As<Reference>()) {
+ if (ref->storage_class == ast::StorageClass::kNone) {
+ expr.type = ty_.Reference(ref->type, ast::StorageClass::kFunction);
+ }
+ } else if (const auto* ptr = expr.type->UnwrapAlias()->As<Pointer>()) {
+ if (ptr->storage_class == ast::StorageClass::kNone) {
+ expr.type = ty_.Pointer(ptr->type, ast::StorageClass::kFunction);
+ }
}
+ return expr;
+}
- case SpvOpLoad: {
- // Memory accesses must be issued in SPIR-V program order.
- // So represent a load by a new const definition.
- const auto ptr_id = inst.GetSingleWordInOperand(0);
- const auto skip_reason = GetSkipReason(ptr_id);
+TypedExpression FunctionEmitter::MaybeEmitCombinatorialValue(
+ const spvtools::opt::Instruction& inst) {
+ if (inst.result_id() == 0) {
+ return {};
+ }
- switch (skip_reason) {
- case SkipReason::kPointSizeBuiltinPointer:
- GetDefInfo(inst.result_id())->skip =
- SkipReason::kPointSizeBuiltinValue;
- return true;
- case SkipReason::kSampleMaskInBuiltinPointer: {
- auto name = namer_.Name(sample_mask_in_id);
- const ast::Expression* id_expr = create<ast::IdentifierExpression>(
- Source{}, builder_.Symbols().Register(name));
- // SampleMask is an array in Vulkan SPIR-V. Always access the first
- // element.
- id_expr = create<ast::IndexAccessorExpression>(
- Source{}, id_expr, parser_impl_.MakeNullValue(ty_.I32()));
-
- auto* loaded_type = parser_impl_.ConvertType(inst.type_id());
-
- if (!loaded_type->IsIntegerScalar()) {
- return Fail() << "loading the whole SampleMask input array is not "
- "supported: "
- << inst.PrettyPrint();
- }
+ const auto opcode = inst.opcode();
- auto expr = TypedExpression{loaded_type, id_expr};
- return EmitConstDefinition(inst, expr);
+ const Type* ast_type = nullptr;
+ if (inst.type_id()) {
+ ast_type = parser_impl_.ConvertType(inst.type_id());
+ if (!ast_type) {
+ Fail() << "couldn't convert result type for: " << inst.PrettyPrint();
+ return {};
}
- default:
- break;
- }
- auto expr = MakeExpression(ptr_id);
- if (!expr) {
- return false;
- }
-
- // The load result type is the storage type of its operand.
- if (expr.type->Is<Pointer>()) {
- expr = Dereference(expr);
- } else if (auto* ref = expr.type->As<Reference>()) {
- expr.type = ref->type;
- } else {
- Fail() << "OpLoad expression is not a pointer or reference";
- return false;
- }
+ }
- return EmitConstDefOrWriteToHoistedVar(inst, expr);
+ auto binary_op = ConvertBinaryOp(opcode);
+ if (binary_op != ast::BinaryOp::kNone) {
+ auto arg0 = MakeOperand(inst, 0);
+ auto arg1 =
+ parser_impl_.RectifySecondOperandSignedness(inst, arg0.type, MakeOperand(inst, 1));
+ if (!arg0 || !arg1) {
+ return {};
+ }
+ auto* binary_expr =
+ create<ast::BinaryExpression>(Source{}, binary_op, arg0.expr, arg1.expr);
+ TypedExpression result{ast_type, binary_expr};
+ return parser_impl_.RectifyForcedResultType(result, inst, arg0.type);
}
- case SpvOpCopyMemory: {
- // Generate an assignment.
- auto lhs = MakeOperand(inst, 0);
- auto rhs = MakeOperand(inst, 1);
- // Ignore any potential memory operands. Currently they are all for
- // concepts not in WGSL:
- // Volatile
- // Aligned
- // Nontemporal
- // MakePointerAvailable ; Vulkan memory model
- // MakePointerVisible ; Vulkan memory model
- // NonPrivatePointer ; Vulkan memory model
+ auto unary_op = ast::UnaryOp::kNegation;
+ if (GetUnaryOp(opcode, &unary_op)) {
+ auto arg0 = MakeOperand(inst, 0);
+ auto* unary_expr = create<ast::UnaryOpExpression>(Source{}, unary_op, arg0.expr);
+ TypedExpression result{ast_type, unary_expr};
+ return parser_impl_.RectifyForcedResultType(result, inst, arg0.type);
+ }
- if (!success()) {
- return false;
- }
-
- // LHS and RHS pointers must be reference types in WGSL.
- if (lhs.type->Is<Pointer>()) {
- lhs = Dereference(lhs);
- }
- if (rhs.type->Is<Pointer>()) {
- rhs = Dereference(rhs);
- }
-
- AddStatement(
- create<ast::AssignmentStatement>(Source{}, lhs.expr, rhs.expr));
- return success();
- }
-
- case SpvOpCopyObject: {
- // Arguably, OpCopyObject is purely combinatorial. On the other hand,
- // it exists to make a new name for something. So we choose to make
- // a new named constant definition.
- auto value_id = inst.GetSingleWordInOperand(0);
- const auto skip = GetSkipReason(value_id);
- if (skip != SkipReason::kDontSkip) {
- GetDefInfo(inst.result_id())->skip = skip;
- GetDefInfo(inst.result_id())->sink_pointer_source_expr =
- GetDefInfo(value_id)->sink_pointer_source_expr;
- return true;
- }
- auto expr = AddressOfIfNeeded(MakeExpression(value_id), &inst);
- if (!expr) {
- return false;
- }
- expr.type = RemapStorageClass(expr.type, result_id);
- return EmitConstDefOrWriteToHoistedVar(inst, expr);
+ const char* unary_builtin_name = GetUnaryBuiltInFunctionName(opcode);
+ if (unary_builtin_name != nullptr) {
+ ast::ExpressionList params;
+ params.emplace_back(MakeOperand(inst, 0).expr);
+ return {ast_type, create<ast::CallExpression>(
+ Source{},
+ create<ast::IdentifierExpression>(
+ Source{}, builder_.Symbols().Register(unary_builtin_name)),
+ std::move(params))};
}
- case SpvOpPhi: {
- // Emit a read from the associated state variable.
- TypedExpression expr{
- parser_impl_.ConvertType(inst.type_id()),
- create<ast::IdentifierExpression>(
- Source{}, builder_.Symbols().Register(def_info->phi_var))};
- return EmitConstDefOrWriteToHoistedVar(inst, expr);
+ const auto builtin = GetBuiltin(opcode);
+ if (builtin != sem::BuiltinType::kNone) {
+ return MakeBuiltinCall(inst);
}
- case SpvOpOuterProduct:
- // Synthesize an outer product expression in its own statement.
- return EmitConstDefOrWriteToHoistedVar(inst, MakeOuterProduct(inst));
+ if (opcode == SpvOpFMod) {
+ return MakeFMod(inst);
+ }
- case SpvOpVectorInsertDynamic:
- // Synthesize a vector insertion in its own statements.
- return MakeVectorInsertDynamic(inst);
+ if (opcode == SpvOpAccessChain || opcode == SpvOpInBoundsAccessChain) {
+ return MakeAccessChain(inst);
+ }
- case SpvOpCompositeInsert:
- // Synthesize a composite insertion in its own statements.
- return MakeCompositeInsert(inst);
+ if (opcode == SpvOpBitcast) {
+ return {ast_type, create<ast::BitcastExpression>(Source{}, ast_type->Build(builder_),
+ MakeOperand(inst, 0).expr)};
+ }
- case SpvOpFunctionCall:
- return EmitFunctionCall(inst);
+ if (opcode == SpvOpShiftLeftLogical || opcode == SpvOpShiftRightLogical ||
+ opcode == SpvOpShiftRightArithmetic) {
+ auto arg0 = MakeOperand(inst, 0);
+ // The second operand must be unsigned. It's ok to wrap the shift amount
+ // since the shift is modulo the bit width of the first operand.
+ auto arg1 = parser_impl_.AsUnsigned(MakeOperand(inst, 1));
- case SpvOpControlBarrier:
- return EmitControlBarrier(inst);
+ switch (opcode) {
+ case SpvOpShiftLeftLogical:
+ binary_op = ast::BinaryOp::kShiftLeft;
+ break;
+ case SpvOpShiftRightLogical:
+ arg0 = parser_impl_.AsUnsigned(arg0);
+ binary_op = ast::BinaryOp::kShiftRight;
+ break;
+ case SpvOpShiftRightArithmetic:
+ arg0 = parser_impl_.AsSigned(arg0);
+ binary_op = ast::BinaryOp::kShiftRight;
+ break;
+ default:
+ break;
+ }
+ TypedExpression result{
+ ast_type, create<ast::BinaryExpression>(Source{}, binary_op, arg0.expr, arg1.expr)};
+ return parser_impl_.RectifyForcedResultType(result, inst, arg0.type);
+ }
- case SpvOpExtInst:
- if (parser_impl_.IsIgnoredExtendedInstruction(inst)) {
- return true;
- }
- break;
-
- case SpvOpIAddCarry:
- case SpvOpISubBorrow:
- case SpvOpUMulExtended:
- case SpvOpSMulExtended:
- return Fail() << "extended arithmetic is not finalized for WGSL: "
- "https://github.com/gpuweb/gpuweb/issues/1565: "
- << inst.PrettyPrint();
-
- default:
- break;
- }
- return Fail() << "unhandled instruction with opcode " << inst.opcode() << ": "
- << inst.PrettyPrint();
-}
+ auto negated_op = NegatedFloatCompare(opcode);
+ if (negated_op != ast::BinaryOp::kNone) {
+ auto arg0 = MakeOperand(inst, 0);
+ auto arg1 = MakeOperand(inst, 1);
+ auto* binary_expr =
+ create<ast::BinaryExpression>(Source{}, negated_op, arg0.expr, arg1.expr);
+ auto* negated_expr =
+ create<ast::UnaryOpExpression>(Source{}, ast::UnaryOp::kNot, binary_expr);
+ return {ast_type, negated_expr};
+ }
-TypedExpression FunctionEmitter::MakeOperand(
- const spvtools::opt::Instruction& inst,
- uint32_t operand_index) {
- auto expr = MakeExpression(inst.GetSingleWordInOperand(operand_index));
- if (!expr) {
- return {};
- }
- return parser_impl_.RectifyOperandSignedness(inst, std::move(expr));
-}
+ if (opcode == SpvOpExtInst) {
+ if (parser_impl_.IsIgnoredExtendedInstruction(inst)) {
+ // Ignore it but don't error out.
+ return {};
+ }
+ if (!parser_impl_.IsGlslExtendedInstruction(inst)) {
+ Fail() << "unhandled extended instruction import with ID "
+ << inst.GetSingleWordInOperand(0);
+ return {};
+ }
+ return EmitGlslStd450ExtInst(inst);
+ }
-TypedExpression FunctionEmitter::InferFunctionStorageClass(
- TypedExpression expr) {
- TypedExpression result(expr);
- if (const auto* ref = expr.type->UnwrapAlias()->As<Reference>()) {
- if (ref->storage_class == ast::StorageClass::kNone) {
- expr.type = ty_.Reference(ref->type, ast::StorageClass::kFunction);
+ if (opcode == SpvOpCompositeConstruct) {
+ ast::ExpressionList operands;
+ for (uint32_t iarg = 0; iarg < inst.NumInOperands(); ++iarg) {
+ operands.emplace_back(MakeOperand(inst, iarg).expr);
+ }
+ return {ast_type,
+ builder_.Construct(Source{}, ast_type->Build(builder_), std::move(operands))};
}
- } else if (const auto* ptr = expr.type->UnwrapAlias()->As<Pointer>()) {
- if (ptr->storage_class == ast::StorageClass::kNone) {
- expr.type = ty_.Pointer(ptr->type, ast::StorageClass::kFunction);
+
+ if (opcode == SpvOpCompositeExtract) {
+ return MakeCompositeExtract(inst);
}
- }
- return expr;
-}
-TypedExpression FunctionEmitter::MaybeEmitCombinatorialValue(
- const spvtools::opt::Instruction& inst) {
- if (inst.result_id() == 0) {
- return {};
- }
-
- const auto opcode = inst.opcode();
-
- const Type* ast_type = nullptr;
- if (inst.type_id()) {
- ast_type = parser_impl_.ConvertType(inst.type_id());
- if (!ast_type) {
- Fail() << "couldn't convert result type for: " << inst.PrettyPrint();
- return {};
- }
- }
-
- auto binary_op = ConvertBinaryOp(opcode);
- if (binary_op != ast::BinaryOp::kNone) {
- auto arg0 = MakeOperand(inst, 0);
- auto arg1 = parser_impl_.RectifySecondOperandSignedness(
- inst, arg0.type, MakeOperand(inst, 1));
- if (!arg0 || !arg1) {
- return {};
- }
- auto* binary_expr = create<ast::BinaryExpression>(Source{}, binary_op,
- arg0.expr, arg1.expr);
- TypedExpression result{ast_type, binary_expr};
- return parser_impl_.RectifyForcedResultType(result, inst, arg0.type);
- }
-
- auto unary_op = ast::UnaryOp::kNegation;
- if (GetUnaryOp(opcode, &unary_op)) {
- auto arg0 = MakeOperand(inst, 0);
- auto* unary_expr =
- create<ast::UnaryOpExpression>(Source{}, unary_op, arg0.expr);
- TypedExpression result{ast_type, unary_expr};
- return parser_impl_.RectifyForcedResultType(result, inst, arg0.type);
- }
-
- const char* unary_builtin_name = GetUnaryBuiltInFunctionName(opcode);
- if (unary_builtin_name != nullptr) {
- ast::ExpressionList params;
- params.emplace_back(MakeOperand(inst, 0).expr);
- return {ast_type,
- create<ast::CallExpression>(
- Source{},
- create<ast::IdentifierExpression>(
- Source{}, builder_.Symbols().Register(unary_builtin_name)),
- std::move(params))};
- }
-
- const auto builtin = GetBuiltin(opcode);
- if (builtin != sem::BuiltinType::kNone) {
- return MakeBuiltinCall(inst);
- }
-
- if (opcode == SpvOpFMod) {
- return MakeFMod(inst);
- }
-
- if (opcode == SpvOpAccessChain || opcode == SpvOpInBoundsAccessChain) {
- return MakeAccessChain(inst);
- }
-
- if (opcode == SpvOpBitcast) {
- return {ast_type,
- create<ast::BitcastExpression>(Source{}, ast_type->Build(builder_),
- MakeOperand(inst, 0).expr)};
- }
-
- if (opcode == SpvOpShiftLeftLogical || opcode == SpvOpShiftRightLogical ||
- opcode == SpvOpShiftRightArithmetic) {
- auto arg0 = MakeOperand(inst, 0);
- // The second operand must be unsigned. It's ok to wrap the shift amount
- // since the shift is modulo the bit width of the first operand.
- auto arg1 = parser_impl_.AsUnsigned(MakeOperand(inst, 1));
+ if (opcode == SpvOpVectorShuffle) {
+ return MakeVectorShuffle(inst);
+ }
- switch (opcode) {
- case SpvOpShiftLeftLogical:
- binary_op = ast::BinaryOp::kShiftLeft;
- break;
- case SpvOpShiftRightLogical:
- arg0 = parser_impl_.AsUnsigned(arg0);
- binary_op = ast::BinaryOp::kShiftRight;
- break;
- case SpvOpShiftRightArithmetic:
- arg0 = parser_impl_.AsSigned(arg0);
- binary_op = ast::BinaryOp::kShiftRight;
- break;
- default:
- break;
+ if (opcode == SpvOpVectorExtractDynamic) {
+ return {ast_type, create<ast::IndexAccessorExpression>(Source{}, MakeOperand(inst, 0).expr,
+ MakeOperand(inst, 1).expr)};
}
- TypedExpression result{
- ast_type, create<ast::BinaryExpression>(Source{}, binary_op, arg0.expr,
- arg1.expr)};
- return parser_impl_.RectifyForcedResultType(result, inst, arg0.type);
- }
-
- auto negated_op = NegatedFloatCompare(opcode);
- if (negated_op != ast::BinaryOp::kNone) {
- auto arg0 = MakeOperand(inst, 0);
- auto arg1 = MakeOperand(inst, 1);
- auto* binary_expr = create<ast::BinaryExpression>(Source{}, negated_op,
- arg0.expr, arg1.expr);
- auto* negated_expr = create<ast::UnaryOpExpression>(
- Source{}, ast::UnaryOp::kNot, binary_expr);
- return {ast_type, negated_expr};
- }
-
- if (opcode == SpvOpExtInst) {
- if (parser_impl_.IsIgnoredExtendedInstruction(inst)) {
- // Ignore it but don't error out.
- return {};
- }
- if (!parser_impl_.IsGlslExtendedInstruction(inst)) {
- Fail() << "unhandled extended instruction import with ID "
- << inst.GetSingleWordInOperand(0);
- return {};
- }
- return EmitGlslStd450ExtInst(inst);
- }
-
- if (opcode == SpvOpCompositeConstruct) {
- ast::ExpressionList operands;
- for (uint32_t iarg = 0; iarg < inst.NumInOperands(); ++iarg) {
- operands.emplace_back(MakeOperand(inst, iarg).expr);
- }
- return {ast_type, builder_.Construct(Source{}, ast_type->Build(builder_),
- std::move(operands))};
- }
-
- if (opcode == SpvOpCompositeExtract) {
- return MakeCompositeExtract(inst);
- }
-
- if (opcode == SpvOpVectorShuffle) {
- return MakeVectorShuffle(inst);
- }
-
- if (opcode == SpvOpVectorExtractDynamic) {
- return {ast_type, create<ast::IndexAccessorExpression>(
- Source{}, MakeOperand(inst, 0).expr,
- MakeOperand(inst, 1).expr)};
- }
-
- if (opcode == SpvOpConvertSToF || opcode == SpvOpConvertUToF ||
- opcode == SpvOpConvertFToS || opcode == SpvOpConvertFToU) {
- return MakeNumericConversion(inst);
- }
-
- if (opcode == SpvOpUndef) {
- // Replace undef with the null value.
- return parser_impl_.MakeNullExpression(ast_type);
- }
-
- if (opcode == SpvOpSelect) {
- return MakeSimpleSelect(inst);
- }
-
- if (opcode == SpvOpArrayLength) {
- return MakeArrayLength(inst);
- }
-
- // builtin readonly function
- // glsl.std.450 readonly function
-
- // Instructions:
- // OpSatConvertSToU // Only in Kernel (OpenCL), not in WebGPU
- // OpSatConvertUToS // Only in Kernel (OpenCL), not in WebGPU
- // OpUConvert // Only needed when multiple widths supported
- // OpSConvert // Only needed when multiple widths supported
- // OpFConvert // Only needed when multiple widths supported
- // OpConvertPtrToU // Not in WebGPU
- // OpConvertUToPtr // Not in WebGPU
- // OpPtrCastToGeneric // Not in Vulkan
- // OpGenericCastToPtr // Not in Vulkan
- // OpGenericCastToPtrExplicit // Not in Vulkan
-
- return {};
-}
-TypedExpression FunctionEmitter::EmitGlslStd450ExtInst(
- const spvtools::opt::Instruction& inst) {
- const auto ext_opcode = inst.GetSingleWordInOperand(1);
-
- if (ext_opcode == GLSLstd450Ldexp) {
- // WGSL requires the second argument to be signed.
- // Use a type constructor to convert it, which is the same as a bitcast.
- // If the value would go from very large positive to negative, then the
- // original result would have been infinity. And since WGSL
- // implementations may assume that infinities are not present, then we
- // don't have to worry about that case.
- auto e1 = MakeOperand(inst, 2);
- auto e2 = ToSignedIfUnsigned(MakeOperand(inst, 3));
-
- return {e1.type, builder_.Call(Source{}, "ldexp",
- ast::ExpressionList{e1.expr, e2.expr})};
- }
-
- auto* result_type = parser_impl_.ConvertType(inst.type_id());
-
- if (result_type->IsScalar()) {
- // Some GLSLstd450 builtins have scalar forms not supported by WGSL.
- // Emulate them.
- switch (ext_opcode) {
- case GLSLstd450Normalize:
- // WGSL does not have scalar form of the normalize builtin.
- // The answer would be 1 anyway, so return that directly.
- return {ty_.F32(), builder_.Expr(1.0f)};
-
- case GLSLstd450FaceForward: {
- // If dot(Nref, Incident) < 0, the result is Normal, otherwise -Normal.
- // Also: select(-normal,normal, Incident*Nref < 0)
- // (The dot product of scalars is their product.)
- // Use a multiply instead of comparing floating point signs. It should
- // be among the fastest operations on a GPU.
- auto normal = MakeOperand(inst, 2);
- auto incident = MakeOperand(inst, 3);
- auto nref = MakeOperand(inst, 4);
- TINT_ASSERT(Reader, normal.type->Is<F32>());
- TINT_ASSERT(Reader, incident.type->Is<F32>());
- TINT_ASSERT(Reader, nref.type->Is<F32>());
- return {ty_.F32(),
- builder_.Call(
- Source{}, "select",
- ast::ExpressionList{
- create<ast::UnaryOpExpression>(
- Source{}, ast::UnaryOp::kNegation, normal.expr),
- normal.expr,
- create<ast::BinaryExpression>(
- Source{}, ast::BinaryOp::kLessThan,
- builder_.Mul({}, incident.expr, nref.expr),
- builder_.Expr(0.0f))})};
- }
-
- case GLSLstd450Reflect: {
- // Compute Incident - 2 * Normal * Normal * Incident
- auto incident = MakeOperand(inst, 2);
- auto normal = MakeOperand(inst, 3);
- TINT_ASSERT(Reader, incident.type->Is<F32>());
- TINT_ASSERT(Reader, normal.type->Is<F32>());
- return {
- ty_.F32(),
- builder_.Sub(
- incident.expr,
- builder_.Mul(2.0f, builder_.Mul(normal.expr,
- builder_.Mul(normal.expr,
- incident.expr))))};
- }
-
- case GLSLstd450Refract: {
- // It's a complicated expression. Compute it in two dimensions, but
- // with a 0-valued y component in both the incident and normal vectors,
- // then take the x component of that result.
- auto incident = MakeOperand(inst, 2);
- auto normal = MakeOperand(inst, 3);
- auto eta = MakeOperand(inst, 4);
- TINT_ASSERT(Reader, incident.type->Is<F32>());
- TINT_ASSERT(Reader, normal.type->Is<F32>());
- TINT_ASSERT(Reader, eta.type->Is<F32>());
- if (!success()) {
- return {};
- }
- const Type* f32 = eta.type;
- return {f32,
- builder_.MemberAccessor(
- builder_.Call(
- Source{}, "refract",
- ast::ExpressionList{
- builder_.vec2<float>(incident.expr, 0.0f),
- builder_.vec2<float>(normal.expr, 0.0f), eta.expr}),
- "x")};
- }
- default:
- break;
- }
- }
-
- const auto name = GetGlslStd450FuncName(ext_opcode);
- if (name.empty()) {
- Fail() << "unhandled GLSL.std.450 instruction " << ext_opcode;
+ if (opcode == SpvOpConvertSToF || opcode == SpvOpConvertUToF || opcode == SpvOpConvertFToS ||
+ opcode == SpvOpConvertFToU) {
+ return MakeNumericConversion(inst);
+ }
+
+ if (opcode == SpvOpUndef) {
+ // Replace undef with the null value.
+ return parser_impl_.MakeNullExpression(ast_type);
+ }
+
+ if (opcode == SpvOpSelect) {
+ return MakeSimpleSelect(inst);
+ }
+
+ if (opcode == SpvOpArrayLength) {
+ return MakeArrayLength(inst);
+ }
+
+ // builtin readonly function
+ // glsl.std.450 readonly function
+
+ // Instructions:
+ // OpSatConvertSToU // Only in Kernel (OpenCL), not in WebGPU
+ // OpSatConvertUToS // Only in Kernel (OpenCL), not in WebGPU
+ // OpUConvert // Only needed when multiple widths supported
+ // OpSConvert // Only needed when multiple widths supported
+ // OpFConvert // Only needed when multiple widths supported
+ // OpConvertPtrToU // Not in WebGPU
+ // OpConvertUToPtr // Not in WebGPU
+ // OpPtrCastToGeneric // Not in Vulkan
+ // OpGenericCastToPtr // Not in Vulkan
+ // OpGenericCastToPtrExplicit // Not in Vulkan
+
return {};
- }
-
- auto* func = create<ast::IdentifierExpression>(
- Source{}, builder_.Symbols().Register(name));
- ast::ExpressionList operands;
- const Type* first_operand_type = nullptr;
- // All parameters to GLSL.std.450 extended instructions are IDs.
- for (uint32_t iarg = 2; iarg < inst.NumInOperands(); ++iarg) {
- TypedExpression operand = MakeOperand(inst, iarg);
- if (first_operand_type == nullptr) {
- first_operand_type = operand.type;
- }
- operands.emplace_back(operand.expr);
- }
- auto* call = create<ast::CallExpression>(Source{}, func, std::move(operands));
- TypedExpression call_expr{result_type, call};
- return parser_impl_.RectifyForcedResultType(call_expr, inst,
- first_operand_type);
+}
+
+TypedExpression FunctionEmitter::EmitGlslStd450ExtInst(const spvtools::opt::Instruction& inst) {
+ const auto ext_opcode = inst.GetSingleWordInOperand(1);
+
+ if (ext_opcode == GLSLstd450Ldexp) {
+ // WGSL requires the second argument to be signed.
+ // Use a type constructor to convert it, which is the same as a bitcast.
+ // If the value would go from very large positive to negative, then the
+ // original result would have been infinity. And since WGSL
+ // implementations may assume that infinities are not present, then we
+ // don't have to worry about that case.
+ auto e1 = MakeOperand(inst, 2);
+ auto e2 = ToSignedIfUnsigned(MakeOperand(inst, 3));
+
+ return {e1.type, builder_.Call(Source{}, "ldexp", ast::ExpressionList{e1.expr, e2.expr})};
+ }
+
+ auto* result_type = parser_impl_.ConvertType(inst.type_id());
+
+ if (result_type->IsScalar()) {
+ // Some GLSLstd450 builtins have scalar forms not supported by WGSL.
+ // Emulate them.
+ switch (ext_opcode) {
+ case GLSLstd450Normalize:
+ // WGSL does not have scalar form of the normalize builtin.
+ // The answer would be 1 anyway, so return that directly.
+ return {ty_.F32(), builder_.Expr(1_f)};
+
+ case GLSLstd450FaceForward: {
+ // If dot(Nref, Incident) < 0, the result is Normal, otherwise -Normal.
+ // Also: select(-normal,normal, Incident*Nref < 0)
+ // (The dot product of scalars is their product.)
+ // Use a multiply instead of comparing floating point signs. It should
+ // be among the fastest operations on a GPU.
+ auto normal = MakeOperand(inst, 2);
+ auto incident = MakeOperand(inst, 3);
+ auto nref = MakeOperand(inst, 4);
+ TINT_ASSERT(Reader, normal.type->Is<F32>());
+ TINT_ASSERT(Reader, incident.type->Is<F32>());
+ TINT_ASSERT(Reader, nref.type->Is<F32>());
+ return {ty_.F32(),
+ builder_.Call(
+ Source{}, "select",
+ ast::ExpressionList{create<ast::UnaryOpExpression>(
+ Source{}, ast::UnaryOp::kNegation, normal.expr),
+ normal.expr,
+ create<ast::BinaryExpression>(
+ Source{}, ast::BinaryOp::kLessThan,
+ builder_.Mul({}, incident.expr, nref.expr),
+ builder_.Expr(0_f))})};
+ }
+
+ case GLSLstd450Reflect: {
+ // Compute Incident - 2 * Normal * Normal * Incident
+ auto incident = MakeOperand(inst, 2);
+ auto normal = MakeOperand(inst, 3);
+ TINT_ASSERT(Reader, incident.type->Is<F32>());
+ TINT_ASSERT(Reader, normal.type->Is<F32>());
+ return {
+ ty_.F32(),
+ builder_.Sub(
+ incident.expr,
+ builder_.Mul(2_f, builder_.Mul(normal.expr,
+ builder_.Mul(normal.expr, incident.expr))))};
+ }
+
+ case GLSLstd450Refract: {
+ // It's a complicated expression. Compute it in two dimensions, but
+ // with a 0-valued y component in both the incident and normal vectors,
+ // then take the x component of that result.
+ auto incident = MakeOperand(inst, 2);
+ auto normal = MakeOperand(inst, 3);
+ auto eta = MakeOperand(inst, 4);
+ TINT_ASSERT(Reader, incident.type->Is<F32>());
+ TINT_ASSERT(Reader, normal.type->Is<F32>());
+ TINT_ASSERT(Reader, eta.type->Is<F32>());
+ if (!success()) {
+ return {};
+ }
+ const Type* f32 = eta.type;
+ return {f32, builder_.MemberAccessor(
+ builder_.Call(
+ Source{}, "refract",
+ ast::ExpressionList{
+ builder_.vec2<tint::f32>(incident.expr, 0_f),
+ builder_.vec2<tint::f32>(normal.expr, 0_f), eta.expr}),
+ "x")};
+ }
+ default:
+ break;
+ }
+ }
+
+ const auto name = GetGlslStd450FuncName(ext_opcode);
+ if (name.empty()) {
+ Fail() << "unhandled GLSL.std.450 instruction " << ext_opcode;
+ return {};
+ }
+
+ auto* func = create<ast::IdentifierExpression>(Source{}, builder_.Symbols().Register(name));
+ ast::ExpressionList operands;
+ const Type* first_operand_type = nullptr;
+ // All parameters to GLSL.std.450 extended instructions are IDs.
+ for (uint32_t iarg = 2; iarg < inst.NumInOperands(); ++iarg) {
+ TypedExpression operand = MakeOperand(inst, iarg);
+ if (first_operand_type == nullptr) {
+ first_operand_type = operand.type;
+ }
+ operands.emplace_back(operand.expr);
+ }
+ auto* call = create<ast::CallExpression>(Source{}, func, std::move(operands));
+ TypedExpression call_expr{result_type, call};
+ return parser_impl_.RectifyForcedResultType(call_expr, inst, first_operand_type);
}
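// The scalar emulations above reduce to simple identities. A standalone sketch
// checking them in plain C++ (illustrative only; <cassert> code, not part of the
// Tint sources):

#include <cassert>

// Scalar faceforward: select(-n, n, i * nref < 0), since dot() of scalars is a product.
float faceforward_scalar(float n, float i, float nref) {
    return (i * nref < 0.0f) ? n : -n;
}

// Scalar reflect: i - 2 * n * n * i.
float reflect_scalar(float i, float n) {
    return i - 2.0f * n * n * i;
}

int main() {
    assert(faceforward_scalar(2.0f, 1.0f, -3.0f) == 2.0f);   // i * nref < 0 keeps n
    assert(faceforward_scalar(2.0f, 1.0f, 3.0f) == -2.0f);   // otherwise -n
    assert(reflect_scalar(1.0f, 1.0f) == -1.0f);              // reflection about n == 1
    return 0;
}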
ast::IdentifierExpression* FunctionEmitter::Swizzle(uint32_t i) {
- if (i >= kMaxVectorLen) {
- Fail() << "vector component index is larger than " << kMaxVectorLen - 1
- << ": " << i;
- return nullptr;
- }
- const char* names[] = {"x", "y", "z", "w"};
- return create<ast::IdentifierExpression>(
- Source{}, builder_.Symbols().Register(names[i & 3]));
+ if (i >= kMaxVectorLen) {
+ Fail() << "vector component index is larger than " << kMaxVectorLen - 1 << ": " << i;
+ return nullptr;
+ }
+ const char* names[] = {"x", "y", "z", "w"};
+ return create<ast::IdentifierExpression>(Source{}, builder_.Symbols().Register(names[i & 3]));
}
ast::IdentifierExpression* FunctionEmitter::PrefixSwizzle(uint32_t n) {
- switch (n) {
- case 1:
- return create<ast::IdentifierExpression>(
- Source{}, builder_.Symbols().Register("x"));
- case 2:
- return create<ast::IdentifierExpression>(
- Source{}, builder_.Symbols().Register("xy"));
- case 3:
- return create<ast::IdentifierExpression>(
- Source{}, builder_.Symbols().Register("xyz"));
- default:
- break;
- }
- Fail() << "invalid swizzle prefix count: " << n;
- return nullptr;
+ switch (n) {
+ case 1:
+ return create<ast::IdentifierExpression>(Source{}, builder_.Symbols().Register("x"));
+ case 2:
+ return create<ast::IdentifierExpression>(Source{}, builder_.Symbols().Register("xy"));
+ case 3:
+ return create<ast::IdentifierExpression>(Source{}, builder_.Symbols().Register("xyz"));
+ default:
+ break;
+ }
+ Fail() << "invalid swizzle prefix count: " << n;
+ return nullptr;
}
-TypedExpression FunctionEmitter::MakeFMod(
- const spvtools::opt::Instruction& inst) {
- auto x = MakeOperand(inst, 0);
- auto y = MakeOperand(inst, 1);
- if (!x || !y) {
- return {};
- }
- // Emulated with: x - y * floor(x / y)
- auto* div = builder_.Div(x.expr, y.expr);
- auto* floor = builder_.Call("floor", div);
- auto* y_floor = builder_.Mul(y.expr, floor);
- auto* res = builder_.Sub(x.expr, y_floor);
- return {x.type, res};
+TypedExpression FunctionEmitter::MakeFMod(const spvtools::opt::Instruction& inst) {
+ auto x = MakeOperand(inst, 0);
+ auto y = MakeOperand(inst, 1);
+ if (!x || !y) {
+ return {};
+ }
+ // Emulated with: x - y * floor(x / y)
+ auto* div = builder_.Div(x.expr, y.expr);
+ auto* floor = builder_.Call("floor", div);
+ auto* y_floor = builder_.Mul(y.expr, floor);
+ auto* res = builder_.Sub(x.expr, y_floor);
+ return {x.type, res};
}
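// OpFMod takes the sign of its second operand, which is what the floor-based
// emulation above provides. A standalone sketch in plain C++ (illustrative only,
// not part of the Tint sources):

#include <cmath>
#include <cstdio>

// Floating-point mod whose result takes the sign of y: x - y * floor(x / y).
float fmod_floor(float x, float y) {
    return x - y * std::floor(x / y);
}

int main() {
    std::printf("%g\n", fmod_floor(5.5f, 2.0f));   // 1.5
    std::printf("%g\n", fmod_floor(-5.5f, 2.0f));  // 0.5, sign of y (std::fmod would give -1.5)
    std::printf("%g\n", fmod_floor(5.5f, -2.0f));  // -0.5
    return 0;
}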
-TypedExpression FunctionEmitter::MakeAccessChain(
- const spvtools::opt::Instruction& inst) {
- if (inst.NumInOperands() < 1) {
- // Binary parsing will fail on this anyway.
- Fail() << "invalid access chain: has no input operands";
- return {};
- }
-
- const auto base_id = inst.GetSingleWordInOperand(0);
- const auto base_skip = GetSkipReason(base_id);
- if (base_skip != SkipReason::kDontSkip) {
- // This can occur for AccessChain with no indices.
- GetDefInfo(inst.result_id())->skip = base_skip;
- GetDefInfo(inst.result_id())->sink_pointer_source_expr =
- GetDefInfo(base_id)->sink_pointer_source_expr;
- return {};
- }
-
- auto ptr_ty_id = def_use_mgr_->GetDef(base_id)->type_id();
- uint32_t first_index = 1;
- const auto num_in_operands = inst.NumInOperands();
-
- bool sink_pointer = false;
- TypedExpression current_expr;
-
- // If the variable was originally gl_PerVertex, then in the AST we
- // have instead emitted a gl_Position variable.
- // If computing the pointer to the Position builtin, then emit the
- // pointer to the generated gl_Position variable.
- // If computing the pointer to the PointSize builtin, then mark the
- // result as skippable due to being the point-size pointer.
- // If computing the pointer to the ClipDistance or CullDistance builtins,
- // then error out.
- {
- const auto& builtin_position_info = parser_impl_.GetBuiltInPositionInfo();
- if (base_id == builtin_position_info.per_vertex_var_id) {
- // We only support the Position member.
- const auto* member_index_inst =
- def_use_mgr_->GetDef(inst.GetSingleWordInOperand(first_index));
- if (member_index_inst == nullptr) {
- Fail()
- << "first index of access chain does not reference an instruction: "
- << inst.PrettyPrint();
+TypedExpression FunctionEmitter::MakeAccessChain(const spvtools::opt::Instruction& inst) {
+ if (inst.NumInOperands() < 1) {
+ // Binary parsing will fail on this anyway.
+ Fail() << "invalid access chain: has no input operands";
return {};
- }
- const auto* member_index_const =
- constant_mgr_->GetConstantFromInst(member_index_inst);
- if (member_index_const == nullptr) {
- Fail() << "first index of access chain into per-vertex structure is "
- "not a constant: "
- << inst.PrettyPrint();
+ }
+
+ const auto base_id = inst.GetSingleWordInOperand(0);
+ const auto base_skip = GetSkipReason(base_id);
+ if (base_skip != SkipReason::kDontSkip) {
+ // This can occur for AccessChain with no indices.
+ GetDefInfo(inst.result_id())->skip = base_skip;
+ GetDefInfo(inst.result_id())->sink_pointer_source_expr =
+ GetDefInfo(base_id)->sink_pointer_source_expr;
return {};
- }
- const auto* member_index_const_int = member_index_const->AsIntConstant();
- if (member_index_const_int == nullptr) {
- Fail() << "first index of access chain into per-vertex structure is "
- "not a constant integer: "
- << inst.PrettyPrint();
+ }
+
+ auto ptr_ty_id = def_use_mgr_->GetDef(base_id)->type_id();
+ uint32_t first_index = 1;
+ const auto num_in_operands = inst.NumInOperands();
+
+ bool sink_pointer = false;
+ TypedExpression current_expr;
+
+ // If the variable was originally gl_PerVertex, then in the AST we
+ // have instead emitted a gl_Position variable.
+ // If computing the pointer to the Position builtin, then emit the
+ // pointer to the generated gl_Position variable.
+ // If computing the pointer to the PointSize builtin, then mark the
+ // result as skippable due to being the point-size pointer.
+ // If computing the pointer to the ClipDistance or CullDistance builtins,
+ // then error out.
+ {
+ const auto& builtin_position_info = parser_impl_.GetBuiltInPositionInfo();
+ if (base_id == builtin_position_info.per_vertex_var_id) {
+ // We only support the Position member.
+ const auto* member_index_inst =
+ def_use_mgr_->GetDef(inst.GetSingleWordInOperand(first_index));
+ if (member_index_inst == nullptr) {
+ Fail() << "first index of access chain does not reference an instruction: "
+ << inst.PrettyPrint();
+ return {};
+ }
+ const auto* member_index_const = constant_mgr_->GetConstantFromInst(member_index_inst);
+ if (member_index_const == nullptr) {
+ Fail() << "first index of access chain into per-vertex structure is "
+ "not a constant: "
+ << inst.PrettyPrint();
+ return {};
+ }
+ const auto* member_index_const_int = member_index_const->AsIntConstant();
+ if (member_index_const_int == nullptr) {
+ Fail() << "first index of access chain into per-vertex structure is "
+ "not a constant integer: "
+ << inst.PrettyPrint();
+ return {};
+ }
+ const auto member_index_value = member_index_const_int->GetZeroExtendedValue();
+ if (member_index_value != builtin_position_info.position_member_index) {
+ if (member_index_value == builtin_position_info.pointsize_member_index) {
+ if (auto* def_info = GetDefInfo(inst.result_id())) {
+ def_info->skip = SkipReason::kPointSizeBuiltinPointer;
+ return {};
+ }
+ } else {
+ // TODO(dneto): Handle ClipDistance and CullDistance
+ Fail() << "accessing per-vertex member " << member_index_value
+ << " is not supported. Only Position is supported, and "
+ "PointSize is ignored";
+ return {};
+ }
+ }
+
+ // Skip past the member index that gets us to Position.
+ first_index = first_index + 1;
+ // Replace the gl_PerVertex reference with the gl_Position reference
+ ptr_ty_id = builtin_position_info.position_member_pointer_type_id;
+
+ auto name = namer_.Name(base_id);
+ current_expr.expr =
+ create<ast::IdentifierExpression>(Source{}, builder_.Symbols().Register(name));
+ current_expr.type = parser_impl_.ConvertType(ptr_ty_id, PtrAs::Ref);
+ }
+ }
+
+ // A SPIR-V access chain is a single instruction with multiple indices
+ // walking down into composites. The Tint AST represents this as
+ // ever-deeper nested indexing expressions. Start off with an expression
+ // for the base, and then bury that inside nested indexing expressions.
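    //
    // For illustration (hypothetical IDs): an access chain into a matrix column
    // and then a constant vector component, e.g.
    //   %p = OpAccessChain %ptr_f32 %m %i %one
    // ends up as the nested expression `m[i].y`: an index accessor for the matrix
    // column and a swizzle member accessor for the constant vector index.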
+ if (!current_expr) {
+ current_expr = InferFunctionStorageClass(MakeOperand(inst, 0));
+ if (current_expr.type->Is<Pointer>()) {
+ current_expr = Dereference(current_expr);
+ }
+ }
+ const auto constants = constant_mgr_->GetOperandConstants(&inst);
+
+ const auto* ptr_type_inst = def_use_mgr_->GetDef(ptr_ty_id);
+ if (!ptr_type_inst || (ptr_type_inst->opcode() != SpvOpTypePointer)) {
+ Fail() << "Access chain %" << inst.result_id() << " base pointer is not of pointer type";
return {};
- }
- const auto member_index_value =
- member_index_const_int->GetZeroExtendedValue();
- if (member_index_value != builtin_position_info.position_member_index) {
- if (member_index_value ==
- builtin_position_info.pointsize_member_index) {
- if (auto* def_info = GetDefInfo(inst.result_id())) {
- def_info->skip = SkipReason::kPointSizeBuiltinPointer;
- return {};
- }
- } else {
- // TODO(dneto): Handle ClipDistance and CullDistance
- Fail() << "accessing per-vertex member " << member_index_value
- << " is not supported. Only Position is supported, and "
- "PointSize is ignored";
- return {};
- }
- }
-
- // Skip past the member index that gets us to Position.
- first_index = first_index + 1;
- // Replace the gl_PerVertex reference with the gl_Position reference
- ptr_ty_id = builtin_position_info.position_member_pointer_type_id;
-
- auto name = namer_.Name(base_id);
- current_expr.expr = create<ast::IdentifierExpression>(
- Source{}, builder_.Symbols().Register(name));
- current_expr.type = parser_impl_.ConvertType(ptr_ty_id, PtrAs::Ref);
- }
- }
-
- // A SPIR-V access chain is a single instruction with multiple indices
- // walking down into composites. The Tint AST represents this as
- // ever-deeper nested indexing expressions. Start off with an expression
- // for the base, and then bury that inside nested indexing expressions.
- if (!current_expr) {
- current_expr = InferFunctionStorageClass(MakeOperand(inst, 0));
- if (current_expr.type->Is<Pointer>()) {
- current_expr = Dereference(current_expr);
- }
- }
- const auto constants = constant_mgr_->GetOperandConstants(&inst);
-
- const auto* ptr_type_inst = def_use_mgr_->GetDef(ptr_ty_id);
- if (!ptr_type_inst || (ptr_type_inst->opcode() != SpvOpTypePointer)) {
- Fail() << "Access chain %" << inst.result_id()
- << " base pointer is not of pointer type";
- return {};
- }
- SpvStorageClass storage_class =
- static_cast<SpvStorageClass>(ptr_type_inst->GetSingleWordInOperand(0));
- uint32_t pointee_type_id = ptr_type_inst->GetSingleWordInOperand(1);
-
- // Build up a nested expression for the access chain by walking down the type
- // hierarchy, maintaining |pointee_type_id| as the SPIR-V ID of the type of
- // the object pointed to after processing the previous indices.
- for (uint32_t index = first_index; index < num_in_operands; ++index) {
- const auto* index_const =
- constants[index] ? constants[index]->AsIntConstant() : nullptr;
- const int64_t index_const_val =
- index_const ? index_const->GetSignExtendedValue() : 0;
- const ast::Expression* next_expr = nullptr;
-
- const auto* pointee_type_inst = def_use_mgr_->GetDef(pointee_type_id);
- if (!pointee_type_inst) {
- Fail() << "pointee type %" << pointee_type_id
- << " is invalid after following " << (index - first_index)
- << " indices: " << inst.PrettyPrint();
- return {};
- }
- switch (pointee_type_inst->opcode()) {
- case SpvOpTypeVector:
- if (index_const) {
- // Try generating a MemberAccessor expression
- const auto num_elems = pointee_type_inst->GetSingleWordInOperand(1);
- if (index_const_val < 0 || num_elems <= index_const_val) {
- Fail() << "Access chain %" << inst.result_id() << " index %"
- << inst.GetSingleWordInOperand(index) << " value "
- << index_const_val << " is out of bounds for vector of "
- << num_elems << " elements";
+ }
+ SpvStorageClass storage_class =
+ static_cast<SpvStorageClass>(ptr_type_inst->GetSingleWordInOperand(0));
+ uint32_t pointee_type_id = ptr_type_inst->GetSingleWordInOperand(1);
+
+ // Build up a nested expression for the access chain by walking down the type
+ // hierarchy, maintaining |pointee_type_id| as the SPIR-V ID of the type of
+ // the object pointed to after processing the previous indices.
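+    // Each step chooses the accessor form from the pointee type: struct members
+    // become named member accesses, constant vector indices become swizzles, and
+    // everything else uses index ('[]') syntax.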
+ for (uint32_t index = first_index; index < num_in_operands; ++index) {
+ const auto* index_const = constants[index] ? constants[index]->AsIntConstant() : nullptr;
+ const int64_t index_const_val = index_const ? index_const->GetSignExtendedValue() : 0;
+ const ast::Expression* next_expr = nullptr;
+
+ const auto* pointee_type_inst = def_use_mgr_->GetDef(pointee_type_id);
+ if (!pointee_type_inst) {
+ Fail() << "pointee type %" << pointee_type_id << " is invalid after following "
+ << (index - first_index) << " indices: " << inst.PrettyPrint();
return {};
- }
- if (uint64_t(index_const_val) >= kMaxVectorLen) {
- Fail() << "internal error: swizzle index " << index_const_val
- << " is too big. Max handled index is " << kMaxVectorLen - 1;
- }
- next_expr = create<ast::MemberAccessorExpression>(
- Source{}, current_expr.expr, Swizzle(uint32_t(index_const_val)));
- } else {
- // Non-constant index. Use array syntax
- next_expr = create<ast::IndexAccessorExpression>(
- Source{}, current_expr.expr, MakeOperand(inst, index).expr);
- }
- // All vector components are the same type.
- pointee_type_id = pointee_type_inst->GetSingleWordInOperand(0);
- // Sink pointers to vector components.
- sink_pointer = true;
- break;
- case SpvOpTypeMatrix:
- // Use array syntax.
- next_expr = create<ast::IndexAccessorExpression>(
- Source{}, current_expr.expr, MakeOperand(inst, index).expr);
- // All matrix components are the same type.
- pointee_type_id = pointee_type_inst->GetSingleWordInOperand(0);
- break;
- case SpvOpTypeArray:
- next_expr = create<ast::IndexAccessorExpression>(
- Source{}, current_expr.expr, MakeOperand(inst, index).expr);
- pointee_type_id = pointee_type_inst->GetSingleWordInOperand(0);
- break;
- case SpvOpTypeRuntimeArray:
- next_expr = create<ast::IndexAccessorExpression>(
- Source{}, current_expr.expr, MakeOperand(inst, index).expr);
- pointee_type_id = pointee_type_inst->GetSingleWordInOperand(0);
- break;
- case SpvOpTypeStruct: {
- if (!index_const) {
- Fail() << "Access chain %" << inst.result_id() << " index %"
- << inst.GetSingleWordInOperand(index)
- << " is a non-constant index into a structure %"
- << pointee_type_id;
- return {};
- }
- const auto num_members = pointee_type_inst->NumInOperands();
- if ((index_const_val < 0) || num_members <= uint64_t(index_const_val)) {
- Fail() << "Access chain %" << inst.result_id() << " index value "
- << index_const_val << " is out of bounds for structure %"
- << pointee_type_id << " having " << num_members << " members";
- return {};
- }
- auto name =
- namer_.GetMemberName(pointee_type_id, uint32_t(index_const_val));
- auto* member_access = create<ast::IdentifierExpression>(
- Source{}, builder_.Symbols().Register(name));
-
- next_expr = create<ast::MemberAccessorExpression>(
- Source{}, current_expr.expr, member_access);
- pointee_type_id = pointee_type_inst->GetSingleWordInOperand(
- static_cast<uint32_t>(index_const_val));
- break;
- }
- default:
- Fail() << "Access chain with unknown or invalid pointee type %"
- << pointee_type_id << ": " << pointee_type_inst->PrettyPrint();
- return {};
+ }
+ switch (pointee_type_inst->opcode()) {
+ case SpvOpTypeVector:
+ if (index_const) {
+ // Try generating a MemberAccessor expression
+ const auto num_elems = pointee_type_inst->GetSingleWordInOperand(1);
+ if (index_const_val < 0 || num_elems <= index_const_val) {
+ Fail() << "Access chain %" << inst.result_id() << " index %"
+ << inst.GetSingleWordInOperand(index) << " value " << index_const_val
+ << " is out of bounds for vector of " << num_elems << " elements";
+ return {};
+ }
+ if (uint64_t(index_const_val) >= kMaxVectorLen) {
+ Fail() << "internal error: swizzle index " << index_const_val
+ << " is too big. Max handled index is " << kMaxVectorLen - 1;
+ }
+ next_expr = create<ast::MemberAccessorExpression>(
+ Source{}, current_expr.expr, Swizzle(uint32_t(index_const_val)));
+ } else {
+ // Non-constant index. Use array syntax
+ next_expr = create<ast::IndexAccessorExpression>(Source{}, current_expr.expr,
+ MakeOperand(inst, index).expr);
+ }
+ // All vector components are the same type.
+ pointee_type_id = pointee_type_inst->GetSingleWordInOperand(0);
+ // Sink pointers to vector components.
+ sink_pointer = true;
+ break;
+ case SpvOpTypeMatrix:
+ // Use array syntax.
+ next_expr = create<ast::IndexAccessorExpression>(Source{}, current_expr.expr,
+ MakeOperand(inst, index).expr);
+ // All matrix components are the same type.
+ pointee_type_id = pointee_type_inst->GetSingleWordInOperand(0);
+ break;
+ case SpvOpTypeArray:
+ next_expr = create<ast::IndexAccessorExpression>(Source{}, current_expr.expr,
+ MakeOperand(inst, index).expr);
+ pointee_type_id = pointee_type_inst->GetSingleWordInOperand(0);
+ break;
+ case SpvOpTypeRuntimeArray:
+ next_expr = create<ast::IndexAccessorExpression>(Source{}, current_expr.expr,
+ MakeOperand(inst, index).expr);
+ pointee_type_id = pointee_type_inst->GetSingleWordInOperand(0);
+ break;
+ case SpvOpTypeStruct: {
+ if (!index_const) {
+ Fail() << "Access chain %" << inst.result_id() << " index %"
+ << inst.GetSingleWordInOperand(index)
+ << " is a non-constant index into a structure %" << pointee_type_id;
+ return {};
+ }
+ const auto num_members = pointee_type_inst->NumInOperands();
+ if ((index_const_val < 0) || num_members <= uint64_t(index_const_val)) {
+ Fail() << "Access chain %" << inst.result_id() << " index value "
+ << index_const_val << " is out of bounds for structure %"
+ << pointee_type_id << " having " << num_members << " members";
+ return {};
+ }
+ auto name = namer_.GetMemberName(pointee_type_id, uint32_t(index_const_val));
+ auto* member_access =
+ create<ast::IdentifierExpression>(Source{}, builder_.Symbols().Register(name));
+
+ next_expr = create<ast::MemberAccessorExpression>(Source{}, current_expr.expr,
+ member_access);
+ pointee_type_id = pointee_type_inst->GetSingleWordInOperand(
+ static_cast<uint32_t>(index_const_val));
+ break;
+ }
+ default:
+ Fail() << "Access chain with unknown or invalid pointee type %" << pointee_type_id
+ << ": " << pointee_type_inst->PrettyPrint();
+ return {};
+ }
+ const auto pointer_type_id = type_mgr_->FindPointerToType(pointee_type_id, storage_class);
+ auto* type = parser_impl_.ConvertType(pointer_type_id, PtrAs::Ref);
+ TINT_ASSERT(Reader, type && type->Is<Reference>());
+ current_expr = TypedExpression{type, next_expr};
}
- const auto pointer_type_id =
- type_mgr_->FindPointerToType(pointee_type_id, storage_class);
- auto* type = parser_impl_.ConvertType(pointer_type_id, PtrAs::Ref);
- TINT_ASSERT(Reader, type && type->Is<Reference>());
- current_expr = TypedExpression{type, next_expr};
- }
- if (sink_pointer) {
- // Capture the reference so that we can sink it into the point of use.
- GetDefInfo(inst.result_id())->skip = SkipReason::kSinkPointerIntoUse;
- GetDefInfo(inst.result_id())->sink_pointer_source_expr = current_expr;
- }
+ if (sink_pointer) {
+ // Capture the reference so that we can sink it into the point of use.
+ GetDefInfo(inst.result_id())->skip = SkipReason::kSinkPointerIntoUse;
+ GetDefInfo(inst.result_id())->sink_pointer_source_expr = current_expr;
+ }
- return current_expr;
+ return current_expr;
}
-TypedExpression FunctionEmitter::MakeCompositeExtract(
- const spvtools::opt::Instruction& inst) {
- // This is structurally similar to creating an access chain, but
- // the SPIR-V instruction has literal indices instead of IDs for indices.
+TypedExpression FunctionEmitter::MakeCompositeExtract(const spvtools::opt::Instruction& inst) {
+ // This is structurally similar to creating an access chain, but
+ // the SPIR-V instruction has literal indices instead of IDs for indices.
- auto composite_index = 0;
- auto first_index_position = 1;
- TypedExpression current_expr(MakeOperand(inst, composite_index));
- if (!current_expr) {
- return {};
- }
+ auto composite_index = 0;
+ auto first_index_position = 1;
+ TypedExpression current_expr(MakeOperand(inst, composite_index));
+ if (!current_expr) {
+ return {};
+ }
- const auto composite_id = inst.GetSingleWordInOperand(composite_index);
- auto current_type_id = def_use_mgr_->GetDef(composite_id)->type_id();
+ const auto composite_id = inst.GetSingleWordInOperand(composite_index);
+ auto current_type_id = def_use_mgr_->GetDef(composite_id)->type_id();
- return MakeCompositeValueDecomposition(inst, current_expr, current_type_id,
- first_index_position);
+ return MakeCompositeValueDecomposition(inst, current_expr, current_type_id,
+ first_index_position);
}
TypedExpression FunctionEmitter::MakeCompositeValueDecomposition(
@@ -4461,1681 +4292,1595 @@ TypedExpression FunctionEmitter::MakeCompositeValueDecomposition(
TypedExpression composite,
uint32_t composite_type_id,
int index_start) {
- // This is structurally similar to creating an access chain, but
- // the SPIR-V instruction has literal indices instead of IDs for indices.
-
- // A SPIR-V composite extract is a single instruction with multiple
- // literal indices walking down into composites.
- // A SPIR-V composite insert is similar but also tells you what component
- // to inject. This function is responsible for the the walking-into part
- // of composite-insert.
- //
- // The Tint AST represents this as ever-deeper nested indexing expressions.
- // Start off with an expression for the composite, and then bury that inside
- // nested indexing expressions.
-
- auto current_expr = composite;
- auto current_type_id = composite_type_id;
-
- auto make_index = [this](uint32_t literal) {
- return create<ast::UintLiteralExpression>(Source{}, literal);
- };
-
- // Build up a nested expression for the decomposition by walking down the type
- // hierarchy, maintaining |current_type_id| as the SPIR-V ID of the type of
- // the object pointed to after processing the previous indices.
- const auto num_in_operands = inst.NumInOperands();
- for (uint32_t index = index_start; index < num_in_operands; ++index) {
- const uint32_t index_val = inst.GetSingleWordInOperand(index);
-
- const auto* current_type_inst = def_use_mgr_->GetDef(current_type_id);
- if (!current_type_inst) {
- Fail() << "composite type %" << current_type_id
- << " is invalid after following " << (index - index_start)
- << " indices: " << inst.PrettyPrint();
- return {};
- }
- const char* operation_name = nullptr;
- switch (inst.opcode()) {
- case SpvOpCompositeExtract:
- operation_name = "OpCompositeExtract";
- break;
- case SpvOpCompositeInsert:
- operation_name = "OpCompositeInsert";
- break;
- default:
- Fail() << "internal error: unhandled " << inst.PrettyPrint();
- return {};
- }
- const ast::Expression* next_expr = nullptr;
- switch (current_type_inst->opcode()) {
- case SpvOpTypeVector: {
- // Try generating a MemberAccessor expression. That result in something
- // like "foo.z", which is more idiomatic than "foo[2]".
- const auto num_elems = current_type_inst->GetSingleWordInOperand(1);
- if (num_elems <= index_val) {
- Fail() << operation_name << " %" << inst.result_id()
- << " index value " << index_val
- << " is out of bounds for vector of " << num_elems
- << " elements";
- return {};
- }
- if (index_val >= kMaxVectorLen) {
- Fail() << "internal error: swizzle index " << index_val
- << " is too big. Max handled index is " << kMaxVectorLen - 1;
- return {};
- }
- next_expr = create<ast::MemberAccessorExpression>(
- Source{}, current_expr.expr, Swizzle(index_val));
- // All vector components are the same type.
- current_type_id = current_type_inst->GetSingleWordInOperand(0);
- break;
- }
- case SpvOpTypeMatrix: {
- // Check bounds
- const auto num_elems = current_type_inst->GetSingleWordInOperand(1);
- if (num_elems <= index_val) {
- Fail() << operation_name << " %" << inst.result_id()
- << " index value " << index_val
- << " is out of bounds for matrix of " << num_elems
- << " elements";
- return {};
- }
- if (index_val >= kMaxVectorLen) {
- Fail() << "internal error: swizzle index " << index_val
- << " is too big. Max handled index is " << kMaxVectorLen - 1;
- }
- // Use array syntax.
- next_expr = create<ast::IndexAccessorExpression>(
- Source{}, current_expr.expr, make_index(index_val));
- // All matrix components are the same type.
- current_type_id = current_type_inst->GetSingleWordInOperand(0);
- break;
- }
- case SpvOpTypeArray:
- // The array size could be a spec constant, and so it's not always
- // statically checkable. Instead, rely on a runtime index clamp
- // or runtime check to keep this safe.
- next_expr = create<ast::IndexAccessorExpression>(
- Source{}, current_expr.expr, make_index(index_val));
- current_type_id = current_type_inst->GetSingleWordInOperand(0);
- break;
- case SpvOpTypeRuntimeArray:
- Fail() << "can't do " << operation_name
- << " on a runtime array: " << inst.PrettyPrint();
- return {};
- case SpvOpTypeStruct: {
- const auto num_members = current_type_inst->NumInOperands();
- if (num_members <= index_val) {
- Fail() << operation_name << " %" << inst.result_id()
- << " index value " << index_val
- << " is out of bounds for structure %" << current_type_id
- << " having " << num_members << " members";
- return {};
- }
- auto name = namer_.GetMemberName(current_type_id, uint32_t(index_val));
- auto* member_access = create<ast::IdentifierExpression>(
- Source{}, builder_.Symbols().Register(name));
-
- next_expr = create<ast::MemberAccessorExpression>(
- Source{}, current_expr.expr, member_access);
- current_type_id = current_type_inst->GetSingleWordInOperand(index_val);
- break;
- }
- default:
- Fail() << operation_name << " with bad type %" << current_type_id
- << ": " << current_type_inst->PrettyPrint();
- return {};
+ // This is structurally similar to creating an access chain, but
+ // the SPIR-V instruction has literal indices instead of IDs for indices.
+
+ // A SPIR-V composite extract is a single instruction with multiple
+ // literal indices walking down into composites.
+ // A SPIR-V composite insert is similar but also tells you what component
+    // to inject. This function is responsible for the walking-into part
+ // of composite-insert.
+ //
+ // The Tint AST represents this as ever-deeper nested indexing expressions.
+ // Start off with an expression for the composite, and then bury that inside
+ // nested indexing expressions.
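+    // For example, OpCompositeExtract with literal indices 2 and 1 applied to a
+    // matrix of vectors decomposes to roughly m[2u].y (an illustrative sketch;
+    // the name m stands in for the composite operand).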
+
+ auto current_expr = composite;
+ auto current_type_id = composite_type_id;
+
+ auto make_index = [this](uint32_t literal) {
+ return create<ast::IntLiteralExpression>(Source{}, literal,
+ ast::IntLiteralExpression::Suffix::kU);
+ };
+
+ // Build up a nested expression for the decomposition by walking down the type
+ // hierarchy, maintaining |current_type_id| as the SPIR-V ID of the type of
+ // the object pointed to after processing the previous indices.
+ const auto num_in_operands = inst.NumInOperands();
+ for (uint32_t index = index_start; index < num_in_operands; ++index) {
+ const uint32_t index_val = inst.GetSingleWordInOperand(index);
+
+ const auto* current_type_inst = def_use_mgr_->GetDef(current_type_id);
+ if (!current_type_inst) {
+ Fail() << "composite type %" << current_type_id << " is invalid after following "
+ << (index - index_start) << " indices: " << inst.PrettyPrint();
+ return {};
+ }
+ const char* operation_name = nullptr;
+ switch (inst.opcode()) {
+ case SpvOpCompositeExtract:
+ operation_name = "OpCompositeExtract";
+ break;
+ case SpvOpCompositeInsert:
+ operation_name = "OpCompositeInsert";
+ break;
+ default:
+ Fail() << "internal error: unhandled " << inst.PrettyPrint();
+ return {};
+ }
+ const ast::Expression* next_expr = nullptr;
+ switch (current_type_inst->opcode()) {
+ case SpvOpTypeVector: {
+                // Try generating a MemberAccessor expression. That results in something
+ // like "foo.z", which is more idiomatic than "foo[2]".
+ const auto num_elems = current_type_inst->GetSingleWordInOperand(1);
+ if (num_elems <= index_val) {
+ Fail() << operation_name << " %" << inst.result_id() << " index value "
+ << index_val << " is out of bounds for vector of " << num_elems
+ << " elements";
+ return {};
+ }
+ if (index_val >= kMaxVectorLen) {
+ Fail() << "internal error: swizzle index " << index_val
+ << " is too big. Max handled index is " << kMaxVectorLen - 1;
+ return {};
+ }
+ next_expr = create<ast::MemberAccessorExpression>(Source{}, current_expr.expr,
+ Swizzle(index_val));
+ // All vector components are the same type.
+ current_type_id = current_type_inst->GetSingleWordInOperand(0);
+ break;
+ }
+ case SpvOpTypeMatrix: {
+ // Check bounds
+ const auto num_elems = current_type_inst->GetSingleWordInOperand(1);
+ if (num_elems <= index_val) {
+ Fail() << operation_name << " %" << inst.result_id() << " index value "
+ << index_val << " is out of bounds for matrix of " << num_elems
+ << " elements";
+ return {};
+ }
+ if (index_val >= kMaxVectorLen) {
+ Fail() << "internal error: swizzle index " << index_val
+ << " is too big. Max handled index is " << kMaxVectorLen - 1;
+ }
+ // Use array syntax.
+ next_expr = create<ast::IndexAccessorExpression>(Source{}, current_expr.expr,
+ make_index(index_val));
+ // All matrix components are the same type.
+ current_type_id = current_type_inst->GetSingleWordInOperand(0);
+ break;
+ }
+ case SpvOpTypeArray:
+ // The array size could be a spec constant, and so it's not always
+ // statically checkable. Instead, rely on a runtime index clamp
+ // or runtime check to keep this safe.
+ next_expr = create<ast::IndexAccessorExpression>(Source{}, current_expr.expr,
+ make_index(index_val));
+ current_type_id = current_type_inst->GetSingleWordInOperand(0);
+ break;
+ case SpvOpTypeRuntimeArray:
+ Fail() << "can't do " << operation_name
+ << " on a runtime array: " << inst.PrettyPrint();
+ return {};
+ case SpvOpTypeStruct: {
+ const auto num_members = current_type_inst->NumInOperands();
+ if (num_members <= index_val) {
+ Fail() << operation_name << " %" << inst.result_id() << " index value "
+ << index_val << " is out of bounds for structure %" << current_type_id
+ << " having " << num_members << " members";
+ return {};
+ }
+ auto name = namer_.GetMemberName(current_type_id, uint32_t(index_val));
+ auto* member_access =
+ create<ast::IdentifierExpression>(Source{}, builder_.Symbols().Register(name));
+
+ next_expr = create<ast::MemberAccessorExpression>(Source{}, current_expr.expr,
+ member_access);
+ current_type_id = current_type_inst->GetSingleWordInOperand(index_val);
+ break;
+ }
+ default:
+ Fail() << operation_name << " with bad type %" << current_type_id << ": "
+ << current_type_inst->PrettyPrint();
+ return {};
+ }
+ current_expr = TypedExpression{parser_impl_.ConvertType(current_type_id), next_expr};
}
- current_expr =
- TypedExpression{parser_impl_.ConvertType(current_type_id), next_expr};
- }
- return current_expr;
+ return current_expr;
}
const ast::Expression* FunctionEmitter::MakeTrue(const Source& source) const {
- return create<ast::BoolLiteralExpression>(source, true);
+ return create<ast::BoolLiteralExpression>(source, true);
}
const ast::Expression* FunctionEmitter::MakeFalse(const Source& source) const {
- return create<ast::BoolLiteralExpression>(source, false);
+ return create<ast::BoolLiteralExpression>(source, false);
}
-TypedExpression FunctionEmitter::MakeVectorShuffle(
- const spvtools::opt::Instruction& inst) {
- const auto vec0_id = inst.GetSingleWordInOperand(0);
- const auto vec1_id = inst.GetSingleWordInOperand(1);
- const spvtools::opt::Instruction& vec0 = *(def_use_mgr_->GetDef(vec0_id));
- const spvtools::opt::Instruction& vec1 = *(def_use_mgr_->GetDef(vec1_id));
- const auto vec0_len =
- type_mgr_->GetType(vec0.type_id())->AsVector()->element_count();
- const auto vec1_len =
- type_mgr_->GetType(vec1.type_id())->AsVector()->element_count();
-
- // Idiomatic vector accessors.
-
- // Generate an ast::TypeConstructor expression.
- // Assume the literal indices are valid, and there is a valid number of them.
- auto source = GetSourceForInst(inst);
- const Vector* result_type =
- As<Vector>(parser_impl_.ConvertType(inst.type_id()));
- ast::ExpressionList values;
- for (uint32_t i = 2; i < inst.NumInOperands(); ++i) {
- const auto index = inst.GetSingleWordInOperand(i);
- if (index < vec0_len) {
- auto expr = MakeExpression(vec0_id);
- if (!expr) {
- return {};
- }
- values.emplace_back(create<ast::MemberAccessorExpression>(
- source, expr.expr, Swizzle(index)));
- } else if (index < vec0_len + vec1_len) {
- const auto sub_index = index - vec0_len;
- TINT_ASSERT(Reader, sub_index < kMaxVectorLen);
- auto expr = MakeExpression(vec1_id);
- if (!expr) {
- return {};
- }
- values.emplace_back(create<ast::MemberAccessorExpression>(
- source, expr.expr, Swizzle(sub_index)));
- } else if (index == 0xFFFFFFFF) {
- // By rule, this maps to OpUndef. Instead, make it zero.
- values.emplace_back(parser_impl_.MakeNullValue(result_type->type));
- } else {
- Fail() << "invalid vectorshuffle ID %" << inst.result_id()
- << ": index too large: " << index;
- return {};
+TypedExpression FunctionEmitter::MakeVectorShuffle(const spvtools::opt::Instruction& inst) {
+ const auto vec0_id = inst.GetSingleWordInOperand(0);
+ const auto vec1_id = inst.GetSingleWordInOperand(1);
+ const spvtools::opt::Instruction& vec0 = *(def_use_mgr_->GetDef(vec0_id));
+ const spvtools::opt::Instruction& vec1 = *(def_use_mgr_->GetDef(vec1_id));
+ const auto vec0_len = type_mgr_->GetType(vec0.type_id())->AsVector()->element_count();
+ const auto vec1_len = type_mgr_->GetType(vec1.type_id())->AsVector()->element_count();
+
+ // Idiomatic vector accessors.
+
+ // Generate an ast::TypeConstructor expression.
+ // Assume the literal indices are valid, and there is a valid number of them.
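+    // Indices below vec0_len select from the first vector, the next vec1_len
+    // select from the second, and the sentinel 0xFFFFFFFF (an undefined
+    // component) is lowered to a zero value.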
+ auto source = GetSourceForInst(inst);
+ const Vector* result_type = As<Vector>(parser_impl_.ConvertType(inst.type_id()));
+ ast::ExpressionList values;
+ for (uint32_t i = 2; i < inst.NumInOperands(); ++i) {
+ const auto index = inst.GetSingleWordInOperand(i);
+ if (index < vec0_len) {
+ auto expr = MakeExpression(vec0_id);
+ if (!expr) {
+ return {};
+ }
+ values.emplace_back(
+ create<ast::MemberAccessorExpression>(source, expr.expr, Swizzle(index)));
+ } else if (index < vec0_len + vec1_len) {
+ const auto sub_index = index - vec0_len;
+ TINT_ASSERT(Reader, sub_index < kMaxVectorLen);
+ auto expr = MakeExpression(vec1_id);
+ if (!expr) {
+ return {};
+ }
+ values.emplace_back(
+ create<ast::MemberAccessorExpression>(source, expr.expr, Swizzle(sub_index)));
+ } else if (index == 0xFFFFFFFF) {
+ // By rule, this maps to OpUndef. Instead, make it zero.
+ values.emplace_back(parser_impl_.MakeNullValue(result_type->type));
+ } else {
+ Fail() << "invalid vectorshuffle ID %" << inst.result_id()
+ << ": index too large: " << index;
+ return {};
+ }
}
- }
- return {result_type,
- builder_.Construct(source, result_type->Build(builder_), values)};
+ return {result_type, builder_.Construct(source, result_type->Build(builder_), values)};
}
bool FunctionEmitter::RegisterSpecialBuiltInVariables() {
- size_t index = def_info_.size();
- for (auto& special_var : parser_impl_.special_builtins()) {
- const auto id = special_var.first;
- const auto builtin = special_var.second;
- const auto* var = def_use_mgr_->GetDef(id);
- def_info_[id] = std::make_unique<DefInfo>(*var, 0, index);
- ++index;
- auto& def = def_info_[id];
- switch (builtin) {
- case SpvBuiltInPointSize:
- def->skip = SkipReason::kPointSizeBuiltinPointer;
- break;
- case SpvBuiltInSampleMask: {
- // Distinguish between input and output variable.
- const auto storage_class =
- static_cast<SpvStorageClass>(var->GetSingleWordInOperand(0));
- if (storage_class == SpvStorageClassInput) {
- sample_mask_in_id = id;
- def->skip = SkipReason::kSampleMaskInBuiltinPointer;
- } else {
- sample_mask_out_id = id;
- def->skip = SkipReason::kSampleMaskOutBuiltinPointer;
- }
- break;
- }
- case SpvBuiltInSampleId:
- case SpvBuiltInInstanceIndex:
- case SpvBuiltInVertexIndex:
- case SpvBuiltInLocalInvocationIndex:
- case SpvBuiltInLocalInvocationId:
- case SpvBuiltInGlobalInvocationId:
- case SpvBuiltInWorkgroupId:
- case SpvBuiltInNumWorkgroups:
- break;
- default:
- return Fail() << "unrecognized special builtin: " << int(builtin);
- }
- }
- return true;
+ size_t index = def_info_.size();
+ for (auto& special_var : parser_impl_.special_builtins()) {
+ const auto id = special_var.first;
+ const auto builtin = special_var.second;
+ const auto* var = def_use_mgr_->GetDef(id);
+ def_info_[id] = std::make_unique<DefInfo>(*var, 0, index);
+ ++index;
+ auto& def = def_info_[id];
+ switch (builtin) {
+ case SpvBuiltInPointSize:
+ def->skip = SkipReason::kPointSizeBuiltinPointer;
+ break;
+ case SpvBuiltInSampleMask: {
+ // Distinguish between input and output variable.
+ const auto storage_class =
+ static_cast<SpvStorageClass>(var->GetSingleWordInOperand(0));
+ if (storage_class == SpvStorageClassInput) {
+ sample_mask_in_id = id;
+ def->skip = SkipReason::kSampleMaskInBuiltinPointer;
+ } else {
+ sample_mask_out_id = id;
+ def->skip = SkipReason::kSampleMaskOutBuiltinPointer;
+ }
+ break;
+ }
+ case SpvBuiltInSampleId:
+ case SpvBuiltInInstanceIndex:
+ case SpvBuiltInVertexIndex:
+ case SpvBuiltInLocalInvocationIndex:
+ case SpvBuiltInLocalInvocationId:
+ case SpvBuiltInGlobalInvocationId:
+ case SpvBuiltInWorkgroupId:
+ case SpvBuiltInNumWorkgroups:
+ break;
+ default:
+ return Fail() << "unrecognized special builtin: " << int(builtin);
+ }
+ }
+ return true;
}
bool FunctionEmitter::RegisterLocallyDefinedValues() {
- // Create a DefInfo for each value definition in this function.
- size_t index = def_info_.size();
- for (auto block_id : block_order_) {
- const auto* block_info = GetBlockInfo(block_id);
- const auto block_pos = block_info->pos;
- for (const auto& inst : *(block_info->basic_block)) {
- const auto result_id = inst.result_id();
- if ((result_id == 0) || inst.opcode() == SpvOpLabel) {
- continue;
- }
- def_info_[result_id] = std::make_unique<DefInfo>(inst, block_pos, index);
- ++index;
- auto& info = def_info_[result_id];
-
- // Determine storage class for pointer values. Do this in order because
- // we might rely on the storage class for a previously-visited definition.
- // Logical pointers can't be transmitted through OpPhi, so remaining
- // pointer definitions are SSA values, and their definitions must be
- // visited before their uses.
- const auto* type = type_mgr_->GetType(inst.type_id());
- if (type) {
- if (type->AsPointer()) {
- if (auto* ast_type = parser_impl_.ConvertType(inst.type_id())) {
- if (auto* ptr = ast_type->As<Pointer>()) {
- info->storage_class = ptr->storage_class;
+ // Create a DefInfo for each value definition in this function.
+ size_t index = def_info_.size();
+ for (auto block_id : block_order_) {
+ const auto* block_info = GetBlockInfo(block_id);
+ const auto block_pos = block_info->pos;
+ for (const auto& inst : *(block_info->basic_block)) {
+ const auto result_id = inst.result_id();
+ if ((result_id == 0) || inst.opcode() == SpvOpLabel) {
+ continue;
+ }
+ def_info_[result_id] = std::make_unique<DefInfo>(inst, block_pos, index);
+ ++index;
+ auto& info = def_info_[result_id];
+
+ // Determine storage class for pointer values. Do this in order because
+ // we might rely on the storage class for a previously-visited definition.
+ // Logical pointers can't be transmitted through OpPhi, so remaining
+ // pointer definitions are SSA values, and their definitions must be
+ // visited before their uses.
+ const auto* type = type_mgr_->GetType(inst.type_id());
+ if (type) {
+ if (type->AsPointer()) {
+ if (auto* ast_type = parser_impl_.ConvertType(inst.type_id())) {
+ if (auto* ptr = ast_type->As<Pointer>()) {
+ info->storage_class = ptr->storage_class;
+ }
+ }
+ switch (inst.opcode()) {
+ case SpvOpUndef:
+ return Fail() << "undef pointer is not valid: " << inst.PrettyPrint();
+ case SpvOpVariable:
+ // Keep the default decision based on the result type.
+ break;
+ case SpvOpAccessChain:
+ case SpvOpInBoundsAccessChain:
+ case SpvOpCopyObject:
+ // Inherit from the first operand. We need this so we can pick up
+ // a remapped storage buffer.
+ info->storage_class =
+ GetStorageClassForPointerValue(inst.GetSingleWordInOperand(0));
+ break;
+ default:
+ return Fail() << "pointer defined in function from unknown opcode: "
+ << inst.PrettyPrint();
+ }
+ }
+ auto* unwrapped = type;
+ while (auto* ptr = unwrapped->AsPointer()) {
+ unwrapped = ptr->pointee_type();
+ }
+ if (unwrapped->AsSampler() || unwrapped->AsImage() || unwrapped->AsSampledImage()) {
+ // Defer code generation until the instruction that actually acts on
+ // the image.
+ info->skip = SkipReason::kOpaqueObject;
+ }
}
- }
- switch (inst.opcode()) {
- case SpvOpUndef:
- return Fail()
- << "undef pointer is not valid: " << inst.PrettyPrint();
- case SpvOpVariable:
- // Keep the default decision based on the result type.
- break;
- case SpvOpAccessChain:
- case SpvOpInBoundsAccessChain:
- case SpvOpCopyObject:
- // Inherit from the first operand. We need this so we can pick up
- // a remapped storage buffer.
- info->storage_class = GetStorageClassForPointerValue(
- inst.GetSingleWordInOperand(0));
- break;
- default:
- return Fail()
- << "pointer defined in function from unknown opcode: "
- << inst.PrettyPrint();
- }
- }
- auto* unwrapped = type;
- while (auto* ptr = unwrapped->AsPointer()) {
- unwrapped = ptr->pointee_type();
- }
- if (unwrapped->AsSampler() || unwrapped->AsImage() ||
- unwrapped->AsSampledImage()) {
- // Defer code generation until the instruction that actually acts on
- // the image.
- info->skip = SkipReason::kOpaqueObject;
}
- }
}
- }
- return true;
+ return true;
}
ast::StorageClass FunctionEmitter::GetStorageClassForPointerValue(uint32_t id) {
- auto where = def_info_.find(id);
- if (where != def_info_.end()) {
- auto candidate = where->second.get()->storage_class;
- if (candidate != ast::StorageClass::kInvalid) {
- return candidate;
- }
- }
- const auto type_id = def_use_mgr_->GetDef(id)->type_id();
- if (type_id) {
- auto* ast_type = parser_impl_.ConvertType(type_id);
- if (auto* ptr = As<Pointer>(ast_type)) {
- return ptr->storage_class;
- }
- }
- return ast::StorageClass::kInvalid;
+ auto where = def_info_.find(id);
+ if (where != def_info_.end()) {
+ auto candidate = where->second.get()->storage_class;
+ if (candidate != ast::StorageClass::kInvalid) {
+ return candidate;
+ }
+ }
+ const auto type_id = def_use_mgr_->GetDef(id)->type_id();
+ if (type_id) {
+ auto* ast_type = parser_impl_.ConvertType(type_id);
+ if (auto* ptr = As<Pointer>(ast_type)) {
+ return ptr->storage_class;
+ }
+ }
+ return ast::StorageClass::kInvalid;
}
-const Type* FunctionEmitter::RemapStorageClass(const Type* type,
- uint32_t result_id) {
- if (auto* ast_ptr_type = As<Pointer>(type)) {
- // Remap an old-style storage buffer pointer to a new-style storage
- // buffer pointer.
- const auto sc = GetStorageClassForPointerValue(result_id);
- if (ast_ptr_type->storage_class != sc) {
- return ty_.Pointer(ast_ptr_type->type, sc);
- }
- }
- return type;
+const Type* FunctionEmitter::RemapStorageClass(const Type* type, uint32_t result_id) {
+ if (auto* ast_ptr_type = As<Pointer>(type)) {
+ // Remap an old-style storage buffer pointer to a new-style storage
+ // buffer pointer.
+ const auto sc = GetStorageClassForPointerValue(result_id);
+ if (ast_ptr_type->storage_class != sc) {
+ return ty_.Pointer(ast_ptr_type->type, sc);
+ }
+ }
+ return type;
}
void FunctionEmitter::FindValuesNeedingNamedOrHoistedDefinition() {
- // Mark vector operands of OpVectorShuffle as needing a named definition,
- // but only if they are defined in this function as well.
- auto require_named_const_def = [&](const spvtools::opt::Instruction& inst,
- int in_operand_index) {
- const auto id = inst.GetSingleWordInOperand(in_operand_index);
- auto* const operand_def = GetDefInfo(id);
- if (operand_def) {
- operand_def->requires_named_const_def = true;
- }
- };
- for (auto& id_def_info_pair : def_info_) {
- const auto& inst = id_def_info_pair.second->inst;
- const auto opcode = inst.opcode();
- if ((opcode == SpvOpVectorShuffle) || (opcode == SpvOpOuterProduct)) {
- // We might access the vector operands multiple times. Make sure they
- // are evaluated only once.
- require_named_const_def(inst, 0);
- require_named_const_def(inst, 1);
- }
- if (parser_impl_.IsGlslExtendedInstruction(inst)) {
- // Some emulations of GLSLstd450 instructions evaluate certain operands
- // multiple times. Ensure their expressions are evaluated only once.
- switch (inst.GetSingleWordInOperand(1)) {
- case GLSLstd450FaceForward:
- // The "normal" operand expression is used twice in code generation.
- require_named_const_def(inst, 2);
- break;
- case GLSLstd450Reflect:
- require_named_const_def(inst, 2); // Incident
- require_named_const_def(inst, 3); // Normal
- break;
- default:
- break;
- }
- }
- }
-
- // Scan uses of locally defined IDs, in function block order.
- for (auto block_id : block_order_) {
- const auto* block_info = GetBlockInfo(block_id);
- const auto block_pos = block_info->pos;
- for (const auto& inst : *(block_info->basic_block)) {
- // Update bookkeeping for locally-defined IDs used by this instruction.
- inst.ForEachInId([this, block_pos, block_info](const uint32_t* id_ptr) {
- auto* def_info = GetDefInfo(*id_ptr);
- if (def_info) {
- // Update usage count.
- def_info->num_uses++;
- // Update usage span.
- def_info->last_use_pos = std::max(def_info->last_use_pos, block_pos);
-
- // Determine whether this ID is defined in a different construct
- // from this use.
- const auto defining_block = block_order_[def_info->block_pos];
- const auto* def_in_construct =
- GetBlockInfo(defining_block)->construct;
- if (def_in_construct != block_info->construct) {
- def_info->used_in_another_construct = true;
- }
- }
- });
-
- if (inst.opcode() == SpvOpPhi) {
- // Declare a name for the variable used to carry values to a phi.
- const auto phi_id = inst.result_id();
- auto* phi_def_info = GetDefInfo(phi_id);
- phi_def_info->phi_var =
- namer_.MakeDerivedName(namer_.Name(phi_id) + "_phi");
- // Track all the places where we need to mention the variable,
- // so we can place its declaration. First, record the location of
- // the read from the variable.
- uint32_t first_pos = block_pos;
- uint32_t last_pos = block_pos;
- // Record the assignments that will propagate values from predecessor
- // blocks.
- for (uint32_t i = 0; i + 1 < inst.NumInOperands(); i += 2) {
- const uint32_t value_id = inst.GetSingleWordInOperand(i);
- const uint32_t pred_block_id = inst.GetSingleWordInOperand(i + 1);
- auto* pred_block_info = GetBlockInfo(pred_block_id);
- // The predecessor might not be in the block order at all, so we
- // need this guard.
- if (IsInBlockOrder(pred_block_info)) {
- // Record the assignment that needs to occur at the end
- // of the predecessor block.
- pred_block_info->phi_assignments.push_back({phi_id, value_id});
- first_pos = std::min(first_pos, pred_block_info->pos);
- last_pos = std::max(last_pos, pred_block_info->pos);
- }
- }
-
- // Schedule the declaration of the state variable.
- const auto* enclosing_construct =
- GetEnclosingScope(first_pos, last_pos);
- GetBlockInfo(enclosing_construct->begin_id)
- ->phis_needing_state_vars.push_back(phi_id);
- }
- }
- }
-
- // For an ID defined in this function, determine if its evaluation and
- // potential declaration needs special handling:
- // - Compensate for the fact that dominance does not map directly to scope.
- // A definition could dominate its use, but a named definition in WGSL
- // at the location of the definition could go out of scope by the time
- // you reach the use. In that case, we hoist the definition to a basic
- // block at the smallest scope enclosing both the definition and all
- // its uses.
- // - If value is used in a different construct than its definition, then it
- // needs a named constant definition. Otherwise we might sink an
- // expensive computation into control flow, and hence change performance.
- for (auto& id_def_info_pair : def_info_) {
- const auto def_id = id_def_info_pair.first;
- auto* def_info = id_def_info_pair.second.get();
- if (def_info->num_uses == 0) {
- // There is no need to adjust the location of the declaration.
- continue;
- }
- // The first use must be the at the SSA definition, because block order
- // respects dominance.
- const auto first_pos = def_info->block_pos;
- const auto last_use_pos = def_info->last_use_pos;
-
- const auto* def_in_construct =
- GetBlockInfo(block_order_[first_pos])->construct;
- // A definition in the first block of an kIfSelection or kSwitchSelection
- // occurs before the branch, and so that definition should count as
- // having been defined at the scope of the parent construct.
- if (first_pos == def_in_construct->begin_pos) {
- if ((def_in_construct->kind == Construct::kIfSelection) ||
- (def_in_construct->kind == Construct::kSwitchSelection)) {
- def_in_construct = def_in_construct->parent;
- }
- }
-
- bool should_hoist = false;
- if (!def_in_construct->ContainsPos(last_use_pos)) {
- // To satisfy scoping, we have to hoist the definition out to an enclosing
- // construct.
- should_hoist = true;
- } else {
- // Avoid moving combinatorial values across constructs. This is a
- // simple heuristic to avoid changing the cost of an operation
- // by moving it into or out of a loop, for example.
- if ((def_info->storage_class == ast::StorageClass::kInvalid) &&
- def_info->used_in_another_construct) {
- should_hoist = true;
- }
- }
-
- if (should_hoist) {
- const auto* enclosing_construct =
- GetEnclosingScope(first_pos, last_use_pos);
- if (enclosing_construct == def_in_construct) {
- // We can use a plain 'const' definition.
- def_info->requires_named_const_def = true;
- } else {
- // We need to make a hoisted variable definition.
- // TODO(dneto): Handle non-storable types, particularly pointers.
- def_info->requires_hoisted_def = true;
- auto* hoist_to_block = GetBlockInfo(enclosing_construct->begin_id);
- hoist_to_block->hoisted_ids.push_back(def_id);
- }
- }
- }
-}
+ // Mark vector operands of OpVectorShuffle as needing a named definition,
+ // but only if they are defined in this function as well.
+ auto require_named_const_def = [&](const spvtools::opt::Instruction& inst,
+ int in_operand_index) {
+ const auto id = inst.GetSingleWordInOperand(in_operand_index);
+ auto* const operand_def = GetDefInfo(id);
+ if (operand_def) {
+ operand_def->requires_named_const_def = true;
+ }
+ };
+ for (auto& id_def_info_pair : def_info_) {
+ const auto& inst = id_def_info_pair.second->inst;
+ const auto opcode = inst.opcode();
+ if ((opcode == SpvOpVectorShuffle) || (opcode == SpvOpOuterProduct)) {
+ // We might access the vector operands multiple times. Make sure they
+ // are evaluated only once.
+ require_named_const_def(inst, 0);
+ require_named_const_def(inst, 1);
+ }
+ if (parser_impl_.IsGlslExtendedInstruction(inst)) {
+ // Some emulations of GLSLstd450 instructions evaluate certain operands
+ // multiple times. Ensure their expressions are evaluated only once.
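+            // For instance, an emulated reflect expands to I - 2 * dot(N, I) * N,
+            // so the incident and normal operands each appear more than once.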
+ switch (inst.GetSingleWordInOperand(1)) {
+ case GLSLstd450FaceForward:
+ // The "normal" operand expression is used twice in code generation.
+ require_named_const_def(inst, 2);
+ break;
+ case GLSLstd450Reflect:
+ require_named_const_def(inst, 2); // Incident
+ require_named_const_def(inst, 3); // Normal
+ break;
+ default:
+ break;
+ }
+ }
+ }
+
+ // Scan uses of locally defined IDs, in function block order.
+ for (auto block_id : block_order_) {
+ const auto* block_info = GetBlockInfo(block_id);
+ const auto block_pos = block_info->pos;
+ for (const auto& inst : *(block_info->basic_block)) {
+ // Update bookkeeping for locally-defined IDs used by this instruction.
+ inst.ForEachInId([this, block_pos, block_info](const uint32_t* id_ptr) {
+ auto* def_info = GetDefInfo(*id_ptr);
+ if (def_info) {
+ // Update usage count.
+ def_info->num_uses++;
+ // Update usage span.
+ def_info->last_use_pos = std::max(def_info->last_use_pos, block_pos);
+
+ // Determine whether this ID is defined in a different construct
+ // from this use.
+ const auto defining_block = block_order_[def_info->block_pos];
+ const auto* def_in_construct = GetBlockInfo(defining_block)->construct;
+ if (def_in_construct != block_info->construct) {
+ def_info->used_in_another_construct = true;
+ }
+ }
+ });
+
+ if (inst.opcode() == SpvOpPhi) {
+ // Declare a name for the variable used to carry values to a phi.
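+                // (The phi is realized as a function-scope variable: each in-scope
+                // predecessor assigns its incoming value at the end of its block,
+                // and uses of the phi read that variable.)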
+ const auto phi_id = inst.result_id();
+ auto* phi_def_info = GetDefInfo(phi_id);
+ phi_def_info->phi_var = namer_.MakeDerivedName(namer_.Name(phi_id) + "_phi");
+ // Track all the places where we need to mention the variable,
+ // so we can place its declaration. First, record the location of
+ // the read from the variable.
+ uint32_t first_pos = block_pos;
+ uint32_t last_pos = block_pos;
+ // Record the assignments that will propagate values from predecessor
+ // blocks.
+ for (uint32_t i = 0; i + 1 < inst.NumInOperands(); i += 2) {
+ const uint32_t value_id = inst.GetSingleWordInOperand(i);
+ const uint32_t pred_block_id = inst.GetSingleWordInOperand(i + 1);
+ auto* pred_block_info = GetBlockInfo(pred_block_id);
+ // The predecessor might not be in the block order at all, so we
+ // need this guard.
+ if (IsInBlockOrder(pred_block_info)) {
+ // Record the assignment that needs to occur at the end
+ // of the predecessor block.
+ pred_block_info->phi_assignments.push_back({phi_id, value_id});
+ first_pos = std::min(first_pos, pred_block_info->pos);
+ last_pos = std::max(last_pos, pred_block_info->pos);
+ }
+ }
+
+ // Schedule the declaration of the state variable.
+ const auto* enclosing_construct = GetEnclosingScope(first_pos, last_pos);
+ GetBlockInfo(enclosing_construct->begin_id)
+ ->phis_needing_state_vars.push_back(phi_id);
+ }
+ }
+ }
+
+ // For an ID defined in this function, determine if its evaluation and
+ // potential declaration needs special handling:
+ // - Compensate for the fact that dominance does not map directly to scope.
+ // A definition could dominate its use, but a named definition in WGSL
+ // at the location of the definition could go out of scope by the time
+ // you reach the use. In that case, we hoist the definition to a basic
+ // block at the smallest scope enclosing both the definition and all
+ // its uses.
+    // - If a value is used in a different construct than its definition, then it
+ // needs a named constant definition. Otherwise we might sink an
+ // expensive computation into control flow, and hence change performance.
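+    // For example, a value computed inside an if-construct but consumed after the
+    // construct ends gets a hoisted variable declared in the enclosing block.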
+ for (auto& id_def_info_pair : def_info_) {
+ const auto def_id = id_def_info_pair.first;
+ auto* def_info = id_def_info_pair.second.get();
+ if (def_info->num_uses == 0) {
+ // There is no need to adjust the location of the declaration.
+ continue;
+ }
+        // The first use must be at the SSA definition, because block order
+ // respects dominance.
+ const auto first_pos = def_info->block_pos;
+ const auto last_use_pos = def_info->last_use_pos;
+
+ const auto* def_in_construct = GetBlockInfo(block_order_[first_pos])->construct;
+        // A definition in the first block of a kIfSelection or kSwitchSelection
+ // occurs before the branch, and so that definition should count as
+ // having been defined at the scope of the parent construct.
+ if (first_pos == def_in_construct->begin_pos) {
+ if ((def_in_construct->kind == Construct::kIfSelection) ||
+ (def_in_construct->kind == Construct::kSwitchSelection)) {
+ def_in_construct = def_in_construct->parent;
+ }
+ }
+
+ bool should_hoist = false;
+ if (!def_in_construct->ContainsPos(last_use_pos)) {
+ // To satisfy scoping, we have to hoist the definition out to an enclosing
+ // construct.
+ should_hoist = true;
+ } else {
+ // Avoid moving combinatorial values across constructs. This is a
+ // simple heuristic to avoid changing the cost of an operation
+ // by moving it into or out of a loop, for example.
+ if ((def_info->storage_class == ast::StorageClass::kInvalid) &&
+ def_info->used_in_another_construct) {
+ should_hoist = true;
+ }
+ }
-const Construct* FunctionEmitter::GetEnclosingScope(uint32_t first_pos,
- uint32_t last_pos) const {
- const auto* enclosing_construct =
- GetBlockInfo(block_order_[first_pos])->construct;
- TINT_ASSERT(Reader, enclosing_construct != nullptr);
- // Constructs are strictly nesting, so follow parent pointers
- while (enclosing_construct &&
- !enclosing_construct->ScopeContainsPos(last_pos)) {
- // The scope of a continue construct is enclosed in its associated loop
- // construct, but they are siblings in our construct tree.
- const auto* sibling_loop = SiblingLoopConstruct(enclosing_construct);
- // Go to the sibling loop if it exists, otherwise walk up to the parent.
- enclosing_construct =
- sibling_loop ? sibling_loop : enclosing_construct->parent;
- }
- // At worst, we go all the way out to the function construct.
- TINT_ASSERT(Reader, enclosing_construct != nullptr);
- return enclosing_construct;
+ if (should_hoist) {
+ const auto* enclosing_construct = GetEnclosingScope(first_pos, last_use_pos);
+ if (enclosing_construct == def_in_construct) {
+ // We can use a plain 'const' definition.
+ def_info->requires_named_const_def = true;
+ } else {
+ // We need to make a hoisted variable definition.
+ // TODO(dneto): Handle non-storable types, particularly pointers.
+ def_info->requires_hoisted_def = true;
+ auto* hoist_to_block = GetBlockInfo(enclosing_construct->begin_id);
+ hoist_to_block->hoisted_ids.push_back(def_id);
+ }
+ }
+ }
}
-TypedExpression FunctionEmitter::MakeNumericConversion(
- const spvtools::opt::Instruction& inst) {
- const auto opcode = inst.opcode();
- auto* requested_type = parser_impl_.ConvertType(inst.type_id());
- auto arg_expr = MakeOperand(inst, 0);
- if (!arg_expr) {
- return {};
- }
- arg_expr.type = arg_expr.type->UnwrapRef();
+const Construct* FunctionEmitter::GetEnclosingScope(uint32_t first_pos, uint32_t last_pos) const {
+ const auto* enclosing_construct = GetBlockInfo(block_order_[first_pos])->construct;
+ TINT_ASSERT(Reader, enclosing_construct != nullptr);
+ // Constructs are strictly nesting, so follow parent pointers
+ while (enclosing_construct && !enclosing_construct->ScopeContainsPos(last_pos)) {
+ // The scope of a continue construct is enclosed in its associated loop
+ // construct, but they are siblings in our construct tree.
+ const auto* sibling_loop = SiblingLoopConstruct(enclosing_construct);
+ // Go to the sibling loop if it exists, otherwise walk up to the parent.
+ enclosing_construct = sibling_loop ? sibling_loop : enclosing_construct->parent;
+ }
+ // At worst, we go all the way out to the function construct.
+ TINT_ASSERT(Reader, enclosing_construct != nullptr);
+ return enclosing_construct;
+}
- const Type* expr_type = nullptr;
- if ((opcode == SpvOpConvertSToF) || (opcode == SpvOpConvertUToF)) {
- if (arg_expr.type->IsIntegerScalarOrVector()) {
- expr_type = requested_type;
- } else {
- Fail() << "operand for conversion to floating point must be integral "
- "scalar or vector: "
- << inst.PrettyPrint();
+TypedExpression FunctionEmitter::MakeNumericConversion(const spvtools::opt::Instruction& inst) {
+ const auto opcode = inst.opcode();
+ auto* requested_type = parser_impl_.ConvertType(inst.type_id());
+ auto arg_expr = MakeOperand(inst, 0);
+ if (!arg_expr) {
+ return {};
}
- } else if (inst.opcode() == SpvOpConvertFToU) {
- if (arg_expr.type->IsFloatScalarOrVector()) {
- expr_type = parser_impl_.GetUnsignedIntMatchingShape(arg_expr.type);
- } else {
- Fail() << "operand for conversion to unsigned integer must be floating "
- "point scalar or vector: "
- << inst.PrettyPrint();
+ arg_expr.type = arg_expr.type->UnwrapRef();
+
+ const Type* expr_type = nullptr;
+ if ((opcode == SpvOpConvertSToF) || (opcode == SpvOpConvertUToF)) {
+ if (arg_expr.type->IsIntegerScalarOrVector()) {
+ expr_type = requested_type;
+ } else {
+ Fail() << "operand for conversion to floating point must be integral "
+ "scalar or vector: "
+ << inst.PrettyPrint();
+ }
+ } else if (inst.opcode() == SpvOpConvertFToU) {
+ if (arg_expr.type->IsFloatScalarOrVector()) {
+ expr_type = parser_impl_.GetUnsignedIntMatchingShape(arg_expr.type);
+ } else {
+ Fail() << "operand for conversion to unsigned integer must be floating "
+ "point scalar or vector: "
+ << inst.PrettyPrint();
+ }
+ } else if (inst.opcode() == SpvOpConvertFToS) {
+ if (arg_expr.type->IsFloatScalarOrVector()) {
+ expr_type = parser_impl_.GetSignedIntMatchingShape(arg_expr.type);
+ } else {
+ Fail() << "operand for conversion to signed integer must be floating "
+ "point scalar or vector: "
+ << inst.PrettyPrint();
+ }
}
- } else if (inst.opcode() == SpvOpConvertFToS) {
- if (arg_expr.type->IsFloatScalarOrVector()) {
- expr_type = parser_impl_.GetSignedIntMatchingShape(arg_expr.type);
- } else {
- Fail() << "operand for conversion to signed integer must be floating "
- "point scalar or vector: "
- << inst.PrettyPrint();
+ if (expr_type == nullptr) {
+ // The diagnostic has already been emitted.
+ return {};
}
- }
- if (expr_type == nullptr) {
- // The diagnostic has already been emitted.
- return {};
- }
- ast::ExpressionList params;
- params.push_back(arg_expr.expr);
- TypedExpression result{
- expr_type,
- builder_.Construct(GetSourceForInst(inst), expr_type->Build(builder_),
- std::move(params))};
+ ast::ExpressionList params;
+ params.push_back(arg_expr.expr);
+ TypedExpression result{
+ expr_type,
+ builder_.Construct(GetSourceForInst(inst), expr_type->Build(builder_), std::move(params))};
- if (requested_type == expr_type) {
- return result;
- }
- return {requested_type, create<ast::BitcastExpression>(
- GetSourceForInst(inst),
- requested_type->Build(builder_), result.expr)};
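+    // If the natural result of the conversion differs from the declared SPIR-V
+    // result type (e.g. in signedness), reinterpret the bits with a bitcast.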
+ if (requested_type == expr_type) {
+ return result;
+ }
+ return {requested_type,
+ create<ast::BitcastExpression>(GetSourceForInst(inst), requested_type->Build(builder_),
+ result.expr)};
}
bool FunctionEmitter::EmitFunctionCall(const spvtools::opt::Instruction& inst) {
- // We ignore function attributes such as Inline, DontInline, Pure, Const.
- auto name = namer_.Name(inst.GetSingleWordInOperand(0));
- auto* function = create<ast::IdentifierExpression>(
- Source{}, builder_.Symbols().Register(name));
-
- ast::ExpressionList args;
- for (uint32_t iarg = 1; iarg < inst.NumInOperands(); ++iarg) {
- auto expr = MakeOperand(inst, iarg);
- if (!expr) {
- return false;
- }
- // Functions cannot use references as parameters, so we need to pass by
- // pointer if the operand is of pointer type.
- expr = AddressOfIfNeeded(
- expr, def_use_mgr_->GetDef(inst.GetSingleWordInOperand(iarg)));
- args.emplace_back(expr.expr);
- }
- if (failed()) {
- return false;
- }
- auto* call_expr =
- create<ast::CallExpression>(Source{}, function, std::move(args));
- auto* result_type = parser_impl_.ConvertType(inst.type_id());
- if (!result_type) {
- return Fail() << "internal error: no mapped type result of call: "
- << inst.PrettyPrint();
- }
+ // We ignore function attributes such as Inline, DontInline, Pure, Const.
+ auto name = namer_.Name(inst.GetSingleWordInOperand(0));
+ auto* function = create<ast::IdentifierExpression>(Source{}, builder_.Symbols().Register(name));
+
+ ast::ExpressionList args;
+ for (uint32_t iarg = 1; iarg < inst.NumInOperands(); ++iarg) {
+ auto expr = MakeOperand(inst, iarg);
+ if (!expr) {
+ return false;
+ }
+ // Functions cannot use references as parameters, so we need to pass by
+ // pointer if the operand is of pointer type.
+ expr = AddressOfIfNeeded(expr, def_use_mgr_->GetDef(inst.GetSingleWordInOperand(iarg)));
+ args.emplace_back(expr.expr);
+ }
+ if (failed()) {
+ return false;
+ }
+ auto* call_expr = create<ast::CallExpression>(Source{}, function, std::move(args));
+ auto* result_type = parser_impl_.ConvertType(inst.type_id());
+ if (!result_type) {
+ return Fail() << "internal error: no mapped type result of call: " << inst.PrettyPrint();
+ }
- if (result_type->Is<Void>()) {
- return nullptr !=
- AddStatement(create<ast::CallStatement>(Source{}, call_expr));
- }
+ if (result_type->Is<Void>()) {
+ return nullptr != AddStatement(create<ast::CallStatement>(Source{}, call_expr));
+ }
- return EmitConstDefOrWriteToHoistedVar(inst, {result_type, call_expr});
+ return EmitConstDefOrWriteToHoistedVar(inst, {result_type, call_expr});
}
-bool FunctionEmitter::EmitControlBarrier(
- const spvtools::opt::Instruction& inst) {
- uint32_t operands[3];
- for (int i = 0; i < 3; i++) {
- auto id = inst.GetSingleWordInOperand(i);
- if (auto* constant = constant_mgr_->FindDeclaredConstant(id)) {
- operands[i] = constant->GetU32();
+bool FunctionEmitter::EmitControlBarrier(const spvtools::opt::Instruction& inst) {
+ uint32_t operands[3];
+ for (int i = 0; i < 3; i++) {
+ auto id = inst.GetSingleWordInOperand(i);
+ if (auto* constant = constant_mgr_->FindDeclaredConstant(id)) {
+ operands[i] = constant->GetU32();
+ } else {
+ return Fail() << "invalid or missing operands for control barrier";
+ }
+ }
+
+ uint32_t execution = operands[0];
+ uint32_t memory = operands[1];
+ uint32_t semantics = operands[2];
+
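+    // For example, the classic GLSL barrier() (OpControlBarrier with Workgroup
+    // execution and memory scope and AcquireRelease|WorkgroupMemory semantics)
+    // lowers to a single workgroupBarrier() call.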
+ if (execution != SpvScopeWorkgroup) {
+ return Fail() << "unsupported control barrier execution scope: "
+ << "expected Workgroup (2), got: " << execution;
+ }
+ if (semantics & SpvMemorySemanticsAcquireReleaseMask) {
+ semantics &= ~SpvMemorySemanticsAcquireReleaseMask;
} else {
- return Fail() << "invalid or missing operands for control barrier";
- }
- }
-
- uint32_t execution = operands[0];
- uint32_t memory = operands[1];
- uint32_t semantics = operands[2];
-
- if (execution != SpvScopeWorkgroup) {
- return Fail() << "unsupported control barrier execution scope: "
- << "expected Workgroup (2), got: " << execution;
- }
- if (semantics & SpvMemorySemanticsAcquireReleaseMask) {
- semantics &= ~SpvMemorySemanticsAcquireReleaseMask;
- } else {
- return Fail() << "control barrier semantics requires acquire and release";
- }
- if (semantics & SpvMemorySemanticsWorkgroupMemoryMask) {
- if (memory != SpvScopeWorkgroup) {
- return Fail() << "workgroupBarrier requires workgroup memory scope";
- }
- AddStatement(create<ast::CallStatement>(builder_.Call("workgroupBarrier")));
- semantics &= ~SpvMemorySemanticsWorkgroupMemoryMask;
- }
- if (semantics & SpvMemorySemanticsUniformMemoryMask) {
- if (memory != SpvScopeDevice) {
- return Fail() << "storageBarrier requires device memory scope";
- }
- AddStatement(create<ast::CallStatement>(builder_.Call("storageBarrier")));
- semantics &= ~SpvMemorySemanticsUniformMemoryMask;
- }
- if (semantics) {
- return Fail() << "unsupported control barrier semantics: " << semantics;
- }
- return true;
+ return Fail() << "control barrier semantics requires acquire and release";
+ }
+ if (semantics & SpvMemorySemanticsWorkgroupMemoryMask) {
+ if (memory != SpvScopeWorkgroup) {
+ return Fail() << "workgroupBarrier requires workgroup memory scope";
+ }
+ AddStatement(create<ast::CallStatement>(builder_.Call("workgroupBarrier")));
+ semantics &= ~SpvMemorySemanticsWorkgroupMemoryMask;
+ }
+ if (semantics & SpvMemorySemanticsUniformMemoryMask) {
+ if (memory != SpvScopeDevice) {
+ return Fail() << "storageBarrier requires device memory scope";
+ }
+ AddStatement(create<ast::CallStatement>(builder_.Call("storageBarrier")));
+ semantics &= ~SpvMemorySemanticsUniformMemoryMask;
+ }
+ if (semantics) {
+ return Fail() << "unsupported control barrier semantics: " << semantics;
+ }
+ return true;
}
-TypedExpression FunctionEmitter::MakeBuiltinCall(
- const spvtools::opt::Instruction& inst) {
- const auto builtin = GetBuiltin(inst.opcode());
- auto* name = sem::str(builtin);
- auto* ident = create<ast::IdentifierExpression>(
- Source{}, builder_.Symbols().Register(name));
-
- ast::ExpressionList params;
- const Type* first_operand_type = nullptr;
- for (uint32_t iarg = 0; iarg < inst.NumInOperands(); ++iarg) {
- TypedExpression operand = MakeOperand(inst, iarg);
- if (first_operand_type == nullptr) {
- first_operand_type = operand.type;
- }
- params.emplace_back(operand.expr);
- }
- auto* call_expr =
- create<ast::CallExpression>(Source{}, ident, std::move(params));
- auto* result_type = parser_impl_.ConvertType(inst.type_id());
- if (!result_type) {
- Fail() << "internal error: no mapped type result of call: "
- << inst.PrettyPrint();
- return {};
- }
- TypedExpression call{result_type, call_expr};
- return parser_impl_.RectifyForcedResultType(call, inst, first_operand_type);
-}
+TypedExpression FunctionEmitter::MakeBuiltinCall(const spvtools::opt::Instruction& inst) {
+ const auto builtin = GetBuiltin(inst.opcode());
+ auto* name = sem::str(builtin);
+ auto* ident = create<ast::IdentifierExpression>(Source{}, builder_.Symbols().Register(name));
-TypedExpression FunctionEmitter::MakeSimpleSelect(
- const spvtools::opt::Instruction& inst) {
- auto condition = MakeOperand(inst, 0);
- auto true_value = MakeOperand(inst, 1);
- auto false_value = MakeOperand(inst, 2);
-
- // SPIR-V validation requires:
- // - the condition to be bool or bool vector, so we don't check it here.
- // - true_value false_value, and result type to match.
- // - you can't select over pointers or pointer vectors, unless you also have
- // a VariablePointers* capability, which is not allowed in by WebGPU.
- auto* op_ty = true_value.type;
- if (op_ty->Is<Vector>() || op_ty->IsFloatScalar() ||
- op_ty->IsIntegerScalar() || op_ty->Is<Bool>()) {
ast::ExpressionList params;
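+ // Remember the type of the first operand: it is passed to RectifyForcedResultType
+ // so the call result can be adjusted when the SPIR-V-declared result type differs
+ // from the WGSL builtin's result type.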
- params.push_back(false_value.expr);
- params.push_back(true_value.expr);
- // The condition goes last.
- params.push_back(condition.expr);
- return {op_ty, create<ast::CallExpression>(
- Source{},
- create<ast::IdentifierExpression>(
- Source{}, builder_.Symbols().Register("select")),
- std::move(params))};
- }
- return {};
+ const Type* first_operand_type = nullptr;
+ for (uint32_t iarg = 0; iarg < inst.NumInOperands(); ++iarg) {
+ TypedExpression operand = MakeOperand(inst, iarg);
+ if (first_operand_type == nullptr) {
+ first_operand_type = operand.type;
+ }
+ params.emplace_back(operand.expr);
+ }
+ auto* call_expr = create<ast::CallExpression>(Source{}, ident, std::move(params));
+ auto* result_type = parser_impl_.ConvertType(inst.type_id());
+ if (!result_type) {
+ Fail() << "internal error: no mapped type result of call: " << inst.PrettyPrint();
+ return {};
+ }
+ TypedExpression call{result_type, call_expr};
+ return parser_impl_.RectifyForcedResultType(call, inst, first_operand_type);
}
-Source FunctionEmitter::GetSourceForInst(
- const spvtools::opt::Instruction& inst) const {
- return parser_impl_.GetSourceForInst(&inst);
+TypedExpression FunctionEmitter::MakeSimpleSelect(const spvtools::opt::Instruction& inst) {
+ auto condition = MakeOperand(inst, 0);
+ auto true_value = MakeOperand(inst, 1);
+ auto false_value = MakeOperand(inst, 2);
+
+ // SPIR-V validation requires:
+ // - the condition to be bool or bool vector, so we don't check it here.
+ // - true_value, false_value, and result type to match.
+ // - you can't select over pointers or pointer vectors, unless you also have
+ // a VariablePointers* capability, which is not allowed by WebGPU.
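+ // For example, %res = OpSelect %ty %cond %t %f maps to select(f, t, cond) in WGSL.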
+ auto* op_ty = true_value.type;
+ if (op_ty->Is<Vector>() || op_ty->IsFloatScalar() || op_ty->IsIntegerScalar() ||
+ op_ty->Is<Bool>()) {
+ ast::ExpressionList params;
+ params.push_back(false_value.expr);
+ params.push_back(true_value.expr);
+ // The condition goes last.
+ params.push_back(condition.expr);
+ return {op_ty,
+ create<ast::CallExpression>(Source{},
+ create<ast::IdentifierExpression>(
+ Source{}, builder_.Symbols().Register("select")),
+ std::move(params))};
+ }
+ return {};
+}
+
+Source FunctionEmitter::GetSourceForInst(const spvtools::opt::Instruction& inst) const {
+ return parser_impl_.GetSourceForInst(&inst);
}
const spvtools::opt::Instruction* FunctionEmitter::GetImage(
const spvtools::opt::Instruction& inst) {
- if (inst.NumInOperands() == 0) {
- Fail() << "not an image access instruction: " << inst.PrettyPrint();
- return nullptr;
- }
- // The image or sampled image operand is always the first operand.
- const auto image_or_sampled_image_operand_id = inst.GetSingleWordInOperand(0);
- const auto* image = parser_impl_.GetMemoryObjectDeclarationForHandle(
- image_or_sampled_image_operand_id, true);
- if (!image) {
- Fail() << "internal error: couldn't find image for " << inst.PrettyPrint();
- return nullptr;
- }
- return image;
+ if (inst.NumInOperands() == 0) {
+ Fail() << "not an image access instruction: " << inst.PrettyPrint();
+ return nullptr;
+ }
+ // The image or sampled image operand is always the first operand.
+ const auto image_or_sampled_image_operand_id = inst.GetSingleWordInOperand(0);
+ const auto* image =
+ parser_impl_.GetMemoryObjectDeclarationForHandle(image_or_sampled_image_operand_id, true);
+ if (!image) {
+ Fail() << "internal error: couldn't find image for " << inst.PrettyPrint();
+ return nullptr;
+ }
+ return image;
}
-const Texture* FunctionEmitter::GetImageType(
- const spvtools::opt::Instruction& image) {
- const Pointer* ptr_type = parser_impl_.GetTypeForHandleVar(image);
- if (!parser_impl_.success()) {
- Fail();
- return {};
- }
- if (!ptr_type) {
- Fail() << "invalid texture type for " << image.PrettyPrint();
- return {};
- }
- auto* result = ptr_type->type->UnwrapAll()->As<Texture>();
- if (!result) {
- Fail() << "invalid texture type for " << image.PrettyPrint();
- return {};
- }
- return result;
+const Texture* FunctionEmitter::GetImageType(const spvtools::opt::Instruction& image) {
+ const Pointer* ptr_type = parser_impl_.GetTypeForHandleVar(image);
+ if (!parser_impl_.success()) {
+ Fail();
+ return {};
+ }
+ if (!ptr_type) {
+ Fail() << "invalid texture type for " << image.PrettyPrint();
+ return {};
+ }
+ auto* result = ptr_type->type->UnwrapAll()->As<Texture>();
+ if (!result) {
+ Fail() << "invalid texture type for " << image.PrettyPrint();
+ return {};
+ }
+ return result;
}
-const ast::Expression* FunctionEmitter::GetImageExpression(
- const spvtools::opt::Instruction& inst) {
- auto* image = GetImage(inst);
- if (!image) {
- return nullptr;
- }
- auto name = namer_.Name(image->result_id());
- return create<ast::IdentifierExpression>(GetSourceForInst(inst),
- builder_.Symbols().Register(name));
+const ast::Expression* FunctionEmitter::GetImageExpression(const spvtools::opt::Instruction& inst) {
+ auto* image = GetImage(inst);
+ if (!image) {
+ return nullptr;
+ }
+ auto name = namer_.Name(image->result_id());
+ return create<ast::IdentifierExpression>(GetSourceForInst(inst),
+ builder_.Symbols().Register(name));
}
const ast::Expression* FunctionEmitter::GetSamplerExpression(
const spvtools::opt::Instruction& inst) {
- // The sampled image operand is always the first operand.
- const auto image_or_sampled_image_operand_id = inst.GetSingleWordInOperand(0);
- const auto* image = parser_impl_.GetMemoryObjectDeclarationForHandle(
- image_or_sampled_image_operand_id, false);
- if (!image) {
- Fail() << "internal error: couldn't find sampler for "
- << inst.PrettyPrint();
- return nullptr;
- }
- auto name = namer_.Name(image->result_id());
- return create<ast::IdentifierExpression>(GetSourceForInst(inst),
- builder_.Symbols().Register(name));
+ // The sampled image operand is always the first operand.
+ const auto image_or_sampled_image_operand_id = inst.GetSingleWordInOperand(0);
+ const auto* image =
+ parser_impl_.GetMemoryObjectDeclarationForHandle(image_or_sampled_image_operand_id, false);
+ if (!image) {
+ Fail() << "internal error: couldn't find sampler for " << inst.PrettyPrint();
+ return nullptr;
+ }
+ auto name = namer_.Name(image->result_id());
+ return create<ast::IdentifierExpression>(GetSourceForInst(inst),
+ builder_.Symbols().Register(name));
}
bool FunctionEmitter::EmitImageAccess(const spvtools::opt::Instruction& inst) {
- ast::ExpressionList args;
- const auto opcode = inst.opcode();
+ ast::ExpressionList args;
+ const auto opcode = inst.opcode();
- // Form the texture operand.
- const spvtools::opt::Instruction* image = GetImage(inst);
- if (!image) {
- return false;
- }
- args.push_back(GetImageExpression(inst));
-
- // Form the sampler operand, if needed.
- if (IsSampledImageAccess(opcode)) {
- // Form the sampler operand.
- if (auto* sampler = GetSamplerExpression(inst)) {
- args.push_back(sampler);
- } else {
- return false;
+ // Form the texture operand.
+ const spvtools::opt::Instruction* image = GetImage(inst);
+ if (!image) {
+ return false;
+ }
+ args.push_back(GetImageExpression(inst));
+
+ // Form the sampler operand, if needed.
+ if (IsSampledImageAccess(opcode)) {
+ // Form the sampler operand.
+ if (auto* sampler = GetSamplerExpression(inst)) {
+ args.push_back(sampler);
+ } else {
+ return false;
+ }
}
- }
- // Find the texture type.
- const Pointer* texture_ptr_type = parser_impl_.GetTypeForHandleVar(*image);
- if (!texture_ptr_type) {
- return Fail();
- }
- const Texture* texture_type =
- texture_ptr_type->type->UnwrapAll()->As<Texture>();
+ // Find the texture type.
+ const Pointer* texture_ptr_type = parser_impl_.GetTypeForHandleVar(*image);
+ if (!texture_ptr_type) {
+ return Fail();
+ }
+ const Texture* texture_type = texture_ptr_type->type->UnwrapAll()->As<Texture>();
- if (!texture_type) {
- return Fail();
- }
+ if (!texture_type) {
+ return Fail();
+ }
- // This is the SPIR-V operand index. We're done with the first operand.
- uint32_t arg_index = 1;
+ // This is the SPIR-V operand index. We're done with the first operand.
+ uint32_t arg_index = 1;
- // Push the coordinates operands.
- auto coords = MakeCoordinateOperandsForImageAccess(inst);
- if (coords.empty()) {
- return false;
- }
- args.insert(args.end(), coords.begin(), coords.end());
- // Skip the coordinates operand.
- arg_index++;
+ // Push the coordinates operands.
+ auto coords = MakeCoordinateOperandsForImageAccess(inst);
+ if (coords.empty()) {
+ return false;
+ }
+ args.insert(args.end(), coords.begin(), coords.end());
+ // Skip the coordinates operand.
+ arg_index++;
+
+ const auto num_args = inst.NumInOperands();
+
+ // Consumes the depth-reference argument, pushing it onto the end of
+ // the parameter list. Issues a diagnostic and returns false on error.
+ auto consume_dref = [&]() -> bool {
+ if (arg_index < num_args) {
+ args.push_back(MakeOperand(inst, arg_index).expr);
+ arg_index++;
+ } else {
+ return Fail() << "image depth-compare instruction is missing a Dref operand: "
+ << inst.PrettyPrint();
+ }
+ return true;
+ };
- const auto num_args = inst.NumInOperands();
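+ // Choose the WGSL builtin from the SPIR-V opcode. Opcode-specific operands (the
+ // Dref value, the gather component, or the texel to store) are consumed inside the
+ // switch; the optional image operands are handled afterwards.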
+ std::string builtin_name;
+ bool use_level_of_detail_suffix = true;
+ bool is_dref_sample = false;
+ bool is_gather_or_dref_gather = false;
+ bool is_non_dref_sample = false;
+ switch (opcode) {
+ case SpvOpImageSampleImplicitLod:
+ case SpvOpImageSampleExplicitLod:
+ case SpvOpImageSampleProjImplicitLod:
+ case SpvOpImageSampleProjExplicitLod:
+ is_non_dref_sample = true;
+ builtin_name = "textureSample";
+ break;
+ case SpvOpImageSampleDrefImplicitLod:
+ case SpvOpImageSampleDrefExplicitLod:
+ case SpvOpImageSampleProjDrefImplicitLod:
+ case SpvOpImageSampleProjDrefExplicitLod:
+ is_dref_sample = true;
+ builtin_name = "textureSampleCompare";
+ if (!consume_dref()) {
+ return false;
+ }
+ break;
+ case SpvOpImageGather:
+ is_gather_or_dref_gather = true;
+ builtin_name = "textureGather";
+ if (!texture_type->Is<DepthTexture>()) {
+ // The explicit component is the *first* argument in WGSL.
+ args.insert(args.begin(), ToI32(MakeOperand(inst, arg_index)).expr);
+ }
+ // Skip over the component operand, even for depth textures.
+ arg_index++;
+ break;
+ case SpvOpImageDrefGather:
+ is_gather_or_dref_gather = true;
+ builtin_name = "textureGatherCompare";
+ if (!consume_dref()) {
+ return false;
+ }
+ break;
+ case SpvOpImageFetch:
+ case SpvOpImageRead:
+ // Read a single texel from a sampled or storage image.
+ builtin_name = "textureLoad";
+ use_level_of_detail_suffix = false;
+ break;
+ case SpvOpImageWrite:
+ builtin_name = "textureStore";
+ use_level_of_detail_suffix = false;
+ if (arg_index < num_args) {
+ auto texel = MakeOperand(inst, arg_index);
+ auto* converted_texel = ConvertTexelForStorage(inst, texel, texture_type);
+ if (!converted_texel) {
+ return false;
+ }
+
+ args.push_back(converted_texel);
+ arg_index++;
+ } else {
+ return Fail() << "image write is missing a Texel operand: " << inst.PrettyPrint();
+ }
+ break;
+ default:
+ return Fail() << "internal error: unrecognized image access: " << inst.PrettyPrint();
+ }
- // Consumes the depth-reference argument, pushing it onto the end of
- // the parameter list. Issues a diagnostic and returns false on error.
- auto consume_dref = [&]() -> bool {
+ // Loop over the image operands, looking for extra operands to the builtin.
+ // Except we unroll the loop.
+ uint32_t image_operands_mask = 0;
if (arg_index < num_args) {
- args.push_back(MakeOperand(inst, arg_index).expr);
- arg_index++;
- } else {
- return Fail()
- << "image depth-compare instruction is missing a Dref operand: "
- << inst.PrettyPrint();
+ image_operands_mask = inst.GetSingleWordInOperand(arg_index);
+ arg_index++;
}
- return true;
- };
-
- std::string builtin_name;
- bool use_level_of_detail_suffix = true;
- bool is_dref_sample = false;
- bool is_gather_or_dref_gather = false;
- bool is_non_dref_sample = false;
- switch (opcode) {
- case SpvOpImageSampleImplicitLod:
- case SpvOpImageSampleExplicitLod:
- case SpvOpImageSampleProjImplicitLod:
- case SpvOpImageSampleProjExplicitLod:
- is_non_dref_sample = true;
- builtin_name = "textureSample";
- break;
- case SpvOpImageSampleDrefImplicitLod:
- case SpvOpImageSampleDrefExplicitLod:
- case SpvOpImageSampleProjDrefImplicitLod:
- case SpvOpImageSampleProjDrefExplicitLod:
- is_dref_sample = true;
- builtin_name = "textureSampleCompare";
- if (!consume_dref()) {
- return false;
- }
- break;
- case SpvOpImageGather:
- is_gather_or_dref_gather = true;
- builtin_name = "textureGather";
- if (!texture_type->Is<DepthTexture>()) {
- // The explicit component is the *first* argument in WGSL.
- args.insert(args.begin(), ToI32(MakeOperand(inst, arg_index)).expr);
- }
- // Skip over the component operand, even for depth textures.
- arg_index++;
- break;
- case SpvOpImageDrefGather:
- is_gather_or_dref_gather = true;
- builtin_name = "textureGatherCompare";
- if (!consume_dref()) {
- return false;
- }
- break;
- case SpvOpImageFetch:
- case SpvOpImageRead:
- // Read a single texel from a sampled or storage image.
- builtin_name = "textureLoad";
- use_level_of_detail_suffix = false;
- break;
- case SpvOpImageWrite:
- builtin_name = "textureStore";
- use_level_of_detail_suffix = false;
- if (arg_index < num_args) {
- auto texel = MakeOperand(inst, arg_index);
- auto* converted_texel =
- ConvertTexelForStorage(inst, texel, texture_type);
- if (!converted_texel) {
- return false;
- }
-
- args.push_back(converted_texel);
+ if (arg_index < num_args && (image_operands_mask & SpvImageOperandsBiasMask)) {
+ if (is_dref_sample) {
+ return Fail() << "WGSL does not support depth-reference sampling with "
+ "level-of-detail bias: "
+ << inst.PrettyPrint();
+ }
+ if (is_gather_or_dref_gather) {
+ return Fail() << "WGSL does not support image gather with "
+ "level-of-detail bias: "
+ << inst.PrettyPrint();
+ }
+ builtin_name += "Bias";
+ args.push_back(MakeOperand(inst, arg_index).expr);
+ image_operands_mask ^= SpvImageOperandsBiasMask;
arg_index++;
- } else {
- return Fail() << "image write is missing a Texel operand: "
- << inst.PrettyPrint();
- }
- break;
- default:
- return Fail() << "internal error: unrecognized image access: "
- << inst.PrettyPrint();
- }
-
- // Loop over the image operands, looking for extra operands to the builtin.
- // Except we uroll the loop.
- uint32_t image_operands_mask = 0;
- if (arg_index < num_args) {
- image_operands_mask = inst.GetSingleWordInOperand(arg_index);
- arg_index++;
- }
- if (arg_index < num_args &&
- (image_operands_mask & SpvImageOperandsBiasMask)) {
- if (is_dref_sample) {
- return Fail() << "WGSL does not support depth-reference sampling with "
- "level-of-detail bias: "
- << inst.PrettyPrint();
- }
- if (is_gather_or_dref_gather) {
- return Fail() << "WGSL does not support image gather with "
- "level-of-detail bias: "
- << inst.PrettyPrint();
- }
- builtin_name += "Bias";
- args.push_back(MakeOperand(inst, arg_index).expr);
- image_operands_mask ^= SpvImageOperandsBiasMask;
- arg_index++;
- }
- if (arg_index < num_args && (image_operands_mask & SpvImageOperandsLodMask)) {
- if (use_level_of_detail_suffix) {
- builtin_name += "Level";
- }
- if (is_dref_sample || is_gather_or_dref_gather) {
- // Metal only supports Lod = 0 for comparison sampling without
- // derivatives.
- // Vulkan SPIR-V does not allow Lod with OpImageGather or
- // OpImageDrefGather.
- if (!IsFloatZero(inst.GetSingleWordInOperand(arg_index))) {
- return Fail() << "WGSL comparison sampling without derivatives "
- "requires level-of-detail 0.0"
- << inst.PrettyPrint();
- }
- // Don't generate the Lod argument.
- } else {
- // Generate the Lod argument.
- TypedExpression lod = MakeOperand(inst, arg_index);
- // When sampling from a depth texture, the Lod operand must be an I32.
- if (texture_type->Is<DepthTexture>()) {
- // Convert it to a signed integer type.
- lod = ToI32(lod);
- }
- args.push_back(lod.expr);
}
+ if (arg_index < num_args && (image_operands_mask & SpvImageOperandsLodMask)) {
+ if (use_level_of_detail_suffix) {
+ builtin_name += "Level";
+ }
+ if (is_dref_sample || is_gather_or_dref_gather) {
+ // Metal only supports Lod = 0 for comparison sampling without
+ // derivatives.
+ // Vulkan SPIR-V does not allow Lod with OpImageGather or
+ // OpImageDrefGather.
+ if (!IsFloatZero(inst.GetSingleWordInOperand(arg_index))) {
+ return Fail() << "WGSL comparison sampling without derivatives "
+ "requires level-of-detail 0.0"
+ << inst.PrettyPrint();
+ }
+ // Don't generate the Lod argument.
+ } else {
+ // Generate the Lod argument.
+ TypedExpression lod = MakeOperand(inst, arg_index);
+ // When sampling from a depth texture, the Lod operand must be an I32.
+ if (texture_type->Is<DepthTexture>()) {
+ // Convert it to a signed integer type.
+ lod = ToI32(lod);
+ }
+ args.push_back(lod.expr);
+ }
- image_operands_mask ^= SpvImageOperandsLodMask;
- arg_index++;
- } else if ((opcode == SpvOpImageFetch || opcode == SpvOpImageRead) &&
- !texture_type
- ->IsAnyOf<DepthMultisampledTexture, MultisampledTexture>()) {
- // textureLoad requires an explicit level-of-detail parameter for
- // non-multisampled texture types.
- args.push_back(parser_impl_.MakeNullValue(ty_.I32()));
- }
- if (arg_index + 1 < num_args &&
- (image_operands_mask & SpvImageOperandsGradMask)) {
- if (is_dref_sample) {
- return Fail() << "WGSL does not support depth-reference sampling with "
- "explicit gradient: "
- << inst.PrettyPrint();
- }
- if (is_gather_or_dref_gather) {
- return Fail() << "WGSL does not support image gather with "
- "explicit gradient: "
- << inst.PrettyPrint();
- }
- builtin_name += "Grad";
- args.push_back(MakeOperand(inst, arg_index).expr);
- args.push_back(MakeOperand(inst, arg_index + 1).expr);
- image_operands_mask ^= SpvImageOperandsGradMask;
- arg_index += 2;
- }
- if (arg_index < num_args &&
- (image_operands_mask & SpvImageOperandsConstOffsetMask)) {
- if (!IsImageSamplingOrGatherOrDrefGather(opcode)) {
- return Fail() << "ConstOffset is only permitted for sampling, gather, or "
- "depth-reference gather operations: "
- << inst.PrettyPrint();
- }
- switch (texture_type->dims) {
- case ast::TextureDimension::k2d:
- case ast::TextureDimension::k2dArray:
- case ast::TextureDimension::k3d:
- break;
- default:
- return Fail() << "ConstOffset is only permitted for 2D, 2D Arrayed, "
- "and 3D textures: "
- << inst.PrettyPrint();
- }
-
- args.push_back(ToSignedIfUnsigned(MakeOperand(inst, arg_index)).expr);
- image_operands_mask ^= SpvImageOperandsConstOffsetMask;
- arg_index++;
- }
- if (arg_index < num_args &&
- (image_operands_mask & SpvImageOperandsSampleMask)) {
- // TODO(dneto): only permitted with ImageFetch
- args.push_back(ToI32(MakeOperand(inst, arg_index)).expr);
- image_operands_mask ^= SpvImageOperandsSampleMask;
- arg_index++;
- }
- if (image_operands_mask) {
- return Fail() << "unsupported image operands (" << image_operands_mask
- << "): " << inst.PrettyPrint();
- }
-
- // If any of the arguments are nullptr, then we've failed.
- if (std::any_of(args.begin(), args.end(),
- [](auto* expr) { return expr == nullptr; })) {
- return false;
- }
+ image_operands_mask ^= SpvImageOperandsLodMask;
+ arg_index++;
+ } else if ((opcode == SpvOpImageFetch || opcode == SpvOpImageRead) &&
+ !texture_type->IsAnyOf<DepthMultisampledTexture, MultisampledTexture>()) {
+ // textureLoad requires an explicit level-of-detail parameter for
+ // non-multisampled texture types.
+ args.push_back(parser_impl_.MakeNullValue(ty_.I32()));
+ }
+ if (arg_index + 1 < num_args && (image_operands_mask & SpvImageOperandsGradMask)) {
+ if (is_dref_sample) {
+ return Fail() << "WGSL does not support depth-reference sampling with "
+ "explicit gradient: "
+ << inst.PrettyPrint();
+ }
+ if (is_gather_or_dref_gather) {
+ return Fail() << "WGSL does not support image gather with "
+ "explicit gradient: "
+ << inst.PrettyPrint();
+ }
+ builtin_name += "Grad";
+ args.push_back(MakeOperand(inst, arg_index).expr);
+ args.push_back(MakeOperand(inst, arg_index + 1).expr);
+ image_operands_mask ^= SpvImageOperandsGradMask;
+ arg_index += 2;
+ }
+ if (arg_index < num_args && (image_operands_mask & SpvImageOperandsConstOffsetMask)) {
+ if (!IsImageSamplingOrGatherOrDrefGather(opcode)) {
+ return Fail() << "ConstOffset is only permitted for sampling, gather, or "
+ "depth-reference gather operations: "
+ << inst.PrettyPrint();
+ }
+ switch (texture_type->dims) {
+ case ast::TextureDimension::k2d:
+ case ast::TextureDimension::k2dArray:
+ case ast::TextureDimension::k3d:
+ break;
+ default:
+ return Fail() << "ConstOffset is only permitted for 2D, 2D Arrayed, "
+ "and 3D textures: "
+ << inst.PrettyPrint();
+ }
- auto* ident = create<ast::IdentifierExpression>(
- Source{}, builder_.Symbols().Register(builtin_name));
- auto* call_expr =
- create<ast::CallExpression>(Source{}, ident, std::move(args));
+ args.push_back(ToSignedIfUnsigned(MakeOperand(inst, arg_index)).expr);
+ image_operands_mask ^= SpvImageOperandsConstOffsetMask;
+ arg_index++;
+ }
+ if (arg_index < num_args && (image_operands_mask & SpvImageOperandsSampleMask)) {
+ // TODO(dneto): only permitted with ImageFetch
+ args.push_back(ToI32(MakeOperand(inst, arg_index)).expr);
+ image_operands_mask ^= SpvImageOperandsSampleMask;
+ arg_index++;
+ }
+ if (image_operands_mask) {
+ return Fail() << "unsupported image operands (" << image_operands_mask
+ << "): " << inst.PrettyPrint();
+ }
- if (inst.type_id() != 0) {
- // It returns a value.
- const ast::Expression* value = call_expr;
+ // If any of the arguments are nullptr, then we've failed.
+ if (std::any_of(args.begin(), args.end(), [](auto* expr) { return expr == nullptr; })) {
+ return false;
+ }
- // The result type, derived from the SPIR-V instruction.
- auto* result_type = parser_impl_.ConvertType(inst.type_id());
- auto* result_component_type = result_type;
- if (auto* result_vector_type = As<Vector>(result_type)) {
- result_component_type = result_vector_type->type;
- }
-
- // For depth textures, the arity might mot match WGSL:
- // Operation SPIR-V WGSL
- // normal sampling vec4 ImplicitLod f32
- // normal sampling vec4 ExplicitLod f32
- // compare sample f32 DrefImplicitLod f32
- // compare sample f32 DrefExplicitLod f32
- // texel load vec4 ImageFetch f32
- // normal gather vec4 ImageGather vec4
- // dref gather vec4 ImageDrefGather vec4
- // Construct a 4-element vector with the result from the builtin in the
- // first component.
- if (texture_type->IsAnyOf<DepthTexture, DepthMultisampledTexture>()) {
- if (is_non_dref_sample || (opcode == SpvOpImageFetch)) {
- value = builder_.Construct(
- Source{},
- result_type->Build(builder_), // a vec4
- ast::ExpressionList{
- value, parser_impl_.MakeNullValue(result_component_type),
- parser_impl_.MakeNullValue(result_component_type),
- parser_impl_.MakeNullValue(result_component_type)});
- }
- }
-
- // If necessary, convert the result to the signedness of the instruction
- // result type. Compare the SPIR-V image's sampled component type with the
- // component of the result type of the SPIR-V instruction.
- auto* spirv_image_type =
- parser_impl_.GetSpirvTypeForHandleMemoryObjectDeclaration(*image);
- if (!spirv_image_type || (spirv_image_type->opcode() != SpvOpTypeImage)) {
- return Fail() << "invalid image type for image memory object declaration "
- << image->PrettyPrint();
- }
- auto* expected_component_type =
- parser_impl_.ConvertType(spirv_image_type->GetSingleWordInOperand(0));
- if (expected_component_type != result_component_type) {
- // This occurs if one is signed integer and the other is unsigned integer,
- // or vice versa. Perform a bitcast.
- value = create<ast::BitcastExpression>(
- Source{}, result_type->Build(builder_), call_expr);
- }
- if (!expected_component_type->Is<F32>() && IsSampledImageAccess(opcode)) {
- // WGSL permits sampled image access only on float textures.
- // Reject this case in the SPIR-V reader, at least until SPIR-V validation
- // catches up with this rule and can reject it earlier in the workflow.
- return Fail() << "sampled image must have float component type";
- }
-
- EmitConstDefOrWriteToHoistedVar(inst, {result_type, value});
- } else {
- // It's an image write. No value is returned, so make a statement out
- // of the call.
- AddStatement(create<ast::CallStatement>(Source{}, call_expr));
- }
- return success();
+ auto* ident =
+ create<ast::IdentifierExpression>(Source{}, builder_.Symbols().Register(builtin_name));
+ auto* call_expr = create<ast::CallExpression>(Source{}, ident, std::move(args));
+
+ if (inst.type_id() != 0) {
+ // It returns a value.
+ const ast::Expression* value = call_expr;
+
+ // The result type, derived from the SPIR-V instruction.
+ auto* result_type = parser_impl_.ConvertType(inst.type_id());
+ auto* result_component_type = result_type;
+ if (auto* result_vector_type = As<Vector>(result_type)) {
+ result_component_type = result_vector_type->type;
+ }
+
+ // For depth textures, the arity might not match WGSL:
+ // Operation SPIR-V WGSL
+ // normal sampling vec4 ImplicitLod f32
+ // normal sampling vec4 ExplicitLod f32
+ // compare sample f32 DrefImplicitLod f32
+ // compare sample f32 DrefExplicitLod f32
+ // texel load vec4 ImageFetch f32
+ // normal gather vec4 ImageGather vec4
+ // dref gather vec4 ImageDrefGather vec4
+ // Construct a 4-element vector with the result from the builtin in the
+ // first component.
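+ // Only the x component carries the builtin's result; the remaining components are
+ // zero-filled.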
+ if (texture_type->IsAnyOf<DepthTexture, DepthMultisampledTexture>()) {
+ if (is_non_dref_sample || (opcode == SpvOpImageFetch)) {
+ value = builder_.Construct(
+ Source{},
+ result_type->Build(builder_), // a vec4
+ ast::ExpressionList{value, parser_impl_.MakeNullValue(result_component_type),
+ parser_impl_.MakeNullValue(result_component_type),
+ parser_impl_.MakeNullValue(result_component_type)});
+ }
+ }
+
+ // If necessary, convert the result to the signedness of the instruction
+ // result type. Compare the SPIR-V image's sampled component type with the
+ // component of the result type of the SPIR-V instruction.
+ auto* spirv_image_type = parser_impl_.GetSpirvTypeForHandleMemoryObjectDeclaration(*image);
+ if (!spirv_image_type || (spirv_image_type->opcode() != SpvOpTypeImage)) {
+ return Fail() << "invalid image type for image memory object declaration "
+ << image->PrettyPrint();
+ }
+ auto* expected_component_type =
+ parser_impl_.ConvertType(spirv_image_type->GetSingleWordInOperand(0));
+ if (expected_component_type != result_component_type) {
+ // This occurs if one is signed integer and the other is unsigned integer,
+ // or vice versa. Perform a bitcast.
+ value =
+ create<ast::BitcastExpression>(Source{}, result_type->Build(builder_), call_expr);
+ }
+ if (!expected_component_type->Is<F32>() && IsSampledImageAccess(opcode)) {
+ // WGSL permits sampled image access only on float textures.
+ // Reject this case in the SPIR-V reader, at least until SPIR-V validation
+ // catches up with this rule and can reject it earlier in the workflow.
+ return Fail() << "sampled image must have float component type";
+ }
+
+ EmitConstDefOrWriteToHoistedVar(inst, {result_type, value});
+ } else {
+ // It's an image write. No value is returned, so make a statement out
+ // of the call.
+ AddStatement(create<ast::CallStatement>(Source{}, call_expr));
+ }
+ return success();
}
bool FunctionEmitter::EmitImageQuery(const spvtools::opt::Instruction& inst) {
- // TODO(dneto): Reject cases that are valid in Vulkan but invalid in WGSL.
- const spvtools::opt::Instruction* image = GetImage(inst);
- if (!image) {
- return false;
- }
- auto* texture_type = GetImageType(*image);
- if (!texture_type) {
- return false;
- }
-
- const auto opcode = inst.opcode();
- switch (opcode) {
- case SpvOpImageQuerySize:
- case SpvOpImageQuerySizeLod: {
- ast::ExpressionList exprs;
- // Invoke textureDimensions.
- // If the texture is arrayed, combine with the result from
- // textureNumLayers.
- auto* dims_ident = create<ast::IdentifierExpression>(
- Source{}, builder_.Symbols().Register("textureDimensions"));
- ast::ExpressionList dims_args{GetImageExpression(inst)};
- if (opcode == SpvOpImageQuerySizeLod) {
- dims_args.push_back(ToI32(MakeOperand(inst, 1)).expr);
- }
- const ast::Expression* dims_call =
- create<ast::CallExpression>(Source{}, dims_ident, dims_args);
- auto dims = texture_type->dims;
- if ((dims == ast::TextureDimension::kCube) ||
- (dims == ast::TextureDimension::kCubeArray)) {
- // textureDimension returns a 3-element vector but SPIR-V expects 2.
- dims_call = create<ast::MemberAccessorExpression>(Source{}, dims_call,
- PrefixSwizzle(2));
- }
- exprs.push_back(dims_call);
- if (ast::IsTextureArray(dims)) {
- auto* layers_ident = create<ast::IdentifierExpression>(
- Source{}, builder_.Symbols().Register("textureNumLayers"));
- exprs.push_back(create<ast::CallExpression>(
- Source{}, layers_ident,
- ast::ExpressionList{GetImageExpression(inst)}));
- }
- auto* result_type = parser_impl_.ConvertType(inst.type_id());
- TypedExpression expr = {
- result_type,
- builder_.Construct(Source{}, result_type->Build(builder_), exprs)};
- return EmitConstDefOrWriteToHoistedVar(inst, expr);
- }
- case SpvOpImageQueryLod:
- return Fail() << "WGSL does not support querying the level of detail of "
- "an image: "
- << inst.PrettyPrint();
- case SpvOpImageQueryLevels:
- case SpvOpImageQuerySamples: {
- const auto* name = (opcode == SpvOpImageQueryLevels)
- ? "textureNumLevels"
- : "textureNumSamples";
- auto* levels_ident = create<ast::IdentifierExpression>(
- Source{}, builder_.Symbols().Register(name));
- const ast::Expression* ast_expr = create<ast::CallExpression>(
- Source{}, levels_ident,
- ast::ExpressionList{GetImageExpression(inst)});
- auto* result_type = parser_impl_.ConvertType(inst.type_id());
- // The SPIR-V result type must be integer scalar. The WGSL bulitin
- // returns i32. If they aren't the same then convert the result.
- if (!result_type->Is<I32>()) {
- ast_expr = builder_.Construct(Source{}, result_type->Build(builder_),
- ast::ExpressionList{ast_expr});
- }
- TypedExpression expr{result_type, ast_expr};
- return EmitConstDefOrWriteToHoistedVar(inst, expr);
- }
- default:
- break;
- }
- return Fail() << "unhandled image query: " << inst.PrettyPrint();
+ // TODO(dneto): Reject cases that are valid in Vulkan but invalid in WGSL.
+ const spvtools::opt::Instruction* image = GetImage(inst);
+ if (!image) {
+ return false;
+ }
+ auto* texture_type = GetImageType(*image);
+ if (!texture_type) {
+ return false;
+ }
+
+ const auto opcode = inst.opcode();
+ switch (opcode) {
+ case SpvOpImageQuerySize:
+ case SpvOpImageQuerySizeLod: {
+ ast::ExpressionList exprs;
+ // Invoke textureDimensions.
+ // If the texture is arrayed, combine with the result from
+ // textureNumLayers.
+ auto* dims_ident = create<ast::IdentifierExpression>(
+ Source{}, builder_.Symbols().Register("textureDimensions"));
+ ast::ExpressionList dims_args{GetImageExpression(inst)};
+ if (opcode == SpvOpImageQuerySizeLod) {
+ dims_args.push_back(ToI32(MakeOperand(inst, 1)).expr);
+ }
+ const ast::Expression* dims_call =
+ create<ast::CallExpression>(Source{}, dims_ident, dims_args);
+ auto dims = texture_type->dims;
+ if ((dims == ast::TextureDimension::kCube) ||
+ (dims == ast::TextureDimension::kCubeArray)) {
+ // textureDimensions returns a 3-element vector but SPIR-V expects 2.
+ dims_call =
+ create<ast::MemberAccessorExpression>(Source{}, dims_call, PrefixSwizzle(2));
+ }
+ exprs.push_back(dims_call);
+ if (ast::IsTextureArray(dims)) {
+ auto* layers_ident = create<ast::IdentifierExpression>(
+ Source{}, builder_.Symbols().Register("textureNumLayers"));
+ exprs.push_back(create<ast::CallExpression>(
+ Source{}, layers_ident, ast::ExpressionList{GetImageExpression(inst)}));
+ }
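+ // Construct the query result from the dimensions (plus the layer count for
+ // arrayed textures) so it matches the SPIR-V result type.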
+ auto* result_type = parser_impl_.ConvertType(inst.type_id());
+ TypedExpression expr = {
+ result_type, builder_.Construct(Source{}, result_type->Build(builder_), exprs)};
+ return EmitConstDefOrWriteToHoistedVar(inst, expr);
+ }
+ case SpvOpImageQueryLod:
+ return Fail() << "WGSL does not support querying the level of detail of "
+ "an image: "
+ << inst.PrettyPrint();
+ case SpvOpImageQueryLevels:
+ case SpvOpImageQuerySamples: {
+ const auto* name =
+ (opcode == SpvOpImageQueryLevels) ? "textureNumLevels" : "textureNumSamples";
+ auto* levels_ident =
+ create<ast::IdentifierExpression>(Source{}, builder_.Symbols().Register(name));
+ const ast::Expression* ast_expr = create<ast::CallExpression>(
+ Source{}, levels_ident, ast::ExpressionList{GetImageExpression(inst)});
+ auto* result_type = parser_impl_.ConvertType(inst.type_id());
+ // The SPIR-V result type must be integer scalar. The WGSL builtin
+ // returns i32. If they aren't the same then convert the result.
+ if (!result_type->Is<I32>()) {
+ ast_expr = builder_.Construct(Source{}, result_type->Build(builder_),
+ ast::ExpressionList{ast_expr});
+ }
+ TypedExpression expr{result_type, ast_expr};
+ return EmitConstDefOrWriteToHoistedVar(inst, expr);
+ }
+ default:
+ break;
+ }
+ return Fail() << "unhandled image query: " << inst.PrettyPrint();
}
ast::ExpressionList FunctionEmitter::MakeCoordinateOperandsForImageAccess(
const spvtools::opt::Instruction& inst) {
- if (!parser_impl_.success()) {
- Fail();
- return {};
- }
- const spvtools::opt::Instruction* image = GetImage(inst);
- if (!image) {
- return {};
- }
- if (inst.NumInOperands() < 1) {
- Fail() << "image access is missing a coordinate parameter: "
- << inst.PrettyPrint();
- return {};
- }
-
- // In SPIR-V for Shader, coordinates are:
- // - floating point for sampling, dref sampling, gather, dref gather
- // - integral for fetch, read, write
- // In WGSL:
- // - floating point for sampling, dref sampling, gather, dref gather
- // - signed integral for textureLoad, textureStore
- //
- // The only conversions we have to do for WGSL are:
- // - When the coordinates are unsigned integral, convert them to signed.
- // - Array index is always i32
-
- // The coordinates parameter is always in position 1.
- TypedExpression raw_coords(MakeOperand(inst, 1));
- if (!raw_coords) {
- return {};
- }
- const Texture* texture_type = GetImageType(*image);
- if (!texture_type) {
- return {};
- }
- ast::TextureDimension dim = texture_type->dims;
- // Number of regular coordinates.
- uint32_t num_axes = ast::NumCoordinateAxes(dim);
- bool is_arrayed = ast::IsTextureArray(dim);
- if ((num_axes == 0) || (num_axes > 3)) {
- Fail() << "unsupported image dimensionality for "
- << texture_type->TypeInfo().name << " prompted by "
- << inst.PrettyPrint();
- }
- bool is_proj = false;
- switch (inst.opcode()) {
- case SpvOpImageSampleProjImplicitLod:
- case SpvOpImageSampleProjExplicitLod:
- case SpvOpImageSampleProjDrefImplicitLod:
- case SpvOpImageSampleProjDrefExplicitLod:
- is_proj = true;
- break;
- default:
- break;
- }
-
- const auto num_coords_required =
- num_axes + (is_arrayed ? 1 : 0) + (is_proj ? 1 : 0);
- uint32_t num_coords_supplied = 0;
- auto* component_type = raw_coords.type;
- if (component_type->IsFloatScalar() || component_type->IsIntegerScalar()) {
- num_coords_supplied = 1;
- } else if (auto* vec_type = As<Vector>(raw_coords.type)) {
- component_type = vec_type->type;
- num_coords_supplied = vec_type->size;
- }
- if (num_coords_supplied == 0) {
- Fail() << "bad or unsupported coordinate type for image access: "
- << inst.PrettyPrint();
- return {};
- }
- if (num_coords_required > num_coords_supplied) {
- Fail() << "image access required " << num_coords_required
- << " coordinate components, but only " << num_coords_supplied
- << " provided, in: " << inst.PrettyPrint();
- return {};
- }
-
- ast::ExpressionList result;
-
- // Generates the expression for the WGSL coordinates, when it is a prefix
- // swizzle with num_axes. If the result would be unsigned, also converts
- // it to a signed value of the same shape (scalar or vector).
- // Use a lambda to make it easy to only generate the expressions when we
- // will actually use them.
- auto prefix_swizzle_expr = [this, num_axes, component_type, is_proj,
- raw_coords]() -> const ast::Expression* {
- auto* swizzle_type =
- (num_axes == 1) ? component_type : ty_.Vector(component_type, num_axes);
- auto* swizzle = create<ast::MemberAccessorExpression>(
- Source{}, raw_coords.expr, PrefixSwizzle(num_axes));
- if (is_proj) {
- auto* q = create<ast::MemberAccessorExpression>(Source{}, raw_coords.expr,
- Swizzle(num_axes));
- auto* proj_div = builder_.Div(swizzle, q);
- return ToSignedIfUnsigned({swizzle_type, proj_div}).expr;
- } else {
- return ToSignedIfUnsigned({swizzle_type, swizzle}).expr;
- }
- };
-
- if (is_arrayed) {
- // The source must be a vector. It has at least one coordinate component
- // and it must have an array component. Use a vector swizzle to get the
- // first `num_axes` components.
- result.push_back(prefix_swizzle_expr());
-
- // Now get the array index.
- const ast::Expression* array_index =
- builder_.MemberAccessor(raw_coords.expr, Swizzle(num_axes));
- if (component_type->IsFloatScalar()) {
- // When converting from a float array layer to integer, Vulkan requires
- // round-to-nearest, with preference for round-to-nearest-even.
- // But i32(f32) in WGSL has unspecified rounding mode, so we have to
- // explicitly specify the rounding.
- array_index = builder_.Call("round", array_index);
- }
- // Convert it to a signed integer type, if needed.
- result.push_back(ToI32({component_type, array_index}).expr);
- } else {
- if (num_coords_supplied == num_coords_required && !is_proj) {
- // Pass the value through, with possible unsigned->signed conversion.
- result.push_back(ToSignedIfUnsigned(raw_coords).expr);
+ if (!parser_impl_.success()) {
+ Fail();
+ return {};
+ }
+ const spvtools::opt::Instruction* image = GetImage(inst);
+ if (!image) {
+ return {};
+ }
+ if (inst.NumInOperands() < 1) {
+ Fail() << "image access is missing a coordinate parameter: " << inst.PrettyPrint();
+ return {};
+ }
+
+ // In SPIR-V for Shader, coordinates are:
+ // - floating point for sampling, dref sampling, gather, dref gather
+ // - integral for fetch, read, write
+ // In WGSL:
+ // - floating point for sampling, dref sampling, gather, dref gather
+ // - signed integral for textureLoad, textureStore
+ //
+ // The only conversions we have to do for WGSL are:
+ // - When the coordinates are unsigned integral, convert them to signed.
+ // - Array index is always i32
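+ // For example, a sample from a 2D array texture supplies vec3<f32> coordinates:
+ // .xy become the WGSL coordinates and .z is rounded and converted to the i32
+ // array index.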
+
+ // The coordinates parameter is always in position 1.
+ TypedExpression raw_coords(MakeOperand(inst, 1));
+ if (!raw_coords) {
+ return {};
+ }
+ const Texture* texture_type = GetImageType(*image);
+ if (!texture_type) {
+ return {};
+ }
+ ast::TextureDimension dim = texture_type->dims;
+ // Number of regular coordinates.
+ uint32_t num_axes = ast::NumCoordinateAxes(dim);
+ bool is_arrayed = ast::IsTextureArray(dim);
+ if ((num_axes == 0) || (num_axes > 3)) {
+ Fail() << "unsupported image dimensionality for " << texture_type->TypeInfo().name
+ << " prompted by " << inst.PrettyPrint();
+ }
+ bool is_proj = false;
+ switch (inst.opcode()) {
+ case SpvOpImageSampleProjImplicitLod:
+ case SpvOpImageSampleProjExplicitLod:
+ case SpvOpImageSampleProjDrefImplicitLod:
+ case SpvOpImageSampleProjDrefExplicitLod:
+ is_proj = true;
+ break;
+ default:
+ break;
+ }
+
+ const auto num_coords_required = num_axes + (is_arrayed ? 1 : 0) + (is_proj ? 1 : 0);
+ uint32_t num_coords_supplied = 0;
+ auto* component_type = raw_coords.type;
+ if (component_type->IsFloatScalar() || component_type->IsIntegerScalar()) {
+ num_coords_supplied = 1;
+ } else if (auto* vec_type = As<Vector>(raw_coords.type)) {
+ component_type = vec_type->type;
+ num_coords_supplied = vec_type->size;
+ }
+ if (num_coords_supplied == 0) {
+ Fail() << "bad or unsupported coordinate type for image access: " << inst.PrettyPrint();
+ return {};
+ }
+ if (num_coords_required > num_coords_supplied) {
+ Fail() << "image access required " << num_coords_required
+ << " coordinate components, but only " << num_coords_supplied
+ << " provided, in: " << inst.PrettyPrint();
+ return {};
+ }
+
+ ast::ExpressionList result;
+
+ // Generates the expression for the WGSL coordinates, when it is a prefix
+ // swizzle with num_axes. If the result would be unsigned, also converts
+ // it to a signed value of the same shape (scalar or vector).
+ // Use a lambda to make it easy to only generate the expressions when we
+ // will actually use them.
+ auto prefix_swizzle_expr = [this, num_axes, component_type, is_proj,
+ raw_coords]() -> const ast::Expression* {
+ auto* swizzle_type =
+ (num_axes == 1) ? component_type : ty_.Vector(component_type, num_axes);
+ auto* swizzle = create<ast::MemberAccessorExpression>(Source{}, raw_coords.expr,
+ PrefixSwizzle(num_axes));
+ if (is_proj) {
+ auto* q =
+ create<ast::MemberAccessorExpression>(Source{}, raw_coords.expr, Swizzle(num_axes));
+ auto* proj_div = builder_.Div(swizzle, q);
+ return ToSignedIfUnsigned({swizzle_type, proj_div}).expr;
+ } else {
+ return ToSignedIfUnsigned({swizzle_type, swizzle}).expr;
+ }
+ };
+
+ if (is_arrayed) {
+ // The source must be a vector. It has at least one coordinate component
+ // and it must have an array component. Use a vector swizzle to get the
+ // first `num_axes` components.
+ result.push_back(prefix_swizzle_expr());
+
+ // Now get the array index.
+ const ast::Expression* array_index =
+ builder_.MemberAccessor(raw_coords.expr, Swizzle(num_axes));
+ if (component_type->IsFloatScalar()) {
+ // When converting from a float array layer to integer, Vulkan requires
+ // round-to-nearest, with preference for round-to-nearest-even.
+ // But i32(f32) in WGSL has unspecified rounding mode, so we have to
+ // explicitly specify the rounding.
+ array_index = builder_.Call("round", array_index);
+ }
+ // Convert it to a signed integer type, if needed.
+ result.push_back(ToI32({component_type, array_index}).expr);
} else {
- // There are more coordinates supplied than needed. So the source type
- // is a vector. Use a vector swizzle to get the first `num_axes`
- // components.
- result.push_back(prefix_swizzle_expr());
+ if (num_coords_supplied == num_coords_required && !is_proj) {
+ // Pass the value through, with possible unsigned->signed conversion.
+ result.push_back(ToSignedIfUnsigned(raw_coords).expr);
+ } else {
+ // There are more coordinates supplied than needed. So the source type
+ // is a vector. Use a vector swizzle to get the first `num_axes`
+ // components.
+ result.push_back(prefix_swizzle_expr());
+ }
}
- }
- return result;
+ return result;
}
const ast::Expression* FunctionEmitter::ConvertTexelForStorage(
const spvtools::opt::Instruction& inst,
TypedExpression texel,
const Texture* texture_type) {
- auto* storage_texture_type = As<StorageTexture>(texture_type);
- auto* src_type = texel.type;
- if (!storage_texture_type) {
- Fail() << "writing to other than storage texture: " << inst.PrettyPrint();
- return nullptr;
- }
- const auto format = storage_texture_type->format;
- auto* dest_type = parser_impl_.GetTexelTypeForFormat(format);
- if (!dest_type) {
- Fail();
- return nullptr;
- }
+ auto* storage_texture_type = As<StorageTexture>(texture_type);
+ auto* src_type = texel.type;
+ if (!storage_texture_type) {
+ Fail() << "writing to other than storage texture: " << inst.PrettyPrint();
+ return nullptr;
+ }
+ const auto format = storage_texture_type->format;
+ auto* dest_type = parser_impl_.GetTexelTypeForFormat(format);
+ if (!dest_type) {
+ Fail();
+ return nullptr;
+ }
- // The texel type is always a 4-element vector.
- const uint32_t dest_count = 4u;
- TINT_ASSERT(Reader, dest_type->Is<Vector>() &&
- dest_type->As<Vector>()->size == dest_count);
- TINT_ASSERT(Reader, dest_type->IsFloatVector() ||
- dest_type->IsUnsignedIntegerVector() ||
- dest_type->IsSignedIntegerVector());
+ // The texel type is always a 4-element vector.
+ const uint32_t dest_count = 4u;
+ TINT_ASSERT(Reader, dest_type->Is<Vector>() && dest_type->As<Vector>()->size == dest_count);
+ TINT_ASSERT(Reader, dest_type->IsFloatVector() || dest_type->IsUnsignedIntegerVector() ||
+ dest_type->IsSignedIntegerVector());
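+
+ // Pass the texel through when the types already match, reject component-kind
+ // mismatches, then pad with zero components up to the 4-element texel type when
+ // the source has fewer than 4 components.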
- if (src_type == dest_type) {
- return texel.expr;
- }
-
- // Component type must match floatness, or integral signedness.
- if ((src_type->IsFloatScalarOrVector() != dest_type->IsFloatVector()) ||
- (src_type->IsUnsignedIntegerVector() !=
- dest_type->IsUnsignedIntegerVector()) ||
- (src_type->IsSignedIntegerVector() !=
- dest_type->IsSignedIntegerVector())) {
- Fail() << "invalid texel type for storage texture write: component must be "
- "float, signed integer, or unsigned integer "
- "to match the texture channel type: "
- << inst.PrettyPrint();
- return nullptr;
- }
+ if (src_type == dest_type) {
+ return texel.expr;
+ }
- const auto required_count = parser_impl_.GetChannelCountForFormat(format);
- TINT_ASSERT(Reader, 0 < required_count && required_count <= 4);
+ // Component type must match floatness, or integral signedness.
+ if ((src_type->IsFloatScalarOrVector() != dest_type->IsFloatVector()) ||
+ (src_type->IsUnsignedIntegerVector() != dest_type->IsUnsignedIntegerVector()) ||
+ (src_type->IsSignedIntegerVector() != dest_type->IsSignedIntegerVector())) {
+ Fail() << "invalid texel type for storage texture write: component must be "
+ "float, signed integer, or unsigned integer "
+ "to match the texture channel type: "
+ << inst.PrettyPrint();
+ return nullptr;
+ }
- const uint32_t src_count =
- src_type->IsScalar() ? 1 : src_type->As<Vector>()->size;
- if (src_count < required_count) {
- Fail() << "texel has too few components for storage texture: " << src_count
- << " provided but " << required_count
- << " required, in: " << inst.PrettyPrint();
- return nullptr;
- }
-
- // It's valid for required_count < src_count. The extra components will
- // be written out but the textureStore will ignore them.
-
- if (src_count < dest_count) {
- // Expand the texel to a 4 element vector.
- auto* component_type =
- texel.type->IsScalar() ? texel.type : texel.type->As<Vector>()->type;
- texel.type = ty_.Vector(component_type, dest_count);
- ast::ExpressionList exprs;
- exprs.push_back(texel.expr);
- for (auto i = src_count; i < dest_count; i++) {
- exprs.push_back(parser_impl_.MakeNullExpression(component_type).expr);
- }
- texel.expr = builder_.Construct(Source{}, texel.type->Build(builder_),
- std::move(exprs));
- }
-
- return texel.expr;
+ const auto required_count = parser_impl_.GetChannelCountForFormat(format);
+ TINT_ASSERT(Reader, 0 < required_count && required_count <= 4);
+
+ const uint32_t src_count = src_type->IsScalar() ? 1 : src_type->As<Vector>()->size;
+ if (src_count < required_count) {
+ Fail() << "texel has too few components for storage texture: " << src_count
+ << " provided but " << required_count << " required, in: " << inst.PrettyPrint();
+ return nullptr;
+ }
+
+ // It's valid for required_count < src_count. The extra components will
+ // be written out but the textureStore will ignore them.
+
+ if (src_count < dest_count) {
+ // Expand the texel to a 4 element vector.
+ auto* component_type = texel.type->IsScalar() ? texel.type : texel.type->As<Vector>()->type;
+ texel.type = ty_.Vector(component_type, dest_count);
+ ast::ExpressionList exprs;
+ exprs.push_back(texel.expr);
+ for (auto i = src_count; i < dest_count; i++) {
+ exprs.push_back(parser_impl_.MakeNullExpression(component_type).expr);
+ }
+ texel.expr = builder_.Construct(Source{}, texel.type->Build(builder_), std::move(exprs));
+ }
+
+ return texel.expr;
}
TypedExpression FunctionEmitter::ToI32(TypedExpression value) {
- if (!value || value.type->Is<I32>()) {
- return value;
- }
- return {ty_.I32(), builder_.Construct(Source{}, builder_.ty.i32(),
- ast::ExpressionList{value.expr})};
+ if (!value || value.type->Is<I32>()) {
+ return value;
+ }
+ return {ty_.I32(),
+ builder_.Construct(Source{}, builder_.ty.i32(), ast::ExpressionList{value.expr})};
}
TypedExpression FunctionEmitter::ToSignedIfUnsigned(TypedExpression value) {
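+ // Unsigned scalars are converted to i32 and unsigned vectors to vecN<i32> via
+ // value conversion; all other values are returned unchanged.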
- if (!value || !value.type->IsUnsignedScalarOrVector()) {
- return value;
- }
- if (auto* vec_type = value.type->As<Vector>()) {
- auto* new_type = ty_.Vector(ty_.I32(), vec_type->size);
- return {new_type, builder_.Construct(new_type->Build(builder_),
- ast::ExpressionList{value.expr})};
- }
- return ToI32(value);
+ if (!value || !value.type->IsUnsignedScalarOrVector()) {
+ return value;
+ }
+ if (auto* vec_type = value.type->As<Vector>()) {
+ auto* new_type = ty_.Vector(ty_.I32(), vec_type->size);
+ return {new_type,
+ builder_.Construct(new_type->Build(builder_), ast::ExpressionList{value.expr})};
+ }
+ return ToI32(value);
}
-TypedExpression FunctionEmitter::MakeArrayLength(
- const spvtools::opt::Instruction& inst) {
- if (inst.NumInOperands() != 2) {
- // Binary parsing will fail on this anyway.
- Fail() << "invalid array length: requires 2 operands: "
- << inst.PrettyPrint();
- return {};
- }
- const auto struct_ptr_id = inst.GetSingleWordInOperand(0);
- const auto field_index = inst.GetSingleWordInOperand(1);
- const auto struct_ptr_type_id =
- def_use_mgr_->GetDef(struct_ptr_id)->type_id();
- // Trace through the pointer type to get to the struct type.
- const auto struct_type_id =
- def_use_mgr_->GetDef(struct_ptr_type_id)->GetSingleWordInOperand(1);
- const auto field_name = namer_.GetMemberName(struct_type_id, field_index);
- if (field_name.empty()) {
- Fail() << "struct index out of bounds for array length: "
- << inst.PrettyPrint();
- return {};
- }
+TypedExpression FunctionEmitter::MakeArrayLength(const spvtools::opt::Instruction& inst) {
+ if (inst.NumInOperands() != 2) {
+ // Binary parsing will fail on this anyway.
+ Fail() << "invalid array length: requires 2 operands: " << inst.PrettyPrint();
+ return {};
+ }
+ const auto struct_ptr_id = inst.GetSingleWordInOperand(0);
+ const auto field_index = inst.GetSingleWordInOperand(1);
+ const auto struct_ptr_type_id = def_use_mgr_->GetDef(struct_ptr_id)->type_id();
+ // Trace through the pointer type to get to the struct type.
+ const auto struct_type_id = def_use_mgr_->GetDef(struct_ptr_type_id)->GetSingleWordInOperand(1);
+ const auto field_name = namer_.GetMemberName(struct_type_id, field_index);
+ if (field_name.empty()) {
+ Fail() << "struct index out of bounds for array length: " << inst.PrettyPrint();
+ return {};
+ }
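+
+ // Emit arrayLength(&expr.field): dereference the struct pointer if needed, access
+ // the named member, and take its address as the builtin argument.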
- auto member_expr = MakeExpression(struct_ptr_id);
- if (!member_expr) {
- return {};
- }
- if (member_expr.type->Is<Pointer>()) {
- member_expr = Dereference(member_expr);
- }
- auto* member_ident = create<ast::IdentifierExpression>(
- Source{}, builder_.Symbols().Register(field_name));
- auto* member_access = create<ast::MemberAccessorExpression>(
- Source{}, member_expr.expr, member_ident);
-
- // Generate the builtin function call.
- auto* call_expr =
- builder_.Call(Source{}, "arrayLength", builder_.AddressOf(member_access));
-
- return {parser_impl_.ConvertType(inst.type_id()), call_expr};
+ auto member_expr = MakeExpression(struct_ptr_id);
+ if (!member_expr) {
+ return {};
+ }
+ if (member_expr.type->Is<Pointer>()) {
+ member_expr = Dereference(member_expr);
+ }
+ auto* member_ident =
+ create<ast::IdentifierExpression>(Source{}, builder_.Symbols().Register(field_name));
+ auto* member_access =
+ create<ast::MemberAccessorExpression>(Source{}, member_expr.expr, member_ident);
+
+ // Generate the builtin function call.
+ auto* call_expr = builder_.Call(Source{}, "arrayLength", builder_.AddressOf(member_access));
+
+ return {parser_impl_.ConvertType(inst.type_id()), call_expr};
}
-TypedExpression FunctionEmitter::MakeOuterProduct(
- const spvtools::opt::Instruction& inst) {
- // Synthesize the result.
- auto col = MakeOperand(inst, 0);
- auto row = MakeOperand(inst, 1);
- auto* col_ty = As<Vector>(col.type);
- auto* row_ty = As<Vector>(row.type);
- auto* result_ty = As<Matrix>(parser_impl_.ConvertType(inst.type_id()));
- if (!col_ty || !col_ty || !result_ty || result_ty->type != col_ty->type ||
- result_ty->type != row_ty->type || result_ty->columns != row_ty->size ||
- result_ty->rows != col_ty->size) {
- Fail() << "invalid outer product instruction: bad types "
- << inst.PrettyPrint();
- return {};
- }
-
- // Example:
- // c : vec3 column vector
- // r : vec2 row vector
- // OuterProduct c r : mat2x3 (2 columns, 3 rows)
- // Result:
- // | c.x * r.x c.x * r.y |
- // | c.y * r.x c.y * r.y |
- // | c.z * r.x c.z * r.y |
-
- ast::ExpressionList result_columns;
- for (uint32_t icol = 0; icol < result_ty->columns; icol++) {
- ast::ExpressionList result_row;
- auto* row_factor = create<ast::MemberAccessorExpression>(Source{}, row.expr,
- Swizzle(icol));
- for (uint32_t irow = 0; irow < result_ty->rows; irow++) {
- auto* column_factor = create<ast::MemberAccessorExpression>(
- Source{}, col.expr, Swizzle(irow));
- auto* elem = create<ast::BinaryExpression>(
- Source{}, ast::BinaryOp::kMultiply, row_factor, column_factor);
- result_row.push_back(elem);
- }
- result_columns.push_back(
- builder_.Construct(Source{}, col_ty->Build(builder_), result_row));
- }
- return {result_ty, builder_.Construct(Source{}, result_ty->Build(builder_),
- result_columns)};
+TypedExpression FunctionEmitter::MakeOuterProduct(const spvtools::opt::Instruction& inst) {
+ // Synthesize the result.
+ auto col = MakeOperand(inst, 0);
+ auto row = MakeOperand(inst, 1);
+ auto* col_ty = As<Vector>(col.type);
+ auto* row_ty = As<Vector>(row.type);
+ auto* result_ty = As<Matrix>(parser_impl_.ConvertType(inst.type_id()));
+ if (!col_ty || !row_ty || !result_ty || result_ty->type != col_ty->type ||
+ result_ty->type != row_ty->type || result_ty->columns != row_ty->size ||
+ result_ty->rows != col_ty->size) {
+ Fail() << "invalid outer product instruction: bad types " << inst.PrettyPrint();
+ return {};
+ }
+
+ // Example:
+ // c : vec3 column vector
+ // r : vec2 row vector
+ // OuterProduct c r : mat2x3 (2 columns, 3 rows)
+ // Result:
+ // | c.x * r.x c.x * r.y |
+ // | c.y * r.x c.y * r.y |
+ // | c.z * r.x c.z * r.y |
+
+ ast::ExpressionList result_columns;
+ for (uint32_t icol = 0; icol < result_ty->columns; icol++) {
+ ast::ExpressionList result_row;
+ auto* row_factor = create<ast::MemberAccessorExpression>(Source{}, row.expr, Swizzle(icol));
+ for (uint32_t irow = 0; irow < result_ty->rows; irow++) {
+ auto* column_factor =
+ create<ast::MemberAccessorExpression>(Source{}, col.expr, Swizzle(irow));
+ auto* elem = create<ast::BinaryExpression>(Source{}, ast::BinaryOp::kMultiply,
+ row_factor, column_factor);
+ result_row.push_back(elem);
+ }
+ result_columns.push_back(builder_.Construct(Source{}, col_ty->Build(builder_), result_row));
+ }
+ return {result_ty, builder_.Construct(Source{}, result_ty->Build(builder_), result_columns)};
}
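The comment block above already sketches the column-major result for a vec3 column and a vec2 row. As a minimal standalone illustration (plain C++ with invented names, not Tint code), the same computation builds each result column by scaling the column vector by one row component:

```cpp
#include <cstddef>
#include <vector>

// Sketch of the OuterProduct lowering: for a column vector c (R elements) and
// a row vector r (C elements), the result is a C-column, R-row matrix whose
// column i is c scaled by r[i]. Names and types here are illustrative only.
std::vector<std::vector<float>> OuterProductSketch(const std::vector<float>& c,
                                                   const std::vector<float>& r) {
    std::vector<std::vector<float>> columns(r.size(), std::vector<float>(c.size()));
    for (size_t icol = 0; icol < r.size(); ++icol) {      // one result column per r component
        for (size_t irow = 0; irow < c.size(); ++irow) {  // one element per c component
            columns[icol][irow] = c[irow] * r[icol];
        }
    }
    return columns;  // columns[icol] mirrors the icol-th constructed column vector
}
```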
-bool FunctionEmitter::MakeVectorInsertDynamic(
- const spvtools::opt::Instruction& inst) {
- // For
- // %result = OpVectorInsertDynamic %type %src_vector %component %index
- // there are two cases.
- //
- // Case 1:
- // The %src_vector value has already been hoisted into a variable.
- // In this case, assign %src_vector to that variable, then write the
- // component into the right spot:
- //
- // hoisted = src_vector;
- // hoisted[index] = component;
- //
- // Case 2:
- // The %src_vector value is not hoisted. In this case, make a temporary
- // variable with the %src_vector contents, then write the component,
- // and then make a let-declaration that reads the value out:
- //
- // var temp : type = src_vector;
- // temp[index] = component;
- // let result : type = temp;
- //
- // Then use result everywhere the original SPIR-V id is used. Using a const
- // like this avoids constantly reloading the value many times.
-
- auto* type = parser_impl_.ConvertType(inst.type_id());
- auto src_vector = MakeOperand(inst, 0);
- auto component = MakeOperand(inst, 1);
- auto index = MakeOperand(inst, 2);
-
- std::string var_name;
- auto original_value_name = namer_.Name(inst.result_id());
- const bool hoisted = WriteIfHoistedVar(inst, src_vector);
- if (hoisted) {
- // The variable was already declared in an earlier block.
- var_name = original_value_name;
- // Assign the source vector value to it.
- builder_.Assign({}, builder_.Expr(var_name), src_vector.expr);
- } else {
- // Synthesize the temporary variable.
- // It doesn't correspond to a SPIR-V ID, so we don't use the ordinary
- // API in parser_impl_.
- var_name = namer_.MakeDerivedName(original_value_name);
-
- auto* temp_var = builder_.Var(var_name, type->Build(builder_),
- ast::StorageClass::kNone, src_vector.expr);
-
- AddStatement(builder_.Decl({}, temp_var));
- }
-
- auto* lhs = create<ast::IndexAccessorExpression>(
- Source{}, builder_.Expr(var_name), index.expr);
- if (!lhs) {
- return false;
- }
+bool FunctionEmitter::MakeVectorInsertDynamic(const spvtools::opt::Instruction& inst) {
+ // For
+ // %result = OpVectorInsertDynamic %type %src_vector %component %index
+ // there are two cases.
+ //
+ // Case 1:
+ // The %src_vector value has already been hoisted into a variable.
+ // In this case, assign %src_vector to that variable, then write the
+ // component into the right spot:
+ //
+ // hoisted = src_vector;
+ // hoisted[index] = component;
+ //
+ // Case 2:
+ // The %src_vector value is not hoisted. In this case, make a temporary
+ // variable with the %src_vector contents, then write the component,
+ // and then make a let-declaration that reads the value out:
+ //
+ // var temp : type = src_vector;
+ // temp[index] = component;
+ // let result : type = temp;
+ //
+ // Then use result everywhere the original SPIR-V id is used. Using a const
+ // like this avoids constantly reloading the value many times.
+
+ auto* type = parser_impl_.ConvertType(inst.type_id());
+ auto src_vector = MakeOperand(inst, 0);
+ auto component = MakeOperand(inst, 1);
+ auto index = MakeOperand(inst, 2);
+
+ std::string var_name;
+ auto original_value_name = namer_.Name(inst.result_id());
+ const bool hoisted = WriteIfHoistedVar(inst, src_vector);
+ if (hoisted) {
+ // The variable was already declared in an earlier block.
+ var_name = original_value_name;
+ // Assign the source vector value to it.
+ builder_.Assign({}, builder_.Expr(var_name), src_vector.expr);
+ } else {
+ // Synthesize the temporary variable.
+ // It doesn't correspond to a SPIR-V ID, so we don't use the ordinary
+ // API in parser_impl_.
+ var_name = namer_.MakeDerivedName(original_value_name);
- AddStatement(builder_.Assign(lhs, component.expr));
+ auto* temp_var = builder_.Var(var_name, type->Build(builder_), ast::StorageClass::kNone,
+ src_vector.expr);
- if (hoisted) {
- // The hoisted variable itself stands for this result ID.
- return success();
- }
- // Create a new let-declaration that is initialized by the contents
- // of the temporary variable.
- return EmitConstDefinition(inst, {type, builder_.Expr(var_name)});
+ AddStatement(builder_.Decl({}, temp_var));
+ }
+
+ auto* lhs = create<ast::IndexAccessorExpression>(Source{}, builder_.Expr(var_name), index.expr);
+ if (!lhs) {
+ return false;
+ }
+
+ AddStatement(builder_.Assign(lhs, component.expr));
+
+ if (hoisted) {
+ // The hoisted variable itself stands for this result ID.
+ return success();
+ }
+ // Create a new let-declaration that is initialized by the contents
+ // of the temporary variable.
+ return EmitConstDefinition(inst, {type, builder_.Expr(var_name)});
}
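For the non-hoisted case, the var/let pattern described in the comment has a direct analogue in plain C++. The sketch below is illustrative only (invented names, fixed vector size), not generated code: copy the source vector into a mutable temporary, overwrite one lane, then bind the result to an immutable name so later uses never reload or mutate it.

```cpp
#include <array>
#include <cstddef>

// Analogue of:  var temp : type = src_vector;
//               temp[index] = component;
//               let result : type = temp;
std::array<float, 4> VectorInsertDynamicSketch(std::array<float, 4> src_vector,
                                               float component,
                                               size_t index) {
    std::array<float, 4> temp = src_vector;    // "var temp = src_vector"
    temp[index] = component;                   // "temp[index] = component"
    const std::array<float, 4> result = temp;  // "let result = temp"
    return result;
}
```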
-bool FunctionEmitter::MakeCompositeInsert(
- const spvtools::opt::Instruction& inst) {
- // For
- // %result = OpCompositeInsert %type %object %composite 1 2 3 ...
- // there are two cases.
- //
- // Case 1:
- // The %composite value has already been hoisted into a variable.
- // In this case, assign %composite to that variable, then write the
- // component into the right spot:
- //
- // hoisted = composite;
- // hoisted[index].x = object;
- //
- // Case 2:
- // The %composite value is not hoisted. In this case, make a temporary
- // variable with the %composite contents, then write the component,
- // and then make a let-declaration that reads the value out:
- //
- // var temp : type = composite;
- // temp[index].x = object;
- // let result : type = temp;
- //
- // Then use result everywhere the original SPIR-V id is used. Using a const
- // like this avoids constantly reloading the value many times.
- //
- // This technique is a combination of:
- // - making a temporary variable and constant declaration, like what we do
- // for VectorInsertDynamic, and
- // - building up an access-chain like access like for CompositeExtract, but
- // on the left-hand side of the assignment.
-
- auto* type = parser_impl_.ConvertType(inst.type_id());
- auto component = MakeOperand(inst, 0);
- auto src_composite = MakeOperand(inst, 1);
-
- std::string var_name;
- auto original_value_name = namer_.Name(inst.result_id());
- const bool hoisted = WriteIfHoistedVar(inst, src_composite);
- if (hoisted) {
- // The variable was already declared in an earlier block.
- var_name = original_value_name;
- // Assign the source composite value to it.
- builder_.Assign({}, builder_.Expr(var_name), src_composite.expr);
- } else {
- // Synthesize a temporary variable.
- // It doesn't correspond to a SPIR-V ID, so we don't use the ordinary
- // API in parser_impl_.
- var_name = namer_.MakeDerivedName(original_value_name);
- auto* temp_var = builder_.Var(var_name, type->Build(builder_),
- ast::StorageClass::kNone, src_composite.expr);
- AddStatement(builder_.Decl({}, temp_var));
- }
-
- TypedExpression seed_expr{type, builder_.Expr(var_name)};
-
- // The left-hand side of the assignment *looks* like a decomposition.
- TypedExpression lhs =
- MakeCompositeValueDecomposition(inst, seed_expr, inst.type_id(), 2);
- if (!lhs) {
- return false;
- }
+bool FunctionEmitter::MakeCompositeInsert(const spvtools::opt::Instruction& inst) {
+ // For
+ // %result = OpCompositeInsert %type %object %composite 1 2 3 ...
+ // there are two cases.
+ //
+ // Case 1:
+ // The %composite value has already been hoisted into a variable.
+ // In this case, assign %composite to that variable, then write the
+ // component into the right spot:
+ //
+ // hoisted = composite;
+ // hoisted[index].x = object;
+ //
+ // Case 2:
+ // The %composite value is not hoisted. In this case, make a temporary
+ // variable with the %composite contents, then write the component,
+ // and then make a let-declaration that reads the value out:
+ //
+ // var temp : type = composite;
+ // temp[index].x = object;
+ // let result : type = temp;
+ //
+ // Then use result everywhere the original SPIR-V id is used. Using a const
+ // like this avoids constantly reloading the value many times.
+ //
+ // This technique is a combination of:
+ // - making a temporary variable and constant declaration, like what we do
+ // for VectorInsertDynamic, and
+    //   - building up an access-chain-like access, as for CompositeExtract, but
+ // on the left-hand side of the assignment.
+
+ auto* type = parser_impl_.ConvertType(inst.type_id());
+ auto component = MakeOperand(inst, 0);
+ auto src_composite = MakeOperand(inst, 1);
+
+ std::string var_name;
+ auto original_value_name = namer_.Name(inst.result_id());
+ const bool hoisted = WriteIfHoistedVar(inst, src_composite);
+ if (hoisted) {
+ // The variable was already declared in an earlier block.
+ var_name = original_value_name;
+ // Assign the source composite value to it.
+ builder_.Assign({}, builder_.Expr(var_name), src_composite.expr);
+ } else {
+ // Synthesize a temporary variable.
+ // It doesn't correspond to a SPIR-V ID, so we don't use the ordinary
+ // API in parser_impl_.
+ var_name = namer_.MakeDerivedName(original_value_name);
+ auto* temp_var = builder_.Var(var_name, type->Build(builder_), ast::StorageClass::kNone,
+ src_composite.expr);
+ AddStatement(builder_.Decl({}, temp_var));
+ }
- AddStatement(builder_.Assign(lhs.expr, component.expr));
+ TypedExpression seed_expr{type, builder_.Expr(var_name)};
- if (hoisted) {
- // The hoisted variable itself stands for this result ID.
- return success();
- }
- // Create a new let-declaration that is initialized by the contents
- // of the temporary variable.
- return EmitConstDefinition(inst, {type, builder_.Expr(var_name)});
+ // The left-hand side of the assignment *looks* like a decomposition.
+ TypedExpression lhs = MakeCompositeValueDecomposition(inst, seed_expr, inst.type_id(), 2);
+ if (!lhs) {
+ return false;
+ }
+
+ AddStatement(builder_.Assign(lhs.expr, component.expr));
+
+ if (hoisted) {
+ // The hoisted variable itself stands for this result ID.
+ return success();
+ }
+ // Create a new let-declaration that is initialized by the contents
+ // of the temporary variable.
+ return EmitConstDefinition(inst, {type, builder_.Expr(var_name)});
}
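The composite case follows the same temporary-then-let shape; the difference is that the left-hand side is an access chain built from the literal indices of the instruction. A rough C++ analogue, with a made-up struct layout standing in for the composite type:

```cpp
#include <array>

// Analogue of OpCompositeInsert with literal indices, e.g. writing into
// %composite at indices {1, 2}. The Outer/Inner layout is invented.
struct InnerSketch {
    std::array<float, 3> values;
};
struct OuterSketch {
    float scale;
    InnerSketch inner;
};

OuterSketch CompositeInsertSketch(OuterSketch composite, float object) {
    OuterSketch temp = composite;     // var temp : type = composite;
    temp.inner.values[2] = object;    // access-chain-style write on the left-hand side
    const OuterSketch result = temp;  // let result : type = temp;
    return result;
}
```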
TypedExpression FunctionEmitter::AddressOf(TypedExpression expr) {
- auto* ref = expr.type->As<Reference>();
- if (!ref) {
- Fail() << "AddressOf() called on non-reference type";
- return {};
- }
- return {
- ty_.Pointer(ref->type, ref->storage_class),
- create<ast::UnaryOpExpression>(Source{}, ast::UnaryOp::kAddressOf,
- expr.expr),
- };
+ auto* ref = expr.type->As<Reference>();
+ if (!ref) {
+ Fail() << "AddressOf() called on non-reference type";
+ return {};
+ }
+ return {
+ ty_.Pointer(ref->type, ref->storage_class),
+ create<ast::UnaryOpExpression>(Source{}, ast::UnaryOp::kAddressOf, expr.expr),
+ };
}
TypedExpression FunctionEmitter::Dereference(TypedExpression expr) {
- auto* ptr = expr.type->As<Pointer>();
- if (!ptr) {
- Fail() << "Dereference() called on non-pointer type";
- return {};
- }
- return {
- ptr->type,
- create<ast::UnaryOpExpression>(Source{}, ast::UnaryOp::kIndirection,
- expr.expr),
- };
+ auto* ptr = expr.type->As<Pointer>();
+ if (!ptr) {
+ Fail() << "Dereference() called on non-pointer type";
+ return {};
+ }
+ return {
+ ptr->type,
+ create<ast::UnaryOpExpression>(Source{}, ast::UnaryOp::kIndirection, expr.expr),
+ };
}
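These two helpers are type-level inverses: AddressOf turns a reference-typed expression into a pointer by wrapping it in a unary '&', and Dereference turns a pointer back into its pointee by wrapping it in '*'. A trivial C++ analogue of the round trip (illustrative only):

```cpp
// AddressOf: reference -> pointer; Dereference: pointer -> pointee value.
int AddressOfDereferenceSketch(int& ref) {
    int* ptr = &ref;  // like AddressOf: wrap in '&', type becomes a pointer
    return *ptr;      // like Dereference: wrap in '*', type becomes the pointee
}
```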
bool FunctionEmitter::IsFloatZero(uint32_t value_id) {
- if (const auto* c = constant_mgr_->FindDeclaredConstant(value_id)) {
- if (const auto* float_const = c->AsFloatConstant()) {
- return 0.0f == float_const->GetFloatValue();
- }
- if (c->AsNullConstant()) {
- // Valid SPIR-V requires it to be a float value anyway.
- return true;
+ if (const auto* c = constant_mgr_->FindDeclaredConstant(value_id)) {
+ if (const auto* float_const = c->AsFloatConstant()) {
+ return 0.0f == float_const->GetFloatValue();
+ }
+ if (c->AsNullConstant()) {
+ // Valid SPIR-V requires it to be a float value anyway.
+ return true;
+ }
}
- }
- return false;
+ return false;
}
bool FunctionEmitter::IsFloatOne(uint32_t value_id) {
- if (const auto* c = constant_mgr_->FindDeclaredConstant(value_id)) {
- if (const auto* float_const = c->AsFloatConstant()) {
- return 1.0f == float_const->GetFloatValue();
+ if (const auto* c = constant_mgr_->FindDeclaredConstant(value_id)) {
+ if (const auto* float_const = c->AsFloatConstant()) {
+ return 1.0f == float_const->GetFloatValue();
+ }
}
- }
- return false;
+ return false;
}
FunctionEmitter::FunctionDeclaration::FunctionDeclaration() = default;
diff --git a/chromium/third_party/dawn/src/tint/reader/spirv/function.h b/chromium/third_party/dawn/src/tint/reader/spirv/function.h
index b000370cc04..8eb33ab8cd2 100644
--- a/chromium/third_party/dawn/src/tint/reader/spirv/function.h
+++ b/chromium/third_party/dawn/src/tint/reader/spirv/function.h
@@ -43,29 +43,29 @@ namespace tint::reader::spirv {
// kLoopBreak, and kLoopContinue directly map to 'break', 'break', and
// 'continue', respectively.
enum class EdgeKind {
- // A back-edge: An edge from a node to one of its ancestors in a depth-first
- // search from the entry block.
- kBack,
- // An edge from a node to the merge block of the nearest enclosing switch,
- // where there is no intervening loop.
- kSwitchBreak,
- // An edge from a node to the merge block of the nearest enclosing loop, where
- // there is no intervening switch.
- // The source block is a "break block" as defined by SPIR-V.
- kLoopBreak,
- // An edge from a node in a loop body to the associated continue target, where
- // there are no other intervening loops or switches.
- // The source block is a "continue block" as defined by SPIR-V.
- kLoopContinue,
- // An edge from a node to the merge block of the nearest enclosing structured
- // construct, but which is neither a kSwitchBreak or a kLoopBreak.
- // This can only occur for an "if" selection, i.e. where the selection
- // header ends in OpBranchConditional.
- kIfBreak,
- // An edge from one switch case to the next sibling switch case.
- kCaseFallThrough,
- // None of the above.
- kForward
+ // A back-edge: An edge from a node to one of its ancestors in a depth-first
+ // search from the entry block.
+ kBack,
+ // An edge from a node to the merge block of the nearest enclosing switch,
+ // where there is no intervening loop.
+ kSwitchBreak,
+ // An edge from a node to the merge block of the nearest enclosing loop, where
+ // there is no intervening switch.
+ // The source block is a "break block" as defined by SPIR-V.
+ kLoopBreak,
+ // An edge from a node in a loop body to the associated continue target, where
+ // there are no other intervening loops or switches.
+ // The source block is a "continue block" as defined by SPIR-V.
+ kLoopContinue,
+ // An edge from a node to the merge block of the nearest enclosing structured
+    // construct, but which is neither a kSwitchBreak nor a kLoopBreak.
+ // This can only occur for an "if" selection, i.e. where the selection
+ // header ends in OpBranchConditional.
+ kIfBreak,
+ // An edge from one switch case to the next sibling switch case.
+ kCaseFallThrough,
+ // None of the above.
+ kForward
};
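As the comment before the enum notes, several of these edge kinds map directly onto WGSL statements. The sketch below mirrors the enum in a standalone C++ snippet and shows one plausible mapping; it is a simplification, since the real emitter also elides redundant breaks and routes if-breaks through flow guards.

```cpp
#include <string>

// Local mirror of EdgeKind, used only for this sketch.
enum class EdgeKindSketch {
    kBack,
    kSwitchBreak,
    kLoopBreak,
    kLoopContinue,
    kIfBreak,
    kCaseFallThrough,
    kForward,
};

// Plausible statement emitted for an outgoing edge of each kind.
std::string StatementForEdge(EdgeKindSketch kind) {
    switch (kind) {
        case EdgeKindSketch::kSwitchBreak:
        case EdgeKindSketch::kLoopBreak:
            return "break;";
        case EdgeKindSketch::kLoopContinue:
            return "continue;";
        case EdgeKindSketch::kCaseFallThrough:
            return "fallthrough;";
        default:
            // Back, if-break, and forward edges are expressed by the
            // surrounding structured constructs rather than by a statement.
            return "";
    }
}
```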
/// The number used to represent an invalid block position
@@ -73,107 +73,107 @@ static constexpr uint32_t kInvalidBlockPos = ~0u;
/// Bookkeeping info for a basic block.
struct BlockInfo {
- /// Constructor
- /// @param bb internal representation of the basic block
- explicit BlockInfo(const spvtools::opt::BasicBlock& bb);
- ~BlockInfo();
-
- /// The internal representation of the basic block.
- const spvtools::opt::BasicBlock* basic_block;
-
- /// The ID of the OpLabel instruction that starts this block.
- uint32_t id = 0;
-
- /// The position of this block in the reverse structured post-order.
- /// If the block is not in that order, then this remains the invalid value.
- uint32_t pos = kInvalidBlockPos;
-
- /// If this block is a header, then this is the ID of the merge block.
- uint32_t merge_for_header = 0;
- /// If this block is a loop header, then this is the ID of the continue
- /// target.
- uint32_t continue_for_header = 0;
- /// If this block is a merge, then this is the ID of the header.
- uint32_t header_for_merge = 0;
- /// If this block is a continue target, then this is the ID of the loop
- /// header.
- uint32_t header_for_continue = 0;
- /// Is this block a continue target which is its own loop header block?
- /// In this case the continue construct is the entire loop. The associated
- /// "loop construct" is empty, and not represented.
- bool is_continue_entire_loop = false;
-
- /// The immediately enclosing structured construct. If this block is not
- /// in the block order at all, then this is still nullptr.
- const Construct* construct = nullptr;
-
- /// Maps the ID of a successor block (in the CFG) to its edge classification.
- std::unordered_map<uint32_t, EdgeKind> succ_edge;
-
- /// The following fields record relationships among blocks in a selection
- /// construct for an OpSwitch instruction.
-
- /// If not null, then the pointed-at construct is a selection for an OpSwitch,
- /// and this block is a case target for it. We say this block "heads" the
- /// case construct.
- const Construct* case_head_for = nullptr;
- /// If not null, then the pointed-at construct is a selection for an OpSwitch,
- /// and this block is the default target for it. We say this block "heads"
- /// the default case construct.
- const Construct* default_head_for = nullptr;
- /// Is this a default target for a switch, and is it also the merge for its
- /// switch?
- bool default_is_merge = false;
- /// The list of switch values that cause a branch to this block.
- std::unique_ptr<std::vector<uint64_t>> case_values;
-
- /// The following fields record relationships among blocks in a selection
- /// construct for an OpBranchConditional instruction.
-
- /// When this block is an if-selection header, this is the edge kind
- /// for the true branch.
- EdgeKind true_kind = EdgeKind::kForward;
- /// When this block is an if-selection header, this is the edge kind
- /// for the false branch.
- EdgeKind false_kind = EdgeKind::kForward;
- /// If not 0, then this block is an if-selection header, and `true_head` is
- /// the target id of the true branch on the OpBranchConditional, and that
- /// target is inside the if-selection.
- uint32_t true_head = 0;
- /// If not 0, then this block is an if-selection header, and `false_head`
- /// is the target id of the false branch on the OpBranchConditional, and
- /// that target is inside the if-selection.
- uint32_t false_head = 0;
- /// If not 0, then this block is an if-selection header, and when following
- /// the flow via the true and false branches, control first reconverges at
- /// the block with ID `premerge_head`, and `premerge_head` is still inside
- /// the if-selection.
- uint32_t premerge_head = 0;
- /// If non-empty, then this block is an if-selection header, and control flow
- /// in the body must be guarded by a boolean flow variable with this name.
- /// This occurs when a block in this selection has both an if-break edge, and
- /// also a different normal forward edge but without a merge instruction.
- std::string flow_guard_name = "";
-
- /// The result IDs that this block is responsible for declaring as a
- /// hoisted variable.
- /// @see DefInfo#requires_hoisted_def
- std::vector<uint32_t> hoisted_ids;
-
- /// A PhiAssignment represents the assignment of a value to the state
- /// variable associated with an OpPhi in a successor block.
- struct PhiAssignment {
- /// The ID of an OpPhi receiving a value from this basic block.
- uint32_t phi_id;
- /// The the value carried to the given OpPhi.
- uint32_t value;
- };
- /// If this basic block branches to a visited basic block containing phis,
- /// then this is the list of writes to the variables associated those phis.
- std::vector<PhiAssignment> phi_assignments;
- /// The IDs of OpPhi instructions which require their associated state
- /// variable to be declared in this basic block.
- std::vector<uint32_t> phis_needing_state_vars;
+ /// Constructor
+ /// @param bb internal representation of the basic block
+ explicit BlockInfo(const spvtools::opt::BasicBlock& bb);
+ ~BlockInfo();
+
+ /// The internal representation of the basic block.
+ const spvtools::opt::BasicBlock* basic_block;
+
+ /// The ID of the OpLabel instruction that starts this block.
+ uint32_t id = 0;
+
+ /// The position of this block in the reverse structured post-order.
+ /// If the block is not in that order, then this remains the invalid value.
+ uint32_t pos = kInvalidBlockPos;
+
+ /// If this block is a header, then this is the ID of the merge block.
+ uint32_t merge_for_header = 0;
+ /// If this block is a loop header, then this is the ID of the continue
+ /// target.
+ uint32_t continue_for_header = 0;
+ /// If this block is a merge, then this is the ID of the header.
+ uint32_t header_for_merge = 0;
+ /// If this block is a continue target, then this is the ID of the loop
+ /// header.
+ uint32_t header_for_continue = 0;
+ /// Is this block a continue target which is its own loop header block?
+ /// In this case the continue construct is the entire loop. The associated
+ /// "loop construct" is empty, and not represented.
+ bool is_continue_entire_loop = false;
+
+ /// The immediately enclosing structured construct. If this block is not
+ /// in the block order at all, then this is still nullptr.
+ const Construct* construct = nullptr;
+
+ /// Maps the ID of a successor block (in the CFG) to its edge classification.
+ std::unordered_map<uint32_t, EdgeKind> succ_edge;
+
+ /// The following fields record relationships among blocks in a selection
+ /// construct for an OpSwitch instruction.
+
+ /// If not null, then the pointed-at construct is a selection for an OpSwitch,
+ /// and this block is a case target for it. We say this block "heads" the
+ /// case construct.
+ const Construct* case_head_for = nullptr;
+ /// If not null, then the pointed-at construct is a selection for an OpSwitch,
+ /// and this block is the default target for it. We say this block "heads"
+ /// the default case construct.
+ const Construct* default_head_for = nullptr;
+ /// Is this a default target for a switch, and is it also the merge for its
+ /// switch?
+ bool default_is_merge = false;
+ /// The list of switch values that cause a branch to this block.
+ std::unique_ptr<std::vector<uint64_t>> case_values;
+
+ /// The following fields record relationships among blocks in a selection
+ /// construct for an OpBranchConditional instruction.
+
+ /// When this block is an if-selection header, this is the edge kind
+ /// for the true branch.
+ EdgeKind true_kind = EdgeKind::kForward;
+ /// When this block is an if-selection header, this is the edge kind
+ /// for the false branch.
+ EdgeKind false_kind = EdgeKind::kForward;
+ /// If not 0, then this block is an if-selection header, and `true_head` is
+ /// the target id of the true branch on the OpBranchConditional, and that
+ /// target is inside the if-selection.
+ uint32_t true_head = 0;
+ /// If not 0, then this block is an if-selection header, and `false_head`
+ /// is the target id of the false branch on the OpBranchConditional, and
+ /// that target is inside the if-selection.
+ uint32_t false_head = 0;
+ /// If not 0, then this block is an if-selection header, and when following
+ /// the flow via the true and false branches, control first reconverges at
+ /// the block with ID `premerge_head`, and `premerge_head` is still inside
+ /// the if-selection.
+ uint32_t premerge_head = 0;
+ /// If non-empty, then this block is an if-selection header, and control flow
+ /// in the body must be guarded by a boolean flow variable with this name.
+ /// This occurs when a block in this selection has both an if-break edge, and
+ /// also a different normal forward edge but without a merge instruction.
+ std::string flow_guard_name = "";
+
+ /// The result IDs that this block is responsible for declaring as a
+ /// hoisted variable.
+ /// @see DefInfo#requires_hoisted_def
+ std::vector<uint32_t> hoisted_ids;
+
+ /// A PhiAssignment represents the assignment of a value to the state
+ /// variable associated with an OpPhi in a successor block.
+ struct PhiAssignment {
+ /// The ID of an OpPhi receiving a value from this basic block.
+ uint32_t phi_id;
+        /// The value carried to the given OpPhi.
+ uint32_t value;
+ };
+ /// If this basic block branches to a visited basic block containing phis,
+    /// then this is the list of writes to the variables associated with those phis.
+ std::vector<PhiAssignment> phi_assignments;
+ /// The IDs of OpPhi instructions which require their associated state
+ /// variable to be declared in this basic block.
+ std::vector<uint32_t> phis_needing_state_vars;
};
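One non-obvious pair of fields is phi_assignments/phis_needing_state_vars: before a block branches to a successor that contains OpPhi instructions, the value carried along that edge is written to a state variable standing in for the phi. A hedged sketch of how such a list might be consumed (the variable naming scheme below is invented, not Tint's):

```cpp
#include <cstdint>
#include <string>
#include <vector>

// Local stand-in for BlockInfo::PhiAssignment.
struct PhiAssignmentSketch {
    uint32_t phi_id;  // the OpPhi receiving a value from this block
    uint32_t value;   // the value carried to that OpPhi
};

// Turn each pending phi assignment into a write to its state variable,
// emitted just before the branch out of the block.
std::vector<std::string> EmitPhiWritesSketch(
    const std::vector<PhiAssignmentSketch>& assignments) {
    std::vector<std::string> statements;
    for (const auto& pa : assignments) {
        statements.push_back("x_" + std::to_string(pa.phi_id) + "_phi = x_" +
                             std::to_string(pa.value) + ";");
    }
    return statements;
}
```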
/// Writes the BlockInfo to the ostream
@@ -181,55 +181,54 @@ struct BlockInfo {
/// @param bi the BlockInfo
/// @returns the ostream so calls can be chained
inline std::ostream& operator<<(std::ostream& o, const BlockInfo& bi) {
- o << "BlockInfo{"
- << " id: " << bi.id << " pos: " << bi.pos
- << " merge_for_header: " << bi.merge_for_header
- << " continue_for_header: " << bi.continue_for_header
- << " header_for_merge: " << bi.header_for_merge
- << " is_continue_entire_loop: " << int(bi.is_continue_entire_loop) << "}";
- return o;
+ o << "BlockInfo{"
+ << " id: " << bi.id << " pos: " << bi.pos << " merge_for_header: " << bi.merge_for_header
+ << " continue_for_header: " << bi.continue_for_header
+ << " header_for_merge: " << bi.header_for_merge
+ << " is_continue_entire_loop: " << int(bi.is_continue_entire_loop) << "}";
+ return o;
}
/// Reasons for avoiding generating an intermediate value.
enum class SkipReason {
- /// `kDontSkip`: The value should be generated. Used for most values.
- kDontSkip,
-
- /// For remaining cases, the value is not generated.
-
- /// `kOpaqueObject`: used for any intermediate value which is an sampler,
- /// image,
- /// or sampled image, or any pointer to such object. Code is generated
- /// for those objects only when emitting the image instructions that access
- /// the image (read, write, sample, gather, fetch, or query). For example,
- /// when encountering an OpImageSampleExplicitLod, a call to the
- /// textureSampleLevel builtin function will be emitted, and the call will
- /// directly reference the underlying texture and sampler (variable or
- /// function parameter).
- kOpaqueObject,
-
- /// `kSinkPointerIntoUse`: used to avoid emitting certain pointer expressions,
- /// by instead generating their reference expression directly at the point of
- /// use. For example, we apply this to OpAccessChain when indexing into a
- /// vector, to avoid generating address-of vector component expressions.
- kSinkPointerIntoUse,
-
- /// `kPointSizeBuiltinPointer`: the value is a pointer to the Position builtin
- /// variable. Don't generate its address. Avoid generating stores to this
- /// pointer.
- kPointSizeBuiltinPointer,
- /// `kPointSizeBuiltinValue`: the value is the value loaded from the
- /// PointSize builtin. Use 1.0f instead, because that's the only value
- /// supported by WebGPU.
- kPointSizeBuiltinValue,
-
- /// `kSampleMaskInBuiltinPointer`: the value is a pointer to the SampleMaskIn
- /// builtin input variable. Don't generate its address.
- kSampleMaskInBuiltinPointer,
-
- /// `kSampleMaskOutBuiltinPointer`: the value is a pointer to the SampleMask
- /// builtin output variable.
- kSampleMaskOutBuiltinPointer,
+ /// `kDontSkip`: The value should be generated. Used for most values.
+ kDontSkip,
+
+ /// For remaining cases, the value is not generated.
+
+    /// `kOpaqueObject`: used for any intermediate value which is a sampler,
+    /// image, or sampled image, or any pointer to such an object.
+    /// Code is generated
+ /// for those objects only when emitting the image instructions that access
+ /// the image (read, write, sample, gather, fetch, or query). For example,
+ /// when encountering an OpImageSampleExplicitLod, a call to the
+ /// textureSampleLevel builtin function will be emitted, and the call will
+ /// directly reference the underlying texture and sampler (variable or
+ /// function parameter).
+ kOpaqueObject,
+
+ /// `kSinkPointerIntoUse`: used to avoid emitting certain pointer expressions,
+ /// by instead generating their reference expression directly at the point of
+ /// use. For example, we apply this to OpAccessChain when indexing into a
+ /// vector, to avoid generating address-of vector component expressions.
+ kSinkPointerIntoUse,
+
+    /// `kPointSizeBuiltinPointer`: the value is a pointer to the PointSize builtin
+ /// variable. Don't generate its address. Avoid generating stores to this
+ /// pointer.
+ kPointSizeBuiltinPointer,
+ /// `kPointSizeBuiltinValue`: the value is the value loaded from the
+ /// PointSize builtin. Use 1.0f instead, because that's the only value
+ /// supported by WebGPU.
+ kPointSizeBuiltinValue,
+
+ /// `kSampleMaskInBuiltinPointer`: the value is a pointer to the SampleMaskIn
+ /// builtin input variable. Don't generate its address.
+ kSampleMaskInBuiltinPointer,
+
+ /// `kSampleMaskOutBuiltinPointer`: the value is a pointer to the SampleMask
+ /// builtin output variable.
+ kSampleMaskOutBuiltinPointer,
};
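kSinkPointerIntoUse is the least self-explanatory entry: instead of materializing an address-of-vector-component expression, the reader remembers the underlying vector and index and forms the component access only where it is used. A rough standalone analogue (invented types, not the reader's actual representation):

```cpp
#include <array>
#include <cstddef>

// Stand-in for a pointer that was "sunk into its use": rather than forming
// an address of a single vector component, keep the vector and the index.
struct SunkComponentRefSketch {
    std::array<float, 4>* vec;
    size_t index;
};

// At the point of use, generate the component access directly.
void StoreThroughSunkRef(const SunkComponentRefSketch& ref, float value) {
    (*ref.vec)[ref.index] = value;
}
```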
/// Bookkeeping info for a SPIR-V ID defined in the function, or some
@@ -240,81 +239,79 @@ enum class SkipReason {
/// function.
/// - certain module-scope builtin variables.
struct DefInfo {
- /// Constructor.
- /// @param def_inst the SPIR-V instruction defining the ID
- /// @param block_pos the position of the basic block where the ID is defined.
- /// @param index an ordering index for this local definition
- DefInfo(const spvtools::opt::Instruction& def_inst,
- uint32_t block_pos,
- size_t index);
- /// Destructor.
- ~DefInfo();
-
- /// The SPIR-V instruction that defines the ID.
- const spvtools::opt::Instruction& inst;
- /// The position of the first block in which this ID is visible, in function
- /// block order. For IDs defined outside of the function, it is 0.
- /// For IDs defined in the function, it is the position of the block
- /// containing the definition of the ID.
- /// See method `FunctionEmitter::ComputeBlockOrderAndPositions`
- const uint32_t block_pos = 0;
-
- /// An index for uniquely and deterministically ordering all DefInfo records
- /// in a function.
- const size_t index = 0;
-
- /// The number of uses of this ID.
- uint32_t num_uses = 0;
-
- /// The block position of the last use of this ID, or 0 if it is not used
- /// at all. The "last" ordering is determined by the function block order.
- uint32_t last_use_pos = 0;
-
- /// Is this value used in a construct other than the one in which it was
- /// defined?
- bool used_in_another_construct = false;
-
- /// True if this ID requires a WGSL 'const' definition, due to context. It
- /// might get one anyway (so this is *not* an if-and-only-if condition).
- bool requires_named_const_def = false;
-
- /// True if this ID must map to a WGSL variable declaration before the
- /// corresponding position of the ID definition in SPIR-V. This compensates
- /// for the difference between dominance and scoping. An SSA definition can
- /// dominate all its uses, but the construct where it is defined does not
- /// enclose all the uses, and so if it were declared as a WGSL constant
- /// definition at the point of its SPIR-V definition, then the WGSL name
- /// would go out of scope too early. Fix that by creating a variable at the
- /// top of the smallest construct that encloses both the definition and all
- /// its uses. Then the original SPIR-V definition maps to a WGSL assignment
- /// to that variable, and each SPIR-V use becomes a WGSL read from the
- /// variable.
- /// TODO(dneto): This works for constants of storable type, but not, for
- /// example, pointers. crbug.com/tint/98
- bool requires_hoisted_def = false;
-
- /// If the definition is an OpPhi, then `phi_var` is the name of the
- /// variable that stores the value carried from parent basic blocks into
- /// the basic block containing the OpPhi. Otherwise this is the empty string.
- std::string phi_var;
-
- /// The storage class to use for this value, if it is of pointer type.
- /// This is required to carry a storage class override from a storage
- /// buffer expressed in the old style (with Uniform storage class)
- /// that needs to be remapped to StorageBuffer storage class.
- /// This is kInvalid for non-pointers.
- ast::StorageClass storage_class = ast::StorageClass::kInvalid;
-
- /// The expression to use when sinking pointers into their use.
- /// When encountering a use of this instruction, we will emit this expression
- /// instead.
- TypedExpression sink_pointer_source_expr = {};
-
- /// The reason, if any, that this value should be ignored.
- /// Normally no values are ignored. This field can be updated while
- /// generating code because sometimes we only discover necessary facts
- /// in the middle of generating code.
- SkipReason skip = SkipReason::kDontSkip;
+ /// Constructor.
+ /// @param def_inst the SPIR-V instruction defining the ID
+ /// @param block_pos the position of the basic block where the ID is defined.
+ /// @param index an ordering index for this local definition
+ DefInfo(const spvtools::opt::Instruction& def_inst, uint32_t block_pos, size_t index);
+ /// Destructor.
+ ~DefInfo();
+
+ /// The SPIR-V instruction that defines the ID.
+ const spvtools::opt::Instruction& inst;
+ /// The position of the first block in which this ID is visible, in function
+ /// block order. For IDs defined outside of the function, it is 0.
+ /// For IDs defined in the function, it is the position of the block
+ /// containing the definition of the ID.
+ /// See method `FunctionEmitter::ComputeBlockOrderAndPositions`
+ const uint32_t block_pos = 0;
+
+ /// An index for uniquely and deterministically ordering all DefInfo records
+ /// in a function.
+ const size_t index = 0;
+
+ /// The number of uses of this ID.
+ uint32_t num_uses = 0;
+
+ /// The block position of the last use of this ID, or 0 if it is not used
+ /// at all. The "last" ordering is determined by the function block order.
+ uint32_t last_use_pos = 0;
+
+ /// Is this value used in a construct other than the one in which it was
+ /// defined?
+ bool used_in_another_construct = false;
+
+ /// True if this ID requires a WGSL 'const' definition, due to context. It
+ /// might get one anyway (so this is *not* an if-and-only-if condition).
+ bool requires_named_const_def = false;
+
+ /// True if this ID must map to a WGSL variable declaration before the
+ /// corresponding position of the ID definition in SPIR-V. This compensates
+ /// for the difference between dominance and scoping. An SSA definition can
+ /// dominate all its uses, but the construct where it is defined does not
+ /// enclose all the uses, and so if it were declared as a WGSL constant
+ /// definition at the point of its SPIR-V definition, then the WGSL name
+ /// would go out of scope too early. Fix that by creating a variable at the
+ /// top of the smallest construct that encloses both the definition and all
+ /// its uses. Then the original SPIR-V definition maps to a WGSL assignment
+ /// to that variable, and each SPIR-V use becomes a WGSL read from the
+ /// variable.
+ /// TODO(dneto): This works for constants of storable type, but not, for
+ /// example, pointers. crbug.com/tint/98
+ bool requires_hoisted_def = false;
+
+ /// If the definition is an OpPhi, then `phi_var` is the name of the
+ /// variable that stores the value carried from parent basic blocks into
+ /// the basic block containing the OpPhi. Otherwise this is the empty string.
+ std::string phi_var;
+
+ /// The storage class to use for this value, if it is of pointer type.
+ /// This is required to carry a storage class override from a storage
+ /// buffer expressed in the old style (with Uniform storage class)
+ /// that needs to be remapped to StorageBuffer storage class.
+ /// This is kInvalid for non-pointers.
+ ast::StorageClass storage_class = ast::StorageClass::kInvalid;
+
+ /// The expression to use when sinking pointers into their use.
+ /// When encountering a use of this instruction, we will emit this expression
+ /// instead.
+ TypedExpression sink_pointer_source_expr = {};
+
+ /// The reason, if any, that this value should be ignored.
+ /// Normally no values are ignored. This field can be updated while
+ /// generating code because sometimes we only discover necessary facts
+ /// in the middle of generating code.
+ SkipReason skip = SkipReason::kDontSkip;
};
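The requires_hoisted_def comment describes the gap between SSA dominance and lexical scoping. The shape of the generated code is easier to see in a small example; the C++ sketch below mirrors it with invented values: declare the variable at the top of the smallest construct enclosing both the definition and every use, turn the original definition into an assignment, and read the variable at the later uses.

```cpp
// Shape of a hoisted definition: the value is defined inside the 'then'
// branch but used after the merge, so a variable is hoisted to the
// enclosing scope instead of emitting a 'let' at the definition site.
float HoistedDefinitionSketch(bool cond, float a, float b) {
    float v = 0.0f;   // hoisted declaration at the enclosing construct
    if (cond) {
        v = a * b;    // the SPIR-V definition becomes an assignment
    }
    return v + 1.0f;  // uses after the merge read the hoisted variable
}
```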
/// Writes the DefInfo to the ostream
@@ -322,40 +319,39 @@ struct DefInfo {
/// @param di the DefInfo
/// @returns the ostream so calls can be chained
inline std::ostream& operator<<(std::ostream& o, const DefInfo& di) {
- o << "DefInfo{"
- << " inst.result_id: " << di.inst.result_id()
- << " block_pos: " << di.block_pos << " num_uses: " << di.num_uses
- << " last_use_pos: " << di.last_use_pos << " requires_named_const_def: "
- << (di.requires_named_const_def ? "true" : "false")
- << " requires_hoisted_def: " << (di.requires_hoisted_def ? "true" : "false")
- << " phi_var: '" << di.phi_var << "'";
- if (di.storage_class != ast::StorageClass::kNone) {
- o << " sc:" << int(di.storage_class);
- }
- switch (di.skip) {
- case SkipReason::kDontSkip:
- break;
- case SkipReason::kOpaqueObject:
- o << " skip:opaque";
- break;
- case SkipReason::kSinkPointerIntoUse:
- o << " skip:sink_pointer";
- break;
- case SkipReason::kPointSizeBuiltinPointer:
- o << " skip:pointsize_pointer";
- break;
- case SkipReason::kPointSizeBuiltinValue:
- o << " skip:pointsize_value";
- break;
- case SkipReason::kSampleMaskInBuiltinPointer:
- o << " skip:samplemaskin_pointer";
- break;
- case SkipReason::kSampleMaskOutBuiltinPointer:
- o << " skip:samplemaskout_pointer";
- break;
- }
- o << "}";
- return o;
+ o << "DefInfo{"
+ << " inst.result_id: " << di.inst.result_id() << " block_pos: " << di.block_pos
+ << " num_uses: " << di.num_uses << " last_use_pos: " << di.last_use_pos
+ << " requires_named_const_def: " << (di.requires_named_const_def ? "true" : "false")
+ << " requires_hoisted_def: " << (di.requires_hoisted_def ? "true" : "false") << " phi_var: '"
+ << di.phi_var << "'";
+ if (di.storage_class != ast::StorageClass::kNone) {
+ o << " sc:" << int(di.storage_class);
+ }
+ switch (di.skip) {
+ case SkipReason::kDontSkip:
+ break;
+ case SkipReason::kOpaqueObject:
+ o << " skip:opaque";
+ break;
+ case SkipReason::kSinkPointerIntoUse:
+ o << " skip:sink_pointer";
+ break;
+ case SkipReason::kPointSizeBuiltinPointer:
+ o << " skip:pointsize_pointer";
+ break;
+ case SkipReason::kPointSizeBuiltinValue:
+ o << " skip:pointsize_value";
+ break;
+ case SkipReason::kSampleMaskInBuiltinPointer:
+ o << " skip:samplemaskin_pointer";
+ break;
+ case SkipReason::kSampleMaskOutBuiltinPointer:
+ o << " skip:samplemaskout_pointer";
+ break;
+ }
+ o << "}";
+ return o;
}
/// A placeholder Statement that exists for the duration of building a
@@ -367,941 +363,925 @@ inline std::ostream& operator<<(std::ostream& o, const DefInfo& di) {
/// StatementBlock is being constructed, which becomes an immutable node on
/// StatementBlock::Finalize().
class StatementBuilder : public Castable<StatementBuilder, ast::Statement> {
- public:
- /// Constructor
- StatementBuilder() : Base(ProgramID(), Source{}) {}
+ public:
+ /// Constructor
+ StatementBuilder() : Base(ProgramID(), Source{}) {}
- /// @param builder the program builder
- /// @returns the build AST node
- virtual const ast::Statement* Build(ProgramBuilder* builder) const = 0;
+ /// @param builder the program builder
+    /// @returns the built AST node
+ virtual const ast::Statement* Build(ProgramBuilder* builder) const = 0;
- private:
- Node* Clone(CloneContext*) const override;
+ private:
+ Node* Clone(CloneContext*) const override;
};
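The placeholder-then-finalize idea can be sketched outside of Tint as a tiny builder hierarchy: mutable builders sit in the statement list while a block is open, and each is swapped for the immutable node it builds when the block is finalized. None of the types below are Tint's; they only illustrate the shape.

```cpp
#include <memory>
#include <vector>

struct StatementSketch {
    virtual ~StatementSketch() = default;
};

struct BreakStatementSketch : StatementSketch {};

// Mutable placeholder that knows how to produce its immutable replacement.
struct StatementBuilderSketch : StatementSketch {
    virtual std::unique_ptr<StatementSketch> Build() const = 0;
};

struct BreakBuilderSketch : StatementBuilderSketch {
    std::unique_ptr<StatementSketch> Build() const override {
        return std::make_unique<BreakStatementSketch>();
    }
};

// On finalization, every builder in the open block is replaced by the node
// it builds, yielding an immutable statement list.
std::vector<std::unique_ptr<StatementSketch>> FinalizeSketch(
    const std::vector<std::unique_ptr<StatementBuilderSketch>>& builders) {
    std::vector<std::unique_ptr<StatementSketch>> statements;
    for (const auto& b : builders) {
        statements.push_back(b->Build());
    }
    return statements;
}
```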
/// A FunctionEmitter emits a SPIR-V function onto a Tint AST module.
class FunctionEmitter {
- public:
- /// Creates a FunctionEmitter, and prepares to write to the AST module
- /// in `pi`
- /// @param pi a ParserImpl which has already executed BuildInternalModule
- /// @param function the function to emit
- FunctionEmitter(ParserImpl* pi, const spvtools::opt::Function& function);
- /// Creates a FunctionEmitter, and prepares to write to the AST module
- /// in `pi`
- /// @param pi a ParserImpl which has already executed BuildInternalModule
- /// @param function the function to emit
- /// @param ep_info entry point information for this function, or nullptr
- FunctionEmitter(ParserImpl* pi,
- const spvtools::opt::Function& function,
- const EntryPointInfo* ep_info);
- /// Move constructor. Only valid when the other object was newly created.
- /// @param other the emitter to clone
- FunctionEmitter(FunctionEmitter&& other);
- /// Destructor
- ~FunctionEmitter();
-
- /// Emits the function to AST module.
- /// @return whether emission succeeded
- bool Emit();
-
- /// @returns true if emission has not yet failed.
- bool success() const { return fail_stream_.status(); }
- /// @returns true if emission has failed.
- bool failed() const { return !success(); }
-
- /// Finalizes any StatementBuilders returns the body of the function.
- /// Must only be called once, and to be used only for testing.
- /// @returns the body of the function.
- const ast::StatementList ast_body();
-
- /// Records failure.
- /// @returns a FailStream on which to emit diagnostics.
- FailStream& Fail() { return fail_stream_.Fail(); }
-
- /// @returns the parser implementation
- ParserImpl* parser() { return &parser_impl_; }
-
- /// Emits the entry point as a wrapper around its implementation function.
- /// Pipeline inputs become formal parameters, and pipeline outputs become
- /// return values.
- /// @returns false if emission failed.
- bool EmitEntryPointAsWrapper();
-
- /// Creates one or more entry point input parameters corresponding to a
- /// part of an input variable. The part of the input variable is specfied
- /// by the `index_prefix`, which successively indexes into the variable.
- /// Also generates the assignment statements that copy the input parameter
- /// to the corresponding part of the variable. Assumes the variable
- /// has already been created in the Private storage class.
- /// @param var_name The name of the variable
- /// @param var_type The store type of the variable
- /// @param decos The variable's decorations
- /// @param index_prefix Indices stepping into the variable, indicating
- /// what part of the variable to populate.
- /// @param tip_type The type of the component inside variable, after indexing
- /// with the indices in `index_prefix`.
- /// @param forced_param_type The type forced by WGSL, if the variable is a
- /// builtin, otherwise the same as var_type.
- /// @param params The parameter list where the new parameter is appended.
- /// @param statements The statement list where the assignment is appended.
- /// @returns false if emission failed
- bool EmitPipelineInput(std::string var_name,
- const Type* var_type,
- ast::AttributeList* decos,
- std::vector<int> index_prefix,
- const Type* tip_type,
- const Type* forced_param_type,
- ast::VariableList* params,
- ast::StatementList* statements);
-
- /// Creates one or more struct members from an output variable, and the
- /// expressions that compute the value they contribute to the entry point
- /// return value. The part of the output variable is specfied
- /// by the `index_prefix`, which successively indexes into the variable.
- /// Assumes the variable has already been created in the Private storage
- /// class.
- /// @param var_name The name of the variable
- /// @param var_type The store type of the variable
- /// @param decos The variable's decorations
- /// @param index_prefix Indices stepping into the variable, indicating
- /// what part of the variable to populate.
- /// @param tip_type The type of the component inside variable, after indexing
- /// with the indices in `index_prefix`.
- /// @param forced_member_type The type forced by WGSL, if the variable is a
- /// builtin, otherwise the same as var_type.
- /// @param return_members The struct member list where the new member is
- /// added.
- /// @param return_exprs The expression list where the return expression is
- /// added.
- /// @returns false if emission failed
- bool EmitPipelineOutput(std::string var_name,
- const Type* var_type,
- ast::AttributeList* decos,
- std::vector<int> index_prefix,
- const Type* tip_type,
- const Type* forced_member_type,
- ast::StructMemberList* return_members,
- ast::ExpressionList* return_exprs);
-
- /// Updates the attribute list, replacing an existing Location attribute
- /// with another having one higher location value. Does nothing if no
- /// location attribute exists.
- /// Assumes the list contains at most one Location attribute.
- /// @param attributes the attribute list to modify
- void IncrementLocation(ast::AttributeList* attributes);
-
- /// Returns the Location attribute, if it exists.
- /// @param attributes the list of attributes to search
- /// @returns the Location attribute, or nullptr if it doesn't exist
- const ast::Attribute* GetLocation(const ast::AttributeList& attributes);
-
- /// Create an ast::BlockStatement representing the body of the function.
- /// This creates the statement stack, which is non-empty for the lifetime
- /// of the function.
- /// @returns the body of the function, or null on error
- const ast::BlockStatement* MakeFunctionBody();
-
- /// Emits the function body, populating the bottom entry of the statements
- /// stack.
- /// @returns false if emission failed.
- bool EmitBody();
-
- /// Records a mapping from block ID to a BlockInfo struct.
- /// Populates `block_info_`
- void RegisterBasicBlocks();
-
- /// Verifies that terminators only branch to labels in the current function.
- /// Assumes basic blocks have been registered.
- /// @returns true if terminators are valid
- bool TerminatorsAreValid();
-
- /// Populates merge-header cross-links and BlockInfo#is_continue_entire_loop.
- /// Also verifies that merge instructions go to blocks in the same function.
- /// Assumes basic blocks have been registered, and terminators are valid.
- /// @returns false if registration fails
- bool RegisterMerges();
-
- /// Determines the output order for the basic blocks in the function.
- /// Populates `block_order_` and BlockInfo#pos.
- /// Assumes basic blocks have been registered.
- void ComputeBlockOrderAndPositions();
-
- /// @returns the reverse structured post order of the basic blocks in
- /// the function.
- const std::vector<uint32_t>& block_order() const { return block_order_; }
-
- /// Verifies that the orderings among a structured header, continue target,
- /// and merge block are valid. Assumes block order has been computed, and
- /// merges are valid and recorded.
- /// @returns false if invalid nesting was detected
- bool VerifyHeaderContinueMergeOrder();
-
- /// Labels each basic block with its nearest enclosing structured construct.
- /// Populates BlockInfo#construct and the `constructs_` list.
- /// Assumes terminators are valid and merges have been registered, block
- /// order has been computed, and each block is labeled with its position.
- /// Checks nesting of structured control flow constructs.
- /// @returns false if bad nesting has been detected
- bool LabelControlFlowConstructs();
-
- /// @returns the structured constructs
- const ConstructList& constructs() const { return constructs_; }
-
- /// Marks blocks targets of a switch, either as the head of a case or
- /// as the default target.
- /// @returns false on failure
- bool FindSwitchCaseHeaders();
-
- /// Classifies the successor CFG edges for the ordered basic blocks.
- /// Also checks validity of each edge (populates BlockInfo#succ_edge).
- /// Implicitly checks dominance rules for headers and continue constructs.
- /// Assumes each block has been labeled with its control flow construct.
- /// @returns false on failure
- bool ClassifyCFGEdges();
-
- /// Marks the blocks within a selection construct that are the first blocks
- /// in the "then" clause, the "else" clause, and the "premerge" clause.
- /// The head of the premerge clause is the block, if it exists, at which
- /// control flow reconverges from the "then" and "else" clauses, but before
- /// before the merge block for that selection. The existence of a premerge
- /// should be an exceptional case, but is allowed by the structured control
- /// flow rules.
- /// @returns false if bad nesting has been detected.
- bool FindIfSelectionInternalHeaders();
-
- /// Creates a DefInfo record for each module-scope builtin variable
- /// that should be handled specially. Either it's ignored, or its store
- /// type is converted on load.
- /// Populates the `def_info_` mapping for such IDs.
- /// @returns false on failure
- bool RegisterSpecialBuiltInVariables();
-
- /// Creates a DefInfo record for each locally defined SPIR-V ID.
- /// Populates the `def_info_` mapping with basic results for such IDs.
- /// @returns false on failure
- bool RegisterLocallyDefinedValues();
-
- /// Returns the Tint storage class for the given SPIR-V ID that is a
- /// pointer value.
- /// @param id a SPIR-V ID for a pointer value
- /// @returns the storage class
- ast::StorageClass GetStorageClassForPointerValue(uint32_t id);
-
- /// Remaps the storage class for the type of a locally-defined value,
- /// if necessary. If it's not a pointer type, or if its storage class
- /// already matches, then the result is a copy of the `type` argument.
- /// @param type the AST type
- /// @param result_id the SPIR-V ID for the locally defined value
- /// @returns an possibly updated type
- const Type* RemapStorageClass(const Type* type, uint32_t result_id);
-
- /// Marks locally defined values when they should get a 'const'
- /// definition in WGSL, or a 'var' definition at an outer scope.
- /// This occurs in several cases:
- /// - When a SPIR-V instruction might use the dynamically computed value
- /// only once, but the WGSL code might reference it multiple times.
- /// For example, this occurs for the vector operands of OpVectorShuffle.
- /// In this case the definition's DefInfo#requires_named_const_def property
- /// is set to true.
- /// - When a definition and at least one of its uses are not in the
- /// same structured construct.
- /// In this case the definition's DefInfo#requires_named_const_def property
- /// is set to true.
- /// - When a definition is in a construct that does not enclose all the
- /// uses. In this case the definition's DefInfo#requires_hoisted_def
- /// property is set to true.
- /// Updates the `def_info_` mapping.
- void FindValuesNeedingNamedOrHoistedDefinition();
-
- /// Emits declarations of function variables.
- /// @returns false if emission failed.
- bool EmitFunctionVariables();
-
- /// Emits statements in the body.
- /// @returns false if emission failed.
- bool EmitFunctionBodyStatements();
-
- /// Emits a basic block.
- /// @param block_info the block to emit
- /// @returns false if emission failed.
- bool EmitBasicBlock(const BlockInfo& block_info);
-
- /// Emits an IfStatement, including its condition expression, and sets
- /// up the statement stack to accumulate subsequent basic blocks into
- /// the "then" and "else" clauses.
- /// @param block_info the if-selection header block
- /// @returns false if emission failed.
- bool EmitIfStart(const BlockInfo& block_info);
-
- /// Emits a SwitchStatement, including its condition expression, and sets
- /// up the statement stack to accumulate subsequent basic blocks into
- /// the default clause and case clauses.
- /// @param block_info the switch-selection header block
- /// @returns false if emission failed.
- bool EmitSwitchStart(const BlockInfo& block_info);
-
- /// Emits a LoopStatement, and pushes a new StatementBlock to accumulate
- /// the remaining instructions in the current block and subsequent blocks
- /// in the loop.
- /// @param construct the loop construct
- /// @returns false if emission failed.
- bool EmitLoopStart(const Construct* construct);
-
- /// Emits a ContinuingStatement, and pushes a new StatementBlock to accumulate
- /// the remaining instructions in the current block and subsequent blocks
- /// in the continue construct.
- /// @param construct the continue construct
- /// @returns false if emission failed.
- bool EmitContinuingStart(const Construct* construct);
-
- /// Emits the non-control-flow parts of a basic block, but only once.
- /// The `already_emitted` parameter indicates whether the code has already
- /// been emitted, and is used to signal that this invocation actually emitted
- /// it.
- /// @param block_info the block to emit
- /// @param already_emitted the block to emit
- /// @returns false if the code had not yet been emitted, but emission failed
- bool EmitStatementsInBasicBlock(const BlockInfo& block_info,
- bool* already_emitted);
-
- /// Emits code for terminators, but that aren't part of entering or
- /// resolving structured control flow. That is, if the basic block
- /// terminator calls for it, emit the fallthrough, break, continue, return,
- /// or kill commands.
- /// @param block_info the block with the terminator to emit (if any)
- /// @returns false if emission failed
- bool EmitNormalTerminator(const BlockInfo& block_info);
-
- /// Returns a new statement to represent the given branch representing a
- /// "normal" terminator, as in the sense of EmitNormalTerminator. If no
- /// WGSL statement is required, the statement will be nullptr. This method
- /// tries to avoid emitting a 'break' statement when that would be redundant
- /// in WGSL due to implicit breaking out of a switch.
- /// @param src_info the source block
- /// @param dest_info the destination block
- /// @returns the new statement, or a null statement
- const ast::Statement* MakeBranch(const BlockInfo& src_info,
- const BlockInfo& dest_info) const {
- return MakeBranchDetailed(src_info, dest_info, false, nullptr);
- }
-
- /// Returns a new statement to represent the given branch representing a
- /// "normal" terminator, as in the sense of EmitNormalTerminator. If no
- /// WGSL statement is required, the statement will be nullptr.
- /// @param src_info the source block
- /// @param dest_info the destination block
- /// @returns the new statement, or a null statement
- const ast::Statement* MakeForcedBranch(const BlockInfo& src_info,
- const BlockInfo& dest_info) const {
- return MakeBranchDetailed(src_info, dest_info, true, nullptr);
- }
-
- /// Returns a new statement to represent the given branch representing a
- /// "normal" terminator, as in the sense of EmitNormalTerminator. If no
- /// WGSL statement is required, the statement will be nullptr. When `forced`
- /// is false, this method tries to avoid emitting a 'break' statement when
- /// that would be redundant in WGSL due to implicit breaking out of a switch.
- /// When `forced` is true, the method won't try to avoid emitting that break.
- /// If the control flow edge is an if-break for an if-selection with a
- /// control flow guard, then return that guard name via `flow_guard_name_ptr`
- /// when that parameter is not null.
- /// @param src_info the source block
- /// @param dest_info the destination block
- /// @param forced if true, always emit the branch (if it exists in WGSL)
- /// @param flow_guard_name_ptr return parameter for control flow guard name
- /// @returns the new statement, or a null statement
- const ast::Statement* MakeBranchDetailed(
- const BlockInfo& src_info,
- const BlockInfo& dest_info,
- bool forced,
- std::string* flow_guard_name_ptr) const;
-
- /// Returns a new if statement with the given statements as the then-clause
- /// and the else-clause. Either or both clauses might be nullptr. If both
- /// are nullptr, then don't make a new statement and instead return nullptr.
- /// @param condition the branching condition
- /// @param then_stmt the statement for the then clause of the if, or nullptr
- /// @param else_stmt the statement for the else clause of the if, or nullptr
- /// @returns the new statement, or nullptr
- const ast::Statement* MakeSimpleIf(const ast::Expression* condition,
- const ast::Statement* then_stmt,
- const ast::Statement* else_stmt) const;
-
- /// Emits the statements for an normal-terminator OpBranchConditional
- /// where one branch is a case fall through (the true branch if and only
- /// if `fall_through_is_true_branch` is true), and the other branch is
- /// goes to a different destination, named by `other_dest`.
- /// @param src_info the basic block from which we're branching
- /// @param cond the branching condition
- /// @param other_edge_kind the edge kind from the source block to the other
- /// destination
- /// @param other_dest the other branching destination
- /// @param fall_through_is_true_branch true when the fall-through is the true
- /// branch
- /// @returns false if emission fails
- bool EmitConditionalCaseFallThrough(const BlockInfo& src_info,
- const ast::Expression* cond,
- EdgeKind other_edge_kind,
- const BlockInfo& other_dest,
- bool fall_through_is_true_branch);
-
- /// Emits a normal instruction: not a terminator, label, or variable
- /// declaration.
- /// @param inst the instruction
- /// @returns false if emission failed.
- bool EmitStatement(const spvtools::opt::Instruction& inst);
-
- /// Emits a const definition for the typed value in `ast_expr`, and
- /// records it as the translation for the result ID from `inst`.
- /// @param inst the SPIR-V instruction defining the value
- /// @param ast_expr the already-computed AST expression for the value
- /// @returns false if emission failed.
- bool EmitConstDefinition(const spvtools::opt::Instruction& inst,
- TypedExpression ast_expr);
-
- /// Emits a write of the typed value in `ast_expr` to a hoisted variable
- /// for the given SPIR-V ID, if that ID has a hoisted declaration. Otherwise,
- /// emits a const definition instead.
- /// @param inst the SPIR-V instruction defining the value
- /// @param ast_expr the already-computed AST expression for the value
- /// @returns false if emission failed.
- bool EmitConstDefOrWriteToHoistedVar(const spvtools::opt::Instruction& inst,
- TypedExpression ast_expr);
-
- /// If the result ID of the given instruction is hoisted, then emits
- /// a statement to write the expression to the hoisted variable, and
- /// returns true. Otherwise return false.
- /// @param inst the SPIR-V instruction defining a value.
- /// @param ast_expr the expression to assign.
- /// @returns true if the instruction has an associated hoisted variable.
- bool WriteIfHoistedVar(const spvtools::opt::Instruction& inst,
- TypedExpression ast_expr);
-
- /// Makes an expression from a SPIR-V ID.
- /// if the SPIR-V result type is a pointer.
- /// @param id the SPIR-V ID of the value
- /// @returns an AST expression for the instruction, or an invalid
- /// TypedExpression on error.
- TypedExpression MakeExpression(uint32_t id);
-
- /// Creates an expression and supporting statements for a combinatorial
- /// instruction, or returns null. A SPIR-V instruction is combinatorial
- /// if it has no side effects and its result depends only on its operands,
- /// and not on accessing external state like memory or the state of other
- /// invocations. Statements are only created if required to provide values
- /// to the expression. Supporting statements are not required to be
- /// combinatorial.
- /// @param inst a SPIR-V instruction representing an expression
- /// @returns an AST expression for the instruction, or nullptr.
- TypedExpression MaybeEmitCombinatorialValue(
- const spvtools::opt::Instruction& inst);
-
- /// Creates an expression and supporting statements for a GLSL.std.450
- /// extended instruction.
- /// @param inst a SPIR-V OpExtInst instruction from GLSL.std.450
- /// @returns an AST expression for the instruction, or nullptr.
- TypedExpression EmitGlslStd450ExtInst(const spvtools::opt::Instruction& inst);
-
- /// Creates an expression for OpCompositeExtract
- /// @param inst an OpCompositeExtract instruction.
- /// @returns an AST expression for the instruction, or nullptr.
- TypedExpression MakeCompositeExtract(const spvtools::opt::Instruction& inst);
-
- /// Creates an expression for indexing into a composite value. The literal
- /// indices that step into the value start at instruction input operand
- /// `start_index` and run to the end of the instruction.
- /// @param inst the original instruction
- /// @param composite the typed expression for the composite
- /// @param composite_type_id the SPIR-V type ID for the composite
- /// @param index_start the index of the first operand in `inst` that is an
- /// index into the composite type
- /// @returns an AST expression for the decomposed composite, or {} on error
- TypedExpression MakeCompositeValueDecomposition(
- const spvtools::opt::Instruction& inst,
- TypedExpression composite,
- uint32_t composite_type_id,
- int index_start);
-
- /// Creates an expression for OpVectorShuffle
- /// @param inst an OpVectorShuffle instruction.
- /// @returns an AST expression for the instruction, or nullptr.
- TypedExpression MakeVectorShuffle(const spvtools::opt::Instruction& inst);
-
- /// Creates an expression for a numeric conversion.
- /// @param inst a numeric conversion instruction
- /// @returns an AST expression for the instruction, or nullptr.
- TypedExpression MakeNumericConversion(const spvtools::opt::Instruction& inst);
-
- /// Gets the block info for a block ID, if any exists
- /// @param id the SPIR-V ID of the OpLabel instruction starting the block
- /// @returns the block info for the given ID, if it exists, or nullptr
- BlockInfo* GetBlockInfo(uint32_t id) const {
- auto where = block_info_.find(id);
- if (where == block_info_.end()) {
- return nullptr;
+ public:
+ /// Creates a FunctionEmitter, and prepares to write to the AST module
+ /// in `pi`
+ /// @param pi a ParserImpl which has already executed BuildInternalModule
+ /// @param function the function to emit
+ FunctionEmitter(ParserImpl* pi, const spvtools::opt::Function& function);
+ /// Creates a FunctionEmitter, and prepares to write to the AST module
+ /// in `pi`
+ /// @param pi a ParserImpl which has already executed BuildInternalModule
+ /// @param function the function to emit
+ /// @param ep_info entry point information for this function, or nullptr
+ FunctionEmitter(ParserImpl* pi,
+ const spvtools::opt::Function& function,
+ const EntryPointInfo* ep_info);
+ /// Move constructor. Only valid when the other object was newly created.
+ /// @param other the emitter to clone
+ FunctionEmitter(FunctionEmitter&& other);
+ /// Destructor
+ ~FunctionEmitter();
+
+ /// Emits the function to the AST module.
+ /// @return whether emission succeeded
+ bool Emit();
+
+ /// @returns true if emission has not yet failed.
+ bool success() const { return fail_stream_.status(); }
+ /// @returns true if emission has failed.
+ bool failed() const { return !success(); }
+
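The constructors, Emit(), and the success()/failed() accessors above form the public driving surface of the emitter. The following is a minimal usage sketch (an editor's illustration, not part of this patch), assuming this header's declarations are in scope and that `parser` and `fn` come from a ParserImpl that has already executed BuildInternalModule():

    // Editor's sketch: drive a FunctionEmitter for one SPIR-V function.
    bool EmitOneFunction(ParserImpl* parser, const spvtools::opt::Function& fn) {
        FunctionEmitter emitter(parser, fn);  // non-entry-point overload
        if (!emitter.Emit()) {
            // Diagnostics were already recorded on the parser's FailStream via Fail().
            return false;
        }
        return !emitter.failed();
    }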
+ /// Finalizes any StatementBuilders and returns the body of the function.
+ /// Must only be called once, and is intended only for testing.
+ /// @returns the body of the function.
+ const ast::StatementList ast_body();
+
+ /// Records failure.
+ /// @returns a FailStream on which to emit diagnostics.
+ FailStream& Fail() { return fail_stream_.Fail(); }
+
+ /// @returns the parser implementation
+ ParserImpl* parser() { return &parser_impl_; }
+
+ /// Emits the entry point as a wrapper around its implementation function.
+ /// Pipeline inputs become formal parameters, and pipeline outputs become
+ /// return values.
+ /// @returns false if emission failed.
+ bool EmitEntryPointAsWrapper();
+
+ /// Creates one or more entry point input parameters corresponding to a
+ /// part of an input variable. The part of the input variable is specified
+ /// by the `index_prefix`, which successively indexes into the variable.
+ /// Also generates the assignment statements that copy the input parameter
+ /// to the corresponding part of the variable. Assumes the variable
+ /// has already been created in the Private storage class.
+ /// @param var_name The name of the variable
+ /// @param var_type The store type of the variable
+ /// @param decos The variable's decorations
+ /// @param index_prefix Indices stepping into the variable, indicating
+ /// what part of the variable to populate.
+ /// @param tip_type The type of the component inside the variable, after indexing
+ /// with the indices in `index_prefix`.
+ /// @param forced_param_type The type forced by WGSL, if the variable is a
+ /// builtin, otherwise the same as var_type.
+ /// @param params The parameter list where the new parameter is appended.
+ /// @param statements The statement list where the assignment is appended.
+ /// @returns false if emission failed
+ bool EmitPipelineInput(std::string var_name,
+ const Type* var_type,
+ ast::AttributeList* decos,
+ std::vector<int> index_prefix,
+ const Type* tip_type,
+ const Type* forced_param_type,
+ ast::VariableList* params,
+ ast::StatementList* statements);
+
+ /// Creates one or more struct members from an output variable, and the
+ /// expressions that compute the value they contribute to the entry point
+ /// return value. The part of the output variable is specified
+ /// by the `index_prefix`, which successively indexes into the variable.
+ /// Assumes the variable has already been created in the Private storage
+ /// class.
+ /// @param var_name The name of the variable
+ /// @param var_type The store type of the variable
+ /// @param decos The variable's decorations
+ /// @param index_prefix Indices stepping into the variable, indicating
+ /// what part of the variable to populate.
+ /// @param tip_type The type of the component inside the variable, after indexing
+ /// with the indices in `index_prefix`.
+ /// @param forced_member_type The type forced by WGSL, if the variable is a
+ /// builtin, otherwise the same as var_type.
+ /// @param return_members The struct member list where the new member is
+ /// added.
+ /// @param return_exprs The expression list where the return expression is
+ /// added.
+ /// @returns false if emission failed
+ bool EmitPipelineOutput(std::string var_name,
+ const Type* var_type,
+ ast::AttributeList* decos,
+ std::vector<int> index_prefix,
+ const Type* tip_type,
+ const Type* forced_member_type,
+ ast::StructMemberList* return_members,
+ ast::ExpressionList* return_exprs);
+
+ /// Updates the attribute list, replacing an existing Location attribute
+ /// with another having one higher location value. Does nothing if no
+ /// location attribute exists.
+ /// Assumes the list contains at most one Location attribute.
+ /// @param attributes the attribute list to modify
+ void IncrementLocation(ast::AttributeList* attributes);
+
+ /// Returns the Location attribute, if it exists.
+ /// @param attributes the list of attributes to search
+ /// @returns the Location attribute, or nullptr if it doesn't exist
+ const ast::Attribute* GetLocation(const ast::AttributeList& attributes);
+
+ /// Create an ast::BlockStatement representing the body of the function.
+ /// This creates the statement stack, which is non-empty for the lifetime
+ /// of the function.
+ /// @returns the body of the function, or null on error
+ const ast::BlockStatement* MakeFunctionBody();
+
+ /// Emits the function body, populating the bottom entry of the statements
+ /// stack.
+ /// @returns false if emission failed.
+ bool EmitBody();
+
+ /// Records a mapping from block ID to a BlockInfo struct.
+ /// Populates `block_info_`
+ void RegisterBasicBlocks();
+
+ /// Verifies that terminators only branch to labels in the current function.
+ /// Assumes basic blocks have been registered.
+ /// @returns true if terminators are valid
+ bool TerminatorsAreValid();
+
+ /// Populates merge-header cross-links and BlockInfo#is_continue_entire_loop.
+ /// Also verifies that merge instructions go to blocks in the same function.
+ /// Assumes basic blocks have been registered, and terminators are valid.
+ /// @returns false if registration fails
+ bool RegisterMerges();
+
+ /// Determines the output order for the basic blocks in the function.
+ /// Populates `block_order_` and BlockInfo#pos.
+ /// Assumes basic blocks have been registered.
+ void ComputeBlockOrderAndPositions();
+
+ /// @returns the reverse structured post order of the basic blocks in
+ /// the function.
+ const std::vector<uint32_t>& block_order() const { return block_order_; }
+
+ /// Verifies that the orderings among a structured header, continue target,
+ /// and merge block are valid. Assumes block order has been computed, and
+ /// merges are valid and recorded.
+ /// @returns false if invalid nesting was detected
+ bool VerifyHeaderContinueMergeOrder();
+
+ /// Labels each basic block with its nearest enclosing structured construct.
+ /// Populates BlockInfo#construct and the `constructs_` list.
+ /// Assumes terminators are valid and merges have been registered, block
+ /// order has been computed, and each block is labeled with its position.
+ /// Checks nesting of structured control flow constructs.
+ /// @returns false if bad nesting has been detected
+ bool LabelControlFlowConstructs();
+
+ /// @returns the structured constructs
+ const ConstructList& constructs() const { return constructs_; }
+
+ /// Marks the blocks that are targets of a switch, either as the head of a case or
+ /// as the default target.
+ /// @returns false on failure
+ bool FindSwitchCaseHeaders();
+
+ /// Classifies the successor CFG edges for the ordered basic blocks.
+ /// Also checks validity of each edge (populates BlockInfo#succ_edge).
+ /// Implicitly checks dominance rules for headers and continue constructs.
+ /// Assumes each block has been labeled with its control flow construct.
+ /// @returns false on failure
+ bool ClassifyCFGEdges();
+
+ /// Marks the blocks within a selection construct that are the first blocks
+ /// in the "then" clause, the "else" clause, and the "premerge" clause.
+ /// The head of the premerge clause is the block, if it exists, at which
+ /// control flow reconverges from the "then" and "else" clauses, but
+ /// before the merge block for that selection. The existence of a premerge
+ /// should be an exceptional case, but is allowed by the structured control
+ /// flow rules.
+ /// @returns false if bad nesting has been detected.
+ bool FindIfSelectionInternalHeaders();
+
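The "Assumes ..." notes on the analysis methods above imply a fixed ordering of the CFG analysis passes. Below is a hedged sketch of that ordering using only the public methods declared above; `RunCfgAnalyses` is a hypothetical helper name, and the real driver lives in the corresponding .cc file and may differ in detail:

    // Editor's sketch, not part of this patch: pass ordering implied by the
    // "Assumes ..." notes on the declarations above.
    bool RunCfgAnalyses(FunctionEmitter& fe) {
        fe.RegisterBasicBlocks();
        if (!fe.TerminatorsAreValid()) return false;
        if (!fe.RegisterMerges()) return false;
        fe.ComputeBlockOrderAndPositions();
        if (!fe.VerifyHeaderContinueMergeOrder()) return false;
        if (!fe.LabelControlFlowConstructs()) return false;
        if (!fe.FindSwitchCaseHeaders()) return false;
        if (!fe.ClassifyCFGEdges()) return false;
        return fe.FindIfSelectionInternalHeaders();
    }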
+ /// Creates a DefInfo record for each module-scope builtin variable
+ /// that should be handled specially. Either it's ignored, or its store
+ /// type is converted on load.
+ /// Populates the `def_info_` mapping for such IDs.
+ /// @returns false on failure
+ bool RegisterSpecialBuiltInVariables();
+
+ /// Creates a DefInfo record for each locally defined SPIR-V ID.
+ /// Populates the `def_info_` mapping with basic results for such IDs.
+ /// @returns false on failure
+ bool RegisterLocallyDefinedValues();
+
+ /// Returns the Tint storage class for the given SPIR-V ID that is a
+ /// pointer value.
+ /// @param id a SPIR-V ID for a pointer value
+ /// @returns the storage class
+ ast::StorageClass GetStorageClassForPointerValue(uint32_t id);
+
+ /// Remaps the storage class for the type of a locally-defined value,
+ /// if necessary. If it's not a pointer type, or if its storage class
+ /// already matches, then the result is a copy of the `type` argument.
+ /// @param type the AST type
+ /// @param result_id the SPIR-V ID for the locally defined value
+ /// @returns a possibly updated type
+ const Type* RemapStorageClass(const Type* type, uint32_t result_id);
+
+ /// Marks locally defined values when they should get a 'const'
+ /// definition in WGSL, or a 'var' definition at an outer scope.
+ /// This occurs in several cases:
+ /// - When a SPIR-V instruction might use the dynamically computed value
+ /// only once, but the WGSL code might reference it multiple times.
+ /// For example, this occurs for the vector operands of OpVectorShuffle.
+ /// In this case the definition's DefInfo#requires_named_const_def property
+ /// is set to true.
+ /// - When a definition and at least one of its uses are not in the
+ /// same structured construct.
+ /// In this case the definition's DefInfo#requires_named_const_def property
+ /// is set to true.
+ /// - When a definition is in a construct that does not enclose all the
+ /// uses. In this case the definition's DefInfo#requires_hoisted_def
+ /// property is set to true.
+ /// Updates the `def_info_` mapping.
+ void FindValuesNeedingNamedOrHoistedDefinition();
+
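FindValuesNeedingNamedOrHoistedDefinition() records its decisions in the DefInfo flags named above (DefInfo#requires_named_const_def and DefInfo#requires_hoisted_def). The sketch below (an editor's illustration, not part of this patch) shows how such flags are typically consumed when a value is later emitted; the field name is taken from the comment above, and the real logic lives in EmitConstDefOrWriteToHoistedVar() and related methods:

    // Editor's sketch: consuming the DefInfo flags when emitting the value
    // computed by `inst`.
    bool EmitValueSketch(FunctionEmitter& fe,
                         const spvtools::opt::Instruction& inst,
                         TypedExpression expr) {
        if (auto* def = fe.GetDefInfo(inst.result_id())) {
            if (def->requires_hoisted_def) {
                // A `var` was already declared in an enclosing scope; assign to it.
                return fe.WriteIfHoistedVar(inst, expr);
            }
        }
        // Otherwise a const definition at the point of definition suffices.
        return fe.EmitConstDefinition(inst, expr);
    }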
+ /// Emits declarations of function variables.
+ /// @returns false if emission failed.
+ bool EmitFunctionVariables();
+
+ /// Emits statements in the body.
+ /// @returns false if emission failed.
+ bool EmitFunctionBodyStatements();
+
+ /// Emits a basic block.
+ /// @param block_info the block to emit
+ /// @returns false if emission failed.
+ bool EmitBasicBlock(const BlockInfo& block_info);
+
+ /// Emits an IfStatement, including its condition expression, and sets
+ /// up the statement stack to accumulate subsequent basic blocks into
+ /// the "then" and "else" clauses.
+ /// @param block_info the if-selection header block
+ /// @returns false if emission failed.
+ bool EmitIfStart(const BlockInfo& block_info);
+
+ /// Emits a SwitchStatement, including its condition expression, and sets
+ /// up the statement stack to accumulate subsequent basic blocks into
+ /// the default clause and case clauses.
+ /// @param block_info the switch-selection header block
+ /// @returns false if emission failed.
+ bool EmitSwitchStart(const BlockInfo& block_info);
+
+ /// Emits a LoopStatement, and pushes a new StatementBlock to accumulate
+ /// the remaining instructions in the current block and subsequent blocks
+ /// in the loop.
+ /// @param construct the loop construct
+ /// @returns false if emission failed.
+ bool EmitLoopStart(const Construct* construct);
+
+ /// Emits a ContinuingStatement, and pushes a new StatementBlock to accumulate
+ /// the remaining instructions in the current block and subsequent blocks
+ /// in the continue construct.
+ /// @param construct the continue construct
+ /// @returns false if emission failed.
+ bool EmitContinuingStart(const Construct* construct);
+
+ /// Emits the non-control-flow parts of a basic block, but only once.
+ /// The `already_emitted` parameter indicates whether the code has already
+ /// been emitted, and is used to signal that this invocation actually emitted
+ /// it.
+ /// @param block_info the block to emit
+ /// @param already_emitted whether the code has already been emitted; set to
+ /// true once it is emitted
+ /// @returns false if the code had not yet been emitted, but emission failed
+ bool EmitStatementsInBasicBlock(const BlockInfo& block_info, bool* already_emitted);
+
+ /// Emits code for terminators, excluding the parts that enter or resolve
+ /// structured control flow. That is, if the basic block
+ /// terminator calls for it, emit the fallthrough, break, continue, return,
+ /// or kill commands.
+ /// @param block_info the block with the terminator to emit (if any)
+ /// @returns false if emission failed
+ bool EmitNormalTerminator(const BlockInfo& block_info);
+
+ /// Returns a new statement to represent the given branch representing a
+ /// "normal" terminator, as in the sense of EmitNormalTerminator. If no
+ /// WGSL statement is required, the statement will be nullptr. This method
+ /// tries to avoid emitting a 'break' statement when that would be redundant
+ /// in WGSL due to implicit breaking out of a switch.
+ /// @param src_info the source block
+ /// @param dest_info the destination block
+ /// @returns the new statement, or a null statement
+ const ast::Statement* MakeBranch(const BlockInfo& src_info, const BlockInfo& dest_info) const {
+ return MakeBranchDetailed(src_info, dest_info, false, nullptr);
}
- return where->second.get();
- }
-
- /// Is the block, represented by info, in the structured block order?
- /// @param info the block
- /// @returns true if the block is in the structured block order.
- bool IsInBlockOrder(const BlockInfo* info) const {
- return info && info->pos != kInvalidBlockPos;
- }
-
- /// Gets the local definition info for a result ID.
- /// @param id the SPIR-V ID of local definition.
- /// @returns the definition info for the given ID, if it exists, or nullptr
- DefInfo* GetDefInfo(uint32_t id) const {
- auto where = def_info_.find(id);
- if (where == def_info_.end()) {
- return nullptr;
+
+ /// Returns a new statement to represent the given branch representing a
+ /// "normal" terminator, as in the sense of EmitNormalTerminator. If no
+ /// WGSL statement is required, the statement will be nullptr.
+ /// @param src_info the source block
+ /// @param dest_info the destination block
+ /// @returns the new statement, or a null statement
+ const ast::Statement* MakeForcedBranch(const BlockInfo& src_info,
+ const BlockInfo& dest_info) const {
+ return MakeBranchDetailed(src_info, dest_info, true, nullptr);
}
- return where->second.get();
- }
- /// Returns the skip reason for a result ID.
- /// @param id SPIR-V result ID
- /// @returns the skip reason for the given ID, or SkipReason::kDontSkip
- SkipReason GetSkipReason(uint32_t id) const {
- if (auto* def_info = GetDefInfo(id)) {
- return def_info->skip;
+
+ /// Returns a new statement to represent the given branch representing a
+ /// "normal" terminator, as in the sense of EmitNormalTerminator. If no
+ /// WGSL statement is required, the statement will be nullptr. When `forced`
+ /// is false, this method tries to avoid emitting a 'break' statement when
+ /// that would be redundant in WGSL due to implicit breaking out of a switch.
+ /// When `forced` is true, the method won't try to avoid emitting that break.
+ /// If the control flow edge is an if-break for an if-selection with a
+ /// control flow guard, then return that guard name via `flow_guard_name_ptr`
+ /// when that parameter is not null.
+ /// @param src_info the source block
+ /// @param dest_info the destination block
+ /// @param forced if true, always emit the branch (if it exists in WGSL)
+ /// @param flow_guard_name_ptr return parameter for control flow guard name
+ /// @returns the new statement, or a null statement
+ const ast::Statement* MakeBranchDetailed(const BlockInfo& src_info,
+ const BlockInfo& dest_info,
+ bool forced,
+ std::string* flow_guard_name_ptr) const;
+
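MakeBranch() and MakeForcedBranch() above are thin wrappers over MakeBranchDetailed(). The sketch below (an editor's illustration, not part of this patch; `BranchSketch` is a hypothetical name) shows how a caller can also retrieve the optional flow-guard name:

    // Editor's sketch: requesting the flow-guard name along with the branch.
    const ast::Statement* BranchSketch(const FunctionEmitter& fe,
                                       const BlockInfo& src,
                                       const BlockInfo& dest) {
        std::string guard;  // receives the flow-guard variable name, if any
        const ast::Statement* stmt =
            fe.MakeBranchDetailed(src, dest, /*forced=*/false, &guard);
        if (!guard.empty()) {
            // The edge is an if-break with a control-flow guard; a caller would
            // typically wrap the branch in `if (<guard>) { ... }`, e.g. via
            // MakeSimpleIf(), so the break only fires when the guard is set.
        }
        return stmt;
    }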
+ /// Returns a new if statement with the given statements as the then-clause
+ /// and the else-clause. Either or both clauses might be nullptr. If both
+ /// are nullptr, then don't make a new statement and instead return nullptr.
+ /// @param condition the branching condition
+ /// @param then_stmt the statement for the then clause of the if, or nullptr
+ /// @param else_stmt the statement for the else clause of the if, or nullptr
+ /// @returns the new statement, or nullptr
+ const ast::Statement* MakeSimpleIf(const ast::Expression* condition,
+ const ast::Statement* then_stmt,
+ const ast::Statement* else_stmt) const;
+
+ /// Emits the statements for a normal-terminator OpBranchConditional
+ /// where one branch is a case fall through (the true branch if and only
+ /// if `fall_through_is_true_branch` is true), and the other branch
+ /// goes to a different destination, named by `other_dest`.
+ /// @param src_info the basic block from which we're branching
+ /// @param cond the branching condition
+ /// @param other_edge_kind the edge kind from the source block to the other
+ /// destination
+ /// @param other_dest the other branching destination
+ /// @param fall_through_is_true_branch true when the fall-through is the true
+ /// branch
+ /// @returns false if emission fails
+ bool EmitConditionalCaseFallThrough(const BlockInfo& src_info,
+ const ast::Expression* cond,
+ EdgeKind other_edge_kind,
+ const BlockInfo& other_dest,
+ bool fall_through_is_true_branch);
+
+ /// Emits a normal instruction: not a terminator, label, or variable
+ /// declaration.
+ /// @param inst the instruction
+ /// @returns false if emission failed.
+ bool EmitStatement(const spvtools::opt::Instruction& inst);
+
+ /// Emits a const definition for the typed value in `ast_expr`, and
+ /// records it as the translation for the result ID from `inst`.
+ /// @param inst the SPIR-V instruction defining the value
+ /// @param ast_expr the already-computed AST expression for the value
+ /// @returns false if emission failed.
+ bool EmitConstDefinition(const spvtools::opt::Instruction& inst, TypedExpression ast_expr);
+
+ /// Emits a write of the typed value in `ast_expr` to a hoisted variable
+ /// for the given SPIR-V ID, if that ID has a hoisted declaration. Otherwise,
+ /// emits a const definition instead.
+ /// @param inst the SPIR-V instruction defining the value
+ /// @param ast_expr the already-computed AST expression for the value
+ /// @returns false if emission failed.
+ bool EmitConstDefOrWriteToHoistedVar(const spvtools::opt::Instruction& inst,
+ TypedExpression ast_expr);
+
+ /// If the result ID of the given instruction is hoisted, then emits
+ /// a statement to write the expression to the hoisted variable, and
+ /// returns true. Otherwise return false.
+ /// @param inst the SPIR-V instruction defining a value.
+ /// @param ast_expr the expression to assign.
+ /// @returns true if the instruction has an associated hoisted variable.
+ bool WriteIfHoistedVar(const spvtools::opt::Instruction& inst, TypedExpression ast_expr);
+
+ /// Makes an expression from a SPIR-V ID.
+ /// if the SPIR-V result type is a pointer.
+ /// @param id the SPIR-V ID of the value
+ /// @returns an AST expression for the instruction, or an invalid
+ /// TypedExpression on error.
+ TypedExpression MakeExpression(uint32_t id);
+
+ /// Creates an expression and supporting statements for a combinatorial
+ /// instruction, or returns null. A SPIR-V instruction is combinatorial
+ /// if it has no side effects and its result depends only on its operands,
+ /// and not on accessing external state like memory or the state of other
+ /// invocations. Statements are only created if required to provide values
+ /// to the expression. Supporting statements are not required to be
+ /// combinatorial.
+ /// @param inst a SPIR-V instruction representing an expression
+ /// @returns an AST expression for the instruction, or nullptr.
+ TypedExpression MaybeEmitCombinatorialValue(const spvtools::opt::Instruction& inst);
+
+ /// Creates an expression and supporting statements for a GLSL.std.450
+ /// extended instruction.
+ /// @param inst a SPIR-V OpExtInst instruction from GLSL.std.450
+ /// @returns an AST expression for the instruction, or nullptr.
+ TypedExpression EmitGlslStd450ExtInst(const spvtools::opt::Instruction& inst);
+
+ /// Creates an expression for OpCompositeExtract
+ /// @param inst an OpCompositeExtract instruction.
+ /// @returns an AST expression for the instruction, or nullptr.
+ TypedExpression MakeCompositeExtract(const spvtools::opt::Instruction& inst);
+
+ /// Creates an expression for indexing into a composite value. The literal
+ /// indices that step into the value start at instruction input operand
+ /// `index_start` and run to the end of the instruction.
+ /// @param inst the original instruction
+ /// @param composite the typed expression for the composite
+ /// @param composite_type_id the SPIR-V type ID for the composite
+ /// @param index_start the index of the first operand in `inst` that is an
+ /// index into the composite type
+ /// @returns an AST expression for the decomposed composite, or {} on error
+ TypedExpression MakeCompositeValueDecomposition(const spvtools::opt::Instruction& inst,
+ TypedExpression composite,
+ uint32_t composite_type_id,
+ int index_start);
+
+ /// Creates an expression for OpVectorShuffle
+ /// @param inst an OpVectorShuffle instruction.
+ /// @returns an AST expression for the instruction, or nullptr.
+ TypedExpression MakeVectorShuffle(const spvtools::opt::Instruction& inst);
+
+ /// Creates an expression for a numeric conversion.
+ /// @param inst a numeric conversion instruction
+ /// @returns an AST expression for the instruction, or nullptr.
+ TypedExpression MakeNumericConversion(const spvtools::opt::Instruction& inst);
+
+ /// Gets the block info for a block ID, if any exists
+ /// @param id the SPIR-V ID of the OpLabel instruction starting the block
+ /// @returns the block info for the given ID, if it exists, or nullptr
+ BlockInfo* GetBlockInfo(uint32_t id) const {
+ auto where = block_info_.find(id);
+ if (where == block_info_.end()) {
+ return nullptr;
+ }
+ return where->second.get();
+ }
+
+ /// Is the block, represented by info, in the structured block order?
+ /// @param info the block
+ /// @returns true if the block is in the structured block order.
+ bool IsInBlockOrder(const BlockInfo* info) const {
+ return info && info->pos != kInvalidBlockPos;
}
- return SkipReason::kDontSkip;
- }
-
- /// Returns the most deeply nested structured construct which encloses the
- /// WGSL scopes of names declared in both block positions. Each position must
- /// be a valid index into the function block order array.
- /// @param first_pos the first block position
- /// @param last_pos the last block position
- /// @returns the smallest construct containing both positions
- const Construct* GetEnclosingScope(uint32_t first_pos,
- uint32_t last_pos) const;
-
- /// Finds loop construct associated with a continue construct, if it exists.
- /// Returns nullptr if:
- /// - the given construct is not a continue construct
- /// - the continue construct does not have an associated loop construct
- /// (the continue target is also the loop header block)
- /// @param c the continue construct
- /// @returns the associated loop construct, or nullptr
- const Construct* SiblingLoopConstruct(const Construct* c) const;
-
- /// Returns an identifier expression for the swizzle name of the given
- /// index into a vector. Emits an error and returns nullptr if the
- /// index is out of range, i.e. 4 or higher.
- /// @param i index of the subcomponent
- /// @returns the identifier expression for the `i`'th component
- ast::IdentifierExpression* Swizzle(uint32_t i);
-
- /// Returns an identifier expression for the swizzle name of the first
- /// `n` elements of a vector. Emits an error and returns nullptr if `n`
- /// is out of range, i.e. 4 or higher.
- /// @param n the number of components in the swizzle
- /// @returns the swizzle identifier for the first n elements of a vector
- ast::IdentifierExpression* PrefixSwizzle(uint32_t n);
-
- /// Converts SPIR-V image coordinates from an image access instruction
- /// (e.g. OpImageSampledImplicitLod) into an expression list consisting of
- /// the texture coordinates, and an integral array index if the texture is
- /// arrayed. The texture coordinate is a scalar for 1D textures, a vector of
- /// 2 elements for a 2D texture, and a vector of 3 elements for a 3D or
- /// Cube texture. Excess components are ignored, e.g. if the SPIR-V
- /// coordinate is a 4-element vector but the image is a 2D non-arrayed
- /// texture then the 3rd and 4th components are ignored.
- /// On failure, issues an error and returns an empty expression list.
- /// @param image_access the image access instruction
- /// @returns an ExpressionList of the coordinate and array index (if any)
- ast::ExpressionList MakeCoordinateOperandsForImageAccess(
- const spvtools::opt::Instruction& image_access);
-
- /// Returns the given value as an I32. If it's already an I32 then this
- /// returns the given value. Otherwise, wrap the value in a TypeConstructor
- /// expression.
- /// @param value the value to pass through or convert
- /// @returns the value as an I32 value.
- TypedExpression ToI32(TypedExpression value);
-
- /// Returns the given value as a signed integer type of the same shape
- /// if the value is an unsigned scalar or vector, by wrapping the value
- /// with a TypeConstructor expression. Returns the value itself
- /// otherwise.
- /// @param value the value to pass through or convert
- /// @returns the value itself, or converted to signed integral
- TypedExpression ToSignedIfUnsigned(TypedExpression value);
-
- /// @param value_id the value identifier to check
- /// @returns true if the given SPIR-V id represents a constant float 0.
- bool IsFloatZero(uint32_t value_id);
- /// @param value_id the value identifier to check
- /// @returns true if the given SPIR-V id represents a constant float 1.
- bool IsFloatOne(uint32_t value_id);
-
- private:
- /// FunctionDeclaration contains the parsed information for a function header.
- struct FunctionDeclaration {
- /// Constructor
- FunctionDeclaration();
- /// Destructor
- ~FunctionDeclaration();
-
- /// Parsed header source
- Source source;
- /// Function name
- std::string name;
- /// Function parameters
- ast::VariableList params;
- /// Function return type
- const Type* return_type;
- /// Function attributes
- ast::AttributeList attributes;
- };
-
- /// Parse the function declaration, which comprises the name, parameters, and
- /// return type, populating `decl`.
- /// @param decl the FunctionDeclaration to populate
- /// @returns true if emission has not yet failed.
- bool ParseFunctionDeclaration(FunctionDeclaration* decl);
-
- /// @returns the store type for the OpVariable instruction, or
- /// null on failure.
- const Type* GetVariableStoreType(
- const spvtools::opt::Instruction& var_decl_inst);
-
- /// Returns an expression for an instruction operand. Signedness conversion is
- /// performed to match the result type of the SPIR-V instruction.
- /// @param inst the SPIR-V instruction
- /// @param operand_index the index of the operand, counting 0 as the first
- /// input operand
- /// @returns a new expression node
- TypedExpression MakeOperand(const spvtools::opt::Instruction& inst,
- uint32_t operand_index);
-
- /// Copies a typed expression to the result, but when the type is a pointer
- /// or reference type, ensures the storage class is not defaulted. That is,
- /// it changes a storage class of "none" to "function".
- /// @param expr a typed expression
- /// @returns a copy of the expression, with possibly updated type
- TypedExpression InferFunctionStorageClass(TypedExpression expr);
-
- /// Returns an expression for a SPIR-V OpFMod instruction.
- /// @param inst the SPIR-V instruction
- /// @returns an expression
- TypedExpression MakeFMod(const spvtools::opt::Instruction& inst);
-
- /// Returns an expression for a SPIR-V OpAccessChain or OpInBoundsAccessChain
- /// instruction.
- /// @param inst the SPIR-V instruction
- /// @returns an expression
- TypedExpression MakeAccessChain(const spvtools::opt::Instruction& inst);
-
- /// Emits a function call. On failure, emits a diagnostic and returns false.
- /// @param inst the SPIR-V function call instruction
- /// @returns false if emission failed
- bool EmitFunctionCall(const spvtools::opt::Instruction& inst);
-
- /// Emits a control barrier builtin. On failure, emits a diagnostic and
- /// returns false.
- /// @param inst the SPIR-V control barrier instruction
- /// @returns false if emission failed
- bool EmitControlBarrier(const spvtools::opt::Instruction& inst);
-
- /// Returns an expression for a SPIR-V instruction that maps to a WGSL
- /// builtin function call.
- /// @param inst the SPIR-V instruction
- /// @returns an expression
- TypedExpression MakeBuiltinCall(const spvtools::opt::Instruction& inst);
-
- /// Returns an expression for a SPIR-V OpArrayLength instruction.
- /// @param inst the SPIR-V instruction
- /// @returns an expression
- TypedExpression MakeArrayLength(const spvtools::opt::Instruction& inst);
-
- /// Generates an expression for a SPIR-V OpOuterProduct instruction.
- /// @param inst the SPIR-V instruction
- /// @returns an expression
- TypedExpression MakeOuterProduct(const spvtools::opt::Instruction& inst);
-
- /// Generates statements for a SPIR-V OpVectorInsertDynamic instruction.
- /// Registers a const declaration for the result.
- /// @param inst the SPIR-V instruction
- /// @returns an expression
- bool MakeVectorInsertDynamic(const spvtools::opt::Instruction& inst);
-
- /// Generates statements for a SPIR-V OpComposite instruction.
- /// Registers a const declaration for the result.
- /// @param inst the SPIR-V instruction
- /// @returns an expression
- bool MakeCompositeInsert(const spvtools::opt::Instruction& inst);
-
- /// Get the SPIR-V instruction for the image memory object declaration for
- /// the image operand to the given instruction.
- /// @param inst the SPIR-V instruction
- /// @returns a SPIR-V OpVariable or OpFunctionParameter instruction, or null
- /// on error
- const spvtools::opt::Instruction* GetImage(
- const spvtools::opt::Instruction& inst);
-
- /// Get the AST texture the SPIR-V image memory object declaration.
- /// @param inst the SPIR-V memory object declaration for the image.
- /// @returns a texture type, or null on error
- const Texture* GetImageType(const spvtools::opt::Instruction& inst);
-
- /// Get the expression for the image operand from the first operand to the
- /// given instruction.
- /// @param inst the SPIR-V instruction
- /// @returns an identifier expression, or null on error
- const ast::Expression* GetImageExpression(
- const spvtools::opt::Instruction& inst);
-
- /// Get the expression for the sampler operand from the first operand to the
- /// given instruction.
- /// @param inst the SPIR-V instruction
- /// @returns an identifier expression, or null on error
- const ast::Expression* GetSamplerExpression(
- const spvtools::opt::Instruction& inst);
-
- /// Emits a texture builtin function call for a SPIR-V instruction that
- /// accesses an image or sampled image.
- /// @param inst the SPIR-V instruction
- /// @returns an expression
- bool EmitImageAccess(const spvtools::opt::Instruction& inst);
-
- /// Emits statements to implement a SPIR-V image query.
- /// @param inst the SPIR-V instruction
- /// @returns an expression
- bool EmitImageQuery(const spvtools::opt::Instruction& inst);
-
- /// Converts the given texel to match the type required for the storage
- /// texture with the given type. In WGSL the texel value is always provided
- /// as a 4-element vector, but the component type is determined by the
- /// texel channel type. See "Texel Formats for Storage Textures" in the WGSL
- /// spec. Returns an expression, or emits an error and returns nullptr.
- /// @param inst the image access instruction (used for diagnostics)
- /// @param texel the texel
- /// @param texture_type the type of the storage texture
- /// @returns the texel, after necessary conversion.
- const ast::Expression* ConvertTexelForStorage(
- const spvtools::opt::Instruction& inst,
- TypedExpression texel,
- const Texture* texture_type);
-
- /// Returns an expression for an OpSelect, if its operands are scalars
- /// or vectors. These translate directly to WGSL select. Otherwise, return
- /// an expression with a null owned expression
- /// @param inst the SPIR-V OpSelect instruction
- /// @returns a typed expression, or one with a null owned expression
- TypedExpression MakeSimpleSelect(const spvtools::opt::Instruction& inst);
-
- /// Finds the header block for a structured construct that we can "break"
- /// out from, from deeply nested control flow, if such a block exists.
- /// If the construct is:
- /// - a switch selection: return the selection header (ending in OpSwitch)
- /// - a loop construct: return the loop header block
- /// - a continue construct: return the loop header block
- /// Otherwise, return nullptr.
- /// @param c a structured construct, or nullptr
- /// @returns the block info for the structured header we can "break" from,
- /// or nullptr
- BlockInfo* HeaderIfBreakable(const Construct* c);
-
- /// Appends a new statement to the top of the statement stack.
- /// Does nothing if the statement is null.
- /// @param statement the new statement
- /// @returns a pointer to the statement.
- const ast::Statement* AddStatement(const ast::Statement* statement);
-
- /// AddStatementBuilder() constructs and adds the StatementBuilder of type
- /// `T` to the top of the statement stack.
- /// @param args the arguments forwarded to the T constructor
- /// @return the built StatementBuilder
- template <typename T, typename... ARGS>
- T* AddStatementBuilder(ARGS&&... args) {
- TINT_ASSERT(Reader, !statements_stack_.empty());
- return statements_stack_.back().AddStatementBuilder<T>(
- std::forward<ARGS>(args)...);
- }
-
- /// Returns the source record for the given instruction.
- /// @param inst the SPIR-V instruction
- /// @return the Source record, or a default one
- Source GetSourceForInst(const spvtools::opt::Instruction& inst) const;
-
- /// @returns the last statement in the top of the statement stack.
- const ast::Statement* LastStatement();
-
- using CompletionAction = std::function<void(const ast::StatementList&)>;
-
- // A StatementBlock represents a braced-list of statements while it is being
- // constructed.
- class StatementBlock {
- public:
- StatementBlock(const Construct* construct,
- uint32_t end_id,
- CompletionAction completion_action);
- StatementBlock(StatementBlock&&);
- ~StatementBlock();
-
- StatementBlock(const StatementBlock&) = delete;
- StatementBlock& operator=(const StatementBlock&) = delete;
-
- /// Replaces any StatementBuilders with the built result, and calls the
- /// completion callback (if set). Must only be called once, after all
- /// statements have been added with Add().
- /// @param builder the program builder
- void Finalize(ProgramBuilder* builder);
- /// Add() adds `statement` to the block.
- /// Add() must not be called after calling Finalize().
- void Add(const ast::Statement* statement);
+ /// Gets the local definition info for a result ID.
+ /// @param id the SPIR-V ID of local definition.
+ /// @returns the definition info for the given ID, if it exists, or nullptr
+ DefInfo* GetDefInfo(uint32_t id) const {
+ auto where = def_info_.find(id);
+ if (where == def_info_.end()) {
+ return nullptr;
+ }
+ return where->second.get();
+ }
+ /// Returns the skip reason for a result ID.
+ /// @param id SPIR-V result ID
+ /// @returns the skip reason for the given ID, or SkipReason::kDontSkip
+ SkipReason GetSkipReason(uint32_t id) const {
+ if (auto* def_info = GetDefInfo(id)) {
+ return def_info->skip;
+ }
+ return SkipReason::kDontSkip;
+ }
+
+ /// Returns the most deeply nested structured construct which encloses the
+ /// WGSL scopes of names declared in both block positions. Each position must
+ /// be a valid index into the function block order array.
+ /// @param first_pos the first block position
+ /// @param last_pos the last block position
+ /// @returns the smallest construct containing both positions
+ const Construct* GetEnclosingScope(uint32_t first_pos, uint32_t last_pos) const;
+
+ /// Finds loop construct associated with a continue construct, if it exists.
+ /// Returns nullptr if:
+ /// - the given construct is not a continue construct
+ /// - the continue construct does not have an associated loop construct
+ /// (the continue target is also the loop header block)
+ /// @param c the continue construct
+ /// @returns the associated loop construct, or nullptr
+ const Construct* SiblingLoopConstruct(const Construct* c) const;
+
+ /// Returns an identifier expression for the swizzle name of the given
+ /// index into a vector. Emits an error and returns nullptr if the
+ /// index is out of range, i.e. 4 or higher.
+ /// @param i index of the subcomponent
+ /// @returns the identifier expression for the `i`'th component
+ ast::IdentifierExpression* Swizzle(uint32_t i);
+
+ /// Returns an identifier expression for the swizzle name of the first
+ /// `n` elements of a vector. Emits an error and returns nullptr if `n`
+ /// is out of range, i.e. 4 or higher.
+ /// @param n the number of components in the swizzle
+ /// @returns the swizzle identifier for the first n elements of a vector
+ ast::IdentifierExpression* PrefixSwizzle(uint32_t n);
+
+ /// Converts SPIR-V image coordinates from an image access instruction
+ /// (e.g. OpImageSampledImplicitLod) into an expression list consisting of
+ /// the texture coordinates, and an integral array index if the texture is
+ /// arrayed. The texture coordinate is a scalar for 1D textures, a vector of
+ /// 2 elements for a 2D texture, and a vector of 3 elements for a 3D or
+ /// Cube texture. Excess components are ignored, e.g. if the SPIR-V
+ /// coordinate is a 4-element vector but the image is a 2D non-arrayed
+ /// texture then the 3rd and 4th components are ignored.
+ /// On failure, issues an error and returns an empty expression list.
+ /// @param image_access the image access instruction
+ /// @returns an ExpressionList of the coordinate and array index (if any)
+ ast::ExpressionList MakeCoordinateOperandsForImageAccess(
+ const spvtools::opt::Instruction& image_access);
+
+ /// Returns the given value as an I32. If it's already an I32 then this
+ /// returns the given value. Otherwise, wrap the value in a TypeConstructor
+ /// expression.
+ /// @param value the value to pass through or convert
+ /// @returns the value as an I32 value.
+ TypedExpression ToI32(TypedExpression value);
+
+ /// Returns the given value as a signed integer type of the same shape
+ /// if the value is an unsigned scalar or vector, by wrapping the value
+ /// with a TypeConstructor expression. Returns the value itself
+ /// otherwise.
+ /// @param value the value to pass through or convert
+ /// @returns the value itself, or converted to signed integral
+ TypedExpression ToSignedIfUnsigned(TypedExpression value);
+
+ /// @param value_id the value identifier to check
+ /// @returns true if the given SPIR-V id represents a constant float 0.
+ bool IsFloatZero(uint32_t value_id);
+ /// @param value_id the value identifier to check
+ /// @returns true if the given SPIR-V id represents a constant float 1.
+ bool IsFloatOne(uint32_t value_id);
+
+ private:
+ /// FunctionDeclaration contains the parsed information for a function header.
+ struct FunctionDeclaration {
+ /// Constructor
+ FunctionDeclaration();
+ /// Destructor
+ ~FunctionDeclaration();
+
+ /// Parsed header source
+ Source source;
+ /// Function name
+ std::string name;
+ /// Function parameters
+ ast::VariableList params;
+ /// Function return type
+ const Type* return_type;
+ /// Function attributes
+ ast::AttributeList attributes;
+ };
+
+ /// Parse the function declaration, which comprises the name, parameters, and
+ /// return type, populating `decl`.
+ /// @param decl the FunctionDeclaration to populate
+ /// @returns true if emission has not yet failed.
+ bool ParseFunctionDeclaration(FunctionDeclaration* decl);
+
+ /// @returns the store type for the OpVariable instruction, or
+ /// null on failure.
+ const Type* GetVariableStoreType(const spvtools::opt::Instruction& var_decl_inst);
+
+ /// Returns an expression for an instruction operand. Signedness conversion is
+ /// performed to match the result type of the SPIR-V instruction.
+ /// @param inst the SPIR-V instruction
+ /// @param operand_index the index of the operand, counting 0 as the first
+ /// input operand
+ /// @returns a new expression node
+ TypedExpression MakeOperand(const spvtools::opt::Instruction& inst, uint32_t operand_index);
+
+ /// Copies a typed expression to the result, but when the type is a pointer
+ /// or reference type, ensures the storage class is not defaulted. That is,
+ /// it changes a storage class of "none" to "function".
+ /// @param expr a typed expression
+ /// @returns a copy of the expression, with possibly updated type
+ TypedExpression InferFunctionStorageClass(TypedExpression expr);
+
+ /// Returns an expression for a SPIR-V OpFMod instruction.
+ /// @param inst the SPIR-V instruction
+ /// @returns an expression
+ TypedExpression MakeFMod(const spvtools::opt::Instruction& inst);
+
+ /// Returns an expression for a SPIR-V OpAccessChain or OpInBoundsAccessChain
+ /// instruction.
+ /// @param inst the SPIR-V instruction
+ /// @returns an expression
+ TypedExpression MakeAccessChain(const spvtools::opt::Instruction& inst);
+
+ /// Emits a function call. On failure, emits a diagnostic and returns false.
+ /// @param inst the SPIR-V function call instruction
+ /// @returns false if emission failed
+ bool EmitFunctionCall(const spvtools::opt::Instruction& inst);
+
+ /// Emits a control barrier builtin. On failure, emits a diagnostic and
+ /// returns false.
+ /// @param inst the SPIR-V control barrier instruction
+ /// @returns false if emission failed
+ bool EmitControlBarrier(const spvtools::opt::Instruction& inst);
+
+ /// Returns an expression for a SPIR-V instruction that maps to a WGSL
+ /// builtin function call.
+ /// @param inst the SPIR-V instruction
+ /// @returns an expression
+ TypedExpression MakeBuiltinCall(const spvtools::opt::Instruction& inst);
+
+ /// Returns an expression for a SPIR-V OpArrayLength instruction.
+ /// @param inst the SPIR-V instruction
+ /// @returns an expression
+ TypedExpression MakeArrayLength(const spvtools::opt::Instruction& inst);
+
+ /// Generates an expression for a SPIR-V OpOuterProduct instruction.
+ /// @param inst the SPIR-V instruction
+ /// @returns an expression
+ TypedExpression MakeOuterProduct(const spvtools::opt::Instruction& inst);
+
+ /// Generates statements for a SPIR-V OpVectorInsertDynamic instruction.
+ /// Registers a const declaration for the result.
+ /// @param inst the SPIR-V instruction
+ /// @returns false if emission failed
+ bool MakeVectorInsertDynamic(const spvtools::opt::Instruction& inst);
+
+ /// Generates statements for a SPIR-V OpCompositeInsert instruction.
+ /// Registers a const declaration for the result.
+ /// @param inst the SPIR-V instruction
+ /// @returns false if emission failed
+ bool MakeCompositeInsert(const spvtools::opt::Instruction& inst);
+
+ /// Get the SPIR-V instruction for the image memory object declaration for
+ /// the image operand to the given instruction.
+ /// @param inst the SPIR-V instruction
+ /// @returns a SPIR-V OpVariable or OpFunctionParameter instruction, or null
+ /// on error
+ const spvtools::opt::Instruction* GetImage(const spvtools::opt::Instruction& inst);
+
+ /// Get the AST texture type for the SPIR-V image memory object declaration.
+ /// @param inst the SPIR-V memory object declaration for the image.
+ /// @returns a texture type, or null on error
+ const Texture* GetImageType(const spvtools::opt::Instruction& inst);
+
+ /// Get the expression for the image operand from the first operand to the
+ /// given instruction.
+ /// @param inst the SPIR-V instruction
+ /// @returns an identifier expression, or null on error
+ const ast::Expression* GetImageExpression(const spvtools::opt::Instruction& inst);
+
+ /// Get the expression for the sampler operand from the first operand to the
+ /// given instruction.
+ /// @param inst the SPIR-V instruction
+ /// @returns an identifier expression, or null on error
+ const ast::Expression* GetSamplerExpression(const spvtools::opt::Instruction& inst);
+
+ /// Emits a texture builtin function call for a SPIR-V instruction that
+ /// accesses an image or sampled image.
+ /// @param inst the SPIR-V instruction
+ /// @returns false if emission failed
+ bool EmitImageAccess(const spvtools::opt::Instruction& inst);
+
+ /// Emits statements to implement a SPIR-V image query.
+ /// @param inst the SPIR-V instruction
+ /// @returns false if emission failed
+ bool EmitImageQuery(const spvtools::opt::Instruction& inst);
+
+ /// Converts the given texel to match the type required for the storage
+ /// texture with the given type. In WGSL the texel value is always provided
+ /// as a 4-element vector, but the component type is determined by the
+ /// texel channel type. See "Texel Formats for Storage Textures" in the WGSL
+ /// spec. Returns an expression, or emits an error and returns nullptr.
+ /// @param inst the image access instruction (used for diagnostics)
+ /// @param texel the texel
+ /// @param texture_type the type of the storage texture
+ /// @returns the texel, after necessary conversion.
+ const ast::Expression* ConvertTexelForStorage(const spvtools::opt::Instruction& inst,
+ TypedExpression texel,
+ const Texture* texture_type);
+
+ /// Returns an expression for an OpSelect, if its operands are scalars
+ /// or vectors. These translate directly to WGSL select. Otherwise, return
+ /// an expression with a null owned expression
+ /// @param inst the SPIR-V OpSelect instruction
+ /// @returns a typed expression, or one with a null owned expression
+ TypedExpression MakeSimpleSelect(const spvtools::opt::Instruction& inst);
+
+ /// Finds the header block for a structured construct that we can "break"
+ /// out from, from deeply nested control flow, if such a block exists.
+ /// If the construct is:
+ /// - a switch selection: return the selection header (ending in OpSwitch)
+ /// - a loop construct: return the loop header block
+ /// - a continue construct: return the loop header block
+ /// Otherwise, return nullptr.
+ /// @param c a structured construct, or nullptr
+ /// @returns the block info for the structured header we can "break" from,
+ /// or nullptr
+ BlockInfo* HeaderIfBreakable(const Construct* c);
+
+ /// Appends a new statement to the top of the statement stack.
+ /// Does nothing if the statement is null.
+ /// @param statement the new statement
+ /// @returns a pointer to the statement.
+ const ast::Statement* AddStatement(const ast::Statement* statement);
/// AddStatementBuilder() constructs and adds the StatementBuilder of type
- /// `T` to the block.
- /// Add() must not be called after calling Finalize().
+ /// `T` to the top of the statement stack.
/// @param args the arguments forwarded to the T constructor
/// @return the built StatementBuilder
template <typename T, typename... ARGS>
T* AddStatementBuilder(ARGS&&... args) {
- auto builder = std::make_unique<T>(std::forward<ARGS>(args)...);
- auto* ptr = builder.get();
- Add(ptr);
- builders_.emplace_back(std::move(builder));
- return ptr;
+ TINT_ASSERT(Reader, !statements_stack_.empty());
+ return statements_stack_.back().AddStatementBuilder<T>(std::forward<ARGS>(args)...);
+ }
+
+ /// Returns the source record for the given instruction.
+ /// @param inst the SPIR-V instruction
+ /// @return the Source record, or a default one
+ Source GetSourceForInst(const spvtools::opt::Instruction& inst) const;
+
+ /// @returns the last statement in the top of the statement stack.
+ const ast::Statement* LastStatement();
+
+ using CompletionAction = std::function<void(const ast::StatementList&)>;
+
+ // A StatementBlock represents a braced-list of statements while it is being
+ // constructed.
+ class StatementBlock {
+ public:
+ StatementBlock(const Construct* construct,
+ uint32_t end_id,
+ CompletionAction completion_action);
+ StatementBlock(StatementBlock&&);
+ ~StatementBlock();
+
+ StatementBlock(const StatementBlock&) = delete;
+ StatementBlock& operator=(const StatementBlock&) = delete;
+
+ /// Replaces any StatementBuilders with the built result, and calls the
+ /// completion callback (if set). Must only be called once, after all
+ /// statements have been added with Add().
+ /// @param builder the program builder
+ void Finalize(ProgramBuilder* builder);
+
+ /// Add() adds `statement` to the block.
+ /// Add() must not be called after calling Finalize().
+ void Add(const ast::Statement* statement);
+
+ /// AddStatementBuilder() constructs and adds the StatementBuilder of type
+ /// `T` to the block.
+ /// Add() must not be called after calling Finalize().
+ /// @param args the arguments forwarded to the T constructor
+ /// @return the built StatementBuilder
+ template <typename T, typename... ARGS>
+ T* AddStatementBuilder(ARGS&&... args) {
+ auto builder = std::make_unique<T>(std::forward<ARGS>(args)...);
+ auto* ptr = builder.get();
+ Add(ptr);
+ builders_.emplace_back(std::move(builder));
+ return ptr;
+ }
+
+ /// @param construct the construct to which this statement block contributes
+ void SetConstruct(const Construct* construct) { construct_ = construct; }
+
+ /// @return the construct to which this statement block contributes
+ const Construct* GetConstruct() const { return construct_; }
+
+ /// @return the ID of the block at which the completion action should be
+ /// triggered and this statement block discarded. This is often the `end_id`
+ /// of `construct` itself.
+ uint32_t GetEndId() const { return end_id_; }
+
+ /// @return the list of statements being built, if this construct is not a
+ /// switch.
+ const ast::StatementList& GetStatements() const { return statements_; }
+
+ private:
+ /// The construct to which this statement block contributes.
+ const Construct* construct_;
+ /// The ID of the block at which the completion action should be triggered
+ /// and this statement block discarded. This is often the `end_id` of
+ /// `construct` itself.
+ const uint32_t end_id_;
+ /// The completion action finishes processing this statement block.
+ FunctionEmitter::CompletionAction const completion_action_;
+ /// The list of statements being built, if this construct is not a switch.
+ ast::StatementList statements_;
+
+ /// Owned statement builders
+ std::vector<std::unique_ptr<StatementBuilder>> builders_;
+ /// True if Finalize() has been called.
+ bool finalized_ = false;
+ };
+
+ /// Pushes an empty statement block onto the statements stack.
+    /// @param construct the construct for the new statement block
+    /// @param end_id the ID of the block at which the completion action is triggered
+    /// @param action the completion action for this block
+ void PushNewStatementBlock(const Construct* construct,
+ uint32_t end_id,
+ CompletionAction action);
+
+ /// Emits an if-statement whose condition is the given flow guard
+ /// variable, and pushes onto the statement stack the corresponding
+    /// statement block ending at (and not including) the given block.
+ /// @param flow_guard name of the flow guard variable
+    /// @param end_id the ID of the first block after the if construct.
+ void PushGuard(const std::string& flow_guard, uint32_t end_id);
+
+    /// Emits an if-statement with a 'true' condition, and pushes onto the
+    /// statement stack the corresponding statement block ending at (and not
+ /// including) the given block.
+    /// @param end_id the ID of the first block after the if construct.
+ void PushTrueGuard(uint32_t end_id);
+
+ /// @returns a boolean true expression.
+ const ast::Expression* MakeTrue(const Source&) const;
+
+ /// @returns a boolean false expression.
+ const ast::Expression* MakeFalse(const Source&) const;
+
+ /// @param expr the expression to take the address of
+ /// @returns a TypedExpression that is the address-of `expr` (`&expr`)
+ /// @note `expr` must be a reference type
+ TypedExpression AddressOf(TypedExpression expr);
+
+    /// Returns AddressOf(expr) if expr has a reference type and
+ /// the instruction has a pointer result type. Otherwise returns expr.
+ /// @param expr the expression to take the address of
+ /// @returns a TypedExpression that is the address-of `expr` (`&expr`)
+ /// @note `expr` must be a reference type
+ TypedExpression AddressOfIfNeeded(TypedExpression expr, const spvtools::opt::Instruction* inst);
+
+ /// @param expr the expression to dereference
+ /// @returns a TypedExpression that is the dereference-of `expr` (`*expr`)
+ /// @note `expr` must be a pointer type
+ TypedExpression Dereference(TypedExpression expr);
+
+ /// Creates a new `ast::Node` owned by the ProgramBuilder.
+ /// @param args the arguments to pass to the type constructor
+ /// @returns the node pointer
+ template <typename T, typename... ARGS>
+ T* create(ARGS&&... args) const {
+ return builder_.create<T>(std::forward<ARGS>(args)...);
}
- /// @param construct the construct which this construct constributes to
- void SetConstruct(const Construct* construct) { construct_ = construct; }
-
- /// @return the construct to which this construct constributes
- const Construct* GetConstruct() const { return construct_; }
-
- /// @return the ID of the block at which the completion action should be
- /// triggered and this statement block discarded. This is often the `end_id`
- /// of `construct` itself.
- uint32_t GetEndId() const { return end_id_; }
-
- /// @return the list of statements being built, if this construct is not a
- /// switch.
- const ast::StatementList& GetStatements() const { return statements_; }
-
- private:
- /// The construct to which this construct constributes.
- const Construct* construct_;
- /// The ID of the block at which the completion action should be triggered
- /// and this statement block discarded. This is often the `end_id` of
- /// `construct` itself.
- const uint32_t end_id_;
- /// The completion action finishes processing this statement block.
- FunctionEmitter::CompletionAction const completion_action_;
- /// The list of statements being built, if this construct is not a switch.
- ast::StatementList statements_;
-
- /// Owned statement builders
- std::vector<std::unique_ptr<StatementBuilder>> builders_;
- /// True if Finalize() has been called.
- bool finalized_ = false;
- };
-
- /// Pushes an empty statement block onto the statements stack.
- /// @param action the completion action for this block
- void PushNewStatementBlock(const Construct* construct,
- uint32_t end_id,
- CompletionAction action);
-
- /// Emits an if-statement whose condition is the given flow guard
- /// variable, and pushes onto the statement stack the corresponding
- /// statement block ending (and not including) the given block.
- /// @param flow_guard name of the flow guard variable
- /// @param end_id first block after the if construct.
- void PushGuard(const std::string& flow_guard, uint32_t end_id);
-
- /// Emits an if-statement with 'true' condition, and pushes onto the
- /// statement stack the corresponding statement block ending (and not
- /// including) the given block.
- /// @param end_id first block after the if construct.
- void PushTrueGuard(uint32_t end_id);
-
- /// @returns a boolean true expression.
- const ast::Expression* MakeTrue(const Source&) const;
-
- /// @returns a boolean false expression.
- const ast::Expression* MakeFalse(const Source&) const;
-
- /// @param expr the expression to take the address of
- /// @returns a TypedExpression that is the address-of `expr` (`&expr`)
- /// @note `expr` must be a reference type
- TypedExpression AddressOf(TypedExpression expr);
-
- /// Returns AddressOf(expr) if expr is has reference type and
- /// the instruction has a pointer result type. Otherwise returns expr.
- /// @param expr the expression to take the address of
- /// @returns a TypedExpression that is the address-of `expr` (`&expr`)
- /// @note `expr` must be a reference type
- TypedExpression AddressOfIfNeeded(TypedExpression expr,
- const spvtools::opt::Instruction* inst);
-
- /// @param expr the expression to dereference
- /// @returns a TypedExpression that is the dereference-of `expr` (`*expr`)
- /// @note `expr` must be a pointer type
- TypedExpression Dereference(TypedExpression expr);
-
- /// Creates a new `ast::Node` owned by the ProgramBuilder.
- /// @param args the arguments to pass to the type constructor
- /// @returns the node pointer
- template <typename T, typename... ARGS>
- T* create(ARGS&&... args) const {
- return builder_.create<T>(std::forward<ARGS>(args)...);
- }
-
- using StatementsStack = std::vector<StatementBlock>;
- using PtrAs = ParserImpl::PtrAs;
-
- ParserImpl& parser_impl_;
- TypeManager& ty_;
- ProgramBuilder& builder_;
- spvtools::opt::IRContext& ir_context_;
- spvtools::opt::analysis::DefUseManager* def_use_mgr_;
- spvtools::opt::analysis::ConstantManager* constant_mgr_;
- spvtools::opt::analysis::TypeManager* type_mgr_;
- FailStream& fail_stream_;
- Namer& namer_;
- const spvtools::opt::Function& function_;
-
- // The SPIR-V ID for the SampleMask input variable.
- uint32_t sample_mask_in_id;
- // The SPIR-V ID for the SampleMask output variable.
- uint32_t sample_mask_out_id;
-
- // A stack of statement lists. Each list is contained in a construct in
- // the next deeper element of stack. The 0th entry represents the statements
- // for the entire function. This stack is never empty.
- // The `construct` member for the 0th element is only valid during the
- // lifetime of the EmitFunctionBodyStatements method.
- StatementsStack statements_stack_;
-
- // The map of IDs that have already had an identifier name generated for it,
- // to their Type.
- std::unordered_map<uint32_t, const Type*> identifier_types_;
- // Mapping from SPIR-V ID that is used at most once, to its AST expression.
- std::unordered_map<uint32_t, TypedExpression> singly_used_values_;
-
- // The IDs of basic blocks, in reverse structured post-order (RSPO).
- // This is the output order for the basic blocks.
- std::vector<uint32_t> block_order_;
-
- // Mapping from block ID to its bookkeeping info.
- std::unordered_map<uint32_t, std::unique_ptr<BlockInfo>> block_info_;
-
- // Mapping from a locally-defined result ID to its bookkeeping info.
- std::unordered_map<uint32_t, std::unique_ptr<DefInfo>> def_info_;
-
- // Structured constructs, where enclosing constructs precede their children.
- ConstructList constructs_;
-
- // Information about entry point, if this function is referenced by one
- const EntryPointInfo* ep_info_ = nullptr;
+ using StatementsStack = std::vector<StatementBlock>;
+ using PtrAs = ParserImpl::PtrAs;
+
+ ParserImpl& parser_impl_;
+ TypeManager& ty_;
+ ProgramBuilder& builder_;
+ spvtools::opt::IRContext& ir_context_;
+ spvtools::opt::analysis::DefUseManager* def_use_mgr_;
+ spvtools::opt::analysis::ConstantManager* constant_mgr_;
+ spvtools::opt::analysis::TypeManager* type_mgr_;
+ FailStream& fail_stream_;
+ Namer& namer_;
+ const spvtools::opt::Function& function_;
+
+ // The SPIR-V ID for the SampleMask input variable.
+ uint32_t sample_mask_in_id;
+ // The SPIR-V ID for the SampleMask output variable.
+ uint32_t sample_mask_out_id;
+
+ // A stack of statement lists. Each list is contained in a construct in
+    // the next deeper element of the stack. The 0th entry represents the statements
+ // for the entire function. This stack is never empty.
+ // The `construct` member for the 0th element is only valid during the
+ // lifetime of the EmitFunctionBodyStatements method.
+ StatementsStack statements_stack_;
+
+    // The map of IDs that have already had an identifier name generated for them,
+ // to their Type.
+ std::unordered_map<uint32_t, const Type*> identifier_types_;
+ // Mapping from SPIR-V ID that is used at most once, to its AST expression.
+ std::unordered_map<uint32_t, TypedExpression> singly_used_values_;
+
+ // The IDs of basic blocks, in reverse structured post-order (RSPO).
+ // This is the output order for the basic blocks.
+ std::vector<uint32_t> block_order_;
+
+ // Mapping from block ID to its bookkeeping info.
+ std::unordered_map<uint32_t, std::unique_ptr<BlockInfo>> block_info_;
+
+ // Mapping from a locally-defined result ID to its bookkeeping info.
+ std::unordered_map<uint32_t, std::unique_ptr<DefInfo>> def_info_;
+
+ // Structured constructs, where enclosing constructs precede their children.
+ ConstructList constructs_;
+
+    // Information about the entry point, if this function is referenced by one
+ const EntryPointInfo* ep_info_ = nullptr;
};
} // namespace tint::reader::spirv
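The doc comments above describe how the SPIR-V function emitter builds WGSL statements: each structured construct pushes a StatementBlock onto a stack, statements are added to the top block, and when the emitter reaches the block's end ID the block is finalized and its completion action splices the finished statement list into the enclosing block. The standalone C++ sketch below illustrates that pattern only; SketchStatementBlock, the string-based statement type, and the block IDs are simplified stand-ins invented for this example, not the real Tint classes.

#include <cassert>
#include <cstdint>
#include <functional>
#include <string>
#include <utility>
#include <vector>

// Illustrative stand-ins for Tint's AST statement types (not the real classes).
using Statement = std::string;
using StatementList = std::vector<Statement>;
using CompletionAction = std::function<void(const StatementList&)>;

// A pared-down statement block: collects statements until the emitter reaches
// `end_id`, then hands the finished list to its completion action.
class SketchStatementBlock {
  public:
    SketchStatementBlock(uint32_t end_id, CompletionAction action)
        : end_id_(end_id), completion_action_(std::move(action)) {}

    void Add(Statement s) {
        assert(!finalized_ && "Add() must not be called after Finalize()");
        statements_.push_back(std::move(s));
    }

    uint32_t GetEndId() const { return end_id_; }

    // Runs the completion action over the collected statements. Called once,
    // when the emitter reaches the block whose ID equals GetEndId().
    void Finalize() {
        assert(!finalized_ && "Finalize() must only be called once");
        finalized_ = true;
        if (completion_action_) {
            completion_action_(statements_);
        }
    }

  private:
    uint32_t end_id_;
    CompletionAction completion_action_;
    StatementList statements_;
    bool finalized_ = false;
};

int main() {
    std::vector<SketchStatementBlock> stack;
    StatementList function_body;

    // Entry 0: the whole function body. Its completion action just keeps the list.
    stack.emplace_back(0u, [&](const StatementList& stmts) { function_body = stmts; });

    // A nested block (say, the body of an if-statement guarded by a flow guard),
    // ending at a hypothetical block %99: on completion, wrap its statements in
    // an if-statement and append that to the enclosing block.
    stack.emplace_back(99u, [&stack](const StatementList& stmts) {
        Statement wrapped = "if (guard_1) {";
        for (const auto& s : stmts) {
            wrapped += " " + s;
        }
        wrapped += " }";
        stack.back().Add(wrapped);  // by now the enclosing block is on top again
    });

    stack.back().Add("let x_1 : i32 = -(30i);");

    // Reaching block %99 pops and finalizes the inner block.
    SketchStatementBlock inner = std::move(stack.back());
    stack.pop_back();
    inner.Finalize();  // appends "if (guard_1) { ... }" to the outer block

    stack.back().Add("return;");
    SketchStatementBlock outer = std::move(stack.back());
    stack.pop_back();
    outer.Finalize();  // function_body now holds the if-statement and the return
    return 0;
}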
diff --git a/chromium/third_party/dawn/src/tint/reader/spirv/function_arithmetic_test.cc b/chromium/third_party/dawn/src/tint/reader/spirv/function_arithmetic_test.cc
index c48f86dd393..059791677fb 100644
--- a/chromium/third_party/dawn/src/tint/reader/spirv/function_arithmetic_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/spirv/function_arithmetic_test.cc
@@ -23,7 +23,7 @@ namespace {
using ::testing::HasSubstr;
std::string Preamble() {
- return R"(
+ return R"(
OpCapability Shader
OpMemoryModel Logical Simple
OpEntryPoint Fragment %100 "main"
@@ -74,317 +74,283 @@ std::string Preamble() {
// Returns the AST dump for a given SPIR-V assembly constant.
std::string AstFor(std::string assembly) {
- if (assembly == "v2uint_10_20") {
- return "vec2<u32>(10u, 20u)";
- }
- if (assembly == "v2uint_20_10") {
- return "vec2<u32>(20u, 10u)";
- }
- if (assembly == "v2int_30_40") {
- return "vec2<i32>(30, 40)";
- }
- if (assembly == "v2int_40_30") {
- return "vec2<i32>(40, 30)";
- }
- if (assembly == "cast_int_v2uint_10_20") {
- return "bitcast<vec2<i32>>(vec2<u32>(10u, 20u))";
- }
- if (assembly == "cast_uint_v2int_40_30") {
- return "bitcast<vec2<u32>>(vec2<i32>(40, 30))";
- }
- if (assembly == "v2float_50_60") {
- return "vec2<f32>(50.0, 60.0)";
- }
- if (assembly == "v2float_60_50") {
- return "vec2<f32>(60.0, 50.0)";
- }
- return "bad case";
+ if (assembly == "v2uint_10_20") {
+ return "vec2<u32>(10u, 20u)";
+ }
+ if (assembly == "v2uint_20_10") {
+ return "vec2<u32>(20u, 10u)";
+ }
+ if (assembly == "v2int_30_40") {
+ return "vec2<i32>(30i, 40i)";
+ }
+ if (assembly == "v2int_40_30") {
+ return "vec2<i32>(40i, 30i)";
+ }
+ if (assembly == "cast_int_v2uint_10_20") {
+ return "bitcast<vec2<i32>>(vec2<u32>(10u, 20u))";
+ }
+ if (assembly == "cast_uint_v2int_40_30") {
+ return "bitcast<vec2<u32>>(vec2<i32>(40i, 30i))";
+ }
+ if (assembly == "v2float_50_60") {
+ return "vec2<f32>(50.0f, 60.0f)";
+ }
+ if (assembly == "v2float_60_50") {
+ return "vec2<f32>(60.0f, 50.0f)";
+ }
+ return "bad case";
}
using SpvUnaryArithTest = SpvParserTestBase<::testing::Test>;
TEST_F(SpvUnaryArithTest, SNegate_Int_Int) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpSNegate %int %int_30
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions())
- << p->error() << "\n"
- << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr("let x_1 : i32 = -(30);"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error() << "\n" << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body), HasSubstr("let x_1 : i32 = -(30i);"));
}
TEST_F(SpvUnaryArithTest, SNegate_Int_Uint) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpSNegate %int %uint_10
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions())
- << p->error() << "\n"
- << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr("let x_1 : i32 = -(bitcast<i32>(10u));"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error() << "\n" << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body),
+ HasSubstr("let x_1 : i32 = -(bitcast<i32>(10u));"));
}
TEST_F(SpvUnaryArithTest, SNegate_Uint_Int) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpSNegate %uint %int_30
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions())
- << p->error() << "\n"
- << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr("let x_1 : u32 = bitcast<u32>(-(30));"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error() << "\n" << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body),
+ HasSubstr("let x_1 : u32 = bitcast<u32>(-(30i));"));
}
TEST_F(SpvUnaryArithTest, SNegate_Uint_Uint) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpSNegate %uint %uint_10
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions())
- << p->error() << "\n"
- << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr("let x_1 : u32 = bitcast<u32>(-(bitcast<i32>(10u)));"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error() << "\n" << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body),
+ HasSubstr("let x_1 : u32 = bitcast<u32>(-(bitcast<i32>(10u)));"));
}
TEST_F(SpvUnaryArithTest, SNegate_SignedVec_SignedVec) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpSNegate %v2int %v2int_30_40
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions())
- << p->error() << "\n"
- << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr("let x_1 : vec2<i32> = -(vec2<i32>(30, 40));"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error() << "\n" << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body),
+ HasSubstr("let x_1 : vec2<i32> = -(vec2<i32>(30i, 40i));"));
}
TEST_F(SpvUnaryArithTest, SNegate_SignedVec_UnsignedVec) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpSNegate %v2int %v2uint_10_20
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions())
- << p->error() << "\n"
- << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(
- test::ToString(p->program(), ast_body),
- HasSubstr(
- "let x_1 : vec2<i32> = -(bitcast<vec2<i32>>(vec2<u32>(10u, 20u)));"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error() << "\n" << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body),
+ HasSubstr("let x_1 : vec2<i32> = -(bitcast<vec2<i32>>(vec2<u32>(10u, 20u)));"));
}
TEST_F(SpvUnaryArithTest, SNegate_UnsignedVec_SignedVec) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpSNegate %v2uint %v2int_30_40
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions())
- << p->error() << "\n"
- << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(
- test::ToString(p->program(), ast_body),
- HasSubstr(
- "let x_1 : vec2<u32> = bitcast<vec2<u32>>(-(vec2<i32>(30, 40)));"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error() << "\n" << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body),
+ HasSubstr("let x_1 : vec2<u32> = bitcast<vec2<u32>>(-(vec2<i32>(30i, 40i)));"));
}
TEST_F(SpvUnaryArithTest, SNegate_UnsignedVec_UnsignedVec) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpSNegate %v2uint %v2uint_10_20
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions())
- << p->error() << "\n"
- << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(
- test::ToString(p->program(), ast_body),
- HasSubstr(
- R"(let x_1 : vec2<u32> = bitcast<vec2<u32>>(-(bitcast<vec2<i32>>(vec2<u32>(10u, 20u))));)"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error() << "\n" << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(
+ test::ToString(p->program(), ast_body),
+ HasSubstr(
+ R"(let x_1 : vec2<u32> = bitcast<vec2<u32>>(-(bitcast<vec2<i32>>(vec2<u32>(10u, 20u))));)"));
}
TEST_F(SpvUnaryArithTest, FNegate_Scalar) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpFNegate %float %float_50
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions())
- << p->error() << "\n"
- << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr("let x_1 : f32 = -(50.0);"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error() << "\n" << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body), HasSubstr("let x_1 : f32 = -(50.0f);"));
}
TEST_F(SpvUnaryArithTest, FNegate_Vector) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpFNegate %v2float %v2float_50_60
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions())
- << p->error() << "\n"
- << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr("let x_1 : vec2<f32> = -(vec2<f32>(50.0, 60.0));"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error() << "\n" << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body),
+ HasSubstr("let x_1 : vec2<f32> = -(vec2<f32>(50.0f, 60.0f));"));
}
struct BinaryData {
- const std::string res_type;
- const std::string lhs;
- const std::string op;
- const std::string rhs;
- const std::string ast_type;
- const std::string ast_lhs;
- const std::string ast_op;
- const std::string ast_rhs;
+ const std::string res_type;
+ const std::string lhs;
+ const std::string op;
+ const std::string rhs;
+ const std::string ast_type;
+ const std::string ast_lhs;
+ const std::string ast_op;
+ const std::string ast_rhs;
};
inline std::ostream& operator<<(std::ostream& out, BinaryData data) {
- out << "BinaryData{" << data.res_type << "," << data.lhs << "," << data.op
- << "," << data.rhs << "," << data.ast_type << "," << data.ast_lhs << ","
- << data.ast_op << "," << data.ast_rhs << "}";
- return out;
+ out << "BinaryData{" << data.res_type << "," << data.lhs << "," << data.op << "," << data.rhs
+ << "," << data.ast_type << "," << data.ast_lhs << "," << data.ast_op << "," << data.ast_rhs
+ << "}";
+ return out;
}
-using SpvBinaryArithTest =
- SpvParserTestBase<::testing::TestWithParam<BinaryData>>;
+using SpvBinaryArithTest = SpvParserTestBase<::testing::TestWithParam<BinaryData>>;
using SpvBinaryArithTestBasic = SpvParserTestBase<::testing::Test>;
TEST_P(SpvBinaryArithTest, EmitExpression) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = )" + GetParam().op +
- " %" + GetParam().res_type + " %" + GetParam().lhs +
- " %" + GetParam().rhs + R"(
+ " %" + GetParam().res_type + " %" + GetParam().lhs + " %" +
+ GetParam().rhs + R"(
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions())
- << p->error() << "\n"
- << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- std::ostringstream ss;
- ss << "let x_1 : " << GetParam().ast_type << " = (" << GetParam().ast_lhs
- << " " << GetParam().ast_op << " " << GetParam().ast_rhs << ");";
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- EXPECT_THAT(got, HasSubstr(ss.str())) << "got:\n" << got << assembly;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error() << "\n" << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ std::ostringstream ss;
+ ss << "let x_1 : " << GetParam().ast_type << " = (" << GetParam().ast_lhs << " "
+ << GetParam().ast_op << " " << GetParam().ast_rhs << ");";
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(got, HasSubstr(ss.str())) << "got:\n" << got << assembly;
}
// Use this when the result might have extra bitcasts on the outside.
struct BinaryDataGeneral {
- const std::string res_type;
- const std::string lhs;
- const std::string op;
- const std::string rhs;
- const std::string wgsl_type;
- const std::string expected;
+ const std::string res_type;
+ const std::string lhs;
+ const std::string op;
+ const std::string rhs;
+ const std::string wgsl_type;
+ const std::string expected;
};
inline std::ostream& operator<<(std::ostream& out, BinaryDataGeneral data) {
- out << "BinaryDataGeneral{" << data.res_type << "," << data.lhs << ","
- << data.op << "," << data.rhs << "," << data.wgsl_type << ","
- << data.expected << "}";
- return out;
+ out << "BinaryDataGeneral{" << data.res_type << "," << data.lhs << "," << data.op << ","
+ << data.rhs << "," << data.wgsl_type << "," << data.expected << "}";
+ return out;
}
-using SpvBinaryArithGeneralTest =
- SpvParserTestBase<::testing::TestWithParam<BinaryDataGeneral>>;
+using SpvBinaryArithGeneralTest = SpvParserTestBase<::testing::TestWithParam<BinaryDataGeneral>>;
TEST_P(SpvBinaryArithGeneralTest, EmitExpression) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = )" + GetParam().op +
- " %" + GetParam().res_type + " %" + GetParam().lhs +
- " %" + GetParam().rhs + R"(
+ " %" + GetParam().res_type + " %" + GetParam().lhs + " %" +
+ GetParam().rhs + R"(
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions())
- << p->error() << "\n"
- << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- std::ostringstream ss;
- ss << "let x_1 : " << GetParam().wgsl_type << " = " << GetParam().expected
- << ";";
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- EXPECT_THAT(got, HasSubstr(ss.str())) << "got:\n" << got << assembly;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error() << "\n" << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ std::ostringstream ss;
+ ss << "let x_1 : " << GetParam().wgsl_type << " = " << GetParam().expected << ";";
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(got, HasSubstr(ss.str())) << "got:\n" << got << assembly;
}
INSTANTIATE_TEST_SUITE_P(
@@ -392,13 +358,10 @@ INSTANTIATE_TEST_SUITE_P(
SpvBinaryArithTest,
::testing::Values(
// Both uint
- BinaryData{"uint", "uint_10", "OpIAdd", "uint_20", "u32", "10u", "+",
- "20u"}, // Both int
- BinaryData{"int", "int_30", "OpIAdd", "int_40", "i32", "30", "+",
- "40"}, // Both v2uint
- BinaryData{"v2uint", "v2uint_10_20", "OpIAdd", "v2uint_20_10",
- "vec2<u32>", AstFor("v2uint_10_20"), "+",
- AstFor("v2uint_20_10")},
+ BinaryData{"uint", "uint_10", "OpIAdd", "uint_20", "u32", "10u", "+", "20u"}, // Both int
+ BinaryData{"int", "int_30", "OpIAdd", "int_40", "i32", "30i", "+", "40i"}, // Both v2uint
+ BinaryData{"v2uint", "v2uint_10_20", "OpIAdd", "v2uint_20_10", "vec2<u32>",
+ AstFor("v2uint_10_20"), "+", AstFor("v2uint_20_10")},
// Both v2int
BinaryData{"v2int", "v2int_30_40", "OpIAdd", "v2int_40_30", "vec2<i32>",
AstFor("v2int_30_40"), "+", AstFor("v2int_40_30")}));
@@ -409,48 +372,43 @@ INSTANTIATE_TEST_SUITE_P(
::testing::Values(
// Mixed, uint <- int uint
BinaryDataGeneral{"uint", "int_30", "OpIAdd", "uint_10", "u32",
- "bitcast<u32>((30 + bitcast<i32>(10u)))"},
+ "bitcast<u32>((30i + bitcast<i32>(10u)))"},
// Mixed, int <- int uint
- BinaryDataGeneral{"int", "int_30", "OpIAdd", "uint_10", "i32",
- "(30 + bitcast<i32>(10u))"},
+ BinaryDataGeneral{"int", "int_30", "OpIAdd", "uint_10", "i32", "(30i + bitcast<i32>(10u))"},
// Mixed, uint <- uint int
BinaryDataGeneral{"uint", "uint_10", "OpIAdd", "int_30", "u32",
- "(10u + bitcast<u32>(30))"},
+ "(10u + bitcast<u32>(30i))"},
// Mixed, int <- uint uint
BinaryDataGeneral{"int", "uint_20", "OpIAdd", "uint_10", "i32",
"bitcast<i32>((20u + 10u))"},
// Mixed, returning v2uint
BinaryDataGeneral{
"v2uint", "v2int_30_40", "OpIAdd", "v2uint_10_20", "vec2<u32>",
- R"(bitcast<vec2<u32>>((vec2<i32>(30, 40) + bitcast<vec2<i32>>(vec2<u32>(10u, 20u)))))"},
+ R"(bitcast<vec2<u32>>((vec2<i32>(30i, 40i) + bitcast<vec2<i32>>(vec2<u32>(10u, 20u)))))"},
// Mixed, returning v2int
BinaryDataGeneral{
"v2int", "v2uint_10_20", "OpIAdd", "v2int_40_30", "vec2<i32>",
- R"(bitcast<vec2<i32>>((vec2<u32>(10u, 20u) + bitcast<vec2<u32>>(vec2<i32>(40, 30)))))"}));
-
-INSTANTIATE_TEST_SUITE_P(
- SpvParserTest_FAdd,
- SpvBinaryArithTest,
- ::testing::Values(
- // Scalar float
- BinaryData{"float", "float_50", "OpFAdd", "float_60", "f32", "50.0",
- "+", "60.0"}, // Vector float
- BinaryData{"v2float", "v2float_50_60", "OpFAdd", "v2float_60_50",
- "vec2<f32>", AstFor("v2float_50_60"), "+",
- AstFor("v2float_60_50")}));
+ R"(bitcast<vec2<i32>>((vec2<u32>(10u, 20u) + bitcast<vec2<u32>>(vec2<i32>(40i, 30i)))))"}));
+
+INSTANTIATE_TEST_SUITE_P(SpvParserTest_FAdd,
+ SpvBinaryArithTest,
+ ::testing::Values(
+ // Scalar float
+ BinaryData{"float", "float_50", "OpFAdd", "float_60", "f32", "50.0f",
+ "+", "60.0f"}, // Vector float
+ BinaryData{"v2float", "v2float_50_60", "OpFAdd", "v2float_60_50",
+ "vec2<f32>", AstFor("v2float_50_60"), "+",
+ AstFor("v2float_60_50")}));
INSTANTIATE_TEST_SUITE_P(
SpvParserTest_ISub,
SpvBinaryArithTest,
::testing::Values(
// Both uint
- BinaryData{"uint", "uint_10", "OpISub", "uint_20", "u32", "10u", "-",
- "20u"}, // Both int
- BinaryData{"int", "int_30", "OpISub", "int_40", "i32", "30", "-",
- "40"}, // Both v2uint
- BinaryData{"v2uint", "v2uint_10_20", "OpISub", "v2uint_20_10",
- "vec2<u32>", AstFor("v2uint_10_20"), "-",
- AstFor("v2uint_20_10")},
+ BinaryData{"uint", "uint_10", "OpISub", "uint_20", "u32", "10u", "-", "20u"}, // Both int
+ BinaryData{"int", "int_30", "OpISub", "int_40", "i32", "30i", "-", "40i"}, // Both v2uint
+ BinaryData{"v2uint", "v2uint_10_20", "OpISub", "v2uint_20_10", "vec2<u32>",
+ AstFor("v2uint_10_20"), "-", AstFor("v2uint_20_10")},
// Both v2int
BinaryData{"v2int", "v2int_30_40", "OpISub", "v2int_40_30", "vec2<i32>",
AstFor("v2int_30_40"), "-", AstFor("v2int_40_30")}));
@@ -461,48 +419,43 @@ INSTANTIATE_TEST_SUITE_P(
::testing::Values(
// Mixed, uint <- int uint
BinaryDataGeneral{"uint", "int_30", "OpISub", "uint_10", "u32",
- R"(bitcast<u32>((30 - bitcast<i32>(10u))))"},
+ R"(bitcast<u32>((30i - bitcast<i32>(10u))))"},
// Mixed, int <- int uint
- BinaryDataGeneral{"int", "int_30", "OpISub", "uint_10", "i32",
- "(30 - bitcast<i32>(10u))"},
+ BinaryDataGeneral{"int", "int_30", "OpISub", "uint_10", "i32", "(30i - bitcast<i32>(10u))"},
// Mixed, uint <- uint int
BinaryDataGeneral{"uint", "uint_10", "OpISub", "int_30", "u32",
- "(10u - bitcast<u32>(30))"},
+ "(10u - bitcast<u32>(30i))"},
// Mixed, int <- uint uint
BinaryDataGeneral{"int", "uint_20", "OpISub", "uint_10", "i32",
"bitcast<i32>((20u - 10u))"},
// Mixed, returning v2uint
BinaryDataGeneral{
"v2uint", "v2int_30_40", "OpISub", "v2uint_10_20", "vec2<u32>",
- R"(bitcast<vec2<u32>>((vec2<i32>(30, 40) - bitcast<vec2<i32>>(vec2<u32>(10u, 20u)))))"},
+ R"(bitcast<vec2<u32>>((vec2<i32>(30i, 40i) - bitcast<vec2<i32>>(vec2<u32>(10u, 20u)))))"},
// Mixed, returning v2int
BinaryDataGeneral{
"v2int", "v2uint_10_20", "OpISub", "v2int_40_30", "vec2<i32>",
- R"(bitcast<vec2<i32>>((vec2<u32>(10u, 20u) - bitcast<vec2<u32>>(vec2<i32>(40, 30)))))"}));
-
-INSTANTIATE_TEST_SUITE_P(
- SpvParserTest_FSub,
- SpvBinaryArithTest,
- ::testing::Values(
- // Scalar float
- BinaryData{"float", "float_50", "OpFSub", "float_60", "f32", "50.0",
- "-", "60.0"}, // Vector float
- BinaryData{"v2float", "v2float_50_60", "OpFSub", "v2float_60_50",
- "vec2<f32>", AstFor("v2float_50_60"), "-",
- AstFor("v2float_60_50")}));
+ R"(bitcast<vec2<i32>>((vec2<u32>(10u, 20u) - bitcast<vec2<u32>>(vec2<i32>(40i, 30i)))))"}));
+
+INSTANTIATE_TEST_SUITE_P(SpvParserTest_FSub,
+ SpvBinaryArithTest,
+ ::testing::Values(
+ // Scalar float
+ BinaryData{"float", "float_50", "OpFSub", "float_60", "f32", "50.0f",
+ "-", "60.0f"}, // Vector float
+ BinaryData{"v2float", "v2float_50_60", "OpFSub", "v2float_60_50",
+ "vec2<f32>", AstFor("v2float_50_60"), "-",
+ AstFor("v2float_60_50")}));
INSTANTIATE_TEST_SUITE_P(
SpvParserTest_IMul,
SpvBinaryArithTest,
::testing::Values(
// Both uint
- BinaryData{"uint", "uint_10", "OpIMul", "uint_20", "u32", "10u", "*",
- "20u"}, // Both int
- BinaryData{"int", "int_30", "OpIMul", "int_40", "i32", "30", "*",
- "40"}, // Both v2uint
- BinaryData{"v2uint", "v2uint_10_20", "OpIMul", "v2uint_20_10",
- "vec2<u32>", AstFor("v2uint_10_20"), "*",
- AstFor("v2uint_20_10")},
+ BinaryData{"uint", "uint_10", "OpIMul", "uint_20", "u32", "10u", "*", "20u"}, // Both int
+ BinaryData{"int", "int_30", "OpIMul", "int_40", "i32", "30i", "*", "40i"}, // Both v2uint
+ BinaryData{"v2uint", "v2uint_10_20", "OpIMul", "v2uint_20_10", "vec2<u32>",
+ AstFor("v2uint_10_20"), "*", AstFor("v2uint_20_10")},
// Both v2int
BinaryData{"v2int", "v2int_30_40", "OpIMul", "v2int_40_30", "vec2<i32>",
AstFor("v2int_30_40"), "*", AstFor("v2int_40_30")}));
@@ -513,54 +466,50 @@ INSTANTIATE_TEST_SUITE_P(
::testing::Values(
// Mixed, uint <- int uint
BinaryDataGeneral{"uint", "int_30", "OpIMul", "uint_10", "u32",
- "bitcast<u32>((30 * bitcast<i32>(10u)))"},
+ "bitcast<u32>((30i * bitcast<i32>(10u)))"},
// Mixed, int <- int uint
- BinaryDataGeneral{"int", "int_30", "OpIMul", "uint_10", "i32",
- "(30 * bitcast<i32>(10u))"},
+ BinaryDataGeneral{"int", "int_30", "OpIMul", "uint_10", "i32", "(30i * bitcast<i32>(10u))"},
// Mixed, uint <- uint int
BinaryDataGeneral{"uint", "uint_10", "OpIMul", "int_30", "u32",
- "(10u * bitcast<u32>(30))"},
+ "(10u * bitcast<u32>(30i))"},
// Mixed, int <- uint uint
BinaryDataGeneral{"int", "uint_20", "OpIMul", "uint_10", "i32",
"bitcast<i32>((20u * 10u))"},
// Mixed, returning v2uint
BinaryDataGeneral{
"v2uint", "v2int_30_40", "OpIMul", "v2uint_10_20", "vec2<u32>",
- R"(bitcast<vec2<u32>>((vec2<i32>(30, 40) * bitcast<vec2<i32>>(vec2<u32>(10u, 20u)))))"},
+ R"(bitcast<vec2<u32>>((vec2<i32>(30i, 40i) * bitcast<vec2<i32>>(vec2<u32>(10u, 20u)))))"},
// Mixed, returning v2int
BinaryDataGeneral{
"v2int", "v2uint_10_20", "OpIMul", "v2int_40_30", "vec2<i32>",
- R"(bitcast<vec2<i32>>((vec2<u32>(10u, 20u) * bitcast<vec2<u32>>(vec2<i32>(40, 30)))))"}));
-
-INSTANTIATE_TEST_SUITE_P(
- SpvParserTest_FMul,
- SpvBinaryArithTest,
- ::testing::Values(
- // Scalar float
- BinaryData{"float", "float_50", "OpFMul", "float_60", "f32", "50.0",
- "*", "60.0"}, // Vector float
- BinaryData{"v2float", "v2float_50_60", "OpFMul", "v2float_60_50",
- "vec2<f32>", AstFor("v2float_50_60"), "*",
- AstFor("v2float_60_50")}));
-
-INSTANTIATE_TEST_SUITE_P(
- SpvParserTest_UDiv,
- SpvBinaryArithTest,
- ::testing::Values(
- // Both uint
- BinaryData{"uint", "uint_10", "OpUDiv", "uint_20", "u32", "10u", "/",
- "20u"}, // Both v2uint
- BinaryData{"v2uint", "v2uint_10_20", "OpUDiv", "v2uint_20_10",
- "vec2<u32>", AstFor("v2uint_10_20"), "/",
- AstFor("v2uint_20_10")}));
+ R"(bitcast<vec2<i32>>((vec2<u32>(10u, 20u) * bitcast<vec2<u32>>(vec2<i32>(40i, 30i)))))"}));
+
+INSTANTIATE_TEST_SUITE_P(SpvParserTest_FMul,
+ SpvBinaryArithTest,
+ ::testing::Values(
+ // Scalar float
+ BinaryData{"float", "float_50", "OpFMul", "float_60", "f32", "50.0f",
+ "*", "60.0f"}, // Vector float
+ BinaryData{"v2float", "v2float_50_60", "OpFMul", "v2float_60_50",
+ "vec2<f32>", AstFor("v2float_50_60"), "*",
+ AstFor("v2float_60_50")}));
+
+INSTANTIATE_TEST_SUITE_P(SpvParserTest_UDiv,
+ SpvBinaryArithTest,
+ ::testing::Values(
+ // Both uint
+ BinaryData{"uint", "uint_10", "OpUDiv", "uint_20", "u32", "10u", "/",
+ "20u"}, // Both v2uint
+ BinaryData{"v2uint", "v2uint_10_20", "OpUDiv", "v2uint_20_10",
+ "vec2<u32>", AstFor("v2uint_10_20"), "/",
+ AstFor("v2uint_20_10")}));
INSTANTIATE_TEST_SUITE_P(
SpvParserTest_SDiv,
SpvBinaryArithTest,
::testing::Values(
// Both int
- BinaryData{"int", "int_30", "OpSDiv", "int_40", "i32", "30", "/",
- "40"}, // Both v2int
+ BinaryData{"int", "int_30", "OpSDiv", "int_40", "i32", "30i", "/", "40i"}, // Both v2int
BinaryData{"v2int", "v2int_30_40", "OpSDiv", "v2int_40_30", "vec2<i32>",
AstFor("v2int_30_40"), "/", AstFor("v2int_40_30")}));
@@ -569,89 +518,79 @@ INSTANTIATE_TEST_SUITE_P(
SpvBinaryArithTest,
::testing::Values(
// Mixed, returning int, second arg uint
- BinaryData{"int", "int_30", "OpSDiv", "uint_10", "i32", "30", "/",
- "bitcast<i32>(10u)"},
+ BinaryData{"int", "int_30", "OpSDiv", "uint_10", "i32", "30i", "/", "bitcast<i32>(10u)"},
// Mixed, returning int, first arg uint
- BinaryData{"int", "uint_10", "OpSDiv", "int_30", "i32",
- "bitcast<i32>(10u)", "/",
- "30"}, // Mixed, returning v2int, first arg v2uint
- BinaryData{"v2int", "v2uint_10_20", "OpSDiv", "v2int_30_40",
- "vec2<i32>", AstFor("cast_int_v2uint_10_20"), "/",
- AstFor("v2int_30_40")},
+ BinaryData{"int", "uint_10", "OpSDiv", "int_30", "i32", "bitcast<i32>(10u)", "/",
+ "30i"}, // Mixed, returning v2int, first arg v2uint
+ BinaryData{"v2int", "v2uint_10_20", "OpSDiv", "v2int_30_40", "vec2<i32>",
+ AstFor("cast_int_v2uint_10_20"), "/", AstFor("v2int_30_40")},
// Mixed, returning v2int, second arg v2uint
- BinaryData{"v2int", "v2int_30_40", "OpSDiv", "v2uint_10_20",
- "vec2<i32>", AstFor("v2int_30_40"), "/",
- AstFor("cast_int_v2uint_10_20")}));
+ BinaryData{"v2int", "v2int_30_40", "OpSDiv", "v2uint_10_20", "vec2<i32>",
+ AstFor("v2int_30_40"), "/", AstFor("cast_int_v2uint_10_20")}));
TEST_F(SpvBinaryArithTestBasic, SDiv_Scalar_UnsignedResult) {
- // The WGSL signed division operator expects both operands to be signed
- // and the result is signed as well.
- // In this test SPIR-V demands an unsigned result, so we have to
- // wrap the result with an as-cast.
- const auto assembly = Preamble() + R"(
+ // The WGSL signed division operator expects both operands to be signed
+ // and the result is signed as well.
+ // In this test SPIR-V demands an unsigned result, so we have to
+ // wrap the result with an as-cast.
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpSDiv %uint %int_30 %int_40
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions())
- << p->error() << "\n"
- << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr("let x_1 : u32 = bitcast<u32>((30 / 40));"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error() << "\n" << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body),
+ HasSubstr("let x_1 : u32 = bitcast<u32>((30i / 40i));"));
}
TEST_F(SpvBinaryArithTestBasic, SDiv_Vector_UnsignedResult) {
- // The WGSL signed division operator expects both operands to be signed
- // and the result is signed as well.
- // In this test SPIR-V demands an unsigned result, so we have to
- // wrap the result with an as-cast.
- const auto assembly = Preamble() + R"(
+ // The WGSL signed division operator expects both operands to be signed
+ // and the result is signed as well.
+ // In this test SPIR-V demands an unsigned result, so we have to
+ // wrap the result with an as-cast.
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpSDiv %v2uint %v2int_30_40 %v2int_40_30
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions())
- << p->error() << "\n"
- << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(
- test::ToString(p->program(), ast_body),
- HasSubstr(
- R"(let x_1 : vec2<u32> = bitcast<vec2<u32>>((vec2<i32>(30, 40) / vec2<i32>(40, 30)));)"));
-}
-
-INSTANTIATE_TEST_SUITE_P(
- SpvParserTest_FDiv,
- SpvBinaryArithTest,
- ::testing::Values(
- // Scalar float
- BinaryData{"float", "float_50", "OpFDiv", "float_60", "f32", "50.0",
- "/", "60.0"}, // Vector float
- BinaryData{"v2float", "v2float_50_60", "OpFDiv", "v2float_60_50",
- "vec2<f32>", AstFor("v2float_50_60"), "/",
- AstFor("v2float_60_50")}));
-
-INSTANTIATE_TEST_SUITE_P(
- SpvParserTest_UMod,
- SpvBinaryArithTest,
- ::testing::Values(
- // Both uint
- BinaryData{"uint", "uint_10", "OpUMod", "uint_20", "u32", "10u", "%",
- "20u"}, // Both v2uint
- BinaryData{"v2uint", "v2uint_10_20", "OpUMod", "v2uint_20_10",
- "vec2<u32>", AstFor("v2uint_10_20"), "%",
- AstFor("v2uint_20_10")}));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error() << "\n" << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(
+ test::ToString(p->program(), ast_body),
+ HasSubstr(
+ R"(let x_1 : vec2<u32> = bitcast<vec2<u32>>((vec2<i32>(30i, 40i) / vec2<i32>(40i, 30i)));)"));
+}
+
+INSTANTIATE_TEST_SUITE_P(SpvParserTest_FDiv,
+ SpvBinaryArithTest,
+ ::testing::Values(
+ // Scalar float
+ BinaryData{"float", "float_50", "OpFDiv", "float_60", "f32", "50.0f",
+ "/", "60.0f"}, // Vector float
+ BinaryData{"v2float", "v2float_50_60", "OpFDiv", "v2float_60_50",
+ "vec2<f32>", AstFor("v2float_50_60"), "/",
+ AstFor("v2float_60_50")}));
+
+INSTANTIATE_TEST_SUITE_P(SpvParserTest_UMod,
+ SpvBinaryArithTest,
+ ::testing::Values(
+ // Both uint
+ BinaryData{"uint", "uint_10", "OpUMod", "uint_20", "u32", "10u", "%",
+ "20u"}, // Both v2uint
+ BinaryData{"v2uint", "v2uint_10_20", "OpUMod", "v2uint_20_10",
+ "vec2<u32>", AstFor("v2uint_10_20"), "%",
+ AstFor("v2uint_20_10")}));
// Currently WGSL is missing a mapping for OpSRem
// https://github.com/gpuweb/gpuweb/issues/702
@@ -661,8 +600,7 @@ INSTANTIATE_TEST_SUITE_P(
SpvBinaryArithTest,
::testing::Values(
// Both int
- BinaryData{"int", "int_30", "OpSMod", "int_40", "i32", "30", "%",
- "40"}, // Both v2int
+ BinaryData{"int", "int_30", "OpSMod", "int_40", "i32", "30i", "%", "40i"}, // Both v2int
BinaryData{"v2int", "v2int_30_40", "OpSMod", "v2int_40_30", "vec2<i32>",
AstFor("v2int_30_40"), "%", AstFor("v2int_40_30")}));
@@ -671,122 +609,108 @@ INSTANTIATE_TEST_SUITE_P(
SpvBinaryArithTest,
::testing::Values(
// Mixed, returning int, second arg uint
- BinaryData{"int", "int_30", "OpSMod", "uint_10", "i32", "30", "%",
- "bitcast<i32>(10u)"},
+ BinaryData{"int", "int_30", "OpSMod", "uint_10", "i32", "30i", "%", "bitcast<i32>(10u)"},
// Mixed, returning int, first arg uint
- BinaryData{"int", "uint_10", "OpSMod", "int_30", "i32",
- "bitcast<i32>(10u)", "%",
- "30"}, // Mixed, returning v2int, first arg v2uint
- BinaryData{"v2int", "v2uint_10_20", "OpSMod", "v2int_30_40",
- "vec2<i32>", AstFor("cast_int_v2uint_10_20"), "%",
- AstFor("v2int_30_40")},
+ BinaryData{"int", "uint_10", "OpSMod", "int_30", "i32", "bitcast<i32>(10u)", "%",
+ "30i"}, // Mixed, returning v2int, first arg v2uint
+ BinaryData{"v2int", "v2uint_10_20", "OpSMod", "v2int_30_40", "vec2<i32>",
+ AstFor("cast_int_v2uint_10_20"), "%", AstFor("v2int_30_40")},
// Mixed, returning v2int, second arg v2uint
- BinaryData{"v2int", "v2int_30_40", "OpSMod", "v2uint_10_20",
- "vec2<i32>", AstFor("v2int_30_40"), "%",
- AstFor("cast_int_v2uint_10_20")}));
+ BinaryData{"v2int", "v2int_30_40", "OpSMod", "v2uint_10_20", "vec2<i32>",
+ AstFor("v2int_30_40"), "%", AstFor("cast_int_v2uint_10_20")}));
TEST_F(SpvBinaryArithTestBasic, SMod_Scalar_UnsignedResult) {
- // The WGSL signed modulus operator expects both operands to be signed
- // and the result is signed as well.
- // In this test SPIR-V demands an unsigned result, so we have to
- // wrap the result with an as-cast.
- const auto assembly = Preamble() + R"(
+ // The WGSL signed modulus operator expects both operands to be signed
+ // and the result is signed as well.
+ // In this test SPIR-V demands an unsigned result, so we have to
+ // wrap the result with an as-cast.
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpSMod %uint %int_30 %int_40
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions())
- << p->error() << "\n"
- << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr("let x_1 : u32 = bitcast<u32>((30 % 40));"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error() << "\n" << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body),
+ HasSubstr("let x_1 : u32 = bitcast<u32>((30i % 40i));"));
}
TEST_F(SpvBinaryArithTestBasic, SMod_Vector_UnsignedResult) {
- // The WGSL signed modulus operator expects both operands to be signed
- // and the result is signed as well.
- // In this test SPIR-V demands an unsigned result, so we have to
- // wrap the result with an as-cast.
- const auto assembly = Preamble() + R"(
+ // The WGSL signed modulus operator expects both operands to be signed
+ // and the result is signed as well.
+ // In this test SPIR-V demands an unsigned result, so we have to
+ // wrap the result with an as-cast.
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpSMod %v2uint %v2int_30_40 %v2int_40_30
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions())
- << p->error() << "\n"
- << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(
- test::ToString(p->program(), ast_body),
- HasSubstr(
- R"(let x_1 : vec2<u32> = bitcast<vec2<u32>>((vec2<i32>(30, 40) % vec2<i32>(40, 30)));)"));
-}
-
-INSTANTIATE_TEST_SUITE_P(
- SpvParserTest_FRem,
- SpvBinaryArithTest,
- ::testing::Values(
- // Scalar float
- BinaryData{"float", "float_50", "OpFRem", "float_60", "f32", "50.0",
- "%", "60.0"}, // Vector float
- BinaryData{"v2float", "v2float_50_60", "OpFRem", "v2float_60_50",
- "vec2<f32>", AstFor("v2float_50_60"), "%",
- AstFor("v2float_60_50")}));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error() << "\n" << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(
+ test::ToString(p->program(), ast_body),
+ HasSubstr(
+ R"(let x_1 : vec2<u32> = bitcast<vec2<u32>>((vec2<i32>(30i, 40i) % vec2<i32>(40i, 30i)));)"));
+}
+
+INSTANTIATE_TEST_SUITE_P(SpvParserTest_FRem,
+ SpvBinaryArithTest,
+ ::testing::Values(
+ // Scalar float
+ BinaryData{"float", "float_50", "OpFRem", "float_60", "f32", "50.0f",
+ "%", "60.0f"}, // Vector float
+ BinaryData{"v2float", "v2float_50_60", "OpFRem", "v2float_60_50",
+ "vec2<f32>", AstFor("v2float_50_60"), "%",
+ AstFor("v2float_60_50")}));
TEST_F(SpvBinaryArithTestBasic, FMod_Scalar) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpFMod %float %float_50 %float_60
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions())
- << p->error() << "\n"
- << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(
- test::ToString(p->program(), ast_body),
- HasSubstr("let x_1 : f32 = (50.0 - (60.0 * floor((50.0 / 60.0))));"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error() << "\n" << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body),
+ HasSubstr("let x_1 : f32 = (50.0f - (60.0f * floor((50.0f / 60.0f))));"));
}
TEST_F(SpvBinaryArithTestBasic, FMod_Vector) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpFMod %v2float %v2float_50_60 %v2float_60_50
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions())
- << p->error() << "\n"
- << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(
- test::ToString(p->program(), ast_body),
- HasSubstr(
- R"(let x_1 : vec2<f32> = (vec2<f32>(50.0, 60.0) - (vec2<f32>(60.0, 50.0) * floor((vec2<f32>(50.0, 60.0) / vec2<f32>(60.0, 50.0)))));)"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error() << "\n" << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(
+ test::ToString(p->program(), ast_body),
+ HasSubstr(
+ R"(let x_1 : vec2<f32> = (vec2<f32>(50.0f, 60.0f) - (vec2<f32>(60.0f, 50.0f) * floor((vec2<f32>(50.0f, 60.0f) / vec2<f32>(60.0f, 50.0f)))));)"));
}
TEST_F(SpvBinaryArithTestBasic, VectorTimesScalar) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpCopyObject %v2float %v2float_50_60
@@ -795,18 +719,17 @@ TEST_F(SpvBinaryArithTestBasic, VectorTimesScalar) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions())
- << assembly << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr("let x_10 : vec2<f32> = (x_1 * x_2);"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body),
+ HasSubstr("let x_10 : vec2<f32> = (x_1 * x_2);"));
}
TEST_F(SpvBinaryArithTestBasic, MatrixTimesScalar) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpCopyObject %m2v2float %m2v2float_a
@@ -815,18 +738,17 @@ TEST_F(SpvBinaryArithTestBasic, MatrixTimesScalar) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions())
- << assembly << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr("let x_10 : mat2x2<f32> = (x_1 * x_2);"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body),
+ HasSubstr("let x_10 : mat2x2<f32> = (x_1 * x_2);"));
}
TEST_F(SpvBinaryArithTestBasic, VectorTimesMatrix) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpCopyObject %m2v2float %m2v2float_a
@@ -835,18 +757,17 @@ TEST_F(SpvBinaryArithTestBasic, VectorTimesMatrix) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions())
- << assembly << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr("let x_10 : vec2<f32> = (x_1 * x_2);"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body),
+ HasSubstr("let x_10 : vec2<f32> = (x_1 * x_2);"));
}
TEST_F(SpvBinaryArithTestBasic, MatrixTimesVector) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpCopyObject %m2v2float %m2v2float_a
@@ -855,18 +776,17 @@ TEST_F(SpvBinaryArithTestBasic, MatrixTimesVector) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions())
- << assembly << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr("let x_10 : vec2<f32> = (x_1 * x_2);"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body),
+ HasSubstr("let x_10 : vec2<f32> = (x_1 * x_2);"));
}
TEST_F(SpvBinaryArithTestBasic, MatrixTimesMatrix) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpCopyObject %m2v2float %m2v2float_a
@@ -875,18 +795,17 @@ TEST_F(SpvBinaryArithTestBasic, MatrixTimesMatrix) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions())
- << assembly << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr("let x_10 : mat2x2<f32> = (x_1 * x_2);"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body),
+ HasSubstr("let x_10 : mat2x2<f32> = (x_1 * x_2);"));
}
TEST_F(SpvBinaryArithTestBasic, Dot) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpCopyObject %v2float %v2float_50_60
@@ -895,20 +814,19 @@ TEST_F(SpvBinaryArithTestBasic, Dot) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions())
- << assembly << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr("let x_3 : f32 = dot(x_1, x_2);"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body),
+ HasSubstr("let x_3 : f32 = dot(x_1, x_2);"));
}
TEST_F(SpvBinaryArithTestBasic, OuterProduct) {
- // OpOuterProduct is expanded to basic operations.
- // The operands, even if used once, are given their own const definitions.
- const auto assembly = Preamble() + R"(
+ // OpOuterProduct is expanded to basic operations.
+ // The operands, even if used once, are given their own const definitions.
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpFAdd %v3float %v3float_50_60_70 %v3float_50_60_70 ; column vector
@@ -917,91 +835,84 @@ TEST_F(SpvBinaryArithTestBasic, OuterProduct) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions())
- << assembly << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- EXPECT_THAT(
- got,
- HasSubstr(
- "let x_3 : mat2x3<f32> = mat2x3<f32>("
- "vec3<f32>((x_2.x * x_1.x), (x_2.x * x_1.y), (x_2.x * x_1.z)), "
- "vec3<f32>((x_2.y * x_1.x), (x_2.y * x_1.y), (x_2.y * x_1.z)));"))
- << got;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(got, HasSubstr("let x_3 : mat2x3<f32> = mat2x3<f32>("
+ "vec3<f32>((x_2.x * x_1.x), (x_2.x * x_1.y), (x_2.x * x_1.z)), "
+ "vec3<f32>((x_2.y * x_1.x), (x_2.y * x_1.y), (x_2.y * x_1.z)));"))
+ << got;
}
struct BuiltinData {
- const std::string spirv;
- const std::string wgsl;
+ const std::string spirv;
+ const std::string wgsl;
};
inline std::ostream& operator<<(std::ostream& out, BuiltinData data) {
- out << "OpData{" << data.spirv << "," << data.wgsl << "}";
- return out;
+ out << "OpData{" << data.spirv << "," << data.wgsl << "}";
+ return out;
}
struct ArgAndTypeData {
- const std::string spirv_type;
- const std::string spirv_arg;
- const std::string ast_type;
+ const std::string spirv_type;
+ const std::string spirv_arg;
+ const std::string ast_type;
};
inline std::ostream& operator<<(std::ostream& out, ArgAndTypeData data) {
- out << "ArgAndTypeData{" << data.spirv_type << "," << data.spirv_arg << ","
- << data.ast_type << "}";
- return out;
+ out << "ArgAndTypeData{" << data.spirv_type << "," << data.spirv_arg << "," << data.ast_type
+ << "}";
+ return out;
}
-using SpvBinaryDerivativeTest = SpvParserTestBase<
- ::testing::TestWithParam<std::tuple<BuiltinData, ArgAndTypeData>>>;
+using SpvBinaryDerivativeTest =
+ SpvParserTestBase<::testing::TestWithParam<std::tuple<BuiltinData, ArgAndTypeData>>>;
TEST_P(SpvBinaryDerivativeTest, Derivatives) {
- auto& builtin = std::get<0>(GetParam());
- auto& arg = std::get<1>(GetParam());
+ auto& builtin = std::get<0>(GetParam());
+ auto& arg = std::get<1>(GetParam());
- const auto assembly = R"(
+ const auto assembly = R"(
OpCapability DerivativeControl
)" + Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpCopyObject %)" +
- arg.spirv_type + " %" + arg.spirv_arg + R"(
+ arg.spirv_type + " %" + arg.spirv_arg + R"(
%2 = )" + builtin.spirv +
- " %" + arg.spirv_type + R"( %1
+ " %" + arg.spirv_type + R"( %1
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions())
- << assembly << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(
- test::ToString(p->program(), ast_body),
- HasSubstr("let x_2 : " + arg.ast_type + " = " + builtin.wgsl + "(x_1);"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body),
+ HasSubstr("let x_2 : " + arg.ast_type + " = " + builtin.wgsl + "(x_1);"));
}
INSTANTIATE_TEST_SUITE_P(
SpvBinaryDerivativeTest,
SpvBinaryDerivativeTest,
- testing::Combine(
- ::testing::Values(BuiltinData{"OpDPdx", "dpdx"},
- BuiltinData{"OpDPdy", "dpdy"},
- BuiltinData{"OpFwidth", "fwidth"},
- BuiltinData{"OpDPdxFine", "dpdxFine"},
- BuiltinData{"OpDPdyFine", "dpdyFine"},
- BuiltinData{"OpFwidthFine", "fwidthFine"},
- BuiltinData{"OpDPdxCoarse", "dpdxCoarse"},
- BuiltinData{"OpDPdyCoarse", "dpdyCoarse"},
- BuiltinData{"OpFwidthCoarse", "fwidthCoarse"}),
- ::testing::Values(
- ArgAndTypeData{"float", "float_50", "f32"},
- ArgAndTypeData{"v2float", "v2float_50_60", "vec2<f32>"},
- ArgAndTypeData{"v3float", "v3float_50_60_70", "vec3<f32>"})));
+ testing::Combine(::testing::Values(BuiltinData{"OpDPdx", "dpdx"},
+ BuiltinData{"OpDPdy", "dpdy"},
+ BuiltinData{"OpFwidth", "fwidth"},
+ BuiltinData{"OpDPdxFine", "dpdxFine"},
+ BuiltinData{"OpDPdyFine", "dpdyFine"},
+ BuiltinData{"OpFwidthFine", "fwidthFine"},
+ BuiltinData{"OpDPdxCoarse", "dpdxCoarse"},
+ BuiltinData{"OpDPdyCoarse", "dpdyCoarse"},
+ BuiltinData{"OpFwidthCoarse", "fwidthCoarse"}),
+ ::testing::Values(ArgAndTypeData{"float", "float_50", "f32"},
+ ArgAndTypeData{"v2float", "v2float_50_60", "vec2<f32>"},
+ ArgAndTypeData{"v3float", "v3float_50_60_70",
+ "vec3<f32>"})));
TEST_F(SpvUnaryArithTest, Transpose_2x2) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpCopyObject %m2v2float %m2v2float_a
@@ -1009,20 +920,18 @@ TEST_F(SpvUnaryArithTest, Transpose_2x2) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions())
- << p->error() << "\n"
- << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- const auto* expected = "let x_2 : mat2x2<f32> = transpose(x_1);";
- auto ast_body = fe.ast_body();
- const auto got = test::ToString(p->program(), ast_body);
- EXPECT_THAT(got, HasSubstr(expected)) << got;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error() << "\n" << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ const auto* expected = "let x_2 : mat2x2<f32> = transpose(x_1);";
+ auto ast_body = fe.ast_body();
+ const auto got = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(got, HasSubstr(expected)) << got;
}
TEST_F(SpvUnaryArithTest, Transpose_2x3) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpCopyObject %m2v3float %m2v3float_a
@@ -1030,23 +939,21 @@ TEST_F(SpvUnaryArithTest, Transpose_2x3) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions())
- << p->error() << "\n"
- << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- // Note, in the AST dump mat_2_3 means 2 rows and 3 columns.
- // So the column vectors have 2 elements.
- // That is, %m3v2float is __mat_2_3f32.
- const auto* expected = "let x_2 : mat3x2<f32> = transpose(x_1);";
- auto ast_body = fe.ast_body();
- const auto got = test::ToString(p->program(), ast_body);
- EXPECT_THAT(got, HasSubstr(expected)) << got;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error() << "\n" << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ // Note, in the AST dump mat_2_3 means 2 rows and 3 columns.
+ // So the column vectors have 2 elements.
+ // That is, %m3v2float is __mat_2_3f32.
+ const auto* expected = "let x_2 : mat3x2<f32> = transpose(x_1);";
+ auto ast_body = fe.ast_body();
+ const auto got = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(got, HasSubstr(expected)) << got;
}
TEST_F(SpvUnaryArithTest, Transpose_3x2) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpCopyObject %m3v2float %m3v2float_a
@@ -1054,16 +961,14 @@ TEST_F(SpvUnaryArithTest, Transpose_3x2) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions())
- << p->error() << "\n"
- << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- const auto* expected = "let x_2 : mat2x3<f32> = transpose(x_1);";
- auto ast_body = fe.ast_body();
- const auto got = test::ToString(p->program(), ast_body);
- EXPECT_THAT(got, HasSubstr(expected)) << got;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error() << "\n" << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ const auto* expected = "let x_2 : mat2x3<f32> = transpose(x_1);";
+ auto ast_body = fe.ast_body();
+ const auto got = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(got, HasSubstr(expected)) << got;
}
// TODO(dneto): OpSRem. Missing from WGSL
diff --git a/chromium/third_party/dawn/src/tint/reader/spirv/function_bit_test.cc b/chromium/third_party/dawn/src/tint/reader/spirv/function_bit_test.cc
index ba0e67ea3f6..a8e97b31b40 100644
--- a/chromium/third_party/dawn/src/tint/reader/spirv/function_bit_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/spirv/function_bit_test.cc
@@ -22,7 +22,7 @@ namespace {
using ::testing::HasSubstr;
std::string CommonTypes() {
- return R"(
+ return R"(
%void = OpTypeVoid
%voidfn = OpTypeFunction %void
@@ -55,7 +55,7 @@ std::string CommonTypes() {
}
std::string SimplePreamble() {
- return R"(
+ return R"(
OpCapability Shader
OpMemoryModel Logical Simple
OpEntryPoint Fragment %100 "main"
@@ -65,118 +65,109 @@ std::string SimplePreamble() {
// Returns the AST dump for a given SPIR-V assembly constant.
std::string AstFor(std::string assembly) {
- if (assembly == "v2uint_10_20") {
- return "vec2<u32>(10u, 20u)";
- }
- if (assembly == "v2uint_20_10") {
- return "vec2<u32>(20u, 10u)";
- }
- if (assembly == "v2int_30_40") {
- return "vec2<i32>(30, 40)";
- }
- if (assembly == "v2int_40_30") {
- return "vec2<i32>(40, 30)";
- }
- if (assembly == "cast_int_v2uint_10_20") {
- return "bitcast<vec2<i32>(vec2<u32>(10u, 20u))";
- }
- if (assembly == "v2float_50_60") {
- return "vec2<f32>(50.0, 60.0))";
- }
- if (assembly == "v2float_60_50") {
- return "vec2<f32>(60.0, 50.0))";
- }
- return "bad case";
+ if (assembly == "v2uint_10_20") {
+ return "vec2<u32>(10u, 20u)";
+ }
+ if (assembly == "v2uint_20_10") {
+ return "vec2<u32>(20u, 10u)";
+ }
+ if (assembly == "v2int_30_40") {
+ return "vec2<i32>(30i, 40i)";
+ }
+ if (assembly == "v2int_40_30") {
+ return "vec2<i32>(40i, 30i)";
+ }
+ if (assembly == "cast_int_v2uint_10_20") {
+ return "bitcast<vec2<i32>(vec2<u32>(10u, 20u))";
+ }
+ if (assembly == "v2float_50_60") {
+ return "vec2<f32>(50.0, 60.0))";
+ }
+ if (assembly == "v2float_60_50") {
+ return "vec2<f32>(60.0, 50.0))";
+ }
+ return "bad case";
}
using SpvUnaryBitTest = SpvParserTestBase<::testing::Test>;
struct BinaryData {
- const std::string res_type;
- const std::string lhs;
- const std::string op;
- const std::string rhs;
- const std::string ast_type;
- const std::string ast_lhs;
- const std::string ast_op;
- const std::string ast_rhs;
+ const std::string res_type;
+ const std::string lhs;
+ const std::string op;
+ const std::string rhs;
+ const std::string ast_type;
+ const std::string ast_lhs;
+ const std::string ast_op;
+ const std::string ast_rhs;
};
inline std::ostream& operator<<(std::ostream& out, BinaryData data) {
- out << "BinaryData{" << data.res_type << "," << data.lhs << "," << data.op
- << "," << data.rhs << "," << data.ast_type << "," << data.ast_lhs << ","
- << data.ast_op << "," << data.ast_rhs << "}";
- return out;
+ out << "BinaryData{" << data.res_type << "," << data.lhs << "," << data.op << "," << data.rhs
+ << "," << data.ast_type << "," << data.ast_lhs << "," << data.ast_op << "," << data.ast_rhs
+ << "}";
+ return out;
}
-using SpvBinaryBitTest =
- SpvParserTestBase<::testing::TestWithParam<BinaryData>>;
+using SpvBinaryBitTest = SpvParserTestBase<::testing::TestWithParam<BinaryData>>;
using SpvBinaryBitTestBasic = SpvParserTestBase<::testing::Test>;
TEST_P(SpvBinaryBitTest, EmitExpression) {
- const auto assembly = SimplePreamble() + R"(
+ const auto assembly = SimplePreamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = )" + GetParam().op +
- " %" + GetParam().res_type + " %" + GetParam().lhs +
- " %" + GetParam().rhs + R"(
+ " %" + GetParam().res_type + " %" + GetParam().lhs + " %" +
+ GetParam().rhs + R"(
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions())
- << p->error() << "\n"
- << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- std::ostringstream ss;
- ss << "let x_1 : " << GetParam().ast_type << " = (" << GetParam().ast_lhs
- << " " << GetParam().ast_op << " " << GetParam().ast_rhs << ");";
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body), HasSubstr(ss.str()))
- << assembly;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error() << "\n" << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ std::ostringstream ss;
+ ss << "let x_1 : " << GetParam().ast_type << " = (" << GetParam().ast_lhs << " "
+ << GetParam().ast_op << " " << GetParam().ast_rhs << ");";
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body), HasSubstr(ss.str())) << assembly;
}
// Use this when the result might have extra bitcasts on the outside.
struct BinaryDataGeneral {
- const std::string res_type;
- const std::string lhs;
- const std::string op;
- const std::string rhs;
- const std::string wgsl_type;
- const std::string expected;
+ const std::string res_type;
+ const std::string lhs;
+ const std::string op;
+ const std::string rhs;
+ const std::string wgsl_type;
+ const std::string expected;
};
inline std::ostream& operator<<(std::ostream& out, BinaryDataGeneral data) {
- out << "BinaryDataGeneral{" << data.res_type << "," << data.lhs << ","
- << data.op << "," << data.rhs << "," << data.wgsl_type << ","
- << data.expected << "}";
- return out;
+ out << "BinaryDataGeneral{" << data.res_type << "," << data.lhs << "," << data.op << ","
+ << data.rhs << "," << data.wgsl_type << "," << data.expected << "}";
+ return out;
}
-using SpvBinaryBitGeneralTest =
- SpvParserTestBase<::testing::TestWithParam<BinaryDataGeneral>>;
+using SpvBinaryBitGeneralTest = SpvParserTestBase<::testing::TestWithParam<BinaryDataGeneral>>;
TEST_P(SpvBinaryBitGeneralTest, EmitExpression) {
- const auto assembly = SimplePreamble() + R"(
+ const auto assembly = SimplePreamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = )" + GetParam().op +
- " %" + GetParam().res_type + " %" + GetParam().lhs +
- " %" + GetParam().rhs + R"(
+ " %" + GetParam().res_type + " %" + GetParam().lhs + " %" +
+ GetParam().rhs + R"(
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions())
- << p->error() << "\n"
- << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error() << assembly;
- std::ostringstream ss;
- ss << "let x_1 : " << GetParam().wgsl_type << " = " << GetParam().expected
- << ";\nreturn;\n";
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- EXPECT_THAT(got, HasSubstr(ss.str())) << "got:\n" << got << assembly;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error() << "\n" << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error() << assembly;
+ std::ostringstream ss;
+ ss << "let x_1 : " << GetParam().wgsl_type << " = " << GetParam().expected << ";\nreturn;\n";
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(got, HasSubstr(ss.str())) << "got:\n" << got << assembly;
}
INSTANTIATE_TEST_SUITE_P(
@@ -184,19 +175,15 @@ INSTANTIATE_TEST_SUITE_P(
SpvBinaryBitTest,
::testing::Values(
// uint uint -> uint
- BinaryData{"uint", "uint_10", "OpShiftLeftLogical", "uint_20", "u32",
- "10u", "<<", "20u"},
+ BinaryData{"uint", "uint_10", "OpShiftLeftLogical", "uint_20", "u32", "10u", "<<", "20u"},
// int, uint -> int
- BinaryData{"int", "int_30", "OpShiftLeftLogical", "uint_20", "i32",
- "30", "<<", "20u"},
+ BinaryData{"int", "int_30", "OpShiftLeftLogical", "uint_20", "i32", "30i", "<<", "20u"},
// v2uint v2uint -> v2uint
- BinaryData{"v2uint", "v2uint_10_20", "OpShiftLeftLogical",
- "v2uint_20_10", "vec2<u32>", AstFor("v2uint_10_20"), "<<",
- AstFor("v2uint_20_10")},
+ BinaryData{"v2uint", "v2uint_10_20", "OpShiftLeftLogical", "v2uint_20_10", "vec2<u32>",
+ AstFor("v2uint_10_20"), "<<", AstFor("v2uint_20_10")},
// v2int, v2uint -> v2int
- BinaryData{"v2int", "v2int_30_40", "OpShiftLeftLogical", "v2uint_20_10",
- "vec2<i32>", AstFor("v2int_30_40"), "<<",
- AstFor("v2uint_20_10")}));
+ BinaryData{"v2int", "v2int_30_40", "OpShiftLeftLogical", "v2uint_20_10", "vec2<i32>",
+ AstFor("v2int_30_40"), "<<", AstFor("v2uint_20_10")}));
INSTANTIATE_TEST_SUITE_P(
// WGSL requires second operand to be unsigned, so insert bitcasts
@@ -204,164 +191,143 @@ INSTANTIATE_TEST_SUITE_P(
SpvBinaryBitGeneralTest,
::testing::Values(
// int, int -> int
- BinaryDataGeneral{"int", "int_30", "OpShiftLeftLogical", "int_40",
- "i32", "(30 << bitcast<u32>(40))"},
+ BinaryDataGeneral{"int", "int_30", "OpShiftLeftLogical", "int_40", "i32",
+ "(30i << bitcast<u32>(40i))"},
// uint, int -> uint
- BinaryDataGeneral{"uint", "uint_10", "OpShiftLeftLogical", "int_40",
- "u32", "(10u << bitcast<u32>(40))"},
+ BinaryDataGeneral{"uint", "uint_10", "OpShiftLeftLogical", "int_40", "u32",
+ "(10u << bitcast<u32>(40i))"},
// v2uint, v2int -> v2uint
- BinaryDataGeneral{"v2uint", "v2uint_10_20", "OpShiftLeftLogical",
- "v2uint_20_10", "vec2<u32>",
- "(vec2<u32>(10u, 20u) << vec2<u32>(20u, 10u))"},
+ BinaryDataGeneral{"v2uint", "v2uint_10_20", "OpShiftLeftLogical", "v2uint_20_10",
+ "vec2<u32>", "(vec2<u32>(10u, 20u) << vec2<u32>(20u, 10u))"},
// v2int, v2int -> v2int
- BinaryDataGeneral{
- "v2int", "v2int_30_40", "OpShiftLeftLogical", "v2int_40_30",
- "vec2<i32>",
- "(vec2<i32>(30, 40) << bitcast<vec2<u32>>(vec2<i32>(40, 30)))"}));
+ BinaryDataGeneral{"v2int", "v2int_30_40", "OpShiftLeftLogical", "v2int_40_30", "vec2<i32>",
+ "(vec2<i32>(30i, 40i) << bitcast<vec2<u32>>(vec2<i32>(40i, 30i)))"}));
INSTANTIATE_TEST_SUITE_P(
SpvParserTest_ShiftLeftLogical_BitcastResult,
SpvBinaryBitGeneralTest,
::testing::Values(
// int, int -> uint
- BinaryDataGeneral{"uint", "int_30", "OpShiftLeftLogical", "uint_10",
- "u32", "bitcast<u32>((30 << 10u))"},
+ BinaryDataGeneral{"uint", "int_30", "OpShiftLeftLogical", "uint_10", "u32",
+ "bitcast<u32>((30i << 10u))"},
// v2uint, v2int -> v2uint
- BinaryDataGeneral{
- "v2uint", "v2int_30_40", "OpShiftLeftLogical", "v2uint_20_10",
- "vec2<u32>",
- "bitcast<vec2<u32>>((vec2<i32>(30, 40) << vec2<u32>(20u, 10u)))"}));
+ BinaryDataGeneral{"v2uint", "v2int_30_40", "OpShiftLeftLogical", "v2uint_20_10",
+ "vec2<u32>",
+ "bitcast<vec2<u32>>((vec2<i32>(30i, 40i) << vec2<u32>(20u, 10u)))"}));
INSTANTIATE_TEST_SUITE_P(
SpvParserTest_ShiftRightLogical_Arg2Unsigned,
SpvBinaryBitGeneralTest,
::testing::Values(
// uint, uint -> uint
- BinaryDataGeneral{"uint", "uint_10", "OpShiftRightLogical", "uint_20",
- "u32", "(10u >> 20u)"},
+ BinaryDataGeneral{"uint", "uint_10", "OpShiftRightLogical", "uint_20", "u32",
+ "(10u >> 20u)"},
// int, uint -> int
- BinaryDataGeneral{"int", "int_30", "OpShiftRightLogical", "uint_20",
- "i32", "bitcast<i32>((bitcast<u32>(30) >> 20u))"},
+ BinaryDataGeneral{"int", "int_30", "OpShiftRightLogical", "uint_20", "i32",
+ "bitcast<i32>((bitcast<u32>(30i) >> 20u))"},
// v2uint, v2uint -> v2uint
- BinaryDataGeneral{"v2uint", "v2uint_10_20", "OpShiftRightLogical",
- "v2uint_20_10", "vec2<u32>",
- "(vec2<u32>(10u, 20u) >> vec2<u32>(20u, 10u))"},
+ BinaryDataGeneral{"v2uint", "v2uint_10_20", "OpShiftRightLogical", "v2uint_20_10",
+ "vec2<u32>", "(vec2<u32>(10u, 20u) >> vec2<u32>(20u, 10u))"},
// v2int, v2uint -> v2int
BinaryDataGeneral{
- "v2int", "v2int_30_40", "OpShiftRightLogical", "v2uint_10_20",
- "vec2<i32>",
- R"(bitcast<vec2<i32>>((bitcast<vec2<u32>>(vec2<i32>(30, 40)) >> vec2<u32>(10u, 20u))))"}));
+ "v2int", "v2int_30_40", "OpShiftRightLogical", "v2uint_10_20", "vec2<i32>",
+ R"(bitcast<vec2<i32>>((bitcast<vec2<u32>>(vec2<i32>(30i, 40i)) >> vec2<u32>(10u, 20u))))"}));
INSTANTIATE_TEST_SUITE_P(
SpvParserTest_ShiftRightLogical_Arg2Signed,
SpvBinaryBitGeneralTest,
::testing::Values(
// uint, int -> uint
- BinaryDataGeneral{"uint", "uint_10", "OpShiftRightLogical", "int_30",
- "u32", "(10u >> bitcast<u32>(30))"},
+ BinaryDataGeneral{"uint", "uint_10", "OpShiftRightLogical", "int_30", "u32",
+ "(10u >> bitcast<u32>(30i))"},
// int, int -> int
- BinaryDataGeneral{
- "int", "int_30", "OpShiftRightLogical", "int_40", "i32",
- "bitcast<i32>((bitcast<u32>(30) >> bitcast<u32>(40)))"},
+ BinaryDataGeneral{"int", "int_30", "OpShiftRightLogical", "int_40", "i32",
+ "bitcast<i32>((bitcast<u32>(30i) >> bitcast<u32>(40i)))"},
// v2uint, v2int -> v2uint
- BinaryDataGeneral{
- "v2uint", "v2uint_10_20", "OpShiftRightLogical", "v2int_30_40",
- "vec2<u32>",
- "(vec2<u32>(10u, 20u) >> bitcast<vec2<u32>>(vec2<i32>(30, 40)))"},
+ BinaryDataGeneral{"v2uint", "v2uint_10_20", "OpShiftRightLogical", "v2int_30_40",
+ "vec2<u32>",
+ "(vec2<u32>(10u, 20u) >> bitcast<vec2<u32>>(vec2<i32>(30i, 40i)))"},
// v2int, v2int -> v2int
BinaryDataGeneral{
- "v2int", "v2int_40_30", "OpShiftRightLogical", "v2int_30_40",
- "vec2<i32>",
- R"(bitcast<vec2<i32>>((bitcast<vec2<u32>>(vec2<i32>(40, 30)) >> bitcast<vec2<u32>>(vec2<i32>(30, 40)))))"}));
+ "v2int", "v2int_40_30", "OpShiftRightLogical", "v2int_30_40", "vec2<i32>",
+ R"(bitcast<vec2<i32>>((bitcast<vec2<u32>>(vec2<i32>(40i, 30i)) >> bitcast<vec2<u32>>(vec2<i32>(30i, 40i)))))"}));
INSTANTIATE_TEST_SUITE_P(
SpvParserTest_ShiftRightLogical_BitcastResult,
SpvBinaryBitGeneralTest,
::testing::Values(
// uint, uint -> int
- BinaryDataGeneral{"int", "uint_20", "OpShiftRightLogical", "uint_10",
- "i32", "bitcast<i32>((20u >> 10u))"},
+ BinaryDataGeneral{"int", "uint_20", "OpShiftRightLogical", "uint_10", "i32",
+ "bitcast<i32>((20u >> 10u))"},
// v2uint, v2uint -> v2int
- BinaryDataGeneral{
- "v2int", "v2uint_10_20", "OpShiftRightLogical", "v2uint_20_10",
- "vec2<i32>",
- R"(bitcast<vec2<i32>>((vec2<u32>(10u, 20u) >> vec2<u32>(20u, 10u))))"}));
+ BinaryDataGeneral{"v2int", "v2uint_10_20", "OpShiftRightLogical", "v2uint_20_10",
+ "vec2<i32>",
+ R"(bitcast<vec2<i32>>((vec2<u32>(10u, 20u) >> vec2<u32>(20u, 10u))))"}));
INSTANTIATE_TEST_SUITE_P(
SpvParserTest_ShiftRightArithmetic_Arg2Unsigned,
SpvBinaryBitGeneralTest,
::testing::Values(
// uint, uint -> uint
- BinaryDataGeneral{"uint", "uint_10", "OpShiftRightArithmetic",
- "uint_20", "u32",
+ BinaryDataGeneral{"uint", "uint_10", "OpShiftRightArithmetic", "uint_20", "u32",
"bitcast<u32>((bitcast<i32>(10u) >> 20u))"},
// int, uint -> int
- BinaryDataGeneral{"int", "int_30", "OpShiftRightArithmetic", "uint_10",
- "i32", "(30 >> 10u)"},
+ BinaryDataGeneral{"int", "int_30", "OpShiftRightArithmetic", "uint_10", "i32",
+ "(30i >> 10u)"},
// v2uint, v2uint -> v2uint
BinaryDataGeneral{
- "v2uint", "v2uint_10_20", "OpShiftRightArithmetic", "v2uint_20_10",
- "vec2<u32>",
+ "v2uint", "v2uint_10_20", "OpShiftRightArithmetic", "v2uint_20_10", "vec2<u32>",
R"(bitcast<vec2<u32>>((bitcast<vec2<i32>>(vec2<u32>(10u, 20u)) >> vec2<u32>(20u, 10u))))"},
// v2int, v2uint -> v2int
- BinaryDataGeneral{"v2int", "v2int_40_30", "OpShiftRightArithmetic",
- "v2uint_20_10", "vec2<i32>",
- "(vec2<i32>(40, 30) >> vec2<u32>(20u, 10u))"}));
+ BinaryDataGeneral{"v2int", "v2int_40_30", "OpShiftRightArithmetic", "v2uint_20_10",
+ "vec2<i32>", "(vec2<i32>(40i, 30i) >> vec2<u32>(20u, 10u))"}));
INSTANTIATE_TEST_SUITE_P(
SpvParserTest_ShiftRightArithmetic_Arg2Signed,
SpvBinaryBitGeneralTest,
::testing::Values(
// uint, int -> uint
- BinaryDataGeneral{
- "uint", "uint_10", "OpShiftRightArithmetic", "int_30", "u32",
- "bitcast<u32>((bitcast<i32>(10u) >> bitcast<u32>(30)))"},
+ BinaryDataGeneral{"uint", "uint_10", "OpShiftRightArithmetic", "int_30", "u32",
+ "bitcast<u32>((bitcast<i32>(10u) >> bitcast<u32>(30i)))"},
// int, int -> int
- BinaryDataGeneral{"int", "int_30", "OpShiftRightArithmetic", "int_40",
- "i32", "(30 >> bitcast<u32>(40))"},
+ BinaryDataGeneral{"int", "int_30", "OpShiftRightArithmetic", "int_40", "i32",
+ "(30i >> bitcast<u32>(40i))"},
// v2uint, v2int -> v2uint
BinaryDataGeneral{
- "v2uint", "v2uint_10_20", "OpShiftRightArithmetic", "v2int_30_40",
- "vec2<u32>",
- R"(bitcast<vec2<u32>>((bitcast<vec2<i32>>(vec2<u32>(10u, 20u)) >> bitcast<vec2<u32>>(vec2<i32>(30, 40)))))"},
+ "v2uint", "v2uint_10_20", "OpShiftRightArithmetic", "v2int_30_40", "vec2<u32>",
+ R"(bitcast<vec2<u32>>((bitcast<vec2<i32>>(vec2<u32>(10u, 20u)) >> bitcast<vec2<u32>>(vec2<i32>(30i, 40i)))))"},
// v2int, v2int -> v2int
- BinaryDataGeneral{
- "v2int", "v2int_40_30", "OpShiftRightArithmetic", "v2int_30_40",
- "vec2<i32>",
- "(vec2<i32>(40, 30) >> bitcast<vec2<u32>>(vec2<i32>(30, 40)))"}));
+ BinaryDataGeneral{"v2int", "v2int_40_30", "OpShiftRightArithmetic", "v2int_30_40",
+ "vec2<i32>",
+ "(vec2<i32>(40i, 30i) >> bitcast<vec2<u32>>(vec2<i32>(30i, 40i)))"}));
INSTANTIATE_TEST_SUITE_P(
SpvParserTest_ShiftRightArithmetic_BitcastResult,
SpvBinaryBitGeneralTest,
::testing::Values(
// int, uint -> uint
- BinaryDataGeneral{"uint", "int_30", "OpShiftRightArithmetic", "uint_10",
- "u32", "bitcast<u32>((30 >> 10u))"},
+ BinaryDataGeneral{"uint", "int_30", "OpShiftRightArithmetic", "uint_10", "u32",
+ "bitcast<u32>((30i >> 10u))"},
// v2int, v2uint -> v2uint
- BinaryDataGeneral{
- "v2uint", "v2int_30_40", "OpShiftRightArithmetic", "v2uint_20_10",
- "vec2<u32>",
- "bitcast<vec2<u32>>((vec2<i32>(30, 40) >> vec2<u32>(20u, 10u)))"}));
+ BinaryDataGeneral{"v2uint", "v2int_30_40", "OpShiftRightArithmetic", "v2uint_20_10",
+ "vec2<u32>",
+ "bitcast<vec2<u32>>((vec2<i32>(30i, 40i) >> vec2<u32>(20u, 10u)))"}));
INSTANTIATE_TEST_SUITE_P(
SpvParserTest_BitwiseAnd,
SpvBinaryBitTest,
::testing::Values(
// Both uint
- BinaryData{"uint", "uint_10", "OpBitwiseAnd", "uint_20", "u32", "10u",
- "&", "20u"},
+ BinaryData{"uint", "uint_10", "OpBitwiseAnd", "uint_20", "u32", "10u", "&", "20u"},
// Both int
- BinaryData{"int", "int_30", "OpBitwiseAnd", "int_40", "i32", "30", "&",
- "40"},
+ BinaryData{"int", "int_30", "OpBitwiseAnd", "int_40", "i32", "30i", "&", "40i"},
// TODO(crbug.com/tint/678): Resolver fails on vector bitwise operations
// Both v2uint
- BinaryData{"v2uint", "v2uint_10_20", "OpBitwiseAnd", "v2uint_20_10",
- "vec2<u32>", AstFor("v2uint_10_20"), "&",
- AstFor("v2uint_20_10")},
+ BinaryData{"v2uint", "v2uint_10_20", "OpBitwiseAnd", "v2uint_20_10", "vec2<u32>",
+ AstFor("v2uint_10_20"), "&", AstFor("v2uint_20_10")},
// Both v2int
- BinaryData{"v2int", "v2int_30_40", "OpBitwiseAnd", "v2int_40_30",
- "vec2<i32>", AstFor("v2int_30_40"), "&",
- AstFor("v2int_40_30")}));
+ BinaryData{"v2int", "v2int_30_40", "OpBitwiseAnd", "v2int_40_30", "vec2<i32>",
+ AstFor("v2int_30_40"), "&", AstFor("v2int_40_30")}));
INSTANTIATE_TEST_SUITE_P(
SpvParserTest_BitwiseAnd_MixedSignedness,
@@ -369,45 +335,40 @@ INSTANTIATE_TEST_SUITE_P(
::testing::Values(
// Mixed, uint <- int uint
BinaryDataGeneral{"uint", "int_30", "OpBitwiseAnd", "uint_10", "u32",
- "bitcast<u32>((30 & bitcast<i32>(10u)))"},
+ "bitcast<u32>((30i & bitcast<i32>(10u)))"},
// Mixed, int <- int uint
BinaryDataGeneral{"int", "int_30", "OpBitwiseAnd", "uint_10", "i32",
- "(30 & bitcast<i32>(10u))"},
+ "(30i & bitcast<i32>(10u))"},
// Mixed, uint <- uint int
BinaryDataGeneral{"uint", "uint_10", "OpBitwiseAnd", "int_30", "u32",
- "(10u & bitcast<u32>(30))"},
+ "(10u & bitcast<u32>(30i))"},
// Mixed, int <- uint uint
BinaryDataGeneral{"int", "uint_20", "OpBitwiseAnd", "uint_10", "i32",
"bitcast<i32>((20u & 10u))"},
// Mixed, returning v2uint
BinaryDataGeneral{
- "v2uint", "v2int_30_40", "OpBitwiseAnd", "v2uint_10_20",
- "vec2<u32>",
- R"(bitcast<vec2<u32>>((vec2<i32>(30, 40) & bitcast<vec2<i32>>(vec2<u32>(10u, 20u)))))"},
+ "v2uint", "v2int_30_40", "OpBitwiseAnd", "v2uint_10_20", "vec2<u32>",
+ R"(bitcast<vec2<u32>>((vec2<i32>(30i, 40i) & bitcast<vec2<i32>>(vec2<u32>(10u, 20u)))))"},
// Mixed, returning v2int
BinaryDataGeneral{
"v2int", "v2uint_10_20", "OpBitwiseAnd", "v2int_40_30", "vec2<i32>",
- R"(bitcast<vec2<i32>>((vec2<u32>(10u, 20u) & bitcast<vec2<u32>>(vec2<i32>(40, 30)))))"}));
+ R"(bitcast<vec2<i32>>((vec2<u32>(10u, 20u) & bitcast<vec2<u32>>(vec2<i32>(40i, 30i)))))"}));
INSTANTIATE_TEST_SUITE_P(
SpvParserTest_BitwiseOr,
SpvBinaryBitTest,
::testing::Values(
// Both uint
- BinaryData{"uint", "uint_10", "OpBitwiseOr", "uint_20", "u32", "10u",
- "|", "20u"},
+ BinaryData{"uint", "uint_10", "OpBitwiseOr", "uint_20", "u32", "10u", "|", "20u"},
// Both int
- BinaryData{"int", "int_30", "OpBitwiseOr", "int_40", "i32", "30", "|",
- "40"},
+ BinaryData{"int", "int_30", "OpBitwiseOr", "int_40", "i32", "30i", "|", "40i"},
// TODO(crbug.com/tint/678): Resolver fails on vector bitwise operations
// Both v2uint
- BinaryData{"v2uint", "v2uint_10_20", "OpBitwiseOr", "v2uint_20_10",
- "vec2<u32>", AstFor("v2uint_10_20"), "|",
- AstFor("v2uint_20_10")},
+ BinaryData{"v2uint", "v2uint_10_20", "OpBitwiseOr", "v2uint_20_10", "vec2<u32>",
+ AstFor("v2uint_10_20"), "|", AstFor("v2uint_20_10")},
// Both v2int
- BinaryData{"v2int", "v2int_30_40", "OpBitwiseOr", "v2int_40_30",
- "vec2<i32>", AstFor("v2int_30_40"), "|",
- AstFor("v2int_40_30")}));
+ BinaryData{"v2int", "v2int_30_40", "OpBitwiseOr", "v2int_40_30", "vec2<i32>",
+ AstFor("v2int_30_40"), "|", AstFor("v2int_40_30")}));
INSTANTIATE_TEST_SUITE_P(
SpvParserTest_BitwiseOr_MixedSignedness,
@@ -415,44 +376,40 @@ INSTANTIATE_TEST_SUITE_P(
::testing::Values(
// Mixed, uint <- int uint
BinaryDataGeneral{"uint", "int_30", "OpBitwiseOr", "uint_10", "u32",
- "bitcast<u32>((30 | bitcast<i32>(10u)))"},
+ "bitcast<u32>((30i | bitcast<i32>(10u)))"},
// Mixed, int <- int uint
BinaryDataGeneral{"int", "int_30", "OpBitwiseOr", "uint_10", "i32",
- "(30 | bitcast<i32>(10u))"},
+ "(30i | bitcast<i32>(10u))"},
// Mixed, uint <- uint int
BinaryDataGeneral{"uint", "uint_10", "OpBitwiseOr", "int_30", "u32",
- "(10u | bitcast<u32>(30))"},
+ "(10u | bitcast<u32>(30i))"},
// Mixed, int <- uint uint
BinaryDataGeneral{"int", "uint_20", "OpBitwiseOr", "uint_10", "i32",
"bitcast<i32>((20u | 10u))"},
// Mixed, returning v2uint
BinaryDataGeneral{
"v2uint", "v2int_30_40", "OpBitwiseOr", "v2uint_10_20", "vec2<u32>",
- R"(bitcast<vec2<u32>>((vec2<i32>(30, 40) | bitcast<vec2<i32>>(vec2<u32>(10u, 20u)))))"},
+ R"(bitcast<vec2<u32>>((vec2<i32>(30i, 40i) | bitcast<vec2<i32>>(vec2<u32>(10u, 20u)))))"},
// Mixed, returning v2int
BinaryDataGeneral{
"v2int", "v2uint_10_20", "OpBitwiseOr", "v2int_40_30", "vec2<i32>",
- R"(bitcast<vec2<i32>>((vec2<u32>(10u, 20u) | bitcast<vec2<u32>>(vec2<i32>(40, 30)))))"}));
+ R"(bitcast<vec2<i32>>((vec2<u32>(10u, 20u) | bitcast<vec2<u32>>(vec2<i32>(40i, 30i)))))"}));
INSTANTIATE_TEST_SUITE_P(
SpvParserTest_BitwiseXor,
SpvBinaryBitTest,
::testing::Values(
// Both uint
- BinaryData{"uint", "uint_10", "OpBitwiseXor", "uint_20", "u32", "10u",
- "^", "20u"},
+ BinaryData{"uint", "uint_10", "OpBitwiseXor", "uint_20", "u32", "10u", "^", "20u"},
// Both int
- BinaryData{"int", "int_30", "OpBitwiseXor", "int_40", "i32", "30", "^",
- "40"},
+ BinaryData{"int", "int_30", "OpBitwiseXor", "int_40", "i32", "30i", "^", "40i"},
// TODO(crbug.com/tint/678): Resolver fails on vector bitwise operations
// Both v2uint
- BinaryData{"v2uint", "v2uint_10_20", "OpBitwiseXor", "v2uint_20_10",
- "vec2<u32>", AstFor("v2uint_10_20"), "^",
- AstFor("v2uint_20_10")},
+ BinaryData{"v2uint", "v2uint_10_20", "OpBitwiseXor", "v2uint_20_10", "vec2<u32>",
+ AstFor("v2uint_10_20"), "^", AstFor("v2uint_20_10")},
// Both v2int
- BinaryData{"v2int", "v2int_30_40", "OpBitwiseXor", "v2int_40_30",
- "vec2<i32>", AstFor("v2int_30_40"), "^",
- AstFor("v2int_40_30")}));
+ BinaryData{"v2int", "v2int_30_40", "OpBitwiseXor", "v2int_40_30", "vec2<i32>",
+ AstFor("v2int_30_40"), "^", AstFor("v2int_40_30")}));
INSTANTIATE_TEST_SUITE_P(
SpvParserTest_BitwiseXor_MixedSignedness,
@@ -460,169 +417,164 @@ INSTANTIATE_TEST_SUITE_P(
::testing::Values(
// Mixed, uint <- int uint
BinaryDataGeneral{"uint", "int_30", "OpBitwiseXor", "uint_10", "u32",
- "bitcast<u32>((30 ^ bitcast<i32>(10u)))"},
+ "bitcast<u32>((30i ^ bitcast<i32>(10u)))"},
// Mixed, int <- int uint
BinaryDataGeneral{"int", "int_30", "OpBitwiseXor", "uint_10", "i32",
- "(30 ^ bitcast<i32>(10u))"},
+ "(30i ^ bitcast<i32>(10u))"},
// Mixed, uint <- uint int
BinaryDataGeneral{"uint", "uint_10", "OpBitwiseXor", "int_30", "u32",
- "(10u ^ bitcast<u32>(30))"},
+ "(10u ^ bitcast<u32>(30i))"},
// Mixed, int <- uint uint
BinaryDataGeneral{"int", "uint_20", "OpBitwiseXor", "uint_10", "i32",
"bitcast<i32>((20u ^ 10u))"},
// Mixed, returning v2uint
BinaryDataGeneral{
- "v2uint", "v2int_30_40", "OpBitwiseXor", "v2uint_10_20",
- "vec2<u32>",
- R"(bitcast<vec2<u32>>((vec2<i32>(30, 40) ^ bitcast<vec2<i32>>(vec2<u32>(10u, 20u)))))"},
+ "v2uint", "v2int_30_40", "OpBitwiseXor", "v2uint_10_20", "vec2<u32>",
+ R"(bitcast<vec2<u32>>((vec2<i32>(30i, 40i) ^ bitcast<vec2<i32>>(vec2<u32>(10u, 20u)))))"},
// Mixed, returning v2int
BinaryDataGeneral{
"v2int", "v2uint_10_20", "OpBitwiseXor", "v2int_40_30", "vec2<i32>",
- R"(bitcast<vec2<i32>>((vec2<u32>(10u, 20u) ^ bitcast<vec2<u32>>(vec2<i32>(40, 30)))))"}));
+ R"(bitcast<vec2<i32>>((vec2<u32>(10u, 20u) ^ bitcast<vec2<u32>>(vec2<i32>(40i, 30i)))))"}));
TEST_F(SpvUnaryBitTest, Not_Int_Int) {
- const auto assembly = SimplePreamble() + R"(
+ const auto assembly = SimplePreamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpNot %int %int_30
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto body = test::ToString(p->program(), ast_body);
- EXPECT_THAT(body, HasSubstr("let x_1 : i32 = ~(30);"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto body = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(body, HasSubstr("let x_1 : i32 = ~(30i);"));
}
TEST_F(SpvUnaryBitTest, Not_Int_Uint) {
- const auto assembly = SimplePreamble() + R"(
+ const auto assembly = SimplePreamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpNot %int %uint_10
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto body = test::ToString(p->program(), ast_body);
- EXPECT_THAT(body, HasSubstr("let x_1 : i32 = bitcast<i32>(~(10u));"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto body = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(body, HasSubstr("let x_1 : i32 = bitcast<i32>(~(10u));"));
}
TEST_F(SpvUnaryBitTest, Not_Uint_Int) {
- const auto assembly = SimplePreamble() + R"(
+ const auto assembly = SimplePreamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpNot %uint %int_30
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto body = test::ToString(p->program(), ast_body);
- EXPECT_THAT(body, HasSubstr("let x_1 : u32 = bitcast<u32>(~(30));"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto body = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(body, HasSubstr("let x_1 : u32 = bitcast<u32>(~(30i));"));
}
TEST_F(SpvUnaryBitTest, Not_Uint_Uint) {
- const auto assembly = SimplePreamble() + R"(
+ const auto assembly = SimplePreamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpNot %uint %uint_10
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto body = test::ToString(p->program(), ast_body);
- EXPECT_THAT(body, HasSubstr("let x_1 : u32 = ~(10u);"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto body = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(body, HasSubstr("let x_1 : u32 = ~(10u);"));
}
TEST_F(SpvUnaryBitTest, Not_SignedVec_SignedVec) {
- const auto assembly = SimplePreamble() + R"(
+ const auto assembly = SimplePreamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpNot %v2int %v2int_30_40
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto body = test::ToString(p->program(), ast_body);
- EXPECT_THAT(body, HasSubstr("let x_1 : vec2<i32> = ~(vec2<i32>(30, 40));"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto body = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(body, HasSubstr("let x_1 : vec2<i32> = ~(vec2<i32>(30i, 40i));"));
}
TEST_F(SpvUnaryBitTest, Not_SignedVec_UnsignedVec) {
- const auto assembly = SimplePreamble() + R"(
+ const auto assembly = SimplePreamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpNot %v2int %v2uint_10_20
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto body = test::ToString(p->program(), ast_body);
- EXPECT_THAT(
- body,
- HasSubstr(
- "let x_1 : vec2<i32> = bitcast<vec2<i32>>(~(vec2<u32>(10u, 20u)));"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto body = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(body,
+ HasSubstr("let x_1 : vec2<i32> = bitcast<vec2<i32>>(~(vec2<u32>(10u, 20u)));"));
}
TEST_F(SpvUnaryBitTest, Not_UnsignedVec_SignedVec) {
- const auto assembly = SimplePreamble() + R"(
+ const auto assembly = SimplePreamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpNot %v2uint %v2int_30_40
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto body = test::ToString(p->program(), ast_body);
- EXPECT_THAT(
- body,
- HasSubstr(
- "let x_1 : vec2<u32> = bitcast<vec2<u32>>(~(vec2<i32>(30, 40)));"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto body = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(body,
+ HasSubstr("let x_1 : vec2<u32> = bitcast<vec2<u32>>(~(vec2<i32>(30i, 40i)));"));
}
TEST_F(SpvUnaryBitTest, Not_UnsignedVec_UnsignedVec) {
- const auto assembly = SimplePreamble() + R"(
+ const auto assembly = SimplePreamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpNot %v2uint %v2uint_10_20
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto body = test::ToString(p->program(), ast_body);
- EXPECT_THAT(body, HasSubstr("let x_1 : vec2<u32> = ~(vec2<u32>(10u, 20u));"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto body = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(body, HasSubstr("let x_1 : vec2<u32> = ~(vec2<u32>(10u, 20u));"));
}
std::string BitTestPreamble() {
- return R"(
+ return R"(
OpCapability Shader
%glsl = OpExtInstImport "GLSL.std.450"
OpMemoryModel Logical GLSL450
@@ -635,7 +587,7 @@ std::string BitTestPreamble() {
OpName %v2i1 "v2i1"
)" + CommonTypes() +
- R"(
+ R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
@@ -648,399 +600,365 @@ std::string BitTestPreamble() {
}
TEST_F(SpvUnaryBitTest, BitCount_Uint_Uint) {
- const auto assembly = BitTestPreamble() + R"(
+ const auto assembly = BitTestPreamble() + R"(
%1 = OpBitCount %uint %u1
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto body = test::ToString(p->program(), ast_body);
- EXPECT_THAT(body, HasSubstr("let x_1 : u32 = countOneBits(u1);")) << body;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto body = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(body, HasSubstr("let x_1 : u32 = countOneBits(u1);")) << body;
}
TEST_F(SpvUnaryBitTest, BitCount_Uint_Int) {
- const auto assembly = BitTestPreamble() + R"(
+ const auto assembly = BitTestPreamble() + R"(
%1 = OpBitCount %uint %i1
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto body = test::ToString(p->program(), ast_body);
- EXPECT_THAT(body,
- HasSubstr("let x_1 : u32 = bitcast<u32>(countOneBits(i1));"))
- << body;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto body = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(body, HasSubstr("let x_1 : u32 = bitcast<u32>(countOneBits(i1));")) << body;
}
TEST_F(SpvUnaryBitTest, BitCount_Int_Uint) {
- const auto assembly = BitTestPreamble() + R"(
+ const auto assembly = BitTestPreamble() + R"(
%1 = OpBitCount %int %u1
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto body = test::ToString(p->program(), ast_body);
- EXPECT_THAT(body,
- HasSubstr("let x_1 : i32 = bitcast<i32>(countOneBits(u1));"))
- << body;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto body = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(body, HasSubstr("let x_1 : i32 = bitcast<i32>(countOneBits(u1));")) << body;
}
TEST_F(SpvUnaryBitTest, BitCount_Int_Int) {
- const auto assembly = BitTestPreamble() + R"(
+ const auto assembly = BitTestPreamble() + R"(
%1 = OpBitCount %int %i1
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto body = test::ToString(p->program(), ast_body);
- EXPECT_THAT(body, HasSubstr("let x_1 : i32 = countOneBits(i1);")) << body;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto body = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(body, HasSubstr("let x_1 : i32 = countOneBits(i1);")) << body;
}
TEST_F(SpvUnaryBitTest, BitCount_UintVector_UintVector) {
- const auto assembly = BitTestPreamble() + R"(
+ const auto assembly = BitTestPreamble() + R"(
%1 = OpBitCount %v2uint %v2u1
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto body = test::ToString(p->program(), ast_body);
- EXPECT_THAT(body, HasSubstr("let x_1 : vec2<u32> = countOneBits(v2u1);"))
- << body;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto body = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(body, HasSubstr("let x_1 : vec2<u32> = countOneBits(v2u1);")) << body;
}
TEST_F(SpvUnaryBitTest, BitCount_UintVector_IntVector) {
- const auto assembly = BitTestPreamble() + R"(
+ const auto assembly = BitTestPreamble() + R"(
%1 = OpBitCount %v2uint %v2i1
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto body = test::ToString(p->program(), ast_body);
- EXPECT_THAT(
- body,
- HasSubstr(
- "let x_1 : vec2<u32> = bitcast<vec2<u32>>(countOneBits(v2i1));"))
- << body;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto body = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(body, HasSubstr("let x_1 : vec2<u32> = bitcast<vec2<u32>>(countOneBits(v2i1));"))
+ << body;
}
TEST_F(SpvUnaryBitTest, BitCount_IntVector_UintVector) {
- const auto assembly = BitTestPreamble() + R"(
+ const auto assembly = BitTestPreamble() + R"(
%1 = OpBitCount %v2int %v2u1
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto body = test::ToString(p->program(), ast_body);
- EXPECT_THAT(
- body,
- HasSubstr(
- "let x_1 : vec2<i32> = bitcast<vec2<i32>>(countOneBits(v2u1));"))
- << body;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto body = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(body, HasSubstr("let x_1 : vec2<i32> = bitcast<vec2<i32>>(countOneBits(v2u1));"))
+ << body;
}
TEST_F(SpvUnaryBitTest, BitCount_IntVector_IntVector) {
- const auto assembly = BitTestPreamble() + R"(
+ const auto assembly = BitTestPreamble() + R"(
%1 = OpBitCount %v2int %v2i1
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto body = test::ToString(p->program(), ast_body);
- EXPECT_THAT(body, HasSubstr("let x_1 : vec2<i32> = countOneBits(v2i1);"))
- << body;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto body = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(body, HasSubstr("let x_1 : vec2<i32> = countOneBits(v2i1);")) << body;
}
TEST_F(SpvUnaryBitTest, BitReverse_Uint_Uint) {
- const auto assembly = BitTestPreamble() + R"(
+ const auto assembly = BitTestPreamble() + R"(
%1 = OpBitReverse %uint %u1
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto body = test::ToString(p->program(), ast_body);
- EXPECT_THAT(body, HasSubstr("let x_1 : u32 = reverseBits(u1);")) << body;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto body = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(body, HasSubstr("let x_1 : u32 = reverseBits(u1);")) << body;
}
TEST_F(SpvUnaryBitTest, BitReverse_Uint_Int) {
- const auto assembly = BitTestPreamble() + R"(
+ const auto assembly = BitTestPreamble() + R"(
%1 = OpBitReverse %uint %i1
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- EXPECT_FALSE(p->Parse());
- EXPECT_FALSE(p->success());
- EXPECT_THAT(
- p->error(),
- HasSubstr("Expected Base Type to be equal to Result Type: BitReverse"));
+ auto p = parser(test::Assemble(assembly));
+ EXPECT_FALSE(p->Parse());
+ EXPECT_FALSE(p->success());
+ EXPECT_THAT(p->error(), HasSubstr("Expected Base Type to be equal to Result Type: BitReverse"));
}
TEST_F(SpvUnaryBitTest, BitReverse_Int_Uint) {
- const auto assembly = BitTestPreamble() + R"(
+ const auto assembly = BitTestPreamble() + R"(
%1 = OpBitReverse %int %u1
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- EXPECT_FALSE(p->Parse());
- EXPECT_FALSE(p->success());
- EXPECT_THAT(
- p->error(),
- HasSubstr("Expected Base Type to be equal to Result Type: BitReverse"));
+ auto p = parser(test::Assemble(assembly));
+ EXPECT_FALSE(p->Parse());
+ EXPECT_FALSE(p->success());
+ EXPECT_THAT(p->error(), HasSubstr("Expected Base Type to be equal to Result Type: BitReverse"));
}
TEST_F(SpvUnaryBitTest, BitReverse_Int_Int) {
- const auto assembly = BitTestPreamble() + R"(
+ const auto assembly = BitTestPreamble() + R"(
%1 = OpBitReverse %int %i1
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto body = test::ToString(p->program(), ast_body);
- EXPECT_THAT(body, HasSubstr("let x_1 : i32 = reverseBits(i1);")) << body;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto body = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(body, HasSubstr("let x_1 : i32 = reverseBits(i1);")) << body;
}
TEST_F(SpvUnaryBitTest, BitReverse_UintVector_UintVector) {
- const auto assembly = BitTestPreamble() + R"(
+ const auto assembly = BitTestPreamble() + R"(
%1 = OpBitReverse %v2uint %v2u1
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto body = test::ToString(p->program(), ast_body);
- EXPECT_THAT(body, HasSubstr("let x_1 : vec2<u32> = reverseBits(v2u1);"))
- << body;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto body = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(body, HasSubstr("let x_1 : vec2<u32> = reverseBits(v2u1);")) << body;
}
TEST_F(SpvUnaryBitTest, BitReverse_UintVector_IntVector) {
- const auto assembly = BitTestPreamble() + R"(
+ const auto assembly = BitTestPreamble() + R"(
%1 = OpBitReverse %v2uint %v2i1
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- EXPECT_FALSE(p->Parse());
- EXPECT_FALSE(p->success());
- EXPECT_THAT(
- p->error(),
- HasSubstr("Expected Base Type to be equal to Result Type: BitReverse"));
+ auto p = parser(test::Assemble(assembly));
+ EXPECT_FALSE(p->Parse());
+ EXPECT_FALSE(p->success());
+ EXPECT_THAT(p->error(), HasSubstr("Expected Base Type to be equal to Result Type: BitReverse"));
}
TEST_F(SpvUnaryBitTest, BitReverse_IntVector_UintVector) {
- const auto assembly = BitTestPreamble() + R"(
+ const auto assembly = BitTestPreamble() + R"(
%1 = OpBitReverse %v2int %v2u1
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- EXPECT_FALSE(p->Parse());
- EXPECT_FALSE(p->success());
- EXPECT_THAT(
- p->error(),
- HasSubstr("Expected Base Type to be equal to Result Type: BitReverse"));
+ auto p = parser(test::Assemble(assembly));
+ EXPECT_FALSE(p->Parse());
+ EXPECT_FALSE(p->success());
+ EXPECT_THAT(p->error(), HasSubstr("Expected Base Type to be equal to Result Type: BitReverse"));
}
TEST_F(SpvUnaryBitTest, BitReverse_IntVector_IntVector) {
- const auto assembly = BitTestPreamble() + R"(
+ const auto assembly = BitTestPreamble() + R"(
%1 = OpBitReverse %v2int %v2i1
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto body = test::ToString(p->program(), ast_body);
- EXPECT_THAT(body, HasSubstr("let x_1 : vec2<i32> = reverseBits(v2i1);"))
- << body;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto body = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(body, HasSubstr("let x_1 : vec2<i32> = reverseBits(v2i1);")) << body;
}
TEST_F(SpvUnaryBitTest, InsertBits_Int) {
- const auto assembly = BitTestPreamble() + R"(
+ const auto assembly = BitTestPreamble() + R"(
%1 = OpBitFieldInsert %v2int %int_30 %int_40 %uint_10 %uint_20
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto body = test::ToString(p->program(), ast_body);
- EXPECT_THAT(body,
- HasSubstr("let x_1 : vec2<i32> = insertBits(30, 40, 10u, 20u);"))
- << body;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto body = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(body, HasSubstr("let x_1 : vec2<i32> = insertBits(30i, 40i, 10u, 20u);")) << body;
}
TEST_F(SpvUnaryBitTest, InsertBits_IntVector) {
- const auto assembly = BitTestPreamble() + R"(
+ const auto assembly = BitTestPreamble() + R"(
%1 = OpBitFieldInsert %v2int %v2int_30_40 %v2int_40_30 %uint_10 %uint_20
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto body = test::ToString(p->program(), ast_body);
- EXPECT_THAT(
- body,
- HasSubstr(
- R"(let x_1 : vec2<i32> = insertBits(vec2<i32>(30, 40), vec2<i32>(40, 30), 10u, 20u);)"))
- << body;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto body = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(
+ body,
+ HasSubstr(
+ R"(let x_1 : vec2<i32> = insertBits(vec2<i32>(30i, 40i), vec2<i32>(40i, 30i), 10u, 20u);)"))
+ << body;
}
TEST_F(SpvUnaryBitTest, InsertBits_Uint) {
- const auto assembly = BitTestPreamble() + R"(
+ const auto assembly = BitTestPreamble() + R"(
%1 = OpBitFieldInsert %v2uint %uint_20 %uint_10 %uint_10 %uint_20
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto body = test::ToString(p->program(), ast_body);
- EXPECT_THAT(
- body, HasSubstr("let x_1 : vec2<u32> = insertBits(20u, 10u, 10u, 20u);"))
- << body;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto body = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(body, HasSubstr("let x_1 : vec2<u32> = insertBits(20u, 10u, 10u, 20u);")) << body;
}
TEST_F(SpvUnaryBitTest, InsertBits_UintVector) {
- const auto assembly = BitTestPreamble() + R"(
+ const auto assembly = BitTestPreamble() + R"(
%1 = OpBitFieldInsert %v2uint %v2uint_10_20 %v2uint_20_10 %uint_10 %uint_20
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto body = test::ToString(p->program(), ast_body);
- EXPECT_THAT(
- body,
- HasSubstr(
- R"(let x_1 : vec2<u32> = insertBits(vec2<u32>(10u, 20u), vec2<u32>(20u, 10u), 10u, 20u);)"))
- << body;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto body = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(
+ body,
+ HasSubstr(
+ R"(let x_1 : vec2<u32> = insertBits(vec2<u32>(10u, 20u), vec2<u32>(20u, 10u), 10u, 20u);)"))
+ << body;
}
TEST_F(SpvUnaryBitTest, ExtractBits_Int) {
- const auto assembly = BitTestPreamble() + R"(
+ const auto assembly = BitTestPreamble() + R"(
%1 = OpBitFieldSExtract %v2int %int_30 %uint_10 %uint_20
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto body = test::ToString(p->program(), ast_body);
- EXPECT_THAT(body,
- HasSubstr("let x_1 : vec2<i32> = extractBits(30, 10u, 20u);"))
- << body;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto body = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(body, HasSubstr("let x_1 : vec2<i32> = extractBits(30i, 10u, 20u);")) << body;
}
TEST_F(SpvUnaryBitTest, ExtractBits_IntVector) {
- const auto assembly = BitTestPreamble() + R"(
+ const auto assembly = BitTestPreamble() + R"(
%1 = OpBitFieldSExtract %v2int %v2int_30_40 %uint_10 %uint_20
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto body = test::ToString(p->program(), ast_body);
- EXPECT_THAT(
- body,
- HasSubstr(
- "let x_1 : vec2<i32> = extractBits(vec2<i32>(30, 40), 10u, 20u);"))
- << body;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto body = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(body,
+ HasSubstr("let x_1 : vec2<i32> = extractBits(vec2<i32>(30i, 40i), 10u, 20u);"))
+ << body;
}
TEST_F(SpvUnaryBitTest, ExtractBits_Uint) {
- const auto assembly = BitTestPreamble() + R"(
+ const auto assembly = BitTestPreamble() + R"(
%1 = OpBitFieldUExtract %v2uint %uint_20 %uint_10 %uint_20
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto body = test::ToString(p->program(), ast_body);
- EXPECT_THAT(body,
- HasSubstr("let x_1 : vec2<u32> = extractBits(20u, 10u, 20u);"))
- << body;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto body = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(body, HasSubstr("let x_1 : vec2<u32> = extractBits(20u, 10u, 20u);")) << body;
}
TEST_F(SpvUnaryBitTest, ExtractBits_UintVector) {
- const auto assembly = BitTestPreamble() + R"(
+ const auto assembly = BitTestPreamble() + R"(
%1 = OpBitFieldUExtract %v2uint %v2uint_10_20 %uint_10 %uint_20
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto body = test::ToString(p->program(), ast_body);
- EXPECT_THAT(
- body,
- HasSubstr(
- "let x_1 : vec2<u32> = extractBits(vec2<u32>(10u, 20u), 10u, 20u);"))
- << body;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto body = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(body,
+ HasSubstr("let x_1 : vec2<u32> = extractBits(vec2<u32>(10u, 20u), 10u, 20u);"))
+ << body;
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/reader/spirv/function_call_test.cc b/chromium/third_party/dawn/src/tint/reader/spirv/function_call_test.cc
index 6aba922bc11..584ad16c2d4 100644
--- a/chromium/third_party/dawn/src/tint/reader/spirv/function_call_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/spirv/function_call_test.cc
@@ -24,7 +24,7 @@ using ::testing::Eq;
using ::testing::HasSubstr;
std::string Preamble() {
- return R"(
+ return R"(
OpCapability Shader
OpMemoryModel Logical Simple
OpEntryPoint Fragment %100 "x_100"
@@ -33,7 +33,7 @@ std::string Preamble() {
}
TEST_F(SpvParserTest, EmitStatement_VoidCallNoParams) {
- auto p = parser(test::Assemble(Preamble() + R"(
+ auto p = parser(test::Assemble(Preamble() + R"(
%void = OpTypeVoid
%voidfn = OpTypeFunction %void
@@ -48,9 +48,9 @@ TEST_F(SpvParserTest, EmitStatement_VoidCallNoParams) {
OpReturn
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error();
- const auto got = test::ToString(p->program());
- const char* expect = R"(fn x_50() {
+ ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error();
+ const auto got = test::ToString(p->program());
+ const char* expect = R"(fn x_50() {
return;
}
@@ -59,16 +59,16 @@ fn x_100_1() {
return;
}
-@stage(fragment)
+@fragment
fn x_100() {
x_100_1();
}
)";
- EXPECT_EQ(expect, got);
+ EXPECT_EQ(expect, got);
}
TEST_F(SpvParserTest, EmitStatement_ScalarCallNoParams) {
- auto p = parser(test::Assemble(Preamble() + R"(
+ auto p = parser(test::Assemble(Preamble() + R"(
%void = OpTypeVoid
%voidfn = OpTypeFunction %void
%uint = OpTypeInt 32 0
@@ -86,27 +86,26 @@ TEST_F(SpvParserTest, EmitStatement_ScalarCallNoParams) {
OpReturn
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- ast::StatementList f100;
- {
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- f100 = fe.ast_body();
- }
- ast::StatementList f50;
- {
- auto fe = p->function_emitter(50);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- f50 = fe.ast_body();
- }
- auto program = p->program();
- EXPECT_THAT(test::ToString(program, f100),
- HasSubstr("let x_1 : u32 = x_50();\nreturn;"));
- EXPECT_THAT(test::ToString(program, f50), HasSubstr("return 42u;"));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ ast::StatementList f100;
+ {
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ f100 = fe.ast_body();
+ }
+ ast::StatementList f50;
+ {
+ auto fe = p->function_emitter(50);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ f50 = fe.ast_body();
+ }
+ auto program = p->program();
+ EXPECT_THAT(test::ToString(program, f100), HasSubstr("let x_1 : u32 = x_50();\nreturn;"));
+ EXPECT_THAT(test::ToString(program, f50), HasSubstr("return 42u;"));
}
TEST_F(SpvParserTest, EmitStatement_ScalarCallNoParamsUsedTwice) {
- auto p = parser(test::Assemble(Preamble() + R"(
+ auto p = parser(test::Assemble(Preamble() + R"(
%void = OpTypeVoid
%voidfn = OpTypeFunction %void
%uint = OpTypeInt 32 0
@@ -128,31 +127,31 @@ TEST_F(SpvParserTest, EmitStatement_ScalarCallNoParamsUsedTwice) {
OpReturn
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- ast::StatementList f100;
- {
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- f100 = fe.ast_body();
- }
- ast::StatementList f50;
- {
- auto fe = p->function_emitter(50);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- f50 = fe.ast_body();
- }
- auto program = p->program();
- EXPECT_EQ(test::ToString(program, f100), R"(var x_10 : u32;
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ ast::StatementList f100;
+ {
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ f100 = fe.ast_body();
+ }
+ ast::StatementList f50;
+ {
+ auto fe = p->function_emitter(50);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ f50 = fe.ast_body();
+ }
+ auto program = p->program();
+ EXPECT_EQ(test::ToString(program, f100), R"(var x_10 : u32;
let x_1 : u32 = x_50();
x_10 = x_1;
x_10 = x_1;
return;
)");
- EXPECT_THAT(test::ToString(program, f50), HasSubstr("return 42u;"));
+ EXPECT_THAT(test::ToString(program, f50), HasSubstr("return 42u;"));
}
TEST_F(SpvParserTest, EmitStatement_CallWithParams) {
- auto p = parser(test::Assemble(Preamble() + R"(
+ auto p = parser(test::Assemble(Preamble() + R"(
%void = OpTypeVoid
%voidfn = OpTypeFunction %void
%uint = OpTypeInt 32 0
@@ -174,10 +173,10 @@ TEST_F(SpvParserTest, EmitStatement_CallWithParams) {
OpReturn
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error();
- EXPECT_TRUE(p->error().empty());
- const auto program_ast_str = test::ToString(p->program());
- const std::string expected = R"(fn x_50(x_51 : u32, x_52 : u32) -> u32 {
+ ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error();
+ EXPECT_TRUE(p->error().empty());
+ const auto program_ast_str = test::ToString(p->program());
+ const std::string expected = R"(fn x_50(x_51 : u32, x_52 : u32) -> u32 {
return (x_51 + x_52);
}
@@ -186,12 +185,12 @@ fn x_100_1() {
return;
}
-@stage(fragment)
+@fragment
fn x_100() {
x_100_1();
}
)";
- EXPECT_EQ(program_ast_str, expected);
+ EXPECT_EQ(program_ast_str, expected);
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/reader/spirv/function_cfg_test.cc b/chromium/third_party/dawn/src/tint/reader/spirv/function_cfg_test.cc
index bd3c98c8966..edeea55271e 100644
--- a/chromium/third_party/dawn/src/tint/reader/spirv/function_cfg_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/spirv/function_cfg_test.cc
@@ -27,13 +27,13 @@ using ::testing::HasSubstr;
using SpvParserCFGTest = SpvParserTest;
std::string Dump(const std::vector<uint32_t>& v) {
- std::ostringstream o;
- o << "{";
- for (auto a : v) {
- o << a << " ";
- }
- o << "}";
- return o.str();
+ std::ostringstream o;
+ o << "{";
+ for (auto a : v) {
+ o << a << " ";
+ }
+ o << "}";
+ return o.str();
}
using ::testing::ElementsAre;
@@ -41,7 +41,7 @@ using ::testing::Eq;
using ::testing::UnorderedElementsAre;
std::string CommonTypes() {
- return R"(
+ return R"(
OpCapability Shader
OpMemoryModel Logical Simple
OpEntryPoint Fragment %100 "main"
@@ -92,38 +92,38 @@ std::string CommonTypes() {
/// flow constructs.
/// @returns the result of labeling control flow constructs.
bool FlowLabelControlFlowConstructs(FunctionEmitter* fe) {
- fe->RegisterBasicBlocks();
- EXPECT_TRUE(fe->RegisterMerges()) << fe->parser()->error();
- fe->ComputeBlockOrderAndPositions();
- EXPECT_TRUE(fe->VerifyHeaderContinueMergeOrder()) << fe->parser()->error();
- return fe->LabelControlFlowConstructs();
+ fe->RegisterBasicBlocks();
+ EXPECT_TRUE(fe->RegisterMerges()) << fe->parser()->error();
+ fe->ComputeBlockOrderAndPositions();
+ EXPECT_TRUE(fe->VerifyHeaderContinueMergeOrder()) << fe->parser()->error();
+ return fe->LabelControlFlowConstructs();
}
/// Runs the necessary flow until and including finding switch case
/// headers.
/// @returns the result of finding switch case headers.
bool FlowFindSwitchCaseHeaders(FunctionEmitter* fe) {
- EXPECT_TRUE(FlowLabelControlFlowConstructs(fe)) << fe->parser()->error();
- return fe->FindSwitchCaseHeaders();
+ EXPECT_TRUE(FlowLabelControlFlowConstructs(fe)) << fe->parser()->error();
+ return fe->FindSwitchCaseHeaders();
}
/// Runs the necessary flow until and including classify CFG edges,
/// @returns the result of classify CFG edges.
bool FlowClassifyCFGEdges(FunctionEmitter* fe) {
- EXPECT_TRUE(FlowFindSwitchCaseHeaders(fe)) << fe->parser()->error();
- return fe->ClassifyCFGEdges();
+ EXPECT_TRUE(FlowFindSwitchCaseHeaders(fe)) << fe->parser()->error();
+ return fe->ClassifyCFGEdges();
}
/// Runs the necessary flow until and including finding if-selection
/// internal headers.
/// @returns the result of classify CFG edges.
bool FlowFindIfSelectionInternalHeaders(FunctionEmitter* fe) {
- EXPECT_TRUE(FlowClassifyCFGEdges(fe)) << fe->parser()->error();
- return fe->FindIfSelectionInternalHeaders();
+ EXPECT_TRUE(FlowClassifyCFGEdges(fe)) << fe->parser()->error();
+ return fe->FindIfSelectionInternalHeaders();
}
TEST_F(SpvParserCFGTest, TerminatorsAreValid_SingleBlock) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%42 = OpLabel
@@ -131,14 +131,14 @@ TEST_F(SpvParserCFGTest, TerminatorsAreValid_SingleBlock) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- EXPECT_TRUE(fe.TerminatorsAreValid());
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ EXPECT_TRUE(fe.TerminatorsAreValid());
}
TEST_F(SpvParserCFGTest, TerminatorsAreValid_Sequence) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%20 = OpLabel
@@ -149,14 +149,14 @@ TEST_F(SpvParserCFGTest, TerminatorsAreValid_Sequence) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- EXPECT_TRUE(fe.TerminatorsAreValid()) << p->error();
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ EXPECT_TRUE(fe.TerminatorsAreValid()) << p->error();
}
TEST_F(SpvParserCFGTest, TerminatorsAreValid_If) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%20 = OpLabel
@@ -174,14 +174,14 @@ TEST_F(SpvParserCFGTest, TerminatorsAreValid_If) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- EXPECT_TRUE(fe.TerminatorsAreValid()) << p->error();
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ EXPECT_TRUE(fe.TerminatorsAreValid()) << p->error();
}
TEST_F(SpvParserCFGTest, TerminatorsAreValid_Switch) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -202,14 +202,14 @@ TEST_F(SpvParserCFGTest, TerminatorsAreValid_Switch) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- EXPECT_TRUE(fe.TerminatorsAreValid());
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ EXPECT_TRUE(fe.TerminatorsAreValid());
}
TEST_F(SpvParserCFGTest, TerminatorsAreValid_Loop_SingleBlock) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -224,14 +224,14 @@ TEST_F(SpvParserCFGTest, TerminatorsAreValid_Loop_SingleBlock) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- EXPECT_TRUE(fe.TerminatorsAreValid());
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ EXPECT_TRUE(fe.TerminatorsAreValid());
}
TEST_F(SpvParserCFGTest, TerminatorsAreValid_Loop_Simple) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -252,14 +252,14 @@ TEST_F(SpvParserCFGTest, TerminatorsAreValid_Loop_Simple) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- EXPECT_TRUE(fe.TerminatorsAreValid());
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ EXPECT_TRUE(fe.TerminatorsAreValid());
}
TEST_F(SpvParserCFGTest, TerminatorsAreValid_Kill) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -267,14 +267,14 @@ TEST_F(SpvParserCFGTest, TerminatorsAreValid_Kill) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- EXPECT_TRUE(fe.TerminatorsAreValid());
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ EXPECT_TRUE(fe.TerminatorsAreValid());
}
TEST_F(SpvParserCFGTest, TerminatorsAreValid_Unreachable) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -282,26 +282,26 @@ TEST_F(SpvParserCFGTest, TerminatorsAreValid_Unreachable) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- EXPECT_TRUE(fe.TerminatorsAreValid());
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ EXPECT_TRUE(fe.TerminatorsAreValid());
}
TEST_F(SpvParserCFGTest, TerminatorsAreValid_MissingTerminator) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
OpFunctionEnd
)"));
- // The SPIRV-Tools internal representation rejects this case earlier.
- EXPECT_FALSE(p->BuildAndParseInternalModuleExceptFunctions());
+ // The SPIRV-Tools internal representation rejects this case earlier.
+ EXPECT_FALSE(p->BuildAndParseInternalModuleExceptFunctions());
}
TEST_F(SpvParserCFGTest, TerminatorsAreValid_DisallowLoopToEntryBlock) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -312,15 +312,15 @@ TEST_F(SpvParserCFGTest, TerminatorsAreValid_DisallowLoopToEntryBlock) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- EXPECT_FALSE(fe.TerminatorsAreValid());
- EXPECT_THAT(p->error(), Eq("Block 20 branches to function entry block 10"));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ EXPECT_FALSE(fe.TerminatorsAreValid());
+ EXPECT_THAT(p->error(), Eq("Block 20 branches to function entry block 10"));
}
TEST_F(SpvParserCFGTest, TerminatorsAreValid_DisallowNonBlock) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -328,17 +328,16 @@ TEST_F(SpvParserCFGTest, TerminatorsAreValid_DisallowNonBlock) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- EXPECT_FALSE(fe.TerminatorsAreValid());
- EXPECT_THAT(p->error(),
- Eq("Block 10 in function 100 branches to 999 which is "
- "not a block in the function"));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ EXPECT_FALSE(fe.TerminatorsAreValid());
+ EXPECT_THAT(p->error(), Eq("Block 10 in function 100 branches to 999 which is "
+ "not a block in the function"));
}
TEST_F(SpvParserCFGTest, TerminatorsAreValid_DisallowBlockInDifferentFunction) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -354,16 +353,16 @@ TEST_F(SpvParserCFGTest, TerminatorsAreValid_DisallowBlockInDifferentFunction) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- EXPECT_FALSE(fe.TerminatorsAreValid());
- EXPECT_THAT(p->error(), Eq("Block 10 in function 100 branches to 210 which "
- "is not a block in the function"));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ EXPECT_FALSE(fe.TerminatorsAreValid());
+ EXPECT_THAT(p->error(), Eq("Block 10 in function 100 branches to 210 which "
+ "is not a block in the function"));
}
TEST_F(SpvParserCFGTest, RegisterMerges_NoMerges) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -371,22 +370,22 @@ TEST_F(SpvParserCFGTest, RegisterMerges_NoMerges) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- EXPECT_TRUE(fe.RegisterMerges());
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ EXPECT_TRUE(fe.RegisterMerges());
- const auto* bi = fe.GetBlockInfo(10);
- ASSERT_NE(bi, nullptr);
- EXPECT_EQ(bi->merge_for_header, 0u);
- EXPECT_EQ(bi->continue_for_header, 0u);
- EXPECT_EQ(bi->header_for_merge, 0u);
- EXPECT_EQ(bi->header_for_continue, 0u);
- EXPECT_FALSE(bi->is_continue_entire_loop);
+ const auto* bi = fe.GetBlockInfo(10);
+ ASSERT_NE(bi, nullptr);
+ EXPECT_EQ(bi->merge_for_header, 0u);
+ EXPECT_EQ(bi->continue_for_header, 0u);
+ EXPECT_EQ(bi->header_for_merge, 0u);
+ EXPECT_EQ(bi->header_for_continue, 0u);
+ EXPECT_FALSE(bi->is_continue_entire_loop);
}
TEST_F(SpvParserCFGTest, RegisterMerges_GoodSelectionMerge_BranchConditional) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -401,41 +400,41 @@ TEST_F(SpvParserCFGTest, RegisterMerges_GoodSelectionMerge_BranchConditional) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- EXPECT_TRUE(fe.RegisterMerges());
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ EXPECT_TRUE(fe.RegisterMerges());
- // Header points to the merge
- const auto* bi10 = fe.GetBlockInfo(10);
- ASSERT_NE(bi10, nullptr);
- EXPECT_EQ(bi10->merge_for_header, 99u);
- EXPECT_EQ(bi10->continue_for_header, 0u);
- EXPECT_EQ(bi10->header_for_merge, 0u);
- EXPECT_EQ(bi10->header_for_continue, 0u);
- EXPECT_FALSE(bi10->is_continue_entire_loop);
+ // Header points to the merge
+ const auto* bi10 = fe.GetBlockInfo(10);
+ ASSERT_NE(bi10, nullptr);
+ EXPECT_EQ(bi10->merge_for_header, 99u);
+ EXPECT_EQ(bi10->continue_for_header, 0u);
+ EXPECT_EQ(bi10->header_for_merge, 0u);
+ EXPECT_EQ(bi10->header_for_continue, 0u);
+ EXPECT_FALSE(bi10->is_continue_entire_loop);
- // Middle block is neither header nor merge
- const auto* bi20 = fe.GetBlockInfo(20);
- ASSERT_NE(bi20, nullptr);
- EXPECT_EQ(bi20->merge_for_header, 0u);
- EXPECT_EQ(bi20->continue_for_header, 0u);
- EXPECT_EQ(bi20->header_for_merge, 0u);
- EXPECT_EQ(bi20->header_for_continue, 0u);
- EXPECT_FALSE(bi20->is_continue_entire_loop);
+ // Middle block is neither header nor merge
+ const auto* bi20 = fe.GetBlockInfo(20);
+ ASSERT_NE(bi20, nullptr);
+ EXPECT_EQ(bi20->merge_for_header, 0u);
+ EXPECT_EQ(bi20->continue_for_header, 0u);
+ EXPECT_EQ(bi20->header_for_merge, 0u);
+ EXPECT_EQ(bi20->header_for_continue, 0u);
+ EXPECT_FALSE(bi20->is_continue_entire_loop);
- // Merge block points to the header
- const auto* bi99 = fe.GetBlockInfo(99);
- ASSERT_NE(bi99, nullptr);
- EXPECT_EQ(bi99->merge_for_header, 0u);
- EXPECT_EQ(bi99->continue_for_header, 0u);
- EXPECT_EQ(bi99->header_for_merge, 10u);
- EXPECT_EQ(bi99->header_for_continue, 0u);
- EXPECT_FALSE(bi99->is_continue_entire_loop);
+ // Merge block points to the header
+ const auto* bi99 = fe.GetBlockInfo(99);
+ ASSERT_NE(bi99, nullptr);
+ EXPECT_EQ(bi99->merge_for_header, 0u);
+ EXPECT_EQ(bi99->continue_for_header, 0u);
+ EXPECT_EQ(bi99->header_for_merge, 10u);
+ EXPECT_EQ(bi99->header_for_continue, 0u);
+ EXPECT_FALSE(bi99->is_continue_entire_loop);
}
TEST_F(SpvParserCFGTest, RegisterMerges_GoodSelectionMerge_Switch) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -450,41 +449,41 @@ TEST_F(SpvParserCFGTest, RegisterMerges_GoodSelectionMerge_Switch) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- EXPECT_TRUE(fe.RegisterMerges());
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ EXPECT_TRUE(fe.RegisterMerges());
- // Header points to the merge
- const auto* bi10 = fe.GetBlockInfo(10);
- ASSERT_NE(bi10, nullptr);
- EXPECT_EQ(bi10->merge_for_header, 99u);
- EXPECT_EQ(bi10->continue_for_header, 0u);
- EXPECT_EQ(bi10->header_for_merge, 0u);
- EXPECT_EQ(bi10->header_for_continue, 0u);
- EXPECT_FALSE(bi10->is_continue_entire_loop);
+ // Header points to the merge
+ const auto* bi10 = fe.GetBlockInfo(10);
+ ASSERT_NE(bi10, nullptr);
+ EXPECT_EQ(bi10->merge_for_header, 99u);
+ EXPECT_EQ(bi10->continue_for_header, 0u);
+ EXPECT_EQ(bi10->header_for_merge, 0u);
+ EXPECT_EQ(bi10->header_for_continue, 0u);
+ EXPECT_FALSE(bi10->is_continue_entire_loop);
- // Middle block is neither header nor merge
- const auto* bi20 = fe.GetBlockInfo(20);
- ASSERT_NE(bi20, nullptr);
- EXPECT_EQ(bi20->merge_for_header, 0u);
- EXPECT_EQ(bi20->continue_for_header, 0u);
- EXPECT_EQ(bi20->header_for_merge, 0u);
- EXPECT_EQ(bi20->header_for_continue, 0u);
- EXPECT_FALSE(bi20->is_continue_entire_loop);
+ // Middle block is neither header nor merge
+ const auto* bi20 = fe.GetBlockInfo(20);
+ ASSERT_NE(bi20, nullptr);
+ EXPECT_EQ(bi20->merge_for_header, 0u);
+ EXPECT_EQ(bi20->continue_for_header, 0u);
+ EXPECT_EQ(bi20->header_for_merge, 0u);
+ EXPECT_EQ(bi20->header_for_continue, 0u);
+ EXPECT_FALSE(bi20->is_continue_entire_loop);
- // Merge block points to the header
- const auto* bi99 = fe.GetBlockInfo(99);
- ASSERT_NE(bi99, nullptr);
- EXPECT_EQ(bi99->merge_for_header, 0u);
- EXPECT_EQ(bi99->continue_for_header, 0u);
- EXPECT_EQ(bi99->header_for_merge, 10u);
- EXPECT_EQ(bi99->header_for_continue, 0u);
- EXPECT_FALSE(bi99->is_continue_entire_loop);
+ // Merge block points to the header
+ const auto* bi99 = fe.GetBlockInfo(99);
+ ASSERT_NE(bi99, nullptr);
+ EXPECT_EQ(bi99->merge_for_header, 0u);
+ EXPECT_EQ(bi99->continue_for_header, 0u);
+ EXPECT_EQ(bi99->header_for_merge, 10u);
+ EXPECT_EQ(bi99->header_for_continue, 0u);
+ EXPECT_FALSE(bi99->is_continue_entire_loop);
}
TEST_F(SpvParserCFGTest, RegisterMerges_GoodLoopMerge_SingleBlockLoop) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -499,42 +498,41 @@ TEST_F(SpvParserCFGTest, RegisterMerges_GoodLoopMerge_SingleBlockLoop) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- EXPECT_TRUE(fe.RegisterMerges());
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ EXPECT_TRUE(fe.RegisterMerges());
- // Entry block is not special
- const auto* bi10 = fe.GetBlockInfo(10);
- ASSERT_NE(bi10, nullptr);
- EXPECT_EQ(bi10->merge_for_header, 0u);
- EXPECT_EQ(bi10->continue_for_header, 0u);
- EXPECT_EQ(bi10->header_for_merge, 0u);
- EXPECT_EQ(bi10->header_for_continue, 0u);
- EXPECT_FALSE(bi10->is_continue_entire_loop);
+ // Entry block is not special
+ const auto* bi10 = fe.GetBlockInfo(10);
+ ASSERT_NE(bi10, nullptr);
+ EXPECT_EQ(bi10->merge_for_header, 0u);
+ EXPECT_EQ(bi10->continue_for_header, 0u);
+ EXPECT_EQ(bi10->header_for_merge, 0u);
+ EXPECT_EQ(bi10->header_for_continue, 0u);
+ EXPECT_FALSE(bi10->is_continue_entire_loop);
- // Single block loop is its own continue, and marked as single block loop.
- const auto* bi20 = fe.GetBlockInfo(20);
- ASSERT_NE(bi20, nullptr);
- EXPECT_EQ(bi20->merge_for_header, 99u);
- EXPECT_EQ(bi20->continue_for_header, 20u);
- EXPECT_EQ(bi20->header_for_merge, 0u);
- EXPECT_EQ(bi20->header_for_continue, 20u);
- EXPECT_TRUE(bi20->is_continue_entire_loop);
+ // Single block loop is its own continue, and marked as single block loop.
+ const auto* bi20 = fe.GetBlockInfo(20);
+ ASSERT_NE(bi20, nullptr);
+ EXPECT_EQ(bi20->merge_for_header, 99u);
+ EXPECT_EQ(bi20->continue_for_header, 20u);
+ EXPECT_EQ(bi20->header_for_merge, 0u);
+ EXPECT_EQ(bi20->header_for_continue, 20u);
+ EXPECT_TRUE(bi20->is_continue_entire_loop);
- // Merge block points to the header
- const auto* bi99 = fe.GetBlockInfo(99);
- ASSERT_NE(bi99, nullptr);
- EXPECT_EQ(bi99->merge_for_header, 0u);
- EXPECT_EQ(bi99->continue_for_header, 0u);
- EXPECT_EQ(bi99->header_for_merge, 20u);
- EXPECT_EQ(bi99->header_for_continue, 0u);
- EXPECT_FALSE(bi99->is_continue_entire_loop);
+ // Merge block points to the header
+ const auto* bi99 = fe.GetBlockInfo(99);
+ ASSERT_NE(bi99, nullptr);
+ EXPECT_EQ(bi99->merge_for_header, 0u);
+ EXPECT_EQ(bi99->continue_for_header, 0u);
+ EXPECT_EQ(bi99->header_for_merge, 20u);
+ EXPECT_EQ(bi99->header_for_continue, 0u);
+ EXPECT_FALSE(bi99->is_continue_entire_loop);
}
-TEST_F(SpvParserCFGTest,
- RegisterMerges_GoodLoopMerge_MultiBlockLoop_ContinueIsHeader) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, RegisterMerges_GoodLoopMerge_MultiBlockLoop_ContinueIsHeader) {
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -552,42 +550,41 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- EXPECT_TRUE(fe.RegisterMerges());
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ EXPECT_TRUE(fe.RegisterMerges());
- // Loop header points to continue (itself) and merge
- const auto* bi20 = fe.GetBlockInfo(20);
- ASSERT_NE(bi20, nullptr);
- EXPECT_EQ(bi20->merge_for_header, 99u);
- EXPECT_EQ(bi20->continue_for_header, 20u);
- EXPECT_EQ(bi20->header_for_merge, 0u);
- EXPECT_EQ(bi20->header_for_continue, 20u);
- EXPECT_TRUE(bi20->is_continue_entire_loop);
+ // Loop header points to continue (itself) and merge
+ const auto* bi20 = fe.GetBlockInfo(20);
+ ASSERT_NE(bi20, nullptr);
+ EXPECT_EQ(bi20->merge_for_header, 99u);
+ EXPECT_EQ(bi20->continue_for_header, 20u);
+ EXPECT_EQ(bi20->header_for_merge, 0u);
+ EXPECT_EQ(bi20->header_for_continue, 20u);
+ EXPECT_TRUE(bi20->is_continue_entire_loop);
- // Backedge block, but is not a declared header, merge, or continue
- const auto* bi40 = fe.GetBlockInfo(40);
- ASSERT_NE(bi40, nullptr);
- EXPECT_EQ(bi40->merge_for_header, 0u);
- EXPECT_EQ(bi40->continue_for_header, 0u);
- EXPECT_EQ(bi40->header_for_merge, 0u);
- EXPECT_EQ(bi40->header_for_continue, 0u);
- EXPECT_FALSE(bi40->is_continue_entire_loop);
+ // Backedge block, but is not a declared header, merge, or continue
+ const auto* bi40 = fe.GetBlockInfo(40);
+ ASSERT_NE(bi40, nullptr);
+ EXPECT_EQ(bi40->merge_for_header, 0u);
+ EXPECT_EQ(bi40->continue_for_header, 0u);
+ EXPECT_EQ(bi40->header_for_merge, 0u);
+ EXPECT_EQ(bi40->header_for_continue, 0u);
+ EXPECT_FALSE(bi40->is_continue_entire_loop);
- // Merge block points to the header
- const auto* bi99 = fe.GetBlockInfo(99);
- ASSERT_NE(bi99, nullptr);
- EXPECT_EQ(bi99->merge_for_header, 0u);
- EXPECT_EQ(bi99->continue_for_header, 0u);
- EXPECT_EQ(bi99->header_for_merge, 20u);
- EXPECT_EQ(bi99->header_for_continue, 0u);
- EXPECT_FALSE(bi99->is_continue_entire_loop);
+ // Merge block points to the header
+ const auto* bi99 = fe.GetBlockInfo(99);
+ ASSERT_NE(bi99, nullptr);
+ EXPECT_EQ(bi99->merge_for_header, 0u);
+ EXPECT_EQ(bi99->continue_for_header, 0u);
+ EXPECT_EQ(bi99->header_for_merge, 20u);
+ EXPECT_EQ(bi99->header_for_continue, 0u);
+ EXPECT_FALSE(bi99->is_continue_entire_loop);
}
-TEST_F(SpvParserCFGTest,
- RegisterMerges_GoodLoopMerge_MultiBlockLoop_ContinueIsNotHeader_Branch) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, RegisterMerges_GoodLoopMerge_MultiBlockLoop_ContinueIsNotHeader_Branch) {
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -608,43 +605,43 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- EXPECT_TRUE(fe.RegisterMerges());
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ EXPECT_TRUE(fe.RegisterMerges());
- // Loop header points to continue and merge
- const auto* bi20 = fe.GetBlockInfo(20);
- ASSERT_NE(bi20, nullptr);
- EXPECT_EQ(bi20->merge_for_header, 99u);
- EXPECT_EQ(bi20->continue_for_header, 40u);
- EXPECT_EQ(bi20->header_for_merge, 0u);
- EXPECT_EQ(bi20->header_for_continue, 0u);
- EXPECT_FALSE(bi20->is_continue_entire_loop);
+ // Loop header points to continue and merge
+ const auto* bi20 = fe.GetBlockInfo(20);
+ ASSERT_NE(bi20, nullptr);
+ EXPECT_EQ(bi20->merge_for_header, 99u);
+ EXPECT_EQ(bi20->continue_for_header, 40u);
+ EXPECT_EQ(bi20->header_for_merge, 0u);
+ EXPECT_EQ(bi20->header_for_continue, 0u);
+ EXPECT_FALSE(bi20->is_continue_entire_loop);
- // Continue block points to header
- const auto* bi40 = fe.GetBlockInfo(40);
- ASSERT_NE(bi40, nullptr);
- EXPECT_EQ(bi40->merge_for_header, 0u);
- EXPECT_EQ(bi40->continue_for_header, 0u);
- EXPECT_EQ(bi40->header_for_merge, 0u);
- EXPECT_EQ(bi40->header_for_continue, 20u);
- EXPECT_FALSE(bi40->is_continue_entire_loop);
+ // Continue block points to header
+ const auto* bi40 = fe.GetBlockInfo(40);
+ ASSERT_NE(bi40, nullptr);
+ EXPECT_EQ(bi40->merge_for_header, 0u);
+ EXPECT_EQ(bi40->continue_for_header, 0u);
+ EXPECT_EQ(bi40->header_for_merge, 0u);
+ EXPECT_EQ(bi40->header_for_continue, 20u);
+ EXPECT_FALSE(bi40->is_continue_entire_loop);
- // Merge block points to the header
- const auto* bi99 = fe.GetBlockInfo(99);
- ASSERT_NE(bi99, nullptr);
- EXPECT_EQ(bi99->merge_for_header, 0u);
- EXPECT_EQ(bi99->continue_for_header, 0u);
- EXPECT_EQ(bi99->header_for_merge, 20u);
- EXPECT_EQ(bi99->header_for_continue, 0u);
- EXPECT_FALSE(bi99->is_continue_entire_loop);
+ // Merge block points to the header
+ const auto* bi99 = fe.GetBlockInfo(99);
+ ASSERT_NE(bi99, nullptr);
+ EXPECT_EQ(bi99->merge_for_header, 0u);
+ EXPECT_EQ(bi99->continue_for_header, 0u);
+ EXPECT_EQ(bi99->header_for_merge, 20u);
+ EXPECT_EQ(bi99->header_for_continue, 0u);
+ EXPECT_FALSE(bi99->is_continue_entire_loop);
}
TEST_F(
SpvParserCFGTest,
RegisterMerges_GoodLoopMerge_MultiBlockLoop_ContinueIsNotHeader_BranchConditional) { // NOLINT
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -665,41 +662,41 @@ TEST_F(
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- EXPECT_TRUE(fe.RegisterMerges());
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ EXPECT_TRUE(fe.RegisterMerges());
- // Loop header points to continue and merge
- const auto* bi20 = fe.GetBlockInfo(20);
- ASSERT_NE(bi20, nullptr);
- EXPECT_EQ(bi20->merge_for_header, 99u);
- EXPECT_EQ(bi20->continue_for_header, 40u);
- EXPECT_EQ(bi20->header_for_merge, 0u);
- EXPECT_EQ(bi20->header_for_continue, 0u);
- EXPECT_FALSE(bi20->is_continue_entire_loop);
+ // Loop header points to continue and merge
+ const auto* bi20 = fe.GetBlockInfo(20);
+ ASSERT_NE(bi20, nullptr);
+ EXPECT_EQ(bi20->merge_for_header, 99u);
+ EXPECT_EQ(bi20->continue_for_header, 40u);
+ EXPECT_EQ(bi20->header_for_merge, 0u);
+ EXPECT_EQ(bi20->header_for_continue, 0u);
+ EXPECT_FALSE(bi20->is_continue_entire_loop);
- // Continue block points to header
- const auto* bi40 = fe.GetBlockInfo(40);
- ASSERT_NE(bi40, nullptr);
- EXPECT_EQ(bi40->merge_for_header, 0u);
- EXPECT_EQ(bi40->continue_for_header, 0u);
- EXPECT_EQ(bi40->header_for_merge, 0u);
- EXPECT_EQ(bi40->header_for_continue, 20u);
- EXPECT_FALSE(bi40->is_continue_entire_loop);
+ // Continue block points to header
+ const auto* bi40 = fe.GetBlockInfo(40);
+ ASSERT_NE(bi40, nullptr);
+ EXPECT_EQ(bi40->merge_for_header, 0u);
+ EXPECT_EQ(bi40->continue_for_header, 0u);
+ EXPECT_EQ(bi40->header_for_merge, 0u);
+ EXPECT_EQ(bi40->header_for_continue, 20u);
+ EXPECT_FALSE(bi40->is_continue_entire_loop);
- // Merge block points to the header
- const auto* bi99 = fe.GetBlockInfo(99);
- ASSERT_NE(bi99, nullptr);
- EXPECT_EQ(bi99->merge_for_header, 0u);
- EXPECT_EQ(bi99->continue_for_header, 0u);
- EXPECT_EQ(bi99->header_for_merge, 20u);
- EXPECT_EQ(bi99->header_for_continue, 0u);
- EXPECT_FALSE(bi99->is_continue_entire_loop);
+ // Merge block points to the header
+ const auto* bi99 = fe.GetBlockInfo(99);
+ ASSERT_NE(bi99, nullptr);
+ EXPECT_EQ(bi99->merge_for_header, 0u);
+ EXPECT_EQ(bi99->continue_for_header, 0u);
+ EXPECT_EQ(bi99->header_for_merge, 20u);
+ EXPECT_EQ(bi99->header_for_continue, 0u);
+ EXPECT_FALSE(bi99->is_continue_entire_loop);
}
TEST_F(SpvParserCFGTest, RegisterMerges_SelectionMerge_BadTerminator) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -714,16 +711,16 @@ TEST_F(SpvParserCFGTest, RegisterMerges_SelectionMerge_BadTerminator) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- EXPECT_FALSE(fe.RegisterMerges());
- EXPECT_THAT(p->error(), Eq("Selection header 10 does not end in an "
- "OpBranchConditional or OpSwitch instruction"));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ EXPECT_FALSE(fe.RegisterMerges());
+ EXPECT_THAT(p->error(), Eq("Selection header 10 does not end in an "
+ "OpBranchConditional or OpSwitch instruction"));
}
TEST_F(SpvParserCFGTest, RegisterMerges_LoopMerge_BadTerminator) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -744,16 +741,16 @@ TEST_F(SpvParserCFGTest, RegisterMerges_LoopMerge_BadTerminator) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- EXPECT_FALSE(fe.RegisterMerges());
- EXPECT_THAT(p->error(), Eq("Loop header 20 does not end in an OpBranch or "
- "OpBranchConditional instruction"));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ EXPECT_FALSE(fe.RegisterMerges());
+ EXPECT_THAT(p->error(), Eq("Loop header 20 does not end in an OpBranch or "
+ "OpBranchConditional instruction"));
}
TEST_F(SpvParserCFGTest, RegisterMerges_BadMergeBlock) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -768,16 +765,15 @@ TEST_F(SpvParserCFGTest, RegisterMerges_BadMergeBlock) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- EXPECT_FALSE(fe.RegisterMerges());
- EXPECT_THAT(p->error(),
- Eq("Structured header block 10 declares invalid merge block 2"));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ EXPECT_FALSE(fe.RegisterMerges());
+ EXPECT_THAT(p->error(), Eq("Structured header block 10 declares invalid merge block 2"));
}
TEST_F(SpvParserCFGTest, RegisterMerges_HeaderIsItsOwnMerge) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -792,16 +788,15 @@ TEST_F(SpvParserCFGTest, RegisterMerges_HeaderIsItsOwnMerge) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- EXPECT_FALSE(fe.RegisterMerges());
- EXPECT_THAT(p->error(),
- Eq("Structured header block 10 cannot be its own merge block"));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ EXPECT_FALSE(fe.RegisterMerges());
+ EXPECT_THAT(p->error(), Eq("Structured header block 10 cannot be its own merge block"));
}
TEST_F(SpvParserCFGTest, RegisterMerges_MergeReused) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -826,17 +821,16 @@ TEST_F(SpvParserCFGTest, RegisterMerges_MergeReused) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- EXPECT_FALSE(fe.RegisterMerges());
- EXPECT_THAT(
- p->error(),
- Eq("Block 49 declared as merge block for more than one header: 10, 50"));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ EXPECT_FALSE(fe.RegisterMerges());
+ EXPECT_THAT(p->error(),
+ Eq("Block 49 declared as merge block for more than one header: 10, 50"));
}
TEST_F(SpvParserCFGTest, RegisterMerges_EntryBlockIsLoopHeader) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -851,16 +845,15 @@ TEST_F(SpvParserCFGTest, RegisterMerges_EntryBlockIsLoopHeader) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- EXPECT_FALSE(fe.RegisterMerges());
- EXPECT_THAT(p->error(),
- Eq("Function entry block 10 cannot be a loop header"));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ EXPECT_FALSE(fe.RegisterMerges());
+ EXPECT_THAT(p->error(), Eq("Function entry block 10 cannot be a loop header"));
}
TEST_F(SpvParserCFGTest, RegisterMerges_BadContinueTarget) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -875,16 +868,15 @@ TEST_F(SpvParserCFGTest, RegisterMerges_BadContinueTarget) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- EXPECT_FALSE(fe.RegisterMerges());
- EXPECT_THAT(p->error(),
- Eq("Structured header 20 declares invalid continue target 999"));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ EXPECT_FALSE(fe.RegisterMerges());
+ EXPECT_THAT(p->error(), Eq("Structured header 20 declares invalid continue target 999"));
}
TEST_F(SpvParserCFGTest, RegisterMerges_MergeSameAsContinue) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -903,17 +895,16 @@ TEST_F(SpvParserCFGTest, RegisterMerges_MergeSameAsContinue) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- EXPECT_FALSE(fe.RegisterMerges());
- EXPECT_THAT(p->error(),
- Eq("Invalid structured header block 20: declares block 50 as "
- "both its merge block and continue target"));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ EXPECT_FALSE(fe.RegisterMerges());
+ EXPECT_THAT(p->error(), Eq("Invalid structured header block 20: declares block 50 as "
+ "both its merge block and continue target"));
}
TEST_F(SpvParserCFGTest, RegisterMerges_ContinueReused) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -947,16 +938,16 @@ TEST_F(SpvParserCFGTest, RegisterMerges_ContinueReused) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- EXPECT_FALSE(fe.RegisterMerges());
- EXPECT_THAT(p->error(), Eq("Block 40 declared as continue target for more "
- "than one header: 20, 50"));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ EXPECT_FALSE(fe.RegisterMerges());
+ EXPECT_THAT(p->error(), Eq("Block 40 declared as continue target for more "
+ "than one header: 20, 50"));
}
TEST_F(SpvParserCFGTest, RegisterMerges_SingleBlockLoop_NotItsOwnContinue) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -974,17 +965,15 @@ TEST_F(SpvParserCFGTest, RegisterMerges_SingleBlockLoop_NotItsOwnContinue) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- EXPECT_FALSE(fe.RegisterMerges());
- EXPECT_THAT(
- p->error(),
- Eq("Block 20 branches to itself but is not its own continue target"));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ EXPECT_FALSE(fe.RegisterMerges());
+ EXPECT_THAT(p->error(), Eq("Block 20 branches to itself but is not its own continue target"));
}
TEST_F(SpvParserCFGTest, ComputeBlockOrder_OneBlock) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%42 = OpLabel
@@ -992,20 +981,20 @@ TEST_F(SpvParserCFGTest, ComputeBlockOrder_OneBlock) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
- EXPECT_THAT(fe.block_order(), ElementsAre(42));
+ EXPECT_THAT(fe.block_order(), ElementsAre(42));
- const auto* bi = fe.GetBlockInfo(42);
- ASSERT_NE(bi, nullptr);
- EXPECT_EQ(bi->pos, 0u);
+ const auto* bi = fe.GetBlockInfo(42);
+ ASSERT_NE(bi, nullptr);
+ EXPECT_EQ(bi->pos, 0u);
}
TEST_F(SpvParserCFGTest, ComputeBlockOrder_IgnoreStaticalyUnreachable) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -1019,16 +1008,16 @@ TEST_F(SpvParserCFGTest, ComputeBlockOrder_IgnoreStaticalyUnreachable) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
- EXPECT_THAT(fe.block_order(), ElementsAre(10, 20));
+ EXPECT_THAT(fe.block_order(), ElementsAre(10, 20));
}
TEST_F(SpvParserCFGTest, ComputeBlockOrder_KillIsDeadEnd) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -1042,16 +1031,16 @@ TEST_F(SpvParserCFGTest, ComputeBlockOrder_KillIsDeadEnd) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
- EXPECT_THAT(fe.block_order(), ElementsAre(10, 20));
+ EXPECT_THAT(fe.block_order(), ElementsAre(10, 20));
}
TEST_F(SpvParserCFGTest, ComputeBlockOrder_UnreachableIsDeadEnd) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -1065,16 +1054,16 @@ TEST_F(SpvParserCFGTest, ComputeBlockOrder_UnreachableIsDeadEnd) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
- EXPECT_THAT(fe.block_order(), ElementsAre(10, 20));
+ EXPECT_THAT(fe.block_order(), ElementsAre(10, 20));
}
TEST_F(SpvParserCFGTest, ComputeBlockOrder_ReorderSequence) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -1092,29 +1081,29 @@ TEST_F(SpvParserCFGTest, ComputeBlockOrder_ReorderSequence) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
- EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 30, 99));
+ EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 30, 99));
- const auto* bi10 = fe.GetBlockInfo(10);
- ASSERT_NE(bi10, nullptr);
- EXPECT_EQ(bi10->pos, 0u);
- const auto* bi20 = fe.GetBlockInfo(20);
- ASSERT_NE(bi20, nullptr);
- EXPECT_EQ(bi20->pos, 1u);
- const auto* bi30 = fe.GetBlockInfo(30);
- ASSERT_NE(bi30, nullptr);
- EXPECT_EQ(bi30->pos, 2u);
- const auto* bi99 = fe.GetBlockInfo(99);
- ASSERT_NE(bi99, nullptr);
- EXPECT_EQ(bi99->pos, 3u);
+ const auto* bi10 = fe.GetBlockInfo(10);
+ ASSERT_NE(bi10, nullptr);
+ EXPECT_EQ(bi10->pos, 0u);
+ const auto* bi20 = fe.GetBlockInfo(20);
+ ASSERT_NE(bi20, nullptr);
+ EXPECT_EQ(bi20->pos, 1u);
+ const auto* bi30 = fe.GetBlockInfo(30);
+ ASSERT_NE(bi30, nullptr);
+ EXPECT_EQ(bi30->pos, 2u);
+ const auto* bi99 = fe.GetBlockInfo(99);
+ ASSERT_NE(bi99, nullptr);
+ EXPECT_EQ(bi99->pos, 3u);
}
TEST_F(SpvParserCFGTest, ComputeBlockOrder_DupConditionalBranch) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -1129,16 +1118,16 @@ TEST_F(SpvParserCFGTest, ComputeBlockOrder_DupConditionalBranch) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
- EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 99));
+ EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 99));
}
TEST_F(SpvParserCFGTest, ComputeBlockOrder_RespectConditionalBranchOrder) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -1156,16 +1145,16 @@ TEST_F(SpvParserCFGTest, ComputeBlockOrder_RespectConditionalBranchOrder) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
- EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 30, 99));
+ EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 30, 99));
}
TEST_F(SpvParserCFGTest, ComputeBlockOrder_TrueOnlyBranch) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -1180,16 +1169,16 @@ TEST_F(SpvParserCFGTest, ComputeBlockOrder_TrueOnlyBranch) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
- EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 99));
+ EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 99));
}
TEST_F(SpvParserCFGTest, ComputeBlockOrder_FalseOnlyBranch) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -1204,16 +1193,16 @@ TEST_F(SpvParserCFGTest, ComputeBlockOrder_FalseOnlyBranch) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
- EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 99));
+ EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 99));
}
TEST_F(SpvParserCFGTest, ComputeBlockOrder_SwitchOrderNaturallyReversed) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -1231,17 +1220,16 @@ TEST_F(SpvParserCFGTest, ComputeBlockOrder_SwitchOrderNaturallyReversed) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
- EXPECT_THAT(fe.block_order(), ElementsAre(10, 30, 20, 99));
+ EXPECT_THAT(fe.block_order(), ElementsAre(10, 30, 20, 99));
}
-TEST_F(SpvParserCFGTest,
- ComputeBlockOrder_SwitchWithDefaultOrderNaturallyReversed) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, ComputeBlockOrder_SwitchWithDefaultOrderNaturallyReversed) {
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -1262,16 +1250,16 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
- EXPECT_THAT(fe.block_order(), ElementsAre(10, 30, 20, 80, 99));
+ EXPECT_THAT(fe.block_order(), ElementsAre(10, 30, 20, 80, 99));
}
TEST_F(SpvParserCFGTest, ComputeBlockOrder_Switch_DefaultSameAsACase) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -1292,16 +1280,16 @@ TEST_F(SpvParserCFGTest, ComputeBlockOrder_Switch_DefaultSameAsACase) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
- EXPECT_THAT(fe.block_order(), ElementsAre(10, 40, 20, 30, 99));
+ EXPECT_THAT(fe.block_order(), ElementsAre(10, 40, 20, 30, 99));
}
TEST_F(SpvParserCFGTest, ComputeBlockOrder_RespectSwitchCaseFallthrough) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -1327,19 +1315,17 @@ TEST_F(SpvParserCFGTest, ComputeBlockOrder_RespectSwitchCaseFallthrough) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
- EXPECT_THAT(fe.block_order(), ElementsAre(10, 30, 50, 20, 40, 99))
- << assembly;
+ EXPECT_THAT(fe.block_order(), ElementsAre(10, 30, 50, 20, 40, 99)) << assembly;
}
-TEST_F(SpvParserCFGTest,
- ComputeBlockOrder_RespectSwitchCaseFallthrough_FromDefault) {
- auto assembly = CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, ComputeBlockOrder_RespectSwitchCaseFallthrough_FromDefault) {
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -1363,19 +1349,17 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
- EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 80, 30, 40, 99))
- << assembly;
+ EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 80, 30, 40, 99)) << assembly;
}
-TEST_F(SpvParserCFGTest,
- ComputeBlockOrder_RespectSwitchCaseFallthrough_FromCaseToDefaultToCase) {
- auto assembly = CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, ComputeBlockOrder_RespectSwitchCaseFallthrough_FromCaseToDefaultToCase) {
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -1396,18 +1380,17 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
- EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 80, 30, 99)) << assembly;
+ EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 80, 30, 99)) << assembly;
}
-TEST_F(SpvParserCFGTest,
- ComputeBlockOrder_SwitchCasesFallthrough_OppositeDirections) {
- auto assembly = CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, ComputeBlockOrder_SwitchCasesFallthrough_OppositeDirections) {
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -1433,22 +1416,20 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
- EXPECT_THAT(fe.block_order(), ElementsAre(10, 50, 40, 20, 30, 99))
- << assembly;
+ EXPECT_THAT(fe.block_order(), ElementsAre(10, 50, 40, 20, 30, 99)) << assembly;
- // We're deliberately testing a case that SPIR-V doesn't allow.
- p->DeliberatelyInvalidSpirv();
+ // We're deliberately testing a case that SPIR-V doesn't allow.
+ p->DeliberatelyInvalidSpirv();
}
-TEST_F(SpvParserCFGTest,
- ComputeBlockOrder_RespectSwitchCaseFallthrough_Interleaved) {
- auto assembly = CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, ComputeBlockOrder_RespectSwitchCaseFallthrough_Interleaved) {
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -1480,18 +1461,17 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
- EXPECT_THAT(fe.block_order(), ElementsAre(10, 30, 50, 70, 20, 40, 60, 99))
- << assembly;
+ EXPECT_THAT(fe.block_order(), ElementsAre(10, 30, 50, 70, 20, 40, 60, 99)) << assembly;
}
TEST_F(SpvParserCFGTest, ComputeBlockOrder_Nest_If_Contains_If) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -1529,19 +1509,17 @@ TEST_F(SpvParserCFGTest, ComputeBlockOrder_Nest_If_Contains_If) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
- EXPECT_THAT(fe.block_order(),
- ElementsAre(10, 20, 30, 40, 49, 50, 60, 70, 79, 99))
- << assembly;
+ EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 30, 40, 49, 50, 60, 70, 79, 99)) << assembly;
}
TEST_F(SpvParserCFGTest, ComputeBlockOrder_Nest_If_In_SwitchCase) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -1579,19 +1557,17 @@ TEST_F(SpvParserCFGTest, ComputeBlockOrder_Nest_If_In_SwitchCase) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
- EXPECT_THAT(fe.block_order(),
- ElementsAre(10, 20, 30, 40, 49, 50, 60, 70, 79, 99))
- << assembly;
+ EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 30, 40, 49, 50, 60, 70, 79, 99)) << assembly;
}
TEST_F(SpvParserCFGTest, ComputeBlockOrder_Nest_IfFallthrough_In_SwitchCase) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -1629,19 +1605,17 @@ TEST_F(SpvParserCFGTest, ComputeBlockOrder_Nest_IfFallthrough_In_SwitchCase) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
- EXPECT_THAT(fe.block_order(),
- ElementsAre(10, 20, 30, 40, 49, 50, 60, 70, 79, 99))
- << assembly;
+ EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 30, 40, 49, 50, 60, 70, 79, 99)) << assembly;
}
TEST_F(SpvParserCFGTest, ComputeBlockOrder_Nest_IfBreak_In_SwitchCase) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -1673,18 +1647,17 @@ TEST_F(SpvParserCFGTest, ComputeBlockOrder_Nest_IfBreak_In_SwitchCase) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
- EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 40, 49, 50, 60, 79, 99))
- << assembly;
+ EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 40, 49, 50, 60, 79, 99)) << assembly;
}
TEST_F(SpvParserCFGTest, ComputeBlockOrder_Loop_SingleBlock_Simple) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
; The entry block can't be the target of a branch
@@ -1700,17 +1673,17 @@ TEST_F(SpvParserCFGTest, ComputeBlockOrder_Loop_SingleBlock_Simple) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
- EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 99)) << assembly;
+ EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 99)) << assembly;
}
TEST_F(SpvParserCFGTest, ComputeBlockOrder_Loop_SingleBlock_Infinite) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
; The entry block can't be the target of a branch
@@ -1726,17 +1699,17 @@ TEST_F(SpvParserCFGTest, ComputeBlockOrder_Loop_SingleBlock_Infinite) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
- EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 99)) << assembly;
+ EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 99)) << assembly;
}
TEST_F(SpvParserCFGTest, ComputeBlockOrder_Loop_SingleBlock_DupInfinite) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
; The entry block can't be the target of a branch
@@ -1752,17 +1725,17 @@ TEST_F(SpvParserCFGTest, ComputeBlockOrder_Loop_SingleBlock_DupInfinite) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
- EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 99)) << assembly;
+ EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 99)) << assembly;
}
TEST_F(SpvParserCFGTest, ComputeBlockOrder_Loop_HeaderHasBreakIf) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -1783,17 +1756,17 @@ TEST_F(SpvParserCFGTest, ComputeBlockOrder_Loop_HeaderHasBreakIf) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
- EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 30, 50, 99)) << assembly;
+ EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 30, 50, 99)) << assembly;
}
TEST_F(SpvParserCFGTest, ComputeBlockOrder_Loop_HeaderHasBreakUnless) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -1814,17 +1787,17 @@ TEST_F(SpvParserCFGTest, ComputeBlockOrder_Loop_HeaderHasBreakUnless) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
- EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 30, 50, 99)) << assembly;
+ EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 30, 50, 99)) << assembly;
}
TEST_F(SpvParserCFGTest, ComputeBlockOrder_Loop_BodyHasBreak) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -1845,17 +1818,17 @@ TEST_F(SpvParserCFGTest, ComputeBlockOrder_Loop_BodyHasBreak) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
- EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 30, 50, 99)) << assembly;
+ EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 30, 50, 99)) << assembly;
}
TEST_F(SpvParserCFGTest, ComputeBlockOrder_Loop_BodyHasBreakIf) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -1879,18 +1852,17 @@ TEST_F(SpvParserCFGTest, ComputeBlockOrder_Loop_BodyHasBreakIf) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
- EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 30, 40, 50, 99))
- << assembly;
+ EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 30, 40, 50, 99)) << assembly;
}
TEST_F(SpvParserCFGTest, ComputeBlockOrder_Loop_BodyHasBreakUnless) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -1914,18 +1886,17 @@ TEST_F(SpvParserCFGTest, ComputeBlockOrder_Loop_BodyHasBreakUnless) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
- EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 30, 40, 50, 99))
- << assembly;
+ EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 30, 40, 50, 99)) << assembly;
}
TEST_F(SpvParserCFGTest, ComputeBlockOrder_Loop_Body_If) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -1956,18 +1927,17 @@ TEST_F(SpvParserCFGTest, ComputeBlockOrder_Loop_Body_If) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
- EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 30, 40, 45, 49, 50, 99))
- << assembly;
+ EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 30, 40, 45, 49, 50, 99)) << assembly;
}
TEST_F(SpvParserCFGTest, ComputeBlockOrder_Loop_Body_If_Break) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -1995,18 +1965,17 @@ TEST_F(SpvParserCFGTest, ComputeBlockOrder_Loop_Body_If_Break) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
- EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 30, 40, 49, 50, 99))
- << assembly;
+ EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 30, 40, 49, 50, 99)) << assembly;
}
TEST_F(SpvParserCFGTest, ComputeBlockOrder_Loop_BodyHasContinueIf) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -2030,18 +1999,17 @@ TEST_F(SpvParserCFGTest, ComputeBlockOrder_Loop_BodyHasContinueIf) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
- EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 30, 40, 50, 99))
- << assembly;
+ EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 30, 40, 50, 99)) << assembly;
}
TEST_F(SpvParserCFGTest, ComputeBlockOrder_Loop_BodyHasContinueUnless) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -2065,18 +2033,17 @@ TEST_F(SpvParserCFGTest, ComputeBlockOrder_Loop_BodyHasContinueUnless) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
- EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 30, 40, 50, 99))
- << assembly;
+ EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 30, 40, 50, 99)) << assembly;
}
TEST_F(SpvParserCFGTest, ComputeBlockOrder_Loop_Body_If_Continue) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -2104,18 +2071,17 @@ TEST_F(SpvParserCFGTest, ComputeBlockOrder_Loop_Body_If_Continue) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
- EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 30, 40, 49, 50, 99))
- << assembly;
+ EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 30, 40, 49, 50, 99)) << assembly;
}
TEST_F(SpvParserCFGTest, ComputeBlockOrder_Loop_Body_Switch) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -2146,18 +2112,17 @@ TEST_F(SpvParserCFGTest, ComputeBlockOrder_Loop_Body_Switch) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
- EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 30, 45, 40, 49, 50, 99))
- << assembly;
+ EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 30, 45, 40, 49, 50, 99)) << assembly;
}
TEST_F(SpvParserCFGTest, ComputeBlockOrder_Loop_Body_Switch_CaseBreaks) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -2190,23 +2155,22 @@ TEST_F(SpvParserCFGTest, ComputeBlockOrder_Loop_Body_Switch_CaseBreaks) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
- EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 30, 45, 40, 49, 50, 99))
- << assembly;
+ EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 30, 45, 40, 49, 50, 99)) << assembly;
- // Fails SPIR-V validation:
- // Branch from block 40 to block 99 is an invalid exit from construct starting
- // at block 30; branch bypasses merge block 49
- p->DeliberatelyInvalidSpirv();
+ // Fails SPIR-V validation:
+ // Branch from block 40 to block 99 is an invalid exit from construct starting
+ // at block 30; branch bypasses merge block 49
+ p->DeliberatelyInvalidSpirv();
}
TEST_F(SpvParserCFGTest, ComputeBlockOrder_Loop_Body_Switch_CaseContinues) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -2237,21 +2201,19 @@ TEST_F(SpvParserCFGTest, ComputeBlockOrder_Loop_Body_Switch_CaseContinues) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
- EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 30, 45, 40, 49, 50, 99))
- << assembly;
+ EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 30, 45, 40, 49, 50, 99)) << assembly;
}
// TODO(crbug.com/tint/1406): Re-enable with the typo fix (preceeded->preceded)
// once that typo fix is rolled in Tint's SPIRV-Tools.
-TEST_F(SpvParserCFGTest,
- DISABLED_ComputeBlockOrder_Loop_BodyHasSwitchContinueBreak) {
- auto assembly = CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, DISABLED_ComputeBlockOrder_Loop_BodyHasSwitchContinueBreak) {
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -2276,15 +2238,14 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- EXPECT_FALSE(p->Parse());
- EXPECT_FALSE(p->success());
- EXPECT_THAT(p->error(),
- HasSubstr("OpSwitch must be preceeded by an OpSelectionMerge"));
+ auto p = parser(test::Assemble(assembly));
+ EXPECT_FALSE(p->Parse());
+ EXPECT_FALSE(p->success());
+ EXPECT_THAT(p->error(), HasSubstr("OpSwitch must be preceeded by an OpSelectionMerge"));
}
TEST_F(SpvParserCFGTest, ComputeBlockOrder_Loop_Continue_Sequence) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -2308,17 +2269,17 @@ TEST_F(SpvParserCFGTest, ComputeBlockOrder_Loop_Continue_Sequence) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
- EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 30, 50, 60, 99));
+ EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 30, 50, 60, 99));
}
TEST_F(SpvParserCFGTest, ComputeBlockOrder_Loop_Continue_ContainsIf) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -2349,17 +2310,17 @@ TEST_F(SpvParserCFGTest, ComputeBlockOrder_Loop_Continue_ContainsIf) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
- EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 30, 50, 60, 70, 89, 99));
+ EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 30, 50, 60, 70, 89, 99));
}
TEST_F(SpvParserCFGTest, ComputeBlockOrder_Loop_Continue_HasBreakIf) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -2380,17 +2341,17 @@ TEST_F(SpvParserCFGTest, ComputeBlockOrder_Loop_Continue_HasBreakIf) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
- EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 30, 50, 99));
+ EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 30, 50, 99));
}
TEST_F(SpvParserCFGTest, ComputeBlockOrder_Loop_Continue_HasBreakUnless) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -2411,19 +2372,19 @@ TEST_F(SpvParserCFGTest, ComputeBlockOrder_Loop_Continue_HasBreakUnless) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
- EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 30, 50, 99));
+ EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 30, 50, 99));
}
// TODO(crbug.com/tint/1406): Re-enable with the typo fix (preceeded->preceded)
// once that typo fix is rolled in Tint's SPIRV-Tools.
TEST_F(SpvParserCFGTest, DISABLED_ComputeBlockOrder_Loop_Continue_SwitchBreak) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -2446,15 +2407,14 @@ TEST_F(SpvParserCFGTest, DISABLED_ComputeBlockOrder_Loop_Continue_SwitchBreak) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- EXPECT_FALSE(p->Parse());
- EXPECT_FALSE(p->success());
- EXPECT_THAT(p->error(),
- HasSubstr("OpSwitch must be preceeded by an OpSelectionMerge"));
+ auto p = parser(test::Assemble(assembly));
+ EXPECT_FALSE(p->Parse());
+ EXPECT_FALSE(p->success());
+ EXPECT_THAT(p->error(), HasSubstr("OpSwitch must be preceeded by an OpSelectionMerge"));
}
TEST_F(SpvParserCFGTest, ComputeBlockOrder_Loop_Loop) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -2488,18 +2448,17 @@ TEST_F(SpvParserCFGTest, ComputeBlockOrder_Loop_Loop) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
- EXPECT_THAT(fe.block_order(),
- ElementsAre(10, 20, 30, 35, 37, 40, 49, 50, 99));
+ EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 30, 35, 37, 40, 49, 50, 99));
}
TEST_F(SpvParserCFGTest, ComputeBlockOrder_Loop_Loop_InnerBreak) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -2533,18 +2492,17 @@ TEST_F(SpvParserCFGTest, ComputeBlockOrder_Loop_Loop_InnerBreak) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
- EXPECT_THAT(fe.block_order(),
- ElementsAre(10, 20, 30, 35, 37, 40, 49, 50, 99));
+ EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 30, 35, 37, 40, 49, 50, 99));
}
TEST_F(SpvParserCFGTest, ComputeBlockOrder_Loop_Loop_InnerContinue) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -2578,18 +2536,17 @@ TEST_F(SpvParserCFGTest, ComputeBlockOrder_Loop_Loop_InnerContinue) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
- EXPECT_THAT(fe.block_order(),
- ElementsAre(10, 20, 30, 35, 37, 40, 49, 50, 99));
+ EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 30, 35, 37, 40, 49, 50, 99));
}
TEST_F(SpvParserCFGTest, ComputeBlockOrder_Loop_Loop_InnerContinueBreaks) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -2623,18 +2580,17 @@ TEST_F(SpvParserCFGTest, ComputeBlockOrder_Loop_Loop_InnerContinueBreaks) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
- EXPECT_THAT(fe.block_order(),
- ElementsAre(10, 20, 30, 35, 37, 40, 49, 50, 99));
+ EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 30, 35, 37, 40, 49, 50, 99));
}
TEST_F(SpvParserCFGTest, ComputeBlockOrder_Loop_Loop_InnerContinueContinues) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -2668,24 +2624,22 @@ TEST_F(SpvParserCFGTest, ComputeBlockOrder_Loop_Loop_InnerContinueContinues) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
- EXPECT_THAT(fe.block_order(),
- ElementsAre(10, 20, 30, 35, 37, 40, 49, 50, 99));
+ EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 30, 35, 37, 40, 49, 50, 99));
- p->DeliberatelyInvalidSpirv();
- // SPIR-V validation fails:
- // block <ID> 40[%40] exits the continue headed by <ID> 40[%40], but not
- // via a structured exit"
+ p->DeliberatelyInvalidSpirv();
+ // SPIR-V validation fails:
+ // block <ID> 40[%40] exits the continue headed by <ID> 40[%40], but not
+ // via a structured exit"
}
-TEST_F(SpvParserCFGTest,
- ComputeBlockOrder_Loop_Loop_SwitchBackedgeBreakContinue) {
- auto assembly = CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, ComputeBlockOrder_Loop_Loop_SwitchBackedgeBreakContinue) {
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -2724,23 +2678,22 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
- EXPECT_THAT(fe.block_order(),
- ElementsAre(10, 20, 30, 35, 37, 40, 49, 50, 99));
+ EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 30, 35, 37, 40, 49, 50, 99));
- p->DeliberatelyInvalidSpirv();
- // SPIR-V validation fails:
- // block <ID> 40[%40] exits the continue headed by <ID> 40[%40], but not
- // via a structured exit"
+ p->DeliberatelyInvalidSpirv();
+ // SPIR-V validation fails:
+ // block <ID> 40[%40] exits the continue headed by <ID> 40[%40], but not
+ // via a structured exit"
}
TEST_F(SpvParserCFGTest, VerifyHeaderContinueMergeOrder_Selection_Good) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -2758,17 +2711,17 @@ TEST_F(SpvParserCFGTest, VerifyHeaderContinueMergeOrder_Selection_Good) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
- fe.RegisterMerges();
- EXPECT_TRUE(fe.VerifyHeaderContinueMergeOrder());
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
+ fe.RegisterMerges();
+ EXPECT_TRUE(fe.VerifyHeaderContinueMergeOrder());
}
TEST_F(SpvParserCFGTest, VerifyHeaderContinueMergeOrder_SingleBlockLoop_Good) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -2783,17 +2736,17 @@ TEST_F(SpvParserCFGTest, VerifyHeaderContinueMergeOrder_SingleBlockLoop_Good) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
- fe.RegisterMerges();
- EXPECT_TRUE(fe.VerifyHeaderContinueMergeOrder()) << p->error();
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
+ fe.RegisterMerges();
+ EXPECT_TRUE(fe.VerifyHeaderContinueMergeOrder()) << p->error();
}
TEST_F(SpvParserCFGTest, VerifyHeaderContinueMergeOrder_MultiBlockLoop_Good) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -2811,18 +2764,17 @@ TEST_F(SpvParserCFGTest, VerifyHeaderContinueMergeOrder_MultiBlockLoop_Good) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
- fe.RegisterMerges();
- EXPECT_TRUE(fe.VerifyHeaderContinueMergeOrder());
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
+ fe.RegisterMerges();
+ EXPECT_TRUE(fe.VerifyHeaderContinueMergeOrder());
}
-TEST_F(SpvParserCFGTest,
- VerifyHeaderContinueMergeOrder_HeaderDoesNotStrictlyDominateMerge) {
- auto assembly = CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, VerifyHeaderContinueMergeOrder_HeaderDoesNotStrictlyDominateMerge) {
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -2843,24 +2795,22 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
- fe.RegisterMerges();
- EXPECT_FALSE(fe.VerifyHeaderContinueMergeOrder());
- EXPECT_THAT(p->error(),
- Eq("Header 50 does not strictly dominate its merge block 20"))
- << *fe.GetBlockInfo(50) << std::endl
- << *fe.GetBlockInfo(20) << std::endl
- << Dump(fe.block_order());
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
+ fe.RegisterMerges();
+ EXPECT_FALSE(fe.VerifyHeaderContinueMergeOrder());
+ EXPECT_THAT(p->error(), Eq("Header 50 does not strictly dominate its merge block 20"))
+ << *fe.GetBlockInfo(50) << std::endl
+ << *fe.GetBlockInfo(20) << std::endl
+ << Dump(fe.block_order());
}
-TEST_F(
- SpvParserCFGTest,
- VerifyHeaderContinueMergeOrder_HeaderDoesNotStrictlyDominateContinueTarget) { // NOLINT
- auto assembly = CommonTypes() + R"(
+TEST_F(SpvParserCFGTest,
+ VerifyHeaderContinueMergeOrder_HeaderDoesNotStrictlyDominateContinueTarget) { // NOLINT
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -2881,23 +2831,21 @@ TEST_F(
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
- fe.RegisterMerges();
- EXPECT_FALSE(fe.VerifyHeaderContinueMergeOrder());
- EXPECT_THAT(p->error(),
- Eq("Loop header 50 does not dominate its continue target 20"))
- << *fe.GetBlockInfo(50) << std::endl
- << *fe.GetBlockInfo(20) << std::endl
- << Dump(fe.block_order());
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
+ fe.RegisterMerges();
+ EXPECT_FALSE(fe.VerifyHeaderContinueMergeOrder());
+ EXPECT_THAT(p->error(), Eq("Loop header 50 does not dominate its continue target 20"))
+ << *fe.GetBlockInfo(50) << std::endl
+ << *fe.GetBlockInfo(20) << std::endl
+ << Dump(fe.block_order());
}
-TEST_F(SpvParserCFGTest,
- VerifyHeaderContinueMergeOrder_MergeInsideContinueTarget) {
- auto assembly = CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, VerifyHeaderContinueMergeOrder_MergeInsideContinueTarget) {
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -2918,22 +2866,20 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
- fe.RegisterMerges();
- EXPECT_FALSE(fe.VerifyHeaderContinueMergeOrder());
- EXPECT_THAT(p->error(),
- Eq("Merge block 60 for loop headed at block 50 appears at or "
- "before the loop's continue construct headed by block 70"))
- << Dump(fe.block_order());
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
+ fe.RegisterMerges();
+ EXPECT_FALSE(fe.VerifyHeaderContinueMergeOrder());
+ EXPECT_THAT(p->error(), Eq("Merge block 60 for loop headed at block 50 appears at or "
+ "before the loop's continue construct headed by block 70"))
+ << Dump(fe.block_order());
}
-TEST_F(SpvParserCFGTest,
- LabelControlFlowConstructs_OuterConstructIsFunction_SingleBlock) {
- auto assembly = CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, LabelControlFlowConstructs_OuterConstructIsFunction_SingleBlock) {
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -2941,23 +2887,22 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
- fe.RegisterMerges();
- EXPECT_TRUE(fe.LabelControlFlowConstructs());
- EXPECT_EQ(fe.constructs().size(), 1u);
- auto& c = fe.constructs().front();
- EXPECT_THAT(ToString(c), Eq("Construct{ Function [0,1) begin_id:10 end_id:0 "
- "depth:0 parent:null }"));
- EXPECT_EQ(fe.GetBlockInfo(10)->construct, c.get());
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
+ fe.RegisterMerges();
+ EXPECT_TRUE(fe.LabelControlFlowConstructs());
+ EXPECT_EQ(fe.constructs().size(), 1u);
+ auto& c = fe.constructs().front();
+ EXPECT_THAT(ToString(c), Eq("Construct{ Function [0,1) begin_id:10 end_id:0 "
+ "depth:0 parent:null }"));
+ EXPECT_EQ(fe.GetBlockInfo(10)->construct, c.get());
}
-TEST_F(SpvParserCFGTest,
- LabelControlFlowConstructs_OuterConstructIsFunction_MultiBlock) {
- auto assembly = CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, LabelControlFlowConstructs_OuterConstructIsFunction_MultiBlock) {
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -2968,24 +2913,23 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
- fe.RegisterMerges();
- EXPECT_TRUE(fe.LabelControlFlowConstructs());
- EXPECT_EQ(fe.constructs().size(), 1u);
- auto& c = fe.constructs().front();
- EXPECT_THAT(ToString(c), Eq("Construct{ Function [0,2) begin_id:10 end_id:0 "
- "depth:0 parent:null }"));
- EXPECT_EQ(fe.GetBlockInfo(10)->construct, c.get());
- EXPECT_EQ(fe.GetBlockInfo(5)->construct, c.get());
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
+ fe.RegisterMerges();
+ EXPECT_TRUE(fe.LabelControlFlowConstructs());
+ EXPECT_EQ(fe.constructs().size(), 1u);
+ auto& c = fe.constructs().front();
+ EXPECT_THAT(ToString(c), Eq("Construct{ Function [0,2) begin_id:10 end_id:0 "
+ "depth:0 parent:null }"));
+ EXPECT_EQ(fe.GetBlockInfo(10)->construct, c.get());
+ EXPECT_EQ(fe.GetBlockInfo(5)->construct, c.get());
}
-TEST_F(SpvParserCFGTest,
- LabelControlFlowConstructs_FunctionIsOnlyIfSelectionAndItsMerge) {
- auto assembly = CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, LabelControlFlowConstructs_FunctionIsOnlyIfSelectionAndItsMerge) {
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -3003,30 +2947,29 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
- fe.RegisterMerges();
- EXPECT_TRUE(fe.LabelControlFlowConstructs());
- const auto& constructs = fe.constructs();
- EXPECT_EQ(constructs.size(), 2u);
- EXPECT_THAT(ToString(constructs), Eq(R"(ConstructList{
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
+ fe.RegisterMerges();
+ EXPECT_TRUE(fe.LabelControlFlowConstructs());
+ const auto& constructs = fe.constructs();
+ EXPECT_EQ(constructs.size(), 2u);
+ EXPECT_THAT(ToString(constructs), Eq(R"(ConstructList{
Construct{ Function [0,4) begin_id:10 end_id:0 depth:0 parent:null }
Construct{ IfSelection [0,3) begin_id:10 end_id:99 depth:1 parent:Function@10 }
})")) << constructs;
- // The block records the nearest enclosing construct.
- EXPECT_EQ(fe.GetBlockInfo(10)->construct, constructs[1].get());
- EXPECT_EQ(fe.GetBlockInfo(20)->construct, constructs[1].get());
- EXPECT_EQ(fe.GetBlockInfo(30)->construct, constructs[1].get());
- EXPECT_EQ(fe.GetBlockInfo(99)->construct, constructs[0].get());
+ // The block records the nearest enclosing construct.
+ EXPECT_EQ(fe.GetBlockInfo(10)->construct, constructs[1].get());
+ EXPECT_EQ(fe.GetBlockInfo(20)->construct, constructs[1].get());
+ EXPECT_EQ(fe.GetBlockInfo(30)->construct, constructs[1].get());
+ EXPECT_EQ(fe.GetBlockInfo(99)->construct, constructs[0].get());
}
-TEST_F(
- SpvParserCFGTest,
- LabelControlFlowConstructs_PaddingBlocksBeforeAndAfterStructuredConstruct) {
- auto assembly = CommonTypes() + R"(
+TEST_F(SpvParserCFGTest,
+ LabelControlFlowConstructs_PaddingBlocksBeforeAndAfterStructuredConstruct) {
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%5 = OpLabel
@@ -3050,30 +2993,30 @@ TEST_F(
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
- fe.RegisterMerges();
- EXPECT_TRUE(fe.LabelControlFlowConstructs());
- const auto& constructs = fe.constructs();
- EXPECT_EQ(constructs.size(), 2u);
- EXPECT_THAT(ToString(constructs), Eq(R"(ConstructList{
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
+ fe.RegisterMerges();
+ EXPECT_TRUE(fe.LabelControlFlowConstructs());
+ const auto& constructs = fe.constructs();
+ EXPECT_EQ(constructs.size(), 2u);
+ EXPECT_THAT(ToString(constructs), Eq(R"(ConstructList{
Construct{ Function [0,6) begin_id:5 end_id:0 depth:0 parent:null }
Construct{ IfSelection [1,4) begin_id:10 end_id:99 depth:1 parent:Function@5 }
})")) << constructs;
- // The block records the nearest enclosing construct.
- EXPECT_EQ(fe.GetBlockInfo(5)->construct, constructs[0].get());
- EXPECT_EQ(fe.GetBlockInfo(10)->construct, constructs[1].get());
- EXPECT_EQ(fe.GetBlockInfo(20)->construct, constructs[1].get());
- EXPECT_EQ(fe.GetBlockInfo(30)->construct, constructs[1].get());
- EXPECT_EQ(fe.GetBlockInfo(99)->construct, constructs[0].get());
- EXPECT_EQ(fe.GetBlockInfo(200)->construct, constructs[0].get());
+ // The block records the nearest enclosing construct.
+ EXPECT_EQ(fe.GetBlockInfo(5)->construct, constructs[0].get());
+ EXPECT_EQ(fe.GetBlockInfo(10)->construct, constructs[1].get());
+ EXPECT_EQ(fe.GetBlockInfo(20)->construct, constructs[1].get());
+ EXPECT_EQ(fe.GetBlockInfo(30)->construct, constructs[1].get());
+ EXPECT_EQ(fe.GetBlockInfo(99)->construct, constructs[0].get());
+ EXPECT_EQ(fe.GetBlockInfo(200)->construct, constructs[0].get());
}
TEST_F(SpvParserCFGTest, LabelControlFlowConstructs_SwitchSelection) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -3094,29 +3037,29 @@ TEST_F(SpvParserCFGTest, LabelControlFlowConstructs_SwitchSelection) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
- fe.RegisterMerges();
- EXPECT_TRUE(fe.LabelControlFlowConstructs());
- const auto& constructs = fe.constructs();
- EXPECT_EQ(constructs.size(), 2u);
- EXPECT_THAT(ToString(constructs), Eq(R"(ConstructList{
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
+ fe.RegisterMerges();
+ EXPECT_TRUE(fe.LabelControlFlowConstructs());
+ const auto& constructs = fe.constructs();
+ EXPECT_EQ(constructs.size(), 2u);
+ EXPECT_THAT(ToString(constructs), Eq(R"(ConstructList{
Construct{ Function [0,5) begin_id:10 end_id:0 depth:0 parent:null }
Construct{ SwitchSelection [0,4) begin_id:10 end_id:99 depth:1 parent:Function@10 in-c-l-s:SwitchSelection@10 }
})")) << constructs;
- // The block records the nearest enclosing construct.
- EXPECT_EQ(fe.GetBlockInfo(10)->construct, constructs[1].get());
- EXPECT_EQ(fe.GetBlockInfo(20)->construct, constructs[1].get());
- EXPECT_EQ(fe.GetBlockInfo(30)->construct, constructs[1].get());
- EXPECT_EQ(fe.GetBlockInfo(40)->construct, constructs[1].get());
- EXPECT_EQ(fe.GetBlockInfo(99)->construct, constructs[0].get());
+ // The block records the nearest enclosing construct.
+ EXPECT_EQ(fe.GetBlockInfo(10)->construct, constructs[1].get());
+ EXPECT_EQ(fe.GetBlockInfo(20)->construct, constructs[1].get());
+ EXPECT_EQ(fe.GetBlockInfo(30)->construct, constructs[1].get());
+ EXPECT_EQ(fe.GetBlockInfo(40)->construct, constructs[1].get());
+ EXPECT_EQ(fe.GetBlockInfo(99)->construct, constructs[0].get());
}
TEST_F(SpvParserCFGTest, LabelControlFlowConstructs_SingleBlockLoop) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -3131,31 +3074,30 @@ TEST_F(SpvParserCFGTest, LabelControlFlowConstructs_SingleBlockLoop) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
- fe.RegisterMerges();
- EXPECT_TRUE(fe.LabelControlFlowConstructs());
- const auto& constructs = fe.constructs();
- EXPECT_EQ(constructs.size(), 2u);
- // A single-block loop consists *only* of a continue target with one block in
- // it.
- EXPECT_THAT(ToString(constructs), Eq(R"(ConstructList{
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
+ fe.RegisterMerges();
+ EXPECT_TRUE(fe.LabelControlFlowConstructs());
+ const auto& constructs = fe.constructs();
+ EXPECT_EQ(constructs.size(), 2u);
+ // A single-block loop consists *only* of a continue target with one block in
+ // it.
+ EXPECT_THAT(ToString(constructs), Eq(R"(ConstructList{
Construct{ Function [0,3) begin_id:10 end_id:0 depth:0 parent:null }
Construct{ Continue [1,2) begin_id:20 end_id:99 depth:1 parent:Function@10 in-c:Continue@20 }
})")) << constructs;
- // The block records the nearest enclosing construct.
- EXPECT_EQ(fe.GetBlockInfo(10)->construct, constructs[0].get());
- EXPECT_EQ(fe.GetBlockInfo(20)->construct, constructs[1].get());
- EXPECT_EQ(fe.GetBlockInfo(99)->construct, constructs[0].get());
+ // The block records the nearest enclosing construct.
+ EXPECT_EQ(fe.GetBlockInfo(10)->construct, constructs[0].get());
+ EXPECT_EQ(fe.GetBlockInfo(20)->construct, constructs[1].get());
+ EXPECT_EQ(fe.GetBlockInfo(99)->construct, constructs[0].get());
}
-TEST_F(SpvParserCFGTest,
- LabelControlFlowConstructs_MultiBlockLoop_HeaderIsNotContinue) {
- // In this case, we have a continue construct and a non-empty loop construct.
- auto assembly = CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, LabelControlFlowConstructs_MultiBlockLoop_HeaderIsNotContinue) {
+ // In this case, we have a continue construct and a non-empty loop construct.
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -3179,32 +3121,31 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
- fe.RegisterMerges();
- EXPECT_TRUE(fe.LabelControlFlowConstructs());
- const auto& constructs = fe.constructs();
- EXPECT_THAT(ToString(constructs), Eq(R"(ConstructList{
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
+ fe.RegisterMerges();
+ EXPECT_TRUE(fe.LabelControlFlowConstructs());
+ const auto& constructs = fe.constructs();
+ EXPECT_THAT(ToString(constructs), Eq(R"(ConstructList{
Construct{ Function [0,6) begin_id:10 end_id:0 depth:0 parent:null }
Construct{ Continue [3,5) begin_id:40 end_id:99 depth:1 parent:Function@10 in-c:Continue@40 }
Construct{ Loop [1,3) begin_id:20 end_id:40 depth:1 parent:Function@10 scope:[1,5) in-l:Loop@20 }
})")) << constructs;
- // The block records the nearest enclosing construct.
- EXPECT_EQ(fe.GetBlockInfo(10)->construct, constructs[0].get());
- EXPECT_EQ(fe.GetBlockInfo(20)->construct, constructs[2].get());
- EXPECT_EQ(fe.GetBlockInfo(30)->construct, constructs[2].get());
- EXPECT_EQ(fe.GetBlockInfo(40)->construct, constructs[1].get());
- EXPECT_EQ(fe.GetBlockInfo(50)->construct, constructs[1].get());
- EXPECT_EQ(fe.GetBlockInfo(99)->construct, constructs[0].get());
+ // The block records the nearest enclosing construct.
+ EXPECT_EQ(fe.GetBlockInfo(10)->construct, constructs[0].get());
+ EXPECT_EQ(fe.GetBlockInfo(20)->construct, constructs[2].get());
+ EXPECT_EQ(fe.GetBlockInfo(30)->construct, constructs[2].get());
+ EXPECT_EQ(fe.GetBlockInfo(40)->construct, constructs[1].get());
+ EXPECT_EQ(fe.GetBlockInfo(50)->construct, constructs[1].get());
+ EXPECT_EQ(fe.GetBlockInfo(99)->construct, constructs[0].get());
}
-TEST_F(SpvParserCFGTest,
- LabelControlFlowConstructs_MultiBlockLoop_HeaderIsContinue) {
- // In this case, we have only a continue construct and no loop construct.
- auto assembly = CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, LabelControlFlowConstructs_MultiBlockLoop_HeaderIsContinue) {
+ // In this case, we have only a continue construct and no loop construct.
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -3228,30 +3169,29 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
- fe.RegisterMerges();
- EXPECT_TRUE(fe.LabelControlFlowConstructs());
- const auto& constructs = fe.constructs();
- EXPECT_THAT(ToString(constructs), Eq(R"(ConstructList{
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
+ fe.RegisterMerges();
+ EXPECT_TRUE(fe.LabelControlFlowConstructs());
+ const auto& constructs = fe.constructs();
+ EXPECT_THAT(ToString(constructs), Eq(R"(ConstructList{
Construct{ Function [0,6) begin_id:10 end_id:0 depth:0 parent:null }
Construct{ Continue [1,5) begin_id:20 end_id:99 depth:1 parent:Function@10 in-c:Continue@20 }
})")) << constructs;
- // The block records the nearest enclosing construct.
- EXPECT_EQ(fe.GetBlockInfo(10)->construct, constructs[0].get());
- EXPECT_EQ(fe.GetBlockInfo(20)->construct, constructs[1].get());
- EXPECT_EQ(fe.GetBlockInfo(30)->construct, constructs[1].get());
- EXPECT_EQ(fe.GetBlockInfo(40)->construct, constructs[1].get());
- EXPECT_EQ(fe.GetBlockInfo(50)->construct, constructs[1].get());
- EXPECT_EQ(fe.GetBlockInfo(99)->construct, constructs[0].get());
+ // The block records the nearest enclosing construct.
+ EXPECT_EQ(fe.GetBlockInfo(10)->construct, constructs[0].get());
+ EXPECT_EQ(fe.GetBlockInfo(20)->construct, constructs[1].get());
+ EXPECT_EQ(fe.GetBlockInfo(30)->construct, constructs[1].get());
+ EXPECT_EQ(fe.GetBlockInfo(40)->construct, constructs[1].get());
+ EXPECT_EQ(fe.GetBlockInfo(50)->construct, constructs[1].get());
+ EXPECT_EQ(fe.GetBlockInfo(99)->construct, constructs[0].get());
}
-TEST_F(SpvParserCFGTest,
- LabelControlFlowConstructs_MergeBlockIsAlsoSingleBlockLoop) {
- auto assembly = CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, LabelControlFlowConstructs_MergeBlockIsAlsoSingleBlockLoop) {
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -3272,32 +3212,31 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
- fe.RegisterMerges();
- EXPECT_TRUE(fe.LabelControlFlowConstructs());
- const auto& constructs = fe.constructs();
- EXPECT_EQ(constructs.size(), 3u);
- // A single-block loop consists *only* of a continue target with one block in
- // it.
- EXPECT_THAT(ToString(constructs), Eq(R"(ConstructList{
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
+ fe.RegisterMerges();
+ EXPECT_TRUE(fe.LabelControlFlowConstructs());
+ const auto& constructs = fe.constructs();
+ EXPECT_EQ(constructs.size(), 3u);
+ // A single-block loop consists *only* of a continue target with one block in
+ // it.
+ EXPECT_THAT(ToString(constructs), Eq(R"(ConstructList{
Construct{ Function [0,4) begin_id:10 end_id:0 depth:0 parent:null }
Construct{ IfSelection [0,2) begin_id:10 end_id:50 depth:1 parent:Function@10 }
Construct{ Continue [2,3) begin_id:50 end_id:99 depth:1 parent:Function@10 in-c:Continue@50 }
})")) << constructs;
- // The block records the nearest enclosing construct.
- EXPECT_EQ(fe.GetBlockInfo(10)->construct, constructs[1].get());
- EXPECT_EQ(fe.GetBlockInfo(20)->construct, constructs[1].get());
- EXPECT_EQ(fe.GetBlockInfo(50)->construct, constructs[2].get());
- EXPECT_EQ(fe.GetBlockInfo(99)->construct, constructs[0].get());
+ // The block records the nearest enclosing construct.
+ EXPECT_EQ(fe.GetBlockInfo(10)->construct, constructs[1].get());
+ EXPECT_EQ(fe.GetBlockInfo(20)->construct, constructs[1].get());
+ EXPECT_EQ(fe.GetBlockInfo(50)->construct, constructs[2].get());
+ EXPECT_EQ(fe.GetBlockInfo(99)->construct, constructs[0].get());
}
-TEST_F(SpvParserCFGTest,
- LabelControlFlowConstructs_MergeBlockIsAlsoMultiBlockLoopHeader) {
- auto assembly = CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, LabelControlFlowConstructs_MergeBlockIsAlsoMultiBlockLoopHeader) {
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -3321,31 +3260,31 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
- fe.RegisterMerges();
- EXPECT_TRUE(fe.LabelControlFlowConstructs());
- const auto& constructs = fe.constructs();
- EXPECT_EQ(constructs.size(), 4u);
- EXPECT_THAT(ToString(constructs), Eq(R"(ConstructList{
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
+ fe.RegisterMerges();
+ EXPECT_TRUE(fe.LabelControlFlowConstructs());
+ const auto& constructs = fe.constructs();
+ EXPECT_EQ(constructs.size(), 4u);
+ EXPECT_THAT(ToString(constructs), Eq(R"(ConstructList{
Construct{ Function [0,5) begin_id:10 end_id:0 depth:0 parent:null }
Construct{ IfSelection [0,2) begin_id:10 end_id:50 depth:1 parent:Function@10 }
Construct{ Continue [3,4) begin_id:60 end_id:99 depth:1 parent:Function@10 in-c:Continue@60 }
Construct{ Loop [2,3) begin_id:50 end_id:60 depth:1 parent:Function@10 scope:[2,4) in-l:Loop@50 }
})")) << constructs;
- // The block records the nearest enclosing construct.
- EXPECT_EQ(fe.GetBlockInfo(10)->construct, constructs[1].get());
- EXPECT_EQ(fe.GetBlockInfo(20)->construct, constructs[1].get());
- EXPECT_EQ(fe.GetBlockInfo(50)->construct, constructs[3].get());
- EXPECT_EQ(fe.GetBlockInfo(60)->construct, constructs[2].get());
- EXPECT_EQ(fe.GetBlockInfo(99)->construct, constructs[0].get());
+ // The block records the nearest enclosing construct.
+ EXPECT_EQ(fe.GetBlockInfo(10)->construct, constructs[1].get());
+ EXPECT_EQ(fe.GetBlockInfo(20)->construct, constructs[1].get());
+ EXPECT_EQ(fe.GetBlockInfo(50)->construct, constructs[3].get());
+ EXPECT_EQ(fe.GetBlockInfo(60)->construct, constructs[2].get());
+ EXPECT_EQ(fe.GetBlockInfo(99)->construct, constructs[0].get());
}
TEST_F(SpvParserCFGTest, LabelControlFlowConstructs_Nest_If_If) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -3380,35 +3319,35 @@ TEST_F(SpvParserCFGTest, LabelControlFlowConstructs_Nest_If_If) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
- fe.RegisterMerges();
- EXPECT_TRUE(fe.LabelControlFlowConstructs());
- const auto& constructs = fe.constructs();
- EXPECT_EQ(constructs.size(), 4u);
- EXPECT_THAT(ToString(constructs), Eq(R"(ConstructList{
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
+ fe.RegisterMerges();
+ EXPECT_TRUE(fe.LabelControlFlowConstructs());
+ const auto& constructs = fe.constructs();
+ EXPECT_EQ(constructs.size(), 4u);
+ EXPECT_THAT(ToString(constructs), Eq(R"(ConstructList{
Construct{ Function [0,9) begin_id:10 end_id:0 depth:0 parent:null }
Construct{ IfSelection [0,8) begin_id:10 end_id:99 depth:1 parent:Function@10 }
Construct{ IfSelection [1,3) begin_id:20 end_id:40 depth:2 parent:IfSelection@10 }
Construct{ IfSelection [5,7) begin_id:50 end_id:89 depth:2 parent:IfSelection@10 }
})")) << constructs;
- // The block records the nearest enclosing construct.
- EXPECT_EQ(fe.GetBlockInfo(10)->construct, constructs[1].get());
- EXPECT_EQ(fe.GetBlockInfo(20)->construct, constructs[2].get());
- EXPECT_EQ(fe.GetBlockInfo(30)->construct, constructs[2].get());
- EXPECT_EQ(fe.GetBlockInfo(40)->construct, constructs[1].get());
- EXPECT_EQ(fe.GetBlockInfo(49)->construct, constructs[1].get());
- EXPECT_EQ(fe.GetBlockInfo(50)->construct, constructs[3].get());
- EXPECT_EQ(fe.GetBlockInfo(60)->construct, constructs[3].get());
- EXPECT_EQ(fe.GetBlockInfo(89)->construct, constructs[1].get());
- EXPECT_EQ(fe.GetBlockInfo(99)->construct, constructs[0].get());
+ // The block records the nearest enclosing construct.
+ EXPECT_EQ(fe.GetBlockInfo(10)->construct, constructs[1].get());
+ EXPECT_EQ(fe.GetBlockInfo(20)->construct, constructs[2].get());
+ EXPECT_EQ(fe.GetBlockInfo(30)->construct, constructs[2].get());
+ EXPECT_EQ(fe.GetBlockInfo(40)->construct, constructs[1].get());
+ EXPECT_EQ(fe.GetBlockInfo(49)->construct, constructs[1].get());
+ EXPECT_EQ(fe.GetBlockInfo(50)->construct, constructs[3].get());
+ EXPECT_EQ(fe.GetBlockInfo(60)->construct, constructs[3].get());
+ EXPECT_EQ(fe.GetBlockInfo(89)->construct, constructs[1].get());
+ EXPECT_EQ(fe.GetBlockInfo(99)->construct, constructs[0].get());
}
TEST_F(SpvParserCFGTest, LabelControlFlowConstructs_Nest_Switch_If) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -3440,35 +3379,35 @@ TEST_F(SpvParserCFGTest, LabelControlFlowConstructs_Nest_Switch_If) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
- fe.RegisterMerges();
- EXPECT_TRUE(fe.LabelControlFlowConstructs());
- const auto& constructs = fe.constructs();
- EXPECT_EQ(constructs.size(), 4u);
- // The ordering among siblings depends on the computed block order.
- EXPECT_THAT(ToString(constructs), Eq(R"(ConstructList{
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
+ fe.RegisterMerges();
+ EXPECT_TRUE(fe.LabelControlFlowConstructs());
+ const auto& constructs = fe.constructs();
+ EXPECT_EQ(constructs.size(), 4u);
+ // The ordering among siblings depends on the computed block order.
+ EXPECT_THAT(ToString(constructs), Eq(R"(ConstructList{
Construct{ Function [0,8) begin_id:10 end_id:0 depth:0 parent:null }
Construct{ SwitchSelection [0,7) begin_id:10 end_id:99 depth:1 parent:Function@10 in-c-l-s:SwitchSelection@10 }
Construct{ IfSelection [1,3) begin_id:50 end_id:89 depth:2 parent:SwitchSelection@10 in-c-l-s:SwitchSelection@10 }
Construct{ IfSelection [4,6) begin_id:20 end_id:49 depth:2 parent:SwitchSelection@10 in-c-l-s:SwitchSelection@10 }
})")) << constructs;
- // The block records the nearest enclosing construct.
- EXPECT_EQ(fe.GetBlockInfo(10)->construct, constructs[1].get());
- EXPECT_EQ(fe.GetBlockInfo(20)->construct, constructs[3].get());
- EXPECT_EQ(fe.GetBlockInfo(30)->construct, constructs[3].get());
- EXPECT_EQ(fe.GetBlockInfo(49)->construct, constructs[1].get());
- EXPECT_EQ(fe.GetBlockInfo(50)->construct, constructs[2].get());
- EXPECT_EQ(fe.GetBlockInfo(60)->construct, constructs[2].get());
- EXPECT_EQ(fe.GetBlockInfo(89)->construct, constructs[1].get());
- EXPECT_EQ(fe.GetBlockInfo(99)->construct, constructs[0].get());
+ // The block records the nearest enclosing construct.
+ EXPECT_EQ(fe.GetBlockInfo(10)->construct, constructs[1].get());
+ EXPECT_EQ(fe.GetBlockInfo(20)->construct, constructs[3].get());
+ EXPECT_EQ(fe.GetBlockInfo(30)->construct, constructs[3].get());
+ EXPECT_EQ(fe.GetBlockInfo(49)->construct, constructs[1].get());
+ EXPECT_EQ(fe.GetBlockInfo(50)->construct, constructs[2].get());
+ EXPECT_EQ(fe.GetBlockInfo(60)->construct, constructs[2].get());
+ EXPECT_EQ(fe.GetBlockInfo(89)->construct, constructs[1].get());
+ EXPECT_EQ(fe.GetBlockInfo(99)->construct, constructs[0].get());
}
TEST_F(SpvParserCFGTest, LabelControlFlowConstructs_Nest_If_Switch) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -3490,30 +3429,30 @@ TEST_F(SpvParserCFGTest, LabelControlFlowConstructs_Nest_If_Switch) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
- fe.RegisterMerges();
- EXPECT_TRUE(fe.LabelControlFlowConstructs());
- const auto& constructs = fe.constructs();
- EXPECT_EQ(constructs.size(), 3u);
- EXPECT_THAT(ToString(constructs), Eq(R"(ConstructList{
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
+ fe.RegisterMerges();
+ EXPECT_TRUE(fe.LabelControlFlowConstructs());
+ const auto& constructs = fe.constructs();
+ EXPECT_EQ(constructs.size(), 3u);
+ EXPECT_THAT(ToString(constructs), Eq(R"(ConstructList{
Construct{ Function [0,5) begin_id:10 end_id:0 depth:0 parent:null }
Construct{ IfSelection [0,4) begin_id:10 end_id:99 depth:1 parent:Function@10 }
Construct{ SwitchSelection [1,3) begin_id:20 end_id:89 depth:2 parent:IfSelection@10 in-c-l-s:SwitchSelection@20 }
})")) << constructs;
- // The block records the nearest enclosing construct.
- EXPECT_EQ(fe.GetBlockInfo(10)->construct, constructs[1].get());
- EXPECT_EQ(fe.GetBlockInfo(20)->construct, constructs[2].get());
- EXPECT_EQ(fe.GetBlockInfo(30)->construct, constructs[2].get());
- EXPECT_EQ(fe.GetBlockInfo(89)->construct, constructs[1].get());
- EXPECT_EQ(fe.GetBlockInfo(99)->construct, constructs[0].get());
+ // The block records the nearest enclosing construct.
+ EXPECT_EQ(fe.GetBlockInfo(10)->construct, constructs[1].get());
+ EXPECT_EQ(fe.GetBlockInfo(20)->construct, constructs[2].get());
+ EXPECT_EQ(fe.GetBlockInfo(30)->construct, constructs[2].get());
+ EXPECT_EQ(fe.GetBlockInfo(89)->construct, constructs[1].get());
+ EXPECT_EQ(fe.GetBlockInfo(99)->construct, constructs[0].get());
}
TEST_F(SpvParserCFGTest, LabelControlFlowConstructs_Nest_Loop_Loop) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -3544,34 +3483,34 @@ TEST_F(SpvParserCFGTest, LabelControlFlowConstructs_Nest_Loop_Loop) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
- fe.RegisterMerges();
- EXPECT_TRUE(fe.LabelControlFlowConstructs());
- const auto& constructs = fe.constructs();
- EXPECT_EQ(constructs.size(), 4u);
- EXPECT_THAT(ToString(constructs), Eq(R"(ConstructList{
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
+ fe.RegisterMerges();
+ EXPECT_TRUE(fe.LabelControlFlowConstructs());
+ const auto& constructs = fe.constructs();
+ EXPECT_EQ(constructs.size(), 4u);
+ EXPECT_THAT(ToString(constructs), Eq(R"(ConstructList{
Construct{ Function [0,8) begin_id:10 end_id:0 depth:0 parent:null }
Construct{ Continue [4,6) begin_id:50 end_id:89 depth:1 parent:Function@10 in-c:Continue@50 }
Construct{ Loop [1,4) begin_id:20 end_id:50 depth:1 parent:Function@10 scope:[1,6) in-l:Loop@20 }
Construct{ Continue [2,3) begin_id:30 end_id:40 depth:2 parent:Loop@20 in-l:Loop@20 in-c:Continue@30 }
})")) << constructs;
- // The block records the nearest enclosing construct.
- EXPECT_EQ(fe.GetBlockInfo(10)->construct, constructs[0].get());
- EXPECT_EQ(fe.GetBlockInfo(20)->construct, constructs[2].get());
- EXPECT_EQ(fe.GetBlockInfo(30)->construct, constructs[3].get());
- EXPECT_EQ(fe.GetBlockInfo(40)->construct, constructs[2].get());
- EXPECT_EQ(fe.GetBlockInfo(50)->construct, constructs[1].get());
- EXPECT_EQ(fe.GetBlockInfo(60)->construct, constructs[1].get());
- EXPECT_EQ(fe.GetBlockInfo(89)->construct, constructs[0].get());
- EXPECT_EQ(fe.GetBlockInfo(99)->construct, constructs[0].get());
+ // The block records the nearest enclosing construct.
+ EXPECT_EQ(fe.GetBlockInfo(10)->construct, constructs[0].get());
+ EXPECT_EQ(fe.GetBlockInfo(20)->construct, constructs[2].get());
+ EXPECT_EQ(fe.GetBlockInfo(30)->construct, constructs[3].get());
+ EXPECT_EQ(fe.GetBlockInfo(40)->construct, constructs[2].get());
+ EXPECT_EQ(fe.GetBlockInfo(50)->construct, constructs[1].get());
+ EXPECT_EQ(fe.GetBlockInfo(60)->construct, constructs[1].get());
+ EXPECT_EQ(fe.GetBlockInfo(89)->construct, constructs[0].get());
+ EXPECT_EQ(fe.GetBlockInfo(99)->construct, constructs[0].get());
}
TEST_F(SpvParserCFGTest, LabelControlFlowConstructs_Nest_Loop_If) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -3599,33 +3538,33 @@ TEST_F(SpvParserCFGTest, LabelControlFlowConstructs_Nest_Loop_If) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
- fe.RegisterMerges();
- EXPECT_TRUE(fe.LabelControlFlowConstructs());
- const auto& constructs = fe.constructs();
- EXPECT_EQ(constructs.size(), 4u);
- EXPECT_THAT(ToString(constructs), Eq(R"(ConstructList{
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
+ fe.RegisterMerges();
+ EXPECT_TRUE(fe.LabelControlFlowConstructs());
+ const auto& constructs = fe.constructs();
+ EXPECT_EQ(constructs.size(), 4u);
+ EXPECT_THAT(ToString(constructs), Eq(R"(ConstructList{
Construct{ Function [0,7) begin_id:10 end_id:0 depth:0 parent:null }
Construct{ Continue [5,6) begin_id:80 end_id:99 depth:1 parent:Function@10 in-c:Continue@80 }
Construct{ Loop [1,5) begin_id:20 end_id:80 depth:1 parent:Function@10 scope:[1,6) in-l:Loop@20 }
Construct{ IfSelection [2,4) begin_id:30 end_id:49 depth:2 parent:Loop@20 in-l:Loop@20 }
})")) << constructs;
- // The block records the nearest enclosing construct.
- EXPECT_EQ(fe.GetBlockInfo(10)->construct, constructs[0].get());
- EXPECT_EQ(fe.GetBlockInfo(20)->construct, constructs[2].get());
- EXPECT_EQ(fe.GetBlockInfo(30)->construct, constructs[3].get());
- EXPECT_EQ(fe.GetBlockInfo(40)->construct, constructs[3].get());
- EXPECT_EQ(fe.GetBlockInfo(49)->construct, constructs[2].get());
- EXPECT_EQ(fe.GetBlockInfo(80)->construct, constructs[1].get());
- EXPECT_EQ(fe.GetBlockInfo(99)->construct, constructs[0].get());
+ // The block records the nearest enclosing construct.
+ EXPECT_EQ(fe.GetBlockInfo(10)->construct, constructs[0].get());
+ EXPECT_EQ(fe.GetBlockInfo(20)->construct, constructs[2].get());
+ EXPECT_EQ(fe.GetBlockInfo(30)->construct, constructs[3].get());
+ EXPECT_EQ(fe.GetBlockInfo(40)->construct, constructs[3].get());
+ EXPECT_EQ(fe.GetBlockInfo(49)->construct, constructs[2].get());
+ EXPECT_EQ(fe.GetBlockInfo(80)->construct, constructs[1].get());
+ EXPECT_EQ(fe.GetBlockInfo(99)->construct, constructs[0].get());
}
TEST_F(SpvParserCFGTest, LabelControlFlowConstructs_Nest_LoopContinue_If) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -3650,32 +3589,32 @@ TEST_F(SpvParserCFGTest, LabelControlFlowConstructs_Nest_LoopContinue_If) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
- fe.RegisterMerges();
- EXPECT_TRUE(fe.LabelControlFlowConstructs());
- const auto& constructs = fe.constructs();
- EXPECT_EQ(constructs.size(), 4u);
- EXPECT_THAT(ToString(constructs), Eq(R"(ConstructList{
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
+ fe.RegisterMerges();
+ EXPECT_TRUE(fe.LabelControlFlowConstructs());
+ const auto& constructs = fe.constructs();
+ EXPECT_EQ(constructs.size(), 4u);
+ EXPECT_THAT(ToString(constructs), Eq(R"(ConstructList{
Construct{ Function [0,6) begin_id:10 end_id:0 depth:0 parent:null }
Construct{ Continue [2,5) begin_id:30 end_id:99 depth:1 parent:Function@10 in-c:Continue@30 }
Construct{ Loop [1,2) begin_id:20 end_id:30 depth:1 parent:Function@10 scope:[1,5) in-l:Loop@20 }
Construct{ IfSelection [2,4) begin_id:30 end_id:49 depth:2 parent:Continue@30 in-c:Continue@30 }
})")) << constructs;
- // The block records the nearest enclosing construct.
- EXPECT_EQ(fe.GetBlockInfo(10)->construct, constructs[0].get());
- EXPECT_EQ(fe.GetBlockInfo(20)->construct, constructs[2].get());
- EXPECT_EQ(fe.GetBlockInfo(30)->construct, constructs[3].get());
- EXPECT_EQ(fe.GetBlockInfo(40)->construct, constructs[3].get());
- EXPECT_EQ(fe.GetBlockInfo(49)->construct, constructs[1].get());
- EXPECT_EQ(fe.GetBlockInfo(99)->construct, constructs[0].get());
+ // The block records the nearest enclosing construct.
+ EXPECT_EQ(fe.GetBlockInfo(10)->construct, constructs[0].get());
+ EXPECT_EQ(fe.GetBlockInfo(20)->construct, constructs[2].get());
+ EXPECT_EQ(fe.GetBlockInfo(30)->construct, constructs[3].get());
+ EXPECT_EQ(fe.GetBlockInfo(40)->construct, constructs[3].get());
+ EXPECT_EQ(fe.GetBlockInfo(49)->construct, constructs[1].get());
+ EXPECT_EQ(fe.GetBlockInfo(99)->construct, constructs[0].get());
}
TEST_F(SpvParserCFGTest, LabelControlFlowConstructs_Nest_If_SingleBlockLoop) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -3694,28 +3633,28 @@ TEST_F(SpvParserCFGTest, LabelControlFlowConstructs_Nest_If_SingleBlockLoop) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
- fe.RegisterMerges();
- EXPECT_TRUE(fe.LabelControlFlowConstructs());
- const auto& constructs = fe.constructs();
- EXPECT_EQ(constructs.size(), 3u);
- EXPECT_THAT(ToString(constructs), Eq(R"(ConstructList{
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
+ fe.RegisterMerges();
+ EXPECT_TRUE(fe.LabelControlFlowConstructs());
+ const auto& constructs = fe.constructs();
+ EXPECT_EQ(constructs.size(), 3u);
+ EXPECT_THAT(ToString(constructs), Eq(R"(ConstructList{
Construct{ Function [0,4) begin_id:10 end_id:0 depth:0 parent:null }
Construct{ IfSelection [0,3) begin_id:10 end_id:99 depth:1 parent:Function@10 }
Construct{ Continue [1,2) begin_id:20 end_id:89 depth:2 parent:IfSelection@10 in-c:Continue@20 }
})")) << constructs;
- // The block records the nearest enclosing construct.
- EXPECT_EQ(fe.GetBlockInfo(10)->construct, constructs[1].get());
- EXPECT_EQ(fe.GetBlockInfo(20)->construct, constructs[2].get());
- EXPECT_EQ(fe.GetBlockInfo(99)->construct, constructs[0].get());
+ // The block records the nearest enclosing construct.
+ EXPECT_EQ(fe.GetBlockInfo(10)->construct, constructs[1].get());
+ EXPECT_EQ(fe.GetBlockInfo(20)->construct, constructs[2].get());
+ EXPECT_EQ(fe.GetBlockInfo(99)->construct, constructs[0].get());
}
TEST_F(SpvParserCFGTest, LabelControlFlowConstructs_Nest_If_MultiBlockLoop) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -3743,36 +3682,36 @@ TEST_F(SpvParserCFGTest, LabelControlFlowConstructs_Nest_If_MultiBlockLoop) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
- fe.RegisterMerges();
- EXPECT_TRUE(fe.LabelControlFlowConstructs());
- const auto& constructs = fe.constructs();
- EXPECT_EQ(constructs.size(), 4u);
- EXPECT_THAT(ToString(constructs), Eq(R"(ConstructList{
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
+ fe.RegisterMerges();
+ EXPECT_TRUE(fe.LabelControlFlowConstructs());
+ const auto& constructs = fe.constructs();
+ EXPECT_EQ(constructs.size(), 4u);
+ EXPECT_THAT(ToString(constructs), Eq(R"(ConstructList{
Construct{ Function [0,7) begin_id:10 end_id:0 depth:0 parent:null }
Construct{ IfSelection [0,6) begin_id:10 end_id:99 depth:1 parent:Function@10 }
Construct{ Continue [3,5) begin_id:40 end_id:89 depth:2 parent:IfSelection@10 in-c:Continue@40 }
Construct{ Loop [1,3) begin_id:20 end_id:40 depth:2 parent:IfSelection@10 scope:[1,5) in-l:Loop@20 }
})")) << constructs;
- // The block records the nearest enclosing construct.
- EXPECT_EQ(fe.GetBlockInfo(10)->construct, constructs[1].get());
- EXPECT_EQ(fe.GetBlockInfo(20)->construct, constructs[3].get());
- EXPECT_EQ(fe.GetBlockInfo(30)->construct, constructs[3].get());
- EXPECT_EQ(fe.GetBlockInfo(40)->construct, constructs[2].get());
- EXPECT_EQ(fe.GetBlockInfo(50)->construct, constructs[2].get());
- EXPECT_EQ(fe.GetBlockInfo(89)->construct, constructs[1].get());
- EXPECT_EQ(fe.GetBlockInfo(99)->construct, constructs[0].get());
+ // The block records the nearest enclosing construct.
+ EXPECT_EQ(fe.GetBlockInfo(10)->construct, constructs[1].get());
+ EXPECT_EQ(fe.GetBlockInfo(20)->construct, constructs[3].get());
+ EXPECT_EQ(fe.GetBlockInfo(30)->construct, constructs[3].get());
+ EXPECT_EQ(fe.GetBlockInfo(40)->construct, constructs[2].get());
+ EXPECT_EQ(fe.GetBlockInfo(50)->construct, constructs[2].get());
+ EXPECT_EQ(fe.GetBlockInfo(89)->construct, constructs[1].get());
+ EXPECT_EQ(fe.GetBlockInfo(99)->construct, constructs[0].get());
}
TEST_F(SpvParserCFGTest, LabelControlFlowConstructs_LoopInterallyDiverge) {
- // In this case, insert a synthetic if-selection with the same blocks
- // as the loop construct.
- // crbug.com/tint/524
- auto assembly = CommonTypes() + R"(
+ // In this case, insert a synthetic if-selection with the same blocks
+ // as the loop construct.
+ // crbug.com/tint/524
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -3796,29 +3735,29 @@ TEST_F(SpvParserCFGTest, LabelControlFlowConstructs_LoopInterallyDiverge) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- ASSERT_TRUE(FlowLabelControlFlowConstructs(&fe)) << p->error();
- const auto& constructs = fe.constructs();
- EXPECT_EQ(constructs.size(), 4u);
- ASSERT_THAT(ToString(constructs), Eq(R"(ConstructList{
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ ASSERT_TRUE(FlowLabelControlFlowConstructs(&fe)) << p->error();
+ const auto& constructs = fe.constructs();
+ EXPECT_EQ(constructs.size(), 4u);
+ ASSERT_THAT(ToString(constructs), Eq(R"(ConstructList{
Construct{ Function [0,6) begin_id:10 end_id:0 depth:0 parent:null }
Construct{ Continue [4,5) begin_id:90 end_id:99 depth:1 parent:Function@10 in-c:Continue@90 }
Construct{ Loop [1,4) begin_id:20 end_id:90 depth:1 parent:Function@10 scope:[1,5) in-l:Loop@20 }
Construct{ IfSelection [1,4) begin_id:20 end_id:90 depth:2 parent:Loop@20 in-l:Loop@20 }
})")) << constructs;
- // The block records the nearest enclosing construct.
- EXPECT_EQ(fe.GetBlockInfo(10)->construct, constructs[0].get());
- EXPECT_EQ(fe.GetBlockInfo(20)->construct, constructs[3].get());
- EXPECT_EQ(fe.GetBlockInfo(30)->construct, constructs[3].get());
- EXPECT_EQ(fe.GetBlockInfo(40)->construct, constructs[3].get());
- EXPECT_EQ(fe.GetBlockInfo(90)->construct, constructs[1].get());
- EXPECT_EQ(fe.GetBlockInfo(99)->construct, constructs[0].get());
+ // The block records the nearest enclosing construct.
+ EXPECT_EQ(fe.GetBlockInfo(10)->construct, constructs[0].get());
+ EXPECT_EQ(fe.GetBlockInfo(20)->construct, constructs[3].get());
+ EXPECT_EQ(fe.GetBlockInfo(30)->construct, constructs[3].get());
+ EXPECT_EQ(fe.GetBlockInfo(40)->construct, constructs[3].get());
+ EXPECT_EQ(fe.GetBlockInfo(90)->construct, constructs[1].get());
+ EXPECT_EQ(fe.GetBlockInfo(99)->construct, constructs[0].get());
}
TEST_F(SpvParserCFGTest, FindSwitchCaseHeaders_DefaultIsLongRangeBackedge) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -3836,21 +3775,21 @@ TEST_F(SpvParserCFGTest, FindSwitchCaseHeaders_DefaultIsLongRangeBackedge) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
- EXPECT_TRUE(fe.VerifyHeaderContinueMergeOrder());
- fe.RegisterMerges();
- fe.LabelControlFlowConstructs();
- EXPECT_FALSE(fe.FindSwitchCaseHeaders());
- EXPECT_THAT(p->error(), Eq("Switch branch from block 20 to default target "
- "block 10 can't be a back-edge"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
+ EXPECT_TRUE(fe.VerifyHeaderContinueMergeOrder());
+ fe.RegisterMerges();
+ fe.LabelControlFlowConstructs();
+ EXPECT_FALSE(fe.FindSwitchCaseHeaders());
+ EXPECT_THAT(p->error(), Eq("Switch branch from block 20 to default target "
+ "block 10 can't be a back-edge"));
}
TEST_F(SpvParserCFGTest, FindSwitchCaseHeaders_DefaultIsSelfLoop) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -3868,24 +3807,22 @@ TEST_F(SpvParserCFGTest, FindSwitchCaseHeaders_DefaultIsSelfLoop) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
- EXPECT_TRUE(fe.VerifyHeaderContinueMergeOrder());
- fe.RegisterMerges();
- fe.LabelControlFlowConstructs();
- EXPECT_FALSE(fe.FindSwitchCaseHeaders());
- // Self-loop that isn't its own continue target is already rejected with a
- // different message.
- EXPECT_THAT(
- p->error(),
- Eq("Block 20 branches to itself but is not its own continue target"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
+ EXPECT_TRUE(fe.VerifyHeaderContinueMergeOrder());
+ fe.RegisterMerges();
+ fe.LabelControlFlowConstructs();
+ EXPECT_FALSE(fe.FindSwitchCaseHeaders());
+ // Self-loop that isn't its own continue target is already rejected with a
+ // different message.
+ EXPECT_THAT(p->error(), Eq("Block 20 branches to itself but is not its own continue target"));
}
TEST_F(SpvParserCFGTest, FindSwitchCaseHeaders_DefaultCantEscapeSwitch) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -3903,21 +3840,21 @@ TEST_F(SpvParserCFGTest, FindSwitchCaseHeaders_DefaultCantEscapeSwitch) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
- EXPECT_TRUE(fe.VerifyHeaderContinueMergeOrder());
- fe.RegisterMerges();
- fe.LabelControlFlowConstructs();
- EXPECT_FALSE(fe.FindSwitchCaseHeaders());
- EXPECT_THAT(p->error(), Eq("Switch branch from block 10 to default block 99 "
- "escapes the selection construct"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
+ EXPECT_TRUE(fe.VerifyHeaderContinueMergeOrder());
+ fe.RegisterMerges();
+ fe.LabelControlFlowConstructs();
+ EXPECT_FALSE(fe.FindSwitchCaseHeaders());
+ EXPECT_THAT(p->error(), Eq("Switch branch from block 10 to default block 99 "
+ "escapes the selection construct"));
}
TEST_F(SpvParserCFGTest, FindSwitchCaseHeaders_DefaultForTwoSwitches_AsMerge) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -3942,23 +3879,21 @@ TEST_F(SpvParserCFGTest, FindSwitchCaseHeaders_DefaultForTwoSwitches_AsMerge) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
- EXPECT_TRUE(fe.VerifyHeaderContinueMergeOrder());
- fe.RegisterMerges();
- fe.LabelControlFlowConstructs();
- EXPECT_FALSE(fe.FindSwitchCaseHeaders());
- EXPECT_THAT(p->error(),
- Eq("Block 89 is the default block for switch-selection header 10 "
- "and also the merge block for 50 (violates dominance rule)"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
+ EXPECT_TRUE(fe.VerifyHeaderContinueMergeOrder());
+ fe.RegisterMerges();
+ fe.LabelControlFlowConstructs();
+ EXPECT_FALSE(fe.FindSwitchCaseHeaders());
+ EXPECT_THAT(p->error(), Eq("Block 89 is the default block for switch-selection header 10 "
+ "and also the merge block for 50 (violates dominance rule)"));
}
-TEST_F(SpvParserCFGTest,
- FindSwitchCaseHeaders_DefaultForTwoSwitches_AsCaseClause) {
- auto assembly = CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, FindSwitchCaseHeaders_DefaultForTwoSwitches_AsCaseClause) {
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -3986,21 +3921,21 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
- EXPECT_TRUE(fe.VerifyHeaderContinueMergeOrder());
- fe.RegisterMerges();
- fe.LabelControlFlowConstructs();
- EXPECT_FALSE(fe.FindSwitchCaseHeaders());
- EXPECT_THAT(p->error(), Eq("Block 80 is declared as the default target for "
- "two OpSwitch instructions, at blocks 10 and 50"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
+ EXPECT_TRUE(fe.VerifyHeaderContinueMergeOrder());
+ fe.RegisterMerges();
+ fe.LabelControlFlowConstructs();
+ EXPECT_FALSE(fe.FindSwitchCaseHeaders());
+ EXPECT_THAT(p->error(), Eq("Block 80 is declared as the default target for "
+ "two OpSwitch instructions, at blocks 10 and 50"));
}
TEST_F(SpvParserCFGTest, FindSwitchCaseHeaders_CaseIsLongRangeBackedge) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -4015,21 +3950,21 @@ TEST_F(SpvParserCFGTest, FindSwitchCaseHeaders_CaseIsLongRangeBackedge) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
- EXPECT_TRUE(fe.VerifyHeaderContinueMergeOrder());
- fe.RegisterMerges();
- fe.LabelControlFlowConstructs();
- EXPECT_FALSE(fe.FindSwitchCaseHeaders());
- EXPECT_THAT(p->error(), Eq("Switch branch from block 20 to case target "
- "block 10 can't be a back-edge"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
+ EXPECT_TRUE(fe.VerifyHeaderContinueMergeOrder());
+ fe.RegisterMerges();
+ fe.LabelControlFlowConstructs();
+ EXPECT_FALSE(fe.FindSwitchCaseHeaders());
+ EXPECT_THAT(p->error(), Eq("Switch branch from block 20 to case target "
+ "block 10 can't be a back-edge"));
}
TEST_F(SpvParserCFGTest, FindSwitchCaseHeaders_CaseIsSelfLoop) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -4044,23 +3979,21 @@ TEST_F(SpvParserCFGTest, FindSwitchCaseHeaders_CaseIsSelfLoop) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
- EXPECT_TRUE(fe.VerifyHeaderContinueMergeOrder());
- fe.RegisterMerges();
- fe.LabelControlFlowConstructs();
- EXPECT_FALSE(fe.FindSwitchCaseHeaders());
- // The error is caught earlier
- EXPECT_THAT(
- p->error(),
- Eq("Block 20 branches to itself but is not its own continue target"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
+ EXPECT_TRUE(fe.VerifyHeaderContinueMergeOrder());
+ fe.RegisterMerges();
+ fe.LabelControlFlowConstructs();
+ EXPECT_FALSE(fe.FindSwitchCaseHeaders());
+ // The error is caught earlier
+ EXPECT_THAT(p->error(), Eq("Block 20 branches to itself but is not its own continue target"));
}
TEST_F(SpvParserCFGTest, FindSwitchCaseHeaders_CaseCanBeSwitchMerge) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -4075,22 +4008,22 @@ TEST_F(SpvParserCFGTest, FindSwitchCaseHeaders_CaseCanBeSwitchMerge) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
- EXPECT_TRUE(fe.VerifyHeaderContinueMergeOrder());
- fe.RegisterMerges();
- fe.LabelControlFlowConstructs();
- EXPECT_TRUE(fe.FindSwitchCaseHeaders());
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
+ EXPECT_TRUE(fe.VerifyHeaderContinueMergeOrder());
+ fe.RegisterMerges();
+ fe.LabelControlFlowConstructs();
+ EXPECT_TRUE(fe.FindSwitchCaseHeaders());
- // TODO(crbug.com/tint/774) Re-enable after codegen bug fixed.
- p->DeliberatelyInvalidSpirv();
+ // TODO(crbug.com/tint/774) Re-enable after codegen bug fixed.
+ p->DeliberatelyInvalidSpirv();
}
TEST_F(SpvParserCFGTest, FindSwitchCaseHeaders_CaseCantEscapeSwitch) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -4109,21 +4042,21 @@ TEST_F(SpvParserCFGTest, FindSwitchCaseHeaders_CaseCantEscapeSwitch) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
- EXPECT_TRUE(fe.VerifyHeaderContinueMergeOrder());
- fe.RegisterMerges();
- fe.LabelControlFlowConstructs();
- EXPECT_FALSE(fe.FindSwitchCaseHeaders());
- EXPECT_THAT(p->error(), Eq("Switch branch from block 20 to case target block "
- "99 escapes the selection construct"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
+ EXPECT_TRUE(fe.VerifyHeaderContinueMergeOrder());
+ fe.RegisterMerges();
+ fe.LabelControlFlowConstructs();
+ EXPECT_FALSE(fe.FindSwitchCaseHeaders());
+ EXPECT_THAT(p->error(), Eq("Switch branch from block 20 to case target block "
+ "99 escapes the selection construct"));
}
TEST_F(SpvParserCFGTest, FindSwitchCaseHeaders_CaseForMoreThanOneSwitch) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -4145,22 +4078,21 @@ TEST_F(SpvParserCFGTest, FindSwitchCaseHeaders_CaseForMoreThanOneSwitch) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
- EXPECT_TRUE(fe.VerifyHeaderContinueMergeOrder());
- fe.RegisterMerges();
- fe.LabelControlFlowConstructs();
- EXPECT_FALSE(fe.FindSwitchCaseHeaders());
- EXPECT_THAT(p->error(),
- Eq("Block 50 is declared as the switch case target for two "
- "OpSwitch instructions, at blocks 10 and 20"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
+ EXPECT_TRUE(fe.VerifyHeaderContinueMergeOrder());
+ fe.RegisterMerges();
+ fe.LabelControlFlowConstructs();
+ EXPECT_FALSE(fe.FindSwitchCaseHeaders());
+ EXPECT_THAT(p->error(), Eq("Block 50 is declared as the switch case target for two "
+ "OpSwitch instructions, at blocks 10 and 20"));
}
TEST_F(SpvParserCFGTest, FindSwitchCaseHeaders_CaseIsMergeForAnotherConstruct) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -4185,21 +4117,21 @@ TEST_F(SpvParserCFGTest, FindSwitchCaseHeaders_CaseIsMergeForAnotherConstruct) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
- EXPECT_TRUE(fe.VerifyHeaderContinueMergeOrder());
- fe.RegisterMerges();
- fe.LabelControlFlowConstructs();
- EXPECT_FALSE(fe.FindSwitchCaseHeaders());
- EXPECT_THAT(p->error(), Eq("Switch branch from block 10 to case target block "
- "20 escapes the selection construct"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
+ EXPECT_TRUE(fe.VerifyHeaderContinueMergeOrder());
+ fe.RegisterMerges();
+ fe.LabelControlFlowConstructs();
+ EXPECT_FALSE(fe.FindSwitchCaseHeaders());
+ EXPECT_THAT(p->error(), Eq("Switch branch from block 10 to case target block "
+ "20 escapes the selection construct"));
}
TEST_F(SpvParserCFGTest, FindSwitchCaseHeaders_NoSwitch) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -4207,26 +4139,26 @@ TEST_F(SpvParserCFGTest, FindSwitchCaseHeaders_NoSwitch) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
- EXPECT_TRUE(fe.VerifyHeaderContinueMergeOrder());
- fe.RegisterMerges();
- fe.LabelControlFlowConstructs();
- EXPECT_TRUE(fe.FindSwitchCaseHeaders());
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
+ EXPECT_TRUE(fe.VerifyHeaderContinueMergeOrder());
+ fe.RegisterMerges();
+ fe.LabelControlFlowConstructs();
+ EXPECT_TRUE(fe.FindSwitchCaseHeaders());
- const auto* bi10 = fe.GetBlockInfo(10);
- ASSERT_NE(bi10, nullptr);
- EXPECT_EQ(bi10->case_head_for, nullptr);
- EXPECT_EQ(bi10->default_head_for, nullptr);
- EXPECT_FALSE(bi10->default_is_merge);
- EXPECT_EQ(bi10->case_values.get(), nullptr);
+ const auto* bi10 = fe.GetBlockInfo(10);
+ ASSERT_NE(bi10, nullptr);
+ EXPECT_EQ(bi10->case_head_for, nullptr);
+ EXPECT_EQ(bi10->default_head_for, nullptr);
+ EXPECT_FALSE(bi10->default_is_merge);
+ EXPECT_EQ(bi10->case_values.get(), nullptr);
}
TEST_F(SpvParserCFGTest, FindSwitchCaseHeaders_DefaultIsMerge) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -4241,27 +4173,27 @@ TEST_F(SpvParserCFGTest, FindSwitchCaseHeaders_DefaultIsMerge) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
- EXPECT_TRUE(fe.VerifyHeaderContinueMergeOrder());
- fe.RegisterMerges();
- fe.LabelControlFlowConstructs();
- EXPECT_TRUE(fe.FindSwitchCaseHeaders());
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
+ EXPECT_TRUE(fe.VerifyHeaderContinueMergeOrder());
+ fe.RegisterMerges();
+ fe.LabelControlFlowConstructs();
+ EXPECT_TRUE(fe.FindSwitchCaseHeaders());
- const auto* bi99 = fe.GetBlockInfo(99);
- ASSERT_NE(bi99, nullptr);
- EXPECT_EQ(bi99->case_head_for, nullptr);
- ASSERT_NE(bi99->default_head_for, nullptr);
- EXPECT_EQ(bi99->default_head_for->begin_id, 10u);
- EXPECT_TRUE(bi99->default_is_merge);
- EXPECT_EQ(bi99->case_values.get(), nullptr);
+ const auto* bi99 = fe.GetBlockInfo(99);
+ ASSERT_NE(bi99, nullptr);
+ EXPECT_EQ(bi99->case_head_for, nullptr);
+ ASSERT_NE(bi99->default_head_for, nullptr);
+ EXPECT_EQ(bi99->default_head_for->begin_id, 10u);
+ EXPECT_TRUE(bi99->default_is_merge);
+ EXPECT_EQ(bi99->case_values.get(), nullptr);
}
TEST_F(SpvParserCFGTest, FindSwitchCaseHeaders_DefaultIsNotMerge) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -4279,27 +4211,27 @@ TEST_F(SpvParserCFGTest, FindSwitchCaseHeaders_DefaultIsNotMerge) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
- EXPECT_TRUE(fe.VerifyHeaderContinueMergeOrder());
- fe.RegisterMerges();
- fe.LabelControlFlowConstructs();
- EXPECT_TRUE(fe.FindSwitchCaseHeaders());
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
+ EXPECT_TRUE(fe.VerifyHeaderContinueMergeOrder());
+ fe.RegisterMerges();
+ fe.LabelControlFlowConstructs();
+ EXPECT_TRUE(fe.FindSwitchCaseHeaders());
- const auto* bi30 = fe.GetBlockInfo(30);
- ASSERT_NE(bi30, nullptr);
- EXPECT_EQ(bi30->case_head_for, nullptr);
- ASSERT_NE(bi30->default_head_for, nullptr);
- EXPECT_EQ(bi30->default_head_for->begin_id, 10u);
- EXPECT_FALSE(bi30->default_is_merge);
- EXPECT_EQ(bi30->case_values.get(), nullptr);
+ const auto* bi30 = fe.GetBlockInfo(30);
+ ASSERT_NE(bi30, nullptr);
+ EXPECT_EQ(bi30->case_head_for, nullptr);
+ ASSERT_NE(bi30->default_head_for, nullptr);
+ EXPECT_EQ(bi30->default_head_for->begin_id, 10u);
+ EXPECT_FALSE(bi30->default_is_merge);
+ EXPECT_EQ(bi30->case_values.get(), nullptr);
}
TEST_F(SpvParserCFGTest, FindSwitchCaseHeaders_CaseIsNotDefault) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -4317,27 +4249,27 @@ TEST_F(SpvParserCFGTest, FindSwitchCaseHeaders_CaseIsNotDefault) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
- EXPECT_TRUE(fe.VerifyHeaderContinueMergeOrder());
- fe.RegisterMerges();
- fe.LabelControlFlowConstructs();
- EXPECT_TRUE(fe.FindSwitchCaseHeaders());
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
+ EXPECT_TRUE(fe.VerifyHeaderContinueMergeOrder());
+ fe.RegisterMerges();
+ fe.LabelControlFlowConstructs();
+ EXPECT_TRUE(fe.FindSwitchCaseHeaders());
- const auto* bi20 = fe.GetBlockInfo(20);
- ASSERT_NE(bi20, nullptr);
- ASSERT_NE(bi20->case_head_for, nullptr);
- EXPECT_EQ(bi20->case_head_for->begin_id, 10u);
- EXPECT_EQ(bi20->default_head_for, nullptr);
- EXPECT_FALSE(bi20->default_is_merge);
- EXPECT_THAT(*(bi20->case_values.get()), UnorderedElementsAre(200));
+ const auto* bi20 = fe.GetBlockInfo(20);
+ ASSERT_NE(bi20, nullptr);
+ ASSERT_NE(bi20->case_head_for, nullptr);
+ EXPECT_EQ(bi20->case_head_for->begin_id, 10u);
+ EXPECT_EQ(bi20->default_head_for, nullptr);
+ EXPECT_FALSE(bi20->default_is_merge);
+ EXPECT_THAT(*(bi20->case_values.get()), UnorderedElementsAre(200));
}
TEST_F(SpvParserCFGTest, FindSwitchCaseHeaders_CaseIsDefault) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -4352,27 +4284,27 @@ TEST_F(SpvParserCFGTest, FindSwitchCaseHeaders_CaseIsDefault) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
- EXPECT_TRUE(fe.VerifyHeaderContinueMergeOrder());
- fe.RegisterMerges();
- fe.LabelControlFlowConstructs();
- EXPECT_TRUE(fe.FindSwitchCaseHeaders());
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
+ EXPECT_TRUE(fe.VerifyHeaderContinueMergeOrder());
+ fe.RegisterMerges();
+ fe.LabelControlFlowConstructs();
+ EXPECT_TRUE(fe.FindSwitchCaseHeaders());
- const auto* bi20 = fe.GetBlockInfo(20);
- ASSERT_NE(bi20, nullptr);
- ASSERT_NE(bi20->case_head_for, nullptr);
- EXPECT_EQ(bi20->case_head_for->begin_id, 10u);
- EXPECT_EQ(bi20->default_head_for, bi20->case_head_for);
- EXPECT_FALSE(bi20->default_is_merge);
- EXPECT_THAT(*(bi20->case_values.get()), UnorderedElementsAre(200));
+ const auto* bi20 = fe.GetBlockInfo(20);
+ ASSERT_NE(bi20, nullptr);
+ ASSERT_NE(bi20->case_head_for, nullptr);
+ EXPECT_EQ(bi20->case_head_for->begin_id, 10u);
+ EXPECT_EQ(bi20->default_head_for, bi20->case_head_for);
+ EXPECT_FALSE(bi20->default_is_merge);
+ EXPECT_THAT(*(bi20->case_values.get()), UnorderedElementsAre(200));
}
TEST_F(SpvParserCFGTest, FindSwitchCaseHeaders_ManyCasesWithSameValue_IsError) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -4390,22 +4322,21 @@ TEST_F(SpvParserCFGTest, FindSwitchCaseHeaders_ManyCasesWithSameValue_IsError) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
- EXPECT_TRUE(fe.VerifyHeaderContinueMergeOrder());
- fe.RegisterMerges();
- fe.LabelControlFlowConstructs();
- EXPECT_FALSE(fe.FindSwitchCaseHeaders());
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
+ EXPECT_TRUE(fe.VerifyHeaderContinueMergeOrder());
+ fe.RegisterMerges();
+ fe.LabelControlFlowConstructs();
+ EXPECT_FALSE(fe.FindSwitchCaseHeaders());
- EXPECT_THAT(p->error(),
- Eq("Duplicate case value 200 in OpSwitch in block 10"));
+ EXPECT_THAT(p->error(), Eq("Duplicate case value 200 in OpSwitch in block 10"));
}
TEST_F(SpvParserCFGTest, FindSwitchCaseHeaders_ManyValuesWithSameCase) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -4420,27 +4351,27 @@ TEST_F(SpvParserCFGTest, FindSwitchCaseHeaders_ManyValuesWithSameCase) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
- EXPECT_TRUE(fe.VerifyHeaderContinueMergeOrder());
- fe.RegisterMerges();
- fe.LabelControlFlowConstructs();
- EXPECT_TRUE(fe.FindSwitchCaseHeaders());
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
+ EXPECT_TRUE(fe.VerifyHeaderContinueMergeOrder());
+ fe.RegisterMerges();
+ fe.LabelControlFlowConstructs();
+ EXPECT_TRUE(fe.FindSwitchCaseHeaders());
- const auto* bi20 = fe.GetBlockInfo(20);
- ASSERT_NE(bi20, nullptr);
- ASSERT_NE(bi20->case_head_for, nullptr);
- EXPECT_EQ(bi20->case_head_for->begin_id, 10u);
- EXPECT_EQ(bi20->default_head_for, nullptr);
- EXPECT_FALSE(bi20->default_is_merge);
- EXPECT_THAT(*(bi20->case_values.get()), UnorderedElementsAre(200, 300));
+ const auto* bi20 = fe.GetBlockInfo(20);
+ ASSERT_NE(bi20, nullptr);
+ ASSERT_NE(bi20->case_head_for, nullptr);
+ EXPECT_EQ(bi20->case_head_for->begin_id, 10u);
+ EXPECT_EQ(bi20->default_head_for, nullptr);
+ EXPECT_FALSE(bi20->default_is_merge);
+ EXPECT_THAT(*(bi20->case_values.get()), UnorderedElementsAre(200, 300));
}
TEST_F(SpvParserCFGTest, ClassifyCFGEdges_BranchEscapesIfConstruct) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -4465,19 +4396,17 @@ TEST_F(SpvParserCFGTest, ClassifyCFGEdges_BranchEscapesIfConstruct) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_FALSE(FlowClassifyCFGEdges(&fe)) << p->error();
- // Some further processing
- EXPECT_THAT(
- p->error(),
- Eq("Branch from block 30 to block 80 is an invalid exit from construct "
- "starting at block 20; branch bypasses merge block 50"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_FALSE(FlowClassifyCFGEdges(&fe)) << p->error();
+ // Some further processing
+ EXPECT_THAT(p->error(), Eq("Branch from block 30 to block 80 is an invalid exit from construct "
+ "starting at block 20; branch bypasses merge block 50"));
}
TEST_F(SpvParserCFGTest, ClassifyCFGEdges_ReturnInContinueConstruct) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -4498,16 +4427,16 @@ TEST_F(SpvParserCFGTest, ClassifyCFGEdges_ReturnInContinueConstruct) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_FALSE(FlowClassifyCFGEdges(&fe)) << p->error();
- EXPECT_THAT(p->error(), Eq("Invalid function exit at block 50 from continue "
- "construct starting at 50"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_FALSE(FlowClassifyCFGEdges(&fe)) << p->error();
+ EXPECT_THAT(p->error(), Eq("Invalid function exit at block 50 from continue "
+ "construct starting at 50"));
}
TEST_F(SpvParserCFGTest, ClassifyCFGEdges_KillInContinueConstruct) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -4528,16 +4457,16 @@ TEST_F(SpvParserCFGTest, ClassifyCFGEdges_KillInContinueConstruct) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_FALSE(FlowClassifyCFGEdges(&fe));
- EXPECT_THAT(p->error(), Eq("Invalid function exit at block 50 from continue "
- "construct starting at 50"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_FALSE(FlowClassifyCFGEdges(&fe));
+ EXPECT_THAT(p->error(), Eq("Invalid function exit at block 50 from continue "
+ "construct starting at 50"));
}
TEST_F(SpvParserCFGTest, ClassifyCFGEdges_UnreachableInContinueConstruct) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -4558,16 +4487,16 @@ TEST_F(SpvParserCFGTest, ClassifyCFGEdges_UnreachableInContinueConstruct) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_FALSE(FlowClassifyCFGEdges(&fe));
- EXPECT_THAT(p->error(), Eq("Invalid function exit at block 50 from continue "
- "construct starting at 50"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_FALSE(FlowClassifyCFGEdges(&fe));
+ EXPECT_THAT(p->error(), Eq("Invalid function exit at block 50 from continue "
+ "construct starting at 50"));
}
TEST_F(SpvParserCFGTest, ClassifyCFGEdges_BackEdge_NotInContinueConstruct) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -4588,18 +4517,15 @@ TEST_F(SpvParserCFGTest, ClassifyCFGEdges_BackEdge_NotInContinueConstruct) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_FALSE(FlowClassifyCFGEdges(&fe));
- EXPECT_THAT(
- p->error(),
- Eq("Invalid backedge (30->20): 30 is not in a continue construct"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_FALSE(FlowClassifyCFGEdges(&fe));
+ EXPECT_THAT(p->error(), Eq("Invalid backedge (30->20): 30 is not in a continue construct"));
}
-TEST_F(SpvParserCFGTest,
- ClassifyCFGEdges_BackEdge_NotInLastBlockOfContinueConstruct) {
- auto assembly = CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, ClassifyCFGEdges_BackEdge_NotInLastBlockOfContinueConstruct) {
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -4623,18 +4549,17 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_FALSE(FlowClassifyCFGEdges(&fe));
- EXPECT_THAT(p->error(),
- Eq("Invalid exit (50->20) from continue construct: 50 is not the "
- "last block in the continue construct starting at 50 "
- "(violates post-dominance rule)"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_FALSE(FlowClassifyCFGEdges(&fe));
+ EXPECT_THAT(p->error(), Eq("Invalid exit (50->20) from continue construct: 50 is not the "
+ "last block in the continue construct starting at 50 "
+ "(violates post-dominance rule)"));
}
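The "post-dominance rule" cited in this diagnostic (and in several later ones) is the requirement that the last block of a continue construct post-dominates the construct: every path from a block in the construct to a function exit must pass through that last block. The following is a generic sketch of that check, not Tint's implementation; it assumes nothing beyond a plain block-id to successor-ids map.

#include <cstdint>
#include <map>
#include <set>
#include <vector>

// Sketch only: returns true when `b` post-dominates `a`, i.e. every path from
// `a` to an exit block (modeled here as a block with no successors) passes
// through `b`.
bool PostDominates(const std::map<uint32_t, std::vector<uint32_t>>& successors,
                   uint32_t b, uint32_t a) {
    std::set<uint32_t> visited;
    std::vector<uint32_t> worklist{a};
    while (!worklist.empty()) {
        const uint32_t block = worklist.back();
        worklist.pop_back();
        if (block == b || !visited.insert(block).second) {
            continue;  // Paths that reach `b`, or blocks already explored, are fine.
        }
        auto it = successors.find(block);
        if (it == successors.end() || it->second.empty()) {
            return false;  // Reached a function exit without passing through `b`.
        }
        for (uint32_t next : it->second) {
            worklist.push_back(next);
        }
    }
    return true;
}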
TEST_F(SpvParserCFGTest, ClassifyCFGEdges_BackEdge_ToWrongHeader) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -4659,16 +4584,16 @@ TEST_F(SpvParserCFGTest, ClassifyCFGEdges_BackEdge_ToWrongHeader) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_FALSE(FlowClassifyCFGEdges(&fe));
- EXPECT_THAT(p->error(), Eq("Invalid backedge (50->10): does not branch to "
- "the corresponding loop header, expected 20"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_FALSE(FlowClassifyCFGEdges(&fe));
+ EXPECT_THAT(p->error(), Eq("Invalid backedge (50->10): does not branch to "
+ "the corresponding loop header, expected 20"));
}
TEST_F(SpvParserCFGTest, ClassifyCFGEdges_BackEdge_SingleBlockLoop) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -4683,20 +4608,19 @@ TEST_F(SpvParserCFGTest, ClassifyCFGEdges_BackEdge_SingleBlockLoop) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(FlowClassifyCFGEdges(&fe));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(FlowClassifyCFGEdges(&fe));
- auto* bi20 = fe.GetBlockInfo(20);
- ASSERT_NE(bi20, nullptr);
- EXPECT_EQ(bi20->succ_edge.count(20), 1u);
- EXPECT_EQ(bi20->succ_edge[20], EdgeKind::kBack);
+ auto* bi20 = fe.GetBlockInfo(20);
+ ASSERT_NE(bi20, nullptr);
+ EXPECT_EQ(bi20->succ_edge.count(20), 1u);
+ EXPECT_EQ(bi20->succ_edge[20], EdgeKind::kBack);
}
-TEST_F(SpvParserCFGTest,
- ClassifyCFGEdges_BackEdge_MultiBlockLoop_SingleBlockContinueConstruct) {
- auto assembly = CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, ClassifyCFGEdges_BackEdge_MultiBlockLoop_SingleBlockContinueConstruct) {
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -4717,21 +4641,21 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(FlowClassifyCFGEdges(&fe));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(FlowClassifyCFGEdges(&fe));
- auto* bi40 = fe.GetBlockInfo(40);
- ASSERT_NE(bi40, nullptr);
- EXPECT_EQ(bi40->succ_edge.count(20), 1u);
- EXPECT_EQ(bi40->succ_edge[20], EdgeKind::kBack);
+ auto* bi40 = fe.GetBlockInfo(40);
+ ASSERT_NE(bi40, nullptr);
+ EXPECT_EQ(bi40->succ_edge.count(20), 1u);
+ EXPECT_EQ(bi40->succ_edge[20], EdgeKind::kBack);
}
TEST_F(
SpvParserCFGTest,
ClassifyCFGEdges_BackEdge_MultiBlockLoop_MultiBlockContinueConstruct_ContinueIsNotHeader) { // NOLINT
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -4755,21 +4679,21 @@ TEST_F(
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(FlowClassifyCFGEdges(&fe));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(FlowClassifyCFGEdges(&fe));
- auto* bi50 = fe.GetBlockInfo(50);
- ASSERT_NE(bi50, nullptr);
- EXPECT_EQ(bi50->succ_edge.count(20), 1u);
- EXPECT_EQ(bi50->succ_edge[20], EdgeKind::kBack);
+ auto* bi50 = fe.GetBlockInfo(50);
+ ASSERT_NE(bi50, nullptr);
+ EXPECT_EQ(bi50->succ_edge.count(20), 1u);
+ EXPECT_EQ(bi50->succ_edge[20], EdgeKind::kBack);
}
TEST_F(
SpvParserCFGTest,
ClassifyCFGEdges_BackEdge_MultiBlockLoop_MultiBlockContinueConstruct_ContinueIsHeader) { // NOLINT
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -4793,19 +4717,19 @@ TEST_F(
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(FlowClassifyCFGEdges(&fe)) << p->error();
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(FlowClassifyCFGEdges(&fe)) << p->error();
- auto* bi50 = fe.GetBlockInfo(50);
- ASSERT_NE(bi50, nullptr);
- EXPECT_EQ(bi50->succ_edge.count(20), 1u);
- EXPECT_EQ(bi50->succ_edge[20], EdgeKind::kBack);
+ auto* bi50 = fe.GetBlockInfo(50);
+ ASSERT_NE(bi50, nullptr);
+ EXPECT_EQ(bi50->succ_edge.count(20), 1u);
+ EXPECT_EQ(bi50->succ_edge[20], EdgeKind::kBack);
}
TEST_F(SpvParserCFGTest, ClassifyCFGEdges_PrematureExitFromContinueConstruct) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -4829,19 +4753,17 @@ TEST_F(SpvParserCFGTest, ClassifyCFGEdges_PrematureExitFromContinueConstruct) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_FALSE(FlowClassifyCFGEdges(&fe));
- EXPECT_THAT(p->error(),
- Eq("Invalid exit (40->99) from continue construct: 40 is not the "
- "last block in the continue construct starting at 40 "
- "(violates post-dominance rule)"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_FALSE(FlowClassifyCFGEdges(&fe));
+ EXPECT_THAT(p->error(), Eq("Invalid exit (40->99) from continue construct: 40 is not the "
+ "last block in the continue construct starting at 40 "
+ "(violates post-dominance rule)"));
}
-TEST_F(SpvParserCFGTest,
- ClassifyCFGEdges_LoopBreak_FromLoopHeader_SingleBlockLoop_TrueBranch) {
- auto assembly = CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, ClassifyCFGEdges_LoopBreak_FromLoopHeader_SingleBlockLoop_TrueBranch) {
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -4856,22 +4778,21 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(FlowClassifyCFGEdges(&fe));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(FlowClassifyCFGEdges(&fe));
- auto* bi = fe.GetBlockInfo(20);
- ASSERT_NE(bi, nullptr);
- EXPECT_EQ(bi->succ_edge.count(99), 1u);
- EXPECT_EQ(bi->succ_edge[99], EdgeKind::kLoopBreak);
- EXPECT_EQ(bi->succ_edge.count(20), 1u);
- EXPECT_EQ(bi->succ_edge[20], EdgeKind::kBack);
+ auto* bi = fe.GetBlockInfo(20);
+ ASSERT_NE(bi, nullptr);
+ EXPECT_EQ(bi->succ_edge.count(99), 1u);
+ EXPECT_EQ(bi->succ_edge[99], EdgeKind::kLoopBreak);
+ EXPECT_EQ(bi->succ_edge.count(20), 1u);
+ EXPECT_EQ(bi->succ_edge[20], EdgeKind::kBack);
}
-TEST_F(SpvParserCFGTest,
- ClassifyCFGEdges_LoopBreak_FromLoopHeader_SingleBlockLoop_FalseBranch) {
- auto assembly = CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, ClassifyCFGEdges_LoopBreak_FromLoopHeader_SingleBlockLoop_FalseBranch) {
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -4886,22 +4807,21 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(FlowClassifyCFGEdges(&fe));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(FlowClassifyCFGEdges(&fe));
- auto* bi = fe.GetBlockInfo(20);
- ASSERT_NE(bi, nullptr);
- EXPECT_EQ(bi->succ_edge.count(99), 1u);
- EXPECT_EQ(bi->succ_edge[99], EdgeKind::kLoopBreak);
- EXPECT_EQ(bi->succ_edge.count(20), 1u);
- EXPECT_EQ(bi->succ_edge[20], EdgeKind::kBack);
+ auto* bi = fe.GetBlockInfo(20);
+ ASSERT_NE(bi, nullptr);
+ EXPECT_EQ(bi->succ_edge.count(99), 1u);
+ EXPECT_EQ(bi->succ_edge[99], EdgeKind::kLoopBreak);
+ EXPECT_EQ(bi->succ_edge.count(20), 1u);
+ EXPECT_EQ(bi->succ_edge[20], EdgeKind::kBack);
}
-TEST_F(SpvParserCFGTest,
- ClassifyCFGEdges_LoopBreak_FromLoopHeader_MultiBlockLoop) {
- auto assembly = CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, ClassifyCFGEdges_LoopBreak_FromLoopHeader_MultiBlockLoop) {
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -4919,20 +4839,19 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(FlowClassifyCFGEdges(&fe));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(FlowClassifyCFGEdges(&fe));
- auto* bi = fe.GetBlockInfo(20);
- ASSERT_NE(bi, nullptr);
- EXPECT_EQ(bi->succ_edge.count(99), 1u);
- EXPECT_EQ(bi->succ_edge[99], EdgeKind::kLoopBreak);
+ auto* bi = fe.GetBlockInfo(20);
+ ASSERT_NE(bi, nullptr);
+ EXPECT_EQ(bi->succ_edge.count(99), 1u);
+ EXPECT_EQ(bi->succ_edge[99], EdgeKind::kLoopBreak);
}
-TEST_F(SpvParserCFGTest,
- ClassifyCFGEdges_LoopBreak_FromContinueConstructHeader) {
- auto assembly = CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, ClassifyCFGEdges_LoopBreak_FromContinueConstructHeader) {
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -4950,19 +4869,19 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(FlowClassifyCFGEdges(&fe));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(FlowClassifyCFGEdges(&fe));
- auto* bi = fe.GetBlockInfo(30);
- ASSERT_NE(bi, nullptr);
- EXPECT_EQ(bi->succ_edge.count(99), 1u);
- EXPECT_EQ(bi->succ_edge[99], EdgeKind::kLoopBreak);
+ auto* bi = fe.GetBlockInfo(30);
+ ASSERT_NE(bi, nullptr);
+ EXPECT_EQ(bi->succ_edge.count(99), 1u);
+ EXPECT_EQ(bi->succ_edge[99], EdgeKind::kLoopBreak);
}
TEST_F(SpvParserCFGTest, ClassifyCFGEdges_IfBreak_FromIfHeader) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -4977,19 +4896,19 @@ TEST_F(SpvParserCFGTest, ClassifyCFGEdges_IfBreak_FromIfHeader) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(FlowClassifyCFGEdges(&fe));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(FlowClassifyCFGEdges(&fe));
- auto* bi = fe.GetBlockInfo(20);
- ASSERT_NE(bi, nullptr);
- EXPECT_EQ(bi->succ_edge.count(99), 1u);
- EXPECT_EQ(bi->succ_edge[99], EdgeKind::kIfBreak);
+ auto* bi = fe.GetBlockInfo(20);
+ ASSERT_NE(bi, nullptr);
+ EXPECT_EQ(bi->succ_edge.count(99), 1u);
+ EXPECT_EQ(bi->succ_edge[99], EdgeKind::kIfBreak);
}
TEST_F(SpvParserCFGTest, ClassifyCFGEdges_IfBreak_FromIfThenElse) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -5007,26 +4926,26 @@ TEST_F(SpvParserCFGTest, ClassifyCFGEdges_IfBreak_FromIfThenElse) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(FlowClassifyCFGEdges(&fe));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(FlowClassifyCFGEdges(&fe));
- // Then clause
- auto* bi20 = fe.GetBlockInfo(20);
- ASSERT_NE(bi20, nullptr);
- EXPECT_EQ(bi20->succ_edge.count(99), 1u);
- EXPECT_EQ(bi20->succ_edge[99], EdgeKind::kIfBreak);
+ // Then clause
+ auto* bi20 = fe.GetBlockInfo(20);
+ ASSERT_NE(bi20, nullptr);
+ EXPECT_EQ(bi20->succ_edge.count(99), 1u);
+ EXPECT_EQ(bi20->succ_edge[99], EdgeKind::kIfBreak);
- // Else clause
- auto* bi50 = fe.GetBlockInfo(50);
- ASSERT_NE(bi50, nullptr);
- EXPECT_EQ(bi50->succ_edge.count(99), 1u);
- EXPECT_EQ(bi50->succ_edge[99], EdgeKind::kIfBreak);
+ // Else clause
+ auto* bi50 = fe.GetBlockInfo(50);
+ ASSERT_NE(bi50, nullptr);
+ EXPECT_EQ(bi50->succ_edge.count(99), 1u);
+ EXPECT_EQ(bi50->succ_edge[99], EdgeKind::kIfBreak);
}
TEST_F(SpvParserCFGTest, ClassifyCFGEdges_IfBreak_BypassesMerge_IsError) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -5044,22 +4963,20 @@ TEST_F(SpvParserCFGTest, ClassifyCFGEdges_IfBreak_BypassesMerge_IsError) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_FALSE(FlowClassifyCFGEdges(&fe));
- EXPECT_THAT(
- p->error(),
- Eq("Branch from block 20 to block 99 is an invalid exit from "
- "construct starting at block 10; branch bypasses merge block 50"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_FALSE(FlowClassifyCFGEdges(&fe));
+ EXPECT_THAT(p->error(), Eq("Branch from block 20 to block 99 is an invalid exit from "
+ "construct starting at block 10; branch bypasses merge block 50"));
}
TEST_F(SpvParserCFGTest, ClassifyCFGEdges_IfBreak_EscapeSwitchCase_IsError) {
- // Code generation assumes that you can't have kCaseFallThrough and kIfBreak
- // from the same OpBranchConditional.
- // This checks one direction of that: the IfBreak is shown to be unable to
- // escape a switch case.
- auto assembly = CommonTypes() + R"(
+ // Code generation assumes that you can't have kCaseFallThrough and kIfBreak
+ // from the same OpBranchConditional.
+ // This checks one direction of that: the IfBreak is shown to be unable to
+ // escape a switch case.
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -5086,18 +5003,16 @@ TEST_F(SpvParserCFGTest, ClassifyCFGEdges_IfBreak_EscapeSwitchCase_IsError) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_FALSE(FlowClassifyCFGEdges(&fe));
- EXPECT_THAT(
- p->error(),
- Eq("Branch from block 30 to block 99 is an invalid exit from "
- "construct starting at block 20; branch bypasses merge block 80"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_FALSE(FlowClassifyCFGEdges(&fe));
+ EXPECT_THAT(p->error(), Eq("Branch from block 30 to block 99 is an invalid exit from "
+ "construct starting at block 20; branch bypasses merge block 80"));
}
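The comment in the test above states an invariant the code generator relies on: a single OpBranchConditional never yields both a kCaseFallThrough edge and a kIfBreak edge. Expressed over the succ_edge map these tests inspect, a minimal sketch of that invariant follows; the BlockInfo type name and the free helper are assumptions for illustration, not code from this patch.

// Sketch only: checks that no block mixes an if-break successor edge with a
// case-fallthrough successor edge, assuming succ_edge maps block id -> EdgeKind.
bool NoIfBreakMixedWithFallthrough(const BlockInfo* bi) {
    bool has_if_break = false;
    bool has_fallthrough = false;
    for (const auto& entry : bi->succ_edge) {
        has_if_break = has_if_break || (entry.second == EdgeKind::kIfBreak);
        has_fallthrough = has_fallthrough || (entry.second == EdgeKind::kCaseFallThrough);
    }
    return !(has_if_break && has_fallthrough);
}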
TEST_F(SpvParserCFGTest, ClassifyCFGEdges_SwitchBreak_FromSwitchCaseDirect) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -5112,19 +5027,19 @@ TEST_F(SpvParserCFGTest, ClassifyCFGEdges_SwitchBreak_FromSwitchCaseDirect) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(FlowClassifyCFGEdges(&fe));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(FlowClassifyCFGEdges(&fe));
- auto* bi = fe.GetBlockInfo(10);
- ASSERT_NE(bi, nullptr);
- EXPECT_EQ(bi->succ_edge.count(99), 1u);
- EXPECT_EQ(bi->succ_edge[99], EdgeKind::kSwitchBreak);
+ auto* bi = fe.GetBlockInfo(10);
+ ASSERT_NE(bi, nullptr);
+ EXPECT_EQ(bi->succ_edge.count(99), 1u);
+ EXPECT_EQ(bi->succ_edge[99], EdgeKind::kSwitchBreak);
}
TEST_F(SpvParserCFGTest, ClassifyCFGEdges_SwitchBreak_FromSwitchCaseBody) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -5139,19 +5054,19 @@ TEST_F(SpvParserCFGTest, ClassifyCFGEdges_SwitchBreak_FromSwitchCaseBody) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(FlowClassifyCFGEdges(&fe));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(FlowClassifyCFGEdges(&fe));
- auto* bi = fe.GetBlockInfo(20);
- ASSERT_NE(bi, nullptr);
- EXPECT_EQ(bi->succ_edge.count(99), 1u);
- EXPECT_EQ(bi->succ_edge[99], EdgeKind::kSwitchBreak);
+ auto* bi = fe.GetBlockInfo(20);
+ ASSERT_NE(bi, nullptr);
+ EXPECT_EQ(bi->succ_edge.count(99), 1u);
+ EXPECT_EQ(bi->succ_edge[99], EdgeKind::kSwitchBreak);
}
TEST_F(SpvParserCFGTest, ClassifyCFGEdges_SwitchBreak_FromSwitchDefaultBody) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -5169,20 +5084,19 @@ TEST_F(SpvParserCFGTest, ClassifyCFGEdges_SwitchBreak_FromSwitchDefaultBody) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(FlowClassifyCFGEdges(&fe));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(FlowClassifyCFGEdges(&fe));
- auto* bi = fe.GetBlockInfo(30);
- ASSERT_NE(bi, nullptr);
- EXPECT_EQ(bi->succ_edge.count(99), 1u);
- EXPECT_EQ(bi->succ_edge[99], EdgeKind::kSwitchBreak);
+ auto* bi = fe.GetBlockInfo(30);
+ ASSERT_NE(bi, nullptr);
+ EXPECT_EQ(bi->succ_edge.count(99), 1u);
+ EXPECT_EQ(bi->succ_edge[99], EdgeKind::kSwitchBreak);
}
-TEST_F(SpvParserCFGTest,
- ClassifyCFGEdges_SwitchBreak_FromSwitchDefaultIsMerge) {
- auto assembly = CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, ClassifyCFGEdges_SwitchBreak_FromSwitchDefaultIsMerge) {
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -5197,20 +5111,19 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(FlowClassifyCFGEdges(&fe));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(FlowClassifyCFGEdges(&fe));
- auto* bi = fe.GetBlockInfo(10);
- ASSERT_NE(bi, nullptr);
- EXPECT_EQ(bi->succ_edge.count(99), 1u);
- EXPECT_EQ(bi->succ_edge[99], EdgeKind::kSwitchBreak);
+ auto* bi = fe.GetBlockInfo(10);
+ ASSERT_NE(bi, nullptr);
+ EXPECT_EQ(bi->succ_edge.count(99), 1u);
+ EXPECT_EQ(bi->succ_edge[99], EdgeKind::kSwitchBreak);
}
-TEST_F(SpvParserCFGTest,
- ClassifyCFGEdges_SwitchBreak_FromNestedIf_Unconditional) {
- auto assembly = CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, ClassifyCFGEdges_SwitchBreak_FromNestedIf_Unconditional) {
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -5232,20 +5145,19 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(FlowClassifyCFGEdges(&fe));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(FlowClassifyCFGEdges(&fe));
- auto* bi = fe.GetBlockInfo(30);
- ASSERT_NE(bi, nullptr);
- EXPECT_EQ(bi->succ_edge.count(99), 1u);
- EXPECT_EQ(bi->succ_edge[99], EdgeKind::kSwitchBreak);
+ auto* bi = fe.GetBlockInfo(30);
+ ASSERT_NE(bi, nullptr);
+ EXPECT_EQ(bi->succ_edge.count(99), 1u);
+ EXPECT_EQ(bi->succ_edge[99], EdgeKind::kSwitchBreak);
}
-TEST_F(SpvParserCFGTest,
- ClassifyCFGEdges_SwitchBreak_FromNestedIf_Conditional) {
- auto assembly = CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, ClassifyCFGEdges_SwitchBreak_FromNestedIf_Conditional) {
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -5267,19 +5179,19 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(FlowClassifyCFGEdges(&fe));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(FlowClassifyCFGEdges(&fe));
- auto* bi = fe.GetBlockInfo(30);
- ASSERT_NE(bi, nullptr);
- EXPECT_EQ(bi->succ_edge.count(99), 1u);
- EXPECT_EQ(bi->succ_edge[99], EdgeKind::kSwitchBreak);
+ auto* bi = fe.GetBlockInfo(30);
+ ASSERT_NE(bi, nullptr);
+ EXPECT_EQ(bi->succ_edge.count(99), 1u);
+ EXPECT_EQ(bi->succ_edge[99], EdgeKind::kSwitchBreak);
}
TEST_F(SpvParserCFGTest, ClassifyCFGEdges_SwitchBreak_BypassesMerge_IsError) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -5297,19 +5209,17 @@ TEST_F(SpvParserCFGTest, ClassifyCFGEdges_SwitchBreak_BypassesMerge_IsError) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_FALSE(FlowClassifyCFGEdges(&fe));
- EXPECT_THAT(
- p->error(),
- Eq("Branch from block 20 to block 99 is an invalid exit from "
- "construct starting at block 10; branch bypasses merge block 50"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_FALSE(FlowClassifyCFGEdges(&fe));
+ EXPECT_THAT(p->error(), Eq("Branch from block 20 to block 99 is an invalid exit from "
+ "construct starting at block 10; branch bypasses merge block 50"));
}
TEST_F(SpvParserCFGTest, ClassifyCFGEdges_SwitchBreak_FromNestedLoop_IsError) {
- // It's an error because the break can only go as far as the loop.
- auto assembly = CommonTypes() + R"(
+ // It's an error because the break can only go as far as the loop.
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -5334,20 +5244,17 @@ TEST_F(SpvParserCFGTest, ClassifyCFGEdges_SwitchBreak_FromNestedLoop_IsError) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_FALSE(FlowClassifyCFGEdges(&fe));
- EXPECT_THAT(
- p->error(),
- Eq("Branch from block 30 to block 99 is an invalid exit from "
- "construct starting at block 20; branch bypasses merge block 80"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_FALSE(FlowClassifyCFGEdges(&fe));
+ EXPECT_THAT(p->error(), Eq("Branch from block 30 to block 99 is an invalid exit from "
+ "construct starting at block 20; branch bypasses merge block 80"));
}
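Each of these error-path tests repeats the same assemble/parse/classify scaffolding before matching a diagnostic. As a sketch of how that boilerplate could be factored into the test fixture, using only the calls already present above (the helper name ExpectClassifyError is hypothetical):

// Sketch only: wraps the repeated error-path scaffolding. ASSERT_* macros
// require a void return, so this would be a member of the test fixture.
void ExpectClassifyError(const std::string& assembly, const std::string& expected_error) {
    auto p = parser(test::Assemble(assembly));
    ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
    auto fe = p->function_emitter(100);
    EXPECT_FALSE(FlowClassifyCFGEdges(&fe));
    EXPECT_THAT(p->error(), Eq(expected_error));
}

A test such as ClassifyCFGEdges_SwitchBreak_FromNestedLoop_IsError would then reduce to a single call passing its assembly string and expected message.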
-TEST_F(SpvParserCFGTest,
- ClassifyCFGEdges_SwitchBreak_FromNestedSwitch_IsError) {
- // It's an error because the break can only go as far as the inner switch.
- auto assembly = CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, ClassifyCFGEdges_SwitchBreak_FromNestedSwitch_IsError) {
+ // It's an error because the break can only go as far as the inner switch.
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -5369,18 +5276,16 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_FALSE(FlowClassifyCFGEdges(&fe));
- EXPECT_THAT(
- p->error(),
- Eq("Branch from block 30 to block 99 is an invalid exit from "
- "construct starting at block 20; branch bypasses merge block 80"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_FALSE(FlowClassifyCFGEdges(&fe));
+ EXPECT_THAT(p->error(), Eq("Branch from block 30 to block 99 is an invalid exit from "
+ "construct starting at block 20; branch bypasses merge block 80"));
}
TEST_F(SpvParserCFGTest, ClassifyCFGEdges_LoopBreak_FromLoopBody) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -5401,19 +5306,19 @@ TEST_F(SpvParserCFGTest, ClassifyCFGEdges_LoopBreak_FromLoopBody) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(FlowClassifyCFGEdges(&fe));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(FlowClassifyCFGEdges(&fe));
- auto* bi = fe.GetBlockInfo(30);
- ASSERT_NE(bi, nullptr);
- EXPECT_EQ(bi->succ_edge.count(99), 1u);
- EXPECT_EQ(bi->succ_edge[99], EdgeKind::kLoopBreak);
+ auto* bi = fe.GetBlockInfo(30);
+ ASSERT_NE(bi, nullptr);
+ EXPECT_EQ(bi->succ_edge.count(99), 1u);
+ EXPECT_EQ(bi->succ_edge[99], EdgeKind::kLoopBreak);
}
TEST_F(SpvParserCFGTest, ClassifyCFGEdges_LoopBreak_FromContinueConstructTail) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -5437,19 +5342,19 @@ TEST_F(SpvParserCFGTest, ClassifyCFGEdges_LoopBreak_FromContinueConstructTail) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(FlowClassifyCFGEdges(&fe));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(FlowClassifyCFGEdges(&fe));
- auto* bi = fe.GetBlockInfo(60);
- ASSERT_NE(bi, nullptr);
- EXPECT_EQ(bi->succ_edge.count(99), 1u);
- EXPECT_EQ(bi->succ_edge[99], EdgeKind::kLoopBreak);
+ auto* bi = fe.GetBlockInfo(60);
+ ASSERT_NE(bi, nullptr);
+ EXPECT_EQ(bi->succ_edge.count(99), 1u);
+ EXPECT_EQ(bi->succ_edge[99], EdgeKind::kLoopBreak);
}
TEST_F(SpvParserCFGTest, ClassifyCFGEdges_LoopBreak_FromLoopBodyDirect) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -5470,20 +5375,19 @@ TEST_F(SpvParserCFGTest, ClassifyCFGEdges_LoopBreak_FromLoopBodyDirect) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(FlowClassifyCFGEdges(&fe));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(FlowClassifyCFGEdges(&fe));
- auto* bi = fe.GetBlockInfo(30);
- ASSERT_NE(bi, nullptr);
- EXPECT_EQ(bi->succ_edge.count(99), 1u);
- EXPECT_EQ(bi->succ_edge[99], EdgeKind::kLoopBreak);
+ auto* bi = fe.GetBlockInfo(30);
+ ASSERT_NE(bi, nullptr);
+ EXPECT_EQ(bi->succ_edge.count(99), 1u);
+ EXPECT_EQ(bi->succ_edge[99], EdgeKind::kLoopBreak);
}
-TEST_F(SpvParserCFGTest,
- ClassifyCFGEdges_LoopBreak_FromLoopBodyNestedSelection_Unconditional) {
- auto assembly = CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, ClassifyCFGEdges_LoopBreak_FromLoopBodyNestedSelection_Unconditional) {
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -5511,20 +5415,19 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(FlowClassifyCFGEdges(&fe));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(FlowClassifyCFGEdges(&fe));
- auto* bi = fe.GetBlockInfo(40);
- ASSERT_NE(bi, nullptr);
- EXPECT_EQ(bi->succ_edge.count(99), 1u);
- EXPECT_EQ(bi->succ_edge[99], EdgeKind::kLoopBreak);
+ auto* bi = fe.GetBlockInfo(40);
+ ASSERT_NE(bi, nullptr);
+ EXPECT_EQ(bi->succ_edge.count(99), 1u);
+ EXPECT_EQ(bi->succ_edge[99], EdgeKind::kLoopBreak);
}
-TEST_F(SpvParserCFGTest,
- ClassifyCFGEdges_LoopBreak_FromLoopBodyNestedSelection_Conditional) {
- auto assembly = CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, ClassifyCFGEdges_LoopBreak_FromLoopBodyNestedSelection_Conditional) {
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -5552,20 +5455,19 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(FlowClassifyCFGEdges(&fe));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(FlowClassifyCFGEdges(&fe));
- auto* bi = fe.GetBlockInfo(40);
- ASSERT_NE(bi, nullptr);
- EXPECT_EQ(bi->succ_edge.count(99), 1u);
- EXPECT_EQ(bi->succ_edge[99], EdgeKind::kLoopBreak);
+ auto* bi = fe.GetBlockInfo(40);
+ ASSERT_NE(bi, nullptr);
+ EXPECT_EQ(bi->succ_edge.count(99), 1u);
+ EXPECT_EQ(bi->succ_edge[99], EdgeKind::kLoopBreak);
}
-TEST_F(SpvParserCFGTest,
- ClassifyCFGEdges_LoopBreak_FromContinueConstructNestedFlow_IsError) {
- auto assembly = CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, ClassifyCFGEdges_LoopBreak_FromContinueConstructNestedFlow_IsError) {
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -5596,19 +5498,17 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_FALSE(FlowClassifyCFGEdges(&fe));
- EXPECT_THAT(p->error(),
- Eq("Invalid exit (50->99) from continue construct: 50 is not the "
- "last block in the continue construct starting at 40 "
- "(violates post-dominance rule)"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_FALSE(FlowClassifyCFGEdges(&fe));
+ EXPECT_THAT(p->error(), Eq("Invalid exit (50->99) from continue construct: 50 is not the "
+ "last block in the continue construct starting at 40 "
+ "(violates post-dominance rule)"));
}
-TEST_F(SpvParserCFGTest,
- ClassifyCFGEdges_LoopBreak_FromLoopBypassesMerge_IsError) {
- auto assembly = CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, ClassifyCFGEdges_LoopBreak_FromLoopBypassesMerge_IsError) {
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -5632,19 +5532,16 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_FALSE(FlowClassifyCFGEdges(&fe));
- EXPECT_THAT(
- p->error(),
- Eq("Branch from block 30 to block 99 is an invalid exit from "
- "construct starting at block 20; branch bypasses merge block 50"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_FALSE(FlowClassifyCFGEdges(&fe));
+ EXPECT_THAT(p->error(), Eq("Branch from block 30 to block 99 is an invalid exit from "
+ "construct starting at block 20; branch bypasses merge block 50"));
}
-TEST_F(SpvParserCFGTest,
- ClassifyCFGEdges_LoopBreak_FromContinueBypassesMerge_IsError) {
- auto assembly = CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, ClassifyCFGEdges_LoopBreak_FromContinueBypassesMerge_IsError) {
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -5671,18 +5568,16 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_FALSE(FlowClassifyCFGEdges(&fe));
- EXPECT_THAT(
- p->error(),
- Eq("Branch from block 45 to block 99 is an invalid exit from "
- "construct starting at block 40; branch bypasses merge block 50"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_FALSE(FlowClassifyCFGEdges(&fe));
+ EXPECT_THAT(p->error(), Eq("Branch from block 45 to block 99 is an invalid exit from "
+ "construct starting at block 40; branch bypasses merge block 50"));
}
TEST_F(SpvParserCFGTest, ClassifyCFGEdges_LoopContinue_LoopBodyToContinue) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -5703,19 +5598,19 @@ TEST_F(SpvParserCFGTest, ClassifyCFGEdges_LoopContinue_LoopBodyToContinue) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(FlowClassifyCFGEdges(&fe));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(FlowClassifyCFGEdges(&fe));
- auto* bi = fe.GetBlockInfo(30);
- ASSERT_NE(bi, nullptr);
- EXPECT_EQ(bi->succ_edge.count(80), 1u);
- EXPECT_EQ(bi->succ_edge[80], EdgeKind::kLoopContinue);
+ auto* bi = fe.GetBlockInfo(30);
+ ASSERT_NE(bi, nullptr);
+ EXPECT_EQ(bi->succ_edge.count(80), 1u);
+ EXPECT_EQ(bi->succ_edge[80], EdgeKind::kLoopContinue);
}
TEST_F(SpvParserCFGTest, ClassifyCFGEdges_LoopContinue_FromNestedIf) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -5743,20 +5638,19 @@ TEST_F(SpvParserCFGTest, ClassifyCFGEdges_LoopContinue_FromNestedIf) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(FlowClassifyCFGEdges(&fe));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(FlowClassifyCFGEdges(&fe));
- auto* bi = fe.GetBlockInfo(40);
- ASSERT_NE(bi, nullptr);
- EXPECT_EQ(bi->succ_edge.count(80), 1u);
- EXPECT_EQ(bi->succ_edge[80], EdgeKind::kLoopContinue);
+ auto* bi = fe.GetBlockInfo(40);
+ ASSERT_NE(bi, nullptr);
+ EXPECT_EQ(bi->succ_edge.count(80), 1u);
+ EXPECT_EQ(bi->succ_edge[80], EdgeKind::kLoopContinue);
}
-TEST_F(SpvParserCFGTest,
- ClassifyCFGEdges_LoopContinue_ConditionalFromNestedIf) {
- auto assembly = CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, ClassifyCFGEdges_LoopContinue_ConditionalFromNestedIf) {
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -5784,20 +5678,19 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(FlowClassifyCFGEdges(&fe));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(FlowClassifyCFGEdges(&fe));
- auto* bi = fe.GetBlockInfo(40);
- ASSERT_NE(bi, nullptr);
- EXPECT_EQ(bi->succ_edge.count(80), 1u);
- EXPECT_EQ(bi->succ_edge[80], EdgeKind::kLoopContinue);
+ auto* bi = fe.GetBlockInfo(40);
+ ASSERT_NE(bi, nullptr);
+ EXPECT_EQ(bi->succ_edge.count(80), 1u);
+ EXPECT_EQ(bi->succ_edge[80], EdgeKind::kLoopContinue);
}
-TEST_F(SpvParserCFGTest,
- ClassifyCFGEdges_LoopContinue_FromNestedSwitchCaseBody_Unconditional) {
- auto assembly = CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, ClassifyCFGEdges_LoopContinue_FromNestedSwitchCaseBody_Unconditional) {
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -5825,20 +5718,19 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(FlowClassifyCFGEdges(&fe)) << p->error();
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(FlowClassifyCFGEdges(&fe)) << p->error();
- auto* bi = fe.GetBlockInfo(40);
- ASSERT_NE(bi, nullptr);
- EXPECT_EQ(bi->succ_edge.count(80), 1u);
- EXPECT_EQ(bi->succ_edge[80], EdgeKind::kLoopContinue);
+ auto* bi = fe.GetBlockInfo(40);
+ ASSERT_NE(bi, nullptr);
+ EXPECT_EQ(bi->succ_edge.count(80), 1u);
+ EXPECT_EQ(bi->succ_edge[80], EdgeKind::kLoopContinue);
}
-TEST_F(SpvParserCFGTest,
- ClassifyCFGEdges_LoopContinue_FromNestedSwitchCaseDirect_IsError) {
- auto assembly = CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, ClassifyCFGEdges_LoopContinue_FromNestedSwitchCaseDirect_IsError) {
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -5863,22 +5755,21 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
- EXPECT_TRUE(fe.VerifyHeaderContinueMergeOrder());
- EXPECT_TRUE(fe.RegisterMerges());
- EXPECT_TRUE(fe.LabelControlFlowConstructs());
- EXPECT_FALSE(fe.FindSwitchCaseHeaders());
- EXPECT_THAT(p->error(), Eq("Switch branch from block 30 to case target block "
- "80 escapes the selection construct"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
+ EXPECT_TRUE(fe.VerifyHeaderContinueMergeOrder());
+ EXPECT_TRUE(fe.RegisterMerges());
+ EXPECT_TRUE(fe.LabelControlFlowConstructs());
+ EXPECT_FALSE(fe.FindSwitchCaseHeaders());
+ EXPECT_THAT(p->error(), Eq("Switch branch from block 30 to case target block "
+ "80 escapes the selection construct"));
}
-TEST_F(SpvParserCFGTest,
- ClassifyCFGEdges_LoopContinue_FromNestedSwitchDefaultDirect_IsError) {
- auto assembly = CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, ClassifyCFGEdges_LoopContinue_FromNestedSwitchDefaultDirect_IsError) {
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -5903,22 +5794,21 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
- EXPECT_TRUE(fe.VerifyHeaderContinueMergeOrder());
- EXPECT_TRUE(fe.RegisterMerges());
- EXPECT_TRUE(fe.LabelControlFlowConstructs());
- EXPECT_FALSE(fe.FindSwitchCaseHeaders());
- EXPECT_THAT(p->error(), Eq("Switch branch from block 30 to default block 80 "
- "escapes the selection construct"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
+ EXPECT_TRUE(fe.VerifyHeaderContinueMergeOrder());
+ EXPECT_TRUE(fe.RegisterMerges());
+ EXPECT_TRUE(fe.LabelControlFlowConstructs());
+ EXPECT_FALSE(fe.FindSwitchCaseHeaders());
+ EXPECT_THAT(p->error(), Eq("Switch branch from block 30 to default block 80 "
+ "escapes the selection construct"));
}
-TEST_F(SpvParserCFGTest,
- ClassifyCFGEdges_LoopContinue_FromNestedSwitchDefaultBody_Conditional) {
- auto assembly = CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, ClassifyCFGEdges_LoopContinue_FromNestedSwitchDefaultBody_Conditional) {
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -5946,21 +5836,19 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(FlowClassifyCFGEdges(&fe)) << p->error();
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(FlowClassifyCFGEdges(&fe)) << p->error();
- auto* bi = fe.GetBlockInfo(40);
- ASSERT_NE(bi, nullptr);
- EXPECT_EQ(bi->succ_edge.count(80), 1u);
- EXPECT_EQ(bi->succ_edge[80], EdgeKind::kLoopContinue);
+ auto* bi = fe.GetBlockInfo(40);
+ ASSERT_NE(bi, nullptr);
+ EXPECT_EQ(bi->succ_edge.count(80), 1u);
+ EXPECT_EQ(bi->succ_edge[80], EdgeKind::kLoopContinue);
}
-TEST_F(
- SpvParserCFGTest,
- ClassifyCFGEdges_LoopContinue_FromNestedSwitchDefaultBody_Unconditional) {
- auto assembly = CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, ClassifyCFGEdges_LoopContinue_FromNestedSwitchDefaultBody_Unconditional) {
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -5988,24 +5876,23 @@ TEST_F(
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(FlowClassifyCFGEdges(&fe));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(FlowClassifyCFGEdges(&fe));
- auto* bi = fe.GetBlockInfo(40);
- ASSERT_NE(bi, nullptr);
- EXPECT_EQ(bi->succ_edge.count(80), 1u);
- EXPECT_EQ(bi->succ_edge[80], EdgeKind::kLoopContinue);
+ auto* bi = fe.GetBlockInfo(40);
+ ASSERT_NE(bi, nullptr);
+ EXPECT_EQ(bi->succ_edge.count(80), 1u);
+ EXPECT_EQ(bi->succ_edge[80], EdgeKind::kLoopContinue);
}
-TEST_F(SpvParserCFGTest,
- ClassifyCFGEdges_LoopContinue_FromNestedLoopHeader_IsError) {
- // The inner loop header tries to continue to the outer loop's continue target.
- // This is disallowed by the rule:
- // "a continue block is valid only for the innermost loop it is nested
- // inside of"
- auto assembly = CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, ClassifyCFGEdges_LoopContinue_FromNestedLoopHeader_IsError) {
+ // The inner loop header tries to continue to the outer loop's continue target.
+ // This is disallowed by the rule:
+ // "a continue block is valid only for the innermost loop it is nested
+ // inside of"
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -6039,18 +5926,16 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_FALSE(FlowClassifyCFGEdges(&fe));
- EXPECT_THAT(
- p->error(),
- Eq("Branch from block 30 to block 80 is an invalid exit from construct "
- "starting at block 30; branch bypasses merge block 59"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_FALSE(FlowClassifyCFGEdges(&fe));
+ EXPECT_THAT(p->error(), Eq("Branch from block 30 to block 80 is an invalid exit from construct "
+ "starting at block 30; branch bypasses merge block 59"));
}
TEST_F(SpvParserCFGTest, ClassifyCFGEdges_Fallthrough_CaseTailToCase) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -6071,20 +5956,19 @@ TEST_F(SpvParserCFGTest, ClassifyCFGEdges_Fallthrough_CaseTailToCase) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(FlowClassifyCFGEdges(&fe));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(FlowClassifyCFGEdges(&fe));
- auto* bi = fe.GetBlockInfo(30);
- ASSERT_NE(bi, nullptr);
- EXPECT_EQ(bi->succ_edge.count(40), 1u);
- EXPECT_EQ(bi->succ_edge[40], EdgeKind::kCaseFallThrough);
+ auto* bi = fe.GetBlockInfo(30);
+ ASSERT_NE(bi, nullptr);
+ EXPECT_EQ(bi->succ_edge.count(40), 1u);
+ EXPECT_EQ(bi->succ_edge[40], EdgeKind::kCaseFallThrough);
}
-TEST_F(SpvParserCFGTest,
- ClassifyCFGEdges_Fallthrough_CaseTailToDefaultNotMerge) {
- auto assembly = CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, ClassifyCFGEdges_Fallthrough_CaseTailToDefaultNotMerge) {
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -6105,19 +5989,19 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(FlowClassifyCFGEdges(&fe));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(FlowClassifyCFGEdges(&fe));
- auto* bi = fe.GetBlockInfo(30);
- ASSERT_NE(bi, nullptr);
- EXPECT_EQ(bi->succ_edge.count(40), 1u);
- EXPECT_EQ(bi->succ_edge[40], EdgeKind::kCaseFallThrough);
+ auto* bi = fe.GetBlockInfo(30);
+ ASSERT_NE(bi, nullptr);
+ EXPECT_EQ(bi->succ_edge.count(40), 1u);
+ EXPECT_EQ(bi->succ_edge[40], EdgeKind::kCaseFallThrough);
}
TEST_F(SpvParserCFGTest, ClassifyCFGEdges_Fallthrough_DefaultToCase) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -6138,22 +6022,21 @@ TEST_F(SpvParserCFGTest, ClassifyCFGEdges_Fallthrough_DefaultToCase) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(FlowClassifyCFGEdges(&fe));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(FlowClassifyCFGEdges(&fe));
- auto* bi = fe.GetBlockInfo(30);
- ASSERT_NE(bi, nullptr);
- EXPECT_EQ(bi->succ_edge.count(40), 1u);
- EXPECT_EQ(bi->succ_edge[40], EdgeKind::kCaseFallThrough);
+ auto* bi = fe.GetBlockInfo(30);
+ ASSERT_NE(bi, nullptr);
+ EXPECT_EQ(bi->succ_edge.count(40), 1u);
+ EXPECT_EQ(bi->succ_edge[40], EdgeKind::kCaseFallThrough);
}
-TEST_F(SpvParserCFGTest,
- ClassifyCFGEdges_Fallthrough_BranchConditionalWith_IfBreak_IsError) {
- // Code generation assumes OpBranchConditional can't have kCaseFallThrough
- // with kIfBreak.
- auto assembly = CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, ClassifyCFGEdges_Fallthrough_BranchConditionalWith_IfBreak_IsError) {
+ // Code generation assumes OpBranchConditional can't have kCaseFallThrough
+ // with kIfBreak.
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -6180,21 +6063,18 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_FALSE(FlowClassifyCFGEdges(&fe));
- EXPECT_THAT(
- p->error(),
- Eq("Branch from block 30 to block 99 is an invalid exit from "
- "construct starting at block 20; branch bypasses merge block 80"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_FALSE(FlowClassifyCFGEdges(&fe));
+ EXPECT_THAT(p->error(), Eq("Branch from block 30 to block 99 is an invalid exit from "
+ "construct starting at block 20; branch bypasses merge block 80"));
}
-TEST_F(SpvParserCFGTest,
- ClassifyCFGEdges_Fallthrough_BranchConditionalWith_Forward_IsError) {
- // Code generation assumes OpBranchConditional can't have kCaseFallThrough
- // with kForward.
- auto assembly = CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, ClassifyCFGEdges_Fallthrough_BranchConditionalWith_Forward_IsError) {
+ // Code generation assumes OpBranchConditional can't have kCaseFallThrough
+ // with kForward.
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -6216,26 +6096,24 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_FALSE(FlowClassifyCFGEdges(&fe));
- EXPECT_THAT(p->error(),
- Eq("Control flow diverges at block 20 (to 25, 30) but it is not "
- "a structured header (it has no merge instruction)"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_FALSE(FlowClassifyCFGEdges(&fe));
+ EXPECT_THAT(p->error(), Eq("Control flow diverges at block 20 (to 25, 30) but it is not "
+ "a structured header (it has no merge instruction)"));
}
-TEST_F(
- SpvParserCFGTest,
- ClassifyCFGEdges_Fallthrough_BranchConditionalWith_Back_LoopOnOutside_IsError) { // NOLINT
- // Code generation assumes OpBranchConditional can't have kCaseFallThrough
- // with kBack.
- //
- // This test has the loop on the outside. The backedge coming from a case
- // clause means the switch is inside the continue construct, and the nesting
- // of the switch's merge means the backedge is coming from a block that is not
- // at the end of the continue construct.
- auto assembly = CommonTypes() + R"(
+TEST_F(SpvParserCFGTest,
+ ClassifyCFGEdges_Fallthrough_BranchConditionalWith_Back_LoopOnOutside_IsError) { // NOLINT
+ // Code generation assumes OpBranchConditional can't have kCaseFallThrough
+ // with kBack.
+ //
+ // This test has the loop on the outside. The backedge coming from a case
+ // clause means the switch is inside the continue construct, and the nesting
+ // of the switch's merge means the backedge is coming from a block that is not
+ // at the end of the continue construct.
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -6264,25 +6142,24 @@ TEST_F(
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_FALSE(FlowClassifyCFGEdges(&fe));
- EXPECT_THAT(p->error(),
- Eq("Invalid exit (40->20) from continue construct: 40 is not the "
- "last block in the continue construct starting at 30 "
- "(violates post-dominance rule)"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_FALSE(FlowClassifyCFGEdges(&fe));
+ EXPECT_THAT(p->error(), Eq("Invalid exit (40->20) from continue construct: 40 is not the "
+ "last block in the continue construct starting at 30 "
+ "(violates post-dominance rule)"));
}
TEST_F(
SpvParserCFGTest,
FindSwitchCaseSelectionHeaders_Fallthrough_BranchConditionalWith_Back_LoopOnInside_FallthroughIsMerge_IsError) { // NOLINT
- // Code generation assumes OpBranchConditional can't have kCaseFallThrough
- // with kBack.
- //
- // This test has the loop on the inside. The merge block is also the
- // fallthrough target.
- auto assembly = CommonTypes() + R"(
+ // Code generation assumes OpBranchConditional can't have kCaseFallThrough
+ // with kBack.
+ //
+ // This test has the loop on the inside. The merge block is also the
+ // fallthrough target.
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel ; continue target and
@@ -6305,26 +6182,25 @@ TEST_F(
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_FALSE(FlowFindSwitchCaseHeaders(&fe));
- EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 40, 50, 99));
- EXPECT_THAT(p->error(),
- Eq("Block 50 is a case block for switch-selection header 10 and "
- "also the merge block for 20 (violates dominance rule)"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_FALSE(FlowFindSwitchCaseHeaders(&fe));
+ EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 40, 50, 99));
+ EXPECT_THAT(p->error(), Eq("Block 50 is a case block for switch-selection header 10 and "
+ "also the merge block for 20 (violates dominance rule)"));
}
TEST_F(
SpvParserCFGTest,
ClassifyCFGEdges_Fallthrough_BranchConditionalWith_Back_LoopOnInside_FallthroughIsNotMerge_IsError) { // NOLINT
- // Code generation assumes OpBranchConditional can't have kCaseFallThrough
- // with kBack.
- //
- // This test has the loop on the inside. The merge block is not the merge
- // target But the block order gets messed up because of the weird
- // connectivity.
- auto assembly = CommonTypes() + R"(
+ // Code generation assumes OpBranchConditional can't have kCaseFallThrough
+ // with kBack.
+ //
+ // This test has the loop on the inside. The merge block is not the merge
+ // target But the block order gets messed up because of the weird
+ // connectivity.
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel ; continue target and
@@ -6350,24 +6226,24 @@ TEST_F(
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_FALSE(FlowClassifyCFGEdges(&fe));
- EXPECT_THAT(p->error(), Eq("Branch from 10 to 50 bypasses continue target 40 "
- "(dominance rule violated)"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_FALSE(FlowClassifyCFGEdges(&fe));
+ EXPECT_THAT(p->error(), Eq("Branch from 10 to 50 bypasses continue target 40 "
+ "(dominance rule violated)"));
}
TEST_F(
SpvParserCFGTest,
ClassifyCFGEdges_Fallthrough_BranchConditionalWith_Back_LoopOnInside_NestedMerge_IsError) { // NOLINT
- // Code generation assumes OpBranchConditional can't have kCaseFallThrough
- // with kBack.
- //
- // This test has the loop on the inside. The fallthrough is an invalid exit
- // from the loop. However, the block order gets all messed up because going
- // from 40 to 50 ends up pulling in 99
- auto assembly = CommonTypes() + R"(
+ // Code generation assumes OpBranchConditional can't have kCaseFallThrough
+ // with kBack.
+ //
+ // This test has the loop on the inside. The fallthrough is an invalid exit
+ // from the loop. However, the block order gets all messed up because going
+ // from 40 to 50 ends up pulling in 99
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel ; continue target and
@@ -6393,28 +6269,27 @@ TEST_F(
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_FALSE(FlowClassifyCFGEdges(&fe));
- EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 40, 50, 49, 99));
- EXPECT_THAT(p->error(), Eq("Branch from 10 to 50 bypasses continue target 40 "
- "(dominance rule violated)"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_FALSE(FlowClassifyCFGEdges(&fe));
+ EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 40, 50, 49, 99));
+ EXPECT_THAT(p->error(), Eq("Branch from 10 to 50 bypasses continue target 40 "
+ "(dominance rule violated)"));
}
-TEST_F(SpvParserCFGTest,
- ClassifyCFGEdges_Fallthrough_CaseNonTailToCase_TrueBranch) {
- // This is an unusual one, and is an error. Structurally it looks like this:
- // switch (val) {
- // case 0: {
- // if (cond) {
- // fallthrough;
- // }
- // something = 1;
- // }
- // case 1: { }
- // }
- auto assembly = CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, ClassifyCFGEdges_Fallthrough_CaseNonTailToCase_TrueBranch) {
+ // This is an unusual one, and is an error. Structurally it looks like this:
+ // switch (val) {
+ // case 0: {
+ // if (cond) {
+ // fallthrough;
+ // }
+ // something = 1;
+ // }
+ // case 1: { }
+ // }
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -6439,30 +6314,28 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_FALSE(FlowClassifyCFGEdges(&fe));
- EXPECT_THAT(
- p->error(),
- Eq("Branch from 10 to 50 bypasses header 20 (dominance rule violated)"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_FALSE(FlowClassifyCFGEdges(&fe));
+ EXPECT_THAT(p->error(),
+ Eq("Branch from 10 to 50 bypasses header 20 (dominance rule violated)"));
}
-TEST_F(SpvParserCFGTest,
- ClassifyCFGEdges_Fallthrough_CaseNonTailToCase_FalseBranch) {
- // Like previous test, but taking the false branch.
+TEST_F(SpvParserCFGTest, ClassifyCFGEdges_Fallthrough_CaseNonTailToCase_FalseBranch) {
+ // Like previous test, but taking the false branch.
- // This is an unusual one, and is an error. Structurally it looks like this:
- // switch (val) {
- // case 0: {
- // if (cond) {
- // fallthrough;
- // }
- // something = 1;
- // }
- // case 1: { }
- // }
- auto assembly = CommonTypes() + R"(
+ // This is an unusual one, and is an error. Structurally it looks like this:
+ // switch (val) {
+ // case 0: {
+ // if (cond) {
+ // fallthrough;
+ // }
+ // something = 1;
+ // }
+ // case 1: { }
+ // }
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -6487,17 +6360,16 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_FALSE(FlowClassifyCFGEdges(&fe));
- EXPECT_THAT(
- p->error(),
- Eq("Branch from 10 to 50 bypasses header 20 (dominance rule violated)"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_FALSE(FlowClassifyCFGEdges(&fe));
+ EXPECT_THAT(p->error(),
+ Eq("Branch from 10 to 50 bypasses header 20 (dominance rule violated)"));
}
TEST_F(SpvParserCFGTest, ClassifyCFGEdges_Forward_IfToThen) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -6512,19 +6384,19 @@ TEST_F(SpvParserCFGTest, ClassifyCFGEdges_Forward_IfToThen) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(FlowClassifyCFGEdges(&fe));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(FlowClassifyCFGEdges(&fe));
- auto* bi = fe.GetBlockInfo(10);
- ASSERT_NE(bi, nullptr);
- EXPECT_EQ(bi->succ_edge.count(20), 1u);
- EXPECT_EQ(bi->succ_edge[20], EdgeKind::kForward);
+ auto* bi = fe.GetBlockInfo(10);
+ ASSERT_NE(bi, nullptr);
+ EXPECT_EQ(bi->succ_edge.count(20), 1u);
+ EXPECT_EQ(bi->succ_edge[20], EdgeKind::kForward);
}
TEST_F(SpvParserCFGTest, ClassifyCFGEdges_Forward_IfToElse) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -6539,19 +6411,19 @@ TEST_F(SpvParserCFGTest, ClassifyCFGEdges_Forward_IfToElse) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(FlowClassifyCFGEdges(&fe));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(FlowClassifyCFGEdges(&fe));
- auto* bi = fe.GetBlockInfo(10);
- ASSERT_NE(bi, nullptr);
- EXPECT_EQ(bi->succ_edge.count(30), 1u);
- EXPECT_EQ(bi->succ_edge[30], EdgeKind::kForward);
+ auto* bi = fe.GetBlockInfo(10);
+ ASSERT_NE(bi, nullptr);
+ EXPECT_EQ(bi->succ_edge.count(30), 1u);
+ EXPECT_EQ(bi->succ_edge[30], EdgeKind::kForward);
}
TEST_F(SpvParserCFGTest, ClassifyCFGEdges_Forward_SwitchToCase) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -6566,19 +6438,19 @@ TEST_F(SpvParserCFGTest, ClassifyCFGEdges_Forward_SwitchToCase) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(FlowClassifyCFGEdges(&fe));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(FlowClassifyCFGEdges(&fe));
- auto* bi = fe.GetBlockInfo(10);
- ASSERT_NE(bi, nullptr);
- EXPECT_EQ(bi->succ_edge.count(20), 1u);
- EXPECT_EQ(bi->succ_edge[20], EdgeKind::kForward);
+ auto* bi = fe.GetBlockInfo(10);
+ ASSERT_NE(bi, nullptr);
+ EXPECT_EQ(bi->succ_edge.count(20), 1u);
+ EXPECT_EQ(bi->succ_edge[20], EdgeKind::kForward);
}
TEST_F(SpvParserCFGTest, ClassifyCFGEdges_Forward_SwitchToDefaultNotMerge) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -6596,19 +6468,19 @@ TEST_F(SpvParserCFGTest, ClassifyCFGEdges_Forward_SwitchToDefaultNotMerge) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(FlowClassifyCFGEdges(&fe));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(FlowClassifyCFGEdges(&fe));
- auto* bi = fe.GetBlockInfo(10);
- ASSERT_NE(bi, nullptr);
- EXPECT_EQ(bi->succ_edge.count(30), 1u);
- EXPECT_EQ(bi->succ_edge[30], EdgeKind::kForward);
+ auto* bi = fe.GetBlockInfo(10);
+ ASSERT_NE(bi, nullptr);
+ EXPECT_EQ(bi->succ_edge.count(30), 1u);
+ EXPECT_EQ(bi->succ_edge[30], EdgeKind::kForward);
}
TEST_F(SpvParserCFGTest, ClassifyCFGEdges_Forward_LoopHeadToBody) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -6629,20 +6501,19 @@ TEST_F(SpvParserCFGTest, ClassifyCFGEdges_Forward_LoopHeadToBody) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(FlowClassifyCFGEdges(&fe));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(FlowClassifyCFGEdges(&fe));
- auto* bi = fe.GetBlockInfo(20);
- ASSERT_NE(bi, nullptr);
- EXPECT_EQ(bi->succ_edge.count(30), 1u);
- EXPECT_EQ(bi->succ_edge[30], EdgeKind::kForward);
+ auto* bi = fe.GetBlockInfo(20);
+ ASSERT_NE(bi, nullptr);
+ EXPECT_EQ(bi->succ_edge.count(30), 1u);
+ EXPECT_EQ(bi->succ_edge[30], EdgeKind::kForward);
}
-TEST_F(SpvParserCFGTest,
- ClassifyCFGEdges_DomViolation_BeforeIfToSelectionInterior) {
- auto assembly = CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, ClassifyCFGEdges_DomViolation_BeforeIfToSelectionInterior) {
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -6664,18 +6535,16 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_FALSE(FlowClassifyCFGEdges(&fe));
- EXPECT_THAT(
- p->error(),
- Eq("Branch from 10 to 50 bypasses header 20 (dominance rule violated)"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_FALSE(FlowClassifyCFGEdges(&fe));
+ EXPECT_THAT(p->error(),
+ Eq("Branch from 10 to 50 bypasses header 20 (dominance rule violated)"));
}
-TEST_F(SpvParserCFGTest,
- ClassifyCFGEdges_DomViolation_BeforeSwitchToSelectionInterior) {
- auto assembly = CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, ClassifyCFGEdges_DomViolation_BeforeSwitchToSelectionInterior) {
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -6697,18 +6566,16 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_FALSE(FlowClassifyCFGEdges(&fe));
- EXPECT_THAT(
- p->error(),
- Eq("Branch from 10 to 50 bypasses header 20 (dominance rule violated)"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_FALSE(FlowClassifyCFGEdges(&fe));
+ EXPECT_THAT(p->error(),
+ Eq("Branch from 10 to 50 bypasses header 20 (dominance rule violated)"));
}
-TEST_F(SpvParserCFGTest,
- ClassifyCFGEdges_DomViolation_BeforeLoopToLoopBodyInterior) {
- auto assembly = CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, ClassifyCFGEdges_DomViolation_BeforeLoopToLoopBodyInterior) {
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -6733,22 +6600,21 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_FALSE(FlowClassifyCFGEdges(&fe));
- EXPECT_THAT(p->error(),
- // Weird error, but still we caught it.
- // Preferred: Eq("Branch from 10 to 50 bypasses header 20
- // (dominance rule violated)"))
- Eq("Branch from 10 to 50 bypasses continue target 80 (dominance "
- "rule violated)"))
- << Dump(fe.block_order());
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_FALSE(FlowClassifyCFGEdges(&fe));
+ EXPECT_THAT(p->error(),
+ // Weird error, but still we caught it.
+ // Preferred: Eq("Branch from 10 to 50 bypasses header 20
+ // (dominance rule violated)"))
+ Eq("Branch from 10 to 50 bypasses continue target 80 (dominance "
+ "rule violated)"))
+ << Dump(fe.block_order());
}
-TEST_F(SpvParserCFGTest,
- ClassifyCFGEdges_DomViolation_BeforeContinueToContinueInterior) {
- auto assembly = CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, ClassifyCFGEdges_DomViolation_BeforeContinueToContinueInterior) {
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -6775,19 +6641,17 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_FALSE(FlowClassifyCFGEdges(&fe));
- EXPECT_THAT(
- p->error(),
- Eq("Branch from block 30 to block 60 is an invalid exit from "
- "construct starting at block 20; branch bypasses continue target 50"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_FALSE(FlowClassifyCFGEdges(&fe));
+ EXPECT_THAT(p->error(),
+ Eq("Branch from block 30 to block 60 is an invalid exit from "
+ "construct starting at block 20; branch bypasses continue target 50"));
}
-TEST_F(SpvParserCFGTest,
- ClassifyCFGEdges_DomViolation_AfterContinueToContinueInterior) {
- auto assembly = CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, ClassifyCFGEdges_DomViolation_AfterContinueToContinueInterior) {
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -6809,20 +6673,17 @@ TEST_F(SpvParserCFGTest,
%80 = OpLabel
OpBranch %60 ; bad branch
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_FALSE(FlowClassifyCFGEdges(&fe));
- EXPECT_THAT(
- p->error(),
- Eq("Branch from block 50 to block 60 is an invalid exit from "
- "construct starting at block 50; branch bypasses merge block 80"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_FALSE(FlowClassifyCFGEdges(&fe));
+ EXPECT_THAT(p->error(), Eq("Branch from block 50 to block 60 is an invalid exit from "
+ "construct starting at block 50; branch bypasses merge block 80"));
}
-TEST_F(
- SpvParserCFGTest,
- FindSwitchCaseHeaders_DomViolation_SwitchCase_CantBeMergeForOtherConstruct) { // NOLINT
- auto assembly = CommonTypes() + R"(
+TEST_F(SpvParserCFGTest,
+ FindSwitchCaseHeaders_DomViolation_SwitchCase_CantBeMergeForOtherConstruct) { // NOLINT
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -6844,19 +6705,16 @@ TEST_F(
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_FALSE(FlowFindSwitchCaseHeaders(&fe));
- EXPECT_THAT(p->error(),
- Eq("Block 50 is a case block for switch-selection header 10 and "
- "also the merge block for 20 (violates dominance rule)"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_FALSE(FlowFindSwitchCaseHeaders(&fe));
+ EXPECT_THAT(p->error(), Eq("Block 50 is a case block for switch-selection header 10 and "
+ "also the merge block for 20 (violates dominance rule)"));
}
-TEST_F(
- SpvParserCFGTest,
- ClassifyCFGEdges_DomViolation_SwitchDefault_CantBeMergeForOtherConstruct) {
- auto assembly = CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, ClassifyCFGEdges_DomViolation_SwitchDefault_CantBeMergeForOtherConstruct) {
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -6878,17 +6736,16 @@ TEST_F(
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_FALSE(FlowFindSwitchCaseHeaders(&fe));
- EXPECT_THAT(p->error(),
- Eq("Block 50 is the default block for switch-selection header 10 "
- "and also the merge block for 20 (violates dominance rule)"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_FALSE(FlowFindSwitchCaseHeaders(&fe));
+ EXPECT_THAT(p->error(), Eq("Block 50 is the default block for switch-selection header 10 "
+ "and also the merge block for 20 (violates dominance rule)"));
}
TEST_F(SpvParserCFGTest, ClassifyCFGEdges_TooManyBackedges) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -6909,17 +6766,15 @@ TEST_F(SpvParserCFGTest, ClassifyCFGEdges_TooManyBackedges) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_FALSE(FlowClassifyCFGEdges(&fe));
- EXPECT_THAT(
- p->error(),
- Eq("Invalid backedge (30->20): 30 is not in a continue construct"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_FALSE(FlowClassifyCFGEdges(&fe));
+ EXPECT_THAT(p->error(), Eq("Invalid backedge (30->20): 30 is not in a continue construct"));
}
TEST_F(SpvParserCFGTest, ClassifyCFGEdges_NeededMerge_BranchConditional) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%20 = OpLabel
@@ -6936,17 +6791,16 @@ TEST_F(SpvParserCFGTest, ClassifyCFGEdges_NeededMerge_BranchConditional) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_FALSE(FlowClassifyCFGEdges(&fe));
- EXPECT_THAT(p->error(),
- Eq("Control flow diverges at block 20 (to 30, 40) but it is not "
- "a structured header (it has no merge instruction)"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_FALSE(FlowClassifyCFGEdges(&fe));
+ EXPECT_THAT(p->error(), Eq("Control flow diverges at block 20 (to 30, 40) but it is not "
+ "a structured header (it has no merge instruction)"));
}
TEST_F(SpvParserCFGTest, ClassifyCFGEdges_NeededMerge_Switch) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -6963,20 +6817,18 @@ TEST_F(SpvParserCFGTest, ClassifyCFGEdges_NeededMerge_Switch) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_FALSE(FlowClassifyCFGEdges(&fe));
- EXPECT_THAT(p->error(),
- Eq("Control flow diverges at block 10 (to 99, 20) but it is not "
- "a structured header (it has no merge instruction)"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_FALSE(FlowClassifyCFGEdges(&fe));
+ EXPECT_THAT(p->error(), Eq("Control flow diverges at block 10 (to 99, 20) but it is not "
+ "a structured header (it has no merge instruction)"));
}
-TEST_F(SpvParserCFGTest,
- ClassifyCFGEdges_Pathological_Forward_LoopHeadSplitBody) {
- // In this case the branch-conditional in the loop header is really also a
- // selection header.
- auto assembly = CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, ClassifyCFGEdges_Pathological_Forward_LoopHeadSplitBody) {
+ // In this case the branch-conditional in the loop header is really also a
+ // selection header.
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -7000,22 +6852,22 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(FlowClassifyCFGEdges(&fe));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(FlowClassifyCFGEdges(&fe));
- auto* bi = fe.GetBlockInfo(20);
- ASSERT_NE(bi, nullptr);
- EXPECT_EQ(bi->succ_edge.count(30), 1u);
- EXPECT_EQ(bi->succ_edge[30], EdgeKind::kForward);
- EXPECT_EQ(bi->succ_edge.count(50), 1u);
- EXPECT_EQ(bi->succ_edge[50], EdgeKind::kForward);
+ auto* bi = fe.GetBlockInfo(20);
+ ASSERT_NE(bi, nullptr);
+ EXPECT_EQ(bi->succ_edge.count(30), 1u);
+ EXPECT_EQ(bi->succ_edge[30], EdgeKind::kForward);
+ EXPECT_EQ(bi->succ_edge.count(50), 1u);
+ EXPECT_EQ(bi->succ_edge[50], EdgeKind::kForward);
}
TEST_F(SpvParserCFGTest, ClassifyCFGEdges_Pathological_Forward_Premerge) {
- // Two arms of an if-selection converge early, before the merge block
- auto assembly = CommonTypes() + R"(
+ // Two arms of an if-selection converge early, before the merge block
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -7039,35 +6891,35 @@ TEST_F(SpvParserCFGTest, ClassifyCFGEdges_Pathological_Forward_Premerge) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(FlowClassifyCFGEdges(&fe));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(FlowClassifyCFGEdges(&fe));
- auto* bi20 = fe.GetBlockInfo(20);
- ASSERT_NE(bi20, nullptr);
- EXPECT_EQ(bi20->succ_edge.count(50), 1u);
- EXPECT_EQ(bi20->succ_edge[50], EdgeKind::kForward);
+ auto* bi20 = fe.GetBlockInfo(20);
+ ASSERT_NE(bi20, nullptr);
+ EXPECT_EQ(bi20->succ_edge.count(50), 1u);
+ EXPECT_EQ(bi20->succ_edge[50], EdgeKind::kForward);
- auto* bi30 = fe.GetBlockInfo(30);
- ASSERT_NE(bi30, nullptr);
- EXPECT_EQ(bi30->succ_edge.count(50), 1u);
- EXPECT_EQ(bi30->succ_edge[50], EdgeKind::kForward);
+ auto* bi30 = fe.GetBlockInfo(30);
+ ASSERT_NE(bi30, nullptr);
+ EXPECT_EQ(bi30->succ_edge.count(50), 1u);
+ EXPECT_EQ(bi30->succ_edge[50], EdgeKind::kForward);
- auto* bi50 = fe.GetBlockInfo(50);
- ASSERT_NE(bi50, nullptr);
- EXPECT_EQ(bi50->succ_edge.count(60), 1u);
- EXPECT_EQ(bi50->succ_edge[60], EdgeKind::kForward);
+ auto* bi50 = fe.GetBlockInfo(50);
+ ASSERT_NE(bi50, nullptr);
+ EXPECT_EQ(bi50->succ_edge.count(60), 1u);
+ EXPECT_EQ(bi50->succ_edge[60], EdgeKind::kForward);
- auto* bi60 = fe.GetBlockInfo(60);
- ASSERT_NE(bi60, nullptr);
- EXPECT_EQ(bi60->succ_edge.count(99), 1u);
- EXPECT_EQ(bi60->succ_edge[99], EdgeKind::kIfBreak);
+ auto* bi60 = fe.GetBlockInfo(60);
+ ASSERT_NE(bi60, nullptr);
+ EXPECT_EQ(bi60->succ_edge.count(99), 1u);
+ EXPECT_EQ(bi60->succ_edge[99], EdgeKind::kIfBreak);
}
TEST_F(SpvParserCFGTest, ClassifyCFGEdges_Pathological_Forward_Regardless) {
- // Both arms of an OpBranchConditional go to the same target.
- auto assembly = CommonTypes() + R"(
+ // Both arms of an OpBranchConditional go to the same target.
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -7082,24 +6934,24 @@ TEST_F(SpvParserCFGTest, ClassifyCFGEdges_Pathological_Forward_Regardless) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(FlowClassifyCFGEdges(&fe));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(FlowClassifyCFGEdges(&fe));
- auto* bi10 = fe.GetBlockInfo(10);
- ASSERT_NE(bi10, nullptr);
- EXPECT_EQ(bi10->succ_edge.count(20), 1u);
- EXPECT_EQ(bi10->succ_edge[20], EdgeKind::kForward);
+ auto* bi10 = fe.GetBlockInfo(10);
+ ASSERT_NE(bi10, nullptr);
+ EXPECT_EQ(bi10->succ_edge.count(20), 1u);
+ EXPECT_EQ(bi10->succ_edge[20], EdgeKind::kForward);
- auto* bi20 = fe.GetBlockInfo(20);
- ASSERT_NE(bi20, nullptr);
- EXPECT_EQ(bi20->succ_edge.count(99), 1u);
- EXPECT_EQ(bi20->succ_edge[99], EdgeKind::kIfBreak);
+ auto* bi20 = fe.GetBlockInfo(20);
+ ASSERT_NE(bi20, nullptr);
+ EXPECT_EQ(bi20->succ_edge.count(99), 1u);
+ EXPECT_EQ(bi20->succ_edge[99], EdgeKind::kIfBreak);
}
TEST_F(SpvParserCFGTest, FindIfSelectionInternalHeaders_NoIf) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -7107,20 +6959,20 @@ TEST_F(SpvParserCFGTest, FindIfSelectionInternalHeaders_NoIf) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(FlowClassifyCFGEdges(&fe));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(FlowClassifyCFGEdges(&fe));
- auto* bi = fe.GetBlockInfo(10);
- ASSERT_NE(bi, nullptr);
- EXPECT_EQ(bi->true_head, 0u);
- EXPECT_EQ(bi->false_head, 0u);
- EXPECT_EQ(bi->premerge_head, 0u);
+ auto* bi = fe.GetBlockInfo(10);
+ ASSERT_NE(bi, nullptr);
+ EXPECT_EQ(bi->true_head, 0u);
+ EXPECT_EQ(bi->false_head, 0u);
+ EXPECT_EQ(bi->premerge_head, 0u);
}
TEST_F(SpvParserCFGTest, FindIfSelectionInternalHeaders_ThenElse) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -7138,38 +6990,38 @@ TEST_F(SpvParserCFGTest, FindIfSelectionInternalHeaders_ThenElse) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(FlowFindIfSelectionInternalHeaders(&fe));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(FlowFindIfSelectionInternalHeaders(&fe));
- auto* bi10 = fe.GetBlockInfo(10);
- ASSERT_NE(bi10, nullptr);
- EXPECT_EQ(bi10->true_head, 20u);
- EXPECT_EQ(bi10->false_head, 30u);
- EXPECT_EQ(bi10->premerge_head, 0u);
+ auto* bi10 = fe.GetBlockInfo(10);
+ ASSERT_NE(bi10, nullptr);
+ EXPECT_EQ(bi10->true_head, 20u);
+ EXPECT_EQ(bi10->false_head, 30u);
+ EXPECT_EQ(bi10->premerge_head, 0u);
- auto* bi20 = fe.GetBlockInfo(20);
- ASSERT_NE(bi20, nullptr);
- EXPECT_EQ(bi20->true_head, 0u);
- EXPECT_EQ(bi20->false_head, 0u);
- EXPECT_EQ(bi20->premerge_head, 0u);
+ auto* bi20 = fe.GetBlockInfo(20);
+ ASSERT_NE(bi20, nullptr);
+ EXPECT_EQ(bi20->true_head, 0u);
+ EXPECT_EQ(bi20->false_head, 0u);
+ EXPECT_EQ(bi20->premerge_head, 0u);
- auto* bi30 = fe.GetBlockInfo(30);
- ASSERT_NE(bi30, nullptr);
- EXPECT_EQ(bi30->true_head, 0u);
- EXPECT_EQ(bi30->false_head, 0u);
- EXPECT_EQ(bi30->premerge_head, 0u);
+ auto* bi30 = fe.GetBlockInfo(30);
+ ASSERT_NE(bi30, nullptr);
+ EXPECT_EQ(bi30->true_head, 0u);
+ EXPECT_EQ(bi30->false_head, 0u);
+ EXPECT_EQ(bi30->premerge_head, 0u);
- auto* bi99 = fe.GetBlockInfo(99);
- ASSERT_NE(bi99, nullptr);
- EXPECT_EQ(bi99->true_head, 0u);
- EXPECT_EQ(bi99->false_head, 0u);
- EXPECT_EQ(bi99->premerge_head, 0u);
+ auto* bi99 = fe.GetBlockInfo(99);
+ ASSERT_NE(bi99, nullptr);
+ EXPECT_EQ(bi99->true_head, 0u);
+ EXPECT_EQ(bi99->false_head, 0u);
+ EXPECT_EQ(bi99->premerge_head, 0u);
}
TEST_F(SpvParserCFGTest, FindIfSelectionInternalHeaders_IfOnly) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -7184,32 +7036,32 @@ TEST_F(SpvParserCFGTest, FindIfSelectionInternalHeaders_IfOnly) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(FlowFindIfSelectionInternalHeaders(&fe));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(FlowFindIfSelectionInternalHeaders(&fe));
- auto* bi10 = fe.GetBlockInfo(10);
- ASSERT_NE(bi10, nullptr);
- EXPECT_EQ(bi10->true_head, 30u);
- EXPECT_EQ(bi10->false_head, 0u);
- EXPECT_EQ(bi10->premerge_head, 0u);
+ auto* bi10 = fe.GetBlockInfo(10);
+ ASSERT_NE(bi10, nullptr);
+ EXPECT_EQ(bi10->true_head, 30u);
+ EXPECT_EQ(bi10->false_head, 0u);
+ EXPECT_EQ(bi10->premerge_head, 0u);
- auto* bi30 = fe.GetBlockInfo(30);
- ASSERT_NE(bi30, nullptr);
- EXPECT_EQ(bi30->true_head, 0u);
- EXPECT_EQ(bi30->false_head, 0u);
- EXPECT_EQ(bi30->premerge_head, 0u);
+ auto* bi30 = fe.GetBlockInfo(30);
+ ASSERT_NE(bi30, nullptr);
+ EXPECT_EQ(bi30->true_head, 0u);
+ EXPECT_EQ(bi30->false_head, 0u);
+ EXPECT_EQ(bi30->premerge_head, 0u);
- auto* bi99 = fe.GetBlockInfo(99);
- ASSERT_NE(bi99, nullptr);
- EXPECT_EQ(bi99->true_head, 0u);
- EXPECT_EQ(bi99->false_head, 0u);
- EXPECT_EQ(bi99->premerge_head, 0u);
+ auto* bi99 = fe.GetBlockInfo(99);
+ ASSERT_NE(bi99, nullptr);
+ EXPECT_EQ(bi99->true_head, 0u);
+ EXPECT_EQ(bi99->false_head, 0u);
+ EXPECT_EQ(bi99->premerge_head, 0u);
}
TEST_F(SpvParserCFGTest, FindIfSelectionInternalHeaders_ElseOnly) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -7224,32 +7076,32 @@ TEST_F(SpvParserCFGTest, FindIfSelectionInternalHeaders_ElseOnly) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(FlowFindIfSelectionInternalHeaders(&fe));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(FlowFindIfSelectionInternalHeaders(&fe));
- auto* bi10 = fe.GetBlockInfo(10);
- ASSERT_NE(bi10, nullptr);
- EXPECT_EQ(bi10->true_head, 0u);
- EXPECT_EQ(bi10->false_head, 30u);
- EXPECT_EQ(bi10->premerge_head, 0u);
+ auto* bi10 = fe.GetBlockInfo(10);
+ ASSERT_NE(bi10, nullptr);
+ EXPECT_EQ(bi10->true_head, 0u);
+ EXPECT_EQ(bi10->false_head, 30u);
+ EXPECT_EQ(bi10->premerge_head, 0u);
- auto* bi30 = fe.GetBlockInfo(30);
- ASSERT_NE(bi30, nullptr);
- EXPECT_EQ(bi30->true_head, 0u);
- EXPECT_EQ(bi30->false_head, 0u);
- EXPECT_EQ(bi30->premerge_head, 0u);
+ auto* bi30 = fe.GetBlockInfo(30);
+ ASSERT_NE(bi30, nullptr);
+ EXPECT_EQ(bi30->true_head, 0u);
+ EXPECT_EQ(bi30->false_head, 0u);
+ EXPECT_EQ(bi30->premerge_head, 0u);
- auto* bi99 = fe.GetBlockInfo(99);
- ASSERT_NE(bi99, nullptr);
- EXPECT_EQ(bi99->true_head, 0u);
- EXPECT_EQ(bi99->false_head, 0u);
- EXPECT_EQ(bi99->premerge_head, 0u);
+ auto* bi99 = fe.GetBlockInfo(99);
+ ASSERT_NE(bi99, nullptr);
+ EXPECT_EQ(bi99->true_head, 0u);
+ EXPECT_EQ(bi99->false_head, 0u);
+ EXPECT_EQ(bi99->premerge_head, 0u);
}
TEST_F(SpvParserCFGTest, FindIfSelectionInternalHeaders_Regardless) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -7267,22 +7119,22 @@ TEST_F(SpvParserCFGTest, FindIfSelectionInternalHeaders_Regardless) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(FlowFindIfSelectionInternalHeaders(&fe));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(FlowFindIfSelectionInternalHeaders(&fe));
- EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 80, 99));
+ EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 80, 99));
- auto* bi10 = fe.GetBlockInfo(10);
- ASSERT_NE(bi10, nullptr);
- EXPECT_EQ(bi10->true_head, 20u);
- EXPECT_EQ(bi10->false_head, 20u);
- EXPECT_EQ(bi10->premerge_head, 0u);
+ auto* bi10 = fe.GetBlockInfo(10);
+ ASSERT_NE(bi10, nullptr);
+ EXPECT_EQ(bi10->true_head, 20u);
+ EXPECT_EQ(bi10->false_head, 20u);
+ EXPECT_EQ(bi10->premerge_head, 0u);
}
TEST_F(SpvParserCFGTest, FindIfSelectionInternalHeaders_Premerge_Simple) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -7303,23 +7155,22 @@ TEST_F(SpvParserCFGTest, FindIfSelectionInternalHeaders_Premerge_Simple) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(FlowFindIfSelectionInternalHeaders(&fe));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(FlowFindIfSelectionInternalHeaders(&fe));
- EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 30, 80, 99));
+ EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 30, 80, 99));
- auto* bi10 = fe.GetBlockInfo(10);
- ASSERT_NE(bi10, nullptr);
- EXPECT_EQ(bi10->true_head, 20u);
- EXPECT_EQ(bi10->false_head, 30u);
- EXPECT_EQ(bi10->premerge_head, 80u);
+ auto* bi10 = fe.GetBlockInfo(10);
+ ASSERT_NE(bi10, nullptr);
+ EXPECT_EQ(bi10->true_head, 20u);
+ EXPECT_EQ(bi10->false_head, 30u);
+ EXPECT_EQ(bi10->premerge_head, 80u);
}
-TEST_F(SpvParserCFGTest,
- FindIfSelectionInternalHeaders_Premerge_ThenDirectToElse) {
- auto assembly = CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, FindIfSelectionInternalHeaders_Premerge_ThenDirectToElse) {
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -7340,23 +7191,22 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(FlowFindIfSelectionInternalHeaders(&fe));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(FlowFindIfSelectionInternalHeaders(&fe));
- EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 30, 80, 99));
+ EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 30, 80, 99));
- auto* bi10 = fe.GetBlockInfo(10);
- ASSERT_NE(bi10, nullptr);
- EXPECT_EQ(bi10->true_head, 20u);
- EXPECT_EQ(bi10->false_head, 30u);
- EXPECT_EQ(bi10->premerge_head, 30u);
+ auto* bi10 = fe.GetBlockInfo(10);
+ ASSERT_NE(bi10, nullptr);
+ EXPECT_EQ(bi10->true_head, 20u);
+ EXPECT_EQ(bi10->false_head, 30u);
+ EXPECT_EQ(bi10->premerge_head, 30u);
}
-TEST_F(SpvParserCFGTest,
- FindIfSelectionInternalHeaders_Premerge_ElseDirectToThen) {
- auto assembly = CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, FindIfSelectionInternalHeaders_Premerge_ElseDirectToThen) {
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -7377,23 +7227,22 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(FlowFindIfSelectionInternalHeaders(&fe));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(FlowFindIfSelectionInternalHeaders(&fe));
- EXPECT_THAT(fe.block_order(), ElementsAre(10, 30, 20, 80, 99));
+ EXPECT_THAT(fe.block_order(), ElementsAre(10, 30, 20, 80, 99));
- auto* bi10 = fe.GetBlockInfo(10);
- ASSERT_NE(bi10, nullptr);
- EXPECT_EQ(bi10->true_head, 20u);
- EXPECT_EQ(bi10->false_head, 30u);
- EXPECT_EQ(bi10->premerge_head, 20u);
+ auto* bi10 = fe.GetBlockInfo(10);
+ ASSERT_NE(bi10, nullptr);
+ EXPECT_EQ(bi10->true_head, 20u);
+ EXPECT_EQ(bi10->false_head, 30u);
+ EXPECT_EQ(bi10->premerge_head, 20u);
}
-TEST_F(SpvParserCFGTest,
- FindIfSelectionInternalHeaders_Premerge_MultiCandidate_IsError) {
- auto assembly = CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, FindIfSelectionInternalHeaders_Premerge_MultiCandidate_IsError) {
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -7420,19 +7269,18 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- // Error out sooner in the flow
- EXPECT_FALSE(FlowClassifyCFGEdges(&fe));
- EXPECT_THAT(p->error(),
- Eq("Control flow diverges at block 20 (to 70, 80) but it is not "
- "a structured header (it has no merge instruction)"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ // Error out sooner in the flow
+ EXPECT_FALSE(FlowClassifyCFGEdges(&fe));
+ EXPECT_THAT(p->error(), Eq("Control flow diverges at block 20 (to 70, 80) but it is not "
+ "a structured header (it has no merge instruction)"));
}
TEST_F(SpvParserCFGTest, ClassifyCFGEdges_IfBreak_FromThen_ForwardWithinThen) {
- // SPIR-V allows this unusual configuration.
- auto assembly = CommonTypes() + R"(
+ // SPIR-V allows this unusual configuration.
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -7449,25 +7297,25 @@ TEST_F(SpvParserCFGTest, ClassifyCFGEdges_IfBreak_FromThen_ForwardWithinThen) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(FlowClassifyCFGEdges(&fe));
- EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 80, 99));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(FlowClassifyCFGEdges(&fe));
+ EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 80, 99));
- auto* bi20 = fe.GetBlockInfo(20);
- ASSERT_NE(bi20, nullptr);
- EXPECT_EQ(bi20->succ_edge.count(80), 1u);
- EXPECT_EQ(bi20->succ_edge[80], EdgeKind::kForward);
- EXPECT_EQ(bi20->succ_edge.count(99), 1u);
- EXPECT_EQ(bi20->succ_edge[99], EdgeKind::kIfBreak);
+ auto* bi20 = fe.GetBlockInfo(20);
+ ASSERT_NE(bi20, nullptr);
+ EXPECT_EQ(bi20->succ_edge.count(80), 1u);
+ EXPECT_EQ(bi20->succ_edge[80], EdgeKind::kForward);
+ EXPECT_EQ(bi20->succ_edge.count(99), 1u);
+ EXPECT_EQ(bi20->succ_edge[99], EdgeKind::kIfBreak);
- EXPECT_THAT(p->error(), Eq(""));
+ EXPECT_THAT(p->error(), Eq(""));
}
TEST_F(SpvParserCFGTest, ClassifyCFGEdges_IfBreak_FromElse_ForwardWithinElse) {
- // SPIR-V allows this unusual configuration.
- auto assembly = CommonTypes() + R"(
+ // SPIR-V allows this unusual configuration.
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -7487,24 +7335,24 @@ TEST_F(SpvParserCFGTest, ClassifyCFGEdges_IfBreak_FromElse_ForwardWithinElse) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(FlowClassifyCFGEdges(&fe));
- EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 30, 80, 99));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(FlowClassifyCFGEdges(&fe));
+ EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 30, 80, 99));
- auto* bi30 = fe.GetBlockInfo(30);
- ASSERT_NE(bi30, nullptr);
- EXPECT_EQ(bi30->succ_edge.count(80), 1u);
- EXPECT_EQ(bi30->succ_edge[80], EdgeKind::kForward);
- EXPECT_EQ(bi30->succ_edge.count(99), 1u);
- EXPECT_EQ(bi30->succ_edge[99], EdgeKind::kIfBreak);
+ auto* bi30 = fe.GetBlockInfo(30);
+ ASSERT_NE(bi30, nullptr);
+ EXPECT_EQ(bi30->succ_edge.count(80), 1u);
+ EXPECT_EQ(bi30->succ_edge[80], EdgeKind::kForward);
+ EXPECT_EQ(bi30->succ_edge.count(99), 1u);
+ EXPECT_EQ(bi30->succ_edge[99], EdgeKind::kIfBreak);
- EXPECT_THAT(p->error(), Eq(""));
+ EXPECT_THAT(p->error(), Eq(""));
}
TEST_F(SpvParserCFGTest, ClassifyCFGEdges_IfBreak_WithForwardToPremerge) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -7524,30 +7372,30 @@ TEST_F(SpvParserCFGTest, ClassifyCFGEdges_IfBreak_WithForwardToPremerge) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(FlowClassifyCFGEdges(&fe));
- EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 30, 80, 99));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(FlowClassifyCFGEdges(&fe));
+ EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 30, 80, 99));
- auto* bi20 = fe.GetBlockInfo(20);
- ASSERT_NE(bi20, nullptr);
- EXPECT_EQ(bi20->succ_edge.count(80), 1u);
- EXPECT_EQ(bi20->succ_edge[80], EdgeKind::kForward);
- EXPECT_EQ(bi20->succ_edge.count(99), 1u);
- EXPECT_EQ(bi20->succ_edge[99], EdgeKind::kIfBreak);
+ auto* bi20 = fe.GetBlockInfo(20);
+ ASSERT_NE(bi20, nullptr);
+ EXPECT_EQ(bi20->succ_edge.count(80), 1u);
+ EXPECT_EQ(bi20->succ_edge[80], EdgeKind::kForward);
+ EXPECT_EQ(bi20->succ_edge.count(99), 1u);
+ EXPECT_EQ(bi20->succ_edge[99], EdgeKind::kIfBreak);
- EXPECT_THAT(p->error(), Eq(""));
+ EXPECT_THAT(p->error(), Eq(""));
- // TODO(crbug.com/tint/775): The SPIR-V reader errors out on this case.
- // Remove this when it's fixed.
- p->DeliberatelyInvalidSpirv();
+ // TODO(crbug.com/tint/775): The SPIR-V reader errors out on this case.
+ // Remove this when it's fixed.
+ p->DeliberatelyInvalidSpirv();
}
-TEST_F(
- SpvParserCFGTest,
- FindIfSelectionInternalHeaders_DomViolation_InteriorMerge_CantBeTrueHeader) { // NOLINT - line length
- auto assembly = CommonTypes() + R"(
+TEST_F(SpvParserCFGTest,
+ FindIfSelectionInternalHeaders_DomViolation_InteriorMerge_CantBeTrueHeader) { // NOLINT -
+ // line length
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -7569,20 +7417,20 @@ TEST_F(
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_FALSE(FlowFindIfSelectionInternalHeaders(&fe));
- EXPECT_THAT(
- p->error(),
- Eq("Block 40 is the true branch for if-selection header 10 and also the "
- "merge block for header block 20 (violates dominance rule)"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_FALSE(FlowFindIfSelectionInternalHeaders(&fe));
+ EXPECT_THAT(p->error(),
+ Eq("Block 40 is the true branch for if-selection header 10 and also the "
+ "merge block for header block 20 (violates dominance rule)"));
}
TEST_F(
SpvParserCFGTest,
- FindIfSelectionInternalHeaders_DomViolation_InteriorMerge_CantBeFalseHeader) { // NOLINT - line length
- auto assembly = CommonTypes() + R"(
+ FindIfSelectionInternalHeaders_DomViolation_InteriorMerge_CantBeFalseHeader) { // NOLINT - line
+ // length
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -7604,20 +7452,17 @@ TEST_F(
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_FALSE(FlowFindIfSelectionInternalHeaders(&fe));
- EXPECT_THAT(
- p->error(),
- Eq("Block 40 is the false branch for if-selection header 10 and also the "
- "merge block for header block 20 (violates dominance rule)"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_FALSE(FlowFindIfSelectionInternalHeaders(&fe));
+ EXPECT_THAT(p->error(),
+ Eq("Block 40 is the false branch for if-selection header 10 and also the "
+ "merge block for header block 20 (violates dominance rule)"));
}
-TEST_F(
- SpvParserCFGTest,
- FindIfSelectionInternalHeaders_DomViolation_InteriorMerge_CantBePremerge) {
- auto assembly = CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, FindIfSelectionInternalHeaders_DomViolation_InteriorMerge_CantBePremerge) {
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel ; outer if-header
@@ -7645,21 +7490,19 @@ TEST_F(
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_FALSE(FlowFindIfSelectionInternalHeaders(&fe));
- EXPECT_THAT(p->error(),
- Eq("Block 70 is the merge block for 50 but has alternate paths "
- "reaching it, starting from blocks 20 and 50 which are the "
- "true and false branches for the if-selection header block 10 "
- "(violates dominance rule)"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_FALSE(FlowFindIfSelectionInternalHeaders(&fe));
+ EXPECT_THAT(p->error(), Eq("Block 70 is the merge block for 50 but has alternate paths "
+ "reaching it, starting from blocks 20 and 50 which are the "
+ "true and false branches for the if-selection header block 10 "
+ "(violates dominance rule)"));
}
-TEST_F(SpvParserCFGTest,
- FindIfSelectionInternalHeaders_TrueBranch_LoopBreak_Ok) {
- // crbug.com/tint/243
- auto assembly = CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, FindIfSelectionInternalHeaders_TrueBranch_LoopBreak_Ok) {
+ // crbug.com/tint/243
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%5 = OpLabel
@@ -7687,17 +7530,16 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(FlowFindIfSelectionInternalHeaders(&fe));
- EXPECT_THAT(p->error(), Eq(""));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(FlowFindIfSelectionInternalHeaders(&fe));
+ EXPECT_THAT(p->error(), Eq(""));
}
-TEST_F(SpvParserCFGTest,
- FindIfSelectionInternalHeaders_TrueBranch_LoopContinue_Ok) {
- // crbug.com/tint/243
- auto assembly = CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, FindIfSelectionInternalHeaders_TrueBranch_LoopContinue_Ok) {
+ // crbug.com/tint/243
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%5 = OpLabel
@@ -7725,17 +7567,16 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(FlowFindIfSelectionInternalHeaders(&fe));
- EXPECT_THAT(p->error(), Eq(""));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(FlowFindIfSelectionInternalHeaders(&fe));
+ EXPECT_THAT(p->error(), Eq(""));
}
-TEST_F(SpvParserCFGTest,
- FindIfSelectionInternalHeaders_TrueBranch_SwitchBreak_Ok) {
- // crbug.com/tint/243
- auto assembly = CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, FindIfSelectionInternalHeaders_TrueBranch_SwitchBreak_Ok) {
+ // crbug.com/tint/243
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -7757,17 +7598,16 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(FlowFindIfSelectionInternalHeaders(&fe));
- EXPECT_THAT(p->error(), Eq(""));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(FlowFindIfSelectionInternalHeaders(&fe));
+ EXPECT_THAT(p->error(), Eq(""));
}
-TEST_F(SpvParserCFGTest,
- FindIfSelectionInternalHeaders_FalseBranch_LoopBreak_Ok) {
- // crbug.com/tint/243
- auto assembly = CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, FindIfSelectionInternalHeaders_FalseBranch_LoopBreak_Ok) {
+ // crbug.com/tint/243
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%5 = OpLabel
@@ -7795,17 +7635,16 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(FlowFindIfSelectionInternalHeaders(&fe));
- EXPECT_THAT(p->error(), Eq(""));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(FlowFindIfSelectionInternalHeaders(&fe));
+ EXPECT_THAT(p->error(), Eq(""));
}
-TEST_F(SpvParserCFGTest,
- FindIfSelectionInternalHeaders_FalseBranch_LoopContinue_Ok) {
- // crbug.com/tint/243
- auto assembly = CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, FindIfSelectionInternalHeaders_FalseBranch_LoopContinue_Ok) {
+ // crbug.com/tint/243
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%5 = OpLabel
@@ -7833,17 +7672,16 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(FlowFindIfSelectionInternalHeaders(&fe));
- EXPECT_THAT(p->error(), Eq(""));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(FlowFindIfSelectionInternalHeaders(&fe));
+ EXPECT_THAT(p->error(), Eq(""));
}
-TEST_F(SpvParserCFGTest,
- FindIfSelectionInternalHeaders_FalseBranch_SwitchBreak_Ok) {
- // crbug.com/tint/243
- auto assembly = CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, FindIfSelectionInternalHeaders_FalseBranch_SwitchBreak_Ok) {
+ // crbug.com/tint/243
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -7865,17 +7703,17 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(FlowFindIfSelectionInternalHeaders(&fe));
- EXPECT_THAT(p->error(), Eq(""));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(FlowFindIfSelectionInternalHeaders(&fe));
+ EXPECT_THAT(p->error(), Eq(""));
}
TEST_F(SpvParserCFGTest, EmitBody_IfBreak_FromThen_ForwardWithinThen) {
- // Exercises the hard case where we a single OpBranchConditional has both
- // IfBreak and Forward edges, within the true-branch clause.
- auto assembly = CommonTypes() + R"(
+ // Exercises the hard case where a single OpBranchConditional has both
+ // IfBreak and Forward edges, within the true-branch clause.
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -7900,13 +7738,13 @@ TEST_F(SpvParserCFGTest, EmitBody_IfBreak_FromThen_ForwardWithinThen) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(var_1 = 1u;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(var_1 = 1u;
var guard10 : bool = true;
if (false) {
var_1 = 2u;
@@ -7926,13 +7764,13 @@ if (false) {
var_1 = 5u;
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(SpvParserCFGTest, EmitBody_IfBreak_FromElse_ForwardWithinElse) {
- // Exercises the hard case where we a single OpBranchConditional has both
- // IfBreak and Forward edges, within the false-branch clause.
- auto assembly = CommonTypes() + R"(
+ // Exercises the hard case where a single OpBranchConditional has both
+ // IfBreak and Forward edges, within the false-branch clause.
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -7957,13 +7795,13 @@ TEST_F(SpvParserCFGTest, EmitBody_IfBreak_FromElse_ForwardWithinElse) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(var_1 = 1u;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(var_1 = 1u;
var guard10 : bool = true;
if (false) {
var_1 = 2u;
@@ -7983,16 +7821,15 @@ if (false) {
var_1 = 5u;
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
-TEST_F(SpvParserCFGTest,
- EmitBody_IfBreak_FromThenWithForward_FromElseWithForward_AlsoPremerge) {
- // This is a combination of the previous two, but also adding a premerge.
- // We have IfBreak and Forward edges from the same OpBranchConditional, and
- // this occurs in the true-branch clause, the false-branch clause, and within
- // the premerge clause. Flow guards have to be sprinkled in lots of places.
- auto assembly = CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, EmitBody_IfBreak_FromThenWithForward_FromElseWithForward_AlsoPremerge) {
+ // This is a combination of the previous two, but also adding a premerge.
+ // We have IfBreak and Forward edges from the same OpBranchConditional, and
+ // this occurs in the true-branch clause, the false-branch clause, and within
+ // the premerge clause. Flow guards have to be sprinkled in lots of places.
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -8029,13 +7866,13 @@ TEST_F(SpvParserCFGTest,
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error() << assembly;
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(var_1 = 1u;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error() << assembly;
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(var_1 = 1u;
var guard10 : bool = true;
if (false) {
var_1 = 2u;
@@ -8071,14 +7908,14 @@ if (guard10) {
var_1 = 8u;
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(SpvParserCFGTest, BlockIsContinueForMoreThanOneHeader) {
- // This is disallowed by the rule:
- // "a continue block is valid only for the innermost loop it is nested
- // inside of"
- auto assembly = CommonTypes() + R"(
+ // This is disallowed by the rule:
+ // "a continue block is valid only for the innermost loop it is nested
+ // inside of"
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -8099,19 +7936,19 @@ TEST_F(SpvParserCFGTest, BlockIsContinueForMoreThanOneHeader) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- fe.RegisterBasicBlocks();
- fe.ComputeBlockOrderAndPositions();
- EXPECT_TRUE(fe.VerifyHeaderContinueMergeOrder());
- EXPECT_FALSE(fe.RegisterMerges());
- EXPECT_THAT(p->error(), Eq("Block 50 declared as continue target for more "
- "than one header: 20, 50"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ fe.RegisterBasicBlocks();
+ fe.ComputeBlockOrderAndPositions();
+ EXPECT_TRUE(fe.VerifyHeaderContinueMergeOrder());
+ EXPECT_FALSE(fe.RegisterMerges());
+ EXPECT_THAT(p->error(), Eq("Block 50 declared as continue target for more "
+ "than one header: 20, 50"));
}
TEST_F(SpvParserCFGTest, EmitBody_If_Empty) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -8122,21 +7959,21 @@ TEST_F(SpvParserCFGTest, EmitBody_If_Empty) {
OpReturn
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(if (false) {
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(if (false) {
}
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(SpvParserCFGTest, EmitBody_If_Then_NoElse) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -8154,24 +7991,24 @@ TEST_F(SpvParserCFGTest, EmitBody_If_Then_NoElse) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(var_1 = 0u;
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(var_1 = 0u;
if (false) {
var_1 = 1u;
}
var_1 = 999u;
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(SpvParserCFGTest, EmitBody_If_NoThen_Else) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -8189,13 +8026,13 @@ TEST_F(SpvParserCFGTest, EmitBody_If_NoThen_Else) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(var_1 = 0u;
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(var_1 = 0u;
if (false) {
} else {
var_1 = 1u;
@@ -8203,11 +8040,11 @@ if (false) {
var_1 = 999u;
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(SpvParserCFGTest, EmitBody_If_Then_Else) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -8229,13 +8066,13 @@ TEST_F(SpvParserCFGTest, EmitBody_If_Then_Else) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(var_1 = 0u;
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(var_1 = 0u;
if (false) {
var_1 = 1u;
} else {
@@ -8244,14 +8081,14 @@ if (false) {
var_1 = 999u;
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(SpvParserCFGTest, EmitBody_If_Then_Else_Premerge) {
- // TODO(dneto): This should get an extra if(true) around
- // the premerge code.
- // See https://bugs.chromium.org/p/tint/issues/detail?id=82
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ // TODO(dneto): This should get an extra if(true) around
+ // the premerge code.
+ // See https://bugs.chromium.org/p/tint/issues/detail?id=82
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -8277,13 +8114,13 @@ TEST_F(SpvParserCFGTest, EmitBody_If_Then_Else_Premerge) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(var_1 = 0u;
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(var_1 = 0u;
if (false) {
var_1 = 1u;
} else {
@@ -8295,12 +8132,12 @@ if (true) {
var_1 = 999u;
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(SpvParserCFGTest, EmitBody_If_Then_Premerge) {
- // The premerge *is* the else.
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ // The premerge *is* the else.
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -8322,13 +8159,13 @@ TEST_F(SpvParserCFGTest, EmitBody_If_Then_Premerge) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(var_1 = 0u;
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(var_1 = 0u;
if (false) {
var_1 = 1u;
}
@@ -8338,12 +8175,12 @@ if (true) {
var_1 = 999u;
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(SpvParserCFGTest, EmitBody_If_Else_Premerge) {
- // The premerge *is* the then-clause.
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ // The premerge *is* the then-clause.
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -8365,13 +8202,13 @@ TEST_F(SpvParserCFGTest, EmitBody_If_Else_Premerge) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(var_1 = 0u;
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(var_1 = 0u;
if (false) {
} else {
var_1 = 1u;
@@ -8382,11 +8219,11 @@ if (true) {
var_1 = 999u;
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(SpvParserCFGTest, EmitBody_If_Nest_If) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -8426,13 +8263,13 @@ TEST_F(SpvParserCFGTest, EmitBody_If_Nest_If) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(var_1 = 0u;
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(var_1 = 0u;
if (false) {
var_1 = 1u;
if (true) {
@@ -8450,11 +8287,11 @@ if (false) {
var_1 = 999u;
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(SpvParserCFGTest, EmitBody_Loop_SingleBlock_TrueBackedge) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -8472,13 +8309,13 @@ TEST_F(SpvParserCFGTest, EmitBody_Loop_SingleBlock_TrueBackedge) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(var_1 = 0u;
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(var_1 = 0u;
loop {
var_1 = 1u;
if (false) {
@@ -8489,11 +8326,11 @@ loop {
var_1 = 999u;
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(SpvParserCFGTest, EmitBody_Loop_SingleBlock_FalseBackedge) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -8511,13 +8348,13 @@ TEST_F(SpvParserCFGTest, EmitBody_Loop_SingleBlock_FalseBackedge) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(var_1 = 0u;
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(var_1 = 0u;
loop {
var_1 = 1u;
if (false) {
@@ -8527,11 +8364,11 @@ loop {
var_1 = 999u;
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(SpvParserCFGTest, EmitBody_Loop_SingleBlock_BothBackedge) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -8549,24 +8386,24 @@ TEST_F(SpvParserCFGTest, EmitBody_Loop_SingleBlock_BothBackedge) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(var_1 = 0u;
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(var_1 = 0u;
loop {
var_1 = 1u;
}
var_1 = 999u;
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(SpvParserCFGTest, EmitBody_Loop_SingleBlock_UnconditionalBackege) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -8584,24 +8421,24 @@ TEST_F(SpvParserCFGTest, EmitBody_Loop_SingleBlock_UnconditionalBackege) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(var_1 = 0u;
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(var_1 = 0u;
loop {
var_1 = 1u;
}
var_1 = 999u;
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(SpvParserCFGTest, EmitBody_Loop_Unconditional_Body_SingleBlockContinue) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -8627,13 +8464,13 @@ TEST_F(SpvParserCFGTest, EmitBody_Loop_Unconditional_Body_SingleBlockContinue) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(var_1 = 0u;
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(var_1 = 0u;
loop {
var_1 = 1u;
var_1 = 2u;
@@ -8645,11 +8482,11 @@ loop {
var_1 = 999u;
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(SpvParserCFGTest, EmitBody_Loop_Unconditional_Body_MultiBlockContinue) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -8679,13 +8516,13 @@ TEST_F(SpvParserCFGTest, EmitBody_Loop_Unconditional_Body_MultiBlockContinue) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(var_1 = 0u;
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(var_1 = 0u;
loop {
var_1 = 1u;
var_1 = 2u;
@@ -8698,11 +8535,11 @@ loop {
var_1 = 999u;
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(SpvParserCFGTest, EmitBody_Loop_Unconditional_Body_ContinueNestIf) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -8737,13 +8574,13 @@ TEST_F(SpvParserCFGTest, EmitBody_Loop_Unconditional_Body_ContinueNestIf) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(var_1 = 0u;
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(var_1 = 0u;
loop {
var_1 = 1u;
var_1 = 2u;
@@ -8759,12 +8596,12 @@ loop {
var_1 = 999u;
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(SpvParserCFGTest, EmitBody_Loop_MultiBlockContinueIsEntireLoop) {
- // Test case where both branches exit. e.g both go to merge.
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ // Test case where both branches exit, e.g. both go to merge.
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -8786,12 +8623,12 @@ TEST_F(SpvParserCFGTest, EmitBody_Loop_MultiBlockContinueIsEntireLoop) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(var_1 = 0u;
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(var_1 = 0u;
loop {
var_1 = 1u;
var_1 = 2u;
@@ -8802,12 +8639,12 @@ loop {
var_1 = 3u;
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(SpvParserCFGTest, EmitBody_Loop_Never) {
- // Test case where both branches exit. e.g both go to merge.
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ // Test case where both branches exit, e.g. both go to merge.
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -8828,12 +8665,12 @@ TEST_F(SpvParserCFGTest, EmitBody_Loop_Never) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(loop {
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(loop {
var_1 = 1u;
break;
@@ -8844,19 +8681,19 @@ TEST_F(SpvParserCFGTest, EmitBody_Loop_Never) {
var_1 = 3u;
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(SpvParserCFGTest, EmitBody_Loop_HeaderBreakAndContinue) {
- // Header block branches to merge, and to an outer continue.
- // This is disallowed by the rule:
- // "a continue block is valid only for the innermost loop it is nested
- // inside of"
- // See test ClassifyCFGEdges_LoopContinue_FromNestedLoopHeader_IsError
+ // Header block branches to merge, and to an outer continue.
+ // This is disallowed by the rule:
+ // "a continue block is valid only for the innermost loop it is nested
+ // inside of"
+ // See test ClassifyCFGEdges_LoopContinue_FromNestedLoopHeader_IsError
}
TEST_F(SpvParserCFGTest, EmitBody_Loop_TrueToBody_FalseBreaks) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -8881,12 +8718,12 @@ TEST_F(SpvParserCFGTest, EmitBody_Loop_TrueToBody_FalseBreaks) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(loop {
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(loop {
var_1 = 1u;
if (false) {
} else {
@@ -8901,11 +8738,11 @@ TEST_F(SpvParserCFGTest, EmitBody_Loop_TrueToBody_FalseBreaks) {
var_1 = 4u;
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(SpvParserCFGTest, EmitBody_Loop_FalseToBody_TrueBreaks) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -8930,12 +8767,12 @@ TEST_F(SpvParserCFGTest, EmitBody_Loop_FalseToBody_TrueBreaks) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(loop {
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(loop {
var_1 = 1u;
if (false) {
} else {
@@ -8950,12 +8787,12 @@ TEST_F(SpvParserCFGTest, EmitBody_Loop_FalseToBody_TrueBreaks) {
var_1 = 4u;
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(SpvParserCFGTest, EmitBody_Loop_NestedIfContinue) {
- // By construction, it has to come from nested code.
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ // By construction, it has to come from nested code.
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -8986,12 +8823,12 @@ TEST_F(SpvParserCFGTest, EmitBody_Loop_NestedIfContinue) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(loop {
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(loop {
if (false) {
var_1 = 1u;
continue;
@@ -9004,11 +8841,11 @@ TEST_F(SpvParserCFGTest, EmitBody_Loop_NestedIfContinue) {
}
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(SpvParserCFGTest, EmitBody_Loop_BodyAlwaysBreaks) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -9031,13 +8868,13 @@ TEST_F(SpvParserCFGTest, EmitBody_Loop_BodyAlwaysBreaks) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(loop {
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(loop {
var_1 = 1u;
break;
@@ -9047,13 +8884,13 @@ TEST_F(SpvParserCFGTest, EmitBody_Loop_BodyAlwaysBreaks) {
}
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(SpvParserCFGTest, EmitBody_Loop_BodyConditionallyBreaks_FromTrue) {
- // The else-branch has a continue but it's skipped because it's from a
- // block that immediately precedes the continue construct.
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ // The else-branch has a continue but it's skipped because it's from a
+ // block that immediately precedes the continue construct.
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -9076,13 +8913,13 @@ TEST_F(SpvParserCFGTest, EmitBody_Loop_BodyConditionallyBreaks_FromTrue) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(loop {
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(loop {
var_1 = 1u;
if (false) {
break;
@@ -9094,13 +8931,13 @@ TEST_F(SpvParserCFGTest, EmitBody_Loop_BodyConditionallyBreaks_FromTrue) {
}
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(SpvParserCFGTest, EmitBody_Loop_BodyConditionallyBreaks_FromFalse) {
- // The else-branch has a continue but it's skipped because it's from a
- // block that immediately precedes the continue construct.
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ // The else-branch has a continue but it's skipped because it's from a
+ // block that immediately precedes the continue construct.
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -9123,13 +8960,13 @@ TEST_F(SpvParserCFGTest, EmitBody_Loop_BodyConditionallyBreaks_FromFalse) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(loop {
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(loop {
var_1 = 1u;
if (false) {
} else {
@@ -9142,11 +8979,11 @@ TEST_F(SpvParserCFGTest, EmitBody_Loop_BodyConditionallyBreaks_FromFalse) {
}
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(SpvParserCFGTest, EmitBody_Loop_BodyConditionallyBreaks_FromTrue_Early) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -9173,13 +9010,13 @@ TEST_F(SpvParserCFGTest, EmitBody_Loop_BodyConditionallyBreaks_FromTrue_Early) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(loop {
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(loop {
var_1 = 1u;
if (false) {
break;
@@ -9192,12 +9029,11 @@ TEST_F(SpvParserCFGTest, EmitBody_Loop_BodyConditionallyBreaks_FromTrue_Early) {
}
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
-TEST_F(SpvParserCFGTest,
- EmitBody_Loop_BodyConditionallyBreaks_FromFalse_Early) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, EmitBody_Loop_BodyConditionallyBreaks_FromFalse_Early) {
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -9224,13 +9060,13 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(loop {
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(loop {
var_1 = 1u;
if (false) {
} else {
@@ -9244,11 +9080,11 @@ TEST_F(SpvParserCFGTest,
}
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(SpvParserCFGTest, EmitBody_Switch_DefaultIsMerge_NoCases) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -9262,13 +9098,13 @@ TEST_F(SpvParserCFGTest, EmitBody_Switch_DefaultIsMerge_NoCases) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(var_1 = 1u;
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(var_1 = 1u;
switch(42u) {
default: {
}
@@ -9276,12 +9112,12 @@ switch(42u) {
var_1 = 7u;
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
// First do no special control flow: no fallthroughs, breaks, continues.
TEST_F(SpvParserCFGTest, EmitBody_Switch_DefaultIsMerge_OneCase) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -9299,13 +9135,13 @@ TEST_F(SpvParserCFGTest, EmitBody_Switch_DefaultIsMerge_OneCase) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(var_1 = 1u;
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(var_1 = 1u;
switch(42u) {
case 20u: {
var_1 = 20u;
@@ -9316,11 +9152,11 @@ switch(42u) {
var_1 = 7u;
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(SpvParserCFGTest, EmitBody_Switch_DefaultIsMerge_TwoCases) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -9342,13 +9178,13 @@ TEST_F(SpvParserCFGTest, EmitBody_Switch_DefaultIsMerge_TwoCases) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(var_1 = 1u;
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(var_1 = 1u;
switch(42u) {
case 30u: {
var_1 = 30u;
@@ -9362,11 +9198,11 @@ switch(42u) {
var_1 = 7u;
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(SpvParserCFGTest, EmitBody_Switch_DefaultIsMerge_CasesWithDup) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -9388,13 +9224,13 @@ TEST_F(SpvParserCFGTest, EmitBody_Switch_DefaultIsMerge_CasesWithDup) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(var_1 = 1u;
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(var_1 = 1u;
switch(42u) {
case 30u: {
var_1 = 30u;
@@ -9408,13 +9244,13 @@ switch(42u) {
var_1 = 7u;
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(SpvParserCFGTest, EmitBody_Switch_DefaultIsCase_NoDupCases) {
- // The default block is not the merge block. But not the same as a case
- // either.
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ // The default block is not the merge block. But not the same as a case
+ // either.
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -9440,13 +9276,13 @@ TEST_F(SpvParserCFGTest, EmitBody_Switch_DefaultIsCase_NoDupCases) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(var_1 = 1u;
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(var_1 = 1u;
switch(42u) {
case 40u: {
var_1 = 40u;
@@ -9461,14 +9297,14 @@ switch(42u) {
var_1 = 7u;
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(SpvParserCFGTest, EmitBody_Switch_DefaultIsCase_WithDupCase) {
- // The default block is not the merge block and is the same as a case.
- // We emit the default case separately, but just before the labeled
- // case, and with a fallthrough.
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ // The default block is not the merge block and is the same as a case.
+ // We emit the default case separately, but just before the labeled
+ // case, and with a fallthrough.
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -9494,13 +9330,13 @@ TEST_F(SpvParserCFGTest, EmitBody_Switch_DefaultIsCase_WithDupCase) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(var_1 = 1u;
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(var_1 = 1u;
switch(42u) {
case 40u: {
var_1 = 40u;
@@ -9518,11 +9354,11 @@ switch(42u) {
var_1 = 7u;
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(SpvParserCFGTest, EmitBody_Switch_Case_SintValue) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -9549,21 +9385,21 @@ TEST_F(SpvParserCFGTest, EmitBody_Switch_Case_SintValue) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(var_1 = 1u;
-switch(42) {
- case -294967296: {
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(var_1 = 1u;
+switch(42i) {
+ case -294967296i: {
var_1 = 40u;
}
- case 2000000000: {
+ case 2000000000i: {
var_1 = 30u;
}
- case 20: {
+ case 20i: {
var_1 = 20u;
}
default: {
@@ -9572,11 +9408,11 @@ switch(42) {
var_1 = 7u;
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(SpvParserCFGTest, EmitBody_Switch_Case_UintValue) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -9602,13 +9438,13 @@ TEST_F(SpvParserCFGTest, EmitBody_Switch_Case_UintValue) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(var_1 = 1u;
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(var_1 = 1u;
switch(42u) {
case 50u: {
var_1 = 40u;
@@ -9625,11 +9461,11 @@ switch(42u) {
var_1 = 7u;
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(SpvParserCFGTest, EmitBody_Return_TopLevel) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -9637,19 +9473,19 @@ TEST_F(SpvParserCFGTest, EmitBody_Return_TopLevel) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(return;
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(SpvParserCFGTest, EmitBody_Return_InsideIf) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -9664,22 +9500,22 @@ TEST_F(SpvParserCFGTest, EmitBody_Return_InsideIf) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(if (false) {
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(if (false) {
return;
}
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(SpvParserCFGTest, EmitBody_Return_InsideLoop) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -9700,22 +9536,22 @@ TEST_F(SpvParserCFGTest, EmitBody_Return_InsideLoop) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(loop {
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(loop {
return;
}
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(SpvParserCFGTest, EmitBody_ReturnValue_TopLevel) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%200 = OpFunction %uint None %uintfn
%210 = OpLabel
@@ -9731,19 +9567,19 @@ TEST_F(SpvParserCFGTest, EmitBody_ReturnValue_TopLevel) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(200);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(200);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(return 2u;
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(return 2u;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(SpvParserCFGTest, EmitBody_ReturnValue_InsideIf) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%200 = OpFunction %uint None %uintfn
%210 = OpLabel
@@ -9767,22 +9603,22 @@ TEST_F(SpvParserCFGTest, EmitBody_ReturnValue_InsideIf) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(200);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(200);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(if (false) {
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(if (false) {
return 2u;
}
return 3u;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(SpvParserCFGTest, EmitBody_ReturnValue_Loop) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%200 = OpFunction %uint None %uintfn
%210 = OpLabel
@@ -9812,22 +9648,22 @@ TEST_F(SpvParserCFGTest, EmitBody_ReturnValue_Loop) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(200);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(200);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(loop {
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(loop {
return 2u;
}
return 3u;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(SpvParserCFGTest, EmitBody_Kill_TopLevel) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -9835,19 +9671,19 @@ TEST_F(SpvParserCFGTest, EmitBody_Kill_TopLevel) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(discard;
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(discard;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(SpvParserCFGTest, EmitBody_Kill_InsideIf) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -9862,22 +9698,22 @@ TEST_F(SpvParserCFGTest, EmitBody_Kill_InsideIf) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(if (false) {
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(if (false) {
discard;
}
discard;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(SpvParserCFGTest, EmitBody_Kill_InsideLoop) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -9898,22 +9734,22 @@ TEST_F(SpvParserCFGTest, EmitBody_Kill_InsideLoop) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(loop {
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(loop {
discard;
}
discard;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(SpvParserCFGTest, EmitBody_Unreachable_TopLevel) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -9921,19 +9757,19 @@ TEST_F(SpvParserCFGTest, EmitBody_Unreachable_TopLevel) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(return;
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(SpvParserCFGTest, EmitBody_Unreachable_InsideIf) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -9948,22 +9784,22 @@ TEST_F(SpvParserCFGTest, EmitBody_Unreachable_InsideIf) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(if (false) {
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(if (false) {
return;
}
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(SpvParserCFGTest, EmitBody_Unreachable_InsideLoop) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -9984,22 +9820,22 @@ TEST_F(SpvParserCFGTest, EmitBody_Unreachable_InsideLoop) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(loop {
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(loop {
return;
}
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(SpvParserCFGTest, EmitBody_Unreachable_InNonVoidFunction) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%200 = OpFunction %uint None %uintfn
%210 = OpLabel
@@ -10015,19 +9851,19 @@ TEST_F(SpvParserCFGTest, EmitBody_Unreachable_InNonVoidFunction) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(200);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(200);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(return 0u;
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(return 0u;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(SpvParserCFGTest, EmitBody_Branch_BackEdge_MultiBlockLoop) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -10046,13 +9882,13 @@ TEST_F(SpvParserCFGTest, EmitBody_Branch_BackEdge_MultiBlockLoop) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(loop {
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(loop {
continuing {
var_1 = 1u;
@@ -10060,11 +9896,11 @@ TEST_F(SpvParserCFGTest, EmitBody_Branch_BackEdge_MultiBlockLoop) {
}
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(SpvParserCFGTest, EmitBody_Branch_BackEdge_SingleBlockLoop) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -10080,24 +9916,24 @@ TEST_F(SpvParserCFGTest, EmitBody_Branch_BackEdge_SingleBlockLoop) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(loop {
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(loop {
var_1 = 1u;
}
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(SpvParserCFGTest, EmitBody_Branch_SwitchBreak_LastInCase) {
- // When the break is last in its case, we omit it because it's implicit in
- // WGSL.
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ // When the break is last in its case, we omit it because it's implicit in
+ // WGSL.
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -10115,13 +9951,13 @@ TEST_F(SpvParserCFGTest, EmitBody_Branch_SwitchBreak_LastInCase) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(var_1 = 1u;
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(var_1 = 1u;
switch(42u) {
case 20u: {
var_1 = 20u;
@@ -10132,12 +9968,12 @@ switch(42u) {
var_1 = 7u;
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(SpvParserCFGTest, EmitBody_Branch_SwitchBreak_NotLastInCase) {
- // When the break is not last in its case, we must emit a 'break'
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ // When the break is not last in its case, we must emit a 'break'
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -10164,13 +10000,13 @@ TEST_F(SpvParserCFGTest, EmitBody_Branch_SwitchBreak_NotLastInCase) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(var_1 = 1u;
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(var_1 = 1u;
switch(42u) {
case 20u: {
var_1 = 20u;
@@ -10186,11 +10022,11 @@ switch(42u) {
var_1 = 7u;
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(SpvParserCFGTest, EmitBody_Branch_LoopBreak_MultiBlockLoop_FromBody) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -10213,13 +10049,13 @@ TEST_F(SpvParserCFGTest, EmitBody_Branch_LoopBreak_MultiBlockLoop_FromBody) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(loop {
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(loop {
var_1 = 1u;
break;
@@ -10229,15 +10065,14 @@ TEST_F(SpvParserCFGTest, EmitBody_Branch_LoopBreak_MultiBlockLoop_FromBody) {
}
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
-TEST_F(
- SpvParserCFGTest,
- EmitBody_Branch_LoopBreak_MultiBlockLoop_FromContinueConstructConditional) {
- // This case is invalid because the backedge block doesn't post-dominate the
- // continue target.
- auto p = parser(test::Assemble(CommonTypes() + R"(
+TEST_F(SpvParserCFGTest,
+ EmitBody_Branch_LoopBreak_MultiBlockLoop_FromContinueConstructConditional) {
+ // This case is invalid because the backedge block doesn't post-dominate the
+ // continue target.
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -10262,19 +10097,19 @@ TEST_F(
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_FALSE(fe.EmitBody()) << p->error();
- EXPECT_THAT(p->error(),
- Eq("Invalid exit (40->99) from continue construct: 40 is not the "
- "last block in the continue construct starting at 30 "
- "(violates post-dominance rule)"));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_FALSE(fe.EmitBody()) << p->error();
+ EXPECT_THAT(p->error(), Eq("Invalid exit (40->99) from continue construct: 40 is not the "
+ "last block in the continue construct starting at 30 "
+ "(violates post-dominance rule)"));
}
-TEST_F(
- SpvParserCFGTest,
- EmitBody_Branch_LoopBreak_MultiBlockLoop_FromContinueConstructEnd_Unconditional) { // NOLINT - line length
- auto p = parser(test::Assemble(CommonTypes() + R"(
+TEST_F(SpvParserCFGTest,
+ EmitBody_Branch_LoopBreak_MultiBlockLoop_FromContinueConstructEnd_Unconditional) { // NOLINT
+ // - line
+ // length
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -10297,14 +10132,14 @@ TEST_F(
OpFunctionEnd
)"));
- p->DeliberatelyInvalidSpirv();
+ p->DeliberatelyInvalidSpirv();
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(loop {
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(loop {
continuing {
var_1 = 1u;
@@ -10313,13 +10148,14 @@ TEST_F(
}
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(
SpvParserCFGTest,
- EmitBody_Branch_LoopBreak_MultiBlockLoop_FromContinueConstructEnd_Conditional) { // NOLINT - line length
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ EmitBody_Branch_LoopBreak_MultiBlockLoop_FromContinueConstructEnd_Conditional) { // NOLINT -
+ // line length
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -10338,12 +10174,12 @@ TEST_F(
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(loop {
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(loop {
continuing {
var_1 = 1u;
@@ -10355,11 +10191,11 @@ TEST_F(
}
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(SpvParserCFGTest, EmitBody_Branch_LoopContinue_LastInLoopConstruct) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -10382,12 +10218,12 @@ TEST_F(SpvParserCFGTest, EmitBody_Branch_LoopContinue_LastInLoopConstruct) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(loop {
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(loop {
var_1 = 1u;
continuing {
@@ -10396,12 +10232,12 @@ TEST_F(SpvParserCFGTest, EmitBody_Branch_LoopContinue_LastInLoopConstruct) {
}
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(SpvParserCFGTest, EmitBody_Branch_LoopContinue_BeforeLast) {
- // By construction, it has to come from nested code.
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ // By construction, it has to come from nested code.
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -10432,12 +10268,12 @@ TEST_F(SpvParserCFGTest, EmitBody_Branch_LoopContinue_BeforeLast) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(loop {
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(loop {
if (false) {
var_1 = 1u;
continue;
@@ -10450,11 +10286,11 @@ TEST_F(SpvParserCFGTest, EmitBody_Branch_LoopContinue_BeforeLast) {
}
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(SpvParserCFGTest, EmitBody_Branch_LoopContinue_FromSwitch) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -10489,12 +10325,12 @@ TEST_F(SpvParserCFGTest, EmitBody_Branch_LoopContinue_FromSwitch) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(var_1 = 1u;
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(var_1 = 1u;
loop {
var_1 = 2u;
var_1 = 3u;
@@ -10515,12 +10351,12 @@ loop {
var_1 = 7u;
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(SpvParserCFGTest, EmitBody_Branch_IfBreak_FromThen) {
- // When unconditional, the if-break must be last in the then clause.
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ // When unconditional, the if-break must be last in the then clause.
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -10537,23 +10373,23 @@ TEST_F(SpvParserCFGTest, EmitBody_Branch_IfBreak_FromThen) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(if (false) {
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(if (false) {
var_1 = 1u;
}
var_1 = 2u;
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(SpvParserCFGTest, EmitBody_Branch_IfBreak_FromElse) {
- // When unconditional, the if-break must be last in the else clause.
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ // When unconditional, the if-break must be last in the else clause.
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -10570,23 +10406,23 @@ TEST_F(SpvParserCFGTest, EmitBody_Branch_IfBreak_FromElse) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(if (false) {
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(if (false) {
} else {
var_1 = 1u;
}
var_1 = 2u;
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(SpvParserCFGTest, EmitBody_Branch_Fallthrough) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -10608,13 +10444,13 @@ TEST_F(SpvParserCFGTest, EmitBody_Branch_Fallthrough) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(var_1 = 1u;
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(var_1 = 1u;
switch(42u) {
case 20u: {
var_1 = 20u;
@@ -10629,11 +10465,11 @@ switch(42u) {
var_1 = 7u;
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(SpvParserCFGTest, EmitBody_Branch_Forward) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -10646,16 +10482,16 @@ TEST_F(SpvParserCFGTest, EmitBody_Branch_Forward) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(var_1 = 1u;
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(var_1 = 1u;
var_1 = 2u;
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
// Test matrix for normal OpBranchConditional:
@@ -10727,7 +10563,7 @@ return;
// kForward: dup general case
TEST_F(SpvParserCFGTest, EmitBody_BranchConditional_Back_SingleBlock_Back) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -10745,24 +10581,23 @@ TEST_F(SpvParserCFGTest, EmitBody_BranchConditional_Back_SingleBlock_Back) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(var_1 = 0u;
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(var_1 = 0u;
loop {
var_1 = 1u;
}
var_1 = 5u;
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
-TEST_F(SpvParserCFGTest,
- EmitBody_BranchConditional_Back_SingleBlock_LoopBreak_OnTrue) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, EmitBody_BranchConditional_Back_SingleBlock_LoopBreak_OnTrue) {
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -10780,12 +10615,12 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(var_1 = 0u;
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(var_1 = 0u;
loop {
var_1 = 1u;
if (false) {
@@ -10795,12 +10630,11 @@ loop {
var_1 = 5u;
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
-TEST_F(SpvParserCFGTest,
- EmitBody_BranchConditional_Back_SingleBlock_LoopBreak_OnFalse) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, EmitBody_BranchConditional_Back_SingleBlock_LoopBreak_OnFalse) {
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -10818,12 +10652,12 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(var_1 = 0u;
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(var_1 = 0u;
loop {
var_1 = 1u;
if (false) {
@@ -10834,12 +10668,11 @@ loop {
var_1 = 5u;
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
-TEST_F(SpvParserCFGTest,
- EmitBody_BranchConditional_Back_MultiBlock_LoopBreak_OnTrue) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, EmitBody_BranchConditional_Back_MultiBlock_LoopBreak_OnTrue) {
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -10860,12 +10693,12 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(var_1 = 0u;
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(var_1 = 0u;
loop {
var_1 = 1u;
@@ -10878,12 +10711,11 @@ loop {
var_1 = 5u;
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
-TEST_F(SpvParserCFGTest,
- EmitBody_BranchConditional_Back_MultiBlock_LoopBreak_OnFalse) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, EmitBody_BranchConditional_Back_MultiBlock_LoopBreak_OnFalse) {
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -10904,12 +10736,12 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(var_1 = 0u;
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(var_1 = 0u;
loop {
var_1 = 1u;
@@ -10923,14 +10755,13 @@ loop {
var_1 = 5u;
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
-TEST_F(SpvParserCFGTest,
- EmitBody_BranchConditional_SwitchBreak_SwitchBreak_LastInCase) {
- // When the break is last in its case, we omit it because it's implicit in
- // WGSL.
- auto p = parser(test::Assemble(CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, EmitBody_BranchConditional_SwitchBreak_SwitchBreak_LastInCase) {
+ // When the break is last in its case, we omit it because it's implicit in
+ // WGSL.
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -10948,13 +10779,13 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(var_1 = 1u;
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(var_1 = 1u;
switch(42u) {
case 20u: {
var_1 = 20u;
@@ -10965,13 +10796,12 @@ switch(42u) {
var_1 = 7u;
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
-TEST_F(SpvParserCFGTest,
- EmitBody_BranchConditional_SwitchBreak_SwitchBreak_NotLastInCase) {
- // When the break is not last in its case, we must emit a 'break'
- auto p = parser(test::Assemble(CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, EmitBody_BranchConditional_SwitchBreak_SwitchBreak_NotLastInCase) {
+ // When the break is not last in its case, we must emit a 'break'
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -10998,13 +10828,13 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(var_1 = 1u;
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(var_1 = 1u;
switch(42u) {
case 20u: {
var_1 = 20u;
@@ -11020,12 +10850,11 @@ switch(42u) {
var_1 = 7u;
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
-TEST_F(SpvParserCFGTest,
- EmitBody_BranchConditional_SwitchBreak_Continue_OnTrue) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, EmitBody_BranchConditional_SwitchBreak_Continue_OnTrue) {
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -11060,13 +10889,13 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(var_1 = 1u;
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(var_1 = 1u;
loop {
var_1 = 2u;
var_1 = 3u;
@@ -11089,12 +10918,11 @@ loop {
var_1 = 8u;
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
-TEST_F(SpvParserCFGTest,
- EmitBody_BranchConditional_SwitchBreak_Continue_OnFalse) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, EmitBody_BranchConditional_SwitchBreak_Continue_OnFalse) {
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -11129,13 +10957,13 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(var_1 = 1u;
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(var_1 = 1u;
loop {
var_1 = 2u;
var_1 = 3u;
@@ -11159,12 +10987,11 @@ loop {
var_1 = 8u;
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
-TEST_F(SpvParserCFGTest,
- EmitBody_BranchConditional_SwitchBreak_Forward_OnTrue) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, EmitBody_BranchConditional_SwitchBreak_Forward_OnTrue) {
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -11186,12 +11013,12 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(var_1 = 1u;
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(var_1 = 1u;
switch(42u) {
case 20u: {
var_1 = 20u;
@@ -11207,12 +11034,11 @@ switch(42u) {
var_1 = 8u;
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
-TEST_F(SpvParserCFGTest,
- EmitBody_BranchConditional_SwitchBreak_Forward_OnFalse) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, EmitBody_BranchConditional_SwitchBreak_Forward_OnFalse) {
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -11234,12 +11060,12 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(var_1 = 1u;
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(var_1 = 1u;
switch(42u) {
case 20u: {
var_1 = 20u;
@@ -11254,12 +11080,11 @@ switch(42u) {
var_1 = 8u;
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
-TEST_F(SpvParserCFGTest,
- EmitBody_BranchConditional_SwitchBreak_Fallthrough_OnTrue) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, EmitBody_BranchConditional_SwitchBreak_Fallthrough_OnTrue) {
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -11281,13 +11106,13 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(var_1 = 1u;
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(var_1 = 1u;
switch(42u) {
case 20u: {
var_1 = 20u;
@@ -11306,12 +11131,11 @@ switch(42u) {
var_1 = 7u;
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
-TEST_F(SpvParserCFGTest,
- EmitBody_BranchConditional_SwitchBreak_Fallthrough_OnFalse) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, EmitBody_BranchConditional_SwitchBreak_Fallthrough_OnFalse) {
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -11333,13 +11157,13 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(var_1 = 1u;
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(var_1 = 1u;
switch(42u) {
case 20u: {
var_1 = 20u;
@@ -11357,12 +11181,11 @@ switch(42u) {
var_1 = 7u;
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
-TEST_F(SpvParserCFGTest,
- EmitBody_BranchConditional_LoopBreak_SingleBlock_LoopBreak) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, EmitBody_BranchConditional_LoopBreak_SingleBlock_LoopBreak) {
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -11384,12 +11207,12 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(var_1 = 0u;
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(var_1 = 0u;
loop {
var_1 = 1u;
break;
@@ -11401,12 +11224,11 @@ loop {
var_1 = 5u;
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
-TEST_F(SpvParserCFGTest,
- EmitBody_BranchConditional_LoopBreak_MultiBlock_LoopBreak) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, EmitBody_BranchConditional_LoopBreak_MultiBlock_LoopBreak) {
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -11432,12 +11254,12 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(var_1 = 0u;
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(var_1 = 0u;
loop {
var_1 = 1u;
var_1 = 2u;
@@ -11450,11 +11272,11 @@ loop {
var_1 = 5u;
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(SpvParserCFGTest, EmitBody_BranchConditional_LoopBreak_Continue_OnTrue) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -11492,12 +11314,12 @@ TEST_F(SpvParserCFGTest, EmitBody_BranchConditional_LoopBreak_Continue_OnTrue) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(var_1 = 0u;
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(var_1 = 0u;
loop {
var_1 = 1u;
if (true) {
@@ -11517,12 +11339,11 @@ loop {
var_1 = 5u;
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
-TEST_F(SpvParserCFGTest,
- EmitBody_BranchConditional_LoopBreak_Continue_OnFalse) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, EmitBody_BranchConditional_LoopBreak_Continue_OnFalse) {
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -11560,12 +11381,12 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(var_1 = 0u;
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(var_1 = 0u;
loop {
var_1 = 1u;
if (true) {
@@ -11585,13 +11406,12 @@ loop {
var_1 = 5u;
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
-TEST_F(SpvParserCFGTest,
- EmitBody_BranchConditional_LoopBreak_Fallthrough_IsError) {
- // It's an error because switch break conflicts with loop break.
- auto p = parser(test::Assemble(CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, EmitBody_BranchConditional_LoopBreak_Fallthrough_IsError) {
+ // It's an error because switch break conflicts with loop break.
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -11629,17 +11449,15 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_FALSE(fe.EmitBody()) << p->error();
- EXPECT_THAT(
- p->error(),
- Eq("Branch from block 40 to block 99 is an invalid exit from construct "
- "starting at block 30; branch bypasses merge block 79"));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_FALSE(fe.EmitBody()) << p->error();
+ EXPECT_THAT(p->error(), Eq("Branch from block 40 to block 99 is an invalid exit from construct "
+ "starting at block 30; branch bypasses merge block 79"));
}
TEST_F(SpvParserCFGTest, EmitBody_BranchConditional_LoopBreak_Forward_OnTrue) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -11670,12 +11488,12 @@ TEST_F(SpvParserCFGTest, EmitBody_BranchConditional_LoopBreak_Forward_OnTrue) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(var_1 = 0u;
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(var_1 = 0u;
loop {
var_1 = 1u;
var_1 = 2u;
@@ -11692,11 +11510,11 @@ loop {
var_1 = 5u;
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(SpvParserCFGTest, EmitBody_BranchConditional_LoopBreak_Forward_OnFalse) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -11727,12 +11545,12 @@ TEST_F(SpvParserCFGTest, EmitBody_BranchConditional_LoopBreak_Forward_OnFalse) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(var_1 = 0u;
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(var_1 = 0u;
loop {
var_1 = 1u;
var_1 = 2u;
@@ -11748,12 +11566,11 @@ loop {
var_1 = 5u;
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
-TEST_F(SpvParserCFGTest,
- EmitBody_BranchConditional_Continue_Continue_FromHeader) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, EmitBody_BranchConditional_Continue_Continue_FromHeader) {
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -11775,12 +11592,12 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(var_1 = 0u;
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(var_1 = 0u;
loop {
var_1 = 1u;
@@ -11791,12 +11608,11 @@ loop {
var_1 = 5u;
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
-TEST_F(SpvParserCFGTest,
- EmitBody_BranchConditional_Continue_Continue_AfterHeader_Unconditional) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, EmitBody_BranchConditional_Continue_Continue_AfterHeader_Unconditional) {
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -11822,12 +11638,12 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(var_1 = 0u;
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(var_1 = 0u;
loop {
var_1 = 1u;
var_1 = 2u;
@@ -11839,14 +11655,13 @@ loop {
var_1 = 5u;
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
-TEST_F(SpvParserCFGTest,
- EmitBody_BranchConditional_Continue_Continue_AfterHeader_Conditional) {
- // Create an intervening block so we actually require a "continue" statement
- // instead of just an adjacent fallthrough to the continue target.
- auto p = parser(test::Assemble(CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, EmitBody_BranchConditional_Continue_Continue_AfterHeader_Conditional) {
+ // Create an intervening block so we actually require a "continue" statement
+ // instead of just an adjacent fallthrough to the continue target.
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -11881,12 +11696,12 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(var_1 = 0u;
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(var_1 = 0u;
loop {
var_1 = 1u;
var_1 = 2u;
@@ -11903,14 +11718,14 @@ loop {
var_1 = 6u;
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(
SpvParserCFGTest,
EmitBody_BranchConditional_Continue_Continue_AfterHeader_Conditional_EmptyContinuing) { // NOLINT
- // Like the previous tests, but with an empty continuing clause.
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ // Like the previous tests, but with an empty continuing clause.
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -11945,12 +11760,12 @@ TEST_F(
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(var_1 = 0u;
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(var_1 = 0u;
loop {
var_1 = 1u;
var_1 = 2u;
@@ -11963,11 +11778,11 @@ loop {
var_1 = 6u;
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(SpvParserCFGTest, EmitBody_BranchConditional_LoopContinue_FromSwitch) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -12002,12 +11817,12 @@ TEST_F(SpvParserCFGTest, EmitBody_BranchConditional_LoopContinue_FromSwitch) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(var_1 = 1u;
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(var_1 = 1u;
loop {
var_1 = 2u;
var_1 = 3u;
@@ -12028,11 +11843,11 @@ loop {
var_1 = 7u;
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(SpvParserCFGTest, EmitBody_BranchConditional_Continue_IfBreak_OnTrue) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -12068,12 +11883,12 @@ TEST_F(SpvParserCFGTest, EmitBody_BranchConditional_Continue_IfBreak_OnTrue) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(var_1 = 0u;
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(var_1 = 0u;
loop {
var_1 = 1u;
var_1 = 2u;
@@ -12093,11 +11908,11 @@ loop {
var_1 = 6u;
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(SpvParserCFGTest, EmitBody_BranchConditional_Continue_IfBreak_OnFalse) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -12133,12 +11948,12 @@ TEST_F(SpvParserCFGTest, EmitBody_BranchConditional_Continue_IfBreak_OnFalse) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(var_1 = 0u;
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(var_1 = 0u;
loop {
var_1 = 1u;
var_1 = 2u;
@@ -12157,12 +11972,11 @@ loop {
var_1 = 6u;
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
-TEST_F(SpvParserCFGTest,
- EmitBody_BranchConditional_Continue_Fallthrough_OnTrue) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, EmitBody_BranchConditional_Continue_Fallthrough_OnTrue) {
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -12201,12 +12015,12 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(var_1 = 0u;
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(var_1 = 0u;
loop {
var_1 = 1u;
var_1 = 2u;
@@ -12234,12 +12048,11 @@ loop {
var_1 = 5u;
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
-TEST_F(SpvParserCFGTest,
- EmitBody_BranchConditional_Continue_Fallthrough_OnFalse) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, EmitBody_BranchConditional_Continue_Fallthrough_OnFalse) {
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -12278,12 +12091,12 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(var_1 = 0u;
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(var_1 = 0u;
loop {
var_1 = 1u;
var_1 = 2u;
@@ -12310,11 +12123,11 @@ loop {
var_1 = 5u;
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(SpvParserCFGTest, EmitBody_BranchConditional_Continue_Forward_OnTrue) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -12345,12 +12158,12 @@ TEST_F(SpvParserCFGTest, EmitBody_BranchConditional_Continue_Forward_OnTrue) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(var_1 = 0u;
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(var_1 = 0u;
loop {
var_1 = 1u;
var_1 = 2u;
@@ -12367,11 +12180,11 @@ loop {
var_1 = 5u;
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(SpvParserCFGTest, EmitBody_BranchConditional_Continue_Forward_OnFalse) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -12402,12 +12215,12 @@ TEST_F(SpvParserCFGTest, EmitBody_BranchConditional_Continue_Forward_OnFalse) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(var_1 = 0u;
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(var_1 = 0u;
loop {
var_1 = 1u;
var_1 = 2u;
@@ -12423,11 +12236,11 @@ loop {
var_1 = 5u;
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(SpvParserCFGTest, EmitBody_BranchConditional_IfBreak_IfBreak_Same) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -12445,23 +12258,22 @@ TEST_F(SpvParserCFGTest, EmitBody_BranchConditional_IfBreak_IfBreak_Same) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(var_1 = 0u;
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(var_1 = 0u;
if (false) {
}
var_1 = 5u;
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
-TEST_F(SpvParserCFGTest,
- EmitBody_BranchConditional_IfBreak_IfBreak_DifferentIsError) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, EmitBody_BranchConditional_IfBreak_IfBreak_DifferentIsError) {
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -12487,18 +12299,15 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_FALSE(FlowClassifyCFGEdges(&fe));
- EXPECT_THAT(
- p->error(),
- Eq("Branch from block 30 to block 99 is an invalid exit from construct "
- "starting at block 20; branch bypasses merge block 89"));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_FALSE(FlowClassifyCFGEdges(&fe));
+ EXPECT_THAT(p->error(), Eq("Branch from block 30 to block 99 is an invalid exit from construct "
+ "starting at block 20; branch bypasses merge block 89"));
}
-TEST_F(SpvParserCFGTest,
- EmitBody_BranchConditional_Fallthrough_Fallthrough_Same) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, EmitBody_BranchConditional_Fallthrough_Fallthrough_Same) {
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -12520,13 +12329,13 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(var_1 = 1u;
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(var_1 = 1u;
switch(42u) {
case 20u: {
var_1 = 20u;
@@ -12541,14 +12350,13 @@ switch(42u) {
var_1 = 7u;
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
-TEST_F(SpvParserCFGTest,
- EmitBody_BranchConditional_Fallthrough_NotLastInCase_IsError) {
- // See also
- // ClassifyCFGEdges_Fallthrough_BranchConditionalWith_Forward_IsError.
- auto p = parser(test::Assemble(CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, EmitBody_BranchConditional_Fallthrough_NotLastInCase_IsError) {
+ // See also
+ // ClassifyCFGEdges_Fallthrough_BranchConditionalWith_Forward_IsError.
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -12573,19 +12381,18 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_FALSE(fe.EmitBody());
- // The weird forward branch pulls in 40 as part of the selection rather than
- // as a case.
- EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 40, 30, 39, 99));
- EXPECT_THAT(
- p->error(),
- Eq("Branch from 10 to 40 bypasses header 20 (dominance rule violated)"));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_FALSE(fe.EmitBody());
+ // The weird forward branch pulls in 40 as part of the selection rather than
+ // as a case.
+ EXPECT_THAT(fe.block_order(), ElementsAre(10, 20, 40, 30, 39, 99));
+ EXPECT_THAT(p->error(),
+ Eq("Branch from 10 to 40 bypasses header 20 (dominance rule violated)"));
}
TEST_F(SpvParserCFGTest, EmitBody_BranchConditional_Forward_Forward_Same) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -12598,21 +12405,20 @@ TEST_F(SpvParserCFGTest, EmitBody_BranchConditional_Forward_Forward_Same) {
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(var_1 = 1u;
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(var_1 = 1u;
var_1 = 2u;
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
-TEST_F(SpvParserCFGTest,
- EmitBody_BranchConditional_Forward_Forward_Different_IsError) {
- auto p = parser(test::Assemble(CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, EmitBody_BranchConditional_Forward_Forward_Different_IsError) {
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -12627,16 +12433,15 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_FALSE(fe.EmitBody());
- EXPECT_THAT(p->error(),
- Eq("Control flow diverges at block 10 (to 20, 99) but it is not "
- "a structured header (it has no merge instruction)"));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_FALSE(fe.EmitBody());
+ EXPECT_THAT(p->error(), Eq("Control flow diverges at block 10 (to 20, 99) but it is not "
+ "a structured header (it has no merge instruction)"));
}
TEST_F(SpvParserCFGTest, Switch_NotAsSelectionHeader_Simple) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -12647,20 +12452,17 @@ TEST_F(SpvParserCFGTest, Switch_NotAsSelectionHeader_Simple) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_FALSE(fe.EmitBody());
- EXPECT_THAT(
- p->error(),
- HasSubstr("invalid structured control flow: found an OpSwitch that "
- "is not preceded by an OpSelectionMerge:"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_FALSE(fe.EmitBody());
+ EXPECT_THAT(p->error(), HasSubstr("invalid structured control flow: found an OpSwitch that "
+ "is not preceded by an OpSelectionMerge:"));
}
-TEST_F(SpvParserCFGTest,
- Switch_NotAsSelectionHeader_NonDefaultBranchesAreContinue) {
- // Adapted from SPIRV-Tools test MissingMergeOneUnseenTargetSwitchBad
- auto p = parser(test::Assemble(CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, Switch_NotAsSelectionHeader_NonDefaultBranchesAreContinue) {
+ // Adapted from SPIRV-Tools test MissingMergeOneUnseenTargetSwitchBad
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
OpBranch %loop
@@ -12684,18 +12486,16 @@ TEST_F(SpvParserCFGTest,
OpReturn
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_FALSE(fe.EmitBody());
- EXPECT_THAT(
- p->error(),
- HasSubstr("invalid structured control flow: found an OpSwitch that "
- "is not preceded by an OpSelectionMerge:"));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_FALSE(fe.EmitBody());
+ EXPECT_THAT(p->error(), HasSubstr("invalid structured control flow: found an OpSwitch that "
+ "is not preceded by an OpSelectionMerge:"));
}
TEST_F(SpvParserCFGTest, Switch_NotAsSelectionHeader_DefaultBranchIsContinue) {
- // Adapted from SPIRV-Tools test MissingMergeOneUnseenTargetSwitchBad
- auto p = parser(test::Assemble(CommonTypes() + R"(
+ // Adapted from SPIRV-Tools test MissingMergeOneUnseenTargetSwitchBad
+ auto p = parser(test::Assemble(CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
OpBranch %loop
@@ -12719,30 +12519,28 @@ TEST_F(SpvParserCFGTest, Switch_NotAsSelectionHeader_DefaultBranchIsContinue) {
OpReturn
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_FALSE(fe.EmitBody());
- EXPECT_THAT(
- p->error(),
- HasSubstr("invalid structured control flow: found an OpSwitch that "
- "is not preceded by an OpSelectionMerge:"));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_FALSE(fe.EmitBody());
+ EXPECT_THAT(p->error(), HasSubstr("invalid structured control flow: found an OpSwitch that "
+ "is not preceded by an OpSelectionMerge:"));
}
TEST_F(SpvParserCFGTest, SiblingLoopConstruct_Null) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_EQ(fe.SiblingLoopConstruct(nullptr), nullptr);
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_EQ(fe.SiblingLoopConstruct(nullptr), nullptr);
}
TEST_F(SpvParserCFGTest, SiblingLoopConstruct_NotAContinue) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -12750,17 +12548,17 @@ TEST_F(SpvParserCFGTest, SiblingLoopConstruct_NotAContinue) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- ASSERT_TRUE(FlowLabelControlFlowConstructs(&fe)) << p->error();
- const Construct* c = fe.GetBlockInfo(10)->construct;
- EXPECT_NE(c, nullptr);
- EXPECT_EQ(fe.SiblingLoopConstruct(c), nullptr);
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ ASSERT_TRUE(FlowLabelControlFlowConstructs(&fe)) << p->error();
+ const Construct* c = fe.GetBlockInfo(10)->construct;
+ EXPECT_NE(c, nullptr);
+ EXPECT_EQ(fe.SiblingLoopConstruct(c), nullptr);
}
TEST_F(SpvParserCFGTest, SiblingLoopConstruct_SingleBlockLoop) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -12775,17 +12573,17 @@ TEST_F(SpvParserCFGTest, SiblingLoopConstruct_SingleBlockLoop) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- ASSERT_TRUE(FlowLabelControlFlowConstructs(&fe)) << p->error();
- const Construct* c = fe.GetBlockInfo(20)->construct;
- EXPECT_EQ(c->kind, Construct::kContinue);
- EXPECT_EQ(fe.SiblingLoopConstruct(c), nullptr);
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ ASSERT_TRUE(FlowLabelControlFlowConstructs(&fe)) << p->error();
+ const Construct* c = fe.GetBlockInfo(20)->construct;
+ EXPECT_EQ(c->kind, Construct::kContinue);
+ EXPECT_EQ(fe.SiblingLoopConstruct(c), nullptr);
}
TEST_F(SpvParserCFGTest, SiblingLoopConstruct_ContinueIsWholeMultiBlockLoop) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -12803,18 +12601,17 @@ TEST_F(SpvParserCFGTest, SiblingLoopConstruct_ContinueIsWholeMultiBlockLoop) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions())
- << p->error() << assembly;
- auto fe = p->function_emitter(100);
- ASSERT_TRUE(FlowLabelControlFlowConstructs(&fe)) << p->error();
- const Construct* c = fe.GetBlockInfo(20)->construct;
- EXPECT_EQ(c->kind, Construct::kContinue);
- EXPECT_EQ(fe.SiblingLoopConstruct(c), nullptr);
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error() << assembly;
+ auto fe = p->function_emitter(100);
+ ASSERT_TRUE(FlowLabelControlFlowConstructs(&fe)) << p->error();
+ const Construct* c = fe.GetBlockInfo(20)->construct;
+ EXPECT_EQ(c->kind, Construct::kContinue);
+ EXPECT_EQ(fe.SiblingLoopConstruct(c), nullptr);
}
TEST_F(SpvParserCFGTest, SiblingLoopConstruct_HasSiblingLoop) {
- auto assembly = CommonTypes() + R"(
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -12832,20 +12629,20 @@ TEST_F(SpvParserCFGTest, SiblingLoopConstruct_HasSiblingLoop) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- ASSERT_TRUE(FlowLabelControlFlowConstructs(&fe)) << p->error();
- const Construct* c = fe.GetBlockInfo(30)->construct;
- EXPECT_EQ(c->kind, Construct::kContinue);
- EXPECT_THAT(ToString(fe.SiblingLoopConstruct(c)),
- Eq("Construct{ Loop [1,2) begin_id:20 end_id:30 depth:1 "
- "parent:Function@10 scope:[1,3) in-l:Loop@20 }"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ ASSERT_TRUE(FlowLabelControlFlowConstructs(&fe)) << p->error();
+ const Construct* c = fe.GetBlockInfo(30)->construct;
+ EXPECT_EQ(c->kind, Construct::kContinue);
+ EXPECT_THAT(ToString(fe.SiblingLoopConstruct(c)),
+ Eq("Construct{ Loop [1,2) begin_id:20 end_id:30 depth:1 "
+ "parent:Function@10 scope:[1,3) in-l:Loop@20 }"));
}
TEST_F(SpvParserCFGTest, EmitBody_IfSelection_TrueBranch_LoopBreak) {
- // crbug.com/tint/243
- auto assembly = CommonTypes() + R"(
+ // crbug.com/tint/243
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%5 = OpLabel
@@ -12872,26 +12669,26 @@ TEST_F(SpvParserCFGTest, EmitBody_IfSelection_TrueBranch_LoopBreak) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(loop {
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(loop {
if (false) {
break;
}
}
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(SpvParserCFGTest, EmitBody_TrueBranch_LoopContinue) {
- // crbug.com/tint/243
- auto assembly = CommonTypes() + R"(
+ // crbug.com/tint/243
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%5 = OpLabel
@@ -12919,25 +12716,25 @@ TEST_F(SpvParserCFGTest, EmitBody_TrueBranch_LoopContinue) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(loop {
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(loop {
if (false) {
continue;
}
}
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(SpvParserCFGTest, EmitBody_TrueBranch_SwitchBreak) {
- // crbug.com/tint/243
- auto assembly = CommonTypes() + R"(
+ // crbug.com/tint/243
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -12959,13 +12756,13 @@ TEST_F(SpvParserCFGTest, EmitBody_TrueBranch_SwitchBreak) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(switch(20u) {
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(switch(20u) {
case 20u: {
if (false) {
break;
@@ -12976,12 +12773,12 @@ TEST_F(SpvParserCFGTest, EmitBody_TrueBranch_SwitchBreak) {
}
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(SpvParserCFGTest, EmitBody_FalseBranch_LoopBreak) {
- // crbug.com/tint/243
- auto assembly = CommonTypes() + R"(
+ // crbug.com/tint/243
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%5 = OpLabel
@@ -13009,13 +12806,13 @@ TEST_F(SpvParserCFGTest, EmitBody_FalseBranch_LoopBreak) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(loop {
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(loop {
if (false) {
} else {
break;
@@ -13023,12 +12820,12 @@ TEST_F(SpvParserCFGTest, EmitBody_FalseBranch_LoopBreak) {
}
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(SpvParserCFGTest, EmitBody_FalseBranch_LoopContinue) {
- // crbug.com/tint/243
- auto assembly = CommonTypes() + R"(
+ // crbug.com/tint/243
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%5 = OpLabel
@@ -13056,13 +12853,13 @@ TEST_F(SpvParserCFGTest, EmitBody_FalseBranch_LoopContinue) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(loop {
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(loop {
if (false) {
} else {
continue;
@@ -13070,12 +12867,12 @@ TEST_F(SpvParserCFGTest, EmitBody_FalseBranch_LoopContinue) {
}
return;
)";
- ASSERT_EQ(expect, got) << p->error();
+ ASSERT_EQ(expect, got) << p->error();
}
TEST_F(SpvParserCFGTest, EmitBody_FalseBranch_SwitchBreak) {
- // crbug.com/tint/243
- auto assembly = CommonTypes() + R"(
+ // crbug.com/tint/243
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -13097,13 +12894,13 @@ TEST_F(SpvParserCFGTest, EmitBody_FalseBranch_SwitchBreak) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(switch(20u) {
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(switch(20u) {
case 20u: {
if (false) {
} else {
@@ -13115,12 +12912,12 @@ TEST_F(SpvParserCFGTest, EmitBody_FalseBranch_SwitchBreak) {
}
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
TEST_F(SpvParserCFGTest, EmitBody_LoopInternallyDiverge_Simple) {
- // crbug.com/tint/524
- auto assembly = CommonTypes() + R"(
+ // crbug.com/tint/524
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
OpStore %var %uint_10
@@ -13149,13 +12946,13 @@ TEST_F(SpvParserCFGTest, EmitBody_LoopInternallyDiverge_Simple) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(var_1 = 10u;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(var_1 = 10u;
loop {
var_1 = 20u;
if (false) {
@@ -13172,15 +12969,14 @@ loop {
var_1 = 99u;
return;
)";
- ASSERT_EQ(expect, got) << got;
+ ASSERT_EQ(expect, got) << got;
}
-TEST_F(SpvParserCFGTest,
- EmitBody_ContinueFromSingleBlockLoopToOuterLoop_IsError) {
- // crbug.com/tint/793
- // This is invalid SPIR-V but the validator was only recently upgraded
- // to catch it.
- auto assembly = CommonTypes() + R"(
+TEST_F(SpvParserCFGTest, EmitBody_ContinueFromSingleBlockLoopToOuterLoop_IsError) {
+ // crbug.com/tint/793
+ // This is invalid SPIR-V but the validator was only recently upgraded
+ // to catch it.
+ auto assembly = CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%5 = OpLabel
OpBranch %10
@@ -13212,13 +13008,12 @@ TEST_F(SpvParserCFGTest,
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- EXPECT_FALSE(p->Parse());
- EXPECT_FALSE(p->success());
- EXPECT_THAT(p->error(),
- HasSubstr("block <ID> 20[%20] exits the continue headed by <ID> "
- "20[%20], but not via a structured exit"))
- << p->error();
+ auto p = parser(test::Assemble(assembly));
+ EXPECT_FALSE(p->Parse());
+ EXPECT_FALSE(p->success());
+ EXPECT_THAT(p->error(), HasSubstr("block <ID> 20[%20] exits the continue headed by <ID> "
+ "20[%20], but not via a structured exit"))
+ << p->error();
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/reader/spirv/function_composite_test.cc b/chromium/third_party/dawn/src/tint/reader/spirv/function_composite_test.cc
index 8b29a908c92..2b26d146fee 100644
--- a/chromium/third_party/dawn/src/tint/reader/spirv/function_composite_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/spirv/function_composite_test.cc
@@ -24,7 +24,7 @@ using ::testing::Eq;
using ::testing::HasSubstr;
std::string Caps() {
- return R"(
+ return R"(
OpCapability Shader
OpMemoryModel Logical Simple
OpEntryPoint GLCompute %100 "main"
@@ -33,7 +33,7 @@ std::string Caps() {
}
std::string CommonTypes() {
- return R"(
+ return R"(
%void = OpTypeVoid
%voidfn = OpTypeFunction %void
@@ -75,13 +75,13 @@ std::string CommonTypes() {
}
std::string Preamble() {
- return Caps() + CommonTypes();
+ return Caps() + CommonTypes();
}
using SpvParserTest_Composite_Construct = SpvParserTest;
TEST_F(SpvParserTest_Composite_Construct, Vector) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpCompositeConstruct %v2uint %uint_10 %uint_20
@@ -90,77 +90,74 @@ TEST_F(SpvParserTest_Composite_Construct, Vector) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr(R"(let x_1 : vec2<u32> = vec2<u32>(10u, 20u);
-let x_2 : vec2<i32> = vec2<i32>(30, 40);
-let x_3 : vec2<f32> = vec2<f32>(50.0, 60.0);
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body),
+ HasSubstr(R"(let x_1 : vec2<u32> = vec2<u32>(10u, 20u);
+let x_2 : vec2<i32> = vec2<i32>(30i, 40i);
+let x_3 : vec2<f32> = vec2<f32>(50.0f, 60.0f);
)"));
}
TEST_F(SpvParserTest_Composite_Construct, Matrix) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpCompositeConstruct %m3v2float %v2float_50_60 %v2float_60_50 %v2float_70_70
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr("let x_1 : mat3x2<f32> = mat3x2<f32>("
- "vec2<f32>(50.0, 60.0), "
- "vec2<f32>(60.0, 50.0), "
- "vec2<f32>(70.0, 70.0));"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body),
+ HasSubstr("let x_1 : mat3x2<f32> = mat3x2<f32>("
+ "vec2<f32>(50.0f, 60.0f), "
+ "vec2<f32>(60.0f, 50.0f), "
+ "vec2<f32>(70.0f, 70.0f));"));
}
TEST_F(SpvParserTest_Composite_Construct, Array) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpCompositeConstruct %a_u_5 %uint_10 %uint_20 %uint_3 %uint_4 %uint_5
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(
- test::ToString(p->program(), ast_body),
- HasSubstr(
- "let x_1 : array<u32, 5u> = array<u32, 5u>(10u, 20u, 3u, 4u, 5u);"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body),
+ HasSubstr("let x_1 : array<u32, 5u> = array<u32, 5u>(10u, 20u, 3u, 4u, 5u);"));
}
TEST_F(SpvParserTest_Composite_Construct, Struct) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpCompositeConstruct %s_v2f_u_i %v2float_50_60 %uint_5 %int_30
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr("let x_1 : S = S(vec2<f32>(50.0, 60.0), 5u, 30);"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body),
+ HasSubstr("let x_1 : S = S(vec2<f32>(50.0f, 60.0f), 5u, 30i);"));
}
-TEST_F(SpvParserTest_Composite_Construct,
- ConstantComposite_Struct_NoDeduplication) {
- const auto assembly = Preamble() + R"(
+TEST_F(SpvParserTest_Composite_Construct, ConstantComposite_Struct_NoDeduplication) {
+ const auto assembly = Preamble() + R"(
%200 = OpTypeStruct %uint
%300 = OpTypeStruct %uint ; isomorphic structures
@@ -174,58 +171,58 @@ TEST_F(SpvParserTest_Composite_Construct,
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- const auto got = test::ToString(p->program(), ast_body);
- const auto expected = std::string(
- R"(let x_2 : S_1 = S_1(10u);
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ const auto got = test::ToString(p->program(), ast_body);
+ const auto expected = std::string(
+ R"(let x_2 : S_1 = S_1(10u);
let x_3 : S_2 = S_2(10u);
return;
)");
- EXPECT_EQ(got, expected) << got;
+ EXPECT_EQ(got, expected) << got;
}
using SpvParserTest_CompositeExtract = SpvParserTest;
TEST_F(SpvParserTest_CompositeExtract, Vector) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpCompositeExtract %float %v2float_50_60 1
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr("let x_1 : f32 = vec2<f32>(50.0, 60.0).y;"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body),
+ HasSubstr("let x_1 : f32 = vec2<f32>(50.0f, 60.0f).y;"));
}
TEST_F(SpvParserTest_CompositeExtract, Vector_IndexTooBigError) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpCompositeExtract %float %v2float_50_60 900
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_FALSE(fe.EmitBody());
- EXPECT_EQ(p->error(),
- "OpCompositeExtract %1 index value 900 is out of bounds for vector "
- "of 2 elements");
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_FALSE(fe.EmitBody());
+ EXPECT_EQ(p->error(),
+ "OpCompositeExtract %1 index value 900 is out of bounds for vector "
+ "of 2 elements");
}
TEST_F(SpvParserTest_CompositeExtract, Matrix) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%ptr = OpTypePointer Function %m3v2float
%100 = OpFunction %void None %voidfn
@@ -236,17 +233,17 @@ TEST_F(SpvParserTest_CompositeExtract, Matrix) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr("let x_2 : vec2<f32> = x_1[2u];"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body),
+ HasSubstr("let x_2 : vec2<f32> = x_1[2u];"));
}
TEST_F(SpvParserTest_CompositeExtract, Matrix_IndexTooBigError) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%ptr = OpTypePointer Function %m3v2float
%100 = OpFunction %void None %voidfn
@@ -257,17 +254,17 @@ TEST_F(SpvParserTest_CompositeExtract, Matrix_IndexTooBigError) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_FALSE(fe.EmitBody()) << p->error();
- EXPECT_EQ(p->error(),
- "OpCompositeExtract %2 index value 3 is out of bounds for matrix "
- "of 3 elements");
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_FALSE(fe.EmitBody()) << p->error();
+ EXPECT_EQ(p->error(),
+ "OpCompositeExtract %2 index value 3 is out of bounds for matrix "
+ "of 3 elements");
}
TEST_F(SpvParserTest_CompositeExtract, Matrix_Vector) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%ptr = OpTypePointer Function %m3v2float
%100 = OpFunction %void None %voidfn
@@ -278,17 +275,16 @@ TEST_F(SpvParserTest_CompositeExtract, Matrix_Vector) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr("let x_2 : f32 = x_1[2u].y;"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body), HasSubstr("let x_2 : f32 = x_1[2u].y;"));
}
TEST_F(SpvParserTest_CompositeExtract, Array) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%ptr = OpTypePointer Function %a_u_5
%100 = OpFunction %void None %voidfn
@@ -299,17 +295,16 @@ TEST_F(SpvParserTest_CompositeExtract, Array) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr("let x_2 : u32 = x_1[3u];"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body), HasSubstr("let x_2 : u32 = x_1[3u];"));
}
TEST_F(SpvParserTest_CompositeExtract, RuntimeArray_IsError) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%rtarr = OpTypeRuntimeArray %uint
%ptr = OpTypePointer Function %rtarr
@@ -321,16 +316,15 @@ TEST_F(SpvParserTest_CompositeExtract, RuntimeArray_IsError) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_FALSE(fe.EmitBody()) << p->error();
- EXPECT_THAT(p->error(),
- HasSubstr("can't do OpCompositeExtract on a runtime array: "));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_FALSE(fe.EmitBody()) << p->error();
+ EXPECT_THAT(p->error(), HasSubstr("can't do OpCompositeExtract on a runtime array: "));
}
TEST_F(SpvParserTest_CompositeExtract, Struct) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%ptr = OpTypePointer Function %s_v2f_u_i
%100 = OpFunction %void None %voidfn
@@ -341,17 +335,16 @@ TEST_F(SpvParserTest_CompositeExtract, Struct) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr("let x_2 : i32 = x_1.field2;"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body), HasSubstr("let x_2 : i32 = x_1.field2;"));
}
TEST_F(SpvParserTest_CompositeExtract, Struct_DifferOnlyInMemberName) {
- const std::string assembly = R"(
+ const std::string assembly = R"(
OpCapability Shader
OpMemoryModel Logical Simple
OpEntryPoint Fragment %100 "main"
@@ -381,23 +374,21 @@ TEST_F(SpvParserTest_CompositeExtract, Struct_DifferOnlyInMemberName) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto got = fe.ast_body();
- auto program = p->program();
- EXPECT_THAT(test::ToString(program, got),
- HasSubstr("let x_2 : u32 = x_1.algo;"))
- << test::ToString(program, got);
- EXPECT_THAT(test::ToString(program, got),
- HasSubstr("let x_4 : u32 = x_3.rithm;"))
- << test::ToString(program, got);
- p->SkipDumpingPending("crbug.com/tint/863");
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto got = fe.ast_body();
+ auto program = p->program();
+ EXPECT_THAT(test::ToString(program, got), HasSubstr("let x_2 : u32 = x_1.algo;"))
+ << test::ToString(program, got);
+ EXPECT_THAT(test::ToString(program, got), HasSubstr("let x_4 : u32 = x_3.rithm;"))
+ << test::ToString(program, got);
+ p->SkipDumpingPending("crbug.com/tint/863");
}
TEST_F(SpvParserTest_CompositeExtract, Struct_IndexTooBigError) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%ptr = OpTypePointer Function %s_v2f_u_i
%100 = OpFunction %void None %voidfn
@@ -408,17 +399,17 @@ TEST_F(SpvParserTest_CompositeExtract, Struct_IndexTooBigError) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_FALSE(fe.EmitBody());
- EXPECT_EQ(p->error(),
- "OpCompositeExtract %2 index value 40 is out of bounds for "
- "structure %27 having 3 members");
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_FALSE(fe.EmitBody());
+ EXPECT_EQ(p->error(),
+ "OpCompositeExtract %2 index value 40 is out of bounds for "
+ "structure %27 having 3 members");
}
TEST_F(SpvParserTest_CompositeExtract, Struct_Array_Matrix_Vector) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%a_mat = OpTypeArray %m3v2float %uint_3
%s = OpTypeStruct %uint %a_mat
%ptr = OpTypePointer Function %s
@@ -431,59 +422,59 @@ TEST_F(SpvParserTest_CompositeExtract, Struct_Array_Matrix_Vector) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr("let x_2 : f32 = x_1.field1[2u][0u].y;"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body),
+ HasSubstr("let x_2 : f32 = x_1.field1[2u][0u].y;"));
}
using SpvParserTest_CompositeInsert = SpvParserTest;
TEST_F(SpvParserTest_CompositeInsert, Vector) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpCompositeInsert %v2float %float_70 %v2float_50_60 1
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- const auto* expected =
- R"(var x_1_1 : vec2<f32> = vec2<f32>(50.0, 60.0);
-x_1_1.y = 70.0;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ const auto* expected =
+ R"(var x_1_1 : vec2<f32> = vec2<f32>(50.0f, 60.0f);
+x_1_1.y = 70.0f;
let x_1 : vec2<f32> = x_1_1;
return;
)";
- EXPECT_EQ(got, expected);
+ EXPECT_EQ(got, expected);
}
TEST_F(SpvParserTest_CompositeInsert, Vector_IndexTooBigError) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpCompositeInsert %v2float %float_70 %v2float_50_60 900
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_FALSE(fe.EmitBody());
- EXPECT_EQ(p->error(),
- "OpCompositeInsert %1 index value 900 is out of bounds for vector "
- "of 2 elements");
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_FALSE(fe.EmitBody());
+ EXPECT_EQ(p->error(),
+ "OpCompositeInsert %1 index value 900 is out of bounds for vector "
+ "of 2 elements");
}
TEST_F(SpvParserTest_CompositeInsert, Matrix) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%ptr = OpTypePointer Function %m3v2float
%100 = OpFunction %void None %voidfn
@@ -494,20 +485,20 @@ TEST_F(SpvParserTest_CompositeInsert, Matrix) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto body_str = test::ToString(p->program(), ast_body);
- EXPECT_THAT(body_str, HasSubstr(R"(var x_2_1 : mat3x2<f32> = x_1;
-x_2_1[2u] = vec2<f32>(50.0, 60.0);
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto body_str = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(body_str, HasSubstr(R"(var x_2_1 : mat3x2<f32> = x_1;
+x_2_1[2u] = vec2<f32>(50.0f, 60.0f);
let x_2 : mat3x2<f32> = x_2_1;
)")) << body_str;
}
TEST_F(SpvParserTest_CompositeInsert, Matrix_IndexTooBigError) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%ptr = OpTypePointer Function %m3v2float
%100 = OpFunction %void None %voidfn
@@ -518,17 +509,17 @@ TEST_F(SpvParserTest_CompositeInsert, Matrix_IndexTooBigError) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_FALSE(fe.EmitBody()) << p->error();
- EXPECT_EQ(p->error(),
- "OpCompositeInsert %2 index value 3 is out of bounds for matrix of "
- "3 elements");
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_FALSE(fe.EmitBody()) << p->error();
+ EXPECT_EQ(p->error(),
+ "OpCompositeInsert %2 index value 3 is out of bounds for matrix of "
+ "3 elements");
}
TEST_F(SpvParserTest_CompositeInsert, Matrix_Vector) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%ptr = OpTypePointer Function %m3v2float
%100 = OpFunction %void None %voidfn
@@ -539,21 +530,21 @@ TEST_F(SpvParserTest_CompositeInsert, Matrix_Vector) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto body_str = test::ToString(p->program(), ast_body);
- EXPECT_THAT(body_str, HasSubstr(R"(var x_2_1 : mat3x2<f32> = x_1;
-x_2_1[2u] = vec2<f32>(50.0, 60.0);
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto body_str = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(body_str, HasSubstr(R"(var x_2_1 : mat3x2<f32> = x_1;
+x_2_1[2u] = vec2<f32>(50.0f, 60.0f);
let x_2 : mat3x2<f32> = x_2_1;
return;
)")) << body_str;
}
TEST_F(SpvParserTest_CompositeInsert, Array) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%ptr = OpTypePointer Function %a_u_5
%100 = OpFunction %void None %voidfn
@@ -564,20 +555,20 @@ TEST_F(SpvParserTest_CompositeInsert, Array) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto body_str = test::ToString(p->program(), ast_body);
- EXPECT_THAT(body_str, HasSubstr(R"(var x_2_1 : array<u32, 5u> = x_1;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto body_str = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(body_str, HasSubstr(R"(var x_2_1 : array<u32, 5u> = x_1;
x_2_1[3u] = 20u;
let x_2 : array<u32, 5u> = x_2_1;
)")) << body_str;
}
TEST_F(SpvParserTest_CompositeInsert, RuntimeArray_IsError) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%rtarr = OpTypeRuntimeArray %uint
%ptr = OpTypePointer Function %rtarr
@@ -589,16 +580,15 @@ TEST_F(SpvParserTest_CompositeInsert, RuntimeArray_IsError) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_FALSE(fe.EmitBody()) << p->error();
- EXPECT_THAT(p->error(),
- HasSubstr("can't do OpCompositeInsert on a runtime array: "));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_FALSE(fe.EmitBody()) << p->error();
+ EXPECT_THAT(p->error(), HasSubstr("can't do OpCompositeInsert on a runtime array: "));
}
TEST_F(SpvParserTest_CompositeInsert, Struct) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%ptr = OpTypePointer Function %s_v2f_u_i
%100 = OpFunction %void None %voidfn
@@ -609,22 +599,22 @@ TEST_F(SpvParserTest_CompositeInsert, Struct) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto body_str = test::ToString(p->program(), ast_body);
- EXPECT_THAT(body_str, HasSubstr(R"(var x_36 : S;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto body_str = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(body_str, HasSubstr(R"(var x_36 : S;
let x_1 : S = x_36;
var x_2_1 : S = x_1;
-x_2_1.field2 = 30;
+x_2_1.field2 = 30i;
let x_2 : S = x_2_1;
)")) << body_str;
}
TEST_F(SpvParserTest_CompositeInsert, Struct_DifferOnlyInMemberName) {
- const std::string assembly = R"(
+ const std::string assembly = R"(
OpCapability Shader
OpMemoryModel Logical Simple
OpEntryPoint Fragment %100 "main"
@@ -658,13 +648,13 @@ TEST_F(SpvParserTest_CompositeInsert, Struct_DifferOnlyInMemberName) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- const auto got = test::ToString(p->program(), ast_body);
- const std::string expected = R"(var var0 : S;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ const auto got = test::ToString(p->program(), ast_body);
+ const std::string expected = R"(var var0 : S;
var var1 : S_1;
let x_1 : S = var0;
var x_2_1 : S = x_1;
@@ -676,11 +666,11 @@ x_4_1.rithm = 11u;
let x_4 : S_1 = x_4_1;
return;
)";
- EXPECT_EQ(got, expected) << got;
+ EXPECT_EQ(got, expected) << got;
}
TEST_F(SpvParserTest_CompositeInsert, Struct_IndexTooBigError) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%ptr = OpTypePointer Function %s_v2f_u_i
%100 = OpFunction %void None %voidfn
@@ -691,17 +681,17 @@ TEST_F(SpvParserTest_CompositeInsert, Struct_IndexTooBigError) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_FALSE(fe.EmitBody());
- EXPECT_EQ(p->error(),
- "OpCompositeInsert %2 index value 40 is out of bounds for "
- "structure %27 having 3 members");
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_FALSE(fe.EmitBody());
+ EXPECT_EQ(p->error(),
+ "OpCompositeInsert %2 index value 40 is out of bounds for "
+ "structure %27 having 3 members");
}
TEST_F(SpvParserTest_CompositeInsert, Struct_Array_Matrix_Vector) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%a_mat = OpTypeArray %m3v2float %uint_3
%s = OpTypeStruct %uint %a_mat
%ptr = OpTypePointer Function %s
@@ -714,16 +704,16 @@ TEST_F(SpvParserTest_CompositeInsert, Struct_Array_Matrix_Vector) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto body_str = test::ToString(p->program(), ast_body);
- EXPECT_THAT(body_str, HasSubstr(R"(var x_38 : S_1;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto body_str = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(body_str, HasSubstr(R"(var x_38 : S_1;
let x_1 : S_1 = x_38;
var x_2_1 : S_1 = x_1;
-x_2_1.field1[2u][0u].y = 70.0;
+x_2_1.field1[2u][0u].y = 70.0f;
let x_2 : S_1 = x_2_1;
)")) << body_str;
}
@@ -731,7 +721,7 @@ let x_2 : S_1 = x_2_1;
using SpvParserTest_CopyObject = SpvParserTest;
TEST_F(SpvParserTest_CopyObject, Scalar) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpCopyObject %uint %uint_3
@@ -739,19 +729,18 @@ TEST_F(SpvParserTest_CopyObject, Scalar) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr(R"(let x_1 : u32 = 3u;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body), HasSubstr(R"(let x_1 : u32 = 3u;
let x_2 : u32 = x_1;
)"));
}
TEST_F(SpvParserTest_CopyObject, Pointer) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%ptr = OpTypePointer Function %uint
%100 = OpFunction %void None %voidfn
@@ -762,13 +751,13 @@ TEST_F(SpvParserTest_CopyObject, Pointer) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr(R"(let x_1 : ptr<function, u32> = &(x_10);
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body),
+ HasSubstr(R"(let x_1 : ptr<function, u32> = &(x_10);
let x_2 : ptr<function, u32> = x_1;
)"));
}
@@ -776,8 +765,8 @@ let x_2 : ptr<function, u32> = x_1;
using SpvParserTest_VectorShuffle = SpvParserTest;
TEST_F(SpvParserTest_VectorShuffle, FunctionScopeOperands_UseBoth) {
- // Note that variables are generated for the vector operands.
- const auto assembly = Preamble() + R"(
+ // Note that variables are generated for the vector operands.
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpCopyObject %v2uint %v2uint_3_4
@@ -787,19 +776,17 @@ TEST_F(SpvParserTest_VectorShuffle, FunctionScopeOperands_UseBoth) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(
- test::ToString(p->program(), ast_body),
- HasSubstr(
- "let x_10 : vec4<u32> = vec4<u32>(x_2.y, x_2.x, x_1.y, x_1.x);"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body),
+ HasSubstr("let x_10 : vec4<u32> = vec4<u32>(x_2.y, x_2.x, x_1.y, x_1.x);"));
}
TEST_F(SpvParserTest_VectorShuffle, ConstantOperands_UseBoth) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%10 = OpVectorShuffle %v4uint %v2uint_3_4 %v2uint_4_3 3 2 1 0
@@ -807,21 +794,21 @@ TEST_F(SpvParserTest_VectorShuffle, ConstantOperands_UseBoth) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr("let x_10 : vec4<u32> = vec4<u32>("
- "vec2<u32>(4u, 3u).y, "
- "vec2<u32>(4u, 3u).x, "
- "vec2<u32>(3u, 4u).y, "
- "vec2<u32>(3u, 4u).x);"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body),
+ HasSubstr("let x_10 : vec4<u32> = vec4<u32>("
+ "vec2<u32>(4u, 3u).y, "
+ "vec2<u32>(4u, 3u).x, "
+ "vec2<u32>(3u, 4u).y, "
+ "vec2<u32>(3u, 4u).x);"));
}
TEST_F(SpvParserTest_VectorShuffle, ConstantOperands_AllOnesMapToNull) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpCopyObject %v2uint %v2uint_4_3
@@ -830,19 +817,18 @@ TEST_F(SpvParserTest_VectorShuffle, ConstantOperands_AllOnesMapToNull) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr("let x_10 : vec2<u32> = vec2<u32>(0u, x_1.y);"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body),
+ HasSubstr("let x_10 : vec2<u32> = vec2<u32>(0u, x_1.y);"));
}
-TEST_F(SpvParserTest_VectorShuffle,
- FunctionScopeOperands_MixedInputOperandSizes) {
- // Note that variables are generated for the vector operands.
- const auto assembly = Preamble() + R"(
+TEST_F(SpvParserTest_VectorShuffle, FunctionScopeOperands_MixedInputOperandSizes) {
+ // Note that variables are generated for the vector operands.
+ const auto assembly = Preamble() + R"(
%v3uint_3_4_5 = OpConstantComposite %v3uint %uint_3 %uint_4 %uint_5
%100 = OpFunction %void None %voidfn
%entry = OpLabel
@@ -853,17 +839,17 @@ TEST_F(SpvParserTest_VectorShuffle,
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr("let x_10 : vec2<u32> = vec2<u32>(x_1.y, x_3.z);"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body),
+ HasSubstr("let x_10 : vec2<u32> = vec2<u32>(x_1.y, x_3.z);"));
}
TEST_F(SpvParserTest_VectorShuffle, IndexTooBig_IsError) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%10 = OpVectorShuffle %v4uint %v2uint_3_4 %v2uint_4_3 9 2 1 0
@@ -871,18 +857,17 @@ TEST_F(SpvParserTest_VectorShuffle, IndexTooBig_IsError) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_FALSE(fe.EmitBody()) << p->error();
- EXPECT_THAT(p->error(),
- Eq("invalid vectorshuffle ID %10: index too large: 9"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_FALSE(fe.EmitBody()) << p->error();
+ EXPECT_THAT(p->error(), Eq("invalid vectorshuffle ID %10: index too large: 9"));
}
using SpvParserTest_VectorExtractDynamic = SpvParserTest;
TEST_F(SpvParserTest_VectorExtractDynamic, SignedIndex) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpCopyObject %v2uint %v2uint_3_4
@@ -892,17 +877,17 @@ TEST_F(SpvParserTest_VectorExtractDynamic, SignedIndex) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- const auto got = test::ToString(p->program(), ast_body);
- EXPECT_THAT(got, HasSubstr("let x_10 : u32 = x_1[x_2];")) << got;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ const auto got = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(got, HasSubstr("let x_10 : u32 = x_1[x_2];")) << got;
}
TEST_F(SpvParserTest_VectorExtractDynamic, UnsignedIndex) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpCopyObject %v2uint %v2uint_3_4
@@ -912,19 +897,19 @@ TEST_F(SpvParserTest_VectorExtractDynamic, UnsignedIndex) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- const auto got = test::ToString(p->program(), ast_body);
- EXPECT_THAT(got, HasSubstr("let x_10 : u32 = x_1[x_2];")) << got;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ const auto got = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(got, HasSubstr("let x_10 : u32 = x_1[x_2];")) << got;
}
using SpvParserTest_VectorInsertDynamic = SpvParserTest;
TEST_F(SpvParserTest_VectorInsertDynamic, Sample) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpCopyObject %v2uint %v2uint_3_4
@@ -935,13 +920,13 @@ TEST_F(SpvParserTest_VectorInsertDynamic, Sample) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- const auto got = test::ToString(p->program(), ast_body);
- EXPECT_THAT(got, HasSubstr(R"(var x_10_1 : vec2<u32> = x_1;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ const auto got = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(got, HasSubstr(R"(var x_10_1 : vec2<u32> = x_1;
x_10_1[x_3] = x_2;
let x_10 : vec2<u32> = x_10_1;
)")) << got
@@ -949,8 +934,8 @@ let x_10 : vec2<u32> = x_10_1;
}
TEST_F(SpvParserTest, DISABLED_WorkgroupSize_Overridable) {
- // TODO(dneto): Support specializable workgroup size. crbug.com/tint/504
- const auto* assembly = R"(
+ // TODO(dneto): Support specializable workgroup size. crbug.com/tint/504
+ const auto* assembly = R"(
OpCapability Shader
OpMemoryModel Logical Simple
OpEntryPoint GLCompute %100 "main"
@@ -978,12 +963,12 @@ TEST_F(SpvParserTest, DISABLED_WorkgroupSize_Overridable) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.Emit()) << p->error();
- const auto got = test::ToString(p->program());
- EXPECT_THAT(got, HasSubstr(R"(
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.Emit()) << p->error();
+ const auto got = test::ToString(p->program());
+ EXPECT_THAT(got, HasSubstr(R"(
VariableConst{
Decorations{
OverrideDecoration{0}
@@ -1018,7 +1003,7 @@ TEST_F(SpvParserTest, DISABLED_WorkgroupSize_Overridable) {
}
}
)")) << got;
- EXPECT_THAT(got, HasSubstr(R"(
+ EXPECT_THAT(got, HasSubstr(R"(
VariableDeclStatement{
VariableConst{
x_10
@@ -1064,7 +1049,7 @@ TEST_F(SpvParserTest, DISABLED_WorkgroupSize_Overridable) {
}
}
})"))
- << got << assembly;
+ << got << assembly;
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/reader/spirv/function_conversion_test.cc b/chromium/third_party/dawn/src/tint/reader/spirv/function_conversion_test.cc
index 4d77997159b..eab003129cf 100644
--- a/chromium/third_party/dawn/src/tint/reader/spirv/function_conversion_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/spirv/function_conversion_test.cc
@@ -24,7 +24,7 @@ using ::testing::Eq;
using ::testing::HasSubstr;
std::string Preamble() {
- return R"(
+ return R"(
OpCapability Shader
OpMemoryModel Logical Simple
OpEntryPoint Fragment %100 "main"
@@ -70,142 +70,133 @@ std::string Preamble() {
using SpvUnaryConversionTest = SpvParserTestBase<::testing::Test>;
TEST_F(SpvUnaryConversionTest, Bitcast_Scalar) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpBitcast %uint %float_50
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr("let x_1 : u32 = bitcast<u32>(50.0);"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body),
+ HasSubstr("let x_1 : u32 = bitcast<u32>(50.0f);"));
}
TEST_F(SpvUnaryConversionTest, Bitcast_Vector) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpBitcast %v2float %v2uint_10_20
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(
- test::ToString(p->program(), ast_body),
- HasSubstr(
- "let x_1 : vec2<f32> = bitcast<vec2<f32>>(vec2<u32>(10u, 20u));"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body),
+ HasSubstr("let x_1 : vec2<f32> = bitcast<vec2<f32>>(vec2<u32>(10u, 20u));"));
}
TEST_F(SpvUnaryConversionTest, ConvertSToF_BadArg) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpConvertSToF %float %void
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_FALSE(fe.EmitBody());
- EXPECT_THAT(p->error(),
- HasSubstr("unhandled expression for ID 2\n%2 = OpTypeVoid"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_FALSE(fe.EmitBody());
+ EXPECT_THAT(p->error(), HasSubstr("unhandled expression for ID 2\n%2 = OpTypeVoid"));
}
TEST_F(SpvUnaryConversionTest, ConvertUToF_BadArg) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpConvertUToF %float %void
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_FALSE(fe.EmitBody());
- EXPECT_THAT(p->error(),
- HasSubstr("unhandled expression for ID 2\n%2 = OpTypeVoid"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_FALSE(fe.EmitBody());
+ EXPECT_THAT(p->error(), HasSubstr("unhandled expression for ID 2\n%2 = OpTypeVoid"));
}
TEST_F(SpvUnaryConversionTest, ConvertFToS_BadArg) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpConvertFToS %float %void
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_FALSE(fe.EmitBody());
- EXPECT_THAT(p->error(),
- HasSubstr("unhandled expression for ID 2\n%2 = OpTypeVoid"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_FALSE(fe.EmitBody());
+ EXPECT_THAT(p->error(), HasSubstr("unhandled expression for ID 2\n%2 = OpTypeVoid"));
}
TEST_F(SpvUnaryConversionTest, ConvertFToU_BadArg) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpConvertFToU %float %void
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_FALSE(fe.EmitBody());
- EXPECT_THAT(p->error(),
- HasSubstr("unhandled expression for ID 2\n%2 = OpTypeVoid"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_FALSE(fe.EmitBody());
+ EXPECT_THAT(p->error(), HasSubstr("unhandled expression for ID 2\n%2 = OpTypeVoid"));
}
TEST_F(SpvUnaryConversionTest, ConvertSToF_Scalar_BadArgType) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpConvertSToF %float %false
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_FALSE(fe.EmitBody());
- EXPECT_THAT(p->error(),
- HasSubstr("operand for conversion to floating point must be "
- "integral scalar or vector"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_FALSE(fe.EmitBody());
+ EXPECT_THAT(p->error(), HasSubstr("operand for conversion to floating point must be "
+ "integral scalar or vector"));
}
TEST_F(SpvUnaryConversionTest, ConvertSToF_Vector_BadArgType) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpConvertSToF %v2float %v2bool_t_f
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_FALSE(fe.EmitBody());
- EXPECT_THAT(
- p->error(),
- HasSubstr("operand for conversion to floating point must be integral "
- "scalar or vector"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_FALSE(fe.EmitBody());
+ EXPECT_THAT(p->error(), HasSubstr("operand for conversion to floating point must be integral "
+ "scalar or vector"));
}
TEST_F(SpvUnaryConversionTest, ConvertSToF_Scalar_FromSigned) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%30 = OpCopyObject %int %int_30
@@ -213,17 +204,16 @@ TEST_F(SpvUnaryConversionTest, ConvertSToF_Scalar_FromSigned) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr("let x_1 : f32 = f32(x_30);"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body), HasSubstr("let x_1 : f32 = f32(x_30);"));
}
TEST_F(SpvUnaryConversionTest, ConvertSToF_Scalar_FromUnsigned) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%30 = OpCopyObject %uint %uint_10
@@ -231,17 +221,17 @@ TEST_F(SpvUnaryConversionTest, ConvertSToF_Scalar_FromUnsigned) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr("let x_1 : f32 = f32(bitcast<i32>(x_30));"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body),
+ HasSubstr("let x_1 : f32 = f32(bitcast<i32>(x_30));"));
}
TEST_F(SpvUnaryConversionTest, ConvertSToF_Vector_FromSigned) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%30 = OpCopyObject %v2int %v2int_30_40
@@ -249,17 +239,17 @@ TEST_F(SpvUnaryConversionTest, ConvertSToF_Vector_FromSigned) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr("let x_1 : vec2<f32> = vec2<f32>(x_30);"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body),
+ HasSubstr("let x_1 : vec2<f32> = vec2<f32>(x_30);"));
}
TEST_F(SpvUnaryConversionTest, ConvertSToF_Vector_FromUnsigned) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%30 = OpCopyObject %v2uint %v2uint_10_20
@@ -267,53 +257,49 @@ TEST_F(SpvUnaryConversionTest, ConvertSToF_Vector_FromUnsigned) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(
- test::ToString(p->program(), ast_body),
- HasSubstr("let x_1 : vec2<f32> = vec2<f32>(bitcast<vec2<i32>>(x_30));"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body),
+ HasSubstr("let x_1 : vec2<f32> = vec2<f32>(bitcast<vec2<i32>>(x_30));"));
}
TEST_F(SpvUnaryConversionTest, ConvertUToF_Scalar_BadArgType) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpConvertUToF %float %false
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_FALSE(fe.EmitBody());
- EXPECT_THAT(p->error(),
- HasSubstr("operand for conversion to floating point must be "
- "integral scalar or vector"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_FALSE(fe.EmitBody());
+ EXPECT_THAT(p->error(), HasSubstr("operand for conversion to floating point must be "
+ "integral scalar or vector"));
}
TEST_F(SpvUnaryConversionTest, ConvertUToF_Vector_BadArgType) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpConvertUToF %v2float %v2bool_t_f
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_FALSE(fe.EmitBody());
- EXPECT_THAT(
- p->error(),
- HasSubstr("operand for conversion to floating point must be integral "
- "scalar or vector"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_FALSE(fe.EmitBody());
+ EXPECT_THAT(p->error(), HasSubstr("operand for conversion to floating point must be integral "
+ "scalar or vector"));
}
TEST_F(SpvUnaryConversionTest, ConvertUToF_Scalar_FromSigned) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%30 = OpCopyObject %int %int_30
@@ -321,17 +307,17 @@ TEST_F(SpvUnaryConversionTest, ConvertUToF_Scalar_FromSigned) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr("let x_1 : f32 = f32(bitcast<u32>(x_30));"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body),
+ HasSubstr("let x_1 : f32 = f32(bitcast<u32>(x_30));"));
}
TEST_F(SpvUnaryConversionTest, ConvertUToF_Scalar_FromUnsigned) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%30 = OpCopyObject %uint %uint_10
@@ -339,17 +325,16 @@ TEST_F(SpvUnaryConversionTest, ConvertUToF_Scalar_FromUnsigned) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr("let x_1 : f32 = f32(x_30);"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body), HasSubstr("let x_1 : f32 = f32(x_30);"));
}
TEST_F(SpvUnaryConversionTest, ConvertUToF_Vector_FromSigned) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%30 = OpCopyObject %v2int %v2int_30_40
@@ -357,18 +342,17 @@ TEST_F(SpvUnaryConversionTest, ConvertUToF_Vector_FromSigned) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(
- test::ToString(p->program(), ast_body),
- HasSubstr("let x_1 : vec2<f32> = vec2<f32>(bitcast<vec2<u32>>(x_30));"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body),
+ HasSubstr("let x_1 : vec2<f32> = vec2<f32>(bitcast<vec2<u32>>(x_30));"));
}
TEST_F(SpvUnaryConversionTest, ConvertUToF_Vector_FromUnsigned) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%30 = OpCopyObject %v2uint %v2uint_10_20
@@ -376,53 +360,49 @@ TEST_F(SpvUnaryConversionTest, ConvertUToF_Vector_FromUnsigned) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr("let x_1 : vec2<f32> = vec2<f32>(x_30);"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body),
+ HasSubstr("let x_1 : vec2<f32> = vec2<f32>(x_30);"));
}
TEST_F(SpvUnaryConversionTest, ConvertFToS_Scalar_BadArgType) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpConvertFToS %int %uint_10
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_FALSE(fe.EmitBody());
- EXPECT_THAT(
- p->error(),
- HasSubstr("operand for conversion to signed integer must be floating "
- "point scalar or vector"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_FALSE(fe.EmitBody());
+ EXPECT_THAT(p->error(), HasSubstr("operand for conversion to signed integer must be floating "
+ "point scalar or vector"));
}
TEST_F(SpvUnaryConversionTest, ConvertFToS_Vector_BadArgType) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpConvertFToS %v2float %v2bool_t_f
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_FALSE(fe.EmitBody());
- EXPECT_THAT(
- p->error(),
- HasSubstr("operand for conversion to signed integer must be floating "
- "point scalar or vector"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_FALSE(fe.EmitBody());
+ EXPECT_THAT(p->error(), HasSubstr("operand for conversion to signed integer must be floating "
+ "point scalar or vector"));
}
TEST_F(SpvUnaryConversionTest, ConvertFToS_Scalar_ToSigned) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%30 = OpCopyObject %float %float_50
@@ -430,17 +410,16 @@ TEST_F(SpvUnaryConversionTest, ConvertFToS_Scalar_ToSigned) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr("let x_1 : i32 = i32(x_30);"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body), HasSubstr("let x_1 : i32 = i32(x_30);"));
}
TEST_F(SpvUnaryConversionTest, ConvertFToS_Scalar_ToUnsigned) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%30 = OpCopyObject %float %float_50
@@ -448,17 +427,17 @@ TEST_F(SpvUnaryConversionTest, ConvertFToS_Scalar_ToUnsigned) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr("let x_1 : u32 = bitcast<u32>(i32(x_30));"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body),
+ HasSubstr("let x_1 : u32 = bitcast<u32>(i32(x_30));"));
}
TEST_F(SpvUnaryConversionTest, ConvertFToS_Vector_ToSigned) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%30 = OpCopyObject %v2float %v2float_50_60
@@ -466,17 +445,17 @@ TEST_F(SpvUnaryConversionTest, ConvertFToS_Vector_ToSigned) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr("let x_1 : vec2<i32> = vec2<i32>(x_30);"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body),
+ HasSubstr("let x_1 : vec2<i32> = vec2<i32>(x_30);"));
}
TEST_F(SpvUnaryConversionTest, ConvertFToS_Vector_ToUnsigned) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%30 = OpCopyObject %v2float %v2float_50_60
@@ -484,54 +463,49 @@ TEST_F(SpvUnaryConversionTest, ConvertFToS_Vector_ToUnsigned) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(
- test::ToString(p->program(), ast_body),
- HasSubstr("let x_1 : vec2<u32> = bitcast<vec2<u32>>(vec2<i32>(x_30));"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body),
+ HasSubstr("let x_1 : vec2<u32> = bitcast<vec2<u32>>(vec2<i32>(x_30));"));
}
TEST_F(SpvUnaryConversionTest, ConvertFToU_Scalar_BadArgType) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpConvertFToU %int %uint_10
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_FALSE(fe.EmitBody());
- EXPECT_THAT(
- p->error(),
- HasSubstr("operand for conversion to unsigned integer must be floating "
- "point scalar or vector"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_FALSE(fe.EmitBody());
+ EXPECT_THAT(p->error(), HasSubstr("operand for conversion to unsigned integer must be floating "
+ "point scalar or vector"));
}
TEST_F(SpvUnaryConversionTest, ConvertFToU_Vector_BadArgType) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpConvertFToU %v2float %v2bool_t_f
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_FALSE(fe.EmitBody());
- EXPECT_THAT(
- p->error(),
- HasSubstr("operand for conversion to unsigned integer must be floating "
- "point scalar or vector"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_FALSE(fe.EmitBody());
+ EXPECT_THAT(p->error(), HasSubstr("operand for conversion to unsigned integer must be floating "
+ "point scalar or vector"));
}
TEST_F(SpvUnaryConversionTest, ConvertFToU_Scalar_ToSigned_IsError) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%30 = OpCopyObject %float %float_50
@@ -539,15 +513,15 @@ TEST_F(SpvUnaryConversionTest, ConvertFToU_Scalar_ToSigned_IsError) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- EXPECT_FALSE(p->Parse());
- EXPECT_FALSE(p->success());
- EXPECT_THAT(p->error(), HasSubstr("Expected unsigned int scalar or vector "
- "type as Result Type: ConvertFToU"));
+ auto p = parser(test::Assemble(assembly));
+ EXPECT_FALSE(p->Parse());
+ EXPECT_FALSE(p->success());
+ EXPECT_THAT(p->error(), HasSubstr("Expected unsigned int scalar or vector "
+ "type as Result Type: ConvertFToU"));
}
TEST_F(SpvUnaryConversionTest, ConvertFToU_Scalar_ToUnsigned) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%30 = OpCopyObject %float %float_50
@@ -555,17 +529,16 @@ TEST_F(SpvUnaryConversionTest, ConvertFToU_Scalar_ToUnsigned) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr("let x_1 : u32 = u32(x_30);"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body), HasSubstr("let x_1 : u32 = u32(x_30);"));
}
TEST_F(SpvUnaryConversionTest, ConvertFToU_Vector_ToSigned_IsError) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%30 = OpCopyObject %v2float %v2float_50_60
@@ -573,15 +546,15 @@ TEST_F(SpvUnaryConversionTest, ConvertFToU_Vector_ToSigned_IsError) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- EXPECT_FALSE(p->Parse());
- EXPECT_FALSE(p->success());
- EXPECT_THAT(p->error(), HasSubstr("Expected unsigned int scalar or vector "
- "type as Result Type: ConvertFToU"));
+ auto p = parser(test::Assemble(assembly));
+ EXPECT_FALSE(p->Parse());
+ EXPECT_FALSE(p->success());
+ EXPECT_THAT(p->error(), HasSubstr("Expected unsigned int scalar or vector "
+ "type as Result Type: ConvertFToU"));
}
TEST_F(SpvUnaryConversionTest, ConvertFToU_Vector_ToUnsigned) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%30 = OpCopyObject %v2float %v2float_50_60
@@ -589,18 +562,18 @@ TEST_F(SpvUnaryConversionTest, ConvertFToU_Vector_ToUnsigned) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr("let x_1 : vec2<u32> = vec2<u32>(x_30);"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body),
+ HasSubstr("let x_1 : vec2<u32> = vec2<u32>(x_30);"));
}
TEST_F(SpvUnaryConversionTest, ConvertFToU_HoistedValue) {
- // From crbug.com/tint/804
- const auto assembly = Preamble() + R"(
+ // From crbug.com/tint/804
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -631,13 +604,12 @@ OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr("let x_82 : u32 = u32(x_600);"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body), HasSubstr("let x_82 : u32 = u32(x_600);"));
}
// TODO(dneto): OpSConvert // only if multiple widths
diff --git a/chromium/third_party/dawn/src/tint/reader/spirv/function_decl_test.cc b/chromium/third_party/dawn/src/tint/reader/spirv/function_decl_test.cc
index 5fa6330fe48..8af9da21726 100644
--- a/chromium/third_party/dawn/src/tint/reader/spirv/function_decl_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/spirv/function_decl_test.cc
@@ -23,7 +23,7 @@ namespace {
using ::testing::HasSubstr;
std::string Preamble() {
- return R"(
+ return R"(
OpCapability Shader
OpMemoryModel Logical Simple
OpEntryPoint Fragment %100 "x_100"
@@ -34,15 +34,15 @@ std::string Preamble() {
/// @returns a SPIR-V assembly segment which assigns debug names
/// to particular IDs.
std::string Names(std::vector<std::string> ids) {
- std::ostringstream outs;
- for (auto& id : ids) {
- outs << " OpName %" << id << " \"" << id << "\"\n";
- }
- return outs.str();
+ std::ostringstream outs;
+ for (auto& id : ids) {
+ outs << " OpName %" << id << " \"" << id << "\"\n";
+ }
+ return outs.str();
}
std::string CommonTypes() {
- return R"(
+ return R"(
%void = OpTypeVoid
%voidfn = OpTypeFunction %void
%float = OpTypeFloat 32
@@ -53,7 +53,7 @@ std::string CommonTypes() {
}
std::string MainBody() {
- return R"(
+ return R"(
%100 = OpFunction %void None %voidfn
%entry_100 = OpLabel
OpReturn
@@ -62,46 +62,45 @@ std::string MainBody() {
}
TEST_F(SpvParserTest, Emit_VoidFunctionWithoutParams) {
- auto p = parser(test::Assemble(Preamble() + CommonTypes() + R"(
+ auto p = parser(test::Assemble(Preamble() + CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
OpReturn
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.Emit());
- auto got = test::ToString(p->program());
- std::string expect = R"(fn x_100() {
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.Emit());
+ auto got = test::ToString(p->program());
+ std::string expect = R"(fn x_100() {
return;
}
)";
- EXPECT_EQ(got, expect);
+ EXPECT_EQ(got, expect);
}
TEST_F(SpvParserTest, Emit_NonVoidResultType) {
- auto p = parser(test::Assemble(Preamble() + CommonTypes() + R"(
+ auto p = parser(test::Assemble(Preamble() + CommonTypes() + R"(
%fn_ret_float = OpTypeFunction %float
%200 = OpFunction %float None %fn_ret_float
%entry = OpLabel
OpReturnValue %float_0
OpFunctionEnd
)" + MainBody()));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(200);
- EXPECT_TRUE(fe.Emit());
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(200);
+ EXPECT_TRUE(fe.Emit());
- auto got = test::ToString(p->program());
- std::string expect = R"(fn x_200() -> f32 {
- return 0.0;
+ auto got = test::ToString(p->program());
+ std::string expect = R"(fn x_200() -> f32 {
+ return 0.0f;
}
)";
- EXPECT_THAT(got, HasSubstr(expect));
+ EXPECT_THAT(got, HasSubstr(expect));
}
TEST_F(SpvParserTest, Emit_MixedParamTypes) {
- auto p = parser(
- test::Assemble(Preamble() + Names({"a", "b", "c"}) + CommonTypes() + R"(
+ auto p = parser(test::Assemble(Preamble() + Names({"a", "b", "c"}) + CommonTypes() + R"(
%fn_mixed_params = OpTypeFunction %void %uint %float %int
%200 = OpFunction %void None %fn_mixed_params
@@ -112,20 +111,20 @@ TEST_F(SpvParserTest, Emit_MixedParamTypes) {
OpReturn
OpFunctionEnd
)" + MainBody()));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(200);
- EXPECT_TRUE(fe.Emit());
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(200);
+ EXPECT_TRUE(fe.Emit());
- auto got = test::ToString(p->program());
- std::string expect = R"(fn x_200(a : u32, b : f32, c : i32) {
+ auto got = test::ToString(p->program());
+ std::string expect = R"(fn x_200(a : u32, b : f32, c : i32) {
return;
}
)";
- EXPECT_THAT(got, HasSubstr(expect));
+ EXPECT_THAT(got, HasSubstr(expect));
}
TEST_F(SpvParserTest, Emit_GenerateParamNames) {
- auto p = parser(test::Assemble(Preamble() + CommonTypes() + R"(
+ auto p = parser(test::Assemble(Preamble() + CommonTypes() + R"(
%fn_mixed_params = OpTypeFunction %void %uint %float %int
%200 = OpFunction %void None %fn_mixed_params
@@ -136,16 +135,16 @@ TEST_F(SpvParserTest, Emit_GenerateParamNames) {
OpReturn
OpFunctionEnd
)" + MainBody()));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(200);
- EXPECT_TRUE(fe.Emit());
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(200);
+ EXPECT_TRUE(fe.Emit());
- auto got = test::ToString(p->program());
- std::string expect = R"(fn x_200(x_14 : u32, x_15 : f32, x_16 : i32) {
+ auto got = test::ToString(p->program());
+ std::string expect = R"(fn x_200(x_14 : u32, x_15 : f32, x_16 : i32) {
return;
}
)";
- EXPECT_THAT(got, HasSubstr(expect));
+ EXPECT_THAT(got, HasSubstr(expect));
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/reader/spirv/function_glsl_std_450_test.cc b/chromium/third_party/dawn/src/tint/reader/spirv/function_glsl_std_450_test.cc
index 7d7a485dcf5..4836a6851c9 100644
--- a/chromium/third_party/dawn/src/tint/reader/spirv/function_glsl_std_450_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/spirv/function_glsl_std_450_test.cc
@@ -23,7 +23,7 @@ namespace {
using ::testing::HasSubstr;
std::string Preamble() {
- return R"(
+ return R"(
OpCapability Shader
%glsl = OpExtInstImport "GLSL.std.450"
OpMemoryModel Logical GLSL450
@@ -127,12 +127,12 @@ std::string Preamble() {
}
struct GlslStd450Case {
- std::string opcode;
- std::string wgsl_func;
+ std::string opcode;
+ std::string wgsl_func;
};
inline std::ostream& operator<<(std::ostream& out, GlslStd450Case c) {
- out << "GlslStd450Case(" << c.opcode << " " << c.wgsl_func << ")";
- return out;
+ out << "GlslStd450Case(" << c.opcode << " " << c.wgsl_func << ")";
+ return out;
}
// Nomenclature:
@@ -171,240 +171,222 @@ using SpvParserTest_GlslStd450_Uinting_UintingUintingUinting =
SpvParserTestBase<::testing::TestWithParam<GlslStd450Case>>;
TEST_P(SpvParserTest_GlslStd450_Float_Floating, Scalar) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%1 = OpExtInst %float %glsl )" +
- GetParam().opcode + R"( %f1
+ GetParam().opcode + R"( %f1
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- const auto body = test::ToString(p->program(), ast_body);
- EXPECT_THAT(body,
- HasSubstr("let x_1 : f32 = " + GetParam().wgsl_func + "(f1);"))
- << body;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ const auto body = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(body, HasSubstr("let x_1 : f32 = " + GetParam().wgsl_func + "(f1);")) << body;
}
TEST_P(SpvParserTest_GlslStd450_Float_Floating, Vector) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%1 = OpExtInst %float %glsl )" +
- GetParam().opcode + R"( %v2f1
+ GetParam().opcode + R"( %v2f1
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- const auto body = test::ToString(p->program(), ast_body);
- EXPECT_THAT(body,
- HasSubstr("let x_1 : f32 = " + GetParam().wgsl_func + "(v2f1);"))
- << body;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ const auto body = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(body, HasSubstr("let x_1 : f32 = " + GetParam().wgsl_func + "(v2f1);")) << body;
}
TEST_P(SpvParserTest_GlslStd450_Float_FloatingFloating, Scalar) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%1 = OpExtInst %float %glsl )" +
- GetParam().opcode + R"( %f1 %f2
+ GetParam().opcode + R"( %f1 %f2
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- const auto body = test::ToString(p->program(), ast_body);
- EXPECT_THAT(
- body, HasSubstr("let x_1 : f32 = " + GetParam().wgsl_func + "(f1, f2);"))
- << body;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ const auto body = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(body, HasSubstr("let x_1 : f32 = " + GetParam().wgsl_func + "(f1, f2);")) << body;
}
TEST_P(SpvParserTest_GlslStd450_Float_FloatingFloating, Vector) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%1 = OpExtInst %float %glsl )" +
- GetParam().opcode + R"( %v2f1 %v2f2
+ GetParam().opcode + R"( %v2f1 %v2f2
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- const auto body = test::ToString(p->program(), ast_body);
- EXPECT_THAT(body, HasSubstr("let x_1 : f32 = " + GetParam().wgsl_func +
- "(v2f1, v2f2);"))
- << body;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ const auto body = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(body, HasSubstr("let x_1 : f32 = " + GetParam().wgsl_func + "(v2f1, v2f2);"))
+ << body;
}
TEST_P(SpvParserTest_GlslStd450_Floating_Floating, Scalar) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%1 = OpExtInst %float %glsl )" +
- GetParam().opcode + R"( %f1
+ GetParam().opcode + R"( %f1
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- const auto body = test::ToString(p->program(), ast_body);
- EXPECT_THAT(body,
- HasSubstr("let x_1 : f32 = " + GetParam().wgsl_func + "(f1);"))
- << body;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ const auto body = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(body, HasSubstr("let x_1 : f32 = " + GetParam().wgsl_func + "(f1);")) << body;
}
TEST_P(SpvParserTest_GlslStd450_Floating_Floating, Vector) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%1 = OpExtInst %v2float %glsl )" +
- GetParam().opcode + R"( %v2f1
+ GetParam().opcode + R"( %v2f1
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- const auto body = test::ToString(p->program(), ast_body);
- EXPECT_THAT(body, HasSubstr("let x_1 : vec2<f32> = " + GetParam().wgsl_func +
- "(v2f1);"))
- << body;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ const auto body = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(body, HasSubstr("let x_1 : vec2<f32> = " + GetParam().wgsl_func + "(v2f1);"))
+ << body;
}
TEST_P(SpvParserTest_GlslStd450_Floating_FloatingFloating, Scalar) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%1 = OpExtInst %float %glsl )" +
- GetParam().opcode + R"( %f1 %f2
+ GetParam().opcode + R"( %f1 %f2
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- const auto body = test::ToString(p->program(), ast_body);
- EXPECT_THAT(
- body, HasSubstr("let x_1 : f32 = " + GetParam().wgsl_func + "(f1, f2);"))
- << body;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ const auto body = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(body, HasSubstr("let x_1 : f32 = " + GetParam().wgsl_func + "(f1, f2);")) << body;
}
TEST_P(SpvParserTest_GlslStd450_Floating_FloatingFloating, Vector) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%1 = OpExtInst %v2float %glsl )" +
- GetParam().opcode + R"( %v2f1 %v2f2
+ GetParam().opcode + R"( %v2f1 %v2f2
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- const auto body = test::ToString(p->program(), ast_body);
- EXPECT_THAT(body, HasSubstr("let x_1 : vec2<f32> = " + GetParam().wgsl_func +
- "(v2f1, v2f2);"))
- << body;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ const auto body = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(body, HasSubstr("let x_1 : vec2<f32> = " + GetParam().wgsl_func + "(v2f1, v2f2);"))
+ << body;
}
TEST_P(SpvParserTest_GlslStd450_Floating_FloatingFloatingFloating, Scalar) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%1 = OpExtInst %float %glsl )" +
- GetParam().opcode + R"( %f1 %f2 %f3
+ GetParam().opcode + R"( %f1 %f2 %f3
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- const auto body = test::ToString(p->program(), ast_body);
- EXPECT_THAT(body, HasSubstr("let x_1 : f32 = " + GetParam().wgsl_func +
- "(f1, f2, f3);"))
- << body;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ const auto body = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(body, HasSubstr("let x_1 : f32 = " + GetParam().wgsl_func + "(f1, f2, f3);"))
+ << body;
}
TEST_P(SpvParserTest_GlslStd450_Floating_FloatingFloatingFloating, Vector) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%1 = OpExtInst %v2float %glsl )" +
- GetParam().opcode +
- R"( %v2f1 %v2f2 %v2f3
+ GetParam().opcode +
+ R"( %v2f1 %v2f2 %v2f3
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- const auto body = test::ToString(p->program(), ast_body);
- EXPECT_THAT(body, HasSubstr("let x_1 : vec2<f32> = " + GetParam().wgsl_func +
- "(v2f1, v2f2, v2f3);"))
- << body;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ const auto body = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(body,
+ HasSubstr("let x_1 : vec2<f32> = " + GetParam().wgsl_func + "(v2f1, v2f2, v2f3);"))
+ << body;
}
TEST_P(SpvParserTest_GlslStd450_Floating_FloatingInting, Scalar) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%1 = OpExtInst %float %glsl )" +
- GetParam().opcode + R"( %f1 %i1
+ GetParam().opcode + R"( %f1 %i1
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- const auto body = test::ToString(p->program(), ast_body);
- EXPECT_THAT(
- body, HasSubstr("let x_1 : f32 = " + GetParam().wgsl_func + "(f1, i1);"))
- << body;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ const auto body = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(body, HasSubstr("let x_1 : f32 = " + GetParam().wgsl_func + "(f1, i1);")) << body;
}
TEST_P(SpvParserTest_GlslStd450_Floating_FloatingInting, Vector) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%1 = OpExtInst %v2float %glsl )" +
- GetParam().opcode +
- R"( %v2f1 %v2i1
+ GetParam().opcode +
+ R"( %v2f1 %v2i1
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- const auto body = test::ToString(p->program(), ast_body);
- EXPECT_THAT(body, HasSubstr("let x_1 : vec2<f32> = " + GetParam().wgsl_func +
- "(v2f1, v2i1);"))
- << body;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ const auto body = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(body, HasSubstr("let x_1 : vec2<f32> = " + GetParam().wgsl_func + "(v2f1, v2i1);"))
+ << body;
}
TEST_P(SpvParserTest_GlslStd450_Float3_Float3Float3, Samples) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%1 = OpExtInst %v3float %glsl )" +
- GetParam().opcode +
- R"( %v3f1 %v3f2
+ GetParam().opcode +
+ R"( %v3f1 %v3f2
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- const auto body = test::ToString(p->program(), ast_body);
- EXPECT_THAT(body, HasSubstr("let x_1 : vec3<f32> = " + GetParam().wgsl_func +
- "(v3f1, v3f2);"))
- << body;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ const auto body = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(body, HasSubstr("let x_1 : vec3<f32> = " + GetParam().wgsl_func + "(v3f1, v3f2);"))
+ << body;
}
INSTANTIATE_TEST_SUITE_P(Samples,
@@ -413,8 +395,7 @@ INSTANTIATE_TEST_SUITE_P(Samples,
INSTANTIATE_TEST_SUITE_P(Samples,
SpvParserTest_GlslStd450_Float_FloatingFloating,
- ::testing::Values(GlslStd450Case{"Distance",
- "distance"}));
+ ::testing::Values(GlslStd450Case{"Distance", "distance"}));
INSTANTIATE_TEST_SUITE_P(Samples,
SpvParserTest_GlslStd450_Floating_Floating,
@@ -467,128 +448,120 @@ INSTANTIATE_TEST_SUITE_P(Samples,
SpvParserTest_GlslStd450_Float3_Float3Float3,
::testing::Values(GlslStd450Case{"Cross", "cross"}));
-INSTANTIATE_TEST_SUITE_P(
- Samples,
- SpvParserTest_GlslStd450_Floating_FloatingFloatingFloating,
- ::testing::ValuesIn(std::vector<GlslStd450Case>{
- {"NClamp", "clamp"},
- {"FClamp", "clamp"}, // WGSL FClamp promises more for NaN
- {"Fma", "fma"},
- {"FMix", "mix"},
- {"SmoothStep", "smoothStep"}}));
+INSTANTIATE_TEST_SUITE_P(Samples,
+ SpvParserTest_GlslStd450_Floating_FloatingFloatingFloating,
+ ::testing::ValuesIn(std::vector<GlslStd450Case>{
+ {"NClamp", "clamp"},
+ {"FClamp", "clamp"}, // WGSL FClamp promises more for NaN
+ {"Fma", "fma"},
+ {"FMix", "mix"},
+ {"SmoothStep", "smoothstep"}}));
TEST_P(SpvParserTest_GlslStd450_Inting_Inting, Scalar) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%1 = OpExtInst %int %glsl )" +
- GetParam().opcode +
- R"( %i1
+ GetParam().opcode +
+ R"( %i1
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- const auto body = test::ToString(p->program(), ast_body);
- EXPECT_THAT(body,
- HasSubstr("let x_1 : i32 = " + GetParam().wgsl_func + "(i1);"))
- << body;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ const auto body = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(body, HasSubstr("let x_1 : i32 = " + GetParam().wgsl_func + "(i1);")) << body;
}
TEST_P(SpvParserTest_GlslStd450_Inting_Inting, Vector) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%1 = OpExtInst %v2int %glsl )" +
- GetParam().opcode +
- R"( %v2i1
+ GetParam().opcode +
+ R"( %v2i1
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- const auto body = test::ToString(p->program(), ast_body);
- EXPECT_THAT(body, HasSubstr("let x_1 : vec2<i32> = " + GetParam().wgsl_func +
- "(v2i1);"))
- << body;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ const auto body = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(body, HasSubstr("let x_1 : vec2<i32> = " + GetParam().wgsl_func + "(v2i1);"))
+ << body;
}
TEST_P(SpvParserTest_GlslStd450_Inting_IntingInting, Scalar) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%1 = OpExtInst %int %glsl )" +
- GetParam().opcode +
- R"( %i1 %i2
+ GetParam().opcode +
+ R"( %i1 %i2
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- const auto body = test::ToString(p->program(), ast_body);
- EXPECT_THAT(
- body, HasSubstr("let x_1 : i32 = " + GetParam().wgsl_func + "(i1, i2);"))
- << body;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ const auto body = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(body, HasSubstr("let x_1 : i32 = " + GetParam().wgsl_func + "(i1, i2);")) << body;
}
TEST_P(SpvParserTest_GlslStd450_Inting_IntingInting, Vector) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%1 = OpExtInst %v2int %glsl )" +
- GetParam().opcode +
- R"( %v2i1 %v2i2
+ GetParam().opcode +
+ R"( %v2i1 %v2i2
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- const auto body = test::ToString(p->program(), ast_body);
- EXPECT_THAT(body, HasSubstr("let x_1 : vec2<i32> = " + GetParam().wgsl_func +
- "(v2i1, v2i2);"))
- << body;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ const auto body = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(body, HasSubstr("let x_1 : vec2<i32> = " + GetParam().wgsl_func + "(v2i1, v2i2);"))
+ << body;
}
TEST_P(SpvParserTest_GlslStd450_Inting_IntingIntingInting, Scalar) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%1 = OpExtInst %int %glsl )" +
- GetParam().opcode +
- R"( %i1 %i2 %i3
+ GetParam().opcode +
+ R"( %i1 %i2 %i3
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- const auto body = test::ToString(p->program(), ast_body);
- EXPECT_THAT(body, HasSubstr("let x_1 : i32 = " + GetParam().wgsl_func +
- "(i1, i2, i3);"))
- << body;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ const auto body = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(body, HasSubstr("let x_1 : i32 = " + GetParam().wgsl_func + "(i1, i2, i3);"))
+ << body;
}
TEST_P(SpvParserTest_GlslStd450_Inting_IntingIntingInting, Vector) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%1 = OpExtInst %v2int %glsl )" +
- GetParam().opcode +
- R"( %v2i1 %v2i2 %v2i3
+ GetParam().opcode +
+ R"( %v2i1 %v2i2 %v2i3
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- const auto body = test::ToString(p->program(), ast_body);
- EXPECT_THAT(body, HasSubstr("let x_1 : vec2<i32> = " + GetParam().wgsl_func +
- "(v2i1, v2i2, v2i3);"))
- << body;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ const auto body = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(body,
+ HasSubstr("let x_1 : vec2<i32> = " + GetParam().wgsl_func + "(v2i1, v2i2, v2i3);"))
+ << body;
}
INSTANTIATE_TEST_SUITE_P(Samples,
@@ -605,77 +578,73 @@ INSTANTIATE_TEST_SUITE_P(Samples,
::testing::Values(GlslStd450Case{"SClamp", "clamp"}));
TEST_P(SpvParserTest_GlslStd450_Uinting_UintingUinting, Scalar) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%1 = OpExtInst %uint %glsl )" +
- GetParam().opcode + R"( %u1 %u2
+ GetParam().opcode + R"( %u1 %u2
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- const auto body = test::ToString(p->program(), ast_body);
- EXPECT_THAT(
- body, HasSubstr("let x_1 : u32 = " + GetParam().wgsl_func + "(u1, u2);"))
- << body;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ const auto body = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(body, HasSubstr("let x_1 : u32 = " + GetParam().wgsl_func + "(u1, u2);")) << body;
}
TEST_P(SpvParserTest_GlslStd450_Uinting_UintingUinting, Vector) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%1 = OpExtInst %v2uint %glsl )" +
- GetParam().opcode +
- R"( %v2u1 %v2u2
+ GetParam().opcode +
+ R"( %v2u1 %v2u2
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- const auto body = test::ToString(p->program(), ast_body);
- EXPECT_THAT(body, HasSubstr("let x_1 : vec2<u32> = " + GetParam().wgsl_func +
- "(v2u1, v2u2);"))
- << body;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ const auto body = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(body, HasSubstr("let x_1 : vec2<u32> = " + GetParam().wgsl_func + "(v2u1, v2u2);"))
+ << body;
}
TEST_P(SpvParserTest_GlslStd450_Uinting_UintingUintingUinting, Scalar) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%1 = OpExtInst %uint %glsl )" +
- GetParam().opcode + R"( %u1 %u2 %u3
+ GetParam().opcode + R"( %u1 %u2 %u3
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- const auto body = test::ToString(p->program(), ast_body);
- EXPECT_THAT(body, HasSubstr("let x_1 : u32 = " + GetParam().wgsl_func +
- "(u1, u2, u3);"))
- << body;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ const auto body = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(body, HasSubstr("let x_1 : u32 = " + GetParam().wgsl_func + "(u1, u2, u3);"))
+ << body;
}
TEST_P(SpvParserTest_GlslStd450_Uinting_UintingUintingUinting, Vector) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%1 = OpExtInst %v2uint %glsl )" +
- GetParam().opcode +
- R"( %v2u1 %v2u2 %v2u3
+ GetParam().opcode +
+ R"( %v2u1 %v2u2 %v2u3
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- const auto body = test::ToString(p->program(), ast_body);
- EXPECT_THAT(body, HasSubstr("let x_1 : vec2<u32> = " + GetParam().wgsl_func +
- "(v2u1, v2u2, v2u3);"))
- << body;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ const auto body = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(body,
+ HasSubstr("let x_1 : vec2<u32> = " + GetParam().wgsl_func + "(v2u1, v2u2, v2u3);"))
+ << body;
}
INSTANTIATE_TEST_SUITE_P(Samples,
@@ -692,281 +661,269 @@ INSTANTIATE_TEST_SUITE_P(Samples,
// above.
TEST_F(SpvParserTest, Normalize_Scalar) {
- // Scalar normalize always results in 1.0
- const auto assembly = Preamble() + R"(
+ // Scalar normalize always results in 1.0
+ const auto assembly = Preamble() + R"(
%1 = OpExtInst %float %glsl Normalize %f1
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- const auto body = test::ToString(p->program(), ast_body);
- EXPECT_THAT(body, HasSubstr("let x_1 : f32 = 1.0;")) << body;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ const auto body = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(body, HasSubstr("let x_1 : f32 = 1.0f;")) << body;
}
TEST_F(SpvParserTest, Normalize_Vector2) {
- // Scalar normalize always results in 1.0
- const auto assembly = Preamble() + R"(
+ // Scalar normalize always results in 1.0
+ const auto assembly = Preamble() + R"(
%1 = OpExtInst %v2float %glsl Normalize %v2f1
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- const auto body = test::ToString(p->program(), ast_body);
- EXPECT_THAT(body, HasSubstr("let x_1 : vec2<f32> = normalize(v2f1);"))
- << body;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ const auto body = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(body, HasSubstr("let x_1 : vec2<f32> = normalize(v2f1);")) << body;
}
TEST_F(SpvParserTest, Normalize_Vector3) {
- // Scalar normalize always results in 1.0
- const auto assembly = Preamble() + R"(
+ // Scalar normalize always results in 1.0
+ const auto assembly = Preamble() + R"(
%1 = OpExtInst %v3float %glsl Normalize %v3f1
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- const auto body = test::ToString(p->program(), ast_body);
- EXPECT_THAT(body, HasSubstr("let x_1 : vec3<f32> = normalize(v3f1);"))
- << body;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ const auto body = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(body, HasSubstr("let x_1 : vec3<f32> = normalize(v3f1);")) << body;
}
TEST_F(SpvParserTest, Normalize_Vector4) {
- // Scalar normalize always results in 1.0
- const auto assembly = Preamble() + R"(
+ // Scalar normalize always results in 1.0
+ const auto assembly = Preamble() + R"(
%1 = OpExtInst %v4float %glsl Normalize %v4f1
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- const auto body = test::ToString(p->program(), ast_body);
- EXPECT_THAT(body, HasSubstr("let x_1 : vec4<f32> = normalize(v4f1);"))
- << body;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ const auto body = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(body, HasSubstr("let x_1 : vec4<f32> = normalize(v4f1);")) << body;
}
// Check that we convert signedness of operands and result type.
// This is needed for each of the integer-based extended instructions.
TEST_F(SpvParserTest, RectifyOperandsAndResult_SAbs) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%1 = OpExtInst %uint %glsl SAbs %u1
%2 = OpExtInst %v2uint %glsl SAbs %v2u1
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- const auto body = test::ToString(p->program(), ast_body);
- EXPECT_THAT(
- body,
- HasSubstr(R"(let x_1 : u32 = bitcast<u32>(abs(bitcast<i32>(u1)));)"))
- << body;
- EXPECT_THAT(
- body,
- HasSubstr(
- R"(let x_2 : vec2<u32> = bitcast<vec2<u32>>(abs(bitcast<vec2<i32>>(v2u1)));)"))
- << body;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ const auto body = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(body, HasSubstr(R"(let x_1 : u32 = bitcast<u32>(abs(bitcast<i32>(u1)));)")) << body;
+ EXPECT_THAT(
+ body,
+ HasSubstr(R"(let x_2 : vec2<u32> = bitcast<vec2<u32>>(abs(bitcast<vec2<i32>>(v2u1)));)"))
+ << body;
}
TEST_F(SpvParserTest, RectifyOperandsAndResult_SMax) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%1 = OpExtInst %uint %glsl SMax %u1 %u2
%2 = OpExtInst %v2uint %glsl SMax %v2u1 %v2u2
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto body = test::ToString(p->program(), ast_body);
- EXPECT_THAT(
- body,
- HasSubstr(
- R"(let x_1 : u32 = bitcast<u32>(max(bitcast<i32>(u1), bitcast<i32>(u2)));)"))
- << body;
- EXPECT_THAT(
- body,
- HasSubstr(
- R"(let x_2 : vec2<u32> = bitcast<vec2<u32>>(max(bitcast<vec2<i32>>(v2u1), bitcast<vec2<i32>>(v2u2)));)"))
- << body;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto body = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(
+ body,
+ HasSubstr(R"(let x_1 : u32 = bitcast<u32>(max(bitcast<i32>(u1), bitcast<i32>(u2)));)"))
+ << body;
+ EXPECT_THAT(
+ body,
+ HasSubstr(
+ R"(let x_2 : vec2<u32> = bitcast<vec2<u32>>(max(bitcast<vec2<i32>>(v2u1), bitcast<vec2<i32>>(v2u2)));)"))
+ << body;
}
TEST_F(SpvParserTest, RectifyOperandsAndResult_SMin) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%1 = OpExtInst %uint %glsl SMin %u1 %u2
%2 = OpExtInst %v2uint %glsl SMin %v2u1 %v2u2
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto body = test::ToString(p->program(), ast_body);
- EXPECT_THAT(
- body,
- HasSubstr(
- R"(let x_1 : u32 = bitcast<u32>(min(bitcast<i32>(u1), bitcast<i32>(u2)));)"))
- << body;
- EXPECT_THAT(
- body,
- HasSubstr(
- R"(let x_2 : vec2<u32> = bitcast<vec2<u32>>(min(bitcast<vec2<i32>>(v2u1), bitcast<vec2<i32>>(v2u2)));)"))
- << body;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto body = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(
+ body,
+ HasSubstr(R"(let x_1 : u32 = bitcast<u32>(min(bitcast<i32>(u1), bitcast<i32>(u2)));)"))
+ << body;
+ EXPECT_THAT(
+ body,
+ HasSubstr(
+ R"(let x_2 : vec2<u32> = bitcast<vec2<u32>>(min(bitcast<vec2<i32>>(v2u1), bitcast<vec2<i32>>(v2u2)));)"))
+ << body;
}
TEST_F(SpvParserTest, RectifyOperandsAndResult_SClamp) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%1 = OpExtInst %uint %glsl SClamp %u1 %i2 %u3
%2 = OpExtInst %v2uint %glsl SClamp %v2u1 %v2i2 %v2u3
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto body = test::ToString(p->program(), ast_body);
- EXPECT_THAT(
- body,
- HasSubstr(
- R"(let x_1 : u32 = bitcast<u32>(clamp(bitcast<i32>(u1), i2, bitcast<i32>(u3)));)"))
- << body;
- EXPECT_THAT(
- body,
- HasSubstr(
- R"(let x_2 : vec2<u32> = bitcast<vec2<u32>>(clamp(bitcast<vec2<i32>>(v2u1), v2i2, bitcast<vec2<i32>>(v2u3)));)"))
- << body;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto body = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(
+ body,
+ HasSubstr(
+ R"(let x_1 : u32 = bitcast<u32>(clamp(bitcast<i32>(u1), i2, bitcast<i32>(u3)));)"))
+ << body;
+ EXPECT_THAT(
+ body,
+ HasSubstr(
+ R"(let x_2 : vec2<u32> = bitcast<vec2<u32>>(clamp(bitcast<vec2<i32>>(v2u1), v2i2, bitcast<vec2<i32>>(v2u3)));)"))
+ << body;
}
TEST_F(SpvParserTest, RectifyOperandsAndResult_UMax) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%1 = OpExtInst %int %glsl UMax %i1 %i2
%2 = OpExtInst %v2int %glsl UMax %v2i1 %v2i2
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto body = test::ToString(p->program(), ast_body);
- EXPECT_THAT(
- body,
- HasSubstr(
- R"(let x_1 : i32 = bitcast<i32>(max(bitcast<u32>(i1), bitcast<u32>(i2)));)"))
- << body;
- EXPECT_THAT(
- body,
- HasSubstr(
- R"(let x_2 : vec2<i32> = bitcast<vec2<i32>>(max(bitcast<vec2<u32>>(v2i1), bitcast<vec2<u32>>(v2i2)));)"))
- << body;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto body = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(
+ body,
+ HasSubstr(R"(let x_1 : i32 = bitcast<i32>(max(bitcast<u32>(i1), bitcast<u32>(i2)));)"))
+ << body;
+ EXPECT_THAT(
+ body,
+ HasSubstr(
+ R"(let x_2 : vec2<i32> = bitcast<vec2<i32>>(max(bitcast<vec2<u32>>(v2i1), bitcast<vec2<u32>>(v2i2)));)"))
+ << body;
}
TEST_F(SpvParserTest, RectifyOperandsAndResult_UMin) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%1 = OpExtInst %int %glsl UMin %i1 %i2
%2 = OpExtInst %v2int %glsl UMin %v2i1 %v2i2
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto body = test::ToString(p->program(), ast_body);
- EXPECT_THAT(
- body,
- HasSubstr(
- R"(let x_1 : i32 = bitcast<i32>(min(bitcast<u32>(i1), bitcast<u32>(i2)));)"))
- << body;
- EXPECT_THAT(
- body,
- HasSubstr(
- R"(let x_2 : vec2<i32> = bitcast<vec2<i32>>(min(bitcast<vec2<u32>>(v2i1), bitcast<vec2<u32>>(v2i2)));)"))
- << body;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto body = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(
+ body,
+ HasSubstr(R"(let x_1 : i32 = bitcast<i32>(min(bitcast<u32>(i1), bitcast<u32>(i2)));)"))
+ << body;
+ EXPECT_THAT(
+ body,
+ HasSubstr(
+ R"(let x_2 : vec2<i32> = bitcast<vec2<i32>>(min(bitcast<vec2<u32>>(v2i1), bitcast<vec2<u32>>(v2i2)));)"))
+ << body;
}
TEST_F(SpvParserTest, RectifyOperandsAndResult_UClamp) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%1 = OpExtInst %int %glsl UClamp %i1 %u2 %i3
%2 = OpExtInst %v2int %glsl UClamp %v2i1 %v2u2 %v2i3
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto body = test::ToString(p->program(), ast_body);
- EXPECT_THAT(
- body,
- HasSubstr(
- R"(let x_1 : i32 = bitcast<i32>(clamp(bitcast<u32>(i1), u2, bitcast<u32>(i3)));)"))
- << body;
- EXPECT_THAT(
- body,
- HasSubstr(
- R"(let x_2 : vec2<i32> = bitcast<vec2<i32>>(clamp(bitcast<vec2<u32>>(v2i1), v2u2, bitcast<vec2<u32>>(v2i3)));)"))
- << body;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto body = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(
+ body,
+ HasSubstr(
+ R"(let x_1 : i32 = bitcast<i32>(clamp(bitcast<u32>(i1), u2, bitcast<u32>(i3)));)"))
+ << body;
+ EXPECT_THAT(
+ body,
+ HasSubstr(
+ R"(let x_2 : vec2<i32> = bitcast<vec2<i32>>(clamp(bitcast<vec2<u32>>(v2i1), v2u2, bitcast<vec2<u32>>(v2i3)));)"))
+ << body;
}
struct DataPackingCase {
- std::string opcode;
- std::string wgsl_func;
- uint32_t vec_size;
+ std::string opcode;
+ std::string wgsl_func;
+ uint32_t vec_size;
};
inline std::ostream& operator<<(std::ostream& out, DataPackingCase c) {
- out << "DataPacking(" << c.opcode << ")";
- return out;
+ out << "DataPacking(" << c.opcode << ")";
+ return out;
}
using SpvParserTest_GlslStd450_DataPacking =
SpvParserTestBase<::testing::TestWithParam<DataPackingCase>>;
TEST_P(SpvParserTest_GlslStd450_DataPacking, Valid) {
- auto param = GetParam();
- const auto assembly = Preamble() + R"(
+ auto param = GetParam();
+ const auto assembly = Preamble() + R"(
%1 = OpExtInst %uint %glsl )" +
- param.opcode +
- (param.vec_size == 2 ? " %v2f1" : " %v4f1") + R"(
+ param.opcode + (param.vec_size == 2 ? " %v2f1" : " %v4f1") + R"(
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- const auto body = test::ToString(p->program(), ast_body);
- EXPECT_THAT(body, HasSubstr("let x_1 : u32 = " + param.wgsl_func + "(v" +
- std::to_string(param.vec_size) + "f1);"))
- << body;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ const auto body = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(body, HasSubstr("let x_1 : u32 = " + param.wgsl_func + "(v" +
+ std::to_string(param.vec_size) + "f1);"))
+ << body;
}
INSTANTIATE_TEST_SUITE_P(Samples,
@@ -982,25 +939,24 @@ using SpvParserTest_GlslStd450_DataUnpacking =
SpvParserTestBase<::testing::TestWithParam<DataPackingCase>>;
TEST_P(SpvParserTest_GlslStd450_DataUnpacking, Valid) {
- auto param = GetParam();
- const auto assembly = Preamble() + R"(
+ auto param = GetParam();
+ const auto assembly = Preamble() + R"(
%1 = OpExtInst )" + (param.vec_size == 2 ? "%v2float" : "%v4float") +
- std::string(" %glsl ") + param.opcode + R"( %u1
+ std::string(" %glsl ") + param.opcode + R"( %u1
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- const auto body = test::ToString(p->program(), ast_body);
- EXPECT_THAT(body, HasSubstr("let x_1 : " +
- std::string(param.vec_size == 2 ? "vec2<f32>"
- : "vec4<f32>") +
-
- +" = " + param.wgsl_func + "(u1);"))
- << body;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ const auto body = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(body, HasSubstr("let x_1 : " +
+ std::string(param.vec_size == 2 ? "vec2<f32>" : "vec4<f32>") +
+
+ +" = " + param.wgsl_func + "(u1);"))
+ << body;
}
INSTANTIATE_TEST_SUITE_P(Samples,
@@ -1013,160 +969,157 @@ INSTANTIATE_TEST_SUITE_P(Samples,
{"UnpackHalf2x16", "unpack2x16float", 2}}));
TEST_F(SpvParserTest, GlslStd450_Refract_Scalar) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%1 = OpExtInst %float %glsl Refract %f1 %f2 %f3
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- const auto body = test::ToString(p->program(), ast_body);
- const auto* expected =
- R"(let x_1 : f32 = refract(vec2<f32>(f1, 0.0), vec2<f32>(f2, 0.0), f3).x;)";
-
- EXPECT_THAT(body, HasSubstr(expected)) << body;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ const auto body = test::ToString(p->program(), ast_body);
+ const auto* expected =
+ R"(let x_1 : f32 = refract(vec2<f32>(f1, 0.0f), vec2<f32>(f2, 0.0f), f3).x;)";
+
+ EXPECT_THAT(body, HasSubstr(expected)) << body;
}
TEST_F(SpvParserTest, GlslStd450_Refract_Vector) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%1 = OpExtInst %v2float %glsl Refract %v2f1 %v2f2 %f3
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- const auto body = test::ToString(p->program(), ast_body);
- const auto* expected = R"(let x_1 : vec2<f32> = refract(v2f1, v2f2, f3);)";
-
- EXPECT_THAT(body, HasSubstr(expected)) << body;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ const auto body = test::ToString(p->program(), ast_body);
+ const auto* expected = R"(let x_1 : vec2<f32> = refract(v2f1, v2f2, f3);)";
+
+ EXPECT_THAT(body, HasSubstr(expected)) << body;
}
TEST_F(SpvParserTest, GlslStd450_FaceForward_Scalar) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%99 = OpFAdd %float %f1 %f1 ; normal operand has only one use
%1 = OpExtInst %float %glsl FaceForward %99 %f2 %f3
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- const auto body = test::ToString(p->program(), ast_body);
- // The %99 sum only has one use. Ensure it is evaluated only once by
- // making a let-declaration for it, since it is the normal operand to
- // the builtin function, and code generation uses it twice.
- const auto* expected =
- R"(let x_1 : f32 = select(-(x_99), x_99, ((f2 * f3) < 0.0));)";
-
- EXPECT_THAT(body, HasSubstr(expected)) << body;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ const auto body = test::ToString(p->program(), ast_body);
+ // The %99 sum only has one use. Ensure it is evaluated only once by
+ // making a let-declaration for it, since it is the normal operand to
+ // the builtin function, and code generation uses it twice.
+ const auto* expected = R"(let x_1 : f32 = select(-(x_99), x_99, ((f2 * f3) < 0.0f));)";
+
+ EXPECT_THAT(body, HasSubstr(expected)) << body;
}
TEST_F(SpvParserTest, GlslStd450_FaceForward_Vector) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%99 = OpFAdd %v2float %v2f1 %v2f1
%1 = OpExtInst %v2float %glsl FaceForward %v2f1 %v2f2 %v2f3
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- const auto body = test::ToString(p->program(), ast_body);
- const auto* expected =
- R"(let x_1 : vec2<f32> = faceForward(v2f1, v2f2, v2f3);)";
-
- EXPECT_THAT(body, HasSubstr(expected)) << body;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ const auto body = test::ToString(p->program(), ast_body);
+ const auto* expected = R"(let x_1 : vec2<f32> = faceForward(v2f1, v2f2, v2f3);)";
+
+ EXPECT_THAT(body, HasSubstr(expected)) << body;
}
TEST_F(SpvParserTest, GlslStd450_Reflect_Scalar) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%98 = OpFAdd %float %f1 %f1 ; has only one use
%99 = OpFAdd %float %f2 %f2 ; has only one use
%1 = OpExtInst %float %glsl Reflect %98 %99
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- const auto body = test::ToString(p->program(), ast_body);
- // The %99 sum only has one use. Ensure it is evaluated only once by
- // making a let-declaration for it, since it is the normal operand to
- // the builtin function, and code generation uses it twice.
- const auto* expected =
- R"(let x_1 : f32 = (x_98 - (2.0 * (x_99 * (x_99 * x_98))));)";
-
- EXPECT_THAT(body, HasSubstr(expected)) << body;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ const auto body = test::ToString(p->program(), ast_body);
+ // The %99 sum only has one use. Ensure it is evaluated only once by
+ // making a let-declaration for it, since it is the normal operand to
+ // the builtin function, and code generation uses it twice.
+ const auto* expected = R"(let x_1 : f32 = (x_98 - (2.0f * (x_99 * (x_99 * x_98))));)";
+
+ EXPECT_THAT(body, HasSubstr(expected)) << body;
}
TEST_F(SpvParserTest, GlslStd450_Reflect_Vector) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%98 = OpFAdd %v2float %v2f1 %v2f1
%99 = OpFAdd %v2float %v2f2 %v2f2
%1 = OpExtInst %v2float %glsl Reflect %98 %99
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- const auto body = test::ToString(p->program(), ast_body);
- const auto* expected = R"(
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ const auto body = test::ToString(p->program(), ast_body);
+ const auto* expected = R"(
let x_98 : vec2<f32> = (v2f1 + v2f1);
let x_99 : vec2<f32> = (v2f2 + v2f2);
let x_1 : vec2<f32> = reflect(x_98, x_99);
)";
- EXPECT_THAT(body, HasSubstr(expected)) << body;
+ EXPECT_THAT(body, HasSubstr(expected)) << body;
}
// For ldexp with signed second argument, see above.
TEST_F(SpvParserTest, GlslStd450_Ldexp_Scalar_Float_Uint) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%1 = OpExtInst %float %glsl Ldexp %f1 %u1
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- const auto body = test::ToString(p->program(), ast_body);
- const auto* expected = "let x_1 : f32 = ldexp(f1, i32(u1));";
-
- EXPECT_THAT(body, HasSubstr(expected)) << body;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ const auto body = test::ToString(p->program(), ast_body);
+ const auto* expected = "let x_1 : f32 = ldexp(f1, i32(u1));";
+
+ EXPECT_THAT(body, HasSubstr(expected)) << body;
}
TEST_F(SpvParserTest, GlslStd450_Ldexp_Vector_Floatvec_Uintvec) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%1 = OpExtInst %v2float %glsl Ldexp %v2f1 %v2u1
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- const auto body = test::ToString(p->program(), ast_body);
- const auto* expected = "let x_1 : vec2<f32> = ldexp(v2f1, vec2<i32>(v2u1));";
-
- EXPECT_THAT(body, HasSubstr(expected)) << body;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ const auto body = test::ToString(p->program(), ast_body);
+ const auto* expected = "let x_1 : vec2<f32> = ldexp(v2f1, vec2<i32>(v2u1));";
+
+ EXPECT_THAT(body, HasSubstr(expected)) << body;
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/reader/spirv/function_logical_test.cc b/chromium/third_party/dawn/src/tint/reader/spirv/function_logical_test.cc
index 2d2e4e9322b..474af2e4433 100644
--- a/chromium/third_party/dawn/src/tint/reader/spirv/function_logical_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/spirv/function_logical_test.cc
@@ -23,7 +23,7 @@ namespace {
using ::testing::HasSubstr;
std::string Preamble() {
- return R"(
+ return R"(
OpCapability Shader
OpMemoryModel Logical Simple
OpEntryPoint Fragment %100 "main"
@@ -69,135 +69,130 @@ std::string Preamble() {
// Returns the AST dump for a given SPIR-V assembly constant.
std::string AstFor(std::string assembly) {
- if (assembly == "v2bool_t_f") {
- return "vec2<bool>(true, false)";
- }
- if (assembly == "v2bool_f_t") {
- return "vec2<bool>(false, true)";
- }
- if (assembly == "v2uint_10_20") {
- return "vec2<u32>(10u, 20u)";
- }
- if (assembly == "cast_uint_10") {
- return "bitcast<i32>(10u)";
- }
- if (assembly == "cast_uint_20") {
- return "bitcast<i32>(20u)";
- }
- if (assembly == "cast_v2uint_10_20") {
- return "bitcast<vec2<i32>>(vec2<u32>(10u, 20u))";
- }
- if (assembly == "v2uint_20_10") {
- return "vec2<u32>(20u, 10u)";
- }
- if (assembly == "cast_v2uint_20_10") {
- return "bitcast<vec2<i32>>(vec2<u32>(20u, 10u))";
- }
- if (assembly == "cast_int_30") {
- return "bitcast<u32>(30)";
- }
- if (assembly == "cast_int_40") {
- return "bitcast<u32>(40)";
- }
- if (assembly == "v2int_30_40") {
- return "vec2<i32>(30, 40)";
- }
- if (assembly == "cast_v2int_30_40") {
- return "bitcast<vec2<u32>>(vec2<i32>(30, 40))";
- }
- if (assembly == "v2int_40_30") {
- return "vec2<i32>(40, 30)";
- }
- if (assembly == "cast_v2int_40_30") {
- return "bitcast<vec2<u32>>(vec2<i32>(40, 30))";
- }
- if (assembly == "v2float_50_60") {
- return "vec2<f32>(50.0, 60.0)";
- }
- if (assembly == "v2float_60_50") {
- return "vec2<f32>(60.0, 50.0)";
- }
- return "bad case";
+ if (assembly == "v2bool_t_f") {
+ return "vec2<bool>(true, false)";
+ }
+ if (assembly == "v2bool_f_t") {
+ return "vec2<bool>(false, true)";
+ }
+ if (assembly == "v2uint_10_20") {
+ return "vec2<u32>(10u, 20u)";
+ }
+ if (assembly == "cast_uint_10") {
+ return "bitcast<i32>(10u)";
+ }
+ if (assembly == "cast_uint_20") {
+ return "bitcast<i32>(20u)";
+ }
+ if (assembly == "cast_v2uint_10_20") {
+ return "bitcast<vec2<i32>>(vec2<u32>(10u, 20u))";
+ }
+ if (assembly == "v2uint_20_10") {
+ return "vec2<u32>(20u, 10u)";
+ }
+ if (assembly == "cast_v2uint_20_10") {
+ return "bitcast<vec2<i32>>(vec2<u32>(20u, 10u))";
+ }
+ if (assembly == "cast_int_30") {
+ return "bitcast<u32>(30i)";
+ }
+ if (assembly == "cast_int_40") {
+ return "bitcast<u32>(40i)";
+ }
+ if (assembly == "v2int_30_40") {
+ return "vec2<i32>(30i, 40i)";
+ }
+ if (assembly == "cast_v2int_30_40") {
+ return "bitcast<vec2<u32>>(vec2<i32>(30i, 40i))";
+ }
+ if (assembly == "v2int_40_30") {
+ return "vec2<i32>(40i, 30i)";
+ }
+ if (assembly == "cast_v2int_40_30") {
+ return "bitcast<vec2<u32>>(vec2<i32>(40i, 30i))";
+ }
+ if (assembly == "v2float_50_60") {
+ return "vec2<f32>(50.0f, 60.0f)";
+ }
+ if (assembly == "v2float_60_50") {
+ return "vec2<f32>(60.0f, 50.0f)";
+ }
+ return "bad case";
}
using SpvUnaryLogicalTest = SpvParserTestBase<::testing::Test>;
TEST_F(SpvUnaryLogicalTest, LogicalNot_Scalar) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpLogicalNot %bool %true
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr("let x_1 : bool = !(true);"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body), HasSubstr("let x_1 : bool = !(true);"));
}
TEST_F(SpvUnaryLogicalTest, LogicalNot_Vector) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpLogicalNot %v2bool %v2bool_t_f
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr("let x_1 : vec2<bool> = !(vec2<bool>(true, false));"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body),
+ HasSubstr("let x_1 : vec2<bool> = !(vec2<bool>(true, false));"));
}
struct BinaryData {
- const std::string res_type;
- const std::string lhs;
- const std::string op;
- const std::string rhs;
- const std::string ast_type;
- const std::string ast_lhs;
- const std::string ast_op;
- const std::string ast_rhs;
+ const std::string res_type;
+ const std::string lhs;
+ const std::string op;
+ const std::string rhs;
+ const std::string ast_type;
+ const std::string ast_lhs;
+ const std::string ast_op;
+ const std::string ast_rhs;
};
inline std::ostream& operator<<(std::ostream& out, BinaryData data) {
- out << "BinaryData{" << data.res_type << "," << data.lhs << "," << data.op
- << "," << data.rhs << "," << data.ast_type << "," << data.ast_lhs << ","
- << data.ast_op << "," << data.ast_rhs << "}";
- return out;
+ out << "BinaryData{" << data.res_type << "," << data.lhs << "," << data.op << "," << data.rhs
+ << "," << data.ast_type << "," << data.ast_lhs << "," << data.ast_op << "," << data.ast_rhs
+ << "}";
+ return out;
}
-using SpvBinaryLogicalTest =
- SpvParserTestBase<::testing::TestWithParam<BinaryData>>;
+using SpvBinaryLogicalTest = SpvParserTestBase<::testing::TestWithParam<BinaryData>>;
TEST_P(SpvBinaryLogicalTest, EmitExpression) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = )" + GetParam().op +
- " %" + GetParam().res_type + " %" + GetParam().lhs +
- " %" + GetParam().rhs + R"(
+ " %" + GetParam().res_type + " %" + GetParam().lhs + " %" +
+ GetParam().rhs + R"(
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions())
- << p->error() << "\n"
- << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- std::ostringstream ss;
- ss << "let x_1 : " << GetParam().ast_type << " = (" << GetParam().ast_lhs
- << " " << GetParam().ast_op << " " << GetParam().ast_rhs << ");";
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body), HasSubstr(ss.str()))
- << assembly;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error() << "\n" << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ std::ostringstream ss;
+ ss << "let x_1 : " << GetParam().ast_type << " = (" << GetParam().ast_lhs << " "
+ << GetParam().ast_op << " " << GetParam().ast_rhs << ");";
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body), HasSubstr(ss.str())) << assembly;
}
INSTANTIATE_TEST_SUITE_P(
@@ -205,780 +200,718 @@ INSTANTIATE_TEST_SUITE_P(
SpvBinaryLogicalTest,
::testing::Values(
// uint uint
- BinaryData{"bool", "uint_10", "OpIEqual", "uint_20", "bool", "10u",
- "==", "20u"},
+ BinaryData{"bool", "uint_10", "OpIEqual", "uint_20", "bool", "10u", "==", "20u"},
// int int
- BinaryData{"bool", "int_30", "OpIEqual", "int_40", "bool", "30",
- "==", "40"},
+ BinaryData{"bool", "int_30", "OpIEqual", "int_40", "bool", "30i", "==", "40i"},
// uint int
BinaryData{"bool", "uint_10", "OpIEqual", "int_40", "bool", "10u",
- "==", "bitcast<u32>(40)"},
+ "==", "bitcast<u32>(40i)"},
// int uint
- BinaryData{"bool", "int_40", "OpIEqual", "uint_10", "bool", "40",
+ BinaryData{"bool", "int_40", "OpIEqual", "uint_10", "bool", "40i",
"==", "bitcast<i32>(10u)"},
// v2uint v2uint
- BinaryData{"v2bool", "v2uint_10_20", "OpIEqual", "v2uint_20_10",
- "vec2<bool>", AstFor("v2uint_10_20"),
- "==", AstFor("v2uint_20_10")},
+ BinaryData{"v2bool", "v2uint_10_20", "OpIEqual", "v2uint_20_10", "vec2<bool>",
+ AstFor("v2uint_10_20"), "==", AstFor("v2uint_20_10")},
// v2int v2int
- BinaryData{"v2bool", "v2int_30_40", "OpIEqual", "v2int_40_30",
- "vec2<bool>", AstFor("v2int_30_40"),
- "==", AstFor("v2int_40_30")}));
-
-INSTANTIATE_TEST_SUITE_P(
- SpvParserTest_FOrdEqual,
- SpvBinaryLogicalTest,
- ::testing::Values(BinaryData{"bool", "float_50", "OpFOrdEqual", "float_60",
- "bool", "50.0", "==", "60.0"},
- BinaryData{"v2bool", "v2float_50_60", "OpFOrdEqual",
- "v2float_60_50", "vec2<bool>",
- AstFor("v2float_50_60"),
- "==", AstFor("v2float_60_50")}));
+ BinaryData{"v2bool", "v2int_30_40", "OpIEqual", "v2int_40_30", "vec2<bool>",
+ AstFor("v2int_30_40"), "==", AstFor("v2int_40_30")}));
+
+INSTANTIATE_TEST_SUITE_P(SpvParserTest_FOrdEqual,
+ SpvBinaryLogicalTest,
+ ::testing::Values(BinaryData{"bool", "float_50", "OpFOrdEqual", "float_60",
+ "bool", "50.0f", "==", "60.0f"},
+ BinaryData{"v2bool", "v2float_50_60", "OpFOrdEqual",
+ "v2float_60_50", "vec2<bool>",
+ AstFor("v2float_50_60"),
+ "==", AstFor("v2float_60_50")}));
INSTANTIATE_TEST_SUITE_P(
SpvParserTest_INotEqual,
SpvBinaryLogicalTest,
::testing::Values(
// Both uint
- BinaryData{"bool", "uint_10", "OpINotEqual", "uint_20", "bool", "10u",
- "!=", "20u"},
+ BinaryData{"bool", "uint_10", "OpINotEqual", "uint_20", "bool", "10u", "!=", "20u"},
// Both int
- BinaryData{"bool", "int_30", "OpINotEqual", "int_40", "bool", "30",
- "!=", "40"},
+ BinaryData{"bool", "int_30", "OpINotEqual", "int_40", "bool", "30i", "!=", "40i"},
// uint int
BinaryData{"bool", "uint_10", "OpINotEqual", "int_40", "bool", "10u",
- "!=", "bitcast<u32>(40)"},
+ "!=", "bitcast<u32>(40i)"},
// int uint
- BinaryData{"bool", "int_40", "OpINotEqual", "uint_10", "bool", "40",
+ BinaryData{"bool", "int_40", "OpINotEqual", "uint_10", "bool", "40i",
"!=", "bitcast<i32>(10u)"},
// Both v2uint
- BinaryData{"v2bool", "v2uint_10_20", "OpINotEqual", "v2uint_20_10",
- "vec2<bool>", AstFor("v2uint_10_20"),
- "!=", AstFor("v2uint_20_10")},
+ BinaryData{"v2bool", "v2uint_10_20", "OpINotEqual", "v2uint_20_10", "vec2<bool>",
+ AstFor("v2uint_10_20"), "!=", AstFor("v2uint_20_10")},
// Both v2int
- BinaryData{"v2bool", "v2int_30_40", "OpINotEqual", "v2int_40_30",
- "vec2<bool>", AstFor("v2int_30_40"),
- "!=", AstFor("v2int_40_30")}));
-
-INSTANTIATE_TEST_SUITE_P(
- SpvParserTest_FOrdNotEqual,
- SpvBinaryLogicalTest,
- ::testing::Values(BinaryData{"bool", "float_50", "OpFOrdNotEqual",
- "float_60", "bool", "50.0", "!=", "60.0"},
- BinaryData{"v2bool", "v2float_50_60", "OpFOrdNotEqual",
- "v2float_60_50", "vec2<bool>",
- AstFor("v2float_50_60"),
- "!=", AstFor("v2float_60_50")}));
-
-INSTANTIATE_TEST_SUITE_P(
- SpvParserTest_FOrdLessThan,
- SpvBinaryLogicalTest,
- ::testing::Values(BinaryData{"bool", "float_50", "OpFOrdLessThan",
- "float_60", "bool", "50.0", "<", "60.0"},
- BinaryData{"v2bool", "v2float_50_60", "OpFOrdLessThan",
- "v2float_60_50", "vec2<bool>",
- AstFor("v2float_50_60"), "<",
- AstFor("v2float_60_50")}));
-
-INSTANTIATE_TEST_SUITE_P(
- SpvParserTest_FOrdLessThanEqual,
- SpvBinaryLogicalTest,
- ::testing::Values(BinaryData{"bool", "float_50", "OpFOrdLessThanEqual",
- "float_60", "bool", "50.0", "<=", "60.0"},
- BinaryData{"v2bool", "v2float_50_60",
- "OpFOrdLessThanEqual", "v2float_60_50",
- "vec2<bool>", AstFor("v2float_50_60"),
- "<=", AstFor("v2float_60_50")}));
-
-INSTANTIATE_TEST_SUITE_P(
- SpvParserTest_FOrdGreaterThan,
- SpvBinaryLogicalTest,
- ::testing::Values(BinaryData{"bool", "float_50", "OpFOrdGreaterThan",
- "float_60", "bool", "50.0", ">", "60.0"},
- BinaryData{"v2bool", "v2float_50_60", "OpFOrdGreaterThan",
- "v2float_60_50", "vec2<bool>",
- AstFor("v2float_50_60"), ">",
- AstFor("v2float_60_50")}));
-
-INSTANTIATE_TEST_SUITE_P(
- SpvParserTest_FOrdGreaterThanEqual,
- SpvBinaryLogicalTest,
- ::testing::Values(BinaryData{"bool", "float_50", "OpFOrdGreaterThanEqual",
- "float_60", "bool", "50.0", ">=", "60.0"},
- BinaryData{"v2bool", "v2float_50_60",
- "OpFOrdGreaterThanEqual", "v2float_60_50",
- "vec2<bool>", AstFor("v2float_50_60"),
- ">=", AstFor("v2float_60_50")}));
+ BinaryData{"v2bool", "v2int_30_40", "OpINotEqual", "v2int_40_30", "vec2<bool>",
+ AstFor("v2int_30_40"), "!=", AstFor("v2int_40_30")}));
+
+INSTANTIATE_TEST_SUITE_P(SpvParserTest_FOrdNotEqual,
+ SpvBinaryLogicalTest,
+ ::testing::Values(BinaryData{"bool", "float_50", "OpFOrdNotEqual",
+ "float_60", "bool", "50.0f", "!=", "60.0f"},
+ BinaryData{"v2bool", "v2float_50_60", "OpFOrdNotEqual",
+ "v2float_60_50", "vec2<bool>",
+ AstFor("v2float_50_60"),
+ "!=", AstFor("v2float_60_50")}));
+
+INSTANTIATE_TEST_SUITE_P(SpvParserTest_FOrdLessThan,
+ SpvBinaryLogicalTest,
+ ::testing::Values(BinaryData{"bool", "float_50", "OpFOrdLessThan",
+ "float_60", "bool", "50.0f", "<", "60.0f"},
+ BinaryData{"v2bool", "v2float_50_60", "OpFOrdLessThan",
+ "v2float_60_50", "vec2<bool>",
+ AstFor("v2float_50_60"), "<",
+ AstFor("v2float_60_50")}));
+
+INSTANTIATE_TEST_SUITE_P(SpvParserTest_FOrdLessThanEqual,
+ SpvBinaryLogicalTest,
+ ::testing::Values(BinaryData{"bool", "float_50", "OpFOrdLessThanEqual",
+ "float_60", "bool", "50.0f", "<=", "60.0f"},
+ BinaryData{"v2bool", "v2float_50_60",
+ "OpFOrdLessThanEqual", "v2float_60_50",
+ "vec2<bool>", AstFor("v2float_50_60"),
+ "<=", AstFor("v2float_60_50")}));
+
+INSTANTIATE_TEST_SUITE_P(SpvParserTest_FOrdGreaterThan,
+ SpvBinaryLogicalTest,
+ ::testing::Values(BinaryData{"bool", "float_50", "OpFOrdGreaterThan",
+ "float_60", "bool", "50.0f", ">", "60.0f"},
+ BinaryData{"v2bool", "v2float_50_60",
+ "OpFOrdGreaterThan", "v2float_60_50",
+ "vec2<bool>", AstFor("v2float_50_60"), ">",
+ AstFor("v2float_60_50")}));
+
+INSTANTIATE_TEST_SUITE_P(SpvParserTest_FOrdGreaterThanEqual,
+ SpvBinaryLogicalTest,
+ ::testing::Values(BinaryData{"bool", "float_50", "OpFOrdGreaterThanEqual",
+ "float_60", "bool", "50.0f", ">=", "60.0f"},
+ BinaryData{"v2bool", "v2float_50_60",
+ "OpFOrdGreaterThanEqual", "v2float_60_50",
+ "vec2<bool>", AstFor("v2float_50_60"),
+ ">=", AstFor("v2float_60_50")}));
INSTANTIATE_TEST_SUITE_P(
SpvParserTest_LogicalAnd,
SpvBinaryLogicalTest,
- ::testing::Values(BinaryData{"bool", "true", "OpLogicalAnd", "false",
- "bool", "true", "&", "false"},
- BinaryData{"v2bool", "v2bool_t_f", "OpLogicalAnd",
- "v2bool_f_t", "vec2<bool>",
- AstFor("v2bool_t_f"), "&",
- AstFor("v2bool_f_t")}));
+ ::testing::Values(BinaryData{"bool", "true", "OpLogicalAnd", "false", "bool", "true", "&",
+ "false"},
+ BinaryData{"v2bool", "v2bool_t_f", "OpLogicalAnd", "v2bool_f_t", "vec2<bool>",
+ AstFor("v2bool_t_f"), "&", AstFor("v2bool_f_t")}));
INSTANTIATE_TEST_SUITE_P(
SpvParserTest_LogicalOr,
SpvBinaryLogicalTest,
- ::testing::Values(BinaryData{"bool", "true", "OpLogicalOr", "false", "bool",
- "true", "|", "false"},
- BinaryData{"v2bool", "v2bool_t_f", "OpLogicalOr",
- "v2bool_f_t", "vec2<bool>",
- AstFor("v2bool_t_f"), "|",
- AstFor("v2bool_f_t")}));
+ ::testing::Values(BinaryData{"bool", "true", "OpLogicalOr", "false", "bool", "true", "|",
+ "false"},
+ BinaryData{"v2bool", "v2bool_t_f", "OpLogicalOr", "v2bool_f_t", "vec2<bool>",
+ AstFor("v2bool_t_f"), "|", AstFor("v2bool_f_t")}));
INSTANTIATE_TEST_SUITE_P(
SpvParserTest_LogicalEqual,
SpvBinaryLogicalTest,
- ::testing::Values(BinaryData{"bool", "true", "OpLogicalEqual", "false",
- "bool", "true", "==", "false"},
- BinaryData{"v2bool", "v2bool_t_f", "OpLogicalEqual",
- "v2bool_f_t", "vec2<bool>",
- AstFor("v2bool_t_f"),
- "==", AstFor("v2bool_f_t")}));
+ ::testing::Values(BinaryData{"bool", "true", "OpLogicalEqual", "false", "bool", "true",
+ "==", "false"},
+ BinaryData{"v2bool", "v2bool_t_f", "OpLogicalEqual", "v2bool_f_t",
+ "vec2<bool>", AstFor("v2bool_t_f"), "==", AstFor("v2bool_f_t")}));
INSTANTIATE_TEST_SUITE_P(
SpvParserTest_LogicalNotEqual,
SpvBinaryLogicalTest,
- ::testing::Values(BinaryData{"bool", "true", "OpLogicalNotEqual", "false",
- "bool", "true", "!=", "false"},
- BinaryData{"v2bool", "v2bool_t_f", "OpLogicalNotEqual",
- "v2bool_f_t", "vec2<bool>",
- AstFor("v2bool_t_f"),
- "!=", AstFor("v2bool_f_t")}));
+ ::testing::Values(BinaryData{"bool", "true", "OpLogicalNotEqual", "false", "bool", "true",
+ "!=", "false"},
+ BinaryData{"v2bool", "v2bool_t_f", "OpLogicalNotEqual", "v2bool_f_t",
+ "vec2<bool>", AstFor("v2bool_t_f"), "!=", AstFor("v2bool_f_t")}));
INSTANTIATE_TEST_SUITE_P(
SpvParserTest_UGreaterThan,
SpvBinaryLogicalTest,
::testing::Values(
// Both unsigned
- BinaryData{"bool", "uint_10", "OpUGreaterThan", "uint_20", "bool",
- "10u", ">", "20u"},
+ BinaryData{"bool", "uint_10", "OpUGreaterThan", "uint_20", "bool", "10u", ">", "20u"},
// First arg signed
- BinaryData{"bool", "int_30", "OpUGreaterThan", "uint_20", "bool",
- AstFor("cast_int_30"), ">", "20u"},
+ BinaryData{"bool", "int_30", "OpUGreaterThan", "uint_20", "bool", AstFor("cast_int_30"),
+ ">", "20u"},
// Second arg signed
- BinaryData{"bool", "uint_10", "OpUGreaterThan", "int_40", "bool", "10u",
- ">", AstFor("cast_int_40")},
+ BinaryData{"bool", "uint_10", "OpUGreaterThan", "int_40", "bool", "10u", ">",
+ AstFor("cast_int_40")},
// Vector, both unsigned
- BinaryData{"v2bool", "v2uint_10_20", "OpUGreaterThan", "v2uint_20_10",
- "vec2<bool>", AstFor("v2uint_10_20"), ">",
- AstFor("v2uint_20_10")},
+ BinaryData{"v2bool", "v2uint_10_20", "OpUGreaterThan", "v2uint_20_10", "vec2<bool>",
+ AstFor("v2uint_10_20"), ">", AstFor("v2uint_20_10")},
// First arg signed
- BinaryData{"v2bool", "v2int_30_40", "OpUGreaterThan", "v2uint_20_10",
- "vec2<bool>", AstFor("cast_v2int_30_40"), ">",
- AstFor("v2uint_20_10")},
+ BinaryData{"v2bool", "v2int_30_40", "OpUGreaterThan", "v2uint_20_10", "vec2<bool>",
+ AstFor("cast_v2int_30_40"), ">", AstFor("v2uint_20_10")},
// Second arg signed
- BinaryData{"v2bool", "v2uint_10_20", "OpUGreaterThan", "v2int_40_30",
- "vec2<bool>", AstFor("v2uint_10_20"), ">",
- AstFor("cast_v2int_40_30")}));
+ BinaryData{"v2bool", "v2uint_10_20", "OpUGreaterThan", "v2int_40_30", "vec2<bool>",
+ AstFor("v2uint_10_20"), ">", AstFor("cast_v2int_40_30")}));
INSTANTIATE_TEST_SUITE_P(
SpvParserTest_UGreaterThanEqual,
SpvBinaryLogicalTest,
::testing::Values(
// Both unsigned
- BinaryData{"bool", "uint_10", "OpUGreaterThanEqual", "uint_20", "bool",
- "10u", ">=", "20u"},
+ BinaryData{"bool", "uint_10", "OpUGreaterThanEqual", "uint_20", "bool", "10u", ">=", "20u"},
// First arg signed
BinaryData{"bool", "int_30", "OpUGreaterThanEqual", "uint_20", "bool",
AstFor("cast_int_30"), ">=", "20u"},
// Second arg signed
- BinaryData{"bool", "uint_10", "OpUGreaterThanEqual", "int_40", "bool",
- "10u", ">=", AstFor("cast_int_40")},
+ BinaryData{"bool", "uint_10", "OpUGreaterThanEqual", "int_40", "bool", "10u",
+ ">=", AstFor("cast_int_40")},
// Vector, both unsigned
- BinaryData{"v2bool", "v2uint_10_20", "OpUGreaterThanEqual",
- "v2uint_20_10", "vec2<bool>", AstFor("v2uint_10_20"),
- ">=", AstFor("v2uint_20_10")},
+ BinaryData{"v2bool", "v2uint_10_20", "OpUGreaterThanEqual", "v2uint_20_10", "vec2<bool>",
+ AstFor("v2uint_10_20"), ">=", AstFor("v2uint_20_10")},
// First arg signed
- BinaryData{"v2bool", "v2int_30_40", "OpUGreaterThanEqual",
- "v2uint_20_10", "vec2<bool>", AstFor("cast_v2int_30_40"),
- ">=", AstFor("v2uint_20_10")},
+ BinaryData{"v2bool", "v2int_30_40", "OpUGreaterThanEqual", "v2uint_20_10", "vec2<bool>",
+ AstFor("cast_v2int_30_40"), ">=", AstFor("v2uint_20_10")},
// Second arg signed
- BinaryData{"v2bool", "v2uint_10_20", "OpUGreaterThanEqual",
- "v2int_40_30", "vec2<bool>", AstFor("v2uint_10_20"),
- ">=", AstFor("cast_v2int_40_30")}));
+ BinaryData{"v2bool", "v2uint_10_20", "OpUGreaterThanEqual", "v2int_40_30", "vec2<bool>",
+ AstFor("v2uint_10_20"), ">=", AstFor("cast_v2int_40_30")}));
INSTANTIATE_TEST_SUITE_P(
SpvParserTest_ULessThan,
SpvBinaryLogicalTest,
::testing::Values(
// Both unsigned
- BinaryData{"bool", "uint_10", "OpULessThan", "uint_20", "bool", "10u",
- "<", "20u"},
+ BinaryData{"bool", "uint_10", "OpULessThan", "uint_20", "bool", "10u", "<", "20u"},
// First arg signed
- BinaryData{"bool", "int_30", "OpULessThan", "uint_20", "bool",
- AstFor("cast_int_30"), "<", "20u"},
+ BinaryData{"bool", "int_30", "OpULessThan", "uint_20", "bool", AstFor("cast_int_30"), "<",
+ "20u"},
// Second arg signed
- BinaryData{"bool", "uint_10", "OpULessThan", "int_40", "bool", "10u",
- "<", AstFor("cast_int_40")},
+ BinaryData{"bool", "uint_10", "OpULessThan", "int_40", "bool", "10u", "<",
+ AstFor("cast_int_40")},
// Vector, both unsigned
- BinaryData{"v2bool", "v2uint_10_20", "OpULessThan", "v2uint_20_10",
- "vec2<bool>", AstFor("v2uint_10_20"), "<",
- AstFor("v2uint_20_10")},
+ BinaryData{"v2bool", "v2uint_10_20", "OpULessThan", "v2uint_20_10", "vec2<bool>",
+ AstFor("v2uint_10_20"), "<", AstFor("v2uint_20_10")},
// First arg signed
- BinaryData{"v2bool", "v2int_30_40", "OpULessThan", "v2uint_20_10",
- "vec2<bool>", AstFor("cast_v2int_30_40"), "<",
- AstFor("v2uint_20_10")},
+ BinaryData{"v2bool", "v2int_30_40", "OpULessThan", "v2uint_20_10", "vec2<bool>",
+ AstFor("cast_v2int_30_40"), "<", AstFor("v2uint_20_10")},
// Second arg signed
- BinaryData{"v2bool", "v2uint_10_20", "OpULessThan", "v2int_40_30",
- "vec2<bool>", AstFor("v2uint_10_20"), "<",
- AstFor("cast_v2int_40_30")}));
+ BinaryData{"v2bool", "v2uint_10_20", "OpULessThan", "v2int_40_30", "vec2<bool>",
+ AstFor("v2uint_10_20"), "<", AstFor("cast_v2int_40_30")}));
INSTANTIATE_TEST_SUITE_P(
SpvParserTest_ULessThanEqual,
SpvBinaryLogicalTest,
::testing::Values(
// Both unsigned
- BinaryData{"bool", "uint_10", "OpULessThanEqual", "uint_20", "bool",
- "10u", "<=", "20u"},
+ BinaryData{"bool", "uint_10", "OpULessThanEqual", "uint_20", "bool", "10u", "<=", "20u"},
// First arg signed
- BinaryData{"bool", "int_30", "OpULessThanEqual", "uint_20", "bool",
- AstFor("cast_int_30"), "<=", "20u"},
+ BinaryData{"bool", "int_30", "OpULessThanEqual", "uint_20", "bool", AstFor("cast_int_30"),
+ "<=", "20u"},
// Second arg signed
- BinaryData{"bool", "uint_10", "OpULessThanEqual", "int_40", "bool",
- "10u", "<=", AstFor("cast_int_40")},
+ BinaryData{"bool", "uint_10", "OpULessThanEqual", "int_40", "bool", "10u",
+ "<=", AstFor("cast_int_40")},
// Vector, both unsigned
- BinaryData{"v2bool", "v2uint_10_20", "OpULessThanEqual", "v2uint_20_10",
- "vec2<bool>", AstFor("v2uint_10_20"),
- "<=", AstFor("v2uint_20_10")},
+ BinaryData{"v2bool", "v2uint_10_20", "OpULessThanEqual", "v2uint_20_10", "vec2<bool>",
+ AstFor("v2uint_10_20"), "<=", AstFor("v2uint_20_10")},
// First arg signed
- BinaryData{"v2bool", "v2int_30_40", "OpULessThanEqual", "v2uint_20_10",
- "vec2<bool>", AstFor("cast_v2int_30_40"),
- "<=", AstFor("v2uint_20_10")},
+ BinaryData{"v2bool", "v2int_30_40", "OpULessThanEqual", "v2uint_20_10", "vec2<bool>",
+ AstFor("cast_v2int_30_40"), "<=", AstFor("v2uint_20_10")},
// Second arg signed
- BinaryData{"v2bool", "v2uint_10_20", "OpULessThanEqual", "v2int_40_30",
- "vec2<bool>", AstFor("v2uint_10_20"),
- "<=", AstFor("cast_v2int_40_30")}));
+ BinaryData{"v2bool", "v2uint_10_20", "OpULessThanEqual", "v2int_40_30", "vec2<bool>",
+ AstFor("v2uint_10_20"), "<=", AstFor("cast_v2int_40_30")}));
INSTANTIATE_TEST_SUITE_P(
SpvParserTest_SGreaterThan,
SpvBinaryLogicalTest,
::testing::Values(
// Both signed
- BinaryData{"bool", "int_30", "OpSGreaterThan", "int_40", "bool", "30",
- ">", "40"},
+ BinaryData{"bool", "int_30", "OpSGreaterThan", "int_40", "bool", "30i", ">", "40i"},
// First arg unsigned
- BinaryData{"bool", "uint_10", "OpSGreaterThan", "int_40", "bool",
- AstFor("cast_uint_10"), ">", "40"},
+ BinaryData{"bool", "uint_10", "OpSGreaterThan", "int_40", "bool", AstFor("cast_uint_10"),
+ ">", "40i"},
// Second arg unsigned
- BinaryData{"bool", "int_30", "OpSGreaterThan", "uint_20", "bool", "30",
- ">", AstFor("cast_uint_20")},
+ BinaryData{"bool", "int_30", "OpSGreaterThan", "uint_20", "bool", "30i", ">",
+ AstFor("cast_uint_20")},
// Vector, both signed
- BinaryData{"v2bool", "v2int_30_40", "OpSGreaterThan", "v2int_40_30",
- "vec2<bool>", AstFor("v2int_30_40"), ">",
- AstFor("v2int_40_30")},
+ BinaryData{"v2bool", "v2int_30_40", "OpSGreaterThan", "v2int_40_30", "vec2<bool>",
+ AstFor("v2int_30_40"), ">", AstFor("v2int_40_30")},
// First arg unsigned
- BinaryData{"v2bool", "v2uint_10_20", "OpSGreaterThan", "v2int_40_30",
- "vec2<bool>", AstFor("cast_v2uint_10_20"), ">",
- AstFor("v2int_40_30")},
+ BinaryData{"v2bool", "v2uint_10_20", "OpSGreaterThan", "v2int_40_30", "vec2<bool>",
+ AstFor("cast_v2uint_10_20"), ">", AstFor("v2int_40_30")},
// Second arg unsigned
- BinaryData{"v2bool", "v2int_30_40", "OpSGreaterThan", "v2uint_20_10",
- "vec2<bool>", AstFor("v2int_30_40"), ">",
- AstFor("cast_v2uint_20_10")}));
+ BinaryData{"v2bool", "v2int_30_40", "OpSGreaterThan", "v2uint_20_10", "vec2<bool>",
+ AstFor("v2int_30_40"), ">", AstFor("cast_v2uint_20_10")}));
INSTANTIATE_TEST_SUITE_P(
SpvParserTest_SGreaterThanEqual,
SpvBinaryLogicalTest,
::testing::Values(
// Both signed
- BinaryData{"bool", "int_30", "OpSGreaterThanEqual", "int_40", "bool",
- "30", ">=", "40"},
+ BinaryData{"bool", "int_30", "OpSGreaterThanEqual", "int_40", "bool", "30i", ">=", "40i"},
// First arg unsigned
BinaryData{"bool", "uint_10", "OpSGreaterThanEqual", "int_40", "bool",
- AstFor("cast_uint_10"), ">=", "40"},
+ AstFor("cast_uint_10"), ">=", "40i"},
// Second arg unsigned
- BinaryData{"bool", "int_30", "OpSGreaterThanEqual", "uint_20", "bool",
- "30", ">=", AstFor("cast_uint_20")},
+ BinaryData{"bool", "int_30", "OpSGreaterThanEqual", "uint_20", "bool", "30i",
+ ">=", AstFor("cast_uint_20")},
// Vector, both signed
- BinaryData{"v2bool", "v2int_30_40", "OpSGreaterThanEqual",
- "v2int_40_30", "vec2<bool>", AstFor("v2int_30_40"),
- ">=", AstFor("v2int_40_30")},
+ BinaryData{"v2bool", "v2int_30_40", "OpSGreaterThanEqual", "v2int_40_30", "vec2<bool>",
+ AstFor("v2int_30_40"), ">=", AstFor("v2int_40_30")},
// First arg unsigned
- BinaryData{"v2bool", "v2uint_10_20", "OpSGreaterThanEqual",
- "v2int_40_30", "vec2<bool>", AstFor("cast_v2uint_10_20"),
- ">=", AstFor("v2int_40_30")},
+ BinaryData{"v2bool", "v2uint_10_20", "OpSGreaterThanEqual", "v2int_40_30", "vec2<bool>",
+ AstFor("cast_v2uint_10_20"), ">=", AstFor("v2int_40_30")},
// Second arg unsigned
- BinaryData{"v2bool", "v2int_30_40", "OpSGreaterThanEqual",
- "v2uint_20_10", "vec2<bool>", AstFor("v2int_30_40"),
- ">=", AstFor("cast_v2uint_20_10")}));
+ BinaryData{"v2bool", "v2int_30_40", "OpSGreaterThanEqual", "v2uint_20_10", "vec2<bool>",
+ AstFor("v2int_30_40"), ">=", AstFor("cast_v2uint_20_10")}));
INSTANTIATE_TEST_SUITE_P(
SpvParserTest_SLessThan,
SpvBinaryLogicalTest,
::testing::Values(
// Both signed
- BinaryData{"bool", "int_30", "OpSLessThan", "int_40", "bool", "30", "<",
- "40"},
+ BinaryData{"bool", "int_30", "OpSLessThan", "int_40", "bool", "30i", "<", "40i"},
// First arg unsigned
- BinaryData{"bool", "uint_10", "OpSLessThan", "int_40", "bool",
- AstFor("cast_uint_10"), "<", "40"},
+ BinaryData{"bool", "uint_10", "OpSLessThan", "int_40", "bool", AstFor("cast_uint_10"), "<",
+ "40i"},
// Second arg unsigned
- BinaryData{"bool", "int_30", "OpSLessThan", "uint_20", "bool", "30",
- "<", AstFor("cast_uint_20")},
+ BinaryData{"bool", "int_30", "OpSLessThan", "uint_20", "bool", "30i", "<",
+ AstFor("cast_uint_20")},
// Vector, both signed
- BinaryData{"v2bool", "v2int_30_40", "OpSLessThan", "v2int_40_30",
- "vec2<bool>", AstFor("v2int_30_40"), "<",
- AstFor("v2int_40_30")},
+ BinaryData{"v2bool", "v2int_30_40", "OpSLessThan", "v2int_40_30", "vec2<bool>",
+ AstFor("v2int_30_40"), "<", AstFor("v2int_40_30")},
// First arg unsigned
- BinaryData{"v2bool", "v2uint_10_20", "OpSLessThan", "v2int_40_30",
- "vec2<bool>", AstFor("cast_v2uint_10_20"), "<",
- AstFor("v2int_40_30")},
+ BinaryData{"v2bool", "v2uint_10_20", "OpSLessThan", "v2int_40_30", "vec2<bool>",
+ AstFor("cast_v2uint_10_20"), "<", AstFor("v2int_40_30")},
// Second arg unsigned
- BinaryData{"v2bool", "v2int_30_40", "OpSLessThan", "v2uint_20_10",
- "vec2<bool>", AstFor("v2int_30_40"), "<",
- AstFor("cast_v2uint_20_10")}));
+ BinaryData{"v2bool", "v2int_30_40", "OpSLessThan", "v2uint_20_10", "vec2<bool>",
+ AstFor("v2int_30_40"), "<", AstFor("cast_v2uint_20_10")}));
INSTANTIATE_TEST_SUITE_P(
SpvParserTest_SLessThanEqual,
SpvBinaryLogicalTest,
::testing::Values(
// Both signed
- BinaryData{"bool", "int_30", "OpSLessThanEqual", "int_40", "bool", "30",
- "<=", "40"},
+ BinaryData{"bool", "int_30", "OpSLessThanEqual", "int_40", "bool", "30i", "<=", "40i"},
// First arg unsigned
- BinaryData{"bool", "uint_10", "OpSLessThanEqual", "int_40", "bool",
- AstFor("cast_uint_10"), "<=", "40"},
+ BinaryData{"bool", "uint_10", "OpSLessThanEqual", "int_40", "bool", AstFor("cast_uint_10"),
+ "<=", "40i"},
// Second arg unsigned
- BinaryData{"bool", "int_30", "OpSLessThanEqual", "uint_20", "bool",
- "30", "<=", AstFor("cast_uint_20")},
+ BinaryData{"bool", "int_30", "OpSLessThanEqual", "uint_20", "bool", "30i",
+ "<=", AstFor("cast_uint_20")},
// Vector, both signed
- BinaryData{"v2bool", "v2int_30_40", "OpSLessThanEqual", "v2int_40_30",
- "vec2<bool>", AstFor("v2int_30_40"),
- "<=", AstFor("v2int_40_30")},
+ BinaryData{"v2bool", "v2int_30_40", "OpSLessThanEqual", "v2int_40_30", "vec2<bool>",
+ AstFor("v2int_30_40"), "<=", AstFor("v2int_40_30")},
// First arg unsigned
- BinaryData{"v2bool", "v2uint_10_20", "OpSLessThanEqual", "v2int_40_30",
- "vec2<bool>", AstFor("cast_v2uint_10_20"),
- "<=", AstFor("v2int_40_30")},
+ BinaryData{"v2bool", "v2uint_10_20", "OpSLessThanEqual", "v2int_40_30", "vec2<bool>",
+ AstFor("cast_v2uint_10_20"), "<=", AstFor("v2int_40_30")},
// Second arg unsigned
- BinaryData{"v2bool", "v2int_30_40", "OpSLessThanEqual", "v2uint_20_10",
- "vec2<bool>", AstFor("v2int_30_40"),
- "<=", AstFor("cast_v2uint_20_10")}));
+ BinaryData{"v2bool", "v2int_30_40", "OpSLessThanEqual", "v2uint_20_10", "vec2<bool>",
+ AstFor("v2int_30_40"), "<=", AstFor("cast_v2uint_20_10")}));
using SpvFUnordTest = SpvParserTestBase<::testing::Test>;
TEST_F(SpvFUnordTest, FUnordEqual_Scalar) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpFUnordEqual %bool %float_50 %float_60
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr("let x_1 : bool = !((50.0 != 60.0));"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body),
+ HasSubstr("let x_1 : bool = !((50.0f != 60.0f));"));
}
TEST_F(SpvFUnordTest, FUnordEqual_Vector) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpFUnordEqual %v2bool %v2float_50_60 %v2float_60_50
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(
- test::ToString(p->program(), ast_body),
- HasSubstr("let x_1 : vec2<bool> = "
- "!((vec2<f32>(50.0, 60.0) != vec2<f32>(60.0, 50.0)));"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body),
+ HasSubstr("let x_1 : vec2<bool> = "
+ "!((vec2<f32>(50.0f, 60.0f) != vec2<f32>(60.0f, 50.0f)));"));
}
TEST_F(SpvFUnordTest, FUnordNotEqual_Scalar) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpFUnordNotEqual %bool %float_50 %float_60
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr("let x_1 : bool = !((50.0 == 60.0));"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body),
+ HasSubstr("let x_1 : bool = !((50.0f == 60.0f));"));
}
TEST_F(SpvFUnordTest, FUnordNotEqual_Vector) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpFUnordNotEqual %v2bool %v2float_50_60 %v2float_60_50
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(
- test::ToString(p->program(), ast_body),
- HasSubstr("let x_1 : vec2<bool> = "
- "!((vec2<f32>(50.0, 60.0) == vec2<f32>(60.0, 50.0)));"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body),
+ HasSubstr("let x_1 : vec2<bool> = "
+ "!((vec2<f32>(50.0f, 60.0f) == vec2<f32>(60.0f, 50.0f)));"));
}
TEST_F(SpvFUnordTest, FUnordLessThan_Scalar) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpFUnordLessThan %bool %float_50 %float_60
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr("let x_1 : bool = !((50.0 >= 60.0));"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body),
+ HasSubstr("let x_1 : bool = !((50.0f >= 60.0f));"));
}
TEST_F(SpvFUnordTest, FUnordLessThan_Vector) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpFUnordLessThan %v2bool %v2float_50_60 %v2float_60_50
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(
- test::ToString(p->program(), ast_body),
- HasSubstr("let x_1 : vec2<bool> = "
- "!((vec2<f32>(50.0, 60.0) >= vec2<f32>(60.0, 50.0)));"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body),
+ HasSubstr("let x_1 : vec2<bool> = "
+ "!((vec2<f32>(50.0f, 60.0f) >= vec2<f32>(60.0f, 50.0f)));"));
}
TEST_F(SpvFUnordTest, FUnordLessThanEqual_Scalar) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpFUnordLessThanEqual %bool %float_50 %float_60
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr("let x_1 : bool = !((50.0 > 60.0));"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body),
+ HasSubstr("let x_1 : bool = !((50.0f > 60.0f));"));
}
TEST_F(SpvFUnordTest, FUnordLessThanEqual_Vector) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpFUnordLessThanEqual %v2bool %v2float_50_60 %v2float_60_50
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr("let x_1 : vec2<bool> = "
- "!((vec2<f32>(50.0, 60.0) > vec2<f32>(60.0, 50.0)));"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body),
+ HasSubstr("let x_1 : vec2<bool> = "
+ "!((vec2<f32>(50.0f, 60.0f) > vec2<f32>(60.0f, 50.0f)));"));
}
TEST_F(SpvFUnordTest, FUnordGreaterThan_Scalar) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpFUnordGreaterThan %bool %float_50 %float_60
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr("let x_1 : bool = !((50.0 <= 60.0));"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body),
+ HasSubstr("let x_1 : bool = !((50.0f <= 60.0f));"));
}
TEST_F(SpvFUnordTest, FUnordGreaterThan_Vector) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpFUnordGreaterThan %v2bool %v2float_50_60 %v2float_60_50
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(
- test::ToString(p->program(), ast_body),
- HasSubstr("let x_1 : vec2<bool> = "
- "!((vec2<f32>(50.0, 60.0) <= vec2<f32>(60.0, 50.0)));"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body),
+ HasSubstr("let x_1 : vec2<bool> = "
+ "!((vec2<f32>(50.0f, 60.0f) <= vec2<f32>(60.0f, 50.0f)));"));
}
TEST_F(SpvFUnordTest, FUnordGreaterThanEqual_Scalar) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpFUnordGreaterThanEqual %bool %float_50 %float_60
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr("let x_1 : bool = !((50.0 < 60.0));"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body),
+ HasSubstr("let x_1 : bool = !((50.0f < 60.0f));"));
}
TEST_F(SpvFUnordTest, FUnordGreaterThanEqual_Vector) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpFUnordGreaterThanEqual %v2bool %v2float_50_60 %v2float_60_50
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr("let x_1 : vec2<bool> = !(("
- "vec2<f32>(50.0, 60.0) < vec2<f32>(60.0, 50.0)"
- "));"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body),
+ HasSubstr("let x_1 : vec2<bool> = !(("
+ "vec2<f32>(50.0f, 60.0f) < vec2<f32>(60.0f, 50.0f)"
+ "));"));
}
using SpvLogicalTest = SpvParserTestBase<::testing::Test>;
TEST_F(SpvLogicalTest, Select_BoolCond_BoolParams) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpSelect %bool %true %true %false
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr("let x_1 : bool = select(false, true, true);"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body),
+ HasSubstr("let x_1 : bool = select(false, true, true);"));
}
TEST_F(SpvLogicalTest, Select_BoolCond_IntScalarParams) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpSelect %uint %true %uint_10 %uint_20
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr("let x_1 : u32 = select(20u, 10u, true);"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body),
+ HasSubstr("let x_1 : u32 = select(20u, 10u, true);"));
}
TEST_F(SpvLogicalTest, Select_BoolCond_FloatScalarParams) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpSelect %float %true %float_50 %float_60
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr("let x_1 : f32 = select(60.0, 50.0, true);"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body),
+ HasSubstr("let x_1 : f32 = select(60.0f, 50.0f, true);"));
}
TEST_F(SpvLogicalTest, Select_BoolCond_VectorParams) {
- // Prior to SPIR-V 1.4, the condition must be a vector of bools
- // when the value operands are vectors.
- // "Before version 1.4, results are only computed per component."
- const auto assembly = Preamble() + R"(
+ // Prior to SPIR-V 1.4, the condition must be a vector of bools
+ // when the value operands are vectors.
+ // "Before version 1.4, results are only computed per component."
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpSelect %v2uint %true %v2uint_10_20 %v2uint_20_10
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr("let x_1 : vec2<u32> = select("
- "vec2<u32>(20u, 10u), "
- "vec2<u32>(10u, 20u), "
- "true);"));
-
- // Fails validation prior to SPIR-V 1.4: If the value operands are vectors,
- // then the condition must be a vector.
- // "Expected vector sizes of Result Type and the condition to be equal:
- // Select"
- p->DeliberatelyInvalidSpirv();
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body), HasSubstr("let x_1 : vec2<u32> = select("
+ "vec2<u32>(20u, 10u), "
+ "vec2<u32>(10u, 20u), "
+ "true);"));
+
+ // Fails validation prior to SPIR-V 1.4: If the value operands are vectors,
+ // then the condition must be a vector.
+ // "Expected vector sizes of Result Type and the condition to be equal:
+ // Select"
+ p->DeliberatelyInvalidSpirv();
}
TEST_F(SpvLogicalTest, Select_VecBoolCond_VectorParams) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpSelect %v2uint %v2bool_t_f %v2uint_10_20 %v2uint_20_10
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr("let x_1 : vec2<u32> = select("
- "vec2<u32>(20u, 10u), "
- "vec2<u32>(10u, 20u), "
- "vec2<bool>(true, false));"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body), HasSubstr("let x_1 : vec2<u32> = select("
+ "vec2<u32>(20u, 10u), "
+ "vec2<u32>(10u, 20u), "
+ "vec2<bool>(true, false));"));
}
TEST_F(SpvLogicalTest, Any) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpAny %bool %v2bool_t_f
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr("let x_1 : bool = any(vec2<bool>(true, false));"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body),
+ HasSubstr("let x_1 : bool = any(vec2<bool>(true, false));"));
}
TEST_F(SpvLogicalTest, All) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpAll %bool %v2bool_t_f
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr("let x_1 : bool = all(vec2<bool>(true, false));"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body),
+ HasSubstr("let x_1 : bool = all(vec2<bool>(true, false));"));
}
TEST_F(SpvLogicalTest, IsNan_Scalar) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpIsNan %bool %float_50
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr("let x_1 : bool = isNan(50.0);"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body),
+ HasSubstr("let x_1 : bool = isNan(50.0f);"));
}
TEST_F(SpvLogicalTest, IsNan_Vector) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpIsNan %v2bool %v2float_50_60
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(
- test::ToString(p->program(), ast_body),
- HasSubstr("let x_1 : vec2<bool> = isNan(vec2<f32>(50.0, 60.0));"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body),
+ HasSubstr("let x_1 : vec2<bool> = isNan(vec2<f32>(50.0f, 60.0f));"));
}
TEST_F(SpvLogicalTest, IsInf_Scalar) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpIsInf %bool %float_50
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr("let x_1 : bool = isInf(50.0);"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body),
+ HasSubstr("let x_1 : bool = isInf(50.0f);"));
}
TEST_F(SpvLogicalTest, IsInf_Vector) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpIsInf %v2bool %v2float_50_60
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(
- test::ToString(p->program(), ast_body),
- HasSubstr("let x_1 : vec2<bool> = isInf(vec2<f32>(50.0, 60.0));"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body),
+ HasSubstr("let x_1 : vec2<bool> = isInf(vec2<f32>(50.0f, 60.0f));"));
}
// TODO(dneto): Kernel-guarded instructions.
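For reference, the expectation changes in this file encode two reader behaviors that the updated tests assert: WGSL literals are now printed with explicit type suffixes (50.0f, 30i, 10u), and the unordered float comparisons (OpFUnord*) are lowered to the negation of the complementary ordered comparison. Below is a minimal hand-written WGSL sketch of that output shape, assembled from the HasSubstr strings above; the wrapper function name and the variables a and b are illustrative only (the tests use SPIR-V constants and names like x_1), so treat it as a reading aid rather than generated output.

    fn sketch() {
        // f32 literals carry the new 'f' suffix, matching "50.0f"/"60.0f" above.
        let a : f32 = 50.0f;
        let b : f32 = 60.0f;
        // OpFUnordLessThan a b is emitted as the negated ordered >=,
        // exactly as asserted by FUnordLessThan_Scalar: !((50.0f >= 60.0f)).
        let x_1 : bool = !((a >= b));
        // OpSelect maps to WGSL select(false_value, true_value, condition),
        // so OpSelect %uint %true %uint_10 %uint_20 becomes select(20u, 10u, true),
        // as asserted by Select_BoolCond_IntScalarParams.
        let x_2 : u32 = select(20u, 10u, true);
    }
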
diff --git a/chromium/third_party/dawn/src/tint/reader/spirv/function_memory_test.cc b/chromium/third_party/dawn/src/tint/reader/spirv/function_memory_test.cc
index 33fa2f59d06..2bb98e390fd 100644
--- a/chromium/third_party/dawn/src/tint/reader/spirv/function_memory_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/spirv/function_memory_test.cc
@@ -26,7 +26,7 @@ using ::testing::HasSubstr;
using SpvParserMemoryTest = SpvParserTest;
std::string Preamble() {
- return R"(
+ return R"(
OpCapability Shader
OpMemoryModel Logical Simple
OpEntryPoint Fragment %100 "main"
@@ -35,7 +35,7 @@ std::string Preamble() {
}
TEST_F(SpvParserMemoryTest, EmitStatement_StoreBoolConst) {
- auto p = parser(test::Assemble(Preamble() + R"(
+ auto p = parser(test::Assemble(Preamble() + R"(
%void = OpTypeVoid
%voidfn = OpTypeFunction %void
%ty = OpTypeBool
@@ -52,18 +52,18 @@ TEST_F(SpvParserMemoryTest, EmitStatement_StoreBoolConst) {
OpReturn
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body), HasSubstr(R"(x_1 = true;
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body), HasSubstr(R"(x_1 = true;
x_1 = false;
x_1 = false;
)"));
}
TEST_F(SpvParserMemoryTest, EmitStatement_StoreUintConst) {
- auto p = parser(test::Assemble(Preamble() + R"(
+ auto p = parser(test::Assemble(Preamble() + R"(
%void = OpTypeVoid
%voidfn = OpTypeFunction %void
%ty = OpTypeInt 32 0
@@ -78,17 +78,17 @@ TEST_F(SpvParserMemoryTest, EmitStatement_StoreUintConst) {
OpReturn
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody());
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body), HasSubstr(R"(x_1 = 42u;
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody());
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body), HasSubstr(R"(x_1 = 42u;
x_1 = 0u;
)"));
}
TEST_F(SpvParserMemoryTest, EmitStatement_StoreIntConst) {
- auto p = parser(test::Assemble(Preamble() + R"(
+ auto p = parser(test::Assemble(Preamble() + R"(
%void = OpTypeVoid
%voidfn = OpTypeFunction %void
%ty = OpTypeInt 32 1
@@ -103,17 +103,18 @@ TEST_F(SpvParserMemoryTest, EmitStatement_StoreIntConst) {
OpReturn
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody());
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body), HasSubstr(R"(x_1 = 42;
-x_1 = 0;
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody());
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body), HasSubstr(R"(x_1 = 42i;
+x_1 = 0i;
+return;
)"));
}
TEST_F(SpvParserMemoryTest, EmitStatement_StoreFloatConst) {
- auto p = parser(test::Assemble(Preamble() + R"(
+ auto p = parser(test::Assemble(Preamble() + R"(
%void = OpTypeVoid
%voidfn = OpTypeFunction %void
%ty = OpTypeFloat 32
@@ -128,17 +129,18 @@ TEST_F(SpvParserMemoryTest, EmitStatement_StoreFloatConst) {
OpReturn
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody());
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body), HasSubstr(R"(x_1 = 42.0;
-x_1 = 0.0;
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody());
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body), HasSubstr(R"(x_1 = 42.0f;
+x_1 = 0.0f;
+return;
)"));
}
TEST_F(SpvParserMemoryTest, EmitStatement_LoadBool) {
- auto p = parser(test::Assemble(Preamble() + R"(
+ auto p = parser(test::Assemble(Preamble() + R"(
%void = OpTypeVoid
%voidfn = OpTypeFunction %void
%ty = OpTypeBool
@@ -153,16 +155,15 @@ TEST_F(SpvParserMemoryTest, EmitStatement_LoadBool) {
OpReturn
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr("let x_2 : bool = x_1;"));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body), HasSubstr("let x_2 : bool = x_1;"));
}
TEST_F(SpvParserMemoryTest, EmitStatement_LoadScalar) {
- auto p = parser(test::Assemble(Preamble() + R"(
+ auto p = parser(test::Assemble(Preamble() + R"(
%void = OpTypeVoid
%voidfn = OpTypeFunction %void
%ty = OpTypeInt 32 0
@@ -176,18 +177,17 @@ TEST_F(SpvParserMemoryTest, EmitStatement_LoadScalar) {
OpReturn
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr(R"(let x_2 : u32 = x_1;
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body), HasSubstr(R"(let x_2 : u32 = x_1;
let x_3 : u32 = x_1;
)"));
}
TEST_F(SpvParserMemoryTest, EmitStatement_UseLoadedScalarTwice) {
- auto p = parser(test::Assemble(Preamble() + R"(
+ auto p = parser(test::Assemble(Preamble() + R"(
%void = OpTypeVoid
%voidfn = OpTypeFunction %void
%ty = OpTypeInt 32 0
@@ -202,19 +202,18 @@ TEST_F(SpvParserMemoryTest, EmitStatement_UseLoadedScalarTwice) {
OpReturn
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr(R"(let x_2 : u32 = x_1;
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body), HasSubstr(R"(let x_2 : u32 = x_1;
x_1 = x_2;
x_1 = x_2;
)"));
}
TEST_F(SpvParserMemoryTest, EmitStatement_StoreToModuleScopeVar) {
- auto p = parser(test::Assemble(Preamble() + R"(
+ auto p = parser(test::Assemble(Preamble() + R"(
%void = OpTypeVoid
%voidfn = OpTypeFunction %void
%ty = OpTypeInt 32 0
@@ -227,16 +226,15 @@ TEST_F(SpvParserMemoryTest, EmitStatement_StoreToModuleScopeVar) {
OpReturn
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody());
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body), HasSubstr("x_1 = 42u;"));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody());
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body), HasSubstr("x_1 = 42u;"));
}
-TEST_F(SpvParserMemoryTest,
- EmitStatement_CopyMemory_Scalar_Function_To_Private) {
- auto p = parser(test::Assemble(Preamble() + R"(
+TEST_F(SpvParserMemoryTest, EmitStatement_CopyMemory_Scalar_Function_To_Private) {
+ auto p = parser(test::Assemble(Preamble() + R"(
%void = OpTypeVoid
%voidfn = OpTypeFunction %void
%ty = OpTypeInt 32 0
@@ -251,17 +249,17 @@ TEST_F(SpvParserMemoryTest,
OpReturn
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody());
- auto ast_body = fe.ast_body();
- const auto got = test::ToString(p->program(), ast_body);
- const auto* expected = "x_2 = x_1;";
- EXPECT_THAT(got, HasSubstr(expected));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody());
+ auto ast_body = fe.ast_body();
+ const auto got = test::ToString(p->program(), ast_body);
+ const auto* expected = "x_2 = x_1;";
+ EXPECT_THAT(got, HasSubstr(expected));
}
TEST_F(SpvParserMemoryTest, EmitStatement_AccessChain_BaseIsNotPointer) {
- auto p = parser(test::Assemble(Preamble() + R"(
+ auto p = parser(test::Assemble(Preamble() + R"(
%void = OpTypeVoid
%voidfn = OpTypeFunction %void
%10 = OpTypeInt 32 0
@@ -274,12 +272,12 @@ TEST_F(SpvParserMemoryTest, EmitStatement_AccessChain_BaseIsNotPointer) {
OpStore %1 %val
OpReturn
)"));
- EXPECT_FALSE(p->BuildAndParseInternalModuleExceptFunctions());
- EXPECT_THAT(p->error(), Eq("variable with ID 20 has non-pointer type 10"));
+ EXPECT_FALSE(p->BuildAndParseInternalModuleExceptFunctions());
+ EXPECT_THAT(p->error(), Eq("variable with ID 20 has non-pointer type 10"));
}
TEST_F(SpvParserMemoryTest, EmitStatement_AccessChain_VectorSwizzle) {
- const std::string assembly = Preamble() + R"(
+ const std::string assembly = Preamble() + R"(
OpName %1 "myvar"
%void = OpTypeVoid
%voidfn = OpTypeFunction %void
@@ -297,18 +295,16 @@ TEST_F(SpvParserMemoryTest, EmitStatement_AccessChain_VectorSwizzle) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions())
- << assembly << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody());
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr("myvar.z = 42u;"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody());
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body), HasSubstr("myvar.z = 42u;"));
}
TEST_F(SpvParserMemoryTest, EmitStatement_AccessChain_VectorConstOutOfBounds) {
- const std::string assembly = Preamble() + R"(
+ const std::string assembly = Preamble() + R"(
OpName %1 "myvar"
%void = OpTypeVoid
%voidfn = OpTypeFunction %void
@@ -326,17 +322,16 @@ TEST_F(SpvParserMemoryTest, EmitStatement_AccessChain_VectorConstOutOfBounds) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions())
- << assembly << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_FALSE(fe.EmitBody());
- EXPECT_THAT(p->error(), Eq("Access chain %2 index %42 value 42 is out of "
- "bounds for vector of 4 elements"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_FALSE(fe.EmitBody());
+ EXPECT_THAT(p->error(), Eq("Access chain %2 index %42 value 42 is out of "
+ "bounds for vector of 4 elements"));
}
TEST_F(SpvParserMemoryTest, EmitStatement_AccessChain_VectorNonConstIndex) {
- const std::string assembly = Preamble() + R"(
+ const std::string assembly = Preamble() + R"(
OpName %1 "myvar"
OpName %13 "a_dynamic_index"
%void = OpTypeVoid
@@ -359,21 +354,18 @@ TEST_F(SpvParserMemoryTest, EmitStatement_AccessChain_VectorNonConstIndex) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions())
- << assembly << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody());
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr("myvar[a_dynamic_index] = 42u;"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody());
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body), HasSubstr("myvar[a_dynamic_index] = 42u;"));
}
-TEST_F(SpvParserMemoryTest,
- EmitStatement_AccessChain_VectorComponent_MultiUse) {
- // WGSL does not support pointer-to-vector-component, so test that we sink
- // these pointers into the point of use.
- const std::string assembly = Preamble() + R"(
+TEST_F(SpvParserMemoryTest, EmitStatement_AccessChain_VectorComponent_MultiUse) {
+ // WGSL does not support pointer-to-vector-component, so test that we sink
+ // these pointers into the point of use.
+ const std::string assembly = Preamble() + R"(
OpName %1 "myvar"
%void = OpTypeVoid
%voidfn = OpTypeFunction %void
@@ -393,23 +385,21 @@ TEST_F(SpvParserMemoryTest,
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions())
- << assembly << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto wgsl = test::ToString(p->program(), ast_body);
- EXPECT_THAT(wgsl, Not(HasSubstr("&")));
- EXPECT_THAT(wgsl, HasSubstr(" = myvar.z;"));
- EXPECT_THAT(wgsl, HasSubstr("myvar.z = "));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto wgsl = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(wgsl, Not(HasSubstr("&")));
+ EXPECT_THAT(wgsl, HasSubstr(" = myvar.z;"));
+ EXPECT_THAT(wgsl, HasSubstr("myvar.z = "));
}
-TEST_F(SpvParserMemoryTest,
- EmitStatement_AccessChain_VectorComponent_MultiUse_NonConstIndex) {
- // WGSL does not support pointer-to-vector-component, so test that we sink
- // these pointers into the point of use.
- const std::string assembly = Preamble() + R"(
+TEST_F(SpvParserMemoryTest, EmitStatement_AccessChain_VectorComponent_MultiUse_NonConstIndex) {
+ // WGSL does not support pointer-to-vector-component, so test that we sink
+ // these pointers into the point of use.
+ const std::string assembly = Preamble() + R"(
OpName %1 "myvar"
%void = OpTypeVoid
%voidfn = OpTypeFunction %void
@@ -431,23 +421,21 @@ TEST_F(SpvParserMemoryTest,
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions())
- << assembly << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto wgsl = test::ToString(p->program(), ast_body);
- EXPECT_THAT(wgsl, Not(HasSubstr("&")));
- EXPECT_THAT(wgsl, HasSubstr(" = myvar[x_12];"));
- EXPECT_THAT(wgsl, HasSubstr("myvar[x_12] = "));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto wgsl = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(wgsl, Not(HasSubstr("&")));
+ EXPECT_THAT(wgsl, HasSubstr(" = myvar[x_12];"));
+ EXPECT_THAT(wgsl, HasSubstr("myvar[x_12] = "));
}
-TEST_F(SpvParserMemoryTest,
- EmitStatement_AccessChain_VectorComponent_SinkThroughChain) {
- // Test that we can sink a pointer-to-vector-component through a chain of
- // instructions that propagate it.
- const std::string assembly = Preamble() + R"(
+TEST_F(SpvParserMemoryTest, EmitStatement_AccessChain_VectorComponent_SinkThroughChain) {
+ // Test that we can sink a pointer-to-vector-component through a chain of
+ // instructions that propagate it.
+ const std::string assembly = Preamble() + R"(
OpName %1 "myvar"
%void = OpTypeVoid
%voidfn = OpTypeFunction %void
@@ -470,20 +458,19 @@ TEST_F(SpvParserMemoryTest,
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions())
- << assembly << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- auto wgsl = test::ToString(p->program(), ast_body);
- EXPECT_THAT(wgsl, Not(HasSubstr("&")));
- EXPECT_THAT(wgsl, HasSubstr(" = myvar.z;"));
- EXPECT_THAT(wgsl, HasSubstr("myvar.z = "));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ auto wgsl = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(wgsl, Not(HasSubstr("&")));
+ EXPECT_THAT(wgsl, HasSubstr(" = myvar.z;"));
+ EXPECT_THAT(wgsl, HasSubstr("myvar.z = "));
}
TEST_F(SpvParserMemoryTest, EmitStatement_AccessChain_Matrix) {
- const std::string assembly = Preamble() + R"(
+ const std::string assembly = Preamble() + R"(
OpName %1 "myvar"
%void = OpTypeVoid
%voidfn = OpTypeFunction %void
@@ -505,18 +492,17 @@ TEST_F(SpvParserMemoryTest, EmitStatement_AccessChain_Matrix) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions())
- << assembly << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody());
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr("myvar[2u] = vec4<f32>(42.0, 42.0, 42.0, 42.0);"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody());
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body),
+ HasSubstr("myvar[2u] = vec4<f32>(42.0f, 42.0f, 42.0f, 42.0f);"));
}
TEST_F(SpvParserMemoryTest, EmitStatement_AccessChain_Array) {
- const std::string assembly = Preamble() + R"(
+ const std::string assembly = Preamble() + R"(
OpName %1 "myvar"
%void = OpTypeVoid
%voidfn = OpTypeFunction %void
@@ -538,18 +524,17 @@ TEST_F(SpvParserMemoryTest, EmitStatement_AccessChain_Array) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions())
- << assembly << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody());
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr("myvar[2u] = vec4<f32>(42.0, 42.0, 42.0, 42.0);"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody());
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body),
+ HasSubstr("myvar[2u] = vec4<f32>(42.0f, 42.0f, 42.0f, 42.0f);"));
}
TEST_F(SpvParserMemoryTest, EmitStatement_AccessChain_Struct) {
- const std::string assembly = Preamble() + R"(
+ const std::string assembly = Preamble() + R"(
OpName %1 "myvar"
OpMemberName %strct 1 "age"
%void = OpTypeVoid
@@ -570,23 +555,20 @@ TEST_F(SpvParserMemoryTest, EmitStatement_AccessChain_Struct) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions())
- << assembly << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody());
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr("myvar.age = 42.0;"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody());
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body), HasSubstr("myvar.age = 42.0f;"));
}
-TEST_F(SpvParserMemoryTest,
- EmitStatement_AccessChain_Struct_DifferOnlyMemberName) {
- // The spirv-opt internal representation will map both structs to the
- // same canonicalized type, because it doesn't care about member names.
- // But we care about member names when producing a member-access expression.
- // crbug.com/tint/213
- const std::string assembly = Preamble() + R"(
+TEST_F(SpvParserMemoryTest, EmitStatement_AccessChain_Struct_DifferOnlyMemberName) {
+ // The spirv-opt internal representation will map both structs to the
+ // same canonicalized type, because it doesn't care about member names.
+ // But we care about member names when producing a member-access expression.
+ // crbug.com/tint/213
+ const std::string assembly = Preamble() + R"(
OpName %1 "myvar"
OpName %10 "myvar2"
OpMemberName %strct 1 "age"
@@ -615,20 +597,19 @@ TEST_F(SpvParserMemoryTest,
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions())
- << assembly << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody());
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr(R"(myvar.age = 42.0;
-myvar2.ancientness = 420.0;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody());
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body), HasSubstr(R"(myvar.age = 42.0f;
+myvar2.ancientness = 420.0f;
+return;
)"));
}
TEST_F(SpvParserMemoryTest, EmitStatement_AccessChain_StructNonConstIndex) {
- const std::string assembly = Preamble() + R"(
+ const std::string assembly = Preamble() + R"(
OpName %1 "myvar"
OpMemberName %55 1 "age"
%void = OpTypeVoid
@@ -652,17 +633,16 @@ TEST_F(SpvParserMemoryTest, EmitStatement_AccessChain_StructNonConstIndex) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions())
- << assembly << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_FALSE(fe.EmitBody());
- EXPECT_THAT(p->error(), Eq("Access chain %2 index %10 is a non-constant "
- "index into a structure %55"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_FALSE(fe.EmitBody());
+ EXPECT_THAT(p->error(), Eq("Access chain %2 index %10 is a non-constant "
+ "index into a structure %55"));
}
TEST_F(SpvParserMemoryTest, EmitStatement_AccessChain_StructConstOutOfBounds) {
- const std::string assembly = Preamble() + R"(
+ const std::string assembly = Preamble() + R"(
OpName %1 "myvar"
OpMemberName %55 1 "age"
%void = OpTypeVoid
@@ -683,17 +663,16 @@ TEST_F(SpvParserMemoryTest, EmitStatement_AccessChain_StructConstOutOfBounds) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions())
- << assembly << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_FALSE(fe.EmitBody());
- EXPECT_THAT(p->error(), Eq("Access chain %2 index value 99 is out of bounds "
- "for structure %55 having 2 members"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_FALSE(fe.EmitBody());
+ EXPECT_THAT(p->error(), Eq("Access chain %2 index value 99 is out of bounds "
+ "for structure %55 having 2 members"));
}
TEST_F(SpvParserMemoryTest, EmitStatement_AccessChain_Struct_RuntimeArray) {
- const std::string assembly = Preamble() + R"(
+ const std::string assembly = Preamble() + R"(
OpName %1 "myvar"
OpMemberName %strct 1 "age"
@@ -724,18 +703,16 @@ TEST_F(SpvParserMemoryTest, EmitStatement_AccessChain_Struct_RuntimeArray) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions())
- << assembly << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody());
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr("myvar.age[2u] = 42.0;"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody());
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body), HasSubstr("myvar.age[2u] = 42.0f;"));
}
TEST_F(SpvParserMemoryTest, EmitStatement_AccessChain_Compound_Matrix_Vector) {
- const std::string assembly = Preamble() + R"(
+ const std::string assembly = Preamble() + R"(
OpName %1 "myvar"
%void = OpTypeVoid
%voidfn = OpTypeFunction %void
@@ -757,18 +734,16 @@ TEST_F(SpvParserMemoryTest, EmitStatement_AccessChain_Compound_Matrix_Vector) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions())
- << assembly << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody());
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr("myvar[2u].w = 42.0;"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody());
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body), HasSubstr("myvar[2u].w = 42.0f;"));
}
TEST_F(SpvParserMemoryTest, EmitStatement_AccessChain_InvalidPointeeType) {
- const std::string assembly = Preamble() + R"(
+ const std::string assembly = Preamble() + R"(
OpName %1 "myvar"
%55 = OpTypeVoid
%voidfn = OpTypeFunction %55
@@ -785,20 +760,18 @@ TEST_F(SpvParserMemoryTest, EmitStatement_AccessChain_InvalidPointeeType) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions())
- << assembly << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_FALSE(fe.EmitBody());
- EXPECT_THAT(p->error(),
- HasSubstr("Access chain with unknown or invalid pointee type "
- "%60: %60 = OpTypePointer Private %55"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_FALSE(fe.EmitBody());
+ EXPECT_THAT(p->error(), HasSubstr("Access chain with unknown or invalid pointee type "
+ "%60: %60 = OpTypePointer Private %55"));
}
TEST_F(SpvParserMemoryTest, EmitStatement_AccessChain_DereferenceBase) {
- // The base operand to OpAccessChain may have to be dereferenced first.
- // crbug.com/tint/737
- const std::string assembly = Preamble() + R"(
+ // The base operand to OpAccessChain may have to be dereferenced first.
+ // crbug.com/tint/737
+ const std::string assembly = Preamble() + R"(
%void = OpTypeVoid
%voidfn = OpTypeFunction %void
@@ -825,10 +798,10 @@ TEST_F(SpvParserMemoryTest, EmitStatement_AccessChain_DereferenceBase) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModule());
- const auto got = test::ToString(p->program());
- const std::string expected = R"(fn x_200(x_1 : ptr<private, vec2<u32>>) {
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModule());
+ const auto got = test::ToString(p->program());
+ const std::string expected = R"(fn x_200(x_1 : ptr<private, vec2<u32>>) {
let x_3 : u32 = (*(x_1)).x;
return;
}
@@ -837,21 +810,20 @@ fn main_1() {
return;
}
-@stage(fragment)
+@fragment
fn main() {
main_1();
}
)";
- EXPECT_EQ(got, expected) << got;
+ EXPECT_EQ(got, expected) << got;
}
-TEST_F(SpvParserMemoryTest,
- EmitStatement_AccessChain_InferFunctionStorageClass) {
- // An access chain can have no indices. When the base is a Function variable,
- // the reference type has no explicit storage class in the AST representation.
- // But the pointer type for the let declaration must have an explicit
- // 'function' storage class. From crbug.com/tint/807
- const std::string assembly = R"(
+TEST_F(SpvParserMemoryTest, EmitStatement_AccessChain_InferFunctionStorageClass) {
+ // An access chain can have no indices. When the base is a Function variable,
+ // the reference type has no explicit storage class in the AST representation.
+ // But the pointer type for the let declaration must have an explicit
+ // 'function' storage class. From crbug.com/tint/807
+ const std::string assembly = R"(
OpCapability Shader
OpMemoryModel Logical Simple
OpEntryPoint Fragment %main "main"
@@ -869,25 +841,25 @@ OpExecutionMode %main OriginUpperLeft
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModule()) << assembly;
- const auto got = test::ToString(p->program());
- const std::string expected = R"(fn main_1() {
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModule()) << assembly;
+ const auto got = test::ToString(p->program());
+ const std::string expected = R"(fn main_1() {
var x_1 : u32;
let x_2 : ptr<function, u32> = &(x_1);
return;
}
-@stage(fragment)
+@fragment
fn main() {
main_1();
}
)";
- EXPECT_EQ(got, expected) << got;
+ EXPECT_EQ(got, expected) << got;
}
std::string OldStorageBufferPreamble() {
- return Preamble() + R"(
+ return Preamble() + R"(
OpName %myvar "myvar"
OpDecorate %myvar DescriptorSet 0
@@ -915,21 +887,20 @@ std::string OldStorageBufferPreamble() {
}
TEST_F(SpvParserMemoryTest, RemapStorageBuffer_TypesAndVarDeclarations) {
- // Enusure we get the right module-scope declaration. This tests translation
- // of the structure type, arrays of the structure, pointers to them, and
- // OpVariable of these.
- const auto assembly = OldStorageBufferPreamble() + R"(
+ // Ensure we get the right module-scope declaration. This tests translation
+ // of the structure type, arrays of the structure, pointers to them, and
+ // OpVariable of these.
+ const auto assembly = OldStorageBufferPreamble() + R"(
; The preamble declared %100 to be an entry point, so supply it.
%100 = OpFunction %void None %voidfn
%entry = OpLabel
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions())
- << assembly << p->error();
- const auto module_str = test::ToString(p->program());
- EXPECT_THAT(module_str, HasSubstr(R"(type RTArr = @stride(4) array<u32>;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly << p->error();
+ const auto module_str = test::ToString(p->program());
+ EXPECT_THAT(module_str, HasSubstr(R"(type RTArr = @stride(4) array<u32>;
struct S {
field0 : u32,
@@ -941,7 +912,7 @@ struct S {
}
TEST_F(SpvParserMemoryTest, RemapStorageBuffer_ThroughAccessChain_NonCascaded) {
- const auto assembly = OldStorageBufferPreamble() + R"(
+ const auto assembly = OldStorageBufferPreamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
@@ -956,21 +927,20 @@ TEST_F(SpvParserMemoryTest, RemapStorageBuffer_ThroughAccessChain_NonCascaded) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModule()) << assembly << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- const auto got = test::ToString(p->program(), ast_body);
- EXPECT_THAT(got, HasSubstr(R"(myvar.field0 = 0u;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModule()) << assembly << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ const auto got = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(got, HasSubstr(R"(myvar.field0 = 0u;
myvar.field1[1u] = 0u;
)"));
}
-TEST_F(SpvParserMemoryTest,
- RemapStorageBuffer_ThroughAccessChain_NonCascaded_InBoundsAccessChain) {
- // Like the previous test, but using OpInBoundsAccessChain.
- const auto assembly = OldStorageBufferPreamble() + R"(
+TEST_F(SpvParserMemoryTest, RemapStorageBuffer_ThroughAccessChain_NonCascaded_InBoundsAccessChain) {
+ // Like the previous test, but using OpInBoundsAccessChain.
+ const auto assembly = OldStorageBufferPreamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
@@ -985,20 +955,20 @@ TEST_F(SpvParserMemoryTest,
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModule()) << assembly << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- const auto got = test::ToString(p->program(), ast_body);
- EXPECT_THAT(got, HasSubstr(R"(myvar.field0 = 0u;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModule()) << assembly << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ const auto got = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(got, HasSubstr(R"(myvar.field0 = 0u;
myvar.field1[1u] = 0u;
)")) << got
<< p->error();
}
TEST_F(SpvParserMemoryTest, RemapStorageBuffer_ThroughAccessChain_Cascaded) {
- const auto assembly = OldStorageBufferPreamble() + R"(
+ const auto assembly = OldStorageBufferPreamble() + R"(
%ptr_rtarr = OpTypePointer Uniform %arr
%100 = OpFunction %void None %voidfn
%entry = OpLabel
@@ -1012,23 +982,21 @@ TEST_F(SpvParserMemoryTest, RemapStorageBuffer_ThroughAccessChain_Cascaded) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModule()) << assembly << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr("myvar.field1[1u] = 0u;"))
- << p->error();
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModule()) << assembly << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body), HasSubstr("myvar.field1[1u] = 0u;"))
+ << p->error();
}
-TEST_F(SpvParserMemoryTest,
- RemapStorageBuffer_ThroughCopyObject_WithoutHoisting) {
- // Generates a const declaration directly.
- // We have to do a bunch of storage class tracking for locally
- // defined values in order to get the right pointer-to-storage-buffer
- // value type for the const declration.
- const auto assembly = OldStorageBufferPreamble() + R"(
+TEST_F(SpvParserMemoryTest, RemapStorageBuffer_ThroughCopyObject_WithoutHoisting) {
+ // Generates a const declaration directly.
+ // We have to do a bunch of storage class tracking for locally
+ // defined values in order to get the right pointer-to-storage-buffer
+ // value type for the const declaration.
+ const auto assembly = OldStorageBufferPreamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
@@ -1039,28 +1007,27 @@ TEST_F(SpvParserMemoryTest,
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModule()) << assembly << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr(R"(let x_2 : ptr<storage, u32> = &(myvar.field1[1u]);
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModule()) << assembly << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body),
+ HasSubstr(R"(let x_2 : ptr<storage, u32> = &(myvar.field1[1u]);
*(x_2) = 0u;
)")) << p->error();
- p->SkipDumpingPending(
- "crbug.com/tint/1041 track access mode in spirv-reader parser type");
+ p->SkipDumpingPending("crbug.com/tint/1041 track access mode in spirv-reader parser type");
}
TEST_F(SpvParserMemoryTest, RemapStorageBuffer_ThroughCopyObject_WithHoisting) {
- // TODO(dneto): Hoisting non-storable values (pointers) is not yet supported.
- // It's debatable whether this test should run at all.
- // crbug.com/tint/98
+ // TODO(dneto): Hoisting non-storable values (pointers) is not yet supported.
+ // It's debatable whether this test should run at all.
+ // crbug.com/tint/98
- // Like the previous test, but the declaration for the copy-object
- // has its declaration hoisted.
- const auto assembly = OldStorageBufferPreamble() + R"(
+ // Like the previous test, but the declaration for the copy-object
+ // has its declaration hoisted.
+ const auto assembly = OldStorageBufferPreamble() + R"(
%bool = OpTypeBool
%cond = OpConstantTrue %bool
@@ -1085,13 +1052,13 @@ TEST_F(SpvParserMemoryTest, RemapStorageBuffer_ThroughCopyObject_WithHoisting) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModule()) << assembly << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_EQ(test::ToString(p->program(), ast_body),
- R"(var x_2 : ptr<storage, u32>;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModule()) << assembly << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_EQ(test::ToString(p->program(), ast_body),
+ R"(var x_2 : ptr<storage, u32>;
if (true) {
x_2 = &(myvar.field1[1u]);
} else {
@@ -1100,19 +1067,18 @@ if (true) {
x_2 = 0u;
return;
)") << p->error();
- p->SkipDumpingPending("crbug.com/tint/98");
+ p->SkipDumpingPending("crbug.com/tint/98");
}
TEST_F(SpvParserMemoryTest, DISABLED_RemapStorageBuffer_ThroughFunctionCall) {
- // WGSL does not support pointer-to-storage-buffer as function parameter
+ // WGSL does not support pointer-to-storage-buffer as a function parameter
}
-TEST_F(SpvParserMemoryTest,
- DISABLED_RemapStorageBuffer_ThroughFunctionParameter) {
- // WGSL does not support pointer-to-storage-buffer as function parameter
+TEST_F(SpvParserMemoryTest, DISABLED_RemapStorageBuffer_ThroughFunctionParameter) {
+ // WGSL does not support pointer-to-storage-buffer as a function parameter
}
std::string RuntimeArrayPreamble() {
- return R"(
+ return R"(
OpCapability Shader
OpMemoryModel Logical Simple
OpEntryPoint Fragment %100 "main"
@@ -1147,7 +1113,7 @@ std::string RuntimeArrayPreamble() {
}
TEST_F(SpvParserMemoryTest, ArrayLength_FromVar) {
- const auto assembly = RuntimeArrayPreamble() + R"(
+ const auto assembly = RuntimeArrayPreamble() + R"(
%100 = OpFunction %void None %voidfn
@@ -1156,19 +1122,17 @@ TEST_F(SpvParserMemoryTest, ArrayLength_FromVar) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModule()) << assembly << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- const auto body_str = test::ToString(p->program(), ast_body);
- EXPECT_THAT(body_str,
- HasSubstr("let x_1 : u32 = arrayLength(&(myvar.rtarr));"))
- << body_str;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModule()) << assembly << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ const auto body_str = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(body_str, HasSubstr("let x_1 : u32 = arrayLength(&(myvar.rtarr));")) << body_str;
}
TEST_F(SpvParserMemoryTest, ArrayLength_FromCopyObject) {
- const auto assembly = RuntimeArrayPreamble() + R"(
+ const auto assembly = RuntimeArrayPreamble() + R"(
%100 = OpFunction %void None %voidfn
@@ -1178,22 +1142,21 @@ TEST_F(SpvParserMemoryTest, ArrayLength_FromCopyObject) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModule()) << assembly << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- const auto body_str = test::ToString(p->program(), ast_body);
- EXPECT_THAT(body_str, HasSubstr(R"(let x_2 : ptr<storage, S> = &(myvar);
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModule()) << assembly << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ const auto body_str = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(body_str, HasSubstr(R"(let x_2 : ptr<storage, S> = &(myvar);
let x_1 : u32 = arrayLength(&((*(x_2)).rtarr));
)")) << body_str;
- p->SkipDumpingPending(
- "crbug.com/tint/1041 track access mode in spirv-reader parser type");
+ p->SkipDumpingPending("crbug.com/tint/1041 track access mode in spirv-reader parser type");
}
TEST_F(SpvParserMemoryTest, ArrayLength_FromAccessChain) {
- const auto assembly = RuntimeArrayPreamble() + R"(
+ const auto assembly = RuntimeArrayPreamble() + R"(
%100 = OpFunction %void None %voidfn
@@ -1203,19 +1166,17 @@ TEST_F(SpvParserMemoryTest, ArrayLength_FromAccessChain) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModule()) << assembly << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- const auto body_str = test::ToString(p->program(), ast_body);
- EXPECT_THAT(body_str,
- HasSubstr("let x_1 : u32 = arrayLength(&(myvar.rtarr));"))
- << body_str;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModule()) << assembly << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ const auto body_str = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(body_str, HasSubstr("let x_1 : u32 = arrayLength(&(myvar.rtarr));")) << body_str;
}
std::string InvalidPointerPreamble() {
- return R"(
+ return R"(
OpCapability Shader
OpMemoryModel Logical Simple
OpEntryPoint Fragment %main "main"
@@ -1230,7 +1191,7 @@ OpExecutionMode %main OriginUpperLeft
}
TEST_F(SpvParserMemoryTest, InvalidPointer_Undef_ModuleScope_IsError) {
- const std::string assembly = InvalidPointerPreamble() + R"(
+ const std::string assembly = InvalidPointerPreamble() + R"(
%ptr = OpUndef %ptr_ty
%main = OpFunction %void None %voidfn
@@ -1245,13 +1206,13 @@ TEST_F(SpvParserMemoryTest, InvalidPointer_Undef_ModuleScope_IsError) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- EXPECT_FALSE(p->BuildAndParseInternalModule()) << assembly;
- EXPECT_EQ(p->error(), "undef pointer is not valid: %9 = OpUndef %6");
+ auto p = parser(test::Assemble(assembly));
+ EXPECT_FALSE(p->BuildAndParseInternalModule()) << assembly;
+ EXPECT_EQ(p->error(), "undef pointer is not valid: %9 = OpUndef %6");
}
TEST_F(SpvParserMemoryTest, InvalidPointer_Undef_FunctionScope_IsError) {
- const std::string assembly = InvalidPointerPreamble() + R"(
+ const std::string assembly = InvalidPointerPreamble() + R"(
%main = OpFunction %void None %voidfn
%entry = OpLabel
@@ -1259,15 +1220,15 @@ TEST_F(SpvParserMemoryTest, InvalidPointer_Undef_FunctionScope_IsError) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- EXPECT_FALSE(p->BuildAndParseInternalModule()) << assembly;
- EXPECT_EQ(p->error(), "undef pointer is not valid: %7 = OpUndef %3");
+ auto p = parser(test::Assemble(assembly));
+ EXPECT_FALSE(p->BuildAndParseInternalModule()) << assembly;
+ EXPECT_EQ(p->error(), "undef pointer is not valid: %7 = OpUndef %3");
}
TEST_F(SpvParserMemoryTest, InvalidPointer_ConstantNull_IsError) {
- // OpConstantNull on logical pointer requires variable-pointers, which
- // is not (yet) supported by WGSL features.
- const std::string assembly = InvalidPointerPreamble() + R"(
+ // OpConstantNull on logical pointer requires variable-pointers, which
+ // is not (yet) supported by WGSL features.
+ const std::string assembly = InvalidPointerPreamble() + R"(
%ptr = OpConstantNull %ptr_ty
%main = OpFunction %void None %voidfn
@@ -1282,9 +1243,9 @@ TEST_F(SpvParserMemoryTest, InvalidPointer_ConstantNull_IsError) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- EXPECT_FALSE(p->BuildAndParseInternalModule());
- EXPECT_EQ(p->error(), "null pointer is not valid: %9 = OpConstantNull %6");
+ auto p = parser(test::Assemble(assembly));
+ EXPECT_FALSE(p->BuildAndParseInternalModule());
+ EXPECT_EQ(p->error(), "null pointer is not valid: %9 = OpConstantNull %6");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/reader/spirv/function_misc_test.cc b/chromium/third_party/dawn/src/tint/reader/spirv/function_misc_test.cc
index e64eb2e6688..3f9c3985c62 100644
--- a/chromium/third_party/dawn/src/tint/reader/spirv/function_misc_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/spirv/function_misc_test.cc
@@ -24,7 +24,7 @@ using ::testing::Eq;
using ::testing::HasSubstr;
std::string Preamble() {
- return R"(
+ return R"(
OpCapability Shader
OpMemoryModel Logical Simple
OpEntryPoint Fragment %100 "main"
@@ -33,7 +33,7 @@ std::string Preamble() {
}
std::string CommonTypes() {
- return R"(
+ return R"(
%void = OpTypeVoid
%voidfn = OpTypeFunction %void
@@ -52,7 +52,7 @@ std::string CommonTypes() {
using SpvParserTestMiscInstruction = SpvParserTest;
TEST_F(SpvParserTestMiscInstruction, OpUndef_BeforeFunction_Scalar) {
- const auto assembly = Preamble() + CommonTypes() + R"(
+ const auto assembly = Preamble() + CommonTypes() + R"(
%1 = OpUndef %bool
%2 = OpUndef %uint
%3 = OpUndef %int
@@ -67,21 +67,21 @@ TEST_F(SpvParserTestMiscInstruction, OpUndef_BeforeFunction_Scalar) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr(R"(let x_11 : bool = false;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body), HasSubstr(R"(let x_11 : bool = false;
let x_12 : u32 = 0u;
-let x_13 : i32 = 0;
-let x_14 : f32 = 0.0;
+let x_13 : i32 = 0i;
+let x_14 : f32 = 0.0f;
+return;
)"));
}
TEST_F(SpvParserTestMiscInstruction, OpUndef_BeforeFunction_Vector) {
- const auto assembly = Preamble() + CommonTypes() + R"(
+ const auto assembly = Preamble() + CommonTypes() + R"(
%4 = OpUndef %v2bool
%1 = OpUndef %v2uint
%2 = OpUndef %v2int
@@ -97,13 +97,13 @@ TEST_F(SpvParserTestMiscInstruction, OpUndef_BeforeFunction_Vector) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr(R"(let x_14 : vec2<bool> = vec2<bool>();
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body),
+ HasSubstr(R"(let x_14 : vec2<bool> = vec2<bool>();
let x_11 : vec2<u32> = vec2<u32>();
let x_12 : vec2<i32> = vec2<i32>();
let x_13 : vec2<f32> = vec2<f32>();
@@ -111,7 +111,7 @@ let x_13 : vec2<f32> = vec2<f32>();
}
TEST_F(SpvParserTestMiscInstruction, OpUndef_InFunction_Scalar) {
- const auto assembly = Preamble() + CommonTypes() + R"(
+ const auto assembly = Preamble() + CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpUndef %bool
@@ -126,21 +126,21 @@ TEST_F(SpvParserTestMiscInstruction, OpUndef_InFunction_Scalar) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr(R"(let x_11 : bool = false;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body), HasSubstr(R"(let x_11 : bool = false;
let x_12 : u32 = 0u;
-let x_13 : i32 = 0;
-let x_14 : f32 = 0.0;
+let x_13 : i32 = 0i;
+let x_14 : f32 = 0.0f;
+return;
)"));
}
TEST_F(SpvParserTestMiscInstruction, OpUndef_InFunction_Vector) {
- const auto assembly = Preamble() + CommonTypes() + R"(
+ const auto assembly = Preamble() + CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpUndef %v2uint
@@ -153,20 +153,20 @@ TEST_F(SpvParserTestMiscInstruction, OpUndef_InFunction_Vector) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr(R"(let x_11 : vec2<u32> = vec2<u32>();
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body),
+ HasSubstr(R"(let x_11 : vec2<u32> = vec2<u32>();
let x_12 : vec2<i32> = vec2<i32>();
let x_13 : vec2<f32> = vec2<f32>();
)"));
}
TEST_F(SpvParserTestMiscInstruction, OpUndef_InFunction_Matrix) {
- const auto assembly = Preamble() + CommonTypes() + R"(
+ const auto assembly = Preamble() + CommonTypes() + R"(
%mat = OpTypeMatrix %v2float 2
%100 = OpFunction %void None %voidfn
@@ -177,17 +177,17 @@ TEST_F(SpvParserTestMiscInstruction, OpUndef_InFunction_Matrix) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr("let x_11 : mat2x2<f32> = mat2x2<f32>();"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body),
+ HasSubstr("let x_11 : mat2x2<f32> = mat2x2<f32>();"));
}
TEST_F(SpvParserTestMiscInstruction, OpUndef_InFunction_Array) {
- const auto assembly = Preamble() + CommonTypes() + R"(
+ const auto assembly = Preamble() + CommonTypes() + R"(
%uint_2 = OpConstant %uint 2
%arr = OpTypeArray %uint %uint_2
@@ -199,17 +199,17 @@ TEST_F(SpvParserTestMiscInstruction, OpUndef_InFunction_Array) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr("let x_11 : array<u32, 2u> = array<u32, 2u>();"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body),
+ HasSubstr("let x_11 : array<u32, 2u> = array<u32, 2u>();"));
}
TEST_F(SpvParserTestMiscInstruction, OpUndef_InFunction_Struct) {
- const auto assembly = Preamble() + CommonTypes() + R"(
+ const auto assembly = Preamble() + CommonTypes() + R"(
%strct = OpTypeStruct %bool %uint %int %float
%100 = OpFunction %void None %voidfn
@@ -220,82 +220,79 @@ TEST_F(SpvParserTestMiscInstruction, OpUndef_InFunction_Struct) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr("let x_11 : S = S(false, 0u, 0, 0.0);"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body),
+ HasSubstr("let x_11 : S = S(false, 0u, 0i, 0.0f);"));
}
TEST_F(SpvParserTestMiscInstruction, OpNop) {
- const auto assembly = Preamble() + CommonTypes() + R"(
+ const auto assembly = Preamble() + CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
OpNop
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions())
- << p->error() << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- EXPECT_EQ(test::ToString(p->program(), ast_body), "return;\n");
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error() << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ EXPECT_EQ(test::ToString(p->program(), ast_body), "return;\n");
}
// Test swizzle generation.
struct SwizzleCase {
- uint32_t index;
- std::string expected_expr;
- std::string expected_error;
+ uint32_t index;
+ std::string expected_expr;
+ std::string expected_error;
};
-using SpvParserSwizzleTest =
- SpvParserTestBase<::testing::TestWithParam<SwizzleCase>>;
+using SpvParserSwizzleTest = SpvParserTestBase<::testing::TestWithParam<SwizzleCase>>;
TEST_P(SpvParserSwizzleTest, Sample) {
- // We need a function so we can get a FunctionEmitter.
- const auto assembly = Preamble() + CommonTypes() + R"(
+ // We need a function so we can get a FunctionEmitter.
+ const auto assembly = Preamble() + CommonTypes() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
-
- auto* result = fe.Swizzle(GetParam().index);
- if (GetParam().expected_error.empty()) {
- Program program(p->program());
- EXPECT_TRUE(fe.success());
- ASSERT_NE(result, nullptr);
- auto got = test::ToString(program, result);
- EXPECT_EQ(got, GetParam().expected_expr);
- } else {
- EXPECT_EQ(result, nullptr);
- EXPECT_FALSE(fe.success());
- EXPECT_EQ(p->error(), GetParam().expected_error);
- }
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+
+ auto* result = fe.Swizzle(GetParam().index);
+ if (GetParam().expected_error.empty()) {
+ Program program(p->program());
+ EXPECT_TRUE(fe.success());
+ ASSERT_NE(result, nullptr);
+ auto got = test::ToString(program, result);
+ EXPECT_EQ(got, GetParam().expected_expr);
+ } else {
+ EXPECT_EQ(result, nullptr);
+ EXPECT_FALSE(fe.success());
+ EXPECT_EQ(p->error(), GetParam().expected_error);
+ }
}
-INSTANTIATE_TEST_SUITE_P(
- ValidIndex,
- SpvParserSwizzleTest,
- ::testing::ValuesIn(std::vector<SwizzleCase>{
- {0, "x", ""},
- {1, "y", ""},
- {2, "z", ""},
- {3, "w", ""},
- {4, "", "vector component index is larger than 3: 4"},
- {99999, "", "vector component index is larger than 3: 99999"}}));
+INSTANTIATE_TEST_SUITE_P(ValidIndex,
+ SpvParserSwizzleTest,
+ ::testing::ValuesIn(std::vector<SwizzleCase>{
+ {0, "x", ""},
+ {1, "y", ""},
+ {2, "z", ""},
+ {3, "w", ""},
+ {4, "", "vector component index is larger than 3: 4"},
+ {99999, "", "vector component index is larger than 3: 99999"}}));
TEST_F(SpvParserTest, ValueFromBlockNotInBlockOrder) {
- // crbug.com/tint/804
- const auto assembly = Preamble() + CommonTypes() + R"(
+ // crbug.com/tint/804
+ const auto assembly = Preamble() + CommonTypes() + R"(
%float_42 = OpConstant %float 42.0
%cond = OpUndef %bool
@@ -329,13 +326,13 @@ TEST_F(SpvParserTest, ValueFromBlockNotInBlockOrder) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- auto ast_body = fe.ast_body();
- const auto got = test::ToString(p->program(), ast_body);
- EXPECT_THAT(got, HasSubstr("let x_81 : f32 = (0.0 * 42.0);"));
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto ast_body = fe.ast_body();
+ const auto got = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(got, HasSubstr("let x_81 : f32 = (0.0f * 42.0f);"));
}
// TODO(dneto): OpSizeof : requires Kernel (OpenCL)
diff --git a/chromium/third_party/dawn/src/tint/reader/spirv/function_var_test.cc b/chromium/third_party/dawn/src/tint/reader/spirv/function_var_test.cc
index c6c1aa515ea..63712345884 100644
--- a/chromium/third_party/dawn/src/tint/reader/spirv/function_var_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/spirv/function_var_test.cc
@@ -26,16 +26,16 @@ using ::testing::HasSubstr;
/// @returns a SPIR-V assembly segment which assigns debug names
/// to particular IDs.
std::string Names(std::vector<std::string> ids) {
- std::ostringstream outs;
- for (auto& id : ids) {
- outs << " OpName %" << id << " \"" << id << "\"\n";
- }
- return outs.str();
+ std::ostringstream outs;
+ for (auto& id : ids) {
+ outs << " OpName %" << id << " \"" << id << "\"\n";
+ }
+ return outs.str();
}
std::string CommonTypes() {
- return
- R"(
+ return
+ R"(
%void = OpTypeVoid
%voidfn = OpTypeFunction %void
@@ -80,7 +80,7 @@ std::string CommonTypes() {
// a vertex shader entry point declaration, and name declarations
// for specified IDs.
std::string Caps(std::vector<std::string> ids = {}) {
- return R"(
+ return R"(
OpCapability Shader
OpMemoryModel Logical Simple
OpEntryPoint Fragment %100 "main"
@@ -91,17 +91,17 @@ std::string Caps(std::vector<std::string> ids = {}) {
// Returns the SPIR-V assembly for a vertex shader, optionally
// with OpName decorations for certain SPIR-V IDs
std::string PreambleNames(std::vector<std::string> ids) {
- return Caps(ids) + CommonTypes();
+ return Caps(ids) + CommonTypes();
}
std::string Preamble() {
- return PreambleNames({});
+ return PreambleNames({});
}
using SpvParserFunctionVarTest = SpvParserTest;
TEST_F(SpvParserFunctionVarTest, EmitFunctionVariables_AnonymousVars) {
- auto p = parser(test::Assemble(Preamble() + R"(
+ auto p = parser(test::Assemble(Preamble() + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%1 = OpVariable %ptr_uint Function
@@ -110,20 +110,19 @@ TEST_F(SpvParserFunctionVarTest, EmitFunctionVariables_AnonymousVars) {
OpReturn
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitFunctionVariables());
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitFunctionVariables());
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr(R"(var x_1 : u32;
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body), HasSubstr(R"(var x_1 : u32;
var x_2 : u32;
var x_3 : u32;
)"));
}
TEST_F(SpvParserFunctionVarTest, EmitFunctionVariables_NamedVars) {
- auto p = parser(test::Assemble(PreambleNames({"a", "b", "c"}) + R"(
+ auto p = parser(test::Assemble(PreambleNames({"a", "b", "c"}) + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%a = OpVariable %ptr_uint Function
@@ -132,19 +131,19 @@ TEST_F(SpvParserFunctionVarTest, EmitFunctionVariables_NamedVars) {
OpReturn
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitFunctionVariables());
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitFunctionVariables());
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body), HasSubstr(R"(var a : u32;
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body), HasSubstr(R"(var a : u32;
var b : u32;
var c : u32;
)"));
}
TEST_F(SpvParserFunctionVarTest, EmitFunctionVariables_MixedTypes) {
- auto p = parser(test::Assemble(PreambleNames({"a", "b", "c"}) + R"(
+ auto p = parser(test::Assemble(PreambleNames({"a", "b", "c"}) + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%a = OpVariable %ptr_uint Function
@@ -153,19 +152,19 @@ TEST_F(SpvParserFunctionVarTest, EmitFunctionVariables_MixedTypes) {
OpReturn
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitFunctionVariables());
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitFunctionVariables());
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body), HasSubstr(R"(var a : u32;
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body), HasSubstr(R"(var a : u32;
var b : i32;
var c : f32;
)"));
}
TEST_F(SpvParserFunctionVarTest, EmitFunctionVariables_ScalarInitializers) {
- auto p = parser(test::Assemble(PreambleNames({"a", "b", "c", "d", "e"}) + R"(
+ auto p = parser(test::Assemble(PreambleNames({"a", "b", "c", "d", "e"}) + R"(
%100 = OpFunction %void None %voidfn
%entry = OpLabel
%a = OpVariable %ptr_bool Function %true
@@ -176,22 +175,21 @@ TEST_F(SpvParserFunctionVarTest, EmitFunctionVariables_ScalarInitializers) {
OpReturn
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitFunctionVariables());
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitFunctionVariables());
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr(R"(var a : bool = true;
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body), HasSubstr(R"(var a : bool = true;
var b : bool = false;
-var c : i32 = -1;
+var c : i32 = -1i;
var d : u32 = 1u;
-var e : f32 = 1.5;
+var e : f32 = 1.5f;
)"));
}
TEST_F(SpvParserFunctionVarTest, EmitFunctionVariables_ScalarNullInitializers) {
- auto p = parser(test::Assemble(PreambleNames({"a", "b", "c", "d"}) + R"(
+ auto p = parser(test::Assemble(PreambleNames({"a", "b", "c", "d"}) + R"(
%null_bool = OpConstantNull %bool
%null_int = OpConstantNull %int
%null_uint = OpConstantNull %uint
@@ -206,21 +204,20 @@ TEST_F(SpvParserFunctionVarTest, EmitFunctionVariables_ScalarNullInitializers) {
OpReturn
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitFunctionVariables());
-
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr(R"(var a : bool = false;
-var b : i32 = 0;
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitFunctionVariables());
+
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body), HasSubstr(R"(var a : bool = false;
+var b : i32 = 0i;
var c : u32 = 0u;
-var d : f32 = 0.0;
+var d : f32 = 0.0f;
)"));
}
TEST_F(SpvParserFunctionVarTest, EmitFunctionVariables_VectorInitializer) {
- auto p = parser(test::Assemble(Preamble() + R"(
+ auto p = parser(test::Assemble(Preamble() + R"(
%ptr = OpTypePointer Function %v2float
%two = OpConstant %float 2.0
%const = OpConstantComposite %v2float %float_1p5 %two
@@ -231,17 +228,17 @@ TEST_F(SpvParserFunctionVarTest, EmitFunctionVariables_VectorInitializer) {
OpReturn
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitFunctionVariables());
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitFunctionVariables());
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr("var x_200 : vec2<f32> = vec2<f32>(1.5, 2.0);"));
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body),
+ HasSubstr("var x_200 : vec2<f32> = vec2<f32>(1.5f, 2.0f);"));
}
TEST_F(SpvParserFunctionVarTest, EmitFunctionVariables_MatrixInitializer) {
- auto p = parser(test::Assemble(Preamble() + R"(
+ auto p = parser(test::Assemble(Preamble() + R"(
%ptr = OpTypePointer Function %m3v2float
%two = OpConstant %float 2.0
%three = OpConstant %float 3.0
@@ -257,20 +254,20 @@ TEST_F(SpvParserFunctionVarTest, EmitFunctionVariables_MatrixInitializer) {
OpReturn
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitFunctionVariables());
-
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr("var x_200 : mat3x2<f32> = mat3x2<f32>("
- "vec2<f32>(1.5, 2.0), "
- "vec2<f32>(2.0, 3.0), "
- "vec2<f32>(3.0, 4.0));"));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitFunctionVariables());
+
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body),
+ HasSubstr("var x_200 : mat3x2<f32> = mat3x2<f32>("
+ "vec2<f32>(1.5f, 2.0f), "
+ "vec2<f32>(2.0f, 3.0f), "
+ "vec2<f32>(3.0f, 4.0f));"));
}
TEST_F(SpvParserFunctionVarTest, EmitFunctionVariables_ArrayInitializer) {
- auto p = parser(test::Assemble(Preamble() + R"(
+ auto p = parser(test::Assemble(Preamble() + R"(
%ptr = OpTypePointer Function %arr2uint
%two = OpConstant %uint 2
%const = OpConstantComposite %arr2uint %uint_1 %two
@@ -281,18 +278,17 @@ TEST_F(SpvParserFunctionVarTest, EmitFunctionVariables_ArrayInitializer) {
OpReturn
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitFunctionVariables());
-
- auto ast_body = fe.ast_body();
- EXPECT_THAT(
- test::ToString(p->program(), ast_body),
- HasSubstr("var x_200 : array<u32, 2u> = array<u32, 2u>(1u, 2u);"));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitFunctionVariables());
+
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body),
+ HasSubstr("var x_200 : array<u32, 2u> = array<u32, 2u>(1u, 2u);"));
}
TEST_F(SpvParserFunctionVarTest, EmitFunctionVariables_ArrayInitializer_Alias) {
- auto p = parser(test::Assemble(R"(
+ auto p = parser(test::Assemble(R"(
OpCapability Shader
OpMemoryModel Logical Simple
OpEntryPoint Fragment %100 "main"
@@ -309,18 +305,18 @@ TEST_F(SpvParserFunctionVarTest, EmitFunctionVariables_ArrayInitializer_Alias) {
OpReturn
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitFunctionVariables());
-
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- const char* expect = "var x_200 : Arr = Arr(1u, 2u);\n";
- EXPECT_EQ(expect, got);
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitFunctionVariables());
+
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ const char* expect = "var x_200 : Arr = Arr(1u, 2u);\n";
+ EXPECT_EQ(expect, got);
}
TEST_F(SpvParserFunctionVarTest, EmitFunctionVariables_ArrayInitializer_Null) {
- auto p = parser(test::Assemble(Preamble() + R"(
+ auto p = parser(test::Assemble(Preamble() + R"(
%ptr = OpTypePointer Function %arr2uint
%two = OpConstant %uint 2
%const = OpConstantNull %arr2uint
@@ -331,18 +327,17 @@ TEST_F(SpvParserFunctionVarTest, EmitFunctionVariables_ArrayInitializer_Null) {
OpReturn
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitFunctionVariables());
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitFunctionVariables());
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr("var x_200 : array<u32, 2u> = array<u32, 2u>();"));
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body),
+ HasSubstr("var x_200 : array<u32, 2u> = array<u32, 2u>();"));
}
-TEST_F(SpvParserFunctionVarTest,
- EmitFunctionVariables_ArrayInitializer_Alias_Null) {
- auto p = parser(test::Assemble(R"(
+TEST_F(SpvParserFunctionVarTest, EmitFunctionVariables_ArrayInitializer_Alias_Null) {
+ auto p = parser(test::Assemble(R"(
OpCapability Shader
OpMemoryModel Logical Simple
OpEntryPoint Fragment %100 "main"
@@ -359,17 +354,17 @@ TEST_F(SpvParserFunctionVarTest,
OpReturn
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitFunctionVariables());
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitFunctionVariables());
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr("var x_200 : Arr = @stride(16) array<u32, 2u>();"));
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body),
+ HasSubstr("var x_200 : Arr = @stride(16) array<u32, 2u>();"));
}
TEST_F(SpvParserFunctionVarTest, EmitFunctionVariables_StructInitializer) {
- auto p = parser(test::Assemble(Preamble() + R"(
+ auto p = parser(test::Assemble(Preamble() + R"(
%ptr = OpTypePointer Function %strct
%two = OpConstant %uint 2
%arrconst = OpConstantComposite %arr2uint %uint_1 %two
@@ -381,17 +376,17 @@ TEST_F(SpvParserFunctionVarTest, EmitFunctionVariables_StructInitializer) {
OpReturn
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitFunctionVariables());
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitFunctionVariables());
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr("var x_200 : S = S(1u, 1.5, array<u32, 2u>(1u, 2u));"));
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body),
+ HasSubstr("var x_200 : S = S(1u, 1.5f, array<u32, 2u>(1u, 2u));"));
}
TEST_F(SpvParserFunctionVarTest, EmitFunctionVariables_StructInitializer_Null) {
- auto p = parser(test::Assemble(Preamble() + R"(
+ auto p = parser(test::Assemble(Preamble() + R"(
%ptr = OpTypePointer Function %strct
%two = OpConstant %uint 2
%arrconst = OpConstantComposite %arr2uint %uint_1 %two
@@ -403,19 +398,18 @@ TEST_F(SpvParserFunctionVarTest, EmitFunctionVariables_StructInitializer_Null) {
OpReturn
OpFunctionEnd
)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitFunctionVariables());
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitFunctionVariables());
- auto ast_body = fe.ast_body();
- EXPECT_THAT(test::ToString(p->program(), ast_body),
- HasSubstr("var x_200 : S = S(0u, 0.0, array<u32, 2u>());"));
+ auto ast_body = fe.ast_body();
+ EXPECT_THAT(test::ToString(p->program(), ast_body),
+ HasSubstr("var x_200 : S = S(0u, 0.0f, array<u32, 2u>());"));
}
-TEST_F(SpvParserFunctionVarTest,
- EmitFunctionVariables_Decorate_RelaxedPrecision) {
- // RelaxedPrecisionis dropped
- const auto assembly = Caps({"myvar"}) + R"(
+TEST_F(SpvParserFunctionVarTest, EmitFunctionVariables_Decorate_RelaxedPrecision) {
+ // RelaxedPrecision is dropped
+ const auto assembly = Caps({"myvar"}) + R"(
OpDecorate %myvar RelaxedPrecision
%float = OpTypeFloat 32
@@ -430,20 +424,19 @@ TEST_F(SpvParserFunctionVarTest,
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitFunctionVariables());
-
- auto ast_body = fe.ast_body();
- const auto got = test::ToString(p->program(), ast_body);
- EXPECT_EQ(got, "var myvar : f32;\n") << got;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitFunctionVariables());
+
+ auto ast_body = fe.ast_body();
+ const auto got = test::ToString(p->program(), ast_body);
+ EXPECT_EQ(got, "var myvar : f32;\n") << got;
}
-TEST_F(SpvParserFunctionVarTest,
- EmitFunctionVariables_MemberDecorate_RelaxedPrecision) {
- // RelaxedPrecisionis dropped
- const auto assembly = Caps({"myvar", "strct"}) + R"(
+TEST_F(SpvParserFunctionVarTest, EmitFunctionVariables_MemberDecorate_RelaxedPrecision) {
+ // RelaxedPrecision is dropped
+ const auto assembly = Caps({"myvar", "strct"}) + R"(
OpMemberDecorate %strct 0 RelaxedPrecision
%float = OpTypeFloat 32
@@ -459,20 +452,19 @@ TEST_F(SpvParserFunctionVarTest,
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions())
- << assembly << p->error() << std::endl;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitFunctionVariables());
-
- auto ast_body = fe.ast_body();
- const auto got = test::ToString(p->program(), ast_body);
- EXPECT_EQ(got, "var myvar : strct;\n") << got;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions())
+ << assembly << p->error() << std::endl;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitFunctionVariables());
+
+ auto ast_body = fe.ast_body();
+ const auto got = test::ToString(p->program(), ast_body);
+ EXPECT_EQ(got, "var myvar : strct;\n") << got;
}
-TEST_F(SpvParserFunctionVarTest,
- EmitFunctionVariables_StructDifferOnlyInMemberName) {
- auto p = parser(test::Assemble(R"(
+TEST_F(SpvParserFunctionVarTest, EmitFunctionVariables_StructDifferOnlyInMemberName) {
+ auto p = parser(test::Assemble(R"(
OpCapability Shader
OpMemoryModel Logical Simple
OpEntryPoint Fragment %100 "main"
@@ -496,20 +488,19 @@ TEST_F(SpvParserFunctionVarTest,
%41 = OpVariable %_ptr_Function__struct_6 Function
OpReturn
OpFunctionEnd)"));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitFunctionVariables());
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitFunctionVariables());
- auto ast_body = fe.ast_body();
- const auto got = test::ToString(p->program(), ast_body);
- EXPECT_THAT(got, HasSubstr(R"(var x_40 : S;
+ auto ast_body = fe.ast_body();
+ const auto got = test::ToString(p->program(), ast_body);
+ EXPECT_THAT(got, HasSubstr(R"(var x_40 : S;
var x_41 : S_1;
)"));
}
-TEST_F(SpvParserFunctionVarTest,
- EmitStatement_CombinatorialValue_Defer_UsedOnceSameConstruct) {
- auto assembly = Preamble() + R"(
+TEST_F(SpvParserFunctionVarTest, EmitStatement_CombinatorialValue_Defer_UsedOnceSameConstruct) {
+ auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -524,25 +515,24 @@ TEST_F(SpvParserFunctionVarTest,
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
-
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect =
- R"(var x_25 : u32;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect =
+ R"(var x_25 : u32;
x_25 = 1u;
x_25 = (1u + 1u);
return;
)";
- EXPECT_EQ(expect, got);
+ EXPECT_EQ(expect, got);
}
-TEST_F(SpvParserFunctionVarTest,
- EmitStatement_CombinatorialValue_Immediate_UsedTwice) {
- auto assembly = Preamble() + R"(
+TEST_F(SpvParserFunctionVarTest, EmitStatement_CombinatorialValue_Immediate_UsedTwice) {
+ auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -558,29 +548,29 @@ TEST_F(SpvParserFunctionVarTest,
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
-
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(var x_25 : u32;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(var x_25 : u32;
let x_2 : u32 = (1u + 1u);
x_25 = 1u;
x_25 = x_2;
x_25 = x_2;
return;
)";
- EXPECT_EQ(expect, got);
+ EXPECT_EQ(expect, got);
}
TEST_F(SpvParserFunctionVarTest,
EmitStatement_CombinatorialValue_Immediate_UsedOnceDifferentConstruct) {
- // Translation should not sink expensive operations into or out of control
- // flow. As a simple heuristic, don't move *any* combinatorial operation
- // across any control flow.
- auto assembly = Preamble() + R"(
+ // Translation should not sink expensive operations into or out of control
+ // flow. As a simple heuristic, don't move *any* combinatorial operation
+ // across any control flow.
+ auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -603,14 +593,14 @@ TEST_F(SpvParserFunctionVarTest,
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
-
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(var x_25 : u32;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(var x_25 : u32;
let x_2 : u32 = (1u + 1u);
x_25 = 1u;
loop {
@@ -622,18 +612,17 @@ loop {
x_25 = 2u;
return;
)";
- EXPECT_EQ(expect, got);
+ EXPECT_EQ(expect, got);
}
-TEST_F(
- SpvParserFunctionVarTest,
- EmitStatement_CombinatorialNonPointer_DefConstruct_DoesNotEncloseAllUses) {
- // Compensate for the difference between dominance and scoping.
- // Exercise hoisting of the constant definition to before its natural
- // location.
- //
- // The definition of %2 should be hoisted
- auto assembly = Preamble() + R"(
+TEST_F(SpvParserFunctionVarTest,
+ EmitStatement_CombinatorialNonPointer_DefConstruct_DoesNotEncloseAllUses) {
+ // Compensate for the difference between dominance and scoping.
+ // Exercise hoisting of the constant definition to before its natural
+ // location.
+ //
+ // The definition of %2 should be hoisted
+ auto assembly = Preamble() + R"(
%pty = OpTypePointer Private %uint
%1 = OpVariable %pty Private
@@ -676,14 +665,14 @@ TEST_F(
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
-
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(x_1 = 0u;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(x_1 = 0u;
loop {
var x_2 : u32;
x_1 = 1u;
@@ -708,21 +697,20 @@ loop {
x_1 = 5u;
return;
)";
- EXPECT_EQ(expect, got);
+ EXPECT_EQ(expect, got);
}
-TEST_F(
- SpvParserFunctionVarTest,
- EmitStatement_CombinatorialNonPointer_Hoisting_DefFirstBlockIf_InFunction) {
- // This is a hoisting case, where the definition is in the first block
- // of an if selection construct. In this case the definition should count
- // as being in the parent (enclosing) construct.
- //
- // The definition of %1 is in an IfSelection construct and also the enclosing
- // Function construct, both of which start at block %10. For the purpose of
- // determining the construct containing %10, go to the parent construct of
- // the IfSelection.
- auto assembly = Preamble() + R"(
+TEST_F(SpvParserFunctionVarTest,
+ EmitStatement_CombinatorialNonPointer_Hoisting_DefFirstBlockIf_InFunction) {
+ // This is a hoisting case, where the definition is in the first block
+ // of an if selection construct. In this case the definition should count
+ // as being in the parent (enclosing) construct.
+ //
+ // The definition of %1 is in an IfSelection construct and also the enclosing
+ // Function construct, both of which start at block %10. For the purpose of
+ // determining the construct containing %10, go to the parent construct of
+ // the IfSelection.
+ auto assembly = Preamble() + R"(
%pty = OpTypePointer Private %uint
%200 = OpVariable %pty Private
%cond = OpConstantTrue %bool
@@ -745,36 +733,36 @@ TEST_F(
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
-
- // We don't hoist x_1 into its own mutable variable. It is emitted as
- // a const definition.
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(let x_1 : u32 = 1u;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+
+ // We don't hoist x_1 into its own mutable variable. It is emitted as
+ // a const definition.
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(let x_1 : u32 = 1u;
if (true) {
}
let x_3 : u32 = x_1;
x_200 = x_3;
return;
)";
- EXPECT_EQ(expect, got);
+ EXPECT_EQ(expect, got);
}
TEST_F(SpvParserFunctionVarTest,
EmitStatement_CombinatorialNonPointer_Hoisting_DefFirstBlockIf_InIf) {
- // This is like the previous case, but the IfSelection is nested inside
- // another IfSelection.
- // This tests that the hoisting algorithm goes to only one parent of
-  // the defining if-selection block, and doesn't jump all the way out
- // to the Function construct that encloses everything.
- //
- // We should not hoist %1 because its definition should count as being
- // in the outer IfSelection, not the inner IfSelection.
- auto assembly = Preamble() + R"(
+ // This is like the previous case, but the IfSelection is nested inside
+ // another IfSelection.
+ // This tests that the hoisting algorithm goes to only one parent of
+    // the defining if-selection block, and doesn't jump all the way out
+ // to the Function construct that encloses everything.
+ //
+ // We should not hoist %1 because its definition should count as being
+ // in the outer IfSelection, not the inner IfSelection.
+ auto assembly = Preamble() + R"(
%pty = OpTypePointer Private %uint
%200 = OpVariable %pty Private
@@ -807,14 +795,14 @@ TEST_F(SpvParserFunctionVarTest,
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
-
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(if (true) {
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(if (true) {
let x_1 : u32 = 1u;
if (true) {
}
@@ -823,17 +811,16 @@ TEST_F(SpvParserFunctionVarTest,
}
return;
)";
- EXPECT_EQ(expect, got);
+ EXPECT_EQ(expect, got);
}
-TEST_F(
- SpvParserFunctionVarTest,
- EmitStatement_CombinatorialNonPointer_Hoisting_DefFirstBlockSwitch_InIf) {
- // This is like the previous case, but the definition is in a SwitchSelection
- // inside another IfSelection.
- // Tests that definitions in the first block of a switch count as being
- // in the parent of the switch construct.
- auto assembly = Preamble() + R"(
+TEST_F(SpvParserFunctionVarTest,
+ EmitStatement_CombinatorialNonPointer_Hoisting_DefFirstBlockSwitch_InIf) {
+ // This is like the previous case, but the definition is in a SwitchSelection
+ // inside another IfSelection.
+ // Tests that definitions in the first block of a switch count as being
+ // in the parent of the switch construct.
+ auto assembly = Preamble() + R"(
%pty = OpTypePointer Private %uint
%200 = OpVariable %pty Private
%cond = OpConstantTrue %bool
@@ -864,14 +851,14 @@ TEST_F(
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
-
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(if (true) {
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(if (true) {
let x_1 : u32 = 1u;
switch(1u) {
case 0u: {
@@ -884,20 +871,20 @@ TEST_F(
}
return;
)";
- EXPECT_EQ(expect, got);
+ EXPECT_EQ(expect, got);
}
TEST_F(SpvParserFunctionVarTest,
EmitStatement_CombinatorialNonPointer_Hoisting_DefAndUseFirstBlockIf) {
-  // In this test, both the definition and the use are in the first block
- // of an IfSelection. No hoisting occurs because hoisting is triggered
- // on whether the defining construct contains the last use, rather than
- // whether the two constructs are the same.
- //
- // This example has two SSA IDs which are tempting to hoist but should not:
- // %1 is defined and used in the first block of an IfSelection.
- // Do not hoist it.
- auto assembly = Preamble() + R"(
+    // In this test, both the definition and the use are in the first block
+ // of an IfSelection. No hoisting occurs because hoisting is triggered
+ // on whether the defining construct contains the last use, rather than
+ // whether the two constructs are the same.
+ //
+ // This example has two SSA IDs which are tempting to hoist but should not:
+ // %1 is defined and used in the first block of an IfSelection.
+ // Do not hoist it.
+ auto assembly = Preamble() + R"(
%cond = OpConstantTrue %bool
%100 = OpFunction %void None %voidfn
@@ -917,26 +904,26 @@ TEST_F(SpvParserFunctionVarTest,
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
-
- // We don't hoist x_1 into its own mutable variable. It is emitted as
- // a const definition.
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(let x_1 : u32 = 1u;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+
+ // We don't hoist x_1 into its own mutable variable. It is emitted as
+ // a const definition.
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(let x_1 : u32 = 1u;
let x_2 : u32 = x_1;
if (true) {
}
return;
)";
- EXPECT_EQ(expect, got);
+ EXPECT_EQ(expect, got);
}
TEST_F(SpvParserFunctionVarTest, EmitStatement_Phi_SingleBlockLoopIndex) {
- auto assembly = Preamble() + R"(
+ auto assembly = Preamble() + R"(
%pty = OpTypePointer Private %uint
%1 = OpVariable %pty Private
%boolpty = OpTypePointer Private %bool
@@ -974,14 +961,14 @@ TEST_F(SpvParserFunctionVarTest, EmitStatement_Phi_SingleBlockLoopIndex) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
-
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(loop {
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(loop {
var x_2_phi : u32;
var x_3_phi : u32;
let x_101 : bool = x_7;
@@ -1003,11 +990,11 @@ TEST_F(SpvParserFunctionVarTest, EmitStatement_Phi_SingleBlockLoopIndex) {
}
return;
)";
- EXPECT_EQ(expect, got);
+ EXPECT_EQ(expect, got);
}
TEST_F(SpvParserFunctionVarTest, EmitStatement_Phi_MultiBlockLoopIndex) {
- auto assembly = Preamble() + R"(
+ auto assembly = Preamble() + R"(
%pty = OpTypePointer Private %uint
%1 = OpVariable %pty Private
%boolpty = OpTypePointer Private %bool
@@ -1048,14 +1035,14 @@ TEST_F(SpvParserFunctionVarTest, EmitStatement_Phi_MultiBlockLoopIndex) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
-
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(loop {
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(loop {
var x_2_phi : u32;
var x_3_phi : u32;
let x_101 : bool = x_7;
@@ -1082,12 +1069,11 @@ TEST_F(SpvParserFunctionVarTest, EmitStatement_Phi_MultiBlockLoopIndex) {
}
return;
)";
- EXPECT_EQ(expect, got);
+ EXPECT_EQ(expect, got);
}
-TEST_F(SpvParserFunctionVarTest,
- EmitStatement_Phi_ValueFromLoopBodyAndContinuing) {
- auto assembly = Preamble() + R"(
+TEST_F(SpvParserFunctionVarTest, EmitStatement_Phi_ValueFromLoopBodyAndContinuing) {
+ auto assembly = Preamble() + R"(
%pty = OpTypePointer Private %uint
%1 = OpVariable %pty Private
%boolpty = OpTypePointer Private %bool
@@ -1128,15 +1114,14 @@ TEST_F(SpvParserFunctionVarTest,
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions())
- << assembly << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
-
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(let x_101 : bool = x_17;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(let x_101 : bool = x_17;
loop {
var x_2_phi : u32;
var x_5_phi : u32;
@@ -1161,11 +1146,11 @@ loop {
}
return;
)";
- EXPECT_EQ(expect, got);
+ EXPECT_EQ(expect, got);
}
TEST_F(SpvParserFunctionVarTest, EmitStatement_Phi_FromElseAndThen) {
- auto assembly = Preamble() + R"(
+ auto assembly = Preamble() + R"(
%pty = OpTypePointer Private %uint
%1 = OpVariable %pty Private
%boolpty = OpTypePointer Private %bool
@@ -1208,14 +1193,14 @@ TEST_F(SpvParserFunctionVarTest, EmitStatement_Phi_FromElseAndThen) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
-
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(let x_101 : bool = x_7;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(let x_101 : bool = x_7;
let x_102 : bool = x_8;
loop {
var x_2_phi : u32;
@@ -1238,11 +1223,11 @@ loop {
}
return;
)";
- EXPECT_EQ(expect, got) << got;
+ EXPECT_EQ(expect, got) << got;
}
TEST_F(SpvParserFunctionVarTest, EmitStatement_Phi_FromHeaderAndThen) {
- auto assembly = Preamble() + R"(
+ auto assembly = Preamble() + R"(
%pty = OpTypePointer Private %uint
%1 = OpVariable %pty Private
%boolpty = OpTypePointer Private %bool
@@ -1282,14 +1267,14 @@ TEST_F(SpvParserFunctionVarTest, EmitStatement_Phi_FromHeaderAndThen) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
-
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(let x_101 : bool = x_7;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(let x_101 : bool = x_7;
let x_102 : bool = x_8;
loop {
var x_2_phi : u32;
@@ -1312,13 +1297,12 @@ loop {
}
return;
)";
- EXPECT_EQ(expect, got) << got;
+ EXPECT_EQ(expect, got) << got;
}
-TEST_F(SpvParserFunctionVarTest,
- EmitStatement_Phi_InMerge_PredecessorsDominatdByNestedSwitchCase) {
- // This is the essence of the bug report from crbug.com/tint/495
- auto assembly = Preamble() + R"(
+TEST_F(SpvParserFunctionVarTest, EmitStatement_Phi_InMerge_PredecessorsDominatdByNestedSwitchCase) {
+ // This is the essence of the bug report from crbug.com/tint/495
+ auto assembly = Preamble() + R"(
%cond = OpConstantTrue %bool
%pty = OpTypePointer Private %uint
%1 = OpVariable %pty Private
@@ -1355,14 +1339,14 @@ TEST_F(SpvParserFunctionVarTest,
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
-
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(var x_41_phi : u32;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(var x_41_phi : u32;
switch(1u) {
default: {
fallthrough;
@@ -1382,20 +1366,20 @@ switch(1u) {
let x_41 : u32 = x_41_phi;
return;
)";
- EXPECT_EQ(expect, got) << got << assembly;
+ EXPECT_EQ(expect, got) << got << assembly;
}
TEST_F(SpvParserFunctionVarTest, EmitStatement_UseInPhiCountsAsUse) {
- // From crbug.com/215
- // If the only use of a combinatorially computed ID is as the value
- // in an OpPhi, then we still have to emit it. The algorithm fix
- // is to always count uses in Phis.
- // This is the reduced case from the bug report.
- //
- // The only use of %12 is in the phi.
- // The only use of %11 is in %12.
-  // Both definitions need to be emitted to the output.
- auto assembly = Preamble() + R"(
+ // From crbug.com/215
+ // If the only use of a combinatorially computed ID is as the value
+ // in an OpPhi, then we still have to emit it. The algorithm fix
+ // is to always count uses in Phis.
+ // This is the reduced case from the bug report.
+ //
+ // The only use of %12 is in the phi.
+ // The only use of %11 is in %12.
+    // Both definitions need to be emitted to the output.
+ auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -1414,14 +1398,14 @@ TEST_F(SpvParserFunctionVarTest, EmitStatement_UseInPhiCountsAsUse) {
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
-
- auto ast_body = fe.ast_body();
- auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(var x_101_phi : bool;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+
+ auto ast_body = fe.ast_body();
+ auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(var x_101_phi : bool;
let x_11 : bool = (true & true);
let x_12 : bool = !(x_11);
x_101_phi = x_11;
@@ -1431,13 +1415,12 @@ if (true) {
let x_101 : bool = x_101_phi;
return;
)";
- EXPECT_EQ(expect, got);
+ EXPECT_EQ(expect, got);
}
-TEST_F(SpvParserFunctionVarTest,
- EmitStatement_Phi_ValueFromBlockNotInBlockOrderIgnored) {
- // From crbug.com/tint/804
- const auto assembly = Preamble() + R"(
+TEST_F(SpvParserFunctionVarTest, EmitStatement_Phi_ValueFromBlockNotInBlockOrderIgnored) {
+ // From crbug.com/tint/804
+ const auto assembly = Preamble() + R"(
%float_42 = OpConstant %float 42.0
%cond = OpUndef %bool
@@ -1471,12 +1454,12 @@ TEST_F(SpvParserFunctionVarTest,
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
- const auto* expected = R"(loop {
+ const auto* expected = R"(loop {
if (false) {
break;
}
@@ -1489,14 +1472,14 @@ TEST_F(SpvParserFunctionVarTest,
}
return;
)";
- auto ast_body = fe.ast_body();
- const auto got = test::ToString(p->program(), ast_body);
- EXPECT_EQ(got, expected);
+ auto ast_body = fe.ast_body();
+ const auto got = test::ToString(p->program(), ast_body);
+ EXPECT_EQ(got, expected);
}
TEST_F(SpvParserFunctionVarTest, EmitStatement_Hoist_CompositeInsert) {
- // From crbug.com/tint/804
- const auto assembly = Preamble() + R"(
+ // From crbug.com/tint/804
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -1515,29 +1498,29 @@ TEST_F(SpvParserFunctionVarTest, EmitStatement_Hoist_CompositeInsert) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
- const auto* expected = R"(var x_200 : vec2<i32>;
+ const auto* expected = R"(var x_200 : vec2<i32>;
if (true) {
x_200 = vec2<i32>();
- x_200.x = 0;
+ x_200.x = 0i;
} else {
return;
}
let x_201 : vec2<i32> = x_200;
return;
)";
- auto ast_body = fe.ast_body();
- const auto got = test::ToString(p->program(), ast_body);
- EXPECT_EQ(got, expected);
+ auto ast_body = fe.ast_body();
+ const auto got = test::ToString(p->program(), ast_body);
+ EXPECT_EQ(got, expected);
}
TEST_F(SpvParserFunctionVarTest, EmitStatement_Hoist_VectorInsertDynamic) {
- // Spawned from crbug.com/tint/804
- const auto assembly = Preamble() + R"(
+ // Spawned from crbug.com/tint/804
+ const auto assembly = Preamble() + R"(
%100 = OpFunction %void None %voidfn
%10 = OpLabel
@@ -1556,29 +1539,29 @@ TEST_F(SpvParserFunctionVarTest, EmitStatement_Hoist_VectorInsertDynamic) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
-
- auto ast_body = fe.ast_body();
- const auto got = test::ToString(p->program(), ast_body);
- const auto* expected = R"(var x_200 : vec2<i32>;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+
+ auto ast_body = fe.ast_body();
+ const auto got = test::ToString(p->program(), ast_body);
+ const auto* expected = R"(var x_200 : vec2<i32>;
if (true) {
x_200 = vec2<i32>();
- x_200[1] = 3;
+ x_200[1i] = 3i;
} else {
return;
}
let x_201 : vec2<i32> = x_200;
return;
)";
- EXPECT_EQ(got, expected) << got;
+ EXPECT_EQ(got, expected) << got;
}
TEST_F(SpvParserFunctionVarTest, EmitStatement_Hoist_UsedAsNonPtrArg) {
- // Spawned from crbug.com/tint/804
- const auto assembly = Preamble() + R"(
+ // Spawned from crbug.com/tint/804
+ const auto assembly = Preamble() + R"(
%fn_int = OpTypeFunction %void %int
%500 = OpFunction %void None %fn_int
@@ -1605,29 +1588,29 @@ TEST_F(SpvParserFunctionVarTest, EmitStatement_Hoist_UsedAsNonPtrArg) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
-
- auto ast_body = fe.ast_body();
- const auto got = test::ToString(p->program(), ast_body);
- const auto* expected = R"(var x_200 : i32;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+
+ auto ast_body = fe.ast_body();
+ const auto got = test::ToString(p->program(), ast_body);
+ const auto* expected = R"(var x_200 : i32;
if (true) {
- x_200 = 1;
+ x_200 = 1i;
} else {
return;
}
x_500(x_200);
return;
)";
- EXPECT_EQ(got, expected) << got;
+ EXPECT_EQ(got, expected) << got;
}
TEST_F(SpvParserFunctionVarTest, DISABLED_EmitStatement_Hoist_UsedAsPtrArg) {
- // Spawned from crbug.com/tint/804
- // Blocked by crbug.com/tint/98: hoisting pointer types
- const auto assembly = Preamble() + R"(
+ // Spawned from crbug.com/tint/804
+ // Blocked by crbug.com/tint/98: hoisting pointer types
+ const auto assembly = Preamble() + R"(
%fn_int = OpTypeFunction %void %ptr_int
@@ -1656,15 +1639,15 @@ TEST_F(SpvParserFunctionVarTest, DISABLED_EmitStatement_Hoist_UsedAsPtrArg) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
-
- auto ast_body = fe.ast_body();
- const auto got = test::ToString(p->program(), ast_body);
- const auto* expected = R"(xxxxxxxxxxxxxxxxxxxxx)";
- EXPECT_EQ(got, expected) << got;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+
+ auto ast_body = fe.ast_body();
+ const auto got = test::ToString(p->program(), ast_body);
+ const auto* expected = R"(xxxxxxxxxxxxxxxxxxxxx)";
+ EXPECT_EQ(got, expected) << got;
}
} // namespace
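The hoisting tests above all follow one rule stated in their comments: a combinatorial SSA value is turned into a mutable var only when the construct holding its definition does not enclose the construct holding its last use, and a definition in the first block of an if- or switch-selection counts as belonging to the parent construct. The sketch below is illustration only; Construct, Encloses, and ShouldHoist are hypothetical stand-ins for the FunctionEmitter's real bookkeeping, not Tint API.

    // Illustration only: a toy model of the hoisting rule described in the
    // test comments above. All names here are hypothetical.
    #include <cassert>

    struct Construct {
        const Construct* parent = nullptr;  // enclosing construct; null for the function

        // True if `this` is `other` or one of its ancestors.
        bool Encloses(const Construct* other) const {
            for (const Construct* c = other; c != nullptr; c = c->parent) {
                if (c == this) {
                    return true;
                }
            }
            return false;
        }
    };

    // Hoist a definition to a mutable var when its defining construct does not
    // enclose its last use (dominance does not imply WGSL scoping).
    bool ShouldHoist(const Construct* def, const Construct* last_use) {
        return !def->Encloses(last_use);
    }

    int main() {
        Construct function;               // outermost construct
        Construct if_selection;
        if_selection.parent = &function;  // nested inside the function

        // Definition and last use both inside the if-selection: keep the let.
        assert(!ShouldHoist(&if_selection, &if_selection));
        // Defined inside the if-selection, last used out in the function: hoist.
        assert(ShouldHoist(&if_selection, &function));
        return 0;
    }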
diff --git a/chromium/third_party/dawn/src/tint/reader/spirv/namer.cc b/chromium/third_party/dawn/src/tint/reader/spirv/namer.cc
index 6e7044f0f2d..12331570290 100644
--- a/chromium/third_party/dawn/src/tint/reader/spirv/namer.cc
+++ b/chromium/third_party/dawn/src/tint/reader/spirv/namer.cc
@@ -58,178 +58,173 @@ const char* kWGSLReservedWords[] = {
} // namespace
Namer::Namer(const FailStream& fail_stream) : fail_stream_(fail_stream) {
- for (const auto* reserved : kWGSLReservedWords) {
- name_to_id_[std::string(reserved)] = 0;
- }
+ for (const auto* reserved : kWGSLReservedWords) {
+ name_to_id_[std::string(reserved)] = 0;
+ }
}
Namer::~Namer() = default;
std::string Namer::Sanitize(const std::string& suggested_name) {
- if (suggested_name.empty()) {
- return "empty";
- }
- // Otherwise, replace invalid characters by '_'.
- std::string result;
- std::string invalid_as_first_char = "_0123456789";
- std::string valid =
- "abcdefghijklmnopqrstuvwxyz"
- "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
- "_0123456789";
- // If the first character is invalid for starting a WGSL identifier, then
- // prefix the result with "x".
- if ((std::string::npos != invalid_as_first_char.find(suggested_name[0])) ||
- (std::string::npos == valid.find(suggested_name[0]))) {
- result = "x";
- }
- std::transform(suggested_name.begin(), suggested_name.end(),
- std::back_inserter(result), [&valid](const char c) {
- return (std::string::npos == valid.find(c)) ? '_' : c;
- });
- return result;
+ if (suggested_name.empty()) {
+ return "empty";
+ }
+ // Otherwise, replace invalid characters by '_'.
+ std::string result;
+ std::string invalid_as_first_char = "_0123456789";
+ std::string valid =
+ "abcdefghijklmnopqrstuvwxyz"
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ "_0123456789";
+ // If the first character is invalid for starting a WGSL identifier, then
+ // prefix the result with "x".
+ if ((std::string::npos != invalid_as_first_char.find(suggested_name[0])) ||
+ (std::string::npos == valid.find(suggested_name[0]))) {
+ result = "x";
+ }
+ std::transform(
+ suggested_name.begin(), suggested_name.end(), std::back_inserter(result),
+ [&valid](const char c) { return (std::string::npos == valid.find(c)) ? '_' : c; });
+ return result;
}
-std::string Namer::GetMemberName(uint32_t struct_id,
- uint32_t member_index) const {
- std::string result;
- auto where = struct_member_names_.find(struct_id);
- if (where != struct_member_names_.end()) {
- auto& member_names = where->second;
- if (member_index < member_names.size()) {
- result = member_names[member_index];
+std::string Namer::GetMemberName(uint32_t struct_id, uint32_t member_index) const {
+ std::string result;
+ auto where = struct_member_names_.find(struct_id);
+ if (where != struct_member_names_.end()) {
+ auto& member_names = where->second;
+ if (member_index < member_names.size()) {
+ result = member_names[member_index];
+ }
}
- }
- return result;
+ return result;
}
std::string Namer::FindUnusedDerivedName(const std::string& base_name) {
- // Ensure uniqueness among names.
- std::string derived_name;
- uint32_t& i = next_unusued_derived_name_id_[base_name];
- while (i != 0xffffffff) {
- std::stringstream new_name_stream;
- new_name_stream << base_name;
- if (i > 0) {
- new_name_stream << "_" << i;
- }
- derived_name = new_name_stream.str();
- if (!IsRegistered(derived_name)) {
- return derived_name;
+ // Ensure uniqueness among names.
+ std::string derived_name;
+ uint32_t& i = next_unusued_derived_name_id_[base_name];
+ while (i != 0xffffffff) {
+ std::stringstream new_name_stream;
+ new_name_stream << base_name;
+ if (i > 0) {
+ new_name_stream << "_" << i;
+ }
+ derived_name = new_name_stream.str();
+ if (!IsRegistered(derived_name)) {
+ return derived_name;
+ }
+ i++;
}
- i++;
- }
- TINT_ASSERT(Reader, false /* FindUnusedDerivedName() overflowed u32 */);
- return "<u32 overflow>";
+ TINT_ASSERT(Reader, false /* FindUnusedDerivedName() overflowed u32 */);
+ return "<u32 overflow>";
}
std::string Namer::MakeDerivedName(const std::string& base_name) {
- auto result = FindUnusedDerivedName(base_name);
- const bool registered = RegisterWithoutId(result);
- TINT_ASSERT(Reader, registered);
- return result;
+ auto result = FindUnusedDerivedName(base_name);
+ const bool registered = RegisterWithoutId(result);
+ TINT_ASSERT(Reader, registered);
+ return result;
}
bool Namer::Register(uint32_t id, const std::string& name) {
- if (HasName(id)) {
- return Fail() << "internal error: ID " << id
- << " already has registered name: " << id_to_name_[id];
- }
- if (!RegisterWithoutId(name)) {
- return false;
- }
- id_to_name_[id] = name;
- name_to_id_[name] = id;
- return true;
+ if (HasName(id)) {
+ return Fail() << "internal error: ID " << id
+ << " already has registered name: " << id_to_name_[id];
+ }
+ if (!RegisterWithoutId(name)) {
+ return false;
+ }
+ id_to_name_[id] = name;
+ name_to_id_[name] = id;
+ return true;
}
bool Namer::RegisterWithoutId(const std::string& name) {
- if (IsRegistered(name)) {
- return Fail() << "internal error: name already registered: " << name;
- }
- name_to_id_[name] = 0;
- return true;
+ if (IsRegistered(name)) {
+ return Fail() << "internal error: name already registered: " << name;
+ }
+ name_to_id_[name] = 0;
+ return true;
}
-bool Namer::SuggestSanitizedName(uint32_t id,
- const std::string& suggested_name) {
- if (HasName(id)) {
- return false;
- }
+bool Namer::SuggestSanitizedName(uint32_t id, const std::string& suggested_name) {
+ if (HasName(id)) {
+ return false;
+ }
- return Register(id, FindUnusedDerivedName(Sanitize(suggested_name)));
+ return Register(id, FindUnusedDerivedName(Sanitize(suggested_name)));
}
bool Namer::SuggestSanitizedMemberName(uint32_t struct_id,
uint32_t member_index,
const std::string& suggested_name) {
- // Creates an empty vector the first time we visit this struct.
- auto& name_vector = struct_member_names_[struct_id];
- // Resizing will set new entries to the empty string.
- name_vector.resize(std::max(name_vector.size(), size_t(member_index + 1)));
- auto& entry = name_vector[member_index];
- if (entry.empty()) {
- entry = Sanitize(suggested_name);
- return true;
- }
- return false;
+ // Creates an empty vector the first time we visit this struct.
+ auto& name_vector = struct_member_names_[struct_id];
+ // Resizing will set new entries to the empty string.
+ name_vector.resize(std::max(name_vector.size(), size_t(member_index + 1)));
+ auto& entry = name_vector[member_index];
+ if (entry.empty()) {
+ entry = Sanitize(suggested_name);
+ return true;
+ }
+ return false;
}
-void Namer::ResolveMemberNamesForStruct(uint32_t struct_id,
- uint32_t num_members) {
- auto& name_vector = struct_member_names_[struct_id];
- // Resizing will set new entries to the empty string.
- // It would have been an error if the client had registered a name for
- // an out-of-bounds member index, so toss those away.
- name_vector.resize(num_members);
-
- std::unordered_set<std::string> used_names;
-
- // Returns a name, based on the suggestion, which does not equal
- // any name in the used_names set.
- auto disambiguate_name =
- [&used_names](const std::string& suggestion) -> std::string {
- if (used_names.find(suggestion) == used_names.end()) {
- // There is no collision.
- return suggestion;
+void Namer::ResolveMemberNamesForStruct(uint32_t struct_id, uint32_t num_members) {
+ auto& name_vector = struct_member_names_[struct_id];
+ // Resizing will set new entries to the empty string.
+ // It would have been an error if the client had registered a name for
+ // an out-of-bounds member index, so toss those away.
+ name_vector.resize(num_members);
+
+ std::unordered_set<std::string> used_names;
+
+ // Returns a name, based on the suggestion, which does not equal
+ // any name in the used_names set.
+ auto disambiguate_name = [&used_names](const std::string& suggestion) -> std::string {
+ if (used_names.find(suggestion) == used_names.end()) {
+ // There is no collision.
+ return suggestion;
+ }
+
+ uint32_t i = 1;
+ std::string new_name;
+ do {
+ std::stringstream new_name_stream;
+ new_name_stream << suggestion << "_" << i;
+ new_name = new_name_stream.str();
+ ++i;
+ } while (used_names.find(new_name) != used_names.end());
+ return new_name;
+ };
+
+ // First ensure uniqueness among names for which we have already taken
+ // suggestions.
+ for (auto& name : name_vector) {
+ if (!name.empty()) {
+ // This modifies the names in-place, i.e. update the name_vector
+ // entries.
+ name = disambiguate_name(name);
+ used_names.insert(name);
+ }
}
- uint32_t i = 1;
- std::string new_name;
- do {
- std::stringstream new_name_stream;
- new_name_stream << suggestion << "_" << i;
- new_name = new_name_stream.str();
- ++i;
- } while (used_names.find(new_name) != used_names.end());
- return new_name;
- };
-
- // First ensure uniqueness among names for which we have already taken
- // suggestions.
- for (auto& name : name_vector) {
- if (!name.empty()) {
- // This modifies the names in-place, i.e. update the name_vector
- // entries.
- name = disambiguate_name(name);
- used_names.insert(name);
- }
- }
-
- // Now ensure uniqueness among the rest. Doing this in a second pass
- // allows us to preserve suggestions as much as possible. Otherwise
- // a generated name such as 'field1' might collide with a user-suggested
- // name of 'field1' attached to a later member.
- uint32_t index = 0;
- for (auto& name : name_vector) {
- if (name.empty()) {
- std::stringstream suggestion;
- suggestion << "field" << index;
- // Again, modify the name-vector in-place.
- name = disambiguate_name(suggestion.str());
- used_names.insert(name);
+ // Now ensure uniqueness among the rest. Doing this in a second pass
+ // allows us to preserve suggestions as much as possible. Otherwise
+ // a generated name such as 'field1' might collide with a user-suggested
+ // name of 'field1' attached to a later member.
+ uint32_t index = 0;
+ for (auto& name : name_vector) {
+ if (name.empty()) {
+ std::stringstream suggestion;
+ suggestion << "field" << index;
+ // Again, modify the name-vector in-place.
+ name = disambiguate_name(suggestion.str());
+ used_names.insert(name);
+ }
+ index++;
}
- index++;
- }
}
} // namespace tint::reader::spirv
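The two passes in ResolveMemberNamesForStruct exist for the case called out in its comment: a generated name such as field1 must not steal a user-suggested field1 attached to a later member. The snippet below is a minimal sketch of that behavior; the include path, the FailStream construction (mirrored from the test fixtures in this patch), and the exact resulting names follow from a reading of the code above rather than from documented guarantees.

    // Illustration only: exercising the two-pass member-name resolution from
    // Namer::ResolveMemberNamesForStruct above. The include path and the
    // FailStream construction are assumptions mirrored from this patch's fixtures.
    #include <cassert>
    #include <cstdint>
    #include <sstream>

    #include "src/tint/reader/spirv/namer.h"

    int main() {
        bool success = true;
        std::stringstream errors;
        tint::reader::spirv::FailStream fail_stream(&success, &errors);
        tint::reader::spirv::Namer namer(fail_stream);

        const uint32_t struct_id = 10;
        // Only member 2 carries a user suggestion, and it happens to look like
        // a generated name.
        namer.SuggestSanitizedMemberName(struct_id, 2, "field1");
        namer.ResolveMemberNamesForStruct(struct_id, 3);

        // Pass 1 reserves the suggestion; pass 2 generates names for the rest
        // and disambiguates the collision on member 1.
        assert(namer.GetMemberName(struct_id, 0) == "field0");
        assert(namer.GetMemberName(struct_id, 1) == "field1_1");
        assert(namer.GetMemberName(struct_id, 2) == "field1");
        return 0;
    }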
diff --git a/chromium/third_party/dawn/src/tint/reader/spirv/namer.h b/chromium/third_party/dawn/src/tint/reader/spirv/namer.h
index fa5fc3a7e95..7a20e8738b8 100644
--- a/chromium/third_party/dawn/src/tint/reader/spirv/namer.h
+++ b/chromium/third_party/dawn/src/tint/reader/spirv/namer.h
@@ -31,129 +31,125 @@ namespace tint::reader::spirv {
/// to a safer character such as an underscore. Also, sanitized names
/// never start with an underscore.
class Namer {
- public:
- /// Creates a new namer
- /// @param fail_stream the error reporting stream
- explicit Namer(const FailStream& fail_stream);
- /// Destructor
- ~Namer();
-
- /// Sanitizes the given string, to replace unusual characters with
-  /// obviously-valid identifier characters. An empty string yields "empty".
- /// A sanitized name never starts with an underscore.
- /// @param suggested_name input string
- /// @returns sanitized name, suitable for use as an identifier
- static std::string Sanitize(const std::string& suggested_name);
-
- /// Registers a failure.
- /// @returns a fail stream to accumulate diagnostics.
- FailStream& Fail() { return fail_stream_.Fail(); }
-
- /// @param id the SPIR-V ID
-  /// @returns true if the given ID already has a registered name.
- bool HasName(uint32_t id) {
- return id_to_name_.find(id) != id_to_name_.end();
- }
-
- /// @param name a string
- /// @returns true if the string has been registered as a name.
- bool IsRegistered(const std::string& name) const {
- return name_to_id_.find(name) != name_to_id_.end();
- }
-
- /// @param id the SPIR-V ID
- /// @returns the name for the ID. It must have been registered.
- const std::string& GetName(uint32_t id) const {
- return id_to_name_.find(id)->second;
- }
-
- /// Gets a unique name for the ID. If one already exists, then return
- /// that, otherwise synthesize a name and remember it for later.
- /// @param id the SPIR-V ID
-  /// @returns a name for the given ID. Generates a name if none exists.
- const std::string& Name(uint32_t id) {
- if (!HasName(id)) {
- SuggestSanitizedName(id, "x_" + std::to_string(id));
+ public:
+ /// Creates a new namer
+ /// @param fail_stream the error reporting stream
+ explicit Namer(const FailStream& fail_stream);
+ /// Destructor
+ ~Namer();
+
+ /// Sanitizes the given string, to replace unusual characters with
+    /// obviously-valid identifier characters. An empty string yields "empty".
+ /// A sanitized name never starts with an underscore.
+ /// @param suggested_name input string
+ /// @returns sanitized name, suitable for use as an identifier
+ static std::string Sanitize(const std::string& suggested_name);
+
+ /// Registers a failure.
+ /// @returns a fail stream to accumulate diagnostics.
+ FailStream& Fail() { return fail_stream_.Fail(); }
+
+ /// @param id the SPIR-V ID
+    /// @returns true if the given ID already has a registered name.
+ bool HasName(uint32_t id) { return id_to_name_.find(id) != id_to_name_.end(); }
+
+ /// @param name a string
+ /// @returns true if the string has been registered as a name.
+ bool IsRegistered(const std::string& name) const {
+ return name_to_id_.find(name) != name_to_id_.end();
}
- return GetName(id);
- }
-
- /// Gets the registered name for a struct member. If no name has
- /// been registered for this member, then returns the empty string.
-  /// Assumes the member index is in bounds.
- /// @param id the SPIR-V ID of the struct type
- /// @param member_index the index of the member, counting from 0
- /// @returns the registered name for the ID, or an empty string if
- /// nothing has been registered.
- std::string GetMemberName(uint32_t id, uint32_t member_index) const;
-
- /// Returns an unregistered name based on a given base name.
- /// @param base_name the base name
- /// @returns a new name
- std::string FindUnusedDerivedName(const std::string& base_name);
-
- /// Returns a newly registered name based on a given base name.
- /// In the internal table `name_to_id_`, it is mapped to the invalid
- /// SPIR-V ID 0. It does not have an entry in `id_to_name_`.
- /// @param base_name the base name
- /// @returns a new name
- std::string MakeDerivedName(const std::string& base_name);
-
- /// Records a mapping from the given ID to a name. Emits a failure
- /// if the ID already has a registered name.
- /// @param id the SPIR-V ID
- /// @param name the name to map to the ID
- /// @returns true if the ID did not have a previously registered name.
- bool Register(uint32_t id, const std::string& name);
-
- /// Registers a name, but not associated to any ID. Fails and emits
- /// a diagnostic if the name was already registered.
- /// @param name the name to register
-  /// @returns true if the name was not already registered.
- bool RegisterWithoutId(const std::string& name);
-
- /// Saves a sanitized name for the given ID, if that ID does not yet
- /// have a registered name, and if the sanitized name has not already
- /// been registered to a different ID.
- /// @param id the SPIR-V ID
- /// @param suggested_name the suggested name
- /// @returns true if a name was newly registered for the ID
- bool SuggestSanitizedName(uint32_t id, const std::string& suggested_name);
-
- /// Saves a sanitized name for a member of a struct, if that member
- /// does not yet have a registered name.
- /// @param struct_id the SPIR-V ID for the struct
- /// @param member_index the index of the member inside the struct
- /// @param suggested_name the suggested name
- /// @returns true if a name was newly registered
- bool SuggestSanitizedMemberName(uint32_t struct_id,
- uint32_t member_index,
- const std::string& suggested_name);
-
- /// Ensure there are member names registered for members of the given struct
- /// such that:
- /// - Each member has a non-empty sanitized name.
- /// - No two members in the struct have the same name.
- /// @param struct_id the SPIR-V ID for the struct
- /// @param num_members the number of members in the struct
- void ResolveMemberNamesForStruct(uint32_t struct_id, uint32_t num_members);
-
- private:
- FailStream fail_stream_;
-
- // Maps an ID to its registered name.
- std::unordered_map<uint32_t, std::string> id_to_name_;
- // Maps a name to a SPIR-V ID, or 0 (the case for derived names).
- std::unordered_map<std::string, uint32_t> name_to_id_;
-
- // Maps a struct id and member index to a suggested sanitized name.
- // If entry k in the vector is an empty string, then a suggestion
- // was recorded for a higher-numbered index, but not for index k.
- std::unordered_map<uint32_t, std::vector<std::string>> struct_member_names_;
-
- // Saved search id suffix for a given base name. Used by
- // FindUnusedDerivedName().
- std::unordered_map<std::string, uint32_t> next_unusued_derived_name_id_;
+
+ /// @param id the SPIR-V ID
+ /// @returns the name for the ID. It must have been registered.
+ const std::string& GetName(uint32_t id) const { return id_to_name_.find(id)->second; }
+
+ /// Gets a unique name for the ID. If one already exists, then return
+ /// that, otherwise synthesize a name and remember it for later.
+ /// @param id the SPIR-V ID
+    /// @returns a name for the given ID. Generates a name if none exists.
+ const std::string& Name(uint32_t id) {
+ if (!HasName(id)) {
+ SuggestSanitizedName(id, "x_" + std::to_string(id));
+ }
+ return GetName(id);
+ }
+
+ /// Gets the registered name for a struct member. If no name has
+ /// been registered for this member, then returns the empty string.
+    /// Assumes the member index is in bounds.
+ /// @param id the SPIR-V ID of the struct type
+ /// @param member_index the index of the member, counting from 0
+ /// @returns the registered name for the ID, or an empty string if
+ /// nothing has been registered.
+ std::string GetMemberName(uint32_t id, uint32_t member_index) const;
+
+ /// Returns an unregistered name based on a given base name.
+ /// @param base_name the base name
+ /// @returns a new name
+ std::string FindUnusedDerivedName(const std::string& base_name);
+
+ /// Returns a newly registered name based on a given base name.
+ /// In the internal table `name_to_id_`, it is mapped to the invalid
+ /// SPIR-V ID 0. It does not have an entry in `id_to_name_`.
+ /// @param base_name the base name
+ /// @returns a new name
+ std::string MakeDerivedName(const std::string& base_name);
+
+ /// Records a mapping from the given ID to a name. Emits a failure
+ /// if the ID already has a registered name.
+ /// @param id the SPIR-V ID
+ /// @param name the name to map to the ID
+ /// @returns true if the ID did not have a previously registered name.
+ bool Register(uint32_t id, const std::string& name);
+
+ /// Registers a name, but not associated to any ID. Fails and emits
+ /// a diagnostic if the name was already registered.
+ /// @param name the name to register
+    /// @returns true if the name was not already registered.
+ bool RegisterWithoutId(const std::string& name);
+
+ /// Saves a sanitized name for the given ID, if that ID does not yet
+ /// have a registered name, and if the sanitized name has not already
+ /// been registered to a different ID.
+ /// @param id the SPIR-V ID
+ /// @param suggested_name the suggested name
+ /// @returns true if a name was newly registered for the ID
+ bool SuggestSanitizedName(uint32_t id, const std::string& suggested_name);
+
+ /// Saves a sanitized name for a member of a struct, if that member
+ /// does not yet have a registered name.
+ /// @param struct_id the SPIR-V ID for the struct
+ /// @param member_index the index of the member inside the struct
+ /// @param suggested_name the suggested name
+ /// @returns true if a name was newly registered
+ bool SuggestSanitizedMemberName(uint32_t struct_id,
+ uint32_t member_index,
+ const std::string& suggested_name);
+
+ /// Ensure there are member names registered for members of the given struct
+ /// such that:
+ /// - Each member has a non-empty sanitized name.
+ /// - No two members in the struct have the same name.
+ /// @param struct_id the SPIR-V ID for the struct
+ /// @param num_members the number of members in the struct
+ void ResolveMemberNamesForStruct(uint32_t struct_id, uint32_t num_members);
+
+ private:
+ FailStream fail_stream_;
+
+ // Maps an ID to its registered name.
+ std::unordered_map<uint32_t, std::string> id_to_name_;
+ // Maps a name to a SPIR-V ID, or 0 (the case for derived names).
+ std::unordered_map<std::string, uint32_t> name_to_id_;
+
+ // Maps a struct id and member index to a suggested sanitized name.
+ // If entry k in the vector is an empty string, then a suggestion
+ // was recorded for a higher-numbered index, but not for index k.
+ std::unordered_map<uint32_t, std::vector<std::string>> struct_member_names_;
+
+ // Saved search id suffix for a given base name. Used by
+ // FindUnusedDerivedName().
+ std::unordered_map<std::string, uint32_t> next_unusued_derived_name_id_;
};
} // namespace tint::reader::spirv
diff --git a/chromium/third_party/dawn/src/tint/reader/spirv/namer_test.cc b/chromium/third_party/dawn/src/tint/reader/spirv/namer_test.cc
index 62cf56d3f4b..a24e03cec76 100644
--- a/chromium/third_party/dawn/src/tint/reader/spirv/namer_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/spirv/namer_test.cc
@@ -22,348 +22,341 @@ namespace {
using ::testing::Eq;
class SpvNamerTest : public testing::Test {
- public:
- SpvNamerTest() : fail_stream_(&success_, &errors_) {}
+ public:
+ SpvNamerTest() : fail_stream_(&success_, &errors_) {}
- /// @returns the accumulated diagnostic strings
- std::string error() { return errors_.str(); }
+ /// @returns the accumulated diagnostic strings
+ std::string error() { return errors_.str(); }
- protected:
- std::stringstream errors_;
- bool success_ = true;
- FailStream fail_stream_;
+ protected:
+ std::stringstream errors_;
+ bool success_ = true;
+ FailStream fail_stream_;
};
TEST_F(SpvNamerTest, SanitizeEmpty) {
- EXPECT_THAT(Namer::Sanitize(""), Eq("empty"));
+ EXPECT_THAT(Namer::Sanitize(""), Eq("empty"));
}
TEST_F(SpvNamerTest, SanitizeLeadingUnderscore) {
- EXPECT_THAT(Namer::Sanitize("_"), Eq("x_"));
+ EXPECT_THAT(Namer::Sanitize("_"), Eq("x_"));
}
TEST_F(SpvNamerTest, SanitizeLeadingDigit) {
- EXPECT_THAT(Namer::Sanitize("7zip"), Eq("x7zip"));
+ EXPECT_THAT(Namer::Sanitize("7zip"), Eq("x7zip"));
}
TEST_F(SpvNamerTest, SanitizeOkChars) {
- EXPECT_THAT(Namer::Sanitize("_abcdef12345"), Eq("x_abcdef12345"));
+ EXPECT_THAT(Namer::Sanitize("_abcdef12345"), Eq("x_abcdef12345"));
}
TEST_F(SpvNamerTest, SanitizeNonIdentifierChars) {
- EXPECT_THAT(Namer::Sanitize("a:1.2'f\n"), "a_1_2_f_");
+ EXPECT_THAT(Namer::Sanitize("a:1.2'f\n"), "a_1_2_f_");
}
TEST_F(SpvNamerTest, NoFailureToStart) {
- Namer namer(fail_stream_);
- EXPECT_TRUE(success_);
- EXPECT_TRUE(error().empty());
+ Namer namer(fail_stream_);
+ EXPECT_TRUE(success_);
+ EXPECT_TRUE(error().empty());
}
TEST_F(SpvNamerTest, FailLogsError) {
- Namer namer(fail_stream_);
- const bool converted_result = namer.Fail() << "st. johns wood";
- EXPECT_FALSE(converted_result);
- EXPECT_EQ(error(), "st. johns wood");
- EXPECT_FALSE(success_);
+ Namer namer(fail_stream_);
+ const bool converted_result = namer.Fail() << "st. johns wood";
+ EXPECT_FALSE(converted_result);
+ EXPECT_EQ(error(), "st. johns wood");
+ EXPECT_FALSE(success_);
}
TEST_F(SpvNamerTest, NoNameRecorded) {
- Namer namer(fail_stream_);
+ Namer namer(fail_stream_);
- EXPECT_FALSE(namer.HasName(12));
- EXPECT_TRUE(success_);
- EXPECT_TRUE(error().empty());
+ EXPECT_FALSE(namer.HasName(12));
+ EXPECT_TRUE(success_);
+ EXPECT_TRUE(error().empty());
}
TEST_F(SpvNamerTest, FindUnusedDerivedName_NoRecordedName) {
- Namer namer(fail_stream_);
- EXPECT_THAT(namer.FindUnusedDerivedName("eleanor"), Eq("eleanor"));
- // Prove that it wasn't registered when first found.
- EXPECT_THAT(namer.FindUnusedDerivedName("eleanor"), Eq("eleanor"));
+ Namer namer(fail_stream_);
+ EXPECT_THAT(namer.FindUnusedDerivedName("eleanor"), Eq("eleanor"));
+ // Prove that it wasn't registered when first found.
+ EXPECT_THAT(namer.FindUnusedDerivedName("eleanor"), Eq("eleanor"));
}
TEST_F(SpvNamerTest, FindUnusedDerivedName_HasRecordedName) {
- Namer namer(fail_stream_);
- namer.Register(12, "rigby");
- EXPECT_THAT(namer.FindUnusedDerivedName("rigby"), Eq("rigby_1"));
+ Namer namer(fail_stream_);
+ namer.Register(12, "rigby");
+ EXPECT_THAT(namer.FindUnusedDerivedName("rigby"), Eq("rigby_1"));
}
TEST_F(SpvNamerTest, FindUnusedDerivedName_HasMultipleConflicts) {
- Namer namer(fail_stream_);
- namer.Register(12, "rigby");
- namer.Register(13, "rigby_1");
- namer.Register(14, "rigby_3");
- // It picks the first non-conflicting suffix.
- EXPECT_THAT(namer.FindUnusedDerivedName("rigby"), Eq("rigby_2"));
+ Namer namer(fail_stream_);
+ namer.Register(12, "rigby");
+ namer.Register(13, "rigby_1");
+ namer.Register(14, "rigby_3");
+ // It picks the first non-conflicting suffix.
+ EXPECT_THAT(namer.FindUnusedDerivedName("rigby"), Eq("rigby_2"));
}
TEST_F(SpvNamerTest, IsRegistered_NoRecordedName) {
- Namer namer(fail_stream_);
- EXPECT_FALSE(namer.IsRegistered("abbey"));
+ Namer namer(fail_stream_);
+ EXPECT_FALSE(namer.IsRegistered("abbey"));
}
TEST_F(SpvNamerTest, IsRegistered_RegisteredById) {
- Namer namer(fail_stream_);
- namer.Register(1, "abbey");
- EXPECT_TRUE(namer.IsRegistered("abbey"));
+ Namer namer(fail_stream_);
+ namer.Register(1, "abbey");
+ EXPECT_TRUE(namer.IsRegistered("abbey"));
}
TEST_F(SpvNamerTest, IsRegistered_RegisteredByDerivation) {
- Namer namer(fail_stream_);
- const auto got = namer.MakeDerivedName("abbey");
- EXPECT_TRUE(namer.IsRegistered("abbey"));
- EXPECT_EQ(got, "abbey");
+ Namer namer(fail_stream_);
+ const auto got = namer.MakeDerivedName("abbey");
+ EXPECT_TRUE(namer.IsRegistered("abbey"));
+ EXPECT_EQ(got, "abbey");
}
TEST_F(SpvNamerTest, MakeDerivedName_NoRecordedName) {
- Namer namer(fail_stream_);
- EXPECT_THAT(namer.MakeDerivedName("eleanor"), Eq("eleanor"));
- // Prove that it was registered when first found.
- EXPECT_THAT(namer.MakeDerivedName("eleanor"), Eq("eleanor_1"));
+ Namer namer(fail_stream_);
+ EXPECT_THAT(namer.MakeDerivedName("eleanor"), Eq("eleanor"));
+ // Prove that it was registered when first found.
+ EXPECT_THAT(namer.MakeDerivedName("eleanor"), Eq("eleanor_1"));
}
TEST_F(SpvNamerTest, MakeDerivedName_HasRecordedName) {
- Namer namer(fail_stream_);
- namer.Register(12, "rigby");
- EXPECT_THAT(namer.MakeDerivedName("rigby"), Eq("rigby_1"));
+ Namer namer(fail_stream_);
+ namer.Register(12, "rigby");
+ EXPECT_THAT(namer.MakeDerivedName("rigby"), Eq("rigby_1"));
}
TEST_F(SpvNamerTest, MakeDerivedName_HasMultipleConflicts) {
- Namer namer(fail_stream_);
- namer.Register(12, "rigby");
- namer.Register(13, "rigby_1");
- namer.Register(14, "rigby_3");
- // It picks the first non-conflicting suffix.
- EXPECT_THAT(namer.MakeDerivedName("rigby"), Eq("rigby_2"));
+ Namer namer(fail_stream_);
+ namer.Register(12, "rigby");
+ namer.Register(13, "rigby_1");
+ namer.Register(14, "rigby_3");
+ // It picks the first non-conflicting suffix.
+ EXPECT_THAT(namer.MakeDerivedName("rigby"), Eq("rigby_2"));
}
TEST_F(SpvNamerTest, RegisterWithoutId_Once) {
- Namer namer(fail_stream_);
+ Namer namer(fail_stream_);
- const std::string n("abbey");
- EXPECT_FALSE(namer.IsRegistered(n));
- EXPECT_TRUE(namer.RegisterWithoutId(n));
- EXPECT_TRUE(namer.IsRegistered(n));
- EXPECT_TRUE(success_);
- EXPECT_TRUE(error().empty());
+ const std::string n("abbey");
+ EXPECT_FALSE(namer.IsRegistered(n));
+ EXPECT_TRUE(namer.RegisterWithoutId(n));
+ EXPECT_TRUE(namer.IsRegistered(n));
+ EXPECT_TRUE(success_);
+ EXPECT_TRUE(error().empty());
}
TEST_F(SpvNamerTest, RegisterWithoutId_Twice) {
- Namer namer(fail_stream_);
+ Namer namer(fail_stream_);
- const std::string n("abbey");
- EXPECT_FALSE(namer.IsRegistered(n));
- EXPECT_TRUE(namer.RegisterWithoutId(n));
- // Fails on second attempt.
- EXPECT_FALSE(namer.RegisterWithoutId(n));
- EXPECT_FALSE(success_);
- EXPECT_EQ(error(), "internal error: name already registered: abbey");
+ const std::string n("abbey");
+ EXPECT_FALSE(namer.IsRegistered(n));
+ EXPECT_TRUE(namer.RegisterWithoutId(n));
+ // Fails on second attempt.
+ EXPECT_FALSE(namer.RegisterWithoutId(n));
+ EXPECT_FALSE(success_);
+ EXPECT_EQ(error(), "internal error: name already registered: abbey");
}
TEST_F(SpvNamerTest, RegisterWithoutId_ConflictsWithIdRegisteredName) {
- Namer namer(fail_stream_);
+ Namer namer(fail_stream_);
- const std::string n("abbey");
- EXPECT_TRUE(namer.Register(1, n));
- EXPECT_TRUE(namer.IsRegistered(n));
- // Fails on attempt to register without ID.
- EXPECT_FALSE(namer.RegisterWithoutId(n));
- EXPECT_FALSE(success_);
- EXPECT_EQ(error(), "internal error: name already registered: abbey");
+ const std::string n("abbey");
+ EXPECT_TRUE(namer.Register(1, n));
+ EXPECT_TRUE(namer.IsRegistered(n));
+ // Fails on attempt to register without ID.
+ EXPECT_FALSE(namer.RegisterWithoutId(n));
+ EXPECT_FALSE(success_);
+ EXPECT_EQ(error(), "internal error: name already registered: abbey");
}
TEST_F(SpvNamerTest, Register_Once) {
- Namer namer(fail_stream_);
+ Namer namer(fail_stream_);
- const uint32_t id = 9;
- EXPECT_FALSE(namer.HasName(id));
- const bool save_result = namer.Register(id, "abbey road");
- EXPECT_TRUE(save_result);
- EXPECT_TRUE(namer.HasName(id));
- EXPECT_EQ(namer.GetName(id), "abbey road");
- EXPECT_TRUE(success_);
- EXPECT_TRUE(error().empty());
+ const uint32_t id = 9;
+ EXPECT_FALSE(namer.HasName(id));
+ const bool save_result = namer.Register(id, "abbey road");
+ EXPECT_TRUE(save_result);
+ EXPECT_TRUE(namer.HasName(id));
+ EXPECT_EQ(namer.GetName(id), "abbey road");
+ EXPECT_TRUE(success_);
+ EXPECT_TRUE(error().empty());
}
TEST_F(SpvNamerTest, Register_TwoIds) {
- Namer namer(fail_stream_);
+ Namer namer(fail_stream_);
- EXPECT_FALSE(namer.HasName(8));
- EXPECT_FALSE(namer.HasName(9));
- EXPECT_TRUE(namer.Register(8, "abbey road"));
- EXPECT_TRUE(namer.Register(9, "rubber soul"));
- EXPECT_TRUE(namer.HasName(8));
- EXPECT_TRUE(namer.HasName(9));
- EXPECT_EQ(namer.GetName(9), "rubber soul");
- EXPECT_EQ(namer.GetName(8), "abbey road");
- EXPECT_TRUE(success_);
- EXPECT_TRUE(error().empty());
+ EXPECT_FALSE(namer.HasName(8));
+ EXPECT_FALSE(namer.HasName(9));
+ EXPECT_TRUE(namer.Register(8, "abbey road"));
+ EXPECT_TRUE(namer.Register(9, "rubber soul"));
+ EXPECT_TRUE(namer.HasName(8));
+ EXPECT_TRUE(namer.HasName(9));
+ EXPECT_EQ(namer.GetName(9), "rubber soul");
+ EXPECT_EQ(namer.GetName(8), "abbey road");
+ EXPECT_TRUE(success_);
+ EXPECT_TRUE(error().empty());
}
TEST_F(SpvNamerTest, Register_FailsDueToIdReuse) {
- Namer namer(fail_stream_);
+ Namer namer(fail_stream_);
- const uint32_t id = 9;
- EXPECT_TRUE(namer.Register(id, "abbey road"));
- EXPECT_FALSE(namer.Register(id, "rubber soul"));
- EXPECT_TRUE(namer.HasName(id));
- EXPECT_EQ(namer.GetName(id), "abbey road");
- EXPECT_FALSE(success_);
- EXPECT_FALSE(error().empty());
+ const uint32_t id = 9;
+ EXPECT_TRUE(namer.Register(id, "abbey road"));
+ EXPECT_FALSE(namer.Register(id, "rubber soul"));
+ EXPECT_TRUE(namer.HasName(id));
+ EXPECT_EQ(namer.GetName(id), "abbey road");
+ EXPECT_FALSE(success_);
+ EXPECT_FALSE(error().empty());
}
TEST_F(SpvNamerTest, SuggestSanitizedName_TakeSuggestionWhenNoConflict) {
- Namer namer(fail_stream_);
+ Namer namer(fail_stream_);
- EXPECT_TRUE(namer.SuggestSanitizedName(1, "father"));
- EXPECT_THAT(namer.GetName(1), Eq("father"));
+ EXPECT_TRUE(namer.SuggestSanitizedName(1, "father"));
+ EXPECT_THAT(namer.GetName(1), Eq("father"));
}
-TEST_F(SpvNamerTest,
- SuggestSanitizedName_RejectSuggestionWhenConflictOnSameId) {
- Namer namer(fail_stream_);
+TEST_F(SpvNamerTest, SuggestSanitizedName_RejectSuggestionWhenConflictOnSameId) {
+ Namer namer(fail_stream_);
- namer.Register(1, "lennon");
- EXPECT_FALSE(namer.SuggestSanitizedName(1, "mccartney"));
- EXPECT_THAT(namer.GetName(1), Eq("lennon"));
+ namer.Register(1, "lennon");
+ EXPECT_FALSE(namer.SuggestSanitizedName(1, "mccartney"));
+ EXPECT_THAT(namer.GetName(1), Eq("lennon"));
}
TEST_F(SpvNamerTest, SuggestSanitizedName_SanitizeSuggestion) {
- Namer namer(fail_stream_);
+ Namer namer(fail_stream_);
- EXPECT_TRUE(namer.SuggestSanitizedName(9, "m:kenzie"));
- EXPECT_THAT(namer.GetName(9), Eq("m_kenzie"));
+ EXPECT_TRUE(namer.SuggestSanitizedName(9, "m:kenzie"));
+ EXPECT_THAT(namer.GetName(9), Eq("m_kenzie"));
}
-TEST_F(SpvNamerTest,
- SuggestSanitizedName_GenerateNewNameWhenConflictOnDifferentId) {
- Namer namer(fail_stream_);
+TEST_F(SpvNamerTest, SuggestSanitizedName_GenerateNewNameWhenConflictOnDifferentId) {
+ Namer namer(fail_stream_);
- namer.Register(7, "rice");
- EXPECT_TRUE(namer.SuggestSanitizedName(9, "rice"));
- EXPECT_THAT(namer.GetName(9), Eq("rice_1"));
+ namer.Register(7, "rice");
+ EXPECT_TRUE(namer.SuggestSanitizedName(9, "rice"));
+ EXPECT_THAT(namer.GetName(9), Eq("rice_1"));
}
TEST_F(SpvNamerTest, GetMemberName_EmptyStringForUnvisitedStruct) {
- Namer namer(fail_stream_);
- EXPECT_THAT(namer.GetMemberName(1, 2), Eq(""));
+ Namer namer(fail_stream_);
+ EXPECT_THAT(namer.GetMemberName(1, 2), Eq(""));
}
TEST_F(SpvNamerTest, GetMemberName_EmptyStringForUnvisitedMember) {
- Namer namer(fail_stream_);
- namer.SuggestSanitizedMemberName(1, 2, "mother");
- EXPECT_THAT(namer.GetMemberName(1, 0), Eq(""));
+ Namer namer(fail_stream_);
+ namer.SuggestSanitizedMemberName(1, 2, "mother");
+ EXPECT_THAT(namer.GetMemberName(1, 0), Eq(""));
}
TEST_F(SpvNamerTest, SuggestSanitizedMemberName_TakeSuggestionWhenNoConflict) {
- Namer namer(fail_stream_);
- EXPECT_TRUE(namer.SuggestSanitizedMemberName(1, 2, "mother"));
- EXPECT_THAT(namer.GetMemberName(1, 2), Eq("mother"));
+ Namer namer(fail_stream_);
+ EXPECT_TRUE(namer.SuggestSanitizedMemberName(1, 2, "mother"));
+ EXPECT_THAT(namer.GetMemberName(1, 2), Eq("mother"));
}
TEST_F(SpvNamerTest, SuggestSanitizedMemberName_TakeSanitizedSuggestion) {
- Namer namer(fail_stream_);
- EXPECT_TRUE(namer.SuggestSanitizedMemberName(1, 2, "m:t%er"));
- EXPECT_THAT(namer.GetMemberName(1, 2), Eq("m_t_er"));
+ Namer namer(fail_stream_);
+ EXPECT_TRUE(namer.SuggestSanitizedMemberName(1, 2, "m:t%er"));
+ EXPECT_THAT(namer.GetMemberName(1, 2), Eq("m_t_er"));
}
TEST_F(
SpvNamerTest,
SuggestSanitizedMemberName_TakeSuggestionWhenNoConflictAfterSuggestionForLowerMember) { // NOLINT
- Namer namer(fail_stream_);
- EXPECT_TRUE(namer.SuggestSanitizedMemberName(1, 7, "mother"));
- EXPECT_THAT(namer.GetMemberName(1, 2), Eq(""));
- EXPECT_TRUE(namer.SuggestSanitizedMemberName(1, 2, "mary"));
- EXPECT_THAT(namer.GetMemberName(1, 2), Eq("mary"));
+ Namer namer(fail_stream_);
+ EXPECT_TRUE(namer.SuggestSanitizedMemberName(1, 7, "mother"));
+ EXPECT_THAT(namer.GetMemberName(1, 2), Eq(""));
+ EXPECT_TRUE(namer.SuggestSanitizedMemberName(1, 2, "mary"));
+ EXPECT_THAT(namer.GetMemberName(1, 2), Eq("mary"));
}
-TEST_F(SpvNamerTest,
- SuggestSanitizedMemberName_RejectSuggestionIfConflictOnMember) {
- Namer namer(fail_stream_);
- EXPECT_TRUE(namer.SuggestSanitizedMemberName(1, 2, "mother"));
- EXPECT_FALSE(namer.SuggestSanitizedMemberName(1, 2, "mary"));
- EXPECT_THAT(namer.GetMemberName(1, 2), Eq("mother"));
+TEST_F(SpvNamerTest, SuggestSanitizedMemberName_RejectSuggestionIfConflictOnMember) {
+ Namer namer(fail_stream_);
+ EXPECT_TRUE(namer.SuggestSanitizedMemberName(1, 2, "mother"));
+ EXPECT_FALSE(namer.SuggestSanitizedMemberName(1, 2, "mary"));
+ EXPECT_THAT(namer.GetMemberName(1, 2), Eq("mother"));
}
TEST_F(SpvNamerTest, Name_GeneratesNameIfNoneRegistered) {
- Namer namer(fail_stream_);
- EXPECT_THAT(namer.Name(14), Eq("x_14"));
+ Namer namer(fail_stream_);
+ EXPECT_THAT(namer.Name(14), Eq("x_14"));
}
TEST_F(SpvNamerTest, Name_GeneratesNameWithoutConflict) {
- Namer namer(fail_stream_);
- namer.Register(42, "x_14");
- EXPECT_THAT(namer.Name(14), Eq("x_14_1"));
+ Namer namer(fail_stream_);
+ namer.Register(42, "x_14");
+ EXPECT_THAT(namer.Name(14), Eq("x_14_1"));
}
TEST_F(SpvNamerTest, Name_ReturnsRegisteredName) {
- Namer namer(fail_stream_);
- namer.Register(14, "hello");
- EXPECT_THAT(namer.Name(14), Eq("hello"));
+ Namer namer(fail_stream_);
+ namer.Register(14, "hello");
+ EXPECT_THAT(namer.Name(14), Eq("hello"));
}
-TEST_F(SpvNamerTest,
- ResolveMemberNamesForStruct_GeneratesRegularNamesOnItsOwn) {
- Namer namer(fail_stream_);
- namer.ResolveMemberNamesForStruct(2, 4);
- EXPECT_THAT(namer.GetMemberName(2, 0), Eq("field0"));
- EXPECT_THAT(namer.GetMemberName(2, 1), Eq("field1"));
- EXPECT_THAT(namer.GetMemberName(2, 2), Eq("field2"));
- EXPECT_THAT(namer.GetMemberName(2, 3), Eq("field3"));
+TEST_F(SpvNamerTest, ResolveMemberNamesForStruct_GeneratesRegularNamesOnItsOwn) {
+ Namer namer(fail_stream_);
+ namer.ResolveMemberNamesForStruct(2, 4);
+ EXPECT_THAT(namer.GetMemberName(2, 0), Eq("field0"));
+ EXPECT_THAT(namer.GetMemberName(2, 1), Eq("field1"));
+ EXPECT_THAT(namer.GetMemberName(2, 2), Eq("field2"));
+ EXPECT_THAT(namer.GetMemberName(2, 3), Eq("field3"));
}
-TEST_F(SpvNamerTest,
- ResolveMemberNamesForStruct_ResolvesConflictBetweenSuggestedNames) {
- Namer namer(fail_stream_);
- namer.SuggestSanitizedMemberName(2, 0, "apple");
- namer.SuggestSanitizedMemberName(2, 1, "apple");
- namer.ResolveMemberNamesForStruct(2, 2);
- EXPECT_THAT(namer.GetMemberName(2, 0), Eq("apple"));
- EXPECT_THAT(namer.GetMemberName(2, 1), Eq("apple_1"));
+TEST_F(SpvNamerTest, ResolveMemberNamesForStruct_ResolvesConflictBetweenSuggestedNames) {
+ Namer namer(fail_stream_);
+ namer.SuggestSanitizedMemberName(2, 0, "apple");
+ namer.SuggestSanitizedMemberName(2, 1, "apple");
+ namer.ResolveMemberNamesForStruct(2, 2);
+ EXPECT_THAT(namer.GetMemberName(2, 0), Eq("apple"));
+ EXPECT_THAT(namer.GetMemberName(2, 1), Eq("apple_1"));
}
TEST_F(SpvNamerTest, ResolveMemberNamesForStruct_FillsUnsuggestedGaps) {
- Namer namer(fail_stream_);
- namer.SuggestSanitizedMemberName(2, 1, "apple");
- namer.SuggestSanitizedMemberName(2, 2, "core");
- namer.ResolveMemberNamesForStruct(2, 4);
- EXPECT_THAT(namer.GetMemberName(2, 0), Eq("field0"));
- EXPECT_THAT(namer.GetMemberName(2, 1), Eq("apple"));
- EXPECT_THAT(namer.GetMemberName(2, 2), Eq("core"));
- EXPECT_THAT(namer.GetMemberName(2, 3), Eq("field3"));
-}
-
-TEST_F(SpvNamerTest,
- ResolveMemberNamesForStruct_GeneratedNameAvoidsConflictWithSuggestion) {
- Namer namer(fail_stream_);
- namer.SuggestSanitizedMemberName(2, 0, "field1");
- namer.ResolveMemberNamesForStruct(2, 2);
- EXPECT_THAT(namer.GetMemberName(2, 0), Eq("field1"));
- EXPECT_THAT(namer.GetMemberName(2, 1), Eq("field1_1"));
-}
-
-TEST_F(SpvNamerTest,
- ResolveMemberNamesForStruct_TruncatesOutOfBoundsSuggestion) {
- Namer namer(fail_stream_);
- namer.SuggestSanitizedMemberName(2, 3, "sitar");
- EXPECT_THAT(namer.GetMemberName(2, 3), Eq("sitar"));
- namer.ResolveMemberNamesForStruct(2, 2);
- EXPECT_THAT(namer.GetMemberName(2, 0), Eq("field0"));
- EXPECT_THAT(namer.GetMemberName(2, 1), Eq("field1"));
- EXPECT_THAT(namer.GetMemberName(2, 3), Eq(""));
+ Namer namer(fail_stream_);
+ namer.SuggestSanitizedMemberName(2, 1, "apple");
+ namer.SuggestSanitizedMemberName(2, 2, "core");
+ namer.ResolveMemberNamesForStruct(2, 4);
+ EXPECT_THAT(namer.GetMemberName(2, 0), Eq("field0"));
+ EXPECT_THAT(namer.GetMemberName(2, 1), Eq("apple"));
+ EXPECT_THAT(namer.GetMemberName(2, 2), Eq("core"));
+ EXPECT_THAT(namer.GetMemberName(2, 3), Eq("field3"));
+}
+
+TEST_F(SpvNamerTest, ResolveMemberNamesForStruct_GeneratedNameAvoidsConflictWithSuggestion) {
+ Namer namer(fail_stream_);
+ namer.SuggestSanitizedMemberName(2, 0, "field1");
+ namer.ResolveMemberNamesForStruct(2, 2);
+ EXPECT_THAT(namer.GetMemberName(2, 0), Eq("field1"));
+ EXPECT_THAT(namer.GetMemberName(2, 1), Eq("field1_1"));
+}
+
+TEST_F(SpvNamerTest, ResolveMemberNamesForStruct_TruncatesOutOfBoundsSuggestion) {
+ Namer namer(fail_stream_);
+ namer.SuggestSanitizedMemberName(2, 3, "sitar");
+ EXPECT_THAT(namer.GetMemberName(2, 3), Eq("sitar"));
+ namer.ResolveMemberNamesForStruct(2, 2);
+ EXPECT_THAT(namer.GetMemberName(2, 0), Eq("field0"));
+ EXPECT_THAT(namer.GetMemberName(2, 1), Eq("field1"));
+ EXPECT_THAT(namer.GetMemberName(2, 3), Eq(""));
}
using SpvNamerReservedWordTest = ::testing::TestWithParam<std::string>;
TEST_P(SpvNamerReservedWordTest, ReservedWordsAreUsed) {
- bool success;
- std::stringstream errors;
- FailStream fail_stream(&success, &errors);
- Namer namer(fail_stream);
- const std::string reserved = GetParam();
- // Since it's reserved, it's marked as used, and we can't register an ID
- EXPECT_THAT(namer.FindUnusedDerivedName(reserved), Eq(reserved + "_1"));
+ bool success;
+ std::stringstream errors;
+ FailStream fail_stream(&success, &errors);
+ Namer namer(fail_stream);
+ const std::string reserved = GetParam();
+ // Since it's reserved, it's marked as used, and we can't register an ID
+ EXPECT_THAT(namer.FindUnusedDerivedName(reserved), Eq(reserved + "_1"));
}
INSTANTIATE_TEST_SUITE_P(SpvParserTest_ReservedWords,
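
The reformatted tests above also pin down Namer's derived-name contract: FindUnusedDerivedName reports the first non-conflicting "_N" candidate without registering it, MakeDerivedName additionally registers the result, and Name(id) falls back to a generated "x_<id>". A minimal, self-contained sketch of that behaviour follows; the header paths and the tint::reader::spirv namespace are assumptions, and FailStream is constructed the same way as in SpvNamerReservedWordTest above.

#include <iostream>
#include <sstream>
#include <string>

#include "src/tint/reader/spirv/fail_stream.h"  // assumed header location
#include "src/tint/reader/spirv/namer.h"        // assumed header location

void NamerSketch() {
    bool success = true;
    std::stringstream errors;
    tint::reader::spirv::FailStream fail_stream(&success, &errors);
    tint::reader::spirv::Namer namer(fail_stream);

    namer.Register(12, "rigby");
    // Derivation picks the first non-conflicting "_N" suffix.
    std::cout << namer.FindUnusedDerivedName("rigby") << "\n";  // prints "rigby_1"
    // IDs without a registered name get a generated "x_<id>" name.
    std::cout << namer.Name(14) << "\n";                        // prints "x_14"
    // Re-registering an ID fails and reports through the FailStream.
    if (!namer.Register(12, "rubber soul")) {
        std::cout << errors.str() << "\n";
    }
}

The failed re-registration leaves the original name in place, matching Register_FailsDueToIdReuse above.
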
diff --git a/chromium/third_party/dawn/src/tint/reader/spirv/parser.cc b/chromium/third_party/dawn/src/tint/reader/spirv/parser.cc
index ebb5bc4d924..f430d94f72c 100644
--- a/chromium/third_party/dawn/src/tint/reader/spirv/parser.cc
+++ b/chromium/third_party/dawn/src/tint/reader/spirv/parser.cc
@@ -27,35 +27,35 @@
namespace tint::reader::spirv {
Program Parse(const std::vector<uint32_t>& input) {
- ParserImpl parser(input);
- bool parsed = parser.Parse();
-
- ProgramBuilder& builder = parser.builder();
- if (!parsed) {
- // TODO(bclayton): Migrate spirv::ParserImpl to using diagnostics.
- builder.Diagnostics().add_error(diag::System::Reader, parser.error());
- return Program(std::move(builder));
- }
-
- // The SPIR-V parser can construct disjoint AST nodes, which is invalid for
- // the Resolver. Clone the Program to clean these up.
- builder.SetResolveOnBuild(false);
- Program program_with_disjoint_ast(std::move(builder));
-
- ProgramBuilder output;
- CloneContext(&output, &program_with_disjoint_ast, false).Clone();
- auto program = Program(std::move(output));
- if (!program.IsValid()) {
- return program;
- }
-
- transform::Manager manager;
- manager.Add<transform::Unshadow>();
- manager.Add<transform::SimplifyPointers>();
- manager.Add<transform::DecomposeStridedMatrix>();
- manager.Add<transform::DecomposeStridedArray>();
- manager.Add<transform::RemoveUnreachableStatements>();
- return manager.Run(&program).program;
+ ParserImpl parser(input);
+ bool parsed = parser.Parse();
+
+ ProgramBuilder& builder = parser.builder();
+ if (!parsed) {
+ // TODO(bclayton): Migrate spirv::ParserImpl to using diagnostics.
+ builder.Diagnostics().add_error(diag::System::Reader, parser.error());
+ return Program(std::move(builder));
+ }
+
+ // The SPIR-V parser can construct disjoint AST nodes, which is invalid for
+ // the Resolver. Clone the Program to clean these up.
+ builder.SetResolveOnBuild(false);
+ Program program_with_disjoint_ast(std::move(builder));
+
+ ProgramBuilder output;
+ CloneContext(&output, &program_with_disjoint_ast, false).Clone();
+ auto program = Program(std::move(output));
+ if (!program.IsValid()) {
+ return program;
+ }
+
+ transform::Manager manager;
+ manager.Add<transform::Unshadow>();
+ manager.Add<transform::SimplifyPointers>();
+ manager.Add<transform::DecomposeStridedMatrix>();
+ manager.Add<transform::DecomposeStridedArray>();
+ manager.Add<transform::RemoveUnreachableStatements>();
+ return manager.Run(&program).program;
}
} // namespace tint::reader::spirv
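
The rewritten Parse() above is effectively the whole public surface of the SPIR-V reader: validate the binary with SPIRV-Tools, build the AST, clone the program to clean up disjoint nodes, then run the Unshadow, SimplifyPointers, DecomposeStridedMatrix, DecomposeStridedArray and RemoveUnreachableStatements transforms. A minimal caller sketch follows; the header path is an assumption, and only Parse() and Program::IsValid() from the code above are relied on.

#include <cstdint>
#include <vector>

#include "src/tint/reader/spirv/parser.h"  // assumed header location

// Returns true when the SPIR-V words translate into a valid Tint program.
bool TranslatesCleanly(const std::vector<uint32_t>& spirv_words) {
    tint::Program program = tint::reader::spirv::Parse(spirv_words);
    // On failure the reader records its error in the program's diagnostics,
    // as Parse() above does through builder.Diagnostics().
    return program.IsValid();
}
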
diff --git a/chromium/third_party/dawn/src/tint/reader/spirv/parser_impl.cc b/chromium/third_party/dawn/src/tint/reader/spirv/parser_impl.cc
index 6cf8e7a9981..bdc52e695ef 100644
--- a/chromium/third_party/dawn/src/tint/reader/spirv/parser_impl.cc
+++ b/chromium/third_party/dawn/src/tint/reader/spirv/parser_impl.cc
@@ -27,9 +27,9 @@
#include "src/tint/ast/type_name.h"
#include "src/tint/ast/unary_op_expression.h"
#include "src/tint/reader/spirv/function.h"
-#include "src/tint/sem/depth_texture_type.h"
-#include "src/tint/sem/multisampled_texture_type.h"
-#include "src/tint/sem/sampled_texture_type.h"
+#include "src/tint/sem/depth_texture.h"
+#include "src/tint/sem/multisampled_texture.h"
+#include "src/tint/sem/sampled_texture.h"
#include "src/tint/utils/unique_vector.h"
namespace tint::reader::spirv {
@@ -45,101 +45,100 @@ const spv_target_env kInputEnv = SPV_ENV_VULKAN_1_1;
// A FunctionTraverser is used to compute an ordering of functions in the
// module such that callees precede callers.
class FunctionTraverser {
- public:
- explicit FunctionTraverser(const spvtools::opt::Module& module)
- : module_(module) {}
-
- // @returns the functions in the modules such that callees precede callers.
- std::vector<const spvtools::opt::Function*> TopologicallyOrderedFunctions() {
- visited_.clear();
- ordered_.clear();
- id_to_func_.clear();
- for (const auto& f : module_) {
- id_to_func_[f.result_id()] = &f;
- }
- for (const auto& f : module_) {
- Visit(f);
- }
- return ordered_;
- }
-
- private:
- void Visit(const spvtools::opt::Function& f) {
- if (visited_.count(&f)) {
- return;
- }
- visited_.insert(&f);
- for (const auto& bb : f) {
- for (const auto& inst : bb) {
- if (inst.opcode() != SpvOpFunctionCall) {
- continue;
- }
- const auto* callee = id_to_func_[inst.GetSingleWordInOperand(0)];
- if (callee) {
- Visit(*callee);
- }
- }
- }
- ordered_.push_back(&f);
- }
-
- const spvtools::opt::Module& module_;
- std::unordered_set<const spvtools::opt::Function*> visited_;
- std::unordered_map<uint32_t, const spvtools::opt::Function*> id_to_func_;
- std::vector<const spvtools::opt::Function*> ordered_;
+ public:
+ explicit FunctionTraverser(const spvtools::opt::Module& module) : module_(module) {}
+
+ // @returns the functions in the modules such that callees precede callers.
+ std::vector<const spvtools::opt::Function*> TopologicallyOrderedFunctions() {
+ visited_.clear();
+ ordered_.clear();
+ id_to_func_.clear();
+ for (const auto& f : module_) {
+ id_to_func_[f.result_id()] = &f;
+ }
+ for (const auto& f : module_) {
+ Visit(f);
+ }
+ return ordered_;
+ }
+
+ private:
+ void Visit(const spvtools::opt::Function& f) {
+ if (visited_.count(&f)) {
+ return;
+ }
+ visited_.insert(&f);
+ for (const auto& bb : f) {
+ for (const auto& inst : bb) {
+ if (inst.opcode() != SpvOpFunctionCall) {
+ continue;
+ }
+ const auto* callee = id_to_func_[inst.GetSingleWordInOperand(0)];
+ if (callee) {
+ Visit(*callee);
+ }
+ }
+ }
+ ordered_.push_back(&f);
+ }
+
+ const spvtools::opt::Module& module_;
+ std::unordered_set<const spvtools::opt::Function*> visited_;
+ std::unordered_map<uint32_t, const spvtools::opt::Function*> id_to_func_;
+ std::vector<const spvtools::opt::Function*> ordered_;
};
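
FunctionTraverser above is a depth-first post-order walk of the call graph: a function is appended to the output only after every function it calls has been appended, which is what gives the callees-precede-callers guarantee. A self-contained toy version of the same ordering follows, with plain integer IDs standing in for spvtools::opt::Function and the function names chosen here purely for illustration.

#include <cstdint>
#include <unordered_map>
#include <unordered_set>
#include <vector>

// Post-order DFS over a call graph: callees are emitted before their callers.
// Assumes an acyclic graph, as SPIR-V call graphs are.
void VisitToy(uint32_t func,
              const std::unordered_map<uint32_t, std::vector<uint32_t>>& calls,
              std::unordered_set<uint32_t>& visited,
              std::vector<uint32_t>& ordered) {
    if (!visited.insert(func).second) {
        return;  // already handled
    }
    if (auto it = calls.find(func); it != calls.end()) {
        for (uint32_t callee : it->second) {
            VisitToy(callee, calls, visited, ordered);
        }
    }
    ordered.push_back(func);
}

std::vector<uint32_t> ToyTopologicalOrder(
    const std::unordered_map<uint32_t, std::vector<uint32_t>>& calls) {
    std::unordered_set<uint32_t> visited;
    std::vector<uint32_t> ordered;
    for (const auto& entry : calls) {
        VisitToy(entry.first, calls, visited, ordered);
    }
    return ordered;
}

// ToyTopologicalOrder({{1, {2, 3}}, {2, {3}}, {3, {}}}) yields 3, 2, 1.
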
// Returns true if the opcode operates as if its operands are signed integral.
bool AssumesSignedOperands(SpvOp opcode) {
- switch (opcode) {
- case SpvOpSNegate:
- case SpvOpSDiv:
- case SpvOpSRem:
- case SpvOpSMod:
- case SpvOpSLessThan:
- case SpvOpSLessThanEqual:
- case SpvOpSGreaterThan:
- case SpvOpSGreaterThanEqual:
- case SpvOpConvertSToF:
- return true;
- default:
- break;
- }
- return false;
+ switch (opcode) {
+ case SpvOpSNegate:
+ case SpvOpSDiv:
+ case SpvOpSRem:
+ case SpvOpSMod:
+ case SpvOpSLessThan:
+ case SpvOpSLessThanEqual:
+ case SpvOpSGreaterThan:
+ case SpvOpSGreaterThanEqual:
+ case SpvOpConvertSToF:
+ return true;
+ default:
+ break;
+ }
+ return false;
}
// Returns true if the GLSL extended instruction expects operands to be signed.
// @param extended_opcode GLSL.std.450 opcode
// @returns true if all operands must be signed integral type
bool AssumesSignedOperands(GLSLstd450 extended_opcode) {
- switch (extended_opcode) {
- case GLSLstd450SAbs:
- case GLSLstd450SSign:
- case GLSLstd450SMin:
- case GLSLstd450SMax:
- case GLSLstd450SClamp:
- return true;
- default:
- break;
- }
- return false;
+ switch (extended_opcode) {
+ case GLSLstd450SAbs:
+ case GLSLstd450SSign:
+ case GLSLstd450SMin:
+ case GLSLstd450SMax:
+ case GLSLstd450SClamp:
+ return true;
+ default:
+ break;
+ }
+ return false;
}
// Returns true if the opcode operates as if its operands are unsigned integral.
bool AssumesUnsignedOperands(SpvOp opcode) {
- switch (opcode) {
- case SpvOpUDiv:
- case SpvOpUMod:
- case SpvOpULessThan:
- case SpvOpULessThanEqual:
- case SpvOpUGreaterThan:
- case SpvOpUGreaterThanEqual:
- case SpvOpConvertUToF:
- return true;
- default:
- break;
- }
- return false;
+ switch (opcode) {
+ case SpvOpUDiv:
+ case SpvOpUMod:
+ case SpvOpULessThan:
+ case SpvOpULessThanEqual:
+ case SpvOpUGreaterThan:
+ case SpvOpUGreaterThanEqual:
+ case SpvOpConvertUToF:
+ return true;
+ default:
+ break;
+ }
+ return false;
}
// Returns true if the GLSL extended instruction expects operands to be
@@ -147,15 +146,15 @@ bool AssumesUnsignedOperands(SpvOp opcode) {
// @param extended_opcode GLSL.std.450 opcode
// @returns true if all operands must be unsigned integral type
bool AssumesUnsignedOperands(GLSLstd450 extended_opcode) {
- switch (extended_opcode) {
- case GLSLstd450UMin:
- case GLSLstd450UMax:
- case GLSLstd450UClamp:
- return true;
- default:
- break;
- }
- return false;
+ switch (extended_opcode) {
+ case GLSLstd450UMin:
+ case GLSLstd450UMax:
+ case GLSLstd450UClamp:
+ return true;
+ default:
+ break;
+ }
+ return false;
}
// Returns true if the corresponding WGSL operation requires
@@ -163,49 +162,49 @@ bool AssumesUnsignedOperands(GLSLstd450 extended_opcode) {
// first operand, and it's not one of the OpU* or OpS* instructions.
// (Those are handled via MakeOperand.)
bool AssumesSecondOperandSignednessMatchesFirstOperand(SpvOp opcode) {
- switch (opcode) {
- // All the OpI* integer binary operations.
- case SpvOpIAdd:
- case SpvOpISub:
- case SpvOpIMul:
- case SpvOpIEqual:
- case SpvOpINotEqual:
- // All the bitwise integer binary operations.
- case SpvOpBitwiseAnd:
- case SpvOpBitwiseOr:
- case SpvOpBitwiseXor:
- return true;
- default:
- break;
- }
- return false;
+ switch (opcode) {
+ // All the OpI* integer binary operations.
+ case SpvOpIAdd:
+ case SpvOpISub:
+ case SpvOpIMul:
+ case SpvOpIEqual:
+ case SpvOpINotEqual:
+ // All the bitwise integer binary operations.
+ case SpvOpBitwiseAnd:
+ case SpvOpBitwiseOr:
+ case SpvOpBitwiseXor:
+ return true;
+ default:
+ break;
+ }
+ return false;
}
// Returns true if the corresponding WGSL operation requires
// the signedness of the result to match the signedness of the first operand.
bool AssumesResultSignednessMatchesFirstOperand(SpvOp opcode) {
- switch (opcode) {
- case SpvOpNot:
- case SpvOpSNegate:
- case SpvOpBitCount:
- case SpvOpBitReverse:
- case SpvOpSDiv:
- case SpvOpSMod:
- case SpvOpSRem:
- case SpvOpIAdd:
- case SpvOpISub:
- case SpvOpIMul:
- case SpvOpBitwiseAnd:
- case SpvOpBitwiseOr:
- case SpvOpBitwiseXor:
- case SpvOpShiftLeftLogical:
- case SpvOpShiftRightLogical:
- case SpvOpShiftRightArithmetic:
- return true;
- default:
- break;
- }
- return false;
+ switch (opcode) {
+ case SpvOpNot:
+ case SpvOpSNegate:
+ case SpvOpBitCount:
+ case SpvOpBitReverse:
+ case SpvOpSDiv:
+ case SpvOpSMod:
+ case SpvOpSRem:
+ case SpvOpIAdd:
+ case SpvOpISub:
+ case SpvOpIMul:
+ case SpvOpBitwiseAnd:
+ case SpvOpBitwiseOr:
+ case SpvOpBitwiseXor:
+ case SpvOpShiftLeftLogical:
+ case SpvOpShiftRightLogical:
+ case SpvOpShiftRightArithmetic:
+ return true;
+ default:
+ break;
+ }
+ return false;
}
// Returns true if the extended instruction requires the signedness of the
@@ -213,42 +212,42 @@ bool AssumesResultSignednessMatchesFirstOperand(SpvOp opcode) {
// @param extended_opcode GLSL.std.450 opcode
// @returns true if the result type must match the first operand type.
bool AssumesResultSignednessMatchesFirstOperand(GLSLstd450 extended_opcode) {
- switch (extended_opcode) {
- case GLSLstd450SAbs:
- case GLSLstd450SSign:
- case GLSLstd450SMin:
- case GLSLstd450SMax:
- case GLSLstd450SClamp:
- case GLSLstd450UMin:
- case GLSLstd450UMax:
- case GLSLstd450UClamp:
- // TODO(dneto): FindSMsb?
- // TODO(dneto): FindUMsb?
- return true;
- default:
- break;
- }
- return false;
+ switch (extended_opcode) {
+ case GLSLstd450SAbs:
+ case GLSLstd450SSign:
+ case GLSLstd450SMin:
+ case GLSLstd450SMax:
+ case GLSLstd450SClamp:
+ case GLSLstd450UMin:
+ case GLSLstd450UMax:
+ case GLSLstd450UClamp:
+ // TODO(dneto): FindSMsb?
+ // TODO(dneto): FindUMsb?
+ return true;
+ default:
+ break;
+ }
+ return false;
}
// @param a SPIR-V decoration
// @return true when the given decoration is a pipeline decoration other than a
// builtin variable.
bool IsPipelineDecoration(const Decoration& deco) {
- if (deco.size() < 1) {
+ if (deco.size() < 1) {
+ return false;
+ }
+ switch (deco[0]) {
+ case SpvDecorationLocation:
+ case SpvDecorationFlat:
+ case SpvDecorationNoPerspective:
+ case SpvDecorationCentroid:
+ case SpvDecorationSample:
+ return true;
+ default:
+ break;
+ }
return false;
- }
- switch (deco[0]) {
- case SpvDecorationLocation:
- case SpvDecorationFlat:
- case SpvDecorationNoPerspective:
- case SpvDecorationCentroid:
- case SpvDecorationSample:
- return true;
- default:
- break;
- }
- return false;
}
} // namespace
@@ -259,8 +258,7 @@ TypedExpression::TypedExpression(const TypedExpression&) = default;
TypedExpression& TypedExpression::operator=(const TypedExpression&) = default;
-TypedExpression::TypedExpression(const Type* type_in,
- const ast::Expression* expr_in)
+TypedExpression::TypedExpression(const Type* type_in, const ast::Expression* expr_in)
: type(type_in), expr(expr_in) {}
ParserImpl::ParserImpl(const std::vector<uint32_t>& spv_binary)
@@ -270,1341 +268,1290 @@ ParserImpl::ParserImpl(const std::vector<uint32_t>& spv_binary)
namer_(fail_stream_),
enum_converter_(fail_stream_),
tools_context_(kInputEnv) {
- // Create a message consumer to propagate error messages from SPIRV-Tools
- // out as our own failures.
- message_consumer_ = [this](spv_message_level_t level, const char* /*source*/,
- const spv_position_t& position,
- const char* message) {
- switch (level) {
- // Ignore info and warning message.
- case SPV_MSG_WARNING:
- case SPV_MSG_INFO:
- break;
- // Otherwise, propagate the error.
- default:
- // For binary validation errors, we only have the instruction
- // number. It's not text, so there is no column number.
- this->Fail() << "line:" << position.index << ": " << message;
- }
- };
+ // Create a message consumer to propagate error messages from SPIRV-Tools
+ // out as our own failures.
+ message_consumer_ = [this](spv_message_level_t level, const char* /*source*/,
+ const spv_position_t& position, const char* message) {
+ switch (level) {
+ // Ignore info and warning message.
+ case SPV_MSG_WARNING:
+ case SPV_MSG_INFO:
+ break;
+ // Otherwise, propagate the error.
+ default:
+ // For binary validation errors, we only have the instruction
+ // number. It's not text, so there is no column number.
+ this->Fail() << "line:" << position.index << ": " << message;
+ }
+ };
}
ParserImpl::~ParserImpl() = default;
bool ParserImpl::Parse() {
- // Set up use of SPIRV-Tools utilities.
- spvtools::SpirvTools spv_tools(kInputEnv);
+ // Set up use of SPIRV-Tools utilities.
+ spvtools::SpirvTools spv_tools(kInputEnv);
- // Error messages from SPIRV-Tools are forwarded as failures, including
- // setting |success_| to false.
- spv_tools.SetMessageConsumer(message_consumer_);
+ // Error messages from SPIRV-Tools are forwarded as failures, including
+ // setting |success_| to false.
+ spv_tools.SetMessageConsumer(message_consumer_);
- if (!success_) {
- return false;
- }
+ if (!success_) {
+ return false;
+ }
- // Only consider modules valid for Vulkan 1.0. On failure, the message
- // consumer will set the error status.
- if (!spv_tools.Validate(spv_binary_)) {
- success_ = false;
- return false;
- }
- if (!BuildInternalModule()) {
- return false;
- }
- if (!ParseInternalModule()) {
- return false;
- }
+ // Only consider modules valid for Vulkan 1.0. On failure, the message
+ // consumer will set the error status.
+ if (!spv_tools.Validate(spv_binary_)) {
+ success_ = false;
+ return false;
+ }
+ if (!BuildInternalModule()) {
+ return false;
+ }
+ if (!ParseInternalModule()) {
+ return false;
+ }
- return success_;
+ return success_;
}
Program ParserImpl::program() {
- // TODO(dneto): Should we clear out spv_binary_ here, to reduce
- // memory usage?
- return tint::Program(std::move(builder_));
+ // TODO(dneto): Should we clear out spv_binary_ here, to reduce
+ // memory usage?
+ return tint::Program(std::move(builder_));
}
const Type* ParserImpl::ConvertType(uint32_t type_id, PtrAs ptr_as) {
- if (!success_) {
- return nullptr;
- }
+ if (!success_) {
+ return nullptr;
+ }
- if (type_mgr_ == nullptr) {
- Fail() << "ConvertType called when the internal module has not been built";
- return nullptr;
- }
+ if (type_mgr_ == nullptr) {
+ Fail() << "ConvertType called when the internal module has not been built";
+ return nullptr;
+ }
- auto* spirv_type = type_mgr_->GetType(type_id);
- if (spirv_type == nullptr) {
- Fail() << "ID is not a SPIR-V type: " << type_id;
+ auto* spirv_type = type_mgr_->GetType(type_id);
+ if (spirv_type == nullptr) {
+ Fail() << "ID is not a SPIR-V type: " << type_id;
+ return nullptr;
+ }
+
+ switch (spirv_type->kind()) {
+ case spvtools::opt::analysis::Type::kVoid:
+ return ty_.Void();
+ case spvtools::opt::analysis::Type::kBool:
+ return ty_.Bool();
+ case spvtools::opt::analysis::Type::kInteger:
+ return ConvertType(spirv_type->AsInteger());
+ case spvtools::opt::analysis::Type::kFloat:
+ return ConvertType(spirv_type->AsFloat());
+ case spvtools::opt::analysis::Type::kVector:
+ return ConvertType(spirv_type->AsVector());
+ case spvtools::opt::analysis::Type::kMatrix:
+ return ConvertType(spirv_type->AsMatrix());
+ case spvtools::opt::analysis::Type::kRuntimeArray:
+ return ConvertType(type_id, spirv_type->AsRuntimeArray());
+ case spvtools::opt::analysis::Type::kArray:
+ return ConvertType(type_id, spirv_type->AsArray());
+ case spvtools::opt::analysis::Type::kStruct:
+ return ConvertType(type_id, spirv_type->AsStruct());
+ case spvtools::opt::analysis::Type::kPointer:
+ return ConvertType(type_id, ptr_as, spirv_type->AsPointer());
+ case spvtools::opt::analysis::Type::kFunction:
+ // Tint doesn't have a Function type.
+ // We need to convert the result type and parameter types.
+ // But the SPIR-V defines those before defining the function
+ // type. No further work is required here.
+ return nullptr;
+ case spvtools::opt::analysis::Type::kSampler:
+ case spvtools::opt::analysis::Type::kSampledImage:
+ case spvtools::opt::analysis::Type::kImage:
+ // Fake it for sampler and texture types. These are handled in an
+ // entirely different way.
+ return ty_.Void();
+ default:
+ break;
+ }
+
+ Fail() << "unknown SPIR-V type with ID " << type_id << ": "
+ << def_use_mgr_->GetDef(type_id)->PrettyPrint();
return nullptr;
- }
-
- switch (spirv_type->kind()) {
- case spvtools::opt::analysis::Type::kVoid:
- return ty_.Void();
- case spvtools::opt::analysis::Type::kBool:
- return ty_.Bool();
- case spvtools::opt::analysis::Type::kInteger:
- return ConvertType(spirv_type->AsInteger());
- case spvtools::opt::analysis::Type::kFloat:
- return ConvertType(spirv_type->AsFloat());
- case spvtools::opt::analysis::Type::kVector:
- return ConvertType(spirv_type->AsVector());
- case spvtools::opt::analysis::Type::kMatrix:
- return ConvertType(spirv_type->AsMatrix());
- case spvtools::opt::analysis::Type::kRuntimeArray:
- return ConvertType(type_id, spirv_type->AsRuntimeArray());
- case spvtools::opt::analysis::Type::kArray:
- return ConvertType(type_id, spirv_type->AsArray());
- case spvtools::opt::analysis::Type::kStruct:
- return ConvertType(type_id, spirv_type->AsStruct());
- case spvtools::opt::analysis::Type::kPointer:
- return ConvertType(type_id, ptr_as, spirv_type->AsPointer());
- case spvtools::opt::analysis::Type::kFunction:
- // Tint doesn't have a Function type.
- // We need to convert the result type and parameter types.
- // But the SPIR-V defines those before defining the function
- // type. No further work is required here.
- return nullptr;
- case spvtools::opt::analysis::Type::kSampler:
- case spvtools::opt::analysis::Type::kSampledImage:
- case spvtools::opt::analysis::Type::kImage:
- // Fake it for sampler and texture types. These are handled in an
- // entirely different way.
- return ty_.Void();
- default:
- break;
- }
-
- Fail() << "unknown SPIR-V type with ID " << type_id << ": "
- << def_use_mgr_->GetDef(type_id)->PrettyPrint();
- return nullptr;
}
DecorationList ParserImpl::GetDecorationsFor(uint32_t id) const {
- DecorationList result;
- const auto& decorations = deco_mgr_->GetDecorationsFor(id, true);
- std::unordered_set<uint32_t> visited;
- for (const auto* inst : decorations) {
- if (inst->opcode() != SpvOpDecorate) {
- continue;
- }
- // Example: OpDecorate %struct_id Block
- // Example: OpDecorate %array_ty ArrayStride 16
- auto decoration_kind = inst->GetSingleWordInOperand(1);
- switch (decoration_kind) {
- // Restrict and RestrictPointer have no effect in graphics APIs.
- case SpvDecorationRestrict:
- case SpvDecorationRestrictPointer:
- break;
- default:
- if (visited.emplace(decoration_kind).second) {
- std::vector<uint32_t> inst_as_words;
- inst->ToBinaryWithoutAttachedDebugInsts(&inst_as_words);
- Decoration d(inst_as_words.begin() + 2, inst_as_words.end());
- result.push_back(d);
- }
- break;
- }
- }
- return result;
-}
-
-DecorationList ParserImpl::GetDecorationsForMember(
- uint32_t id,
- uint32_t member_index) const {
- DecorationList result;
- const auto& decorations = deco_mgr_->GetDecorationsFor(id, true);
- std::unordered_set<uint32_t> visited;
- for (const auto* inst : decorations) {
- // Example: OpMemberDecorate %struct_id 1 Offset 16
- if ((inst->opcode() != SpvOpMemberDecorate) ||
- (inst->GetSingleWordInOperand(1) != member_index)) {
- continue;
- }
- auto decoration_kind = inst->GetSingleWordInOperand(2);
- switch (decoration_kind) {
- // Restrict and RestrictPointer have no effect in graphics APIs.
- case SpvDecorationRestrict:
- case SpvDecorationRestrictPointer:
- break;
- default:
- if (visited.emplace(decoration_kind).second) {
- std::vector<uint32_t> inst_as_words;
- inst->ToBinaryWithoutAttachedDebugInsts(&inst_as_words);
- Decoration d(inst_as_words.begin() + 3, inst_as_words.end());
- result.push_back(d);
- }
- }
- }
- return result;
+ DecorationList result;
+ const auto& decorations = deco_mgr_->GetDecorationsFor(id, true);
+ std::unordered_set<uint32_t> visited;
+ for (const auto* inst : decorations) {
+ if (inst->opcode() != SpvOpDecorate) {
+ continue;
+ }
+ // Example: OpDecorate %struct_id Block
+ // Example: OpDecorate %array_ty ArrayStride 16
+ auto decoration_kind = inst->GetSingleWordInOperand(1);
+ switch (decoration_kind) {
+ // Restrict and RestrictPointer have no effect in graphics APIs.
+ case SpvDecorationRestrict:
+ case SpvDecorationRestrictPointer:
+ break;
+ default:
+ if (visited.emplace(decoration_kind).second) {
+ std::vector<uint32_t> inst_as_words;
+ inst->ToBinaryWithoutAttachedDebugInsts(&inst_as_words);
+ Decoration d(inst_as_words.begin() + 2, inst_as_words.end());
+ result.push_back(d);
+ }
+ break;
+ }
+ }
+ return result;
+}
+
+DecorationList ParserImpl::GetDecorationsForMember(uint32_t id, uint32_t member_index) const {
+ DecorationList result;
+ const auto& decorations = deco_mgr_->GetDecorationsFor(id, true);
+ std::unordered_set<uint32_t> visited;
+ for (const auto* inst : decorations) {
+ // Example: OpMemberDecorate %struct_id 1 Offset 16
+ if ((inst->opcode() != SpvOpMemberDecorate) ||
+ (inst->GetSingleWordInOperand(1) != member_index)) {
+ continue;
+ }
+ auto decoration_kind = inst->GetSingleWordInOperand(2);
+ switch (decoration_kind) {
+ // Restrict and RestrictPointer have no effect in graphics APIs.
+ case SpvDecorationRestrict:
+ case SpvDecorationRestrictPointer:
+ break;
+ default:
+ if (visited.emplace(decoration_kind).second) {
+ std::vector<uint32_t> inst_as_words;
+ inst->ToBinaryWithoutAttachedDebugInsts(&inst_as_words);
+ Decoration d(inst_as_words.begin() + 3, inst_as_words.end());
+ result.push_back(d);
+ }
+ }
+ }
+ return result;
}
std::string ParserImpl::ShowType(uint32_t type_id) {
- if (def_use_mgr_) {
- const auto* type_inst = def_use_mgr_->GetDef(type_id);
- if (type_inst) {
- return type_inst->PrettyPrint();
- }
- }
- return "SPIR-V type " + std::to_string(type_id);
-}
-
-ast::AttributeList ParserImpl::ConvertMemberDecoration(
- uint32_t struct_type_id,
- uint32_t member_index,
- const Type* member_ty,
- const Decoration& decoration) {
- if (decoration.empty()) {
- Fail() << "malformed SPIR-V decoration: it's empty";
- return {};
- }
- switch (decoration[0]) {
- case SpvDecorationOffset:
- if (decoration.size() != 2) {
- Fail()
- << "malformed Offset decoration: expected 1 literal operand, has "
- << decoration.size() - 1 << ": member " << member_index << " of "
- << ShowType(struct_type_id);
- return {};
- }
- return {
- create<ast::StructMemberOffsetAttribute>(Source{}, decoration[1]),
- };
- case SpvDecorationNonReadable:
- // WGSL doesn't have a member decoration for this. Silently drop it.
- return {};
- case SpvDecorationNonWritable:
- // WGSL doesn't have a member decoration for this.
- return {};
- case SpvDecorationColMajor:
- // WGSL only supports column major matrices.
- return {};
- case SpvDecorationRelaxedPrecision:
- // WGSL doesn't support relaxed precision.
- return {};
- case SpvDecorationRowMajor:
- Fail() << "WGSL does not support row-major matrices: can't "
- "translate member "
- << member_index << " of " << ShowType(struct_type_id);
- return {};
- case SpvDecorationMatrixStride: {
- if (decoration.size() != 2) {
- Fail() << "malformed MatrixStride decoration: expected 1 literal "
- "operand, has "
- << decoration.size() - 1 << ": member " << member_index << " of "
- << ShowType(struct_type_id);
- return {};
- }
- uint32_t stride = decoration[1];
- auto* ty = member_ty->UnwrapAlias();
- while (auto* arr = ty->As<Array>()) {
- ty = arr->type->UnwrapAlias();
- }
- auto* mat = ty->As<Matrix>();
- if (!mat) {
- Fail() << "MatrixStride cannot be applied to type " << ty->String();
- return {};
- }
- uint32_t natural_stride = (mat->rows == 2) ? 8 : 16;
- if (stride == natural_stride) {
- return {}; // Decoration matches the natural stride for the matrix
- }
- if (!member_ty->Is<Matrix>()) {
- Fail() << "custom matrix strides not currently supported on array of "
- "matrices";
+ if (def_use_mgr_) {
+ const auto* type_inst = def_use_mgr_->GetDef(type_id);
+ if (type_inst) {
+ return type_inst->PrettyPrint();
+ }
+ }
+ return "SPIR-V type " + std::to_string(type_id);
+}
+
+ast::AttributeList ParserImpl::ConvertMemberDecoration(uint32_t struct_type_id,
+ uint32_t member_index,
+ const Type* member_ty,
+ const Decoration& decoration) {
+ if (decoration.empty()) {
+ Fail() << "malformed SPIR-V decoration: it's empty";
return {};
- }
- return {
- create<ast::StrideAttribute>(Source{}, decoration[1]),
- builder_.ASTNodes().Create<ast::DisableValidationAttribute>(
- builder_.ID(), ast::DisabledValidation::kIgnoreStrideAttribute),
- };
- }
- default:
- // TODO(dneto): Support the remaining member decorations.
- break;
- }
- Fail() << "unhandled member decoration: " << decoration[0] << " on member "
- << member_index << " of " << ShowType(struct_type_id);
- return {};
+ }
+ switch (decoration[0]) {
+ case SpvDecorationOffset:
+ if (decoration.size() != 2) {
+ Fail() << "malformed Offset decoration: expected 1 literal operand, has "
+ << decoration.size() - 1 << ": member " << member_index << " of "
+ << ShowType(struct_type_id);
+ return {};
+ }
+ return {
+ create<ast::StructMemberOffsetAttribute>(Source{}, decoration[1]),
+ };
+ case SpvDecorationNonReadable:
+ // WGSL doesn't have a member decoration for this. Silently drop it.
+ return {};
+ case SpvDecorationNonWritable:
+ // WGSL doesn't have a member decoration for this.
+ return {};
+ case SpvDecorationColMajor:
+ // WGSL only supports column major matrices.
+ return {};
+ case SpvDecorationRelaxedPrecision:
+ // WGSL doesn't support relaxed precision.
+ return {};
+ case SpvDecorationRowMajor:
+ Fail() << "WGSL does not support row-major matrices: can't "
+ "translate member "
+ << member_index << " of " << ShowType(struct_type_id);
+ return {};
+ case SpvDecorationMatrixStride: {
+ if (decoration.size() != 2) {
+ Fail() << "malformed MatrixStride decoration: expected 1 literal "
+ "operand, has "
+ << decoration.size() - 1 << ": member " << member_index << " of "
+ << ShowType(struct_type_id);
+ return {};
+ }
+ uint32_t stride = decoration[1];
+ auto* ty = member_ty->UnwrapAlias();
+ while (auto* arr = ty->As<Array>()) {
+ ty = arr->type->UnwrapAlias();
+ }
+ auto* mat = ty->As<Matrix>();
+ if (!mat) {
+ Fail() << "MatrixStride cannot be applied to type " << ty->String();
+ return {};
+ }
+ uint32_t natural_stride = (mat->rows == 2) ? 8 : 16;
+ if (stride == natural_stride) {
+ return {}; // Decoration matches the natural stride for the matrix
+ }
+ if (!member_ty->Is<Matrix>()) {
+ Fail() << "custom matrix strides not currently supported on array of "
+ "matrices";
+ return {};
+ }
+ return {
+ create<ast::StrideAttribute>(Source{}, decoration[1]),
+ builder_.ASTNodes().Create<ast::DisableValidationAttribute>(
+ builder_.ID(), ast::DisabledValidation::kIgnoreStrideAttribute),
+ };
+ }
+ default:
+ // TODO(dneto): Support the remaining member decorations.
+ break;
+ }
+ Fail() << "unhandled member decoration: " << decoration[0] << " on member " << member_index
+ << " of " << ShowType(struct_type_id);
+ return {};
}
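
The MatrixStride branch above compares the decorated stride with the column stride WGSL already implies, and the (mat->rows == 2) ? 8 : 16 arithmetic assumes 4-byte (f32) elements: a two-row column is a vec2<f32> occupying 8 bytes, while three- and four-row columns round up to a 16-byte stride. A decoration equal to that natural stride is simply dropped; anything else becomes a StrideAttribute plus a DisableValidationAttribute, as the code above shows. A tiny sketch of the same arithmetic, with the helper name chosen here for illustration only:

#include <cstdint>

// Natural column stride for an f32 matrix with the given number of rows
// (hypothetical helper, mirroring the arithmetic in ConvertMemberDecoration).
uint32_t NaturalColumnStride(uint32_t rows) {
    // vec2<f32> packs into 8 bytes; vec3<f32>/vec4<f32> columns take 16.
    return (rows == 2) ? 8u : 16u;
}

// NaturalColumnStride(2) == 8, NaturalColumnStride(3) == 16, NaturalColumnStride(4) == 16,
// so a MatrixStride equal to the value returned here is redundant and emits no attribute.
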
bool ParserImpl::BuildInternalModule() {
- if (!success_) {
- return false;
- }
+ if (!success_) {
+ return false;
+ }
- const spv_context& context = tools_context_.CContext();
- ir_context_ = spvtools::BuildModule(context->target_env, context->consumer,
- spv_binary_.data(), spv_binary_.size());
- if (!ir_context_) {
- return Fail() << "internal error: couldn't build the internal "
- "representation of the module";
- }
- module_ = ir_context_->module();
- def_use_mgr_ = ir_context_->get_def_use_mgr();
- constant_mgr_ = ir_context_->get_constant_mgr();
- type_mgr_ = ir_context_->get_type_mgr();
- deco_mgr_ = ir_context_->get_decoration_mgr();
+ const spv_context& context = tools_context_.CContext();
+ ir_context_ = spvtools::BuildModule(context->target_env, context->consumer, spv_binary_.data(),
+ spv_binary_.size());
+ if (!ir_context_) {
+ return Fail() << "internal error: couldn't build the internal "
+ "representation of the module";
+ }
+ module_ = ir_context_->module();
+ def_use_mgr_ = ir_context_->get_def_use_mgr();
+ constant_mgr_ = ir_context_->get_constant_mgr();
+ type_mgr_ = ir_context_->get_type_mgr();
+ deco_mgr_ = ir_context_->get_decoration_mgr();
- topologically_ordered_functions_ =
- FunctionTraverser(*module_).TopologicallyOrderedFunctions();
+ topologically_ordered_functions_ = FunctionTraverser(*module_).TopologicallyOrderedFunctions();
- return success_;
+ return success_;
}
void ParserImpl::ResetInternalModule() {
- ir_context_.reset(nullptr);
- module_ = nullptr;
- def_use_mgr_ = nullptr;
- constant_mgr_ = nullptr;
- type_mgr_ = nullptr;
- deco_mgr_ = nullptr;
+ ir_context_.reset(nullptr);
+ module_ = nullptr;
+ def_use_mgr_ = nullptr;
+ constant_mgr_ = nullptr;
+ type_mgr_ = nullptr;
+ deco_mgr_ = nullptr;
- glsl_std_450_imports_.clear();
+ glsl_std_450_imports_.clear();
}
bool ParserImpl::ParseInternalModule() {
- if (!success_) {
- return false;
- }
- RegisterLineNumbers();
- if (!ParseInternalModuleExceptFunctions()) {
- return false;
- }
- if (!EmitFunctions()) {
- return false;
- }
- return success_;
+ if (!success_) {
+ return false;
+ }
+ RegisterLineNumbers();
+ if (!ParseInternalModuleExceptFunctions()) {
+ return false;
+ }
+ if (!EmitFunctions()) {
+ return false;
+ }
+ return success_;
}
void ParserImpl::RegisterLineNumbers() {
- Source::Location instruction_number{};
-
- // Has there been an OpLine since the last OpNoLine or start of the module?
- bool in_op_line_scope = false;
- // The source location provided by the most recent OpLine instruction.
- Source::Location op_line_source{};
- const bool run_on_debug_insts = true;
- module_->ForEachInst(
- [this, &in_op_line_scope, &op_line_source,
- &instruction_number](const spvtools::opt::Instruction* inst) {
- ++instruction_number.line;
- switch (inst->opcode()) {
- case SpvOpLine:
- in_op_line_scope = true;
- // TODO(dneto): This ignores the File ID (operand 0), since the Tint
- // Source concept doesn't represent that.
- op_line_source.line = inst->GetSingleWordInOperand(1);
- op_line_source.column = inst->GetSingleWordInOperand(2);
- break;
- case SpvOpNoLine:
- in_op_line_scope = false;
- break;
- default:
- break;
- }
- this->inst_source_[inst] =
- in_op_line_scope ? op_line_source : instruction_number;
- },
- run_on_debug_insts);
+ Source::Location instruction_number{};
+
+ // Has there been an OpLine since the last OpNoLine or start of the module?
+ bool in_op_line_scope = false;
+ // The source location provided by the most recent OpLine instruction.
+ Source::Location op_line_source{};
+ const bool run_on_debug_insts = true;
+ module_->ForEachInst(
+ [this, &in_op_line_scope, &op_line_source,
+ &instruction_number](const spvtools::opt::Instruction* inst) {
+ ++instruction_number.line;
+ switch (inst->opcode()) {
+ case SpvOpLine:
+ in_op_line_scope = true;
+ // TODO(dneto): This ignores the File ID (operand 0), since the Tint
+ // Source concept doesn't represent that.
+ op_line_source.line = inst->GetSingleWordInOperand(1);
+ op_line_source.column = inst->GetSingleWordInOperand(2);
+ break;
+ case SpvOpNoLine:
+ in_op_line_scope = false;
+ break;
+ default:
+ break;
+ }
+ this->inst_source_[inst] = in_op_line_scope ? op_line_source : instruction_number;
+ },
+ run_on_debug_insts);
}
Source ParserImpl::GetSourceForResultIdForTest(uint32_t id) const {
- return GetSourceForInst(def_use_mgr_->GetDef(id));
+ return GetSourceForInst(def_use_mgr_->GetDef(id));
}
-Source ParserImpl::GetSourceForInst(
- const spvtools::opt::Instruction* inst) const {
- auto where = inst_source_.find(inst);
- if (where == inst_source_.end()) {
- return {};
- }
- return Source{where->second };
+Source ParserImpl::GetSourceForInst(const spvtools::opt::Instruction* inst) const {
+ auto where = inst_source_.find(inst);
+ if (where == inst_source_.end()) {
+ return {};
+ }
+ return Source{where->second};
}
bool ParserImpl::ParseInternalModuleExceptFunctions() {
- if (!success_) {
- return false;
- }
- if (!RegisterExtendedInstructionImports()) {
- return false;
- }
- if (!RegisterUserAndStructMemberNames()) {
- return false;
- }
- if (!RegisterWorkgroupSizeBuiltin()) {
- return false;
- }
- if (!RegisterEntryPoints()) {
- return false;
- }
- if (!RegisterHandleUsage()) {
- return false;
- }
- if (!RegisterTypes()) {
- return false;
- }
- if (!RejectInvalidPointerRoots()) {
- return false;
- }
- if (!EmitScalarSpecConstants()) {
- return false;
- }
- if (!EmitModuleScopeVariables()) {
- return false;
- }
- return success_;
+ if (!success_) {
+ return false;
+ }
+ if (!RegisterExtendedInstructionImports()) {
+ return false;
+ }
+ if (!RegisterUserAndStructMemberNames()) {
+ return false;
+ }
+ if (!RegisterWorkgroupSizeBuiltin()) {
+ return false;
+ }
+ if (!RegisterEntryPoints()) {
+ return false;
+ }
+ if (!RegisterHandleUsage()) {
+ return false;
+ }
+ if (!RegisterTypes()) {
+ return false;
+ }
+ if (!RejectInvalidPointerRoots()) {
+ return false;
+ }
+ if (!EmitScalarSpecConstants()) {
+ return false;
+ }
+ if (!EmitModuleScopeVariables()) {
+ return false;
+ }
+ return success_;
}
bool ParserImpl::RegisterExtendedInstructionImports() {
- for (const spvtools::opt::Instruction& import : module_->ext_inst_imports()) {
- std::string name(
- reinterpret_cast<const char*>(import.GetInOperand(0).words.data()));
- // TODO(dneto): Handle other extended instruction sets when needed.
- if (name == "GLSL.std.450") {
- glsl_std_450_imports_.insert(import.result_id());
- } else if (name.find("NonSemantic.") == 0) {
- ignored_imports_.insert(import.result_id());
- } else {
- return Fail() << "Unrecognized extended instruction set: " << name;
+ for (const spvtools::opt::Instruction& import : module_->ext_inst_imports()) {
+ std::string name(reinterpret_cast<const char*>(import.GetInOperand(0).words.data()));
+ // TODO(dneto): Handle other extended instruction sets when needed.
+ if (name == "GLSL.std.450") {
+ glsl_std_450_imports_.insert(import.result_id());
+ } else if (name.find("NonSemantic.") == 0) {
+ ignored_imports_.insert(import.result_id());
+ } else {
+ return Fail() << "Unrecognized extended instruction set: " << name;
+ }
}
- }
- return true;
+ return true;
}
-bool ParserImpl::IsGlslExtendedInstruction(
- const spvtools::opt::Instruction& inst) const {
- return (inst.opcode() == SpvOpExtInst) &&
- (glsl_std_450_imports_.count(inst.GetSingleWordInOperand(0)) > 0);
+bool ParserImpl::IsGlslExtendedInstruction(const spvtools::opt::Instruction& inst) const {
+ return (inst.opcode() == SpvOpExtInst) &&
+ (glsl_std_450_imports_.count(inst.GetSingleWordInOperand(0)) > 0);
}
-bool ParserImpl::IsIgnoredExtendedInstruction(
- const spvtools::opt::Instruction& inst) const {
- return (inst.opcode() == SpvOpExtInst) &&
- (ignored_imports_.count(inst.GetSingleWordInOperand(0)) > 0);
+bool ParserImpl::IsIgnoredExtendedInstruction(const spvtools::opt::Instruction& inst) const {
+ return (inst.opcode() == SpvOpExtInst) &&
+ (ignored_imports_.count(inst.GetSingleWordInOperand(0)) > 0);
}
bool ParserImpl::RegisterUserAndStructMemberNames() {
- if (!success_) {
- return false;
- }
- // Register entry point names. An entry point name is the point of contact
- // between the API and the shader. It has the highest priority for
- // preservation, so register it first.
- for (const spvtools::opt::Instruction& entry_point :
- module_->entry_points()) {
- const uint32_t function_id = entry_point.GetSingleWordInOperand(1);
- const std::string name = entry_point.GetInOperand(2).AsString();
-
- // This translator requires the entry point to be a valid WGSL identifier.
- // Allowing otherwise leads to difficulties in that the programmer needs
- // to get a mapping from their original entry point name to the WGSL name,
- // and we don't have a good mechanism for that.
- if (!IsValidIdentifier(name)) {
- return Fail() << "entry point name is not a valid WGSL identifier: "
- << name;
- }
-
- // SPIR-V allows a single function to be the implementation for more
- // than one entry point. In the common case, it's one-to-one, and we should
- // try to name the function after the entry point. Otherwise, give the
- // function a name automatically derived from the entry point name.
- namer_.SuggestSanitizedName(function_id, name);
-
- // There is another many-to-one relationship to take care of: In SPIR-V
- // the same name can be used for multiple entry points, provided they are
- // for different shader stages. Take action now to ensure we can use the
- // entry point name later on, and not have it taken for another identifier
- // by an accidental collision with a derived name made for a different ID.
- if (!namer_.IsRegistered(name)) {
-      // The entry point name is "unoccupied" because an earlier entry point
- // grabbed the slot for the function that implements both entry points.
- // Register this new entry point's name, to avoid accidental collisions
- // with a future generated ID.
- if (!namer_.RegisterWithoutId(name)) {
+ if (!success_) {
return false;
- }
}
- }
+ // Register entry point names. An entry point name is the point of contact
+ // between the API and the shader. It has the highest priority for
+ // preservation, so register it first.
+ for (const spvtools::opt::Instruction& entry_point : module_->entry_points()) {
+ const uint32_t function_id = entry_point.GetSingleWordInOperand(1);
+ const std::string name = entry_point.GetInOperand(2).AsString();
+
+ // This translator requires the entry point to be a valid WGSL identifier.
+ // Allowing otherwise leads to difficulties in that the programmer needs
+ // to get a mapping from their original entry point name to the WGSL name,
+ // and we don't have a good mechanism for that.
+ if (!IsValidIdentifier(name)) {
+ return Fail() << "entry point name is not a valid WGSL identifier: " << name;
+ }
- // Register names from OpName and OpMemberName
- for (const auto& inst : module_->debugs2()) {
- switch (inst.opcode()) {
- case SpvOpName: {
- const auto name = inst.GetInOperand(1).AsString();
- if (!name.empty()) {
- namer_.SuggestSanitizedName(inst.GetSingleWordInOperand(0), name);
+ // SPIR-V allows a single function to be the implementation for more
+ // than one entry point. In the common case, it's one-to-one, and we should
+ // try to name the function after the entry point. Otherwise, give the
+ // function a name automatically derived from the entry point name.
+ namer_.SuggestSanitizedName(function_id, name);
+
+ // There is another many-to-one relationship to take care of: In SPIR-V
+ // the same name can be used for multiple entry points, provided they are
+ // for different shader stages. Take action now to ensure we can use the
+ // entry point name later on, and not have it taken for another identifier
+ // by an accidental collision with a derived name made for a different ID.
+ if (!namer_.IsRegistered(name)) {
+            // The entry point name is "unoccupied" because an earlier entry point
+ // grabbed the slot for the function that implements both entry points.
+ // Register this new entry point's name, to avoid accidental collisions
+ // with a future generated ID.
+ if (!namer_.RegisterWithoutId(name)) {
+ return false;
+ }
}
- break;
- }
- case SpvOpMemberName: {
- const auto name = inst.GetInOperand(2).AsString();
- if (!name.empty()) {
- namer_.SuggestSanitizedMemberName(inst.GetSingleWordInOperand(0),
- inst.GetSingleWordInOperand(1),
- name);
+ }
+
+ // Register names from OpName and OpMemberName
+ for (const auto& inst : module_->debugs2()) {
+ switch (inst.opcode()) {
+ case SpvOpName: {
+ const auto name = inst.GetInOperand(1).AsString();
+ if (!name.empty()) {
+ namer_.SuggestSanitizedName(inst.GetSingleWordInOperand(0), name);
+ }
+ break;
+ }
+ case SpvOpMemberName: {
+ const auto name = inst.GetInOperand(2).AsString();
+ if (!name.empty()) {
+ namer_.SuggestSanitizedMemberName(inst.GetSingleWordInOperand(0),
+ inst.GetSingleWordInOperand(1), name);
+ }
+ break;
+ }
+ default:
+ break;
}
- break;
- }
- default:
- break;
}
- }
- // Fill in struct member names, and disambiguate them.
- for (const auto* type_inst : module_->GetTypes()) {
- if (type_inst->opcode() == SpvOpTypeStruct) {
- namer_.ResolveMemberNamesForStruct(type_inst->result_id(),
- type_inst->NumInOperands());
+ // Fill in struct member names, and disambiguate them.
+ for (const auto* type_inst : module_->GetTypes()) {
+ if (type_inst->opcode() == SpvOpTypeStruct) {
+ namer_.ResolveMemberNamesForStruct(type_inst->result_id(), type_inst->NumInOperands());
+ }
}
- }
- return true;
+ return true;
}
bool ParserImpl::IsValidIdentifier(const std::string& str) {
- if (str.empty()) {
- return false;
- }
- std::locale c_locale("C");
- if (str[0] == '_') {
- if (str.length() == 1u || str[1] == '_') {
- // https://www.w3.org/TR/WGSL/#identifiers
- // must not be '_' (a single underscore)
- // must not start with two underscores
- return false;
- }
- } else if (!std::isalpha(str[0], c_locale)) {
- return false;
- }
- for (const char& ch : str) {
- if ((ch != '_') && !std::isalnum(ch, c_locale)) {
- return false;
- }
- }
- return true;
-}
-
-bool ParserImpl::RegisterWorkgroupSizeBuiltin() {
- WorkgroupSizeInfo& info = workgroup_size_builtin_;
- for (const spvtools::opt::Instruction& inst : module_->annotations()) {
- if (inst.opcode() != SpvOpDecorate) {
- continue;
+ if (str.empty()) {
+ return false;
}
- if (inst.GetSingleWordInOperand(1) != SpvDecorationBuiltIn) {
- continue;
+ std::locale c_locale("C");
+ if (str[0] == '_') {
+ if (str.length() == 1u || str[1] == '_') {
+ // https://www.w3.org/TR/WGSL/#identifiers
+ // must not be '_' (a single underscore)
+ // must not start with two underscores
+ return false;
+ }
+ } else if (!std::isalpha(str[0], c_locale)) {
+ return false;
}
- if (inst.GetSingleWordInOperand(2) != SpvBuiltInWorkgroupSize) {
- continue;
+ for (const char& ch : str) {
+ if ((ch != '_') && !std::isalnum(ch, c_locale)) {
+ return false;
+ }
}
- info.id = inst.GetSingleWordInOperand(0);
- }
- if (info.id == 0) {
- return true;
- }
- // Gather the values.
- const spvtools::opt::Instruction* composite_def =
- def_use_mgr_->GetDef(info.id);
- if (!composite_def) {
- return Fail() << "Invalid WorkgroupSize builtin value";
- }
- // SPIR-V validation checks that the result is a 3-element vector of 32-bit
- // integer scalars (signed or unsigned). Rely on validation to check the
- // type. In theory the instruction could be OpConstantNull and still
- // pass validation, but that would be non-sensical. Be a little more
- // stringent here and check for specific opcodes. WGSL does not support
- // const-expr yet, so avoid supporting OpSpecConstantOp here.
- // TODO(dneto): See https://github.com/gpuweb/gpuweb/issues/1272 for WGSL
- // const_expr proposals.
- if ((composite_def->opcode() != SpvOpSpecConstantComposite &&
- composite_def->opcode() != SpvOpConstantComposite)) {
- return Fail() << "Invalid WorkgroupSize builtin. Expected 3-element "
- "OpSpecConstantComposite or OpConstantComposite: "
- << composite_def->PrettyPrint();
- }
- info.type_id = composite_def->type_id();
- // Extract the component type from the vector type.
- info.component_type_id =
- def_use_mgr_->GetDef(info.type_id)->GetSingleWordInOperand(0);
-
- /// Sets the ID and value of the index'th member of the composite constant.
- /// Returns false and emits a diagnostic on error.
- auto set_param = [this, composite_def](uint32_t* id_ptr, uint32_t* value_ptr,
- int index) -> bool {
- const auto id = composite_def->GetSingleWordInOperand(index);
- const auto* def = def_use_mgr_->GetDef(id);
- if (!def ||
- (def->opcode() != SpvOpSpecConstant &&
- def->opcode() != SpvOpConstant) ||
- (def->NumInOperands() != 1)) {
- return Fail() << "invalid component " << index << " of workgroupsize "
- << (def ? def->PrettyPrint()
- : std::string("no definition"));
- }
- *id_ptr = id;
- // Use the default value of a spec constant.
- *value_ptr = def->GetSingleWordInOperand(0);
return true;
- };
+}
+
+bool ParserImpl::RegisterWorkgroupSizeBuiltin() {
+ WorkgroupSizeInfo& info = workgroup_size_builtin_;
+ for (const spvtools::opt::Instruction& inst : module_->annotations()) {
+ if (inst.opcode() != SpvOpDecorate) {
+ continue;
+ }
+ if (inst.GetSingleWordInOperand(1) != SpvDecorationBuiltIn) {
+ continue;
+ }
+ if (inst.GetSingleWordInOperand(2) != SpvBuiltInWorkgroupSize) {
+ continue;
+ }
+ info.id = inst.GetSingleWordInOperand(0);
+ }
+ if (info.id == 0) {
+ return true;
+ }
+ // Gather the values.
+ const spvtools::opt::Instruction* composite_def = def_use_mgr_->GetDef(info.id);
+ if (!composite_def) {
+ return Fail() << "Invalid WorkgroupSize builtin value";
+ }
+ // SPIR-V validation checks that the result is a 3-element vector of 32-bit
+ // integer scalars (signed or unsigned). Rely on validation to check the
+ // type. In theory the instruction could be OpConstantNull and still
+    // pass validation, but that would be nonsensical. Be a little more
+ // stringent here and check for specific opcodes. WGSL does not support
+ // const-expr yet, so avoid supporting OpSpecConstantOp here.
+ // TODO(dneto): See https://github.com/gpuweb/gpuweb/issues/1272 for WGSL
+ // const_expr proposals.
+ if ((composite_def->opcode() != SpvOpSpecConstantComposite &&
+ composite_def->opcode() != SpvOpConstantComposite)) {
+ return Fail() << "Invalid WorkgroupSize builtin. Expected 3-element "
+ "OpSpecConstantComposite or OpConstantComposite: "
+ << composite_def->PrettyPrint();
+ }
+ info.type_id = composite_def->type_id();
+ // Extract the component type from the vector type.
+ info.component_type_id = def_use_mgr_->GetDef(info.type_id)->GetSingleWordInOperand(0);
+
+ /// Sets the ID and value of the index'th member of the composite constant.
+ /// Returns false and emits a diagnostic on error.
+ auto set_param = [this, composite_def](uint32_t* id_ptr, uint32_t* value_ptr,
+ int index) -> bool {
+ const auto id = composite_def->GetSingleWordInOperand(index);
+ const auto* def = def_use_mgr_->GetDef(id);
+ if (!def || (def->opcode() != SpvOpSpecConstant && def->opcode() != SpvOpConstant) ||
+ (def->NumInOperands() != 1)) {
+ return Fail() << "invalid component " << index << " of workgroupsize "
+ << (def ? def->PrettyPrint() : std::string("no definition"));
+ }
+ *id_ptr = id;
+ // Use the default value of a spec constant.
+ *value_ptr = def->GetSingleWordInOperand(0);
+ return true;
+ };
- return set_param(&info.x_id, &info.x_value, 0) &&
- set_param(&info.y_id, &info.y_value, 1) &&
- set_param(&info.z_id, &info.z_value, 2);
+ return set_param(&info.x_id, &info.x_value, 0) && set_param(&info.y_id, &info.y_value, 1) &&
+ set_param(&info.z_id, &info.z_value, 2);
}
bool ParserImpl::RegisterEntryPoints() {
- // Mapping from entry point ID to GridSize computed from LocalSize
- // decorations.
- std::unordered_map<uint32_t, GridSize> local_size;
- for (const spvtools::opt::Instruction& inst : module_->execution_modes()) {
- auto mode = static_cast<SpvExecutionMode>(inst.GetSingleWordInOperand(1));
- if (mode == SpvExecutionModeLocalSize) {
- if (inst.NumInOperands() != 5) {
- // This won't even get past SPIR-V binary parsing.
- return Fail() << "invalid LocalSize execution mode: "
- << inst.PrettyPrint();
- }
- uint32_t function_id = inst.GetSingleWordInOperand(0);
- local_size[function_id] = GridSize{inst.GetSingleWordInOperand(2),
- inst.GetSingleWordInOperand(3),
- inst.GetSingleWordInOperand(4)};
- }
- }
-
- for (const spvtools::opt::Instruction& entry_point :
- module_->entry_points()) {
- const auto stage = SpvExecutionModel(entry_point.GetSingleWordInOperand(0));
- const uint32_t function_id = entry_point.GetSingleWordInOperand(1);
-
- const std::string ep_name = entry_point.GetOperand(2).AsString();
- if (!IsValidIdentifier(ep_name)) {
- return Fail() << "entry point name is not a valid WGSL identifier: "
- << ep_name;
- }
-
- bool owns_inner_implementation = false;
- std::string inner_implementation_name;
-
- auto where = function_to_ep_info_.find(function_id);
- if (where == function_to_ep_info_.end()) {
- // If this is the first entry point to have function_id as its
- // implementation, then this entry point is responsible for generating
- // the inner implementation.
- owns_inner_implementation = true;
- inner_implementation_name = namer_.MakeDerivedName(ep_name);
- } else {
- // Reuse the inner implementation owned by the first entry point.
- inner_implementation_name = where->second[0].inner_name;
- }
- TINT_ASSERT(Reader, !inner_implementation_name.empty());
- TINT_ASSERT(Reader, ep_name != inner_implementation_name);
-
- utils::UniqueVector<uint32_t> inputs;
- utils::UniqueVector<uint32_t> outputs;
- for (unsigned iarg = 3; iarg < entry_point.NumInOperands(); iarg++) {
- const uint32_t var_id = entry_point.GetSingleWordInOperand(iarg);
- if (const auto* var_inst = def_use_mgr_->GetDef(var_id)) {
- switch (SpvStorageClass(var_inst->GetSingleWordInOperand(0))) {
- case SpvStorageClassInput:
- inputs.add(var_id);
- break;
- case SpvStorageClassOutput:
- outputs.add(var_id);
- break;
- default:
- break;
+ // Mapping from entry point ID to GridSize computed from LocalSize
+ // decorations.
+ std::unordered_map<uint32_t, GridSize> local_size;
+ for (const spvtools::opt::Instruction& inst : module_->execution_modes()) {
+ auto mode = static_cast<SpvExecutionMode>(inst.GetSingleWordInOperand(1));
+ if (mode == SpvExecutionModeLocalSize) {
+ if (inst.NumInOperands() != 5) {
+ // This won't even get past SPIR-V binary parsing.
+ return Fail() << "invalid LocalSize execution mode: " << inst.PrettyPrint();
+ }
+ uint32_t function_id = inst.GetSingleWordInOperand(0);
+ local_size[function_id] =
+ GridSize{inst.GetSingleWordInOperand(2), inst.GetSingleWordInOperand(3),
+ inst.GetSingleWordInOperand(4)};
}
- }
- }
- // Save the lists, in ID-sorted order.
- std::vector<uint32_t> sorted_inputs(inputs);
- std::sort(sorted_inputs.begin(), sorted_inputs.end());
- std::vector<uint32_t> sorted_outputs(outputs);
- std::sort(sorted_outputs.begin(), sorted_outputs.end());
-
- const auto ast_stage = enum_converter_.ToPipelineStage(stage);
- GridSize wgsize;
- if (ast_stage == ast::PipelineStage::kCompute) {
- if (workgroup_size_builtin_.id) {
- // Store the default values.
- // WGSL allows specializing these, but this code doesn't support that
- // yet. https://github.com/gpuweb/gpuweb/issues/1442
- wgsize = GridSize{workgroup_size_builtin_.x_value,
- workgroup_size_builtin_.y_value,
- workgroup_size_builtin_.z_value};
- } else {
- // Use the LocalSize execution mode. This is the second choice.
- auto where_local_size = local_size.find(function_id);
- if (where_local_size != local_size.end()) {
- wgsize = where_local_size->second;
- }
- }
- }
- function_to_ep_info_[function_id].emplace_back(
- ep_name, ast_stage, owns_inner_implementation,
- inner_implementation_name, std::move(sorted_inputs),
- std::move(sorted_outputs), wgsize);
- }
-
- // The enum conversion could have failed, so return the existing status value.
- return success_;
-}
-
-const Type* ParserImpl::ConvertType(
- const spvtools::opt::analysis::Integer* int_ty) {
- if (int_ty->width() == 32) {
- return int_ty->IsSigned() ? static_cast<const Type*>(ty_.I32())
- : static_cast<const Type*>(ty_.U32());
- }
- Fail() << "unhandled integer width: " << int_ty->width();
- return nullptr;
-}
-
-const Type* ParserImpl::ConvertType(
- const spvtools::opt::analysis::Float* float_ty) {
- if (float_ty->width() == 32) {
- return ty_.F32();
- }
- Fail() << "unhandled float width: " << float_ty->width();
- return nullptr;
-}
-
-const Type* ParserImpl::ConvertType(
- const spvtools::opt::analysis::Vector* vec_ty) {
- const auto num_elem = vec_ty->element_count();
- auto* ast_elem_ty = ConvertType(type_mgr_->GetId(vec_ty->element_type()));
- if (ast_elem_ty == nullptr) {
- return ast_elem_ty;
- }
- return ty_.Vector(ast_elem_ty, num_elem);
-}
-
-const Type* ParserImpl::ConvertType(
- const spvtools::opt::analysis::Matrix* mat_ty) {
- const auto* vec_ty = mat_ty->element_type()->AsVector();
- const auto* scalar_ty = vec_ty->element_type();
- const auto num_rows = vec_ty->element_count();
- const auto num_columns = mat_ty->element_count();
- auto* ast_scalar_ty = ConvertType(type_mgr_->GetId(scalar_ty));
- if (ast_scalar_ty == nullptr) {
- return nullptr;
- }
- return ty_.Matrix(ast_scalar_ty, num_columns, num_rows);
+ }
+
+ for (const spvtools::opt::Instruction& entry_point : module_->entry_points()) {
+ const auto stage = SpvExecutionModel(entry_point.GetSingleWordInOperand(0));
+ const uint32_t function_id = entry_point.GetSingleWordInOperand(1);
+
+ const std::string ep_name = entry_point.GetOperand(2).AsString();
+ if (!IsValidIdentifier(ep_name)) {
+ return Fail() << "entry point name is not a valid WGSL identifier: " << ep_name;
+ }
+
+ bool owns_inner_implementation = false;
+ std::string inner_implementation_name;
+
+ auto where = function_to_ep_info_.find(function_id);
+ if (where == function_to_ep_info_.end()) {
+ // If this is the first entry point to have function_id as its
+ // implementation, then this entry point is responsible for generating
+ // the inner implementation.
+ owns_inner_implementation = true;
+ inner_implementation_name = namer_.MakeDerivedName(ep_name);
+ } else {
+ // Reuse the inner implementation owned by the first entry point.
+ inner_implementation_name = where->second[0].inner_name;
+ }
+ TINT_ASSERT(Reader, !inner_implementation_name.empty());
+ TINT_ASSERT(Reader, ep_name != inner_implementation_name);
+
+ utils::UniqueVector<uint32_t> inputs;
+ utils::UniqueVector<uint32_t> outputs;
+ for (unsigned iarg = 3; iarg < entry_point.NumInOperands(); iarg++) {
+ const uint32_t var_id = entry_point.GetSingleWordInOperand(iarg);
+ if (const auto* var_inst = def_use_mgr_->GetDef(var_id)) {
+ switch (SpvStorageClass(var_inst->GetSingleWordInOperand(0))) {
+ case SpvStorageClassInput:
+ inputs.add(var_id);
+ break;
+ case SpvStorageClassOutput:
+ outputs.add(var_id);
+ break;
+ default:
+ break;
+ }
+ }
+ }
+ // Save the lists, in ID-sorted order.
+ std::vector<uint32_t> sorted_inputs(inputs);
+ std::sort(sorted_inputs.begin(), sorted_inputs.end());
+ std::vector<uint32_t> sorted_outputs(outputs);
+ std::sort(sorted_outputs.begin(), sorted_outputs.end());
+
+ const auto ast_stage = enum_converter_.ToPipelineStage(stage);
+ GridSize wgsize;
+ if (ast_stage == ast::PipelineStage::kCompute) {
+ if (workgroup_size_builtin_.id) {
+ // Store the default values.
+ // WGSL allows specializing these, but this code doesn't support that
+ // yet. https://github.com/gpuweb/gpuweb/issues/1442
+ wgsize = GridSize{workgroup_size_builtin_.x_value, workgroup_size_builtin_.y_value,
+ workgroup_size_builtin_.z_value};
+ } else {
+ // Use the LocalSize execution mode. This is the second choice.
+ auto where_local_size = local_size.find(function_id);
+ if (where_local_size != local_size.end()) {
+ wgsize = where_local_size->second;
+ }
+ }
+ }
+ function_to_ep_info_[function_id].emplace_back(
+ ep_name, ast_stage, owns_inner_implementation, inner_implementation_name,
+ std::move(sorted_inputs), std::move(sorted_outputs), wgsize);
+ }
+
+ // The enum conversion could have failed, so return the existing status value.
+ return success_;
}
-const Type* ParserImpl::ConvertType(
- uint32_t type_id,
- const spvtools::opt::analysis::RuntimeArray* rtarr_ty) {
- auto* ast_elem_ty = ConvertType(type_mgr_->GetId(rtarr_ty->element_type()));
- if (ast_elem_ty == nullptr) {
- return nullptr;
- }
- uint32_t array_stride = 0;
- if (!ParseArrayDecorations(rtarr_ty, &array_stride)) {
- return nullptr;
- }
- const Type* result = ty_.Array(ast_elem_ty, 0, array_stride);
- return MaybeGenerateAlias(type_id, rtarr_ty, result);
-}
-
-const Type* ParserImpl::ConvertType(
- uint32_t type_id,
- const spvtools::opt::analysis::Array* arr_ty) {
- // Get the element type. The SPIR-V optimizer's types representation
- // deduplicates array types that have the same parameterization.
- // We don't want that deduplication, so get the element type from
- // the SPIR-V type directly.
- const auto* inst = def_use_mgr_->GetDef(type_id);
- const auto elem_type_id = inst->GetSingleWordInOperand(0);
- auto* ast_elem_ty = ConvertType(elem_type_id);
- if (ast_elem_ty == nullptr) {
- return nullptr;
- }
- // Get the length.
- const auto& length_info = arr_ty->length_info();
- if (length_info.words.empty()) {
- // The internal representation is invalid. The discriminant vector
- // is mal-formed.
- Fail() << "internal error: Array length info is invalid";
- return nullptr;
- }
- if (length_info.words[0] !=
- spvtools::opt::analysis::Array::LengthInfo::kConstant) {
- Fail() << "Array type " << type_mgr_->GetId(arr_ty)
- << " length is a specialization constant";
- return nullptr;
- }
- const auto* constant = constant_mgr_->FindDeclaredConstant(length_info.id);
- if (constant == nullptr) {
- Fail() << "Array type " << type_mgr_->GetId(arr_ty) << " length ID "
- << length_info.id << " does not name an OpConstant";
- return nullptr;
- }
- const uint64_t num_elem = constant->GetZeroExtendedValue();
- // For now, limit to only 32bits.
- if (num_elem > std::numeric_limits<uint32_t>::max()) {
- Fail() << "Array type " << type_mgr_->GetId(arr_ty)
- << " has too many elements (more than can fit in 32 bits): "
- << num_elem;
- return nullptr;
- }
- uint32_t array_stride = 0;
- if (!ParseArrayDecorations(arr_ty, &array_stride)) {
- return nullptr;
- }
- if (remap_buffer_block_type_.count(elem_type_id)) {
- remap_buffer_block_type_.insert(type_mgr_->GetId(arr_ty));
- }
- const Type* result =
- ty_.Array(ast_elem_ty, static_cast<uint32_t>(num_elem), array_stride);
- return MaybeGenerateAlias(type_id, arr_ty, result);
-}
-
-bool ParserImpl::ParseArrayDecorations(
- const spvtools::opt::analysis::Type* spv_type,
- uint32_t* array_stride) {
- *array_stride = 0; // Implicit stride case.
- const auto type_id = type_mgr_->GetId(spv_type);
- for (auto& decoration : this->GetDecorationsFor(type_id)) {
- if (decoration.size() == 2 && decoration[0] == SpvDecorationArrayStride) {
- const auto stride = decoration[1];
- if (stride == 0) {
- return Fail() << "invalid array type ID " << type_id
- << ": ArrayStride can't be 0";
- }
- *array_stride = stride;
- } else {
- return Fail() << "invalid array type ID " << type_id
- << ": unknown decoration "
- << (decoration.empty() ? "(empty)"
- : std::to_string(decoration[0]))
- << " with " << decoration.size() << " total words";
- }
- }
- return true;
-}
-
-const Type* ParserImpl::ConvertType(
- uint32_t type_id,
- const spvtools::opt::analysis::Struct* struct_ty) {
- // Compute the struct decoration.
- auto struct_decorations = this->GetDecorationsFor(type_id);
- if (struct_decorations.size() == 1) {
- const auto decoration = struct_decorations[0][0];
- if (decoration == SpvDecorationBufferBlock) {
- remap_buffer_block_type_.insert(type_id);
- } else if (decoration != SpvDecorationBlock) {
- Fail() << "struct with ID " << type_id
- << " has unrecognized decoration: " << int(decoration);
- }
- } else if (struct_decorations.size() > 1) {
- Fail() << "can't handle a struct with more than one decoration: struct "
- << type_id << " has " << struct_decorations.size();
+const Type* ParserImpl::ConvertType(const spvtools::opt::analysis::Integer* int_ty) {
+ if (int_ty->width() == 32) {
+ return int_ty->IsSigned() ? static_cast<const Type*>(ty_.I32())
+ : static_cast<const Type*>(ty_.U32());
+ }
+ Fail() << "unhandled integer width: " << int_ty->width();
return nullptr;
- }
+}
- // Compute members
- ast::StructMemberList ast_members;
- const auto members = struct_ty->element_types();
- if (members.empty()) {
- Fail() << "WGSL does not support empty structures. can't convert type: "
- << def_use_mgr_->GetDef(type_id)->PrettyPrint();
+const Type* ParserImpl::ConvertType(const spvtools::opt::analysis::Float* float_ty) {
+ if (float_ty->width() == 32) {
+ return ty_.F32();
+ }
+ Fail() << "unhandled float width: " << float_ty->width();
return nullptr;
- }
- TypeList ast_member_types;
- unsigned num_non_writable_members = 0;
- for (uint32_t member_index = 0; member_index < members.size();
- ++member_index) {
- const auto member_type_id = type_mgr_->GetId(members[member_index]);
- auto* ast_member_ty = ConvertType(member_type_id);
- if (ast_member_ty == nullptr) {
- // Already emitted diagnostics.
- return nullptr;
- }
-
- ast_member_types.emplace_back(ast_member_ty);
-
- // Scan member for built-in decorations. Some vertex built-ins are handled
- // specially, and should not generate a structure member.
- bool create_ast_member = true;
- for (auto& decoration : GetDecorationsForMember(type_id, member_index)) {
- if (decoration.empty()) {
- Fail() << "malformed SPIR-V decoration: it's empty";
+}
+
+const Type* ParserImpl::ConvertType(const spvtools::opt::analysis::Vector* vec_ty) {
+ const auto num_elem = vec_ty->element_count();
+ auto* ast_elem_ty = ConvertType(type_mgr_->GetId(vec_ty->element_type()));
+ if (ast_elem_ty == nullptr) {
+ return ast_elem_ty;
+ }
+ return ty_.Vector(ast_elem_ty, num_elem);
+}
+
+const Type* ParserImpl::ConvertType(const spvtools::opt::analysis::Matrix* mat_ty) {
+ const auto* vec_ty = mat_ty->element_type()->AsVector();
+ const auto* scalar_ty = vec_ty->element_type();
+ const auto num_rows = vec_ty->element_count();
+ const auto num_columns = mat_ty->element_count();
+ auto* ast_scalar_ty = ConvertType(type_mgr_->GetId(scalar_ty));
+ if (ast_scalar_ty == nullptr) {
return nullptr;
- }
- if ((decoration[0] == SpvDecorationBuiltIn) && (decoration.size() > 1)) {
- switch (decoration[1]) {
- case SpvBuiltInPosition:
- // Record this built-in variable specially.
- builtin_position_.struct_type_id = type_id;
- builtin_position_.position_member_index = member_index;
- builtin_position_.position_member_type_id = member_type_id;
- create_ast_member = false; // Not part of the WGSL structure.
- break;
- case SpvBuiltInPointSize: // not supported in WGSL, but ignore
- builtin_position_.pointsize_member_index = member_index;
- create_ast_member = false; // Not part of the WGSL structure.
- break;
- case SpvBuiltInClipDistance: // not supported in WGSL
- case SpvBuiltInCullDistance: // not supported in WGSL
- create_ast_member = false; // Not part of the WGSL structure.
- break;
- default:
- Fail() << "unrecognized builtin " << decoration[1];
+ }
+ return ty_.Matrix(ast_scalar_ty, num_columns, num_rows);
+}
+
+const Type* ParserImpl::ConvertType(uint32_t type_id,
+ const spvtools::opt::analysis::RuntimeArray* rtarr_ty) {
+ auto* ast_elem_ty = ConvertType(type_mgr_->GetId(rtarr_ty->element_type()));
+ if (ast_elem_ty == nullptr) {
+ return nullptr;
+ }
+ uint32_t array_stride = 0;
+ if (!ParseArrayDecorations(rtarr_ty, &array_stride)) {
+ return nullptr;
+ }
+ const Type* result = ty_.Array(ast_elem_ty, 0, array_stride);
+ return MaybeGenerateAlias(type_id, rtarr_ty, result);
+}
+
+const Type* ParserImpl::ConvertType(uint32_t type_id,
+ const spvtools::opt::analysis::Array* arr_ty) {
+ // Get the element type. The SPIR-V optimizer's types representation
+ // deduplicates array types that have the same parameterization.
+ // We don't want that deduplication, so get the element type from
+ // the SPIR-V type directly.
+ const auto* inst = def_use_mgr_->GetDef(type_id);
+ const auto elem_type_id = inst->GetSingleWordInOperand(0);
+ auto* ast_elem_ty = ConvertType(elem_type_id);
+ if (ast_elem_ty == nullptr) {
+ return nullptr;
+ }
+ // Get the length.
+ const auto& length_info = arr_ty->length_info();
+ if (length_info.words.empty()) {
+ // The internal representation is invalid. The discriminant vector
+        // is malformed.
+ Fail() << "internal error: Array length info is invalid";
+ return nullptr;
+ }
+ if (length_info.words[0] != spvtools::opt::analysis::Array::LengthInfo::kConstant) {
+ Fail() << "Array type " << type_mgr_->GetId(arr_ty)
+ << " length is a specialization constant";
+ return nullptr;
+ }
+ const auto* constant = constant_mgr_->FindDeclaredConstant(length_info.id);
+ if (constant == nullptr) {
+ Fail() << "Array type " << type_mgr_->GetId(arr_ty) << " length ID " << length_info.id
+ << " does not name an OpConstant";
+ return nullptr;
+ }
+ const uint64_t num_elem = constant->GetZeroExtendedValue();
+    // For now, limit to only 32 bits.
+ if (num_elem > std::numeric_limits<uint32_t>::max()) {
+ Fail() << "Array type " << type_mgr_->GetId(arr_ty)
+ << " has too many elements (more than can fit in 32 bits): " << num_elem;
+ return nullptr;
+ }
+ uint32_t array_stride = 0;
+ if (!ParseArrayDecorations(arr_ty, &array_stride)) {
+ return nullptr;
+ }
+ if (remap_buffer_block_type_.count(elem_type_id)) {
+ remap_buffer_block_type_.insert(type_mgr_->GetId(arr_ty));
+ }
+ const Type* result = ty_.Array(ast_elem_ty, static_cast<uint32_t>(num_elem), array_stride);
+ return MaybeGenerateAlias(type_id, arr_ty, result);
+}
+
+bool ParserImpl::ParseArrayDecorations(const spvtools::opt::analysis::Type* spv_type,
+ uint32_t* array_stride) {
+ *array_stride = 0; // Implicit stride case.
+ const auto type_id = type_mgr_->GetId(spv_type);
+ for (auto& decoration : this->GetDecorationsFor(type_id)) {
+ if (decoration.size() == 2 && decoration[0] == SpvDecorationArrayStride) {
+ const auto stride = decoration[1];
+ if (stride == 0) {
+ return Fail() << "invalid array type ID " << type_id << ": ArrayStride can't be 0";
+ }
+ *array_stride = stride;
+ } else {
+ return Fail() << "invalid array type ID " << type_id << ": unknown decoration "
+ << (decoration.empty() ? "(empty)" : std::to_string(decoration[0]))
+ << " with " << decoration.size() << " total words";
+ }
+ }
+ return true;
+}
+
+const Type* ParserImpl::ConvertType(uint32_t type_id,
+ const spvtools::opt::analysis::Struct* struct_ty) {
+ // Compute the struct decoration.
+ auto struct_decorations = this->GetDecorationsFor(type_id);
+ if (struct_decorations.size() == 1) {
+ const auto decoration = struct_decorations[0][0];
+ if (decoration == SpvDecorationBufferBlock) {
+ remap_buffer_block_type_.insert(type_id);
+ } else if (decoration != SpvDecorationBlock) {
+ Fail() << "struct with ID " << type_id
+ << " has unrecognized decoration: " << int(decoration);
+ }
+ } else if (struct_decorations.size() > 1) {
+ Fail() << "can't handle a struct with more than one decoration: struct " << type_id
+ << " has " << struct_decorations.size();
+ return nullptr;
+ }
+
+ // Compute members
+ ast::StructMemberList ast_members;
+ const auto members = struct_ty->element_types();
+ if (members.empty()) {
+ Fail() << "WGSL does not support empty structures. can't convert type: "
+ << def_use_mgr_->GetDef(type_id)->PrettyPrint();
+ return nullptr;
+ }
+ TypeList ast_member_types;
+ unsigned num_non_writable_members = 0;
+ for (uint32_t member_index = 0; member_index < members.size(); ++member_index) {
+ const auto member_type_id = type_mgr_->GetId(members[member_index]);
+ auto* ast_member_ty = ConvertType(member_type_id);
+ if (ast_member_ty == nullptr) {
+ // Already emitted diagnostics.
return nullptr;
}
- }
- }
- if (!create_ast_member) {
- // This member is decorated as a built-in, and is handled specially.
- continue;
- }
-
- bool is_non_writable = false;
- ast::AttributeList ast_member_decorations;
- for (auto& decoration : GetDecorationsForMember(type_id, member_index)) {
- if (IsPipelineDecoration(decoration)) {
- // IO decorations are handled when emitting the entry point.
- continue;
- } else if (decoration[0] == SpvDecorationNonWritable) {
- // WGSL doesn't represent individual members as non-writable. Instead,
- // apply the ReadOnly access control to the containing struct if all
- // the members are non-writable.
- is_non_writable = true;
- } else {
- auto decos = ConvertMemberDecoration(type_id, member_index,
- ast_member_ty, decoration);
- for (auto* deco : decos) {
- ast_member_decorations.emplace_back(deco);
+
+ ast_member_types.emplace_back(ast_member_ty);
+
+ // Scan member for built-in decorations. Some vertex built-ins are handled
+ // specially, and should not generate a structure member.
+ bool create_ast_member = true;
+ for (auto& decoration : GetDecorationsForMember(type_id, member_index)) {
+ if (decoration.empty()) {
+ Fail() << "malformed SPIR-V decoration: it's empty";
+ return nullptr;
+ }
+ if ((decoration[0] == SpvDecorationBuiltIn) && (decoration.size() > 1)) {
+ switch (decoration[1]) {
+ case SpvBuiltInPosition:
+ // Record this built-in variable specially.
+ builtin_position_.struct_type_id = type_id;
+ builtin_position_.position_member_index = member_index;
+ builtin_position_.position_member_type_id = member_type_id;
+ create_ast_member = false; // Not part of the WGSL structure.
+ break;
+ case SpvBuiltInPointSize: // not supported in WGSL, but ignore
+ builtin_position_.pointsize_member_index = member_index;
+ create_ast_member = false; // Not part of the WGSL structure.
+ break;
+ case SpvBuiltInClipDistance: // not supported in WGSL
+ case SpvBuiltInCullDistance: // not supported in WGSL
+ create_ast_member = false; // Not part of the WGSL structure.
+ break;
+ default:
+ Fail() << "unrecognized builtin " << decoration[1];
+ return nullptr;
+ }
+ }
}
- if (!success_) {
- return nullptr;
+ if (!create_ast_member) {
+ // This member is decorated as a built-in, and is handled specially.
+ continue;
+ }
+
+ bool is_non_writable = false;
+ ast::AttributeList ast_member_decorations;
+ for (auto& decoration : GetDecorationsForMember(type_id, member_index)) {
+ if (IsPipelineDecoration(decoration)) {
+ // IO decorations are handled when emitting the entry point.
+ continue;
+ } else if (decoration[0] == SpvDecorationNonWritable) {
+ // WGSL doesn't represent individual members as non-writable. Instead,
+ // apply the ReadOnly access control to the containing struct if all
+ // the members are non-writable.
+ is_non_writable = true;
+ } else {
+ auto decos =
+ ConvertMemberDecoration(type_id, member_index, ast_member_ty, decoration);
+ for (auto* deco : decos) {
+ ast_member_decorations.emplace_back(deco);
+ }
+ if (!success_) {
+ return nullptr;
+ }
+ }
}
- }
- }
- if (is_non_writable) {
- // Count a member as non-writable only once, no matter how many
- // NonWritable decorations are applied to it.
- ++num_non_writable_members;
+ if (is_non_writable) {
+ // Count a member as non-writable only once, no matter how many
+ // NonWritable decorations are applied to it.
+ ++num_non_writable_members;
+ }
+ const auto member_name = namer_.GetMemberName(type_id, member_index);
+ auto* ast_struct_member = create<ast::StructMember>(
+ Source{}, builder_.Symbols().Register(member_name), ast_member_ty->Build(builder_),
+ std::move(ast_member_decorations));
+ ast_members.push_back(ast_struct_member);
}
- const auto member_name = namer_.GetMemberName(type_id, member_index);
- auto* ast_struct_member = create<ast::StructMember>(
- Source{}, builder_.Symbols().Register(member_name),
- ast_member_ty->Build(builder_), std::move(ast_member_decorations));
- ast_members.push_back(ast_struct_member);
- }
- if (ast_members.empty()) {
- // All members were likely built-ins. Don't generate an empty AST structure.
- return nullptr;
- }
+ if (ast_members.empty()) {
+ // All members were likely built-ins. Don't generate an empty AST structure.
+ return nullptr;
+ }
- namer_.SuggestSanitizedName(type_id, "S");
+ namer_.SuggestSanitizedName(type_id, "S");
- auto name = namer_.GetName(type_id);
+ auto name = namer_.GetName(type_id);
- // Now make the struct.
- auto sym = builder_.Symbols().Register(name);
- auto* ast_struct = create<ast::Struct>(Source{}, sym, std::move(ast_members),
- ast::AttributeList());
- if (num_non_writable_members == members.size()) {
- read_only_struct_types_.insert(ast_struct->name);
- }
- AddTypeDecl(sym, ast_struct);
- const auto* result = ty_.Struct(sym, std::move(ast_member_types));
- struct_id_for_symbol_[sym] = type_id;
- return result;
+ // Now make the struct.
+ auto sym = builder_.Symbols().Register(name);
+ auto* ast_struct =
+ create<ast::Struct>(Source{}, sym, std::move(ast_members), ast::AttributeList());
+ if (num_non_writable_members == members.size()) {
+ read_only_struct_types_.insert(ast_struct->name);
+ }
+ AddTypeDecl(sym, ast_struct);
+ const auto* result = ty_.Struct(sym, std::move(ast_member_types));
+ struct_id_for_symbol_[sym] = type_id;
+ return result;
}
void ParserImpl::AddTypeDecl(Symbol name, const ast::TypeDecl* decl) {
- auto iter = declared_types_.insert(name);
- if (iter.second) {
- builder_.AST().AddTypeDecl(decl);
- }
+ auto iter = declared_types_.insert(name);
+ if (iter.second) {
+ builder_.AST().AddTypeDecl(decl);
+ }
}
const Type* ParserImpl::ConvertType(uint32_t type_id,
PtrAs ptr_as,
const spvtools::opt::analysis::Pointer*) {
- const auto* inst = def_use_mgr_->GetDef(type_id);
- const auto pointee_type_id = inst->GetSingleWordInOperand(1);
- const auto storage_class = SpvStorageClass(inst->GetSingleWordInOperand(0));
-
- if (pointee_type_id == builtin_position_.struct_type_id) {
- builtin_position_.pointer_type_id = type_id;
- // Pipeline IO builtins map to private variables.
- builtin_position_.storage_class = SpvStorageClassPrivate;
- return nullptr;
- }
- auto* ast_elem_ty = ConvertType(pointee_type_id, PtrAs::Ptr);
- if (ast_elem_ty == nullptr) {
- Fail() << "SPIR-V pointer type with ID " << type_id
- << " has invalid pointee type " << pointee_type_id;
- return nullptr;
- }
+ const auto* inst = def_use_mgr_->GetDef(type_id);
+ const auto pointee_type_id = inst->GetSingleWordInOperand(1);
+ const auto storage_class = SpvStorageClass(inst->GetSingleWordInOperand(0));
+
+ if (pointee_type_id == builtin_position_.struct_type_id) {
+ builtin_position_.pointer_type_id = type_id;
+ // Pipeline IO builtins map to private variables.
+ builtin_position_.storage_class = SpvStorageClassPrivate;
+ return nullptr;
+ }
+ auto* ast_elem_ty = ConvertType(pointee_type_id, PtrAs::Ptr);
+ if (ast_elem_ty == nullptr) {
+ Fail() << "SPIR-V pointer type with ID " << type_id << " has invalid pointee type "
+ << pointee_type_id;
+ return nullptr;
+ }
+
+ auto ast_storage_class = enum_converter_.ToStorageClass(storage_class);
+ if (ast_storage_class == ast::StorageClass::kInvalid) {
+ Fail() << "SPIR-V pointer type with ID " << type_id << " has invalid storage class "
+ << static_cast<uint32_t>(storage_class);
+ return nullptr;
+ }
+ if (ast_storage_class == ast::StorageClass::kUniform &&
+ remap_buffer_block_type_.count(pointee_type_id)) {
+ ast_storage_class = ast::StorageClass::kStorage;
+ remap_buffer_block_type_.insert(type_id);
+ }
- auto ast_storage_class = enum_converter_.ToStorageClass(storage_class);
- if (ast_storage_class == ast::StorageClass::kInvalid) {
- Fail() << "SPIR-V pointer type with ID " << type_id
- << " has invalid storage class "
- << static_cast<uint32_t>(storage_class);
+ // Pipeline input and output variables map to private variables.
+ if (ast_storage_class == ast::StorageClass::kInput ||
+ ast_storage_class == ast::StorageClass::kOutput) {
+ ast_storage_class = ast::StorageClass::kPrivate;
+ }
+ switch (ptr_as) {
+ case PtrAs::Ref:
+ return ty_.Reference(ast_elem_ty, ast_storage_class);
+ case PtrAs::Ptr:
+ return ty_.Pointer(ast_elem_ty, ast_storage_class);
+ }
+ Fail() << "invalid value for ptr_as: " << static_cast<int>(ptr_as);
return nullptr;
- }
- if (ast_storage_class == ast::StorageClass::kUniform &&
- remap_buffer_block_type_.count(pointee_type_id)) {
- ast_storage_class = ast::StorageClass::kStorage;
- remap_buffer_block_type_.insert(type_id);
- }
-
- // Pipeline input and output variables map to private variables.
- if (ast_storage_class == ast::StorageClass::kInput ||
- ast_storage_class == ast::StorageClass::kOutput) {
- ast_storage_class = ast::StorageClass::kPrivate;
- }
- switch (ptr_as) {
- case PtrAs::Ref:
- return ty_.Reference(ast_elem_ty, ast_storage_class);
- case PtrAs::Ptr:
- return ty_.Pointer(ast_elem_ty, ast_storage_class);
- }
- Fail() << "invalid value for ptr_as: " << static_cast<int>(ptr_as);
- return nullptr;
}
bool ParserImpl::RegisterTypes() {
- if (!success_) {
- return false;
- }
-
- // First record the structure types that should have a `block` decoration
- // in WGSL. In particular, exclude user-defined pipeline IO in a
- // block-decorated struct.
- for (const auto& type_or_value : module_->types_values()) {
- if (type_or_value.opcode() != SpvOpVariable) {
- continue;
- }
- const auto& var = type_or_value;
- const auto spirv_storage_class =
- SpvStorageClass(var.GetSingleWordInOperand(0));
- if ((spirv_storage_class != SpvStorageClassStorageBuffer) &&
- (spirv_storage_class != SpvStorageClassUniform)) {
- continue;
+ if (!success_) {
+ return false;
}
- const auto* ptr_type = def_use_mgr_->GetDef(var.type_id());
- if (ptr_type->opcode() != SpvOpTypePointer) {
- return Fail() << "OpVariable type expected to be a pointer: "
- << var.PrettyPrint();
- }
- const auto* store_type =
- def_use_mgr_->GetDef(ptr_type->GetSingleWordInOperand(1));
- if (store_type->opcode() == SpvOpTypeStruct) {
- struct_types_for_buffers_.insert(store_type->result_id());
- } else {
- Fail() << "WGSL does not support arrays of buffers: "
- << var.PrettyPrint();
- }
- }
-
- // Now convert each type.
- for (auto& type_or_const : module_->types_values()) {
- const auto* type = type_mgr_->GetType(type_or_const.result_id());
- if (type == nullptr) {
- continue;
- }
- ConvertType(type_or_const.result_id());
- }
- // Manufacture a type for the gl_Position variable if we have to.
- if ((builtin_position_.struct_type_id != 0) &&
- (builtin_position_.position_member_pointer_type_id == 0)) {
- builtin_position_.position_member_pointer_type_id =
- type_mgr_->FindPointerToType(builtin_position_.position_member_type_id,
- builtin_position_.storage_class);
- ConvertType(builtin_position_.position_member_pointer_type_id);
- }
- return success_;
+
+ // First record the structure types that should have a `block` decoration
+ // in WGSL. In particular, exclude user-defined pipeline IO in a
+ // block-decorated struct.
+ for (const auto& type_or_value : module_->types_values()) {
+ if (type_or_value.opcode() != SpvOpVariable) {
+ continue;
+ }
+ const auto& var = type_or_value;
+ const auto spirv_storage_class = SpvStorageClass(var.GetSingleWordInOperand(0));
+ if ((spirv_storage_class != SpvStorageClassStorageBuffer) &&
+ (spirv_storage_class != SpvStorageClassUniform)) {
+ continue;
+ }
+ const auto* ptr_type = def_use_mgr_->GetDef(var.type_id());
+ if (ptr_type->opcode() != SpvOpTypePointer) {
+ return Fail() << "OpVariable type expected to be a pointer: " << var.PrettyPrint();
+ }
+ const auto* store_type = def_use_mgr_->GetDef(ptr_type->GetSingleWordInOperand(1));
+ if (store_type->opcode() == SpvOpTypeStruct) {
+ struct_types_for_buffers_.insert(store_type->result_id());
+ } else {
+ Fail() << "WGSL does not support arrays of buffers: " << var.PrettyPrint();
+ }
+ }
+
+ // Now convert each type.
+ for (auto& type_or_const : module_->types_values()) {
+ const auto* type = type_mgr_->GetType(type_or_const.result_id());
+ if (type == nullptr) {
+ continue;
+ }
+ ConvertType(type_or_const.result_id());
+ }
+ // Manufacture a type for the gl_Position variable if we have to.
+ if ((builtin_position_.struct_type_id != 0) &&
+ (builtin_position_.position_member_pointer_type_id == 0)) {
+ builtin_position_.position_member_pointer_type_id = type_mgr_->FindPointerToType(
+ builtin_position_.position_member_type_id, builtin_position_.storage_class);
+ ConvertType(builtin_position_.position_member_pointer_type_id);
+ }
+ return success_;
}
bool ParserImpl::RejectInvalidPointerRoots() {
- if (!success_) {
- return false;
- }
- for (auto& inst : module_->types_values()) {
- if (const auto* result_type = type_mgr_->GetType(inst.type_id())) {
- if (result_type->AsPointer()) {
- switch (inst.opcode()) {
- case SpvOpVariable:
- // This is the only valid case.
- break;
- case SpvOpUndef:
- return Fail() << "undef pointer is not valid: "
- << inst.PrettyPrint();
- case SpvOpConstantNull:
- return Fail() << "null pointer is not valid: "
- << inst.PrettyPrint();
- default:
- return Fail() << "module-scope pointer is not valid: "
- << inst.PrettyPrint();
+ if (!success_) {
+ return false;
+ }
+ for (auto& inst : module_->types_values()) {
+ if (const auto* result_type = type_mgr_->GetType(inst.type_id())) {
+ if (result_type->AsPointer()) {
+ switch (inst.opcode()) {
+ case SpvOpVariable:
+ // This is the only valid case.
+ break;
+ case SpvOpUndef:
+ return Fail() << "undef pointer is not valid: " << inst.PrettyPrint();
+ case SpvOpConstantNull:
+ return Fail() << "null pointer is not valid: " << inst.PrettyPrint();
+ default:
+ return Fail()
+ << "module-scope pointer is not valid: " << inst.PrettyPrint();
+ }
+ }
}
- }
}
- }
- return success();
+ return success();
}
bool ParserImpl::EmitScalarSpecConstants() {
- if (!success_) {
- return false;
- }
- // Generate a module-scope const declaration for each instruction
- // that is OpSpecConstantTrue, OpSpecConstantFalse, or OpSpecConstant.
- for (auto& inst : module_->types_values()) {
- // These will be populated for a valid scalar spec constant.
- const Type* ast_type = nullptr;
- ast::LiteralExpression* ast_expr = nullptr;
-
- switch (inst.opcode()) {
- case SpvOpSpecConstantTrue:
- case SpvOpSpecConstantFalse: {
- ast_type = ConvertType(inst.type_id());
- ast_expr = create<ast::BoolLiteralExpression>(
- Source{}, inst.opcode() == SpvOpSpecConstantTrue);
- break;
- }
- case SpvOpSpecConstant: {
- ast_type = ConvertType(inst.type_id());
- const uint32_t literal_value = inst.GetSingleWordInOperand(0);
- if (ast_type->Is<I32>()) {
- ast_expr = create<ast::SintLiteralExpression>(
- Source{}, static_cast<int32_t>(literal_value));
- } else if (ast_type->Is<U32>()) {
- ast_expr = create<ast::UintLiteralExpression>(
- Source{}, static_cast<uint32_t>(literal_value));
- } else if (ast_type->Is<F32>()) {
- float float_value;
- // Copy the bits so we can read them as a float.
- std::memcpy(&float_value, &literal_value, sizeof(float_value));
- ast_expr = create<ast::FloatLiteralExpression>(Source{}, float_value);
- } else {
- return Fail() << " invalid result type for OpSpecConstant "
- << inst.PrettyPrint();
- }
- break;
- }
- default:
- break;
- }
- if (ast_type && ast_expr) {
- ast::AttributeList spec_id_decos;
- for (const auto& deco : GetDecorationsFor(inst.result_id())) {
- if ((deco.size() == 2) && (deco[0] == SpvDecorationSpecId)) {
- const uint32_t id = deco[1];
- if (id > 65535) {
- return Fail() << "SpecId too large. WGSL override IDs must be "
- "between 0 and 65535: ID %"
- << inst.result_id() << " has SpecId " << id;
- }
- auto* cid = create<ast::IdAttribute>(Source{}, id);
- spec_id_decos.push_back(cid);
- break;
- }
- }
- auto* ast_var =
- MakeVariable(inst.result_id(), ast::StorageClass::kNone, ast_type,
- true, true, ast_expr, std::move(spec_id_decos));
- if (ast_var) {
- builder_.AST().AddGlobalVariable(ast_var);
- scalar_spec_constants_.insert(inst.result_id());
- }
+ if (!success_) {
+ return false;
}
- }
- return success_;
+ // Generate a module-scope const declaration for each instruction
+ // that is OpSpecConstantTrue, OpSpecConstantFalse, or OpSpecConstant.
+ for (auto& inst : module_->types_values()) {
+ // These will be populated for a valid scalar spec constant.
+ const Type* ast_type = nullptr;
+ ast::LiteralExpression* ast_expr = nullptr;
+
+ switch (inst.opcode()) {
+ case SpvOpSpecConstantTrue:
+ case SpvOpSpecConstantFalse: {
+ ast_type = ConvertType(inst.type_id());
+ ast_expr = create<ast::BoolLiteralExpression>(
+ Source{}, inst.opcode() == SpvOpSpecConstantTrue);
+ break;
+ }
+ case SpvOpSpecConstant: {
+ ast_type = ConvertType(inst.type_id());
+ const uint32_t literal_value = inst.GetSingleWordInOperand(0);
+ ast_expr = Switch(
+ ast_type, //
+ [&](const I32*) {
+ return create<ast::IntLiteralExpression>(
+ Source{}, static_cast<int64_t>(literal_value),
+ ast::IntLiteralExpression::Suffix::kI);
+ },
+ [&](const U32*) {
+ return create<ast::IntLiteralExpression>(
+ Source{}, static_cast<uint64_t>(literal_value),
+ ast::IntLiteralExpression::Suffix::kU);
+ },
+ [&](const F32*) {
+ float float_value;
+ // Copy the bits so we can read them as a float.
+ std::memcpy(&float_value, &literal_value, sizeof(float_value));
+ return create<ast::FloatLiteralExpression>(
+ Source{}, static_cast<double>(float_value),
+ ast::FloatLiteralExpression::Suffix::kF);
+ });
+ if (ast_expr == nullptr) {
+ return Fail() << " invalid result type for OpSpecConstant "
+ << inst.PrettyPrint();
+ }
+ break;
+ }
+ default:
+ break;
+ }
+ if (ast_type && ast_expr) {
+ ast::AttributeList spec_id_decos;
+ for (const auto& deco : GetDecorationsFor(inst.result_id())) {
+ if ((deco.size() == 2) && (deco[0] == SpvDecorationSpecId)) {
+ const uint32_t id = deco[1];
+ if (id > 65535) {
+ return Fail() << "SpecId too large. WGSL override IDs must be "
+ "between 0 and 65535: ID %"
+ << inst.result_id() << " has SpecId " << id;
+ }
+ auto* cid = create<ast::IdAttribute>(Source{}, id);
+ spec_id_decos.push_back(cid);
+ break;
+ }
+ }
+ auto* ast_var = MakeVariable(inst.result_id(), ast::StorageClass::kNone, ast_type, true,
+ true, ast_expr, std::move(spec_id_decos));
+ if (ast_var) {
+ builder_.AST().AddGlobalVariable(ast_var);
+ scalar_spec_constants_.insert(inst.result_id());
+ }
+ }
+ }
+ return success_;
}
-const Type* ParserImpl::MaybeGenerateAlias(
- uint32_t type_id,
- const spvtools::opt::analysis::Type* type,
- const Type* ast_type) {
- if (!success_) {
- return nullptr;
- }
-
- // We only care about arrays, and runtime arrays.
- switch (type->kind()) {
- case spvtools::opt::analysis::Type::kRuntimeArray:
- // Runtime arrays are always decorated with ArrayStride so always get a
- // type alias.
- namer_.SuggestSanitizedName(type_id, "RTArr");
- break;
- case spvtools::opt::analysis::Type::kArray:
- // Only make a type aliase for arrays with decorations.
- if (GetDecorationsFor(type_id).empty()) {
- return ast_type;
- }
- namer_.SuggestSanitizedName(type_id, "Arr");
- break;
- default:
- // Ignore constants, and any other types.
- return ast_type;
- }
- auto* ast_underlying_type = ast_type;
- if (ast_underlying_type == nullptr) {
- Fail() << "internal error: no type registered for SPIR-V ID: " << type_id;
- return nullptr;
- }
- const auto name = namer_.GetName(type_id);
- const auto sym = builder_.Symbols().Register(name);
- auto* ast_alias_type =
- builder_.ty.alias(sym, ast_underlying_type->Build(builder_));
+const Type* ParserImpl::MaybeGenerateAlias(uint32_t type_id,
+ const spvtools::opt::analysis::Type* type,
+ const Type* ast_type) {
+ if (!success_) {
+ return nullptr;
+ }
+
+ // We only care about arrays, and runtime arrays.
+ switch (type->kind()) {
+ case spvtools::opt::analysis::Type::kRuntimeArray:
+ // Runtime arrays are always decorated with ArrayStride so always get a
+ // type alias.
+ namer_.SuggestSanitizedName(type_id, "RTArr");
+ break;
+ case spvtools::opt::analysis::Type::kArray:
+            // Only make a type alias for arrays with decorations.
+ if (GetDecorationsFor(type_id).empty()) {
+ return ast_type;
+ }
+ namer_.SuggestSanitizedName(type_id, "Arr");
+ break;
+ default:
+ // Ignore constants, and any other types.
+ return ast_type;
+ }
+ auto* ast_underlying_type = ast_type;
+ if (ast_underlying_type == nullptr) {
+ Fail() << "internal error: no type registered for SPIR-V ID: " << type_id;
+ return nullptr;
+ }
+ const auto name = namer_.GetName(type_id);
+ const auto sym = builder_.Symbols().Register(name);
+ auto* ast_alias_type = builder_.ty.alias(sym, ast_underlying_type->Build(builder_));
- // Record this new alias as the AST type for this SPIR-V ID.
- AddTypeDecl(sym, ast_alias_type);
+ // Record this new alias as the AST type for this SPIR-V ID.
+ AddTypeDecl(sym, ast_alias_type);
- return ty_.Alias(sym, ast_underlying_type);
+ return ty_.Alias(sym, ast_underlying_type);
}
bool ParserImpl::EmitModuleScopeVariables() {
- if (!success_) {
- return false;
- }
- for (const auto& type_or_value : module_->types_values()) {
- if (type_or_value.opcode() != SpvOpVariable) {
- continue;
- }
- const auto& var = type_or_value;
- const auto spirv_storage_class =
- SpvStorageClass(var.GetSingleWordInOperand(0));
-
- uint32_t type_id = var.type_id();
- if ((type_id == builtin_position_.pointer_type_id) &&
- ((spirv_storage_class == SpvStorageClassInput) ||
- (spirv_storage_class == SpvStorageClassOutput))) {
- // Skip emitting gl_PerVertex.
- builtin_position_.per_vertex_var_id = var.result_id();
- builtin_position_.per_vertex_var_init_id =
- var.NumInOperands() > 1 ? var.GetSingleWordInOperand(1) : 0u;
- continue;
- }
- switch (enum_converter_.ToStorageClass(spirv_storage_class)) {
- case ast::StorageClass::kNone:
- case ast::StorageClass::kInput:
- case ast::StorageClass::kOutput:
- case ast::StorageClass::kUniform:
- case ast::StorageClass::kUniformConstant:
- case ast::StorageClass::kStorage:
- case ast::StorageClass::kWorkgroup:
- case ast::StorageClass::kPrivate:
- break;
- default:
- return Fail() << "invalid SPIR-V storage class "
- << int(spirv_storage_class)
- << " for module scope variable: " << var.PrettyPrint();
- }
if (!success_) {
- return false;
- }
- const Type* ast_type = nullptr;
- if (spirv_storage_class == SpvStorageClassUniformConstant) {
- // These are opaque handles: samplers or textures
- ast_type = GetTypeForHandleVar(var);
- if (!ast_type) {
return false;
- }
- } else {
- ast_type = ConvertType(type_id);
- if (ast_type == nullptr) {
- return Fail() << "internal error: failed to register Tint AST type for "
- "SPIR-V type with ID: "
- << var.type_id();
- }
- if (!ast_type->Is<Pointer>()) {
- return Fail() << "variable with ID " << var.result_id()
- << " has non-pointer type " << var.type_id();
- }
- }
-
- auto* ast_store_type = ast_type->As<Pointer>()->type;
- auto ast_storage_class = ast_type->As<Pointer>()->storage_class;
- const ast::Expression* ast_constructor = nullptr;
- if (var.NumInOperands() > 1) {
- // SPIR-V initializers are always constants.
- // (OpenCL also allows the ID of an OpVariable, but we don't handle that
- // here.)
- ast_constructor =
- MakeConstantExpression(var.GetSingleWordInOperand(1)).expr;
- }
- auto* ast_var =
- MakeVariable(var.result_id(), ast_storage_class, ast_store_type, false,
- false, ast_constructor, ast::AttributeList{});
- // TODO(dneto): initializers (a.k.a. constructor expression)
- if (ast_var) {
- builder_.AST().AddGlobalVariable(ast_var);
- }
- }
-
- // Emit gl_Position instead of gl_PerVertex
- if (builtin_position_.per_vertex_var_id) {
- // Make sure the variable has a name.
- namer_.SuggestSanitizedName(builtin_position_.per_vertex_var_id,
- "gl_Position");
- const ast::Expression* ast_constructor = nullptr;
- if (builtin_position_.per_vertex_var_init_id) {
- // The initializer is complex.
- const auto* init =
- def_use_mgr_->GetDef(builtin_position_.per_vertex_var_init_id);
- switch (init->opcode()) {
- case SpvOpConstantComposite:
- case SpvOpSpecConstantComposite:
- ast_constructor = MakeConstantExpression(
- init->GetSingleWordInOperand(
- builtin_position_.position_member_index))
- .expr;
- break;
- default:
- return Fail() << "gl_PerVertex initializer too complex. only "
- "OpCompositeConstruct and OpSpecConstantComposite "
- "are supported: "
- << init->PrettyPrint();
- }
}
- auto* ast_var = MakeVariable(
- builtin_position_.per_vertex_var_id,
- enum_converter_.ToStorageClass(builtin_position_.storage_class),
- ConvertType(builtin_position_.position_member_type_id), false, false,
- ast_constructor, {});
+ for (const auto& type_or_value : module_->types_values()) {
+ if (type_or_value.opcode() != SpvOpVariable) {
+ continue;
+ }
+ const auto& var = type_or_value;
+ const auto spirv_storage_class = SpvStorageClass(var.GetSingleWordInOperand(0));
+
+ uint32_t type_id = var.type_id();
+ if ((type_id == builtin_position_.pointer_type_id) &&
+ ((spirv_storage_class == SpvStorageClassInput) ||
+ (spirv_storage_class == SpvStorageClassOutput))) {
+ // Skip emitting gl_PerVertex.
+ builtin_position_.per_vertex_var_id = var.result_id();
+ builtin_position_.per_vertex_var_init_id =
+ var.NumInOperands() > 1 ? var.GetSingleWordInOperand(1) : 0u;
+ continue;
+ }
+ switch (enum_converter_.ToStorageClass(spirv_storage_class)) {
+ case ast::StorageClass::kNone:
+ case ast::StorageClass::kInput:
+ case ast::StorageClass::kOutput:
+ case ast::StorageClass::kUniform:
+ case ast::StorageClass::kHandle:
+ case ast::StorageClass::kStorage:
+ case ast::StorageClass::kWorkgroup:
+ case ast::StorageClass::kPrivate:
+ break;
+ default:
+ return Fail() << "invalid SPIR-V storage class " << int(spirv_storage_class)
+ << " for module scope variable: " << var.PrettyPrint();
+ }
+ if (!success_) {
+ return false;
+ }
+ const Type* ast_type = nullptr;
+ if (spirv_storage_class == SpvStorageClassUniformConstant) {
+ // These are opaque handles: samplers or textures
+ ast_type = GetTypeForHandleVar(var);
+ if (!ast_type) {
+ return false;
+ }
+ } else {
+ ast_type = ConvertType(type_id);
+ if (ast_type == nullptr) {
+ return Fail() << "internal error: failed to register Tint AST type for "
+ "SPIR-V type with ID: "
+ << var.type_id();
+ }
+ if (!ast_type->Is<Pointer>()) {
+ return Fail() << "variable with ID " << var.result_id() << " has non-pointer type "
+ << var.type_id();
+ }
+ }
- builder_.AST().AddGlobalVariable(ast_var);
- }
- return success_;
+ auto* ast_store_type = ast_type->As<Pointer>()->type;
+ auto ast_storage_class = ast_type->As<Pointer>()->storage_class;
+ const ast::Expression* ast_constructor = nullptr;
+ if (var.NumInOperands() > 1) {
+ // SPIR-V initializers are always constants.
+ // (OpenCL also allows the ID of an OpVariable, but we don't handle that
+ // here.)
+ ast_constructor = MakeConstantExpression(var.GetSingleWordInOperand(1)).expr;
+ }
+ auto* ast_var = MakeVariable(var.result_id(), ast_storage_class, ast_store_type, false,
+ false, ast_constructor, ast::AttributeList{});
+ // TODO(dneto): initializers (a.k.a. constructor expression)
+ if (ast_var) {
+ builder_.AST().AddGlobalVariable(ast_var);
+ }
+ }
+
+ // Emit gl_Position instead of gl_PerVertex
+ if (builtin_position_.per_vertex_var_id) {
+ // Make sure the variable has a name.
+ namer_.SuggestSanitizedName(builtin_position_.per_vertex_var_id, "gl_Position");
+ const ast::Expression* ast_constructor = nullptr;
+ if (builtin_position_.per_vertex_var_init_id) {
+ // The initializer is complex.
+ const auto* init = def_use_mgr_->GetDef(builtin_position_.per_vertex_var_init_id);
+ switch (init->opcode()) {
+ case SpvOpConstantComposite:
+ case SpvOpSpecConstantComposite:
+ ast_constructor =
+ MakeConstantExpression(
+ init->GetSingleWordInOperand(builtin_position_.position_member_index))
+ .expr;
+ break;
+ default:
+ return Fail() << "gl_PerVertex initializer too complex. only "
+ "OpCompositeConstruct and OpSpecConstantComposite "
+ "are supported: "
+ << init->PrettyPrint();
+ }
+ }
+ auto* ast_var =
+ MakeVariable(builtin_position_.per_vertex_var_id,
+ enum_converter_.ToStorageClass(builtin_position_.storage_class),
+ ConvertType(builtin_position_.position_member_type_id), false, false,
+ ast_constructor, {});
+
+ builder_.AST().AddGlobalVariable(ast_var);
+ }
+ return success_;
}
// @param var_id SPIR-V id of an OpVariable, assumed to be pointer
// to an array
// @returns the IntConstant for the size of the array, or nullptr
-const spvtools::opt::analysis::IntConstant* ParserImpl::GetArraySize(
- uint32_t var_id) {
- auto* var = def_use_mgr_->GetDef(var_id);
- if (!var || var->opcode() != SpvOpVariable) {
- return nullptr;
- }
- auto* ptr_type = def_use_mgr_->GetDef(var->type_id());
- if (!ptr_type || ptr_type->opcode() != SpvOpTypePointer) {
- return nullptr;
- }
- auto* array_type = def_use_mgr_->GetDef(ptr_type->GetSingleWordInOperand(1));
- if (!array_type || array_type->opcode() != SpvOpTypeArray) {
- return nullptr;
- }
- auto* size = constant_mgr_->FindDeclaredConstant(
- array_type->GetSingleWordInOperand(1));
- if (!size) {
- return nullptr;
- }
- return size->AsIntConstant();
+const spvtools::opt::analysis::IntConstant* ParserImpl::GetArraySize(uint32_t var_id) {
+ auto* var = def_use_mgr_->GetDef(var_id);
+ if (!var || var->opcode() != SpvOpVariable) {
+ return nullptr;
+ }
+ auto* ptr_type = def_use_mgr_->GetDef(var->type_id());
+ if (!ptr_type || ptr_type->opcode() != SpvOpTypePointer) {
+ return nullptr;
+ }
+ auto* array_type = def_use_mgr_->GetDef(ptr_type->GetSingleWordInOperand(1));
+ if (!array_type || array_type->opcode() != SpvOpTypeArray) {
+ return nullptr;
+ }
+ auto* size = constant_mgr_->FindDeclaredConstant(array_type->GetSingleWordInOperand(1));
+ if (!size) {
+ return nullptr;
+ }
+ return size->AsIntConstant();
}
ast::Variable* ParserImpl::MakeVariable(uint32_t id,
@@ -1614,1175 +1561,1148 @@ ast::Variable* ParserImpl::MakeVariable(uint32_t id,
bool is_overridable,
const ast::Expression* constructor,
ast::AttributeList decorations) {
- if (storage_type == nullptr) {
- Fail() << "internal error: can't make ast::Variable for null type";
- return nullptr;
- }
-
- ast::Access access = ast::Access::kUndefined;
- if (sc == ast::StorageClass::kStorage) {
- bool read_only = false;
- if (auto* tn = storage_type->As<Named>()) {
- read_only = read_only_struct_types_.count(tn->name) > 0;
+ if (storage_type == nullptr) {
+ Fail() << "internal error: can't make ast::Variable for null type";
+ return nullptr;
}
- // Apply the access(read) or access(read_write) modifier.
- access = read_only ? ast::Access::kRead : ast::Access::kReadWrite;
- }
+ ast::Access access = ast::Access::kUndefined;
+ if (sc == ast::StorageClass::kStorage) {
+ bool read_only = false;
+ if (auto* tn = storage_type->As<Named>()) {
+ read_only = read_only_struct_types_.count(tn->name) > 0;
+ }
- // Handle variables (textures and samplers) are always in the handle
- // storage class, so we don't mention the storage class.
- if (sc == ast::StorageClass::kUniformConstant) {
- sc = ast::StorageClass::kNone;
- }
+ // Apply the access(read) or access(read_write) modifier.
+ access = read_only ? ast::Access::kRead : ast::Access::kReadWrite;
+ }
- if (!ConvertDecorationsForVariable(id, &storage_type, &decorations,
- sc != ast::StorageClass::kPrivate)) {
- return nullptr;
- }
+ // Handle variables (textures and samplers) are always in the handle
+ // storage class, so we don't mention the storage class.
+ if (sc == ast::StorageClass::kHandle) {
+ sc = ast::StorageClass::kNone;
+ }
- std::string name = namer_.Name(id);
+ if (!ConvertDecorationsForVariable(id, &storage_type, &decorations,
+ sc != ast::StorageClass::kPrivate)) {
+ return nullptr;
+ }
+
+ std::string name = namer_.Name(id);
- // Note: we're constructing the variable here with the *storage* type,
- // regardless of whether this is a `let`, `override`, or `var` declaration.
- // `var` declarations will have a resolved type of ref<storage>, but at the
- // AST level all three are declared with the same type.
- return create<ast::Variable>(Source{}, builder_.Symbols().Register(name), sc,
- access, storage_type->Build(builder_), is_const,
- is_overridable, constructor, decorations);
+ // Note: we're constructing the variable here with the *storage* type,
+ // regardless of whether this is a `let`, `override`, or `var` declaration.
+ // `var` declarations will have a resolved type of ref<storage>, but at the
+ // AST level all three are declared with the same type.
+ return create<ast::Variable>(Source{}, builder_.Symbols().Register(name), sc, access,
+ storage_type->Build(builder_), is_const, is_overridable,
+ constructor, decorations);
}
bool ParserImpl::ConvertDecorationsForVariable(uint32_t id,
const Type** store_type,
ast::AttributeList* decorations,
bool transfer_pipeline_io) {
- DecorationList non_builtin_pipeline_decorations;
- for (auto& deco : GetDecorationsFor(id)) {
- if (deco.empty()) {
- return Fail() << "malformed decoration on ID " << id << ": it is empty";
- }
- if (deco[0] == SpvDecorationBuiltIn) {
- if (deco.size() == 1) {
- return Fail() << "malformed BuiltIn decoration on ID " << id
- << ": has no operand";
- }
- const auto spv_builtin = static_cast<SpvBuiltIn>(deco[1]);
- switch (spv_builtin) {
- case SpvBuiltInPointSize:
- special_builtins_[id] = spv_builtin;
- return false; // This is not an error
- case SpvBuiltInSampleId:
- case SpvBuiltInVertexIndex:
- case SpvBuiltInInstanceIndex:
- case SpvBuiltInLocalInvocationId:
- case SpvBuiltInLocalInvocationIndex:
- case SpvBuiltInGlobalInvocationId:
- case SpvBuiltInWorkgroupId:
- case SpvBuiltInNumWorkgroups:
- // The SPIR-V variable may signed (because GLSL requires signed for
- // some of these), but WGSL requires unsigned. Handle specially
- // so we always perform the conversion at load and store.
- special_builtins_[id] = spv_builtin;
- if (auto* forced_type = UnsignedTypeFor(*store_type)) {
- // Requires conversion and special handling in code generation.
+ DecorationList non_builtin_pipeline_decorations;
+ for (auto& deco : GetDecorationsFor(id)) {
+ if (deco.empty()) {
+ return Fail() << "malformed decoration on ID " << id << ": it is empty";
+ }
+ if (deco[0] == SpvDecorationBuiltIn) {
+ if (deco.size() == 1) {
+ return Fail() << "malformed BuiltIn decoration on ID " << id << ": has no operand";
+ }
+ const auto spv_builtin = static_cast<SpvBuiltIn>(deco[1]);
+ switch (spv_builtin) {
+ case SpvBuiltInPointSize:
+ special_builtins_[id] = spv_builtin;
+ return false; // This is not an error
+ case SpvBuiltInSampleId:
+ case SpvBuiltInVertexIndex:
+ case SpvBuiltInInstanceIndex:
+ case SpvBuiltInLocalInvocationId:
+ case SpvBuiltInLocalInvocationIndex:
+ case SpvBuiltInGlobalInvocationId:
+ case SpvBuiltInWorkgroupId:
+ case SpvBuiltInNumWorkgroups:
+                // The SPIR-V variable may be signed (because GLSL requires signed for
+ // some of these), but WGSL requires unsigned. Handle specially
+ // so we always perform the conversion at load and store.
+ special_builtins_[id] = spv_builtin;
+ if (auto* forced_type = UnsignedTypeFor(*store_type)) {
+ // Requires conversion and special handling in code generation.
+ if (transfer_pipeline_io) {
+ *store_type = forced_type;
+ }
+ }
+ break;
+ case SpvBuiltInSampleMask: {
+                    // In SPIR-V this is used for both input and output variables.
+ // The SPIR-V variable has store type of array of integer scalar,
+ // either signed or unsigned.
+ // WGSL requires the store type to be u32.
+ auto* size = GetArraySize(id);
+ if (!size || size->GetZeroExtendedValue() != 1) {
+ Fail() << "WGSL supports a sample mask of at most 32 bits. "
+ "SampleMask must be an array of 1 element.";
+ }
+ special_builtins_[id] = spv_builtin;
+ if (transfer_pipeline_io) {
+ *store_type = ty_.U32();
+ }
+ break;
+ }
+ default:
+ break;
+ }
+ auto ast_builtin = enum_converter_.ToBuiltin(spv_builtin);
+ if (ast_builtin == ast::Builtin::kNone) {
+ // A diagnostic has already been emitted.
+ return false;
+ }
if (transfer_pipeline_io) {
- *store_type = forced_type;
+ decorations->emplace_back(create<ast::BuiltinAttribute>(Source{}, ast_builtin));
}
- }
- break;
- case SpvBuiltInSampleMask: {
- // In SPIR-V this is used for both input and output variable.
- // The SPIR-V variable has store type of array of integer scalar,
- // either signed or unsigned.
- // WGSL requires the store type to be u32.
- auto* size = GetArraySize(id);
- if (!size || size->GetZeroExtendedValue() != 1) {
- Fail() << "WGSL supports a sample mask of at most 32 bits. "
- "SampleMask must be an array of 1 element.";
- }
- special_builtins_[id] = spv_builtin;
- if (transfer_pipeline_io) {
- *store_type = ty_.U32();
- }
- break;
}
- default:
- break;
- }
- auto ast_builtin = enum_converter_.ToBuiltin(spv_builtin);
- if (ast_builtin == ast::Builtin::kNone) {
- // A diagnostic has already been emitted.
- return false;
- }
- if (transfer_pipeline_io) {
- decorations->emplace_back(
- create<ast::BuiltinAttribute>(Source{}, ast_builtin));
- }
- }
- if (transfer_pipeline_io && IsPipelineDecoration(deco)) {
- non_builtin_pipeline_decorations.push_back(deco);
- }
- if (deco[0] == SpvDecorationDescriptorSet) {
- if (deco.size() == 1) {
- return Fail() << "malformed DescriptorSet decoration on ID " << id
- << ": has no operand";
- }
- decorations->emplace_back(create<ast::GroupAttribute>(Source{}, deco[1]));
- }
- if (deco[0] == SpvDecorationBinding) {
- if (deco.size() == 1) {
- return Fail() << "malformed Binding decoration on ID " << id
- << ": has no operand";
- }
- decorations->emplace_back(
- create<ast::BindingAttribute>(Source{}, deco[1]));
- }
- }
-
- if (transfer_pipeline_io) {
- if (!ConvertPipelineDecorations(
- *store_type, non_builtin_pipeline_decorations, decorations)) {
- return false;
- }
- }
-
- return success();
-}
-
-DecorationList ParserImpl::GetMemberPipelineDecorations(
- const Struct& struct_type,
- int member_index) {
- // Yes, I could have used std::copy_if or std::copy_if.
- DecorationList result;
- for (const auto& deco : GetDecorationsForMember(
- struct_id_for_symbol_[struct_type.name], member_index)) {
- if (IsPipelineDecoration(deco)) {
- result.emplace_back(deco);
- }
- }
- return result;
-}
-
-const ast::Attribute* ParserImpl::SetLocation(
- ast::AttributeList* attributes,
- const ast::Attribute* replacement) {
- if (!replacement) {
+ if (transfer_pipeline_io && IsPipelineDecoration(deco)) {
+ non_builtin_pipeline_decorations.push_back(deco);
+ }
+ if (deco[0] == SpvDecorationDescriptorSet) {
+ if (deco.size() == 1) {
+ return Fail() << "malformed DescriptorSet decoration on ID " << id
+ << ": has no operand";
+ }
+ decorations->emplace_back(create<ast::GroupAttribute>(Source{}, deco[1]));
+ }
+ if (deco[0] == SpvDecorationBinding) {
+ if (deco.size() == 1) {
+ return Fail() << "malformed Binding decoration on ID " << id << ": has no operand";
+ }
+ decorations->emplace_back(create<ast::BindingAttribute>(Source{}, deco[1]));
+ }
+ }
+
+ if (transfer_pipeline_io) {
+ if (!ConvertPipelineDecorations(*store_type, non_builtin_pipeline_decorations,
+ decorations)) {
+ return false;
+ }
+ }
+
+ return success();
+}
+
+DecorationList ParserImpl::GetMemberPipelineDecorations(const Struct& struct_type,
+ int member_index) {
+    // Yes, I could have used std::copy_if.
+ DecorationList result;
+ for (const auto& deco :
+ GetDecorationsForMember(struct_id_for_symbol_[struct_type.name], member_index)) {
+ if (IsPipelineDecoration(deco)) {
+ result.emplace_back(deco);
+ }
+ }
+ return result;
+}
+
+const ast::Attribute* ParserImpl::SetLocation(ast::AttributeList* attributes,
+ const ast::Attribute* replacement) {
+ if (!replacement) {
+ return nullptr;
+ }
+ for (auto*& attribute : *attributes) {
+ if (attribute->Is<ast::LocationAttribute>()) {
+ // Replace this location attribute with the replacement.
+ // The old one doesn't leak because it's kept in the builder's AST node
+ // list.
+ const ast::Attribute* result = nullptr;
+ result = attribute;
+ attribute = replacement;
+ return result; // Assume there is only one such decoration.
+ }
+ }
+ // The list didn't have a location. Add it.
+ attributes->push_back(replacement);
return nullptr;
- }
- for (auto*& attribute : *attributes) {
- if (attribute->Is<ast::LocationAttribute>()) {
- // Replace this location attribute with the replacement.
- // The old one doesn't leak because it's kept in the builder's AST node
- // list.
- const ast::Attribute* result = nullptr;
- result = attribute;
- attribute = replacement;
- return result; // Assume there is only one such decoration.
- }
- }
- // The list didn't have a location. Add it.
- attributes->push_back(replacement);
- return nullptr;
}
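// Illustrative sketch only (not from the Tint/Dawn sources): the replace-or-append
// pattern SetLocation() uses, reduced to a plain vector of hypothetical attribute
// tags instead of ast::AttributeList.
#include <string>
#include <vector>

// Returns the tag that was displaced, or an empty string if nothing was replaced.
inline std::string ReplaceOrAppend(std::vector<std::string>* attrs,
                                   const std::string& replacement,
                                   const std::string& kind_prefix) {
    for (auto& attr : *attrs) {
        if (attr.rfind(kind_prefix, 0) == 0) {  // attr starts with kind_prefix
            std::string displaced = attr;
            attr = replacement;
            return displaced;  // assume at most one attribute of this kind exists
        }
    }
    attrs->push_back(replacement);
    return {};
}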
bool ParserImpl::ConvertPipelineDecorations(const Type* store_type,
const DecorationList& decorations,
ast::AttributeList* attributes) {
- // Vulkan defaults to perspective-correct interpolation.
- ast::InterpolationType type = ast::InterpolationType::kPerspective;
- ast::InterpolationSampling sampling = ast::InterpolationSampling::kNone;
+ // Vulkan defaults to perspective-correct interpolation.
+ ast::InterpolationType type = ast::InterpolationType::kPerspective;
+ ast::InterpolationSampling sampling = ast::InterpolationSampling::kNone;
+
+ for (const auto& deco : decorations) {
+ TINT_ASSERT(Reader, deco.size() > 0);
+ switch (deco[0]) {
+ case SpvDecorationLocation:
+ if (deco.size() != 2) {
+ return Fail() << "malformed Location decoration on ID requires one "
+ "literal operand";
+ }
+ SetLocation(attributes, create<ast::LocationAttribute>(Source{}, deco[1]));
+ if (store_type->IsIntegerScalarOrVector()) {
+ // Default to flat interpolation for integral user-defined IO types.
+ type = ast::InterpolationType::kFlat;
+ }
+ break;
+ case SpvDecorationFlat:
+ type = ast::InterpolationType::kFlat;
+ break;
+ case SpvDecorationNoPerspective:
+ if (store_type->IsIntegerScalarOrVector()) {
+ // This doesn't capture the array or struct case.
+ return Fail() << "NoPerspective is invalid on integral IO";
+ }
+ type = ast::InterpolationType::kLinear;
+ break;
+ case SpvDecorationCentroid:
+ if (store_type->IsIntegerScalarOrVector()) {
+ // This doesn't capture the array or struct case.
+ return Fail() << "Centroid interpolation sampling is invalid on integral IO";
+ }
+ sampling = ast::InterpolationSampling::kCentroid;
+ break;
+ case SpvDecorationSample:
+ if (store_type->IsIntegerScalarOrVector()) {
+ // This doesn't capture the array or struct case.
+ return Fail() << "Sample interpolation sampling is invalid on integral IO";
+ }
+ sampling = ast::InterpolationSampling::kSample;
+ break;
+ default:
+ break;
+ }
+ }
- for (const auto& deco : decorations) {
- TINT_ASSERT(Reader, deco.size() > 0);
- switch (deco[0]) {
- case SpvDecorationLocation:
- if (deco.size() != 2) {
- return Fail() << "malformed Location decoration on ID requires one "
- "literal operand";
- }
- SetLocation(attributes,
- create<ast::LocationAttribute>(Source{}, deco[1]));
- if (store_type->IsIntegerScalarOrVector()) {
- // Default to flat interpolation for integral user-defined IO types.
- type = ast::InterpolationType::kFlat;
- }
- break;
- case SpvDecorationFlat:
- type = ast::InterpolationType::kFlat;
- break;
- case SpvDecorationNoPerspective:
- if (store_type->IsIntegerScalarOrVector()) {
- // This doesn't capture the array or struct case.
- return Fail() << "NoPerspective is invalid on integral IO";
- }
- type = ast::InterpolationType::kLinear;
- break;
- case SpvDecorationCentroid:
- if (store_type->IsIntegerScalarOrVector()) {
- // This doesn't capture the array or struct case.
- return Fail()
- << "Centroid interpolation sampling is invalid on integral IO";
- }
- sampling = ast::InterpolationSampling::kCentroid;
- break;
- case SpvDecorationSample:
- if (store_type->IsIntegerScalarOrVector()) {
- // This doesn't capture the array or struct case.
- return Fail()
- << "Sample interpolation sampling is invalid on integral IO";
- }
- sampling = ast::InterpolationSampling::kSample;
- break;
- default:
- break;
- }
- }
-
- // Apply interpolation.
- if (type == ast::InterpolationType::kPerspective &&
- sampling == ast::InterpolationSampling::kNone) {
- // This is the default. Don't add a decoration.
- } else {
- attributes->emplace_back(create<ast::InterpolateAttribute>(type, sampling));
- }
-
- return success();
+ // Apply interpolation.
+ if (type == ast::InterpolationType::kPerspective &&
+ sampling == ast::InterpolationSampling::kNone) {
+ // This is the default. Don't add a decoration.
+ } else {
+ attributes->emplace_back(create<ast::InterpolateAttribute>(type, sampling));
+ }
+
+ return success();
}
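// Illustrative sketch only (not from the Tint/Dawn sources): the decision table
// ConvertPipelineDecorations() implements, reduced to plain enums. Names are
// hypothetical, and the error paths for Centroid/Sample/NoPerspective on
// integral IO are omitted here.
enum class InterpType { kPerspective, kLinear, kFlat };
enum class InterpSampling { kNone, kCentroid, kSample };

struct Interp {
    InterpType type = InterpType::kPerspective;  // Vulkan's default interpolation
    InterpSampling sampling = InterpSampling::kNone;
    // No @interpolate attribute is emitted when both fields are still the default.
    bool NeedsAttribute() const {
        return !(type == InterpType::kPerspective && sampling == InterpSampling::kNone);
    }
};

// is_integral_io: integral user-defined IO must be flat in WGSL, so a Location
// decoration on an integer type already forces flat interpolation.
inline Interp ClassifyInterpolation(bool has_flat, bool has_noperspective,
                                    bool has_centroid, bool has_sample,
                                    bool is_integral_io) {
    Interp result;
    if (is_integral_io || has_flat) {
        result.type = InterpType::kFlat;
    } else if (has_noperspective) {
        result.type = InterpType::kLinear;
    }
    if (has_centroid) {
        result.sampling = InterpSampling::kCentroid;
    } else if (has_sample) {
        result.sampling = InterpSampling::kSample;
    }
    return result;
}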
bool ParserImpl::CanMakeConstantExpression(uint32_t id) {
- if ((id == workgroup_size_builtin_.id) ||
- (id == workgroup_size_builtin_.x_id) ||
- (id == workgroup_size_builtin_.y_id) ||
- (id == workgroup_size_builtin_.z_id)) {
- return true;
- }
- const auto* inst = def_use_mgr_->GetDef(id);
- if (!inst) {
- return false;
- }
- if (inst->opcode() == SpvOpUndef) {
- return true;
- }
- return nullptr != constant_mgr_->FindDeclaredConstant(id);
+ if ((id == workgroup_size_builtin_.id) || (id == workgroup_size_builtin_.x_id) ||
+ (id == workgroup_size_builtin_.y_id) || (id == workgroup_size_builtin_.z_id)) {
+ return true;
+ }
+ const auto* inst = def_use_mgr_->GetDef(id);
+ if (!inst) {
+ return false;
+ }
+ if (inst->opcode() == SpvOpUndef) {
+ return true;
+ }
+ return nullptr != constant_mgr_->FindDeclaredConstant(id);
}
TypedExpression ParserImpl::MakeConstantExpression(uint32_t id) {
- if (!success_) {
- return {};
- }
-
- // Handle the special cases for workgroup sizing.
- if (id == workgroup_size_builtin_.id) {
- auto x = MakeConstantExpression(workgroup_size_builtin_.x_id);
- auto y = MakeConstantExpression(workgroup_size_builtin_.y_id);
- auto z = MakeConstantExpression(workgroup_size_builtin_.z_id);
- auto* ast_type = ty_.Vector(x.type, 3);
- return {ast_type,
- builder_.Construct(Source{}, ast_type->Build(builder_),
- ast::ExpressionList{x.expr, y.expr, z.expr})};
- } else if (id == workgroup_size_builtin_.x_id) {
- return MakeConstantExpressionForScalarSpirvConstant(
- Source{}, ConvertType(workgroup_size_builtin_.component_type_id),
- constant_mgr_->GetConstant(
- type_mgr_->GetType(workgroup_size_builtin_.component_type_id),
- {workgroup_size_builtin_.x_value}));
- } else if (id == workgroup_size_builtin_.y_id) {
- return MakeConstantExpressionForScalarSpirvConstant(
- Source{}, ConvertType(workgroup_size_builtin_.component_type_id),
- constant_mgr_->GetConstant(
- type_mgr_->GetType(workgroup_size_builtin_.component_type_id),
- {workgroup_size_builtin_.y_value}));
- } else if (id == workgroup_size_builtin_.z_id) {
- return MakeConstantExpressionForScalarSpirvConstant(
- Source{}, ConvertType(workgroup_size_builtin_.component_type_id),
- constant_mgr_->GetConstant(
- type_mgr_->GetType(workgroup_size_builtin_.component_type_id),
- {workgroup_size_builtin_.z_value}));
- }
-
- // Handle the general case where a constant is already registered
- // with the SPIR-V optimizer's analysis framework.
- const auto* inst = def_use_mgr_->GetDef(id);
- if (inst == nullptr) {
- Fail() << "ID " << id << " is not a registered instruction";
- return {};
- }
- auto source = GetSourceForInst(inst);
+ if (!success_) {
+ return {};
+ }
+
+ // Handle the special cases for workgroup sizing.
+ if (id == workgroup_size_builtin_.id) {
+ auto x = MakeConstantExpression(workgroup_size_builtin_.x_id);
+ auto y = MakeConstantExpression(workgroup_size_builtin_.y_id);
+ auto z = MakeConstantExpression(workgroup_size_builtin_.z_id);
+ auto* ast_type = ty_.Vector(x.type, 3);
+ return {ast_type, builder_.Construct(Source{}, ast_type->Build(builder_),
+ ast::ExpressionList{x.expr, y.expr, z.expr})};
+ } else if (id == workgroup_size_builtin_.x_id) {
+ return MakeConstantExpressionForScalarSpirvConstant(
+ Source{}, ConvertType(workgroup_size_builtin_.component_type_id),
+ constant_mgr_->GetConstant(
+ type_mgr_->GetType(workgroup_size_builtin_.component_type_id),
+ {workgroup_size_builtin_.x_value}));
+ } else if (id == workgroup_size_builtin_.y_id) {
+ return MakeConstantExpressionForScalarSpirvConstant(
+ Source{}, ConvertType(workgroup_size_builtin_.component_type_id),
+ constant_mgr_->GetConstant(
+ type_mgr_->GetType(workgroup_size_builtin_.component_type_id),
+ {workgroup_size_builtin_.y_value}));
+ } else if (id == workgroup_size_builtin_.z_id) {
+ return MakeConstantExpressionForScalarSpirvConstant(
+ Source{}, ConvertType(workgroup_size_builtin_.component_type_id),
+ constant_mgr_->GetConstant(
+ type_mgr_->GetType(workgroup_size_builtin_.component_type_id),
+ {workgroup_size_builtin_.z_value}));
+ }
+
+ // Handle the general case where a constant is already registered
+ // with the SPIR-V optimizer's analysis framework.
+ const auto* inst = def_use_mgr_->GetDef(id);
+ if (inst == nullptr) {
+ Fail() << "ID " << id << " is not a registered instruction";
+ return {};
+ }
+ auto source = GetSourceForInst(inst);
- // TODO(dneto): Handle spec constants too?
+ // TODO(dneto): Handle spec constants too?
- auto* original_ast_type = ConvertType(inst->type_id());
- if (original_ast_type == nullptr) {
- return {};
- }
-
- switch (inst->opcode()) {
- case SpvOpUndef: // Remap undef to null.
- case SpvOpConstantNull:
- return {original_ast_type, MakeNullValue(original_ast_type)};
- case SpvOpConstantTrue:
- case SpvOpConstantFalse:
- case SpvOpConstant: {
- const auto* spirv_const = constant_mgr_->FindDeclaredConstant(id);
- if (spirv_const == nullptr) {
- Fail() << "ID " << id << " is not a constant";
+ auto* original_ast_type = ConvertType(inst->type_id());
+ if (original_ast_type == nullptr) {
return {};
- }
- return MakeConstantExpressionForScalarSpirvConstant(
- source, original_ast_type, spirv_const);
- }
- case SpvOpConstantComposite: {
- // Handle vector, matrix, array, and struct
-
- // Generate a composite from explicit components.
- ast::ExpressionList ast_components;
- if (!inst->WhileEachInId([&](const uint32_t* id_ref) -> bool {
- auto component = MakeConstantExpression(*id_ref);
- if (!component) {
- this->Fail() << "invalid constant with ID " << *id_ref;
- return false;
+ }
+
+ switch (inst->opcode()) {
+ case SpvOpUndef: // Remap undef to null.
+ case SpvOpConstantNull:
+ return {original_ast_type, MakeNullValue(original_ast_type)};
+ case SpvOpConstantTrue:
+ case SpvOpConstantFalse:
+ case SpvOpConstant: {
+ const auto* spirv_const = constant_mgr_->FindDeclaredConstant(id);
+ if (spirv_const == nullptr) {
+ Fail() << "ID " << id << " is not a constant";
+ return {};
}
- ast_components.emplace_back(component.expr);
- return true;
- })) {
- // We've already emitted a diagnostic.
- return {};
- }
- return {original_ast_type,
- builder_.Construct(source, original_ast_type->Build(builder_),
- std::move(ast_components))};
+ return MakeConstantExpressionForScalarSpirvConstant(source, original_ast_type,
+ spirv_const);
+ }
+ case SpvOpConstantComposite: {
+ // Handle vector, matrix, array, and struct
+
+ // Generate a composite from explicit components.
+ ast::ExpressionList ast_components;
+ if (!inst->WhileEachInId([&](const uint32_t* id_ref) -> bool {
+ auto component = MakeConstantExpression(*id_ref);
+ if (!component) {
+ this->Fail() << "invalid constant with ID " << *id_ref;
+ return false;
+ }
+ ast_components.emplace_back(component.expr);
+ return true;
+ })) {
+ // We've already emitted a diagnostic.
+ return {};
+ }
+ return {original_ast_type,
+ builder_.Construct(source, original_ast_type->Build(builder_),
+ std::move(ast_components))};
+ }
+ default:
+ break;
}
- default:
- break;
- }
- Fail() << "unhandled constant instruction " << inst->PrettyPrint();
- return {};
+ Fail() << "unhandled constant instruction " << inst->PrettyPrint();
+ return {};
}
TypedExpression ParserImpl::MakeConstantExpressionForScalarSpirvConstant(
Source source,
const Type* original_ast_type,
const spvtools::opt::analysis::Constant* spirv_const) {
- auto* ast_type = original_ast_type->UnwrapAlias();
-
- // TODO(dneto): Note: NullConstant for int, uint, float map to a regular 0.
- // So canonicalization should map that way too.
- // Currently "null<type>" is missing from the WGSL parser.
- // See https://bugs.chromium.org/p/tint/issues/detail?id=34
- if (ast_type->Is<U32>()) {
- return {ty_.U32(),
- create<ast::UintLiteralExpression>(source, spirv_const->GetU32())};
- }
- if (ast_type->Is<I32>()) {
- return {ty_.I32(),
- create<ast::SintLiteralExpression>(source, spirv_const->GetS32())};
- }
- if (ast_type->Is<F32>()) {
- return {ty_.F32(), create<ast::FloatLiteralExpression>(
- source, spirv_const->GetFloat())};
- }
- if (ast_type->Is<Bool>()) {
- const bool value = spirv_const->AsNullConstant()
- ? false
- : spirv_const->AsBoolConstant()->value();
- return {ty_.Bool(), create<ast::BoolLiteralExpression>(source, value)};
- }
- Fail() << "expected scalar constant";
- return {};
+ auto* ast_type = original_ast_type->UnwrapAlias();
+
+ // TODO(dneto): Note: NullConstant for int, uint, float map to a regular 0.
+ // So canonicalization should map that way too.
+ // Currently "null<type>" is missing from the WGSL parser.
+ // See https://bugs.chromium.org/p/tint/issues/detail?id=34
+ return Switch(
+ ast_type,
+ [&](const I32*) {
+ return TypedExpression{ty_.I32(),
+ create<ast::IntLiteralExpression>(
+ source, static_cast<int64_t>(spirv_const->GetS32()),
+ ast::IntLiteralExpression::Suffix::kI)};
+ },
+ [&](const U32*) {
+ return TypedExpression{ty_.U32(),
+ create<ast::IntLiteralExpression>(
+ source, static_cast<int64_t>(spirv_const->GetU32()),
+ ast::IntLiteralExpression::Suffix::kU)};
+ },
+ [&](const F32*) {
+ return TypedExpression{ty_.F32(),
+ create<ast::FloatLiteralExpression>(
+ source, static_cast<double>(spirv_const->GetFloat()),
+ ast::FloatLiteralExpression::Suffix::kF)};
+ },
+ [&](const Bool*) {
+ const bool value =
+ spirv_const->AsNullConstant() ? false : spirv_const->AsBoolConstant()->value();
+ return TypedExpression{ty_.Bool(), create<ast::BoolLiteralExpression>(source, value)};
+ },
+ [&](Default) {
+ Fail() << "expected scalar constant";
+ return TypedExpression{};
+ });
}
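// Illustrative sketch only (not from the Tint/Dawn sources): what the suffixed
// literal nodes used above (and in MakeNullValue below) print as in WGSL. The
// helper is hypothetical; it just spells the zero value of each scalar type the
// way the new suffixes (kI, kU, kF) render it.
#include <string>

inline std::string WgslZeroLiteral(const std::string& scalar) {
    if (scalar == "i32") return "0i";      // IntLiteralExpression, Suffix::kI
    if (scalar == "u32") return "0u";      // IntLiteralExpression, Suffix::kU
    if (scalar == "f32") return "0.0f";    // FloatLiteralExpression, Suffix::kF
    if (scalar == "bool") return "false";  // BoolLiteralExpression
    return "/* unsupported scalar */";
}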
const ast::Expression* ParserImpl::MakeNullValue(const Type* type) {
- // TODO(dneto): Use the no-operands constructor syntax when it becomes
- // available in Tint.
- // https://github.com/gpuweb/gpuweb/issues/685
- // https://bugs.chromium.org/p/tint/issues/detail?id=34
+ // TODO(dneto): Use the no-operands constructor syntax when it becomes
+ // available in Tint.
+ // https://github.com/gpuweb/gpuweb/issues/685
+ // https://bugs.chromium.org/p/tint/issues/detail?id=34
- if (!type) {
- Fail() << "trying to create null value for a null type";
- return nullptr;
- }
-
- auto* original_type = type;
- type = type->UnwrapAlias();
-
- if (type->Is<Bool>()) {
- return create<ast::BoolLiteralExpression>(Source{}, false);
- }
- if (type->Is<U32>()) {
- return create<ast::UintLiteralExpression>(Source{}, 0u);
- }
- if (type->Is<I32>()) {
- return create<ast::SintLiteralExpression>(Source{}, 0);
- }
- if (type->Is<F32>()) {
- return create<ast::FloatLiteralExpression>(Source{}, 0.0f);
- }
- if (type->IsAnyOf<Vector, Matrix, Array>()) {
- return builder_.Construct(Source{}, type->Build(builder_));
- }
- if (auto* struct_ty = type->As<Struct>()) {
- ast::ExpressionList ast_components;
- for (auto* member : struct_ty->members) {
- ast_components.emplace_back(MakeNullValue(member));
- }
- return builder_.Construct(Source{}, original_type->Build(builder_),
- std::move(ast_components));
- }
- Fail() << "can't make null value for type: " << type->TypeInfo().name;
- return nullptr;
+ if (!type) {
+ Fail() << "trying to create null value for a null type";
+ return nullptr;
+ }
+
+ auto* original_type = type;
+ type = type->UnwrapAlias();
+
+ return Switch(
+ type, //
+ [&](const I32*) {
+ return create<ast::IntLiteralExpression>(Source{}, 0,
+ ast::IntLiteralExpression::Suffix::kI);
+ },
+ [&](const U32*) {
+ return create<ast::IntLiteralExpression>(Source{}, 0,
+ ast::IntLiteralExpression::Suffix::kU);
+ },
+ [&](const F32*) {
+ return create<ast::FloatLiteralExpression>(Source{}, 0,
+ ast::FloatLiteralExpression::Suffix::kF);
+ },
+ [&](const Vector*) { return builder_.Construct(Source{}, type->Build(builder_)); },
+ [&](const Matrix*) { return builder_.Construct(Source{}, type->Build(builder_)); },
+ [&](const Array*) { return builder_.Construct(Source{}, type->Build(builder_)); },
+ [&](const Bool*) { return create<ast::BoolLiteralExpression>(Source{}, false); },
+ [&](const Struct* struct_ty) {
+ ast::ExpressionList ast_components;
+ for (auto* member : struct_ty->members) {
+ ast_components.emplace_back(MakeNullValue(member));
+ }
+ return builder_.Construct(Source{}, original_type->Build(builder_),
+ std::move(ast_components));
+ },
+ [&](Default) {
+ Fail() << "can't make null value for type: " << type->TypeInfo().name;
+ return nullptr;
+ });
}
TypedExpression ParserImpl::MakeNullExpression(const Type* type) {
- return {type, MakeNullValue(type)};
+ return {type, MakeNullValue(type)};
}
const Type* ParserImpl::UnsignedTypeFor(const Type* type) {
- if (type->Is<I32>()) {
- return ty_.U32();
- }
- if (auto* v = type->As<Vector>()) {
- if (v->type->Is<I32>()) {
- return ty_.Vector(ty_.U32(), v->size);
+ if (type->Is<I32>()) {
+ return ty_.U32();
+ }
+ if (auto* v = type->As<Vector>()) {
+ if (v->type->Is<I32>()) {
+ return ty_.Vector(ty_.U32(), v->size);
+ }
}
- }
- return {};
+ return {};
}
const Type* ParserImpl::SignedTypeFor(const Type* type) {
- if (type->Is<U32>()) {
- return ty_.I32();
- }
- if (auto* v = type->As<Vector>()) {
- if (v->type->Is<U32>()) {
- return ty_.Vector(ty_.I32(), v->size);
- }
- }
- return {};
-}
-
-TypedExpression ParserImpl::RectifyOperandSignedness(
- const spvtools::opt::Instruction& inst,
- TypedExpression&& expr) {
- bool requires_signed = false;
- bool requires_unsigned = false;
- if (IsGlslExtendedInstruction(inst)) {
- const auto extended_opcode =
- static_cast<GLSLstd450>(inst.GetSingleWordInOperand(1));
- requires_signed = AssumesSignedOperands(extended_opcode);
- requires_unsigned = AssumesUnsignedOperands(extended_opcode);
- } else {
- const auto opcode = inst.opcode();
- requires_signed = AssumesSignedOperands(opcode);
- requires_unsigned = AssumesUnsignedOperands(opcode);
- }
- if (!requires_signed && !requires_unsigned) {
- // No conversion is required, assuming our tables are complete.
- return std::move(expr);
- }
- if (!expr) {
- Fail() << "internal error: RectifyOperandSignedness given a null expr\n";
- return {};
- }
- auto* type = expr.type;
- if (!type) {
- Fail() << "internal error: unmapped type for: "
- << expr.expr->TypeInfo().name << "\n";
+ if (type->Is<U32>()) {
+ return ty_.I32();
+ }
+ if (auto* v = type->As<Vector>()) {
+ if (v->type->Is<U32>()) {
+ return ty_.Vector(ty_.I32(), v->size);
+ }
+ }
return {};
- }
- if (requires_unsigned) {
- if (auto* unsigned_ty = UnsignedTypeFor(type)) {
- // Conversion is required.
- return {unsigned_ty,
- create<ast::BitcastExpression>(
- Source{}, unsigned_ty->Build(builder_), expr.expr)};
- }
- } else if (requires_signed) {
- if (auto* signed_ty = SignedTypeFor(type)) {
- // Conversion is required.
- return {signed_ty, create<ast::BitcastExpression>(
- Source{}, signed_ty->Build(builder_), expr.expr)};
- }
- }
- // We should not reach here.
- return std::move(expr);
-}
-
-TypedExpression ParserImpl::RectifySecondOperandSignedness(
- const spvtools::opt::Instruction& inst,
- const Type* first_operand_type,
- TypedExpression&& second_operand_expr) {
- if ((first_operand_type != second_operand_expr.type) &&
- AssumesSecondOperandSignednessMatchesFirstOperand(inst.opcode())) {
- // Conversion is required.
- return {first_operand_type,
- create<ast::BitcastExpression>(Source{},
- first_operand_type->Build(builder_),
- second_operand_expr.expr)};
- }
- // No conversion necessary.
- return std::move(second_operand_expr);
+}
+
+TypedExpression ParserImpl::RectifyOperandSignedness(const spvtools::opt::Instruction& inst,
+ TypedExpression&& expr) {
+ bool requires_signed = false;
+ bool requires_unsigned = false;
+ if (IsGlslExtendedInstruction(inst)) {
+ const auto extended_opcode = static_cast<GLSLstd450>(inst.GetSingleWordInOperand(1));
+ requires_signed = AssumesSignedOperands(extended_opcode);
+ requires_unsigned = AssumesUnsignedOperands(extended_opcode);
+ } else {
+ const auto opcode = inst.opcode();
+ requires_signed = AssumesSignedOperands(opcode);
+ requires_unsigned = AssumesUnsignedOperands(opcode);
+ }
+ if (!requires_signed && !requires_unsigned) {
+ // No conversion is required, assuming our tables are complete.
+ return std::move(expr);
+ }
+ if (!expr) {
+ Fail() << "internal error: RectifyOperandSignedness given a null expr\n";
+ return {};
+ }
+ auto* type = expr.type;
+ if (!type) {
+ Fail() << "internal error: unmapped type for: " << expr.expr->TypeInfo().name << "\n";
+ return {};
+ }
+ if (requires_unsigned) {
+ if (auto* unsigned_ty = UnsignedTypeFor(type)) {
+ // Conversion is required.
+ return {unsigned_ty, create<ast::BitcastExpression>(
+ Source{}, unsigned_ty->Build(builder_), expr.expr)};
+ }
+ } else if (requires_signed) {
+ if (auto* signed_ty = SignedTypeFor(type)) {
+ // Conversion is required.
+ return {signed_ty, create<ast::BitcastExpression>(Source{}, signed_ty->Build(builder_),
+ expr.expr)};
+ }
+ }
+ // We should not reach here.
+ return std::move(expr);
+}
+
+TypedExpression ParserImpl::RectifySecondOperandSignedness(const spvtools::opt::Instruction& inst,
+ const Type* first_operand_type,
+ TypedExpression&& second_operand_expr) {
+ if ((first_operand_type != second_operand_expr.type) &&
+ AssumesSecondOperandSignednessMatchesFirstOperand(inst.opcode())) {
+ // Conversion is required.
+ return {first_operand_type,
+ create<ast::BitcastExpression>(Source{}, first_operand_type->Build(builder_),
+ second_operand_expr.expr)};
+ }
+ // No conversion necessary.
+ return std::move(second_operand_expr);
}
const Type* ParserImpl::ForcedResultType(const spvtools::opt::Instruction& inst,
const Type* first_operand_type) {
- const auto opcode = inst.opcode();
- if (AssumesResultSignednessMatchesFirstOperand(opcode)) {
- return first_operand_type;
- }
- if (IsGlslExtendedInstruction(inst)) {
- const auto extended_opcode =
- static_cast<GLSLstd450>(inst.GetSingleWordInOperand(1));
- if (AssumesResultSignednessMatchesFirstOperand(extended_opcode)) {
- return first_operand_type;
+ const auto opcode = inst.opcode();
+ if (AssumesResultSignednessMatchesFirstOperand(opcode)) {
+ return first_operand_type;
+ }
+ if (IsGlslExtendedInstruction(inst)) {
+ const auto extended_opcode = static_cast<GLSLstd450>(inst.GetSingleWordInOperand(1));
+ if (AssumesResultSignednessMatchesFirstOperand(extended_opcode)) {
+ return first_operand_type;
+ }
}
- }
- return nullptr;
+ return nullptr;
}
const Type* ParserImpl::GetSignedIntMatchingShape(const Type* other) {
- if (other == nullptr) {
- Fail() << "no type provided";
- }
- if (other->Is<F32>() || other->Is<U32>() || other->Is<I32>()) {
- return ty_.I32();
- }
- if (auto* vec_ty = other->As<Vector>()) {
- return ty_.Vector(ty_.I32(), vec_ty->size);
- }
- Fail() << "required numeric scalar or vector, but got "
- << other->TypeInfo().name;
- return nullptr;
+ if (other == nullptr) {
+ Fail() << "no type provided";
+ }
+ if (other->Is<F32>() || other->Is<U32>() || other->Is<I32>()) {
+ return ty_.I32();
+ }
+ if (auto* vec_ty = other->As<Vector>()) {
+ return ty_.Vector(ty_.I32(), vec_ty->size);
+ }
+ Fail() << "required numeric scalar or vector, but got " << other->TypeInfo().name;
+ return nullptr;
}
const Type* ParserImpl::GetUnsignedIntMatchingShape(const Type* other) {
- if (other == nullptr) {
- Fail() << "no type provided";
+ if (other == nullptr) {
+ Fail() << "no type provided";
+ return nullptr;
+ }
+ if (other->Is<F32>() || other->Is<U32>() || other->Is<I32>()) {
+ return ty_.U32();
+ }
+ if (auto* vec_ty = other->As<Vector>()) {
+ return ty_.Vector(ty_.U32(), vec_ty->size);
+ }
+ Fail() << "required numeric scalar or vector, but got " << other->TypeInfo().name;
return nullptr;
- }
- if (other->Is<F32>() || other->Is<U32>() || other->Is<I32>()) {
- return ty_.U32();
- }
- if (auto* vec_ty = other->As<Vector>()) {
- return ty_.Vector(ty_.U32(), vec_ty->size);
- }
- Fail() << "required numeric scalar or vector, but got "
- << other->TypeInfo().name;
- return nullptr;
-}
-
-TypedExpression ParserImpl::RectifyForcedResultType(
- TypedExpression expr,
- const spvtools::opt::Instruction& inst,
- const Type* first_operand_type) {
- auto* forced_result_ty = ForcedResultType(inst, first_operand_type);
- if ((!forced_result_ty) || (forced_result_ty == expr.type)) {
- return expr;
- }
- return {expr.type, create<ast::BitcastExpression>(
- Source{}, expr.type->Build(builder_), expr.expr)};
+}
+
+TypedExpression ParserImpl::RectifyForcedResultType(TypedExpression expr,
+ const spvtools::opt::Instruction& inst,
+ const Type* first_operand_type) {
+ auto* forced_result_ty = ForcedResultType(inst, first_operand_type);
+ if ((!forced_result_ty) || (forced_result_ty == expr.type)) {
+ return expr;
+ }
+ return {expr.type,
+ create<ast::BitcastExpression>(Source{}, expr.type->Build(builder_), expr.expr)};
}
TypedExpression ParserImpl::AsUnsigned(TypedExpression expr) {
- if (expr.type && expr.type->IsSignedScalarOrVector()) {
- auto* new_type = GetUnsignedIntMatchingShape(expr.type);
- return {new_type, create<ast::BitcastExpression>(
- Source{}, new_type->Build(builder_), expr.expr)};
- }
- return expr;
+ if (expr.type && expr.type->IsSignedScalarOrVector()) {
+ auto* new_type = GetUnsignedIntMatchingShape(expr.type);
+ return {new_type,
+ create<ast::BitcastExpression>(Source{}, new_type->Build(builder_), expr.expr)};
+ }
+ return expr;
}
TypedExpression ParserImpl::AsSigned(TypedExpression expr) {
- if (expr.type && expr.type->IsUnsignedScalarOrVector()) {
- auto* new_type = GetSignedIntMatchingShape(expr.type);
- return {new_type, create<ast::BitcastExpression>(
- Source{}, new_type->Build(builder_), expr.expr)};
- }
- return expr;
+ if (expr.type && expr.type->IsUnsignedScalarOrVector()) {
+ auto* new_type = GetSignedIntMatchingShape(expr.type);
+ return {new_type,
+ create<ast::BitcastExpression>(Source{}, new_type->Build(builder_), expr.expr)};
+ }
+ return expr;
}
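// Illustrative sketch only (not from the Tint/Dawn sources): AsSigned() and
// AsUnsigned() wrap an expression in a WGSL bitcast, i.e. a bit-pattern
// reinterpretation rather than a value conversion. In C++20 terms:
#include <bit>
#include <cstdint>

inline uint32_t AsUnsignedBits(int32_t v) { return std::bit_cast<uint32_t>(v); }
inline int32_t AsSignedBits(uint32_t v) { return std::bit_cast<int32_t>(v); }

// e.g. AsUnsignedBits(-1) == 0xFFFFFFFFu, matching `bitcast<u32>(-1i)` in WGSL.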
bool ParserImpl::EmitFunctions() {
- if (!success_) {
- return false;
- }
- for (const auto* f : topologically_ordered_functions_) {
if (!success_) {
- return false;
+ return false;
}
-
- auto id = f->result_id();
- auto it = function_to_ep_info_.find(id);
- if (it == function_to_ep_info_.end()) {
- FunctionEmitter emitter(this, *f, nullptr);
- success_ = emitter.Emit();
- } else {
- for (const auto& ep : it->second) {
- FunctionEmitter emitter(this, *f, &ep);
- success_ = emitter.Emit();
+ for (const auto* f : topologically_ordered_functions_) {
if (!success_) {
- return false;
+ return false;
+ }
+
+ auto id = f->result_id();
+ auto it = function_to_ep_info_.find(id);
+ if (it == function_to_ep_info_.end()) {
+ FunctionEmitter emitter(this, *f, nullptr);
+ success_ = emitter.Emit();
+ } else {
+ for (const auto& ep : it->second) {
+ FunctionEmitter emitter(this, *f, &ep);
+ success_ = emitter.Emit();
+ if (!success_) {
+ return false;
+ }
+ }
}
- }
}
- }
- return success_;
+ return success_;
}
-const spvtools::opt::Instruction*
-ParserImpl::GetMemoryObjectDeclarationForHandle(uint32_t id,
- bool follow_image) {
- auto saved_id = id;
- auto local_fail = [this, saved_id, id,
- follow_image]() -> const spvtools::opt::Instruction* {
- const auto* inst = def_use_mgr_->GetDef(id);
- Fail() << "Could not find memory object declaration for the "
- << (follow_image ? "image" : "sampler") << " underlying id " << id
- << " (from original id " << saved_id << ") "
- << (inst ? inst->PrettyPrint() : std::string());
- return nullptr;
- };
-
- auto& memo_table =
- (follow_image ? mem_obj_decl_image_ : mem_obj_decl_sampler_);
-
- // Use a visited set to defend against bad input which might have long
- // chains or even loops.
- std::unordered_set<uint32_t> visited;
-
- // Trace backward in the SSA data flow until we hit a memory object
- // declaration.
- while (true) {
- auto where = memo_table.find(id);
- if (where != memo_table.end()) {
- return where->second;
- }
- // Protect against loops.
- auto visited_iter = visited.find(id);
- if (visited_iter != visited.end()) {
- // We've hit a loop. Mark all the visited nodes
- // as dead ends.
- for (auto iter : visited) {
- memo_table[iter] = nullptr;
- }
- return nullptr;
- }
- visited.insert(id);
+const spvtools::opt::Instruction* ParserImpl::GetMemoryObjectDeclarationForHandle(
+ uint32_t id,
+ bool follow_image) {
+ auto saved_id = id;
+ auto local_fail = [this, saved_id, id, follow_image]() -> const spvtools::opt::Instruction* {
+ const auto* inst = def_use_mgr_->GetDef(id);
+ Fail() << "Could not find memory object declaration for the "
+ << (follow_image ? "image" : "sampler") << " underlying id " << id
+ << " (from original id " << saved_id << ") "
+ << (inst ? inst->PrettyPrint() : std::string());
+ return nullptr;
+ };
- const auto* inst = def_use_mgr_->GetDef(id);
- if (inst == nullptr) {
- return local_fail();
- }
- switch (inst->opcode()) {
- case SpvOpFunctionParameter:
- case SpvOpVariable:
- // We found the memory object declaration.
- // Remember it as the answer for the whole path.
- for (auto iter : visited) {
- memo_table[iter] = inst;
- }
- return inst;
- case SpvOpLoad:
- // Follow the pointer being loaded
- id = inst->GetSingleWordInOperand(0);
- break;
- case SpvOpCopyObject:
- // Follow the object being copied.
- id = inst->GetSingleWordInOperand(0);
- break;
- case SpvOpAccessChain:
- case SpvOpInBoundsAccessChain:
- case SpvOpPtrAccessChain:
- case SpvOpInBoundsPtrAccessChain:
- // Follow the base pointer.
- id = inst->GetSingleWordInOperand(0);
- break;
- case SpvOpSampledImage:
- // Follow the image or the sampler, depending on the follow_image
- // parameter.
- id = inst->GetSingleWordInOperand(follow_image ? 0 : 1);
- break;
- case SpvOpImage:
- // Follow the sampled image
- id = inst->GetSingleWordInOperand(0);
- break;
- default:
- // Can't trace further.
- // Remember it as the answer for the whole path.
- for (auto iter : visited) {
- memo_table[iter] = nullptr;
+ auto& memo_table = (follow_image ? mem_obj_decl_image_ : mem_obj_decl_sampler_);
+
+ // Use a visited set to defend against bad input which might have long
+ // chains or even loops.
+ std::unordered_set<uint32_t> visited;
+
+ // Trace backward in the SSA data flow until we hit a memory object
+ // declaration.
+ while (true) {
+ auto where = memo_table.find(id);
+ if (where != memo_table.end()) {
+ return where->second;
+ }
+ // Protect against loops.
+ auto visited_iter = visited.find(id);
+ if (visited_iter != visited.end()) {
+ // We've hit a loop. Mark all the visited nodes
+ // as dead ends.
+ for (auto iter : visited) {
+ memo_table[iter] = nullptr;
+ }
+ return nullptr;
+ }
+ visited.insert(id);
+
+ const auto* inst = def_use_mgr_->GetDef(id);
+ if (inst == nullptr) {
+ return local_fail();
+ }
+ switch (inst->opcode()) {
+ case SpvOpFunctionParameter:
+ case SpvOpVariable:
+ // We found the memory object declaration.
+ // Remember it as the answer for the whole path.
+ for (auto iter : visited) {
+ memo_table[iter] = inst;
+ }
+ return inst;
+ case SpvOpLoad:
+ // Follow the pointer being loaded
+ id = inst->GetSingleWordInOperand(0);
+ break;
+ case SpvOpCopyObject:
+ // Follow the object being copied.
+ id = inst->GetSingleWordInOperand(0);
+ break;
+ case SpvOpAccessChain:
+ case SpvOpInBoundsAccessChain:
+ case SpvOpPtrAccessChain:
+ case SpvOpInBoundsPtrAccessChain:
+ // Follow the base pointer.
+ id = inst->GetSingleWordInOperand(0);
+ break;
+ case SpvOpSampledImage:
+ // Follow the image or the sampler, depending on the follow_image
+ // parameter.
+ id = inst->GetSingleWordInOperand(follow_image ? 0 : 1);
+ break;
+ case SpvOpImage:
+ // Follow the sampled image
+ id = inst->GetSingleWordInOperand(0);
+ break;
+ default:
+ // Can't trace further.
+ // Remember it as the answer for the whole path.
+ for (auto iter : visited) {
+ memo_table[iter] = nullptr;
+ }
+ return nullptr;
}
- return nullptr;
}
- }
}
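// Illustrative sketch only (not from the Tint/Dawn sources): the memoized
// backward-chasing pattern above, on a toy "id -> id it was derived from" map.
// 0 plays the role of "no declaration found"; every id visited along a path is
// cached with the same answer, and cycles are cached as dead ends.
#include <cstdint>
#include <unordered_map>
#include <unordered_set>

inline uint32_t TraceToDecl(uint32_t id,
                            const std::unordered_map<uint32_t, uint32_t>& derived_from,
                            std::unordered_map<uint32_t, uint32_t>& memo) {
    std::unordered_set<uint32_t> visited;
    while (true) {
        if (auto hit = memo.find(id); hit != memo.end()) {
            return hit->second;
        }
        if (!visited.insert(id).second) {
            // Cycle: mark everything on this path as a dead end.
            for (uint32_t v : visited) memo[v] = 0;
            return 0;
        }
        auto next = derived_from.find(id);
        if (next == derived_from.end()) {
            // No producer: `id` is the declaration itself. Cache it for the path.
            for (uint32_t v : visited) memo[v] = id;
            return id;
        }
        id = next->second;
    }
}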
-const spvtools::opt::Instruction*
-ParserImpl::GetSpirvTypeForHandleMemoryObjectDeclaration(
- const spvtools::opt::Instruction& var) {
- if (!success()) {
- return nullptr;
- }
- // The WGSL handle type is determined by looking at information from
- // several sources:
- // - the usage of the handle by image access instructions
- // - the SPIR-V type declaration
- // Each source does not have enough information to completely determine
- // the result.
-
- // Messages are phrased in terms of images and samplers because those
- // are the only SPIR-V handles supported by WGSL.
-
- // Get the SPIR-V handle type.
- const auto* ptr_type = def_use_mgr_->GetDef(var.type_id());
- if (!ptr_type || (ptr_type->opcode() != SpvOpTypePointer)) {
- Fail() << "Invalid type for variable or function parameter "
- << var.PrettyPrint();
- return nullptr;
- }
- const auto* raw_handle_type =
- def_use_mgr_->GetDef(ptr_type->GetSingleWordInOperand(1));
- if (!raw_handle_type) {
- Fail() << "Invalid pointer type for variable or function parameter "
- << var.PrettyPrint();
- return nullptr;
- }
- switch (raw_handle_type->opcode()) {
- case SpvOpTypeSampler:
- case SpvOpTypeImage:
- // The expected cases.
- break;
- case SpvOpTypeArray:
- case SpvOpTypeRuntimeArray:
- Fail()
- << "arrays of textures or samplers are not supported in WGSL; can't "
- "translate variable or function parameter: "
- << var.PrettyPrint();
- return nullptr;
- case SpvOpTypeSampledImage:
- Fail() << "WGSL does not support combined image-samplers: "
- << var.PrettyPrint();
- return nullptr;
- default:
- Fail() << "invalid type for image or sampler variable or function "
- "parameter: "
- << var.PrettyPrint();
- return nullptr;
- }
- return raw_handle_type;
-}
-
-const Pointer* ParserImpl::GetTypeForHandleVar(
+const spvtools::opt::Instruction* ParserImpl::GetSpirvTypeForHandleMemoryObjectDeclaration(
const spvtools::opt::Instruction& var) {
- auto where = handle_type_.find(&var);
- if (where != handle_type_.end()) {
- return where->second;
- }
-
- const spvtools::opt::Instruction* raw_handle_type =
- GetSpirvTypeForHandleMemoryObjectDeclaration(var);
- if (!raw_handle_type) {
- return nullptr;
- }
-
- // The variable could be a sampler or image.
- // Where possible, determine which one it is from the usage inferred
- // for the variable.
- Usage usage = handle_usage_[&var];
- if (!usage.IsValid()) {
- Fail() << "Invalid sampler or texture usage for variable "
- << var.PrettyPrint() << "\n"
- << usage;
- return nullptr;
- }
- // Infer a handle type, if usage didn't already tell us.
- if (!usage.IsComplete()) {
- // In SPIR-V you could statically reference a texture or sampler without
- // using it in a way that gives us a clue on how to declare it. Look inside
- // the store type to infer a usage.
- if (raw_handle_type->opcode() == SpvOpTypeSampler) {
- usage.AddSampler();
- } else {
- // It's a texture.
- if (raw_handle_type->NumInOperands() != 7) {
- Fail() << "invalid SPIR-V image type: expected 7 operands: "
- << raw_handle_type->PrettyPrint();
+ if (!success()) {
return nullptr;
- }
- const auto sampled_param = raw_handle_type->GetSingleWordInOperand(5);
- const auto format_param = raw_handle_type->GetSingleWordInOperand(6);
- // Only storage images have a format.
- if ((format_param != SpvImageFormatUnknown) ||
- sampled_param == 2 /* without sampler */) {
- // Get NonWritable and NonReadable attributes of the variable.
- bool is_nonwritable = false;
- bool is_nonreadable = false;
- for (const auto& deco : GetDecorationsFor(var.result_id())) {
- if (deco.size() != 1) {
- continue;
- }
- if (deco[0] == SpvDecorationNonWritable) {
- is_nonwritable = true;
- }
- if (deco[0] == SpvDecorationNonReadable) {
- is_nonreadable = true;
- }
- }
- if (is_nonwritable && is_nonreadable) {
- Fail() << "storage image variable is both NonWritable and NonReadable"
- << var.PrettyPrint();
- }
- if (!is_nonwritable && !is_nonreadable) {
- Fail()
- << "storage image variable is neither NonWritable nor NonReadable"
- << var.PrettyPrint();
- }
- // Let's make it one of the storage textures.
- if (is_nonwritable) {
- usage.AddStorageReadTexture();
- } else {
- usage.AddStorageWriteTexture();
- }
- } else {
- usage.AddSampledTexture();
- }
}
- if (!usage.IsComplete()) {
- Fail()
- << "internal error: should have inferred a complete handle type. got "
- << usage.to_str();
- return nullptr;
- }
- }
-
- // Construct the Tint handle type.
- const Type* ast_store_type = nullptr;
- if (usage.IsSampler()) {
- ast_store_type = ty_.Sampler(usage.IsComparisonSampler()
- ? ast::SamplerKind::kComparisonSampler
- : ast::SamplerKind::kSampler);
- } else if (usage.IsTexture()) {
- const spvtools::opt::analysis::Image* image_type =
- type_mgr_->GetType(raw_handle_type->result_id())->AsImage();
- if (!image_type) {
- Fail() << "internal error: Couldn't look up image type"
- << raw_handle_type->PrettyPrint();
- return nullptr;
- }
-
- if (image_type->is_arrayed()) {
- // Give a nicer error message here, where we have the offending variable
- // in hand, rather than inside the enum converter.
- switch (image_type->dim()) {
- case SpvDim2D:
- case SpvDimCube:
- break;
+ // The WGSL handle type is determined by looking at information from
+ // several sources:
+ // - the usage of the handle by image access instructions
+ // - the SPIR-V type declaration
+    // Neither source on its own has enough information to completely
+    // determine the result.
+
+ // Messages are phrased in terms of images and samplers because those
+ // are the only SPIR-V handles supported by WGSL.
+
+ // Get the SPIR-V handle type.
+ const auto* ptr_type = def_use_mgr_->GetDef(var.type_id());
+ if (!ptr_type || (ptr_type->opcode() != SpvOpTypePointer)) {
+ Fail() << "Invalid type for variable or function parameter " << var.PrettyPrint();
+ return nullptr;
+ }
+ const auto* raw_handle_type = def_use_mgr_->GetDef(ptr_type->GetSingleWordInOperand(1));
+ if (!raw_handle_type) {
+ Fail() << "Invalid pointer type for variable or function parameter " << var.PrettyPrint();
+ return nullptr;
+ }
+ switch (raw_handle_type->opcode()) {
+ case SpvOpTypeSampler:
+ case SpvOpTypeImage:
+ // The expected cases.
+ break;
+ case SpvOpTypeArray:
+ case SpvOpTypeRuntimeArray:
+ Fail() << "arrays of textures or samplers are not supported in WGSL; can't "
+ "translate variable or function parameter: "
+ << var.PrettyPrint();
+ return nullptr;
+ case SpvOpTypeSampledImage:
+ Fail() << "WGSL does not support combined image-samplers: " << var.PrettyPrint();
+ return nullptr;
default:
- Fail() << "WGSL arrayed textures must be 2d_array or cube_array: "
- "invalid multisampled texture variable "
- << namer_.Name(var.result_id()) << ": " << var.PrettyPrint();
- return nullptr;
- }
- }
-
- const ast::TextureDimension dim =
- enum_converter_.ToDim(image_type->dim(), image_type->is_arrayed());
- if (dim == ast::TextureDimension::kNone) {
- return nullptr;
- }
-
- // WGSL textures are always formatted. Unformatted textures are always
- // sampled.
- if (usage.IsSampledTexture() || usage.IsStorageReadTexture() ||
- (image_type->format() == SpvImageFormatUnknown)) {
- // Make a sampled texture type.
- auto* ast_sampled_component_type =
- ConvertType(raw_handle_type->GetSingleWordInOperand(0));
-
- // Vulkan ignores the depth parameter on OpImage, so pay attention to the
- // usage as well. That is, it's valid for a Vulkan shader to use an
- // OpImage variable with an OpImage*Dref* instruction. In WGSL we must
- // treat that as a depth texture.
- if (image_type->depth() || usage.IsDepthTexture()) {
- if (image_type->is_multisampled()) {
- ast_store_type = ty_.DepthMultisampledTexture(dim);
+ Fail() << "invalid type for image or sampler variable or function "
+ "parameter: "
+ << var.PrettyPrint();
+ return nullptr;
+ }
+ return raw_handle_type;
+}
+
+const Pointer* ParserImpl::GetTypeForHandleVar(const spvtools::opt::Instruction& var) {
+ auto where = handle_type_.find(&var);
+ if (where != handle_type_.end()) {
+ return where->second;
+ }
+
+ const spvtools::opt::Instruction* raw_handle_type =
+ GetSpirvTypeForHandleMemoryObjectDeclaration(var);
+ if (!raw_handle_type) {
+ return nullptr;
+ }
+
+ // The variable could be a sampler or image.
+ // Where possible, determine which one it is from the usage inferred
+ // for the variable.
+ Usage usage = handle_usage_[&var];
+ if (!usage.IsValid()) {
+ Fail() << "Invalid sampler or texture usage for variable " << var.PrettyPrint() << "\n"
+ << usage;
+ return nullptr;
+ }
+ // Infer a handle type, if usage didn't already tell us.
+ if (!usage.IsComplete()) {
+ // In SPIR-V you could statically reference a texture or sampler without
+ // using it in a way that gives us a clue on how to declare it. Look inside
+ // the store type to infer a usage.
+ if (raw_handle_type->opcode() == SpvOpTypeSampler) {
+ usage.AddSampler();
} else {
- ast_store_type = ty_.DepthTexture(dim);
+ // It's a texture.
+ if (raw_handle_type->NumInOperands() != 7) {
+ Fail() << "invalid SPIR-V image type: expected 7 operands: "
+ << raw_handle_type->PrettyPrint();
+ return nullptr;
+ }
+ const auto sampled_param = raw_handle_type->GetSingleWordInOperand(5);
+ const auto format_param = raw_handle_type->GetSingleWordInOperand(6);
+ // Only storage images have a format.
+ if ((format_param != SpvImageFormatUnknown) ||
+ sampled_param == 2 /* without sampler */) {
+ // Get NonWritable and NonReadable attributes of the variable.
+ bool is_nonwritable = false;
+ bool is_nonreadable = false;
+ for (const auto& deco : GetDecorationsFor(var.result_id())) {
+ if (deco.size() != 1) {
+ continue;
+ }
+ if (deco[0] == SpvDecorationNonWritable) {
+ is_nonwritable = true;
+ }
+ if (deco[0] == SpvDecorationNonReadable) {
+ is_nonreadable = true;
+ }
+ }
+ if (is_nonwritable && is_nonreadable) {
+                    Fail() << "storage image variable is both NonWritable and NonReadable: "
+ << var.PrettyPrint();
+ }
+ if (!is_nonwritable && !is_nonreadable) {
+                    Fail() << "storage image variable is neither NonWritable nor NonReadable: "
+ << var.PrettyPrint();
+ }
+ // Let's make it one of the storage textures.
+ if (is_nonwritable) {
+ usage.AddStorageReadTexture();
+ } else {
+ usage.AddStorageWriteTexture();
+ }
+ } else {
+ usage.AddSampledTexture();
+ }
}
- } else if (image_type->is_multisampled()) {
- if (dim != ast::TextureDimension::k2d) {
- Fail() << "WGSL multisampled textures must be 2d and non-arrayed: "
- "invalid multisampled texture variable "
- << namer_.Name(var.result_id()) << ": " << var.PrettyPrint();
+ if (!usage.IsComplete()) {
+ Fail() << "internal error: should have inferred a complete handle type. got "
+ << usage.to_str();
+ return nullptr;
}
- // Multisampled textures are never depth textures.
+ }
+
+ // Construct the Tint handle type.
+ const Type* ast_store_type = nullptr;
+ if (usage.IsSampler()) {
ast_store_type =
- ty_.MultisampledTexture(dim, ast_sampled_component_type);
- } else {
- ast_store_type = ty_.SampledTexture(dim, ast_sampled_component_type);
- }
+ ty_.Sampler(usage.IsComparisonSampler() ? ast::SamplerKind::kComparisonSampler
+ : ast::SamplerKind::kSampler);
+ } else if (usage.IsTexture()) {
+ const spvtools::opt::analysis::Image* image_type =
+ type_mgr_->GetType(raw_handle_type->result_id())->AsImage();
+ if (!image_type) {
+            Fail() << "internal error: Couldn't look up image type "
+ << raw_handle_type->PrettyPrint();
+ return nullptr;
+ }
+
+ if (image_type->is_arrayed()) {
+ // Give a nicer error message here, where we have the offending variable
+ // in hand, rather than inside the enum converter.
+ switch (image_type->dim()) {
+ case SpvDim2D:
+ case SpvDimCube:
+ break;
+ default:
+ Fail() << "WGSL arrayed textures must be 2d_array or cube_array: "
+                              "invalid arrayed texture variable "
+ << namer_.Name(var.result_id()) << ": " << var.PrettyPrint();
+ return nullptr;
+ }
+ }
+
+ const ast::TextureDimension dim =
+ enum_converter_.ToDim(image_type->dim(), image_type->is_arrayed());
+ if (dim == ast::TextureDimension::kNone) {
+ return nullptr;
+ }
+
+ // WGSL textures are always formatted. Unformatted textures are always
+ // sampled.
+ if (usage.IsSampledTexture() || usage.IsStorageReadTexture() ||
+ (image_type->format() == SpvImageFormatUnknown)) {
+ // Make a sampled texture type.
+ auto* ast_sampled_component_type =
+ ConvertType(raw_handle_type->GetSingleWordInOperand(0));
+
+ // Vulkan ignores the depth parameter on OpImage, so pay attention to the
+ // usage as well. That is, it's valid for a Vulkan shader to use an
+ // OpImage variable with an OpImage*Dref* instruction. In WGSL we must
+ // treat that as a depth texture.
+ if (image_type->depth() || usage.IsDepthTexture()) {
+ if (image_type->is_multisampled()) {
+ ast_store_type = ty_.DepthMultisampledTexture(dim);
+ } else {
+ ast_store_type = ty_.DepthTexture(dim);
+ }
+ } else if (image_type->is_multisampled()) {
+ if (dim != ast::TextureDimension::k2d) {
+ Fail() << "WGSL multisampled textures must be 2d and non-arrayed: "
+ "invalid multisampled texture variable "
+ << namer_.Name(var.result_id()) << ": " << var.PrettyPrint();
+ }
+ // Multisampled textures are never depth textures.
+ ast_store_type = ty_.MultisampledTexture(dim, ast_sampled_component_type);
+ } else {
+ ast_store_type = ty_.SampledTexture(dim, ast_sampled_component_type);
+ }
+ } else {
+ const auto access = ast::Access::kWrite;
+ const auto format = enum_converter_.ToTexelFormat(image_type->format());
+ if (format == ast::TexelFormat::kNone) {
+ return nullptr;
+ }
+ ast_store_type = ty_.StorageTexture(dim, format, access);
+ }
} else {
- const auto access = ast::Access::kWrite;
- const auto format = enum_converter_.ToTexelFormat(image_type->format());
- if (format == ast::TexelFormat::kNone) {
+ Fail() << "unsupported: UniformConstant variable is not a recognized "
+                  "sampler or texture: "
+ << var.PrettyPrint();
return nullptr;
- }
- ast_store_type = ty_.StorageTexture(dim, format, access);
}
- } else {
- Fail() << "unsupported: UniformConstant variable is not a recognized "
- "sampler or texture"
- << var.PrettyPrint();
- return nullptr;
- }
- // Form the pointer type.
- auto* result =
- ty_.Pointer(ast_store_type, ast::StorageClass::kUniformConstant);
- // Remember it for later.
- handle_type_[&var] = result;
- return result;
+ // Form the pointer type.
+ auto* result = ty_.Pointer(ast_store_type, ast::StorageClass::kHandle);
+ // Remember it for later.
+ handle_type_[&var] = result;
+ return result;
}
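// Illustrative sketch only (not from the Tint/Dawn sources): the usage-inference
// fallback above, reduced to the two OpTypeImage operands it inspects when no
// image instruction told us how the handle is used. Names are hypothetical.
#include <cstdint>

enum class HandleKind { kSampledTexture, kStorageTexture };

// sampled_param is OpTypeImage operand 5 (2 means "used without a sampler");
// format_is_known means operand 6 is something other than SpvImageFormatUnknown.
inline HandleKind ClassifyImage(uint32_t sampled_param, bool format_is_known) {
    if (format_is_known || sampled_param == 2) {
        return HandleKind::kStorageTexture;  // access then comes from the
                                             // NonWritable/NonReadable decorations
    }
    return HandleKind::kSampledTexture;
}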
const Type* ParserImpl::GetComponentTypeForFormat(ast::TexelFormat format) {
- switch (format) {
- case ast::TexelFormat::kR32Uint:
- case ast::TexelFormat::kRgba8Uint:
- case ast::TexelFormat::kRg32Uint:
- case ast::TexelFormat::kRgba16Uint:
- case ast::TexelFormat::kRgba32Uint:
- return ty_.U32();
-
- case ast::TexelFormat::kR32Sint:
- case ast::TexelFormat::kRgba8Sint:
- case ast::TexelFormat::kRg32Sint:
- case ast::TexelFormat::kRgba16Sint:
- case ast::TexelFormat::kRgba32Sint:
- return ty_.I32();
-
- case ast::TexelFormat::kRgba8Unorm:
- case ast::TexelFormat::kRgba8Snorm:
- case ast::TexelFormat::kR32Float:
- case ast::TexelFormat::kRg32Float:
- case ast::TexelFormat::kRgba16Float:
- case ast::TexelFormat::kRgba32Float:
- return ty_.F32();
- default:
- break;
- }
- Fail() << "unknown format " << int(format);
- return nullptr;
+ switch (format) {
+ case ast::TexelFormat::kR32Uint:
+ case ast::TexelFormat::kRgba8Uint:
+ case ast::TexelFormat::kRg32Uint:
+ case ast::TexelFormat::kRgba16Uint:
+ case ast::TexelFormat::kRgba32Uint:
+ return ty_.U32();
+
+ case ast::TexelFormat::kR32Sint:
+ case ast::TexelFormat::kRgba8Sint:
+ case ast::TexelFormat::kRg32Sint:
+ case ast::TexelFormat::kRgba16Sint:
+ case ast::TexelFormat::kRgba32Sint:
+ return ty_.I32();
+
+ case ast::TexelFormat::kRgba8Unorm:
+ case ast::TexelFormat::kRgba8Snorm:
+ case ast::TexelFormat::kR32Float:
+ case ast::TexelFormat::kRg32Float:
+ case ast::TexelFormat::kRgba16Float:
+ case ast::TexelFormat::kRgba32Float:
+ return ty_.F32();
+ default:
+ break;
+ }
+ Fail() << "unknown format " << int(format);
+ return nullptr;
}
unsigned ParserImpl::GetChannelCountForFormat(ast::TexelFormat format) {
- switch (format) {
- case ast::TexelFormat::kR32Float:
- case ast::TexelFormat::kR32Sint:
- case ast::TexelFormat::kR32Uint:
- // One channel
- return 1;
-
- case ast::TexelFormat::kRg32Float:
- case ast::TexelFormat::kRg32Sint:
- case ast::TexelFormat::kRg32Uint:
- // Two channels
- return 2;
-
- case ast::TexelFormat::kRgba16Float:
- case ast::TexelFormat::kRgba16Sint:
- case ast::TexelFormat::kRgba16Uint:
- case ast::TexelFormat::kRgba32Float:
- case ast::TexelFormat::kRgba32Sint:
- case ast::TexelFormat::kRgba32Uint:
- case ast::TexelFormat::kRgba8Sint:
- case ast::TexelFormat::kRgba8Snorm:
- case ast::TexelFormat::kRgba8Uint:
- case ast::TexelFormat::kRgba8Unorm:
- // Four channels
- return 4;
-
- default:
- break;
- }
- Fail() << "unknown format " << int(format);
- return 0;
+ switch (format) {
+ case ast::TexelFormat::kR32Float:
+ case ast::TexelFormat::kR32Sint:
+ case ast::TexelFormat::kR32Uint:
+ // One channel
+ return 1;
+
+ case ast::TexelFormat::kRg32Float:
+ case ast::TexelFormat::kRg32Sint:
+ case ast::TexelFormat::kRg32Uint:
+ // Two channels
+ return 2;
+
+ case ast::TexelFormat::kRgba16Float:
+ case ast::TexelFormat::kRgba16Sint:
+ case ast::TexelFormat::kRgba16Uint:
+ case ast::TexelFormat::kRgba32Float:
+ case ast::TexelFormat::kRgba32Sint:
+ case ast::TexelFormat::kRgba32Uint:
+ case ast::TexelFormat::kRgba8Sint:
+ case ast::TexelFormat::kRgba8Snorm:
+ case ast::TexelFormat::kRgba8Uint:
+ case ast::TexelFormat::kRgba8Unorm:
+ // Four channels
+ return 4;
+
+ default:
+ break;
+ }
+ Fail() << "unknown format " << int(format);
+ return 0;
}
const Type* ParserImpl::GetTexelTypeForFormat(ast::TexelFormat format) {
- const auto* component_type = GetComponentTypeForFormat(format);
- if (!component_type) {
- return nullptr;
- }
- return ty_.Vector(component_type, 4);
+ const auto* component_type = GetComponentTypeForFormat(format);
+ if (!component_type) {
+ return nullptr;
+ }
+ return ty_.Vector(component_type, 4);
}
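
The texel type built here is the WGSL type passed to textureStore, and it is always a 4-element vector of the format's component type; the channel count from GetChannelCountForFormat does not affect it (r32float and rgba32float both yield vec4<f32>). A small standalone illustration of that rule, with hypothetical names that are not Tint's API:

    #include <string>

    // Hypothetical mirror of the rule implemented above.
    struct FormatInfo {
        const char* component;  // "f32", "i32" or "u32", as in GetComponentTypeForFormat
        unsigned channels;      // 1, 2 or 4, as in GetChannelCountForFormat
    };

    std::string TexelTypeName(const FormatInfo& f) {
        (void)f.channels;  // irrelevant to the texel type used by textureStore
        return std::string("vec4<") + f.component + ">";
    }
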
bool ParserImpl::RegisterHandleUsage() {
- if (!success_) {
- return false;
- }
-
- // Map a function ID to the list of its function parameter instructions, in
- // order.
- std::unordered_map<uint32_t, std::vector<const spvtools::opt::Instruction*>>
- function_params;
- for (const auto* f : topologically_ordered_functions_) {
- // Record the instructions defining this function's parameters.
- auto& params = function_params[f->result_id()];
- f->ForEachParam([&params](const spvtools::opt::Instruction* param) {
- params.push_back(param);
- });
- }
-
- // Returns the memory object declaration for an image underlying the first
- // operand of the given image instruction.
- auto get_image = [this](const spvtools::opt::Instruction& image_inst) {
- return this->GetMemoryObjectDeclarationForHandle(
- image_inst.GetSingleWordInOperand(0), true);
- };
- // Returns the memory object declaration for a sampler underlying the first
- // operand of the given image instruction.
- auto get_sampler = [this](const spvtools::opt::Instruction& image_inst) {
- return this->GetMemoryObjectDeclarationForHandle(
- image_inst.GetSingleWordInOperand(0), false);
- };
-
- // Scan the bodies of functions for image operations, recording their implied
- // usage properties on the memory object declarations (i.e. variables or
- // function parameters). We scan the functions in an order so that callees
- // precede callers. That way the usage on a function parameter is already
- // computed before we see the call to that function. So when we reach
- // a function call, we can add the usage from the callee formal parameters.
- for (const auto* f : topologically_ordered_functions_) {
- for (const auto& bb : *f) {
- for (const auto& inst : bb) {
- switch (inst.opcode()) {
- // Single texel reads and writes
-
- case SpvOpImageRead:
- handle_usage_[get_image(inst)].AddStorageReadTexture();
- break;
- case SpvOpImageWrite:
- handle_usage_[get_image(inst)].AddStorageWriteTexture();
- break;
- case SpvOpImageFetch:
- handle_usage_[get_image(inst)].AddSampledTexture();
- break;
-
- // Sampling and gathering from a sampled image.
-
- case SpvOpImageSampleImplicitLod:
- case SpvOpImageSampleExplicitLod:
- case SpvOpImageSampleProjImplicitLod:
- case SpvOpImageSampleProjExplicitLod:
- case SpvOpImageGather:
- handle_usage_[get_image(inst)].AddSampledTexture();
- handle_usage_[get_sampler(inst)].AddSampler();
- break;
- case SpvOpImageSampleDrefImplicitLod:
- case SpvOpImageSampleDrefExplicitLod:
- case SpvOpImageSampleProjDrefImplicitLod:
- case SpvOpImageSampleProjDrefExplicitLod:
- case SpvOpImageDrefGather:
- // Depth reference access implies usage as a depth texture, which
- // in turn is a sampled texture.
- handle_usage_[get_image(inst)].AddDepthTexture();
- handle_usage_[get_sampler(inst)].AddComparisonSampler();
- break;
-
- // Image queries
-
- case SpvOpImageQuerySizeLod:
- // Vulkan requires Sampled=1 for this. SPIR-V already requires MS=0.
- handle_usage_[get_image(inst)].AddSampledTexture();
- break;
- case SpvOpImageQuerySize:
- // Applies to either MS=1 or Sampled=0 or 2.
- // So we can't force it to be multisampled, or storage image.
- break;
- case SpvOpImageQueryLod:
- handle_usage_[get_image(inst)].AddSampledTexture();
- handle_usage_[get_sampler(inst)].AddSampler();
- break;
- case SpvOpImageQueryLevels:
- // We can't tell anything more than that it's an image.
- handle_usage_[get_image(inst)].AddTexture();
- break;
- case SpvOpImageQuerySamples:
- handle_usage_[get_image(inst)].AddMultisampledTexture();
- break;
+ if (!success_) {
+ return false;
+ }
- // Function calls
-
- case SpvOpFunctionCall: {
- // Propagate handle usages from callee function formal parameters to
- // the matching caller parameters. This is where we rely on the
- // fact that callees have been processed earlier in the flow.
- const auto num_in_operands = inst.NumInOperands();
- // The first operand of the call is the function ID.
- // The remaining operands are the operands to the function.
- if (num_in_operands < 1) {
- return Fail() << "Call instruction must have at least one operand"
- << inst.PrettyPrint();
- }
- const auto function_id = inst.GetSingleWordInOperand(0);
- const auto& formal_params = function_params[function_id];
- if (formal_params.size() != (num_in_operands - 1)) {
- return Fail() << "Called function has " << formal_params.size()
- << " parameters, but function call has "
- << (num_in_operands - 1) << " parameters"
- << inst.PrettyPrint();
- }
- for (uint32_t i = 1; i < num_in_operands; ++i) {
- auto where = handle_usage_.find(formal_params[i - 1]);
- if (where == handle_usage_.end()) {
- // We haven't recorded any handle usage on the formal parameter.
- continue;
- }
- const Usage& formal_param_usage = where->second;
- const auto operand_id = inst.GetSingleWordInOperand(i);
- const auto* operand_as_sampler =
- GetMemoryObjectDeclarationForHandle(operand_id, false);
- const auto* operand_as_image =
- GetMemoryObjectDeclarationForHandle(operand_id, true);
- if (operand_as_sampler) {
- handle_usage_[operand_as_sampler].Add(formal_param_usage);
- }
- if (operand_as_image &&
- (operand_as_image != operand_as_sampler)) {
- handle_usage_[operand_as_image].Add(formal_param_usage);
- }
+ // Map a function ID to the list of its function parameter instructions, in
+ // order.
+ std::unordered_map<uint32_t, std::vector<const spvtools::opt::Instruction*>> function_params;
+ for (const auto* f : topologically_ordered_functions_) {
+ // Record the instructions defining this function's parameters.
+ auto& params = function_params[f->result_id()];
+ f->ForEachParam(
+ [&params](const spvtools::opt::Instruction* param) { params.push_back(param); });
+ }
+
+ // Returns the memory object declaration for an image underlying the first
+ // operand of the given image instruction.
+ auto get_image = [this](const spvtools::opt::Instruction& image_inst) {
+ return this->GetMemoryObjectDeclarationForHandle(image_inst.GetSingleWordInOperand(0),
+ true);
+ };
+ // Returns the memory object declaration for a sampler underlying the first
+ // operand of the given image instruction.
+ auto get_sampler = [this](const spvtools::opt::Instruction& image_inst) {
+ return this->GetMemoryObjectDeclarationForHandle(image_inst.GetSingleWordInOperand(0),
+ false);
+ };
+
+ // Scan the bodies of functions for image operations, recording their implied
+ // usage properties on the memory object declarations (i.e. variables or
+ // function parameters). We scan the functions in an order so that callees
+ // precede callers. That way the usage on a function parameter is already
+ // computed before we see the call to that function. So when we reach
+ // a function call, we can add the usage from the callee formal parameters.
+ for (const auto* f : topologically_ordered_functions_) {
+ for (const auto& bb : *f) {
+ for (const auto& inst : bb) {
+ switch (inst.opcode()) {
+ // Single texel reads and writes
+
+ case SpvOpImageRead:
+ handle_usage_[get_image(inst)].AddStorageReadTexture();
+ break;
+ case SpvOpImageWrite:
+ handle_usage_[get_image(inst)].AddStorageWriteTexture();
+ break;
+ case SpvOpImageFetch:
+ handle_usage_[get_image(inst)].AddSampledTexture();
+ break;
+
+ // Sampling and gathering from a sampled image.
+
+ case SpvOpImageSampleImplicitLod:
+ case SpvOpImageSampleExplicitLod:
+ case SpvOpImageSampleProjImplicitLod:
+ case SpvOpImageSampleProjExplicitLod:
+ case SpvOpImageGather:
+ handle_usage_[get_image(inst)].AddSampledTexture();
+ handle_usage_[get_sampler(inst)].AddSampler();
+ break;
+ case SpvOpImageSampleDrefImplicitLod:
+ case SpvOpImageSampleDrefExplicitLod:
+ case SpvOpImageSampleProjDrefImplicitLod:
+ case SpvOpImageSampleProjDrefExplicitLod:
+ case SpvOpImageDrefGather:
+ // Depth reference access implies usage as a depth texture, which
+ // in turn is a sampled texture.
+ handle_usage_[get_image(inst)].AddDepthTexture();
+ handle_usage_[get_sampler(inst)].AddComparisonSampler();
+ break;
+
+ // Image queries
+
+ case SpvOpImageQuerySizeLod:
+ // Vulkan requires Sampled=1 for this. SPIR-V already requires MS=0.
+ handle_usage_[get_image(inst)].AddSampledTexture();
+ break;
+ case SpvOpImageQuerySize:
+ // Applies to either MS=1 or Sampled=0 or 2.
+ // So we can't force it to be multisampled, or storage image.
+ break;
+ case SpvOpImageQueryLod:
+ handle_usage_[get_image(inst)].AddSampledTexture();
+ handle_usage_[get_sampler(inst)].AddSampler();
+ break;
+ case SpvOpImageQueryLevels:
+ // We can't tell anything more than that it's an image.
+ handle_usage_[get_image(inst)].AddTexture();
+ break;
+ case SpvOpImageQuerySamples:
+ handle_usage_[get_image(inst)].AddMultisampledTexture();
+ break;
+
+ // Function calls
+
+ case SpvOpFunctionCall: {
+ // Propagate handle usages from callee function formal parameters to
+ // the matching caller parameters. This is where we rely on the
+ // fact that callees have been processed earlier in the flow.
+ const auto num_in_operands = inst.NumInOperands();
+ // The first operand of the call is the function ID.
+ // The remaining operands are the operands to the function.
+ if (num_in_operands < 1) {
+ return Fail() << "Call instruction must have at least one operand"
+ << inst.PrettyPrint();
+ }
+ const auto function_id = inst.GetSingleWordInOperand(0);
+ const auto& formal_params = function_params[function_id];
+ if (formal_params.size() != (num_in_operands - 1)) {
+ return Fail()
+ << "Called function has " << formal_params.size()
+ << " parameters, but function call has " << (num_in_operands - 1)
+ << " parameters" << inst.PrettyPrint();
+ }
+ for (uint32_t i = 1; i < num_in_operands; ++i) {
+ auto where = handle_usage_.find(formal_params[i - 1]);
+ if (where == handle_usage_.end()) {
+ // We haven't recorded any handle usage on the formal parameter.
+ continue;
+ }
+ const Usage& formal_param_usage = where->second;
+ const auto operand_id = inst.GetSingleWordInOperand(i);
+ const auto* operand_as_sampler =
+ GetMemoryObjectDeclarationForHandle(operand_id, false);
+ const auto* operand_as_image =
+ GetMemoryObjectDeclarationForHandle(operand_id, true);
+ if (operand_as_sampler) {
+ handle_usage_[operand_as_sampler].Add(formal_param_usage);
+ }
+ if (operand_as_image && (operand_as_image != operand_as_sampler)) {
+ handle_usage_[operand_as_image].Add(formal_param_usage);
+ }
+ }
+ break;
+ }
+
+ default:
+ break;
+ }
}
- break;
- }
-
- default:
- break;
}
- }
}
- }
- return success_;
+ return success_;
}
Usage ParserImpl::GetHandleUsage(uint32_t id) const {
- const auto where = handle_usage_.find(def_use_mgr_->GetDef(id));
- if (where != handle_usage_.end()) {
- return where->second;
- }
- return Usage();
-}
-
-const spvtools::opt::Instruction* ParserImpl::GetInstructionForTest(
- uint32_t id) const {
- return def_use_mgr_ ? def_use_mgr_->GetDef(id) : nullptr;
-}
-
-std::string ParserImpl::GetMemberName(const Struct& struct_type,
- int member_index) {
- auto where = struct_id_for_symbol_.find(struct_type.name);
- if (where == struct_id_for_symbol_.end()) {
- Fail() << "no structure type registered for symbol";
- return "";
- }
- return namer_.GetMemberName(where->second, member_index);
+ const auto where = handle_usage_.find(def_use_mgr_->GetDef(id));
+ if (where != handle_usage_.end()) {
+ return where->second;
+ }
+ return Usage();
+}
+
+const spvtools::opt::Instruction* ParserImpl::GetInstructionForTest(uint32_t id) const {
+ return def_use_mgr_ ? def_use_mgr_->GetDef(id) : nullptr;
+}
+
+std::string ParserImpl::GetMemberName(const Struct& struct_type, int member_index) {
+ auto where = struct_id_for_symbol_.find(struct_type.name);
+ if (where == struct_id_for_symbol_.end()) {
+ Fail() << "no structure type registered for symbol";
+ return "";
+ }
+ return namer_.GetMemberName(where->second, member_index);
}
WorkgroupSizeInfo::WorkgroupSizeInfo() = default;
diff --git a/chromium/third_party/dawn/src/tint/reader/spirv/parser_impl.h b/chromium/third_party/dawn/src/tint/reader/spirv/parser_impl.h
index 96fe99c60d7..b91f1924a59 100644
--- a/chromium/third_party/dawn/src/tint/reader/spirv/parser_impl.h
+++ b/chromium/third_party/dawn/src/tint/reader/spirv/parser_impl.h
@@ -64,821 +64,798 @@ using DecorationList = std::vector<Decoration>;
/// An AST expression with its type.
struct TypedExpression {
- /// Constructor
- TypedExpression();
+ /// Constructor
+ TypedExpression();
- /// Copy constructor
- TypedExpression(const TypedExpression&);
+ /// Copy constructor
+ TypedExpression(const TypedExpression&);
- /// Constructor
- /// @param type_in the type of the expression
- /// @param expr_in the expression
- TypedExpression(const Type* type_in, const ast::Expression* expr_in);
+ /// Constructor
+ /// @param type_in the type of the expression
+ /// @param expr_in the expression
+ TypedExpression(const Type* type_in, const ast::Expression* expr_in);
- /// Assignment operator
- /// @returns this TypedExpression
- TypedExpression& operator=(const TypedExpression&);
+ /// Assignment operator
+ /// @returns this TypedExpression
+ TypedExpression& operator=(const TypedExpression&);
- /// @returns true if both type and expr are not nullptr
- operator bool() const { return type && expr; }
+ /// @returns true if both type and expr are not nullptr
+ operator bool() const { return type && expr; }
- /// The type
- const Type* type = nullptr;
- /// The expression
- const ast::Expression* expr = nullptr;
+ /// The type
+ const Type* type = nullptr;
+ /// The expression
+ const ast::Expression* expr = nullptr;
};
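
The operator bool() above is what lets callers treat a default-constructed TypedExpression as "no result". A hedged usage sketch, where MakeExpr stands in for any helper that yields a TypedExpression and returns a default-constructed one on failure (MakeExpr is hypothetical, not part of ParserImpl):

    TypedExpression e = MakeExpr(id);
    if (!e) {
        // Either 'type' or 'expr' is null, so nothing can be emitted for this ID.
        return Fail() << "could not form an expression for result ID " << id;
    }
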
/// Info about the WorkgroupSize builtin.
struct WorkgroupSizeInfo {
- /// Constructor
- WorkgroupSizeInfo();
- /// Destructor
- ~WorkgroupSizeInfo();
- /// The SPIR-V ID of the WorkgroupSize builtin, if any.
- uint32_t id = 0u;
- /// The SPIR-V type ID of the WorkgroupSize builtin, if any.
- uint32_t type_id = 0u;
- /// The SPIR-V type IDs of the x, y, and z components.
- uint32_t component_type_id = 0u;
- /// The SPIR-V IDs of the X, Y, and Z components of the workgroup size
- /// builtin.
- uint32_t x_id = 0u; /// X component ID
- uint32_t y_id = 0u; /// Y component ID
- uint32_t z_id = 0u; /// Z component ID
- /// The effective workgroup size, if this is a compute shader.
- uint32_t x_value = 0u; /// X workgroup size
- uint32_t y_value = 0u; /// Y workgroup size
- uint32_t z_value = 0u; /// Z workgroup size
+ /// Constructor
+ WorkgroupSizeInfo();
+ /// Destructor
+ ~WorkgroupSizeInfo();
+ /// The SPIR-V ID of the WorkgroupSize builtin, if any.
+ uint32_t id = 0u;
+ /// The SPIR-V type ID of the WorkgroupSize builtin, if any.
+ uint32_t type_id = 0u;
+ /// The SPIR-V type IDs of the x, y, and z components.
+ uint32_t component_type_id = 0u;
+ /// The SPIR-V IDs of the X, Y, and Z components of the workgroup size
+ /// builtin.
+ uint32_t x_id = 0u; /// X component ID
+ uint32_t y_id = 0u; /// Y component ID
+ uint32_t z_id = 0u; /// Z component ID
+ /// The effective workgroup size, if this is a compute shader.
+ uint32_t x_value = 0u; /// X workgroup size
+ uint32_t y_value = 0u; /// Y workgroup size
+ uint32_t z_value = 0u; /// Z workgroup size
};
/// Parser implementation for SPIR-V.
class ParserImpl : Reader {
- public:
- /// Creates a new parser
- /// @param input the input data to parse
- explicit ParserImpl(const std::vector<uint32_t>& input);
- /// Destructor
- ~ParserImpl() override;
-
- /// Run the parser
- /// @returns true if the parse was successful, false otherwise.
- bool Parse() override;
-
- /// @returns the program. The program builder in the parser will be reset
- /// after this.
- Program program() override;
-
- /// @returns a reference to the internal builder, without building the
- /// program. To be used only for testing.
- ProgramBuilder& builder() { return builder_; }
-
- /// @returns the type manager
- TypeManager& type_manager() { return ty_; }
-
- /// Logs failure, ands return a failure stream to accumulate diagnostic
- /// messages. By convention, a failure should only be logged along with
- /// a non-empty string diagnostic.
- /// @returns the failure stream
- FailStream& Fail() {
- success_ = false;
- return fail_stream_;
- }
-
- /// @return true if failure has not yet occurred
- bool success() const { return success_; }
-
- /// @returns the accumulated error string
- const std::string error() { return errors_.str(); }
-
- /// Builds an internal representation of the SPIR-V binary,
- /// and parses it into a Tint AST module. Diagnostics are emitted
- /// to the error stream.
- /// @returns true if it was successful.
- bool BuildAndParseInternalModule() {
- return BuildInternalModule() && ParseInternalModule();
- }
- /// Builds an internal representation of the SPIR-V binary,
- /// and parses the module, except functions, into a Tint AST module.
- /// Diagnostics are emitted to the error stream.
- /// @returns true if it was successful.
- bool BuildAndParseInternalModuleExceptFunctions() {
- return BuildInternalModule() && ParseInternalModuleExceptFunctions();
- }
-
- /// @returns the set of SPIR-V IDs for imports of the "GLSL.std.450"
- /// extended instruction set.
- const std::unordered_set<uint32_t>& glsl_std_450_imports() const {
- return glsl_std_450_imports_;
- }
-
- /// Desired handling of SPIR-V pointers by ConvertType()
- enum class PtrAs {
- // SPIR-V pointer is converted to a spirv::Pointer
- Ptr,
- // SPIR-V pointer is converted to a spirv::Reference
- Ref
- };
-
- /// Converts a SPIR-V type to a Tint type, and saves it for fast lookup.
- /// If the type is only used for builtins, then register that specially,
- /// and return null. If the type is a sampler, image, or sampled image, then
- /// return the Void type, because those opaque types are handled in a
- /// different way.
- /// On failure, logs an error and returns null. This should only be called
- /// after the internal representation of the module has been built.
- /// @param type_id the SPIR-V ID of a type.
- /// @param ptr_as if the SPIR-V type is a pointer and ptr_as is equal to
- /// PtrAs::Ref then a Reference will be returned, otherwise a Pointer will be
- /// returned for a SPIR-V pointer
- /// @returns a Tint type, or nullptr
- const Type* ConvertType(uint32_t type_id, PtrAs ptr_as = PtrAs::Ptr);
-
- /// Emits an alias type declaration for array or runtime-sized array type,
- /// when needed to distinguish between differently-decorated underlying types.
- /// Updates the mapping of the SPIR-V type ID to the alias type.
- /// This is a no-op if the parser has already failed.
- /// @param type_id the SPIR-V ID for the type
- /// @param type the type that might get an alias
- /// @param ast_type the ast type that might get an alias
- /// @returns an alias type or `ast_type` if no alias was created
- const Type* MaybeGenerateAlias(uint32_t type_id,
- const spvtools::opt::analysis::Type* type,
- const Type* ast_type);
-
- /// Adds `decl` as a declared type if it hasn't been added yet.
- /// @param name the type's unique name
- /// @param decl the type declaration to add
- void AddTypeDecl(Symbol name, const ast::TypeDecl* decl);
-
- /// @returns the fail stream object
- FailStream& fail_stream() { return fail_stream_; }
- /// @returns the namer object
- Namer& namer() { return namer_; }
- /// @returns a borrowed pointer to the internal representation of the module.
- /// This is null until BuildInternalModule has been called.
- spvtools::opt::IRContext* ir_context() { return ir_context_.get(); }
-
- /// Gets the list of unique decorations for a SPIR-V result ID. Returns an
- /// empty vector if the ID is not a result ID, or if no decorations target
- /// that ID. The internal representation must have already been built.
- /// Ignores decorations that have no effect in graphics APIs, e.g. Restrict
- /// and RestrictPointer.
- /// @param id SPIR-V ID
- /// @returns the list of decorations on the given ID
- DecorationList GetDecorationsFor(uint32_t id) const;
- /// Gets the list of unique decorations for the member of a struct. Returns
- /// an empty list if the `id` is not the ID of a struct, or if the member
- /// index is out of range, or if the target member has no decorations. The
- /// internal representation must have already been built.
- /// Ignores decorations that have no effect in graphics APIs, e.g. Restrict
- /// and RestrictPointer.
- /// @param id SPIR-V ID of a struct
- /// @param member_index the member within the struct
- /// @returns the list of decorations on the member
- DecorationList GetDecorationsForMember(uint32_t id,
- uint32_t member_index) const;
-
- /// Converts SPIR-V decorations for the variable with the given ID.
- /// Registers the IDs of variables that require special handling by code
- /// generation. If the WGSL type differs from the store type for SPIR-V,
- /// then the `type` parameter is updated. Returns false on failure (with
- /// a diagnostic), or when the variable should not be emitted, e.g. for a
- /// PointSize builtin.
- /// @param id the ID of the SPIR-V variable
- /// @param store_type the WGSL store type for the variable, which should be
- /// prepopulatd
- /// @param attributes the attribute list to populate
- /// @param transfer_pipeline_io true if pipeline IO decorations (builtins,
- /// or locations) will update the store type and the decorations list
- /// @returns false when the variable should not be emitted as a variable
- bool ConvertDecorationsForVariable(uint32_t id,
- const Type** store_type,
- ast::AttributeList* attributes,
- bool transfer_pipeline_io);
-
- /// Converts SPIR-V decorations for pipeline IO into AST decorations.
- /// @param store_type the store type for the variable or member
- /// @param decorations the SPIR-V interpolation decorations
- /// @param attributes the attribute list to populate.
- /// @returns false if conversion fails
- bool ConvertPipelineDecorations(const Type* store_type,
- const DecorationList& decorations,
- ast::AttributeList* attributes);
-
- /// Updates the attribute list, placing a non-null location decoration into
- /// the list, replacing an existing one if it exists. Does nothing if the
- /// replacement is nullptr.
- /// Assumes the list contains at most one Location decoration.
- /// @param decos the attribute list to modify
- /// @param replacement the location decoration to place into the list
- /// @returns the location decoration that was replaced, if one was replaced,
- /// or null otherwise.
- const ast::Attribute* SetLocation(ast::AttributeList* decos,
- const ast::Attribute* replacement);
-
- /// Converts a SPIR-V struct member decoration into a number of AST
- /// decorations. If the decoration is recognized but deliberately dropped,
- /// then returns an empty list without a diagnostic. On failure, emits a
- /// diagnostic and returns an empty list.
- /// @param struct_type_id the ID of the struct type
- /// @param member_index the index of the member
- /// @param member_ty the type of the member
- /// @param decoration an encoded SPIR-V Decoration
- /// @returns the AST decorations
- ast::AttributeList ConvertMemberDecoration(uint32_t struct_type_id,
- uint32_t member_index,
- const Type* member_ty,
- const Decoration& decoration);
-
- /// Returns a string for the given type. If the type ID is invalid,
- /// then the resulting string only names the type ID.
- /// @param type_id the SPIR-V ID for the type
- /// @returns a string description of the type.
- std::string ShowType(uint32_t type_id);
-
- /// Builds the internal representation of the SPIR-V module.
- /// Assumes the module is somewhat well-formed. Normally you
- /// would want to validate the SPIR-V module before attempting
- /// to build this internal representation. Also computes a topological
- /// ordering of the functions.
- /// This is a no-op if the parser has already failed.
- /// @returns true if the parser is still successful.
- bool BuildInternalModule();
-
- /// Walks the internal representation of the module to populate
- /// the AST form of the module.
- /// This is a no-op if the parser has already failed.
- /// @returns true if the parser is still successful.
- bool ParseInternalModule();
-
- /// Records line numbers for each instruction.
- void RegisterLineNumbers();
-
- /// Walks the internal representation of the module, except for function
- /// definitions, to populate the AST form of the module.
- /// This is a no-op if the parser has already failed.
- /// @returns true if the parser is still successful.
- bool ParseInternalModuleExceptFunctions();
-
- /// Destroys the internal representation of the SPIR-V module.
- void ResetInternalModule();
-
- /// Registers extended instruction imports. Only "GLSL.std.450" is supported.
- /// This is a no-op if the parser has already failed.
- /// @returns true if parser is still successful.
- bool RegisterExtendedInstructionImports();
-
- /// Returns true when the given instruction is an extended instruction
- /// for GLSL.std.450.
- /// @param inst a SPIR-V instruction
- /// @returns true if it's an SpvOpExtInst for GLSL.std.450
- bool IsGlslExtendedInstruction(const spvtools::opt::Instruction& inst) const;
-
- /// Returns true when the given instruction is an extended instruction
- /// from an ignored extended instruction set.
- /// @param inst a SPIR-V instruction
- /// @returns true if it's an SpvOpExtInst for an ignored extended instruction
- bool IsIgnoredExtendedInstruction(
- const spvtools::opt::Instruction& inst) const;
-
- /// Registers user names for SPIR-V objects, from OpName, and OpMemberName.
- /// Also synthesizes struct field names. Ensures uniqueness for names for
- /// SPIR-V IDs, and uniqueness of names of fields within any single struct.
- /// This is a no-op if the parser has already failed.
- /// @returns true if parser is still successful.
- bool RegisterUserAndStructMemberNames();
-
- /// Register the WorkgroupSize builtin and its associated constant value.
- /// @returns true if parser is still successful.
- bool RegisterWorkgroupSizeBuiltin();
-
- /// @returns the workgroup size builtin
- const WorkgroupSizeInfo& workgroup_size_builtin() {
- return workgroup_size_builtin_;
- }
-
- /// Register entry point information.
- /// This is a no-op if the parser has already failed.
- /// @returns true if parser is still successful.
- bool RegisterEntryPoints();
-
- /// Register Tint AST types for SPIR-V types, including type aliases as
- /// needed. This is a no-op if the parser has already failed.
- /// @returns true if parser is still successful.
- bool RegisterTypes();
-
- /// Fail if there are any module-scope pointer values other than those
- /// declared by OpVariable.
- /// @returns true if parser is still successful.
- bool RejectInvalidPointerRoots();
-
- /// Register sampler and texture usage for memory object declarations.
- /// This must be called after we've registered line numbers for all
- /// instructions. This is a no-op if the parser has already failed.
- /// @returns true if parser is still successful.
- bool RegisterHandleUsage();
-
- /// Emit const definitions for scalar specialization constants generated
- /// by one of OpConstantTrue, OpConstantFalse, or OpSpecConstant.
- /// This is a no-op if the parser has already failed.
- /// @returns true if parser is still successful.
- bool EmitScalarSpecConstants();
-
- /// Emits module-scope variables.
- /// This is a no-op if the parser has already failed.
- /// @returns true if parser is still successful.
- bool EmitModuleScopeVariables();
-
- /// Emits functions, with callees preceding their callers.
- /// This is a no-op if the parser has already failed.
- /// @returns true if parser is still successful.
- bool EmitFunctions();
-
- /// Emits a single function, if it has a body.
- /// This is a no-op if the parser has already failed.
- /// @param f the function to emit
- /// @returns true if parser is still successful.
- bool EmitFunction(const spvtools::opt::Function& f);
-
- /// Returns the integer constant for the array size of the given variable.
- /// @param var_id SPIR-V ID for an array variable
- /// @returns the integer constant for its array size, or nullptr.
- const spvtools::opt::analysis::IntConstant* GetArraySize(uint32_t var_id);
-
- /// Returns the member name for the struct member.
- /// @param struct_type the parser's structure type.
- /// @param member_index the member index
- /// @returns the field name
- std::string GetMemberName(const Struct& struct_type, int member_index);
-
- /// Returns the SPIR-V decorations for pipeline IO, if any, on a struct
- /// member.
- /// @param struct_type the parser's structure type.
- /// @param member_index the member index
- /// @returns a list of SPIR-V decorations.
- DecorationList GetMemberPipelineDecorations(const Struct& struct_type,
- int member_index);
-
- /// Creates an AST Variable node for a SPIR-V ID, including any attached
- /// decorations, unless it's an ignorable builtin variable.
- /// @param id the SPIR-V result ID
- /// @param sc the storage class, which cannot be ast::StorageClass::kNone
- /// @param storage_type the storage type of the variable
- /// @param is_const if true, the variable is const
- /// @param is_overridable if true, the variable is pipeline-overridable
- /// @param constructor the variable constructor
- /// @param decorations the variable decorations
- /// @returns a new Variable node, or null in the ignorable variable case and
- /// in the error case
- ast::Variable* MakeVariable(uint32_t id,
- ast::StorageClass sc,
- const Type* storage_type,
- bool is_const,
- bool is_overridable,
- const ast::Expression* constructor,
- ast::AttributeList decorations);
-
- /// Returns true if a constant expression can be generated.
- /// @param id the SPIR-V ID of the value
- /// @returns true if a constant expression can be generated
- bool CanMakeConstantExpression(uint32_t id);
-
- /// Creates an AST expression node for a SPIR-V ID. This is valid to call
- /// when `CanMakeConstantExpression` returns true.
- /// @param id the SPIR-V ID of the constant
- /// @returns a new expression
- TypedExpression MakeConstantExpression(uint32_t id);
-
- /// Creates an AST expression node for a scalar SPIR-V constant.
- /// @param source the source location
- /// @param ast_type the AST type for the value
- /// @param spirv_const the internal representation of the SPIR-V constant.
- /// @returns a new expression
- TypedExpression MakeConstantExpressionForScalarSpirvConstant(
- Source source,
- const Type* ast_type,
- const spvtools::opt::analysis::Constant* spirv_const);
-
- /// Creates an AST expression node for the null value for the given type.
- /// @param type the AST type
- /// @returns a new expression
- const ast::Expression* MakeNullValue(const Type* type);
-
- /// Make a typed expression for the null value for the given type.
- /// @param type the AST type
- /// @returns a new typed expression
- TypedExpression MakeNullExpression(const Type* type);
-
- /// Converts a given expression to the signedness demanded for an operand
- /// of the given SPIR-V instruction, if required. If the instruction assumes
- /// signed integer operands, and `expr` is unsigned, then return an
- /// as-cast expression converting it to signed. Otherwise, return
- /// `expr` itself. Similarly, convert as required from signed
- /// to unsigned. Assumes all SPIR-V types have been mapped to AST types.
- /// @param inst the SPIR-V instruction
- /// @param expr an expression
- /// @returns expr, or a cast of expr
- TypedExpression RectifyOperandSignedness(
- const spvtools::opt::Instruction& inst,
- TypedExpression&& expr);
-
- /// Converts a second operand to the signedness of the first operand
- /// of a binary operator, if the WGSL operator requires they be the same.
- /// Returns the converted expression, or the original expression if the
- /// conversion is not needed.
- /// @param inst the SPIR-V instruction
- /// @param first_operand_type the type of the first operand to the instruction
- /// @param second_operand_expr the second operand of the instruction
- /// @returns second_operand_expr, or a cast of it
- TypedExpression RectifySecondOperandSignedness(
- const spvtools::opt::Instruction& inst,
- const Type* first_operand_type,
- TypedExpression&& second_operand_expr);
-
- /// Returns the "forced" result type for the given SPIR-V instruction.
- /// If the WGSL result type for an operation has a more strict rule than
- /// required by SPIR-V, then we say the result type is "forced". This occurs
- /// for signed integer division (OpSDiv), for example, where the result type
- /// in WGSL must match the operand types.
- /// @param inst the SPIR-V instruction
- /// @param first_operand_type the AST type for the first operand.
- /// @returns the forced AST result type, or nullptr if no forcing is required.
- const Type* ForcedResultType(const spvtools::opt::Instruction& inst,
- const Type* first_operand_type);
-
- /// Returns a signed integer scalar or vector type matching the shape (scalar,
- /// vector, and component bit width) of another type, which itself is a
- /// numeric scalar or vector. Returns null if the other type does not meet the
- /// requirement.
- /// @param other the type whose shape must be matched
- /// @returns the signed scalar or vector type
- const Type* GetSignedIntMatchingShape(const Type* other);
-
- /// Returns an unsigned integer scalar or vector type matching the shape (scalar,
- /// vector, and component bit width) of another type, which itself is a
- /// numeric scalar or vector. Returns null if the other type does not meet the
- /// requirement.
- /// @param other the type whose shape must be matched
- /// @returns the unsigned scalar or vector type
- const Type* GetUnsignedIntMatchingShape(const Type* other);
-
- /// Wraps the given expression in an as-cast to the given expression's type,
- /// when the underlying operation produces a forced result type different
- /// from the expression's result type. Otherwise, returns the given expression
- /// unchanged.
- /// @param expr the expression to pass through or to wrap
- /// @param inst the SPIR-V instruction
- /// @param first_operand_type the AST type for the first operand.
- /// @returns the forced AST result type, or nullptr if no forcing is required.
- TypedExpression RectifyForcedResultType(
- TypedExpression expr,
- const spvtools::opt::Instruction& inst,
- const Type* first_operand_type);
-
- /// Returns the given expression, but ensuring it's an unsigned type of the
- /// same shape as the operand. Wraps the expression with a bitcast if needed.
- /// Assumes the given expression is an integer scalar or vector.
- /// @param expr an integer scalar or integer vector expression.
- /// @return the potentially cast TypedExpression
- TypedExpression AsUnsigned(TypedExpression expr);
-
- /// Returns the given expression, but ensuring it's a signed type of the
- /// same shape as the operand. Wraps the expression with a bitcast if needed.
- /// Assumes the given expression is an integer scalar or vector.
- /// @param expr an integer scalar or integer vector expression.
- /// @return the potentially cast TypedExpression
- TypedExpression AsSigned(TypedExpression expr);
-
- /// Bookkeeping used for tracking the "position" builtin variable.
- struct BuiltInPositionInfo {
- /// The ID for the gl_PerVertex struct containing the Position builtin.
- uint32_t struct_type_id = 0;
- /// The member index for the Position builtin within the struct.
- uint32_t position_member_index = 0;
- /// The member index for the PointSize builtin within the struct.
- uint32_t pointsize_member_index = 0;
- /// The ID for the member type, which should map to vec4<f32>.
- uint32_t position_member_type_id = 0;
- /// The ID of the type of a pointer to the struct in the Output storage
- /// class class.
- uint32_t pointer_type_id = 0;
- /// The SPIR-V storage class.
- SpvStorageClass storage_class = SpvStorageClassOutput;
- /// The ID of the type of a pointer to the Position member.
- uint32_t position_member_pointer_type_id = 0;
- /// The ID of the gl_PerVertex variable, if it was declared.
- /// We'll use this for the gl_Position variable instead.
- uint32_t per_vertex_var_id = 0;
- /// The ID of the initializer to gl_PerVertex, if any.
- uint32_t per_vertex_var_init_id = 0;
- };
- /// @returns info about the gl_Position builtin variable.
- const BuiltInPositionInfo& GetBuiltInPositionInfo() {
- return builtin_position_;
- }
-
- /// Returns the source record for the SPIR-V instruction with the given
- /// result ID.
- /// @param id the SPIR-V result id.
- /// @return the Source record, or a default one
- Source GetSourceForResultIdForTest(uint32_t id) const;
- /// Returns the source record for the given instruction.
- /// @param inst the SPIR-V instruction
- /// @return the Source record, or a default one
- Source GetSourceForInst(const spvtools::opt::Instruction* inst) const;
-
- /// @param str a candidate identifier
- /// @returns true if the given string is a valid WGSL identifier.
- static bool IsValidIdentifier(const std::string& str);
-
- /// Returns true if the given SPIR-V ID is a declared specialization constant,
- /// generated by one of OpConstantTrue, OpConstantFalse, or OpSpecConstant
- /// @param id a SPIR-V result ID
- /// @returns true if the ID is a scalar spec constant.
- bool IsScalarSpecConstant(uint32_t id) {
- return scalar_spec_constants_.find(id) != scalar_spec_constants_.end();
- }
-
- /// For a SPIR-V ID that might define a sampler, image, or sampled image
- /// value, return the SPIR-V instruction that represents the memory object
- /// declaration for the object. If we encounter an OpSampledImage along the
- /// way, follow the image operand when follow_image is true; otherwise follow
- /// the sampler operand. Returns nullptr if we can't trace back to a memory
- /// object declaration. Emits an error and returns nullptr when the scan
- /// fails due to a malformed module. This method can be used any time after
- /// BuildInternalModule has been invoked.
- /// @param id the SPIR-V ID of the sampler, image, or sampled image
- /// @param follow_image indicates whether to follow the image operand of
- /// OpSampledImage
- /// @returns the memory object declaration for the handle, or nullptr
- const spvtools::opt::Instruction* GetMemoryObjectDeclarationForHandle(
- uint32_t id,
- bool follow_image);
-
- /// Returns the handle usage for a memory object declaration.
- /// @param id SPIR-V ID of a sampler or image OpVariable or
- /// OpFunctionParameter
- /// @returns the handle usage, or an empty usage object.
- Usage GetHandleUsage(uint32_t id) const;
-
- /// Returns the SPIR-V type for the sampler or image type for the given
- /// variable in UniformConstant storage class, or function parameter pointing
- /// into the UniformConstant storage class. Returns null and emits an
- /// error on failure.
- /// @param var the OpVariable instruction or OpFunctionParameter
- /// @returns the SPIR-V type instruction for the sampler or texture, or null on error
- const spvtools::opt::Instruction*
- GetSpirvTypeForHandleMemoryObjectDeclaration(
- const spvtools::opt::Instruction& var);
-
- /// Returns the AST type for the pointer-to-sampler or pointer-to-texture type
- /// for the given variable in UniformConstant storage class. Returns null and
- /// emits an error on failure.
- /// @param var the OpVariable instruction
- /// @returns the Tint AST type for the pointer-to-{sampler|texture} or null on
- /// error
- const Pointer* GetTypeForHandleVar(const spvtools::opt::Instruction& var);
-
- /// Returns the channel component type corresponding to the given image
- /// format.
- /// @param format image texel format
- /// @returns the component type, one of f32, i32, u32
- const Type* GetComponentTypeForFormat(ast::TexelFormat format);
-
- /// Returns the number of channels in the given image format.
- /// @param format image texel format
- /// @returns the number of channels in the format
- unsigned GetChannelCountForFormat(ast::TexelFormat format);
-
- /// Returns the texel type corresponding to the given image format.
- /// This is the WGSL type used for the texel parameter to textureStore.
- /// It's always a 4-element vector.
- /// @param format image texel format
- /// @returns the texel format
- const Type* GetTexelTypeForFormat(ast::TexelFormat format);
-
- /// Returns the SPIR-V instruction with the given ID, or nullptr.
- /// @param id the SPIR-V result ID
- /// @returns the instruction, or nullptr on error
- const spvtools::opt::Instruction* GetInstructionForTest(uint32_t id) const;
-
- /// A map of SPIR-V identifiers to builtins
- using BuiltInsMap = std::unordered_map<uint32_t, SpvBuiltIn>;
-
- /// @returns a map of builtins that should be handled specially by code
- /// generation. Either the builtin does not exist in WGSL, or a type
- /// conversion must be implemented on load and store.
- const BuiltInsMap& special_builtins() const { return special_builtins_; }
-
- /// @param builtin the SPIR-V builtin variable kind
- /// @returns the SPIR-V ID for the variable defining the given builtin, or 0
- uint32_t IdForSpecialBuiltIn(SpvBuiltIn builtin) const {
- // Do a linear search.
- for (const auto& entry : special_builtins_) {
- if (entry.second == builtin) {
- return entry.first;
- }
+ public:
+ /// Creates a new parser
+ /// @param input the input data to parse
+ explicit ParserImpl(const std::vector<uint32_t>& input);
+ /// Destructor
+ ~ParserImpl() override;
+
+ /// Run the parser
+ /// @returns true if the parse was successful, false otherwise.
+ bool Parse() override;
+
+ /// @returns the program. The program builder in the parser will be reset
+ /// after this.
+ Program program() override;
+
+ /// @returns a reference to the internal builder, without building the
+ /// program. To be used only for testing.
+ ProgramBuilder& builder() { return builder_; }
+
+ /// @returns the type manager
+ TypeManager& type_manager() { return ty_; }
+
+ /// Logs failure, and returns a failure stream to accumulate diagnostic
+ /// messages. By convention, a failure should only be logged along with
+ /// a non-empty string diagnostic.
+ /// @returns the failure stream
+ FailStream& Fail() {
+ success_ = false;
+ return fail_stream_;
}
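
Failure reporting is stream-based: Fail() flips success_ and hands back the stream, so a call site logs its diagnostic and, since the stream converts to the current success flag, can bail out of a bool-returning pass in one expression. Two uses already present in the .cc hunk above show the idiom:

    // From GetComponentTypeForFormat:
    Fail() << "unknown format " << int(format);
    return nullptr;

    // From RegisterHandleUsage, inside a bool-returning pass:
    return Fail() << "Call instruction must have at least one operand"
                  << inst.PrettyPrint();
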
- return 0;
- }
-
- /// @param entry_point the SPIR-V ID of an entry point.
- /// @returns the entry point info for the given ID
- const std::vector<EntryPointInfo>& GetEntryPointInfo(uint32_t entry_point) {
- return function_to_ep_info_[entry_point];
- }
-
- /// @returns the SPIR-V binary.
- const std::vector<uint32_t>& spv_binary() { return spv_binary_; }
-
- private:
- /// Converts a specific SPIR-V type to a Tint type. Integer case
- const Type* ConvertType(const spvtools::opt::analysis::Integer* int_ty);
- /// Converts a specific SPIR-V type to a Tint type. Float case
- const Type* ConvertType(const spvtools::opt::analysis::Float* float_ty);
- /// Converts a specific SPIR-V type to a Tint type. Vector case
- const Type* ConvertType(const spvtools::opt::analysis::Vector* vec_ty);
- /// Converts a specific SPIR-V type to a Tint type. Matrix case
- const Type* ConvertType(const spvtools::opt::analysis::Matrix* mat_ty);
- /// Converts a specific SPIR-V type to a Tint type. RuntimeArray case
- /// Distinct SPIR-V array types map to distinct Tint array types.
- /// @param rtarr_ty the Tint type
- const Type* ConvertType(
- uint32_t type_id,
- const spvtools::opt::analysis::RuntimeArray* rtarr_ty);
- /// Converts a specific SPIR-V type to a Tint type. Array case
- /// Distinct SPIR-V array types map to distinct Tint array types.
- /// @param arr_ty the Tint type
- const Type* ConvertType(uint32_t type_id,
- const spvtools::opt::analysis::Array* arr_ty);
- /// Converts a specific SPIR-V type to a Tint type. Struct case.
- /// SPIR-V allows distinct struct type definitions for two OpTypeStruct
- /// that otherwise have the same set of members (and struct and member
- /// decorations). However, the SPIRV-Tools always produces a unique
- /// `spvtools::opt::analysis::Struct` object in these cases. For this type
- /// conversion, we need to have the original SPIR-V ID because we can't always
- /// recover it from the optimizer's struct type object. This also lets us
- /// preserve member names, which are given by OpMemberName which is normally
- /// not significant to the optimizer's module representation.
- /// @param type_id the SPIR-V ID for the type.
- /// @param struct_ty the Tint type
- const Type* ConvertType(uint32_t type_id,
- const spvtools::opt::analysis::Struct* struct_ty);
- /// Converts a specific SPIR-V type to a Tint type. Pointer / Reference case
- /// The pointer to gl_PerVertex maps to nullptr, and instead is recorded
- /// in member #builtin_position_.
- /// @param type_id the SPIR-V ID for the type.
- /// @param ptr_as if PtrAs::Ref then a Reference will be returned, otherwise
- /// Pointer
- /// @param ptr_ty the Tint type
- const Type* ConvertType(uint32_t type_id,
- PtrAs ptr_as,
- const spvtools::opt::analysis::Pointer* ptr_ty);
-
- /// If `type` is a signed integral, or vector of signed integral,
- /// returns the unsigned type, otherwise returns `type`.
- /// @param type the possibly signed type
- /// @returns the unsigned type
- const Type* UnsignedTypeFor(const Type* type);
-
- /// If `type` is a unsigned integral, or vector of unsigned integral,
- /// returns the signed type, otherwise returns `type`.
- /// @param type the possibly unsigned type
- /// @returns the signed type
- const Type* SignedTypeFor(const Type* type);
-
- /// Parses the array or runtime-array decorations. Sets 0 if no explicit
- /// stride was found, and therefore the implicit stride should be used.
- /// @param spv_type the SPIR-V array or runtime-array type.
- /// @param array_stride pointer to the array stride
- /// @returns true on success.
- bool ParseArrayDecorations(const spvtools::opt::analysis::Type* spv_type,
- uint32_t* array_stride);
-
- /// Creates a new `ast::Node` owned by the ProgramBuilder.
- /// @param args the arguments to pass to the type constructor
- /// @returns the node pointer
- template <typename T, typename... ARGS>
- T* create(ARGS&&... args) {
- return builder_.create<T>(std::forward<ARGS>(args)...);
- }
-
- // The SPIR-V binary we're parsing
- std::vector<uint32_t> spv_binary_;
-
- // The program builder.
- ProgramBuilder builder_;
-
- // The type manager.
- TypeManager ty_;
-
- // Is the parse successful?
- bool success_ = true;
- // Collector for diagnostic messages.
- std::stringstream errors_;
- FailStream fail_stream_;
- spvtools::MessageConsumer message_consumer_;
-
- // An object used to store and generate names for SPIR-V objects.
- Namer namer_;
- // An object used to convert SPIR-V enums to Tint enums
- EnumConverter enum_converter_;
-
- // The internal representation of the SPIR-V module and its context.
- spvtools::Context tools_context_;
- // All the state is owned by ir_context_.
- std::unique_ptr<spvtools::opt::IRContext> ir_context_;
- // The following are borrowed pointers to the internal state of ir_context_.
- spvtools::opt::Module* module_ = nullptr;
- spvtools::opt::analysis::DefUseManager* def_use_mgr_ = nullptr;
- spvtools::opt::analysis::ConstantManager* constant_mgr_ = nullptr;
- spvtools::opt::analysis::TypeManager* type_mgr_ = nullptr;
- spvtools::opt::analysis::DecorationManager* deco_mgr_ = nullptr;
-
- // The functions ordered so that callees precede their callers.
- std::vector<const spvtools::opt::Function*> topologically_ordered_functions_;
-
- // Maps an instruction to its source location. If no OpLine information
- // is in effect for the instruction, map the instruction to its position
- // in the SPIR-V module, counting by instructions, where the first
- // instruction is line 1.
- std::unordered_map<const spvtools::opt::Instruction*, Source::Location>
- inst_source_;
-
- // The set of IDs that are imports of the GLSL.std.450 extended instruction
- // sets.
- std::unordered_set<uint32_t> glsl_std_450_imports_;
- // The set of IDs of imports that are ignored. For example, any
- // "NonSemanticInfo." import is ignored.
- std::unordered_set<uint32_t> ignored_imports_;
-
- // The SPIR-V IDs of structure types that are the store type for buffer
- // variables, either UBO or SSBO.
- std::unordered_set<uint32_t> struct_types_for_buffers_;
-
- // Bookkeeping for the gl_Position builtin.
- // In Vulkan SPIR-V, it's the 0 member of the gl_PerVertex structure.
- // But in WGSL we make a module-scope variable:
- // [[position]] var<in> gl_Position : vec4<f32>;
- // The builtin variable was detected if and only if the struct_id is non-zero.
- BuiltInPositionInfo builtin_position_;
-
- // SPIR-V type IDs that are either:
- // - a struct type decorated by BufferBlock
- // - an array, runtime array containing one of these
- // - a pointer type to one of these
- // These are the types "enclosing" a buffer block with the old style
- // representation: using Uniform storage class and BufferBlock decoration
- // on the struct. The new style is to use the StorageBuffer storage class
- // and Block decoration.
- std::unordered_set<uint32_t> remap_buffer_block_type_;
-
- // The ast::Struct type names with only read-only members.
- std::unordered_set<Symbol> read_only_struct_types_;
-
- // The IDs of scalar spec constants
- std::unordered_set<uint32_t> scalar_spec_constants_;
-
- // Maps function_id to a list of entrypoint information
- std::unordered_map<uint32_t, std::vector<EntryPointInfo>>
- function_to_ep_info_;
-
- // Maps from a SPIR-V ID to its underlying memory object declaration,
- // following image paths. This is a memoization table for
- // GetMemoryObjectDeclarationForHandle. (A SPIR-V memory object declaration is
- // an OpVariable or an OpFunctionParameter with pointer type).
- std::unordered_map<uint32_t, const spvtools::opt::Instruction*>
- mem_obj_decl_image_;
- // Maps from a SPIR-V ID to its underlying memory object declaration,
- // following sampler paths. This is a memoization table for
- // GetMemoryObjectDeclarationForHandle.
- std::unordered_map<uint32_t, const spvtools::opt::Instruction*>
- mem_obj_decl_sampler_;
-
- // Maps a memory-object-declaration instruction to any sampler or texture
- // usages implied by usages of the memory-object-declaration.
- std::unordered_map<const spvtools::opt::Instruction*, Usage> handle_usage_;
- // The inferred pointer type for the given handle variable.
- std::unordered_map<const spvtools::opt::Instruction*, const Pointer*>
- handle_type_;
-
- // Set of symbols of declared type that have been added, used to avoid
- // adding duplicates.
- std::unordered_set<Symbol> declared_types_;
-
- // Maps a struct type name to the SPIR-V ID for the structure type.
- std::unordered_map<Symbol, uint32_t> struct_id_for_symbol_;
-
- /// Maps the SPIR-V ID of a module-scope builtin variable that should be
- /// ignored or type-converted, to its builtin kind.
- /// See also BuiltInPositionInfo which is a separate mechanism for a more
- /// complex case of replacing an entire structure.
- BuiltInsMap special_builtins_;
-
- /// Info about the WorkgroupSize builtin. If it's not present, then the 'id'
- /// field will be 0. Sadly, in SPIR-V right now, there's only one workgroup
- /// size object in the module.
- WorkgroupSizeInfo workgroup_size_builtin_;
+
+ /// @return true if failure has not yet occurred
+ bool success() const { return success_; }
+
+ /// @returns the accumulated error string
+ const std::string error() { return errors_.str(); }
+
+ /// Builds an internal representation of the SPIR-V binary,
+ /// and parses it into a Tint AST module. Diagnostics are emitted
+ /// to the error stream.
+ /// @returns true if it was successful.
+ bool BuildAndParseInternalModule() { return BuildInternalModule() && ParseInternalModule(); }
+ /// Builds an internal representation of the SPIR-V binary,
+ /// and parses the module, except functions, into a Tint AST module.
+ /// Diagnostics are emitted to the error stream.
+ /// @returns true if it was successful.
+ bool BuildAndParseInternalModuleExceptFunctions() {
+ return BuildInternalModule() && ParseInternalModuleExceptFunctions();
+ }
+
+ /// @returns the set of SPIR-V IDs for imports of the "GLSL.std.450"
+ /// extended instruction set.
+ const std::unordered_set<uint32_t>& glsl_std_450_imports() const {
+ return glsl_std_450_imports_;
+ }
+
+ /// Desired handling of SPIR-V pointers by ConvertType()
+ enum class PtrAs {
+ // SPIR-V pointer is converted to a spirv::Pointer
+ Ptr,
+ // SPIR-V pointer is converted to a spirv::Reference
+ Ref
+ };
+
+ /// Converts a SPIR-V type to a Tint type, and saves it for fast lookup.
+ /// If the type is only used for builtins, then register that specially,
+ /// and return null. If the type is a sampler, image, or sampled image, then
+ /// return the Void type, because those opaque types are handled in a
+ /// different way.
+ /// On failure, logs an error and returns null. This should only be called
+ /// after the internal representation of the module has been built.
+ /// @param type_id the SPIR-V ID of a type.
+ /// @param ptr_as if the SPIR-V type is a pointer and ptr_as is equal to
+ /// PtrAs::Ref then a Reference will be returned, otherwise a Pointer will be
+ /// returned for a SPIR-V pointer
+ /// @returns a Tint type, or nullptr
+ const Type* ConvertType(uint32_t type_id, PtrAs ptr_as = PtrAs::Ptr);
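
A hedged usage sketch of the PtrAs switch above; 'parser' is assumed to be a ParserImpl whose internal module has been built, and 'ptr_type_id' a SPIR-V pointer type ID (both names are illustrative, not taken from the source):

    const Type* as_ptr = parser.ConvertType(ptr_type_id);                          // spirv::Pointer
    const Type* as_ref = parser.ConvertType(ptr_type_id, ParserImpl::PtrAs::Ref);  // spirv::Reference
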
+
+ /// Emits an alias type declaration for array or runtime-sized array type,
+ /// when needed to distinguish between differently-decorated underlying types.
+ /// Updates the mapping of the SPIR-V type ID to the alias type.
+ /// This is a no-op if the parser has already failed.
+ /// @param type_id the SPIR-V ID for the type
+ /// @param type the type that might get an alias
+ /// @param ast_type the ast type that might get an alias
+ /// @returns an alias type or `ast_type` if no alias was created
+ const Type* MaybeGenerateAlias(uint32_t type_id,
+ const spvtools::opt::analysis::Type* type,
+ const Type* ast_type);
+
+ /// Adds `decl` as a declared type if it hasn't been added yet.
+ /// @param name the type's unique name
+ /// @param decl the type declaration to add
+ void AddTypeDecl(Symbol name, const ast::TypeDecl* decl);
+
+ /// @returns the fail stream object
+ FailStream& fail_stream() { return fail_stream_; }
+ /// @returns the namer object
+ Namer& namer() { return namer_; }
+ /// @returns a borrowed pointer to the internal representation of the module.
+ /// This is null until BuildInternalModule has been called.
+ spvtools::opt::IRContext* ir_context() { return ir_context_.get(); }
+
+ /// Gets the list of unique decorations for a SPIR-V result ID. Returns an
+ /// empty vector if the ID is not a result ID, or if no decorations target
+ /// that ID. The internal representation must have already been built.
+ /// Ignores decorations that have no effect in graphics APIs, e.g. Restrict
+ /// and RestrictPointer.
+ /// @param id SPIR-V ID
+ /// @returns the list of decorations on the given ID
+ DecorationList GetDecorationsFor(uint32_t id) const;
+ /// Gets the list of unique decorations for the member of a struct. Returns
+ /// an empty list if the `id` is not the ID of a struct, or if the member
+ /// index is out of range, or if the target member has no decorations. The
+ /// internal representation must have already been built.
+ /// Ignores decorations that have no effect in graphics APIs, e.g. Restrict
+ /// and RestrictPointer.
+ /// @param id SPIR-V ID of a struct
+ /// @param member_index the member within the struct
+ /// @returns the list of decorations on the member
+ DecorationList GetDecorationsForMember(uint32_t id, uint32_t member_index) const;
+
+ /// Converts SPIR-V decorations for the variable with the given ID.
+ /// Registers the IDs of variables that require special handling by code
+ /// generation. If the WGSL type differs from the store type for SPIR-V,
+ /// then the `type` parameter is updated. Returns false on failure (with
+ /// a diagnostic), or when the variable should not be emitted, e.g. for a
+ /// PointSize builtin.
+ /// @param id the ID of the SPIR-V variable
+ /// @param store_type the WGSL store type for the variable, which should be
+    /// prepopulated
+ /// @param attributes the attribute list to populate
+ /// @param transfer_pipeline_io true if pipeline IO decorations (builtins,
+ /// or locations) will update the store type and the decorations list
+ /// @returns false when the variable should not be emitted as a variable
+ bool ConvertDecorationsForVariable(uint32_t id,
+ const Type** store_type,
+ ast::AttributeList* attributes,
+ bool transfer_pipeline_io);
+
+ /// Converts SPIR-V decorations for pipeline IO into AST decorations.
+ /// @param store_type the store type for the variable or member
+ /// @param decorations the SPIR-V interpolation decorations
+ /// @param attributes the attribute list to populate.
+ /// @returns false if conversion fails
+ bool ConvertPipelineDecorations(const Type* store_type,
+ const DecorationList& decorations,
+ ast::AttributeList* attributes);
+
+ /// Updates the attribute list, placing a non-null location decoration into
+ /// the list, replacing an existing one if it exists. Does nothing if the
+ /// replacement is nullptr.
+ /// Assumes the list contains at most one Location decoration.
+ /// @param decos the attribute list to modify
+ /// @param replacement the location decoration to place into the list
+ /// @returns the location decoration that was replaced, if one was replaced,
+ /// or null otherwise.
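+    ///
+    /// Sketch of intended use (names are placeholders):
+    ///   const ast::Attribute* prior = SetLocation(&attrs, new_location);
+    ///   // `prior` is the Location attribute that was replaced, or nullptr.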
+ const ast::Attribute* SetLocation(ast::AttributeList* decos, const ast::Attribute* replacement);
+
+ /// Converts a SPIR-V struct member decoration into a number of AST
+ /// decorations. If the decoration is recognized but deliberately dropped,
+ /// then returns an empty list without a diagnostic. On failure, emits a
+ /// diagnostic and returns an empty list.
+ /// @param struct_type_id the ID of the struct type
+ /// @param member_index the index of the member
+ /// @param member_ty the type of the member
+ /// @param decoration an encoded SPIR-V Decoration
+ /// @returns the AST decorations
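+    ///
+    /// Example drawn from the unit tests (illustrative):
+    ///   ConvertMemberDecoration(1, 1, nullptr, {SpvDecorationOffset, 8});
+    ///   // yields a single ast::StructMemberOffsetAttribute with offset 8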
+ ast::AttributeList ConvertMemberDecoration(uint32_t struct_type_id,
+ uint32_t member_index,
+ const Type* member_ty,
+ const Decoration& decoration);
+
+ /// Returns a string for the given type. If the type ID is invalid,
+ /// then the resulting string only names the type ID.
+ /// @param type_id the SPIR-V ID for the type
+ /// @returns a string description of the type.
+ std::string ShowType(uint32_t type_id);
+
+ /// Builds the internal representation of the SPIR-V module.
+ /// Assumes the module is somewhat well-formed. Normally you
+ /// would want to validate the SPIR-V module before attempting
+ /// to build this internal representation. Also computes a topological
+ /// ordering of the functions.
+ /// This is a no-op if the parser has already failed.
+ /// @returns true if the parser is still successful.
+ bool BuildInternalModule();
+
+ /// Walks the internal representation of the module to populate
+ /// the AST form of the module.
+ /// This is a no-op if the parser has already failed.
+ /// @returns true if the parser is still successful.
+ bool ParseInternalModule();
+
+ /// Records line numbers for each instruction.
+ void RegisterLineNumbers();
+
+ /// Walks the internal representation of the module, except for function
+ /// definitions, to populate the AST form of the module.
+ /// This is a no-op if the parser has already failed.
+ /// @returns true if the parser is still successful.
+ bool ParseInternalModuleExceptFunctions();
+
+ /// Destroys the internal representation of the SPIR-V module.
+ void ResetInternalModule();
+
+ /// Registers extended instruction imports. Only "GLSL.std.450" is supported.
+ /// This is a no-op if the parser has already failed.
+ /// @returns true if parser is still successful.
+ bool RegisterExtendedInstructionImports();
+
+ /// Returns true when the given instruction is an extended instruction
+ /// for GLSL.std.450.
+ /// @param inst a SPIR-V instruction
+    /// @returns true if it's an SpvOpExtInst for GLSL.std.450
+ bool IsGlslExtendedInstruction(const spvtools::opt::Instruction& inst) const;
+
+ /// Returns true when the given instruction is an extended instruction
+ /// from an ignored extended instruction set.
+ /// @param inst a SPIR-V instruction
+    /// @returns true if it's an SpvOpExtInst for an ignored extended instruction
+ bool IsIgnoredExtendedInstruction(const spvtools::opt::Instruction& inst) const;
+
+ /// Registers user names for SPIR-V objects, from OpName, and OpMemberName.
+ /// Also synthesizes struct field names. Ensures uniqueness for names for
+ /// SPIR-V IDs, and uniqueness of names of fields within any single struct.
+ /// This is a no-op if the parser has already failed.
+ /// @returns true if parser is still successful.
+ bool RegisterUserAndStructMemberNames();
+
+ /// Register the WorkgroupSize builtin and its associated constant value.
+ /// @returns true if parser is still successful.
+ bool RegisterWorkgroupSizeBuiltin();
+
+ /// @returns the workgroup size builtin
+ const WorkgroupSizeInfo& workgroup_size_builtin() { return workgroup_size_builtin_; }
+
+ /// Register entry point information.
+ /// This is a no-op if the parser has already failed.
+ /// @returns true if parser is still successful.
+ bool RegisterEntryPoints();
+
+ /// Register Tint AST types for SPIR-V types, including type aliases as
+ /// needed. This is a no-op if the parser has already failed.
+ /// @returns true if parser is still successful.
+ bool RegisterTypes();
+
+ /// Fail if there are any module-scope pointer values other than those
+ /// declared by OpVariable.
+ /// @returns true if parser is still successful.
+ bool RejectInvalidPointerRoots();
+
+ /// Register sampler and texture usage for memory object declarations.
+ /// This must be called after we've registered line numbers for all
+ /// instructions. This is a no-op if the parser has already failed.
+ /// @returns true if parser is still successful.
+ bool RegisterHandleUsage();
+
+ /// Emit const definitions for scalar specialization constants generated
+    /// by one of OpSpecConstantTrue, OpSpecConstantFalse, or OpSpecConstant.
+ /// This is a no-op if the parser has already failed.
+ /// @returns true if parser is still successful.
+ bool EmitScalarSpecConstants();
+
+ /// Emits module-scope variables.
+ /// This is a no-op if the parser has already failed.
+ /// @returns true if parser is still successful.
+ bool EmitModuleScopeVariables();
+
+ /// Emits functions, with callees preceding their callers.
+ /// This is a no-op if the parser has already failed.
+ /// @returns true if parser is still successful.
+ bool EmitFunctions();
+
+ /// Emits a single function, if it has a body.
+ /// This is a no-op if the parser has already failed.
+ /// @param f the function to emit
+ /// @returns true if parser is still successful.
+ bool EmitFunction(const spvtools::opt::Function& f);
+
+ /// Returns the integer constant for the array size of the given variable.
+ /// @param var_id SPIR-V ID for an array variable
+ /// @returns the integer constant for its array size, or nullptr.
+ const spvtools::opt::analysis::IntConstant* GetArraySize(uint32_t var_id);
+
+ /// Returns the member name for the struct member.
+ /// @param struct_type the parser's structure type.
+ /// @param member_index the member index
+ /// @returns the field name
+ std::string GetMemberName(const Struct& struct_type, int member_index);
+
+ /// Returns the SPIR-V decorations for pipeline IO, if any, on a struct
+ /// member.
+ /// @param struct_type the parser's structure type.
+ /// @param member_index the member index
+ /// @returns a list of SPIR-V decorations.
+ DecorationList GetMemberPipelineDecorations(const Struct& struct_type, int member_index);
+
+ /// Creates an AST Variable node for a SPIR-V ID, including any attached
+ /// decorations, unless it's an ignorable builtin variable.
+ /// @param id the SPIR-V result ID
+ /// @param sc the storage class, which cannot be ast::StorageClass::kNone
+ /// @param storage_type the storage type of the variable
+ /// @param is_const if true, the variable is const
+ /// @param is_overridable if true, the variable is pipeline-overridable
+ /// @param constructor the variable constructor
+ /// @param decorations the variable decorations
+ /// @returns a new Variable node, or null in the ignorable variable case and
+ /// in the error case
+ ast::Variable* MakeVariable(uint32_t id,
+ ast::StorageClass sc,
+ const Type* storage_type,
+ bool is_const,
+ bool is_overridable,
+ const ast::Expression* constructor,
+ ast::AttributeList decorations);
+
+ /// Returns true if a constant expression can be generated.
+ /// @param id the SPIR-V ID of the value
+ /// @returns true if a constant expression can be generated
+ bool CanMakeConstantExpression(uint32_t id);
+
+ /// Creates an AST expression node for a SPIR-V ID. This is valid to call
+ /// when `CanMakeConstantExpression` returns true.
+ /// @param id the SPIR-V ID of the constant
+ /// @returns a new expression
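+    ///
+    /// Typical guarded use (sketch; `value_id` is a placeholder result ID):
+    ///   if (CanMakeConstantExpression(value_id)) {
+    ///     TypedExpression expr = MakeConstantExpression(value_id);
+    ///   }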
+ TypedExpression MakeConstantExpression(uint32_t id);
+
+ /// Creates an AST expression node for a scalar SPIR-V constant.
+ /// @param source the source location
+ /// @param ast_type the AST type for the value
+ /// @param spirv_const the internal representation of the SPIR-V constant.
+ /// @returns a new expression
+ TypedExpression MakeConstantExpressionForScalarSpirvConstant(
+ Source source,
+ const Type* ast_type,
+ const spvtools::opt::analysis::Constant* spirv_const);
+
+ /// Creates an AST expression node for the null value for the given type.
+ /// @param type the AST type
+ /// @returns a new expression
+ const ast::Expression* MakeNullValue(const Type* type);
+
+ /// Make a typed expression for the null value for the given type.
+ /// @param type the AST type
+ /// @returns a new typed expression
+ TypedExpression MakeNullExpression(const Type* type);
+
+ /// Converts a given expression to the signedness demanded for an operand
+ /// of the given SPIR-V instruction, if required. If the instruction assumes
+ /// signed integer operands, and `expr` is unsigned, then return an
+ /// as-cast expression converting it to signed. Otherwise, return
+    /// `expr` itself. Similarly, convert as required from signed
+    /// to unsigned. Assumes all SPIR-V types have been mapped to AST types.
+ /// @param inst the SPIR-V instruction
+ /// @param expr an expression
+ /// @returns expr, or a cast of expr
+ TypedExpression RectifyOperandSignedness(const spvtools::opt::Instruction& inst,
+ TypedExpression&& expr);
+
+ /// Converts a second operand to the signedness of the first operand
+ /// of a binary operator, if the WGSL operator requires they be the same.
+ /// Returns the converted expression, or the original expression if the
+ /// conversion is not needed.
+ /// @param inst the SPIR-V instruction
+ /// @param first_operand_type the type of the first operand to the instruction
+ /// @param second_operand_expr the second operand of the instruction
+ /// @returns second_operand_expr, or a cast of it
+ TypedExpression RectifySecondOperandSignedness(const spvtools::opt::Instruction& inst,
+ const Type* first_operand_type,
+ TypedExpression&& second_operand_expr);
+
+ /// Returns the "forced" result type for the given SPIR-V instruction.
+    /// If the WGSL result type for an operation has a stricter rule than
+    /// required by SPIR-V, then we say the result type is "forced". This occurs
+ /// for signed integer division (OpSDiv), for example, where the result type
+ /// in WGSL must match the operand types.
+ /// @param inst the SPIR-V instruction
+ /// @param first_operand_type the AST type for the first operand.
+ /// @returns the forced AST result type, or nullptr if no forcing is required.
+ const Type* ForcedResultType(const spvtools::opt::Instruction& inst,
+ const Type* first_operand_type);
+
+ /// Returns a signed integer scalar or vector type matching the shape (scalar,
+ /// vector, and component bit width) of another type, which itself is a
+ /// numeric scalar or vector. Returns null if the other type does not meet the
+ /// requirement.
+ /// @param other the type whose shape must be matched
+ /// @returns the signed scalar or vector type
+ const Type* GetSignedIntMatchingShape(const Type* other);
+
+    /// Returns an unsigned integer scalar or vector type matching the shape (scalar,
+ /// vector, and component bit width) of another type, which itself is a
+ /// numeric scalar or vector. Returns null if the other type does not meet the
+ /// requirement.
+ /// @param other the type whose shape must be matched
+ /// @returns the unsigned scalar or vector type
+ const Type* GetUnsignedIntMatchingShape(const Type* other);
+
+ /// Wraps the given expression in an as-cast to the given expression's type,
+ /// when the underlying operation produces a forced result type different
+ /// from the expression's result type. Otherwise, returns the given expression
+ /// unchanged.
+ /// @param expr the expression to pass through or to wrap
+ /// @param inst the SPIR-V instruction
+ /// @param first_operand_type the AST type for the first operand.
+    /// @returns the wrapped expression, or the original expression if no forcing is required.
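+    ///
+    /// Illustrative sketch (names are placeholders): after translating an
+    /// instruction such as OpSDiv, the result expression can be reconciled with
+    /// the instruction's forced type like so:
+    ///   expr = RectifyForcedResultType(expr, inst, first_operand_type);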
+ TypedExpression RectifyForcedResultType(TypedExpression expr,
+ const spvtools::opt::Instruction& inst,
+ const Type* first_operand_type);
+
+ /// Returns the given expression, but ensuring it's an unsigned type of the
+ /// same shape as the operand. Wraps the expression with a bitcast if needed.
+    /// Assumes the given expression is an integer scalar or vector.
+ /// @param expr an integer scalar or integer vector expression.
+ /// @return the potentially cast TypedExpression
+ TypedExpression AsUnsigned(TypedExpression expr);
+
+ /// Returns the given expression, but ensuring it's a signed type of the
+ /// same shape as the operand. Wraps the expression with a bitcast if needed.
+    /// Assumes the given expression is an integer scalar or vector.
+ /// @param expr an integer scalar or integer vector expression.
+ /// @return the potentially cast TypedExpression
+ TypedExpression AsSigned(TypedExpression expr);
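+
+    // Illustrative sketch (assumes `expr` is a u32-typed TypedExpression):
+    //   AsSigned(expr)    // yields a bitcast-to-i32 of expr
+    //   AsUnsigned(expr)  // returns expr unchanged, since it is already unsigned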
+
+ /// Bookkeeping used for tracking the "position" builtin variable.
+ struct BuiltInPositionInfo {
+ /// The ID for the gl_PerVertex struct containing the Position builtin.
+ uint32_t struct_type_id = 0;
+ /// The member index for the Position builtin within the struct.
+ uint32_t position_member_index = 0;
+ /// The member index for the PointSize builtin within the struct.
+ uint32_t pointsize_member_index = 0;
+ /// The ID for the member type, which should map to vec4<f32>.
+ uint32_t position_member_type_id = 0;
+ /// The ID of the type of a pointer to the struct in the Output storage
+    /// class.
+ uint32_t pointer_type_id = 0;
+ /// The SPIR-V storage class.
+ SpvStorageClass storage_class = SpvStorageClassOutput;
+ /// The ID of the type of a pointer to the Position member.
+ uint32_t position_member_pointer_type_id = 0;
+ /// The ID of the gl_PerVertex variable, if it was declared.
+ /// We'll use this for the gl_Position variable instead.
+ uint32_t per_vertex_var_id = 0;
+ /// The ID of the initializer to gl_PerVertex, if any.
+ uint32_t per_vertex_var_init_id = 0;
+ };
+ /// @returns info about the gl_Position builtin variable.
+ const BuiltInPositionInfo& GetBuiltInPositionInfo() { return builtin_position_; }
+
+ /// Returns the source record for the SPIR-V instruction with the given
+ /// result ID.
+ /// @param id the SPIR-V result id.
+ /// @return the Source record, or a default one
+ Source GetSourceForResultIdForTest(uint32_t id) const;
+ /// Returns the source record for the given instruction.
+ /// @param inst the SPIR-V instruction
+ /// @return the Source record, or a default one
+ Source GetSourceForInst(const spvtools::opt::Instruction* inst) const;
+
+ /// @param str a candidate identifier
+ /// @returns true if the given string is a valid WGSL identifier.
+ static bool IsValidIdentifier(const std::string& str);
+
+ /// Returns true if the given SPIR-V ID is a declared specialization constant,
+    /// generated by one of OpSpecConstantTrue, OpSpecConstantFalse, or OpSpecConstant
+ /// @param id a SPIR-V result ID
+ /// @returns true if the ID is a scalar spec constant.
+ bool IsScalarSpecConstant(uint32_t id) {
+ return scalar_spec_constants_.find(id) != scalar_spec_constants_.end();
+ }
+
+ /// For a SPIR-V ID that might define a sampler, image, or sampled image
+ /// value, return the SPIR-V instruction that represents the memory object
+ /// declaration for the object. If we encounter an OpSampledImage along the
+ /// way, follow the image operand when follow_image is true; otherwise follow
+ /// the sampler operand. Returns nullptr if we can't trace back to a memory
+ /// object declaration. Emits an error and returns nullptr when the scan
+ /// fails due to a malformed module. This method can be used any time after
+ /// BuildInternalModule has been invoked.
+ /// @param id the SPIR-V ID of the sampler, image, or sampled image
+ /// @param follow_image indicates whether to follow the image operand of
+ /// OpSampledImage
+ /// @returns the memory object declaration for the handle, or nullptr
+ const spvtools::opt::Instruction* GetMemoryObjectDeclarationForHandle(uint32_t id,
+ bool follow_image);
+
+ /// Returns the handle usage for a memory object declaration.
+ /// @param id SPIR-V ID of a sampler or image OpVariable or
+ /// OpFunctionParameter
+ /// @returns the handle usage, or an empty usage object.
+ Usage GetHandleUsage(uint32_t id) const;
+
+ /// Returns the SPIR-V type for the sampler or image type for the given
+ /// variable in UniformConstant storage class, or function parameter pointing
+    /// into the UniformConstant storage class. Returns null and emits an
+ /// error on failure.
+ /// @param var the OpVariable instruction or OpFunctionParameter
+    /// @returns the SPIR-V type instruction for the sampler or texture, or null on error
+ const spvtools::opt::Instruction* GetSpirvTypeForHandleMemoryObjectDeclaration(
+ const spvtools::opt::Instruction& var);
+
+ /// Returns the AST type for the pointer-to-sampler or pointer-to-texture type
+ /// for the given variable in UniformConstant storage class. Returns null and
+ /// emits an error on failure.
+ /// @param var the OpVariable instruction
+    /// @returns the Tint AST type for the pointer-to-{sampler|texture} or null on
+ /// error
+ const Pointer* GetTypeForHandleVar(const spvtools::opt::Instruction& var);
+
+ /// Returns the channel component type corresponding to the given image
+ /// format.
+ /// @param format image texel format
+ /// @returns the component type, one of f32, i32, u32
+ const Type* GetComponentTypeForFormat(ast::TexelFormat format);
+
+ /// Returns the number of channels in the given image format.
+ /// @param format image texel format
+ /// @returns the number of channels in the format
+ unsigned GetChannelCountForFormat(ast::TexelFormat format);
+
+ /// Returns the texel type corresponding to the given image format.
+    /// This is the WGSL type used for the texel parameter to textureStore.
+ /// It's always a 4-element vector.
+ /// @param format image texel format
+    /// @returns the texel type
+ const Type* GetTexelTypeForFormat(ast::TexelFormat format);
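+
+    // For example (illustrative): for an rgba8unorm texel format, the component
+    // type is f32, the channel count is 4, and the texel type is vec4<f32>.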
+
+ /// Returns the SPIR-V instruction with the given ID, or nullptr.
+ /// @param id the SPIR-V result ID
+ /// @returns the instruction, or nullptr on error
+ const spvtools::opt::Instruction* GetInstructionForTest(uint32_t id) const;
+
+ /// A map of SPIR-V identifiers to builtins
+ using BuiltInsMap = std::unordered_map<uint32_t, SpvBuiltIn>;
+
+ /// @returns a map of builtins that should be handled specially by code
+ /// generation. Either the builtin does not exist in WGSL, or a type
+ /// conversion must be implemented on load and store.
+ const BuiltInsMap& special_builtins() const { return special_builtins_; }
+
+ /// @param builtin the SPIR-V builtin variable kind
+ /// @returns the SPIR-V ID for the variable defining the given builtin, or 0
+ uint32_t IdForSpecialBuiltIn(SpvBuiltIn builtin) const {
+ // Do a linear search.
+ for (const auto& entry : special_builtins_) {
+ if (entry.second == builtin) {
+ return entry.first;
+ }
+ }
+ return 0;
+ }
+
+ /// @param entry_point the SPIR-V ID of an entry point.
+ /// @returns the entry point info for the given ID
+ const std::vector<EntryPointInfo>& GetEntryPointInfo(uint32_t entry_point) {
+ return function_to_ep_info_[entry_point];
+ }
+
+ /// @returns the SPIR-V binary.
+ const std::vector<uint32_t>& spv_binary() { return spv_binary_; }
+
+ private:
+ /// Converts a specific SPIR-V type to a Tint type. Integer case
+ const Type* ConvertType(const spvtools::opt::analysis::Integer* int_ty);
+ /// Converts a specific SPIR-V type to a Tint type. Float case
+ const Type* ConvertType(const spvtools::opt::analysis::Float* float_ty);
+ /// Converts a specific SPIR-V type to a Tint type. Vector case
+ const Type* ConvertType(const spvtools::opt::analysis::Vector* vec_ty);
+ /// Converts a specific SPIR-V type to a Tint type. Matrix case
+ const Type* ConvertType(const spvtools::opt::analysis::Matrix* mat_ty);
+ /// Converts a specific SPIR-V type to a Tint type. RuntimeArray case
+ /// Distinct SPIR-V array types map to distinct Tint array types.
+    /// @param type_id the SPIR-V ID for the type
+    /// @param rtarr_ty the SPIR-V runtime array type
+ const Type* ConvertType(uint32_t type_id,
+ const spvtools::opt::analysis::RuntimeArray* rtarr_ty);
+ /// Converts a specific SPIR-V type to a Tint type. Array case
+ /// Distinct SPIR-V array types map to distinct Tint array types.
+    /// @param type_id the SPIR-V ID for the type
+    /// @param arr_ty the SPIR-V array type
+ const Type* ConvertType(uint32_t type_id, const spvtools::opt::analysis::Array* arr_ty);
+ /// Converts a specific SPIR-V type to a Tint type. Struct case.
+ /// SPIR-V allows distinct struct type definitions for two OpTypeStruct
+ /// that otherwise have the same set of members (and struct and member
+    /// decorations). However, SPIRV-Tools always produces a unique
+ /// `spvtools::opt::analysis::Struct` object in these cases. For this type
+ /// conversion, we need to have the original SPIR-V ID because we can't always
+ /// recover it from the optimizer's struct type object. This also lets us
+    /// preserve member names, which are given by OpMemberName, an instruction
+    /// that is normally not significant to the optimizer's module representation.
+ /// @param type_id the SPIR-V ID for the type.
+    /// @param struct_ty the SPIR-V struct type
+ const Type* ConvertType(uint32_t type_id, const spvtools::opt::analysis::Struct* struct_ty);
+ /// Converts a specific SPIR-V type to a Tint type. Pointer / Reference case
+ /// The pointer to gl_PerVertex maps to nullptr, and instead is recorded
+ /// in member #builtin_position_.
+ /// @param type_id the SPIR-V ID for the type.
+ /// @param ptr_as if PtrAs::Ref then a Reference will be returned, otherwise
+ /// Pointer
+    /// @param ptr_ty the SPIR-V pointer type
+ const Type* ConvertType(uint32_t type_id,
+ PtrAs ptr_as,
+ const spvtools::opt::analysis::Pointer* ptr_ty);
+
+ /// If `type` is a signed integral, or vector of signed integral,
+ /// returns the unsigned type, otherwise returns `type`.
+ /// @param type the possibly signed type
+ /// @returns the unsigned type
+ const Type* UnsignedTypeFor(const Type* type);
+
+    /// If `type` is an unsigned integral, or vector of unsigned integral,
+ /// returns the signed type, otherwise returns `type`.
+ /// @param type the possibly unsigned type
+ /// @returns the signed type
+ const Type* SignedTypeFor(const Type* type);
+
+    /// Parses the array or runtime-array decorations. Sets `*array_stride` to 0
+    /// if no explicit stride was found, in which case the implicit stride is used.
+ /// @param spv_type the SPIR-V array or runtime-array type.
+ /// @param array_stride pointer to the array stride
+ /// @returns true on success.
+ bool ParseArrayDecorations(const spvtools::opt::analysis::Type* spv_type,
+ uint32_t* array_stride);
+
+ /// Creates a new `ast::Node` owned by the ProgramBuilder.
+ /// @param args the arguments to pass to the type constructor
+ /// @returns the node pointer
+ template <typename T, typename... ARGS>
+ T* create(ARGS&&... args) {
+ return builder_.create<T>(std::forward<ARGS>(args)...);
+ }
+
+ // The SPIR-V binary we're parsing
+ std::vector<uint32_t> spv_binary_;
+
+ // The program builder.
+ ProgramBuilder builder_;
+
+ // The type manager.
+ TypeManager ty_;
+
+ // Is the parse successful?
+ bool success_ = true;
+ // Collector for diagnostic messages.
+ std::stringstream errors_;
+ FailStream fail_stream_;
+ spvtools::MessageConsumer message_consumer_;
+
+ // An object used to store and generate names for SPIR-V objects.
+ Namer namer_;
+ // An object used to convert SPIR-V enums to Tint enums
+ EnumConverter enum_converter_;
+
+ // The internal representation of the SPIR-V module and its context.
+ spvtools::Context tools_context_;
+ // All the state is owned by ir_context_.
+ std::unique_ptr<spvtools::opt::IRContext> ir_context_;
+ // The following are borrowed pointers to the internal state of ir_context_.
+ spvtools::opt::Module* module_ = nullptr;
+ spvtools::opt::analysis::DefUseManager* def_use_mgr_ = nullptr;
+ spvtools::opt::analysis::ConstantManager* constant_mgr_ = nullptr;
+ spvtools::opt::analysis::TypeManager* type_mgr_ = nullptr;
+ spvtools::opt::analysis::DecorationManager* deco_mgr_ = nullptr;
+
+ // The functions ordered so that callees precede their callers.
+ std::vector<const spvtools::opt::Function*> topologically_ordered_functions_;
+
+ // Maps an instruction to its source location. If no OpLine information
+ // is in effect for the instruction, map the instruction to its position
+ // in the SPIR-V module, counting by instructions, where the first
+ // instruction is line 1.
+ std::unordered_map<const spvtools::opt::Instruction*, Source::Location> inst_source_;
+
+ // The set of IDs that are imports of the GLSL.std.450 extended instruction
+ // sets.
+ std::unordered_set<uint32_t> glsl_std_450_imports_;
+ // The set of IDs of imports that are ignored. For example, any
+ // "NonSemanticInfo." import is ignored.
+ std::unordered_set<uint32_t> ignored_imports_;
+
+ // The SPIR-V IDs of structure types that are the store type for buffer
+ // variables, either UBO or SSBO.
+ std::unordered_set<uint32_t> struct_types_for_buffers_;
+
+ // Bookkeeping for the gl_Position builtin.
+    // In Vulkan SPIR-V, it's member 0 of the gl_PerVertex structure.
+ // But in WGSL we make a module-scope variable:
+ // [[position]] var<in> gl_Position : vec4<f32>;
+ // The builtin variable was detected if and only if the struct_id is non-zero.
+ BuiltInPositionInfo builtin_position_;
+
+ // SPIR-V type IDs that are either:
+ // - a struct type decorated by BufferBlock
+    // - an array or runtime array containing one of these
+ // - a pointer type to one of these
+ // These are the types "enclosing" a buffer block with the old style
+ // representation: using Uniform storage class and BufferBlock decoration
+ // on the struct. The new style is to use the StorageBuffer storage class
+ // and Block decoration.
+ std::unordered_set<uint32_t> remap_buffer_block_type_;
+
+ // The ast::Struct type names with only read-only members.
+ std::unordered_set<Symbol> read_only_struct_types_;
+
+ // The IDs of scalar spec constants
+ std::unordered_set<uint32_t> scalar_spec_constants_;
+
+ // Maps function_id to a list of entrypoint information
+ std::unordered_map<uint32_t, std::vector<EntryPointInfo>> function_to_ep_info_;
+
+ // Maps from a SPIR-V ID to its underlying memory object declaration,
+    // following image paths. This is a memoization table for
+    // GetMemoryObjectDeclarationForHandle. (A SPIR-V memory object declaration is
+    // an OpVariable or an OpFunctionParameter with pointer type.)
+ std::unordered_map<uint32_t, const spvtools::opt::Instruction*> mem_obj_decl_image_;
+ // Maps from a SPIR-V ID to its underlying memory object declaration,
+    // following sampler paths. This is a memoization table for
+ // GetMemoryObjectDeclarationForHandle.
+ std::unordered_map<uint32_t, const spvtools::opt::Instruction*> mem_obj_decl_sampler_;
+
+ // Maps a memory-object-declaration instruction to any sampler or texture
+ // usages implied by usages of the memory-object-declaration.
+ std::unordered_map<const spvtools::opt::Instruction*, Usage> handle_usage_;
+ // The inferred pointer type for the given handle variable.
+ std::unordered_map<const spvtools::opt::Instruction*, const Pointer*> handle_type_;
+
+ // Set of symbols of declared type that have been added, used to avoid
+ // adding duplicates.
+ std::unordered_set<Symbol> declared_types_;
+
+ // Maps a struct type name to the SPIR-V ID for the structure type.
+ std::unordered_map<Symbol, uint32_t> struct_id_for_symbol_;
+
+ /// Maps the SPIR-V ID of a module-scope builtin variable that should be
+ /// ignored or type-converted, to its builtin kind.
+ /// See also BuiltInPositionInfo which is a separate mechanism for a more
+ /// complex case of replacing an entire structure.
+ BuiltInsMap special_builtins_;
+
+ /// Info about the WorkgroupSize builtin. If it's not present, then the 'id'
+ /// field will be 0. Sadly, in SPIR-V right now, there's only one workgroup
+ /// size object in the module.
+ WorkgroupSizeInfo workgroup_size_builtin_;
};
} // namespace tint::reader::spirv
diff --git a/chromium/third_party/dawn/src/tint/reader/spirv/parser_impl_barrier_test.cc b/chromium/third_party/dawn/src/tint/reader/spirv/parser_impl_barrier_test.cc
index 39f267f193c..fdfa3bf6815 100644
--- a/chromium/third_party/dawn/src/tint/reader/spirv/parser_impl_barrier_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/spirv/parser_impl_barrier_test.cc
@@ -28,24 +28,24 @@ using ::testing::Not;
using ::testing::StartsWith;
Program ParseAndBuild(std::string spirv) {
- const char* preamble = R"(OpCapability Shader
+ const char* preamble = R"(OpCapability Shader
OpMemoryModel Logical GLSL450
OpEntryPoint GLCompute %main "main"
OpExecutionMode %main LocalSize 1 1 1
OpName %main "main"
)";
- auto p = std::make_unique<ParserImpl>(test::Assemble(preamble + spirv));
- if (!p->BuildAndParseInternalModule()) {
- ProgramBuilder builder;
- builder.Diagnostics().add_error(diag::System::Reader, p->error());
- return Program(std::move(builder));
- }
- return p->program();
+ auto p = std::make_unique<ParserImpl>(test::Assemble(preamble + spirv));
+ if (!p->BuildAndParseInternalModule()) {
+ ProgramBuilder builder;
+ builder.Diagnostics().add_error(diag::System::Reader, p->error());
+ return Program(std::move(builder));
+ }
+ return p->program();
}
TEST_F(SpvParserTest, WorkgroupBarrier) {
- auto program = ParseAndBuild(R"(
+ auto program = ParseAndBuild(R"(
OpName %helper "helper"
%void = OpTypeVoid
%1 = OpTypeFunction %void
@@ -62,23 +62,22 @@ TEST_F(SpvParserTest, WorkgroupBarrier) {
OpReturn
OpFunctionEnd
)");
- ASSERT_TRUE(program.IsValid()) << program.Diagnostics().str();
- auto* helper =
- program.AST().Functions().Find(program.Symbols().Get("helper"));
- ASSERT_NE(helper, nullptr);
- ASSERT_GT(helper->body->statements.size(), 0u);
- auto* call = helper->body->statements[0]->As<ast::CallStatement>();
- ASSERT_NE(call, nullptr);
- EXPECT_EQ(call->expr->args.size(), 0u);
- auto* sem_call = program.Sem().Get(call->expr);
- ASSERT_NE(sem_call, nullptr);
- auto* builtin = sem_call->Target()->As<sem::Builtin>();
- ASSERT_NE(builtin, nullptr);
- EXPECT_EQ(builtin->Type(), sem::BuiltinType::kWorkgroupBarrier);
+ ASSERT_TRUE(program.IsValid()) << program.Diagnostics().str();
+ auto* helper = program.AST().Functions().Find(program.Symbols().Get("helper"));
+ ASSERT_NE(helper, nullptr);
+ ASSERT_GT(helper->body->statements.size(), 0u);
+ auto* call = helper->body->statements[0]->As<ast::CallStatement>();
+ ASSERT_NE(call, nullptr);
+ EXPECT_EQ(call->expr->args.size(), 0u);
+ auto* sem_call = program.Sem().Get<sem::Call>(call->expr);
+ ASSERT_NE(sem_call, nullptr);
+ auto* builtin = sem_call->Target()->As<sem::Builtin>();
+ ASSERT_NE(builtin, nullptr);
+ EXPECT_EQ(builtin->Type(), sem::BuiltinType::kWorkgroupBarrier);
}
TEST_F(SpvParserTest, StorageBarrier) {
- auto program = ParseAndBuild(R"(
+ auto program = ParseAndBuild(R"(
OpName %helper "helper"
%void = OpTypeVoid
%1 = OpTypeFunction %void
@@ -96,23 +95,22 @@ TEST_F(SpvParserTest, StorageBarrier) {
OpReturn
OpFunctionEnd
)");
- ASSERT_TRUE(program.IsValid()) << program.Diagnostics().str();
- auto* helper =
- program.AST().Functions().Find(program.Symbols().Get("helper"));
- ASSERT_NE(helper, nullptr);
- ASSERT_GT(helper->body->statements.size(), 0u);
- auto* call = helper->body->statements[0]->As<ast::CallStatement>();
- ASSERT_NE(call, nullptr);
- EXPECT_EQ(call->expr->args.size(), 0u);
- auto* sem_call = program.Sem().Get(call->expr);
- ASSERT_NE(sem_call, nullptr);
- auto* builtin = sem_call->Target()->As<sem::Builtin>();
- ASSERT_NE(builtin, nullptr);
- EXPECT_EQ(builtin->Type(), sem::BuiltinType::kStorageBarrier);
+ ASSERT_TRUE(program.IsValid()) << program.Diagnostics().str();
+ auto* helper = program.AST().Functions().Find(program.Symbols().Get("helper"));
+ ASSERT_NE(helper, nullptr);
+ ASSERT_GT(helper->body->statements.size(), 0u);
+ auto* call = helper->body->statements[0]->As<ast::CallStatement>();
+ ASSERT_NE(call, nullptr);
+ EXPECT_EQ(call->expr->args.size(), 0u);
+ auto* sem_call = program.Sem().Get<sem::Call>(call->expr);
+ ASSERT_NE(sem_call, nullptr);
+ auto* builtin = sem_call->Target()->As<sem::Builtin>();
+ ASSERT_NE(builtin, nullptr);
+ EXPECT_EQ(builtin->Type(), sem::BuiltinType::kStorageBarrier);
}
TEST_F(SpvParserTest, ErrBarrierInvalidExecution) {
- auto program = ParseAndBuild(R"(
+ auto program = ParseAndBuild(R"(
%void = OpTypeVoid
%1 = OpTypeFunction %void
%uint = OpTypeInt 32 0
@@ -125,13 +123,13 @@ TEST_F(SpvParserTest, ErrBarrierInvalidExecution) {
OpReturn
OpFunctionEnd
)");
- EXPECT_FALSE(program.IsValid());
- EXPECT_THAT(program.Diagnostics().str(),
- HasSubstr("unsupported control barrier execution scope"));
+ EXPECT_FALSE(program.IsValid());
+ EXPECT_THAT(program.Diagnostics().str(),
+ HasSubstr("unsupported control barrier execution scope"));
}
TEST_F(SpvParserTest, ErrBarrierSemanticsMissingAcquireRelease) {
- auto program = ParseAndBuild(R"(
+ auto program = ParseAndBuild(R"(
%void = OpTypeVoid
%1 = OpTypeFunction %void
%uint = OpTypeInt 32 0
@@ -143,14 +141,13 @@ TEST_F(SpvParserTest, ErrBarrierSemanticsMissingAcquireRelease) {
OpReturn
OpFunctionEnd
)");
- EXPECT_FALSE(program.IsValid());
- EXPECT_THAT(
- program.Diagnostics().str(),
- HasSubstr("control barrier semantics requires acquire and release"));
+ EXPECT_FALSE(program.IsValid());
+ EXPECT_THAT(program.Diagnostics().str(),
+ HasSubstr("control barrier semantics requires acquire and release"));
}
TEST_F(SpvParserTest, ErrBarrierInvalidSemantics) {
- auto program = ParseAndBuild(R"(
+ auto program = ParseAndBuild(R"(
%void = OpTypeVoid
%1 = OpTypeFunction %void
%uint = OpTypeInt 32 0
@@ -162,13 +159,12 @@ TEST_F(SpvParserTest, ErrBarrierInvalidSemantics) {
OpReturn
OpFunctionEnd
)");
- EXPECT_FALSE(program.IsValid());
- EXPECT_THAT(program.Diagnostics().str(),
- HasSubstr("unsupported control barrier semantics"));
+ EXPECT_FALSE(program.IsValid());
+ EXPECT_THAT(program.Diagnostics().str(), HasSubstr("unsupported control barrier semantics"));
}
TEST_F(SpvParserTest, ErrWorkgroupBarrierInvalidMemory) {
- auto program = ParseAndBuild(R"(
+ auto program = ParseAndBuild(R"(
%void = OpTypeVoid
%1 = OpTypeFunction %void
%uint = OpTypeInt 32 0
@@ -181,13 +177,13 @@ TEST_F(SpvParserTest, ErrWorkgroupBarrierInvalidMemory) {
OpReturn
OpFunctionEnd
)");
- EXPECT_FALSE(program.IsValid());
- EXPECT_THAT(program.Diagnostics().str(),
- HasSubstr("workgroupBarrier requires workgroup memory scope"));
+ EXPECT_FALSE(program.IsValid());
+ EXPECT_THAT(program.Diagnostics().str(),
+ HasSubstr("workgroupBarrier requires workgroup memory scope"));
}
TEST_F(SpvParserTest, ErrStorageBarrierInvalidMemory) {
- auto program = ParseAndBuild(R"(
+ auto program = ParseAndBuild(R"(
%void = OpTypeVoid
%1 = OpTypeFunction %void
%uint = OpTypeInt 32 0
@@ -200,9 +196,9 @@ TEST_F(SpvParserTest, ErrStorageBarrierInvalidMemory) {
OpReturn
OpFunctionEnd
)");
- EXPECT_FALSE(program.IsValid());
- EXPECT_THAT(program.Diagnostics().str(),
- HasSubstr("storageBarrier requires device memory scope"));
+ EXPECT_FALSE(program.IsValid());
+ EXPECT_THAT(program.Diagnostics().str(),
+ HasSubstr("storageBarrier requires device memory scope"));
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/reader/spirv/parser_impl_convert_member_decoration_test.cc b/chromium/third_party/dawn/src/tint/reader/spirv/parser_impl_convert_member_decoration_test.cc
index adc33971cfe..3147ac12bdf 100644
--- a/chromium/third_party/dawn/src/tint/reader/spirv/parser_impl_convert_member_decoration_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/spirv/parser_impl_convert_member_decoration_test.cc
@@ -21,132 +21,123 @@ namespace {
using ::testing::Eq;
TEST_F(SpvParserTest, ConvertMemberDecoration_Empty) {
- auto p = parser(std::vector<uint32_t>{});
+ auto p = parser(std::vector<uint32_t>{});
- auto result = p->ConvertMemberDecoration(1, 1, nullptr, {});
- EXPECT_TRUE(result.empty());
- EXPECT_THAT(p->error(), Eq("malformed SPIR-V decoration: it's empty"));
+ auto result = p->ConvertMemberDecoration(1, 1, nullptr, {});
+ EXPECT_TRUE(result.empty());
+ EXPECT_THAT(p->error(), Eq("malformed SPIR-V decoration: it's empty"));
}
TEST_F(SpvParserTest, ConvertMemberDecoration_OffsetWithoutOperand) {
- auto p = parser(std::vector<uint32_t>{});
+ auto p = parser(std::vector<uint32_t>{});
- auto result =
- p->ConvertMemberDecoration(12, 13, nullptr, {SpvDecorationOffset});
- EXPECT_TRUE(result.empty());
- EXPECT_THAT(p->error(), Eq("malformed Offset decoration: expected 1 literal "
- "operand, has 0: member 13 of SPIR-V type 12"));
+ auto result = p->ConvertMemberDecoration(12, 13, nullptr, {SpvDecorationOffset});
+ EXPECT_TRUE(result.empty());
+ EXPECT_THAT(p->error(), Eq("malformed Offset decoration: expected 1 literal "
+ "operand, has 0: member 13 of SPIR-V type 12"));
}
TEST_F(SpvParserTest, ConvertMemberDecoration_OffsetWithTooManyOperands) {
- auto p = parser(std::vector<uint32_t>{});
+ auto p = parser(std::vector<uint32_t>{});
- auto result =
- p->ConvertMemberDecoration(12, 13, nullptr, {SpvDecorationOffset, 3, 4});
- EXPECT_TRUE(result.empty());
- EXPECT_THAT(p->error(), Eq("malformed Offset decoration: expected 1 literal "
- "operand, has 2: member 13 of SPIR-V type 12"));
+ auto result = p->ConvertMemberDecoration(12, 13, nullptr, {SpvDecorationOffset, 3, 4});
+ EXPECT_TRUE(result.empty());
+ EXPECT_THAT(p->error(), Eq("malformed Offset decoration: expected 1 literal "
+ "operand, has 2: member 13 of SPIR-V type 12"));
}
TEST_F(SpvParserTest, ConvertMemberDecoration_Offset) {
- auto p = parser(std::vector<uint32_t>{});
-
- auto result =
- p->ConvertMemberDecoration(1, 1, nullptr, {SpvDecorationOffset, 8});
- ASSERT_FALSE(result.empty());
- EXPECT_TRUE(result[0]->Is<ast::StructMemberOffsetAttribute>());
- auto* offset_deco = result[0]->As<ast::StructMemberOffsetAttribute>();
- ASSERT_NE(offset_deco, nullptr);
- EXPECT_EQ(offset_deco->offset, 8u);
- EXPECT_TRUE(p->error().empty());
+ auto p = parser(std::vector<uint32_t>{});
+
+ auto result = p->ConvertMemberDecoration(1, 1, nullptr, {SpvDecorationOffset, 8});
+ ASSERT_FALSE(result.empty());
+ EXPECT_TRUE(result[0]->Is<ast::StructMemberOffsetAttribute>());
+ auto* offset_deco = result[0]->As<ast::StructMemberOffsetAttribute>();
+ ASSERT_NE(offset_deco, nullptr);
+ EXPECT_EQ(offset_deco->offset, 8u);
+ EXPECT_TRUE(p->error().empty());
}
TEST_F(SpvParserTest, ConvertMemberDecoration_Matrix2x2_Stride_Natural) {
- auto p = parser(std::vector<uint32_t>{});
-
- spirv::F32 f32;
- spirv::Matrix matrix(&f32, 2, 2);
- auto result =
- p->ConvertMemberDecoration(1, 1, &matrix, {SpvDecorationMatrixStride, 8});
- EXPECT_TRUE(result.empty());
- EXPECT_TRUE(p->error().empty());
+ auto p = parser(std::vector<uint32_t>{});
+
+ spirv::F32 f32;
+ spirv::Matrix matrix(&f32, 2, 2);
+ auto result = p->ConvertMemberDecoration(1, 1, &matrix, {SpvDecorationMatrixStride, 8});
+ EXPECT_TRUE(result.empty());
+ EXPECT_TRUE(p->error().empty());
}
TEST_F(SpvParserTest, ConvertMemberDecoration_Matrix2x2_Stride_Custom) {
- auto p = parser(std::vector<uint32_t>{});
-
- spirv::F32 f32;
- spirv::Matrix matrix(&f32, 2, 2);
- auto result = p->ConvertMemberDecoration(1, 1, &matrix,
- {SpvDecorationMatrixStride, 16});
- ASSERT_FALSE(result.empty());
- EXPECT_TRUE(result[0]->Is<ast::StrideAttribute>());
- auto* stride_deco = result[0]->As<ast::StrideAttribute>();
- ASSERT_NE(stride_deco, nullptr);
- EXPECT_EQ(stride_deco->stride, 16u);
- EXPECT_TRUE(p->error().empty());
+ auto p = parser(std::vector<uint32_t>{});
+
+ spirv::F32 f32;
+ spirv::Matrix matrix(&f32, 2, 2);
+ auto result = p->ConvertMemberDecoration(1, 1, &matrix, {SpvDecorationMatrixStride, 16});
+ ASSERT_FALSE(result.empty());
+ EXPECT_TRUE(result[0]->Is<ast::StrideAttribute>());
+ auto* stride_deco = result[0]->As<ast::StrideAttribute>();
+ ASSERT_NE(stride_deco, nullptr);
+ EXPECT_EQ(stride_deco->stride, 16u);
+ EXPECT_TRUE(p->error().empty());
}
TEST_F(SpvParserTest, ConvertMemberDecoration_Matrix2x4_Stride_Natural) {
- auto p = parser(std::vector<uint32_t>{});
-
- spirv::F32 f32;
- spirv::Matrix matrix(&f32, 2, 4);
- auto result = p->ConvertMemberDecoration(1, 1, &matrix,
- {SpvDecorationMatrixStride, 16});
- EXPECT_TRUE(result.empty());
- EXPECT_TRUE(p->error().empty());
+ auto p = parser(std::vector<uint32_t>{});
+
+ spirv::F32 f32;
+ spirv::Matrix matrix(&f32, 2, 4);
+ auto result = p->ConvertMemberDecoration(1, 1, &matrix, {SpvDecorationMatrixStride, 16});
+ EXPECT_TRUE(result.empty());
+ EXPECT_TRUE(p->error().empty());
}
TEST_F(SpvParserTest, ConvertMemberDecoration_Matrix2x4_Stride_Custom) {
- auto p = parser(std::vector<uint32_t>{});
-
- spirv::F32 f32;
- spirv::Matrix matrix(&f32, 2, 4);
- auto result = p->ConvertMemberDecoration(1, 1, &matrix,
- {SpvDecorationMatrixStride, 64});
- ASSERT_FALSE(result.empty());
- EXPECT_TRUE(result[0]->Is<ast::StrideAttribute>());
- auto* stride_deco = result[0]->As<ast::StrideAttribute>();
- ASSERT_NE(stride_deco, nullptr);
- EXPECT_EQ(stride_deco->stride, 64u);
- EXPECT_TRUE(p->error().empty());
+ auto p = parser(std::vector<uint32_t>{});
+
+ spirv::F32 f32;
+ spirv::Matrix matrix(&f32, 2, 4);
+ auto result = p->ConvertMemberDecoration(1, 1, &matrix, {SpvDecorationMatrixStride, 64});
+ ASSERT_FALSE(result.empty());
+ EXPECT_TRUE(result[0]->Is<ast::StrideAttribute>());
+ auto* stride_deco = result[0]->As<ast::StrideAttribute>();
+ ASSERT_NE(stride_deco, nullptr);
+ EXPECT_EQ(stride_deco->stride, 64u);
+ EXPECT_TRUE(p->error().empty());
}
TEST_F(SpvParserTest, ConvertMemberDecoration_Matrix2x3_Stride_Custom) {
- auto p = parser(std::vector<uint32_t>{});
-
- spirv::F32 f32;
- spirv::Matrix matrix(&f32, 2, 3);
- auto result = p->ConvertMemberDecoration(1, 1, &matrix,
- {SpvDecorationMatrixStride, 32});
- ASSERT_FALSE(result.empty());
- EXPECT_TRUE(result[0]->Is<ast::StrideAttribute>());
- auto* stride_deco = result[0]->As<ast::StrideAttribute>();
- ASSERT_NE(stride_deco, nullptr);
- EXPECT_EQ(stride_deco->stride, 32u);
- EXPECT_TRUE(p->error().empty());
+ auto p = parser(std::vector<uint32_t>{});
+
+ spirv::F32 f32;
+ spirv::Matrix matrix(&f32, 2, 3);
+ auto result = p->ConvertMemberDecoration(1, 1, &matrix, {SpvDecorationMatrixStride, 32});
+ ASSERT_FALSE(result.empty());
+ EXPECT_TRUE(result[0]->Is<ast::StrideAttribute>());
+ auto* stride_deco = result[0]->As<ast::StrideAttribute>();
+ ASSERT_NE(stride_deco, nullptr);
+ EXPECT_EQ(stride_deco->stride, 32u);
+ EXPECT_TRUE(p->error().empty());
}
TEST_F(SpvParserTest, ConvertMemberDecoration_RelaxedPrecision) {
- // WGSL does not support relaxed precision. Drop it.
- // It's functionally correct to use full precision f32 instead of
- // relaxed precision f32.
- auto p = parser(std::vector<uint32_t>{});
-
- auto result = p->ConvertMemberDecoration(1, 1, nullptr,
- {SpvDecorationRelaxedPrecision});
- EXPECT_TRUE(result.empty());
- EXPECT_TRUE(p->error().empty());
+ // WGSL does not support relaxed precision. Drop it.
+ // It's functionally correct to use full precision f32 instead of
+ // relaxed precision f32.
+ auto p = parser(std::vector<uint32_t>{});
+
+ auto result = p->ConvertMemberDecoration(1, 1, nullptr, {SpvDecorationRelaxedPrecision});
+ EXPECT_TRUE(result.empty());
+ EXPECT_TRUE(p->error().empty());
}
TEST_F(SpvParserTest, ConvertMemberDecoration_UnhandledDecoration) {
- auto p = parser(std::vector<uint32_t>{});
+ auto p = parser(std::vector<uint32_t>{});
- auto result = p->ConvertMemberDecoration(12, 13, nullptr, {12345678});
- EXPECT_TRUE(result.empty());
- EXPECT_THAT(p->error(), Eq("unhandled member decoration: 12345678 on member "
- "13 of SPIR-V type 12"));
+ auto result = p->ConvertMemberDecoration(12, 13, nullptr, {12345678});
+ EXPECT_TRUE(result.empty());
+ EXPECT_THAT(p->error(), Eq("unhandled member decoration: 12345678 on member "
+ "13 of SPIR-V type 12"));
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/reader/spirv/parser_impl_convert_type_test.cc b/chromium/third_party/dawn/src/tint/reader/spirv/parser_impl_convert_type_test.cc
index bbda4143772..6cedddbaea4 100644
--- a/chromium/third_party/dawn/src/tint/reader/spirv/parser_impl_convert_type_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/spirv/parser_impl_convert_type_test.cc
@@ -22,7 +22,7 @@ namespace {
using ::testing::Eq;
std::string Preamble() {
- return R"(
+ return R"(
OpCapability Shader
OpMemoryModel Logical Simple
OpEntryPoint Fragment %main "x_100"
@@ -31,7 +31,7 @@ std::string Preamble() {
}
std::string MainBody() {
- return R"(
+ return R"(
%void = OpTypeVoid
%voidfn = OpTypeFunction %void
%main = OpFunction %void None %voidfn
@@ -42,244 +42,235 @@ std::string MainBody() {
}
TEST_F(SpvParserTest, ConvertType_PreservesExistingFailure) {
- auto p = parser(std::vector<uint32_t>{});
- p->Fail() << "boing";
- auto* type = p->ConvertType(10);
- EXPECT_EQ(type, nullptr);
- EXPECT_THAT(p->error(), Eq("boing"));
+ auto p = parser(std::vector<uint32_t>{});
+ p->Fail() << "boing";
+ auto* type = p->ConvertType(10);
+ EXPECT_EQ(type, nullptr);
+ EXPECT_THAT(p->error(), Eq("boing"));
}
TEST_F(SpvParserTest, ConvertType_RequiresInternalRepresntation) {
- auto p = parser(std::vector<uint32_t>{});
- auto* type = p->ConvertType(10);
- EXPECT_EQ(type, nullptr);
- EXPECT_THAT(
- p->error(),
- Eq("ConvertType called when the internal module has not been built"));
+ auto p = parser(std::vector<uint32_t>{});
+ auto* type = p->ConvertType(10);
+ EXPECT_EQ(type, nullptr);
+ EXPECT_THAT(p->error(), Eq("ConvertType called when the internal module has not been built"));
}
TEST_F(SpvParserTest, ConvertType_NotAnId) {
- auto assembly = Preamble() + MainBody();
- auto p = parser(test::Assemble(assembly));
- EXPECT_TRUE(p->BuildInternalModule());
+ auto assembly = Preamble() + MainBody();
+ auto p = parser(test::Assemble(assembly));
+ EXPECT_TRUE(p->BuildInternalModule());
- auto* type = p->ConvertType(900);
- EXPECT_EQ(type, nullptr);
- EXPECT_EQ(nullptr, type);
- EXPECT_THAT(p->error(), Eq("ID is not a SPIR-V type: 900"));
+ auto* type = p->ConvertType(900);
+ EXPECT_EQ(type, nullptr);
+ EXPECT_EQ(nullptr, type);
+ EXPECT_THAT(p->error(), Eq("ID is not a SPIR-V type: 900"));
}
TEST_F(SpvParserTest, ConvertType_IdExistsButIsNotAType) {
- auto assembly = R"(
+ auto assembly = R"(
OpCapability Shader
%1 = OpExtInstImport "GLSL.std.450"
OpMemoryModel Logical Simple
OpEntryPoint Fragment %main "x_100"
OpExecutionMode %main OriginUpperLeft
)" + MainBody();
- auto p = parser(test::Assemble(assembly));
- EXPECT_TRUE(p->BuildInternalModule());
+ auto p = parser(test::Assemble(assembly));
+ EXPECT_TRUE(p->BuildInternalModule());
- auto* type = p->ConvertType(1);
- EXPECT_EQ(nullptr, type);
- EXPECT_THAT(p->error(), Eq("ID is not a SPIR-V type: 1"));
+ auto* type = p->ConvertType(1);
+ EXPECT_EQ(nullptr, type);
+ EXPECT_THAT(p->error(), Eq("ID is not a SPIR-V type: 1"));
}
TEST_F(SpvParserTest, ConvertType_UnhandledType) {
- // Pipes are an OpenCL type. Tint doesn't support them.
- auto p = parser(test::Assemble("%70 = OpTypePipe WriteOnly"));
- EXPECT_TRUE(p->BuildInternalModule());
+ // Pipes are an OpenCL type. Tint doesn't support them.
+ auto p = parser(test::Assemble("%70 = OpTypePipe WriteOnly"));
+ EXPECT_TRUE(p->BuildInternalModule());
- auto* type = p->ConvertType(70);
- EXPECT_EQ(nullptr, type);
- EXPECT_THAT(p->error(),
- Eq("unknown SPIR-V type with ID 70: %70 = OpTypePipe WriteOnly"));
+ auto* type = p->ConvertType(70);
+ EXPECT_EQ(nullptr, type);
+ EXPECT_THAT(p->error(), Eq("unknown SPIR-V type with ID 70: %70 = OpTypePipe WriteOnly"));
}
TEST_F(SpvParserTest, ConvertType_Void) {
- auto p = parser(test::Assemble(Preamble() + "%1 = OpTypeVoid" + R"(
+ auto p = parser(test::Assemble(Preamble() + "%1 = OpTypeVoid" + R"(
%voidfn = OpTypeFunction %1
%main = OpFunction %1 None %voidfn
%entry = OpLabel
OpReturn
OpFunctionEnd
)"));
- EXPECT_TRUE(p->BuildInternalModule());
+ EXPECT_TRUE(p->BuildInternalModule());
- auto* type = p->ConvertType(1);
- EXPECT_TRUE(type->Is<Void>());
- EXPECT_TRUE(p->error().empty());
+ auto* type = p->ConvertType(1);
+ EXPECT_TRUE(type->Is<Void>());
+ EXPECT_TRUE(p->error().empty());
}
TEST_F(SpvParserTest, ConvertType_Bool) {
- auto p =
- parser(test::Assemble(Preamble() + "%100 = OpTypeBool" + MainBody()));
- EXPECT_TRUE(p->BuildInternalModule());
+ auto p = parser(test::Assemble(Preamble() + "%100 = OpTypeBool" + MainBody()));
+ EXPECT_TRUE(p->BuildInternalModule());
- auto* type = p->ConvertType(100);
- EXPECT_TRUE(type->Is<Bool>());
- EXPECT_TRUE(p->error().empty());
+ auto* type = p->ConvertType(100);
+ EXPECT_TRUE(type->Is<Bool>());
+ EXPECT_TRUE(p->error().empty());
}
TEST_F(SpvParserTest, ConvertType_I32) {
- auto p =
- parser(test::Assemble(Preamble() + "%2 = OpTypeInt 32 1" + MainBody()));
- EXPECT_TRUE(p->BuildInternalModule());
+ auto p = parser(test::Assemble(Preamble() + "%2 = OpTypeInt 32 1" + MainBody()));
+ EXPECT_TRUE(p->BuildInternalModule());
- auto* type = p->ConvertType(2);
- EXPECT_TRUE(type->Is<I32>());
- EXPECT_TRUE(p->error().empty());
+ auto* type = p->ConvertType(2);
+ EXPECT_TRUE(type->Is<I32>());
+ EXPECT_TRUE(p->error().empty());
}
TEST_F(SpvParserTest, ConvertType_U32) {
- auto p =
- parser(test::Assemble(Preamble() + "%3 = OpTypeInt 32 0" + MainBody()));
- EXPECT_TRUE(p->BuildInternalModule());
+ auto p = parser(test::Assemble(Preamble() + "%3 = OpTypeInt 32 0" + MainBody()));
+ EXPECT_TRUE(p->BuildInternalModule());
- auto* type = p->ConvertType(3);
- EXPECT_TRUE(type->Is<U32>());
- EXPECT_TRUE(p->error().empty());
+ auto* type = p->ConvertType(3);
+ EXPECT_TRUE(type->Is<U32>());
+ EXPECT_TRUE(p->error().empty());
}
TEST_F(SpvParserTest, ConvertType_F32) {
- auto p =
- parser(test::Assemble(Preamble() + "%4 = OpTypeFloat 32" + MainBody()));
- EXPECT_TRUE(p->BuildInternalModule());
+ auto p = parser(test::Assemble(Preamble() + "%4 = OpTypeFloat 32" + MainBody()));
+ EXPECT_TRUE(p->BuildInternalModule());
- auto* type = p->ConvertType(4);
- EXPECT_TRUE(type->Is<F32>());
- EXPECT_TRUE(p->error().empty());
+ auto* type = p->ConvertType(4);
+ EXPECT_TRUE(type->Is<F32>());
+ EXPECT_TRUE(p->error().empty());
}
TEST_F(SpvParserTest, ConvertType_BadIntWidth) {
- auto p =
- parser(test::Assemble(Preamble() + "%5 = OpTypeInt 17 1" + MainBody()));
- EXPECT_TRUE(p->BuildInternalModule());
+ auto p = parser(test::Assemble(Preamble() + "%5 = OpTypeInt 17 1" + MainBody()));
+ EXPECT_TRUE(p->BuildInternalModule());
- auto* type = p->ConvertType(5);
- EXPECT_EQ(type, nullptr);
- EXPECT_THAT(p->error(), Eq("unhandled integer width: 17"));
+ auto* type = p->ConvertType(5);
+ EXPECT_EQ(type, nullptr);
+ EXPECT_THAT(p->error(), Eq("unhandled integer width: 17"));
}
TEST_F(SpvParserTest, ConvertType_BadFloatWidth) {
- auto p =
- parser(test::Assemble(Preamble() + "%6 = OpTypeFloat 19" + MainBody()));
- EXPECT_TRUE(p->BuildInternalModule());
+ auto p = parser(test::Assemble(Preamble() + "%6 = OpTypeFloat 19" + MainBody()));
+ EXPECT_TRUE(p->BuildInternalModule());
- auto* type = p->ConvertType(6);
- EXPECT_EQ(type, nullptr);
- EXPECT_THAT(p->error(), Eq("unhandled float width: 19"));
+ auto* type = p->ConvertType(6);
+ EXPECT_EQ(type, nullptr);
+ EXPECT_THAT(p->error(), Eq("unhandled float width: 19"));
}
TEST_F(SpvParserTest, DISABLED_ConvertType_InvalidVectorElement) {
- auto p = parser(test::Assemble(Preamble() + R"(
+ auto p = parser(test::Assemble(Preamble() + R"(
%5 = OpTypePipe ReadOnly
%20 = OpTypeVector %5 2
)" + MainBody()));
- EXPECT_TRUE(p->BuildInternalModule());
+ EXPECT_TRUE(p->BuildInternalModule());
- auto* type = p->ConvertType(20);
- EXPECT_EQ(type, nullptr);
- EXPECT_THAT(p->error(), Eq("unknown SPIR-V type: 5"));
+ auto* type = p->ConvertType(20);
+ EXPECT_EQ(type, nullptr);
+ EXPECT_THAT(p->error(), Eq("unknown SPIR-V type: 5"));
}
TEST_F(SpvParserTest, ConvertType_VecOverF32) {
- auto p = parser(test::Assemble(Preamble() + R"(
+ auto p = parser(test::Assemble(Preamble() + R"(
%float = OpTypeFloat 32
%20 = OpTypeVector %float 2
%30 = OpTypeVector %float 3
%40 = OpTypeVector %float 4
)" + MainBody()));
- EXPECT_TRUE(p->BuildInternalModule());
+ EXPECT_TRUE(p->BuildInternalModule());
- auto* v2xf32 = p->ConvertType(20);
- EXPECT_TRUE(v2xf32->Is<Vector>());
- EXPECT_TRUE(v2xf32->As<Vector>()->type->Is<F32>());
- EXPECT_EQ(v2xf32->As<Vector>()->size, 2u);
+ auto* v2xf32 = p->ConvertType(20);
+ EXPECT_TRUE(v2xf32->Is<Vector>());
+ EXPECT_TRUE(v2xf32->As<Vector>()->type->Is<F32>());
+ EXPECT_EQ(v2xf32->As<Vector>()->size, 2u);
- auto* v3xf32 = p->ConvertType(30);
- EXPECT_TRUE(v3xf32->Is<Vector>());
- EXPECT_TRUE(v3xf32->As<Vector>()->type->Is<F32>());
- EXPECT_EQ(v3xf32->As<Vector>()->size, 3u);
+ auto* v3xf32 = p->ConvertType(30);
+ EXPECT_TRUE(v3xf32->Is<Vector>());
+ EXPECT_TRUE(v3xf32->As<Vector>()->type->Is<F32>());
+ EXPECT_EQ(v3xf32->As<Vector>()->size, 3u);
- auto* v4xf32 = p->ConvertType(40);
- EXPECT_TRUE(v4xf32->Is<Vector>());
- EXPECT_TRUE(v4xf32->As<Vector>()->type->Is<F32>());
- EXPECT_EQ(v4xf32->As<Vector>()->size, 4u);
+ auto* v4xf32 = p->ConvertType(40);
+ EXPECT_TRUE(v4xf32->Is<Vector>());
+ EXPECT_TRUE(v4xf32->As<Vector>()->type->Is<F32>());
+ EXPECT_EQ(v4xf32->As<Vector>()->size, 4u);
- EXPECT_TRUE(p->error().empty());
+ EXPECT_TRUE(p->error().empty());
}
TEST_F(SpvParserTest, ConvertType_VecOverI32) {
- auto p = parser(test::Assemble(Preamble() + R"(
+ auto p = parser(test::Assemble(Preamble() + R"(
%int = OpTypeInt 32 1
%20 = OpTypeVector %int 2
%30 = OpTypeVector %int 3
%40 = OpTypeVector %int 4
)" + MainBody()));
- EXPECT_TRUE(p->BuildInternalModule());
+ EXPECT_TRUE(p->BuildInternalModule());
- auto* v2xi32 = p->ConvertType(20);
- EXPECT_TRUE(v2xi32->Is<Vector>());
- EXPECT_TRUE(v2xi32->As<Vector>()->type->Is<I32>());
- EXPECT_EQ(v2xi32->As<Vector>()->size, 2u);
+ auto* v2xi32 = p->ConvertType(20);
+ EXPECT_TRUE(v2xi32->Is<Vector>());
+ EXPECT_TRUE(v2xi32->As<Vector>()->type->Is<I32>());
+ EXPECT_EQ(v2xi32->As<Vector>()->size, 2u);
- auto* v3xi32 = p->ConvertType(30);
- EXPECT_TRUE(v3xi32->Is<Vector>());
- EXPECT_TRUE(v3xi32->As<Vector>()->type->Is<I32>());
- EXPECT_EQ(v3xi32->As<Vector>()->size, 3u);
+ auto* v3xi32 = p->ConvertType(30);
+ EXPECT_TRUE(v3xi32->Is<Vector>());
+ EXPECT_TRUE(v3xi32->As<Vector>()->type->Is<I32>());
+ EXPECT_EQ(v3xi32->As<Vector>()->size, 3u);
- auto* v4xi32 = p->ConvertType(40);
- EXPECT_TRUE(v4xi32->Is<Vector>());
- EXPECT_TRUE(v4xi32->As<Vector>()->type->Is<I32>());
- EXPECT_EQ(v4xi32->As<Vector>()->size, 4u);
+ auto* v4xi32 = p->ConvertType(40);
+ EXPECT_TRUE(v4xi32->Is<Vector>());
+ EXPECT_TRUE(v4xi32->As<Vector>()->type->Is<I32>());
+ EXPECT_EQ(v4xi32->As<Vector>()->size, 4u);
- EXPECT_TRUE(p->error().empty());
+ EXPECT_TRUE(p->error().empty());
}
TEST_F(SpvParserTest, ConvertType_VecOverU32) {
- auto p = parser(test::Assemble(Preamble() + R"(
+ auto p = parser(test::Assemble(Preamble() + R"(
%uint = OpTypeInt 32 0
%20 = OpTypeVector %uint 2
%30 = OpTypeVector %uint 3
%40 = OpTypeVector %uint 4
)" + MainBody()));
- EXPECT_TRUE(p->BuildInternalModule());
+ EXPECT_TRUE(p->BuildInternalModule());
- auto* v2xu32 = p->ConvertType(20);
- EXPECT_TRUE(v2xu32->Is<Vector>());
- EXPECT_TRUE(v2xu32->As<Vector>()->type->Is<U32>());
- EXPECT_EQ(v2xu32->As<Vector>()->size, 2u);
+ auto* v2xu32 = p->ConvertType(20);
+ EXPECT_TRUE(v2xu32->Is<Vector>());
+ EXPECT_TRUE(v2xu32->As<Vector>()->type->Is<U32>());
+ EXPECT_EQ(v2xu32->As<Vector>()->size, 2u);
- auto* v3xu32 = p->ConvertType(30);
- EXPECT_TRUE(v3xu32->Is<Vector>());
- EXPECT_TRUE(v3xu32->As<Vector>()->type->Is<U32>());
- EXPECT_EQ(v3xu32->As<Vector>()->size, 3u);
+ auto* v3xu32 = p->ConvertType(30);
+ EXPECT_TRUE(v3xu32->Is<Vector>());
+ EXPECT_TRUE(v3xu32->As<Vector>()->type->Is<U32>());
+ EXPECT_EQ(v3xu32->As<Vector>()->size, 3u);
- auto* v4xu32 = p->ConvertType(40);
- EXPECT_TRUE(v4xu32->Is<Vector>());
- EXPECT_TRUE(v4xu32->As<Vector>()->type->Is<U32>());
- EXPECT_EQ(v4xu32->As<Vector>()->size, 4u);
+ auto* v4xu32 = p->ConvertType(40);
+ EXPECT_TRUE(v4xu32->Is<Vector>());
+ EXPECT_TRUE(v4xu32->As<Vector>()->type->Is<U32>());
+ EXPECT_EQ(v4xu32->As<Vector>()->size, 4u);
- EXPECT_TRUE(p->error().empty());
+ EXPECT_TRUE(p->error().empty());
}
TEST_F(SpvParserTest, DISABLED_ConvertType_InvalidMatrixElement) {
- auto p = parser(test::Assemble(Preamble() + R"(
+ auto p = parser(test::Assemble(Preamble() + R"(
%5 = OpTypePipe ReadOnly
%10 = OpTypeVector %5 2
%20 = OpTypeMatrix %10 2
)" + MainBody()));
- EXPECT_TRUE(p->BuildInternalModule());
+ EXPECT_TRUE(p->BuildInternalModule());
- auto* type = p->ConvertType(20);
- EXPECT_EQ(type, nullptr);
- EXPECT_THAT(p->error(), Eq("unknown SPIR-V type: 5"));
+ auto* type = p->ConvertType(20);
+ EXPECT_EQ(type, nullptr);
+ EXPECT_THAT(p->error(), Eq("unknown SPIR-V type: 5"));
}
TEST_F(SpvParserTest, ConvertType_MatrixOverF32) {
- // Matrices are only defined over floats.
- auto p = parser(test::Assemble(Preamble() + R"(
+ // Matrices are only defined over floats.
+ auto p = parser(test::Assemble(Preamble() + R"(
%float = OpTypeFloat 32
%v2 = OpTypeVector %float 2
%v3 = OpTypeVector %float 3
@@ -296,176 +287,172 @@ TEST_F(SpvParserTest, ConvertType_MatrixOverF32) {
%43 = OpTypeMatrix %v4 3
%44 = OpTypeMatrix %v4 4
)" + MainBody()));
- EXPECT_TRUE(p->BuildInternalModule());
-
- auto* m22 = p->ConvertType(22);
- EXPECT_TRUE(m22->Is<Matrix>());
- EXPECT_TRUE(m22->As<Matrix>()->type->Is<F32>());
- EXPECT_EQ(m22->As<Matrix>()->rows, 2u);
- EXPECT_EQ(m22->As<Matrix>()->columns, 2u);
-
- auto* m23 = p->ConvertType(23);
- EXPECT_TRUE(m23->Is<Matrix>());
- EXPECT_TRUE(m23->As<Matrix>()->type->Is<F32>());
- EXPECT_EQ(m23->As<Matrix>()->rows, 2u);
- EXPECT_EQ(m23->As<Matrix>()->columns, 3u);
-
- auto* m24 = p->ConvertType(24);
- EXPECT_TRUE(m24->Is<Matrix>());
- EXPECT_TRUE(m24->As<Matrix>()->type->Is<F32>());
- EXPECT_EQ(m24->As<Matrix>()->rows, 2u);
- EXPECT_EQ(m24->As<Matrix>()->columns, 4u);
-
- auto* m32 = p->ConvertType(32);
- EXPECT_TRUE(m32->Is<Matrix>());
- EXPECT_TRUE(m32->As<Matrix>()->type->Is<F32>());
- EXPECT_EQ(m32->As<Matrix>()->rows, 3u);
- EXPECT_EQ(m32->As<Matrix>()->columns, 2u);
-
- auto* m33 = p->ConvertType(33);
- EXPECT_TRUE(m33->Is<Matrix>());
- EXPECT_TRUE(m33->As<Matrix>()->type->Is<F32>());
- EXPECT_EQ(m33->As<Matrix>()->rows, 3u);
- EXPECT_EQ(m33->As<Matrix>()->columns, 3u);
-
- auto* m34 = p->ConvertType(34);
- EXPECT_TRUE(m34->Is<Matrix>());
- EXPECT_TRUE(m34->As<Matrix>()->type->Is<F32>());
- EXPECT_EQ(m34->As<Matrix>()->rows, 3u);
- EXPECT_EQ(m34->As<Matrix>()->columns, 4u);
-
- auto* m42 = p->ConvertType(42);
- EXPECT_TRUE(m42->Is<Matrix>());
- EXPECT_TRUE(m42->As<Matrix>()->type->Is<F32>());
- EXPECT_EQ(m42->As<Matrix>()->rows, 4u);
- EXPECT_EQ(m42->As<Matrix>()->columns, 2u);
-
- auto* m43 = p->ConvertType(43);
- EXPECT_TRUE(m43->Is<Matrix>());
- EXPECT_TRUE(m43->As<Matrix>()->type->Is<F32>());
- EXPECT_EQ(m43->As<Matrix>()->rows, 4u);
- EXPECT_EQ(m43->As<Matrix>()->columns, 3u);
-
- auto* m44 = p->ConvertType(44);
- EXPECT_TRUE(m44->Is<Matrix>());
- EXPECT_TRUE(m44->As<Matrix>()->type->Is<F32>());
- EXPECT_EQ(m44->As<Matrix>()->rows, 4u);
- EXPECT_EQ(m44->As<Matrix>()->columns, 4u);
-
- EXPECT_TRUE(p->error().empty());
+ EXPECT_TRUE(p->BuildInternalModule());
+
+ auto* m22 = p->ConvertType(22);
+ EXPECT_TRUE(m22->Is<Matrix>());
+ EXPECT_TRUE(m22->As<Matrix>()->type->Is<F32>());
+ EXPECT_EQ(m22->As<Matrix>()->rows, 2u);
+ EXPECT_EQ(m22->As<Matrix>()->columns, 2u);
+
+ auto* m23 = p->ConvertType(23);
+ EXPECT_TRUE(m23->Is<Matrix>());
+ EXPECT_TRUE(m23->As<Matrix>()->type->Is<F32>());
+ EXPECT_EQ(m23->As<Matrix>()->rows, 2u);
+ EXPECT_EQ(m23->As<Matrix>()->columns, 3u);
+
+ auto* m24 = p->ConvertType(24);
+ EXPECT_TRUE(m24->Is<Matrix>());
+ EXPECT_TRUE(m24->As<Matrix>()->type->Is<F32>());
+ EXPECT_EQ(m24->As<Matrix>()->rows, 2u);
+ EXPECT_EQ(m24->As<Matrix>()->columns, 4u);
+
+ auto* m32 = p->ConvertType(32);
+ EXPECT_TRUE(m32->Is<Matrix>());
+ EXPECT_TRUE(m32->As<Matrix>()->type->Is<F32>());
+ EXPECT_EQ(m32->As<Matrix>()->rows, 3u);
+ EXPECT_EQ(m32->As<Matrix>()->columns, 2u);
+
+ auto* m33 = p->ConvertType(33);
+ EXPECT_TRUE(m33->Is<Matrix>());
+ EXPECT_TRUE(m33->As<Matrix>()->type->Is<F32>());
+ EXPECT_EQ(m33->As<Matrix>()->rows, 3u);
+ EXPECT_EQ(m33->As<Matrix>()->columns, 3u);
+
+ auto* m34 = p->ConvertType(34);
+ EXPECT_TRUE(m34->Is<Matrix>());
+ EXPECT_TRUE(m34->As<Matrix>()->type->Is<F32>());
+ EXPECT_EQ(m34->As<Matrix>()->rows, 3u);
+ EXPECT_EQ(m34->As<Matrix>()->columns, 4u);
+
+ auto* m42 = p->ConvertType(42);
+ EXPECT_TRUE(m42->Is<Matrix>());
+ EXPECT_TRUE(m42->As<Matrix>()->type->Is<F32>());
+ EXPECT_EQ(m42->As<Matrix>()->rows, 4u);
+ EXPECT_EQ(m42->As<Matrix>()->columns, 2u);
+
+ auto* m43 = p->ConvertType(43);
+ EXPECT_TRUE(m43->Is<Matrix>());
+ EXPECT_TRUE(m43->As<Matrix>()->type->Is<F32>());
+ EXPECT_EQ(m43->As<Matrix>()->rows, 4u);
+ EXPECT_EQ(m43->As<Matrix>()->columns, 3u);
+
+ auto* m44 = p->ConvertType(44);
+ EXPECT_TRUE(m44->Is<Matrix>());
+ EXPECT_TRUE(m44->As<Matrix>()->type->Is<F32>());
+ EXPECT_EQ(m44->As<Matrix>()->rows, 4u);
+ EXPECT_EQ(m44->As<Matrix>()->columns, 4u);
+
+ EXPECT_TRUE(p->error().empty());
}
TEST_F(SpvParserTest, ConvertType_RuntimeArray) {
- auto p = parser(test::Assemble(Preamble() + R"(
+ auto p = parser(test::Assemble(Preamble() + R"(
%uint = OpTypeInt 32 0
%10 = OpTypeRuntimeArray %uint
)" + MainBody()));
- EXPECT_TRUE(p->BuildInternalModule());
+ EXPECT_TRUE(p->BuildInternalModule());
- auto* type = p->ConvertType(10);
- ASSERT_NE(type, nullptr);
- EXPECT_TRUE(type->UnwrapAll()->Is<Array>());
- auto* arr_type = type->UnwrapAll()->As<Array>();
- ASSERT_NE(arr_type, nullptr);
- EXPECT_EQ(arr_type->size, 0u);
- EXPECT_EQ(arr_type->stride, 0u);
- auto* elem_type = arr_type->type;
- ASSERT_NE(elem_type, nullptr);
- EXPECT_TRUE(elem_type->Is<U32>());
- EXPECT_TRUE(p->error().empty());
+ auto* type = p->ConvertType(10);
+ ASSERT_NE(type, nullptr);
+ EXPECT_TRUE(type->UnwrapAll()->Is<Array>());
+ auto* arr_type = type->UnwrapAll()->As<Array>();
+ ASSERT_NE(arr_type, nullptr);
+ EXPECT_EQ(arr_type->size, 0u);
+ EXPECT_EQ(arr_type->stride, 0u);
+ auto* elem_type = arr_type->type;
+ ASSERT_NE(elem_type, nullptr);
+ EXPECT_TRUE(elem_type->Is<U32>());
+ EXPECT_TRUE(p->error().empty());
}
TEST_F(SpvParserTest, ConvertType_RuntimeArray_InvalidDecoration) {
- auto p = parser(test::Assemble(Preamble() + R"(
+ auto p = parser(test::Assemble(Preamble() + R"(
OpDecorate %10 Block
%uint = OpTypeInt 32 0
%10 = OpTypeRuntimeArray %uint
)" + MainBody()));
- EXPECT_TRUE(p->BuildInternalModule());
- auto* type = p->ConvertType(10);
- EXPECT_EQ(type, nullptr);
- EXPECT_THAT(
- p->error(),
- Eq("invalid array type ID 10: unknown decoration 2 with 1 total words"));
+ EXPECT_TRUE(p->BuildInternalModule());
+ auto* type = p->ConvertType(10);
+ EXPECT_EQ(type, nullptr);
+ EXPECT_THAT(p->error(),
+ Eq("invalid array type ID 10: unknown decoration 2 with 1 total words"));
}
TEST_F(SpvParserTest, ConvertType_RuntimeArray_ArrayStride_Valid) {
- auto p = parser(test::Assemble(Preamble() + R"(
+ auto p = parser(test::Assemble(Preamble() + R"(
OpDecorate %10 ArrayStride 64
%uint = OpTypeInt 32 0
%10 = OpTypeRuntimeArray %uint
)" + MainBody()));
- EXPECT_TRUE(p->BuildInternalModule());
- auto* type = p->ConvertType(10);
- ASSERT_NE(type, nullptr);
- auto* arr_type = type->UnwrapAll()->As<Array>();
- ASSERT_NE(arr_type, nullptr);
- EXPECT_EQ(arr_type->size, 0u);
- EXPECT_EQ(arr_type->stride, 64u);
+ EXPECT_TRUE(p->BuildInternalModule());
+ auto* type = p->ConvertType(10);
+ ASSERT_NE(type, nullptr);
+ auto* arr_type = type->UnwrapAll()->As<Array>();
+ ASSERT_NE(arr_type, nullptr);
+ EXPECT_EQ(arr_type->size, 0u);
+ EXPECT_EQ(arr_type->stride, 64u);
}
TEST_F(SpvParserTest, ConvertType_RuntimeArray_ArrayStride_ZeroIsError) {
- auto p = parser(test::Assemble(Preamble() + R"(
+ auto p = parser(test::Assemble(Preamble() + R"(
OpDecorate %10 ArrayStride 0
%uint = OpTypeInt 32 0
%10 = OpTypeRuntimeArray %uint
)" + MainBody()));
- EXPECT_TRUE(p->BuildInternalModule());
- auto* type = p->ConvertType(10);
- EXPECT_EQ(type, nullptr);
- EXPECT_THAT(p->error(),
- Eq("invalid array type ID 10: ArrayStride can't be 0"));
+ EXPECT_TRUE(p->BuildInternalModule());
+ auto* type = p->ConvertType(10);
+ EXPECT_EQ(type, nullptr);
+ EXPECT_THAT(p->error(), Eq("invalid array type ID 10: ArrayStride can't be 0"));
}
TEST_F(SpvParserTest, ConvertType_Array) {
- auto p = parser(test::Assemble(Preamble() + R"(
+ auto p = parser(test::Assemble(Preamble() + R"(
%uint = OpTypeInt 32 0
%uint_42 = OpConstant %uint 42
%10 = OpTypeArray %uint %uint_42
)" + MainBody()));
- EXPECT_TRUE(p->BuildInternalModule());
+ EXPECT_TRUE(p->BuildInternalModule());
- auto* type = p->ConvertType(10);
- ASSERT_NE(type, nullptr);
- EXPECT_TRUE(type->Is<Array>());
- auto* arr_type = type->As<Array>();
- ASSERT_NE(arr_type, nullptr);
- EXPECT_EQ(arr_type->size, 42u);
- EXPECT_EQ(arr_type->stride, 0u);
- auto* elem_type = arr_type->type;
- ASSERT_NE(elem_type, nullptr);
- EXPECT_TRUE(elem_type->Is<U32>());
- EXPECT_TRUE(p->error().empty());
+ auto* type = p->ConvertType(10);
+ ASSERT_NE(type, nullptr);
+ EXPECT_TRUE(type->Is<Array>());
+ auto* arr_type = type->As<Array>();
+ ASSERT_NE(arr_type, nullptr);
+ EXPECT_EQ(arr_type->size, 42u);
+ EXPECT_EQ(arr_type->stride, 0u);
+ auto* elem_type = arr_type->type;
+ ASSERT_NE(elem_type, nullptr);
+ EXPECT_TRUE(elem_type->Is<U32>());
+ EXPECT_TRUE(p->error().empty());
}
TEST_F(SpvParserTest, ConvertType_ArrayBadLengthIsSpecConstantValue) {
- auto p = parser(test::Assemble(Preamble() + R"(
+ auto p = parser(test::Assemble(Preamble() + R"(
OpDecorate %uint_42 SpecId 12
%uint = OpTypeInt 32 0
%uint_42 = OpSpecConstant %uint 42
%10 = OpTypeArray %uint %uint_42
)" + MainBody()));
- EXPECT_TRUE(p->BuildInternalModule());
+ EXPECT_TRUE(p->BuildInternalModule());
- auto* type = p->ConvertType(10);
- ASSERT_EQ(type, nullptr);
- EXPECT_THAT(p->error(),
- Eq("Array type 10 length is a specialization constant"));
+ auto* type = p->ConvertType(10);
+ ASSERT_EQ(type, nullptr);
+ EXPECT_THAT(p->error(), Eq("Array type 10 length is a specialization constant"));
}
TEST_F(SpvParserTest, ConvertType_ArrayBadLengthIsSpecConstantExpr) {
- auto p = parser(test::Assemble(Preamble() + R"(
+ auto p = parser(test::Assemble(Preamble() + R"(
%uint = OpTypeInt 32 0
%uint_42 = OpConstant %uint 42
%sum = OpSpecConstantOp %uint IAdd %uint_42 %uint_42
%10 = OpTypeArray %uint %sum
)" + MainBody()));
- EXPECT_TRUE(p->BuildInternalModule());
+ EXPECT_TRUE(p->BuildInternalModule());
- auto* type = p->ConvertType(10);
- ASSERT_EQ(type, nullptr);
- EXPECT_THAT(p->error(),
- Eq("Array type 10 length is a specialization constant"));
+ auto* type = p->ConvertType(10);
+ ASSERT_EQ(type, nullptr);
+ EXPECT_THAT(p->error(), Eq("Array type 10 length is a specialization constant"));
}
// TODO(dneto): Maybe add a test where the length operand is not a constant.
@@ -473,119 +460,117 @@ TEST_F(SpvParserTest, ConvertType_ArrayBadLengthIsSpecConstantExpr) {
// optimizer representation doesn't handle it and asserts out instead.
TEST_F(SpvParserTest, ConvertType_ArrayBadTooBig) {
- auto p = parser(test::Assemble(Preamble() + R"(
+ auto p = parser(test::Assemble(Preamble() + R"(
%uint64 = OpTypeInt 64 0
%uint64_big = OpConstant %uint64 5000000000
%10 = OpTypeArray %uint64 %uint64_big
)" + MainBody()));
- EXPECT_TRUE(p->BuildInternalModule());
+ EXPECT_TRUE(p->BuildInternalModule());
- auto* type = p->ConvertType(10);
- ASSERT_EQ(type, nullptr);
- // TODO(dneto): Right now it's rejected earlier in the flow because
- // we can't even utter the uint64 type.
- EXPECT_THAT(p->error(), Eq("unhandled integer width: 64"));
+ auto* type = p->ConvertType(10);
+ ASSERT_EQ(type, nullptr);
+ // TODO(dneto): Right now it's rejected earlier in the flow because
+ // we can't even utter the uint64 type.
+ EXPECT_THAT(p->error(), Eq("unhandled integer width: 64"));
}
TEST_F(SpvParserTest, ConvertType_Array_InvalidDecoration) {
- auto p = parser(test::Assemble(Preamble() + R"(
+ auto p = parser(test::Assemble(Preamble() + R"(
OpDecorate %10 Block
%uint = OpTypeInt 32 0
%uint_5 = OpConstant %uint 5
%10 = OpTypeArray %uint %uint_5
)" + MainBody()));
- EXPECT_TRUE(p->BuildInternalModule());
- auto* type = p->ConvertType(10);
- EXPECT_EQ(type, nullptr);
- EXPECT_THAT(
- p->error(),
- Eq("invalid array type ID 10: unknown decoration 2 with 1 total words"));
+ EXPECT_TRUE(p->BuildInternalModule());
+ auto* type = p->ConvertType(10);
+ EXPECT_EQ(type, nullptr);
+ EXPECT_THAT(p->error(),
+ Eq("invalid array type ID 10: unknown decoration 2 with 1 total words"));
}
TEST_F(SpvParserTest, ConvertType_ArrayStride_Valid) {
- auto p = parser(test::Assemble(Preamble() + R"(
+ auto p = parser(test::Assemble(Preamble() + R"(
OpDecorate %10 ArrayStride 8
%uint = OpTypeInt 32 0
%uint_5 = OpConstant %uint 5
%10 = OpTypeArray %uint %uint_5
)" + MainBody()));
- EXPECT_TRUE(p->BuildInternalModule());
+ EXPECT_TRUE(p->BuildInternalModule());
- auto* type = p->ConvertType(10);
- ASSERT_NE(type, nullptr);
- EXPECT_TRUE(type->UnwrapAll()->Is<Array>());
- auto* arr_type = type->UnwrapAll()->As<Array>();
- ASSERT_NE(arr_type, nullptr);
- EXPECT_EQ(arr_type->stride, 8u);
- EXPECT_TRUE(p->error().empty());
+ auto* type = p->ConvertType(10);
+ ASSERT_NE(type, nullptr);
+ EXPECT_TRUE(type->UnwrapAll()->Is<Array>());
+ auto* arr_type = type->UnwrapAll()->As<Array>();
+ ASSERT_NE(arr_type, nullptr);
+ EXPECT_EQ(arr_type->stride, 8u);
+ EXPECT_TRUE(p->error().empty());
}
TEST_F(SpvParserTest, ConvertType_ArrayStride_ZeroIsError) {
- auto p = parser(test::Assemble(Preamble() + R"(
+ auto p = parser(test::Assemble(Preamble() + R"(
OpDecorate %10 ArrayStride 0
%uint = OpTypeInt 32 0
%uint_5 = OpConstant %uint 5
%10 = OpTypeArray %uint %uint_5
)" + MainBody()));
- EXPECT_TRUE(p->BuildInternalModule());
+ EXPECT_TRUE(p->BuildInternalModule());
- auto* type = p->ConvertType(10);
- ASSERT_EQ(type, nullptr);
- EXPECT_THAT(p->error(),
- Eq("invalid array type ID 10: ArrayStride can't be 0"));
+ auto* type = p->ConvertType(10);
+ ASSERT_EQ(type, nullptr);
+ EXPECT_THAT(p->error(), Eq("invalid array type ID 10: ArrayStride can't be 0"));
}
TEST_F(SpvParserTest, ConvertType_StructEmpty) {
- auto p = parser(test::Assemble(Preamble() + R"(
+ auto p = parser(test::Assemble(Preamble() + R"(
%10 = OpTypeStruct
)" + MainBody()));
- EXPECT_TRUE(p->BuildInternalModule());
+ EXPECT_TRUE(p->BuildInternalModule());
- auto* type = p->ConvertType(10);
- EXPECT_EQ(type, nullptr);
- EXPECT_EQ(p->error(),
- "WGSL does not support empty structures. can't convert type: %10 = "
- "OpTypeStruct");
+ auto* type = p->ConvertType(10);
+ EXPECT_EQ(type, nullptr);
+ EXPECT_EQ(p->error(),
+ "WGSL does not support empty structures. can't convert type: %10 = "
+ "OpTypeStruct");
}
TEST_F(SpvParserTest, ConvertType_StructTwoMembers) {
- auto p = parser(test::Assemble(Preamble() + R"(
+ auto p = parser(test::Assemble(Preamble() + R"(
%uint = OpTypeInt 32 0
%float = OpTypeFloat 32
%10 = OpTypeStruct %uint %float
)" + MainBody()));
- EXPECT_TRUE(p->BuildInternalModule());
- EXPECT_TRUE(p->RegisterUserAndStructMemberNames());
+ EXPECT_TRUE(p->BuildInternalModule());
+ EXPECT_TRUE(p->RegisterUserAndStructMemberNames());
- auto* type = p->ConvertType(10);
- ASSERT_NE(type, nullptr);
- EXPECT_TRUE(type->Is<Struct>());
+ auto* type = p->ConvertType(10);
+ ASSERT_NE(type, nullptr);
+ EXPECT_TRUE(type->Is<Struct>());
- auto* str = type->Build(p->builder());
- Program program = p->program();
- EXPECT_EQ(test::ToString(program, str), "S");
+ auto* str = type->Build(p->builder());
+ Program program = p->program();
+ EXPECT_EQ(test::ToString(program, str), "S");
}
TEST_F(SpvParserTest, ConvertType_StructWithBlockDecoration) {
- auto p = parser(test::Assemble(Preamble() + R"(
+ auto p = parser(test::Assemble(Preamble() + R"(
OpDecorate %10 Block
%uint = OpTypeInt 32 0
%10 = OpTypeStruct %uint
)" + MainBody()));
- EXPECT_TRUE(p->BuildInternalModule());
- EXPECT_TRUE(p->RegisterUserAndStructMemberNames());
+ EXPECT_TRUE(p->BuildInternalModule());
+ EXPECT_TRUE(p->RegisterUserAndStructMemberNames());
- auto* type = p->ConvertType(10);
- ASSERT_NE(type, nullptr);
- EXPECT_TRUE(type->Is<Struct>());
+ auto* type = p->ConvertType(10);
+ ASSERT_NE(type, nullptr);
+ EXPECT_TRUE(type->Is<Struct>());
- auto* str = type->Build(p->builder());
- Program program = p->program();
- EXPECT_EQ(test::ToString(program, str), "S");
+ auto* str = type->Build(p->builder());
+ Program program = p->program();
+ EXPECT_EQ(test::ToString(program, str), "S");
}
TEST_F(SpvParserTest, ConvertType_StructWithMemberDecorations) {
- auto p = parser(test::Assemble(Preamble() + R"(
+ auto p = parser(test::Assemble(Preamble() + R"(
OpMemberDecorate %10 0 Offset 0
OpMemberDecorate %10 1 Offset 8
OpMemberDecorate %10 2 Offset 16
@@ -594,50 +579,50 @@ TEST_F(SpvParserTest, ConvertType_StructWithMemberDecorations) {
%mat = OpTypeMatrix %vec 2
%10 = OpTypeStruct %float %vec %mat
)" + MainBody()));
- EXPECT_TRUE(p->BuildInternalModule());
- EXPECT_TRUE(p->RegisterUserAndStructMemberNames());
+ EXPECT_TRUE(p->BuildInternalModule());
+ EXPECT_TRUE(p->RegisterUserAndStructMemberNames());
- auto* type = p->ConvertType(10);
- ASSERT_NE(type, nullptr);
- EXPECT_TRUE(type->Is<Struct>());
+ auto* type = p->ConvertType(10);
+ ASSERT_NE(type, nullptr);
+ EXPECT_TRUE(type->Is<Struct>());
- auto* str = type->Build(p->builder());
- Program program = p->program();
- EXPECT_EQ(test::ToString(program, str), "S");
+ auto* str = type->Build(p->builder());
+ Program program = p->program();
+ EXPECT_EQ(test::ToString(program, str), "S");
}
TEST_F(SpvParserTest, ConvertType_Struct_NoDeduplication) {
- // Prove that distinct SPIR-V structs map to distinct WGSL types.
- auto p = parser(test::Assemble(Preamble() + R"(
+ // Prove that distinct SPIR-V structs map to distinct WGSL types.
+ auto p = parser(test::Assemble(Preamble() + R"(
%uint = OpTypeInt 32 0
%10 = OpTypeStruct %uint
%11 = OpTypeStruct %uint
)" + MainBody()));
- EXPECT_TRUE(p->BuildAndParseInternalModule());
+ EXPECT_TRUE(p->BuildAndParseInternalModule());
- auto* type10 = p->ConvertType(10);
- ASSERT_NE(type10, nullptr);
- EXPECT_TRUE(type10->Is<Struct>());
- auto* struct_type10 = type10->As<Struct>();
- ASSERT_NE(struct_type10, nullptr);
- EXPECT_EQ(struct_type10->members.size(), 1u);
- EXPECT_TRUE(struct_type10->members[0]->Is<U32>());
+ auto* type10 = p->ConvertType(10);
+ ASSERT_NE(type10, nullptr);
+ EXPECT_TRUE(type10->Is<Struct>());
+ auto* struct_type10 = type10->As<Struct>();
+ ASSERT_NE(struct_type10, nullptr);
+ EXPECT_EQ(struct_type10->members.size(), 1u);
+ EXPECT_TRUE(struct_type10->members[0]->Is<U32>());
- auto* type11 = p->ConvertType(11);
- ASSERT_NE(type11, nullptr);
- EXPECT_TRUE(type11->Is<Struct>());
- auto* struct_type11 = type11->As<Struct>();
- ASSERT_NE(struct_type11, nullptr);
- EXPECT_EQ(struct_type11->members.size(), 1u);
- EXPECT_TRUE(struct_type11->members[0]->Is<U32>());
+ auto* type11 = p->ConvertType(11);
+ ASSERT_NE(type11, nullptr);
+ EXPECT_TRUE(type11->Is<Struct>());
+ auto* struct_type11 = type11->As<Struct>();
+ ASSERT_NE(struct_type11, nullptr);
+ EXPECT_EQ(struct_type11->members.size(), 1u);
+ EXPECT_TRUE(struct_type11->members[0]->Is<U32>());
- // They map to distinct types in WGSL
- EXPECT_NE(type11, type10);
+ // They map to distinct types in WGSL
+ EXPECT_NE(type11, type10);
}
TEST_F(SpvParserTest, ConvertType_Array_NoDeduplication) {
- // Prove that distinct SPIR-V arrays map to distinct WGSL types.
- auto assembly = Preamble() + R"(
+ // Prove that distinct SPIR-V arrays map to distinct WGSL types.
+ auto assembly = Preamble() + R"(
%uint = OpTypeInt 32 0
%10 = OpTypeStruct %uint
%11 = OpTypeStruct %uint
@@ -645,26 +630,26 @@ TEST_F(SpvParserTest, ConvertType_Array_NoDeduplication) {
%20 = OpTypeArray %10 %uint_1
%21 = OpTypeArray %11 %uint_1
)" + MainBody();
- auto p = parser(test::Assemble(assembly));
- EXPECT_TRUE(p->BuildAndParseInternalModule());
+ auto p = parser(test::Assemble(assembly));
+ EXPECT_TRUE(p->BuildAndParseInternalModule());
- auto* type20 = p->ConvertType(20);
- ASSERT_NE(type20, nullptr);
- EXPECT_TRUE(type20->Is<Array>());
+ auto* type20 = p->ConvertType(20);
+ ASSERT_NE(type20, nullptr);
+ EXPECT_TRUE(type20->Is<Array>());
- auto* type21 = p->ConvertType(21);
- ASSERT_NE(type21, nullptr);
- EXPECT_TRUE(type21->Is<Array>());
+ auto* type21 = p->ConvertType(21);
+ ASSERT_NE(type21, nullptr);
+ EXPECT_TRUE(type21->Is<Array>());
- // They map to distinct types in WGSL
- EXPECT_NE(type21, type20);
+ // They map to distinct types in WGSL
+ EXPECT_NE(type21, type20);
}
TEST_F(SpvParserTest, ConvertType_RuntimeArray_NoDeduplication) {
- // Prove that distinct SPIR-V runtime arrays map to distinct WGSL types.
- // The implementation already de-duplicates them because it knows
- // runtime-arrays normally have stride decorations.
- auto assembly = Preamble() + R"(
+ // Prove that distinct SPIR-V runtime arrays map to distinct WGSL types.
+ // The implementation already de-duplicates them because it knows
+ // runtime-arrays normally have stride decorations.
+ auto assembly = Preamble() + R"(
%uint = OpTypeInt 32 0
%10 = OpTypeStruct %uint
%11 = OpTypeStruct %uint
@@ -672,31 +657,31 @@ TEST_F(SpvParserTest, ConvertType_RuntimeArray_NoDeduplication) {
%21 = OpTypeRuntimeArray %11
%22 = OpTypeRuntimeArray %10
)" + MainBody();
- auto p = parser(test::Assemble(assembly));
- EXPECT_TRUE(p->BuildAndParseInternalModule());
+ auto p = parser(test::Assemble(assembly));
+ EXPECT_TRUE(p->BuildAndParseInternalModule());
- auto* type20 = p->ConvertType(20);
- ASSERT_NE(type20, nullptr);
- EXPECT_TRUE(type20->Is<Alias>());
- EXPECT_TRUE(type20->UnwrapAll()->Is<Array>());
- EXPECT_EQ(type20->UnwrapAll()->As<Array>()->size, 0u);
+ auto* type20 = p->ConvertType(20);
+ ASSERT_NE(type20, nullptr);
+ EXPECT_TRUE(type20->Is<Alias>());
+ EXPECT_TRUE(type20->UnwrapAll()->Is<Array>());
+ EXPECT_EQ(type20->UnwrapAll()->As<Array>()->size, 0u);
- auto* type21 = p->ConvertType(21);
- ASSERT_NE(type21, nullptr);
- EXPECT_TRUE(type21->Is<Alias>());
- EXPECT_TRUE(type21->UnwrapAll()->Is<Array>());
- EXPECT_EQ(type21->UnwrapAll()->As<Array>()->size, 0u);
+ auto* type21 = p->ConvertType(21);
+ ASSERT_NE(type21, nullptr);
+ EXPECT_TRUE(type21->Is<Alias>());
+ EXPECT_TRUE(type21->UnwrapAll()->Is<Array>());
+ EXPECT_EQ(type21->UnwrapAll()->As<Array>()->size, 0u);
- auto* type22 = p->ConvertType(22);
- ASSERT_NE(type22, nullptr);
- EXPECT_TRUE(type22->Is<Alias>());
- EXPECT_TRUE(type22->UnwrapAll()->Is<Array>());
- EXPECT_EQ(type22->UnwrapAll()->As<Array>()->size, 0u);
+ auto* type22 = p->ConvertType(22);
+ ASSERT_NE(type22, nullptr);
+ EXPECT_TRUE(type22->Is<Alias>());
+ EXPECT_TRUE(type22->UnwrapAll()->Is<Array>());
+ EXPECT_EQ(type22->UnwrapAll()->As<Array>()->size, 0u);
- // They map to distinct types in WGSL
- EXPECT_NE(type21, type20);
- EXPECT_NE(type22, type21);
- EXPECT_NE(type22, type20);
+ // They map to distinct types in WGSL
+ EXPECT_NE(type21, type20);
+ EXPECT_NE(type22, type21);
+ EXPECT_NE(type22, type20);
}
// TODO(dneto): Demonstrate other member decorations. Blocked on
@@ -705,8 +690,8 @@ TEST_F(SpvParserTest, ConvertType_RuntimeArray_NoDeduplication) {
// crbug.com/tint/30
TEST_F(SpvParserTest, ConvertType_InvalidPointeetype) {
- // Disallow pointer-to-function
- auto p = parser(test::Assemble(Preamble() + R"(
+ // Disallow pointer-to-function
+ auto p = parser(test::Assemble(Preamble() + R"(
%void = OpTypeVoid
%42 = OpTypeFunction %void
%3 = OpTypePointer Input %42
@@ -717,214 +702,213 @@ TEST_F(SpvParserTest, ConvertType_InvalidPointeetype) {
OpReturn
OpFunctionEnd
)"));
- EXPECT_TRUE(p->BuildInternalModule()) << p->error();
+ EXPECT_TRUE(p->BuildInternalModule()) << p->error();
- auto* type = p->ConvertType(3);
- EXPECT_EQ(type, nullptr);
- EXPECT_THAT(p->error(),
- Eq("SPIR-V pointer type with ID 3 has invalid pointee type 42"));
+ auto* type = p->ConvertType(3);
+ EXPECT_EQ(type, nullptr);
+ EXPECT_THAT(p->error(), Eq("SPIR-V pointer type with ID 3 has invalid pointee type 42"));
}
TEST_F(SpvParserTest, DISABLED_ConvertType_InvalidStorageClass) {
- // Disallow invalid storage class
- auto p = parser(test::Assemble(Preamble() + R"(
+ // Disallow invalid storage class
+ auto p = parser(test::Assemble(Preamble() + R"(
%1 = OpTypeFloat 32
%3 = OpTypePointer !999 %1 ; Special syntax to inject 999 as the storage class
)" + MainBody()));
- // TODO(dneto): I can't get it past module building.
- EXPECT_FALSE(p->BuildInternalModule()) << p->error();
+ // TODO(dneto): I can't get it past module building.
+ EXPECT_FALSE(p->BuildInternalModule()) << p->error();
}
TEST_F(SpvParserTest, ConvertType_PointerInput) {
- auto p = parser(test::Assemble(Preamble() + R"(
+ auto p = parser(test::Assemble(Preamble() + R"(
%float = OpTypeFloat 32
%3 = OpTypePointer Input %float
)" + MainBody()));
- EXPECT_TRUE(p->BuildInternalModule());
+ EXPECT_TRUE(p->BuildInternalModule());
- auto* type = p->ConvertType(3);
- EXPECT_TRUE(type->Is<Pointer>());
- auto* ptr_ty = type->As<Pointer>();
- EXPECT_NE(ptr_ty, nullptr);
- EXPECT_TRUE(ptr_ty->type->Is<F32>());
- EXPECT_EQ(ptr_ty->storage_class, ast::StorageClass::kPrivate);
- EXPECT_TRUE(p->error().empty());
+ auto* type = p->ConvertType(3);
+ EXPECT_TRUE(type->Is<Pointer>());
+ auto* ptr_ty = type->As<Pointer>();
+ EXPECT_NE(ptr_ty, nullptr);
+ EXPECT_TRUE(ptr_ty->type->Is<F32>());
+ EXPECT_EQ(ptr_ty->storage_class, ast::StorageClass::kPrivate);
+ EXPECT_TRUE(p->error().empty());
}
TEST_F(SpvParserTest, ConvertType_PointerOutput) {
- auto p = parser(test::Assemble(Preamble() + R"(
+ auto p = parser(test::Assemble(Preamble() + R"(
%float = OpTypeFloat 32
%3 = OpTypePointer Output %float
)" + MainBody()));
- EXPECT_TRUE(p->BuildInternalModule());
+ EXPECT_TRUE(p->BuildInternalModule());
- auto* type = p->ConvertType(3);
- EXPECT_TRUE(type->Is<Pointer>());
- auto* ptr_ty = type->As<Pointer>();
- EXPECT_NE(ptr_ty, nullptr);
- EXPECT_TRUE(ptr_ty->type->Is<F32>());
- EXPECT_EQ(ptr_ty->storage_class, ast::StorageClass::kPrivate);
- EXPECT_TRUE(p->error().empty());
+ auto* type = p->ConvertType(3);
+ EXPECT_TRUE(type->Is<Pointer>());
+ auto* ptr_ty = type->As<Pointer>();
+ EXPECT_NE(ptr_ty, nullptr);
+ EXPECT_TRUE(ptr_ty->type->Is<F32>());
+ EXPECT_EQ(ptr_ty->storage_class, ast::StorageClass::kPrivate);
+ EXPECT_TRUE(p->error().empty());
}
TEST_F(SpvParserTest, ConvertType_PointerUniform) {
- auto p = parser(test::Assemble(Preamble() + R"(
+ auto p = parser(test::Assemble(Preamble() + R"(
%float = OpTypeFloat 32
%3 = OpTypePointer Uniform %float
)" + MainBody()));
- EXPECT_TRUE(p->BuildInternalModule());
+ EXPECT_TRUE(p->BuildInternalModule());
- auto* type = p->ConvertType(3);
- EXPECT_TRUE(type->Is<Pointer>());
- auto* ptr_ty = type->As<Pointer>();
- EXPECT_NE(ptr_ty, nullptr);
- EXPECT_TRUE(ptr_ty->type->Is<F32>());
- EXPECT_EQ(ptr_ty->storage_class, ast::StorageClass::kUniform);
- EXPECT_TRUE(p->error().empty());
+ auto* type = p->ConvertType(3);
+ EXPECT_TRUE(type->Is<Pointer>());
+ auto* ptr_ty = type->As<Pointer>();
+ EXPECT_NE(ptr_ty, nullptr);
+ EXPECT_TRUE(ptr_ty->type->Is<F32>());
+ EXPECT_EQ(ptr_ty->storage_class, ast::StorageClass::kUniform);
+ EXPECT_TRUE(p->error().empty());
}
TEST_F(SpvParserTest, ConvertType_PointerWorkgroup) {
- auto p = parser(test::Assemble(Preamble() + R"(
+ auto p = parser(test::Assemble(Preamble() + R"(
%float = OpTypeFloat 32
%3 = OpTypePointer Workgroup %float
)" + MainBody()));
- EXPECT_TRUE(p->BuildInternalModule());
+ EXPECT_TRUE(p->BuildInternalModule());
- auto* type = p->ConvertType(3);
- EXPECT_TRUE(type->Is<Pointer>());
- auto* ptr_ty = type->As<Pointer>();
- EXPECT_NE(ptr_ty, nullptr);
- EXPECT_TRUE(ptr_ty->type->Is<F32>());
- EXPECT_EQ(ptr_ty->storage_class, ast::StorageClass::kWorkgroup);
- EXPECT_TRUE(p->error().empty());
+ auto* type = p->ConvertType(3);
+ EXPECT_TRUE(type->Is<Pointer>());
+ auto* ptr_ty = type->As<Pointer>();
+ EXPECT_NE(ptr_ty, nullptr);
+ EXPECT_TRUE(ptr_ty->type->Is<F32>());
+ EXPECT_EQ(ptr_ty->storage_class, ast::StorageClass::kWorkgroup);
+ EXPECT_TRUE(p->error().empty());
}
TEST_F(SpvParserTest, ConvertType_PointerUniformConstant) {
- auto p = parser(test::Assemble(Preamble() + R"(
+ auto p = parser(test::Assemble(Preamble() + R"(
%float = OpTypeFloat 32
%3 = OpTypePointer UniformConstant %float
)" + MainBody()));
- EXPECT_TRUE(p->BuildInternalModule());
+ EXPECT_TRUE(p->BuildInternalModule());
- auto* type = p->ConvertType(3);
- EXPECT_TRUE(type->Is<Pointer>());
- auto* ptr_ty = type->As<Pointer>();
- EXPECT_NE(ptr_ty, nullptr);
- EXPECT_TRUE(ptr_ty->type->Is<F32>());
- EXPECT_EQ(ptr_ty->storage_class, ast::StorageClass::kNone);
- EXPECT_TRUE(p->error().empty());
+ auto* type = p->ConvertType(3);
+ EXPECT_TRUE(type->Is<Pointer>());
+ auto* ptr_ty = type->As<Pointer>();
+ EXPECT_NE(ptr_ty, nullptr);
+ EXPECT_TRUE(ptr_ty->type->Is<F32>());
+ EXPECT_EQ(ptr_ty->storage_class, ast::StorageClass::kNone);
+ EXPECT_TRUE(p->error().empty());
}
TEST_F(SpvParserTest, ConvertType_PointerStorageBuffer) {
- auto p = parser(test::Assemble(Preamble() + R"(
+ auto p = parser(test::Assemble(Preamble() + R"(
%float = OpTypeFloat 32
%3 = OpTypePointer StorageBuffer %float
)" + MainBody()));
- EXPECT_TRUE(p->BuildInternalModule());
+ EXPECT_TRUE(p->BuildInternalModule());
- auto* type = p->ConvertType(3);
- EXPECT_TRUE(type->Is<Pointer>());
- auto* ptr_ty = type->As<Pointer>();
- EXPECT_NE(ptr_ty, nullptr);
- EXPECT_TRUE(ptr_ty->type->Is<F32>());
- EXPECT_EQ(ptr_ty->storage_class, ast::StorageClass::kStorage);
- EXPECT_TRUE(p->error().empty());
+ auto* type = p->ConvertType(3);
+ EXPECT_TRUE(type->Is<Pointer>());
+ auto* ptr_ty = type->As<Pointer>();
+ EXPECT_NE(ptr_ty, nullptr);
+ EXPECT_TRUE(ptr_ty->type->Is<F32>());
+ EXPECT_EQ(ptr_ty->storage_class, ast::StorageClass::kStorage);
+ EXPECT_TRUE(p->error().empty());
}
TEST_F(SpvParserTest, ConvertType_PointerPrivate) {
- auto p = parser(test::Assemble(Preamble() + R"(
+ auto p = parser(test::Assemble(Preamble() + R"(
%float = OpTypeFloat 32
%3 = OpTypePointer Private %float
)" + MainBody()));
- EXPECT_TRUE(p->BuildInternalModule());
+ EXPECT_TRUE(p->BuildInternalModule());
- auto* type = p->ConvertType(3);
- EXPECT_TRUE(type->Is<Pointer>());
- auto* ptr_ty = type->As<Pointer>();
- EXPECT_NE(ptr_ty, nullptr);
- EXPECT_TRUE(ptr_ty->type->Is<F32>());
- EXPECT_EQ(ptr_ty->storage_class, ast::StorageClass::kPrivate);
- EXPECT_TRUE(p->error().empty());
+ auto* type = p->ConvertType(3);
+ EXPECT_TRUE(type->Is<Pointer>());
+ auto* ptr_ty = type->As<Pointer>();
+ EXPECT_NE(ptr_ty, nullptr);
+ EXPECT_TRUE(ptr_ty->type->Is<F32>());
+ EXPECT_EQ(ptr_ty->storage_class, ast::StorageClass::kPrivate);
+ EXPECT_TRUE(p->error().empty());
}
TEST_F(SpvParserTest, ConvertType_PointerFunction) {
- auto p = parser(test::Assemble(Preamble() + R"(
+ auto p = parser(test::Assemble(Preamble() + R"(
%float = OpTypeFloat 32
%3 = OpTypePointer Function %float
)" + MainBody()));
- EXPECT_TRUE(p->BuildInternalModule());
+ EXPECT_TRUE(p->BuildInternalModule());
- auto* type = p->ConvertType(3);
- EXPECT_TRUE(type->Is<Pointer>());
- auto* ptr_ty = type->As<Pointer>();
- EXPECT_NE(ptr_ty, nullptr);
- EXPECT_TRUE(ptr_ty->type->Is<F32>());
- EXPECT_EQ(ptr_ty->storage_class, ast::StorageClass::kFunction);
- EXPECT_TRUE(p->error().empty());
+ auto* type = p->ConvertType(3);
+ EXPECT_TRUE(type->Is<Pointer>());
+ auto* ptr_ty = type->As<Pointer>();
+ EXPECT_NE(ptr_ty, nullptr);
+ EXPECT_TRUE(ptr_ty->type->Is<F32>());
+ EXPECT_EQ(ptr_ty->storage_class, ast::StorageClass::kFunction);
+ EXPECT_TRUE(p->error().empty());
}
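The pointer tests above, together with the pointer-to-pointer case just below, pin down which WGSL storage class the reader assigns for each SPIR-V storage class. A compact restatement of that mapping as these tests expect it — an illustrative helper written against the headers this file already includes (the spirv.h enums and the Tint AST), not the parser's actual implementation:

// Illustrative only: the SPIR-V -> WGSL storage-class mapping the tests above expect.
ast::StorageClass ExpectedStorageClass(SpvStorageClass sc) {
    switch (sc) {
        case SpvStorageClassInput:    // Input, Output and Private pointers
        case SpvStorageClassOutput:   // all land in kPrivate
        case SpvStorageClassPrivate:
            return ast::StorageClass::kPrivate;
        case SpvStorageClassUniform:
            return ast::StorageClass::kUniform;
        case SpvStorageClassWorkgroup:
            return ast::StorageClass::kWorkgroup;
        case SpvStorageClassUniformConstant:
            return ast::StorageClass::kNone;
        case SpvStorageClassStorageBuffer:
            return ast::StorageClass::kStorage;
        case SpvStorageClassFunction:
            return ast::StorageClass::kFunction;
        default:
            // Storage classes not exercised by these tests.
            return ast::StorageClass::kNone;
    }
}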
TEST_F(SpvParserTest, ConvertType_PointerToPointer) {
- // FYI: The reader supports pointer-to-pointer even while WebGPU does not.
- auto p = parser(test::Assemble(Preamble() + R"(
+ // FYI: The reader supports pointer-to-pointer even while WebGPU does not.
+ auto p = parser(test::Assemble(Preamble() + R"(
%float = OpTypeFloat 32
%42 = OpTypePointer Output %float
%3 = OpTypePointer Input %42
)" + MainBody()));
- EXPECT_TRUE(p->BuildInternalModule());
+ EXPECT_TRUE(p->BuildInternalModule());
- auto* type = p->ConvertType(3);
- EXPECT_NE(type, nullptr);
- EXPECT_TRUE(type->Is<Pointer>());
+ auto* type = p->ConvertType(3);
+ EXPECT_NE(type, nullptr);
+ EXPECT_TRUE(type->Is<Pointer>());
- auto* ptr_ty = type->As<Pointer>();
- EXPECT_NE(ptr_ty, nullptr);
- EXPECT_EQ(ptr_ty->storage_class, ast::StorageClass::kPrivate);
- EXPECT_TRUE(ptr_ty->type->Is<Pointer>());
+ auto* ptr_ty = type->As<Pointer>();
+ EXPECT_NE(ptr_ty, nullptr);
+ EXPECT_EQ(ptr_ty->storage_class, ast::StorageClass::kPrivate);
+ EXPECT_TRUE(ptr_ty->type->Is<Pointer>());
- auto* ptr_ptr_ty = ptr_ty->type->As<Pointer>();
- EXPECT_NE(ptr_ptr_ty, nullptr);
- EXPECT_EQ(ptr_ptr_ty->storage_class, ast::StorageClass::kPrivate);
- EXPECT_TRUE(ptr_ptr_ty->type->Is<F32>());
+ auto* ptr_ptr_ty = ptr_ty->type->As<Pointer>();
+ EXPECT_NE(ptr_ptr_ty, nullptr);
+ EXPECT_EQ(ptr_ptr_ty->storage_class, ast::StorageClass::kPrivate);
+ EXPECT_TRUE(ptr_ptr_ty->type->Is<F32>());
- EXPECT_TRUE(p->error().empty());
+ EXPECT_TRUE(p->error().empty());
}
TEST_F(SpvParserTest, ConvertType_Sampler_PretendVoid) {
- // We fake the type support for samplers, images, and sampled images.
- auto p = parser(test::Assemble(Preamble() + R"(
+ // We fake the type support for samplers, images, and sampled images.
+ auto p = parser(test::Assemble(Preamble() + R"(
%1 = OpTypeSampler
)" + MainBody()));
- EXPECT_TRUE(p->BuildInternalModule());
+ EXPECT_TRUE(p->BuildInternalModule());
- auto* type = p->ConvertType(1);
- EXPECT_TRUE(type->Is<Void>());
- EXPECT_TRUE(p->error().empty());
+ auto* type = p->ConvertType(1);
+ EXPECT_TRUE(type->Is<Void>());
+ EXPECT_TRUE(p->error().empty());
}
TEST_F(SpvParserTest, ConvertType_Image_PretendVoid) {
- // We fake the type support for samplers, images, and sampled images.
- auto p = parser(test::Assemble(Preamble() + R"(
+ // We fake the type support for samplers, images, and sampled images.
+ auto p = parser(test::Assemble(Preamble() + R"(
%float = OpTypeFloat 32
%1 = OpTypeImage %float 2D 0 0 0 1 Unknown
)" + MainBody()));
- EXPECT_TRUE(p->BuildInternalModule());
+ EXPECT_TRUE(p->BuildInternalModule());
- auto* type = p->ConvertType(1);
- EXPECT_TRUE(type->Is<Void>());
- EXPECT_TRUE(p->error().empty());
+ auto* type = p->ConvertType(1);
+ EXPECT_TRUE(type->Is<Void>());
+ EXPECT_TRUE(p->error().empty());
}
TEST_F(SpvParserTest, ConvertType_SampledImage_PretendVoid) {
- auto p = parser(test::Assemble(Preamble() + R"(
+ auto p = parser(test::Assemble(Preamble() + R"(
%float = OpTypeFloat 32
%im = OpTypeImage %float 2D 0 0 0 1 Unknown
%1 = OpTypeSampledImage %im
)" + MainBody()));
- EXPECT_TRUE(p->BuildInternalModule());
+ EXPECT_TRUE(p->BuildInternalModule());
- auto* type = p->ConvertType(1);
- EXPECT_TRUE(type->Is<Void>());
- EXPECT_TRUE(p->error().empty());
+ auto* type = p->ConvertType(1);
+ EXPECT_TRUE(type->Is<Void>());
+ EXPECT_TRUE(p->error().empty());
}
} // namespace
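The parser_impl_convert_type_test.cc hunks above are essentially a mechanical reformat: the tests move from two-space to four-space indentation and previously wrapped EXPECT_THAT calls are rejoined, with no change to what each test checks. Every ConvertType test follows the same shape; a minimal sketch of that shape, assuming the SpvParserTest fixture and the parser()/test::Assemble()/Preamble()/MainBody() helpers defined in that file (the successful f32 case is illustrative only, not a test added by this patch):

TEST_F(SpvParserTest, ConvertType_F32_Sketch) {
    // Declare the type under test in a small assembled module; ID 6 mirrors
    // the width-19 test above and is otherwise unused by the preamble.
    auto p = parser(test::Assemble(Preamble() + "%6 = OpTypeFloat 32" + MainBody()));
    EXPECT_TRUE(p->BuildInternalModule());

    // Convert the SPIR-V type by result ID and inspect the reader-side type.
    auto* type = p->ConvertType(6);
    ASSERT_NE(type, nullptr);
    EXPECT_TRUE(type->Is<F32>());
    EXPECT_TRUE(p->error().empty());
}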
diff --git a/chromium/third_party/dawn/src/tint/reader/spirv/parser_impl_function_decl_test.cc b/chromium/third_party/dawn/src/tint/reader/spirv/parser_impl_function_decl_test.cc
index 4e10725e6dc..459460d6f0c 100644
--- a/chromium/third_party/dawn/src/tint/reader/spirv/parser_impl_function_decl_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/spirv/parser_impl_function_decl_test.cc
@@ -22,21 +22,21 @@ namespace {
using ::testing::HasSubstr;
std::string Caps() {
- return R"(
+ return R"(
OpCapability Shader
OpMemoryModel Logical Simple
)";
}
std::string Preamble() {
- return Caps() + R"(
+ return Caps() + R"(
OpEntryPoint Fragment %main "x_100"
OpExecutionMode %main OriginUpperLeft
)";
}
std::string MainBody() {
- return R"(
+ return R"(
%main = OpFunction %void None %voidfn
%main_entry = OpLabel
OpReturn
@@ -47,15 +47,15 @@ std::string MainBody() {
/// @returns a SPIR-V assembly segment which assigns debug names
/// to particular IDs.
std::string Names(std::vector<std::string> ids) {
- std::ostringstream outs;
- for (auto& id : ids) {
- outs << " OpName %" << id << " \"" << id << "\"\n";
- }
- return outs.str();
+ std::ostringstream outs;
+ for (auto& id : ids) {
+ outs << " OpName %" << id << " \"" << id << "\"\n";
+ }
+ return outs.str();
}
std::string CommonTypes() {
- return R"(
+ return R"(
%void = OpTypeVoid
%voidfn = OpTypeFunction %void
%float = OpTypeFloat 32
@@ -65,7 +65,7 @@ std::string CommonTypes() {
}
std::string BuiltinPosition() {
- return R"(OpDecorate %position BuiltIn Position
+ return R"(OpDecorate %position BuiltIn Position
%float = OpTypeFloat 32
%v4float = OpTypeVector %float 4
%ptr = OpTypePointer Output %v4float
@@ -78,147 +78,143 @@ std::string BuiltinPosition() {
}
TEST_F(SpvParserTest, EmitFunctions_NoFunctions) {
- auto p = parser(test::Assemble(
- R"(
+ auto p = parser(test::Assemble(
+ R"(
OpCapability Shader
OpMemoryModel Logical Simple
)" + CommonTypes()));
- EXPECT_TRUE(p->BuildAndParseInternalModule());
- EXPECT_TRUE(p->error().empty());
- Program program = p->program();
- const auto program_ast = test::ToString(program);
- EXPECT_THAT(program_ast, Not(HasSubstr("Function{")));
- p->SkipDumpingPending("Not valid for Vulkan: needs an entry point");
+ EXPECT_TRUE(p->BuildAndParseInternalModule());
+ EXPECT_TRUE(p->error().empty());
+ Program program = p->program();
+ const auto program_ast = test::ToString(program);
+ EXPECT_THAT(program_ast, Not(HasSubstr("Function{")));
+ p->SkipDumpingPending("Not valid for Vulkan: needs an entry point");
}
TEST_F(SpvParserTest, EmitFunctions_FunctionWithoutBody) {
- auto p =
- parser(test::Assemble(Preamble() + Names({"main"}) + CommonTypes() + R"(
+ auto p = parser(test::Assemble(Preamble() + Names({"main"}) + CommonTypes() + R"(
%main = OpFunction %void None %voidfn
OpFunctionEnd
)"));
- EXPECT_TRUE(p->BuildAndParseInternalModule());
- EXPECT_TRUE(p->error().empty());
- Program program = p->program();
- const auto program_ast = test::ToString(program);
- EXPECT_THAT(program_ast, Not(HasSubstr("Function{")));
- p->SkipDumpingPending("Missing an entry point body requires Linkage");
+ EXPECT_TRUE(p->BuildAndParseInternalModule());
+ EXPECT_TRUE(p->error().empty());
+ Program program = p->program();
+ const auto program_ast = test::ToString(program);
+ EXPECT_THAT(program_ast, Not(HasSubstr("Function{")));
+ p->SkipDumpingPending("Missing an entry point body requires Linkage");
}
TEST_F(SpvParserTest, EmitFunctions_Function_EntryPoint_Vertex) {
- std::string input = Caps() +
- R"(OpEntryPoint Vertex %main "main" %position )" +
- Names({"main"}) + BuiltinPosition() + R"(
+ std::string input = Caps() + R"(OpEntryPoint Vertex %main "main" %position )" +
+ Names({"main"}) + BuiltinPosition() + R"(
%main = OpFunction %void None %voidfn
%entry = OpLabel
OpReturn
OpFunctionEnd)";
- auto p = parser(test::Assemble(input));
- ASSERT_TRUE(p->BuildAndParseInternalModule());
- ASSERT_TRUE(p->error().empty()) << p->error();
- Program program = p->program();
- const auto program_ast = test::ToString(program);
- EXPECT_THAT(program_ast, HasSubstr(R"(
+ auto p = parser(test::Assemble(input));
+ ASSERT_TRUE(p->BuildAndParseInternalModule());
+ ASSERT_TRUE(p->error().empty()) << p->error();
+ Program program = p->program();
+ const auto program_ast = test::ToString(program);
+ EXPECT_THAT(program_ast, HasSubstr(R"(
struct main_out {
@builtin(position)
x_2_1 : vec4<f32>,
}
)")) << program_ast;
- EXPECT_THAT(program_ast, HasSubstr(R"(
-@stage(vertex)
+ EXPECT_THAT(program_ast, HasSubstr(R"(
+@vertex
fn main() -> main_out {
)"));
}
TEST_F(SpvParserTest, EmitFunctions_Function_EntryPoint_Fragment) {
- std::string input = Caps() + R"(
+ std::string input = Caps() + R"(
OpEntryPoint Fragment %main "main"
OpExecutionMode %main OriginUpperLeft
)" + Names({"main"}) + CommonTypes() +
- MainBody();
-
- auto p = parser(test::Assemble(input));
- ASSERT_TRUE(p->BuildAndParseInternalModule());
- ASSERT_TRUE(p->error().empty()) << p->error();
- Program program = p->program();
- const auto program_ast = test::ToString(program);
- EXPECT_THAT(program_ast, HasSubstr(R"(
-@stage(fragment)
+ MainBody();
+
+ auto p = parser(test::Assemble(input));
+ ASSERT_TRUE(p->BuildAndParseInternalModule());
+ ASSERT_TRUE(p->error().empty()) << p->error();
+ Program program = p->program();
+ const auto program_ast = test::ToString(program);
+ EXPECT_THAT(program_ast, HasSubstr(R"(
+@fragment
fn main() {
)"));
}
TEST_F(SpvParserTest, EmitFunctions_Function_EntryPoint_GLCompute) {
- std::string input = Caps() + R"(
+ std::string input = Caps() + R"(
OpEntryPoint GLCompute %main "main"
OpExecutionMode %main LocalSize 1 1 1
)" + Names({"main"}) + CommonTypes() +
- MainBody();
-
- auto p = parser(test::Assemble(input));
- ASSERT_TRUE(p->BuildAndParseInternalModule());
- ASSERT_TRUE(p->error().empty()) << p->error();
- Program program = p->program();
- const auto program_ast = test::ToString(program);
- EXPECT_THAT(program_ast, HasSubstr(R"(
-@stage(compute) @workgroup_size(1, 1, 1)
+ MainBody();
+
+ auto p = parser(test::Assemble(input));
+ ASSERT_TRUE(p->BuildAndParseInternalModule());
+ ASSERT_TRUE(p->error().empty()) << p->error();
+ Program program = p->program();
+ const auto program_ast = test::ToString(program);
+ EXPECT_THAT(program_ast, HasSubstr(R"(
+@compute @workgroup_size(1i, 1i, 1i)
fn main() {
)"));
}
TEST_F(SpvParserTest, EmitFunctions_Function_EntryPoint_MultipleEntryPoints) {
- std::string input = Caps() +
- R"(
+ std::string input = Caps() +
+ R"(
OpEntryPoint Fragment %main "first_shader"
OpEntryPoint Fragment %main "second_shader"
OpExecutionMode %main OriginUpperLeft
)" + Names({"main"}) + CommonTypes() +
- MainBody();
-
- auto p = parser(test::Assemble(input));
- ASSERT_TRUE(p->BuildAndParseInternalModule());
- ASSERT_TRUE(p->error().empty()) << p->error();
- Program program = p->program();
- const auto program_ast = test::ToString(program);
- EXPECT_THAT(program_ast, HasSubstr(R"(
-@stage(fragment)
+ MainBody();
+
+ auto p = parser(test::Assemble(input));
+ ASSERT_TRUE(p->BuildAndParseInternalModule());
+ ASSERT_TRUE(p->error().empty()) << p->error();
+ Program program = p->program();
+ const auto program_ast = test::ToString(program);
+ EXPECT_THAT(program_ast, HasSubstr(R"(
+@fragment
fn first_shader() {
)"));
- EXPECT_THAT(program_ast, HasSubstr(R"(
-@stage(fragment)
+ EXPECT_THAT(program_ast, HasSubstr(R"(
+@fragment
fn second_shader() {
)"));
}
-TEST_F(SpvParserTest,
- EmitFunctions_Function_EntryPoint_GLCompute_LocalSize_Only) {
- std::string input = Caps() + R"(
+TEST_F(SpvParserTest, EmitFunctions_Function_EntryPoint_GLCompute_LocalSize_Only) {
+ std::string input = Caps() + R"(
OpEntryPoint GLCompute %main "comp_main"
OpExecutionMode %main LocalSize 2 4 8
)" + Names({"main"}) + CommonTypes() +
- R"(
+ R"(
%main = OpFunction %void None %voidfn
%entry = OpLabel
OpReturn
OpFunctionEnd)";
- auto p = parser(test::Assemble(input));
- ASSERT_TRUE(p->BuildAndParseInternalModule());
- ASSERT_TRUE(p->error().empty()) << p->error();
- Program program = p->program();
- const auto program_ast = test::ToString(program);
- EXPECT_THAT(program_ast, HasSubstr(R"(
-@stage(compute) @workgroup_size(2, 4, 8)
+ auto p = parser(test::Assemble(input));
+ ASSERT_TRUE(p->BuildAndParseInternalModule());
+ ASSERT_TRUE(p->error().empty()) << p->error();
+ Program program = p->program();
+ const auto program_ast = test::ToString(program);
+ EXPECT_THAT(program_ast, HasSubstr(R"(
+@compute @workgroup_size(2i, 4i, 8i)
fn comp_main() {
)")) << program_ast;
}
-TEST_F(SpvParserTest,
- EmitFunctions_Function_EntryPoint_WorkgroupSizeBuiltin_Constant_Only) {
- std::string input = Caps() + R"(OpEntryPoint GLCompute %main "comp_main"
+TEST_F(SpvParserTest, EmitFunctions_Function_EntryPoint_WorkgroupSizeBuiltin_Constant_Only) {
+ std::string input = Caps() + R"(OpEntryPoint GLCompute %main "comp_main"
OpDecorate %wgsize BuiltIn WorkgroupSize
)" + CommonTypes() + R"(
%uvec3 = OpTypeVector %uint 3
@@ -231,22 +227,20 @@ OpDecorate %wgsize BuiltIn WorkgroupSize
OpReturn
OpFunctionEnd)";
- auto p = parser(test::Assemble(input));
- ASSERT_TRUE(p->BuildAndParseInternalModule());
- ASSERT_TRUE(p->error().empty()) << p->error();
- Program program = p->program();
- const auto program_ast = test::ToString(program);
- EXPECT_THAT(program_ast, HasSubstr(R"(
-@stage(compute) @workgroup_size(3, 5, 7)
+ auto p = parser(test::Assemble(input));
+ ASSERT_TRUE(p->BuildAndParseInternalModule());
+ ASSERT_TRUE(p->error().empty()) << p->error();
+ Program program = p->program();
+ const auto program_ast = test::ToString(program);
+ EXPECT_THAT(program_ast, HasSubstr(R"(
+@compute @workgroup_size(3i, 5i, 7i)
fn comp_main() {
)")) << program_ast;
}
-TEST_F(
- SpvParserTest,
- EmitFunctions_Function_EntryPoint_WorkgroupSizeBuiltin_SpecConstant_Only) {
- std::string input = Caps() +
- R"(OpEntryPoint GLCompute %main "comp_main"
+TEST_F(SpvParserTest, EmitFunctions_Function_EntryPoint_WorkgroupSizeBuiltin_SpecConstant_Only) {
+ std::string input = Caps() +
+ R"(OpEntryPoint GLCompute %main "comp_main"
OpDecorate %wgsize BuiltIn WorkgroupSize
OpDecorate %uint_3 SpecId 0
OpDecorate %uint_5 SpecId 1
@@ -262,22 +256,20 @@ OpDecorate %uint_7 SpecId 2
OpReturn
OpFunctionEnd)";
- auto p = parser(test::Assemble(input));
- ASSERT_TRUE(p->BuildAndParseInternalModule());
- ASSERT_TRUE(p->error().empty()) << p->error();
- Program program = p->program();
- const auto program_ast = test::ToString(program);
- EXPECT_THAT(program_ast, HasSubstr(R"(
-@stage(compute) @workgroup_size(3, 5, 7)
+ auto p = parser(test::Assemble(input));
+ ASSERT_TRUE(p->BuildAndParseInternalModule());
+ ASSERT_TRUE(p->error().empty()) << p->error();
+ Program program = p->program();
+ const auto program_ast = test::ToString(program);
+ EXPECT_THAT(program_ast, HasSubstr(R"(
+@compute @workgroup_size(3i, 5i, 7i)
fn comp_main() {
)")) << program_ast;
}
-TEST_F(
- SpvParserTest,
- EmitFunctions_Function_EntryPoint_WorkgroupSize_MixedConstantSpecConstant) {
- std::string input = Caps() +
- R"(OpEntryPoint GLCompute %main "comp_main"
+TEST_F(SpvParserTest, EmitFunctions_Function_EntryPoint_WorkgroupSize_MixedConstantSpecConstant) {
+ std::string input = Caps() +
+ R"(OpEntryPoint GLCompute %main "comp_main"
OpDecorate %wgsize BuiltIn WorkgroupSize
OpDecorate %uint_3 SpecId 0
OpDecorate %uint_7 SpecId 2
@@ -292,24 +284,23 @@ OpDecorate %uint_7 SpecId 2
OpReturn
OpFunctionEnd)";
- auto p = parser(test::Assemble(input));
- ASSERT_TRUE(p->BuildAndParseInternalModule());
- ASSERT_TRUE(p->error().empty()) << p->error();
- Program program = p->program();
- const auto program_ast = test::ToString(program);
- EXPECT_THAT(program_ast, HasSubstr(R"(
-@stage(compute) @workgroup_size(3, 5, 7)
+ auto p = parser(test::Assemble(input));
+ ASSERT_TRUE(p->BuildAndParseInternalModule());
+ ASSERT_TRUE(p->error().empty()) << p->error();
+ Program program = p->program();
+ const auto program_ast = test::ToString(program);
+ EXPECT_THAT(program_ast, HasSubstr(R"(
+@compute @workgroup_size(3i, 5i, 7i)
fn comp_main() {
)")) << program_ast;
}
-TEST_F(
- SpvParserTest,
- // I had to shorten the name to pass the linter.
- EmitFunctions_Function_EntryPoint_LocalSize_And_WGSBuiltin_SpecConstant) {
- // WorkgroupSize builtin wins.
- std::string input = Caps() +
- R"(OpEntryPoint GLCompute %main "comp_main"
+TEST_F(SpvParserTest,
+ // I had to shorten the name to pass the linter.
+ EmitFunctions_Function_EntryPoint_LocalSize_And_WGSBuiltin_SpecConstant) {
+ // WorkgroupSize builtin wins.
+ std::string input = Caps() +
+ R"(OpEntryPoint GLCompute %main "comp_main"
OpExecutionMode %main LocalSize 2 4 8
OpDecorate %wgsize BuiltIn WorkgroupSize
OpDecorate %uint_3 SpecId 0
@@ -326,38 +317,36 @@ OpDecorate %uint_7 SpecId 2
OpReturn
OpFunctionEnd)";
- auto p = parser(test::Assemble(input));
- ASSERT_TRUE(p->BuildAndParseInternalModule());
- ASSERT_TRUE(p->error().empty()) << p->error();
- Program program = p->program();
- const auto program_ast = test::ToString(program);
- EXPECT_THAT(program_ast, HasSubstr(R"(
-@stage(compute) @workgroup_size(3, 5, 7)
+ auto p = parser(test::Assemble(input));
+ ASSERT_TRUE(p->BuildAndParseInternalModule());
+ ASSERT_TRUE(p->error().empty()) << p->error();
+ Program program = p->program();
+ const auto program_ast = test::ToString(program);
+ EXPECT_THAT(program_ast, HasSubstr(R"(
+@compute @workgroup_size(3i, 5i, 7i)
fn comp_main() {
)")) << program_ast;
}
TEST_F(SpvParserTest, EmitFunctions_VoidFunctionWithoutParams) {
- auto p = parser(test::Assemble(Preamble() + Names({"another_function"}) +
- CommonTypes() + R"(
+ auto p = parser(test::Assemble(Preamble() + Names({"another_function"}) + CommonTypes() + R"(
%another_function = OpFunction %void None %voidfn
%entry = OpLabel
OpReturn
OpFunctionEnd
)" + MainBody()));
- EXPECT_TRUE(p->BuildAndParseInternalModule());
- EXPECT_TRUE(p->error().empty());
- Program program = p->program();
- const auto program_ast = test::ToString(program);
- EXPECT_THAT(program_ast, HasSubstr(R"(fn another_function() {
+ EXPECT_TRUE(p->BuildAndParseInternalModule());
+ EXPECT_TRUE(p->error().empty());
+ Program program = p->program();
+ const auto program_ast = test::ToString(program);
+ EXPECT_THAT(program_ast, HasSubstr(R"(fn another_function() {
)"));
}
TEST_F(SpvParserTest, EmitFunctions_CalleePrecedesCaller) {
- auto p = parser(test::Assemble(
- Preamble() +
- Names({"root", "branch", "leaf", "leaf_result", "branch_result"}) +
- CommonTypes() + R"(
+ auto p = parser(test::Assemble(
+ Preamble() + Names({"root", "branch", "leaf", "leaf_result", "branch_result"}) +
+ CommonTypes() + R"(
%uintfn = OpTypeFunction %uint
%uint_0 = OpConstant %uint 0
@@ -378,11 +367,11 @@ TEST_F(SpvParserTest, EmitFunctions_CalleePrecedesCaller) {
OpReturnValue %uint_0
OpFunctionEnd
)" + MainBody()));
- EXPECT_TRUE(p->BuildAndParseInternalModule());
- EXPECT_TRUE(p->error().empty());
- Program program = p->program();
- const auto program_ast = test::ToString(program);
- EXPECT_THAT(program_ast, HasSubstr(R"(fn leaf() -> u32 {
+ EXPECT_TRUE(p->BuildAndParseInternalModule());
+ EXPECT_TRUE(p->error().empty());
+ Program program = p->program();
+ const auto program_ast = test::ToString(program);
+ EXPECT_THAT(program_ast, HasSubstr(R"(fn leaf() -> u32 {
return 0u;
}
@@ -399,8 +388,7 @@ fn root() {
}
TEST_F(SpvParserTest, EmitFunctions_NonVoidResultType) {
- auto p = parser(
- test::Assemble(Preamble() + Names({"ret_float"}) + CommonTypes() + R"(
+ auto p = parser(test::Assemble(Preamble() + Names({"ret_float"}) + CommonTypes() + R"(
%float_0 = OpConstant %float 0.0
%fn_ret_float = OpTypeFunction %float
@@ -409,19 +397,28 @@ TEST_F(SpvParserTest, EmitFunctions_NonVoidResultType) {
OpReturnValue %float_0
OpFunctionEnd
)" + MainBody()));
- EXPECT_TRUE(p->BuildAndParseInternalModule());
- EXPECT_TRUE(p->error().empty());
- Program program = p->program();
- const auto program_ast = test::ToString(program);
- EXPECT_THAT(program_ast, HasSubstr(R"(fn ret_float() -> f32 {
- return 0.0;
+ EXPECT_TRUE(p->BuildAndParseInternalModule());
+ EXPECT_TRUE(p->error().empty());
+ Program program = p->program();
+ const auto program_ast = test::ToString(program);
+ EXPECT_THAT(program_ast, HasSubstr(R"(fn ret_float() -> f32 {
+ return 0.0f;
+}
+
+fn x_100_1() {
+ return;
+}
+
+@fragment
+fn x_100() {
+ x_100_1();
}
)")) << program_ast;
}
TEST_F(SpvParserTest, EmitFunctions_MixedParamTypes) {
- auto p = parser(test::Assemble(
- Preamble() + Names({"mixed_params", "a", "b", "c"}) + CommonTypes() + R"(
+ auto p = parser(
+ test::Assemble(Preamble() + Names({"mixed_params", "a", "b", "c"}) + CommonTypes() + R"(
%fn_mixed_params = OpTypeFunction %void %uint %float %int
%mixed_params = OpFunction %void None %fn_mixed_params
@@ -432,20 +429,18 @@ TEST_F(SpvParserTest, EmitFunctions_MixedParamTypes) {
OpReturn
OpFunctionEnd
)" + MainBody()));
- EXPECT_TRUE(p->BuildAndParseInternalModule());
- EXPECT_TRUE(p->error().empty());
- Program program = p->program();
- const auto program_ast = test::ToString(program);
- EXPECT_THAT(program_ast,
- HasSubstr(R"(fn mixed_params(a : u32, b : f32, c : i32) {
+ EXPECT_TRUE(p->BuildAndParseInternalModule());
+ EXPECT_TRUE(p->error().empty());
+ Program program = p->program();
+ const auto program_ast = test::ToString(program);
+ EXPECT_THAT(program_ast, HasSubstr(R"(fn mixed_params(a : u32, b : f32, c : i32) {
return;
}
)"));
}
TEST_F(SpvParserTest, EmitFunctions_GenerateParamNames) {
- auto p = parser(
- test::Assemble(Preamble() + Names({"mixed_params"}) + CommonTypes() + R"(
+ auto p = parser(test::Assemble(Preamble() + Names({"mixed_params"}) + CommonTypes() + R"(
%fn_mixed_params = OpTypeFunction %void %uint %float %int
%mixed_params = OpFunction %void None %fn_mixed_params
@@ -456,12 +451,11 @@ TEST_F(SpvParserTest, EmitFunctions_GenerateParamNames) {
OpReturn
OpFunctionEnd
)" + MainBody()));
- EXPECT_TRUE(p->BuildAndParseInternalModule());
- EXPECT_TRUE(p->error().empty());
- Program program = p->program();
- const auto program_ast = test::ToString(program);
- EXPECT_THAT(program_ast,
- HasSubstr(R"(fn mixed_params(x_14 : u32, x_15 : f32, x_16 : i32) {
+ EXPECT_TRUE(p->BuildAndParseInternalModule());
+ EXPECT_TRUE(p->error().empty());
+ Program program = p->program();
+ const auto program_ast = test::ToString(program);
+ EXPECT_THAT(program_ast, HasSubstr(R"(fn mixed_params(x_14 : u32, x_15 : f32, x_16 : i32) {
return;
}
)")) << program_ast;
diff --git a/chromium/third_party/dawn/src/tint/reader/spirv/parser_impl_get_decorations_test.cc b/chromium/third_party/dawn/src/tint/reader/spirv/parser_impl_get_decorations_test.cc
index 5f4f9714723..929ab1f2f7a 100644
--- a/chromium/third_party/dawn/src/tint/reader/spirv/parser_impl_get_decorations_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/spirv/parser_impl_get_decorations_test.cc
@@ -27,151 +27,142 @@ using SpvParserGetDecorationsTest = SpvParserTest;
const char* kSkipReason = "This example is deliberately a SPIR-V fragment";
TEST_F(SpvParserGetDecorationsTest, GetDecorationsFor_NotAnId) {
- auto p = parser(test::Assemble(""));
- EXPECT_TRUE(p->BuildAndParseInternalModule());
- auto decorations = p->GetDecorationsFor(42);
- EXPECT_TRUE(decorations.empty());
- EXPECT_TRUE(p->error().empty());
- p->SkipDumpingPending(kSkipReason);
+ auto p = parser(test::Assemble(""));
+ EXPECT_TRUE(p->BuildAndParseInternalModule());
+ auto decorations = p->GetDecorationsFor(42);
+ EXPECT_TRUE(decorations.empty());
+ EXPECT_TRUE(p->error().empty());
+ p->SkipDumpingPending(kSkipReason);
}
TEST_F(SpvParserGetDecorationsTest, GetDecorationsFor_NoDecorations) {
- auto p = parser(test::Assemble("%1 = OpTypeVoid"));
- EXPECT_TRUE(p->BuildAndParseInternalModule());
- auto decorations = p->GetDecorationsFor(1);
- EXPECT_TRUE(decorations.empty());
- EXPECT_TRUE(p->error().empty());
- p->SkipDumpingPending(kSkipReason);
+ auto p = parser(test::Assemble("%1 = OpTypeVoid"));
+ EXPECT_TRUE(p->BuildAndParseInternalModule());
+ auto decorations = p->GetDecorationsFor(1);
+ EXPECT_TRUE(decorations.empty());
+ EXPECT_TRUE(p->error().empty());
+ p->SkipDumpingPending(kSkipReason);
}
TEST_F(SpvParserGetDecorationsTest, GetDecorationsFor_OneDecoration) {
- auto p = parser(test::Assemble(R"(
+ auto p = parser(test::Assemble(R"(
OpDecorate %10 Block
%float = OpTypeFloat 32
%10 = OpTypeStruct %float
)"));
- EXPECT_TRUE(p->BuildAndParseInternalModule());
- auto decorations = p->GetDecorationsFor(10);
- EXPECT_THAT(decorations,
- UnorderedElementsAre(Decoration{SpvDecorationBlock}));
- EXPECT_TRUE(p->error().empty());
- p->SkipDumpingPending(kSkipReason);
+ EXPECT_TRUE(p->BuildAndParseInternalModule());
+ auto decorations = p->GetDecorationsFor(10);
+ EXPECT_THAT(decorations, UnorderedElementsAre(Decoration{SpvDecorationBlock}));
+ EXPECT_TRUE(p->error().empty());
+ p->SkipDumpingPending(kSkipReason);
}
TEST_F(SpvParserGetDecorationsTest, GetDecorationsFor_Duplicate) {
- auto p = parser(test::Assemble(R"(
+ auto p = parser(test::Assemble(R"(
OpDecorate %10 Block
OpDecorate %10 Block
%float = OpTypeFloat 32
%10 = OpTypeStruct %float
)"));
- EXPECT_TRUE(p->BuildAndParseInternalModule());
- auto decorations = p->GetDecorationsFor(10);
- EXPECT_THAT(decorations,
- UnorderedElementsAre(Decoration{SpvDecorationBlock}));
- EXPECT_TRUE(p->error().empty());
- p->SkipDumpingPending(kSkipReason);
+ EXPECT_TRUE(p->BuildAndParseInternalModule());
+ auto decorations = p->GetDecorationsFor(10);
+ EXPECT_THAT(decorations, UnorderedElementsAre(Decoration{SpvDecorationBlock}));
+ EXPECT_TRUE(p->error().empty());
+ p->SkipDumpingPending(kSkipReason);
}
TEST_F(SpvParserGetDecorationsTest, GetDecorationsFor_MultiDecoration) {
- auto p = parser(test::Assemble(R"(
+ auto p = parser(test::Assemble(R"(
OpDecorate %5 RelaxedPrecision
OpDecorate %5 Location 7 ; Invalid case made up for test
%float = OpTypeFloat 32
%5 = OpConstant %float 3.14
)"));
- EXPECT_TRUE(p->BuildAndParseInternalModule());
- auto decorations = p->GetDecorationsFor(5);
- EXPECT_THAT(decorations,
- UnorderedElementsAre(Decoration{SpvDecorationRelaxedPrecision},
- Decoration{SpvDecorationLocation, 7}));
- EXPECT_TRUE(p->error().empty());
- p->SkipDumpingPending(kSkipReason);
+ EXPECT_TRUE(p->BuildAndParseInternalModule());
+ auto decorations = p->GetDecorationsFor(5);
+ EXPECT_THAT(decorations, UnorderedElementsAre(Decoration{SpvDecorationRelaxedPrecision},
+ Decoration{SpvDecorationLocation, 7}));
+ EXPECT_TRUE(p->error().empty());
+ p->SkipDumpingPending(kSkipReason);
}
TEST_F(SpvParserGetDecorationsTest, GetDecorationsForMember_NotAnId) {
- auto p = parser(test::Assemble(""));
- EXPECT_TRUE(p->BuildAndParseInternalModule());
- auto decorations = p->GetDecorationsForMember(42, 9);
- EXPECT_TRUE(decorations.empty());
- EXPECT_TRUE(p->error().empty());
- p->SkipDumpingPending(kSkipReason);
+ auto p = parser(test::Assemble(""));
+ EXPECT_TRUE(p->BuildAndParseInternalModule());
+ auto decorations = p->GetDecorationsForMember(42, 9);
+ EXPECT_TRUE(decorations.empty());
+ EXPECT_TRUE(p->error().empty());
+ p->SkipDumpingPending(kSkipReason);
}
TEST_F(SpvParserGetDecorationsTest, GetDecorationsForMember_NotAStruct) {
- auto p = parser(test::Assemble("%1 = OpTypeVoid"));
- EXPECT_TRUE(p->BuildAndParseInternalModule());
- auto decorations = p->GetDecorationsFor(1);
- EXPECT_TRUE(decorations.empty());
- EXPECT_TRUE(p->error().empty());
- p->SkipDumpingPending(kSkipReason);
+ auto p = parser(test::Assemble("%1 = OpTypeVoid"));
+ EXPECT_TRUE(p->BuildAndParseInternalModule());
+ auto decorations = p->GetDecorationsFor(1);
+ EXPECT_TRUE(decorations.empty());
+ EXPECT_TRUE(p->error().empty());
+ p->SkipDumpingPending(kSkipReason);
}
-TEST_F(SpvParserGetDecorationsTest,
- GetDecorationsForMember_MemberWithoutDecoration) {
- auto p = parser(test::Assemble(R"(
+TEST_F(SpvParserGetDecorationsTest, GetDecorationsForMember_MemberWithoutDecoration) {
+ auto p = parser(test::Assemble(R"(
%uint = OpTypeInt 32 0
%10 = OpTypeStruct %uint
)"));
- EXPECT_TRUE(p->BuildAndParseInternalModule());
- auto decorations = p->GetDecorationsForMember(10, 0);
- EXPECT_TRUE(decorations.empty());
- EXPECT_TRUE(p->error().empty());
- p->SkipDumpingPending(kSkipReason);
+ EXPECT_TRUE(p->BuildAndParseInternalModule());
+ auto decorations = p->GetDecorationsForMember(10, 0);
+ EXPECT_TRUE(decorations.empty());
+ EXPECT_TRUE(p->error().empty());
+ p->SkipDumpingPending(kSkipReason);
}
TEST_F(SpvParserGetDecorationsTest, GetDecorationsForMember_RelaxedPrecision) {
- auto p = parser(test::Assemble(R"(
+ auto p = parser(test::Assemble(R"(
OpMemberDecorate %10 0 RelaxedPrecision
%float = OpTypeFloat 32
%10 = OpTypeStruct %float
)"));
- EXPECT_TRUE(p->BuildAndParseInternalModule()) << p->error();
- auto decorations = p->GetDecorationsForMember(10, 0);
- EXPECT_THAT(decorations,
- UnorderedElementsAre(Decoration{SpvDecorationRelaxedPrecision}));
- EXPECT_TRUE(p->error().empty());
- p->SkipDumpingPending(kSkipReason);
+ EXPECT_TRUE(p->BuildAndParseInternalModule()) << p->error();
+ auto decorations = p->GetDecorationsForMember(10, 0);
+ EXPECT_THAT(decorations, UnorderedElementsAre(Decoration{SpvDecorationRelaxedPrecision}));
+ EXPECT_TRUE(p->error().empty());
+ p->SkipDumpingPending(kSkipReason);
}
TEST_F(SpvParserGetDecorationsTest, GetDecorationsForMember_Duplicate) {
- auto p = parser(test::Assemble(R"(
+ auto p = parser(test::Assemble(R"(
OpMemberDecorate %10 0 RelaxedPrecision
OpMemberDecorate %10 0 RelaxedPrecision
%float = OpTypeFloat 32
%10 = OpTypeStruct %float
)"));
- EXPECT_TRUE(p->BuildAndParseInternalModule()) << p->error();
- auto decorations = p->GetDecorationsForMember(10, 0);
- EXPECT_THAT(decorations,
- UnorderedElementsAre(Decoration{SpvDecorationRelaxedPrecision}));
- EXPECT_TRUE(p->error().empty());
- p->SkipDumpingPending(kSkipReason);
+ EXPECT_TRUE(p->BuildAndParseInternalModule()) << p->error();
+ auto decorations = p->GetDecorationsForMember(10, 0);
+ EXPECT_THAT(decorations, UnorderedElementsAre(Decoration{SpvDecorationRelaxedPrecision}));
+ EXPECT_TRUE(p->error().empty());
+ p->SkipDumpingPending(kSkipReason);
}
// TODO(dneto): Enable when ArrayStride is handled
-TEST_F(SpvParserGetDecorationsTest,
- DISABLED_GetDecorationsForMember_OneDecoration) {
- auto p = parser(test::Assemble(R"(
+TEST_F(SpvParserGetDecorationsTest, DISABLED_GetDecorationsForMember_OneDecoration) {
+ auto p = parser(test::Assemble(R"(
OpMemberDecorate %10 1 ArrayStride 12
%uint = OpTypeInt 32 0
%uint_2 = OpConstant %uint 2
%arr = OpTypeArray %uint %uint_2
%10 = OpTypeStruct %uint %arr
)"));
- EXPECT_TRUE(p->BuildAndParseInternalModule()) << p->error();
- auto decorations = p->GetDecorationsForMember(10, 1);
- EXPECT_THAT(decorations,
- UnorderedElementsAre(Decoration{SpvDecorationArrayStride, 12}));
- EXPECT_TRUE(p->error().empty());
+ EXPECT_TRUE(p->BuildAndParseInternalModule()) << p->error();
+ auto decorations = p->GetDecorationsForMember(10, 1);
+ EXPECT_THAT(decorations, UnorderedElementsAre(Decoration{SpvDecorationArrayStride, 12}));
+ EXPECT_TRUE(p->error().empty());
}
// TODO(dneto): Enable when ArrayStride, MatrixStride, ColMajor are handled
// crbug.com/tint/30 for ArrayStride
// crbug.com/tint/31 for matrix layout
-TEST_F(SpvParserGetDecorationsTest,
- DISABLED_GetDecorationsForMember_MultiDecoration) {
- auto p = parser(test::Assemble(R"(
+TEST_F(SpvParserGetDecorationsTest, DISABLED_GetDecorationsForMember_MultiDecoration) {
+ auto p = parser(test::Assemble(R"(
OpMemberDecorate %50 1 RelaxedPrecision
OpMemberDecorate %50 2 ArrayStride 16
OpMemberDecorate %50 2 MatrixStride 8
@@ -184,78 +175,78 @@ TEST_F(SpvParserGetDecorationsTest,
%arr = OpTypeArray %mat %uint_2
%50 = OpTypeStruct %uint %float %arr
)"));
- EXPECT_TRUE(p->BuildAndParseInternalModule()) << p->error();
+ EXPECT_TRUE(p->BuildAndParseInternalModule()) << p->error();
- EXPECT_TRUE(p->GetDecorationsForMember(50, 0).empty());
- EXPECT_THAT(p->GetDecorationsForMember(50, 1),
- UnorderedElementsAre(Decoration{SpvDecorationRelaxedPrecision}));
- EXPECT_THAT(p->GetDecorationsForMember(50, 2),
- UnorderedElementsAre(Decoration{SpvDecorationColMajor},
- Decoration{SpvDecorationMatrixStride, 8},
- Decoration{SpvDecorationArrayStride, 16}));
- EXPECT_TRUE(p->error().empty());
+ EXPECT_TRUE(p->GetDecorationsForMember(50, 0).empty());
+ EXPECT_THAT(p->GetDecorationsForMember(50, 1),
+ UnorderedElementsAre(Decoration{SpvDecorationRelaxedPrecision}));
+ EXPECT_THAT(p->GetDecorationsForMember(50, 2),
+ UnorderedElementsAre(Decoration{SpvDecorationColMajor},
+ Decoration{SpvDecorationMatrixStride, 8},
+ Decoration{SpvDecorationArrayStride, 16}));
+ EXPECT_TRUE(p->error().empty());
}
TEST_F(SpvParserGetDecorationsTest, GetDecorationsFor_Restrict) {
- // RestrictPointer applies to a memory object declaration. Use a variable.
- auto p = parser(test::Assemble(R"(
+ // RestrictPointer applies to a memory object declaration. Use a variable.
+ auto p = parser(test::Assemble(R"(
OpDecorate %10 Restrict
%float = OpTypeFloat 32
%ptr = OpTypePointer Workgroup %float
%10 = OpVariable %ptr Workgroup
)"));
- EXPECT_TRUE(p->BuildAndParseInternalModule());
- auto decorations = p->GetDecorationsFor(10);
- EXPECT_TRUE(decorations.empty());
- EXPECT_TRUE(p->error().empty());
- p->SkipDumpingPending(kSkipReason);
+ EXPECT_TRUE(p->BuildAndParseInternalModule());
+ auto decorations = p->GetDecorationsFor(10);
+ EXPECT_TRUE(decorations.empty());
+ EXPECT_TRUE(p->error().empty());
+ p->SkipDumpingPending(kSkipReason);
}
TEST_F(SpvParserGetDecorationsTest, GetDecorationsForMember_Restrict) {
- // Restrict applies to a memory object declaration.
- // But OpMemberDecorate can only be applied to a structure type.
- // Test the reader's ability to be resilient to more than what SPIR-V allows.
- auto p = parser(test::Assemble(R"(
+ // Restrict applies to a memory object declaration.
+ // But OpMemberDecorate can only be applied to a structure type.
+ // Test the reader's ability to be resilient to more than what SPIR-V allows.
+ auto p = parser(test::Assemble(R"(
OpMemberDecorate %10 0 Restrict
%float = OpTypeFloat 32
%10 = OpTypeStruct %float
)"));
- EXPECT_TRUE(p->BuildAndParseInternalModule());
- auto decorations = p->GetDecorationsForMember(10, 0);
- EXPECT_TRUE(decorations.empty());
- EXPECT_TRUE(p->error().empty());
- p->SkipDumpingPending(kSkipReason);
+ EXPECT_TRUE(p->BuildAndParseInternalModule());
+ auto decorations = p->GetDecorationsForMember(10, 0);
+ EXPECT_TRUE(decorations.empty());
+ EXPECT_TRUE(p->error().empty());
+ p->SkipDumpingPending(kSkipReason);
}
TEST_F(SpvParserGetDecorationsTest, GetDecorationsFor_RestrictPointer) {
- // RestrictPointer applies to a memory object declaration. Use a variable.
- auto p = parser(test::Assemble(R"(
+ // RestrictPointer applies to a memory object declaration. Use a variable.
+ auto p = parser(test::Assemble(R"(
OpDecorate %10 RestrictPointer
%float = OpTypeFloat 32
%ptr = OpTypePointer Workgroup %float
%10 = OpVariable %ptr Workgroup
)"));
- EXPECT_TRUE(p->BuildAndParseInternalModule()) << p->error();
- auto decorations = p->GetDecorationsFor(10);
- EXPECT_TRUE(decorations.empty());
- EXPECT_TRUE(p->error().empty());
- p->SkipDumpingPending(kSkipReason);
+ EXPECT_TRUE(p->BuildAndParseInternalModule()) << p->error();
+ auto decorations = p->GetDecorationsFor(10);
+ EXPECT_TRUE(decorations.empty());
+ EXPECT_TRUE(p->error().empty());
+ p->SkipDumpingPending(kSkipReason);
}
TEST_F(SpvParserGetDecorationsTest, GetDecorationsForMember_RestrictPointer) {
- // RestrictPointer applies to a memory object declaration.
- // But OpMemberDecorate can only be applied to a structure type.
- // Test the reader's ability to be resilient to more than what SPIR-V allows.
- auto p = parser(test::Assemble(R"(
+ // RestrictPointer applies to a memory object declaration.
+ // But OpMemberDecorate can only be applied to a structure type.
+ // Test the reader's ability to be resilient to more than what SPIR-V allows.
+ auto p = parser(test::Assemble(R"(
OpMemberDecorate %10 0 RestrictPointer
%float = OpTypeFloat 32
%10 = OpTypeStruct %float
)"));
- EXPECT_TRUE(p->BuildAndParseInternalModule()) << p->error();
- auto decorations = p->GetDecorationsFor(10);
- EXPECT_TRUE(decorations.empty());
- EXPECT_TRUE(p->error().empty());
- p->SkipDumpingPending(kSkipReason);
+ EXPECT_TRUE(p->BuildAndParseInternalModule()) << p->error();
+ auto decorations = p->GetDecorationsFor(10);
+ EXPECT_TRUE(decorations.empty());
+ EXPECT_TRUE(p->error().empty());
+ p->SkipDumpingPending(kSkipReason);
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/reader/spirv/parser_impl_handle_test.cc b/chromium/third_party/dawn/src/tint/reader/spirv/parser_impl_handle_test.cc
index d1938aa91db..84f3dcb3efa 100644
--- a/chromium/third_party/dawn/src/tint/reader/spirv/parser_impl_handle_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/spirv/parser_impl_handle_test.cc
@@ -30,7 +30,7 @@ using ::testing::StartsWith;
using SpvParserHandleTest = SpvParserTest;
std::string Preamble() {
- return R"(
+ return R"(
OpCapability Shader
OpCapability Sampled1D
OpCapability Image1D
@@ -41,14 +41,14 @@ std::string Preamble() {
}
std::string FragMain() {
- return R"(
+ return R"(
OpEntryPoint Fragment %main "main" ; assume no IO
OpExecutionMode %main OriginUpperLeft
)";
}
std::string MainBody() {
- return R"(
+ return R"(
%main = OpFunction %void None %voidfn
%main_entry = OpLabel
OpReturn
@@ -57,7 +57,7 @@ std::string MainBody() {
}
std::string CommonBasicTypes() {
- return R"(
+ return R"(
%void = OpTypeVoid
%voidfn = OpTypeFunction %void
@@ -117,7 +117,7 @@ std::string CommonBasicTypes() {
}
std::string CommonImageTypes() {
- return R"(
+ return R"(
; Define types for all sampler and texture types that can map to WGSL,
; modulo texel formats for storage textures. For now, we limit
@@ -231,63 +231,58 @@ std::string CommonImageTypes() {
}
std::string CommonTypes() {
- return CommonBasicTypes() + CommonImageTypes();
+ return CommonBasicTypes() + CommonImageTypes();
}
std::string Bindings(std::vector<uint32_t> ids) {
- std::ostringstream os;
- int binding = 0;
- for (auto id : ids) {
- os << " OpDecorate %" << id << " DescriptorSet 0\n"
- << " OpDecorate %" << id << " Binding " << binding++ << "\n";
- }
- return os.str();
+ std::ostringstream os;
+ int binding = 0;
+ for (auto id : ids) {
+ os << " OpDecorate %" << id << " DescriptorSet 0\n"
+ << " OpDecorate %" << id << " Binding " << binding++ << "\n";
+ }
+ return os.str();
}
-TEST_F(SpvParserHandleTest,
- GetMemoryObjectDeclarationForHandle_WellFormedButNotAHandle) {
- const auto assembly = Preamble() + FragMain() + CommonTypes() + R"(
+TEST_F(SpvParserHandleTest, GetMemoryObjectDeclarationForHandle_WellFormedButNotAHandle) {
+ const auto assembly = Preamble() + FragMain() + CommonTypes() + R"(
%10 = OpConstantNull %ptr_sampler
%20 = OpConstantNull %ptr_f_texture_1d
)" + MainBody();
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildInternalModule()) << assembly;
- const auto* sampler = p->GetMemoryObjectDeclarationForHandle(10, false);
- const auto* image = p->GetMemoryObjectDeclarationForHandle(20, true);
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildInternalModule()) << assembly;
+ const auto* sampler = p->GetMemoryObjectDeclarationForHandle(10, false);
+ const auto* image = p->GetMemoryObjectDeclarationForHandle(20, true);
- EXPECT_EQ(sampler, nullptr);
- EXPECT_EQ(image, nullptr);
- EXPECT_TRUE(p->error().empty());
+ EXPECT_EQ(sampler, nullptr);
+ EXPECT_EQ(image, nullptr);
+ EXPECT_TRUE(p->error().empty());
- p->DeliberatelyInvalidSpirv(); // WGSL does not have null pointers.
+ p->DeliberatelyInvalidSpirv(); // WGSL does not have null pointers.
}
-TEST_F(SpvParserHandleTest,
- GetMemoryObjectDeclarationForHandle_Variable_Direct) {
- const auto assembly =
- Preamble() + FragMain() + Bindings({10, 20}) + CommonTypes() + R"(
+TEST_F(SpvParserHandleTest, GetMemoryObjectDeclarationForHandle_Variable_Direct) {
+ const auto assembly = Preamble() + FragMain() + Bindings({10, 20}) + CommonTypes() + R"(
%10 = OpVariable %ptr_sampler UniformConstant
%20 = OpVariable %ptr_f_texture_1d UniformConstant
)" + MainBody();
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildInternalModule());
- EXPECT_TRUE(p->error().empty());
- const auto* sampler = p->GetMemoryObjectDeclarationForHandle(10, false);
- const auto* image = p->GetMemoryObjectDeclarationForHandle(20, true);
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildInternalModule());
+ EXPECT_TRUE(p->error().empty());
+ const auto* sampler = p->GetMemoryObjectDeclarationForHandle(10, false);
+ const auto* image = p->GetMemoryObjectDeclarationForHandle(20, true);
- ASSERT_TRUE(sampler != nullptr);
- EXPECT_EQ(sampler->result_id(), 10u);
+ ASSERT_TRUE(sampler != nullptr);
+ EXPECT_EQ(sampler->result_id(), 10u);
- ASSERT_TRUE(image != nullptr);
- EXPECT_EQ(image->result_id(), 20u);
+ ASSERT_TRUE(image != nullptr);
+ EXPECT_EQ(image->result_id(), 20u);
}
-TEST_F(SpvParserHandleTest,
- GetMemoryObjectDeclarationForHandle_Variable_AccessChain) {
- // Show that we would generalize to arrays of handles, even though that
- // is not supported in WGSL MVP.
- const auto assembly =
- Preamble() + FragMain() + Bindings({10, 20}) + CommonTypes() + R"(
+TEST_F(SpvParserHandleTest, GetMemoryObjectDeclarationForHandle_Variable_AccessChain) {
+ // Show that we would generalize to arrays of handles, even though that
+ // is not supported in WGSL MVP.
+ const auto assembly = Preamble() + FragMain() + Bindings({10, 20}) + CommonTypes() + R"(
%sampler_array = OpTypeArray %sampler %uint_100
%image_array = OpTypeArray %f_texture_1d %uint_100
@@ -307,26 +302,24 @@ TEST_F(SpvParserHandleTest,
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildInternalModule());
- EXPECT_TRUE(p->error().empty());
- const auto* sampler = p->GetMemoryObjectDeclarationForHandle(110, false);
- const auto* image = p->GetMemoryObjectDeclarationForHandle(120, true);
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildInternalModule());
+ EXPECT_TRUE(p->error().empty());
+ const auto* sampler = p->GetMemoryObjectDeclarationForHandle(110, false);
+ const auto* image = p->GetMemoryObjectDeclarationForHandle(120, true);
- ASSERT_TRUE(sampler != nullptr);
- EXPECT_EQ(sampler->result_id(), 10u);
+ ASSERT_TRUE(sampler != nullptr);
+ EXPECT_EQ(sampler->result_id(), 10u);
- ASSERT_TRUE(image != nullptr);
- EXPECT_EQ(image->result_id(), 20u);
+ ASSERT_TRUE(image != nullptr);
+ EXPECT_EQ(image->result_id(), 20u);
- // WGSL does not support arrays of textures and samplers.
- p->DeliberatelyInvalidSpirv();
+ // WGSL does not support arrays of textures and samplers.
+ p->DeliberatelyInvalidSpirv();
}
-TEST_F(SpvParserHandleTest,
- GetMemoryObjectDeclarationForHandle_Variable_InBoundsAccessChain) {
- const auto assembly =
- Preamble() + FragMain() + Bindings({10, 20}) + CommonTypes() + R"(
+TEST_F(SpvParserHandleTest, GetMemoryObjectDeclarationForHandle_Variable_InBoundsAccessChain) {
+ const auto assembly = Preamble() + FragMain() + Bindings({10, 20}) + CommonTypes() + R"(
%sampler_array = OpTypeArray %sampler %uint_100
%image_array = OpTypeArray %f_texture_1d %uint_100
@@ -346,29 +339,28 @@ TEST_F(SpvParserHandleTest,
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildInternalModule());
- EXPECT_TRUE(p->error().empty());
- const auto* sampler = p->GetMemoryObjectDeclarationForHandle(110, false);
- const auto* image = p->GetMemoryObjectDeclarationForHandle(120, true);
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildInternalModule());
+ EXPECT_TRUE(p->error().empty());
+ const auto* sampler = p->GetMemoryObjectDeclarationForHandle(110, false);
+ const auto* image = p->GetMemoryObjectDeclarationForHandle(120, true);
- ASSERT_TRUE(sampler != nullptr);
- EXPECT_EQ(sampler->result_id(), 10u);
+ ASSERT_TRUE(sampler != nullptr);
+ EXPECT_EQ(sampler->result_id(), 10u);
- ASSERT_TRUE(image != nullptr);
- EXPECT_EQ(image->result_id(), 20u);
+ ASSERT_TRUE(image != nullptr);
+ EXPECT_EQ(image->result_id(), 20u);
- // WGSL does not support arrays of textures and samplers.
- p->DeliberatelyInvalidSpirv();
+ // WGSL does not support arrays of textures and samplers.
+ p->DeliberatelyInvalidSpirv();
}
-TEST_F(SpvParserHandleTest,
- GetMemoryObjectDeclarationForHandle_Variable_PtrAccessChain) {
- // Show that we would generalize to arrays of handles, even though that
- // is not supported in WGSL MVP.
- // Use VariablePointers for the OpInBoundsPtrAccessChain.
- const auto assembly = "OpCapability VariablePointers " + Preamble() +
- FragMain() + Bindings({10, 20}) + CommonTypes() + R"(
+TEST_F(SpvParserHandleTest, GetMemoryObjectDeclarationForHandle_Variable_PtrAccessChain) {
+ // Show that we would generalize to arrays of handles, even though that
+ // is not supported in WGSL MVP.
+ // Use VariablePointers for the OpInBoundsPtrAccessChain.
+ const auto assembly = "OpCapability VariablePointers " + Preamble() + FragMain() +
+ Bindings({10, 20}) + CommonTypes() + R"(
%sampler_array = OpTypeArray %sampler %uint_100
%image_array = OpTypeArray %f_texture_1d %uint_100
@@ -388,27 +380,26 @@ TEST_F(SpvParserHandleTest,
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildInternalModule());
- EXPECT_TRUE(p->error().empty());
- const auto* sampler = p->GetMemoryObjectDeclarationForHandle(110, false);
- const auto* image = p->GetMemoryObjectDeclarationForHandle(120, true);
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildInternalModule());
+ EXPECT_TRUE(p->error().empty());
+ const auto* sampler = p->GetMemoryObjectDeclarationForHandle(110, false);
+ const auto* image = p->GetMemoryObjectDeclarationForHandle(120, true);
- ASSERT_TRUE(sampler != nullptr);
- EXPECT_EQ(sampler->result_id(), 10u);
+ ASSERT_TRUE(sampler != nullptr);
+ EXPECT_EQ(sampler->result_id(), 10u);
- ASSERT_TRUE(image != nullptr);
- EXPECT_EQ(image->result_id(), 20u);
+ ASSERT_TRUE(image != nullptr);
+ EXPECT_EQ(image->result_id(), 20u);
- // Variable pointers is not allowed for WGSL. So don't dump it.
- p->DeliberatelyInvalidSpirv();
+ // Variable pointers is not allowed for WGSL. So don't dump it.
+ p->DeliberatelyInvalidSpirv();
}
-TEST_F(SpvParserHandleTest,
- GetMemoryObjectDeclarationForHandle_Variable_InBoundsPtrAccessChain) {
- // Use VariablePointers for the OpInBoundsPtrAccessChain.
- const auto assembly = "OpCapability VariablePointers " + Preamble() +
- FragMain() + Bindings({10, 20}) + CommonTypes() + R"(
+TEST_F(SpvParserHandleTest, GetMemoryObjectDeclarationForHandle_Variable_InBoundsPtrAccessChain) {
+ // Use VariablePointers for the OpInBoundsPtrAccessChain.
+ const auto assembly = "OpCapability VariablePointers " + Preamble() + FragMain() +
+ Bindings({10, 20}) + CommonTypes() + R"(
%sampler_array = OpTypeArray %sampler %uint_100
%image_array = OpTypeArray %f_texture_1d %uint_100
@@ -428,26 +419,24 @@ TEST_F(SpvParserHandleTest,
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildInternalModule());
- EXPECT_TRUE(p->error().empty());
- const auto* sampler = p->GetMemoryObjectDeclarationForHandle(110, false);
- const auto* image = p->GetMemoryObjectDeclarationForHandle(120, true);
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildInternalModule());
+ EXPECT_TRUE(p->error().empty());
+ const auto* sampler = p->GetMemoryObjectDeclarationForHandle(110, false);
+ const auto* image = p->GetMemoryObjectDeclarationForHandle(120, true);
- ASSERT_TRUE(sampler != nullptr);
- EXPECT_EQ(sampler->result_id(), 10u);
+ ASSERT_TRUE(sampler != nullptr);
+ EXPECT_EQ(sampler->result_id(), 10u);
- ASSERT_TRUE(image != nullptr);
- EXPECT_EQ(image->result_id(), 20u);
+ ASSERT_TRUE(image != nullptr);
+ EXPECT_EQ(image->result_id(), 20u);
- // Variable pointers is not allowed for WGSL. So don't dump it.
- p->DeliberatelyInvalidSpirv();
+ // Variable pointers is not allowed for WGSL. So don't dump it.
+ p->DeliberatelyInvalidSpirv();
}
-TEST_F(SpvParserHandleTest,
- GetMemoryObjectDeclarationForHandle_Variable_CopyObject) {
- const auto assembly =
- Preamble() + FragMain() + Bindings({10, 20}) + CommonTypes() + R"(
+TEST_F(SpvParserHandleTest, GetMemoryObjectDeclarationForHandle_Variable_CopyObject) {
+ const auto assembly = Preamble() + FragMain() + Bindings({10, 20}) + CommonTypes() + R"(
%10 = OpVariable %ptr_sampler UniformConstant
%20 = OpVariable %ptr_f_texture_1d UniformConstant
@@ -461,22 +450,21 @@ TEST_F(SpvParserHandleTest,
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildInternalModule());
- EXPECT_TRUE(p->error().empty());
- const auto* sampler = p->GetMemoryObjectDeclarationForHandle(110, false);
- const auto* image = p->GetMemoryObjectDeclarationForHandle(120, true);
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildInternalModule());
+ EXPECT_TRUE(p->error().empty());
+ const auto* sampler = p->GetMemoryObjectDeclarationForHandle(110, false);
+ const auto* image = p->GetMemoryObjectDeclarationForHandle(120, true);
- ASSERT_TRUE(sampler != nullptr);
- EXPECT_EQ(sampler->result_id(), 10u);
+ ASSERT_TRUE(sampler != nullptr);
+ EXPECT_EQ(sampler->result_id(), 10u);
- ASSERT_TRUE(image != nullptr);
- EXPECT_EQ(image->result_id(), 20u);
+ ASSERT_TRUE(image != nullptr);
+ EXPECT_EQ(image->result_id(), 20u);
}
TEST_F(SpvParserHandleTest, GetMemoryObjectDeclarationForHandle_Variable_Load) {
- const auto assembly =
- Preamble() + FragMain() + Bindings({10, 20}) + CommonTypes() + R"(
+ const auto assembly = Preamble() + FragMain() + Bindings({10, 20}) + CommonTypes() + R"(
%10 = OpVariable %ptr_sampler UniformConstant
%20 = OpVariable %ptr_f_texture_1d UniformConstant
@@ -490,25 +478,23 @@ TEST_F(SpvParserHandleTest, GetMemoryObjectDeclarationForHandle_Variable_Load) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildInternalModule());
- EXPECT_TRUE(p->error().empty());
- const auto* sampler = p->GetMemoryObjectDeclarationForHandle(110, false);
- const auto* image = p->GetMemoryObjectDeclarationForHandle(120, true);
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildInternalModule());
+ EXPECT_TRUE(p->error().empty());
+ const auto* sampler = p->GetMemoryObjectDeclarationForHandle(110, false);
+ const auto* image = p->GetMemoryObjectDeclarationForHandle(120, true);
- ASSERT_TRUE(sampler != nullptr);
- EXPECT_EQ(sampler->result_id(), 10u);
+ ASSERT_TRUE(sampler != nullptr);
+ EXPECT_EQ(sampler->result_id(), 10u);
- ASSERT_TRUE(image != nullptr);
- EXPECT_EQ(image->result_id(), 20u);
+ ASSERT_TRUE(image != nullptr);
+ EXPECT_EQ(image->result_id(), 20u);
}
-TEST_F(SpvParserHandleTest,
- GetMemoryObjectDeclarationForHandle_Variable_SampledImage) {
- // Trace through the sampled image instruction, but in two different
- // directions.
- const auto assembly =
- Preamble() + FragMain() + Bindings({10, 20}) + CommonTypes() + R"(
+TEST_F(SpvParserHandleTest, GetMemoryObjectDeclarationForHandle_Variable_SampledImage) {
+ // Trace through the sampled image instruction, but in two different
+ // directions.
+ const auto assembly = Preamble() + FragMain() + Bindings({10, 20}) + CommonTypes() + R"(
%sampled_image_type = OpTypeSampledImage %f_texture_1d
%10 = OpVariable %ptr_sampler UniformConstant
@@ -524,23 +510,21 @@ TEST_F(SpvParserHandleTest,
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildInternalModule());
- EXPECT_TRUE(p->error().empty());
- const auto* sampler = p->GetMemoryObjectDeclarationForHandle(100, false);
- const auto* image = p->GetMemoryObjectDeclarationForHandle(100, true);
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildInternalModule());
+ EXPECT_TRUE(p->error().empty());
+ const auto* sampler = p->GetMemoryObjectDeclarationForHandle(100, false);
+ const auto* image = p->GetMemoryObjectDeclarationForHandle(100, true);
- ASSERT_TRUE(sampler != nullptr);
- EXPECT_EQ(sampler->result_id(), 10u);
+ ASSERT_TRUE(sampler != nullptr);
+ EXPECT_EQ(sampler->result_id(), 10u);
- ASSERT_TRUE(image != nullptr);
- EXPECT_EQ(image->result_id(), 20u);
+ ASSERT_TRUE(image != nullptr);
+ EXPECT_EQ(image->result_id(), 20u);
}
-TEST_F(SpvParserHandleTest,
- GetMemoryObjectDeclarationForHandle_Variable_Image) {
- const auto assembly =
- Preamble() + FragMain() + Bindings({10, 20}) + CommonTypes() + R"(
+TEST_F(SpvParserHandleTest, GetMemoryObjectDeclarationForHandle_Variable_Image) {
+ const auto assembly = Preamble() + FragMain() + Bindings({10, 20}) + CommonTypes() + R"(
%sampled_image_type = OpTypeSampledImage %f_texture_1d
%10 = OpVariable %ptr_sampler UniformConstant
@@ -557,18 +541,17 @@ TEST_F(SpvParserHandleTest,
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildInternalModule());
- EXPECT_TRUE(p->error().empty());
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildInternalModule());
+ EXPECT_TRUE(p->error().empty());
- const auto* image = p->GetMemoryObjectDeclarationForHandle(200, true);
- ASSERT_TRUE(image != nullptr);
- EXPECT_EQ(image->result_id(), 20u);
+ const auto* image = p->GetMemoryObjectDeclarationForHandle(200, true);
+ ASSERT_TRUE(image != nullptr);
+ EXPECT_EQ(image->result_id(), 20u);
}
-TEST_F(SpvParserHandleTest,
- GetMemoryObjectDeclarationForHandle_FuncParam_Direct) {
- const auto assembly = Preamble() + FragMain() + CommonTypes() + R"(
+TEST_F(SpvParserHandleTest, GetMemoryObjectDeclarationForHandle_FuncParam_Direct) {
+ const auto assembly = Preamble() + FragMain() + CommonTypes() + R"(
%fty = OpTypeFunction %void %ptr_sampler %ptr_f_texture_1d
%func = OpFunction %void None %fty
@@ -578,26 +561,25 @@ TEST_F(SpvParserHandleTest,
OpReturn
OpFunctionEnd
)" + MainBody();
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildInternalModule());
- EXPECT_TRUE(p->error().empty());
- const auto* sampler = p->GetMemoryObjectDeclarationForHandle(10, false);
- const auto* image = p->GetMemoryObjectDeclarationForHandle(20, true);
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildInternalModule());
+ EXPECT_TRUE(p->error().empty());
+ const auto* sampler = p->GetMemoryObjectDeclarationForHandle(10, false);
+ const auto* image = p->GetMemoryObjectDeclarationForHandle(20, true);
- ASSERT_TRUE(sampler != nullptr);
- EXPECT_EQ(sampler->result_id(), 10u);
+ ASSERT_TRUE(sampler != nullptr);
+ EXPECT_EQ(sampler->result_id(), 10u);
- ASSERT_TRUE(image != nullptr);
- EXPECT_EQ(image->result_id(), 20u);
+ ASSERT_TRUE(image != nullptr);
+ EXPECT_EQ(image->result_id(), 20u);
- p->SkipDumpingPending("crbug.com/tint/1039");
+ p->SkipDumpingPending("crbug.com/tint/1039");
}
-TEST_F(SpvParserHandleTest,
- GetMemoryObjectDeclarationForHandle_FuncParam_AccessChain) {
- // Show that we would generalize to arrays of handles, even though that
- // is not supported in WGSL MVP.
- const auto assembly = Preamble() + FragMain() + CommonTypes() + R"(
+TEST_F(SpvParserHandleTest, GetMemoryObjectDeclarationForHandle_FuncParam_AccessChain) {
+ // Show that we would generalize to arrays of handles, even though that
+ // is not supported in WGSL MVP.
+ const auto assembly = Preamble() + FragMain() + CommonTypes() + R"(
%sampler_array = OpTypeArray %sampler %uint_100
%image_array = OpTypeArray %f_texture_1d %uint_100
@@ -617,25 +599,24 @@ TEST_F(SpvParserHandleTest,
OpReturn
OpFunctionEnd
)" + MainBody();
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildInternalModule());
- EXPECT_TRUE(p->error().empty());
- const auto* sampler = p->GetMemoryObjectDeclarationForHandle(110, false);
- const auto* image = p->GetMemoryObjectDeclarationForHandle(120, true);
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildInternalModule());
+ EXPECT_TRUE(p->error().empty());
+ const auto* sampler = p->GetMemoryObjectDeclarationForHandle(110, false);
+ const auto* image = p->GetMemoryObjectDeclarationForHandle(120, true);
- ASSERT_TRUE(sampler != nullptr);
- EXPECT_EQ(sampler->result_id(), 10u);
+ ASSERT_TRUE(sampler != nullptr);
+ EXPECT_EQ(sampler->result_id(), 10u);
- ASSERT_TRUE(image != nullptr);
- EXPECT_EQ(image->result_id(), 20u);
+ ASSERT_TRUE(image != nullptr);
+ EXPECT_EQ(image->result_id(), 20u);
- // WGSL does not support arrays of textures or samplers
- p->DeliberatelyInvalidSpirv();
+ // WGSL does not support arrays of textures or samplers
+ p->DeliberatelyInvalidSpirv();
}
-TEST_F(SpvParserHandleTest,
- GetMemoryObjectDeclarationForHandle_FuncParam_InBoundsAccessChain) {
- const auto assembly = Preamble() + FragMain() + CommonTypes() + R"(
+TEST_F(SpvParserHandleTest, GetMemoryObjectDeclarationForHandle_FuncParam_InBoundsAccessChain) {
+ const auto assembly = Preamble() + FragMain() + CommonTypes() + R"(
%sampler_array = OpTypeArray %sampler %uint_100
%image_array = OpTypeArray %f_texture_1d %uint_100
@@ -655,27 +636,26 @@ TEST_F(SpvParserHandleTest,
OpReturn
OpFunctionEnd
)" + MainBody();
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildInternalModule());
- EXPECT_TRUE(p->error().empty());
- const auto* sampler = p->GetMemoryObjectDeclarationForHandle(110, false);
- const auto* image = p->GetMemoryObjectDeclarationForHandle(120, true);
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildInternalModule());
+ EXPECT_TRUE(p->error().empty());
+ const auto* sampler = p->GetMemoryObjectDeclarationForHandle(110, false);
+ const auto* image = p->GetMemoryObjectDeclarationForHandle(120, true);
- ASSERT_TRUE(sampler != nullptr);
- EXPECT_EQ(sampler->result_id(), 10u);
+ ASSERT_TRUE(sampler != nullptr);
+ EXPECT_EQ(sampler->result_id(), 10u);
- ASSERT_TRUE(image != nullptr);
- EXPECT_EQ(image->result_id(), 20u);
+ ASSERT_TRUE(image != nullptr);
+ EXPECT_EQ(image->result_id(), 20u);
- // WGSL does not support arrays of textures or samplers
- p->DeliberatelyInvalidSpirv();
+ // WGSL does not support arrays of textures or samplers
+ p->DeliberatelyInvalidSpirv();
}
-TEST_F(SpvParserHandleTest,
- GetMemoryObjectDeclarationForHandle_FuncParam_PtrAccessChain) {
- // Show that we would generalize to arrays of handles, even though that
- // is not supported in WGSL MVP.
- const auto assembly = Preamble() + FragMain() + CommonTypes() + R"(
+TEST_F(SpvParserHandleTest, GetMemoryObjectDeclarationForHandle_FuncParam_PtrAccessChain) {
+ // Show that we would generalize to arrays of handles, even though that
+ // is not supported in WGSL MVP.
+ const auto assembly = Preamble() + FragMain() + CommonTypes() + R"(
%sampler_array = OpTypeArray %sampler %uint_100
%image_array = OpTypeArray %f_texture_1d %uint_100
@@ -695,25 +675,24 @@ TEST_F(SpvParserHandleTest,
OpReturn
OpFunctionEnd
)" + MainBody();
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildInternalModule());
- EXPECT_TRUE(p->error().empty());
- const auto* sampler = p->GetMemoryObjectDeclarationForHandle(110, false);
- const auto* image = p->GetMemoryObjectDeclarationForHandle(120, true);
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildInternalModule());
+ EXPECT_TRUE(p->error().empty());
+ const auto* sampler = p->GetMemoryObjectDeclarationForHandle(110, false);
+ const auto* image = p->GetMemoryObjectDeclarationForHandle(120, true);
- ASSERT_TRUE(sampler != nullptr);
- EXPECT_EQ(sampler->result_id(), 10u);
+ ASSERT_TRUE(sampler != nullptr);
+ EXPECT_EQ(sampler->result_id(), 10u);
- ASSERT_TRUE(image != nullptr);
- EXPECT_EQ(image->result_id(), 20u);
+ ASSERT_TRUE(image != nullptr);
+ EXPECT_EQ(image->result_id(), 20u);
- // Variable pointers is not allowed for WGSL. So don't dump it.
- p->DeliberatelyInvalidSpirv();
+ // Variable pointers is not allowed for WGSL. So don't dump it.
+ p->DeliberatelyInvalidSpirv();
}
-TEST_F(SpvParserHandleTest,
- GetMemoryObjectDeclarationForHandle_FuncParam_InBoundsPtrAccessChain) {
- const auto assembly = Preamble() + FragMain() + CommonTypes() + R"(
+TEST_F(SpvParserHandleTest, GetMemoryObjectDeclarationForHandle_FuncParam_InBoundsPtrAccessChain) {
+ const auto assembly = Preamble() + FragMain() + CommonTypes() + R"(
%sampler_array = OpTypeArray %sampler %uint_100
%image_array = OpTypeArray %f_texture_1d %uint_100
@@ -733,25 +712,24 @@ TEST_F(SpvParserHandleTest,
OpReturn
OpFunctionEnd
)" + MainBody();
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildInternalModule());
- EXPECT_TRUE(p->error().empty());
- const auto* sampler = p->GetMemoryObjectDeclarationForHandle(110, false);
- const auto* image = p->GetMemoryObjectDeclarationForHandle(120, true);
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildInternalModule());
+ EXPECT_TRUE(p->error().empty());
+ const auto* sampler = p->GetMemoryObjectDeclarationForHandle(110, false);
+ const auto* image = p->GetMemoryObjectDeclarationForHandle(120, true);
- ASSERT_TRUE(sampler != nullptr);
- EXPECT_EQ(sampler->result_id(), 10u);
+ ASSERT_TRUE(sampler != nullptr);
+ EXPECT_EQ(sampler->result_id(), 10u);
- ASSERT_TRUE(image != nullptr);
- EXPECT_EQ(image->result_id(), 20u);
+ ASSERT_TRUE(image != nullptr);
+ EXPECT_EQ(image->result_id(), 20u);
- // Variable pointers is not allowed for WGSL. So don't dump it.
- p->DeliberatelyInvalidSpirv();
+ // Variable pointers is not allowed for WGSL. So don't dump it.
+ p->DeliberatelyInvalidSpirv();
}
-TEST_F(SpvParserHandleTest,
- GetMemoryObjectDeclarationForHandle_FuncParam_CopyObject) {
- const auto assembly = Preamble() + FragMain() + CommonTypes() + R"(
+TEST_F(SpvParserHandleTest, GetMemoryObjectDeclarationForHandle_FuncParam_CopyObject) {
+ const auto assembly = Preamble() + FragMain() + CommonTypes() + R"(
%fty = OpTypeFunction %void %ptr_sampler %ptr_f_texture_1d
%func = OpFunction %void None %fty
@@ -765,24 +743,23 @@ TEST_F(SpvParserHandleTest,
OpReturn
OpFunctionEnd
)" + MainBody();
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildInternalModule());
- EXPECT_TRUE(p->error().empty());
- const auto* sampler = p->GetMemoryObjectDeclarationForHandle(110, false);
- const auto* image = p->GetMemoryObjectDeclarationForHandle(120, true);
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildInternalModule());
+ EXPECT_TRUE(p->error().empty());
+ const auto* sampler = p->GetMemoryObjectDeclarationForHandle(110, false);
+ const auto* image = p->GetMemoryObjectDeclarationForHandle(120, true);
- ASSERT_TRUE(sampler != nullptr);
- EXPECT_EQ(sampler->result_id(), 10u);
+ ASSERT_TRUE(sampler != nullptr);
+ EXPECT_EQ(sampler->result_id(), 10u);
- ASSERT_TRUE(image != nullptr);
- EXPECT_EQ(image->result_id(), 20u);
+ ASSERT_TRUE(image != nullptr);
+ EXPECT_EQ(image->result_id(), 20u);
- p->SkipDumpingPending("crbug.com/tint/1039");
+ p->SkipDumpingPending("crbug.com/tint/1039");
}
-TEST_F(SpvParserHandleTest,
- GetMemoryObjectDeclarationForHandle_FuncParam_Load) {
- const auto assembly = Preamble() + FragMain() + CommonTypes() + R"(
+TEST_F(SpvParserHandleTest, GetMemoryObjectDeclarationForHandle_FuncParam_Load) {
+ const auto assembly = Preamble() + FragMain() + CommonTypes() + R"(
%fty = OpTypeFunction %void %ptr_sampler %ptr_f_texture_1d
%func = OpFunction %void None %fty
@@ -796,26 +773,25 @@ TEST_F(SpvParserHandleTest,
OpReturn
OpFunctionEnd
)" + MainBody();
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildInternalModule());
- EXPECT_TRUE(p->error().empty());
- const auto* sampler = p->GetMemoryObjectDeclarationForHandle(110, false);
- const auto* image = p->GetMemoryObjectDeclarationForHandle(120, true);
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildInternalModule());
+ EXPECT_TRUE(p->error().empty());
+ const auto* sampler = p->GetMemoryObjectDeclarationForHandle(110, false);
+ const auto* image = p->GetMemoryObjectDeclarationForHandle(120, true);
- ASSERT_TRUE(sampler != nullptr);
- EXPECT_EQ(sampler->result_id(), 10u);
+ ASSERT_TRUE(sampler != nullptr);
+ EXPECT_EQ(sampler->result_id(), 10u);
- ASSERT_TRUE(image != nullptr);
- EXPECT_EQ(image->result_id(), 20u);
+ ASSERT_TRUE(image != nullptr);
+ EXPECT_EQ(image->result_id(), 20u);
- p->SkipDumpingPending("crbug.com/tint/1039");
+ p->SkipDumpingPending("crbug.com/tint/1039");
}
-TEST_F(SpvParserHandleTest,
- GetMemoryObjectDeclarationForHandle_FuncParam_SampledImage) {
- // Trace through the sampled image instruction, but in two different
- // directions.
- const auto assembly = Preamble() + FragMain() + CommonTypes() + R"(
+TEST_F(SpvParserHandleTest, GetMemoryObjectDeclarationForHandle_FuncParam_SampledImage) {
+ // Trace through the sampled image instruction, but in two different
+ // directions.
+ const auto assembly = Preamble() + FragMain() + CommonTypes() + R"(
%sampled_image_type = OpTypeSampledImage %f_texture_1d
%fty = OpTypeFunction %void %ptr_sampler %ptr_f_texture_1d
@@ -832,24 +808,23 @@ TEST_F(SpvParserHandleTest,
OpReturn
OpFunctionEnd
)" + MainBody();
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildInternalModule());
- EXPECT_TRUE(p->error().empty());
- const auto* sampler = p->GetMemoryObjectDeclarationForHandle(100, false);
- const auto* image = p->GetMemoryObjectDeclarationForHandle(100, true);
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildInternalModule());
+ EXPECT_TRUE(p->error().empty());
+ const auto* sampler = p->GetMemoryObjectDeclarationForHandle(100, false);
+ const auto* image = p->GetMemoryObjectDeclarationForHandle(100, true);
- ASSERT_TRUE(sampler != nullptr);
- EXPECT_EQ(sampler->result_id(), 10u);
+ ASSERT_TRUE(sampler != nullptr);
+ EXPECT_EQ(sampler->result_id(), 10u);
- ASSERT_TRUE(image != nullptr);
- EXPECT_EQ(image->result_id(), 20u);
+ ASSERT_TRUE(image != nullptr);
+ EXPECT_EQ(image->result_id(), 20u);
- p->SkipDumpingPending("crbug.com/tint/1039");
+ p->SkipDumpingPending("crbug.com/tint/1039");
}
-TEST_F(SpvParserHandleTest,
- GetMemoryObjectDeclarationForHandle_FuncParam_Image) {
- const auto assembly = Preamble() + FragMain() + CommonTypes() + R"(
+TEST_F(SpvParserHandleTest, GetMemoryObjectDeclarationForHandle_FuncParam_Image) {
+ const auto assembly = Preamble() + FragMain() + CommonTypes() + R"(
%sampled_image_type = OpTypeSampledImage %f_texture_1d
%fty = OpTypeFunction %void %ptr_sampler %ptr_f_texture_1d
@@ -867,38 +842,36 @@ TEST_F(SpvParserHandleTest,
OpReturn
OpFunctionEnd
)" + MainBody();
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildInternalModule());
- EXPECT_TRUE(p->error().empty());
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildInternalModule());
+ EXPECT_TRUE(p->error().empty());
- const auto* image = p->GetMemoryObjectDeclarationForHandle(200, true);
- ASSERT_TRUE(image != nullptr);
- EXPECT_EQ(image->result_id(), 20u);
+ const auto* image = p->GetMemoryObjectDeclarationForHandle(200, true);
+ ASSERT_TRUE(image != nullptr);
+ EXPECT_EQ(image->result_id(), 20u);
- p->SkipDumpingPending("crbug.com/tint/1039");
+ p->SkipDumpingPending("crbug.com/tint/1039");
}
// Test RegisterHandleUsage, sampled image cases
struct UsageImageAccessCase {
- std::string inst;
- std::string expected_sampler_usage;
- std::string expected_image_usage;
+ std::string inst;
+ std::string expected_sampler_usage;
+ std::string expected_image_usage;
};
-inline std::ostream& operator<<(std::ostream& out,
- const UsageImageAccessCase& c) {
- out << "UsageImageAccessCase(" << c.inst << ", " << c.expected_sampler_usage
- << ", " << c.expected_image_usage << ")";
- return out;
+inline std::ostream& operator<<(std::ostream& out, const UsageImageAccessCase& c) {
+ out << "UsageImageAccessCase(" << c.inst << ", " << c.expected_sampler_usage << ", "
+ << c.expected_image_usage << ")";
+ return out;
}
using SpvParserHandleTest_RegisterHandleUsage_SampledImage =
SpvParserTestBase<::testing::TestWithParam<UsageImageAccessCase>>;
TEST_P(SpvParserHandleTest_RegisterHandleUsage_SampledImage, Variable) {
- const std::string inst = GetParam().inst;
- const auto assembly = Preamble() + FragMain() + Bindings({10, 20}) +
- CommonTypes() + R"(
+ const std::string inst = GetParam().inst;
+ const auto assembly = Preamble() + FragMain() + Bindings({10, 20}) + CommonTypes() + R"(
%si_ty = OpTypeSampledImage %f_texture_2d
%coords = OpConstantNull %v2float
%coords3d = OpConstantNull %v3float ; needed for Proj variants
@@ -917,30 +890,29 @@ TEST_P(SpvParserHandleTest_RegisterHandleUsage_SampledImage, Variable) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildInternalModule());
- EXPECT_TRUE(p->RegisterHandleUsage());
- EXPECT_TRUE(p->error().empty());
- Usage su = p->GetHandleUsage(10);
- Usage iu = p->GetHandleUsage(20);
-
- EXPECT_THAT(su.to_str(), Eq(GetParam().expected_sampler_usage));
- EXPECT_THAT(iu.to_str(), Eq(GetParam().expected_image_usage));
-
- if (inst.find("ImageQueryLod") != std::string::npos) {
- // WGSL does not support querying image level of detail.
- // So don't emit them as part of a "passing" corpus.
- p->DeliberatelyInvalidSpirv();
- }
- if (inst.find("ImageSampleDrefExplicitLod") != std::string::npos) {
- p->SkipDumpingPending("crbug.com/tint/425"); // gpuweb issue #1319
- }
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildInternalModule());
+ EXPECT_TRUE(p->RegisterHandleUsage());
+ EXPECT_TRUE(p->error().empty());
+ Usage su = p->GetHandleUsage(10);
+ Usage iu = p->GetHandleUsage(20);
+
+ EXPECT_THAT(su.to_str(), Eq(GetParam().expected_sampler_usage));
+ EXPECT_THAT(iu.to_str(), Eq(GetParam().expected_image_usage));
+
+ if (inst.find("ImageQueryLod") != std::string::npos) {
+ // WGSL does not support querying image level of detail.
+ // So don't emit them as part of a "passing" corpus.
+ p->DeliberatelyInvalidSpirv();
+ }
+ if (inst.find("ImageSampleDrefExplicitLod") != std::string::npos) {
+ p->SkipDumpingPending("crbug.com/tint/425"); // gpuweb issue #1319
+ }
}
TEST_P(SpvParserHandleTest_RegisterHandleUsage_SampledImage, FunctionParam) {
- const std::string inst = GetParam().inst;
- const auto assembly = Preamble() + FragMain() + Bindings({10, 20}) +
- CommonTypes() + R"(
+ const std::string inst = GetParam().inst;
+ const auto assembly = Preamble() + FragMain() + Bindings({10, 20}) + CommonTypes() + R"(
%f_ty = OpTypeFunction %void %ptr_sampler %ptr_f_texture_2d
%si_ty = OpTypeSampledImage %f_texture_2d
%coords = OpConstantNull %v2float
@@ -969,22 +941,22 @@ TEST_P(SpvParserHandleTest_RegisterHandleUsage_SampledImage, FunctionParam) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildInternalModule()) << p->error() << assembly << std::endl;
- EXPECT_TRUE(p->RegisterHandleUsage()) << p->error() << assembly << std::endl;
- EXPECT_TRUE(p->error().empty()) << p->error() << assembly << std::endl;
- Usage su = p->GetHandleUsage(10);
- Usage iu = p->GetHandleUsage(20);
-
- EXPECT_THAT(su.to_str(), Eq(GetParam().expected_sampler_usage));
- EXPECT_THAT(iu.to_str(), Eq(GetParam().expected_image_usage));
-
- if (inst.find("ImageQueryLod") != std::string::npos) {
- // WGSL does not support querying image level of detail.
- // So don't emit them as part of a "passing" corpus.
- p->DeliberatelyInvalidSpirv();
- }
- p->SkipDumpingPending("crbug.com/tint/785");
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildInternalModule()) << p->error() << assembly << std::endl;
+ EXPECT_TRUE(p->RegisterHandleUsage()) << p->error() << assembly << std::endl;
+ EXPECT_TRUE(p->error().empty()) << p->error() << assembly << std::endl;
+ Usage su = p->GetHandleUsage(10);
+ Usage iu = p->GetHandleUsage(20);
+
+ EXPECT_THAT(su.to_str(), Eq(GetParam().expected_sampler_usage));
+ EXPECT_THAT(iu.to_str(), Eq(GetParam().expected_image_usage));
+
+ if (inst.find("ImageQueryLod") != std::string::npos) {
+ // WGSL does not support querying image level of detail.
+ // So don't emit them as part of a "passing" corpus.
+ p->DeliberatelyInvalidSpirv();
+ }
+ p->SkipDumpingPending("crbug.com/tint/785");
}
INSTANTIATE_TEST_SUITE_P(
@@ -995,37 +967,30 @@ INSTANTIATE_TEST_SUITE_P(
// OpImageGather
UsageImageAccessCase{"%result = OpImageGather "
"%v4float %sampled_image %coords %uint_1",
- "Usage(Sampler( ))",
- "Usage(Texture( is_sampled ))"},
+ "Usage(Sampler( ))", "Usage(Texture( is_sampled ))"},
// OpImageDrefGather
UsageImageAccessCase{"%result = OpImageDrefGather "
"%v4float %sampled_image %coords %depth",
- "Usage(Sampler( comparison ))",
- "Usage(Texture( is_sampled depth ))"},
+ "Usage(Sampler( comparison ))", "Usage(Texture( is_sampled depth ))"},
// Sample the texture.
// OpImageSampleImplicitLod
UsageImageAccessCase{"%result = OpImageSampleImplicitLod "
"%v4float %sampled_image %coords",
- "Usage(Sampler( ))",
- "Usage(Texture( is_sampled ))"},
+ "Usage(Sampler( ))", "Usage(Texture( is_sampled ))"},
// OpImageSampleExplicitLod
UsageImageAccessCase{"%result = OpImageSampleExplicitLod "
"%v4float %sampled_image %coords Lod %float_null",
- "Usage(Sampler( ))",
- "Usage(Texture( is_sampled ))"},
+ "Usage(Sampler( ))", "Usage(Texture( is_sampled ))"},
// OpImageSampleDrefImplicitLod
UsageImageAccessCase{"%result = OpImageSampleDrefImplicitLod "
"%float %sampled_image %coords %depth",
- "Usage(Sampler( comparison ))",
- "Usage(Texture( is_sampled depth ))"},
+ "Usage(Sampler( comparison ))", "Usage(Texture( is_sampled depth ))"},
// OpImageSampleDrefExplicitLod
- UsageImageAccessCase{
- "%result = OpImageSampleDrefExplicitLod "
- "%float %sampled_image %coords %depth Lod %float_null",
- "Usage(Sampler( comparison ))",
- "Usage(Texture( is_sampled depth ))"},
+ UsageImageAccessCase{"%result = OpImageSampleDrefExplicitLod "
+ "%float %sampled_image %coords %depth Lod %float_null",
+ "Usage(Sampler( comparison ))", "Usage(Texture( is_sampled depth ))"},
// Sample the texture, with *Proj* variants, even though WGSL doesn't
// support them.
@@ -1033,102 +998,94 @@ INSTANTIATE_TEST_SUITE_P(
// OpImageSampleProjImplicitLod
UsageImageAccessCase{"%result = OpImageSampleProjImplicitLod "
"%v4float %sampled_image %coords3d",
- "Usage(Sampler( ))",
- "Usage(Texture( is_sampled ))"},
+ "Usage(Sampler( ))", "Usage(Texture( is_sampled ))"},
// OpImageSampleProjExplicitLod
- UsageImageAccessCase{
- "%result = OpImageSampleProjExplicitLod "
- "%v4float %sampled_image %coords3d Lod %float_null",
- "Usage(Sampler( ))", "Usage(Texture( is_sampled ))"},
+ UsageImageAccessCase{"%result = OpImageSampleProjExplicitLod "
+ "%v4float %sampled_image %coords3d Lod %float_null",
+ "Usage(Sampler( ))", "Usage(Texture( is_sampled ))"},
// OpImageSampleProjDrefImplicitLod
UsageImageAccessCase{"%result = OpImageSampleProjDrefImplicitLod "
"%float %sampled_image %coords3d %depth",
- "Usage(Sampler( comparison ))",
- "Usage(Texture( is_sampled depth ))"},
+ "Usage(Sampler( comparison ))", "Usage(Texture( is_sampled depth ))"},
// OpImageSampleProjDrefExplicitLod
- UsageImageAccessCase{
- "%result = OpImageSampleProjDrefExplicitLod "
- "%float %sampled_image %coords3d %depth Lod %float_null",
- "Usage(Sampler( comparison ))",
- "Usage(Texture( is_sampled depth ))"},
+ UsageImageAccessCase{"%result = OpImageSampleProjDrefExplicitLod "
+ "%float %sampled_image %coords3d %depth Lod %float_null",
+ "Usage(Sampler( comparison ))", "Usage(Texture( is_sampled depth ))"},
// OpImageQueryLod
- UsageImageAccessCase{
- "%result = OpImageQueryLod %v2float %sampled_image %coords",
- "Usage(Sampler( ))", "Usage(Texture( is_sampled ))"}));
+ UsageImageAccessCase{"%result = OpImageQueryLod %v2float %sampled_image %coords",
+ "Usage(Sampler( ))", "Usage(Texture( is_sampled ))"}));
// Test RegisterHandleUsage, raw image cases.
// For these we test the use of an image value directly, and not combined
// with the sampler. The image still could be of sampled image type.
struct UsageRawImageCase {
- std::string type; // Example: f_storage_1d or f_texture_1d
- std::string inst;
- std::string expected_image_usage;
+ std::string type; // Example: f_storage_1d or f_texture_1d
+ std::string inst;
+ std::string expected_image_usage;
};
inline std::ostream& operator<<(std::ostream& out, const UsageRawImageCase& c) {
- out << "UsageRawImageCase(" << c.type << ", " << c.inst << ", "
- << c.expected_image_usage << ")";
- return out;
+ out << "UsageRawImageCase(" << c.type << ", " << c.inst << ", " << c.expected_image_usage
+ << ")";
+ return out;
}
using SpvParserHandleTest_RegisterHandleUsage_RawImage =
SpvParserTestBase<::testing::TestWithParam<UsageRawImageCase>>;
TEST_P(SpvParserHandleTest_RegisterHandleUsage_RawImage, Variable) {
- const bool is_storage = GetParam().type.find("storage") != std::string::npos;
- const bool is_write = GetParam().inst.find("ImageWrite") != std::string::npos;
- const auto assembly = Preamble() + FragMain() + Bindings({20}) +
- (is_storage ? std::string("OpDecorate %20 ") +
- std::string(is_write ? "NonReadable"
- : "NonWritable")
- : std::string("")) +
- " " + CommonTypes() + R"(
+ const bool is_storage = GetParam().type.find("storage") != std::string::npos;
+ const bool is_write = GetParam().inst.find("ImageWrite") != std::string::npos;
+ const auto assembly = Preamble() + FragMain() + Bindings({20}) +
+ (is_storage ? std::string("OpDecorate %20 ") +
+ std::string(is_write ? "NonReadable" : "NonWritable")
+ : std::string("")) +
+ " " + CommonTypes() + R"(
%20 = OpVariable %ptr_)" +
- GetParam().type + R"( UniformConstant
+ GetParam().type + R"( UniformConstant
%main = OpFunction %void None %voidfn
%entry = OpLabel
%im = OpLoad %)" + GetParam().type +
- R"( %20
+ R"( %20
)" + GetParam().inst + R"(
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildInternalModule());
- EXPECT_TRUE(p->RegisterHandleUsage());
- EXPECT_TRUE(p->error().empty());
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildInternalModule());
+ EXPECT_TRUE(p->RegisterHandleUsage());
+ EXPECT_TRUE(p->error().empty());
- Usage iu = p->GetHandleUsage(20);
- EXPECT_THAT(iu.to_str(), Eq(GetParam().expected_image_usage));
+ Usage iu = p->GetHandleUsage(20);
+ EXPECT_THAT(iu.to_str(), Eq(GetParam().expected_image_usage));
- Usage su = p->GetHandleUsage(20);
+ Usage su = p->GetHandleUsage(20);
}
TEST_P(SpvParserHandleTest_RegisterHandleUsage_RawImage, FunctionParam) {
- const bool is_storage = GetParam().type.find("storage") != std::string::npos;
- const bool is_write = GetParam().inst.find("ImageWrite") != std::string::npos;
- const auto assembly = Preamble() + FragMain() + Bindings({20}) +
- (is_storage ? std::string("OpDecorate %20 ") +
- std::string(is_write ? "NonReadable"
- : "NonWritable")
- : std::string("")) +
- " " + CommonTypes() + R"(
+ const bool is_storage = GetParam().type.find("storage") != std::string::npos;
+ const bool is_write = GetParam().inst.find("ImageWrite") != std::string::npos;
+ const auto assembly = Preamble() + FragMain() + Bindings({20}) +
+ (is_storage ? std::string("OpDecorate %20 ") +
+ std::string(is_write ? "NonReadable" : "NonWritable")
+ : std::string("")) +
+ " " + CommonTypes() + R"(
%f_ty = OpTypeFunction %void %ptr_)" +
- GetParam().type + R"(
+ GetParam().type + R"(
%20 = OpVariable %ptr_)" +
- GetParam().type + R"( UniformConstant
+ GetParam().type + R"( UniformConstant
%func = OpFunction %void None %f_ty
%i_param = OpFunctionParameter %ptr_)" +
- GetParam().type + R"(
+ GetParam().type + R"(
%func_entry = OpLabel
%im = OpLoad %)" + GetParam().type +
- R"( %i_param
+ R"( %i_param
)" + GetParam().inst + R"(
@@ -1141,16 +1098,16 @@ TEST_P(SpvParserHandleTest_RegisterHandleUsage_RawImage, FunctionParam) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildInternalModule());
- EXPECT_TRUE(p->RegisterHandleUsage());
- EXPECT_TRUE(p->error().empty());
- Usage iu = p->GetHandleUsage(20);
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildInternalModule());
+ EXPECT_TRUE(p->RegisterHandleUsage());
+ EXPECT_TRUE(p->error().empty());
+ Usage iu = p->GetHandleUsage(20);
- EXPECT_THAT(iu.to_str(), Eq(GetParam().expected_image_usage));
+ EXPECT_THAT(iu.to_str(), Eq(GetParam().expected_image_usage));
- // Textures and samplers not yet supported as function parameters.
- p->SkipDumpingPending("crbug.com/tint/785");
+ // Textures and samplers not yet supported as function parameters.
+ p->SkipDumpingPending("crbug.com/tint/785");
}
INSTANTIATE_TEST_SUITE_P(
@@ -1159,13 +1116,11 @@ INSTANTIATE_TEST_SUITE_P(
::testing::Values(
// OpImageRead
- UsageRawImageCase{"f_storage_1d",
- "%result = OpImageRead %v4float %im %uint_1",
+ UsageRawImageCase{"f_storage_1d", "%result = OpImageRead %v4float %im %uint_1",
"Usage(Texture( read ))"},
// OpImageWrite
- UsageRawImageCase{"f_storage_1d",
- "OpImageWrite %im %uint_1 %v4float_null",
+ UsageRawImageCase{"f_storage_1d", "OpImageWrite %im %uint_1 %v4float_null",
"Usage(Texture( write ))"},
// OpImageFetch
@@ -1207,63 +1162,60 @@ INSTANTIATE_TEST_SUITE_P(
// use in image access instructions in executable code. For these we have
// to infer usage from the SPIR-V sampler or image type.
struct DeclUnderspecifiedHandleCase {
- std::string decorations; // SPIR-V decorations
- std::string inst; // SPIR-V variable declarations
- std::string var_decl; // WGSL variable declaration
+ std::string decorations; // SPIR-V decorations
+ std::string inst; // SPIR-V variable declarations
+ std::string var_decl; // WGSL variable declaration
};
-inline std::ostream& operator<<(std::ostream& out,
- const DeclUnderspecifiedHandleCase& c) {
- out << "DeclUnderspecifiedHandleCase(" << c.inst << "\n" << c.var_decl << ")";
- return out;
+inline std::ostream& operator<<(std::ostream& out, const DeclUnderspecifiedHandleCase& c) {
+ out << "DeclUnderspecifiedHandleCase(" << c.inst << "\n" << c.var_decl << ")";
+ return out;
}
using SpvParserHandleTest_DeclUnderspecifiedHandle =
SpvParserTestBase<::testing::TestWithParam<DeclUnderspecifiedHandleCase>>;
TEST_P(SpvParserHandleTest_DeclUnderspecifiedHandle, Variable) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
OpEntryPoint Fragment %main "main"
OpExecutionMode %main OriginUpperLeft
OpDecorate %10 DescriptorSet 0
OpDecorate %10 Binding 0
)" + GetParam().decorations +
- CommonTypes() + GetParam().inst +
- R"(
+ CommonTypes() + GetParam().inst +
+ R"(
%main = OpFunction %void None %voidfn
%entry = OpLabel
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
- EXPECT_TRUE(p->error().empty()) << p->error();
- const auto program = test::ToString(p->program());
- EXPECT_THAT(program, HasSubstr(GetParam().var_decl)) << program;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
+ EXPECT_TRUE(p->error().empty()) << p->error();
+ const auto program = test::ToString(p->program());
+ EXPECT_THAT(program, HasSubstr(GetParam().var_decl)) << program;
}
-INSTANTIATE_TEST_SUITE_P(
- Samplers,
- SpvParserHandleTest_DeclUnderspecifiedHandle,
- ::testing::Values(
+INSTANTIATE_TEST_SUITE_P(Samplers,
+ SpvParserHandleTest_DeclUnderspecifiedHandle,
+ ::testing::Values(
- DeclUnderspecifiedHandleCase{
- "", R"(
+ DeclUnderspecifiedHandleCase{
+ "", R"(
%ptr = OpTypePointer UniformConstant %sampler
%10 = OpVariable %ptr UniformConstant
)",
- R"(@group(0) @binding(0) var x_10 : sampler;)"}));
+ R"(@group(0) @binding(0) var x_10 : sampler;)"}));
INSTANTIATE_TEST_SUITE_P(
Images,
SpvParserHandleTest_DeclUnderspecifiedHandle,
::testing::Values(
- DeclUnderspecifiedHandleCase{
- "", R"(
+ DeclUnderspecifiedHandleCase{"", R"(
%10 = OpVariable %ptr_f_texture_1d UniformConstant
)",
- R"(@group(0) @binding(0) var x_10 : texture_1d<f32>;)"},
+ R"(@group(0) @binding(0) var x_10 : texture_1d<f32>;)"},
DeclUnderspecifiedHandleCase{
R"(
OpDecorate %10 NonWritable
@@ -1284,31 +1236,30 @@ INSTANTIATE_TEST_SUITE_P(
// Test handle declaration or error, when there is an image access.
struct ImageDeclCase {
- // SPIR-V image type, excluding result ID and opcode
- std::string spirv_image_type_details;
- std::string spirv_image_access; // Optional instruction to provoke use
- std::string expected_error;
- std::string expected_decl;
+ // SPIR-V image type, excluding result ID and opcode
+ std::string spirv_image_type_details;
+ std::string spirv_image_access; // Optional instruction to provoke use
+ std::string expected_error;
+ std::string expected_decl;
};
inline std::ostream& operator<<(std::ostream& out, const ImageDeclCase& c) {
- out << "ImageDeclCase(" << c.spirv_image_type_details << "\n"
- << "access: " << c.spirv_image_access << "\n"
- << "error: " << c.expected_error << "\n"
- << "decl:" << c.expected_decl << "\n)";
- return out;
+ out << "ImageDeclCase(" << c.spirv_image_type_details << "\n"
+ << "access: " << c.spirv_image_access << "\n"
+ << "error: " << c.expected_error << "\n"
+ << "decl:" << c.expected_decl << "\n)";
+ return out;
}
using SpvParserHandleTest_ImageDeclTest =
SpvParserTestBase<::testing::TestWithParam<ImageDeclCase>>;
TEST_P(SpvParserHandleTest_ImageDeclTest, DeclareAndUseHandle) {
- // Only declare the sampled image type, and the associated variable
- // if the requested image type is a sampled image type and not multisampled.
- const bool is_sampled_image_type = GetParam().spirv_image_type_details.find(
- "0 1 Unknown") != std::string::npos;
- const auto assembly =
- Preamble() + R"(
+    // Only declare the sampled image type and the associated variable
+ // if the requested image type is a sampled image type and not multisampled.
+ const bool is_sampled_image_type =
+ GetParam().spirv_image_type_details.find("0 1 Unknown") != std::string::npos;
+ const auto assembly = Preamble() + R"(
OpEntryPoint Fragment %100 "main"
OpExecutionMode %100 OriginUpperLeft
OpName %float_var "float_var"
@@ -1332,14 +1283,14 @@ TEST_P(SpvParserHandleTest_ImageDeclTest, DeclareAndUseHandle) {
OpDecorate %30 DescriptorSet 0
OpDecorate %30 Binding 1
)" + CommonBasicTypes() +
- R"(
+ R"(
%sampler = OpTypeSampler
%ptr_sampler = OpTypePointer UniformConstant %sampler
%im_ty = OpTypeImage )" +
- GetParam().spirv_image_type_details + R"(
+ GetParam().spirv_image_type_details + R"(
%ptr_im_ty = OpTypePointer UniformConstant %im_ty
)" + (is_sampled_image_type ? " %si_ty = OpTypeSampledImage %im_ty " : "") +
- R"(
+ R"(
%ptr_float = OpTypePointer Function %float
@@ -1370,28 +1321,25 @@ TEST_P(SpvParserHandleTest_ImageDeclTest, DeclareAndUseHandle) {
%sam = OpLoad %sampler %10
%im = OpLoad %im_ty %20
-)" +
- (is_sampled_image_type
- ? " %sampled_image = OpSampledImage %si_ty %im %sam "
- : "") +
- GetParam().spirv_image_access +
- R"(
+)" + (is_sampled_image_type ? " %sampled_image = OpSampledImage %si_ty %im %sam " : "") +
+ GetParam().spirv_image_access +
+ R"(
; Use an anchor for the cases when the image access doesn't have a result ID.
%1000 = OpCopyObject %uint %uint_0
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- const bool succeeded = p->BuildAndParseInternalModule();
- if (succeeded) {
- EXPECT_TRUE(GetParam().expected_error.empty());
- const auto got = test::ToString(p->program());
- EXPECT_THAT(got, HasSubstr(GetParam().expected_decl));
- } else {
- EXPECT_FALSE(GetParam().expected_error.empty());
- EXPECT_THAT(p->error(), HasSubstr(GetParam().expected_error));
- }
+ auto p = parser(test::Assemble(assembly));
+ const bool succeeded = p->BuildAndParseInternalModule();
+ if (succeeded) {
+ EXPECT_TRUE(GetParam().expected_error.empty());
+ const auto got = test::ToString(p->program());
+ EXPECT_THAT(got, HasSubstr(GetParam().expected_decl));
+ } else {
+ EXPECT_FALSE(GetParam().expected_error.empty());
+ EXPECT_THAT(p->error(), HasSubstr(GetParam().expected_error));
+ }
}
INSTANTIATE_TEST_SUITE_P(
@@ -1402,49 +1350,46 @@ INSTANTIATE_TEST_SUITE_P(
"WGSL multisampled textures must be 2d and non-arrayed: ", ""},
{"%float 1D 0 1 1 1 Unknown", "%result = OpImageQuerySamples %uint %im",
"WGSL arrayed textures must be 2d_array or cube_array: ", ""},
- {"%float 2D 0 0 1 1 Unknown", "%result = OpImageQuerySamples %uint %im",
- "", "@group(2) @binding(1) var x_20 : texture_multisampled_2d<f32>;"},
+ {"%float 2D 0 0 1 1 Unknown", "%result = OpImageQuerySamples %uint %im", "",
+ "@group(2) @binding(1) var x_20 : texture_multisampled_2d<f32>;"},
{"%float 2D 0 1 1 1 Unknown", "%result = OpImageQuerySamples %uint %im",
"WGSL multisampled textures must be 2d and non-arrayed: ", ""},
{"%float 3D 0 0 1 1 Unknown", "%result = OpImageQuerySamples %uint %im",
"WGSL multisampled textures must be 2d and non-arrayed: ", ""},
{"%float 3D 0 1 1 1 Unknown", "%result = OpImageQuerySamples %uint %im",
"WGSL arrayed textures must be 2d_array or cube_array: ", ""},
- {"%float Cube 0 0 1 1 Unknown",
- "%result = OpImageQuerySamples %uint %im",
+ {"%float Cube 0 0 1 1 Unknown", "%result = OpImageQuerySamples %uint %im",
"WGSL multisampled textures must be 2d and non-arrayed: ", ""},
- {"%float Cube 0 1 1 1 Unknown",
- "%result = OpImageQuerySamples %uint %im",
+ {"%float Cube 0 1 1 1 Unknown", "%result = OpImageQuerySamples %uint %im",
"WGSL multisampled textures must be 2d and non-arrayed: ", ""}}));
// Test emission of variables when we have image accesses in executable code.
struct ImageAccessCase {
- // SPIR-V image type, excluding result ID and opcode
- std::string spirv_image_type_details;
- std::string spirv_image_access; // The provoking image access instruction.
- std::string var_decl; // WGSL variable declaration
- std::string texture_builtin; // WGSL texture usage.
+ // SPIR-V image type, excluding result ID and opcode
+ std::string spirv_image_type_details;
+ std::string spirv_image_access; // The provoking image access instruction.
+ std::string var_decl; // WGSL variable declaration
+ std::string texture_builtin; // WGSL texture usage.
};
inline std::ostream& operator<<(std::ostream& out, const ImageAccessCase& c) {
- out << "ImageCase(" << c.spirv_image_type_details << "\n"
- << c.spirv_image_access << "\n"
- << c.var_decl << "\n"
- << c.texture_builtin << ")";
- return out;
+ out << "ImageCase(" << c.spirv_image_type_details << "\n"
+ << c.spirv_image_access << "\n"
+ << c.var_decl << "\n"
+ << c.texture_builtin << ")";
+ return out;
}
using SpvParserHandleTest_SampledImageAccessTest =
SpvParserTestBase<::testing::TestWithParam<ImageAccessCase>>;
TEST_P(SpvParserHandleTest_SampledImageAccessTest, Variable) {
- // Only declare the sampled image type, and the associated variable
- // if the requested image type is a sampled image type, and not a
- // multisampled texture
- const bool is_sampled_image_type = GetParam().spirv_image_type_details.find(
- "0 1 Unknown") != std::string::npos;
- const auto assembly =
- Preamble() + R"(
+    // Only declare the sampled image type and the associated variable
+    // if the requested image type is a sampled image type, and not a
+    // multisampled texture.
+ const bool is_sampled_image_type =
+ GetParam().spirv_image_type_details.find("0 1 Unknown") != std::string::npos;
+ const auto assembly = Preamble() + R"(
OpEntryPoint Fragment %main "main"
OpExecutionMode %main OriginUpperLeft
OpName %f1 "f1"
@@ -1473,14 +1418,14 @@ TEST_P(SpvParserHandleTest_SampledImageAccessTest, Variable) {
OpDecorate %30 DescriptorSet 0
OpDecorate %30 Binding 1
)" + CommonBasicTypes() +
- R"(
+ R"(
%sampler = OpTypeSampler
%ptr_sampler = OpTypePointer UniformConstant %sampler
%im_ty = OpTypeImage )" +
- GetParam().spirv_image_type_details + R"(
+ GetParam().spirv_image_type_details + R"(
%ptr_im_ty = OpTypePointer UniformConstant %im_ty
)" + (is_sampled_image_type ? " %si_ty = OpTypeSampledImage %im_ty " : "") +
- R"(
+ R"(
%10 = OpVariable %ptr_sampler UniformConstant
%20 = OpVariable %ptr_im_ty UniformConstant
@@ -1516,38 +1461,32 @@ TEST_P(SpvParserHandleTest_SampledImageAccessTest, Variable) {
%sam = OpLoad %sampler %10
%im = OpLoad %im_ty %20
-)" +
- (is_sampled_image_type
- ? " %sampled_image = OpSampledImage %si_ty %im %sam\n"
- : "") +
- GetParam().spirv_image_access +
- R"(
+)" + (is_sampled_image_type ? " %sampled_image = OpSampledImage %si_ty %im %sam\n" : "") +
+ GetParam().spirv_image_access +
+ R"(
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
- EXPECT_TRUE(p->error().empty()) << p->error();
- const auto program = test::ToString(p->program());
- EXPECT_THAT(program, HasSubstr(GetParam().var_decl))
- << "DECLARATIONS ARE BAD " << program;
- EXPECT_THAT(program, HasSubstr(GetParam().texture_builtin))
- << "TEXTURE BUILTIN IS BAD " << program << assembly;
-
- const bool is_query_size =
- GetParam().spirv_image_access.find("ImageQuerySize") != std::string::npos;
- const bool is_1d =
- GetParam().spirv_image_type_details.find("1D") != std::string::npos;
- if (is_query_size && is_1d) {
- p->SkipDumpingPending("crbug.com/tint/788");
- }
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
+ EXPECT_TRUE(p->error().empty()) << p->error();
+ const auto program = test::ToString(p->program());
+ EXPECT_THAT(program, HasSubstr(GetParam().var_decl)) << "DECLARATIONS ARE BAD " << program;
+ EXPECT_THAT(program, HasSubstr(GetParam().texture_builtin))
+ << "TEXTURE BUILTIN IS BAD " << program << assembly;
+
+ const bool is_query_size =
+ GetParam().spirv_image_access.find("ImageQuerySize") != std::string::npos;
+ const bool is_1d = GetParam().spirv_image_type_details.find("1D") != std::string::npos;
+ if (is_query_size && is_1d) {
+ p->SkipDumpingPending("crbug.com/tint/788");
+ }
}
// TODO(dneto): Test variable declaration and texture builtins provoked by
// use of an image access instruction inside a helper function.
-TEST_P(SpvParserHandleTest_RegisterHandleUsage_SampledImage,
- DISABLED_FunctionParam) {}
+TEST_P(SpvParserHandleTest_RegisterHandleUsage_SampledImage, DISABLED_FunctionParam) {}
INSTANTIATE_TEST_SUITE_P(
ImageGather,
@@ -1560,16 +1499,15 @@ INSTANTIATE_TEST_SUITE_P(
R"(@group(0) @binding(0) var x_10 : sampler;
@group(2) @binding(1) var x_20 : texture_2d<f32>;)",
- "textureGather(1, x_20, x_10, coords12)"},
+ "textureGather(1i, x_20, x_10, coords12)"},
// OpImageGather 2D ConstOffset signed
- ImageAccessCase{
- "%float 2D 0 0 0 1 Unknown",
- "%result = OpImageGather "
- "%v4float %sampled_image %coords12 %int_1 ConstOffset %offsets2d",
- R"(@group(0) @binding(0) var x_10 : sampler;
+ ImageAccessCase{"%float 2D 0 0 0 1 Unknown",
+ "%result = OpImageGather "
+ "%v4float %sampled_image %coords12 %int_1 ConstOffset %offsets2d",
+ R"(@group(0) @binding(0) var x_10 : sampler;
@group(2) @binding(1) var x_20 : texture_2d<f32>;)",
- "textureGather(1, x_20, x_10, coords12, vec2<i32>(3, 4))"},
+ "textureGather(1i, x_20, x_10, coords12, vec2<i32>(3i, 4i))"},
// OpImageGather 2D ConstOffset unsigned
ImageAccessCase{"%float 2D 0 0 0 1 Unknown",
"%result = OpImageGather "
@@ -1578,7 +1516,7 @@ INSTANTIATE_TEST_SUITE_P(
R"(@group(0) @binding(0) var x_10 : sampler;
@group(2) @binding(1) var x_20 : texture_2d<f32>;)",
- "textureGather(1, x_20, x_10, coords12, "
+ "textureGather(1i, x_20, x_10, coords12, "
"vec2<i32>(vec2<u32>(3u, 4u)))"},
// OpImageGather 2D Array
ImageAccessCase{"%float 2D 0 1 0 1 Unknown",
@@ -1587,18 +1525,17 @@ INSTANTIATE_TEST_SUITE_P(
R"(@group(0) @binding(0) var x_10 : sampler;
@group(2) @binding(1) var x_20 : texture_2d_array<f32>;)",
- "textureGather(1, x_20, x_10, coords123.xy, "
+ "textureGather(1i, x_20, x_10, coords123.xy, "
"i32(round(coords123.z)))"},
// OpImageGather 2D Array ConstOffset signed
- ImageAccessCase{
- "%float 2D 0 1 0 1 Unknown",
- "%result = OpImageGather "
- "%v4float %sampled_image %coords123 %int_1 ConstOffset %offsets2d",
- R"(@group(0) @binding(0) var x_10 : sampler;
+ ImageAccessCase{"%float 2D 0 1 0 1 Unknown",
+ "%result = OpImageGather "
+ "%v4float %sampled_image %coords123 %int_1 ConstOffset %offsets2d",
+ R"(@group(0) @binding(0) var x_10 : sampler;
@group(2) @binding(1) var x_20 : texture_2d_array<f32>;)",
- "textureGather(1, x_20, x_10, coords123.xy, "
- "i32(round(coords123.z)), vec2<i32>(3, 4))"},
+ "textureGather(1i, x_20, x_10, coords123.xy, "
+ "i32(round(coords123.z)), vec2<i32>(3i, 4i))"},
// OpImageGather 2D Array ConstOffset unsigned
ImageAccessCase{"%float 2D 0 1 0 1 Unknown",
"%result = OpImageGather "
@@ -1607,7 +1544,7 @@ INSTANTIATE_TEST_SUITE_P(
R"(@group(0) @binding(0) var x_10 : sampler;
@group(2) @binding(1) var x_20 : texture_2d_array<f32>;)",
- "textureGather(1, x_20, x_10, coords123.xy, "
+ "textureGather(1i, x_20, x_10, coords123.xy, "
"i32(round(coords123.z)), "
"vec2<i32>(vec2<u32>(3u, 4u)))"},
// OpImageGather Cube
@@ -1617,7 +1554,7 @@ INSTANTIATE_TEST_SUITE_P(
R"(@group(0) @binding(0) var x_10 : sampler;
@group(2) @binding(1) var x_20 : texture_cube<f32>;)",
- "textureGather(1, x_20, x_10, coords123)"},
+ "textureGather(1i, x_20, x_10, coords123)"},
// OpImageGather Cube Array
ImageAccessCase{"%float Cube 0 1 0 1 Unknown",
"%result = OpImageGather "
@@ -1625,7 +1562,7 @@ INSTANTIATE_TEST_SUITE_P(
R"(@group(0) @binding(0) var x_10 : sampler;
@group(2) @binding(1) var x_20 : texture_cube_array<f32>;)",
- "textureGather(1, x_20, x_10, coords1234.xyz, "
+ "textureGather(1i, x_20, x_10, coords1234.xyz, "
"i32(round(coords1234.w)))"},
// OpImageGather 2DDepth
ImageAccessCase{"%float 2D 1 0 0 1 Unknown",
@@ -1636,14 +1573,13 @@ INSTANTIATE_TEST_SUITE_P(
@group(2) @binding(1) var x_20 : texture_depth_2d;)",
"textureGather(x_20, x_10, coords12)"},
// OpImageGather 2DDepth ConstOffset signed
- ImageAccessCase{
- "%float 2D 1 0 0 1 Unknown",
- "%result = OpImageGather "
- "%v4float %sampled_image %coords12 %int_1 ConstOffset %offsets2d",
- R"(@group(0) @binding(0) var x_10 : sampler;
+ ImageAccessCase{"%float 2D 1 0 0 1 Unknown",
+ "%result = OpImageGather "
+ "%v4float %sampled_image %coords12 %int_1 ConstOffset %offsets2d",
+ R"(@group(0) @binding(0) var x_10 : sampler;
@group(2) @binding(1) var x_20 : texture_depth_2d;)",
- "textureGather(x_20, x_10, coords12, vec2<i32>(3, 4))"},
+ "textureGather(x_20, x_10, coords12, vec2<i32>(3i, 4i))"},
// OpImageGather 2DDepth ConstOffset unsigned
ImageAccessCase{"%float 2D 1 0 0 1 Unknown",
"%result = OpImageGather "
@@ -1664,15 +1600,14 @@ INSTANTIATE_TEST_SUITE_P(
"textureGather(x_20, x_10, coords123.xy, "
"i32(round(coords123.z)))"},
// OpImageGather 2DDepth Array ConstOffset signed
- ImageAccessCase{
- "%float 2D 1 1 0 1 Unknown",
- "%result = OpImageGather "
- "%v4float %sampled_image %coords123 %int_1 ConstOffset %offsets2d",
- R"(@group(0) @binding(0) var x_10 : sampler;
+ ImageAccessCase{"%float 2D 1 1 0 1 Unknown",
+ "%result = OpImageGather "
+ "%v4float %sampled_image %coords123 %int_1 ConstOffset %offsets2d",
+ R"(@group(0) @binding(0) var x_10 : sampler;
@group(2) @binding(1) var x_20 : texture_depth_2d_array;)",
- "textureGather(x_20, x_10, coords123.xy, "
- "i32(round(coords123.z)), vec2<i32>(3, 4))"},
+ "textureGather(x_20, x_10, coords123.xy, "
+ "i32(round(coords123.z)), vec2<i32>(3i, 4i))"},
// OpImageGather 2DDepth Array ConstOffset unsigned
ImageAccessCase{"%float 2D 1 1 0 1 Unknown",
"%result = OpImageGather "
@@ -1707,35 +1642,32 @@ INSTANTIATE_TEST_SUITE_P(
SpvParserHandleTest_SampledImageAccessTest,
::testing::ValuesIn(std::vector<ImageAccessCase>{
// OpImageDrefGather 2DDepth
- ImageAccessCase{
- "%float 2D 1 0 0 1 Unknown",
- "%result = OpImageDrefGather "
- "%v4float %sampled_image %coords12 %depth",
- R"(@group(0) @binding(0) var x_10 : sampler_comparison;
+ ImageAccessCase{"%float 2D 1 0 0 1 Unknown",
+ "%result = OpImageDrefGather "
+ "%v4float %sampled_image %coords12 %depth",
+ R"(@group(0) @binding(0) var x_10 : sampler_comparison;
@group(2) @binding(1) var x_20 : texture_depth_2d;)",
- "textureGatherCompare(x_20, x_10, coords12, 0.200000003)"},
+ "textureGatherCompare(x_20, x_10, coords12, 0.200000003f)"},
// OpImageDrefGather 2DDepth ConstOffset signed
- ImageAccessCase{
- "%float 2D 1 0 0 1 Unknown",
- "%result = OpImageDrefGather "
- "%v4float %sampled_image %coords12 %depth ConstOffset %offsets2d",
- R"(@group(0) @binding(0) var x_10 : sampler_comparison;
+ ImageAccessCase{"%float 2D 1 0 0 1 Unknown",
+ "%result = OpImageDrefGather "
+ "%v4float %sampled_image %coords12 %depth ConstOffset %offsets2d",
+ R"(@group(0) @binding(0) var x_10 : sampler_comparison;
@group(2) @binding(1) var x_20 : texture_depth_2d;)",
- "textureGatherCompare(x_20, x_10, coords12, 0.200000003, "
- "vec2<i32>(3, 4))"},
+ "textureGatherCompare(x_20, x_10, coords12, 0.200000003f, "
+ "vec2<i32>(3i, 4i))"},
// OpImageDrefGather 2DDepth ConstOffset unsigned
- ImageAccessCase{
- "%float 2D 1 0 0 1 Unknown",
- "%result = OpImageDrefGather "
- "%v4float %sampled_image %coords12 %depth ConstOffset "
- "%u_offsets2d",
- R"(@group(0) @binding(0) var x_10 : sampler_comparison;
+ ImageAccessCase{"%float 2D 1 0 0 1 Unknown",
+ "%result = OpImageDrefGather "
+ "%v4float %sampled_image %coords12 %depth ConstOffset "
+ "%u_offsets2d",
+ R"(@group(0) @binding(0) var x_10 : sampler_comparison;
@group(2) @binding(1) var x_20 : texture_depth_2d;)",
- "textureGatherCompare(x_20, x_10, coords12, 0.200000003, "
- "vec2<i32>(vec2<u32>(3u, 4u)))"},
+ "textureGatherCompare(x_20, x_10, coords12, 0.200000003f, "
+ "vec2<i32>(vec2<u32>(3u, 4u)))"},
// OpImageDrefGather 2DDepth Array
ImageAccessCase{"%float 2D 1 1 0 1 Unknown",
"%result = OpImageDrefGather "
@@ -1744,17 +1676,16 @@ INSTANTIATE_TEST_SUITE_P(
@group(2) @binding(1) var x_20 : texture_depth_2d_array;)",
"textureGatherCompare(x_20, x_10, coords123.xy, "
- "i32(round(coords123.z)), 0.200000003)"},
+ "i32(round(coords123.z)), 0.200000003f)"},
// OpImageDrefGather 2DDepth Array ConstOffset signed
- ImageAccessCase{
- "%float 2D 1 1 0 1 Unknown",
- "%result = OpImageDrefGather "
- "%v4float %sampled_image %coords123 %depth ConstOffset %offsets2d",
- R"(@group(0) @binding(0) var x_10 : sampler_comparison;
+ ImageAccessCase{"%float 2D 1 1 0 1 Unknown",
+ "%result = OpImageDrefGather "
+ "%v4float %sampled_image %coords123 %depth ConstOffset %offsets2d",
+ R"(@group(0) @binding(0) var x_10 : sampler_comparison;
@group(2) @binding(1) var x_20 : texture_depth_2d_array;)",
- "textureGatherCompare(x_20, x_10, coords123.xy, "
- "i32(round(coords123.z)), 0.200000003, vec2<i32>(3, 4))"},
+ "textureGatherCompare(x_20, x_10, coords123.xy, "
+ "i32(round(coords123.z)), 0.200000003f, vec2<i32>(3i, 4i))"},
// OpImageDrefGather 2DDepth Array ConstOffset unsigned
ImageAccessCase{"%float 2D 1 1 0 1 Unknown",
"%result = OpImageDrefGather "
@@ -1764,17 +1695,16 @@ INSTANTIATE_TEST_SUITE_P(
@group(2) @binding(1) var x_20 : texture_depth_2d_array;)",
"textureGatherCompare(x_20, x_10, coords123.xy, "
- "i32(round(coords123.z)), 0.200000003, "
+ "i32(round(coords123.z)), 0.200000003f, "
"vec2<i32>(vec2<u32>(3u, 4u)))"},
// OpImageDrefGather DepthCube
- ImageAccessCase{
- "%float Cube 1 0 0 1 Unknown",
- "%result = OpImageDrefGather "
- "%v4float %sampled_image %coords123 %depth",
- R"(@group(0) @binding(0) var x_10 : sampler_comparison;
+ ImageAccessCase{"%float Cube 1 0 0 1 Unknown",
+ "%result = OpImageDrefGather "
+ "%v4float %sampled_image %coords123 %depth",
+ R"(@group(0) @binding(0) var x_10 : sampler_comparison;
@group(2) @binding(1) var x_20 : texture_depth_cube;)",
- "textureGatherCompare(x_20, x_10, coords123, 0.200000003)"},
+ "textureGatherCompare(x_20, x_10, coords123, 0.200000003f)"},
// OpImageDrefGather DepthCube Array
ImageAccessCase{"%float Cube 1 1 0 1 Unknown",
"%result = OpImageDrefGather "
@@ -1783,7 +1713,7 @@ INSTANTIATE_TEST_SUITE_P(
@group(2) @binding(1) var x_20 : texture_depth_cube_array;)",
"textureGatherCompare(x_20, x_10, coords1234.xyz, "
- "i32(round(coords1234.w)), 0.200000003)"}}));
+ "i32(round(coords1234.w)), 0.200000003f)"}}));
INSTANTIATE_TEST_SUITE_P(
ImageSampleImplicitLod,
@@ -1800,24 +1730,22 @@ INSTANTIATE_TEST_SUITE_P(
"textureSample(x_20, x_10, coords12)"},
// OpImageSampleImplicitLod arrayed
- ImageAccessCase{
- "%float 2D 0 1 0 1 Unknown",
- "%result = OpImageSampleImplicitLod "
- "%v4float %sampled_image %coords123",
- R"(@group(0) @binding(0) var x_10 : sampler;
+ ImageAccessCase{"%float 2D 0 1 0 1 Unknown",
+ "%result = OpImageSampleImplicitLod "
+ "%v4float %sampled_image %coords123",
+ R"(@group(0) @binding(0) var x_10 : sampler;
@group(2) @binding(1) var x_20 : texture_2d_array<f32>;)",
- "textureSample(x_20, x_10, coords123.xy, i32(round(coords123.z)))"},
+ "textureSample(x_20, x_10, coords123.xy, i32(round(coords123.z)))"},
// OpImageSampleImplicitLod with ConstOffset
- ImageAccessCase{
- "%float 2D 0 0 0 1 Unknown",
- "%result = OpImageSampleImplicitLod "
- "%v4float %sampled_image %coords12 ConstOffset %offsets2d",
- R"(@group(0) @binding(0) var x_10 : sampler;
+ ImageAccessCase{"%float 2D 0 0 0 1 Unknown",
+ "%result = OpImageSampleImplicitLod "
+ "%v4float %sampled_image %coords12 ConstOffset %offsets2d",
+ R"(@group(0) @binding(0) var x_10 : sampler;
@group(2) @binding(1) var x_20 : texture_2d<f32>;)",
- "textureSample(x_20, x_10, coords12, vec2<i32>(3, 4))"},
+ "textureSample(x_20, x_10, coords12, vec2<i32>(3i, 4i))"},
// OpImageSampleImplicitLod arrayed with ConstOffset
ImageAccessCase{
@@ -1827,7 +1755,7 @@ INSTANTIATE_TEST_SUITE_P(
R"(@group(0) @binding(0) var x_10 : sampler;
@group(2) @binding(1) var x_20 : texture_2d_array<f32>;)",
- R"(textureSample(x_20, x_10, coords123.xy, i32(round(coords123.z)), vec2<i32>(3, 4)))"},
+ R"(textureSample(x_20, x_10, coords123.xy, i32(round(coords123.z)), vec2<i32>(3i, 4i)))"},
// OpImageSampleImplicitLod with Bias
ImageAccessCase{"%float 2D 0 0 0 1 Unknown",
@@ -1836,7 +1764,7 @@ INSTANTIATE_TEST_SUITE_P(
R"(@group(0) @binding(0) var x_10 : sampler;
@group(2) @binding(1) var x_20 : texture_2d<f32>;)",
- "textureSampleBias(x_20, x_10, coords12, 7.0)"},
+ "textureSampleBias(x_20, x_10, coords12, 7.0f)"},
// OpImageSampleImplicitLod arrayed with Bias
ImageAccessCase{
@@ -1846,18 +1774,17 @@ INSTANTIATE_TEST_SUITE_P(
R"(@group(0) @binding(0) var x_10 : sampler;
@group(2) @binding(1) var x_20 : texture_2d_array<f32>;)",
- R"(textureSampleBias(x_20, x_10, coords123.xy, i32(round(coords123.z)), 7.0))"},
+ R"(textureSampleBias(x_20, x_10, coords123.xy, i32(round(coords123.z)), 7.0f))"},
// OpImageSampleImplicitLod with Bias and signed ConstOffset
- ImageAccessCase{
- "%float 2D 0 0 0 1 Unknown",
- "%result = OpImageSampleImplicitLod "
- "%v4float %sampled_image %coords12 Bias|ConstOffset "
- "%float_7 %offsets2d",
- R"(@group(0) @binding(0) var x_10 : sampler;
+ ImageAccessCase{"%float 2D 0 0 0 1 Unknown",
+ "%result = OpImageSampleImplicitLod "
+ "%v4float %sampled_image %coords12 Bias|ConstOffset "
+ "%float_7 %offsets2d",
+ R"(@group(0) @binding(0) var x_10 : sampler;
@group(2) @binding(1) var x_20 : texture_2d<f32>;)",
- R"(textureSampleBias(x_20, x_10, coords12, 7.0, vec2<i32>(3, 4))"},
+ R"(textureSampleBias(x_20, x_10, coords12, 7.0f, vec2<i32>(3i, 4i))"},
// OpImageSampleImplicitLod with Bias and unsigned ConstOffset
// Convert ConstOffset to signed
@@ -1869,7 +1796,7 @@ INSTANTIATE_TEST_SUITE_P(
R"(@group(0) @binding(0) var x_10 : sampler;
@group(2) @binding(1) var x_20 : texture_2d<f32>;)",
- R"(textureSampleBias(x_20, x_10, coords12, 7.0, vec2<i32>(vec2<u32>(3u, 4u)))"},
+ R"(textureSampleBias(x_20, x_10, coords12, 7.0f, vec2<i32>(vec2<u32>(3u, 4u)))"},
// OpImageSampleImplicitLod arrayed with Bias
ImageAccessCase{
"%float 2D 0 1 0 1 Unknown",
@@ -1879,7 +1806,7 @@ INSTANTIATE_TEST_SUITE_P(
R"(@group(0) @binding(0) var x_10 : sampler;
@group(2) @binding(1) var x_20 : texture_2d_array<f32>;)",
- R"(textureSampleBias(x_20, x_10, coords123.xy, i32(round(coords123.z)), 7.0, vec2<i32>(3, 4))"}));
+ R"(textureSampleBias(x_20, x_10, coords123.xy, i32(round(coords123.z)), 7.0f, vec2<i32>(3i, 4i))"}));
INSTANTIATE_TEST_SUITE_P(
// This test shows the use of a sampled image with both regular
@@ -1904,8 +1831,8 @@ INSTANTIATE_TEST_SUITE_P(
@group(0) @binding(1) var x_30 : sampler_comparison;
)",
R"(
- let x_200 : vec4<f32> = vec4<f32>(textureSample(x_20, x_10, coords12), 0.0, 0.0, 0.0);
- let x_210 : f32 = textureSampleCompare(x_20, x_30, coords12, 0.200000003);
+ let x_200 : vec4<f32> = vec4<f32>(textureSample(x_20, x_10, coords12), 0.0f, 0.0f, 0.0f);
+ let x_210 : f32 = textureSampleCompare(x_20, x_30, coords12, 0.200000003f);
)"}));
INSTANTIATE_TEST_SUITE_P(
@@ -1913,15 +1840,14 @@ INSTANTIATE_TEST_SUITE_P(
SpvParserHandleTest_SampledImageAccessTest,
::testing::Values(
// ImageSampleDrefImplicitLod
- ImageAccessCase{
- "%float 2D 0 0 0 1 Unknown",
- "%result = OpImageSampleDrefImplicitLod "
- "%float %sampled_image %coords12 %depth",
- R"(@group(0) @binding(0) var x_10 : sampler_comparison;
+ ImageAccessCase{"%float 2D 0 0 0 1 Unknown",
+ "%result = OpImageSampleDrefImplicitLod "
+ "%float %sampled_image %coords12 %depth",
+ R"(@group(0) @binding(0) var x_10 : sampler_comparison;
@group(2) @binding(1) var x_20 : texture_depth_2d;
)",
- R"(textureSampleCompare(x_20, x_10, coords12, 0.200000003))"},
+ R"(textureSampleCompare(x_20, x_10, coords12, 0.200000003f))"},
// ImageSampleDrefImplicitLod - arrayed
ImageAccessCase{
"%float 2D 0 1 0 1 Unknown",
@@ -1930,7 +1856,7 @@ INSTANTIATE_TEST_SUITE_P(
R"(@group(0) @binding(0) var x_10 : sampler_comparison;
@group(2) @binding(1) var x_20 : texture_depth_2d_array;)",
- R"(textureSampleCompare(x_20, x_10, coords123.xy, i32(round(coords123.z)), 0.200000003))"},
+ R"(textureSampleCompare(x_20, x_10, coords123.xy, i32(round(coords123.z)), 0.200000003f))"},
// ImageSampleDrefImplicitLod with ConstOffset
ImageAccessCase{
"%float 2D 0 0 0 1 Unknown",
@@ -1940,7 +1866,7 @@ INSTANTIATE_TEST_SUITE_P(
@group(2) @binding(1) var x_20 : texture_depth_2d;
)",
- R"(textureSampleCompare(x_20, x_10, coords12, 0.200000003, vec2<i32>(3, 4)))"},
+ R"(textureSampleCompare(x_20, x_10, coords12, 0.200000003f, vec2<i32>(3i, 4i)))"},
// ImageSampleDrefImplicitLod arrayed with ConstOffset
ImageAccessCase{
"%float 2D 0 1 0 1 Unknown",
@@ -1949,7 +1875,7 @@ INSTANTIATE_TEST_SUITE_P(
R"(@group(0) @binding(0) var x_10 : sampler_comparison;
@group(2) @binding(1) var x_20 : texture_depth_2d_array;)",
- R"(textureSampleCompare(x_20, x_10, coords123.xy, i32(round(coords123.z)), 0.200000003, vec2<i32>(3, 4)))"}));
+ R"(textureSampleCompare(x_20, x_10, coords123.xy, i32(round(coords123.z)), 0.200000003f, vec2<i32>(3i, 4i)))"}));
INSTANTIATE_TEST_SUITE_P(
ImageSampleDrefExplicitLod,
@@ -1958,15 +1884,14 @@ INSTANTIATE_TEST_SUITE_P(
// Another test checks cases where the Lod is not float constant 0.
::testing::Values(
// 2D
- ImageAccessCase{
- "%float 2D 1 0 0 1 Unknown",
- "%result = OpImageSampleDrefExplicitLod "
- "%float %sampled_image %coords12 %depth Lod %float_0",
- R"(@group(0) @binding(0) var x_10 : sampler_comparison;
+ ImageAccessCase{"%float 2D 1 0 0 1 Unknown",
+ "%result = OpImageSampleDrefExplicitLod "
+ "%float %sampled_image %coords12 %depth Lod %float_0",
+ R"(@group(0) @binding(0) var x_10 : sampler_comparison;
@group(2) @binding(1) var x_20 : texture_depth_2d;
)",
- R"(textureSampleCompareLevel(x_20, x_10, coords12, 0.200000003))"},
+ R"(textureSampleCompareLevel(x_20, x_10, coords12, 0.200000003f))"},
// 2D array
ImageAccessCase{
"%float 2D 1 1 0 1 Unknown",
@@ -1975,7 +1900,7 @@ INSTANTIATE_TEST_SUITE_P(
R"(@group(0) @binding(0) var x_10 : sampler_comparison;
@group(2) @binding(1) var x_20 : texture_depth_2d_array;)",
- R"(textureSampleCompareLevel(x_20, x_10, coords123.xy, i32(round(coords123.z)), 0.200000003))"},
+ R"(textureSampleCompareLevel(x_20, x_10, coords123.xy, i32(round(coords123.z)), 0.200000003f))"},
// 2D, ConstOffset
ImageAccessCase{
"%float 2D 1 0 0 1 Unknown",
@@ -1986,7 +1911,7 @@ INSTANTIATE_TEST_SUITE_P(
@group(2) @binding(1) var x_20 : texture_depth_2d;
)",
- R"(textureSampleCompareLevel(x_20, x_10, coords12, 0.200000003, vec2<i32>(3, 4)))"},
+ R"(textureSampleCompareLevel(x_20, x_10, coords12, 0.200000003f, vec2<i32>(3i, 4i)))"},
// 2D array, ConstOffset
ImageAccessCase{
"%float 2D 1 1 0 1 Unknown",
@@ -1996,16 +1921,15 @@ INSTANTIATE_TEST_SUITE_P(
R"(@group(0) @binding(0) var x_10 : sampler_comparison;
@group(2) @binding(1) var x_20 : texture_depth_2d_array;)",
- R"(textureSampleCompareLevel(x_20, x_10, coords123.xy, i32(round(coords123.z)), 0.200000003, vec2<i32>(3, 4)))"},
+ R"(textureSampleCompareLevel(x_20, x_10, coords123.xy, i32(round(coords123.z)), 0.200000003f, vec2<i32>(3i, 4i)))"},
// Cube
- ImageAccessCase{
- "%float Cube 1 0 0 1 Unknown",
- "%result = OpImageSampleDrefExplicitLod "
- "%float %sampled_image %coords123 %depth Lod %float_0",
- R"(@group(0) @binding(0) var x_10 : sampler_comparison;
+ ImageAccessCase{"%float Cube 1 0 0 1 Unknown",
+ "%result = OpImageSampleDrefExplicitLod "
+ "%float %sampled_image %coords123 %depth Lod %float_0",
+ R"(@group(0) @binding(0) var x_10 : sampler_comparison;
@group(2) @binding(1) var x_20 : texture_depth_cube;)",
- R"(textureSampleCompareLevel(x_20, x_10, coords123, 0.200000003))"},
+ R"(textureSampleCompareLevel(x_20, x_10, coords123, 0.200000003f))"},
// Cube array
ImageAccessCase{
"%float Cube 1 1 0 1 Unknown",
@@ -2014,7 +1938,7 @@ INSTANTIATE_TEST_SUITE_P(
R"(@group(0) @binding(0) var x_10 : sampler_comparison;
@group(2) @binding(1) var x_20 : texture_depth_cube_array;)",
- R"(textureSampleCompareLevel(x_20, x_10, coords1234.xyz, i32(round(coords1234.w)), 0.200000003))"}));
+ R"(textureSampleCompareLevel(x_20, x_10, coords1234.xyz, i32(round(coords1234.w)), 0.200000003f))"}));
INSTANTIATE_TEST_SUITE_P(
ImageSampleExplicitLod_UsingLod,
@@ -2028,7 +1952,7 @@ INSTANTIATE_TEST_SUITE_P(
R"(@group(0) @binding(0) var x_10 : sampler;
@group(2) @binding(1) var x_20 : texture_2d<f32>;)",
- R"(textureSampleLevel(x_20, x_10, coords12, 0.0))"},
+ R"(textureSampleLevel(x_20, x_10, coords12, 0.0f))"},
// OpImageSampleExplicitLod arrayed - using Lod
ImageAccessCase{
@@ -2038,18 +1962,17 @@ INSTANTIATE_TEST_SUITE_P(
R"(@group(0) @binding(0) var x_10 : sampler;
@group(2) @binding(1) var x_20 : texture_2d_array<f32>;)",
- R"(textureSampleLevel(x_20, x_10, coords123.xy, i32(round(coords123.z)), 0.0))"},
+ R"(textureSampleLevel(x_20, x_10, coords123.xy, i32(round(coords123.z)), 0.0f))"},
// OpImageSampleExplicitLod - using Lod and ConstOffset
- ImageAccessCase{
- "%float 2D 0 0 0 1 Unknown",
- "%result = OpImageSampleExplicitLod "
- "%v4float %sampled_image %coords12 Lod|ConstOffset "
- "%float_null %offsets2d",
- R"(@group(0) @binding(0) var x_10 : sampler;
+ ImageAccessCase{"%float 2D 0 0 0 1 Unknown",
+ "%result = OpImageSampleExplicitLod "
+ "%v4float %sampled_image %coords12 Lod|ConstOffset "
+ "%float_null %offsets2d",
+ R"(@group(0) @binding(0) var x_10 : sampler;
@group(2) @binding(1) var x_20 : texture_2d<f32>;)",
- R"(textureSampleLevel(x_20, x_10, coords12, 0.0, vec2<i32>(3, 4)))"},
+ R"(textureSampleLevel(x_20, x_10, coords12, 0.0f, vec2<i32>(3i, 4i)))"},
// OpImageSampleExplicitLod - using Lod and unsigned ConstOffset
// Convert the ConstOffset operand to signed
@@ -2061,7 +1984,7 @@ INSTANTIATE_TEST_SUITE_P(
R"(@group(0) @binding(0) var x_10 : sampler;
@group(2) @binding(1) var x_20 : texture_2d<f32>;)",
- R"(textureSampleLevel(x_20, x_10, coords12, 0.0, vec2<i32>(vec2<u32>(3u, 4u)))"},
+ R"(textureSampleLevel(x_20, x_10, coords12, 0.0f, vec2<i32>(vec2<u32>(3u, 4u)))"},
// OpImageSampleExplicitLod arrayed - using Lod and ConstOffset
ImageAccessCase{
@@ -2072,7 +1995,7 @@ INSTANTIATE_TEST_SUITE_P(
R"(@group(0) @binding(0) var x_10 : sampler;
@group(2) @binding(1) var x_20 : texture_2d_array<f32>;)",
- R"(textureSampleLevel(x_20, x_10, coords123.xy, i32(round(coords123.z)), 0.0, vec2<i32>(3, 4)))"}));
+ R"(textureSampleLevel(x_20, x_10, coords123.xy, i32(round(coords123.z)), 0.0f, vec2<i32>(3i, 4i)))"}));
INSTANTIATE_TEST_SUITE_P(
ImageSampleExplicitLod_UsingGrad,
@@ -2080,14 +2003,13 @@ INSTANTIATE_TEST_SUITE_P(
::testing::Values(
// OpImageSampleExplicitLod - using Grad
- ImageAccessCase{
- "%float 2D 0 0 0 1 Unknown",
- "%result = OpImageSampleExplicitLod "
- "%v4float %sampled_image %coords12 Grad %vf12 %vf21",
- R"(@group(0) @binding(0) var x_10 : sampler;
+ ImageAccessCase{"%float 2D 0 0 0 1 Unknown",
+ "%result = OpImageSampleExplicitLod "
+ "%v4float %sampled_image %coords12 Grad %vf12 %vf21",
+ R"(@group(0) @binding(0) var x_10 : sampler;
@group(2) @binding(1) var x_20 : texture_2d<f32>;)",
- R"(textureSampleGrad(x_20, x_10, coords12, vf12, vf21))"},
+ R"(textureSampleGrad(x_20, x_10, coords12, vf12, vf21))"},
// OpImageSampleExplicitLod arrayed - using Grad
ImageAccessCase{
@@ -2108,7 +2030,7 @@ INSTANTIATE_TEST_SUITE_P(
R"(@group(0) @binding(0) var x_10 : sampler;
@group(2) @binding(1) var x_20 : texture_2d<f32>;)",
- R"(textureSampleGrad(x_20, x_10, coords12, vf12, vf21, vec2<i32>(3, 4)))"},
+ R"(textureSampleGrad(x_20, x_10, coords12, vf12, vf21, vec2<i32>(3i, 4i)))"},
// OpImageSampleExplicitLod - using Grad and unsigned ConstOffset
ImageAccessCase{
@@ -2130,7 +2052,7 @@ INSTANTIATE_TEST_SUITE_P(
R"(@group(0) @binding(0) var x_10 : sampler;
@group(2) @binding(1) var x_20 : texture_2d_array<f32>;)",
- R"(textureSampleGrad(x_20, x_10, coords123.xy, i32(round(coords123.z)), vf12, vf21, vec2<i32>(3, 4)))"},
+ R"(textureSampleGrad(x_20, x_10, coords123.xy, i32(round(coords123.z)), vf12, vf21, vec2<i32>(3i, 4i)))"},
// OpImageSampleExplicitLod arrayed - using Grad and unsigned
// ConstOffset
@@ -2170,7 +2092,7 @@ INSTANTIATE_TEST_SUITE_P(
@group(2) @binding(1) var x_20 : texture_depth_2d;
)",
- R"(vec4<f32>(textureSampleLevel(x_20, x_10, vf12, i32(f1)), 0.0, 0.0, 0.0))"}}));
+ R"(vec4<f32>(textureSampleLevel(x_20, x_10, vf12, i32(f1)), 0.0f, 0.0f, 0.0f))"}}));
/////
// Projection sampling
@@ -2203,34 +2125,31 @@ INSTANTIATE_TEST_SUITE_P(
::testing::Values(
// OpImageSampleProjImplicitLod 1D
- ImageAccessCase{
- "%float 1D 0 0 0 1 Unknown",
- "%result = OpImageSampleProjImplicitLod "
- "%v4float %sampled_image %coords12",
- R"(@group(0) @binding(0) var x_10 : sampler;
+ ImageAccessCase{"%float 1D 0 0 0 1 Unknown",
+ "%result = OpImageSampleProjImplicitLod "
+ "%v4float %sampled_image %coords12",
+ R"(@group(0) @binding(0) var x_10 : sampler;
@group(2) @binding(1) var x_20 : texture_1d<f32>;)",
- R"(textureSample(x_20, x_10, (coords12.x / coords12.y)))"},
+ R"(textureSample(x_20, x_10, (coords12.x / coords12.y)))"},
// OpImageSampleProjImplicitLod 2D
- ImageAccessCase{
- "%float 2D 0 0 0 1 Unknown",
- "%result = OpImageSampleProjImplicitLod "
- "%v4float %sampled_image %coords123",
- R"(@group(0) @binding(0) var x_10 : sampler;
+ ImageAccessCase{"%float 2D 0 0 0 1 Unknown",
+ "%result = OpImageSampleProjImplicitLod "
+ "%v4float %sampled_image %coords123",
+ R"(@group(0) @binding(0) var x_10 : sampler;
@group(2) @binding(1) var x_20 : texture_2d<f32>;)",
- R"(textureSample(x_20, x_10, (coords123.xy / coords123.z)))"},
+ R"(textureSample(x_20, x_10, (coords123.xy / coords123.z)))"},
// OpImageSampleProjImplicitLod 3D
- ImageAccessCase{
- "%float 3D 0 0 0 1 Unknown",
- "%result = OpImageSampleProjImplicitLod "
- "%v4float %sampled_image %coords1234",
- R"(@group(0) @binding(0) var x_10 : sampler;
+ ImageAccessCase{"%float 3D 0 0 0 1 Unknown",
+ "%result = OpImageSampleProjImplicitLod "
+ "%v4float %sampled_image %coords1234",
+ R"(@group(0) @binding(0) var x_10 : sampler;
@group(2) @binding(1) var x_20 : texture_3d<f32>;)",
- R"(textureSample(x_20, x_10, (coords1234.xyz / coords1234.w)))"},
+ R"(textureSample(x_20, x_10, (coords1234.xyz / coords1234.w)))"},
// OpImageSampleProjImplicitLod 2D with ConstOffset
// (Don't need to test with 1D or 3D, as the hard part was the splatted
@@ -2242,7 +2161,7 @@ INSTANTIATE_TEST_SUITE_P(
R"(@group(0) @binding(0) var x_10 : sampler;
@group(2) @binding(1) var x_20 : texture_2d<f32>;)",
- R"(textureSample(x_20, x_10, (coords123.xy / coords123.z), vec2<i32>(3, 4)))"}));
+ R"(textureSample(x_20, x_10, (coords123.xy / coords123.z), vec2<i32>(3i, 4i)))"}));
INSTANTIATE_TEST_SUITE_P(
ImageSampleProjImplicitLod_Bias,
@@ -2251,14 +2170,13 @@ INSTANTIATE_TEST_SUITE_P(
// OpImageSampleProjImplicitLod with Bias
// Only testing 2D
- ImageAccessCase{
- "%float 2D 0 0 0 1 Unknown",
- "%result = OpImageSampleProjImplicitLod "
- "%v4float %sampled_image %coords123 Bias %float_7",
- R"(@group(0) @binding(0) var x_10 : sampler;
+ ImageAccessCase{"%float 2D 0 0 0 1 Unknown",
+ "%result = OpImageSampleProjImplicitLod "
+ "%v4float %sampled_image %coords123 Bias %float_7",
+ R"(@group(0) @binding(0) var x_10 : sampler;
@group(2) @binding(1) var x_20 : texture_2d<f32>;)",
- R"(textureSampleBias(x_20, x_10, (coords123.xy / coords123.z), 7.0))"},
+ R"(textureSampleBias(x_20, x_10, (coords123.xy / coords123.z), 7.0f))"},
// OpImageSampleProjImplicitLod with Bias and signed ConstOffset
ImageAccessCase{
@@ -2269,7 +2187,7 @@ INSTANTIATE_TEST_SUITE_P(
R"(@group(0) @binding(0) var x_10 : sampler;
@group(2) @binding(1) var x_20 : texture_2d<f32>;)",
- R"(textureSampleBias(x_20, x_10, (coords123.xy / coords123.z), 7.0, vec2<i32>(3, 4)))"},
+ R"(textureSampleBias(x_20, x_10, (coords123.xy / coords123.z), 7.0f, vec2<i32>(3i, 4i)))"},
// OpImageSampleProjImplicitLod with Bias and unsigned ConstOffset
// Convert ConstOffset to signed
@@ -2281,21 +2199,20 @@ INSTANTIATE_TEST_SUITE_P(
R"(@group(0) @binding(0) var x_10 : sampler;
@group(2) @binding(1) var x_20 : texture_2d<f32>;)",
- R"(textureSampleBias(x_20, x_10, (coords123.xy / coords123.z), 7.0, vec2<i32>(vec2<u32>(3u, 4u))))"}));
+ R"(textureSampleBias(x_20, x_10, (coords123.xy / coords123.z), 7.0f, vec2<i32>(vec2<u32>(3u, 4u))))"}));
INSTANTIATE_TEST_SUITE_P(
ImageSampleProjExplicitLod_Lod,
SpvParserHandleTest_SampledImageAccessTest,
::testing::Values(
// OpImageSampleProjExplicitLod 2D
- ImageAccessCase{
- "%float 2D 0 0 0 1 Unknown",
- "%result = OpImageSampleProjExplicitLod "
- "%v4float %sampled_image %coords123 Lod %f1",
- R"(@group(0) @binding(0) var x_10 : sampler;
+ ImageAccessCase{"%float 2D 0 0 0 1 Unknown",
+ "%result = OpImageSampleProjExplicitLod "
+ "%v4float %sampled_image %coords123 Lod %f1",
+ R"(@group(0) @binding(0) var x_10 : sampler;
@group(2) @binding(1) var x_20 : texture_2d<f32>;)",
- R"(textureSampleLevel(x_20, x_10, (coords123.xy / coords123.z), f1))"},
+ R"(textureSampleLevel(x_20, x_10, (coords123.xy / coords123.z), f1))"},
// OpImageSampleProjExplicitLod 2D Lod with ConstOffset
ImageAccessCase{
@@ -2305,7 +2222,7 @@ INSTANTIATE_TEST_SUITE_P(
R"(@group(0) @binding(0) var x_10 : sampler;
@group(2) @binding(1) var x_20 : texture_2d<f32>;)",
- R"(textureSampleLevel(x_20, x_10, (coords123.xy / coords123.z), f1, vec2<i32>(3, 4)))"}));
+ R"(textureSampleLevel(x_20, x_10, (coords123.xy / coords123.z), f1, vec2<i32>(3i, 4i)))"}));
INSTANTIATE_TEST_SUITE_P(
ImageSampleProjExplicitLod_Grad,
@@ -2330,7 +2247,7 @@ INSTANTIATE_TEST_SUITE_P(
R"(@group(0) @binding(0) var x_10 : sampler;
@group(2) @binding(1) var x_20 : texture_2d<f32>;)",
- R"(textureSampleGrad(x_20, x_10, (coords123.xy / coords123.z), vf12, vf21, vec2<i32>(3, 4)))"}));
+ R"(textureSampleGrad(x_20, x_10, (coords123.xy / coords123.z), vf12, vf21, vec2<i32>(3i, 4i)))"}));
INSTANTIATE_TEST_SUITE_P(
// Ordinary (non-comparison) sampling on a depth texture.
@@ -2349,7 +2266,7 @@ INSTANTIATE_TEST_SUITE_P(
// Sampling the depth texture yields an f32, but the
// SPIR-V operation yields vec4<f32>, so fill out the
// remaining components with 0.
- R"(vec4<f32>(textureSample(x_20, x_10, (coords123.xy / coords123.z)), 0.0, 0.0, 0.0))"}));
+ R"(vec4<f32>(textureSample(x_20, x_10, (coords123.xy / coords123.z)), 0.0f, 0.0f, 0.0f))"}));
INSTANTIATE_TEST_SUITE_P(
ImageSampleProjDrefImplicitLod,
@@ -2357,15 +2274,14 @@ INSTANTIATE_TEST_SUITE_P(
::testing::Values(
// OpImageSampleProjDrefImplicitLod 2D depth-texture
- ImageAccessCase{
- "%float 2D 1 0 0 1 Unknown",
- "%result = OpImageSampleProjDrefImplicitLod "
- "%float %sampled_image %coords123 %f1",
- R"(@group(0) @binding(0) var x_10 : sampler_comparison;
+ ImageAccessCase{"%float 2D 1 0 0 1 Unknown",
+ "%result = OpImageSampleProjDrefImplicitLod "
+ "%float %sampled_image %coords123 %f1",
+ R"(@group(0) @binding(0) var x_10 : sampler_comparison;
@group(2) @binding(1) var x_20 : texture_depth_2d;
)",
- R"(textureSampleCompare(x_20, x_10, (coords123.xy / coords123.z), f1))"},
+ R"(textureSampleCompare(x_20, x_10, (coords123.xy / coords123.z), f1))"},
// OpImageSampleProjDrefImplicitLod 2D depth-texture, ConstOffset
ImageAccessCase{
@@ -2376,7 +2292,7 @@ INSTANTIATE_TEST_SUITE_P(
@group(2) @binding(1) var x_20 : texture_depth_2d;
)",
- R"(textureSampleCompare(x_20, x_10, (coords123.xy / coords123.z), f1, vec2<i32>(3, 4)))"}));
+ R"(textureSampleCompare(x_20, x_10, (coords123.xy / coords123.z), f1, vec2<i32>(3i, 4i)))"}));
INSTANTIATE_TEST_SUITE_P(
DISABLED_ImageSampleProjDrefExplicitLod_Lod,
@@ -2395,7 +2311,7 @@ INSTANTIATE_TEST_SUITE_P(
@group(2) @binding(1) var x_20 : texture_depth_2d;
)",
- R"(textureSampleCompare(x_20, x_10, (coords123.xy / coords123.z), 0.200000003, 0.0))"},
+ R"(textureSampleCompare(x_20, x_10, (coords123.xy / coords123.z), 0.200000003f, 0.0f))"},
// OpImageSampleProjDrefImplicitLod 2D depth-texture, Lod ConstOffset
ImageAccessCase{
@@ -2407,7 +2323,7 @@ INSTANTIATE_TEST_SUITE_P(
@group(2) @binding(1) var x_20 : texture_depth_2d;
)",
- R"(textureSampleCompareLevel(x_20, x_10, (coords123.xy / coords123.z), 0.200000003, 0.0, vec2<i32>(3, 4)))"}));
+ R"(textureSampleCompareLevel(x_20, x_10, (coords123.xy / coords123.z), 0.200000003f, 0.0f, vec2<i32>(3i, 4i)))"}));
/////
// End projection sampling
@@ -2417,8 +2333,8 @@ using SpvParserHandleTest_ImageAccessTest =
SpvParserTestBase<::testing::TestWithParam<ImageAccessCase>>;
TEST_P(SpvParserHandleTest_ImageAccessTest, Variable) {
- // In this test harness, we only create an image.
- const auto assembly = Preamble() + R"(
+ // In this test harness, we only create an image.
+ const auto assembly = Preamble() + R"(
OpEntryPoint Fragment %main "main"
OpExecutionMode %main OriginUpperLeft
OpName %f1 "f1"
@@ -2437,9 +2353,9 @@ TEST_P(SpvParserHandleTest_ImageAccessTest, Variable) {
OpDecorate %20 DescriptorSet 2
OpDecorate %20 Binding 1
)" + CommonBasicTypes() +
- R"(
+ R"(
%im_ty = OpTypeImage )" +
- GetParam().spirv_image_type_details + R"(
+ GetParam().spirv_image_type_details + R"(
%ptr_im_ty = OpTypePointer UniformConstant %im_ty
%20 = OpVariable %ptr_im_ty UniformConstant
@@ -2466,26 +2382,24 @@ TEST_P(SpvParserHandleTest_ImageAccessTest, Variable) {
%im = OpLoad %im_ty %20
)" + GetParam().spirv_image_access +
- R"(
+ R"(
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
- EXPECT_TRUE(p->error().empty()) << p->error();
- const auto program = test::ToString(p->program());
- EXPECT_THAT(program, HasSubstr(GetParam().var_decl))
- << "DECLARATIONS ARE BAD " << program;
- EXPECT_THAT(program, HasSubstr(GetParam().texture_builtin))
- << "TEXTURE BUILTIN IS BAD " << program << assembly;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
+ EXPECT_TRUE(p->error().empty()) << p->error();
+ const auto program = test::ToString(p->program());
+ EXPECT_THAT(program, HasSubstr(GetParam().var_decl)) << "DECLARATIONS ARE BAD " << program;
+ EXPECT_THAT(program, HasSubstr(GetParam().texture_builtin))
+ << "TEXTURE BUILTIN IS BAD " << program << assembly;
}
INSTANTIATE_TEST_SUITE_P(ImageWrite_OptionalParams,
SpvParserHandleTest_ImageAccessTest,
::testing::ValuesIn(std::vector<ImageAccessCase>{
// OpImageWrite with no extra params
- {"%float 2D 0 0 0 2 Rgba32f",
- "OpImageWrite %im %vi12 %vf1234",
+ {"%float 2D 0 0 0 2 Rgba32f", "OpImageWrite %im %vi12 %vf1234",
"@group(2) @binding(1) var x_20 : "
"texture_storage_2d<rgba32float, write>;",
"textureStore(x_20, vi12, vf1234);"}}));
@@ -2502,15 +2416,15 @@ INSTANTIATE_TEST_SUITE_P(
// Source 1 component
{"%float 2D 0 0 0 2 R32f", "OpImageWrite %im %vi12 %f1",
R"(@group(2) @binding(1) var x_20 : texture_storage_2d<r32float, write>;)",
- "textureStore(x_20, vi12, vec4<f32>(f1, 0.0, 0.0, 0.0));"},
+ "textureStore(x_20, vi12, vec4<f32>(f1, 0.0f, 0.0f, 0.0f));"},
// Source 2 component, dest 1 component
{"%float 2D 0 0 0 2 R32f", "OpImageWrite %im %vi12 %vf12",
R"(@group(2) @binding(1) var x_20 : texture_storage_2d<r32float, write>;)",
- "textureStore(x_20, vi12, vec4<f32>(vf12, 0.0, 0.0));"},
+ "textureStore(x_20, vi12, vec4<f32>(vf12, 0.0f, 0.0f));"},
// Source 3 component, dest 1 component
{"%float 2D 0 0 0 2 R32f", "OpImageWrite %im %vi12 %vf123",
R"(@group(2) @binding(1) var x_20 : texture_storage_2d<r32float, write>;)",
- "textureStore(x_20, vi12, vec4<f32>(vf123, 0.0));"},
+ "textureStore(x_20, vi12, vec4<f32>(vf123, 0.0f));"},
// Source 4 component, dest 1 component
{"%float 2D 0 0 0 2 R32f", "OpImageWrite %im %vi12 %vf1234",
R"(@group(2) @binding(1) var x_20 : texture_storage_2d<r32float, write>;)",
@@ -2518,11 +2432,11 @@ INSTANTIATE_TEST_SUITE_P(
// Source 2 component, dest 2 component
{"%float 2D 0 0 0 2 Rg32f", "OpImageWrite %im %vi12 %vf12",
R"(@group(2) @binding(1) var x_20 : texture_storage_2d<rg32float, write>;)",
- "textureStore(x_20, vi12, vec4<f32>(vf12, 0.0, 0.0));"},
+ "textureStore(x_20, vi12, vec4<f32>(vf12, 0.0f, 0.0f));"},
// Source 3 component, dest 2 component
{"%float 2D 0 0 0 2 Rg32f", "OpImageWrite %im %vi12 %vf123",
R"(@group(2) @binding(1) var x_20 : texture_storage_2d<rg32float, write>;)",
- "textureStore(x_20, vi12, vec4<f32>(vf123, 0.0));"},
+ "textureStore(x_20, vi12, vec4<f32>(vf123, 0.0f));"},
// Source 4 component, dest 2 component
{"%float 2D 0 0 0 2 Rg32f", "OpImageWrite %im %vi12 %vf1234",
R"(@group(2) @binding(1) var x_20 : texture_storage_2d<rg32float, write>;)",
@@ -2535,7 +2449,7 @@ INSTANTIATE_TEST_SUITE_P(
"textureStore(x_20, vi12, vf1234);"}}));
TEST_F(SpvParserHandleTest, ImageWrite_TooFewSrcTexelComponents_1_vs_4) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
OpEntryPoint Fragment %main "main"
OpExecutionMode %main OriginUpperLeft
OpName %f1 "f1"
@@ -2543,7 +2457,7 @@ TEST_F(SpvParserHandleTest, ImageWrite_TooFewSrcTexelComponents_1_vs_4) {
OpDecorate %20 DescriptorSet 2
OpDecorate %20 Binding 1
)" + CommonBasicTypes() +
- R"(
+ R"(
%im_ty = OpTypeImage %void 2D 0 0 0 2 Rgba32f
%ptr_im_ty = OpTypePointer UniformConstant %im_ty
@@ -2561,12 +2475,11 @@ TEST_F(SpvParserHandleTest, ImageWrite_TooFewSrcTexelComponents_1_vs_4) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- EXPECT_FALSE(p->BuildAndParseInternalModule());
- EXPECT_THAT(p->error(),
- Eq("texel has too few components for storage texture: 1 provided "
- "but 4 required, in: OpImageWrite %54 %3 %2"))
- << p->error();
+ auto p = parser(test::Assemble(assembly));
+ EXPECT_FALSE(p->BuildAndParseInternalModule());
+ EXPECT_THAT(p->error(), Eq("texel has too few components for storage texture: 1 provided "
+ "but 4 required, in: OpImageWrite %54 %3 %2"))
+ << p->error();
}
INSTANTIATE_TEST_SUITE_P(
@@ -2651,7 +2564,7 @@ INSTANTIATE_TEST_SUITE_P(
// Source signed, dest signed
{"%int 2D 0 0 0 2 R32i", "OpImageWrite %im %vi12 %vi12",
R"(@group(2) @binding(1) var x_20 : texture_storage_2d<r32sint, write>;)",
- R"(textureStore(x_20, vi12, vec4<i32>(vi12, 0, 0)))"}}));
+ R"(textureStore(x_20, vi12, vec4<i32>(vi12, 0i, 0i)))"}}));
INSTANTIATE_TEST_SUITE_P(
ImageFetch_OptionalParams,
@@ -2661,22 +2574,20 @@ INSTANTIATE_TEST_SUITE_P(
// Level of detail is injected for sampled texture
{"%float 2D 0 0 0 1 Unknown", "%99 = OpImageFetch %v4float %im %vi12",
R"(@group(2) @binding(1) var x_20 : texture_2d<f32>;)",
- R"(let x_99 : vec4<f32> = textureLoad(x_20, vi12, 0);)"},
+ R"(let x_99 : vec4<f32> = textureLoad(x_20, vi12, 0i);)"},
// OpImageFetch with explicit level, on sampled texture
- {"%float 2D 0 0 0 1 Unknown",
- "%99 = OpImageFetch %v4float %im %vi12 Lod %int_3",
+ {"%float 2D 0 0 0 1 Unknown", "%99 = OpImageFetch %v4float %im %vi12 Lod %int_3",
R"(@group(2) @binding(1) var x_20 : texture_2d<f32>;)",
- R"(let x_99 : vec4<f32> = textureLoad(x_20, vi12, 3);)"},
+ R"(let x_99 : vec4<f32> = textureLoad(x_20, vi12, 3i);)"},
// OpImageFetch with no extra params, on depth texture
// Level of detail is injected for depth texture
{"%float 2D 1 0 0 1 Unknown", "%99 = OpImageFetch %v4float %im %vi12",
R"(@group(2) @binding(1) var x_20 : texture_depth_2d;)",
- R"(let x_99 : vec4<f32> = vec4<f32>(textureLoad(x_20, vi12, 0), 0.0, 0.0, 0.0);)"},
+ R"(let x_99 : vec4<f32> = vec4<f32>(textureLoad(x_20, vi12, 0i), 0.0f, 0.0f, 0.0f);)"},
// OpImageFetch with extra params, on depth texture
- {"%float 2D 1 0 0 1 Unknown",
- "%99 = OpImageFetch %v4float %im %vi12 Lod %int_3",
+ {"%float 2D 1 0 0 1 Unknown", "%99 = OpImageFetch %v4float %im %vi12 Lod %int_3",
R"(@group(2) @binding(1) var x_20 : texture_depth_2d;)",
- R"(let x_99 : vec4<f32> = vec4<f32>(textureLoad(x_20, vi12, 3), 0.0, 0.0, 0.0);)"}}));
+ R"(let x_99 : vec4<f32> = vec4<f32>(textureLoad(x_20, vi12, 3i), 0.0f, 0.0f, 0.0f);)"}}));
INSTANTIATE_TEST_SUITE_P(
ImageFetch_Depth,
@@ -2689,7 +2600,7 @@ INSTANTIATE_TEST_SUITE_P(
// ImageFetch on depth image.
{"%float 2D 1 0 0 1 Unknown", "%99 = OpImageFetch %v4float %im %vi12 ",
R"(@group(2) @binding(1) var x_20 : texture_depth_2d;)",
- R"(let x_99 : vec4<f32> = vec4<f32>(textureLoad(x_20, vi12, 0), 0.0, 0.0, 0.0);)"}}));
+ R"(let x_99 : vec4<f32> = vec4<f32>(textureLoad(x_20, vi12, 0i), 0.0f, 0.0f, 0.0f);)"}}));
INSTANTIATE_TEST_SUITE_P(
ImageFetch_DepthMultisampled,
@@ -2700,151 +2611,146 @@ INSTANTIATE_TEST_SUITE_P(
SpvParserHandleTest_ImageAccessTest,
::testing::ValuesIn(std::vector<ImageAccessCase>{
// ImageFetch on multisampled depth image.
- {"%float 2D 1 0 1 1 Unknown",
- "%99 = OpImageFetch %v4float %im %vi12 Sample %i1",
+ {"%float 2D 1 0 1 1 Unknown", "%99 = OpImageFetch %v4float %im %vi12 Sample %i1",
R"(@group(2) @binding(1) var x_20 : texture_depth_multisampled_2d;)",
- R"(let x_99 : vec4<f32> = vec4<f32>(textureLoad(x_20, vi12, i1), 0.0, 0.0, 0.0);)"}}));
-
-INSTANTIATE_TEST_SUITE_P(
- ImageFetch_Multisampled,
- SpvParserHandleTest_ImageAccessTest,
- ::testing::ValuesIn(std::vector<ImageAccessCase>{
- // SPIR-V requires a Sample image operand when operating on a
- // multisampled image.
-
- // ImageFetch arrayed
- // Not in WebGPU
-
- // ImageFetch non-arrayed
- {"%float 2D 0 0 1 1 Unknown",
- "%99 = OpImageFetch %v4float %im %vi12 Sample %i1",
- R"(@group(2) @binding(1) var x_20 : texture_multisampled_2d<f32>;)",
- R"(let x_99 : vec4<f32> = textureLoad(x_20, vi12, i1);)"}}));
+ R"(let x_99 : vec4<f32> = vec4<f32>(textureLoad(x_20, vi12, i1), 0.0f, 0.0f, 0.0f);)"}}));
-INSTANTIATE_TEST_SUITE_P(
- ImageFetch_Multisampled_ConvertSampleOperand,
- SpvParserHandleTest_ImageAccessTest,
- ::testing::ValuesIn(std::vector<ImageAccessCase>{
- {"%float 2D 0 0 1 1 Unknown",
- "%99 = OpImageFetch %v4float %im %vi12 Sample %u1",
- R"(@group(2) @binding(1) var x_20 : texture_multisampled_2d<f32>;)",
- R"(let x_99 : vec4<f32> = textureLoad(x_20, vi12, i32(u1));)"}}));
-
-INSTANTIATE_TEST_SUITE_P(
- ConvertResultSignedness,
- SpvParserHandleTest_SampledImageAccessTest,
- ::testing::ValuesIn(std::vector<ImageAccessCase>{
- // Valid SPIR-V only has:
- // float scalar sampled type vs. floating result
- // integral scalar sampled type vs. integral result
- // Any of the sampling, reading, or fetching use the same codepath.
-
- // We'll test with:
- // OpImageFetch
- // OpImageRead
- // OpImageSampleImplicitLod - representative of sampling
-
- //
- // OpImageRead
- //
-
- // OpImageFetch requires no conversion, float -> v4float
- {"%float 2D 0 0 0 1 Unknown", "%99 = OpImageFetch %v4float %im %vi12",
- R"(@group(2) @binding(1) var x_20 : texture_2d<f32>;)",
- R"(let x_99 : vec4<f32> = textureLoad(x_20, vi12, 0);)"},
- // OpImageFetch requires no conversion, uint -> v4uint
- {"%uint 2D 0 0 0 1 Unknown", "%99 = OpImageFetch %v4uint %im %vi12",
- R"(@group(2) @binding(1) var x_20 : texture_2d<u32>;)",
- R"(let x_99 : vec4<u32> = textureLoad(x_20, vi12, 0);)"},
- // OpImageFetch requires conversion, uint -> v4int
- // is invalid SPIR-V:
- // "Expected Image 'Sampled Type' to be the same as Result Type
- // components"
-
- // OpImageFetch requires no conversion, int -> v4int
- {"%int 2D 0 0 0 1 Unknown", "%99 = OpImageFetch %v4int %im %vi12",
- R"(@group(2) @binding(1) var x_20 : texture_2d<i32>;)",
- R"(let x_99 : vec4<i32> = textureLoad(x_20, vi12, 0);)"},
- // OpImageFetch requires conversion, int -> v4uint
- // is invalid SPIR-V:
- // "Expected Image 'Sampled Type' to be the same as Result Type
- // components"
+INSTANTIATE_TEST_SUITE_P(ImageFetch_Multisampled,
+ SpvParserHandleTest_ImageAccessTest,
+ ::testing::ValuesIn(std::vector<ImageAccessCase>{
+ // SPIR-V requires a Sample image operand when operating on a
+ // multisampled image.
- //
- // OpImageRead
- //
+ // ImageFetch arrayed
+ // Not in WebGPU
- // OpImageRead requires no conversion, float -> v4float
- {"%float 2D 0 0 0 2 Rgba32f", "%99 = OpImageRead %v4float %im %vi12",
- R"(@group(2) @binding(1) var x_20 : texture_2d<f32>;)",
- R"(let x_99 : vec4<f32> = textureLoad(x_20, vi12, 0);)"},
- // OpImageRead requires no conversion, uint -> v4uint
- {"%uint 2D 0 0 0 2 Rgba32ui", "%99 = OpImageRead %v4uint %im %vi12",
- R"(@group(2) @binding(1) var x_20 : texture_2d<u32>;)",
- R"(let x_99 : vec4<u32> = textureLoad(x_20, vi12, 0);)"},
-
- // OpImageRead requires conversion, uint -> v4int
- // is invalid SPIR-V:
- // "Expected Image 'Sampled Type' to be the same as Result Type
- // components"
-
- // OpImageRead requires no conversion, int -> v4int
- {"%int 2D 0 0 0 2 Rgba32i", "%99 = OpImageRead %v4int %im %vi12",
- R"(@group(2) @binding(1) var x_20 : texture_2d<i32>;)",
- R"(let x_99 : vec4<i32> = textureLoad(x_20, vi12, 0);)"},
-
- // OpImageRead requires conversion, int -> v4uint
- // is invalid SPIR-V:
- // "Expected Image 'Sampled Type' to be the same as Result Type
- // components"
+ // ImageFetch non-arrayed
+ {"%float 2D 0 0 1 1 Unknown",
+ "%99 = OpImageFetch %v4float %im %vi12 Sample %i1",
+ R"(@group(2) @binding(1) var x_20 : texture_multisampled_2d<f32>;)",
+ R"(let x_99 : vec4<f32> = textureLoad(x_20, vi12, i1);)"}}));
- //
- // Sampling operations, using OpImageSampleImplicitLod as an example.
- // WGSL sampling operations only work on textures with a float sampled
- // component. So we can only test the float -> float (non-conversion)
- // case.
+INSTANTIATE_TEST_SUITE_P(ImageFetch_Multisampled_ConvertSampleOperand,
+ SpvParserHandleTest_ImageAccessTest,
+ ::testing::ValuesIn(std::vector<ImageAccessCase>{
+ {"%float 2D 0 0 1 1 Unknown",
+ "%99 = OpImageFetch %v4float %im %vi12 Sample %u1",
+ R"(@group(2) @binding(1) var x_20 : texture_multisampled_2d<f32>;)",
+ R"(let x_99 : vec4<f32> = textureLoad(x_20, vi12, i32(u1));)"}}));
- // OpImageSampleImplicitLod requires no conversion, float -> v4float
- {"%float 2D 0 0 0 1 Unknown",
- "%99 = OpImageSampleImplicitLod %v4float %sampled_image %vf12",
- R"(@group(0) @binding(0) var x_10 : sampler;
+INSTANTIATE_TEST_SUITE_P(ConvertResultSignedness,
+ SpvParserHandleTest_SampledImageAccessTest,
+ ::testing::ValuesIn(std::vector<ImageAccessCase>{
+ // Valid SPIR-V only has:
+ // float scalar sampled type vs. floating result
+ // integral scalar sampled type vs. integral result
+                             // Sampling, reading, and fetching all use the same codepath.
+
+ // We'll test with:
+ // OpImageFetch
+ // OpImageRead
+ // OpImageSampleImplicitLod - representative of sampling
+
+ //
+                             // OpImageFetch
+ //
+
+ // OpImageFetch requires no conversion, float -> v4float
+ {"%float 2D 0 0 0 1 Unknown", "%99 = OpImageFetch %v4float %im %vi12",
+ R"(@group(2) @binding(1) var x_20 : texture_2d<f32>;)",
+ R"(let x_99 : vec4<f32> = textureLoad(x_20, vi12, 0i);)"},
+ // OpImageFetch requires no conversion, uint -> v4uint
+ {"%uint 2D 0 0 0 1 Unknown", "%99 = OpImageFetch %v4uint %im %vi12",
+ R"(@group(2) @binding(1) var x_20 : texture_2d<u32>;)",
+ R"(let x_99 : vec4<u32> = textureLoad(x_20, vi12, 0i);)"},
+ // OpImageFetch requires conversion, uint -> v4int
+ // is invalid SPIR-V:
+ // "Expected Image 'Sampled Type' to be the same as Result Type
+ // components"
+
+ // OpImageFetch requires no conversion, int -> v4int
+ {"%int 2D 0 0 0 1 Unknown", "%99 = OpImageFetch %v4int %im %vi12",
+ R"(@group(2) @binding(1) var x_20 : texture_2d<i32>;)",
+ R"(let x_99 : vec4<i32> = textureLoad(x_20, vi12, 0i);)"},
+ // OpImageFetch requires conversion, int -> v4uint
+ // is invalid SPIR-V:
+ // "Expected Image 'Sampled Type' to be the same as Result Type
+ // components"
+
+ //
+ // OpImageRead
+ //
+
+ // OpImageRead requires no conversion, float -> v4float
+ {"%float 2D 0 0 0 2 Rgba32f", "%99 = OpImageRead %v4float %im %vi12",
+ R"(@group(2) @binding(1) var x_20 : texture_2d<f32>;)",
+ R"(let x_99 : vec4<f32> = textureLoad(x_20, vi12, 0i);)"},
+ // OpImageRead requires no conversion, uint -> v4uint
+ {"%uint 2D 0 0 0 2 Rgba32ui", "%99 = OpImageRead %v4uint %im %vi12",
+ R"(@group(2) @binding(1) var x_20 : texture_2d<u32>;)",
+ R"(let x_99 : vec4<u32> = textureLoad(x_20, vi12, 0i);)"},
+
+ // OpImageRead requires conversion, uint -> v4int
+ // is invalid SPIR-V:
+ // "Expected Image 'Sampled Type' to be the same as Result Type
+ // components"
+
+ // OpImageRead requires no conversion, int -> v4int
+ {"%int 2D 0 0 0 2 Rgba32i", "%99 = OpImageRead %v4int %im %vi12",
+ R"(@group(2) @binding(1) var x_20 : texture_2d<i32>;)",
+ R"(let x_99 : vec4<i32> = textureLoad(x_20, vi12, 0i);)"},
+
+ // OpImageRead requires conversion, int -> v4uint
+ // is invalid SPIR-V:
+ // "Expected Image 'Sampled Type' to be the same as Result Type
+ // components"
+
+ //
+ // Sampling operations, using OpImageSampleImplicitLod as an example.
+ // WGSL sampling operations only work on textures with a float sampled
+ // component. So we can only test the float -> float (non-conversion)
+ // case.
+
+ // OpImageSampleImplicitLod requires no conversion, float -> v4float
+ {"%float 2D 0 0 0 1 Unknown",
+ "%99 = OpImageSampleImplicitLod %v4float %sampled_image %vf12",
+ R"(@group(0) @binding(0) var x_10 : sampler;
@group(2) @binding(1) var x_20 : texture_2d<f32>;)",
- R"(let x_99 : vec4<f32> = textureSample(x_20, x_10, vf12);)"}}));
-
-INSTANTIATE_TEST_SUITE_P(
- ImageQuerySize_NonArrayed_SignedResult,
-    // ImageQuerySize requires a storage image or a multisampled image.
- // For storage image, use another instruction to indicate whether it
- // is readonly or writeonly.
- SpvParserHandleTest_SampledImageAccessTest,
- ::testing::ValuesIn(std::vector<ImageAccessCase>{
- // 1D storage image
- {"%float 1D 0 0 0 2 Rgba32f",
- "%99 = OpImageQuerySize %int %im \n"
- "%98 = OpImageRead %v4float %im %i1\n", // Implicitly mark as
- // NonWritable
- R"(@group(2) @binding(1) var x_20 : texture_1d<f32>;)",
- R"(let x_99 : i32 = i32(textureDimensions(x_20));)"},
- // 2D storage image
- {"%float 2D 0 0 0 2 Rgba32f",
- "%99 = OpImageQuerySize %v2int %im \n"
- "%98 = OpImageRead %v4float %im %vi12\n", // Implicitly mark as
- // NonWritable
- R"(@group(2) @binding(1) var x_20 : texture_2d<f32>;)",
- R"(let x_99 : vec2<i32> = vec2<i32>(textureDimensions(x_20))"},
- // 3D storage image
- {"%float 3D 0 0 0 2 Rgba32f",
- "%99 = OpImageQuerySize %v3int %im \n"
- "%98 = OpImageRead %v4float %im %vi123\n", // Implicitly mark as
- // NonWritable
- R"(@group(2) @binding(1) var x_20 : texture_3d<f32>;)",
- R"(let x_99 : vec3<i32> = vec3<i32>(textureDimensions(x_20));)"},
+ R"(let x_99 : vec4<f32> = textureSample(x_20, x_10, vf12);)"}}));
- // Multisampled
- {"%float 2D 0 0 1 1 Unknown", "%99 = OpImageQuerySize %v2int %im \n",
- R"(@group(2) @binding(1) var x_20 : texture_multisampled_2d<f32>;)",
- R"(let x_99 : vec2<i32> = vec2<i32>(textureDimensions(x_20));)"}}));
+INSTANTIATE_TEST_SUITE_P(ImageQuerySize_NonArrayed_SignedResult,
+                         // ImageQuerySize requires a storage image or a multisampled image.
+ // For storage image, use another instruction to indicate whether it
+ // is readonly or writeonly.
+ SpvParserHandleTest_SampledImageAccessTest,
+ ::testing::ValuesIn(std::vector<ImageAccessCase>{
+ // 1D storage image
+ {"%float 1D 0 0 0 2 Rgba32f",
+ "%99 = OpImageQuerySize %int %im \n"
+ "%98 = OpImageRead %v4float %im %i1\n", // Implicitly mark as
+ // NonWritable
+ R"(@group(2) @binding(1) var x_20 : texture_1d<f32>;)",
+ R"(let x_99 : i32 = i32(textureDimensions(x_20));)"},
+ // 2D storage image
+ {"%float 2D 0 0 0 2 Rgba32f",
+ "%99 = OpImageQuerySize %v2int %im \n"
+ "%98 = OpImageRead %v4float %im %vi12\n", // Implicitly mark as
+ // NonWritable
+ R"(@group(2) @binding(1) var x_20 : texture_2d<f32>;)",
+ R"(let x_99 : vec2<i32> = vec2<i32>(textureDimensions(x_20))"},
+ // 3D storage image
+ {"%float 3D 0 0 0 2 Rgba32f",
+ "%99 = OpImageQuerySize %v3int %im \n"
+ "%98 = OpImageRead %v4float %im %vi123\n", // Implicitly mark as
+ // NonWritable
+ R"(@group(2) @binding(1) var x_20 : texture_3d<f32>;)",
+ R"(let x_99 : vec3<i32> = vec3<i32>(textureDimensions(x_20));)"},
+
+ // Multisampled
+ {"%float 2D 0 0 1 1 Unknown", "%99 = OpImageQuerySize %v2int %im \n",
+ R"(@group(2) @binding(1) var x_20 : texture_multisampled_2d<f32>;)",
+ R"(let x_99 : vec2<i32> = vec2<i32>(textureDimensions(x_20));)"}}));
INSTANTIATE_TEST_SUITE_P(
ImageQuerySize_Arrayed_SignedResult,
@@ -2874,38 +2780,32 @@ INSTANTIATE_TEST_SUITE_P(
SpvParserHandleTest_SampledImageAccessTest,
::testing::ValuesIn(std::vector<ImageAccessCase>{
// 1D
- {"%float 1D 0 0 0 1 Unknown",
- "%99 = OpImageQuerySizeLod %int %im %i1\n",
+ {"%float 1D 0 0 0 1 Unknown", "%99 = OpImageQuerySizeLod %int %im %i1\n",
R"(@group(2) @binding(1) var x_20 : texture_1d<f32>;)",
R"(let x_99 : i32 = i32(textureDimensions(x_20, i1)))"},
// 2D
- {"%float 2D 0 0 0 1 Unknown",
- "%99 = OpImageQuerySizeLod %v2int %im %i1\n",
+ {"%float 2D 0 0 0 1 Unknown", "%99 = OpImageQuerySizeLod %v2int %im %i1\n",
R"(@group(2) @binding(1) var x_20 : texture_2d<f32>;)",
R"(let x_99 : vec2<i32> = vec2<i32>(textureDimensions(x_20, i1));)"},
// 3D
- {"%float 3D 0 0 0 1 Unknown",
- "%99 = OpImageQuerySizeLod %v3int %im %i1\n",
+ {"%float 3D 0 0 0 1 Unknown", "%99 = OpImageQuerySizeLod %v3int %im %i1\n",
R"(@group(2) @binding(1) var x_20 : texture_3d<f32>;)",
R"(let x_99 : vec3<i32> = vec3<i32>(textureDimensions(x_20, i1));)"},
// Cube
- {"%float Cube 0 0 0 1 Unknown",
- "%99 = OpImageQuerySizeLod %v2int %im %i1\n",
+ {"%float Cube 0 0 0 1 Unknown", "%99 = OpImageQuerySizeLod %v2int %im %i1\n",
R"(@group(2) @binding(1) var x_20 : texture_cube<f32>;)",
R"(let x_99 : vec2<i32> = vec2<i32>(textureDimensions(x_20, i1).xy);)"},
// Depth 2D
- {"%float 2D 1 0 0 1 Unknown",
- "%99 = OpImageQuerySizeLod %v2int %im %i1\n",
+ {"%float 2D 1 0 0 1 Unknown", "%99 = OpImageQuerySizeLod %v2int %im %i1\n",
R"(@group(2) @binding(1) var x_20 : texture_depth_2d;)",
R"(let x_99 : vec2<i32> = vec2<i32>(textureDimensions(x_20, i1));)"},
// Depth Cube
- {"%float Cube 1 0 0 1 Unknown",
- "%99 = OpImageQuerySizeLod %v2int %im %i1\n",
+ {"%float Cube 1 0 0 1 Unknown", "%99 = OpImageQuerySizeLod %v2int %im %i1\n",
R"(@group(2) @binding(1) var x_20 : texture_depth_cube;)",
R"(let x_99 : vec2<i32> = vec2<i32>(textureDimensions(x_20, i1).xy);)"}}));
@@ -2920,8 +2820,7 @@ INSTANTIATE_TEST_SUITE_P(
// There is no 1D array
// 2D array
- {"%float 2D 0 1 0 1 Unknown",
- "%99 = OpImageQuerySizeLod %v3int %im %i1\n",
+ {"%float 2D 0 1 0 1 Unknown", "%99 = OpImageQuerySizeLod %v3int %im %i1\n",
R"(@group(2) @binding(1) var x_20 : texture_2d_array<f32>;)",
R"(let x_99 : vec3<i32> = vec3<i32>(textureDimensions(x_20, i1), textureNumLayers(x_20));)"},
@@ -2932,14 +2831,12 @@ INSTANTIATE_TEST_SUITE_P(
        // Currently textureDimensions on cube returns vec3 but maybe should
// return vec2
// https://github.com/gpuweb/gpuweb/issues/1345
- {"%float Cube 0 1 0 1 Unknown",
- "%99 = OpImageQuerySizeLod %v3int %im %i1\n",
+ {"%float Cube 0 1 0 1 Unknown", "%99 = OpImageQuerySizeLod %v3int %im %i1\n",
R"(@group(2) @binding(1) var x_20 : texture_cube_array<f32>;)",
R"(let x_99 : vec3<i32> = vec3<i32>(textureDimensions(x_20, i1).xy, textureNumLayers(x_20));)"},
// Depth 2D array
- {"%float 2D 1 1 0 1 Unknown",
- "%99 = OpImageQuerySizeLod %v3int %im %i1\n",
+ {"%float 2D 1 1 0 1 Unknown", "%99 = OpImageQuerySizeLod %v3int %im %i1\n",
R"(@group(2) @binding(1) var x_20 : texture_depth_2d_array;)",
R"(let x_99 : vec3<i32> = vec3<i32>(textureDimensions(x_20, i1), textureNumLayers(x_20));)"},
@@ -2948,8 +2845,7 @@ INSTANTIATE_TEST_SUITE_P(
        // Currently textureDimensions on cube returns vec3 but maybe should
// return vec2
// https://github.com/gpuweb/gpuweb/issues/1345
- {"%float Cube 1 1 0 1 Unknown",
- "%99 = OpImageQuerySizeLod %v3int %im %i1\n",
+ {"%float Cube 1 1 0 1 Unknown", "%99 = OpImageQuerySizeLod %v3int %im %i1\n",
R"(@group(2) @binding(1) var x_20 : texture_depth_cube_array;)",
R"(let x_99 : vec3<i32> = vec3<i32>(textureDimensions(x_20, i1).xy, textureNumLayers(x_20));)"}}));
@@ -2961,8 +2857,7 @@ INSTANTIATE_TEST_SUITE_P(
SpvParserHandleTest_SampledImageAccessTest,
::testing::ValuesIn(std::vector<ImageAccessCase>{
- {"%float 1D 0 0 0 1 Unknown",
- "%99 = OpImageQuerySizeLod %int %im %u1\n",
+ {"%float 1D 0 0 0 1 Unknown", "%99 = OpImageQuerySizeLod %int %im %u1\n",
R"(@group(2) @binding(1) var x_20 : texture_1d<f32>;)",
R"(let x_99 : i32 = i32(textureDimensions(x_20, i32(u1)));)"}}));
@@ -2975,64 +2870,62 @@ INSTANTIATE_TEST_SUITE_P(
SpvParserHandleTest_SampledImageAccessTest,
::testing::ValuesIn(std::vector<ImageAccessCase>{
- {"%float 1D 0 0 0 1 Unknown",
- "%99 = OpImageQuerySizeLod %uint %im %i1\n",
+ {"%float 1D 0 0 0 1 Unknown", "%99 = OpImageQuerySizeLod %uint %im %i1\n",
R"(@group(2) @binding(1) var x_20 : texture_1d<f32>;)",
R"(let x_99 : u32 = u32(textureDimensions(x_20, i1));)"}}));
-INSTANTIATE_TEST_SUITE_P(
- ImageQueryLevels_SignedResult,
- SpvParserHandleTest_SampledImageAccessTest,
- ::testing::ValuesIn(std::vector<ImageAccessCase>{
- // In Vulkan:
- // Dim must be 1D, 2D, 3D, Cube
- // WGSL allows 2d, 2d_array, 3d, cube, cube_array
- // depth_2d, depth_2d_array, depth_cube, depth_cube_array
-
- // 2D
- {"%float 2D 0 0 0 1 Unknown", "%99 = OpImageQueryLevels %int %im\n",
- R"(@group(2) @binding(1) var x_20 : texture_2d<f32>;)",
- R"(let x_99 : i32 = textureNumLevels(x_20);)"},
-
- // 2D array
- {"%float 2D 0 1 0 1 Unknown", "%99 = OpImageQueryLevels %int %im\n",
- R"(@group(2) @binding(1) var x_20 : texture_2d_array<f32>;)",
- R"(let x_99 : i32 = textureNumLevels(x_20);)"},
-
- // 3D
- {"%float 3D 0 0 0 1 Unknown", "%99 = OpImageQueryLevels %int %im\n",
- R"(@group(2) @binding(1) var x_20 : texture_3d<f32>;)",
- R"(let x_99 : i32 = textureNumLevels(x_20);)"},
-
- // Cube
- {"%float Cube 0 0 0 1 Unknown", "%99 = OpImageQueryLevels %int %im\n",
- R"(@group(2) @binding(1) var x_20 : texture_cube<f32>;)",
- R"(let x_99 : i32 = textureNumLevels(x_20);)"},
-
- // Cube array
- {"%float Cube 0 1 0 1 Unknown", "%99 = OpImageQueryLevels %int %im\n",
- R"(@group(2) @binding(1) var x_20 : texture_cube_array<f32>;)",
- R"(let x_99 : i32 = textureNumLevels(x_20);)"},
-
- // depth 2d
- {"%float 2D 1 0 0 1 Unknown", "%99 = OpImageQueryLevels %int %im\n",
- R"(@group(2) @binding(1) var x_20 : texture_depth_2d;)",
- R"(let x_99 : i32 = textureNumLevels(x_20);)"},
-
- // depth 2d array
- {"%float 2D 1 1 0 1 Unknown", "%99 = OpImageQueryLevels %int %im\n",
- R"(@group(2) @binding(1) var x_20 : texture_depth_2d_array;)",
- R"(let x_99 : i32 = textureNumLevels(x_20);)"},
-
- // depth cube
- {"%float Cube 1 0 0 1 Unknown", "%99 = OpImageQueryLevels %int %im\n",
- R"(@group(2) @binding(1) var x_20 : texture_depth_cube;)",
- R"(let x_99 : i32 = textureNumLevels(x_20);)"},
-
- // depth cube array
- {"%float Cube 1 1 0 1 Unknown", "%99 = OpImageQueryLevels %int %im\n",
- R"(@group(2) @binding(1) var x_20 : texture_depth_cube_array;)",
- R"(let x_99 : i32 = textureNumLevels(x_20);)"}}));
+INSTANTIATE_TEST_SUITE_P(ImageQueryLevels_SignedResult,
+ SpvParserHandleTest_SampledImageAccessTest,
+ ::testing::ValuesIn(std::vector<ImageAccessCase>{
+ // In Vulkan:
+ // Dim must be 1D, 2D, 3D, Cube
+ // WGSL allows 2d, 2d_array, 3d, cube, cube_array
+ // depth_2d, depth_2d_array, depth_cube, depth_cube_array
+
+ // 2D
+ {"%float 2D 0 0 0 1 Unknown", "%99 = OpImageQueryLevels %int %im\n",
+ R"(@group(2) @binding(1) var x_20 : texture_2d<f32>;)",
+ R"(let x_99 : i32 = textureNumLevels(x_20);)"},
+
+ // 2D array
+ {"%float 2D 0 1 0 1 Unknown", "%99 = OpImageQueryLevels %int %im\n",
+ R"(@group(2) @binding(1) var x_20 : texture_2d_array<f32>;)",
+ R"(let x_99 : i32 = textureNumLevels(x_20);)"},
+
+ // 3D
+ {"%float 3D 0 0 0 1 Unknown", "%99 = OpImageQueryLevels %int %im\n",
+ R"(@group(2) @binding(1) var x_20 : texture_3d<f32>;)",
+ R"(let x_99 : i32 = textureNumLevels(x_20);)"},
+
+ // Cube
+ {"%float Cube 0 0 0 1 Unknown", "%99 = OpImageQueryLevels %int %im\n",
+ R"(@group(2) @binding(1) var x_20 : texture_cube<f32>;)",
+ R"(let x_99 : i32 = textureNumLevels(x_20);)"},
+
+ // Cube array
+ {"%float Cube 0 1 0 1 Unknown", "%99 = OpImageQueryLevels %int %im\n",
+ R"(@group(2) @binding(1) var x_20 : texture_cube_array<f32>;)",
+ R"(let x_99 : i32 = textureNumLevels(x_20);)"},
+
+ // depth 2d
+ {"%float 2D 1 0 0 1 Unknown", "%99 = OpImageQueryLevels %int %im\n",
+ R"(@group(2) @binding(1) var x_20 : texture_depth_2d;)",
+ R"(let x_99 : i32 = textureNumLevels(x_20);)"},
+
+ // depth 2d array
+ {"%float 2D 1 1 0 1 Unknown", "%99 = OpImageQueryLevels %int %im\n",
+ R"(@group(2) @binding(1) var x_20 : texture_depth_2d_array;)",
+ R"(let x_99 : i32 = textureNumLevels(x_20);)"},
+
+ // depth cube
+ {"%float Cube 1 0 0 1 Unknown", "%99 = OpImageQueryLevels %int %im\n",
+ R"(@group(2) @binding(1) var x_20 : texture_depth_cube;)",
+ R"(let x_99 : i32 = textureNumLevels(x_20);)"},
+
+ // depth cube array
+ {"%float Cube 1 1 0 1 Unknown", "%99 = OpImageQueryLevels %int %im\n",
+ R"(@group(2) @binding(1) var x_20 : texture_depth_cube_array;)",
+ R"(let x_99 : i32 = textureNumLevels(x_20);)"}}));
INSTANTIATE_TEST_SUITE_P(
// Spot check that a type conversion is inserted when SPIR-V asks for
@@ -3044,18 +2937,17 @@ INSTANTIATE_TEST_SUITE_P(
R"(@group(2) @binding(1) var x_20 : texture_2d<f32>;)",
R"(let x_99 : u32 = u32(textureNumLevels(x_20));)"}}));
-INSTANTIATE_TEST_SUITE_P(
- ImageQuerySamples_SignedResult,
- SpvParserHandleTest_SampledImageAccessTest,
- ::testing::ValuesIn(std::vector<ImageAccessCase>{
-        // Multisample 2D
- {"%float 2D 0 0 1 1 Unknown", "%99 = OpImageQuerySamples %int %im\n",
- R"(@group(2) @binding(1) var x_20 : texture_multisampled_2d<f32>;)",
- R"(let x_99 : i32 = textureNumSamples(x_20);)"} // namespace
+INSTANTIATE_TEST_SUITE_P(ImageQuerySamples_SignedResult,
+ SpvParserHandleTest_SampledImageAccessTest,
+ ::testing::ValuesIn(std::vector<ImageAccessCase>{
+                             // Multisample 2D
+ {"%float 2D 0 0 1 1 Unknown", "%99 = OpImageQuerySamples %int %im\n",
+ R"(@group(2) @binding(1) var x_20 : texture_multisampled_2d<f32>;)",
+ R"(let x_99 : i32 = textureNumSamples(x_20);)"} // namespace
- // Multisample 2D array
- // Not in WebGPU
- }));
+ // Multisample 2D array
+ // Not in WebGPU
+ }));
INSTANTIATE_TEST_SUITE_P(
    // Translation must inject a type coercion from signed to unsigned.
@@ -3072,36 +2964,34 @@ INSTANTIATE_TEST_SUITE_P(
}));
struct ImageCoordsCase {
- // SPIR-V image type, excluding result ID and opcode
- std::string spirv_image_type_details;
- std::string spirv_image_access;
- std::string expected_error;
- std::vector<std::string> expected_expressions;
+ // SPIR-V image type, excluding result ID and opcode
+ std::string spirv_image_type_details;
+ std::string spirv_image_access;
+ std::string expected_error;
+ std::vector<std::string> expected_expressions;
};
inline std::ostream& operator<<(std::ostream& out, const ImageCoordsCase& c) {
- out << "ImageCoordsCase(" << c.spirv_image_type_details << "\n"
- << c.spirv_image_access << "\n"
- << "expected_error(" << c.expected_error << ")\n";
-
- for (auto e : c.expected_expressions) {
- out << e << ",";
- }
- out << ")" << std::endl;
- return out;
+ out << "ImageCoordsCase(" << c.spirv_image_type_details << "\n"
+ << c.spirv_image_access << "\n"
+ << "expected_error(" << c.expected_error << ")\n";
+
+ for (auto e : c.expected_expressions) {
+ out << e << ",";
+ }
+ out << ")" << std::endl;
+ return out;
}
using SpvParserHandleTest_ImageCoordsTest =
SpvParserTestBase<::testing::TestWithParam<ImageCoordsCase>>;
-TEST_P(SpvParserHandleTest_ImageCoordsTest,
- MakeCoordinateOperandsForImageAccess) {
- // Only declare the sampled image type, and the associated variable
- // if the requested image type is a sampled image type and not multisampled.
- const bool is_sampled_image_type = GetParam().spirv_image_type_details.find(
- "0 1 Unknown") != std::string::npos;
- const auto assembly =
- Preamble() + R"(
+TEST_P(SpvParserHandleTest_ImageCoordsTest, MakeCoordinateOperandsForImageAccess) {
+ // Only declare the sampled image type, and the associated variable
+ // if the requested image type is a sampled image type and not multisampled.
+ const bool is_sampled_image_type =
+ GetParam().spirv_image_type_details.find("0 1 Unknown") != std::string::npos;
+ const auto assembly = Preamble() + R"(
OpEntryPoint Fragment %100 "main"
OpExecutionMode %100 OriginUpperLeft
OpName %float_var "float_var"
@@ -3125,14 +3015,14 @@ TEST_P(SpvParserHandleTest_ImageCoordsTest,
OpDecorate %30 DescriptorSet 0
OpDecorate %30 Binding 1
)" + CommonBasicTypes() +
- R"(
+ R"(
%sampler = OpTypeSampler
%ptr_sampler = OpTypePointer UniformConstant %sampler
%im_ty = OpTypeImage )" +
- GetParam().spirv_image_type_details + R"(
+ GetParam().spirv_image_type_details + R"(
%ptr_im_ty = OpTypePointer UniformConstant %im_ty
)" + (is_sampled_image_type ? " %si_ty = OpTypeSampledImage %im_ty " : "") +
- R"(
+ R"(
%ptr_float = OpTypePointer Function %float
@@ -3163,66 +3053,58 @@ TEST_P(SpvParserHandleTest_ImageCoordsTest,
%sam = OpLoad %sampler %10
%im = OpLoad %im_ty %20
-)" +
- (is_sampled_image_type
- ? " %sampled_image = OpSampledImage %si_ty %im %sam "
- : "") +
- GetParam().spirv_image_access +
- R"(
+)" + (is_sampled_image_type ? " %sampled_image = OpSampledImage %si_ty %im %sam " : "") +
+ GetParam().spirv_image_access +
+ R"(
; Use an anchor for the cases when the image access doesn't have a result ID.
%1000 = OpCopyObject %uint %uint_0
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- if (!p->BuildAndParseInternalModule()) {
- EXPECT_THAT(p->error(), StartsWith(GetParam().expected_error)) << assembly;
- } else {
- EXPECT_TRUE(p->error().empty()) << p->error();
- auto fe = p->function_emitter(100);
- // We actually have to generate the module to cache expressions for the
- // result IDs, particularly the OpCopyObject
- fe.Emit();
-
- const spvtools::opt::Instruction* anchor = p->GetInstructionForTest(1000);
- ASSERT_NE(anchor, nullptr);
- const spvtools::opt::Instruction& image_access = *(anchor->PreviousNode());
-
- ast::ExpressionList result =
- fe.MakeCoordinateOperandsForImageAccess(image_access);
- if (GetParam().expected_error.empty()) {
- EXPECT_TRUE(fe.success()) << p->error();
- EXPECT_TRUE(p->error().empty());
- std::vector<std::string> result_strings;
- Program program = p->program();
- for (auto* expr : result) {
- ASSERT_NE(expr, nullptr);
- result_strings.push_back(test::ToString(program, expr));
- }
- EXPECT_THAT(result_strings,
- ::testing::ContainerEq(GetParam().expected_expressions));
+ auto p = parser(test::Assemble(assembly));
+ if (!p->BuildAndParseInternalModule()) {
+ EXPECT_THAT(p->error(), StartsWith(GetParam().expected_error)) << assembly;
} else {
- EXPECT_FALSE(fe.success());
- EXPECT_THAT(p->error(), Eq(GetParam().expected_error)) << assembly;
- EXPECT_TRUE(result.empty());
+ EXPECT_TRUE(p->error().empty()) << p->error();
+ auto fe = p->function_emitter(100);
+ // We actually have to generate the module to cache expressions for the
+ // result IDs, particularly the OpCopyObject
+ fe.Emit();
+
+ const spvtools::opt::Instruction* anchor = p->GetInstructionForTest(1000);
+ ASSERT_NE(anchor, nullptr);
+ const spvtools::opt::Instruction& image_access = *(anchor->PreviousNode());
+
+ ast::ExpressionList result = fe.MakeCoordinateOperandsForImageAccess(image_access);
+ if (GetParam().expected_error.empty()) {
+ EXPECT_TRUE(fe.success()) << p->error();
+ EXPECT_TRUE(p->error().empty());
+ std::vector<std::string> result_strings;
+ Program program = p->program();
+ for (auto* expr : result) {
+ ASSERT_NE(expr, nullptr);
+ result_strings.push_back(test::ToString(program, expr));
+ }
+ EXPECT_THAT(result_strings, ::testing::ContainerEq(GetParam().expected_expressions));
+ } else {
+ EXPECT_FALSE(fe.success());
+ EXPECT_THAT(p->error(), Eq(GetParam().expected_error)) << assembly;
+ EXPECT_TRUE(result.empty());
+ }
+ }
+
+ const bool is_sample_level =
+ GetParam().spirv_image_access.find("ImageSampleExplicitLod") != std::string::npos;
+ const bool is_comparison_sample_level =
+ GetParam().spirv_image_access.find("ImageSampleDrefExplicitLod") != std::string::npos;
+ const bool is_1d = GetParam().spirv_image_type_details.find("1D") != std::string::npos;
+ if (is_sample_level && is_1d) {
+ p->SkipDumpingPending("crbug.com/tint/789");
+ }
+ if (is_comparison_sample_level) {
+ p->SkipDumpingPending("crbug.com/tint/425");
}
- }
-
- const bool is_sample_level =
- GetParam().spirv_image_access.find("ImageSampleExplicitLod") !=
- std::string::npos;
- const bool is_comparison_sample_level =
- GetParam().spirv_image_access.find("ImageSampleDrefExplicitLod") !=
- std::string::npos;
- const bool is_1d =
- GetParam().spirv_image_type_details.find("1D") != std::string::npos;
- if (is_sample_level && is_1d) {
- p->SkipDumpingPending("crbug.com/tint/789");
- }
- if (is_comparison_sample_level) {
- p->SkipDumpingPending("crbug.com/tint/425");
- }
}
INSTANTIATE_TEST_SUITE_P(Good_1D,
@@ -3385,34 +3267,33 @@ INSTANTIATE_TEST_SUITE_P(
{"vf12"}},
}));
-INSTANTIATE_TEST_SUITE_P(
- PreserveFloatCoords_Arrayed,
- // In SPIR-V, sampling and dref sampling operations use floating point
- // coordinates. Prove that we preserve floating point-ness of the
- // coordinate part, but convert the array index to signed integer. Test
- // across all such instructions.
- SpvParserHandleTest_ImageCoordsTest,
- ::testing::ValuesIn(std::vector<ImageCoordsCase>{
- {"%float 2D 0 1 0 1 Unknown",
- "%result = OpImageSampleImplicitLod %v4float %sampled_image %vf123",
- "",
- {"vf123.xy", "i32(round(vf123.z))"}},
+INSTANTIATE_TEST_SUITE_P(PreserveFloatCoords_Arrayed,
+ // In SPIR-V, sampling and dref sampling operations use floating point
+ // coordinates. Prove that we preserve floating point-ness of the
+ // coordinate part, but convert the array index to signed integer. Test
+ // across all such instructions.
+ SpvParserHandleTest_ImageCoordsTest,
+ ::testing::ValuesIn(std::vector<ImageCoordsCase>{
+ {"%float 2D 0 1 0 1 Unknown",
+ "%result = OpImageSampleImplicitLod %v4float %sampled_image %vf123",
+ "",
+ {"vf123.xy", "i32(round(vf123.z))"}},
- {"%float 2D 0 1 0 1 Unknown",
- "%result = OpImageSampleExplicitLod %v4float %sampled_image %vf123 "
- "Lod %f1",
- "",
- {"vf123.xy", "i32(round(vf123.z))"}},
- {"%float 2D 1 1 0 1 Unknown",
- "%result = OpImageSampleDrefImplicitLod %float %sampled_image "
- "%vf123 %depth",
- "",
- {"vf123.xy", "i32(round(vf123.z))"}},
- {"%float 2D 1 1 0 1 Unknown",
- "%result = OpImageSampleDrefExplicitLod %float %sampled_image "
- "%vf123 %depth Lod %float_0",
- "",
- {"vf123.xy", "i32(round(vf123.z))"}}}));
+ {"%float 2D 0 1 0 1 Unknown",
+ "%result = OpImageSampleExplicitLod %v4float %sampled_image %vf123 "
+ "Lod %f1",
+ "",
+ {"vf123.xy", "i32(round(vf123.z))"}},
+ {"%float 2D 1 1 0 1 Unknown",
+ "%result = OpImageSampleDrefImplicitLod %float %sampled_image "
+ "%vf123 %depth",
+ "",
+ {"vf123.xy", "i32(round(vf123.z))"}},
+ {"%float 2D 1 1 0 1 Unknown",
+ "%result = OpImageSampleDrefExplicitLod %float %sampled_image "
+ "%vf123 %depth Lod %float_0",
+ "",
+ {"vf123.xy", "i32(round(vf123.z))"}}}));
INSTANTIATE_TEST_SUITE_P(
PreserveIntCoords_NonArrayed,
@@ -3421,47 +3302,31 @@ INSTANTIATE_TEST_SUITE_P(
SpvParserHandleTest_ImageCoordsTest,
::testing::ValuesIn(std::vector<ImageCoordsCase>{
// Scalar cases
- {"%float 1D 0 0 0 1 Unknown",
- "%result = OpImageFetch %v4float %im %i1",
- "",
- {"i1"}},
- {"%float 1D 0 0 0 2 R32f",
- "%result = OpImageRead %v4float %im %i1",
- "",
- {"i1"}},
+ {"%float 1D 0 0 0 1 Unknown", "%result = OpImageFetch %v4float %im %i1", "", {"i1"}},
+ {"%float 1D 0 0 0 2 R32f", "%result = OpImageRead %v4float %im %i1", "", {"i1"}},
{"%float 1D 0 0 0 2 R32f", "OpImageWrite %im %i1 %vf1234", "", {"i1"}},
// Vector cases
- {"%float 2D 0 0 0 1 Unknown",
- "%result = OpImageFetch %v4float %im %vi12",
- "",
- {"vi12"}},
- {"%float 2D 0 0 0 2 R32f",
- "%result = OpImageRead %v4float %im %vi12",
- "",
- {"vi12"}},
- {"%float 2D 0 0 0 2 R32f",
- "OpImageWrite %im %vi12 %vf1234",
- "",
- {"vi12"}}}));
+ {"%float 2D 0 0 0 1 Unknown", "%result = OpImageFetch %v4float %im %vi12", "", {"vi12"}},
+ {"%float 2D 0 0 0 2 R32f", "%result = OpImageRead %v4float %im %vi12", "", {"vi12"}},
+ {"%float 2D 0 0 0 2 R32f", "OpImageWrite %im %vi12 %vf1234", "", {"vi12"}}}));
-INSTANTIATE_TEST_SUITE_P(
- PreserveIntCoords_Arrayed,
- // In SPIR-V, image read, fetch, and write use integer coordinates.
- // Prove that we preserve signed integer coordinates.
- SpvParserHandleTest_ImageCoordsTest,
- ::testing::ValuesIn(std::vector<ImageCoordsCase>{
- {"%float 2D 0 1 0 1 Unknown",
- "%result = OpImageFetch %v4float %im %vi123",
- "",
- {"vi123.xy", "vi123.z"}},
- {"%float 2D 0 1 0 2 R32f",
- "%result = OpImageRead %v4float %im %vi123",
- "",
- {"vi123.xy", "vi123.z"}},
- {"%float 2D 0 1 0 2 R32f",
- "OpImageWrite %im %vi123 %vf1234",
- "",
- {"vi123.xy", "vi123.z"}}}));
+INSTANTIATE_TEST_SUITE_P(PreserveIntCoords_Arrayed,
+ // In SPIR-V, image read, fetch, and write use integer coordinates.
+ // Prove that we preserve signed integer coordinates.
+ SpvParserHandleTest_ImageCoordsTest,
+ ::testing::ValuesIn(std::vector<ImageCoordsCase>{
+ {"%float 2D 0 1 0 1 Unknown",
+ "%result = OpImageFetch %v4float %im %vi123",
+ "",
+ {"vi123.xy", "vi123.z"}},
+ {"%float 2D 0 1 0 2 R32f",
+ "%result = OpImageRead %v4float %im %vi123",
+ "",
+ {"vi123.xy", "vi123.z"}},
+ {"%float 2D 0 1 0 2 R32f",
+ "OpImageWrite %im %vi123 %vf1234",
+ "",
+ {"vi123.xy", "vi123.z"}}}));
INSTANTIATE_TEST_SUITE_P(
ConvertUintCoords_NonArrayed,
@@ -3470,18 +3335,9 @@ INSTANTIATE_TEST_SUITE_P(
SpvParserHandleTest_ImageCoordsTest,
::testing::ValuesIn(std::vector<ImageCoordsCase>{
// Scalar cases
- {"%float 1D 0 0 0 1 Unknown",
- "%result = OpImageFetch %v4float %im %u1",
- "",
- {"i32(u1)"}},
- {"%float 1D 0 0 0 2 R32f",
- "%result = OpImageRead %v4float %im %u1",
- "",
- {"i32(u1)"}},
- {"%float 1D 0 0 0 2 R32f",
- "OpImageWrite %im %u1 %vf1234",
- "",
- {"i32(u1)"}},
+ {"%float 1D 0 0 0 1 Unknown", "%result = OpImageFetch %v4float %im %u1", "", {"i32(u1)"}},
+ {"%float 1D 0 0 0 2 R32f", "%result = OpImageRead %v4float %im %u1", "", {"i32(u1)"}},
+ {"%float 1D 0 0 0 2 R32f", "OpImageWrite %im %u1 %vf1234", "", {"i32(u1)"}},
// Vector cases
{"%float 2D 0 0 0 1 Unknown",
"%result = OpImageFetch %v4float %im %vu12",
@@ -3491,38 +3347,31 @@ INSTANTIATE_TEST_SUITE_P(
"%result = OpImageRead %v4float %im %vu12",
"",
{"vec2<i32>(vu12)"}},
- {"%float 2D 0 0 0 2 R32f",
- "OpImageWrite %im %vu12 %vf1234",
- "",
- {"vec2<i32>(vu12)"}}}));
+ {"%float 2D 0 0 0 2 R32f", "OpImageWrite %im %vu12 %vf1234", "", {"vec2<i32>(vu12)"}}}));
-INSTANTIATE_TEST_SUITE_P(
- ConvertUintCoords_Arrayed,
- // In SPIR-V, image read, fetch, and write use integer coordinates.
- // Prove that we convert unsigned integer coordinates to signed.
- SpvParserHandleTest_ImageCoordsTest,
- ::testing::ValuesIn(std::vector<ImageCoordsCase>{
- {"%float 2D 0 1 0 1 Unknown",
- "%result = OpImageFetch %v4float %im %vu123",
- "",
- {"vec2<i32>(vu123.xy)", "i32(vu123.z)"}},
- {"%float 2D 0 1 0 2 R32f",
- "%result = OpImageRead %v4float %im %vu123",
- "",
- {"vec2<i32>(vu123.xy)", "i32(vu123.z)"}},
- {"%float 2D 0 1 0 2 R32f",
- "OpImageWrite %im %vu123 %vf1234",
- "",
- {"vec2<i32>(vu123.xy)", "i32(vu123.z)"}}}));
+INSTANTIATE_TEST_SUITE_P(ConvertUintCoords_Arrayed,
+ // In SPIR-V, image read, fetch, and write use integer coordinates.
+ // Prove that we convert unsigned integer coordinates to signed.
+ SpvParserHandleTest_ImageCoordsTest,
+ ::testing::ValuesIn(std::vector<ImageCoordsCase>{
+ {"%float 2D 0 1 0 1 Unknown",
+ "%result = OpImageFetch %v4float %im %vu123",
+ "",
+ {"vec2<i32>(vu123.xy)", "i32(vu123.z)"}},
+ {"%float 2D 0 1 0 2 R32f",
+ "%result = OpImageRead %v4float %im %vu123",
+ "",
+ {"vec2<i32>(vu123.xy)", "i32(vu123.z)"}},
+ {"%float 2D 0 1 0 2 R32f",
+ "OpImageWrite %im %vu123 %vf1234",
+ "",
+ {"vec2<i32>(vu123.xy)", "i32(vu123.z)"}}}));
INSTANTIATE_TEST_SUITE_P(
BadInstructions,
SpvParserHandleTest_ImageCoordsTest,
::testing::ValuesIn(std::vector<ImageCoordsCase>{
- {"%float 1D 0 0 0 1 Unknown",
- "OpNop",
- "not an image access instruction: OpNop",
- {}},
+ {"%float 1D 0 0 0 1 Unknown", "OpNop", "not an image access instruction: OpNop", {}},
{"%float 1D 0 0 0 1 Unknown",
"%50 = OpCopyObject %float %float_1",
"internal error: couldn't find image for "
@@ -3537,133 +3386,129 @@ INSTANTIATE_TEST_SUITE_P(
// won't assemble, so we skip it.
}));
-INSTANTIATE_TEST_SUITE_P(
- Bad_Coordinate,
- SpvParserHandleTest_ImageCoordsTest,
- ::testing::ValuesIn(std::vector<ImageCoordsCase>{
- {"%float 1D 0 0 0 1 Unknown",
- "%result = OpImageSampleImplicitLod "
- // bad type for coordinate: not a number
- "%v4float %sampled_image %float_var",
- "bad or unsupported coordinate type for image access: %73 = "
- "OpImageSampleImplicitLod %42 %72 %1",
- {}},
- {"%float 2D 0 0 0 1 Unknown", // 2D
- "%result = OpImageSampleImplicitLod "
- // 1 component, but need 2
- "%v4float %sampled_image %f1",
- "image access required 2 coordinate components, but only 1 provided, "
- "in: %73 = OpImageSampleImplicitLod %42 %72 %12",
- {}},
- {"%float 2D 0 1 0 1 Unknown", // 2DArray
- "%result = OpImageSampleImplicitLod "
- // 2 component, but need 3
- "%v4float %sampled_image %vf12",
- "image access required 3 coordinate components, but only 2 provided, "
- "in: %73 = OpImageSampleImplicitLod %42 %72 %13",
- {}},
- {"%float 3D 0 0 0 1 Unknown", // 3D
- "%result = OpImageSampleImplicitLod "
- // 2 components, but need 3
- "%v4float %sampled_image %vf12",
- "image access required 3 coordinate components, but only 2 provided, "
- "in: %73 = OpImageSampleImplicitLod %42 %72 %13",
- {}},
- }));
-
-INSTANTIATE_TEST_SUITE_P(
- SampleNonFloatTexture_IsError,
- SpvParserHandleTest_ImageCoordsTest,
- ::testing::ValuesIn(std::vector<ImageCoordsCase>{
- // ImageSampleImplicitLod
- {"%uint 2D 0 0 0 1 Unknown",
- "%result = OpImageSampleImplicitLod %v4uint %sampled_image %vf12",
- "sampled image must have float component type",
- {}},
- {"%int 2D 0 0 0 1 Unknown",
- "%result = OpImageSampleImplicitLod %v4int %sampled_image %vf12",
- "sampled image must have float component type",
- {}},
- // ImageSampleExplicitLod
- {"%uint 2D 0 0 0 1 Unknown",
- "%result = OpImageSampleExplicitLod %v4uint %sampled_image %vf12 "
- "Lod %f1",
- "sampled image must have float component type",
- {}},
- {"%int 2D 0 0 0 1 Unknown",
- "%result = OpImageSampleExplicitLod %v4int %sampled_image %vf12 "
- "Lod %f1",
- "sampled image must have float component type",
- {}},
- // ImageSampleDrefImplicitLod
- {"%uint 2D 0 0 0 1 Unknown",
- "%result = OpImageSampleDrefImplicitLod %uint %sampled_image %vf12 "
- "%f1",
- "sampled image must have float component type",
- {}},
- {"%int 2D 0 0 0 1 Unknown",
- "%result = OpImageSampleDrefImplicitLod %int %sampled_image %vf12 "
- "%f1",
- "sampled image must have float component type",
- {}},
- // ImageSampleDrefExplicitLod
- {"%uint 2D 0 0 0 1 Unknown",
- "%result = OpImageSampleDrefExplicitLod %uint %sampled_image %vf12 "
- "%f1 Lod %float_0",
- "sampled image must have float component type",
- {}},
- {"%int 2D 0 0 0 1 Unknown",
- "%result = OpImageSampleDrefExplicitLod %int %sampled_image %vf12 "
- "%f1 Lod %float_0",
- "sampled image must have float component type",
- {}}}));
-
-INSTANTIATE_TEST_SUITE_P(
- ConstOffset_BadInstruction_Errors,
- SpvParserHandleTest_ImageCoordsTest,
- ::testing::ValuesIn(std::vector<ImageCoordsCase>{
- // ImageFetch
- {"%uint 2D 0 0 0 1 Unknown",
- "%result = OpImageFetch %v4uint %sampled_image %vf12 ConstOffset "
- "%the_vu12",
- "ConstOffset is only permitted for sampling, gather, or "
- "depth-reference gather operations: ",
- {}},
- // ImageRead
- {"%uint 2D 0 0 0 2 Rgba32ui",
- "%result = OpImageRead %v4uint %im %vu12 ConstOffset %the_vu12",
- "ConstOffset is only permitted for sampling, gather, or "
- "depth-reference gather operations: ",
- {}},
- // ImageWrite
- {"%uint 2D 0 0 0 2 Rgba32ui",
- "OpImageWrite %im %vu12 %vu1234 ConstOffset %the_vu12",
- "ConstOffset is only permitted for sampling, gather, or "
- "depth-reference gather operations: ",
- {}}}));
-
-INSTANTIATE_TEST_SUITE_P(
- ConstOffset_BadDim_Errors,
- SpvParserHandleTest_ImageCoordsTest,
- ::testing::ValuesIn(std::vector<ImageCoordsCase>{
- // 1D
- {"%uint 1D 0 0 0 1 Unknown",
- "%result = OpImageSampleImplicitLod %v4float %sampled_image %vf1234 "
- "ConstOffset %the_vu12",
- "ConstOffset is only permitted for 2D, 2D Arrayed, and 3D textures: ",
- {}},
- // Cube
- {"%uint Cube 0 0 0 1 Unknown",
- "%result = OpImageSampleImplicitLod %v4float %sampled_image %vf1234 "
- "ConstOffset %the_vu12",
- "ConstOffset is only permitted for 2D, 2D Arrayed, and 3D textures: ",
- {}},
- // Cube Array
- {"%uint Cube 0 1 0 1 Unknown",
- "%result = OpImageSampleImplicitLod %v4float %sampled_image %vf1234 "
- "ConstOffset %the_vu12",
- "ConstOffset is only permitted for 2D, 2D Arrayed, and 3D textures: ",
- {}}}));
+INSTANTIATE_TEST_SUITE_P(Bad_Coordinate,
+ SpvParserHandleTest_ImageCoordsTest,
+ ::testing::ValuesIn(std::vector<ImageCoordsCase>{
+ {"%float 1D 0 0 0 1 Unknown",
+ "%result = OpImageSampleImplicitLod "
+ // bad type for coordinate: not a number
+ "%v4float %sampled_image %float_var",
+ "bad or unsupported coordinate type for image access: %73 = "
+ "OpImageSampleImplicitLod %42 %72 %1",
+ {}},
+ {"%float 2D 0 0 0 1 Unknown", // 2D
+ "%result = OpImageSampleImplicitLod "
+ // 1 component, but need 2
+ "%v4float %sampled_image %f1",
+ "image access required 2 coordinate components, but only 1 provided, "
+ "in: %73 = OpImageSampleImplicitLod %42 %72 %12",
+ {}},
+ {"%float 2D 0 1 0 1 Unknown", // 2DArray
+ "%result = OpImageSampleImplicitLod "
+ // 2 component, but need 3
+ "%v4float %sampled_image %vf12",
+ "image access required 3 coordinate components, but only 2 provided, "
+ "in: %73 = OpImageSampleImplicitLod %42 %72 %13",
+ {}},
+ {"%float 3D 0 0 0 1 Unknown", // 3D
+ "%result = OpImageSampleImplicitLod "
+ // 2 components, but need 3
+ "%v4float %sampled_image %vf12",
+ "image access required 3 coordinate components, but only 2 provided, "
+ "in: %73 = OpImageSampleImplicitLod %42 %72 %13",
+ {}},
+ }));
+
+INSTANTIATE_TEST_SUITE_P(SampleNonFloatTexture_IsError,
+ SpvParserHandleTest_ImageCoordsTest,
+ ::testing::ValuesIn(std::vector<ImageCoordsCase>{
+ // ImageSampleImplicitLod
+ {"%uint 2D 0 0 0 1 Unknown",
+ "%result = OpImageSampleImplicitLod %v4uint %sampled_image %vf12",
+ "sampled image must have float component type",
+ {}},
+ {"%int 2D 0 0 0 1 Unknown",
+ "%result = OpImageSampleImplicitLod %v4int %sampled_image %vf12",
+ "sampled image must have float component type",
+ {}},
+ // ImageSampleExplicitLod
+ {"%uint 2D 0 0 0 1 Unknown",
+ "%result = OpImageSampleExplicitLod %v4uint %sampled_image %vf12 "
+ "Lod %f1",
+ "sampled image must have float component type",
+ {}},
+ {"%int 2D 0 0 0 1 Unknown",
+ "%result = OpImageSampleExplicitLod %v4int %sampled_image %vf12 "
+ "Lod %f1",
+ "sampled image must have float component type",
+ {}},
+ // ImageSampleDrefImplicitLod
+ {"%uint 2D 0 0 0 1 Unknown",
+ "%result = OpImageSampleDrefImplicitLod %uint %sampled_image %vf12 "
+ "%f1",
+ "sampled image must have float component type",
+ {}},
+ {"%int 2D 0 0 0 1 Unknown",
+ "%result = OpImageSampleDrefImplicitLod %int %sampled_image %vf12 "
+ "%f1",
+ "sampled image must have float component type",
+ {}},
+ // ImageSampleDrefExplicitLod
+ {"%uint 2D 0 0 0 1 Unknown",
+ "%result = OpImageSampleDrefExplicitLod %uint %sampled_image %vf12 "
+ "%f1 Lod %float_0",
+ "sampled image must have float component type",
+ {}},
+ {"%int 2D 0 0 0 1 Unknown",
+ "%result = OpImageSampleDrefExplicitLod %int %sampled_image %vf12 "
+ "%f1 Lod %float_0",
+ "sampled image must have float component type",
+ {}}}));
+
+INSTANTIATE_TEST_SUITE_P(ConstOffset_BadInstruction_Errors,
+ SpvParserHandleTest_ImageCoordsTest,
+ ::testing::ValuesIn(std::vector<ImageCoordsCase>{
+ // ImageFetch
+ {"%uint 2D 0 0 0 1 Unknown",
+ "%result = OpImageFetch %v4uint %sampled_image %vf12 ConstOffset "
+ "%the_vu12",
+ "ConstOffset is only permitted for sampling, gather, or "
+ "depth-reference gather operations: ",
+ {}},
+ // ImageRead
+ {"%uint 2D 0 0 0 2 Rgba32ui",
+ "%result = OpImageRead %v4uint %im %vu12 ConstOffset %the_vu12",
+ "ConstOffset is only permitted for sampling, gather, or "
+ "depth-reference gather operations: ",
+ {}},
+ // ImageWrite
+ {"%uint 2D 0 0 0 2 Rgba32ui",
+ "OpImageWrite %im %vu12 %vu1234 ConstOffset %the_vu12",
+ "ConstOffset is only permitted for sampling, gather, or "
+ "depth-reference gather operations: ",
+ {}}}));
+
+INSTANTIATE_TEST_SUITE_P(ConstOffset_BadDim_Errors,
+ SpvParserHandleTest_ImageCoordsTest,
+ ::testing::ValuesIn(std::vector<ImageCoordsCase>{
+ // 1D
+ {"%uint 1D 0 0 0 1 Unknown",
+ "%result = OpImageSampleImplicitLod %v4float %sampled_image %vf1234 "
+ "ConstOffset %the_vu12",
+ "ConstOffset is only permitted for 2D, 2D Arrayed, and 3D textures: ",
+ {}},
+ // Cube
+ {"%uint Cube 0 0 0 1 Unknown",
+ "%result = OpImageSampleImplicitLod %v4float %sampled_image %vf1234 "
+ "ConstOffset %the_vu12",
+ "ConstOffset is only permitted for 2D, 2D Arrayed, and 3D textures: ",
+ {}},
+ // Cube Array
+ {"%uint Cube 0 1 0 1 Unknown",
+ "%result = OpImageSampleImplicitLod %v4float %sampled_image %vf1234 "
+ "ConstOffset %the_vu12",
+ "ConstOffset is only permitted for 2D, 2D Arrayed, and 3D textures: ",
+ {}}}));
INSTANTIATE_TEST_SUITE_P(
ImageSampleDref_Bias_IsError,
@@ -3764,7 +3609,7 @@ INSTANTIATE_TEST_SUITE_P(
{}}}));
TEST_F(SpvParserHandleTest, CombinedImageSampler_IsError) {
- const auto assembly = Preamble() + R"(
+ const auto assembly = Preamble() + R"(
OpEntryPoint Fragment %100 "main"
OpExecutionMode %100 OriginUpperLeft
@@ -3783,72 +3628,65 @@ TEST_F(SpvParserHandleTest, CombinedImageSampler_IsError) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- EXPECT_FALSE(p->BuildAndParseInternalModule()) << assembly;
- EXPECT_THAT(p->error(),
- HasSubstr("WGSL does not support combined image-samplers: "));
+ auto p = parser(test::Assemble(assembly));
+ EXPECT_FALSE(p->BuildAndParseInternalModule()) << assembly;
+ EXPECT_THAT(p->error(), HasSubstr("WGSL does not support combined image-samplers: "));
}
-INSTANTIATE_TEST_SUITE_P(
- ImageQueryLod_IsError,
- SpvParserHandleTest_ImageCoordsTest,
- ::testing::ValuesIn(std::vector<ImageCoordsCase>{
- {"%float 2D 0 0 0 1 Unknown",
- "%result = OpImageQueryLod %v2int %sampled_image %vf12",
- "WGSL does not support querying the level of detail of an image: ",
- {}}}));
+INSTANTIATE_TEST_SUITE_P(ImageQueryLod_IsError,
+ SpvParserHandleTest_ImageCoordsTest,
+ ::testing::ValuesIn(std::vector<ImageCoordsCase>{
+ {"%float 2D 0 0 0 1 Unknown",
+ "%result = OpImageQueryLod %v2int %sampled_image %vf12",
+ "WGSL does not support querying the level of detail of an image: ",
+ {}}}));
-INSTANTIATE_TEST_SUITE_P(
- ImageGather_Bias_IsError,
- SpvParserHandleTest_ImageCoordsTest,
- ::testing::ValuesIn(std::vector<ImageCoordsCase>{
- {"%float 2D 0 0 0 1 Unknown",
- "%result = OpImageGather %v4float %sampled_image %vf12 %int_1 "
- "Bias %float_null",
- "WGSL does not support image gather with level-of-detail bias: ",
- {}}}));
+INSTANTIATE_TEST_SUITE_P(ImageGather_Bias_IsError,
+ SpvParserHandleTest_ImageCoordsTest,
+ ::testing::ValuesIn(std::vector<ImageCoordsCase>{
+ {"%float 2D 0 0 0 1 Unknown",
+ "%result = OpImageGather %v4float %sampled_image %vf12 %int_1 "
+ "Bias %float_null",
+ "WGSL does not support image gather with level-of-detail bias: ",
+ {}}}));
-INSTANTIATE_TEST_SUITE_P(
- ImageDrefGather_Bias_IsError,
- SpvParserHandleTest_ImageCoordsTest,
- ::testing::ValuesIn(std::vector<ImageCoordsCase>{
- {"%float 2D 1 0 0 1 Unknown",
- "%result = OpImageDrefGather %v4float %sampled_image %vf12 %depth "
- "Bias %float_null",
- "WGSL does not support image gather with level-of-detail bias: ",
- {}}}));
+INSTANTIATE_TEST_SUITE_P(ImageDrefGather_Bias_IsError,
+ SpvParserHandleTest_ImageCoordsTest,
+ ::testing::ValuesIn(std::vector<ImageCoordsCase>{
+ {"%float 2D 1 0 0 1 Unknown",
+ "%result = OpImageDrefGather %v4float %sampled_image %vf12 %depth "
+ "Bias %float_null",
+ "WGSL does not support image gather with level-of-detail bias: ",
+ {}}}));
// Note: Vulkan SPIR-V ImageGather and ImageDrefGather do not allow explicit
// Lod. The SPIR-V validator should reject those cases already.
-INSTANTIATE_TEST_SUITE_P(
- ImageGather_Grad_IsError,
- SpvParserHandleTest_ImageCoordsTest,
- ::testing::ValuesIn(std::vector<ImageCoordsCase>{
- {"%float 2D 0 0 0 1 Unknown",
- "%result = OpImageGather %v4float %sampled_image %vf12 %int_1 "
- "Grad %vf12 %vf12",
- "WGSL does not support image gather with explicit gradient: ",
- {}}}));
-
-INSTANTIATE_TEST_SUITE_P(
- ImageDrefGather_Grad_IsError,
- SpvParserHandleTest_ImageCoordsTest,
- ::testing::ValuesIn(std::vector<ImageCoordsCase>{
- {"%float 2D 1 0 0 1 Unknown",
- "%result = OpImageDrefGather %v4float %sampled_image %vf12 %depth "
- "Grad %vf12 %vf12",
- "WGSL does not support image gather with explicit gradient: ",
- {}}}));
+INSTANTIATE_TEST_SUITE_P(ImageGather_Grad_IsError,
+ SpvParserHandleTest_ImageCoordsTest,
+ ::testing::ValuesIn(std::vector<ImageCoordsCase>{
+ {"%float 2D 0 0 0 1 Unknown",
+ "%result = OpImageGather %v4float %sampled_image %vf12 %int_1 "
+ "Grad %vf12 %vf12",
+ "WGSL does not support image gather with explicit gradient: ",
+ {}}}));
-TEST_F(SpvParserHandleTest,
- NeverGenerateConstDeclForHandle_UseVariableDirectly) {
- // An ad-hoc test to prove we never had the issue
- // feared in crbug.com/tint/265.
- // Never create a const-declaration for a pointer to
- // a texture or sampler. Code generation always
- // traces back to the memory object declaration.
- const auto assembly = Preamble() + R"(
+INSTANTIATE_TEST_SUITE_P(ImageDrefGather_Grad_IsError,
+ SpvParserHandleTest_ImageCoordsTest,
+ ::testing::ValuesIn(std::vector<ImageCoordsCase>{
+ {"%float 2D 1 0 0 1 Unknown",
+ "%result = OpImageDrefGather %v4float %sampled_image %vf12 %depth "
+ "Grad %vf12 %vf12",
+ "WGSL does not support image gather with explicit gradient: ",
+ {}}}));
+
+TEST_F(SpvParserHandleTest, NeverGenerateConstDeclForHandle_UseVariableDirectly) {
+ // An ad-hoc test to prove we never had the issue
+ // feared in crbug.com/tint/265.
+ // Never create a const-declaration for a pointer to
+ // a texture or sampler. Code generation always
+ // traces back to the memory object declaration.
+ const auto assembly = Preamble() + R"(
OpEntryPoint Fragment %100 "main"
OpExecutionMode %100 OriginUpperLeft
@@ -3899,20 +3737,20 @@ TEST_F(SpvParserHandleTest,
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- EXPECT_TRUE(p->BuildAndParseInternalModule()) << assembly;
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- EXPECT_TRUE(p->error().empty()) << p->error();
- auto ast_body = fe.ast_body();
- const auto got = test::ToString(p->program(), ast_body);
- auto* expect = R"(var var_1 : vec4<f32>;
+ auto p = parser(test::Assemble(assembly));
+ EXPECT_TRUE(p->BuildAndParseInternalModule()) << assembly;
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ EXPECT_TRUE(p->error().empty()) << p->error();
+ auto ast_body = fe.ast_body();
+ const auto got = test::ToString(p->program(), ast_body);
+ auto* expect = R"(var var_1 : vec4<f32>;
let x_22 : vec4<f32> = textureSample(x_2, x_3, vec2<f32>());
let x_26 : vec4<f32> = textureSample(x_2, x_3, vec2<f32>());
var_1 = (x_22 + x_26);
return;
)";
- ASSERT_EQ(expect, got);
+ ASSERT_EQ(expect, got);
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/reader/spirv/parser_impl_import_test.cc b/chromium/third_party/dawn/src/tint/reader/spirv/parser_impl_import_test.cc
index 852c08ad06c..b25db07ffd6 100644
--- a/chromium/third_party/dawn/src/tint/reader/spirv/parser_impl_import_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/spirv/parser_impl_import_test.cc
@@ -28,44 +28,43 @@ using ::testing::UnorderedElementsAre;
using SpvParserImportTest = SpvParserTest;
TEST_F(SpvParserImportTest, Import_NoImport) {
- auto p = parser(test::Assemble("%1 = OpTypeVoid"));
- EXPECT_TRUE(p->BuildAndParseInternalModule());
- EXPECT_TRUE(p->error().empty());
- const auto program_ast = test::ToString(p->program());
- EXPECT_THAT(program_ast, Not(HasSubstr("Import")));
+ auto p = parser(test::Assemble("%1 = OpTypeVoid"));
+ EXPECT_TRUE(p->BuildAndParseInternalModule());
+ EXPECT_TRUE(p->error().empty());
+ const auto program_ast = test::ToString(p->program());
+ EXPECT_THAT(program_ast, Not(HasSubstr("Import")));
- p->DeliberatelyInvalidSpirv();
+ p->DeliberatelyInvalidSpirv();
}
TEST_F(SpvParserImportTest, Import_ImportGlslStd450) {
- auto p = parser(test::Assemble(R"(%1 = OpExtInstImport "GLSL.std.450")"));
- EXPECT_TRUE(p->BuildAndParseInternalModule());
- EXPECT_TRUE(p->error().empty());
- EXPECT_THAT(p->glsl_std_450_imports(), ElementsAre(1));
+ auto p = parser(test::Assemble(R"(%1 = OpExtInstImport "GLSL.std.450")"));
+ EXPECT_TRUE(p->BuildAndParseInternalModule());
+ EXPECT_TRUE(p->error().empty());
+ EXPECT_THAT(p->glsl_std_450_imports(), ElementsAre(1));
- p->DeliberatelyInvalidSpirv();
+ p->DeliberatelyInvalidSpirv();
}
TEST_F(SpvParserImportTest, Import_NonSemantic_IgnoredImport) {
- auto p = parser(test::Assemble(
- R"(%40 = OpExtInstImport "NonSemantic.ClspvReflection.1")"));
- EXPECT_TRUE(p->BuildAndParseInternalModule());
- EXPECT_TRUE(p->error().empty());
+ auto p = parser(test::Assemble(R"(%40 = OpExtInstImport "NonSemantic.ClspvReflection.1")"));
+ EXPECT_TRUE(p->BuildAndParseInternalModule());
+ EXPECT_TRUE(p->error().empty());
- p->DeliberatelyInvalidSpirv();
+ p->DeliberatelyInvalidSpirv();
}
TEST_F(SpvParserImportTest, Import_NonSemantic_IgnoredExtInsts) {
- // This is the clspv-compiled output of this OpenCL C:
- // kernel void foo(global int*A) { A=A; }
- // It emits NonSemantic.ClspvReflection.1 extended instructions.
- // But *tweaked*:
- // - to remove gl_WorkgroupSize
- // - to add LocalSize execution mode
- // - to move one of the ExtInsts into the globals-and-constants
- // section
- // - to move one of the ExtInsts into the function body.
- auto p = parser(test::Assemble(R"(
+ // This is the clspv-compiled output of this OpenCL C:
+ // kernel void foo(global int*A) { A=A; }
+ // It emits NonSemantic.ClspvReflection.1 extended instructions.
+ // But *tweaked*:
+ // - to remove gl_WorkgroupSize
+ // - to add LocalSize execution mode
+ // - to move one of the ExtInsts into the globals-and-constants
+ // section
+ // - to move one of the ExtInsts into the function body.
+ auto p = parser(test::Assemble(R"(
OpCapability Shader
OpExtension "SPV_KHR_storage_buffer_storage_class"
OpExtension "SPV_KHR_non_semantic_info"
@@ -110,11 +109,10 @@ TEST_F(SpvParserImportTest, Import_NonSemantic_IgnoredExtInsts) {
%25 = OpExtInst %void %20 ArgumentStorageBuffer %22 %uint_0 %uint_0 %uint_0 %24
%28 = OpExtInst %void %20 SpecConstantWorkgroupSize %uint_0 %uint_1 %uint_2
)"));
- EXPECT_TRUE(p->BuildAndParseInternalModule());
- EXPECT_TRUE(p->error().empty());
+ EXPECT_TRUE(p->BuildAndParseInternalModule());
+ EXPECT_TRUE(p->error().empty());
- p->SkipDumpingPending(
- "crbug.com/tint/1041 track access mode in spirv-reader parser type");
+ p->SkipDumpingPending("crbug.com/tint/1041 track access mode in spirv-reader parser type");
}
// TODO(dneto): We don't currently support other kinds of extended instruction
diff --git a/chromium/third_party/dawn/src/tint/reader/spirv/parser_impl_module_var_test.cc b/chromium/third_party/dawn/src/tint/reader/spirv/parser_impl_module_var_test.cc
index d50959dd52e..e4ac8cbb574 100644
--- a/chromium/third_party/dawn/src/tint/reader/spirv/parser_impl_module_var_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/spirv/parser_impl_module_var_test.cc
@@ -29,21 +29,21 @@ using ::testing::HasSubstr;
using ::testing::Not;
std::string Preamble() {
- return R"(
+ return R"(
OpCapability Shader
OpMemoryModel Logical Simple
)";
}
std::string FragMain() {
- return R"(
+ return R"(
OpEntryPoint Fragment %main "main"
OpExecutionMode %main OriginUpperLeft
)";
}
std::string MainBody() {
- return R"(
+ return R"(
%main = OpFunction %void None %voidfn
%main_entry = OpLabel
OpReturn
@@ -52,7 +52,7 @@ std::string MainBody() {
}
std::string CommonCapabilities() {
- return R"(
+ return R"(
OpCapability Shader
OpCapability SampleRateShading
OpMemoryModel Logical Simple
@@ -60,7 +60,7 @@ std::string CommonCapabilities() {
}
std::string CommonTypes() {
- return R"(
+ return R"(
%void = OpTypeVoid
%voidfn = OpTypeFunction %void
@@ -95,14 +95,14 @@ std::string CommonTypes() {
}
std::string StructTypes() {
- return R"(
+ return R"(
%strct = OpTypeStruct %uint %float %arr2uint
)";
}
// Returns layout annotations for types in StructTypes()
std::string CommonLayout() {
- return R"(
+ return R"(
OpMemberDecorate %strct 0 Offset 0
OpMemberDecorate %strct 1 Offset 4
OpMemberDecorate %strct 2 Offset 8
@@ -111,50 +111,49 @@ std::string CommonLayout() {
}
TEST_F(SpvModuleScopeVarParserTest, NoVar) {
- auto assembly = Preamble() + FragMain() + CommonTypes() + MainBody();
- auto p = parser(test::Assemble(assembly));
- EXPECT_TRUE(p->BuildAndParseInternalModule()) << assembly;
- EXPECT_TRUE(p->error().empty());
- const auto module_ast = test::ToString(p->program());
- EXPECT_THAT(module_ast, Not(HasSubstr("Variable"))) << module_ast;
+ auto assembly = Preamble() + FragMain() + CommonTypes() + MainBody();
+ auto p = parser(test::Assemble(assembly));
+ EXPECT_TRUE(p->BuildAndParseInternalModule()) << assembly;
+ EXPECT_TRUE(p->error().empty());
+ const auto module_ast = test::ToString(p->program());
+ EXPECT_THAT(module_ast, Not(HasSubstr("Variable"))) << module_ast;
}
TEST_F(SpvModuleScopeVarParserTest, BadStorageClass_NotAWebGPUStorageClass) {
- auto p = parser(test::Assemble(Preamble() + FragMain() + R"(
+ auto p = parser(test::Assemble(Preamble() + FragMain() + R"(
%float = OpTypeFloat 32
%ptr = OpTypePointer CrossWorkgroup %float
%52 = OpVariable %ptr CrossWorkgroup
%void = OpTypeVoid
%voidfn = OpTypeFunction %void
)" + MainBody()));
- EXPECT_TRUE(p->BuildInternalModule());
- // Normally we should run ParserImpl::RegisterTypes before emitting
- // variables. But defensive coding in EmitModuleScopeVariables lets
- // us catch this error.
- EXPECT_FALSE(p->EmitModuleScopeVariables()) << p->error();
- EXPECT_THAT(p->error(), HasSubstr("unknown SPIR-V storage class: 5"));
+ EXPECT_TRUE(p->BuildInternalModule());
+ // Normally we should run ParserImpl::RegisterTypes before emitting
+ // variables. But defensive coding in EmitModuleScopeVariables lets
+ // us catch this error.
+ EXPECT_FALSE(p->EmitModuleScopeVariables()) << p->error();
+ EXPECT_THAT(p->error(), HasSubstr("unknown SPIR-V storage class: 5"));
}
TEST_F(SpvModuleScopeVarParserTest, BadStorageClass_Function) {
- auto p = parser(test::Assemble(Preamble() + FragMain() + R"(
+ auto p = parser(test::Assemble(Preamble() + FragMain() + R"(
%float = OpTypeFloat 32
%ptr = OpTypePointer Function %float
%52 = OpVariable %ptr Function
%void = OpTypeVoid
%voidfn = OpTypeFunction %void
)" + MainBody()));
- EXPECT_TRUE(p->BuildInternalModule());
- // Normally we should run ParserImpl::RegisterTypes before emitting
- // variables. But defensive coding in EmitModuleScopeVariables lets
- // us catch this error.
- EXPECT_FALSE(p->EmitModuleScopeVariables()) << p->error();
- EXPECT_THAT(p->error(),
- HasSubstr("invalid SPIR-V storage class 7 for module scope "
- "variable: %52 = OpVariable %3 Function"));
+ EXPECT_TRUE(p->BuildInternalModule());
+ // Normally we should run ParserImpl::RegisterTypes before emitting
+ // variables. But defensive coding in EmitModuleScopeVariables lets
+ // us catch this error.
+ EXPECT_FALSE(p->EmitModuleScopeVariables()) << p->error();
+ EXPECT_THAT(p->error(), HasSubstr("invalid SPIR-V storage class 7 for module scope "
+ "variable: %52 = OpVariable %3 Function"));
}
TEST_F(SpvModuleScopeVarParserTest, BadPointerType) {
- auto p = parser(test::Assemble(Preamble() + FragMain() + R"(
+ auto p = parser(test::Assemble(Preamble() + FragMain() + R"(
%float = OpTypeFloat 32
%fn_ty = OpTypeFunction %float
%3 = OpTypePointer Private %fn_ty
@@ -162,17 +161,17 @@ TEST_F(SpvModuleScopeVarParserTest, BadPointerType) {
%void = OpTypeVoid
%voidfn = OpTypeFunction %void
)" + MainBody()));
- EXPECT_TRUE(p->BuildInternalModule());
- // Normally we should run ParserImpl::RegisterTypes before emitting
- // variables. But defensive coding in EmitModuleScopeVariables lets
- // us catch this error.
- EXPECT_FALSE(p->EmitModuleScopeVariables());
- EXPECT_THAT(p->error(), HasSubstr("internal error: failed to register Tint "
- "AST type for SPIR-V type with ID: 3"));
+ EXPECT_TRUE(p->BuildInternalModule());
+ // Normally we should run ParserImpl::RegisterTypes before emitting
+ // variables. But defensive coding in EmitModuleScopeVariables lets
+ // us catch this error.
+ EXPECT_FALSE(p->EmitModuleScopeVariables());
+ EXPECT_THAT(p->error(), HasSubstr("internal error: failed to register Tint "
+ "AST type for SPIR-V type with ID: 3"));
}
TEST_F(SpvModuleScopeVarParserTest, NonPointerType) {
- auto p = parser(test::Assemble(Preamble() + FragMain() + R"(
+ auto p = parser(test::Assemble(Preamble() + FragMain() + R"(
%float = OpTypeFloat 32
%5 = OpTypeFunction %float
%3 = OpTypePointer Private %5
@@ -180,15 +179,13 @@ TEST_F(SpvModuleScopeVarParserTest, NonPointerType) {
%void = OpTypeVoid
%voidfn = OpTypeFunction %void
)" + MainBody()));
- EXPECT_TRUE(p->BuildInternalModule());
- EXPECT_FALSE(p->RegisterTypes());
- EXPECT_THAT(
- p->error(),
- HasSubstr("SPIR-V pointer type with ID 3 has invalid pointee type 5"));
+ EXPECT_TRUE(p->BuildInternalModule());
+ EXPECT_FALSE(p->RegisterTypes());
+ EXPECT_THAT(p->error(), HasSubstr("SPIR-V pointer type with ID 3 has invalid pointee type 5"));
}
TEST_F(SpvModuleScopeVarParserTest, AnonWorkgroupVar) {
- auto p = parser(test::Assemble(Preamble() + FragMain() + R"(
+ auto p = parser(test::Assemble(Preamble() + FragMain() + R"(
%float = OpTypeFloat 32
%ptr = OpTypePointer Workgroup %float
%52 = OpVariable %ptr Workgroup
@@ -196,14 +193,14 @@ TEST_F(SpvModuleScopeVarParserTest, AnonWorkgroupVar) {
%voidfn = OpTypeFunction %void
)" + MainBody()));
- EXPECT_TRUE(p->BuildAndParseInternalModule());
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- EXPECT_THAT(module_str, HasSubstr("var<workgroup> x_52 : f32;"));
+ EXPECT_TRUE(p->BuildAndParseInternalModule());
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ EXPECT_THAT(module_str, HasSubstr("var<workgroup> x_52 : f32;"));
}
TEST_F(SpvModuleScopeVarParserTest, NamedWorkgroupVar) {
- auto p = parser(test::Assemble(Preamble() + FragMain() + R"(
+ auto p = parser(test::Assemble(Preamble() + FragMain() + R"(
OpName %52 "the_counter"
%float = OpTypeFloat 32
%ptr = OpTypePointer Workgroup %float
@@ -212,14 +209,14 @@ TEST_F(SpvModuleScopeVarParserTest, NamedWorkgroupVar) {
%voidfn = OpTypeFunction %void
)" + MainBody()));
- EXPECT_TRUE(p->BuildAndParseInternalModule());
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- EXPECT_THAT(module_str, HasSubstr("var<workgroup> the_counter : f32;"));
+ EXPECT_TRUE(p->BuildAndParseInternalModule());
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ EXPECT_THAT(module_str, HasSubstr("var<workgroup> the_counter : f32;"));
}
TEST_F(SpvModuleScopeVarParserTest, PrivateVar) {
- auto p = parser(test::Assemble(Preamble() + FragMain() + R"(
+ auto p = parser(test::Assemble(Preamble() + FragMain() + R"(
OpName %52 "my_own_private_idaho"
%float = OpTypeFloat 32
%ptr = OpTypePointer Private %float
@@ -228,19 +225,18 @@ TEST_F(SpvModuleScopeVarParserTest, PrivateVar) {
%voidfn = OpTypeFunction %void
)" + MainBody()));
- EXPECT_TRUE(p->BuildAndParseInternalModule());
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- EXPECT_THAT(module_str,
- HasSubstr("var<private> my_own_private_idaho : f32;"));
+ EXPECT_TRUE(p->BuildAndParseInternalModule());
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ EXPECT_THAT(module_str, HasSubstr("var<private> my_own_private_idaho : f32;"));
}
TEST_F(SpvModuleScopeVarParserTest, BuiltinVertexIndex) {
- // This is the simple case for the vertex_index builtin,
- // where the SPIR-V uses the same store type as in WGSL.
- // See later for tests where the SPIR-V store type is signed
- // integer, as in GLSL.
- auto p = parser(test::Assemble(Preamble() + R"(
+ // This is the simple case for the vertex_index builtin,
+ // where the SPIR-V uses the same store type as in WGSL.
+    // See later for tests where the SPIR-V store type is a signed
+    // integer, as in GLSL.
+ auto p = parser(test::Assemble(Preamble() + R"(
OpEntryPoint Vertex %main "main" %52 %position
OpName %position "position"
OpDecorate %position BuiltIn Position
@@ -256,14 +252,14 @@ TEST_F(SpvModuleScopeVarParserTest, BuiltinVertexIndex) {
%position = OpVariable %posty Output
)" + MainBody()));
- EXPECT_TRUE(p->BuildAndParseInternalModule());
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- EXPECT_THAT(module_str, HasSubstr("var<private> x_52 : u32;"));
+ EXPECT_TRUE(p->BuildAndParseInternalModule());
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ EXPECT_THAT(module_str, HasSubstr("var<private> x_52 : u32;"));
}
std::string PerVertexPreamble() {
- return R"(
+ return R"(
OpCapability Shader
OpMemoryModel Logical Simple
OpEntryPoint Vertex %main "main" %1
@@ -287,10 +283,9 @@ std::string PerVertexPreamble() {
)";
}
-TEST_F(SpvModuleScopeVarParserTest,
- BuiltinPosition_StoreWholeStruct_NotSupported) {
- // Glslang does not generate this code pattern.
- const std::string assembly = PerVertexPreamble() + R"(
+TEST_F(SpvModuleScopeVarParserTest, BuiltinPosition_StoreWholeStruct_NotSupported) {
+ // Glslang does not generate this code pattern.
+ const std::string assembly = PerVertexPreamble() + R"(
%nil = OpConstantNull %10 ; the whole struct
%main = OpFunction %void None %voidfn
@@ -299,48 +294,45 @@ TEST_F(SpvModuleScopeVarParserTest,
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- EXPECT_FALSE(p->BuildAndParseInternalModule()) << assembly;
- EXPECT_THAT(p->error(), Eq("storing to the whole per-vertex structure is not "
- "supported: OpStore %1 %13"))
- << p->error();
+ auto p = parser(test::Assemble(assembly));
+ EXPECT_FALSE(p->BuildAndParseInternalModule()) << assembly;
+ EXPECT_THAT(p->error(), Eq("storing to the whole per-vertex structure is not "
+ "supported: OpStore %1 %13"))
+ << p->error();
}
-TEST_F(SpvModuleScopeVarParserTest,
- BuiltinPosition_IntermediateWholeStruct_NotSupported) {
- const std::string assembly = PerVertexPreamble() + R"(
+TEST_F(SpvModuleScopeVarParserTest, BuiltinPosition_IntermediateWholeStruct_NotSupported) {
+ const std::string assembly = PerVertexPreamble() + R"(
%main = OpFunction %void None %voidfn
%entry = OpLabel
%1000 = OpUndef %10
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- EXPECT_FALSE(p->BuildAndParseInternalModule()) << assembly;
- EXPECT_THAT(p->error(), Eq("operations producing a per-vertex structure are "
- "not supported: %1000 = OpUndef %10"))
- << p->error();
+ auto p = parser(test::Assemble(assembly));
+ EXPECT_FALSE(p->BuildAndParseInternalModule()) << assembly;
+ EXPECT_THAT(p->error(), Eq("operations producing a per-vertex structure are "
+ "not supported: %1000 = OpUndef %10"))
+ << p->error();
}
-TEST_F(SpvModuleScopeVarParserTest,
- BuiltinPosition_IntermediatePtrWholeStruct_NotSupported) {
- const std::string assembly = PerVertexPreamble() + R"(
+TEST_F(SpvModuleScopeVarParserTest, BuiltinPosition_IntermediatePtrWholeStruct_NotSupported) {
+ const std::string assembly = PerVertexPreamble() + R"(
%main = OpFunction %void None %voidfn
%entry = OpLabel
%1000 = OpCopyObject %11 %1
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- EXPECT_FALSE(p->BuildAndParseInternalModule());
- EXPECT_THAT(p->error(),
- Eq("operations producing a pointer to a per-vertex structure are "
- "not supported: %1000 = OpCopyObject %11 %1"))
- << p->error();
+ auto p = parser(test::Assemble(assembly));
+ EXPECT_FALSE(p->BuildAndParseInternalModule());
+ EXPECT_THAT(p->error(), Eq("operations producing a pointer to a per-vertex structure are "
+ "not supported: %1000 = OpCopyObject %11 %1"))
+ << p->error();
}
TEST_F(SpvModuleScopeVarParserTest, BuiltinPosition_StorePosition) {
- const std::string assembly = PerVertexPreamble() + R"(
+ const std::string assembly = PerVertexPreamble() + R"(
%ptr_v4float = OpTypePointer Output %12
%nil = OpConstantNull %12
@@ -351,17 +343,15 @@ TEST_F(SpvModuleScopeVarParserTest, BuiltinPosition_StorePosition) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- EXPECT_TRUE(p->BuildAndParseInternalModule());
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- EXPECT_THAT(module_str, HasSubstr("gl_Position = vec4<f32>();"))
- << module_str;
+ auto p = parser(test::Assemble(assembly));
+ EXPECT_TRUE(p->BuildAndParseInternalModule());
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ EXPECT_THAT(module_str, HasSubstr("gl_Position = vec4<f32>();")) << module_str;
}
-TEST_F(SpvModuleScopeVarParserTest,
- BuiltinPosition_StorePosition_PerVertexStructOutOfOrderDecl) {
- const std::string assembly = R"(
+TEST_F(SpvModuleScopeVarParserTest, BuiltinPosition_StorePosition_PerVertexStructOutOfOrderDecl) {
+ const std::string assembly = R"(
OpCapability Shader
OpMemoryModel Logical Simple
OpEntryPoint Vertex %main "main" %1
@@ -395,17 +385,15 @@ TEST_F(SpvModuleScopeVarParserTest,
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- EXPECT_TRUE(p->BuildAndParseInternalModule());
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- EXPECT_THAT(module_str, HasSubstr("gl_Position = vec4<f32>();"))
- << module_str;
+ auto p = parser(test::Assemble(assembly));
+ EXPECT_TRUE(p->BuildAndParseInternalModule());
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ EXPECT_THAT(module_str, HasSubstr("gl_Position = vec4<f32>();")) << module_str;
}
-TEST_F(SpvModuleScopeVarParserTest,
- BuiltinPosition_StorePositionMember_OneAccessChain) {
- const std::string assembly = PerVertexPreamble() + R"(
+TEST_F(SpvModuleScopeVarParserTest, BuiltinPosition_StorePositionMember_OneAccessChain) {
+ const std::string assembly = PerVertexPreamble() + R"(
%ptr_float = OpTypePointer Output %float
%nil = OpConstantNull %float
@@ -416,17 +404,16 @@ TEST_F(SpvModuleScopeVarParserTest,
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- EXPECT_TRUE(p->BuildAndParseInternalModule());
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- EXPECT_THAT(module_str, HasSubstr("gl_Position.y = 0.0;")) << module_str;
+ auto p = parser(test::Assemble(assembly));
+ EXPECT_TRUE(p->BuildAndParseInternalModule());
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ EXPECT_THAT(module_str, HasSubstr("gl_Position.y = 0.0f;")) << module_str;
}
-TEST_F(SpvModuleScopeVarParserTest,
- BuiltinPosition_StorePositionMember_TwoAccessChain) {
- // The algorithm is smart enough to collapse it down.
- const std::string assembly = PerVertexPreamble() + R"(
+TEST_F(SpvModuleScopeVarParserTest, BuiltinPosition_StorePositionMember_TwoAccessChain) {
+ // The algorithm is smart enough to collapse it down.
+ const std::string assembly = PerVertexPreamble() + R"(
%ptr = OpTypePointer Output %12
%ptr_float = OpTypePointer Output %float
%nil = OpConstantNull %float
@@ -439,15 +426,15 @@ TEST_F(SpvModuleScopeVarParserTest,
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- EXPECT_TRUE(p->BuildAndParseInternalModule());
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- EXPECT_THAT(module_str, HasSubstr("gl_Position.y = 0.0;")) << module_str;
+ auto p = parser(test::Assemble(assembly));
+ EXPECT_TRUE(p->BuildAndParseInternalModule());
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ EXPECT_THAT(module_str, HasSubstr("gl_Position.y = 0.0f;")) << module_str;
}
TEST_F(SpvModuleScopeVarParserTest, BuiltinPointSize_Write1_IsErased) {
- const std::string assembly = PerVertexPreamble() + R"(
+ const std::string assembly = PerVertexPreamble() + R"(
%ptr = OpTypePointer Output %float
%one = OpConstant %float 1.0
@@ -458,11 +445,11 @@ TEST_F(SpvModuleScopeVarParserTest, BuiltinPointSize_Write1_IsErased) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- EXPECT_TRUE(p->BuildAndParseInternalModule());
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- EXPECT_EQ(module_str, R"(var<private> gl_Position : vec4<f32>;
+ auto p = parser(test::Assemble(assembly));
+ EXPECT_TRUE(p->BuildAndParseInternalModule());
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ EXPECT_EQ(module_str, R"(var<private> gl_Position : vec4<f32>;
fn main_1() {
return;
@@ -473,7 +460,7 @@ struct main_out {
gl_Position : vec4<f32>,
}
-@stage(vertex)
+@vertex
fn main() -> main_out {
main_1();
return main_out(gl_Position);
@@ -482,7 +469,7 @@ fn main() -> main_out {
}
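
Several expected-output strings in this hunk also change the entry-point attribute from @stage(vertex) to @vertex, tracking the WGSL rename of stage attributes. A minimal sketch of the spelling the tests now expect, lifted from the expected output above (the variable name expected_entry is illustrative):

const std::string expected_entry = R"(@vertex
fn main() -> main_out {
  main_1();
  return main_out(gl_Position);
}
)";  // previously spelled "@stage(vertex)" before the attribute rename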
TEST_F(SpvModuleScopeVarParserTest, BuiltinPointSize_WriteNon1_IsError) {
- const std::string assembly = PerVertexPreamble() + R"(
+ const std::string assembly = PerVertexPreamble() + R"(
%ptr = OpTypePointer Output %float
%999 = OpConstant %float 2.0
@@ -493,15 +480,14 @@ TEST_F(SpvModuleScopeVarParserTest, BuiltinPointSize_WriteNon1_IsError) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- EXPECT_FALSE(p->BuildAndParseInternalModule());
- EXPECT_THAT(p->error(),
- HasSubstr("cannot store a value other than constant 1.0 to "
- "PointSize builtin: OpStore %100 %999"));
+ auto p = parser(test::Assemble(assembly));
+ EXPECT_FALSE(p->BuildAndParseInternalModule());
+ EXPECT_THAT(p->error(), HasSubstr("cannot store a value other than constant 1.0 to "
+ "PointSize builtin: OpStore %100 %999"));
}
TEST_F(SpvModuleScopeVarParserTest, BuiltinPointSize_ReadReplaced) {
- const std::string assembly = PerVertexPreamble() + R"(
+ const std::string assembly = PerVertexPreamble() + R"(
%ptr = OpTypePointer Output %float
%nil = OpConstantNull %12
%private_ptr = OpTypePointer Private %float
@@ -515,16 +501,16 @@ TEST_F(SpvModuleScopeVarParserTest, BuiltinPointSize_ReadReplaced) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- EXPECT_TRUE(p->BuildAndParseInternalModule());
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- EXPECT_EQ(module_str, R"(var<private> x_900 : f32;
+ auto p = parser(test::Assemble(assembly));
+ EXPECT_TRUE(p->BuildAndParseInternalModule());
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ EXPECT_EQ(module_str, R"(var<private> x_900 : f32;
var<private> gl_Position : vec4<f32>;
fn main_1() {
- x_900 = 1.0;
+ x_900 = 1.0f;
return;
}
@@ -533,7 +519,7 @@ struct main_out {
gl_Position : vec4<f32>,
}
-@stage(vertex)
+@vertex
fn main() -> main_out {
main_1();
return main_out(gl_Position);
@@ -541,9 +527,8 @@ fn main() -> main_out {
)") << module_str;
}
-TEST_F(SpvModuleScopeVarParserTest,
- BuiltinPointSize_WriteViaCopyObjectPriorAccess_Unsupported) {
- const std::string assembly = PerVertexPreamble() + R"(
+TEST_F(SpvModuleScopeVarParserTest, BuiltinPointSize_WriteViaCopyObjectPriorAccess_Unsupported) {
+ const std::string assembly = PerVertexPreamble() + R"(
%ptr = OpTypePointer Output %float
%nil = OpConstantNull %12
@@ -555,17 +540,15 @@ TEST_F(SpvModuleScopeVarParserTest,
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- EXPECT_FALSE(p->BuildAndParseInternalModule()) << p->error();
- EXPECT_THAT(
- p->error(),
- HasSubstr("operations producing a pointer to a per-vertex structure are "
- "not supported: %20 = OpCopyObject %11 %1"));
+ auto p = parser(test::Assemble(assembly));
+ EXPECT_FALSE(p->BuildAndParseInternalModule()) << p->error();
+ EXPECT_THAT(p->error(),
+ HasSubstr("operations producing a pointer to a per-vertex structure are "
+ "not supported: %20 = OpCopyObject %11 %1"));
}
-TEST_F(SpvModuleScopeVarParserTest,
- BuiltinPointSize_WriteViaCopyObjectPostAccessChainErased) {
- const std::string assembly = PerVertexPreamble() + R"(
+TEST_F(SpvModuleScopeVarParserTest, BuiltinPointSize_WriteViaCopyObjectPostAccessChainErased) {
+ const std::string assembly = PerVertexPreamble() + R"(
%ptr = OpTypePointer Output %float
%one = OpConstant %float 1.0
@@ -577,11 +560,11 @@ TEST_F(SpvModuleScopeVarParserTest,
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- EXPECT_TRUE(p->BuildAndParseInternalModule()) << p->error();
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- EXPECT_EQ(module_str, R"(var<private> gl_Position : vec4<f32>;
+ auto p = parser(test::Assemble(assembly));
+ EXPECT_TRUE(p->BuildAndParseInternalModule()) << p->error();
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ EXPECT_EQ(module_str, R"(var<private> gl_Position : vec4<f32>;
fn main_1() {
return;
@@ -592,7 +575,7 @@ struct main_out {
gl_Position : vec4<f32>,
}
-@stage(vertex)
+@vertex
fn main() -> main_out {
main_1();
return main_out(gl_Position);
@@ -601,15 +584,15 @@ fn main() -> main_out {
}
std::string LoosePointSizePreamble(std::string stage = "Vertex") {
- return R"(
+ return R"(
OpCapability Shader
OpMemoryModel Logical Simple
OpEntryPoint )" +
- stage + R"( %500 "main" %1
+ stage + R"( %500 "main" %1
)" + (stage == "Vertex" ? " %2 " : "") +
- +(stage == "Fragment" ? "OpExecutionMode %500 OriginUpperLeft" : "") +
- +(stage == "Vertex" ? " OpDecorate %2 BuiltIn Position " : "") +
- R"(
+ +(stage == "Fragment" ? "OpExecutionMode %500 OriginUpperLeft" : "") +
+ +(stage == "Vertex" ? " OpDecorate %2 BuiltIn Position " : "") +
+ R"(
OpDecorate %1 BuiltIn PointSize
%void = OpTypeVoid
%voidfn = OpTypeFunction %void
@@ -626,7 +609,7 @@ std::string LoosePointSizePreamble(std::string stage = "Vertex") {
}
TEST_F(SpvModuleScopeVarParserTest, BuiltinPointSize_Loose_Write1_IsErased) {
- const std::string assembly = LoosePointSizePreamble() + R"(
+ const std::string assembly = LoosePointSizePreamble() + R"(
%ptr = OpTypePointer Output %float
%one = OpConstant %float 1.0
@@ -636,11 +619,11 @@ TEST_F(SpvModuleScopeVarParserTest, BuiltinPointSize_Loose_Write1_IsErased) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- EXPECT_TRUE(p->BuildAndParseInternalModule());
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- EXPECT_EQ(module_str, R"(var<private> x_2 : vec4<f32>;
+ auto p = parser(test::Assemble(assembly));
+ EXPECT_TRUE(p->BuildAndParseInternalModule());
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ EXPECT_EQ(module_str, R"(var<private> x_2 : vec4<f32>;
fn main_1() {
return;
@@ -651,7 +634,7 @@ struct main_out {
x_2_1 : vec4<f32>,
}
-@stage(vertex)
+@vertex
fn main() -> main_out {
main_1();
return main_out(x_2);
@@ -660,7 +643,7 @@ fn main() -> main_out {
}
TEST_F(SpvModuleScopeVarParserTest, BuiltinPointSize_Loose_WriteNon1_IsError) {
- const std::string assembly = LoosePointSizePreamble() + R"(
+ const std::string assembly = LoosePointSizePreamble() + R"(
%ptr = OpTypePointer Output %float
%999 = OpConstant %float 2.0
@@ -670,16 +653,14 @@ TEST_F(SpvModuleScopeVarParserTest, BuiltinPointSize_Loose_WriteNon1_IsError) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- EXPECT_FALSE(p->BuildAndParseInternalModule());
- EXPECT_THAT(p->error(),
- HasSubstr("cannot store a value other than constant 1.0 to "
- "PointSize builtin: OpStore %1 %999"));
+ auto p = parser(test::Assemble(assembly));
+ EXPECT_FALSE(p->BuildAndParseInternalModule());
+ EXPECT_THAT(p->error(), HasSubstr("cannot store a value other than constant 1.0 to "
+ "PointSize builtin: OpStore %1 %999"));
}
-TEST_F(SpvModuleScopeVarParserTest,
- BuiltinPointSize_Loose_ReadReplaced_Vertex) {
- const std::string assembly = LoosePointSizePreamble() + R"(
+TEST_F(SpvModuleScopeVarParserTest, BuiltinPointSize_Loose_ReadReplaced_Vertex) {
+ const std::string assembly = LoosePointSizePreamble() + R"(
%ptr = OpTypePointer Private %float
%900 = OpVariable %ptr Private
@@ -690,17 +671,17 @@ TEST_F(SpvModuleScopeVarParserTest,
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
+ auto p = parser(test::Assemble(assembly));
- EXPECT_TRUE(p->BuildAndParseInternalModule());
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- EXPECT_EQ(module_str, R"(var<private> x_2 : vec4<f32>;
+ EXPECT_TRUE(p->BuildAndParseInternalModule());
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ EXPECT_EQ(module_str, R"(var<private> x_2 : vec4<f32>;
var<private> x_900 : f32;
fn main_1() {
- x_900 = 1.0;
+ x_900 = 1.0f;
return;
}
@@ -709,7 +690,7 @@ struct main_out {
x_2_1 : vec4<f32>,
}
-@stage(vertex)
+@vertex
fn main() -> main_out {
main_1();
return main_out(x_2);
@@ -717,9 +698,8 @@ fn main() -> main_out {
)") << module_str;
}
-TEST_F(SpvModuleScopeVarParserTest,
- BuiltinPointSize_Loose_ReadReplaced_Fragment) {
- const std::string assembly = LoosePointSizePreamble("Fragment") + R"(
+TEST_F(SpvModuleScopeVarParserTest, BuiltinPointSize_Loose_ReadReplaced_Fragment) {
+ const std::string assembly = LoosePointSizePreamble("Fragment") + R"(
%ptr = OpTypePointer Private %float
%900 = OpVariable %ptr Private
@@ -730,18 +710,17 @@ TEST_F(SpvModuleScopeVarParserTest,
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
+ auto p = parser(test::Assemble(assembly));
- // This example is invalid because you PointSize is not valid in Vulkan
- // Fragment shaders.
- EXPECT_FALSE(p->Parse());
- EXPECT_FALSE(p->success());
- EXPECT_THAT(p->error(), HasSubstr("VUID-PointSize-PointSize-04314"));
+    // This example is invalid because PointSize is not valid in Vulkan
+    // Fragment shaders.
+ EXPECT_FALSE(p->Parse());
+ EXPECT_FALSE(p->success());
+ EXPECT_THAT(p->error(), HasSubstr("VUID-PointSize-PointSize-04314"));
}
-TEST_F(SpvModuleScopeVarParserTest,
- BuiltinPointSize_Loose_WriteViaCopyObjectPriorAccess_Erased) {
- const std::string assembly = LoosePointSizePreamble() + R"(
+TEST_F(SpvModuleScopeVarParserTest, BuiltinPointSize_Loose_WriteViaCopyObjectPriorAccess_Erased) {
+ const std::string assembly = LoosePointSizePreamble() + R"(
%one = OpConstant %float 1.0
%500 = OpFunction %void None %voidfn
@@ -752,11 +731,11 @@ TEST_F(SpvModuleScopeVarParserTest,
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- EXPECT_TRUE(p->BuildAndParseInternalModule()) << p->error();
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- EXPECT_EQ(module_str, R"(var<private> x_2 : vec4<f32>;
+ auto p = parser(test::Assemble(assembly));
+ EXPECT_TRUE(p->BuildAndParseInternalModule()) << p->error();
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ EXPECT_EQ(module_str, R"(var<private> x_2 : vec4<f32>;
fn main_1() {
return;
@@ -767,7 +746,7 @@ struct main_out {
x_2_1 : vec4<f32>,
}
-@stage(vertex)
+@vertex
fn main() -> main_out {
main_1();
return main_out(x_2);
@@ -777,7 +756,7 @@ fn main() -> main_out {
TEST_F(SpvModuleScopeVarParserTest,
BuiltinPointSize_Loose_WriteViaCopyObjectPostAccessChainErased) {
- const std::string assembly = LoosePointSizePreamble() + R"(
+ const std::string assembly = LoosePointSizePreamble() + R"(
%one = OpConstant %float 1.0
%500 = OpFunction %void None %voidfn
@@ -788,11 +767,11 @@ TEST_F(SpvModuleScopeVarParserTest,
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- EXPECT_TRUE(p->BuildAndParseInternalModule()) << p->error();
- EXPECT_TRUE(p->error().empty()) << p->error();
- const auto module_str = test::ToString(p->program());
- EXPECT_EQ(module_str, R"(var<private> x_2 : vec4<f32>;
+ auto p = parser(test::Assemble(assembly));
+ EXPECT_TRUE(p->BuildAndParseInternalModule()) << p->error();
+ EXPECT_TRUE(p->error().empty()) << p->error();
+ const auto module_str = test::ToString(p->program());
+ EXPECT_EQ(module_str, R"(var<private> x_2 : vec4<f32>;
fn main_1() {
return;
@@ -803,7 +782,7 @@ struct main_out {
x_2_1 : vec4<f32>,
}
-@stage(vertex)
+@vertex
fn main() -> main_out {
main_1();
return main_out(x_2);
@@ -812,7 +791,7 @@ fn main() -> main_out {
}
TEST_F(SpvModuleScopeVarParserTest, BuiltinClipDistance_NotSupported) {
- const std::string assembly = PerVertexPreamble() + R"(
+ const std::string assembly = PerVertexPreamble() + R"(
%ptr_float = OpTypePointer Output %float
%nil = OpConstantNull %float
%uint_2 = OpConstant %uint 2
@@ -825,15 +804,15 @@ TEST_F(SpvModuleScopeVarParserTest, BuiltinClipDistance_NotSupported) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- EXPECT_FALSE(p->BuildAndParseInternalModule());
- EXPECT_EQ(p->error(),
- "accessing per-vertex member 2 is not supported. Only Position is "
- "supported, and PointSize is ignored");
+ auto p = parser(test::Assemble(assembly));
+ EXPECT_FALSE(p->BuildAndParseInternalModule());
+ EXPECT_EQ(p->error(),
+ "accessing per-vertex member 2 is not supported. Only Position is "
+ "supported, and PointSize is ignored");
}
TEST_F(SpvModuleScopeVarParserTest, BuiltinCullDistance_NotSupported) {
- const std::string assembly = PerVertexPreamble() + R"(
+ const std::string assembly = PerVertexPreamble() + R"(
%ptr_float = OpTypePointer Output %float
%nil = OpConstantNull %float
%uint_3 = OpConstant %uint 3
@@ -846,15 +825,15 @@ TEST_F(SpvModuleScopeVarParserTest, BuiltinCullDistance_NotSupported) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- EXPECT_FALSE(p->BuildAndParseInternalModule());
- EXPECT_EQ(p->error(),
- "accessing per-vertex member 3 is not supported. Only Position is "
- "supported, and PointSize is ignored");
+ auto p = parser(test::Assemble(assembly));
+ EXPECT_FALSE(p->BuildAndParseInternalModule());
+ EXPECT_EQ(p->error(),
+ "accessing per-vertex member 3 is not supported. Only Position is "
+ "supported, and PointSize is ignored");
}
TEST_F(SpvModuleScopeVarParserTest, BuiltinPerVertex_MemberIndex_NotConstant) {
- const std::string assembly = PerVertexPreamble() + R"(
+ const std::string assembly = PerVertexPreamble() + R"(
%ptr_float = OpTypePointer Output %float
%nil = OpConstantNull %float
@@ -866,16 +845,14 @@ TEST_F(SpvModuleScopeVarParserTest, BuiltinPerVertex_MemberIndex_NotConstant) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- EXPECT_FALSE(p->BuildAndParseInternalModule());
- EXPECT_THAT(p->error(),
- Eq("first index of access chain into per-vertex structure is not "
- "a constant: %100 = OpAccessChain %13 %1 %16"));
+ auto p = parser(test::Assemble(assembly));
+ EXPECT_FALSE(p->BuildAndParseInternalModule());
+ EXPECT_THAT(p->error(), Eq("first index of access chain into per-vertex structure is not "
+ "a constant: %100 = OpAccessChain %13 %1 %16"));
}
-TEST_F(SpvModuleScopeVarParserTest,
- BuiltinPerVertex_MemberIndex_NotConstantInteger) {
- const std::string assembly = PerVertexPreamble() + R"(
+TEST_F(SpvModuleScopeVarParserTest, BuiltinPerVertex_MemberIndex_NotConstantInteger) {
+ const std::string assembly = PerVertexPreamble() + R"(
%ptr_float = OpTypePointer Output %float
%nil = OpConstantNull %float
@@ -887,38 +864,37 @@ TEST_F(SpvModuleScopeVarParserTest,
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- EXPECT_FALSE(p->BuildAndParseInternalModule());
- EXPECT_THAT(p->error(),
- Eq("first index of access chain into per-vertex structure is not "
- "a constant integer: %100 = OpAccessChain %13 %1 %14"));
+ auto p = parser(test::Assemble(assembly));
+ EXPECT_FALSE(p->BuildAndParseInternalModule());
+ EXPECT_THAT(p->error(), Eq("first index of access chain into per-vertex structure is not "
+ "a constant integer: %100 = OpAccessChain %13 %1 %14"));
}
TEST_F(SpvModuleScopeVarParserTest, ScalarInitializers) {
- auto p = parser(test::Assemble(Preamble() + FragMain() + CommonTypes() + R"(
+ auto p = parser(test::Assemble(Preamble() + FragMain() + CommonTypes() + R"(
%1 = OpVariable %ptr_bool Private %true
%2 = OpVariable %ptr_bool Private %false
%3 = OpVariable %ptr_int Private %int_m1
%4 = OpVariable %ptr_uint Private %uint_1
%5 = OpVariable %ptr_float Private %float_1p5
)" + MainBody()));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- EXPECT_THAT(module_str, HasSubstr(R"(var<private> x_1 : bool = true;
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ EXPECT_THAT(module_str, HasSubstr(R"(var<private> x_1 : bool = true;
var<private> x_2 : bool = false;
-var<private> x_3 : i32 = -1;
+var<private> x_3 : i32 = -1i;
var<private> x_4 : u32 = 1u;
-var<private> x_5 : f32 = 1.5;
+var<private> x_5 : f32 = 1.5f;
)"));
}
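
The expected WGSL in this and the following tests now carries explicit literal suffixes (-1i, 1u, 1.5f, 0.0f, and so on), matching the writer's switch to suffixed numeric literals. A small sketch of the convention using the same module-scope declarations checked above (the names x_3..x_5 come from the expected output):

const std::string suffixed_literals = R"(var<private> x_3 : i32 = -1i;
var<private> x_4 : u32 = 1u;
var<private> x_5 : f32 = 1.5f;
)";  // i-suffix for i32, u-suffix for u32, f-suffix for f32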
TEST_F(SpvModuleScopeVarParserTest, ScalarNullInitializers) {
- auto p = parser(test::Assemble(Preamble() + FragMain() + CommonTypes() + R"(
+ auto p = parser(test::Assemble(Preamble() + FragMain() + CommonTypes() + R"(
%null_bool = OpConstantNull %bool
%null_int = OpConstantNull %int
%null_uint = OpConstantNull %uint
@@ -929,21 +905,21 @@ TEST_F(SpvModuleScopeVarParserTest, ScalarNullInitializers) {
%3 = OpVariable %ptr_uint Private %null_uint
%4 = OpVariable %ptr_float Private %null_float
)" + MainBody()));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- EXPECT_THAT(module_str, HasSubstr(R"(var<private> x_1 : bool = false;
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ EXPECT_THAT(module_str, HasSubstr(R"(var<private> x_1 : bool = false;
-var<private> x_2 : i32 = 0;
+var<private> x_2 : i32 = 0i;
var<private> x_3 : u32 = 0u;
-var<private> x_4 : f32 = 0.0;
+var<private> x_4 : f32 = 0.0f;
)"));
}
TEST_F(SpvModuleScopeVarParserTest, ScalarUndefInitializers) {
- auto p = parser(test::Assemble(Preamble() + FragMain() + CommonTypes() + R"(
+ auto p = parser(test::Assemble(Preamble() + FragMain() + CommonTypes() + R"(
%undef_bool = OpUndef %bool
%undef_int = OpUndef %int
%undef_uint = OpUndef %uint
@@ -954,155 +930,145 @@ TEST_F(SpvModuleScopeVarParserTest, ScalarUndefInitializers) {
%3 = OpVariable %ptr_uint Private %undef_uint
%4 = OpVariable %ptr_float Private %undef_float
)" + MainBody()));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- EXPECT_THAT(module_str, HasSubstr(R"(var<private> x_1 : bool = false;
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ EXPECT_THAT(module_str, HasSubstr(R"(var<private> x_1 : bool = false;
-var<private> x_2 : i32 = 0;
+var<private> x_2 : i32 = 0i;
var<private> x_3 : u32 = 0u;
-var<private> x_4 : f32 = 0.0;
+var<private> x_4 : f32 = 0.0f;
)"));
- // This example module emits ok, but is not valid SPIR-V in the first place.
- p->DeliberatelyInvalidSpirv();
+ // This example module emits ok, but is not valid SPIR-V in the first place.
+ p->DeliberatelyInvalidSpirv();
}
TEST_F(SpvModuleScopeVarParserTest, VectorInitializer) {
- auto p = parser(test::Assemble(Preamble() + FragMain() + CommonTypes() + R"(
+ auto p = parser(test::Assemble(Preamble() + FragMain() + CommonTypes() + R"(
%ptr = OpTypePointer Private %v2float
%two = OpConstant %float 2.0
%const = OpConstantComposite %v2float %float_1p5 %two
%200 = OpVariable %ptr Private %const
)" + MainBody()));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- EXPECT_THAT(
- module_str,
- HasSubstr("var<private> x_200 : vec2<f32> = vec2<f32>(1.5, 2.0);"));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ EXPECT_THAT(module_str, HasSubstr("var<private> x_200 : vec2<f32> = vec2<f32>(1.5f, 2.0f);"));
}
TEST_F(SpvModuleScopeVarParserTest, VectorBoolNullInitializer) {
- auto p = parser(test::Assemble(Preamble() + FragMain() + CommonTypes() + R"(
+ auto p = parser(test::Assemble(Preamble() + FragMain() + CommonTypes() + R"(
%ptr = OpTypePointer Private %v2bool
%const = OpConstantNull %v2bool
%200 = OpVariable %ptr Private %const
)" + MainBody()));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- EXPECT_THAT(module_str,
- HasSubstr("var<private> x_200 : vec2<bool> = vec2<bool>();"));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ EXPECT_THAT(module_str, HasSubstr("var<private> x_200 : vec2<bool> = vec2<bool>();"));
}
TEST_F(SpvModuleScopeVarParserTest, VectorBoolUndefInitializer) {
- auto p = parser(test::Assemble(Preamble() + FragMain() + CommonTypes() + R"(
+ auto p = parser(test::Assemble(Preamble() + FragMain() + CommonTypes() + R"(
%ptr = OpTypePointer Private %v2bool
%const = OpUndef %v2bool
%200 = OpVariable %ptr Private %const
)" + MainBody()));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- EXPECT_THAT(module_str,
- HasSubstr("var<private> x_200 : vec2<bool> = vec2<bool>();"));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ EXPECT_THAT(module_str, HasSubstr("var<private> x_200 : vec2<bool> = vec2<bool>();"));
- // This example module emits ok, but is not valid SPIR-V in the first place.
- p->DeliberatelyInvalidSpirv();
+ // This example module emits ok, but is not valid SPIR-V in the first place.
+ p->DeliberatelyInvalidSpirv();
}
TEST_F(SpvModuleScopeVarParserTest, VectorUintNullInitializer) {
- auto p = parser(test::Assemble(Preamble() + FragMain() + CommonTypes() + R"(
+ auto p = parser(test::Assemble(Preamble() + FragMain() + CommonTypes() + R"(
%ptr = OpTypePointer Private %v2uint
%const = OpConstantNull %v2uint
%200 = OpVariable %ptr Private %const
)" + MainBody()));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- EXPECT_THAT(module_str,
- HasSubstr("var<private> x_200 : vec2<u32> = vec2<u32>();"));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ EXPECT_THAT(module_str, HasSubstr("var<private> x_200 : vec2<u32> = vec2<u32>();"));
}
TEST_F(SpvModuleScopeVarParserTest, VectorUintUndefInitializer) {
- auto p = parser(test::Assemble(Preamble() + FragMain() + CommonTypes() + R"(
+ auto p = parser(test::Assemble(Preamble() + FragMain() + CommonTypes() + R"(
%ptr = OpTypePointer Private %v2uint
%const = OpUndef %v2uint
%200 = OpVariable %ptr Private %const
)" + MainBody()));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- EXPECT_THAT(module_str,
- HasSubstr("var<private> x_200 : vec2<u32> = vec2<u32>();"));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ EXPECT_THAT(module_str, HasSubstr("var<private> x_200 : vec2<u32> = vec2<u32>();"));
- // This example module emits ok, but is not valid SPIR-V in the first place.
- p->DeliberatelyInvalidSpirv();
+ // This example module emits ok, but is not valid SPIR-V in the first place.
+ p->DeliberatelyInvalidSpirv();
}
TEST_F(SpvModuleScopeVarParserTest, VectorIntNullInitializer) {
- auto p = parser(test::Assemble(Preamble() + FragMain() + CommonTypes() + R"(
+ auto p = parser(test::Assemble(Preamble() + FragMain() + CommonTypes() + R"(
%ptr = OpTypePointer Private %v2int
%const = OpConstantNull %v2int
%200 = OpVariable %ptr Private %const
)" + MainBody()));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- EXPECT_THAT(module_str,
- HasSubstr("var<private> x_200 : vec2<i32> = vec2<i32>();"));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ EXPECT_THAT(module_str, HasSubstr("var<private> x_200 : vec2<i32> = vec2<i32>();"));
}
TEST_F(SpvModuleScopeVarParserTest, VectorIntUndefInitializer) {
- auto p = parser(test::Assemble(Preamble() + FragMain() + CommonTypes() + R"(
+ auto p = parser(test::Assemble(Preamble() + FragMain() + CommonTypes() + R"(
%ptr = OpTypePointer Private %v2int
%const = OpUndef %v2int
%200 = OpVariable %ptr Private %const
)" + MainBody()));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- EXPECT_THAT(module_str,
- HasSubstr("var<private> x_200 : vec2<i32> = vec2<i32>();"));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ EXPECT_THAT(module_str, HasSubstr("var<private> x_200 : vec2<i32> = vec2<i32>();"));
- // This example module emits ok, but is not valid SPIR-V in the first place.
- p->DeliberatelyInvalidSpirv();
+ // This example module emits ok, but is not valid SPIR-V in the first place.
+ p->DeliberatelyInvalidSpirv();
}
TEST_F(SpvModuleScopeVarParserTest, VectorFloatNullInitializer) {
- auto p = parser(test::Assemble(Preamble() + FragMain() + CommonTypes() + R"(
+ auto p = parser(test::Assemble(Preamble() + FragMain() + CommonTypes() + R"(
%ptr = OpTypePointer Private %v2float
%const = OpConstantNull %v2float
%200 = OpVariable %ptr Private %const
)" + MainBody()));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- EXPECT_THAT(module_str,
- HasSubstr("var<private> x_200 : vec2<f32> = vec2<f32>();"));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ EXPECT_THAT(module_str, HasSubstr("var<private> x_200 : vec2<f32> = vec2<f32>();"));
}
TEST_F(SpvModuleScopeVarParserTest, VectorFloatUndefInitializer) {
- auto p = parser(test::Assemble(Preamble() + FragMain() + CommonTypes() + R"(
+ auto p = parser(test::Assemble(Preamble() + FragMain() + CommonTypes() + R"(
%ptr = OpTypePointer Private %v2float
%const = OpUndef %v2float
%200 = OpVariable %ptr Private %const
)" + MainBody()));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- EXPECT_THAT(module_str,
- HasSubstr("var<private> x_200 : vec2<f32> = vec2<f32>();"));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ EXPECT_THAT(module_str, HasSubstr("var<private> x_200 : vec2<f32> = vec2<f32>();"));
- // This example module emits ok, but is not valid SPIR-V in the first place.
- p->DeliberatelyInvalidSpirv();
+ // This example module emits ok, but is not valid SPIR-V in the first place.
+ p->DeliberatelyInvalidSpirv();
}
TEST_F(SpvModuleScopeVarParserTest, MatrixInitializer) {
- auto p = parser(test::Assemble(Preamble() + FragMain() + CommonTypes() + R"(
+ auto p = parser(test::Assemble(Preamble() + FragMain() + CommonTypes() + R"(
%ptr = OpTypePointer Private %m3v2float
%two = OpConstant %float 2.0
%three = OpConstant %float 3.0
@@ -1113,202 +1079,179 @@ TEST_F(SpvModuleScopeVarParserTest, MatrixInitializer) {
%const = OpConstantComposite %m3v2float %v0 %v1 %v2
%200 = OpVariable %ptr Private %const
)" + MainBody()));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- EXPECT_THAT(module_str,
- HasSubstr("var<private> x_200 : mat3x2<f32> = mat3x2<f32>("
- "vec2<f32>(1.5, 2.0), "
- "vec2<f32>(2.0, 3.0), "
- "vec2<f32>(3.0, 4.0));"));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ EXPECT_THAT(module_str, HasSubstr("var<private> x_200 : mat3x2<f32> = mat3x2<f32>("
+ "vec2<f32>(1.5f, 2.0f), "
+ "vec2<f32>(2.0f, 3.0f), "
+ "vec2<f32>(3.0f, 4.0f));"));
}
TEST_F(SpvModuleScopeVarParserTest, MatrixNullInitializer) {
- auto p = parser(test::Assemble(Preamble() + FragMain() + CommonTypes() + R"(
+ auto p = parser(test::Assemble(Preamble() + FragMain() + CommonTypes() + R"(
%ptr = OpTypePointer Private %m3v2float
%const = OpConstantNull %m3v2float
%200 = OpVariable %ptr Private %const
)" + MainBody()));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- EXPECT_THAT(module_str,
- HasSubstr("var<private> x_200 : mat3x2<f32> = mat3x2<f32>();"));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ EXPECT_THAT(module_str, HasSubstr("var<private> x_200 : mat3x2<f32> = mat3x2<f32>();"));
}
TEST_F(SpvModuleScopeVarParserTest, MatrixUndefInitializer) {
- auto p = parser(test::Assemble(Preamble() + FragMain() + CommonTypes() + R"(
+ auto p = parser(test::Assemble(Preamble() + FragMain() + CommonTypes() + R"(
%ptr = OpTypePointer Private %m3v2float
%const = OpUndef %m3v2float
%200 = OpVariable %ptr Private %const
)" + MainBody()));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- EXPECT_THAT(module_str,
- HasSubstr("var<private> x_200 : mat3x2<f32> = mat3x2<f32>();"));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ EXPECT_THAT(module_str, HasSubstr("var<private> x_200 : mat3x2<f32> = mat3x2<f32>();"));
- // This example module emits ok, but is not valid SPIR-V in the first place.
- p->DeliberatelyInvalidSpirv();
+ // This example module emits ok, but is not valid SPIR-V in the first place.
+ p->DeliberatelyInvalidSpirv();
}
TEST_F(SpvModuleScopeVarParserTest, ArrayInitializer) {
- auto p = parser(test::Assemble(Preamble() + FragMain() + CommonTypes() + R"(
+ auto p = parser(test::Assemble(Preamble() + FragMain() + CommonTypes() + R"(
%ptr = OpTypePointer Private %arr2uint
%two = OpConstant %uint 2
%const = OpConstantComposite %arr2uint %uint_1 %two
%200 = OpVariable %ptr Private %const
)" + MainBody()));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- EXPECT_THAT(
- module_str,
- HasSubstr(
- "var<private> x_200 : array<u32, 2u> = array<u32, 2u>(1u, 2u);"));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ EXPECT_THAT(module_str,
+ HasSubstr("var<private> x_200 : array<u32, 2u> = array<u32, 2u>(1u, 2u);"));
}
TEST_F(SpvModuleScopeVarParserTest, ArrayNullInitializer) {
- auto p = parser(test::Assemble(Preamble() + FragMain() + CommonTypes() + R"(
+ auto p = parser(test::Assemble(Preamble() + FragMain() + CommonTypes() + R"(
%ptr = OpTypePointer Private %arr2uint
%const = OpConstantNull %arr2uint
%200 = OpVariable %ptr Private %const
)" + MainBody()));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- EXPECT_THAT(
- module_str,
- HasSubstr("var<private> x_200 : array<u32, 2u> = array<u32, 2u>();"));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ EXPECT_THAT(module_str, HasSubstr("var<private> x_200 : array<u32, 2u> = array<u32, 2u>();"));
}
TEST_F(SpvModuleScopeVarParserTest, ArrayUndefInitializer) {
- auto p = parser(test::Assemble(Preamble() + FragMain() + CommonTypes() + R"(
+ auto p = parser(test::Assemble(Preamble() + FragMain() + CommonTypes() + R"(
%ptr = OpTypePointer Private %arr2uint
%const = OpUndef %arr2uint
%200 = OpVariable %ptr Private %const
)" + MainBody()));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- EXPECT_THAT(
- module_str,
- HasSubstr("var<private> x_200 : array<u32, 2u> = array<u32, 2u>();"));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ EXPECT_THAT(module_str, HasSubstr("var<private> x_200 : array<u32, 2u> = array<u32, 2u>();"));
- // This example module emits ok, but is not valid SPIR-V in the first place.
- p->DeliberatelyInvalidSpirv();
+ // This example module emits ok, but is not valid SPIR-V in the first place.
+ p->DeliberatelyInvalidSpirv();
}
TEST_F(SpvModuleScopeVarParserTest, StructInitializer) {
- auto p = parser(test::Assemble(Preamble() + FragMain() + CommonTypes() +
- StructTypes() + R"(
+ auto p = parser(test::Assemble(Preamble() + FragMain() + CommonTypes() + StructTypes() + R"(
%ptr = OpTypePointer Private %strct
%two = OpConstant %uint 2
%arrconst = OpConstantComposite %arr2uint %uint_1 %two
%const = OpConstantComposite %strct %uint_1 %float_1p5 %arrconst
%200 = OpVariable %ptr Private %const
)" + MainBody()));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- EXPECT_THAT(
- module_str,
- HasSubstr("var<private> x_200 : S = S(1u, 1.5, array<u32, 2u>(1u, 2u));"))
- << module_str;
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ EXPECT_THAT(module_str,
+ HasSubstr("var<private> x_200 : S = S(1u, 1.5f, array<u32, 2u>(1u, 2u));"))
+ << module_str;
}
TEST_F(SpvModuleScopeVarParserTest, StructNullInitializer) {
- auto p = parser(test::Assemble(Preamble() + FragMain() + CommonTypes() +
- StructTypes() + R"(
+ auto p = parser(test::Assemble(Preamble() + FragMain() + CommonTypes() + StructTypes() + R"(
%ptr = OpTypePointer Private %strct
%const = OpConstantNull %strct
%200 = OpVariable %ptr Private %const
)" + MainBody()));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- EXPECT_THAT(
- module_str,
- HasSubstr("var<private> x_200 : S = S(0u, 0.0, array<u32, 2u>());"))
- << module_str;
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ EXPECT_THAT(module_str, HasSubstr("var<private> x_200 : S = S(0u, 0.0f, array<u32, 2u>());"))
+ << module_str;
}
TEST_F(SpvModuleScopeVarParserTest, StructUndefInitializer) {
- auto p = parser(test::Assemble(Preamble() + FragMain() + CommonTypes() +
- StructTypes() + R"(
+ auto p = parser(test::Assemble(Preamble() + FragMain() + CommonTypes() + StructTypes() + R"(
%ptr = OpTypePointer Private %strct
%const = OpUndef %strct
%200 = OpVariable %ptr Private %const
)" + MainBody()));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- EXPECT_TRUE(p->error().empty());
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- EXPECT_THAT(
- module_str,
- HasSubstr("var<private> x_200 : S = S(0u, 0.0, array<u32, 2u>());"))
- << module_str;
+ const auto module_str = test::ToString(p->program());
+ EXPECT_THAT(module_str, HasSubstr("var<private> x_200 : S = S(0u, 0.0f, array<u32, 2u>());"))
+ << module_str;
- // This example module emits ok, but is not valid SPIR-V in the first place.
- p->DeliberatelyInvalidSpirv();
+ // This example module emits ok, but is not valid SPIR-V in the first place.
+ p->DeliberatelyInvalidSpirv();
}
TEST_F(SpvModuleScopeVarParserTest, DescriptorGroupDecoration_Valid) {
- auto p = parser(test::Assemble(Preamble() + FragMain() + CommonLayout() + R"(
+ auto p = parser(test::Assemble(Preamble() + FragMain() + CommonLayout() + R"(
OpDecorate %1 DescriptorSet 3
OpDecorate %1 Binding 9 ; Required to pass WGSL validation
OpDecorate %strct Block
)" + CommonTypes() + StructTypes() +
- R"(
+ R"(
%ptr_sb_strct = OpTypePointer StorageBuffer %strct
%1 = OpVariable %ptr_sb_strct StorageBuffer
)" + MainBody()));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- EXPECT_THAT(
- module_str,
- HasSubstr("@group(3) @binding(9) var<storage, read_write> x_1 : S;"))
- << module_str;
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ EXPECT_THAT(module_str, HasSubstr("@group(3) @binding(9) var<storage, read_write> x_1 : S;"))
+ << module_str;
}
-
TEST_F(SpvModuleScopeVarParserTest, BindingDecoration_Valid) {
- auto p = parser(test::Assemble(Preamble() + FragMain() + R"(
+ auto p = parser(test::Assemble(Preamble() + FragMain() + R"(
OpDecorate %1 DescriptorSet 0 ; WGSL validation requires this already
OpDecorate %1 Binding 3
OpDecorate %strct Block
)" + CommonLayout() + CommonTypes() +
- StructTypes() +
- R"(
+ StructTypes() +
+ R"(
%ptr_sb_strct = OpTypePointer StorageBuffer %strct
%1 = OpVariable %ptr_sb_strct StorageBuffer
)" + MainBody()));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- EXPECT_THAT(
- module_str,
- HasSubstr("@group(0) @binding(3) var<storage, read_write> x_1 : S;"))
- << module_str;
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ EXPECT_THAT(module_str, HasSubstr("@group(0) @binding(3) var<storage, read_write> x_1 : S;"))
+ << module_str;
}
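
The two tests above check how resource decorations map onto WGSL attributes: DescriptorSet becomes @group and Binding becomes @binding on the module-scope variable. A minimal sketch of that correspondence, taken from the expected strings above (only the decoration-to-attribute pairing is the point here):

// OpDecorate %1 DescriptorSet 3   ->  @group(3)
// OpDecorate %1 Binding 9         ->  @binding(9)
const std::string expected_binding =
    R"(@group(3) @binding(9) var<storage, read_write> x_1 : S;)";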
-TEST_F(SpvModuleScopeVarParserTest,
- StructMember_NonReadableDecoration_Dropped) {
- auto p = parser(test::Assemble(Preamble() + FragMain() + R"(
+TEST_F(SpvModuleScopeVarParserTest, StructMember_NonReadableDecoration_Dropped) {
+ auto p = parser(test::Assemble(Preamble() + FragMain() + R"(
OpDecorate %1 DescriptorSet 0
OpDecorate %1 Binding 0
OpDecorate %strct Block
OpMemberDecorate %strct 0 NonReadable
)" + CommonLayout() + CommonTypes() +
- StructTypes() + R"(
+ StructTypes() + R"(
%ptr_sb_strct = OpTypePointer StorageBuffer %strct
%1 = OpVariable %ptr_sb_strct StorageBuffer
)" + MainBody()));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- EXPECT_THAT(module_str, HasSubstr(R"(type Arr = @stride(4) array<u32, 2u>;
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ EXPECT_THAT(module_str, HasSubstr(R"(type Arr = @stride(4) array<u32, 2u>;
struct S {
field0 : u32,
@@ -1321,7 +1264,7 @@ struct S {
}
TEST_F(SpvModuleScopeVarParserTest, ColMajorDecoration_Dropped) {
- auto p = parser(test::Assemble(Preamble() + FragMain() + R"(
+ auto p = parser(test::Assemble(Preamble() + FragMain() + R"(
OpName %myvar "myvar"
OpDecorate %myvar DescriptorSet 0
OpDecorate %myvar Binding 0
@@ -1339,10 +1282,10 @@ TEST_F(SpvModuleScopeVarParserTest, ColMajorDecoration_Dropped) {
%void = OpTypeVoid
%voidfn = OpTypeFunction %void
)" + MainBody()));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- EXPECT_THAT(module_str, HasSubstr(R"(struct S {
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ EXPECT_THAT(module_str, HasSubstr(R"(struct S {
field0 : mat3x2<f32>,
}
@@ -1351,7 +1294,7 @@ TEST_F(SpvModuleScopeVarParserTest, ColMajorDecoration_Dropped) {
}
TEST_F(SpvModuleScopeVarParserTest, MatrixStrideDecoration_Natural_Dropped) {
- auto p = parser(test::Assemble(Preamble() + FragMain() + R"(
+ auto p = parser(test::Assemble(Preamble() + FragMain() + R"(
OpName %myvar "myvar"
OpDecorate %myvar DescriptorSet 0
OpDecorate %myvar Binding 0
@@ -1368,10 +1311,10 @@ TEST_F(SpvModuleScopeVarParserTest, MatrixStrideDecoration_Natural_Dropped) {
%ptr_sb_s = OpTypePointer StorageBuffer %s
%myvar = OpVariable %ptr_sb_s StorageBuffer
)" + MainBody()));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- EXPECT_THAT(module_str, HasSubstr(R"(struct S {
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ EXPECT_THAT(module_str, HasSubstr(R"(struct S {
field0 : mat3x2<f32>,
}
@@ -1380,7 +1323,7 @@ TEST_F(SpvModuleScopeVarParserTest, MatrixStrideDecoration_Natural_Dropped) {
}
TEST_F(SpvModuleScopeVarParserTest, MatrixStrideDecoration) {
- auto p = parser(test::Assemble(Preamble() + FragMain() + R"(
+ auto p = parser(test::Assemble(Preamble() + FragMain() + R"(
OpName %myvar "myvar"
OpDecorate %myvar DescriptorSet 0
OpDecorate %myvar Binding 0
@@ -1397,10 +1340,10 @@ TEST_F(SpvModuleScopeVarParserTest, MatrixStrideDecoration) {
%ptr_sb_s = OpTypePointer StorageBuffer %s
%myvar = OpVariable %ptr_sb_s StorageBuffer
)" + MainBody()));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- EXPECT_THAT(module_str, HasSubstr(R"(struct S {
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ EXPECT_THAT(module_str, HasSubstr(R"(struct S {
@stride(64) @internal(disable_validation__ignore_stride)
field0 : mat3x2<f32>,
}
@@ -1410,7 +1353,7 @@ TEST_F(SpvModuleScopeVarParserTest, MatrixStrideDecoration) {
}
TEST_F(SpvModuleScopeVarParserTest, RowMajorDecoration_IsError) {
- auto p = parser(test::Assemble(Preamble() + FragMain() + R"(
+ auto p = parser(test::Assemble(Preamble() + FragMain() + R"(
OpName %myvar "myvar"
OpDecorate %s Block
OpMemberDecorate %s 0 RowMajor
@@ -1425,16 +1368,16 @@ TEST_F(SpvModuleScopeVarParserTest, RowMajorDecoration_IsError) {
%ptr_sb_s = OpTypePointer StorageBuffer %s
%myvar = OpVariable %ptr_sb_s StorageBuffer
)" + MainBody()));
- EXPECT_FALSE(p->BuildAndParseInternalModuleExceptFunctions());
- EXPECT_THAT(
- p->error(),
- Eq(R"(WGSL does not support row-major matrices: can't translate member 0 of %3 = OpTypeStruct %8)"))
- << p->error();
+ EXPECT_FALSE(p->BuildAndParseInternalModuleExceptFunctions());
+ EXPECT_THAT(
+ p->error(),
+ Eq(R"(WGSL does not support row-major matrices: can't translate member 0 of %3 = OpTypeStruct %8)"))
+ << p->error();
}
TEST_F(SpvModuleScopeVarParserTest, StorageBuffer_NonWritable_AllMembers) {
- // Variable should have access(read)
- auto p = parser(test::Assemble(Preamble() + FragMain() + R"(
+ // Variable should have access(read)
+ auto p = parser(test::Assemble(Preamble() + FragMain() + R"(
OpDecorate %s Block
OpDecorate %1 DescriptorSet 0
OpDecorate %1 Binding 0
@@ -1450,10 +1393,10 @@ TEST_F(SpvModuleScopeVarParserTest, StorageBuffer_NonWritable_AllMembers) {
%ptr_sb_s = OpTypePointer StorageBuffer %s
%1 = OpVariable %ptr_sb_s StorageBuffer
)" + MainBody()));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- EXPECT_THAT(module_str, HasSubstr(R"(struct S {
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ EXPECT_THAT(module_str, HasSubstr(R"(struct S {
field0 : f32,
field1 : f32,
}
@@ -1463,8 +1406,8 @@ TEST_F(SpvModuleScopeVarParserTest, StorageBuffer_NonWritable_AllMembers) {
}
TEST_F(SpvModuleScopeVarParserTest, StorageBuffer_NonWritable_NotAllMembers) {
- // Variable should have access(read_write)
- auto p = parser(test::Assemble(Preamble() + FragMain() + R"(
+ // Variable should have access(read_write)
+ auto p = parser(test::Assemble(Preamble() + FragMain() + R"(
OpDecorate %1 DescriptorSet 0
OpDecorate %1 Binding 0
OpDecorate %s Block
@@ -1479,10 +1422,10 @@ TEST_F(SpvModuleScopeVarParserTest, StorageBuffer_NonWritable_NotAllMembers) {
%ptr_sb_s = OpTypePointer StorageBuffer %s
%1 = OpVariable %ptr_sb_s StorageBuffer
)" + MainBody()));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- EXPECT_THAT(module_str, HasSubstr(R"(struct S {
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ EXPECT_THAT(module_str, HasSubstr(R"(struct S {
field0 : f32,
field1 : f32,
}
@@ -1491,11 +1434,10 @@ TEST_F(SpvModuleScopeVarParserTest, StorageBuffer_NonWritable_NotAllMembers) {
)")) << module_str;
}
-TEST_F(
- SpvModuleScopeVarParserTest,
- StorageBuffer_NonWritable_NotAllMembers_DuplicatedOnSameMember) { // NOLINT
- // Variable should have access(read_write)
- auto p = parser(test::Assemble(Preamble() + FragMain() + R"(
+TEST_F(SpvModuleScopeVarParserTest,
+ StorageBuffer_NonWritable_NotAllMembers_DuplicatedOnSameMember) { // NOLINT
+ // Variable should have access(read_write)
+ auto p = parser(test::Assemble(Preamble() + FragMain() + R"(
OpDecorate %s Block
OpDecorate %1 DescriptorSet 0
OpDecorate %1 Binding 0
@@ -1511,10 +1453,10 @@ TEST_F(
%ptr_sb_s = OpTypePointer StorageBuffer %s
%1 = OpVariable %ptr_sb_s StorageBuffer
)" + MainBody()));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- EXPECT_THAT(module_str, HasSubstr(R"(struct S {
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ EXPECT_THAT(module_str, HasSubstr(R"(struct S {
field0 : f32,
field1 : f32,
}
@@ -1524,36 +1466,35 @@ TEST_F(
}
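The three NonWritable tests above pin down a simple rule: the storage buffer variable is emitted with access(read) only when every struct member carries NonWritable; if any member lacks the decoration (even when it is duplicated on another member), the variable stays access(read_write). A minimal, hypothetical C++ sketch of that predicate, for illustration only:

    // Hypothetical helper, for illustration only -- not code from this patch.
    // A storage buffer maps to access(read) only if *every* member is NonWritable.
    #include <vector>
    bool AllMembersNonWritable(const std::vector<bool>& member_is_nonwritable) {
        for (bool non_writable : member_is_nonwritable) {
            if (!non_writable) {
                return false;
            }
        }
        return true;
    }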
TEST_F(SpvModuleScopeVarParserTest, ScalarSpecConstant_DeclareConst_Id_TooBig) {
- // Override IDs must be between 0 and 65535
- auto p = parser(test::Assemble(Preamble() + FragMain() + R"(
+ // Override IDs must be between 0 and 65535
+ auto p = parser(test::Assemble(Preamble() + FragMain() + R"(
OpDecorate %1 SpecId 65536
%bool = OpTypeBool
%1 = OpSpecConstantTrue %bool
%void = OpTypeVoid
%voidfn = OpTypeFunction %void
)" + MainBody()));
- EXPECT_FALSE(p->Parse());
- EXPECT_EQ(p->error(),
- "SpecId too large. WGSL override IDs must be between 0 and 65535: "
- "ID %1 has SpecId 65536");
+ EXPECT_FALSE(p->Parse());
+ EXPECT_EQ(p->error(),
+ "SpecId too large. WGSL override IDs must be between 0 and 65535: "
+ "ID %1 has SpecId 65536");
}
-TEST_F(SpvModuleScopeVarParserTest,
- ScalarSpecConstant_DeclareConst_Id_MaxValid) {
- // Override IDs must be between 0 and 65535
- auto p = parser(test::Assemble(Preamble() + FragMain() + R"(
+TEST_F(SpvModuleScopeVarParserTest, ScalarSpecConstant_DeclareConst_Id_MaxValid) {
+ // Override IDs must be between 0 and 65535
+ auto p = parser(test::Assemble(Preamble() + FragMain() + R"(
OpDecorate %1 SpecId 65535
%bool = OpTypeBool
%1 = OpSpecConstantTrue %bool
%void = OpTypeVoid
%voidfn = OpTypeFunction %void
)" + MainBody()));
- EXPECT_TRUE(p->Parse());
- EXPECT_EQ(p->error(), "");
+ EXPECT_TRUE(p->Parse());
+ EXPECT_EQ(p->error(), "");
}
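The two tests above bracket the documented bound for WGSL override IDs: SpecId 65535 is accepted, while 65536 is rejected with the error quoted in the first test. A hypothetical standalone check expressing that bound (illustration only, not the parser's actual code):

    // Hypothetical illustration of the documented 0..65535 bound; not code from this patch.
    #include <cstdint>
    constexpr bool IsValidWgslOverrideId(uint64_t spec_id) {
        return spec_id <= 65535u;
    }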
TEST_F(SpvModuleScopeVarParserTest, ScalarSpecConstant_DeclareConst_True) {
- auto p = parser(test::Assemble(Preamble() + FragMain() + R"(
+ auto p = parser(test::Assemble(Preamble() + FragMain() + R"(
OpName %c "myconst"
OpDecorate %c SpecId 12
%bool = OpTypeBool
@@ -1561,15 +1502,14 @@ TEST_F(SpvModuleScopeVarParserTest, ScalarSpecConstant_DeclareConst_True) {
%void = OpTypeVoid
%voidfn = OpTypeFunction %void
)" + MainBody()));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- EXPECT_THAT(module_str, HasSubstr("@id(12) override myconst : bool = true;"))
- << module_str;
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ EXPECT_THAT(module_str, HasSubstr("@id(12) override myconst : bool = true;")) << module_str;
}
TEST_F(SpvModuleScopeVarParserTest, ScalarSpecConstant_DeclareConst_False) {
- auto p = parser(test::Assemble(Preamble() + FragMain() + R"(
+ auto p = parser(test::Assemble(Preamble() + FragMain() + R"(
OpName %c "myconst"
OpDecorate %c SpecId 12
%bool = OpTypeBool
@@ -1577,15 +1517,14 @@ TEST_F(SpvModuleScopeVarParserTest, ScalarSpecConstant_DeclareConst_False) {
%void = OpTypeVoid
%voidfn = OpTypeFunction %void
)" + MainBody()));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- EXPECT_THAT(module_str, HasSubstr("@id(12) override myconst : bool = false;"))
- << module_str;
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ EXPECT_THAT(module_str, HasSubstr("@id(12) override myconst : bool = false;")) << module_str;
}
TEST_F(SpvModuleScopeVarParserTest, ScalarSpecConstant_DeclareConst_U32) {
- auto p = parser(test::Assemble(Preamble() + FragMain() + R"(
+ auto p = parser(test::Assemble(Preamble() + FragMain() + R"(
OpName %c "myconst"
OpDecorate %c SpecId 12
%uint = OpTypeInt 32 0
@@ -1593,15 +1532,14 @@ TEST_F(SpvModuleScopeVarParserTest, ScalarSpecConstant_DeclareConst_U32) {
%void = OpTypeVoid
%voidfn = OpTypeFunction %void
)" + MainBody()));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- EXPECT_THAT(module_str, HasSubstr("@id(12) override myconst : u32 = 42u;"))
- << module_str;
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ EXPECT_THAT(module_str, HasSubstr("@id(12) override myconst : u32 = 42u;")) << module_str;
}
TEST_F(SpvModuleScopeVarParserTest, ScalarSpecConstant_DeclareConst_I32) {
- auto p = parser(test::Assemble(Preamble() + FragMain() + R"(
+ auto p = parser(test::Assemble(Preamble() + FragMain() + R"(
OpName %c "myconst"
OpDecorate %c SpecId 12
%int = OpTypeInt 32 1
@@ -1609,15 +1547,14 @@ TEST_F(SpvModuleScopeVarParserTest, ScalarSpecConstant_DeclareConst_I32) {
%void = OpTypeVoid
%voidfn = OpTypeFunction %void
)" + MainBody()));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- EXPECT_THAT(module_str, HasSubstr("@id(12) override myconst : i32 = 42;"))
- << module_str;
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ EXPECT_THAT(module_str, HasSubstr("@id(12) override myconst : i32 = 42i;")) << module_str;
}
TEST_F(SpvModuleScopeVarParserTest, ScalarSpecConstant_DeclareConst_F32) {
- auto p = parser(test::Assemble(Preamble() + FragMain() + R"(
+ auto p = parser(test::Assemble(Preamble() + FragMain() + R"(
OpName %c "myconst"
OpDecorate %c SpecId 12
%float = OpTypeFloat 32
@@ -1625,32 +1562,29 @@ TEST_F(SpvModuleScopeVarParserTest, ScalarSpecConstant_DeclareConst_F32) {
%void = OpTypeVoid
%voidfn = OpTypeFunction %void
)" + MainBody()));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- EXPECT_THAT(module_str, HasSubstr("@id(12) override myconst : f32 = 2.5;"))
- << module_str;
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ EXPECT_THAT(module_str, HasSubstr("@id(12) override myconst : f32 = 2.5f;")) << module_str;
}
-TEST_F(SpvModuleScopeVarParserTest,
- ScalarSpecConstant_DeclareConst_F32_WithoutSpecId) {
- // When we don't have a spec ID, declare an undecorated module-scope constant.
- auto p = parser(test::Assemble(Preamble() + FragMain() + R"(
+TEST_F(SpvModuleScopeVarParserTest, ScalarSpecConstant_DeclareConst_F32_WithoutSpecId) {
+ // When we don't have a spec ID, declare an undecorated module-scope constant.
+ auto p = parser(test::Assemble(Preamble() + FragMain() + R"(
OpName %c "myconst"
%float = OpTypeFloat 32
%c = OpSpecConstant %float 2.5
%void = OpTypeVoid
%voidfn = OpTypeFunction %void
)" + MainBody()));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- EXPECT_THAT(module_str, HasSubstr("override myconst : f32 = 2.5;"))
- << module_str;
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ EXPECT_THAT(module_str, HasSubstr("override myconst : f32 = 2.5f;")) << module_str;
}
TEST_F(SpvModuleScopeVarParserTest, ScalarSpecConstant_UsedInFunction) {
- const auto assembly = Preamble() + FragMain() + R"(
+ const auto assembly = Preamble() + FragMain() + R"(
OpName %c "myconst"
%void = OpTypeVoid
%voidfn = OpTypeFunction %void
@@ -1663,22 +1597,22 @@ TEST_F(SpvModuleScopeVarParserTest, ScalarSpecConstant_UsedInFunction) {
OpReturnValue %1
OpFunctionEnd
)" + MainBody();
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
- auto fe = p->function_emitter(100);
- EXPECT_TRUE(fe.EmitBody()) << p->error();
- EXPECT_TRUE(p->error().empty());
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions()) << p->error();
+ auto fe = p->function_emitter(100);
+ EXPECT_TRUE(fe.EmitBody()) << p->error();
+ EXPECT_TRUE(p->error().empty());
- Program program = p->program();
- const auto got = test::ToString(program, fe.ast_body());
+ Program program = p->program();
+ const auto got = test::ToString(program, fe.ast_body());
- EXPECT_THAT(got, HasSubstr("return (myconst + myconst);")) << got;
+ EXPECT_THAT(got, HasSubstr("return (myconst + myconst);")) << got;
}
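Taken together, the DeclareConst tests above document the spec-constant mapping these expectations rely on: a SPIR-V spec constant decorated with SpecId N becomes "@id(N) override ..." in WGSL, one without a SpecId becomes a plain "override" with no @id attribute, and initializers are printed with explicit literal suffixes (42u, 42i, 2.5f) matching the WGSL type.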
// Returns the start of a shader for testing SampleId,
// parameterized by store type of %int or %uint
std::string SampleIdPreamble(std::string store_type) {
- return R"(
+ return R"(
OpCapability Shader
OpCapability SampleRateShading
OpMemoryModel Logical Simple
@@ -1691,42 +1625,42 @@ std::string SampleIdPreamble(std::string store_type) {
%uint = OpTypeInt 32 0
%int = OpTypeInt 32 1
%ptr_ty = OpTypePointer Input )" +
- store_type + R"(
+ store_type + R"(
%1 = OpVariable %ptr_ty Input
)";
}
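SampleIdPreamble only varies the store type of the input variable (%int or %uint); each test below appends its own function body. The WGSL sample_index builtin is always u32, which is why the expected output for the %int variants routes the entry-point parameter through bitcast<i32>(x_1_param), while the %uint variants assign it directly.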
TEST_F(SpvModuleScopeVarParserTest, SampleId_I32_Load_Direct) {
- const std::string assembly = SampleIdPreamble("%int") + R"(
+ const std::string assembly = SampleIdPreamble("%int") + R"(
%main = OpFunction %void None %voidfn
%entry = OpLabel
%2 = OpLoad %int %1
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- const std::string expected =
- R"(var<private> x_1 : i32;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ const std::string expected =
+ R"(var<private> x_1 : i32;
fn main_1() {
let x_2 : i32 = x_1;
return;
}
-@stage(fragment)
+@fragment
fn main(@builtin(sample_index) x_1_param : u32) {
x_1 = bitcast<i32>(x_1_param);
main_1();
}
)";
- EXPECT_EQ(module_str, expected) << module_str;
+ EXPECT_EQ(module_str, expected) << module_str;
}
TEST_F(SpvModuleScopeVarParserTest, SampleId_I32_Load_CopyObject) {
- const std::string assembly = SampleIdPreamble("%int") + R"(
+ const std::string assembly = SampleIdPreamble("%int") + R"(
%main = OpFunction %void None %voidfn
%entry = OpLabel
%copy_ptr = OpCopyObject %ptr_ty %1
@@ -1734,12 +1668,12 @@ TEST_F(SpvModuleScopeVarParserTest, SampleId_I32_Load_CopyObject) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- const std::string expected =
- R"(Module{
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ const std::string expected =
+ R"(Module{
Variable{
x_1
private
@@ -1810,7 +1744,7 @@ TEST_F(SpvModuleScopeVarParserTest, SampleId_I32_Load_CopyObject) {
}
TEST_F(SpvModuleScopeVarParserTest, SampleId_I32_Load_AccessChain) {
- const std::string assembly = SampleIdPreamble("%int") + R"(
+ const std::string assembly = SampleIdPreamble("%int") + R"(
%main = OpFunction %void None %voidfn
%entry = OpLabel
%copy_ptr = OpAccessChain %ptr_ty %1
@@ -1818,28 +1752,28 @@ TEST_F(SpvModuleScopeVarParserTest, SampleId_I32_Load_AccessChain) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- const std::string expected = R"(var<private> x_1 : i32;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ const std::string expected = R"(var<private> x_1 : i32;
fn main_1() {
let x_2 : i32 = x_1;
return;
}
-@stage(fragment)
+@fragment
fn main(@builtin(sample_index) x_1_param : u32) {
x_1 = bitcast<i32>(x_1_param);
main_1();
}
)";
- EXPECT_EQ(module_str, expected);
+ EXPECT_EQ(module_str, expected);
}
TEST_F(SpvModuleScopeVarParserTest, SampleId_I32_FunctParam) {
- const std::string assembly = SampleIdPreamble("%int") + R"(
+ const std::string assembly = SampleIdPreamble("%int") + R"(
%helper_ty = OpTypeFunction %int %ptr_ty
%helper = OpFunction %int None %helper_ty
%param = OpFunctionParameter %ptr_ty
@@ -1854,46 +1788,45 @@ TEST_F(SpvModuleScopeVarParserTest, SampleId_I32_FunctParam) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
+ auto p = parser(test::Assemble(assembly));
- // This example is invalid because you can't pass pointer-to-Input
- // as a function parameter.
- EXPECT_FALSE(p->Parse());
- EXPECT_FALSE(p->success());
- EXPECT_THAT(p->error(),
- HasSubstr("Invalid storage class for pointer operand 1"));
+ // This example is invalid because you can't pass pointer-to-Input
+ // as a function parameter.
+ EXPECT_FALSE(p->Parse());
+ EXPECT_FALSE(p->success());
+ EXPECT_THAT(p->error(), HasSubstr("Invalid storage class for pointer operand 1"));
}
TEST_F(SpvModuleScopeVarParserTest, SampleId_U32_Load_Direct) {
- const std::string assembly = SampleIdPreamble("%uint") + R"(
+ const std::string assembly = SampleIdPreamble("%uint") + R"(
%main = OpFunction %void None %voidfn
%entry = OpLabel
%2 = OpLoad %uint %1
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- const std::string expected = R"(var<private> x_1 : u32;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ const std::string expected = R"(var<private> x_1 : u32;
fn main_1() {
let x_2 : u32 = x_1;
return;
}
-@stage(fragment)
+@fragment
fn main(@builtin(sample_index) x_1_param : u32) {
x_1 = x_1_param;
main_1();
}
)";
- EXPECT_EQ(module_str, expected) << module_str;
+ EXPECT_EQ(module_str, expected) << module_str;
}
TEST_F(SpvModuleScopeVarParserTest, SampleId_U32_Load_CopyObject) {
- const std::string assembly = SampleIdPreamble("%uint") + R"(
+ const std::string assembly = SampleIdPreamble("%uint") + R"(
%main = OpFunction %void None %voidfn
%entry = OpLabel
%copy_ptr = OpCopyObject %ptr_ty %1
@@ -1901,11 +1834,11 @@ TEST_F(SpvModuleScopeVarParserTest, SampleId_U32_Load_CopyObject) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- const std::string expected = R"(var<private> x_1 : u32;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ const std::string expected = R"(var<private> x_1 : u32;
fn main_1() {
let x_11 : ptr<private, u32> = &(x_1);
@@ -1913,17 +1846,17 @@ fn main_1() {
return;
}
-@stage(fragment)
+@fragment
fn main(@builtin(sample_index) x_1_param : u32) {
x_1 = x_1_param;
main_1();
}
)";
- EXPECT_EQ(module_str, expected) << module_str;
+ EXPECT_EQ(module_str, expected) << module_str;
}
TEST_F(SpvModuleScopeVarParserTest, SampleId_U32_Load_AccessChain) {
- const std::string assembly = SampleIdPreamble("%uint") + R"(
+ const std::string assembly = SampleIdPreamble("%uint") + R"(
%main = OpFunction %void None %voidfn
%entry = OpLabel
%copy_ptr = OpAccessChain %ptr_ty %1
@@ -1931,28 +1864,28 @@ TEST_F(SpvModuleScopeVarParserTest, SampleId_U32_Load_AccessChain) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- const std::string expected = R"(var<private> x_1 : u32;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ const std::string expected = R"(var<private> x_1 : u32;
fn main_1() {
let x_2 : u32 = x_1;
return;
}
-@stage(fragment)
+@fragment
fn main(@builtin(sample_index) x_1_param : u32) {
x_1 = x_1_param;
main_1();
}
)";
- EXPECT_EQ(module_str, expected) << module_str;
+ EXPECT_EQ(module_str, expected) << module_str;
}
TEST_F(SpvModuleScopeVarParserTest, SampleId_U32_FunctParam) {
- const std::string assembly = SampleIdPreamble("%uint") + R"(
+ const std::string assembly = SampleIdPreamble("%uint") + R"(
%helper_ty = OpTypeFunction %uint %ptr_ty
%helper = OpFunction %uint None %helper_ty
%param = OpFunctionParameter %ptr_ty
@@ -1967,32 +1900,31 @@ TEST_F(SpvModuleScopeVarParserTest, SampleId_U32_FunctParam) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- // This example is invalid because you can't pass pointer-to-Input
- // as a function parameter.
- EXPECT_FALSE(p->Parse());
- EXPECT_THAT(p->error(),
- HasSubstr("Invalid storage class for pointer operand 1"));
+ auto p = parser(test::Assemble(assembly));
+ // This example is invalid because you can't pass pointer-to-Input
+ // as a function parameter.
+ EXPECT_FALSE(p->Parse());
+ EXPECT_THAT(p->error(), HasSubstr("Invalid storage class for pointer operand 1"));
}
// Returns the start of a shader for testing SampleMask
// parameterized by store type.
std::string SampleMaskPreamble(std::string store_type, uint32_t stride = 0u) {
- return std::string(R"(
+ return std::string(R"(
OpCapability Shader
OpMemoryModel Logical Simple
OpEntryPoint Fragment %main "main" %1
OpExecutionMode %main OriginUpperLeft
OpDecorate %1 BuiltIn SampleMask
)") +
- (stride > 0u ? R"(
+ (stride > 0u ? R"(
OpDecorate %uarr1 ArrayStride 4
OpDecorate %uarr2 ArrayStride 4
OpDecorate %iarr1 ArrayStride 4
OpDecorate %iarr2 ArrayStride 4
)"
- : "") +
- R"(
+ : "") +
+ R"(
%void = OpTypeVoid
%voidfn = OpTypeFunction %void
%float = OpTypeFloat 32
@@ -2011,14 +1943,14 @@ std::string SampleMaskPreamble(std::string store_type, uint32_t stride = 0u) {
%iptr_out_ty = OpTypePointer Output %int
%uptr_out_ty = OpTypePointer Output %uint
%in_ty = OpTypePointer Input )" +
- store_type + R"(
+ store_type + R"(
%out_ty = OpTypePointer Output )" +
- store_type + R"(
+ store_type + R"(
)";
}
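SampleMaskPreamble declares both 1- and 2-element array types (with optional ArrayStride decorations) so the tests below can cover the supported and unsupported shapes: WGSL's sample_mask builtin is a single u32, so only a 1-element SampleMask array translates, and element 0 is accessed as x_1[0i]. A hypothetical one-line check for that constraint (illustration only, not code from this patch):

    // Hypothetical illustration; WGSL's sample_mask is one u32, so only a
    // 1-element SampleMask array is translatable. Not code from this patch.
    bool IsTranslatableSampleMaskArraySize(unsigned element_count) {
        return element_count == 1u;
    }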
TEST_F(SpvModuleScopeVarParserTest, SampleMask_In_ArraySize2_Error) {
- const std::string assembly = SampleMaskPreamble("%uarr2") + R"(
+ const std::string assembly = SampleMaskPreamble("%uarr2") + R"(
%1 = OpVariable %in_ty Input
%main = OpFunction %void None %voidfn
@@ -2028,16 +1960,15 @@ TEST_F(SpvModuleScopeVarParserTest, SampleMask_In_ArraySize2_Error) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_FALSE(p->BuildAndParseInternalModule());
- EXPECT_THAT(p->error(),
- HasSubstr("WGSL supports a sample mask of at most 32 bits. "
- "SampleMask must be an array of 1 element"))
- << p->error() << assembly;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_FALSE(p->BuildAndParseInternalModule());
+ EXPECT_THAT(p->error(), HasSubstr("WGSL supports a sample mask of at most 32 bits. "
+ "SampleMask must be an array of 1 element"))
+ << p->error() << assembly;
}
TEST_F(SpvModuleScopeVarParserTest, SampleMask_In_U32_Direct) {
- const std::string assembly = SampleMaskPreamble("%uarr1") + R"(
+ const std::string assembly = SampleMaskPreamble("%uarr1") + R"(
%1 = OpVariable %in_ty Input
%main = OpFunction %void None %voidfn
@@ -2047,28 +1978,28 @@ TEST_F(SpvModuleScopeVarParserTest, SampleMask_In_U32_Direct) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- const std::string expected = R"(var<private> x_1 : array<u32, 1u>;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ const std::string expected = R"(var<private> x_1 : array<u32, 1u>;
fn main_1() {
- let x_3 : u32 = x_1[0];
+ let x_3 : u32 = x_1[0i];
return;
}
-@stage(fragment)
+@fragment
fn main(@builtin(sample_mask) x_1_param : u32) {
- x_1[0] = x_1_param;
+ x_1[0i] = x_1_param;
main_1();
}
)";
- EXPECT_EQ(module_str, expected);
+ EXPECT_EQ(module_str, expected);
}
TEST_F(SpvModuleScopeVarParserTest, SampleMask_In_U32_CopyObject) {
- const std::string assembly = SampleMaskPreamble("%uarr1") + R"(
+ const std::string assembly = SampleMaskPreamble("%uarr1") + R"(
%1 = OpVariable %in_ty Input
%main = OpFunction %void None %voidfn
@@ -2079,28 +2010,28 @@ TEST_F(SpvModuleScopeVarParserTest, SampleMask_In_U32_CopyObject) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- const std::string expected = R"(var<private> x_1 : array<u32, 1u>;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ const std::string expected = R"(var<private> x_1 : array<u32, 1u>;
fn main_1() {
- let x_4 : u32 = x_1[0];
+ let x_4 : u32 = x_1[0i];
return;
}
-@stage(fragment)
+@fragment
fn main(@builtin(sample_mask) x_1_param : u32) {
- x_1[0] = x_1_param;
+ x_1[0i] = x_1_param;
main_1();
}
)";
- EXPECT_EQ(module_str, expected) << module_str;
+ EXPECT_EQ(module_str, expected) << module_str;
}
TEST_F(SpvModuleScopeVarParserTest, SampleMask_In_U32_AccessChain) {
- const std::string assembly = SampleMaskPreamble("%uarr1") + R"(
+ const std::string assembly = SampleMaskPreamble("%uarr1") + R"(
%1 = OpVariable %in_ty Input
%main = OpFunction %void None %voidfn
@@ -2111,28 +2042,28 @@ TEST_F(SpvModuleScopeVarParserTest, SampleMask_In_U32_AccessChain) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- const std::string expected = R"(var<private> x_1 : array<u32, 1u>;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ const std::string expected = R"(var<private> x_1 : array<u32, 1u>;
fn main_1() {
- let x_4 : u32 = x_1[0];
+ let x_4 : u32 = x_1[0i];
return;
}
-@stage(fragment)
+@fragment
fn main(@builtin(sample_mask) x_1_param : u32) {
- x_1[0] = x_1_param;
+ x_1[0i] = x_1_param;
main_1();
}
)";
- EXPECT_EQ(module_str, expected);
+ EXPECT_EQ(module_str, expected);
}
TEST_F(SpvModuleScopeVarParserTest, SampleMask_In_I32_Direct) {
- const std::string assembly = SampleMaskPreamble("%iarr1") + R"(
+ const std::string assembly = SampleMaskPreamble("%iarr1") + R"(
%1 = OpVariable %in_ty Input
%main = OpFunction %void None %voidfn
@@ -2142,28 +2073,28 @@ TEST_F(SpvModuleScopeVarParserTest, SampleMask_In_I32_Direct) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- const std::string expected = R"(var<private> x_1 : array<i32, 1u>;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ const std::string expected = R"(var<private> x_1 : array<i32, 1u>;
fn main_1() {
- let x_3 : i32 = x_1[0];
+ let x_3 : i32 = x_1[0i];
return;
}
-@stage(fragment)
+@fragment
fn main(@builtin(sample_mask) x_1_param : u32) {
- x_1[0] = bitcast<i32>(x_1_param);
+ x_1[0i] = bitcast<i32>(x_1_param);
main_1();
}
)";
- EXPECT_EQ(module_str, expected) << module_str;
+ EXPECT_EQ(module_str, expected) << module_str;
}
TEST_F(SpvModuleScopeVarParserTest, SampleMask_In_I32_CopyObject) {
- const std::string assembly = SampleMaskPreamble("%iarr1") + R"(
+ const std::string assembly = SampleMaskPreamble("%iarr1") + R"(
%1 = OpVariable %in_ty Input
%main = OpFunction %void None %voidfn
@@ -2174,28 +2105,28 @@ TEST_F(SpvModuleScopeVarParserTest, SampleMask_In_I32_CopyObject) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- const std::string expected = R"(var<private> x_1 : array<i32, 1u>;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ const std::string expected = R"(var<private> x_1 : array<i32, 1u>;
fn main_1() {
- let x_4 : i32 = x_1[0];
+ let x_4 : i32 = x_1[0i];
return;
}
-@stage(fragment)
+@fragment
fn main(@builtin(sample_mask) x_1_param : u32) {
- x_1[0] = bitcast<i32>(x_1_param);
+ x_1[0i] = bitcast<i32>(x_1_param);
main_1();
}
)";
- EXPECT_EQ(module_str, expected) << module_str;
+ EXPECT_EQ(module_str, expected) << module_str;
}
TEST_F(SpvModuleScopeVarParserTest, SampleMask_In_I32_AccessChain) {
- const std::string assembly = SampleMaskPreamble("%iarr1") + R"(
+ const std::string assembly = SampleMaskPreamble("%iarr1") + R"(
%1 = OpVariable %in_ty Input
%main = OpFunction %void None %voidfn
@@ -2206,28 +2137,28 @@ TEST_F(SpvModuleScopeVarParserTest, SampleMask_In_I32_AccessChain) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- const std::string expected = R"(var<private> x_1 : array<i32, 1u>;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ const std::string expected = R"(var<private> x_1 : array<i32, 1u>;
fn main_1() {
- let x_4 : i32 = x_1[0];
+ let x_4 : i32 = x_1[0i];
return;
}
-@stage(fragment)
+@fragment
fn main(@builtin(sample_mask) x_1_param : u32) {
- x_1[0] = bitcast<i32>(x_1_param);
+ x_1[0i] = bitcast<i32>(x_1_param);
main_1();
}
)";
- EXPECT_EQ(module_str, expected) << module_str;
+ EXPECT_EQ(module_str, expected) << module_str;
}
TEST_F(SpvModuleScopeVarParserTest, SampleMask_Out_ArraySize2_Error) {
- const std::string assembly = SampleMaskPreamble("%uarr2") + R"(
+ const std::string assembly = SampleMaskPreamble("%uarr2") + R"(
%1 = OpVariable %out_ty Output
%main = OpFunction %void None %voidfn
@@ -2237,16 +2168,15 @@ TEST_F(SpvModuleScopeVarParserTest, SampleMask_Out_ArraySize2_Error) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_FALSE(p->BuildAndParseInternalModule());
- EXPECT_THAT(p->error(),
- HasSubstr("WGSL supports a sample mask of at most 32 bits. "
- "SampleMask must be an array of 1 element"))
- << p->error() << assembly;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_FALSE(p->BuildAndParseInternalModule());
+ EXPECT_THAT(p->error(), HasSubstr("WGSL supports a sample mask of at most 32 bits. "
+ "SampleMask must be an array of 1 element"))
+ << p->error() << assembly;
}
TEST_F(SpvModuleScopeVarParserTest, SampleMask_Out_U32_Direct) {
- const std::string assembly = SampleMaskPreamble("%uarr1") + R"(
+ const std::string assembly = SampleMaskPreamble("%uarr1") + R"(
%1 = OpVariable %out_ty Output
%main = OpFunction %void None %voidfn
@@ -2256,14 +2186,14 @@ TEST_F(SpvModuleScopeVarParserTest, SampleMask_Out_U32_Direct) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- const std::string expected = R"(var<private> x_1 : array<u32, 1u>;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ const std::string expected = R"(var<private> x_1 : array<u32, 1u>;
fn main_1() {
- x_1[0] = 0u;
+ x_1[0i] = 0u;
return;
}
@@ -2272,17 +2202,17 @@ struct main_out {
x_1_1 : u32,
}
-@stage(fragment)
+@fragment
fn main() -> main_out {
main_1();
- return main_out(x_1[0]);
+ return main_out(x_1[0i]);
}
)";
- EXPECT_EQ(module_str, expected);
+ EXPECT_EQ(module_str, expected);
}
TEST_F(SpvModuleScopeVarParserTest, SampleMask_Out_U32_CopyObject) {
- const std::string assembly = SampleMaskPreamble("%uarr1") + R"(
+ const std::string assembly = SampleMaskPreamble("%uarr1") + R"(
%1 = OpVariable %out_ty Output
%main = OpFunction %void None %voidfn
@@ -2293,14 +2223,14 @@ TEST_F(SpvModuleScopeVarParserTest, SampleMask_Out_U32_CopyObject) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- const std::string expected = R"(var<private> x_1 : array<u32, 1u>;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ const std::string expected = R"(var<private> x_1 : array<u32, 1u>;
fn main_1() {
- x_1[0] = 0u;
+ x_1[0i] = 0u;
return;
}
@@ -2309,17 +2239,17 @@ struct main_out {
x_1_1 : u32,
}
-@stage(fragment)
+@fragment
fn main() -> main_out {
main_1();
- return main_out(x_1[0]);
+ return main_out(x_1[0i]);
}
)";
- EXPECT_EQ(module_str, expected);
+ EXPECT_EQ(module_str, expected);
}
TEST_F(SpvModuleScopeVarParserTest, SampleMask_Out_U32_AccessChain) {
- const std::string assembly = SampleMaskPreamble("%uarr1") + R"(
+ const std::string assembly = SampleMaskPreamble("%uarr1") + R"(
%1 = OpVariable %out_ty Output
%main = OpFunction %void None %voidfn
@@ -2330,14 +2260,14 @@ TEST_F(SpvModuleScopeVarParserTest, SampleMask_Out_U32_AccessChain) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- const std::string expected = R"(var<private> x_1 : array<u32, 1u>;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ const std::string expected = R"(var<private> x_1 : array<u32, 1u>;
fn main_1() {
- x_1[0] = 0u;
+ x_1[0i] = 0u;
return;
}
@@ -2346,17 +2276,17 @@ struct main_out {
x_1_1 : u32,
}
-@stage(fragment)
+@fragment
fn main() -> main_out {
main_1();
- return main_out(x_1[0]);
+ return main_out(x_1[0i]);
}
)";
- EXPECT_EQ(module_str, expected);
+ EXPECT_EQ(module_str, expected);
}
TEST_F(SpvModuleScopeVarParserTest, SampleMask_Out_I32_Direct) {
- const std::string assembly = SampleMaskPreamble("%iarr1") + R"(
+ const std::string assembly = SampleMaskPreamble("%iarr1") + R"(
%1 = OpVariable %out_ty Output
%main = OpFunction %void None %voidfn
@@ -2366,14 +2296,14 @@ TEST_F(SpvModuleScopeVarParserTest, SampleMask_Out_I32_Direct) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- const std::string expected = R"(var<private> x_1 : array<i32, 1u>;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ const std::string expected = R"(var<private> x_1 : array<i32, 1u>;
fn main_1() {
- x_1[0] = 12;
+ x_1[0i] = 12i;
return;
}
@@ -2382,17 +2312,17 @@ struct main_out {
x_1_1 : u32,
}
-@stage(fragment)
+@fragment
fn main() -> main_out {
main_1();
- return main_out(bitcast<u32>(x_1[0]));
+ return main_out(bitcast<u32>(x_1[0i]));
}
)";
- EXPECT_EQ(module_str, expected);
+ EXPECT_EQ(module_str, expected);
}
TEST_F(SpvModuleScopeVarParserTest, SampleMask_Out_I32_CopyObject) {
- const std::string assembly = SampleMaskPreamble("%iarr1") + R"(
+ const std::string assembly = SampleMaskPreamble("%iarr1") + R"(
%1 = OpVariable %out_ty Output
%main = OpFunction %void None %voidfn
@@ -2403,14 +2333,14 @@ TEST_F(SpvModuleScopeVarParserTest, SampleMask_Out_I32_CopyObject) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- const std::string expected = R"(var<private> x_1 : array<i32, 1u>;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ const std::string expected = R"(var<private> x_1 : array<i32, 1u>;
fn main_1() {
- x_1[0] = 12;
+ x_1[0i] = 12i;
return;
}
@@ -2419,17 +2349,17 @@ struct main_out {
x_1_1 : u32,
}
-@stage(fragment)
+@fragment
fn main() -> main_out {
main_1();
- return main_out(bitcast<u32>(x_1[0]));
+ return main_out(bitcast<u32>(x_1[0i]));
}
)";
- EXPECT_EQ(module_str, expected);
+ EXPECT_EQ(module_str, expected);
}
TEST_F(SpvModuleScopeVarParserTest, SampleMask_Out_I32_AccessChain) {
- const std::string assembly = SampleMaskPreamble("%iarr1") + R"(
+ const std::string assembly = SampleMaskPreamble("%iarr1") + R"(
%1 = OpVariable %out_ty Output
%main = OpFunction %void None %voidfn
@@ -2440,14 +2370,14 @@ TEST_F(SpvModuleScopeVarParserTest, SampleMask_Out_I32_AccessChain) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- const std::string expected = R"(var<private> x_1 : array<i32, 1u>;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ const std::string expected = R"(var<private> x_1 : array<i32, 1u>;
fn main_1() {
- x_1[0] = 12;
+ x_1[0i] = 12i;
return;
}
@@ -2456,17 +2386,17 @@ struct main_out {
x_1_1 : u32,
}
-@stage(fragment)
+@fragment
fn main() -> main_out {
main_1();
- return main_out(bitcast<u32>(x_1[0]));
+ return main_out(bitcast<u32>(x_1[0i]));
}
)";
- EXPECT_EQ(module_str, expected);
+ EXPECT_EQ(module_str, expected);
}
TEST_F(SpvModuleScopeVarParserTest, SampleMask_In_WithStride) {
- const std::string assembly = SampleMaskPreamble("%uarr1", 4u) + R"(
+ const std::string assembly = SampleMaskPreamble("%uarr1", 4u) + R"(
%1 = OpVariable %in_ty Input
%main = OpFunction %void None %voidfn
@@ -2476,11 +2406,11 @@ TEST_F(SpvModuleScopeVarParserTest, SampleMask_In_WithStride) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- const std::string expected = R"(type Arr = @stride(4) array<u32, 1u>;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ const std::string expected = R"(type Arr = @stride(4) array<u32, 1u>;
type Arr_1 = @stride(4) array<u32, 2u>;
@@ -2491,21 +2421,21 @@ type Arr_3 = @stride(4) array<i32, 2u>;
var<private> x_1 : Arr;
fn main_1() {
- let x_3 : u32 = x_1[0];
+ let x_3 : u32 = x_1[0i];
return;
}
-@stage(fragment)
+@fragment
fn main(@builtin(sample_mask) x_1_param : u32) {
- x_1[0] = x_1_param;
+ x_1[0i] = x_1_param;
main_1();
}
)";
- EXPECT_EQ(module_str, expected);
+ EXPECT_EQ(module_str, expected);
}
TEST_F(SpvModuleScopeVarParserTest, SampleMask_Out_WithStride) {
- const std::string assembly = SampleMaskPreamble("%uarr1", 4u) + R"(
+ const std::string assembly = SampleMaskPreamble("%uarr1", 4u) + R"(
%1 = OpVariable %out_ty Output
%main = OpFunction %void None %voidfn
@@ -2515,11 +2445,11 @@ TEST_F(SpvModuleScopeVarParserTest, SampleMask_Out_WithStride) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- const std::string expected = R"(type Arr = @stride(4) array<u32, 1u>;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ const std::string expected = R"(type Arr = @stride(4) array<u32, 1u>;
type Arr_1 = @stride(4) array<u32, 2u>;
@@ -2530,7 +2460,7 @@ type Arr_3 = @stride(4) array<i32, 2u>;
var<private> x_1 : Arr;
fn main_1() {
- x_1[0] = 0u;
+ x_1[0i] = 0u;
return;
}
@@ -2539,19 +2469,19 @@ struct main_out {
x_1_1 : u32,
}
-@stage(fragment)
+@fragment
fn main() -> main_out {
main_1();
- return main_out(x_1[0]);
+ return main_out(x_1[0i]);
}
)";
- EXPECT_EQ(module_str, expected);
+ EXPECT_EQ(module_str, expected);
}
// Returns the start of a shader for testing VertexIndex,
// parameterized by store type of %int or %uint
std::string VertexIndexPreamble(std::string store_type) {
- return R"(
+ return R"(
OpCapability Shader
OpMemoryModel Logical Simple
OpEntryPoint Vertex %main "main" %position %1
@@ -2563,7 +2493,7 @@ std::string VertexIndexPreamble(std::string store_type) {
%uint = OpTypeInt 32 0
%int = OpTypeInt 32 1
%ptr_ty = OpTypePointer Input )" +
- store_type + R"(
+ store_type + R"(
%1 = OpVariable %ptr_ty Input
%v4float = OpTypeVector %float 4
%posty = OpTypePointer Output %v4float
@@ -2572,18 +2502,18 @@ std::string VertexIndexPreamble(std::string store_type) {
}
TEST_F(SpvModuleScopeVarParserTest, VertexIndex_I32_Load_Direct) {
- const std::string assembly = VertexIndexPreamble("%int") + R"(
+ const std::string assembly = VertexIndexPreamble("%int") + R"(
%main = OpFunction %void None %voidfn
%entry = OpLabel
%2 = OpLoad %int %1
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- const std::string expected = R"(var<private> x_1 : i32;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ const std::string expected = R"(var<private> x_1 : i32;
var<private> x_4 : vec4<f32>;
@@ -2597,18 +2527,18 @@ struct main_out {
x_4_1 : vec4<f32>,
}
-@stage(vertex)
+@vertex
fn main(@builtin(vertex_index) x_1_param : u32) -> main_out {
x_1 = bitcast<i32>(x_1_param);
main_1();
return main_out(x_4);
}
)";
- EXPECT_EQ(module_str, expected) << module_str;
+ EXPECT_EQ(module_str, expected) << module_str;
}
TEST_F(SpvModuleScopeVarParserTest, VertexIndex_I32_Load_CopyObject) {
- const std::string assembly = VertexIndexPreamble("%int") + R"(
+ const std::string assembly = VertexIndexPreamble("%int") + R"(
%main = OpFunction %void None %voidfn
%entry = OpLabel
%copy_ptr = OpCopyObject %ptr_ty %1
@@ -2616,11 +2546,11 @@ TEST_F(SpvModuleScopeVarParserTest, VertexIndex_I32_Load_CopyObject) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- const std::string expected = R"(var<private> x_1 : i32;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ const std::string expected = R"(var<private> x_1 : i32;
var<private> x_4 : vec4<f32>;
@@ -2635,18 +2565,18 @@ struct main_out {
x_4_1 : vec4<f32>,
}
-@stage(vertex)
+@vertex
fn main(@builtin(vertex_index) x_1_param : u32) -> main_out {
x_1 = bitcast<i32>(x_1_param);
main_1();
return main_out(x_4);
}
)";
- EXPECT_EQ(module_str, expected);
+ EXPECT_EQ(module_str, expected);
}
TEST_F(SpvModuleScopeVarParserTest, VertexIndex_I32_Load_AccessChain) {
- const std::string assembly = VertexIndexPreamble("%int") + R"(
+ const std::string assembly = VertexIndexPreamble("%int") + R"(
%main = OpFunction %void None %voidfn
%entry = OpLabel
%copy_ptr = OpAccessChain %ptr_ty %1
@@ -2654,11 +2584,11 @@ TEST_F(SpvModuleScopeVarParserTest, VertexIndex_I32_Load_AccessChain) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- const std::string expected = R"(var<private> x_1 : i32;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ const std::string expected = R"(var<private> x_1 : i32;
var<private> x_4 : vec4<f32>;
@@ -2672,29 +2602,29 @@ struct main_out {
x_4_1 : vec4<f32>,
}
-@stage(vertex)
+@vertex
fn main(@builtin(vertex_index) x_1_param : u32) -> main_out {
x_1 = bitcast<i32>(x_1_param);
main_1();
return main_out(x_4);
}
)";
- EXPECT_EQ(module_str, expected);
+ EXPECT_EQ(module_str, expected);
}
TEST_F(SpvModuleScopeVarParserTest, VertexIndex_U32_Load_Direct) {
- const std::string assembly = VertexIndexPreamble("%uint") + R"(
+ const std::string assembly = VertexIndexPreamble("%uint") + R"(
%main = OpFunction %void None %voidfn
%entry = OpLabel
%2 = OpLoad %uint %1
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- const std::string expected = R"(var<private> x_1 : u32;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ const std::string expected = R"(var<private> x_1 : u32;
var<private> x_4 : vec4<f32>;
@@ -2708,18 +2638,18 @@ struct main_out {
x_4_1 : vec4<f32>,
}
-@stage(vertex)
+@vertex
fn main(@builtin(vertex_index) x_1_param : u32) -> main_out {
x_1 = x_1_param;
main_1();
return main_out(x_4);
}
)";
- EXPECT_EQ(module_str, expected);
+ EXPECT_EQ(module_str, expected);
}
TEST_F(SpvModuleScopeVarParserTest, VertexIndex_U32_Load_CopyObject) {
- const std::string assembly = VertexIndexPreamble("%uint") + R"(
+ const std::string assembly = VertexIndexPreamble("%uint") + R"(
%main = OpFunction %void None %voidfn
%entry = OpLabel
%copy_ptr = OpCopyObject %ptr_ty %1
@@ -2727,11 +2657,11 @@ TEST_F(SpvModuleScopeVarParserTest, VertexIndex_U32_Load_CopyObject) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- const std::string expected = R"(var<private> x_1 : u32;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ const std::string expected = R"(var<private> x_1 : u32;
var<private> x_4 : vec4<f32>;
@@ -2746,18 +2676,18 @@ struct main_out {
x_4_1 : vec4<f32>,
}
-@stage(vertex)
+@vertex
fn main(@builtin(vertex_index) x_1_param : u32) -> main_out {
x_1 = x_1_param;
main_1();
return main_out(x_4);
}
)";
- EXPECT_EQ(module_str, expected);
+ EXPECT_EQ(module_str, expected);
}
TEST_F(SpvModuleScopeVarParserTest, VertexIndex_U32_Load_AccessChain) {
- const std::string assembly = VertexIndexPreamble("%uint") + R"(
+ const std::string assembly = VertexIndexPreamble("%uint") + R"(
%main = OpFunction %void None %voidfn
%entry = OpLabel
%copy_ptr = OpAccessChain %ptr_ty %1
@@ -2765,11 +2695,11 @@ TEST_F(SpvModuleScopeVarParserTest, VertexIndex_U32_Load_AccessChain) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- const std::string expected = R"(var<private> x_1 : u32;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ const std::string expected = R"(var<private> x_1 : u32;
var<private> x_4 : vec4<f32>;
@@ -2783,18 +2713,18 @@ struct main_out {
x_4_1 : vec4<f32>,
}
-@stage(vertex)
+@vertex
fn main(@builtin(vertex_index) x_1_param : u32) -> main_out {
x_1 = x_1_param;
main_1();
return main_out(x_4);
}
)";
- EXPECT_EQ(module_str, expected);
+ EXPECT_EQ(module_str, expected);
}
TEST_F(SpvModuleScopeVarParserTest, VertexIndex_U32_FunctParam) {
- const std::string assembly = VertexIndexPreamble("%uint") + R"(
+ const std::string assembly = VertexIndexPreamble("%uint") + R"(
%helper_ty = OpTypeFunction %uint %ptr_ty
%helper = OpFunction %uint None %helper_ty
%param = OpFunctionParameter %ptr_ty
@@ -2809,19 +2739,18 @@ TEST_F(SpvModuleScopeVarParserTest, VertexIndex_U32_FunctParam) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
+ auto p = parser(test::Assemble(assembly));
- // This example is invalid because you can't pass pointer-to-Input
- // as a function parameter.
- EXPECT_FALSE(p->Parse());
- EXPECT_THAT(p->error(),
- HasSubstr("Invalid storage class for pointer operand 1"));
+ // This example is invalid because you can't pass pointer-to-Input
+ // as a function parameter.
+ EXPECT_FALSE(p->Parse());
+ EXPECT_THAT(p->error(), HasSubstr("Invalid storage class for pointer operand 1"));
}
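The VertexIndex tests follow the same shape as the SampleId ones: the WGSL vertex_index builtin is u32, so %int-typed inputs are bitcast in the generated entry point, and passing a pointer-to-Input as a function parameter is rejected with "Invalid storage class for pointer operand 1". The InstanceIndex tests that follow repeat the pattern for instance_index.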
// Returns the start of a shader for testing InstanceIndex,
// parameterized by store type of %int or %uint
std::string InstanceIndexPreamble(std::string store_type) {
- return R"(
+ return R"(
OpCapability Shader
OpMemoryModel Logical Simple
OpEntryPoint Vertex %main "main" %position %1
@@ -2834,7 +2763,7 @@ std::string InstanceIndexPreamble(std::string store_type) {
%uint = OpTypeInt 32 0
%int = OpTypeInt 32 1
%ptr_ty = OpTypePointer Input )" +
- store_type + R"(
+ store_type + R"(
%1 = OpVariable %ptr_ty Input
%v4float = OpTypeVector %float 4
%posty = OpTypePointer Output %v4float
@@ -2843,18 +2772,18 @@ std::string InstanceIndexPreamble(std::string store_type) {
}
TEST_F(SpvModuleScopeVarParserTest, InstanceIndex_I32_Load_Direct) {
- const std::string assembly = InstanceIndexPreamble("%int") + R"(
+ const std::string assembly = InstanceIndexPreamble("%int") + R"(
%main = OpFunction %void None %voidfn
%entry = OpLabel
%2 = OpLoad %int %1
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- const std::string expected = R"(var<private> x_1 : i32;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ const std::string expected = R"(var<private> x_1 : i32;
var<private> position : vec4<f32>;
@@ -2868,18 +2797,18 @@ struct main_out {
position_1 : vec4<f32>,
}
-@stage(vertex)
+@vertex
fn main(@builtin(instance_index) x_1_param : u32) -> main_out {
x_1 = bitcast<i32>(x_1_param);
main_1();
return main_out(position);
}
)";
- EXPECT_EQ(module_str, expected) << module_str;
+ EXPECT_EQ(module_str, expected) << module_str;
}
TEST_F(SpvModuleScopeVarParserTest, InstanceIndex_I32_Load_CopyObject) {
- const std::string assembly = InstanceIndexPreamble("%int") + R"(
+ const std::string assembly = InstanceIndexPreamble("%int") + R"(
%main = OpFunction %void None %voidfn
%entry = OpLabel
%copy_ptr = OpCopyObject %ptr_ty %1
@@ -2887,11 +2816,11 @@ TEST_F(SpvModuleScopeVarParserTest, InstanceIndex_I32_Load_CopyObject) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- const std::string expected = R"(var<private> x_1 : i32;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ const std::string expected = R"(var<private> x_1 : i32;
var<private> position : vec4<f32>;
@@ -2906,18 +2835,18 @@ struct main_out {
position_1 : vec4<f32>,
}
-@stage(vertex)
+@vertex
fn main(@builtin(instance_index) x_1_param : u32) -> main_out {
x_1 = bitcast<i32>(x_1_param);
main_1();
return main_out(position);
}
)";
- EXPECT_EQ(module_str, expected) << module_str;
+ EXPECT_EQ(module_str, expected) << module_str;
}
TEST_F(SpvModuleScopeVarParserTest, InstanceIndex_I32_Load_AccessChain) {
- const std::string assembly = InstanceIndexPreamble("%int") + R"(
+ const std::string assembly = InstanceIndexPreamble("%int") + R"(
%main = OpFunction %void None %voidfn
%entry = OpLabel
%copy_ptr = OpAccessChain %ptr_ty %1
@@ -2925,11 +2854,11 @@ TEST_F(SpvModuleScopeVarParserTest, InstanceIndex_I32_Load_AccessChain) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- const std::string expected = R"(var<private> x_1 : i32;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ const std::string expected = R"(var<private> x_1 : i32;
var<private> position : vec4<f32>;
@@ -2943,18 +2872,18 @@ struct main_out {
position_1 : vec4<f32>,
}
-@stage(vertex)
+@vertex
fn main(@builtin(instance_index) x_1_param : u32) -> main_out {
x_1 = bitcast<i32>(x_1_param);
main_1();
return main_out(position);
}
)";
- EXPECT_EQ(module_str, expected) << module_str;
+ EXPECT_EQ(module_str, expected) << module_str;
}
TEST_F(SpvModuleScopeVarParserTest, InstanceIndex_I32_FunctParam) {
- const std::string assembly = InstanceIndexPreamble("%int") + R"(
+ const std::string assembly = InstanceIndexPreamble("%int") + R"(
%helper_ty = OpTypeFunction %int %ptr_ty
%helper = OpFunction %int None %helper_ty
%param = OpFunctionParameter %ptr_ty
@@ -2969,27 +2898,26 @@ TEST_F(SpvModuleScopeVarParserTest, InstanceIndex_I32_FunctParam) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- // This example is invalid because you can't pass pointer-to-Input
- // as a function parameter.
- EXPECT_FALSE(p->Parse());
- EXPECT_THAT(p->error(),
- HasSubstr("Invalid storage class for pointer operand 1"));
+ auto p = parser(test::Assemble(assembly));
+ // This example is invalid because you can't pass pointer-to-Input
+ // as a function parameter.
+ EXPECT_FALSE(p->Parse());
+ EXPECT_THAT(p->error(), HasSubstr("Invalid storage class for pointer operand 1"));
}
TEST_F(SpvModuleScopeVarParserTest, InstanceIndex_U32_Load_Direct) {
- const std::string assembly = InstanceIndexPreamble("%uint") + R"(
+ const std::string assembly = InstanceIndexPreamble("%uint") + R"(
%main = OpFunction %void None %voidfn
%entry = OpLabel
%2 = OpLoad %uint %1
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- const std::string expected = R"(var<private> x_1 : u32;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ const std::string expected = R"(var<private> x_1 : u32;
var<private> position : vec4<f32>;
@@ -3003,18 +2931,18 @@ struct main_out {
position_1 : vec4<f32>,
}
-@stage(vertex)
+@vertex
fn main(@builtin(instance_index) x_1_param : u32) -> main_out {
x_1 = x_1_param;
main_1();
return main_out(position);
}
)";
- EXPECT_EQ(module_str, expected);
+ EXPECT_EQ(module_str, expected);
}
TEST_F(SpvModuleScopeVarParserTest, InstanceIndex_U32_Load_CopyObject) {
- const std::string assembly = InstanceIndexPreamble("%uint") + R"(
+ const std::string assembly = InstanceIndexPreamble("%uint") + R"(
%main = OpFunction %void None %voidfn
%entry = OpLabel
%copy_ptr = OpCopyObject %ptr_ty %1
@@ -3022,11 +2950,11 @@ TEST_F(SpvModuleScopeVarParserTest, InstanceIndex_U32_Load_CopyObject) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- const std::string expected = R"(var<private> x_1 : u32;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ const std::string expected = R"(var<private> x_1 : u32;
var<private> position : vec4<f32>;
@@ -3041,18 +2969,18 @@ struct main_out {
position_1 : vec4<f32>,
}
-@stage(vertex)
+@vertex
fn main(@builtin(instance_index) x_1_param : u32) -> main_out {
x_1 = x_1_param;
main_1();
return main_out(position);
}
)";
- EXPECT_EQ(module_str, expected);
+ EXPECT_EQ(module_str, expected);
}
TEST_F(SpvModuleScopeVarParserTest, InstanceIndex_U32_Load_AccessChain) {
- const std::string assembly = InstanceIndexPreamble("%uint") + R"(
+ const std::string assembly = InstanceIndexPreamble("%uint") + R"(
%main = OpFunction %void None %voidfn
%entry = OpLabel
%copy_ptr = OpAccessChain %ptr_ty %1
@@ -3060,11 +2988,11 @@ TEST_F(SpvModuleScopeVarParserTest, InstanceIndex_U32_Load_AccessChain) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- const std::string expected = R"(var<private> x_1 : u32;
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ const std::string expected = R"(var<private> x_1 : u32;
var<private> position : vec4<f32>;
@@ -3078,18 +3006,18 @@ struct main_out {
position_1 : vec4<f32>,
}
-@stage(vertex)
+@vertex
fn main(@builtin(instance_index) x_1_param : u32) -> main_out {
x_1 = x_1_param;
main_1();
return main_out(position);
}
)";
- EXPECT_EQ(module_str, expected);
+ EXPECT_EQ(module_str, expected);
}
TEST_F(SpvModuleScopeVarParserTest, InstanceIndex_U32_FunctParam) {
- const std::string assembly = InstanceIndexPreamble("%uint") + R"(
+ const std::string assembly = InstanceIndexPreamble("%uint") + R"(
%helper_ty = OpTypeFunction %uint %ptr_ty
%helper = OpFunction %uint None %helper_ty
%param = OpFunctionParameter %ptr_ty
@@ -3104,25 +3032,23 @@ TEST_F(SpvModuleScopeVarParserTest, InstanceIndex_U32_FunctParam) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- // This example is invalid because you can't pass pointer-to-Input
- // as a function parameter.
- EXPECT_FALSE(p->Parse());
- EXPECT_THAT(p->error(),
- HasSubstr("Invalid storage class for pointer operand 1"));
+ auto p = parser(test::Assemble(assembly));
+ // This example is invalid because you can't pass pointer-to-Input
+ // as a function parameter.
+ EXPECT_FALSE(p->Parse());
+ EXPECT_THAT(p->error(), HasSubstr("Invalid storage class for pointer operand 1"));
}
// Returns the start of a shader for testing LocalInvocationIndex,
// parameterized by store type of %int or %uint
-std::string ComputeBuiltinInputPreamble(std::string builtin,
- std::string store_type) {
- return R"(
+std::string ComputeBuiltinInputPreamble(std::string builtin, std::string store_type) {
+ return R"(
OpCapability Shader
OpMemoryModel Logical Simple
OpEntryPoint GLCompute %main "main" %1
OpExecutionMode %main LocalSize 1 1 1
OpDecorate %1 BuiltIn )" +
- builtin + R"(
+ builtin + R"(
%void = OpTypeVoid
%voidfn = OpTypeFunction %void
%float = OpTypeFloat 32
@@ -3131,141 +3057,137 @@ std::string ComputeBuiltinInputPreamble(std::string builtin,
%v3uint = OpTypeVector %uint 3
%v3int = OpTypeVector %int 3
%ptr_ty = OpTypePointer Input )" +
- store_type + R"(
+ store_type + R"(
%1 = OpVariable %ptr_ty Input
)";
}
struct ComputeBuiltinInputCase {
- std::string spirv_builtin;
- std::string spirv_store_type;
- std::string wgsl_builtin;
+ std::string spirv_builtin;
+ std::string spirv_store_type;
+ std::string wgsl_builtin;
};
inline std::ostream& operator<<(std::ostream& o, ComputeBuiltinInputCase c) {
- return o << "ComputeBuiltinInputCase(" << c.spirv_builtin << " "
- << c.spirv_store_type << " " << c.wgsl_builtin << ")";
+ return o << "ComputeBuiltinInputCase(" << c.spirv_builtin << " " << c.spirv_store_type << " "
+ << c.wgsl_builtin << ")";
}
std::string WgslType(std::string spirv_type) {
- if (spirv_type == "%uint") {
- return "u32";
- }
- if (spirv_type == "%int") {
- return "i32";
- }
- if (spirv_type == "%v3uint") {
- return "vec3<u32>";
- }
- if (spirv_type == "%v3int") {
- return "vec3<i32>";
- }
- return "error";
+ if (spirv_type == "%uint") {
+ return "u32";
+ }
+ if (spirv_type == "%int") {
+ return "i32";
+ }
+ if (spirv_type == "%v3uint") {
+ return "vec3<u32>";
+ }
+ if (spirv_type == "%v3int") {
+ return "vec3<i32>";
+ }
+ return "error";
}
std::string UnsignedWgslType(std::string wgsl_type) {
- if (wgsl_type == "u32") {
- return "u32";
- }
- if (wgsl_type == "i32") {
- return "u32";
- }
- if (wgsl_type == "vec3<u32>") {
- return "vec3<u32>";
- }
- if (wgsl_type == "vec3<i32>") {
- return "vec3<u32>";
- }
- return "error";
+ if (wgsl_type == "u32") {
+ return "u32";
+ }
+ if (wgsl_type == "i32") {
+ return "u32";
+ }
+ if (wgsl_type == "vec3<u32>") {
+ return "vec3<u32>";
+ }
+ if (wgsl_type == "vec3<i32>") {
+ return "vec3<u32>";
+ }
+ return "error";
}
std::string SignedWgslType(std::string wgsl_type) {
- if (wgsl_type == "u32") {
- return "i32";
- }
- if (wgsl_type == "i32") {
- return "i32";
- }
- if (wgsl_type == "vec3<u32>") {
- return "vec3<i32>";
- }
- if (wgsl_type == "vec3<i32>") {
- return "vec3<i32>";
- }
- return "error";
+ if (wgsl_type == "u32") {
+ return "i32";
+ }
+ if (wgsl_type == "i32") {
+ return "i32";
+ }
+ if (wgsl_type == "vec3<u32>") {
+ return "vec3<i32>";
+ }
+ if (wgsl_type == "vec3<i32>") {
+ return "vec3<i32>";
+ }
+ return "error";
}
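// For example (derived directly from the mappings above): WgslType("%v3int")
// returns "vec3<i32>", UnsignedWgslType("vec3<i32>") returns "vec3<u32>", and
// SignedWgslType("vec3<u32>") returns "vec3<i32>"; any unrecognized spelling
// falls through to "error".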
using SpvModuleScopeVarParserTest_ComputeBuiltin =
SpvParserTestBase<::testing::TestWithParam<ComputeBuiltinInputCase>>;
TEST_P(SpvModuleScopeVarParserTest_ComputeBuiltin, Load_Direct) {
- const auto wgsl_type = WgslType(GetParam().spirv_store_type);
- const auto wgsl_builtin = GetParam().wgsl_builtin;
- const auto unsigned_wgsl_type = UnsignedWgslType(wgsl_type);
- const auto signed_wgsl_type = SignedWgslType(wgsl_type);
- const std::string assembly =
- ComputeBuiltinInputPreamble(GetParam().spirv_builtin,
- GetParam().spirv_store_type) +
- R"(
+ const auto wgsl_type = WgslType(GetParam().spirv_store_type);
+ const auto wgsl_builtin = GetParam().wgsl_builtin;
+ const auto unsigned_wgsl_type = UnsignedWgslType(wgsl_type);
+ const auto signed_wgsl_type = SignedWgslType(wgsl_type);
+ const std::string assembly =
+ ComputeBuiltinInputPreamble(GetParam().spirv_builtin, GetParam().spirv_store_type) +
+ R"(
%main = OpFunction %void None %voidfn
%entry = OpLabel
%2 = OpLoad )" +
- GetParam().spirv_store_type + R"( %1
+ GetParam().spirv_store_type + R"( %1
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- std::string expected = R"(var<private> x_1 : ${wgsl_type};
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ std::string expected = R"(var<private> x_1 : ${wgsl_type};
fn main_1() {
let x_2 : ${wgsl_type} = x_1;
return;
}
-@stage(compute) @workgroup_size(1, 1, 1)
+@compute @workgroup_size(1i, 1i, 1i)
fn main(@builtin(${wgsl_builtin}) x_1_param : ${unsigned_wgsl_type}) {
x_1 = ${assignment_value};
main_1();
}
)";
- expected = utils::ReplaceAll(expected, "${wgsl_type}", wgsl_type);
- expected =
- utils::ReplaceAll(expected, "${unsigned_wgsl_type}", unsigned_wgsl_type);
- expected = utils::ReplaceAll(expected, "${wgsl_builtin}", wgsl_builtin);
- expected =
- utils::ReplaceAll(expected, "${assignment_value}",
- (wgsl_type == unsigned_wgsl_type)
- ? "x_1_param"
- : "bitcast<" + signed_wgsl_type + ">(x_1_param)");
+ expected = utils::ReplaceAll(expected, "${wgsl_type}", wgsl_type);
+ expected = utils::ReplaceAll(expected, "${unsigned_wgsl_type}", unsigned_wgsl_type);
+ expected = utils::ReplaceAll(expected, "${wgsl_builtin}", wgsl_builtin);
+ expected = utils::ReplaceAll(expected, "${assignment_value}",
+ (wgsl_type == unsigned_wgsl_type)
+ ? "x_1_param"
+ : "bitcast<" + signed_wgsl_type + ">(x_1_param)");
- EXPECT_EQ(module_str, expected) << module_str;
+ EXPECT_EQ(module_str, expected) << module_str;
}
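// A concrete instantiation of the expected-output template above, assuming the
// {"LocalInvocationId", "%v3int", "local_invocation_id"} parameter case from the
// suite below: the module-scope variable keeps the signed store type while the
// entry-point parameter uses the unsigned builtin type, so the wrapper bitcasts.
//
//   var<private> x_1 : vec3<i32>;
//
//   @compute @workgroup_size(1i, 1i, 1i)
//   fn main(@builtin(local_invocation_id) x_1_param : vec3<u32>) {
//     x_1 = bitcast<vec3<i32>>(x_1_param);
//     main_1();
//   }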
TEST_P(SpvModuleScopeVarParserTest_ComputeBuiltin, Load_CopyObject) {
- const auto wgsl_type = WgslType(GetParam().spirv_store_type);
- const auto wgsl_builtin = GetParam().wgsl_builtin;
- const auto unsigned_wgsl_type = UnsignedWgslType(wgsl_type);
- const auto signed_wgsl_type = SignedWgslType(wgsl_type);
- const std::string assembly =
- ComputeBuiltinInputPreamble(GetParam().spirv_builtin,
- GetParam().spirv_store_type) +
- R"(
+ const auto wgsl_type = WgslType(GetParam().spirv_store_type);
+ const auto wgsl_builtin = GetParam().wgsl_builtin;
+ const auto unsigned_wgsl_type = UnsignedWgslType(wgsl_type);
+ const auto signed_wgsl_type = SignedWgslType(wgsl_type);
+ const std::string assembly =
+ ComputeBuiltinInputPreamble(GetParam().spirv_builtin, GetParam().spirv_store_type) +
+ R"(
%main = OpFunction %void None %voidfn
%entry = OpLabel
%13 = OpCopyObject %ptr_ty %1
%2 = OpLoad )" +
- GetParam().spirv_store_type + R"( %13
+ GetParam().spirv_store_type + R"( %13
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- std::string expected = R"(var<private> x_1 : ${wgsl_type};
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ std::string expected = R"(var<private> x_1 : ${wgsl_type};
fn main_1() {
let x_13 : ptr<private, ${wgsl_type}> = &(x_1);
@@ -3273,86 +3195,80 @@ fn main_1() {
return;
}
-@stage(compute) @workgroup_size(1, 1, 1)
+@compute @workgroup_size(1i, 1i, 1i)
fn main(@builtin(${wgsl_builtin}) x_1_param : ${unsigned_wgsl_type}) {
x_1 = ${assignment_value};
main_1();
}
)";
- expected = utils::ReplaceAll(expected, "${wgsl_type}", wgsl_type);
- expected =
- utils::ReplaceAll(expected, "${unsigned_wgsl_type}", unsigned_wgsl_type);
- expected = utils::ReplaceAll(expected, "${wgsl_builtin}", wgsl_builtin);
- expected =
- utils::ReplaceAll(expected, "${assignment_value}",
- (wgsl_type == unsigned_wgsl_type)
- ? "x_1_param"
- : "bitcast<" + signed_wgsl_type + ">(x_1_param)");
+ expected = utils::ReplaceAll(expected, "${wgsl_type}", wgsl_type);
+ expected = utils::ReplaceAll(expected, "${unsigned_wgsl_type}", unsigned_wgsl_type);
+ expected = utils::ReplaceAll(expected, "${wgsl_builtin}", wgsl_builtin);
+ expected = utils::ReplaceAll(expected, "${assignment_value}",
+ (wgsl_type == unsigned_wgsl_type)
+ ? "x_1_param"
+ : "bitcast<" + signed_wgsl_type + ">(x_1_param)");
- EXPECT_EQ(module_str, expected) << module_str;
+ EXPECT_EQ(module_str, expected) << module_str;
}
TEST_P(SpvModuleScopeVarParserTest_ComputeBuiltin, Load_AccessChain) {
- const auto wgsl_type = WgslType(GetParam().spirv_store_type);
- const auto wgsl_builtin = GetParam().wgsl_builtin;
- const auto unsigned_wgsl_type = UnsignedWgslType(wgsl_type);
- const auto signed_wgsl_type = SignedWgslType(wgsl_type);
- const std::string assembly =
- ComputeBuiltinInputPreamble(GetParam().spirv_builtin,
- GetParam().spirv_store_type) +
- R"(
+ const auto wgsl_type = WgslType(GetParam().spirv_store_type);
+ const auto wgsl_builtin = GetParam().wgsl_builtin;
+ const auto unsigned_wgsl_type = UnsignedWgslType(wgsl_type);
+ const auto signed_wgsl_type = SignedWgslType(wgsl_type);
+ const std::string assembly =
+ ComputeBuiltinInputPreamble(GetParam().spirv_builtin, GetParam().spirv_store_type) +
+ R"(
%main = OpFunction %void None %voidfn
%entry = OpLabel
%13 = OpAccessChain %ptr_ty %1
%2 = OpLoad )" +
- GetParam().spirv_store_type + R"( %13
+ GetParam().spirv_store_type + R"( %13
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
- EXPECT_TRUE(p->error().empty());
- const auto module_str = test::ToString(p->program());
- std::string expected = R"(var<private> x_1 : ${wgsl_type};
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
+ EXPECT_TRUE(p->error().empty());
+ const auto module_str = test::ToString(p->program());
+ std::string expected = R"(var<private> x_1 : ${wgsl_type};
fn main_1() {
let x_2 : ${wgsl_type} = x_1;
return;
}
-@stage(compute) @workgroup_size(1, 1, 1)
+@compute @workgroup_size(1i, 1i, 1i)
fn main(@builtin(${wgsl_builtin}) x_1_param : ${unsigned_wgsl_type}) {
x_1 = ${assignment_value};
main_1();
}
)";
- expected = utils::ReplaceAll(expected, "${wgsl_type}", wgsl_type);
- expected =
- utils::ReplaceAll(expected, "${unsigned_wgsl_type}", unsigned_wgsl_type);
- expected = utils::ReplaceAll(expected, "${wgsl_builtin}", wgsl_builtin);
- expected =
- utils::ReplaceAll(expected, "${assignment_value}",
- (wgsl_type == unsigned_wgsl_type)
- ? "x_1_param"
- : "bitcast<" + signed_wgsl_type + ">(x_1_param)");
-
- EXPECT_EQ(module_str, expected) << module_str;
-}
-
-INSTANTIATE_TEST_SUITE_P(
- Samples,
- SpvModuleScopeVarParserTest_ComputeBuiltin,
- ::testing::ValuesIn(std::vector<ComputeBuiltinInputCase>{
- {"LocalInvocationIndex", "%uint", "local_invocation_index"},
- {"LocalInvocationIndex", "%int", "local_invocation_index"},
- {"LocalInvocationId", "%v3uint", "local_invocation_id"},
- {"LocalInvocationId", "%v3int", "local_invocation_id"},
- {"GlobalInvocationId", "%v3uint", "global_invocation_id"},
- {"GlobalInvocationId", "%v3int", "global_invocation_id"},
- {"WorkgroupId", "%v3uint", "workgroup_id"},
- {"WorkgroupId", "%v3int", "workgroup_id"}}));
+ expected = utils::ReplaceAll(expected, "${wgsl_type}", wgsl_type);
+ expected = utils::ReplaceAll(expected, "${unsigned_wgsl_type}", unsigned_wgsl_type);
+ expected = utils::ReplaceAll(expected, "${wgsl_builtin}", wgsl_builtin);
+ expected = utils::ReplaceAll(expected, "${assignment_value}",
+ (wgsl_type == unsigned_wgsl_type)
+ ? "x_1_param"
+ : "bitcast<" + signed_wgsl_type + ">(x_1_param)");
+
+ EXPECT_EQ(module_str, expected) << module_str;
+}
+
+INSTANTIATE_TEST_SUITE_P(Samples,
+ SpvModuleScopeVarParserTest_ComputeBuiltin,
+ ::testing::ValuesIn(std::vector<ComputeBuiltinInputCase>{
+ {"LocalInvocationIndex", "%uint", "local_invocation_index"},
+ {"LocalInvocationIndex", "%int", "local_invocation_index"},
+ {"LocalInvocationId", "%v3uint", "local_invocation_id"},
+ {"LocalInvocationId", "%v3int", "local_invocation_id"},
+ {"GlobalInvocationId", "%v3uint", "global_invocation_id"},
+ {"GlobalInvocationId", "%v3int", "global_invocation_id"},
+ {"WorkgroupId", "%v3uint", "workgroup_id"},
+ {"WorkgroupId", "%v3int", "workgroup_id"}}));
// TODO(dneto): crbug.com/tint/752
// NumWorkgroups support is blocked by crbug.com/tint/752
@@ -3361,8 +3277,8 @@ INSTANTIATE_TEST_SUITE_P(
// {"NumWorkgroups", "%int", "num_workgroups"}
TEST_F(SpvModuleScopeVarParserTest, RegisterInputOutputVars) {
- const std::string assembly =
- R"(
+ const std::string assembly =
+ R"(
OpCapability Shader
OpMemoryModel Logical Simple
OpEntryPoint Fragment %1000 "w1000"
@@ -3383,7 +3299,7 @@ TEST_F(SpvModuleScopeVarParserTest, RegisterInputOutputVars) {
OpDecorate %15 Location 5
)" + CommonTypes() +
- R"(
+ R"(
%ptr_in_uint = OpTypePointer Input %uint
%ptr_out_uint = OpTypePointer Output %uint
@@ -3411,8 +3327,8 @@ TEST_F(SpvModuleScopeVarParserTest, RegisterInputOutputVars) {
%300 = OpFunction %void None %voidfn
%entry_300 = OpLabel
- %dummy_300_1 = OpFunctionCall %void %100
- %dummy_300_2 = OpFunctionCall %void %200
+ %placeholder_300_1 = OpFunctionCall %void %100
+ %placeholder_300_2 = OpFunctionCall %void %200
OpReturn
OpFunctionEnd
@@ -3425,103 +3341,100 @@ TEST_F(SpvModuleScopeVarParserTest, RegisterInputOutputVars) {
; Call %100
%1100 = OpFunction %void None %voidfn
%entry_1100 = OpLabel
- %dummy_1100_1 = OpFunctionCall %void %100
+ %placeholder_1100_1 = OpFunctionCall %void %100
OpReturn
OpFunctionEnd
; Call %200
%1200 = OpFunction %void None %voidfn
%entry_1200 = OpLabel
- %dummy_1200_1 = OpFunctionCall %void %200
+ %placeholder_1200_1 = OpFunctionCall %void %200
OpReturn
OpFunctionEnd
; Call %300
%1300 = OpFunction %void None %voidfn
%entry_1300 = OpLabel
- %dummy_1300_1 = OpFunctionCall %void %300
+ %placeholder_1300_1 = OpFunctionCall %void %300
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
- EXPECT_TRUE(p->error().empty());
+ auto p = parser(test::Assemble(assembly));
+ ASSERT_TRUE(p->BuildAndParseInternalModule()) << p->error() << assembly;
+ EXPECT_TRUE(p->error().empty());
- const auto& info_1000 = p->GetEntryPointInfo(1000);
- EXPECT_EQ(1u, info_1000.size());
- EXPECT_TRUE(info_1000[0].inputs.empty());
- EXPECT_TRUE(info_1000[0].outputs.empty());
+ const auto& info_1000 = p->GetEntryPointInfo(1000);
+ EXPECT_EQ(1u, info_1000.size());
+ EXPECT_TRUE(info_1000[0].inputs.empty());
+ EXPECT_TRUE(info_1000[0].outputs.empty());
- const auto& info_1100 = p->GetEntryPointInfo(1100);
- EXPECT_EQ(1u, info_1100.size());
- EXPECT_THAT(info_1100[0].inputs, ElementsAre(1));
- EXPECT_TRUE(info_1100[0].outputs.empty());
+ const auto& info_1100 = p->GetEntryPointInfo(1100);
+ EXPECT_EQ(1u, info_1100.size());
+ EXPECT_THAT(info_1100[0].inputs, ElementsAre(1));
+ EXPECT_TRUE(info_1100[0].outputs.empty());
- const auto& info_1200 = p->GetEntryPointInfo(1200);
- EXPECT_EQ(1u, info_1200.size());
- EXPECT_THAT(info_1200[0].inputs, ElementsAre(2));
- EXPECT_THAT(info_1200[0].outputs, ElementsAre(15));
+ const auto& info_1200 = p->GetEntryPointInfo(1200);
+ EXPECT_EQ(1u, info_1200.size());
+ EXPECT_THAT(info_1200[0].inputs, ElementsAre(2));
+ EXPECT_THAT(info_1200[0].outputs, ElementsAre(15));
- const auto& info_1300 = p->GetEntryPointInfo(1300);
- EXPECT_EQ(1u, info_1300.size());
- EXPECT_THAT(info_1300[0].inputs, ElementsAre(1, 2));
- EXPECT_THAT(info_1300[0].outputs, ElementsAre(15));
+ const auto& info_1300 = p->GetEntryPointInfo(1300);
+ EXPECT_EQ(1u, info_1300.size());
+ EXPECT_THAT(info_1300[0].inputs, ElementsAre(1, 2));
+ EXPECT_THAT(info_1300[0].outputs, ElementsAre(15));
- // Validation incorrectly reports an overlap for the duplicated variable %1 on
- // shader %1300
- p->SkipDumpingPending(
- "https://github.com/KhronosGroup/SPIRV-Tools/issues/4403");
+ // Validation incorrectly reports an overlap for the duplicated variable %1 on
+ // shader %1300
+ p->SkipDumpingPending("https://github.com/KhronosGroup/SPIRV-Tools/issues/4403");
}
TEST_F(SpvModuleScopeVarParserTest, InputVarsConvertedToPrivate) {
- const auto assembly = Preamble() + FragMain() + CommonTypes() + R"(
+ const auto assembly = Preamble() + FragMain() + CommonTypes() + R"(
%ptr_in_uint = OpTypePointer Input %uint
%1 = OpVariable %ptr_in_uint Input
)" + MainBody();
- auto p = parser(test::Assemble(assembly));
+ auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- EXPECT_TRUE(p->error().empty());
- const auto got = test::ToString(p->program());
- const std::string expected = "var<private> x_1 : u32;";
- EXPECT_THAT(got, HasSubstr(expected)) << got;
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ EXPECT_TRUE(p->error().empty());
+ const auto got = test::ToString(p->program());
+ const std::string expected = "var<private> x_1 : u32;";
+ EXPECT_THAT(got, HasSubstr(expected)) << got;
}
TEST_F(SpvModuleScopeVarParserTest, OutputVarsConvertedToPrivate) {
- const auto assembly = Preamble() + FragMain() + CommonTypes() + R"(
+ const auto assembly = Preamble() + FragMain() + CommonTypes() + R"(
%ptr_out_uint = OpTypePointer Output %uint
%1 = OpVariable %ptr_out_uint Output
)" + MainBody();
- auto p = parser(test::Assemble(assembly));
+ auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- EXPECT_TRUE(p->error().empty());
- const auto got = test::ToString(p->program());
- const std::string expected = "var<private> x_1 : u32;";
- EXPECT_THAT(got, HasSubstr(expected)) << got;
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ EXPECT_TRUE(p->error().empty());
+ const auto got = test::ToString(p->program());
+ const std::string expected = "var<private> x_1 : u32;";
+ EXPECT_THAT(got, HasSubstr(expected)) << got;
}
-TEST_F(SpvModuleScopeVarParserTest,
- OutputVarsConvertedToPrivate_WithInitializer) {
- const auto assembly = Preamble() + FragMain() + CommonTypes() + R"(
+TEST_F(SpvModuleScopeVarParserTest, OutputVarsConvertedToPrivate_WithInitializer) {
+ const auto assembly = Preamble() + FragMain() + CommonTypes() + R"(
%ptr_out_uint = OpTypePointer Output %uint
%1 = OpVariable %ptr_out_uint Output %uint_1
)" + MainBody();
- auto p = parser(test::Assemble(assembly));
+ auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- EXPECT_TRUE(p->error().empty());
- const auto got = test::ToString(p->program());
- const std::string expected = "var<private> x_1 : u32 = 1u;";
- EXPECT_THAT(got, HasSubstr(expected)) << got;
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ EXPECT_TRUE(p->error().empty());
+ const auto got = test::ToString(p->program());
+ const std::string expected = "var<private> x_1 : u32 = 1u;";
+ EXPECT_THAT(got, HasSubstr(expected)) << got;
}
-TEST_F(SpvModuleScopeVarParserTest,
- Builtin_Output_Initializer_SameSignednessAsWGSL) {
- // Only outputs can have initializers.
- // WGSL sample_mask store type is u32.
- const auto assembly = Preamble() + FragMain() + R"(
+TEST_F(SpvModuleScopeVarParserTest, Builtin_Output_Initializer_SameSignednessAsWGSL) {
+ // Only outputs can have initializers.
+ // WGSL sample_mask store type is u32.
+ const auto assembly = Preamble() + FragMain() + R"(
OpDecorate %1 BuiltIn SampleMask
)" + CommonTypes() + R"(
%arr_ty = OpTypeArray %uint %uint_1
@@ -3529,21 +3442,19 @@ TEST_F(SpvModuleScopeVarParserTest,
%arr_init = OpConstantComposite %arr_ty %uint_2
%1 = OpVariable %ptr_ty Output %arr_init
)" + MainBody();
- auto p = parser(test::Assemble(assembly));
+ auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- EXPECT_TRUE(p->error().empty());
- const auto got = test::ToString(p->program());
- const std::string expected =
- "var<private> x_1 : array<u32, 1u> = array<u32, 1u>(2u);";
- EXPECT_THAT(got, HasSubstr(expected)) << got;
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ EXPECT_TRUE(p->error().empty());
+ const auto got = test::ToString(p->program());
+ const std::string expected = "var<private> x_1 : array<u32, 1u> = array<u32, 1u>(2u);";
+ EXPECT_THAT(got, HasSubstr(expected)) << got;
}
-TEST_F(SpvModuleScopeVarParserTest,
- Builtin_Output_Initializer_OppositeSignednessAsWGSL) {
- // Only outputs can have initializers.
- // WGSL sample_mask store type is u32. Use i32 in SPIR-V
- const auto assembly = Preamble() + FragMain() + R"(
+TEST_F(SpvModuleScopeVarParserTest, Builtin_Output_Initializer_OppositeSignednessAsWGSL) {
+ // Only outputs can have initializers.
+    // WGSL sample_mask store type is u32. Use i32 in SPIR-V.
+ const auto assembly = Preamble() + FragMain() + R"(
OpDecorate %1 BuiltIn SampleMask
)" + CommonTypes() + R"(
%arr_ty = OpTypeArray %int %uint_1
@@ -3551,52 +3462,51 @@ TEST_F(SpvModuleScopeVarParserTest,
%arr_init = OpConstantComposite %arr_ty %int_14
%1 = OpVariable %ptr_ty Output %arr_init
)" + MainBody();
- auto p = parser(test::Assemble(assembly));
+ auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- EXPECT_TRUE(p->error().empty());
- const auto got = test::ToString(p->program());
- const std::string expected =
- "var<private> x_1 : array<i32, 1u> = array<i32, 1u>(14);";
- EXPECT_THAT(got, HasSubstr(expected)) << got;
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ EXPECT_TRUE(p->error().empty());
+ const auto got = test::ToString(p->program());
+ const std::string expected = "var<private> x_1 : array<i32, 1u> = array<i32, 1u>(14i);";
+ EXPECT_THAT(got, HasSubstr(expected)) << got;
}
TEST_F(SpvModuleScopeVarParserTest, Builtin_Input_SameSignednessAsWGSL) {
- // WGSL vertex_index store type is u32.
- const auto assembly = Preamble() + FragMain() + R"(
+ // WGSL vertex_index store type is u32.
+ const auto assembly = Preamble() + FragMain() + R"(
OpDecorate %1 BuiltIn VertexIndex
)" + CommonTypes() + R"(
%ptr_ty = OpTypePointer Input %uint
%1 = OpVariable %ptr_ty Input
)" + MainBody();
- auto p = parser(test::Assemble(assembly));
+ auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- EXPECT_TRUE(p->error().empty());
- const auto got = test::ToString(p->program());
- const std::string expected = "var<private> x_1 : u32;";
- EXPECT_THAT(got, HasSubstr(expected)) << got;
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ EXPECT_TRUE(p->error().empty());
+ const auto got = test::ToString(p->program());
+ const std::string expected = "var<private> x_1 : u32;";
+ EXPECT_THAT(got, HasSubstr(expected)) << got;
}
TEST_F(SpvModuleScopeVarParserTest, Builtin_Input_OppositeSignednessAsWGSL) {
- // WGSL vertex_index store type is u32. Use i32 in SPIR-V.
- const auto assembly = Preamble() + FragMain() + R"(
+ // WGSL vertex_index store type is u32. Use i32 in SPIR-V.
+ const auto assembly = Preamble() + FragMain() + R"(
OpDecorate %1 BuiltIn VertexIndex
)" + CommonTypes() + R"(
%ptr_ty = OpTypePointer Input %int
%1 = OpVariable %ptr_ty Input
)" + MainBody();
- auto p = parser(test::Assemble(assembly));
+ auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
- EXPECT_TRUE(p->error().empty());
- const auto got = test::ToString(p->program());
- const std::string expected = "var<private> x_1 : i32;";
- EXPECT_THAT(got, HasSubstr(expected)) << got;
+ ASSERT_TRUE(p->BuildAndParseInternalModuleExceptFunctions());
+ EXPECT_TRUE(p->error().empty());
+ const auto got = test::ToString(p->program());
+ const std::string expected = "var<private> x_1 : i32;";
+ EXPECT_THAT(got, HasSubstr(expected)) << got;
}
TEST_F(SpvModuleScopeVarParserTest, EntryPointWrapping_IOLocations) {
- const auto assembly = CommonCapabilities() + R"(
+ const auto assembly = CommonCapabilities() + R"(
OpEntryPoint Fragment %main "main" %1 %2 %3 %4
OpExecutionMode %main OriginUpperLeft
OpDecorate %1 Location 0
@@ -3604,7 +3514,7 @@ TEST_F(SpvModuleScopeVarParserTest, EntryPointWrapping_IOLocations) {
OpDecorate %3 Location 30
OpDecorate %4 Location 6
)" + CommonTypes() +
- R"(
+ R"(
%ptr_in_uint = OpTypePointer Input %uint
%ptr_out_uint = OpTypePointer Output %uint
%1 = OpVariable %ptr_in_uint Input
@@ -3617,13 +3527,13 @@ TEST_F(SpvModuleScopeVarParserTest, EntryPointWrapping_IOLocations) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
+ auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModule());
- EXPECT_TRUE(p->error().empty());
- const auto got = test::ToString(p->program());
- const std::string expected =
- R"(var<private> x_1 : u32;
+ ASSERT_TRUE(p->BuildAndParseInternalModule());
+ EXPECT_TRUE(p->error().empty());
+ const auto got = test::ToString(p->program());
+ const std::string expected =
+ R"(var<private> x_1 : u32;
var<private> x_2 : u32;
@@ -3642,7 +3552,7 @@ struct main_out {
x_4_1 : u32,
}
-@stage(fragment)
+@fragment
fn main(@location(0) @interpolate(flat) x_1_param : u32, @location(30) @interpolate(flat) x_3_param : u32) -> main_out {
x_1 = x_1_param;
x_3 = x_3_param;
@@ -3650,19 +3560,18 @@ fn main(@location(0) @interpolate(flat) x_1_param : u32, @location(30) @interpol
return main_out(x_2, x_4);
}
)";
- EXPECT_THAT(got, HasSubstr(expected)) << got;
+ EXPECT_THAT(got, HasSubstr(expected)) << got;
}
-TEST_F(SpvModuleScopeVarParserTest,
- EntryPointWrapping_BuiltinVar_Input_SameSignedness) {
- // instance_index is u32 in WGSL. Use uint in SPIR-V.
- // No bitcasts are used for parameter formation or return value.
- const auto assembly = CommonCapabilities() + R"(
+TEST_F(SpvModuleScopeVarParserTest, EntryPointWrapping_BuiltinVar_Input_SameSignedness) {
+ // instance_index is u32 in WGSL. Use uint in SPIR-V.
+ // No bitcasts are used for parameter formation or return value.
+ const auto assembly = CommonCapabilities() + R"(
OpEntryPoint Vertex %main "main" %1 %position
OpDecorate %position BuiltIn Position
OpDecorate %1 BuiltIn InstanceIndex
)" + CommonTypes() +
- R"(
+ R"(
%ptr_in_uint = OpTypePointer Input %uint
%1 = OpVariable %ptr_in_uint Input
%posty = OpTypePointer Output %v4float
@@ -3675,12 +3584,12 @@ TEST_F(SpvModuleScopeVarParserTest,
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
+ auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->Parse()) << p->error() << assembly;
- EXPECT_TRUE(p->error().empty());
- const auto got = test::ToString(p->program());
- const std::string expected = R"(var<private> x_1 : u32;
+ ASSERT_TRUE(p->Parse()) << p->error() << assembly;
+ EXPECT_TRUE(p->error().empty());
+ const auto got = test::ToString(p->program());
+ const std::string expected = R"(var<private> x_1 : u32;
var<private> x_4 : vec4<f32>;
@@ -3694,25 +3603,24 @@ struct main_out {
x_4_1 : vec4<f32>,
}
-@stage(vertex)
+@vertex
fn main(@builtin(instance_index) x_1_param : u32) -> main_out {
x_1 = x_1_param;
main_1();
return main_out(x_4);
}
)";
- EXPECT_EQ(got, expected) << got;
+ EXPECT_EQ(got, expected) << got;
}
-TEST_F(SpvModuleScopeVarParserTest,
- EntryPointWrapping_BuiltinVar_Input_OppositeSignedness) {
- // instance_index is u32 in WGSL. Use int in SPIR-V.
- const auto assembly = CommonCapabilities() + R"(
+TEST_F(SpvModuleScopeVarParserTest, EntryPointWrapping_BuiltinVar_Input_OppositeSignedness) {
+ // instance_index is u32 in WGSL. Use int in SPIR-V.
+ const auto assembly = CommonCapabilities() + R"(
OpEntryPoint Vertex %main "main" %position %1
OpDecorate %position BuiltIn Position
OpDecorate %1 BuiltIn InstanceIndex
)" + CommonTypes() +
- R"(
+ R"(
%ptr_in_int = OpTypePointer Input %int
%1 = OpVariable %ptr_in_int Input
%posty = OpTypePointer Output %v4float
@@ -3725,12 +3633,12 @@ TEST_F(SpvModuleScopeVarParserTest,
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
+ auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->Parse()) << p->error() << assembly;
- EXPECT_TRUE(p->error().empty());
- const auto got = test::ToString(p->program());
- const std::string expected = R"(var<private> x_1 : i32;
+ ASSERT_TRUE(p->Parse()) << p->error() << assembly;
+ EXPECT_TRUE(p->error().empty());
+ const auto got = test::ToString(p->program());
+ const std::string expected = R"(var<private> x_1 : i32;
var<private> x_4 : vec4<f32>;
@@ -3744,27 +3652,26 @@ struct main_out {
x_4_1 : vec4<f32>,
}
-@stage(vertex)
+@vertex
fn main(@builtin(instance_index) x_1_param : u32) -> main_out {
x_1 = bitcast<i32>(x_1_param);
main_1();
return main_out(x_4);
}
)";
- EXPECT_EQ(got, expected) << got;
+ EXPECT_EQ(got, expected) << got;
}
// SampleMask is an array in Vulkan SPIR-V, but a scalar in WGSL.
-TEST_F(SpvModuleScopeVarParserTest,
- EntryPointWrapping_BuiltinVar_SampleMask_In_Unsigned) {
- // SampleMask is u32 in WGSL.
- // Use unsigned array element in Vulkan.
- const auto assembly = CommonCapabilities() + R"(
+TEST_F(SpvModuleScopeVarParserTest, EntryPointWrapping_BuiltinVar_SampleMask_In_Unsigned) {
+ // SampleMask is u32 in WGSL.
+ // Use unsigned array element in Vulkan.
+ const auto assembly = CommonCapabilities() + R"(
OpEntryPoint Fragment %main "main" %1
OpExecutionMode %main OriginUpperLeft
OpDecorate %1 BuiltIn SampleMask
)" + CommonTypes() +
- R"(
+ R"(
%arr = OpTypeArray %uint %uint_1
%ptr_ty = OpTypePointer Input %arr
%1 = OpVariable %ptr_ty Input
@@ -3774,36 +3681,35 @@ TEST_F(SpvModuleScopeVarParserTest,
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
+ auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->Parse()) << p->error() << assembly;
- EXPECT_TRUE(p->error().empty());
- const auto got = test::ToString(p->program());
- const std::string expected = R"(var<private> x_1 : array<u32, 1u>;
+ ASSERT_TRUE(p->Parse()) << p->error() << assembly;
+ EXPECT_TRUE(p->error().empty());
+ const auto got = test::ToString(p->program());
+ const std::string expected = R"(var<private> x_1 : array<u32, 1u>;
fn main_1() {
return;
}
-@stage(fragment)
+@fragment
fn main(@builtin(sample_mask) x_1_param : u32) {
- x_1[0] = x_1_param;
+ x_1[0i] = x_1_param;
main_1();
}
)";
- EXPECT_EQ(got, expected) << got;
+ EXPECT_EQ(got, expected) << got;
}
-TEST_F(SpvModuleScopeVarParserTest,
- EntryPointWrapping_BuiltinVar_SampleMask_In_Signed) {
- // SampleMask is u32 in WGSL.
- // Use signed array element in Vulkan.
- const auto assembly = CommonCapabilities() + R"(
+TEST_F(SpvModuleScopeVarParserTest, EntryPointWrapping_BuiltinVar_SampleMask_In_Signed) {
+ // SampleMask is u32 in WGSL.
+ // Use signed array element in Vulkan.
+ const auto assembly = CommonCapabilities() + R"(
OpEntryPoint Fragment %main "main" %1
OpExecutionMode %main OriginUpperLeft
OpDecorate %1 BuiltIn SampleMask
)" + CommonTypes() +
- R"(
+ R"(
%arr = OpTypeArray %int %uint_1
%ptr_ty = OpTypePointer Input %arr
%1 = OpVariable %ptr_ty Input
@@ -3813,36 +3719,36 @@ TEST_F(SpvModuleScopeVarParserTest,
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
+ auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->Parse()) << p->error() << assembly;
- EXPECT_TRUE(p->error().empty());
- const auto got = test::ToString(p->program());
- const std::string expected = R"(var<private> x_1 : array<i32, 1u>;
+ ASSERT_TRUE(p->Parse()) << p->error() << assembly;
+ EXPECT_TRUE(p->error().empty());
+ const auto got = test::ToString(p->program());
+ const std::string expected = R"(var<private> x_1 : array<i32, 1u>;
fn main_1() {
return;
}
-@stage(fragment)
+@fragment
fn main(@builtin(sample_mask) x_1_param : u32) {
- x_1[0] = bitcast<i32>(x_1_param);
+ x_1[0i] = bitcast<i32>(x_1_param);
main_1();
}
)";
- EXPECT_EQ(got, expected) << got;
+ EXPECT_EQ(got, expected) << got;
}
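// Restating the SampleMask wrapping exercised by the two tests above: Vulkan
// SPIR-V declares SampleMask as an array (here array<u32, 1u> or array<i32, 1u>),
// while the WGSL sample_mask builtin is a scalar u32, so the generated wrapper
// writes the scalar parameter into element 0, bitcasting when the SPIR-V element
// type is signed, e.g. x_1[0i] = bitcast<i32>(x_1_param);. The output-side tests
// below use the mirror image, e.g. return main_out(bitcast<u32>(x_1[0i]));.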
TEST_F(SpvModuleScopeVarParserTest,
EntryPointWrapping_BuiltinVar_SampleMask_Out_Unsigned_Initializer) {
- // SampleMask is u32 in WGSL.
- // Use unsigned array element in Vulkan.
- const auto assembly = CommonCapabilities() + R"(
+ // SampleMask is u32 in WGSL.
+ // Use unsigned array element in Vulkan.
+ const auto assembly = CommonCapabilities() + R"(
OpEntryPoint Fragment %main "main" %1
OpExecutionMode %main OriginUpperLeft
OpDecorate %1 BuiltIn SampleMask
)" + CommonTypes() +
- R"(
+ R"(
%arr = OpTypeArray %uint %uint_1
%ptr_ty = OpTypePointer Output %arr
%zero = OpConstantNull %arr
@@ -3853,13 +3759,13 @@ TEST_F(SpvModuleScopeVarParserTest,
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
+ auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->Parse()) << p->error() << assembly;
- EXPECT_TRUE(p->error().empty());
- const auto got = test::ToString(p->program());
- const std::string expected =
- R"(var<private> x_1 : array<u32, 1u> = array<u32, 1u>();
+ ASSERT_TRUE(p->Parse()) << p->error() << assembly;
+ EXPECT_TRUE(p->error().empty());
+ const auto got = test::ToString(p->program());
+ const std::string expected =
+ R"(var<private> x_1 : array<u32, 1u> = array<u32, 1u>();
fn main_1() {
return;
@@ -3870,25 +3776,25 @@ struct main_out {
x_1_1 : u32,
}
-@stage(fragment)
+@fragment
fn main() -> main_out {
main_1();
- return main_out(x_1[0]);
+ return main_out(x_1[0i]);
}
)";
- EXPECT_EQ(got, expected) << got;
+ EXPECT_EQ(got, expected) << got;
}
TEST_F(SpvModuleScopeVarParserTest,
EntryPointWrapping_BuiltinVar_SampleMask_Out_Signed_Initializer) {
- // SampleMask is u32 in WGSL.
- // Use signed array element in Vulkan.
- const auto assembly = CommonCapabilities() + R"(
+ // SampleMask is u32 in WGSL.
+ // Use signed array element in Vulkan.
+ const auto assembly = CommonCapabilities() + R"(
OpEntryPoint Fragment %main "main" %1
OpExecutionMode %main OriginUpperLeft
OpDecorate %1 BuiltIn SampleMask
)" + CommonTypes() +
- R"(
+ R"(
%arr = OpTypeArray %int %uint_1
%ptr_ty = OpTypePointer Output %arr
%zero = OpConstantNull %arr
@@ -3899,13 +3805,13 @@ TEST_F(SpvModuleScopeVarParserTest,
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
+ auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->Parse()) << p->error() << assembly;
- EXPECT_TRUE(p->error().empty());
- const auto got = test::ToString(p->program());
- const std::string expected =
- R"(var<private> x_1 : array<i32, 1u> = array<i32, 1u>();
+ ASSERT_TRUE(p->Parse()) << p->error() << assembly;
+ EXPECT_TRUE(p->error().empty());
+ const auto got = test::ToString(p->program());
+ const std::string expected =
+ R"(var<private> x_1 : array<i32, 1u> = array<i32, 1u>();
fn main_1() {
return;
@@ -3916,26 +3822,25 @@ struct main_out {
x_1_1 : u32,
}
-@stage(fragment)
+@fragment
fn main() -> main_out {
main_1();
- return main_out(bitcast<u32>(x_1[0]));
+ return main_out(bitcast<u32>(x_1[0i]));
}
)";
- EXPECT_EQ(got, expected) << got;
+ EXPECT_EQ(got, expected) << got;
}
-TEST_F(SpvModuleScopeVarParserTest,
- EntryPointWrapping_BuiltinVar_FragDepth_Out_Initializer) {
- // FragDepth does not require conversion, because it's f32.
- // The member of the return type is just the identifier corresponding
- // to the module-scope private variable.
- const auto assembly = CommonCapabilities() + R"(
+TEST_F(SpvModuleScopeVarParserTest, EntryPointWrapping_BuiltinVar_FragDepth_Out_Initializer) {
+ // FragDepth does not require conversion, because it's f32.
+ // The member of the return type is just the identifier corresponding
+ // to the module-scope private variable.
+ const auto assembly = CommonCapabilities() + R"(
OpEntryPoint Fragment %main "main" %1
OpExecutionMode %main OriginUpperLeft
OpDecorate %1 BuiltIn FragDepth
)" + CommonTypes() +
- R"(
+ R"(
%ptr_ty = OpTypePointer Output %float
%1 = OpVariable %ptr_ty Output %float_0
@@ -3944,12 +3849,12 @@ TEST_F(SpvModuleScopeVarParserTest,
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
+ auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->Parse()) << p->error() << assembly;
- EXPECT_TRUE(p->error().empty());
- const auto got = test::ToString(p->program());
- const std::string expected = R"(var<private> x_1 : f32 = 0.0;
+ ASSERT_TRUE(p->Parse()) << p->error() << assembly;
+ EXPECT_TRUE(p->error().empty());
+ const auto got = test::ToString(p->program());
+ const std::string expected = R"(var<private> x_1 : f32 = 0.0f;
fn main_1() {
return;
@@ -3960,30 +3865,30 @@ struct main_out {
x_1_1 : f32,
}
-@stage(fragment)
+@fragment
fn main() -> main_out {
main_1();
return main_out(x_1);
}
)";
- EXPECT_EQ(got, expected) << got;
+ EXPECT_EQ(got, expected) << got;
}
TEST_F(SpvModuleScopeVarParserTest, BuiltinPosition_BuiltIn_Position) {
- // In Vulkan SPIR-V, Position is the first member of gl_PerVertex
- const std::string assembly = PerVertexPreamble() + R"(
+ // In Vulkan SPIR-V, Position is the first member of gl_PerVertex
+ const std::string assembly = PerVertexPreamble() + R"(
%main = OpFunction %void None %voidfn
%entry = OpLabel
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
+ auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->Parse()) << p->error() << assembly;
- EXPECT_TRUE(p->error().empty());
+ ASSERT_TRUE(p->Parse()) << p->error() << assembly;
+ EXPECT_TRUE(p->error().empty());
- const auto got = test::ToString(p->program());
- const std::string expected = R"(var<private> gl_Position : vec4<f32>;
+ const auto got = test::ToString(p->program());
+ const std::string expected = R"(var<private> gl_Position : vec4<f32>;
fn main_1() {
return;
@@ -3994,18 +3899,17 @@ struct main_out {
gl_Position : vec4<f32>,
}
-@stage(vertex)
+@vertex
fn main() -> main_out {
main_1();
return main_out(gl_Position);
}
)";
- EXPECT_EQ(got, expected) << got;
+ EXPECT_EQ(got, expected) << got;
}
-TEST_F(SpvModuleScopeVarParserTest,
- BuiltinPosition_BuiltIn_Position_Initializer) {
- const std::string assembly = R"(
+TEST_F(SpvModuleScopeVarParserTest, BuiltinPosition_BuiltIn_Position_Initializer) {
+ const std::string assembly = R"(
OpCapability Shader
OpMemoryModel Logical Simple
OpEntryPoint Vertex %main "main" %1
@@ -4046,14 +3950,14 @@ TEST_F(SpvModuleScopeVarParserTest,
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
+ auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->Parse()) << p->error() << assembly;
- EXPECT_TRUE(p->error().empty());
+ ASSERT_TRUE(p->Parse()) << p->error() << assembly;
+ EXPECT_TRUE(p->error().empty());
- const auto got = test::ToString(p->program());
- const std::string expected =
- R"(var<private> gl_Position : vec4<f32> = vec4<f32>(1.0, 2.0, 3.0, 4.0);
+ const auto got = test::ToString(p->program());
+ const std::string expected =
+ R"(var<private> gl_Position : vec4<f32> = vec4<f32>(1.0f, 2.0f, 3.0f, 4.0f);
fn main_1() {
return;
@@ -4064,17 +3968,17 @@ struct main_out {
gl_Position : vec4<f32>,
}
-@stage(vertex)
+@vertex
fn main() -> main_out {
main_1();
return main_out(gl_Position);
}
)";
- EXPECT_EQ(got, expected) << got;
+ EXPECT_EQ(got, expected) << got;
}
TEST_F(SpvModuleScopeVarParserTest, Input_FlattenArray_OneLevel) {
- const std::string assembly = R"(
+ const std::string assembly = R"(
OpCapability Shader
OpMemoryModel Logical Simple
OpEntryPoint Vertex %main "main" %1 %2
@@ -4102,13 +4006,13 @@ TEST_F(SpvModuleScopeVarParserTest, Input_FlattenArray_OneLevel) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
+ auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->Parse()) << p->error() << assembly;
- EXPECT_TRUE(p->error().empty());
+ ASSERT_TRUE(p->Parse()) << p->error() << assembly;
+ EXPECT_TRUE(p->error().empty());
- const auto got = test::ToString(p->program());
- const std::string expected = R"(var<private> x_1 : array<f32, 3u>;
+ const auto got = test::ToString(p->program());
+ const std::string expected = R"(var<private> x_1 : array<f32, 3u>;
var<private> x_2 : vec4<f32>;
@@ -4121,20 +4025,20 @@ struct main_out {
x_2_1 : vec4<f32>,
}
-@stage(vertex)
+@vertex
fn main(@location(4) x_1_param : f32, @location(5) x_1_param_1 : f32, @location(6) x_1_param_2 : f32) -> main_out {
- x_1[0] = x_1_param;
- x_1[1] = x_1_param_1;
- x_1[2] = x_1_param_2;
+ x_1[0i] = x_1_param;
+ x_1[1i] = x_1_param_1;
+ x_1[2i] = x_1_param_2;
main_1();
return main_out(x_2);
}
)";
- EXPECT_EQ(got, expected) << got;
+ EXPECT_EQ(got, expected) << got;
}
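// As the expected output above shows, a composite Input variable is flattened
// into one entry-point parameter per leaf: the array<f32, 3u> at Location 4
// becomes @location(4), @location(5) and @location(6) parameters, which the
// wrapper copies back into x_1[0i], x_1[1i] and x_1[2i] before calling main_1().
// The matrix, struct and nested-array tests that follow apply the same scheme.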
TEST_F(SpvModuleScopeVarParserTest, Input_FlattenMatrix) {
- const std::string assembly = R"(
+ const std::string assembly = R"(
OpCapability Shader
OpMemoryModel Logical Simple
OpEntryPoint Vertex %main "main" %1 %2
@@ -4160,13 +4064,13 @@ TEST_F(SpvModuleScopeVarParserTest, Input_FlattenMatrix) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
+ auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->Parse()) << p->error() << assembly;
- EXPECT_TRUE(p->error().empty());
+ ASSERT_TRUE(p->Parse()) << p->error() << assembly;
+ EXPECT_TRUE(p->error().empty());
- const auto got = test::ToString(p->program());
- const std::string expected = R"(var<private> x_1 : mat2x4<f32>;
+ const auto got = test::ToString(p->program());
+ const std::string expected = R"(var<private> x_1 : mat2x4<f32>;
var<private> x_2 : vec4<f32>;
@@ -4179,19 +4083,19 @@ struct main_out {
x_2_1 : vec4<f32>,
}
-@stage(vertex)
+@vertex
fn main(@location(9) x_1_param : vec4<f32>, @location(10) x_1_param_1 : vec4<f32>) -> main_out {
- x_1[0] = x_1_param;
- x_1[1] = x_1_param_1;
+ x_1[0i] = x_1_param;
+ x_1[1i] = x_1_param_1;
main_1();
return main_out(x_2);
}
)";
- EXPECT_EQ(got, expected) << got;
+ EXPECT_EQ(got, expected) << got;
}
TEST_F(SpvModuleScopeVarParserTest, Input_FlattenStruct_LocOnVariable) {
- const std::string assembly = R"(
+ const std::string assembly = R"(
OpCapability Shader
OpMemoryModel Logical Simple
OpEntryPoint Vertex %main "main" %1 %2
@@ -4222,13 +4126,13 @@ TEST_F(SpvModuleScopeVarParserTest, Input_FlattenStruct_LocOnVariable) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
+ auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->Parse()) << p->error() << assembly;
- EXPECT_TRUE(p->error().empty());
+ ASSERT_TRUE(p->Parse()) << p->error() << assembly;
+ EXPECT_TRUE(p->error().empty());
- const auto got = test::ToString(p->program());
- const std::string expected = R"(struct Communicators {
+ const auto got = test::ToString(p->program());
+ const std::string expected = R"(struct Communicators {
alice : f32,
bob : vec4<f32>,
}
@@ -4246,7 +4150,7 @@ struct main_out {
x_2_1 : vec4<f32>,
}
-@stage(vertex)
+@vertex
fn main(@location(9) x_1_param : f32, @location(10) x_1_param_1 : vec4<f32>) -> main_out {
x_1.alice = x_1_param;
x_1.bob = x_1_param_1;
@@ -4254,11 +4158,11 @@ fn main(@location(9) x_1_param : f32, @location(10) x_1_param_1 : vec4<f32>) ->
return main_out(x_2);
}
)";
- EXPECT_EQ(got, expected) << got;
+ EXPECT_EQ(got, expected) << got;
}
TEST_F(SpvModuleScopeVarParserTest, Input_FlattenNested) {
- const std::string assembly = R"(
+ const std::string assembly = R"(
OpCapability Shader
OpMemoryModel Logical Simple
OpEntryPoint Vertex %main "main" %1 %2
@@ -4286,13 +4190,13 @@ TEST_F(SpvModuleScopeVarParserTest, Input_FlattenNested) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
+ auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->Parse()) << p->error() << assembly;
- EXPECT_TRUE(p->error().empty());
+ ASSERT_TRUE(p->Parse()) << p->error() << assembly;
+ EXPECT_TRUE(p->error().empty());
- const auto got = test::ToString(p->program());
- const std::string expected = R"(var<private> x_1 : array<mat2x4<f32>, 2u>;
+ const auto got = test::ToString(p->program());
+ const std::string expected = R"(var<private> x_1 : array<mat2x4<f32>, 2u>;
var<private> x_2 : vec4<f32>;
@@ -4305,21 +4209,21 @@ struct main_out {
x_2_1 : vec4<f32>,
}
-@stage(vertex)
+@vertex
fn main(@location(7) x_1_param : vec4<f32>, @location(8) x_1_param_1 : vec4<f32>, @location(9) x_1_param_2 : vec4<f32>, @location(10) x_1_param_3 : vec4<f32>) -> main_out {
- x_1[0][0] = x_1_param;
- x_1[0][1] = x_1_param_1;
- x_1[1][0] = x_1_param_2;
- x_1[1][1] = x_1_param_3;
+ x_1[0i][0i] = x_1_param;
+ x_1[0i][1i] = x_1_param_1;
+ x_1[1i][0i] = x_1_param_2;
+ x_1[1i][1i] = x_1_param_3;
main_1();
return main_out(x_2);
}
)";
- EXPECT_EQ(got, expected) << got;
+ EXPECT_EQ(got, expected) << got;
}
TEST_F(SpvModuleScopeVarParserTest, Output_FlattenArray_OneLevel) {
- const std::string assembly = R"(
+ const std::string assembly = R"(
OpCapability Shader
OpMemoryModel Logical Simple
OpEntryPoint Vertex %main "main" %1 %2
@@ -4347,13 +4251,13 @@ TEST_F(SpvModuleScopeVarParserTest, Output_FlattenArray_OneLevel) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
+ auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->Parse()) << p->error() << assembly;
- EXPECT_TRUE(p->error().empty());
+ ASSERT_TRUE(p->Parse()) << p->error() << assembly;
+ EXPECT_TRUE(p->error().empty());
- const auto got = test::ToString(p->program());
- const std::string expected = R"(var<private> x_1 : array<f32, 3u>;
+ const auto got = test::ToString(p->program());
+ const std::string expected = R"(var<private> x_1 : array<f32, 3u>;
var<private> x_2 : vec4<f32>;
@@ -4372,17 +4276,17 @@ struct main_out {
x_2_1 : vec4<f32>,
}
-@stage(vertex)
+@vertex
fn main() -> main_out {
main_1();
- return main_out(x_1[0], x_1[1], x_1[2], x_2);
+ return main_out(x_1[0i], x_1[1i], x_1[2i], x_2);
}
)";
- EXPECT_EQ(got, expected) << got;
+ EXPECT_EQ(got, expected) << got;
}
TEST_F(SpvModuleScopeVarParserTest, Output_FlattenMatrix) {
- const std::string assembly = R"(
+ const std::string assembly = R"(
OpCapability Shader
OpMemoryModel Logical Simple
OpEntryPoint Vertex %main "main" %1 %2
@@ -4408,13 +4312,13 @@ TEST_F(SpvModuleScopeVarParserTest, Output_FlattenMatrix) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
+ auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->Parse()) << p->error() << assembly;
- EXPECT_TRUE(p->error().empty());
+ ASSERT_TRUE(p->Parse()) << p->error() << assembly;
+ EXPECT_TRUE(p->error().empty());
- const auto got = test::ToString(p->program());
- const std::string expected = R"(var<private> x_1 : mat2x4<f32>;
+ const auto got = test::ToString(p->program());
+ const std::string expected = R"(var<private> x_1 : mat2x4<f32>;
var<private> x_2 : vec4<f32>;
@@ -4431,17 +4335,17 @@ struct main_out {
x_2_1 : vec4<f32>,
}
-@stage(vertex)
+@vertex
fn main() -> main_out {
main_1();
- return main_out(x_1[0], x_1[1], x_2);
+ return main_out(x_1[0i], x_1[1i], x_2);
}
)";
- EXPECT_EQ(got, expected) << got;
+ EXPECT_EQ(got, expected) << got;
}
TEST_F(SpvModuleScopeVarParserTest, Output_FlattenStruct_LocOnVariable) {
- const std::string assembly = R"(
+ const std::string assembly = R"(
OpCapability Shader
OpMemoryModel Logical Simple
OpEntryPoint Vertex %main "main" %1 %2
@@ -4472,13 +4376,13 @@ TEST_F(SpvModuleScopeVarParserTest, Output_FlattenStruct_LocOnVariable) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
+ auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->Parse()) << p->error() << assembly;
- EXPECT_TRUE(p->error().empty());
+ ASSERT_TRUE(p->Parse()) << p->error() << assembly;
+ EXPECT_TRUE(p->error().empty());
- const auto got = test::ToString(p->program());
- const std::string expected = R"(struct Communicators {
+ const auto got = test::ToString(p->program());
+ const std::string expected = R"(struct Communicators {
alice : f32,
bob : vec4<f32>,
}
@@ -4500,18 +4404,18 @@ struct main_out {
x_2_1 : vec4<f32>,
}
-@stage(vertex)
+@vertex
fn main() -> main_out {
main_1();
return main_out(x_1.alice, x_1.bob, x_2);
}
)";
- EXPECT_EQ(got, expected) << got;
+ EXPECT_EQ(got, expected) << got;
}
TEST_F(SpvModuleScopeVarParserTest, FlattenStruct_LocOnMembers) {
- // Block-decorated struct may have its members decorated with Location.
- const std::string assembly = R"(
+ // Block-decorated struct may have its members decorated with Location.
+ const std::string assembly = R"(
OpCapability Shader
OpMemoryModel Logical Simple
OpEntryPoint Vertex %main "main" %1 %2 %3
@@ -4545,13 +4449,13 @@ TEST_F(SpvModuleScopeVarParserTest, FlattenStruct_LocOnMembers) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
+ auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->Parse()) << p->error() << assembly;
- EXPECT_TRUE(p->error().empty());
+ ASSERT_TRUE(p->Parse()) << p->error() << assembly;
+ EXPECT_TRUE(p->error().empty());
- const auto got = test::ToString(p->program());
- const std::string expected = R"(struct Communicators {
+ const auto got = test::ToString(p->program());
+ const std::string expected = R"(struct Communicators {
alice : f32,
bob : vec4<f32>,
}
@@ -4575,7 +4479,7 @@ struct main_out {
x_3_2 : vec4<f32>,
}
-@stage(vertex)
+@vertex
fn main(@location(9) x_1_param : f32, @location(11) x_1_param_1 : vec4<f32>) -> main_out {
x_1.alice = x_1_param;
x_1.bob = x_1_param_1;
@@ -4583,12 +4487,11 @@ fn main(@location(9) x_1_param : f32, @location(11) x_1_param_1 : vec4<f32>) ->
return main_out(x_2, x_3.alice, x_3.bob);
}
)";
- EXPECT_EQ(got, expected) << got;
+ EXPECT_EQ(got, expected) << got;
}
-TEST_F(SpvModuleScopeVarParserTest,
- EntryPointWrapping_Interpolation_Flat_Vertex_In) {
- const auto assembly = CommonCapabilities() + R"(
+TEST_F(SpvModuleScopeVarParserTest, EntryPointWrapping_Interpolation_Flat_Vertex_In) {
+ const auto assembly = CommonCapabilities() + R"(
OpEntryPoint Vertex %main "main" %1 %2 %3 %4 %5 %6 %10
OpDecorate %1 Location 1
OpDecorate %2 Location 2
@@ -4604,7 +4507,7 @@ TEST_F(SpvModuleScopeVarParserTest,
OpDecorate %6 Flat
OpDecorate %10 BuiltIn Position
)" + CommonTypes() +
- R"(
+ R"(
%ptr_in_uint = OpTypePointer Input %uint
%ptr_in_v2uint = OpTypePointer Input %v2uint
%ptr_in_int = OpTypePointer Input %int
@@ -4626,13 +4529,13 @@ TEST_F(SpvModuleScopeVarParserTest,
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
+ auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModule());
- EXPECT_TRUE(p->error().empty());
- const auto got = test::ToString(p->program());
- const std::string expected =
- R"(var<private> x_1 : u32;
+ ASSERT_TRUE(p->BuildAndParseInternalModule());
+ EXPECT_TRUE(p->error().empty());
+ const auto got = test::ToString(p->program());
+ const std::string expected =
+ R"(var<private> x_1 : u32;
var<private> x_2 : vec2<u32>;
@@ -4655,7 +4558,7 @@ struct main_out {
x_10_1 : vec4<f32>,
}
-@stage(vertex)
+@vertex
fn main(@location(1) @interpolate(flat) x_1_param : u32, @location(2) @interpolate(flat) x_2_param : vec2<u32>, @location(3) @interpolate(flat) x_3_param : i32, @location(4) @interpolate(flat) x_4_param : vec2<i32>, @location(5) @interpolate(flat) x_5_param : f32, @location(6) @interpolate(flat) x_6_param : vec2<f32>) -> main_out {
x_1 = x_1_param;
x_2 = x_2_param;
@@ -4667,12 +4570,11 @@ fn main(@location(1) @interpolate(flat) x_1_param : u32, @location(2) @interpola
return main_out(x_10);
}
)";
- EXPECT_EQ(got, expected) << got;
+ EXPECT_EQ(got, expected) << got;
}
-TEST_F(SpvModuleScopeVarParserTest,
- EntryPointWrapping_Interpolation_Flat_Vertex_Output) {
- const auto assembly = CommonCapabilities() + R"(
+TEST_F(SpvModuleScopeVarParserTest, EntryPointWrapping_Interpolation_Flat_Vertex_Output) {
+ const auto assembly = CommonCapabilities() + R"(
OpEntryPoint Vertex %main "main" %1 %2 %3 %4 %5 %6 %10
OpDecorate %1 Location 1
OpDecorate %2 Location 2
@@ -4688,7 +4590,7 @@ TEST_F(SpvModuleScopeVarParserTest,
OpDecorate %6 Flat
OpDecorate %10 BuiltIn Position
)" + CommonTypes() +
- R"(
+ R"(
%ptr_out_uint = OpTypePointer Output %uint
%ptr_out_v2uint = OpTypePointer Output %v2uint
%ptr_out_int = OpTypePointer Output %int
@@ -4710,13 +4612,13 @@ TEST_F(SpvModuleScopeVarParserTest,
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
+ auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModule());
- EXPECT_TRUE(p->error().empty());
- const auto got = test::ToString(p->program());
- const std::string expected =
- R"(var<private> x_1 : u32;
+ ASSERT_TRUE(p->BuildAndParseInternalModule());
+ EXPECT_TRUE(p->error().empty());
+ const auto got = test::ToString(p->program());
+ const std::string expected =
+ R"(var<private> x_1 : u32;
var<private> x_2 : vec2<u32>;
@@ -4751,18 +4653,17 @@ struct main_out {
x_10_1 : vec4<f32>,
}
-@stage(vertex)
+@vertex
fn main() -> main_out {
main_1();
return main_out(x_1, x_2, x_3, x_4, x_5, x_6, x_10);
}
)";
- EXPECT_EQ(got, expected) << got;
+ EXPECT_EQ(got, expected) << got;
}
-TEST_F(SpvModuleScopeVarParserTest,
- EntryPointWrapping_Flatten_Interpolation_Flat_Fragment_In) {
- const auto assembly = CommonCapabilities() + R"(
+TEST_F(SpvModuleScopeVarParserTest, EntryPointWrapping_Flatten_Interpolation_Flat_Fragment_In) {
+ const auto assembly = CommonCapabilities() + R"(
OpEntryPoint Fragment %main "main" %1 %2
OpExecutionMode %main OriginUpperLeft
OpDecorate %1 Location 1
@@ -4770,7 +4671,7 @@ TEST_F(SpvModuleScopeVarParserTest,
OpDecorate %1 Flat
OpDecorate %2 Flat
)" + CommonTypes() +
- R"(
+ R"(
%arr = OpTypeArray %float %uint_2
%strct = OpTypeStruct %float %float
%ptr_in_arr = OpTypePointer Input %arr
@@ -4783,13 +4684,13 @@ TEST_F(SpvModuleScopeVarParserTest,
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
+ auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModule());
- EXPECT_TRUE(p->error().empty());
- const auto got = test::ToString(p->program());
- const std::string expected =
- R"(struct S {
+ ASSERT_TRUE(p->BuildAndParseInternalModule());
+ EXPECT_TRUE(p->error().empty());
+ const auto got = test::ToString(p->program());
+ const std::string expected =
+ R"(struct S {
field0 : f32,
field1 : f32,
}
@@ -4802,21 +4703,20 @@ fn main_1() {
return;
}
-@stage(fragment)
+@fragment
fn main(@location(1) @interpolate(flat) x_1_param : f32, @location(2) @interpolate(flat) x_1_param_1 : f32, @location(5) @interpolate(flat) x_2_param : f32, @location(6) @interpolate(flat) x_2_param_1 : f32) {
- x_1[0] = x_1_param;
- x_1[1] = x_1_param_1;
+ x_1[0i] = x_1_param;
+ x_1[1i] = x_1_param_1;
x_2.field0 = x_2_param;
x_2.field1 = x_2_param_1;
main_1();
}
)";
- EXPECT_EQ(got, expected) << got;
+ EXPECT_EQ(got, expected) << got;
}
-TEST_F(SpvModuleScopeVarParserTest,
- EntryPointWrapping_Interpolation_Floating_Fragment_In) {
- const auto assembly = CommonCapabilities() + R"(
+TEST_F(SpvModuleScopeVarParserTest, EntryPointWrapping_Interpolation_Floating_Fragment_In) {
+ const auto assembly = CommonCapabilities() + R"(
OpEntryPoint Fragment %main "main" %1 %2 %3 %4 %5 %6
OpExecutionMode %main OriginUpperLeft
OpDecorate %1 Location 1
@@ -4841,7 +4741,7 @@ TEST_F(SpvModuleScopeVarParserTest,
OpDecorate %6 Sample
)" + CommonTypes() +
- R"(
+ R"(
%ptr_in_float = OpTypePointer Input %float
%1 = OpVariable %ptr_in_float Input
%2 = OpVariable %ptr_in_float Input
@@ -4855,13 +4755,13 @@ TEST_F(SpvModuleScopeVarParserTest,
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
+ auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModule());
- EXPECT_TRUE(p->error().empty());
- const auto got = test::ToString(p->program());
- const std::string expected =
- R"(var<private> x_1 : f32;
+ ASSERT_TRUE(p->BuildAndParseInternalModule());
+ EXPECT_TRUE(p->error().empty());
+ const auto got = test::ToString(p->program());
+ const std::string expected =
+ R"(var<private> x_1 : f32;
var<private> x_2 : f32;
@@ -4877,7 +4777,7 @@ fn main_1() {
return;
}
-@stage(fragment)
+@fragment
fn main(@location(1) x_1_param : f32, @location(2) @interpolate(perspective, centroid) x_2_param : f32, @location(3) @interpolate(perspective, sample) x_3_param : f32, @location(4) @interpolate(linear) x_4_param : f32, @location(5) @interpolate(linear, centroid) x_5_param : f32, @location(6) @interpolate(linear, sample) x_6_param : f32) {
x_1 = x_1_param;
x_2 = x_2_param;
@@ -4888,12 +4788,11 @@ fn main(@location(1) x_1_param : f32, @location(2) @interpolate(perspective, cen
main_1();
}
)";
- EXPECT_EQ(got, expected) << got;
+ EXPECT_EQ(got, expected) << got;
}
-TEST_F(SpvModuleScopeVarParserTest,
- EntryPointWrapping_Flatten_Interpolation_Floating_Fragment_In) {
- const auto assembly = CommonCapabilities() + R"(
+TEST_F(SpvModuleScopeVarParserTest, EntryPointWrapping_Flatten_Interpolation_Floating_Fragment_In) {
+ const auto assembly = CommonCapabilities() + R"(
OpEntryPoint Fragment %main "main" %1
OpExecutionMode %main OriginUpperLeft
OpDecorate %1 Location 1
@@ -4913,7 +4812,7 @@ TEST_F(SpvModuleScopeVarParserTest,
OpMemberDecorate %10 5 Sample
)" + CommonTypes() +
- R"(
+ R"(
%10 = OpTypeStruct %float %float %float %float %float %float
%ptr_in_strct = OpTypePointer Input %10
@@ -4924,13 +4823,13 @@ TEST_F(SpvModuleScopeVarParserTest,
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
+ auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModule()) << assembly << p->error();
- EXPECT_TRUE(p->error().empty());
- const auto got = test::ToString(p->program());
- const std::string expected =
- R"(struct S {
+ ASSERT_TRUE(p->BuildAndParseInternalModule()) << assembly << p->error();
+ EXPECT_TRUE(p->error().empty());
+ const auto got = test::ToString(p->program());
+ const std::string expected =
+ R"(struct S {
field0 : f32,
field1 : f32,
field2 : f32,
@@ -4945,7 +4844,7 @@ fn main_1() {
return;
}
-@stage(fragment)
+@fragment
fn main(@location(1) x_1_param : f32, @location(2) @interpolate(perspective, centroid) x_1_param_1 : f32, @location(3) @interpolate(perspective, sample) x_1_param_2 : f32, @location(4) @interpolate(linear) x_1_param_3 : f32, @location(5) @interpolate(linear, centroid) x_1_param_4 : f32, @location(6) @interpolate(linear, sample) x_1_param_5 : f32) {
x_1.field0 = x_1_param;
x_1.field1 = x_1_param_1;
@@ -4956,12 +4855,11 @@ fn main(@location(1) x_1_param : f32, @location(2) @interpolate(perspective, cen
main_1();
}
)";
- EXPECT_EQ(got, expected) << got;
+ EXPECT_EQ(got, expected) << got;
}
-TEST_F(SpvModuleScopeVarParserTest,
- EntryPointWrapping_Interpolation_Floating_Fragment_Out) {
- const auto assembly = CommonCapabilities() + R"(
+TEST_F(SpvModuleScopeVarParserTest, EntryPointWrapping_Interpolation_Floating_Fragment_Out) {
+ const auto assembly = CommonCapabilities() + R"(
OpEntryPoint Fragment %main "main" %1 %2 %3 %4 %5 %6
OpExecutionMode %main OriginUpperLeft
OpDecorate %1 Location 1
@@ -4986,7 +4884,7 @@ TEST_F(SpvModuleScopeVarParserTest,
OpDecorate %6 Sample
)" + CommonTypes() +
- R"(
+ R"(
%ptr_out_float = OpTypePointer Output %float
%1 = OpVariable %ptr_out_float Output
%2 = OpVariable %ptr_out_float Output
@@ -5000,13 +4898,13 @@ TEST_F(SpvModuleScopeVarParserTest,
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
+ auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModule());
- EXPECT_TRUE(p->error().empty());
- const auto got = test::ToString(p->program());
- const std::string expected =
- R"(var<private> x_1 : f32;
+ ASSERT_TRUE(p->BuildAndParseInternalModule());
+ EXPECT_TRUE(p->error().empty());
+ const auto got = test::ToString(p->program());
+ const std::string expected =
+ R"(var<private> x_1 : f32;
var<private> x_2 : f32;
@@ -5037,18 +4935,18 @@ struct main_out {
x_6_1 : f32,
}
-@stage(fragment)
+@fragment
fn main() -> main_out {
main_1();
return main_out(x_1, x_2, x_3, x_4, x_5, x_6);
}
)";
- EXPECT_EQ(got, expected) << got;
+ EXPECT_EQ(got, expected) << got;
}
TEST_F(SpvModuleScopeVarParserTest,
EntryPointWrapping_Flatten_Interpolation_Floating_Fragment_Out) {
- const auto assembly = CommonCapabilities() + R"(
+ const auto assembly = CommonCapabilities() + R"(
OpEntryPoint Fragment %main "main" %1
OpExecutionMode %main OriginUpperLeft
@@ -5069,7 +4967,7 @@ TEST_F(SpvModuleScopeVarParserTest,
OpMemberDecorate %10 5 Sample
)" + CommonTypes() +
- R"(
+ R"(
%10 = OpTypeStruct %float %float %float %float %float %float
%ptr_in_strct = OpTypePointer Output %10
@@ -5080,13 +4978,13 @@ TEST_F(SpvModuleScopeVarParserTest,
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
+ auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModule());
- EXPECT_TRUE(p->error().empty());
- const auto got = test::ToString(p->program());
- const std::string expected =
- R"(struct S {
+ ASSERT_TRUE(p->BuildAndParseInternalModule());
+ EXPECT_TRUE(p->error().empty());
+ const auto got = test::ToString(p->program());
+ const std::string expected =
+ R"(struct S {
field0 : f32,
field1 : f32,
field2 : f32,
@@ -5116,21 +5014,20 @@ struct main_out {
x_1_6 : f32,
}
-@stage(fragment)
+@fragment
fn main() -> main_out {
main_1();
return main_out(x_1.field0, x_1.field1, x_1.field2, x_1.field3, x_1.field4, x_1.field5);
}
)";
- EXPECT_EQ(got, expected) << got;
+ EXPECT_EQ(got, expected) << got;
}
-TEST_F(SpvModuleScopeVarParserTest,
- EntryPointWrapping_Interpolation_Default_Vertex_Output) {
- // Integral types default to @interpolate(flat).
- // Floating types default to @interpolate(perspective, center), which is the
- // same as WGSL and therefore dropped.
- const auto assembly = CommonCapabilities() + R"(
+TEST_F(SpvModuleScopeVarParserTest, EntryPointWrapping_Interpolation_Default_Vertex_Output) {
+ // Integral types default to @interpolate(flat).
+ // Floating types default to @interpolate(perspective, center), which is the
+ // same as WGSL and therefore dropped.
+ const auto assembly = CommonCapabilities() + R"(
OpEntryPoint Vertex %main "main" %1 %2 %3 %4 %5 %6 %10
OpDecorate %1 Location 1
OpDecorate %2 Location 2
@@ -5140,7 +5037,7 @@ TEST_F(SpvModuleScopeVarParserTest,
OpDecorate %6 Location 6
OpDecorate %10 BuiltIn Position
)" + CommonTypes() +
- R"(
+ R"(
%ptr_out_uint = OpTypePointer Output %uint
%ptr_out_v2uint = OpTypePointer Output %v2uint
%ptr_out_int = OpTypePointer Output %int
@@ -5162,13 +5059,13 @@ TEST_F(SpvModuleScopeVarParserTest,
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
+ auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModule());
- EXPECT_TRUE(p->error().empty());
- const auto got = test::ToString(p->program());
- const std::string expected =
- R"(var<private> x_1 : u32;
+ ASSERT_TRUE(p->BuildAndParseInternalModule());
+ EXPECT_TRUE(p->error().empty());
+ const auto got = test::ToString(p->program());
+ const std::string expected =
+ R"(var<private> x_1 : u32;
var<private> x_2 : vec2<u32>;
@@ -5203,21 +5100,20 @@ struct main_out {
x_10_1 : vec4<f32>,
}
-@stage(vertex)
+@vertex
fn main() -> main_out {
main_1();
return main_out(x_1, x_2, x_3, x_4, x_5, x_6, x_10);
}
)";
- EXPECT_EQ(got, expected) << got;
+ EXPECT_EQ(got, expected) << got;
}
-TEST_F(SpvModuleScopeVarParserTest,
- EntryPointWrapping_Interpolation_Default_Fragment_In) {
- // Integral types default to @interpolate(flat).
- // Floating types default to @interpolate(perspective, center), which is the
- // same as WGSL and therefore dropped.
- const auto assembly = CommonCapabilities() + R"(
+TEST_F(SpvModuleScopeVarParserTest, EntryPointWrapping_Interpolation_Default_Fragment_In) {
+ // Integral types default to @interpolate(flat).
+ // Floating types default to @interpolate(perspective, center), which is the
+ // same as WGSL and therefore dropped.
+ const auto assembly = CommonCapabilities() + R"(
OpEntryPoint Fragment %main "main" %1 %2 %3 %4 %5 %6
OpDecorate %1 Location 1
OpDecorate %2 Location 2
@@ -5226,7 +5122,7 @@ TEST_F(SpvModuleScopeVarParserTest,
OpDecorate %5 Location 5
OpDecorate %6 Location 6
)" + CommonTypes() +
- R"(
+ R"(
%ptr_in_uint = OpTypePointer Input %uint
%ptr_in_v2uint = OpTypePointer Input %v2uint
%ptr_in_int = OpTypePointer Input %int
@@ -5245,13 +5141,13 @@ TEST_F(SpvModuleScopeVarParserTest,
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
+ auto p = parser(test::Assemble(assembly));
- ASSERT_TRUE(p->BuildAndParseInternalModule());
- EXPECT_TRUE(p->error().empty());
- const auto got = test::ToString(p->program());
- const std::string expected =
- R"(var<private> x_1 : u32;
+ ASSERT_TRUE(p->BuildAndParseInternalModule());
+ EXPECT_TRUE(p->error().empty());
+ const auto got = test::ToString(p->program());
+ const std::string expected =
+ R"(var<private> x_1 : u32;
var<private> x_2 : vec2<u32>;
@@ -5267,7 +5163,7 @@ fn main_1() {
return;
}
-@stage(fragment)
+@fragment
fn main(@location(1) @interpolate(flat) x_1_param : u32, @location(2) @interpolate(flat) x_2_param : vec2<u32>, @location(3) @interpolate(flat) x_3_param : i32, @location(4) @interpolate(flat) x_4_param : vec2<i32>, @location(5) x_5_param : f32, @location(6) x_6_param : vec2<f32>) {
x_1 = x_1_param;
x_2 = x_2_param;
@@ -5278,7 +5174,7 @@ fn main(@location(1) @interpolate(flat) x_1_param : u32, @location(2) @interpola
main_1();
}
)";
- EXPECT_EQ(got, expected) << got;
+ EXPECT_EQ(got, expected) << got;
}
} // namespace
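For reference, the tests in this file all follow the same round-trip pattern that the hunks above reformat: assemble SPIR-V, build and parse the internal module, print the program as WGSL, and compare against expected text that now uses the @vertex/@fragment spelling instead of the deprecated @stage(...). The sketch below is illustrative only and not part of the patch; it assumes the parser(), test::Assemble() and test::ToString() helpers defined later in this diff and the usual test includes.

TEST_F(SpvModuleScopeVarParserTest, Sketch_RoundTripPattern) {
    // Self-contained module so the sketch does not rely on result IDs that
    // CommonCapabilities()/CommonTypes() may or may not define.
    const std::string assembly = R"(
     OpCapability Shader
     OpMemoryModel Logical Simple
     OpEntryPoint Fragment %main "main"
     OpExecutionMode %main OriginUpperLeft
     %void = OpTypeVoid
     %voidfn = OpTypeFunction %void
     %main = OpFunction %void None %voidfn
     %entry = OpLabel
     OpReturn
     OpFunctionEnd
)";
    auto p = parser(test::Assemble(assembly));

    ASSERT_TRUE(p->BuildAndParseInternalModule());
    EXPECT_TRUE(p->error().empty());
    const auto got = test::ToString(p->program());
    // Entry-point stages are now written as @fragment / @vertex.
    EXPECT_NE(got.find("@fragment"), std::string::npos);
}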
diff --git a/chromium/third_party/dawn/src/tint/reader/spirv/parser_impl_named_types_test.cc b/chromium/third_party/dawn/src/tint/reader/spirv/parser_impl_named_types_test.cc
index 53366e64a29..3fb07a4e8a3 100644
--- a/chromium/third_party/dawn/src/tint/reader/spirv/parser_impl_named_types_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/spirv/parser_impl_named_types_test.cc
@@ -22,36 +22,36 @@ namespace {
using ::testing::HasSubstr;
TEST_F(SpvParserTest, NamedTypes_AnonStruct) {
- auto p = parser(test::Assemble(R"(
+ auto p = parser(test::Assemble(R"(
%uint = OpTypeInt 32 0
%s = OpTypeStruct %uint %uint
)"));
- EXPECT_TRUE(p->BuildAndParseInternalModule());
- EXPECT_THAT(test::ToString(p->program()), HasSubstr("struct S"));
+ EXPECT_TRUE(p->BuildAndParseInternalModule());
+ EXPECT_THAT(test::ToString(p->program()), HasSubstr("struct S"));
- p->DeliberatelyInvalidSpirv();
+ p->DeliberatelyInvalidSpirv();
}
TEST_F(SpvParserTest, NamedTypes_NamedStruct) {
- auto p = parser(test::Assemble(R"(
+ auto p = parser(test::Assemble(R"(
OpName %s "mystruct"
%uint = OpTypeInt 32 0
%s = OpTypeStruct %uint %uint
)"));
- EXPECT_TRUE(p->BuildAndParseInternalModule());
- EXPECT_THAT(test::ToString(p->program()), HasSubstr("struct mystruct"));
+ EXPECT_TRUE(p->BuildAndParseInternalModule());
+ EXPECT_THAT(test::ToString(p->program()), HasSubstr("struct mystruct"));
- p->DeliberatelyInvalidSpirv();
+ p->DeliberatelyInvalidSpirv();
}
TEST_F(SpvParserTest, NamedTypes_Dup_EmitBoth) {
- auto p = parser(test::Assemble(R"(
+ auto p = parser(test::Assemble(R"(
%uint = OpTypeInt 32 0
%s = OpTypeStruct %uint %uint
%s2 = OpTypeStruct %uint %uint
)"));
- EXPECT_TRUE(p->BuildAndParseInternalModule()) << p->error();
- EXPECT_THAT(test::ToString(p->program()), HasSubstr(R"(struct S {
+ EXPECT_TRUE(p->BuildAndParseInternalModule()) << p->error();
+ EXPECT_THAT(test::ToString(p->program()), HasSubstr(R"(struct S {
field0 : u32,
field1 : u32,
}
@@ -61,60 +61,57 @@ struct S_1 {
field1 : u32,
})"));
- p->DeliberatelyInvalidSpirv();
+ p->DeliberatelyInvalidSpirv();
}
// TODO(dneto): Should we make an alias for an un-decorated array with

// an OpName?
TEST_F(SpvParserTest, NamedTypes_AnonRTArrayWithDecoration) {
- // Runtime arrays are always in SSBO, and those are always laid out.
- auto p = parser(test::Assemble(R"(
+ // Runtime arrays are always in SSBO, and those are always laid out.
+ auto p = parser(test::Assemble(R"(
OpDecorate %arr ArrayStride 8
%uint = OpTypeInt 32 0
%arr = OpTypeRuntimeArray %uint
)"));
- EXPECT_TRUE(p->BuildAndParseInternalModule());
- EXPECT_THAT(test::ToString(p->program()),
- HasSubstr("RTArr = @stride(8) array<u32>;\n"));
+ EXPECT_TRUE(p->BuildAndParseInternalModule());
+ EXPECT_THAT(test::ToString(p->program()), HasSubstr("RTArr = @stride(8) array<u32>;\n"));
- p->DeliberatelyInvalidSpirv();
+ p->DeliberatelyInvalidSpirv();
}
TEST_F(SpvParserTest, NamedTypes_AnonRTArray_Dup_EmitBoth) {
- auto p = parser(test::Assemble(R"(
+ auto p = parser(test::Assemble(R"(
OpDecorate %arr ArrayStride 8
OpDecorate %arr2 ArrayStride 8
%uint = OpTypeInt 32 0
%arr = OpTypeRuntimeArray %uint
%arr2 = OpTypeRuntimeArray %uint
)"));
- EXPECT_TRUE(p->BuildAndParseInternalModule());
- EXPECT_THAT(test::ToString(p->program()),
- HasSubstr(R"(type RTArr = @stride(8) array<u32>;
+ EXPECT_TRUE(p->BuildAndParseInternalModule());
+ EXPECT_THAT(test::ToString(p->program()), HasSubstr(R"(type RTArr = @stride(8) array<u32>;
type RTArr_1 = @stride(8) array<u32>;
)"));
- p->DeliberatelyInvalidSpirv();
+ p->DeliberatelyInvalidSpirv();
}
TEST_F(SpvParserTest, NamedTypes_NamedRTArray) {
- auto p = parser(test::Assemble(R"(
+ auto p = parser(test::Assemble(R"(
OpName %arr "myrtarr"
OpDecorate %arr ArrayStride 8
%uint = OpTypeInt 32 0
%arr = OpTypeRuntimeArray %uint
)"));
- EXPECT_TRUE(p->BuildAndParseInternalModule());
- EXPECT_THAT(test::ToString(p->program()),
- HasSubstr("myrtarr = @stride(8) array<u32>;\n"));
+ EXPECT_TRUE(p->BuildAndParseInternalModule());
+ EXPECT_THAT(test::ToString(p->program()), HasSubstr("myrtarr = @stride(8) array<u32>;\n"));
- p->DeliberatelyInvalidSpirv();
+ p->DeliberatelyInvalidSpirv();
}
TEST_F(SpvParserTest, NamedTypes_NamedArray) {
- auto p = parser(test::Assemble(R"(
+ auto p = parser(test::Assemble(R"(
OpName %arr "myarr"
OpDecorate %arr ArrayStride 8
%uint = OpTypeInt 32 0
@@ -122,15 +119,14 @@ TEST_F(SpvParserTest, NamedTypes_NamedArray) {
%arr = OpTypeArray %uint %uint_5
%arr2 = OpTypeArray %uint %uint_5
)"));
- EXPECT_TRUE(p->BuildAndParseInternalModule());
- EXPECT_THAT(test::ToString(p->program()),
- HasSubstr("myarr = @stride(8) array<u32, 5u>;"));
+ EXPECT_TRUE(p->BuildAndParseInternalModule());
+ EXPECT_THAT(test::ToString(p->program()), HasSubstr("myarr = @stride(8) array<u32, 5u>;"));
- p->DeliberatelyInvalidSpirv();
+ p->DeliberatelyInvalidSpirv();
}
TEST_F(SpvParserTest, NamedTypes_AnonArray_Dup_EmitBoth) {
- auto p = parser(test::Assemble(R"(
+ auto p = parser(test::Assemble(R"(
OpDecorate %arr ArrayStride 8
OpDecorate %arr2 ArrayStride 8
%uint = OpTypeInt 32 0
@@ -138,14 +134,13 @@ TEST_F(SpvParserTest, NamedTypes_AnonArray_Dup_EmitBoth) {
%arr = OpTypeArray %uint %uint_5
%arr2 = OpTypeArray %uint %uint_5
)"));
- EXPECT_TRUE(p->BuildAndParseInternalModule());
- EXPECT_THAT(test::ToString(p->program()),
- HasSubstr(R"(type Arr = @stride(8) array<u32, 5u>;
+ EXPECT_TRUE(p->BuildAndParseInternalModule());
+ EXPECT_THAT(test::ToString(p->program()), HasSubstr(R"(type Arr = @stride(8) array<u32, 5u>;
type Arr_1 = @stride(8) array<u32, 5u>;
)"));
- p->DeliberatelyInvalidSpirv();
+ p->DeliberatelyInvalidSpirv();
}
// TODO(dneto): Handle arrays sized by a spec constant.
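Two details behind the tests above are worth stating plainly: an OpName on a type becomes the WGSL alias or struct name, and every module in this file is a deliberately incomplete, types-only snippet, so each test calls DeliberatelyInvalidSpirv() to keep the wrapper destructor (defined later in this diff) from dumping it when dumping of successfully converted modules is enabled. The condensed sketch below is illustrative only; the alias name and array size are made up.

TEST_F(SpvParserTest, Sketch_NamedArrayAlias) {
    auto p = parser(test::Assemble(R"(
     OpName %arr "my_array"
     OpDecorate %arr ArrayStride 8
     %uint = OpTypeInt 32 0
     %uint_4 = OpConstant %uint 4
     %arr = OpTypeArray %uint %uint_4
)"));
    EXPECT_TRUE(p->BuildAndParseInternalModule());
    EXPECT_THAT(test::ToString(p->program()), HasSubstr("my_array = @stride(8) array<u32, 4u>;"));

    // Not a complete SPIR-V module, so opt out of the success dump.
    p->DeliberatelyInvalidSpirv();
}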
diff --git a/chromium/third_party/dawn/src/tint/reader/spirv/parser_impl_test.cc b/chromium/third_party/dawn/src/tint/reader/spirv/parser_impl_test.cc
index 237bb67a3b6..42c0098ad01 100644
--- a/chromium/third_party/dawn/src/tint/reader/spirv/parser_impl_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/spirv/parser_impl_test.cc
@@ -22,24 +22,22 @@ namespace {
using ::testing::HasSubstr;
TEST_F(SpvParserTest, Impl_Uint32VecEmpty) {
- std::vector<uint32_t> data;
- auto p = parser(data);
- EXPECT_FALSE(p->Parse());
- // TODO(dneto): What message?
+ std::vector<uint32_t> data;
+ auto p = parser(data);
+ EXPECT_FALSE(p->Parse());
+ // TODO(dneto): What message?
}
TEST_F(SpvParserTest, Impl_InvalidModuleFails) {
- auto invalid_spv = test::Assemble("%ty = OpTypeInt 3 0");
- auto p = parser(invalid_spv);
- EXPECT_FALSE(p->Parse());
- EXPECT_THAT(
- p->error(),
- HasSubstr("TypeInt cannot appear before the memory model instruction"));
- EXPECT_THAT(p->error(), HasSubstr("OpTypeInt 3 0"));
+ auto invalid_spv = test::Assemble("%ty = OpTypeInt 3 0");
+ auto p = parser(invalid_spv);
+ EXPECT_FALSE(p->Parse());
+ EXPECT_THAT(p->error(), HasSubstr("TypeInt cannot appear before the memory model instruction"));
+ EXPECT_THAT(p->error(), HasSubstr("OpTypeInt 3 0"));
}
TEST_F(SpvParserTest, Impl_GenericVulkanShader_SimpleMemoryModel) {
- auto spv = test::Assemble(R"(
+ auto spv = test::Assemble(R"(
OpCapability Shader
OpMemoryModel Logical Simple
OpEntryPoint GLCompute %main "main"
@@ -51,13 +49,13 @@ TEST_F(SpvParserTest, Impl_GenericVulkanShader_SimpleMemoryModel) {
OpReturn
OpFunctionEnd
)");
- auto p = parser(spv);
- EXPECT_TRUE(p->Parse());
- EXPECT_TRUE(p->error().empty());
+ auto p = parser(spv);
+ EXPECT_TRUE(p->Parse());
+ EXPECT_TRUE(p->error().empty());
}
TEST_F(SpvParserTest, Impl_GenericVulkanShader_GLSL450MemoryModel) {
- auto spv = test::Assemble(R"(
+ auto spv = test::Assemble(R"(
OpCapability Shader
OpMemoryModel Logical GLSL450
OpEntryPoint GLCompute %main "main"
@@ -69,13 +67,13 @@ TEST_F(SpvParserTest, Impl_GenericVulkanShader_GLSL450MemoryModel) {
OpReturn
OpFunctionEnd
)");
- auto p = parser(spv);
- EXPECT_TRUE(p->Parse());
- EXPECT_TRUE(p->error().empty());
+ auto p = parser(spv);
+ EXPECT_TRUE(p->Parse());
+ EXPECT_TRUE(p->error().empty());
}
TEST_F(SpvParserTest, Impl_GenericVulkanShader_VulkanMemoryModel) {
- auto spv = test::Assemble(R"(
+ auto spv = test::Assemble(R"(
OpCapability Shader
OpCapability VulkanMemoryModelKHR
OpExtension "SPV_KHR_vulkan_memory_model"
@@ -89,13 +87,13 @@ TEST_F(SpvParserTest, Impl_GenericVulkanShader_VulkanMemoryModel) {
OpReturn
OpFunctionEnd
)");
- auto p = parser(spv);
- EXPECT_TRUE(p->Parse());
- EXPECT_TRUE(p->error().empty());
+ auto p = parser(spv);
+ EXPECT_TRUE(p->Parse());
+ EXPECT_TRUE(p->error().empty());
}
TEST_F(SpvParserTest, Impl_OpenCLKernel_Fails) {
- auto spv = test::Assemble(R"(
+ auto spv = test::Assemble(R"(
OpCapability Kernel
OpCapability Addresses
OpMemoryModel Physical32 OpenCL
@@ -107,13 +105,13 @@ TEST_F(SpvParserTest, Impl_OpenCLKernel_Fails) {
OpReturn
OpFunctionEnd
)");
- auto p = parser(spv);
- EXPECT_FALSE(p->Parse());
- EXPECT_THAT(p->error(), HasSubstr("Capability Kernel is not allowed"));
+ auto p = parser(spv);
+ EXPECT_FALSE(p->Parse());
+ EXPECT_THAT(p->error(), HasSubstr("Capability Kernel is not allowed"));
}
TEST_F(SpvParserTest, Impl_Source_NoOpLine) {
- auto spv = test::Assemble(R"(
+ auto spv = test::Assemble(R"(
OpCapability Shader
OpMemoryModel Logical Simple
OpEntryPoint GLCompute %main "main"
@@ -127,23 +125,23 @@ TEST_F(SpvParserTest, Impl_Source_NoOpLine) {
OpReturn
OpFunctionEnd
)");
- auto p = parser(spv);
- EXPECT_TRUE(p->Parse());
- EXPECT_TRUE(p->error().empty());
- // Use instruction counting.
- auto s5 = p->GetSourceForResultIdForTest(5);
- EXPECT_EQ(7u, s5.range.begin.line);
- EXPECT_EQ(0u, s5.range.begin.column);
- auto s60 = p->GetSourceForResultIdForTest(60);
- EXPECT_EQ(8u, s60.range.begin.line);
- EXPECT_EQ(0u, s60.range.begin.column);
- auto s1 = p->GetSourceForResultIdForTest(1);
- EXPECT_EQ(10u, s1.range.begin.line);
- EXPECT_EQ(0u, s1.range.begin.column);
+ auto p = parser(spv);
+ EXPECT_TRUE(p->Parse());
+ EXPECT_TRUE(p->error().empty());
+ // Use instruction counting.
+ auto s5 = p->GetSourceForResultIdForTest(5);
+ EXPECT_EQ(7u, s5.range.begin.line);
+ EXPECT_EQ(0u, s5.range.begin.column);
+ auto s60 = p->GetSourceForResultIdForTest(60);
+ EXPECT_EQ(8u, s60.range.begin.line);
+ EXPECT_EQ(0u, s60.range.begin.column);
+ auto s1 = p->GetSourceForResultIdForTest(1);
+ EXPECT_EQ(10u, s1.range.begin.line);
+ EXPECT_EQ(0u, s1.range.begin.column);
}
TEST_F(SpvParserTest, Impl_Source_WithOpLine_WithOpNoLine) {
- auto spv = test::Assemble(R"(
+ auto spv = test::Assemble(R"(
OpCapability Shader
OpMemoryModel Logical Simple
OpEntryPoint GLCompute %main "main"
@@ -160,24 +158,24 @@ TEST_F(SpvParserTest, Impl_Source_WithOpLine_WithOpNoLine) {
OpReturn
OpFunctionEnd
)");
- auto p = parser(spv);
- EXPECT_TRUE(p->Parse());
- EXPECT_TRUE(p->error().empty());
- // Use the information from the OpLine that is still in scope.
- auto s5 = p->GetSourceForResultIdForTest(5);
- EXPECT_EQ(42u, s5.range.begin.line);
- EXPECT_EQ(53u, s5.range.begin.column);
- auto s60 = p->GetSourceForResultIdForTest(60);
- EXPECT_EQ(42u, s60.range.begin.line);
- EXPECT_EQ(53u, s60.range.begin.column);
- // After OpNoLine, revert back to instruction counting.
- auto s1 = p->GetSourceForResultIdForTest(1);
- EXPECT_EQ(14u, s1.range.begin.line);
- EXPECT_EQ(0u, s1.range.begin.column);
+ auto p = parser(spv);
+ EXPECT_TRUE(p->Parse());
+ EXPECT_TRUE(p->error().empty());
+ // Use the information from the OpLine that is still in scope.
+ auto s5 = p->GetSourceForResultIdForTest(5);
+ EXPECT_EQ(42u, s5.range.begin.line);
+ EXPECT_EQ(53u, s5.range.begin.column);
+ auto s60 = p->GetSourceForResultIdForTest(60);
+ EXPECT_EQ(42u, s60.range.begin.line);
+ EXPECT_EQ(53u, s60.range.begin.column);
+ // After OpNoLine, revert back to instruction counting.
+ auto s1 = p->GetSourceForResultIdForTest(1);
+ EXPECT_EQ(14u, s1.range.begin.line);
+ EXPECT_EQ(0u, s1.range.begin.column);
}
TEST_F(SpvParserTest, Impl_Source_InvalidId) {
- auto spv = test::Assemble(R"(
+ auto spv = test::Assemble(R"(
OpCapability Shader
OpMemoryModel Logical Simple
OpEntryPoint GLCompute %main "main"
@@ -190,34 +188,33 @@ TEST_F(SpvParserTest, Impl_Source_InvalidId) {
OpReturn
OpFunctionEnd
)");
- auto p = parser(spv);
- EXPECT_TRUE(p->Parse());
- EXPECT_TRUE(p->error().empty());
- auto s99 = p->GetSourceForResultIdForTest(99);
- EXPECT_EQ(0u, s99.range.begin.line);
- EXPECT_EQ(0u, s99.range.begin.column);
+ auto p = parser(spv);
+ EXPECT_TRUE(p->Parse());
+ EXPECT_TRUE(p->error().empty());
+ auto s99 = p->GetSourceForResultIdForTest(99);
+ EXPECT_EQ(0u, s99.range.begin.line);
+ EXPECT_EQ(0u, s99.range.begin.column);
}
TEST_F(SpvParserTest, Impl_IsValidIdentifier) {
- EXPECT_FALSE(ParserImpl::IsValidIdentifier("")); // empty
- EXPECT_FALSE(ParserImpl::IsValidIdentifier("_"));
- EXPECT_FALSE(ParserImpl::IsValidIdentifier("__"));
- EXPECT_TRUE(ParserImpl::IsValidIdentifier("_x"));
- EXPECT_FALSE(
- ParserImpl::IsValidIdentifier("9")); // leading digit, but ok later
- EXPECT_FALSE(ParserImpl::IsValidIdentifier(" ")); // leading space
- EXPECT_FALSE(ParserImpl::IsValidIdentifier("a ")); // trailing space
- EXPECT_FALSE(ParserImpl::IsValidIdentifier("a 1")); // space in the middle
- EXPECT_FALSE(ParserImpl::IsValidIdentifier(".")); // weird character
+ EXPECT_FALSE(ParserImpl::IsValidIdentifier("")); // empty
+ EXPECT_FALSE(ParserImpl::IsValidIdentifier("_"));
+ EXPECT_FALSE(ParserImpl::IsValidIdentifier("__"));
+ EXPECT_TRUE(ParserImpl::IsValidIdentifier("_x"));
+ EXPECT_FALSE(ParserImpl::IsValidIdentifier("9")); // leading digit, but ok later
+ EXPECT_FALSE(ParserImpl::IsValidIdentifier(" ")); // leading space
+ EXPECT_FALSE(ParserImpl::IsValidIdentifier("a ")); // trailing space
+ EXPECT_FALSE(ParserImpl::IsValidIdentifier("a 1")); // space in the middle
+ EXPECT_FALSE(ParserImpl::IsValidIdentifier(".")); // weird character
- // a simple identifier
- EXPECT_TRUE(ParserImpl::IsValidIdentifier("A"));
- // each upper case letter
- EXPECT_TRUE(ParserImpl::IsValidIdentifier("ABCDEFGHIJKLMNOPQRSTUVWXYZ"));
- // each lower case letter
- EXPECT_TRUE(ParserImpl::IsValidIdentifier("abcdefghijklmnopqrstuvwxyz"));
- EXPECT_TRUE(ParserImpl::IsValidIdentifier("a0123456789")); // each digit
- EXPECT_TRUE(ParserImpl::IsValidIdentifier("x_")); // has underscore
+ // a simple identifier
+ EXPECT_TRUE(ParserImpl::IsValidIdentifier("A"));
+ // each upper case letter
+ EXPECT_TRUE(ParserImpl::IsValidIdentifier("ABCDEFGHIJKLMNOPQRSTUVWXYZ"));
+ // each lower case letter
+ EXPECT_TRUE(ParserImpl::IsValidIdentifier("abcdefghijklmnopqrstuvwxyz"));
+ EXPECT_TRUE(ParserImpl::IsValidIdentifier("a0123456789")); // each digit
+ EXPECT_TRUE(ParserImpl::IsValidIdentifier("x_")); // has underscore
}
} // namespace
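The Impl_Source_* tests above pin down how result IDs map back to Source locations: with no OpLine the parser counts disassembled instruction lines, an OpLine supplies file line and column until an OpNoLine reverts to counting, and unknown IDs come back as a default (0, 0) Source. The self-contained sketch below shows the query side only; the test name and module body are illustrative, not taken from the real suite.

TEST_F(SpvParserTest, Sketch_SourceForUnknownId) {
    auto spv = test::Assemble(R"(
     OpCapability Shader
     OpMemoryModel Logical Simple
     OpEntryPoint GLCompute %main "main"
     OpExecutionMode %main LocalSize 1 1 1
     %void = OpTypeVoid
     %voidfn = OpTypeFunction %void
     %main = OpFunction %void None %voidfn
     %entry = OpLabel
     OpReturn
     OpFunctionEnd
)");
    auto p = parser(spv);
    EXPECT_TRUE(p->Parse());
    EXPECT_TRUE(p->error().empty());
    // An ID that was never defined maps to a default Source of line 0, column 0.
    auto s = p->GetSourceForResultIdForTest(999);
    EXPECT_EQ(0u, s.range.begin.line);
    EXPECT_EQ(0u, s.range.begin.column);
}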
diff --git a/chromium/third_party/dawn/src/tint/reader/spirv/parser_impl_test_helper.cc b/chromium/third_party/dawn/src/tint/reader/spirv/parser_impl_test_helper.cc
index bc62973fdc5..1e52d815ace 100644
--- a/chromium/third_party/dawn/src/tint/reader/spirv/parser_impl_test_helper.cc
+++ b/chromium/third_party/dawn/src/tint/reader/spirv/parser_impl_test_helper.cc
@@ -20,60 +20,57 @@ namespace tint::reader::spirv::test {
// Default to not dumping the SPIR-V assembly.
bool ParserImplWrapperForTest::dump_successfully_converted_spirv_ = false;
-ParserImplWrapperForTest::ParserImplWrapperForTest(
- const std::vector<uint32_t>& input)
+ParserImplWrapperForTest::ParserImplWrapperForTest(const std::vector<uint32_t>& input)
: impl_(input) {}
ParserImplWrapperForTest::~ParserImplWrapperForTest() {
- if (dump_successfully_converted_spirv_ && !skip_dumping_spirv_ &&
- !impl_.spv_binary().empty() && impl_.success()) {
- std::string disassembly = Disassemble(impl_.spv_binary());
- std::cout << "BEGIN ConvertedOk:\n"
- << disassembly << "\nEND ConvertedOk" << std::endl;
- }
+ if (dump_successfully_converted_spirv_ && !skip_dumping_spirv_ && !impl_.spv_binary().empty() &&
+ impl_.success()) {
+ std::string disassembly = Disassemble(impl_.spv_binary());
+ std::cout << "BEGIN ConvertedOk:\n" << disassembly << "\nEND ConvertedOk" << std::endl;
+ }
}
std::string ToString(const Program& program) {
- writer::wgsl::GeneratorImpl writer(&program);
- if (!writer.Generate()) {
- return "WGSL writer error: " + writer.error();
- }
- return writer.result();
+ writer::wgsl::GeneratorImpl writer(&program);
+ if (!writer.Generate()) {
+ return "WGSL writer error: " + writer.error();
+ }
+ return writer.result();
}
std::string ToString(const Program& program, const ast::StatementList& stmts) {
- writer::wgsl::GeneratorImpl writer(&program);
- for (const auto* stmt : stmts) {
- if (!writer.EmitStatement(stmt)) {
- return "WGSL writer error: " + writer.error();
+ writer::wgsl::GeneratorImpl writer(&program);
+ for (const auto* stmt : stmts) {
+ if (!writer.EmitStatement(stmt)) {
+ return "WGSL writer error: " + writer.error();
+ }
}
- }
- return writer.result();
+ return writer.result();
}
std::string ToString(const Program& program, const ast::Node* node) {
- writer::wgsl::GeneratorImpl writer(&program);
- if (auto* expr = node->As<ast::Expression>()) {
- std::stringstream out;
- if (!writer.EmitExpression(out, expr)) {
- return "WGSL writer error: " + writer.error();
- }
- return out.str();
- } else if (auto* stmt = node->As<ast::Statement>()) {
- if (!writer.EmitStatement(stmt)) {
- return "WGSL writer error: " + writer.error();
- }
- } else if (auto* ty = node->As<ast::Type>()) {
- std::stringstream out;
- if (!writer.EmitType(out, ty)) {
- return "WGSL writer error: " + writer.error();
+ writer::wgsl::GeneratorImpl writer(&program);
+ if (auto* expr = node->As<ast::Expression>()) {
+ std::stringstream out;
+ if (!writer.EmitExpression(out, expr)) {
+ return "WGSL writer error: " + writer.error();
+ }
+ return out.str();
+ } else if (auto* stmt = node->As<ast::Statement>()) {
+ if (!writer.EmitStatement(stmt)) {
+ return "WGSL writer error: " + writer.error();
+ }
+ } else if (auto* ty = node->As<ast::Type>()) {
+ std::stringstream out;
+ if (!writer.EmitType(out, ty)) {
+ return "WGSL writer error: " + writer.error();
+ }
+ return out.str();
+ } else {
+ return "<unhandled AST node type " + std::string(node->TypeInfo().name) + ">";
}
- return out.str();
- } else {
- return "<unhandled AST node type " + std::string(node->TypeInfo().name) +
- ">";
- }
- return writer.result();
+ return writer.result();
}
} // namespace tint::reader::spirv::test
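The three ToString() overloads above give the reader tests one entry point for printing a whole program, a statement list, or a single AST node as WGSL; when the writer fails they return a "WGSL writer error: ..." string rather than throwing, and an unhandled node kind is reported as an "<unhandled AST node type ...>" marker. A short usage sketch for the whole-program overload, illustrative only and assuming the usual fixture and includes:

TEST_F(SpvParserTest, Sketch_ToStringWholeProgram) {
    auto p = parser(test::Assemble(R"(
     OpCapability Shader
     OpMemoryModel Logical Simple
     OpEntryPoint GLCompute %main "main"
     OpExecutionMode %main LocalSize 1 1 1
     %void = OpTypeVoid
     %voidfn = OpTypeFunction %void
     %main = OpFunction %void None %voidfn
     %entry = OpLabel
     OpReturn
     OpFunctionEnd
)"));
    ASSERT_TRUE(p->BuildAndParseInternalModule());
    const std::string wgsl = test::ToString(p->program());
    // A successful conversion always declares the entry point function.
    EXPECT_NE(wgsl.find("fn main"), std::string::npos);
}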
diff --git a/chromium/third_party/dawn/src/tint/reader/spirv/parser_impl_test_helper.h b/chromium/third_party/dawn/src/tint/reader/spirv/parser_impl_test_helper.h
index 31640a53baf..7362a2d75ed 100644
--- a/chromium/third_party/dawn/src/tint/reader/spirv/parser_impl_test_helper.h
+++ b/chromium/third_party/dawn/src/tint/reader/spirv/parser_impl_test_helper.h
@@ -39,232 +39,221 @@ namespace test {
/// A test class that wraps ParseImpl
class ParserImplWrapperForTest {
- public:
- /// Constructor
- /// @param input the input data to parse
- explicit ParserImplWrapperForTest(const std::vector<uint32_t>& input);
- /// Dumps SPIR-V if the conversion succeeded, then destroys the wrapper.
- ~ParserImplWrapperForTest();
-
- /// Sets global state to force dumping of the assembly text of succesfully
- /// SPIR-V.
- static void DumpSuccessfullyConvertedSpirv() {
- dump_successfully_converted_spirv_ = true;
- }
- /// Marks the test has having deliberately invalid SPIR-V
- void DeliberatelyInvalidSpirv() { skip_dumping_spirv_ = true; }
- /// Marks the test's SPIR-V as not being suitable for dumping, for a stated
- /// reason.
- void SkipDumpingPending(std::string) { skip_dumping_spirv_ = true; }
-
- /// @returns a new function emitter for the given function ID.
- /// Assumes ParserImpl::BuildInternalRepresentation has been run and
- /// succeeded.
- /// @param function_id the SPIR-V identifier of the function
- FunctionEmitter function_emitter(uint32_t function_id) {
- auto* spirv_function = impl_.ir_context()->GetFunction(function_id);
- return FunctionEmitter(&impl_, *spirv_function);
- }
-
- /// Run the parser
- /// @returns true if the parse was successful, false otherwise.
- bool Parse() { return impl_.Parse(); }
-
- /// @returns the program. The program builder in the parser will be reset
- /// after this.
- Program program() { return impl_.program(); }
-
- /// @returns the namer object
- Namer& namer() { return impl_.namer(); }
-
- /// @returns a reference to the internal builder, without building the
- /// program. To be used only for testing.
- ProgramBuilder& builder() { return impl_.builder(); }
-
- /// @returns the accumulated error string
- const std::string error() { return impl_.error(); }
-
- /// @return true if failure has not yet occurred
- bool success() { return impl_.success(); }
-
- /// Logs failure, ands return a failure stream to accumulate diagnostic
- /// messages. By convention, a failure should only be logged along with
- /// a non-empty string diagnostic.
- /// @returns the failure stream
- FailStream& Fail() { return impl_.Fail(); }
-
- /// @returns a borrowed pointer to the internal representation of the module.
- /// This is null until BuildInternalModule has been called.
- spvtools::opt::IRContext* ir_context() { return impl_.ir_context(); }
-
- /// Builds the internal representation of the SPIR-V module.
- /// Assumes the module is somewhat well-formed. Normally you
- /// would want to validate the SPIR-V module before attempting
- /// to build this internal representation. Also computes a topological
- /// ordering of the functions.
- /// This is a no-op if the parser has already failed.
- /// @returns true if the parser is still successful.
- bool BuildInternalModule() { return impl_.BuildInternalModule(); }
-
- /// Builds an internal representation of the SPIR-V binary,
- /// and parses the module, except functions, into a Tint AST module.
- /// Diagnostics are emitted to the error stream.
- /// @returns true if it was successful.
- bool BuildAndParseInternalModuleExceptFunctions() {
- return impl_.BuildAndParseInternalModuleExceptFunctions();
- }
-
- /// Builds an internal representation of the SPIR-V binary,
- /// and parses it into a Tint AST module. Diagnostics are emitted
- /// to the error stream.
- /// @returns true if it was successful.
- bool BuildAndParseInternalModule() {
- return impl_.BuildAndParseInternalModule();
- }
-
- /// Registers user names for SPIR-V objects, from OpName, and OpMemberName.
- /// Also synthesizes struct field names. Ensures uniqueness for names for
- /// SPIR-V IDs, and uniqueness of names of fields within any single struct.
- /// This is a no-op if the parser has already failed.
- /// @returns true if parser is still successful.
- bool RegisterUserAndStructMemberNames() {
- return impl_.RegisterUserAndStructMemberNames();
- }
-
- /// Register Tint AST types for SPIR-V types, including type aliases as
- /// needed. This is a no-op if the parser has already failed.
- /// @returns true if parser is still successful.
- bool RegisterTypes() { return impl_.RegisterTypes(); }
-
- /// Register sampler and texture usage for memory object declarations.
- /// This must be called after we've registered line numbers for all
- /// instructions. This is a no-op if the parser has already failed.
- /// @returns true if parser is still successful.
- bool RegisterHandleUsage() { return impl_.RegisterHandleUsage(); }
-
- /// Emits module-scope variables.
- /// This is a no-op if the parser has already failed.
- /// @returns true if parser is still successful.
- bool EmitModuleScopeVariables() { return impl_.EmitModuleScopeVariables(); }
-
- /// @returns the set of SPIR-V IDs for imports of the "GLSL.std.450"
- /// extended instruction set.
- const std::unordered_set<uint32_t>& glsl_std_450_imports() const {
- return impl_.glsl_std_450_imports();
- }
-
- /// Converts a SPIR-V type to a Tint type, and saves it for fast lookup.
- /// If the type is only used for builtins, then register that specially,
- /// and return null. If the type is a sampler, image, or sampled image, then
- /// return the Void type, because those opaque types are handled in a
- /// different way.
- /// On failure, logs an error and returns null. This should only be called
- /// after the internal representation of the module has been built.
- /// @param id the SPIR-V ID of a type.
- /// @returns a Tint type, or nullptr
- const Type* ConvertType(uint32_t id) { return impl_.ConvertType(id); }
-
- /// Gets the list of decorations for a SPIR-V result ID. Returns an empty
- /// vector if the ID is not a result ID, or if no decorations target that ID.
- /// The internal representation must have already been built.
- /// @param id SPIR-V ID
- /// @returns the list of decorations on the given ID
- DecorationList GetDecorationsFor(uint32_t id) const {
- return impl_.GetDecorationsFor(id);
- }
-
- /// Gets the list of decorations for the member of a struct. Returns an empty
- /// list if the `id` is not the ID of a struct, or if the member index is out
- /// of range, or if the target member has no decorations.
- /// The internal representation must have already been built.
- /// @param id SPIR-V ID of a struct
- /// @param member_index the member within the struct
- /// @returns the list of decorations on the member
- DecorationList GetDecorationsForMember(uint32_t id,
- uint32_t member_index) const {
- return impl_.GetDecorationsForMember(id, member_index);
- }
-
- /// Converts a SPIR-V struct member decoration into a number of AST
- /// decorations. If the decoration is recognized but deliberately dropped,
- /// then returns an empty list without a diagnostic. On failure, emits a
- /// diagnostic and returns an empty list.
- /// @param struct_type_id the ID of the struct type
- /// @param member_index the index of the member
- /// @param member_ty the type of the member
- /// @param decoration an encoded SPIR-V Decoration
- /// @returns the AST decorations
- ast::AttributeList ConvertMemberDecoration(uint32_t struct_type_id,
- uint32_t member_index,
- const Type* member_ty,
- const Decoration& decoration) {
- return impl_.ConvertMemberDecoration(struct_type_id, member_index,
- member_ty, decoration);
- }
-
- /// For a SPIR-V ID that might define a sampler, image, or sampled image
- /// value, return the SPIR-V instruction that represents the memory object
- /// declaration for the object. If we encounter an OpSampledImage along the
- /// way, follow the image operand when follow_image is true; otherwise follow
- /// the sampler operand. Returns nullptr if we can't trace back to a memory
- /// object declaration. Emits an error and returns nullptr when the scan
- /// fails due to a malformed module. This method can be used any time after
- /// BuildInternalModule has been invoked.
- /// @param id the SPIR-V ID of the sampler, image, or sampled image
- /// @param follow_image indicates whether to follow the image operand of
- /// OpSampledImage
- /// @returns the memory object declaration for the handle, or nullptr
- const spvtools::opt::Instruction* GetMemoryObjectDeclarationForHandle(
- uint32_t id,
- bool follow_image) {
- return impl_.GetMemoryObjectDeclarationForHandle(id, follow_image);
- }
-
- /// @param entry_point the SPIR-V ID of an entry point.
- /// @returns the entry point info for the given ID
- const std::vector<EntryPointInfo>& GetEntryPointInfo(uint32_t entry_point) {
- return impl_.GetEntryPointInfo(entry_point);
- }
-
- /// Returns the handle usage for a memory object declaration.
- /// @param id SPIR-V ID of a sampler or image OpVariable or
- /// OpFunctionParameter
- /// @returns the handle usage, or an empty usage object.
- Usage GetHandleUsage(uint32_t id) const { return impl_.GetHandleUsage(id); }
-
- /// Returns the SPIR-V instruction with the given ID, or nullptr.
- /// @param id the SPIR-V result ID
- /// @returns the instruction, or nullptr on error
- const spvtools::opt::Instruction* GetInstructionForTest(uint32_t id) const {
- return impl_.GetInstructionForTest(id);
- }
-
- /// @returns info about the gl_Position builtin variable.
- const ParserImpl::BuiltInPositionInfo& GetBuiltInPositionInfo() {
- return impl_.GetBuiltInPositionInfo();
- }
-
- /// Returns the source record for the SPIR-V instruction with the given
- /// result ID.
- /// @param id the SPIR-V result id.
- /// @return the Source record, or a default one
- Source GetSourceForResultIdForTest(uint32_t id) const {
- return impl_.GetSourceForResultIdForTest(id);
- }
-
- private:
- ParserImpl impl_;
- /// When true, indicates the input SPIR-V module should not be emitted.
- /// It's either deliberately invalid, or not supported for some pending
- /// reason.
- bool skip_dumping_spirv_ = false;
- static bool dump_successfully_converted_spirv_;
+ public:
+ /// Constructor
+ /// @param input the input data to parse
+ explicit ParserImplWrapperForTest(const std::vector<uint32_t>& input);
+ /// Dumps SPIR-V if the conversion succeeded, then destroys the wrapper.
+ ~ParserImplWrapperForTest();
+
+    /// Sets global state to force dumping of the assembly text of successfully
+    /// converted SPIR-V.
+ static void DumpSuccessfullyConvertedSpirv() { dump_successfully_converted_spirv_ = true; }
+    /// Marks the test as having deliberately invalid SPIR-V
+ void DeliberatelyInvalidSpirv() { skip_dumping_spirv_ = true; }
+ /// Marks the test's SPIR-V as not being suitable for dumping, for a stated
+ /// reason.
+ void SkipDumpingPending(std::string) { skip_dumping_spirv_ = true; }
+
+ /// @returns a new function emitter for the given function ID.
+ /// Assumes ParserImpl::BuildInternalRepresentation has been run and
+ /// succeeded.
+ /// @param function_id the SPIR-V identifier of the function
+ FunctionEmitter function_emitter(uint32_t function_id) {
+ auto* spirv_function = impl_.ir_context()->GetFunction(function_id);
+ return FunctionEmitter(&impl_, *spirv_function);
+ }
+
+ /// Run the parser
+ /// @returns true if the parse was successful, false otherwise.
+ bool Parse() { return impl_.Parse(); }
+
+ /// @returns the program. The program builder in the parser will be reset
+ /// after this.
+ Program program() { return impl_.program(); }
+
+ /// @returns the namer object
+ Namer& namer() { return impl_.namer(); }
+
+ /// @returns a reference to the internal builder, without building the
+ /// program. To be used only for testing.
+ ProgramBuilder& builder() { return impl_.builder(); }
+
+ /// @returns the accumulated error string
+ const std::string error() { return impl_.error(); }
+
+ /// @return true if failure has not yet occurred
+ bool success() { return impl_.success(); }
+
+    /// Logs failure, and returns a failure stream to accumulate diagnostic
+ /// messages. By convention, a failure should only be logged along with
+ /// a non-empty string diagnostic.
+ /// @returns the failure stream
+ FailStream& Fail() { return impl_.Fail(); }
+
+ /// @returns a borrowed pointer to the internal representation of the module.
+ /// This is null until BuildInternalModule has been called.
+ spvtools::opt::IRContext* ir_context() { return impl_.ir_context(); }
+
+ /// Builds the internal representation of the SPIR-V module.
+ /// Assumes the module is somewhat well-formed. Normally you
+ /// would want to validate the SPIR-V module before attempting
+ /// to build this internal representation. Also computes a topological
+ /// ordering of the functions.
+ /// This is a no-op if the parser has already failed.
+ /// @returns true if the parser is still successful.
+ bool BuildInternalModule() { return impl_.BuildInternalModule(); }
+
+ /// Builds an internal representation of the SPIR-V binary,
+ /// and parses the module, except functions, into a Tint AST module.
+ /// Diagnostics are emitted to the error stream.
+ /// @returns true if it was successful.
+ bool BuildAndParseInternalModuleExceptFunctions() {
+ return impl_.BuildAndParseInternalModuleExceptFunctions();
+ }
+
+ /// Builds an internal representation of the SPIR-V binary,
+ /// and parses it into a Tint AST module. Diagnostics are emitted
+ /// to the error stream.
+ /// @returns true if it was successful.
+ bool BuildAndParseInternalModule() { return impl_.BuildAndParseInternalModule(); }
+
+ /// Registers user names for SPIR-V objects, from OpName, and OpMemberName.
+    /// Also synthesizes struct field names. Ensures uniqueness of names for
+ /// SPIR-V IDs, and uniqueness of names of fields within any single struct.
+ /// This is a no-op if the parser has already failed.
+ /// @returns true if parser is still successful.
+ bool RegisterUserAndStructMemberNames() { return impl_.RegisterUserAndStructMemberNames(); }
+
+ /// Register Tint AST types for SPIR-V types, including type aliases as
+ /// needed. This is a no-op if the parser has already failed.
+ /// @returns true if parser is still successful.
+ bool RegisterTypes() { return impl_.RegisterTypes(); }
+
+ /// Register sampler and texture usage for memory object declarations.
+ /// This must be called after we've registered line numbers for all
+ /// instructions. This is a no-op if the parser has already failed.
+ /// @returns true if parser is still successful.
+ bool RegisterHandleUsage() { return impl_.RegisterHandleUsage(); }
+
+ /// Emits module-scope variables.
+ /// This is a no-op if the parser has already failed.
+ /// @returns true if parser is still successful.
+ bool EmitModuleScopeVariables() { return impl_.EmitModuleScopeVariables(); }
+
+ /// @returns the set of SPIR-V IDs for imports of the "GLSL.std.450"
+ /// extended instruction set.
+ const std::unordered_set<uint32_t>& glsl_std_450_imports() const {
+ return impl_.glsl_std_450_imports();
+ }
+
+ /// Converts a SPIR-V type to a Tint type, and saves it for fast lookup.
+ /// If the type is only used for builtins, then register that specially,
+ /// and return null. If the type is a sampler, image, or sampled image, then
+ /// return the Void type, because those opaque types are handled in a
+ /// different way.
+ /// On failure, logs an error and returns null. This should only be called
+ /// after the internal representation of the module has been built.
+ /// @param id the SPIR-V ID of a type.
+ /// @returns a Tint type, or nullptr
+ const Type* ConvertType(uint32_t id) { return impl_.ConvertType(id); }
+
+ /// Gets the list of decorations for a SPIR-V result ID. Returns an empty
+ /// vector if the ID is not a result ID, or if no decorations target that ID.
+ /// The internal representation must have already been built.
+ /// @param id SPIR-V ID
+ /// @returns the list of decorations on the given ID
+ DecorationList GetDecorationsFor(uint32_t id) const { return impl_.GetDecorationsFor(id); }
+
+ /// Gets the list of decorations for the member of a struct. Returns an empty
+ /// list if the `id` is not the ID of a struct, or if the member index is out
+ /// of range, or if the target member has no decorations.
+ /// The internal representation must have already been built.
+ /// @param id SPIR-V ID of a struct
+ /// @param member_index the member within the struct
+ /// @returns the list of decorations on the member
+ DecorationList GetDecorationsForMember(uint32_t id, uint32_t member_index) const {
+ return impl_.GetDecorationsForMember(id, member_index);
+ }
+
+ /// Converts a SPIR-V struct member decoration into a number of AST
+ /// decorations. If the decoration is recognized but deliberately dropped,
+ /// then returns an empty list without a diagnostic. On failure, emits a
+ /// diagnostic and returns an empty list.
+ /// @param struct_type_id the ID of the struct type
+ /// @param member_index the index of the member
+ /// @param member_ty the type of the member
+ /// @param decoration an encoded SPIR-V Decoration
+ /// @returns the AST decorations
+ ast::AttributeList ConvertMemberDecoration(uint32_t struct_type_id,
+ uint32_t member_index,
+ const Type* member_ty,
+ const Decoration& decoration) {
+ return impl_.ConvertMemberDecoration(struct_type_id, member_index, member_ty, decoration);
+ }
+
+ /// For a SPIR-V ID that might define a sampler, image, or sampled image
+ /// value, return the SPIR-V instruction that represents the memory object
+ /// declaration for the object. If we encounter an OpSampledImage along the
+ /// way, follow the image operand when follow_image is true; otherwise follow
+ /// the sampler operand. Returns nullptr if we can't trace back to a memory
+ /// object declaration. Emits an error and returns nullptr when the scan
+ /// fails due to a malformed module. This method can be used any time after
+ /// BuildInternalModule has been invoked.
+ /// @param id the SPIR-V ID of the sampler, image, or sampled image
+ /// @param follow_image indicates whether to follow the image operand of
+ /// OpSampledImage
+ /// @returns the memory object declaration for the handle, or nullptr
+ const spvtools::opt::Instruction* GetMemoryObjectDeclarationForHandle(uint32_t id,
+ bool follow_image) {
+ return impl_.GetMemoryObjectDeclarationForHandle(id, follow_image);
+ }
+
+ /// @param entry_point the SPIR-V ID of an entry point.
+ /// @returns the entry point info for the given ID
+ const std::vector<EntryPointInfo>& GetEntryPointInfo(uint32_t entry_point) {
+ return impl_.GetEntryPointInfo(entry_point);
+ }
+
+ /// Returns the handle usage for a memory object declaration.
+ /// @param id SPIR-V ID of a sampler or image OpVariable or
+ /// OpFunctionParameter
+ /// @returns the handle usage, or an empty usage object.
+ Usage GetHandleUsage(uint32_t id) const { return impl_.GetHandleUsage(id); }
+
+ /// Returns the SPIR-V instruction with the given ID, or nullptr.
+ /// @param id the SPIR-V result ID
+ /// @returns the instruction, or nullptr on error
+ const spvtools::opt::Instruction* GetInstructionForTest(uint32_t id) const {
+ return impl_.GetInstructionForTest(id);
+ }
+
+ /// @returns info about the gl_Position builtin variable.
+ const ParserImpl::BuiltInPositionInfo& GetBuiltInPositionInfo() {
+ return impl_.GetBuiltInPositionInfo();
+ }
+
+ /// Returns the source record for the SPIR-V instruction with the given
+ /// result ID.
+ /// @param id the SPIR-V result id.
+ /// @return the Source record, or a default one
+ Source GetSourceForResultIdForTest(uint32_t id) const {
+ return impl_.GetSourceForResultIdForTest(id);
+ }
+
+ private:
+ ParserImpl impl_;
+ /// When true, indicates the input SPIR-V module should not be emitted.
+ /// It's either deliberately invalid, or not supported for some pending
+ /// reason.
+ bool skip_dumping_spirv_ = false;
+ static bool dump_successfully_converted_spirv_;
};
// Sets global state to force dumping of the assembly text of successfully
// converted SPIR-V.
inline void DumpSuccessfullyConvertedSpirv() {
- ParserImplWrapperForTest::DumpSuccessfullyConvertedSpirv();
+ ParserImplWrapperForTest::DumpSuccessfullyConvertedSpirv();
}
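As the wrapper destructor in parser_impl_test_helper.cc (earlier in this diff) shows, once dumping is enabled every successfully converted module is disassembled to stdout unless the test opted out via DeliberatelyInvalidSpirv() or SkipDumpingPending(). How the flag gets flipped is not part of this patch; the sketch below is a hypothetical test main(), with the --dump-spirv switch invented purely for illustration.

// Assumed includes: <gtest/gtest.h>, <string>, and this helper header.
int main(int argc, char** argv) {
    testing::InitGoogleTest(&argc, argv);
    for (int i = 1; i < argc; ++i) {
        // Hypothetical switch name; the real wiring is not shown in this patch.
        if (std::string(argv[i]) == "--dump-spirv") {
            tint::reader::spirv::test::DumpSuccessfullyConvertedSpirv();
        }
    }
    return RUN_ALL_TESTS();
}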
/// Returns the WGSL printed string of a program.
@@ -289,22 +278,21 @@ std::string ToString(const Program& program, const ast::Node* node);
/// SPIR-V Parser test class
template <typename T>
class SpvParserTestBase : public T {
- public:
- SpvParserTestBase() = default;
- ~SpvParserTestBase() override = default;
-
- /// Retrieves the parser from the helper
- /// @param input the SPIR-V binary to parse
- /// @returns a parser for the given binary
- std::unique_ptr<test::ParserImplWrapperForTest> parser(
- const std::vector<uint32_t>& input) {
- auto parser = std::make_unique<test::ParserImplWrapperForTest>(input);
-
- // Don't run the Resolver when building the program.
- // We're not interested in type information with these tests.
- parser->builder().SetResolveOnBuild(false);
- return parser;
- }
+ public:
+ SpvParserTestBase() = default;
+ ~SpvParserTestBase() override = default;
+
+ /// Retrieves the parser from the helper
+ /// @param input the SPIR-V binary to parse
+ /// @returns a parser for the given binary
+ std::unique_ptr<test::ParserImplWrapperForTest> parser(const std::vector<uint32_t>& input) {
+ auto parser = std::make_unique<test::ParserImplWrapperForTest>(input);
+
+ // Don't run the Resolver when building the program.
+ // We're not interested in type information with these tests.
+ parser->builder().SetResolveOnBuild(false);
+ return parser;
+ }
};
/// SpvParserTest is the base class for SPIR-V reader unit tests.
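The fixture's parser() helper above turns the Resolver off (SetResolveOnBuild(false)) because these reader tests only inspect the generated AST and WGSL text, not semantic type information. SpvParserTestBase is templated on the GoogleTest base so plain and parameterized fixtures can share the helper; the alias below mirrors the form SpvParserTest itself presumably uses, but its exact spelling in Tint is an assumption here.

// Hypothetical fixture alias, matching the pattern SpvParserTest follows.
using SpvParserSketchTest = SpvParserTestBase<::testing::Test>;

TEST_F(SpvParserSketchTest, Sketch_FixtureParser) {
    auto p = parser(test::Assemble(R"(
     %uint = OpTypeInt 32 0
)"));
    // The builder already has SetResolveOnBuild(false), so building the
    // program skips the Resolver entirely.
    EXPECT_TRUE(p->BuildAndParseInternalModule());
    p->DeliberatelyInvalidSpirv();  // types-only snippet, not a valid module
}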
diff --git a/chromium/third_party/dawn/src/tint/reader/spirv/parser_impl_user_name_test.cc b/chromium/third_party/dawn/src/tint/reader/spirv/parser_impl_user_name_test.cc
index 65405127e40..3af1a58f172 100644
--- a/chromium/third_party/dawn/src/tint/reader/spirv/parser_impl_user_name_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/spirv/parser_impl_user_name_test.cc
@@ -24,98 +24,98 @@ using ::testing::Eq;
using SpvParserUserNameTest = SpvParserTest;
TEST_F(SpvParserUserNameTest, UserName_RespectOpName) {
- auto p = parser(test::Assemble(R"(
+ auto p = parser(test::Assemble(R"(
OpName %1 "the_void_type"
%1 = OpTypeVoid
)"));
- EXPECT_TRUE(p->BuildAndParseInternalModule());
- EXPECT_THAT(p->namer().GetName(1), Eq("the_void_type"));
+ EXPECT_TRUE(p->BuildAndParseInternalModule());
+ EXPECT_THAT(p->namer().GetName(1), Eq("the_void_type"));
- p->DeliberatelyInvalidSpirv();
+ p->DeliberatelyInvalidSpirv();
}
TEST_F(SpvParserUserNameTest, UserName_IgnoreEmptyName) {
- auto p = parser(test::Assemble(R"(
+ auto p = parser(test::Assemble(R"(
OpName %1 ""
%1 = OpTypeVoid
)"));
- EXPECT_TRUE(p->BuildAndParseInternalModule());
- EXPECT_FALSE(p->namer().HasName(1));
+ EXPECT_TRUE(p->BuildAndParseInternalModule());
+ EXPECT_FALSE(p->namer().HasName(1));
- p->DeliberatelyInvalidSpirv();
+ p->DeliberatelyInvalidSpirv();
}
TEST_F(SpvParserUserNameTest, UserName_DistinguishDuplicateSuggestion) {
- auto p = parser(test::Assemble(R"(
+ auto p = parser(test::Assemble(R"(
OpName %1 "vanilla"
OpName %2 "vanilla"
%1 = OpTypeVoid
%2 = OpTypeInt 32 0
)"));
- EXPECT_TRUE(p->BuildAndParseInternalModule());
- EXPECT_THAT(p->namer().GetName(1), Eq("vanilla"));
- EXPECT_THAT(p->namer().GetName(2), Eq("vanilla_1"));
+ EXPECT_TRUE(p->BuildAndParseInternalModule());
+ EXPECT_THAT(p->namer().GetName(1), Eq("vanilla"));
+ EXPECT_THAT(p->namer().GetName(2), Eq("vanilla_1"));
- p->DeliberatelyInvalidSpirv();
+ p->DeliberatelyInvalidSpirv();
}
TEST_F(SpvParserUserNameTest, UserName_RespectOpMemberName) {
- auto p = parser(test::Assemble(R"(
+ auto p = parser(test::Assemble(R"(
OpMemberName %3 0 "strawberry"
OpMemberName %3 1 "vanilla"
OpMemberName %3 2 "chocolate"
%2 = OpTypeInt 32 0
%3 = OpTypeStruct %2 %2 %2
)"));
- EXPECT_TRUE(p->BuildAndParseInternalModule());
- EXPECT_THAT(p->namer().GetMemberName(3, 0), Eq("strawberry"));
- EXPECT_THAT(p->namer().GetMemberName(3, 1), Eq("vanilla"));
- EXPECT_THAT(p->namer().GetMemberName(3, 2), Eq("chocolate"));
+ EXPECT_TRUE(p->BuildAndParseInternalModule());
+ EXPECT_THAT(p->namer().GetMemberName(3, 0), Eq("strawberry"));
+ EXPECT_THAT(p->namer().GetMemberName(3, 1), Eq("vanilla"));
+ EXPECT_THAT(p->namer().GetMemberName(3, 2), Eq("chocolate"));
- p->DeliberatelyInvalidSpirv();
+ p->DeliberatelyInvalidSpirv();
}
TEST_F(SpvParserUserNameTest, UserName_IgnoreEmptyMemberName) {
- auto p = parser(test::Assemble(R"(
+ auto p = parser(test::Assemble(R"(
OpMemberName %3 0 ""
%2 = OpTypeInt 32 0
%3 = OpTypeStruct %2
)"));
- EXPECT_TRUE(p->BuildAndParseInternalModule());
- EXPECT_THAT(p->namer().GetMemberName(3, 0), Eq("field0"));
+ EXPECT_TRUE(p->BuildAndParseInternalModule());
+ EXPECT_THAT(p->namer().GetMemberName(3, 0), Eq("field0"));
- p->DeliberatelyInvalidSpirv();
+ p->DeliberatelyInvalidSpirv();
}
TEST_F(SpvParserUserNameTest, UserName_SynthesizeMemberNames) {
- auto p = parser(test::Assemble(R"(
+ auto p = parser(test::Assemble(R"(
%2 = OpTypeInt 32 0
%3 = OpTypeStruct %2 %2 %2
)"));
- EXPECT_TRUE(p->BuildAndParseInternalModule());
- EXPECT_THAT(p->namer().GetMemberName(3, 0), Eq("field0"));
- EXPECT_THAT(p->namer().GetMemberName(3, 1), Eq("field1"));
- EXPECT_THAT(p->namer().GetMemberName(3, 2), Eq("field2"));
+ EXPECT_TRUE(p->BuildAndParseInternalModule());
+ EXPECT_THAT(p->namer().GetMemberName(3, 0), Eq("field0"));
+ EXPECT_THAT(p->namer().GetMemberName(3, 1), Eq("field1"));
+ EXPECT_THAT(p->namer().GetMemberName(3, 2), Eq("field2"));
- p->DeliberatelyInvalidSpirv();
+ p->DeliberatelyInvalidSpirv();
}
TEST_F(SpvParserUserNameTest, UserName_MemberNamesMixUserAndSynthesized) {
- auto p = parser(test::Assemble(R"(
+ auto p = parser(test::Assemble(R"(
OpMemberName %3 1 "vanilla"
%2 = OpTypeInt 32 0
%3 = OpTypeStruct %2 %2 %2
)"));
- EXPECT_TRUE(p->BuildAndParseInternalModule());
- EXPECT_THAT(p->namer().GetMemberName(3, 0), Eq("field0"));
- EXPECT_THAT(p->namer().GetMemberName(3, 1), Eq("vanilla"));
- EXPECT_THAT(p->namer().GetMemberName(3, 2), Eq("field2"));
+ EXPECT_TRUE(p->BuildAndParseInternalModule());
+ EXPECT_THAT(p->namer().GetMemberName(3, 0), Eq("field0"));
+ EXPECT_THAT(p->namer().GetMemberName(3, 1), Eq("vanilla"));
+ EXPECT_THAT(p->namer().GetMemberName(3, 2), Eq("field2"));
- p->DeliberatelyInvalidSpirv();
+ p->DeliberatelyInvalidSpirv();
}
TEST_F(SpvParserUserNameTest, EntryPointNames_AlwaysTakePrecedence) {
- const std::string assembly = R"(
+ const std::string assembly = R"(
OpCapability Shader
OpMemoryModel Logical Simple
OpEntryPoint Vertex %100 "main"
@@ -137,30 +137,30 @@ TEST_F(SpvParserUserNameTest, EntryPointNames_AlwaysTakePrecedence) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
- EXPECT_TRUE(p->BuildAndParseInternalModule());
- // The first entry point grabs the best name, "main"
- EXPECT_THAT(p->namer().Name(100), Eq("main"));
- // The OpName on %1 is overriden because the second entry point
- // has grabbed "main_1" first.
- EXPECT_THAT(p->namer().Name(1), Eq("main_1_1"));
-
- const auto& ep_info = p->GetEntryPointInfo(100);
- ASSERT_EQ(2u, ep_info.size());
- EXPECT_EQ(ep_info[0].name, "main");
- EXPECT_EQ(ep_info[1].name, "main_1");
-
- // This test checks two entry point with the same implementation function.
- // But for the shader stages supported by WGSL, the SPIR-V rules require
- // conflicting execution modes be applied to them.
- // I still want to test the name disambiguation behaviour, but the cases
- // are rejected by SPIR-V validation. This is true at least for the current
- // WGSL feature set.
- p->DeliberatelyInvalidSpirv();
+ auto p = parser(test::Assemble(assembly));
+ EXPECT_TRUE(p->BuildAndParseInternalModule());
+ // The first entry point grabs the best name, "main"
+ EXPECT_THAT(p->namer().Name(100), Eq("main"));
+    // The OpName on %1 is overridden because the second entry point
+ // has grabbed "main_1" first.
+ EXPECT_THAT(p->namer().Name(1), Eq("main_1_1"));
+
+ const auto& ep_info = p->GetEntryPointInfo(100);
+ ASSERT_EQ(2u, ep_info.size());
+ EXPECT_EQ(ep_info[0].name, "main");
+ EXPECT_EQ(ep_info[1].name, "main_1");
+
+    // This test checks two entry points with the same implementation function.
+    // But for the shader stages supported by WGSL, the SPIR-V rules require
+    // that conflicting execution modes be applied to them.
+ // I still want to test the name disambiguation behaviour, but the cases
+ // are rejected by SPIR-V validation. This is true at least for the current
+ // WGSL feature set.
+ p->DeliberatelyInvalidSpirv();
}
TEST_F(SpvParserUserNameTest, EntryPointNames_DistinctFromInnerNames) {
- const std::string assembly = R"(
+ const std::string assembly = R"(
OpCapability Shader
OpMemoryModel Logical Simple
OpEntryPoint Vertex %100 "main"
@@ -182,29 +182,29 @@ TEST_F(SpvParserUserNameTest, EntryPointNames_DistinctFromInnerNames) {
OpReturn
OpFunctionEnd
)";
- auto p = parser(test::Assemble(assembly));
-
- EXPECT_TRUE(p->BuildAndParseInternalModule());
- // The first entry point grabs the best name, "main"
- EXPECT_THAT(p->namer().Name(100), Eq("main"));
- EXPECT_THAT(p->namer().Name(1), Eq("main_1_1"));
-
- const auto ep_info = p->GetEntryPointInfo(100);
- ASSERT_EQ(2u, ep_info.size());
- EXPECT_EQ(ep_info[0].name, "main");
- EXPECT_EQ(ep_info[0].inner_name, "main_2");
- // The second entry point retains its name...
- EXPECT_EQ(ep_info[1].name, "main_1");
- // ...but will use the same implementation function.
- EXPECT_EQ(ep_info[1].inner_name, "main_2");
-
- // This test checks two entry point with the same implementation function.
- // But for the shader stages supported by WGSL, the SPIR-V rules require
- // conflicting execution modes be applied to them.
- // I still want to test the name disambiguation behaviour, but the cases
- // are rejected by SPIR-V validation. This is true at least for the current
- // WGSL feature set.
- p->DeliberatelyInvalidSpirv();
+ auto p = parser(test::Assemble(assembly));
+
+ EXPECT_TRUE(p->BuildAndParseInternalModule());
+ // The first entry point grabs the best name, "main"
+ EXPECT_THAT(p->namer().Name(100), Eq("main"));
+ EXPECT_THAT(p->namer().Name(1), Eq("main_1_1"));
+
+ const auto ep_info = p->GetEntryPointInfo(100);
+ ASSERT_EQ(2u, ep_info.size());
+ EXPECT_EQ(ep_info[0].name, "main");
+ EXPECT_EQ(ep_info[0].inner_name, "main_2");
+ // The second entry point retains its name...
+ EXPECT_EQ(ep_info[1].name, "main_1");
+ // ...but will use the same implementation function.
+ EXPECT_EQ(ep_info[1].inner_name, "main_2");
+
+    // This test checks two entry points with the same implementation function.
+    // But for the shader stages supported by WGSL, the SPIR-V rules require
+    // that conflicting execution modes be applied to them.
+ // I still want to test the name disambiguation behaviour, but the cases
+ // are rejected by SPIR-V validation. This is true at least for the current
+ // WGSL feature set.
+ p->DeliberatelyInvalidSpirv();
}
} // namespace
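The user-name tests above pin down a simple disambiguation policy: user-supplied OpName/OpMemberName strings are kept when non-empty, missing member names are synthesized as "field" plus the member index, entry points claim their names first, and any collision gets a numeric suffix. A minimal standalone sketch of that suffixing rule (illustrative only; this is not Tint's actual Namer interface, and the helper name is made up):

#include <string>
#include <unordered_set>

// Hypothetical helper: hands out `base` on first request and suffixed
// variants ("base_1", "base_2", ...) on later collisions, matching the
// expectations in the tests above ("vanilla" -> "vanilla_1",
// "main_1" -> "main_1_1").
std::string Disambiguate(std::unordered_set<std::string>& used, const std::string& base) {
    if (used.insert(base).second) {
        return base;  // first claimant keeps the plain spelling
    }
    for (int i = 1;; ++i) {
        std::string candidate = base + "_" + std::to_string(i);
        if (used.insert(candidate).second) {
            return candidate;
        }
    }
}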
diff --git a/chromium/third_party/dawn/src/tint/reader/spirv/parser_test.cc b/chromium/third_party/dawn/src/tint/reader/spirv/parser_test.cc
index 304eb3ec5e1..35cb5da8e94 100644
--- a/chromium/third_party/dawn/src/tint/reader/spirv/parser_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/spirv/parser_test.cc
@@ -22,11 +22,11 @@ namespace {
using ParserTest = testing::Test;
TEST_F(ParserTest, DataEmpty) {
- std::vector<uint32_t> data;
- auto program = Parse(data);
- auto errs = diag::Formatter().format(program.Diagnostics());
- ASSERT_FALSE(program.IsValid()) << errs;
- EXPECT_EQ(errs, "error: line:0: Invalid SPIR-V magic number.\n");
+ std::vector<uint32_t> data;
+ auto program = Parse(data);
+ auto errs = diag::Formatter().format(program.Diagnostics());
+ ASSERT_FALSE(program.IsValid()) << errs;
+ EXPECT_EQ(errs, "error: line:0: Invalid SPIR-V magic number.\n");
}
// TODO(dneto): uint32 vec, valid SPIR-V
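The DataEmpty test relies on the parser rejecting input that does not begin with the SPIR-V magic word. A minimal sketch of that precondition check (the helper name is illustrative; the constant 0x07230203 is the magic number defined by the SPIR-V specification):

#include <cstdint>
#include <vector>

// Returns true only if the word stream starts with the SPIR-V magic number,
// so an empty vector - as in the DataEmpty test - is rejected immediately.
bool HasSpirvMagic(const std::vector<uint32_t>& words) {
    constexpr uint32_t kSpirvMagic = 0x07230203u;
    return !words.empty() && words.front() == kSpirvMagic;
}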
diff --git a/chromium/third_party/dawn/src/tint/reader/spirv/parser_type.cc b/chromium/third_party/dawn/src/tint/reader/spirv/parser_type.cc
index dbe4e14a5a8..3332cd4f747 100644
--- a/chromium/third_party/dawn/src/tint/reader/spirv/parser_type.cc
+++ b/chromium/third_party/dawn/src/tint/reader/spirv/parser_type.cc
@@ -49,186 +49,164 @@ namespace tint::reader::spirv {
namespace {
struct PointerHasher {
- size_t operator()(const Pointer& t) const {
- return utils::Hash(t.type, t.storage_class);
- }
+ size_t operator()(const Pointer& t) const { return utils::Hash(t.type, t.storage_class); }
};
struct ReferenceHasher {
- size_t operator()(const Reference& t) const {
- return utils::Hash(t.type, t.storage_class);
- }
+ size_t operator()(const Reference& t) const { return utils::Hash(t.type, t.storage_class); }
};
struct VectorHasher {
- size_t operator()(const Vector& t) const {
- return utils::Hash(t.type, t.size);
- }
+ size_t operator()(const Vector& t) const { return utils::Hash(t.type, t.size); }
};
struct MatrixHasher {
- size_t operator()(const Matrix& t) const {
- return utils::Hash(t.type, t.columns, t.rows);
- }
+ size_t operator()(const Matrix& t) const { return utils::Hash(t.type, t.columns, t.rows); }
};
struct ArrayHasher {
- size_t operator()(const Array& t) const {
- return utils::Hash(t.type, t.size, t.stride);
- }
+ size_t operator()(const Array& t) const { return utils::Hash(t.type, t.size, t.stride); }
};
struct AliasHasher {
- size_t operator()(const Alias& t) const { return utils::Hash(t.name); }
+ size_t operator()(const Alias& t) const { return utils::Hash(t.name); }
};
struct StructHasher {
- size_t operator()(const Struct& t) const { return utils::Hash(t.name); }
+ size_t operator()(const Struct& t) const { return utils::Hash(t.name); }
};
struct SamplerHasher {
- size_t operator()(const Sampler& s) const { return utils::Hash(s.kind); }
+ size_t operator()(const Sampler& s) const { return utils::Hash(s.kind); }
};
struct DepthTextureHasher {
- size_t operator()(const DepthTexture& t) const { return utils::Hash(t.dims); }
+ size_t operator()(const DepthTexture& t) const { return utils::Hash(t.dims); }
};
struct DepthMultisampledTextureHasher {
- size_t operator()(const DepthMultisampledTexture& t) const {
- return utils::Hash(t.dims);
- }
+ size_t operator()(const DepthMultisampledTexture& t) const { return utils::Hash(t.dims); }
};
struct MultisampledTextureHasher {
- size_t operator()(const MultisampledTexture& t) const {
- return utils::Hash(t.dims, t.type);
- }
+ size_t operator()(const MultisampledTexture& t) const { return utils::Hash(t.dims, t.type); }
};
struct SampledTextureHasher {
- size_t operator()(const SampledTexture& t) const {
- return utils::Hash(t.dims, t.type);
- }
+ size_t operator()(const SampledTexture& t) const { return utils::Hash(t.dims, t.type); }
};
struct StorageTextureHasher {
- size_t operator()(const StorageTexture& t) const {
- return utils::Hash(t.dims, t.format, t.access);
- }
+ size_t operator()(const StorageTexture& t) const {
+ return utils::Hash(t.dims, t.format, t.access);
+ }
};
} // namespace
// Equality operators
//! @cond Doxygen_Suppress
static bool operator==(const Pointer& a, const Pointer& b) {
- return a.type == b.type && a.storage_class == b.storage_class;
+ return a.type == b.type && a.storage_class == b.storage_class;
}
static bool operator==(const Reference& a, const Reference& b) {
- return a.type == b.type && a.storage_class == b.storage_class;
+ return a.type == b.type && a.storage_class == b.storage_class;
}
static bool operator==(const Vector& a, const Vector& b) {
- return a.type == b.type && a.size == b.size;
+ return a.type == b.type && a.size == b.size;
}
static bool operator==(const Matrix& a, const Matrix& b) {
- return a.type == b.type && a.columns == b.columns && a.rows == b.rows;
+ return a.type == b.type && a.columns == b.columns && a.rows == b.rows;
}
static bool operator==(const Array& a, const Array& b) {
- return a.type == b.type && a.size == b.size && a.stride == b.stride;
+ return a.type == b.type && a.size == b.size && a.stride == b.stride;
}
static bool operator==(const Named& a, const Named& b) {
- return a.name == b.name;
+ return a.name == b.name;
}
static bool operator==(const Sampler& a, const Sampler& b) {
- return a.kind == b.kind;
+ return a.kind == b.kind;
}
static bool operator==(const DepthTexture& a, const DepthTexture& b) {
- return a.dims == b.dims;
+ return a.dims == b.dims;
}
-static bool operator==(const DepthMultisampledTexture& a,
- const DepthMultisampledTexture& b) {
- return a.dims == b.dims;
+static bool operator==(const DepthMultisampledTexture& a, const DepthMultisampledTexture& b) {
+ return a.dims == b.dims;
}
-static bool operator==(const MultisampledTexture& a,
- const MultisampledTexture& b) {
- return a.dims == b.dims && a.type == b.type;
+static bool operator==(const MultisampledTexture& a, const MultisampledTexture& b) {
+ return a.dims == b.dims && a.type == b.type;
}
static bool operator==(const SampledTexture& a, const SampledTexture& b) {
- return a.dims == b.dims && a.type == b.type;
+ return a.dims == b.dims && a.type == b.type;
}
static bool operator==(const StorageTexture& a, const StorageTexture& b) {
- return a.dims == b.dims && a.format == b.format;
+ return a.dims == b.dims && a.format == b.format;
}
//! @endcond
const ast::Type* Void::Build(ProgramBuilder& b) const {
- return b.ty.void_();
+ return b.ty.void_();
}
const ast::Type* Bool::Build(ProgramBuilder& b) const {
- return b.ty.bool_();
+ return b.ty.bool_();
}
const ast::Type* U32::Build(ProgramBuilder& b) const {
- return b.ty.u32();
+ return b.ty.u32();
}
const ast::Type* F32::Build(ProgramBuilder& b) const {
- return b.ty.f32();
+ return b.ty.f32();
}
const ast::Type* I32::Build(ProgramBuilder& b) const {
- return b.ty.i32();
+ return b.ty.i32();
}
-Pointer::Pointer(const Type* t, ast::StorageClass s)
- : type(t), storage_class(s) {}
+Pointer::Pointer(const Type* t, ast::StorageClass s) : type(t), storage_class(s) {}
Pointer::Pointer(const Pointer&) = default;
const ast::Type* Pointer::Build(ProgramBuilder& b) const {
- return b.ty.pointer(type->Build(b), storage_class);
+ return b.ty.pointer(type->Build(b), storage_class);
}
-Reference::Reference(const Type* t, ast::StorageClass s)
- : type(t), storage_class(s) {}
+Reference::Reference(const Type* t, ast::StorageClass s) : type(t), storage_class(s) {}
Reference::Reference(const Reference&) = default;
const ast::Type* Reference::Build(ProgramBuilder& b) const {
- return type->Build(b);
+ return type->Build(b);
}
Vector::Vector(const Type* t, uint32_t s) : type(t), size(s) {}
Vector::Vector(const Vector&) = default;
const ast::Type* Vector::Build(ProgramBuilder& b) const {
- return b.ty.vec(type->Build(b), size);
+ return b.ty.vec(type->Build(b), size);
}
-Matrix::Matrix(const Type* t, uint32_t c, uint32_t r)
- : type(t), columns(c), rows(r) {}
+Matrix::Matrix(const Type* t, uint32_t c, uint32_t r) : type(t), columns(c), rows(r) {}
Matrix::Matrix(const Matrix&) = default;
const ast::Type* Matrix::Build(ProgramBuilder& b) const {
- return b.ty.mat(type->Build(b), columns, rows);
+ return b.ty.mat(type->Build(b), columns, rows);
}
-Array::Array(const Type* t, uint32_t sz, uint32_t st)
- : type(t), size(sz), stride(st) {}
+Array::Array(const Type* t, uint32_t sz, uint32_t st) : type(t), size(sz), stride(st) {}
Array::Array(const Array&) = default;
const ast::Type* Array::Build(ProgramBuilder& b) const {
- if (size > 0) {
- return b.ty.array(type->Build(b), size, stride);
- } else {
- return b.ty.array(type->Build(b), nullptr, stride);
- }
+ if (size > 0) {
+ return b.ty.array(type->Build(b), u32(size), stride);
+ } else {
+ return b.ty.array(type->Build(b), nullptr, stride);
+ }
}
Sampler::Sampler(ast::SamplerKind k) : kind(k) {}
Sampler::Sampler(const Sampler&) = default;
const ast::Type* Sampler::Build(ProgramBuilder& b) const {
- return b.ty.sampler(kind);
+ return b.ty.sampler(kind);
}
Texture::Texture(ast::TextureDimension d) : dims(d) {}
@@ -238,16 +216,14 @@ DepthTexture::DepthTexture(ast::TextureDimension d) : Base(d) {}
DepthTexture::DepthTexture(const DepthTexture&) = default;
const ast::Type* DepthTexture::Build(ProgramBuilder& b) const {
- return b.ty.depth_texture(dims);
+ return b.ty.depth_texture(dims);
}
-DepthMultisampledTexture::DepthMultisampledTexture(ast::TextureDimension d)
- : Base(d) {}
-DepthMultisampledTexture::DepthMultisampledTexture(
- const DepthMultisampledTexture&) = default;
+DepthMultisampledTexture::DepthMultisampledTexture(ast::TextureDimension d) : Base(d) {}
+DepthMultisampledTexture::DepthMultisampledTexture(const DepthMultisampledTexture&) = default;
const ast::Type* DepthMultisampledTexture::Build(ProgramBuilder& b) const {
- return b.ty.depth_multisampled_texture(dims);
+ return b.ty.depth_multisampled_texture(dims);
}
MultisampledTexture::MultisampledTexture(ast::TextureDimension d, const Type* t)
@@ -255,25 +231,22 @@ MultisampledTexture::MultisampledTexture(ast::TextureDimension d, const Type* t)
MultisampledTexture::MultisampledTexture(const MultisampledTexture&) = default;
const ast::Type* MultisampledTexture::Build(ProgramBuilder& b) const {
- return b.ty.multisampled_texture(dims, type->Build(b));
+ return b.ty.multisampled_texture(dims, type->Build(b));
}
-SampledTexture::SampledTexture(ast::TextureDimension d, const Type* t)
- : Base(d), type(t) {}
+SampledTexture::SampledTexture(ast::TextureDimension d, const Type* t) : Base(d), type(t) {}
SampledTexture::SampledTexture(const SampledTexture&) = default;
const ast::Type* SampledTexture::Build(ProgramBuilder& b) const {
- return b.ty.sampled_texture(dims, type->Build(b));
+ return b.ty.sampled_texture(dims, type->Build(b));
}
-StorageTexture::StorageTexture(ast::TextureDimension d,
- ast::TexelFormat f,
- ast::Access a)
+StorageTexture::StorageTexture(ast::TextureDimension d, ast::TexelFormat f, ast::Access a)
: Base(d), format(f), access(a) {}
StorageTexture::StorageTexture(const StorageTexture&) = default;
const ast::Type* StorageTexture::Build(ProgramBuilder& b) const {
- return b.ty.storage_texture(dims, format, access);
+ return b.ty.storage_texture(dims, format, access);
}
Named::Named(Symbol n) : name(n) {}
@@ -284,7 +257,7 @@ Alias::Alias(Symbol n, const Type* ty) : Base(n), type(ty) {}
Alias::Alias(const Alias&) = default;
const ast::Type* Alias::Build(ProgramBuilder& b) const {
- return b.ty.type_name(name);
+ return b.ty.type_name(name);
}
Struct::Struct(Symbol n, TypeList m) : Base(n), members(std::move(m)) {}
@@ -292,339 +265,323 @@ Struct::Struct(const Struct&) = default;
Struct::~Struct() = default;
const ast::Type* Struct::Build(ProgramBuilder& b) const {
- return b.ty.type_name(name);
+ return b.ty.type_name(name);
}
/// The PIMPL state of the Types object.
struct TypeManager::State {
- /// The allocator of primitive types
- utils::BlockAllocator<Type> allocator_;
- /// The lazily-created Void type
- spirv::Void const* void_ = nullptr;
- /// The lazily-created Bool type
- spirv::Bool const* bool_ = nullptr;
- /// The lazily-created U32 type
- spirv::U32 const* u32_ = nullptr;
- /// The lazily-created F32 type
- spirv::F32 const* f32_ = nullptr;
- /// The lazily-created I32 type
- spirv::I32 const* i32_ = nullptr;
- /// Unique Pointer instances
- utils::UniqueAllocator<spirv::Pointer, PointerHasher> pointers_;
- /// Unique Reference instances
- utils::UniqueAllocator<spirv::Reference, ReferenceHasher> references_;
- /// Unique Vector instances
- utils::UniqueAllocator<spirv::Vector, VectorHasher> vectors_;
- /// Unique Matrix instances
- utils::UniqueAllocator<spirv::Matrix, MatrixHasher> matrices_;
- /// Unique Array instances
- utils::UniqueAllocator<spirv::Array, ArrayHasher> arrays_;
- /// Unique Alias instances
- utils::UniqueAllocator<spirv::Alias, AliasHasher> aliases_;
- /// Unique Struct instances
- utils::UniqueAllocator<spirv::Struct, StructHasher> structs_;
- /// Unique Sampler instances
- utils::UniqueAllocator<spirv::Sampler, SamplerHasher> samplers_;
- /// Unique DepthTexture instances
- utils::UniqueAllocator<spirv::DepthTexture, DepthTextureHasher>
- depth_textures_;
- /// Unique DepthMultisampledTexture instances
- utils::UniqueAllocator<spirv::DepthMultisampledTexture,
- DepthMultisampledTextureHasher>
- depth_multisampled_textures_;
- /// Unique MultisampledTexture instances
- utils::UniqueAllocator<spirv::MultisampledTexture, MultisampledTextureHasher>
- multisampled_textures_;
- /// Unique SampledTexture instances
- utils::UniqueAllocator<spirv::SampledTexture, SampledTextureHasher>
- sampled_textures_;
- /// Unique StorageTexture instances
- utils::UniqueAllocator<spirv::StorageTexture, StorageTextureHasher>
- storage_textures_;
+ /// The allocator of primitive types
+ utils::BlockAllocator<Type> allocator_;
+ /// The lazily-created Void type
+ spirv::Void const* void_ = nullptr;
+ /// The lazily-created Bool type
+ spirv::Bool const* bool_ = nullptr;
+ /// The lazily-created U32 type
+ spirv::U32 const* u32_ = nullptr;
+ /// The lazily-created F32 type
+ spirv::F32 const* f32_ = nullptr;
+ /// The lazily-created I32 type
+ spirv::I32 const* i32_ = nullptr;
+ /// Unique Pointer instances
+ utils::UniqueAllocator<spirv::Pointer, PointerHasher> pointers_;
+ /// Unique Reference instances
+ utils::UniqueAllocator<spirv::Reference, ReferenceHasher> references_;
+ /// Unique Vector instances
+ utils::UniqueAllocator<spirv::Vector, VectorHasher> vectors_;
+ /// Unique Matrix instances
+ utils::UniqueAllocator<spirv::Matrix, MatrixHasher> matrices_;
+ /// Unique Array instances
+ utils::UniqueAllocator<spirv::Array, ArrayHasher> arrays_;
+ /// Unique Alias instances
+ utils::UniqueAllocator<spirv::Alias, AliasHasher> aliases_;
+ /// Unique Struct instances
+ utils::UniqueAllocator<spirv::Struct, StructHasher> structs_;
+ /// Unique Sampler instances
+ utils::UniqueAllocator<spirv::Sampler, SamplerHasher> samplers_;
+ /// Unique DepthTexture instances
+ utils::UniqueAllocator<spirv::DepthTexture, DepthTextureHasher> depth_textures_;
+ /// Unique DepthMultisampledTexture instances
+ utils::UniqueAllocator<spirv::DepthMultisampledTexture, DepthMultisampledTextureHasher>
+ depth_multisampled_textures_;
+ /// Unique MultisampledTexture instances
+ utils::UniqueAllocator<spirv::MultisampledTexture, MultisampledTextureHasher>
+ multisampled_textures_;
+ /// Unique SampledTexture instances
+ utils::UniqueAllocator<spirv::SampledTexture, SampledTextureHasher> sampled_textures_;
+ /// Unique StorageTexture instances
+ utils::UniqueAllocator<spirv::StorageTexture, StorageTextureHasher> storage_textures_;
};
const Type* Type::UnwrapPtr() const {
- const Type* type = this;
- while (auto* ptr = type->As<Pointer>()) {
- type = ptr->type;
- }
- return type;
+ const Type* type = this;
+ while (auto* ptr = type->As<Pointer>()) {
+ type = ptr->type;
+ }
+ return type;
}
const Type* Type::UnwrapRef() const {
- const Type* type = this;
- while (auto* ptr = type->As<Reference>()) {
- type = ptr->type;
- }
- return type;
+ const Type* type = this;
+ while (auto* ptr = type->As<Reference>()) {
+ type = ptr->type;
+ }
+ return type;
}
const Type* Type::UnwrapAlias() const {
- const Type* type = this;
- while (auto* alias = type->As<Alias>()) {
- type = alias->type;
- }
- return type;
+ const Type* type = this;
+ while (auto* alias = type->As<Alias>()) {
+ type = alias->type;
+ }
+ return type;
}
const Type* Type::UnwrapAll() const {
- auto* type = this;
- while (true) {
- if (auto* alias = type->As<Alias>()) {
- type = alias->type;
- } else if (auto* ptr = type->As<Pointer>()) {
- type = ptr->type;
- } else {
- break;
+ auto* type = this;
+ while (true) {
+ if (auto* alias = type->As<Alias>()) {
+ type = alias->type;
+ } else if (auto* ptr = type->As<Pointer>()) {
+ type = ptr->type;
+ } else {
+ break;
+ }
}
- }
- return type;
+ return type;
}
bool Type::IsFloatScalar() const {
- return Is<F32>();
+ return Is<F32>();
}
bool Type::IsFloatScalarOrVector() const {
- return IsFloatScalar() || IsFloatVector();
+ return IsFloatScalar() || IsFloatVector();
}
bool Type::IsFloatVector() const {
- return Is([](const Vector* v) { return v->type->IsFloatScalar(); });
+ return Is([](const Vector* v) { return v->type->IsFloatScalar(); });
}
bool Type::IsIntegerScalar() const {
- return IsAnyOf<U32, I32>();
+ return IsAnyOf<U32, I32>();
}
bool Type::IsIntegerScalarOrVector() const {
- return IsUnsignedScalarOrVector() || IsSignedScalarOrVector();
+ return IsUnsignedScalarOrVector() || IsSignedScalarOrVector();
}
bool Type::IsScalar() const {
- return IsAnyOf<F32, U32, I32, Bool>();
+ return IsAnyOf<F32, U32, I32, Bool>();
}
bool Type::IsSignedIntegerVector() const {
- return Is([](const Vector* v) { return v->type->Is<I32>(); });
+ return Is([](const Vector* v) { return v->type->Is<I32>(); });
}
bool Type::IsSignedScalarOrVector() const {
- return Is<I32>() || IsSignedIntegerVector();
+ return Is<I32>() || IsSignedIntegerVector();
}
bool Type::IsUnsignedIntegerVector() const {
- return Is([](const Vector* v) { return v->type->Is<U32>(); });
+ return Is([](const Vector* v) { return v->type->Is<U32>(); });
}
bool Type::IsUnsignedScalarOrVector() const {
- return Is<U32>() || IsUnsignedIntegerVector();
+ return Is<U32>() || IsUnsignedIntegerVector();
}
TypeManager::TypeManager() {
- state = std::make_unique<State>();
+ state = std::make_unique<State>();
}
TypeManager::~TypeManager() = default;
const spirv::Void* TypeManager::Void() {
- if (!state->void_) {
- state->void_ = state->allocator_.Create<spirv::Void>();
- }
- return state->void_;
+ if (!state->void_) {
+ state->void_ = state->allocator_.Create<spirv::Void>();
+ }
+ return state->void_;
}
const spirv::Bool* TypeManager::Bool() {
- if (!state->bool_) {
- state->bool_ = state->allocator_.Create<spirv::Bool>();
- }
- return state->bool_;
+ if (!state->bool_) {
+ state->bool_ = state->allocator_.Create<spirv::Bool>();
+ }
+ return state->bool_;
}
const spirv::U32* TypeManager::U32() {
- if (!state->u32_) {
- state->u32_ = state->allocator_.Create<spirv::U32>();
- }
- return state->u32_;
+ if (!state->u32_) {
+ state->u32_ = state->allocator_.Create<spirv::U32>();
+ }
+ return state->u32_;
}
const spirv::F32* TypeManager::F32() {
- if (!state->f32_) {
- state->f32_ = state->allocator_.Create<spirv::F32>();
- }
- return state->f32_;
+ if (!state->f32_) {
+ state->f32_ = state->allocator_.Create<spirv::F32>();
+ }
+ return state->f32_;
}
const spirv::I32* TypeManager::I32() {
- if (!state->i32_) {
- state->i32_ = state->allocator_.Create<spirv::I32>();
- }
- return state->i32_;
+ if (!state->i32_) {
+ state->i32_ = state->allocator_.Create<spirv::I32>();
+ }
+ return state->i32_;
}
-const spirv::Pointer* TypeManager::Pointer(const Type* el,
- ast::StorageClass sc) {
- return state->pointers_.Get(el, sc);
+const spirv::Pointer* TypeManager::Pointer(const Type* el, ast::StorageClass sc) {
+ return state->pointers_.Get(el, sc);
}
-const spirv::Reference* TypeManager::Reference(const Type* el,
- ast::StorageClass sc) {
- return state->references_.Get(el, sc);
+const spirv::Reference* TypeManager::Reference(const Type* el, ast::StorageClass sc) {
+ return state->references_.Get(el, sc);
}
const spirv::Vector* TypeManager::Vector(const Type* el, uint32_t size) {
- return state->vectors_.Get(el, size);
+ return state->vectors_.Get(el, size);
}
-const spirv::Matrix* TypeManager::Matrix(const Type* el,
- uint32_t columns,
- uint32_t rows) {
- return state->matrices_.Get(el, columns, rows);
+const spirv::Matrix* TypeManager::Matrix(const Type* el, uint32_t columns, uint32_t rows) {
+ return state->matrices_.Get(el, columns, rows);
}
-const spirv::Array* TypeManager::Array(const Type* el,
- uint32_t size,
- uint32_t stride) {
- return state->arrays_.Get(el, size, stride);
+const spirv::Array* TypeManager::Array(const Type* el, uint32_t size, uint32_t stride) {
+ return state->arrays_.Get(el, size, stride);
}
const spirv::Alias* TypeManager::Alias(Symbol name, const Type* ty) {
- return state->aliases_.Get(name, ty);
+ return state->aliases_.Get(name, ty);
}
const spirv::Struct* TypeManager::Struct(Symbol name, TypeList members) {
- return state->structs_.Get(name, std::move(members));
+ return state->structs_.Get(name, std::move(members));
}
const spirv::Sampler* TypeManager::Sampler(ast::SamplerKind kind) {
- return state->samplers_.Get(kind);
+ return state->samplers_.Get(kind);
}
-const spirv::DepthTexture* TypeManager::DepthTexture(
- ast::TextureDimension dims) {
- return state->depth_textures_.Get(dims);
+const spirv::DepthTexture* TypeManager::DepthTexture(ast::TextureDimension dims) {
+ return state->depth_textures_.Get(dims);
}
const spirv::DepthMultisampledTexture* TypeManager::DepthMultisampledTexture(
ast::TextureDimension dims) {
- return state->depth_multisampled_textures_.Get(dims);
+ return state->depth_multisampled_textures_.Get(dims);
}
-const spirv::MultisampledTexture* TypeManager::MultisampledTexture(
- ast::TextureDimension dims,
- const Type* ty) {
- return state->multisampled_textures_.Get(dims, ty);
+const spirv::MultisampledTexture* TypeManager::MultisampledTexture(ast::TextureDimension dims,
+ const Type* ty) {
+ return state->multisampled_textures_.Get(dims, ty);
}
-const spirv::SampledTexture* TypeManager::SampledTexture(
- ast::TextureDimension dims,
- const Type* ty) {
- return state->sampled_textures_.Get(dims, ty);
+const spirv::SampledTexture* TypeManager::SampledTexture(ast::TextureDimension dims,
+ const Type* ty) {
+ return state->sampled_textures_.Get(dims, ty);
}
-const spirv::StorageTexture* TypeManager::StorageTexture(
- ast::TextureDimension dims,
- ast::TexelFormat fmt,
- ast::Access access) {
- return state->storage_textures_.Get(dims, fmt, access);
+const spirv::StorageTexture* TypeManager::StorageTexture(ast::TextureDimension dims,
+ ast::TexelFormat fmt,
+ ast::Access access) {
+ return state->storage_textures_.Get(dims, fmt, access);
}
// Debug String() methods for Type classes. Only enabled in debug builds.
#ifndef NDEBUG
std::string Void::String() const {
- return "void";
+ return "void";
}
std::string Bool::String() const {
- return "bool";
+ return "bool";
}
std::string U32::String() const {
- return "u32";
+ return "u32";
}
std::string F32::String() const {
- return "f32";
+ return "f32";
}
std::string I32::String() const {
- return "i32";
+ return "i32";
}
std::string Pointer::String() const {
- std::stringstream ss;
- ss << "ptr<" << std::string(ast::ToString(storage_class)) << ", "
- << type->String() + ">";
- return ss.str();
+ std::stringstream ss;
+ ss << "ptr<" << std::string(ast::ToString(storage_class)) << ", " << type->String() + ">";
+ return ss.str();
}
std::string Reference::String() const {
- std::stringstream ss;
- ss << "ref<" + std::string(ast::ToString(storage_class)) << ", "
- << type->String() << ">";
- return ss.str();
+ std::stringstream ss;
+ ss << "ref<" + std::string(ast::ToString(storage_class)) << ", " << type->String() << ">";
+ return ss.str();
}
std::string Vector::String() const {
- std::stringstream ss;
- ss << "vec" << size << "<" << type->String() << ">";
- return ss.str();
+ std::stringstream ss;
+ ss << "vec" << size << "<" << type->String() << ">";
+ return ss.str();
}
std::string Matrix::String() const {
- std::stringstream ss;
- ss << "mat" << columns << "x" << rows << "<" << type->String() << ">";
- return ss.str();
+ std::stringstream ss;
+ ss << "mat" << columns << "x" << rows << "<" << type->String() << ">";
+ return ss.str();
}
std::string Array::String() const {
- std::stringstream ss;
- ss << "array<" << type->String() << ", " << size << ", " << stride << ">";
- return ss.str();
+ std::stringstream ss;
+ ss << "array<" << type->String() << ", " << size << ", " << stride << ">";
+ return ss.str();
}
std::string Sampler::String() const {
- switch (kind) {
- case ast::SamplerKind::kSampler:
- return "sampler";
- case ast::SamplerKind::kComparisonSampler:
- return "sampler_comparison";
- }
- return "<unknown sampler>";
+ switch (kind) {
+ case ast::SamplerKind::kSampler:
+ return "sampler";
+ case ast::SamplerKind::kComparisonSampler:
+ return "sampler_comparison";
+ }
+ return "<unknown sampler>";
}
std::string DepthTexture::String() const {
- std::stringstream ss;
- ss << "depth_" << dims;
- return ss.str();
+ std::stringstream ss;
+ ss << "depth_" << dims;
+ return ss.str();
}
std::string DepthMultisampledTexture::String() const {
- std::stringstream ss;
- ss << "depth_multisampled_" << dims;
- return ss.str();
+ std::stringstream ss;
+ ss << "depth_multisampled_" << dims;
+ return ss.str();
}
std::string MultisampledTexture::String() const {
- std::stringstream ss;
- ss << "texture_multisampled_" << dims << "<" << type << ">";
- return ss.str();
+ std::stringstream ss;
+ ss << "texture_multisampled_" << dims << "<" << type << ">";
+ return ss.str();
}
std::string SampledTexture::String() const {
- std::stringstream ss;
- ss << "texture_" << dims << "<" << type << ">";
- return ss.str();
+ std::stringstream ss;
+ ss << "texture_" << dims << "<" << type << ">";
+ return ss.str();
}
std::string StorageTexture::String() const {
- std::stringstream ss;
- ss << "texture_storage_" << dims << "<" << format << ", " << access << ">";
- return ss.str();
+ std::stringstream ss;
+ ss << "texture_storage_" << dims << "<" << format << ", " << access << ">";
+ return ss.str();
}
std::string Named::String() const {
- return name.to_str();
+ return name.to_str();
}
#endif // NDEBUG
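The TypeManager above interns every composite type through utils::UniqueAllocator, so repeated requests with the same arguments return the same pointer and type identity reduces to a pointer comparison. A simplified, self-contained sketch of that interning pattern (not the real UniqueAllocator; the Vec and VecInterner names are made up for illustration):

#include <cstdint>
#include <map>
#include <memory>
#include <utility>

// Stand-in for a parser type node keyed by (element type, lane count).
struct Vec {
    const void* element;
    uint32_t size;
};

class VecInterner {
  public:
    // Same arguments always return the same pointer, created on first use.
    const Vec* Get(const void* element, uint32_t size) {
        auto& slot = cache_[{element, size}];
        if (!slot) {
            slot = std::make_unique<Vec>(Vec{element, size});
        }
        return slot.get();
    }

  private:
    std::map<std::pair<const void*, uint32_t>, std::unique_ptr<Vec>> cache_;
};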
diff --git a/chromium/third_party/dawn/src/tint/reader/spirv/parser_type.h b/chromium/third_party/dawn/src/tint/reader/spirv/parser_type.h
index 6225a852c9f..605ac9b0b6e 100644
--- a/chromium/third_party/dawn/src/tint/reader/spirv/parser_type.h
+++ b/chromium/third_party/dawn/src/tint/reader/spirv/parser_type.h
@@ -39,51 +39,51 @@ namespace tint::reader::spirv {
/// Type is the base class for all types
class Type : public Castable<Type> {
- public:
- /// @param b the ProgramBuilder used to construct the AST types
- /// @returns the constructed ast::Type node for the given type
- virtual const ast::Type* Build(ProgramBuilder& b) const = 0;
-
- /// @returns the inner most store type if this is a pointer, `this` otherwise
- const Type* UnwrapPtr() const;
-
- /// @returns the inner most store type if this is a reference, `this`
- /// otherwise
- const Type* UnwrapRef() const;
-
- /// @returns the inner most aliased type if this is an alias, `this` otherwise
- const Type* UnwrapAlias() const;
-
- /// @returns the type with all aliasing, access control and pointers removed
- const Type* UnwrapAll() const;
-
- /// @returns true if this type is a float scalar
- bool IsFloatScalar() const;
- /// @returns true if this type is a float scalar or vector
- bool IsFloatScalarOrVector() const;
- /// @returns true if this type is a float vector
- bool IsFloatVector() const;
- /// @returns true if this type is an integer scalar
- bool IsIntegerScalar() const;
- /// @returns true if this type is an integer scalar or vector
- bool IsIntegerScalarOrVector() const;
- /// @returns true if this type is a scalar
- bool IsScalar() const;
- /// @returns true if this type is a signed integer vector
- bool IsSignedIntegerVector() const;
- /// @returns true if this type is a signed scalar or vector
- bool IsSignedScalarOrVector() const;
- /// @returns true if this type is an unsigned integer vector
- bool IsUnsignedIntegerVector() const;
- /// @returns true if this type is an unsigned scalar or vector
- bool IsUnsignedScalarOrVector() const;
+ public:
+ /// @param b the ProgramBuilder used to construct the AST types
+ /// @returns the constructed ast::Type node for the given type
+ virtual const ast::Type* Build(ProgramBuilder& b) const = 0;
+
+    /// @returns the innermost store type if this is a pointer, `this` otherwise
+ const Type* UnwrapPtr() const;
+
+    /// @returns the innermost store type if this is a reference, `this`
+ /// otherwise
+ const Type* UnwrapRef() const;
+
+    /// @returns the innermost aliased type if this is an alias, `this` otherwise
+ const Type* UnwrapAlias() const;
+
+ /// @returns the type with all aliasing, access control and pointers removed
+ const Type* UnwrapAll() const;
+
+ /// @returns true if this type is a float scalar
+ bool IsFloatScalar() const;
+ /// @returns true if this type is a float scalar or vector
+ bool IsFloatScalarOrVector() const;
+ /// @returns true if this type is a float vector
+ bool IsFloatVector() const;
+ /// @returns true if this type is an integer scalar
+ bool IsIntegerScalar() const;
+ /// @returns true if this type is an integer scalar or vector
+ bool IsIntegerScalarOrVector() const;
+ /// @returns true if this type is a scalar
+ bool IsScalar() const;
+ /// @returns true if this type is a signed integer vector
+ bool IsSignedIntegerVector() const;
+ /// @returns true if this type is a signed scalar or vector
+ bool IsSignedScalarOrVector() const;
+ /// @returns true if this type is an unsigned integer vector
+ bool IsUnsignedIntegerVector() const;
+ /// @returns true if this type is an unsigned scalar or vector
+ bool IsUnsignedScalarOrVector() const;
#ifdef NDEBUG
- /// @returns "<no-type-info>", for debug purposes only
- std::string String() const { return "<no-type-info>"; }
+ /// @returns "<no-type-info>", for debug purposes only
+ std::string String() const { return "<no-type-info>"; }
#else
- /// @returns a string representation of the type, for debug purposes only
- virtual std::string String() const = 0;
+ /// @returns a string representation of the type, for debug purposes only
+ virtual std::string String() const = 0;
#endif // NDEBUG
};
@@ -92,515 +92,510 @@ using TypeList = std::vector<const Type*>;
/// `void` type
struct Void final : public Castable<Void, Type> {
- /// @param b the ProgramBuilder used to construct the AST types
- /// @returns the constructed ast::Type node for the given type
- const ast::Type* Build(ProgramBuilder& b) const override;
+ /// @param b the ProgramBuilder used to construct the AST types
+ /// @returns the constructed ast::Type node for the given type
+ const ast::Type* Build(ProgramBuilder& b) const override;
#ifndef NDEBUG
- /// @returns a string representation of the type, for debug purposes only
- std::string String() const override;
+ /// @returns a string representation of the type, for debug purposes only
+ std::string String() const override;
#endif // NDEBUG
};
/// `bool` type
struct Bool final : public Castable<Bool, Type> {
- /// @param b the ProgramBuilder used to construct the AST types
- /// @returns the constructed ast::Type node for the given type
- const ast::Type* Build(ProgramBuilder& b) const override;
+ /// @param b the ProgramBuilder used to construct the AST types
+ /// @returns the constructed ast::Type node for the given type
+ const ast::Type* Build(ProgramBuilder& b) const override;
#ifndef NDEBUG
- /// @returns a string representation of the type, for debug purposes only
- std::string String() const override;
+ /// @returns a string representation of the type, for debug purposes only
+ std::string String() const override;
#endif // NDEBUG
};
/// `u32` type
struct U32 final : public Castable<U32, Type> {
- /// @param b the ProgramBuilder used to construct the AST types
- /// @returns the constructed ast::Type node for the given type
- const ast::Type* Build(ProgramBuilder& b) const override;
+ /// @param b the ProgramBuilder used to construct the AST types
+ /// @returns the constructed ast::Type node for the given type
+ const ast::Type* Build(ProgramBuilder& b) const override;
#ifndef NDEBUG
- /// @returns a string representation of the type, for debug purposes only
- std::string String() const override;
+ /// @returns a string representation of the type, for debug purposes only
+ std::string String() const override;
#endif // NDEBUG
};
/// `f32` type
struct F32 final : public Castable<F32, Type> {
- /// @param b the ProgramBuilder used to construct the AST types
- /// @returns the constructed ast::Type node for the given type
- const ast::Type* Build(ProgramBuilder& b) const override;
+ /// @param b the ProgramBuilder used to construct the AST types
+ /// @returns the constructed ast::Type node for the given type
+ const ast::Type* Build(ProgramBuilder& b) const override;
#ifndef NDEBUG
- /// @returns a string representation of the type, for debug purposes only
- std::string String() const override;
+ /// @returns a string representation of the type, for debug purposes only
+ std::string String() const override;
#endif // NDEBUG
};
/// `i32` type
struct I32 final : public Castable<I32, Type> {
- /// @param b the ProgramBuilder used to construct the AST types
- /// @returns the constructed ast::Type node for the given type
- const ast::Type* Build(ProgramBuilder& b) const override;
+ /// @param b the ProgramBuilder used to construct the AST types
+ /// @returns the constructed ast::Type node for the given type
+ const ast::Type* Build(ProgramBuilder& b) const override;
#ifndef NDEBUG
- /// @returns a string representation of the type, for debug purposes only
- std::string String() const override;
+ /// @returns a string representation of the type, for debug purposes only
+ std::string String() const override;
#endif // NDEBUG
};
/// `ptr<SC, T>` type
struct Pointer final : public Castable<Pointer, Type> {
- /// Constructor
- /// @param ty the store type
- /// @param sc the pointer storage class
- Pointer(const Type* ty, ast::StorageClass sc);
+ /// Constructor
+ /// @param ty the store type
+ /// @param sc the pointer storage class
+ Pointer(const Type* ty, ast::StorageClass sc);
- /// Copy constructor
- /// @param other the other type to copy
- Pointer(const Pointer& other);
+ /// Copy constructor
+ /// @param other the other type to copy
+ Pointer(const Pointer& other);
- /// @param b the ProgramBuilder used to construct the AST types
- /// @returns the constructed ast::Type node for the given type
- const ast::Type* Build(ProgramBuilder& b) const override;
+ /// @param b the ProgramBuilder used to construct the AST types
+ /// @returns the constructed ast::Type node for the given type
+ const ast::Type* Build(ProgramBuilder& b) const override;
#ifndef NDEBUG
- /// @returns a string representation of the type, for debug purposes only
- std::string String() const override;
+ /// @returns a string representation of the type, for debug purposes only
+ std::string String() const override;
#endif // NDEBUG
- /// the store type
- Type const* const type;
- /// the pointer storage class
- ast::StorageClass const storage_class;
+ /// the store type
+ Type const* const type;
+ /// the pointer storage class
+ ast::StorageClass const storage_class;
};
/// `ref<SC, T>` type
/// Note this has no AST representation, but is used for type tracking in the
/// reader.
struct Reference final : public Castable<Reference, Type> {
- /// Constructor
- /// @param ty the referenced type
- /// @param sc the reference storage class
- Reference(const Type* ty, ast::StorageClass sc);
+ /// Constructor
+ /// @param ty the referenced type
+ /// @param sc the reference storage class
+ Reference(const Type* ty, ast::StorageClass sc);
- /// Copy constructor
- /// @param other the other type to copy
- Reference(const Reference& other);
+ /// Copy constructor
+ /// @param other the other type to copy
+ Reference(const Reference& other);
- /// @param b the ProgramBuilder used to construct the AST types
- /// @returns the constructed ast::Type node for the given type
- const ast::Type* Build(ProgramBuilder& b) const override;
+ /// @param b the ProgramBuilder used to construct the AST types
+ /// @returns the constructed ast::Type node for the given type
+ const ast::Type* Build(ProgramBuilder& b) const override;
#ifndef NDEBUG
- /// @returns a string representation of the type, for debug purposes only
- std::string String() const override;
+ /// @returns a string representation of the type, for debug purposes only
+ std::string String() const override;
#endif // NDEBUG
- /// the store type
- Type const* const type;
- /// the pointer storage class
- ast::StorageClass const storage_class;
+ /// the store type
+ Type const* const type;
+ /// the pointer storage class
+ ast::StorageClass const storage_class;
};
/// `vecN<T>` type
struct Vector final : public Castable<Vector, Type> {
- /// Constructor
- /// @param ty the element type
- /// @param sz the number of elements in the vector
- Vector(const Type* ty, uint32_t sz);
+ /// Constructor
+ /// @param ty the element type
+ /// @param sz the number of elements in the vector
+ Vector(const Type* ty, uint32_t sz);
- /// Copy constructor
- /// @param other the other type to copy
- Vector(const Vector& other);
+ /// Copy constructor
+ /// @param other the other type to copy
+ Vector(const Vector& other);
- /// @param b the ProgramBuilder used to construct the AST types
- /// @returns the constructed ast::Type node for the given type
- const ast::Type* Build(ProgramBuilder& b) const override;
+ /// @param b the ProgramBuilder used to construct the AST types
+ /// @returns the constructed ast::Type node for the given type
+ const ast::Type* Build(ProgramBuilder& b) const override;
#ifndef NDEBUG
- /// @returns a string representation of the type, for debug purposes only
- std::string String() const override;
+ /// @returns a string representation of the type, for debug purposes only
+ std::string String() const override;
#endif // NDEBUG
- /// the element type
- Type const* const type;
- /// the number of elements in the vector
- const uint32_t size;
+ /// the element type
+ Type const* const type;
+ /// the number of elements in the vector
+ const uint32_t size;
};
/// `matNxM<T>` type
struct Matrix final : public Castable<Matrix, Type> {
- /// Constructor
- /// @param ty the matrix element type
- /// @param c the number of columns in the matrix
- /// @param r the number of rows in the matrix
- Matrix(const Type* ty, uint32_t c, uint32_t r);
+ /// Constructor
+ /// @param ty the matrix element type
+ /// @param c the number of columns in the matrix
+ /// @param r the number of rows in the matrix
+ Matrix(const Type* ty, uint32_t c, uint32_t r);
- /// Copy constructor
- /// @param other the other type to copy
- Matrix(const Matrix& other);
+ /// Copy constructor
+ /// @param other the other type to copy
+ Matrix(const Matrix& other);
- /// @param b the ProgramBuilder used to construct the AST types
- /// @returns the constructed ast::Type node for the given type
- const ast::Type* Build(ProgramBuilder& b) const override;
+ /// @param b the ProgramBuilder used to construct the AST types
+ /// @returns the constructed ast::Type node for the given type
+ const ast::Type* Build(ProgramBuilder& b) const override;
#ifndef NDEBUG
- /// @returns a string representation of the type, for debug purposes only
- std::string String() const override;
+ /// @returns a string representation of the type, for debug purposes only
+ std::string String() const override;
#endif // NDEBUG
- /// the matrix element type
- Type const* const type;
- /// the number of columns in the matrix
- const uint32_t columns;
- /// the number of rows in the matrix
- const uint32_t rows;
+ /// the matrix element type
+ Type const* const type;
+ /// the number of columns in the matrix
+ const uint32_t columns;
+ /// the number of rows in the matrix
+ const uint32_t rows;
};
/// `array<T, N>` type
struct Array final : public Castable<Array, Type> {
- /// Constructor
- /// @param el the element type
- /// @param sz the number of elements in the array. 0 represents runtime-sized
- /// array.
- /// @param st the byte stride of the array. 0 means use implicit stride.
- Array(const Type* el, uint32_t sz, uint32_t st);
+ /// Constructor
+ /// @param el the element type
+    /// @param sz the number of elements in the array. 0 represents a
+    /// runtime-sized array.
+ /// @param st the byte stride of the array. 0 means use implicit stride.
+ Array(const Type* el, uint32_t sz, uint32_t st);
- /// Copy constructor
- /// @param other the other type to copy
- Array(const Array& other);
+ /// Copy constructor
+ /// @param other the other type to copy
+ Array(const Array& other);
- /// @param b the ProgramBuilder used to construct the AST types
- /// @returns the constructed ast::Type node for the given type
- const ast::Type* Build(ProgramBuilder& b) const override;
+ /// @param b the ProgramBuilder used to construct the AST types
+ /// @returns the constructed ast::Type node for the given type
+ const ast::Type* Build(ProgramBuilder& b) const override;
#ifndef NDEBUG
- /// @returns a string representation of the type, for debug purposes only
- std::string String() const override;
+ /// @returns a string representation of the type, for debug purposes only
+ std::string String() const override;
#endif // NDEBUG
- /// the element type
- Type const* const type;
- /// the number of elements in the array. 0 represents runtime-sized array.
- const uint32_t size;
- /// the byte stride of the array
- const uint32_t stride;
+ /// the element type
+ Type const* const type;
+    /// the number of elements in the array. 0 represents a runtime-sized array.
+ const uint32_t size;
+ /// the byte stride of the array
+ const uint32_t stride;
};
/// `sampler` type
struct Sampler final : public Castable<Sampler, Type> {
- /// Constructor
- /// @param k the sampler kind
- explicit Sampler(ast::SamplerKind k);
+ /// Constructor
+ /// @param k the sampler kind
+ explicit Sampler(ast::SamplerKind k);
- /// Copy constructor
- /// @param other the other type to copy
- Sampler(const Sampler& other);
+ /// Copy constructor
+ /// @param other the other type to copy
+ Sampler(const Sampler& other);
- /// @param b the ProgramBuilder used to construct the AST types
- /// @returns the constructed ast::Type node for the given type
- const ast::Type* Build(ProgramBuilder& b) const override;
+ /// @param b the ProgramBuilder used to construct the AST types
+ /// @returns the constructed ast::Type node for the given type
+ const ast::Type* Build(ProgramBuilder& b) const override;
#ifndef NDEBUG
- /// @returns a string representation of the type, for debug purposes only
- std::string String() const override;
+ /// @returns a string representation of the type, for debug purposes only
+ std::string String() const override;
#endif // NDEBUG
- /// the sampler kind
- ast::SamplerKind const kind;
+ /// the sampler kind
+ ast::SamplerKind const kind;
};
/// Base class for texture types
struct Texture : public Castable<Texture, Type> {
- /// Constructor
- /// @param d the texture dimensions
- explicit Texture(ast::TextureDimension d);
+ /// Constructor
+ /// @param d the texture dimensions
+ explicit Texture(ast::TextureDimension d);
- /// Copy constructor
- /// @param other the other type to copy
- Texture(const Texture& other);
+ /// Copy constructor
+ /// @param other the other type to copy
+ Texture(const Texture& other);
- /// the texture dimensions
- ast::TextureDimension const dims;
+ /// the texture dimensions
+ ast::TextureDimension const dims;
};
/// `texture_depth_D` type
struct DepthTexture final : public Castable<DepthTexture, Texture> {
- /// Constructor
- /// @param d the texture dimensions
- explicit DepthTexture(ast::TextureDimension d);
+ /// Constructor
+ /// @param d the texture dimensions
+ explicit DepthTexture(ast::TextureDimension d);
- /// Copy constructor
- /// @param other the other type to copy
- DepthTexture(const DepthTexture& other);
+ /// Copy constructor
+ /// @param other the other type to copy
+ DepthTexture(const DepthTexture& other);
- /// @param b the ProgramBuilder used to construct the AST types
- /// @returns the constructed ast::Type node for the given type
- const ast::Type* Build(ProgramBuilder& b) const override;
+ /// @param b the ProgramBuilder used to construct the AST types
+ /// @returns the constructed ast::Type node for the given type
+ const ast::Type* Build(ProgramBuilder& b) const override;
#ifndef NDEBUG
- /// @returns a string representation of the type, for debug purposes only
- std::string String() const override;
+ /// @returns a string representation of the type, for debug purposes only
+ std::string String() const override;
#endif // NDEBUG
};
/// `texture_depth_multisampled_D` type
-struct DepthMultisampledTexture final
- : public Castable<DepthMultisampledTexture, Texture> {
- /// Constructor
- /// @param d the texture dimensions
- explicit DepthMultisampledTexture(ast::TextureDimension d);
+struct DepthMultisampledTexture final : public Castable<DepthMultisampledTexture, Texture> {
+ /// Constructor
+ /// @param d the texture dimensions
+ explicit DepthMultisampledTexture(ast::TextureDimension d);
- /// Copy constructor
- /// @param other the other type to copy
- DepthMultisampledTexture(const DepthMultisampledTexture& other);
+ /// Copy constructor
+ /// @param other the other type to copy
+ DepthMultisampledTexture(const DepthMultisampledTexture& other);
- /// @param b the ProgramBuilder used to construct the AST types
- /// @returns the constructed ast::Type node for the given type
- const ast::Type* Build(ProgramBuilder& b) const override;
+ /// @param b the ProgramBuilder used to construct the AST types
+ /// @returns the constructed ast::Type node for the given type
+ const ast::Type* Build(ProgramBuilder& b) const override;
#ifndef NDEBUG
- /// @returns a string representation of the type, for debug purposes only
- std::string String() const override;
+ /// @returns a string representation of the type, for debug purposes only
+ std::string String() const override;
#endif // NDEBUG
};
/// `texture_multisampled_D<T>` type
-struct MultisampledTexture final
- : public Castable<MultisampledTexture, Texture> {
- /// Constructor
- /// @param d the texture dimensions
- /// @param t the multisampled texture type
- MultisampledTexture(ast::TextureDimension d, const Type* t);
+struct MultisampledTexture final : public Castable<MultisampledTexture, Texture> {
+ /// Constructor
+ /// @param d the texture dimensions
+ /// @param t the multisampled texture type
+ MultisampledTexture(ast::TextureDimension d, const Type* t);
- /// Copy constructor
- /// @param other the other type to copy
- MultisampledTexture(const MultisampledTexture& other);
+ /// Copy constructor
+ /// @param other the other type to copy
+ MultisampledTexture(const MultisampledTexture& other);
- /// @param b the ProgramBuilder used to construct the AST types
- /// @returns the constructed ast::Type node for the given type
- const ast::Type* Build(ProgramBuilder& b) const override;
+ /// @param b the ProgramBuilder used to construct the AST types
+ /// @returns the constructed ast::Type node for the given type
+ const ast::Type* Build(ProgramBuilder& b) const override;
#ifndef NDEBUG
- /// @returns a string representation of the type, for debug purposes only
- std::string String() const override;
+ /// @returns a string representation of the type, for debug purposes only
+ std::string String() const override;
#endif // NDEBUG
- /// the multisampled texture type
- Type const* const type;
+ /// the multisampled texture type
+ Type const* const type;
};
/// `texture_D<T>` type
struct SampledTexture final : public Castable<SampledTexture, Texture> {
- /// Constructor
- /// @param d the texture dimensions
- /// @param t the sampled texture type
- SampledTexture(ast::TextureDimension d, const Type* t);
+ /// Constructor
+ /// @param d the texture dimensions
+ /// @param t the sampled texture type
+ SampledTexture(ast::TextureDimension d, const Type* t);
- /// Copy constructor
- /// @param other the other type to copy
- SampledTexture(const SampledTexture& other);
+ /// Copy constructor
+ /// @param other the other type to copy
+ SampledTexture(const SampledTexture& other);
- /// @param b the ProgramBuilder used to construct the AST types
- /// @returns the constructed ast::Type node for the given type
- const ast::Type* Build(ProgramBuilder& b) const override;
+ /// @param b the ProgramBuilder used to construct the AST types
+ /// @returns the constructed ast::Type node for the given type
+ const ast::Type* Build(ProgramBuilder& b) const override;
#ifndef NDEBUG
- /// @returns a string representation of the type, for debug purposes only
- std::string String() const override;
+ /// @returns a string representation of the type, for debug purposes only
+ std::string String() const override;
#endif // NDEBUG
- /// the sampled texture type
- Type const* const type;
+ /// the sampled texture type
+ Type const* const type;
};
/// `texture_storage_D<F>` type
struct StorageTexture final : public Castable<StorageTexture, Texture> {
- /// Constructor
- /// @param d the texture dimensions
- /// @param f the storage image format
- /// @param a the access control
- StorageTexture(ast::TextureDimension d, ast::TexelFormat f, ast::Access a);
+ /// Constructor
+ /// @param d the texture dimensions
+ /// @param f the storage image format
+ /// @param a the access control
+ StorageTexture(ast::TextureDimension d, ast::TexelFormat f, ast::Access a);
- /// Copy constructor
- /// @param other the other type to copy
- StorageTexture(const StorageTexture& other);
+ /// Copy constructor
+ /// @param other the other type to copy
+ StorageTexture(const StorageTexture& other);
- /// @param b the ProgramBuilder used to construct the AST types
- /// @returns the constructed ast::Type node for the given type
- const ast::Type* Build(ProgramBuilder& b) const override;
+ /// @param b the ProgramBuilder used to construct the AST types
+ /// @returns the constructed ast::Type node for the given type
+ const ast::Type* Build(ProgramBuilder& b) const override;
#ifndef NDEBUG
- /// @returns a string representation of the type, for debug purposes only
- std::string String() const override;
+ /// @returns a string representation of the type, for debug purposes only
+ std::string String() const override;
#endif // NDEBUG
- /// the storage image format
- ast::TexelFormat const format;
+ /// the storage image format
+ ast::TexelFormat const format;
- /// the access control
- ast::Access const access;
+ /// the access control
+ ast::Access const access;
};
/// Base class for named types
struct Named : public Castable<Named, Type> {
- /// Constructor
- /// @param n the type name
- explicit Named(Symbol n);
+ /// Constructor
+ /// @param n the type name
+ explicit Named(Symbol n);
- /// Copy constructor
- /// @param other the other type to copy
- Named(const Named& other);
+ /// Copy constructor
+ /// @param other the other type to copy
+ Named(const Named& other);
- /// Destructor
- ~Named() override;
+ /// Destructor
+ ~Named() override;
#ifndef NDEBUG
- /// @returns a string representation of the type, for debug purposes only
- std::string String() const override;
+ /// @returns a string representation of the type, for debug purposes only
+ std::string String() const override;
#endif // NDEBUG
- /// the type name
- const Symbol name;
+ /// the type name
+ const Symbol name;
};
/// `type T = N` type
struct Alias final : public Castable<Alias, Named> {
- /// Constructor
- /// @param n the alias name
- /// @param t the aliased type
- Alias(Symbol n, const Type* t);
+ /// Constructor
+ /// @param n the alias name
+ /// @param t the aliased type
+ Alias(Symbol n, const Type* t);
- /// Copy constructor
- /// @param other the other type to copy
- Alias(const Alias& other);
+ /// Copy constructor
+ /// @param other the other type to copy
+ Alias(const Alias& other);
- /// @param b the ProgramBuilder used to construct the AST types
- /// @returns the constructed ast::Type node for the given type
- const ast::Type* Build(ProgramBuilder& b) const override;
+ /// @param b the ProgramBuilder used to construct the AST types
+ /// @returns the constructed ast::Type node for the given type
+ const ast::Type* Build(ProgramBuilder& b) const override;
- /// the aliased type
- Type const* const type;
+ /// the aliased type
+ Type const* const type;
};
/// `struct N { ... };` type
struct Struct final : public Castable<Struct, Named> {
- /// Constructor
- /// @param n the struct name
- /// @param m the member types
- Struct(Symbol n, TypeList m);
+ /// Constructor
+ /// @param n the struct name
+ /// @param m the member types
+ Struct(Symbol n, TypeList m);
- /// Copy constructor
- /// @param other the other type to copy
- Struct(const Struct& other);
+ /// Copy constructor
+ /// @param other the other type to copy
+ Struct(const Struct& other);
- /// Destructor
- ~Struct() override;
+ /// Destructor
+ ~Struct() override;
- /// @param b the ProgramBuilder used to construct the AST types
- /// @returns the constructed ast::Type node for the given type
- const ast::Type* Build(ProgramBuilder& b) const override;
+ /// @param b the ProgramBuilder used to construct the AST types
+ /// @returns the constructed ast::Type node for the given type
+ const ast::Type* Build(ProgramBuilder& b) const override;
- /// the member types
- const TypeList members;
+ /// the member types
+ const TypeList members;
};
/// A manager of types
class TypeManager {
- public:
- /// Constructor
- TypeManager();
-
- /// Destructor
- ~TypeManager();
-
- /// @return a Void type. Repeated calls will return the same pointer.
- const spirv::Void* Void();
- /// @return a Bool type. Repeated calls will return the same pointer.
- const spirv::Bool* Bool();
- /// @return a U32 type. Repeated calls will return the same pointer.
- const spirv::U32* U32();
- /// @return a F32 type. Repeated calls will return the same pointer.
- const spirv::F32* F32();
- /// @return a I32 type. Repeated calls will return the same pointer.
- const spirv::I32* I32();
- /// @param ty the store type
- /// @param sc the pointer storage class
- /// @return a Pointer type. Repeated calls with the same arguments will return
- /// the same pointer.
- const spirv::Pointer* Pointer(const Type* ty, ast::StorageClass sc);
- /// @param ty the referenced type
- /// @param sc the reference storage class
- /// @return a Reference type. Repeated calls with the same arguments will
- /// return the same pointer.
- const spirv::Reference* Reference(const Type* ty, ast::StorageClass sc);
- /// @param ty the element type
- /// @param sz the number of elements in the vector
- /// @return a Vector type. Repeated calls with the same arguments will return
- /// the same pointer.
- const spirv::Vector* Vector(const Type* ty, uint32_t sz);
- /// @param ty the matrix element type
- /// @param c the number of columns in the matrix
- /// @param r the number of rows in the matrix
- /// @return a Matrix type. Repeated calls with the same arguments will return
- /// the same pointer.
- const spirv::Matrix* Matrix(const Type* ty, uint32_t c, uint32_t r);
- /// @param el the element type
- /// @param sz the number of elements in the array. 0 represents runtime-sized
- /// array.
- /// @param st the byte stride of the array
- /// @return a Array type. Repeated calls with the same arguments will return
- /// the same pointer.
- const spirv::Array* Array(const Type* el, uint32_t sz, uint32_t st);
- /// @param n the alias name
- /// @param t the aliased type
- /// @return a Alias type. Repeated calls with the same arguments will return
- /// the same pointer.
- const spirv::Alias* Alias(Symbol n, const Type* t);
- /// @param n the struct name
- /// @param m the member types
- /// @return a Struct type. Repeated calls with the same arguments will return
- /// the same pointer.
- const spirv::Struct* Struct(Symbol n, TypeList m);
- /// @param k the sampler kind
- /// @return a Sampler type. Repeated calls with the same arguments will return
- /// the same pointer.
- const spirv::Sampler* Sampler(ast::SamplerKind k);
- /// @param d the texture dimensions
- /// @return a DepthTexture type. Repeated calls with the same arguments will
- /// return the same pointer.
- const spirv::DepthTexture* DepthTexture(ast::TextureDimension d);
- /// @param d the texture dimensions
- /// @return a DepthMultisampledTexture type. Repeated calls with the same
- /// arguments will return the same pointer.
- const spirv::DepthMultisampledTexture* DepthMultisampledTexture(
- ast::TextureDimension d);
- /// @param d the texture dimensions
- /// @param t the multisampled texture type
- /// @return a MultisampledTexture type. Repeated calls with the same arguments
- /// will return the same pointer.
- const spirv::MultisampledTexture* MultisampledTexture(ast::TextureDimension d,
- const Type* t);
- /// @param d the texture dimensions
- /// @param t the sampled texture type
- /// @return a SampledTexture type. Repeated calls with the same arguments will
- /// return the same pointer.
- const spirv::SampledTexture* SampledTexture(ast::TextureDimension d,
- const Type* t);
- /// @param d the texture dimensions
- /// @param f the storage image format
- /// @param a the access control
- /// @return a StorageTexture type. Repeated calls with the same arguments will
- /// return the same pointer.
- const spirv::StorageTexture* StorageTexture(ast::TextureDimension d,
- ast::TexelFormat f,
- ast::Access a);
-
- private:
- struct State;
- std::unique_ptr<State> state;
+ public:
+ /// Constructor
+ TypeManager();
+
+ /// Destructor
+ ~TypeManager();
+
+ /// @return a Void type. Repeated calls will return the same pointer.
+ const spirv::Void* Void();
+ /// @return a Bool type. Repeated calls will return the same pointer.
+ const spirv::Bool* Bool();
+ /// @return a U32 type. Repeated calls will return the same pointer.
+ const spirv::U32* U32();
+ /// @return a F32 type. Repeated calls will return the same pointer.
+ const spirv::F32* F32();
+ /// @return a I32 type. Repeated calls will return the same pointer.
+ const spirv::I32* I32();
+ /// @param ty the store type
+ /// @param sc the pointer storage class
+ /// @return a Pointer type. Repeated calls with the same arguments will return
+ /// the same pointer.
+ const spirv::Pointer* Pointer(const Type* ty, ast::StorageClass sc);
+ /// @param ty the referenced type
+ /// @param sc the reference storage class
+ /// @return a Reference type. Repeated calls with the same arguments will
+ /// return the same pointer.
+ const spirv::Reference* Reference(const Type* ty, ast::StorageClass sc);
+ /// @param ty the element type
+ /// @param sz the number of elements in the vector
+ /// @return a Vector type. Repeated calls with the same arguments will return
+ /// the same pointer.
+ const spirv::Vector* Vector(const Type* ty, uint32_t sz);
+ /// @param ty the matrix element type
+ /// @param c the number of columns in the matrix
+ /// @param r the number of rows in the matrix
+ /// @return a Matrix type. Repeated calls with the same arguments will return
+ /// the same pointer.
+ const spirv::Matrix* Matrix(const Type* ty, uint32_t c, uint32_t r);
+ /// @param el the element type
+ /// @param sz the number of elements in the array. 0 represents runtime-sized
+ /// array.
+ /// @param st the byte stride of the array
+ /// @return an Array type. Repeated calls with the same arguments will return
+ /// the same pointer.
+ const spirv::Array* Array(const Type* el, uint32_t sz, uint32_t st);
+ /// @param n the alias name
+ /// @param t the aliased type
+ /// @return an Alias type. Repeated calls with the same arguments will return
+ /// the same pointer.
+ const spirv::Alias* Alias(Symbol n, const Type* t);
+ /// @param n the struct name
+ /// @param m the member types
+ /// @return a Struct type. Repeated calls with the same arguments will return
+ /// the same pointer.
+ const spirv::Struct* Struct(Symbol n, TypeList m);
+ /// @param k the sampler kind
+ /// @return a Sampler type. Repeated calls with the same arguments will return
+ /// the same pointer.
+ const spirv::Sampler* Sampler(ast::SamplerKind k);
+ /// @param d the texture dimensions
+ /// @return a DepthTexture type. Repeated calls with the same arguments will
+ /// return the same pointer.
+ const spirv::DepthTexture* DepthTexture(ast::TextureDimension d);
+ /// @param d the texture dimensions
+ /// @return a DepthMultisampledTexture type. Repeated calls with the same
+ /// arguments will return the same pointer.
+ const spirv::DepthMultisampledTexture* DepthMultisampledTexture(ast::TextureDimension d);
+ /// @param d the texture dimensions
+ /// @param t the multisampled texture type
+ /// @return a MultisampledTexture type. Repeated calls with the same arguments
+ /// will return the same pointer.
+ const spirv::MultisampledTexture* MultisampledTexture(ast::TextureDimension d, const Type* t);
+ /// @param d the texture dimensions
+ /// @param t the sampled texture type
+ /// @return a SampledTexture type. Repeated calls with the same arguments will
+ /// return the same pointer.
+ const spirv::SampledTexture* SampledTexture(ast::TextureDimension d, const Type* t);
+ /// @param d the texture dimensions
+ /// @param f the storage image format
+ /// @param a the access control
+ /// @return a StorageTexture type. Repeated calls with the same arguments will
+ /// return the same pointer.
+ const spirv::StorageTexture* StorageTexture(ast::TextureDimension d,
+ ast::TexelFormat f,
+ ast::Access a);
+
+ private:
+ struct State;
+ std::unique_ptr<State> state;
};
} // namespace tint::reader::spirv
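The interning behaviour documented for TypeManager above is the key property of this header: repeated calls with identical arguments hand back the same pointer, so structurally identical SPIR-V types can be compared with ==, exactly as the tests in parser_type_test.cc below exercise. A minimal sketch, assuming the declaring header (parser_type.h, judging from the test file that follows) is on the include path:

#include <cassert>

#include "src/tint/reader/spirv/parser_type.h"  // assumed include path

void TypeInterningSketch() {
    tint::reader::spirv::TypeManager ty;

    // Primitive types are created once and cached, so repeated calls return
    // the same pointer.
    assert(ty.I32() == ty.I32());

    // Composite types are deduplicated by their arguments.
    assert(ty.Vector(ty.I32(), 3) == ty.Vector(ty.I32(), 3));

    // Different arguments produce distinct type objects.
    assert(ty.Vector(ty.I32(), 3) != ty.Vector(ty.I32(), 2));
}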
diff --git a/chromium/third_party/dawn/src/tint/reader/spirv/parser_type_test.cc b/chromium/third_party/dawn/src/tint/reader/spirv/parser_type_test.cc
index bddfdc5aff6..c031cd75670 100644
--- a/chromium/third_party/dawn/src/tint/reader/spirv/parser_type_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/spirv/parser_type_test.cc
@@ -20,78 +20,77 @@ namespace tint::reader::spirv {
namespace {
TEST(SpvParserTypeTest, SameArgumentsGivesSamePointer) {
- Symbol sym(Symbol(1, {}));
+ Symbol sym(Symbol(1, {}));
- TypeManager ty;
- EXPECT_EQ(ty.Void(), ty.Void());
- EXPECT_EQ(ty.Bool(), ty.Bool());
- EXPECT_EQ(ty.U32(), ty.U32());
- EXPECT_EQ(ty.F32(), ty.F32());
- EXPECT_EQ(ty.I32(), ty.I32());
- EXPECT_EQ(ty.Pointer(ty.I32(), ast::StorageClass::kNone),
- ty.Pointer(ty.I32(), ast::StorageClass::kNone));
- EXPECT_EQ(ty.Vector(ty.I32(), 3), ty.Vector(ty.I32(), 3));
- EXPECT_EQ(ty.Matrix(ty.I32(), 3, 2), ty.Matrix(ty.I32(), 3, 2));
- EXPECT_EQ(ty.Array(ty.I32(), 3, 2), ty.Array(ty.I32(), 3, 2));
- EXPECT_EQ(ty.Alias(sym, ty.I32()), ty.Alias(sym, ty.I32()));
- EXPECT_EQ(ty.Struct(sym, {ty.I32()}), ty.Struct(sym, {ty.I32()}));
- EXPECT_EQ(ty.Sampler(ast::SamplerKind::kSampler),
- ty.Sampler(ast::SamplerKind::kSampler));
- EXPECT_EQ(ty.DepthTexture(ast::TextureDimension::k2d),
- ty.DepthTexture(ast::TextureDimension::k2d));
- EXPECT_EQ(ty.MultisampledTexture(ast::TextureDimension::k2d, ty.I32()),
- ty.MultisampledTexture(ast::TextureDimension::k2d, ty.I32()));
- EXPECT_EQ(ty.SampledTexture(ast::TextureDimension::k2d, ty.I32()),
- ty.SampledTexture(ast::TextureDimension::k2d, ty.I32()));
- EXPECT_EQ(ty.StorageTexture(ast::TextureDimension::k2d,
- ast::TexelFormat::kR32Uint, ast::Access::kRead),
- ty.StorageTexture(ast::TextureDimension::k2d,
- ast::TexelFormat::kR32Uint, ast::Access::kRead));
+ TypeManager ty;
+ EXPECT_EQ(ty.Void(), ty.Void());
+ EXPECT_EQ(ty.Bool(), ty.Bool());
+ EXPECT_EQ(ty.U32(), ty.U32());
+ EXPECT_EQ(ty.F32(), ty.F32());
+ EXPECT_EQ(ty.I32(), ty.I32());
+ EXPECT_EQ(ty.Pointer(ty.I32(), ast::StorageClass::kNone),
+ ty.Pointer(ty.I32(), ast::StorageClass::kNone));
+ EXPECT_EQ(ty.Vector(ty.I32(), 3), ty.Vector(ty.I32(), 3));
+ EXPECT_EQ(ty.Matrix(ty.I32(), 3, 2), ty.Matrix(ty.I32(), 3, 2));
+ EXPECT_EQ(ty.Array(ty.I32(), 3, 2), ty.Array(ty.I32(), 3, 2));
+ EXPECT_EQ(ty.Alias(sym, ty.I32()), ty.Alias(sym, ty.I32()));
+ EXPECT_EQ(ty.Struct(sym, {ty.I32()}), ty.Struct(sym, {ty.I32()}));
+ EXPECT_EQ(ty.Sampler(ast::SamplerKind::kSampler), ty.Sampler(ast::SamplerKind::kSampler));
+ EXPECT_EQ(ty.DepthTexture(ast::TextureDimension::k2d),
+ ty.DepthTexture(ast::TextureDimension::k2d));
+ EXPECT_EQ(ty.MultisampledTexture(ast::TextureDimension::k2d, ty.I32()),
+ ty.MultisampledTexture(ast::TextureDimension::k2d, ty.I32()));
+ EXPECT_EQ(ty.SampledTexture(ast::TextureDimension::k2d, ty.I32()),
+ ty.SampledTexture(ast::TextureDimension::k2d, ty.I32()));
+ EXPECT_EQ(ty.StorageTexture(ast::TextureDimension::k2d, ast::TexelFormat::kR32Uint,
+ ast::Access::kRead),
+ ty.StorageTexture(ast::TextureDimension::k2d, ast::TexelFormat::kR32Uint,
+ ast::Access::kRead));
}
TEST(SpvParserTypeTest, DifferentArgumentsGivesDifferentPointer) {
- Symbol sym_a(Symbol(1, {}));
- Symbol sym_b(Symbol(2, {}));
+ Symbol sym_a(Symbol(1, {}));
+ Symbol sym_b(Symbol(2, {}));
- TypeManager ty;
- EXPECT_NE(ty.Pointer(ty.I32(), ast::StorageClass::kNone),
- ty.Pointer(ty.U32(), ast::StorageClass::kNone));
- EXPECT_NE(ty.Pointer(ty.I32(), ast::StorageClass::kNone),
- ty.Pointer(ty.I32(), ast::StorageClass::kInput));
- EXPECT_NE(ty.Vector(ty.I32(), 3), ty.Vector(ty.U32(), 3));
- EXPECT_NE(ty.Vector(ty.I32(), 3), ty.Vector(ty.I32(), 2));
- EXPECT_NE(ty.Matrix(ty.I32(), 3, 2), ty.Matrix(ty.U32(), 3, 2));
- EXPECT_NE(ty.Matrix(ty.I32(), 3, 2), ty.Matrix(ty.I32(), 2, 2));
- EXPECT_NE(ty.Matrix(ty.I32(), 3, 2), ty.Matrix(ty.I32(), 3, 3));
- EXPECT_NE(ty.Array(ty.I32(), 3, 2), ty.Array(ty.U32(), 3, 2));
- EXPECT_NE(ty.Array(ty.I32(), 3, 2), ty.Array(ty.I32(), 2, 2));
- EXPECT_NE(ty.Array(ty.I32(), 3, 2), ty.Array(ty.I32(), 3, 3));
- EXPECT_NE(ty.Alias(sym_a, ty.I32()), ty.Alias(sym_b, ty.I32()));
- EXPECT_NE(ty.Struct(sym_a, {ty.I32()}), ty.Struct(sym_b, {ty.I32()}));
- EXPECT_NE(ty.Sampler(ast::SamplerKind::kSampler),
- ty.Sampler(ast::SamplerKind::kComparisonSampler));
- EXPECT_NE(ty.DepthTexture(ast::TextureDimension::k2d),
- ty.DepthTexture(ast::TextureDimension::k1d));
- EXPECT_NE(ty.MultisampledTexture(ast::TextureDimension::k2d, ty.I32()),
- ty.MultisampledTexture(ast::TextureDimension::k3d, ty.I32()));
- EXPECT_NE(ty.MultisampledTexture(ast::TextureDimension::k2d, ty.I32()),
- ty.MultisampledTexture(ast::TextureDimension::k2d, ty.U32()));
- EXPECT_NE(ty.SampledTexture(ast::TextureDimension::k2d, ty.I32()),
- ty.SampledTexture(ast::TextureDimension::k3d, ty.I32()));
- EXPECT_NE(ty.SampledTexture(ast::TextureDimension::k2d, ty.I32()),
- ty.SampledTexture(ast::TextureDimension::k2d, ty.U32()));
- EXPECT_NE(ty.StorageTexture(ast::TextureDimension::k2d,
- ast::TexelFormat::kR32Uint, ast::Access::kRead),
- ty.StorageTexture(ast::TextureDimension::k3d,
- ast::TexelFormat::kR32Uint, ast::Access::kRead));
- EXPECT_NE(ty.StorageTexture(ast::TextureDimension::k2d,
- ast::TexelFormat::kR32Uint, ast::Access::kRead),
- ty.StorageTexture(ast::TextureDimension::k2d,
- ast::TexelFormat::kR32Sint, ast::Access::kRead));
- EXPECT_NE(ty.StorageTexture(ast::TextureDimension::k2d,
- ast::TexelFormat::kR32Uint, ast::Access::kRead),
- ty.StorageTexture(ast::TextureDimension::k2d,
- ast::TexelFormat::kR32Uint, ast::Access::kWrite));
+ TypeManager ty;
+ EXPECT_NE(ty.Pointer(ty.I32(), ast::StorageClass::kNone),
+ ty.Pointer(ty.U32(), ast::StorageClass::kNone));
+ EXPECT_NE(ty.Pointer(ty.I32(), ast::StorageClass::kNone),
+ ty.Pointer(ty.I32(), ast::StorageClass::kInput));
+ EXPECT_NE(ty.Vector(ty.I32(), 3), ty.Vector(ty.U32(), 3));
+ EXPECT_NE(ty.Vector(ty.I32(), 3), ty.Vector(ty.I32(), 2));
+ EXPECT_NE(ty.Matrix(ty.I32(), 3, 2), ty.Matrix(ty.U32(), 3, 2));
+ EXPECT_NE(ty.Matrix(ty.I32(), 3, 2), ty.Matrix(ty.I32(), 2, 2));
+ EXPECT_NE(ty.Matrix(ty.I32(), 3, 2), ty.Matrix(ty.I32(), 3, 3));
+ EXPECT_NE(ty.Array(ty.I32(), 3, 2), ty.Array(ty.U32(), 3, 2));
+ EXPECT_NE(ty.Array(ty.I32(), 3, 2), ty.Array(ty.I32(), 2, 2));
+ EXPECT_NE(ty.Array(ty.I32(), 3, 2), ty.Array(ty.I32(), 3, 3));
+ EXPECT_NE(ty.Alias(sym_a, ty.I32()), ty.Alias(sym_b, ty.I32()));
+ EXPECT_NE(ty.Struct(sym_a, {ty.I32()}), ty.Struct(sym_b, {ty.I32()}));
+ EXPECT_NE(ty.Sampler(ast::SamplerKind::kSampler),
+ ty.Sampler(ast::SamplerKind::kComparisonSampler));
+ EXPECT_NE(ty.DepthTexture(ast::TextureDimension::k2d),
+ ty.DepthTexture(ast::TextureDimension::k1d));
+ EXPECT_NE(ty.MultisampledTexture(ast::TextureDimension::k2d, ty.I32()),
+ ty.MultisampledTexture(ast::TextureDimension::k3d, ty.I32()));
+ EXPECT_NE(ty.MultisampledTexture(ast::TextureDimension::k2d, ty.I32()),
+ ty.MultisampledTexture(ast::TextureDimension::k2d, ty.U32()));
+ EXPECT_NE(ty.SampledTexture(ast::TextureDimension::k2d, ty.I32()),
+ ty.SampledTexture(ast::TextureDimension::k3d, ty.I32()));
+ EXPECT_NE(ty.SampledTexture(ast::TextureDimension::k2d, ty.I32()),
+ ty.SampledTexture(ast::TextureDimension::k2d, ty.U32()));
+ EXPECT_NE(ty.StorageTexture(ast::TextureDimension::k2d, ast::TexelFormat::kR32Uint,
+ ast::Access::kRead),
+ ty.StorageTexture(ast::TextureDimension::k3d, ast::TexelFormat::kR32Uint,
+ ast::Access::kRead));
+ EXPECT_NE(ty.StorageTexture(ast::TextureDimension::k2d, ast::TexelFormat::kR32Uint,
+ ast::Access::kRead),
+ ty.StorageTexture(ast::TextureDimension::k2d, ast::TexelFormat::kR32Sint,
+ ast::Access::kRead));
+ EXPECT_NE(ty.StorageTexture(ast::TextureDimension::k2d, ast::TexelFormat::kR32Uint,
+ ast::Access::kRead),
+ ty.StorageTexture(ast::TextureDimension::k2d, ast::TexelFormat::kR32Uint,
+ ast::Access::kWrite));
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/reader/spirv/spirv_tools_helpers_test.cc b/chromium/third_party/dawn/src/tint/reader/spirv/spirv_tools_helpers_test.cc
index 057dffd55b7..21a5be212a3 100644
--- a/chromium/third_party/dawn/src/tint/reader/spirv/spirv_tools_helpers_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/spirv/spirv_tools_helpers_test.cc
@@ -20,42 +20,38 @@
namespace tint::reader::spirv::test {
std::vector<uint32_t> Assemble(const std::string& spirv_assembly) {
- // TODO(dneto): Use ScopedTrace?
-
- // The target environment doesn't affect assembly.
- spvtools::SpirvTools tools(SPV_ENV_UNIVERSAL_1_0);
- std::stringstream errors;
- std::vector<uint32_t> result;
- tools.SetMessageConsumer([&errors](spv_message_level_t, const char*,
- const spv_position_t& position,
- const char* message) {
- errors << "assembly error:" << position.line << ":" << position.column
- << ": " << message;
- });
-
- const auto success = tools.Assemble(
- spirv_assembly, &result, SPV_TEXT_TO_BINARY_OPTION_PRESERVE_NUMERIC_IDS);
- EXPECT_TRUE(success) << errors.str();
-
- return result;
+ // TODO(dneto): Use ScopedTrace?
+
+ // The target environment doesn't affect assembly.
+ spvtools::SpirvTools tools(SPV_ENV_UNIVERSAL_1_0);
+ std::stringstream errors;
+ std::vector<uint32_t> result;
+ tools.SetMessageConsumer([&errors](spv_message_level_t, const char*,
+ const spv_position_t& position, const char* message) {
+ errors << "assembly error:" << position.line << ":" << position.column << ": " << message;
+ });
+
+ const auto success =
+ tools.Assemble(spirv_assembly, &result, SPV_TEXT_TO_BINARY_OPTION_PRESERVE_NUMERIC_IDS);
+ EXPECT_TRUE(success) << errors.str();
+
+ return result;
}
std::string Disassemble(const std::vector<uint32_t>& spirv_module) {
- spvtools::SpirvTools tools(SPV_ENV_UNIVERSAL_1_0);
- std::stringstream errors;
- tools.SetMessageConsumer([&errors](spv_message_level_t, const char*,
- const spv_position_t& position,
- const char* message) {
- errors << "disassmbly error:" << position.line << ":" << position.column
- << ": " << message;
- });
-
- std::string result;
- const auto success = tools.Disassemble(
- spirv_module, &result, SPV_BINARY_TO_TEXT_OPTION_FRIENDLY_NAMES);
- EXPECT_TRUE(success) << errors.str();
-
- return result;
+ spvtools::SpirvTools tools(SPV_ENV_UNIVERSAL_1_0);
+ std::stringstream errors;
+ tools.SetMessageConsumer([&errors](spv_message_level_t, const char*,
+ const spv_position_t& position, const char* message) {
+ errors << "disassmbly error:" << position.line << ":" << position.column << ": " << message;
+ });
+
+ std::string result;
+ const auto success =
+ tools.Disassemble(spirv_module, &result, SPV_BINARY_TO_TEXT_OPTION_FRIENDLY_NAMES);
+ EXPECT_TRUE(success) << errors.str();
+
+ return result;
}
} // namespace tint::reader::spirv::test
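Assemble() and Disassemble() above are the round-trip entry points used throughout these reader tests. A minimal sketch of a test built on them, assuming googletest/googlemock and the helpers' header (name assumed from this file's path) are available:

#include <cstdint>
#include <string>
#include <vector>

#include "gmock/gmock.h"
#include "src/tint/reader/spirv/spirv_tools_helpers_test.h"  // assumed header name

namespace tint::reader::spirv::test {

TEST(SpirvToolsHelpersExample, RoundTrip) {
    // Assemble() reports failures through EXPECT_TRUE internally and returns
    // the binary words; a SPIR-V module header alone is five words.
    const std::vector<uint32_t> words =
        Assemble("OpCapability Shader\nOpMemoryModel Logical Simple\n");
    ASSERT_GE(words.size(), 5u);

    // Disassemble() turns the words back into friendly-named assembly text.
    const std::string text = Disassemble(words);
    EXPECT_THAT(text, ::testing::HasSubstr("OpMemoryModel"));
}

}  // namespace tint::reader::spirv::test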
diff --git a/chromium/third_party/dawn/src/tint/reader/spirv/usage.cc b/chromium/third_party/dawn/src/tint/reader/spirv/usage.cc
index 2ddb696f16f..c120b2cc963 100644
--- a/chromium/third_party/dawn/src/tint/reader/spirv/usage.cc
+++ b/chromium/third_party/dawn/src/tint/reader/spirv/usage.cc
@@ -23,168 +23,165 @@ Usage::Usage(const Usage& other) = default;
Usage::~Usage() = default;
std::ostream& Usage::operator<<(std::ostream& out) const {
- out << "Usage(";
- if (IsSampler()) {
- out << "Sampler(";
- if (is_comparison_sampler_) {
- out << " comparison";
+ out << "Usage(";
+ if (IsSampler()) {
+ out << "Sampler(";
+ if (is_comparison_sampler_) {
+ out << " comparison";
+ }
+ out << " )";
}
- out << " )";
- }
- if (IsTexture()) {
- out << "Texture(";
- if (is_sampled_) {
- out << " is_sampled";
+ if (IsTexture()) {
+ out << "Texture(";
+ if (is_sampled_) {
+ out << " is_sampled";
+ }
+ if (is_multisampled_) {
+ out << " ms";
+ }
+ if (is_depth_) {
+ out << " depth";
+ }
+ if (is_storage_read_) {
+ out << " read";
+ }
+ if (is_storage_write_) {
+ out << " write";
+ }
+ out << " )";
}
- if (is_multisampled_) {
- out << " ms";
- }
- if (is_depth_) {
- out << " depth";
- }
- if (is_storage_read_) {
- out << " read";
- }
- if (is_storage_write_) {
- out << " write";
- }
- out << " )";
- }
- out << ")";
- return out;
+ out << ")";
+ return out;
}
bool Usage::IsValid() const {
- // Check sampler state internal consistency.
- if (is_comparison_sampler_ && !is_sampler_) {
- return false;
- }
-
- // Check texture state.
- // |is_texture_| is implied by any of the later texture-based properties.
- if ((IsStorageTexture() || is_sampled_ || is_multisampled_ || is_depth_) &&
- !is_texture_) {
- return false;
- }
- if (is_texture_) {
- // Multisampled implies sampled.
- if (is_multisampled_) {
- if (!is_sampled_) {
+ // Check sampler state internal consistency.
+ if (is_comparison_sampler_ && !is_sampler_) {
return false;
- }
- }
- // Depth implies sampled.
- if (is_depth_) {
- if (!is_sampled_) {
- return false;
- }
}
- // Sampled can't be storage.
- if (is_sampled_) {
- if (IsStorageTexture()) {
+ // Check texture state.
+ // |is_texture_| is implied by any of the later texture-based properties.
+ if ((IsStorageTexture() || is_sampled_ || is_multisampled_ || is_depth_) && !is_texture_) {
return false;
- }
}
+ if (is_texture_) {
+ // Multisampled implies sampled.
+ if (is_multisampled_) {
+ if (!is_sampled_) {
+ return false;
+ }
+ }
+ // Depth implies sampled.
+ if (is_depth_) {
+ if (!is_sampled_) {
+ return false;
+ }
+ }
+
+ // Sampled can't be storage.
+ if (is_sampled_) {
+ if (IsStorageTexture()) {
+ return false;
+ }
+ }
+
+ // Storage can't be sampled.
+ if (IsStorageTexture()) {
+ if (is_sampled_) {
+ return false;
+ }
+ }
+ // Storage texture can't also be a sampler.
+ if (IsStorageTexture()) {
+ if (is_sampler_) {
+ return false;
+ }
+ }
+
+ // Can't be both read and write. This is a restriction in WebGPU.
+ if (is_storage_read_ && is_storage_write_) {
+ return false;
+ }
+ }
+ return true;
+}
- // Storage can't be sampled.
- if (IsStorageTexture()) {
- if (is_sampled_) {
+bool Usage::IsComplete() const {
+ if (!IsValid()) {
return false;
- }
}
- // Storage texture can't also be a sampler.
- if (IsStorageTexture()) {
- if (is_sampler_) {
- return false;
- }
+ if (IsSampler()) {
+ return true;
}
-
- // Can't be both read and write. This is a restriction in WebGPU.
- if (is_storage_read_ && is_storage_write_) {
- return false;
+ if (IsTexture()) {
+ return is_sampled_ || IsStorageTexture();
}
- }
- return true;
-}
-
-bool Usage::IsComplete() const {
- if (!IsValid()) {
return false;
- }
- if (IsSampler()) {
- return true;
- }
- if (IsTexture()) {
- return is_sampled_ || IsStorageTexture();
- }
- return false;
}
bool Usage::operator==(const Usage& other) const {
- return is_sampler_ == other.is_sampler_ &&
- is_comparison_sampler_ == other.is_comparison_sampler_ &&
- is_texture_ == other.is_texture_ && is_sampled_ == other.is_sampled_ &&
- is_multisampled_ == other.is_multisampled_ &&
- is_depth_ == other.is_depth_ &&
- is_storage_read_ == other.is_storage_read_ &&
- is_storage_write_ == other.is_storage_write_;
+ return is_sampler_ == other.is_sampler_ &&
+ is_comparison_sampler_ == other.is_comparison_sampler_ &&
+ is_texture_ == other.is_texture_ && is_sampled_ == other.is_sampled_ &&
+ is_multisampled_ == other.is_multisampled_ && is_depth_ == other.is_depth_ &&
+ is_storage_read_ == other.is_storage_read_ &&
+ is_storage_write_ == other.is_storage_write_;
}
void Usage::Add(const Usage& other) {
- is_sampler_ = is_sampler_ || other.is_sampler_;
- is_comparison_sampler_ =
- is_comparison_sampler_ || other.is_comparison_sampler_;
- is_texture_ = is_texture_ || other.is_texture_;
- is_sampled_ = is_sampled_ || other.is_sampled_;
- is_multisampled_ = is_multisampled_ || other.is_multisampled_;
- is_depth_ = is_depth_ || other.is_depth_;
- is_storage_read_ = is_storage_read_ || other.is_storage_read_;
- is_storage_write_ = is_storage_write_ || other.is_storage_write_;
+ is_sampler_ = is_sampler_ || other.is_sampler_;
+ is_comparison_sampler_ = is_comparison_sampler_ || other.is_comparison_sampler_;
+ is_texture_ = is_texture_ || other.is_texture_;
+ is_sampled_ = is_sampled_ || other.is_sampled_;
+ is_multisampled_ = is_multisampled_ || other.is_multisampled_;
+ is_depth_ = is_depth_ || other.is_depth_;
+ is_storage_read_ = is_storage_read_ || other.is_storage_read_;
+ is_storage_write_ = is_storage_write_ || other.is_storage_write_;
}
void Usage::AddSampler() {
- is_sampler_ = true;
+ is_sampler_ = true;
}
void Usage::AddComparisonSampler() {
- AddSampler();
- is_comparison_sampler_ = true;
+ AddSampler();
+ is_comparison_sampler_ = true;
}
void Usage::AddTexture() {
- is_texture_ = true;
+ is_texture_ = true;
}
void Usage::AddStorageReadTexture() {
- AddTexture();
- is_storage_read_ = true;
+ AddTexture();
+ is_storage_read_ = true;
}
void Usage::AddStorageWriteTexture() {
- AddTexture();
- is_storage_write_ = true;
+ AddTexture();
+ is_storage_write_ = true;
}
void Usage::AddSampledTexture() {
- AddTexture();
- is_sampled_ = true;
+ AddTexture();
+ is_sampled_ = true;
}
void Usage::AddMultisampledTexture() {
- AddSampledTexture();
- is_multisampled_ = true;
+ AddSampledTexture();
+ is_multisampled_ = true;
}
void Usage::AddDepthTexture() {
- AddSampledTexture();
- is_depth_ = true;
+ AddSampledTexture();
+ is_depth_ = true;
}
std::string Usage::to_str() const {
- std::ostringstream ss;
- ss << *this;
- return ss.str();
+ std::ostringstream ss;
+ ss << *this;
+ return ss.str();
}
} // namespace tint::reader::spirv
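The rules enforced by IsValid() and IsComplete() above boil down to: comparison implies sampler, the texture flags imply is_texture_, multisampled and depth imply sampled, sampled and storage are mutually exclusive, and (a WebGPU restriction) a storage texture cannot be both read and write. A short sketch of those rules in action, assuming usage.h (shown next in this diff) is on the include path:

#include <cassert>

#include "src/tint/reader/spirv/usage.h"  // assumed include path

void UsageRulesSketch() {
    using tint::reader::spirv::Usage;

    // A sampled texture usage is valid and fully determines a binding type.
    Usage sampled;
    sampled.AddSampledTexture();
    assert(sampled.IsValid() && sampled.IsComplete());

    // A bare texture usage is valid but incomplete: it is not yet known
    // whether the texture is sampled or storage.
    Usage bare;
    bare.AddTexture();
    assert(bare.IsValid() && !bare.IsComplete());

    // Merging read and write storage usages trips the WebGPU restriction
    // checked in IsValid(), so the combined usage becomes invalid.
    Usage combined;
    combined.AddStorageReadTexture();
    Usage writer;
    writer.AddStorageWriteTexture();
    combined.Add(writer);
    assert(!combined.IsValid() && !combined.IsComplete());
}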
diff --git a/chromium/third_party/dawn/src/tint/reader/spirv/usage.h b/chromium/third_party/dawn/src/tint/reader/spirv/usage.h
index 140a4ba3ac0..4c2ccbbd622 100644
--- a/chromium/third_party/dawn/src/tint/reader/spirv/usage.h
+++ b/chromium/third_party/dawn/src/tint/reader/spirv/usage.h
@@ -35,93 +35,91 @@ namespace tint::reader::spirv {
/// - The memory object declaration underlying %YIm will gain
/// AddSampledTexture and AddDepthTexture usages
class Usage {
- public:
- /// Constructor
- Usage();
- /// Copy constructor
- /// @param other the Usage to clone
- Usage(const Usage& other);
- /// Destructor
- ~Usage();
-
- /// @returns true if this usage is internally consistent
- bool IsValid() const;
- /// @returns true if the usage fully determines a WebGPU binding type.
- bool IsComplete() const;
-
- /// @returns true if this usage is a sampler usage.
- bool IsSampler() const { return is_sampler_; }
- /// @returns true if this usage is a comparison sampler usage.
- bool IsComparisonSampler() const { return is_comparison_sampler_; }
-
- /// @returns true if this usage is a texture usage.
- bool IsTexture() const { return is_texture_; }
- /// @returns true if this usage is a sampled texture usage.
- bool IsSampledTexture() const { return is_sampled_; }
- /// @returns true if this usage is a multisampled texture usage.
- bool IsMultisampledTexture() const { return is_multisampled_; }
- /// @returns true if this usage is a depth texture usage.
- bool IsDepthTexture() const { return is_depth_; }
- /// @returns true if this usage is a read-only storage texture
- bool IsStorageReadTexture() const { return is_storage_read_; }
- /// @returns true if this usage is a write-only storage texture
- bool IsStorageWriteTexture() const { return is_storage_write_; }
-
- /// @returns true if this is a storage texture.
- bool IsStorageTexture() const {
- return is_storage_read_ || is_storage_write_;
- }
-
- /// Emits this usage to the given stream
- /// @param out the output stream.
- /// @returns the modified stream.
- std::ostream& operator<<(std::ostream& out) const;
-
- /// Equality operator
- /// @param other the RHS of the equality test.
- /// @returns true if `other` is identical to `*this`
- bool operator==(const Usage& other) const;
-
- /// Adds the usages from another usage object.
- /// @param other the other usage
- void Add(const Usage& other);
-
- /// Records usage as a sampler.
- void AddSampler();
- /// Records usage as a comparison sampler.
- void AddComparisonSampler();
-
- /// Records usage as a texture of some kind.
- void AddTexture();
- /// Records usage as a read-only storage texture.
- void AddStorageReadTexture();
- /// Records usage as a write-only storage texture.
- void AddStorageWriteTexture();
- /// Records usage as a sampled texture.
- void AddSampledTexture();
- /// Records usage as a multisampled texture.
- void AddMultisampledTexture();
- /// Records usage as a depth texture.
- void AddDepthTexture();
-
- /// @returns this usage object as a string.
- std::string to_str() const;
-
- private:
- // Sampler properties.
- bool is_sampler_ = false;
- // A comparison sampler is always a sampler:
- // |is_comparison_sampler_| implies |is_sampler_|
- bool is_comparison_sampler_ = false;
-
- // Texture properties.
- // |is_texture_| is always implied by any of the others below.
- bool is_texture_ = false;
- bool is_sampled_ = false;
- bool is_multisampled_ = false; // This implies it's sampled as well.
- bool is_depth_ = false;
- bool is_storage_read_ = false;
- bool is_storage_write_ = false;
+ public:
+ /// Constructor
+ Usage();
+ /// Copy constructor
+ /// @param other the Usage to clone
+ Usage(const Usage& other);
+ /// Destructor
+ ~Usage();
+
+ /// @returns true if this usage is internally consistent
+ bool IsValid() const;
+ /// @returns true if the usage fully determines a WebGPU binding type.
+ bool IsComplete() const;
+
+ /// @returns true if this usage is a sampler usage.
+ bool IsSampler() const { return is_sampler_; }
+ /// @returns true if this usage is a comparison sampler usage.
+ bool IsComparisonSampler() const { return is_comparison_sampler_; }
+
+ /// @returns true if this usage is a texture usage.
+ bool IsTexture() const { return is_texture_; }
+ /// @returns true if this usage is a sampled texture usage.
+ bool IsSampledTexture() const { return is_sampled_; }
+ /// @returns true if this usage is a multisampled texture usage.
+ bool IsMultisampledTexture() const { return is_multisampled_; }
+ /// @returns true if this usage is a depth texture usage.
+ bool IsDepthTexture() const { return is_depth_; }
+ /// @returns true if this usage is a read-only storage texture
+ bool IsStorageReadTexture() const { return is_storage_read_; }
+ /// @returns true if this usage is a write-only storage texture
+ bool IsStorageWriteTexture() const { return is_storage_write_; }
+
+ /// @returns true if this is a storage texture.
+ bool IsStorageTexture() const { return is_storage_read_ || is_storage_write_; }
+
+ /// Emits this usage to the given stream
+ /// @param out the output stream.
+ /// @returns the modified stream.
+ std::ostream& operator<<(std::ostream& out) const;
+
+ /// Equality operator
+ /// @param other the RHS of the equality test.
+ /// @returns true if `other` is identical to `*this`
+ bool operator==(const Usage& other) const;
+
+ /// Adds the usages from another usage object.
+ /// @param other the other usage
+ void Add(const Usage& other);
+
+ /// Records usage as a sampler.
+ void AddSampler();
+ /// Records usage as a comparison sampler.
+ void AddComparisonSampler();
+
+ /// Records usage as a texture of some kind.
+ void AddTexture();
+ /// Records usage as a read-only storage texture.
+ void AddStorageReadTexture();
+ /// Records usage as a write-only storage texture.
+ void AddStorageWriteTexture();
+ /// Records usage as a sampled texture.
+ void AddSampledTexture();
+ /// Records usage as a multisampled texture.
+ void AddMultisampledTexture();
+ /// Records usage as a depth texture.
+ void AddDepthTexture();
+
+ /// @returns this usage object as a string.
+ std::string to_str() const;
+
+ private:
+ // Sampler properties.
+ bool is_sampler_ = false;
+ // A comparison sampler is always a sampler:
+ // |is_comparison_sampler_| implies |is_sampler_|
+ bool is_comparison_sampler_ = false;
+
+ // Texture properties.
+ // |is_texture_| is always implied by any of the others below.
+ bool is_texture_ = false;
+ bool is_sampled_ = false;
+ bool is_multisampled_ = false; // This implies it's sampled as well.
+ bool is_depth_ = false;
+ bool is_storage_read_ = false;
+ bool is_storage_write_ = false;
};
/// Writes the Usage to the ostream
@@ -129,7 +127,7 @@ class Usage {
/// @param u the Usage
/// @returns the ostream so calls can be chained
inline std::ostream& operator<<(std::ostream& out, const Usage& u) {
- return u.operator<<(out);
+ return u.operator<<(out);
}
} // namespace tint::reader::spirv
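The free operator<< at the bottom of this header just forwards to the member Usage::operator<<, so a Usage can be streamed to any std::ostream, and to_str() wraps the same path. A small sketch, again assuming usage.h is available; the expected text matches the expectations in usage_test.cc below:

#include <cassert>
#include <sstream>

#include "src/tint/reader/spirv/usage.h"  // assumed include path

void UsagePrintingSketch() {
    using tint::reader::spirv::Usage;

    Usage u;
    u.AddComparisonSampler();  // also records the plain sampler property

    // The Add*() methods are idempotent: re-adding leaves the usage equal to
    // a copy taken beforehand.
    const Usage copy(u);
    u.AddComparisonSampler();
    assert(u == copy);

    // Streaming via the free operator<< and calling to_str() produce the same
    // text, e.g. "Usage(Sampler( comparison ))".
    std::ostringstream ss;
    ss << u;
    assert(ss.str() == u.to_str());
}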
diff --git a/chromium/third_party/dawn/src/tint/reader/spirv/usage_test.cc b/chromium/third_party/dawn/src/tint/reader/spirv/usage_test.cc
index 0ee7f15fa09..d01d1a264ef 100644
--- a/chromium/third_party/dawn/src/tint/reader/spirv/usage_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/spirv/usage_test.cc
@@ -24,267 +24,267 @@ namespace {
using ::testing::Eq;
TEST_F(SpvParserTest, Usage_Trivial_Properties) {
- Usage u;
- EXPECT_TRUE(u.IsValid());
- EXPECT_FALSE(u.IsComplete());
- EXPECT_FALSE(u.IsSampler());
- EXPECT_FALSE(u.IsComparisonSampler());
- EXPECT_FALSE(u.IsTexture());
- EXPECT_FALSE(u.IsSampledTexture());
- EXPECT_FALSE(u.IsMultisampledTexture());
- EXPECT_FALSE(u.IsDepthTexture());
- EXPECT_FALSE(u.IsStorageReadTexture());
- EXPECT_FALSE(u.IsStorageWriteTexture());
+ Usage u;
+ EXPECT_TRUE(u.IsValid());
+ EXPECT_FALSE(u.IsComplete());
+ EXPECT_FALSE(u.IsSampler());
+ EXPECT_FALSE(u.IsComparisonSampler());
+ EXPECT_FALSE(u.IsTexture());
+ EXPECT_FALSE(u.IsSampledTexture());
+ EXPECT_FALSE(u.IsMultisampledTexture());
+ EXPECT_FALSE(u.IsDepthTexture());
+ EXPECT_FALSE(u.IsStorageReadTexture());
+ EXPECT_FALSE(u.IsStorageWriteTexture());
}
TEST_F(SpvParserTest, Usage_Trivial_Output) {
- std::ostringstream ss;
- Usage u;
- ss << u;
- EXPECT_THAT(ss.str(), Eq("Usage()"));
+ std::ostringstream ss;
+ Usage u;
+ ss << u;
+ EXPECT_THAT(ss.str(), Eq("Usage()"));
}
TEST_F(SpvParserTest, Usage_Equality_OneDifference) {
- const int num_usages = 9;
- std::vector<Usage> usages(num_usages);
- usages[1].AddSampler();
- usages[2].AddComparisonSampler();
- usages[3].AddTexture();
- usages[4].AddSampledTexture();
- usages[5].AddMultisampledTexture();
- usages[6].AddDepthTexture();
- usages[7].AddStorageReadTexture();
- usages[8].AddStorageWriteTexture();
- for (int i = 0; i < num_usages; ++i) {
- for (int j = 0; j < num_usages; ++j) {
- const auto& lhs = usages[i];
- const auto& rhs = usages[j];
- if (i == j) {
- EXPECT_TRUE(lhs == rhs);
- } else {
- EXPECT_FALSE(lhs == rhs);
- }
+ const int num_usages = 9;
+ std::vector<Usage> usages(num_usages);
+ usages[1].AddSampler();
+ usages[2].AddComparisonSampler();
+ usages[3].AddTexture();
+ usages[4].AddSampledTexture();
+ usages[5].AddMultisampledTexture();
+ usages[6].AddDepthTexture();
+ usages[7].AddStorageReadTexture();
+ usages[8].AddStorageWriteTexture();
+ for (int i = 0; i < num_usages; ++i) {
+ for (int j = 0; j < num_usages; ++j) {
+ const auto& lhs = usages[i];
+ const auto& rhs = usages[j];
+ if (i == j) {
+ EXPECT_TRUE(lhs == rhs);
+ } else {
+ EXPECT_FALSE(lhs == rhs);
+ }
+ }
}
- }
}
TEST_F(SpvParserTest, Usage_Add) {
- // Mix two nontrivial usages.
- Usage a;
- a.AddStorageReadTexture();
-
- Usage b;
- b.AddComparisonSampler();
-
- a.Add(b);
-
- EXPECT_FALSE(a.IsValid());
- EXPECT_FALSE(a.IsComplete());
- EXPECT_TRUE(a.IsSampler());
- EXPECT_TRUE(a.IsComparisonSampler());
- EXPECT_TRUE(a.IsTexture());
- EXPECT_FALSE(a.IsSampledTexture());
- EXPECT_FALSE(a.IsMultisampledTexture());
- EXPECT_FALSE(a.IsDepthTexture());
- EXPECT_TRUE(a.IsStorageReadTexture());
- EXPECT_FALSE(a.IsStorageWriteTexture());
-
- std::ostringstream ss;
- ss << a;
- EXPECT_THAT(ss.str(), Eq("Usage(Sampler( comparison )Texture( read ))"));
+ // Mix two nontrivial usages.
+ Usage a;
+ a.AddStorageReadTexture();
+
+ Usage b;
+ b.AddComparisonSampler();
+
+ a.Add(b);
+
+ EXPECT_FALSE(a.IsValid());
+ EXPECT_FALSE(a.IsComplete());
+ EXPECT_TRUE(a.IsSampler());
+ EXPECT_TRUE(a.IsComparisonSampler());
+ EXPECT_TRUE(a.IsTexture());
+ EXPECT_FALSE(a.IsSampledTexture());
+ EXPECT_FALSE(a.IsMultisampledTexture());
+ EXPECT_FALSE(a.IsDepthTexture());
+ EXPECT_TRUE(a.IsStorageReadTexture());
+ EXPECT_FALSE(a.IsStorageWriteTexture());
+
+ std::ostringstream ss;
+ ss << a;
+ EXPECT_THAT(ss.str(), Eq("Usage(Sampler( comparison )Texture( read ))"));
}
TEST_F(SpvParserTest, Usage_AddSampler) {
- std::ostringstream ss;
- Usage u;
- u.AddSampler();
-
- EXPECT_TRUE(u.IsValid());
- EXPECT_TRUE(u.IsComplete());
- EXPECT_TRUE(u.IsSampler());
- EXPECT_FALSE(u.IsComparisonSampler());
- EXPECT_FALSE(u.IsTexture());
- EXPECT_FALSE(u.IsSampledTexture());
- EXPECT_FALSE(u.IsMultisampledTexture());
- EXPECT_FALSE(u.IsDepthTexture());
- EXPECT_FALSE(u.IsStorageReadTexture());
- EXPECT_FALSE(u.IsStorageWriteTexture());
-
- ss << u;
- EXPECT_THAT(ss.str(), Eq("Usage(Sampler( ))"));
-
- // Check idempotency
- auto copy(u);
- u.AddSampler();
- EXPECT_TRUE(u == copy);
+ std::ostringstream ss;
+ Usage u;
+ u.AddSampler();
+
+ EXPECT_TRUE(u.IsValid());
+ EXPECT_TRUE(u.IsComplete());
+ EXPECT_TRUE(u.IsSampler());
+ EXPECT_FALSE(u.IsComparisonSampler());
+ EXPECT_FALSE(u.IsTexture());
+ EXPECT_FALSE(u.IsSampledTexture());
+ EXPECT_FALSE(u.IsMultisampledTexture());
+ EXPECT_FALSE(u.IsDepthTexture());
+ EXPECT_FALSE(u.IsStorageReadTexture());
+ EXPECT_FALSE(u.IsStorageWriteTexture());
+
+ ss << u;
+ EXPECT_THAT(ss.str(), Eq("Usage(Sampler( ))"));
+
+ // Check idempotency
+ auto copy(u);
+ u.AddSampler();
+ EXPECT_TRUE(u == copy);
}
TEST_F(SpvParserTest, Usage_AddComparisonSampler) {
- std::ostringstream ss;
- Usage u;
- u.AddComparisonSampler();
-
- EXPECT_TRUE(u.IsValid());
- EXPECT_TRUE(u.IsComplete());
- EXPECT_TRUE(u.IsSampler());
- EXPECT_TRUE(u.IsComparisonSampler());
- EXPECT_FALSE(u.IsTexture());
- EXPECT_FALSE(u.IsSampledTexture());
- EXPECT_FALSE(u.IsMultisampledTexture());
- EXPECT_FALSE(u.IsDepthTexture());
- EXPECT_FALSE(u.IsStorageReadTexture());
- EXPECT_FALSE(u.IsStorageWriteTexture());
-
- ss << u;
- EXPECT_THAT(ss.str(), Eq("Usage(Sampler( comparison ))"));
-
- auto copy(u);
- u.AddComparisonSampler();
- EXPECT_TRUE(u == copy);
+ std::ostringstream ss;
+ Usage u;
+ u.AddComparisonSampler();
+
+ EXPECT_TRUE(u.IsValid());
+ EXPECT_TRUE(u.IsComplete());
+ EXPECT_TRUE(u.IsSampler());
+ EXPECT_TRUE(u.IsComparisonSampler());
+ EXPECT_FALSE(u.IsTexture());
+ EXPECT_FALSE(u.IsSampledTexture());
+ EXPECT_FALSE(u.IsMultisampledTexture());
+ EXPECT_FALSE(u.IsDepthTexture());
+ EXPECT_FALSE(u.IsStorageReadTexture());
+ EXPECT_FALSE(u.IsStorageWriteTexture());
+
+ ss << u;
+ EXPECT_THAT(ss.str(), Eq("Usage(Sampler( comparison ))"));
+
+ auto copy(u);
+ u.AddComparisonSampler();
+ EXPECT_TRUE(u == copy);
}
TEST_F(SpvParserTest, Usage_AddTexture) {
- std::ostringstream ss;
- Usage u;
- u.AddTexture();
-
- EXPECT_TRUE(u.IsValid());
- EXPECT_FALSE(u.IsComplete()); // Don't know if it's sampled or storage
- EXPECT_FALSE(u.IsSampler());
- EXPECT_FALSE(u.IsComparisonSampler());
- EXPECT_TRUE(u.IsTexture());
- EXPECT_FALSE(u.IsSampledTexture());
- EXPECT_FALSE(u.IsMultisampledTexture());
- EXPECT_FALSE(u.IsDepthTexture());
- EXPECT_FALSE(u.IsStorageReadTexture());
- EXPECT_FALSE(u.IsStorageWriteTexture());
-
- ss << u;
- EXPECT_THAT(ss.str(), Eq("Usage(Texture( ))"));
-
- auto copy(u);
- u.AddTexture();
- EXPECT_TRUE(u == copy);
+ std::ostringstream ss;
+ Usage u;
+ u.AddTexture();
+
+ EXPECT_TRUE(u.IsValid());
+ EXPECT_FALSE(u.IsComplete()); // Don't know if it's sampled or storage
+ EXPECT_FALSE(u.IsSampler());
+ EXPECT_FALSE(u.IsComparisonSampler());
+ EXPECT_TRUE(u.IsTexture());
+ EXPECT_FALSE(u.IsSampledTexture());
+ EXPECT_FALSE(u.IsMultisampledTexture());
+ EXPECT_FALSE(u.IsDepthTexture());
+ EXPECT_FALSE(u.IsStorageReadTexture());
+ EXPECT_FALSE(u.IsStorageWriteTexture());
+
+ ss << u;
+ EXPECT_THAT(ss.str(), Eq("Usage(Texture( ))"));
+
+ auto copy(u);
+ u.AddTexture();
+ EXPECT_TRUE(u == copy);
}
TEST_F(SpvParserTest, Usage_AddSampledTexture) {
- std::ostringstream ss;
- Usage u;
- u.AddSampledTexture();
-
- EXPECT_TRUE(u.IsValid());
- EXPECT_TRUE(u.IsComplete());
- EXPECT_FALSE(u.IsSampler());
- EXPECT_FALSE(u.IsComparisonSampler());
- EXPECT_TRUE(u.IsTexture());
- EXPECT_TRUE(u.IsSampledTexture());
- EXPECT_FALSE(u.IsMultisampledTexture());
- EXPECT_FALSE(u.IsDepthTexture());
- EXPECT_FALSE(u.IsStorageReadTexture());
- EXPECT_FALSE(u.IsStorageWriteTexture());
-
- ss << u;
- EXPECT_THAT(ss.str(), Eq("Usage(Texture( is_sampled ))"));
-
- auto copy(u);
- u.AddSampledTexture();
- EXPECT_TRUE(u == copy);
+ std::ostringstream ss;
+ Usage u;
+ u.AddSampledTexture();
+
+ EXPECT_TRUE(u.IsValid());
+ EXPECT_TRUE(u.IsComplete());
+ EXPECT_FALSE(u.IsSampler());
+ EXPECT_FALSE(u.IsComparisonSampler());
+ EXPECT_TRUE(u.IsTexture());
+ EXPECT_TRUE(u.IsSampledTexture());
+ EXPECT_FALSE(u.IsMultisampledTexture());
+ EXPECT_FALSE(u.IsDepthTexture());
+ EXPECT_FALSE(u.IsStorageReadTexture());
+ EXPECT_FALSE(u.IsStorageWriteTexture());
+
+ ss << u;
+ EXPECT_THAT(ss.str(), Eq("Usage(Texture( is_sampled ))"));
+
+ auto copy(u);
+ u.AddSampledTexture();
+ EXPECT_TRUE(u == copy);
}
TEST_F(SpvParserTest, Usage_AddMultisampledTexture) {
- std::ostringstream ss;
- Usage u;
- u.AddMultisampledTexture();
-
- EXPECT_TRUE(u.IsValid());
- EXPECT_TRUE(u.IsComplete());
- EXPECT_FALSE(u.IsSampler());
- EXPECT_FALSE(u.IsComparisonSampler());
- EXPECT_TRUE(u.IsTexture());
- EXPECT_TRUE(u.IsSampledTexture());
- EXPECT_TRUE(u.IsMultisampledTexture());
- EXPECT_FALSE(u.IsDepthTexture());
- EXPECT_FALSE(u.IsStorageReadTexture());
- EXPECT_FALSE(u.IsStorageWriteTexture());
-
- ss << u;
- EXPECT_THAT(ss.str(), Eq("Usage(Texture( is_sampled ms ))"));
-
- auto copy(u);
- u.AddMultisampledTexture();
- EXPECT_TRUE(u == copy);
+ std::ostringstream ss;
+ Usage u;
+ u.AddMultisampledTexture();
+
+ EXPECT_TRUE(u.IsValid());
+ EXPECT_TRUE(u.IsComplete());
+ EXPECT_FALSE(u.IsSampler());
+ EXPECT_FALSE(u.IsComparisonSampler());
+ EXPECT_TRUE(u.IsTexture());
+ EXPECT_TRUE(u.IsSampledTexture());
+ EXPECT_TRUE(u.IsMultisampledTexture());
+ EXPECT_FALSE(u.IsDepthTexture());
+ EXPECT_FALSE(u.IsStorageReadTexture());
+ EXPECT_FALSE(u.IsStorageWriteTexture());
+
+ ss << u;
+ EXPECT_THAT(ss.str(), Eq("Usage(Texture( is_sampled ms ))"));
+
+ auto copy(u);
+ u.AddMultisampledTexture();
+ EXPECT_TRUE(u == copy);
}
TEST_F(SpvParserTest, Usage_AddDepthTexture) {
- std::ostringstream ss;
- Usage u;
- u.AddDepthTexture();
-
- EXPECT_TRUE(u.IsValid());
- EXPECT_TRUE(u.IsComplete());
- EXPECT_FALSE(u.IsSampler());
- EXPECT_FALSE(u.IsComparisonSampler());
- EXPECT_TRUE(u.IsTexture());
- EXPECT_TRUE(u.IsSampledTexture());
- EXPECT_FALSE(u.IsMultisampledTexture());
- EXPECT_TRUE(u.IsDepthTexture());
- EXPECT_FALSE(u.IsStorageReadTexture());
- EXPECT_FALSE(u.IsStorageWriteTexture());
-
- ss << u;
- EXPECT_THAT(ss.str(), Eq("Usage(Texture( is_sampled depth ))"));
-
- auto copy(u);
- u.AddDepthTexture();
- EXPECT_TRUE(u == copy);
+ std::ostringstream ss;
+ Usage u;
+ u.AddDepthTexture();
+
+ EXPECT_TRUE(u.IsValid());
+ EXPECT_TRUE(u.IsComplete());
+ EXPECT_FALSE(u.IsSampler());
+ EXPECT_FALSE(u.IsComparisonSampler());
+ EXPECT_TRUE(u.IsTexture());
+ EXPECT_TRUE(u.IsSampledTexture());
+ EXPECT_FALSE(u.IsMultisampledTexture());
+ EXPECT_TRUE(u.IsDepthTexture());
+ EXPECT_FALSE(u.IsStorageReadTexture());
+ EXPECT_FALSE(u.IsStorageWriteTexture());
+
+ ss << u;
+ EXPECT_THAT(ss.str(), Eq("Usage(Texture( is_sampled depth ))"));
+
+ auto copy(u);
+ u.AddDepthTexture();
+ EXPECT_TRUE(u == copy);
}
TEST_F(SpvParserTest, Usage_AddStorageReadTexture) {
- std::ostringstream ss;
- Usage u;
- u.AddStorageReadTexture();
-
- EXPECT_TRUE(u.IsValid());
- EXPECT_TRUE(u.IsComplete());
- EXPECT_FALSE(u.IsSampler());
- EXPECT_FALSE(u.IsComparisonSampler());
- EXPECT_TRUE(u.IsTexture());
- EXPECT_FALSE(u.IsSampledTexture());
- EXPECT_FALSE(u.IsMultisampledTexture());
- EXPECT_FALSE(u.IsDepthTexture());
- EXPECT_TRUE(u.IsStorageReadTexture());
- EXPECT_FALSE(u.IsStorageWriteTexture());
-
- ss << u;
- EXPECT_THAT(ss.str(), Eq("Usage(Texture( read ))"));
-
- auto copy(u);
- u.AddStorageReadTexture();
- EXPECT_TRUE(u == copy);
+ std::ostringstream ss;
+ Usage u;
+ u.AddStorageReadTexture();
+
+ EXPECT_TRUE(u.IsValid());
+ EXPECT_TRUE(u.IsComplete());
+ EXPECT_FALSE(u.IsSampler());
+ EXPECT_FALSE(u.IsComparisonSampler());
+ EXPECT_TRUE(u.IsTexture());
+ EXPECT_FALSE(u.IsSampledTexture());
+ EXPECT_FALSE(u.IsMultisampledTexture());
+ EXPECT_FALSE(u.IsDepthTexture());
+ EXPECT_TRUE(u.IsStorageReadTexture());
+ EXPECT_FALSE(u.IsStorageWriteTexture());
+
+ ss << u;
+ EXPECT_THAT(ss.str(), Eq("Usage(Texture( read ))"));
+
+ auto copy(u);
+ u.AddStorageReadTexture();
+ EXPECT_TRUE(u == copy);
}
TEST_F(SpvParserTest, Usage_AddStorageWriteTexture) {
- std::ostringstream ss;
- Usage u;
- u.AddStorageWriteTexture();
-
- EXPECT_TRUE(u.IsValid());
- EXPECT_TRUE(u.IsComplete());
- EXPECT_FALSE(u.IsSampler());
- EXPECT_FALSE(u.IsComparisonSampler());
- EXPECT_TRUE(u.IsTexture());
- EXPECT_FALSE(u.IsSampledTexture());
- EXPECT_FALSE(u.IsMultisampledTexture());
- EXPECT_FALSE(u.IsDepthTexture());
- EXPECT_FALSE(u.IsStorageReadTexture());
- EXPECT_TRUE(u.IsStorageWriteTexture());
-
- ss << u;
- EXPECT_THAT(ss.str(), Eq("Usage(Texture( write ))"));
-
- auto copy(u);
- u.AddStorageWriteTexture();
- EXPECT_TRUE(u == copy);
+ std::ostringstream ss;
+ Usage u;
+ u.AddStorageWriteTexture();
+
+ EXPECT_TRUE(u.IsValid());
+ EXPECT_TRUE(u.IsComplete());
+ EXPECT_FALSE(u.IsSampler());
+ EXPECT_FALSE(u.IsComparisonSampler());
+ EXPECT_TRUE(u.IsTexture());
+ EXPECT_FALSE(u.IsSampledTexture());
+ EXPECT_FALSE(u.IsMultisampledTexture());
+ EXPECT_FALSE(u.IsDepthTexture());
+ EXPECT_FALSE(u.IsStorageReadTexture());
+ EXPECT_TRUE(u.IsStorageWriteTexture());
+
+ ss << u;
+ EXPECT_THAT(ss.str(), Eq("Usage(Texture( write ))"));
+
+ auto copy(u);
+ u.AddStorageWriteTexture();
+ EXPECT_TRUE(u == copy);
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/lexer.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/lexer.cc
index 7d8669035da..58e3c85cc5a 100644
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/lexer.cc
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/lexer.cc
@@ -17,1117 +17,1156 @@
#include <cctype>
#include <cmath>
#include <cstring>
+#include <functional>
#include <limits>
+#include <optional> // NOLINT(build/include_order)
+#include <tuple>
+#include <type_traits>
#include <utility>
#include "src/tint/debug.h"
+#include "src/tint/number.h"
#include "src/tint/text/unicode.h"
namespace tint::reader::wgsl {
namespace {
-bool is_blankspace(char c) {
- // See https://www.w3.org/TR/WGSL/#blankspace.
- return c == ' ' || c == '\t' || c == '\n' || c == '\v' || c == '\f' ||
- c == '\r';
+// Unicode parsing code assumes that the size of a single std::string element is
+// 1 byte.
+static_assert(sizeof(decltype(tint::Source::FileContent::data[0])) == sizeof(uint8_t),
+ "tint::reader::wgsl requires the size of a std::string element "
+ "to be a single byte");
+
+bool read_blankspace(std::string_view str, size_t i, bool* is_blankspace, size_t* blankspace_size) {
+ // See https://www.w3.org/TR/WGSL/#blankspace
+
+ auto* utf8 = reinterpret_cast<const uint8_t*>(&str[i]);
+ auto [cp, n] = text::utf8::Decode(utf8, str.size() - i);
+
+ if (n == 0) {
+ return false;
+ }
+
+ static const auto kSpace = text::CodePoint(0x0020); // space
+ static const auto kHTab = text::CodePoint(0x0009); // horizontal tab
+ static const auto kL2R = text::CodePoint(0x200E); // left-to-right mark
+ static const auto kR2L = text::CodePoint(0x200F); // right-to-left mark
+
+ if (cp == kSpace || cp == kHTab || cp == kL2R || cp == kR2L) {
+ *is_blankspace = true;
+ *blankspace_size = n;
+ return true;
+ }
+
+ *is_blankspace = false;
+ return true;
}
uint32_t dec_value(char c) {
- if (c >= '0' && c <= '9') {
- return static_cast<uint32_t>(c - '0');
- }
- return 0;
+ if (c >= '0' && c <= '9') {
+ return static_cast<uint32_t>(c - '0');
+ }
+ return 0;
}
uint32_t hex_value(char c) {
- if (c >= '0' && c <= '9') {
- return static_cast<uint32_t>(c - '0');
- }
- if (c >= 'a' && c <= 'f') {
- return 0xA + static_cast<uint32_t>(c - 'a');
- }
- if (c >= 'A' && c <= 'F') {
- return 0xA + static_cast<uint32_t>(c - 'A');
- }
- return 0;
+ if (c >= '0' && c <= '9') {
+ return static_cast<uint32_t>(c - '0');
+ }
+ if (c >= 'a' && c <= 'f') {
+ return 0xA + static_cast<uint32_t>(c - 'a');
+ }
+ if (c >= 'A' && c <= 'F') {
+ return 0xA + static_cast<uint32_t>(c - 'A');
+ }
+ return 0;
}
} // namespace
-Lexer::Lexer(const Source::File* file)
- : file_(file),
- len_(static_cast<uint32_t>(file->content.data.size())),
- location_{1, 1} {}
+Lexer::Lexer(const Source::File* file) : file_(file), location_{1, 1} {}
Lexer::~Lexer() = default;
+const std::string_view Lexer::line() const {
+ if (file_->content.lines.size() == 0) {
+ static const char* empty_string = "";
+ return empty_string;
+ }
+ return file_->content.lines[location_.line - 1];
+}
+
+size_t Lexer::pos() const {
+ return location_.column - 1;
+}
+
+size_t Lexer::length() const {
+ return line().size();
+}
+
+const char& Lexer::at(size_t pos) const {
+ auto l = line();
+ // Unlike for std::string, if pos == l.size(), indexing `l[pos]` is UB for
+ // std::string_view.
+ if (pos >= l.size()) {
+ static const char zero = 0;
+ return zero;
+ }
+ return l[pos];
+}
+
+std::string_view Lexer::substr(size_t offset, size_t count) {
+ return line().substr(offset, count);
+}
+
+void Lexer::advance(size_t offset) {
+ location_.column += offset;
+}
+
+void Lexer::set_pos(size_t pos) {
+ location_.column = pos + 1;
+}
+
+void Lexer::advance_line() {
+ location_.line++;
+ location_.column = 1;
+}
+
+bool Lexer::is_eof() const {
+ return location_.line >= file_->content.lines.size() && pos() >= length();
+}
+
+bool Lexer::is_eol() const {
+ return pos() >= length();
+}
+
Token Lexer::next() {
- if (auto t = skip_blankspace_and_comments(); !t.IsUninitialized()) {
- return t;
- }
+ if (auto t = skip_blankspace_and_comments(); !t.IsUninitialized()) {
+ return t;
+ }
- if (auto t = try_hex_float(); !t.IsUninitialized()) {
- return t;
- }
+ if (auto t = try_hex_float(); !t.IsUninitialized()) {
+ return t;
+ }
- if (auto t = try_hex_integer(); !t.IsUninitialized()) {
- return t;
- }
+ if (auto t = try_hex_integer(); !t.IsUninitialized()) {
+ return t;
+ }
- if (auto t = try_float(); !t.IsUninitialized()) {
- return t;
- }
+ if (auto t = try_float(); !t.IsUninitialized()) {
+ return t;
+ }
- if (auto t = try_integer(); !t.IsUninitialized()) {
- return t;
- }
+ if (auto t = try_integer(); !t.IsUninitialized()) {
+ return t;
+ }
- if (auto t = try_ident(); !t.IsUninitialized()) {
- return t;
- }
+ if (auto t = try_ident(); !t.IsUninitialized()) {
+ return t;
+ }
- if (auto t = try_punctuation(); !t.IsUninitialized()) {
- return t;
- }
+ if (auto t = try_punctuation(); !t.IsUninitialized()) {
+ return t;
+ }
- return {Token::Type::kError, begin_source(),
- (is_null() ? "null character found" : "invalid character found")};
+ return {Token::Type::kError, begin_source(),
+ (is_null() ? "null character found" : "invalid character found")};
}
Source Lexer::begin_source() const {
- Source src{};
- src.file = file_;
- src.range.begin = location_;
- src.range.end = location_;
- return src;
+ Source src{};
+ src.file = file_;
+ src.range.begin = location_;
+ src.range.end = location_;
+ return src;
}
void Lexer::end_source(Source& src) const {
- src.range.end = location_;
-}
-
-bool Lexer::is_eof() const {
- return pos_ >= len_;
+ src.range.end = location_;
}
bool Lexer::is_null() const {
- return (pos_ < len_) && (file_->content.data[pos_] == 0);
+ return (pos() < length()) && (at(pos()) == 0);
}
bool Lexer::is_digit(char ch) const {
- return std::isdigit(static_cast<unsigned char>(ch));
+ return std::isdigit(static_cast<unsigned char>(ch));
}
bool Lexer::is_hex(char ch) const {
- return std::isxdigit(static_cast<unsigned char>(ch));
+ return std::isxdigit(static_cast<unsigned char>(ch));
}
-bool Lexer::matches(size_t pos, std::string_view substr) {
- if (pos >= len_)
- return false;
- return file_->content.data_view.substr(pos, substr.size()) == substr;
+bool Lexer::matches(size_t pos, std::string_view sub_string) {
+ if (pos >= length()) {
+ return false;
+ }
+ return substr(pos, sub_string.size()) == sub_string;
}
Token Lexer::skip_blankspace_and_comments() {
- for (;;) {
- auto pos = pos_;
- while (!is_eof() && is_blankspace(file_->content.data[pos_])) {
- if (matches(pos_, "\n")) {
- pos_++;
- location_.line++;
- location_.column = 1;
- continue;
- }
+ for (;;) {
+ auto loc = location_;
+ while (!is_eof()) {
+ if (is_eol()) {
+ advance_line();
+ continue;
+ }
+
+ bool is_blankspace;
+ size_t blankspace_size;
+ if (!read_blankspace(line(), pos(), &is_blankspace, &blankspace_size)) {
+ return {Token::Type::kError, begin_source(), "invalid UTF-8"};
+ }
+ if (!is_blankspace) {
+ break;
+ }
+
+ advance(blankspace_size);
+ }
- pos_++;
- location_.column++;
- }
+ auto t = skip_comment();
+ if (!t.IsUninitialized()) {
+ return t;
+ }
- auto t = skip_comment();
- if (!t.IsUninitialized()) {
- return t;
+ // If the cursor didn't advance we didn't remove any blankspace
+ // so we're done.
+ if (loc == location_) {
+ break;
+ }
+ }
+ if (is_eof()) {
+ return {Token::Type::kEOF, begin_source()};
}
- // If the cursor didn't advance we didn't remove any blankspace
- // so we're done.
- if (pos == pos_)
- break;
- }
- if (is_eof()) {
- return {Token::Type::kEOF, begin_source()};
- }
-
- return {};
+ return {};
}
Token Lexer::skip_comment() {
- if (matches(pos_, "//")) {
- // Line comment: ignore everything until the end of input or a blankspace
- // character other than space or horizontal tab.
- while (!is_eof() && !(is_blankspace(file_->content.data[pos_]) &&
- !matches(pos_, " ") && !matches(pos_, "\t"))) {
- if (is_null()) {
- return {Token::Type::kError, begin_source(), "null character found"};
- }
- pos_++;
- location_.column++;
+ if (matches(pos(), "//")) {
+ // Line comment: ignore everything until the end of line.
+ while (!is_eol()) {
+ if (is_null()) {
+ return {Token::Type::kError, begin_source(), "null character found"};
+ }
+ advance();
+ }
+ return {};
+ }
+
+ if (matches(pos(), "/*")) {
+ // Block comment: ignore everything until the closing '*/' token.
+
+ // Record source location of the initial '/*'
+ auto source = begin_source();
+ source.range.end.column += 1;
+
+ advance(2);
+
+ int depth = 1;
+ while (!is_eof() && depth > 0) {
+ if (matches(pos(), "/*")) {
+ // Start of block comment: increase nesting depth.
+ advance(2);
+ depth++;
+ } else if (matches(pos(), "*/")) {
+ // End of block comment: decrease nesting depth.
+ advance(2);
+ depth--;
+ } else if (is_eol()) {
+ // Newline: skip and update source location.
+ advance_line();
+ } else if (is_null()) {
+ return {Token::Type::kError, begin_source(), "null character found"};
+ } else {
+ // Anything else: skip and update source location.
+ advance();
+ }
+ }
+ if (depth > 0) {
+ return {Token::Type::kError, source, "unterminated block comment"};
+ }
}
return {};
- }
+}
- if (matches(pos_, "/*")) {
- // Block comment: ignore everything until the closing '*/' token.
+Token Lexer::try_float() {
+ auto start = pos();
+ auto end = pos();
- // Record source location of the initial '/*'
auto source = begin_source();
- source.range.end.column += 1;
-
- pos_ += 2;
- location_.column += 2;
-
- int depth = 1;
- while (!is_eof() && depth > 0) {
- if (matches(pos_, "/*")) {
- // Start of block comment: increase nesting depth.
- pos_ += 2;
- location_.column += 2;
- depth++;
- } else if (matches(pos_, "*/")) {
- // End of block comment: decrease nesting depth.
- pos_ += 2;
- location_.column += 2;
- depth--;
- } else if (matches(pos_, "\n")) {
- // Newline: skip and update source location.
- pos_++;
- location_.line++;
- location_.column = 1;
- } else if (is_null()) {
- return {Token::Type::kError, begin_source(), "null character found"};
- } else {
- // Anything else: skip and update source location.
- pos_++;
- location_.column++;
- }
- }
- if (depth > 0) {
- return {Token::Type::kError, source, "unterminated block comment"};
- }
- }
- return {};
-}
+ bool has_mantissa_digits = false;
-Token Lexer::try_float() {
- auto start = pos_;
- auto end = pos_;
-
- auto source = begin_source();
- bool has_mantissa_digits = false;
-
- if (matches(end, "-")) {
- end++;
- }
- while (end < len_ && is_digit(file_->content.data[end])) {
- has_mantissa_digits = true;
- end++;
- }
-
- bool has_point = false;
- if (end < len_ && matches(end, ".")) {
- has_point = true;
- end++;
- }
-
- while (end < len_ && is_digit(file_->content.data[end])) {
- has_mantissa_digits = true;
- end++;
- }
-
- if (!has_mantissa_digits) {
- return {};
- }
+ if (matches(end, "-")) {
+ end++;
+ }
+ while (end < length() && is_digit(at(end))) {
+ has_mantissa_digits = true;
+ end++;
+ }
- // Parse the exponent if one exists
- bool has_exponent = false;
- if (end < len_ && (matches(end, "e") || matches(end, "E"))) {
- end++;
- if (end < len_ && (matches(end, "+") || matches(end, "-"))) {
- end++;
+ bool has_point = false;
+ if (end < length() && matches(end, ".")) {
+ has_point = true;
+ end++;
}
- while (end < len_ && isdigit(file_->content.data[end])) {
- has_exponent = true;
- end++;
+ while (end < length() && is_digit(at(end))) {
+ has_mantissa_digits = true;
+ end++;
}
- // If an 'e' or 'E' was present, then the number part must also be present.
- if (!has_exponent) {
- const auto str = file_->content.data.substr(start, end - start);
- return {Token::Type::kError, source,
- "incomplete exponent for floating point literal: " + str};
+ if (!has_mantissa_digits) {
+ return {};
}
- }
- bool has_f_suffix = false;
- if (end < len_ && matches(end, "f")) {
- end++;
- has_f_suffix = true;
- }
+ // Parse the exponent if one exists
+ bool has_exponent = false;
+ if (end < length() && (matches(end, "e") || matches(end, "E"))) {
+ end++;
+ if (end < length() && (matches(end, "+") || matches(end, "-"))) {
+ end++;
+ }
- if (!has_point && !has_exponent && !has_f_suffix) {
- // If it only has digits then it's an integer.
- return {};
- }
-
- // Save the error string, for use by diagnostics.
- const auto str = file_->content.data.substr(start, end - start);
-
- pos_ = end;
- location_.column += (end - start);
-
- end_source(source);
-
- auto res = strtod(file_->content.data.c_str() + start, nullptr);
- // This errors out if a non-zero magnitude is too small to represent in a
- // float. It can't be represented faithfully in an f32.
- const auto magnitude = std::fabs(res);
- if (0.0 < magnitude &&
- magnitude < static_cast<double>(std::numeric_limits<float>::min())) {
- return {Token::Type::kError, source,
- "f32 (" + str + ") magnitude too small, not representable"};
- }
- // This handles if the number is really large negative number
- if (res < static_cast<double>(std::numeric_limits<float>::lowest())) {
- return {Token::Type::kError, source,
- "f32 (" + str + ") too large (negative)"};
- }
- if (res > static_cast<double>(std::numeric_limits<float>::max())) {
- return {Token::Type::kError, source,
- "f32 (" + str + ") too large (positive)"};
- }
-
- return {source, static_cast<float>(res)};
+ while (end < length() && isdigit(at(end))) {
+ has_exponent = true;
+ end++;
+ }
+
+ // If an 'e' or 'E' was present, then the number part must also be present.
+ if (!has_exponent) {
+ const auto str = std::string{substr(start, end - start)};
+ return {Token::Type::kError, source,
+ "incomplete exponent for floating point literal: " + str};
+ }
+ }
+
+ bool has_f_suffix = false;
+ if (end < length() && matches(end, "f")) {
+ end++;
+ has_f_suffix = true;
+ }
+
+ if (!has_point && !has_exponent && !has_f_suffix) {
+ // If it only has digits then it's an integer.
+ return {};
+ }
+
+ // Save the error string, for use by diagnostics.
+ const auto str = std::string{substr(start, end - start)};
+
+ advance(end - start);
+ end_source(source);
+
+ double value = std::strtod(&at(start), nullptr);
+
+ if (has_f_suffix) {
+ if (auto f = CheckedConvert<f32>(AFloat(value))) {
+ return {Token::Type::kFloatLiteral_F, source, static_cast<double>(f.Get())};
+ } else {
+ return {Token::Type::kError, source, "value cannot be represented as 'f32'"};
+ }
+ }
+
+ if (value == HUGE_VAL || -value == HUGE_VAL) {
+ return {Token::Type::kError, source, "value cannot be represented as 'abstract-float'"};
+ } else {
+ return {Token::Type::kFloatLiteral, source, value};
+ }
}
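
try_float() above leaves the numeric conversion to std::strtod() and only afterwards range-checks the result against the requested type (f32 for an 'f' suffix, otherwise abstract-float). A rough standalone sketch of that flow, using plain standard-library checks instead of Tint's Token/CheckedConvert machinery (the f32 check here is only an approximation of what CheckedConvert<f32> does):

    #include <cmath>
    #include <cstdio>
    #include <cstdlib>
    #include <limits>
    #include <string>

    // Parses a decimal float literal; returns false if it cannot be represented.
    bool lex_float(const std::string& text, bool has_f_suffix, double* out) {
        char* end = nullptr;
        const double value = std::strtod(text.c_str(), &end);
        if (end == text.c_str()) {
            return false;  // no digits were consumed
        }
        if (has_f_suffix) {
            // An 'f' suffix requests an f32, so the value must fit in a float.
            if (std::fabs(value) > static_cast<double>(std::numeric_limits<float>::max())) {
                return false;
            }
        } else if (value == HUGE_VAL || value == -HUGE_VAL) {
            return false;  // overflowed the abstract-float (double) range
        }
        *out = value;
        return true;
    }

    int main() {
        double v = 0.0;
        std::printf("%d %g\n", lex_float("1.5e3", /*has_f_suffix=*/false, &v), v);
        return 0;
    }
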
Token Lexer::try_hex_float() {
- constexpr uint32_t kTotalBits = 32;
- constexpr uint32_t kTotalMsb = kTotalBits - 1;
- constexpr uint32_t kMantissaBits = 23;
- constexpr uint32_t kMantissaMsb = kMantissaBits - 1;
- constexpr uint32_t kMantissaShiftRight = kTotalBits - kMantissaBits;
- constexpr int32_t kExponentBias = 127;
- constexpr int32_t kExponentMax = 255;
- constexpr uint32_t kExponentBits = 8;
- constexpr uint32_t kExponentMask = (1 << kExponentBits) - 1;
- constexpr uint32_t kExponentLeftShift = kMantissaBits;
- constexpr uint32_t kSignBit = 31;
-
- auto start = pos_;
- auto end = pos_;
-
- auto source = begin_source();
-
- // clang-format off
+ constexpr uint64_t kExponentBits = 11;
+ constexpr uint64_t kMantissaBits = 52;
+ constexpr uint64_t kTotalBits = 1 + kExponentBits + kMantissaBits;
+ constexpr uint64_t kTotalMsb = kTotalBits - 1;
+ constexpr uint64_t kMantissaMsb = kMantissaBits - 1;
+ constexpr uint64_t kMantissaShiftRight = kTotalBits - kMantissaBits;
+ constexpr int64_t kExponentBias = 1023;
+ constexpr uint64_t kExponentMask = (1 << kExponentBits) - 1;
+ constexpr int64_t kExponentMax = kExponentMask; // Including NaN / inf
+ constexpr uint64_t kExponentLeftShift = kMantissaBits;
+ constexpr uint64_t kSignBit = kTotalBits - 1;
+ constexpr uint64_t kOne = 1;
+
+ auto start = pos();
+ auto end = pos();
+
+ auto source = begin_source();
+
+ // clang-format off
// -?0[xX]([0-9a-fA-F]*.?[0-9a-fA-F]+ | [0-9a-fA-F]+.[0-9a-fA-F]*)(p|P)(+|-)?[0-9]+ // NOLINT
- // clang-format on
-
- // -?
- int32_t sign_bit = 0;
- if (matches(end, "-")) {
- sign_bit = 1;
- end++;
- }
- // 0[xX]
- if (matches(end, "0x") || matches(end, "0X")) {
- end += 2;
- } else {
- return {};
- }
-
- uint32_t mantissa = 0;
- uint32_t exponent = 0;
-
- // TODO(dneto): Values in the normal range for the format do not explicitly
- // store the most significant bit. The algorithm here works hard to eliminate
- // that bit in the representation during parsing, and then it backtracks
- // when it sees it may have to explicitly represent it, and backtracks again
- // when it sees the number is sub-normal (i.e. the exponent underflows).
- // I suspect the logic can be clarified by storing it during parsing, and
- // then removing it later only when needed.
-
- // `set_next_mantissa_bit_to` sets next `mantissa` bit starting from msb to
- // lsb to value 1 if `set` is true, 0 otherwise. Returns true on success, i.e.
- // when the bit can be accommodated in the available space.
- uint32_t mantissa_next_bit = kTotalMsb;
- auto set_next_mantissa_bit_to = [&](bool set, bool integer_part) -> bool {
- // If adding bits for the integer part, we can overflow whether we set the
- // bit or not. For the fractional part, we can only overflow when setting
- // the bit.
- const bool check_overflow = integer_part || set;
- // Note: mantissa_next_bit actually decrements, so comparing it as
- // larger than a positive number relies on wraparound.
- if (check_overflow && (mantissa_next_bit > kTotalMsb)) {
- return false; // Overflowed mantissa
- }
- if (set) {
- mantissa |= (1 << mantissa_next_bit);
- }
- --mantissa_next_bit;
- return true;
- };
-
- // Collect integer range (if any)
- auto integer_range = std::make_pair(end, end);
- while (end < len_ && is_hex(file_->content.data[end])) {
- integer_range.second = ++end;
- }
-
- // .?
- bool hex_point = false;
- if (matches(end, ".")) {
- hex_point = true;
- end++;
- }
-
- // Collect fractional range (if any)
- auto fractional_range = std::make_pair(end, end);
- while (end < len_ && is_hex(file_->content.data[end])) {
- fractional_range.second = ++end;
- }
-
- // Must have at least an integer or fractional part
- if ((integer_range.first == integer_range.second) &&
- (fractional_range.first == fractional_range.second)) {
- return {};
- }
-
- // Is the binary exponent present? It's optional.
- const bool has_exponent = (matches(end, "p") || matches(end, "P"));
- if (has_exponent) {
- end++;
- }
- if (!has_exponent && !hex_point) {
- // It's not a hex float. At best it's a hex integer.
- return {};
- }
+ // clang-format on
- // At this point, we know for sure our token is a hex float value,
- // or an invalid token.
+ // -?
+ int64_t sign_bit = 0;
+ if (matches(end, "-")) {
+ sign_bit = 1;
+ end++;
+ }
+ // 0[xX]
+ if (matches(end, "0x") || matches(end, "0X")) {
+ end += 2;
+ } else {
+ return {};
+ }
- // Parse integer part
- // [0-9a-fA-F]*
+ uint64_t mantissa = 0;
+ uint64_t exponent = 0;
+
+ // TODO(dneto): Values in the normal range for the format do not explicitly
+ // store the most significant bit. The algorithm here works hard to eliminate
+ // that bit in the representation during parsing, and then it backtracks
+ // when it sees it may have to explicitly represent it, and backtracks again
+ // when it sees the number is sub-normal (i.e. the exponent underflows).
+ // I suspect the logic can be clarified by storing it during parsing, and
+ // then removing it later only when needed.
+
+ // `set_next_mantissa_bit_to` sets next `mantissa` bit starting from msb to
+ // lsb to value 1 if `set` is true, 0 otherwise. Returns true on success, i.e.
+ // when the bit can be accommodated in the available space.
+ uint64_t mantissa_next_bit = kTotalMsb;
+ auto set_next_mantissa_bit_to = [&](bool set, bool integer_part) -> bool {
+ // If adding bits for the integer part, we can overflow whether we set the
+ // bit or not. For the fractional part, we can only overflow when setting
+ // the bit.
+ const bool check_overflow = integer_part || set;
+ // Note: mantissa_next_bit actually decrements, so comparing it as
+ // larger than a positive number relies on wraparound.
+ if (check_overflow && (mantissa_next_bit > kTotalMsb)) {
+ return false; // Overflowed mantissa
+ }
+ if (set) {
+ mantissa |= (kOne << mantissa_next_bit);
+ }
+ --mantissa_next_bit;
+ return true;
+ };
+
+ // Collect integer range (if any)
+ auto integer_range = std::make_pair(end, end);
+ while (end < length() && is_hex(at(end))) {
+ integer_range.second = ++end;
+ }
+
+ // .?
+ bool hex_point = false;
+ if (matches(end, ".")) {
+ hex_point = true;
+ end++;
+ }
+
+ // Collect fractional range (if any)
+ auto fractional_range = std::make_pair(end, end);
+ while (end < length() && is_hex(at(end))) {
+ fractional_range.second = ++end;
+ }
+
+ // Must have at least an integer or fractional part
+ if ((integer_range.first == integer_range.second) &&
+ (fractional_range.first == fractional_range.second)) {
+ return {};
+ }
- bool has_zero_integer = true;
- // The magnitude is zero if and only if seen_prior_one_bits is false.
- bool seen_prior_one_bits = false;
- for (auto i = integer_range.first; i < integer_range.second; ++i) {
- const auto nibble = hex_value(file_->content.data[i]);
- if (nibble != 0) {
- has_zero_integer = false;
+ // Is the binary exponent present? It's optional.
+ const bool has_exponent = (matches(end, "p") || matches(end, "P"));
+ if (has_exponent) {
+ end++;
+ }
+ if (!has_exponent && !hex_point) {
+ // It's not a hex float. At best it's a hex integer.
+ return {};
}
- for (int32_t bit = 3; bit >= 0; --bit) {
- auto v = 1 & (nibble >> bit);
+ // At this point, we know for sure our token is a hex float value,
+ // or an invalid token.
- // Skip leading 0s and the first 1
- if (seen_prior_one_bits) {
- if (!set_next_mantissa_bit_to(v != 0, true)) {
- return {Token::Type::kError, source,
- "mantissa is too large for hex float"};
+ // Parse integer part
+ // [0-9a-fA-F]*
+
+ bool has_zero_integer = true;
+ // The magnitude is zero if and only if seen_prior_one_bits is false.
+ bool seen_prior_one_bits = false;
+ for (auto i = integer_range.first; i < integer_range.second; ++i) {
+ const auto nibble = hex_value(at(i));
+ if (nibble != 0) {
+ has_zero_integer = false;
}
- ++exponent;
- } else {
- if (v == 1) {
- seen_prior_one_bits = true;
+
+ for (int bit = 3; bit >= 0; --bit) {
+ auto v = 1 & (nibble >> bit);
+
+ // Skip leading 0s and the first 1
+ if (seen_prior_one_bits) {
+ if (!set_next_mantissa_bit_to(v != 0, true)) {
+ return {Token::Type::kError, source, "mantissa is too large for hex float"};
+ }
+ ++exponent;
+ } else {
+ if (v == 1) {
+ seen_prior_one_bits = true;
+ }
+ }
}
- }
- }
- }
-
- // Parse fractional part
- // [0-9a-fA-F]*
- for (auto i = fractional_range.first; i < fractional_range.second; ++i) {
- auto nibble = hex_value(file_->content.data[i]);
- for (int32_t bit = 3; bit >= 0; --bit) {
- auto v = 1 & (nibble >> bit);
-
- if (v == 1) {
- seen_prior_one_bits = true;
- }
-
- // If integer part is 0, we only start writing bits to the
- // mantissa once we have a non-zero fractional bit. While the fractional
- // values are 0, we adjust the exponent to avoid overflowing `mantissa`.
- if (!seen_prior_one_bits) {
- --exponent;
- } else {
- if (!set_next_mantissa_bit_to(v != 0, false)) {
- return {Token::Type::kError, source,
- "mantissa is too large for hex float"};
+ }
+
+ // Parse fractional part
+ // [0-9a-fA-F]*
+ for (auto i = fractional_range.first; i < fractional_range.second; ++i) {
+ auto nibble = hex_value(at(i));
+ for (int bit = 3; bit >= 0; --bit) {
+ auto v = 1 & (nibble >> bit);
+
+ if (v == 1) {
+ seen_prior_one_bits = true;
+ }
+
+ // If integer part is 0, we only start writing bits to the
+ // mantissa once we have a non-zero fractional bit. While the fractional
+ // values are 0, we adjust the exponent to avoid overflowing `mantissa`.
+ if (!seen_prior_one_bits) {
+ --exponent;
+ } else {
+ if (!set_next_mantissa_bit_to(v != 0, false)) {
+ return {Token::Type::kError, source, "mantissa is too large for hex float"};
+ }
+ }
}
- }
- }
- }
-
- // Determine if the value of the mantissa is zero.
- // Note: it's not enough to check mantissa == 0 as we drop the initial bit,
- // whether it's in the integer part or the fractional part.
- const bool is_zero = !seen_prior_one_bits;
- TINT_ASSERT(Reader, !is_zero || mantissa == 0);
-
- // Parse the optional exponent.
- // ((p|P)(\+|-)?[0-9]+)?
- uint32_t input_exponent = 0; // Defaults to 0 if not present
- int32_t exponent_sign = 1;
- // If the 'p' part is present, the rest of the exponent must exist.
- if (has_exponent) {
- // Parse the rest of the exponent.
- // (+|-)?
- if (matches(end, "+")) {
- end++;
- } else if (matches(end, "-")) {
- exponent_sign = -1;
- end++;
- }
-
- // Parse exponent from input
- // [0-9]+
- // Allow overflow (in uint32_t) when the floating point value magnitude is
- // zero.
- bool has_exponent_digits = false;
- while (end < len_ && isdigit(file_->content.data[end])) {
- has_exponent_digits = true;
- auto prev_exponent = input_exponent;
- input_exponent =
- (input_exponent * 10) + dec_value(file_->content.data[end]);
- // Check if we've overflowed input_exponent. This only matters when
- // the mantissa is non-zero.
- if (!is_zero && (prev_exponent > input_exponent)) {
- return {Token::Type::kError, source,
- "exponent is too large for hex float"};
- }
- end++;
- }
-
- // Parse optional 'f' suffix. For a hex float, it can only exist
- // when the exponent is present. Otherwise it will look like
- // one of the mantissa digits.
- if (end < len_ && matches(end, "f")) {
- end++;
- }
-
- if (!has_exponent_digits) {
- return {Token::Type::kError, source,
- "expected an exponent value for hex float"};
- }
- }
-
- pos_ = end;
- location_.column += (end - start);
- end_source(source);
-
- if (is_zero) {
- // If value is zero, then ignore the exponent and produce a zero
- exponent = 0;
- } else {
- // Ensure input exponent is not too large; i.e. that it won't overflow when
- // adding the exponent bias.
- const uint32_t kIntMax =
- static_cast<uint32_t>(std::numeric_limits<int32_t>::max());
- const uint32_t kMaxInputExponent = kIntMax - kExponentBias;
- if (input_exponent > kMaxInputExponent) {
- return {Token::Type::kError, source,
- "exponent is too large for hex float"};
- }
-
- // Compute exponent so far
- exponent += static_cast<uint32_t>(static_cast<int32_t>(input_exponent) *
- exponent_sign);
-
- // Bias exponent if non-zero
- // After this, if exponent is <= 0, our value is a denormal
- exponent += kExponentBias;
-
- // We know the number is not zero. The MSB is 1 (by construction), and
- // should be eliminated because it becomes the implicit 1 that isn't
- // explicitly represented in the binary32 format. We'll bring it back
- // later if we find the exponent actually underflowed, i.e. the number
- // is sub-normal.
- if (has_zero_integer) {
- mantissa <<= 1;
- --exponent;
- }
- }
-
- // We can now safely work with exponent as a signed quantity, as there's no
- // chance to overflow
- int32_t signed_exponent = static_cast<int32_t>(exponent);
-
- // Shift mantissa to occupy the low 23 bits
- mantissa >>= kMantissaShiftRight;
-
- // If denormal, shift mantissa until our exponent is zero
- if (!is_zero) {
- // Denorm has exponent 0 and non-zero mantissa. We set the top bit here,
- // then shift the mantissa to make exponent zero.
- if (signed_exponent <= 0) {
- mantissa >>= 1;
- mantissa |= (1 << kMantissaMsb);
- }
-
- while (signed_exponent < 0) {
- mantissa >>= 1;
- ++signed_exponent;
-
- // If underflow, clamp to zero
- if (mantissa == 0) {
- signed_exponent = 0;
- }
- }
- }
-
- if (signed_exponent > kExponentMax) {
- // Overflow: set to infinity
- signed_exponent = kExponentMax;
- mantissa = 0;
- } else if (signed_exponent == kExponentMax && mantissa != 0) {
- // NaN: set to infinity
- mantissa = 0;
- }
-
- // Combine sign, mantissa, and exponent
- uint32_t result_u32 = sign_bit << kSignBit;
- result_u32 |= mantissa;
- result_u32 |= (static_cast<uint32_t>(signed_exponent) & kExponentMask)
- << kExponentLeftShift;
-
- // Reinterpret as float and return
- float result;
- std::memcpy(&result, &result_u32, sizeof(result));
- return {source, static_cast<float>(result)};
-}
+ }
+
+ // Determine if the value of the mantissa is zero.
+ // Note: it's not enough to check mantissa == 0 as we drop the initial bit,
+ // whether it's in the integer part or the fractional part.
+ const bool is_zero = !seen_prior_one_bits;
+ TINT_ASSERT(Reader, !is_zero || mantissa == 0);
+
+ // Parse the optional exponent.
+ // ((p|P)(\+|-)?[0-9]+)?
+ uint64_t input_exponent = 0; // Defaults to 0 if not present
+ int64_t exponent_sign = 1;
+ // If the 'p' part is present, the rest of the exponent must exist.
+ bool has_f_suffix = false;
+ if (has_exponent) {
+ // Parse the rest of the exponent.
+ // (+|-)?
+ if (matches(end, "+")) {
+ end++;
+ } else if (matches(end, "-")) {
+ exponent_sign = -1;
+ end++;
+ }
+
+ // Parse exponent from input
+ // [0-9]+
+ // Allow overflow (in uint64_t) when the floating point value magnitude is
+ // zero.
+ bool has_exponent_digits = false;
+ while (end < length() && isdigit(at(end))) {
+ has_exponent_digits = true;
+ auto prev_exponent = input_exponent;
+ input_exponent = (input_exponent * 10) + dec_value(at(end));
+ // Check if we've overflowed input_exponent. This only matters when
+ // the mantissa is non-zero.
+ if (!is_zero && (prev_exponent > input_exponent)) {
+ return {Token::Type::kError, source, "exponent is too large for hex float"};
+ }
+ end++;
+ }
+
+ // Parse optional 'f' suffix. For a hex float, it can only exist
+ // when the exponent is present. Otherwise it will look like
+ // one of the mantissa digits.
+ if (end < length() && matches(end, "f")) {
+ has_f_suffix = true;
+ end++;
+ }
+
+ if (!has_exponent_digits) {
+ return {Token::Type::kError, source, "expected an exponent value for hex float"};
+ }
+ }
-Token Lexer::build_token_from_int_if_possible(Source source,
- size_t start,
- size_t end,
- int32_t base) {
- auto res = strtoll(file_->content.data.c_str() + start, nullptr, base);
- if (matches(pos_, "u")) {
- if (static_cast<uint64_t>(res) >
- static_cast<uint64_t>(std::numeric_limits<uint32_t>::max())) {
- return {Token::Type::kError, source,
- "u32 (" + file_->content.data.substr(start, end - start) +
- ") too large"};
- }
- pos_ += 1;
- location_.column += 1;
+ advance(end - start);
end_source(source);
- return {source, static_cast<uint32_t>(res)};
- }
-
- if (res < static_cast<int64_t>(std::numeric_limits<int32_t>::min())) {
- return {Token::Type::kError, source,
- "i32 (" + file_->content.data.substr(start, end - start) +
- ") too small"};
- }
- if (res > static_cast<int64_t>(std::numeric_limits<int32_t>::max())) {
- return {Token::Type::kError, source,
- "i32 (" + file_->content.data.substr(start, end - start) +
- ") too large"};
- }
- end_source(source);
- return {source, static_cast<int32_t>(res)};
+
+ if (is_zero) {
+ // If value is zero, then ignore the exponent and produce a zero
+ exponent = 0;
+ } else {
+ // Ensure input exponent is not too large; i.e. that it won't overflow when
+ // adding the exponent bias.
+ const uint64_t kIntMax = static_cast<uint64_t>(std::numeric_limits<int64_t>::max());
+ const uint64_t kMaxInputExponent = kIntMax - kExponentBias;
+ if (input_exponent > kMaxInputExponent) {
+ return {Token::Type::kError, source, "exponent is too large for hex float"};
+ }
+
+ // Compute exponent so far
+ exponent += static_cast<uint64_t>(static_cast<int64_t>(input_exponent) * exponent_sign);
+
+ // Bias exponent if non-zero
+ // After this, if exponent is <= 0, our value is a denormal
+ exponent += kExponentBias;
+
+ // We know the number is not zero. The MSB is 1 (by construction), and
+ // should be eliminated because it becomes the implicit 1 that isn't
+        // explicitly represented in the binary64 format. We'll bring it back
+ // later if we find the exponent actually underflowed, i.e. the number
+ // is sub-normal.
+ if (has_zero_integer) {
+ mantissa <<= 1;
+ --exponent;
+ }
+ }
+
+ // We can now safely work with exponent as a signed quantity, as there's no
+ // chance to overflow
+ int64_t signed_exponent = static_cast<int64_t>(exponent);
+
+    // Shift mantissa to occupy the low 52 bits
+ mantissa >>= kMantissaShiftRight;
+
+ // If denormal, shift mantissa until our exponent is zero
+ if (!is_zero) {
+ // Denorm has exponent 0 and non-zero mantissa. We set the top bit here,
+ // then shift the mantissa to make exponent zero.
+ if (signed_exponent <= 0) {
+ mantissa >>= 1;
+ mantissa |= (kOne << kMantissaMsb);
+ }
+
+ while (signed_exponent < 0) {
+ mantissa >>= 1;
+ ++signed_exponent;
+
+ // If underflow, clamp to zero
+ if (mantissa == 0) {
+ signed_exponent = 0;
+ }
+ }
+ }
+
+ if (signed_exponent >= kExponentMax || (signed_exponent == kExponentMax && mantissa != 0)) {
+ std::string type = has_f_suffix ? "f32" : "abstract-float";
+ return {Token::Type::kError, source, "value cannot be represented as '" + type + "'"};
+ }
+
+ // Combine sign, mantissa, and exponent
+ uint64_t result_u64 = sign_bit << kSignBit;
+ result_u64 |= mantissa;
+ result_u64 |= (static_cast<uint64_t>(signed_exponent) & kExponentMask) << kExponentLeftShift;
+
+    // Reinterpret as f64 and return
+ double result_f64;
+ std::memcpy(&result_f64, &result_u64, 8);
+
+ if (has_f_suffix) {
+ // Check value fits in f32
+ if (result_f64 < static_cast<double>(f32::kLowest) ||
+ result_f64 > static_cast<double>(f32::kHighest)) {
+ return {Token::Type::kError, source, "value cannot be represented as 'f32'"};
+ }
+ // Check the value can be exactly represented (low 29 mantissa bits must be 0)
+ if (result_u64 & 0x1fffffff) {
+ return {Token::Type::kError, source, "value cannot be exactly represented as 'f32'"};
+ }
+ }
+
+ return {has_f_suffix ? Token::Type::kFloatLiteral_F : Token::Type::kFloatLiteral, source,
+ result_f64};
}
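
The tail of try_hex_float() packs the sign, the biased exponent and the mantissa into a binary64 bit pattern. A short worked example of that assembly for the literal -0x1.8p1 (which should come out as -3.0), using the same layout as the constants above (1 sign bit, 11 exponent bits with bias 1023, 52 mantissa bits):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
        // -0x1.8p1: sign = 1, implicit leading 1, fraction bits ".1000..." (0.5),
        // unbiased exponent 1.
        const uint64_t sign_bit = 1;
        const uint64_t mantissa = uint64_t{1} << 51;  // fraction 0.5 -> top mantissa bit
        const uint64_t exponent = 1 + 1023;           // bias the exponent
        const uint64_t bits = (sign_bit << 63) | (exponent << 52) | mantissa;
        double value = 0.0;
        std::memcpy(&value, &bits, sizeof(value));
        std::printf("%g\n", value);  // prints -3
        return 0;
    }
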
-Token Lexer::try_hex_integer() {
- constexpr size_t kMaxDigits = 8; // Valid for both 32-bit integer types
- auto start = pos_;
- auto end = pos_;
+Token Lexer::build_token_from_int_if_possible(Source source, size_t start, int32_t base) {
+ const char* start_ptr = &at(start);
+ char* end_ptr = nullptr;
- auto source = begin_source();
+ errno = 0;
+ int64_t res = strtoll(start_ptr, &end_ptr, base);
+ const bool overflow = errno == ERANGE;
- if (matches(end, "-")) {
- end++;
- }
+ if (end_ptr) {
+ advance(end_ptr - start_ptr);
+ }
- if (matches(end, "0x") || matches(end, "0X")) {
- end += 2;
- } else {
- return {};
- }
+ if (matches(pos(), "u")) {
+ if (!overflow && CheckedConvert<u32>(AInt(res))) {
+ advance(1);
+ end_source(source);
+ return {Token::Type::kIntLiteral_U, source, res};
+ }
+ return {Token::Type::kError, source, "value cannot be represented as 'u32'"};
+ }
+
+ if (matches(pos(), "i")) {
+ if (!overflow && CheckedConvert<i32>(AInt(res))) {
+ advance(1);
+ end_source(source);
+ return {Token::Type::kIntLiteral_I, source, res};
+ }
+ return {Token::Type::kError, source, "value cannot be represented as 'i32'"};
+ }
+
+ end_source(source);
+ if (overflow) {
+ return {Token::Type::kError, source, "value cannot be represented as 'abstract-int'"};
+ }
+ return {Token::Type::kIntLiteral, source, res};
+}
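
build_token_from_int_if_possible() above relies on strtoll() reporting 64-bit overflow through errno == ERANGE and then narrows the result according to the 'u' or 'i' suffix. A rough standalone sketch of that logic, without Tint's AInt/CheckedConvert helpers:

    #include <cerrno>
    #include <cstdint>
    #include <cstdio>
    #include <cstdlib>
    #include <limits>

    // Parses a decimal integer literal; `suffix` is 'u', 'i', or 0 for none.
    bool lex_int(const char* text, char suffix, int64_t* out) {
        errno = 0;
        char* end = nullptr;
        const int64_t value = std::strtoll(text, &end, 10);
        if (end == text || errno == ERANGE) {
            return false;  // no digits, or does not fit in 64 bits
        }
        if (suffix == 'u') {
            if (value < 0 || value > std::numeric_limits<uint32_t>::max()) {
                return false;  // not representable as u32
            }
        } else if (suffix == 'i') {
            if (value < std::numeric_limits<int32_t>::min() ||
                value > std::numeric_limits<int32_t>::max()) {
                return false;  // not representable as i32
            }
        }
        *out = value;
        return true;
    }

    int main() {
        int64_t v = 0;
        std::printf("%d %lld\n", lex_int("4294967295", 'u', &v), static_cast<long long>(v));
        return 0;
    }
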
+
+Token Lexer::try_hex_integer() {
+ auto start = pos();
+ auto curr = start;
+
+ auto source = begin_source();
- auto first = end;
- while (!is_eof() && is_hex(file_->content.data[end])) {
- end++;
+ if (matches(curr, "-")) {
+ curr++;
+ }
- auto digits = end - first;
- if (digits > kMaxDigits) {
- return {Token::Type::kError, source,
- "integer literal (" +
- file_->content.data.substr(start, end - 1 - start) +
- "...) has too many digits"};
+ if (matches(curr, "0x") || matches(curr, "0X")) {
+ curr += 2;
+ } else {
+ return {};
}
- }
- if (first == end) {
- return {Token::Type::kError, source,
- "integer or float hex literal has no significant digits"};
- }
- pos_ = end;
- location_.column += (end - start);
+ if (!is_hex(at(curr))) {
+ return {Token::Type::kError, source,
+ "integer or float hex literal has no significant digits"};
+ }
- return build_token_from_int_if_possible(source, start, end, 16);
+ return build_token_from_int_if_possible(source, start, 16);
}
Token Lexer::try_integer() {
- constexpr size_t kMaxDigits = 10; // Valid for both 32-bit integer types
- auto start = pos_;
- auto end = start;
+ auto start = pos();
+ auto curr = start;
- auto source = begin_source();
+ auto source = begin_source();
- if (matches(end, "-")) {
- end++;
- }
+ if (matches(curr, "-")) {
+ curr++;
+ }
- if (end >= len_ || !is_digit(file_->content.data[end])) {
- return {};
- }
-
- auto first = end;
- // If the first digit is a zero this must only be zero as leading zeros
- // are not allowed.
- auto next = first + 1;
- if (next < len_) {
- if (file_->content.data[first] == '0' &&
- is_digit(file_->content.data[next])) {
- return {Token::Type::kError, source,
- "integer literal (" +
- file_->content.data.substr(start, end - 1 - start) +
- "...) has leading 0s"};
- }
- }
-
- while (end < len_ && is_digit(file_->content.data[end])) {
- auto digits = end - first;
- if (digits > kMaxDigits) {
- return {Token::Type::kError, source,
- "integer literal (" +
- file_->content.data.substr(start, end - 1 - start) +
- "...) has too many digits"};
- }
-
- end++;
- }
-
- pos_ = end;
- location_.column += (end - start);
-
- return build_token_from_int_if_possible(source, start, end, 10);
+ if (curr >= length() || !is_digit(at(curr))) {
+ return {};
+ }
+
+ // If the first digit is a zero this must only be zero as leading zeros
+ // are not allowed.
+ if (auto next = curr + 1; next < length()) {
+ if (at(curr) == '0' && is_digit(at(next))) {
+ return {Token::Type::kError, source, "integer literal cannot have leading 0s"};
+ }
+ }
+
+ return build_token_from_int_if_possible(source, start, 10);
}
Token Lexer::try_ident() {
- auto source = begin_source();
- auto start = pos_;
-
- // This below assumes that the size of a single std::string element is 1 byte.
- static_assert(sizeof(file_->content.data[0]) == sizeof(uint8_t),
- "tint::reader::wgsl requires the size of a std::string element "
- "to be a single byte");
-
- // Must begin with an XID_Source unicode character, or underscore
- {
- auto* utf8 = reinterpret_cast<const uint8_t*>(&file_->content.data[pos_]);
- auto [code_point, n] =
- text::utf8::Decode(utf8, file_->content.data.size() - pos_);
- if (n == 0) {
- pos_++; // Skip the bad byte.
- return {Token::Type::kError, source, "invalid UTF-8"};
- }
- if (code_point != text::CodePoint('_') && !code_point.IsXIDStart()) {
- return {};
- }
- // Consume start codepoint
- pos_ += n;
- location_.column += n;
- }
-
- while (!is_eof()) {
- // Must continue with an XID_Continue unicode character
- auto* utf8 = reinterpret_cast<const uint8_t*>(&file_->content.data[pos_]);
- auto [code_point, n] =
- text::utf8::Decode(utf8, file_->content.data.size() - pos_);
- if (n == 0) {
- pos_++; // Skip the bad byte.
- return {Token::Type::kError, source, "invalid UTF-8"};
- }
- if (!code_point.IsXIDContinue()) {
- break;
+ auto source = begin_source();
+ auto start = pos();
+
+    // Must begin with an XID_Start unicode character, or underscore
+ {
+ auto* utf8 = reinterpret_cast<const uint8_t*>(&at(pos()));
+ auto [code_point, n] = text::utf8::Decode(utf8, length() - pos());
+ if (n == 0) {
+ advance(); // Skip the bad byte.
+ return {Token::Type::kError, source, "invalid UTF-8"};
+ }
+ if (code_point != text::CodePoint('_') && !code_point.IsXIDStart()) {
+ return {};
+ }
+ // Consume start codepoint
+ advance(n);
}
- // Consume continuing codepoint
- pos_ += n;
- location_.column += n;
- }
+ while (!is_eol()) {
+ // Must continue with an XID_Continue unicode character
+ auto* utf8 = reinterpret_cast<const uint8_t*>(&at(pos()));
+ auto [code_point, n] = text::utf8::Decode(utf8, line().size() - pos());
+ if (n == 0) {
+ advance(); // Skip the bad byte.
+ return {Token::Type::kError, source, "invalid UTF-8"};
+ }
+ if (!code_point.IsXIDContinue()) {
+ break;
+ }
+
+ // Consume continuing codepoint
+ advance(n);
+ }
- if (file_->content.data[start] == '_') {
- // Check for an underscore on its own (special token), or a
- // double-underscore (not allowed).
- if ((pos_ == start + 1) || (file_->content.data[start + 1] == '_')) {
- location_.column -= (pos_ - start);
- pos_ = start;
- return {};
+ if (at(start) == '_') {
+ // Check for an underscore on its own (special token), or a
+ // double-underscore (not allowed).
+ if ((pos() == start + 1) || (at(start + 1) == '_')) {
+ set_pos(start);
+ return {};
+ }
}
- }
- auto str = file_->content.data_view.substr(start, pos_ - start);
- end_source(source);
+ auto str = substr(start, pos() - start);
+ end_source(source);
- auto t = check_keyword(source, str);
- if (!t.IsUninitialized()) {
- return t;
- }
+ auto t = check_keyword(source, str);
+ if (!t.IsUninitialized()) {
+ return t;
+ }
- return {Token::Type::kIdentifier, source, str};
+ return {Token::Type::kIdentifier, source, str};
}
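
try_ident() above decodes UTF-8 and applies the Unicode XID_Start / XID_Continue properties through text::utf8::Decode. A deliberately simplified, ASCII-only sketch of the same scan is shown here for orientation, including the lone-underscore and leading double-underscore backtracking; it is not a substitute for the Unicode-aware version:

    #include <cctype>
    #include <cstdio>
    #include <string_view>

    // ASCII approximation: [A-Za-z_][A-Za-z0-9_]*, rejecting "_" and "__..." forms.
    std::string_view lex_ident(std::string_view line, size_t pos) {
        const size_t start = pos;
        if (pos >= line.size() ||
            !(line[pos] == '_' || std::isalpha(static_cast<unsigned char>(line[pos])))) {
            return {};
        }
        pos++;
        while (pos < line.size() &&
               (line[pos] == '_' || std::isalnum(static_cast<unsigned char>(line[pos])))) {
            pos++;
        }
        const std::string_view str = line.substr(start, pos - start);
        // A lone "_" is a special token and "__"-prefixed names are not allowed,
        // so back off and let other rules handle them.
        if (str[0] == '_' && (str.size() == 1 || str[1] == '_')) {
            return {};
        }
        return str;
    }

    int main() {
        const auto id = lex_ident("ident1 //ends with comment", 0);
        std::printf("%.*s\n", static_cast<int>(id.size()), id.data());
        return 0;
    }
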
Token Lexer::try_punctuation() {
- auto source = begin_source();
- auto type = Token::Type::kUninitialized;
-
- if (matches(pos_, "@")) {
- type = Token::Type::kAttr;
- pos_ += 1;
- location_.column += 1;
- } else if (matches(pos_, "(")) {
- type = Token::Type::kParenLeft;
- pos_ += 1;
- location_.column += 1;
- } else if (matches(pos_, ")")) {
- type = Token::Type::kParenRight;
- pos_ += 1;
- location_.column += 1;
- } else if (matches(pos_, "[")) {
- type = Token::Type::kBracketLeft;
- pos_ += 1;
- location_.column += 1;
- } else if (matches(pos_, "]")) {
- type = Token::Type::kBracketRight;
- pos_ += 1;
- location_.column += 1;
- } else if (matches(pos_, "{")) {
- type = Token::Type::kBraceLeft;
- pos_ += 1;
- location_.column += 1;
- } else if (matches(pos_, "}")) {
- type = Token::Type::kBraceRight;
- pos_ += 1;
- location_.column += 1;
- } else if (matches(pos_, "&&")) {
- type = Token::Type::kAndAnd;
- pos_ += 2;
- location_.column += 2;
- } else if (matches(pos_, "&=")) {
- type = Token::Type::kAndEqual;
- pos_ += 2;
- location_.column += 2;
- } else if (matches(pos_, "&")) {
- type = Token::Type::kAnd;
- pos_ += 1;
- location_.column += 1;
- } else if (matches(pos_, "/=")) {
- type = Token::Type::kDivisionEqual;
- pos_ += 2;
- location_.column += 2;
- } else if (matches(pos_, "/")) {
- type = Token::Type::kForwardSlash;
- pos_ += 1;
- location_.column += 1;
- } else if (matches(pos_, "!=")) {
- type = Token::Type::kNotEqual;
- pos_ += 2;
- location_.column += 2;
- } else if (matches(pos_, "!")) {
- type = Token::Type::kBang;
- pos_ += 1;
- location_.column += 1;
- } else if (matches(pos_, ":")) {
- type = Token::Type::kColon;
- pos_ += 1;
- location_.column += 1;
- } else if (matches(pos_, ",")) {
- type = Token::Type::kComma;
- pos_ += 1;
- location_.column += 1;
- } else if (matches(pos_, "==")) {
- type = Token::Type::kEqualEqual;
- pos_ += 2;
- location_.column += 2;
- } else if (matches(pos_, "=")) {
- type = Token::Type::kEqual;
- pos_ += 1;
- location_.column += 1;
- } else if (matches(pos_, ">=")) {
- type = Token::Type::kGreaterThanEqual;
- pos_ += 2;
- location_.column += 2;
- } else if (matches(pos_, ">>")) {
- type = Token::Type::kShiftRight;
- pos_ += 2;
- location_.column += 2;
- } else if (matches(pos_, ">")) {
- type = Token::Type::kGreaterThan;
- pos_ += 1;
- location_.column += 1;
- } else if (matches(pos_, "<=")) {
- type = Token::Type::kLessThanEqual;
- pos_ += 2;
- location_.column += 2;
- } else if (matches(pos_, "<<")) {
- type = Token::Type::kShiftLeft;
- pos_ += 2;
- location_.column += 2;
- } else if (matches(pos_, "<")) {
- type = Token::Type::kLessThan;
- pos_ += 1;
- location_.column += 1;
- } else if (matches(pos_, "%=")) {
- type = Token::Type::kModuloEqual;
- pos_ += 2;
- location_.column += 2;
- } else if (matches(pos_, "%")) {
- type = Token::Type::kMod;
- pos_ += 1;
- location_.column += 1;
- } else if (matches(pos_, "->")) {
- type = Token::Type::kArrow;
- pos_ += 2;
- location_.column += 2;
- } else if (matches(pos_, "--")) {
- type = Token::Type::kMinusMinus;
- pos_ += 2;
- location_.column += 2;
- } else if (matches(pos_, "-=")) {
- type = Token::Type::kMinusEqual;
- pos_ += 2;
- location_.column += 2;
- } else if (matches(pos_, "-")) {
- type = Token::Type::kMinus;
- pos_ += 1;
- location_.column += 1;
- } else if (matches(pos_, ".")) {
- type = Token::Type::kPeriod;
- pos_ += 1;
- location_.column += 1;
- } else if (matches(pos_, "++")) {
- type = Token::Type::kPlusPlus;
- pos_ += 2;
- location_.column += 2;
- } else if (matches(pos_, "+=")) {
- type = Token::Type::kPlusEqual;
- pos_ += 2;
- location_.column += 2;
- } else if (matches(pos_, "+")) {
- type = Token::Type::kPlus;
- pos_ += 1;
- location_.column += 1;
- } else if (matches(pos_, "||")) {
- type = Token::Type::kOrOr;
- pos_ += 2;
- location_.column += 2;
- } else if (matches(pos_, "|=")) {
- type = Token::Type::kOrEqual;
- pos_ += 2;
- location_.column += 2;
- } else if (matches(pos_, "|")) {
- type = Token::Type::kOr;
- pos_ += 1;
- location_.column += 1;
- } else if (matches(pos_, ";")) {
- type = Token::Type::kSemicolon;
- pos_ += 1;
- location_.column += 1;
- } else if (matches(pos_, "*=")) {
- type = Token::Type::kTimesEqual;
- pos_ += 2;
- location_.column += 2;
- } else if (matches(pos_, "*")) {
- type = Token::Type::kStar;
- pos_ += 1;
- location_.column += 1;
- } else if (matches(pos_, "~")) {
- type = Token::Type::kTilde;
- pos_ += 1;
- location_.column += 1;
- } else if (matches(pos_, "_")) {
- type = Token::Type::kUnderscore;
- pos_ += 1;
- location_.column += 1;
- } else if (matches(pos_, "^=")) {
- type = Token::Type::kXorEqual;
- pos_ += 2;
- location_.column += 2;
- } else if (matches(pos_, "^")) {
- type = Token::Type::kXor;
- pos_ += 1;
- location_.column += 1;
- }
-
- end_source(source);
-
- return {type, source};
+ auto source = begin_source();
+ auto type = Token::Type::kUninitialized;
+
+ if (matches(pos(), "@")) {
+ type = Token::Type::kAttr;
+ advance(1);
+ } else if (matches(pos(), "(")) {
+ type = Token::Type::kParenLeft;
+ advance(1);
+ } else if (matches(pos(), ")")) {
+ type = Token::Type::kParenRight;
+ advance(1);
+ } else if (matches(pos(), "[")) {
+ type = Token::Type::kBracketLeft;
+ advance(1);
+ } else if (matches(pos(), "]")) {
+ type = Token::Type::kBracketRight;
+ advance(1);
+ } else if (matches(pos(), "{")) {
+ type = Token::Type::kBraceLeft;
+ advance(1);
+ } else if (matches(pos(), "}")) {
+ type = Token::Type::kBraceRight;
+ advance(1);
+ } else if (matches(pos(), "&&")) {
+ type = Token::Type::kAndAnd;
+ advance(2);
+ } else if (matches(pos(), "&=")) {
+ type = Token::Type::kAndEqual;
+ advance(2);
+ } else if (matches(pos(), "&")) {
+ type = Token::Type::kAnd;
+ advance(1);
+ } else if (matches(pos(), "/=")) {
+ type = Token::Type::kDivisionEqual;
+ advance(2);
+ } else if (matches(pos(), "/")) {
+ type = Token::Type::kForwardSlash;
+ advance(1);
+ } else if (matches(pos(), "!=")) {
+ type = Token::Type::kNotEqual;
+ advance(2);
+ } else if (matches(pos(), "!")) {
+ type = Token::Type::kBang;
+ advance(1);
+ } else if (matches(pos(), ":")) {
+ type = Token::Type::kColon;
+ advance(1);
+ } else if (matches(pos(), ",")) {
+ type = Token::Type::kComma;
+ advance(1);
+ } else if (matches(pos(), "==")) {
+ type = Token::Type::kEqualEqual;
+ advance(2);
+ } else if (matches(pos(), "=")) {
+ type = Token::Type::kEqual;
+ advance(1);
+ } else if (matches(pos(), ">=")) {
+ type = Token::Type::kGreaterThanEqual;
+ advance(2);
+ } else if (matches(pos(), ">>")) {
+ type = Token::Type::kShiftRight;
+ advance(2);
+ } else if (matches(pos(), ">")) {
+ type = Token::Type::kGreaterThan;
+ advance(1);
+ } else if (matches(pos(), "<=")) {
+ type = Token::Type::kLessThanEqual;
+ advance(2);
+ } else if (matches(pos(), "<<")) {
+ type = Token::Type::kShiftLeft;
+ advance(2);
+ } else if (matches(pos(), "<")) {
+ type = Token::Type::kLessThan;
+ advance(1);
+ } else if (matches(pos(), "%=")) {
+ type = Token::Type::kModuloEqual;
+ advance(2);
+ } else if (matches(pos(), "%")) {
+ type = Token::Type::kMod;
+ advance(1);
+ } else if (matches(pos(), "->")) {
+ type = Token::Type::kArrow;
+ advance(2);
+ } else if (matches(pos(), "--")) {
+ type = Token::Type::kMinusMinus;
+ advance(2);
+ } else if (matches(pos(), "-=")) {
+ type = Token::Type::kMinusEqual;
+ advance(2);
+ } else if (matches(pos(), "-")) {
+ type = Token::Type::kMinus;
+ advance(1);
+ } else if (matches(pos(), ".")) {
+ type = Token::Type::kPeriod;
+ advance(1);
+ } else if (matches(pos(), "++")) {
+ type = Token::Type::kPlusPlus;
+ advance(2);
+ } else if (matches(pos(), "+=")) {
+ type = Token::Type::kPlusEqual;
+ advance(2);
+ } else if (matches(pos(), "+")) {
+ type = Token::Type::kPlus;
+ advance(1);
+ } else if (matches(pos(), "||")) {
+ type = Token::Type::kOrOr;
+ advance(2);
+ } else if (matches(pos(), "|=")) {
+ type = Token::Type::kOrEqual;
+ advance(2);
+ } else if (matches(pos(), "|")) {
+ type = Token::Type::kOr;
+ advance(1);
+ } else if (matches(pos(), ";")) {
+ type = Token::Type::kSemicolon;
+ advance(1);
+ } else if (matches(pos(), "*=")) {
+ type = Token::Type::kTimesEqual;
+ advance(2);
+ } else if (matches(pos(), "*")) {
+ type = Token::Type::kStar;
+ advance(1);
+ } else if (matches(pos(), "~")) {
+ type = Token::Type::kTilde;
+ advance(1);
+ } else if (matches(pos(), "_")) {
+ type = Token::Type::kUnderscore;
+ advance(1);
+ } else if (matches(pos(), "^=")) {
+ type = Token::Type::kXorEqual;
+ advance(2);
+ } else if (matches(pos(), "^")) {
+ type = Token::Type::kXor;
+ advance(1);
+ }
+
+ end_source(source);
+
+ return {type, source};
}
Token Lexer::check_keyword(const Source& source, std::string_view str) {
- if (str == "array")
- return {Token::Type::kArray, source, "array"};
- if (str == "atomic")
- return {Token::Type::kAtomic, source, "atomic"};
- if (str == "bitcast")
- return {Token::Type::kBitcast, source, "bitcast"};
- if (str == "bool")
- return {Token::Type::kBool, source, "bool"};
- if (str == "break")
- return {Token::Type::kBreak, source, "break"};
- if (str == "case")
- return {Token::Type::kCase, source, "case"};
- if (str == "continue")
- return {Token::Type::kContinue, source, "continue"};
- if (str == "continuing")
- return {Token::Type::kContinuing, source, "continuing"};
- if (str == "discard")
- return {Token::Type::kDiscard, source, "discard"};
- if (str == "default")
- return {Token::Type::kDefault, source, "default"};
- if (str == "else")
- return {Token::Type::kElse, source, "else"};
- if (str == "f32")
- return {Token::Type::kF32, source, "f32"};
- if (str == "fallthrough")
- return {Token::Type::kFallthrough, source, "fallthrough"};
- if (str == "false")
- return {Token::Type::kFalse, source, "false"};
- if (str == "fn")
- return {Token::Type::kFn, source, "fn"};
- if (str == "for")
- return {Token::Type::kFor, source, "for"};
- if (str == "function")
- return {Token::Type::kFunction, source, "function"};
- if (str == "i32")
- return {Token::Type::kI32, source, "i32"};
- if (str == "if")
- return {Token::Type::kIf, source, "if"};
- if (str == "import")
- return {Token::Type::kImport, source, "import"};
- if (str == "let")
- return {Token::Type::kLet, source, "let"};
- if (str == "loop")
- return {Token::Type::kLoop, source, "loop"};
- if (str == "mat2x2")
- return {Token::Type::kMat2x2, source, "mat2x2"};
- if (str == "mat2x3")
- return {Token::Type::kMat2x3, source, "mat2x3"};
- if (str == "mat2x4")
- return {Token::Type::kMat2x4, source, "mat2x4"};
- if (str == "mat3x2")
- return {Token::Type::kMat3x2, source, "mat3x2"};
- if (str == "mat3x3")
- return {Token::Type::kMat3x3, source, "mat3x3"};
- if (str == "mat3x4")
- return {Token::Type::kMat3x4, source, "mat3x4"};
- if (str == "mat4x2")
- return {Token::Type::kMat4x2, source, "mat4x2"};
- if (str == "mat4x3")
- return {Token::Type::kMat4x3, source, "mat4x3"};
- if (str == "mat4x4")
- return {Token::Type::kMat4x4, source, "mat4x4"};
- if (str == "override")
- return {Token::Type::kOverride, source, "override"};
- if (str == "private")
- return {Token::Type::kPrivate, source, "private"};
- if (str == "ptr")
- return {Token::Type::kPtr, source, "ptr"};
- if (str == "return")
- return {Token::Type::kReturn, source, "return"};
- if (str == "sampler")
- return {Token::Type::kSampler, source, "sampler"};
- if (str == "sampler_comparison")
- return {Token::Type::kComparisonSampler, source, "sampler_comparison"};
- if (str == "storage_buffer" || str == "storage")
- return {Token::Type::kStorage, source, "storage"};
- if (str == "struct")
- return {Token::Type::kStruct, source, "struct"};
- if (str == "switch")
- return {Token::Type::kSwitch, source, "switch"};
- if (str == "texture_1d")
- return {Token::Type::kTextureSampled1d, source, "texture_1d"};
- if (str == "texture_2d")
- return {Token::Type::kTextureSampled2d, source, "texture_2d"};
- if (str == "texture_2d_array")
- return {Token::Type::kTextureSampled2dArray, source, "texture_2d_array"};
- if (str == "texture_3d")
- return {Token::Type::kTextureSampled3d, source, "texture_3d"};
- if (str == "texture_cube")
- return {Token::Type::kTextureSampledCube, source, "texture_cube"};
- if (str == "texture_cube_array") {
- return {Token::Type::kTextureSampledCubeArray, source,
- "texture_cube_array"};
- }
- if (str == "texture_depth_2d")
- return {Token::Type::kTextureDepth2d, source, "texture_depth_2d"};
- if (str == "texture_depth_2d_array") {
- return {Token::Type::kTextureDepth2dArray, source,
- "texture_depth_2d_array"};
- }
- if (str == "texture_depth_cube")
- return {Token::Type::kTextureDepthCube, source, "texture_depth_cube"};
- if (str == "texture_depth_cube_array") {
- return {Token::Type::kTextureDepthCubeArray, source,
- "texture_depth_cube_array"};
- }
- if (str == "texture_depth_multisampled_2d") {
- return {Token::Type::kTextureDepthMultisampled2d, source,
- "texture_depth_multisampled_2d"};
- }
- if (str == "texture_external") {
- return {Token::Type::kTextureExternal, source, "texture_external"};
- }
- if (str == "texture_multisampled_2d") {
- return {Token::Type::kTextureMultisampled2d, source,
- "texture_multisampled_2d"};
- }
- if (str == "texture_storage_1d") {
- return {Token::Type::kTextureStorage1d, source, "texture_storage_1d"};
- }
- if (str == "texture_storage_2d") {
- return {Token::Type::kTextureStorage2d, source, "texture_storage_2d"};
- }
- if (str == "texture_storage_2d_array") {
- return {Token::Type::kTextureStorage2dArray, source,
- "texture_storage_2d_array"};
- }
- if (str == "texture_storage_3d") {
- return {Token::Type::kTextureStorage3d, source, "texture_storage_3d"};
- }
- if (str == "true")
- return {Token::Type::kTrue, source, "true"};
- if (str == "type")
- return {Token::Type::kType, source, "type"};
- if (str == "u32")
- return {Token::Type::kU32, source, "u32"};
- if (str == "uniform")
- return {Token::Type::kUniform, source, "uniform"};
- if (str == "var")
- return {Token::Type::kVar, source, "var"};
- if (str == "vec2")
- return {Token::Type::kVec2, source, "vec2"};
- if (str == "vec3")
- return {Token::Type::kVec3, source, "vec3"};
- if (str == "vec4")
- return {Token::Type::kVec4, source, "vec4"};
- if (str == "workgroup")
- return {Token::Type::kWorkgroup, source, "workgroup"};
- return {};
+ if (str == "array") {
+ return {Token::Type::kArray, source, "array"};
+ }
+ if (str == "atomic") {
+ return {Token::Type::kAtomic, source, "atomic"};
+ }
+ if (str == "bitcast") {
+ return {Token::Type::kBitcast, source, "bitcast"};
+ }
+ if (str == "bool") {
+ return {Token::Type::kBool, source, "bool"};
+ }
+ if (str == "break") {
+ return {Token::Type::kBreak, source, "break"};
+ }
+ if (str == "case") {
+ return {Token::Type::kCase, source, "case"};
+ }
+ if (str == "continue") {
+ return {Token::Type::kContinue, source, "continue"};
+ }
+ if (str == "continuing") {
+ return {Token::Type::kContinuing, source, "continuing"};
+ }
+ if (str == "discard") {
+ return {Token::Type::kDiscard, source, "discard"};
+ }
+ if (str == "default") {
+ return {Token::Type::kDefault, source, "default"};
+ }
+ if (str == "else") {
+ return {Token::Type::kElse, source, "else"};
+ }
+ if (str == "enable") {
+ return {Token::Type::kEnable, source, "enable"};
+ }
+ if (str == "f16") {
+ return {Token::Type::kF16, source, "f16"};
+ }
+ if (str == "f32") {
+ return {Token::Type::kF32, source, "f32"};
+ }
+ if (str == "fallthrough") {
+ return {Token::Type::kFallthrough, source, "fallthrough"};
+ }
+ if (str == "false") {
+ return {Token::Type::kFalse, source, "false"};
+ }
+ if (str == "fn") {
+ return {Token::Type::kFn, source, "fn"};
+ }
+ if (str == "for") {
+ return {Token::Type::kFor, source, "for"};
+ }
+ if (str == "function") {
+ return {Token::Type::kFunction, source, "function"};
+ }
+ if (str == "i32") {
+ return {Token::Type::kI32, source, "i32"};
+ }
+ if (str == "if") {
+ return {Token::Type::kIf, source, "if"};
+ }
+ if (str == "import") {
+ return {Token::Type::kImport, source, "import"};
+ }
+ if (str == "let") {
+ return {Token::Type::kLet, source, "let"};
+ }
+ if (str == "loop") {
+ return {Token::Type::kLoop, source, "loop"};
+ }
+ if (str == "mat2x2") {
+ return {Token::Type::kMat2x2, source, "mat2x2"};
+ }
+ if (str == "mat2x3") {
+ return {Token::Type::kMat2x3, source, "mat2x3"};
+ }
+ if (str == "mat2x4") {
+ return {Token::Type::kMat2x4, source, "mat2x4"};
+ }
+ if (str == "mat3x2") {
+ return {Token::Type::kMat3x2, source, "mat3x2"};
+ }
+ if (str == "mat3x3") {
+ return {Token::Type::kMat3x3, source, "mat3x3"};
+ }
+ if (str == "mat3x4") {
+ return {Token::Type::kMat3x4, source, "mat3x4"};
+ }
+ if (str == "mat4x2") {
+ return {Token::Type::kMat4x2, source, "mat4x2"};
+ }
+ if (str == "mat4x3") {
+ return {Token::Type::kMat4x3, source, "mat4x3"};
+ }
+ if (str == "mat4x4") {
+ return {Token::Type::kMat4x4, source, "mat4x4"};
+ }
+ if (str == "override") {
+ return {Token::Type::kOverride, source, "override"};
+ }
+ if (str == "private") {
+ return {Token::Type::kPrivate, source, "private"};
+ }
+ if (str == "ptr") {
+ return {Token::Type::kPtr, source, "ptr"};
+ }
+ if (str == "return") {
+ return {Token::Type::kReturn, source, "return"};
+ }
+ if (str == "sampler") {
+ return {Token::Type::kSampler, source, "sampler"};
+ }
+ if (str == "sampler_comparison") {
+ return {Token::Type::kComparisonSampler, source, "sampler_comparison"};
+ }
+ if (str == "storage_buffer" || str == "storage") {
+ return {Token::Type::kStorage, source, "storage"};
+ }
+ if (str == "struct") {
+ return {Token::Type::kStruct, source, "struct"};
+ }
+ if (str == "switch") {
+ return {Token::Type::kSwitch, source, "switch"};
+ }
+ if (str == "texture_1d") {
+ return {Token::Type::kTextureSampled1d, source, "texture_1d"};
+ }
+ if (str == "texture_2d") {
+ return {Token::Type::kTextureSampled2d, source, "texture_2d"};
+ }
+ if (str == "texture_2d_array") {
+ return {Token::Type::kTextureSampled2dArray, source, "texture_2d_array"};
+ }
+ if (str == "texture_3d") {
+ return {Token::Type::kTextureSampled3d, source, "texture_3d"};
+ }
+ if (str == "texture_cube") {
+ return {Token::Type::kTextureSampledCube, source, "texture_cube"};
+ }
+ if (str == "texture_cube_array") {
+ return {Token::Type::kTextureSampledCubeArray, source, "texture_cube_array"};
+ }
+ if (str == "texture_depth_2d") {
+ return {Token::Type::kTextureDepth2d, source, "texture_depth_2d"};
+ }
+ if (str == "texture_depth_2d_array") {
+ return {Token::Type::kTextureDepth2dArray, source, "texture_depth_2d_array"};
+ }
+ if (str == "texture_depth_cube") {
+ return {Token::Type::kTextureDepthCube, source, "texture_depth_cube"};
+ }
+ if (str == "texture_depth_cube_array") {
+ return {Token::Type::kTextureDepthCubeArray, source, "texture_depth_cube_array"};
+ }
+ if (str == "texture_depth_multisampled_2d") {
+ return {Token::Type::kTextureDepthMultisampled2d, source, "texture_depth_multisampled_2d"};
+ }
+ if (str == "texture_external") {
+ return {Token::Type::kTextureExternal, source, "texture_external"};
+ }
+ if (str == "texture_multisampled_2d") {
+ return {Token::Type::kTextureMultisampled2d, source, "texture_multisampled_2d"};
+ }
+ if (str == "texture_storage_1d") {
+ return {Token::Type::kTextureStorage1d, source, "texture_storage_1d"};
+ }
+ if (str == "texture_storage_2d") {
+ return {Token::Type::kTextureStorage2d, source, "texture_storage_2d"};
+ }
+ if (str == "texture_storage_2d_array") {
+ return {Token::Type::kTextureStorage2dArray, source, "texture_storage_2d_array"};
+ }
+ if (str == "texture_storage_3d") {
+ return {Token::Type::kTextureStorage3d, source, "texture_storage_3d"};
+ }
+ if (str == "true") {
+ return {Token::Type::kTrue, source, "true"};
+ }
+ if (str == "type") {
+ return {Token::Type::kType, source, "type"};
+ }
+ if (str == "u32") {
+ return {Token::Type::kU32, source, "u32"};
+ }
+ if (str == "uniform") {
+ return {Token::Type::kUniform, source, "uniform"};
+ }
+ if (str == "var") {
+ return {Token::Type::kVar, source, "var"};
+ }
+ if (str == "vec2") {
+ return {Token::Type::kVec2, source, "vec2"};
+ }
+ if (str == "vec3") {
+ return {Token::Type::kVec3, source, "vec3"};
+ }
+ if (str == "vec4") {
+ return {Token::Type::kVec4, source, "vec4"};
+ }
+ if (str == "workgroup") {
+ return {Token::Type::kWorkgroup, source, "workgroup"};
+ }
+ return {};
}
} // namespace tint::reader::wgsl
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/lexer.h b/chromium/third_party/dawn/src/tint/reader/wgsl/lexer.h
index f378d578176..d93848ff5d0 100644
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/lexer.h
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/lexer.h
@@ -23,71 +23,84 @@ namespace tint::reader::wgsl {
/// Converts the input stream into a series of Tokens
class Lexer {
- public:
- /// Creates a new Lexer
- /// @param file the source file
- explicit Lexer(const Source::File* file);
- ~Lexer();
+ public:
+ /// Creates a new Lexer
+ /// @param file the source file
+ explicit Lexer(const Source::File* file);
+ ~Lexer();
- /// Returns the next token in the input stream.
- /// @return Token
- Token next();
+ /// Returns the next token in the input stream.
+ /// @return Token
+ Token next();
- private:
- /// Advances past blankspace and comments, if present at the current position.
- /// @returns error token, EOF, or uninitialized
- Token skip_blankspace_and_comments();
- /// Advances past a comment at the current position, if one exists.
- /// Returns an error if there was an unterminated block comment,
- /// or a null character was present.
- /// @returns uninitialized token on success, or error
- Token skip_comment();
+ private:
+ /// Advances past blankspace and comments, if present at the current position.
+ /// @returns error token, EOF, or uninitialized
+ Token skip_blankspace_and_comments();
+ /// Advances past a comment at the current position, if one exists.
+ /// Returns an error if there was an unterminated block comment,
+ /// or a null character was present.
+ /// @returns uninitialized token on success, or error
+ Token skip_comment();
- Token build_token_from_int_if_possible(Source source,
- size_t start,
- size_t end,
- int32_t base);
- Token check_keyword(const Source&, std::string_view);
+ Token build_token_from_int_if_possible(Source source, size_t start, int32_t base);
- /// The try_* methods have the following in common:
- /// - They assume there is at least one character to be consumed,
- /// i.e. the input has not yet reached end of file.
- /// - They return an initialized token when they match and consume
- /// a token of the specified kind.
- /// - Some can return an error token.
- /// - Otherwise they return an uninitialized token when they did not
- /// match a token of the specfied kind.
- Token try_float();
- Token try_hex_float();
- Token try_hex_integer();
- Token try_ident();
- Token try_integer();
- Token try_punctuation();
+ Token check_keyword(const Source&, std::string_view);
- Source begin_source() const;
- void end_source(Source&) const;
+ /// The try_* methods have the following in common:
+ /// - They assume there is at least one character to be consumed,
+ /// i.e. the input has not yet reached end of file.
+ /// - They return an initialized token when they match and consume
+ /// a token of the specified kind.
+ /// - Some can return an error token.
+ /// - Otherwise they return an uninitialized token when they did not
+    ///   match a token of the specified kind.
+ Token try_float();
+ Token try_hex_float();
+ Token try_hex_integer();
+ Token try_ident();
+ Token try_integer();
+ Token try_punctuation();
- /// @returns true if the end of the input has been reached.
- bool is_eof() const;
- /// @returns true if there is another character on the input and
- /// it is not null.
- bool is_null() const;
- /// @param ch a character
- /// @returns true if 'ch' is a decimal digit
- bool is_digit(char ch) const;
- /// @param ch a character
- /// @returns true if 'ch' is a hexadecimal digit
- bool is_hex(char ch) const;
- bool matches(size_t pos, std::string_view substr);
+ Source begin_source() const;
+ void end_source(Source&) const;
- /// The source file content
- Source::File const* const file_;
- /// The length of the input
- uint32_t len_ = 0;
- /// The current position in utf-8 code units (bytes) within the input
- uint32_t pos_ = 0;
- /// The current location within the input
- Source::Location location_;
+ /// @returns view of current line
+ const std::string_view line() const;
+ /// @returns position in current line
+ size_t pos() const;
+ /// @returns length of current line
+ size_t length() const;
+ /// @returns reference to character at `pos` within current line
+ const char& at(size_t pos) const;
+ /// @returns substring view at `offset` within current line of length `count`
+ std::string_view substr(size_t offset, size_t count);
+ /// advances current position by `offset` within current line
+ void advance(size_t offset = 1);
+ /// sets current position to `pos` within current line
+ void set_pos(size_t pos);
+ /// advances current position to next line
+ void advance_line();
+ /// @returns true if the end of the input has been reached.
+ bool is_eof() const;
+ /// @returns true if the end of the current line has been reached.
+ bool is_eol() const;
+ /// @returns true if there is another character on the input and
+ /// it is not null.
+ bool is_null() const;
+ /// @param ch a character
+ /// @returns true if 'ch' is a decimal digit
+ bool is_digit(char ch) const;
+ /// @param ch a character
+ /// @returns true if 'ch' is a hexadecimal digit
+ bool is_hex(char ch) const;
+ /// @returns true if string at `pos` matches `substr`
+ bool matches(size_t pos, std::string_view substr);
+
+ /// The source file content
+ Source::File const* const file_;
+ /// The current location within the input
+ Source::Location location_;
};
} // namespace tint::reader::wgsl
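
The reworked Lexer header above drops the byte-offset members (len_, pos_) in favour of a line-oriented cursor: line(), pos(), at(), substr(), advance(), set_pos(), advance_line(), is_eol(). The following is a minimal sketch of how those accessors compose, restricted to ASCII blankspace for brevity (the real lexer also handles the Unicode blankspace code points exercised in the tests further down); it is an illustration only, not the actual implementation:

    // Sketch: skip spaces and tabs, hopping to the next line at end-of-line.
    // Uses only members declared above; real blankspace handling covers
    // additional Unicode code points.
    while (!is_eof()) {
        if (is_eol()) {
            advance_line();            // move to the start of the next line
            continue;
        }
        if (matches(pos(), " ") || matches(pos(), "\t")) {
            advance();                 // consume one blankspace code unit
            continue;
        }
        break;                         // next token starts here
    }
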
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/lexer_test.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/lexer_test.cc
index 6ea313eaed2..16ae46d4304 100644
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/lexer_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/lexer_test.cc
@@ -15,6 +15,8 @@
#include "src/tint/reader/wgsl/lexer.h"
#include <limits>
+#include <tuple>
+#include <vector>
#include "gtest/gtest.h"
@@ -23,345 +25,432 @@ namespace {
using LexerTest = testing::Test;
+// Blankspace constants. These are macros on purpose to be able to easily build
+// up string literals with them.
+//
+// Same line code points
+#define kSpace " "
+#define kHTab "\t"
+#define kL2R "\xE2\x80\x8E"
+#define kR2L "\xE2\x80\x8F"
+// Line break code points
+#define kCR "\r"
+#define kLF "\n"
+#define kVTab "\x0B"
+#define kFF "\x0C"
+#define kNL "\xC2\x85"
+#define kLS "\xE2\x80\xA8"
+#define kPS "\xE2\x80\xA9"
+
TEST_F(LexerTest, Empty) {
- Source::File file("", "");
- Lexer l(&file);
- auto t = l.next();
- EXPECT_TRUE(t.IsEof());
+ Source::File file("", "");
+ Lexer l(&file);
+ auto t = l.next();
+ EXPECT_TRUE(t.IsEof());
+}
+
+TEST_F(LexerTest, Skips_Blankspace_Basic) {
+ Source::File file("", "\t\r\n\t ident\t\n\t \r ");
+ Lexer l(&file);
+
+ auto t = l.next();
+ EXPECT_TRUE(t.IsIdentifier());
+ EXPECT_EQ(t.source().range.begin.line, 2u);
+ EXPECT_EQ(t.source().range.begin.column, 6u);
+ EXPECT_EQ(t.source().range.end.line, 2u);
+ EXPECT_EQ(t.source().range.end.column, 11u);
+ EXPECT_EQ(t.to_str(), "ident");
+
+ t = l.next();
+ EXPECT_TRUE(t.IsEof());
}
-TEST_F(LexerTest, Skips_Blankspace) {
- Source::File file("", "\t\r\n\t ident\t\n\t \r ");
- Lexer l(&file);
+TEST_F(LexerTest, Skips_Blankspace_Exotic) {
+ Source::File file("", //
+ kVTab kFF kNL kLS kPS kL2R kR2L //
+ "ident" //
+ kVTab kFF kNL kLS kPS kL2R kR2L);
+ Lexer l(&file);
- auto t = l.next();
- EXPECT_TRUE(t.IsIdentifier());
- EXPECT_EQ(t.source().range.begin.line, 2u);
- EXPECT_EQ(t.source().range.begin.column, 6u);
- EXPECT_EQ(t.source().range.end.line, 2u);
- EXPECT_EQ(t.source().range.end.column, 11u);
- EXPECT_EQ(t.to_str(), "ident");
+ auto t = l.next();
+ EXPECT_TRUE(t.IsIdentifier());
+ EXPECT_EQ(t.source().range.begin.line, 6u);
+ EXPECT_EQ(t.source().range.begin.column, 7u);
+ EXPECT_EQ(t.source().range.end.line, 6u);
+ EXPECT_EQ(t.source().range.end.column, 12u);
+ EXPECT_EQ(t.to_str(), "ident");
- t = l.next();
- EXPECT_TRUE(t.IsEof());
+ t = l.next();
+ EXPECT_TRUE(t.IsEof());
}
TEST_F(LexerTest, Skips_Comments_Line) {
- Source::File file("", R"(//starts with comment
+ Source::File file("", R"(//starts with comment
ident1 //ends with comment
// blank line
ident2)");
- Lexer l(&file);
-
- auto t = l.next();
- EXPECT_TRUE(t.IsIdentifier());
- EXPECT_EQ(t.source().range.begin.line, 2u);
- EXPECT_EQ(t.source().range.begin.column, 1u);
- EXPECT_EQ(t.source().range.end.line, 2u);
- EXPECT_EQ(t.source().range.end.column, 7u);
- EXPECT_EQ(t.to_str(), "ident1");
-
- t = l.next();
- EXPECT_TRUE(t.IsIdentifier());
- EXPECT_EQ(t.source().range.begin.line, 4u);
- EXPECT_EQ(t.source().range.begin.column, 2u);
- EXPECT_EQ(t.source().range.end.line, 4u);
- EXPECT_EQ(t.source().range.end.column, 8u);
- EXPECT_EQ(t.to_str(), "ident2");
-
- t = l.next();
- EXPECT_TRUE(t.IsEof());
+ Lexer l(&file);
+
+ auto t = l.next();
+ EXPECT_TRUE(t.IsIdentifier());
+ EXPECT_EQ(t.source().range.begin.line, 2u);
+ EXPECT_EQ(t.source().range.begin.column, 1u);
+ EXPECT_EQ(t.source().range.end.line, 2u);
+ EXPECT_EQ(t.source().range.end.column, 7u);
+ EXPECT_EQ(t.to_str(), "ident1");
+
+ t = l.next();
+ EXPECT_TRUE(t.IsIdentifier());
+ EXPECT_EQ(t.source().range.begin.line, 4u);
+ EXPECT_EQ(t.source().range.begin.column, 2u);
+ EXPECT_EQ(t.source().range.end.line, 4u);
+ EXPECT_EQ(t.source().range.end.column, 8u);
+ EXPECT_EQ(t.to_str(), "ident2");
+
+ t = l.next();
+ EXPECT_TRUE(t.IsEof());
}
-using LineCommentTerminatorTest = testing::TestWithParam<char>;
-TEST_P(LineCommentTerminatorTest, Terminators) {
- // Test that line comments are ended by blankspace characters other than space
- // and horizontal tab.
- char c = GetParam();
- std::string src = "let// This is a comment";
- src += c;
- src += "ident";
- Source::File file("", src);
- Lexer l(&file);
-
- auto t = l.next();
- EXPECT_TRUE(t.Is(Token::Type::kLet));
- EXPECT_EQ(t.source().range.begin.line, 1u);
- EXPECT_EQ(t.source().range.begin.column, 1u);
- EXPECT_EQ(t.source().range.end.line, 1u);
- EXPECT_EQ(t.source().range.end.column, 4u);
-
- if (c != ' ' && c != '\t') {
- size_t line = c == '\n' ? 2u : 1u;
- size_t col = c == '\n' ? 1u : 25u;
+TEST_F(LexerTest, Skips_Comments_Unicode) {
+ Source::File file("", R"(// starts with 🙂🙂🙂
+ident1 //ends with 🙂🙂🙂
+// blank line
+ ident2)");
+ Lexer l(&file);
+
+ auto t = l.next();
+ EXPECT_TRUE(t.IsIdentifier());
+ EXPECT_EQ(t.source().range.begin.line, 2u);
+ EXPECT_EQ(t.source().range.begin.column, 1u);
+ EXPECT_EQ(t.source().range.end.line, 2u);
+ EXPECT_EQ(t.source().range.end.column, 7u);
+ EXPECT_EQ(t.to_str(), "ident1");
+
t = l.next();
EXPECT_TRUE(t.IsIdentifier());
- EXPECT_EQ(t.source().range.begin.line, line);
- EXPECT_EQ(t.source().range.begin.column, col);
- EXPECT_EQ(t.source().range.end.line, line);
- EXPECT_EQ(t.source().range.end.column, col + 5);
- EXPECT_EQ(t.to_str(), "ident");
- }
+ EXPECT_EQ(t.source().range.begin.line, 4u);
+ EXPECT_EQ(t.source().range.begin.column, 2u);
+ EXPECT_EQ(t.source().range.end.line, 4u);
+ EXPECT_EQ(t.source().range.end.column, 8u);
+ EXPECT_EQ(t.to_str(), "ident2");
- t = l.next();
- EXPECT_TRUE(t.IsEof());
+ t = l.next();
+ EXPECT_TRUE(t.IsEof());
+}
+
+using LineCommentTerminatorTest = testing::TestWithParam<const char*>;
+TEST_P(LineCommentTerminatorTest, Terminators) {
+ // Test that line comments are ended by blankspace characters other than
+ // space, horizontal tab, left-to-right mark, and right-to-left mark.
+ auto* c = GetParam();
+ std::string src = "let// This is a comment";
+ src += c;
+ src += "ident";
+ Source::File file("", src);
+ Lexer l(&file);
+
+ auto t = l.next();
+ EXPECT_TRUE(t.Is(Token::Type::kLet));
+ EXPECT_EQ(t.source().range.begin.line, 1u);
+ EXPECT_EQ(t.source().range.begin.column, 1u);
+ EXPECT_EQ(t.source().range.end.line, 1u);
+ EXPECT_EQ(t.source().range.end.column, 4u);
+
+ auto is_same_line = [](std::string_view v) {
+ return v == kSpace || v == kHTab || v == kL2R || v == kR2L;
+ };
+
+ if (!is_same_line(c)) {
+ size_t line = is_same_line(c) ? 1u : 2u;
+ size_t col = is_same_line(c) ? 25u : 1u;
+ t = l.next();
+ EXPECT_TRUE(t.IsIdentifier());
+ EXPECT_EQ(t.source().range.begin.line, line);
+ EXPECT_EQ(t.source().range.begin.column, col);
+ EXPECT_EQ(t.source().range.end.line, line);
+ EXPECT_EQ(t.source().range.end.column, col + 5);
+ EXPECT_EQ(t.to_str(), "ident");
+ }
+
+ t = l.next();
+ EXPECT_TRUE(t.IsEof());
}
INSTANTIATE_TEST_SUITE_P(LexerTest,
LineCommentTerminatorTest,
- testing::Values(' ', '\t', '\n', '\v', '\f', '\r'));
+ testing::Values(
+ // same line
+ kSpace,
+ kHTab,
+ kCR,
+ kL2R,
+ kR2L,
+ // line break
+ kLF,
+ kVTab,
+ kFF,
+ kNL,
+ kLS,
+ kPS));
TEST_F(LexerTest, Skips_Comments_Block) {
- Source::File file("", R"(/* comment
+ Source::File file("", R"(/* comment
text */ident)");
- Lexer l(&file);
-
- auto t = l.next();
- EXPECT_TRUE(t.IsIdentifier());
- EXPECT_EQ(t.source().range.begin.line, 2u);
- EXPECT_EQ(t.source().range.begin.column, 8u);
- EXPECT_EQ(t.source().range.end.line, 2u);
- EXPECT_EQ(t.source().range.end.column, 13u);
- EXPECT_EQ(t.to_str(), "ident");
-
- t = l.next();
- EXPECT_TRUE(t.IsEof());
+ Lexer l(&file);
+
+ auto t = l.next();
+ EXPECT_TRUE(t.IsIdentifier());
+ EXPECT_EQ(t.source().range.begin.line, 2u);
+ EXPECT_EQ(t.source().range.begin.column, 8u);
+ EXPECT_EQ(t.source().range.end.line, 2u);
+ EXPECT_EQ(t.source().range.end.column, 13u);
+ EXPECT_EQ(t.to_str(), "ident");
+
+ t = l.next();
+ EXPECT_TRUE(t.IsEof());
}
TEST_F(LexerTest, Skips_Comments_Block_Nested) {
- Source::File file("", R"(/* comment
+ Source::File file("", R"(/* comment
text // nested line comments are ignored /* more text
/////**/ */*/ident)");
- Lexer l(&file);
-
- auto t = l.next();
- EXPECT_TRUE(t.IsIdentifier());
- EXPECT_EQ(t.source().range.begin.line, 3u);
- EXPECT_EQ(t.source().range.begin.column, 14u);
- EXPECT_EQ(t.source().range.end.line, 3u);
- EXPECT_EQ(t.source().range.end.column, 19u);
- EXPECT_EQ(t.to_str(), "ident");
-
- t = l.next();
- EXPECT_TRUE(t.IsEof());
+ Lexer l(&file);
+
+ auto t = l.next();
+ EXPECT_TRUE(t.IsIdentifier());
+ EXPECT_EQ(t.source().range.begin.line, 3u);
+ EXPECT_EQ(t.source().range.begin.column, 14u);
+ EXPECT_EQ(t.source().range.end.line, 3u);
+ EXPECT_EQ(t.source().range.end.column, 19u);
+ EXPECT_EQ(t.to_str(), "ident");
+
+ t = l.next();
+ EXPECT_TRUE(t.IsEof());
}
TEST_F(LexerTest, Skips_Comments_Block_Unterminated) {
- // I had to break up the /* because otherwise the clang readability check
- // errored out saying it could not find the end of a multi-line comment.
- Source::File file("", R"(
+ // I had to break up the /* because otherwise the clang readability check
+ // errored out saying it could not find the end of a multi-line comment.
+ Source::File file("", R"(
/)"
- R"(*
+ R"(*
abcd)");
- Lexer l(&file);
-
- auto t = l.next();
- ASSERT_TRUE(t.Is(Token::Type::kError));
- EXPECT_EQ(t.to_str(), "unterminated block comment");
- EXPECT_EQ(t.source().range.begin.line, 2u);
- EXPECT_EQ(t.source().range.begin.column, 3u);
- EXPECT_EQ(t.source().range.end.line, 2u);
- EXPECT_EQ(t.source().range.end.column, 4u);
+ Lexer l(&file);
+
+ auto t = l.next();
+ ASSERT_TRUE(t.Is(Token::Type::kError));
+ EXPECT_EQ(t.to_str(), "unterminated block comment");
+ EXPECT_EQ(t.source().range.begin.line, 2u);
+ EXPECT_EQ(t.source().range.begin.column, 3u);
+ EXPECT_EQ(t.source().range.end.line, 2u);
+ EXPECT_EQ(t.source().range.end.column, 4u);
}
TEST_F(LexerTest, Null_InBlankspace_IsError) {
- Source::File file("", std::string{' ', 0, ' '});
- Lexer l(&file);
-
- auto t = l.next();
- EXPECT_TRUE(t.IsError());
- EXPECT_EQ(t.source().range.begin.line, 1u);
- EXPECT_EQ(t.source().range.begin.column, 2u);
- EXPECT_EQ(t.source().range.end.line, 1u);
- EXPECT_EQ(t.source().range.end.column, 2u);
- EXPECT_EQ(t.to_str(), "null character found");
+ Source::File file("", std::string{' ', 0, ' '});
+ Lexer l(&file);
+
+ auto t = l.next();
+ EXPECT_TRUE(t.IsError());
+ EXPECT_EQ(t.source().range.begin.line, 1u);
+ EXPECT_EQ(t.source().range.begin.column, 2u);
+ EXPECT_EQ(t.source().range.end.line, 1u);
+ EXPECT_EQ(t.source().range.end.column, 2u);
+ EXPECT_EQ(t.to_str(), "null character found");
}
TEST_F(LexerTest, Null_InLineComment_IsError) {
- Source::File file("", std::string{'/', '/', ' ', 0, ' '});
- Lexer l(&file);
-
- auto t = l.next();
- EXPECT_TRUE(t.IsError());
- EXPECT_EQ(t.source().range.begin.line, 1u);
- EXPECT_EQ(t.source().range.begin.column, 4u);
- EXPECT_EQ(t.source().range.end.line, 1u);
- EXPECT_EQ(t.source().range.end.column, 4u);
- EXPECT_EQ(t.to_str(), "null character found");
+ Source::File file("", std::string{'/', '/', ' ', 0, ' '});
+ Lexer l(&file);
+
+ auto t = l.next();
+ EXPECT_TRUE(t.IsError());
+ EXPECT_EQ(t.source().range.begin.line, 1u);
+ EXPECT_EQ(t.source().range.begin.column, 4u);
+ EXPECT_EQ(t.source().range.end.line, 1u);
+ EXPECT_EQ(t.source().range.end.column, 4u);
+ EXPECT_EQ(t.to_str(), "null character found");
}
TEST_F(LexerTest, Null_InBlockComment_IsError) {
- Source::File file("", std::string{'/', '*', ' ', 0, '*', '/'});
- Lexer l(&file);
-
- auto t = l.next();
- EXPECT_TRUE(t.IsError());
- EXPECT_EQ(t.source().range.begin.line, 1u);
- EXPECT_EQ(t.source().range.begin.column, 4u);
- EXPECT_EQ(t.source().range.end.line, 1u);
- EXPECT_EQ(t.source().range.end.column, 4u);
- EXPECT_EQ(t.to_str(), "null character found");
+ Source::File file("", std::string{'/', '*', ' ', 0, '*', '/'});
+ Lexer l(&file);
+
+ auto t = l.next();
+ EXPECT_TRUE(t.IsError());
+ EXPECT_EQ(t.source().range.begin.line, 1u);
+ EXPECT_EQ(t.source().range.begin.column, 4u);
+ EXPECT_EQ(t.source().range.end.line, 1u);
+ EXPECT_EQ(t.source().range.end.column, 4u);
+ EXPECT_EQ(t.to_str(), "null character found");
}
TEST_F(LexerTest, Null_InIdentifier_IsError) {
- // Try inserting a null in an identifier. Other valid token
- // kinds will behave similarly, so use the identifier case
- // as a representative.
- Source::File file("", std::string{'a', 0, 'c'});
- Lexer l(&file);
-
- auto t = l.next();
- EXPECT_TRUE(t.IsIdentifier());
- EXPECT_EQ(t.to_str(), "a");
- t = l.next();
- EXPECT_TRUE(t.IsError());
- EXPECT_EQ(t.source().range.begin.line, 1u);
- EXPECT_EQ(t.source().range.begin.column, 2u);
- EXPECT_EQ(t.source().range.end.line, 1u);
- EXPECT_EQ(t.source().range.end.column, 2u);
- EXPECT_EQ(t.to_str(), "null character found");
+ // Try inserting a null in an identifier. Other valid token
+ // kinds will behave similarly, so use the identifier case
+ // as a representative.
+ Source::File file("", std::string{'a', 0, 'c'});
+ Lexer l(&file);
+
+ auto t = l.next();
+ EXPECT_TRUE(t.IsIdentifier());
+ EXPECT_EQ(t.to_str(), "a");
+ t = l.next();
+ EXPECT_TRUE(t.IsError());
+ EXPECT_EQ(t.source().range.begin.line, 1u);
+ EXPECT_EQ(t.source().range.begin.column, 2u);
+ EXPECT_EQ(t.source().range.end.line, 1u);
+ EXPECT_EQ(t.source().range.end.column, 2u);
+ EXPECT_EQ(t.to_str(), "null character found");
}
struct FloatData {
- const char* input;
- float result;
+ const char* input;
+ double result;
};
inline std::ostream& operator<<(std::ostream& out, FloatData data) {
- out << std::string(data.input);
- return out;
+ out << std::string(data.input);
+ return out;
}
using FloatTest = testing::TestWithParam<FloatData>;
TEST_P(FloatTest, Parse) {
- auto params = GetParam();
- Source::File file("", params.input);
- Lexer l(&file);
-
- auto t = l.next();
- EXPECT_TRUE(t.Is(Token::Type::kFloatLiteral));
- EXPECT_EQ(t.to_f32(), params.result);
- EXPECT_EQ(t.source().range.begin.line, 1u);
- EXPECT_EQ(t.source().range.begin.column, 1u);
- EXPECT_EQ(t.source().range.end.line, 1u);
- EXPECT_EQ(t.source().range.end.column, 1u + strlen(params.input));
-
- t = l.next();
- EXPECT_TRUE(t.IsEof());
+ auto params = GetParam();
+ Source::File file("", params.input);
+ Lexer l(&file);
+
+ auto t = l.next();
+ if (std::string(params.input).back() == 'f') {
+ EXPECT_TRUE(t.Is(Token::Type::kFloatLiteral_F));
+ } else {
+ EXPECT_TRUE(t.Is(Token::Type::kFloatLiteral));
+ }
+ EXPECT_EQ(t.to_f64(), params.result);
+ EXPECT_EQ(t.source().range.begin.line, 1u);
+ EXPECT_EQ(t.source().range.begin.column, 1u);
+ EXPECT_EQ(t.source().range.end.line, 1u);
+ EXPECT_EQ(t.source().range.end.column, 1u + strlen(params.input));
+
+ t = l.next();
+ EXPECT_TRUE(t.IsEof());
}
INSTANTIATE_TEST_SUITE_P(LexerTest,
FloatTest,
testing::Values(
// No decimal, with 'f' suffix
- FloatData{"0f", 0.0f},
- FloatData{"1f", 1.0f},
- FloatData{"-0f", 0.0f},
- FloatData{"-1f", -1.0f},
+ FloatData{"0f", 0.0},
+ FloatData{"1f", 1.0},
+ FloatData{"-0f", 0.0},
+ FloatData{"-1f", -1.0},
// Zero, with decimal.
- FloatData{"0.0", 0.0f},
- FloatData{"0.", 0.0f},
- FloatData{".0", 0.0f},
- FloatData{"-0.0", 0.0f},
- FloatData{"-0.", 0.0f},
- FloatData{"-.0", 0.0f},
+ FloatData{"0.0", 0.0},
+ FloatData{"0.", 0.0},
+ FloatData{".0", 0.0},
+ FloatData{"-0.0", 0.0},
+ FloatData{"-0.", 0.0},
+ FloatData{"-.0", 0.0},
// Zero, with decimal and 'f' suffix
- FloatData{"0.0f", 0.0f},
- FloatData{"0.f", 0.0f},
- FloatData{".0f", 0.0f},
- FloatData{"-0.0f", 0.0f},
- FloatData{"-0.f", 0.0f},
- FloatData{"-.0", 0.0f},
+ FloatData{"0.0f", 0.0},
+ FloatData{"0.f", 0.0},
+ FloatData{".0f", 0.0},
+ FloatData{"-0.0f", 0.0},
+ FloatData{"-0.f", 0.0},
+ FloatData{"-.0", 0.0},
// Non-zero with decimal
- FloatData{"5.7", 5.7f},
- FloatData{"5.", 5.f},
- FloatData{".7", .7f},
- FloatData{"-5.7", -5.7f},
- FloatData{"-5.", -5.f},
- FloatData{"-.7", -.7f},
+ FloatData{"5.7", 5.7},
+ FloatData{"5.", 5.},
+ FloatData{".7", .7},
+ FloatData{"-5.7", -5.7},
+ FloatData{"-5.", -5.},
+ FloatData{"-.7", -.7},
// Non-zero with decimal and 'f' suffix
- FloatData{"5.7f", 5.7f},
- FloatData{"5.f", 5.f},
- FloatData{".7f", .7f},
- FloatData{"-5.7f", -5.7f},
- FloatData{"-5.f", -5.f},
- FloatData{"-.7f", -.7f},
+ FloatData{"5.7f", static_cast<double>(5.7f)},
+ FloatData{"5.f", static_cast<double>(5.f)},
+ FloatData{".7f", static_cast<double>(.7f)},
+ FloatData{"-5.7f", static_cast<double>(-5.7f)},
+ FloatData{"-5.f", static_cast<double>(-5.f)},
+ FloatData{"-.7f", static_cast<double>(-.7f)},
// No decimal, with exponent
- FloatData{"1e5", 1e5f},
- FloatData{"1E5", 1e5f},
- FloatData{"1e-5", 1e-5f},
- FloatData{"1E-5", 1e-5f},
+ FloatData{"1e5", 1e5},
+ FloatData{"1E5", 1e5},
+ FloatData{"1e-5", 1e-5},
+ FloatData{"1E-5", 1e-5},
// No decimal, with exponent and 'f' suffix
- FloatData{"1e5f", 1e5f},
- FloatData{"1E5f", 1e5f},
- FloatData{"1e-5f", 1e-5f},
- FloatData{"1E-5f", 1e-5f},
+ FloatData{"1e5f", static_cast<double>(1e5f)},
+ FloatData{"1E5f", static_cast<double>(1e5f)},
+ FloatData{"1e-5f", static_cast<double>(1e-5f)},
+ FloatData{"1E-5f", static_cast<double>(1e-5f)},
// With decimal and exponents
- FloatData{"0.2e+12", 0.2e12f},
- FloatData{"1.2e-5", 1.2e-5f},
- FloatData{"2.57e23", 2.57e23f},
- FloatData{"2.5e+0", 2.5f},
- FloatData{"2.5e-0", 2.5f},
+ FloatData{"0.2e+12", 0.2e12},
+ FloatData{"1.2e-5", 1.2e-5},
+ FloatData{"2.57e23", 2.57e23},
+ FloatData{"2.5e+0", 2.5},
+ FloatData{"2.5e-0", 2.5},
// With decimal and exponents and 'f' suffix
- FloatData{"0.2e+12f", 0.2e12f},
- FloatData{"1.2e-5f", 1.2e-5f},
- FloatData{"2.57e23f", 2.57e23f},
- FloatData{"2.5e+0f", 2.5f},
- FloatData{"2.5e-0f", 2.5f}));
+ FloatData{"0.2e+12f", static_cast<double>(0.2e12f)},
+ FloatData{"1.2e-5f", static_cast<double>(1.2e-5f)},
+ FloatData{"2.57e23f", static_cast<double>(2.57e23f)},
+ FloatData{"2.5e+0f", static_cast<double>(2.5f)},
+ FloatData{"2.5e-0f", static_cast<double>(2.5f)},
+ // Quantization
+ FloatData{"3.141592653589793", 3.141592653589793}, // no quantization
+ FloatData{"3.141592653589793f", 3.1415927410125732} // f32 quantized
+ ));
using FloatTest_Invalid = testing::TestWithParam<const char*>;
TEST_P(FloatTest_Invalid, Handles) {
- Source::File file("", GetParam());
- Lexer l(&file);
+ Source::File file("", GetParam());
+ Lexer l(&file);
- auto t = l.next();
- EXPECT_FALSE(t.Is(Token::Type::kFloatLiteral));
+ auto t = l.next();
+ EXPECT_FALSE(t.Is(Token::Type::kFloatLiteral));
}
-INSTANTIATE_TEST_SUITE_P(
- LexerTest,
- FloatTest_Invalid,
- testing::Values(".",
- "-.",
- // Need a mantissa digit
- ".e5",
- ".E5",
- // Need exponent digits
- ".e",
- ".e+",
- ".e-",
- ".E",
- ".e+",
- ".e-",
- // Overflow
- "2.5e+256",
- "-2.5e+127",
- // Magnitude smaller than smallest positive f32.
- "2.5e-300",
- "-2.5e-300",
- // Decimal exponent must immediately
- // follow the 'e'.
- "2.5e 12",
- "2.5e +12",
- "2.5e -12",
- "2.5e+ 123",
- "2.5e- 123",
- "2.5E 12",
- "2.5E +12",
- "2.5E -12",
- "2.5E+ 123",
- "2.5E- 123"));
+INSTANTIATE_TEST_SUITE_P(LexerTest,
+ FloatTest_Invalid,
+ testing::Values(".",
+ "-.",
+ // Need a mantissa digit
+ ".e5",
+ ".E5",
+ // Need exponent digits
+ ".e",
+ ".e+",
+ ".e-",
+ ".E",
+ ".e+",
+ ".e-",
+ // Overflow
+ "2.5e+256f",
+ "-2.5e+127f",
+ // Magnitude smaller than smallest positive f32.
+ "2.5e-300f",
+ "-2.5e-300f",
+ // Decimal exponent must immediately
+ // follow the 'e'.
+ "2.5e 12",
+ "2.5e +12",
+ "2.5e -12",
+ "2.5e+ 123",
+ "2.5e- 123",
+ "2.5E 12",
+ "2.5E +12",
+ "2.5E -12",
+ "2.5E+ 123",
+ "2.5E- 123"));
using AsciiIdentifierTest = testing::TestWithParam<const char*>;
TEST_P(AsciiIdentifierTest, Parse) {
- Source::File file("", GetParam());
- Lexer l(&file);
-
- auto t = l.next();
- EXPECT_TRUE(t.IsIdentifier());
- EXPECT_EQ(t.source().range.begin.line, 1u);
- EXPECT_EQ(t.source().range.begin.column, 1u);
- EXPECT_EQ(t.source().range.end.line, 1u);
- EXPECT_EQ(t.source().range.end.column, 1u + strlen(GetParam()));
- EXPECT_EQ(t.to_str(), GetParam());
+ Source::File file("", GetParam());
+ Lexer l(&file);
+
+ auto t = l.next();
+ EXPECT_TRUE(t.IsIdentifier());
+ EXPECT_EQ(t.source().range.begin.line, 1u);
+ EXPECT_EQ(t.source().range.begin.column, 1u);
+ EXPECT_EQ(t.source().range.end.line, 1u);
+ EXPECT_EQ(t.source().range.end.column, 1u + strlen(GetParam()));
+ EXPECT_EQ(t.to_str(), GetParam());
}
INSTANTIATE_TEST_SUITE_P(LexerTest,
AsciiIdentifierTest,
@@ -378,22 +467,22 @@ INSTANTIATE_TEST_SUITE_P(LexerTest,
"alldigits_0123456789"));
struct UnicodeCase {
- const char* utf8;
- size_t count;
+ const char* utf8;
+ size_t count;
};
using ValidUnicodeIdentifierTest = testing::TestWithParam<UnicodeCase>;
TEST_P(ValidUnicodeIdentifierTest, Parse) {
- Source::File file("", GetParam().utf8);
- Lexer l(&file);
-
- auto t = l.next();
- EXPECT_TRUE(t.IsIdentifier());
- EXPECT_EQ(t.source().range.begin.line, 1u);
- EXPECT_EQ(t.source().range.begin.column, 1u);
- EXPECT_EQ(t.source().range.end.line, 1u);
- EXPECT_EQ(t.source().range.end.column, 1u + GetParam().count);
- EXPECT_EQ(t.to_str(), GetParam().utf8);
+ Source::File file("", GetParam().utf8);
+ Lexer l(&file);
+
+ auto t = l.next();
+ EXPECT_TRUE(t.IsIdentifier());
+ EXPECT_EQ(t.source().range.begin.line, 1u);
+ EXPECT_EQ(t.source().range.begin.column, 1u);
+ EXPECT_EQ(t.source().range.end.line, 1u);
+ EXPECT_EQ(t.source().range.end.column, 1u + GetParam().count);
+ EXPECT_EQ(t.to_str(), GetParam().utf8);
}
INSTANTIATE_TEST_SUITE_P(
LexerTest,
@@ -409,527 +498,465 @@ INSTANTIATE_TEST_SUITE_P(
"\x91\x9b\xf0\x9d\x91\xa1\xf0\x9d\x91\x96\xf0\x9d\x91\x93"
"\xf0\x9d\x91\x96\xf0\x9d\x91\x92\xf0\x9d\x91\x9f",
40},
- UnicodeCase{
- // "identifier"
- "\xef\xbd\x89\xef\xbd\x84\xef\xbd\x85\xef\xbd\x8e\xef\xbd\x94\xef"
- "\xbd\x89\xef\xbd\x86\xef\xbd\x89\xef\xbd\x85\xef\xbd\x92",
- 30},
+ UnicodeCase{// "identifier"
+ "\xef\xbd\x89\xef\xbd\x84\xef\xbd\x85\xef\xbd\x8e\xef\xbd\x94\xef"
+ "\xbd\x89\xef\xbd\x86\xef\xbd\x89\xef\xbd\x85\xef\xbd\x92",
+ 30},
UnicodeCase{// "𝕚𝕕𝕖𝕟𝕥𝕚𝕗𝕚𝕖𝕣𝟙𝟚𝟛"
"\xf0\x9d\x95\x9a\xf0\x9d\x95\x95\xf0\x9d\x95\x96\xf0\x9d"
"\x95\x9f\xf0\x9d\x95\xa5\xf0\x9d\x95\x9a\xf0\x9d\x95\x97"
"\xf0\x9d\x95\x9a\xf0\x9d\x95\x96\xf0\x9d\x95\xa3\xf0\x9d"
"\x9f\x99\xf0\x9d\x9f\x9a\xf0\x9d\x9f\x9b",
52},
- UnicodeCase{
- // "𝖎𝖉𝖊𝖓𝖙𝖎𝖋𝖎𝖊𝖗123"
- "\xf0\x9d\x96\x8e\xf0\x9d\x96\x89\xf0\x9d\x96\x8a\xf0\x9d\x96\x93"
- "\xf0\x9d\x96\x99\xf0\x9d\x96\x8e\xf0\x9d\x96\x8b\xf0\x9d\x96\x8e"
- "\xf0\x9d\x96\x8a\xf0\x9d\x96\x97\x31\x32\x33",
- 43},
+ UnicodeCase{// "𝖎𝖉𝖊𝖓𝖙𝖎𝖋𝖎𝖊𝖗123"
+ "\xf0\x9d\x96\x8e\xf0\x9d\x96\x89\xf0\x9d\x96\x8a\xf0\x9d\x96\x93"
+ "\xf0\x9d\x96\x99\xf0\x9d\x96\x8e\xf0\x9d\x96\x8b\xf0\x9d\x96\x8e"
+ "\xf0\x9d\x96\x8a\xf0\x9d\x96\x97\x31\x32\x33",
+ 43},
}));
using InvalidUnicodeIdentifierTest = testing::TestWithParam<const char*>;
TEST_P(InvalidUnicodeIdentifierTest, Parse) {
- Source::File file("", GetParam());
- Lexer l(&file);
-
- auto t = l.next();
- EXPECT_TRUE(t.IsError());
- EXPECT_EQ(t.source().range.begin.line, 1u);
- EXPECT_EQ(t.source().range.begin.column, 1u);
- EXPECT_EQ(t.source().range.end.line, 1u);
- EXPECT_EQ(t.source().range.end.column, 1u);
- EXPECT_EQ(t.to_str(), "invalid UTF-8");
+ Source::File file("", GetParam());
+ Lexer l(&file);
+
+ auto t = l.next();
+ EXPECT_TRUE(t.IsError());
+ EXPECT_EQ(t.source().range.begin.line, 1u);
+ EXPECT_EQ(t.source().range.begin.column, 1u);
+ EXPECT_EQ(t.source().range.end.line, 1u);
+ EXPECT_EQ(t.source().range.end.column, 1u);
+ EXPECT_EQ(t.to_str(), "invalid UTF-8");
}
-INSTANTIATE_TEST_SUITE_P(
- LexerTest,
- InvalidUnicodeIdentifierTest,
- testing::ValuesIn({
- "\x80\x80\x80\x80", // 10000000
- "\x81\x80\x80\x80", // 10000001
- "\x8f\x80\x80\x80", // 10001111
- "\x90\x80\x80\x80", // 10010000
- "\x91\x80\x80\x80", // 10010001
- "\x9f\x80\x80\x80", // 10011111
- "\xa0\x80\x80\x80", // 10100000
- "\xa1\x80\x80\x80", // 10100001
- "\xaf\x80\x80\x80", // 10101111
- "\xb0\x80\x80\x80", // 10110000
- "\xb1\x80\x80\x80", // 10110001
- "\xbf\x80\x80\x80", // 10111111
- "\xc0\x80\x80\x80", // 11000000
- "\xc1\x80\x80\x80", // 11000001
- "\xf5\x80\x80\x80", // 11110101
- "\xf6\x80\x80\x80", // 11110110
- "\xf7\x80\x80\x80", // 11110111
- "\xf8\x80\x80\x80", // 11111000
- "\xfe\x80\x80\x80", // 11111110
- "\xff\x80\x80\x80", // 11111111
-
- "\xd0", // 2-bytes, missing second byte
- "\xe8\x8f", // 3-bytes, missing third byte
- "\xf4\x8f\x8f", // 4-bytes, missing fourth byte
-
- "\xd0\x7f", // 2-bytes, second byte MSB unset
- "\xe8\x7f\x8f", // 3-bytes, second byte MSB unset
- "\xe8\x8f\x7f", // 3-bytes, third byte MSB unset
- "\xf4\x7f\x8f\x8f", // 4-bytes, second byte MSB unset
- "\xf4\x8f\x7f\x8f", // 4-bytes, third byte MSB unset
- "\xf4\x8f\x8f\x7f", // 4-bytes, fourth byte MSB unset
- }));
+INSTANTIATE_TEST_SUITE_P(LexerTest,
+ InvalidUnicodeIdentifierTest,
+ testing::ValuesIn({
+ "\x80\x80\x80\x80", // 10000000
+ "\x81\x80\x80\x80", // 10000001
+ "\x8f\x80\x80\x80", // 10001111
+ "\x90\x80\x80\x80", // 10010000
+ "\x91\x80\x80\x80", // 10010001
+ "\x9f\x80\x80\x80", // 10011111
+ "\xa0\x80\x80\x80", // 10100000
+ "\xa1\x80\x80\x80", // 10100001
+ "\xaf\x80\x80\x80", // 10101111
+ "\xb0\x80\x80\x80", // 10110000
+ "\xb1\x80\x80\x80", // 10110001
+ "\xbf\x80\x80\x80", // 10111111
+ "\xc0\x80\x80\x80", // 11000000
+ "\xc1\x80\x80\x80", // 11000001
+ "\xf5\x80\x80\x80", // 11110101
+ "\xf6\x80\x80\x80", // 11110110
+ "\xf7\x80\x80\x80", // 11110111
+ "\xf8\x80\x80\x80", // 11111000
+ "\xfe\x80\x80\x80", // 11111110
+ "\xff\x80\x80\x80", // 11111111
+
+ "\xd0", // 2-bytes, missing second byte
+ "\xe8\x8f", // 3-bytes, missing third byte
+ "\xf4\x8f\x8f", // 4-bytes, missing fourth byte
+
+ "\xd0\x7f", // 2-bytes, second byte MSB unset
+ "\xe8\x7f\x8f", // 3-bytes, second byte MSB unset
+ "\xe8\x8f\x7f", // 3-bytes, third byte MSB unset
+ "\xf4\x7f\x8f\x8f", // 4-bytes, second byte MSB unset
+ "\xf4\x8f\x7f\x8f", // 4-bytes, third byte MSB unset
+ "\xf4\x8f\x8f\x7f", // 4-bytes, fourth byte MSB unset
+ }));
TEST_F(LexerTest, IdentifierTest_SingleUnderscoreDoesNotMatch) {
- Source::File file("", "_");
- Lexer l(&file);
+ Source::File file("", "_");
+ Lexer l(&file);
- auto t = l.next();
- EXPECT_FALSE(t.IsIdentifier());
+ auto t = l.next();
+ EXPECT_FALSE(t.IsIdentifier());
}
TEST_F(LexerTest, IdentifierTest_DoesNotStartWithDoubleUnderscore) {
- Source::File file("", "__test");
- Lexer l(&file);
+ Source::File file("", "__test");
+ Lexer l(&file);
- auto t = l.next();
- EXPECT_FALSE(t.IsIdentifier());
+ auto t = l.next();
+ EXPECT_FALSE(t.IsIdentifier());
}
TEST_F(LexerTest, IdentifierTest_DoesNotStartWithNumber) {
- Source::File file("", "01test");
- Lexer l(&file);
-
- auto t = l.next();
- EXPECT_FALSE(t.IsIdentifier());
-}
-
-struct HexSignedIntData {
- const char* input;
- int32_t result;
-};
-inline std::ostream& operator<<(std::ostream& out, HexSignedIntData data) {
- out << std::string(data.input);
- return out;
-}
-
-using IntegerTest_HexSigned = testing::TestWithParam<HexSignedIntData>;
-TEST_P(IntegerTest_HexSigned, Matches) {
- auto params = GetParam();
- Source::File file("", params.input);
- Lexer l(&file);
-
- auto t = l.next();
- EXPECT_TRUE(t.Is(Token::Type::kSintLiteral));
- EXPECT_EQ(t.source().range.begin.line, 1u);
- EXPECT_EQ(t.source().range.begin.column, 1u);
- EXPECT_EQ(t.source().range.end.line, 1u);
- EXPECT_EQ(t.source().range.end.column, 1u + strlen(params.input));
- EXPECT_EQ(t.to_i32(), params.result);
-}
-INSTANTIATE_TEST_SUITE_P(
- LexerTest,
- IntegerTest_HexSigned,
- testing::Values(
- HexSignedIntData{"0x0", 0},
- HexSignedIntData{"0X0", 0},
- HexSignedIntData{"0x42", 66},
- HexSignedIntData{"0X42", 66},
- HexSignedIntData{"-0x42", -66},
- HexSignedIntData{"-0X42", -66},
- HexSignedIntData{"0xeF1Abc9", 250719177},
- HexSignedIntData{"0XeF1Abc9", 250719177},
- HexSignedIntData{"-0x80000000", std::numeric_limits<int32_t>::min()},
- HexSignedIntData{"-0X80000000", std::numeric_limits<int32_t>::min()},
- HexSignedIntData{"0x7FFFFFFF", std::numeric_limits<int32_t>::max()},
- HexSignedIntData{"0X7FFFFFFF", std::numeric_limits<int32_t>::max()}));
-
-TEST_F(LexerTest, HexPrefixOnly_IsError) {
- // Could be the start of a hex integer or hex float, but is neither.
- Source::File file("", "0x");
- Lexer l(&file);
-
- auto t = l.next();
- ASSERT_TRUE(t.Is(Token::Type::kError));
- EXPECT_EQ(t.to_str(),
- "integer or float hex literal has no significant digits");
-}
-
-TEST_F(LexerTest, HexPrefixUpperCaseOnly_IsError) {
- // Could be the start of a hex integer or hex float, but is neither.
- Source::File file("", "0X");
- Lexer l(&file);
-
- auto t = l.next();
- ASSERT_TRUE(t.Is(Token::Type::kError));
- EXPECT_EQ(t.to_str(),
- "integer or float hex literal has no significant digits");
-}
-
-TEST_F(LexerTest, NegativeHexPrefixOnly_IsError) {
- // Could be the start of a hex integer or hex float, but is neither.
- Source::File file("", "-0x");
- Lexer l(&file);
-
- auto t = l.next();
- ASSERT_TRUE(t.Is(Token::Type::kError));
- EXPECT_EQ(t.to_str(),
- "integer or float hex literal has no significant digits");
-}
-
-TEST_F(LexerTest, NegativeHexPrefixUpperCaseOnly_IsError) {
- // Could be the start of a hex integer or hex float, but is neither.
- Source::File file("", "-0X");
- Lexer l(&file);
-
- auto t = l.next();
- ASSERT_TRUE(t.Is(Token::Type::kError));
- EXPECT_EQ(t.to_str(),
- "integer or float hex literal has no significant digits");
-}
-
-TEST_F(LexerTest, IntegerTest_HexSignedTooLarge) {
- Source::File file("", "0x80000000");
- Lexer l(&file);
-
- auto t = l.next();
- ASSERT_TRUE(t.Is(Token::Type::kError));
- EXPECT_EQ(t.to_str(), "i32 (0x80000000) too large");
-}
-
-TEST_F(LexerTest, IntegerTest_HexSignedTooSmall) {
- Source::File file("", "-0x8000000F");
- Lexer l(&file);
-
- auto t = l.next();
- ASSERT_TRUE(t.Is(Token::Type::kError));
- EXPECT_EQ(t.to_str(), "i32 (-0x8000000F) too small");
-}
-
-TEST_F(LexerTest, IntegerTest_HexSignedTooManyDigits) {
- {
- Source::File file("", "-0x100000000000000000000000");
+ Source::File file("", "01test");
Lexer l(&file);
auto t = l.next();
- ASSERT_TRUE(t.Is(Token::Type::kError));
- EXPECT_EQ(t.to_str(),
- "integer literal (-0x10000000...) has too many digits");
- }
- {
- Source::File file("", "0x100000000000000");
- Lexer l(&file);
-
- auto t = l.next();
- ASSERT_TRUE(t.Is(Token::Type::kError));
- EXPECT_EQ(t.to_str(),
- "integer literal (0x10000000...) has too many digits");
- }
+ EXPECT_FALSE(t.IsIdentifier());
}
-struct HexUnsignedIntData {
- const char* input;
- uint32_t result;
+////////////////////////////////////////////////////////////////////////////////
+// ParseIntegerTest
+////////////////////////////////////////////////////////////////////////////////
+struct ParseIntegerCase {
+ const char* input;
+ int64_t result;
};
-inline std::ostream& operator<<(std::ostream& out, HexUnsignedIntData data) {
- out << std::string(data.input);
- return out;
-}
-using IntegerTest_HexUnsigned = testing::TestWithParam<HexUnsignedIntData>;
-TEST_P(IntegerTest_HexUnsigned, Matches) {
- auto params = GetParam();
- Source::File file("", params.input);
- Lexer l(&file);
-
- auto t = l.next();
- EXPECT_TRUE(t.Is(Token::Type::kUintLiteral));
- EXPECT_EQ(t.source().range.begin.line, 1u);
- EXPECT_EQ(t.source().range.begin.column, 1u);
- EXPECT_EQ(t.source().range.end.line, 1u);
- EXPECT_EQ(t.source().range.end.column, 1u + strlen(params.input));
- EXPECT_EQ(t.to_u32(), params.result);
-
- t = l.next();
- EXPECT_TRUE(t.IsEof());
-}
-INSTANTIATE_TEST_SUITE_P(
- LexerTest,
- IntegerTest_HexUnsigned,
- testing::Values(HexUnsignedIntData{"0x0u", 0},
- HexUnsignedIntData{"0x42u", 66},
- HexUnsignedIntData{"0xeF1Abc9u", 250719177},
- HexUnsignedIntData{"0x0u",
- std::numeric_limits<uint32_t>::min()},
- HexUnsignedIntData{"0xFFFFFFFFu",
- std::numeric_limits<uint32_t>::max()}));
-
-TEST_F(LexerTest, IntegerTest_HexUnsignedTooManyDigits) {
- Source::File file("", "0x1000000000000000000000u");
- Lexer l(&file);
-
- auto t = l.next();
- ASSERT_TRUE(t.Is(Token::Type::kError));
- EXPECT_EQ(t.to_str(), "integer literal (0x10000000...) has too many digits");
-}
-struct UnsignedIntData {
- const char* input;
- uint32_t result;
-};
-inline std::ostream& operator<<(std::ostream& out, UnsignedIntData data) {
- out << std::string(data.input);
- return out;
-}
-using IntegerTest_Unsigned = testing::TestWithParam<UnsignedIntData>;
-TEST_P(IntegerTest_Unsigned, Matches) {
- auto params = GetParam();
- Source::File file("", params.input);
- Lexer l(&file);
-
- auto t = l.next();
- EXPECT_TRUE(t.Is(Token::Type::kUintLiteral));
- EXPECT_EQ(t.to_u32(), params.result);
- EXPECT_EQ(t.source().range.begin.line, 1u);
- EXPECT_EQ(t.source().range.begin.column, 1u);
- EXPECT_EQ(t.source().range.end.line, 1u);
- EXPECT_EQ(t.source().range.end.column, 1u + strlen(params.input));
-}
-INSTANTIATE_TEST_SUITE_P(LexerTest,
- IntegerTest_Unsigned,
- testing::Values(UnsignedIntData{"0u", 0u},
- UnsignedIntData{"123u", 123u},
- UnsignedIntData{"4294967295u",
- 4294967295u}));
-
-TEST_F(LexerTest, IntegerTest_UnsignedTooManyDigits) {
- Source::File file("", "10000000000000000000000u");
- Lexer l(&file);
-
- auto t = l.next();
- ASSERT_TRUE(t.Is(Token::Type::kError));
- EXPECT_EQ(t.to_str(), "integer literal (1000000000...) has too many digits");
-}
-
-struct SignedIntData {
- const char* input;
- int32_t result;
-};
-inline std::ostream& operator<<(std::ostream& out, SignedIntData data) {
- out << std::string(data.input);
- return out;
-}
-using IntegerTest_Signed = testing::TestWithParam<SignedIntData>;
-TEST_P(IntegerTest_Signed, Matches) {
- auto params = GetParam();
- Source::File file("", params.input);
- Lexer l(&file);
-
- auto t = l.next();
- EXPECT_TRUE(t.Is(Token::Type::kSintLiteral));
- EXPECT_EQ(t.to_i32(), params.result);
- EXPECT_EQ(t.source().range.begin.line, 1u);
- EXPECT_EQ(t.source().range.begin.column, 1u);
- EXPECT_EQ(t.source().range.end.line, 1u);
- EXPECT_EQ(t.source().range.end.column, 1u + strlen(params.input));
-}
-INSTANTIATE_TEST_SUITE_P(
- LexerTest,
- IntegerTest_Signed,
- testing::Values(SignedIntData{"0", 0},
- SignedIntData{"-2", -2},
- SignedIntData{"2", 2},
- SignedIntData{"123", 123},
- SignedIntData{"2147483647", 2147483647},
- SignedIntData{"-2147483648", -2147483648LL}));
-
-TEST_F(LexerTest, IntegerTest_SignedTooManyDigits) {
- Source::File file("", "-10000000000000000");
- Lexer l(&file);
-
- auto t = l.next();
- ASSERT_TRUE(t.Is(Token::Type::kError));
- EXPECT_EQ(t.to_str(), "integer literal (-1000000000...) has too many digits");
-}
-
-using IntegerTest_Invalid = testing::TestWithParam<const char*>;
-TEST_P(IntegerTest_Invalid, Parses) {
- Source::File file("", GetParam());
- Lexer l(&file);
-
- auto t = l.next();
- EXPECT_FALSE(t.Is(Token::Type::kSintLiteral));
- EXPECT_FALSE(t.Is(Token::Type::kUintLiteral));
-}
-INSTANTIATE_TEST_SUITE_P(LexerTest,
- IntegerTest_Invalid,
- testing::Values("2147483648",
- "4294967296u",
- "01234",
- "0000",
- "-00",
- "00u"));
+inline std::ostream& operator<<(std::ostream& out, ParseIntegerCase data) {
+ out << std::string(data.input);
+ return out;
+}
+
+using ParseIntegerTest = testing::TestWithParam<std::tuple<char, ParseIntegerCase>>;
+TEST_P(ParseIntegerTest, Parse) {
+ auto suffix = std::get<0>(GetParam());
+ auto params = std::get<1>(GetParam());
+ Source::File file("", params.input);
+
+ auto t = Lexer(&file).next();
+ switch (suffix) {
+ case 'i':
+ EXPECT_TRUE(t.Is(Token::Type::kIntLiteral_I));
+ break;
+ case 'u':
+ EXPECT_TRUE(t.Is(Token::Type::kIntLiteral_U));
+ break;
+ case 0:
+ EXPECT_TRUE(t.Is(Token::Type::kIntLiteral));
+ break;
+ }
+ EXPECT_EQ(t.source().range.begin.line, 1u);
+ EXPECT_EQ(t.source().range.begin.column, 1u);
+ EXPECT_EQ(t.source().range.end.line, 1u);
+ EXPECT_EQ(t.source().range.end.column, 1u + strlen(params.input));
+ ASSERT_FALSE(t.IsError()) << t.to_str();
+ EXPECT_EQ(t.to_i64(), params.result);
+}
+
+INSTANTIATE_TEST_SUITE_P(Dec_AInt,
+ ParseIntegerTest,
+ testing::Combine(testing::Values('\0'), // No suffix
+ testing::ValuesIn(std::vector<ParseIntegerCase>{
+ {"0", 0},
+ {"-2", -2},
+ {"2", 2},
+ {"123", 123},
+ {"2147483647", 2147483647},
+ {"-2147483648", -2147483648LL},
+ {"-9223372036854775808", -9223372036854775807LL - 1},
+ })));
+
+INSTANTIATE_TEST_SUITE_P(Dec_u32,
+ ParseIntegerTest,
+ testing::Combine(testing::Values('u'), // Suffix
+ testing::ValuesIn(std::vector<ParseIntegerCase>{
+ {"0u", 0},
+ {"123u", 123},
+ {"4294967295u", 4294967295ll},
+ })));
+
+INSTANTIATE_TEST_SUITE_P(Dec_i32,
+ ParseIntegerTest,
+ testing::Combine(testing::Values('i'), // Suffix
+ testing::ValuesIn(std::vector<ParseIntegerCase>{
+ {"0i", 0u},
+ {"-0i", 0u},
+ {"123i", 123},
+ {"-123i", -123},
+ {"2147483647i", 2147483647},
+ {"-2147483647i", -2147483647ll},
+ })));
+
+INSTANTIATE_TEST_SUITE_P(Hex_AInt,
+ ParseIntegerTest,
+ testing::Combine(testing::Values('\0'), // No suffix
+ testing::ValuesIn(std::vector<ParseIntegerCase>{
+ {"0x0", 0},
+ {"0X0", 0},
+ {"0x42", 66},
+ {"0X42", 66},
+ {"-0x42", -66},
+ {"-0X42", -66},
+ {"0xeF1Abc9", 0xeF1Abc9},
+ {"0XeF1Abc9", 0xeF1Abc9},
+ {"-0xeF1Abc9", -0xeF1Abc9},
+ {"-0XeF1Abc9", -0xeF1Abc9},
+ {"0x80000000", 0x80000000},
+ {"0X80000000", 0X80000000},
+ {"-0x80000000", -0x80000000ll},
+ {"-0X80000000", -0X80000000ll},
+ {"0x7FFFFFFF", 0x7fffffff},
+ {"0X7FFFFFFF", 0x7fffffff},
+ {"0x7fffffff", 0x7fffffff},
+ {"0x7fffffff", 0x7fffffff},
+ {"0x7FfFfFfF", 0x7fffffff},
+ {"0X7FfFfFfF", 0x7fffffff},
+ {"0x7fffffffffffffff", 0x7fffffffffffffffll},
+ {"-0x7fffffffffffffff", -0x7fffffffffffffffll},
+ })));
+
+INSTANTIATE_TEST_SUITE_P(Hex_u32,
+ ParseIntegerTest,
+ testing::Combine(testing::Values('u'), // Suffix
+ testing::ValuesIn(std::vector<ParseIntegerCase>{
+ {"0x0u", 0},
+ {"0x42u", 66},
+ {"0xeF1Abc9u", 250719177},
+ {"0xFFFFFFFFu", 0xffffffff},
+ {"0XFFFFFFFFu", 0xffffffff},
+ {"0xffffffffu", 0xffffffff},
+ {"0Xffffffffu", 0xffffffff},
+ {"0xfFfFfFfFu", 0xffffffff},
+ {"0XfFfFfFfFu", 0xffffffff},
+ })));
+
+INSTANTIATE_TEST_SUITE_P(Hex_i32,
+ ParseIntegerTest,
+ testing::Combine(testing::Values('i'), // Suffix
+ testing::ValuesIn(std::vector<ParseIntegerCase>{
+ {"0x0i", 0},
+ {"0x42i", 66},
+ {"-0x0i", 0},
+ {"-0x42i", -66},
+ {"0xeF1Abc9i", 250719177},
+ {"-0xeF1Abc9i", -250719177},
+ {"0x7FFFFFFFi", 0x7fffffff},
+ {"-0x7FFFFFFFi", -0x7fffffff},
+ {"0X7FFFFFFFi", 0x7fffffff},
+ {"-0X7FFFFFFFi", -0x7fffffff},
+ {"0x7fffffffi", 0x7fffffff},
+ {"-0x7fffffffi", -0x7fffffff},
+ {"0X7fffffffi", 0x7fffffff},
+ {"-0X7fffffffi", -0x7fffffff},
+ {"0x7FfFfFfFi", 0x7fffffff},
+ {"-0x7FfFfFfFi", -0x7fffffff},
+ {"0X7FfFfFfFi", 0x7fffffff},
+ {"-0X7FfFfFfFi", -0x7fffffff},
+ })));
+////////////////////////////////////////////////////////////////////////////////
+// ParseIntegerTest_CannotBeRepresented
+////////////////////////////////////////////////////////////////////////////////
+using ParseIntegerTest_CannotBeRepresented =
+ testing::TestWithParam<std::tuple<const char*, const char*>>;
+TEST_P(ParseIntegerTest_CannotBeRepresented, Parse) {
+ auto type = std::get<0>(GetParam());
+ auto source = std::get<1>(GetParam());
+ Source::File file("", source);
+ auto t = Lexer(&file).next();
+ EXPECT_TRUE(t.Is(Token::Type::kError));
+ auto expect = "value cannot be represented as '" + std::string(type) + "'";
+ EXPECT_EQ(t.to_str(), expect);
+}
+INSTANTIATE_TEST_SUITE_P(AbstractInt,
+ ParseIntegerTest_CannotBeRepresented,
+ testing::Combine(testing::Values("abstract-int"),
+ testing::Values("9223372036854775808",
+ "0xFFFFFFFFFFFFFFFF",
+ "0xffffffffffffffff",
+ "0x8000000000000000")));
+
+INSTANTIATE_TEST_SUITE_P(i32,
+ ParseIntegerTest_CannotBeRepresented,
+ testing::Combine(testing::Values("i32"), // type
+ testing::Values("2147483648i")));
+
+INSTANTIATE_TEST_SUITE_P(u32,
+ ParseIntegerTest_CannotBeRepresented,
+ testing::Combine(testing::Values("u32"), // type
+ testing::Values("4294967296u", //
+ "-1u")));
+
+////////////////////////////////////////////////////////////////////////////////
+// ParseIntegerTest_LeadingZeros
+////////////////////////////////////////////////////////////////////////////////
+using ParseIntegerTest_LeadingZeros = testing::TestWithParam<const char*>;
+TEST_P(ParseIntegerTest_LeadingZeros, Parse) {
+ Source::File file("", GetParam());
+ auto t = Lexer(&file).next();
+ EXPECT_TRUE(t.Is(Token::Type::kError));
+ EXPECT_EQ(t.to_str(), "integer literal cannot have leading 0s");
+}
+
+INSTANTIATE_TEST_SUITE_P(LeadingZero,
+ ParseIntegerTest_LeadingZeros,
+ testing::Values("01234", "0000", "-00", "00u"));
+
+////////////////////////////////////////////////////////////////////////////////
+// ParseIntegerTest_NoSignificantDigits
+////////////////////////////////////////////////////////////////////////////////
+using ParseIntegerTest_NoSignificantDigits = testing::TestWithParam<const char*>;
+TEST_P(ParseIntegerTest_NoSignificantDigits, Parse) {
+ Source::File file("", GetParam());
+ auto t = Lexer(&file).next();
+ EXPECT_TRUE(t.Is(Token::Type::kError));
+ EXPECT_EQ(t.to_str(), "integer or float hex literal has no significant digits");
+}
+
+INSTANTIATE_TEST_SUITE_P(LeadingZero,
+ ParseIntegerTest_NoSignificantDigits,
+ testing::Values("0x",
+ "0X",
+ "-0x",
+ "-0X",
+ "0xu",
+ "0Xu",
+ "-0xu",
+ "-0Xu",
+ "0xi",
+ "0Xi",
+ "-0xi",
+ "-0Xi"));
struct TokenData {
- const char* input;
- Token::Type type;
+ const char* input;
+ Token::Type type;
};
inline std::ostream& operator<<(std::ostream& out, TokenData data) {
- out << std::string(data.input);
- return out;
+ out << std::string(data.input);
+ return out;
}
using PunctuationTest = testing::TestWithParam<TokenData>;
TEST_P(PunctuationTest, Parses) {
- auto params = GetParam();
- Source::File file("", params.input);
- Lexer l(&file);
-
- auto t = l.next();
- EXPECT_TRUE(t.Is(params.type));
- EXPECT_EQ(t.source().range.begin.line, 1u);
- EXPECT_EQ(t.source().range.begin.column, 1u);
- EXPECT_EQ(t.source().range.end.line, 1u);
- EXPECT_EQ(t.source().range.end.column, 1u + strlen(params.input));
-
- t = l.next();
- EXPECT_EQ(t.source().range.begin.column,
- 1 + std::string(params.input).size());
+ auto params = GetParam();
+ Source::File file("", params.input);
+ Lexer l(&file);
+
+ auto t = l.next();
+ EXPECT_TRUE(t.Is(params.type));
+ EXPECT_EQ(t.source().range.begin.line, 1u);
+ EXPECT_EQ(t.source().range.begin.column, 1u);
+ EXPECT_EQ(t.source().range.end.line, 1u);
+ EXPECT_EQ(t.source().range.end.column, 1u + strlen(params.input));
+
+ t = l.next();
+ EXPECT_EQ(t.source().range.begin.column, 1 + std::string(params.input).size());
}
-INSTANTIATE_TEST_SUITE_P(
- LexerTest,
- PunctuationTest,
- testing::Values(TokenData{"&", Token::Type::kAnd},
- TokenData{"&&", Token::Type::kAndAnd},
- TokenData{"->", Token::Type::kArrow},
- TokenData{"@", Token::Type::kAttr},
- TokenData{"/", Token::Type::kForwardSlash},
- TokenData{"!", Token::Type::kBang},
- TokenData{"[", Token::Type::kBracketLeft},
- TokenData{"]", Token::Type::kBracketRight},
- TokenData{"{", Token::Type::kBraceLeft},
- TokenData{"}", Token::Type::kBraceRight},
- TokenData{":", Token::Type::kColon},
- TokenData{",", Token::Type::kComma},
- TokenData{"=", Token::Type::kEqual},
- TokenData{"==", Token::Type::kEqualEqual},
- TokenData{">", Token::Type::kGreaterThan},
- TokenData{">=", Token::Type::kGreaterThanEqual},
- TokenData{">>", Token::Type::kShiftRight},
- TokenData{"<", Token::Type::kLessThan},
- TokenData{"<=", Token::Type::kLessThanEqual},
- TokenData{"<<", Token::Type::kShiftLeft},
- TokenData{"%", Token::Type::kMod},
- TokenData{"!=", Token::Type::kNotEqual},
- TokenData{"-", Token::Type::kMinus},
- TokenData{"--", Token::Type::kMinusMinus},
- TokenData{".", Token::Type::kPeriod},
- TokenData{"+", Token::Type::kPlus},
- TokenData{"++", Token::Type::kPlusPlus},
- TokenData{"|", Token::Type::kOr},
- TokenData{"||", Token::Type::kOrOr},
- TokenData{"(", Token::Type::kParenLeft},
- TokenData{")", Token::Type::kParenRight},
- TokenData{";", Token::Type::kSemicolon},
- TokenData{"*", Token::Type::kStar},
- TokenData{"~", Token::Type::kTilde},
- TokenData{"_", Token::Type::kUnderscore},
- TokenData{"^", Token::Type::kXor},
- TokenData{"+=", Token::Type::kPlusEqual},
- TokenData{"-=", Token::Type::kMinusEqual},
- TokenData{"*=", Token::Type::kTimesEqual},
- TokenData{"/=", Token::Type::kDivisionEqual},
- TokenData{"%=", Token::Type::kModuloEqual},
- TokenData{"&=", Token::Type::kAndEqual},
- TokenData{"|=", Token::Type::kOrEqual},
- TokenData{"^=", Token::Type::kXorEqual}));
+INSTANTIATE_TEST_SUITE_P(LexerTest,
+ PunctuationTest,
+ testing::Values(TokenData{"&", Token::Type::kAnd},
+ TokenData{"&&", Token::Type::kAndAnd},
+ TokenData{"->", Token::Type::kArrow},
+ TokenData{"@", Token::Type::kAttr},
+ TokenData{"/", Token::Type::kForwardSlash},
+ TokenData{"!", Token::Type::kBang},
+ TokenData{"[", Token::Type::kBracketLeft},
+ TokenData{"]", Token::Type::kBracketRight},
+ TokenData{"{", Token::Type::kBraceLeft},
+ TokenData{"}", Token::Type::kBraceRight},
+ TokenData{":", Token::Type::kColon},
+ TokenData{",", Token::Type::kComma},
+ TokenData{"=", Token::Type::kEqual},
+ TokenData{"==", Token::Type::kEqualEqual},
+ TokenData{">", Token::Type::kGreaterThan},
+ TokenData{">=", Token::Type::kGreaterThanEqual},
+ TokenData{">>", Token::Type::kShiftRight},
+ TokenData{"<", Token::Type::kLessThan},
+ TokenData{"<=", Token::Type::kLessThanEqual},
+ TokenData{"<<", Token::Type::kShiftLeft},
+ TokenData{"%", Token::Type::kMod},
+ TokenData{"!=", Token::Type::kNotEqual},
+ TokenData{"-", Token::Type::kMinus},
+ TokenData{"--", Token::Type::kMinusMinus},
+ TokenData{".", Token::Type::kPeriod},
+ TokenData{"+", Token::Type::kPlus},
+ TokenData{"++", Token::Type::kPlusPlus},
+ TokenData{"|", Token::Type::kOr},
+ TokenData{"||", Token::Type::kOrOr},
+ TokenData{"(", Token::Type::kParenLeft},
+ TokenData{")", Token::Type::kParenRight},
+ TokenData{";", Token::Type::kSemicolon},
+ TokenData{"*", Token::Type::kStar},
+ TokenData{"~", Token::Type::kTilde},
+ TokenData{"_", Token::Type::kUnderscore},
+ TokenData{"^", Token::Type::kXor},
+ TokenData{"+=", Token::Type::kPlusEqual},
+ TokenData{"-=", Token::Type::kMinusEqual},
+ TokenData{"*=", Token::Type::kTimesEqual},
+ TokenData{"/=", Token::Type::kDivisionEqual},
+ TokenData{"%=", Token::Type::kModuloEqual},
+ TokenData{"&=", Token::Type::kAndEqual},
+ TokenData{"|=", Token::Type::kOrEqual},
+ TokenData{"^=", Token::Type::kXorEqual}));
using KeywordTest = testing::TestWithParam<TokenData>;
TEST_P(KeywordTest, Parses) {
- auto params = GetParam();
- Source::File file("", params.input);
- Lexer l(&file);
-
- auto t = l.next();
- EXPECT_TRUE(t.Is(params.type)) << params.input;
- EXPECT_EQ(t.source().range.begin.line, 1u);
- EXPECT_EQ(t.source().range.begin.column, 1u);
- EXPECT_EQ(t.source().range.end.line, 1u);
- EXPECT_EQ(t.source().range.end.column, 1u + strlen(params.input));
-
- t = l.next();
- EXPECT_EQ(t.source().range.begin.column,
- 1 + std::string(params.input).size());
+ auto params = GetParam();
+ Source::File file("", params.input);
+ Lexer l(&file);
+
+ auto t = l.next();
+ EXPECT_TRUE(t.Is(params.type)) << params.input;
+ EXPECT_EQ(t.source().range.begin.line, 1u);
+ EXPECT_EQ(t.source().range.begin.column, 1u);
+ EXPECT_EQ(t.source().range.end.line, 1u);
+ EXPECT_EQ(t.source().range.end.column, 1u + strlen(params.input));
+
+ t = l.next();
+ EXPECT_EQ(t.source().range.begin.column, 1 + std::string(params.input).size());
}
INSTANTIATE_TEST_SUITE_P(
LexerTest,
KeywordTest,
- testing::Values(
- TokenData{"array", Token::Type::kArray},
- TokenData{"bitcast", Token::Type::kBitcast},
- TokenData{"bool", Token::Type::kBool},
- TokenData{"break", Token::Type::kBreak},
- TokenData{"case", Token::Type::kCase},
- TokenData{"continue", Token::Type::kContinue},
- TokenData{"continuing", Token::Type::kContinuing},
- TokenData{"default", Token::Type::kDefault},
- TokenData{"discard", Token::Type::kDiscard},
- TokenData{"else", Token::Type::kElse},
- TokenData{"f32", Token::Type::kF32},
- TokenData{"fallthrough", Token::Type::kFallthrough},
- TokenData{"false", Token::Type::kFalse},
- TokenData{"fn", Token::Type::kFn},
- TokenData{"for", Token::Type::kFor},
- TokenData{"function", Token::Type::kFunction},
- TokenData{"i32", Token::Type::kI32},
- TokenData{"if", Token::Type::kIf},
- TokenData{"import", Token::Type::kImport},
- TokenData{"let", Token::Type::kLet},
- TokenData{"loop", Token::Type::kLoop},
- TokenData{"mat2x2", Token::Type::kMat2x2},
- TokenData{"mat2x3", Token::Type::kMat2x3},
- TokenData{"mat2x4", Token::Type::kMat2x4},
- TokenData{"mat3x2", Token::Type::kMat3x2},
- TokenData{"mat3x3", Token::Type::kMat3x3},
- TokenData{"mat3x4", Token::Type::kMat3x4},
- TokenData{"mat4x2", Token::Type::kMat4x2},
- TokenData{"mat4x3", Token::Type::kMat4x3},
- TokenData{"mat4x4", Token::Type::kMat4x4},
- TokenData{"override", Token::Type::kOverride},
- TokenData{"private", Token::Type::kPrivate},
- TokenData{"ptr", Token::Type::kPtr},
- TokenData{"return", Token::Type::kReturn},
- TokenData{"sampler", Token::Type::kSampler},
- TokenData{"sampler_comparison", Token::Type::kComparisonSampler},
- TokenData{"storage", Token::Type::kStorage},
- TokenData{"storage_buffer", Token::Type::kStorage},
- TokenData{"struct", Token::Type::kStruct},
- TokenData{"switch", Token::Type::kSwitch},
- TokenData{"texture_1d", Token::Type::kTextureSampled1d},
- TokenData{"texture_2d", Token::Type::kTextureSampled2d},
- TokenData{"texture_2d_array", Token::Type::kTextureSampled2dArray},
- TokenData{"texture_3d", Token::Type::kTextureSampled3d},
- TokenData{"texture_cube", Token::Type::kTextureSampledCube},
- TokenData{"texture_cube_array", Token::Type::kTextureSampledCubeArray},
- TokenData{"texture_depth_2d", Token::Type::kTextureDepth2d},
- TokenData{"texture_depth_2d_array", Token::Type::kTextureDepth2dArray},
- TokenData{"texture_depth_cube", Token::Type::kTextureDepthCube},
- TokenData{"texture_depth_cube_array",
- Token::Type::kTextureDepthCubeArray},
- TokenData{"texture_depth_multisampled_2d",
- Token::Type::kTextureDepthMultisampled2d},
- TokenData{"texture_multisampled_2d",
- Token::Type::kTextureMultisampled2d},
- TokenData{"texture_storage_1d", Token::Type::kTextureStorage1d},
- TokenData{"texture_storage_2d", Token::Type::kTextureStorage2d},
- TokenData{"texture_storage_2d_array",
- Token::Type::kTextureStorage2dArray},
- TokenData{"texture_storage_3d", Token::Type::kTextureStorage3d},
- TokenData{"true", Token::Type::kTrue},
- TokenData{"type", Token::Type::kType},
- TokenData{"u32", Token::Type::kU32},
- TokenData{"uniform", Token::Type::kUniform},
- TokenData{"var", Token::Type::kVar},
- TokenData{"vec2", Token::Type::kVec2},
- TokenData{"vec3", Token::Type::kVec3},
- TokenData{"vec4", Token::Type::kVec4},
- TokenData{"workgroup", Token::Type::kWorkgroup}));
+ testing::Values(TokenData{"array", Token::Type::kArray},
+ TokenData{"bitcast", Token::Type::kBitcast},
+ TokenData{"bool", Token::Type::kBool},
+ TokenData{"break", Token::Type::kBreak},
+ TokenData{"case", Token::Type::kCase},
+ TokenData{"continue", Token::Type::kContinue},
+ TokenData{"continuing", Token::Type::kContinuing},
+ TokenData{"default", Token::Type::kDefault},
+ TokenData{"discard", Token::Type::kDiscard},
+ TokenData{"else", Token::Type::kElse},
+ TokenData{"f32", Token::Type::kF32},
+ TokenData{"fallthrough", Token::Type::kFallthrough},
+ TokenData{"false", Token::Type::kFalse},
+ TokenData{"fn", Token::Type::kFn},
+ TokenData{"for", Token::Type::kFor},
+ TokenData{"function", Token::Type::kFunction},
+ TokenData{"i32", Token::Type::kI32},
+ TokenData{"if", Token::Type::kIf},
+ TokenData{"import", Token::Type::kImport},
+ TokenData{"let", Token::Type::kLet},
+ TokenData{"loop", Token::Type::kLoop},
+ TokenData{"mat2x2", Token::Type::kMat2x2},
+ TokenData{"mat2x3", Token::Type::kMat2x3},
+ TokenData{"mat2x4", Token::Type::kMat2x4},
+ TokenData{"mat3x2", Token::Type::kMat3x2},
+ TokenData{"mat3x3", Token::Type::kMat3x3},
+ TokenData{"mat3x4", Token::Type::kMat3x4},
+ TokenData{"mat4x2", Token::Type::kMat4x2},
+ TokenData{"mat4x3", Token::Type::kMat4x3},
+ TokenData{"mat4x4", Token::Type::kMat4x4},
+ TokenData{"override", Token::Type::kOverride},
+ TokenData{"private", Token::Type::kPrivate},
+ TokenData{"ptr", Token::Type::kPtr},
+ TokenData{"return", Token::Type::kReturn},
+ TokenData{"sampler", Token::Type::kSampler},
+ TokenData{"sampler_comparison", Token::Type::kComparisonSampler},
+ TokenData{"storage", Token::Type::kStorage},
+ TokenData{"storage_buffer", Token::Type::kStorage},
+ TokenData{"struct", Token::Type::kStruct},
+ TokenData{"switch", Token::Type::kSwitch},
+ TokenData{"texture_1d", Token::Type::kTextureSampled1d},
+ TokenData{"texture_2d", Token::Type::kTextureSampled2d},
+ TokenData{"texture_2d_array", Token::Type::kTextureSampled2dArray},
+ TokenData{"texture_3d", Token::Type::kTextureSampled3d},
+ TokenData{"texture_cube", Token::Type::kTextureSampledCube},
+ TokenData{"texture_cube_array", Token::Type::kTextureSampledCubeArray},
+ TokenData{"texture_depth_2d", Token::Type::kTextureDepth2d},
+ TokenData{"texture_depth_2d_array", Token::Type::kTextureDepth2dArray},
+ TokenData{"texture_depth_cube", Token::Type::kTextureDepthCube},
+ TokenData{"texture_depth_cube_array", Token::Type::kTextureDepthCubeArray},
+ TokenData{"texture_depth_multisampled_2d",
+ Token::Type::kTextureDepthMultisampled2d},
+ TokenData{"texture_multisampled_2d", Token::Type::kTextureMultisampled2d},
+ TokenData{"texture_storage_1d", Token::Type::kTextureStorage1d},
+ TokenData{"texture_storage_2d", Token::Type::kTextureStorage2d},
+ TokenData{"texture_storage_2d_array", Token::Type::kTextureStorage2dArray},
+ TokenData{"texture_storage_3d", Token::Type::kTextureStorage3d},
+ TokenData{"true", Token::Type::kTrue},
+ TokenData{"type", Token::Type::kType},
+ TokenData{"u32", Token::Type::kU32},
+ TokenData{"uniform", Token::Type::kUniform},
+ TokenData{"var", Token::Type::kVar},
+ TokenData{"vec2", Token::Type::kVec2},
+ TokenData{"vec3", Token::Type::kVec3},
+ TokenData{"vec4", Token::Type::kVec4},
+ TokenData{"workgroup", Token::Type::kWorkgroup}));
} // namespace
} // namespace tint::reader::wgsl
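
The new quantization cases in FloatTest encode the f32 rounding rule directly in the expected values: an 'f'-suffixed literal is rounded to the nearest f32 and then widened back to f64 for comparison, which is why the suffixed expectations above are written as static_cast<double>(...f). A standalone check of the pi case, not part of the test file, reproduces the constant 3.1415927410125732 used above:

    // Standalone illustration: round-trip pi through f32 and print the result.
    #include <cstdio>

    int main() {
        double pi = 3.141592653589793;  // closest f64 to pi
        double quantized = static_cast<double>(static_cast<float>(pi));
        std::printf("%.17g\n", quantized);  // prints 3.1415927410125732
        return 0;
    }
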
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/parser.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/parser.cc
index 70e8c09c0a0..c1ad4a365ca 100644
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/parser.cc
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/parser.cc
@@ -21,9 +21,9 @@
namespace tint::reader::wgsl {
Program Parse(Source::File const* file) {
- ParserImpl parser(file);
- parser.Parse();
- return Program(std::move(parser.builder()));
+ ParserImpl parser(file);
+ parser.Parse();
+ return Program(std::move(parser.builder()));
}
} // namespace tint::reader::wgsl
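
Parse() is the thin public entry point over ParserImpl. A minimal caller looks like the sketch below, mirroring the benchmark in parser_bench.cc that follows; the header path and shader text are illustrative assumptions, not taken from this patch:

    // Sketch: parse a WGSL string and inspect the diagnostics.
    #include "src/tint/reader/wgsl/parser.h"  // assumed header for Parse()

    tint::Program ParseExample() {
        tint::Source::File file("example.wgsl", "fn main() {}");  // illustrative shader
        auto program = tint::reader::wgsl::Parse(&file);
        if (program.Diagnostics().contains_errors()) {
            // report program.Diagnostics().str() and handle the failure
        }
        return program;
    }
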
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_bench.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_bench.cc
index 471bba00ecc..097accf0c8c 100644
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_bench.cc
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_bench.cc
@@ -20,18 +20,18 @@ namespace tint::reader::wgsl {
namespace {
void ParseWGSL(benchmark::State& state, std::string input_name) {
- auto res = bench::LoadInputFile(input_name);
- if (auto err = std::get_if<bench::Error>(&res)) {
- state.SkipWithError(err->msg.c_str());
- return;
- }
- auto& file = std::get<Source::File>(res);
- for (auto _ : state) {
- auto res = Parse(&file);
- if (res.Diagnostics().contains_errors()) {
- state.SkipWithError(res.Diagnostics().str().c_str());
+ auto res = bench::LoadInputFile(input_name);
+ if (auto err = std::get_if<bench::Error>(&res)) {
+ state.SkipWithError(err->msg.c_str());
+ return;
+ }
+ auto& file = std::get<Source::File>(res);
+ for (auto _ : state) {
+ auto res = Parse(&file);
+ if (res.Diagnostics().contains_errors()) {
+ state.SkipWithError(res.Diagnostics().str().c_str());
+ }
}
- }
}
TINT_BENCHMARK_WGSL_PROGRAMS(ParseWGSL);
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl.cc
index b5890dbda38..a28b7987fbc 100644
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl.cc
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl.cc
@@ -14,6 +14,8 @@
#include "src/tint/reader/wgsl/parser_impl.h"
+#include <limits>
+
#include "src/tint/ast/array.h"
#include "src/tint/ast/assignment_statement.h"
#include "src/tint/ast/bitcast_expression.h"
@@ -37,10 +39,10 @@
#include "src/tint/ast/vector.h"
#include "src/tint/ast/workgroup_attribute.h"
#include "src/tint/reader/wgsl/lexer.h"
-#include "src/tint/sem/depth_texture_type.h"
-#include "src/tint/sem/external_texture_type.h"
-#include "src/tint/sem/multisampled_texture_type.h"
-#include "src/tint/sem/sampled_texture_type.h"
+#include "src/tint/sem/depth_texture.h"
+#include "src/tint/sem/external_texture.h"
+#include "src/tint/sem/multisampled_texture.h"
+#include "src/tint/sem/sampled_texture.h"
namespace tint::reader::wgsl {
namespace {
@@ -69,43 +71,43 @@ const char kWriteAccess[] = "write";
const char kReadWriteAccess[] = "read_write";
ast::Builtin ident_to_builtin(std::string_view str) {
- if (str == "position") {
- return ast::Builtin::kPosition;
- }
- if (str == "vertex_index") {
- return ast::Builtin::kVertexIndex;
- }
- if (str == "instance_index") {
- return ast::Builtin::kInstanceIndex;
- }
- if (str == "front_facing") {
- return ast::Builtin::kFrontFacing;
- }
- if (str == "frag_depth") {
- return ast::Builtin::kFragDepth;
- }
- if (str == "local_invocation_id") {
- return ast::Builtin::kLocalInvocationId;
- }
- if (str == "local_invocation_idx" || str == "local_invocation_index") {
- return ast::Builtin::kLocalInvocationIndex;
- }
- if (str == "global_invocation_id") {
- return ast::Builtin::kGlobalInvocationId;
- }
- if (str == "workgroup_id") {
- return ast::Builtin::kWorkgroupId;
- }
- if (str == "num_workgroups") {
- return ast::Builtin::kNumWorkgroups;
- }
- if (str == "sample_index") {
- return ast::Builtin::kSampleIndex;
- }
- if (str == "sample_mask") {
- return ast::Builtin::kSampleMask;
- }
- return ast::Builtin::kNone;
+ if (str == "position") {
+ return ast::Builtin::kPosition;
+ }
+ if (str == "vertex_index") {
+ return ast::Builtin::kVertexIndex;
+ }
+ if (str == "instance_index") {
+ return ast::Builtin::kInstanceIndex;
+ }
+ if (str == "front_facing") {
+ return ast::Builtin::kFrontFacing;
+ }
+ if (str == "frag_depth") {
+ return ast::Builtin::kFragDepth;
+ }
+ if (str == "local_invocation_id") {
+ return ast::Builtin::kLocalInvocationId;
+ }
+ if (str == "local_invocation_idx" || str == "local_invocation_index") {
+ return ast::Builtin::kLocalInvocationIndex;
+ }
+ if (str == "global_invocation_id") {
+ return ast::Builtin::kGlobalInvocationId;
+ }
+ if (str == "workgroup_id") {
+ return ast::Builtin::kWorkgroupId;
+ }
+ if (str == "num_workgroups") {
+ return ast::Builtin::kNumWorkgroups;
+ }
+ if (str == "sample_index") {
+ return ast::Builtin::kSampleIndex;
+ }
+ if (str == "sample_mask") {
+ return ast::Builtin::kSampleMask;
+ }
+ return ast::Builtin::kNone;
}
const char kBindingAttribute[] = "binding";
@@ -122,70 +124,73 @@ const char kWorkgroupSizeAttribute[] = "workgroup_size";
// https://gpuweb.github.io/gpuweb/wgsl.html#reserved-keywords
bool is_reserved(Token t) {
- return t == "asm" || t == "bf16" || t == "const" || t == "do" ||
- t == "enum" || t == "f16" || t == "f64" || t == "handle" ||
- t == "i8" || t == "i16" || t == "i64" || t == "mat" ||
- t == "premerge" || t == "regardless" || t == "typedef" || t == "u8" ||
- t == "u16" || t == "u64" || t == "unless" || t == "using" ||
- t == "vec" || t == "void" || t == "while";
+ return t == "asm" || t == "bf16" || t == "const" || t == "do" || t == "enum" || t == "f64" ||
+ t == "handle" || t == "i8" || t == "i16" || t == "i64" || t == "mat" ||
+ t == "premerge" || t == "regardless" || t == "typedef" || t == "u8" || t == "u16" ||
+ t == "u64" || t == "unless" || t == "using" || t == "vec" || t == "void" || t == "while";
}
/// Enter-exit counters for block token types.
/// Used by sync_to() to skip over closing block tokens that were opened during
/// the forward scan.
struct BlockCounters {
- int brace = 0; // { }
- int bracket = 0; // [ ]
- int paren = 0; // ( )
-
- /// @return the current enter-exit depth for the given block token type. If
- /// `t` is not a block token type, then 0 is always returned.
- int consume(const Token& t) {
- if (t.Is(Token::Type::kBraceLeft))
- return brace++;
- if (t.Is(Token::Type::kBraceRight))
- return brace--;
- if (t.Is(Token::Type::kBracketLeft))
- return bracket++;
- if (t.Is(Token::Type::kBracketRight))
- return bracket--;
- if (t.Is(Token::Type::kParenLeft))
- return paren++;
- if (t.Is(Token::Type::kParenRight))
- return paren--;
- return 0;
- }
+ int brace = 0; // { }
+ int bracket = 0; // [ ]
+ int paren = 0; // ( )
+
+ /// @return the current enter-exit depth for the given block token type. If
+ /// `t` is not a block token type, then 0 is always returned.
+ int consume(const Token& t) {
+ if (t.Is(Token::Type::kBraceLeft)) {
+ return brace++;
+ }
+ if (t.Is(Token::Type::kBraceRight)) {
+ return brace--;
+ }
+ if (t.Is(Token::Type::kBracketLeft)) {
+ return bracket++;
+ }
+ if (t.Is(Token::Type::kBracketRight)) {
+ return bracket--;
+ }
+ if (t.Is(Token::Type::kParenLeft)) {
+ return paren++;
+ }
+ if (t.Is(Token::Type::kParenRight)) {
+ return paren--;
+ }
+ return 0;
+ }
};
} // namespace
/// RAII helper that combines a Source on construction with the last token's
/// source when implicitly converted to `Source`.
class ParserImpl::MultiTokenSource {
- public:
- /// Constructor that starts with Source at the current peek position
- /// @param parser the parser
- explicit MultiTokenSource(ParserImpl* parser)
- : MultiTokenSource(parser, parser->peek().source().Begin()) {}
-
- /// Constructor that starts with the input `start` Source
- /// @param parser the parser
- /// @param start the start source of the range
- MultiTokenSource(ParserImpl* parser, const Source& start)
- : parser_(parser), start_(start) {}
-
- /// Implicit conversion to Source that returns the combined source from start
- /// to the current last token's source.
- operator Source() const {
- Source end = parser_->last_token().source().End();
- if (end < start_) {
- end = start_;
- }
- return Source::Combine(start_, end);
- }
-
- private:
- ParserImpl* parser_;
- Source start_;
+ public:
+ /// Constructor that starts with Source at the current peek position
+ /// @param parser the parser
+ explicit MultiTokenSource(ParserImpl* parser)
+ : MultiTokenSource(parser, parser->peek().source().Begin()) {}
+
+ /// Constructor that starts with the input `start` Source
+ /// @param parser the parser
+ /// @param start the start source of the range
+ MultiTokenSource(ParserImpl* parser, const Source& start) : parser_(parser), start_(start) {}
+
+ /// Implicit conversion to Source that returns the combined source from start
+ /// to the current last token's source.
+ operator Source() const {
+ Source end = parser_->last_token().source().End();
+ if (end < start_) {
+ end = start_;
+ }
+ return Source::Combine(start_, end);
+ }
+
+ private:
+ ParserImpl* parser_;
+ Source start_;
};
ParserImpl::TypedIdentifier::TypedIdentifier() = default;
@@ -208,16 +213,12 @@ ParserImpl::FunctionHeader::FunctionHeader(Source src,
ast::VariableList p,
const ast::Type* ret_ty,
ast::AttributeList ret_attrs)
- : source(src),
- name(n),
- params(p),
- return_type(ret_ty),
- return_type_attributes(ret_attrs) {}
+ : source(src), name(n), params(p), return_type(ret_ty), return_type_attributes(ret_attrs) {}
ParserImpl::FunctionHeader::~FunctionHeader() = default;
-ParserImpl::FunctionHeader& ParserImpl::FunctionHeader::operator=(
- const FunctionHeader& rhs) = default;
+ParserImpl::FunctionHeader& ParserImpl::FunctionHeader::operator=(const FunctionHeader& rhs) =
+ default;
ParserImpl::VarDeclInfo::VarDeclInfo() = default;
@@ -236,89 +237,158 @@ ParserImpl::VarDeclInfo::VarDeclInfo(Source source_in,
ParserImpl::VarDeclInfo::~VarDeclInfo() = default;
-ParserImpl::ParserImpl(Source::File const* file)
- : lexer_(std::make_unique<Lexer>(file)) {}
+ParserImpl::ParserImpl(Source::File const* file) : lexer_(std::make_unique<Lexer>(file)) {}
ParserImpl::~ParserImpl() = default;
ParserImpl::Failure::Errored ParserImpl::add_error(const Source& source,
std::string_view err,
std::string_view use) {
- std::stringstream msg;
- msg << err;
- if (!use.empty()) {
- msg << " for " << use;
- }
- add_error(source, msg.str());
- return Failure::kErrored;
+ std::stringstream msg;
+ msg << err;
+ if (!use.empty()) {
+ msg << " for " << use;
+ }
+ add_error(source, msg.str());
+ return Failure::kErrored;
}
-ParserImpl::Failure::Errored ParserImpl::add_error(const Token& t,
- const std::string& err) {
- add_error(t.source(), err);
- return Failure::kErrored;
+ParserImpl::Failure::Errored ParserImpl::add_error(const Token& t, const std::string& err) {
+ add_error(t.source(), err);
+ return Failure::kErrored;
}
-ParserImpl::Failure::Errored ParserImpl::add_error(const Source& source,
- const std::string& err) {
- if (silence_errors_ == 0) {
- builder_.Diagnostics().add_error(diag::System::Reader, err, source);
- }
- return Failure::kErrored;
+ParserImpl::Failure::Errored ParserImpl::add_error(const Source& source, const std::string& err) {
+ if (silence_errors_ == 0) {
+ builder_.Diagnostics().add_error(diag::System::Reader, err, source);
+ }
+ return Failure::kErrored;
}
void ParserImpl::deprecated(const Source& source, const std::string& msg) {
- builder_.Diagnostics().add_warning(
- diag::System::Reader, "use of deprecated language feature: " + msg,
- source);
+ builder_.Diagnostics().add_warning(diag::System::Reader,
+ "use of deprecated language feature: " + msg, source);
}
Token ParserImpl::next() {
- if (!token_queue_.empty()) {
- auto t = token_queue_.front();
- token_queue_.pop_front();
- last_token_ = t;
+ if (!token_queue_.empty()) {
+ auto t = token_queue_.front();
+ token_queue_.pop_front();
+ last_token_ = t;
+ return last_token_;
+ }
+ last_token_ = lexer_->next();
return last_token_;
- }
- last_token_ = lexer_->next();
- return last_token_;
}
Token ParserImpl::peek(size_t idx) {
- while (token_queue_.size() < (idx + 1)) {
- token_queue_.push_back(lexer_->next());
- }
- return token_queue_[idx];
+ while (token_queue_.size() < (idx + 1)) {
+ token_queue_.push_back(lexer_->next());
+ }
+ return token_queue_[idx];
}
bool ParserImpl::peek_is(Token::Type tok, size_t idx) {
- return peek(idx).Is(tok);
+ return peek(idx).Is(tok);
}
Token ParserImpl::last_token() const {
- return last_token_;
+ return last_token_;
}
bool ParserImpl::Parse() {
- translation_unit();
- return !has_error();
+ translation_unit();
+ return !has_error();
}
// translation_unit
-// : global_decl* EOF
+// : enable_directive* global_decl* EOF
void ParserImpl::translation_unit() {
- while (continue_parsing()) {
- auto p = peek();
- if (p.IsEof()) {
- break;
+ bool after_global_decl = false;
+ while (continue_parsing()) {
+ auto p = peek();
+ if (p.IsEof()) {
+ break;
+ }
+
+ auto ed = enable_directive();
+ if (ed.matched) {
+ if (after_global_decl) {
+ add_error(p, "enable directives must come before all global declarations");
+ }
+ } else if (ed.errored) {
+ // Found an invalid enable directive.
+ continue;
+ } else {
+ auto gd = global_decl();
+
+ if (gd.matched) {
+ after_global_decl = true;
+ }
+
+ if (!gd.matched && !gd.errored) {
+ add_error(p, "unexpected token");
+ }
+ }
+
+ if (builder_.Diagnostics().error_count() >= max_errors_) {
+ add_error(Source{{}, p.source().file},
+ "stopping after " + std::to_string(max_errors_) + " errors");
+ break;
+ }
+ }
+}
+
+// enable_directive
+// : enable name SEMICOLON
+Maybe<bool> ParserImpl::enable_directive() {
+ auto decl = sync(Token::Type::kSemicolon, [&]() -> Maybe<bool> {
+ if (!match(Token::Type::kEnable)) {
+ return Failure::kNoMatch;
+ }
+
+ // Match the extension name.
+ Expect<std::string> name = {""};
+ auto t = peek();
+ if (t.IsIdentifier()) {
+ synchronized_ = true;
+ next();
+ name = {t.to_str(), t.source()};
+ } else if (t.Is(Token::Type::kF16)) {
+ // `f16` is a valid extension name and also a keyword
+ synchronized_ = true;
+ next();
+ name = {"f16", t.source()};
+ } else if (handle_error(t)) {
+ // The token might itself be an error.
+ return Failure::kErrored;
+ } else {
+ // Failed to match an extension name.
+ synchronized_ = false;
+ return add_error(t.source(), "invalid extension name");
+ }
+
+ if (!expect("enable directive", Token::Type::kSemicolon)) {
+ return Failure::kErrored;
+ }
+
+ auto extension = ast::ParseExtension(name.value);
+ if (extension == ast::Extension::kNone) {
+ return add_error(name.source, "unsupported extension: '" + name.value + "'");
+ }
+ builder_.AST().AddEnable(create<ast::Enable>(name.source, extension));
+
+ return true;
+ });
+
+ if (decl.errored) {
+ return Failure::kErrored;
}
- expect_global_decl();
- if (builder_.Diagnostics().error_count() >= max_errors_) {
- add_error(Source{{}, p.source().file},
- "stopping after " + std::to_string(max_errors_) + " errors");
- break;
+ if (decl.matched) {
+ return true;
}
- }
+
+ return Failure::kNoMatch;
}
// global_decl
@@ -328,148 +398,159 @@ void ParserImpl::translation_unit() {
// | type_alias SEMICOLON
// | struct_decl
// | function_decl
-Expect<bool> ParserImpl::expect_global_decl() {
- if (match(Token::Type::kSemicolon) || match(Token::Type::kEOF))
- return true;
-
- bool errored = false;
+Maybe<bool> ParserImpl::global_decl() {
+ if (match(Token::Type::kSemicolon) || match(Token::Type::kEOF)) {
+ return true;
+ }
- auto attrs = attribute_list();
- if (attrs.errored)
- errored = true;
- if (!continue_parsing())
- return Failure::kErrored;
+ bool errored = false;
- auto decl = sync(Token::Type::kSemicolon, [&]() -> Maybe<bool> {
- auto gv = global_variable_decl(attrs.value);
- if (gv.errored)
- return Failure::kErrored;
- if (gv.matched) {
- if (!expect("variable declaration", Token::Type::kSemicolon))
+ auto attrs = attribute_list();
+ if (attrs.errored) {
+ errored = true;
+ }
+ if (!continue_parsing()) {
return Failure::kErrored;
-
- builder_.AST().AddGlobalVariable(gv.value);
- return true;
}
- auto gc = global_constant_decl(attrs.value);
- if (gc.errored)
- return Failure::kErrored;
+ auto decl = sync(Token::Type::kSemicolon, [&]() -> Maybe<bool> {
+ auto gv = global_variable_decl(attrs.value);
+ if (gv.errored) {
+ return Failure::kErrored;
+ }
+ if (gv.matched) {
+ if (!expect("variable declaration", Token::Type::kSemicolon)) {
+ return Failure::kErrored;
+ }
- if (gc.matched) {
- if (!expect("let declaration", Token::Type::kSemicolon))
- return Failure::kErrored;
+ builder_.AST().AddGlobalVariable(gv.value);
+ return true;
+ }
+
+ auto gc = global_constant_decl(attrs.value);
+ if (gc.errored) {
+ return Failure::kErrored;
+ }
+
+ if (gc.matched) {
+ if (!expect("let declaration", Token::Type::kSemicolon)) {
+ return Failure::kErrored;
+ }
+
+ builder_.AST().AddGlobalVariable(gc.value);
+ return true;
+ }
+
+ auto ta = type_alias();
+ if (ta.errored) {
+ return Failure::kErrored;
+ }
- builder_.AST().AddGlobalVariable(gc.value);
- return true;
+ if (ta.matched) {
+ if (!expect("type alias", Token::Type::kSemicolon)) {
+ return Failure::kErrored;
+ }
+
+ builder_.AST().AddTypeDecl(ta.value);
+ return true;
+ }
+
+ auto str = struct_decl();
+ if (str.errored) {
+ return Failure::kErrored;
+ }
+
+ if (str.matched) {
+ builder_.AST().AddTypeDecl(str.value);
+ return true;
+ }
+
+ return Failure::kNoMatch;
+ });
+
+ if (decl.errored) {
+ errored = true;
+ }
+ if (decl.matched) {
+ return expect_attributes_consumed(attrs.value);
}
- auto ta = type_alias();
- if (ta.errored)
- return Failure::kErrored;
+ auto func = function_decl(attrs.value);
+ if (func.errored) {
+ errored = true;
+ }
+ if (func.matched) {
+ builder_.AST().AddFunction(func.value);
+ return true;
+ }
- if (ta.matched) {
- if (!expect("type alias", Token::Type::kSemicolon))
+ if (errored) {
return Failure::kErrored;
-
- builder_.AST().AddTypeDecl(ta.value);
- return true;
}
- auto str = struct_decl();
- if (str.errored)
- return Failure::kErrored;
+ // Invalid syntax found - try and determine the best error message
- if (str.matched) {
- builder_.AST().AddTypeDecl(str.value);
- return true;
+ // We have attributes parsed, but nothing to consume them?
+ if (attrs.value.size() > 0) {
+ return add_error(next(), "expected declaration after attributes");
}
- return Failure::kNoMatch;
- });
-
- if (decl.errored) {
- errored = true;
- }
- if (decl.matched) {
- return expect_attributes_consumed(attrs.value);
- }
-
- auto func = function_decl(attrs.value);
- if (func.errored) {
- errored = true;
- }
- if (func.matched) {
- builder_.AST().AddFunction(func.value);
- return true;
- }
-
- if (errored) {
- return Failure::kErrored;
- }
-
- // Invalid syntax found - try and determine the best error message
-
- // We have attributes parsed, but nothing to consume them?
- if (attrs.value.size() > 0) {
- return add_error(next(), "expected declaration after attributes");
- }
-
- // We have a statement outside of a function?
- auto t = peek();
- auto stat = without_error([&] { return statement(); });
- if (stat.matched) {
- // Attempt to jump to the next '}' - the function might have just been
- // missing an opening line.
- sync_to(Token::Type::kBraceRight, true);
- return add_error(t, "statement found outside of function body");
- }
- if (!stat.errored) {
- // No match, no error - the parser might not have progressed.
- // Ensure we always make _some_ forward progress.
- next();
- }
+ // We have a statement outside of a function?
+ auto t = peek();
+ auto stat = without_error([&] { return statement(); });
+ if (stat.matched) {
+ // Attempt to jump to the next '}' - the function might have just been
+ // missing an opening line.
+ sync_to(Token::Type::kBraceRight, true);
+ return add_error(t, "statement found outside of function body");
+ }
+ if (!stat.errored) {
+ // No match, no error - the parser might not have progressed.
+ // Ensure we always make _some_ forward progress.
+ next();
+ }
- // The token might itself be an error.
- if (handle_error(t)) {
- return Failure::kErrored;
- }
+ // The token might itself be an error.
+ if (handle_error(t)) {
+ return Failure::kErrored;
+ }
- // Exhausted all attempts to make sense of where we're at.
- // Spew a generic error.
+ // Exhausted all attempts to make sense of where we're at.
+ // Return a no-match
- return add_error(t, "unexpected token");
+ return Failure::kNoMatch;
}
// global_variable_decl
// : variable_attribute_list* variable_decl
// | variable_attribute_list* variable_decl EQUAL const_expr
-Maybe<const ast::Variable*> ParserImpl::global_variable_decl(
- ast::AttributeList& attrs) {
- auto decl = variable_decl();
- if (decl.errored)
- return Failure::kErrored;
- if (!decl.matched)
- return Failure::kNoMatch;
+Maybe<const ast::Variable*> ParserImpl::global_variable_decl(ast::AttributeList& attrs) {
+ auto decl = variable_decl();
+ if (decl.errored) {
+ return Failure::kErrored;
+ }
+ if (!decl.matched) {
+ return Failure::kNoMatch;
+ }
+
+ const ast::Expression* constructor = nullptr;
+ if (match(Token::Type::kEqual)) {
+ auto expr = expect_const_expr();
+ if (expr.errored) {
+ return Failure::kErrored;
+ }
+ constructor = expr.value;
+ }
- const ast::Expression* constructor = nullptr;
- if (match(Token::Type::kEqual)) {
- auto expr = expect_const_expr();
- if (expr.errored)
- return Failure::kErrored;
- constructor = expr.value;
- }
-
- return create<ast::Variable>(
- decl->source, // source
- builder_.Symbols().Register(decl->name), // symbol
- decl->storage_class, // storage class
- decl->access, // access control
- decl->type, // type
- false, // is_const
- false, // is_overridable
- constructor, // constructor
- std::move(attrs)); // attributes
+ return create<ast::Variable>(decl->source, // source
+ builder_.Symbols().Register(decl->name), // symbol
+ decl->storage_class, // storage class
+ decl->access, // access control
+ decl->type, // type
+ false, // is_const
+ false, // is_overridable
+ constructor, // constructor
+ std::move(attrs)); // attributes
}
// global_constant_decl :
@@ -477,253 +558,268 @@ Maybe<const ast::Variable*> ParserImpl::global_variable_decl(
// | attribute* override (ident | variable_ident_decl) (equal expression)?
// global_const_initializer
// : EQUAL const_expr
-Maybe<const ast::Variable*> ParserImpl::global_constant_decl(
- ast::AttributeList& attrs) {
- bool is_overridable = false;
- const char* use = nullptr;
- if (match(Token::Type::kLet)) {
- use = "let declaration";
- } else if (match(Token::Type::kOverride)) {
- use = "override declaration";
- is_overridable = true;
- } else {
- return Failure::kNoMatch;
- }
+Maybe<const ast::Variable*> ParserImpl::global_constant_decl(ast::AttributeList& attrs) {
+ bool is_overridable = false;
+ const char* use = nullptr;
+ if (match(Token::Type::kLet)) {
+ use = "let declaration";
+ } else if (match(Token::Type::kOverride)) {
+ use = "override declaration";
+ is_overridable = true;
+ } else {
+ return Failure::kNoMatch;
+ }
- auto decl = expect_variable_ident_decl(use, /* allow_inferred = */ true);
- if (decl.errored)
- return Failure::kErrored;
+ auto decl = expect_variable_ident_decl(use, /* allow_inferred = */ true);
+ if (decl.errored) {
+ return Failure::kErrored;
+ }
- const ast::Expression* initializer = nullptr;
- if (match(Token::Type::kEqual)) {
- auto init = expect_const_expr();
- if (init.errored) {
- return Failure::kErrored;
+ const ast::Expression* initializer = nullptr;
+ if (match(Token::Type::kEqual)) {
+ auto init = expect_const_expr();
+ if (init.errored) {
+ return Failure::kErrored;
+ }
+ initializer = std::move(init.value);
}
- initializer = std::move(init.value);
- }
- return create<ast::Variable>(
- decl->source, // source
- builder_.Symbols().Register(decl->name), // symbol
- ast::StorageClass::kNone, // storage class
- ast::Access::kUndefined, // access control
- decl->type, // type
- true, // is_const
- is_overridable, // is_overridable
- initializer, // constructor
- std::move(attrs)); // attributes
+ return create<ast::Variable>(decl->source, // source
+ builder_.Symbols().Register(decl->name), // symbol
+ ast::StorageClass::kNone, // storage class
+ ast::Access::kUndefined, // access control
+ decl->type, // type
+ true, // is_const
+ is_overridable, // is_overridable
+ initializer, // constructor
+ std::move(attrs)); // attributes
}
// variable_decl
// : VAR variable_qualifier? variable_ident_decl
Maybe<ParserImpl::VarDeclInfo> ParserImpl::variable_decl(bool allow_inferred) {
- Source source;
- if (!match(Token::Type::kVar, &source))
- return Failure::kNoMatch;
+ Source source;
+ if (!match(Token::Type::kVar, &source)) {
+ return Failure::kNoMatch;
+ }
- VariableQualifier vq;
- auto explicit_vq = variable_qualifier();
- if (explicit_vq.errored)
- return Failure::kErrored;
- if (explicit_vq.matched) {
- vq = explicit_vq.value;
- }
+ VariableQualifier vq;
+ auto explicit_vq = variable_qualifier();
+ if (explicit_vq.errored) {
+ return Failure::kErrored;
+ }
+ if (explicit_vq.matched) {
+ vq = explicit_vq.value;
+ }
- auto decl =
- expect_variable_ident_decl("variable declaration", allow_inferred);
- if (decl.errored)
- return Failure::kErrored;
+ auto decl = expect_variable_ident_decl("variable declaration", allow_inferred);
+ if (decl.errored) {
+ return Failure::kErrored;
+ }
- return VarDeclInfo{decl->source, decl->name, vq.storage_class, vq.access,
- decl->type};
+ return VarDeclInfo{decl->source, decl->name, vq.storage_class, vq.access, decl->type};
}
-// texture_sampler_types
-// : sampler_type
-// | depth_texture_type
-// | sampled_texture_type LESS_THAN type_decl GREATER_THAN
-// | multisampled_texture_type LESS_THAN type_decl GREATER_THAN
-// | storage_texture_type LESS_THAN texel_format
+// texture_samplers
+// : sampler
+// | depth_texture
+// | sampled_texture LESS_THAN type_decl GREATER_THAN
+// | multisampled_texture LESS_THAN type_decl GREATER_THAN
+// | storage_texture LESS_THAN texel_format
// COMMA access GREATER_THAN
-Maybe<const ast::Type*> ParserImpl::texture_sampler_types() {
- auto type = sampler_type();
- if (type.matched)
- return type;
+Maybe<const ast::Type*> ParserImpl::texture_samplers() {
+ auto type = sampler();
+ if (type.matched) {
+ return type;
+ }
- type = depth_texture_type();
- if (type.matched)
- return type;
+ type = depth_texture();
+ if (type.matched) {
+ return type;
+ }
- type = external_texture_type();
- if (type.matched)
- return type.value;
+ type = external_texture();
+ if (type.matched) {
+ return type.value;
+ }
- auto source_range = make_source_range();
+ auto source_range = make_source_range();
- auto dim = sampled_texture_type();
- if (dim.matched) {
- const char* use = "sampled texture type";
+ auto dim = sampled_texture();
+ if (dim.matched) {
+ const char* use = "sampled texture type";
- auto subtype = expect_lt_gt_block(use, [&] { return expect_type(use); });
- if (subtype.errored)
- return Failure::kErrored;
+ auto subtype = expect_lt_gt_block(use, [&] { return expect_type(use); });
+ if (subtype.errored) {
+ return Failure::kErrored;
+ }
- return builder_.ty.sampled_texture(source_range, dim.value, subtype.value);
- }
+ return builder_.ty.sampled_texture(source_range, dim.value, subtype.value);
+ }
- auto ms_dim = multisampled_texture_type();
- if (ms_dim.matched) {
- const char* use = "multisampled texture type";
+ auto ms_dim = multisampled_texture();
+ if (ms_dim.matched) {
+ const char* use = "multisampled texture type";
- auto subtype = expect_lt_gt_block(use, [&] { return expect_type(use); });
- if (subtype.errored)
- return Failure::kErrored;
+ auto subtype = expect_lt_gt_block(use, [&] { return expect_type(use); });
+ if (subtype.errored) {
+ return Failure::kErrored;
+ }
- return builder_.ty.multisampled_texture(source_range, ms_dim.value,
- subtype.value);
- }
+ return builder_.ty.multisampled_texture(source_range, ms_dim.value, subtype.value);
+ }
- auto storage = storage_texture_type();
- if (storage.matched) {
- const char* use = "storage texture type";
- using StorageTextureInfo =
- std::pair<tint::ast::TexelFormat, tint::ast::Access>;
- auto params = expect_lt_gt_block(use, [&]() -> Expect<StorageTextureInfo> {
- auto format = expect_texel_format(use);
- if (format.errored) {
- return Failure::kErrored;
- }
+ auto storage = storage_texture();
+ if (storage.matched) {
+ const char* use = "storage texture type";
+ using StorageTextureInfo = std::pair<tint::ast::TexelFormat, tint::ast::Access>;
+ auto params = expect_lt_gt_block(use, [&]() -> Expect<StorageTextureInfo> {
+ auto format = expect_texel_format(use);
+ if (format.errored) {
+ return Failure::kErrored;
+ }
- if (!expect("access control", Token::Type::kComma)) {
- return Failure::kErrored;
- }
+ if (!expect("access control", Token::Type::kComma)) {
+ return Failure::kErrored;
+ }
- auto access = expect_access("access control");
- if (access.errored) {
- return Failure::kErrored;
- }
+ auto access = expect_access("access control");
+ if (access.errored) {
+ return Failure::kErrored;
+ }
- return std::make_pair(format.value, access.value);
- });
+ return std::make_pair(format.value, access.value);
+ });
- if (params.errored) {
- return Failure::kErrored;
- }
+ if (params.errored) {
+ return Failure::kErrored;
+ }
- return builder_.ty.storage_texture(source_range, storage.value,
- params->first, params->second);
- }
+ return builder_.ty.storage_texture(source_range, storage.value, params->first,
+ params->second);
+ }
- return Failure::kNoMatch;
+ return Failure::kNoMatch;
}
-// sampler_type
+// sampler
// : SAMPLER
// | SAMPLER_COMPARISON
-Maybe<const ast::Type*> ParserImpl::sampler_type() {
- Source source;
- if (match(Token::Type::kSampler, &source))
- return builder_.ty.sampler(source, ast::SamplerKind::kSampler);
+Maybe<const ast::Type*> ParserImpl::sampler() {
+ Source source;
+ if (match(Token::Type::kSampler, &source)) {
+ return builder_.ty.sampler(source, ast::SamplerKind::kSampler);
+ }
- if (match(Token::Type::kComparisonSampler, &source))
- return builder_.ty.sampler(source, ast::SamplerKind::kComparisonSampler);
+ if (match(Token::Type::kComparisonSampler, &source)) {
+ return builder_.ty.sampler(source, ast::SamplerKind::kComparisonSampler);
+ }
- return Failure::kNoMatch;
+ return Failure::kNoMatch;
}
-// sampled_texture_type
+// sampled_texture
// : TEXTURE_SAMPLED_1D
// | TEXTURE_SAMPLED_2D
// | TEXTURE_SAMPLED_2D_ARRAY
// | TEXTURE_SAMPLED_3D
// | TEXTURE_SAMPLED_CUBE
// | TEXTURE_SAMPLED_CUBE_ARRAY
-Maybe<const ast::TextureDimension> ParserImpl::sampled_texture_type() {
- if (match(Token::Type::kTextureSampled1d))
- return ast::TextureDimension::k1d;
+Maybe<const ast::TextureDimension> ParserImpl::sampled_texture() {
+ if (match(Token::Type::kTextureSampled1d)) {
+ return ast::TextureDimension::k1d;
+ }
- if (match(Token::Type::kTextureSampled2d))
- return ast::TextureDimension::k2d;
+ if (match(Token::Type::kTextureSampled2d)) {
+ return ast::TextureDimension::k2d;
+ }
- if (match(Token::Type::kTextureSampled2dArray))
- return ast::TextureDimension::k2dArray;
+ if (match(Token::Type::kTextureSampled2dArray)) {
+ return ast::TextureDimension::k2dArray;
+ }
- if (match(Token::Type::kTextureSampled3d))
- return ast::TextureDimension::k3d;
+ if (match(Token::Type::kTextureSampled3d)) {
+ return ast::TextureDimension::k3d;
+ }
- if (match(Token::Type::kTextureSampledCube))
- return ast::TextureDimension::kCube;
+ if (match(Token::Type::kTextureSampledCube)) {
+ return ast::TextureDimension::kCube;
+ }
- if (match(Token::Type::kTextureSampledCubeArray))
- return ast::TextureDimension::kCubeArray;
+ if (match(Token::Type::kTextureSampledCubeArray)) {
+ return ast::TextureDimension::kCubeArray;
+ }
- return Failure::kNoMatch;
+ return Failure::kNoMatch;
}
-// external_texture_type
+// external_texture
// : TEXTURE_EXTERNAL
-Maybe<const ast::Type*> ParserImpl::external_texture_type() {
- Source source;
- if (match(Token::Type::kTextureExternal, &source)) {
- return builder_.ty.external_texture(source);
- }
+Maybe<const ast::Type*> ParserImpl::external_texture() {
+ Source source;
+ if (match(Token::Type::kTextureExternal, &source)) {
+ return builder_.ty.external_texture(source);
+ }
- return Failure::kNoMatch;
+ return Failure::kNoMatch;
}
-// multisampled_texture_type
+// multisampled_texture
// : TEXTURE_MULTISAMPLED_2D
-Maybe<const ast::TextureDimension> ParserImpl::multisampled_texture_type() {
- if (match(Token::Type::kTextureMultisampled2d))
- return ast::TextureDimension::k2d;
+Maybe<const ast::TextureDimension> ParserImpl::multisampled_texture() {
+ if (match(Token::Type::kTextureMultisampled2d)) {
+ return ast::TextureDimension::k2d;
+ }
- return Failure::kNoMatch;
+ return Failure::kNoMatch;
}
-// storage_texture_type
+// storage_texture
// : TEXTURE_STORAGE_1D
// | TEXTURE_STORAGE_2D
// | TEXTURE_STORAGE_2D_ARRAY
// | TEXTURE_STORAGE_3D
-Maybe<const ast::TextureDimension> ParserImpl::storage_texture_type() {
- if (match(Token::Type::kTextureStorage1d))
- return ast::TextureDimension::k1d;
- if (match(Token::Type::kTextureStorage2d))
- return ast::TextureDimension::k2d;
- if (match(Token::Type::kTextureStorage2dArray))
- return ast::TextureDimension::k2dArray;
- if (match(Token::Type::kTextureStorage3d))
- return ast::TextureDimension::k3d;
+Maybe<const ast::TextureDimension> ParserImpl::storage_texture() {
+ if (match(Token::Type::kTextureStorage1d)) {
+ return ast::TextureDimension::k1d;
+ }
+ if (match(Token::Type::kTextureStorage2d)) {
+ return ast::TextureDimension::k2d;
+ }
+ if (match(Token::Type::kTextureStorage2dArray)) {
+ return ast::TextureDimension::k2dArray;
+ }
+ if (match(Token::Type::kTextureStorage3d)) {
+ return ast::TextureDimension::k3d;
+ }
- return Failure::kNoMatch;
+ return Failure::kNoMatch;
}
-// depth_texture_type
+// depth_texture
// : TEXTURE_DEPTH_2D
// | TEXTURE_DEPTH_2D_ARRAY
// | TEXTURE_DEPTH_CUBE
// | TEXTURE_DEPTH_CUBE_ARRAY
// | TEXTURE_DEPTH_MULTISAMPLED_2D
-Maybe<const ast::Type*> ParserImpl::depth_texture_type() {
- Source source;
- if (match(Token::Type::kTextureDepth2d, &source)) {
- return builder_.ty.depth_texture(source, ast::TextureDimension::k2d);
- }
- if (match(Token::Type::kTextureDepth2dArray, &source)) {
- return builder_.ty.depth_texture(source, ast::TextureDimension::k2dArray);
- }
- if (match(Token::Type::kTextureDepthCube, &source)) {
- return builder_.ty.depth_texture(source, ast::TextureDimension::kCube);
- }
- if (match(Token::Type::kTextureDepthCubeArray, &source)) {
- return builder_.ty.depth_texture(source, ast::TextureDimension::kCubeArray);
- }
- if (match(Token::Type::kTextureDepthMultisampled2d, &source)) {
- return builder_.ty.depth_multisampled_texture(source,
- ast::TextureDimension::k2d);
- }
- return Failure::kNoMatch;
+Maybe<const ast::Type*> ParserImpl::depth_texture() {
+ Source source;
+ if (match(Token::Type::kTextureDepth2d, &source)) {
+ return builder_.ty.depth_texture(source, ast::TextureDimension::k2d);
+ }
+ if (match(Token::Type::kTextureDepth2dArray, &source)) {
+ return builder_.ty.depth_texture(source, ast::TextureDimension::k2dArray);
+ }
+ if (match(Token::Type::kTextureDepthCube, &source)) {
+ return builder_.ty.depth_texture(source, ast::TextureDimension::kCube);
+ }
+ if (match(Token::Type::kTextureDepthCubeArray, &source)) {
+ return builder_.ty.depth_texture(source, ast::TextureDimension::kCubeArray);
+ }
+ if (match(Token::Type::kTextureDepthMultisampled2d, &source)) {
+ return builder_.ty.depth_multisampled_texture(source, ast::TextureDimension::k2d);
+ }
+ return Failure::kNoMatch;
}
// texel_format
@@ -744,155 +840,166 @@ Maybe<const ast::Type*> ParserImpl::depth_texture_type() {
// | 'rgba32sint'
// | 'rgba32float'
Expect<ast::TexelFormat> ParserImpl::expect_texel_format(std::string_view use) {
- auto t = next();
- if (t == "rgba8unorm") {
- return ast::TexelFormat::kRgba8Unorm;
- }
- if (t == "rgba8snorm") {
- return ast::TexelFormat::kRgba8Snorm;
- }
- if (t == "rgba8uint") {
- return ast::TexelFormat::kRgba8Uint;
- }
- if (t == "rgba8sint") {
- return ast::TexelFormat::kRgba8Sint;
- }
- if (t == "rgba16uint") {
- return ast::TexelFormat::kRgba16Uint;
- }
- if (t == "rgba16sint") {
- return ast::TexelFormat::kRgba16Sint;
- }
- if (t == "rgba16float") {
- return ast::TexelFormat::kRgba16Float;
- }
- if (t == "r32uint") {
- return ast::TexelFormat::kR32Uint;
- }
- if (t == "r32sint") {
- return ast::TexelFormat::kR32Sint;
- }
- if (t == "r32float") {
- return ast::TexelFormat::kR32Float;
- }
- if (t == "rg32uint") {
- return ast::TexelFormat::kRg32Uint;
- }
- if (t == "rg32sint") {
- return ast::TexelFormat::kRg32Sint;
- }
- if (t == "rg32float") {
- return ast::TexelFormat::kRg32Float;
- }
- if (t == "rgba32uint") {
- return ast::TexelFormat::kRgba32Uint;
- }
- if (t == "rgba32sint") {
- return ast::TexelFormat::kRgba32Sint;
- }
- if (t == "rgba32float") {
- return ast::TexelFormat::kRgba32Float;
- }
- return add_error(t.source(), "invalid format", use);
+ auto t = next();
+ if (t == "rgba8unorm") {
+ return ast::TexelFormat::kRgba8Unorm;
+ }
+ if (t == "rgba8snorm") {
+ return ast::TexelFormat::kRgba8Snorm;
+ }
+ if (t == "rgba8uint") {
+ return ast::TexelFormat::kRgba8Uint;
+ }
+ if (t == "rgba8sint") {
+ return ast::TexelFormat::kRgba8Sint;
+ }
+ if (t == "rgba16uint") {
+ return ast::TexelFormat::kRgba16Uint;
+ }
+ if (t == "rgba16sint") {
+ return ast::TexelFormat::kRgba16Sint;
+ }
+ if (t == "rgba16float") {
+ return ast::TexelFormat::kRgba16Float;
+ }
+ if (t == "r32uint") {
+ return ast::TexelFormat::kR32Uint;
+ }
+ if (t == "r32sint") {
+ return ast::TexelFormat::kR32Sint;
+ }
+ if (t == "r32float") {
+ return ast::TexelFormat::kR32Float;
+ }
+ if (t == "rg32uint") {
+ return ast::TexelFormat::kRg32Uint;
+ }
+ if (t == "rg32sint") {
+ return ast::TexelFormat::kRg32Sint;
+ }
+ if (t == "rg32float") {
+ return ast::TexelFormat::kRg32Float;
+ }
+ if (t == "rgba32uint") {
+ return ast::TexelFormat::kRgba32Uint;
+ }
+ if (t == "rgba32sint") {
+ return ast::TexelFormat::kRgba32Sint;
+ }
+ if (t == "rgba32float") {
+ return ast::TexelFormat::kRgba32Float;
+ }
+ return add_error(t.source(), "invalid format", use);
}
// variable_ident_decl
// : IDENT COLON type_decl
-Expect<ParserImpl::TypedIdentifier> ParserImpl::expect_variable_ident_decl(
- std::string_view use,
- bool allow_inferred) {
- auto ident = expect_ident(use);
- if (ident.errored)
- return Failure::kErrored;
+Expect<ParserImpl::TypedIdentifier> ParserImpl::expect_variable_ident_decl(std::string_view use,
+ bool allow_inferred) {
+ auto ident = expect_ident(use);
+ if (ident.errored) {
+ return Failure::kErrored;
+ }
- if (allow_inferred && !peek_is(Token::Type::kColon)) {
- return TypedIdentifier{nullptr, ident.value, ident.source};
- }
+ if (allow_inferred && !peek_is(Token::Type::kColon)) {
+ return TypedIdentifier{nullptr, ident.value, ident.source};
+ }
- if (!expect(use, Token::Type::kColon))
- return Failure::kErrored;
+ if (!expect(use, Token::Type::kColon)) {
+ return Failure::kErrored;
+ }
- auto t = peek();
- auto type = type_decl();
- if (type.errored)
- return Failure::kErrored;
- if (!type.matched)
- return add_error(t.source(), "invalid type", use);
+ auto t = peek();
+ auto type = type_decl();
+ if (type.errored) {
+ return Failure::kErrored;
+ }
+ if (!type.matched) {
+ return add_error(t.source(), "invalid type", use);
+ }
- return TypedIdentifier{type.value, ident.value, ident.source};
+ return TypedIdentifier{type.value, ident.value, ident.source};
}
Expect<ast::Access> ParserImpl::expect_access(std::string_view use) {
- auto ident = expect_ident(use);
- if (ident.errored)
- return Failure::kErrored;
+ auto ident = expect_ident(use);
+ if (ident.errored) {
+ return Failure::kErrored;
+ }
- if (ident.value == kReadAccess)
- return {ast::Access::kRead, ident.source};
- if (ident.value == kWriteAccess)
- return {ast::Access::kWrite, ident.source};
- if (ident.value == kReadWriteAccess)
- return {ast::Access::kReadWrite, ident.source};
+ if (ident.value == kReadAccess) {
+ return {ast::Access::kRead, ident.source};
+ }
+ if (ident.value == kWriteAccess) {
+ return {ast::Access::kWrite, ident.source};
+ }
+ if (ident.value == kReadWriteAccess) {
+ return {ast::Access::kReadWrite, ident.source};
+ }
- return add_error(ident.source, "invalid value for access control");
+ return add_error(ident.source, "invalid value for access control");
}
// variable_qualifier
// : LESS_THAN storage_class (COMMA access_mode)? GREATER_THAN
Maybe<ParserImpl::VariableQualifier> ParserImpl::variable_qualifier() {
- if (!peek_is(Token::Type::kLessThan)) {
- return Failure::kNoMatch;
- }
-
- auto* use = "variable declaration";
- auto vq = expect_lt_gt_block(use, [&]() -> Expect<VariableQualifier> {
- auto source = make_source_range();
- auto sc = expect_storage_class(use);
- if (sc.errored) {
- return Failure::kErrored;
+ if (!peek_is(Token::Type::kLessThan)) {
+ return Failure::kNoMatch;
}
- if (match(Token::Type::kComma)) {
- auto ac = expect_access(use);
- if (ac.errored) {
+
+ auto* use = "variable declaration";
+ auto vq = expect_lt_gt_block(use, [&]() -> Expect<VariableQualifier> {
+ auto source = make_source_range();
+ auto sc = expect_storage_class(use);
+ if (sc.errored) {
+ return Failure::kErrored;
+ }
+ if (match(Token::Type::kComma)) {
+ auto ac = expect_access(use);
+ if (ac.errored) {
+ return Failure::kErrored;
+ }
+ return VariableQualifier{sc.value, ac.value};
+ }
+ return Expect<VariableQualifier>{VariableQualifier{sc.value, ast::Access::kUndefined},
+ source};
+ });
+
+ if (vq.errored) {
return Failure::kErrored;
- }
- return VariableQualifier{sc.value, ac.value};
}
- return Expect<VariableQualifier>{
- VariableQualifier{sc.value, ast::Access::kUndefined}, source};
- });
- if (vq.errored) {
- return Failure::kErrored;
- }
-
- return vq;
+ return vq;
}
// type_alias
// : TYPE IDENT EQUAL type_decl
Maybe<const ast::Alias*> ParserImpl::type_alias() {
- if (!peek_is(Token::Type::kType))
- return Failure::kNoMatch;
+ if (!peek_is(Token::Type::kType)) {
+ return Failure::kNoMatch;
+ }
- auto t = next();
- const char* use = "type alias";
+ auto t = next();
+ const char* use = "type alias";
- auto name = expect_ident(use);
- if (name.errored)
- return Failure::kErrored;
+ auto name = expect_ident(use);
+ if (name.errored) {
+ return Failure::kErrored;
+ }
- if (!expect(use, Token::Type::kEqual))
- return Failure::kErrored;
+ if (!expect(use, Token::Type::kEqual)) {
+ return Failure::kErrored;
+ }
- auto type = type_decl();
- if (type.errored)
- return Failure::kErrored;
- if (!type.matched)
- return add_error(peek(), "invalid type alias");
+ auto type = type_decl();
+ if (type.errored) {
+ return Failure::kErrored;
+ }
+ if (!type.matched) {
+ return add_error(peek(), "invalid type alias");
+ }
- return builder_.ty.alias(make_source_range_from(t.source()), name.value,
- type.value);
+ return builder_.ty.alias(make_source_range_from(t.source()), name.value, type.value);
}
// type_decl
@@ -918,198 +1025,208 @@ Maybe<const ast::Alias*> ParserImpl::type_alias() {
// | MAT4x2 LESS_THAN type_decl GREATER_THAN
// | MAT4x3 LESS_THAN type_decl GREATER_THAN
// | MAT4x4 LESS_THAN type_decl GREATER_THAN
-// | texture_sampler_types
+// | texture_samplers
Maybe<const ast::Type*> ParserImpl::type_decl() {
- auto t = peek();
- Source source;
- if (match(Token::Type::kIdentifier, &source)) {
- return builder_.create<ast::TypeName>(
- source, builder_.Symbols().Register(t.to_str()));
- }
+ auto t = peek();
+ Source source;
+ if (match(Token::Type::kIdentifier, &source)) {
+ return builder_.create<ast::TypeName>(source, builder_.Symbols().Register(t.to_str()));
+ }
+
+ if (match(Token::Type::kBool, &source)) {
+ return builder_.ty.bool_(source);
+ }
- if (match(Token::Type::kBool, &source))
- return builder_.ty.bool_(source);
+ if (match(Token::Type::kF16, &source)) {
+ return builder_.ty.f16(source);
+ }
- if (match(Token::Type::kF32, &source))
- return builder_.ty.f32(source);
+ if (match(Token::Type::kF32, &source)) {
+ return builder_.ty.f32(source);
+ }
- if (match(Token::Type::kI32, &source))
- return builder_.ty.i32(source);
+ if (match(Token::Type::kI32, &source)) {
+ return builder_.ty.i32(source);
+ }
- if (match(Token::Type::kU32, &source))
- return builder_.ty.u32(source);
+ if (match(Token::Type::kU32, &source)) {
+ return builder_.ty.u32(source);
+ }
- if (t.IsVector()) {
- next(); // Consume the peek
- return expect_type_decl_vector(t);
- }
+ if (t.IsVector()) {
+ next(); // Consume the peek
+ return expect_type_decl_vector(t);
+ }
- if (match(Token::Type::kPtr)) {
- return expect_type_decl_pointer(t);
- }
+ if (match(Token::Type::kPtr)) {
+ return expect_type_decl_pointer(t);
+ }
- if (match(Token::Type::kAtomic)) {
- return expect_type_decl_atomic(t);
- }
+ if (match(Token::Type::kAtomic)) {
+ return expect_type_decl_atomic(t);
+ }
- if (match(Token::Type::kArray, &source)) {
- return expect_type_decl_array(t);
- }
+ if (match(Token::Type::kArray, &source)) {
+ return expect_type_decl_array(t);
+ }
- if (t.IsMatrix()) {
- next(); // Consume the peek
- return expect_type_decl_matrix(t);
- }
+ if (t.IsMatrix()) {
+ next(); // Consume the peek
+ return expect_type_decl_matrix(t);
+ }
- auto texture_or_sampler = texture_sampler_types();
- if (texture_or_sampler.errored)
- return Failure::kErrored;
- if (texture_or_sampler.matched)
- return texture_or_sampler;
+ auto texture_or_sampler = texture_samplers();
+ if (texture_or_sampler.errored) {
+ return Failure::kErrored;
+ }
+ if (texture_or_sampler.matched) {
+ return texture_or_sampler;
+ }
- return Failure::kNoMatch;
+ return Failure::kNoMatch;
}
Expect<const ast::Type*> ParserImpl::expect_type(std::string_view use) {
- auto type = type_decl();
- if (type.errored)
- return Failure::kErrored;
- if (!type.matched)
- return add_error(peek().source(), "invalid type", use);
- return type.value;
+ auto type = type_decl();
+ if (type.errored) {
+ return Failure::kErrored;
+ }
+ if (!type.matched) {
+ return add_error(peek().source(), "invalid type", use);
+ }
+ return type.value;
}
Expect<const ast::Type*> ParserImpl::expect_type_decl_pointer(Token t) {
- const char* use = "ptr declaration";
+ const char* use = "ptr declaration";
- auto storage_class = ast::StorageClass::kNone;
- auto access = ast::Access::kUndefined;
+ auto storage_class = ast::StorageClass::kNone;
+ auto access = ast::Access::kUndefined;
- auto subtype = expect_lt_gt_block(use, [&]() -> Expect<const ast::Type*> {
- auto sc = expect_storage_class(use);
- if (sc.errored) {
- return Failure::kErrored;
- }
- storage_class = sc.value;
+ auto subtype = expect_lt_gt_block(use, [&]() -> Expect<const ast::Type*> {
+ auto sc = expect_storage_class(use);
+ if (sc.errored) {
+ return Failure::kErrored;
+ }
+ storage_class = sc.value;
- if (!expect(use, Token::Type::kComma)) {
- return Failure::kErrored;
- }
+ if (!expect(use, Token::Type::kComma)) {
+ return Failure::kErrored;
+ }
- auto type = expect_type(use);
- if (type.errored) {
- return Failure::kErrored;
- }
+ auto type = expect_type(use);
+ if (type.errored) {
+ return Failure::kErrored;
+ }
- if (match(Token::Type::kComma)) {
- auto ac = expect_access("access control");
- if (ac.errored) {
- return Failure::kErrored;
- }
- access = ac.value;
- }
+ if (match(Token::Type::kComma)) {
+ auto ac = expect_access("access control");
+ if (ac.errored) {
+ return Failure::kErrored;
+ }
+ access = ac.value;
+ }
- return type.value;
- });
+ return type.value;
+ });
- if (subtype.errored) {
- return Failure::kErrored;
- }
+ if (subtype.errored) {
+ return Failure::kErrored;
+ }
- return builder_.ty.pointer(make_source_range_from(t.source()), subtype.value,
- storage_class, access);
+ return builder_.ty.pointer(make_source_range_from(t.source()), subtype.value, storage_class,
+ access);
}
Expect<const ast::Type*> ParserImpl::expect_type_decl_atomic(Token t) {
- const char* use = "atomic declaration";
+ const char* use = "atomic declaration";
- auto subtype = expect_lt_gt_block(use, [&] { return expect_type(use); });
- if (subtype.errored) {
- return Failure::kErrored;
- }
+ auto subtype = expect_lt_gt_block(use, [&] { return expect_type(use); });
+ if (subtype.errored) {
+ return Failure::kErrored;
+ }
- return builder_.ty.atomic(make_source_range_from(t.source()), subtype.value);
+ return builder_.ty.atomic(make_source_range_from(t.source()), subtype.value);
}
Expect<const ast::Type*> ParserImpl::expect_type_decl_vector(Token t) {
- uint32_t count = 2;
- if (t.Is(Token::Type::kVec3)) {
- count = 3;
- } else if (t.Is(Token::Type::kVec4)) {
- count = 4;
- }
+ uint32_t count = 2;
+ if (t.Is(Token::Type::kVec3)) {
+ count = 3;
+ } else if (t.Is(Token::Type::kVec4)) {
+ count = 4;
+ }
- const ast::Type* subtype = nullptr;
- if (peek_is(Token::Type::kLessThan)) {
- const char* use = "vector";
- auto ty = expect_lt_gt_block(use, [&] { return expect_type(use); });
- if (ty.errored) {
- return Failure::kErrored;
+ const ast::Type* subtype = nullptr;
+ if (peek_is(Token::Type::kLessThan)) {
+ const char* use = "vector";
+ auto ty = expect_lt_gt_block(use, [&] { return expect_type(use); });
+ if (ty.errored) {
+ return Failure::kErrored;
+ }
+ subtype = ty.value;
}
- subtype = ty.value;
- }
- return builder_.ty.vec(make_source_range_from(t.source()), subtype, count);
+ return builder_.ty.vec(make_source_range_from(t.source()), subtype, count);
}
Expect<const ast::Type*> ParserImpl::expect_type_decl_array(Token t) {
- const char* use = "array declaration";
+ const char* use = "array declaration";
- const ast::Expression* size = nullptr;
+ const ast::Expression* size = nullptr;
- auto subtype = expect_lt_gt_block(use, [&]() -> Expect<const ast::Type*> {
- auto type = expect_type(use);
- if (type.errored)
- return Failure::kErrored;
+ auto subtype = expect_lt_gt_block(use, [&]() -> Expect<const ast::Type*> {
+ auto type = expect_type(use);
+ if (type.errored) {
+ return Failure::kErrored;
+ }
- if (match(Token::Type::kComma)) {
- auto expr = primary_expression();
- if (expr.errored) {
- return Failure::kErrored;
- } else if (!expr.matched) {
- return add_error(peek(), "expected array size expression");
- }
+ if (match(Token::Type::kComma)) {
+ auto expr = primary_expression();
+ if (expr.errored) {
+ return Failure::kErrored;
+ } else if (!expr.matched) {
+ return add_error(peek(), "expected array size expression");
+ }
- size = std::move(expr.value);
- }
+ size = std::move(expr.value);
+ }
- return type.value;
- });
+ return type.value;
+ });
- if (subtype.errored) {
- return Failure::kErrored;
- }
+ if (subtype.errored) {
+ return Failure::kErrored;
+ }
- return builder_.ty.array(make_source_range_from(t.source()), subtype.value,
- size);
+ return builder_.ty.array(make_source_range_from(t.source()), subtype.value, size);
}
Expect<const ast::Type*> ParserImpl::expect_type_decl_matrix(Token t) {
- uint32_t rows = 2;
- uint32_t columns = 2;
- if (t.IsMat3xN()) {
- columns = 3;
- } else if (t.IsMat4xN()) {
- columns = 4;
- }
- if (t.IsMatNx3()) {
- rows = 3;
- } else if (t.IsMatNx4()) {
- rows = 4;
- }
-
- const ast::Type* subtype = nullptr;
- if (peek_is(Token::Type::kLessThan)) {
- const char* use = "matrix";
- auto ty = expect_lt_gt_block(use, [&] { return expect_type(use); });
- if (ty.errored) {
- return Failure::kErrored;
- }
- subtype = ty.value;
- }
-
- return builder_.ty.mat(make_source_range_from(t.source()), subtype, columns,
- rows);
+ uint32_t rows = 2;
+ uint32_t columns = 2;
+ if (t.IsMat3xN()) {
+ columns = 3;
+ } else if (t.IsMat4xN()) {
+ columns = 4;
+ }
+ if (t.IsMatNx3()) {
+ rows = 3;
+ } else if (t.IsMatNx4()) {
+ rows = 4;
+ }
+
+ const ast::Type* subtype = nullptr;
+ if (peek_is(Token::Type::kLessThan)) {
+ const char* use = "matrix";
+ auto ty = expect_lt_gt_block(use, [&] { return expect_type(use); });
+ if (ty.errored) {
+ return Failure::kErrored;
+ }
+ subtype = ty.value;
+ }
+
+ return builder_.ty.mat(make_source_range_from(t.source()), subtype, columns, rows);
}
// storage_class
@@ -1120,140 +1237,147 @@ Expect<const ast::Type*> ParserImpl::expect_type_decl_matrix(Token t) {
// | STORAGE
// | PRIVATE
// | FUNCTION
-Expect<ast::StorageClass> ParserImpl::expect_storage_class(
- std::string_view use) {
- auto source = peek().source();
+Expect<ast::StorageClass> ParserImpl::expect_storage_class(std::string_view use) {
+ auto source = peek().source();
- if (match(Token::Type::kUniform))
- return {ast::StorageClass::kUniform, source};
+ if (match(Token::Type::kUniform)) {
+ return {ast::StorageClass::kUniform, source};
+ }
- if (match(Token::Type::kWorkgroup))
- return {ast::StorageClass::kWorkgroup, source};
+ if (match(Token::Type::kWorkgroup)) {
+ return {ast::StorageClass::kWorkgroup, source};
+ }
- if (match(Token::Type::kStorage))
- return {ast::StorageClass::kStorage, source};
+ if (match(Token::Type::kStorage)) {
+ return {ast::StorageClass::kStorage, source};
+ }
- if (match(Token::Type::kPrivate))
- return {ast::StorageClass::kPrivate, source};
+ if (match(Token::Type::kPrivate)) {
+ return {ast::StorageClass::kPrivate, source};
+ }
- if (match(Token::Type::kFunction))
- return {ast::StorageClass::kFunction, source};
+ if (match(Token::Type::kFunction)) {
+ return {ast::StorageClass::kFunction, source};
+ }
- return add_error(source, "invalid storage class", use);
+ return add_error(source, "invalid storage class", use);
}
// struct_decl
// : STRUCT IDENT struct_body_decl
Maybe<const ast::Struct*> ParserImpl::struct_decl() {
- auto t = peek();
- auto source = t.source();
+ auto t = peek();
+ auto source = t.source();
- if (!match(Token::Type::kStruct))
- return Failure::kNoMatch;
+ if (!match(Token::Type::kStruct)) {
+ return Failure::kNoMatch;
+ }
- auto name = expect_ident("struct declaration");
- if (name.errored)
- return Failure::kErrored;
+ auto name = expect_ident("struct declaration");
+ if (name.errored) {
+ return Failure::kErrored;
+ }
- auto body = expect_struct_body_decl();
- if (body.errored)
- return Failure::kErrored;
+ auto body = expect_struct_body_decl();
+ if (body.errored) {
+ return Failure::kErrored;
+ }
- auto sym = builder_.Symbols().Register(name.value);
- return create<ast::Struct>(source, sym, std::move(body.value),
- ast::AttributeList{});
+ auto sym = builder_.Symbols().Register(name.value);
+ return create<ast::Struct>(source, sym, std::move(body.value), ast::AttributeList{});
}
// struct_body_decl
// : BRACE_LEFT (struct_member COMMA)* struct_member COMMA? BRACE_RIGHT
Expect<ast::StructMemberList> ParserImpl::expect_struct_body_decl() {
- return expect_brace_block(
- "struct declaration", [&]() -> Expect<ast::StructMemberList> {
+ return expect_brace_block("struct declaration", [&]() -> Expect<ast::StructMemberList> {
ast::StructMemberList members;
bool errored = false;
while (continue_parsing()) {
- // Check for the end of the list.
- auto t = peek();
- if (!t.IsIdentifier() && !t.Is(Token::Type::kAttr)) {
- break;
- }
+ // Check for the end of the list.
+ auto t = peek();
+ if (!t.IsIdentifier() && !t.Is(Token::Type::kAttr)) {
+ break;
+ }
- auto member = expect_struct_member();
- if (member.errored) {
- errored = true;
- if (!sync_to(Token::Type::kComma, /* consume: */ false)) {
- return Failure::kErrored;
+ auto member = expect_struct_member();
+ if (member.errored) {
+ errored = true;
+ if (!sync_to(Token::Type::kComma, /* consume: */ false)) {
+ return Failure::kErrored;
+ }
+ } else {
+ members.push_back(member.value);
+ }
+
+ // TODO(crbug.com/tint/1475): Remove support for semicolons.
+ if (auto sc = peek(); sc.Is(Token::Type::kSemicolon)) {
+ deprecated(sc.source(), "struct members should be separated with commas");
+ next();
+ continue;
+ }
+ if (!match(Token::Type::kComma)) {
+ break;
}
- } else {
- members.push_back(member.value);
- }
-
- // TODO(crbug.com/tint/1475): Remove support for semicolons.
- if (auto sc = peek(); sc.Is(Token::Type::kSemicolon)) {
- deprecated(sc.source(),
- "struct members should be separated with commas");
- next();
- continue;
- }
- if (!match(Token::Type::kComma))
- break;
}
if (errored) {
- return Failure::kErrored;
+ return Failure::kErrored;
}
return members;
- });
+ });
}
// struct_member
// : attribute* variable_ident_decl
Expect<ast::StructMember*> ParserImpl::expect_struct_member() {
- auto attrs = attribute_list();
- if (attrs.errored) {
- return Failure::kErrored;
- }
+ auto attrs = attribute_list();
+ if (attrs.errored) {
+ return Failure::kErrored;
+ }
- auto decl = expect_variable_ident_decl("struct member");
- if (decl.errored)
- return Failure::kErrored;
+ auto decl = expect_variable_ident_decl("struct member");
+ if (decl.errored) {
+ return Failure::kErrored;
+ }
- return create<ast::StructMember>(decl->source,
- builder_.Symbols().Register(decl->name),
- decl->type, std::move(attrs.value));
+ return create<ast::StructMember>(decl->source, builder_.Symbols().Register(decl->name),
+ decl->type, std::move(attrs.value));
}
// function_decl
// : function_header body_stmt
-Maybe<const ast::Function*> ParserImpl::function_decl(
- ast::AttributeList& attrs) {
- auto header = function_header();
- if (header.errored) {
- if (sync_to(Token::Type::kBraceLeft, /* consume: */ false)) {
- // There were errors in the function header, but the parser has managed to
- // resynchronize with the opening brace. As there's no outer
- // synchronization token for function declarations, attempt to parse the
- // function body. The AST isn't used as we've already errored, but this
- // catches any errors inside the body, and can help keep the parser in
- // sync.
- expect_body_stmt();
+Maybe<const ast::Function*> ParserImpl::function_decl(ast::AttributeList& attrs) {
+ auto header = function_header();
+ if (header.errored) {
+ if (sync_to(Token::Type::kBraceLeft, /* consume: */ false)) {
+ // There were errors in the function header, but the parser has managed to
+ // resynchronize with the opening brace. As there's no outer
+ // synchronization token for function declarations, attempt to parse the
+ // function body. The AST isn't used as we've already errored, but this
+ // catches any errors inside the body, and can help keep the parser in
+ // sync.
+ expect_body_stmt();
+ }
+ return Failure::kErrored;
+ }
+ if (!header.matched) {
+ return Failure::kNoMatch;
}
- return Failure::kErrored;
- }
- if (!header.matched)
- return Failure::kNoMatch;
- bool errored = false;
+ bool errored = false;
- auto body = expect_body_stmt();
- if (body.errored)
- errored = true;
+ auto body = expect_body_stmt();
+ if (body.errored) {
+ errored = true;
+ }
- if (errored)
- return Failure::kErrored;
+ if (errored) {
+ return Failure::kErrored;
+ }
- return create<ast::Function>(
- header->source, builder_.Symbols().Register(header->name), header->params,
- header->return_type, body.value, attrs, header->return_type_attributes);
+ return create<ast::Function>(header->source, builder_.Symbols().Register(header->name),
+ header->params, header->return_type, body.value, attrs,
+ header->return_type_attributes);
}
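
The header-error path above parses and then discards the function body purely to report body errors and keep the token stream in sync. A minimal sketch of that parse-and-discard recovery idea, using hypothetical header_ok/resynced_on_brace/parse_body stand-ins rather than the real ParserImpl interface:

#include <optional>

// Hypothetical stand-in for a parsed function body.
struct Body {};

// Sketch of the recovery above: if the header fails but the parser has
// resynchronized on '{', parse the body anyway so its errors are reported and
// the token stream stays consistent, then discard the result because the
// declaration as a whole has already failed.
std::optional<Body> parse_function_decl_sketch(bool header_ok,
                                               bool resynced_on_brace,
                                               Body (*parse_body)()) {
    if (!header_ok) {
        if (resynced_on_brace) {
            parse_body();  // parsed for diagnostics and sync only; discarded
        }
        return std::nullopt;  // the declaration itself is still an error
    }
    return parse_body();
}
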
// function_header
@@ -1262,109 +1386,111 @@ Maybe<const ast::Function*> ParserImpl::function_decl(
// :
// | ARROW attribute_list* type_decl
Maybe<ParserImpl::FunctionHeader> ParserImpl::function_header() {
- Source source;
- if (!match(Token::Type::kFn, &source)) {
- return Failure::kNoMatch;
- }
+ Source source;
+ if (!match(Token::Type::kFn, &source)) {
+ return Failure::kNoMatch;
+ }
- const char* use = "function declaration";
- bool errored = false;
+ const char* use = "function declaration";
+ bool errored = false;
- auto name = expect_ident(use);
- if (name.errored) {
- errored = true;
- if (!sync_to(Token::Type::kParenLeft, /* consume: */ false)) {
- return Failure::kErrored;
+ auto name = expect_ident(use);
+ if (name.errored) {
+ errored = true;
+ if (!sync_to(Token::Type::kParenLeft, /* consume: */ false)) {
+ return Failure::kErrored;
+ }
}
- }
- auto params = expect_paren_block(use, [&] { return expect_param_list(); });
- if (params.errored) {
- errored = true;
- if (!synchronized_) {
- return Failure::kErrored;
+ auto params = expect_paren_block(use, [&] { return expect_param_list(); });
+ if (params.errored) {
+ errored = true;
+ if (!synchronized_) {
+ return Failure::kErrored;
+ }
}
- }
- const ast::Type* return_type = nullptr;
- ast::AttributeList return_attributes;
+ const ast::Type* return_type = nullptr;
+ ast::AttributeList return_attributes;
- if (match(Token::Type::kArrow)) {
- auto attrs = attribute_list();
- if (attrs.errored) {
- return Failure::kErrored;
- }
- return_attributes = attrs.value;
+ if (match(Token::Type::kArrow)) {
+ auto attrs = attribute_list();
+ if (attrs.errored) {
+ return Failure::kErrored;
+ }
+ return_attributes = attrs.value;
- auto type = type_decl();
- if (type.errored) {
- errored = true;
- } else if (!type.matched) {
- return add_error(peek(), "unable to determine function return type");
+ auto type = type_decl();
+ if (type.errored) {
+ errored = true;
+ } else if (!type.matched) {
+ return add_error(peek(), "unable to determine function return type");
+ } else {
+ return_type = type.value;
+ }
} else {
- return_type = type.value;
+ return_type = builder_.ty.void_();
}
- } else {
- return_type = builder_.ty.void_();
- }
- if (errored) {
- return Failure::kErrored;
- }
+ if (errored) {
+ return Failure::kErrored;
+ }
- return FunctionHeader{source, name.value, std::move(params.value),
- return_type, std::move(return_attributes)};
+ return FunctionHeader{source, name.value, std::move(params.value), return_type,
+ std::move(return_attributes)};
}
// param_list
// :
// | (param COMMA)* param COMMA?
Expect<ast::VariableList> ParserImpl::expect_param_list() {
- ast::VariableList ret;
- while (continue_parsing()) {
- // Check for the end of the list.
- auto t = peek();
- if (!t.IsIdentifier() && !t.Is(Token::Type::kAttr)) {
- break;
- }
+ ast::VariableList ret;
+ while (continue_parsing()) {
+ // Check for the end of the list.
+ auto t = peek();
+ if (!t.IsIdentifier() && !t.Is(Token::Type::kAttr)) {
+ break;
+ }
- auto param = expect_param();
- if (param.errored)
- return Failure::kErrored;
- ret.push_back(param.value);
+ auto param = expect_param();
+ if (param.errored) {
+ return Failure::kErrored;
+ }
+ ret.push_back(param.value);
- if (!match(Token::Type::kComma))
- break;
- }
+ if (!match(Token::Type::kComma)) {
+ break;
+ }
+ }
- return ret;
+ return ret;
}
// param
// : attribute_list* variable_ident_decl
Expect<ast::Variable*> ParserImpl::expect_param() {
- auto attrs = attribute_list();
+ auto attrs = attribute_list();
- auto decl = expect_variable_ident_decl("parameter");
- if (decl.errored)
- return Failure::kErrored;
+ auto decl = expect_variable_ident_decl("parameter");
+ if (decl.errored) {
+ return Failure::kErrored;
+ }
- auto* var =
- create<ast::Variable>(decl->source, // source
- builder_.Symbols().Register(decl->name), // symbol
- ast::StorageClass::kNone, // storage class
- ast::Access::kUndefined, // access control
- decl->type, // type
- true, // is_const
- false, // is_overridable
- nullptr, // constructor
- std::move(attrs.value)); // attributes
- // Formal parameters are treated like a const declaration where the
- // initializer value is provided by the call's argument. The key point is
- // that it's not updatable after initially set. This is unlike C or GLSL
- // which treat formal parameters like local variables that can be updated.
+ auto* var = create<ast::Variable>(decl->source, // source
+ builder_.Symbols().Register(decl->name), // symbol
+ ast::StorageClass::kNone, // storage class
+ ast::Access::kUndefined, // access control
+ decl->type, // type
+ true, // is_const
+ false, // is_overridable
+ nullptr, // constructor
+ std::move(attrs.value)); // attributes
+    // Formal parameters are treated like a const declaration where the
+    // initializer value is provided by the call's argument. The key point is
+    // that they are not updatable after being initially set, unlike C or GLSL,
+    // which treat formal parameters like local variables that can be updated.
- return var;
+ return var;
}
// pipeline_stage
@@ -1372,80 +1498,86 @@ Expect<ast::Variable*> ParserImpl::expect_param() {
// | FRAGMENT
// | COMPUTE
Expect<ast::PipelineStage> ParserImpl::expect_pipeline_stage() {
- auto t = peek();
- if (t == kVertexStage) {
- next(); // Consume the peek
- return {ast::PipelineStage::kVertex, t.source()};
- }
- if (t == kFragmentStage) {
- next(); // Consume the peek
- return {ast::PipelineStage::kFragment, t.source()};
- }
- if (t == kComputeStage) {
- next(); // Consume the peek
- return {ast::PipelineStage::kCompute, t.source()};
- }
- return add_error(peek(), "invalid value for stage attribute");
+ auto t = peek();
+ if (t == kVertexStage) {
+ next(); // Consume the peek
+ return {ast::PipelineStage::kVertex, t.source()};
+ }
+ if (t == kFragmentStage) {
+ next(); // Consume the peek
+ return {ast::PipelineStage::kFragment, t.source()};
+ }
+ if (t == kComputeStage) {
+ next(); // Consume the peek
+ return {ast::PipelineStage::kCompute, t.source()};
+ }
+ return add_error(peek(), "invalid value for stage attribute");
}
Expect<ast::Builtin> ParserImpl::expect_builtin() {
- auto ident = expect_ident("builtin");
- if (ident.errored)
- return Failure::kErrored;
+ auto ident = expect_ident("builtin");
+ if (ident.errored) {
+ return Failure::kErrored;
+ }
- ast::Builtin builtin = ident_to_builtin(ident.value);
- if (builtin == ast::Builtin::kNone)
- return add_error(ident.source, "invalid value for builtin attribute");
+ ast::Builtin builtin = ident_to_builtin(ident.value);
+ if (builtin == ast::Builtin::kNone) {
+ return add_error(ident.source, "invalid value for builtin attribute");
+ }
- return {builtin, ident.source};
+ return {builtin, ident.source};
}
// body_stmt
// : BRACE_LEFT statements BRACE_RIGHT
Expect<ast::BlockStatement*> ParserImpl::expect_body_stmt() {
- return expect_brace_block("", [&]() -> Expect<ast::BlockStatement*> {
- auto stmts = expect_statements();
- if (stmts.errored)
- return Failure::kErrored;
- return create<ast::BlockStatement>(Source{}, stmts.value);
- });
+ return expect_brace_block("", [&]() -> Expect<ast::BlockStatement*> {
+ auto stmts = expect_statements();
+ if (stmts.errored) {
+ return Failure::kErrored;
+ }
+ return create<ast::BlockStatement>(Source{}, stmts.value);
+ });
}
// paren_rhs_stmt
// : PAREN_LEFT logical_or_expression PAREN_RIGHT
Expect<const ast::Expression*> ParserImpl::expect_paren_rhs_stmt() {
- return expect_paren_block("", [&]() -> Expect<const ast::Expression*> {
- auto expr = logical_or_expression();
- if (expr.errored)
- return Failure::kErrored;
- if (!expr.matched)
- return add_error(peek(), "unable to parse expression");
+ return expect_paren_block("", [&]() -> Expect<const ast::Expression*> {
+ auto expr = logical_or_expression();
+ if (expr.errored) {
+ return Failure::kErrored;
+ }
+ if (!expr.matched) {
+ return add_error(peek(), "unable to parse expression");
+ }
- return expr.value;
- });
+ return expr.value;
+ });
}
// statements
// : statement*
Expect<ast::StatementList> ParserImpl::expect_statements() {
- bool errored = false;
- ast::StatementList stmts;
+ bool errored = false;
+ ast::StatementList stmts;
- while (continue_parsing()) {
- auto stmt = statement();
- if (stmt.errored) {
- errored = true;
- } else if (stmt.matched) {
- stmts.emplace_back(stmt.value);
- } else {
- break;
+ while (continue_parsing()) {
+ auto stmt = statement();
+ if (stmt.errored) {
+ errored = true;
+ } else if (stmt.matched) {
+ stmts.emplace_back(stmt.value);
+ } else {
+ break;
+ }
}
- }
- if (errored)
- return Failure::kErrored;
+ if (errored) {
+ return Failure::kErrored;
+ }
- return stmts;
+ return stmts;
}
// statement
@@ -1466,51 +1598,61 @@ Expect<ast::StatementList> ParserImpl::expect_statements() {
// | increment_stmt SEMICOLON
// | decrement_stmt SEMICOLON
Maybe<const ast::Statement*> ParserImpl::statement() {
- while (match(Token::Type::kSemicolon)) {
- // Skip empty statements
- }
+ while (match(Token::Type::kSemicolon)) {
+ // Skip empty statements
+ }
- // Non-block statments that error can resynchronize on semicolon.
- auto stmt =
- sync(Token::Type::kSemicolon, [&] { return non_block_statement(); });
+    // Non-block statements that error can resynchronize on semicolon.
+ auto stmt = sync(Token::Type::kSemicolon, [&] { return non_block_statement(); });
- if (stmt.errored)
- return Failure::kErrored;
- if (stmt.matched)
- return stmt;
+ if (stmt.errored) {
+ return Failure::kErrored;
+ }
+ if (stmt.matched) {
+ return stmt;
+ }
- auto stmt_if = if_stmt();
- if (stmt_if.errored)
- return Failure::kErrored;
- if (stmt_if.matched)
- return stmt_if.value;
+ auto stmt_if = if_stmt();
+ if (stmt_if.errored) {
+ return Failure::kErrored;
+ }
+ if (stmt_if.matched) {
+ return stmt_if.value;
+ }
- auto sw = switch_stmt();
- if (sw.errored)
- return Failure::kErrored;
- if (sw.matched)
- return sw.value;
+ auto sw = switch_stmt();
+ if (sw.errored) {
+ return Failure::kErrored;
+ }
+ if (sw.matched) {
+ return sw.value;
+ }
- auto loop = loop_stmt();
- if (loop.errored)
- return Failure::kErrored;
- if (loop.matched)
- return loop.value;
+ auto loop = loop_stmt();
+ if (loop.errored) {
+ return Failure::kErrored;
+ }
+ if (loop.matched) {
+ return loop.value;
+ }
- auto stmt_for = for_stmt();
- if (stmt_for.errored)
- return Failure::kErrored;
- if (stmt_for.matched)
- return stmt_for.value;
+ auto stmt_for = for_stmt();
+ if (stmt_for.errored) {
+ return Failure::kErrored;
+ }
+ if (stmt_for.matched) {
+ return stmt_for.value;
+ }
- if (peek_is(Token::Type::kBraceLeft)) {
- auto body = expect_body_stmt();
- if (body.errored)
- return Failure::kErrored;
- return body.value;
- }
+ if (peek_is(Token::Type::kBraceLeft)) {
+ auto body = expect_body_stmt();
+ if (body.errored) {
+ return Failure::kErrored;
+ }
+ return body.value;
+ }
- return Failure::kNoMatch;
+ return Failure::kNoMatch;
}
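
The sync(Token::Type::kSemicolon, ...) wrapper above is statement-level error recovery: when the inner parse fails, tokens are skipped up to the next semicolon so later statements can still be parsed and diagnosed. A small sketch of that skip-to-sync-token pattern under simplified assumptions (the Tok enum and token vector are illustrative only):

#include <cstddef>
#include <functional>
#include <vector>

enum class Tok { kSemicolon, kIdent, kNumber };

// Sketch: run `parse`; if it fails, consume tokens up to and including the
// next semicolon so the caller can continue with the following statement.
bool sync_on_semicolon(const std::vector<Tok>& toks, std::size_t& pos,
                       const std::function<bool(std::size_t&)>& parse) {
    if (parse(pos)) {
        return true;
    }
    while (pos < toks.size() && toks[pos] != Tok::kSemicolon) {
        ++pos;  // skip the remainder of the broken statement
    }
    if (pos < toks.size()) {
        ++pos;  // consume the semicolon itself
    }
    return false;  // an error was reported, but the stream is usable again
}
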
// statement (continued)
@@ -1524,72 +1666,89 @@ Maybe<const ast::Statement*> ParserImpl::statement() {
// | increment_stmt SEMICOLON
// | decrement_stmt SEMICOLON
Maybe<const ast::Statement*> ParserImpl::non_block_statement() {
- auto stmt = [&]() -> Maybe<const ast::Statement*> {
- auto ret_stmt = return_stmt();
- if (ret_stmt.errored)
- return Failure::kErrored;
- if (ret_stmt.matched)
- return ret_stmt.value;
-
- auto func = func_call_stmt();
- if (func.errored)
- return Failure::kErrored;
- if (func.matched)
- return func.value;
+ auto stmt = [&]() -> Maybe<const ast::Statement*> {
+ auto ret_stmt = return_stmt();
+ if (ret_stmt.errored) {
+ return Failure::kErrored;
+ }
+ if (ret_stmt.matched) {
+ return ret_stmt.value;
+ }
- auto var = variable_stmt();
- if (var.errored)
- return Failure::kErrored;
- if (var.matched)
- return var.value;
-
- auto b = break_stmt();
- if (b.errored)
- return Failure::kErrored;
- if (b.matched)
- return b.value;
-
- auto cont = continue_stmt();
- if (cont.errored)
- return Failure::kErrored;
- if (cont.matched)
- return cont.value;
+ auto func = func_call_stmt();
+ if (func.errored) {
+ return Failure::kErrored;
+ }
+ if (func.matched) {
+ return func.value;
+ }
- auto assign = assignment_stmt();
- if (assign.errored)
- return Failure::kErrored;
- if (assign.matched)
- return assign.value;
+ auto var = variable_stmt();
+ if (var.errored) {
+ return Failure::kErrored;
+ }
+ if (var.matched) {
+ return var.value;
+ }
- Source source;
- if (match(Token::Type::kDiscard, &source))
- return create<ast::DiscardStatement>(source);
+ auto b = break_stmt();
+ if (b.errored) {
+ return Failure::kErrored;
+ }
+ if (b.matched) {
+ return b.value;
+ }
- return Failure::kNoMatch;
- }();
+ auto cont = continue_stmt();
+ if (cont.errored) {
+ return Failure::kErrored;
+ }
+ if (cont.matched) {
+ return cont.value;
+ }
- if (stmt.matched && !expect(stmt->Name(), Token::Type::kSemicolon))
- return Failure::kErrored;
+ auto assign = assignment_stmt();
+ if (assign.errored) {
+ return Failure::kErrored;
+ }
+ if (assign.matched) {
+ return assign.value;
+ }
+
+ Source source;
+ if (match(Token::Type::kDiscard, &source)) {
+ return create<ast::DiscardStatement>(source);
+ }
+
+ return Failure::kNoMatch;
+ }();
+
+ if (stmt.matched && !expect(stmt->Name(), Token::Type::kSemicolon)) {
+ return Failure::kErrored;
+ }
- return stmt;
+ return stmt;
}
// return_stmt
// : RETURN logical_or_expression?
Maybe<const ast::ReturnStatement*> ParserImpl::return_stmt() {
- Source source;
- if (!match(Token::Type::kReturn, &source))
- return Failure::kNoMatch;
+ Source source;
+ if (!match(Token::Type::kReturn, &source)) {
+ return Failure::kNoMatch;
+ }
- if (peek_is(Token::Type::kSemicolon))
- return create<ast::ReturnStatement>(source, nullptr);
+ if (peek_is(Token::Type::kSemicolon)) {
+ return create<ast::ReturnStatement>(source, nullptr);
+ }
- auto expr = logical_or_expression();
- if (expr.errored)
- return Failure::kErrored;
+ auto expr = logical_or_expression();
+ if (expr.errored) {
+ return Failure::kErrored;
+ }
- // TODO(bclayton): Check matched?
- return create<ast::ReturnStatement>(source, expr.value);
+ // TODO(bclayton): Check matched?
+ return create<ast::ReturnStatement>(source, expr.value);
}
// variable_stmt
@@ -1597,232 +1756,261 @@ Maybe<const ast::ReturnStatement*> ParserImpl::return_stmt() {
// | variable_decl EQUAL logical_or_expression
// | CONST variable_ident_decl EQUAL logical_or_expression
Maybe<const ast::VariableDeclStatement*> ParserImpl::variable_stmt() {
- if (match(Token::Type::kLet)) {
- auto decl = expect_variable_ident_decl("let declaration",
- /*allow_inferred = */ true);
- if (decl.errored)
- return Failure::kErrored;
-
- if (!expect("let declaration", Token::Type::kEqual))
- return Failure::kErrored;
-
- auto constructor = logical_or_expression();
- if (constructor.errored)
- return Failure::kErrored;
- if (!constructor.matched)
- return add_error(peek(), "missing constructor for let declaration");
-
- auto* var = create<ast::Variable>(
- decl->source, // source
- builder_.Symbols().Register(decl->name), // symbol
- ast::StorageClass::kNone, // storage class
- ast::Access::kUndefined, // access control
- decl->type, // type
- true, // is_const
- false, // is_overridable
- constructor.value, // constructor
- ast::AttributeList{}); // attributes
-
- return create<ast::VariableDeclStatement>(decl->source, var);
- }
-
- auto decl = variable_decl(/*allow_inferred = */ true);
- if (decl.errored)
- return Failure::kErrored;
- if (!decl.matched)
- return Failure::kNoMatch;
+ if (match(Token::Type::kLet)) {
+ auto decl = expect_variable_ident_decl("let declaration",
+ /*allow_inferred = */ true);
+ if (decl.errored) {
+ return Failure::kErrored;
+ }
+
+ if (!expect("let declaration", Token::Type::kEqual)) {
+ return Failure::kErrored;
+ }
- const ast::Expression* constructor = nullptr;
- if (match(Token::Type::kEqual)) {
- auto constructor_expr = logical_or_expression();
- if (constructor_expr.errored)
- return Failure::kErrored;
- if (!constructor_expr.matched)
- return add_error(peek(), "missing constructor for variable declaration");
+ auto constructor = logical_or_expression();
+ if (constructor.errored) {
+ return Failure::kErrored;
+ }
+ if (!constructor.matched) {
+ return add_error(peek(), "missing constructor for let declaration");
+ }
+
+ auto* var = create<ast::Variable>(decl->source, // source
+ builder_.Symbols().Register(decl->name), // symbol
+ ast::StorageClass::kNone, // storage class
+ ast::Access::kUndefined, // access control
+ decl->type, // type
+ true, // is_const
+ false, // is_overridable
+ constructor.value, // constructor
+ ast::AttributeList{}); // attributes
+
+ return create<ast::VariableDeclStatement>(decl->source, var);
+ }
+
+ auto decl = variable_decl(/*allow_inferred = */ true);
+ if (decl.errored) {
+ return Failure::kErrored;
+ }
+ if (!decl.matched) {
+ return Failure::kNoMatch;
+ }
- constructor = constructor_expr.value;
- }
+ const ast::Expression* constructor = nullptr;
+ if (match(Token::Type::kEqual)) {
+ auto constructor_expr = logical_or_expression();
+ if (constructor_expr.errored) {
+ return Failure::kErrored;
+ }
+ if (!constructor_expr.matched) {
+ return add_error(peek(), "missing constructor for variable declaration");
+ }
+
+ constructor = constructor_expr.value;
+ }
- auto* var =
- create<ast::Variable>(decl->source, // source
- builder_.Symbols().Register(decl->name), // symbol
- decl->storage_class, // storage class
- decl->access, // access control
- decl->type, // type
- false, // is_const
- false, // is_overridable
- constructor, // constructor
- ast::AttributeList{}); // attributes
+ auto* var = create<ast::Variable>(decl->source, // source
+ builder_.Symbols().Register(decl->name), // symbol
+ decl->storage_class, // storage class
+ decl->access, // access control
+ decl->type, // type
+ false, // is_const
+ false, // is_overridable
+ constructor, // constructor
+ ast::AttributeList{}); // attributes
- return create<ast::VariableDeclStatement>(var->source, var);
+ return create<ast::VariableDeclStatement>(var->source, var);
}
// if_stmt
-// : IF expression compound_stmt ( ELSE else_stmts ) ?
+// : IF expression compound_stmt ( ELSE else_stmt ) ?
+// else_stmt
+// : body_stmt
+// | if_stmt
Maybe<const ast::IfStatement*> ParserImpl::if_stmt() {
- Source source;
- if (!match(Token::Type::kIf, &source))
- return Failure::kNoMatch;
+ // Parse if-else chains iteratively instead of recursively, to avoid
+ // stack-overflow for long chains of if-else statements.
+
+ struct IfInfo {
+ Source source;
+ const ast::Expression* condition;
+ const ast::BlockStatement* body;
+ };
+
+ // Parse an if statement, capturing the source, condition, and body statement.
+ auto parse_if = [&]() -> Maybe<IfInfo> {
+ Source source;
+ if (!match(Token::Type::kIf, &source)) {
+ return Failure::kNoMatch;
+ }
- auto condition = logical_or_expression();
- if (condition.errored)
- return Failure::kErrored;
- if (!condition.matched) {
- return add_error(peek(), "unable to parse condition expression");
- }
+ auto condition = logical_or_expression();
+ if (condition.errored) {
+ return Failure::kErrored;
+ }
+ if (!condition.matched) {
+ return add_error(peek(), "unable to parse condition expression");
+ }
- auto body = expect_body_stmt();
- if (body.errored)
- return Failure::kErrored;
+ auto body = expect_body_stmt();
+ if (body.errored) {
+ return Failure::kErrored;
+ }
- auto el = else_stmts();
- if (el.errored) {
- return Failure::kErrored;
- }
+ return IfInfo{source, condition.value, body.value};
+ };
- return create<ast::IfStatement>(source, condition.value, body.value,
- std::move(el.value));
-}
+ std::vector<IfInfo> statements;
-// else_stmts
-// : body_stmt
-// | if_stmt
-Expect<ast::ElseStatementList> ParserImpl::else_stmts() {
- ast::ElseStatementList stmts;
- while (continue_parsing()) {
- Source start;
-
- bool else_if = false;
- if (match(Token::Type::kElse, &start)) {
- else_if = match(Token::Type::kIf);
- } else {
- break;
+ // Parse the first if statement.
+ auto first_if = parse_if();
+ if (first_if.errored) {
+ return Failure::kErrored;
+ } else if (!first_if.matched) {
+ return Failure::kNoMatch;
}
+ statements.push_back(first_if.value);
- const ast::Expression* cond = nullptr;
- if (else_if) {
- auto condition = logical_or_expression();
- if (condition.errored) {
- return Failure::kErrored;
- }
- if (!condition.matched) {
- return add_error(peek(), "unable to parse condition expression");
- }
+ // Parse the components of every "else {if}" in the chain.
+ const ast::Statement* last_stmt = nullptr;
+ while (continue_parsing()) {
+ if (!match(Token::Type::kElse)) {
+ break;
+ }
- cond = condition.value;
- }
+ // Try to parse an "else if".
+ auto else_if = parse_if();
+ if (else_if.errored) {
+ return Failure::kErrored;
+ } else if (else_if.matched) {
+ statements.push_back(else_if.value);
+ continue;
+ }
- auto body = expect_body_stmt();
- if (body.errored) {
- return Failure::kErrored;
+ // If it wasn't an "else if", it must just be an "else".
+ auto else_body = expect_body_stmt();
+ if (else_body.errored) {
+ return Failure::kErrored;
+ }
+ last_stmt = else_body.value;
+ break;
}
- Source source = make_source_range_from(start);
- stmts.emplace_back(create<ast::ElseStatement>(source, cond, body.value));
- }
+ // Now walk back through the statements to create their AST nodes.
+ for (auto itr = statements.rbegin(); itr != statements.rend(); itr++) {
+ last_stmt = create<ast::IfStatement>(itr->source, itr->condition, itr->body, last_stmt);
+ }
- return stmts;
+ return last_stmt->As<ast::IfStatement>();
}
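
The reverse iteration at the end of if_stmt is what converts the flat, iteratively parsed list of if headers back into a right-nested AST without recursing. A self-contained sketch of that backwards fold, with a simplified Node type standing in for the real ast::IfStatement:

#include <memory>
#include <string>
#include <vector>

// Simplified stand-ins for the real AST types.
struct Node {
    std::string condition;            // empty for a plain `else` block
    std::string body;
    std::unique_ptr<Node> else_stmt;  // nested "else if" or trailing else
};

// Fold a flat list of `if` headers (parsed front to back) into a right-nested
// chain: the last entry wraps the trailing `else`, and each earlier entry
// wraps the one built after it.
std::unique_ptr<Node> build_if_chain(std::vector<Node> ifs,
                                     std::unique_ptr<Node> trailing_else) {
    std::unique_ptr<Node> last = std::move(trailing_else);
    for (auto it = ifs.rbegin(); it != ifs.rend(); ++it) {
        auto node = std::make_unique<Node>();
        node->condition = std::move(it->condition);
        node->body = std::move(it->body);
        node->else_stmt = std::move(last);
        last = std::move(node);
    }
    return last;  // the outermost `if`
}
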
// switch_stmt
// : SWITCH paren_rhs_stmt BRACKET_LEFT switch_body+ BRACKET_RIGHT
Maybe<const ast::SwitchStatement*> ParserImpl::switch_stmt() {
- Source source;
- if (!match(Token::Type::kSwitch, &source))
- return Failure::kNoMatch;
+ Source source;
+ if (!match(Token::Type::kSwitch, &source)) {
+ return Failure::kNoMatch;
+ }
- auto condition = logical_or_expression();
- if (condition.errored)
- return Failure::kErrored;
- if (!condition.matched) {
- return add_error(peek(), "unable to parse selector expression");
- }
-
- auto body = expect_brace_block("switch statement",
- [&]() -> Expect<ast::CaseStatementList> {
- bool errored = false;
- ast::CaseStatementList list;
- while (continue_parsing()) {
- auto stmt = switch_body();
- if (stmt.errored) {
- errored = true;
- continue;
- }
- if (!stmt.matched)
- break;
- list.push_back(stmt.value);
- }
- if (errored)
- return Failure::kErrored;
- return list;
- });
-
- if (body.errored)
- return Failure::kErrored;
+ auto condition = logical_or_expression();
+ if (condition.errored) {
+ return Failure::kErrored;
+ }
+ if (!condition.matched) {
+ return add_error(peek(), "unable to parse selector expression");
+ }
+
+ auto body = expect_brace_block("switch statement", [&]() -> Expect<ast::CaseStatementList> {
+ bool errored = false;
+ ast::CaseStatementList list;
+ while (continue_parsing()) {
+ auto stmt = switch_body();
+ if (stmt.errored) {
+ errored = true;
+ continue;
+ }
+ if (!stmt.matched) {
+ break;
+ }
+ list.push_back(stmt.value);
+ }
+ if (errored) {
+ return Failure::kErrored;
+ }
+ return list;
+ });
- return create<ast::SwitchStatement>(source, condition.value, body.value);
+ if (body.errored) {
+ return Failure::kErrored;
+ }
+
+ return create<ast::SwitchStatement>(source, condition.value, body.value);
}
// switch_body
// : CASE case_selectors COLON? BRACKET_LEFT case_body BRACKET_RIGHT
// | DEFAULT COLON? BRACKET_LEFT case_body BRACKET_RIGHT
Maybe<const ast::CaseStatement*> ParserImpl::switch_body() {
- if (!peek_is(Token::Type::kCase) && !peek_is(Token::Type::kDefault))
- return Failure::kNoMatch;
+ if (!peek_is(Token::Type::kCase) && !peek_is(Token::Type::kDefault)) {
+ return Failure::kNoMatch;
+ }
- auto t = next();
- auto source = t.source();
+ auto t = next();
+ auto source = t.source();
- ast::CaseSelectorList selector_list;
- if (t.Is(Token::Type::kCase)) {
- auto selectors = expect_case_selectors();
- if (selectors.errored)
- return Failure::kErrored;
+ ast::CaseSelectorList selector_list;
+ if (t.Is(Token::Type::kCase)) {
+ auto selectors = expect_case_selectors();
+ if (selectors.errored) {
+ return Failure::kErrored;
+ }
- selector_list = std::move(selectors.value);
- }
+ selector_list = std::move(selectors.value);
+ }
- // Consume the optional colon if present.
- match(Token::Type::kColon);
+ // Consume the optional colon if present.
+ match(Token::Type::kColon);
- const char* use = "case statement";
- auto body = expect_brace_block(use, [&] { return case_body(); });
+ const char* use = "case statement";
+ auto body = expect_brace_block(use, [&] { return case_body(); });
- if (body.errored)
- return Failure::kErrored;
- if (!body.matched)
- return add_error(body.source, "expected case body");
+ if (body.errored) {
+ return Failure::kErrored;
+ }
+ if (!body.matched) {
+ return add_error(body.source, "expected case body");
+ }
- return create<ast::CaseStatement>(source, selector_list, body.value);
+ return create<ast::CaseStatement>(source, selector_list, body.value);
}
// case_selectors
// : const_literal (COMMA const_literal)* COMMA?
Expect<ast::CaseSelectorList> ParserImpl::expect_case_selectors() {
- ast::CaseSelectorList selectors;
+ ast::CaseSelectorList selectors;
- while (continue_parsing()) {
- auto cond = const_literal();
- if (cond.errored) {
- return Failure::kErrored;
- } else if (!cond.matched) {
- break;
- } else if (!cond->Is<ast::IntLiteralExpression>()) {
- return add_error(cond.value->source,
- "invalid case selector must be an integer value");
- }
+ while (continue_parsing()) {
+ auto cond = const_literal();
+ if (cond.errored) {
+ return Failure::kErrored;
+ } else if (!cond.matched) {
+ break;
+ } else if (!cond->Is<ast::IntLiteralExpression>()) {
+ return add_error(cond.value->source, "invalid case selector must be an integer value");
+ }
- selectors.push_back(cond.value->As<ast::IntLiteralExpression>());
+ selectors.push_back(cond.value->As<ast::IntLiteralExpression>());
- if (!match(Token::Type::kComma)) {
- break;
+ if (!match(Token::Type::kComma)) {
+ break;
+ }
}
- }
- if (selectors.empty())
- return add_error(peek(), "unable to parse case selectors");
+ if (selectors.empty()) {
+ return add_error(peek(), "unable to parse case selectors");
+ }
- return selectors;
+ return selectors;
}
// case_body
@@ -1830,48 +2018,54 @@ Expect<ast::CaseSelectorList> ParserImpl::expect_case_selectors() {
// | statement case_body
// | FALLTHROUGH SEMICOLON
Maybe<const ast::BlockStatement*> ParserImpl::case_body() {
- ast::StatementList stmts;
- while (continue_parsing()) {
- Source source;
- if (match(Token::Type::kFallthrough, &source)) {
- if (!expect("fallthrough statement", Token::Type::kSemicolon))
- return Failure::kErrored;
+ ast::StatementList stmts;
+ while (continue_parsing()) {
+ Source source;
+ if (match(Token::Type::kFallthrough, &source)) {
+ if (!expect("fallthrough statement", Token::Type::kSemicolon)) {
+ return Failure::kErrored;
+ }
- stmts.emplace_back(create<ast::FallthroughStatement>(source));
- break;
- }
+ stmts.emplace_back(create<ast::FallthroughStatement>(source));
+ break;
+ }
- auto stmt = statement();
- if (stmt.errored)
- return Failure::kErrored;
- if (!stmt.matched)
- break;
+ auto stmt = statement();
+ if (stmt.errored) {
+ return Failure::kErrored;
+ }
+ if (!stmt.matched) {
+ break;
+ }
- stmts.emplace_back(stmt.value);
- }
+ stmts.emplace_back(stmt.value);
+ }
- return create<ast::BlockStatement>(Source{}, stmts);
+ return create<ast::BlockStatement>(Source{}, stmts);
}
// loop_stmt
// : LOOP BRACKET_LEFT statements continuing_stmt? BRACKET_RIGHT
Maybe<const ast::LoopStatement*> ParserImpl::loop_stmt() {
- Source source;
- if (!match(Token::Type::kLoop, &source))
- return Failure::kNoMatch;
+ Source source;
+ if (!match(Token::Type::kLoop, &source)) {
+ return Failure::kNoMatch;
+ }
- return expect_brace_block("loop", [&]() -> Maybe<const ast::LoopStatement*> {
- auto stmts = expect_statements();
- if (stmts.errored)
- return Failure::kErrored;
+ return expect_brace_block("loop", [&]() -> Maybe<const ast::LoopStatement*> {
+ auto stmts = expect_statements();
+ if (stmts.errored) {
+ return Failure::kErrored;
+ }
- auto continuing = continuing_stmt();
- if (continuing.errored)
- return Failure::kErrored;
+ auto continuing = continuing_stmt();
+ if (continuing.errored) {
+ return Failure::kErrored;
+ }
- auto* body = create<ast::BlockStatement>(source, stmts.value);
- return create<ast::LoopStatement>(source, body, continuing.value);
- });
+ auto* body = create<ast::BlockStatement>(source, stmts.value);
+ return create<ast::LoopStatement>(source, body, continuing.value);
+ });
}
ForHeader::ForHeader(const ast::Statement* init,
@@ -1884,42 +2078,52 @@ ForHeader::~ForHeader() = default;
// (variable_stmt | increment_stmt | decrement_stmt | assignment_stmt |
// func_call_stmt)?
Maybe<const ast::Statement*> ParserImpl::for_header_initializer() {
- auto call = func_call_stmt();
- if (call.errored)
- return Failure::kErrored;
- if (call.matched)
- return call.value;
+ auto call = func_call_stmt();
+ if (call.errored) {
+ return Failure::kErrored;
+ }
+ if (call.matched) {
+ return call.value;
+ }
- auto var = variable_stmt();
- if (var.errored)
- return Failure::kErrored;
- if (var.matched)
- return var.value;
+ auto var = variable_stmt();
+ if (var.errored) {
+ return Failure::kErrored;
+ }
+ if (var.matched) {
+ return var.value;
+ }
- auto assign = assignment_stmt();
- if (assign.errored)
- return Failure::kErrored;
- if (assign.matched)
- return assign.value;
+ auto assign = assignment_stmt();
+ if (assign.errored) {
+ return Failure::kErrored;
+ }
+ if (assign.matched) {
+ return assign.value;
+ }
- return Failure::kNoMatch;
+ return Failure::kNoMatch;
}
// (increment_stmt | decrement_stmt | assignment_stmt | func_call_stmt)?
Maybe<const ast::Statement*> ParserImpl::for_header_continuing() {
- auto call_stmt = func_call_stmt();
- if (call_stmt.errored)
- return Failure::kErrored;
- if (call_stmt.matched)
- return call_stmt.value;
+ auto call_stmt = func_call_stmt();
+ if (call_stmt.errored) {
+ return Failure::kErrored;
+ }
+ if (call_stmt.matched) {
+ return call_stmt.value;
+ }
- auto assign = assignment_stmt();
- if (assign.errored)
- return Failure::kErrored;
- if (assign.matched)
- return assign.value;
+ auto assign = assignment_stmt();
+ if (assign.errored) {
+ return Failure::kErrored;
+ }
+ if (assign.matched) {
+ return assign.value;
+ }
- return Failure::kNoMatch;
+ return Failure::kNoMatch;
}
// for_header
@@ -1928,102 +2132,111 @@ Maybe<const ast::Statement*> ParserImpl::for_header_continuing() {
// logical_or_expression? SEMICOLON
// (assignment_stmt | func_call_stmt)?
Expect<std::unique_ptr<ForHeader>> ParserImpl::expect_for_header() {
- auto initializer = for_header_initializer();
- if (initializer.errored)
- return Failure::kErrored;
+ auto initializer = for_header_initializer();
+ if (initializer.errored) {
+ return Failure::kErrored;
+ }
- if (!expect("initializer in for loop", Token::Type::kSemicolon))
- return Failure::kErrored;
+ if (!expect("initializer in for loop", Token::Type::kSemicolon)) {
+ return Failure::kErrored;
+ }
- auto condition = logical_or_expression();
- if (condition.errored)
- return Failure::kErrored;
+ auto condition = logical_or_expression();
+ if (condition.errored) {
+ return Failure::kErrored;
+ }
- if (!expect("condition in for loop", Token::Type::kSemicolon))
- return Failure::kErrored;
+ if (!expect("condition in for loop", Token::Type::kSemicolon)) {
+ return Failure::kErrored;
+ }
- auto continuing = for_header_continuing();
- if (continuing.errored)
- return Failure::kErrored;
+ auto continuing = for_header_continuing();
+ if (continuing.errored) {
+ return Failure::kErrored;
+ }
- return std::make_unique<ForHeader>(initializer.value, condition.value,
- continuing.value);
+ return std::make_unique<ForHeader>(initializer.value, condition.value, continuing.value);
}
// for_statement
// : FOR PAREN_LEFT for_header PAREN_RIGHT BRACE_LEFT statements BRACE_RIGHT
Maybe<const ast::ForLoopStatement*> ParserImpl::for_stmt() {
- Source source;
- if (!match(Token::Type::kFor, &source))
- return Failure::kNoMatch;
+ Source source;
+ if (!match(Token::Type::kFor, &source)) {
+ return Failure::kNoMatch;
+ }
- auto header =
- expect_paren_block("for loop", [&] { return expect_for_header(); });
- if (header.errored)
- return Failure::kErrored;
+ auto header = expect_paren_block("for loop", [&] { return expect_for_header(); });
+ if (header.errored) {
+ return Failure::kErrored;
+ }
- auto stmts =
- expect_brace_block("for loop", [&] { return expect_statements(); });
- if (stmts.errored)
- return Failure::kErrored;
+ auto stmts = expect_brace_block("for loop", [&] { return expect_statements(); });
+ if (stmts.errored) {
+ return Failure::kErrored;
+ }
- return create<ast::ForLoopStatement>(
- source, header->initializer, header->condition, header->continuing,
- create<ast::BlockStatement>(stmts.value));
+ return create<ast::ForLoopStatement>(source, header->initializer, header->condition,
+ header->continuing,
+ create<ast::BlockStatement>(stmts.value));
}
// func_call_stmt
// : IDENT argument_expression_list
Maybe<const ast::CallStatement*> ParserImpl::func_call_stmt() {
- auto t = peek();
- auto t2 = peek(1);
- if (!t.IsIdentifier() || !t2.Is(Token::Type::kParenLeft))
- return Failure::kNoMatch;
+ auto t = peek();
+ auto t2 = peek(1);
+ if (!t.IsIdentifier() || !t2.Is(Token::Type::kParenLeft)) {
+ return Failure::kNoMatch;
+ }
- next(); // Consume the first peek
+ next(); // Consume the first peek
- auto source = t.source();
- auto name = t.to_str();
+ auto source = t.source();
+ auto name = t.to_str();
- auto params = expect_argument_expression_list("function call");
- if (params.errored)
- return Failure::kErrored;
+ auto params = expect_argument_expression_list("function call");
+ if (params.errored) {
+ return Failure::kErrored;
+ }
- return create<ast::CallStatement>(
- source, create<ast::CallExpression>(
- source,
- create<ast::IdentifierExpression>(
- source, builder_.Symbols().Register(name)),
- std::move(params.value)));
+ return create<ast::CallStatement>(
+ source,
+ create<ast::CallExpression>(
+ source, create<ast::IdentifierExpression>(source, builder_.Symbols().Register(name)),
+ std::move(params.value)));
}
// break_stmt
// : BREAK
Maybe<const ast::BreakStatement*> ParserImpl::break_stmt() {
- Source source;
- if (!match(Token::Type::kBreak, &source))
- return Failure::kNoMatch;
+ Source source;
+ if (!match(Token::Type::kBreak, &source)) {
+ return Failure::kNoMatch;
+ }
- return create<ast::BreakStatement>(source);
+ return create<ast::BreakStatement>(source);
}
// continue_stmt
// : CONTINUE
Maybe<const ast::ContinueStatement*> ParserImpl::continue_stmt() {
- Source source;
- if (!match(Token::Type::kContinue, &source))
- return Failure::kNoMatch;
+ Source source;
+ if (!match(Token::Type::kContinue, &source)) {
+ return Failure::kNoMatch;
+ }
- return create<ast::ContinueStatement>(source);
+ return create<ast::ContinueStatement>(source);
}
// continuing_stmt
// : CONTINUING body_stmt
Maybe<const ast::BlockStatement*> ParserImpl::continuing_stmt() {
- if (!match(Token::Type::kContinuing))
- return create<ast::BlockStatement>(Source{}, ast::StatementList{});
+ if (!match(Token::Type::kContinuing)) {
+ return create<ast::BlockStatement>(Source{}, ast::StatementList{});
+ }
- return expect_body_stmt();
+ return expect_body_stmt();
}
// primary_expression
@@ -2033,159 +2246,162 @@ Maybe<const ast::BlockStatement*> ParserImpl::continuing_stmt() {
// | paren_rhs_stmt
// | BITCAST LESS_THAN type_decl GREATER_THAN paren_rhs_stmt
Maybe<const ast::Expression*> ParserImpl::primary_expression() {
- auto t = peek();
- auto source = t.source();
+ auto t = peek();
+ auto source = t.source();
- auto lit = const_literal();
- if (lit.errored) {
- return Failure::kErrored;
- }
- if (lit.matched) {
- return lit.value;
- }
+ auto lit = const_literal();
+ if (lit.errored) {
+ return Failure::kErrored;
+ }
+ if (lit.matched) {
+ return lit.value;
+ }
+
+ if (t.Is(Token::Type::kParenLeft)) {
+ auto paren = expect_paren_rhs_stmt();
+ if (paren.errored) {
+ return Failure::kErrored;
+ }
- if (t.Is(Token::Type::kParenLeft)) {
- auto paren = expect_paren_rhs_stmt();
- if (paren.errored) {
- return Failure::kErrored;
+ return paren.value;
}
- return paren.value;
- }
+ if (match(Token::Type::kBitcast)) {
+ const char* use = "bitcast expression";
- if (match(Token::Type::kBitcast)) {
- const char* use = "bitcast expression";
+ auto type = expect_lt_gt_block(use, [&] { return expect_type(use); });
+ if (type.errored) {
+ return Failure::kErrored;
+ }
- auto type = expect_lt_gt_block(use, [&] { return expect_type(use); });
- if (type.errored)
- return Failure::kErrored;
+ auto params = expect_paren_rhs_stmt();
+ if (params.errored) {
+ return Failure::kErrored;
+ }
- auto params = expect_paren_rhs_stmt();
- if (params.errored)
- return Failure::kErrored;
+ return create<ast::BitcastExpression>(source, type.value, params.value);
+ }
- return create<ast::BitcastExpression>(source, type.value, params.value);
- }
+ if (t.IsIdentifier()) {
+ next();
- if (t.IsIdentifier()) {
- next();
+ auto* ident =
+ create<ast::IdentifierExpression>(t.source(), builder_.Symbols().Register(t.to_str()));
- auto* ident = create<ast::IdentifierExpression>(
- t.source(), builder_.Symbols().Register(t.to_str()));
+ if (peek_is(Token::Type::kParenLeft)) {
+ auto params = expect_argument_expression_list("function call");
+ if (params.errored) {
+ return Failure::kErrored;
+ }
- if (peek_is(Token::Type::kParenLeft)) {
- auto params = expect_argument_expression_list("function call");
- if (params.errored)
- return Failure::kErrored;
+ return create<ast::CallExpression>(source, ident, std::move(params.value));
+ }
- return create<ast::CallExpression>(source, ident,
- std::move(params.value));
+ return ident;
}
- return ident;
- }
-
- auto type = type_decl();
- if (type.errored)
- return Failure::kErrored;
- if (type.matched) {
- auto params = expect_argument_expression_list("type constructor");
- if (params.errored)
- return Failure::kErrored;
+ auto type = type_decl();
+ if (type.errored) {
+ return Failure::kErrored;
+ }
+ if (type.matched) {
+ auto params = expect_argument_expression_list("type constructor");
+ if (params.errored) {
+ return Failure::kErrored;
+ }
- return builder_.Construct(source, type.value, std::move(params.value));
- }
+ return builder_.Construct(source, type.value, std::move(params.value));
+ }
- return Failure::kNoMatch;
+ return Failure::kNoMatch;
}
// postfix_expression
// :
// | BRACE_LEFT logical_or_expression BRACE_RIGHT postfix_expr
// | PERIOD IDENTIFIER postfix_expr
-Maybe<const ast::Expression*> ParserImpl::postfix_expression(
- const ast::Expression* prefix) {
- Source source;
-
- while (continue_parsing()) {
- if (match(Token::Type::kBracketLeft, &source)) {
- auto res = sync(
- Token::Type::kBracketRight, [&]() -> Maybe<const ast::Expression*> {
- auto param = logical_or_expression();
- if (param.errored)
- return Failure::kErrored;
- if (!param.matched) {
- return add_error(peek(), "unable to parse expression inside []");
- }
+Maybe<const ast::Expression*> ParserImpl::postfix_expression(const ast::Expression* prefix) {
+ Source source;
- if (!expect("index accessor", Token::Type::kBracketRight)) {
- return Failure::kErrored;
+ while (continue_parsing()) {
+ if (match(Token::Type::kBracketLeft, &source)) {
+ auto res = sync(Token::Type::kBracketRight, [&]() -> Maybe<const ast::Expression*> {
+ auto param = logical_or_expression();
+ if (param.errored) {
+ return Failure::kErrored;
+ }
+ if (!param.matched) {
+ return add_error(peek(), "unable to parse expression inside []");
+ }
+
+ if (!expect("index accessor", Token::Type::kBracketRight)) {
+ return Failure::kErrored;
+ }
+
+ return create<ast::IndexAccessorExpression>(source, prefix, param.value);
+ });
+
+ if (res.errored) {
+ return res;
}
+ prefix = res.value;
+ continue;
+ }
- return create<ast::IndexAccessorExpression>(source, prefix,
- param.value);
- });
-
- if (res.errored) {
- return res;
- }
- prefix = res.value;
- continue;
- }
+ if (match(Token::Type::kPeriod)) {
+ auto ident = expect_ident("member accessor");
+ if (ident.errored) {
+ return Failure::kErrored;
+ }
- if (match(Token::Type::kPeriod)) {
- auto ident = expect_ident("member accessor");
- if (ident.errored) {
- return Failure::kErrored;
- }
+ prefix = create<ast::MemberAccessorExpression>(
+ ident.source, prefix,
+ create<ast::IdentifierExpression>(ident.source,
+ builder_.Symbols().Register(ident.value)));
+ continue;
+ }
- prefix = create<ast::MemberAccessorExpression>(
- ident.source, prefix,
- create<ast::IdentifierExpression>(
- ident.source, builder_.Symbols().Register(ident.value)));
- continue;
+ return prefix;
}
- return prefix;
- }
-
- return Failure::kErrored;
+ return Failure::kErrored;
}
// singular_expression
// : primary_expression postfix_expr
Maybe<const ast::Expression*> ParserImpl::singular_expression() {
- auto prefix = primary_expression();
- if (prefix.errored)
- return Failure::kErrored;
- if (!prefix.matched)
- return Failure::kNoMatch;
+ auto prefix = primary_expression();
+ if (prefix.errored) {
+ return Failure::kErrored;
+ }
+ if (!prefix.matched) {
+ return Failure::kNoMatch;
+ }
- return postfix_expression(prefix.value);
+ return postfix_expression(prefix.value);
}
// argument_expression_list
// : PAREN_LEFT ((logical_or_expression COMMA)* logical_or_expression COMMA?)?
// PAREN_RIGHT
-Expect<ast::ExpressionList> ParserImpl::expect_argument_expression_list(
- std::string_view use) {
- return expect_paren_block(use, [&]() -> Expect<ast::ExpressionList> {
- ast::ExpressionList ret;
- while (continue_parsing()) {
- auto arg = logical_or_expression();
- if (arg.errored) {
- return Failure::kErrored;
- } else if (!arg.matched) {
- break;
- }
- ret.push_back(arg.value);
+Expect<ast::ExpressionList> ParserImpl::expect_argument_expression_list(std::string_view use) {
+ return expect_paren_block(use, [&]() -> Expect<ast::ExpressionList> {
+ ast::ExpressionList ret;
+ while (continue_parsing()) {
+ auto arg = logical_or_expression();
+ if (arg.errored) {
+ return Failure::kErrored;
+ } else if (!arg.matched) {
+ break;
+ }
+ ret.push_back(arg.value);
- if (!match(Token::Type::kComma)) {
- break;
- }
- }
- return ret;
- });
+ if (!match(Token::Type::kComma)) {
+ break;
+ }
+ }
+ return ret;
+ });
}
// unary_expression
@@ -2196,51 +2412,51 @@ Expect<ast::ExpressionList> ParserImpl::expect_argument_expression_list(
// | STAR unary_expression
// | AND unary_expression
Maybe<const ast::Expression*> ParserImpl::unary_expression() {
- auto t = peek();
+ auto t = peek();
- if (match(Token::Type::kPlusPlus) || match(Token::Type::kMinusMinus)) {
- add_error(t.source(),
- "prefix increment and decrement operators are reserved for a "
- "future WGSL version");
- return Failure::kErrored;
- }
-
- ast::UnaryOp op;
- if (match(Token::Type::kMinus)) {
- op = ast::UnaryOp::kNegation;
- } else if (match(Token::Type::kBang)) {
- op = ast::UnaryOp::kNot;
- } else if (match(Token::Type::kTilde)) {
- op = ast::UnaryOp::kComplement;
- } else if (match(Token::Type::kStar)) {
- op = ast::UnaryOp::kIndirection;
- } else if (match(Token::Type::kAnd)) {
- op = ast::UnaryOp::kAddressOf;
- } else {
- return singular_expression();
- }
-
- if (parse_depth_ >= kMaxParseDepth) {
- // We've hit a maximum parser recursive depth.
- // We can't call into unary_expression() as we might stack overflow.
- // Instead, report an error
- add_error(peek(), "maximum parser recursive depth reached");
- return Failure::kErrored;
- }
+ if (match(Token::Type::kPlusPlus) || match(Token::Type::kMinusMinus)) {
+ add_error(t.source(),
+ "prefix increment and decrement operators are reserved for a "
+ "future WGSL version");
+ return Failure::kErrored;
+ }
- ++parse_depth_;
- auto expr = unary_expression();
- --parse_depth_;
+ ast::UnaryOp op;
+ if (match(Token::Type::kMinus)) {
+ op = ast::UnaryOp::kNegation;
+ } else if (match(Token::Type::kBang)) {
+ op = ast::UnaryOp::kNot;
+ } else if (match(Token::Type::kTilde)) {
+ op = ast::UnaryOp::kComplement;
+ } else if (match(Token::Type::kStar)) {
+ op = ast::UnaryOp::kIndirection;
+ } else if (match(Token::Type::kAnd)) {
+ op = ast::UnaryOp::kAddressOf;
+ } else {
+ return singular_expression();
+ }
- if (expr.errored) {
- return Failure::kErrored;
- }
- if (!expr.matched) {
- return add_error(peek(), "unable to parse right side of " +
- std::string(t.to_name()) + " expression");
- }
+ if (parse_depth_ >= kMaxParseDepth) {
+        // We've hit the maximum parser recursive depth.
+        // We can't call into unary_expression() as we might stack overflow.
+        // Instead, report an error.
+ add_error(peek(), "maximum parser recursive depth reached");
+ return Failure::kErrored;
+ }
- return create<ast::UnaryOpExpression>(t.source(), op, expr.value);
+ ++parse_depth_;
+ auto expr = unary_expression();
+ --parse_depth_;
+
+ if (expr.errored) {
+ return Failure::kErrored;
+ }
+ if (!expr.matched) {
+ return add_error(
+ peek(), "unable to parse right side of " + std::string(t.to_name()) + " expression");
+ }
+
+ return create<ast::UnaryOpExpression>(t.source(), op, expr.value);
}
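
The kMaxParseDepth check above bounds recursion through chained unary operators so a pathological input (for example thousands of leading '-' tokens) produces a parse error instead of a stack overflow. A minimal sketch of the same guard, with an illustrative depth limit rather than Tint's actual constant:

#include <cstddef>
#include <string>

constexpr std::size_t kMaxDepthSketch = 128;  // illustrative, not Tint's value

// Sketch: recurse through consecutive unary prefixes, but fail gracefully
// once the depth limit is reached instead of risking a stack overflow.
bool parse_unary_sketch(const std::string& src, std::size_t pos,
                        std::size_t depth, std::string* error) {
    if (depth >= kMaxDepthSketch) {
        *error = "maximum parser recursive depth reached";
        return false;
    }
    if (pos < src.size() && (src[pos] == '-' || src[pos] == '!')) {
        return parse_unary_sketch(src, pos + 1, depth + 1, error);
    }
    return true;  // base case: hand off to the (omitted) primary expression
}
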
// multiplicative_expr
@@ -2248,133 +2464,142 @@ Maybe<const ast::Expression*> ParserImpl::unary_expression() {
// | STAR unary_expression multiplicative_expr
// | FORWARD_SLASH unary_expression multiplicative_expr
// | MODULO unary_expression multiplicative_expr
-Expect<const ast::Expression*> ParserImpl::expect_multiplicative_expr(
- const ast::Expression* lhs) {
- while (continue_parsing()) {
- ast::BinaryOp op = ast::BinaryOp::kNone;
- if (peek_is(Token::Type::kStar))
- op = ast::BinaryOp::kMultiply;
- else if (peek_is(Token::Type::kForwardSlash))
- op = ast::BinaryOp::kDivide;
- else if (peek_is(Token::Type::kMod))
- op = ast::BinaryOp::kModulo;
- else
- return lhs;
+Expect<const ast::Expression*> ParserImpl::expect_multiplicative_expr(const ast::Expression* lhs) {
+ while (continue_parsing()) {
+ ast::BinaryOp op = ast::BinaryOp::kNone;
+ if (peek_is(Token::Type::kStar)) {
+ op = ast::BinaryOp::kMultiply;
+ } else if (peek_is(Token::Type::kForwardSlash)) {
+ op = ast::BinaryOp::kDivide;
+ } else if (peek_is(Token::Type::kMod)) {
+ op = ast::BinaryOp::kModulo;
+ } else {
+ return lhs;
+ }
- auto t = next();
- auto source = t.source();
- auto name = t.to_name();
+ auto t = next();
+ auto source = t.source();
+ auto name = t.to_name();
- auto rhs = unary_expression();
- if (rhs.errored)
- return Failure::kErrored;
- if (!rhs.matched) {
- return add_error(peek(), "unable to parse right side of " +
- std::string(name) + " expression");
- }
+ auto rhs = unary_expression();
+ if (rhs.errored) {
+ return Failure::kErrored;
+ }
+ if (!rhs.matched) {
+ return add_error(peek(),
+ "unable to parse right side of " + std::string(name) + " expression");
+ }
- lhs = create<ast::BinaryExpression>(source, op, lhs, rhs.value);
- }
- return Failure::kErrored;
+ lhs = create<ast::BinaryExpression>(source, op, lhs, rhs.value);
+ }
+ return Failure::kErrored;
}
// multiplicative_expression
// : unary_expression multiplicative_expr
Maybe<const ast::Expression*> ParserImpl::multiplicative_expression() {
- auto lhs = unary_expression();
- if (lhs.errored)
- return Failure::kErrored;
- if (!lhs.matched)
- return Failure::kNoMatch;
+ auto lhs = unary_expression();
+ if (lhs.errored) {
+ return Failure::kErrored;
+ }
+ if (!lhs.matched) {
+ return Failure::kNoMatch;
+ }
- return expect_multiplicative_expr(lhs.value);
+ return expect_multiplicative_expr(lhs.value);
}
// additive_expr
// :
// | PLUS multiplicative_expression additive_expr
// | MINUS multiplicative_expression additive_expr
-Expect<const ast::Expression*> ParserImpl::expect_additive_expr(
- const ast::Expression* lhs) {
- while (continue_parsing()) {
- ast::BinaryOp op = ast::BinaryOp::kNone;
- if (peek_is(Token::Type::kPlus))
- op = ast::BinaryOp::kAdd;
- else if (peek_is(Token::Type::kMinus))
- op = ast::BinaryOp::kSubtract;
- else
- return lhs;
+Expect<const ast::Expression*> ParserImpl::expect_additive_expr(const ast::Expression* lhs) {
+ while (continue_parsing()) {
+ ast::BinaryOp op = ast::BinaryOp::kNone;
+ if (peek_is(Token::Type::kPlus)) {
+ op = ast::BinaryOp::kAdd;
+ } else if (peek_is(Token::Type::kMinus)) {
+ op = ast::BinaryOp::kSubtract;
+ } else {
+ return lhs;
+ }
- auto t = next();
- auto source = t.source();
+ auto t = next();
+ auto source = t.source();
- auto rhs = multiplicative_expression();
- if (rhs.errored)
- return Failure::kErrored;
- if (!rhs.matched)
- return add_error(peek(), "unable to parse right side of + expression");
+ auto rhs = multiplicative_expression();
+ if (rhs.errored) {
+ return Failure::kErrored;
+ }
+ if (!rhs.matched) {
+ return add_error(peek(), "unable to parse right side of + expression");
+ }
- lhs = create<ast::BinaryExpression>(source, op, lhs, rhs.value);
- }
- return Failure::kErrored;
+ lhs = create<ast::BinaryExpression>(source, op, lhs, rhs.value);
+ }
+ return Failure::kErrored;
}
// additive_expression
// : multiplicative_expression additive_expr
Maybe<const ast::Expression*> ParserImpl::additive_expression() {
- auto lhs = multiplicative_expression();
- if (lhs.errored)
- return Failure::kErrored;
- if (!lhs.matched)
- return Failure::kNoMatch;
+ auto lhs = multiplicative_expression();
+ if (lhs.errored) {
+ return Failure::kErrored;
+ }
+ if (!lhs.matched) {
+ return Failure::kNoMatch;
+ }
- return expect_additive_expr(lhs.value);
+ return expect_additive_expr(lhs.value);
}
// shift_expr
// :
// | SHIFT_LEFT additive_expression shift_expr
// | SHIFT_RIGHT additive_expression shift_expr
-Expect<const ast::Expression*> ParserImpl::expect_shift_expr(
- const ast::Expression* lhs) {
- while (continue_parsing()) {
- auto* name = "";
- ast::BinaryOp op = ast::BinaryOp::kNone;
- if (peek_is(Token::Type::kShiftLeft)) {
- op = ast::BinaryOp::kShiftLeft;
- name = "<<";
- } else if (peek_is(Token::Type::kShiftRight)) {
- op = ast::BinaryOp::kShiftRight;
- name = ">>";
- } else {
- return lhs;
- }
+Expect<const ast::Expression*> ParserImpl::expect_shift_expr(const ast::Expression* lhs) {
+ while (continue_parsing()) {
+ auto* name = "";
+ ast::BinaryOp op = ast::BinaryOp::kNone;
+ if (peek_is(Token::Type::kShiftLeft)) {
+ op = ast::BinaryOp::kShiftLeft;
+ name = "<<";
+ } else if (peek_is(Token::Type::kShiftRight)) {
+ op = ast::BinaryOp::kShiftRight;
+ name = ">>";
+ } else {
+ return lhs;
+ }
- auto t = next();
- auto source = t.source();
- auto rhs = additive_expression();
- if (rhs.errored)
- return Failure::kErrored;
- if (!rhs.matched) {
- return add_error(peek(), std::string("unable to parse right side of ") +
- name + " expression");
- }
+ auto t = next();
+ auto source = t.source();
+ auto rhs = additive_expression();
+ if (rhs.errored) {
+ return Failure::kErrored;
+ }
+ if (!rhs.matched) {
+ return add_error(peek(),
+ std::string("unable to parse right side of ") + name + " expression");
+ }
- return lhs = create<ast::BinaryExpression>(source, op, lhs, rhs.value);
- }
- return Failure::kErrored;
+ return lhs = create<ast::BinaryExpression>(source, op, lhs, rhs.value);
+ }
+ return Failure::kErrored;
}
// shift_expression
// : additive_expression shift_expr
Maybe<const ast::Expression*> ParserImpl::shift_expression() {
- auto lhs = additive_expression();
- if (lhs.errored)
- return Failure::kErrored;
- if (!lhs.matched)
- return Failure::kNoMatch;
+ auto lhs = additive_expression();
+ if (lhs.errored) {
+ return Failure::kErrored;
+ }
+ if (!lhs.matched) {
+ return Failure::kNoMatch;
+ }
- return expect_shift_expr(lhs.value);
+ return expect_shift_expr(lhs.value);
}
// relational_expr
@@ -2383,268 +2608,287 @@ Maybe<const ast::Expression*> ParserImpl::shift_expression() {
// | GREATER_THAN shift_expression relational_expr
// | LESS_THAN_EQUAL shift_expression relational_expr
// | GREATER_THAN_EQUAL shift_expression relational_expr
-Expect<const ast::Expression*> ParserImpl::expect_relational_expr(
- const ast::Expression* lhs) {
- while (continue_parsing()) {
- ast::BinaryOp op = ast::BinaryOp::kNone;
- if (peek_is(Token::Type::kLessThan))
- op = ast::BinaryOp::kLessThan;
- else if (peek_is(Token::Type::kGreaterThan))
- op = ast::BinaryOp::kGreaterThan;
- else if (peek_is(Token::Type::kLessThanEqual))
- op = ast::BinaryOp::kLessThanEqual;
- else if (peek_is(Token::Type::kGreaterThanEqual))
- op = ast::BinaryOp::kGreaterThanEqual;
- else
- return lhs;
+Expect<const ast::Expression*> ParserImpl::expect_relational_expr(const ast::Expression* lhs) {
+ while (continue_parsing()) {
+ ast::BinaryOp op = ast::BinaryOp::kNone;
+ if (peek_is(Token::Type::kLessThan)) {
+ op = ast::BinaryOp::kLessThan;
+ } else if (peek_is(Token::Type::kGreaterThan)) {
+ op = ast::BinaryOp::kGreaterThan;
+ } else if (peek_is(Token::Type::kLessThanEqual)) {
+ op = ast::BinaryOp::kLessThanEqual;
+ } else if (peek_is(Token::Type::kGreaterThanEqual)) {
+ op = ast::BinaryOp::kGreaterThanEqual;
+ } else {
+ return lhs;
+ }
- auto t = next();
- auto source = t.source();
- auto name = t.to_name();
+ auto t = next();
+ auto source = t.source();
+ auto name = t.to_name();
- auto rhs = shift_expression();
- if (rhs.errored)
- return Failure::kErrored;
- if (!rhs.matched) {
- return add_error(peek(), "unable to parse right side of " +
- std::string(name) + " expression");
- }
+ auto rhs = shift_expression();
+ if (rhs.errored) {
+ return Failure::kErrored;
+ }
+ if (!rhs.matched) {
+ return add_error(peek(),
+ "unable to parse right side of " + std::string(name) + " expression");
+ }
- lhs = create<ast::BinaryExpression>(source, op, lhs, rhs.value);
- }
- return Failure::kErrored;
+ lhs = create<ast::BinaryExpression>(source, op, lhs, rhs.value);
+ }
+ return Failure::kErrored;
}
// relational_expression
// : shift_expression relational_expr
Maybe<const ast::Expression*> ParserImpl::relational_expression() {
- auto lhs = shift_expression();
- if (lhs.errored)
- return Failure::kErrored;
- if (!lhs.matched)
- return Failure::kNoMatch;
+ auto lhs = shift_expression();
+ if (lhs.errored) {
+ return Failure::kErrored;
+ }
+ if (!lhs.matched) {
+ return Failure::kNoMatch;
+ }
- return expect_relational_expr(lhs.value);
+ return expect_relational_expr(lhs.value);
}
// equality_expr
// :
// | EQUAL_EQUAL relational_expression equality_expr
// | NOT_EQUAL relational_expression equality_expr
-Expect<const ast::Expression*> ParserImpl::expect_equality_expr(
- const ast::Expression* lhs) {
- while (continue_parsing()) {
- ast::BinaryOp op = ast::BinaryOp::kNone;
- if (peek_is(Token::Type::kEqualEqual))
- op = ast::BinaryOp::kEqual;
- else if (peek_is(Token::Type::kNotEqual))
- op = ast::BinaryOp::kNotEqual;
- else
- return lhs;
+Expect<const ast::Expression*> ParserImpl::expect_equality_expr(const ast::Expression* lhs) {
+ while (continue_parsing()) {
+ ast::BinaryOp op = ast::BinaryOp::kNone;
+ if (peek_is(Token::Type::kEqualEqual)) {
+ op = ast::BinaryOp::kEqual;
+ } else if (peek_is(Token::Type::kNotEqual)) {
+ op = ast::BinaryOp::kNotEqual;
+ } else {
+ return lhs;
+ }
- auto t = next();
- auto source = t.source();
- auto name = t.to_name();
+ auto t = next();
+ auto source = t.source();
+ auto name = t.to_name();
- auto rhs = relational_expression();
- if (rhs.errored)
- return Failure::kErrored;
- if (!rhs.matched) {
- return add_error(peek(), "unable to parse right side of " +
- std::string(name) + " expression");
- }
+ auto rhs = relational_expression();
+ if (rhs.errored) {
+ return Failure::kErrored;
+ }
+ if (!rhs.matched) {
+ return add_error(peek(),
+ "unable to parse right side of " + std::string(name) + " expression");
+ }
- lhs = create<ast::BinaryExpression>(source, op, lhs, rhs.value);
- }
- return Failure::kErrored;
+ lhs = create<ast::BinaryExpression>(source, op, lhs, rhs.value);
+ }
+ return Failure::kErrored;
}
// equality_expression
// : relational_expression equality_expr
Maybe<const ast::Expression*> ParserImpl::equality_expression() {
- auto lhs = relational_expression();
- if (lhs.errored)
- return Failure::kErrored;
- if (!lhs.matched)
- return Failure::kNoMatch;
+ auto lhs = relational_expression();
+ if (lhs.errored) {
+ return Failure::kErrored;
+ }
+ if (!lhs.matched) {
+ return Failure::kNoMatch;
+ }
- return expect_equality_expr(lhs.value);
+ return expect_equality_expr(lhs.value);
}
// and_expr
// :
// | AND equality_expression and_expr
-Expect<const ast::Expression*> ParserImpl::expect_and_expr(
- const ast::Expression* lhs) {
- while (continue_parsing()) {
- if (!peek_is(Token::Type::kAnd)) {
- return lhs;
- }
+Expect<const ast::Expression*> ParserImpl::expect_and_expr(const ast::Expression* lhs) {
+ while (continue_parsing()) {
+ if (!peek_is(Token::Type::kAnd)) {
+ return lhs;
+ }
- auto t = next();
- auto source = t.source();
+ auto t = next();
+ auto source = t.source();
- auto rhs = equality_expression();
- if (rhs.errored)
- return Failure::kErrored;
- if (!rhs.matched)
- return add_error(peek(), "unable to parse right side of & expression");
+ auto rhs = equality_expression();
+ if (rhs.errored) {
+ return Failure::kErrored;
+ }
+ if (!rhs.matched) {
+ return add_error(peek(), "unable to parse right side of & expression");
+ }
- lhs = create<ast::BinaryExpression>(source, ast::BinaryOp::kAnd, lhs,
- rhs.value);
- }
- return Failure::kErrored;
+ lhs = create<ast::BinaryExpression>(source, ast::BinaryOp::kAnd, lhs, rhs.value);
+ }
+ return Failure::kErrored;
}
// and_expression
// : equality_expression and_expr
Maybe<const ast::Expression*> ParserImpl::and_expression() {
- auto lhs = equality_expression();
- if (lhs.errored)
- return Failure::kErrored;
- if (!lhs.matched)
- return Failure::kNoMatch;
+ auto lhs = equality_expression();
+ if (lhs.errored) {
+ return Failure::kErrored;
+ }
+ if (!lhs.matched) {
+ return Failure::kNoMatch;
+ }
- return expect_and_expr(lhs.value);
+ return expect_and_expr(lhs.value);
}
// exclusive_or_expr
// :
// | XOR and_expression exclusive_or_expr
-Expect<const ast::Expression*> ParserImpl::expect_exclusive_or_expr(
- const ast::Expression* lhs) {
- while (continue_parsing()) {
- Source source;
- if (!match(Token::Type::kXor, &source))
- return lhs;
+Expect<const ast::Expression*> ParserImpl::expect_exclusive_or_expr(const ast::Expression* lhs) {
+ while (continue_parsing()) {
+ Source source;
+ if (!match(Token::Type::kXor, &source)) {
+ return lhs;
+ }
- auto rhs = and_expression();
- if (rhs.errored)
- return Failure::kErrored;
- if (!rhs.matched)
- return add_error(peek(), "unable to parse right side of ^ expression");
+ auto rhs = and_expression();
+ if (rhs.errored) {
+ return Failure::kErrored;
+ }
+ if (!rhs.matched) {
+ return add_error(peek(), "unable to parse right side of ^ expression");
+ }
- lhs = create<ast::BinaryExpression>(source, ast::BinaryOp::kXor, lhs,
- rhs.value);
- }
- return Failure::kErrored;
+ lhs = create<ast::BinaryExpression>(source, ast::BinaryOp::kXor, lhs, rhs.value);
+ }
+ return Failure::kErrored;
}
// exclusive_or_expression
// : and_expression exclusive_or_expr
Maybe<const ast::Expression*> ParserImpl::exclusive_or_expression() {
- auto lhs = and_expression();
- if (lhs.errored)
- return Failure::kErrored;
- if (!lhs.matched)
- return Failure::kNoMatch;
+ auto lhs = and_expression();
+ if (lhs.errored) {
+ return Failure::kErrored;
+ }
+ if (!lhs.matched) {
+ return Failure::kNoMatch;
+ }
- return expect_exclusive_or_expr(lhs.value);
+ return expect_exclusive_or_expr(lhs.value);
}
// inclusive_or_expr
// :
// | OR exclusive_or_expression inclusive_or_expr
-Expect<const ast::Expression*> ParserImpl::expect_inclusive_or_expr(
- const ast::Expression* lhs) {
- while (continue_parsing()) {
- Source source;
- if (!match(Token::Type::kOr))
- return lhs;
+Expect<const ast::Expression*> ParserImpl::expect_inclusive_or_expr(const ast::Expression* lhs) {
+ while (continue_parsing()) {
+ Source source;
+ if (!match(Token::Type::kOr, &source)) {
+ return lhs;
+ }
- auto rhs = exclusive_or_expression();
- if (rhs.errored)
- return Failure::kErrored;
- if (!rhs.matched)
- return add_error(peek(), "unable to parse right side of | expression");
+ auto rhs = exclusive_or_expression();
+ if (rhs.errored) {
+ return Failure::kErrored;
+ }
+ if (!rhs.matched) {
+ return add_error(peek(), "unable to parse right side of | expression");
+ }
- lhs = create<ast::BinaryExpression>(source, ast::BinaryOp::kOr, lhs,
- rhs.value);
- }
- return Failure::kErrored;
+ lhs = create<ast::BinaryExpression>(source, ast::BinaryOp::kOr, lhs, rhs.value);
+ }
+ return Failure::kErrored;
}
// inclusive_or_expression
// : exclusive_or_expression inclusive_or_expr
Maybe<const ast::Expression*> ParserImpl::inclusive_or_expression() {
- auto lhs = exclusive_or_expression();
- if (lhs.errored)
- return Failure::kErrored;
- if (!lhs.matched)
- return Failure::kNoMatch;
+ auto lhs = exclusive_or_expression();
+ if (lhs.errored) {
+ return Failure::kErrored;
+ }
+ if (!lhs.matched) {
+ return Failure::kNoMatch;
+ }
- return expect_inclusive_or_expr(lhs.value);
+ return expect_inclusive_or_expr(lhs.value);
}
// logical_and_expr
// :
// | AND_AND inclusive_or_expression logical_and_expr
-Expect<const ast::Expression*> ParserImpl::expect_logical_and_expr(
- const ast::Expression* lhs) {
- while (continue_parsing()) {
- if (!peek_is(Token::Type::kAndAnd)) {
- return lhs;
- }
+Expect<const ast::Expression*> ParserImpl::expect_logical_and_expr(const ast::Expression* lhs) {
+ while (continue_parsing()) {
+ if (!peek_is(Token::Type::kAndAnd)) {
+ return lhs;
+ }
- auto t = next();
- auto source = t.source();
+ auto t = next();
+ auto source = t.source();
- auto rhs = inclusive_or_expression();
- if (rhs.errored)
- return Failure::kErrored;
- if (!rhs.matched)
- return add_error(peek(), "unable to parse right side of && expression");
+ auto rhs = inclusive_or_expression();
+ if (rhs.errored) {
+ return Failure::kErrored;
+ }
+ if (!rhs.matched) {
+ return add_error(peek(), "unable to parse right side of && expression");
+ }
- lhs = create<ast::BinaryExpression>(source, ast::BinaryOp::kLogicalAnd, lhs,
- rhs.value);
- }
- return Failure::kErrored;
+ lhs = create<ast::BinaryExpression>(source, ast::BinaryOp::kLogicalAnd, lhs, rhs.value);
+ }
+ return Failure::kErrored;
}
// logical_and_expression
// : inclusive_or_expression logical_and_expr
Maybe<const ast::Expression*> ParserImpl::logical_and_expression() {
- auto lhs = inclusive_or_expression();
- if (lhs.errored)
- return Failure::kErrored;
- if (!lhs.matched)
- return Failure::kNoMatch;
+ auto lhs = inclusive_or_expression();
+ if (lhs.errored) {
+ return Failure::kErrored;
+ }
+ if (!lhs.matched) {
+ return Failure::kNoMatch;
+ }
- return expect_logical_and_expr(lhs.value);
+ return expect_logical_and_expr(lhs.value);
}
// logical_or_expr
// :
// | OR_OR logical_and_expression logical_or_expr
-Expect<const ast::Expression*> ParserImpl::expect_logical_or_expr(
- const ast::Expression* lhs) {
- while (continue_parsing()) {
- Source source;
- if (!match(Token::Type::kOrOr))
- return lhs;
+Expect<const ast::Expression*> ParserImpl::expect_logical_or_expr(const ast::Expression* lhs) {
+ while (continue_parsing()) {
+ Source source;
+ if (!match(Token::Type::kOrOr, &source)) {
+ return lhs;
+ }
- auto rhs = logical_and_expression();
- if (rhs.errored)
- return Failure::kErrored;
- if (!rhs.matched)
- return add_error(peek(), "unable to parse right side of || expression");
+ auto rhs = logical_and_expression();
+ if (rhs.errored) {
+ return Failure::kErrored;
+ }
+ if (!rhs.matched) {
+ return add_error(peek(), "unable to parse right side of || expression");
+ }
- lhs = create<ast::BinaryExpression>(source, ast::BinaryOp::kLogicalOr, lhs,
- rhs.value);
- }
- return Failure::kErrored;
+ lhs = create<ast::BinaryExpression>(source, ast::BinaryOp::kLogicalOr, lhs, rhs.value);
+ }
+ return Failure::kErrored;
}
// logical_or_expression
// : logical_and_expression logical_or_expr
Maybe<const ast::Expression*> ParserImpl::logical_or_expression() {
- auto lhs = logical_and_expression();
- if (lhs.errored)
- return Failure::kErrored;
- if (!lhs.matched)
- return Failure::kNoMatch;
+ auto lhs = logical_and_expression();
+ if (lhs.errored) {
+ return Failure::kErrored;
+ }
+ if (!lhs.matched) {
+ return Failure::kNoMatch;
+ }
- return expect_logical_or_expr(lhs.value);
+ return expect_logical_or_expr(lhs.value);
}
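Each expect_*_expr helper above uses the same left-folding loop: peek the operator, consume it, parse the right-hand side at the next lower precedence level, and wrap the running lhs in a new ast::BinaryExpression, so chained operators associate to the left. A minimal sketch of that behaviour, assuming Tint's ParserImplTest fixture and its parser() helper (test scaffolding that is not shown here):

// Left associativity of the || fold: "a || b || c" parses as (a || b) || c.
TEST_F(ParserImplTest, LogicalOr_IsLeftAssociative) {
    auto p = parser("a || b || c");
    auto e = p->logical_or_expression();
    ASSERT_FALSE(e.errored);
    ASSERT_TRUE(e.matched);
    auto* outer = e.value->As<ast::BinaryExpression>();
    ASSERT_NE(outer, nullptr);
    EXPECT_EQ(outer->op, ast::BinaryOp::kLogicalOr);
    EXPECT_TRUE(outer->lhs->Is<ast::BinaryExpression>());  // the inner (a || b)
    EXPECT_TRUE(outer->rhs->Is<ast::IdentifierExpression>());
}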
// compound_assignment_operator:
@@ -2657,29 +2901,29 @@ Maybe<const ast::Expression*> ParserImpl::logical_or_expression() {
// | or_equal
// | xor_equal
Maybe<ast::BinaryOp> ParserImpl::compound_assignment_operator() {
- ast::BinaryOp compound_op = ast::BinaryOp::kNone;
- if (peek_is(Token::Type::kPlusEqual)) {
- compound_op = ast::BinaryOp::kAdd;
- } else if (peek_is(Token::Type::kMinusEqual)) {
- compound_op = ast::BinaryOp::kSubtract;
- } else if (peek_is(Token::Type::kTimesEqual)) {
- compound_op = ast::BinaryOp::kMultiply;
- } else if (peek_is(Token::Type::kDivisionEqual)) {
- compound_op = ast::BinaryOp::kDivide;
- } else if (peek_is(Token::Type::kModuloEqual)) {
- compound_op = ast::BinaryOp::kModulo;
- } else if (peek_is(Token::Type::kAndEqual)) {
- compound_op = ast::BinaryOp::kAnd;
- } else if (peek_is(Token::Type::kOrEqual)) {
- compound_op = ast::BinaryOp::kOr;
- } else if (peek_is(Token::Type::kXorEqual)) {
- compound_op = ast::BinaryOp::kXor;
- }
- if (compound_op != ast::BinaryOp::kNone) {
- next();
- return compound_op;
- }
- return Failure::kNoMatch;
+ ast::BinaryOp compound_op = ast::BinaryOp::kNone;
+ if (peek_is(Token::Type::kPlusEqual)) {
+ compound_op = ast::BinaryOp::kAdd;
+ } else if (peek_is(Token::Type::kMinusEqual)) {
+ compound_op = ast::BinaryOp::kSubtract;
+ } else if (peek_is(Token::Type::kTimesEqual)) {
+ compound_op = ast::BinaryOp::kMultiply;
+ } else if (peek_is(Token::Type::kDivisionEqual)) {
+ compound_op = ast::BinaryOp::kDivide;
+ } else if (peek_is(Token::Type::kModuloEqual)) {
+ compound_op = ast::BinaryOp::kModulo;
+ } else if (peek_is(Token::Type::kAndEqual)) {
+ compound_op = ast::BinaryOp::kAnd;
+ } else if (peek_is(Token::Type::kOrEqual)) {
+ compound_op = ast::BinaryOp::kOr;
+ } else if (peek_is(Token::Type::kXorEqual)) {
+ compound_op = ast::BinaryOp::kXor;
+ }
+ if (compound_op != ast::BinaryOp::kNone) {
+ next();
+ return compound_op;
+ }
+ return Failure::kNoMatch;
}
// assignment_stmt
@@ -2690,611 +2934,668 @@ Maybe<ast::BinaryOp> ParserImpl::compound_assignment_operator() {
// decrement_stmt
// | lhs_expression MINUS_MINUS
Maybe<const ast::Statement*> ParserImpl::assignment_stmt() {
- auto t = peek();
- auto source = t.source();
-
- // tint:295 - Test for `ident COLON` - this is invalid grammar, and without
- // special casing will error as "missing = for assignment", which is less
- // helpful than this error message:
- if (peek_is(Token::Type::kIdentifier) && peek_is(Token::Type::kColon, 1)) {
- return add_error(peek(0).source(),
- "expected 'var' for variable declaration");
- }
-
- auto lhs = unary_expression();
- if (lhs.errored) {
- return Failure::kErrored;
- }
- if (!lhs.matched) {
- if (!match(Token::Type::kUnderscore, &source)) {
- return Failure::kNoMatch;
- }
- lhs = create<ast::PhonyExpression>(source);
- }
-
- // Handle increment and decrement statements.
- // We do this here because the parsing of the LHS expression overlaps with
- // the assignment statement, and we cannot tell which we are parsing until we
- // hit the ++/--/= token.
- if (match(Token::Type::kPlusPlus)) {
- return create<ast::IncrementDecrementStatement>(source, lhs.value, true);
- } else if (match(Token::Type::kMinusMinus)) {
- return create<ast::IncrementDecrementStatement>(source, lhs.value, false);
- }
-
- auto compound_op = compound_assignment_operator();
- if (compound_op.errored) {
- return Failure::kErrored;
- }
- if (!compound_op.matched) {
- if (!expect("assignment", Token::Type::kEqual)) {
- return Failure::kErrored;
+ auto t = peek();
+ auto source = t.source();
+
+ // tint:295 - Test for `ident COLON` - this is invalid grammar, and without
+ // special casing will error as "missing = for assignment", which is less
+ // helpful than this error message:
+ if (peek_is(Token::Type::kIdentifier) && peek_is(Token::Type::kColon, 1)) {
+ return add_error(peek(0).source(), "expected 'var' for variable declaration");
}
- }
- auto rhs = logical_or_expression();
- if (rhs.errored) {
- return Failure::kErrored;
- }
- if (!rhs.matched) {
- return add_error(peek(), "unable to parse right side of assignment");
- }
+ auto lhs = unary_expression();
+ if (lhs.errored) {
+ return Failure::kErrored;
+ }
+ if (!lhs.matched) {
+ if (!match(Token::Type::kUnderscore, &source)) {
+ return Failure::kNoMatch;
+ }
+ lhs = create<ast::PhonyExpression>(source);
+ }
+
+ // Handle increment and decrement statements.
+ // We do this here because the parsing of the LHS expression overlaps with
+ // the assignment statement, and we cannot tell which we are parsing until we
+ // hit the ++/--/= token.
+ if (match(Token::Type::kPlusPlus)) {
+ return create<ast::IncrementDecrementStatement>(source, lhs.value, true);
+ } else if (match(Token::Type::kMinusMinus)) {
+ return create<ast::IncrementDecrementStatement>(source, lhs.value, false);
+ }
+
+ auto compound_op = compound_assignment_operator();
+ if (compound_op.errored) {
+ return Failure::kErrored;
+ }
+ if (!compound_op.matched) {
+ if (!expect("assignment", Token::Type::kEqual)) {
+ return Failure::kErrored;
+ }
+ }
- if (compound_op.value != ast::BinaryOp::kNone) {
- return create<ast::CompoundAssignmentStatement>(
- source, lhs.value, rhs.value, compound_op.value);
- } else {
- return create<ast::AssignmentStatement>(source, lhs.value, rhs.value);
- }
+ auto rhs = logical_or_expression();
+ if (rhs.errored) {
+ return Failure::kErrored;
+ }
+ if (!rhs.matched) {
+ return add_error(peek(), "unable to parse right side of assignment");
+ }
+
+ if (compound_op.value != ast::BinaryOp::kNone) {
+ return create<ast::CompoundAssignmentStatement>(source, lhs.value, rhs.value,
+ compound_op.value);
+ } else {
+ return create<ast::AssignmentStatement>(source, lhs.value, rhs.value);
+ }
}
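Because plain assignment, compound assignment and increment/decrement all begin with the same LHS expression, assignment_stmt() dispatches on the token that follows the LHS. A short sketch of two of the resulting statement kinds, under the same assumed ParserImplTest/parser() scaffolding:

TEST_F(ParserImplTest, AssignmentStmt_Variants) {
    {
        auto p = parser("a += 1");
        auto stmt = p->assignment_stmt();
        ASSERT_FALSE(stmt.errored);
        ASSERT_TRUE(stmt.matched);
        EXPECT_TRUE(stmt.value->Is<ast::CompoundAssignmentStatement>());
    }
    {
        auto p = parser("a++");
        auto stmt = p->assignment_stmt();
        ASSERT_FALSE(stmt.errored);
        ASSERT_TRUE(stmt.matched);
        EXPECT_TRUE(stmt.value->Is<ast::IncrementDecrementStatement>());
    }
}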
// const_literal
// : INT_LITERAL
-// | UINT_LITERAL
// | FLOAT_LITERAL
// | TRUE
// | FALSE
Maybe<const ast::LiteralExpression*> ParserImpl::const_literal() {
- auto t = peek();
- if (match(Token::Type::kTrue)) {
- return create<ast::BoolLiteralExpression>(t.source(), true);
- }
- if (match(Token::Type::kFalse)) {
- return create<ast::BoolLiteralExpression>(t.source(), false);
- }
- if (match(Token::Type::kSintLiteral)) {
- return create<ast::SintLiteralExpression>(t.source(), t.to_i32());
- }
- if (match(Token::Type::kUintLiteral)) {
- return create<ast::UintLiteralExpression>(t.source(), t.to_u32());
- }
- if (match(Token::Type::kFloatLiteral)) {
- return create<ast::FloatLiteralExpression>(t.source(), t.to_f32());
- }
- if (handle_error(t)) {
- return Failure::kErrored;
- }
- return Failure::kNoMatch;
+ auto t = peek();
+ if (match(Token::Type::kIntLiteral)) {
+ return create<ast::IntLiteralExpression>(t.source(), t.to_i64(),
+ ast::IntLiteralExpression::Suffix::kNone);
+ }
+ if (match(Token::Type::kIntLiteral_I)) {
+ return create<ast::IntLiteralExpression>(t.source(), t.to_i64(),
+ ast::IntLiteralExpression::Suffix::kI);
+ }
+ if (match(Token::Type::kIntLiteral_U)) {
+ return create<ast::IntLiteralExpression>(t.source(), t.to_i64(),
+ ast::IntLiteralExpression::Suffix::kU);
+ }
+ if (match(Token::Type::kFloatLiteral)) {
+ return create<ast::FloatLiteralExpression>(t.source(), t.to_f64(),
+ ast::FloatLiteralExpression::Suffix::kNone);
+ }
+ if (match(Token::Type::kFloatLiteral_F)) {
+ return create<ast::FloatLiteralExpression>(t.source(), t.to_f64(),
+ ast::FloatLiteralExpression::Suffix::kF);
+ }
+ if (match(Token::Type::kTrue)) {
+ return create<ast::BoolLiteralExpression>(t.source(), true);
+ }
+ if (match(Token::Type::kFalse)) {
+ return create<ast::BoolLiteralExpression>(t.source(), false);
+ }
+ if (handle_error(t)) {
+ return Failure::kErrored;
+ }
+ return Failure::kNoMatch;
}
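const_literal() keeps integer literals as 64-bit values and records the written suffix (none, i, u, f) on the literal node instead of picking a signed or unsigned node type up front. A small sketch, assuming the same ParserImplTest/parser() test scaffolding:

TEST_F(ParserImplTest, ConstLiteral_UintSuffix) {
    auto p = parser("42u");
    auto lit = p->const_literal();
    ASSERT_FALSE(lit.errored);
    ASSERT_TRUE(lit.matched);
    auto* i = lit.value->As<ast::IntLiteralExpression>();
    ASSERT_NE(i, nullptr);
    EXPECT_EQ(i->value, 42);  // stored as int64_t regardless of suffix
    EXPECT_EQ(i->suffix, ast::IntLiteralExpression::Suffix::kU);
}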
// const_expr
// : type_decl PAREN_LEFT ((const_expr COMMA)? const_expr COMMA?)? PAREN_RIGHT
// | const_literal
Expect<const ast::Expression*> ParserImpl::expect_const_expr() {
- auto t = peek();
- auto source = t.source();
- if (t.IsLiteral()) {
- auto lit = const_literal();
- if (lit.errored) {
- return Failure::kErrored;
- }
- if (!lit.matched) {
- return add_error(peek(), "unable to parse constant literal");
- }
- return lit.value;
- }
-
- if (peek_is(Token::Type::kParenLeft, 1) ||
- peek_is(Token::Type::kLessThan, 1)) {
- auto type = expect_type("const_expr");
- if (type.errored) {
- return Failure::kErrored;
+ auto t = peek();
+ auto source = t.source();
+ if (t.IsLiteral()) {
+ auto lit = const_literal();
+ if (lit.errored) {
+ return Failure::kErrored;
+ }
+ if (!lit.matched) {
+ return add_error(peek(), "unable to parse constant literal");
+ }
+ return lit.value;
}
- auto params = expect_paren_block(
- "type constructor", [&]() -> Expect<ast::ExpressionList> {
- ast::ExpressionList list;
- while (continue_parsing()) {
- if (peek_is(Token::Type::kParenRight)) {
- break;
- }
-
- auto arg = expect_const_expr();
- if (arg.errored) {
- return Failure::kErrored;
- }
- list.emplace_back(arg.value);
+ if (peek_is(Token::Type::kParenLeft, 1) || peek_is(Token::Type::kLessThan, 1)) {
+ auto type = expect_type("const_expr");
+ if (type.errored) {
+ return Failure::kErrored;
+ }
- if (!match(Token::Type::kComma)) {
- break;
+ auto params = expect_paren_block("type constructor", [&]() -> Expect<ast::ExpressionList> {
+ ast::ExpressionList list;
+ while (continue_parsing()) {
+ if (peek_is(Token::Type::kParenRight)) {
+ break;
+ }
+
+ auto arg = expect_const_expr();
+ if (arg.errored) {
+ return Failure::kErrored;
+ }
+ list.emplace_back(arg.value);
+
+ if (!match(Token::Type::kComma)) {
+ break;
+ }
}
- }
- return list;
+ return list;
});
- if (params.errored)
- return Failure::kErrored;
+ if (params.errored) {
+ return Failure::kErrored;
+ }
- return builder_.Construct(source, type.value, params.value);
- }
- return add_error(peek(), "unable to parse const_expr");
+ return builder_.Construct(source, type.value, params.value);
+ }
+ return add_error(peek(), "unable to parse const_expr");
}
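In the type-constructor branch of expect_const_expr(), the parenthesised arguments are themselves parsed recursively as const_exprs and handed to ProgramBuilder::Construct. A sketch of the call-shaped result, under the same assumed scaffolding (that Construct produces an ast::CallExpression is an assumption here):

TEST_F(ParserImplTest, ConstExpr_TypeConstructor) {
    auto p = parser("vec2<f32>(1.0, 2.0)");
    auto e = p->expect_const_expr();
    ASSERT_FALSE(e.errored);
    ASSERT_NE(e.value, nullptr);
    // Type constructors are represented as call expressions in this AST.
    EXPECT_TRUE(e.value->Is<ast::CallExpression>());
}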
Maybe<ast::AttributeList> ParserImpl::attribute_list() {
- bool errored = false;
- ast::AttributeList attrs;
+ bool errored = false;
+ ast::AttributeList attrs;
- while (continue_parsing()) {
- if (match(Token::Type::kAttr)) {
- if (auto attr = expect_attribute(); attr.errored) {
- errored = true;
- } else {
- attrs.emplace_back(attr.value);
- }
- } else {
- break;
+ while (continue_parsing()) {
+ if (match(Token::Type::kAttr)) {
+ if (auto attr = expect_attribute(); attr.errored) {
+ errored = true;
+ } else {
+ attrs.emplace_back(attr.value);
+ }
+ } else {
+ break;
+ }
}
- }
- if (errored)
- return Failure::kErrored;
+ if (errored) {
+ return Failure::kErrored;
+ }
- if (attrs.empty())
- return Failure::kNoMatch;
+ if (attrs.empty()) {
+ return Failure::kNoMatch;
+ }
- return attrs;
+ return attrs;
}
Expect<const ast::Attribute*> ParserImpl::expect_attribute() {
- auto t = peek();
- auto attr = attribute();
- if (attr.errored)
- return Failure::kErrored;
- if (attr.matched)
- return attr.value;
- return add_error(t, "expected attribute");
+ auto t = peek();
+ auto attr = attribute();
+ if (attr.errored) {
+ return Failure::kErrored;
+ }
+ if (attr.matched) {
+ return attr.value;
+ }
+ return add_error(t, "expected attribute");
}
Maybe<const ast::Attribute*> ParserImpl::attribute() {
- using Result = Maybe<const ast::Attribute*>;
- auto t = next();
-
- if (!t.IsIdentifier()) {
- return Failure::kNoMatch;
- }
+ using Result = Maybe<const ast::Attribute*>;
+ auto t = next();
- if (t == kLocationAttribute) {
- const char* use = "location attribute";
- return expect_paren_block(use, [&]() -> Result {
- auto val = expect_positive_sint(use);
- if (val.errored)
- return Failure::kErrored;
+ if (!t.IsIdentifier()) {
+ return Failure::kNoMatch;
+ }
- return create<ast::LocationAttribute>(t.source(), val.value);
- });
- }
+ if (t == kLocationAttribute) {
+ const char* use = "location attribute";
+ return expect_paren_block(use, [&]() -> Result {
+ auto val = expect_positive_sint(use);
+ if (val.errored) {
+ return Failure::kErrored;
+ }
- if (t == kBindingAttribute) {
- const char* use = "binding attribute";
- return expect_paren_block(use, [&]() -> Result {
- auto val = expect_positive_sint(use);
- if (val.errored)
- return Failure::kErrored;
+ return create<ast::LocationAttribute>(t.source(), val.value);
+ });
+ }
- return create<ast::BindingAttribute>(t.source(), val.value);
- });
- }
+ if (t == kBindingAttribute) {
+ const char* use = "binding attribute";
+ return expect_paren_block(use, [&]() -> Result {
+ auto val = expect_positive_sint(use);
+ if (val.errored) {
+ return Failure::kErrored;
+ }
- if (t == kGroupAttribute) {
- const char* use = "group attribute";
- return expect_paren_block(use, [&]() -> Result {
- auto val = expect_positive_sint(use);
- if (val.errored)
- return Failure::kErrored;
+ return create<ast::BindingAttribute>(t.source(), val.value);
+ });
+ }
- return create<ast::GroupAttribute>(t.source(), val.value);
- });
- }
-
- if (t == kInterpolateAttribute) {
- return expect_paren_block("interpolate attribute", [&]() -> Result {
- ast::InterpolationType type;
- ast::InterpolationSampling sampling = ast::InterpolationSampling::kNone;
-
- auto type_tok = next();
- if (type_tok == "perspective") {
- type = ast::InterpolationType::kPerspective;
- } else if (type_tok == "linear") {
- type = ast::InterpolationType::kLinear;
- } else if (type_tok == "flat") {
- type = ast::InterpolationType::kFlat;
- } else {
- return add_error(type_tok, "invalid interpolation type");
- }
-
- if (match(Token::Type::kComma)) {
- auto sampling_tok = next();
- if (sampling_tok == "center") {
- sampling = ast::InterpolationSampling::kCenter;
- } else if (sampling_tok == "centroid") {
- sampling = ast::InterpolationSampling::kCentroid;
- } else if (sampling_tok == "sample") {
- sampling = ast::InterpolationSampling::kSample;
- } else {
- return add_error(sampling_tok, "invalid interpolation sampling");
- }
- }
+ if (t == kGroupAttribute) {
+ const char* use = "group attribute";
+ return expect_paren_block(use, [&]() -> Result {
+ auto val = expect_positive_sint(use);
+ if (val.errored) {
+ return Failure::kErrored;
+ }
- return create<ast::InterpolateAttribute>(t.source(), type, sampling);
- });
- }
+ return create<ast::GroupAttribute>(t.source(), val.value);
+ });
+ }
- if (t == kInvariantAttribute) {
- return create<ast::InvariantAttribute>(t.source());
- }
+ if (t == kInterpolateAttribute) {
+ return expect_paren_block("interpolate attribute", [&]() -> Result {
+ ast::InterpolationType type;
+ ast::InterpolationSampling sampling = ast::InterpolationSampling::kNone;
+
+ auto type_tok = next();
+ if (type_tok == "perspective") {
+ type = ast::InterpolationType::kPerspective;
+ } else if (type_tok == "linear") {
+ type = ast::InterpolationType::kLinear;
+ } else if (type_tok == "flat") {
+ type = ast::InterpolationType::kFlat;
+ } else {
+ return add_error(type_tok, "invalid interpolation type");
+ }
- if (t == kBuiltinAttribute) {
- return expect_paren_block("builtin attribute", [&]() -> Result {
- auto builtin = expect_builtin();
- if (builtin.errored)
- return Failure::kErrored;
+ if (match(Token::Type::kComma)) {
+ auto sampling_tok = next();
+ if (sampling_tok == "center") {
+ sampling = ast::InterpolationSampling::kCenter;
+ } else if (sampling_tok == "centroid") {
+ sampling = ast::InterpolationSampling::kCentroid;
+ } else if (sampling_tok == "sample") {
+ sampling = ast::InterpolationSampling::kSample;
+ } else {
+ return add_error(sampling_tok, "invalid interpolation sampling");
+ }
+ }
- return create<ast::BuiltinAttribute>(t.source(), builtin.value);
- });
- }
+ return create<ast::InterpolateAttribute>(t.source(), type, sampling);
+ });
+ }
- if (t == kWorkgroupSizeAttribute) {
- return expect_paren_block("workgroup_size attribute", [&]() -> Result {
- const ast::Expression* x = nullptr;
- const ast::Expression* y = nullptr;
- const ast::Expression* z = nullptr;
+ if (t == kInvariantAttribute) {
+ return create<ast::InvariantAttribute>(t.source());
+ }
- auto expr = primary_expression();
- if (expr.errored) {
- return Failure::kErrored;
- } else if (!expr.matched) {
- return add_error(peek(), "expected workgroup_size x parameter");
- }
- x = std::move(expr.value);
+ if (t == kBuiltinAttribute) {
+ return expect_paren_block("builtin attribute", [&]() -> Result {
+ auto builtin = expect_builtin();
+ if (builtin.errored) {
+ return Failure::kErrored;
+ }
- if (match(Token::Type::kComma)) {
- expr = primary_expression();
- if (expr.errored) {
- return Failure::kErrored;
- } else if (!expr.matched) {
- return add_error(peek(), "expected workgroup_size y parameter");
- }
- y = std::move(expr.value);
+ return create<ast::BuiltinAttribute>(t.source(), builtin.value);
+ });
+ }
- if (match(Token::Type::kComma)) {
- expr = primary_expression();
- if (expr.errored) {
- return Failure::kErrored;
- } else if (!expr.matched) {
- return add_error(peek(), "expected workgroup_size z parameter");
- }
- z = std::move(expr.value);
- }
- }
+ if (t == kWorkgroupSizeAttribute) {
+ return expect_paren_block("workgroup_size attribute", [&]() -> Result {
+ const ast::Expression* x = nullptr;
+ const ast::Expression* y = nullptr;
+ const ast::Expression* z = nullptr;
+
+ auto expr = primary_expression();
+ if (expr.errored) {
+ return Failure::kErrored;
+ } else if (!expr.matched) {
+ return add_error(peek(), "expected workgroup_size x parameter");
+ }
+ x = std::move(expr.value);
+
+ if (match(Token::Type::kComma)) {
+ expr = primary_expression();
+ if (expr.errored) {
+ return Failure::kErrored;
+ } else if (!expr.matched) {
+ return add_error(peek(), "expected workgroup_size y parameter");
+ }
+ y = std::move(expr.value);
+
+ if (match(Token::Type::kComma)) {
+ expr = primary_expression();
+ if (expr.errored) {
+ return Failure::kErrored;
+ } else if (!expr.matched) {
+ return add_error(peek(), "expected workgroup_size z parameter");
+ }
+ z = std::move(expr.value);
+ }
+ }
- return create<ast::WorkgroupAttribute>(t.source(), x, y, z);
- });
- }
+ return create<ast::WorkgroupAttribute>(t.source(), x, y, z);
+ });
+ }
- if (t == kStageAttribute) {
- return expect_paren_block("stage attribute", [&]() -> Result {
- auto stage = expect_pipeline_stage();
- if (stage.errored)
- return Failure::kErrored;
+ if (t == kStageAttribute) {
+ return expect_paren_block("stage attribute", [&]() -> Result {
+ auto stage = expect_pipeline_stage();
+ if (stage.errored) {
+ return Failure::kErrored;
+ }
- return create<ast::StageAttribute>(t.source(), stage.value);
- });
- }
+ // TODO(crbug.com/tint/1503): Enable this once all the Dawn and CTS
+ // tests are updated to use the new format so we can avoid spamming
+ // the log files.
+ if ((false)) {
+ std::string warning = "stage should use @";
+ switch (stage.value) {
+ case ast::PipelineStage::kVertex:
+ warning += "vertex";
+ break;
+ case ast::PipelineStage::kFragment:
+ warning += "fragment";
+ break;
+ case ast::PipelineStage::kCompute:
+ warning += "compute";
+ break;
+ case ast::PipelineStage::kNone:
+ break;
+ }
+ deprecated(t.source(), warning);
+ }
+ return create<ast::StageAttribute>(t.source(), stage.value);
+ });
+ }
+ if (t == kComputeStage) {
+ return create<ast::StageAttribute>(t.source(), ast::PipelineStage::kCompute);
+ }
+ if (t == kVertexStage) {
+ return create<ast::StageAttribute>(t.source(), ast::PipelineStage::kVertex);
+ }
+ if (t == kFragmentStage) {
+ return create<ast::StageAttribute>(t.source(), ast::PipelineStage::kFragment);
+ }
- if (t == kSizeAttribute) {
- const char* use = "size attribute";
- return expect_paren_block(use, [&]() -> Result {
- auto val = expect_positive_sint(use);
- if (val.errored)
- return Failure::kErrored;
+ if (t == kSizeAttribute) {
+ const char* use = "size attribute";
+ return expect_paren_block(use, [&]() -> Result {
+ auto val = expect_positive_sint(use);
+ if (val.errored) {
+ return Failure::kErrored;
+ }
- return create<ast::StructMemberSizeAttribute>(t.source(), val.value);
- });
- }
+ return create<ast::StructMemberSizeAttribute>(t.source(), val.value);
+ });
+ }
- if (t == kAlignAttribute) {
- const char* use = "align attribute";
- return expect_paren_block(use, [&]() -> Result {
- auto val = expect_positive_sint(use);
- if (val.errored)
- return Failure::kErrored;
+ if (t == kAlignAttribute) {
+ const char* use = "align attribute";
+ return expect_paren_block(use, [&]() -> Result {
+ auto val = expect_positive_sint(use);
+ if (val.errored) {
+ return Failure::kErrored;
+ }
- return create<ast::StructMemberAlignAttribute>(t.source(), val.value);
- });
- }
+ return create<ast::StructMemberAlignAttribute>(t.source(), val.value);
+ });
+ }
- if (t == kIdAttribute) {
- const char* use = "id attribute";
- return expect_paren_block(use, [&]() -> Result {
- auto val = expect_positive_sint(use);
- if (val.errored)
- return Failure::kErrored;
+ if (t == kIdAttribute) {
+ const char* use = "id attribute";
+ return expect_paren_block(use, [&]() -> Result {
+ auto val = expect_positive_sint(use);
+ if (val.errored) {
+ return Failure::kErrored;
+ }
- return create<ast::IdAttribute>(t.source(), val.value);
- });
- }
+ return create<ast::IdAttribute>(t.source(), val.value);
+ });
+ }
- return Failure::kNoMatch;
+ return Failure::kNoMatch;
}
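attribute() is entered after the '@' token has already been consumed by attribute_list(), so it dispatches purely on the identifier that follows; the bare stage names map straight to an ast::StageAttribute with no parenthesised argument. A sketch, assuming the ParserImplTest/parser() scaffolding and that the stage constants compare against plain identifiers:

TEST_F(ParserImplTest, Attribute_ComputeShorthand) {
    auto p = parser("compute");  // the leading '@' is consumed by the caller
    auto attr = p->attribute();
    ASSERT_FALSE(attr.errored);
    ASSERT_TRUE(attr.matched);
    auto* stage = attr.value->As<ast::StageAttribute>();
    ASSERT_NE(stage, nullptr);
    EXPECT_EQ(stage->stage, ast::PipelineStage::kCompute);
}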
bool ParserImpl::expect_attributes_consumed(ast::AttributeList& in) {
- if (in.empty()) {
- return true;
- }
- add_error(in[0]->source, "unexpected attributes");
- return false;
+ if (in.empty()) {
+ return true;
+ }
+ add_error(in[0]->source, "unexpected attributes");
+ return false;
}
bool ParserImpl::match(Token::Type tok, Source* source /*= nullptr*/) {
- auto t = peek();
+ auto t = peek();
- if (source != nullptr)
- *source = t.source();
+ if (source != nullptr) {
+ *source = t.source();
+ }
- if (t.Is(tok)) {
- next();
- return true;
- }
- return false;
+ if (t.Is(tok)) {
+ next();
+ return true;
+ }
+ return false;
}
bool ParserImpl::expect(std::string_view use, Token::Type tok) {
- auto t = peek();
- if (t.Is(tok)) {
- next();
- synchronized_ = true;
- return true;
- }
-
- // Special case to split `>>` and `>=` tokens if we are looking for a `>`.
- if (tok == Token::Type::kGreaterThan &&
- (t.Is(Token::Type::kShiftRight) ||
- t.Is(Token::Type::kGreaterThanEqual))) {
- next();
+ auto t = peek();
+ if (t.Is(tok)) {
+ next();
+ synchronized_ = true;
+ return true;
+ }
- // Push the second character to the token queue.
- auto source = t.source();
- source.range.begin.column++;
- if (t.Is(Token::Type::kShiftRight)) {
- token_queue_.push_front(Token(Token::Type::kGreaterThan, source));
- } else if (t.Is(Token::Type::kGreaterThanEqual)) {
- token_queue_.push_front(Token(Token::Type::kEqual, source));
+ // Special case to split `>>` and `>=` tokens if we are looking for a `>`.
+ if (tok == Token::Type::kGreaterThan &&
+ (t.Is(Token::Type::kShiftRight) || t.Is(Token::Type::kGreaterThanEqual))) {
+ next();
+
+ // Push the second character to the token queue.
+ auto source = t.source();
+ source.range.begin.column++;
+ if (t.Is(Token::Type::kShiftRight)) {
+ token_queue_.push_front(Token(Token::Type::kGreaterThan, source));
+ } else if (t.Is(Token::Type::kGreaterThanEqual)) {
+ token_queue_.push_front(Token(Token::Type::kEqual, source));
+ }
+
+ synchronized_ = true;
+ return true;
}
- synchronized_ = true;
- return true;
- }
+ // Error cases
+ synchronized_ = false;
+ if (handle_error(t)) {
+ return false;
+ }
- // Error cases
- synchronized_ = false;
- if (handle_error(t)) {
+ std::stringstream err;
+ err << "expected '" << Token::TypeToName(tok) << "'";
+ if (!use.empty()) {
+ err << " for " << use;
+ }
+ add_error(t, err.str());
return false;
- }
-
- std::stringstream err;
- err << "expected '" << Token::TypeToName(tok) << "'";
- if (!use.empty()) {
- err << " for " << use;
- }
- add_error(t, err.str());
- return false;
}
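The '>' special case in expect() is what lets nested template-style types close with a single '>>' token: the lexer produces kShiftRight, and expect() splits it, pushing a synthetic '>' (or '=' for '>=') back onto the token queue with an adjusted column. A sketch of the case it exists for, under the same assumed scaffolding:

TEST_F(ParserImplTest, NestedType_SplitsShiftRight) {
    auto p = parser("array<vec4<f32>>");  // the closing ">>" must act as two ">"
    auto t = p->type_decl();
    ASSERT_FALSE(t.errored);
    EXPECT_TRUE(t.matched);
    EXPECT_FALSE(p->has_error()) << p->error();
}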
Expect<int32_t> ParserImpl::expect_sint(std::string_view use) {
- auto t = peek();
- if (!t.Is(Token::Type::kSintLiteral))
- return add_error(t.source(), "expected signed integer literal", use);
+ auto t = peek();
+ if (!t.Is(Token::Type::kIntLiteral) && !t.Is(Token::Type::kIntLiteral_I)) {
+ return add_error(t.source(), "expected signed integer literal", use);
+ }
- next();
- return {t.to_i32(), t.source()};
+ int64_t val = t.to_i64();
+ if ((val > std::numeric_limits<int32_t>::max()) ||
+ (val < std::numeric_limits<int32_t>::min())) {
+ // TODO(crbug.com/tint/1504): Test this when abstract int is implemented
+ return add_error(t.source(), "value overflows i32", use);
+ }
+
+ next();
+ return {static_cast<int32_t>(t.to_i64()), t.source()};
}
Expect<uint32_t> ParserImpl::expect_positive_sint(std::string_view use) {
- auto sint = expect_sint(use);
- if (sint.errored)
- return Failure::kErrored;
+ auto sint = expect_sint(use);
+ if (sint.errored) {
+ return Failure::kErrored;
+ }
- if (sint.value < 0)
- return add_error(sint.source, std::string(use) + " must be positive");
+ if (sint.value < 0) {
+ return add_error(sint.source, std::string(use) + " must be positive");
+ }
- return {static_cast<uint32_t>(sint.value), sint.source};
+ return {static_cast<uint32_t>(sint.value), sint.source};
}
-Expect<uint32_t> ParserImpl::expect_nonzero_positive_sint(
- std::string_view use) {
- auto sint = expect_sint(use);
- if (sint.errored)
- return Failure::kErrored;
+Expect<uint32_t> ParserImpl::expect_nonzero_positive_sint(std::string_view use) {
+ auto sint = expect_sint(use);
+ if (sint.errored) {
+ return Failure::kErrored;
+ }
- if (sint.value <= 0)
- return add_error(sint.source, std::string(use) + " must be greater than 0");
+ if (sint.value <= 0) {
+ return add_error(sint.source, std::string(use) + " must be greater than 0");
+ }
- return {static_cast<uint32_t>(sint.value), sint.source};
+ return {static_cast<uint32_t>(sint.value), sint.source};
}
Expect<std::string> ParserImpl::expect_ident(std::string_view use) {
- auto t = peek();
- if (t.IsIdentifier()) {
- synchronized_ = true;
- next();
+ auto t = peek();
+ if (t.IsIdentifier()) {
+ synchronized_ = true;
+ next();
- if (is_reserved(t)) {
- return add_error(t.source(),
- "'" + t.to_str() + "' is a reserved keyword");
- }
+ if (is_reserved(t)) {
+ return add_error(t.source(), "'" + t.to_str() + "' is a reserved keyword");
+ }
- return {t.to_str(), t.source()};
- }
- if (handle_error(t)) {
- return Failure::kErrored;
- }
- synchronized_ = false;
- return add_error(t.source(), "expected identifier", use);
+ return {t.to_str(), t.source()};
+ }
+ if (handle_error(t)) {
+ return Failure::kErrored;
+ }
+ synchronized_ = false;
+ return add_error(t.source(), "expected identifier", use);
}
template <typename F, typename T>
-T ParserImpl::expect_block(Token::Type start,
- Token::Type end,
- std::string_view use,
- F&& body) {
- if (!expect(use, start)) {
- return Failure::kErrored;
- }
+T ParserImpl::expect_block(Token::Type start, Token::Type end, std::string_view use, F&& body) {
+ if (!expect(use, start)) {
+ return Failure::kErrored;
+ }
- return sync(end, [&]() -> T {
- auto res = body();
+ return sync(end, [&]() -> T {
+ auto res = body();
- if (res.errored)
- return Failure::kErrored;
+ if (res.errored) {
+ return Failure::kErrored;
+ }
- if (!expect(use, end))
- return Failure::kErrored;
+ if (!expect(use, end)) {
+ return Failure::kErrored;
+ }
- return res;
- });
+ return res;
+ });
}
template <typename F, typename T>
T ParserImpl::expect_paren_block(std::string_view use, F&& body) {
- return expect_block(Token::Type::kParenLeft, Token::Type::kParenRight, use,
- std::forward<F>(body));
+ return expect_block(Token::Type::kParenLeft, Token::Type::kParenRight, use,
+ std::forward<F>(body));
}
template <typename F, typename T>
T ParserImpl::expect_brace_block(std::string_view use, F&& body) {
- return expect_block(Token::Type::kBraceLeft, Token::Type::kBraceRight, use,
- std::forward<F>(body));
+ return expect_block(Token::Type::kBraceLeft, Token::Type::kBraceRight, use,
+ std::forward<F>(body));
}
template <typename F, typename T>
T ParserImpl::expect_lt_gt_block(std::string_view use, F&& body) {
- return expect_block(Token::Type::kLessThan, Token::Type::kGreaterThan, use,
- std::forward<F>(body));
+ return expect_block(Token::Type::kLessThan, Token::Type::kGreaterThan, use,
+ std::forward<F>(body));
}
template <typename F, typename T>
T ParserImpl::sync(Token::Type tok, F&& body) {
- if (parse_depth_ >= kMaxParseDepth) {
- // We've hit a maximum parser recursive depth.
- // We can't call into body() as we might stack overflow.
- // Instead, report an error...
- add_error(peek(), "maximum parser recursive depth reached");
- // ...and try to resynchronize. If we cannot resynchronize to `tok` then
- // synchronized_ is set to false, and the parser knows that forward progress
- // is not being made.
- sync_to(tok, /* consume: */ true);
- return Failure::kErrored;
- }
+ if (parse_depth_ >= kMaxParseDepth) {
+ // We've hit a maximum parser recursive depth.
+ // We can't call into body() as we might stack overflow.
+ // Instead, report an error...
+ add_error(peek(), "maximum parser recursive depth reached");
+ // ...and try to resynchronize. If we cannot resynchronize to `tok` then
+ // synchronized_ is set to false, and the parser knows that forward progress
+ // is not being made.
+ sync_to(tok, /* consume: */ true);
+ return Failure::kErrored;
+ }
- sync_tokens_.push_back(tok);
+ sync_tokens_.push_back(tok);
- ++parse_depth_;
- auto result = body();
- --parse_depth_;
+ ++parse_depth_;
+ auto result = body();
+ --parse_depth_;
- if (sync_tokens_.back() != tok) {
- TINT_ICE(Reader, builder_.Diagnostics()) << "sync_tokens is out of sync";
- }
- sync_tokens_.pop_back();
+ if (sync_tokens_.back() != tok) {
+ TINT_ICE(Reader, builder_.Diagnostics()) << "sync_tokens is out of sync";
+ }
+ sync_tokens_.pop_back();
- if (result.errored) {
- sync_to(tok, /* consume: */ true);
- }
+ if (result.errored) {
+ sync_to(tok, /* consume: */ true);
+ }
- return result;
+ return result;
}
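sync() is both the error-recovery and the stack-safety mechanism: every bracketed production goes through it, so parse_depth_ bounds recursion and a failed body resynchronises to the closing token instead of cascading. A sketch of the depth guard, assuming the ParserImplTest/parser() scaffolding (the exact depth limit is internal, so the input is simply made far deeper than any plausible limit):

TEST_F(ParserImplTest, DeeplyNestedParens_ReportDepthError) {
    // 10000 nested parentheses comfortably exceeds kMaxParseDepth.
    std::string src = "fn f() { let x = " + std::string(10000, '(') + "1" +
                      std::string(10000, ')') + "; }";
    auto p = parser(src);
    EXPECT_FALSE(p->Parse());
    EXPECT_NE(p->error().find("maximum parser recursive depth reached"),
              std::string::npos);
}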
bool ParserImpl::sync_to(Token::Type tok, bool consume) {
- // Clear the synchronized state - gets set to true again on success.
- synchronized_ = false;
+ // Clear the synchronized state - gets set to true again on success.
+ synchronized_ = false;
- BlockCounters counters;
+ BlockCounters counters;
- for (size_t i = 0; i < kMaxResynchronizeLookahead; i++) {
- auto t = peek(i);
- if (counters.consume(t) > 0) {
- continue; // Nested block
- }
- if (!t.Is(tok) && !is_sync_token(t)) {
- continue; // Not a synchronization point
- }
+ for (size_t i = 0; i < kMaxResynchronizeLookahead; i++) {
+ auto t = peek(i);
+ if (counters.consume(t) > 0) {
+ continue; // Nested block
+ }
+ if (!t.Is(tok) && !is_sync_token(t)) {
+ continue; // Not a synchronization point
+ }
- // Synchronization point found.
+ // Synchronization point found.
- // Skip any tokens we don't understand, bringing us to just before the
- // resync point.
- while (i-- > 0) {
- next();
- }
+ // Skip any tokens we don't understand, bringing us to just before the
+ // resync point.
+ while (i-- > 0) {
+ next();
+ }
- // Is this synchronization token |tok|?
- if (t.Is(tok)) {
- if (consume) {
- next();
- }
- synchronized_ = true;
- return true;
+ // Is this synchronization token |tok|?
+ if (t.Is(tok)) {
+ if (consume) {
+ next();
+ }
+ synchronized_ = true;
+ return true;
+ }
+ break;
}
- break;
- }
- return false;
+ return false;
}
bool ParserImpl::is_sync_token(const Token& t) const {
- for (auto r : sync_tokens_) {
- if (t.Is(r)) {
- return true;
+ for (auto r : sync_tokens_) {
+ if (t.Is(r)) {
+ return true;
+ }
}
- }
- return false;
+ return false;
}
bool ParserImpl::handle_error(const Token& t) {
- // The token might itself be an error.
- if (t.IsError()) {
- synchronized_ = false;
- add_error(t.source(), t.to_str());
- return true;
- }
- return false;
+ // The token might itself be an error.
+ if (t.IsError()) {
+ synchronized_ = false;
+ add_error(t.source(), t.to_str());
+ return true;
+ }
+ return false;
}
template <typename F, typename T>
T ParserImpl::without_error(F&& body) {
- silence_errors_++;
- auto result = body();
- silence_errors_--;
- return result;
+ silence_errors_++;
+ auto result = body();
+ silence_errors_--;
+ return result;
}
ParserImpl::MultiTokenSource ParserImpl::make_source_range() {
- return MultiTokenSource(this);
+ return MultiTokenSource(this);
}
-ParserImpl::MultiTokenSource ParserImpl::make_source_range_from(
- const Source& start) {
- return MultiTokenSource(this, start);
+ParserImpl::MultiTokenSource ParserImpl::make_source_range_from(const Source& start) {
+ return MultiTokenSource(this, start);
}
} // namespace tint::reader::wgsl
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl.h b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl.h
index 9c65c97d54c..67d000437e8 100644
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl.h
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl.h
@@ -27,7 +27,7 @@
#include "src/tint/program_builder.h"
#include "src/tint/reader/wgsl/parser_impl_detail.h"
#include "src/tint/reader/wgsl/token.h"
-#include "src/tint/sem/storage_texture_type.h"
+#include "src/tint/sem/storage_texture.h"
namespace tint::ast {
class BreakStatement;
@@ -46,832 +46,809 @@ class Lexer;
/// Struct holding information for a for loop
struct ForHeader {
- /// Constructor
- /// @param init the initializer statement
- /// @param cond the condition statement
- /// @param cont the continuing statement
- ForHeader(const ast::Statement* init,
- const ast::Expression* cond,
- const ast::Statement* cont);
-
- ~ForHeader();
-
- /// The for loop initializer
- const ast::Statement* initializer = nullptr;
- /// The for loop condition
- const ast::Expression* condition = nullptr;
- /// The for loop continuing statement
- const ast::Statement* continuing = nullptr;
+ /// Constructor
+ /// @param init the initializer statement
+ /// @param cond the condition statement
+ /// @param cont the continuing statement
+ ForHeader(const ast::Statement* init, const ast::Expression* cond, const ast::Statement* cont);
+
+ ~ForHeader();
+
+ /// The for loop initializer
+ const ast::Statement* initializer = nullptr;
+ /// The for loop condition
+ const ast::Expression* condition = nullptr;
+ /// The for loop continuing statement
+ const ast::Statement* continuing = nullptr;
};
/// ParserImpl for WGSL source data
class ParserImpl {
- /// Failure holds enumerator values used for the constructing an Expect and
- /// Match in an errored state.
- struct Failure {
- enum Errored { kErrored };
- enum NoMatch { kNoMatch };
- };
-
- public:
- /// Expect is the return type of the parser methods that are expected to
- /// return a parsed value of type T, unless there was an parse error.
- /// In the case of a parse error the called method will have called
- /// add_error() and #errored will be set to true.
- template <typename T>
- struct Expect {
- /// An alias to the templated type T.
- using type = T;
-
- /// Don't allow an Expect to take a nullptr.
- inline Expect(std::nullptr_t) = delete; // NOLINT
-
- /// Constructor for a successful parse.
- /// @param val the result value of the parse
- /// @param s the optional source of the value
- template <typename U>
- inline Expect(U&& val, const Source& s = {}) // NOLINT
- : value(std::forward<U>(val)), source(s) {}
-
- /// Constructor for parse error.
- inline Expect(Failure::Errored) : errored(true) {} // NOLINT
-
- /// Copy constructor
- inline Expect(const Expect&) = default;
- /// Move constructor
- inline Expect(Expect&&) = default;
- /// Assignment operator
- /// @return this Expect
- inline Expect& operator=(const Expect&) = default;
- /// Assignment move operator
- /// @return this Expect
- inline Expect& operator=(Expect&&) = default;
-
- /// @return a pointer to the returned value. If T is a pointer or
- /// std::unique_ptr, operator->() automatically dereferences so that the
- /// return type will always be a pointer to a non-pointer type. #errored
- /// must be false to call.
- inline typename detail::OperatorArrow<T>::type operator->() {
- TINT_ASSERT(Reader, !errored);
- return detail::OperatorArrow<T>::ptr(value);
+ /// Failure holds enumerator values used for the constructing an Expect and
+ /// Match in an errored state.
+ struct Failure {
+ enum Errored { kErrored };
+ enum NoMatch { kNoMatch };
+ };
+
+ public:
+ /// Expect is the return type of the parser methods that are expected to
+ /// return a parsed value of type T, unless there was an parse error.
+ /// return a parsed value of type T, unless there was a parse error.
+ /// In the case of a parse error the called method will have called
+ /// add_error() and #errored will be set to true.
+ template <typename T>
+ struct Expect {
+ /// An alias to the templated type T.
+ using type = T;
+
+ /// Don't allow an Expect to take a nullptr.
+ inline Expect(std::nullptr_t) = delete; // NOLINT
+
+ /// Constructor for a successful parse.
+ /// @param val the result value of the parse
+ /// @param s the optional source of the value
+ template <typename U>
+ inline Expect(U&& val, const Source& s = {}) // NOLINT
+ : value(std::forward<U>(val)), source(s) {}
+
+ /// Constructor for parse error.
+ inline Expect(Failure::Errored) : errored(true) {} // NOLINT
+
+ /// Copy constructor
+ inline Expect(const Expect&) = default;
+ /// Move constructor
+ inline Expect(Expect&&) = default;
+ /// Assignment operator
+ /// @return this Expect
+ inline Expect& operator=(const Expect&) = default;
+ /// Assignment move operator
+ /// @return this Expect
+ inline Expect& operator=(Expect&&) = default;
+
+ /// @return a pointer to the returned value. If T is a pointer or
+ /// std::unique_ptr, operator->() automatically dereferences so that the
+ /// return type will always be a pointer to a non-pointer type. #errored
+ /// must be false to call.
+ inline typename detail::OperatorArrow<T>::type operator->() {
+ TINT_ASSERT(Reader, !errored);
+ return detail::OperatorArrow<T>::ptr(value);
+ }
+
+ /// The expected value of a successful parse.
+ /// Zero-initialized when there was a parse error.
+ T value{};
+ /// Optional source of the value.
+ Source source;
+ /// True if there was an error parsing.
+ bool errored = false;
+ };
+
+ /// Maybe is the return type of the parser methods that attempts to match a
+ /// grammar and return a parsed value of type T, or may parse part of the
+ /// grammar and then hit a parse error.
+ /// In the case of a successful grammar match, the Maybe will have #matched
+ /// set to true.
+ /// In the case of a parse error the called method will have called
+ /// add_error() and the Maybe will have #errored set to true.
+ template <typename T>
+ struct Maybe {
+ inline Maybe(std::nullptr_t) = delete; // NOLINT
+
+ /// Constructor for a successful parse.
+ /// @param val the result value of the parse
+ /// @param s the optional source of the value
+ template <typename U>
+ inline Maybe(U&& val, const Source& s = {}) // NOLINT
+ : value(std::forward<U>(val)), source(s), matched(true) {}
+
+ /// Constructor for parse error state.
+ inline Maybe(Failure::Errored) : errored(true) {} // NOLINT
+
+ /// Constructor for the no-match state.
+ inline Maybe(Failure::NoMatch) {} // NOLINT
+
+ /// Constructor from an Expect.
+ /// @param e the Expect to copy this Maybe from
+ template <typename U>
+ inline Maybe(const Expect<U>& e) // NOLINT
+ : value(e.value), source(e.value), errored(e.errored), matched(!e.errored) {}
+
+ /// Move from an Expect.
+ /// @param e the Expect to move this Maybe from
+ template <typename U>
+ inline Maybe(Expect<U>&& e) // NOLINT
+ : value(std::move(e.value)),
+ source(std::move(e.source)),
+ errored(e.errored),
+ matched(!e.errored) {}
+
+ /// Copy constructor
+ inline Maybe(const Maybe&) = default;
+ /// Move constructor
+ inline Maybe(Maybe&&) = default;
+ /// Assignment operator
+ /// @return this Maybe
+ inline Maybe& operator=(const Maybe&) = default;
+ /// Assignment move operator
+ /// @return this Maybe
+ inline Maybe& operator=(Maybe&&) = default;
+
+ /// @return a pointer to the returned value. If T is a pointer or
+ /// std::unique_ptr, operator->() automatically dereferences so that the
+ /// return type will always be a pointer to a non-pointer type. #errored
+ /// must be false to call.
+ inline typename detail::OperatorArrow<T>::type operator->() {
+ TINT_ASSERT(Reader, !errored);
+ return detail::OperatorArrow<T>::ptr(value);
+ }
+
+ /// The value of a successful parse.
+ /// Zero-initialized when there was a parse error.
+ T value{};
+ /// Optional source of the value.
+ Source source;
+ /// True if there was an error parsing.
+ bool errored = false;
+ /// True if there was a match when parsing.
+ bool matched = false;
+ };
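    // Illustrative only (hypothetical caller, not declared in this class): every
    // Maybe<T> returned by a parser method is consumed with the same three checks,
    // which is the pattern used throughout parser_impl.cc:
    //
    //   auto expr = logical_or_expression();
    //   if (expr.errored) {
    //       return Failure::kErrored;   // a diagnostic was already recorded via add_error()
    //   }
    //   if (!expr.matched) {
    //       return Failure::kNoMatch;   // production did not apply; caller may try another
    //   }
    //   use(expr.value);                // success; expr.source locates the match
    //                                   // (use() is a placeholder, not a real helper)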
+
+ /// TypedIdentifier holds a parsed identifier and type. Returned by
+ /// variable_ident_decl().
+ struct TypedIdentifier {
+ /// Constructor
+ TypedIdentifier();
+ /// Copy constructor
+ /// @param other the FunctionHeader to copy
+ TypedIdentifier(const TypedIdentifier& other);
+ /// Constructor
+ /// @param type_in parsed type
+ /// @param name_in parsed identifier
+ /// @param source_in source to the identifier
+ TypedIdentifier(const ast::Type* type_in, std::string name_in, Source source_in);
+ /// Destructor
+ ~TypedIdentifier();
+
+ /// Parsed type. May be nullptr for inferred types.
+ const ast::Type* type = nullptr;
+ /// Parsed identifier.
+ std::string name;
+ /// Source to the identifier.
+ Source source;
+ };
+
+ /// FunctionHeader contains the parsed information for a function header.
+ struct FunctionHeader {
+ /// Constructor
+ FunctionHeader();
+ /// Copy constructor
+ /// @param other the FunctionHeader to copy
+ FunctionHeader(const FunctionHeader& other);
+ /// Constructor
+ /// @param src parsed header source
+ /// @param n function name
+ /// @param p function parameters
+ /// @param ret_ty function return type
+ /// @param ret_attrs return type attributes
+ FunctionHeader(Source src,
+ std::string n,
+ ast::VariableList p,
+ const ast::Type* ret_ty,
+ ast::AttributeList ret_attrs);
+ /// Destructor
+ ~FunctionHeader();
+ /// Assignment operator
+ /// @param other the FunctionHeader to copy
+ /// @returns this FunctionHeader
+ FunctionHeader& operator=(const FunctionHeader& other);
+
+ /// Parsed header source
+ Source source;
+ /// Function name
+ std::string name;
+ /// Function parameters
+ ast::VariableList params;
+ /// Function return type
+ const ast::Type* return_type = nullptr;
+ /// Function return type attributes
+ ast::AttributeList return_type_attributes;
+ };
+
+ /// VarDeclInfo contains the parsed information for variable declaration.
+ struct VarDeclInfo {
+ /// Constructor
+ VarDeclInfo();
+ /// Copy constructor
+ /// @param other the VarDeclInfo to copy
+ VarDeclInfo(const VarDeclInfo& other);
+ /// Constructor
+ /// @param source_in variable declaration source
+ /// @param name_in variable name
+ /// @param storage_class_in variable storage class
+ /// @param access_in variable access control
+ /// @param type_in variable type
+ VarDeclInfo(Source source_in,
+ std::string name_in,
+ ast::StorageClass storage_class_in,
+ ast::Access access_in,
+ const ast::Type* type_in);
+ /// Destructor
+ ~VarDeclInfo();
+
+ /// Variable declaration source
+ Source source;
+ /// Variable name
+ std::string name;
+ /// Variable storage class
+ ast::StorageClass storage_class = ast::StorageClass::kNone;
+ /// Variable access control
+ ast::Access access = ast::Access::kUndefined;
+ /// Variable type
+ const ast::Type* type = nullptr;
+ };
+
+ /// VariableQualifier contains the parsed information for a variable qualifier
+ struct VariableQualifier {
+ /// The variable's storage class
+ ast::StorageClass storage_class = ast::StorageClass::kNone;
+ /// The variable's access control
+ ast::Access access = ast::Access::kUndefined;
+ };
+
+ /// Creates a new parser using the given file
+ /// @param file the input source file to parse
+ explicit ParserImpl(Source::File const* file);
+ ~ParserImpl();
+
+ /// Run the parser
+ /// @returns true if the parse was successful, false otherwise.
+ bool Parse();
+
+ /// set_max_diagnostics sets the maximum number of reported errors before
+ /// aborting parsing.
+ /// @param limit the new maximum number of errors
+ void set_max_errors(size_t limit) { max_errors_ = limit; }
+
+ /// @return the number of maximum number of reported errors before aborting
+ /// parsing.
+ size_t get_max_errors() const { return max_errors_; }
+
+ /// @returns true if an error was encountered.
+ bool has_error() const { return builder_.Diagnostics().contains_errors(); }
+
+ /// @returns the parser error string
+ std::string error() const {
+ diag::Formatter formatter{{false, false, false, false}};
+ return formatter.format(builder_.Diagnostics());
}
- /// The expected value of a successful parse.
- /// Zero-initialized when there was a parse error.
- T value{};
- /// Optional source of the value.
- Source source;
- /// True if there was a error parsing.
- bool errored = false;
- };
-
- /// Maybe is the return type of the parser methods that attempts to match a
- /// grammar and return a parsed value of type T, or may parse part of the
- /// grammar and then hit a parse error.
- /// In the case of a successful grammar match, the Maybe will have #matched
- /// set to true.
- /// In the case of a parse error the called method will have called
- /// add_error() and the Maybe will have #errored set to true.
- template <typename T>
- struct Maybe {
- inline Maybe(std::nullptr_t) = delete; // NOLINT
-
- /// Constructor for a successful parse.
- /// @param val the result value of the parse
- /// @param s the optional source of the value
- template <typename U>
- inline Maybe(U&& val, const Source& s = {}) // NOLINT
- : value(std::forward<U>(val)), source(s), matched(true) {}
-
- /// Constructor for parse error state.
- inline Maybe(Failure::Errored) : errored(true) {} // NOLINT
-
- /// Constructor for the no-match state.
- inline Maybe(Failure::NoMatch) {} // NOLINT
-
- /// Constructor from an Expect.
- /// @param e the Expect to copy this Maybe from
- template <typename U>
- inline Maybe(const Expect<U>& e) // NOLINT
- : value(e.value),
- source(e.value),
- errored(e.errored),
- matched(!e.errored) {}
-
- /// Move from an Expect.
- /// @param e the Expect to move this Maybe from
- template <typename U>
- inline Maybe(Expect<U>&& e) // NOLINT
- : value(std::move(e.value)),
- source(std::move(e.source)),
- errored(e.errored),
- matched(!e.errored) {}
-
- /// Copy constructor
- inline Maybe(const Maybe&) = default;
- /// Move constructor
- inline Maybe(Maybe&&) = default;
- /// Assignment operator
- /// @return this Maybe
- inline Maybe& operator=(const Maybe&) = default;
- /// Assignment move operator
- /// @return this Maybe
- inline Maybe& operator=(Maybe&&) = default;
-
- /// @return a pointer to the returned value. If T is a pointer or
- /// std::unique_ptr, operator->() automatically dereferences so that the
- /// return type will always be a pointer to a non-pointer type. #errored
- /// must be false to call.
- inline typename detail::OperatorArrow<T>::type operator->() {
- TINT_ASSERT(Reader, !errored);
- return detail::OperatorArrow<T>::ptr(value);
+ /// @returns the Program. The program builder in the parser will be reset
+ /// after this.
+ Program program() { return Program(std::move(builder_)); }
+
+ /// @returns the program builder.
+ ProgramBuilder& builder() { return builder_; }
+
+ /// @returns the next token
+ Token next();
+ /// Peeks ahead and returns the token at `idx` ahead of the current position
+ /// @param idx the index of the token to return
+ /// @returns the token `idx` positions ahead without advancing
+ Token peek(size_t idx = 0);
+ /// Peeks ahead and returns true if the token at `idx` ahead of the current
+ /// position is |tok|
+ /// @param idx the index of the token to return
+ /// @param tok the token to look for
+ /// @returns true if the token `idx` positions ahead is |tok|
+ bool peek_is(Token::Type tok, size_t idx = 0);
+ /// @returns the last token that was returned by `next()`
+ Token last_token() const;
+ /// Appends an error at `t` with the message `msg`
+ /// @param t the token to associate the error with
+ /// @param msg the error message
+ /// @return `Failure::Errored::kError` so that you can combine an add_error()
+ /// call and return on the same line.
+ Failure::Errored add_error(const Token& t, const std::string& msg);
+ /// Appends an error raised when parsing `use` at `t` with the message
+ /// `msg`
+ /// @param source the source to associate the error with
+ /// @param msg the error message
+ /// @param use a description of what was being parsed when the error was
+ /// raised.
+ /// @return `Failure::Errored::kError` so that you can combine an add_error()
+ /// call and return on the same line.
+ Failure::Errored add_error(const Source& source, std::string_view msg, std::string_view use);
+ /// Appends an error at `source` with the message `msg`
+ /// @param source the source to associate the error with
+ /// @param msg the error message
+ /// @return `Failure::Errored::kError` so that you can combine an add_error()
+ /// call and return on the same line.
+ Failure::Errored add_error(const Source& source, const std::string& msg);
+ /// Appends a deprecated-language-feature warning at `source` with the message
+ /// `msg`
+ /// @param source the source to associate the error with
+ /// @param msg the warning message
+ void deprecated(const Source& source, const std::string& msg);
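A minimal sketch of the add_error() idiom documented above (the surrounding method is hypothetical): because add_error() returns `Failure::Errored::kError`, which converts into the errored state of a Maybe or Expect, reporting and returning fit in one statement.

    // Hypothetical helper, for illustration only:
    Maybe<const ast::Statement*> ParserImpl::some_block_stmt() {
        if (!peek_is(Token::Type::kBraceLeft)) {
            return add_error(peek(), "expected '{' for block");  // report + early return in one line
        }
        return expect_body_stmt();  // the Expect<ast::BlockStatement*> converts into the Maybe
    }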
+ /// Parses the `translation_unit` grammar element
+ void translation_unit();
+ /// Parses the `enable_directive` grammar element, erroring on parse failure.
+ /// @return true on parse success, otherwise an error or no-match.
+ Maybe<bool> enable_directive();
+ /// Parses the `global_decl` grammar element, erroring on parse failure.
+ /// @return true on parse success, otherwise an error or no-match.
+ Maybe<bool> global_decl();
+ /// Parses a `global_variable_decl` grammar element with the initial
+ /// `variable_attribute_list*` provided as `attrs`
+ /// @returns the variable parsed or nullptr
+ /// @param attrs the list of attributes for the variable declaration.
+ Maybe<const ast::Variable*> global_variable_decl(ast::AttributeList& attrs);
+ /// Parses a `global_constant_decl` grammar element with the initial
+ /// `variable_attribute_list*` provided as `attrs`
+ /// @returns the const object or nullptr
+ /// @param attrs the list of attributes for the constant declaration.
+ Maybe<const ast::Variable*> global_constant_decl(ast::AttributeList& attrs);
+ /// Parses a `variable_decl` grammar element
+ /// @param allow_inferred if true, do not fail if variable decl does not
+ /// specify type
+ /// @returns the parsed variable declaration info
+ Maybe<VarDeclInfo> variable_decl(bool allow_inferred = false);
+ /// Parses a `variable_ident_decl` grammar element, erroring on parse
+ /// failure.
+ /// @param use a description of what was being parsed if an error was raised.
+ /// @param allow_inferred if true, do not fail if variable decl does not
+ /// specify type
+ /// @returns the identifier and type parsed or empty otherwise
+ Expect<TypedIdentifier> expect_variable_ident_decl(std::string_view use,
+ bool allow_inferred = false);
+ /// Parses a `variable_qualifier` grammar element
+ /// @returns the variable qualifier information
+ Maybe<VariableQualifier> variable_qualifier();
+ /// Parses a `type_alias` grammar element
+ /// @returns the type alias or nullptr on error
+ Maybe<const ast::Alias*> type_alias();
+ /// Parses a `type_decl` grammar element
+ /// @returns the parsed Type or nullptr if none matched.
+ Maybe<const ast::Type*> type_decl();
+ /// Parses a `storage_class` grammar element, erroring on parse failure.
+ /// @param use a description of what was being parsed if an error was raised.
+ /// @returns the storage class or StorageClass::kNone if none matched
+ Expect<ast::StorageClass> expect_storage_class(std::string_view use);
+ /// Parses a `struct_decl` grammar element.
+ /// @returns the struct type or nullptr on error
+ Maybe<const ast::Struct*> struct_decl();
+ /// Parses a `struct_body_decl` grammar element, erroring on parse failure.
+ /// @returns the struct members
+ Expect<ast::StructMemberList> expect_struct_body_decl();
+ /// Parses a `struct_member` grammar element, erroring on parse failure.
+ /// @returns the struct member or nullptr
+ Expect<ast::StructMember*> expect_struct_member();
+ /// Parses a `function_decl` grammar element with the initial
+ /// `function_attribute_decl*` provided as `attrs`.
+ /// @param attrs the list of attributes for the function declaration.
+ /// @returns the parsed function, nullptr otherwise
+ Maybe<const ast::Function*> function_decl(ast::AttributeList& attrs);
+ /// Parses a `texture_samplers` grammar element
+ /// @returns the parsed Type or nullptr if none matched.
+ Maybe<const ast::Type*> texture_samplers();
+ /// Parses a `sampler` grammar element
+ /// @returns the parsed Type or nullptr if none matched.
+ Maybe<const ast::Type*> sampler();
+ /// Parses a `multisampled_texture` grammar element
+ /// @returns the multisample texture dimension or kNone if none
+ /// matched.
+ Maybe<const ast::TextureDimension> multisampled_texture();
+ /// Parses a `sampled_texture` grammar element
+ /// @returns the sample texture dimension or kNone if none matched.
+ Maybe<const ast::TextureDimension> sampled_texture();
+ /// Parses a `storage_texture` grammar element
+ /// @returns the storage texture dimension.
+ /// Returns kNone if none matched.
+ Maybe<const ast::TextureDimension> storage_texture();
+ /// Parses a `depth_texture` grammar element
+ /// @returns the parsed Type or nullptr if none matched.
+ Maybe<const ast::Type*> depth_texture();
+ /// Parses a 'texture_external_type' grammar element
+ /// @returns the parsed Type or nullptr if none matched
+ Maybe<const ast::Type*> external_texture();
+ /// Parses a `texel_format` grammar element
+ /// @param use a description of what was being parsed if an error was raised
+ /// @returns the texel format or kNone if none matched.
+ Expect<ast::TexelFormat> expect_texel_format(std::string_view use);
+ /// Parses a `function_header` grammar element
+ /// @returns the parsed function header
+ Maybe<FunctionHeader> function_header();
+ /// Parses a `param_list` grammar element, erroring on parse failure.
+ /// @returns the parsed variables
+ Expect<ast::VariableList> expect_param_list();
+ /// Parses a `param` grammar element, erroring on parse failure.
+ /// @returns the parsed variable
+ Expect<ast::Variable*> expect_param();
+ /// Parses a `pipeline_stage` grammar element, erroring if the next token does
+ /// not match a stage name.
+ /// @returns the pipeline stage.
+ Expect<ast::PipelineStage> expect_pipeline_stage();
+ /// Parses an access control identifier, erroring if the next token does not
+ /// match a valid access control.
+ /// @param use a description of what was being parsed if an error was raised
+ /// @returns the parsed access control.
+ Expect<ast::Access> expect_access(std::string_view use);
+ /// Parses a builtin identifier, erroring if the next token does not match a
+ /// valid builtin name.
+ /// @returns the parsed builtin.
+ Expect<ast::Builtin> expect_builtin();
+ /// Parses a `body_stmt` grammar element, erroring on parse failure.
+ /// @returns the parsed statements
+ Expect<ast::BlockStatement*> expect_body_stmt();
+ /// Parses a `paren_rhs_stmt` grammar element, erroring on parse failure.
+ /// @returns the parsed element or nullptr
+ Expect<const ast::Expression*> expect_paren_rhs_stmt();
+ /// Parses a `statements` grammar element
+ /// @returns the statements parsed
+ Expect<ast::StatementList> expect_statements();
+ /// Parses a `statement` grammar element
+ /// @returns the parsed statement or nullptr
+ Maybe<const ast::Statement*> statement();
+ /// Parses a `break_stmt` grammar element
+ /// @returns the parsed statement or nullptr
+ Maybe<const ast::BreakStatement*> break_stmt();
+ /// Parses a `return_stmt` grammar element
+ /// @returns the parsed statement or nullptr
+ Maybe<const ast::ReturnStatement*> return_stmt();
+ /// Parses a `continue_stmt` grammar element
+ /// @returns the parsed statement or nullptr
+ Maybe<const ast::ContinueStatement*> continue_stmt();
+ /// Parses a `variable_stmt` grammar element
+ /// @returns the parsed variable or nullptr
+ Maybe<const ast::VariableDeclStatement*> variable_stmt();
+ /// Parses a `if_stmt` grammar element
+ /// @returns the parsed statement or nullptr
+ Maybe<const ast::IfStatement*> if_stmt();
+ /// Parses a `switch_stmt` grammar element
+ /// @returns the parsed statement or nullptr
+ Maybe<const ast::SwitchStatement*> switch_stmt();
+ /// Parses a `switch_body` grammar element
+ /// @returns the parsed statement or nullptr
+ Maybe<const ast::CaseStatement*> switch_body();
+ /// Parses a `case_selectors` grammar element
+ /// @returns the list of literals
+ Expect<ast::CaseSelectorList> expect_case_selectors();
+ /// Parses a `case_body` grammar element
+ /// @returns the parsed statements
+ Maybe<const ast::BlockStatement*> case_body();
+ /// Parses a `func_call_stmt` grammar element
+ /// @returns the parsed function call or nullptr
+ Maybe<const ast::CallStatement*> func_call_stmt();
+ /// Parses a `loop_stmt` grammar element
+ /// @returns the parsed loop or nullptr
+ Maybe<const ast::LoopStatement*> loop_stmt();
+ /// Parses a `for_header` grammar element, erroring on parse failure.
+ /// @returns the parsed for header or nullptr
+ Expect<std::unique_ptr<ForHeader>> expect_for_header();
+ /// Parses a `for_stmt` grammar element
+ /// @returns the parsed for loop or nullptr
+ Maybe<const ast::ForLoopStatement*> for_stmt();
+ /// Parses a `continuing_stmt` grammar element
+ /// @returns the parsed statements
+ Maybe<const ast::BlockStatement*> continuing_stmt();
+ /// Parses a `const_literal` grammar element
+ /// @returns the const literal parsed or nullptr if none found
+ Maybe<const ast::LiteralExpression*> const_literal();
+ /// Parses a `const_expr` grammar element, erroring on parse failure.
+ /// @returns the parsed constructor expression or nullptr on error
+ Expect<const ast::Expression*> expect_const_expr();
+ /// Parses a `primary_expression` grammar element
+ /// @returns the parsed expression or nullptr
+ Maybe<const ast::Expression*> primary_expression();
+ /// Parses a `argument_expression_list` grammar element, erroring on parse
+ /// failure.
+ /// @param use a description of what was being parsed if an error was raised
+ /// @returns the list of arguments
+ Expect<ast::ExpressionList> expect_argument_expression_list(std::string_view use);
+ /// Parses the recursive portion of the postfix_expression
+ /// @param prefix the left side of the expression
+ /// @returns the parsed expression or nullptr
+ Maybe<const ast::Expression*> postfix_expression(const ast::Expression* prefix);
+ /// Parses a `singular_expression` grammar element
+ /// @returns the parsed expression or nullptr
+ Maybe<const ast::Expression*> singular_expression();
+ /// Parses a `unary_expression` grammar element
+ /// @returns the parsed expression or nullptr
+ Maybe<const ast::Expression*> unary_expression();
+ /// Parses the recursive part of the `multiplicative_expression`, erroring on
+ /// parse failure.
+ /// @param lhs the left side of the expression
+ /// @returns the parsed expression or nullptr
+ Expect<const ast::Expression*> expect_multiplicative_expr(const ast::Expression* lhs);
+ /// Parses the `multiplicative_expression` grammar element
+ /// @returns the parsed expression or nullptr
+ Maybe<const ast::Expression*> multiplicative_expression();
+ /// Parses the recursive part of the `additive_expression`, erroring on parse
+ /// failure.
+ /// @param lhs the left side of the expression
+ /// @returns the parsed expression or nullptr
+ Expect<const ast::Expression*> expect_additive_expr(const ast::Expression* lhs);
+ /// Parses the `additive_expression` grammar element
+ /// @returns the parsed expression or nullptr
+ Maybe<const ast::Expression*> additive_expression();
+ /// Parses the recursive part of the `shift_expression`, erroring on parse
+ /// failure.
+ /// @param lhs the left side of the expression
+ /// @returns the parsed expression or nullptr
+ Expect<const ast::Expression*> expect_shift_expr(const ast::Expression* lhs);
+ /// Parses the `shift_expression` grammar element
+ /// @returns the parsed expression or nullptr
+ Maybe<const ast::Expression*> shift_expression();
+ /// Parses the recursive part of the `relational_expression`, erroring on
+ /// parse failure.
+ /// @param lhs the left side of the expression
+ /// @returns the parsed expression or nullptr
+ Expect<const ast::Expression*> expect_relational_expr(const ast::Expression* lhs);
+ /// Parses the `relational_expression` grammar element
+ /// @returns the parsed expression or nullptr
+ Maybe<const ast::Expression*> relational_expression();
+ /// Parses the recursive part of the `equality_expression`, erroring on parse
+ /// failure.
+ /// @param lhs the left side of the expression
+ /// @returns the parsed expression or nullptr
+ Expect<const ast::Expression*> expect_equality_expr(const ast::Expression* lhs);
+ /// Parses the `equality_expression` grammar element
+ /// @returns the parsed expression or nullptr
+ Maybe<const ast::Expression*> equality_expression();
+ /// Parses the recursive part of the `and_expression`, erroring on parse
+ /// failure.
+ /// @param lhs the left side of the expression
+ /// @returns the parsed expression or nullptr
+ Expect<const ast::Expression*> expect_and_expr(const ast::Expression* lhs);
+ /// Parses the `and_expression` grammar element
+ /// @returns the parsed expression or nullptr
+ Maybe<const ast::Expression*> and_expression();
+ /// Parses the recursive part of the `exclusive_or_expression`, erroring on
+ /// parse failure.
+ /// @param lhs the left side of the expression
+ /// @returns the parsed expression or nullptr
+ Expect<const ast::Expression*> expect_exclusive_or_expr(const ast::Expression* lhs);
+ /// Parses the `exclusive_or_expression` grammar element
+ /// @returns the parsed expression or nullptr
+ Maybe<const ast::Expression*> exclusive_or_expression();
+ /// Parses the recursive part of the `inclusive_or_expression`, erroring on
+ /// parse failure.
+ /// @param lhs the left side of the expression
+ /// @returns the parsed expression or nullptr
+ Expect<const ast::Expression*> expect_inclusive_or_expr(const ast::Expression* lhs);
+ /// Parses the `inclusive_or_expression` grammar element
+ /// @returns the parsed expression or nullptr
+ Maybe<const ast::Expression*> inclusive_or_expression();
+ /// Parses the recursive part of the `logical_and_expression`, erroring on
+ /// parse failure.
+ /// @param lhs the left side of the expression
+ /// @returns the parsed expression or nullptr
+ Expect<const ast::Expression*> expect_logical_and_expr(const ast::Expression* lhs);
+ /// Parses a `logical_and_expression` grammar element
+ /// @returns the parsed expression or nullptr
+ Maybe<const ast::Expression*> logical_and_expression();
+ /// Parses the recursive part of the `logical_or_expression`, erroring on
+ /// parse failure.
+ /// @param lhs the left side of the expression
+ /// @returns the parsed expression or nullptr
+ Expect<const ast::Expression*> expect_logical_or_expr(const ast::Expression* lhs);
+ /// Parses a `logical_or_expression` grammar element
+ /// @returns the parsed expression or nullptr
+ Maybe<const ast::Expression*> logical_or_expression();
+ /// Parses a `compound_assignment_operator` grammar element
+ /// @returns the parsed compound assignment operator
+ Maybe<ast::BinaryOp> compound_assignment_operator();
+ /// Parses a `assignment_stmt` grammar element
+ /// @returns the parsed assignment or nullptr
+ Maybe<const ast::Statement*> assignment_stmt();
+ /// Parses one or more attribute lists.
+ /// @return the parsed attribute list, or an empty list on error.
+ Maybe<ast::AttributeList> attribute_list();
+ /// Parses a single attribute of the following types:
+ /// * `struct_attribute`
+ /// * `struct_member_attribute`
+ /// * `array_attribute`
+ /// * `variable_attribute`
+ /// * `global_const_attribute`
+ /// * `function_attribute`
+ /// @return the parsed attribute, or nullptr.
+ Maybe<const ast::Attribute*> attribute();
+ /// Parses a single attribute, reporting an error if the next token does not
+ /// represent an attribute.
+ /// @see #attribute for the full list of attributes this method parses.
+ /// @return the parsed attribute, or nullptr on error.
+ Expect<const ast::Attribute*> expect_attribute();
+
+ private:
+ /// ReturnType resolves to the return type for the function or lambda F.
+ template <typename F>
+ using ReturnType = typename std::invoke_result<F>::type;
+
+ /// ResultType resolves to `T` for a `RESULT` of type Expect<T>.
+ template <typename RESULT>
+ using ResultType = typename RESULT::type;
+
+ /// @returns true and consumes the next token if it equals `tok`
+ /// @param source if not nullptr, the next token's source is written to this
+ /// pointer, regardless of success or error
+ bool match(Token::Type tok, Source* source = nullptr);
+ /// Errors if the next token is not equal to `tok`
+ /// Consumes the next token on match.
+ /// expect() also updates #synchronized_, setting it to `true` if the next
+ /// token is equal to `tok`, otherwise `false`.
+ /// @param use a description of what was being parsed if an error was raised.
+ /// @param tok the token to test against
+ /// @returns true if the next token equals `tok`
+ bool expect(std::string_view use, Token::Type tok);
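An illustrative sketch of the match()/expect() split (the kColon and kComma token names are assumed here, not taken from this header): expect() reports an error and updates #synchronized_ when a required token is missing, while match() quietly consumes a token only if it is present.

    // Inside a ParserImpl method returning a Maybe or Expect:
    if (!expect("struct member", Token::Type::kColon)) {  // required ':' -- error already reported
        return Failure::Errored::kError;
    }
    match(Token::Type::kComma);  // optional trailing ',' -- consumed only if present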
+ /// Parses a signed integer from the next token in the stream, erroring if the
+ /// next token is not a signed integer.
+ /// Consumes the next token on match.
+ /// @param use a description of what was being parsed if an error was raised
+ /// @returns the parsed integer.
+ Expect<int32_t> expect_sint(std::string_view use);
+ /// Parses a signed integer from the next token in the stream, erroring if
+ /// the next token is not a signed integer or is negative.
+ /// Consumes the next token if it is a signed integer (not necessarily
+ /// negative).
+ /// @param use a description of what was being parsed if an error was raised
+ /// @returns the parsed integer.
+ Expect<uint32_t> expect_positive_sint(std::string_view use);
+ /// Parses a non-zero signed integer from the next token in the stream,
+ /// erroring if the next token is not a signed integer or is less than 1.
+ /// Consumes the next token if it is a signed integer (not necessarily
+ /// >= 1).
+ /// @param use a description of what was being parsed if an error was raised
+ /// @returns the parsed integer.
+ Expect<uint32_t> expect_nonzero_positive_sint(std::string_view use);
+ /// Errors if the next token is not an identifier.
+ /// Consumes the next token on match.
+ /// @param use a description of what was being parsed if an error was raised
+ /// @returns the parsed identifier.
+ Expect<std::string> expect_ident(std::string_view use);
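Illustrative only (the attribute name and error strings are invented): the token-level expect_* helpers above are what attribute parsing leans on, for example when reading the integer argument of something like `@location(1)`:

    auto name = expect_ident("attribute");  // Expect<std::string>
    if (name.errored) {
        return Failure::Errored::kError;
    }
    auto value = expect_positive_sint("location attribute");  // Expect<uint32_t>, rejects negatives
    if (value.errored) {
        return Failure::Errored::kError;
    }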
+ /// Parses a lexical block starting with the token `start` and ending with
+ /// the token `end`. `body` is called to parse the lexical block body
+ /// between the `start` and `end` tokens. If the `start` or `end` tokens
+ /// are not matched then an error is generated and a zero-initialized `T` is
+ /// returned. If `body` raises an error while parsing then a zero-initialized
+ /// `T` is returned.
+ /// @param start the token that begins the lexical block
+ /// @param end the token that ends the lexical block
+ /// @param use a description of what was being parsed if an error was raised
+ /// @param body a function or lambda that is called to parse the lexical block
+ /// body, with the signature: `Expect<Result>()` or `Maybe<Result>()`.
+ /// @return the value returned by `body` if no errors are raised, otherwise
+ /// an Expect with error state.
+ template <typename F, typename T = ReturnType<F>>
+ T expect_block(Token::Type start, Token::Type end, std::string_view use, F&& body);
+ /// A convenience function that calls expect_block() passing
+ /// `Token::Type::kParenLeft` and `Token::Type::kParenRight` for the `start`
+ /// and `end` arguments, respectively.
+ /// @param use a description of what was being parsed if an error was raised
+ /// @param body a function or lambda that is called to parse the lexical block
+ /// body, with the signature: `Expect<Result>()` or `Maybe<Result>()`.
+ /// @return the value returned by `body` if no errors are raised, otherwise
+ /// an Expect with error state.
+ template <typename F, typename T = ReturnType<F>>
+ T expect_paren_block(std::string_view use, F&& body);
+ /// A convenience function that calls `expect_block` passing
+ /// `Token::Type::kBraceLeft` and `Token::Type::kBraceRight` for the `start`
+ /// and `end` arguments, respectively.
+ /// @param use a description of what was being parsed if an error was raised
+ /// @param body a function or lambda that is called to parse the lexical block
+ /// body, with the signature: `Expect<Result>()` or `Maybe<Result>()`.
+ /// @return the value returned by `body` if no errors are raised, otherwise
+ /// an Expect with error state.
+ template <typename F, typename T = ReturnType<F>>
+ T expect_brace_block(std::string_view use, F&& body);
+ /// A convenience function that calls `expect_block` passing
+ /// `Token::Type::kLessThan` and `Token::Type::kGreaterThan` for the `start`
+ /// and `end` arguments, respectively.
+ /// @param use a description of what was being parsed if an error was raised
+ /// @param body a function or lambda that is called to parse the lexical block
+ /// body, with the signature: `Expect<Result>()` or `Maybe<Result>()`.
+ /// @return the value returned by `body` if no errors are raised, otherwise
+ /// an Expect with error state.
+ template <typename F, typename T = ReturnType<F>>
+ T expect_lt_gt_block(std::string_view use, F&& body);
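A sketch of how the expect_*_block() helpers compose with the documented `Expect<Result>()` body signature (the surrounding context is hypothetical): the helper consumes the delimiters and propagates the error state, while the lambda only parses what sits between them.

    auto params = expect_paren_block("function declaration", [&]() -> Expect<ast::VariableList> {
        return expect_param_list();  // runs between '(' and ')'
    });
    if (params.errored) {
        return Failure::Errored::kError;
    }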
+
+ /// sync() calls the function `func`, and attempts to resynchronize the
+ /// parser to the next found resynchronization token if `func` fails. If the
+ /// next found resynchronization token is `tok`, then sync will also consume
+ /// `tok`.
+ ///
+ /// sync() will transiently add `tok` to the parser's stack of
+ /// synchronization tokens for the duration of the call to `func`. Once @p
+ /// func returns,
+ /// `tok` is removed from the stack of resynchronization tokens. sync calls
+ /// may be nested, and so the number of resynchronization tokens is equal to
+ /// the number of sync() calls in the current stack frame.
+ ///
+ /// sync() updates #synchronized_, setting it to `true` if the next
+ /// resynchronization token found was `tok`, otherwise `false`.
+ ///
+ /// @param tok the token to attempt to synchronize the parser to if `func`
+ /// fails.
+ /// @param func a function or lambda with the signature: `Expect<Result>()` or
+ /// `Maybe<Result>()`.
+ /// @return the value returned by `func`
+ template <typename F, typename T = ReturnType<F>>
+ T sync(Token::Type tok, F&& func);
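A hedged sketch of sync() as documented above: wrap a body parse so that, if it errors, the parser skips ahead to the closing '}' and can continue after the block.

    auto stmts = sync(Token::Type::kBraceRight, [&]() -> Expect<ast::StatementList> {
        return expect_statements();  // kBraceRight is a resynchronization token while this runs
    });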
+ /// sync_to() attempts to resynchronize the parser to the next found
+ /// resynchronization token or `tok` (whichever comes first).
+ ///
+ /// Synchronization tokens are transiently defined by calls to sync().
+ ///
+ /// sync_to() updates #synchronized_, setting it to `true` if a
+ /// resynchronization token was found and it was `tok`, otherwise `false`.
+ ///
+ /// @param tok the token to attempt to synchronize the parser to.
+ /// @param consume if true and the next found resynchronization token is
+ /// `tok` then sync_to() will also consume `tok`.
+ /// @return the state of #synchronized_.
+ /// @see sync().
+ bool sync_to(Token::Type tok, bool consume);
+ /// @return true if `t` is in the stack of resynchronization tokens.
+ /// @see sync().
+ bool is_sync_token(const Token& t) const;
+
+ /// If `t` is an error token, then `synchronized_` is set to false and the
+ /// token's error is appended to the builder's diagnostics. If `t` is not an
+ /// error token, then this function does nothing and false is returned.
+ /// @returns true if `t` is an error, otherwise false.
+ bool handle_error(const Token& t);
+
+ /// @returns true if #synchronized_ is true and the number of reported errors
+ /// is less than #max_errors_.
+ bool continue_parsing() {
+ return synchronized_ && builder_.Diagnostics().error_count() < max_errors_;
}
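As a sketch of how continue_parsing() can shape the top-level loop in translation_unit() (the exact loop body is assumed, not taken from the implementation):

    while (continue_parsing()) {
        auto decl = global_decl();
        if (decl.errored) {
            // already reported; the sync()/sync_to() machinery decides where parsing resumes
        }
        if (!decl.matched && !decl.errored) {
            break;  // no further global declarations
        }
    }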
- /// The value of a successful parse.
- /// Zero-initialized when there was a parse error.
- T value{};
- /// Optional source of the value.
- Source source;
- /// True if there was an error parsing.
- bool errored = false;
- /// True if the parse matched.
- bool matched = false;
- };
-
- /// TypedIdentifier holds a parsed identifier and type. Returned by
- /// variable_ident_decl().
- struct TypedIdentifier {
- /// Constructor
- TypedIdentifier();
- /// Copy constructor
- /// @param other the TypedIdentifier to copy
- TypedIdentifier(const TypedIdentifier& other);
- /// Constructor
- /// @param type_in parsed type
- /// @param name_in parsed identifier
- /// @param source_in source to the identifier
- TypedIdentifier(const ast::Type* type_in,
- std::string name_in,
- Source source_in);
- /// Destructor
- ~TypedIdentifier();
-
- /// Parsed type. May be nullptr for inferred types.
- const ast::Type* type = nullptr;
- /// Parsed identifier.
- std::string name;
- /// Source to the identifier.
- Source source;
- };
-
- /// FunctionHeader contains the parsed information for a function header.
- struct FunctionHeader {
- /// Constructor
- FunctionHeader();
- /// Copy constructor
- /// @param other the FunctionHeader to copy
- FunctionHeader(const FunctionHeader& other);
- /// Constructor
- /// @param src parsed header source
- /// @param n function name
- /// @param p function parameters
- /// @param ret_ty function return type
- /// @param ret_attrs return type attributes
- FunctionHeader(Source src,
- std::string n,
- ast::VariableList p,
- const ast::Type* ret_ty,
- ast::AttributeList ret_attrs);
- /// Destructor
- ~FunctionHeader();
- /// Assignment operator
- /// @param other the FunctionHeader to copy
- /// @returns this FunctionHeader
- FunctionHeader& operator=(const FunctionHeader& other);
-
- /// Parsed header source
- Source source;
- /// Function name
- std::string name;
- /// Function parameters
- ast::VariableList params;
- /// Function return type
- const ast::Type* return_type = nullptr;
- /// Function return type attributes
- ast::AttributeList return_type_attributes;
- };
-
- /// VarDeclInfo contains the parsed information for variable declaration.
- struct VarDeclInfo {
- /// Constructor
- VarDeclInfo();
- /// Copy constructor
- /// @param other the VarDeclInfo to copy
- VarDeclInfo(const VarDeclInfo& other);
- /// Constructor
- /// @param source_in variable declaration source
- /// @param name_in variable name
- /// @param storage_class_in variable storage class
- /// @param access_in variable access control
- /// @param type_in variable type
- VarDeclInfo(Source source_in,
- std::string name_in,
- ast::StorageClass storage_class_in,
- ast::Access access_in,
- const ast::Type* type_in);
- /// Destructor
- ~VarDeclInfo();
-
- /// Variable declaration source
- Source source;
- /// Variable name
- std::string name;
- /// Variable storage class
- ast::StorageClass storage_class = ast::StorageClass::kNone;
- /// Variable access control
- ast::Access access = ast::Access::kUndefined;
- /// Variable type
- const ast::Type* type = nullptr;
- };
-
- /// VariableQualifier contains the parsed information for a variable qualifier
- struct VariableQualifier {
- /// The variable's storage class
- ast::StorageClass storage_class = ast::StorageClass::kNone;
- /// The variable's access control
- ast::Access access = ast::Access::kUndefined;
- };
-
- /// Creates a new parser using the given file
- /// @param file the input source file to parse
- explicit ParserImpl(Source::File const* file);
- ~ParserImpl();
-
- /// Run the parser
- /// @returns true if the parse was successful, false otherwise.
- bool Parse();
-
- /// set_max_errors sets the maximum number of reported errors before
- /// aborting parsing.
- /// @param limit the new maximum number of errors
- void set_max_errors(size_t limit) { max_errors_ = limit; }
-
- /// @return the maximum number of reported errors before aborting
- /// parsing.
- size_t get_max_errors() const { return max_errors_; }
-
- /// @returns true if an error was encountered.
- bool has_error() const { return builder_.Diagnostics().contains_errors(); }
-
- /// @returns the parser error string
- std::string error() const {
- diag::Formatter formatter{{false, false, false, false}};
- return formatter.format(builder_.Diagnostics());
- }
-
- /// @returns the Program. The program builder in the parser will be reset
- /// after this.
- Program program() { return Program(std::move(builder_)); }
-
- /// @returns the program builder.
- ProgramBuilder& builder() { return builder_; }
-
- /// @returns the next token
- Token next();
- /// Peeks ahead and returns the token at `idx` ahead of the current position
- /// @param idx the index of the token to return
- /// @returns the token `idx` positions ahead without advancing
- Token peek(size_t idx = 0);
- /// Peeks ahead and returns true if the token at `idx` ahead of the current
- /// position is |tok|
- /// @param idx the index of the token to return
- /// @param tok the token to look for
- /// @returns true if the token `idx` positions ahead is |tok|
- bool peek_is(Token::Type tok, size_t idx = 0);
- /// @returns the last token that was returned by `next()`
- Token last_token() const;
- /// Appends an error at `t` with the message `msg`
- /// @param t the token to associate the error with
- /// @param msg the error message
- /// @return `Failure::Errored::kError` so that you can combine an add_error()
- /// call and return on the same line.
- Failure::Errored add_error(const Token& t, const std::string& msg);
- /// Appends an error raised when parsing `use` at `t` with the message
- /// `msg`
- /// @param source the source to associate the error with
- /// @param msg the error message
- /// @param use a description of what was being parsed when the error was
- /// raised.
- /// @return `Failure::Errored::kError` so that you can combine an add_error()
- /// call and return on the same line.
- Failure::Errored add_error(const Source& source,
- std::string_view msg,
- std::string_view use);
- /// Appends an error at `source` with the message `msg`
- /// @param source the source to associate the error with
- /// @param msg the error message
- /// @return `Failure::Errored::kError` so that you can combine an add_error()
- /// call and return on the same line.
- Failure::Errored add_error(const Source& source, const std::string& msg);
- /// Appends a deprecated-language-feature warning at `source` with the message
- /// `msg`
- /// @param source the source to associate the error with
- /// @param msg the warning message
- void deprecated(const Source& source, const std::string& msg);
- /// Parses the `translation_unit` grammar element
- void translation_unit();
- /// Parses the `global_decl` grammar element, erroring on parse failure.
- /// @return true on parse success, otherwise an error.
- Expect<bool> expect_global_decl();
- /// Parses a `global_variable_decl` grammar element with the initial
- /// `variable_attribute_list*` provided as `attrs`
- /// @returns the variable parsed or nullptr
- /// @param attrs the list of attributes for the variable declaration.
- Maybe<const ast::Variable*> global_variable_decl(ast::AttributeList& attrs);
- /// Parses a `global_constant_decl` grammar element with the initial
- /// `variable_attribute_list*` provided as `attrs`
- /// @returns the const object or nullptr
- /// @param attrs the list of attributes for the constant declaration.
- Maybe<const ast::Variable*> global_constant_decl(ast::AttributeList& attrs);
- /// Parses a `variable_decl` grammar element
- /// @param allow_inferred if true, do not fail if variable decl does not
- /// specify type
- /// @returns the parsed variable declaration info
- Maybe<VarDeclInfo> variable_decl(bool allow_inferred = false);
- /// Parses a `variable_ident_decl` grammar element, erroring on parse
- /// failure.
- /// @param use a description of what was being parsed if an error was raised.
- /// @param allow_inferred if true, do not fail if variable decl does not
- /// specify type
- /// @returns the identifier and type parsed or empty otherwise
- Expect<TypedIdentifier> expect_variable_ident_decl(
- std::string_view use,
- bool allow_inferred = false);
- /// Parses a `variable_qualifier` grammar element
- /// @returns the variable qualifier information
- Maybe<VariableQualifier> variable_qualifier();
- /// Parses a `type_alias` grammar element
- /// @returns the type alias or nullptr on error
- Maybe<const ast::Alias*> type_alias();
- /// Parses a `type_decl` grammar element
- /// @returns the parsed Type or nullptr if none matched.
- Maybe<const ast::Type*> type_decl();
- /// Parses a `storage_class` grammar element, erroring on parse failure.
- /// @param use a description of what was being parsed if an error was raised.
- /// @returns the storage class or StorageClass::kNone if none matched
- Expect<ast::StorageClass> expect_storage_class(std::string_view use);
- /// Parses a `struct_decl` grammar element.
- /// @returns the struct type or nullptr on error
- Maybe<const ast::Struct*> struct_decl();
- /// Parses a `struct_body_decl` grammar element, erroring on parse failure.
- /// @returns the struct members
- Expect<ast::StructMemberList> expect_struct_body_decl();
- /// Parses a `struct_member` grammar element, erroring on parse failure.
- /// @returns the struct member or nullptr
- Expect<ast::StructMember*> expect_struct_member();
- /// Parses a `function_decl` grammar element with the initial
- /// `function_attribute_decl*` provided as `attrs`.
- /// @param attrs the list of attributes for the function declaration.
- /// @returns the parsed function, nullptr otherwise
- Maybe<const ast::Function*> function_decl(ast::AttributeList& attrs);
- /// Parses a `texture_sampler_types` grammar element
- /// @returns the parsed Type or nullptr if none matched.
- Maybe<const ast::Type*> texture_sampler_types();
- /// Parses a `sampler_type` grammar element
- /// @returns the parsed Type or nullptr if none matched.
- Maybe<const ast::Type*> sampler_type();
- /// Parses a `multisampled_texture_type` grammar element
- /// @returns the multisample texture dimension or kNone if none
- /// matched.
- Maybe<const ast::TextureDimension> multisampled_texture_type();
- /// Parses a `sampled_texture_type` grammar element
- /// @returns the sample texture dimension or kNone if none matched.
- Maybe<const ast::TextureDimension> sampled_texture_type();
- /// Parses a `storage_texture_type` grammar element
- /// @returns the storage texture dimension.
- /// Returns kNone if none matched.
- Maybe<const ast::TextureDimension> storage_texture_type();
- /// Parses a `depth_texture_type` grammar element
- /// @returns the parsed Type or nullptr if none matched.
- Maybe<const ast::Type*> depth_texture_type();
- /// Parses a 'texture_external_type' grammar element
- /// @returns the parsed Type or nullptr if none matched
- Maybe<const ast::Type*> external_texture_type();
- /// Parses a `texel_format` grammar element
- /// @param use a description of what was being parsed if an error was raised
- /// @returns the texel format or kNone if none matched.
- Expect<ast::TexelFormat> expect_texel_format(std::string_view use);
- /// Parses a `function_header` grammar element
- /// @returns the parsed function header
- Maybe<FunctionHeader> function_header();
- /// Parses a `param_list` grammar element, erroring on parse failure.
- /// @returns the parsed variables
- Expect<ast::VariableList> expect_param_list();
- /// Parses a `param` grammar element, erroring on parse failure.
- /// @returns the parsed variable
- Expect<ast::Variable*> expect_param();
- /// Parses a `pipeline_stage` grammar element, erroring if the next token does
- /// not match a stage name.
- /// @returns the pipeline stage.
- Expect<ast::PipelineStage> expect_pipeline_stage();
- /// Parses an access control identifier, erroring if the next token does not
- /// match a valid access control.
- /// @param use a description of what was being parsed if an error was raised
- /// @returns the parsed access control.
- Expect<ast::Access> expect_access(std::string_view use);
- /// Parses a builtin identifier, erroring if the next token does not match a
- /// valid builtin name.
- /// @returns the parsed builtin.
- Expect<ast::Builtin> expect_builtin();
- /// Parses a `body_stmt` grammar element, erroring on parse failure.
- /// @returns the parsed statements
- Expect<ast::BlockStatement*> expect_body_stmt();
- /// Parses a `paren_rhs_stmt` grammar element, erroring on parse failure.
- /// @returns the parsed element or nullptr
- Expect<const ast::Expression*> expect_paren_rhs_stmt();
- /// Parses a `statements` grammar element
- /// @returns the statements parsed
- Expect<ast::StatementList> expect_statements();
- /// Parses a `statement` grammar element
- /// @returns the parsed statement or nullptr
- Maybe<const ast::Statement*> statement();
- /// Parses a `break_stmt` grammar element
- /// @returns the parsed statement or nullptr
- Maybe<const ast::BreakStatement*> break_stmt();
- /// Parses a `return_stmt` grammar element
- /// @returns the parsed statement or nullptr
- Maybe<const ast::ReturnStatement*> return_stmt();
- /// Parses a `continue_stmt` grammar element
- /// @returns the parsed statement or nullptr
- Maybe<const ast::ContinueStatement*> continue_stmt();
- /// Parses a `variable_stmt` grammar element
- /// @returns the parsed variable or nullptr
- Maybe<const ast::VariableDeclStatement*> variable_stmt();
- /// Parses a `if_stmt` grammar element
- /// @returns the parsed statement or nullptr
- Maybe<const ast::IfStatement*> if_stmt();
- /// Parses a list of `else_stmt` grammar elements
- /// @returns the list of parsed else statements
- Expect<ast::ElseStatementList> else_stmts();
- /// Parses a `switch_stmt` grammar element
- /// @returns the parsed statement or nullptr
- Maybe<const ast::SwitchStatement*> switch_stmt();
- /// Parses a `switch_body` grammar element
- /// @returns the parsed statement or nullptr
- Maybe<const ast::CaseStatement*> switch_body();
- /// Parses a `case_selectors` grammar element
- /// @returns the list of literals
- Expect<ast::CaseSelectorList> expect_case_selectors();
- /// Parses a `case_body` grammar element
- /// @returns the parsed statements
- Maybe<const ast::BlockStatement*> case_body();
- /// Parses a `func_call_stmt` grammar element
- /// @returns the parsed function call or nullptr
- Maybe<const ast::CallStatement*> func_call_stmt();
- /// Parses a `loop_stmt` grammar element
- /// @returns the parsed loop or nullptr
- Maybe<const ast::LoopStatement*> loop_stmt();
- /// Parses a `for_header` grammar element, erroring on parse failure.
- /// @returns the parsed for header or nullptr
- Expect<std::unique_ptr<ForHeader>> expect_for_header();
- /// Parses a `for_stmt` grammar element
- /// @returns the parsed for loop or nullptr
- Maybe<const ast::ForLoopStatement*> for_stmt();
- /// Parses a `continuing_stmt` grammar element
- /// @returns the parsed statements
- Maybe<const ast::BlockStatement*> continuing_stmt();
- /// Parses a `const_literal` grammar element
- /// @returns the const literal parsed or nullptr if none found
- Maybe<const ast::LiteralExpression*> const_literal();
- /// Parses a `const_expr` grammar element, erroring on parse failure.
- /// @returns the parsed constructor expression or nullptr on error
- Expect<const ast::Expression*> expect_const_expr();
- /// Parses a `primary_expression` grammar element
- /// @returns the parsed expression or nullptr
- Maybe<const ast::Expression*> primary_expression();
- /// Parses a `argument_expression_list` grammar element, erroring on parse
- /// failure.
- /// @param use a description of what was being parsed if an error was raised
- /// @returns the list of arguments
- Expect<ast::ExpressionList> expect_argument_expression_list(
- std::string_view use);
- /// Parses the recursive portion of the postfix_expression
- /// @param prefix the left side of the expression
- /// @returns the parsed expression or nullptr
- Maybe<const ast::Expression*> postfix_expression(
- const ast::Expression* prefix);
- /// Parses a `singular_expression` grammar element
- /// @returns the parsed expression or nullptr
- Maybe<const ast::Expression*> singular_expression();
- /// Parses a `unary_expression` grammar element
- /// @returns the parsed expression or nullptr
- Maybe<const ast::Expression*> unary_expression();
- /// Parses the recursive part of the `multiplicative_expression`, erroring on
- /// parse failure.
- /// @param lhs the left side of the expression
- /// @returns the parsed expression or nullptr
- Expect<const ast::Expression*> expect_multiplicative_expr(
- const ast::Expression* lhs);
- /// Parses the `multiplicative_expression` grammar element
- /// @returns the parsed expression or nullptr
- Maybe<const ast::Expression*> multiplicative_expression();
- /// Parses the recursive part of the `additive_expression`, erroring on parse
- /// failure.
- /// @param lhs the left side of the expression
- /// @returns the parsed expression or nullptr
- Expect<const ast::Expression*> expect_additive_expr(
- const ast::Expression* lhs);
- /// Parses the `additive_expression` grammar element
- /// @returns the parsed expression or nullptr
- Maybe<const ast::Expression*> additive_expression();
- /// Parses the recursive part of the `shift_expression`, erroring on parse
- /// failure.
- /// @param lhs the left side of the expression
- /// @returns the parsed expression or nullptr
- Expect<const ast::Expression*> expect_shift_expr(const ast::Expression* lhs);
- /// Parses the `shift_expression` grammar element
- /// @returns the parsed expression or nullptr
- Maybe<const ast::Expression*> shift_expression();
- /// Parses the recursive part of the `relational_expression`, erroring on
- /// parse failure.
- /// @param lhs the left side of the expression
- /// @returns the parsed expression or nullptr
- Expect<const ast::Expression*> expect_relational_expr(
- const ast::Expression* lhs);
- /// Parses the `relational_expression` grammar element
- /// @returns the parsed expression or nullptr
- Maybe<const ast::Expression*> relational_expression();
- /// Parses the recursive part of the `equality_expression`, erroring on parse
- /// failure.
- /// @param lhs the left side of the expression
- /// @returns the parsed expression or nullptr
- Expect<const ast::Expression*> expect_equality_expr(
- const ast::Expression* lhs);
- /// Parses the `equality_expression` grammar element
- /// @returns the parsed expression or nullptr
- Maybe<const ast::Expression*> equality_expression();
- /// Parses the recursive part of the `and_expression`, erroring on parse
- /// failure.
- /// @param lhs the left side of the expression
- /// @returns the parsed expression or nullptr
- Expect<const ast::Expression*> expect_and_expr(const ast::Expression* lhs);
- /// Parses the `and_expression` grammar element
- /// @returns the parsed expression or nullptr
- Maybe<const ast::Expression*> and_expression();
- /// Parses the recursive part of the `exclusive_or_expression`, erroring on
- /// parse failure.
- /// @param lhs the left side of the expression
- /// @returns the parsed expression or nullptr
- Expect<const ast::Expression*> expect_exclusive_or_expr(
- const ast::Expression* lhs);
- /// Parses the `exclusive_or_expression` grammar element
- /// @returns the parsed expression or nullptr
- Maybe<const ast::Expression*> exclusive_or_expression();
- /// Parses the recursive part of the `inclusive_or_expression`, erroring on
- /// parse failure.
- /// @param lhs the left side of the expression
- /// @returns the parsed expression or nullptr
- Expect<const ast::Expression*> expect_inclusive_or_expr(
- const ast::Expression* lhs);
- /// Parses the `inclusive_or_expression` grammar element
- /// @returns the parsed expression or nullptr
- Maybe<const ast::Expression*> inclusive_or_expression();
- /// Parses the recursive part of the `logical_and_expression`, erroring on
- /// parse failure.
- /// @param lhs the left side of the expression
- /// @returns the parsed expression or nullptr
- Expect<const ast::Expression*> expect_logical_and_expr(
- const ast::Expression* lhs);
- /// Parses a `logical_and_expression` grammar element
- /// @returns the parsed expression or nullptr
- Maybe<const ast::Expression*> logical_and_expression();
- /// Parses the recursive part of the `logical_or_expression`, erroring on
- /// parse failure.
- /// @param lhs the left side of the expression
- /// @returns the parsed expression or nullptr
- Expect<const ast::Expression*> expect_logical_or_expr(
- const ast::Expression* lhs);
- /// Parses a `logical_or_expression` grammar element
- /// @returns the parsed expression or nullptr
- Maybe<const ast::Expression*> logical_or_expression();
- /// Parses a `compound_assignment_operator` grammar element
- /// @returns the parsed compound assignment operator
- Maybe<ast::BinaryOp> compound_assignment_operator();
- /// Parses a `assignment_stmt` grammar element
- /// @returns the parsed assignment or nullptr
- Maybe<const ast::Statement*> assignment_stmt();
- /// Parses one or more attribute lists.
- /// @return the parsed attribute list, or an empty list on error.
- Maybe<ast::AttributeList> attribute_list();
- /// Parses a single attribute of the following types:
- /// * `struct_attribute`
- /// * `struct_member_attribute`
- /// * `array_attribute`
- /// * `variable_attribute`
- /// * `global_const_attribute`
- /// * `function_attribute`
- /// @return the parsed attribute, or nullptr.
- Maybe<const ast::Attribute*> attribute();
- /// Parses a single attribute, reporting an error if the next token does not
- /// represent an attribute.
- /// @see #attribute for the full list of attributes this method parses.
- /// @return the parsed attribute, or nullptr on error.
- Expect<const ast::Attribute*> expect_attribute();
-
- private:
- /// ReturnType resolves to the return type for the function or lambda F.
- template <typename F>
- using ReturnType = typename std::invoke_result<F>::type;
-
- /// ResultType resolves to `T` for a `RESULT` of type Expect<T>.
- template <typename RESULT>
- using ResultType = typename RESULT::type;
-
- /// @returns true and consumes the next token if it equals `tok`
- /// @param source if not nullptr, the next token's source is written to this
- /// pointer, regardless of success or error
- bool match(Token::Type tok, Source* source = nullptr);
- /// Errors if the next token is not equal to `tok`
- /// Consumes the next token on match.
- /// expect() also updates #synchronized_, setting it to `true` if the next
- /// token is equal to `tok`, otherwise `false`.
- /// @param use a description of what was being parsed if an error was raised.
- /// @param tok the token to test against
- /// @returns true if the next token equals `tok`
- bool expect(std::string_view use, Token::Type tok);
- /// Parses a signed integer from the next token in the stream, erroring if the
- /// next token is not a signed integer.
- /// Consumes the next token on match.
- /// @param use a description of what was being parsed if an error was raised
- /// @returns the parsed integer.
- Expect<int32_t> expect_sint(std::string_view use);
- /// Parses a signed integer from the next token in the stream, erroring if
- /// the next token is not a signed integer or is negative.
- /// Consumes the next token if it is a signed integer (not necessarily
- /// negative).
- /// @param use a description of what was being parsed if an error was raised
- /// @returns the parsed integer.
- Expect<uint32_t> expect_positive_sint(std::string_view use);
- /// Parses a non-zero signed integer from the next token in the stream,
- /// erroring if the next token is not a signed integer or is less than 1.
- /// Consumes the next token if it is a signed integer (not necessarily
- /// >= 1).
- /// @param use a description of what was being parsed if an error was raised
- /// @returns the parsed integer.
- Expect<uint32_t> expect_nonzero_positive_sint(std::string_view use);
- /// Errors if the next token is not an identifier.
- /// Consumes the next token on match.
- /// @param use a description of what was being parsed if an error was raised
- /// @returns the parsed identifier.
- Expect<std::string> expect_ident(std::string_view use);
- /// Parses a lexical block starting with the token `start` and ending with
- /// the token `end`. `body` is called to parse the lexical block body
- /// between the `start` and `end` tokens. If the `start` or `end` tokens
- /// are not matched then an error is generated and a zero-initialized `T` is
- /// returned. If `body` raises an error while parsing then a zero-initialized
- /// `T` is returned.
- /// @param start the token that begins the lexical block
- /// @param end the token that ends the lexical block
- /// @param use a description of what was being parsed if an error was raised
- /// @param body a function or lambda that is called to parse the lexical block
- /// body, with the signature: `Expect<Result>()` or `Maybe<Result>()`.
- /// @return the value returned by `body` if no errors are raised, otherwise
- /// an Expect with error state.
- template <typename F, typename T = ReturnType<F>>
- T expect_block(Token::Type start,
- Token::Type end,
- std::string_view use,
- F&& body);
- /// A convenience function that calls expect_block() passing
- /// `Token::Type::kParenLeft` and `Token::Type::kParenRight` for the `start`
- /// and `end` arguments, respectively.
- /// @param use a description of what was being parsed if an error was raised
- /// @param body a function or lambda that is called to parse the lexical block
- /// body, with the signature: `Expect<Result>()` or `Maybe<Result>()`.
- /// @return the value returned by `body` if no errors are raised, otherwise
- /// an Expect with error state.
- template <typename F, typename T = ReturnType<F>>
- T expect_paren_block(std::string_view use, F&& body);
- /// A convenience function that calls `expect_block` passing
- /// `Token::Type::kBraceLeft` and `Token::Type::kBraceRight` for the `start`
- /// and `end` arguments, respectively.
- /// @param use a description of what was being parsed if an error was raised
- /// @param body a function or lambda that is called to parse the lexical block
- /// body, with the signature: `Expect<Result>()` or `Maybe<Result>()`.
- /// @return the value returned by `body` if no errors are raised, otherwise
- /// an Expect with error state.
- template <typename F, typename T = ReturnType<F>>
- T expect_brace_block(std::string_view use, F&& body);
- /// A convenience function that calls `expect_block` passing
- /// `Token::Type::kLessThan` and `Token::Type::kGreaterThan` for the `start`
- /// and `end` arguments, respectively.
- /// @param use a description of what was being parsed if an error was raised
- /// @param body a function or lambda that is called to parse the lexical block
- /// body, with the signature: `Expect<Result>()` or `Maybe<Result>()`.
- /// @return the value returned by `body` if no errors are raised, otherwise
- /// an Expect with error state.
- template <typename F, typename T = ReturnType<F>>
- T expect_lt_gt_block(std::string_view use, F&& body);
-
- /// sync() calls the function `func`, and attempts to resynchronize the
- /// parser to the next found resynchronization token if `func` fails. If the
- /// next found resynchronization token is `tok`, then sync will also consume
- /// `tok`.
- ///
- /// sync() will transiently add `tok` to the parser's stack of
- /// synchronization tokens for the duration of the call to `func`. Once @p
- /// func returns,
- /// `tok` is removed from the stack of resynchronization tokens. sync calls
- /// may be nested, and so the number of resynchronization tokens is equal to
- /// the number of sync() calls in the current stack frame.
- ///
- /// sync() updates #synchronized_, setting it to `true` if the next
- /// resynchronization token found was `tok`, otherwise `false`.
- ///
- /// @param tok the token to attempt to synchronize the parser to if `func`
- /// fails.
- /// @param func a function or lambda with the signature: `Expect<Result>()` or
- /// `Maybe<Result>()`.
- /// @return the value returned by `func`
- template <typename F, typename T = ReturnType<F>>
- T sync(Token::Type tok, F&& func);
- /// sync_to() attempts to resynchronize the parser to the next found
- /// resynchronization token or `tok` (whichever comes first).
- ///
- /// Synchronization tokens are transiently defined by calls to sync().
- ///
- /// sync_to() updates #synchronized_, setting it to `true` if a
- /// resynchronization token was found and it was `tok`, otherwise `false`.
- ///
- /// @param tok the token to attempt to synchronize the parser to.
- /// @param consume if true and the next found resynchronization token is
- /// `tok` then sync_to() will also consume `tok`.
- /// @return the state of #synchronized_.
- /// @see sync().
- bool sync_to(Token::Type tok, bool consume);
- /// @return true if `t` is in the stack of resynchronization tokens.
- /// @see sync().
- bool is_sync_token(const Token& t) const;
-
- /// If `t` is an error token, then `synchronized_` is set to false and the
- /// token's error is appended to the builder's diagnostics. If `t` is not an
- /// error token, then this function does nothing and false is returned.
- /// @returns true if `t` is an error, otherwise false.
- bool handle_error(const Token& t);
-
- /// @returns true if #synchronized_ is true and the number of reported errors
- /// is less than #max_errors_.
- bool continue_parsing() {
- return synchronized_ && builder_.Diagnostics().error_count() < max_errors_;
- }
-
- /// without_error() calls the function `func` muting any grammatical errors
- /// found while executing the function. This can be used as a best-effort to
- /// produce a meaningful error message when the parser is out of sync.
- /// @param func a function or lambda with the signature: `Expect<Result>()` or
- /// `Maybe<Result>()`.
- /// @return the value returned by `func`
- template <typename F, typename T = ReturnType<F>>
- T without_error(F&& func);
-
- /// Reports an error if the attribute list `list` is not empty.
- /// Used to ensure that all attributes are consumed.
- bool expect_attributes_consumed(ast::AttributeList& list);
-
- Expect<const ast::Type*> expect_type_decl_pointer(Token t);
- Expect<const ast::Type*> expect_type_decl_atomic(Token t);
- Expect<const ast::Type*> expect_type_decl_vector(Token t);
- Expect<const ast::Type*> expect_type_decl_array(Token t);
- Expect<const ast::Type*> expect_type_decl_matrix(Token t);
-
- Expect<const ast::Type*> expect_type(std::string_view use);
-
- Maybe<const ast::Statement*> non_block_statement();
- Maybe<const ast::Statement*> for_header_initializer();
- Maybe<const ast::Statement*> for_header_continuing();
-
- class MultiTokenSource;
- MultiTokenSource make_source_range();
- MultiTokenSource make_source_range_from(const Source& start);
-
- /// Creates a new `ast::Node` owned by the Module. When the Module is
- /// destructed, the `ast::Node` will also be destructed.
- /// @param args the arguments to pass to the type constructor
- /// @returns the node pointer
- template <typename T, typename... ARGS>
- T* create(ARGS&&... args) {
- return builder_.create<T>(std::forward<ARGS>(args)...);
- }
-
- std::unique_ptr<Lexer> lexer_;
- std::deque<Token> token_queue_;
- Token last_token_;
- bool synchronized_ = true;
- uint32_t parse_depth_ = 0;
- std::vector<Token::Type> sync_tokens_;
- int silence_errors_ = 0;
- ProgramBuilder builder_;
- size_t max_errors_ = 25;
+ /// without_error() calls the function `func` muting any grammatical errors
+ /// found while executing the function. This can be used as a best-effort to
+ /// produce a meaningful error message when the parser is out of sync.
+ /// @param func a function or lambda with the signature: `Expect<Result>()` or
+ /// `Maybe<Result>()`.
+ /// @return the value returned by `func`
+ template <typename F, typename T = ReturnType<F>>
+ T without_error(F&& func);
+
+ /// Reports an error if the attribute list `list` is not empty.
+ /// Used to ensure that all attributes are consumed.
+ bool expect_attributes_consumed(ast::AttributeList& list);
+
+ Expect<const ast::Type*> expect_type_decl_pointer(Token t);
+ Expect<const ast::Type*> expect_type_decl_atomic(Token t);
+ Expect<const ast::Type*> expect_type_decl_vector(Token t);
+ Expect<const ast::Type*> expect_type_decl_array(Token t);
+ Expect<const ast::Type*> expect_type_decl_matrix(Token t);
+
+ Expect<const ast::Type*> expect_type(std::string_view use);
+
+ Maybe<const ast::Statement*> non_block_statement();
+ Maybe<const ast::Statement*> for_header_initializer();
+ Maybe<const ast::Statement*> for_header_continuing();
+
+ class MultiTokenSource;
+ MultiTokenSource make_source_range();
+ MultiTokenSource make_source_range_from(const Source& start);
+
+ /// Creates a new `ast::Node` owned by the Module. When the Module is
+ /// destructed, the `ast::Node` will also be destructed.
+ /// @param args the arguments to pass to the type constructor
+ /// @returns the node pointer
+ template <typename T, typename... ARGS>
+ T* create(ARGS&&... args) {
+ return builder_.create<T>(std::forward<ARGS>(args)...);
+ }
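Illustrative only (the constructor arguments are assumed, not checked against the ast headers): parse methods allocate AST nodes through create<T>() so that the resulting Program, not the parser, owns them.

    auto* lit = create<ast::BoolLiteralExpression>(Source{}, true);  // owned by builder_ / the Program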
+
+ std::unique_ptr<Lexer> lexer_;
+ std::deque<Token> token_queue_;
+ Token last_token_;
+ bool synchronized_ = true;
+ uint32_t parse_depth_ = 0;
+ std::vector<Token::Type> sync_tokens_;
+ int silence_errors_ = 0;
+ ProgramBuilder builder_;
+ size_t max_errors_ = 25;
};
} // namespace tint::reader::wgsl
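The declarations above return the parser's Expect<T> and Maybe<T> result wrappers, and the test diffs that follow consume them through .matched, .errored and .value. A minimal, self-contained sketch of that consumption pattern, using a simplified hypothetical wrapper rather than Tint's real definitions in parser_impl.h:

// Hypothetical, simplified stand-in for Tint's Maybe<T> wrapper, shown only to
// illustrate how the parser methods declared above are typically consumed.
#include <cassert>

template <typename T>
struct Maybe {
    T value{};
    bool matched = false;  // the grammar rule applied to the input
    bool errored = false;  // an error was reported while parsing the rule
};

static Maybe<int> parse_number_or_nothing(bool present) {
    // Stand-in for a parser production such as additive_expression().
    return present ? Maybe<int>{42, true, false} : Maybe<int>{0, false, false};
}

int main() {
    auto e = parse_number_or_nothing(true);
    assert(e.matched && !e.errored);  // mirrors EXPECT_TRUE(e.matched) / EXPECT_FALSE(e.errored)
    assert(e.value == 42);            // mirrors the tests' ASSERT_NE(e.value, nullptr)-style checks
    return 0;
}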
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_additive_expression_test.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_additive_expression_test.cc
index b189a52b1b6..4573b84f03c 100644
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_additive_expression_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_additive_expression_test.cc
@@ -18,72 +18,77 @@ namespace tint::reader::wgsl {
namespace {
TEST_F(ParserImplTest, AdditiveExpression_Parses_Plus) {
- auto p = parser("a + true");
- auto e = p->additive_expression();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
+ auto p = parser("a + true");
+ auto e = p->additive_expression();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
- ASSERT_TRUE(e->Is<ast::BinaryExpression>());
- auto* rel = e->As<ast::BinaryExpression>();
- EXPECT_EQ(ast::BinaryOp::kAdd, rel->op);
+ EXPECT_EQ(e->source.range.begin.line, 1u);
+ EXPECT_EQ(e->source.range.begin.column, 3u);
+ EXPECT_EQ(e->source.range.end.line, 1u);
+ EXPECT_EQ(e->source.range.end.column, 4u);
- ASSERT_TRUE(rel->lhs->Is<ast::IdentifierExpression>());
- auto* ident = rel->lhs->As<ast::IdentifierExpression>();
- EXPECT_EQ(ident->symbol, p->builder().Symbols().Get("a"));
+ ASSERT_TRUE(e->Is<ast::BinaryExpression>());
+ auto* rel = e->As<ast::BinaryExpression>();
+ EXPECT_EQ(ast::BinaryOp::kAdd, rel->op);
- ASSERT_TRUE(rel->rhs->Is<ast::BoolLiteralExpression>());
- ASSERT_TRUE(rel->rhs->As<ast::BoolLiteralExpression>()->value);
+ ASSERT_TRUE(rel->lhs->Is<ast::IdentifierExpression>());
+ auto* ident = rel->lhs->As<ast::IdentifierExpression>();
+ EXPECT_EQ(ident->symbol, p->builder().Symbols().Get("a"));
+
+ ASSERT_TRUE(rel->rhs->Is<ast::BoolLiteralExpression>());
+ ASSERT_TRUE(rel->rhs->As<ast::BoolLiteralExpression>()->value);
}
TEST_F(ParserImplTest, AdditiveExpression_Parses_Minus) {
- auto p = parser("a - true");
- auto e = p->additive_expression();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
+ auto p = parser("a - true");
+ auto e = p->additive_expression();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
- ASSERT_TRUE(e->Is<ast::BinaryExpression>());
- auto* rel = e->As<ast::BinaryExpression>();
- EXPECT_EQ(ast::BinaryOp::kSubtract, rel->op);
+ ASSERT_TRUE(e->Is<ast::BinaryExpression>());
+ auto* rel = e->As<ast::BinaryExpression>();
+ EXPECT_EQ(ast::BinaryOp::kSubtract, rel->op);
- ASSERT_TRUE(rel->lhs->Is<ast::IdentifierExpression>());
- auto* ident = rel->lhs->As<ast::IdentifierExpression>();
- EXPECT_EQ(ident->symbol, p->builder().Symbols().Get("a"));
+ ASSERT_TRUE(rel->lhs->Is<ast::IdentifierExpression>());
+ auto* ident = rel->lhs->As<ast::IdentifierExpression>();
+ EXPECT_EQ(ident->symbol, p->builder().Symbols().Get("a"));
- ASSERT_TRUE(rel->rhs->Is<ast::BoolLiteralExpression>());
- ASSERT_TRUE(rel->rhs->As<ast::BoolLiteralExpression>()->value);
+ ASSERT_TRUE(rel->rhs->Is<ast::BoolLiteralExpression>());
+ ASSERT_TRUE(rel->rhs->As<ast::BoolLiteralExpression>()->value);
}
TEST_F(ParserImplTest, AdditiveExpression_InvalidLHS) {
- auto p = parser("if (a) {} + true");
- auto e = p->additive_expression();
- EXPECT_FALSE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- EXPECT_EQ(e.value, nullptr);
+ auto p = parser("if (a) {} + true");
+ auto e = p->additive_expression();
+ EXPECT_FALSE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ EXPECT_EQ(e.value, nullptr);
}
TEST_F(ParserImplTest, AdditiveExpression_InvalidRHS) {
- auto p = parser("true + if (a) {}");
- auto e = p->additive_expression();
- EXPECT_FALSE(e.matched);
- EXPECT_TRUE(e.errored);
- EXPECT_EQ(e.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:8: unable to parse right side of + expression");
+ auto p = parser("true + if (a) {}");
+ auto e = p->additive_expression();
+ EXPECT_FALSE(e.matched);
+ EXPECT_TRUE(e.errored);
+ EXPECT_EQ(e.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:8: unable to parse right side of + expression");
}
TEST_F(ParserImplTest, AdditiveExpression_NoOr_ReturnsLHS) {
- auto p = parser("a true");
- auto e = p->additive_expression();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
- ASSERT_TRUE(e->Is<ast::IdentifierExpression>());
+ auto p = parser("a true");
+ auto e = p->additive_expression();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
+ ASSERT_TRUE(e->Is<ast::IdentifierExpression>());
}
} // namespace
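The assertions added in this file (and in the and_expression test below) pin the BinaryExpression's source range to the operator token. A worked reading of the asserted values for the first test's input, inferred from the EXPECT_EQ values above:

// Input:   "a + true"
// Columns:  12345678
// The asserted range {line 1, col 3} .. {line 1, col 4} spans the '+' token
// (end column exclusive), so e->source identifies the operator rather than
// the whole binary expression.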
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_and_expression_test.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_and_expression_test.cc
index 6283cd456c0..fd90460124f 100644
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_and_expression_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_and_expression_test.cc
@@ -18,52 +18,57 @@ namespace tint::reader::wgsl {
namespace {
TEST_F(ParserImplTest, AndExpression_Parses) {
- auto p = parser("a & true");
- auto e = p->and_expression();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
+ auto p = parser("a & true");
+ auto e = p->and_expression();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
- ASSERT_TRUE(e->Is<ast::BinaryExpression>());
- auto* rel = e->As<ast::BinaryExpression>();
- EXPECT_EQ(ast::BinaryOp::kAnd, rel->op);
+ EXPECT_EQ(e->source.range.begin.line, 1u);
+ EXPECT_EQ(e->source.range.begin.column, 3u);
+ EXPECT_EQ(e->source.range.end.line, 1u);
+ EXPECT_EQ(e->source.range.end.column, 4u);
- ASSERT_TRUE(rel->lhs->Is<ast::IdentifierExpression>());
- auto* ident = rel->lhs->As<ast::IdentifierExpression>();
- EXPECT_EQ(ident->symbol, p->builder().Symbols().Register("a"));
+ ASSERT_TRUE(e->Is<ast::BinaryExpression>());
+ auto* rel = e->As<ast::BinaryExpression>();
+ EXPECT_EQ(ast::BinaryOp::kAnd, rel->op);
- ASSERT_TRUE(rel->rhs->Is<ast::BoolLiteralExpression>());
- ASSERT_TRUE(rel->rhs->As<ast::BoolLiteralExpression>()->value);
+ ASSERT_TRUE(rel->lhs->Is<ast::IdentifierExpression>());
+ auto* ident = rel->lhs->As<ast::IdentifierExpression>();
+ EXPECT_EQ(ident->symbol, p->builder().Symbols().Register("a"));
+
+ ASSERT_TRUE(rel->rhs->Is<ast::BoolLiteralExpression>());
+ ASSERT_TRUE(rel->rhs->As<ast::BoolLiteralExpression>()->value);
}
TEST_F(ParserImplTest, AndExpression_InvalidLHS) {
- auto p = parser("if (a) {} & true");
- auto e = p->and_expression();
- EXPECT_FALSE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- EXPECT_EQ(e.value, nullptr);
+ auto p = parser("if (a) {} & true");
+ auto e = p->and_expression();
+ EXPECT_FALSE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ EXPECT_EQ(e.value, nullptr);
}
TEST_F(ParserImplTest, AndExpression_InvalidRHS) {
- auto p = parser("true & if (a) {}");
- auto e = p->and_expression();
- EXPECT_FALSE(e.matched);
- EXPECT_TRUE(e.errored);
- EXPECT_EQ(e.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:8: unable to parse right side of & expression");
+ auto p = parser("true & if (a) {}");
+ auto e = p->and_expression();
+ EXPECT_FALSE(e.matched);
+ EXPECT_TRUE(e.errored);
+ EXPECT_EQ(e.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:8: unable to parse right side of & expression");
}
TEST_F(ParserImplTest, AndExpression_NoOr_ReturnsLHS) {
- auto p = parser("a true");
- auto e = p->and_expression();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
- ASSERT_TRUE(e->Is<ast::IdentifierExpression>());
+ auto p = parser("a true");
+ auto e = p->and_expression();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
+ ASSERT_TRUE(e->Is<ast::IdentifierExpression>());
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_argument_expression_list_test.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_argument_expression_list_test.cc
index 6d298975eb5..3042c201df8 100644
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_argument_expression_list_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_argument_expression_list_test.cc
@@ -18,85 +18,85 @@ namespace tint::reader::wgsl {
namespace {
TEST_F(ParserImplTest, ArgumentExpressionList_Parses) {
- auto p = parser("(a)");
- auto e = p->expect_argument_expression_list("argument list");
- ASSERT_FALSE(p->has_error()) << p->error();
- ASSERT_FALSE(e.errored);
+ auto p = parser("(a)");
+ auto e = p->expect_argument_expression_list("argument list");
+ ASSERT_FALSE(p->has_error()) << p->error();
+ ASSERT_FALSE(e.errored);
- ASSERT_EQ(e.value.size(), 1u);
- ASSERT_TRUE(e.value[0]->Is<ast::IdentifierExpression>());
+ ASSERT_EQ(e.value.size(), 1u);
+ ASSERT_TRUE(e.value[0]->Is<ast::IdentifierExpression>());
}
TEST_F(ParserImplTest, ArgumentExpressionList_ParsesEmptyList) {
- auto p = parser("()");
- auto e = p->expect_argument_expression_list("argument list");
- ASSERT_FALSE(p->has_error()) << p->error();
- ASSERT_FALSE(e.errored);
+ auto p = parser("()");
+ auto e = p->expect_argument_expression_list("argument list");
+ ASSERT_FALSE(p->has_error()) << p->error();
+ ASSERT_FALSE(e.errored);
- ASSERT_EQ(e.value.size(), 0u);
+ ASSERT_EQ(e.value.size(), 0u);
}
TEST_F(ParserImplTest, ArgumentExpressionList_ParsesMultiple) {
- auto p = parser("(a, -33, 1+2)");
- auto e = p->expect_argument_expression_list("argument list");
- ASSERT_FALSE(p->has_error()) << p->error();
- ASSERT_FALSE(e.errored);
-
- ASSERT_EQ(e.value.size(), 3u);
- ASSERT_TRUE(e.value[0]->Is<ast::IdentifierExpression>());
- ASSERT_TRUE(e.value[1]->Is<ast::LiteralExpression>());
- ASSERT_TRUE(e.value[2]->Is<ast::BinaryExpression>());
+ auto p = parser("(a, -33, 1+2)");
+ auto e = p->expect_argument_expression_list("argument list");
+ ASSERT_FALSE(p->has_error()) << p->error();
+ ASSERT_FALSE(e.errored);
+
+ ASSERT_EQ(e.value.size(), 3u);
+ ASSERT_TRUE(e.value[0]->Is<ast::IdentifierExpression>());
+ ASSERT_TRUE(e.value[1]->Is<ast::LiteralExpression>());
+ ASSERT_TRUE(e.value[2]->Is<ast::BinaryExpression>());
}
TEST_F(ParserImplTest, ArgumentExpressionList_TrailingComma) {
- auto p = parser("(a, 42,)");
- auto e = p->expect_argument_expression_list("argument list");
- ASSERT_FALSE(p->has_error()) << p->error();
- ASSERT_FALSE(e.errored);
-
- ASSERT_EQ(e.value.size(), 2u);
- ASSERT_TRUE(e.value[0]->Is<ast::IdentifierExpression>());
- ASSERT_TRUE(e.value[1]->Is<ast::LiteralExpression>());
+ auto p = parser("(a, 42,)");
+ auto e = p->expect_argument_expression_list("argument list");
+ ASSERT_FALSE(p->has_error()) << p->error();
+ ASSERT_FALSE(e.errored);
+
+ ASSERT_EQ(e.value.size(), 2u);
+ ASSERT_TRUE(e.value[0]->Is<ast::IdentifierExpression>());
+ ASSERT_TRUE(e.value[1]->Is<ast::LiteralExpression>());
}
TEST_F(ParserImplTest, ArgumentExpressionList_HandlesMissingLeftParen) {
- auto p = parser("a)");
- auto e = p->expect_argument_expression_list("argument list");
- ASSERT_TRUE(p->has_error());
- ASSERT_TRUE(e.errored);
- EXPECT_EQ(p->error(), "1:1: expected '(' for argument list");
+ auto p = parser("a)");
+ auto e = p->expect_argument_expression_list("argument list");
+ ASSERT_TRUE(p->has_error());
+ ASSERT_TRUE(e.errored);
+ EXPECT_EQ(p->error(), "1:1: expected '(' for argument list");
}
TEST_F(ParserImplTest, ArgumentExpressionList_HandlesMissingRightParen) {
- auto p = parser("(a");
- auto e = p->expect_argument_expression_list("argument list");
- ASSERT_TRUE(p->has_error());
- ASSERT_TRUE(e.errored);
- EXPECT_EQ(p->error(), "1:3: expected ')' for argument list");
+ auto p = parser("(a");
+ auto e = p->expect_argument_expression_list("argument list");
+ ASSERT_TRUE(p->has_error());
+ ASSERT_TRUE(e.errored);
+ EXPECT_EQ(p->error(), "1:3: expected ')' for argument list");
}
TEST_F(ParserImplTest, ArgumentExpressionList_HandlesMissingExpression_0) {
- auto p = parser("(,)");
- auto e = p->expect_argument_expression_list("argument list");
- ASSERT_TRUE(p->has_error());
- ASSERT_TRUE(e.errored);
- EXPECT_EQ(p->error(), "1:2: expected ')' for argument list");
+ auto p = parser("(,)");
+ auto e = p->expect_argument_expression_list("argument list");
+ ASSERT_TRUE(p->has_error());
+ ASSERT_TRUE(e.errored);
+ EXPECT_EQ(p->error(), "1:2: expected ')' for argument list");
}
TEST_F(ParserImplTest, ArgumentExpressionList_HandlesMissingExpression_1) {
- auto p = parser("(a, ,)");
- auto e = p->expect_argument_expression_list("argument list");
- ASSERT_TRUE(p->has_error());
- ASSERT_TRUE(e.errored);
- EXPECT_EQ(p->error(), "1:5: expected ')' for argument list");
+ auto p = parser("(a, ,)");
+ auto e = p->expect_argument_expression_list("argument list");
+ ASSERT_TRUE(p->has_error());
+ ASSERT_TRUE(e.errored);
+ EXPECT_EQ(p->error(), "1:5: expected ')' for argument list");
}
TEST_F(ParserImplTest, ArgumentExpressionList_HandlesInvalidExpression) {
- auto p = parser("(if(a) {})");
- auto e = p->expect_argument_expression_list("argument list");
- ASSERT_TRUE(p->has_error());
- ASSERT_TRUE(e.errored);
- EXPECT_EQ(p->error(), "1:2: expected ')' for argument list");
+ auto p = parser("(if(a) {})");
+ auto e = p->expect_argument_expression_list("argument list");
+ ASSERT_TRUE(p->has_error());
+ ASSERT_TRUE(e.errored);
+ EXPECT_EQ(p->error(), "1:2: expected ')' for argument list");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_assignment_stmt_test.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_assignment_stmt_test.cc
index 2f1d405fbd1..4fd23ce08fa 100644
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_assignment_stmt_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_assignment_stmt_test.cc
@@ -18,162 +18,170 @@ namespace tint::reader::wgsl {
namespace {
TEST_F(ParserImplTest, AssignmentStmt_Parses_ToVariable) {
- auto p = parser("a = 123");
- auto e = p->assignment_stmt();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
-
- auto* a = e->As<ast::AssignmentStatement>();
- ASSERT_NE(a, nullptr);
- ASSERT_NE(a->lhs, nullptr);
- ASSERT_NE(a->rhs, nullptr);
-
- ASSERT_TRUE(a->lhs->Is<ast::IdentifierExpression>());
- auto* ident = a->lhs->As<ast::IdentifierExpression>();
- EXPECT_EQ(ident->symbol, p->builder().Symbols().Get("a"));
-
- ASSERT_TRUE(a->rhs->Is<ast::SintLiteralExpression>());
- EXPECT_EQ(a->rhs->As<ast::SintLiteralExpression>()->value, 123);
+ auto p = parser("a = 123");
+ auto e = p->assignment_stmt();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
+
+ auto* a = e->As<ast::AssignmentStatement>();
+ ASSERT_NE(a, nullptr);
+ ASSERT_NE(a->lhs, nullptr);
+ ASSERT_NE(a->rhs, nullptr);
+
+ ASSERT_TRUE(a->lhs->Is<ast::IdentifierExpression>());
+ auto* ident = a->lhs->As<ast::IdentifierExpression>();
+ EXPECT_EQ(ident->symbol, p->builder().Symbols().Get("a"));
+
+ ASSERT_TRUE(a->rhs->Is<ast::IntLiteralExpression>());
+ EXPECT_EQ(a->rhs->As<ast::IntLiteralExpression>()->value, 123);
+ EXPECT_EQ(a->rhs->As<ast::IntLiteralExpression>()->suffix,
+ ast::IntLiteralExpression::Suffix::kNone);
}
TEST_F(ParserImplTest, AssignmentStmt_Parses_ToMember) {
- auto p = parser("a.b.c[2].d = 123");
- auto e = p->assignment_stmt();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
-
- auto* a = e->As<ast::AssignmentStatement>();
- ASSERT_NE(a, nullptr);
- ASSERT_NE(a->lhs, nullptr);
- ASSERT_NE(a->rhs, nullptr);
-
- ASSERT_TRUE(a->rhs->Is<ast::SintLiteralExpression>());
- EXPECT_EQ(a->rhs->As<ast::SintLiteralExpression>()->value, 123);
-
- ASSERT_TRUE(a->lhs->Is<ast::MemberAccessorExpression>());
- auto* mem = a->lhs->As<ast::MemberAccessorExpression>();
-
- ASSERT_TRUE(mem->member->Is<ast::IdentifierExpression>());
- auto* ident = mem->member->As<ast::IdentifierExpression>();
- EXPECT_EQ(ident->symbol, p->builder().Symbols().Get("d"));
-
- ASSERT_TRUE(mem->structure->Is<ast::IndexAccessorExpression>());
- auto* idx = mem->structure->As<ast::IndexAccessorExpression>();
-
- ASSERT_NE(idx->index, nullptr);
- ASSERT_TRUE(idx->index->Is<ast::SintLiteralExpression>());
- EXPECT_EQ(idx->index->As<ast::SintLiteralExpression>()->value, 2);
-
- ASSERT_TRUE(idx->object->Is<ast::MemberAccessorExpression>());
- mem = idx->object->As<ast::MemberAccessorExpression>();
- ASSERT_TRUE(mem->member->Is<ast::IdentifierExpression>());
- ident = mem->member->As<ast::IdentifierExpression>();
- EXPECT_EQ(ident->symbol, p->builder().Symbols().Get("c"));
-
- ASSERT_TRUE(mem->structure->Is<ast::MemberAccessorExpression>());
- mem = mem->structure->As<ast::MemberAccessorExpression>();
-
- ASSERT_TRUE(mem->structure->Is<ast::IdentifierExpression>());
- ident = mem->structure->As<ast::IdentifierExpression>();
- EXPECT_EQ(ident->symbol, p->builder().Symbols().Get("a"));
-
- ASSERT_TRUE(mem->member->Is<ast::IdentifierExpression>());
- ident = mem->member->As<ast::IdentifierExpression>();
- EXPECT_EQ(ident->symbol, p->builder().Symbols().Get("b"));
+ auto p = parser("a.b.c[2].d = 123");
+ auto e = p->assignment_stmt();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
+
+ auto* a = e->As<ast::AssignmentStatement>();
+ ASSERT_NE(a, nullptr);
+ ASSERT_NE(a->lhs, nullptr);
+ ASSERT_NE(a->rhs, nullptr);
+
+ ASSERT_TRUE(a->rhs->Is<ast::IntLiteralExpression>());
+ EXPECT_EQ(a->rhs->As<ast::IntLiteralExpression>()->value, 123);
+ EXPECT_EQ(a->rhs->As<ast::IntLiteralExpression>()->suffix,
+ ast::IntLiteralExpression::Suffix::kNone);
+
+ ASSERT_TRUE(a->lhs->Is<ast::MemberAccessorExpression>());
+ auto* mem = a->lhs->As<ast::MemberAccessorExpression>();
+
+ ASSERT_TRUE(mem->member->Is<ast::IdentifierExpression>());
+ auto* ident = mem->member->As<ast::IdentifierExpression>();
+ EXPECT_EQ(ident->symbol, p->builder().Symbols().Get("d"));
+
+ ASSERT_TRUE(mem->structure->Is<ast::IndexAccessorExpression>());
+ auto* idx = mem->structure->As<ast::IndexAccessorExpression>();
+
+ ASSERT_NE(idx->index, nullptr);
+ ASSERT_TRUE(idx->index->Is<ast::IntLiteralExpression>());
+ EXPECT_EQ(idx->index->As<ast::IntLiteralExpression>()->value, 2);
+
+ ASSERT_TRUE(idx->object->Is<ast::MemberAccessorExpression>());
+ mem = idx->object->As<ast::MemberAccessorExpression>();
+ ASSERT_TRUE(mem->member->Is<ast::IdentifierExpression>());
+ ident = mem->member->As<ast::IdentifierExpression>();
+ EXPECT_EQ(ident->symbol, p->builder().Symbols().Get("c"));
+
+ ASSERT_TRUE(mem->structure->Is<ast::MemberAccessorExpression>());
+ mem = mem->structure->As<ast::MemberAccessorExpression>();
+
+ ASSERT_TRUE(mem->structure->Is<ast::IdentifierExpression>());
+ ident = mem->structure->As<ast::IdentifierExpression>();
+ EXPECT_EQ(ident->symbol, p->builder().Symbols().Get("a"));
+
+ ASSERT_TRUE(mem->member->Is<ast::IdentifierExpression>());
+ ident = mem->member->As<ast::IdentifierExpression>();
+ EXPECT_EQ(ident->symbol, p->builder().Symbols().Get("b"));
}
TEST_F(ParserImplTest, AssignmentStmt_Parses_ToPhony) {
- auto p = parser("_ = 123");
- auto e = p->assignment_stmt();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
-
- auto* a = e->As<ast::AssignmentStatement>();
- ASSERT_NE(a, nullptr);
- ASSERT_NE(a->lhs, nullptr);
- ASSERT_NE(a->rhs, nullptr);
-
- ASSERT_TRUE(a->rhs->Is<ast::SintLiteralExpression>());
- EXPECT_EQ(a->rhs->As<ast::SintLiteralExpression>()->value, 123);
-
- ASSERT_TRUE(a->lhs->Is<ast::PhonyExpression>());
+ auto p = parser("_ = 123i");
+ auto e = p->assignment_stmt();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
+
+ auto* a = e->As<ast::AssignmentStatement>();
+ ASSERT_NE(a, nullptr);
+ ASSERT_NE(a->lhs, nullptr);
+ ASSERT_NE(a->rhs, nullptr);
+
+ ASSERT_TRUE(a->rhs->Is<ast::IntLiteralExpression>());
+ EXPECT_EQ(a->rhs->As<ast::IntLiteralExpression>()->value, 123);
+ EXPECT_EQ(a->rhs->As<ast::IntLiteralExpression>()->suffix,
+ ast::IntLiteralExpression::Suffix::kI);
+
+ ASSERT_TRUE(a->lhs->Is<ast::PhonyExpression>());
}
TEST_F(ParserImplTest, AssignmentStmt_Parses_CompoundOp) {
- auto p = parser("a += 123");
- auto e = p->assignment_stmt();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
-
- auto* a = e->As<ast::CompoundAssignmentStatement>();
- ASSERT_NE(a, nullptr);
- ASSERT_NE(a->lhs, nullptr);
- ASSERT_NE(a->rhs, nullptr);
- EXPECT_EQ(a->op, ast::BinaryOp::kAdd);
-
- ASSERT_TRUE(a->lhs->Is<ast::IdentifierExpression>());
- auto* ident = a->lhs->As<ast::IdentifierExpression>();
- EXPECT_EQ(ident->symbol, p->builder().Symbols().Get("a"));
-
- ASSERT_TRUE(a->rhs->Is<ast::SintLiteralExpression>());
- EXPECT_EQ(a->rhs->As<ast::SintLiteralExpression>()->value, 123);
+ auto p = parser("a += 123u");
+ auto e = p->assignment_stmt();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
+
+ auto* a = e->As<ast::CompoundAssignmentStatement>();
+ ASSERT_NE(a, nullptr);
+ ASSERT_NE(a->lhs, nullptr);
+ ASSERT_NE(a->rhs, nullptr);
+ EXPECT_EQ(a->op, ast::BinaryOp::kAdd);
+
+ ASSERT_TRUE(a->lhs->Is<ast::IdentifierExpression>());
+ auto* ident = a->lhs->As<ast::IdentifierExpression>();
+ EXPECT_EQ(ident->symbol, p->builder().Symbols().Get("a"));
+
+ ASSERT_TRUE(a->rhs->Is<ast::IntLiteralExpression>());
+ EXPECT_EQ(a->rhs->As<ast::IntLiteralExpression>()->value, 123);
+ EXPECT_EQ(a->rhs->As<ast::IntLiteralExpression>()->suffix,
+ ast::IntLiteralExpression::Suffix::kU);
}
TEST_F(ParserImplTest, AssignmentStmt_MissingEqual) {
- auto p = parser("a.b.c[2].d 123");
- auto e = p->assignment_stmt();
- EXPECT_FALSE(e.matched);
- EXPECT_TRUE(e.errored);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(e.value, nullptr);
- EXPECT_EQ(p->error(), "1:12: expected '=' for assignment");
+ auto p = parser("a.b.c[2].d 123");
+ auto e = p->assignment_stmt();
+ EXPECT_FALSE(e.matched);
+ EXPECT_TRUE(e.errored);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(e.value, nullptr);
+ EXPECT_EQ(p->error(), "1:12: expected '=' for assignment");
}
TEST_F(ParserImplTest, AssignmentStmt_Compound_MissingEqual) {
- auto p = parser("a + 123");
- auto e = p->assignment_stmt();
- EXPECT_FALSE(e.matched);
- EXPECT_TRUE(e.errored);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(e.value, nullptr);
- EXPECT_EQ(p->error(), "1:3: expected '=' for assignment");
+ auto p = parser("a + 123");
+ auto e = p->assignment_stmt();
+ EXPECT_FALSE(e.matched);
+ EXPECT_TRUE(e.errored);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(e.value, nullptr);
+ EXPECT_EQ(p->error(), "1:3: expected '=' for assignment");
}
TEST_F(ParserImplTest, AssignmentStmt_InvalidLHS) {
- auto p = parser("if (true) {} = 123");
- auto e = p->assignment_stmt();
- EXPECT_FALSE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- EXPECT_EQ(e.value, nullptr);
+ auto p = parser("if (true) {} = 123");
+ auto e = p->assignment_stmt();
+ EXPECT_FALSE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ EXPECT_EQ(e.value, nullptr);
}
TEST_F(ParserImplTest, AssignmentStmt_InvalidRHS) {
- auto p = parser("a.b.c[2].d = if (true) {}");
- auto e = p->assignment_stmt();
- EXPECT_FALSE(e.matched);
- EXPECT_TRUE(e.errored);
- EXPECT_EQ(e.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:14: unable to parse right side of assignment");
+ auto p = parser("a.b.c[2].d = if (true) {}");
+ auto e = p->assignment_stmt();
+ EXPECT_FALSE(e.matched);
+ EXPECT_TRUE(e.errored);
+ EXPECT_EQ(e.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:14: unable to parse right side of assignment");
}
TEST_F(ParserImplTest, AssignmentStmt_InvalidCompoundOp) {
- auto p = parser("a &&= true");
- auto e = p->assignment_stmt();
- EXPECT_FALSE(e.matched);
- EXPECT_TRUE(e.errored);
- EXPECT_EQ(e.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:3: expected '=' for assignment");
+ auto p = parser("a &&= true");
+ auto e = p->assignment_stmt();
+ EXPECT_FALSE(e.matched);
+ EXPECT_TRUE(e.errored);
+ EXPECT_EQ(e.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:3: expected '=' for assignment");
}
} // namespace
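The diffs above replace the separate SintLiteralExpression/UintLiteralExpression checks with a single IntLiteralExpression carrying a Suffix (kNone, kI, kU), matching WGSL's 123, 123i and 123u literal forms. A minimal sketch of the new check pattern, using a simplified hypothetical literal node rather than the real tint::ast::IntLiteralExpression:

#include <cassert>
#include <cstdint>

struct IntLiteral {
    enum class Suffix { kNone, kI, kU };  // "123", "123i", "123u"
    int64_t value;
    Suffix suffix;
};

int main() {
    IntLiteral rhs{123, IntLiteral::Suffix::kU};  // as parsed from "a += 123u"
    assert(rhs.value == 123);
    assert(rhs.suffix == IntLiteral::Suffix::kU);
    return 0;
}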
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_body_stmt_test.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_body_stmt_test.cc
index 56efb9b020e..f84a5ba6c37 100644
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_body_stmt_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_body_stmt_test.cc
@@ -19,40 +19,40 @@ namespace tint::reader::wgsl {
namespace {
TEST_F(ParserImplTest, BodyStmt) {
- auto p = parser(R"({
+ auto p = parser(R"({
discard;
return 1 + b / 2;
})");
- auto e = p->expect_body_stmt();
- ASSERT_FALSE(p->has_error()) << p->error();
- ASSERT_FALSE(e.errored);
- ASSERT_EQ(e->statements.size(), 2u);
- EXPECT_TRUE(e->statements[0]->Is<ast::DiscardStatement>());
- EXPECT_TRUE(e->statements[1]->Is<ast::ReturnStatement>());
+ auto e = p->expect_body_stmt();
+ ASSERT_FALSE(p->has_error()) << p->error();
+ ASSERT_FALSE(e.errored);
+ ASSERT_EQ(e->statements.size(), 2u);
+ EXPECT_TRUE(e->statements[0]->Is<ast::DiscardStatement>());
+ EXPECT_TRUE(e->statements[1]->Is<ast::ReturnStatement>());
}
TEST_F(ParserImplTest, BodyStmt_Empty) {
- auto p = parser("{}");
- auto e = p->expect_body_stmt();
- ASSERT_FALSE(p->has_error()) << p->error();
- ASSERT_FALSE(e.errored);
- EXPECT_EQ(e->statements.size(), 0u);
+ auto p = parser("{}");
+ auto e = p->expect_body_stmt();
+ ASSERT_FALSE(p->has_error()) << p->error();
+ ASSERT_FALSE(e.errored);
+ EXPECT_EQ(e->statements.size(), 0u);
}
TEST_F(ParserImplTest, BodyStmt_InvalidStmt) {
- auto p = parser("{fn main() {}}");
- auto e = p->expect_body_stmt();
- ASSERT_TRUE(p->has_error());
- ASSERT_TRUE(e.errored);
- EXPECT_EQ(p->error(), "1:2: expected '}'");
+ auto p = parser("{fn main() {}}");
+ auto e = p->expect_body_stmt();
+ ASSERT_TRUE(p->has_error());
+ ASSERT_TRUE(e.errored);
+ EXPECT_EQ(p->error(), "1:2: expected '}'");
}
TEST_F(ParserImplTest, BodyStmt_MissingRightParen) {
- auto p = parser("{return;");
- auto e = p->expect_body_stmt();
- ASSERT_TRUE(p->has_error());
- ASSERT_TRUE(e.errored);
- EXPECT_EQ(p->error(), "1:9: expected '}'");
+ auto p = parser("{return;");
+ auto e = p->expect_body_stmt();
+ ASSERT_TRUE(p->has_error());
+ ASSERT_TRUE(e.errored);
+ EXPECT_EQ(p->error(), "1:9: expected '}'");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_break_stmt_test.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_break_stmt_test.cc
index d889b551bc5..ca8802a3711 100644
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_break_stmt_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_break_stmt_test.cc
@@ -19,12 +19,12 @@ namespace tint::reader::wgsl {
namespace {
TEST_F(ParserImplTest, BreakStmt) {
- auto p = parser("break");
- auto e = p->break_stmt();
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
- ASSERT_TRUE(e->Is<ast::BreakStatement>());
+ auto p = parser("break");
+ auto e = p->break_stmt();
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
+ ASSERT_TRUE(e->Is<ast::BreakStatement>());
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_bug_cases_test.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_bug_cases_test.cc
index 7119ba202da..48afd3d5584 100644
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_bug_cases_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_bug_cases_test.cc
@@ -18,10 +18,10 @@ namespace tint::reader::wgsl {
namespace {
TEST_F(ParserImplTest, Bug_chromium_1180130) {
- auto p = parser(
- R"(a;{}}a;}};{{{;{}};{};{}}a;{}};{{{}};{{{;{}};{};{}}a;{}};{{{}}{}};{{{;{}};{};{}{}};{{{}};{{{;{}};{};}a{}};{{{;{}};{};}a;{}};{{{}};{{{;u[({}};{{{}};{{}a;{}};{{{}};{{{;{}};{};{}{}};{{{}};{{{;{}};{};}a;{}};{{{}}{{}};{{{;{}};{};}a;{}};{{{}};{{{;u[({}};{{{}};{{}a;{}};{{{}};{{{;{}};{};{}{}};{{{}};{{{;{}};{};}a;{}};{{{}}{{}};{{{;{}};{};}a;{}};{{{}};{{{;u[({}};{{{}};{{}a;{}};{{{}};{{{;{}};{{{;u[([[,a;{}}a;{}};{{{}};{{}a;{}};{{{}};{{{;{}};{};{}{}};{{{}};{{{;{}};{};}a;{}};{{{}};{{{;u[({}};{{{}};{{}a;{}};{{{}};{{{;{}};{};{}{}};{{{}};{{{;{}};{};}a;{}};{{{}};{{{;u[({}};{{{}};{{}a;{}};{{{}};{{{;{}};{};{}{}};{{{}};{{{;{}};{};}a;{}};{{{}};{{{;u[({}};{{{}};{{}a;{}};{{{}};{z{;{}};{};{}{}};{{{}};{{{;{}};{};}a;{}};{{{}}{{}};{{{;{}};{};}a;{}};{{{}};{{{;u[({}};{{{}};{{}a;{}};{{{}};{{{;{}};{};{}{}};{{{}};{{{;{}};{};}a;{}};{{{}}{{}};{{{;{}};{};}a;{}};{{{}};{{{;u[({}};{{{}};{{}a;{}};{{{}};{{{;{}};{};{}{}};{{{}};{{{;{}};{};}a;{}};{{{}}{{}};{{{;{}};{};}a;{}};{{{}};{{{;u[({}};{{{}};{{}a;{}};{{{}};{{{;{}};{};{}{}};{{{}};{{{;{}};{};}a;{}};{};{{{;{}};{};{}{}};{{{}};{{{;{}};{};}a;{}};{{{}}{{}};{{{;{}};{};}a;{}};{{{}};{{{;u[({}};{{{}};{{}a;{}};{{{}};{{{;{}};{};{}{}};{{{}};{{{;{}};{};}a;{}};{{{}}{{}};{{{;{}};{};}a;{}};{{{}};{{{;u[({}};{{{}};{{}a;{}};{{{}};{{{;{}};{};{}{}};{{{}};{{{;{}};{};}a;{}};{{{}}{{}};{{{;{}};{};}a;{}};{{{}};{{{;u[({}};{{{}};{{}a;{}};{{{}};{{{;{}};{};{}{}};{{{}};{{{;{}};{};}a;{}};{{{}};{{{;u[({}};{{{}};{{}a;{}};{{{}};{{{;{}};{};{}{}};{{{}};{{{;{}};{};}a;{}};{{{}};{{{;u[({}};{{{}};{{}a;{}};{{{}};{{{;{}};{};{}{}};{{{}};{{{;{}};{};}a;{}};{{{}};{{{;u[({}};{{{}};{{}a;{}};{{{}};{{{;{}};{};{}{}};{{{}};{{{;{}};{};}a;{}};{{{}};{{{;u[({}};{{{}};{{}a;{}};{{{}};{{{;{}};{};{}{}};{{{}};{{{;{}};{};}a;{}};{{{}};{{{;u[({}};{{{}};{{}a;{}}i;{{{}};{{{;{}};{};{}{}};{{{}};{{{;{}};{};}a;{}};{{{}};{{{;u[({}};{{{}};{{}a;{}};{{{}};{{{;{}};{};{}{}};{{{}};{{{;{}};{};}a;{}};{{{}};{{{;u[({}};{{{}};{{}a;{}};{{{}};{{{;{}};{};{}{}};{{{}};{{{;{}};{};}a;{}};{{{}};{{{;u[({}};{{{}};{{}a;{}};{{{}};{{{;{}};{};}a;{}};{{{}};{{;u[[a,([}};{{{;{}})");
- EXPECT_FALSE(p->Parse());
- EXPECT_TRUE(p->has_error());
+ auto p = parser(
+ R"(a;{}}a;}};{{{;{}};{};{}}a;{}};{{{}};{{{;{}};{};{}}a;{}};{{{}}{}};{{{;{}};{};{}{}};{{{}};{{{;{}};{};}a{}};{{{;{}};{};}a;{}};{{{}};{{{;u[({}};{{{}};{{}a;{}};{{{}};{{{;{}};{};{}{}};{{{}};{{{;{}};{};}a;{}};{{{}}{{}};{{{;{}};{};}a;{}};{{{}};{{{;u[({}};{{{}};{{}a;{}};{{{}};{{{;{}};{};{}{}};{{{}};{{{;{}};{};}a;{}};{{{}}{{}};{{{;{}};{};}a;{}};{{{}};{{{;u[({}};{{{}};{{}a;{}};{{{}};{{{;{}};{{{;u[([[,a;{}}a;{}};{{{}};{{}a;{}};{{{}};{{{;{}};{};{}{}};{{{}};{{{;{}};{};}a;{}};{{{}};{{{;u[({}};{{{}};{{}a;{}};{{{}};{{{;{}};{};{}{}};{{{}};{{{;{}};{};}a;{}};{{{}};{{{;u[({}};{{{}};{{}a;{}};{{{}};{{{;{}};{};{}{}};{{{}};{{{;{}};{};}a;{}};{{{}};{{{;u[({}};{{{}};{{}a;{}};{{{}};{z{;{}};{};{}{}};{{{}};{{{;{}};{};}a;{}};{{{}}{{}};{{{;{}};{};}a;{}};{{{}};{{{;u[({}};{{{}};{{}a;{}};{{{}};{{{;{}};{};{}{}};{{{}};{{{;{}};{};}a;{}};{{{}}{{}};{{{;{}};{};}a;{}};{{{}};{{{;u[({}};{{{}};{{}a;{}};{{{}};{{{;{}};{};{}{}};{{{}};{{{;{}};{};}a;{}};{{{}}{{}};{{{;{}};{};}a;{}};{{{}};{{{;u[({}};{{{}};{{}a;{}};{{{}};{{{;{}};{};{}{}};{{{}};{{{;{}};{};}a;{}};{};{{{;{}};{};{}{}};{{{}};{{{;{}};{};}a;{}};{{{}}{{}};{{{;{}};{};}a;{}};{{{}};{{{;u[({}};{{{}};{{}a;{}};{{{}};{{{;{}};{};{}{}};{{{}};{{{;{}};{};}a;{}};{{{}}{{}};{{{;{}};{};}a;{}};{{{}};{{{;u[({}};{{{}};{{}a;{}};{{{}};{{{;{}};{};{}{}};{{{}};{{{;{}};{};}a;{}};{{{}}{{}};{{{;{}};{};}a;{}};{{{}};{{{;u[({}};{{{}};{{}a;{}};{{{}};{{{;{}};{};{}{}};{{{}};{{{;{}};{};}a;{}};{{{}};{{{;u[({}};{{{}};{{}a;{}};{{{}};{{{;{}};{};{}{}};{{{}};{{{;{}};{};}a;{}};{{{}};{{{;u[({}};{{{}};{{}a;{}};{{{}};{{{;{}};{};{}{}};{{{}};{{{;{}};{};}a;{}};{{{}};{{{;u[({}};{{{}};{{}a;{}};{{{}};{{{;{}};{};{}{}};{{{}};{{{;{}};{};}a;{}};{{{}};{{{;u[({}};{{{}};{{}a;{}};{{{}};{{{;{}};{};{}{}};{{{}};{{{;{}};{};}a;{}};{{{}};{{{;u[({}};{{{}};{{}a;{}}i;{{{}};{{{;{}};{};{}{}};{{{}};{{{;{}};{};}a;{}};{{{}};{{{;u[({}};{{{}};{{}a;{}};{{{}};{{{;{}};{};{}{}};{{{}};{{{;{}};{};}a;{}};{{{}};{{{;u[({}};{{{}};{{}a;{}};{{{}};{{{;{}};{};{}{}};{{{}};{{{;{}};{};}a;{}};{{{}};{{{;u[({}};{{{}};{{}a;{}};{{{}};{{{;{}};{};}a;{}};{{{}};{{;u[[a,([}};{{{;{}})");
+ EXPECT_FALSE(p->Parse());
+ EXPECT_TRUE(p->has_error());
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_call_stmt_test.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_call_stmt_test.cc
index 8d3ec0f0545..d044f8da00e 100644
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_call_stmt_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_call_stmt_test.cc
@@ -19,88 +19,88 @@ namespace tint::reader::wgsl {
namespace {
TEST_F(ParserImplTest, Statement_Call) {
- auto p = parser("a();");
- auto e = p->statement();
- ASSERT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
+ auto p = parser("a();");
+ auto e = p->statement();
+ ASSERT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
- EXPECT_EQ(e->source.range.begin.line, 1u);
- EXPECT_EQ(e->source.range.begin.column, 1u);
- EXPECT_EQ(e->source.range.end.line, 1u);
- EXPECT_EQ(e->source.range.end.column, 2u);
+ EXPECT_EQ(e->source.range.begin.line, 1u);
+ EXPECT_EQ(e->source.range.begin.column, 1u);
+ EXPECT_EQ(e->source.range.end.line, 1u);
+ EXPECT_EQ(e->source.range.end.column, 2u);
- ASSERT_TRUE(e->Is<ast::CallStatement>());
- auto* c = e->As<ast::CallStatement>()->expr;
+ ASSERT_TRUE(e->Is<ast::CallStatement>());
+ auto* c = e->As<ast::CallStatement>()->expr;
- EXPECT_EQ(c->target.name->symbol, p->builder().Symbols().Get("a"));
+ EXPECT_EQ(c->target.name->symbol, p->builder().Symbols().Get("a"));
- EXPECT_EQ(c->args.size(), 0u);
+ EXPECT_EQ(c->args.size(), 0u);
}
TEST_F(ParserImplTest, Statement_Call_WithParams) {
- auto p = parser("a(1, b, 2 + 3 / b);");
- auto e = p->statement();
- ASSERT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
-
- ASSERT_TRUE(e->Is<ast::CallStatement>());
- auto* c = e->As<ast::CallStatement>()->expr;
-
- EXPECT_EQ(c->target.name->symbol, p->builder().Symbols().Get("a"));
-
- EXPECT_EQ(c->args.size(), 3u);
- EXPECT_TRUE(c->args[0]->Is<ast::IntLiteralExpression>());
- EXPECT_TRUE(c->args[1]->Is<ast::IdentifierExpression>());
- EXPECT_TRUE(c->args[2]->Is<ast::BinaryExpression>());
+ auto p = parser("a(1, b, 2 + 3 / b);");
+ auto e = p->statement();
+ ASSERT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+
+ ASSERT_TRUE(e->Is<ast::CallStatement>());
+ auto* c = e->As<ast::CallStatement>()->expr;
+
+ EXPECT_EQ(c->target.name->symbol, p->builder().Symbols().Get("a"));
+
+ EXPECT_EQ(c->args.size(), 3u);
+ EXPECT_TRUE(c->args[0]->Is<ast::IntLiteralExpression>());
+ EXPECT_TRUE(c->args[1]->Is<ast::IdentifierExpression>());
+ EXPECT_TRUE(c->args[2]->Is<ast::BinaryExpression>());
}
TEST_F(ParserImplTest, Statement_Call_WithParams_TrailingComma) {
- auto p = parser("a(1, b,);");
- auto e = p->statement();
- ASSERT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
+ auto p = parser("a(1, b,);");
+ auto e = p->statement();
+ ASSERT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
- ASSERT_TRUE(e->Is<ast::CallStatement>());
- auto* c = e->As<ast::CallStatement>()->expr;
+ ASSERT_TRUE(e->Is<ast::CallStatement>());
+ auto* c = e->As<ast::CallStatement>()->expr;
- EXPECT_EQ(c->target.name->symbol, p->builder().Symbols().Get("a"));
+ EXPECT_EQ(c->target.name->symbol, p->builder().Symbols().Get("a"));
- EXPECT_EQ(c->args.size(), 2u);
- EXPECT_TRUE(c->args[0]->Is<ast::IntLiteralExpression>());
- EXPECT_TRUE(c->args[1]->Is<ast::IdentifierExpression>());
+ EXPECT_EQ(c->args.size(), 2u);
+ EXPECT_TRUE(c->args[0]->Is<ast::IntLiteralExpression>());
+ EXPECT_TRUE(c->args[1]->Is<ast::IdentifierExpression>());
}
TEST_F(ParserImplTest, Statement_Call_Missing_RightParen) {
- auto p = parser("a(");
- auto e = p->statement();
- EXPECT_TRUE(p->has_error());
- EXPECT_TRUE(e.errored);
- EXPECT_FALSE(e.matched);
- EXPECT_EQ(p->error(), "1:3: expected ')' for function call");
+ auto p = parser("a(");
+ auto e = p->statement();
+ EXPECT_TRUE(p->has_error());
+ EXPECT_TRUE(e.errored);
+ EXPECT_FALSE(e.matched);
+ EXPECT_EQ(p->error(), "1:3: expected ')' for function call");
}
TEST_F(ParserImplTest, Statement_Call_Missing_Semi) {
- auto p = parser("a()");
- auto e = p->statement();
- EXPECT_TRUE(p->has_error());
- EXPECT_TRUE(e.errored);
- EXPECT_FALSE(e.matched);
- EXPECT_EQ(p->error(), "1:4: expected ';' for function call");
+ auto p = parser("a()");
+ auto e = p->statement();
+ EXPECT_TRUE(p->has_error());
+ EXPECT_TRUE(e.errored);
+ EXPECT_FALSE(e.matched);
+ EXPECT_EQ(p->error(), "1:4: expected ';' for function call");
}
TEST_F(ParserImplTest, Statement_Call_Bad_ArgList) {
- auto p = parser("a(b c);");
- auto e = p->statement();
- EXPECT_TRUE(p->has_error());
- EXPECT_TRUE(e.errored);
- EXPECT_FALSE(e.matched);
- EXPECT_EQ(p->error(), "1:5: expected ')' for function call");
+ auto p = parser("a(b c);");
+ auto e = p->statement();
+ EXPECT_TRUE(p->has_error());
+ EXPECT_TRUE(e.errored);
+ EXPECT_FALSE(e.matched);
+ EXPECT_EQ(p->error(), "1:5: expected ')' for function call");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_case_body_test.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_case_body_test.cc
index 1c4bb93ffe8..f8c07a329ec 100644
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_case_body_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_case_body_test.cc
@@ -19,55 +19,55 @@ namespace tint::reader::wgsl {
namespace {
TEST_F(ParserImplTest, CaseBody_Empty) {
- auto p = parser("");
- auto e = p->case_body();
- ASSERT_FALSE(p->has_error()) << p->error();
- EXPECT_FALSE(e.errored);
- EXPECT_TRUE(e.matched);
- EXPECT_EQ(e->statements.size(), 0u);
+ auto p = parser("");
+ auto e = p->case_body();
+ ASSERT_FALSE(p->has_error()) << p->error();
+ EXPECT_FALSE(e.errored);
+ EXPECT_TRUE(e.matched);
+ EXPECT_EQ(e->statements.size(), 0u);
}
TEST_F(ParserImplTest, CaseBody_Statements) {
- auto p = parser(R"(
+ auto p = parser(R"(
var a: i32;
a = 2;)");
- auto e = p->case_body();
- ASSERT_FALSE(p->has_error()) << p->error();
- EXPECT_FALSE(e.errored);
- EXPECT_TRUE(e.matched);
- ASSERT_EQ(e->statements.size(), 2u);
- EXPECT_TRUE(e->statements[0]->Is<ast::VariableDeclStatement>());
- EXPECT_TRUE(e->statements[1]->Is<ast::AssignmentStatement>());
+ auto e = p->case_body();
+ ASSERT_FALSE(p->has_error()) << p->error();
+ EXPECT_FALSE(e.errored);
+ EXPECT_TRUE(e.matched);
+ ASSERT_EQ(e->statements.size(), 2u);
+ EXPECT_TRUE(e->statements[0]->Is<ast::VariableDeclStatement>());
+ EXPECT_TRUE(e->statements[1]->Is<ast::AssignmentStatement>());
}
TEST_F(ParserImplTest, CaseBody_InvalidStatement) {
- auto p = parser("a =");
- auto e = p->case_body();
- EXPECT_TRUE(p->has_error());
- EXPECT_TRUE(e.errored);
- EXPECT_FALSE(e.matched);
- EXPECT_EQ(e.value, nullptr);
+ auto p = parser("a =");
+ auto e = p->case_body();
+ EXPECT_TRUE(p->has_error());
+ EXPECT_TRUE(e.errored);
+ EXPECT_FALSE(e.matched);
+ EXPECT_EQ(e.value, nullptr);
}
TEST_F(ParserImplTest, CaseBody_Fallthrough) {
- auto p = parser("fallthrough;");
- auto e = p->case_body();
- ASSERT_FALSE(p->has_error()) << p->error();
- EXPECT_FALSE(e.errored);
- EXPECT_TRUE(e.matched);
- ASSERT_EQ(e->statements.size(), 1u);
- EXPECT_TRUE(e->statements[0]->Is<ast::FallthroughStatement>());
+ auto p = parser("fallthrough;");
+ auto e = p->case_body();
+ ASSERT_FALSE(p->has_error()) << p->error();
+ EXPECT_FALSE(e.errored);
+ EXPECT_TRUE(e.matched);
+ ASSERT_EQ(e->statements.size(), 1u);
+ EXPECT_TRUE(e->statements[0]->Is<ast::FallthroughStatement>());
}
TEST_F(ParserImplTest, CaseBody_Fallthrough_MissingSemicolon) {
- auto p = parser("fallthrough");
- auto e = p->case_body();
- EXPECT_TRUE(p->has_error());
- EXPECT_TRUE(e.errored);
- EXPECT_FALSE(e.matched);
- EXPECT_EQ(e.value, nullptr);
- EXPECT_EQ(p->error(), "1:12: expected ';' for fallthrough statement");
+ auto p = parser("fallthrough");
+ auto e = p->case_body();
+ EXPECT_TRUE(p->has_error());
+ EXPECT_TRUE(e.errored);
+ EXPECT_FALSE(e.matched);
+ EXPECT_EQ(e.value, nullptr);
+ EXPECT_EQ(p->error(), "1:12: expected ';' for fallthrough statement");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_const_expr_test.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_const_expr_test.cc
index bb612d124f8..80536bd6e6a 100644
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_const_expr_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_const_expr_test.cc
@@ -18,152 +18,155 @@ namespace tint::reader::wgsl {
namespace {
TEST_F(ParserImplTest, ConstExpr_TypeDecl) {
- auto p = parser("vec2<f32>(1., 2.)");
- auto e = p->expect_const_expr();
- ASSERT_FALSE(p->has_error()) << p->error();
- ASSERT_FALSE(e.errored);
- ASSERT_TRUE(e->Is<ast::CallExpression>());
-
- auto* t = e->As<ast::CallExpression>();
- ASSERT_TRUE(t->target.type->Is<ast::Vector>());
- EXPECT_EQ(t->target.type->As<ast::Vector>()->width, 2u);
-
- ASSERT_EQ(t->args.size(), 2u);
-
- ASSERT_TRUE(t->args[0]->Is<ast::FloatLiteralExpression>());
- EXPECT_FLOAT_EQ(t->args[0]->As<ast::FloatLiteralExpression>()->value, 1.);
-
- ASSERT_TRUE(t->args[1]->Is<ast::FloatLiteralExpression>());
- EXPECT_FLOAT_EQ(t->args[1]->As<ast::FloatLiteralExpression>()->value, 2.);
+ auto p = parser("vec2<f32>(1., 2.)");
+ auto e = p->expect_const_expr();
+ ASSERT_FALSE(p->has_error()) << p->error();
+ ASSERT_FALSE(e.errored);
+ ASSERT_TRUE(e->Is<ast::CallExpression>());
+
+ auto* t = e->As<ast::CallExpression>();
+ ASSERT_TRUE(t->target.type->Is<ast::Vector>());
+ EXPECT_EQ(t->target.type->As<ast::Vector>()->width, 2u);
+
+ ASSERT_EQ(t->args.size(), 2u);
+
+ ASSERT_TRUE(t->args[0]->Is<ast::FloatLiteralExpression>());
+ EXPECT_DOUBLE_EQ(t->args[0]->As<ast::FloatLiteralExpression>()->value, 1.);
+ EXPECT_EQ(t->args[0]->As<ast::FloatLiteralExpression>()->suffix,
+ ast::FloatLiteralExpression::Suffix::kNone);
+
+ ASSERT_TRUE(t->args[1]->Is<ast::FloatLiteralExpression>());
+ EXPECT_DOUBLE_EQ(t->args[1]->As<ast::FloatLiteralExpression>()->value, 2.);
+ EXPECT_EQ(t->args[1]->As<ast::FloatLiteralExpression>()->suffix,
+ ast::FloatLiteralExpression::Suffix::kNone);
}
TEST_F(ParserImplTest, ConstExpr_TypeDecl_Empty) {
- auto p = parser("vec2<f32>()");
- auto e = p->expect_const_expr();
- ASSERT_FALSE(p->has_error()) << p->error();
- ASSERT_FALSE(e.errored);
- ASSERT_TRUE(e->Is<ast::CallExpression>());
+ auto p = parser("vec2<f32>()");
+ auto e = p->expect_const_expr();
+ ASSERT_FALSE(p->has_error()) << p->error();
+ ASSERT_FALSE(e.errored);
+ ASSERT_TRUE(e->Is<ast::CallExpression>());
- auto* t = e->As<ast::CallExpression>();
- ASSERT_TRUE(t->target.type->Is<ast::Vector>());
- EXPECT_EQ(t->target.type->As<ast::Vector>()->width, 2u);
+ auto* t = e->As<ast::CallExpression>();
+ ASSERT_TRUE(t->target.type->Is<ast::Vector>());
+ EXPECT_EQ(t->target.type->As<ast::Vector>()->width, 2u);
- ASSERT_EQ(t->args.size(), 0u);
+ ASSERT_EQ(t->args.size(), 0u);
}
TEST_F(ParserImplTest, ConstExpr_TypeDecl_TrailingComma) {
- auto p = parser("vec2<f32>(1., 2.,)");
- auto e = p->expect_const_expr();
- ASSERT_FALSE(p->has_error()) << p->error();
- ASSERT_FALSE(e.errored);
- ASSERT_TRUE(e->Is<ast::CallExpression>());
-
- auto* t = e->As<ast::CallExpression>();
- ASSERT_TRUE(t->target.type->Is<ast::Vector>());
- EXPECT_EQ(t->target.type->As<ast::Vector>()->width, 2u);
-
- ASSERT_EQ(t->args.size(), 2u);
- ASSERT_TRUE(t->args[0]->Is<ast::LiteralExpression>());
- ASSERT_TRUE(t->args[1]->Is<ast::LiteralExpression>());
+ auto p = parser("vec2<f32>(1., 2.,)");
+ auto e = p->expect_const_expr();
+ ASSERT_FALSE(p->has_error()) << p->error();
+ ASSERT_FALSE(e.errored);
+ ASSERT_TRUE(e->Is<ast::CallExpression>());
+
+ auto* t = e->As<ast::CallExpression>();
+ ASSERT_TRUE(t->target.type->Is<ast::Vector>());
+ EXPECT_EQ(t->target.type->As<ast::Vector>()->width, 2u);
+
+ ASSERT_EQ(t->args.size(), 2u);
+ ASSERT_TRUE(t->args[0]->Is<ast::LiteralExpression>());
+ ASSERT_TRUE(t->args[1]->Is<ast::LiteralExpression>());
}
TEST_F(ParserImplTest, ConstExpr_TypeDecl_MissingRightParen) {
- auto p = parser("vec2<f32>(1., 2.");
- auto e = p->expect_const_expr();
- ASSERT_TRUE(p->has_error());
- ASSERT_TRUE(e.errored);
- ASSERT_EQ(e.value, nullptr);
- EXPECT_EQ(p->error(), "1:17: expected ')' for type constructor");
+ auto p = parser("vec2<f32>(1., 2.");
+ auto e = p->expect_const_expr();
+ ASSERT_TRUE(p->has_error());
+ ASSERT_TRUE(e.errored);
+ ASSERT_EQ(e.value, nullptr);
+ EXPECT_EQ(p->error(), "1:17: expected ')' for type constructor");
}
TEST_F(ParserImplTest, ConstExpr_TypeDecl_MissingLeftParen) {
- auto p = parser("vec2<f32> 1., 2.)");
- auto e = p->expect_const_expr();
- ASSERT_TRUE(p->has_error());
- ASSERT_TRUE(e.errored);
- ASSERT_EQ(e.value, nullptr);
- EXPECT_EQ(p->error(), "1:11: expected '(' for type constructor");
+ auto p = parser("vec2<f32> 1., 2.)");
+ auto e = p->expect_const_expr();
+ ASSERT_TRUE(p->has_error());
+ ASSERT_TRUE(e.errored);
+ ASSERT_EQ(e.value, nullptr);
+ EXPECT_EQ(p->error(), "1:11: expected '(' for type constructor");
}
TEST_F(ParserImplTest, ConstExpr_TypeDecl_MissingComma) {
- auto p = parser("vec2<f32>(1. 2.");
- auto e = p->expect_const_expr();
- ASSERT_TRUE(p->has_error());
- ASSERT_TRUE(e.errored);
- ASSERT_EQ(e.value, nullptr);
- EXPECT_EQ(p->error(), "1:14: expected ')' for type constructor");
+ auto p = parser("vec2<f32>(1. 2.");
+ auto e = p->expect_const_expr();
+ ASSERT_TRUE(p->has_error());
+ ASSERT_TRUE(e.errored);
+ ASSERT_EQ(e.value, nullptr);
+ EXPECT_EQ(p->error(), "1:14: expected ')' for type constructor");
}
TEST_F(ParserImplTest, ConstExpr_InvalidExpr) {
- auto p = parser("vec2<f32>(1., if(a) {})");
- auto e = p->expect_const_expr();
- ASSERT_TRUE(p->has_error());
- ASSERT_TRUE(e.errored);
- ASSERT_EQ(e.value, nullptr);
- EXPECT_EQ(p->error(), "1:15: invalid type for const_expr");
+ auto p = parser("vec2<f32>(1., if(a) {})");
+ auto e = p->expect_const_expr();
+ ASSERT_TRUE(p->has_error());
+ ASSERT_TRUE(e.errored);
+ ASSERT_EQ(e.value, nullptr);
+ EXPECT_EQ(p->error(), "1:15: invalid type for const_expr");
}
TEST_F(ParserImplTest, ConstExpr_ConstLiteral) {
- auto p = parser("true");
- auto e = p->expect_const_expr();
- ASSERT_FALSE(p->has_error()) << p->error();
- ASSERT_FALSE(e.errored);
- ASSERT_NE(e.value, nullptr);
- ASSERT_TRUE(e.value->Is<ast::BoolLiteralExpression>());
- EXPECT_TRUE(e.value->As<ast::BoolLiteralExpression>()->value);
+ auto p = parser("true");
+ auto e = p->expect_const_expr();
+ ASSERT_FALSE(p->has_error()) << p->error();
+ ASSERT_FALSE(e.errored);
+ ASSERT_NE(e.value, nullptr);
+ ASSERT_TRUE(e.value->Is<ast::BoolLiteralExpression>());
+ EXPECT_TRUE(e.value->As<ast::BoolLiteralExpression>()->value);
}
TEST_F(ParserImplTest, ConstExpr_ConstLiteral_Invalid) {
- auto p = parser("invalid");
- auto e = p->expect_const_expr();
- ASSERT_TRUE(p->has_error());
- ASSERT_TRUE(e.errored);
- ASSERT_EQ(e.value, nullptr);
- EXPECT_EQ(p->error(), "1:1: unable to parse const_expr");
+ auto p = parser("invalid");
+ auto e = p->expect_const_expr();
+ ASSERT_TRUE(p->has_error());
+ ASSERT_TRUE(e.errored);
+ ASSERT_EQ(e.value, nullptr);
+ EXPECT_EQ(p->error(), "1:1: unable to parse const_expr");
}
TEST_F(ParserImplTest, ConstExpr_TypeConstructor) {
- auto p = parser("S(0)");
-
- auto e = p->expect_const_expr();
- ASSERT_FALSE(e.errored);
- ASSERT_TRUE(e->Is<ast::CallExpression>());
- ASSERT_NE(e->As<ast::CallExpression>()->target.type, nullptr);
- ASSERT_TRUE(e->As<ast::CallExpression>()->target.type->Is<ast::TypeName>());
- EXPECT_EQ(
- e->As<ast::CallExpression>()->target.type->As<ast::TypeName>()->name,
- p->builder().Symbols().Get("S"));
+ auto p = parser("S(0)");
+
+ auto e = p->expect_const_expr();
+ ASSERT_FALSE(e.errored);
+ ASSERT_TRUE(e->Is<ast::CallExpression>());
+ ASSERT_NE(e->As<ast::CallExpression>()->target.type, nullptr);
+ ASSERT_TRUE(e->As<ast::CallExpression>()->target.type->Is<ast::TypeName>());
+ EXPECT_EQ(e->As<ast::CallExpression>()->target.type->As<ast::TypeName>()->name,
+ p->builder().Symbols().Get("S"));
}
TEST_F(ParserImplTest, ConstExpr_Recursion) {
- std::stringstream out;
- for (size_t i = 0; i < 200; i++) {
- out << "f32(";
- }
- out << "1.0";
- for (size_t i = 0; i < 200; i++) {
- out << ")";
- }
- auto p = parser(out.str());
- auto e = p->expect_const_expr();
- ASSERT_TRUE(p->has_error());
- ASSERT_TRUE(e.errored);
- ASSERT_EQ(e.value, nullptr);
- EXPECT_EQ(p->error(), "1:517: maximum parser recursive depth reached");
+ std::stringstream out;
+ for (size_t i = 0; i < 200; i++) {
+ out << "f32(";
+ }
+ out << "1.0";
+ for (size_t i = 0; i < 200; i++) {
+ out << ")";
+ }
+ auto p = parser(out.str());
+ auto e = p->expect_const_expr();
+ ASSERT_TRUE(p->has_error());
+ ASSERT_TRUE(e.errored);
+ ASSERT_EQ(e.value, nullptr);
+ EXPECT_EQ(p->error(), "1:517: maximum parser recursive depth reached");
}
TEST_F(ParserImplTest, UnaryOp_Recursion) {
- std::stringstream out;
- for (size_t i = 0; i < 200; i++) {
- out << "!";
- }
- out << "1.0";
- auto p = parser(out.str());
- auto e = p->unary_expression();
- ASSERT_TRUE(p->has_error());
- ASSERT_TRUE(e.errored);
- ASSERT_EQ(e.value, nullptr);
- EXPECT_EQ(p->error(), "1:130: maximum parser recursive depth reached");
+ std::stringstream out;
+ for (size_t i = 0; i < 200; i++) {
+ out << "!";
+ }
+ out << "1.0";
+ auto p = parser(out.str());
+ auto e = p->unary_expression();
+ ASSERT_TRUE(p->has_error());
+ ASSERT_TRUE(e.errored);
+ ASSERT_EQ(e.value, nullptr);
+ EXPECT_EQ(p->error(), "1:130: maximum parser recursive depth reached");
}
} // namespace
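The next file's diff replaces the binary32 MakeFloat() helper with a binary64 MakeDouble(), since float literals are now carried as doubles. A self-contained usage sketch of the same bit packing; the helper body mirrors the one in the diff, and the sample values are chosen here purely for illustration:

#include <cassert>
#include <cstdint>
#include <cstring>

// Packs sign bit, 11-bit biased exponent (bias 1023) and 52-bit mantissa into
// an IEEE 754 binary64 value, as MakeDouble() in the following test file does.
static double MakeDouble(uint64_t sign, uint64_t biased_exponent, uint64_t mantissa) {
    const uint64_t bits = (sign ? 0x8000000000000000u : 0u) |
                          ((biased_exponent & 0x7FFull) << 52) |
                          (mantissa & 0xFFFFFFFFFFFFFull);
    double result;
    std::memcpy(&result, &bits, sizeof(bits));
    return result;
}

int main() {
    assert(MakeDouble(0, 1023, 0) == 1.0);   // 2^(1023-1023) with implicit leading 1
    assert(MakeDouble(1, 1023, 0) == -1.0);  // same value with the sign bit set
    assert(MakeDouble(0, 1024, 0) == 2.0);   // exponent 2^1
    return 0;
}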
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_const_literal_test.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_const_literal_test.cc
index 44c70832b4f..b07ec1b66a5 100644
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_const_literal_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_const_literal_test.cc
@@ -14,7 +14,6 @@
#include "src/tint/reader/wgsl/parser_impl_test_helper.h"
-#include <cmath>
#include <cstring>
#include "gmock/gmock.h"
@@ -22,324 +21,385 @@
namespace tint::reader::wgsl {
namespace {
-// Makes an IEEE 754 binary32 floating point number with
+// Makes an IEEE 754 binary64 floating point number with
// - 0 sign if sign is 0, 1 otherwise
// - 'exponent_bits' is placed in the exponent space.
// So, the exponent bias must already be included.
-float MakeFloat(int sign, int biased_exponent, int mantissa) {
- const uint32_t sign_bit = sign ? 0x80000000u : 0u;
- // The binary32 exponent is 8 bits, just below the sign.
- const uint32_t exponent_bits = (biased_exponent & 0xffu) << 23;
- // The mantissa is the bottom 23 bits.
- const uint32_t mantissa_bits = (mantissa & 0x7fffffu);
-
- uint32_t bits = sign_bit | exponent_bits | mantissa_bits;
- float result = 0.0f;
- static_assert(sizeof(result) == sizeof(bits),
- "expected float and uint32_t to be the same size");
- std::memcpy(&result, &bits, sizeof(bits));
- return result;
+double MakeDouble(uint64_t sign, uint64_t biased_exponent, uint64_t mantissa) {
+ const uint64_t sign_bit = sign ? 0x8000000000000000u : 0u;
+ // The binary64 exponent is 11 bits, just below the sign.
+ const uint64_t exponent_bits = (biased_exponent & 0x7FFull) << 52;
+ // The mantissa is the bottom 52 bits.
+ const uint64_t mantissa_bits = (mantissa & 0xFFFFFFFFFFFFFull);
+
+ uint64_t bits = sign_bit | exponent_bits | mantissa_bits;
+ double result = 0.0;
+ static_assert(sizeof(result) == sizeof(bits),
+ "expected double and uint64_t to be the same size");
+ std::memcpy(&result, &bits, sizeof(bits));
+ return result;
}
TEST_F(ParserImplTest, ConstLiteral_Int) {
- auto p = parser("-234");
- auto c = p->const_literal();
- EXPECT_TRUE(c.matched);
- EXPECT_FALSE(c.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(c.value, nullptr);
- ASSERT_TRUE(c->Is<ast::SintLiteralExpression>());
- EXPECT_EQ(c->As<ast::SintLiteralExpression>()->value, -234);
- EXPECT_EQ(c->source.range, (Source::Range{{1u, 1u}, {1u, 5u}}));
+ {
+ auto p = parser("234");
+ auto c = p->const_literal();
+ EXPECT_TRUE(c.matched);
+ EXPECT_FALSE(c.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(c.value, nullptr);
+ ASSERT_TRUE(c->Is<ast::IntLiteralExpression>());
+ EXPECT_EQ(c->As<ast::IntLiteralExpression>()->value, 234);
+ EXPECT_EQ(c->As<ast::IntLiteralExpression>()->suffix,
+ ast::IntLiteralExpression::Suffix::kNone);
+ EXPECT_EQ(c->source.range, (Source::Range{{1u, 1u}, {1u, 4u}}));
+ }
+ {
+ auto p = parser("234i");
+ auto c = p->const_literal();
+ EXPECT_TRUE(c.matched);
+ EXPECT_FALSE(c.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(c.value, nullptr);
+ ASSERT_TRUE(c->Is<ast::IntLiteralExpression>());
+ EXPECT_EQ(c->As<ast::IntLiteralExpression>()->value, 234);
+ EXPECT_EQ(c->As<ast::IntLiteralExpression>()->suffix,
+ ast::IntLiteralExpression::Suffix::kI);
+ EXPECT_EQ(c->source.range, (Source::Range{{1u, 1u}, {1u, 5u}}));
+ }
+ {
+ auto p = parser("-234");
+ auto c = p->const_literal();
+ EXPECT_TRUE(c.matched);
+ EXPECT_FALSE(c.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(c.value, nullptr);
+ ASSERT_TRUE(c->Is<ast::IntLiteralExpression>());
+ EXPECT_EQ(c->As<ast::IntLiteralExpression>()->value, -234);
+ EXPECT_EQ(c->As<ast::IntLiteralExpression>()->suffix,
+ ast::IntLiteralExpression::Suffix::kNone);
+ EXPECT_EQ(c->source.range, (Source::Range{{1u, 1u}, {1u, 5u}}));
+ }
+ {
+ auto p = parser("-234i");
+ auto c = p->const_literal();
+ EXPECT_TRUE(c.matched);
+ EXPECT_FALSE(c.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(c.value, nullptr);
+ ASSERT_TRUE(c->Is<ast::IntLiteralExpression>());
+ EXPECT_EQ(c->As<ast::IntLiteralExpression>()->value, -234);
+ EXPECT_EQ(c->As<ast::IntLiteralExpression>()->suffix,
+ ast::IntLiteralExpression::Suffix::kI);
+ EXPECT_EQ(c->source.range, (Source::Range{{1u, 1u}, {1u, 6u}}));
+ }
}
TEST_F(ParserImplTest, ConstLiteral_Uint) {
- auto p = parser("234u");
- auto c = p->const_literal();
- EXPECT_TRUE(c.matched);
- EXPECT_FALSE(c.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(c.value, nullptr);
- ASSERT_TRUE(c->Is<ast::UintLiteralExpression>());
- EXPECT_EQ(c->As<ast::UintLiteralExpression>()->value, 234u);
- EXPECT_EQ(c->source.range, (Source::Range{{1u, 1u}, {1u, 5u}}));
+ auto p = parser("234u");
+ auto c = p->const_literal();
+ EXPECT_TRUE(c.matched);
+ EXPECT_FALSE(c.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(c.value, nullptr);
+ ASSERT_TRUE(c->Is<ast::IntLiteralExpression>());
+ EXPECT_EQ(c->As<ast::IntLiteralExpression>()->value, 234);
+ EXPECT_EQ(c->As<ast::IntLiteralExpression>()->suffix, ast::IntLiteralExpression::Suffix::kU);
+ EXPECT_EQ(c->source.range, (Source::Range{{1u, 1u}, {1u, 5u}}));
}
-TEST_F(ParserImplTest, ConstLiteral_Float) {
- auto p = parser("234.e12");
- auto c = p->const_literal();
- EXPECT_TRUE(c.matched);
- EXPECT_FALSE(c.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(c.value, nullptr);
- ASSERT_TRUE(c->Is<ast::FloatLiteralExpression>());
- EXPECT_FLOAT_EQ(c->As<ast::FloatLiteralExpression>()->value, 234e12f);
- EXPECT_EQ(c->source.range, (Source::Range{{1u, 1u}, {1u, 8u}}));
+TEST_F(ParserImplTest, ConstLiteral_Uint_Negative) {
+ auto p = parser("-234u");
+ auto c = p->const_literal();
+ EXPECT_FALSE(c.matched);
+ EXPECT_TRUE(c.errored);
+ EXPECT_EQ(p->error(), "1:1: value cannot be represented as 'u32'");
+ ASSERT_EQ(c.value, nullptr);
}
TEST_F(ParserImplTest, ConstLiteral_InvalidFloat_IncompleteExponent) {
- auto p = parser("1.0e+");
- auto c = p->const_literal();
- EXPECT_FALSE(c.matched);
- EXPECT_TRUE(c.errored);
- EXPECT_EQ(p->error(),
- "1:1: incomplete exponent for floating point literal: 1.0e+");
- ASSERT_EQ(c.value, nullptr);
-}
-
-TEST_F(ParserImplTest, ConstLiteral_InvalidFloat_TooSmallMagnitude) {
- auto p = parser("1e-256");
- auto c = p->const_literal();
- EXPECT_FALSE(c.matched);
- EXPECT_TRUE(c.errored);
- EXPECT_EQ(p->error(),
- "1:1: f32 (1e-256) magnitude too small, not representable");
- ASSERT_EQ(c.value, nullptr);
-}
-
-TEST_F(ParserImplTest, ConstLiteral_InvalidFloat_TooLargeNegative) {
- auto p = parser("-1.2e+256");
- auto c = p->const_literal();
- EXPECT_FALSE(c.matched);
- EXPECT_TRUE(c.errored);
- EXPECT_EQ(p->error(), "1:1: f32 (-1.2e+256) too large (negative)");
- ASSERT_EQ(c.value, nullptr);
-}
-
-TEST_F(ParserImplTest, ConstLiteral_InvalidFloat_TooLargePositive) {
- auto p = parser("1.2e+256");
- auto c = p->const_literal();
- EXPECT_FALSE(c.matched);
- EXPECT_TRUE(c.errored);
- EXPECT_EQ(p->error(), "1:1: f32 (1.2e+256) too large (positive)");
- ASSERT_EQ(c.value, nullptr);
-}
-
-// Returns true if the given non-Nan float numbers are equal.
-bool FloatEqual(float a, float b) {
- // Avoid Clang complaining about equality test on float.
- // -Wfloat-equal.
- return (a <= b) && (a >= b);
+ auto p = parser("1.0e+");
+ auto c = p->const_literal();
+ EXPECT_FALSE(c.matched);
+ EXPECT_TRUE(c.errored);
+ EXPECT_EQ(p->error(), "1:1: incomplete exponent for floating point literal: 1.0e+");
+ ASSERT_EQ(c.value, nullptr);
}
struct FloatLiteralTestCase {
- std::string input;
- float expected;
- bool operator==(const FloatLiteralTestCase& other) const {
- return (input == other.input) && FloatEqual(expected, other.expected);
- }
+ std::string input;
+ double expected;
+ bool operator==(const FloatLiteralTestCase& other) const {
+ return (input == other.input) && std::equal_to<double>()(expected, other.expected);
+ }
};
inline std::ostream& operator<<(std::ostream& out, FloatLiteralTestCase data) {
- out << data.input;
- return out;
+ out << data.input;
+ return out;
}
-class ParserImplFloatLiteralTest
- : public ParserImplTestWithParam<FloatLiteralTestCase> {};
+class ParserImplFloatLiteralTest : public ParserImplTestWithParam<FloatLiteralTestCase> {};
TEST_P(ParserImplFloatLiteralTest, Parse) {
- auto params = GetParam();
- SCOPED_TRACE(params.input);
- auto p = parser(params.input);
- auto c = p->const_literal();
- EXPECT_TRUE(c.matched);
- EXPECT_FALSE(c.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(c.value, nullptr);
- ASSERT_TRUE(c->Is<ast::FloatLiteralExpression>());
- EXPECT_FLOAT_EQ(c->As<ast::FloatLiteralExpression>()->value, params.expected);
+ auto params = GetParam();
+ SCOPED_TRACE(params.input);
+ auto p = parser(params.input);
+ auto c = p->const_literal();
+ EXPECT_TRUE(c.matched);
+ EXPECT_FALSE(c.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(c.value, nullptr);
+ auto* literal = c->As<ast::FloatLiteralExpression>();
+ ASSERT_NE(literal, nullptr);
+ EXPECT_DOUBLE_EQ(literal->value, params.expected)
+ << "\n"
+ << "got: " << std::hexfloat << literal->value << "\n"
+ << "expected: " << std::hexfloat << params.expected;
+ if (params.input.back() == 'f') {
+ EXPECT_EQ(c->As<ast::FloatLiteralExpression>()->suffix,
+ ast::FloatLiteralExpression::Suffix::kF);
+ } else {
+ EXPECT_EQ(c->As<ast::FloatLiteralExpression>()->suffix,
+ ast::FloatLiteralExpression::Suffix::kNone);
+ }
+ EXPECT_EQ(c->source.range, (Source::Range{{1u, 1u}, {1u, 1u + params.input.size()}}));
}
-
using FloatLiteralTestCaseList = std::vector<FloatLiteralTestCase>;
-FloatLiteralTestCaseList DecimalFloatCases() {
- return FloatLiteralTestCaseList{
- {"0.0", 0.0f}, // Zero
- {"1.0", 1.0f}, // One
- {"-1.0", -1.0f}, // MinusOne
- {"1000000000.0", 1e9f}, // Billion
- {"-0.0", std::copysign(0.0f, -5.0f)}, // NegativeZero
- {"0.0", MakeFloat(0, 0, 0)}, // Zero
- {"-0.0", MakeFloat(1, 0, 0)}, // NegativeZero
- {"1.0", MakeFloat(0, 127, 0)}, // One
- {"-1.0", MakeFloat(1, 127, 0)}, // NegativeOne
- };
-}
-
INSTANTIATE_TEST_SUITE_P(ParserImplFloatLiteralTest_Float,
ParserImplFloatLiteralTest,
- testing::ValuesIn(DecimalFloatCases()));
-
-const float NegInf = MakeFloat(1, 255, 0);
-const float PosInf = MakeFloat(0, 255, 0);
+ testing::ValuesIn(FloatLiteralTestCaseList{
+ {"0.0", 0.0}, // Zero
+ {"1.0", 1.0}, // One
+ {"-1.0", -1.0}, // MinusOne
+ {"1000000000.0", 1e9}, // Billion
+ {"-0.0", std::copysign(0.0, -5.0)}, // NegativeZero
+ {"0.0", MakeDouble(0, 0, 0)}, // Zero
+ {"-0.0", MakeDouble(1, 0, 0)}, // NegativeZero
+ {"1.0", MakeDouble(0, 1023, 0)}, // One
+ {"-1.0", MakeDouble(1, 1023, 0)}, // NegativeOne
+
+ {"234.e12", 234.e12},
+ {"234.e12f", static_cast<double>(234.e12f)},
+
+ // Tiny cases
+ {"1e-5000", 0.0},
+ {"-1e-5000", 0.0},
+ {"1e-5000f", 0.0},
+ {"-1e-5000f", 0.0},
+ {"1e-50f", 0.0},
+ {"-1e-50f", 0.0},
+
+ // Nearly overflow
+ {"1.e308", 1.e308},
+ {"-1.e308", -1.e308},
+ {"1.8e307", 1.8e307},
+ {"-1.8e307", -1.8e307},
+ {"1.798e307", 1.798e307},
+ {"-1.798e307", -1.798e307},
+ {"1.7977e307", 1.7977e307},
+ {"-1.7977e307", -1.7977e307},
+
+ // Nearly overflow
+ {"1e38f", static_cast<double>(1e38f)},
+ {"-1e38f", static_cast<double>(-1e38f)},
+ {"4.0e37f", static_cast<double>(4.0e37f)},
+ {"-4.0e37f", static_cast<double>(-4.0e37f)},
+ {"3.5e37f", static_cast<double>(3.5e37f)},
+ {"-3.5e37f", static_cast<double>(-3.5e37f)},
+ {"3.403e37f", static_cast<double>(3.403e37f)},
+ {"-3.403e37f", static_cast<double>(-3.403e37f)},
+ }));
+
+const double NegInf = MakeDouble(1, 0x7FF, 0);
+const double PosInf = MakeDouble(0, 0x7FF, 0);
FloatLiteralTestCaseList HexFloatCases() {
- return FloatLiteralTestCaseList{
- // Regular numbers
- {"0x0p+0", 0.f},
- {"0x1p+0", 1.f},
- {"0x1p+1", 2.f},
- {"0x1.8p+1", 3.f},
- {"0x1.99999ap-4", 0.1f},
- {"0x1p-1", 0.5f},
- {"0x1p-2", 0.25f},
- {"0x1.8p-1", 0.75f},
- {"-0x0p+0", -0.f},
- {"-0x1p+0", -1.f},
- {"-0x1p-1", -0.5f},
- {"-0x1p-2", -0.25f},
- {"-0x1.8p-1", -0.75f},
-
- // Large numbers
- {"0x1p+9", 512.f},
- {"0x1p+10", 1024.f},
- {"0x1.02p+10", 1024.f + 8.f},
- {"-0x1p+9", -512.f},
- {"-0x1p+10", -1024.f},
- {"-0x1.02p+10", -1024.f - 8.f},
-
- // Small numbers
- {"0x1p-9", 1.0f / 512.f},
- {"0x1p-10", 1.0f / 1024.f},
- {"0x1.02p-3", 1.0f / 1024.f + 1.0f / 8.f},
- {"-0x1p-9", 1.0f / -512.f},
- {"-0x1p-10", 1.0f / -1024.f},
- {"-0x1.02p-3", 1.0f / -1024.f - 1.0f / 8.f},
-
- // Near lowest non-denorm
- {"0x1p-124", std::ldexp(1.f * 8.f, -127)},
- {"0x1p-125", std::ldexp(1.f * 4.f, -127)},
- {"-0x1p-124", -std::ldexp(1.f * 8.f, -127)},
- {"-0x1p-125", -std::ldexp(1.f * 4.f, -127)},
-
- // Lowest non-denorm
- {"0x1p-126", std::ldexp(1.f * 2.f, -127)},
- {"-0x1p-126", -std::ldexp(1.f * 2.f, -127)},
-
- // Denormalized values
- {"0x1p-127", std::ldexp(1.f, -127)},
- {"0x1p-128", std::ldexp(1.f / 2.f, -127)},
- {"0x1p-129", std::ldexp(1.f / 4.f, -127)},
- {"0x1p-130", std::ldexp(1.f / 8.f, -127)},
- {"-0x1p-127", -std::ldexp(1.f, -127)},
- {"-0x1p-128", -std::ldexp(1.f / 2.f, -127)},
- {"-0x1p-129", -std::ldexp(1.f / 4.f, -127)},
- {"-0x1p-130", -std::ldexp(1.f / 8.f, -127)},
-
- {"0x1.8p-127", std::ldexp(1.f, -127) + (std::ldexp(1.f, -127) / 2.f)},
- {"0x1.8p-128",
- std::ldexp(1.f, -127) / 2.f + (std::ldexp(1.f, -127) / 4.f)},
-
- {"0x1p-149", MakeFloat(0, 0, 1)}, // +SmallestDenormal
- {"0x1p-148", MakeFloat(0, 0, 2)}, // +BiggerDenormal
- {"0x1.fffffcp-127", MakeFloat(0, 0, 0x7fffff)}, // +LargestDenormal
- {"-0x1p-149", MakeFloat(1, 0, 1)}, // -SmallestDenormal
- {"-0x1p-148", MakeFloat(1, 0, 2)}, // -BiggerDenormal
- {"-0x1.fffffcp-127", MakeFloat(1, 0, 0x7fffff)}, // -LargestDenormal
-
- {"0x1.2bfaf8p-127", MakeFloat(0, 0, 0xcafebe)}, // +Subnormal
- {"-0x1.2bfaf8p-127", MakeFloat(1, 0, 0xcafebe)}, // -Subnormal
- {"0x1.55554p-130", MakeFloat(0, 0, 0xaaaaa)}, // +Subnormal
- {"-0x1.55554p-130", MakeFloat(1, 0, 0xaaaaa)}, // -Subnormal
-
- // Nan -> Infinity
- {"0x1.8p+128", PosInf},
- {"0x1.0002p+128", PosInf},
- {"0x1.0018p+128", PosInf},
- {"0x1.01ep+128", PosInf},
- {"0x1.fffffep+128", PosInf},
- {"-0x1.8p+128", NegInf},
- {"-0x1.0002p+128", NegInf},
- {"-0x1.0018p+128", NegInf},
- {"-0x1.01ep+128", NegInf},
- {"-0x1.fffffep+128", NegInf},
-
- // Infinity
- {"0x1p+128", PosInf},
- {"-0x1p+128", NegInf},
- {"0x32p+127", PosInf},
- {"0x32p+500", PosInf},
- {"-0x32p+127", NegInf},
- {"-0x32p+500", NegInf},
-
- // Overflow -> Infinity
- {"0x1p+129", PosInf},
- {"0x1.1p+128", PosInf},
- {"-0x1p+129", NegInf},
- {"-0x1.1p+128", NegInf},
- {"0x1.0p2147483520", PosInf}, // INT_MAX - 127 (largest valid exponent)
-
- // Underflow -> Zero
- {"0x1p-500", 0.f}, // Exponent underflows
- {"-0x1p-500", -0.f},
- {"0x0.00000000001p-126", 0.f}, // Fraction causes underflow
- {"-0x0.0000000001p-127", -0.f},
- {"0x0.01p-142", 0.f},
- {"-0x0.01p-142", -0.f}, // Fraction causes additional underflow
- {"0x1.0p-2147483520", 0}, // -(INT_MAX - 127) (smallest valid exponent)
-
- // Zero with non-zero exponent -> Zero
- {"0x0p+0", 0.f},
- {"0x0p+1", 0.f},
- {"0x0p-1", 0.f},
- {"0x0p+9999999999", 0.f},
- {"0x0p-9999999999", 0.f},
- // Same, but with very large positive exponents that would cause overflow
- // if the mantissa were non-zero.
- {"0x0p+4000000000", 0.f}, // 4 billion:
- {"0x0p+40000000000", 0.f}, // 40 billion
- {"-0x0p+40000000000", 0.f}, // As above 2, but negative mantissa
- {"-0x0p+400000000000", 0.f},
- {"0x0.00p+4000000000", 0.f}, // As above 4, but with fractional part
- {"0x0.00p+40000000000", 0.f},
- {"-0x0.00p+40000000000", 0.f},
- {"-0x0.00p+400000000000", 0.f},
- {"0x0p-4000000000", 0.f}, // As above 8, but with negative exponents
- {"0x0p-40000000000", 0.f},
- {"-0x0p-40000000000", 0.f},
- {"-0x0p-400000000000", 0.f},
- {"0x0.00p-4000000000", 0.f},
- {"0x0.00p-40000000000", 0.f},
- {"-0x0.00p-40000000000", 0.f},
- {"-0x0.00p-400000000000", 0.f},
-
- // Test parsing
- {"0x0p0", 0.f},
- {"0x0p-0", 0.f},
- {"0x0p+000", 0.f},
- {"0x00000000000000p+000000000000000", 0.f},
- {"0x00000000000000p-000000000000000", 0.f},
- {"0x00000000000001p+000000000000000", 1.f},
- {"0x00000000000001p-000000000000000", 1.f},
- {"0x0000000000000000000001.99999ap-000000000000000004", 0.1f},
- {"0x2p+0", 2.f},
- {"0xFFp+0", 255.f},
- {"0x0.8p+0", 0.5f},
- {"0x0.4p+0", 0.25f},
- {"0x0.4p+1", 2 * 0.25f},
- {"0x0.4p+2", 4 * 0.25f},
- {"0x123Ep+1", 9340.f},
- {"-0x123Ep+1", -9340.f},
- {"0x1a2b3cP12", 7.024656e+09f},
- {"-0x1a2b3cP12", -7.024656e+09f},
-
- // Examples without a binary exponent part.
- {"0x1.", 1.0f},
- {"0x.8", 0.5f},
- {"0x1.8", 1.5f},
- {"-0x1.", -1.0f},
- {"-0x.8", -0.5f},
- {"-0x1.8", -1.5f},
-
- // Examples with a binary exponent and a 'f' suffix.
- {"0x1.p0f", 1.0f},
- {"0x.8p2f", 2.0f},
- {"0x1.8p-1f", 0.75f},
- {"0x2p-2f", 0.5f}, // No binary point
- {"-0x1.p0f", -1.0f},
- {"-0x.8p2f", -2.0f},
- {"-0x1.8p-1f", -0.75f},
- {"-0x2p-2f", -0.5f}, // No binary point
- };
+ return FloatLiteralTestCaseList{
+ // Regular numbers
+ {"0x0p+0", 0x0p+0},
+ {"0x1p+0", 0x1p+0},
+ {"0x1p+1", 0x1p+1},
+ {"0x1.8p+1", 0x1.8p+1},
+ {"0x1.99999ap-4", 0x1.99999ap-4},
+ {"0x1p-1", 0x1p-1},
+ {"0x1p-2", 0x1p-2},
+ {"0x1.8p-1", 0x1.8p-1},
+ {"-0x0p+0", -0x0p+0},
+ {"-0x1p+0", -0x1p+0},
+ {"-0x1p-1", -0x1p-1},
+ {"-0x1p-2", -0x1p-2},
+ {"-0x1.8p-1", -0x1.8p-1},
+
+ // Large numbers
+ {"0x1p+9", 0x1p+9},
+ {"0x1p+10", 0x1p+10},
+ {"0x1.02p+10", 0x1.02p+10},
+ {"-0x1p+9", -0x1p+9},
+ {"-0x1p+10", -0x1p+10},
+ {"-0x1.02p+10", -0x1.02p+10},
+
+ // Small numbers
+ {"0x1p-9", 0x1p-9},
+ {"0x1p-10", 0x1p-10},
+ {"0x1.02p-3", 0x1.02p-3},
+ {"-0x1p-9", -0x1p-9},
+ {"-0x1p-10", -0x1p-10},
+ {"-0x1.02p-3", -0x1.02p-3},
+
+ // Near lowest non-denorm
+ {"0x1p-1020", 0x1p-1020},
+ {"0x1p-1021", 0x1p-1021},
+ {"-0x1p-1020", -0x1p-1020},
+ {"-0x1p-1021", -0x1p-1021},
+
+ {"0x1p-124f", 0x1p-124},
+ {"0x1p-125f", 0x1p-125},
+ {"-0x1p-124f", -0x1p-124},
+ {"-0x1p-125f", -0x1p-125},
+
+ // Lowest non-denorm
+ {"0x1p-1022", 0x1p-1022},
+ {"-0x1p-1022", -0x1p-1022},
+
+ {"0x1p-126f", 0x1p-126},
+ {"-0x1p-126f", -0x1p-126},
+
+ // Denormalized values
+ {"0x1p-1023", 0x1p-1023},
+ {"0x1p-1024", 0x1p-1024},
+ {"0x1p-1025", 0x1p-1025},
+ {"0x1p-1026", 0x1p-1026},
+ {"-0x1p-1023", -0x1p-1023},
+ {"-0x1p-1024", -0x1p-1024},
+ {"-0x1p-1025", -0x1p-1025},
+ {"-0x1p-1026", -0x1p-1026},
+ {"0x1.8p-1023", 0x1.8p-1023},
+ {"0x1.8p-1024", 0x1.8p-1024},
+
+ {"0x1p-127f", 0x1p-127},
+ {"0x1p-128f", 0x1p-128},
+ {"0x1p-129f", 0x1p-129},
+ {"0x1p-130f", 0x1p-130},
+ {"-0x1p-127f", -0x1p-127},
+ {"-0x1p-128f", -0x1p-128},
+ {"-0x1p-129f", -0x1p-129},
+ {"-0x1p-130f", -0x1p-130},
+ {"0x1.8p-127f", 0x1.8p-127},
+ {"0x1.8p-128f", 0x1.8p-128},
+
+ // F64 extremities
+ {"0x1p-1074", 0x1p-1074}, // +SmallestDenormal
+ {"0x1p-1073", 0x1p-1073}, // +BiggerDenormal
+ {"0x1.ffffffffffffp-1027", 0x1.ffffffffffffp-1027}, // +LargestDenormal
+ {"-0x1p-1074", -0x1p-1074}, // -SmallestDenormal
+ {"-0x1p-1073", -0x1p-1073}, // -BiggerDenormal
+ {"-0x1.ffffffffffffp-1027", -0x1.ffffffffffffp-1027}, // -LargestDenormal
+
+ {"0x0.cafebeeff000dp-1022", 0x0.cafebeeff000dp-1022}, // +Subnormal
+ {"-0x0.cafebeeff000dp-1022", -0x0.cafebeeff000dp-1022}, // -Subnormal
+ {"0x1.2bfaf8p-1052", 0x1.2bfaf8p-1052}, // +Subnormal
+ {"-0x1.2bfaf8p-1052", -0x1.2bfaf8p-1052}, // -Subnormal
+ {"0x1.55554p-1055", 0x1.55554p-1055}, // +Subnormal
+ {"-0x1.55554p-1055", -0x1.55554p-1055}, // -Subnormal
+
+ // F32 extremities
+ {"0x1p-149", 0x1p-149}, // +SmallestDenormal
+ {"0x1p-148", 0x1p-148}, // +BiggerDenormal
+ {"0x1.fffffcp-127", 0x1.fffffcp-127}, // +LargestDenormal
+ {"-0x1p-149", -0x1p-149}, // -SmallestDenormal
+ {"-0x1p-148", -0x1p-148}, // -BiggerDenormal
+ {"-0x1.fffffcp-127", -0x1.fffffcp-127}, // -LargestDenormal
+
+ {"0x0.cafebp-129", 0x0.cafebp-129}, // +Subnormal
+ {"-0x0.cafebp-129", -0x0.cafebp-129}, // -Subnormal
+ {"0x1.2bfaf8p-127", 0x1.2bfaf8p-127}, // +Subnormal
+ {"-0x1.2bfaf8p-127", -0x1.2bfaf8p-127}, // -Subnormal
+ {"0x1.55554p-130", 0x1.55554p-130}, // +Subnormal
+ {"-0x1.55554p-130", -0x1.55554p-130}, // -Subnormal
+
+ // F32 exactly representable
+ {"0x1.000002p+0f", 0x1.000002p+0},
+ {"0x8.0000fp+0f", 0x8.0000fp+0},
+ {"0x8.fffffp+0f", 0x8.fffffp+0},
+ {"0x8.00003p+0f", 0x8.00003p+0},
+ {"0x2.123p+0f", 0x2.123p+0},
+ {"0x2.cafefp+0f", 0x2.cafefp+0},
+
+ // Underflow -> Zero
+ {"0x1p-1074", 0.0}, // Exponent underflows
+ {"-0x1p-1074", 0.0},
+ {"0x1p-5000", 0.0},
+ {"-0x1p-5000", 0.0},
+ {"0x0.00000000000000000000001p-1022", 0.0}, // Fraction causes underflow
+ {"-0x0.0000000000000000000001p-1023", -0.0},
+ {"0x0.01p-1073", -0.0},
+ {"-0x0.01p-1073", -0.0}, // Fraction causes additional underflow
+
+ {"0x1.0p-9223372036854774784", 0}, // -(INT64_MAX - 1023) (smallest valid exponent)
+
+ // Zero with non-zero exponent -> Zero
+ {"0x0p+0", 0.0},
+ {"0x0p+1", 0.0},
+ {"0x0p-1", 0.0},
+ {"0x0p+9999999999", 0.0},
+ {"0x0p-9999999999", 0.0},
+ // Same, but with very large positive exponents that would cause overflow
+ // if the mantissa were non-zero.
+ {"0x0p+10000000000000000000", 0.0}, // 10 quintillion (10,000,000,000,000,000,000)
+ {"0x0p+100000000000000000000", 0.0}, // 100 quintillion (100,000,000,000,000,000,000)
+ {"-0x0p+100000000000000000000", 0.0}, // As above 2, but negative mantissa
+ {"-0x0p+1000000000000000000000", 0.0},
+ {"0x0.00p+10000000000000000000", 0.0}, // As above 4, but with fractional part
+ {"0x0.00p+100000000000000000000", 0.0},
+ {"-0x0.00p+100000000000000000000", 0.0},
+ {"-0x0.00p+1000000000000000000000", 0.0},
+ {"0x0p-10000000000000000000", 0.0}, // As above 8, but with negative exponents
+ {"0x0p-100000000000000000000", 0.0},
+ {"-0x0p-100000000000000000000", 0.0},
+ {"-0x0p-1000000000000000000000", 0.0},
+ {"0x0.00p-10000000000000000000", 0.0},
+ {"0x0.00p-100000000000000000000", 0.0},
+ {"-0x0.00p-100000000000000000000", 0.0},
+ {"-0x0.00p-1000000000000000000000", 0.0},
+
+ // Test parsing
+ {"0x0p0", 0.0},
+ {"0x0p-0", 0.0},
+ {"0x0p+000", 0.0},
+ {"0x00000000000000p+000000000000000", 0.0},
+ {"0x00000000000000p-000000000000000", 0.0},
+ {"0x00000000000001p+000000000000000", 1.0},
+ {"0x00000000000001p-000000000000000", 1.0},
+ {"0x0000000000000000000001.99999ap-000000000000000004", 0.10000000149011612},
+ {"0x2p+0", 2.0},
+ {"0xFFp+0", 255.0},
+ {"0x0.8p+0", 0.5},
+ {"0x0.4p+0", 0.25},
+ {"0x0.4p+1", 2 * 0.25},
+ {"0x0.4p+2", 4 * 0.25},
+ {"0x123Ep+1", 9340.0},
+ {"-0x123Ep+1", -9340.0},
+ {"0x1a2b3cP12", 7.024656384e+09},
+ {"-0x1a2b3cP12", -7.024656384e+09},
+
+ // Examples without a binary exponent part.
+ {"0x1.", 1.0},
+ {"0x.8", 0.5},
+ {"0x1.8", 1.5},
+ {"-0x1.", -1.0},
+ {"-0x.8", -0.5},
+ {"-0x1.8", -1.5},
+
+ // Examples with a binary exponent and a 'f' suffix.
+ {"0x1.p0f", 1.0},
+ {"0x.8p2f", 2.0},
+ {"0x1.8p-1f", 0.75},
+ {"0x2p-2f", 0.5}, // No binary point
+ {"-0x1.p0f", -1.0},
+ {"-0x.8p2f", -2.0},
+ {"-0x1.8p-1f", -0.75},
+ {"-0x2p-2f", -0.5}, // No binary point
+ };
}
INSTANTIATE_TEST_SUITE_P(ParserImplFloatLiteralTest_HexFloat,
ParserImplFloatLiteralTest,
@@ -348,173 +408,296 @@ INSTANTIATE_TEST_SUITE_P(ParserImplFloatLiteralTest_HexFloat,
// Now test all the same hex float cases, but with 0X instead of 0x
template <typename ARR>
std::vector<FloatLiteralTestCase> UpperCase0X(const ARR& cases) {
- std::vector<FloatLiteralTestCase> result;
- result.reserve(cases.size());
- for (const auto& c : cases) {
- result.emplace_back(c);
- auto& input = result.back().input;
- const auto where = input.find("0x");
- if (where != std::string::npos) {
- input[where+1] = 'X';
+ std::vector<FloatLiteralTestCase> result;
+ result.reserve(cases.size());
+ for (const auto& c : cases) {
+ result.emplace_back(c);
+ auto& input = result.back().input;
+ const auto where = input.find("0x");
+ if (where != std::string::npos) {
+ input[where + 1] = 'X';
+ }
}
- }
- return result;
+ return result;
}
using UpperCase0XTest = ::testing::Test;
TEST_F(UpperCase0XTest, Samples) {
- const auto cases = FloatLiteralTestCaseList{
- {"absent", 0.0}, {"0x", 1.0}, {"0X", 2.0}, {"-0x", 3.0},
- {"-0X", 4.0}, {" 0x1p1", 5.0}, {" -0x1p", 6.0}, {" examine ", 7.0}};
- const auto expected = FloatLiteralTestCaseList{
- {"absent", 0.0}, {"0X", 1.0}, {"0X", 2.0}, {"-0X", 3.0},
- {"-0X", 4.0}, {" 0X1p1", 5.0}, {" -0X1p", 6.0}, {" examine ", 7.0}};
-
- auto result = UpperCase0X(cases);
- EXPECT_THAT(result, ::testing::ElementsAreArray(expected));
+ const auto cases = FloatLiteralTestCaseList{
+ {"absent", 0.0}, {"0x", 1.0}, {"0X", 2.0}, {"-0x", 3.0},
+ {"-0X", 4.0}, {" 0x1p1", 5.0}, {" -0x1p", 6.0}, {" examine ", 7.0}};
+ const auto expected = FloatLiteralTestCaseList{
+ {"absent", 0.0}, {"0X", 1.0}, {"0X", 2.0}, {"-0X", 3.0},
+ {"-0X", 4.0}, {" 0X1p1", 5.0}, {" -0X1p", 6.0}, {" examine ", 7.0}};
+
+ auto result = UpperCase0X(cases);
+ EXPECT_THAT(result, ::testing::ElementsAreArray(expected));
}
INSTANTIATE_TEST_SUITE_P(ParserImplFloatLiteralTest_HexFloat_UpperCase0X,
ParserImplFloatLiteralTest,
testing::ValuesIn(UpperCase0X(HexFloatCases())));
-struct InvalidLiteralTestCase {
- const char* input;
- const char* error_msg;
-};
-class ParserImplInvalidLiteralTest
- : public ParserImplTestWithParam<InvalidLiteralTestCase> {};
+// <error, source>
+using InvalidLiteralTestCase = std::tuple<const char*, const char*>;
+
+class ParserImplInvalidLiteralTest : public ParserImplTestWithParam<InvalidLiteralTestCase> {};
TEST_P(ParserImplInvalidLiteralTest, Parse) {
- auto params = GetParam();
- SCOPED_TRACE(params.input);
- auto p = parser(params.input);
- auto c = p->const_literal();
- EXPECT_FALSE(c.matched);
- EXPECT_TRUE(c.errored);
- EXPECT_EQ(p->error(), params.error_msg);
- ASSERT_EQ(c.value, nullptr);
+ auto* error = std::get<0>(GetParam());
+ auto* source = std::get<1>(GetParam());
+ auto p = parser(source);
+ auto c = p->const_literal();
+ EXPECT_FALSE(c.matched);
+ EXPECT_TRUE(c.errored);
+ EXPECT_EQ(p->error(), std::string(error));
+ ASSERT_EQ(c.value, nullptr);
}
-InvalidLiteralTestCase invalid_hexfloat_mantissa_too_large_cases[] = {
- {"0x1.ffffffff8p0", "1:1: mantissa is too large for hex float"},
- {"0x1f.fffffff8p0", "1:1: mantissa is too large for hex float"},
- {"0x1ff.ffffff8p0", "1:1: mantissa is too large for hex float"},
- {"0x1fff.fffff8p0", "1:1: mantissa is too large for hex float"},
- {"0x1ffff.ffff8p0", "1:1: mantissa is too large for hex float"},
- {"0x1fffff.fff8p0", "1:1: mantissa is too large for hex float"},
- {"0x1ffffff.ff8p0", "1:1: mantissa is too large for hex float"},
- {"0x1fffffff.f8p0", "1:1: mantissa is too large for hex float"},
- {"0x1ffffffff.8p0", "1:1: mantissa is too large for hex float"},
- {"0x1ffffffff8.p0", "1:1: mantissa is too large for hex float"},
-};
INSTANTIATE_TEST_SUITE_P(
- ParserImplInvalidLiteralTest_HexFloatMantissaTooLarge,
+ HexFloatMantissaTooLarge,
ParserImplInvalidLiteralTest,
- testing::ValuesIn(invalid_hexfloat_mantissa_too_large_cases));
+ testing::Combine(testing::Values("1:1: mantissa is too large for hex float"),
+ testing::ValuesIn(std::vector<const char*>{
+ "0x1.ffffffffffffffff8p0",
+ "0x1f.fffffffffffffff8p0",
+ "0x1ff.ffffffffffffff8p0",
+ "0x1fff.fffffffffffff8p0",
+ "0x1ffff.ffffffffffff8p0",
+ "0x1fffff.fffffffffff8p0",
+ "0x1ffffff.ffffffffff8p0",
+ "0x1fffffff.fffffffff8p0",
+ "0x1ffffffff.ffffffff8p0",
+ "0x1fffffffff.fffffff8p0",
+ "0x1ffffffffff.ffffff8p0",
+ "0x1fffffffffff.fffff8p0",
+ "0x1ffffffffffff.ffff8p0",
+ "0x1fffffffffffff.fff8p0",
+ "0x1ffffffffffffff.ff8p0",
+ "0x1ffffffffffffffff.8p0",
+ "0x1ffffffffffffffff8.p0",
+ })));
-InvalidLiteralTestCase invalid_hexfloat_exponent_too_large_cases[] = {
- {"0x1p+2147483521", "1:1: exponent is too large for hex float"},
- {"0x1p-2147483521", "1:1: exponent is too large for hex float"},
- {"0x1p+4294967296", "1:1: exponent is too large for hex float"},
- {"0x1p-4294967296", "1:1: exponent is too large for hex float"},
-};
INSTANTIATE_TEST_SUITE_P(
- ParserImplInvalidLiteralTest_HexFloatExponentTooLarge,
+ HexFloatExponentTooLarge,
ParserImplInvalidLiteralTest,
- testing::ValuesIn(invalid_hexfloat_exponent_too_large_cases));
-
-InvalidLiteralTestCase invalid_hexfloat_exponent_missing_cases[] = {
- // Lower case p
- {"0x0p", "1:1: expected an exponent value for hex float"},
- {"0x0p+", "1:1: expected an exponent value for hex float"},
- {"0x0p-", "1:1: expected an exponent value for hex float"},
- {"0x1.0p", "1:1: expected an exponent value for hex float"},
- {"0x0.1p", "1:1: expected an exponent value for hex float"},
- // Upper case p
- {"0x0P", "1:1: expected an exponent value for hex float"},
- {"0x0P+", "1:1: expected an exponent value for hex float"},
- {"0x0P-", "1:1: expected an exponent value for hex float"},
- {"0x1.0P", "1:1: expected an exponent value for hex float"},
- {"0x0.1P", "1:1: expected an exponent value for hex float"},
-};
+ testing::Combine(testing::Values("1:1: exponent is too large for hex float"),
+ testing::ValuesIn(std::vector<const char*>{
+ "0x1p+9223372036854774785",
+ "0x1p-9223372036854774785",
+ "0x1p+18446744073709551616",
+ "0x1p-18446744073709551616",
+ })));
+
INSTANTIATE_TEST_SUITE_P(
- ParserImplInvalidLiteralTest_HexFloatExponentMissing,
+ HexFloatMissingExponent,
ParserImplInvalidLiteralTest,
- testing::ValuesIn(invalid_hexfloat_exponent_missing_cases));
+ testing::Combine(testing::Values("1:1: expected an exponent value for hex float"),
+ testing::ValuesIn(std::vector<const char*>{
+ // Lower case p
+ "0x0p",
+ "0x0p+",
+ "0x0p-",
+ "0x1.0p",
+ "0x0.1p",
+ // Upper case p
+ "0x0P",
+ "0x0P+",
+ "0x0P-",
+ "0x1.0P",
+ "0x0.1P",
+ })));
+
+INSTANTIATE_TEST_SUITE_P(
+ HexNaNAFloat,
+ ParserImplInvalidLiteralTest,
+ testing::Combine(testing::Values("1:1: value cannot be represented as 'abstract-float'"),
+ testing::ValuesIn(std::vector<const char*>{
+ "0x1.8p+1024",
+ "0x1.0002p+1024",
+ "0x1.0018p+1024",
+ "0x1.01ep+1024",
+ "0x1.fffffep+1024",
+ "-0x1.8p+1024",
+ "-0x1.0002p+1024",
+ "-0x1.0018p+1024",
+ "-0x1.01ep+1024",
+ "-0x1.fffffep+1024",
+ })));
+
+INSTANTIATE_TEST_SUITE_P(
+ HexNaNF32,
+ ParserImplInvalidLiteralTest,
+ testing::Combine(testing::Values("1:1: value cannot be represented as 'f32'"),
+ testing::ValuesIn(std::vector<const char*>{
+ "0x1.8p+128f",
+ "0x1.0002p+128f",
+ "0x1.0018p+128f",
+ "0x1.01ep+128f",
+ "0x1.fffffep+128f",
+ "-0x1.8p+128f",
+ "-0x1.0002p+128f",
+ "-0x1.0018p+128f",
+ "-0x1.01ep+128f",
+ "-0x1.fffffep+128f",
+ })));
+
+INSTANTIATE_TEST_SUITE_P(
+ HexOverflowAFloat,
+ ParserImplInvalidLiteralTest,
+ testing::Combine(testing::Values("1:1: value cannot be represented as 'abstract-float'"),
+ testing::ValuesIn(std::vector<const char*>{
+ "0x1p+1024",
+ "-0x1p+1024",
+ "0x1.1p+1024",
+ "-0x1.1p+1024",
+ "0x1p+1025",
+ "-0x1p+1025",
+ "0x32p+1023",
+ "-0x32p+1023",
+ "0x32p+5000",
+ "-0x32p+5000",
+ "0x1.0p9223372036854774784",
+ "-0x1.0p9223372036854774784",
+ })));
+
+INSTANTIATE_TEST_SUITE_P(
+ HexOverflowF32,
+ ParserImplInvalidLiteralTest,
+ testing::Combine(testing::Values("1:1: value cannot be represented as 'f32'"),
+ testing::ValuesIn(std::vector<const char*>{
+ "0x1p+128f",
+ "-0x1p+128f",
+ "0x1.1p+128f",
+ "-0x1.1p+128f",
+ "0x1p+129f",
+ "-0x1p+129f",
+ "0x32p+127f",
+ "-0x32p+127f",
+ "0x32p+500f",
+ "-0x32p+500f",
+ })));
+
+INSTANTIATE_TEST_SUITE_P(
+ HexNotExactlyRepresentableF32,
+ ParserImplInvalidLiteralTest,
+ testing::Combine(testing::Values("1:1: value cannot be exactly represented as 'f32'"),
+ testing::ValuesIn(std::vector<const char*>{
+ "0x1.000001p+0f", // Quantizes to 0x1.0p+0
+ "0x8.0000f8p+0f", // Quantizes to 0x8.0000fp+0
+ "0x8.000038p+0f", // Quantizes to 0x8.00003p+0
+ "0x2.cafef00dp+0f", // Quantizes to 0x2.cafefp+0
+ })));
+
+INSTANTIATE_TEST_SUITE_P(
+ DecOverflowAFloat,
+ ParserImplInvalidLiteralTest,
+ testing::Combine(testing::Values("1:1: value cannot be represented as 'abstract-float'"),
+ testing::ValuesIn(std::vector<const char*>{
+ "1.e309",
+ "-1.e309",
+ "1.8e308",
+ "-1.8e308",
+ "1.798e308",
+ "-1.798e308",
+ "1.7977e308",
+ "-1.7977e308",
+ "1.2e+5000",
+ "-1.2e+5000",
+ })));
+
+INSTANTIATE_TEST_SUITE_P(
+ DecOverflowF32,
+ ParserImplInvalidLiteralTest,
+ testing::Combine(testing::Values("1:1: value cannot be represented as 'f32'"),
+ testing::ValuesIn(std::vector<const char*>{
+ "1e39f",
+ "-1e39f",
+ "4.0e38f",
+ "-4.0e38f",
+ "3.5e38f",
+ "-3.5e38f",
+ "3.403e38f",
+ "-3.403e38f",
+ "1.2e+256f",
+ "-1.2e+256f",
+ })));
TEST_F(ParserImplTest, ConstLiteral_FloatHighest) {
- const auto highest = std::numeric_limits<float>::max();
- const auto expected_highest = 340282346638528859811704183484516925440.0f;
- if (highest < expected_highest || highest > expected_highest) {
- GTEST_SKIP() << "std::numeric_limits<float>::max() is not as expected for "
- "this target";
- }
- auto p = parser("340282346638528859811704183484516925440.0");
- auto c = p->const_literal();
- EXPECT_TRUE(c.matched);
- EXPECT_FALSE(c.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(c.value, nullptr);
- ASSERT_TRUE(c->Is<ast::FloatLiteralExpression>());
- EXPECT_FLOAT_EQ(c->As<ast::FloatLiteralExpression>()->value,
- std::numeric_limits<float>::max());
- EXPECT_EQ(c->source.range, (Source::Range{{1u, 1u}, {1u, 42u}}));
+ const auto highest = std::numeric_limits<float>::max();
+ const auto expected_highest = 340282346638528859811704183484516925440.0f;
+ if (highest < expected_highest || highest > expected_highest) {
+ GTEST_SKIP() << "std::numeric_limits<float>::max() is not as expected for "
+ "this target";
+ }
+ auto p = parser("340282346638528859811704183484516925440.0");
+ auto c = p->const_literal();
+ EXPECT_TRUE(c.matched);
+ EXPECT_FALSE(c.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(c.value, nullptr);
+ ASSERT_TRUE(c->Is<ast::FloatLiteralExpression>());
+ EXPECT_DOUBLE_EQ(c->As<ast::FloatLiteralExpression>()->value,
+ std::numeric_limits<float>::max());
+ EXPECT_EQ(c->As<ast::FloatLiteralExpression>()->suffix,
+ ast::FloatLiteralExpression::Suffix::kNone);
+ EXPECT_EQ(c->source.range, (Source::Range{{1u, 1u}, {1u, 42u}}));
}
TEST_F(ParserImplTest, ConstLiteral_FloatLowest) {
- // Some compilers complain if you test floating point numbers for equality.
- // So say it via two inequalities.
- const auto lowest = std::numeric_limits<float>::lowest();
- const auto expected_lowest = -340282346638528859811704183484516925440.0f;
- if (lowest < expected_lowest || lowest > expected_lowest) {
- GTEST_SKIP()
- << "std::numeric_limits<float>::lowest() is not as expected for "
- "this target";
- }
-
- auto p = parser("-340282346638528859811704183484516925440.0");
- auto c = p->const_literal();
- EXPECT_TRUE(c.matched);
- EXPECT_FALSE(c.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(c.value, nullptr);
- ASSERT_TRUE(c->Is<ast::FloatLiteralExpression>());
- EXPECT_FLOAT_EQ(c->As<ast::FloatLiteralExpression>()->value,
- std::numeric_limits<float>::lowest());
- EXPECT_EQ(c->source.range, (Source::Range{{1u, 1u}, {1u, 43u}}));
+ // Some compilers complain if you test floating point numbers for equality.
+ // So say it via two inequalities.
+ const auto lowest = std::numeric_limits<float>::lowest();
+ const auto expected_lowest = -340282346638528859811704183484516925440.0f;
+ if (lowest < expected_lowest || lowest > expected_lowest) {
+ GTEST_SKIP() << "std::numeric_limits<float>::lowest() is not as expected for "
+ "this target";
+ }
+
+ auto p = parser("-340282346638528859811704183484516925440.0");
+ auto c = p->const_literal();
+ EXPECT_TRUE(c.matched);
+ EXPECT_FALSE(c.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(c.value, nullptr);
+ ASSERT_TRUE(c->Is<ast::FloatLiteralExpression>());
+ EXPECT_DOUBLE_EQ(c->As<ast::FloatLiteralExpression>()->value,
+ std::numeric_limits<float>::lowest());
+ EXPECT_EQ(c->As<ast::FloatLiteralExpression>()->suffix,
+ ast::FloatLiteralExpression::Suffix::kNone);
+ EXPECT_EQ(c->source.range, (Source::Range{{1u, 1u}, {1u, 43u}}));
}
TEST_F(ParserImplTest, ConstLiteral_True) {
- auto p = parser("true");
- auto c = p->const_literal();
- EXPECT_TRUE(c.matched);
- EXPECT_FALSE(c.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(c.value, nullptr);
- ASSERT_TRUE(c->Is<ast::BoolLiteralExpression>());
- EXPECT_TRUE(c->As<ast::BoolLiteralExpression>()->value);
- EXPECT_EQ(c->source.range, (Source::Range{{1u, 1u}, {1u, 5u}}));
+ auto p = parser("true");
+ auto c = p->const_literal();
+ EXPECT_TRUE(c.matched);
+ EXPECT_FALSE(c.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(c.value, nullptr);
+ ASSERT_TRUE(c->Is<ast::BoolLiteralExpression>());
+ EXPECT_TRUE(c->As<ast::BoolLiteralExpression>()->value);
+ EXPECT_EQ(c->source.range, (Source::Range{{1u, 1u}, {1u, 5u}}));
}
TEST_F(ParserImplTest, ConstLiteral_False) {
- auto p = parser("false");
- auto c = p->const_literal();
- EXPECT_TRUE(c.matched);
- EXPECT_FALSE(c.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(c.value, nullptr);
- ASSERT_TRUE(c->Is<ast::BoolLiteralExpression>());
- EXPECT_FALSE(c->As<ast::BoolLiteralExpression>()->value);
- EXPECT_EQ(c->source.range, (Source::Range{{1u, 1u}, {1u, 6u}}));
+ auto p = parser("false");
+ auto c = p->const_literal();
+ EXPECT_TRUE(c.matched);
+ EXPECT_FALSE(c.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(c.value, nullptr);
+ ASSERT_TRUE(c->Is<ast::BoolLiteralExpression>());
+ EXPECT_FALSE(c->As<ast::BoolLiteralExpression>()->value);
+ EXPECT_EQ(c->source.range, (Source::Range{{1u, 1u}, {1u, 6u}}));
}
TEST_F(ParserImplTest, ConstLiteral_NoMatch) {
- auto p = parser("another-token");
- auto c = p->const_literal();
- EXPECT_FALSE(c.matched);
- EXPECT_FALSE(c.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_EQ(c.value, nullptr);
+ auto p = parser("another-token");
+ auto c = p->const_literal();
+ EXPECT_FALSE(c.matched);
+ EXPECT_FALSE(c.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_EQ(c.value, nullptr);
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_continue_stmt_test.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_continue_stmt_test.cc
index 34a9d519531..002f638c37c 100644
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_continue_stmt_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_continue_stmt_test.cc
@@ -19,12 +19,12 @@ namespace tint::reader::wgsl {
namespace {
TEST_F(ParserImplTest, ContinueStmt) {
- auto p = parser("continue");
- auto e = p->continue_stmt();
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
- ASSERT_TRUE(e->Is<ast::ContinueStatement>());
+ auto p = parser("continue");
+ auto e = p->continue_stmt();
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
+ ASSERT_TRUE(e->Is<ast::ContinueStatement>());
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_continuing_stmt_test.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_continuing_stmt_test.cc
index 80f2e0e375a..d7a5779ff71 100644
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_continuing_stmt_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_continuing_stmt_test.cc
@@ -19,23 +19,23 @@ namespace tint::reader::wgsl {
namespace {
TEST_F(ParserImplTest, ContinuingStmt) {
- auto p = parser("continuing { discard; }");
- auto e = p->continuing_stmt();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_EQ(e->statements.size(), 1u);
- ASSERT_TRUE(e->statements[0]->Is<ast::DiscardStatement>());
+ auto p = parser("continuing { discard; }");
+ auto e = p->continuing_stmt();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_EQ(e->statements.size(), 1u);
+ ASSERT_TRUE(e->statements[0]->Is<ast::DiscardStatement>());
}
TEST_F(ParserImplTest, ContinuingStmt_InvalidBody) {
- auto p = parser("continuing { discard }");
- auto e = p->continuing_stmt();
- EXPECT_FALSE(e.matched);
- EXPECT_TRUE(e.errored);
- EXPECT_EQ(e.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:22: expected ';' for discard statement");
+ auto p = parser("continuing { discard }");
+ auto e = p->continuing_stmt();
+ EXPECT_FALSE(e.matched);
+ EXPECT_TRUE(e.errored);
+ EXPECT_EQ(e.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:22: expected ';' for discard statement");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_depth_texture_test.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_depth_texture_test.cc
new file mode 100644
index 00000000000..6c70bd3a661
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_depth_texture_test.cc
@@ -0,0 +1,95 @@
+// Copyright 2020 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/tint/reader/wgsl/parser_impl_test_helper.h"
+#include "src/tint/sem/depth_texture.h"
+
+namespace tint::reader::wgsl {
+namespace {
+
+TEST_F(ParserImplTest, DepthTextureType_Invalid) {
+ auto p = parser("1234");
+ auto t = p->depth_texture();
+ EXPECT_FALSE(t.matched);
+ EXPECT_FALSE(t.errored);
+ EXPECT_FALSE(p->has_error());
+}
+
+TEST_F(ParserImplTest, DepthTextureType_2d) {
+ auto p = parser("texture_depth_2d");
+ auto t = p->depth_texture();
+ EXPECT_TRUE(t.matched);
+ EXPECT_FALSE(t.errored);
+ ASSERT_NE(t.value, nullptr);
+ ASSERT_TRUE(t->Is<ast::Texture>());
+ ASSERT_TRUE(t->Is<ast::DepthTexture>());
+ EXPECT_EQ(t->As<ast::Texture>()->dim, ast::TextureDimension::k2d);
+ EXPECT_FALSE(p->has_error());
+ EXPECT_EQ(t.value->source.range, (Source::Range{{1u, 1u}, {1u, 17u}}));
+}
+
+TEST_F(ParserImplTest, DepthTextureType_2dArray) {
+ auto p = parser("texture_depth_2d_array");
+ auto t = p->depth_texture();
+ EXPECT_TRUE(t.matched);
+ EXPECT_FALSE(t.errored);
+ ASSERT_NE(t.value, nullptr);
+ ASSERT_TRUE(t->Is<ast::Texture>());
+ ASSERT_TRUE(t->Is<ast::DepthTexture>());
+ EXPECT_EQ(t->As<ast::Texture>()->dim, ast::TextureDimension::k2dArray);
+ EXPECT_FALSE(p->has_error());
+ EXPECT_EQ(t.value->source.range, (Source::Range{{1u, 1u}, {1u, 23u}}));
+}
+
+TEST_F(ParserImplTest, DepthTextureType_Cube) {
+ auto p = parser("texture_depth_cube");
+ auto t = p->depth_texture();
+ EXPECT_TRUE(t.matched);
+ EXPECT_FALSE(t.errored);
+ ASSERT_NE(t.value, nullptr);
+ ASSERT_TRUE(t->Is<ast::Texture>());
+ ASSERT_TRUE(t->Is<ast::DepthTexture>());
+ EXPECT_EQ(t->As<ast::Texture>()->dim, ast::TextureDimension::kCube);
+ EXPECT_FALSE(p->has_error());
+ EXPECT_EQ(t.value->source.range, (Source::Range{{1u, 1u}, {1u, 19u}}));
+}
+
+TEST_F(ParserImplTest, DepthTextureType_CubeArray) {
+ auto p = parser("texture_depth_cube_array");
+ auto t = p->depth_texture();
+ EXPECT_TRUE(t.matched);
+ EXPECT_FALSE(t.errored);
+ ASSERT_NE(t.value, nullptr);
+ ASSERT_TRUE(t->Is<ast::Texture>());
+ ASSERT_TRUE(t->Is<ast::DepthTexture>());
+ EXPECT_EQ(t->As<ast::Texture>()->dim, ast::TextureDimension::kCubeArray);
+ EXPECT_FALSE(p->has_error());
+ EXPECT_EQ(t.value->source.range, (Source::Range{{1u, 1u}, {1u, 25u}}));
+}
+
+TEST_F(ParserImplTest, DepthTextureType_Multisampled2d) {
+ auto p = parser("texture_depth_multisampled_2d");
+ auto t = p->depth_texture();
+ EXPECT_TRUE(t.matched);
+ EXPECT_FALSE(t.errored);
+ ASSERT_NE(t.value, nullptr);
+ ASSERT_TRUE(t->Is<ast::Texture>());
+ ASSERT_TRUE(t->Is<ast::DepthMultisampledTexture>());
+ EXPECT_EQ(t->As<ast::Texture>()->dim, ast::TextureDimension::k2d);
+ EXPECT_FALSE(p->has_error());
+ EXPECT_EQ(t.value->source.range, (Source::Range{{1u, 1u}, {1u, 30u}}));
+}
+
+} // namespace
+} // namespace tint::reader::wgsl
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_depth_texture_type_test.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_depth_texture_type_test.cc
deleted file mode 100644
index 6a9c528002b..00000000000
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_depth_texture_type_test.cc
+++ /dev/null
@@ -1,95 +0,0 @@
-// Copyright 2020 The Tint Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "src/tint/reader/wgsl/parser_impl_test_helper.h"
-#include "src/tint/sem/depth_texture_type.h"
-
-namespace tint::reader::wgsl {
-namespace {
-
-TEST_F(ParserImplTest, DepthTextureType_Invalid) {
- auto p = parser("1234");
- auto t = p->depth_texture_type();
- EXPECT_FALSE(t.matched);
- EXPECT_FALSE(t.errored);
- EXPECT_FALSE(p->has_error());
-}
-
-TEST_F(ParserImplTest, DepthTextureType_2d) {
- auto p = parser("texture_depth_2d");
- auto t = p->depth_texture_type();
- EXPECT_TRUE(t.matched);
- EXPECT_FALSE(t.errored);
- ASSERT_NE(t.value, nullptr);
- ASSERT_TRUE(t->Is<ast::Texture>());
- ASSERT_TRUE(t->Is<ast::DepthTexture>());
- EXPECT_EQ(t->As<ast::Texture>()->dim, ast::TextureDimension::k2d);
- EXPECT_FALSE(p->has_error());
- EXPECT_EQ(t.value->source.range, (Source::Range{{1u, 1u}, {1u, 17u}}));
-}
-
-TEST_F(ParserImplTest, DepthTextureType_2dArray) {
- auto p = parser("texture_depth_2d_array");
- auto t = p->depth_texture_type();
- EXPECT_TRUE(t.matched);
- EXPECT_FALSE(t.errored);
- ASSERT_NE(t.value, nullptr);
- ASSERT_TRUE(t->Is<ast::Texture>());
- ASSERT_TRUE(t->Is<ast::DepthTexture>());
- EXPECT_EQ(t->As<ast::Texture>()->dim, ast::TextureDimension::k2dArray);
- EXPECT_FALSE(p->has_error());
- EXPECT_EQ(t.value->source.range, (Source::Range{{1u, 1u}, {1u, 23u}}));
-}
-
-TEST_F(ParserImplTest, DepthTextureType_Cube) {
- auto p = parser("texture_depth_cube");
- auto t = p->depth_texture_type();
- EXPECT_TRUE(t.matched);
- EXPECT_FALSE(t.errored);
- ASSERT_NE(t.value, nullptr);
- ASSERT_TRUE(t->Is<ast::Texture>());
- ASSERT_TRUE(t->Is<ast::DepthTexture>());
- EXPECT_EQ(t->As<ast::Texture>()->dim, ast::TextureDimension::kCube);
- EXPECT_FALSE(p->has_error());
- EXPECT_EQ(t.value->source.range, (Source::Range{{1u, 1u}, {1u, 19u}}));
-}
-
-TEST_F(ParserImplTest, DepthTextureType_CubeArray) {
- auto p = parser("texture_depth_cube_array");
- auto t = p->depth_texture_type();
- EXPECT_TRUE(t.matched);
- EXPECT_FALSE(t.errored);
- ASSERT_NE(t.value, nullptr);
- ASSERT_TRUE(t->Is<ast::Texture>());
- ASSERT_TRUE(t->Is<ast::DepthTexture>());
- EXPECT_EQ(t->As<ast::Texture>()->dim, ast::TextureDimension::kCubeArray);
- EXPECT_FALSE(p->has_error());
- EXPECT_EQ(t.value->source.range, (Source::Range{{1u, 1u}, {1u, 25u}}));
-}
-
-TEST_F(ParserImplTest, DepthTextureType_Multisampled2d) {
- auto p = parser("texture_depth_multisampled_2d");
- auto t = p->depth_texture_type();
- EXPECT_TRUE(t.matched);
- EXPECT_FALSE(t.errored);
- ASSERT_NE(t.value, nullptr);
- ASSERT_TRUE(t->Is<ast::Texture>());
- ASSERT_TRUE(t->Is<ast::DepthMultisampledTexture>());
- EXPECT_EQ(t->As<ast::Texture>()->dim, ast::TextureDimension::k2d);
- EXPECT_FALSE(p->has_error());
- EXPECT_EQ(t.value->source.range, (Source::Range{{1u, 1u}, {1u, 30u}}));
-}
-
-} // namespace
-} // namespace tint::reader::wgsl
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_detail.h b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_detail.h
index 02d63dfcb07..d25edff4038 100644
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_detail.h
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_detail.h
@@ -29,34 +29,34 @@ namespace tint::reader::wgsl::detail {
/// the Expect<T> or Maybe<T> is not in an error state before dereferencing.
template <typename T>
struct OperatorArrow {
- /// type resolves to the return type for the operator->()
- using type = T*;
- /// @param val the value held by `ParserImpl::Expect<T>` or
- /// `ParserImpl::Maybe<T>`.
- /// @return a pointer to `val`
- static inline T* ptr(T& val) { return &val; }
+ /// type resolves to the return type for the operator->()
+ using type = T*;
+ /// @param val the value held by `ParserImpl::Expect<T>` or
+ /// `ParserImpl::Maybe<T>`.
+ /// @return a pointer to `val`
+ static inline T* ptr(T& val) { return &val; }
};
/// OperatorArrow template specialization for std::unique_ptr<>.
template <typename T>
struct OperatorArrow<std::unique_ptr<T>> {
- /// type resolves to the return type for the operator->()
- using type = T*;
- /// @param val the value held by `ParserImpl::Expect<T>` or
- /// `ParserImpl::Maybe<T>`.
- /// @return the raw pointer held by `val`.
- static inline T* ptr(std::unique_ptr<T>& val) { return val.get(); }
+ /// type resolves to the return type for the operator->()
+ using type = T*;
+ /// @param val the value held by `ParserImpl::Expect<T>` or
+ /// `ParserImpl::Maybe<T>`.
+ /// @return the raw pointer held by `val`.
+ static inline T* ptr(std::unique_ptr<T>& val) { return val.get(); }
};
/// OperatorArrow template specialization for T*.
template <typename T>
struct OperatorArrow<T*> {
- /// type resolves to the return type for the operator->()
- using type = T*;
- /// @param val the value held by `ParserImpl::Expect<T>` or
- /// `ParserImpl::Maybe<T>`.
- /// @return `val`.
- static inline T* ptr(T* val) { return val; }
+ /// type resolves to the return type for the operator->()
+ using type = T*;
+ /// @param val the value held by `ParserImpl::Expect<T>` or
+ /// `ParserImpl::Maybe<T>`.
+ /// @return `val`.
+ static inline T* ptr(T* val) { return val; }
};
} // namespace tint::reader::wgsl::detail
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_elseif_stmt_test.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_elseif_stmt_test.cc
deleted file mode 100644
index f0fb963a35a..00000000000
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_elseif_stmt_test.cc
+++ /dev/null
@@ -1,66 +0,0 @@
-// Copyright 2020 The Tint Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "src/tint/reader/wgsl/parser_impl_test_helper.h"
-
-namespace tint::reader::wgsl {
-namespace {
-
-TEST_F(ParserImplTest, ElseStmts) {
- auto p = parser("else if (a == 4) { a = b; c = d; }");
- auto e = p->else_stmts();
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_EQ(e.value.size(), 1u);
-
- ASSERT_TRUE(e.value[0]->Is<ast::ElseStatement>());
- ASSERT_NE(e.value[0]->condition, nullptr);
- ASSERT_TRUE(e.value[0]->condition->Is<ast::BinaryExpression>());
- EXPECT_EQ(e.value[0]->body->statements.size(), 2u);
-}
-
-TEST_F(ParserImplTest, ElseStmts_Multiple) {
- auto p = parser("else if (a == 4) { a = b; c = d; } else if(c) { d = 2; }");
- auto e = p->else_stmts();
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_EQ(e.value.size(), 2u);
-
- ASSERT_TRUE(e.value[0]->Is<ast::ElseStatement>());
- ASSERT_NE(e.value[0]->condition, nullptr);
- ASSERT_TRUE(e.value[0]->condition->Is<ast::BinaryExpression>());
- EXPECT_EQ(e.value[0]->body->statements.size(), 2u);
-
- ASSERT_TRUE(e.value[1]->Is<ast::ElseStatement>());
- ASSERT_NE(e.value[1]->condition, nullptr);
- ASSERT_TRUE(e.value[1]->condition->Is<ast::IdentifierExpression>());
- EXPECT_EQ(e.value[1]->body->statements.size(), 1u);
-}
-
-TEST_F(ParserImplTest, ElseStmts_InvalidBody) {
- auto p = parser("else if (true) { fn main() {}}");
- auto e = p->else_stmts();
- EXPECT_TRUE(e.errored);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:18: expected '}'");
-}
-
-TEST_F(ParserImplTest, ElseStmts_MissingBody) {
- auto p = parser("else if (true)");
- auto e = p->else_stmts();
- EXPECT_TRUE(e.errored);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:15: expected '{'");
-}
-
-} // namespace
-} // namespace tint::reader::wgsl
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_enable_directive_test.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_enable_directive_test.cc
new file mode 100644
index 00000000000..bdf7eebd12c
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_enable_directive_test.cc
@@ -0,0 +1,167 @@
+// Copyright 2022 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/tint/reader/wgsl/parser_impl_test_helper.h"
+
+#include "src/tint/ast/enable.h"
+
+namespace tint::reader::wgsl {
+namespace {
+
+using EnableDirectiveTest = ParserImplTest;
+
+// Test a valid enable directive.
+TEST_F(EnableDirectiveTest, Valid) {
+ auto p = parser("enable f16;");
+ p->enable_directive();
+ EXPECT_FALSE(p->has_error()) << p->error();
+ auto program = p->program();
+ auto& ast = program.AST();
+ ASSERT_EQ(ast.Enables().size(), 1u);
+ auto* enable = ast.Enables()[0];
+ EXPECT_EQ(enable->extension, ast::Extension::kF16);
+ ASSERT_EQ(ast.GlobalDeclarations().size(), 1u);
+ EXPECT_EQ(ast.GlobalDeclarations()[0], enable);
+}
+
+// Test multiple enable directives for the same extension.
+TEST_F(EnableDirectiveTest, EnableMultipleTime) {
+ auto p = parser(R"(
+enable f16;
+enable f16;
+)");
+ p->translation_unit();
+ EXPECT_FALSE(p->has_error()) << p->error();
+ auto program = p->program();
+ auto& ast = program.AST();
+ ASSERT_EQ(ast.Enables().size(), 2u);
+ auto* enable_a = ast.Enables()[0];
+ auto* enable_b = ast.Enables()[1];
+ EXPECT_EQ(enable_a->extension, ast::Extension::kF16);
+ EXPECT_EQ(enable_b->extension, ast::Extension::kF16);
+ ASSERT_EQ(ast.GlobalDeclarations().size(), 2u);
+ EXPECT_EQ(ast.GlobalDeclarations()[0], enable_a);
+ EXPECT_EQ(ast.GlobalDeclarations()[1], enable_b);
+}
+
+// Test an unknown extension identifier.
+TEST_F(EnableDirectiveTest, InvalidIdentifier) {
+ auto p = parser("enable NotAValidExtensionName;");
+ p->enable_directive();
+ // Error when an unknown extension is found
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:8: unsupported extension: 'NotAValidExtensionName'");
+ auto program = p->program();
+ auto& ast = program.AST();
+ EXPECT_EQ(ast.Enables().size(), 0u);
+ EXPECT_EQ(ast.GlobalDeclarations().size(), 0u);
+}
+
+// Test an enable directive that is missing the ending semicolon.
+TEST_F(EnableDirectiveTest, MissingEndingSemicolon) {
+ auto p = parser("enable f16");
+ p->translation_unit();
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:11: expected ';' for enable directive");
+ auto program = p->program();
+ auto& ast = program.AST();
+ EXPECT_EQ(ast.Enables().size(), 0u);
+ EXPECT_EQ(ast.GlobalDeclarations().size(), 0u);
+}
+
+// Test using invalid tokens in an enable directive.
+TEST_F(EnableDirectiveTest, InvalidTokens) {
+ {
+ auto p = parser("enable f16<;");
+ p->translation_unit();
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:11: expected ';' for enable directive");
+ auto program = p->program();
+ auto& ast = program.AST();
+ EXPECT_EQ(ast.Enables().size(), 0u);
+ EXPECT_EQ(ast.GlobalDeclarations().size(), 0u);
+ }
+ {
+ auto p = parser("enable <f16;");
+ p->translation_unit();
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:8: invalid extension name");
+ auto program = p->program();
+ auto& ast = program.AST();
+ EXPECT_EQ(ast.Enables().size(), 0u);
+ EXPECT_EQ(ast.GlobalDeclarations().size(), 0u);
+ }
+ {
+ auto p = parser("enable =;");
+ p->translation_unit();
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:8: invalid extension name");
+ auto program = p->program();
+ auto& ast = program.AST();
+ EXPECT_EQ(ast.Enables().size(), 0u);
+ EXPECT_EQ(ast.GlobalDeclarations().size(), 0u);
+ }
+ {
+ auto p = parser("enable vec4;");
+ p->translation_unit();
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:8: invalid extension name");
+ auto program = p->program();
+ auto& ast = program.AST();
+ EXPECT_EQ(ast.Enables().size(), 0u);
+ EXPECT_EQ(ast.GlobalDeclarations().size(), 0u);
+ }
+}
+
+// Test an enable directive that comes after other global declarations.
+TEST_F(EnableDirectiveTest, FollowingOtherGlobalDecl) {
+ auto p = parser(R"(
+var<private> t: f32 = 0f;
+enable f16;
+)");
+ p->translation_unit();
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "3:1: enable directives must come before all global declarations");
+ auto program = p->program();
+ auto& ast = program.AST();
+ // Accept the enable directive although it caused an error
+ ASSERT_EQ(ast.Enables().size(), 1u);
+ auto* enable = ast.Enables()[0];
+ EXPECT_EQ(enable->extension, ast::Extension::kF16);
+ ASSERT_EQ(ast.GlobalDeclarations().size(), 2u);
+ EXPECT_EQ(ast.GlobalDeclarations()[1], enable);
+}
+
+// Test an enable directive that comes after an empty semicolon.
+TEST_F(EnableDirectiveTest, FollowingEmptySemicolon) {
+ auto p = parser(R"(
+;
+enable f16;
+)");
+ p->translation_unit();
+ // An empty semicolon is treated as a global declaration
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "3:1: enable directives must come before all global declarations");
+ auto program = p->program();
+ auto& ast = program.AST();
+ // Accept the enable directive although it caused an error
+ ASSERT_EQ(ast.Enables().size(), 1u);
+ auto* enable = ast.Enables()[0];
+ EXPECT_EQ(enable->extension, ast::Extension::kF16);
+ ASSERT_EQ(ast.GlobalDeclarations().size(), 1u);
+ EXPECT_EQ(ast.GlobalDeclarations()[0], enable);
+}
+
+} // namespace
+} // namespace tint::reader::wgsl
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_equality_expression_test.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_equality_expression_test.cc
index 64cf213302c..158227d833f 100644
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_equality_expression_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_equality_expression_test.cc
@@ -18,72 +18,82 @@ namespace tint::reader::wgsl {
namespace {
TEST_F(ParserImplTest, EqualityExpression_Parses_Equal) {
- auto p = parser("a == true");
- auto e = p->equality_expression();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
-
- ASSERT_TRUE(e->Is<ast::BinaryExpression>());
- auto* rel = e->As<ast::BinaryExpression>();
- EXPECT_EQ(ast::BinaryOp::kEqual, rel->op);
-
- ASSERT_TRUE(rel->lhs->Is<ast::IdentifierExpression>());
- auto* ident = rel->lhs->As<ast::IdentifierExpression>();
- EXPECT_EQ(ident->symbol, p->builder().Symbols().Get("a"));
-
- ASSERT_TRUE(rel->rhs->Is<ast::BoolLiteralExpression>());
- ASSERT_TRUE(rel->rhs->As<ast::BoolLiteralExpression>()->value);
+ auto p = parser("a == true");
+ auto e = p->equality_expression();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
+
+ EXPECT_EQ(e->source.range.begin.line, 1u);
+ EXPECT_EQ(e->source.range.begin.column, 3u);
+ EXPECT_EQ(e->source.range.end.line, 1u);
+ EXPECT_EQ(e->source.range.end.column, 5u);
+
+ ASSERT_TRUE(e->Is<ast::BinaryExpression>());
+ auto* rel = e->As<ast::BinaryExpression>();
+ EXPECT_EQ(ast::BinaryOp::kEqual, rel->op);
+
+ ASSERT_TRUE(rel->lhs->Is<ast::IdentifierExpression>());
+ auto* ident = rel->lhs->As<ast::IdentifierExpression>();
+ EXPECT_EQ(ident->symbol, p->builder().Symbols().Get("a"));
+
+ ASSERT_TRUE(rel->rhs->Is<ast::BoolLiteralExpression>());
+ ASSERT_TRUE(rel->rhs->As<ast::BoolLiteralExpression>()->value);
}
TEST_F(ParserImplTest, EqualityExpression_Parses_NotEqual) {
- auto p = parser("a != true");
- auto e = p->equality_expression();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
-
- ASSERT_TRUE(e->Is<ast::BinaryExpression>());
- auto* rel = e->As<ast::BinaryExpression>();
- EXPECT_EQ(ast::BinaryOp::kNotEqual, rel->op);
-
- ASSERT_TRUE(rel->lhs->Is<ast::IdentifierExpression>());
- auto* ident = rel->lhs->As<ast::IdentifierExpression>();
- EXPECT_EQ(ident->symbol, p->builder().Symbols().Get("a"));
-
- ASSERT_TRUE(rel->rhs->Is<ast::BoolLiteralExpression>());
- ASSERT_TRUE(rel->rhs->As<ast::BoolLiteralExpression>()->value);
+ auto p = parser("a != true");
+ auto e = p->equality_expression();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
+
+ EXPECT_EQ(e->source.range.begin.line, 1u);
+ EXPECT_EQ(e->source.range.begin.column, 3u);
+ EXPECT_EQ(e->source.range.end.line, 1u);
+ EXPECT_EQ(e->source.range.end.column, 5u);
+
+ ASSERT_TRUE(e->Is<ast::BinaryExpression>());
+ auto* rel = e->As<ast::BinaryExpression>();
+ EXPECT_EQ(ast::BinaryOp::kNotEqual, rel->op);
+
+ ASSERT_TRUE(rel->lhs->Is<ast::IdentifierExpression>());
+ auto* ident = rel->lhs->As<ast::IdentifierExpression>();
+ EXPECT_EQ(ident->symbol, p->builder().Symbols().Get("a"));
+
+ ASSERT_TRUE(rel->rhs->Is<ast::BoolLiteralExpression>());
+ ASSERT_TRUE(rel->rhs->As<ast::BoolLiteralExpression>()->value);
}
TEST_F(ParserImplTest, EqualityExpression_InvalidLHS) {
- auto p = parser("if (a) {} == true");
- auto e = p->equality_expression();
- EXPECT_FALSE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- EXPECT_EQ(e.value, nullptr);
+ auto p = parser("if (a) {} == true");
+ auto e = p->equality_expression();
+ EXPECT_FALSE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ EXPECT_EQ(e.value, nullptr);
}
TEST_F(ParserImplTest, EqualityExpression_InvalidRHS) {
- auto p = parser("true == if (a) {}");
- auto e = p->equality_expression();
- EXPECT_FALSE(e.matched);
- EXPECT_TRUE(e.errored);
- EXPECT_EQ(e.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:9: unable to parse right side of == expression");
+ auto p = parser("true == if (a) {}");
+ auto e = p->equality_expression();
+ EXPECT_FALSE(e.matched);
+ EXPECT_TRUE(e.errored);
+ EXPECT_EQ(e.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:9: unable to parse right side of == expression");
}
TEST_F(ParserImplTest, EqualityExpression_NoOr_ReturnsLHS) {
- auto p = parser("a true");
- auto e = p->equality_expression();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
- ASSERT_TRUE(e->Is<ast::IdentifierExpression>());
+ auto p = parser("a true");
+ auto e = p->equality_expression();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
+ ASSERT_TRUE(e->Is<ast::IdentifierExpression>());
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_error_msg_test.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_error_msg_test.cc
index bbbb511b0f2..ab97d94128a 100644
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_error_msg_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_error_msg_test.cc
@@ -17,322 +17,323 @@
namespace tint::reader::wgsl {
namespace {
-const diag::Formatter::Style formatter_style{
- /* print_file: */ true, /* print_severity: */ true,
- /* print_line: */ true, /* print_newline_at_end: */ false};
+const diag::Formatter::Style formatter_style{/* print_file: */ true, /* print_severity: */ true,
+ /* print_line: */ true,
+ /* print_newline_at_end: */ false};
class ParserImplErrorTest : public ParserImplTest {};
-#define EXPECT(SOURCE, EXPECTED) \
- do { \
- std::string source = SOURCE; \
- std::string expected = EXPECTED; \
- auto p = parser(source); \
- p->set_max_errors(5); \
- EXPECT_EQ(false, p->Parse()); \
- auto diagnostics = p->builder().Diagnostics(); \
- EXPECT_EQ(true, diagnostics.contains_errors()); \
- EXPECT_EQ(expected, diag::Formatter(formatter_style).format(diagnostics)); \
- } while (false)
+#define EXPECT(SOURCE, EXPECTED) \
+ do { \
+ std::string source = SOURCE; \
+ std::string expected = EXPECTED; \
+ auto p = parser(source); \
+ p->set_max_errors(5); \
+ EXPECT_EQ(false, p->Parse()); \
+ auto diagnostics = p->builder().Diagnostics(); \
+ EXPECT_EQ(true, diagnostics.contains_errors()); \
+ EXPECT_EQ(expected, diag::Formatter(formatter_style).format(diagnostics)); \
+ } while (false)
TEST_F(ParserImplErrorTest, AdditiveInvalidExpr) {
- EXPECT("fn f() { return 1.0 + <; }",
- R"(test.wgsl:1:23 error: unable to parse right side of + expression
+ EXPECT("fn f() { return 1.0 + <; }",
+ R"(test.wgsl:1:23 error: unable to parse right side of + expression
fn f() { return 1.0 + <; }
^
)");
}
TEST_F(ParserImplErrorTest, AndInvalidExpr) {
- EXPECT("fn f() { return 1 & >; }",
- R"(test.wgsl:1:21 error: unable to parse right side of & expression
+ EXPECT("fn f() { return 1 & >; }",
+ R"(test.wgsl:1:21 error: unable to parse right side of & expression
fn f() { return 1 & >; }
^
)");
}
TEST_F(ParserImplErrorTest, AliasDeclInvalidAttribute) {
- EXPECT("@invariant type e=u32;",
- R"(test.wgsl:1:2 error: unexpected attributes
+ EXPECT("@invariant type e=u32;",
+ R"(test.wgsl:1:2 error: unexpected attributes
@invariant type e=u32;
^^^^^^^^^
)");
}
TEST_F(ParserImplErrorTest, IndexExprInvalidExpr) {
- EXPECT("fn f() { x = y[^]; }",
- R"(test.wgsl:1:16 error: unable to parse expression inside []
+ EXPECT("fn f() { x = y[^]; }",
+ R"(test.wgsl:1:16 error: unable to parse expression inside []
fn f() { x = y[^]; }
^
)");
}
TEST_F(ParserImplErrorTest, IndexExprMissingRBracket) {
- EXPECT("fn f() { x = y[1; }",
- R"(test.wgsl:1:17 error: expected ']' for index accessor
+ EXPECT("fn f() { x = y[1; }",
+ R"(test.wgsl:1:17 error: expected ']' for index accessor
fn f() { x = y[1; }
^
)");
}
TEST_F(ParserImplErrorTest, AssignmentStmtMissingAssignment) {
- EXPECT("fn f() { a; }", R"(test.wgsl:1:11 error: expected '=' for assignment
+ EXPECT("fn f() { a; }", R"(test.wgsl:1:11 error: expected '=' for assignment
fn f() { a; }
^
)");
}
TEST_F(ParserImplErrorTest, AssignmentStmtMissingAssignment2) {
- EXPECT("fn f() { a : i32; }",
- R"(test.wgsl:1:10 error: expected 'var' for variable declaration
+ EXPECT("fn f() { a : i32; }",
+ R"(test.wgsl:1:10 error: expected 'var' for variable declaration
fn f() { a : i32; }
^
)");
}
TEST_F(ParserImplErrorTest, AssignmentStmtMissingSemicolon) {
- EXPECT("fn f() { a = 1 }",
- R"(test.wgsl:1:16 error: expected ';' for assignment statement
+ EXPECT("fn f() { a = 1 }",
+ R"(test.wgsl:1:16 error: expected ';' for assignment statement
fn f() { a = 1 }
^
)");
}
TEST_F(ParserImplErrorTest, AssignmentStmtInvalidLHS_BuiltinFunctionName) {
- EXPECT("normalize = 5;",
- R"(test.wgsl:1:1 error: statement found outside of function body
+ EXPECT("normalize = 5;",
+ R"(test.wgsl:1:1 error: statement found outside of function body
normalize = 5;
^^^^^^^^^
)");
}
TEST_F(ParserImplErrorTest, AssignmentStmtInvalidRHS) {
- EXPECT("fn f() { a = >; }",
- R"(test.wgsl:1:14 error: unable to parse right side of assignment
+ EXPECT("fn f() { a = >; }",
+ R"(test.wgsl:1:14 error: unable to parse right side of assignment
fn f() { a = >; }
^
)");
}
TEST_F(ParserImplErrorTest, BitcastExprMissingLessThan) {
- EXPECT("fn f() { x = bitcast(y); }",
- R"(test.wgsl:1:21 error: expected '<' for bitcast expression
+ EXPECT("fn f() { x = bitcast(y); }",
+ R"(test.wgsl:1:21 error: expected '<' for bitcast expression
fn f() { x = bitcast(y); }
^
)");
}
TEST_F(ParserImplErrorTest, BitcastExprMissingGreaterThan) {
- EXPECT("fn f() { x = bitcast<u32(y); }",
- R"(test.wgsl:1:25 error: expected '>' for bitcast expression
+ EXPECT("fn f() { x = bitcast<u32(y); }",
+ R"(test.wgsl:1:25 error: expected '>' for bitcast expression
fn f() { x = bitcast<u32(y); }
^
)");
}
TEST_F(ParserImplErrorTest, BitcastExprMissingType) {
- EXPECT("fn f() { x = bitcast<>(y); }",
- R"(test.wgsl:1:22 error: invalid type for bitcast expression
+ EXPECT("fn f() { x = bitcast<>(y); }",
+ R"(test.wgsl:1:22 error: invalid type for bitcast expression
fn f() { x = bitcast<>(y); }
^
)");
}
TEST_F(ParserImplErrorTest, BreakStmtMissingSemicolon) {
- EXPECT("fn f() { loop { break } }",
- R"(test.wgsl:1:23 error: expected ';' for break statement
+ EXPECT("fn f() { loop { break } }",
+ R"(test.wgsl:1:23 error: expected ';' for break statement
fn f() { loop { break } }
^
)");
}
TEST_F(ParserImplErrorTest, CallExprMissingRParen) {
- EXPECT("fn f() { x = f(1.; }",
- R"(test.wgsl:1:18 error: expected ')' for function call
+ EXPECT("fn f() { x = f(1.; }",
+ R"(test.wgsl:1:18 error: expected ')' for function call
fn f() { x = f(1.; }
^
)");
}
TEST_F(ParserImplErrorTest, CallStmtMissingRParen) {
- EXPECT("fn f() { f(1.; }",
- R"(test.wgsl:1:14 error: expected ')' for function call
+ EXPECT("fn f() { f(1.; }",
+ R"(test.wgsl:1:14 error: expected ')' for function call
fn f() { f(1.; }
^
)");
}
TEST_F(ParserImplErrorTest, CallStmtInvalidArgument0) {
- EXPECT("fn f() { f(<); }",
- R"(test.wgsl:1:12 error: expected ')' for function call
+ EXPECT("fn f() { f(<); }",
+ R"(test.wgsl:1:12 error: expected ')' for function call
fn f() { f(<); }
^
)");
}
TEST_F(ParserImplErrorTest, CallStmtInvalidArgument1) {
- EXPECT("fn f() { f(1.0, <); }",
- R"(test.wgsl:1:17 error: expected ')' for function call
+ EXPECT("fn f() { f(1.0, <); }",
+ R"(test.wgsl:1:17 error: expected ')' for function call
fn f() { f(1.0, <); }
^
)");
}
TEST_F(ParserImplErrorTest, CallStmtMissingSemicolon) {
- EXPECT("fn f() { f() }",
- R"(test.wgsl:1:14 error: expected ';' for function call
+ EXPECT("fn f() { f() }",
+ R"(test.wgsl:1:14 error: expected ';' for function call
fn f() { f() }
^
)");
}
TEST_F(ParserImplErrorTest, ConstructorExprMissingLParen) {
- EXPECT("fn f() { x = vec2<u32>1,2); }",
- R"(test.wgsl:1:23 error: expected '(' for type constructor
+ EXPECT("fn f() { x = vec2<u32>1,2); }",
+ R"(test.wgsl:1:23 error: expected '(' for type constructor
fn f() { x = vec2<u32>1,2); }
^
)");
}
TEST_F(ParserImplErrorTest, ConstructorExprMissingRParen) {
- EXPECT("fn f() { x = vec2<u32>(1,2; }",
- R"(test.wgsl:1:27 error: expected ')' for type constructor
+ EXPECT("fn f() { x = vec2<u32>(1,2; }",
+ R"(test.wgsl:1:27 error: expected ')' for type constructor
fn f() { x = vec2<u32>(1,2; }
^
)");
}
TEST_F(ParserImplErrorTest, ConstVarStmtInvalid) {
- EXPECT("fn f() { let >; }",
- R"(test.wgsl:1:14 error: expected identifier for let declaration
+ EXPECT("fn f() { let >; }",
+ R"(test.wgsl:1:14 error: expected identifier for let declaration
fn f() { let >; }
^
)");
}
TEST_F(ParserImplErrorTest, ConstVarStmtMissingAssignment) {
- EXPECT("fn f() { let a : i32; }",
- R"(test.wgsl:1:21 error: expected '=' for let declaration
+ EXPECT("fn f() { let a : i32; }",
+ R"(test.wgsl:1:21 error: expected '=' for let declaration
fn f() { let a : i32; }
^
)");
}
TEST_F(ParserImplErrorTest, ConstVarStmtMissingConstructor) {
- EXPECT("fn f() { let a : i32 = >; }",
- R"(test.wgsl:1:24 error: missing constructor for let declaration
+ EXPECT("fn f() { let a : i32 = >; }",
+ R"(test.wgsl:1:24 error: missing constructor for let declaration
fn f() { let a : i32 = >; }
^
)");
}
TEST_F(ParserImplErrorTest, ContinueStmtMissingSemicolon) {
- EXPECT("fn f() { loop { continue } }",
- R"(test.wgsl:1:26 error: expected ';' for continue statement
+ EXPECT("fn f() { loop { continue } }",
+ R"(test.wgsl:1:26 error: expected ';' for continue statement
fn f() { loop { continue } }
^
)");
}
TEST_F(ParserImplErrorTest, DiscardStmtMissingSemicolon) {
- EXPECT("fn f() { discard }",
- R"(test.wgsl:1:18 error: expected ';' for discard statement
+ EXPECT("fn f() { discard }",
+ R"(test.wgsl:1:18 error: expected ';' for discard statement
fn f() { discard }
^
)");
}
TEST_F(ParserImplErrorTest, EqualityInvalidExpr) {
- EXPECT("fn f() { return 1 == >; }",
- R"(test.wgsl:1:22 error: unable to parse right side of == expression
+ EXPECT("fn f() { return 1 == >; }",
+ R"(test.wgsl:1:22 error: unable to parse right side of == expression
fn f() { return 1 == >; }
^
)");
}
TEST_F(ParserImplErrorTest, ForLoopInitializerMissingSemicolon) {
- EXPECT("fn f() { for (var i : i32 = 0 i < 8; i=i+1) {} }",
- R"(test.wgsl:1:31 error: expected ';' for initializer in for loop
+ EXPECT("fn f() { for (var i : i32 = 0 i < 8; i=i+1) {} }",
+ R"(test.wgsl:1:31 error: expected ';' for initializer in for loop
fn f() { for (var i : i32 = 0 i < 8; i=i+1) {} }
^
)");
}
TEST_F(ParserImplErrorTest, ForLoopInitializerMissingVar) {
- EXPECT("fn f() { for (i : i32 = 0; i < 8; i=i+1) {} }",
- R"(test.wgsl:1:15 error: expected 'var' for variable declaration
+ EXPECT("fn f() { for (i : i32 = 0; i < 8; i=i+1) {} }",
+ R"(test.wgsl:1:15 error: expected 'var' for variable declaration
fn f() { for (i : i32 = 0; i < 8; i=i+1) {} }
^
)");
}
TEST_F(ParserImplErrorTest, ForLoopConditionMissingSemicolon) {
- EXPECT("fn f() { for (var i : i32 = 0; i < 8 i=i+1) {} }",
- R"(test.wgsl:1:38 error: expected ';' for condition in for loop
+ EXPECT("fn f() { for (var i : i32 = 0; i < 8 i=i+1) {} }",
+ R"(test.wgsl:1:38 error: expected ';' for condition in for loop
fn f() { for (var i : i32 = 0; i < 8 i=i+1) {} }
^
)");
}
TEST_F(ParserImplErrorTest, ForLoopMissingLParen) {
- EXPECT("fn f() { for var i : i32 = 0; i < 8; i=i+1) {} }",
- R"(test.wgsl:1:14 error: expected '(' for for loop
+ EXPECT("fn f() { for var i : i32 = 0; i < 8; i=i+1) {} }",
+ R"(test.wgsl:1:14 error: expected '(' for for loop
fn f() { for var i : i32 = 0; i < 8; i=i+1) {} }
^^^
)");
}
TEST_F(ParserImplErrorTest, ForLoopMissingRParen) {
- EXPECT("fn f() { for (var i : i32 = 0; i < 8; i=i+1 {} }",
- R"(test.wgsl:1:45 error: expected ')' for for loop
+ EXPECT("fn f() { for (var i : i32 = 0; i < 8; i=i+1 {} }",
+ R"(test.wgsl:1:45 error: expected ')' for for loop
fn f() { for (var i : i32 = 0; i < 8; i=i+1 {} }
^
)");
}
TEST_F(ParserImplErrorTest, ForLoopMissingLBrace) {
- EXPECT("fn f() { for (var i : i32 = 0; i < 8; i=i+1) }",
- R"(test.wgsl:1:46 error: expected '{' for for loop
+ EXPECT("fn f() { for (var i : i32 = 0; i < 8; i=i+1) }",
+ R"(test.wgsl:1:46 error: expected '{' for for loop
fn f() { for (var i : i32 = 0; i < 8; i=i+1) }
^
)");
}
TEST_F(ParserImplErrorTest, ForLoopMissingRBrace) {
- EXPECT("fn f() { for (var i : i32 = 0; i < 8; i=i+1) {",
- R"(test.wgsl:1:47 error: expected '}' for for loop
+ EXPECT("fn f() { for (var i : i32 = 0; i < 8; i=i+1) {",
+ R"(test.wgsl:1:47 error: expected '}' for for loop
fn f() { for (var i : i32 = 0; i < 8; i=i+1) {
^
)");
}
+// TODO(crbug.com/tint/1503): Remove this when @stage is removed
TEST_F(ParserImplErrorTest, FunctionDeclStageMissingLParen) {
- EXPECT("@stage vertex) fn f() {}",
- R"(test.wgsl:1:8 error: expected '(' for stage attribute
+ EXPECT("@stage vertex) fn f() {}",
+ R"(test.wgsl:1:8 error: expected '(' for stage attribute
@stage vertex) fn f() {}
^^^^^^
)");
}
TEST_F(ParserImplErrorTest, FunctionDeclStageMissingRParen) {
- EXPECT("@stage(vertex fn f() {}",
- R"(test.wgsl:1:15 error: expected ')' for stage attribute
+ EXPECT("@stage(vertex fn f() {}",
+ R"(test.wgsl:1:15 error: expected ')' for stage attribute
@stage(vertex fn f() {}
^^
)");
}
TEST_F(ParserImplErrorTest, FunctionDeclStageInvalid) {
- EXPECT("@stage(x) fn f() {}",
- R"(test.wgsl:1:8 error: invalid value for stage attribute
+ EXPECT("@stage(x) fn f() {}",
+ R"(test.wgsl:1:8 error: invalid value for stage attribute
@stage(x) fn f() {}
^
)");
}
TEST_F(ParserImplErrorTest, FunctionDeclStageTypeInvalid) {
- EXPECT("@shader(vertex) fn main() {}",
- R"(test.wgsl:1:2 error: expected attribute
+ EXPECT("@shader(vertex) fn main() {}",
+ R"(test.wgsl:1:2 error: expected attribute
@shader(vertex) fn main() {}
^^^^^^
@@ -343,118 +344,118 @@ test.wgsl:1:8 error: unexpected token
}
TEST_F(ParserImplErrorTest, FunctionDeclWorkgroupSizeXInvalid) {
- EXPECT("@workgroup_size() fn f() {}",
- R"(test.wgsl:1:17 error: expected workgroup_size x parameter
+ EXPECT("@workgroup_size() fn f() {}",
+ R"(test.wgsl:1:17 error: expected workgroup_size x parameter
@workgroup_size() fn f() {}
^
)");
}
TEST_F(ParserImplErrorTest, FunctionDeclWorkgroupSizeYInvalid) {
- EXPECT("@workgroup_size(1, ) fn f() {}",
- R"(test.wgsl:1:20 error: expected workgroup_size y parameter
+ EXPECT("@workgroup_size(1, ) fn f() {}",
+ R"(test.wgsl:1:20 error: expected workgroup_size y parameter
@workgroup_size(1, ) fn f() {}
^
)");
}
TEST_F(ParserImplErrorTest, FunctionDeclWorkgroupSizeZInvalid) {
- EXPECT("@workgroup_size(1, 2, ) fn f() {}",
- R"(test.wgsl:1:23 error: expected workgroup_size z parameter
+ EXPECT("@workgroup_size(1, 2, ) fn f() {}",
+ R"(test.wgsl:1:23 error: expected workgroup_size z parameter
@workgroup_size(1, 2, ) fn f() {}
^
)");
}
TEST_F(ParserImplErrorTest, FunctionDeclMissingIdentifier) {
- EXPECT("fn () {}",
- R"(test.wgsl:1:4 error: expected identifier for function declaration
+ EXPECT("fn () {}",
+ R"(test.wgsl:1:4 error: expected identifier for function declaration
fn () {}
^
)");
}
TEST_F(ParserImplErrorTest, FunctionDeclMissingLParen) {
- EXPECT("fn f) {}",
- R"(test.wgsl:1:5 error: expected '(' for function declaration
+ EXPECT("fn f) {}",
+ R"(test.wgsl:1:5 error: expected '(' for function declaration
fn f) {}
^
)");
}
TEST_F(ParserImplErrorTest, FunctionDeclMissingRParen) {
- EXPECT("fn f( {}",
- R"(test.wgsl:1:7 error: expected ')' for function declaration
+ EXPECT("fn f( {}",
+ R"(test.wgsl:1:7 error: expected ')' for function declaration
fn f( {}
^
)");
}
TEST_F(ParserImplErrorTest, FunctionDeclMissingArrow) {
- EXPECT("fn f() f32 {}", R"(test.wgsl:1:8 error: expected '{'
+ EXPECT("fn f() f32 {}", R"(test.wgsl:1:8 error: expected '{'
fn f() f32 {}
^^^
)");
}
TEST_F(ParserImplErrorTest, FunctionDeclInvalidReturnType) {
- EXPECT("fn f() -> 1 {}",
- R"(test.wgsl:1:11 error: unable to determine function return type
+ EXPECT("fn f() -> 1 {}",
+ R"(test.wgsl:1:11 error: unable to determine function return type
fn f() -> 1 {}
^
)");
}
TEST_F(ParserImplErrorTest, FunctionDeclParamMissingColon) {
- EXPECT("fn f(x) {}", R"(test.wgsl:1:7 error: expected ':' for parameter
+ EXPECT("fn f(x) {}", R"(test.wgsl:1:7 error: expected ':' for parameter
fn f(x) {}
^
)");
}
TEST_F(ParserImplErrorTest, FunctionDeclParamInvalidType) {
- EXPECT("fn f(x : 1) {}", R"(test.wgsl:1:10 error: invalid type for parameter
+ EXPECT("fn f(x : 1) {}", R"(test.wgsl:1:10 error: invalid type for parameter
fn f(x : 1) {}
^
)");
}
TEST_F(ParserImplErrorTest, FunctionDeclParamMissing) {
- EXPECT("fn f(x : i32, ,) {}",
- R"(test.wgsl:1:15 error: expected ')' for function declaration
+ EXPECT("fn f(x : i32, ,) {}",
+ R"(test.wgsl:1:15 error: expected ')' for function declaration
fn f(x : i32, ,) {}
^
)");
}
TEST_F(ParserImplErrorTest, FunctionDeclMissingLBrace) {
- EXPECT("fn f() }", R"(test.wgsl:1:8 error: expected '{'
+ EXPECT("fn f() }", R"(test.wgsl:1:8 error: expected '{'
fn f() }
^
)");
}
TEST_F(ParserImplErrorTest, FunctionDeclMissingRBrace) {
- EXPECT("fn f() {", R"(test.wgsl:1:9 error: expected '}'
+ EXPECT("fn f() {", R"(test.wgsl:1:9 error: expected '}'
fn f() {
^
)");
}
TEST_F(ParserImplErrorTest, FunctionScopeUnusedDecl) {
- EXPECT("fn f(a:i32)->i32{return a;@size(1)}",
- R"(test.wgsl:1:27 error: expected '}'
+ EXPECT("fn f(a:i32)->i32{return a;@size(1)}",
+ R"(test.wgsl:1:27 error: expected '}'
fn f(a:i32)->i32{return a;@size(1)}
^
)");
}
TEST_F(ParserImplErrorTest, FunctionMissingOpenLine) {
- EXPECT(R"(let bar : vec2<f32> = vec2<f32>(1., 2.);
+ EXPECT(R"(let bar : vec2<f32> = vec2<f32>(1., 2.);
var a : f32 = bar[0];
return;
})",
- R"(test.wgsl:2:17 error: unable to parse const_expr
+ R"(test.wgsl:2:17 error: unable to parse const_expr
var a : f32 = bar[0];
^^^
@@ -465,332 +466,330 @@ test.wgsl:3:3 error: statement found outside of function body
}
TEST_F(ParserImplErrorTest, GlobalDeclConstInvalidIdentifier) {
- EXPECT("let ^ : i32 = 1;",
- R"(test.wgsl:1:5 error: expected identifier for let declaration
+ EXPECT("let ^ : i32 = 1;",
+ R"(test.wgsl:1:5 error: expected identifier for let declaration
let ^ : i32 = 1;
^
)");
}
TEST_F(ParserImplErrorTest, GlobalDeclConstMissingSemicolon) {
- EXPECT("let i : i32 = 1",
- R"(test.wgsl:1:16 error: expected ';' for let declaration
+ EXPECT("let i : i32 = 1",
+ R"(test.wgsl:1:16 error: expected ';' for let declaration
let i : i32 = 1
^
)");
}
TEST_F(ParserImplErrorTest, GlobalDeclConstMissingLParen) {
- EXPECT("let i : vec2<i32> = vec2<i32>;",
- R"(test.wgsl:1:30 error: expected '(' for type constructor
+ EXPECT("let i : vec2<i32> = vec2<i32>;",
+ R"(test.wgsl:1:30 error: expected '(' for type constructor
let i : vec2<i32> = vec2<i32>;
^
)");
}
TEST_F(ParserImplErrorTest, GlobalDeclConstMissingRParen) {
- EXPECT("let i : vec2<i32> = vec2<i32>(1., 2.;",
- R"(test.wgsl:1:37 error: expected ')' for type constructor
+ EXPECT("let i : vec2<i32> = vec2<i32>(1., 2.;",
+ R"(test.wgsl:1:37 error: expected ')' for type constructor
let i : vec2<i32> = vec2<i32>(1., 2.;
^
)");
}
TEST_F(ParserImplErrorTest, GlobalDeclConstBadConstLiteral) {
- EXPECT("let i : vec2<i32> = vec2<i32>(!);",
- R"(test.wgsl:1:31 error: unable to parse const_expr
+ EXPECT("let i : vec2<i32> = vec2<i32>(!);",
+ R"(test.wgsl:1:31 error: unable to parse const_expr
let i : vec2<i32> = vec2<i32>(!);
^
)");
}
TEST_F(ParserImplErrorTest, GlobalDeclConstBadConstLiteralSpaceLessThan) {
- EXPECT("let i = 1 < 2;",
- R"(test.wgsl:1:11 error: expected ';' for let declaration
+ EXPECT("let i = 1 < 2;",
+ R"(test.wgsl:1:11 error: expected ';' for let declaration
let i = 1 < 2;
^
)");
}
TEST_F(ParserImplErrorTest, GlobalDeclConstNotConstExpr) {
- EXPECT(
- "let a = 1;\n"
- "let b = a;",
- R"(test.wgsl:2:9 error: unable to parse const_expr
+ EXPECT(
+ "let a = 1;\n"
+ "let b = a;",
+ R"(test.wgsl:2:9 error: unable to parse const_expr
let b = a;
^
)");
}
TEST_F(ParserImplErrorTest, GlobalDeclConstExprMaxDepth) {
- uint32_t kMaxDepth = 128;
-
- std::stringstream src;
- std::stringstream mkr;
- src << "let i : i32 = ";
- mkr << " ";
- for (size_t i = 0; i < kMaxDepth + 8; i++) {
- src << "f32(";
- if (i < kMaxDepth) {
- mkr << " ";
- } else if (i == kMaxDepth) {
- mkr << "^^^";
+ uint32_t kMaxDepth = 128;
+
+ std::stringstream src;
+ std::stringstream mkr;
+ src << "let i : i32 = ";
+ mkr << " ";
+ for (size_t i = 0; i < kMaxDepth + 8; i++) {
+ src << "f32(";
+ if (i < kMaxDepth) {
+ mkr << " ";
+ } else if (i == kMaxDepth) {
+ mkr << "^^^";
+ }
}
- }
- src << "1.0";
- for (size_t i = 0; i < 200; i++) {
- src << ")";
- }
- src << ";";
- std::stringstream err;
- err << "test.wgsl:1:527 error: maximum parser recursive depth reached\n"
- << src.str() << "\n"
- << mkr.str() << "\n";
- EXPECT(src.str().c_str(), err.str().c_str());
+ src << "1.0";
+ for (size_t i = 0; i < 200; i++) {
+ src << ")";
+ }
+ src << ";";
+ std::stringstream err;
+ err << "test.wgsl:1:527 error: maximum parser recursive depth reached\n"
+ << src.str() << "\n"
+ << mkr.str() << "\n";
+ EXPECT(src.str().c_str(), err.str().c_str());
}
TEST_F(ParserImplErrorTest, GlobalDeclConstExprMissingLParen) {
- EXPECT("let i : vec2<i32> = vec2<i32> 1, 2);",
- R"(test.wgsl:1:31 error: expected '(' for type constructor
+ EXPECT("let i : vec2<i32> = vec2<i32> 1, 2);",
+ R"(test.wgsl:1:31 error: expected '(' for type constructor
let i : vec2<i32> = vec2<i32> 1, 2);
^
)");
}
TEST_F(ParserImplErrorTest, GlobalDeclConstExprMissingRParen) {
- EXPECT("let i : vec2<i32> = vec2<i32>(1, 2;",
- R"(test.wgsl:1:35 error: expected ')' for type constructor
+ EXPECT("let i : vec2<i32> = vec2<i32>(1, 2;",
+ R"(test.wgsl:1:35 error: expected ')' for type constructor
let i : vec2<i32> = vec2<i32>(1, 2;
^
)");
}
TEST_F(ParserImplErrorTest, GlobalDeclInvalidAttribute) {
- EXPECT("@stage(vertex) x;",
- R"(test.wgsl:1:16 error: expected declaration after attributes
-@stage(vertex) x;
- ^
+ EXPECT("@vertex x;",
+ R"(test.wgsl:1:9 error: expected declaration after attributes
+@vertex x;
+ ^
)");
}
TEST_F(ParserImplErrorTest, GlobalDeclSampledTextureMissingLessThan) {
- EXPECT("var x : texture_1d;",
- R"(test.wgsl:1:19 error: expected '<' for sampled texture type
+ EXPECT("var x : texture_1d;",
+ R"(test.wgsl:1:19 error: expected '<' for sampled texture type
var x : texture_1d;
^
)");
}
TEST_F(ParserImplErrorTest, GlobalDeclSampledTextureMissingGreaterThan) {
- EXPECT("var x : texture_1d<f32;",
- R"(test.wgsl:1:23 error: expected '>' for sampled texture type
+ EXPECT("var x : texture_1d<f32;",
+ R"(test.wgsl:1:23 error: expected '>' for sampled texture type
var x : texture_1d<f32;
^
)");
}
TEST_F(ParserImplErrorTest, GlobalDeclSampledTextureInvalidSubtype) {
- EXPECT("var x : texture_1d<1>;",
- R"(test.wgsl:1:20 error: invalid type for sampled texture type
+ EXPECT("var x : texture_1d<1>;",
+ R"(test.wgsl:1:20 error: invalid type for sampled texture type
var x : texture_1d<1>;
^
)");
}
TEST_F(ParserImplErrorTest, GlobalDeclMultisampledTextureMissingLessThan) {
- EXPECT("var x : texture_multisampled_2d;",
- R"(test.wgsl:1:32 error: expected '<' for multisampled texture type
+ EXPECT("var x : texture_multisampled_2d;",
+ R"(test.wgsl:1:32 error: expected '<' for multisampled texture type
var x : texture_multisampled_2d;
^
)");
}
TEST_F(ParserImplErrorTest, GlobalDeclMultisampledTextureMissingGreaterThan) {
- EXPECT("var x : texture_multisampled_2d<f32;",
- R"(test.wgsl:1:36 error: expected '>' for multisampled texture type
+ EXPECT("var x : texture_multisampled_2d<f32;",
+ R"(test.wgsl:1:36 error: expected '>' for multisampled texture type
var x : texture_multisampled_2d<f32;
^
)");
}
TEST_F(ParserImplErrorTest, GlobalDeclMultisampledTextureInvalidSubtype) {
- EXPECT("var x : texture_multisampled_2d<1>;",
- R"(test.wgsl:1:33 error: invalid type for multisampled texture type
+ EXPECT("var x : texture_multisampled_2d<1>;",
+ R"(test.wgsl:1:33 error: invalid type for multisampled texture type
var x : texture_multisampled_2d<1>;
^
)");
}
TEST_F(ParserImplErrorTest, GlobalDeclStorageTextureMissingLessThan) {
- EXPECT("var x : texture_storage_2d;",
- R"(test.wgsl:1:27 error: expected '<' for storage texture type
+ EXPECT("var x : texture_storage_2d;",
+ R"(test.wgsl:1:27 error: expected '<' for storage texture type
var x : texture_storage_2d;
^
)");
}
TEST_F(ParserImplErrorTest, GlobalDeclStorageTextureMissingGreaterThan) {
- EXPECT("var x : texture_storage_2d<r32uint, read;",
- R"(test.wgsl:1:41 error: expected '>' for storage texture type
+ EXPECT("var x : texture_storage_2d<r32uint, read;",
+ R"(test.wgsl:1:41 error: expected '>' for storage texture type
var x : texture_storage_2d<r32uint, read;
^
)");
}
TEST_F(ParserImplErrorTest, GlobalDeclStorageTextureMissingSubtype) {
- EXPECT("var x : texture_storage_2d<>;",
- R"(test.wgsl:1:28 error: invalid format for storage texture type
+ EXPECT("var x : texture_storage_2d<>;",
+ R"(test.wgsl:1:28 error: invalid format for storage texture type
var x : texture_storage_2d<>;
^
)");
}
TEST_F(ParserImplErrorTest, GlobalDeclStorageTextureMissingInvalidSubtype) {
- EXPECT("var x : texture_storage_2d<1>;",
- R"(test.wgsl:1:28 error: invalid format for storage texture type
+ EXPECT("var x : texture_storage_2d<1>;",
+ R"(test.wgsl:1:28 error: invalid format for storage texture type
var x : texture_storage_2d<1>;
^
)");
}
TEST_F(ParserImplErrorTest, GlobalDeclStructDeclMissingIdentifier) {
- EXPECT("struct {};",
- R"(test.wgsl:1:8 error: expected identifier for struct declaration
+ EXPECT("struct {};",
+ R"(test.wgsl:1:8 error: expected identifier for struct declaration
struct {};
^
)");
}
TEST_F(ParserImplErrorTest, GlobalDeclStructDeclMissingLBrace) {
- EXPECT("struct S };",
- R"(test.wgsl:1:10 error: expected '{' for struct declaration
+ EXPECT("struct S };",
+ R"(test.wgsl:1:10 error: expected '{' for struct declaration
struct S };
^
)");
}
TEST_F(ParserImplErrorTest, GlobalDeclStructDeclMissingRBrace) {
- EXPECT("struct S { i : i32,",
- R"(test.wgsl:1:20 error: expected '}' for struct declaration
+ EXPECT("struct S { i : i32,",
+ R"(test.wgsl:1:20 error: expected '}' for struct declaration
struct S { i : i32,
^
)");
}
TEST_F(ParserImplErrorTest, GlobalDeclStructMemberInvalidIdentifier) {
- EXPECT("struct S { 1 : i32, };",
- R"(test.wgsl:1:12 error: expected '}' for struct declaration
+ EXPECT("struct S { 1 : i32, };",
+ R"(test.wgsl:1:12 error: expected '}' for struct declaration
struct S { 1 : i32, };
^
)");
}
TEST_F(ParserImplErrorTest, GlobalDeclStructMemberAlignInvaldValue) {
- EXPECT(
- "struct S { @align(x) i : i32, };",
- R"(test.wgsl:1:19 error: expected signed integer literal for align attribute
+ EXPECT("struct S { @align(x) i : i32, };",
+ R"(test.wgsl:1:19 error: expected signed integer literal for align attribute
struct S { @align(x) i : i32, };
^
)");
}
TEST_F(ParserImplErrorTest, GlobalDeclStructMemberAlignNegativeValue) {
- EXPECT("struct S { @align(-2) i : i32, };",
- R"(test.wgsl:1:19 error: align attribute must be positive
+ EXPECT("struct S { @align(-2) i : i32, };",
+ R"(test.wgsl:1:19 error: align attribute must be positive
struct S { @align(-2) i : i32, };
^^
)");
}
TEST_F(ParserImplErrorTest, GlobalDeclStructMemberSizeInvaldValue) {
- EXPECT(
- "struct S { @size(x) i : i32, };",
- R"(test.wgsl:1:18 error: expected signed integer literal for size attribute
+ EXPECT("struct S { @size(x) i : i32, };",
+ R"(test.wgsl:1:18 error: expected signed integer literal for size attribute
struct S { @size(x) i : i32, };
^
)");
}
TEST_F(ParserImplErrorTest, GlobalDeclStructMemberSizeNegativeValue) {
- EXPECT("struct S { @size(-2) i : i32, };",
- R"(test.wgsl:1:18 error: size attribute must be positive
+ EXPECT("struct S { @size(-2) i : i32, };",
+ R"(test.wgsl:1:18 error: size attribute must be positive
struct S { @size(-2) i : i32, };
^^
)");
}
TEST_F(ParserImplErrorTest, GlobalDeclTypeAliasMissingIdentifier) {
- EXPECT("type 1 = f32;",
- R"(test.wgsl:1:6 error: expected identifier for type alias
+ EXPECT("type 1 = f32;",
+ R"(test.wgsl:1:6 error: expected identifier for type alias
type 1 = f32;
^
)");
}
TEST_F(ParserImplErrorTest, GlobalDeclTypeAliasInvalidType) {
- EXPECT("type meow = 1;", R"(test.wgsl:1:13 error: invalid type alias
+ EXPECT("type meow = 1;", R"(test.wgsl:1:13 error: invalid type alias
type meow = 1;
^
)");
}
TEST_F(ParserImplErrorTest, GlobalDeclTypeAliasMissingAssignment) {
- EXPECT("type meow f32", R"(test.wgsl:1:11 error: expected '=' for type alias
+ EXPECT("type meow f32", R"(test.wgsl:1:11 error: expected '=' for type alias
type meow f32
^^^
)");
}
TEST_F(ParserImplErrorTest, GlobalDeclTypeAliasMissingSemicolon) {
- EXPECT("type meow = f32", R"(test.wgsl:1:16 error: expected ';' for type alias
+ EXPECT("type meow = f32", R"(test.wgsl:1:16 error: expected ';' for type alias
type meow = f32
^
)");
}
TEST_F(ParserImplErrorTest, GlobalDeclVarArrayMissingLessThan) {
- EXPECT("var i : array;",
- R"(test.wgsl:1:14 error: expected '<' for array declaration
+ EXPECT("var i : array;",
+ R"(test.wgsl:1:14 error: expected '<' for array declaration
var i : array;
^
)");
}
TEST_F(ParserImplErrorTest, GlobalDeclVarArrayMissingGreaterThan) {
- EXPECT("var i : array<u32, 3;",
- R"(test.wgsl:1:21 error: expected '>' for array declaration
+ EXPECT("var i : array<u32, 3;",
+ R"(test.wgsl:1:21 error: expected '>' for array declaration
var i : array<u32, 3;
^
)");
}
TEST_F(ParserImplErrorTest, GlobalDeclVarArrayMissingType) {
- EXPECT("var i : array<1, 3>;",
- R"(test.wgsl:1:15 error: invalid type for array declaration
+ EXPECT("var i : array<1, 3>;",
+ R"(test.wgsl:1:15 error: invalid type for array declaration
var i : array<1, 3>;
^
)");
}
TEST_F(ParserImplErrorTest, GlobalDeclVarArrayMissingSize) {
- EXPECT("var i : array<u32, >;",
- R"(test.wgsl:1:20 error: expected array size expression
+ EXPECT("var i : array<u32, >;",
+ R"(test.wgsl:1:20 error: expected array size expression
var i : array<u32, >;
^
)");
}
TEST_F(ParserImplErrorTest, GlobalDeclVarArrayInvalidSize) {
- EXPECT("var i : array<u32, !>;",
- R"(test.wgsl:1:20 error: expected array size expression
+ EXPECT("var i : array<u32, !>;",
+ R"(test.wgsl:1:20 error: expected array size expression
var i : array<u32, !>;
^
)");
}
TEST_F(ParserImplErrorTest, GlobalDeclVarAttrListMissingComma) {
- EXPECT("@location(1) group(2) var i : i32;",
- R"(test.wgsl:1:14 error: expected declaration after attributes
+ EXPECT("@location(1) group(2) var i : i32;",
+ R"(test.wgsl:1:14 error: expected declaration after attributes
@location(1) group(2) var i : i32;
^^^^^
@@ -801,275 +800,272 @@ test.wgsl:1:19 error: unexpected token
}
TEST_F(ParserImplErrorTest, GlobalDeclVarAttrLocationMissingLParen) {
- EXPECT("@location 1) var i : i32;",
- R"(test.wgsl:1:11 error: expected '(' for location attribute
+ EXPECT("@location 1) var i : i32;",
+ R"(test.wgsl:1:11 error: expected '(' for location attribute
@location 1) var i : i32;
^
)");
}
TEST_F(ParserImplErrorTest, GlobalDeclVarAttrLocationMissingRParen) {
- EXPECT("@location (1 var i : i32;",
- R"(test.wgsl:1:14 error: expected ')' for location attribute
+ EXPECT("@location (1 var i : i32;",
+ R"(test.wgsl:1:14 error: expected ')' for location attribute
@location (1 var i : i32;
^^^
)");
}
TEST_F(ParserImplErrorTest, GlobalDeclVarAttrLocationInvalidValue) {
- EXPECT(
- "@location(x) var i : i32;",
- R"(test.wgsl:1:11 error: expected signed integer literal for location attribute
+ EXPECT("@location(x) var i : i32;",
+ R"(test.wgsl:1:11 error: expected signed integer literal for location attribute
@location(x) var i : i32;
^
)");
}
TEST_F(ParserImplErrorTest, GlobalDeclVarAttrBuiltinMissingLParen) {
- EXPECT("@builtin position) var i : i32;",
- R"(test.wgsl:1:10 error: expected '(' for builtin attribute
+ EXPECT("@builtin position) var i : i32;",
+ R"(test.wgsl:1:10 error: expected '(' for builtin attribute
@builtin position) var i : i32;
^^^^^^^^
)");
}
TEST_F(ParserImplErrorTest, GlobalDeclVarAttrBuiltinMissingRParen) {
- EXPECT("@builtin(position var i : i32;",
- R"(test.wgsl:1:19 error: expected ')' for builtin attribute
+ EXPECT("@builtin(position var i : i32;",
+ R"(test.wgsl:1:19 error: expected ')' for builtin attribute
@builtin(position var i : i32;
^^^
)");
}
TEST_F(ParserImplErrorTest, GlobalDeclVarAttrBuiltinInvalidIdentifer) {
- EXPECT("@builtin(1) var i : i32;",
- R"(test.wgsl:1:10 error: expected identifier for builtin
+ EXPECT("@builtin(1) var i : i32;",
+ R"(test.wgsl:1:10 error: expected identifier for builtin
@builtin(1) var i : i32;
^
)");
}
TEST_F(ParserImplErrorTest, GlobalDeclVarAttrBuiltinInvalidValue) {
- EXPECT("@builtin(x) var i : i32;",
- R"(test.wgsl:1:10 error: invalid value for builtin attribute
+ EXPECT("@builtin(x) var i : i32;",
+ R"(test.wgsl:1:10 error: invalid value for builtin attribute
@builtin(x) var i : i32;
^
)");
}
TEST_F(ParserImplErrorTest, GlobalDeclVarAttrBindingMissingLParen) {
- EXPECT("@binding 1) var i : i32;",
- R"(test.wgsl:1:10 error: expected '(' for binding attribute
+ EXPECT("@binding 1) var i : i32;",
+ R"(test.wgsl:1:10 error: expected '(' for binding attribute
@binding 1) var i : i32;
^
)");
}
TEST_F(ParserImplErrorTest, GlobalDeclVarAttrBindingMissingRParen) {
- EXPECT("@binding(1 var i : i32;",
- R"(test.wgsl:1:12 error: expected ')' for binding attribute
+ EXPECT("@binding(1 var i : i32;",
+ R"(test.wgsl:1:12 error: expected ')' for binding attribute
@binding(1 var i : i32;
^^^
)");
}
TEST_F(ParserImplErrorTest, GlobalDeclVarAttrBindingInvalidValue) {
- EXPECT(
- "@binding(x) var i : i32;",
- R"(test.wgsl:1:10 error: expected signed integer literal for binding attribute
+ EXPECT("@binding(x) var i : i32;",
+ R"(test.wgsl:1:10 error: expected signed integer literal for binding attribute
@binding(x) var i : i32;
^
)");
}
TEST_F(ParserImplErrorTest, GlobalDeclVarAttrGroupMissingLParen) {
- EXPECT("@group 1) var i : i32;",
- R"(test.wgsl:1:8 error: expected '(' for group attribute
+ EXPECT("@group 1) var i : i32;",
+ R"(test.wgsl:1:8 error: expected '(' for group attribute
@group 1) var i : i32;
^
)");
}
TEST_F(ParserImplErrorTest, GlobalDeclVarAttrGroupMissingRParen) {
- EXPECT("@group(1 var i : i32;",
- R"(test.wgsl:1:10 error: expected ')' for group attribute
+ EXPECT("@group(1 var i : i32;",
+ R"(test.wgsl:1:10 error: expected ')' for group attribute
@group(1 var i : i32;
^^^
)");
}
TEST_F(ParserImplErrorTest, GlobalDeclVarAttrBindingGroupValue) {
- EXPECT(
- "@group(x) var i : i32;",
- R"(test.wgsl:1:8 error: expected signed integer literal for group attribute
+ EXPECT("@group(x) var i : i32;",
+ R"(test.wgsl:1:8 error: expected signed integer literal for group attribute
@group(x) var i : i32;
^
)");
}
TEST_F(ParserImplErrorTest, GlobalDeclVarInvalidIdentifier) {
- EXPECT("var ^ : mat4x4;",
- R"(test.wgsl:1:5 error: expected identifier for variable declaration
+ EXPECT("var ^ : mat4x4;",
+ R"(test.wgsl:1:5 error: expected identifier for variable declaration
var ^ : mat4x4;
^
)");
}
TEST_F(ParserImplErrorTest, GlobalDeclVarMatrixMissingGreaterThan) {
- EXPECT("var i : mat4x4<u32;", R"(test.wgsl:1:19 error: expected '>' for matrix
+ EXPECT("var i : mat4x4<u32;", R"(test.wgsl:1:19 error: expected '>' for matrix
var i : mat4x4<u32;
^
)");
}
TEST_F(ParserImplErrorTest, GlobalDeclVarMatrixMissingType) {
- EXPECT("var i : mat4x4<1>;", R"(test.wgsl:1:16 error: invalid type for matrix
+ EXPECT("var i : mat4x4<1>;", R"(test.wgsl:1:16 error: invalid type for matrix
var i : mat4x4<1>;
^
)");
}
TEST_F(ParserImplErrorTest, GlobalDeclVarMissingSemicolon) {
- EXPECT("var i : i32",
- R"(test.wgsl:1:12 error: expected ';' for variable declaration
+ EXPECT("var i : i32",
+ R"(test.wgsl:1:12 error: expected ';' for variable declaration
var i : i32
^
)");
}
TEST_F(ParserImplErrorTest, GlobalDeclVarPtrMissingLessThan) {
- EXPECT("var i : ptr;",
- R"(test.wgsl:1:12 error: expected '<' for ptr declaration
+ EXPECT("var i : ptr;",
+ R"(test.wgsl:1:12 error: expected '<' for ptr declaration
var i : ptr;
^
)");
}
TEST_F(ParserImplErrorTest, GlobalDeclVarPtrMissingGreaterThan) {
- EXPECT("var i : ptr<private, u32;",
- R"(test.wgsl:1:25 error: expected '>' for ptr declaration
+ EXPECT("var i : ptr<private, u32;",
+ R"(test.wgsl:1:25 error: expected '>' for ptr declaration
var i : ptr<private, u32;
^
)");
}
TEST_F(ParserImplErrorTest, GlobalDeclVarPtrMissingComma) {
- EXPECT("var i : ptr<private u32>;",
- R"(test.wgsl:1:21 error: expected ',' for ptr declaration
+ EXPECT("var i : ptr<private u32>;",
+ R"(test.wgsl:1:21 error: expected ',' for ptr declaration
var i : ptr<private u32>;
^^^
)");
}
TEST_F(ParserImplErrorTest, GlobalDeclVarPtrMissingStorageClass) {
- EXPECT("var i : ptr<meow, u32>;",
- R"(test.wgsl:1:13 error: invalid storage class for ptr declaration
+ EXPECT("var i : ptr<meow, u32>;",
+ R"(test.wgsl:1:13 error: invalid storage class for ptr declaration
var i : ptr<meow, u32>;
^^^^
)");
}
TEST_F(ParserImplErrorTest, GlobalDeclVarPtrMissingType) {
- EXPECT("var i : ptr<private, 1>;",
- R"(test.wgsl:1:22 error: invalid type for ptr declaration
+ EXPECT("var i : ptr<private, 1>;",
+ R"(test.wgsl:1:22 error: invalid type for ptr declaration
var i : ptr<private, 1>;
^
)");
}
TEST_F(ParserImplErrorTest, GlobalDeclVarAtomicMissingLessThan) {
- EXPECT("var i : atomic;",
- R"(test.wgsl:1:15 error: expected '<' for atomic declaration
+ EXPECT("var i : atomic;",
+ R"(test.wgsl:1:15 error: expected '<' for atomic declaration
var i : atomic;
^
)");
}
TEST_F(ParserImplErrorTest, GlobalDeclVarAtomicMissingGreaterThan) {
- EXPECT("var i : atomic<u32 x;",
- R"(test.wgsl:1:20 error: expected '>' for atomic declaration
+ EXPECT("var i : atomic<u32 x;",
+ R"(test.wgsl:1:20 error: expected '>' for atomic declaration
var i : atomic<u32 x;
^
)");
}
TEST_F(ParserImplErrorTest, GlobalDeclVarStorageDeclInvalidClass) {
- EXPECT("var<fish> i : i32",
- R"(test.wgsl:1:5 error: invalid storage class for variable declaration
+ EXPECT("var<fish> i : i32",
+ R"(test.wgsl:1:5 error: invalid storage class for variable declaration
var<fish> i : i32
^^^^
)");
}
TEST_F(ParserImplErrorTest, GlobalDeclVarStorageDeclMissingGThan) {
- EXPECT("var<private i : i32",
- R"(test.wgsl:1:13 error: expected '>' for variable declaration
+ EXPECT("var<private i : i32",
+ R"(test.wgsl:1:13 error: expected '>' for variable declaration
var<private i : i32
^
)");
}
TEST_F(ParserImplErrorTest, GlobalDeclVarVectorMissingGreaterThan) {
- EXPECT("var i : vec3<u32;", R"(test.wgsl:1:17 error: expected '>' for vector
+ EXPECT("var i : vec3<u32;", R"(test.wgsl:1:17 error: expected '>' for vector
var i : vec3<u32;
^
)");
}
TEST_F(ParserImplErrorTest, GlobalDeclVarVectorMissingType) {
- EXPECT("var i : vec3<1>;", R"(test.wgsl:1:14 error: invalid type for vector
+ EXPECT("var i : vec3<1>;", R"(test.wgsl:1:14 error: invalid type for vector
var i : vec3<1>;
^
)");
}
TEST_F(ParserImplErrorTest, IfStmtMissingRParen) {
- EXPECT("fn f() { if (true {} }", R"(test.wgsl:1:19 error: expected ')'
+ EXPECT("fn f() { if (true {} }", R"(test.wgsl:1:19 error: expected ')'
fn f() { if (true {} }
^
)");
}
TEST_F(ParserImplErrorTest, IfStmtInvalidCond) {
- EXPECT("fn f() { if (>) {} }",
- R"(test.wgsl:1:14 error: unable to parse expression
+ EXPECT("fn f() { if (>) {} }",
+ R"(test.wgsl:1:14 error: unable to parse expression
fn f() { if (>) {} }
^
)");
}
TEST_F(ParserImplErrorTest, LogicalAndInvalidExpr) {
- EXPECT("fn f() { return 1 && >; }",
- R"(test.wgsl:1:22 error: unable to parse right side of && expression
+ EXPECT("fn f() { return 1 && >; }",
+ R"(test.wgsl:1:22 error: unable to parse right side of && expression
fn f() { return 1 && >; }
^
)");
}
TEST_F(ParserImplErrorTest, LogicalOrInvalidExpr) {
- EXPECT("fn f() { return 1 || >; }",
- R"(test.wgsl:1:22 error: unable to parse right side of || expression
+ EXPECT("fn f() { return 1 || >; }",
+ R"(test.wgsl:1:22 error: unable to parse right side of || expression
fn f() { return 1 || >; }
^
)");
}
TEST_F(ParserImplErrorTest, LoopMissingLBrace) {
- EXPECT("fn f() { loop }", R"(test.wgsl:1:15 error: expected '{' for loop
+ EXPECT("fn f() { loop }", R"(test.wgsl:1:15 error: expected '{' for loop
fn f() { loop }
^
)");
}
TEST_F(ParserImplErrorTest, LoopMissingRBrace) {
- EXPECT("fn f() { loop {", R"(test.wgsl:1:16 error: expected '}' for loop
+ EXPECT("fn f() { loop {", R"(test.wgsl:1:16 error: expected '}' for loop
fn f() { loop {
^
)");
}
TEST_F(ParserImplErrorTest, MaxErrorsReached) {
- EXPECT("x; x; x; x; x; x; x; x;", R"(test.wgsl:1:1 error: unexpected token
+ EXPECT("x; x; x; x; x; x; x; x;", R"(test.wgsl:1:1 error: unexpected token
x; x; x; x; x; x; x; x;
^
@@ -1093,160 +1089,160 @@ test.wgsl error: stopping after 5 errors)");
}
TEST_F(ParserImplErrorTest, MemberExprMissingIdentifier) {
- EXPECT("fn f() { x = a.; }",
- R"(test.wgsl:1:16 error: expected identifier for member accessor
+ EXPECT("fn f() { x = a.; }",
+ R"(test.wgsl:1:16 error: expected identifier for member accessor
fn f() { x = a.; }
^
)");
}
TEST_F(ParserImplErrorTest, MultiplicativeInvalidExpr) {
- EXPECT("fn f() { return 1.0 * <; }",
- R"(test.wgsl:1:23 error: unable to parse right side of * expression
+ EXPECT("fn f() { return 1.0 * <; }",
+ R"(test.wgsl:1:23 error: unable to parse right side of * expression
fn f() { return 1.0 * <; }
^
)");
}
TEST_F(ParserImplErrorTest, OrInvalidExpr) {
- EXPECT("fn f() { return 1 | >; }",
- R"(test.wgsl:1:21 error: unable to parse right side of | expression
+ EXPECT("fn f() { return 1 | >; }",
+ R"(test.wgsl:1:21 error: unable to parse right side of | expression
fn f() { return 1 | >; }
^
)");
}
TEST_F(ParserImplErrorTest, PostfixIncrementAsExpr) {
- EXPECT("fn f() { var x : i32; let y = x++; }",
- R"(test.wgsl:1:32 error: expected ';' for variable declaration
+ EXPECT("fn f() { var x : i32; let y = x++; }",
+ R"(test.wgsl:1:32 error: expected ';' for variable declaration
fn f() { var x : i32; let y = x++; }
^^
)");
}
TEST_F(ParserImplErrorTest, RelationalInvalidExpr) {
- EXPECT("fn f() { return 1 < >; }",
- R"(test.wgsl:1:21 error: unable to parse right side of < expression
+ EXPECT("fn f() { return 1 < >; }",
+ R"(test.wgsl:1:21 error: unable to parse right side of < expression
fn f() { return 1 < >; }
^
)");
}
TEST_F(ParserImplErrorTest, ReturnStmtMissingSemicolon) {
- EXPECT("fn f() { return }",
- R"(test.wgsl:1:17 error: expected ';' for return statement
+ EXPECT("fn f() { return }",
+ R"(test.wgsl:1:17 error: expected ';' for return statement
fn f() { return }
^
)");
}
TEST_F(ParserImplErrorTest, ShiftInvalidExpr) {
- EXPECT("fn f() { return 1 << >; }",
- R"(test.wgsl:1:22 error: unable to parse right side of << expression
+ EXPECT("fn f() { return 1 << >; }",
+ R"(test.wgsl:1:22 error: unable to parse right side of << expression
fn f() { return 1 << >; }
^
)");
}
TEST_F(ParserImplErrorTest, SwitchStmtMissingLBrace) {
- EXPECT("fn f() { switch(1) }",
- R"(test.wgsl:1:20 error: expected '{' for switch statement
+ EXPECT("fn f() { switch(1) }",
+ R"(test.wgsl:1:20 error: expected '{' for switch statement
fn f() { switch(1) }
^
)");
}
TEST_F(ParserImplErrorTest, SwitchStmtMissingRBrace) {
- EXPECT("fn f() { switch(1) {",
- R"(test.wgsl:1:21 error: expected '}' for switch statement
+ EXPECT("fn f() { switch(1) {",
+ R"(test.wgsl:1:21 error: expected '}' for switch statement
fn f() { switch(1) {
^
)");
}
TEST_F(ParserImplErrorTest, SwitchStmtInvalidCase) {
- EXPECT("fn f() { switch(1) { case ^: } }",
- R"(test.wgsl:1:27 error: unable to parse case selectors
+ EXPECT("fn f() { switch(1) { case ^: } }",
+ R"(test.wgsl:1:27 error: unable to parse case selectors
fn f() { switch(1) { case ^: } }
^
)");
}
TEST_F(ParserImplErrorTest, SwitchStmtInvalidCase2) {
- EXPECT("fn f() { switch(1) { case false: } }",
- R"(test.wgsl:1:27 error: invalid case selector must be an integer value
+ EXPECT("fn f() { switch(1) { case false: } }",
+ R"(test.wgsl:1:27 error: invalid case selector must be an integer value
fn f() { switch(1) { case false: } }
^^^^^
)");
}
TEST_F(ParserImplErrorTest, SwitchStmtCaseMissingLBrace) {
- EXPECT("fn f() { switch(1) { case 1: } }",
- R"(test.wgsl:1:30 error: expected '{' for case statement
+ EXPECT("fn f() { switch(1) { case 1: } }",
+ R"(test.wgsl:1:30 error: expected '{' for case statement
fn f() { switch(1) { case 1: } }
^
)");
}
TEST_F(ParserImplErrorTest, SwitchStmtCaseMissingRBrace) {
- EXPECT("fn f() { switch(1) { case 1: {",
- R"(test.wgsl:1:31 error: expected '}' for case statement
+ EXPECT("fn f() { switch(1) { case 1: {",
+ R"(test.wgsl:1:31 error: expected '}' for case statement
fn f() { switch(1) { case 1: {
^
)");
}
TEST_F(ParserImplErrorTest, SwitchStmtCaseFallthroughMissingSemicolon) {
- EXPECT("fn f() { switch(1) { case 1: { fallthrough } case 2: {} } }",
- R"(test.wgsl:1:44 error: expected ';' for fallthrough statement
+ EXPECT("fn f() { switch(1) { case 1: { fallthrough } case 2: {} } }",
+ R"(test.wgsl:1:44 error: expected ';' for fallthrough statement
fn f() { switch(1) { case 1: { fallthrough } case 2: {} } }
^
)");
}
TEST_F(ParserImplErrorTest, VarStmtMissingSemicolon) {
- EXPECT("fn f() { var a : u32 }",
- R"(test.wgsl:1:22 error: expected ';' for variable declaration
+ EXPECT("fn f() { var a : u32 }",
+ R"(test.wgsl:1:22 error: expected ';' for variable declaration
fn f() { var a : u32 }
^
)");
}
TEST_F(ParserImplErrorTest, VarStmtInvalidAssignment) {
- EXPECT("fn f() { var a : u32 = >; }",
- R"(test.wgsl:1:24 error: missing constructor for variable declaration
+ EXPECT("fn f() { var a : u32 = >; }",
+ R"(test.wgsl:1:24 error: missing constructor for variable declaration
fn f() { var a : u32 = >; }
^
)");
}
TEST_F(ParserImplErrorTest, UnaryInvalidExpr) {
- EXPECT("fn f() { return !<; }",
- R"(test.wgsl:1:18 error: unable to parse right side of ! expression
+ EXPECT("fn f() { return !<; }",
+ R"(test.wgsl:1:18 error: unable to parse right side of ! expression
fn f() { return !<; }
^
)");
}
TEST_F(ParserImplErrorTest, UnexpectedToken) {
- EXPECT("unexpected", R"(test.wgsl:1:1 error: unexpected token
+ EXPECT("unexpected", R"(test.wgsl:1:1 error: unexpected token
unexpected
^^^^^^^^^^
)");
}
TEST_F(ParserImplErrorTest, XorInvalidExpr) {
- EXPECT("fn f() { return 1 ^ >; }",
- R"(test.wgsl:1:21 error: unable to parse right side of ^ expression
+ EXPECT("fn f() { return 1 ^ >; }",
+ R"(test.wgsl:1:21 error: unable to parse right side of ^ expression
fn f() { return 1 ^ >; }
^
)");
}
TEST_F(ParserImplErrorTest, InvalidUTF8) {
- EXPECT("fn fu\xd0nc() {}",
- "test.wgsl:1:4 error: invalid UTF-8\n"
- "fn fu\xD0nc() {}\n");
+ EXPECT("fn fu\xd0nc() {}",
+ "test.wgsl:1:4 error: invalid UTF-8\n"
+ "fn fu\xD0nc() {}\n");
}
} // namespace
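The EXPECT(SOURCE, EXPECTED) macro reformatted above is what every case in parser_impl_error_msg_test.cc runs through: parse the WGSL source, require the parse to fail, and compare the formatted diagnostics against a raw-string expectation. The following is an illustrative, self-contained sketch of that do/while(false) macro pattern only; FakeParser and EXPECT_PARSE_ERROR are hypothetical stand-ins, not Tint's ParserImpl or the macro in this commit.

// Illustrative sketch only -- not part of this commit.
#include <gtest/gtest.h>
#include <string>
#include <utility>

struct FakeParser {
    explicit FakeParser(std::string src) : source(std::move(src)) {}
    // Always reports failure so the sketch exercises the diagnostic path.
    bool Parse() { return false; }
    // Stand-in for diag::Formatter(formatter_style).format(diagnostics).
    std::string FormattedDiagnostics() const {
        return "test.wgsl:1:1 error: unexpected token\n" + source + "\n";
    }
    std::string source;
};

#define EXPECT_PARSE_ERROR(SOURCE, EXPECTED)           \
    do {                                               \
        std::string source = SOURCE;                   \
        std::string expected = EXPECTED;               \
        FakeParser p(source);                          \
        EXPECT_FALSE(p.Parse());                       \
        EXPECT_EQ(expected, p.FormattedDiagnostics()); \
    } while (false)

TEST(ParserErrorSketchTest, UnexpectedToken) {
    EXPECT_PARSE_ERROR("unexpected",
                       "test.wgsl:1:1 error: unexpected token\nunexpected\n");
}

Wrapping the body in do { ... } while (false) lets the macro behave as a single statement, so it composes safely with if/else in the calling test.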
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_error_resync_test.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_error_resync_test.cc
index 69646a48318..9e37b51801e 100644
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_error_resync_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_error_resync_test.cc
@@ -17,31 +17,31 @@
namespace tint::reader::wgsl {
namespace {
-const diag::Formatter::Style formatter_style{
- /* print_file: */ true, /* print_severity: */ true,
- /* print_line: */ true, /* print_newline_at_end: */ false};
+const diag::Formatter::Style formatter_style{/* print_file: */ true, /* print_severity: */ true,
+ /* print_line: */ true,
+ /* print_newline_at_end: */ false};
class ParserImplErrorResyncTest : public ParserImplTest {};
-#define EXPECT(SOURCE, EXPECTED) \
- do { \
- std::string source = SOURCE; \
- std::string expected = EXPECTED; \
- auto p = parser(source); \
- EXPECT_EQ(false, p->Parse()); \
- auto diagnostics = p->builder().Diagnostics(); \
- EXPECT_EQ(true, diagnostics.contains_errors()); \
- EXPECT_EQ(expected, diag::Formatter(formatter_style).format(diagnostics)); \
- } while (false)
+#define EXPECT(SOURCE, EXPECTED) \
+ do { \
+ std::string source = SOURCE; \
+ std::string expected = EXPECTED; \
+ auto p = parser(source); \
+ EXPECT_EQ(false, p->Parse()); \
+ auto diagnostics = p->builder().Diagnostics(); \
+ EXPECT_EQ(true, diagnostics.contains_errors()); \
+ EXPECT_EQ(expected, diag::Formatter(formatter_style).format(diagnostics)); \
+ } while (false)
TEST_F(ParserImplErrorResyncTest, BadFunctionDecls) {
- EXPECT(R"(
+ EXPECT(R"(
fn .() -> . {}
fn x(.) {}
@_ fn -> {}
fn good() {}
)",
- R"(test.wgsl:2:4 error: expected identifier for function declaration
+ R"(test.wgsl:2:4 error: expected identifier for function declaration
fn .() -> . {}
^
@@ -64,7 +64,7 @@ test.wgsl:4:7 error: expected identifier for function declaration
}
TEST_F(ParserImplErrorResyncTest, AssignmentStatement) {
- EXPECT(R"(
+ EXPECT(R"(
fn f() {
blah blah blah blah;
good = 1;
@@ -73,7 +73,7 @@ fn f() {
good = 1;
}
)",
- R"(test.wgsl:3:8 error: expected '=' for assignment
+ R"(test.wgsl:3:8 error: expected '=' for assignment
blah blah blah blah;
^^^^
@@ -88,14 +88,14 @@ test.wgsl:6:7 error: unable to parse right side of assignment
}
TEST_F(ParserImplErrorResyncTest, DiscardStatement) {
- EXPECT(R"(
+ EXPECT(R"(
fn f() {
discard blah blah blah;
a = 1;
discard blah blah blah;
}
)",
- R"(test.wgsl:3:11 error: expected ';' for discard statement
+ R"(test.wgsl:3:11 error: expected ';' for discard statement
discard blah blah blah;
^^^^
@@ -106,7 +106,7 @@ test.wgsl:5:11 error: expected ';' for discard statement
}
TEST_F(ParserImplErrorResyncTest, StructMembers) {
- EXPECT(R"(
+ EXPECT(R"(
struct S {
blah blah blah,
a : i32,
@@ -116,7 +116,7 @@ struct S {
c : i32,
}
)",
- R"(test.wgsl:3:10 error: expected ':' for struct member
+ R"(test.wgsl:3:10 error: expected ':' for struct member
blah blah blah,
^^^^
@@ -135,14 +135,14 @@ test.wgsl:7:6 error: expected attribute
// the outer resynchronize() is looking for a terminating '}' for the function
// scope.
TEST_F(ParserImplErrorResyncTest, NestedSyncPoints) {
- EXPECT(R"(
+ EXPECT(R"(
fn f() {
x = 1;
discard
}
struct S { blah };
)",
- R"(test.wgsl:5:1 error: expected ';' for discard statement
+ R"(test.wgsl:5:1 error: expected ';' for discard statement
}
^
@@ -153,14 +153,14 @@ struct S { blah };
}
TEST_F(ParserImplErrorResyncTest, BracketCounting) {
- EXPECT(
- R"(
+ EXPECT(
+ R"(
fn f(x(((())))) {
meow = {{{}}}
}
struct S { blah };
)",
- R"(test.wgsl:2:7 error: expected ':' for parameter
+ R"(test.wgsl:2:7 error: expected ':' for parameter
fn f(x(((())))) {
^
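The resync tests above check that the parser reports several independent errors from one source and keeps going. As a loose illustration of the general "skip to the next statement boundary after an error" idea only (Tint's actual resynchronize() is more involved and also counts brackets), a hypothetical sketch:

// Illustrative sketch only -- not part of this commit. Treat '?' as an
// unparsable token and recover by skipping to the next ';' or '}'.
#include <cassert>
#include <cstddef>
#include <string>
#include <vector>

std::vector<std::string> CollectErrors(const std::string& src) {
    std::vector<std::string> errors;
    for (std::size_t i = 0; i < src.size(); ++i) {
        if (src[i] == '?') {
            errors.push_back("error at offset " + std::to_string(i));
            // Resynchronize: advance to the next statement boundary.
            while (i < src.size() && src[i] != ';' && src[i] != '}') {
                ++i;
            }
        }
    }
    return errors;
}

int main() {
    // Two bad statements with a good one in between: both errors are reported.
    assert(CollectErrors("? bad; good = 1; ? bad;").size() == 2u);
    return 0;
}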
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_exclusive_or_expression_test.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_exclusive_or_expression_test.cc
index f9fb52a8159..2994ae8eca2 100644
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_exclusive_or_expression_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_exclusive_or_expression_test.cc
@@ -18,52 +18,57 @@ namespace tint::reader::wgsl {
namespace {
TEST_F(ParserImplTest, ExclusiveOrExpression_Parses) {
- auto p = parser("a ^ true");
- auto e = p->exclusive_or_expression();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
+ auto p = parser("a ^ true");
+ auto e = p->exclusive_or_expression();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
- ASSERT_TRUE(e->Is<ast::BinaryExpression>());
- auto* rel = e->As<ast::BinaryExpression>();
- EXPECT_EQ(ast::BinaryOp::kXor, rel->op);
+ EXPECT_EQ(e->source.range.begin.line, 1u);
+ EXPECT_EQ(e->source.range.begin.column, 3u);
+ EXPECT_EQ(e->source.range.end.line, 1u);
+ EXPECT_EQ(e->source.range.end.column, 4u);
- ASSERT_TRUE(rel->lhs->Is<ast::IdentifierExpression>());
- auto* ident = rel->lhs->As<ast::IdentifierExpression>();
- EXPECT_EQ(ident->symbol, p->builder().Symbols().Get("a"));
+ ASSERT_TRUE(e->Is<ast::BinaryExpression>());
+ auto* rel = e->As<ast::BinaryExpression>();
+ EXPECT_EQ(ast::BinaryOp::kXor, rel->op);
- ASSERT_TRUE(rel->rhs->Is<ast::BoolLiteralExpression>());
- ASSERT_TRUE(rel->rhs->As<ast::BoolLiteralExpression>()->value);
+ ASSERT_TRUE(rel->lhs->Is<ast::IdentifierExpression>());
+ auto* ident = rel->lhs->As<ast::IdentifierExpression>();
+ EXPECT_EQ(ident->symbol, p->builder().Symbols().Get("a"));
+
+ ASSERT_TRUE(rel->rhs->Is<ast::BoolLiteralExpression>());
+ ASSERT_TRUE(rel->rhs->As<ast::BoolLiteralExpression>()->value);
}
TEST_F(ParserImplTest, ExclusiveOrExpression_InvalidLHS) {
- auto p = parser("if (a) {} ^ true");
- auto e = p->exclusive_or_expression();
- EXPECT_FALSE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- EXPECT_EQ(e.value, nullptr);
+ auto p = parser("if (a) {} ^ true");
+ auto e = p->exclusive_or_expression();
+ EXPECT_FALSE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ EXPECT_EQ(e.value, nullptr);
}
TEST_F(ParserImplTest, ExclusiveOrExpression_InvalidRHS) {
- auto p = parser("true ^ if (a) {}");
- auto e = p->exclusive_or_expression();
- EXPECT_FALSE(e.matched);
- EXPECT_TRUE(e.errored);
- EXPECT_EQ(e.value, nullptr);
- ASSERT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:8: unable to parse right side of ^ expression");
+ auto p = parser("true ^ if (a) {}");
+ auto e = p->exclusive_or_expression();
+ EXPECT_FALSE(e.matched);
+ EXPECT_TRUE(e.errored);
+ EXPECT_EQ(e.value, nullptr);
+ ASSERT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:8: unable to parse right side of ^ expression");
}
TEST_F(ParserImplTest, ExclusiveOrExpression_NoOr_ReturnsLHS) {
- auto p = parser("a true");
- auto e = p->exclusive_or_expression();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
- ASSERT_TRUE(e->Is<ast::IdentifierExpression>());
+ auto p = parser("a true");
+ auto e = p->exclusive_or_expression();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
+ ASSERT_TRUE(e->Is<ast::IdentifierExpression>());
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_external_texture_type_test.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_external_texture_test.cc
index b567634d266..4bd5cb17be1 100644
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_external_texture_type_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_external_texture_test.cc
@@ -18,19 +18,19 @@ namespace tint::reader::wgsl {
namespace {
TEST_F(ParserImplTest, ExternalTextureType_Invalid) {
- auto p = parser("1234");
- auto t = p->external_texture_type();
- EXPECT_FALSE(t.matched);
- EXPECT_FALSE(t.errored);
- EXPECT_FALSE(p->has_error());
+ auto p = parser("1234");
+ auto t = p->external_texture();
+ EXPECT_FALSE(t.matched);
+ EXPECT_FALSE(t.errored);
+ EXPECT_FALSE(p->has_error());
}
TEST_F(ParserImplTest, ExternalTextureType) {
- auto p = parser("texture_external");
- auto t = p->external_texture_type();
- EXPECT_TRUE(t.matched);
- EXPECT_FALSE(t.errored);
- EXPECT_EQ(t.value->source.range, (Source::Range{{1u, 1u}, {1u, 17u}}));
+ auto p = parser("texture_external");
+ auto t = p->external_texture();
+ EXPECT_TRUE(t.matched);
+ EXPECT_FALSE(t.errored);
+ EXPECT_EQ(t.value->source.range, (Source::Range{{1u, 1u}, {1u, 17u}}));
}
} // namespace
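Besides the rename from external_texture_type() to external_texture(), the test above asserts the whole Source::Range of the parsed texture_external token with a single EXPECT_EQ. The comparison style is plain aggregate equality; a hypothetical stand-alone sketch (Location and Range here are stand-ins, not Tint's Source types):

// Illustrative sketch only -- not part of this commit.
#include <gtest/gtest.h>
#include <cstdint>

// Stand-ins for Source::Location / Source::Range, only to show the aggregate
// equality that the EXPECT_EQ above relies on.
struct Location {
    uint32_t line;
    uint32_t column;
    bool operator==(const Location& o) const { return line == o.line && column == o.column; }
};

struct Range {
    Location begin;
    Location end;
    bool operator==(const Range& o) const { return begin == o.begin && end == o.end; }
};

TEST(SourceRangeSketchTest, CoversWholeKeyword) {
    // "texture_external" is 16 characters, so it spans columns 1..17 on line 1.
    Range r{{1u, 1u}, {1u, 17u}};
    EXPECT_EQ(r, (Range{{1u, 1u}, {1u, 17u}}));
}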
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_for_stmt_test.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_for_stmt_test.cc
index b62ad717835..26f3298fd5f 100644
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_for_stmt_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_for_stmt_test.cc
@@ -23,298 +23,298 @@ using ForStmtTest = ParserImplTest;
// Test an empty for loop.
TEST_F(ForStmtTest, Empty) {
- auto p = parser("for (;;) { }");
- auto fl = p->for_stmt();
- EXPECT_FALSE(p->has_error()) << p->error();
- EXPECT_FALSE(fl.errored);
- ASSERT_TRUE(fl.matched);
- EXPECT_EQ(fl->initializer, nullptr);
- EXPECT_EQ(fl->condition, nullptr);
- EXPECT_EQ(fl->continuing, nullptr);
- EXPECT_TRUE(fl->body->Empty());
+ auto p = parser("for (;;) { }");
+ auto fl = p->for_stmt();
+ EXPECT_FALSE(p->has_error()) << p->error();
+ EXPECT_FALSE(fl.errored);
+ ASSERT_TRUE(fl.matched);
+ EXPECT_EQ(fl->initializer, nullptr);
+ EXPECT_EQ(fl->condition, nullptr);
+ EXPECT_EQ(fl->continuing, nullptr);
+ EXPECT_TRUE(fl->body->Empty());
}
// Test a for loop with non-empty body.
TEST_F(ForStmtTest, Body) {
- auto p = parser("for (;;) { discard; }");
- auto fl = p->for_stmt();
- EXPECT_FALSE(p->has_error()) << p->error();
- EXPECT_FALSE(fl.errored);
- ASSERT_TRUE(fl.matched);
- EXPECT_EQ(fl->initializer, nullptr);
- EXPECT_EQ(fl->condition, nullptr);
- EXPECT_EQ(fl->continuing, nullptr);
- ASSERT_EQ(fl->body->statements.size(), 1u);
- EXPECT_TRUE(fl->body->statements[0]->Is<ast::DiscardStatement>());
+ auto p = parser("for (;;) { discard; }");
+ auto fl = p->for_stmt();
+ EXPECT_FALSE(p->has_error()) << p->error();
+ EXPECT_FALSE(fl.errored);
+ ASSERT_TRUE(fl.matched);
+ EXPECT_EQ(fl->initializer, nullptr);
+ EXPECT_EQ(fl->condition, nullptr);
+ EXPECT_EQ(fl->continuing, nullptr);
+ ASSERT_EQ(fl->body->statements.size(), 1u);
+ EXPECT_TRUE(fl->body->statements[0]->Is<ast::DiscardStatement>());
}
// Test a for loop declaring a variable in the initializer statement.
TEST_F(ForStmtTest, InitializerStatementDecl) {
- auto p = parser("for (var i: i32 ;;) { }");
- auto fl = p->for_stmt();
- EXPECT_FALSE(p->has_error()) << p->error();
- EXPECT_FALSE(fl.errored);
- ASSERT_TRUE(fl.matched);
- ASSERT_TRUE(Is<ast::VariableDeclStatement>(fl->initializer));
- auto* var = fl->initializer->As<ast::VariableDeclStatement>()->variable;
- EXPECT_FALSE(var->is_const);
- EXPECT_EQ(var->constructor, nullptr);
- EXPECT_EQ(fl->condition, nullptr);
- EXPECT_EQ(fl->continuing, nullptr);
- EXPECT_TRUE(fl->body->Empty());
+ auto p = parser("for (var i: i32 ;;) { }");
+ auto fl = p->for_stmt();
+ EXPECT_FALSE(p->has_error()) << p->error();
+ EXPECT_FALSE(fl.errored);
+ ASSERT_TRUE(fl.matched);
+ ASSERT_TRUE(Is<ast::VariableDeclStatement>(fl->initializer));
+ auto* var = fl->initializer->As<ast::VariableDeclStatement>()->variable;
+ EXPECT_FALSE(var->is_const);
+ EXPECT_EQ(var->constructor, nullptr);
+ EXPECT_EQ(fl->condition, nullptr);
+ EXPECT_EQ(fl->continuing, nullptr);
+ EXPECT_TRUE(fl->body->Empty());
}
// Test a for loop declaring and initializing a variable in the initializer
// statement.
TEST_F(ForStmtTest, InitializerStatementDeclEqual) {
- auto p = parser("for (var i: i32 = 0 ;;) { }");
- auto fl = p->for_stmt();
- EXPECT_FALSE(p->has_error()) << p->error();
- EXPECT_FALSE(fl.errored);
- ASSERT_TRUE(fl.matched);
- ASSERT_TRUE(Is<ast::VariableDeclStatement>(fl->initializer));
- auto* var = fl->initializer->As<ast::VariableDeclStatement>()->variable;
- EXPECT_FALSE(var->is_const);
- EXPECT_NE(var->constructor, nullptr);
- EXPECT_EQ(fl->condition, nullptr);
- EXPECT_EQ(fl->continuing, nullptr);
- EXPECT_TRUE(fl->body->Empty());
+ auto p = parser("for (var i: i32 = 0 ;;) { }");
+ auto fl = p->for_stmt();
+ EXPECT_FALSE(p->has_error()) << p->error();
+ EXPECT_FALSE(fl.errored);
+ ASSERT_TRUE(fl.matched);
+ ASSERT_TRUE(Is<ast::VariableDeclStatement>(fl->initializer));
+ auto* var = fl->initializer->As<ast::VariableDeclStatement>()->variable;
+ EXPECT_FALSE(var->is_const);
+ EXPECT_NE(var->constructor, nullptr);
+ EXPECT_EQ(fl->condition, nullptr);
+ EXPECT_EQ(fl->continuing, nullptr);
+ EXPECT_TRUE(fl->body->Empty());
}
// Test a for loop declaring a const variable in the initializer statement.
TEST_F(ForStmtTest, InitializerStatementConstDecl) {
- auto p = parser("for (let i: i32 = 0 ;;) { }");
- auto fl = p->for_stmt();
- EXPECT_FALSE(p->has_error()) << p->error();
- EXPECT_FALSE(fl.errored);
- ASSERT_TRUE(fl.matched);
- ASSERT_TRUE(Is<ast::VariableDeclStatement>(fl->initializer));
- auto* var = fl->initializer->As<ast::VariableDeclStatement>()->variable;
- EXPECT_TRUE(var->is_const);
- EXPECT_NE(var->constructor, nullptr);
- EXPECT_EQ(fl->condition, nullptr);
- EXPECT_EQ(fl->continuing, nullptr);
- EXPECT_TRUE(fl->body->Empty());
+ auto p = parser("for (let i: i32 = 0 ;;) { }");
+ auto fl = p->for_stmt();
+ EXPECT_FALSE(p->has_error()) << p->error();
+ EXPECT_FALSE(fl.errored);
+ ASSERT_TRUE(fl.matched);
+ ASSERT_TRUE(Is<ast::VariableDeclStatement>(fl->initializer));
+ auto* var = fl->initializer->As<ast::VariableDeclStatement>()->variable;
+ EXPECT_TRUE(var->is_const);
+ EXPECT_NE(var->constructor, nullptr);
+ EXPECT_EQ(fl->condition, nullptr);
+ EXPECT_EQ(fl->continuing, nullptr);
+ EXPECT_TRUE(fl->body->Empty());
}
// Test a for loop assigning a variable in the initializer statement.
TEST_F(ForStmtTest, InitializerStatementAssignment) {
- auto p = parser("for (i = 0 ;;) { }");
- auto fl = p->for_stmt();
- EXPECT_FALSE(p->has_error()) << p->error();
- EXPECT_FALSE(fl.errored);
- ASSERT_TRUE(fl.matched);
- EXPECT_TRUE(Is<ast::AssignmentStatement>(fl->initializer));
- EXPECT_EQ(fl->condition, nullptr);
- EXPECT_EQ(fl->continuing, nullptr);
- EXPECT_TRUE(fl->body->Empty());
+ auto p = parser("for (i = 0 ;;) { }");
+ auto fl = p->for_stmt();
+ EXPECT_FALSE(p->has_error()) << p->error();
+ EXPECT_FALSE(fl.errored);
+ ASSERT_TRUE(fl.matched);
+ EXPECT_TRUE(Is<ast::AssignmentStatement>(fl->initializer));
+ EXPECT_EQ(fl->condition, nullptr);
+ EXPECT_EQ(fl->continuing, nullptr);
+ EXPECT_TRUE(fl->body->Empty());
}
// Test a for loop incrementing a variable in the initializer statement.
TEST_F(ForStmtTest, InitializerStatementIncrement) {
- auto p = parser("for (i++;;) { }");
- auto fl = p->for_stmt();
- EXPECT_FALSE(p->has_error()) << p->error();
- EXPECT_FALSE(fl.errored);
- ASSERT_TRUE(fl.matched);
- EXPECT_TRUE(Is<ast::IncrementDecrementStatement>(fl->initializer));
- EXPECT_EQ(fl->condition, nullptr);
- EXPECT_EQ(fl->continuing, nullptr);
- EXPECT_TRUE(fl->body->Empty());
+ auto p = parser("for (i++;;) { }");
+ auto fl = p->for_stmt();
+ EXPECT_FALSE(p->has_error()) << p->error();
+ EXPECT_FALSE(fl.errored);
+ ASSERT_TRUE(fl.matched);
+ EXPECT_TRUE(Is<ast::IncrementDecrementStatement>(fl->initializer));
+ EXPECT_EQ(fl->condition, nullptr);
+ EXPECT_EQ(fl->continuing, nullptr);
+ EXPECT_TRUE(fl->body->Empty());
}
// Test a for loop calling a function in the initializer statement.
TEST_F(ForStmtTest, InitializerStatementFuncCall) {
- auto p = parser("for (a(b,c) ;;) { }");
- auto fl = p->for_stmt();
- EXPECT_FALSE(p->has_error()) << p->error();
- EXPECT_FALSE(fl.errored);
- ASSERT_TRUE(fl.matched);
- EXPECT_TRUE(Is<ast::CallStatement>(fl->initializer));
- EXPECT_EQ(fl->condition, nullptr);
- EXPECT_EQ(fl->continuing, nullptr);
- EXPECT_TRUE(fl->body->Empty());
+ auto p = parser("for (a(b,c) ;;) { }");
+ auto fl = p->for_stmt();
+ EXPECT_FALSE(p->has_error()) << p->error();
+ EXPECT_FALSE(fl.errored);
+ ASSERT_TRUE(fl.matched);
+ EXPECT_TRUE(Is<ast::CallStatement>(fl->initializer));
+ EXPECT_EQ(fl->condition, nullptr);
+ EXPECT_EQ(fl->continuing, nullptr);
+ EXPECT_TRUE(fl->body->Empty());
}
// Test a for loop with a break condition
TEST_F(ForStmtTest, BreakCondition) {
- auto p = parser("for (; 0 == 1;) { }");
- auto fl = p->for_stmt();
- EXPECT_FALSE(p->has_error()) << p->error();
- EXPECT_FALSE(fl.errored);
- ASSERT_TRUE(fl.matched);
- EXPECT_EQ(fl->initializer, nullptr);
- EXPECT_TRUE(Is<ast::BinaryExpression>(fl->condition));
- EXPECT_EQ(fl->continuing, nullptr);
- EXPECT_TRUE(fl->body->Empty());
+ auto p = parser("for (; 0 == 1;) { }");
+ auto fl = p->for_stmt();
+ EXPECT_FALSE(p->has_error()) << p->error();
+ EXPECT_FALSE(fl.errored);
+ ASSERT_TRUE(fl.matched);
+ EXPECT_EQ(fl->initializer, nullptr);
+ EXPECT_TRUE(Is<ast::BinaryExpression>(fl->condition));
+ EXPECT_EQ(fl->continuing, nullptr);
+ EXPECT_TRUE(fl->body->Empty());
}
// Test a for loop assigning a variable in the continuing statement.
TEST_F(ForStmtTest, ContinuingAssignment) {
- auto p = parser("for (;; x = 2) { }");
- auto fl = p->for_stmt();
- EXPECT_FALSE(p->has_error()) << p->error();
- EXPECT_FALSE(fl.errored);
- ASSERT_TRUE(fl.matched);
- EXPECT_EQ(fl->initializer, nullptr);
- EXPECT_EQ(fl->condition, nullptr);
- EXPECT_TRUE(Is<ast::AssignmentStatement>(fl->continuing));
- EXPECT_TRUE(fl->body->Empty());
+ auto p = parser("for (;; x = 2) { }");
+ auto fl = p->for_stmt();
+ EXPECT_FALSE(p->has_error()) << p->error();
+ EXPECT_FALSE(fl.errored);
+ ASSERT_TRUE(fl.matched);
+ EXPECT_EQ(fl->initializer, nullptr);
+ EXPECT_EQ(fl->condition, nullptr);
+ EXPECT_TRUE(Is<ast::AssignmentStatement>(fl->continuing));
+ EXPECT_TRUE(fl->body->Empty());
}
// Test a for loop with an increment statement as the continuing statement.
TEST_F(ForStmtTest, ContinuingIncrement) {
- auto p = parser("for (;; x++) { }");
- auto fl = p->for_stmt();
- EXPECT_FALSE(p->has_error()) << p->error();
- EXPECT_FALSE(fl.errored);
- ASSERT_TRUE(fl.matched);
- EXPECT_EQ(fl->initializer, nullptr);
- EXPECT_EQ(fl->condition, nullptr);
- EXPECT_TRUE(Is<ast::IncrementDecrementStatement>(fl->continuing));
- EXPECT_TRUE(fl->body->Empty());
+ auto p = parser("for (;; x++) { }");
+ auto fl = p->for_stmt();
+ EXPECT_FALSE(p->has_error()) << p->error();
+ EXPECT_FALSE(fl.errored);
+ ASSERT_TRUE(fl.matched);
+ EXPECT_EQ(fl->initializer, nullptr);
+ EXPECT_EQ(fl->condition, nullptr);
+ EXPECT_TRUE(Is<ast::IncrementDecrementStatement>(fl->continuing));
+ EXPECT_TRUE(fl->body->Empty());
}
// Test a for loop calling a function in the continuing statement.
TEST_F(ForStmtTest, ContinuingFuncCall) {
- auto p = parser("for (;; a(b,c)) { }");
- auto fl = p->for_stmt();
- EXPECT_FALSE(p->has_error()) << p->error();
- EXPECT_FALSE(fl.errored);
- ASSERT_TRUE(fl.matched);
- EXPECT_EQ(fl->initializer, nullptr);
- EXPECT_EQ(fl->condition, nullptr);
- EXPECT_TRUE(Is<ast::CallStatement>(fl->continuing));
- EXPECT_TRUE(fl->body->Empty());
+ auto p = parser("for (;; a(b,c)) { }");
+ auto fl = p->for_stmt();
+ EXPECT_FALSE(p->has_error()) << p->error();
+ EXPECT_FALSE(fl.errored);
+ ASSERT_TRUE(fl.matched);
+ EXPECT_EQ(fl->initializer, nullptr);
+ EXPECT_EQ(fl->condition, nullptr);
+ EXPECT_TRUE(Is<ast::CallStatement>(fl->continuing));
+ EXPECT_TRUE(fl->body->Empty());
}
class ForStmtErrorTest : public ParserImplTest {
- public:
- void TestForWithError(std::string for_str, std::string error_str) {
- auto p_for = parser(for_str);
- auto e_for = p_for->for_stmt();
-
- EXPECT_FALSE(e_for.matched);
- EXPECT_TRUE(e_for.errored);
- EXPECT_TRUE(p_for->has_error());
- ASSERT_EQ(e_for.value, nullptr);
- EXPECT_EQ(p_for->error(), error_str);
- }
+ public:
+ void TestForWithError(std::string for_str, std::string error_str) {
+ auto p_for = parser(for_str);
+ auto e_for = p_for->for_stmt();
+
+ EXPECT_FALSE(e_for.matched);
+ EXPECT_TRUE(e_for.errored);
+ EXPECT_TRUE(p_for->has_error());
+ ASSERT_EQ(e_for.value, nullptr);
+ EXPECT_EQ(p_for->error(), error_str);
+ }
};
// Test a for loop with missing left parenthesis is invalid.
TEST_F(ForStmtErrorTest, MissingLeftParen) {
- std::string for_str = "for { }";
- std::string error_str = "1:5: expected '(' for for loop";
+ std::string for_str = "for { }";
+ std::string error_str = "1:5: expected '(' for for loop";
- TestForWithError(for_str, error_str);
+ TestForWithError(for_str, error_str);
}
// Test a for loop with missing first semicolon is invalid.
TEST_F(ForStmtErrorTest, MissingFirstSemicolon) {
- std::string for_str = "for () {}";
- std::string error_str = "1:6: expected ';' for initializer in for loop";
+ std::string for_str = "for () {}";
+ std::string error_str = "1:6: expected ';' for initializer in for loop";
- TestForWithError(for_str, error_str);
+ TestForWithError(for_str, error_str);
}
// Test a for loop with missing second semicolon is invalid.
TEST_F(ForStmtErrorTest, MissingSecondSemicolon) {
- std::string for_str = "for (;) {}";
- std::string error_str = "1:7: expected ';' for condition in for loop";
+ std::string for_str = "for (;) {}";
+ std::string error_str = "1:7: expected ';' for condition in for loop";
- TestForWithError(for_str, error_str);
+ TestForWithError(for_str, error_str);
}
// Test a for loop with missing right parenthesis is invalid.
TEST_F(ForStmtErrorTest, MissingRightParen) {
- std::string for_str = "for (;; {}";
- std::string error_str = "1:9: expected ')' for for loop";
+ std::string for_str = "for (;; {}";
+ std::string error_str = "1:9: expected ')' for for loop";
- TestForWithError(for_str, error_str);
+ TestForWithError(for_str, error_str);
}
// Test a for loop with missing left brace is invalid.
TEST_F(ForStmtErrorTest, MissingLeftBrace) {
- std::string for_str = "for (;;)";
- std::string error_str = "1:9: expected '{' for for loop";
+ std::string for_str = "for (;;)";
+ std::string error_str = "1:9: expected '{' for for loop";
- TestForWithError(for_str, error_str);
+ TestForWithError(for_str, error_str);
}
// Test a for loop with missing right brace is invalid.
TEST_F(ForStmtErrorTest, MissingRightBrace) {
- std::string for_str = "for (;;) {";
- std::string error_str = "1:11: expected '}' for for loop";
+ std::string for_str = "for (;;) {";
+ std::string error_str = "1:11: expected '}' for for loop";
- TestForWithError(for_str, error_str);
+ TestForWithError(for_str, error_str);
}
// Test a for loop with an invalid initializer statement.
TEST_F(ForStmtErrorTest, InvalidInitializerAsConstDecl) {
- std::string for_str = "for (let x: i32;;) { }";
- std::string error_str = "1:16: expected '=' for let declaration";
+ std::string for_str = "for (let x: i32;;) { }";
+ std::string error_str = "1:16: expected '=' for let declaration";
- TestForWithError(for_str, error_str);
+ TestForWithError(for_str, error_str);
}
// Test a for loop with an initializer statement not matching
// variable_stmt | assignment_stmt | func_call_stmt.
TEST_F(ForStmtErrorTest, InvalidInitializerMatch) {
- std::string for_str = "for (if (true) {} ;;) { }";
- std::string error_str = "1:6: expected ';' for initializer in for loop";
+ std::string for_str = "for (if (true) {} ;;) { }";
+ std::string error_str = "1:6: expected ';' for initializer in for loop";
- TestForWithError(for_str, error_str);
+ TestForWithError(for_str, error_str);
}
// Test a for loop with an invalid break condition.
TEST_F(ForStmtErrorTest, InvalidBreakConditionAsExpression) {
- std::string for_str = "for (; (0 == 1; ) { }";
- std::string error_str = "1:15: expected ')'";
+ std::string for_str = "for (; (0 == 1; ) { }";
+ std::string error_str = "1:15: expected ')'";
- TestForWithError(for_str, error_str);
+ TestForWithError(for_str, error_str);
}
// Test a for loop with a break condition not matching
// logical_or_expression.
TEST_F(ForStmtErrorTest, InvalidBreakConditionMatch) {
- std::string for_str = "for (; var i: i32 = 0;) { }";
- std::string error_str = "1:8: expected ';' for condition in for loop";
+ std::string for_str = "for (; var i: i32 = 0;) { }";
+ std::string error_str = "1:8: expected ';' for condition in for loop";
- TestForWithError(for_str, error_str);
+ TestForWithError(for_str, error_str);
}
// Test a for loop with an invalid continuing statement.
TEST_F(ForStmtErrorTest, InvalidContinuingAsFuncCall) {
- std::string for_str = "for (;; a(,) ) { }";
- std::string error_str = "1:11: expected ')' for function call";
+ std::string for_str = "for (;; a(,) ) { }";
+ std::string error_str = "1:11: expected ')' for function call";
- TestForWithError(for_str, error_str);
+ TestForWithError(for_str, error_str);
}
// Test a for loop with a continuing statement not matching
// assignment_stmt | func_call_stmt.
TEST_F(ForStmtErrorTest, InvalidContinuingMatch) {
- std::string for_str = "for (;; var i: i32 = 0) { }";
- std::string error_str = "1:9: expected ')' for for loop";
+ std::string for_str = "for (;; var i: i32 = 0) { }";
+ std::string error_str = "1:9: expected ')' for for loop";
- TestForWithError(for_str, error_str);
+ TestForWithError(for_str, error_str);
}
// Test a for loop with an invalid body.
TEST_F(ForStmtErrorTest, InvalidBody) {
- std::string for_str = "for (;;) { let x: i32; }";
- std::string error_str = "1:22: expected '=' for let declaration";
+ std::string for_str = "for (;;) { let x: i32; }";
+ std::string error_str = "1:22: expected '=' for let declaration";
- TestForWithError(for_str, error_str);
+ TestForWithError(for_str, error_str);
}
// Test a for loop with a body not matching statements
TEST_F(ForStmtErrorTest, InvalidBodyMatch) {
- std::string for_str = "for (;;) { fn main() {} }";
- std::string error_str = "1:12: expected '}' for for loop";
+ std::string for_str = "for (;;) { fn main() {} }";
+ std::string error_str = "1:12: expected '}' for for loop";
- TestForWithError(for_str, error_str);
+ TestForWithError(for_str, error_str);
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_function_attribute_list_test.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_function_attribute_list_test.cc
index 00d3c88126d..0773ffd362e 100644
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_function_attribute_list_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_function_attribute_list_test.cc
@@ -18,49 +18,76 @@
namespace tint::reader::wgsl {
namespace {
-TEST_F(ParserImplTest, AttributeList_Parses) {
- auto p = parser("@workgroup_size(2) @stage(compute)");
- auto attrs = p->attribute_list();
- EXPECT_FALSE(p->has_error()) << p->error();
- EXPECT_FALSE(attrs.errored);
- EXPECT_TRUE(attrs.matched);
- ASSERT_EQ(attrs.value.size(), 2u);
+// TODO(crbug.com/tint/1503): Remove this when @stage is removed
+TEST_F(ParserImplTest, AttributeList_Parses_Stage) {
+ auto p = parser("@workgroup_size(2) @stage(compute)");
+ auto attrs = p->attribute_list();
+ EXPECT_FALSE(p->has_error()) << p->error();
+ EXPECT_FALSE(attrs.errored);
+ EXPECT_TRUE(attrs.matched);
+ ASSERT_EQ(attrs.value.size(), 2u);
+
+ auto* attr_0 = attrs.value[0]->As<ast::Attribute>();
+ auto* attr_1 = attrs.value[1]->As<ast::Attribute>();
+ ASSERT_NE(attr_0, nullptr);
+ ASSERT_NE(attr_1, nullptr);
- auto* attr_0 = attrs.value[0]->As<ast::Attribute>();
- auto* attr_1 = attrs.value[1]->As<ast::Attribute>();
- ASSERT_NE(attr_0, nullptr);
- ASSERT_NE(attr_1, nullptr);
+ ASSERT_TRUE(attr_0->Is<ast::WorkgroupAttribute>());
+ const ast::Expression* x = attr_0->As<ast::WorkgroupAttribute>()->x;
+ ASSERT_NE(x, nullptr);
+ auto* x_literal = x->As<ast::LiteralExpression>();
+ ASSERT_NE(x_literal, nullptr);
+ ASSERT_TRUE(x_literal->Is<ast::IntLiteralExpression>());
+ EXPECT_EQ(x_literal->As<ast::IntLiteralExpression>()->value, 2);
+ EXPECT_EQ(x_literal->As<ast::IntLiteralExpression>()->suffix,
+ ast::IntLiteralExpression::Suffix::kNone);
+ ASSERT_TRUE(attr_1->Is<ast::StageAttribute>());
+ EXPECT_EQ(attr_1->As<ast::StageAttribute>()->stage, ast::PipelineStage::kCompute);
+}
+
+TEST_F(ParserImplTest, AttributeList_Parses) {
+ auto p = parser("@workgroup_size(2) @compute");
+ auto attrs = p->attribute_list();
+ EXPECT_FALSE(p->has_error()) << p->error();
+ EXPECT_FALSE(attrs.errored);
+ EXPECT_TRUE(attrs.matched);
+ ASSERT_EQ(attrs.value.size(), 2u);
- ASSERT_TRUE(attr_0->Is<ast::WorkgroupAttribute>());
- const ast::Expression* x = attr_0->As<ast::WorkgroupAttribute>()->x;
- ASSERT_NE(x, nullptr);
- auto* x_literal = x->As<ast::LiteralExpression>();
- ASSERT_NE(x_literal, nullptr);
- ASSERT_TRUE(x_literal->Is<ast::IntLiteralExpression>());
- EXPECT_EQ(x_literal->As<ast::IntLiteralExpression>()->ValueAsU32(), 2u);
+ auto* attr_0 = attrs.value[0]->As<ast::Attribute>();
+ auto* attr_1 = attrs.value[1]->As<ast::Attribute>();
+ ASSERT_NE(attr_0, nullptr);
+ ASSERT_NE(attr_1, nullptr);
- ASSERT_TRUE(attr_1->Is<ast::StageAttribute>());
- EXPECT_EQ(attr_1->As<ast::StageAttribute>()->stage,
- ast::PipelineStage::kCompute);
+ ASSERT_TRUE(attr_0->Is<ast::WorkgroupAttribute>());
+ const ast::Expression* x = attr_0->As<ast::WorkgroupAttribute>()->x;
+ ASSERT_NE(x, nullptr);
+ auto* x_literal = x->As<ast::LiteralExpression>();
+ ASSERT_NE(x_literal, nullptr);
+ ASSERT_TRUE(x_literal->Is<ast::IntLiteralExpression>());
+ EXPECT_EQ(x_literal->As<ast::IntLiteralExpression>()->value, 2);
+ EXPECT_EQ(x_literal->As<ast::IntLiteralExpression>()->suffix,
+ ast::IntLiteralExpression::Suffix::kNone);
+ ASSERT_TRUE(attr_1->Is<ast::StageAttribute>());
+ EXPECT_EQ(attr_1->As<ast::StageAttribute>()->stage, ast::PipelineStage::kCompute);
}
TEST_F(ParserImplTest, AttributeList_Invalid) {
- auto p = parser("@invalid");
- auto attrs = p->attribute_list();
- EXPECT_TRUE(p->has_error());
- EXPECT_TRUE(attrs.errored);
- EXPECT_FALSE(attrs.matched);
- EXPECT_TRUE(attrs.value.empty());
- EXPECT_EQ(p->error(), "1:2: expected attribute");
+ auto p = parser("@invalid");
+ auto attrs = p->attribute_list();
+ EXPECT_TRUE(p->has_error());
+ EXPECT_TRUE(attrs.errored);
+ EXPECT_FALSE(attrs.matched);
+ EXPECT_TRUE(attrs.value.empty());
+ EXPECT_EQ(p->error(), "1:2: expected attribute");
}
TEST_F(ParserImplTest, AttributeList_BadAttribute) {
- auto p = parser("@stage()");
- auto attrs = p->attribute_list();
- EXPECT_TRUE(p->has_error());
- EXPECT_TRUE(attrs.errored);
- EXPECT_FALSE(attrs.matched);
- EXPECT_EQ(p->error(), "1:8: invalid value for stage attribute");
+ auto p = parser("@stage()");
+ auto attrs = p->attribute_list();
+ EXPECT_TRUE(p->has_error());
+ EXPECT_TRUE(attrs.errored);
+ EXPECT_FALSE(attrs.matched);
+ EXPECT_EQ(p->error(), "1:8: invalid value for stage attribute");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_function_attribute_test.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_function_attribute_test.cc
index 5d8c462bd3b..44f3d783725 100644
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_function_attribute_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_function_attribute_test.cc
@@ -20,236 +20,289 @@ namespace tint::reader::wgsl {
namespace {
TEST_F(ParserImplTest, Attribute_Workgroup) {
- auto p = parser("workgroup_size(4)");
- auto attr = p->attribute();
- EXPECT_TRUE(attr.matched);
- EXPECT_FALSE(attr.errored);
- ASSERT_NE(attr.value, nullptr) << p->error();
- ASSERT_FALSE(p->has_error());
- auto* func_attr = attr.value->As<ast::Attribute>();
- ASSERT_NE(func_attr, nullptr);
- ASSERT_TRUE(func_attr->Is<ast::WorkgroupAttribute>());
-
- auto values = func_attr->As<ast::WorkgroupAttribute>()->Values();
-
- ASSERT_TRUE(values[0]->Is<ast::IntLiteralExpression>());
- EXPECT_EQ(values[0]->As<ast::IntLiteralExpression>()->ValueAsU32(), 4u);
-
- EXPECT_EQ(values[1], nullptr);
- EXPECT_EQ(values[2], nullptr);
+ auto p = parser("workgroup_size(4)");
+ auto attr = p->attribute();
+ EXPECT_TRUE(attr.matched);
+ EXPECT_FALSE(attr.errored);
+ ASSERT_NE(attr.value, nullptr) << p->error();
+ ASSERT_FALSE(p->has_error());
+ auto* func_attr = attr.value->As<ast::Attribute>();
+ ASSERT_NE(func_attr, nullptr);
+ ASSERT_TRUE(func_attr->Is<ast::WorkgroupAttribute>());
+
+ auto values = func_attr->As<ast::WorkgroupAttribute>()->Values();
+
+ ASSERT_TRUE(values[0]->Is<ast::IntLiteralExpression>());
+ EXPECT_EQ(values[0]->As<ast::IntLiteralExpression>()->value, 4);
+ EXPECT_EQ(values[0]->As<ast::IntLiteralExpression>()->suffix,
+ ast::IntLiteralExpression::Suffix::kNone);
+
+ EXPECT_EQ(values[1], nullptr);
+ EXPECT_EQ(values[2], nullptr);
}
TEST_F(ParserImplTest, Attribute_Workgroup_2Param) {
- auto p = parser("workgroup_size(4, 5)");
- auto attr = p->attribute();
- EXPECT_TRUE(attr.matched);
- EXPECT_FALSE(attr.errored);
- ASSERT_NE(attr.value, nullptr) << p->error();
- ASSERT_FALSE(p->has_error());
- auto* func_attr = attr.value->As<ast::Attribute>();
- ASSERT_NE(func_attr, nullptr) << p->error();
- ASSERT_TRUE(func_attr->Is<ast::WorkgroupAttribute>());
-
- auto values = func_attr->As<ast::WorkgroupAttribute>()->Values();
-
- ASSERT_TRUE(values[0]->Is<ast::IntLiteralExpression>());
- EXPECT_EQ(values[0]->As<ast::IntLiteralExpression>()->ValueAsU32(), 4u);
-
- ASSERT_TRUE(values[1]->Is<ast::IntLiteralExpression>());
- EXPECT_EQ(values[1]->As<ast::IntLiteralExpression>()->ValueAsU32(), 5u);
-
- EXPECT_EQ(values[2], nullptr);
+ auto p = parser("workgroup_size(4, 5)");
+ auto attr = p->attribute();
+ EXPECT_TRUE(attr.matched);
+ EXPECT_FALSE(attr.errored);
+ ASSERT_NE(attr.value, nullptr) << p->error();
+ ASSERT_FALSE(p->has_error());
+ auto* func_attr = attr.value->As<ast::Attribute>();
+ ASSERT_NE(func_attr, nullptr) << p->error();
+ ASSERT_TRUE(func_attr->Is<ast::WorkgroupAttribute>());
+
+ auto values = func_attr->As<ast::WorkgroupAttribute>()->Values();
+
+ ASSERT_TRUE(values[0]->Is<ast::IntLiteralExpression>());
+ EXPECT_EQ(values[0]->As<ast::IntLiteralExpression>()->value, 4);
+ EXPECT_EQ(values[0]->As<ast::IntLiteralExpression>()->suffix,
+ ast::IntLiteralExpression::Suffix::kNone);
+
+ ASSERT_TRUE(values[1]->Is<ast::IntLiteralExpression>());
+ EXPECT_EQ(values[1]->As<ast::IntLiteralExpression>()->value, 5);
+ EXPECT_EQ(values[1]->As<ast::IntLiteralExpression>()->suffix,
+ ast::IntLiteralExpression::Suffix::kNone);
+
+ EXPECT_EQ(values[2], nullptr);
}
TEST_F(ParserImplTest, Attribute_Workgroup_3Param) {
- auto p = parser("workgroup_size(4, 5, 6)");
- auto attr = p->attribute();
- EXPECT_TRUE(attr.matched);
- EXPECT_FALSE(attr.errored);
- ASSERT_NE(attr.value, nullptr) << p->error();
- ASSERT_FALSE(p->has_error());
- auto* func_attr = attr.value->As<ast::Attribute>();
- ASSERT_NE(func_attr, nullptr);
- ASSERT_TRUE(func_attr->Is<ast::WorkgroupAttribute>());
-
- auto values = func_attr->As<ast::WorkgroupAttribute>()->Values();
-
- ASSERT_TRUE(values[0]->Is<ast::IntLiteralExpression>());
- EXPECT_EQ(values[0]->As<ast::IntLiteralExpression>()->ValueAsU32(), 4u);
-
- ASSERT_TRUE(values[1]->Is<ast::IntLiteralExpression>());
- EXPECT_EQ(values[1]->As<ast::IntLiteralExpression>()->ValueAsU32(), 5u);
-
- ASSERT_TRUE(values[2]->Is<ast::IntLiteralExpression>());
- EXPECT_EQ(values[2]->As<ast::IntLiteralExpression>()->ValueAsU32(), 6u);
+ auto p = parser("workgroup_size(4, 5, 6)");
+ auto attr = p->attribute();
+ EXPECT_TRUE(attr.matched);
+ EXPECT_FALSE(attr.errored);
+ ASSERT_NE(attr.value, nullptr) << p->error();
+ ASSERT_FALSE(p->has_error());
+ auto* func_attr = attr.value->As<ast::Attribute>();
+ ASSERT_NE(func_attr, nullptr);
+ ASSERT_TRUE(func_attr->Is<ast::WorkgroupAttribute>());
+
+ auto values = func_attr->As<ast::WorkgroupAttribute>()->Values();
+
+ ASSERT_TRUE(values[0]->Is<ast::IntLiteralExpression>());
+ EXPECT_EQ(values[0]->As<ast::IntLiteralExpression>()->value, 4);
+ EXPECT_EQ(values[0]->As<ast::IntLiteralExpression>()->suffix,
+ ast::IntLiteralExpression::Suffix::kNone);
+
+ ASSERT_TRUE(values[1]->Is<ast::IntLiteralExpression>());
+ EXPECT_EQ(values[1]->As<ast::IntLiteralExpression>()->value, 5);
+ EXPECT_EQ(values[1]->As<ast::IntLiteralExpression>()->suffix,
+ ast::IntLiteralExpression::Suffix::kNone);
+
+ ASSERT_TRUE(values[2]->Is<ast::IntLiteralExpression>());
+ EXPECT_EQ(values[2]->As<ast::IntLiteralExpression>()->value, 6);
+ EXPECT_EQ(values[2]->As<ast::IntLiteralExpression>()->suffix,
+ ast::IntLiteralExpression::Suffix::kNone);
}
TEST_F(ParserImplTest, Attribute_Workgroup_WithIdent) {
- auto p = parser("workgroup_size(4, height)");
- auto attr = p->attribute();
- EXPECT_TRUE(attr.matched);
- EXPECT_FALSE(attr.errored);
- ASSERT_NE(attr.value, nullptr) << p->error();
- ASSERT_FALSE(p->has_error());
- auto* func_attr = attr.value->As<ast::Attribute>();
- ASSERT_NE(func_attr, nullptr);
- ASSERT_TRUE(func_attr->Is<ast::WorkgroupAttribute>());
-
- auto values = func_attr->As<ast::WorkgroupAttribute>()->Values();
-
- ASSERT_TRUE(values[0]->Is<ast::IntLiteralExpression>());
- EXPECT_EQ(values[0]->As<ast::IntLiteralExpression>()->ValueAsU32(), 4u);
-
- ASSERT_NE(values[1], nullptr);
- auto* y_ident = values[1]->As<ast::IdentifierExpression>();
- ASSERT_NE(y_ident, nullptr);
- EXPECT_EQ(p->builder().Symbols().NameFor(y_ident->symbol), "height");
-
- ASSERT_EQ(values[2], nullptr);
+ auto p = parser("workgroup_size(4, height)");
+ auto attr = p->attribute();
+ EXPECT_TRUE(attr.matched);
+ EXPECT_FALSE(attr.errored);
+ ASSERT_NE(attr.value, nullptr) << p->error();
+ ASSERT_FALSE(p->has_error());
+ auto* func_attr = attr.value->As<ast::Attribute>();
+ ASSERT_NE(func_attr, nullptr);
+ ASSERT_TRUE(func_attr->Is<ast::WorkgroupAttribute>());
+
+ auto values = func_attr->As<ast::WorkgroupAttribute>()->Values();
+
+ ASSERT_TRUE(values[0]->Is<ast::IntLiteralExpression>());
+ EXPECT_EQ(values[0]->As<ast::IntLiteralExpression>()->value, 4);
+ EXPECT_EQ(values[0]->As<ast::IntLiteralExpression>()->suffix,
+ ast::IntLiteralExpression::Suffix::kNone);
+
+ ASSERT_NE(values[1], nullptr);
+ auto* y_ident = values[1]->As<ast::IdentifierExpression>();
+ ASSERT_NE(y_ident, nullptr);
+ EXPECT_EQ(p->builder().Symbols().NameFor(y_ident->symbol), "height");
+
+ ASSERT_EQ(values[2], nullptr);
}
TEST_F(ParserImplTest, Attribute_Workgroup_TooManyValues) {
- auto p = parser("workgroup_size(1, 2, 3, 4)");
- auto attr = p->attribute();
- EXPECT_FALSE(attr.matched);
- EXPECT_TRUE(attr.errored);
- EXPECT_EQ(attr.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:23: expected ')' for workgroup_size attribute");
+ auto p = parser("workgroup_size(1, 2, 3, 4)");
+ auto attr = p->attribute();
+ EXPECT_FALSE(attr.matched);
+ EXPECT_TRUE(attr.errored);
+ EXPECT_EQ(attr.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:23: expected ')' for workgroup_size attribute");
}
TEST_F(ParserImplTest, Attribute_Workgroup_MissingLeftParam) {
- auto p = parser("workgroup_size 4, 5, 6)");
- auto attr = p->attribute();
- EXPECT_FALSE(attr.matched);
- EXPECT_TRUE(attr.errored);
- EXPECT_EQ(attr.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:16: expected '(' for workgroup_size attribute");
+ auto p = parser("workgroup_size 4, 5, 6)");
+ auto attr = p->attribute();
+ EXPECT_FALSE(attr.matched);
+ EXPECT_TRUE(attr.errored);
+ EXPECT_EQ(attr.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:16: expected '(' for workgroup_size attribute");
}
TEST_F(ParserImplTest, Attribute_Workgroup_MissingRightParam) {
- auto p = parser("workgroup_size(4, 5, 6");
- auto attr = p->attribute();
- EXPECT_FALSE(attr.matched);
- EXPECT_TRUE(attr.errored);
- EXPECT_EQ(attr.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:23: expected ')' for workgroup_size attribute");
+ auto p = parser("workgroup_size(4, 5, 6");
+ auto attr = p->attribute();
+ EXPECT_FALSE(attr.matched);
+ EXPECT_TRUE(attr.errored);
+ EXPECT_EQ(attr.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:23: expected ')' for workgroup_size attribute");
}
TEST_F(ParserImplTest, Attribute_Workgroup_MissingValues) {
- auto p = parser("workgroup_size()");
- auto attr = p->attribute();
- EXPECT_FALSE(attr.matched);
- EXPECT_TRUE(attr.errored);
- EXPECT_EQ(attr.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:16: expected workgroup_size x parameter");
+ auto p = parser("workgroup_size()");
+ auto attr = p->attribute();
+ EXPECT_FALSE(attr.matched);
+ EXPECT_TRUE(attr.errored);
+ EXPECT_EQ(attr.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:16: expected workgroup_size x parameter");
}
TEST_F(ParserImplTest, Attribute_Workgroup_Missing_X_Value) {
- auto p = parser("workgroup_size(, 2, 3)");
- auto attr = p->attribute();
- EXPECT_FALSE(attr.matched);
- EXPECT_TRUE(attr.errored);
- EXPECT_EQ(attr.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:16: expected workgroup_size x parameter");
+ auto p = parser("workgroup_size(, 2, 3)");
+ auto attr = p->attribute();
+ EXPECT_FALSE(attr.matched);
+ EXPECT_TRUE(attr.errored);
+ EXPECT_EQ(attr.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:16: expected workgroup_size x parameter");
}
TEST_F(ParserImplTest, Attribute_Workgroup_Missing_Y_Comma) {
- auto p = parser("workgroup_size(1 2, 3)");
- auto attr = p->attribute();
- EXPECT_FALSE(attr.matched);
- EXPECT_TRUE(attr.errored);
- EXPECT_EQ(attr.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:18: expected ')' for workgroup_size attribute");
+ auto p = parser("workgroup_size(1 2, 3)");
+ auto attr = p->attribute();
+ EXPECT_FALSE(attr.matched);
+ EXPECT_TRUE(attr.errored);
+ EXPECT_EQ(attr.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:18: expected ')' for workgroup_size attribute");
}
TEST_F(ParserImplTest, Attribute_Workgroup_Missing_Y_Value) {
- auto p = parser("workgroup_size(1, , 3)");
- auto attr = p->attribute();
- EXPECT_FALSE(attr.matched);
- EXPECT_TRUE(attr.errored);
- EXPECT_EQ(attr.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:19: expected workgroup_size y parameter");
+ auto p = parser("workgroup_size(1, , 3)");
+ auto attr = p->attribute();
+ EXPECT_FALSE(attr.matched);
+ EXPECT_TRUE(attr.errored);
+ EXPECT_EQ(attr.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:19: expected workgroup_size y parameter");
}
TEST_F(ParserImplTest, Attribute_Workgroup_Missing_Z_Comma) {
- auto p = parser("workgroup_size(1, 2 3)");
- auto attr = p->attribute();
- EXPECT_FALSE(attr.matched);
- EXPECT_TRUE(attr.errored);
- EXPECT_EQ(attr.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:21: expected ')' for workgroup_size attribute");
+ auto p = parser("workgroup_size(1, 2 3)");
+ auto attr = p->attribute();
+ EXPECT_FALSE(attr.matched);
+ EXPECT_TRUE(attr.errored);
+ EXPECT_EQ(attr.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:21: expected ')' for workgroup_size attribute");
}
TEST_F(ParserImplTest, Attribute_Workgroup_Missing_Z_Value) {
- auto p = parser("workgroup_size(1, 2, )");
- auto attr = p->attribute();
- EXPECT_FALSE(attr.matched);
- EXPECT_TRUE(attr.errored);
- EXPECT_EQ(attr.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:22: expected workgroup_size z parameter");
+ auto p = parser("workgroup_size(1, 2, )");
+ auto attr = p->attribute();
+ EXPECT_FALSE(attr.matched);
+ EXPECT_TRUE(attr.errored);
+ EXPECT_EQ(attr.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:22: expected workgroup_size z parameter");
}
+// TODO(crbug.com/tint/1503): Remove when @stage is removed
TEST_F(ParserImplTest, Attribute_Stage) {
- auto p = parser("stage(compute)");
- auto attr = p->attribute();
- EXPECT_TRUE(attr.matched);
- EXPECT_FALSE(attr.errored);
- ASSERT_NE(attr.value, nullptr) << p->error();
- ASSERT_FALSE(p->has_error());
- auto* func_attr = attr.value->As<ast::Attribute>();
- ASSERT_NE(func_attr, nullptr);
- ASSERT_TRUE(func_attr->Is<ast::StageAttribute>());
- EXPECT_EQ(func_attr->As<ast::StageAttribute>()->stage,
- ast::PipelineStage::kCompute);
+ auto p = parser("stage(compute)");
+ auto attr = p->attribute();
+ EXPECT_TRUE(attr.matched);
+ EXPECT_FALSE(attr.errored);
+ ASSERT_NE(attr.value, nullptr) << p->error();
+ ASSERT_FALSE(p->has_error());
+ auto* func_attr = attr.value->As<ast::Attribute>();
+ ASSERT_NE(func_attr, nullptr);
+ ASSERT_TRUE(func_attr->Is<ast::StageAttribute>());
+ EXPECT_EQ(func_attr->As<ast::StageAttribute>()->stage, ast::PipelineStage::kCompute);
}
TEST_F(ParserImplTest, Attribute_Stage_MissingValue) {
- auto p = parser("stage()");
- auto attr = p->attribute();
- EXPECT_FALSE(attr.matched);
- EXPECT_TRUE(attr.errored);
- EXPECT_EQ(attr.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:7: invalid value for stage attribute");
+ auto p = parser("stage()");
+ auto attr = p->attribute();
+ EXPECT_FALSE(attr.matched);
+ EXPECT_TRUE(attr.errored);
+ EXPECT_EQ(attr.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:7: invalid value for stage attribute");
}
TEST_F(ParserImplTest, Attribute_Stage_MissingInvalid) {
- auto p = parser("stage(nan)");
- auto attr = p->attribute();
- EXPECT_FALSE(attr.matched);
- EXPECT_TRUE(attr.errored);
- EXPECT_EQ(attr.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:7: invalid value for stage attribute");
+ auto p = parser("stage(nan)");
+ auto attr = p->attribute();
+ EXPECT_FALSE(attr.matched);
+ EXPECT_TRUE(attr.errored);
+ EXPECT_EQ(attr.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:7: invalid value for stage attribute");
}
TEST_F(ParserImplTest, Attribute_Stage_MissingLeftParen) {
- auto p = parser("stage compute)");
- auto attr = p->attribute();
- EXPECT_FALSE(attr.matched);
- EXPECT_TRUE(attr.errored);
- EXPECT_EQ(attr.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:7: expected '(' for stage attribute");
+ auto p = parser("stage compute)");
+ auto attr = p->attribute();
+ EXPECT_FALSE(attr.matched);
+ EXPECT_TRUE(attr.errored);
+ EXPECT_EQ(attr.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:7: expected '(' for stage attribute");
}
TEST_F(ParserImplTest, Attribute_Stage_MissingRightParen) {
- auto p = parser("stage(compute");
- auto attr = p->attribute();
- EXPECT_FALSE(attr.matched);
- EXPECT_TRUE(attr.errored);
- EXPECT_EQ(attr.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:14: expected ')' for stage attribute");
+ auto p = parser("stage(compute");
+ auto attr = p->attribute();
+ EXPECT_FALSE(attr.matched);
+ EXPECT_TRUE(attr.errored);
+ EXPECT_EQ(attr.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:14: expected ')' for stage attribute");
+}
+
+TEST_F(ParserImplTest, Attribute_Compute) {
+ auto p = parser("compute");
+ auto attr = p->attribute();
+ EXPECT_TRUE(attr.matched);
+ EXPECT_FALSE(attr.errored);
+ ASSERT_NE(attr.value, nullptr) << p->error();
+ ASSERT_FALSE(p->has_error());
+ auto* func_attr = attr.value->As<ast::Attribute>();
+ ASSERT_NE(func_attr, nullptr);
+ ASSERT_TRUE(func_attr->Is<ast::StageAttribute>());
+ EXPECT_EQ(func_attr->As<ast::StageAttribute>()->stage, ast::PipelineStage::kCompute);
+}
+
+TEST_F(ParserImplTest, Attribute_Vertex) {
+ auto p = parser("vertex");
+ auto attr = p->attribute();
+ EXPECT_TRUE(attr.matched);
+ EXPECT_FALSE(attr.errored);
+ ASSERT_NE(attr.value, nullptr) << p->error();
+ ASSERT_FALSE(p->has_error());
+ auto* func_attr = attr.value->As<ast::Attribute>();
+ ASSERT_NE(func_attr, nullptr);
+ ASSERT_TRUE(func_attr->Is<ast::StageAttribute>());
+ EXPECT_EQ(func_attr->As<ast::StageAttribute>()->stage, ast::PipelineStage::kVertex);
+}
+
+TEST_F(ParserImplTest, Attribute_Fragment) {
+ auto p = parser("fragment");
+ auto attr = p->attribute();
+ EXPECT_TRUE(attr.matched);
+ EXPECT_FALSE(attr.errored);
+ ASSERT_NE(attr.value, nullptr) << p->error();
+ ASSERT_FALSE(p->has_error());
+ auto* func_attr = attr.value->As<ast::Attribute>();
+ ASSERT_NE(func_attr, nullptr);
+ ASSERT_TRUE(func_attr->Is<ast::StageAttribute>());
+ EXPECT_EQ(func_attr->As<ast::StageAttribute>()->stage, ast::PipelineStage::kFragment);
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_function_decl_test.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_function_decl_test.cc
index d94e5259dff..4d7857b0407 100644
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_function_decl_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_function_decl_test.cc
@@ -20,273 +20,289 @@ namespace tint::reader::wgsl {
namespace {
TEST_F(ParserImplTest, FunctionDecl) {
- auto p = parser("fn main(a : i32, b : f32) { return; }");
- auto attrs = p->attribute_list();
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_FALSE(attrs.errored);
- EXPECT_FALSE(attrs.matched);
- auto f = p->function_decl(attrs.value);
- EXPECT_FALSE(p->has_error()) << p->error();
- EXPECT_FALSE(f.errored);
- EXPECT_TRUE(f.matched);
- ASSERT_NE(f.value, nullptr);
-
- EXPECT_EQ(f->symbol, p->builder().Symbols().Get("main"));
- ASSERT_NE(f->return_type, nullptr);
- EXPECT_TRUE(f->return_type->Is<ast::Void>());
-
- ASSERT_EQ(f->params.size(), 2u);
- EXPECT_EQ(f->params[0]->symbol, p->builder().Symbols().Get("a"));
- EXPECT_EQ(f->params[1]->symbol, p->builder().Symbols().Get("b"));
-
- ASSERT_NE(f->return_type, nullptr);
- EXPECT_TRUE(f->return_type->Is<ast::Void>());
-
- auto* body = f->body;
- ASSERT_EQ(body->statements.size(), 1u);
- EXPECT_TRUE(body->statements[0]->Is<ast::ReturnStatement>());
+ auto p = parser("fn main(a : i32, b : f32) { return; }");
+ auto attrs = p->attribute_list();
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_FALSE(attrs.errored);
+ EXPECT_FALSE(attrs.matched);
+ auto f = p->function_decl(attrs.value);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ EXPECT_FALSE(f.errored);
+ EXPECT_TRUE(f.matched);
+ ASSERT_NE(f.value, nullptr);
+
+ EXPECT_EQ(f->symbol, p->builder().Symbols().Get("main"));
+ ASSERT_NE(f->return_type, nullptr);
+ EXPECT_TRUE(f->return_type->Is<ast::Void>());
+
+ ASSERT_EQ(f->params.size(), 2u);
+ EXPECT_EQ(f->params[0]->symbol, p->builder().Symbols().Get("a"));
+ EXPECT_EQ(f->params[1]->symbol, p->builder().Symbols().Get("b"));
+
+ ASSERT_NE(f->return_type, nullptr);
+ EXPECT_TRUE(f->return_type->Is<ast::Void>());
+
+ auto* body = f->body;
+ ASSERT_EQ(body->statements.size(), 1u);
+ EXPECT_TRUE(body->statements[0]->Is<ast::ReturnStatement>());
}
TEST_F(ParserImplTest, FunctionDecl_Unicode) {
- const std::string function_ident = // "𝗳𝘂𝗻𝗰𝘁𝗶𝗼𝗻"
- "\xf0\x9d\x97\xb3\xf0\x9d\x98\x82\xf0\x9d\x97\xbb\xf0\x9d\x97\xb0\xf0\x9d"
- "\x98\x81\xf0\x9d\x97\xb6\xf0\x9d\x97\xbc\xf0\x9d\x97\xbb";
-
- const std::string param_a_ident = // "𝓹𝓪𝓻𝓪𝓶_𝓪"
- "\xf0\x9d\x93\xb9\xf0\x9d\x93\xaa\xf0\x9d\x93\xbb\xf0\x9d\x93\xaa\xf0\x9d"
- "\x93\xb6\x5f\xf0\x9d\x93\xaa";
-
- const std::string param_b_ident = // "𝕡𝕒𝕣𝕒𝕞_𝕓"
- "\xf0\x9d\x95\xa1\xf0\x9d\x95\x92\xf0\x9d\x95\xa3\xf0\x9d\x95\x92\xf0\x9d"
- "\x95\x9e\x5f\xf0\x9d\x95\x93";
-
- std::string src = "fn $function($param_a : i32, $param_b : f32) { return; }";
- src = utils::ReplaceAll(src, "$function", function_ident);
- src = utils::ReplaceAll(src, "$param_a", param_a_ident);
- src = utils::ReplaceAll(src, "$param_b", param_b_ident);
-
- auto p = parser(src);
- auto attrs = p->attribute_list();
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_FALSE(attrs.errored);
- EXPECT_FALSE(attrs.matched);
- auto f = p->function_decl(attrs.value);
- EXPECT_FALSE(p->has_error()) << p->error();
- EXPECT_FALSE(f.errored);
- EXPECT_TRUE(f.matched);
- ASSERT_NE(f.value, nullptr);
-
- EXPECT_EQ(f->symbol, p->builder().Symbols().Get(function_ident));
- ASSERT_NE(f->return_type, nullptr);
- EXPECT_TRUE(f->return_type->Is<ast::Void>());
-
- ASSERT_EQ(f->params.size(), 2u);
- EXPECT_EQ(f->params[0]->symbol, p->builder().Symbols().Get(param_a_ident));
- EXPECT_EQ(f->params[1]->symbol, p->builder().Symbols().Get(param_b_ident));
-
- ASSERT_NE(f->return_type, nullptr);
- EXPECT_TRUE(f->return_type->Is<ast::Void>());
-
- auto* body = f->body;
- ASSERT_EQ(body->statements.size(), 1u);
- EXPECT_TRUE(body->statements[0]->Is<ast::ReturnStatement>());
+ const std::string function_ident = // "𝗳𝘂𝗻𝗰𝘁𝗶𝗼𝗻"
+ "\xf0\x9d\x97\xb3\xf0\x9d\x98\x82\xf0\x9d\x97\xbb\xf0\x9d\x97\xb0\xf0\x9d"
+ "\x98\x81\xf0\x9d\x97\xb6\xf0\x9d\x97\xbc\xf0\x9d\x97\xbb";
+
+ const std::string param_a_ident = // "𝓹𝓪𝓻𝓪𝓶_𝓪"
+ "\xf0\x9d\x93\xb9\xf0\x9d\x93\xaa\xf0\x9d\x93\xbb\xf0\x9d\x93\xaa\xf0\x9d"
+ "\x93\xb6\x5f\xf0\x9d\x93\xaa";
+
+ const std::string param_b_ident = // "𝕡𝕒𝕣𝕒𝕞_𝕓"
+ "\xf0\x9d\x95\xa1\xf0\x9d\x95\x92\xf0\x9d\x95\xa3\xf0\x9d\x95\x92\xf0\x9d"
+ "\x95\x9e\x5f\xf0\x9d\x95\x93";
+
+ std::string src = "fn $function($param_a : i32, $param_b : f32) { return; }";
+ src = utils::ReplaceAll(src, "$function", function_ident);
+ src = utils::ReplaceAll(src, "$param_a", param_a_ident);
+ src = utils::ReplaceAll(src, "$param_b", param_b_ident);
+
+ auto p = parser(src);
+ auto attrs = p->attribute_list();
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_FALSE(attrs.errored);
+ EXPECT_FALSE(attrs.matched);
+ auto f = p->function_decl(attrs.value);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ EXPECT_FALSE(f.errored);
+ EXPECT_TRUE(f.matched);
+ ASSERT_NE(f.value, nullptr);
+
+ EXPECT_EQ(f->symbol, p->builder().Symbols().Get(function_ident));
+ ASSERT_NE(f->return_type, nullptr);
+ EXPECT_TRUE(f->return_type->Is<ast::Void>());
+
+ ASSERT_EQ(f->params.size(), 2u);
+ EXPECT_EQ(f->params[0]->symbol, p->builder().Symbols().Get(param_a_ident));
+ EXPECT_EQ(f->params[1]->symbol, p->builder().Symbols().Get(param_b_ident));
+
+ ASSERT_NE(f->return_type, nullptr);
+ EXPECT_TRUE(f->return_type->Is<ast::Void>());
+
+ auto* body = f->body;
+ ASSERT_EQ(body->statements.size(), 1u);
+ EXPECT_TRUE(body->statements[0]->Is<ast::ReturnStatement>());
}
TEST_F(ParserImplTest, FunctionDecl_AttributeList) {
- auto p = parser("@workgroup_size(2, 3, 4) fn main() { return; }");
- auto attrs = p->attribute_list();
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_FALSE(attrs.errored);
- ASSERT_TRUE(attrs.matched);
- auto f = p->function_decl(attrs.value);
- EXPECT_FALSE(p->has_error()) << p->error();
- EXPECT_FALSE(f.errored);
- EXPECT_TRUE(f.matched);
- ASSERT_NE(f.value, nullptr);
-
- EXPECT_EQ(f->symbol, p->builder().Symbols().Get("main"));
- ASSERT_NE(f->return_type, nullptr);
- EXPECT_TRUE(f->return_type->Is<ast::Void>());
- ASSERT_EQ(f->params.size(), 0u);
-
- auto& attributes = f->attributes;
- ASSERT_EQ(attributes.size(), 1u);
- ASSERT_TRUE(attributes[0]->Is<ast::WorkgroupAttribute>());
-
- auto values = attributes[0]->As<ast::WorkgroupAttribute>()->Values();
-
- ASSERT_TRUE(values[0]->Is<ast::IntLiteralExpression>());
- EXPECT_EQ(values[0]->As<ast::IntLiteralExpression>()->ValueAsU32(), 2u);
-
- ASSERT_TRUE(values[1]->Is<ast::IntLiteralExpression>());
- EXPECT_EQ(values[1]->As<ast::IntLiteralExpression>()->ValueAsU32(), 3u);
-
- ASSERT_TRUE(values[2]->Is<ast::IntLiteralExpression>());
- EXPECT_EQ(values[2]->As<ast::IntLiteralExpression>()->ValueAsU32(), 4u);
-
- auto* body = f->body;
- ASSERT_EQ(body->statements.size(), 1u);
- EXPECT_TRUE(body->statements[0]->Is<ast::ReturnStatement>());
+ auto p = parser("@workgroup_size(2, 3, 4) fn main() { return; }");
+ auto attrs = p->attribute_list();
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_FALSE(attrs.errored);
+ ASSERT_TRUE(attrs.matched);
+ auto f = p->function_decl(attrs.value);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ EXPECT_FALSE(f.errored);
+ EXPECT_TRUE(f.matched);
+ ASSERT_NE(f.value, nullptr);
+
+ EXPECT_EQ(f->symbol, p->builder().Symbols().Get("main"));
+ ASSERT_NE(f->return_type, nullptr);
+ EXPECT_TRUE(f->return_type->Is<ast::Void>());
+ ASSERT_EQ(f->params.size(), 0u);
+
+ auto& attributes = f->attributes;
+ ASSERT_EQ(attributes.size(), 1u);
+ ASSERT_TRUE(attributes[0]->Is<ast::WorkgroupAttribute>());
+
+ auto values = attributes[0]->As<ast::WorkgroupAttribute>()->Values();
+
+ ASSERT_TRUE(values[0]->Is<ast::IntLiteralExpression>());
+ EXPECT_EQ(values[0]->As<ast::IntLiteralExpression>()->value, 2);
+ EXPECT_EQ(values[0]->As<ast::IntLiteralExpression>()->suffix,
+ ast::IntLiteralExpression::Suffix::kNone);
+
+ ASSERT_TRUE(values[1]->Is<ast::IntLiteralExpression>());
+ EXPECT_EQ(values[1]->As<ast::IntLiteralExpression>()->value, 3);
+ EXPECT_EQ(values[1]->As<ast::IntLiteralExpression>()->suffix,
+ ast::IntLiteralExpression::Suffix::kNone);
+
+ ASSERT_TRUE(values[2]->Is<ast::IntLiteralExpression>());
+ EXPECT_EQ(values[2]->As<ast::IntLiteralExpression>()->value, 4);
+ EXPECT_EQ(values[2]->As<ast::IntLiteralExpression>()->suffix,
+ ast::IntLiteralExpression::Suffix::kNone);
+
+ auto* body = f->body;
+ ASSERT_EQ(body->statements.size(), 1u);
+ EXPECT_TRUE(body->statements[0]->Is<ast::ReturnStatement>());
}
TEST_F(ParserImplTest, FunctionDecl_AttributeList_MultipleEntries) {
- auto p = parser(R"(
-@workgroup_size(2, 3, 4) @stage(compute)
+ auto p = parser(R"(
+@workgroup_size(2, 3, 4) @compute
fn main() { return; })");
- auto attrs = p->attribute_list();
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_FALSE(attrs.errored);
- ASSERT_TRUE(attrs.matched);
- auto f = p->function_decl(attrs.value);
- EXPECT_FALSE(p->has_error()) << p->error();
- EXPECT_FALSE(f.errored);
- EXPECT_TRUE(f.matched);
- ASSERT_NE(f.value, nullptr);
-
- EXPECT_EQ(f->symbol, p->builder().Symbols().Get("main"));
- ASSERT_NE(f->return_type, nullptr);
- EXPECT_TRUE(f->return_type->Is<ast::Void>());
- ASSERT_EQ(f->params.size(), 0u);
-
- auto& attributes = f->attributes;
- ASSERT_EQ(attributes.size(), 2u);
-
- ASSERT_TRUE(attributes[0]->Is<ast::WorkgroupAttribute>());
- auto values = attributes[0]->As<ast::WorkgroupAttribute>()->Values();
-
- ASSERT_TRUE(values[0]->Is<ast::IntLiteralExpression>());
- EXPECT_EQ(values[0]->As<ast::IntLiteralExpression>()->ValueAsU32(), 2u);
-
- ASSERT_TRUE(values[1]->Is<ast::IntLiteralExpression>());
- EXPECT_EQ(values[1]->As<ast::IntLiteralExpression>()->ValueAsU32(), 3u);
-
- ASSERT_TRUE(values[2]->Is<ast::IntLiteralExpression>());
- EXPECT_EQ(values[2]->As<ast::IntLiteralExpression>()->ValueAsU32(), 4u);
-
- ASSERT_TRUE(attributes[1]->Is<ast::StageAttribute>());
- EXPECT_EQ(attributes[1]->As<ast::StageAttribute>()->stage,
- ast::PipelineStage::kCompute);
-
- auto* body = f->body;
- ASSERT_EQ(body->statements.size(), 1u);
- EXPECT_TRUE(body->statements[0]->Is<ast::ReturnStatement>());
+ auto attrs = p->attribute_list();
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_FALSE(attrs.errored);
+ ASSERT_TRUE(attrs.matched);
+ auto f = p->function_decl(attrs.value);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ EXPECT_FALSE(f.errored);
+ EXPECT_TRUE(f.matched);
+ ASSERT_NE(f.value, nullptr);
+
+ EXPECT_EQ(f->symbol, p->builder().Symbols().Get("main"));
+ ASSERT_NE(f->return_type, nullptr);
+ EXPECT_TRUE(f->return_type->Is<ast::Void>());
+ ASSERT_EQ(f->params.size(), 0u);
+
+ auto& attributes = f->attributes;
+ ASSERT_EQ(attributes.size(), 2u);
+
+ ASSERT_TRUE(attributes[0]->Is<ast::WorkgroupAttribute>());
+ auto values = attributes[0]->As<ast::WorkgroupAttribute>()->Values();
+
+ ASSERT_TRUE(values[0]->Is<ast::IntLiteralExpression>());
+ EXPECT_EQ(values[0]->As<ast::IntLiteralExpression>()->value, 2);
+ EXPECT_EQ(values[0]->As<ast::IntLiteralExpression>()->suffix,
+ ast::IntLiteralExpression::Suffix::kNone);
+
+ ASSERT_TRUE(values[1]->Is<ast::IntLiteralExpression>());
+ EXPECT_EQ(values[1]->As<ast::IntLiteralExpression>()->value, 3);
+ EXPECT_EQ(values[1]->As<ast::IntLiteralExpression>()->suffix,
+ ast::IntLiteralExpression::Suffix::kNone);
+
+ ASSERT_TRUE(values[2]->Is<ast::IntLiteralExpression>());
+ EXPECT_EQ(values[2]->As<ast::IntLiteralExpression>()->value, 4);
+ EXPECT_EQ(values[2]->As<ast::IntLiteralExpression>()->suffix,
+ ast::IntLiteralExpression::Suffix::kNone);
+
+ ASSERT_TRUE(attributes[1]->Is<ast::StageAttribute>());
+ EXPECT_EQ(attributes[1]->As<ast::StageAttribute>()->stage, ast::PipelineStage::kCompute);
+
+ auto* body = f->body;
+ ASSERT_EQ(body->statements.size(), 1u);
+ EXPECT_TRUE(body->statements[0]->Is<ast::ReturnStatement>());
}
TEST_F(ParserImplTest, FunctionDecl_AttributeList_MultipleLists) {
- auto p = parser(R"(
+ auto p = parser(R"(
@workgroup_size(2, 3, 4)
-@stage(compute)
+@compute
fn main() { return; })");
- auto attributes = p->attribute_list();
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_FALSE(attributes.errored);
- ASSERT_TRUE(attributes.matched);
- auto f = p->function_decl(attributes.value);
- EXPECT_FALSE(p->has_error()) << p->error();
- EXPECT_FALSE(f.errored);
- EXPECT_TRUE(f.matched);
- ASSERT_NE(f.value, nullptr);
-
- EXPECT_EQ(f->symbol, p->builder().Symbols().Get("main"));
- ASSERT_NE(f->return_type, nullptr);
- EXPECT_TRUE(f->return_type->Is<ast::Void>());
- ASSERT_EQ(f->params.size(), 0u);
-
- auto& attrs = f->attributes;
- ASSERT_EQ(attrs.size(), 2u);
-
- ASSERT_TRUE(attrs[0]->Is<ast::WorkgroupAttribute>());
- auto values = attrs[0]->As<ast::WorkgroupAttribute>()->Values();
-
- ASSERT_TRUE(values[0]->Is<ast::IntLiteralExpression>());
- EXPECT_EQ(values[0]->As<ast::IntLiteralExpression>()->ValueAsU32(), 2u);
-
- ASSERT_TRUE(values[1]->Is<ast::IntLiteralExpression>());
- EXPECT_EQ(values[1]->As<ast::IntLiteralExpression>()->ValueAsU32(), 3u);
-
- ASSERT_TRUE(values[2]->Is<ast::IntLiteralExpression>());
- EXPECT_EQ(values[2]->As<ast::IntLiteralExpression>()->ValueAsU32(), 4u);
-
- ASSERT_TRUE(attrs[1]->Is<ast::StageAttribute>());
- EXPECT_EQ(attrs[1]->As<ast::StageAttribute>()->stage,
- ast::PipelineStage::kCompute);
-
- auto* body = f->body;
- ASSERT_EQ(body->statements.size(), 1u);
- EXPECT_TRUE(body->statements[0]->Is<ast::ReturnStatement>());
+ auto attributes = p->attribute_list();
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_FALSE(attributes.errored);
+ ASSERT_TRUE(attributes.matched);
+ auto f = p->function_decl(attributes.value);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ EXPECT_FALSE(f.errored);
+ EXPECT_TRUE(f.matched);
+ ASSERT_NE(f.value, nullptr);
+
+ EXPECT_EQ(f->symbol, p->builder().Symbols().Get("main"));
+ ASSERT_NE(f->return_type, nullptr);
+ EXPECT_TRUE(f->return_type->Is<ast::Void>());
+ ASSERT_EQ(f->params.size(), 0u);
+
+ auto& attrs = f->attributes;
+ ASSERT_EQ(attrs.size(), 2u);
+
+ ASSERT_TRUE(attrs[0]->Is<ast::WorkgroupAttribute>());
+ auto values = attrs[0]->As<ast::WorkgroupAttribute>()->Values();
+
+ ASSERT_TRUE(values[0]->Is<ast::IntLiteralExpression>());
+ EXPECT_EQ(values[0]->As<ast::IntLiteralExpression>()->value, 2);
+ EXPECT_EQ(values[0]->As<ast::IntLiteralExpression>()->suffix,
+ ast::IntLiteralExpression::Suffix::kNone);
+
+ ASSERT_TRUE(values[1]->Is<ast::IntLiteralExpression>());
+ EXPECT_EQ(values[1]->As<ast::IntLiteralExpression>()->value, 3);
+ EXPECT_EQ(values[1]->As<ast::IntLiteralExpression>()->suffix,
+ ast::IntLiteralExpression::Suffix::kNone);
+
+ ASSERT_TRUE(values[2]->Is<ast::IntLiteralExpression>());
+ EXPECT_EQ(values[2]->As<ast::IntLiteralExpression>()->value, 4);
+ EXPECT_EQ(values[2]->As<ast::IntLiteralExpression>()->suffix,
+ ast::IntLiteralExpression::Suffix::kNone);
+
+ ASSERT_TRUE(attrs[1]->Is<ast::StageAttribute>());
+ EXPECT_EQ(attrs[1]->As<ast::StageAttribute>()->stage, ast::PipelineStage::kCompute);
+
+ auto* body = f->body;
+ ASSERT_EQ(body->statements.size(), 1u);
+ EXPECT_TRUE(body->statements[0]->Is<ast::ReturnStatement>());
}
TEST_F(ParserImplTest, FunctionDecl_ReturnTypeAttributeList) {
- auto p = parser("fn main() -> @location(1) f32 { return 1.0; }");
- auto attrs = p->attribute_list();
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_FALSE(attrs.errored);
- EXPECT_FALSE(attrs.matched);
- auto f = p->function_decl(attrs.value);
- EXPECT_FALSE(p->has_error()) << p->error();
- EXPECT_FALSE(f.errored);
- EXPECT_TRUE(f.matched);
- ASSERT_NE(f.value, nullptr);
-
- EXPECT_EQ(f->symbol, p->builder().Symbols().Get("main"));
- ASSERT_NE(f->return_type, nullptr);
- EXPECT_TRUE(f->return_type->Is<ast::F32>());
- ASSERT_EQ(f->params.size(), 0u);
-
- auto& attributes = f->attributes;
- EXPECT_EQ(attributes.size(), 0u);
-
- auto& ret_type_attributes = f->return_type_attributes;
- ASSERT_EQ(ret_type_attributes.size(), 1u);
- auto* loc = ret_type_attributes[0]->As<ast::LocationAttribute>();
- ASSERT_TRUE(loc != nullptr);
- EXPECT_EQ(loc->value, 1u);
-
- auto* body = f->body;
- ASSERT_EQ(body->statements.size(), 1u);
- EXPECT_TRUE(body->statements[0]->Is<ast::ReturnStatement>());
+ auto p = parser("fn main() -> @location(1) f32 { return 1.0; }");
+ auto attrs = p->attribute_list();
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_FALSE(attrs.errored);
+ EXPECT_FALSE(attrs.matched);
+ auto f = p->function_decl(attrs.value);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ EXPECT_FALSE(f.errored);
+ EXPECT_TRUE(f.matched);
+ ASSERT_NE(f.value, nullptr);
+
+ EXPECT_EQ(f->symbol, p->builder().Symbols().Get("main"));
+ ASSERT_NE(f->return_type, nullptr);
+ EXPECT_TRUE(f->return_type->Is<ast::F32>());
+ ASSERT_EQ(f->params.size(), 0u);
+
+ auto& attributes = f->attributes;
+ EXPECT_EQ(attributes.size(), 0u);
+
+ auto& ret_type_attributes = f->return_type_attributes;
+ ASSERT_EQ(ret_type_attributes.size(), 1u);
+ auto* loc = ret_type_attributes[0]->As<ast::LocationAttribute>();
+ ASSERT_TRUE(loc != nullptr);
+ EXPECT_EQ(loc->value, 1u);
+
+ auto* body = f->body;
+ ASSERT_EQ(body->statements.size(), 1u);
+ EXPECT_TRUE(body->statements[0]->Is<ast::ReturnStatement>());
}
TEST_F(ParserImplTest, FunctionDecl_InvalidHeader) {
- auto p = parser("fn main() -> { }");
- auto attrs = p->attribute_list();
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_FALSE(attrs.errored);
- EXPECT_FALSE(attrs.matched);
- auto f = p->function_decl(attrs.value);
- EXPECT_TRUE(f.errored);
- EXPECT_FALSE(f.matched);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(f.value, nullptr);
- EXPECT_EQ(p->error(), "1:14: unable to determine function return type");
+ auto p = parser("fn main() -> { }");
+ auto attrs = p->attribute_list();
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_FALSE(attrs.errored);
+ EXPECT_FALSE(attrs.matched);
+ auto f = p->function_decl(attrs.value);
+ EXPECT_TRUE(f.errored);
+ EXPECT_FALSE(f.matched);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(f.value, nullptr);
+ EXPECT_EQ(p->error(), "1:14: unable to determine function return type");
}
TEST_F(ParserImplTest, FunctionDecl_InvalidBody) {
- auto p = parser("fn main() { return }");
- auto attrs = p->attribute_list();
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_FALSE(attrs.errored);
- EXPECT_FALSE(attrs.matched);
- auto f = p->function_decl(attrs.value);
- EXPECT_TRUE(f.errored);
- EXPECT_FALSE(f.matched);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(f.value, nullptr);
- EXPECT_EQ(p->error(), "1:20: expected ';' for return statement");
+ auto p = parser("fn main() { return }");
+ auto attrs = p->attribute_list();
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_FALSE(attrs.errored);
+ EXPECT_FALSE(attrs.matched);
+ auto f = p->function_decl(attrs.value);
+ EXPECT_TRUE(f.errored);
+ EXPECT_FALSE(f.matched);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(f.value, nullptr);
+ EXPECT_EQ(p->error(), "1:20: expected ';' for return statement");
}
TEST_F(ParserImplTest, FunctionDecl_MissingLeftBrace) {
- auto p = parser("fn main() return; }");
- auto attrs = p->attribute_list();
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_FALSE(attrs.errored);
- EXPECT_FALSE(attrs.matched);
- auto f = p->function_decl(attrs.value);
- EXPECT_TRUE(f.errored);
- EXPECT_FALSE(f.matched);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(f.value, nullptr);
- EXPECT_EQ(p->error(), "1:11: expected '{'");
+ auto p = parser("fn main() return; }");
+ auto attrs = p->attribute_list();
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_FALSE(attrs.errored);
+ EXPECT_FALSE(attrs.matched);
+ auto f = p->function_decl(attrs.value);
+ EXPECT_TRUE(f.errored);
+ EXPECT_FALSE(f.matched);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(f.value, nullptr);
+ EXPECT_EQ(p->error(), "1:11: expected '{'");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_function_header_test.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_function_header_test.cc
index c59a7e9eda2..6f2e6cbf976 100644
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_function_header_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_function_header_test.cc
@@ -18,113 +18,113 @@ namespace tint::reader::wgsl {
namespace {
TEST_F(ParserImplTest, FunctionHeader) {
- auto p = parser("fn main(a : i32, b: f32)");
- auto f = p->function_header();
- ASSERT_FALSE(p->has_error()) << p->error();
- EXPECT_TRUE(f.matched);
- EXPECT_FALSE(f.errored);
-
- EXPECT_EQ(f->name, "main");
- ASSERT_EQ(f->params.size(), 2u);
- EXPECT_EQ(f->params[0]->symbol, p->builder().Symbols().Get("a"));
- EXPECT_EQ(f->params[1]->symbol, p->builder().Symbols().Get("b"));
- EXPECT_TRUE(f->return_type->Is<ast::Void>());
+ auto p = parser("fn main(a : i32, b: f32)");
+ auto f = p->function_header();
+ ASSERT_FALSE(p->has_error()) << p->error();
+ EXPECT_TRUE(f.matched);
+ EXPECT_FALSE(f.errored);
+
+ EXPECT_EQ(f->name, "main");
+ ASSERT_EQ(f->params.size(), 2u);
+ EXPECT_EQ(f->params[0]->symbol, p->builder().Symbols().Get("a"));
+ EXPECT_EQ(f->params[1]->symbol, p->builder().Symbols().Get("b"));
+ EXPECT_TRUE(f->return_type->Is<ast::Void>());
}
TEST_F(ParserImplTest, FunctionHeader_TrailingComma) {
- auto p = parser("fn main(a :i32,)");
- auto f = p->function_header();
- EXPECT_TRUE(f.matched);
- EXPECT_FALSE(f.errored);
-
- EXPECT_EQ(f->name, "main");
- ASSERT_EQ(f->params.size(), 1u);
- EXPECT_EQ(f->params[0]->symbol, p->builder().Symbols().Get("a"));
- EXPECT_TRUE(f->return_type->Is<ast::Void>());
+ auto p = parser("fn main(a :i32,)");
+ auto f = p->function_header();
+ EXPECT_TRUE(f.matched);
+ EXPECT_FALSE(f.errored);
+
+ EXPECT_EQ(f->name, "main");
+ ASSERT_EQ(f->params.size(), 1u);
+ EXPECT_EQ(f->params[0]->symbol, p->builder().Symbols().Get("a"));
+ EXPECT_TRUE(f->return_type->Is<ast::Void>());
}
TEST_F(ParserImplTest, FunctionHeader_AttributeReturnType) {
- auto p = parser("fn main() -> @location(1) f32");
- auto f = p->function_header();
- ASSERT_FALSE(p->has_error()) << p->error();
- EXPECT_TRUE(f.matched);
- EXPECT_FALSE(f.errored);
-
- EXPECT_EQ(f->name, "main");
- EXPECT_EQ(f->params.size(), 0u);
- EXPECT_TRUE(f->return_type->Is<ast::F32>());
- ASSERT_EQ(f->return_type_attributes.size(), 1u);
- auto* loc = f->return_type_attributes[0]->As<ast::LocationAttribute>();
- ASSERT_TRUE(loc != nullptr);
- EXPECT_EQ(loc->value, 1u);
+ auto p = parser("fn main() -> @location(1) f32");
+ auto f = p->function_header();
+ ASSERT_FALSE(p->has_error()) << p->error();
+ EXPECT_TRUE(f.matched);
+ EXPECT_FALSE(f.errored);
+
+ EXPECT_EQ(f->name, "main");
+ EXPECT_EQ(f->params.size(), 0u);
+ EXPECT_TRUE(f->return_type->Is<ast::F32>());
+ ASSERT_EQ(f->return_type_attributes.size(), 1u);
+ auto* loc = f->return_type_attributes[0]->As<ast::LocationAttribute>();
+ ASSERT_TRUE(loc != nullptr);
+ EXPECT_EQ(loc->value, 1u);
}
TEST_F(ParserImplTest, FunctionHeader_InvariantReturnType) {
- auto p = parser("fn main() -> @invariant f32");
- auto f = p->function_header();
- ASSERT_FALSE(p->has_error()) << p->error();
- EXPECT_TRUE(f.matched);
- EXPECT_FALSE(f.errored);
-
- EXPECT_EQ(f->name, "main");
- EXPECT_EQ(f->params.size(), 0u);
- EXPECT_TRUE(f->return_type->Is<ast::F32>());
- ASSERT_EQ(f->return_type_attributes.size(), 1u);
- EXPECT_TRUE(f->return_type_attributes[0]->Is<ast::InvariantAttribute>());
+ auto p = parser("fn main() -> @invariant f32");
+ auto f = p->function_header();
+ ASSERT_FALSE(p->has_error()) << p->error();
+ EXPECT_TRUE(f.matched);
+ EXPECT_FALSE(f.errored);
+
+ EXPECT_EQ(f->name, "main");
+ EXPECT_EQ(f->params.size(), 0u);
+ EXPECT_TRUE(f->return_type->Is<ast::F32>());
+ ASSERT_EQ(f->return_type_attributes.size(), 1u);
+ EXPECT_TRUE(f->return_type_attributes[0]->Is<ast::InvariantAttribute>());
}
TEST_F(ParserImplTest, FunctionHeader_MissingIdent) {
- auto p = parser("fn ()");
- auto f = p->function_header();
- EXPECT_FALSE(f.matched);
- EXPECT_TRUE(f.errored);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:4: expected identifier for function declaration");
+ auto p = parser("fn ()");
+ auto f = p->function_header();
+ EXPECT_FALSE(f.matched);
+ EXPECT_TRUE(f.errored);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:4: expected identifier for function declaration");
}
TEST_F(ParserImplTest, FunctionHeader_InvalidIdent) {
- auto p = parser("fn 133main() -> i32");
- auto f = p->function_header();
- EXPECT_FALSE(f.matched);
- EXPECT_TRUE(f.errored);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:4: expected identifier for function declaration");
+ auto p = parser("fn 133main() -> i32");
+ auto f = p->function_header();
+ EXPECT_FALSE(f.matched);
+ EXPECT_TRUE(f.errored);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:4: expected identifier for function declaration");
}
TEST_F(ParserImplTest, FunctionHeader_MissingParenLeft) {
- auto p = parser("fn main) -> i32");
- auto f = p->function_header();
- EXPECT_FALSE(f.matched);
- EXPECT_TRUE(f.errored);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:8: expected '(' for function declaration");
+ auto p = parser("fn main) -> i32");
+ auto f = p->function_header();
+ EXPECT_FALSE(f.matched);
+ EXPECT_TRUE(f.errored);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:8: expected '(' for function declaration");
}
TEST_F(ParserImplTest, FunctionHeader_InvalidParamList) {
- auto p = parser("fn main(a :i32, ,) -> i32");
- auto f = p->function_header();
- EXPECT_FALSE(f.matched);
- EXPECT_TRUE(f.errored);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:17: expected ')' for function declaration");
+ auto p = parser("fn main(a :i32, ,) -> i32");
+ auto f = p->function_header();
+ EXPECT_FALSE(f.matched);
+ EXPECT_TRUE(f.errored);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:17: expected ')' for function declaration");
}
TEST_F(ParserImplTest, FunctionHeader_MissingParenRight) {
- auto p = parser("fn main( -> i32");
- auto f = p->function_header();
- EXPECT_FALSE(f.matched);
- EXPECT_TRUE(f.errored);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:10: expected ')' for function declaration");
+ auto p = parser("fn main( -> i32");
+ auto f = p->function_header();
+ EXPECT_FALSE(f.matched);
+ EXPECT_TRUE(f.errored);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:10: expected ')' for function declaration");
}
TEST_F(ParserImplTest, FunctionHeader_MissingReturnType) {
- auto p = parser("fn main() ->");
- auto f = p->function_header();
- EXPECT_FALSE(f.matched);
- EXPECT_TRUE(f.errored);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:13: unable to determine function return type");
+ auto p = parser("fn main() ->");
+ auto f = p->function_header();
+ EXPECT_FALSE(f.matched);
+ EXPECT_TRUE(f.errored);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:13: unable to determine function return type");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_global_constant_decl_test.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_global_constant_decl_test.cc
index bbf53239ae8..7a5bb452d75 100644
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_global_constant_decl_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_global_constant_decl_test.cc
@@ -19,173 +19,171 @@ namespace tint::reader::wgsl {
namespace {
TEST_F(ParserImplTest, GlobalConstantDecl) {
- auto p = parser("let a : f32 = 1.");
- auto attrs = p->attribute_list();
- EXPECT_FALSE(attrs.errored);
- EXPECT_FALSE(attrs.matched);
- auto e = p->global_constant_decl(attrs.value);
- EXPECT_FALSE(p->has_error()) << p->error();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- ASSERT_NE(e.value, nullptr);
-
- EXPECT_TRUE(e->is_const);
- EXPECT_FALSE(e->is_overridable);
- EXPECT_EQ(e->symbol, p->builder().Symbols().Get("a"));
- ASSERT_NE(e->type, nullptr);
- EXPECT_TRUE(e->type->Is<ast::F32>());
-
- EXPECT_EQ(e->source.range.begin.line, 1u);
- EXPECT_EQ(e->source.range.begin.column, 5u);
- EXPECT_EQ(e->source.range.end.line, 1u);
- EXPECT_EQ(e->source.range.end.column, 6u);
-
- ASSERT_NE(e->constructor, nullptr);
- EXPECT_TRUE(e->constructor->Is<ast::LiteralExpression>());
+ auto p = parser("let a : f32 = 1.");
+ auto attrs = p->attribute_list();
+ EXPECT_FALSE(attrs.errored);
+ EXPECT_FALSE(attrs.matched);
+ auto e = p->global_constant_decl(attrs.value);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ ASSERT_NE(e.value, nullptr);
+
+ EXPECT_TRUE(e->is_const);
+ EXPECT_FALSE(e->is_overridable);
+ EXPECT_EQ(e->symbol, p->builder().Symbols().Get("a"));
+ ASSERT_NE(e->type, nullptr);
+ EXPECT_TRUE(e->type->Is<ast::F32>());
+
+ EXPECT_EQ(e->source.range.begin.line, 1u);
+ EXPECT_EQ(e->source.range.begin.column, 5u);
+ EXPECT_EQ(e->source.range.end.line, 1u);
+ EXPECT_EQ(e->source.range.end.column, 6u);
+
+ ASSERT_NE(e->constructor, nullptr);
+ EXPECT_TRUE(e->constructor->Is<ast::LiteralExpression>());
}
TEST_F(ParserImplTest, GlobalConstantDecl_Inferred) {
- auto p = parser("let a = 1.");
- auto attrs = p->attribute_list();
- EXPECT_FALSE(attrs.errored);
- EXPECT_FALSE(attrs.matched);
- auto e = p->global_constant_decl(attrs.value);
- EXPECT_FALSE(p->has_error()) << p->error();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- ASSERT_NE(e.value, nullptr);
-
- EXPECT_TRUE(e->is_const);
- EXPECT_FALSE(e->is_overridable);
- EXPECT_EQ(e->symbol, p->builder().Symbols().Get("a"));
- EXPECT_EQ(e->type, nullptr);
-
- EXPECT_EQ(e->source.range.begin.line, 1u);
- EXPECT_EQ(e->source.range.begin.column, 5u);
- EXPECT_EQ(e->source.range.end.line, 1u);
- EXPECT_EQ(e->source.range.end.column, 6u);
-
- ASSERT_NE(e->constructor, nullptr);
- EXPECT_TRUE(e->constructor->Is<ast::LiteralExpression>());
+ auto p = parser("let a = 1.");
+ auto attrs = p->attribute_list();
+ EXPECT_FALSE(attrs.errored);
+ EXPECT_FALSE(attrs.matched);
+ auto e = p->global_constant_decl(attrs.value);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ ASSERT_NE(e.value, nullptr);
+
+ EXPECT_TRUE(e->is_const);
+ EXPECT_FALSE(e->is_overridable);
+ EXPECT_EQ(e->symbol, p->builder().Symbols().Get("a"));
+ EXPECT_EQ(e->type, nullptr);
+
+ EXPECT_EQ(e->source.range.begin.line, 1u);
+ EXPECT_EQ(e->source.range.begin.column, 5u);
+ EXPECT_EQ(e->source.range.end.line, 1u);
+ EXPECT_EQ(e->source.range.end.column, 6u);
+
+ ASSERT_NE(e->constructor, nullptr);
+ EXPECT_TRUE(e->constructor->Is<ast::LiteralExpression>());
}
TEST_F(ParserImplTest, GlobalConstantDecl_InvalidExpression) {
- auto p = parser("let a : f32 = if (a) {}");
- auto attrs = p->attribute_list();
- EXPECT_FALSE(attrs.errored);
- EXPECT_FALSE(attrs.matched);
- auto e = p->global_constant_decl(attrs.value);
- EXPECT_TRUE(p->has_error());
- EXPECT_TRUE(e.errored);
- EXPECT_FALSE(e.matched);
- EXPECT_EQ(e.value, nullptr);
- EXPECT_EQ(p->error(), "1:15: invalid type for const_expr");
+ auto p = parser("let a : f32 = if (a) {}");
+ auto attrs = p->attribute_list();
+ EXPECT_FALSE(attrs.errored);
+ EXPECT_FALSE(attrs.matched);
+ auto e = p->global_constant_decl(attrs.value);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_TRUE(e.errored);
+ EXPECT_FALSE(e.matched);
+ EXPECT_EQ(e.value, nullptr);
+ EXPECT_EQ(p->error(), "1:15: invalid type for const_expr");
}
TEST_F(ParserImplTest, GlobalConstantDecl_MissingExpression) {
- auto p = parser("let a : f32 =");
- auto attrs = p->attribute_list();
- EXPECT_FALSE(attrs.errored);
- EXPECT_FALSE(attrs.matched);
- auto e = p->global_constant_decl(attrs.value);
- EXPECT_TRUE(p->has_error());
- EXPECT_TRUE(e.errored);
- EXPECT_FALSE(e.matched);
- EXPECT_EQ(e.value, nullptr);
- EXPECT_EQ(p->error(), "1:14: unable to parse const_expr");
+ auto p = parser("let a : f32 =");
+ auto attrs = p->attribute_list();
+ EXPECT_FALSE(attrs.errored);
+ EXPECT_FALSE(attrs.matched);
+ auto e = p->global_constant_decl(attrs.value);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_TRUE(e.errored);
+ EXPECT_FALSE(e.matched);
+ EXPECT_EQ(e.value, nullptr);
+ EXPECT_EQ(p->error(), "1:14: unable to parse const_expr");
}
TEST_F(ParserImplTest, GlobalConstantDec_Override_WithId) {
- auto p = parser("@id(7) override a : f32 = 1.");
- auto attrs = p->attribute_list();
- EXPECT_FALSE(attrs.errored);
- EXPECT_TRUE(attrs.matched);
-
- auto e = p->global_constant_decl(attrs.value);
- EXPECT_FALSE(p->has_error()) << p->error();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- ASSERT_NE(e.value, nullptr);
-
- EXPECT_TRUE(e->is_const);
- EXPECT_TRUE(e->is_overridable);
- EXPECT_EQ(e->symbol, p->builder().Symbols().Get("a"));
- ASSERT_NE(e->type, nullptr);
- EXPECT_TRUE(e->type->Is<ast::F32>());
-
- EXPECT_EQ(e->source.range.begin.line, 1u);
- EXPECT_EQ(e->source.range.begin.column, 17u);
- EXPECT_EQ(e->source.range.end.line, 1u);
- EXPECT_EQ(e->source.range.end.column, 18u);
-
- ASSERT_NE(e->constructor, nullptr);
- EXPECT_TRUE(e->constructor->Is<ast::LiteralExpression>());
-
- auto* override_attr =
- ast::GetAttribute<ast::IdAttribute>(e.value->attributes);
- ASSERT_NE(override_attr, nullptr);
- EXPECT_EQ(override_attr->value, 7u);
+ auto p = parser("@id(7) override a : f32 = 1.");
+ auto attrs = p->attribute_list();
+ EXPECT_FALSE(attrs.errored);
+ EXPECT_TRUE(attrs.matched);
+
+ auto e = p->global_constant_decl(attrs.value);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ ASSERT_NE(e.value, nullptr);
+
+ EXPECT_TRUE(e->is_const);
+ EXPECT_TRUE(e->is_overridable);
+ EXPECT_EQ(e->symbol, p->builder().Symbols().Get("a"));
+ ASSERT_NE(e->type, nullptr);
+ EXPECT_TRUE(e->type->Is<ast::F32>());
+
+ EXPECT_EQ(e->source.range.begin.line, 1u);
+ EXPECT_EQ(e->source.range.begin.column, 17u);
+ EXPECT_EQ(e->source.range.end.line, 1u);
+ EXPECT_EQ(e->source.range.end.column, 18u);
+
+ ASSERT_NE(e->constructor, nullptr);
+ EXPECT_TRUE(e->constructor->Is<ast::LiteralExpression>());
+
+ auto* override_attr = ast::GetAttribute<ast::IdAttribute>(e.value->attributes);
+ ASSERT_NE(override_attr, nullptr);
+ EXPECT_EQ(override_attr->value, 7u);
}
TEST_F(ParserImplTest, GlobalConstantDec_Override_WithoutId) {
- auto p = parser("override a : f32 = 1.");
- auto attrs = p->attribute_list();
- EXPECT_FALSE(attrs.errored);
- EXPECT_FALSE(attrs.matched);
-
- auto e = p->global_constant_decl(attrs.value);
- EXPECT_FALSE(p->has_error()) << p->error();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- ASSERT_NE(e.value, nullptr);
-
- EXPECT_TRUE(e->is_const);
- EXPECT_TRUE(e->is_overridable);
- EXPECT_EQ(e->symbol, p->builder().Symbols().Get("a"));
- ASSERT_NE(e->type, nullptr);
- EXPECT_TRUE(e->type->Is<ast::F32>());
-
- EXPECT_EQ(e->source.range.begin.line, 1u);
- EXPECT_EQ(e->source.range.begin.column, 10u);
- EXPECT_EQ(e->source.range.end.line, 1u);
- EXPECT_EQ(e->source.range.end.column, 11u);
-
- ASSERT_NE(e->constructor, nullptr);
- EXPECT_TRUE(e->constructor->Is<ast::LiteralExpression>());
-
- auto* id_attr = ast::GetAttribute<ast::IdAttribute>(e.value->attributes);
- ASSERT_EQ(id_attr, nullptr);
+ auto p = parser("override a : f32 = 1.");
+ auto attrs = p->attribute_list();
+ EXPECT_FALSE(attrs.errored);
+ EXPECT_FALSE(attrs.matched);
+
+ auto e = p->global_constant_decl(attrs.value);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ ASSERT_NE(e.value, nullptr);
+
+ EXPECT_TRUE(e->is_const);
+ EXPECT_TRUE(e->is_overridable);
+ EXPECT_EQ(e->symbol, p->builder().Symbols().Get("a"));
+ ASSERT_NE(e->type, nullptr);
+ EXPECT_TRUE(e->type->Is<ast::F32>());
+
+ EXPECT_EQ(e->source.range.begin.line, 1u);
+ EXPECT_EQ(e->source.range.begin.column, 10u);
+ EXPECT_EQ(e->source.range.end.line, 1u);
+ EXPECT_EQ(e->source.range.end.column, 11u);
+
+ ASSERT_NE(e->constructor, nullptr);
+ EXPECT_TRUE(e->constructor->Is<ast::LiteralExpression>());
+
+ auto* id_attr = ast::GetAttribute<ast::IdAttribute>(e.value->attributes);
+ ASSERT_EQ(id_attr, nullptr);
}
TEST_F(ParserImplTest, GlobalConstantDec_Override_MissingId) {
- auto p = parser("@id() override a : f32 = 1.");
- auto attrs = p->attribute_list();
- EXPECT_TRUE(attrs.errored);
- EXPECT_FALSE(attrs.matched);
-
- auto e = p->global_constant_decl(attrs.value);
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- ASSERT_NE(e.value, nullptr);
-
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(),
- "1:5: expected signed integer literal for id attribute");
+ auto p = parser("@id() override a : f32 = 1.");
+ auto attrs = p->attribute_list();
+ EXPECT_TRUE(attrs.errored);
+ EXPECT_FALSE(attrs.matched);
+
+ auto e = p->global_constant_decl(attrs.value);
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ ASSERT_NE(e.value, nullptr);
+
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:5: expected signed integer literal for id attribute");
}
TEST_F(ParserImplTest, GlobalConstantDec_Override_InvalidId) {
- auto p = parser("@id(-7) override a : f32 = 1.");
- auto attrs = p->attribute_list();
- EXPECT_TRUE(attrs.errored);
- EXPECT_FALSE(attrs.matched);
-
- auto e = p->global_constant_decl(attrs.value);
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- ASSERT_NE(e.value, nullptr);
-
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:5: id attribute must be positive");
+ auto p = parser("@id(-7) override a : f32 = 1.");
+ auto attrs = p->attribute_list();
+ EXPECT_TRUE(attrs.errored);
+ EXPECT_FALSE(attrs.matched);
+
+ auto e = p->global_constant_decl(attrs.value);
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ ASSERT_NE(e.value, nullptr);
+
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:5: id attribute must be positive");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_global_decl_test.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_global_decl_test.cc
index 562a2dee69e..e2c7d7ad6d6 100644
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_global_decl_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_global_decl_test.cc
@@ -18,157 +18,163 @@ namespace tint::reader::wgsl {
namespace {
TEST_F(ParserImplTest, GlobalDecl_Semicolon) {
- auto p = parser(";");
- p->expect_global_decl();
- ASSERT_FALSE(p->has_error()) << p->error();
+ auto p = parser(";");
+ p->global_decl();
+ ASSERT_FALSE(p->has_error()) << p->error();
}
TEST_F(ParserImplTest, GlobalDecl_GlobalVariable) {
- auto p = parser("var<private> a : vec2<i32> = vec2<i32>(1, 2);");
- p->expect_global_decl();
- ASSERT_FALSE(p->has_error()) << p->error();
+ auto p = parser("var<private> a : vec2<i32> = vec2<i32>(1, 2);");
+ p->global_decl();
+ ASSERT_FALSE(p->has_error()) << p->error();
- auto program = p->program();
- ASSERT_EQ(program.AST().GlobalVariables().size(), 1u);
+ auto program = p->program();
+ ASSERT_EQ(program.AST().GlobalVariables().size(), 1u);
- auto* v = program.AST().GlobalVariables()[0];
- EXPECT_EQ(v->symbol, program.Symbols().Get("a"));
+ auto* v = program.AST().GlobalVariables()[0];
+ EXPECT_EQ(v->symbol, program.Symbols().Get("a"));
}
TEST_F(ParserImplTest, GlobalDecl_GlobalVariable_Inferred_Invalid) {
- auto p = parser("var<private> a = vec2<i32>(1, 2);");
- p->expect_global_decl();
- ASSERT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:16: expected ':' for variable declaration");
+ auto p = parser("var<private> a = vec2<i32>(1, 2);");
+ p->global_decl();
+ ASSERT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:16: expected ':' for variable declaration");
}
TEST_F(ParserImplTest, GlobalDecl_GlobalVariable_MissingSemicolon) {
- auto p = parser("var<private> a : vec2<i32>");
- p->expect_global_decl();
- ASSERT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:27: expected ';' for variable declaration");
+ auto p = parser("var<private> a : vec2<i32>");
+ p->global_decl();
+ ASSERT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:27: expected ';' for variable declaration");
}
TEST_F(ParserImplTest, GlobalDecl_GlobalConstant) {
- auto p = parser("let a : i32 = 2;");
- p->expect_global_decl();
- ASSERT_FALSE(p->has_error()) << p->error();
+ auto p = parser("let a : i32 = 2;");
+ p->global_decl();
+ ASSERT_FALSE(p->has_error()) << p->error();
- auto program = p->program();
- ASSERT_EQ(program.AST().GlobalVariables().size(), 1u);
+ auto program = p->program();
+ ASSERT_EQ(program.AST().GlobalVariables().size(), 1u);
- auto* v = program.AST().GlobalVariables()[0];
- EXPECT_EQ(v->symbol, program.Symbols().Get("a"));
+ auto* v = program.AST().GlobalVariables()[0];
+ EXPECT_EQ(v->symbol, program.Symbols().Get("a"));
}
TEST_F(ParserImplTest, GlobalDecl_GlobalConstant_Invalid) {
- auto p = parser("let a : vec2<i32> 1.0;");
- p->expect_global_decl();
- ASSERT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:19: expected ';' for let declaration");
+ auto p = parser("let a : vec2<i32> 1.0;");
+ p->global_decl();
+ ASSERT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:19: expected ';' for let declaration");
}
TEST_F(ParserImplTest, GlobalDecl_GlobalConstant_MissingSemicolon) {
- auto p = parser("let a : vec2<i32> = vec2<i32>(1, 2)");
- p->expect_global_decl();
- ASSERT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:36: expected ';' for let declaration");
+ auto p = parser("let a : vec2<i32> = vec2<i32>(1, 2)");
+ p->global_decl();
+ ASSERT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:36: expected ';' for let declaration");
}
TEST_F(ParserImplTest, GlobalDecl_TypeAlias) {
- auto p = parser("type A = i32;");
- p->expect_global_decl();
- ASSERT_FALSE(p->has_error()) << p->error();
+ auto p = parser("type A = i32;");
+ p->global_decl();
+ ASSERT_FALSE(p->has_error()) << p->error();
- auto program = p->program();
- ASSERT_EQ(program.AST().TypeDecls().size(), 1u);
- ASSERT_TRUE(program.AST().TypeDecls()[0]->Is<ast::Alias>());
- EXPECT_EQ(program.Symbols().NameFor(
- program.AST().TypeDecls()[0]->As<ast::Alias>()->name),
- "A");
+ auto program = p->program();
+ ASSERT_EQ(program.AST().TypeDecls().size(), 1u);
+ ASSERT_TRUE(program.AST().TypeDecls()[0]->Is<ast::Alias>());
+ EXPECT_EQ(program.Symbols().NameFor(program.AST().TypeDecls()[0]->As<ast::Alias>()->name), "A");
}
TEST_F(ParserImplTest, GlobalDecl_TypeAlias_StructIdent) {
- auto p = parser(R"(struct A {
+ auto p = parser(R"(struct A {
a : f32,
}
type B = A;)");
- p->expect_global_decl();
- p->expect_global_decl();
- ASSERT_FALSE(p->has_error()) << p->error();
+ p->global_decl();
+ p->global_decl();
+ ASSERT_FALSE(p->has_error()) << p->error();
- auto program = p->program();
- ASSERT_EQ(program.AST().TypeDecls().size(), 2u);
- ASSERT_TRUE(program.AST().TypeDecls()[0]->Is<ast::Struct>());
- auto* str = program.AST().TypeDecls()[0]->As<ast::Struct>();
- EXPECT_EQ(str->name, program.Symbols().Get("A"));
+ auto program = p->program();
+ ASSERT_EQ(program.AST().TypeDecls().size(), 2u);
+ ASSERT_TRUE(program.AST().TypeDecls()[0]->Is<ast::Struct>());
+ auto* str = program.AST().TypeDecls()[0]->As<ast::Struct>();
+ EXPECT_EQ(str->name, program.Symbols().Get("A"));
- ASSERT_TRUE(program.AST().TypeDecls()[1]->Is<ast::Alias>());
- auto* alias = program.AST().TypeDecls()[1]->As<ast::Alias>();
- EXPECT_EQ(alias->name, program.Symbols().Get("B"));
- auto* tn = alias->type->As<ast::TypeName>();
- EXPECT_NE(tn, nullptr);
- EXPECT_EQ(tn->name, str->name);
+ ASSERT_TRUE(program.AST().TypeDecls()[1]->Is<ast::Alias>());
+ auto* alias = program.AST().TypeDecls()[1]->As<ast::Alias>();
+ EXPECT_EQ(alias->name, program.Symbols().Get("B"));
+ auto* tn = alias->type->As<ast::TypeName>();
+ EXPECT_NE(tn, nullptr);
+ EXPECT_EQ(tn->name, str->name);
}
TEST_F(ParserImplTest, GlobalDecl_TypeAlias_MissingSemicolon) {
- auto p = parser("type A = i32");
- p->expect_global_decl();
- ASSERT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:13: expected ';' for type alias");
+ auto p = parser("type A = i32");
+ p->global_decl();
+ ASSERT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:13: expected ';' for type alias");
}
TEST_F(ParserImplTest, GlobalDecl_Function) {
- auto p = parser("fn main() { return; }");
- p->expect_global_decl();
- ASSERT_FALSE(p->has_error()) << p->error();
+ auto p = parser("fn main() { return; }");
+ p->global_decl();
+ ASSERT_FALSE(p->has_error()) << p->error();
- auto program = p->program();
- ASSERT_EQ(program.AST().Functions().size(), 1u);
- EXPECT_EQ(program.Symbols().NameFor(program.AST().Functions()[0]->symbol),
- "main");
+ auto program = p->program();
+ ASSERT_EQ(program.AST().Functions().size(), 1u);
+ EXPECT_EQ(program.Symbols().NameFor(program.AST().Functions()[0]->symbol), "main");
}
TEST_F(ParserImplTest, GlobalDecl_Function_WithAttribute) {
- auto p = parser("@workgroup_size(2) fn main() { return; }");
- p->expect_global_decl();
- ASSERT_FALSE(p->has_error()) << p->error();
+ auto p = parser("@workgroup_size(2) fn main() { return; }");
+ p->global_decl();
+ ASSERT_FALSE(p->has_error()) << p->error();
- auto program = p->program();
- ASSERT_EQ(program.AST().Functions().size(), 1u);
- EXPECT_EQ(program.Symbols().NameFor(program.AST().Functions()[0]->symbol),
- "main");
+ auto program = p->program();
+ ASSERT_EQ(program.AST().Functions().size(), 1u);
+ EXPECT_EQ(program.Symbols().NameFor(program.AST().Functions()[0]->symbol), "main");
}
TEST_F(ParserImplTest, GlobalDecl_Function_Invalid) {
- auto p = parser("fn main() -> { return; }");
- p->expect_global_decl();
- ASSERT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:14: unable to determine function return type");
+ auto p = parser("fn main() -> { return; }");
+ p->global_decl();
+ ASSERT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:14: unable to determine function return type");
}
TEST_F(ParserImplTest, GlobalDecl_ParsesStruct) {
- auto p = parser("struct A { b: i32, c: f32}");
- p->expect_global_decl();
- ASSERT_FALSE(p->has_error()) << p->error();
+ auto p = parser("struct A { b: i32, c: f32}");
+ p->global_decl();
+ ASSERT_FALSE(p->has_error()) << p->error();
- auto program = p->program();
- ASSERT_EQ(program.AST().TypeDecls().size(), 1u);
+ auto program = p->program();
+ ASSERT_EQ(program.AST().TypeDecls().size(), 1u);
- auto* t = program.AST().TypeDecls()[0];
- ASSERT_NE(t, nullptr);
- ASSERT_TRUE(t->Is<ast::Struct>());
+ auto* t = program.AST().TypeDecls()[0];
+ ASSERT_NE(t, nullptr);
+ ASSERT_TRUE(t->Is<ast::Struct>());
- auto* str = t->As<ast::Struct>();
- EXPECT_EQ(str->name, program.Symbols().Get("A"));
- EXPECT_EQ(str->members.size(), 2u);
+ auto* str = t->As<ast::Struct>();
+ EXPECT_EQ(str->name, program.Symbols().Get("A"));
+ EXPECT_EQ(str->members.size(), 2u);
}
TEST_F(ParserImplTest, GlobalDecl_Struct_Invalid) {
- auto p = parser("A {}");
- p->expect_global_decl();
- ASSERT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:1: unexpected token");
+ {
+ auto p = parser("A {}");
+ auto decl = p->global_decl();
+        // global_decl will result in a no-match.
+ ASSERT_FALSE(p->has_error()) << p->error();
+ ASSERT_TRUE(!decl.matched && !decl.errored);
+ }
+ {
+ auto p = parser("A {}");
+ p->translation_unit();
+ // translation_unit will result in a general error.
+ ASSERT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:1: unexpected token");
+ }
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_global_variable_decl_test.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_global_variable_decl_test.cc
index 6dd8f74788c..57f90b95734 100644
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_global_variable_decl_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_global_variable_decl_test.cc
@@ -18,151 +18,150 @@ namespace tint::reader::wgsl {
namespace {
TEST_F(ParserImplTest, GlobalVariableDecl_WithoutConstructor) {
- auto p = parser("var<private> a : f32");
- auto attrs = p->attribute_list();
- EXPECT_FALSE(attrs.errored);
- EXPECT_FALSE(attrs.matched);
- auto e = p->global_variable_decl(attrs.value);
- ASSERT_FALSE(p->has_error()) << p->error();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- ASSERT_NE(e.value, nullptr);
-
- EXPECT_EQ(e->symbol, p->builder().Symbols().Get("a"));
- EXPECT_TRUE(e->type->Is<ast::F32>());
- EXPECT_EQ(e->declared_storage_class, ast::StorageClass::kPrivate);
-
- EXPECT_EQ(e->source.range.begin.line, 1u);
- EXPECT_EQ(e->source.range.begin.column, 14u);
- EXPECT_EQ(e->source.range.end.line, 1u);
- EXPECT_EQ(e->source.range.end.column, 15u);
-
- ASSERT_EQ(e->constructor, nullptr);
+ auto p = parser("var<private> a : f32");
+ auto attrs = p->attribute_list();
+ EXPECT_FALSE(attrs.errored);
+ EXPECT_FALSE(attrs.matched);
+ auto e = p->global_variable_decl(attrs.value);
+ ASSERT_FALSE(p->has_error()) << p->error();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ ASSERT_NE(e.value, nullptr);
+
+ EXPECT_EQ(e->symbol, p->builder().Symbols().Get("a"));
+ EXPECT_TRUE(e->type->Is<ast::F32>());
+ EXPECT_EQ(e->declared_storage_class, ast::StorageClass::kPrivate);
+
+ EXPECT_EQ(e->source.range.begin.line, 1u);
+ EXPECT_EQ(e->source.range.begin.column, 14u);
+ EXPECT_EQ(e->source.range.end.line, 1u);
+ EXPECT_EQ(e->source.range.end.column, 15u);
+
+ ASSERT_EQ(e->constructor, nullptr);
}
TEST_F(ParserImplTest, GlobalVariableDecl_WithConstructor) {
- auto p = parser("var<private> a : f32 = 1.");
- auto attrs = p->attribute_list();
- EXPECT_FALSE(attrs.errored);
- EXPECT_FALSE(attrs.matched);
- auto e = p->global_variable_decl(attrs.value);
- ASSERT_FALSE(p->has_error()) << p->error();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- ASSERT_NE(e.value, nullptr);
-
- EXPECT_EQ(e->symbol, p->builder().Symbols().Get("a"));
- EXPECT_TRUE(e->type->Is<ast::F32>());
- EXPECT_EQ(e->declared_storage_class, ast::StorageClass::kPrivate);
-
- EXPECT_EQ(e->source.range.begin.line, 1u);
- EXPECT_EQ(e->source.range.begin.column, 14u);
- EXPECT_EQ(e->source.range.end.line, 1u);
- EXPECT_EQ(e->source.range.end.column, 15u);
-
- ASSERT_NE(e->constructor, nullptr);
- ASSERT_TRUE(e->constructor->Is<ast::FloatLiteralExpression>());
+ auto p = parser("var<private> a : f32 = 1.");
+ auto attrs = p->attribute_list();
+ EXPECT_FALSE(attrs.errored);
+ EXPECT_FALSE(attrs.matched);
+ auto e = p->global_variable_decl(attrs.value);
+ ASSERT_FALSE(p->has_error()) << p->error();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ ASSERT_NE(e.value, nullptr);
+
+ EXPECT_EQ(e->symbol, p->builder().Symbols().Get("a"));
+ EXPECT_TRUE(e->type->Is<ast::F32>());
+ EXPECT_EQ(e->declared_storage_class, ast::StorageClass::kPrivate);
+
+ EXPECT_EQ(e->source.range.begin.line, 1u);
+ EXPECT_EQ(e->source.range.begin.column, 14u);
+ EXPECT_EQ(e->source.range.end.line, 1u);
+ EXPECT_EQ(e->source.range.end.column, 15u);
+
+ ASSERT_NE(e->constructor, nullptr);
+ ASSERT_TRUE(e->constructor->Is<ast::FloatLiteralExpression>());
}
TEST_F(ParserImplTest, GlobalVariableDecl_WithAttribute) {
- auto p = parser("@binding(2) @group(1) var<uniform> a : f32");
- auto attrs = p->attribute_list();
- EXPECT_FALSE(attrs.errored);
- EXPECT_TRUE(attrs.matched);
- auto e = p->global_variable_decl(attrs.value);
- ASSERT_FALSE(p->has_error()) << p->error();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- ASSERT_NE(e.value, nullptr);
-
- EXPECT_EQ(e->symbol, p->builder().Symbols().Get("a"));
- ASSERT_NE(e->type, nullptr);
- EXPECT_TRUE(e->type->Is<ast::F32>());
- EXPECT_EQ(e->declared_storage_class, ast::StorageClass::kUniform);
-
- EXPECT_EQ(e->source.range.begin.line, 1u);
- EXPECT_EQ(e->source.range.begin.column, 36u);
- EXPECT_EQ(e->source.range.end.line, 1u);
- EXPECT_EQ(e->source.range.end.column, 37u);
-
- ASSERT_EQ(e->constructor, nullptr);
-
- auto& attributes = e->attributes;
- ASSERT_EQ(attributes.size(), 2u);
- ASSERT_TRUE(attributes[0]->Is<ast::BindingAttribute>());
- ASSERT_TRUE(attributes[1]->Is<ast::GroupAttribute>());
+ auto p = parser("@binding(2) @group(1) var<uniform> a : f32");
+ auto attrs = p->attribute_list();
+ EXPECT_FALSE(attrs.errored);
+ EXPECT_TRUE(attrs.matched);
+ auto e = p->global_variable_decl(attrs.value);
+ ASSERT_FALSE(p->has_error()) << p->error();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ ASSERT_NE(e.value, nullptr);
+
+ EXPECT_EQ(e->symbol, p->builder().Symbols().Get("a"));
+ ASSERT_NE(e->type, nullptr);
+ EXPECT_TRUE(e->type->Is<ast::F32>());
+ EXPECT_EQ(e->declared_storage_class, ast::StorageClass::kUniform);
+
+ EXPECT_EQ(e->source.range.begin.line, 1u);
+ EXPECT_EQ(e->source.range.begin.column, 36u);
+ EXPECT_EQ(e->source.range.end.line, 1u);
+ EXPECT_EQ(e->source.range.end.column, 37u);
+
+ ASSERT_EQ(e->constructor, nullptr);
+
+ auto& attributes = e->attributes;
+ ASSERT_EQ(attributes.size(), 2u);
+ ASSERT_TRUE(attributes[0]->Is<ast::BindingAttribute>());
+ ASSERT_TRUE(attributes[1]->Is<ast::GroupAttribute>());
}
TEST_F(ParserImplTest, GlobalVariableDecl_WithAttribute_MulitpleGroups) {
- auto p = parser("@binding(2) @group(1) var<uniform> a : f32");
- auto attrs = p->attribute_list();
- EXPECT_FALSE(attrs.errored);
- EXPECT_TRUE(attrs.matched);
-
- auto e = p->global_variable_decl(attrs.value);
- ASSERT_FALSE(p->has_error()) << p->error();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- ASSERT_NE(e.value, nullptr);
-
- EXPECT_EQ(e->symbol, p->builder().Symbols().Get("a"));
- ASSERT_NE(e->type, nullptr);
- EXPECT_TRUE(e->type->Is<ast::F32>());
- EXPECT_EQ(e->declared_storage_class, ast::StorageClass::kUniform);
-
- EXPECT_EQ(e->source.range.begin.line, 1u);
- EXPECT_EQ(e->source.range.begin.column, 36u);
- EXPECT_EQ(e->source.range.end.line, 1u);
- EXPECT_EQ(e->source.range.end.column, 37u);
-
- ASSERT_EQ(e->constructor, nullptr);
-
- auto& attributes = e->attributes;
- ASSERT_EQ(attributes.size(), 2u);
- ASSERT_TRUE(attributes[0]->Is<ast::BindingAttribute>());
- ASSERT_TRUE(attributes[1]->Is<ast::GroupAttribute>());
+ auto p = parser("@binding(2) @group(1) var<uniform> a : f32");
+ auto attrs = p->attribute_list();
+ EXPECT_FALSE(attrs.errored);
+ EXPECT_TRUE(attrs.matched);
+
+ auto e = p->global_variable_decl(attrs.value);
+ ASSERT_FALSE(p->has_error()) << p->error();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ ASSERT_NE(e.value, nullptr);
+
+ EXPECT_EQ(e->symbol, p->builder().Symbols().Get("a"));
+ ASSERT_NE(e->type, nullptr);
+ EXPECT_TRUE(e->type->Is<ast::F32>());
+ EXPECT_EQ(e->declared_storage_class, ast::StorageClass::kUniform);
+
+ EXPECT_EQ(e->source.range.begin.line, 1u);
+ EXPECT_EQ(e->source.range.begin.column, 36u);
+ EXPECT_EQ(e->source.range.end.line, 1u);
+ EXPECT_EQ(e->source.range.end.column, 37u);
+
+ ASSERT_EQ(e->constructor, nullptr);
+
+ auto& attributes = e->attributes;
+ ASSERT_EQ(attributes.size(), 2u);
+ ASSERT_TRUE(attributes[0]->Is<ast::BindingAttribute>());
+ ASSERT_TRUE(attributes[1]->Is<ast::GroupAttribute>());
}
TEST_F(ParserImplTest, GlobalVariableDecl_InvalidAttribute) {
- auto p = parser("@binding() var<uniform> a : f32");
- auto attrs = p->attribute_list();
- EXPECT_TRUE(attrs.errored);
- EXPECT_FALSE(attrs.matched);
-
- auto e = p->global_variable_decl(attrs.value);
- EXPECT_FALSE(e.errored);
- EXPECT_TRUE(e.matched);
- EXPECT_NE(e.value, nullptr);
-
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(),
- "1:10: expected signed integer literal for binding attribute");
+ auto p = parser("@binding() var<uniform> a : f32");
+ auto attrs = p->attribute_list();
+ EXPECT_TRUE(attrs.errored);
+ EXPECT_FALSE(attrs.matched);
+
+ auto e = p->global_variable_decl(attrs.value);
+ EXPECT_FALSE(e.errored);
+ EXPECT_TRUE(e.matched);
+ EXPECT_NE(e.value, nullptr);
+
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:10: expected signed integer literal for binding attribute");
}
TEST_F(ParserImplTest, GlobalVariableDecl_InvalidConstExpr) {
- auto p = parser("var<private> a : f32 = if (a) {}");
- auto attrs = p->attribute_list();
- EXPECT_FALSE(attrs.errored);
- EXPECT_FALSE(attrs.matched);
- auto e = p->global_variable_decl(attrs.value);
- EXPECT_TRUE(p->has_error());
- EXPECT_TRUE(e.errored);
- EXPECT_FALSE(e.matched);
- EXPECT_EQ(e.value, nullptr);
- EXPECT_EQ(p->error(), "1:24: invalid type for const_expr");
+ auto p = parser("var<private> a : f32 = if (a) {}");
+ auto attrs = p->attribute_list();
+ EXPECT_FALSE(attrs.errored);
+ EXPECT_FALSE(attrs.matched);
+ auto e = p->global_variable_decl(attrs.value);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_TRUE(e.errored);
+ EXPECT_FALSE(e.matched);
+ EXPECT_EQ(e.value, nullptr);
+ EXPECT_EQ(p->error(), "1:24: invalid type for const_expr");
}
TEST_F(ParserImplTest, GlobalVariableDecl_InvalidVariableDecl) {
- auto p = parser("var<invalid> a : f32;");
- auto attrs = p->attribute_list();
- EXPECT_FALSE(attrs.errored);
- EXPECT_FALSE(attrs.matched);
- auto e = p->global_variable_decl(attrs.value);
- EXPECT_TRUE(p->has_error());
- EXPECT_TRUE(e.errored);
- EXPECT_FALSE(e.matched);
- EXPECT_EQ(e.value, nullptr);
- EXPECT_EQ(p->error(), "1:5: invalid storage class for variable declaration");
+ auto p = parser("var<invalid> a : f32;");
+ auto attrs = p->attribute_list();
+ EXPECT_FALSE(attrs.errored);
+ EXPECT_FALSE(attrs.matched);
+ auto e = p->global_variable_decl(attrs.value);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_TRUE(e.errored);
+ EXPECT_FALSE(e.matched);
+ EXPECT_EQ(e.value, nullptr);
+ EXPECT_EQ(p->error(), "1:5: invalid storage class for variable declaration");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_if_stmt_test.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_if_stmt_test.cc
index 18c30e9453a..b9e5566d61a 100644
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_if_stmt_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_if_stmt_test.cc
@@ -18,124 +18,124 @@ namespace tint::reader::wgsl {
namespace {
TEST_F(ParserImplTest, IfStmt) {
- auto p = parser("if a == 4 { a = b; c = d; }");
- auto e = p->if_stmt();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
-
- ASSERT_TRUE(e->Is<ast::IfStatement>());
- ASSERT_NE(e->condition, nullptr);
- ASSERT_TRUE(e->condition->Is<ast::BinaryExpression>());
- EXPECT_EQ(e->body->statements.size(), 2u);
- EXPECT_EQ(e->else_statements.size(), 0u);
+ auto p = parser("if a == 4 { a = b; c = d; }");
+ auto e = p->if_stmt();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
+
+ ASSERT_TRUE(e->Is<ast::IfStatement>());
+ ASSERT_NE(e->condition, nullptr);
+ ASSERT_TRUE(e->condition->Is<ast::BinaryExpression>());
+ EXPECT_EQ(e->body->statements.size(), 2u);
+ EXPECT_EQ(e->else_statement, nullptr);
}
TEST_F(ParserImplTest, IfStmt_WithElse) {
- auto p = parser("if a == 4 { a = b; c = d; } else if(c) { d = 2; } else {}");
- auto e = p->if_stmt();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
-
- ASSERT_TRUE(e->Is<ast::IfStatement>());
- ASSERT_NE(e->condition, nullptr);
- ASSERT_TRUE(e->condition->Is<ast::BinaryExpression>());
- EXPECT_EQ(e->body->statements.size(), 2u);
-
- ASSERT_EQ(e->else_statements.size(), 2u);
- ASSERT_NE(e->else_statements[0]->condition, nullptr);
- ASSERT_TRUE(
- e->else_statements[0]->condition->Is<ast::IdentifierExpression>());
- EXPECT_EQ(e->else_statements[0]->body->statements.size(), 1u);
-
- ASSERT_EQ(e->else_statements[1]->condition, nullptr);
- EXPECT_EQ(e->else_statements[1]->body->statements.size(), 0u);
+ auto p = parser("if a == 4 { a = b; c = d; } else if(c) { d = 2; } else {}");
+ auto e = p->if_stmt();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
+
+ ASSERT_TRUE(e->Is<ast::IfStatement>());
+ ASSERT_NE(e->condition, nullptr);
+ ASSERT_TRUE(e->condition->Is<ast::BinaryExpression>());
+ EXPECT_EQ(e->body->statements.size(), 2u);
+
+ auto* elseif = As<ast::IfStatement>(e->else_statement);
+ ASSERT_NE(elseif, nullptr);
+ ASSERT_TRUE(elseif->condition->Is<ast::IdentifierExpression>());
+ EXPECT_EQ(elseif->body->statements.size(), 1u);
+
+ auto* el = As<ast::BlockStatement>(elseif->else_statement);
+ ASSERT_NE(el, nullptr);
+ EXPECT_EQ(el->statements.size(), 0u);
}
TEST_F(ParserImplTest, IfStmt_WithElse_WithParens) {
- auto p = parser("if(a==4) { a = b; c = d; } else if(c) { d = 2; } else {}");
- auto e = p->if_stmt();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
-
- ASSERT_TRUE(e->Is<ast::IfStatement>());
- ASSERT_NE(e->condition, nullptr);
- ASSERT_TRUE(e->condition->Is<ast::BinaryExpression>());
- EXPECT_EQ(e->body->statements.size(), 2u);
-
- ASSERT_EQ(e->else_statements.size(), 2u);
- ASSERT_NE(e->else_statements[0]->condition, nullptr);
- ASSERT_TRUE(
- e->else_statements[0]->condition->Is<ast::IdentifierExpression>());
- EXPECT_EQ(e->else_statements[0]->body->statements.size(), 1u);
-
- ASSERT_EQ(e->else_statements[1]->condition, nullptr);
- EXPECT_EQ(e->else_statements[1]->body->statements.size(), 0u);
+ auto p = parser("if(a==4) { a = b; c = d; } else if(c) { d = 2; } else {}");
+ auto e = p->if_stmt();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
+
+ ASSERT_TRUE(e->Is<ast::IfStatement>());
+ ASSERT_NE(e->condition, nullptr);
+ ASSERT_TRUE(e->condition->Is<ast::BinaryExpression>());
+ EXPECT_EQ(e->body->statements.size(), 2u);
+
+ auto* elseif = As<ast::IfStatement>(e->else_statement);
+ ASSERT_NE(elseif, nullptr);
+ ASSERT_TRUE(elseif->condition->Is<ast::IdentifierExpression>());
+ EXPECT_EQ(elseif->body->statements.size(), 1u);
+
+ auto* el = As<ast::BlockStatement>(elseif->else_statement);
+ ASSERT_NE(el, nullptr);
+ EXPECT_EQ(el->statements.size(), 0u);
}
TEST_F(ParserImplTest, IfStmt_InvalidCondition) {
- auto p = parser("if a = 3 {}");
- auto e = p->if_stmt();
- EXPECT_FALSE(e.matched);
- EXPECT_TRUE(e.errored);
- EXPECT_EQ(e.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:6: expected '{'");
+ auto p = parser("if a = 3 {}");
+ auto e = p->if_stmt();
+ EXPECT_FALSE(e.matched);
+ EXPECT_TRUE(e.errored);
+ EXPECT_EQ(e.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:6: expected '{'");
}
TEST_F(ParserImplTest, IfStmt_MissingCondition) {
- auto p = parser("if {}");
- auto e = p->if_stmt();
- EXPECT_FALSE(e.matched);
- EXPECT_TRUE(e.errored);
- EXPECT_EQ(e.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:4: unable to parse condition expression");
+ auto p = parser("if {}");
+ auto e = p->if_stmt();
+ EXPECT_FALSE(e.matched);
+ EXPECT_TRUE(e.errored);
+ EXPECT_EQ(e.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:4: unable to parse condition expression");
}
TEST_F(ParserImplTest, IfStmt_InvalidBody) {
- auto p = parser("if a { fn main() {}}");
- auto e = p->if_stmt();
- EXPECT_FALSE(e.matched);
- EXPECT_TRUE(e.errored);
- EXPECT_EQ(e.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:8: expected '}'");
+ auto p = parser("if a { fn main() {}}");
+ auto e = p->if_stmt();
+ EXPECT_FALSE(e.matched);
+ EXPECT_TRUE(e.errored);
+ EXPECT_EQ(e.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:8: expected '}'");
}
TEST_F(ParserImplTest, IfStmt_MissingBody) {
- auto p = parser("if a");
- auto e = p->if_stmt();
- EXPECT_FALSE(e.matched);
- EXPECT_TRUE(e.errored);
- EXPECT_EQ(e.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:5: expected '{'");
+ auto p = parser("if a");
+ auto e = p->if_stmt();
+ EXPECT_FALSE(e.matched);
+ EXPECT_TRUE(e.errored);
+ EXPECT_EQ(e.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:5: expected '{'");
}
TEST_F(ParserImplTest, IfStmt_InvalidElseif) {
- auto p = parser("if a {} else if a { fn main() -> a{}}");
- auto e = p->if_stmt();
- EXPECT_FALSE(e.matched);
- EXPECT_TRUE(e.errored);
- EXPECT_EQ(e.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:21: expected '}'");
+ auto p = parser("if a {} else if a { fn main() -> a{}}");
+ auto e = p->if_stmt();
+ EXPECT_FALSE(e.matched);
+ EXPECT_TRUE(e.errored);
+ EXPECT_EQ(e.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:21: expected '}'");
}
TEST_F(ParserImplTest, IfStmt_InvalidElse) {
- auto p = parser("if a {} else { fn main() -> a{}}");
- auto e = p->if_stmt();
- EXPECT_FALSE(e.matched);
- EXPECT_TRUE(e.errored);
- EXPECT_EQ(e.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:16: expected '}'");
+ auto p = parser("if a {} else { fn main() -> a{}}");
+ auto e = p->if_stmt();
+ EXPECT_FALSE(e.matched);
+ EXPECT_TRUE(e.errored);
+ EXPECT_EQ(e.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:16: expected '}'");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_inclusive_or_expression_test.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_inclusive_or_expression_test.cc
index d8b52099e10..f534ff79914 100644
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_inclusive_or_expression_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_inclusive_or_expression_test.cc
@@ -18,52 +18,57 @@ namespace tint::reader::wgsl {
namespace {
TEST_F(ParserImplTest, InclusiveOrExpression_Parses) {
- auto p = parser("a | true");
- auto e = p->inclusive_or_expression();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
+ auto p = parser("a | true");
+ auto e = p->inclusive_or_expression();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
- ASSERT_TRUE(e->Is<ast::BinaryExpression>());
- auto* rel = e->As<ast::BinaryExpression>();
- EXPECT_EQ(ast::BinaryOp::kOr, rel->op);
+ EXPECT_EQ(e->source.range.begin.line, 1u);
+ EXPECT_EQ(e->source.range.begin.column, 3u);
+ EXPECT_EQ(e->source.range.end.line, 1u);
+ EXPECT_EQ(e->source.range.end.column, 4u);
- ASSERT_TRUE(rel->lhs->Is<ast::IdentifierExpression>());
- auto* ident = rel->lhs->As<ast::IdentifierExpression>();
- EXPECT_EQ(ident->symbol, p->builder().Symbols().Get("a"));
+ ASSERT_TRUE(e->Is<ast::BinaryExpression>());
+ auto* rel = e->As<ast::BinaryExpression>();
+ EXPECT_EQ(ast::BinaryOp::kOr, rel->op);
- ASSERT_TRUE(rel->rhs->Is<ast::BoolLiteralExpression>());
- ASSERT_TRUE(rel->rhs->As<ast::BoolLiteralExpression>()->value);
+ ASSERT_TRUE(rel->lhs->Is<ast::IdentifierExpression>());
+ auto* ident = rel->lhs->As<ast::IdentifierExpression>();
+ EXPECT_EQ(ident->symbol, p->builder().Symbols().Get("a"));
+
+ ASSERT_TRUE(rel->rhs->Is<ast::BoolLiteralExpression>());
+ ASSERT_TRUE(rel->rhs->As<ast::BoolLiteralExpression>()->value);
}
TEST_F(ParserImplTest, InclusiveOrExpression_InvalidLHS) {
- auto p = parser("if (a) {} | true");
- auto e = p->inclusive_or_expression();
- EXPECT_FALSE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_EQ(e.value, nullptr);
+ auto p = parser("if (a) {} | true");
+ auto e = p->inclusive_or_expression();
+ EXPECT_FALSE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_EQ(e.value, nullptr);
}
TEST_F(ParserImplTest, InclusiveOrExpression_InvalidRHS) {
- auto p = parser("true | if (a) {}");
- auto e = p->inclusive_or_expression();
- EXPECT_FALSE(e.matched);
- EXPECT_TRUE(e.errored);
- EXPECT_EQ(e.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:8: unable to parse right side of | expression");
+ auto p = parser("true | if (a) {}");
+ auto e = p->inclusive_or_expression();
+ EXPECT_FALSE(e.matched);
+ EXPECT_TRUE(e.errored);
+ EXPECT_EQ(e.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:8: unable to parse right side of | expression");
}
TEST_F(ParserImplTest, InclusiveOrExpression_NoOr_ReturnsLHS) {
- auto p = parser("a true");
- auto e = p->inclusive_or_expression();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
- ASSERT_TRUE(e->Is<ast::IdentifierExpression>());
+ auto p = parser("a true");
+ auto e = p->inclusive_or_expression();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
+ ASSERT_TRUE(e->Is<ast::IdentifierExpression>());
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_increment_decrement_stmt_test.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_increment_decrement_stmt_test.cc
index 18f69a188dd..80417353295 100644
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_increment_decrement_stmt_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_increment_decrement_stmt_test.cc
@@ -18,114 +18,114 @@ namespace tint::reader::wgsl {
namespace {
TEST_F(ParserImplTest, IncrementDecrementStmt_Increment) {
- auto p = parser("a++");
- auto e = p->assignment_stmt();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
-
- auto* i = e->As<ast::IncrementDecrementStatement>();
- ASSERT_NE(i, nullptr);
- ASSERT_NE(i->lhs, nullptr);
-
- ASSERT_TRUE(i->lhs->Is<ast::IdentifierExpression>());
- auto* ident = i->lhs->As<ast::IdentifierExpression>();
- EXPECT_EQ(ident->symbol, p->builder().Symbols().Get("a"));
-
- EXPECT_TRUE(i->increment);
+ auto p = parser("a++");
+ auto e = p->assignment_stmt();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
+
+ auto* i = e->As<ast::IncrementDecrementStatement>();
+ ASSERT_NE(i, nullptr);
+ ASSERT_NE(i->lhs, nullptr);
+
+ ASSERT_TRUE(i->lhs->Is<ast::IdentifierExpression>());
+ auto* ident = i->lhs->As<ast::IdentifierExpression>();
+ EXPECT_EQ(ident->symbol, p->builder().Symbols().Get("a"));
+
+ EXPECT_TRUE(i->increment);
}
TEST_F(ParserImplTest, IncrementDecrementStmt_Decrement) {
- auto p = parser("a--");
- auto e = p->assignment_stmt();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
-
- auto* i = e->As<ast::IncrementDecrementStatement>();
- ASSERT_NE(i, nullptr);
- ASSERT_NE(i->lhs, nullptr);
-
- ASSERT_TRUE(i->lhs->Is<ast::IdentifierExpression>());
- auto* ident = i->lhs->As<ast::IdentifierExpression>();
- EXPECT_EQ(ident->symbol, p->builder().Symbols().Get("a"));
-
- EXPECT_FALSE(i->increment);
+ auto p = parser("a--");
+ auto e = p->assignment_stmt();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
+
+ auto* i = e->As<ast::IncrementDecrementStatement>();
+ ASSERT_NE(i, nullptr);
+ ASSERT_NE(i->lhs, nullptr);
+
+ ASSERT_TRUE(i->lhs->Is<ast::IdentifierExpression>());
+ auto* ident = i->lhs->As<ast::IdentifierExpression>();
+ EXPECT_EQ(ident->symbol, p->builder().Symbols().Get("a"));
+
+ EXPECT_FALSE(i->increment);
}
TEST_F(ParserImplTest, IncrementDecrementStmt_Parenthesized) {
- auto p = parser("(a)++");
- auto e = p->assignment_stmt();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
-
- auto* i = e->As<ast::IncrementDecrementStatement>();
- ASSERT_NE(i, nullptr);
- ASSERT_NE(i->lhs, nullptr);
-
- ASSERT_TRUE(i->lhs->Is<ast::IdentifierExpression>());
- auto* ident = i->lhs->As<ast::IdentifierExpression>();
- EXPECT_EQ(ident->symbol, p->builder().Symbols().Get("a"));
-
- EXPECT_TRUE(i->increment);
+ auto p = parser("(a)++");
+ auto e = p->assignment_stmt();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
+
+ auto* i = e->As<ast::IncrementDecrementStatement>();
+ ASSERT_NE(i, nullptr);
+ ASSERT_NE(i->lhs, nullptr);
+
+ ASSERT_TRUE(i->lhs->Is<ast::IdentifierExpression>());
+ auto* ident = i->lhs->As<ast::IdentifierExpression>();
+ EXPECT_EQ(ident->symbol, p->builder().Symbols().Get("a"));
+
+ EXPECT_TRUE(i->increment);
}
TEST_F(ParserImplTest, IncrementDecrementStmt_ToMember) {
- auto p = parser("a.b.c[2].d++");
- auto e = p->assignment_stmt();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
-
- auto* i = e->As<ast::IncrementDecrementStatement>();
- ASSERT_NE(i, nullptr);
- ASSERT_NE(i->lhs, nullptr);
- EXPECT_TRUE(i->increment);
-
- ASSERT_TRUE(i->lhs->Is<ast::MemberAccessorExpression>());
- auto* mem = i->lhs->As<ast::MemberAccessorExpression>();
-
- ASSERT_TRUE(mem->member->Is<ast::IdentifierExpression>());
- auto* ident = mem->member->As<ast::IdentifierExpression>();
- EXPECT_EQ(ident->symbol, p->builder().Symbols().Get("d"));
-
- ASSERT_TRUE(mem->structure->Is<ast::IndexAccessorExpression>());
- auto* idx = mem->structure->As<ast::IndexAccessorExpression>();
-
- ASSERT_NE(idx->index, nullptr);
- ASSERT_TRUE(idx->index->Is<ast::SintLiteralExpression>());
- EXPECT_EQ(idx->index->As<ast::SintLiteralExpression>()->value, 2);
-
- ASSERT_TRUE(idx->object->Is<ast::MemberAccessorExpression>());
- mem = idx->object->As<ast::MemberAccessorExpression>();
- ASSERT_TRUE(mem->member->Is<ast::IdentifierExpression>());
- ident = mem->member->As<ast::IdentifierExpression>();
- EXPECT_EQ(ident->symbol, p->builder().Symbols().Get("c"));
-
- ASSERT_TRUE(mem->structure->Is<ast::MemberAccessorExpression>());
- mem = mem->structure->As<ast::MemberAccessorExpression>();
-
- ASSERT_TRUE(mem->structure->Is<ast::IdentifierExpression>());
- ident = mem->structure->As<ast::IdentifierExpression>();
- EXPECT_EQ(ident->symbol, p->builder().Symbols().Get("a"));
-
- ASSERT_TRUE(mem->member->Is<ast::IdentifierExpression>());
- ident = mem->member->As<ast::IdentifierExpression>();
- EXPECT_EQ(ident->symbol, p->builder().Symbols().Get("b"));
+ auto p = parser("a.b.c[2].d++");
+ auto e = p->assignment_stmt();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
+
+ auto* i = e->As<ast::IncrementDecrementStatement>();
+ ASSERT_NE(i, nullptr);
+ ASSERT_NE(i->lhs, nullptr);
+ EXPECT_TRUE(i->increment);
+
+ ASSERT_TRUE(i->lhs->Is<ast::MemberAccessorExpression>());
+ auto* mem = i->lhs->As<ast::MemberAccessorExpression>();
+
+ ASSERT_TRUE(mem->member->Is<ast::IdentifierExpression>());
+ auto* ident = mem->member->As<ast::IdentifierExpression>();
+ EXPECT_EQ(ident->symbol, p->builder().Symbols().Get("d"));
+
+ ASSERT_TRUE(mem->structure->Is<ast::IndexAccessorExpression>());
+ auto* idx = mem->structure->As<ast::IndexAccessorExpression>();
+
+ ASSERT_NE(idx->index, nullptr);
+ ASSERT_TRUE(idx->index->Is<ast::IntLiteralExpression>());
+ EXPECT_EQ(idx->index->As<ast::IntLiteralExpression>()->value, 2);
+
+ ASSERT_TRUE(idx->object->Is<ast::MemberAccessorExpression>());
+ mem = idx->object->As<ast::MemberAccessorExpression>();
+ ASSERT_TRUE(mem->member->Is<ast::IdentifierExpression>());
+ ident = mem->member->As<ast::IdentifierExpression>();
+ EXPECT_EQ(ident->symbol, p->builder().Symbols().Get("c"));
+
+ ASSERT_TRUE(mem->structure->Is<ast::MemberAccessorExpression>());
+ mem = mem->structure->As<ast::MemberAccessorExpression>();
+
+ ASSERT_TRUE(mem->structure->Is<ast::IdentifierExpression>());
+ ident = mem->structure->As<ast::IdentifierExpression>();
+ EXPECT_EQ(ident->symbol, p->builder().Symbols().Get("a"));
+
+ ASSERT_TRUE(mem->member->Is<ast::IdentifierExpression>());
+ ident = mem->member->As<ast::IdentifierExpression>();
+ EXPECT_EQ(ident->symbol, p->builder().Symbols().Get("b"));
}
TEST_F(ParserImplTest, IncrementDecrementStmt_InvalidLHS) {
- auto p = parser("{}++");
- auto e = p->assignment_stmt();
- EXPECT_FALSE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- EXPECT_EQ(e.value, nullptr);
+ auto p = parser("{}++");
+ auto e = p->assignment_stmt();
+ EXPECT_FALSE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ EXPECT_EQ(e.value, nullptr);
}
} // namespace
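[Editor's note, not part of the diff] Besides the reindent, the hunks above track Tint's literal-AST unification: ast::SintLiteralExpression / ast::UintLiteralExpression are replaced by a single ast::IntLiteralExpression that carries the literal's suffix. A minimal sketch of the new assertion pattern, assuming the same ParserImplTest fixture and includes used by these files; the test name and WGSL input are placeholders:

// Sketch only -- not part of the Chromium change above. Mirrors the
// assertion pattern the updated tests use for the unified integer
// literal node; would live alongside them (same includes and fixture).
TEST_F(ParserImplTest, Sketch_IntLiteralSuffix) {
    auto p = parser("vec2<i32>(1, 2u)");
    auto e = p->primary_expression();
    EXPECT_TRUE(e.matched);
    EXPECT_FALSE(e.errored);
    ASSERT_NE(e.value, nullptr);

    auto* call = e->As<ast::CallExpression>();
    ASSERT_NE(call, nullptr);
    ASSERT_EQ(call->args.size(), 2u);

    // Unsuffixed literal: one IntLiteralExpression node, Suffix::kNone.
    auto* lit0 = call->args[0]->As<ast::IntLiteralExpression>();
    ASSERT_NE(lit0, nullptr);
    EXPECT_EQ(lit0->value, 1);
    EXPECT_EQ(lit0->suffix, ast::IntLiteralExpression::Suffix::kNone);

    // 'u' suffix: same node type, Suffix::kU (formerly UintLiteralExpression).
    auto* lit1 = call->args[1]->As<ast::IntLiteralExpression>();
    ASSERT_NE(lit1, nullptr);
    EXPECT_EQ(lit1->value, 2);
    EXPECT_EQ(lit1->suffix, ast::IntLiteralExpression::Suffix::kU);
}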
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_logical_and_expression_test.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_logical_and_expression_test.cc
index ae4cfe890d9..8baadaf7cf7 100644
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_logical_and_expression_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_logical_and_expression_test.cc
@@ -18,52 +18,57 @@ namespace tint::reader::wgsl {
namespace {
TEST_F(ParserImplTest, LogicalAndExpression_Parses) {
- auto p = parser("a && true");
- auto e = p->logical_and_expression();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
+ auto p = parser("a && true");
+ auto e = p->logical_and_expression();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
- ASSERT_TRUE(e->Is<ast::BinaryExpression>());
- auto* rel = e->As<ast::BinaryExpression>();
- EXPECT_EQ(ast::BinaryOp::kLogicalAnd, rel->op);
+ EXPECT_EQ(e->source.range.begin.line, 1u);
+ EXPECT_EQ(e->source.range.begin.column, 3u);
+ EXPECT_EQ(e->source.range.end.line, 1u);
+ EXPECT_EQ(e->source.range.end.column, 5u);
- ASSERT_TRUE(rel->lhs->Is<ast::IdentifierExpression>());
- auto* ident = rel->lhs->As<ast::IdentifierExpression>();
- EXPECT_EQ(ident->symbol, p->builder().Symbols().Get("a"));
+ ASSERT_TRUE(e->Is<ast::BinaryExpression>());
+ auto* rel = e->As<ast::BinaryExpression>();
+ EXPECT_EQ(ast::BinaryOp::kLogicalAnd, rel->op);
- ASSERT_TRUE(rel->rhs->Is<ast::BoolLiteralExpression>());
- ASSERT_TRUE(rel->rhs->As<ast::BoolLiteralExpression>()->value);
+ ASSERT_TRUE(rel->lhs->Is<ast::IdentifierExpression>());
+ auto* ident = rel->lhs->As<ast::IdentifierExpression>();
+ EXPECT_EQ(ident->symbol, p->builder().Symbols().Get("a"));
+
+ ASSERT_TRUE(rel->rhs->Is<ast::BoolLiteralExpression>());
+ ASSERT_TRUE(rel->rhs->As<ast::BoolLiteralExpression>()->value);
}
TEST_F(ParserImplTest, LogicalAndExpression_InvalidLHS) {
- auto p = parser("if (a) {} && true");
- auto e = p->logical_and_expression();
- EXPECT_FALSE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- EXPECT_EQ(e.value, nullptr);
+ auto p = parser("if (a) {} && true");
+ auto e = p->logical_and_expression();
+ EXPECT_FALSE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ EXPECT_EQ(e.value, nullptr);
}
TEST_F(ParserImplTest, LogicalAndExpression_InvalidRHS) {
- auto p = parser("true && if (a) {}");
- auto e = p->logical_and_expression();
- EXPECT_FALSE(e.matched);
- EXPECT_TRUE(e.errored);
- EXPECT_EQ(e.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:9: unable to parse right side of && expression");
+ auto p = parser("true && if (a) {}");
+ auto e = p->logical_and_expression();
+ EXPECT_FALSE(e.matched);
+ EXPECT_TRUE(e.errored);
+ EXPECT_EQ(e.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:9: unable to parse right side of && expression");
}
TEST_F(ParserImplTest, LogicalAndExpression_NoOr_ReturnsLHS) {
- auto p = parser("a true");
- auto e = p->logical_and_expression();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
- ASSERT_TRUE(e->Is<ast::IdentifierExpression>());
+ auto p = parser("a true");
+ auto e = p->logical_and_expression();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
+ ASSERT_TRUE(e->Is<ast::IdentifierExpression>());
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_logical_or_expression_test.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_logical_or_expression_test.cc
index 6af8355b8bf..943b059321f 100644
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_logical_or_expression_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_logical_or_expression_test.cc
@@ -18,52 +18,57 @@ namespace tint::reader::wgsl {
namespace {
TEST_F(ParserImplTest, LogicalOrExpression_Parses) {
- auto p = parser("a || true");
- auto e = p->logical_or_expression();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
+ auto p = parser("a || true");
+ auto e = p->logical_or_expression();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
- ASSERT_TRUE(e->Is<ast::BinaryExpression>());
- auto* rel = e->As<ast::BinaryExpression>();
- EXPECT_EQ(ast::BinaryOp::kLogicalOr, rel->op);
+ EXPECT_EQ(e->source.range.begin.line, 1u);
+ EXPECT_EQ(e->source.range.begin.column, 3u);
+ EXPECT_EQ(e->source.range.end.line, 1u);
+ EXPECT_EQ(e->source.range.end.column, 5u);
- ASSERT_TRUE(rel->lhs->Is<ast::IdentifierExpression>());
- auto* ident = rel->lhs->As<ast::IdentifierExpression>();
- EXPECT_EQ(ident->symbol, p->builder().Symbols().Get("a"));
+ ASSERT_TRUE(e->Is<ast::BinaryExpression>());
+ auto* rel = e->As<ast::BinaryExpression>();
+ EXPECT_EQ(ast::BinaryOp::kLogicalOr, rel->op);
- ASSERT_TRUE(rel->rhs->Is<ast::BoolLiteralExpression>());
- ASSERT_TRUE(rel->rhs->As<ast::BoolLiteralExpression>()->value);
+ ASSERT_TRUE(rel->lhs->Is<ast::IdentifierExpression>());
+ auto* ident = rel->lhs->As<ast::IdentifierExpression>();
+ EXPECT_EQ(ident->symbol, p->builder().Symbols().Get("a"));
+
+ ASSERT_TRUE(rel->rhs->Is<ast::BoolLiteralExpression>());
+ ASSERT_TRUE(rel->rhs->As<ast::BoolLiteralExpression>()->value);
}
TEST_F(ParserImplTest, LogicalOrExpression_InvalidLHS) {
- auto p = parser("if (a) {} || true");
- auto e = p->logical_or_expression();
- EXPECT_FALSE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- EXPECT_EQ(e.value, nullptr);
+ auto p = parser("if (a) {} || true");
+ auto e = p->logical_or_expression();
+ EXPECT_FALSE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ EXPECT_EQ(e.value, nullptr);
}
TEST_F(ParserImplTest, LogicalOrExpression_InvalidRHS) {
- auto p = parser("true || if (a) {}");
- auto e = p->logical_or_expression();
- EXPECT_FALSE(e.matched);
- EXPECT_TRUE(e.errored);
- EXPECT_EQ(e.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:9: unable to parse right side of || expression");
+ auto p = parser("true || if (a) {}");
+ auto e = p->logical_or_expression();
+ EXPECT_FALSE(e.matched);
+ EXPECT_TRUE(e.errored);
+ EXPECT_EQ(e.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:9: unable to parse right side of || expression");
}
TEST_F(ParserImplTest, LogicalOrExpression_NoOr_ReturnsLHS) {
- auto p = parser("a true");
- auto e = p->logical_or_expression();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
- ASSERT_TRUE(e->Is<ast::IdentifierExpression>());
+ auto p = parser("a true");
+ auto e = p->logical_or_expression();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
+ ASSERT_TRUE(e->Is<ast::IdentifierExpression>());
}
} // namespace
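[Editor's note, not part of the diff] The logical-expression hunks above also add assertions on e->source.range: the updated _Parses tests pin the expression's recorded source span to the operator token ("&&" / "||" at columns 3-4 of the one-line input, with an exclusive end column of 5). A minimal sketch of that pattern, assuming the same ParserImplTest fixture; the test name and input are placeholders:

// Sketch only -- not part of the Chromium change above. Shows the new
// source-range assertions on a parsed binary expression; would live
// alongside the tests above (same includes and fixture).
TEST_F(ParserImplTest, Sketch_BinaryExprSourceRange) {
    auto p = parser("x || false");
    auto e = p->logical_or_expression();
    EXPECT_TRUE(e.matched);
    EXPECT_FALSE(e.errored);
    ASSERT_NE(e.value, nullptr);
    ASSERT_TRUE(e->Is<ast::BinaryExpression>());

    // "||" starts at column 3; the end column (5) is one past the token.
    EXPECT_EQ(e->source.range.begin.line, 1u);
    EXPECT_EQ(e->source.range.begin.column, 3u);
    EXPECT_EQ(e->source.range.end.line, 1u);
    EXPECT_EQ(e->source.range.end.column, 5u);
}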
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_loop_stmt_test.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_loop_stmt_test.cc
index 6070cca5f53..20cd4f67703 100644
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_loop_stmt_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_loop_stmt_test.cc
@@ -19,95 +19,95 @@ namespace tint::reader::wgsl {
namespace {
TEST_F(ParserImplTest, LoopStmt_BodyNoContinuing) {
- auto p = parser("loop { discard; }");
- auto e = p->loop_stmt();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
+ auto p = parser("loop { discard; }");
+ auto e = p->loop_stmt();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
- ASSERT_EQ(e->body->statements.size(), 1u);
- EXPECT_TRUE(e->body->statements[0]->Is<ast::DiscardStatement>());
+ ASSERT_EQ(e->body->statements.size(), 1u);
+ EXPECT_TRUE(e->body->statements[0]->Is<ast::DiscardStatement>());
- EXPECT_EQ(e->continuing->statements.size(), 0u);
+ EXPECT_EQ(e->continuing->statements.size(), 0u);
}
TEST_F(ParserImplTest, LoopStmt_BodyWithContinuing) {
- auto p = parser("loop { discard; continuing { discard; }}");
- auto e = p->loop_stmt();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
+ auto p = parser("loop { discard; continuing { discard; }}");
+ auto e = p->loop_stmt();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
- ASSERT_EQ(e->body->statements.size(), 1u);
- EXPECT_TRUE(e->body->statements[0]->Is<ast::DiscardStatement>());
+ ASSERT_EQ(e->body->statements.size(), 1u);
+ EXPECT_TRUE(e->body->statements[0]->Is<ast::DiscardStatement>());
- EXPECT_EQ(e->continuing->statements.size(), 1u);
- EXPECT_TRUE(e->continuing->statements[0]->Is<ast::DiscardStatement>());
+ EXPECT_EQ(e->continuing->statements.size(), 1u);
+ EXPECT_TRUE(e->continuing->statements[0]->Is<ast::DiscardStatement>());
}
TEST_F(ParserImplTest, LoopStmt_NoBodyNoContinuing) {
- auto p = parser("loop { }");
- auto e = p->loop_stmt();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
- ASSERT_EQ(e->body->statements.size(), 0u);
- ASSERT_EQ(e->continuing->statements.size(), 0u);
+ auto p = parser("loop { }");
+ auto e = p->loop_stmt();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
+ ASSERT_EQ(e->body->statements.size(), 0u);
+ ASSERT_EQ(e->continuing->statements.size(), 0u);
}
TEST_F(ParserImplTest, LoopStmt_NoBodyWithContinuing) {
- auto p = parser("loop { continuing { discard; }}");
- auto e = p->loop_stmt();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
- ASSERT_EQ(e->body->statements.size(), 0u);
- ASSERT_EQ(e->continuing->statements.size(), 1u);
- EXPECT_TRUE(e->continuing->statements[0]->Is<ast::DiscardStatement>());
+ auto p = parser("loop { continuing { discard; }}");
+ auto e = p->loop_stmt();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
+ ASSERT_EQ(e->body->statements.size(), 0u);
+ ASSERT_EQ(e->continuing->statements.size(), 1u);
+ EXPECT_TRUE(e->continuing->statements[0]->Is<ast::DiscardStatement>());
}
TEST_F(ParserImplTest, LoopStmt_MissingBracketLeft) {
- auto p = parser("loop discard; }");
- auto e = p->loop_stmt();
- EXPECT_FALSE(e.matched);
- EXPECT_TRUE(e.errored);
- EXPECT_EQ(e.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:6: expected '{' for loop");
+ auto p = parser("loop discard; }");
+ auto e = p->loop_stmt();
+ EXPECT_FALSE(e.matched);
+ EXPECT_TRUE(e.errored);
+ EXPECT_EQ(e.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:6: expected '{' for loop");
}
TEST_F(ParserImplTest, LoopStmt_MissingBracketRight) {
- auto p = parser("loop { discard; ");
- auto e = p->loop_stmt();
- EXPECT_FALSE(e.matched);
- EXPECT_TRUE(e.errored);
- EXPECT_EQ(e.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:17: expected '}' for loop");
+ auto p = parser("loop { discard; ");
+ auto e = p->loop_stmt();
+ EXPECT_FALSE(e.matched);
+ EXPECT_TRUE(e.errored);
+ EXPECT_EQ(e.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:17: expected '}' for loop");
}
TEST_F(ParserImplTest, LoopStmt_InvalidStatements) {
- auto p = parser("loop { discard }");
- auto e = p->loop_stmt();
- EXPECT_FALSE(e.matched);
- EXPECT_TRUE(e.errored);
- EXPECT_EQ(e.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:16: expected ';' for discard statement");
+ auto p = parser("loop { discard }");
+ auto e = p->loop_stmt();
+ EXPECT_FALSE(e.matched);
+ EXPECT_TRUE(e.errored);
+ EXPECT_EQ(e.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:16: expected ';' for discard statement");
}
TEST_F(ParserImplTest, LoopStmt_InvalidContinuing) {
- auto p = parser("loop { continuing { discard }}");
- auto e = p->loop_stmt();
- EXPECT_FALSE(e.matched);
- EXPECT_TRUE(e.errored);
- EXPECT_EQ(e.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:29: expected ';' for discard statement");
+ auto p = parser("loop { continuing { discard }}");
+ auto e = p->loop_stmt();
+ EXPECT_FALSE(e.matched);
+ EXPECT_TRUE(e.errored);
+ EXPECT_EQ(e.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:29: expected ';' for discard statement");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_multiplicative_expression_test.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_multiplicative_expression_test.cc
index 56273c96239..28ac568d19d 100644
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_multiplicative_expression_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_multiplicative_expression_test.cc
@@ -18,92 +18,97 @@ namespace tint::reader::wgsl {
namespace {
TEST_F(ParserImplTest, MultiplicativeExpression_Parses_Multiply) {
- auto p = parser("a * true");
- auto e = p->multiplicative_expression();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
-
- ASSERT_TRUE(e->Is<ast::BinaryExpression>());
- auto* rel = e->As<ast::BinaryExpression>();
- EXPECT_EQ(ast::BinaryOp::kMultiply, rel->op);
-
- ASSERT_TRUE(rel->lhs->Is<ast::IdentifierExpression>());
- auto* ident = rel->lhs->As<ast::IdentifierExpression>();
- EXPECT_EQ(ident->symbol, p->builder().Symbols().Get("a"));
-
- ASSERT_TRUE(rel->rhs->Is<ast::BoolLiteralExpression>());
- ASSERT_TRUE(rel->rhs->As<ast::BoolLiteralExpression>()->value);
+ auto p = parser("a * true");
+ auto e = p->multiplicative_expression();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
+
+ EXPECT_EQ(e->source.range.begin.line, 1u);
+ EXPECT_EQ(e->source.range.begin.column, 3u);
+ EXPECT_EQ(e->source.range.end.line, 1u);
+ EXPECT_EQ(e->source.range.end.column, 4u);
+
+ ASSERT_TRUE(e->Is<ast::BinaryExpression>());
+ auto* rel = e->As<ast::BinaryExpression>();
+ EXPECT_EQ(ast::BinaryOp::kMultiply, rel->op);
+
+ ASSERT_TRUE(rel->lhs->Is<ast::IdentifierExpression>());
+ auto* ident = rel->lhs->As<ast::IdentifierExpression>();
+ EXPECT_EQ(ident->symbol, p->builder().Symbols().Get("a"));
+
+ ASSERT_TRUE(rel->rhs->Is<ast::BoolLiteralExpression>());
+ ASSERT_TRUE(rel->rhs->As<ast::BoolLiteralExpression>()->value);
}
TEST_F(ParserImplTest, MultiplicativeExpression_Parses_Divide) {
- auto p = parser("a / true");
- auto e = p->multiplicative_expression();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
-
- ASSERT_TRUE(e->Is<ast::BinaryExpression>());
- auto* rel = e->As<ast::BinaryExpression>();
- EXPECT_EQ(ast::BinaryOp::kDivide, rel->op);
-
- ASSERT_TRUE(rel->lhs->Is<ast::IdentifierExpression>());
- auto* ident = rel->lhs->As<ast::IdentifierExpression>();
- EXPECT_EQ(ident->symbol, p->builder().Symbols().Get("a"));
-
- ASSERT_TRUE(rel->rhs->Is<ast::BoolLiteralExpression>());
- ASSERT_TRUE(rel->rhs->As<ast::BoolLiteralExpression>()->value);
+ auto p = parser("a / true");
+ auto e = p->multiplicative_expression();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
+
+ ASSERT_TRUE(e->Is<ast::BinaryExpression>());
+ auto* rel = e->As<ast::BinaryExpression>();
+ EXPECT_EQ(ast::BinaryOp::kDivide, rel->op);
+
+ ASSERT_TRUE(rel->lhs->Is<ast::IdentifierExpression>());
+ auto* ident = rel->lhs->As<ast::IdentifierExpression>();
+ EXPECT_EQ(ident->symbol, p->builder().Symbols().Get("a"));
+
+ ASSERT_TRUE(rel->rhs->Is<ast::BoolLiteralExpression>());
+ ASSERT_TRUE(rel->rhs->As<ast::BoolLiteralExpression>()->value);
}
TEST_F(ParserImplTest, MultiplicativeExpression_Parses_Modulo) {
- auto p = parser("a % true");
- auto e = p->multiplicative_expression();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
-
- ASSERT_TRUE(e->Is<ast::BinaryExpression>());
- auto* rel = e->As<ast::BinaryExpression>();
- EXPECT_EQ(ast::BinaryOp::kModulo, rel->op);
-
- ASSERT_TRUE(rel->lhs->Is<ast::IdentifierExpression>());
- auto* ident = rel->lhs->As<ast::IdentifierExpression>();
- EXPECT_EQ(ident->symbol, p->builder().Symbols().Get("a"));
-
- ASSERT_TRUE(rel->rhs->Is<ast::BoolLiteralExpression>());
- ASSERT_TRUE(rel->rhs->As<ast::BoolLiteralExpression>()->value);
+ auto p = parser("a % true");
+ auto e = p->multiplicative_expression();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
+
+ ASSERT_TRUE(e->Is<ast::BinaryExpression>());
+ auto* rel = e->As<ast::BinaryExpression>();
+ EXPECT_EQ(ast::BinaryOp::kModulo, rel->op);
+
+ ASSERT_TRUE(rel->lhs->Is<ast::IdentifierExpression>());
+ auto* ident = rel->lhs->As<ast::IdentifierExpression>();
+ EXPECT_EQ(ident->symbol, p->builder().Symbols().Get("a"));
+
+ ASSERT_TRUE(rel->rhs->Is<ast::BoolLiteralExpression>());
+ ASSERT_TRUE(rel->rhs->As<ast::BoolLiteralExpression>()->value);
}
TEST_F(ParserImplTest, MultiplicativeExpression_InvalidLHS) {
- auto p = parser("if (a) {} * true");
- auto e = p->multiplicative_expression();
- EXPECT_FALSE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- EXPECT_EQ(e.value, nullptr);
+ auto p = parser("if (a) {} * true");
+ auto e = p->multiplicative_expression();
+ EXPECT_FALSE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ EXPECT_EQ(e.value, nullptr);
}
TEST_F(ParserImplTest, MultiplicativeExpression_InvalidRHS) {
- auto p = parser("true * if (a) {}");
- auto e = p->multiplicative_expression();
- EXPECT_FALSE(e.matched);
- EXPECT_TRUE(e.errored);
- EXPECT_EQ(e.value, nullptr);
- ASSERT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:8: unable to parse right side of * expression");
+ auto p = parser("true * if (a) {}");
+ auto e = p->multiplicative_expression();
+ EXPECT_FALSE(e.matched);
+ EXPECT_TRUE(e.errored);
+ EXPECT_EQ(e.value, nullptr);
+ ASSERT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:8: unable to parse right side of * expression");
}
TEST_F(ParserImplTest, MultiplicativeExpression_NoOr_ReturnsLHS) {
- auto p = parser("a true");
- auto e = p->multiplicative_expression();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
- ASSERT_TRUE(e->Is<ast::IdentifierExpression>());
+ auto p = parser("a true");
+ auto e = p->multiplicative_expression();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
+ ASSERT_TRUE(e->Is<ast::IdentifierExpression>());
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_param_list_test.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_param_list_test.cc
index d4d7b9c0fd7..2d79a99da15 100644
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_param_list_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_param_list_test.cc
@@ -18,114 +18,112 @@ namespace tint::reader::wgsl {
namespace {
TEST_F(ParserImplTest, ParamList_Single) {
- auto p = parser("a : i32");
+ auto p = parser("a : i32");
- auto e = p->expect_param_list();
- ASSERT_FALSE(p->has_error()) << p->error();
- ASSERT_FALSE(e.errored);
- EXPECT_EQ(e.value.size(), 1u);
+ auto e = p->expect_param_list();
+ ASSERT_FALSE(p->has_error()) << p->error();
+ ASSERT_FALSE(e.errored);
+ EXPECT_EQ(e.value.size(), 1u);
- EXPECT_EQ(e.value[0]->symbol, p->builder().Symbols().Get("a"));
- EXPECT_TRUE(e.value[0]->type->Is<ast::I32>());
- EXPECT_TRUE(e.value[0]->is_const);
+ EXPECT_EQ(e.value[0]->symbol, p->builder().Symbols().Get("a"));
+ EXPECT_TRUE(e.value[0]->type->Is<ast::I32>());
+ EXPECT_TRUE(e.value[0]->is_const);
- ASSERT_EQ(e.value[0]->source.range.begin.line, 1u);
- ASSERT_EQ(e.value[0]->source.range.begin.column, 1u);
- ASSERT_EQ(e.value[0]->source.range.end.line, 1u);
- ASSERT_EQ(e.value[0]->source.range.end.column, 2u);
+ ASSERT_EQ(e.value[0]->source.range.begin.line, 1u);
+ ASSERT_EQ(e.value[0]->source.range.begin.column, 1u);
+ ASSERT_EQ(e.value[0]->source.range.end.line, 1u);
+ ASSERT_EQ(e.value[0]->source.range.end.column, 2u);
}
TEST_F(ParserImplTest, ParamList_Multiple) {
- auto p = parser("a : i32, b: f32, c: vec2<f32>");
-
- auto e = p->expect_param_list();
- ASSERT_FALSE(p->has_error()) << p->error();
- ASSERT_FALSE(e.errored);
- EXPECT_EQ(e.value.size(), 3u);
-
- EXPECT_EQ(e.value[0]->symbol, p->builder().Symbols().Get("a"));
- EXPECT_TRUE(e.value[0]->type->Is<ast::I32>());
- EXPECT_TRUE(e.value[0]->is_const);
-
- ASSERT_EQ(e.value[0]->source.range.begin.line, 1u);
- ASSERT_EQ(e.value[0]->source.range.begin.column, 1u);
- ASSERT_EQ(e.value[0]->source.range.end.line, 1u);
- ASSERT_EQ(e.value[0]->source.range.end.column, 2u);
-
- EXPECT_EQ(e.value[1]->symbol, p->builder().Symbols().Get("b"));
- EXPECT_TRUE(e.value[1]->type->Is<ast::F32>());
- EXPECT_TRUE(e.value[1]->is_const);
-
- ASSERT_EQ(e.value[1]->source.range.begin.line, 1u);
- ASSERT_EQ(e.value[1]->source.range.begin.column, 10u);
- ASSERT_EQ(e.value[1]->source.range.end.line, 1u);
- ASSERT_EQ(e.value[1]->source.range.end.column, 11u);
-
- EXPECT_EQ(e.value[2]->symbol, p->builder().Symbols().Get("c"));
- ASSERT_TRUE(e.value[2]->type->Is<ast::Vector>());
- ASSERT_TRUE(e.value[2]->type->As<ast::Vector>()->type->Is<ast::F32>());
- EXPECT_EQ(e.value[2]->type->As<ast::Vector>()->width, 2u);
- EXPECT_TRUE(e.value[2]->is_const);
-
- ASSERT_EQ(e.value[2]->source.range.begin.line, 1u);
- ASSERT_EQ(e.value[2]->source.range.begin.column, 18u);
- ASSERT_EQ(e.value[2]->source.range.end.line, 1u);
- ASSERT_EQ(e.value[2]->source.range.end.column, 19u);
+ auto p = parser("a : i32, b: f32, c: vec2<f32>");
+
+ auto e = p->expect_param_list();
+ ASSERT_FALSE(p->has_error()) << p->error();
+ ASSERT_FALSE(e.errored);
+ EXPECT_EQ(e.value.size(), 3u);
+
+ EXPECT_EQ(e.value[0]->symbol, p->builder().Symbols().Get("a"));
+ EXPECT_TRUE(e.value[0]->type->Is<ast::I32>());
+ EXPECT_TRUE(e.value[0]->is_const);
+
+ ASSERT_EQ(e.value[0]->source.range.begin.line, 1u);
+ ASSERT_EQ(e.value[0]->source.range.begin.column, 1u);
+ ASSERT_EQ(e.value[0]->source.range.end.line, 1u);
+ ASSERT_EQ(e.value[0]->source.range.end.column, 2u);
+
+ EXPECT_EQ(e.value[1]->symbol, p->builder().Symbols().Get("b"));
+ EXPECT_TRUE(e.value[1]->type->Is<ast::F32>());
+ EXPECT_TRUE(e.value[1]->is_const);
+
+ ASSERT_EQ(e.value[1]->source.range.begin.line, 1u);
+ ASSERT_EQ(e.value[1]->source.range.begin.column, 10u);
+ ASSERT_EQ(e.value[1]->source.range.end.line, 1u);
+ ASSERT_EQ(e.value[1]->source.range.end.column, 11u);
+
+ EXPECT_EQ(e.value[2]->symbol, p->builder().Symbols().Get("c"));
+ ASSERT_TRUE(e.value[2]->type->Is<ast::Vector>());
+ ASSERT_TRUE(e.value[2]->type->As<ast::Vector>()->type->Is<ast::F32>());
+ EXPECT_EQ(e.value[2]->type->As<ast::Vector>()->width, 2u);
+ EXPECT_TRUE(e.value[2]->is_const);
+
+ ASSERT_EQ(e.value[2]->source.range.begin.line, 1u);
+ ASSERT_EQ(e.value[2]->source.range.begin.column, 18u);
+ ASSERT_EQ(e.value[2]->source.range.end.line, 1u);
+ ASSERT_EQ(e.value[2]->source.range.end.column, 19u);
}
TEST_F(ParserImplTest, ParamList_Empty) {
- auto p = parser("");
- auto e = p->expect_param_list();
- ASSERT_FALSE(p->has_error());
- ASSERT_FALSE(e.errored);
- EXPECT_EQ(e.value.size(), 0u);
+ auto p = parser("");
+ auto e = p->expect_param_list();
+ ASSERT_FALSE(p->has_error());
+ ASSERT_FALSE(e.errored);
+ EXPECT_EQ(e.value.size(), 0u);
}
TEST_F(ParserImplTest, ParamList_TrailingComma) {
- auto p = parser("a : i32,");
- auto e = p->expect_param_list();
- ASSERT_FALSE(p->has_error());
- ASSERT_FALSE(e.errored);
- EXPECT_EQ(e.value.size(), 1u);
+ auto p = parser("a : i32,");
+ auto e = p->expect_param_list();
+ ASSERT_FALSE(p->has_error());
+ ASSERT_FALSE(e.errored);
+ EXPECT_EQ(e.value.size(), 1u);
}
TEST_F(ParserImplTest, ParamList_Attributes) {
- auto p =
- parser("@builtin(position) coord : vec4<f32>, @location(1) loc1 : f32");
-
- auto e = p->expect_param_list();
- ASSERT_FALSE(p->has_error()) << p->error();
- ASSERT_FALSE(e.errored);
- ASSERT_EQ(e.value.size(), 2u);
-
- EXPECT_EQ(e.value[0]->symbol, p->builder().Symbols().Get("coord"));
- ASSERT_TRUE(e.value[0]->type->Is<ast::Vector>());
- EXPECT_TRUE(e.value[0]->type->As<ast::Vector>()->type->Is<ast::F32>());
- EXPECT_EQ(e.value[0]->type->As<ast::Vector>()->width, 4u);
- EXPECT_TRUE(e.value[0]->is_const);
- auto attrs_0 = e.value[0]->attributes;
- ASSERT_EQ(attrs_0.size(), 1u);
- EXPECT_TRUE(attrs_0[0]->Is<ast::BuiltinAttribute>());
- EXPECT_EQ(attrs_0[0]->As<ast::BuiltinAttribute>()->builtin,
- ast::Builtin::kPosition);
-
- ASSERT_EQ(e.value[0]->source.range.begin.line, 1u);
- ASSERT_EQ(e.value[0]->source.range.begin.column, 20u);
- ASSERT_EQ(e.value[0]->source.range.end.line, 1u);
- ASSERT_EQ(e.value[0]->source.range.end.column, 25u);
-
- EXPECT_EQ(e.value[1]->symbol, p->builder().Symbols().Get("loc1"));
- EXPECT_TRUE(e.value[1]->type->Is<ast::F32>());
- EXPECT_TRUE(e.value[1]->is_const);
- auto attrs_1 = e.value[1]->attributes;
- ASSERT_EQ(attrs_1.size(), 1u);
- EXPECT_TRUE(attrs_1[0]->Is<ast::LocationAttribute>());
- EXPECT_EQ(attrs_1[0]->As<ast::LocationAttribute>()->value, 1u);
-
- EXPECT_EQ(e.value[1]->source.range.begin.line, 1u);
- EXPECT_EQ(e.value[1]->source.range.begin.column, 52u);
- EXPECT_EQ(e.value[1]->source.range.end.line, 1u);
- EXPECT_EQ(e.value[1]->source.range.end.column, 56u);
+ auto p = parser("@builtin(position) coord : vec4<f32>, @location(1) loc1 : f32");
+
+ auto e = p->expect_param_list();
+ ASSERT_FALSE(p->has_error()) << p->error();
+ ASSERT_FALSE(e.errored);
+ ASSERT_EQ(e.value.size(), 2u);
+
+ EXPECT_EQ(e.value[0]->symbol, p->builder().Symbols().Get("coord"));
+ ASSERT_TRUE(e.value[0]->type->Is<ast::Vector>());
+ EXPECT_TRUE(e.value[0]->type->As<ast::Vector>()->type->Is<ast::F32>());
+ EXPECT_EQ(e.value[0]->type->As<ast::Vector>()->width, 4u);
+ EXPECT_TRUE(e.value[0]->is_const);
+ auto attrs_0 = e.value[0]->attributes;
+ ASSERT_EQ(attrs_0.size(), 1u);
+ EXPECT_TRUE(attrs_0[0]->Is<ast::BuiltinAttribute>());
+ EXPECT_EQ(attrs_0[0]->As<ast::BuiltinAttribute>()->builtin, ast::Builtin::kPosition);
+
+ ASSERT_EQ(e.value[0]->source.range.begin.line, 1u);
+ ASSERT_EQ(e.value[0]->source.range.begin.column, 20u);
+ ASSERT_EQ(e.value[0]->source.range.end.line, 1u);
+ ASSERT_EQ(e.value[0]->source.range.end.column, 25u);
+
+ EXPECT_EQ(e.value[1]->symbol, p->builder().Symbols().Get("loc1"));
+ EXPECT_TRUE(e.value[1]->type->Is<ast::F32>());
+ EXPECT_TRUE(e.value[1]->is_const);
+ auto attrs_1 = e.value[1]->attributes;
+ ASSERT_EQ(attrs_1.size(), 1u);
+ EXPECT_TRUE(attrs_1[0]->Is<ast::LocationAttribute>());
+ EXPECT_EQ(attrs_1[0]->As<ast::LocationAttribute>()->value, 1u);
+
+ EXPECT_EQ(e.value[1]->source.range.begin.line, 1u);
+ EXPECT_EQ(e.value[1]->source.range.begin.column, 52u);
+ EXPECT_EQ(e.value[1]->source.range.end.line, 1u);
+ EXPECT_EQ(e.value[1]->source.range.end.column, 56u);
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_paren_rhs_stmt_test.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_paren_rhs_stmt_test.cc
index 214afe7c94e..dd5a978bbe9 100644
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_paren_rhs_stmt_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_paren_rhs_stmt_test.cc
@@ -18,48 +18,48 @@ namespace tint::reader::wgsl {
namespace {
TEST_F(ParserImplTest, ParenRhsStmt) {
- auto p = parser("(a + b)");
- auto e = p->expect_paren_rhs_stmt();
- ASSERT_FALSE(p->has_error()) << p->error();
- ASSERT_FALSE(e.errored);
- ASSERT_NE(e.value, nullptr);
- ASSERT_TRUE(e->Is<ast::BinaryExpression>());
+ auto p = parser("(a + b)");
+ auto e = p->expect_paren_rhs_stmt();
+ ASSERT_FALSE(p->has_error()) << p->error();
+ ASSERT_FALSE(e.errored);
+ ASSERT_NE(e.value, nullptr);
+ ASSERT_TRUE(e->Is<ast::BinaryExpression>());
}
TEST_F(ParserImplTest, ParenRhsStmt_MissingLeftParen) {
- auto p = parser("true)");
- auto e = p->expect_paren_rhs_stmt();
- ASSERT_TRUE(p->has_error());
- ASSERT_TRUE(e.errored);
- ASSERT_EQ(e.value, nullptr);
- EXPECT_EQ(p->error(), "1:1: expected '('");
+ auto p = parser("true)");
+ auto e = p->expect_paren_rhs_stmt();
+ ASSERT_TRUE(p->has_error());
+ ASSERT_TRUE(e.errored);
+ ASSERT_EQ(e.value, nullptr);
+ EXPECT_EQ(p->error(), "1:1: expected '('");
}
TEST_F(ParserImplTest, ParenRhsStmt_MissingRightParen) {
- auto p = parser("(true");
- auto e = p->expect_paren_rhs_stmt();
- ASSERT_TRUE(p->has_error());
- ASSERT_TRUE(e.errored);
- ASSERT_EQ(e.value, nullptr);
- EXPECT_EQ(p->error(), "1:6: expected ')'");
+ auto p = parser("(true");
+ auto e = p->expect_paren_rhs_stmt();
+ ASSERT_TRUE(p->has_error());
+ ASSERT_TRUE(e.errored);
+ ASSERT_EQ(e.value, nullptr);
+ EXPECT_EQ(p->error(), "1:6: expected ')'");
}
TEST_F(ParserImplTest, ParenRhsStmt_InvalidExpression) {
- auto p = parser("(if (a() {})");
- auto e = p->expect_paren_rhs_stmt();
- ASSERT_TRUE(p->has_error());
- ASSERT_TRUE(e.errored);
- ASSERT_EQ(e.value, nullptr);
- EXPECT_EQ(p->error(), "1:2: unable to parse expression");
+ auto p = parser("(if (a() {})");
+ auto e = p->expect_paren_rhs_stmt();
+ ASSERT_TRUE(p->has_error());
+ ASSERT_TRUE(e.errored);
+ ASSERT_EQ(e.value, nullptr);
+ EXPECT_EQ(p->error(), "1:2: unable to parse expression");
}
TEST_F(ParserImplTest, ParenRhsStmt_MissingExpression) {
- auto p = parser("()");
- auto e = p->expect_paren_rhs_stmt();
- ASSERT_TRUE(p->has_error());
- ASSERT_TRUE(e.errored);
- ASSERT_EQ(e.value, nullptr);
- EXPECT_EQ(p->error(), "1:2: unable to parse expression");
+ auto p = parser("()");
+ auto e = p->expect_paren_rhs_stmt();
+ ASSERT_TRUE(p->has_error());
+ ASSERT_TRUE(e.errored);
+ ASSERT_EQ(e.value, nullptr);
+ EXPECT_EQ(p->error(), "1:2: unable to parse expression");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_pipeline_stage_test.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_pipeline_stage_test.cc
index c0bce43d6d1..75a1ec200a5 100644
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_pipeline_stage_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_pipeline_stage_test.cc
@@ -18,45 +18,44 @@ namespace tint::reader::wgsl {
namespace {
struct PipelineStageData {
- std::string input;
- ast::PipelineStage result;
+ std::string input;
+ ast::PipelineStage result;
};
inline std::ostream& operator<<(std::ostream& out, PipelineStageData data) {
- return out << data.input;
+ return out << data.input;
}
class PipelineStageTest : public ParserImplTestWithParam<PipelineStageData> {};
TEST_P(PipelineStageTest, Parses) {
- auto params = GetParam();
- auto p = parser(params.input);
-
- auto stage = p->expect_pipeline_stage();
- ASSERT_FALSE(p->has_error()) << p->error();
- ASSERT_FALSE(stage.errored);
- EXPECT_EQ(stage.value, params.result);
- EXPECT_EQ(stage.source.range.begin.line, 1u);
- EXPECT_EQ(stage.source.range.begin.column, 1u);
- EXPECT_EQ(stage.source.range.end.line, 1u);
- EXPECT_EQ(stage.source.range.end.column, 1u + params.input.size());
-
- auto t = p->next();
- EXPECT_TRUE(t.IsEof());
+ auto params = GetParam();
+ auto p = parser(params.input);
+
+ auto stage = p->expect_pipeline_stage();
+ ASSERT_FALSE(p->has_error()) << p->error();
+ ASSERT_FALSE(stage.errored);
+ EXPECT_EQ(stage.value, params.result);
+ EXPECT_EQ(stage.source.range.begin.line, 1u);
+ EXPECT_EQ(stage.source.range.begin.column, 1u);
+ EXPECT_EQ(stage.source.range.end.line, 1u);
+ EXPECT_EQ(stage.source.range.end.column, 1u + params.input.size());
+
+ auto t = p->next();
+ EXPECT_TRUE(t.IsEof());
}
INSTANTIATE_TEST_SUITE_P(
ParserImplTest,
PipelineStageTest,
- testing::Values(
- PipelineStageData{"vertex", ast::PipelineStage::kVertex},
- PipelineStageData{"fragment", ast::PipelineStage::kFragment},
- PipelineStageData{"compute", ast::PipelineStage::kCompute}));
+ testing::Values(PipelineStageData{"vertex", ast::PipelineStage::kVertex},
+ PipelineStageData{"fragment", ast::PipelineStage::kFragment},
+ PipelineStageData{"compute", ast::PipelineStage::kCompute}));
TEST_F(ParserImplTest, PipelineStage_NoMatch) {
- auto p = parser("not-a-stage");
- auto stage = p->expect_pipeline_stage();
- ASSERT_TRUE(p->has_error());
- ASSERT_TRUE(stage.errored);
- ASSERT_EQ(p->error(), "1:1: invalid value for stage attribute");
+ auto p = parser("not-a-stage");
+ auto stage = p->expect_pipeline_stage();
+ ASSERT_TRUE(p->has_error());
+ ASSERT_TRUE(stage.errored);
+ ASSERT_EQ(p->error(), "1:1: invalid value for stage attribute");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_primary_expression_test.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_primary_expression_test.cc
index c81fdd98542..493ce878a15 100644
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_primary_expression_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_primary_expression_test.cc
@@ -19,294 +19,304 @@ namespace tint::reader::wgsl {
namespace {
TEST_F(ParserImplTest, PrimaryExpression_Ident) {
- auto p = parser("a");
- auto e = p->primary_expression();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
- ASSERT_TRUE(e->Is<ast::IdentifierExpression>());
- auto* ident = e->As<ast::IdentifierExpression>();
- EXPECT_EQ(ident->symbol, p->builder().Symbols().Get("a"));
+ auto p = parser("a");
+ auto e = p->primary_expression();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
+ ASSERT_TRUE(e->Is<ast::IdentifierExpression>());
+ auto* ident = e->As<ast::IdentifierExpression>();
+ EXPECT_EQ(ident->symbol, p->builder().Symbols().Get("a"));
}
TEST_F(ParserImplTest, PrimaryExpression_TypeDecl) {
- auto p = parser("vec4<i32>(1, 2, 3, 4))");
- auto e = p->primary_expression();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
- ASSERT_TRUE(e->Is<ast::CallExpression>());
- auto* call = e->As<ast::CallExpression>();
-
- EXPECT_NE(call->target.type, nullptr);
-
- ASSERT_EQ(call->args.size(), 4u);
- const auto& val = call->args;
- ASSERT_TRUE(val[0]->Is<ast::SintLiteralExpression>());
- EXPECT_EQ(val[0]->As<ast::SintLiteralExpression>()->value, 1);
-
- ASSERT_TRUE(val[1]->Is<ast::SintLiteralExpression>());
- EXPECT_EQ(val[1]->As<ast::SintLiteralExpression>()->value, 2);
-
- ASSERT_TRUE(val[2]->Is<ast::SintLiteralExpression>());
- EXPECT_EQ(val[2]->As<ast::SintLiteralExpression>()->value, 3);
-
- ASSERT_TRUE(val[3]->Is<ast::SintLiteralExpression>());
- EXPECT_EQ(val[3]->As<ast::SintLiteralExpression>()->value, 4);
+ auto p = parser("vec4<i32>(1, 2, 3, 4))");
+ auto e = p->primary_expression();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
+ ASSERT_TRUE(e->Is<ast::CallExpression>());
+ auto* call = e->As<ast::CallExpression>();
+
+ EXPECT_NE(call->target.type, nullptr);
+
+ ASSERT_EQ(call->args.size(), 4u);
+ const auto& val = call->args;
+ ASSERT_TRUE(val[0]->Is<ast::IntLiteralExpression>());
+ EXPECT_EQ(val[0]->As<ast::IntLiteralExpression>()->value, 1);
+ EXPECT_EQ(val[0]->As<ast::IntLiteralExpression>()->suffix,
+ ast::IntLiteralExpression::Suffix::kNone);
+
+ ASSERT_TRUE(val[1]->Is<ast::IntLiteralExpression>());
+ EXPECT_EQ(val[1]->As<ast::IntLiteralExpression>()->value, 2);
+ EXPECT_EQ(val[1]->As<ast::IntLiteralExpression>()->suffix,
+ ast::IntLiteralExpression::Suffix::kNone);
+
+ ASSERT_TRUE(val[2]->Is<ast::IntLiteralExpression>());
+ EXPECT_EQ(val[2]->As<ast::IntLiteralExpression>()->value, 3);
+ EXPECT_EQ(val[2]->As<ast::IntLiteralExpression>()->suffix,
+ ast::IntLiteralExpression::Suffix::kNone);
+
+ ASSERT_TRUE(val[3]->Is<ast::IntLiteralExpression>());
+ EXPECT_EQ(val[3]->As<ast::IntLiteralExpression>()->value, 4);
+ EXPECT_EQ(val[3]->As<ast::IntLiteralExpression>()->suffix,
+ ast::IntLiteralExpression::Suffix::kNone);
}
TEST_F(ParserImplTest, PrimaryExpression_TypeDecl_ZeroConstructor) {
- auto p = parser("vec4<i32>()");
- auto e = p->primary_expression();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
+ auto p = parser("vec4<i32>()");
+ auto e = p->primary_expression();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
- ASSERT_TRUE(e->Is<ast::CallExpression>());
- auto* call = e->As<ast::CallExpression>();
+ ASSERT_TRUE(e->Is<ast::CallExpression>());
+ auto* call = e->As<ast::CallExpression>();
- ASSERT_EQ(call->args.size(), 0u);
+ ASSERT_EQ(call->args.size(), 0u);
}
TEST_F(ParserImplTest, PrimaryExpression_TypeDecl_InvalidTypeDecl) {
- auto p = parser("vec4<if>(2., 3., 4., 5.)");
- auto e = p->primary_expression();
- EXPECT_FALSE(e.matched);
- EXPECT_TRUE(e.errored);
- EXPECT_EQ(e.value, nullptr);
- ASSERT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:6: invalid type for vector");
+ auto p = parser("vec4<if>(2., 3., 4., 5.)");
+ auto e = p->primary_expression();
+ EXPECT_FALSE(e.matched);
+ EXPECT_TRUE(e.errored);
+ EXPECT_EQ(e.value, nullptr);
+ ASSERT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:6: invalid type for vector");
}
TEST_F(ParserImplTest, PrimaryExpression_TypeDecl_MissingLeftParen) {
- auto p = parser("vec4<f32> 2., 3., 4., 5.)");
- auto e = p->primary_expression();
- EXPECT_FALSE(e.matched);
- EXPECT_TRUE(e.errored);
- EXPECT_EQ(e.value, nullptr);
- ASSERT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:11: expected '(' for type constructor");
+ auto p = parser("vec4<f32> 2., 3., 4., 5.)");
+ auto e = p->primary_expression();
+ EXPECT_FALSE(e.matched);
+ EXPECT_TRUE(e.errored);
+ EXPECT_EQ(e.value, nullptr);
+ ASSERT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:11: expected '(' for type constructor");
}
TEST_F(ParserImplTest, PrimaryExpression_TypeDecl_MissingRightParen) {
- auto p = parser("vec4<f32>(2., 3., 4., 5.");
- auto e = p->primary_expression();
- EXPECT_FALSE(e.matched);
- EXPECT_TRUE(e.errored);
- EXPECT_EQ(e.value, nullptr);
- ASSERT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:25: expected ')' for type constructor");
+ auto p = parser("vec4<f32>(2., 3., 4., 5.");
+ auto e = p->primary_expression();
+ EXPECT_FALSE(e.matched);
+ EXPECT_TRUE(e.errored);
+ EXPECT_EQ(e.value, nullptr);
+ ASSERT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:25: expected ')' for type constructor");
}
TEST_F(ParserImplTest, PrimaryExpression_TypeDecl_InvalidValue) {
- auto p = parser("i32(if(a) {})");
- auto e = p->primary_expression();
- EXPECT_FALSE(e.matched);
- EXPECT_TRUE(e.errored);
- EXPECT_EQ(e.value, nullptr);
- ASSERT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:5: expected ')' for type constructor");
+ auto p = parser("i32(if(a) {})");
+ auto e = p->primary_expression();
+ EXPECT_FALSE(e.matched);
+ EXPECT_TRUE(e.errored);
+ EXPECT_EQ(e.value, nullptr);
+ ASSERT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:5: expected ')' for type constructor");
}
TEST_F(ParserImplTest, PrimaryExpression_TypeDecl_StructConstructor_Empty) {
- auto p = parser(R"(
+ auto p = parser(R"(
struct S { a : i32, b : f32, }
S()
)");
- p->expect_global_decl();
- ASSERT_FALSE(p->has_error()) << p->error();
+ p->global_decl();
+ ASSERT_FALSE(p->has_error()) << p->error();
- auto e = p->primary_expression();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
+ auto e = p->primary_expression();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
- ASSERT_TRUE(e->Is<ast::CallExpression>());
- auto* call = e->As<ast::CallExpression>();
+ ASSERT_TRUE(e->Is<ast::CallExpression>());
+ auto* call = e->As<ast::CallExpression>();
- ASSERT_NE(call->target.name, nullptr);
- EXPECT_EQ(call->target.name->symbol, p->builder().Symbols().Get("S"));
+ ASSERT_NE(call->target.name, nullptr);
+ EXPECT_EQ(call->target.name->symbol, p->builder().Symbols().Get("S"));
- ASSERT_EQ(call->args.size(), 0u);
+ ASSERT_EQ(call->args.size(), 0u);
}
TEST_F(ParserImplTest, PrimaryExpression_TypeDecl_StructConstructor_NotEmpty) {
- auto p = parser(R"(
+ auto p = parser(R"(
struct S { a : i32, b : f32, }
S(1u, 2.0)
)");
- p->expect_global_decl();
- ASSERT_FALSE(p->has_error()) << p->error();
+ p->global_decl();
+ ASSERT_FALSE(p->has_error()) << p->error();
- auto e = p->primary_expression();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
+ auto e = p->primary_expression();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
- ASSERT_TRUE(e->Is<ast::CallExpression>());
- auto* call = e->As<ast::CallExpression>();
+ ASSERT_TRUE(e->Is<ast::CallExpression>());
+ auto* call = e->As<ast::CallExpression>();
- ASSERT_NE(call->target.name, nullptr);
- EXPECT_EQ(call->target.name->symbol, p->builder().Symbols().Get("S"));
+ ASSERT_NE(call->target.name, nullptr);
+ EXPECT_EQ(call->target.name->symbol, p->builder().Symbols().Get("S"));
- ASSERT_EQ(call->args.size(), 2u);
+ ASSERT_EQ(call->args.size(), 2u);
- ASSERT_TRUE(call->args[0]->Is<ast::UintLiteralExpression>());
- EXPECT_EQ(call->args[0]->As<ast::UintLiteralExpression>()->value, 1u);
+ ASSERT_TRUE(call->args[0]->Is<ast::IntLiteralExpression>());
+ EXPECT_EQ(call->args[0]->As<ast::IntLiteralExpression>()->value, 1u);
+ EXPECT_EQ(call->args[0]->As<ast::IntLiteralExpression>()->suffix,
+ ast::IntLiteralExpression::Suffix::kU);
- ASSERT_TRUE(call->args[1]->Is<ast::FloatLiteralExpression>());
- EXPECT_EQ(call->args[1]->As<ast::FloatLiteralExpression>()->value, 2.f);
+ ASSERT_TRUE(call->args[1]->Is<ast::FloatLiteralExpression>());
+ EXPECT_EQ(call->args[1]->As<ast::FloatLiteralExpression>()->value, 2.f);
}
TEST_F(ParserImplTest, PrimaryExpression_ConstLiteral_True) {
- auto p = parser("true");
- auto e = p->primary_expression();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
- ASSERT_TRUE(e->Is<ast::BoolLiteralExpression>());
- EXPECT_TRUE(e->As<ast::BoolLiteralExpression>()->value);
+ auto p = parser("true");
+ auto e = p->primary_expression();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
+ ASSERT_TRUE(e->Is<ast::BoolLiteralExpression>());
+ EXPECT_TRUE(e->As<ast::BoolLiteralExpression>()->value);
}
TEST_F(ParserImplTest, PrimaryExpression_ParenExpr) {
- auto p = parser("(a == b)");
- auto e = p->primary_expression();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
- ASSERT_TRUE(e->Is<ast::BinaryExpression>());
+ auto p = parser("(a == b)");
+ auto e = p->primary_expression();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
+ ASSERT_TRUE(e->Is<ast::BinaryExpression>());
}
TEST_F(ParserImplTest, PrimaryExpression_ParenExpr_MissingRightParen) {
- auto p = parser("(a == b");
- auto e = p->primary_expression();
- EXPECT_FALSE(e.matched);
- EXPECT_TRUE(e.errored);
- EXPECT_EQ(e.value, nullptr);
- ASSERT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:8: expected ')'");
+ auto p = parser("(a == b");
+ auto e = p->primary_expression();
+ EXPECT_FALSE(e.matched);
+ EXPECT_TRUE(e.errored);
+ EXPECT_EQ(e.value, nullptr);
+ ASSERT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:8: expected ')'");
}
TEST_F(ParserImplTest, PrimaryExpression_ParenExpr_MissingExpr) {
- auto p = parser("()");
- auto e = p->primary_expression();
- EXPECT_FALSE(e.matched);
- EXPECT_TRUE(e.errored);
- EXPECT_EQ(e.value, nullptr);
- ASSERT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:2: unable to parse expression");
+ auto p = parser("()");
+ auto e = p->primary_expression();
+ EXPECT_FALSE(e.matched);
+ EXPECT_TRUE(e.errored);
+ EXPECT_EQ(e.value, nullptr);
+ ASSERT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:2: unable to parse expression");
}
TEST_F(ParserImplTest, PrimaryExpression_ParenExpr_InvalidExpr) {
- auto p = parser("(if (a) {})");
- auto e = p->primary_expression();
- EXPECT_FALSE(e.matched);
- EXPECT_TRUE(e.errored);
- EXPECT_EQ(e.value, nullptr);
- ASSERT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:2: unable to parse expression");
+ auto p = parser("(if (a) {})");
+ auto e = p->primary_expression();
+ EXPECT_FALSE(e.matched);
+ EXPECT_TRUE(e.errored);
+ EXPECT_EQ(e.value, nullptr);
+ ASSERT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:2: unable to parse expression");
}
TEST_F(ParserImplTest, PrimaryExpression_Cast) {
- auto p = parser("f32(1)");
+ auto p = parser("f32(1)");
- auto e = p->primary_expression();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
+ auto e = p->primary_expression();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
- ASSERT_TRUE(e->Is<ast::CallExpression>());
- auto* call = e->As<ast::CallExpression>();
+ ASSERT_TRUE(e->Is<ast::CallExpression>());
+ auto* call = e->As<ast::CallExpression>();
- ASSERT_TRUE(call->target.type->Is<ast::F32>());
- ASSERT_EQ(call->args.size(), 1u);
+ ASSERT_TRUE(call->target.type->Is<ast::F32>());
+ ASSERT_EQ(call->args.size(), 1u);
- ASSERT_TRUE(call->args[0]->Is<ast::IntLiteralExpression>());
+ ASSERT_TRUE(call->args[0]->Is<ast::IntLiteralExpression>());
}
TEST_F(ParserImplTest, PrimaryExpression_Bitcast) {
- auto p = parser("bitcast<f32>(1)");
-
- auto e = p->primary_expression();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
- ASSERT_TRUE(e->Is<ast::BitcastExpression>());
-
- auto* c = e->As<ast::BitcastExpression>();
- ASSERT_TRUE(c->type->Is<ast::F32>());
- ASSERT_TRUE(c->expr->Is<ast::IntLiteralExpression>());
+ auto p = parser("bitcast<f32>(1)");
+
+ auto e = p->primary_expression();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
+ ASSERT_TRUE(e->Is<ast::BitcastExpression>());
+
+ auto* c = e->As<ast::BitcastExpression>();
+ ASSERT_TRUE(c->type->Is<ast::F32>());
+ ASSERT_TRUE(c->expr->Is<ast::IntLiteralExpression>());
}
TEST_F(ParserImplTest, PrimaryExpression_Bitcast_MissingGreaterThan) {
- auto p = parser("bitcast<f32(1)");
- auto e = p->primary_expression();
- EXPECT_FALSE(e.matched);
- EXPECT_TRUE(e.errored);
- EXPECT_EQ(e.value, nullptr);
- ASSERT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:12: expected '>' for bitcast expression");
+ auto p = parser("bitcast<f32(1)");
+ auto e = p->primary_expression();
+ EXPECT_FALSE(e.matched);
+ EXPECT_TRUE(e.errored);
+ EXPECT_EQ(e.value, nullptr);
+ ASSERT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:12: expected '>' for bitcast expression");
}
TEST_F(ParserImplTest, PrimaryExpression_Bitcast_MissingType) {
- auto p = parser("bitcast<>(1)");
- auto e = p->primary_expression();
- EXPECT_FALSE(e.matched);
- EXPECT_TRUE(e.errored);
- EXPECT_EQ(e.value, nullptr);
- ASSERT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:9: invalid type for bitcast expression");
+ auto p = parser("bitcast<>(1)");
+ auto e = p->primary_expression();
+ EXPECT_FALSE(e.matched);
+ EXPECT_TRUE(e.errored);
+ EXPECT_EQ(e.value, nullptr);
+ ASSERT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:9: invalid type for bitcast expression");
}
TEST_F(ParserImplTest, PrimaryExpression_Bitcast_MissingLeftParen) {
- auto p = parser("bitcast<f32>1)");
- auto e = p->primary_expression();
- EXPECT_FALSE(e.matched);
- EXPECT_TRUE(e.errored);
- EXPECT_EQ(e.value, nullptr);
- ASSERT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:13: expected '('");
+ auto p = parser("bitcast<f32>1)");
+ auto e = p->primary_expression();
+ EXPECT_FALSE(e.matched);
+ EXPECT_TRUE(e.errored);
+ EXPECT_EQ(e.value, nullptr);
+ ASSERT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:13: expected '('");
}
TEST_F(ParserImplTest, PrimaryExpression_Bitcast_MissingRightParen) {
- auto p = parser("bitcast<f32>(1");
- auto e = p->primary_expression();
- EXPECT_FALSE(e.matched);
- EXPECT_TRUE(e.errored);
- EXPECT_EQ(e.value, nullptr);
- ASSERT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:15: expected ')'");
+ auto p = parser("bitcast<f32>(1");
+ auto e = p->primary_expression();
+ EXPECT_FALSE(e.matched);
+ EXPECT_TRUE(e.errored);
+ EXPECT_EQ(e.value, nullptr);
+ ASSERT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:15: expected ')'");
}
TEST_F(ParserImplTest, PrimaryExpression_Bitcast_MissingExpression) {
- auto p = parser("bitcast<f32>()");
- auto e = p->primary_expression();
- EXPECT_FALSE(e.matched);
- EXPECT_TRUE(e.errored);
- EXPECT_EQ(e.value, nullptr);
- ASSERT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:14: unable to parse expression");
+ auto p = parser("bitcast<f32>()");
+ auto e = p->primary_expression();
+ EXPECT_FALSE(e.matched);
+ EXPECT_TRUE(e.errored);
+ EXPECT_EQ(e.value, nullptr);
+ ASSERT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:14: unable to parse expression");
}
TEST_F(ParserImplTest, PrimaryExpression_bitcast_InvalidExpression) {
- auto p = parser("bitcast<f32>(if (a) {})");
- auto e = p->primary_expression();
- EXPECT_FALSE(e.matched);
- EXPECT_TRUE(e.errored);
- EXPECT_EQ(e.value, nullptr);
- ASSERT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:14: unable to parse expression");
+ auto p = parser("bitcast<f32>(if (a) {})");
+ auto e = p->primary_expression();
+ EXPECT_FALSE(e.matched);
+ EXPECT_TRUE(e.errored);
+ EXPECT_EQ(e.value, nullptr);
+ ASSERT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:14: unable to parse expression");
}
} // namespace
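[Editor's note, not part of the diff] One more parser API shift is visible in the struct-constructor tests above: the leading struct declaration is now consumed with p->global_decl() rather than p->expect_global_decl() before the constructor call is parsed. A minimal sketch of the updated flow, assuming the same ParserImplTest fixture; the struct name, test name, and literals are placeholders:

// Sketch only -- not part of the Chromium change above. Mirrors the updated
// global_decl() + primary_expression() flow; would live alongside the tests
// above (same includes and fixture).
TEST_F(ParserImplTest, Sketch_StructConstructorAfterGlobalDecl) {
    auto p = parser(R"(
struct Pair { a : i32, b : f32, }
Pair(1u, 2.0)
)");
    p->global_decl();  // consume 'struct Pair { ... }'
    ASSERT_FALSE(p->has_error()) << p->error();

    auto e = p->primary_expression();
    EXPECT_TRUE(e.matched);
    ASSERT_NE(e.value, nullptr);

    auto* call = e->As<ast::CallExpression>();
    ASSERT_NE(call, nullptr);
    ASSERT_NE(call->target.name, nullptr);
    EXPECT_EQ(call->target.name->symbol, p->builder().Symbols().Get("Pair"));
    ASSERT_EQ(call->args.size(), 2u);
}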
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_relational_expression_test.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_relational_expression_test.cc
index f071731d0c7..7ad161f94bc 100644
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_relational_expression_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_relational_expression_test.cc
@@ -18,110 +18,130 @@ namespace tint::reader::wgsl {
namespace {
TEST_F(ParserImplTest, RelationalExpression_Parses_LessThan) {
- auto p = parser("a < true");
- auto e = p->relational_expression();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
-
- ASSERT_TRUE(e->Is<ast::BinaryExpression>());
- auto* rel = e->As<ast::BinaryExpression>();
- EXPECT_EQ(ast::BinaryOp::kLessThan, rel->op);
-
- ASSERT_TRUE(rel->lhs->Is<ast::IdentifierExpression>());
- auto* ident = rel->lhs->As<ast::IdentifierExpression>();
- EXPECT_EQ(ident->symbol, p->builder().Symbols().Register("a"));
-
- ASSERT_TRUE(rel->rhs->Is<ast::BoolLiteralExpression>());
- ASSERT_TRUE(rel->rhs->As<ast::BoolLiteralExpression>()->value);
+ auto p = parser("a < true");
+ auto e = p->relational_expression();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
+
+ EXPECT_EQ(e->source.range.begin.line, 1u);
+ EXPECT_EQ(e->source.range.begin.column, 3u);
+ EXPECT_EQ(e->source.range.end.line, 1u);
+ EXPECT_EQ(e->source.range.end.column, 4u);
+
+ ASSERT_TRUE(e->Is<ast::BinaryExpression>());
+ auto* rel = e->As<ast::BinaryExpression>();
+ EXPECT_EQ(ast::BinaryOp::kLessThan, rel->op);
+
+ ASSERT_TRUE(rel->lhs->Is<ast::IdentifierExpression>());
+ auto* ident = rel->lhs->As<ast::IdentifierExpression>();
+ EXPECT_EQ(ident->symbol, p->builder().Symbols().Register("a"));
+
+ ASSERT_TRUE(rel->rhs->Is<ast::BoolLiteralExpression>());
+ ASSERT_TRUE(rel->rhs->As<ast::BoolLiteralExpression>()->value);
}
TEST_F(ParserImplTest, RelationalExpression_Parses_GreaterThan) {
- auto p = parser("a > true");
- auto e = p->relational_expression();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
-
- ASSERT_TRUE(e->Is<ast::BinaryExpression>());
- auto* rel = e->As<ast::BinaryExpression>();
- EXPECT_EQ(ast::BinaryOp::kGreaterThan, rel->op);
-
- ASSERT_TRUE(rel->lhs->Is<ast::IdentifierExpression>());
- auto* ident = rel->lhs->As<ast::IdentifierExpression>();
- EXPECT_EQ(ident->symbol, p->builder().Symbols().Register("a"));
-
- ASSERT_TRUE(rel->rhs->Is<ast::BoolLiteralExpression>());
- ASSERT_TRUE(rel->rhs->As<ast::BoolLiteralExpression>()->value);
+ auto p = parser("a > true");
+ auto e = p->relational_expression();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
+
+ EXPECT_EQ(e->source.range.begin.line, 1u);
+ EXPECT_EQ(e->source.range.begin.column, 3u);
+ EXPECT_EQ(e->source.range.end.line, 1u);
+ EXPECT_EQ(e->source.range.end.column, 4u);
+
+ ASSERT_TRUE(e->Is<ast::BinaryExpression>());
+ auto* rel = e->As<ast::BinaryExpression>();
+ EXPECT_EQ(ast::BinaryOp::kGreaterThan, rel->op);
+
+ ASSERT_TRUE(rel->lhs->Is<ast::IdentifierExpression>());
+ auto* ident = rel->lhs->As<ast::IdentifierExpression>();
+ EXPECT_EQ(ident->symbol, p->builder().Symbols().Register("a"));
+
+ ASSERT_TRUE(rel->rhs->Is<ast::BoolLiteralExpression>());
+ ASSERT_TRUE(rel->rhs->As<ast::BoolLiteralExpression>()->value);
}
TEST_F(ParserImplTest, RelationalExpression_Parses_LessThanEqual) {
- auto p = parser("a <= true");
- auto e = p->relational_expression();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
-
- ASSERT_TRUE(e->Is<ast::BinaryExpression>());
- auto* rel = e->As<ast::BinaryExpression>();
- EXPECT_EQ(ast::BinaryOp::kLessThanEqual, rel->op);
-
- ASSERT_TRUE(rel->lhs->Is<ast::IdentifierExpression>());
- auto* ident = rel->lhs->As<ast::IdentifierExpression>();
- EXPECT_EQ(ident->symbol, p->builder().Symbols().Register("a"));
-
- ASSERT_TRUE(rel->rhs->Is<ast::BoolLiteralExpression>());
- ASSERT_TRUE(rel->rhs->As<ast::BoolLiteralExpression>()->value);
+ auto p = parser("a <= true");
+ auto e = p->relational_expression();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
+
+ EXPECT_EQ(e->source.range.begin.line, 1u);
+ EXPECT_EQ(e->source.range.begin.column, 3u);
+ EXPECT_EQ(e->source.range.end.line, 1u);
+ EXPECT_EQ(e->source.range.end.column, 5u);
+
+ ASSERT_TRUE(e->Is<ast::BinaryExpression>());
+ auto* rel = e->As<ast::BinaryExpression>();
+ EXPECT_EQ(ast::BinaryOp::kLessThanEqual, rel->op);
+
+ ASSERT_TRUE(rel->lhs->Is<ast::IdentifierExpression>());
+ auto* ident = rel->lhs->As<ast::IdentifierExpression>();
+ EXPECT_EQ(ident->symbol, p->builder().Symbols().Register("a"));
+
+ ASSERT_TRUE(rel->rhs->Is<ast::BoolLiteralExpression>());
+ ASSERT_TRUE(rel->rhs->As<ast::BoolLiteralExpression>()->value);
}
TEST_F(ParserImplTest, RelationalExpression_Parses_GreaterThanEqual) {
- auto p = parser("a >= true");
- auto e = p->relational_expression();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
-
- ASSERT_TRUE(e->Is<ast::BinaryExpression>());
- auto* rel = e->As<ast::BinaryExpression>();
- EXPECT_EQ(ast::BinaryOp::kGreaterThanEqual, rel->op);
-
- ASSERT_TRUE(rel->lhs->Is<ast::IdentifierExpression>());
- auto* ident = rel->lhs->As<ast::IdentifierExpression>();
- EXPECT_EQ(ident->symbol, p->builder().Symbols().Register("a"));
-
- ASSERT_TRUE(rel->rhs->Is<ast::BoolLiteralExpression>());
- ASSERT_TRUE(rel->rhs->As<ast::BoolLiteralExpression>()->value);
+ auto p = parser("a >= true");
+ auto e = p->relational_expression();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
+
+ EXPECT_EQ(e->source.range.begin.line, 1u);
+ EXPECT_EQ(e->source.range.begin.column, 3u);
+ EXPECT_EQ(e->source.range.end.line, 1u);
+ EXPECT_EQ(e->source.range.end.column, 5u);
+
+ ASSERT_TRUE(e->Is<ast::BinaryExpression>());
+ auto* rel = e->As<ast::BinaryExpression>();
+ EXPECT_EQ(ast::BinaryOp::kGreaterThanEqual, rel->op);
+
+ ASSERT_TRUE(rel->lhs->Is<ast::IdentifierExpression>());
+ auto* ident = rel->lhs->As<ast::IdentifierExpression>();
+ EXPECT_EQ(ident->symbol, p->builder().Symbols().Register("a"));
+
+ ASSERT_TRUE(rel->rhs->Is<ast::BoolLiteralExpression>());
+ ASSERT_TRUE(rel->rhs->As<ast::BoolLiteralExpression>()->value);
}
TEST_F(ParserImplTest, RelationalExpression_InvalidLHS) {
- auto p = parser("if (a) {} < true");
- auto e = p->relational_expression();
- EXPECT_FALSE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- EXPECT_EQ(e.value, nullptr);
+ auto p = parser("if (a) {} < true");
+ auto e = p->relational_expression();
+ EXPECT_FALSE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ EXPECT_EQ(e.value, nullptr);
}
TEST_F(ParserImplTest, RelationalExpression_InvalidRHS) {
- auto p = parser("true < if (a) {}");
- auto e = p->relational_expression();
- ASSERT_TRUE(p->has_error());
- EXPECT_EQ(e.value, nullptr);
- EXPECT_EQ(p->error(), "1:8: unable to parse right side of < expression");
+ auto p = parser("true < if (a) {}");
+ auto e = p->relational_expression();
+ ASSERT_TRUE(p->has_error());
+ EXPECT_EQ(e.value, nullptr);
+ EXPECT_EQ(p->error(), "1:8: unable to parse right side of < expression");
}
TEST_F(ParserImplTest, RelationalExpression_NoOr_ReturnsLHS) {
- auto p = parser("a true");
- auto e = p->relational_expression();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
- ASSERT_TRUE(e->Is<ast::IdentifierExpression>());
+ auto p = parser("a true");
+ auto e = p->relational_expression();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
+ ASSERT_TRUE(e->Is<ast::IdentifierExpression>());
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_reserved_keyword_test.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_reserved_keyword_test.cc
index 7ce03c2e753..e840af7c0fe 100644
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_reserved_keyword_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_reserved_keyword_test.cc
@@ -19,67 +19,67 @@ namespace {
using ParserImplReservedKeywordTest = ParserImplTestWithParam<std::string>;
TEST_P(ParserImplReservedKeywordTest, Function) {
- auto name = GetParam();
- auto p = parser("fn " + name + "() {}");
- EXPECT_FALSE(p->Parse());
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:4: '" + name + "' is a reserved keyword");
+ auto name = GetParam();
+ auto p = parser("fn " + name + "() {}");
+ EXPECT_FALSE(p->Parse());
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:4: '" + name + "' is a reserved keyword");
}
TEST_P(ParserImplReservedKeywordTest, ModuleLet) {
- auto name = GetParam();
- auto p = parser("let " + name + " : i32 = 1;");
- EXPECT_FALSE(p->Parse());
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:5: '" + name + "' is a reserved keyword");
+ auto name = GetParam();
+ auto p = parser("let " + name + " : i32 = 1;");
+ EXPECT_FALSE(p->Parse());
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:5: '" + name + "' is a reserved keyword");
}
TEST_P(ParserImplReservedKeywordTest, ModuleVar) {
- auto name = GetParam();
- auto p = parser("var " + name + " : i32 = 1;");
- EXPECT_FALSE(p->Parse());
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:5: '" + name + "' is a reserved keyword");
+ auto name = GetParam();
+ auto p = parser("var " + name + " : i32 = 1;");
+ EXPECT_FALSE(p->Parse());
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:5: '" + name + "' is a reserved keyword");
}
TEST_P(ParserImplReservedKeywordTest, FunctionLet) {
- auto name = GetParam();
- auto p = parser("fn f() { let " + name + " : i32 = 1; }");
- EXPECT_FALSE(p->Parse());
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:14: '" + name + "' is a reserved keyword");
+ auto name = GetParam();
+ auto p = parser("fn f() { let " + name + " : i32 = 1; }");
+ EXPECT_FALSE(p->Parse());
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:14: '" + name + "' is a reserved keyword");
}
TEST_P(ParserImplReservedKeywordTest, FunctionVar) {
- auto name = GetParam();
- auto p = parser("fn f() { var " + name + " : i32 = 1; }");
- EXPECT_FALSE(p->Parse());
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:14: '" + name + "' is a reserved keyword");
+ auto name = GetParam();
+ auto p = parser("fn f() { var " + name + " : i32 = 1; }");
+ EXPECT_FALSE(p->Parse());
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:14: '" + name + "' is a reserved keyword");
}
TEST_P(ParserImplReservedKeywordTest, FunctionParam) {
- auto name = GetParam();
- auto p = parser("fn f(" + name + " : i32) {}");
- EXPECT_FALSE(p->Parse());
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:6: '" + name + "' is a reserved keyword");
+ auto name = GetParam();
+ auto p = parser("fn f(" + name + " : i32) {}");
+ EXPECT_FALSE(p->Parse());
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:6: '" + name + "' is a reserved keyword");
}
TEST_P(ParserImplReservedKeywordTest, Struct) {
- auto name = GetParam();
- auto p = parser("struct " + name + " {};");
- EXPECT_FALSE(p->Parse());
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:8: '" + name + "' is a reserved keyword");
+ auto name = GetParam();
+ auto p = parser("struct " + name + " {};");
+ EXPECT_FALSE(p->Parse());
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:8: '" + name + "' is a reserved keyword");
}
TEST_P(ParserImplReservedKeywordTest, StructMember) {
- auto name = GetParam();
- auto p = parser("struct S { " + name + " : i32, };");
- EXPECT_FALSE(p->Parse());
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:12: '" + name + "' is a reserved keyword");
+ auto name = GetParam();
+ auto p = parser("struct S { " + name + " : i32, };");
+ EXPECT_FALSE(p->Parse());
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:12: '" + name + "' is a reserved keyword");
}
TEST_P(ParserImplReservedKeywordTest, Alias) {
- auto name = GetParam();
- auto p = parser("type " + name + " = i32;");
- EXPECT_FALSE(p->Parse());
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:6: '" + name + "' is a reserved keyword");
+ auto name = GetParam();
+ auto p = parser("type " + name + " = i32;");
+ EXPECT_FALSE(p->Parse());
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:6: '" + name + "' is a reserved keyword");
}
INSTANTIATE_TEST_SUITE_P(ParserImplReservedKeywordTest,
ParserImplReservedKeywordTest,
@@ -88,7 +88,6 @@ INSTANTIATE_TEST_SUITE_P(ParserImplReservedKeywordTest,
"const",
"do",
"enum",
- "f16",
"f64",
"handle",
"i8",
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_sampled_texture_test.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_sampled_texture_test.cc
new file mode 100644
index 00000000000..cf9f0898a87
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_sampled_texture_test.cc
@@ -0,0 +1,83 @@
+// Copyright 2020 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/tint/reader/wgsl/parser_impl_test_helper.h"
+
+namespace tint::reader::wgsl {
+namespace {
+
+TEST_F(ParserImplTest, SampledTextureType_Invalid) {
+ auto p = parser("1234");
+ auto t = p->sampled_texture();
+ EXPECT_FALSE(t.matched);
+ EXPECT_FALSE(t.errored);
+ EXPECT_FALSE(p->has_error());
+}
+
+TEST_F(ParserImplTest, SampledTextureType_1d) {
+ auto p = parser("texture_1d");
+ auto t = p->sampled_texture();
+ EXPECT_TRUE(t.matched);
+ EXPECT_FALSE(t.errored);
+ EXPECT_EQ(t.value, ast::TextureDimension::k1d);
+ EXPECT_FALSE(p->has_error());
+}
+
+TEST_F(ParserImplTest, SampledTextureType_2d) {
+ auto p = parser("texture_2d");
+ auto t = p->sampled_texture();
+ EXPECT_TRUE(t.matched);
+ EXPECT_FALSE(t.errored);
+ EXPECT_EQ(t.value, ast::TextureDimension::k2d);
+ EXPECT_FALSE(p->has_error());
+}
+
+TEST_F(ParserImplTest, SampledTextureType_2dArray) {
+ auto p = parser("texture_2d_array");
+ auto t = p->sampled_texture();
+ EXPECT_TRUE(t.matched);
+ EXPECT_FALSE(t.errored);
+ EXPECT_EQ(t.value, ast::TextureDimension::k2dArray);
+ EXPECT_FALSE(p->has_error());
+}
+
+TEST_F(ParserImplTest, SampledTextureType_3d) {
+ auto p = parser("texture_3d");
+ auto t = p->sampled_texture();
+ EXPECT_TRUE(t.matched);
+ EXPECT_FALSE(t.errored);
+ EXPECT_EQ(t.value, ast::TextureDimension::k3d);
+ EXPECT_FALSE(p->has_error());
+}
+
+TEST_F(ParserImplTest, SampledTextureType_Cube) {
+ auto p = parser("texture_cube");
+ auto t = p->sampled_texture();
+ EXPECT_TRUE(t.matched);
+ EXPECT_FALSE(t.errored);
+ EXPECT_EQ(t.value, ast::TextureDimension::kCube);
+ EXPECT_FALSE(p->has_error());
+}
+
+TEST_F(ParserImplTest, SampledTextureType_kCubeArray) {
+ auto p = parser("texture_cube_array");
+ auto t = p->sampled_texture();
+ EXPECT_TRUE(t.matched);
+ EXPECT_FALSE(t.errored);
+ EXPECT_EQ(t.value, ast::TextureDimension::kCubeArray);
+ EXPECT_FALSE(p->has_error());
+}
+
+} // namespace
+} // namespace tint::reader::wgsl
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_sampled_texture_type_test.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_sampled_texture_type_test.cc
deleted file mode 100644
index a48c32ce53a..00000000000
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_sampled_texture_type_test.cc
+++ /dev/null
@@ -1,83 +0,0 @@
-// Copyright 2020 The Tint Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "src/tint/reader/wgsl/parser_impl_test_helper.h"
-
-namespace tint::reader::wgsl {
-namespace {
-
-TEST_F(ParserImplTest, SampledTextureType_Invalid) {
- auto p = parser("1234");
- auto t = p->sampled_texture_type();
- EXPECT_FALSE(t.matched);
- EXPECT_FALSE(t.errored);
- EXPECT_FALSE(p->has_error());
-}
-
-TEST_F(ParserImplTest, SampledTextureType_1d) {
- auto p = parser("texture_1d");
- auto t = p->sampled_texture_type();
- EXPECT_TRUE(t.matched);
- EXPECT_FALSE(t.errored);
- EXPECT_EQ(t.value, ast::TextureDimension::k1d);
- EXPECT_FALSE(p->has_error());
-}
-
-TEST_F(ParserImplTest, SampledTextureType_2d) {
- auto p = parser("texture_2d");
- auto t = p->sampled_texture_type();
- EXPECT_TRUE(t.matched);
- EXPECT_FALSE(t.errored);
- EXPECT_EQ(t.value, ast::TextureDimension::k2d);
- EXPECT_FALSE(p->has_error());
-}
-
-TEST_F(ParserImplTest, SampledTextureType_2dArray) {
- auto p = parser("texture_2d_array");
- auto t = p->sampled_texture_type();
- EXPECT_TRUE(t.matched);
- EXPECT_FALSE(t.errored);
- EXPECT_EQ(t.value, ast::TextureDimension::k2dArray);
- EXPECT_FALSE(p->has_error());
-}
-
-TEST_F(ParserImplTest, SampledTextureType_3d) {
- auto p = parser("texture_3d");
- auto t = p->sampled_texture_type();
- EXPECT_TRUE(t.matched);
- EXPECT_FALSE(t.errored);
- EXPECT_EQ(t.value, ast::TextureDimension::k3d);
- EXPECT_FALSE(p->has_error());
-}
-
-TEST_F(ParserImplTest, SampledTextureType_Cube) {
- auto p = parser("texture_cube");
- auto t = p->sampled_texture_type();
- EXPECT_TRUE(t.matched);
- EXPECT_FALSE(t.errored);
- EXPECT_EQ(t.value, ast::TextureDimension::kCube);
- EXPECT_FALSE(p->has_error());
-}
-
-TEST_F(ParserImplTest, SampledTextureType_kCubeArray) {
- auto p = parser("texture_cube_array");
- auto t = p->sampled_texture_type();
- EXPECT_TRUE(t.matched);
- EXPECT_FALSE(t.errored);
- EXPECT_EQ(t.value, ast::TextureDimension::kCubeArray);
- EXPECT_FALSE(p->has_error());
-}
-
-} // namespace
-} // namespace tint::reader::wgsl
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_sampler_type_test.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_sampler_test.cc
index 230d58b2dcf..7f1e5645b8e 100644
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_sampler_type_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_sampler_test.cc
@@ -18,36 +18,36 @@ namespace tint::reader::wgsl {
namespace {
TEST_F(ParserImplTest, SamplerType_Invalid) {
- auto p = parser("1234");
- auto t = p->sampler_type();
- EXPECT_FALSE(t.matched);
- EXPECT_FALSE(t.errored);
- EXPECT_EQ(t.value, nullptr);
- EXPECT_FALSE(p->has_error());
+ auto p = parser("1234");
+ auto t = p->sampler();
+ EXPECT_FALSE(t.matched);
+ EXPECT_FALSE(t.errored);
+ EXPECT_EQ(t.value, nullptr);
+ EXPECT_FALSE(p->has_error());
}
TEST_F(ParserImplTest, SamplerType_Sampler) {
- auto p = parser("sampler");
- auto t = p->sampler_type();
- EXPECT_TRUE(t.matched);
- EXPECT_FALSE(t.errored);
- ASSERT_NE(t.value, nullptr);
- ASSERT_TRUE(t->Is<ast::Sampler>());
- EXPECT_FALSE(t->As<ast::Sampler>()->IsComparison());
- EXPECT_FALSE(p->has_error());
- EXPECT_EQ(t.value->source.range, (Source::Range{{1u, 1u}, {1u, 8u}}));
+ auto p = parser("sampler");
+ auto t = p->sampler();
+ EXPECT_TRUE(t.matched);
+ EXPECT_FALSE(t.errored);
+ ASSERT_NE(t.value, nullptr);
+ ASSERT_TRUE(t->Is<ast::Sampler>());
+ EXPECT_FALSE(t->As<ast::Sampler>()->IsComparison());
+ EXPECT_FALSE(p->has_error());
+ EXPECT_EQ(t.value->source.range, (Source::Range{{1u, 1u}, {1u, 8u}}));
}
TEST_F(ParserImplTest, SamplerType_ComparisonSampler) {
- auto p = parser("sampler_comparison");
- auto t = p->sampler_type();
- EXPECT_TRUE(t.matched);
- EXPECT_FALSE(t.errored);
- ASSERT_NE(t.value, nullptr);
- ASSERT_TRUE(t->Is<ast::Sampler>());
- EXPECT_TRUE(t->As<ast::Sampler>()->IsComparison());
- EXPECT_FALSE(p->has_error());
- EXPECT_EQ(t.value->source.range, (Source::Range{{1u, 1u}, {1u, 19u}}));
+ auto p = parser("sampler_comparison");
+ auto t = p->sampler();
+ EXPECT_TRUE(t.matched);
+ EXPECT_FALSE(t.errored);
+ ASSERT_NE(t.value, nullptr);
+ ASSERT_TRUE(t->Is<ast::Sampler>());
+ EXPECT_TRUE(t->As<ast::Sampler>()->IsComparison());
+ EXPECT_FALSE(p->has_error());
+ EXPECT_EQ(t.value->source.range, (Source::Range{{1u, 1u}, {1u, 19u}}));
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_shift_expression_test.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_shift_expression_test.cc
index 62a06ab68d5..83c1255e9d3 100644
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_shift_expression_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_shift_expression_test.cc
@@ -18,90 +18,100 @@ namespace tint::reader::wgsl {
namespace {
TEST_F(ParserImplTest, ShiftExpression_Parses_ShiftLeft) {
- auto p = parser("a << true");
- auto e = p->shift_expression();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
-
- ASSERT_TRUE(e->Is<ast::BinaryExpression>());
- auto* rel = e->As<ast::BinaryExpression>();
- EXPECT_EQ(ast::BinaryOp::kShiftLeft, rel->op);
-
- ASSERT_TRUE(rel->lhs->Is<ast::IdentifierExpression>());
- auto* ident = rel->lhs->As<ast::IdentifierExpression>();
- EXPECT_EQ(ident->symbol, p->builder().Symbols().Get("a"));
-
- ASSERT_TRUE(rel->rhs->Is<ast::BoolLiteralExpression>());
- ASSERT_TRUE(rel->rhs->As<ast::BoolLiteralExpression>()->value);
+ auto p = parser("a << true");
+ auto e = p->shift_expression();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
+
+ EXPECT_EQ(e->source.range.begin.line, 1u);
+ EXPECT_EQ(e->source.range.begin.column, 3u);
+ EXPECT_EQ(e->source.range.end.line, 1u);
+ EXPECT_EQ(e->source.range.end.column, 5u);
+
+ ASSERT_TRUE(e->Is<ast::BinaryExpression>());
+ auto* rel = e->As<ast::BinaryExpression>();
+ EXPECT_EQ(ast::BinaryOp::kShiftLeft, rel->op);
+
+ ASSERT_TRUE(rel->lhs->Is<ast::IdentifierExpression>());
+ auto* ident = rel->lhs->As<ast::IdentifierExpression>();
+ EXPECT_EQ(ident->symbol, p->builder().Symbols().Get("a"));
+
+ ASSERT_TRUE(rel->rhs->Is<ast::BoolLiteralExpression>());
+ ASSERT_TRUE(rel->rhs->As<ast::BoolLiteralExpression>()->value);
}
TEST_F(ParserImplTest, ShiftExpression_Parses_ShiftRight) {
- auto p = parser("a >> true");
- auto e = p->shift_expression();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
-
- ASSERT_TRUE(e->Is<ast::BinaryExpression>());
- auto* rel = e->As<ast::BinaryExpression>();
- EXPECT_EQ(ast::BinaryOp::kShiftRight, rel->op);
-
- ASSERT_TRUE(rel->lhs->Is<ast::IdentifierExpression>());
- auto* ident = rel->lhs->As<ast::IdentifierExpression>();
- EXPECT_EQ(ident->symbol, p->builder().Symbols().Get("a"));
-
- ASSERT_TRUE(rel->rhs->Is<ast::BoolLiteralExpression>());
- ASSERT_TRUE(rel->rhs->As<ast::BoolLiteralExpression>()->value);
+ auto p = parser("a >> true");
+ auto e = p->shift_expression();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
+
+ EXPECT_EQ(e->source.range.begin.line, 1u);
+ EXPECT_EQ(e->source.range.begin.column, 3u);
+ EXPECT_EQ(e->source.range.end.line, 1u);
+ EXPECT_EQ(e->source.range.end.column, 5u);
+
+ ASSERT_TRUE(e->Is<ast::BinaryExpression>());
+ auto* rel = e->As<ast::BinaryExpression>();
+ EXPECT_EQ(ast::BinaryOp::kShiftRight, rel->op);
+
+ ASSERT_TRUE(rel->lhs->Is<ast::IdentifierExpression>());
+ auto* ident = rel->lhs->As<ast::IdentifierExpression>();
+ EXPECT_EQ(ident->symbol, p->builder().Symbols().Get("a"));
+
+ ASSERT_TRUE(rel->rhs->Is<ast::BoolLiteralExpression>());
+ ASSERT_TRUE(rel->rhs->As<ast::BoolLiteralExpression>()->value);
}
TEST_F(ParserImplTest, ShiftExpression_InvalidSpaceLeft) {
- auto p = parser("a < < true");
- auto e = p->shift_expression();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- ASSERT_NE(e.value, nullptr);
- EXPECT_FALSE(e.value->Is<ast::BinaryExpression>());
+ auto p = parser("a < < true");
+ auto e = p->shift_expression();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ ASSERT_NE(e.value, nullptr);
+ EXPECT_FALSE(e.value->Is<ast::BinaryExpression>());
}
TEST_F(ParserImplTest, ShiftExpression_InvalidSpaceRight) {
- auto p = parser("a > > true");
- auto e = p->shift_expression();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- ASSERT_NE(e.value, nullptr);
- EXPECT_FALSE(e.value->Is<ast::BinaryExpression>());
+ auto p = parser("a > > true");
+ auto e = p->shift_expression();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ ASSERT_NE(e.value, nullptr);
+ EXPECT_FALSE(e.value->Is<ast::BinaryExpression>());
}
TEST_F(ParserImplTest, ShiftExpression_InvalidLHS) {
- auto p = parser("if (a) {} << true");
- auto e = p->shift_expression();
- EXPECT_FALSE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- EXPECT_EQ(e.value, nullptr);
+ auto p = parser("if (a) {} << true");
+ auto e = p->shift_expression();
+ EXPECT_FALSE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ EXPECT_EQ(e.value, nullptr);
}
TEST_F(ParserImplTest, ShiftExpression_InvalidRHS) {
- auto p = parser("true << if (a) {}");
- auto e = p->shift_expression();
- EXPECT_FALSE(e.matched);
- EXPECT_TRUE(e.errored);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(e.value, nullptr);
- EXPECT_EQ(p->error(), "1:9: unable to parse right side of << expression");
+ auto p = parser("true << if (a) {}");
+ auto e = p->shift_expression();
+ EXPECT_FALSE(e.matched);
+ EXPECT_TRUE(e.errored);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(e.value, nullptr);
+ EXPECT_EQ(p->error(), "1:9: unable to parse right side of << expression");
}
TEST_F(ParserImplTest, ShiftExpression_NoOr_ReturnsLHS) {
- auto p = parser("a true");
- auto e = p->shift_expression();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
- ASSERT_TRUE(e->Is<ast::IdentifierExpression>());
+ auto p = parser("a true");
+ auto e = p->shift_expression();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
+ ASSERT_TRUE(e->Is<ast::IdentifierExpression>());
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_singular_expression_test.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_singular_expression_test.cc
index 2d6565245aa..4ab185bb6e7 100644
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_singular_expression_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_singular_expression_test.cc
@@ -18,217 +18,214 @@ namespace tint::reader::wgsl {
namespace {
TEST_F(ParserImplTest, SingularExpression_Array_ConstantIndex) {
- auto p = parser("a[1]");
- auto e = p->singular_expression();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
-
- ASSERT_TRUE(e->Is<ast::IndexAccessorExpression>());
- auto* idx = e->As<ast::IndexAccessorExpression>();
-
- ASSERT_TRUE(idx->object->Is<ast::IdentifierExpression>());
- auto* ident = idx->object->As<ast::IdentifierExpression>();
- EXPECT_EQ(ident->symbol, p->builder().Symbols().Get("a"));
-
- ASSERT_TRUE(idx->index->Is<ast::SintLiteralExpression>());
- EXPECT_EQ(idx->index->As<ast::SintLiteralExpression>()->value, 1);
+ auto p = parser("a[1]");
+ auto e = p->singular_expression();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
+
+ ASSERT_TRUE(e->Is<ast::IndexAccessorExpression>());
+ auto* idx = e->As<ast::IndexAccessorExpression>();
+
+ ASSERT_TRUE(idx->object->Is<ast::IdentifierExpression>());
+ auto* ident = idx->object->As<ast::IdentifierExpression>();
+ EXPECT_EQ(ident->symbol, p->builder().Symbols().Get("a"));
+
+ ASSERT_TRUE(idx->index->Is<ast::IntLiteralExpression>());
+ EXPECT_EQ(idx->index->As<ast::IntLiteralExpression>()->value, 1);
+ EXPECT_EQ(idx->index->As<ast::IntLiteralExpression>()->suffix,
+ ast::IntLiteralExpression::Suffix::kNone);
}
TEST_F(ParserImplTest, SingularExpression_Array_ExpressionIndex) {
- auto p = parser("a[1 + b / 4]");
- auto e = p->singular_expression();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
+ auto p = parser("a[1 + b / 4]");
+ auto e = p->singular_expression();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
- ASSERT_TRUE(e->Is<ast::IndexAccessorExpression>());
- auto* idx = e->As<ast::IndexAccessorExpression>();
+ ASSERT_TRUE(e->Is<ast::IndexAccessorExpression>());
+ auto* idx = e->As<ast::IndexAccessorExpression>();
- ASSERT_TRUE(idx->object->Is<ast::IdentifierExpression>());
- auto* ident = idx->object->As<ast::IdentifierExpression>();
- EXPECT_EQ(ident->symbol, p->builder().Symbols().Get("a"));
+ ASSERT_TRUE(idx->object->Is<ast::IdentifierExpression>());
+ auto* ident = idx->object->As<ast::IdentifierExpression>();
+ EXPECT_EQ(ident->symbol, p->builder().Symbols().Get("a"));
- ASSERT_TRUE(idx->index->Is<ast::BinaryExpression>());
+ ASSERT_TRUE(idx->index->Is<ast::BinaryExpression>());
}
TEST_F(ParserImplTest, SingularExpression_Array_MissingIndex) {
- auto p = parser("a[]");
- auto e = p->singular_expression();
- EXPECT_FALSE(e.matched);
- EXPECT_TRUE(e.errored);
- EXPECT_EQ(e.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:3: unable to parse expression inside []");
+ auto p = parser("a[]");
+ auto e = p->singular_expression();
+ EXPECT_FALSE(e.matched);
+ EXPECT_TRUE(e.errored);
+ EXPECT_EQ(e.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:3: unable to parse expression inside []");
}
TEST_F(ParserImplTest, SingularExpression_Array_MissingRightBrace) {
- auto p = parser("a[1");
- auto e = p->singular_expression();
- EXPECT_FALSE(e.matched);
- EXPECT_TRUE(e.errored);
- EXPECT_EQ(e.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:4: expected ']' for index accessor");
+ auto p = parser("a[1");
+ auto e = p->singular_expression();
+ EXPECT_FALSE(e.matched);
+ EXPECT_TRUE(e.errored);
+ EXPECT_EQ(e.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:4: expected ']' for index accessor");
}
TEST_F(ParserImplTest, SingularExpression_Array_InvalidIndex) {
- auto p = parser("a[if(a() {})]");
- auto e = p->singular_expression();
- EXPECT_FALSE(e.matched);
- EXPECT_TRUE(e.errored);
- EXPECT_EQ(e.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:3: unable to parse expression inside []");
+ auto p = parser("a[if(a() {})]");
+ auto e = p->singular_expression();
+ EXPECT_FALSE(e.matched);
+ EXPECT_TRUE(e.errored);
+ EXPECT_EQ(e.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:3: unable to parse expression inside []");
}
TEST_F(ParserImplTest, SingularExpression_Call_Empty) {
- auto p = parser("a()");
- auto e = p->singular_expression();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
+ auto p = parser("a()");
+ auto e = p->singular_expression();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
- ASSERT_TRUE(e->Is<ast::CallExpression>());
- auto* c = e->As<ast::CallExpression>();
+ ASSERT_TRUE(e->Is<ast::CallExpression>());
+ auto* c = e->As<ast::CallExpression>();
- EXPECT_EQ(c->target.name->symbol, p->builder().Symbols().Get("a"));
+ EXPECT_EQ(c->target.name->symbol, p->builder().Symbols().Get("a"));
- EXPECT_EQ(c->args.size(), 0u);
+ EXPECT_EQ(c->args.size(), 0u);
}
TEST_F(ParserImplTest, SingularExpression_Call_WithArgs) {
- auto p = parser("test(1, b, 2 + 3 / b)");
- auto e = p->singular_expression();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
-
- ASSERT_TRUE(e->Is<ast::CallExpression>());
- auto* c = e->As<ast::CallExpression>();
-
- EXPECT_EQ(c->target.name->symbol, p->builder().Symbols().Get("test"));
-
- EXPECT_EQ(c->args.size(), 3u);
- EXPECT_TRUE(c->args[0]->Is<ast::IntLiteralExpression>());
- EXPECT_TRUE(c->args[1]->Is<ast::IdentifierExpression>());
- EXPECT_TRUE(c->args[2]->Is<ast::BinaryExpression>());
+ auto p = parser("test(1, b, 2 + 3 / b)");
+ auto e = p->singular_expression();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
+
+ ASSERT_TRUE(e->Is<ast::CallExpression>());
+ auto* c = e->As<ast::CallExpression>();
+
+ EXPECT_EQ(c->target.name->symbol, p->builder().Symbols().Get("test"));
+
+ EXPECT_EQ(c->args.size(), 3u);
+ EXPECT_TRUE(c->args[0]->Is<ast::IntLiteralExpression>());
+ EXPECT_TRUE(c->args[1]->Is<ast::IdentifierExpression>());
+ EXPECT_TRUE(c->args[2]->Is<ast::BinaryExpression>());
}
TEST_F(ParserImplTest, SingularExpression_Call_TrailingComma) {
- auto p = parser("a(b, )");
- auto e = p->singular_expression();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- ASSERT_NE(e.value, nullptr);
-
- ASSERT_TRUE(e->Is<ast::CallExpression>());
- auto* c = e->As<ast::CallExpression>();
- EXPECT_EQ(c->args.size(), 1u);
+ auto p = parser("a(b, )");
+ auto e = p->singular_expression();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ ASSERT_NE(e.value, nullptr);
+
+ ASSERT_TRUE(e->Is<ast::CallExpression>());
+ auto* c = e->As<ast::CallExpression>();
+ EXPECT_EQ(c->args.size(), 1u);
}
TEST_F(ParserImplTest, SingularExpression_Call_InvalidArg) {
- auto p = parser("a(if(a) {})");
- auto e = p->singular_expression();
- EXPECT_FALSE(e.matched);
- EXPECT_TRUE(e.errored);
- EXPECT_EQ(e.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:3: expected ')' for function call");
+ auto p = parser("a(if(a) {})");
+ auto e = p->singular_expression();
+ EXPECT_FALSE(e.matched);
+ EXPECT_TRUE(e.errored);
+ EXPECT_EQ(e.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:3: expected ')' for function call");
}
TEST_F(ParserImplTest, SingularExpression_Call_MissingRightParen) {
- auto p = parser("a(");
- auto e = p->singular_expression();
- EXPECT_FALSE(e.matched);
- EXPECT_TRUE(e.errored);
- EXPECT_EQ(e.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:3: expected ')' for function call");
+ auto p = parser("a(");
+ auto e = p->singular_expression();
+ EXPECT_FALSE(e.matched);
+ EXPECT_TRUE(e.errored);
+ EXPECT_EQ(e.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:3: expected ')' for function call");
}
TEST_F(ParserImplTest, SingularExpression_MemberAccessor) {
- auto p = parser("a.b");
- auto e = p->singular_expression();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
- ASSERT_TRUE(e->Is<ast::MemberAccessorExpression>());
-
- auto* m = e->As<ast::MemberAccessorExpression>();
- ASSERT_TRUE(m->structure->Is<ast::IdentifierExpression>());
- EXPECT_EQ(m->structure->As<ast::IdentifierExpression>()->symbol,
- p->builder().Symbols().Get("a"));
-
- ASSERT_TRUE(m->member->Is<ast::IdentifierExpression>());
- EXPECT_EQ(m->member->As<ast::IdentifierExpression>()->symbol,
- p->builder().Symbols().Get("b"));
+ auto p = parser("a.b");
+ auto e = p->singular_expression();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
+ ASSERT_TRUE(e->Is<ast::MemberAccessorExpression>());
+
+ auto* m = e->As<ast::MemberAccessorExpression>();
+ ASSERT_TRUE(m->structure->Is<ast::IdentifierExpression>());
+ EXPECT_EQ(m->structure->As<ast::IdentifierExpression>()->symbol,
+ p->builder().Symbols().Get("a"));
+
+ ASSERT_TRUE(m->member->Is<ast::IdentifierExpression>());
+ EXPECT_EQ(m->member->As<ast::IdentifierExpression>()->symbol, p->builder().Symbols().Get("b"));
}
TEST_F(ParserImplTest, SingularExpression_MemberAccesssor_InvalidIdent) {
- auto p = parser("a.if");
- auto e = p->singular_expression();
- EXPECT_FALSE(e.matched);
- EXPECT_TRUE(e.errored);
- EXPECT_EQ(e.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:3: expected identifier for member accessor");
+ auto p = parser("a.if");
+ auto e = p->singular_expression();
+ EXPECT_FALSE(e.matched);
+ EXPECT_TRUE(e.errored);
+ EXPECT_EQ(e.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:3: expected identifier for member accessor");
}
TEST_F(ParserImplTest, SingularExpression_MemberAccessor_MissingIdent) {
- auto p = parser("a.");
- auto e = p->singular_expression();
- EXPECT_FALSE(e.matched);
- EXPECT_TRUE(e.errored);
- EXPECT_EQ(e.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:3: expected identifier for member accessor");
+ auto p = parser("a.");
+ auto e = p->singular_expression();
+ EXPECT_FALSE(e.matched);
+ EXPECT_TRUE(e.errored);
+ EXPECT_EQ(e.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:3: expected identifier for member accessor");
}
TEST_F(ParserImplTest, SingularExpression_NonMatch_returnLHS) {
- auto p = parser("a b");
- auto e = p->singular_expression();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
- ASSERT_TRUE(e->Is<ast::IdentifierExpression>());
+ auto p = parser("a b");
+ auto e = p->singular_expression();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
+ ASSERT_TRUE(e->Is<ast::IdentifierExpression>());
}
TEST_F(ParserImplTest, SingularExpression_Array_NestedIndexAccessor) {
- auto p = parser("a[b[c]]");
- auto e = p->singular_expression();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
-
- const auto* outer_accessor = e->As<ast::IndexAccessorExpression>();
- ASSERT_TRUE(outer_accessor);
-
- const auto* outer_object =
- outer_accessor->object->As<ast::IdentifierExpression>();
- ASSERT_TRUE(outer_object);
- EXPECT_EQ(outer_object->symbol, p->builder().Symbols().Get("a"));
-
- const auto* inner_accessor =
- outer_accessor->index->As<ast::IndexAccessorExpression>();
- ASSERT_TRUE(inner_accessor);
-
- const auto* inner_object =
- inner_accessor->object->As<ast::IdentifierExpression>();
- ASSERT_TRUE(inner_object);
- EXPECT_EQ(inner_object->symbol, p->builder().Symbols().Get("b"));
-
- const auto* index_expr =
- inner_accessor->index->As<ast::IdentifierExpression>();
- ASSERT_TRUE(index_expr);
- EXPECT_EQ(index_expr->symbol, p->builder().Symbols().Get("c"));
+ auto p = parser("a[b[c]]");
+ auto e = p->singular_expression();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
+
+ const auto* outer_accessor = e->As<ast::IndexAccessorExpression>();
+ ASSERT_TRUE(outer_accessor);
+
+ const auto* outer_object = outer_accessor->object->As<ast::IdentifierExpression>();
+ ASSERT_TRUE(outer_object);
+ EXPECT_EQ(outer_object->symbol, p->builder().Symbols().Get("a"));
+
+ const auto* inner_accessor = outer_accessor->index->As<ast::IndexAccessorExpression>();
+ ASSERT_TRUE(inner_accessor);
+
+ const auto* inner_object = inner_accessor->object->As<ast::IdentifierExpression>();
+ ASSERT_TRUE(inner_object);
+ EXPECT_EQ(inner_object->symbol, p->builder().Symbols().Get("b"));
+
+ const auto* index_expr = inner_accessor->index->As<ast::IdentifierExpression>();
+ ASSERT_TRUE(index_expr);
+ EXPECT_EQ(index_expr->symbol, p->builder().Symbols().Get("c"));
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_statement_test.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_statement_test.cc
index d47ca5d2b2c..235c7485486 100644
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_statement_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_statement_test.cc
@@ -21,257 +21,255 @@ namespace tint::reader::wgsl {
namespace {
TEST_F(ParserImplTest, Statement) {
- auto p = parser("return;");
- auto e = p->statement();
- ASSERT_FALSE(p->has_error()) << p->error();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- ASSERT_TRUE(e->Is<ast::ReturnStatement>());
+ auto p = parser("return;");
+ auto e = p->statement();
+ ASSERT_FALSE(p->has_error()) << p->error();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ ASSERT_TRUE(e->Is<ast::ReturnStatement>());
}
TEST_F(ParserImplTest, Statement_Semicolon) {
- auto p = parser(";");
- p->statement();
- ASSERT_FALSE(p->has_error()) << p->error();
+ auto p = parser(";");
+ p->statement();
+ ASSERT_FALSE(p->has_error()) << p->error();
}
TEST_F(ParserImplTest, Statement_Return_NoValue) {
- auto p = parser("return;");
- auto e = p->statement();
- ASSERT_FALSE(p->has_error()) << p->error();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- ASSERT_TRUE(e->Is<ast::ReturnStatement>());
- auto* ret = e->As<ast::ReturnStatement>();
- ASSERT_EQ(ret->value, nullptr);
+ auto p = parser("return;");
+ auto e = p->statement();
+ ASSERT_FALSE(p->has_error()) << p->error();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ ASSERT_TRUE(e->Is<ast::ReturnStatement>());
+ auto* ret = e->As<ast::ReturnStatement>();
+ ASSERT_EQ(ret->value, nullptr);
}
TEST_F(ParserImplTest, Statement_Return_Value) {
- auto p = parser("return a + b * (.1 - .2);");
- auto e = p->statement();
- ASSERT_FALSE(p->has_error()) << p->error();
+ auto p = parser("return a + b * (.1 - .2);");
+ auto e = p->statement();
+ ASSERT_FALSE(p->has_error()) << p->error();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- ASSERT_TRUE(e->Is<ast::ReturnStatement>());
- auto* ret = e->As<ast::ReturnStatement>();
- ASSERT_NE(ret->value, nullptr);
- EXPECT_TRUE(ret->value->Is<ast::BinaryExpression>());
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ ASSERT_TRUE(e->Is<ast::ReturnStatement>());
+ auto* ret = e->As<ast::ReturnStatement>();
+ ASSERT_NE(ret->value, nullptr);
+ EXPECT_TRUE(ret->value->Is<ast::BinaryExpression>());
}
TEST_F(ParserImplTest, Statement_Return_MissingSemi) {
- auto p = parser("return");
- auto e = p->statement();
- EXPECT_TRUE(p->has_error());
- EXPECT_TRUE(e.errored);
- EXPECT_FALSE(e.matched);
- EXPECT_EQ(e.value, nullptr);
- EXPECT_EQ(p->error(), "1:7: expected ';' for return statement");
+ auto p = parser("return");
+ auto e = p->statement();
+ EXPECT_TRUE(p->has_error());
+ EXPECT_TRUE(e.errored);
+ EXPECT_FALSE(e.matched);
+ EXPECT_EQ(e.value, nullptr);
+ EXPECT_EQ(p->error(), "1:7: expected ';' for return statement");
}
TEST_F(ParserImplTest, Statement_Return_Invalid) {
- auto p = parser("return if(a) {};");
- auto e = p->statement();
- EXPECT_TRUE(p->has_error());
- EXPECT_TRUE(e.errored);
- EXPECT_FALSE(e.matched);
- EXPECT_EQ(e.value, nullptr);
- EXPECT_EQ(p->error(), "1:8: expected ';' for return statement");
+ auto p = parser("return if(a) {};");
+ auto e = p->statement();
+ EXPECT_TRUE(p->has_error());
+ EXPECT_TRUE(e.errored);
+ EXPECT_FALSE(e.matched);
+ EXPECT_EQ(e.value, nullptr);
+ EXPECT_EQ(p->error(), "1:8: expected ';' for return statement");
}
TEST_F(ParserImplTest, Statement_If) {
- auto p = parser("if (a) {}");
- auto e = p->statement();
- ASSERT_FALSE(p->has_error()) << p->error();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- ASSERT_TRUE(e->Is<ast::IfStatement>());
+ auto p = parser("if (a) {}");
+ auto e = p->statement();
+ ASSERT_FALSE(p->has_error()) << p->error();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ ASSERT_TRUE(e->Is<ast::IfStatement>());
}
TEST_F(ParserImplTest, Statement_If_Invalid) {
- auto p = parser("if (a) { fn main() -> {}}");
- auto e = p->statement();
- EXPECT_TRUE(p->has_error());
- EXPECT_TRUE(e.errored);
- EXPECT_FALSE(e.matched);
- EXPECT_EQ(e.value, nullptr);
- EXPECT_EQ(p->error(), "1:10: expected '}'");
+ auto p = parser("if (a) { fn main() -> {}}");
+ auto e = p->statement();
+ EXPECT_TRUE(p->has_error());
+ EXPECT_TRUE(e.errored);
+ EXPECT_FALSE(e.matched);
+ EXPECT_EQ(e.value, nullptr);
+ EXPECT_EQ(p->error(), "1:10: expected '}'");
}
TEST_F(ParserImplTest, Statement_Variable) {
- auto p = parser("var a : i32 = 1;");
- auto e = p->statement();
- ASSERT_FALSE(p->has_error()) << p->error();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- ASSERT_TRUE(e->Is<ast::VariableDeclStatement>());
+ auto p = parser("var a : i32 = 1;");
+ auto e = p->statement();
+ ASSERT_FALSE(p->has_error()) << p->error();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ ASSERT_TRUE(e->Is<ast::VariableDeclStatement>());
}
TEST_F(ParserImplTest, Statement_Variable_Invalid) {
- auto p = parser("var a : i32 =;");
- auto e = p->statement();
- EXPECT_TRUE(p->has_error());
- EXPECT_TRUE(e.errored);
- EXPECT_FALSE(e.matched);
- EXPECT_EQ(e.value, nullptr);
- EXPECT_EQ(p->error(), "1:14: missing constructor for variable declaration");
+ auto p = parser("var a : i32 =;");
+ auto e = p->statement();
+ EXPECT_TRUE(p->has_error());
+ EXPECT_TRUE(e.errored);
+ EXPECT_FALSE(e.matched);
+ EXPECT_EQ(e.value, nullptr);
+ EXPECT_EQ(p->error(), "1:14: missing constructor for variable declaration");
}
TEST_F(ParserImplTest, Statement_Variable_MissingSemicolon) {
- auto p = parser("var a : i32");
- auto e = p->statement();
- EXPECT_TRUE(p->has_error());
- EXPECT_TRUE(e.errored);
- EXPECT_FALSE(e.matched);
- EXPECT_EQ(e.value, nullptr);
- EXPECT_EQ(p->error(), "1:12: expected ';' for variable declaration");
+ auto p = parser("var a : i32");
+ auto e = p->statement();
+ EXPECT_TRUE(p->has_error());
+ EXPECT_TRUE(e.errored);
+ EXPECT_FALSE(e.matched);
+ EXPECT_EQ(e.value, nullptr);
+ EXPECT_EQ(p->error(), "1:12: expected ';' for variable declaration");
}
TEST_F(ParserImplTest, Statement_Switch) {
- auto p = parser("switch (a) {}");
- auto e = p->statement();
- ASSERT_FALSE(p->has_error()) << p->error();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- ASSERT_TRUE(e->Is<ast::SwitchStatement>());
+ auto p = parser("switch (a) {}");
+ auto e = p->statement();
+ ASSERT_FALSE(p->has_error()) << p->error();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ ASSERT_TRUE(e->Is<ast::SwitchStatement>());
}
TEST_F(ParserImplTest, Statement_Switch_Invalid) {
- auto p = parser("switch (a) { case: {}}");
- auto e = p->statement();
- EXPECT_TRUE(p->has_error());
- EXPECT_TRUE(e.errored);
- EXPECT_FALSE(e.matched);
- EXPECT_EQ(e.value, nullptr);
- EXPECT_EQ(p->error(), "1:18: unable to parse case selectors");
+ auto p = parser("switch (a) { case: {}}");
+ auto e = p->statement();
+ EXPECT_TRUE(p->has_error());
+ EXPECT_TRUE(e.errored);
+ EXPECT_FALSE(e.matched);
+ EXPECT_EQ(e.value, nullptr);
+ EXPECT_EQ(p->error(), "1:18: unable to parse case selectors");
}
TEST_F(ParserImplTest, Statement_Loop) {
- auto p = parser("loop {}");
- auto e = p->statement();
- ASSERT_FALSE(p->has_error()) << p->error();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- ASSERT_TRUE(e->Is<ast::LoopStatement>());
+ auto p = parser("loop {}");
+ auto e = p->statement();
+ ASSERT_FALSE(p->has_error()) << p->error();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ ASSERT_TRUE(e->Is<ast::LoopStatement>());
}
TEST_F(ParserImplTest, Statement_Loop_Invalid) {
- auto p = parser("loop discard; }");
- auto e = p->statement();
- EXPECT_TRUE(p->has_error());
- EXPECT_TRUE(e.errored);
- EXPECT_FALSE(e.matched);
- EXPECT_EQ(e.value, nullptr);
- EXPECT_EQ(p->error(), "1:6: expected '{' for loop");
+ auto p = parser("loop discard; }");
+ auto e = p->statement();
+ EXPECT_TRUE(p->has_error());
+ EXPECT_TRUE(e.errored);
+ EXPECT_FALSE(e.matched);
+ EXPECT_EQ(e.value, nullptr);
+ EXPECT_EQ(p->error(), "1:6: expected '{' for loop");
}
TEST_F(ParserImplTest, Statement_Assignment) {
- auto p = parser("a = b;");
- auto e = p->statement();
- ASSERT_FALSE(p->has_error()) << p->error();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- ASSERT_TRUE(e->Is<ast::AssignmentStatement>());
+ auto p = parser("a = b;");
+ auto e = p->statement();
+ ASSERT_FALSE(p->has_error()) << p->error();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ ASSERT_TRUE(e->Is<ast::AssignmentStatement>());
}
TEST_F(ParserImplTest, Statement_Assignment_Invalid) {
- auto p = parser("a = if(b) {};");
- auto e = p->statement();
- EXPECT_TRUE(p->has_error());
- EXPECT_TRUE(e.errored);
- EXPECT_FALSE(e.matched);
- EXPECT_EQ(e.value, nullptr);
- EXPECT_EQ(p->error(), "1:5: unable to parse right side of assignment");
+ auto p = parser("a = if(b) {};");
+ auto e = p->statement();
+ EXPECT_TRUE(p->has_error());
+ EXPECT_TRUE(e.errored);
+ EXPECT_FALSE(e.matched);
+ EXPECT_EQ(e.value, nullptr);
+ EXPECT_EQ(p->error(), "1:5: unable to parse right side of assignment");
}
TEST_F(ParserImplTest, Statement_Assignment_MissingSemicolon) {
- auto p = parser("a = b");
- auto e = p->statement();
- EXPECT_TRUE(p->has_error());
- EXPECT_TRUE(e.errored);
- EXPECT_FALSE(e.matched);
- EXPECT_EQ(e.value, nullptr);
- EXPECT_EQ(p->error(), "1:6: expected ';' for assignment statement");
+ auto p = parser("a = b");
+ auto e = p->statement();
+ EXPECT_TRUE(p->has_error());
+ EXPECT_TRUE(e.errored);
+ EXPECT_FALSE(e.matched);
+ EXPECT_EQ(e.value, nullptr);
+ EXPECT_EQ(p->error(), "1:6: expected ';' for assignment statement");
}
TEST_F(ParserImplTest, Statement_Break) {
- auto p = parser("break;");
- auto e = p->statement();
- ASSERT_FALSE(p->has_error()) << p->error();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- ASSERT_TRUE(e->Is<ast::BreakStatement>());
+ auto p = parser("break;");
+ auto e = p->statement();
+ ASSERT_FALSE(p->has_error()) << p->error();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ ASSERT_TRUE(e->Is<ast::BreakStatement>());
}
TEST_F(ParserImplTest, Statement_Break_MissingSemicolon) {
- auto p = parser("break");
- auto e = p->statement();
- EXPECT_TRUE(p->has_error());
- EXPECT_TRUE(e.errored);
- EXPECT_FALSE(e.matched);
- EXPECT_EQ(e.value, nullptr);
- EXPECT_EQ(p->error(), "1:6: expected ';' for break statement");
+ auto p = parser("break");
+ auto e = p->statement();
+ EXPECT_TRUE(p->has_error());
+ EXPECT_TRUE(e.errored);
+ EXPECT_FALSE(e.matched);
+ EXPECT_EQ(e.value, nullptr);
+ EXPECT_EQ(p->error(), "1:6: expected ';' for break statement");
}
TEST_F(ParserImplTest, Statement_Continue) {
- auto p = parser("continue;");
- auto e = p->statement();
- ASSERT_FALSE(p->has_error()) << p->error();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- ASSERT_TRUE(e->Is<ast::ContinueStatement>());
+ auto p = parser("continue;");
+ auto e = p->statement();
+ ASSERT_FALSE(p->has_error()) << p->error();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ ASSERT_TRUE(e->Is<ast::ContinueStatement>());
}
TEST_F(ParserImplTest, Statement_Continue_MissingSemicolon) {
- auto p = parser("continue");
- auto e = p->statement();
- EXPECT_TRUE(p->has_error());
- EXPECT_TRUE(e.errored);
- EXPECT_FALSE(e.matched);
- EXPECT_EQ(e.value, nullptr);
- EXPECT_EQ(p->error(), "1:9: expected ';' for continue statement");
+ auto p = parser("continue");
+ auto e = p->statement();
+ EXPECT_TRUE(p->has_error());
+ EXPECT_TRUE(e.errored);
+ EXPECT_FALSE(e.matched);
+ EXPECT_EQ(e.value, nullptr);
+ EXPECT_EQ(p->error(), "1:9: expected ';' for continue statement");
}
TEST_F(ParserImplTest, Statement_Discard) {
- auto p = parser("discard;");
- auto e = p->statement();
- ASSERT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- ASSERT_TRUE(e->Is<ast::DiscardStatement>());
+ auto p = parser("discard;");
+ auto e = p->statement();
+ ASSERT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ ASSERT_TRUE(e->Is<ast::DiscardStatement>());
}
TEST_F(ParserImplTest, Statement_Discard_MissingSemicolon) {
- auto p = parser("discard");
- auto e = p->statement();
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(e.value, nullptr);
- EXPECT_TRUE(e.errored);
- EXPECT_FALSE(e.matched);
- EXPECT_EQ(p->error(), "1:8: expected ';' for discard statement");
+ auto p = parser("discard");
+ auto e = p->statement();
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(e.value, nullptr);
+ EXPECT_TRUE(e.errored);
+ EXPECT_FALSE(e.matched);
+ EXPECT_EQ(p->error(), "1:8: expected ';' for discard statement");
}
TEST_F(ParserImplTest, Statement_Body) {
- auto p = parser("{ var i: i32; }");
- auto e = p->statement();
- ASSERT_FALSE(p->has_error()) << p->error();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- ASSERT_TRUE(e->Is<ast::BlockStatement>());
- EXPECT_TRUE(e->As<ast::BlockStatement>()
- ->statements[0]
- ->Is<ast::VariableDeclStatement>());
+ auto p = parser("{ var i: i32; }");
+ auto e = p->statement();
+ ASSERT_FALSE(p->has_error()) << p->error();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ ASSERT_TRUE(e->Is<ast::BlockStatement>());
+ EXPECT_TRUE(e->As<ast::BlockStatement>()->statements[0]->Is<ast::VariableDeclStatement>());
}
TEST_F(ParserImplTest, Statement_Body_Invalid) {
- auto p = parser("{ fn main() -> {}}");
- auto e = p->statement();
- EXPECT_TRUE(p->has_error());
- EXPECT_TRUE(e.errored);
- EXPECT_FALSE(e.matched);
- EXPECT_EQ(e.value, nullptr);
- EXPECT_EQ(p->error(), "1:3: expected '}'");
+ auto p = parser("{ fn main() -> {}}");
+ auto e = p->statement();
+ EXPECT_TRUE(p->has_error());
+ EXPECT_TRUE(e.errored);
+ EXPECT_FALSE(e.matched);
+ EXPECT_EQ(e.value, nullptr);
+ EXPECT_EQ(p->error(), "1:3: expected '}'");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_statements_test.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_statements_test.cc
index 27819749bf6..b96a28a4a16 100644
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_statements_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_statements_test.cc
@@ -19,21 +19,21 @@ namespace tint::reader::wgsl {
namespace {
TEST_F(ParserImplTest, Statements) {
- auto p = parser("discard; return;");
- auto e = p->expect_statements();
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_EQ(e->size(), 2u);
- EXPECT_TRUE(e.value[0]->Is<ast::DiscardStatement>());
- EXPECT_TRUE(e.value[1]->Is<ast::ReturnStatement>());
+ auto p = parser("discard; return;");
+ auto e = p->expect_statements();
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_EQ(e->size(), 2u);
+ EXPECT_TRUE(e.value[0]->Is<ast::DiscardStatement>());
+ EXPECT_TRUE(e.value[1]->Is<ast::ReturnStatement>());
}
TEST_F(ParserImplTest, Statements_Empty) {
- auto p = parser("");
- auto e = p->expect_statements();
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_EQ(e->size(), 0u);
+ auto p = parser("");
+ auto e = p->expect_statements();
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_EQ(e->size(), 0u);
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_storage_class_test.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_storage_class_test.cc
index 4d9110fd0d7..4abbe766925 100644
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_storage_class_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_storage_class_test.cc
@@ -18,49 +18,48 @@ namespace tint::reader::wgsl {
namespace {
struct StorageClassData {
- const char* input;
- ast::StorageClass result;
+ const char* input;
+ ast::StorageClass result;
};
inline std::ostream& operator<<(std::ostream& out, StorageClassData data) {
- out << std::string(data.input);
- return out;
+ out << std::string(data.input);
+ return out;
}
class StorageClassTest : public ParserImplTestWithParam<StorageClassData> {};
TEST_P(StorageClassTest, Parses) {
- auto params = GetParam();
- auto p = parser(params.input);
+ auto params = GetParam();
+ auto p = parser(params.input);
- auto sc = p->expect_storage_class("test");
- EXPECT_FALSE(sc.errored);
- EXPECT_FALSE(p->has_error());
- EXPECT_EQ(sc.value, params.result);
+ auto sc = p->expect_storage_class("test");
+ EXPECT_FALSE(sc.errored);
+ EXPECT_FALSE(p->has_error());
+ EXPECT_EQ(sc.value, params.result);
- auto t = p->next();
- EXPECT_TRUE(t.IsEof());
+ auto t = p->next();
+ EXPECT_TRUE(t.IsEof());
}
INSTANTIATE_TEST_SUITE_P(
ParserImplTest,
StorageClassTest,
- testing::Values(
- StorageClassData{"uniform", ast::StorageClass::kUniform},
- StorageClassData{"workgroup", ast::StorageClass::kWorkgroup},
- StorageClassData{"storage", ast::StorageClass::kStorage},
- StorageClassData{"storage_buffer", ast::StorageClass::kStorage},
- StorageClassData{"private", ast::StorageClass::kPrivate},
- StorageClassData{"function", ast::StorageClass::kFunction}));
+ testing::Values(StorageClassData{"uniform", ast::StorageClass::kUniform},
+ StorageClassData{"workgroup", ast::StorageClass::kWorkgroup},
+ StorageClassData{"storage", ast::StorageClass::kStorage},
+ StorageClassData{"storage_buffer", ast::StorageClass::kStorage},
+ StorageClassData{"private", ast::StorageClass::kPrivate},
+ StorageClassData{"function", ast::StorageClass::kFunction}));
TEST_F(ParserImplTest, StorageClass_NoMatch) {
- auto p = parser("not-a-storage-class");
- auto sc = p->expect_storage_class("test");
- EXPECT_EQ(sc.errored, true);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:1: invalid storage class for test");
+ auto p = parser("not-a-storage-class");
+ auto sc = p->expect_storage_class("test");
+ EXPECT_EQ(sc.errored, true);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:1: invalid storage class for test");
- auto t = p->next();
- EXPECT_TRUE(t.IsIdentifier());
- EXPECT_EQ(t.to_str(), "not");
+ auto t = p->next();
+ EXPECT_TRUE(t.IsIdentifier());
+ EXPECT_EQ(t.to_str(), "not");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_storage_texture_test.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_storage_texture_test.cc
new file mode 100644
index 00000000000..6297a1ef27b
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_storage_texture_test.cc
@@ -0,0 +1,65 @@
+// Copyright 2020 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/tint/reader/wgsl/parser_impl_test_helper.h"
+
+namespace tint::reader::wgsl {
+namespace {
+
+TEST_F(ParserImplTest, StorageTextureType_Invalid) {
+ auto p = parser("abc");
+ auto t = p->storage_texture();
+ EXPECT_FALSE(t.matched);
+ EXPECT_FALSE(t.errored);
+ EXPECT_FALSE(p->has_error());
+}
+
+TEST_F(ParserImplTest, StorageTextureType_1d) {
+ auto p = parser("texture_storage_1d");
+ auto t = p->storage_texture();
+ EXPECT_TRUE(t.matched);
+ EXPECT_FALSE(t.errored);
+ EXPECT_EQ(t.value, ast::TextureDimension::k1d);
+ EXPECT_FALSE(p->has_error());
+}
+
+TEST_F(ParserImplTest, StorageTextureType_2d) {
+ auto p = parser("texture_storage_2d");
+ auto t = p->storage_texture();
+ EXPECT_TRUE(t.matched);
+ EXPECT_FALSE(t.errored);
+ EXPECT_EQ(t.value, ast::TextureDimension::k2d);
+ EXPECT_FALSE(p->has_error());
+}
+
+TEST_F(ParserImplTest, StorageTextureType_2dArray) {
+ auto p = parser("texture_storage_2d_array");
+ auto t = p->storage_texture();
+ EXPECT_TRUE(t.matched);
+ EXPECT_FALSE(t.errored);
+ EXPECT_EQ(t.value, ast::TextureDimension::k2dArray);
+ EXPECT_FALSE(p->has_error());
+}
+
+TEST_F(ParserImplTest, StorageTextureType_3d) {
+ auto p = parser("texture_storage_3d");
+ auto t = p->storage_texture();
+ EXPECT_TRUE(t.matched);
+ EXPECT_FALSE(t.errored);
+ EXPECT_EQ(t.value, ast::TextureDimension::k3d);
+ EXPECT_FALSE(p->has_error());
+}
+
+} // namespace
+} // namespace tint::reader::wgsl
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_storage_texture_type_test.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_storage_texture_type_test.cc
deleted file mode 100644
index 89911981504..00000000000
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_storage_texture_type_test.cc
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright 2020 The Tint Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "src/tint/reader/wgsl/parser_impl_test_helper.h"
-
-namespace tint::reader::wgsl {
-namespace {
-
-TEST_F(ParserImplTest, StorageTextureType_Invalid) {
- auto p = parser("abc");
- auto t = p->storage_texture_type();
- EXPECT_FALSE(t.matched);
- EXPECT_FALSE(t.errored);
- EXPECT_FALSE(p->has_error());
-}
-
-TEST_F(ParserImplTest, StorageTextureType_1d) {
- auto p = parser("texture_storage_1d");
- auto t = p->storage_texture_type();
- EXPECT_TRUE(t.matched);
- EXPECT_FALSE(t.errored);
- EXPECT_EQ(t.value, ast::TextureDimension::k1d);
- EXPECT_FALSE(p->has_error());
-}
-
-TEST_F(ParserImplTest, StorageTextureType_2d) {
- auto p = parser("texture_storage_2d");
- auto t = p->storage_texture_type();
- EXPECT_TRUE(t.matched);
- EXPECT_FALSE(t.errored);
- EXPECT_EQ(t.value, ast::TextureDimension::k2d);
- EXPECT_FALSE(p->has_error());
-}
-
-TEST_F(ParserImplTest, StorageTextureType_2dArray) {
- auto p = parser("texture_storage_2d_array");
- auto t = p->storage_texture_type();
- EXPECT_TRUE(t.matched);
- EXPECT_FALSE(t.errored);
- EXPECT_EQ(t.value, ast::TextureDimension::k2dArray);
- EXPECT_FALSE(p->has_error());
-}
-
-TEST_F(ParserImplTest, StorageTextureType_3d) {
- auto p = parser("texture_storage_3d");
- auto t = p->storage_texture_type();
- EXPECT_TRUE(t.matched);
- EXPECT_FALSE(t.errored);
- EXPECT_EQ(t.value, ast::TextureDimension::k3d);
- EXPECT_FALSE(p->has_error());
-}
-
-} // namespace
-} // namespace tint::reader::wgsl
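
Taken together, the added and deleted files above amount to a rename: parser_impl_storage_texture_type_test.cc becomes parser_impl_storage_texture_test.cc, and the parser entry point storage_texture_type() becomes storage_texture(). A minimal sketch of the renamed call, reusing only what the new tests show (the consuming code is hypothetical):

    // Hypothetical caller; parser() and storage_texture() are taken from the tests above.
    auto p = parser("texture_storage_2d_array");
    auto t = p->storage_texture();                      // formerly p->storage_texture_type()
    if (t.matched && !t.errored) {
        // t.value is an ast::TextureDimension, e.g. k2dArray for this input.
    }
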
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_struct_attribute_decl_test.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_struct_attribute_decl_test.cc
index aabd841fa13..a22abee1b1c 100644
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_struct_attribute_decl_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_struct_attribute_decl_test.cc
@@ -19,54 +19,53 @@ namespace tint::reader::wgsl {
namespace {
TEST_F(ParserImplTest, AttributeDecl_Parses) {
- auto p = parser("@invariant");
- auto attrs = p->attribute_list();
- EXPECT_FALSE(p->has_error());
- EXPECT_FALSE(attrs.errored);
- EXPECT_TRUE(attrs.matched);
- ASSERT_EQ(attrs.value.size(), 1u);
- auto* invariant = attrs.value[0]->As<ast::Attribute>();
- EXPECT_TRUE(invariant->Is<ast::InvariantAttribute>());
+ auto p = parser("@invariant");
+ auto attrs = p->attribute_list();
+ EXPECT_FALSE(p->has_error());
+ EXPECT_FALSE(attrs.errored);
+ EXPECT_TRUE(attrs.matched);
+ ASSERT_EQ(attrs.value.size(), 1u);
+ auto* invariant = attrs.value[0]->As<ast::Attribute>();
+ EXPECT_TRUE(invariant->Is<ast::InvariantAttribute>());
}
TEST_F(ParserImplTest, AttributeDecl_MissingParenLeft) {
- auto p = parser("@location 1)");
- auto attrs = p->attribute_list();
- EXPECT_TRUE(p->has_error());
- EXPECT_TRUE(attrs.errored);
- EXPECT_FALSE(attrs.matched);
- EXPECT_TRUE(attrs.value.empty());
- EXPECT_EQ(p->error(), "1:11: expected '(' for location attribute");
+ auto p = parser("@location 1)");
+ auto attrs = p->attribute_list();
+ EXPECT_TRUE(p->has_error());
+ EXPECT_TRUE(attrs.errored);
+ EXPECT_FALSE(attrs.matched);
+ EXPECT_TRUE(attrs.value.empty());
+ EXPECT_EQ(p->error(), "1:11: expected '(' for location attribute");
}
TEST_F(ParserImplTest, AttributeDecl_MissingValue) {
- auto p = parser("@location()");
- auto attrs = p->attribute_list();
- EXPECT_TRUE(p->has_error());
- EXPECT_TRUE(attrs.errored);
- EXPECT_FALSE(attrs.matched);
- EXPECT_TRUE(attrs.value.empty());
- EXPECT_EQ(p->error(),
- "1:11: expected signed integer literal for location attribute");
+ auto p = parser("@location()");
+ auto attrs = p->attribute_list();
+ EXPECT_TRUE(p->has_error());
+ EXPECT_TRUE(attrs.errored);
+ EXPECT_FALSE(attrs.matched);
+ EXPECT_TRUE(attrs.value.empty());
+ EXPECT_EQ(p->error(), "1:11: expected signed integer literal for location attribute");
}
TEST_F(ParserImplTest, AttributeDecl_MissingParenRight) {
- auto p = parser("@location(1");
- auto attrs = p->attribute_list();
- EXPECT_TRUE(p->has_error());
- EXPECT_TRUE(attrs.errored);
- EXPECT_FALSE(attrs.matched);
- EXPECT_TRUE(attrs.value.empty());
- EXPECT_EQ(p->error(), "1:12: expected ')' for location attribute");
+ auto p = parser("@location(1");
+ auto attrs = p->attribute_list();
+ EXPECT_TRUE(p->has_error());
+ EXPECT_TRUE(attrs.errored);
+ EXPECT_FALSE(attrs.matched);
+ EXPECT_TRUE(attrs.value.empty());
+ EXPECT_EQ(p->error(), "1:12: expected ')' for location attribute");
}
TEST_F(ParserImplTest, AttributeDecl_Invalidattribute) {
- auto p = parser("@invalid");
- auto attrs = p->attribute_list();
- EXPECT_TRUE(p->has_error());
- EXPECT_TRUE(attrs.errored);
- EXPECT_FALSE(attrs.matched);
- EXPECT_TRUE(attrs.value.empty());
+ auto p = parser("@invalid");
+ auto attrs = p->attribute_list();
+ EXPECT_TRUE(p->has_error());
+ EXPECT_TRUE(attrs.errored);
+ EXPECT_FALSE(attrs.matched);
+ EXPECT_TRUE(attrs.value.empty());
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_struct_body_decl_test.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_struct_body_decl_test.cc
index d17f9669930..b02fb5a177d 100644
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_struct_body_decl_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_struct_body_decl_test.cc
@@ -18,87 +18,85 @@ namespace tint::reader::wgsl {
namespace {
TEST_F(ParserImplTest, StructBodyDecl_Parses) {
- auto p = parser("{a : i32}");
+ auto p = parser("{a : i32}");
- auto& builder = p->builder();
+ auto& builder = p->builder();
- auto m = p->expect_struct_body_decl();
- ASSERT_FALSE(p->has_error());
- ASSERT_FALSE(m.errored);
- ASSERT_EQ(m.value.size(), 1u);
+ auto m = p->expect_struct_body_decl();
+ ASSERT_FALSE(p->has_error());
+ ASSERT_FALSE(m.errored);
+ ASSERT_EQ(m.value.size(), 1u);
- const auto* mem = m.value[0];
- EXPECT_EQ(mem->symbol, builder.Symbols().Get("a"));
- EXPECT_TRUE(mem->type->Is<ast::I32>());
- EXPECT_EQ(mem->attributes.size(), 0u);
+ const auto* mem = m.value[0];
+ EXPECT_EQ(mem->symbol, builder.Symbols().Get("a"));
+ EXPECT_TRUE(mem->type->Is<ast::I32>());
+ EXPECT_EQ(mem->attributes.size(), 0u);
}
TEST_F(ParserImplTest, StructBodyDecl_Parses_TrailingComma) {
- auto p = parser("{a : i32,}");
+ auto p = parser("{a : i32,}");
- auto& builder = p->builder();
+ auto& builder = p->builder();
- auto m = p->expect_struct_body_decl();
- ASSERT_FALSE(p->has_error());
- ASSERT_FALSE(m.errored);
- ASSERT_EQ(m.value.size(), 1u);
+ auto m = p->expect_struct_body_decl();
+ ASSERT_FALSE(p->has_error());
+ ASSERT_FALSE(m.errored);
+ ASSERT_EQ(m.value.size(), 1u);
- const auto* mem = m.value[0];
- EXPECT_EQ(mem->symbol, builder.Symbols().Get("a"));
- EXPECT_TRUE(mem->type->Is<ast::I32>());
- EXPECT_EQ(mem->attributes.size(), 0u);
+ const auto* mem = m.value[0];
+ EXPECT_EQ(mem->symbol, builder.Symbols().Get("a"));
+ EXPECT_TRUE(mem->type->Is<ast::I32>());
+ EXPECT_EQ(mem->attributes.size(), 0u);
}
TEST_F(ParserImplTest, StructBodyDecl_ParsesEmpty) {
- auto p = parser("{}");
- auto m = p->expect_struct_body_decl();
- ASSERT_FALSE(p->has_error());
- ASSERT_FALSE(m.errored);
- ASSERT_EQ(m.value.size(), 0u);
+ auto p = parser("{}");
+ auto m = p->expect_struct_body_decl();
+ ASSERT_FALSE(p->has_error());
+ ASSERT_FALSE(m.errored);
+ ASSERT_EQ(m.value.size(), 0u);
}
TEST_F(ParserImplTest, StructBodyDecl_InvalidAlign) {
- auto p = parser(R"(
+ auto p = parser(R"(
{
@align(nan) a : i32,
})");
- auto m = p->expect_struct_body_decl();
- ASSERT_TRUE(p->has_error());
- ASSERT_TRUE(m.errored);
- EXPECT_EQ(p->error(),
- "3:10: expected signed integer literal for align attribute");
+ auto m = p->expect_struct_body_decl();
+ ASSERT_TRUE(p->has_error());
+ ASSERT_TRUE(m.errored);
+ EXPECT_EQ(p->error(), "3:10: expected signed integer literal for align attribute");
}
TEST_F(ParserImplTest, StructBodyDecl_InvalidSize) {
- auto p = parser(R"(
+ auto p = parser(R"(
{
@size(nan) a : i32,
})");
- auto m = p->expect_struct_body_decl();
- ASSERT_TRUE(p->has_error());
- ASSERT_TRUE(m.errored);
- EXPECT_EQ(p->error(),
- "3:9: expected signed integer literal for size attribute");
+ auto m = p->expect_struct_body_decl();
+ ASSERT_TRUE(p->has_error());
+ ASSERT_TRUE(m.errored);
+ EXPECT_EQ(p->error(), "3:9: expected signed integer literal for size attribute");
}
TEST_F(ParserImplTest, StructBodyDecl_MissingClosingBracket) {
- auto p = parser("{a : i32,");
- auto m = p->expect_struct_body_decl();
- ASSERT_TRUE(p->has_error());
- ASSERT_TRUE(m.errored);
- EXPECT_EQ(p->error(), "1:10: expected '}' for struct declaration");
+ auto p = parser("{a : i32,");
+ auto m = p->expect_struct_body_decl();
+ ASSERT_TRUE(p->has_error());
+ ASSERT_TRUE(m.errored);
+ EXPECT_EQ(p->error(), "1:10: expected '}' for struct declaration");
}
TEST_F(ParserImplTest, StructBodyDecl_InvalidToken) {
- auto p = parser(R"(
+ auto p = parser(R"(
{
a : i32,
1.23
} )");
- auto m = p->expect_struct_body_decl();
- ASSERT_TRUE(p->has_error());
- ASSERT_TRUE(m.errored);
- EXPECT_EQ(p->error(), "4:3: expected '}' for struct declaration");
+ auto m = p->expect_struct_body_decl();
+ ASSERT_TRUE(p->has_error());
+ ASSERT_TRUE(m.errored);
+ EXPECT_EQ(p->error(), "4:3: expected '}' for struct declaration");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_struct_decl_test.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_struct_decl_test.cc
index be0a4edf5ba..106ffd581bc 100644
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_struct_decl_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_struct_decl_test.cc
@@ -19,109 +19,107 @@ namespace tint::reader::wgsl {
namespace {
TEST_F(ParserImplTest, StructDecl_Parses) {
- auto p = parser(R"(
+ auto p = parser(R"(
struct S {
a : i32,
b : f32,
})");
- auto s = p->struct_decl();
- EXPECT_FALSE(p->has_error());
- EXPECT_FALSE(s.errored);
- EXPECT_TRUE(s.matched);
- ASSERT_NE(s.value, nullptr);
- ASSERT_EQ(s->name, p->builder().Symbols().Register("S"));
- ASSERT_EQ(s->members.size(), 2u);
- EXPECT_EQ(s->members[0]->symbol, p->builder().Symbols().Register("a"));
- EXPECT_EQ(s->members[1]->symbol, p->builder().Symbols().Register("b"));
+ auto s = p->struct_decl();
+ EXPECT_FALSE(p->has_error());
+ EXPECT_FALSE(s.errored);
+ EXPECT_TRUE(s.matched);
+ ASSERT_NE(s.value, nullptr);
+ ASSERT_EQ(s->name, p->builder().Symbols().Register("S"));
+ ASSERT_EQ(s->members.size(), 2u);
+ EXPECT_EQ(s->members[0]->symbol, p->builder().Symbols().Register("a"));
+ EXPECT_EQ(s->members[1]->symbol, p->builder().Symbols().Register("b"));
}
TEST_F(ParserImplTest, StructDecl_Unicode_Parses) {
- const std::string struct_ident = // "𝓼𝓽𝓻𝓾𝓬𝓽𝓾𝓻𝓮"
- "\xf0\x9d\x93\xbc\xf0\x9d\x93\xbd\xf0\x9d\x93\xbb\xf0\x9d\x93\xbe\xf0\x9d"
- "\x93\xac\xf0\x9d\x93\xbd\xf0\x9d\x93\xbe\xf0\x9d\x93\xbb\xf0\x9d\x93"
- "\xae";
- const std::string member_a_ident = // "𝕞𝕖𝕞𝕓𝕖𝕣_𝕒"
- "\xf0\x9d\x95\x9e\xf0\x9d\x95\x96\xf0\x9d\x95\x9e\xf0\x9d\x95\x93\xf0\x9d"
- "\x95\x96\xf0\x9d\x95\xa3\x5f\xf0\x9d\x95\x92";
- const std::string member_b_ident = // "𝔪𝔢𝔪𝔟𝔢𝔯_𝔟"
- "\xf0\x9d\x94\xaa\xf0\x9d\x94\xa2\xf0\x9d\x94\xaa\xf0\x9d\x94\x9f\xf0\x9d"
- "\x94\xa2\xf0\x9d\x94\xaf\x5f\xf0\x9d\x94\x9f";
-
- std::string src = R"(
+ const std::string struct_ident = // "𝓼𝓽𝓻𝓾𝓬𝓽𝓾𝓻𝓮"
+ "\xf0\x9d\x93\xbc\xf0\x9d\x93\xbd\xf0\x9d\x93\xbb\xf0\x9d\x93\xbe\xf0\x9d"
+ "\x93\xac\xf0\x9d\x93\xbd\xf0\x9d\x93\xbe\xf0\x9d\x93\xbb\xf0\x9d\x93"
+ "\xae";
+ const std::string member_a_ident = // "𝕞𝕖𝕞𝕓𝕖𝕣_𝕒"
+ "\xf0\x9d\x95\x9e\xf0\x9d\x95\x96\xf0\x9d\x95\x9e\xf0\x9d\x95\x93\xf0\x9d"
+ "\x95\x96\xf0\x9d\x95\xa3\x5f\xf0\x9d\x95\x92";
+ const std::string member_b_ident = // "𝔪𝔢𝔪𝔟𝔢𝔯_𝔟"
+ "\xf0\x9d\x94\xaa\xf0\x9d\x94\xa2\xf0\x9d\x94\xaa\xf0\x9d\x94\x9f\xf0\x9d"
+ "\x94\xa2\xf0\x9d\x94\xaf\x5f\xf0\x9d\x94\x9f";
+
+ std::string src = R"(
struct $struct {
$member_a : i32,
$member_b : f32,
})";
- src = utils::ReplaceAll(src, "$struct", struct_ident);
- src = utils::ReplaceAll(src, "$member_a", member_a_ident);
- src = utils::ReplaceAll(src, "$member_b", member_b_ident);
-
- auto p = parser(src);
-
- auto s = p->struct_decl();
- EXPECT_FALSE(p->has_error());
- EXPECT_FALSE(s.errored);
- EXPECT_TRUE(s.matched);
- ASSERT_NE(s.value, nullptr);
- ASSERT_EQ(s->name, p->builder().Symbols().Register(struct_ident));
- ASSERT_EQ(s->members.size(), 2u);
- EXPECT_EQ(s->members[0]->symbol,
- p->builder().Symbols().Register(member_a_ident));
- EXPECT_EQ(s->members[1]->symbol,
- p->builder().Symbols().Register(member_b_ident));
+ src = utils::ReplaceAll(src, "$struct", struct_ident);
+ src = utils::ReplaceAll(src, "$member_a", member_a_ident);
+ src = utils::ReplaceAll(src, "$member_b", member_b_ident);
+
+ auto p = parser(src);
+
+ auto s = p->struct_decl();
+ EXPECT_FALSE(p->has_error());
+ EXPECT_FALSE(s.errored);
+ EXPECT_TRUE(s.matched);
+ ASSERT_NE(s.value, nullptr);
+ ASSERT_EQ(s->name, p->builder().Symbols().Register(struct_ident));
+ ASSERT_EQ(s->members.size(), 2u);
+ EXPECT_EQ(s->members[0]->symbol, p->builder().Symbols().Register(member_a_ident));
+ EXPECT_EQ(s->members[1]->symbol, p->builder().Symbols().Register(member_b_ident));
}
TEST_F(ParserImplTest, StructDecl_EmptyMembers) {
- auto p = parser("struct S {}");
-
- auto s = p->struct_decl();
- EXPECT_FALSE(p->has_error());
- EXPECT_FALSE(s.errored);
- EXPECT_TRUE(s.matched);
- ASSERT_NE(s.value, nullptr);
- ASSERT_EQ(s->members.size(), 0u);
+ auto p = parser("struct S {}");
+
+ auto s = p->struct_decl();
+ EXPECT_FALSE(p->has_error());
+ EXPECT_FALSE(s.errored);
+ EXPECT_TRUE(s.matched);
+ ASSERT_NE(s.value, nullptr);
+ ASSERT_EQ(s->members.size(), 0u);
}
TEST_F(ParserImplTest, StructDecl_MissingIdent) {
- auto p = parser("struct {}");
+ auto p = parser("struct {}");
- auto s = p->struct_decl();
- EXPECT_TRUE(s.errored);
- EXPECT_FALSE(s.matched);
- EXPECT_EQ(s.value, nullptr);
+ auto s = p->struct_decl();
+ EXPECT_TRUE(s.errored);
+ EXPECT_FALSE(s.matched);
+ EXPECT_EQ(s.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:8: expected identifier for struct declaration");
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:8: expected identifier for struct declaration");
}
TEST_F(ParserImplTest, StructDecl_MissingBracketLeft) {
- auto p = parser("struct S }");
+ auto p = parser("struct S }");
- auto s = p->struct_decl();
- EXPECT_TRUE(s.errored);
- EXPECT_FALSE(s.matched);
- EXPECT_EQ(s.value, nullptr);
+ auto s = p->struct_decl();
+ EXPECT_TRUE(s.errored);
+ EXPECT_FALSE(s.matched);
+ EXPECT_EQ(s.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:10: expected '{' for struct declaration");
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:10: expected '{' for struct declaration");
}
// TODO(crbug.com/tint/1475): Remove this.
TEST_F(ParserImplTest, DEPRECATED_StructDecl_Parses_WithSemicolons) {
- auto p = parser(R"(
+ auto p = parser(R"(
struct S {
a : i32;
b : f32;
})");
- auto s = p->struct_decl();
- EXPECT_FALSE(p->has_error());
- EXPECT_FALSE(s.errored);
- EXPECT_TRUE(s.matched);
- ASSERT_NE(s.value, nullptr);
- ASSERT_EQ(s->name, p->builder().Symbols().Register("S"));
- ASSERT_EQ(s->members.size(), 2u);
- EXPECT_EQ(s->members[0]->symbol, p->builder().Symbols().Register("a"));
- EXPECT_EQ(s->members[1]->symbol, p->builder().Symbols().Register("b"));
+ auto s = p->struct_decl();
+ EXPECT_FALSE(p->has_error());
+ EXPECT_FALSE(s.errored);
+ EXPECT_TRUE(s.matched);
+ ASSERT_NE(s.value, nullptr);
+ ASSERT_EQ(s->name, p->builder().Symbols().Register("S"));
+ ASSERT_EQ(s->members.size(), 2u);
+ EXPECT_EQ(s->members[0]->symbol, p->builder().Symbols().Register("a"));
+ EXPECT_EQ(s->members[1]->symbol, p->builder().Symbols().Register("b"));
}
} // namespace
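
The DEPRECATED_StructDecl_Parses_WithSemicolons test above keeps the old semicolon-separated member syntax alive behind a TODO (crbug.com/tint/1475), while the other tests use the comma form. A short sketch of the two spellings, embedded in raw strings the same way the tests embed their WGSL source (the struct itself is illustrative):

    // Illustrative input only; both forms are accepted by the parser exercised above.
    auto current    = parser(R"(struct S { a : i32, b : f32, })");   // comma-separated members
    auto deprecated = parser(R"(struct S { a : i32; b : f32; })");   // still parsed, slated for removal
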
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_struct_member_attribute_decl_test.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_struct_member_attribute_decl_test.cc
index b779f586777..2695074c6c0 100644
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_struct_member_attribute_decl_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_struct_member_attribute_decl_test.cc
@@ -18,34 +18,33 @@ namespace tint::reader::wgsl {
namespace {
TEST_F(ParserImplTest, AttributeDecl_EmptyStr) {
- auto p = parser("");
- auto attrs = p->attribute_list();
- EXPECT_FALSE(p->has_error());
- EXPECT_FALSE(attrs.errored);
- EXPECT_FALSE(attrs.matched);
- EXPECT_EQ(attrs.value.size(), 0u);
+ auto p = parser("");
+ auto attrs = p->attribute_list();
+ EXPECT_FALSE(p->has_error());
+ EXPECT_FALSE(attrs.errored);
+ EXPECT_FALSE(attrs.matched);
+ EXPECT_EQ(attrs.value.size(), 0u);
}
TEST_F(ParserImplTest, AttributeDecl_Single) {
- auto p = parser("@size(4)");
- auto attrs = p->attribute_list();
- EXPECT_FALSE(p->has_error());
- EXPECT_FALSE(attrs.errored);
- EXPECT_TRUE(attrs.matched);
- ASSERT_EQ(attrs.value.size(), 1u);
- auto* attr = attrs.value[0]->As<ast::Attribute>();
- ASSERT_NE(attr, nullptr);
- EXPECT_TRUE(attr->Is<ast::StructMemberSizeAttribute>());
+ auto p = parser("@size(4)");
+ auto attrs = p->attribute_list();
+ EXPECT_FALSE(p->has_error());
+ EXPECT_FALSE(attrs.errored);
+ EXPECT_TRUE(attrs.matched);
+ ASSERT_EQ(attrs.value.size(), 1u);
+ auto* attr = attrs.value[0]->As<ast::Attribute>();
+ ASSERT_NE(attr, nullptr);
+ EXPECT_TRUE(attr->Is<ast::StructMemberSizeAttribute>());
}
TEST_F(ParserImplTest, AttributeDecl_InvalidAttribute) {
- auto p = parser("@size(nan)");
- auto attrs = p->attribute_list();
- EXPECT_TRUE(p->has_error()) << p->error();
- EXPECT_TRUE(attrs.errored);
- EXPECT_FALSE(attrs.matched);
- EXPECT_EQ(p->error(),
- "1:7: expected signed integer literal for size attribute");
+ auto p = parser("@size(nan)");
+ auto attrs = p->attribute_list();
+ EXPECT_TRUE(p->has_error()) << p->error();
+ EXPECT_TRUE(attrs.errored);
+ EXPECT_FALSE(attrs.matched);
+ EXPECT_EQ(p->error(), "1:7: expected signed integer literal for size attribute");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_struct_member_attribute_test.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_struct_member_attribute_test.cc
index f180da04090..d185f3736a9 100644
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_struct_member_attribute_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_struct_member_attribute_test.cc
@@ -18,119 +18,115 @@ namespace tint::reader::wgsl {
namespace {
TEST_F(ParserImplTest, Attribute_Size) {
- auto p = parser("size(4)");
- auto attr = p->attribute();
- EXPECT_TRUE(attr.matched);
- EXPECT_FALSE(attr.errored);
- ASSERT_NE(attr.value, nullptr);
- ASSERT_FALSE(p->has_error());
-
- auto* member_attr = attr.value->As<ast::Attribute>();
- ASSERT_NE(member_attr, nullptr);
- ASSERT_TRUE(member_attr->Is<ast::StructMemberSizeAttribute>());
-
- auto* o = member_attr->As<ast::StructMemberSizeAttribute>();
- EXPECT_EQ(o->size, 4u);
+ auto p = parser("size(4)");
+ auto attr = p->attribute();
+ EXPECT_TRUE(attr.matched);
+ EXPECT_FALSE(attr.errored);
+ ASSERT_NE(attr.value, nullptr);
+ ASSERT_FALSE(p->has_error());
+
+ auto* member_attr = attr.value->As<ast::Attribute>();
+ ASSERT_NE(member_attr, nullptr);
+ ASSERT_TRUE(member_attr->Is<ast::StructMemberSizeAttribute>());
+
+ auto* o = member_attr->As<ast::StructMemberSizeAttribute>();
+ EXPECT_EQ(o->size, 4u);
}
TEST_F(ParserImplTest, Attribute_Size_MissingLeftParen) {
- auto p = parser("size 4)");
- auto attr = p->attribute();
- EXPECT_FALSE(attr.matched);
- EXPECT_TRUE(attr.errored);
- EXPECT_EQ(attr.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:6: expected '(' for size attribute");
+ auto p = parser("size 4)");
+ auto attr = p->attribute();
+ EXPECT_FALSE(attr.matched);
+ EXPECT_TRUE(attr.errored);
+ EXPECT_EQ(attr.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:6: expected '(' for size attribute");
}
TEST_F(ParserImplTest, Attribute_Size_MissingRightParen) {
- auto p = parser("size(4");
- auto attr = p->attribute();
- EXPECT_FALSE(attr.matched);
- EXPECT_TRUE(attr.errored);
- EXPECT_EQ(attr.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:7: expected ')' for size attribute");
+ auto p = parser("size(4");
+ auto attr = p->attribute();
+ EXPECT_FALSE(attr.matched);
+ EXPECT_TRUE(attr.errored);
+ EXPECT_EQ(attr.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:7: expected ')' for size attribute");
}
TEST_F(ParserImplTest, Attribute_Size_MissingValue) {
- auto p = parser("size()");
- auto attr = p->attribute();
- EXPECT_FALSE(attr.matched);
- EXPECT_TRUE(attr.errored);
- EXPECT_EQ(attr.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(),
- "1:6: expected signed integer literal for size attribute");
+ auto p = parser("size()");
+ auto attr = p->attribute();
+ EXPECT_FALSE(attr.matched);
+ EXPECT_TRUE(attr.errored);
+ EXPECT_EQ(attr.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:6: expected signed integer literal for size attribute");
}
TEST_F(ParserImplTest, Attribute_Size_MissingInvalid) {
- auto p = parser("size(nan)");
- auto attr = p->attribute();
- EXPECT_FALSE(attr.matched);
- EXPECT_TRUE(attr.errored);
- EXPECT_EQ(attr.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(),
- "1:6: expected signed integer literal for size attribute");
+ auto p = parser("size(nan)");
+ auto attr = p->attribute();
+ EXPECT_FALSE(attr.matched);
+ EXPECT_TRUE(attr.errored);
+ EXPECT_EQ(attr.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:6: expected signed integer literal for size attribute");
}
TEST_F(ParserImplTest, Attribute_Align) {
- auto p = parser("align(4)");
- auto attr = p->attribute();
- EXPECT_TRUE(attr.matched);
- EXPECT_FALSE(attr.errored);
- ASSERT_NE(attr.value, nullptr);
- ASSERT_FALSE(p->has_error());
-
- auto* member_attr = attr.value->As<ast::Attribute>();
- ASSERT_NE(member_attr, nullptr);
- ASSERT_TRUE(member_attr->Is<ast::StructMemberAlignAttribute>());
-
- auto* o = member_attr->As<ast::StructMemberAlignAttribute>();
- EXPECT_EQ(o->align, 4u);
+ auto p = parser("align(4)");
+ auto attr = p->attribute();
+ EXPECT_TRUE(attr.matched);
+ EXPECT_FALSE(attr.errored);
+ ASSERT_NE(attr.value, nullptr);
+ ASSERT_FALSE(p->has_error());
+
+ auto* member_attr = attr.value->As<ast::Attribute>();
+ ASSERT_NE(member_attr, nullptr);
+ ASSERT_TRUE(member_attr->Is<ast::StructMemberAlignAttribute>());
+
+ auto* o = member_attr->As<ast::StructMemberAlignAttribute>();
+ EXPECT_EQ(o->align, 4u);
}
TEST_F(ParserImplTest, Attribute_Align_MissingLeftParen) {
- auto p = parser("align 4)");
- auto attr = p->attribute();
- EXPECT_FALSE(attr.matched);
- EXPECT_TRUE(attr.errored);
- EXPECT_EQ(attr.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:7: expected '(' for align attribute");
+ auto p = parser("align 4)");
+ auto attr = p->attribute();
+ EXPECT_FALSE(attr.matched);
+ EXPECT_TRUE(attr.errored);
+ EXPECT_EQ(attr.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:7: expected '(' for align attribute");
}
TEST_F(ParserImplTest, Attribute_Align_MissingRightParen) {
- auto p = parser("align(4");
- auto attr = p->attribute();
- EXPECT_FALSE(attr.matched);
- EXPECT_TRUE(attr.errored);
- EXPECT_EQ(attr.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:8: expected ')' for align attribute");
+ auto p = parser("align(4");
+ auto attr = p->attribute();
+ EXPECT_FALSE(attr.matched);
+ EXPECT_TRUE(attr.errored);
+ EXPECT_EQ(attr.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:8: expected ')' for align attribute");
}
TEST_F(ParserImplTest, Attribute_Align_MissingValue) {
- auto p = parser("align()");
- auto attr = p->attribute();
- EXPECT_FALSE(attr.matched);
- EXPECT_TRUE(attr.errored);
- EXPECT_EQ(attr.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(),
- "1:7: expected signed integer literal for align attribute");
+ auto p = parser("align()");
+ auto attr = p->attribute();
+ EXPECT_FALSE(attr.matched);
+ EXPECT_TRUE(attr.errored);
+ EXPECT_EQ(attr.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:7: expected signed integer literal for align attribute");
}
TEST_F(ParserImplTest, Attribute_Align_MissingInvalid) {
- auto p = parser("align(nan)");
- auto attr = p->attribute();
- EXPECT_FALSE(attr.matched);
- EXPECT_TRUE(attr.errored);
- EXPECT_EQ(attr.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(),
- "1:7: expected signed integer literal for align attribute");
+ auto p = parser("align(nan)");
+ auto attr = p->attribute();
+ EXPECT_FALSE(attr.matched);
+ EXPECT_TRUE(attr.errored);
+ EXPECT_EQ(attr.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:7: expected signed integer literal for align attribute");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_struct_member_test.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_struct_member_test.cc
index 28ec44f6964..3e8b60e7159 100644
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_struct_member_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_struct_member_test.cc
@@ -18,96 +18,95 @@ namespace tint::reader::wgsl {
namespace {
TEST_F(ParserImplTest, StructMember_Parses) {
- auto p = parser("a : i32,");
+ auto p = parser("a : i32,");
- auto& builder = p->builder();
+ auto& builder = p->builder();
- auto m = p->expect_struct_member();
- ASSERT_FALSE(p->has_error());
- ASSERT_FALSE(m.errored);
- ASSERT_NE(m.value, nullptr);
+ auto m = p->expect_struct_member();
+ ASSERT_FALSE(p->has_error());
+ ASSERT_FALSE(m.errored);
+ ASSERT_NE(m.value, nullptr);
- EXPECT_EQ(m->symbol, builder.Symbols().Get("a"));
- EXPECT_TRUE(m->type->Is<ast::I32>());
- EXPECT_EQ(m->attributes.size(), 0u);
+ EXPECT_EQ(m->symbol, builder.Symbols().Get("a"));
+ EXPECT_TRUE(m->type->Is<ast::I32>());
+ EXPECT_EQ(m->attributes.size(), 0u);
- EXPECT_EQ(m->source.range, (Source::Range{{1u, 1u}, {1u, 2u}}));
- EXPECT_EQ(m->type->source.range, (Source::Range{{1u, 5u}, {1u, 8u}}));
+ EXPECT_EQ(m->source.range, (Source::Range{{1u, 1u}, {1u, 2u}}));
+ EXPECT_EQ(m->type->source.range, (Source::Range{{1u, 5u}, {1u, 8u}}));
}
TEST_F(ParserImplTest, StructMember_ParsesWithAlignAttribute) {
- auto p = parser("@align(2) a : i32,");
+ auto p = parser("@align(2) a : i32,");
- auto& builder = p->builder();
+ auto& builder = p->builder();
- auto m = p->expect_struct_member();
- ASSERT_FALSE(p->has_error());
- ASSERT_FALSE(m.errored);
- ASSERT_NE(m.value, nullptr);
+ auto m = p->expect_struct_member();
+ ASSERT_FALSE(p->has_error());
+ ASSERT_FALSE(m.errored);
+ ASSERT_NE(m.value, nullptr);
- EXPECT_EQ(m->symbol, builder.Symbols().Get("a"));
- EXPECT_TRUE(m->type->Is<ast::I32>());
- EXPECT_EQ(m->attributes.size(), 1u);
- EXPECT_TRUE(m->attributes[0]->Is<ast::StructMemberAlignAttribute>());
- EXPECT_EQ(m->attributes[0]->As<ast::StructMemberAlignAttribute>()->align, 2u);
+ EXPECT_EQ(m->symbol, builder.Symbols().Get("a"));
+ EXPECT_TRUE(m->type->Is<ast::I32>());
+ EXPECT_EQ(m->attributes.size(), 1u);
+ EXPECT_TRUE(m->attributes[0]->Is<ast::StructMemberAlignAttribute>());
+ EXPECT_EQ(m->attributes[0]->As<ast::StructMemberAlignAttribute>()->align, 2u);
- EXPECT_EQ(m->source.range, (Source::Range{{1u, 11u}, {1u, 12u}}));
- EXPECT_EQ(m->type->source.range, (Source::Range{{1u, 15u}, {1u, 18u}}));
+ EXPECT_EQ(m->source.range, (Source::Range{{1u, 11u}, {1u, 12u}}));
+ EXPECT_EQ(m->type->source.range, (Source::Range{{1u, 15u}, {1u, 18u}}));
}
TEST_F(ParserImplTest, StructMember_ParsesWithSizeAttribute) {
- auto p = parser("@size(2) a : i32,");
+ auto p = parser("@size(2) a : i32,");
- auto& builder = p->builder();
+ auto& builder = p->builder();
- auto m = p->expect_struct_member();
- ASSERT_FALSE(p->has_error());
- ASSERT_FALSE(m.errored);
- ASSERT_NE(m.value, nullptr);
+ auto m = p->expect_struct_member();
+ ASSERT_FALSE(p->has_error());
+ ASSERT_FALSE(m.errored);
+ ASSERT_NE(m.value, nullptr);
- EXPECT_EQ(m->symbol, builder.Symbols().Get("a"));
- EXPECT_TRUE(m->type->Is<ast::I32>());
- EXPECT_EQ(m->attributes.size(), 1u);
- EXPECT_TRUE(m->attributes[0]->Is<ast::StructMemberSizeAttribute>());
- EXPECT_EQ(m->attributes[0]->As<ast::StructMemberSizeAttribute>()->size, 2u);
+ EXPECT_EQ(m->symbol, builder.Symbols().Get("a"));
+ EXPECT_TRUE(m->type->Is<ast::I32>());
+ EXPECT_EQ(m->attributes.size(), 1u);
+ EXPECT_TRUE(m->attributes[0]->Is<ast::StructMemberSizeAttribute>());
+ EXPECT_EQ(m->attributes[0]->As<ast::StructMemberSizeAttribute>()->size, 2u);
- EXPECT_EQ(m->source.range, (Source::Range{{1u, 10u}, {1u, 11u}}));
- EXPECT_EQ(m->type->source.range, (Source::Range{{1u, 14u}, {1u, 17u}}));
+ EXPECT_EQ(m->source.range, (Source::Range{{1u, 10u}, {1u, 11u}}));
+ EXPECT_EQ(m->type->source.range, (Source::Range{{1u, 14u}, {1u, 17u}}));
}
TEST_F(ParserImplTest, StructMember_ParsesWithMultipleattributes) {
- auto p = parser(R"(@size(2)
+ auto p = parser(R"(@size(2)
@align(4) a : i32,)");
- auto& builder = p->builder();
+ auto& builder = p->builder();
- auto m = p->expect_struct_member();
- ASSERT_FALSE(p->has_error());
- ASSERT_FALSE(m.errored);
- ASSERT_NE(m.value, nullptr);
+ auto m = p->expect_struct_member();
+ ASSERT_FALSE(p->has_error());
+ ASSERT_FALSE(m.errored);
+ ASSERT_NE(m.value, nullptr);
- EXPECT_EQ(m->symbol, builder.Symbols().Get("a"));
- EXPECT_TRUE(m->type->Is<ast::I32>());
- EXPECT_EQ(m->attributes.size(), 2u);
- EXPECT_TRUE(m->attributes[0]->Is<ast::StructMemberSizeAttribute>());
- EXPECT_EQ(m->attributes[0]->As<ast::StructMemberSizeAttribute>()->size, 2u);
- EXPECT_TRUE(m->attributes[1]->Is<ast::StructMemberAlignAttribute>());
- EXPECT_EQ(m->attributes[1]->As<ast::StructMemberAlignAttribute>()->align, 4u);
+ EXPECT_EQ(m->symbol, builder.Symbols().Get("a"));
+ EXPECT_TRUE(m->type->Is<ast::I32>());
+ EXPECT_EQ(m->attributes.size(), 2u);
+ EXPECT_TRUE(m->attributes[0]->Is<ast::StructMemberSizeAttribute>());
+ EXPECT_EQ(m->attributes[0]->As<ast::StructMemberSizeAttribute>()->size, 2u);
+ EXPECT_TRUE(m->attributes[1]->Is<ast::StructMemberAlignAttribute>());
+ EXPECT_EQ(m->attributes[1]->As<ast::StructMemberAlignAttribute>()->align, 4u);
- EXPECT_EQ(m->source.range, (Source::Range{{2u, 11u}, {2u, 12u}}));
- EXPECT_EQ(m->type->source.range, (Source::Range{{2u, 15u}, {2u, 18u}}));
+ EXPECT_EQ(m->source.range, (Source::Range{{2u, 11u}, {2u, 12u}}));
+ EXPECT_EQ(m->type->source.range, (Source::Range{{2u, 15u}, {2u, 18u}}));
}
TEST_F(ParserImplTest, StructMember_InvalidAttribute) {
- auto p = parser("@size(nan) a : i32,");
+ auto p = parser("@size(nan) a : i32,");
- auto m = p->expect_struct_member();
- ASSERT_TRUE(m.errored);
- ASSERT_EQ(m.value, nullptr);
+ auto m = p->expect_struct_member();
+ ASSERT_TRUE(m.errored);
+ ASSERT_EQ(m.value, nullptr);
- ASSERT_TRUE(p->has_error());
- EXPECT_EQ(p->error(),
- "1:7: expected signed integer literal for size attribute");
+ ASSERT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:7: expected signed integer literal for size attribute");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_switch_body_test.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_switch_body_test.cc
index 3efb0c5d47c..0f7384de83e 100644
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_switch_body_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_switch_body_test.cc
@@ -18,251 +18,259 @@ namespace tint::reader::wgsl {
namespace {
TEST_F(ParserImplTest, SwitchBody_Case) {
- auto p = parser("case 1 { a = 4; }");
- auto e = p->switch_body();
- EXPECT_FALSE(p->has_error()) << p->error();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- ASSERT_NE(e.value, nullptr);
- ASSERT_TRUE(e->Is<ast::CaseStatement>());
- EXPECT_FALSE(e->IsDefault());
- auto* stmt = e->As<ast::CaseStatement>();
- ASSERT_EQ(stmt->selectors.size(), 1u);
- EXPECT_EQ(stmt->selectors[0]->ValueAsU32(), 1u);
- ASSERT_EQ(e->body->statements.size(), 1u);
- EXPECT_TRUE(e->body->statements[0]->Is<ast::AssignmentStatement>());
+ auto p = parser("case 1 { a = 4; }");
+ auto e = p->switch_body();
+ EXPECT_FALSE(p->has_error()) << p->error();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ ASSERT_NE(e.value, nullptr);
+ ASSERT_TRUE(e->Is<ast::CaseStatement>());
+ EXPECT_FALSE(e->IsDefault());
+ auto* stmt = e->As<ast::CaseStatement>();
+ ASSERT_EQ(stmt->selectors.size(), 1u);
+ EXPECT_EQ(stmt->selectors[0]->value, 1);
+ EXPECT_EQ(stmt->selectors[0]->suffix, ast::IntLiteralExpression::Suffix::kNone);
+ ASSERT_EQ(e->body->statements.size(), 1u);
+ EXPECT_TRUE(e->body->statements[0]->Is<ast::AssignmentStatement>());
}
TEST_F(ParserImplTest, SwitchBody_Case_WithColon) {
- auto p = parser("case 1: { a = 4; }");
- auto e = p->switch_body();
- EXPECT_FALSE(p->has_error()) << p->error();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- ASSERT_NE(e.value, nullptr);
- ASSERT_TRUE(e->Is<ast::CaseStatement>());
- EXPECT_FALSE(e->IsDefault());
- auto* stmt = e->As<ast::CaseStatement>();
- ASSERT_EQ(stmt->selectors.size(), 1u);
- EXPECT_EQ(stmt->selectors[0]->ValueAsU32(), 1u);
- ASSERT_EQ(e->body->statements.size(), 1u);
- EXPECT_TRUE(e->body->statements[0]->Is<ast::AssignmentStatement>());
+ auto p = parser("case 1: { a = 4; }");
+ auto e = p->switch_body();
+ EXPECT_FALSE(p->has_error()) << p->error();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ ASSERT_NE(e.value, nullptr);
+ ASSERT_TRUE(e->Is<ast::CaseStatement>());
+ EXPECT_FALSE(e->IsDefault());
+ auto* stmt = e->As<ast::CaseStatement>();
+ ASSERT_EQ(stmt->selectors.size(), 1u);
+ EXPECT_EQ(stmt->selectors[0]->value, 1);
+ EXPECT_EQ(stmt->selectors[0]->suffix, ast::IntLiteralExpression::Suffix::kNone);
+ ASSERT_EQ(e->body->statements.size(), 1u);
+ EXPECT_TRUE(e->body->statements[0]->Is<ast::AssignmentStatement>());
}
TEST_F(ParserImplTest, SwitchBody_Case_TrailingComma) {
- auto p = parser("case 1, 2, { }");
- auto e = p->switch_body();
- EXPECT_FALSE(p->has_error()) << p->error();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- ASSERT_NE(e.value, nullptr);
- ASSERT_TRUE(e->Is<ast::CaseStatement>());
- EXPECT_FALSE(e->IsDefault());
- auto* stmt = e->As<ast::CaseStatement>();
- ASSERT_EQ(stmt->selectors.size(), 2u);
- EXPECT_EQ(stmt->selectors[0]->ValueAsU32(), 1u);
- EXPECT_EQ(stmt->selectors[1]->ValueAsU32(), 2u);
+ auto p = parser("case 1, 2, { }");
+ auto e = p->switch_body();
+ EXPECT_FALSE(p->has_error()) << p->error();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ ASSERT_NE(e.value, nullptr);
+ ASSERT_TRUE(e->Is<ast::CaseStatement>());
+ EXPECT_FALSE(e->IsDefault());
+ auto* stmt = e->As<ast::CaseStatement>();
+ ASSERT_EQ(stmt->selectors.size(), 2u);
+ EXPECT_EQ(stmt->selectors[0]->value, 1);
+ EXPECT_EQ(stmt->selectors[0]->suffix, ast::IntLiteralExpression::Suffix::kNone);
+ EXPECT_EQ(stmt->selectors[1]->value, 2);
}
TEST_F(ParserImplTest, SwitchBody_Case_TrailingComma_WithColon) {
- auto p = parser("case 1, 2,: { }");
- auto e = p->switch_body();
- EXPECT_FALSE(p->has_error()) << p->error();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- ASSERT_NE(e.value, nullptr);
- ASSERT_TRUE(e->Is<ast::CaseStatement>());
- EXPECT_FALSE(e->IsDefault());
- auto* stmt = e->As<ast::CaseStatement>();
- ASSERT_EQ(stmt->selectors.size(), 2u);
- EXPECT_EQ(stmt->selectors[0]->ValueAsU32(), 1u);
- EXPECT_EQ(stmt->selectors[1]->ValueAsU32(), 2u);
+ auto p = parser("case 1, 2,: { }");
+ auto e = p->switch_body();
+ EXPECT_FALSE(p->has_error()) << p->error();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ ASSERT_NE(e.value, nullptr);
+ ASSERT_TRUE(e->Is<ast::CaseStatement>());
+ EXPECT_FALSE(e->IsDefault());
+ auto* stmt = e->As<ast::CaseStatement>();
+ ASSERT_EQ(stmt->selectors.size(), 2u);
+ EXPECT_EQ(stmt->selectors[0]->value, 1);
+ EXPECT_EQ(stmt->selectors[0]->suffix, ast::IntLiteralExpression::Suffix::kNone);
+ EXPECT_EQ(stmt->selectors[1]->value, 2);
}
TEST_F(ParserImplTest, SwitchBody_Case_InvalidConstLiteral) {
- auto p = parser("case a == 4: { a = 4; }");
- auto e = p->switch_body();
- EXPECT_TRUE(p->has_error());
- EXPECT_TRUE(e.errored);
- EXPECT_FALSE(e.matched);
- EXPECT_EQ(e.value, nullptr);
- EXPECT_EQ(p->error(), "1:6: unable to parse case selectors");
+ auto p = parser("case a == 4: { a = 4; }");
+ auto e = p->switch_body();
+ EXPECT_TRUE(p->has_error());
+ EXPECT_TRUE(e.errored);
+ EXPECT_FALSE(e.matched);
+ EXPECT_EQ(e.value, nullptr);
+ EXPECT_EQ(p->error(), "1:6: unable to parse case selectors");
}
TEST_F(ParserImplTest, SwitchBody_Case_InvalidSelector_bool) {
- auto p = parser("case true: { a = 4; }");
- auto e = p->switch_body();
- EXPECT_TRUE(p->has_error());
- EXPECT_TRUE(e.errored);
- EXPECT_FALSE(e.matched);
- EXPECT_EQ(e.value, nullptr);
- EXPECT_EQ(p->error(), "1:6: invalid case selector must be an integer value");
+ auto p = parser("case true: { a = 4; }");
+ auto e = p->switch_body();
+ EXPECT_TRUE(p->has_error());
+ EXPECT_TRUE(e.errored);
+ EXPECT_FALSE(e.matched);
+ EXPECT_EQ(e.value, nullptr);
+ EXPECT_EQ(p->error(), "1:6: invalid case selector must be an integer value");
}
TEST_F(ParserImplTest, SwitchBody_Case_MissingConstLiteral) {
- auto p = parser("case: { a = 4; }");
- auto e = p->switch_body();
- EXPECT_TRUE(p->has_error());
- EXPECT_TRUE(e.errored);
- EXPECT_FALSE(e.matched);
- EXPECT_EQ(e.value, nullptr);
- EXPECT_EQ(p->error(), "1:5: unable to parse case selectors");
+ auto p = parser("case: { a = 4; }");
+ auto e = p->switch_body();
+ EXPECT_TRUE(p->has_error());
+ EXPECT_TRUE(e.errored);
+ EXPECT_FALSE(e.matched);
+ EXPECT_EQ(e.value, nullptr);
+ EXPECT_EQ(p->error(), "1:5: unable to parse case selectors");
}
TEST_F(ParserImplTest, SwitchBody_Case_MissingBracketLeft) {
- auto p = parser("case 1 a = 4; }");
- auto e = p->switch_body();
- EXPECT_TRUE(p->has_error());
- EXPECT_TRUE(e.errored);
- EXPECT_FALSE(e.matched);
- EXPECT_EQ(e.value, nullptr);
- EXPECT_EQ(p->error(), "1:8: expected '{' for case statement");
+ auto p = parser("case 1 a = 4; }");
+ auto e = p->switch_body();
+ EXPECT_TRUE(p->has_error());
+ EXPECT_TRUE(e.errored);
+ EXPECT_FALSE(e.matched);
+ EXPECT_EQ(e.value, nullptr);
+ EXPECT_EQ(p->error(), "1:8: expected '{' for case statement");
}
TEST_F(ParserImplTest, SwitchBody_Case_MissingBracketLeft_WithColon) {
- auto p = parser("case 1: a = 4; }");
- auto e = p->switch_body();
- EXPECT_TRUE(p->has_error());
- EXPECT_TRUE(e.errored);
- EXPECT_FALSE(e.matched);
- EXPECT_EQ(e.value, nullptr);
- EXPECT_EQ(p->error(), "1:9: expected '{' for case statement");
+ auto p = parser("case 1: a = 4; }");
+ auto e = p->switch_body();
+ EXPECT_TRUE(p->has_error());
+ EXPECT_TRUE(e.errored);
+ EXPECT_FALSE(e.matched);
+ EXPECT_EQ(e.value, nullptr);
+ EXPECT_EQ(p->error(), "1:9: expected '{' for case statement");
}
TEST_F(ParserImplTest, SwitchBody_Case_MissingBracketRight) {
- auto p = parser("case 1: { a = 4; ");
- auto e = p->switch_body();
- EXPECT_TRUE(p->has_error());
- EXPECT_TRUE(e.errored);
- EXPECT_FALSE(e.matched);
- EXPECT_EQ(e.value, nullptr);
- EXPECT_EQ(p->error(), "1:18: expected '}' for case statement");
+ auto p = parser("case 1: { a = 4; ");
+ auto e = p->switch_body();
+ EXPECT_TRUE(p->has_error());
+ EXPECT_TRUE(e.errored);
+ EXPECT_FALSE(e.matched);
+ EXPECT_EQ(e.value, nullptr);
+ EXPECT_EQ(p->error(), "1:18: expected '}' for case statement");
}
TEST_F(ParserImplTest, SwitchBody_Case_InvalidCaseBody) {
- auto p = parser("case 1: { fn main() {} }");
- auto e = p->switch_body();
- EXPECT_TRUE(p->has_error());
- EXPECT_TRUE(e.errored);
- EXPECT_FALSE(e.matched);
- EXPECT_EQ(e.value, nullptr);
- EXPECT_EQ(p->error(), "1:11: expected '}' for case statement");
+ auto p = parser("case 1: { fn main() {} }");
+ auto e = p->switch_body();
+ EXPECT_TRUE(p->has_error());
+ EXPECT_TRUE(e.errored);
+ EXPECT_FALSE(e.matched);
+ EXPECT_EQ(e.value, nullptr);
+ EXPECT_EQ(p->error(), "1:11: expected '}' for case statement");
}
TEST_F(ParserImplTest, SwitchBody_Case_MultipleSelectors) {
- auto p = parser("case 1, 2 { }");
- auto e = p->switch_body();
- EXPECT_FALSE(p->has_error()) << p->error();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- ASSERT_NE(e.value, nullptr);
- ASSERT_TRUE(e->Is<ast::CaseStatement>());
- EXPECT_FALSE(e->IsDefault());
- ASSERT_EQ(e->body->statements.size(), 0u);
- ASSERT_EQ(e->selectors.size(), 2u);
- ASSERT_EQ(e->selectors[0]->ValueAsI32(), 1);
- ASSERT_EQ(e->selectors[1]->ValueAsI32(), 2);
+ auto p = parser("case 1, 2 { }");
+ auto e = p->switch_body();
+ EXPECT_FALSE(p->has_error()) << p->error();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ ASSERT_NE(e.value, nullptr);
+ ASSERT_TRUE(e->Is<ast::CaseStatement>());
+ EXPECT_FALSE(e->IsDefault());
+ ASSERT_EQ(e->body->statements.size(), 0u);
+ ASSERT_EQ(e->selectors.size(), 2u);
+ ASSERT_EQ(e->selectors[0]->value, 1);
+ EXPECT_EQ(e->selectors[0]->suffix, ast::IntLiteralExpression::Suffix::kNone);
+ ASSERT_EQ(e->selectors[1]->value, 2);
+ EXPECT_EQ(e->selectors[1]->suffix, ast::IntLiteralExpression::Suffix::kNone);
}
TEST_F(ParserImplTest, SwitchBody_Case_MultipleSelectors_WithColon) {
- auto p = parser("case 1, 2: { }");
- auto e = p->switch_body();
- EXPECT_FALSE(p->has_error()) << p->error();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- ASSERT_NE(e.value, nullptr);
- ASSERT_TRUE(e->Is<ast::CaseStatement>());
- EXPECT_FALSE(e->IsDefault());
- ASSERT_EQ(e->body->statements.size(), 0u);
- ASSERT_EQ(e->selectors.size(), 2u);
- ASSERT_EQ(e->selectors[0]->ValueAsI32(), 1);
- ASSERT_EQ(e->selectors[1]->ValueAsI32(), 2);
+ auto p = parser("case 1, 2: { }");
+ auto e = p->switch_body();
+ EXPECT_FALSE(p->has_error()) << p->error();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ ASSERT_NE(e.value, nullptr);
+ ASSERT_TRUE(e->Is<ast::CaseStatement>());
+ EXPECT_FALSE(e->IsDefault());
+ ASSERT_EQ(e->body->statements.size(), 0u);
+ ASSERT_EQ(e->selectors.size(), 2u);
+ ASSERT_EQ(e->selectors[0]->value, 1);
+ EXPECT_EQ(e->selectors[0]->suffix, ast::IntLiteralExpression::Suffix::kNone);
+ ASSERT_EQ(e->selectors[1]->value, 2);
+ EXPECT_EQ(e->selectors[1]->suffix, ast::IntLiteralExpression::Suffix::kNone);
}
TEST_F(ParserImplTest, SwitchBody_Case_MultipleSelectorsMissingComma) {
- auto p = parser("case 1 2: { }");
- auto e = p->switch_body();
- EXPECT_TRUE(p->has_error());
- EXPECT_TRUE(e.errored);
- EXPECT_FALSE(e.matched);
- EXPECT_EQ(e.value, nullptr);
- EXPECT_EQ(p->error(), "1:8: expected '{' for case statement");
+ auto p = parser("case 1 2: { }");
+ auto e = p->switch_body();
+ EXPECT_TRUE(p->has_error());
+ EXPECT_TRUE(e.errored);
+ EXPECT_FALSE(e.matched);
+ EXPECT_EQ(e.value, nullptr);
+ EXPECT_EQ(p->error(), "1:8: expected '{' for case statement");
}
TEST_F(ParserImplTest, SwitchBody_Case_MultipleSelectorsStartsWithComma) {
- auto p = parser("case , 1, 2: { }");
- auto e = p->switch_body();
- EXPECT_TRUE(p->has_error());
- EXPECT_TRUE(e.errored);
- EXPECT_FALSE(e.matched);
- EXPECT_EQ(e.value, nullptr);
- EXPECT_EQ(p->error(), "1:6: unable to parse case selectors");
+ auto p = parser("case , 1, 2: { }");
+ auto e = p->switch_body();
+ EXPECT_TRUE(p->has_error());
+ EXPECT_TRUE(e.errored);
+ EXPECT_FALSE(e.matched);
+ EXPECT_EQ(e.value, nullptr);
+ EXPECT_EQ(p->error(), "1:6: unable to parse case selectors");
}
TEST_F(ParserImplTest, SwitchBody_Default) {
- auto p = parser("default { a = 4; }");
- auto e = p->switch_body();
- EXPECT_FALSE(p->has_error()) << p->error();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- ASSERT_NE(e.value, nullptr);
- ASSERT_TRUE(e->Is<ast::CaseStatement>());
- EXPECT_TRUE(e->IsDefault());
- ASSERT_EQ(e->body->statements.size(), 1u);
- EXPECT_TRUE(e->body->statements[0]->Is<ast::AssignmentStatement>());
+ auto p = parser("default { a = 4; }");
+ auto e = p->switch_body();
+ EXPECT_FALSE(p->has_error()) << p->error();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ ASSERT_NE(e.value, nullptr);
+ ASSERT_TRUE(e->Is<ast::CaseStatement>());
+ EXPECT_TRUE(e->IsDefault());
+ ASSERT_EQ(e->body->statements.size(), 1u);
+ EXPECT_TRUE(e->body->statements[0]->Is<ast::AssignmentStatement>());
}
TEST_F(ParserImplTest, SwitchBody_Default_WithColon) {
- auto p = parser("default: { a = 4; }");
- auto e = p->switch_body();
- EXPECT_FALSE(p->has_error()) << p->error();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- ASSERT_NE(e.value, nullptr);
- ASSERT_TRUE(e->Is<ast::CaseStatement>());
- EXPECT_TRUE(e->IsDefault());
- ASSERT_EQ(e->body->statements.size(), 1u);
- EXPECT_TRUE(e->body->statements[0]->Is<ast::AssignmentStatement>());
+ auto p = parser("default: { a = 4; }");
+ auto e = p->switch_body();
+ EXPECT_FALSE(p->has_error()) << p->error();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ ASSERT_NE(e.value, nullptr);
+ ASSERT_TRUE(e->Is<ast::CaseStatement>());
+ EXPECT_TRUE(e->IsDefault());
+ ASSERT_EQ(e->body->statements.size(), 1u);
+ EXPECT_TRUE(e->body->statements[0]->Is<ast::AssignmentStatement>());
}
TEST_F(ParserImplTest, SwitchBody_Default_MissingBracketLeft) {
- auto p = parser("default a = 4; }");
- auto e = p->switch_body();
- EXPECT_TRUE(p->has_error());
- EXPECT_TRUE(e.errored);
- EXPECT_FALSE(e.matched);
- EXPECT_EQ(e.value, nullptr);
- EXPECT_EQ(p->error(), "1:9: expected '{' for case statement");
+ auto p = parser("default a = 4; }");
+ auto e = p->switch_body();
+ EXPECT_TRUE(p->has_error());
+ EXPECT_TRUE(e.errored);
+ EXPECT_FALSE(e.matched);
+ EXPECT_EQ(e.value, nullptr);
+ EXPECT_EQ(p->error(), "1:9: expected '{' for case statement");
}
TEST_F(ParserImplTest, SwitchBody_Default_MissingBracketLeft_WithColon) {
- auto p = parser("default: a = 4; }");
- auto e = p->switch_body();
- EXPECT_TRUE(p->has_error());
- EXPECT_TRUE(e.errored);
- EXPECT_FALSE(e.matched);
- EXPECT_EQ(e.value, nullptr);
- EXPECT_EQ(p->error(), "1:10: expected '{' for case statement");
+ auto p = parser("default: a = 4; }");
+ auto e = p->switch_body();
+ EXPECT_TRUE(p->has_error());
+ EXPECT_TRUE(e.errored);
+ EXPECT_FALSE(e.matched);
+ EXPECT_EQ(e.value, nullptr);
+ EXPECT_EQ(p->error(), "1:10: expected '{' for case statement");
}
TEST_F(ParserImplTest, SwitchBody_Default_MissingBracketRight) {
- auto p = parser("default: { a = 4; ");
- auto e = p->switch_body();
- EXPECT_TRUE(p->has_error());
- EXPECT_TRUE(e.errored);
- EXPECT_FALSE(e.matched);
- EXPECT_EQ(e.value, nullptr);
- EXPECT_EQ(p->error(), "1:19: expected '}' for case statement");
+ auto p = parser("default: { a = 4; ");
+ auto e = p->switch_body();
+ EXPECT_TRUE(p->has_error());
+ EXPECT_TRUE(e.errored);
+ EXPECT_FALSE(e.matched);
+ EXPECT_EQ(e.value, nullptr);
+ EXPECT_EQ(p->error(), "1:19: expected '}' for case statement");
}
TEST_F(ParserImplTest, SwitchBody_Default_InvalidCaseBody) {
- auto p = parser("default: { fn main() {} }");
- auto e = p->switch_body();
- EXPECT_TRUE(p->has_error());
- EXPECT_TRUE(e.errored);
- EXPECT_FALSE(e.matched);
- EXPECT_EQ(e.value, nullptr);
- EXPECT_EQ(p->error(), "1:12: expected '}' for case statement");
+ auto p = parser("default: { fn main() {} }");
+ auto e = p->switch_body();
+ EXPECT_TRUE(p->has_error());
+ EXPECT_TRUE(e.errored);
+ EXPECT_FALSE(e.matched);
+ EXPECT_EQ(e.value, nullptr);
+ EXPECT_EQ(p->error(), "1:12: expected '}' for case statement");
}
} // namespace
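
The switch-body tests above move from reading case selectors through ValueAsU32()/ValueAsI32() to inspecting the literal expression directly via its value and suffix fields. A minimal sketch of that access pattern, using only names that appear in the updated expectations:

    // Assumes stmt is the ast::CaseStatement obtained as in the tests above.
    auto* sel = stmt->selectors[0];
    EXPECT_EQ(sel->value, 1);                                         // numeric value, no conversion helper
    EXPECT_EQ(sel->suffix, ast::IntLiteralExpression::Suffix::kNone); // unsuffixed literal (not 1i or 1u)
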
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_switch_stmt_test.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_switch_stmt_test.cc
index 12374679998..c898b123d86 100644
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_switch_stmt_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_switch_stmt_test.cc
@@ -18,112 +18,112 @@ namespace tint::reader::wgsl {
namespace {
TEST_F(ParserImplTest, SwitchStmt_WithoutDefault) {
- auto p = parser(R"(switch a {
+ auto p = parser(R"(switch a {
case 1: {}
case 2: {}
})");
- auto e = p->switch_stmt();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
- ASSERT_TRUE(e->Is<ast::SwitchStatement>());
- ASSERT_EQ(e->body.size(), 2u);
- EXPECT_FALSE(e->body[0]->IsDefault());
- EXPECT_FALSE(e->body[1]->IsDefault());
+ auto e = p->switch_stmt();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
+ ASSERT_TRUE(e->Is<ast::SwitchStatement>());
+ ASSERT_EQ(e->body.size(), 2u);
+ EXPECT_FALSE(e->body[0]->IsDefault());
+ EXPECT_FALSE(e->body[1]->IsDefault());
}
TEST_F(ParserImplTest, SwitchStmt_Empty) {
- auto p = parser("switch a { }");
- auto e = p->switch_stmt();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
- ASSERT_TRUE(e->Is<ast::SwitchStatement>());
- ASSERT_EQ(e->body.size(), 0u);
+ auto p = parser("switch a { }");
+ auto e = p->switch_stmt();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
+ ASSERT_TRUE(e->Is<ast::SwitchStatement>());
+ ASSERT_EQ(e->body.size(), 0u);
}
TEST_F(ParserImplTest, SwitchStmt_DefaultInMiddle) {
- auto p = parser(R"(switch a {
+ auto p = parser(R"(switch a {
case 1: {}
default: {}
case 2: {}
})");
- auto e = p->switch_stmt();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
- ASSERT_TRUE(e->Is<ast::SwitchStatement>());
+ auto e = p->switch_stmt();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
+ ASSERT_TRUE(e->Is<ast::SwitchStatement>());
- ASSERT_EQ(e->body.size(), 3u);
- ASSERT_FALSE(e->body[0]->IsDefault());
- ASSERT_TRUE(e->body[1]->IsDefault());
- ASSERT_FALSE(e->body[2]->IsDefault());
+ ASSERT_EQ(e->body.size(), 3u);
+ ASSERT_FALSE(e->body[0]->IsDefault());
+ ASSERT_TRUE(e->body[1]->IsDefault());
+ ASSERT_FALSE(e->body[2]->IsDefault());
}
TEST_F(ParserImplTest, SwitchStmt_WithParens) {
- auto p = parser("switch(a+b) { }");
- auto e = p->switch_stmt();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
- ASSERT_TRUE(e->Is<ast::SwitchStatement>());
- ASSERT_EQ(e->body.size(), 0u);
+ auto p = parser("switch(a+b) { }");
+ auto e = p->switch_stmt();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
+ ASSERT_TRUE(e->Is<ast::SwitchStatement>());
+ ASSERT_EQ(e->body.size(), 0u);
}
TEST_F(ParserImplTest, SwitchStmt_InvalidExpression) {
- auto p = parser("switch a=b {}");
- auto e = p->switch_stmt();
- EXPECT_FALSE(e.matched);
- EXPECT_TRUE(e.errored);
- EXPECT_EQ(e.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:9: expected '{' for switch statement");
+ auto p = parser("switch a=b {}");
+ auto e = p->switch_stmt();
+ EXPECT_FALSE(e.matched);
+ EXPECT_TRUE(e.errored);
+ EXPECT_EQ(e.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:9: expected '{' for switch statement");
}
TEST_F(ParserImplTest, SwitchStmt_MissingExpression) {
- auto p = parser("switch {}");
- auto e = p->switch_stmt();
- EXPECT_FALSE(e.matched);
- EXPECT_TRUE(e.errored);
- EXPECT_EQ(e.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:8: unable to parse selector expression");
+ auto p = parser("switch {}");
+ auto e = p->switch_stmt();
+ EXPECT_FALSE(e.matched);
+ EXPECT_TRUE(e.errored);
+ EXPECT_EQ(e.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:8: unable to parse selector expression");
}
TEST_F(ParserImplTest, SwitchStmt_MissingBracketLeft) {
- auto p = parser("switch a }");
- auto e = p->switch_stmt();
- EXPECT_FALSE(e.matched);
- EXPECT_TRUE(e.errored);
- EXPECT_EQ(e.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:10: expected '{' for switch statement");
+ auto p = parser("switch a }");
+ auto e = p->switch_stmt();
+ EXPECT_FALSE(e.matched);
+ EXPECT_TRUE(e.errored);
+ EXPECT_EQ(e.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:10: expected '{' for switch statement");
}
TEST_F(ParserImplTest, SwitchStmt_MissingBracketRight) {
- auto p = parser("switch a {");
- auto e = p->switch_stmt();
- EXPECT_FALSE(e.matched);
- EXPECT_TRUE(e.errored);
- EXPECT_EQ(e.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:11: expected '}' for switch statement");
+ auto p = parser("switch a {");
+ auto e = p->switch_stmt();
+ EXPECT_FALSE(e.matched);
+ EXPECT_TRUE(e.errored);
+ EXPECT_EQ(e.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:11: expected '}' for switch statement");
}
TEST_F(ParserImplTest, SwitchStmt_InvalidBody) {
- auto p = parser(R"(switch a {
+ auto p = parser(R"(switch a {
case: {}
})");
- auto e = p->switch_stmt();
- EXPECT_FALSE(e.matched);
- EXPECT_TRUE(e.errored);
- EXPECT_EQ(e.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "2:7: unable to parse case selectors");
+ auto e = p->switch_stmt();
+ EXPECT_FALSE(e.matched);
+ EXPECT_TRUE(e.errored);
+ EXPECT_EQ(e.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "2:7: unable to parse case selectors");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_test.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_test.cc
index 5caa96b6163..99ca25da019 100644
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_test.cc
@@ -18,25 +18,25 @@ namespace tint::reader::wgsl {
namespace {
TEST_F(ParserImplTest, Empty) {
- auto p = parser("");
- ASSERT_TRUE(p->Parse()) << p->error();
+ auto p = parser("");
+ ASSERT_TRUE(p->Parse()) << p->error();
}
TEST_F(ParserImplTest, Parses) {
- auto p = parser(R"(
-@stage(fragment)
+ auto p = parser(R"(
+@fragment
fn main() -> @location(0) vec4<f32> {
return vec4<f32>(.4, .2, .3, 1);
}
)");
- ASSERT_TRUE(p->Parse()) << p->error();
+ ASSERT_TRUE(p->Parse()) << p->error();
- Program program = p->program();
- ASSERT_EQ(1u, program.AST().Functions().size());
+ Program program = p->program();
+ ASSERT_EQ(1u, program.AST().Functions().size());
}
TEST_F(ParserImplTest, Parses_ExtraSemicolons) {
- auto p = parser(R"(
+ auto p = parser(R"(
;
struct S {
a : f32,
@@ -49,70 +49,70 @@ fn foo() -> S {
};;
;
)");
- ASSERT_TRUE(p->Parse()) << p->error();
+ ASSERT_TRUE(p->Parse()) << p->error();
- Program program = p->program();
- ASSERT_EQ(1u, program.AST().Functions().size());
- ASSERT_EQ(1u, program.AST().TypeDecls().size());
+ Program program = p->program();
+ ASSERT_EQ(1u, program.AST().Functions().size());
+ ASSERT_EQ(1u, program.AST().TypeDecls().size());
}
TEST_F(ParserImplTest, HandlesError) {
- auto p = parser(R"(
+ auto p = parser(R"(
fn main() -> { // missing return type
return;
})");
- ASSERT_FALSE(p->Parse());
- ASSERT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "2:15: unable to determine function return type");
+ ASSERT_FALSE(p->Parse());
+ ASSERT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "2:15: unable to determine function return type");
}
TEST_F(ParserImplTest, HandlesUnexpectedToken) {
- auto p = parser(R"(
+ auto p = parser(R"(
fn main() {
}
foobar
)");
- ASSERT_FALSE(p->Parse());
- ASSERT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "4:1: unexpected token");
+ ASSERT_FALSE(p->Parse());
+ ASSERT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "4:1: unexpected token");
}
TEST_F(ParserImplTest, HandlesBadToken_InMiddle) {
- auto p = parser(R"(
+ auto p = parser(R"(
fn main() {
- let f = 0x1p500000000000; // Exponent too big for hex float
+ let f = 0x1p10000000000000000000; // Exponent too big for hex float
return;
})");
- ASSERT_FALSE(p->Parse());
- ASSERT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "3:11: exponent is too large for hex float");
+ ASSERT_FALSE(p->Parse());
+ ASSERT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "3:11: exponent is too large for hex float");
}
TEST_F(ParserImplTest, HandlesBadToken_AtModuleScope) {
- auto p = parser(R"(
+ auto p = parser(R"(
fn main() {
return;
}
-0x1p5000000000000
+0x1p10000000000000000000
)");
- ASSERT_FALSE(p->Parse());
- ASSERT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "5:1: exponent is too large for hex float");
+ ASSERT_FALSE(p->Parse());
+ ASSERT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "5:1: exponent is too large for hex float");
}
TEST_F(ParserImplTest, Comments_TerminatedBlockComment) {
- auto p = parser(R"(
+ auto p = parser(R"(
/**
* Here is my shader.
*
* /* I can nest /**/ comments. */
* // I can nest line comments too.
**/
-@stage(fragment) // This is the stage
+@fragment // This is the stage
fn main(/*
no
parameters
@@ -120,20 +120,20 @@ parameters
return/*block_comments_delimit_tokens*/vec4<f32>(.4, .2, .3, 1);
}/* block comments are OK at EOF...*/)");
- ASSERT_TRUE(p->Parse()) << p->error();
- ASSERT_EQ(1u, p->program().AST().Functions().size());
+ ASSERT_TRUE(p->Parse()) << p->error();
+ ASSERT_EQ(1u, p->program().AST().Functions().size());
}
TEST_F(ParserImplTest, Comments_UnterminatedBlockComment) {
- auto p = parser(R"(
-@stage(fragment)
+ auto p = parser(R"(
+@fragment
fn main() -> @location(0) vec4<f32> {
return vec4<f32>(.4, .2, .3, 1);
} /* unterminated block comments are invalid ...)");
- ASSERT_FALSE(p->Parse());
- ASSERT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "5:3: unterminated block comment") << p->error();
+ ASSERT_FALSE(p->Parse());
+ ASSERT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "5:3: unterminated block comment") << p->error();
}
} // namespace
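
The hunks above track two WGSL front-end changes: the entry-point attribute moves from the long form `@stage(fragment)` to the short form `@fragment`, and the deliberately out-of-range hex-float exponents are made larger while keeping the same expected error. A minimal sketch of a test for the short form, reusing the `parser()` helper from these files (the test name is hypothetical):

TEST_F(ParserImplTest, Parses_ShortFormFragmentAttribute) {
    // Hypothetical check: the short-form @fragment attribute should parse the
    // same way as the @stage(fragment) form it replaces above.
    auto p = parser(R"(
@fragment
fn main() -> @location(0) vec4<f32> {
    return vec4<f32>(0.1, 0.2, 0.3, 1.0);
}
)");
    ASSERT_TRUE(p->Parse()) << p->error();
    EXPECT_EQ(1u, p->program().AST().Functions().size());
}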
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_test_helper.h b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_test_helper.h
index 8823fe1dcea..f6ee8ea3b7d 100644
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_test_helper.h
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_test_helper.h
@@ -27,46 +27,45 @@ namespace tint::reader::wgsl {
/// WGSL Parser test class
class ParserImplTest : public testing::Test, public ProgramBuilder {
- public:
- /// Constructor
- ParserImplTest();
- ~ParserImplTest() override;
+ public:
+ /// Constructor
+ ParserImplTest();
+ ~ParserImplTest() override;
- /// Retrieves the parser from the helper
- /// @param str the string to parse
- /// @returns the parser implementation
- std::unique_ptr<ParserImpl> parser(const std::string& str) {
- auto file = std::make_unique<Source::File>("test.wgsl", str);
- auto impl = std::make_unique<ParserImpl>(file.get());
- files_.emplace_back(std::move(file));
- return impl;
- }
+ /// Retrieves the parser from the helper
+ /// @param str the string to parse
+ /// @returns the parser implementation
+ std::unique_ptr<ParserImpl> parser(const std::string& str) {
+ auto file = std::make_unique<Source::File>("test.wgsl", str);
+ auto impl = std::make_unique<ParserImpl>(file.get());
+ files_.emplace_back(std::move(file));
+ return impl;
+ }
- private:
- std::vector<std::unique_ptr<Source::File>> files_;
+ private:
+ std::vector<std::unique_ptr<Source::File>> files_;
};
/// WGSL Parser test class with param
template <typename T>
-class ParserImplTestWithParam : public testing::TestWithParam<T>,
- public ProgramBuilder {
- public:
- /// Constructor
- ParserImplTestWithParam() = default;
- ~ParserImplTestWithParam() override = default;
+class ParserImplTestWithParam : public testing::TestWithParam<T>, public ProgramBuilder {
+ public:
+ /// Constructor
+ ParserImplTestWithParam() = default;
+ ~ParserImplTestWithParam() override = default;
- /// Retrieves the parser from the helper
- /// @param str the string to parse
- /// @returns the parser implementation
- std::unique_ptr<ParserImpl> parser(const std::string& str) {
- auto file = std::make_unique<Source::File>("test.wgsl", str);
- auto impl = std::make_unique<ParserImpl>(file.get());
- files_.emplace_back(std::move(file));
- return impl;
- }
+ /// Retrieves the parser from the helper
+ /// @param str the string to parse
+ /// @returns the parser implementation
+ std::unique_ptr<ParserImpl> parser(const std::string& str) {
+ auto file = std::make_unique<Source::File>("test.wgsl", str);
+ auto impl = std::make_unique<ParserImpl>(file.get());
+ files_.emplace_back(std::move(file));
+ return impl;
+ }
- private:
- std::vector<std::unique_ptr<Source::File>> files_;
+ private:
+ std::vector<std::unique_ptr<Source::File>> files_;
};
} // namespace tint::reader::wgsl
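
The reformatted helper above provides two fixtures: `ParserImplTest` for plain `TEST_F` cases and `ParserImplTestWithParam<T>` for value-parameterized suites; both build a `ParserImpl` over an in-memory `Source::File` and keep the file alive in `files_`. A minimal usage sketch of the parameterized fixture, assuming the declarations exactly as shown above (the `Src` struct and `ParseOkTest` suite are illustrative names only):

struct Src {
    const char* input;
    bool ok;
};

class ParseOkTest : public ParserImplTestWithParam<Src> {};

TEST_P(ParseOkTest, Roundtrip) {
    // parser() builds a ParserImpl over an in-memory Source::File.
    auto p = parser(GetParam().input);
    EXPECT_EQ(p->Parse(), GetParam().ok) << p->error();
}

INSTANTIATE_TEST_SUITE_P(ParserImplTest,
                         ParseOkTest,
                         testing::Values(Src{"fn main() {}", true},
                                         Src{"fn main() -> {}", false}));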
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_texel_format_test.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_texel_format_test.cc
index d1c6e7a1d5a..e7afb9e103a 100644
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_texel_format_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_texel_format_test.cc
@@ -18,139 +18,139 @@ namespace tint::reader::wgsl {
namespace {
TEST_F(ParserImplTest, ImageStorageType_Invalid) {
- auto p = parser("1234");
- auto t = p->expect_texel_format("test");
- EXPECT_TRUE(t.errored);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:1: invalid format for test");
+ auto p = parser("1234");
+ auto t = p->expect_texel_format("test");
+ EXPECT_TRUE(t.errored);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:1: invalid format for test");
}
TEST_F(ParserImplTest, ImageStorageType_R32Uint) {
- auto p = parser("r32uint");
- auto t = p->expect_texel_format("test");
- EXPECT_FALSE(t.errored);
- EXPECT_EQ(t.value, ast::TexelFormat::kR32Uint);
- EXPECT_FALSE(p->has_error());
+ auto p = parser("r32uint");
+ auto t = p->expect_texel_format("test");
+ EXPECT_FALSE(t.errored);
+ EXPECT_EQ(t.value, ast::TexelFormat::kR32Uint);
+ EXPECT_FALSE(p->has_error());
}
TEST_F(ParserImplTest, ImageStorageType_R32Sint) {
- auto p = parser("r32sint");
- auto t = p->expect_texel_format("test");
- EXPECT_FALSE(t.errored);
- EXPECT_EQ(t.value, ast::TexelFormat::kR32Sint);
- EXPECT_FALSE(p->has_error());
+ auto p = parser("r32sint");
+ auto t = p->expect_texel_format("test");
+ EXPECT_FALSE(t.errored);
+ EXPECT_EQ(t.value, ast::TexelFormat::kR32Sint);
+ EXPECT_FALSE(p->has_error());
}
TEST_F(ParserImplTest, ImageStorageType_R32Float) {
- auto p = parser("r32float");
- auto t = p->expect_texel_format("test");
- EXPECT_FALSE(t.errored);
- EXPECT_EQ(t.value, ast::TexelFormat::kR32Float);
- EXPECT_FALSE(p->has_error());
+ auto p = parser("r32float");
+ auto t = p->expect_texel_format("test");
+ EXPECT_FALSE(t.errored);
+ EXPECT_EQ(t.value, ast::TexelFormat::kR32Float);
+ EXPECT_FALSE(p->has_error());
}
TEST_F(ParserImplTest, ImageStorageType_Rgba8Unorm) {
- auto p = parser("rgba8unorm");
- auto t = p->expect_texel_format("test");
- EXPECT_FALSE(t.errored);
- EXPECT_EQ(t.value, ast::TexelFormat::kRgba8Unorm);
- EXPECT_FALSE(p->has_error());
+ auto p = parser("rgba8unorm");
+ auto t = p->expect_texel_format("test");
+ EXPECT_FALSE(t.errored);
+ EXPECT_EQ(t.value, ast::TexelFormat::kRgba8Unorm);
+ EXPECT_FALSE(p->has_error());
}
TEST_F(ParserImplTest, ImageStorageType_Rgba8Snorm) {
- auto p = parser("rgba8snorm");
- auto t = p->expect_texel_format("test");
- EXPECT_FALSE(t.errored);
- EXPECT_EQ(t.value, ast::TexelFormat::kRgba8Snorm);
- EXPECT_FALSE(p->has_error());
+ auto p = parser("rgba8snorm");
+ auto t = p->expect_texel_format("test");
+ EXPECT_FALSE(t.errored);
+ EXPECT_EQ(t.value, ast::TexelFormat::kRgba8Snorm);
+ EXPECT_FALSE(p->has_error());
}
TEST_F(ParserImplTest, ImageStorageType_Rgba8Uint) {
- auto p = parser("rgba8uint");
- auto t = p->expect_texel_format("test");
- EXPECT_FALSE(t.errored);
- EXPECT_EQ(t.value, ast::TexelFormat::kRgba8Uint);
- EXPECT_FALSE(p->has_error());
+ auto p = parser("rgba8uint");
+ auto t = p->expect_texel_format("test");
+ EXPECT_FALSE(t.errored);
+ EXPECT_EQ(t.value, ast::TexelFormat::kRgba8Uint);
+ EXPECT_FALSE(p->has_error());
}
TEST_F(ParserImplTest, ImageStorageType_Rgba8Sint) {
- auto p = parser("rgba8sint");
- auto t = p->expect_texel_format("test");
- EXPECT_FALSE(t.errored);
- EXPECT_EQ(t.value, ast::TexelFormat::kRgba8Sint);
- EXPECT_FALSE(p->has_error());
+ auto p = parser("rgba8sint");
+ auto t = p->expect_texel_format("test");
+ EXPECT_FALSE(t.errored);
+ EXPECT_EQ(t.value, ast::TexelFormat::kRgba8Sint);
+ EXPECT_FALSE(p->has_error());
}
TEST_F(ParserImplTest, ImageStorageType_Rg32Uint) {
- auto p = parser("rg32uint");
- auto t = p->expect_texel_format("test");
- EXPECT_FALSE(t.errored);
- EXPECT_EQ(t.value, ast::TexelFormat::kRg32Uint);
- EXPECT_FALSE(p->has_error());
+ auto p = parser("rg32uint");
+ auto t = p->expect_texel_format("test");
+ EXPECT_FALSE(t.errored);
+ EXPECT_EQ(t.value, ast::TexelFormat::kRg32Uint);
+ EXPECT_FALSE(p->has_error());
}
TEST_F(ParserImplTest, ImageStorageType_Rg32Sint) {
- auto p = parser("rg32sint");
- auto t = p->expect_texel_format("test");
- EXPECT_FALSE(t.errored);
- EXPECT_EQ(t.value, ast::TexelFormat::kRg32Sint);
- EXPECT_FALSE(p->has_error());
+ auto p = parser("rg32sint");
+ auto t = p->expect_texel_format("test");
+ EXPECT_FALSE(t.errored);
+ EXPECT_EQ(t.value, ast::TexelFormat::kRg32Sint);
+ EXPECT_FALSE(p->has_error());
}
TEST_F(ParserImplTest, ImageStorageType_Rg32Float) {
- auto p = parser("rg32float");
- auto t = p->expect_texel_format("test");
- EXPECT_FALSE(t.errored);
- EXPECT_EQ(t.value, ast::TexelFormat::kRg32Float);
- EXPECT_FALSE(p->has_error());
+ auto p = parser("rg32float");
+ auto t = p->expect_texel_format("test");
+ EXPECT_FALSE(t.errored);
+ EXPECT_EQ(t.value, ast::TexelFormat::kRg32Float);
+ EXPECT_FALSE(p->has_error());
}
TEST_F(ParserImplTest, ImageStorageType_Rgba16Uint) {
- auto p = parser("rgba16uint");
- auto t = p->expect_texel_format("test");
- EXPECT_FALSE(t.errored);
- EXPECT_EQ(t.value, ast::TexelFormat::kRgba16Uint);
- EXPECT_FALSE(p->has_error());
+ auto p = parser("rgba16uint");
+ auto t = p->expect_texel_format("test");
+ EXPECT_FALSE(t.errored);
+ EXPECT_EQ(t.value, ast::TexelFormat::kRgba16Uint);
+ EXPECT_FALSE(p->has_error());
}
TEST_F(ParserImplTest, ImageStorageType_Rgba16Sint) {
- auto p = parser("rgba16sint");
- auto t = p->expect_texel_format("test");
- EXPECT_FALSE(t.errored);
- EXPECT_EQ(t.value, ast::TexelFormat::kRgba16Sint);
- EXPECT_FALSE(p->has_error());
+ auto p = parser("rgba16sint");
+ auto t = p->expect_texel_format("test");
+ EXPECT_FALSE(t.errored);
+ EXPECT_EQ(t.value, ast::TexelFormat::kRgba16Sint);
+ EXPECT_FALSE(p->has_error());
}
TEST_F(ParserImplTest, ImageStorageType_Rgba16Float) {
- auto p = parser("rgba16float");
- auto t = p->expect_texel_format("test");
- EXPECT_FALSE(t.errored);
- EXPECT_EQ(t.value, ast::TexelFormat::kRgba16Float);
- EXPECT_FALSE(p->has_error());
+ auto p = parser("rgba16float");
+ auto t = p->expect_texel_format("test");
+ EXPECT_FALSE(t.errored);
+ EXPECT_EQ(t.value, ast::TexelFormat::kRgba16Float);
+ EXPECT_FALSE(p->has_error());
}
TEST_F(ParserImplTest, ImageStorageType_Rgba32Uint) {
- auto p = parser("rgba32uint");
- auto t = p->expect_texel_format("test");
- EXPECT_FALSE(t.errored);
- EXPECT_EQ(t.value, ast::TexelFormat::kRgba32Uint);
- EXPECT_FALSE(p->has_error());
+ auto p = parser("rgba32uint");
+ auto t = p->expect_texel_format("test");
+ EXPECT_FALSE(t.errored);
+ EXPECT_EQ(t.value, ast::TexelFormat::kRgba32Uint);
+ EXPECT_FALSE(p->has_error());
}
TEST_F(ParserImplTest, ImageStorageType_Rgba32Sint) {
- auto p = parser("rgba32sint");
- auto t = p->expect_texel_format("test");
- EXPECT_FALSE(t.errored);
- EXPECT_EQ(t.value, ast::TexelFormat::kRgba32Sint);
- EXPECT_FALSE(p->has_error());
+ auto p = parser("rgba32sint");
+ auto t = p->expect_texel_format("test");
+ EXPECT_FALSE(t.errored);
+ EXPECT_EQ(t.value, ast::TexelFormat::kRgba32Sint);
+ EXPECT_FALSE(p->has_error());
}
TEST_F(ParserImplTest, ImageStorageType_Rgba32Float) {
- auto p = parser("rgba32float");
- auto t = p->expect_texel_format("test");
- EXPECT_FALSE(t.errored);
- EXPECT_EQ(t.value, ast::TexelFormat::kRgba32Float);
- EXPECT_FALSE(p->has_error());
+ auto p = parser("rgba32float");
+ auto t = p->expect_texel_format("test");
+ EXPECT_FALSE(t.errored);
+ EXPECT_EQ(t.value, ast::TexelFormat::kRgba32Float);
+ EXPECT_FALSE(p->has_error());
}
} // namespace
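
Every texel-format case above repeats the same five assertions with a different literal and `ast::TexelFormat` enumerator. A sketch of how the suite could be collapsed into one value-parameterized test using the `ParserImplTestWithParam` fixture shown earlier; the `FormatCase` struct and `TexelFormatTest` suite name are assumptions for illustration:

struct FormatCase {
    const char* input;
    ast::TexelFormat expected;
};

class TexelFormatTest : public ParserImplTestWithParam<FormatCase> {};

TEST_P(TexelFormatTest, Parses) {
    // Same assertions as the individual tests above, driven by the parameter.
    auto p = parser(GetParam().input);
    auto t = p->expect_texel_format("test");
    EXPECT_FALSE(t.errored);
    EXPECT_EQ(t.value, GetParam().expected);
    EXPECT_FALSE(p->has_error());
}

INSTANTIATE_TEST_SUITE_P(ParserImplTest,
                         TexelFormatTest,
                         testing::Values(FormatCase{"r32uint", ast::TexelFormat::kR32Uint},
                                         FormatCase{"rg32float", ast::TexelFormat::kRg32Float},
                                         FormatCase{"rgba32sint", ast::TexelFormat::kRgba32Sint}));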
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_texture_sampler_test.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_texture_sampler_test.cc
new file mode 100644
index 00000000000..162b41c15b8
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_texture_sampler_test.cc
@@ -0,0 +1,261 @@
+// Copyright 2020 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/tint/reader/wgsl/parser_impl_test_helper.h"
+#include "src/tint/sem/depth_texture.h"
+#include "src/tint/sem/multisampled_texture.h"
+#include "src/tint/sem/sampled_texture.h"
+
+namespace tint::reader::wgsl {
+namespace {
+
+TEST_F(ParserImplTest, TextureSamplerTypes_Invalid) {
+ auto p = parser("1234");
+ auto t = p->texture_samplers();
+ EXPECT_EQ(t.value, nullptr);
+ EXPECT_FALSE(t.matched);
+ EXPECT_FALSE(t.errored);
+ EXPECT_FALSE(p->has_error());
+}
+
+TEST_F(ParserImplTest, TextureSamplerTypes_Sampler) {
+ auto p = parser("sampler");
+ auto t = p->texture_samplers();
+ ASSERT_FALSE(p->has_error()) << p->error();
+ EXPECT_TRUE(t.matched);
+ EXPECT_FALSE(t.errored);
+ ASSERT_NE(t.value, nullptr);
+ ASSERT_TRUE(t->Is<ast::Sampler>());
+ ASSERT_FALSE(t->As<ast::Sampler>()->IsComparison());
+ EXPECT_EQ(t.value->source.range, (Source::Range{{1u, 1u}, {1u, 8u}}));
+}
+
+TEST_F(ParserImplTest, TextureSamplerTypes_SamplerComparison) {
+ auto p = parser("sampler_comparison");
+ auto t = p->texture_samplers();
+ ASSERT_FALSE(p->has_error()) << p->error();
+ EXPECT_TRUE(t.matched);
+ EXPECT_FALSE(t.errored);
+ ASSERT_NE(t.value, nullptr);
+ ASSERT_TRUE(t->Is<ast::Sampler>());
+ ASSERT_TRUE(t->As<ast::Sampler>()->IsComparison());
+ EXPECT_EQ(t.value->source.range, (Source::Range{{1u, 1u}, {1u, 19u}}));
+}
+
+TEST_F(ParserImplTest, TextureSamplerTypes_DepthTexture) {
+ auto p = parser("texture_depth_2d");
+ auto t = p->texture_samplers();
+ ASSERT_FALSE(p->has_error()) << p->error();
+ EXPECT_TRUE(t.matched);
+ EXPECT_FALSE(t.errored);
+ ASSERT_NE(t.value, nullptr);
+ ASSERT_TRUE(t->Is<ast::Texture>());
+ ASSERT_TRUE(t->Is<ast::DepthTexture>());
+ EXPECT_EQ(t->As<ast::Texture>()->dim, ast::TextureDimension::k2d);
+ EXPECT_EQ(t.value->source.range, (Source::Range{{1u, 1u}, {1u, 17u}}));
+}
+
+TEST_F(ParserImplTest, TextureSamplerTypes_SampledTexture_F32) {
+ auto p = parser("texture_1d<f32>");
+ auto t = p->texture_samplers();
+ ASSERT_FALSE(p->has_error()) << p->error();
+ EXPECT_TRUE(t.matched);
+ EXPECT_FALSE(t.errored);
+ ASSERT_NE(t.value, nullptr);
+ ASSERT_TRUE(t->Is<ast::Texture>());
+ ASSERT_TRUE(t->Is<ast::SampledTexture>());
+ ASSERT_TRUE(t->As<ast::SampledTexture>()->type->Is<ast::F32>());
+ EXPECT_EQ(t->As<ast::Texture>()->dim, ast::TextureDimension::k1d);
+ EXPECT_EQ(t.value->source.range, (Source::Range{{1u, 1u}, {1u, 16u}}));
+}
+
+TEST_F(ParserImplTest, TextureSamplerTypes_SampledTexture_I32) {
+ auto p = parser("texture_2d<i32>");
+ auto t = p->texture_samplers();
+ ASSERT_FALSE(p->has_error()) << p->error();
+ EXPECT_TRUE(t.matched);
+ EXPECT_FALSE(t.errored);
+ ASSERT_NE(t.value, nullptr);
+ ASSERT_TRUE(t->Is<ast::Texture>());
+ ASSERT_TRUE(t->Is<ast::SampledTexture>());
+ ASSERT_TRUE(t->As<ast::SampledTexture>()->type->Is<ast::I32>());
+ EXPECT_EQ(t->As<ast::Texture>()->dim, ast::TextureDimension::k2d);
+ EXPECT_EQ(t.value->source.range, (Source::Range{{1u, 1u}, {1u, 16u}}));
+}
+
+TEST_F(ParserImplTest, TextureSamplerTypes_SampledTexture_U32) {
+ auto p = parser("texture_3d<u32>");
+ auto t = p->texture_samplers();
+ ASSERT_FALSE(p->has_error()) << p->error();
+ EXPECT_TRUE(t.matched);
+ EXPECT_FALSE(t.errored);
+ ASSERT_NE(t.value, nullptr);
+ ASSERT_TRUE(t->Is<ast::Texture>());
+ ASSERT_TRUE(t->Is<ast::SampledTexture>());
+ ASSERT_TRUE(t->As<ast::SampledTexture>()->type->Is<ast::U32>());
+ EXPECT_EQ(t->As<ast::Texture>()->dim, ast::TextureDimension::k3d);
+ EXPECT_EQ(t.value->source.range, (Source::Range{{1u, 1u}, {1u, 16u}}));
+}
+
+TEST_F(ParserImplTest, TextureSamplerTypes_SampledTexture_MissingType) {
+ auto p = parser("texture_1d<>");
+ auto t = p->texture_samplers();
+ ASSERT_TRUE(p->has_error());
+ EXPECT_EQ(t.value, nullptr);
+ EXPECT_FALSE(t.matched);
+ EXPECT_TRUE(t.errored);
+ EXPECT_EQ(p->error(), "1:12: invalid type for sampled texture type");
+}
+
+TEST_F(ParserImplTest, TextureSamplerTypes_SampledTexture_MissingLessThan) {
+ auto p = parser("texture_1d");
+ auto t = p->texture_samplers();
+ ASSERT_TRUE(p->has_error());
+ EXPECT_EQ(t.value, nullptr);
+ EXPECT_FALSE(t.matched);
+ EXPECT_TRUE(t.errored);
+ EXPECT_EQ(p->error(), "1:11: expected '<' for sampled texture type");
+}
+
+TEST_F(ParserImplTest, TextureSamplerTypes_SampledTexture_MissingGreaterThan) {
+ auto p = parser("texture_1d<u32");
+ auto t = p->texture_samplers();
+ ASSERT_TRUE(p->has_error());
+ EXPECT_EQ(t.value, nullptr);
+ EXPECT_FALSE(t.matched);
+ EXPECT_TRUE(t.errored);
+ EXPECT_EQ(p->error(), "1:15: expected '>' for sampled texture type");
+}
+
+TEST_F(ParserImplTest, TextureSamplerTypes_MultisampledTexture_I32) {
+ auto p = parser("texture_multisampled_2d<i32>");
+ auto t = p->texture_samplers();
+ ASSERT_FALSE(p->has_error()) << p->error();
+ EXPECT_TRUE(t.matched);
+ EXPECT_FALSE(t.errored);
+ ASSERT_NE(t.value, nullptr);
+ ASSERT_TRUE(t->Is<ast::Texture>());
+ ASSERT_TRUE(t->Is<ast::MultisampledTexture>());
+ ASSERT_TRUE(t->As<ast::MultisampledTexture>()->type->Is<ast::I32>());
+ EXPECT_EQ(t->As<ast::Texture>()->dim, ast::TextureDimension::k2d);
+ EXPECT_EQ(t.value->source.range, (Source::Range{{1u, 1u}, {1u, 29u}}));
+}
+
+TEST_F(ParserImplTest, TextureSamplerTypes_MultisampledTexture_MissingType) {
+ auto p = parser("texture_multisampled_2d<>");
+ auto t = p->texture_samplers();
+ ASSERT_TRUE(p->has_error());
+ EXPECT_EQ(t.value, nullptr);
+ EXPECT_FALSE(t.matched);
+ EXPECT_TRUE(t.errored);
+ EXPECT_EQ(p->error(), "1:25: invalid type for multisampled texture type");
+}
+
+TEST_F(ParserImplTest, TextureSamplerTypes_MultisampledTexture_MissingLessThan) {
+ auto p = parser("texture_multisampled_2d");
+ auto t = p->texture_samplers();
+ EXPECT_EQ(t.value, nullptr);
+ EXPECT_FALSE(t.matched);
+ EXPECT_TRUE(t.errored);
+ EXPECT_EQ(p->error(), "1:24: expected '<' for multisampled texture type");
+}
+
+TEST_F(ParserImplTest, TextureSamplerTypes_MultisampledTexture_MissingGreaterThan) {
+ auto p = parser("texture_multisampled_2d<u32");
+ auto t = p->texture_samplers();
+ EXPECT_EQ(t.value, nullptr);
+ EXPECT_FALSE(t.matched);
+ EXPECT_TRUE(t.errored);
+ EXPECT_EQ(p->error(), "1:28: expected '>' for multisampled texture type");
+}
+
+TEST_F(ParserImplTest, TextureSamplerTypes_StorageTexture_Readonly1dRg32Float) {
+ auto p = parser("texture_storage_1d<rg32float, read>");
+ auto t = p->texture_samplers();
+ ASSERT_FALSE(p->has_error()) << p->error();
+ EXPECT_TRUE(t.matched);
+ EXPECT_FALSE(t.errored);
+ ASSERT_NE(t.value, nullptr);
+
+ ASSERT_TRUE(t->Is<ast::Texture>());
+ ASSERT_TRUE(t->Is<ast::StorageTexture>());
+ EXPECT_EQ(t->As<ast::StorageTexture>()->format, ast::TexelFormat::kRg32Float);
+ EXPECT_EQ(t->As<ast::StorageTexture>()->access, ast::Access::kRead);
+ EXPECT_EQ(t->As<ast::Texture>()->dim, ast::TextureDimension::k1d);
+ EXPECT_EQ(t->source.range, (Source::Range{{1u, 1u}, {1u, 36u}}));
+}
+
+TEST_F(ParserImplTest, TextureSamplerTypes_StorageTexture_Writeonly2dR32Uint) {
+ auto p = parser("texture_storage_2d<r32uint, write>");
+ auto t = p->texture_samplers();
+ ASSERT_FALSE(p->has_error()) << p->error();
+ EXPECT_TRUE(t.matched);
+ EXPECT_FALSE(t.errored);
+ ASSERT_NE(t.value, nullptr);
+
+ ASSERT_TRUE(t->Is<ast::Texture>());
+ ASSERT_TRUE(t->Is<ast::StorageTexture>());
+ EXPECT_EQ(t->As<ast::StorageTexture>()->format, ast::TexelFormat::kR32Uint);
+ EXPECT_EQ(t->As<ast::StorageTexture>()->access, ast::Access::kWrite);
+ EXPECT_EQ(t->As<ast::Texture>()->dim, ast::TextureDimension::k2d);
+ EXPECT_EQ(t->source.range, (Source::Range{{1u, 1u}, {1u, 35u}}));
+}
+
+TEST_F(ParserImplTest, TextureSamplerTypes_StorageTexture_InvalidType) {
+ auto p = parser("texture_storage_1d<abc, read>");
+ auto t = p->texture_samplers();
+ EXPECT_EQ(t.value, nullptr);
+ EXPECT_FALSE(t.matched);
+ EXPECT_TRUE(t.errored);
+ EXPECT_EQ(p->error(), "1:20: invalid format for storage texture type");
+}
+
+TEST_F(ParserImplTest, TextureSamplerTypes_StorageTexture_InvalidAccess) {
+ auto p = parser("texture_storage_1d<r32float, abc>");
+ auto t = p->texture_samplers();
+ EXPECT_EQ(t.value, nullptr);
+ EXPECT_FALSE(t.matched);
+ EXPECT_TRUE(t.errored);
+ EXPECT_EQ(p->error(), "1:30: invalid value for access control");
+}
+
+TEST_F(ParserImplTest, TextureSamplerTypes_StorageTexture_MissingType) {
+ auto p = parser("texture_storage_1d<>");
+ auto t = p->texture_samplers();
+ EXPECT_EQ(t.value, nullptr);
+ EXPECT_FALSE(t.matched);
+ EXPECT_TRUE(t.errored);
+ EXPECT_EQ(p->error(), "1:20: invalid format for storage texture type");
+}
+
+TEST_F(ParserImplTest, TextureSamplerTypes_StorageTexture_MissingLessThan) {
+ auto p = parser("texture_storage_1d");
+ auto t = p->texture_samplers();
+ EXPECT_EQ(t.value, nullptr);
+ EXPECT_FALSE(t.matched);
+ EXPECT_TRUE(t.errored);
+ EXPECT_EQ(p->error(), "1:19: expected '<' for storage texture type");
+}
+
+TEST_F(ParserImplTest, TextureSamplerTypes_StorageTexture_MissingGreaterThan) {
+ auto p = parser("texture_storage_1d<r32uint, read");
+ auto t = p->texture_samplers();
+ EXPECT_EQ(t.value, nullptr);
+ EXPECT_FALSE(t.matched);
+ EXPECT_TRUE(t.errored);
+ EXPECT_EQ(p->error(), "1:33: expected '>' for storage texture type");
+}
+
+} // namespace
+} // namespace tint::reader::wgsl
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_texture_sampler_types_test.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_texture_sampler_types_test.cc
deleted file mode 100644
index 25a06988e33..00000000000
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_texture_sampler_types_test.cc
+++ /dev/null
@@ -1,263 +0,0 @@
-// Copyright 2020 The Tint Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "src/tint/reader/wgsl/parser_impl_test_helper.h"
-#include "src/tint/sem/depth_texture_type.h"
-#include "src/tint/sem/multisampled_texture_type.h"
-#include "src/tint/sem/sampled_texture_type.h"
-
-namespace tint::reader::wgsl {
-namespace {
-
-TEST_F(ParserImplTest, TextureSamplerTypes_Invalid) {
- auto p = parser("1234");
- auto t = p->texture_sampler_types();
- EXPECT_EQ(t.value, nullptr);
- EXPECT_FALSE(t.matched);
- EXPECT_FALSE(t.errored);
- EXPECT_FALSE(p->has_error());
-}
-
-TEST_F(ParserImplTest, TextureSamplerTypes_Sampler) {
- auto p = parser("sampler");
- auto t = p->texture_sampler_types();
- ASSERT_FALSE(p->has_error()) << p->error();
- EXPECT_TRUE(t.matched);
- EXPECT_FALSE(t.errored);
- ASSERT_NE(t.value, nullptr);
- ASSERT_TRUE(t->Is<ast::Sampler>());
- ASSERT_FALSE(t->As<ast::Sampler>()->IsComparison());
- EXPECT_EQ(t.value->source.range, (Source::Range{{1u, 1u}, {1u, 8u}}));
-}
-
-TEST_F(ParserImplTest, TextureSamplerTypes_SamplerComparison) {
- auto p = parser("sampler_comparison");
- auto t = p->texture_sampler_types();
- ASSERT_FALSE(p->has_error()) << p->error();
- EXPECT_TRUE(t.matched);
- EXPECT_FALSE(t.errored);
- ASSERT_NE(t.value, nullptr);
- ASSERT_TRUE(t->Is<ast::Sampler>());
- ASSERT_TRUE(t->As<ast::Sampler>()->IsComparison());
- EXPECT_EQ(t.value->source.range, (Source::Range{{1u, 1u}, {1u, 19u}}));
-}
-
-TEST_F(ParserImplTest, TextureSamplerTypes_DepthTexture) {
- auto p = parser("texture_depth_2d");
- auto t = p->texture_sampler_types();
- ASSERT_FALSE(p->has_error()) << p->error();
- EXPECT_TRUE(t.matched);
- EXPECT_FALSE(t.errored);
- ASSERT_NE(t.value, nullptr);
- ASSERT_TRUE(t->Is<ast::Texture>());
- ASSERT_TRUE(t->Is<ast::DepthTexture>());
- EXPECT_EQ(t->As<ast::Texture>()->dim, ast::TextureDimension::k2d);
- EXPECT_EQ(t.value->source.range, (Source::Range{{1u, 1u}, {1u, 17u}}));
-}
-
-TEST_F(ParserImplTest, TextureSamplerTypes_SampledTexture_F32) {
- auto p = parser("texture_1d<f32>");
- auto t = p->texture_sampler_types();
- ASSERT_FALSE(p->has_error()) << p->error();
- EXPECT_TRUE(t.matched);
- EXPECT_FALSE(t.errored);
- ASSERT_NE(t.value, nullptr);
- ASSERT_TRUE(t->Is<ast::Texture>());
- ASSERT_TRUE(t->Is<ast::SampledTexture>());
- ASSERT_TRUE(t->As<ast::SampledTexture>()->type->Is<ast::F32>());
- EXPECT_EQ(t->As<ast::Texture>()->dim, ast::TextureDimension::k1d);
- EXPECT_EQ(t.value->source.range, (Source::Range{{1u, 1u}, {1u, 16u}}));
-}
-
-TEST_F(ParserImplTest, TextureSamplerTypes_SampledTexture_I32) {
- auto p = parser("texture_2d<i32>");
- auto t = p->texture_sampler_types();
- ASSERT_FALSE(p->has_error()) << p->error();
- EXPECT_TRUE(t.matched);
- EXPECT_FALSE(t.errored);
- ASSERT_NE(t.value, nullptr);
- ASSERT_TRUE(t->Is<ast::Texture>());
- ASSERT_TRUE(t->Is<ast::SampledTexture>());
- ASSERT_TRUE(t->As<ast::SampledTexture>()->type->Is<ast::I32>());
- EXPECT_EQ(t->As<ast::Texture>()->dim, ast::TextureDimension::k2d);
- EXPECT_EQ(t.value->source.range, (Source::Range{{1u, 1u}, {1u, 16u}}));
-}
-
-TEST_F(ParserImplTest, TextureSamplerTypes_SampledTexture_U32) {
- auto p = parser("texture_3d<u32>");
- auto t = p->texture_sampler_types();
- ASSERT_FALSE(p->has_error()) << p->error();
- EXPECT_TRUE(t.matched);
- EXPECT_FALSE(t.errored);
- ASSERT_NE(t.value, nullptr);
- ASSERT_TRUE(t->Is<ast::Texture>());
- ASSERT_TRUE(t->Is<ast::SampledTexture>());
- ASSERT_TRUE(t->As<ast::SampledTexture>()->type->Is<ast::U32>());
- EXPECT_EQ(t->As<ast::Texture>()->dim, ast::TextureDimension::k3d);
- EXPECT_EQ(t.value->source.range, (Source::Range{{1u, 1u}, {1u, 16u}}));
-}
-
-TEST_F(ParserImplTest, TextureSamplerTypes_SampledTexture_MissingType) {
- auto p = parser("texture_1d<>");
- auto t = p->texture_sampler_types();
- ASSERT_TRUE(p->has_error());
- EXPECT_EQ(t.value, nullptr);
- EXPECT_FALSE(t.matched);
- EXPECT_TRUE(t.errored);
- EXPECT_EQ(p->error(), "1:12: invalid type for sampled texture type");
-}
-
-TEST_F(ParserImplTest, TextureSamplerTypes_SampledTexture_MissingLessThan) {
- auto p = parser("texture_1d");
- auto t = p->texture_sampler_types();
- ASSERT_TRUE(p->has_error());
- EXPECT_EQ(t.value, nullptr);
- EXPECT_FALSE(t.matched);
- EXPECT_TRUE(t.errored);
- EXPECT_EQ(p->error(), "1:11: expected '<' for sampled texture type");
-}
-
-TEST_F(ParserImplTest, TextureSamplerTypes_SampledTexture_MissingGreaterThan) {
- auto p = parser("texture_1d<u32");
- auto t = p->texture_sampler_types();
- ASSERT_TRUE(p->has_error());
- EXPECT_EQ(t.value, nullptr);
- EXPECT_FALSE(t.matched);
- EXPECT_TRUE(t.errored);
- EXPECT_EQ(p->error(), "1:15: expected '>' for sampled texture type");
-}
-
-TEST_F(ParserImplTest, TextureSamplerTypes_MultisampledTexture_I32) {
- auto p = parser("texture_multisampled_2d<i32>");
- auto t = p->texture_sampler_types();
- ASSERT_FALSE(p->has_error()) << p->error();
- EXPECT_TRUE(t.matched);
- EXPECT_FALSE(t.errored);
- ASSERT_NE(t.value, nullptr);
- ASSERT_TRUE(t->Is<ast::Texture>());
- ASSERT_TRUE(t->Is<ast::MultisampledTexture>());
- ASSERT_TRUE(t->As<ast::MultisampledTexture>()->type->Is<ast::I32>());
- EXPECT_EQ(t->As<ast::Texture>()->dim, ast::TextureDimension::k2d);
- EXPECT_EQ(t.value->source.range, (Source::Range{{1u, 1u}, {1u, 29u}}));
-}
-
-TEST_F(ParserImplTest, TextureSamplerTypes_MultisampledTexture_MissingType) {
- auto p = parser("texture_multisampled_2d<>");
- auto t = p->texture_sampler_types();
- ASSERT_TRUE(p->has_error());
- EXPECT_EQ(t.value, nullptr);
- EXPECT_FALSE(t.matched);
- EXPECT_TRUE(t.errored);
- EXPECT_EQ(p->error(), "1:25: invalid type for multisampled texture type");
-}
-
-TEST_F(ParserImplTest,
- TextureSamplerTypes_MultisampledTexture_MissingLessThan) {
- auto p = parser("texture_multisampled_2d");
- auto t = p->texture_sampler_types();
- EXPECT_EQ(t.value, nullptr);
- EXPECT_FALSE(t.matched);
- EXPECT_TRUE(t.errored);
- EXPECT_EQ(p->error(), "1:24: expected '<' for multisampled texture type");
-}
-
-TEST_F(ParserImplTest,
- TextureSamplerTypes_MultisampledTexture_MissingGreaterThan) {
- auto p = parser("texture_multisampled_2d<u32");
- auto t = p->texture_sampler_types();
- EXPECT_EQ(t.value, nullptr);
- EXPECT_FALSE(t.matched);
- EXPECT_TRUE(t.errored);
- EXPECT_EQ(p->error(), "1:28: expected '>' for multisampled texture type");
-}
-
-TEST_F(ParserImplTest, TextureSamplerTypes_StorageTexture_Readonly1dRg32Float) {
- auto p = parser("texture_storage_1d<rg32float, read>");
- auto t = p->texture_sampler_types();
- ASSERT_FALSE(p->has_error()) << p->error();
- EXPECT_TRUE(t.matched);
- EXPECT_FALSE(t.errored);
- ASSERT_NE(t.value, nullptr);
-
- ASSERT_TRUE(t->Is<ast::Texture>());
- ASSERT_TRUE(t->Is<ast::StorageTexture>());
- EXPECT_EQ(t->As<ast::StorageTexture>()->format, ast::TexelFormat::kRg32Float);
- EXPECT_EQ(t->As<ast::StorageTexture>()->access, ast::Access::kRead);
- EXPECT_EQ(t->As<ast::Texture>()->dim, ast::TextureDimension::k1d);
- EXPECT_EQ(t->source.range, (Source::Range{{1u, 1u}, {1u, 36u}}));
-}
-
-TEST_F(ParserImplTest, TextureSamplerTypes_StorageTexture_Writeonly2dR32Uint) {
- auto p = parser("texture_storage_2d<r32uint, write>");
- auto t = p->texture_sampler_types();
- ASSERT_FALSE(p->has_error()) << p->error();
- EXPECT_TRUE(t.matched);
- EXPECT_FALSE(t.errored);
- ASSERT_NE(t.value, nullptr);
-
- ASSERT_TRUE(t->Is<ast::Texture>());
- ASSERT_TRUE(t->Is<ast::StorageTexture>());
- EXPECT_EQ(t->As<ast::StorageTexture>()->format, ast::TexelFormat::kR32Uint);
- EXPECT_EQ(t->As<ast::StorageTexture>()->access, ast::Access::kWrite);
- EXPECT_EQ(t->As<ast::Texture>()->dim, ast::TextureDimension::k2d);
- EXPECT_EQ(t->source.range, (Source::Range{{1u, 1u}, {1u, 35u}}));
-}
-
-TEST_F(ParserImplTest, TextureSamplerTypes_StorageTexture_InvalidType) {
- auto p = parser("texture_storage_1d<abc, read>");
- auto t = p->texture_sampler_types();
- EXPECT_EQ(t.value, nullptr);
- EXPECT_FALSE(t.matched);
- EXPECT_TRUE(t.errored);
- EXPECT_EQ(p->error(), "1:20: invalid format for storage texture type");
-}
-
-TEST_F(ParserImplTest, TextureSamplerTypes_StorageTexture_InvalidAccess) {
- auto p = parser("texture_storage_1d<r32float, abc>");
- auto t = p->texture_sampler_types();
- EXPECT_EQ(t.value, nullptr);
- EXPECT_FALSE(t.matched);
- EXPECT_TRUE(t.errored);
- EXPECT_EQ(p->error(), "1:30: invalid value for access control");
-}
-
-TEST_F(ParserImplTest, TextureSamplerTypes_StorageTexture_MissingType) {
- auto p = parser("texture_storage_1d<>");
- auto t = p->texture_sampler_types();
- EXPECT_EQ(t.value, nullptr);
- EXPECT_FALSE(t.matched);
- EXPECT_TRUE(t.errored);
- EXPECT_EQ(p->error(), "1:20: invalid format for storage texture type");
-}
-
-TEST_F(ParserImplTest, TextureSamplerTypes_StorageTexture_MissingLessThan) {
- auto p = parser("texture_storage_1d");
- auto t = p->texture_sampler_types();
- EXPECT_EQ(t.value, nullptr);
- EXPECT_FALSE(t.matched);
- EXPECT_TRUE(t.errored);
- EXPECT_EQ(p->error(), "1:19: expected '<' for storage texture type");
-}
-
-TEST_F(ParserImplTest, TextureSamplerTypes_StorageTexture_MissingGreaterThan) {
- auto p = parser("texture_storage_1d<r32uint, read");
- auto t = p->texture_sampler_types();
- EXPECT_EQ(t.value, nullptr);
- EXPECT_FALSE(t.matched);
- EXPECT_TRUE(t.errored);
- EXPECT_EQ(p->error(), "1:33: expected '>' for storage texture type");
-}
-
-} // namespace
-} // namespace tint::reader::wgsl
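
The file deleted above, read against the new parser_impl_texture_sampler_test.cc added just before it, shows the parser entry point renamed from `texture_sampler_types()` to `texture_samplers()` and the `sem/*_texture_type.h` includes renamed to `sem/*_texture.h`; the assertions themselves are unchanged. A minimal sketch of a call site updated for the rename, reusing the depth-texture case that appears in both files:

auto p = parser("texture_depth_2d");
// Renamed entry point; the returned result is consumed exactly as before.
auto t = p->texture_samplers();
ASSERT_TRUE(t.matched);
ASSERT_TRUE(t->Is<ast::DepthTexture>());
EXPECT_EQ(t->As<ast::Texture>()->dim, ast::TextureDimension::k2d);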
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_type_alias_test.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_type_alias_test.cc
index e30d228889b..ea5cbe25b4b 100644
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_type_alias_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_type_alias_test.cc
@@ -18,82 +18,82 @@ namespace tint::reader::wgsl {
namespace {
TEST_F(ParserImplTest, TypeDecl_ParsesType) {
- auto p = parser("type a = i32");
+ auto p = parser("type a = i32");
- auto t = p->type_alias();
- EXPECT_FALSE(p->has_error());
- EXPECT_FALSE(t.errored);
- EXPECT_TRUE(t.matched);
- ASSERT_NE(t.value, nullptr);
- ASSERT_TRUE(t->Is<ast::Alias>());
- auto* alias = t->As<ast::Alias>();
- ASSERT_TRUE(alias->type->Is<ast::I32>());
+ auto t = p->type_alias();
+ EXPECT_FALSE(p->has_error());
+ EXPECT_FALSE(t.errored);
+ EXPECT_TRUE(t.matched);
+ ASSERT_NE(t.value, nullptr);
+ ASSERT_TRUE(t->Is<ast::Alias>());
+ auto* alias = t->As<ast::Alias>();
+ ASSERT_TRUE(alias->type->Is<ast::I32>());
- EXPECT_EQ(t.value->source.range, (Source::Range{{1u, 1u}, {1u, 13u}}));
+ EXPECT_EQ(t.value->source.range, (Source::Range{{1u, 1u}, {1u, 13u}}));
}
TEST_F(ParserImplTest, TypeDecl_Parses_Ident) {
- auto p = parser("type a = B");
+ auto p = parser("type a = B");
- auto t = p->type_alias();
- EXPECT_FALSE(p->has_error());
- EXPECT_FALSE(t.errored);
- EXPECT_TRUE(t.matched);
- ASSERT_NE(t.value, nullptr);
- ASSERT_TRUE(t.value->Is<ast::Alias>());
- auto* alias = t.value->As<ast::Alias>();
- EXPECT_EQ(p->builder().Symbols().NameFor(alias->name), "a");
- EXPECT_TRUE(alias->type->Is<ast::TypeName>());
- EXPECT_EQ(alias->source.range, (Source::Range{{1u, 1u}, {1u, 11u}}));
+ auto t = p->type_alias();
+ EXPECT_FALSE(p->has_error());
+ EXPECT_FALSE(t.errored);
+ EXPECT_TRUE(t.matched);
+ ASSERT_NE(t.value, nullptr);
+ ASSERT_TRUE(t.value->Is<ast::Alias>());
+ auto* alias = t.value->As<ast::Alias>();
+ EXPECT_EQ(p->builder().Symbols().NameFor(alias->name), "a");
+ EXPECT_TRUE(alias->type->Is<ast::TypeName>());
+ EXPECT_EQ(alias->source.range, (Source::Range{{1u, 1u}, {1u, 11u}}));
}
TEST_F(ParserImplTest, TypeDecl_Unicode_Parses_Ident) {
- const std::string ident = // "𝓶𝔂_𝓽𝔂𝓹𝓮"
- "\xf0\x9d\x93\xb6\xf0\x9d\x94\x82\x5f\xf0\x9d\x93\xbd\xf0\x9d\x94\x82\xf0"
- "\x9d\x93\xb9\xf0\x9d\x93\xae";
+ const std::string ident = // "𝓶𝔂_𝓽𝔂𝓹𝓮"
+ "\xf0\x9d\x93\xb6\xf0\x9d\x94\x82\x5f\xf0\x9d\x93\xbd\xf0\x9d\x94\x82\xf0"
+ "\x9d\x93\xb9\xf0\x9d\x93\xae";
- auto p = parser("type " + ident + " = i32");
+ auto p = parser("type " + ident + " = i32");
- auto t = p->type_alias();
- EXPECT_FALSE(p->has_error());
- EXPECT_FALSE(t.errored);
- EXPECT_TRUE(t.matched);
- ASSERT_NE(t.value, nullptr);
- ASSERT_TRUE(t.value->Is<ast::Alias>());
- auto* alias = t.value->As<ast::Alias>();
- EXPECT_EQ(p->builder().Symbols().NameFor(alias->name), ident);
- EXPECT_TRUE(alias->type->Is<ast::I32>());
- EXPECT_EQ(alias->source.range, (Source::Range{{1u, 1u}, {1u, 37u}}));
+ auto t = p->type_alias();
+ EXPECT_FALSE(p->has_error());
+ EXPECT_FALSE(t.errored);
+ EXPECT_TRUE(t.matched);
+ ASSERT_NE(t.value, nullptr);
+ ASSERT_TRUE(t.value->Is<ast::Alias>());
+ auto* alias = t.value->As<ast::Alias>();
+ EXPECT_EQ(p->builder().Symbols().NameFor(alias->name), ident);
+ EXPECT_TRUE(alias->type->Is<ast::I32>());
+ EXPECT_EQ(alias->source.range, (Source::Range{{1u, 1u}, {1u, 37u}}));
}
TEST_F(ParserImplTest, TypeDecl_MissingIdent) {
- auto p = parser("type = i32");
- auto t = p->type_alias();
- EXPECT_TRUE(t.errored);
- EXPECT_FALSE(t.matched);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(t.value, nullptr);
- EXPECT_EQ(p->error(), "1:6: expected identifier for type alias");
+ auto p = parser("type = i32");
+ auto t = p->type_alias();
+ EXPECT_TRUE(t.errored);
+ EXPECT_FALSE(t.matched);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(t.value, nullptr);
+ EXPECT_EQ(p->error(), "1:6: expected identifier for type alias");
}
TEST_F(ParserImplTest, TypeDecl_InvalidIdent) {
- auto p = parser("type 123 = i32");
- auto t = p->type_alias();
- EXPECT_TRUE(t.errored);
- EXPECT_FALSE(t.matched);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(t.value, nullptr);
- EXPECT_EQ(p->error(), "1:6: expected identifier for type alias");
+ auto p = parser("type 123 = i32");
+ auto t = p->type_alias();
+ EXPECT_TRUE(t.errored);
+ EXPECT_FALSE(t.matched);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(t.value, nullptr);
+ EXPECT_EQ(p->error(), "1:6: expected identifier for type alias");
}
TEST_F(ParserImplTest, TypeDecl_MissingEqual) {
- auto p = parser("type a i32");
- auto t = p->type_alias();
- EXPECT_TRUE(t.errored);
- EXPECT_FALSE(t.matched);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(t.value, nullptr);
- EXPECT_EQ(p->error(), "1:8: expected '=' for type alias");
+ auto p = parser("type a i32");
+ auto t = p->type_alias();
+ EXPECT_TRUE(t.errored);
+ EXPECT_FALSE(t.matched);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(t.value, nullptr);
+ EXPECT_EQ(p->error(), "1:8: expected '=' for type alias");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_type_decl_test.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_type_decl_test.cc
index f649a268d1e..bc2bfe5d666 100644
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_type_decl_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_type_decl_test.cc
@@ -17,119 +17,129 @@
#include "src/tint/ast/matrix.h"
#include "src/tint/ast/sampler.h"
#include "src/tint/reader/wgsl/parser_impl_test_helper.h"
-#include "src/tint/sem/sampled_texture_type.h"
+#include "src/tint/sem/sampled_texture.h"
namespace tint::reader::wgsl {
namespace {
TEST_F(ParserImplTest, TypeDecl_Invalid) {
- auto p = parser("1234");
- auto t = p->type_decl();
- EXPECT_EQ(t.errored, false);
- EXPECT_EQ(t.matched, false);
- EXPECT_EQ(t.value, nullptr);
- EXPECT_FALSE(p->has_error());
+ auto p = parser("1234");
+ auto t = p->type_decl();
+ EXPECT_EQ(t.errored, false);
+ EXPECT_EQ(t.matched, false);
+ EXPECT_EQ(t.value, nullptr);
+ EXPECT_FALSE(p->has_error());
}
TEST_F(ParserImplTest, TypeDecl_Identifier) {
- auto p = parser("A");
+ auto p = parser("A");
- auto t = p->type_decl();
- EXPECT_TRUE(t.matched);
- EXPECT_FALSE(t.errored);
- ASSERT_NE(t.value, nullptr) << p->error();
- auto* type_name = t.value->As<ast::TypeName>();
- ASSERT_NE(type_name, nullptr);
- EXPECT_EQ(p->builder().Symbols().Get("A"), type_name->name);
- EXPECT_EQ(type_name->source.range, (Source::Range{{1u, 1u}, {1u, 2u}}));
+ auto t = p->type_decl();
+ EXPECT_TRUE(t.matched);
+ EXPECT_FALSE(t.errored);
+ ASSERT_NE(t.value, nullptr) << p->error();
+ auto* type_name = t.value->As<ast::TypeName>();
+ ASSERT_NE(type_name, nullptr);
+ EXPECT_EQ(p->builder().Symbols().Get("A"), type_name->name);
+ EXPECT_EQ(type_name->source.range, (Source::Range{{1u, 1u}, {1u, 2u}}));
}
TEST_F(ParserImplTest, TypeDecl_Bool) {
- auto p = parser("bool");
+ auto p = parser("bool");
- auto t = p->type_decl();
- EXPECT_TRUE(t.matched);
- EXPECT_FALSE(t.errored);
- ASSERT_NE(t.value, nullptr) << p->error();
- ASSERT_TRUE(t.value->Is<ast::Bool>());
- EXPECT_EQ(t.value->source.range, (Source::Range{{1u, 1u}, {1u, 5u}}));
+ auto t = p->type_decl();
+ EXPECT_TRUE(t.matched);
+ EXPECT_FALSE(t.errored);
+ ASSERT_NE(t.value, nullptr) << p->error();
+ ASSERT_TRUE(t.value->Is<ast::Bool>());
+ EXPECT_EQ(t.value->source.range, (Source::Range{{1u, 1u}, {1u, 5u}}));
+}
+
+TEST_F(ParserImplTest, TypeDecl_F16) {
+ auto p = parser("f16");
+
+ auto t = p->type_decl();
+ EXPECT_TRUE(t.matched);
+ EXPECT_FALSE(t.errored);
+ ASSERT_NE(t.value, nullptr) << p->error();
+ ASSERT_TRUE(t.value->Is<ast::F16>());
+ EXPECT_EQ(t.value->source.range, (Source::Range{{1u, 1u}, {1u, 4u}}));
}
TEST_F(ParserImplTest, TypeDecl_F32) {
- auto p = parser("f32");
+ auto p = parser("f32");
- auto t = p->type_decl();
- EXPECT_TRUE(t.matched);
- EXPECT_FALSE(t.errored);
- ASSERT_NE(t.value, nullptr) << p->error();
- ASSERT_TRUE(t.value->Is<ast::F32>());
- EXPECT_EQ(t.value->source.range, (Source::Range{{1u, 1u}, {1u, 4u}}));
+ auto t = p->type_decl();
+ EXPECT_TRUE(t.matched);
+ EXPECT_FALSE(t.errored);
+ ASSERT_NE(t.value, nullptr) << p->error();
+ ASSERT_TRUE(t.value->Is<ast::F32>());
+ EXPECT_EQ(t.value->source.range, (Source::Range{{1u, 1u}, {1u, 4u}}));
}
TEST_F(ParserImplTest, TypeDecl_I32) {
- auto p = parser("i32");
+ auto p = parser("i32");
- auto t = p->type_decl();
- EXPECT_TRUE(t.matched);
- EXPECT_FALSE(t.errored);
- ASSERT_NE(t.value, nullptr) << p->error();
- ASSERT_TRUE(t.value->Is<ast::I32>());
- EXPECT_EQ(t.value->source.range, (Source::Range{{1u, 1u}, {1u, 4u}}));
+ auto t = p->type_decl();
+ EXPECT_TRUE(t.matched);
+ EXPECT_FALSE(t.errored);
+ ASSERT_NE(t.value, nullptr) << p->error();
+ ASSERT_TRUE(t.value->Is<ast::I32>());
+ EXPECT_EQ(t.value->source.range, (Source::Range{{1u, 1u}, {1u, 4u}}));
}
TEST_F(ParserImplTest, TypeDecl_U32) {
- auto p = parser("u32");
+ auto p = parser("u32");
- auto t = p->type_decl();
- EXPECT_TRUE(t.matched);
- EXPECT_FALSE(t.errored);
- ASSERT_NE(t.value, nullptr) << p->error();
- ASSERT_TRUE(t.value->Is<ast::U32>());
- EXPECT_EQ(t.value->source.range, (Source::Range{{1u, 1u}, {1u, 4u}}));
+ auto t = p->type_decl();
+ EXPECT_TRUE(t.matched);
+ EXPECT_FALSE(t.errored);
+ ASSERT_NE(t.value, nullptr) << p->error();
+ ASSERT_TRUE(t.value->Is<ast::U32>());
+ EXPECT_EQ(t.value->source.range, (Source::Range{{1u, 1u}, {1u, 4u}}));
}
struct VecData {
- const char* input;
- size_t count;
- Source::Range range;
+ const char* input;
+ size_t count;
+ Source::Range range;
};
inline std::ostream& operator<<(std::ostream& out, VecData data) {
- out << std::string(data.input);
- return out;
+ out << std::string(data.input);
+ return out;
}
class VecTest : public ParserImplTestWithParam<VecData> {};
TEST_P(VecTest, Parse) {
- auto params = GetParam();
- auto p = parser(params.input);
- auto t = p->type_decl();
- EXPECT_TRUE(t.matched);
- EXPECT_FALSE(t.errored);
- ASSERT_NE(t.value, nullptr) << p->error();
- ASSERT_FALSE(p->has_error());
- EXPECT_TRUE(t.value->Is<ast::Vector>());
- EXPECT_EQ(t.value->As<ast::Vector>()->width, params.count);
- EXPECT_EQ(t.value->source.range, params.range);
-}
-INSTANTIATE_TEST_SUITE_P(
- ParserImplTest,
- VecTest,
- testing::Values(VecData{"vec2<f32>", 2, {{1u, 1u}, {1u, 10u}}},
- VecData{"vec3<f32>", 3, {{1u, 1u}, {1u, 10u}}},
- VecData{"vec4<f32>", 4, {{1u, 1u}, {1u, 10u}}}));
+ auto params = GetParam();
+ auto p = parser(params.input);
+ auto t = p->type_decl();
+ EXPECT_TRUE(t.matched);
+ EXPECT_FALSE(t.errored);
+ ASSERT_NE(t.value, nullptr) << p->error();
+ ASSERT_FALSE(p->has_error());
+ EXPECT_TRUE(t.value->Is<ast::Vector>());
+ EXPECT_EQ(t.value->As<ast::Vector>()->width, params.count);
+ EXPECT_EQ(t.value->source.range, params.range);
+}
+INSTANTIATE_TEST_SUITE_P(ParserImplTest,
+ VecTest,
+ testing::Values(VecData{"vec2<f32>", 2, {{1u, 1u}, {1u, 10u}}},
+ VecData{"vec3<f32>", 3, {{1u, 1u}, {1u, 10u}}},
+ VecData{"vec4<f32>", 4, {{1u, 1u}, {1u, 10u}}}));
class VecMissingGreaterThanTest : public ParserImplTestWithParam<VecData> {};
TEST_P(VecMissingGreaterThanTest, Handles_Missing_GreaterThan) {
- auto params = GetParam();
- auto p = parser(params.input);
- auto t = p->type_decl();
- EXPECT_TRUE(t.errored);
- EXPECT_FALSE(t.matched);
- ASSERT_EQ(t.value, nullptr);
- ASSERT_TRUE(p->has_error());
- ASSERT_EQ(p->error(), "1:9: expected '>' for vector");
+ auto params = GetParam();
+ auto p = parser(params.input);
+ auto t = p->type_decl();
+ EXPECT_TRUE(t.errored);
+ EXPECT_FALSE(t.matched);
+ ASSERT_EQ(t.value, nullptr);
+ ASSERT_TRUE(p->has_error());
+ ASSERT_EQ(p->error(), "1:9: expected '>' for vector");
}
INSTANTIATE_TEST_SUITE_P(ParserImplTest,
VecMissingGreaterThanTest,
@@ -140,14 +150,14 @@ INSTANTIATE_TEST_SUITE_P(ParserImplTest,
class VecMissingType : public ParserImplTestWithParam<VecData> {};
TEST_P(VecMissingType, Handles_Missing_Type) {
- auto params = GetParam();
- auto p = parser(params.input);
- auto t = p->type_decl();
- EXPECT_TRUE(t.errored);
- EXPECT_FALSE(t.matched);
- ASSERT_EQ(t.value, nullptr);
- ASSERT_TRUE(p->has_error());
- ASSERT_EQ(p->error(), "1:6: invalid type for vector");
+ auto params = GetParam();
+ auto p = parser(params.input);
+ auto t = p->type_decl();
+ EXPECT_TRUE(t.errored);
+ EXPECT_FALSE(t.matched);
+ ASSERT_EQ(t.value, nullptr);
+ ASSERT_TRUE(p->has_error());
+ ASSERT_EQ(p->error(), "1:6: invalid type for vector");
}
INSTANTIATE_TEST_SUITE_P(ParserImplTest,
VecMissingType,
@@ -156,421 +166,441 @@ INSTANTIATE_TEST_SUITE_P(ParserImplTest,
VecData{"vec4<>", 4, {}}));
TEST_F(ParserImplTest, TypeDecl_Ptr) {
- auto p = parser("ptr<function, f32>");
- auto t = p->type_decl();
- EXPECT_TRUE(t.matched);
- EXPECT_FALSE(t.errored);
- ASSERT_NE(t.value, nullptr) << p->error();
- ASSERT_FALSE(p->has_error());
- ASSERT_TRUE(t.value->Is<ast::Pointer>());
+ auto p = parser("ptr<function, f32>");
+ auto t = p->type_decl();
+ EXPECT_TRUE(t.matched);
+ EXPECT_FALSE(t.errored);
+ ASSERT_NE(t.value, nullptr) << p->error();
+ ASSERT_FALSE(p->has_error());
+ ASSERT_TRUE(t.value->Is<ast::Pointer>());
- auto* ptr = t.value->As<ast::Pointer>();
- ASSERT_TRUE(ptr->type->Is<ast::F32>());
- ASSERT_EQ(ptr->storage_class, ast::StorageClass::kFunction);
- EXPECT_EQ(t.value->source.range, (Source::Range{{1u, 1u}, {1u, 19u}}));
+ auto* ptr = t.value->As<ast::Pointer>();
+ ASSERT_TRUE(ptr->type->Is<ast::F32>());
+ ASSERT_EQ(ptr->storage_class, ast::StorageClass::kFunction);
+ EXPECT_EQ(t.value->source.range, (Source::Range{{1u, 1u}, {1u, 19u}}));
}
TEST_F(ParserImplTest, TypeDecl_Ptr_WithAccess) {
- auto p = parser("ptr<function, f32, read>");
- auto t = p->type_decl();
- EXPECT_TRUE(t.matched);
- EXPECT_FALSE(t.errored);
- ASSERT_NE(t.value, nullptr) << p->error();
- ASSERT_FALSE(p->has_error());
- ASSERT_TRUE(t.value->Is<ast::Pointer>());
+ auto p = parser("ptr<function, f32, read>");
+ auto t = p->type_decl();
+ EXPECT_TRUE(t.matched);
+ EXPECT_FALSE(t.errored);
+ ASSERT_NE(t.value, nullptr) << p->error();
+ ASSERT_FALSE(p->has_error());
+ ASSERT_TRUE(t.value->Is<ast::Pointer>());
- auto* ptr = t.value->As<ast::Pointer>();
- ASSERT_TRUE(ptr->type->Is<ast::F32>());
- ASSERT_EQ(ptr->storage_class, ast::StorageClass::kFunction);
- ASSERT_EQ(ptr->access, ast::Access::kRead);
- EXPECT_EQ(t.value->source.range, (Source::Range{{1u, 1u}, {1u, 25u}}));
+ auto* ptr = t.value->As<ast::Pointer>();
+ ASSERT_TRUE(ptr->type->Is<ast::F32>());
+ ASSERT_EQ(ptr->storage_class, ast::StorageClass::kFunction);
+ ASSERT_EQ(ptr->access, ast::Access::kRead);
+ EXPECT_EQ(t.value->source.range, (Source::Range{{1u, 1u}, {1u, 25u}}));
}
TEST_F(ParserImplTest, TypeDecl_Ptr_ToVec) {
- auto p = parser("ptr<function, vec2<f32>>");
- auto t = p->type_decl();
- EXPECT_TRUE(t.matched);
- EXPECT_FALSE(t.errored);
- ASSERT_NE(t.value, nullptr) << p->error();
- ASSERT_FALSE(p->has_error());
- ASSERT_TRUE(t.value->Is<ast::Pointer>());
+ auto p = parser("ptr<function, vec2<f32>>");
+ auto t = p->type_decl();
+ EXPECT_TRUE(t.matched);
+ EXPECT_FALSE(t.errored);
+ ASSERT_NE(t.value, nullptr) << p->error();
+ ASSERT_FALSE(p->has_error());
+ ASSERT_TRUE(t.value->Is<ast::Pointer>());
- auto* ptr = t.value->As<ast::Pointer>();
- ASSERT_TRUE(ptr->type->Is<ast::Vector>());
- ASSERT_EQ(ptr->storage_class, ast::StorageClass::kFunction);
+ auto* ptr = t.value->As<ast::Pointer>();
+ ASSERT_TRUE(ptr->type->Is<ast::Vector>());
+ ASSERT_EQ(ptr->storage_class, ast::StorageClass::kFunction);
- auto* vec = ptr->type->As<ast::Vector>();
- ASSERT_EQ(vec->width, 2u);
- ASSERT_TRUE(vec->type->Is<ast::F32>());
- EXPECT_EQ(t.value->source.range, (Source::Range{{1u, 1u}, {1u, 25}}));
+ auto* vec = ptr->type->As<ast::Vector>();
+ ASSERT_EQ(vec->width, 2u);
+ ASSERT_TRUE(vec->type->Is<ast::F32>());
+ EXPECT_EQ(t.value->source.range, (Source::Range{{1u, 1u}, {1u, 25}}));
}
TEST_F(ParserImplTest, TypeDecl_Ptr_MissingLessThan) {
- auto p = parser("ptr private, f32>");
- auto t = p->type_decl();
- EXPECT_TRUE(t.errored);
- EXPECT_FALSE(t.matched);
- ASSERT_EQ(t.value, nullptr);
- ASSERT_TRUE(p->has_error());
- ASSERT_EQ(p->error(), "1:5: expected '<' for ptr declaration");
+ auto p = parser("ptr private, f32>");
+ auto t = p->type_decl();
+ EXPECT_TRUE(t.errored);
+ EXPECT_FALSE(t.matched);
+ ASSERT_EQ(t.value, nullptr);
+ ASSERT_TRUE(p->has_error());
+ ASSERT_EQ(p->error(), "1:5: expected '<' for ptr declaration");
}
TEST_F(ParserImplTest, TypeDecl_Ptr_MissingGreaterThanAfterType) {
- auto p = parser("ptr<function, f32");
- auto t = p->type_decl();
- EXPECT_TRUE(t.errored);
- EXPECT_FALSE(t.matched);
- ASSERT_EQ(t.value, nullptr);
- ASSERT_TRUE(p->has_error());
- ASSERT_EQ(p->error(), "1:18: expected '>' for ptr declaration");
+ auto p = parser("ptr<function, f32");
+ auto t = p->type_decl();
+ EXPECT_TRUE(t.errored);
+ EXPECT_FALSE(t.matched);
+ ASSERT_EQ(t.value, nullptr);
+ ASSERT_TRUE(p->has_error());
+ ASSERT_EQ(p->error(), "1:18: expected '>' for ptr declaration");
}
TEST_F(ParserImplTest, TypeDecl_Ptr_MissingGreaterThanAfterAccess) {
- auto p = parser("ptr<function, f32, read");
- auto t = p->type_decl();
- EXPECT_TRUE(t.errored);
- EXPECT_FALSE(t.matched);
- ASSERT_EQ(t.value, nullptr);
- ASSERT_TRUE(p->has_error());
- ASSERT_EQ(p->error(), "1:24: expected '>' for ptr declaration");
+ auto p = parser("ptr<function, f32, read");
+ auto t = p->type_decl();
+ EXPECT_TRUE(t.errored);
+ EXPECT_FALSE(t.matched);
+ ASSERT_EQ(t.value, nullptr);
+ ASSERT_TRUE(p->has_error());
+ ASSERT_EQ(p->error(), "1:24: expected '>' for ptr declaration");
}
TEST_F(ParserImplTest, TypeDecl_Ptr_MissingCommaAfterStorageClass) {
- auto p = parser("ptr<function f32>");
- auto t = p->type_decl();
- EXPECT_TRUE(t.errored);
- EXPECT_FALSE(t.matched);
- ASSERT_EQ(t.value, nullptr);
- ASSERT_TRUE(p->has_error());
- ASSERT_EQ(p->error(), "1:14: expected ',' for ptr declaration");
+ auto p = parser("ptr<function f32>");
+ auto t = p->type_decl();
+ EXPECT_TRUE(t.errored);
+ EXPECT_FALSE(t.matched);
+ ASSERT_EQ(t.value, nullptr);
+ ASSERT_TRUE(p->has_error());
+ ASSERT_EQ(p->error(), "1:14: expected ',' for ptr declaration");
}
TEST_F(ParserImplTest, TypeDecl_Ptr_MissingCommaAfterAccess) {
- auto p = parser("ptr<function, f32 read>");
- auto t = p->type_decl();
- EXPECT_TRUE(t.errored);
- EXPECT_FALSE(t.matched);
- ASSERT_EQ(t.value, nullptr);
- ASSERT_TRUE(p->has_error());
- ASSERT_EQ(p->error(), "1:19: expected '>' for ptr declaration");
+ auto p = parser("ptr<function, f32 read>");
+ auto t = p->type_decl();
+ EXPECT_TRUE(t.errored);
+ EXPECT_FALSE(t.matched);
+ ASSERT_EQ(t.value, nullptr);
+ ASSERT_TRUE(p->has_error());
+ ASSERT_EQ(p->error(), "1:19: expected '>' for ptr declaration");
}
TEST_F(ParserImplTest, TypeDecl_Ptr_MissingStorageClass) {
- auto p = parser("ptr<, f32>");
- auto t = p->type_decl();
- EXPECT_TRUE(t.errored);
- EXPECT_FALSE(t.matched);
- ASSERT_EQ(t.value, nullptr);
- ASSERT_TRUE(p->has_error());
- ASSERT_EQ(p->error(), "1:5: invalid storage class for ptr declaration");
+ auto p = parser("ptr<, f32>");
+ auto t = p->type_decl();
+ EXPECT_TRUE(t.errored);
+ EXPECT_FALSE(t.matched);
+ ASSERT_EQ(t.value, nullptr);
+ ASSERT_TRUE(p->has_error());
+ ASSERT_EQ(p->error(), "1:5: invalid storage class for ptr declaration");
}
TEST_F(ParserImplTest, TypeDecl_Ptr_MissingType) {
- auto p = parser("ptr<function,>");
- auto t = p->type_decl();
- EXPECT_TRUE(t.errored);
- EXPECT_FALSE(t.matched);
- ASSERT_EQ(t.value, nullptr);
- ASSERT_TRUE(p->has_error());
- ASSERT_EQ(p->error(), "1:14: invalid type for ptr declaration");
+ auto p = parser("ptr<function,>");
+ auto t = p->type_decl();
+ EXPECT_TRUE(t.errored);
+ EXPECT_FALSE(t.matched);
+ ASSERT_EQ(t.value, nullptr);
+ ASSERT_TRUE(p->has_error());
+ ASSERT_EQ(p->error(), "1:14: invalid type for ptr declaration");
}
TEST_F(ParserImplTest, TypeDecl_Ptr_MissingAccess) {
- auto p = parser("ptr<function, i32, >");
- auto t = p->type_decl();
- EXPECT_TRUE(t.errored);
- EXPECT_FALSE(t.matched);
- ASSERT_EQ(t.value, nullptr);
- ASSERT_TRUE(p->has_error());
- ASSERT_EQ(p->error(), "1:20: expected identifier for access control");
+ auto p = parser("ptr<function, i32, >");
+ auto t = p->type_decl();
+ EXPECT_TRUE(t.errored);
+ EXPECT_FALSE(t.matched);
+ ASSERT_EQ(t.value, nullptr);
+ ASSERT_TRUE(p->has_error());
+ ASSERT_EQ(p->error(), "1:20: expected identifier for access control");
}
TEST_F(ParserImplTest, TypeDecl_Ptr_MissingParams) {
- auto p = parser("ptr<>");
- auto t = p->type_decl();
- EXPECT_TRUE(t.errored);
- EXPECT_FALSE(t.matched);
- ASSERT_EQ(t.value, nullptr);
- ASSERT_TRUE(p->has_error());
- ASSERT_EQ(p->error(), "1:5: invalid storage class for ptr declaration");
+ auto p = parser("ptr<>");
+ auto t = p->type_decl();
+ EXPECT_TRUE(t.errored);
+ EXPECT_FALSE(t.matched);
+ ASSERT_EQ(t.value, nullptr);
+ ASSERT_TRUE(p->has_error());
+ ASSERT_EQ(p->error(), "1:5: invalid storage class for ptr declaration");
}
TEST_F(ParserImplTest, TypeDecl_Ptr_BadStorageClass) {
- auto p = parser("ptr<unknown, f32>");
- auto t = p->type_decl();
- EXPECT_TRUE(t.errored);
- EXPECT_FALSE(t.matched);
- ASSERT_EQ(t.value, nullptr);
- ASSERT_TRUE(p->has_error());
- ASSERT_EQ(p->error(), "1:5: invalid storage class for ptr declaration");
+ auto p = parser("ptr<unknown, f32>");
+ auto t = p->type_decl();
+ EXPECT_TRUE(t.errored);
+ EXPECT_FALSE(t.matched);
+ ASSERT_EQ(t.value, nullptr);
+ ASSERT_TRUE(p->has_error());
+ ASSERT_EQ(p->error(), "1:5: invalid storage class for ptr declaration");
}
TEST_F(ParserImplTest, TypeDecl_Ptr_BadAccess) {
- auto p = parser("ptr<function, i32, unknown>");
- auto t = p->type_decl();
- EXPECT_TRUE(t.errored);
- EXPECT_FALSE(t.matched);
- ASSERT_EQ(t.value, nullptr);
- ASSERT_TRUE(p->has_error());
- ASSERT_EQ(p->error(), "1:20: invalid value for access control");
+ auto p = parser("ptr<function, i32, unknown>");
+ auto t = p->type_decl();
+ EXPECT_TRUE(t.errored);
+ EXPECT_FALSE(t.matched);
+ ASSERT_EQ(t.value, nullptr);
+ ASSERT_TRUE(p->has_error());
+ ASSERT_EQ(p->error(), "1:20: invalid value for access control");
}
TEST_F(ParserImplTest, TypeDecl_Atomic) {
- auto p = parser("atomic<f32>");
- auto t = p->type_decl();
- EXPECT_TRUE(t.matched);
- EXPECT_FALSE(t.errored);
- ASSERT_NE(t.value, nullptr) << p->error();
- ASSERT_FALSE(p->has_error());
- ASSERT_TRUE(t.value->Is<ast::Atomic>());
+ auto p = parser("atomic<f32>");
+ auto t = p->type_decl();
+ EXPECT_TRUE(t.matched);
+ EXPECT_FALSE(t.errored);
+ ASSERT_NE(t.value, nullptr) << p->error();
+ ASSERT_FALSE(p->has_error());
+ ASSERT_TRUE(t.value->Is<ast::Atomic>());
- auto* atomic = t.value->As<ast::Atomic>();
- ASSERT_TRUE(atomic->type->Is<ast::F32>());
- EXPECT_EQ(t.value->source.range, (Source::Range{{1u, 1u}, {1u, 12u}}));
+ auto* atomic = t.value->As<ast::Atomic>();
+ ASSERT_TRUE(atomic->type->Is<ast::F32>());
+ EXPECT_EQ(t.value->source.range, (Source::Range{{1u, 1u}, {1u, 12u}}));
}
TEST_F(ParserImplTest, TypeDecl_Atomic_ToVec) {
- auto p = parser("atomic<vec2<f32>>");
- auto t = p->type_decl();
- EXPECT_TRUE(t.matched);
- EXPECT_FALSE(t.errored);
- ASSERT_NE(t.value, nullptr) << p->error();
- ASSERT_FALSE(p->has_error());
- ASSERT_TRUE(t.value->Is<ast::Atomic>());
+ auto p = parser("atomic<vec2<f32>>");
+ auto t = p->type_decl();
+ EXPECT_TRUE(t.matched);
+ EXPECT_FALSE(t.errored);
+ ASSERT_NE(t.value, nullptr) << p->error();
+ ASSERT_FALSE(p->has_error());
+ ASSERT_TRUE(t.value->Is<ast::Atomic>());
- auto* atomic = t.value->As<ast::Atomic>();
- ASSERT_TRUE(atomic->type->Is<ast::Vector>());
+ auto* atomic = t.value->As<ast::Atomic>();
+ ASSERT_TRUE(atomic->type->Is<ast::Vector>());
- auto* vec = atomic->type->As<ast::Vector>();
- ASSERT_EQ(vec->width, 2u);
- ASSERT_TRUE(vec->type->Is<ast::F32>());
- EXPECT_EQ(t.value->source.range, (Source::Range{{1u, 1u}, {1u, 18u}}));
+ auto* vec = atomic->type->As<ast::Vector>();
+ ASSERT_EQ(vec->width, 2u);
+ ASSERT_TRUE(vec->type->Is<ast::F32>());
+ EXPECT_EQ(t.value->source.range, (Source::Range{{1u, 1u}, {1u, 18u}}));
}
TEST_F(ParserImplTest, TypeDecl_Atomic_MissingLessThan) {
- auto p = parser("atomic f32>");
- auto t = p->type_decl();
- EXPECT_TRUE(t.errored);
- EXPECT_FALSE(t.matched);
- ASSERT_EQ(t.value, nullptr);
- ASSERT_TRUE(p->has_error());
- ASSERT_EQ(p->error(), "1:8: expected '<' for atomic declaration");
+ auto p = parser("atomic f32>");
+ auto t = p->type_decl();
+ EXPECT_TRUE(t.errored);
+ EXPECT_FALSE(t.matched);
+ ASSERT_EQ(t.value, nullptr);
+ ASSERT_TRUE(p->has_error());
+ ASSERT_EQ(p->error(), "1:8: expected '<' for atomic declaration");
}
TEST_F(ParserImplTest, TypeDecl_Atomic_MissingGreaterThan) {
- auto p = parser("atomic<f32");
- auto t = p->type_decl();
- EXPECT_TRUE(t.errored);
- EXPECT_FALSE(t.matched);
- ASSERT_EQ(t.value, nullptr);
- ASSERT_TRUE(p->has_error());
- ASSERT_EQ(p->error(), "1:11: expected '>' for atomic declaration");
+ auto p = parser("atomic<f32");
+ auto t = p->type_decl();
+ EXPECT_TRUE(t.errored);
+ EXPECT_FALSE(t.matched);
+ ASSERT_EQ(t.value, nullptr);
+ ASSERT_TRUE(p->has_error());
+ ASSERT_EQ(p->error(), "1:11: expected '>' for atomic declaration");
}
TEST_F(ParserImplTest, TypeDecl_Atomic_MissingType) {
- auto p = parser("atomic<>");
- auto t = p->type_decl();
- EXPECT_TRUE(t.errored);
- EXPECT_FALSE(t.matched);
- ASSERT_EQ(t.value, nullptr);
- ASSERT_TRUE(p->has_error());
- ASSERT_EQ(p->error(), "1:8: invalid type for atomic declaration");
+ auto p = parser("atomic<>");
+ auto t = p->type_decl();
+ EXPECT_TRUE(t.errored);
+ EXPECT_FALSE(t.matched);
+ ASSERT_EQ(t.value, nullptr);
+ ASSERT_TRUE(p->has_error());
+ ASSERT_EQ(p->error(), "1:8: invalid type for atomic declaration");
+}
+
+TEST_F(ParserImplTest, TypeDecl_Array_AbstractIntLiteralSize) {
+ auto p = parser("array<f32, 5>");
+ auto t = p->type_decl();
+ EXPECT_TRUE(t.matched);
+ EXPECT_FALSE(t.errored);
+ ASSERT_NE(t.value, nullptr) << p->error();
+ ASSERT_FALSE(p->has_error());
+ ASSERT_TRUE(t.value->Is<ast::Array>());
+
+ auto* a = t.value->As<ast::Array>();
+ ASSERT_FALSE(a->IsRuntimeArray());
+ ASSERT_TRUE(a->type->Is<ast::F32>());
+ EXPECT_EQ(a->attributes.size(), 0u);
+ EXPECT_EQ(t.value->source.range, (Source::Range{{1u, 1u}, {1u, 14u}}));
+
+ auto* size = a->count->As<ast::IntLiteralExpression>();
+ ASSERT_NE(size, nullptr);
+ EXPECT_EQ(size->value, 5);
+ EXPECT_EQ(size->suffix, ast::IntLiteralExpression::Suffix::kNone);
}
TEST_F(ParserImplTest, TypeDecl_Array_SintLiteralSize) {
- auto p = parser("array<f32, 5>");
- auto t = p->type_decl();
- EXPECT_TRUE(t.matched);
- EXPECT_FALSE(t.errored);
- ASSERT_NE(t.value, nullptr) << p->error();
- ASSERT_FALSE(p->has_error());
- ASSERT_TRUE(t.value->Is<ast::Array>());
-
- auto* a = t.value->As<ast::Array>();
- ASSERT_FALSE(a->IsRuntimeArray());
- ASSERT_TRUE(a->type->Is<ast::F32>());
- EXPECT_EQ(a->attributes.size(), 0u);
- EXPECT_EQ(t.value->source.range, (Source::Range{{1u, 1u}, {1u, 14u}}));
-
- auto* size = a->count->As<ast::SintLiteralExpression>();
- ASSERT_NE(size, nullptr);
- EXPECT_EQ(size->ValueAsI32(), 5);
+ auto p = parser("array<f32, 5i>");
+ auto t = p->type_decl();
+ EXPECT_TRUE(t.matched);
+ EXPECT_FALSE(t.errored);
+ ASSERT_NE(t.value, nullptr) << p->error();
+ ASSERT_FALSE(p->has_error());
+ ASSERT_TRUE(t.value->Is<ast::Array>());
+
+ auto* a = t.value->As<ast::Array>();
+ ASSERT_FALSE(a->IsRuntimeArray());
+ ASSERT_TRUE(a->type->Is<ast::F32>());
+ EXPECT_EQ(a->attributes.size(), 0u);
+ EXPECT_EQ(t.value->source.range, (Source::Range{{1u, 1u}, {1u, 15u}}));
+
+ auto* size = a->count->As<ast::IntLiteralExpression>();
+ ASSERT_NE(size, nullptr);
+ EXPECT_EQ(size->value, 5);
+ EXPECT_EQ(size->suffix, ast::IntLiteralExpression::Suffix::kI);
}
TEST_F(ParserImplTest, TypeDecl_Array_UintLiteralSize) {
- auto p = parser("array<f32, 5u>");
- auto t = p->type_decl();
- EXPECT_TRUE(t.matched);
- EXPECT_FALSE(t.errored);
- ASSERT_NE(t.value, nullptr) << p->error();
- ASSERT_FALSE(p->has_error());
- ASSERT_TRUE(t.value->Is<ast::Array>());
+ auto p = parser("array<f32, 5u>");
+ auto t = p->type_decl();
+ EXPECT_TRUE(t.matched);
+ EXPECT_FALSE(t.errored);
+ ASSERT_NE(t.value, nullptr) << p->error();
+ ASSERT_FALSE(p->has_error());
+ ASSERT_TRUE(t.value->Is<ast::Array>());
- auto* a = t.value->As<ast::Array>();
- ASSERT_FALSE(a->IsRuntimeArray());
- ASSERT_TRUE(a->type->Is<ast::F32>());
- EXPECT_EQ(a->attributes.size(), 0u);
- EXPECT_EQ(t.value->source.range, (Source::Range{{1u, 1u}, {1u, 15u}}));
+ auto* a = t.value->As<ast::Array>();
+ ASSERT_FALSE(a->IsRuntimeArray());
+ ASSERT_TRUE(a->type->Is<ast::F32>());
+ EXPECT_EQ(a->attributes.size(), 0u);
+ EXPECT_EQ(t.value->source.range, (Source::Range{{1u, 1u}, {1u, 15u}}));
- auto* size = a->count->As<ast::UintLiteralExpression>();
- ASSERT_NE(size, nullptr);
- EXPECT_EQ(size->ValueAsU32(), 5u);
+ auto* size = a->count->As<ast::IntLiteralExpression>();
+ ASSERT_NE(size, nullptr);
+ EXPECT_EQ(size->suffix, ast::IntLiteralExpression::Suffix::kU);
}
TEST_F(ParserImplTest, TypeDecl_Array_ConstantSize) {
- auto p = parser("array<f32, size>");
- auto t = p->type_decl();
- EXPECT_TRUE(t.matched);
- EXPECT_FALSE(t.errored);
- ASSERT_NE(t.value, nullptr) << p->error();
- ASSERT_FALSE(p->has_error());
- ASSERT_TRUE(t.value->Is<ast::Array>());
+ auto p = parser("array<f32, size>");
+ auto t = p->type_decl();
+ EXPECT_TRUE(t.matched);
+ EXPECT_FALSE(t.errored);
+ ASSERT_NE(t.value, nullptr) << p->error();
+ ASSERT_FALSE(p->has_error());
+ ASSERT_TRUE(t.value->Is<ast::Array>());
- auto* a = t.value->As<ast::Array>();
- ASSERT_FALSE(a->IsRuntimeArray());
- ASSERT_TRUE(a->type->Is<ast::F32>());
- EXPECT_EQ(a->attributes.size(), 0u);
- EXPECT_EQ(t.value->source.range, (Source::Range{{1u, 1u}, {1u, 17u}}));
+ auto* a = t.value->As<ast::Array>();
+ ASSERT_FALSE(a->IsRuntimeArray());
+ ASSERT_TRUE(a->type->Is<ast::F32>());
+ EXPECT_EQ(a->attributes.size(), 0u);
+ EXPECT_EQ(t.value->source.range, (Source::Range{{1u, 1u}, {1u, 17u}}));
- auto* count_expr = a->count->As<ast::IdentifierExpression>();
- ASSERT_NE(count_expr, nullptr);
- EXPECT_EQ(p->builder().Symbols().NameFor(count_expr->symbol), "size");
+ auto* count_expr = a->count->As<ast::IdentifierExpression>();
+ ASSERT_NE(count_expr, nullptr);
+ EXPECT_EQ(p->builder().Symbols().NameFor(count_expr->symbol), "size");
}
TEST_F(ParserImplTest, TypeDecl_Array_Runtime) {
- auto p = parser("array<u32>");
- auto t = p->type_decl();
- EXPECT_TRUE(t.matched);
- EXPECT_FALSE(t.errored);
- ASSERT_NE(t.value, nullptr) << p->error();
- ASSERT_FALSE(p->has_error());
- ASSERT_TRUE(t.value->Is<ast::Array>());
+ auto p = parser("array<u32>");
+ auto t = p->type_decl();
+ EXPECT_TRUE(t.matched);
+ EXPECT_FALSE(t.errored);
+ ASSERT_NE(t.value, nullptr) << p->error();
+ ASSERT_FALSE(p->has_error());
+ ASSERT_TRUE(t.value->Is<ast::Array>());
- auto* a = t.value->As<ast::Array>();
- ASSERT_TRUE(a->IsRuntimeArray());
- ASSERT_TRUE(a->type->Is<ast::U32>());
- EXPECT_EQ(t.value->source.range, (Source::Range{{1u, 1u}, {1u, 11u}}));
+ auto* a = t.value->As<ast::Array>();
+ ASSERT_TRUE(a->IsRuntimeArray());
+ ASSERT_TRUE(a->type->Is<ast::U32>());
+ EXPECT_EQ(t.value->source.range, (Source::Range{{1u, 1u}, {1u, 11u}}));
}
TEST_F(ParserImplTest, TypeDecl_Array_Runtime_Vec) {
- auto p = parser("array<vec4<u32>>");
- auto t = p->type_decl();
- EXPECT_TRUE(t.matched);
- EXPECT_FALSE(t.errored);
- ASSERT_NE(t.value, nullptr) << p->error();
- ASSERT_FALSE(p->has_error());
- ASSERT_TRUE(t.value->Is<ast::Array>());
-
- auto* a = t.value->As<ast::Array>();
- ASSERT_TRUE(a->IsRuntimeArray());
- ASSERT_TRUE(a->type->Is<ast::Vector>());
- EXPECT_EQ(a->type->As<ast::Vector>()->width, 4u);
- EXPECT_TRUE(a->type->As<ast::Vector>()->type->Is<ast::U32>());
- EXPECT_EQ(t.value->source.range, (Source::Range{{1u, 1u}, {1u, 17u}}));
+ auto p = parser("array<vec4<u32>>");
+ auto t = p->type_decl();
+ EXPECT_TRUE(t.matched);
+ EXPECT_FALSE(t.errored);
+ ASSERT_NE(t.value, nullptr) << p->error();
+ ASSERT_FALSE(p->has_error());
+ ASSERT_TRUE(t.value->Is<ast::Array>());
+
+ auto* a = t.value->As<ast::Array>();
+ ASSERT_TRUE(a->IsRuntimeArray());
+ ASSERT_TRUE(a->type->Is<ast::Vector>());
+ EXPECT_EQ(a->type->As<ast::Vector>()->width, 4u);
+ EXPECT_TRUE(a->type->As<ast::Vector>()->type->Is<ast::U32>());
+ EXPECT_EQ(t.value->source.range, (Source::Range{{1u, 1u}, {1u, 17u}}));
}
TEST_F(ParserImplTest, TypeDecl_Array_BadSize) {
- auto p = parser("array<f32, !>");
- auto t = p->type_decl();
- EXPECT_TRUE(t.errored);
- EXPECT_FALSE(t.matched);
- ASSERT_EQ(t.value, nullptr);
- ASSERT_TRUE(p->has_error());
- ASSERT_EQ(p->error(), "1:12: expected array size expression");
+ auto p = parser("array<f32, !>");
+ auto t = p->type_decl();
+ EXPECT_TRUE(t.errored);
+ EXPECT_FALSE(t.matched);
+ ASSERT_EQ(t.value, nullptr);
+ ASSERT_TRUE(p->has_error());
+ ASSERT_EQ(p->error(), "1:12: expected array size expression");
}
TEST_F(ParserImplTest, TypeDecl_Array_MissingSize) {
- auto p = parser("array<f32,>");
- auto t = p->type_decl();
- EXPECT_TRUE(t.errored);
- EXPECT_FALSE(t.matched);
- ASSERT_EQ(t.value, nullptr);
- ASSERT_TRUE(p->has_error());
- ASSERT_EQ(p->error(), "1:11: expected array size expression");
+ auto p = parser("array<f32,>");
+ auto t = p->type_decl();
+ EXPECT_TRUE(t.errored);
+ EXPECT_FALSE(t.matched);
+ ASSERT_EQ(t.value, nullptr);
+ ASSERT_TRUE(p->has_error());
+ ASSERT_EQ(p->error(), "1:11: expected array size expression");
}
TEST_F(ParserImplTest, TypeDecl_Array_MissingLessThan) {
- auto p = parser("array f32>");
- auto t = p->type_decl();
- EXPECT_TRUE(t.errored);
- EXPECT_FALSE(t.matched);
- ASSERT_EQ(t.value, nullptr);
- ASSERT_TRUE(p->has_error());
- ASSERT_EQ(p->error(), "1:7: expected '<' for array declaration");
+ auto p = parser("array f32>");
+ auto t = p->type_decl();
+ EXPECT_TRUE(t.errored);
+ EXPECT_FALSE(t.matched);
+ ASSERT_EQ(t.value, nullptr);
+ ASSERT_TRUE(p->has_error());
+ ASSERT_EQ(p->error(), "1:7: expected '<' for array declaration");
}
TEST_F(ParserImplTest, TypeDecl_Array_MissingGreaterThan) {
- auto p = parser("array<f32");
- auto t = p->type_decl();
- EXPECT_TRUE(t.errored);
- EXPECT_FALSE(t.matched);
- ASSERT_EQ(t.value, nullptr);
- ASSERT_TRUE(p->has_error());
- ASSERT_EQ(p->error(), "1:10: expected '>' for array declaration");
+ auto p = parser("array<f32");
+ auto t = p->type_decl();
+ EXPECT_TRUE(t.errored);
+ EXPECT_FALSE(t.matched);
+ ASSERT_EQ(t.value, nullptr);
+ ASSERT_TRUE(p->has_error());
+ ASSERT_EQ(p->error(), "1:10: expected '>' for array declaration");
}
TEST_F(ParserImplTest, TypeDecl_Array_MissingComma) {
- auto p = parser("array<f32 3>");
- auto t = p->type_decl();
- EXPECT_TRUE(t.errored);
- EXPECT_FALSE(t.matched);
- ASSERT_EQ(t.value, nullptr);
- ASSERT_TRUE(p->has_error());
- ASSERT_EQ(p->error(), "1:11: expected '>' for array declaration");
+ auto p = parser("array<f32 3>");
+ auto t = p->type_decl();
+ EXPECT_TRUE(t.errored);
+ EXPECT_FALSE(t.matched);
+ ASSERT_EQ(t.value, nullptr);
+ ASSERT_TRUE(p->has_error());
+ ASSERT_EQ(p->error(), "1:11: expected '>' for array declaration");
}
struct MatrixData {
- const char* input;
- size_t columns;
- size_t rows;
- Source::Range range;
+ const char* input;
+ size_t columns;
+ size_t rows;
+ Source::Range range;
};
inline std::ostream& operator<<(std::ostream& out, MatrixData data) {
- out << std::string(data.input);
- return out;
+ out << std::string(data.input);
+ return out;
}
class MatrixTest : public ParserImplTestWithParam<MatrixData> {};
TEST_P(MatrixTest, Parse) {
- auto params = GetParam();
- auto p = parser(params.input);
- auto t = p->type_decl();
- EXPECT_TRUE(t.matched);
- EXPECT_FALSE(t.errored);
- ASSERT_NE(t.value, nullptr) << p->error();
- ASSERT_FALSE(p->has_error());
- EXPECT_TRUE(t.value->Is<ast::Matrix>());
- auto* mat = t.value->As<ast::Matrix>();
- EXPECT_EQ(mat->rows, params.rows);
- EXPECT_EQ(mat->columns, params.columns);
- EXPECT_EQ(t.value->source.range, params.range);
-}
-INSTANTIATE_TEST_SUITE_P(
- ParserImplTest,
- MatrixTest,
- testing::Values(MatrixData{"mat2x2<f32>", 2, 2, {{1u, 1u}, {1u, 12u}}},
- MatrixData{"mat2x3<f32>", 2, 3, {{1u, 1u}, {1u, 12u}}},
- MatrixData{"mat2x4<f32>", 2, 4, {{1u, 1u}, {1u, 12u}}},
- MatrixData{"mat3x2<f32>", 3, 2, {{1u, 1u}, {1u, 12u}}},
- MatrixData{"mat3x3<f32>", 3, 3, {{1u, 1u}, {1u, 12u}}},
- MatrixData{"mat3x4<f32>", 3, 4, {{1u, 1u}, {1u, 12u}}},
- MatrixData{"mat4x2<f32>", 4, 2, {{1u, 1u}, {1u, 12u}}},
- MatrixData{"mat4x3<f32>", 4, 3, {{1u, 1u}, {1u, 12u}}},
- MatrixData{"mat4x4<f32>", 4, 4, {{1u, 1u}, {1u, 12u}}}));
-
-class MatrixMissingGreaterThanTest
- : public ParserImplTestWithParam<MatrixData> {};
+ auto params = GetParam();
+ auto p = parser(params.input);
+ auto t = p->type_decl();
+ EXPECT_TRUE(t.matched);
+ EXPECT_FALSE(t.errored);
+ ASSERT_NE(t.value, nullptr) << p->error();
+ ASSERT_FALSE(p->has_error());
+ EXPECT_TRUE(t.value->Is<ast::Matrix>());
+ auto* mat = t.value->As<ast::Matrix>();
+ EXPECT_EQ(mat->rows, params.rows);
+ EXPECT_EQ(mat->columns, params.columns);
+ EXPECT_EQ(t.value->source.range, params.range);
+}
+INSTANTIATE_TEST_SUITE_P(ParserImplTest,
+ MatrixTest,
+ testing::Values(MatrixData{"mat2x2<f32>", 2, 2, {{1u, 1u}, {1u, 12u}}},
+ MatrixData{"mat2x3<f32>", 2, 3, {{1u, 1u}, {1u, 12u}}},
+ MatrixData{"mat2x4<f32>", 2, 4, {{1u, 1u}, {1u, 12u}}},
+ MatrixData{"mat3x2<f32>", 3, 2, {{1u, 1u}, {1u, 12u}}},
+ MatrixData{"mat3x3<f32>", 3, 3, {{1u, 1u}, {1u, 12u}}},
+ MatrixData{"mat3x4<f32>", 3, 4, {{1u, 1u}, {1u, 12u}}},
+ MatrixData{"mat4x2<f32>", 4, 2, {{1u, 1u}, {1u, 12u}}},
+ MatrixData{"mat4x3<f32>", 4, 3, {{1u, 1u}, {1u, 12u}}},
+ MatrixData{"mat4x4<f32>", 4, 4, {{1u, 1u}, {1u, 12u}}}));
+
+class MatrixMissingGreaterThanTest : public ParserImplTestWithParam<MatrixData> {};
TEST_P(MatrixMissingGreaterThanTest, Handles_Missing_GreaterThan) {
- auto params = GetParam();
- auto p = parser(params.input);
- auto t = p->type_decl();
- EXPECT_TRUE(t.errored);
- EXPECT_FALSE(t.matched);
- ASSERT_EQ(t.value, nullptr);
- ASSERT_TRUE(p->has_error());
- ASSERT_EQ(p->error(), "1:11: expected '>' for matrix");
+ auto params = GetParam();
+ auto p = parser(params.input);
+ auto t = p->type_decl();
+ EXPECT_TRUE(t.errored);
+ EXPECT_FALSE(t.matched);
+ ASSERT_EQ(t.value, nullptr);
+ ASSERT_TRUE(p->has_error());
+ ASSERT_EQ(p->error(), "1:11: expected '>' for matrix");
}
INSTANTIATE_TEST_SUITE_P(ParserImplTest,
MatrixMissingGreaterThanTest,
@@ -587,14 +617,14 @@ INSTANTIATE_TEST_SUITE_P(ParserImplTest,
class MatrixMissingType : public ParserImplTestWithParam<MatrixData> {};
TEST_P(MatrixMissingType, Handles_Missing_Type) {
- auto params = GetParam();
- auto p = parser(params.input);
- auto t = p->type_decl();
- EXPECT_TRUE(t.errored);
- EXPECT_FALSE(t.matched);
- ASSERT_EQ(t.value, nullptr);
- ASSERT_TRUE(p->has_error());
- ASSERT_EQ(p->error(), "1:8: invalid type for matrix");
+ auto params = GetParam();
+ auto p = parser(params.input);
+ auto t = p->type_decl();
+ EXPECT_TRUE(t.errored);
+ EXPECT_FALSE(t.matched);
+ ASSERT_EQ(t.value, nullptr);
+ ASSERT_TRUE(p->has_error());
+ ASSERT_EQ(p->error(), "1:8: invalid type for matrix");
}
INSTANTIATE_TEST_SUITE_P(ParserImplTest,
MatrixMissingType,
@@ -609,28 +639,28 @@ INSTANTIATE_TEST_SUITE_P(ParserImplTest,
MatrixData{"mat4x4<>", 4, 4, {}}));
TEST_F(ParserImplTest, TypeDecl_Sampler) {
- auto p = parser("sampler");
+ auto p = parser("sampler");
- auto t = p->type_decl();
- EXPECT_TRUE(t.matched);
- EXPECT_FALSE(t.errored);
- ASSERT_NE(t.value, nullptr) << p->error();
- ASSERT_TRUE(t.value->Is<ast::Sampler>());
- ASSERT_FALSE(t.value->As<ast::Sampler>()->IsComparison());
- EXPECT_EQ(t.value->source.range, (Source::Range{{1u, 1u}, {1u, 8u}}));
+ auto t = p->type_decl();
+ EXPECT_TRUE(t.matched);
+ EXPECT_FALSE(t.errored);
+ ASSERT_NE(t.value, nullptr) << p->error();
+ ASSERT_TRUE(t.value->Is<ast::Sampler>());
+ ASSERT_FALSE(t.value->As<ast::Sampler>()->IsComparison());
+ EXPECT_EQ(t.value->source.range, (Source::Range{{1u, 1u}, {1u, 8u}}));
}
TEST_F(ParserImplTest, TypeDecl_Texture) {
- auto p = parser("texture_cube<f32>");
-
- auto t = p->type_decl();
- EXPECT_TRUE(t.matched);
- EXPECT_FALSE(t.errored);
- ASSERT_NE(t.value, nullptr);
- ASSERT_TRUE(t.value->Is<ast::Texture>());
- ASSERT_TRUE(t.value->Is<ast::SampledTexture>());
- ASSERT_TRUE(t.value->As<ast::SampledTexture>()->type->Is<ast::F32>());
- EXPECT_EQ(t.value->source.range, (Source::Range{{1u, 1u}, {1u, 18u}}));
+ auto p = parser("texture_cube<f32>");
+
+ auto t = p->type_decl();
+ EXPECT_TRUE(t.matched);
+ EXPECT_FALSE(t.errored);
+ ASSERT_NE(t.value, nullptr);
+ ASSERT_TRUE(t.value->Is<ast::Texture>());
+ ASSERT_TRUE(t.value->Is<ast::SampledTexture>());
+ ASSERT_TRUE(t.value->As<ast::SampledTexture>()->type->Is<ast::F32>());
+ EXPECT_EQ(t.value->source.range, (Source::Range{{1u, 1u}, {1u, 18u}}));
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_unary_expression_test.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_unary_expression_test.cc
index 0ebefa41b97..184de66de5d 100644
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_unary_expression_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_unary_expression_test.cc
@@ -19,169 +19,177 @@ namespace tint::reader::wgsl {
namespace {
TEST_F(ParserImplTest, UnaryExpression_Postix) {
- auto p = parser("a[2]");
- auto e = p->unary_expression();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
-
- ASSERT_TRUE(e->Is<ast::IndexAccessorExpression>());
- auto* idx = e->As<ast::IndexAccessorExpression>();
- ASSERT_TRUE(idx->object->Is<ast::IdentifierExpression>());
- auto* ident = idx->object->As<ast::IdentifierExpression>();
- EXPECT_EQ(ident->symbol, p->builder().Symbols().Get("a"));
-
- ASSERT_TRUE(idx->index->Is<ast::SintLiteralExpression>());
- ASSERT_EQ(idx->index->As<ast::SintLiteralExpression>()->value, 2);
+ auto p = parser("a[2]");
+ auto e = p->unary_expression();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
+
+ ASSERT_TRUE(e->Is<ast::IndexAccessorExpression>());
+ auto* idx = e->As<ast::IndexAccessorExpression>();
+ ASSERT_TRUE(idx->object->Is<ast::IdentifierExpression>());
+ auto* ident = idx->object->As<ast::IdentifierExpression>();
+ EXPECT_EQ(ident->symbol, p->builder().Symbols().Get("a"));
+
+ ASSERT_TRUE(idx->index->Is<ast::IntLiteralExpression>());
+ ASSERT_EQ(idx->index->As<ast::IntLiteralExpression>()->value, 2);
+ ASSERT_EQ(idx->index->As<ast::IntLiteralExpression>()->suffix,
+ ast::IntLiteralExpression::Suffix::kNone);
}
TEST_F(ParserImplTest, UnaryExpression_Minus) {
- auto p = parser("- 1");
- auto e = p->unary_expression();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
- ASSERT_TRUE(e->Is<ast::UnaryOpExpression>());
-
- auto* u = e->As<ast::UnaryOpExpression>();
- ASSERT_EQ(u->op, ast::UnaryOp::kNegation);
-
- ASSERT_TRUE(u->expr->Is<ast::SintLiteralExpression>());
- EXPECT_EQ(u->expr->As<ast::SintLiteralExpression>()->value, 1);
+ auto p = parser("- 1");
+ auto e = p->unary_expression();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
+ ASSERT_TRUE(e->Is<ast::UnaryOpExpression>());
+
+ auto* u = e->As<ast::UnaryOpExpression>();
+ ASSERT_EQ(u->op, ast::UnaryOp::kNegation);
+
+ ASSERT_TRUE(u->expr->Is<ast::IntLiteralExpression>());
+ EXPECT_EQ(u->expr->As<ast::IntLiteralExpression>()->value, 1);
+ ASSERT_EQ(u->expr->As<ast::IntLiteralExpression>()->suffix,
+ ast::IntLiteralExpression::Suffix::kNone);
}
TEST_F(ParserImplTest, UnaryExpression_AddressOf) {
- auto p = parser("&x");
- auto e = p->unary_expression();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
- ASSERT_TRUE(e->Is<ast::UnaryOpExpression>());
-
- auto* u = e->As<ast::UnaryOpExpression>();
- EXPECT_EQ(u->op, ast::UnaryOp::kAddressOf);
- EXPECT_TRUE(u->expr->Is<ast::IdentifierExpression>());
+ auto p = parser("&x");
+ auto e = p->unary_expression();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
+ ASSERT_TRUE(e->Is<ast::UnaryOpExpression>());
+
+ auto* u = e->As<ast::UnaryOpExpression>();
+ EXPECT_EQ(u->op, ast::UnaryOp::kAddressOf);
+ EXPECT_TRUE(u->expr->Is<ast::IdentifierExpression>());
}
TEST_F(ParserImplTest, UnaryExpression_Dereference) {
- auto p = parser("*x");
- auto e = p->unary_expression();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
- ASSERT_TRUE(e->Is<ast::UnaryOpExpression>());
-
- auto* u = e->As<ast::UnaryOpExpression>();
- EXPECT_EQ(u->op, ast::UnaryOp::kIndirection);
- EXPECT_TRUE(u->expr->Is<ast::IdentifierExpression>());
+ auto p = parser("*x");
+ auto e = p->unary_expression();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
+ ASSERT_TRUE(e->Is<ast::UnaryOpExpression>());
+
+ auto* u = e->As<ast::UnaryOpExpression>();
+ EXPECT_EQ(u->op, ast::UnaryOp::kIndirection);
+ EXPECT_TRUE(u->expr->Is<ast::IdentifierExpression>());
}
TEST_F(ParserImplTest, UnaryExpression_AddressOf_Precedence) {
- auto p = parser("&x.y");
- auto e = p->logical_or_expression();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
- ASSERT_TRUE(e->Is<ast::UnaryOpExpression>());
-
- auto* u = e->As<ast::UnaryOpExpression>();
- EXPECT_EQ(u->op, ast::UnaryOp::kAddressOf);
- EXPECT_TRUE(u->expr->Is<ast::MemberAccessorExpression>());
+ auto p = parser("&x.y");
+ auto e = p->logical_or_expression();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
+ ASSERT_TRUE(e->Is<ast::UnaryOpExpression>());
+
+ auto* u = e->As<ast::UnaryOpExpression>();
+ EXPECT_EQ(u->op, ast::UnaryOp::kAddressOf);
+ EXPECT_TRUE(u->expr->Is<ast::MemberAccessorExpression>());
}
TEST_F(ParserImplTest, UnaryExpression_Dereference_Precedence) {
- auto p = parser("*x.y");
- auto e = p->logical_or_expression();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
- ASSERT_TRUE(e->Is<ast::UnaryOpExpression>());
-
- auto* u = e->As<ast::UnaryOpExpression>();
- EXPECT_EQ(u->op, ast::UnaryOp::kIndirection);
- EXPECT_TRUE(u->expr->Is<ast::MemberAccessorExpression>());
+ auto p = parser("*x.y");
+ auto e = p->logical_or_expression();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
+ ASSERT_TRUE(e->Is<ast::UnaryOpExpression>());
+
+ auto* u = e->As<ast::UnaryOpExpression>();
+ EXPECT_EQ(u->op, ast::UnaryOp::kIndirection);
+ EXPECT_TRUE(u->expr->Is<ast::MemberAccessorExpression>());
}
TEST_F(ParserImplTest, UnaryExpression_Minus_InvalidRHS) {
- auto p = parser("-if(a) {}");
- auto e = p->unary_expression();
- EXPECT_FALSE(e.matched);
- EXPECT_TRUE(e.errored);
- EXPECT_EQ(e.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:2: unable to parse right side of - expression");
+ auto p = parser("-if(a) {}");
+ auto e = p->unary_expression();
+ EXPECT_FALSE(e.matched);
+ EXPECT_TRUE(e.errored);
+ EXPECT_EQ(e.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:2: unable to parse right side of - expression");
}
TEST_F(ParserImplTest, UnaryExpression_Bang) {
- auto p = parser("!1");
- auto e = p->unary_expression();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
- ASSERT_TRUE(e->Is<ast::UnaryOpExpression>());
-
- auto* u = e->As<ast::UnaryOpExpression>();
- ASSERT_EQ(u->op, ast::UnaryOp::kNot);
-
- ASSERT_TRUE(u->expr->Is<ast::SintLiteralExpression>());
- EXPECT_EQ(u->expr->As<ast::SintLiteralExpression>()->value, 1);
+ auto p = parser("!1");
+ auto e = p->unary_expression();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
+ ASSERT_TRUE(e->Is<ast::UnaryOpExpression>());
+
+ auto* u = e->As<ast::UnaryOpExpression>();
+ ASSERT_EQ(u->op, ast::UnaryOp::kNot);
+
+ ASSERT_TRUE(u->expr->Is<ast::IntLiteralExpression>());
+ EXPECT_EQ(u->expr->As<ast::IntLiteralExpression>()->value, 1);
+ ASSERT_EQ(u->expr->As<ast::IntLiteralExpression>()->suffix,
+ ast::IntLiteralExpression::Suffix::kNone);
}
TEST_F(ParserImplTest, UnaryExpression_Bang_InvalidRHS) {
- auto p = parser("!if (a) {}");
- auto e = p->unary_expression();
- EXPECT_FALSE(e.matched);
- EXPECT_TRUE(e.errored);
- EXPECT_EQ(e.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:2: unable to parse right side of ! expression");
+ auto p = parser("!if (a) {}");
+ auto e = p->unary_expression();
+ EXPECT_FALSE(e.matched);
+ EXPECT_TRUE(e.errored);
+ EXPECT_EQ(e.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:2: unable to parse right side of ! expression");
}
TEST_F(ParserImplTest, UnaryExpression_Tilde) {
- auto p = parser("~1");
- auto e = p->unary_expression();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
- ASSERT_TRUE(e->Is<ast::UnaryOpExpression>());
-
- auto* u = e->As<ast::UnaryOpExpression>();
- ASSERT_EQ(u->op, ast::UnaryOp::kComplement);
-
- ASSERT_TRUE(u->expr->Is<ast::SintLiteralExpression>());
- EXPECT_EQ(u->expr->As<ast::SintLiteralExpression>()->value, 1);
+ auto p = parser("~1");
+ auto e = p->unary_expression();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
+ ASSERT_TRUE(e->Is<ast::UnaryOpExpression>());
+
+ auto* u = e->As<ast::UnaryOpExpression>();
+ ASSERT_EQ(u->op, ast::UnaryOp::kComplement);
+
+ ASSERT_TRUE(u->expr->Is<ast::IntLiteralExpression>());
+ EXPECT_EQ(u->expr->As<ast::IntLiteralExpression>()->value, 1);
+ ASSERT_EQ(u->expr->As<ast::IntLiteralExpression>()->suffix,
+ ast::IntLiteralExpression::Suffix::kNone);
}
TEST_F(ParserImplTest, UnaryExpression_PrefixPlusPlus) {
- auto p = parser("++a");
- auto e = p->unary_expression();
- EXPECT_FALSE(e.matched);
- EXPECT_TRUE(e.errored);
- EXPECT_EQ(e.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(),
- "1:1: prefix increment and decrement operators are reserved for a "
- "future WGSL version");
+ auto p = parser("++a");
+ auto e = p->unary_expression();
+ EXPECT_FALSE(e.matched);
+ EXPECT_TRUE(e.errored);
+ EXPECT_EQ(e.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(),
+ "1:1: prefix increment and decrement operators are reserved for a "
+ "future WGSL version");
}
TEST_F(ParserImplTest, UnaryExpression_PrefixMinusMinus) {
- auto p = parser("--a");
- auto e = p->unary_expression();
- EXPECT_FALSE(e.matched);
- EXPECT_TRUE(e.errored);
- EXPECT_EQ(e.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(),
- "1:1: prefix increment and decrement operators are reserved for a "
- "future WGSL version");
+ auto p = parser("--a");
+ auto e = p->unary_expression();
+ EXPECT_FALSE(e.matched);
+ EXPECT_TRUE(e.errored);
+ EXPECT_EQ(e.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(),
+ "1:1: prefix increment and decrement operators are reserved for a "
+ "future WGSL version");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_variable_attribute_list_test.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_variable_attribute_list_test.cc
index 9a0284405fa..133dd369184 100644
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_variable_attribute_list_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_variable_attribute_list_test.cc
@@ -18,43 +18,42 @@ namespace tint::reader::wgsl {
namespace {
TEST_F(ParserImplTest, AttributeList_Parses) {
- auto p = parser(R"(@location(4) @builtin(position))");
- auto attrs = p->attribute_list();
- ASSERT_FALSE(p->has_error()) << p->error();
- ASSERT_FALSE(attrs.errored);
- ASSERT_TRUE(attrs.matched);
- ASSERT_EQ(attrs.value.size(), 2u);
-
- auto* attr_0 = attrs.value[0]->As<ast::Attribute>();
- auto* attr_1 = attrs.value[1]->As<ast::Attribute>();
- ASSERT_NE(attr_0, nullptr);
- ASSERT_NE(attr_1, nullptr);
-
- ASSERT_TRUE(attr_0->Is<ast::LocationAttribute>());
- EXPECT_EQ(attr_0->As<ast::LocationAttribute>()->value, 4u);
- ASSERT_TRUE(attr_1->Is<ast::BuiltinAttribute>());
- EXPECT_EQ(attr_1->As<ast::BuiltinAttribute>()->builtin,
- ast::Builtin::kPosition);
+ auto p = parser(R"(@location(4) @builtin(position))");
+ auto attrs = p->attribute_list();
+ ASSERT_FALSE(p->has_error()) << p->error();
+ ASSERT_FALSE(attrs.errored);
+ ASSERT_TRUE(attrs.matched);
+ ASSERT_EQ(attrs.value.size(), 2u);
+
+ auto* attr_0 = attrs.value[0]->As<ast::Attribute>();
+ auto* attr_1 = attrs.value[1]->As<ast::Attribute>();
+ ASSERT_NE(attr_0, nullptr);
+ ASSERT_NE(attr_1, nullptr);
+
+ ASSERT_TRUE(attr_0->Is<ast::LocationAttribute>());
+ EXPECT_EQ(attr_0->As<ast::LocationAttribute>()->value, 4u);
+ ASSERT_TRUE(attr_1->Is<ast::BuiltinAttribute>());
+ EXPECT_EQ(attr_1->As<ast::BuiltinAttribute>()->builtin, ast::Builtin::kPosition);
}
TEST_F(ParserImplTest, AttributeList_Invalid) {
- auto p = parser(R"(@invalid)");
- auto attrs = p->attribute_list();
- EXPECT_TRUE(p->has_error());
- EXPECT_TRUE(attrs.errored);
- EXPECT_FALSE(attrs.matched);
- EXPECT_TRUE(attrs.value.empty());
- EXPECT_EQ(p->error(), R"(1:2: expected attribute)");
+ auto p = parser(R"(@invalid)");
+ auto attrs = p->attribute_list();
+ EXPECT_TRUE(p->has_error());
+ EXPECT_TRUE(attrs.errored);
+ EXPECT_FALSE(attrs.matched);
+ EXPECT_TRUE(attrs.value.empty());
+ EXPECT_EQ(p->error(), R"(1:2: expected attribute)");
}
TEST_F(ParserImplTest, AttributeList_InvalidValue) {
- auto p = parser("@builtin(invalid)");
- auto attrs = p->attribute_list();
- EXPECT_TRUE(p->has_error());
- EXPECT_TRUE(attrs.errored);
- EXPECT_FALSE(attrs.matched);
- EXPECT_TRUE(attrs.value.empty());
- EXPECT_EQ(p->error(), "1:10: invalid value for builtin attribute");
+ auto p = parser("@builtin(invalid)");
+ auto attrs = p->attribute_list();
+ EXPECT_TRUE(p->has_error());
+ EXPECT_TRUE(attrs.errored);
+ EXPECT_FALSE(attrs.matched);
+ EXPECT_TRUE(attrs.value.empty());
+ EXPECT_EQ(p->error(), "1:10: invalid value for builtin attribute");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_variable_attribute_test.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_variable_attribute_test.cc
index 435378f3ff8..8833273f273 100644
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_variable_attribute_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_variable_attribute_test.cc
@@ -18,395 +18,386 @@ namespace tint::reader::wgsl {
namespace {
TEST_F(ParserImplTest, Attribute_Location) {
- auto p = parser("location(4)");
- auto attr = p->attribute();
- EXPECT_TRUE(attr.matched);
- EXPECT_FALSE(attr.errored);
- ASSERT_NE(attr.value, nullptr);
- auto* var_attr = attr.value->As<ast::Attribute>();
- ASSERT_NE(var_attr, nullptr);
- ASSERT_FALSE(p->has_error());
- ASSERT_TRUE(var_attr->Is<ast::LocationAttribute>());
+ auto p = parser("location(4)");
+ auto attr = p->attribute();
+ EXPECT_TRUE(attr.matched);
+ EXPECT_FALSE(attr.errored);
+ ASSERT_NE(attr.value, nullptr);
+ auto* var_attr = attr.value->As<ast::Attribute>();
+ ASSERT_NE(var_attr, nullptr);
+ ASSERT_FALSE(p->has_error());
+ ASSERT_TRUE(var_attr->Is<ast::LocationAttribute>());
- auto* loc = var_attr->As<ast::LocationAttribute>();
- EXPECT_EQ(loc->value, 4u);
+ auto* loc = var_attr->As<ast::LocationAttribute>();
+ EXPECT_EQ(loc->value, 4u);
}
TEST_F(ParserImplTest, Attribute_Location_MissingLeftParen) {
- auto p = parser("location 4)");
- auto attr = p->attribute();
- EXPECT_FALSE(attr.matched);
- EXPECT_TRUE(attr.errored);
- EXPECT_EQ(attr.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:10: expected '(' for location attribute");
+ auto p = parser("location 4)");
+ auto attr = p->attribute();
+ EXPECT_FALSE(attr.matched);
+ EXPECT_TRUE(attr.errored);
+ EXPECT_EQ(attr.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:10: expected '(' for location attribute");
}
TEST_F(ParserImplTest, Attribute_Location_MissingRightParen) {
- auto p = parser("location(4");
- auto attr = p->attribute();
- EXPECT_FALSE(attr.matched);
- EXPECT_TRUE(attr.errored);
- EXPECT_EQ(attr.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:11: expected ')' for location attribute");
+ auto p = parser("location(4");
+ auto attr = p->attribute();
+ EXPECT_FALSE(attr.matched);
+ EXPECT_TRUE(attr.errored);
+ EXPECT_EQ(attr.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:11: expected ')' for location attribute");
}
TEST_F(ParserImplTest, Attribute_Location_MissingValue) {
- auto p = parser("location()");
- auto attr = p->attribute();
- EXPECT_FALSE(attr.matched);
- EXPECT_TRUE(attr.errored);
- EXPECT_EQ(attr.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(),
- "1:10: expected signed integer literal for location attribute");
+ auto p = parser("location()");
+ auto attr = p->attribute();
+ EXPECT_FALSE(attr.matched);
+ EXPECT_TRUE(attr.errored);
+ EXPECT_EQ(attr.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:10: expected signed integer literal for location attribute");
}
TEST_F(ParserImplTest, Attribute_Location_MissingInvalid) {
- auto p = parser("location(nan)");
- auto attr = p->attribute();
- EXPECT_FALSE(attr.matched);
- EXPECT_TRUE(attr.errored);
- EXPECT_EQ(attr.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(),
- "1:10: expected signed integer literal for location attribute");
+ auto p = parser("location(nan)");
+ auto attr = p->attribute();
+ EXPECT_FALSE(attr.matched);
+ EXPECT_TRUE(attr.errored);
+ EXPECT_EQ(attr.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:10: expected signed integer literal for location attribute");
}
struct BuiltinData {
- const char* input;
- ast::Builtin result;
+ const char* input;
+ ast::Builtin result;
};
inline std::ostream& operator<<(std::ostream& out, BuiltinData data) {
- out << std::string(data.input);
- return out;
+ out << std::string(data.input);
+ return out;
}
class BuiltinTest : public ParserImplTestWithParam<BuiltinData> {};
TEST_P(BuiltinTest, Attribute_Builtin) {
- auto params = GetParam();
- auto p = parser(std::string("builtin(") + params.input + ")");
+ auto params = GetParam();
+ auto p = parser(std::string("builtin(") + params.input + ")");
- auto attr = p->attribute();
- EXPECT_TRUE(attr.matched);
- EXPECT_FALSE(attr.errored);
- ASSERT_NE(attr.value, nullptr);
- auto* var_attr = attr.value->As<ast::Attribute>();
- ASSERT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(var_attr, nullptr);
- ASSERT_TRUE(var_attr->Is<ast::BuiltinAttribute>());
+ auto attr = p->attribute();
+ EXPECT_TRUE(attr.matched);
+ EXPECT_FALSE(attr.errored);
+ ASSERT_NE(attr.value, nullptr);
+ auto* var_attr = attr.value->As<ast::Attribute>();
+ ASSERT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(var_attr, nullptr);
+ ASSERT_TRUE(var_attr->Is<ast::BuiltinAttribute>());
- auto* builtin = var_attr->As<ast::BuiltinAttribute>();
- EXPECT_EQ(builtin->builtin, params.result);
+ auto* builtin = var_attr->As<ast::BuiltinAttribute>();
+ EXPECT_EQ(builtin->builtin, params.result);
}
INSTANTIATE_TEST_SUITE_P(
ParserImplTest,
BuiltinTest,
- testing::Values(
- BuiltinData{"position", ast::Builtin::kPosition},
- BuiltinData{"vertex_index", ast::Builtin::kVertexIndex},
- BuiltinData{"instance_index", ast::Builtin::kInstanceIndex},
- BuiltinData{"front_facing", ast::Builtin::kFrontFacing},
- BuiltinData{"frag_depth", ast::Builtin::kFragDepth},
- BuiltinData{"local_invocation_id", ast::Builtin::kLocalInvocationId},
- BuiltinData{"local_invocation_idx",
- ast::Builtin::kLocalInvocationIndex},
- BuiltinData{"local_invocation_index",
- ast::Builtin::kLocalInvocationIndex},
- BuiltinData{"global_invocation_id", ast::Builtin::kGlobalInvocationId},
- BuiltinData{"workgroup_id", ast::Builtin::kWorkgroupId},
- BuiltinData{"num_workgroups", ast::Builtin::kNumWorkgroups},
- BuiltinData{"sample_index", ast::Builtin::kSampleIndex},
- BuiltinData{"sample_mask", ast::Builtin::kSampleMask}));
+ testing::Values(BuiltinData{"position", ast::Builtin::kPosition},
+ BuiltinData{"vertex_index", ast::Builtin::kVertexIndex},
+ BuiltinData{"instance_index", ast::Builtin::kInstanceIndex},
+ BuiltinData{"front_facing", ast::Builtin::kFrontFacing},
+ BuiltinData{"frag_depth", ast::Builtin::kFragDepth},
+ BuiltinData{"local_invocation_id", ast::Builtin::kLocalInvocationId},
+ BuiltinData{"local_invocation_idx", ast::Builtin::kLocalInvocationIndex},
+ BuiltinData{"local_invocation_index", ast::Builtin::kLocalInvocationIndex},
+ BuiltinData{"global_invocation_id", ast::Builtin::kGlobalInvocationId},
+ BuiltinData{"workgroup_id", ast::Builtin::kWorkgroupId},
+ BuiltinData{"num_workgroups", ast::Builtin::kNumWorkgroups},
+ BuiltinData{"sample_index", ast::Builtin::kSampleIndex},
+ BuiltinData{"sample_mask", ast::Builtin::kSampleMask}));
TEST_F(ParserImplTest, Attribute_Builtin_MissingLeftParen) {
- auto p = parser("builtin position)");
- auto attr = p->attribute();
- EXPECT_FALSE(attr.matched);
- EXPECT_TRUE(attr.errored);
- EXPECT_EQ(attr.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:9: expected '(' for builtin attribute");
+ auto p = parser("builtin position)");
+ auto attr = p->attribute();
+ EXPECT_FALSE(attr.matched);
+ EXPECT_TRUE(attr.errored);
+ EXPECT_EQ(attr.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:9: expected '(' for builtin attribute");
}
TEST_F(ParserImplTest, Attribute_Builtin_MissingRightParen) {
- auto p = parser("builtin(position");
- auto attr = p->attribute();
- EXPECT_FALSE(attr.matched);
- EXPECT_TRUE(attr.errored);
- EXPECT_EQ(attr.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:17: expected ')' for builtin attribute");
+ auto p = parser("builtin(position");
+ auto attr = p->attribute();
+ EXPECT_FALSE(attr.matched);
+ EXPECT_TRUE(attr.errored);
+ EXPECT_EQ(attr.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:17: expected ')' for builtin attribute");
}
TEST_F(ParserImplTest, Attribute_Builtin_MissingValue) {
- auto p = parser("builtin()");
- auto attr = p->attribute();
- EXPECT_FALSE(attr.matched);
- EXPECT_TRUE(attr.errored);
- EXPECT_EQ(attr.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:9: expected identifier for builtin");
+ auto p = parser("builtin()");
+ auto attr = p->attribute();
+ EXPECT_FALSE(attr.matched);
+ EXPECT_TRUE(attr.errored);
+ EXPECT_EQ(attr.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:9: expected identifier for builtin");
}
TEST_F(ParserImplTest, Attribute_Builtin_InvalidValue) {
- auto p = parser("builtin(other_thingy)");
- auto attr = p->attribute();
- EXPECT_FALSE(attr.matched);
- EXPECT_TRUE(attr.errored);
- EXPECT_EQ(attr.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:9: invalid value for builtin attribute");
+ auto p = parser("builtin(other_thingy)");
+ auto attr = p->attribute();
+ EXPECT_FALSE(attr.matched);
+ EXPECT_TRUE(attr.errored);
+ EXPECT_EQ(attr.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:9: invalid value for builtin attribute");
}
TEST_F(ParserImplTest, Attribute_Builtin_MissingInvalid) {
- auto p = parser("builtin(3)");
- auto attr = p->attribute();
- EXPECT_FALSE(attr.matched);
- EXPECT_TRUE(attr.errored);
- EXPECT_EQ(attr.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:9: expected identifier for builtin");
+ auto p = parser("builtin(3)");
+ auto attr = p->attribute();
+ EXPECT_FALSE(attr.matched);
+ EXPECT_TRUE(attr.errored);
+ EXPECT_EQ(attr.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:9: expected identifier for builtin");
}
TEST_F(ParserImplTest, Attribute_Interpolate_Flat) {
- auto p = parser("interpolate(flat)");
- auto attr = p->attribute();
- EXPECT_TRUE(attr.matched);
- EXPECT_FALSE(attr.errored);
- ASSERT_NE(attr.value, nullptr);
- auto* var_attr = attr.value->As<ast::Attribute>();
- ASSERT_NE(var_attr, nullptr);
- ASSERT_FALSE(p->has_error());
- ASSERT_TRUE(var_attr->Is<ast::InterpolateAttribute>());
+ auto p = parser("interpolate(flat)");
+ auto attr = p->attribute();
+ EXPECT_TRUE(attr.matched);
+ EXPECT_FALSE(attr.errored);
+ ASSERT_NE(attr.value, nullptr);
+ auto* var_attr = attr.value->As<ast::Attribute>();
+ ASSERT_NE(var_attr, nullptr);
+ ASSERT_FALSE(p->has_error());
+ ASSERT_TRUE(var_attr->Is<ast::InterpolateAttribute>());
- auto* interp = var_attr->As<ast::InterpolateAttribute>();
- EXPECT_EQ(interp->type, ast::InterpolationType::kFlat);
- EXPECT_EQ(interp->sampling, ast::InterpolationSampling::kNone);
+ auto* interp = var_attr->As<ast::InterpolateAttribute>();
+ EXPECT_EQ(interp->type, ast::InterpolationType::kFlat);
+ EXPECT_EQ(interp->sampling, ast::InterpolationSampling::kNone);
}
TEST_F(ParserImplTest, Attribute_Interpolate_Perspective_Center) {
- auto p = parser("interpolate(perspective, center)");
- auto attr = p->attribute();
- EXPECT_TRUE(attr.matched);
- EXPECT_FALSE(attr.errored);
- ASSERT_NE(attr.value, nullptr);
- auto* var_attr = attr.value->As<ast::Attribute>();
- ASSERT_NE(var_attr, nullptr);
- ASSERT_FALSE(p->has_error());
- ASSERT_TRUE(var_attr->Is<ast::InterpolateAttribute>());
+ auto p = parser("interpolate(perspective, center)");
+ auto attr = p->attribute();
+ EXPECT_TRUE(attr.matched);
+ EXPECT_FALSE(attr.errored);
+ ASSERT_NE(attr.value, nullptr);
+ auto* var_attr = attr.value->As<ast::Attribute>();
+ ASSERT_NE(var_attr, nullptr);
+ ASSERT_FALSE(p->has_error());
+ ASSERT_TRUE(var_attr->Is<ast::InterpolateAttribute>());
- auto* interp = var_attr->As<ast::InterpolateAttribute>();
- EXPECT_EQ(interp->type, ast::InterpolationType::kPerspective);
- EXPECT_EQ(interp->sampling, ast::InterpolationSampling::kCenter);
+ auto* interp = var_attr->As<ast::InterpolateAttribute>();
+ EXPECT_EQ(interp->type, ast::InterpolationType::kPerspective);
+ EXPECT_EQ(interp->sampling, ast::InterpolationSampling::kCenter);
}
TEST_F(ParserImplTest, Attribute_Interpolate_Perspective_Centroid) {
- auto p = parser("interpolate(perspective, centroid)");
- auto attr = p->attribute();
- EXPECT_TRUE(attr.matched);
- EXPECT_FALSE(attr.errored);
- ASSERT_NE(attr.value, nullptr);
- auto* var_attr = attr.value->As<ast::Attribute>();
- ASSERT_NE(var_attr, nullptr);
- ASSERT_FALSE(p->has_error());
- ASSERT_TRUE(var_attr->Is<ast::InterpolateAttribute>());
+ auto p = parser("interpolate(perspective, centroid)");
+ auto attr = p->attribute();
+ EXPECT_TRUE(attr.matched);
+ EXPECT_FALSE(attr.errored);
+ ASSERT_NE(attr.value, nullptr);
+ auto* var_attr = attr.value->As<ast::Attribute>();
+ ASSERT_NE(var_attr, nullptr);
+ ASSERT_FALSE(p->has_error());
+ ASSERT_TRUE(var_attr->Is<ast::InterpolateAttribute>());
- auto* interp = var_attr->As<ast::InterpolateAttribute>();
- EXPECT_EQ(interp->type, ast::InterpolationType::kPerspective);
- EXPECT_EQ(interp->sampling, ast::InterpolationSampling::kCentroid);
+ auto* interp = var_attr->As<ast::InterpolateAttribute>();
+ EXPECT_EQ(interp->type, ast::InterpolationType::kPerspective);
+ EXPECT_EQ(interp->sampling, ast::InterpolationSampling::kCentroid);
}
TEST_F(ParserImplTest, Attribute_Interpolate_Linear_Sample) {
- auto p = parser("interpolate(linear, sample)");
- auto attr = p->attribute();
- EXPECT_TRUE(attr.matched);
- EXPECT_FALSE(attr.errored);
- ASSERT_NE(attr.value, nullptr);
- auto* var_attr = attr.value->As<ast::Attribute>();
- ASSERT_NE(var_attr, nullptr);
- ASSERT_FALSE(p->has_error());
- ASSERT_TRUE(var_attr->Is<ast::InterpolateAttribute>());
+ auto p = parser("interpolate(linear, sample)");
+ auto attr = p->attribute();
+ EXPECT_TRUE(attr.matched);
+ EXPECT_FALSE(attr.errored);
+ ASSERT_NE(attr.value, nullptr);
+ auto* var_attr = attr.value->As<ast::Attribute>();
+ ASSERT_NE(var_attr, nullptr);
+ ASSERT_FALSE(p->has_error());
+ ASSERT_TRUE(var_attr->Is<ast::InterpolateAttribute>());
- auto* interp = var_attr->As<ast::InterpolateAttribute>();
- EXPECT_EQ(interp->type, ast::InterpolationType::kLinear);
- EXPECT_EQ(interp->sampling, ast::InterpolationSampling::kSample);
+ auto* interp = var_attr->As<ast::InterpolateAttribute>();
+ EXPECT_EQ(interp->type, ast::InterpolationType::kLinear);
+ EXPECT_EQ(interp->sampling, ast::InterpolationSampling::kSample);
}
TEST_F(ParserImplTest, Attribute_Interpolate_MissingLeftParen) {
- auto p = parser("interpolate flat)");
- auto attr = p->attribute();
- EXPECT_FALSE(attr.matched);
- EXPECT_TRUE(attr.errored);
- EXPECT_EQ(attr.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:13: expected '(' for interpolate attribute");
+ auto p = parser("interpolate flat)");
+ auto attr = p->attribute();
+ EXPECT_FALSE(attr.matched);
+ EXPECT_TRUE(attr.errored);
+ EXPECT_EQ(attr.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:13: expected '(' for interpolate attribute");
}
TEST_F(ParserImplTest, Attribute_Interpolate_MissingRightParen) {
- auto p = parser("interpolate(flat");
- auto attr = p->attribute();
- EXPECT_FALSE(attr.matched);
- EXPECT_TRUE(attr.errored);
- EXPECT_EQ(attr.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:17: expected ')' for interpolate attribute");
+ auto p = parser("interpolate(flat");
+ auto attr = p->attribute();
+ EXPECT_FALSE(attr.matched);
+ EXPECT_TRUE(attr.errored);
+ EXPECT_EQ(attr.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:17: expected ')' for interpolate attribute");
}
TEST_F(ParserImplTest, Attribute_Interpolate_MissingFirstValue) {
- auto p = parser("interpolate()");
- auto attr = p->attribute();
- EXPECT_FALSE(attr.matched);
- EXPECT_TRUE(attr.errored);
- EXPECT_EQ(attr.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:13: invalid interpolation type");
+ auto p = parser("interpolate()");
+ auto attr = p->attribute();
+ EXPECT_FALSE(attr.matched);
+ EXPECT_TRUE(attr.errored);
+ EXPECT_EQ(attr.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:13: invalid interpolation type");
}
TEST_F(ParserImplTest, Attribute_Interpolate_InvalidFirstValue) {
- auto p = parser("interpolate(other_thingy)");
- auto attr = p->attribute();
- EXPECT_FALSE(attr.matched);
- EXPECT_TRUE(attr.errored);
- EXPECT_EQ(attr.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:13: invalid interpolation type");
+ auto p = parser("interpolate(other_thingy)");
+ auto attr = p->attribute();
+ EXPECT_FALSE(attr.matched);
+ EXPECT_TRUE(attr.errored);
+ EXPECT_EQ(attr.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:13: invalid interpolation type");
}
TEST_F(ParserImplTest, Attribute_Interpolate_MissingSecondValue) {
- auto p = parser("interpolate(perspective,)");
- auto attr = p->attribute();
- EXPECT_FALSE(attr.matched);
- EXPECT_TRUE(attr.errored);
- EXPECT_EQ(attr.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:25: invalid interpolation sampling");
+ auto p = parser("interpolate(perspective,)");
+ auto attr = p->attribute();
+ EXPECT_FALSE(attr.matched);
+ EXPECT_TRUE(attr.errored);
+ EXPECT_EQ(attr.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:25: invalid interpolation sampling");
}
TEST_F(ParserImplTest, Attribute_Interpolate_InvalidSecondValue) {
- auto p = parser("interpolate(perspective, nope)");
- auto attr = p->attribute();
- EXPECT_FALSE(attr.matched);
- EXPECT_TRUE(attr.errored);
- EXPECT_EQ(attr.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:26: invalid interpolation sampling");
+ auto p = parser("interpolate(perspective, nope)");
+ auto attr = p->attribute();
+ EXPECT_FALSE(attr.matched);
+ EXPECT_TRUE(attr.errored);
+ EXPECT_EQ(attr.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:26: invalid interpolation sampling");
}
TEST_F(ParserImplTest, Attribute_Binding) {
- auto p = parser("binding(4)");
- auto attr = p->attribute();
- EXPECT_TRUE(attr.matched);
- EXPECT_FALSE(attr.errored);
- ASSERT_NE(attr.value, nullptr);
- auto* var_attr = attr.value->As<ast::Attribute>();
- ASSERT_NE(var_attr, nullptr);
- ASSERT_FALSE(p->has_error());
- ASSERT_TRUE(var_attr->Is<ast::BindingAttribute>());
+ auto p = parser("binding(4)");
+ auto attr = p->attribute();
+ EXPECT_TRUE(attr.matched);
+ EXPECT_FALSE(attr.errored);
+ ASSERT_NE(attr.value, nullptr);
+ auto* var_attr = attr.value->As<ast::Attribute>();
+ ASSERT_NE(var_attr, nullptr);
+ ASSERT_FALSE(p->has_error());
+ ASSERT_TRUE(var_attr->Is<ast::BindingAttribute>());
- auto* binding = var_attr->As<ast::BindingAttribute>();
- EXPECT_EQ(binding->value, 4u);
+ auto* binding = var_attr->As<ast::BindingAttribute>();
+ EXPECT_EQ(binding->value, 4u);
}
TEST_F(ParserImplTest, Attribute_Binding_MissingLeftParen) {
- auto p = parser("binding 4)");
- auto attr = p->attribute();
- EXPECT_FALSE(attr.matched);
- EXPECT_TRUE(attr.errored);
- EXPECT_EQ(attr.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:9: expected '(' for binding attribute");
+ auto p = parser("binding 4)");
+ auto attr = p->attribute();
+ EXPECT_FALSE(attr.matched);
+ EXPECT_TRUE(attr.errored);
+ EXPECT_EQ(attr.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:9: expected '(' for binding attribute");
}
TEST_F(ParserImplTest, Attribute_Binding_MissingRightParen) {
- auto p = parser("binding(4");
- auto attr = p->attribute();
- EXPECT_FALSE(attr.matched);
- EXPECT_TRUE(attr.errored);
- EXPECT_EQ(attr.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:10: expected ')' for binding attribute");
+ auto p = parser("binding(4");
+ auto attr = p->attribute();
+ EXPECT_FALSE(attr.matched);
+ EXPECT_TRUE(attr.errored);
+ EXPECT_EQ(attr.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:10: expected ')' for binding attribute");
}
TEST_F(ParserImplTest, Attribute_Binding_MissingValue) {
- auto p = parser("binding()");
- auto attr = p->attribute();
- EXPECT_FALSE(attr.matched);
- EXPECT_TRUE(attr.errored);
- EXPECT_EQ(attr.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(),
- "1:9: expected signed integer literal for binding attribute");
+ auto p = parser("binding()");
+ auto attr = p->attribute();
+ EXPECT_FALSE(attr.matched);
+ EXPECT_TRUE(attr.errored);
+ EXPECT_EQ(attr.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:9: expected signed integer literal for binding attribute");
}
TEST_F(ParserImplTest, Attribute_Binding_MissingInvalid) {
- auto p = parser("binding(nan)");
- auto attr = p->attribute();
- EXPECT_FALSE(attr.matched);
- EXPECT_TRUE(attr.errored);
- EXPECT_EQ(attr.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(),
- "1:9: expected signed integer literal for binding attribute");
+ auto p = parser("binding(nan)");
+ auto attr = p->attribute();
+ EXPECT_FALSE(attr.matched);
+ EXPECT_TRUE(attr.errored);
+ EXPECT_EQ(attr.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:9: expected signed integer literal for binding attribute");
}
TEST_F(ParserImplTest, Attribute_group) {
- auto p = parser("group(4)");
- auto attr = p->attribute();
- EXPECT_TRUE(attr.matched);
- EXPECT_FALSE(attr.errored);
- ASSERT_NE(attr.value, nullptr);
- auto* var_attr = attr.value->As<ast::Attribute>();
- ASSERT_FALSE(p->has_error());
- ASSERT_NE(var_attr, nullptr);
- ASSERT_TRUE(var_attr->Is<ast::GroupAttribute>());
+ auto p = parser("group(4)");
+ auto attr = p->attribute();
+ EXPECT_TRUE(attr.matched);
+ EXPECT_FALSE(attr.errored);
+ ASSERT_NE(attr.value, nullptr);
+ auto* var_attr = attr.value->As<ast::Attribute>();
+ ASSERT_FALSE(p->has_error());
+ ASSERT_NE(var_attr, nullptr);
+ ASSERT_TRUE(var_attr->Is<ast::GroupAttribute>());
- auto* group = var_attr->As<ast::GroupAttribute>();
- EXPECT_EQ(group->value, 4u);
+ auto* group = var_attr->As<ast::GroupAttribute>();
+ EXPECT_EQ(group->value, 4u);
}
TEST_F(ParserImplTest, Attribute_Group_MissingLeftParen) {
- auto p = parser("group 2)");
- auto attr = p->attribute();
- EXPECT_FALSE(attr.matched);
- EXPECT_TRUE(attr.errored);
- EXPECT_EQ(attr.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:7: expected '(' for group attribute");
+ auto p = parser("group 2)");
+ auto attr = p->attribute();
+ EXPECT_FALSE(attr.matched);
+ EXPECT_TRUE(attr.errored);
+ EXPECT_EQ(attr.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:7: expected '(' for group attribute");
}
TEST_F(ParserImplTest, Attribute_Group_MissingRightParen) {
- auto p = parser("group(2");
- auto attr = p->attribute();
- EXPECT_FALSE(attr.matched);
- EXPECT_TRUE(attr.errored);
- EXPECT_EQ(attr.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:8: expected ')' for group attribute");
+ auto p = parser("group(2");
+ auto attr = p->attribute();
+ EXPECT_FALSE(attr.matched);
+ EXPECT_TRUE(attr.errored);
+ EXPECT_EQ(attr.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:8: expected ')' for group attribute");
}
TEST_F(ParserImplTest, Attribute_Group_MissingValue) {
- auto p = parser("group()");
- auto attr = p->attribute();
- EXPECT_FALSE(attr.matched);
- EXPECT_TRUE(attr.errored);
- EXPECT_EQ(attr.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(),
- "1:7: expected signed integer literal for group attribute");
+ auto p = parser("group()");
+ auto attr = p->attribute();
+ EXPECT_FALSE(attr.matched);
+ EXPECT_TRUE(attr.errored);
+ EXPECT_EQ(attr.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:7: expected signed integer literal for group attribute");
}
TEST_F(ParserImplTest, Attribute_Group_MissingInvalid) {
- auto p = parser("group(nan)");
- auto attr = p->attribute();
- EXPECT_FALSE(attr.matched);
- EXPECT_TRUE(attr.errored);
- EXPECT_EQ(attr.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(),
- "1:7: expected signed integer literal for group attribute");
+ auto p = parser("group(nan)");
+ auto attr = p->attribute();
+ EXPECT_FALSE(attr.matched);
+ EXPECT_TRUE(attr.errored);
+ EXPECT_EQ(attr.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:7: expected signed integer literal for group attribute");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_variable_decl_test.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_variable_decl_test.cc
index 1b1e8109b07..70ec394afd7 100644
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_variable_decl_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_variable_decl_test.cc
@@ -17,93 +17,93 @@
namespace tint::reader::wgsl {
namespace {
TEST_F(ParserImplTest, VariableDecl_Parses) {
- auto p = parser("var my_var : f32");
- auto v = p->variable_decl();
- EXPECT_FALSE(p->has_error());
- EXPECT_TRUE(v.matched);
- EXPECT_FALSE(v.errored);
- EXPECT_EQ(v->name, "my_var");
- EXPECT_NE(v->type, nullptr);
- EXPECT_TRUE(v->type->Is<ast::F32>());
+ auto p = parser("var my_var : f32");
+ auto v = p->variable_decl();
+ EXPECT_FALSE(p->has_error());
+ EXPECT_TRUE(v.matched);
+ EXPECT_FALSE(v.errored);
+ EXPECT_EQ(v->name, "my_var");
+ EXPECT_NE(v->type, nullptr);
+ EXPECT_TRUE(v->type->Is<ast::F32>());
- EXPECT_EQ(v->source.range, (Source::Range{{1u, 5u}, {1u, 11u}}));
- EXPECT_EQ(v->type->source.range, (Source::Range{{1u, 14u}, {1u, 17u}}));
+ EXPECT_EQ(v->source.range, (Source::Range{{1u, 5u}, {1u, 11u}}));
+ EXPECT_EQ(v->type->source.range, (Source::Range{{1u, 14u}, {1u, 17u}}));
}
TEST_F(ParserImplTest, VariableDecl_Unicode_Parses) {
- const std::string ident = // "𝖎𝖉𝖊𝖓𝖙𝖎𝖋𝖎𝖊𝖗123"
- "\xf0\x9d\x96\x8e\xf0\x9d\x96\x89\xf0\x9d\x96\x8a\xf0\x9d\x96\x93"
- "\xf0\x9d\x96\x99\xf0\x9d\x96\x8e\xf0\x9d\x96\x8b\xf0\x9d\x96\x8e"
- "\xf0\x9d\x96\x8a\xf0\x9d\x96\x97\x31\x32\x33";
+ const std::string ident = // "𝖎𝖉𝖊𝖓𝖙𝖎𝖋𝖎𝖊𝖗123"
+ "\xf0\x9d\x96\x8e\xf0\x9d\x96\x89\xf0\x9d\x96\x8a\xf0\x9d\x96\x93"
+ "\xf0\x9d\x96\x99\xf0\x9d\x96\x8e\xf0\x9d\x96\x8b\xf0\x9d\x96\x8e"
+ "\xf0\x9d\x96\x8a\xf0\x9d\x96\x97\x31\x32\x33";
- auto p = parser("var " + ident + " : f32");
- auto v = p->variable_decl();
- EXPECT_FALSE(p->has_error());
- EXPECT_TRUE(v.matched);
- EXPECT_FALSE(v.errored);
- EXPECT_EQ(v->name, ident);
- EXPECT_NE(v->type, nullptr);
- EXPECT_TRUE(v->type->Is<ast::F32>());
+ auto p = parser("var " + ident + " : f32");
+ auto v = p->variable_decl();
+ EXPECT_FALSE(p->has_error());
+ EXPECT_TRUE(v.matched);
+ EXPECT_FALSE(v.errored);
+ EXPECT_EQ(v->name, ident);
+ EXPECT_NE(v->type, nullptr);
+ EXPECT_TRUE(v->type->Is<ast::F32>());
- EXPECT_EQ(v->source.range, (Source::Range{{1u, 5u}, {1u, 48u}}));
- EXPECT_EQ(v->type->source.range, (Source::Range{{1u, 51u}, {1u, 54u}}));
+ EXPECT_EQ(v->source.range, (Source::Range{{1u, 5u}, {1u, 48u}}));
+ EXPECT_EQ(v->type->source.range, (Source::Range{{1u, 51u}, {1u, 54u}}));
}
TEST_F(ParserImplTest, VariableDecl_Inferred_Parses) {
- auto p = parser("var my_var = 1.0");
- auto v = p->variable_decl(/*allow_inferred = */ true);
- EXPECT_FALSE(p->has_error());
- EXPECT_TRUE(v.matched);
- EXPECT_FALSE(v.errored);
- EXPECT_EQ(v->name, "my_var");
- EXPECT_EQ(v->type, nullptr);
+ auto p = parser("var my_var = 1.0");
+ auto v = p->variable_decl(/*allow_inferred = */ true);
+ EXPECT_FALSE(p->has_error());
+ EXPECT_TRUE(v.matched);
+ EXPECT_FALSE(v.errored);
+ EXPECT_EQ(v->name, "my_var");
+ EXPECT_EQ(v->type, nullptr);
- EXPECT_EQ(v->source.range, (Source::Range{{1u, 5u}, {1u, 11u}}));
+ EXPECT_EQ(v->source.range, (Source::Range{{1u, 5u}, {1u, 11u}}));
}
TEST_F(ParserImplTest, VariableDecl_MissingVar) {
- auto p = parser("my_var : f32");
- auto v = p->variable_decl();
- EXPECT_FALSE(v.matched);
- EXPECT_FALSE(v.errored);
- EXPECT_FALSE(p->has_error());
+ auto p = parser("my_var : f32");
+ auto v = p->variable_decl();
+ EXPECT_FALSE(v.matched);
+ EXPECT_FALSE(v.errored);
+ EXPECT_FALSE(p->has_error());
- auto t = p->next();
- ASSERT_TRUE(t.IsIdentifier());
+ auto t = p->next();
+ ASSERT_TRUE(t.IsIdentifier());
}
TEST_F(ParserImplTest, VariableDecl_InvalidIdentDecl) {
- auto p = parser("var my_var f32");
- auto v = p->variable_decl();
- EXPECT_FALSE(v.matched);
- EXPECT_TRUE(v.errored);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:12: expected ':' for variable declaration");
+ auto p = parser("var my_var f32");
+ auto v = p->variable_decl();
+ EXPECT_FALSE(v.matched);
+ EXPECT_TRUE(v.errored);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:12: expected ':' for variable declaration");
}
TEST_F(ParserImplTest, VariableDecl_WithStorageClass) {
- auto p = parser("var<private> my_var : f32");
- auto v = p->variable_decl();
- EXPECT_TRUE(v.matched);
- EXPECT_FALSE(v.errored);
- EXPECT_FALSE(p->has_error());
- EXPECT_EQ(v->name, "my_var");
- EXPECT_TRUE(v->type->Is<ast::F32>());
- EXPECT_EQ(v->storage_class, ast::StorageClass::kPrivate);
+ auto p = parser("var<private> my_var : f32");
+ auto v = p->variable_decl();
+ EXPECT_TRUE(v.matched);
+ EXPECT_FALSE(v.errored);
+ EXPECT_FALSE(p->has_error());
+ EXPECT_EQ(v->name, "my_var");
+ EXPECT_TRUE(v->type->Is<ast::F32>());
+ EXPECT_EQ(v->storage_class, ast::StorageClass::kPrivate);
- EXPECT_EQ(v->source.range.begin.line, 1u);
- EXPECT_EQ(v->source.range.begin.column, 14u);
- EXPECT_EQ(v->source.range.end.line, 1u);
- EXPECT_EQ(v->source.range.end.column, 20u);
+ EXPECT_EQ(v->source.range.begin.line, 1u);
+ EXPECT_EQ(v->source.range.begin.column, 14u);
+ EXPECT_EQ(v->source.range.end.line, 1u);
+ EXPECT_EQ(v->source.range.end.column, 20u);
}
TEST_F(ParserImplTest, VariableDecl_InvalidStorageClass) {
- auto p = parser("var<unknown> my_var : f32");
- auto v = p->variable_decl();
- EXPECT_FALSE(v.matched);
- EXPECT_TRUE(v.errored);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:5: invalid storage class for variable declaration");
+ auto p = parser("var<unknown> my_var : f32");
+ auto v = p->variable_decl();
+ EXPECT_FALSE(v.matched);
+ EXPECT_TRUE(v.errored);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:5: invalid storage class for variable declaration");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_variable_ident_decl_test.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_variable_ident_decl_test.cc
index dfaa785a8e8..3ffb29ec6c7 100644
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_variable_ident_decl_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_variable_ident_decl_test.cc
@@ -18,59 +18,59 @@ namespace tint::reader::wgsl {
namespace {
TEST_F(ParserImplTest, VariableIdentDecl_Parses) {
- auto p = parser("my_var : f32");
- auto decl = p->expect_variable_ident_decl("test");
- ASSERT_FALSE(p->has_error()) << p->error();
- ASSERT_FALSE(decl.errored);
- ASSERT_EQ(decl->name, "my_var");
- ASSERT_NE(decl->type, nullptr);
- ASSERT_TRUE(decl->type->Is<ast::F32>());
+ auto p = parser("my_var : f32");
+ auto decl = p->expect_variable_ident_decl("test");
+ ASSERT_FALSE(p->has_error()) << p->error();
+ ASSERT_FALSE(decl.errored);
+ ASSERT_EQ(decl->name, "my_var");
+ ASSERT_NE(decl->type, nullptr);
+ ASSERT_TRUE(decl->type->Is<ast::F32>());
- EXPECT_EQ(decl->source.range, (Source::Range{{1u, 1u}, {1u, 7u}}));
- EXPECT_EQ(decl->type->source.range, (Source::Range{{1u, 10u}, {1u, 13u}}));
+ EXPECT_EQ(decl->source.range, (Source::Range{{1u, 1u}, {1u, 7u}}));
+ EXPECT_EQ(decl->type->source.range, (Source::Range{{1u, 10u}, {1u, 13u}}));
}
TEST_F(ParserImplTest, VariableIdentDecl_Inferred_Parses) {
- auto p = parser("my_var = 1.0");
- auto decl = p->expect_variable_ident_decl("test", /*allow_inferred = */ true);
- ASSERT_FALSE(p->has_error()) << p->error();
- ASSERT_FALSE(decl.errored);
- ASSERT_EQ(decl->name, "my_var");
- ASSERT_EQ(decl->type, nullptr);
+ auto p = parser("my_var = 1.0");
+ auto decl = p->expect_variable_ident_decl("test", /*allow_inferred = */ true);
+ ASSERT_FALSE(p->has_error()) << p->error();
+ ASSERT_FALSE(decl.errored);
+ ASSERT_EQ(decl->name, "my_var");
+ ASSERT_EQ(decl->type, nullptr);
- EXPECT_EQ(decl->source.range, (Source::Range{{1u, 1u}, {1u, 7u}}));
+ EXPECT_EQ(decl->source.range, (Source::Range{{1u, 1u}, {1u, 7u}}));
}
TEST_F(ParserImplTest, VariableIdentDecl_MissingIdent) {
- auto p = parser(": f32");
- auto decl = p->expect_variable_ident_decl("test");
- ASSERT_TRUE(p->has_error());
- ASSERT_TRUE(decl.errored);
- ASSERT_EQ(p->error(), "1:1: expected identifier for test");
+ auto p = parser(": f32");
+ auto decl = p->expect_variable_ident_decl("test");
+ ASSERT_TRUE(p->has_error());
+ ASSERT_TRUE(decl.errored);
+ ASSERT_EQ(p->error(), "1:1: expected identifier for test");
}
TEST_F(ParserImplTest, VariableIdentDecl_MissingColon) {
- auto p = parser("my_var f32");
- auto decl = p->expect_variable_ident_decl("test");
- ASSERT_TRUE(p->has_error());
- ASSERT_TRUE(decl.errored);
- ASSERT_EQ(p->error(), "1:8: expected ':' for test");
+ auto p = parser("my_var f32");
+ auto decl = p->expect_variable_ident_decl("test");
+ ASSERT_TRUE(p->has_error());
+ ASSERT_TRUE(decl.errored);
+ ASSERT_EQ(p->error(), "1:8: expected ':' for test");
}
TEST_F(ParserImplTest, VariableIdentDecl_MissingType) {
- auto p = parser("my_var :");
- auto decl = p->expect_variable_ident_decl("test");
- ASSERT_TRUE(p->has_error());
- ASSERT_TRUE(decl.errored);
- ASSERT_EQ(p->error(), "1:9: invalid type for test");
+ auto p = parser("my_var :");
+ auto decl = p->expect_variable_ident_decl("test");
+ ASSERT_TRUE(p->has_error());
+ ASSERT_TRUE(decl.errored);
+ ASSERT_EQ(p->error(), "1:9: invalid type for test");
}
TEST_F(ParserImplTest, VariableIdentDecl_InvalidIdent) {
- auto p = parser("123 : f32");
- auto decl = p->expect_variable_ident_decl("test");
- ASSERT_TRUE(p->has_error());
- ASSERT_TRUE(decl.errored);
- ASSERT_EQ(p->error(), "1:1: expected identifier for test");
+ auto p = parser("123 : f32");
+ auto decl = p->expect_variable_ident_decl("test");
+ ASSERT_TRUE(p->has_error());
+ ASSERT_TRUE(decl.errored);
+ ASSERT_EQ(p->error(), "1:1: expected identifier for test");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_variable_qualifier_test.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_variable_qualifier_test.cc
index 6fa909f14d8..de63f923b06 100644
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_variable_qualifier_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_variable_qualifier_test.cc
@@ -18,102 +18,93 @@ namespace tint::reader::wgsl {
namespace {
struct VariableStorageData {
- const char* input;
- ast::StorageClass storage_class;
- ast::Access access;
+ const char* input;
+ ast::StorageClass storage_class;
+ ast::Access access;
};
inline std::ostream& operator<<(std::ostream& out, VariableStorageData data) {
- out << std::string(data.input);
- return out;
+ out << std::string(data.input);
+ return out;
}
-class VariableQualifierTest
- : public ParserImplTestWithParam<VariableStorageData> {};
+class VariableQualifierTest : public ParserImplTestWithParam<VariableStorageData> {};
TEST_P(VariableQualifierTest, ParsesStorageClass) {
- auto params = GetParam();
- auto p = parser(std::string("<") + params.input + ">");
+ auto params = GetParam();
+ auto p = parser(std::string("<") + params.input + ">");
- auto sc = p->variable_qualifier();
- EXPECT_FALSE(p->has_error());
- EXPECT_FALSE(sc.errored);
- EXPECT_TRUE(sc.matched);
- EXPECT_EQ(sc->storage_class, params.storage_class);
- EXPECT_EQ(sc->access, params.access);
+ auto sc = p->variable_qualifier();
+ EXPECT_FALSE(p->has_error());
+ EXPECT_FALSE(sc.errored);
+ EXPECT_TRUE(sc.matched);
+ EXPECT_EQ(sc->storage_class, params.storage_class);
+ EXPECT_EQ(sc->access, params.access);
- auto t = p->next();
- EXPECT_TRUE(t.IsEof());
+ auto t = p->next();
+ EXPECT_TRUE(t.IsEof());
}
INSTANTIATE_TEST_SUITE_P(
ParserImplTest,
VariableQualifierTest,
testing::Values(
- VariableStorageData{"uniform", ast::StorageClass::kUniform,
- ast::Access::kUndefined},
- VariableStorageData{"workgroup", ast::StorageClass::kWorkgroup,
- ast::Access::kUndefined},
- VariableStorageData{"storage", ast::StorageClass::kStorage,
- ast::Access::kUndefined},
- VariableStorageData{"storage_buffer", ast::StorageClass::kStorage,
- ast::Access::kUndefined},
- VariableStorageData{"private", ast::StorageClass::kPrivate,
- ast::Access::kUndefined},
- VariableStorageData{"function", ast::StorageClass::kFunction,
- ast::Access::kUndefined},
- VariableStorageData{"storage, read", ast::StorageClass::kStorage,
- ast::Access::kRead},
- VariableStorageData{"storage, write", ast::StorageClass::kStorage,
- ast::Access::kWrite},
+ VariableStorageData{"uniform", ast::StorageClass::kUniform, ast::Access::kUndefined},
+ VariableStorageData{"workgroup", ast::StorageClass::kWorkgroup, ast::Access::kUndefined},
+ VariableStorageData{"storage", ast::StorageClass::kStorage, ast::Access::kUndefined},
+ VariableStorageData{"storage_buffer", ast::StorageClass::kStorage, ast::Access::kUndefined},
+ VariableStorageData{"private", ast::StorageClass::kPrivate, ast::Access::kUndefined},
+ VariableStorageData{"function", ast::StorageClass::kFunction, ast::Access::kUndefined},
+ VariableStorageData{"storage, read", ast::StorageClass::kStorage, ast::Access::kRead},
+ VariableStorageData{"storage, write", ast::StorageClass::kStorage, ast::Access::kWrite},
VariableStorageData{"storage, read_write", ast::StorageClass::kStorage,
ast::Access::kReadWrite}));
TEST_F(ParserImplTest, VariableQualifier_NoMatch) {
- auto p = parser("<not-a-storage-class>");
- auto sc = p->variable_qualifier();
- EXPECT_TRUE(p->has_error());
- EXPECT_TRUE(sc.errored);
- EXPECT_FALSE(sc.matched);
- EXPECT_EQ(p->error(), "1:2: invalid storage class for variable declaration");
+ auto p = parser("<not-a-storage-class>");
+ auto sc = p->variable_qualifier();
+ EXPECT_TRUE(p->has_error());
+ EXPECT_TRUE(sc.errored);
+ EXPECT_FALSE(sc.matched);
+ EXPECT_EQ(p->error(), "1:2: invalid storage class for variable declaration");
}
TEST_F(ParserImplTest, VariableQualifier_Empty) {
- auto p = parser("<>");
- auto sc = p->variable_qualifier();
- EXPECT_TRUE(p->has_error());
- EXPECT_TRUE(sc.errored);
- EXPECT_FALSE(sc.matched);
- EXPECT_EQ(p->error(), "1:2: invalid storage class for variable declaration");
+ auto p = parser("<>");
+ auto sc = p->variable_qualifier();
+ EXPECT_TRUE(p->has_error());
+ EXPECT_TRUE(sc.errored);
+ EXPECT_FALSE(sc.matched);
+ EXPECT_EQ(p->error(), "1:2: invalid storage class for variable declaration");
}
TEST_F(ParserImplTest, VariableQualifier_MissingLessThan) {
- auto p = parser("private>");
- auto sc = p->variable_qualifier();
- EXPECT_FALSE(p->has_error());
- EXPECT_FALSE(sc.errored);
- EXPECT_FALSE(sc.matched);
+ auto p = parser("private>");
+ auto sc = p->variable_qualifier();
+ EXPECT_FALSE(p->has_error());
+ EXPECT_FALSE(sc.errored);
+ EXPECT_FALSE(sc.matched);
- auto t = p->next();
- ASSERT_TRUE(t.Is(Token::Type::kPrivate));
+ auto t = p->next();
+ ASSERT_TRUE(t.Is(Token::Type::kPrivate));
}
TEST_F(ParserImplTest, VariableQualifier_MissingLessThan_AfterSC) {
- auto p = parser("private, >");
- auto sc = p->variable_qualifier();
- EXPECT_FALSE(p->has_error());
- EXPECT_FALSE(sc.errored);
- EXPECT_FALSE(sc.matched);
+ auto p = parser("private, >");
+ auto sc = p->variable_qualifier();
+ EXPECT_FALSE(p->has_error());
+ EXPECT_FALSE(sc.errored);
+ EXPECT_FALSE(sc.matched);
- auto t = p->next();
- ASSERT_TRUE(t.Is(Token::Type::kPrivate));
+ auto t = p->next();
+ ASSERT_TRUE(t.Is(Token::Type::kPrivate));
}
TEST_F(ParserImplTest, VariableQualifier_MissingGreaterThan) {
- auto p = parser("<private");
- auto sc = p->variable_qualifier();
- EXPECT_TRUE(p->has_error());
- EXPECT_TRUE(sc.errored);
- EXPECT_FALSE(sc.matched);
- EXPECT_EQ(p->error(), "1:9: expected '>' for variable declaration");
+ auto p = parser("<private");
+ auto sc = p->variable_qualifier();
+ EXPECT_TRUE(p->has_error());
+ EXPECT_TRUE(sc.errored);
+ EXPECT_FALSE(sc.matched);
+ EXPECT_EQ(p->error(), "1:9: expected '>' for variable declaration");
}
} // namespace
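The storage-class cases above use GoogleTest value-parameterization: every VariableStorageData row instantiates ParsesStorageClass once, and the operator<< overload is what makes a failing row print its WGSL input instead of raw bytes. A condensed, self-contained sketch of the same harness shape, with hypothetical names:

    #include <ostream>
    #include <string>
    #include "gtest/gtest.h"

    // Hypothetical parameter row mirroring the shape of VariableStorageData.
    struct QualifierCase {
        const char* input;  // text placed between '<' and '>' by the test
    };

    // Printing the input keeps gtest's failure output readable for each row.
    inline std::ostream& operator<<(std::ostream& out, QualifierCase c) {
        return out << c.input;
    }

    class QualifierSketchTest : public testing::TestWithParam<QualifierCase> {};

    TEST_P(QualifierSketchTest, BuildsSource) {
        // A real test would feed `source` to parser(); here only the harness wiring is shown.
        std::string source = std::string("<") + GetParam().input + ">";
        EXPECT_EQ(source.front(), '<');
        EXPECT_EQ(source.back(), '>');
    }

    INSTANTIATE_TEST_SUITE_P(Sketch,
                             QualifierSketchTest,
                             testing::Values(QualifierCase{"uniform"},
                                             QualifierCase{"storage, read_write"}));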
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_variable_stmt_test.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_variable_stmt_test.cc
index 8a8d7dfc1f8..ee899478a89 100644
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_variable_stmt_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_impl_variable_stmt_test.cc
@@ -18,169 +18,169 @@ namespace tint::reader::wgsl {
namespace {
TEST_F(ParserImplTest, VariableStmt_VariableDecl) {
- auto p = parser("var a : i32;");
- auto e = p->variable_stmt();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
- ASSERT_TRUE(e->Is<ast::VariableDeclStatement>());
- ASSERT_NE(e->variable, nullptr);
- EXPECT_EQ(e->variable->symbol, p->builder().Symbols().Get("a"));
-
- ASSERT_EQ(e->source.range.begin.line, 1u);
- ASSERT_EQ(e->source.range.begin.column, 5u);
- ASSERT_EQ(e->source.range.end.line, 1u);
- ASSERT_EQ(e->source.range.end.column, 6u);
-
- EXPECT_EQ(e->variable->constructor, nullptr);
+ auto p = parser("var a : i32;");
+ auto e = p->variable_stmt();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
+ ASSERT_TRUE(e->Is<ast::VariableDeclStatement>());
+ ASSERT_NE(e->variable, nullptr);
+ EXPECT_EQ(e->variable->symbol, p->builder().Symbols().Get("a"));
+
+ ASSERT_EQ(e->source.range.begin.line, 1u);
+ ASSERT_EQ(e->source.range.begin.column, 5u);
+ ASSERT_EQ(e->source.range.end.line, 1u);
+ ASSERT_EQ(e->source.range.end.column, 6u);
+
+ EXPECT_EQ(e->variable->constructor, nullptr);
}
TEST_F(ParserImplTest, VariableStmt_VariableDecl_WithInit) {
- auto p = parser("var a : i32 = 1;");
- auto e = p->variable_stmt();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
- ASSERT_TRUE(e->Is<ast::VariableDeclStatement>());
- ASSERT_NE(e->variable, nullptr);
- EXPECT_EQ(e->variable->symbol, p->builder().Symbols().Get("a"));
-
- ASSERT_EQ(e->source.range.begin.line, 1u);
- ASSERT_EQ(e->source.range.begin.column, 5u);
- ASSERT_EQ(e->source.range.end.line, 1u);
- ASSERT_EQ(e->source.range.end.column, 6u);
-
- ASSERT_NE(e->variable->constructor, nullptr);
- EXPECT_TRUE(e->variable->constructor->Is<ast::LiteralExpression>());
+ auto p = parser("var a : i32 = 1;");
+ auto e = p->variable_stmt();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
+ ASSERT_TRUE(e->Is<ast::VariableDeclStatement>());
+ ASSERT_NE(e->variable, nullptr);
+ EXPECT_EQ(e->variable->symbol, p->builder().Symbols().Get("a"));
+
+ ASSERT_EQ(e->source.range.begin.line, 1u);
+ ASSERT_EQ(e->source.range.begin.column, 5u);
+ ASSERT_EQ(e->source.range.end.line, 1u);
+ ASSERT_EQ(e->source.range.end.column, 6u);
+
+ ASSERT_NE(e->variable->constructor, nullptr);
+ EXPECT_TRUE(e->variable->constructor->Is<ast::LiteralExpression>());
}
TEST_F(ParserImplTest, VariableStmt_VariableDecl_ConstructorInvalid) {
- auto p = parser("var a : i32 = if(a) {}");
- auto e = p->variable_stmt();
- EXPECT_FALSE(e.matched);
- EXPECT_TRUE(e.errored);
- EXPECT_EQ(e.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:15: missing constructor for variable declaration");
+ auto p = parser("var a : i32 = if(a) {}");
+ auto e = p->variable_stmt();
+ EXPECT_FALSE(e.matched);
+ EXPECT_TRUE(e.errored);
+ EXPECT_EQ(e.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:15: missing constructor for variable declaration");
}
TEST_F(ParserImplTest, VariableStmt_VariableDecl_ArrayInit) {
- auto p = parser("var a : array<i32> = array<i32>();");
- auto e = p->variable_stmt();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
- ASSERT_TRUE(e->Is<ast::VariableDeclStatement>());
- ASSERT_NE(e->variable, nullptr);
- EXPECT_EQ(e->variable->symbol, p->builder().Symbols().Get("a"));
-
- ASSERT_NE(e->variable->constructor, nullptr);
- auto* call = e->variable->constructor->As<ast::CallExpression>();
- ASSERT_NE(call, nullptr);
- EXPECT_EQ(call->target.name, nullptr);
- EXPECT_NE(call->target.type, nullptr);
+ auto p = parser("var a : array<i32> = array<i32>();");
+ auto e = p->variable_stmt();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
+ ASSERT_TRUE(e->Is<ast::VariableDeclStatement>());
+ ASSERT_NE(e->variable, nullptr);
+ EXPECT_EQ(e->variable->symbol, p->builder().Symbols().Get("a"));
+
+ ASSERT_NE(e->variable->constructor, nullptr);
+ auto* call = e->variable->constructor->As<ast::CallExpression>();
+ ASSERT_NE(call, nullptr);
+ EXPECT_EQ(call->target.name, nullptr);
+ EXPECT_NE(call->target.type, nullptr);
}
TEST_F(ParserImplTest, VariableStmt_VariableDecl_ArrayInit_NoSpace) {
- auto p = parser("var a : array<i32>=array<i32>();");
- auto e = p->variable_stmt();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
- ASSERT_TRUE(e->Is<ast::VariableDeclStatement>());
- ASSERT_NE(e->variable, nullptr);
- EXPECT_EQ(e->variable->symbol, p->builder().Symbols().Get("a"));
-
- ASSERT_NE(e->variable->constructor, nullptr);
- auto* call = e->variable->constructor->As<ast::CallExpression>();
- ASSERT_NE(call, nullptr);
- EXPECT_EQ(call->target.name, nullptr);
- EXPECT_NE(call->target.type, nullptr);
+ auto p = parser("var a : array<i32>=array<i32>();");
+ auto e = p->variable_stmt();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
+ ASSERT_TRUE(e->Is<ast::VariableDeclStatement>());
+ ASSERT_NE(e->variable, nullptr);
+ EXPECT_EQ(e->variable->symbol, p->builder().Symbols().Get("a"));
+
+ ASSERT_NE(e->variable->constructor, nullptr);
+ auto* call = e->variable->constructor->As<ast::CallExpression>();
+ ASSERT_NE(call, nullptr);
+ EXPECT_EQ(call->target.name, nullptr);
+ EXPECT_NE(call->target.type, nullptr);
}
TEST_F(ParserImplTest, VariableStmt_VariableDecl_VecInit) {
- auto p = parser("var a : vec2<i32> = vec2<i32>();");
- auto e = p->variable_stmt();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
- ASSERT_TRUE(e->Is<ast::VariableDeclStatement>());
- ASSERT_NE(e->variable, nullptr);
- EXPECT_EQ(e->variable->symbol, p->builder().Symbols().Get("a"));
-
- ASSERT_NE(e->variable->constructor, nullptr);
- auto* call = e->variable->constructor->As<ast::CallExpression>();
- ASSERT_NE(call, nullptr);
- EXPECT_EQ(call->target.name, nullptr);
- EXPECT_NE(call->target.type, nullptr);
+ auto p = parser("var a : vec2<i32> = vec2<i32>();");
+ auto e = p->variable_stmt();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
+ ASSERT_TRUE(e->Is<ast::VariableDeclStatement>());
+ ASSERT_NE(e->variable, nullptr);
+ EXPECT_EQ(e->variable->symbol, p->builder().Symbols().Get("a"));
+
+ ASSERT_NE(e->variable->constructor, nullptr);
+ auto* call = e->variable->constructor->As<ast::CallExpression>();
+ ASSERT_NE(call, nullptr);
+ EXPECT_EQ(call->target.name, nullptr);
+ EXPECT_NE(call->target.type, nullptr);
}
TEST_F(ParserImplTest, VariableStmt_VariableDecl_VecInit_NoSpace) {
- auto p = parser("var a : vec2<i32>=vec2<i32>();");
- auto e = p->variable_stmt();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
- ASSERT_TRUE(e->Is<ast::VariableDeclStatement>());
- ASSERT_NE(e->variable, nullptr);
- EXPECT_EQ(e->variable->symbol, p->builder().Symbols().Get("a"));
-
- ASSERT_NE(e->variable->constructor, nullptr);
- auto* call = e->variable->constructor->As<ast::CallExpression>();
- ASSERT_NE(call, nullptr);
- EXPECT_EQ(call->target.name, nullptr);
- EXPECT_NE(call->target.type, nullptr);
+ auto p = parser("var a : vec2<i32>=vec2<i32>();");
+ auto e = p->variable_stmt();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
+ ASSERT_TRUE(e->Is<ast::VariableDeclStatement>());
+ ASSERT_NE(e->variable, nullptr);
+ EXPECT_EQ(e->variable->symbol, p->builder().Symbols().Get("a"));
+
+ ASSERT_NE(e->variable->constructor, nullptr);
+ auto* call = e->variable->constructor->As<ast::CallExpression>();
+ ASSERT_NE(call, nullptr);
+ EXPECT_EQ(call->target.name, nullptr);
+ EXPECT_NE(call->target.type, nullptr);
}
TEST_F(ParserImplTest, VariableStmt_Let) {
- auto p = parser("let a : i32 = 1");
- auto e = p->variable_stmt();
- EXPECT_TRUE(e.matched);
- EXPECT_FALSE(e.errored);
- EXPECT_FALSE(p->has_error()) << p->error();
- ASSERT_NE(e.value, nullptr);
- ASSERT_TRUE(e->Is<ast::VariableDeclStatement>());
-
- ASSERT_EQ(e->source.range.begin.line, 1u);
- ASSERT_EQ(e->source.range.begin.column, 5u);
- ASSERT_EQ(e->source.range.end.line, 1u);
- ASSERT_EQ(e->source.range.end.column, 6u);
+ auto p = parser("let a : i32 = 1");
+ auto e = p->variable_stmt();
+ EXPECT_TRUE(e.matched);
+ EXPECT_FALSE(e.errored);
+ EXPECT_FALSE(p->has_error()) << p->error();
+ ASSERT_NE(e.value, nullptr);
+ ASSERT_TRUE(e->Is<ast::VariableDeclStatement>());
+
+ ASSERT_EQ(e->source.range.begin.line, 1u);
+ ASSERT_EQ(e->source.range.begin.column, 5u);
+ ASSERT_EQ(e->source.range.end.line, 1u);
+ ASSERT_EQ(e->source.range.end.column, 6u);
}
TEST_F(ParserImplTest, VariableStmt_Let_MissingEqual) {
- auto p = parser("let a : i32 1");
- auto e = p->variable_stmt();
- EXPECT_FALSE(e.matched);
- EXPECT_TRUE(e.errored);
- EXPECT_EQ(e.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:13: expected '=' for let declaration");
+ auto p = parser("let a : i32 1");
+ auto e = p->variable_stmt();
+ EXPECT_FALSE(e.matched);
+ EXPECT_TRUE(e.errored);
+ EXPECT_EQ(e.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:13: expected '=' for let declaration");
}
TEST_F(ParserImplTest, VariableStmt_Let_MissingConstructor) {
- auto p = parser("let a : i32 =");
- auto e = p->variable_stmt();
- EXPECT_FALSE(e.matched);
- EXPECT_TRUE(e.errored);
- EXPECT_EQ(e.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:14: missing constructor for let declaration");
+ auto p = parser("let a : i32 =");
+ auto e = p->variable_stmt();
+ EXPECT_FALSE(e.matched);
+ EXPECT_TRUE(e.errored);
+ EXPECT_EQ(e.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:14: missing constructor for let declaration");
}
TEST_F(ParserImplTest, VariableStmt_Let_InvalidConstructor) {
- auto p = parser("let a : i32 = if (a) {}");
- auto e = p->variable_stmt();
- EXPECT_FALSE(e.matched);
- EXPECT_TRUE(e.errored);
- EXPECT_EQ(e.value, nullptr);
- EXPECT_TRUE(p->has_error());
- EXPECT_EQ(p->error(), "1:15: missing constructor for let declaration");
+ auto p = parser("let a : i32 = if (a) {}");
+ auto e = p->variable_stmt();
+ EXPECT_FALSE(e.matched);
+ EXPECT_TRUE(e.errored);
+ EXPECT_EQ(e.value, nullptr);
+ EXPECT_TRUE(p->has_error());
+ EXPECT_EQ(p->error(), "1:15: missing constructor for let declaration");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_test.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_test.cc
index 55a6287dcc3..1f0e206c744 100644
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/parser_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/parser_test.cc
@@ -24,37 +24,37 @@ namespace {
using ParserTest = testing::Test;
TEST_F(ParserTest, Empty) {
- Source::File file("test.wgsl", "");
- auto program = Parse(&file);
- auto errs = diag::Formatter().format(program.Diagnostics());
- ASSERT_TRUE(program.IsValid()) << errs;
+ Source::File file("test.wgsl", "");
+ auto program = Parse(&file);
+ auto errs = diag::Formatter().format(program.Diagnostics());
+ ASSERT_TRUE(program.IsValid()) << errs;
}
TEST_F(ParserTest, Parses) {
- Source::File file("test.wgsl", R"(
-@stage(fragment)
+ Source::File file("test.wgsl", R"(
+@fragment
fn main() -> @location(0) vec4<f32> {
return vec4<f32>(.4, .2, .3, 1.);
}
)");
- auto program = Parse(&file);
- auto errs = diag::Formatter().format(program.Diagnostics());
- ASSERT_TRUE(program.IsValid()) << errs;
+ auto program = Parse(&file);
+ auto errs = diag::Formatter().format(program.Diagnostics());
+ ASSERT_TRUE(program.IsValid()) << errs;
- ASSERT_EQ(1u, program.AST().Functions().size());
+ ASSERT_EQ(1u, program.AST().Functions().size());
}
TEST_F(ParserTest, HandlesError) {
- Source::File file("test.wgsl", R"(
+ Source::File file("test.wgsl", R"(
fn main() -> { // missing return type
return;
})");
- auto program = Parse(&file);
- auto errs = diag::Formatter().format(program.Diagnostics());
- ASSERT_FALSE(program.IsValid()) << errs;
- EXPECT_EQ(errs,
- R"(test.wgsl:2:15 error: unable to determine function return type
+ auto program = Parse(&file);
+ auto errs = diag::Formatter().format(program.Diagnostics());
+ ASSERT_FALSE(program.IsValid()) << errs;
+ EXPECT_EQ(errs,
+ R"(test.wgsl:2:15 error: unable to determine function return type
fn main() -> { // missing return type
^
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/token.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/token.cc
index ef7abc4e3ef..4680eee0d7f 100644
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/token.cc
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/token.cc
@@ -18,246 +18,254 @@ namespace tint::reader::wgsl {
// static
std::string_view Token::TypeToName(Type type) {
- switch (type) {
- case Token::Type::kError:
- return "kError";
- case Token::Type::kEOF:
- return "kEOF";
- case Token::Type::kIdentifier:
- return "kIdentifier";
- case Token::Type::kFloatLiteral:
- return "kFloatLiteral";
- case Token::Type::kSintLiteral:
- return "kSintLiteral";
- case Token::Type::kUintLiteral:
- return "kUintLiteral";
- case Token::Type::kUninitialized:
- return "kUninitialized";
+ switch (type) {
+ case Token::Type::kError:
+ return "error";
+ case Token::Type::kEOF:
+ return "end of file";
+ case Token::Type::kIdentifier:
+ return "identifier";
+ case Token::Type::kFloatLiteral:
+ return "abstract float literal";
+ case Token::Type::kFloatLiteral_F:
+ return "'f'-suffixed float literal";
+ case Token::Type::kIntLiteral:
+ return "abstract integer literal";
+ case Token::Type::kIntLiteral_I:
+ return "'i'-suffixed integer literal";
+ case Token::Type::kIntLiteral_U:
+ return "'u'-suffixed integer literal";
+ case Token::Type::kUninitialized:
+ return "uninitialized";
- case Token::Type::kAnd:
- return "&";
- case Token::Type::kAndAnd:
- return "&&";
- case Token::Type::kArrow:
- return "->";
- case Token::Type::kAttr:
- return "@";
- case Token::Type::kForwardSlash:
- return "/";
- case Token::Type::kBang:
- return "!";
- case Token::Type::kBracketLeft:
- return "[";
- case Token::Type::kBracketRight:
- return "]";
- case Token::Type::kBraceLeft:
- return "{";
- case Token::Type::kBraceRight:
- return "}";
- case Token::Type::kColon:
- return ":";
- case Token::Type::kComma:
- return ",";
- case Token::Type::kEqual:
- return "=";
- case Token::Type::kEqualEqual:
- return "==";
- case Token::Type::kGreaterThan:
- return ">";
- case Token::Type::kGreaterThanEqual:
- return ">=";
- case Token::Type::kShiftRight:
- return ">>";
- case Token::Type::kLessThan:
- return "<";
- case Token::Type::kLessThanEqual:
- return "<=";
- case Token::Type::kShiftLeft:
- return "<<";
- case Token::Type::kMod:
- return "%";
- case Token::Type::kNotEqual:
- return "!=";
- case Token::Type::kMinus:
- return "-";
- case Token::Type::kMinusMinus:
- return "--";
- case Token::Type::kPeriod:
- return ".";
- case Token::Type::kPlus:
- return "+";
- case Token::Type::kPlusPlus:
- return "++";
- case Token::Type::kOr:
- return "|";
- case Token::Type::kOrOr:
- return "||";
- case Token::Type::kParenLeft:
- return "(";
- case Token::Type::kParenRight:
- return ")";
- case Token::Type::kSemicolon:
- return ";";
- case Token::Type::kStar:
- return "*";
- case Token::Type::kTilde:
- return "~";
- case Token::Type::kUnderscore:
- return "_";
- case Token::Type::kXor:
- return "^";
- case Token::Type::kPlusEqual:
- return "+=";
- case Token::Type::kMinusEqual:
- return "-=";
- case Token::Type::kTimesEqual:
- return "*=";
- case Token::Type::kDivisionEqual:
- return "/=";
- case Token::Type::kModuloEqual:
- return "%=";
- case Token::Type::kAndEqual:
- return "&=";
- case Token::Type::kOrEqual:
- return "|=";
- case Token::Type::kXorEqual:
- return "^=";
+ case Token::Type::kAnd:
+ return "&";
+ case Token::Type::kAndAnd:
+ return "&&";
+ case Token::Type::kArrow:
+ return "->";
+ case Token::Type::kAttr:
+ return "@";
+ case Token::Type::kForwardSlash:
+ return "/";
+ case Token::Type::kBang:
+ return "!";
+ case Token::Type::kBracketLeft:
+ return "[";
+ case Token::Type::kBracketRight:
+ return "]";
+ case Token::Type::kBraceLeft:
+ return "{";
+ case Token::Type::kBraceRight:
+ return "}";
+ case Token::Type::kColon:
+ return ":";
+ case Token::Type::kComma:
+ return ",";
+ case Token::Type::kEqual:
+ return "=";
+ case Token::Type::kEqualEqual:
+ return "==";
+ case Token::Type::kGreaterThan:
+ return ">";
+ case Token::Type::kGreaterThanEqual:
+ return ">=";
+ case Token::Type::kShiftRight:
+ return ">>";
+ case Token::Type::kLessThan:
+ return "<";
+ case Token::Type::kLessThanEqual:
+ return "<=";
+ case Token::Type::kShiftLeft:
+ return "<<";
+ case Token::Type::kMod:
+ return "%";
+ case Token::Type::kNotEqual:
+ return "!=";
+ case Token::Type::kMinus:
+ return "-";
+ case Token::Type::kMinusMinus:
+ return "--";
+ case Token::Type::kPeriod:
+ return ".";
+ case Token::Type::kPlus:
+ return "+";
+ case Token::Type::kPlusPlus:
+ return "++";
+ case Token::Type::kOr:
+ return "|";
+ case Token::Type::kOrOr:
+ return "||";
+ case Token::Type::kParenLeft:
+ return "(";
+ case Token::Type::kParenRight:
+ return ")";
+ case Token::Type::kSemicolon:
+ return ";";
+ case Token::Type::kStar:
+ return "*";
+ case Token::Type::kTilde:
+ return "~";
+ case Token::Type::kUnderscore:
+ return "_";
+ case Token::Type::kXor:
+ return "^";
+ case Token::Type::kPlusEqual:
+ return "+=";
+ case Token::Type::kMinusEqual:
+ return "-=";
+ case Token::Type::kTimesEqual:
+ return "*=";
+ case Token::Type::kDivisionEqual:
+ return "/=";
+ case Token::Type::kModuloEqual:
+ return "%=";
+ case Token::Type::kAndEqual:
+ return "&=";
+ case Token::Type::kOrEqual:
+ return "|=";
+ case Token::Type::kXorEqual:
+ return "^=";
- case Token::Type::kArray:
- return "array";
- case Token::Type::kAtomic:
- return "atomic";
- case Token::Type::kBitcast:
- return "bitcast";
- case Token::Type::kBool:
- return "bool";
- case Token::Type::kBreak:
- return "break";
- case Token::Type::kCase:
- return "case";
- case Token::Type::kContinue:
- return "continue";
- case Token::Type::kContinuing:
- return "continuing";
- case Token::Type::kDiscard:
- return "discard";
- case Token::Type::kDefault:
- return "default";
- case Token::Type::kElse:
- return "else";
- case Token::Type::kF32:
- return "f32";
- case Token::Type::kFallthrough:
- return "fallthrough";
- case Token::Type::kFalse:
- return "false";
- case Token::Type::kFn:
- return "fn";
- case Token::Type::kFor:
- return "for";
- case Token::Type::kFunction:
- return "function";
- case Token::Type::kI32:
- return "i32";
- case Token::Type::kIf:
- return "if";
- case Token::Type::kImport:
- return "import";
- case Token::Type::kLet:
- return "let";
- case Token::Type::kLoop:
- return "loop";
- case Token::Type::kMat2x2:
- return "mat2x2";
- case Token::Type::kMat2x3:
- return "mat2x3";
- case Token::Type::kMat2x4:
- return "mat2x4";
- case Token::Type::kMat3x2:
- return "mat3x2";
- case Token::Type::kMat3x3:
- return "mat3x3";
- case Token::Type::kMat3x4:
- return "mat3x4";
- case Token::Type::kMat4x2:
- return "mat4x2";
- case Token::Type::kMat4x3:
- return "mat4x3";
- case Token::Type::kMat4x4:
- return "mat4x4";
- case Token::Type::kOverride:
- return "override";
- case Token::Type::kPrivate:
- return "private";
- case Token::Type::kPtr:
- return "ptr";
- case Token::Type::kReturn:
- return "return";
- case Token::Type::kSampler:
- return "sampler";
- case Token::Type::kComparisonSampler:
- return "sampler_comparison";
- case Token::Type::kStorage:
- return "storage";
- case Token::Type::kStruct:
- return "struct";
- case Token::Type::kSwitch:
- return "switch";
- case Token::Type::kTextureDepth2d:
- return "texture_depth_2d";
- case Token::Type::kTextureDepth2dArray:
- return "texture_depth_2d_array";
- case Token::Type::kTextureDepthCube:
- return "texture_depth_cube";
- case Token::Type::kTextureDepthCubeArray:
- return "texture_depth_cube_array";
- case Token::Type::kTextureDepthMultisampled2d:
- return "texture_depth_multisampled_2d";
- case Token::Type::kTextureExternal:
- return "texture_external";
- case Token::Type::kTextureMultisampled2d:
- return "texture_multisampled_2d";
- case Token::Type::kTextureSampled1d:
- return "texture_1d";
- case Token::Type::kTextureSampled2d:
- return "texture_2d";
- case Token::Type::kTextureSampled2dArray:
- return "texture_2d_array";
- case Token::Type::kTextureSampled3d:
- return "texture_3d";
- case Token::Type::kTextureSampledCube:
- return "texture_cube";
- case Token::Type::kTextureSampledCubeArray:
- return "texture_cube_array";
- case Token::Type::kTextureStorage1d:
- return "texture_storage_1d";
- case Token::Type::kTextureStorage2d:
- return "texture_storage_2d";
- case Token::Type::kTextureStorage2dArray:
- return "texture_storage_2d_array";
- case Token::Type::kTextureStorage3d:
- return "texture_storage_3d";
- case Token::Type::kTrue:
- return "true";
- case Token::Type::kType:
- return "type";
- case Token::Type::kU32:
- return "u32";
- case Token::Type::kUniform:
- return "uniform";
- case Token::Type::kVar:
- return "var";
- case Token::Type::kVec2:
- return "vec2";
- case Token::Type::kVec3:
- return "vec3";
- case Token::Type::kVec4:
- return "vec4";
- case Token::Type::kWorkgroup:
- return "workgroup";
- }
+ case Token::Type::kArray:
+ return "array";
+ case Token::Type::kAtomic:
+ return "atomic";
+ case Token::Type::kBitcast:
+ return "bitcast";
+ case Token::Type::kBool:
+ return "bool";
+ case Token::Type::kBreak:
+ return "break";
+ case Token::Type::kCase:
+ return "case";
+ case Token::Type::kContinue:
+ return "continue";
+ case Token::Type::kContinuing:
+ return "continuing";
+ case Token::Type::kDiscard:
+ return "discard";
+ case Token::Type::kDefault:
+ return "default";
+ case Token::Type::kElse:
+ return "else";
+ case Token::Type::kEnable:
+ return "enable";
+ case Token::Type::kF16:
+ return "f16";
+ case Token::Type::kF32:
+ return "f32";
+ case Token::Type::kFallthrough:
+ return "fallthrough";
+ case Token::Type::kFalse:
+ return "false";
+ case Token::Type::kFn:
+ return "fn";
+ case Token::Type::kFor:
+ return "for";
+ case Token::Type::kFunction:
+ return "function";
+ case Token::Type::kI32:
+ return "i32";
+ case Token::Type::kIf:
+ return "if";
+ case Token::Type::kImport:
+ return "import";
+ case Token::Type::kLet:
+ return "let";
+ case Token::Type::kLoop:
+ return "loop";
+ case Token::Type::kMat2x2:
+ return "mat2x2";
+ case Token::Type::kMat2x3:
+ return "mat2x3";
+ case Token::Type::kMat2x4:
+ return "mat2x4";
+ case Token::Type::kMat3x2:
+ return "mat3x2";
+ case Token::Type::kMat3x3:
+ return "mat3x3";
+ case Token::Type::kMat3x4:
+ return "mat3x4";
+ case Token::Type::kMat4x2:
+ return "mat4x2";
+ case Token::Type::kMat4x3:
+ return "mat4x3";
+ case Token::Type::kMat4x4:
+ return "mat4x4";
+ case Token::Type::kOverride:
+ return "override";
+ case Token::Type::kPrivate:
+ return "private";
+ case Token::Type::kPtr:
+ return "ptr";
+ case Token::Type::kReturn:
+ return "return";
+ case Token::Type::kSampler:
+ return "sampler";
+ case Token::Type::kComparisonSampler:
+ return "sampler_comparison";
+ case Token::Type::kStorage:
+ return "storage";
+ case Token::Type::kStruct:
+ return "struct";
+ case Token::Type::kSwitch:
+ return "switch";
+ case Token::Type::kTextureDepth2d:
+ return "texture_depth_2d";
+ case Token::Type::kTextureDepth2dArray:
+ return "texture_depth_2d_array";
+ case Token::Type::kTextureDepthCube:
+ return "texture_depth_cube";
+ case Token::Type::kTextureDepthCubeArray:
+ return "texture_depth_cube_array";
+ case Token::Type::kTextureDepthMultisampled2d:
+ return "texture_depth_multisampled_2d";
+ case Token::Type::kTextureExternal:
+ return "texture_external";
+ case Token::Type::kTextureMultisampled2d:
+ return "texture_multisampled_2d";
+ case Token::Type::kTextureSampled1d:
+ return "texture_1d";
+ case Token::Type::kTextureSampled2d:
+ return "texture_2d";
+ case Token::Type::kTextureSampled2dArray:
+ return "texture_2d_array";
+ case Token::Type::kTextureSampled3d:
+ return "texture_3d";
+ case Token::Type::kTextureSampledCube:
+ return "texture_cube";
+ case Token::Type::kTextureSampledCubeArray:
+ return "texture_cube_array";
+ case Token::Type::kTextureStorage1d:
+ return "texture_storage_1d";
+ case Token::Type::kTextureStorage2d:
+ return "texture_storage_2d";
+ case Token::Type::kTextureStorage2dArray:
+ return "texture_storage_2d_array";
+ case Token::Type::kTextureStorage3d:
+ return "texture_storage_3d";
+ case Token::Type::kTrue:
+ return "true";
+ case Token::Type::kType:
+ return "type";
+ case Token::Type::kU32:
+ return "u32";
+ case Token::Type::kUniform:
+ return "uniform";
+ case Token::Type::kVar:
+ return "var";
+ case Token::Type::kVec2:
+ return "vec2";
+ case Token::Type::kVec3:
+ return "vec3";
+ case Token::Type::kVec4:
+ return "vec4";
+ case Token::Type::kWorkgroup:
+ return "workgroup";
+ }
- return "<unknown>";
+ return "<unknown>";
}
Token::Token() : type_(Type::kUninitialized) {}
@@ -271,14 +279,11 @@ Token::Token(Type type, const Source& source, const std::string& str)
Token::Token(Type type, const Source& source, const char* str)
: type_(type), source_(source), value_(std::string_view(str)) {}
-Token::Token(const Source& source, uint32_t val)
- : type_(Type::kUintLiteral), source_(source), value_(val) {}
+Token::Token(Type type, const Source& source, int64_t val)
+ : type_(type), source_(source), value_(val) {}
-Token::Token(const Source& source, int32_t val)
- : type_(Type::kSintLiteral), source_(source), value_(val) {}
-
-Token::Token(const Source& source, float val)
- : type_(Type::kFloatLiteral), source_(source), value_(val) {}
+Token::Token(Type type, const Source& source, double val)
+ : type_(type), source_(source), value_(val) {}
Token::Token(Type type, const Source& source) : type_(type), source_(source) {}
@@ -291,44 +296,44 @@ Token::~Token() = default;
Token& Token::operator=(const Token& rhs) = default;
bool Token::operator==(std::string_view ident) {
- if (type_ != Type::kIdentifier) {
- return false;
- }
- if (auto* view = std::get_if<std::string_view>(&value_)) {
- return *view == ident;
- }
- return std::get<std::string>(value_) == ident;
+ if (type_ != Type::kIdentifier) {
+ return false;
+ }
+ if (auto* view = std::get_if<std::string_view>(&value_)) {
+ return *view == ident;
+ }
+ return std::get<std::string>(value_) == ident;
}
std::string Token::to_str() const {
- switch (type_) {
- case Type::kFloatLiteral:
- return std::to_string(std::get<float>(value_));
- case Type::kSintLiteral:
- return std::to_string(std::get<int32_t>(value_));
- case Type::kUintLiteral:
- return std::to_string(std::get<uint32_t>(value_));
- case Type::kIdentifier:
- case Type::kError:
- if (auto* view = std::get_if<std::string_view>(&value_)) {
- return std::string(*view);
- }
- return std::get<std::string>(value_);
- default:
- return "";
- }
-}
-
-float Token::to_f32() const {
- return std::get<float>(value_);
+ switch (type_) {
+ case Type::kFloatLiteral:
+ return std::to_string(std::get<double>(value_));
+ case Type::kFloatLiteral_F:
+ return std::to_string(std::get<double>(value_)) + "f";
+ case Type::kIntLiteral:
+ return std::to_string(std::get<int64_t>(value_));
+ case Type::kIntLiteral_I:
+ return std::to_string(std::get<int64_t>(value_)) + "i";
+ case Type::kIntLiteral_U:
+ return std::to_string(std::get<int64_t>(value_)) + "u";
+ case Type::kIdentifier:
+ case Type::kError:
+ if (auto* view = std::get_if<std::string_view>(&value_)) {
+ return std::string(*view);
+ }
+ return std::get<std::string>(value_);
+ default:
+ return "";
+ }
}
-uint32_t Token::to_u32() const {
- return std::get<uint32_t>(value_);
+double Token::to_f64() const {
+ return std::get<double>(value_);
}
-int32_t Token::to_i32() const {
- return std::get<int32_t>(value_);
+int64_t Token::to_i64() const {
+ return std::get<int64_t>(value_);
}
} // namespace tint::reader::wgsl
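The token changes in this file and the next replace the old kSintLiteral/kUintLiteral/kFloatLiteral trio with suffix-aware literal types whose payloads are int64_t and double, read back through to_i64() and to_f64(). A short consumer-side sketch, using only members that appear in the diff:

    #include <iostream>

    #include "src/tint/reader/wgsl/token.h"

    // Sketch of how a caller might report the new literal tokens.
    void DescribeLiteral(const tint::reader::wgsl::Token& tok) {
        using Token = tint::reader::wgsl::Token;
        if (tok.Is(Token::Type::kIntLiteral) || tok.Is(Token::Type::kIntLiteral_I) ||
            tok.Is(Token::Type::kIntLiteral_U)) {
            // Suffixed and unsuffixed integer literals now share a single int64_t payload.
            std::cout << "integer literal: " << tok.to_i64() << "\n";
        } else if (tok.Is(Token::Type::kFloatLiteral) || tok.Is(Token::Type::kFloatLiteral_F)) {
            // Float literals are carried as a double payload regardless of suffix.
            std::cout << "float literal: " << tok.to_f64() << "\n";
        } else {
            std::cout << "not a numeric literal\n";
        }
    }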
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/token.h b/chromium/third_party/dawn/src/tint/reader/wgsl/token.h
index 7b5b6754ed2..0a68f9b0554 100644
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/token.h
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/token.h
@@ -17,7 +17,8 @@
#include <string>
#include <string_view>
-#include <variant> // NOLINT: cpplint doesn't recognise this
+// TODO(https://crbug.com/dawn/1379) Update cpplint and remove NOLINT
+#include <variant>  // NOLINT(build/include_order)
#include "src/tint/source.h"
@@ -25,391 +26,387 @@ namespace tint::reader::wgsl {
/// Stores tokens generated by the Lexer
class Token {
- public:
- /// The type of the parsed token
- enum class Type {
- /// Error result
- kError = -2,
- /// Uninitialized token
- kUninitialized = 0,
- /// End of input string reached
- kEOF,
+ public:
+ /// The type of the parsed token
+ enum class Type {
+ /// Error result
+ kError = -2,
+ /// Uninitialized token
+ kUninitialized = 0,
+ /// End of input string reached
+ kEOF,
- /// An identifier
- kIdentifier,
- /// A float value
- kFloatLiteral,
- /// An signed int value
- kSintLiteral,
- /// A unsigned int value
- kUintLiteral,
+ /// An identifier
+ kIdentifier,
+ /// A float literal with no suffix
+ kFloatLiteral,
+ /// A float literal with an 'f' suffix
+ kFloatLiteral_F,
+ /// An integer literal with no suffix
+ kIntLiteral,
+ /// An integer literal with an 'i' suffix
+ kIntLiteral_I,
+ /// An integer literal with a 'u' suffix
+ kIntLiteral_U,
- /// A '&'
- kAnd,
- /// A '&&'
- kAndAnd,
- /// A '->'
- kArrow,
- /// A '@'
- kAttr,
- /// A '/'
- kForwardSlash,
- /// A '!'
- kBang,
- /// A '['
- kBracketLeft,
- /// A ']'
- kBracketRight,
- /// A '{'
- kBraceLeft,
- /// A '}'
- kBraceRight,
- /// A ':'
- kColon,
- /// A ','
- kComma,
- /// A '='
- kEqual,
- /// A '=='
- kEqualEqual,
- /// A '>'
- kGreaterThan,
- /// A '>='
- kGreaterThanEqual,
- /// A '>>'
- kShiftRight,
- /// A '<'
- kLessThan,
- /// A '<='
- kLessThanEqual,
- /// A '<<'
- kShiftLeft,
- /// A '%'
- kMod,
- /// A '-'
- kMinus,
- /// A '--'
- kMinusMinus,
- /// A '!='
- kNotEqual,
- /// A '.'
- kPeriod,
- /// A '+'
- kPlus,
- /// A '++'
- kPlusPlus,
- /// A '|'
- kOr,
- /// A '||'
- kOrOr,
- /// A '('
- kParenLeft,
- /// A ')'
- kParenRight,
- /// A ';'
- kSemicolon,
- /// A '*'
- kStar,
- /// A '~'
- kTilde,
- /// A '_'
- kUnderscore,
- /// A '^'
- kXor,
- /// A '+='
- kPlusEqual,
- /// A '-='
- kMinusEqual,
- /// A '*='
- kTimesEqual,
- /// A '/='
- kDivisionEqual,
- /// A '%='
- kModuloEqual,
- /// A '&='
- kAndEqual,
- /// A '|='
- kOrEqual,
- /// A '^='
- kXorEqual,
+ /// A '&'
+ kAnd,
+ /// A '&&'
+ kAndAnd,
+ /// A '->'
+ kArrow,
+ /// A '@'
+ kAttr,
+ /// A '/'
+ kForwardSlash,
+ /// A '!'
+ kBang,
+ /// A '['
+ kBracketLeft,
+ /// A ']'
+ kBracketRight,
+ /// A '{'
+ kBraceLeft,
+ /// A '}'
+ kBraceRight,
+ /// A ':'
+ kColon,
+ /// A ','
+ kComma,
+ /// A '='
+ kEqual,
+ /// A '=='
+ kEqualEqual,
+ /// A '>'
+ kGreaterThan,
+ /// A '>='
+ kGreaterThanEqual,
+ /// A '>>'
+ kShiftRight,
+ /// A '<'
+ kLessThan,
+ /// A '<='
+ kLessThanEqual,
+ /// A '<<'
+ kShiftLeft,
+ /// A '%'
+ kMod,
+ /// A '-'
+ kMinus,
+ /// A '--'
+ kMinusMinus,
+ /// A '!='
+ kNotEqual,
+ /// A '.'
+ kPeriod,
+ /// A '+'
+ kPlus,
+ /// A '++'
+ kPlusPlus,
+ /// A '|'
+ kOr,
+ /// A '||'
+ kOrOr,
+ /// A '('
+ kParenLeft,
+ /// A ')'
+ kParenRight,
+ /// A ';'
+ kSemicolon,
+ /// A '*'
+ kStar,
+ /// A '~'
+ kTilde,
+ /// A '_'
+ kUnderscore,
+ /// A '^'
+ kXor,
+ /// A '+='
+ kPlusEqual,
+ /// A '-='
+ kMinusEqual,
+ /// A '*='
+ kTimesEqual,
+ /// A '/='
+ kDivisionEqual,
+ /// A '%='
+ kModuloEqual,
+ /// A '&='
+ kAndEqual,
+ /// A '|='
+ kOrEqual,
+ /// A '^='
+ kXorEqual,
- /// A 'array'
- kArray,
- /// A 'atomic'
- kAtomic,
- /// A 'bitcast'
- kBitcast,
- /// A 'bool'
- kBool,
- /// A 'break'
- kBreak,
- /// A 'case'
- kCase,
- /// A 'continue'
- kContinue,
- /// A 'continuing'
- kContinuing,
- /// A 'discard'
- kDiscard,
- /// A 'default'
- kDefault,
- /// A 'else'
- kElse,
- /// A 'f32'
- kF32,
- /// A 'fallthrough'
- kFallthrough,
- /// A 'false'
- kFalse,
- /// A 'fn'
- kFn,
- // A 'for'
- kFor,
- /// A 'function'
- kFunction,
- /// A 'i32'
- kI32,
- /// A 'if'
- kIf,
- /// A 'import'
- kImport,
- /// A 'let'
- kLet,
- /// A 'loop'
- kLoop,
- /// A 'mat2x2'
- kMat2x2,
- /// A 'mat2x3'
- kMat2x3,
- /// A 'mat2x4'
- kMat2x4,
- /// A 'mat3x2'
- kMat3x2,
- /// A 'mat3x3'
- kMat3x3,
- /// A 'mat3x4'
- kMat3x4,
- /// A 'mat4x2'
- kMat4x2,
- /// A 'mat4x3'
- kMat4x3,
- /// A 'mat4x4'
- kMat4x4,
- /// A 'override'
- kOverride,
- /// A 'private'
- kPrivate,
- /// A 'ptr'
- kPtr,
- /// A 'return'
- kReturn,
- /// A 'sampler'
- kSampler,
- /// A 'sampler_comparison'
- kComparisonSampler,
- /// A 'storage'
- kStorage,
- /// A 'struct'
- kStruct,
- /// A 'switch'
- kSwitch,
- /// A 'texture_depth_2d'
- kTextureDepth2d,
- /// A 'texture_depth_2d_array'
- kTextureDepth2dArray,
- /// A 'texture_depth_cube'
- kTextureDepthCube,
- /// A 'texture_depth_cube_array'
- kTextureDepthCubeArray,
- /// A 'texture_depth_multisampled_2d'
- kTextureDepthMultisampled2d,
- /// A 'texture_external'
- kTextureExternal,
- /// A 'texture_multisampled_2d'
- kTextureMultisampled2d,
- /// A 'texture_1d'
- kTextureSampled1d,
- /// A 'texture_2d'
- kTextureSampled2d,
- /// A 'texture_2d_array'
- kTextureSampled2dArray,
- /// A 'texture_3d'
- kTextureSampled3d,
- /// A 'texture_cube'
- kTextureSampledCube,
- /// A 'texture_cube_array'
- kTextureSampledCubeArray,
- /// A 'texture_storage_1d'
- kTextureStorage1d,
- /// A 'texture_storage_2d'
- kTextureStorage2d,
- /// A 'texture_storage_2d_array'
- kTextureStorage2dArray,
- /// A 'texture_storage_3d'
- kTextureStorage3d,
- /// A 'true'
- kTrue,
- /// A 'type'
- kType,
- /// A 'u32'
- kU32,
- /// A 'uniform'
- kUniform,
- /// A 'var'
- kVar,
- /// A 'vec2'
- kVec2,
- /// A 'vec3'
- kVec3,
- /// A 'vec4'
- kVec4,
- /// A 'workgroup'
- kWorkgroup,
- };
+ /// A 'array'
+ kArray,
+ /// A 'atomic'
+ kAtomic,
+ /// A 'bitcast'
+ kBitcast,
+ /// A 'bool'
+ kBool,
+ /// A 'break'
+ kBreak,
+ /// A 'case'
+ kCase,
+ /// A 'continue'
+ kContinue,
+ /// A 'continuing'
+ kContinuing,
+ /// A 'discard'
+ kDiscard,
+ /// A 'default'
+ kDefault,
+ /// A 'else'
+ kElse,
+ /// A 'enable'
+ kEnable,
+ /// A 'f16'
+ kF16,
+ /// A 'f32'
+ kF32,
+ /// A 'fallthrough'
+ kFallthrough,
+ /// A 'false'
+ kFalse,
+ /// A 'fn'
+ kFn,
+ // A 'for'
+ kFor,
+ /// A 'function'
+ kFunction,
+ /// A 'i32'
+ kI32,
+ /// A 'if'
+ kIf,
+ /// A 'import'
+ kImport,
+ /// A 'let'
+ kLet,
+ /// A 'loop'
+ kLoop,
+ /// A 'mat2x2'
+ kMat2x2,
+ /// A 'mat2x3'
+ kMat2x3,
+ /// A 'mat2x4'
+ kMat2x4,
+ /// A 'mat3x2'
+ kMat3x2,
+ /// A 'mat3x3'
+ kMat3x3,
+ /// A 'mat3x4'
+ kMat3x4,
+ /// A 'mat4x2'
+ kMat4x2,
+ /// A 'mat4x3'
+ kMat4x3,
+ /// A 'mat4x4'
+ kMat4x4,
+ /// A 'override'
+ kOverride,
+ /// A 'private'
+ kPrivate,
+ /// A 'ptr'
+ kPtr,
+ /// A 'return'
+ kReturn,
+ /// A 'sampler'
+ kSampler,
+ /// A 'sampler_comparison'
+ kComparisonSampler,
+ /// A 'storage'
+ kStorage,
+ /// A 'struct'
+ kStruct,
+ /// A 'switch'
+ kSwitch,
+ /// A 'texture_depth_2d'
+ kTextureDepth2d,
+ /// A 'texture_depth_2d_array'
+ kTextureDepth2dArray,
+ /// A 'texture_depth_cube'
+ kTextureDepthCube,
+ /// A 'texture_depth_cube_array'
+ kTextureDepthCubeArray,
+ /// A 'texture_depth_multisampled_2d'
+ kTextureDepthMultisampled2d,
+ /// A 'texture_external'
+ kTextureExternal,
+ /// A 'texture_multisampled_2d'
+ kTextureMultisampled2d,
+ /// A 'texture_1d'
+ kTextureSampled1d,
+ /// A 'texture_2d'
+ kTextureSampled2d,
+ /// A 'texture_2d_array'
+ kTextureSampled2dArray,
+ /// A 'texture_3d'
+ kTextureSampled3d,
+ /// A 'texture_cube'
+ kTextureSampledCube,
+ /// A 'texture_cube_array'
+ kTextureSampledCubeArray,
+ /// A 'texture_storage_1d'
+ kTextureStorage1d,
+ /// A 'texture_storage_2d'
+ kTextureStorage2d,
+ /// A 'texture_storage_2d_array'
+ kTextureStorage2dArray,
+ /// A 'texture_storage_3d'
+ kTextureStorage3d,
+ /// A 'true'
+ kTrue,
+ /// A 'type'
+ kType,
+ /// A 'u32'
+ kU32,
+ /// A 'uniform'
+ kUniform,
+ /// A 'var'
+ kVar,
+ /// A 'vec2'
+ kVec2,
+ /// A 'vec3'
+ kVec3,
+ /// A 'vec4'
+ kVec4,
+ /// A 'workgroup'
+ kWorkgroup,
+ };
- /// Converts a token type to a name
- /// @param type the type to convert
- /// @returns the token type as as string
- static std::string_view TypeToName(Type type);
+ /// Converts a token type to a name
+ /// @param type the type to convert
+ /// @returns the token type as a string
+ static std::string_view TypeToName(Type type);
- /// Creates an uninitialized token
- Token();
- /// Create a Token
- /// @param type the Token::Type of the token
- /// @param source the source of the token
- Token(Type type, const Source& source);
+ /// Creates an uninitialized token
+ Token();
+ /// Create a Token
+ /// @param type the Token::Type of the token
+ /// @param source the source of the token
+ Token(Type type, const Source& source);
- /// Create a string Token
- /// @param type the Token::Type of the token
- /// @param source the source of the token
- /// @param view the source string view for the token
- Token(Type type, const Source& source, const std::string_view& view);
- /// Create a string Token
- /// @param type the Token::Type of the token
- /// @param source the source of the token
- /// @param str the source string for the token
- Token(Type type, const Source& source, const std::string& str);
- /// Create a string Token
- /// @param type the Token::Type of the token
- /// @param source the source of the token
- /// @param str the source string for the token
- Token(Type type, const Source& source, const char* str);
- /// Create a unsigned integer Token
- /// @param source the source of the token
- /// @param val the source unsigned for the token
- Token(const Source& source, uint32_t val);
- /// Create a signed integer Token
- /// @param source the source of the token
- /// @param val the source integer for the token
- Token(const Source& source, int32_t val);
- /// Create a float Token
- /// @param source the source of the token
- /// @param val the source float for the token
- Token(const Source& source, float val);
- /// Move constructor
- Token(Token&&);
- /// Copy constructor
- Token(const Token&);
- ~Token();
+ /// Create a string Token
+ /// @param type the Token::Type of the token
+ /// @param source the source of the token
+ /// @param view the source string view for the token
+ Token(Type type, const Source& source, const std::string_view& view);
+ /// Create a string Token
+ /// @param type the Token::Type of the token
+ /// @param source the source of the token
+ /// @param str the source string for the token
+ Token(Type type, const Source& source, const std::string& str);
+ /// Create a string Token
+ /// @param type the Token::Type of the token
+ /// @param source the source of the token
+ /// @param str the source string for the token
+ Token(Type type, const Source& source, const char* str);
+ /// Create an integer Token of the given type
+ /// @param type the Token::Type of the token
+ /// @param source the source of the token
+ /// @param val the source integer for the token
+ Token(Type type, const Source& source, int64_t val);
+ /// Create a double Token
+ /// @param type the Token::Type of the token
+ /// @param source the source of the token
+ /// @param val the source double for the token
+ Token(Type type, const Source& source, double val);
+ /// Move constructor
+ Token(Token&&);
+ /// Copy constructor
+ Token(const Token&);
+ ~Token();
- /// Assignment operator
- /// @param b the token to copy
- /// @return Token
- Token& operator=(const Token& b);
+ /// Assignment operator
+ /// @param b the token to copy
+ /// @return Token
+ Token& operator=(const Token& b);
- /// Equality operator with an identifier
- /// @param ident the identifier string
- /// @return true if this token is an identifier and is equal to ident.
- bool operator==(std::string_view ident);
+ /// Equality operator with an identifier
+ /// @param ident the identifier string
+ /// @return true if this token is an identifier and is equal to ident.
+ bool operator==(std::string_view ident);
- /// Returns true if the token is of the given type
- /// @param t the type to check against.
- /// @returns true if the token is of type `t`
- bool Is(Type t) const { return type_ == t; }
+ /// Returns true if the token is of the given type
+ /// @param t the type to check against.
+ /// @returns true if the token is of type `t`
+ bool Is(Type t) const { return type_ == t; }
- /// @returns true if the token is uninitialized
- bool IsUninitialized() const { return type_ == Type::kUninitialized; }
- /// @returns true if the token is EOF
- bool IsEof() const { return type_ == Type::kEOF; }
- /// @returns true if the token is Error
- bool IsError() const { return type_ == Type::kError; }
- /// @returns true if the token is an identifier
- bool IsIdentifier() const { return type_ == Type::kIdentifier; }
- /// @returns true if the token is a literal
- bool IsLiteral() const {
- return type_ == Type::kSintLiteral || type_ == Type::kFalse ||
- type_ == Type::kUintLiteral || type_ == Type::kTrue ||
- type_ == Type::kFloatLiteral;
- }
- /// @returns true if token is a 'matNxM'
- bool IsMatrix() const {
- return type_ == Type::kMat2x2 || type_ == Type::kMat2x3 ||
- type_ == Type::kMat2x4 || type_ == Type::kMat3x2 ||
- type_ == Type::kMat3x3 || type_ == Type::kMat3x4 ||
- type_ == Type::kMat4x2 || type_ == Type::kMat4x3 ||
- type_ == Type::kMat4x4;
- }
- /// @returns true if token is a 'mat3xM'
- bool IsMat3xN() const {
- return type_ == Type::kMat3x2 || type_ == Type::kMat3x3 ||
- type_ == Type::kMat3x4;
- }
- /// @returns true if token is a 'mat4xM'
- bool IsMat4xN() const {
- return type_ == Type::kMat4x2 || type_ == Type::kMat4x3 ||
- type_ == Type::kMat4x4;
- }
- /// @returns true if token is a 'matNx3'
- bool IsMatNx3() const {
- return type_ == Type::kMat2x3 || type_ == Type::kMat3x3 ||
- type_ == Type::kMat4x3;
- }
- /// @returns true if token is a 'matNx4'
- bool IsMatNx4() const {
- return type_ == Type::kMat2x4 || type_ == Type::kMat3x4 ||
- type_ == Type::kMat4x4;
- }
+ /// @returns true if the token is uninitialized
+ bool IsUninitialized() const { return type_ == Type::kUninitialized; }
+ /// @returns true if the token is EOF
+ bool IsEof() const { return type_ == Type::kEOF; }
+ /// @returns true if the token is Error
+ bool IsError() const { return type_ == Type::kError; }
+ /// @returns true if the token is an identifier
+ bool IsIdentifier() const { return type_ == Type::kIdentifier; }
+ /// @returns true if the token is a literal
+ bool IsLiteral() const {
+ return type_ == Type::kIntLiteral || type_ == Type::kIntLiteral_I ||
+ type_ == Type::kIntLiteral_U || type_ == Type::kFalse || type_ == Type::kTrue ||
+ type_ == Type::kFloatLiteral || type_ == Type::kFloatLiteral_F;
+ }
+ /// @returns true if token is a 'matNxM'
+ bool IsMatrix() const {
+ return type_ == Type::kMat2x2 || type_ == Type::kMat2x3 || type_ == Type::kMat2x4 ||
+ type_ == Type::kMat3x2 || type_ == Type::kMat3x3 || type_ == Type::kMat3x4 ||
+ type_ == Type::kMat4x2 || type_ == Type::kMat4x3 || type_ == Type::kMat4x4;
+ }
+ /// @returns true if token is a 'mat3xM'
+ bool IsMat3xN() const {
+ return type_ == Type::kMat3x2 || type_ == Type::kMat3x3 || type_ == Type::kMat3x4;
+ }
+ /// @returns true if token is a 'mat4xM'
+ bool IsMat4xN() const {
+ return type_ == Type::kMat4x2 || type_ == Type::kMat4x3 || type_ == Type::kMat4x4;
+ }
+ /// @returns true if token is a 'matNx3'
+ bool IsMatNx3() const {
+ return type_ == Type::kMat2x3 || type_ == Type::kMat3x3 || type_ == Type::kMat4x3;
+ }
+ /// @returns true if token is a 'matNx4'
+ bool IsMatNx4() const {
+ return type_ == Type::kMat2x4 || type_ == Type::kMat3x4 || type_ == Type::kMat4x4;
+ }
- /// @returns true if token is a 'vecN'
- bool IsVector() const {
- return type_ == Type::kVec2 || type_ == Type::kVec3 || type_ == Type::kVec4;
- }
+ /// @returns true if token is a 'vecN'
+ bool IsVector() const {
+ return type_ == Type::kVec2 || type_ == Type::kVec3 || type_ == Type::kVec4;
+ }
- /// @returns the source information for this token
- Source source() const { return source_; }
+ /// @returns the source information for this token
+ Source source() const { return source_; }
- /// Returns the string value of the token
- /// @return std::string
- std::string to_str() const;
- /// Returns the float value of the token. 0 is returned if the token does not
- /// contain a float value.
- /// @return float
- float to_f32() const;
- /// Returns the uint32 value of the token. 0 is returned if the token does not
- /// contain a unsigned integer value.
- /// @return uint32_t
- uint32_t to_u32() const;
- /// Returns the int32 value of the token. 0 is returned if the token does not
- /// contain a signed integer value.
- /// @return int32_t
- int32_t to_i32() const;
+ /// Returns the string value of the token
+ /// @return std::string
+ std::string to_str() const;
+ /// Returns the double value of the token. 0 is returned if the token does not
+ /// contain a double value.
+ /// @return double
+ double to_f64() const;
+ /// Returns the int64_t value of the token. 0 is returned if the token does
+ /// not contain an integer value.
+ /// @return int64_t
+ int64_t to_i64() const;
- /// @returns the token type as string
- std::string_view to_name() const { return Token::TypeToName(type_); }
+ /// @returns the token type as string
+ std::string_view to_name() const { return Token::TypeToName(type_); }
- private:
- /// The Token::Type of the token
- Type type_ = Type::kError;
- /// The source where the token appeared
- Source source_;
- /// The value represented by the token
- std::variant<int32_t, uint32_t, float, std::string, std::string_view> value_;
+ private:
+ /// The Token::Type of the token
+ Type type_ = Type::kError;
+ /// The source where the token appeared
+ Source source_;
+ /// The value represented by the token
+ std::variant<int64_t, double, std::string, std::string_view> value_;
};
#ifndef NDEBUG
inline std::ostream& operator<<(std::ostream& out, Token::Type type) {
- out << Token::TypeToName(type);
- return out;
+ out << Token::TypeToName(type);
+ return out;
}
#endif // NDEBUG
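
The token.h hunk above folds the old per-width constructors (uint32_t, int32_t, float) into two 64-bit overloads that take an explicit Token::Type, and the stored value becomes std::variant<int64_t, double, std::string, std::string_view>. A minimal usage sketch follows, assuming only the declarations visible in this hunk; the include path and namespace are written as I understand the tree to be laid out, and the function itself is illustrative, not part of the patch.

// Sketch only: constructing and reading back tokens with the consolidated
// 64-bit API declared in token.h above.
#include "src/tint/reader/wgsl/token.h"

namespace tint::reader::wgsl {

void TokenUsageSketch() {
    Source src;  // default (unknown) source range

    // A '123i' literal would lex to a signed-integer token; the value is
    // carried as int64_t regardless of the eventual WGSL width.
    Token i_tok(Token::Type::kIntLiteral_I, src, static_cast<int64_t>(123));
    int64_t i_val = i_tok.to_i64();  // 123

    // A '1.5f' literal would lex to an f32 token; the value is carried as double.
    Token f_tok(Token::Type::kFloatLiteral_F, src, 1.5);
    double f_val = f_tok.to_f64();  // 1.5

    (void)i_val;
    (void)f_val;
}

}  // namespace tint::reader::wgsl
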
diff --git a/chromium/third_party/dawn/src/tint/reader/wgsl/token_test.cc b/chromium/third_party/dawn/src/tint/reader/wgsl/token_test.cc
index 6d25cd7f67c..93fe8349ca3 100644
--- a/chromium/third_party/dawn/src/tint/reader/wgsl/token_test.cc
+++ b/chromium/third_party/dawn/src/tint/reader/wgsl/token_test.cc
@@ -16,59 +16,80 @@
#include <limits>
-#include "gtest/gtest.h"
+#include "gmock/gmock.h"
namespace tint::reader::wgsl {
namespace {
+using ::testing::EndsWith;
+using ::testing::Not;
+using ::testing::StartsWith;
+
using TokenTest = testing::Test;
-TEST_F(TokenTest, ReturnsF32) {
- Token t1(Source{}, -2.345f);
- EXPECT_EQ(t1.to_f32(), -2.345f);
+TEST_F(TokenTest, ReturnsF64) {
+ Token t1(Token::Type::kFloatLiteral_F, Source{}, -2.345);
+ EXPECT_EQ(t1.to_f64(), -2.345);
- Token t2(Source{}, 2.345f);
- EXPECT_EQ(t2.to_f32(), 2.345f);
+ Token t2(Token::Type::kFloatLiteral_F, Source{}, 2.345);
+ EXPECT_EQ(t2.to_f64(), 2.345);
}
TEST_F(TokenTest, ReturnsI32) {
- Token t1(Source{}, -2345);
- EXPECT_EQ(t1.to_i32(), -2345);
+ Token t1(Token::Type::kIntLiteral_I, Source{}, static_cast<int64_t>(-2345));
+ EXPECT_EQ(t1.to_i64(), -2345);
- Token t2(Source{}, 2345);
- EXPECT_EQ(t2.to_i32(), 2345);
+ Token t2(Token::Type::kIntLiteral_I, Source{}, static_cast<int64_t>(2345));
+ EXPECT_EQ(t2.to_i64(), 2345);
}
TEST_F(TokenTest, HandlesMaxI32) {
- Token t1(Source{}, std::numeric_limits<int32_t>::max());
- EXPECT_EQ(t1.to_i32(), std::numeric_limits<int32_t>::max());
+ Token t1(Token::Type::kIntLiteral_I, Source{},
+ static_cast<int64_t>(std::numeric_limits<int32_t>::max()));
+ EXPECT_EQ(t1.to_i64(), std::numeric_limits<int32_t>::max());
}
TEST_F(TokenTest, HandlesMinI32) {
- Token t1(Source{}, std::numeric_limits<int32_t>::min());
- EXPECT_EQ(t1.to_i32(), std::numeric_limits<int32_t>::min());
+ Token t1(Token::Type::kIntLiteral_I, Source{},
+ static_cast<int64_t>(std::numeric_limits<int32_t>::min()));
+ EXPECT_EQ(t1.to_i64(), std::numeric_limits<int32_t>::min());
}
TEST_F(TokenTest, ReturnsU32) {
- Token t2(Source{}, 2345u);
- EXPECT_EQ(t2.to_u32(), 2345u);
+ Token t2(Token::Type::kIntLiteral_U, Source{}, static_cast<int64_t>(2345u));
+ EXPECT_EQ(t2.to_i64(), 2345u);
}
TEST_F(TokenTest, ReturnsMaxU32) {
- Token t1(Source{}, std::numeric_limits<uint32_t>::max());
- EXPECT_EQ(t1.to_u32(), std::numeric_limits<uint32_t>::max());
+ Token t1(Token::Type::kIntLiteral_U, Source{},
+ static_cast<int64_t>(std::numeric_limits<uint32_t>::max()));
+ EXPECT_EQ(t1.to_i64(), std::numeric_limits<uint32_t>::max());
}
TEST_F(TokenTest, Source) {
- Source src;
- src.range.begin = Source::Location{3, 9};
- src.range.end = Source::Location{4, 3};
-
- Token t(Token::Type::kUintLiteral, src);
- EXPECT_EQ(t.source().range.begin.line, 3u);
- EXPECT_EQ(t.source().range.begin.column, 9u);
- EXPECT_EQ(t.source().range.end.line, 4u);
- EXPECT_EQ(t.source().range.end.column, 3u);
+ Source src;
+ src.range.begin = Source::Location{3, 9};
+ src.range.end = Source::Location{4, 3};
+
+ Token t(Token::Type::kIntLiteral, src);
+ EXPECT_EQ(t.source().range.begin.line, 3u);
+ EXPECT_EQ(t.source().range.begin.column, 9u);
+ EXPECT_EQ(t.source().range.end.line, 4u);
+ EXPECT_EQ(t.source().range.end.column, 3u);
+}
+
+TEST_F(TokenTest, ToStr) {
+ double d = 123.0;
+ int64_t i = 123;
+ EXPECT_THAT(Token(Token::Type::kFloatLiteral, Source{}, d).to_str(), StartsWith("123"));
+ EXPECT_THAT(Token(Token::Type::kFloatLiteral, Source{}, d).to_str(), Not(EndsWith("f")));
+ EXPECT_THAT(Token(Token::Type::kFloatLiteral_F, Source{}, d).to_str(), StartsWith("123"));
+ EXPECT_THAT(Token(Token::Type::kFloatLiteral_F, Source{}, d).to_str(), EndsWith("f"));
+ EXPECT_EQ(Token(Token::Type::kIntLiteral, Source{}, i).to_str(), "123");
+ EXPECT_EQ(Token(Token::Type::kIntLiteral_I, Source{}, i).to_str(), "123i");
+ EXPECT_EQ(Token(Token::Type::kIntLiteral_U, Source{}, i).to_str(), "123u");
+ EXPECT_EQ(Token(Token::Type::kIdentifier, Source{}, "blah").to_str(), "blah");
+ EXPECT_EQ(Token(Token::Type::kError, Source{}, "blah").to_str(), "blah");
}
} // namespace
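
The tests above only exercise the public accessors; token.cc itself is not part of this hunk. The bodies below are therefore an assumption about how to_i64() and to_f64() could sit on top of the std::variant member shown earlier, consistent with the documented "returns 0 when absent" behaviour, and not an excerpt from the patch.

// Hypothetical accessor bodies; the real definitions live in token.cc and
// may differ from this sketch.
#include <variant>

#include "src/tint/reader/wgsl/token.h"

namespace tint::reader::wgsl {

int64_t Token::to_i64() const {
    if (auto* val = std::get_if<int64_t>(&value_)) {
        return *val;
    }
    return 0;  // documented fallback when the token holds no integer value
}

double Token::to_f64() const {
    if (auto* val = std::get_if<double>(&value_)) {
        return *val;
    }
    return 0.0;  // documented fallback when the token holds no float value
}

}  // namespace tint::reader::wgsl
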
diff --git a/chromium/third_party/dawn/src/tint/resolver/array_accessor_test.cc b/chromium/third_party/dawn/src/tint/resolver/array_accessor_test.cc
index e506f23660d..d9ef10e4e71 100644
--- a/chromium/third_party/dawn/src/tint/resolver/array_accessor_test.cc
+++ b/chromium/third_party/dawn/src/tint/resolver/array_accessor_test.cc
@@ -16,7 +16,9 @@
#include "gmock/gmock.h"
#include "src/tint/resolver/resolver_test_helper.h"
-#include "src/tint/sem/reference_type.h"
+#include "src/tint/sem/reference.h"
+
+using namespace tint::number_suffixes; // NOLINT
namespace tint::resolver {
namespace {
@@ -24,286 +26,294 @@ namespace {
using ResolverIndexAccessorTest = ResolverTest;
TEST_F(ResolverIndexAccessorTest, Matrix_Dynamic_F32) {
- Global("my_var", ty.mat2x3<f32>(), ast::StorageClass::kPrivate);
- auto* acc = IndexAccessor("my_var", Expr(Source{{12, 34}}, 1.0f));
- WrapInFunction(acc);
+ Global("my_var", ty.mat2x3<f32>(), ast::StorageClass::kPrivate);
+ auto* acc = IndexAccessor("my_var", Expr(Source{{12, 34}}, 1_f));
+ WrapInFunction(acc);
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: index must be of type 'i32' or 'u32', found: 'f32'");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: index must be of type 'i32' or 'u32', found: 'f32'");
}
TEST_F(ResolverIndexAccessorTest, Matrix_Dynamic_Ref) {
- Global("my_var", ty.mat2x3<f32>(), ast::StorageClass::kPrivate);
- auto* idx = Var("idx", ty.i32(), Construct(ty.i32()));
- auto* acc = IndexAccessor("my_var", idx);
- WrapInFunction(Decl(idx), acc);
+ Global("my_var", ty.mat2x3<f32>(), ast::StorageClass::kPrivate);
+ auto* idx = Var("idx", ty.i32(), Construct(ty.i32()));
+ auto* acc = IndexAccessor("my_var", idx);
+ WrapInFunction(Decl(idx), acc);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverIndexAccessorTest, Matrix_BothDimensions_Dynamic_Ref) {
- Global("my_var", ty.mat4x4<f32>(), ast::StorageClass::kPrivate);
- auto* idx = Var("idx", ty.u32(), Expr(3u));
- auto* idy = Var("idy", ty.u32(), Expr(2u));
- auto* acc = IndexAccessor(IndexAccessor("my_var", idx), idy);
- WrapInFunction(Decl(idx), Decl(idy), acc);
+ Global("my_var", ty.mat4x4<f32>(), ast::StorageClass::kPrivate);
+ auto* idx = Var("idx", ty.u32(), Expr(3_u));
+ auto* idy = Var("idy", ty.u32(), Expr(2_u));
+ auto* acc = IndexAccessor(IndexAccessor("my_var", idx), idy);
+ WrapInFunction(Decl(idx), Decl(idy), acc);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverIndexAccessorTest, Matrix_Dynamic) {
- GlobalConst("my_const", ty.mat2x3<f32>(), Construct(ty.mat2x3<f32>()));
- auto* idx = Var("idx", ty.i32(), Construct(ty.i32()));
- auto* acc = IndexAccessor("my_const", Expr(Source{{12, 34}}, idx));
- WrapInFunction(Decl(idx), acc);
+ GlobalConst("my_const", ty.mat2x3<f32>(), Construct(ty.mat2x3<f32>()));
+ auto* idx = Var("idx", ty.i32(), Construct(ty.i32()));
+ auto* acc = IndexAccessor("my_const", Expr(Source{{12, 34}}, idx));
+ WrapInFunction(Decl(idx), acc);
- EXPECT_TRUE(r()->Resolve());
- EXPECT_EQ(r()->error(), "");
+ EXPECT_TRUE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "");
}
TEST_F(ResolverIndexAccessorTest, Matrix_XDimension_Dynamic) {
- GlobalConst("my_var", ty.mat4x4<f32>(), Construct(ty.mat4x4<f32>()));
- auto* idx = Var("idx", ty.u32(), Expr(3u));
- auto* acc = IndexAccessor("my_var", Expr(Source{{12, 34}}, idx));
- WrapInFunction(Decl(idx), acc);
+ GlobalConst("my_var", ty.mat4x4<f32>(), Construct(ty.mat4x4<f32>()));
+ auto* idx = Var("idx", ty.u32(), Expr(3_u));
+ auto* acc = IndexAccessor("my_var", Expr(Source{{12, 34}}, idx));
+ WrapInFunction(Decl(idx), acc);
- EXPECT_TRUE(r()->Resolve());
- EXPECT_EQ(r()->error(), "");
+ EXPECT_TRUE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "");
}
TEST_F(ResolverIndexAccessorTest, Matrix_BothDimension_Dynamic) {
- GlobalConst("my_var", ty.mat4x4<f32>(), Construct(ty.mat4x4<f32>()));
- auto* idx = Var("idy", ty.u32(), Expr(2u));
- auto* acc =
- IndexAccessor(IndexAccessor("my_var", Expr(Source{{12, 34}}, idx)), 1);
- WrapInFunction(Decl(idx), acc);
-
- EXPECT_TRUE(r()->Resolve());
- EXPECT_EQ(r()->error(), "");
+ GlobalConst("my_var", ty.mat4x4<f32>(), Construct(ty.mat4x4<f32>()));
+ auto* idx = Var("idy", ty.u32(), Expr(2_u));
+ auto* acc = IndexAccessor(IndexAccessor("my_var", Expr(Source{{12, 34}}, idx)), 1_i);
+ WrapInFunction(Decl(idx), acc);
+
+ EXPECT_TRUE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "");
}
TEST_F(ResolverIndexAccessorTest, Matrix) {
- Global("my_var", ty.mat2x3<f32>(), ast::StorageClass::kPrivate);
+ Global("my_var", ty.mat2x3<f32>(), ast::StorageClass::kPrivate);
- auto* acc = IndexAccessor("my_var", 2);
- WrapInFunction(acc);
+ auto* acc = IndexAccessor("my_var", 2_i);
+ WrapInFunction(acc);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(acc), nullptr);
- ASSERT_TRUE(TypeOf(acc)->Is<sem::Reference>());
+ ASSERT_NE(TypeOf(acc), nullptr);
+ ASSERT_TRUE(TypeOf(acc)->Is<sem::Reference>());
- auto* ref = TypeOf(acc)->As<sem::Reference>();
- ASSERT_TRUE(ref->StoreType()->Is<sem::Vector>());
- EXPECT_EQ(ref->StoreType()->As<sem::Vector>()->Width(), 3u);
+ auto* ref = TypeOf(acc)->As<sem::Reference>();
+ ASSERT_TRUE(ref->StoreType()->Is<sem::Vector>());
+ EXPECT_EQ(ref->StoreType()->As<sem::Vector>()->Width(), 3u);
}
TEST_F(ResolverIndexAccessorTest, Matrix_BothDimensions) {
- Global("my_var", ty.mat2x3<f32>(), ast::StorageClass::kPrivate);
+ Global("my_var", ty.mat2x3<f32>(), ast::StorageClass::kPrivate);
- auto* acc = IndexAccessor(IndexAccessor("my_var", 2), 1);
- WrapInFunction(acc);
+ auto* acc = IndexAccessor(IndexAccessor("my_var", 2_i), 1_i);
+ WrapInFunction(acc);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(acc), nullptr);
- ASSERT_TRUE(TypeOf(acc)->Is<sem::Reference>());
+ ASSERT_NE(TypeOf(acc), nullptr);
+ ASSERT_TRUE(TypeOf(acc)->Is<sem::Reference>());
- auto* ref = TypeOf(acc)->As<sem::Reference>();
- EXPECT_TRUE(ref->StoreType()->Is<sem::F32>());
+ auto* ref = TypeOf(acc)->As<sem::Reference>();
+ EXPECT_TRUE(ref->StoreType()->Is<sem::F32>());
}
TEST_F(ResolverIndexAccessorTest, Vector_F32) {
- Global("my_var", ty.vec3<f32>(), ast::StorageClass::kPrivate);
- auto* acc = IndexAccessor("my_var", Expr(Source{{12, 34}}, 2.0f));
- WrapInFunction(acc);
+ Global("my_var", ty.vec3<f32>(), ast::StorageClass::kPrivate);
+ auto* acc = IndexAccessor("my_var", Expr(Source{{12, 34}}, 2_f));
+ WrapInFunction(acc);
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: index must be of type 'i32' or 'u32', found: 'f32'");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: index must be of type 'i32' or 'u32', found: 'f32'");
}
TEST_F(ResolverIndexAccessorTest, Vector_Dynamic_Ref) {
- Global("my_var", ty.vec3<f32>(), ast::StorageClass::kPrivate);
- auto* idx = Var("idx", ty.i32(), Expr(2));
- auto* acc = IndexAccessor("my_var", idx);
- WrapInFunction(Decl(idx), acc);
+ Global("my_var", ty.vec3<f32>(), ast::StorageClass::kPrivate);
+ auto* idx = Var("idx", ty.i32(), Expr(2_i));
+ auto* acc = IndexAccessor("my_var", idx);
+ WrapInFunction(Decl(idx), acc);
- EXPECT_TRUE(r()->Resolve());
+ EXPECT_TRUE(r()->Resolve());
}
TEST_F(ResolverIndexAccessorTest, Vector_Dynamic) {
- GlobalConst("my_var", ty.vec3<f32>(), Construct(ty.vec3<f32>()));
- auto* idx = Var("idx", ty.i32(), Expr(2));
- auto* acc = IndexAccessor("my_var", Expr(Source{{12, 34}}, idx));
- WrapInFunction(Decl(idx), acc);
+ GlobalConst("my_var", ty.vec3<f32>(), Construct(ty.vec3<f32>()));
+ auto* idx = Var("idx", ty.i32(), Expr(2_i));
+ auto* acc = IndexAccessor("my_var", Expr(Source{{12, 34}}, idx));
+ WrapInFunction(Decl(idx), acc);
- EXPECT_TRUE(r()->Resolve());
+ EXPECT_TRUE(r()->Resolve());
}
TEST_F(ResolverIndexAccessorTest, Vector) {
- Global("my_var", ty.vec3<f32>(), ast::StorageClass::kPrivate);
+ Global("my_var", ty.vec3<f32>(), ast::StorageClass::kPrivate);
- auto* acc = IndexAccessor("my_var", 2);
- WrapInFunction(acc);
+ auto* acc = IndexAccessor("my_var", 2_i);
+ WrapInFunction(acc);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(acc), nullptr);
- ASSERT_TRUE(TypeOf(acc)->Is<sem::Reference>());
+ ASSERT_NE(TypeOf(acc), nullptr);
+ ASSERT_TRUE(TypeOf(acc)->Is<sem::Reference>());
- auto* ref = TypeOf(acc)->As<sem::Reference>();
- EXPECT_TRUE(ref->StoreType()->Is<sem::F32>());
+ auto* ref = TypeOf(acc)->As<sem::Reference>();
+ EXPECT_TRUE(ref->StoreType()->Is<sem::F32>());
}
-TEST_F(ResolverIndexAccessorTest, Array) {
- auto* idx = Expr(2);
- Global("my_var", ty.array<f32, 3>(), ast::StorageClass::kPrivate);
-
- auto* acc = IndexAccessor("my_var", idx);
- WrapInFunction(acc);
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+TEST_F(ResolverIndexAccessorTest, Array_Literal_i32) {
+ Global("my_var", ty.array<f32, 3>(), ast::StorageClass::kPrivate);
+ auto* acc = IndexAccessor("my_var", 2_i);
+ WrapInFunction(acc);
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_NE(TypeOf(acc), nullptr);
+ auto* ref = TypeOf(acc)->As<sem::Reference>();
+ ASSERT_NE(ref, nullptr);
+ EXPECT_TRUE(ref->StoreType()->Is<sem::F32>());
+}
- ASSERT_NE(TypeOf(acc), nullptr);
- ASSERT_TRUE(TypeOf(acc)->Is<sem::Reference>());
+TEST_F(ResolverIndexAccessorTest, Array_Literal_u32) {
+ Global("my_var", ty.array<f32, 3>(), ast::StorageClass::kPrivate);
+ auto* acc = IndexAccessor("my_var", 2_u);
+ WrapInFunction(acc);
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_NE(TypeOf(acc), nullptr);
+ auto* ref = TypeOf(acc)->As<sem::Reference>();
+ ASSERT_NE(ref, nullptr);
+ EXPECT_TRUE(ref->StoreType()->Is<sem::F32>());
+}
- auto* ref = TypeOf(acc)->As<sem::Reference>();
- EXPECT_TRUE(ref->StoreType()->Is<sem::F32>());
+TEST_F(ResolverIndexAccessorTest, Array_Literal_AInt) {
+ Global("my_var", ty.array<f32, 3>(), ast::StorageClass::kPrivate);
+ auto* acc = IndexAccessor("my_var", 2_a);
+ WrapInFunction(acc);
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_NE(TypeOf(acc), nullptr);
+ auto* ref = TypeOf(acc)->As<sem::Reference>();
+ ASSERT_NE(ref, nullptr);
+ EXPECT_TRUE(ref->StoreType()->Is<sem::F32>());
}
TEST_F(ResolverIndexAccessorTest, Alias_Array) {
- auto* aary = Alias("myarrty", ty.array<f32, 3>());
+ auto* aary = Alias("myarrty", ty.array<f32, 3>());
- Global("my_var", ty.Of(aary), ast::StorageClass::kPrivate);
+ Global("my_var", ty.Of(aary), ast::StorageClass::kPrivate);
- auto* acc = IndexAccessor("my_var", 2);
- WrapInFunction(acc);
+ auto* acc = IndexAccessor("my_var", 2_i);
+ WrapInFunction(acc);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(acc), nullptr);
- ASSERT_TRUE(TypeOf(acc)->Is<sem::Reference>());
+ ASSERT_NE(TypeOf(acc), nullptr);
+ ASSERT_TRUE(TypeOf(acc)->Is<sem::Reference>());
- auto* ref = TypeOf(acc)->As<sem::Reference>();
- EXPECT_TRUE(ref->StoreType()->Is<sem::F32>());
+ auto* ref = TypeOf(acc)->As<sem::Reference>();
+ EXPECT_TRUE(ref->StoreType()->Is<sem::F32>());
}
TEST_F(ResolverIndexAccessorTest, Array_Constant) {
- GlobalConst("my_var", ty.array<f32, 3>(), array<f32, 3>());
+ GlobalConst("my_var", ty.array<f32, 3>(), array<f32, 3>());
- auto* acc = IndexAccessor("my_var", 2);
- WrapInFunction(acc);
+ auto* acc = IndexAccessor("my_var", 2_i);
+ WrapInFunction(acc);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(acc), nullptr);
- EXPECT_TRUE(TypeOf(acc)->Is<sem::F32>());
+ ASSERT_NE(TypeOf(acc), nullptr);
+ EXPECT_TRUE(TypeOf(acc)->Is<sem::F32>());
}
TEST_F(ResolverIndexAccessorTest, Array_Dynamic_I32) {
- // let a : array<f32, 3> = 0;
- // var idx : i32 = 0;
- // var f : f32 = a[idx];
- auto* a = Const("a", ty.array<f32, 3>(), array<f32, 3>());
- auto* idx = Var("idx", ty.i32(), Construct(ty.i32()));
- auto* f = Var("f", ty.f32(), IndexAccessor("a", Expr(Source{{12, 34}}, idx)));
- Func("my_func", ast::VariableList{}, ty.void_(),
- {
- Decl(a),
- Decl(idx),
- Decl(f),
- },
- ast::AttributeList{});
-
- EXPECT_TRUE(r()->Resolve());
- EXPECT_EQ(r()->error(), "");
+ // let a : array<f32, 3> = 0;
+ // var idx : i32 = 0;
+ // var f : f32 = a[idx];
+ auto* a = Let("a", ty.array<f32, 3>(), array<f32, 3>());
+ auto* idx = Var("idx", ty.i32(), Construct(ty.i32()));
+ auto* f = Var("f", ty.f32(), IndexAccessor("a", Expr(Source{{12, 34}}, idx)));
+ Func("my_func", ast::VariableList{}, ty.void_(),
+ {
+ Decl(a),
+ Decl(idx),
+ Decl(f),
+ },
+ ast::AttributeList{});
+
+ EXPECT_TRUE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "");
}
TEST_F(ResolverIndexAccessorTest, Array_Literal_F32) {
- // let a : array<f32, 3>;
- // var f : f32 = a[2.0f];
- auto* a = Const("a", ty.array<f32, 3>(), array<f32, 3>());
- auto* f =
- Var("a_2", ty.f32(), IndexAccessor("a", Expr(Source{{12, 34}}, 2.0f)));
- Func("my_func", ast::VariableList{}, ty.void_(),
- {
- Decl(a),
- Decl(f),
- },
- ast::AttributeList{});
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: index must be of type 'i32' or 'u32', found: 'f32'");
+ // let a : array<f32, 3>;
+ // var f : f32 = a[2.0f];
+ auto* a = Let("a", ty.array<f32, 3>(), array<f32, 3>());
+ auto* f = Var("a_2", ty.f32(), IndexAccessor("a", Expr(Source{{12, 34}}, 2_f)));
+ Func("my_func", ast::VariableList{}, ty.void_(),
+ {
+ Decl(a),
+ Decl(f),
+ },
+ ast::AttributeList{});
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: index must be of type 'i32' or 'u32', found: 'f32'");
}
TEST_F(ResolverIndexAccessorTest, Array_Literal_I32) {
- // let a : array<f32, 3>;
- // var f : f32 = a[2];
- auto* a = Const("a", ty.array<f32, 3>(), array<f32, 3>());
- auto* f = Var("a_2", ty.f32(), IndexAccessor("a", 2));
- Func("my_func", ast::VariableList{}, ty.void_(),
- {
- Decl(a),
- Decl(f),
- },
- ast::AttributeList{});
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ // let a : array<f32, 3>;
+ // var f : f32 = a[2i];
+ auto* a = Let("a", ty.array<f32, 3>(), array<f32, 3>());
+ auto* f = Var("a_2", ty.f32(), IndexAccessor("a", 2_i));
+ Func("my_func", ast::VariableList{}, ty.void_(),
+ {
+ Decl(a),
+ Decl(f),
+ },
+ ast::AttributeList{});
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
-TEST_F(ResolverIndexAccessorTest, EXpr_Deref_FuncGoodParent) {
- // fn func(p: ptr<function, vec4<f32>>) -> f32 {
- // let idx: u32 = u32();
- // let x: f32 = (*p)[idx];
- // return x;
- // }
- auto* p =
- Param("p", ty.pointer(ty.vec4<f32>(), ast::StorageClass::kFunction));
- auto* idx = Const("idx", ty.u32(), Construct(ty.u32()));
- auto* star_p = Deref(p);
- auto* accessor_expr = IndexAccessor(Source{{12, 34}}, star_p, idx);
- auto* x = Var("x", ty.f32(), accessor_expr);
- Func("func", {p}, ty.f32(), {Decl(idx), Decl(x), Return(x)});
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+TEST_F(ResolverIndexAccessorTest, Expr_Deref_FuncGoodParent) {
+ // fn func(p: ptr<function, vec4<f32>>) -> f32 {
+ // let idx: u32 = u32();
+ // let x: f32 = (*p)[idx];
+ // return x;
+ // }
+ auto* p = Param("p", ty.pointer(ty.vec4<f32>(), ast::StorageClass::kFunction));
+ auto* idx = Let("idx", ty.u32(), Construct(ty.u32()));
+ auto* star_p = Deref(p);
+ auto* accessor_expr = IndexAccessor(Source{{12, 34}}, star_p, idx);
+ auto* x = Var("x", ty.f32(), accessor_expr);
+ Func("func", {p}, ty.f32(), {Decl(idx), Decl(x), Return(x)});
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
-TEST_F(ResolverIndexAccessorTest, EXpr_Deref_FuncBadParent) {
- // fn func(p: ptr<function, vec4<f32>>) -> f32 {
- // let idx: u32 = u32();
- // let x: f32 = *p[idx];
- // return x;
- // }
- auto* p =
- Param("p", ty.pointer(ty.vec4<f32>(), ast::StorageClass::kFunction));
- auto* idx = Const("idx", ty.u32(), Construct(ty.u32()));
- auto* accessor_expr = IndexAccessor(Source{{12, 34}}, p, idx);
- auto* star_p = Deref(accessor_expr);
- auto* x = Var("x", ty.f32(), star_p);
- Func("func", {p}, ty.f32(), {Decl(idx), Decl(x), Return(x)});
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- "12:34 error: cannot index type 'ptr<function, vec4<f32>, read_write>'");
+TEST_F(ResolverIndexAccessorTest, Expr_Deref_FuncBadParent) {
+ // fn func(p: ptr<function, vec4<f32>>) -> f32 {
+ // let idx: u32 = u32();
+ // let x: f32 = *p[idx];
+ // return x;
+ // }
+ auto* p = Param("p", ty.pointer(ty.vec4<f32>(), ast::StorageClass::kFunction));
+ auto* idx = Let("idx", ty.u32(), Construct(ty.u32()));
+ auto* accessor_expr = IndexAccessor(Source{{12, 34}}, p, idx);
+ auto* star_p = Deref(accessor_expr);
+ auto* x = Var("x", ty.f32(), star_p);
+ Func("func", {p}, ty.f32(), {Decl(idx), Decl(x), Return(x)});
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: cannot index type 'ptr<function, vec4<f32>, read_write>'");
}
TEST_F(ResolverIndexAccessorTest, Exr_Deref_BadParent) {
- // var param: vec4<f32>
- // let x: f32 = *(&param)[0];
- auto* param = Var("param", ty.vec4<f32>());
- auto* idx = Var("idx", ty.u32(), Construct(ty.u32()));
- auto* addressOf_expr = AddressOf(param);
- auto* accessor_expr = IndexAccessor(Source{{12, 34}}, addressOf_expr, idx);
- auto* star_p = Deref(accessor_expr);
- auto* x = Var("x", ty.f32(), star_p);
- WrapInFunction(param, idx, x);
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- "12:34 error: cannot index type 'ptr<function, vec4<f32>, read_write>'");
+ // var param: vec4<f32>
+ // let x: f32 = *(&param)[0];
+ auto* param = Var("param", ty.vec4<f32>());
+ auto* idx = Var("idx", ty.u32(), Construct(ty.u32()));
+ auto* addressOf_expr = AddressOf(param);
+ auto* accessor_expr = IndexAccessor(Source{{12, 34}}, addressOf_expr, idx);
+ auto* star_p = Deref(accessor_expr);
+ auto* x = Var("x", ty.f32(), star_p);
+ WrapInFunction(param, idx, x);
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: cannot index type 'ptr<function, vec4<f32>, read_write>'");
}
} // namespace
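
From here on the resolver tests build literals with the _i, _u, _f and _a suffixes brought in by "using namespace tint::number_suffixes". Their real return types are not visible in this diff, so the sketch below only shows the general user-defined-literal pattern they follow; the wrapper struct names are placeholders invented for illustration.

// Illustrative only: Tint's actual suffix operators return its own numeric
// wrapper types. The structs and namespace below are stand-ins.
#include <cstdint>

namespace sketch {

struct I32Lit  { int32_t value; };   // stands in for what 2_i produces
struct U32Lit  { uint32_t value; };  // stands in for 2_u
struct F32Lit  { float value; };     // stands in for 2_f / 2.3_f
struct AIntLit { int64_t value; };   // abstract int (e.g. 2_a), materialized by the resolver

constexpr I32Lit operator""_i(unsigned long long v) { return {static_cast<int32_t>(v)}; }
constexpr U32Lit operator""_u(unsigned long long v) { return {static_cast<uint32_t>(v)}; }
constexpr F32Lit operator""_f(unsigned long long v) { return {static_cast<float>(v)}; }
constexpr F32Lit operator""_f(long double v) { return {static_cast<float>(v)}; }
constexpr AIntLit operator""_a(unsigned long long v) { return {static_cast<int64_t>(v)}; }

}  // namespace sketch

With operators like these in scope, 2_i, 2_u, 2_f and 2_a read as typed literal values, which is how the tests in the following hunks feed them to Expr() and IndexAccessor() instead of the bare C++ literals used before.
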
diff --git a/chromium/third_party/dawn/src/tint/resolver/assignment_validation_test.cc b/chromium/third_party/dawn/src/tint/resolver/assignment_validation_test.cc
index cc2519ab3ac..64363e39428 100644
--- a/chromium/third_party/dawn/src/tint/resolver/assignment_validation_test.cc
+++ b/chromium/third_party/dawn/src/tint/resolver/assignment_validation_test.cc
@@ -16,7 +16,9 @@
#include "gmock/gmock.h"
#include "src/tint/resolver/resolver_test_helper.h"
-#include "src/tint/sem/storage_texture_type.h"
+#include "src/tint/sem/storage_texture.h"
+
+using namespace tint::number_suffixes; // NOLINT
namespace tint::resolver {
namespace {
@@ -24,367 +26,370 @@ namespace {
using ResolverAssignmentValidationTest = ResolverTest;
TEST_F(ResolverAssignmentValidationTest, ReadOnlyBuffer) {
- // struct S { m : i32 };
- // @group(0) @binding(0)
- // var<storage,read> a : S;
- auto* s = Structure("S", {Member("m", ty.i32())});
- Global(Source{{12, 34}}, "a", ty.Of(s), ast::StorageClass::kStorage,
- ast::Access::kRead,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(0),
- });
-
- WrapInFunction(Assign(Source{{56, 78}}, MemberAccessor("a", "m"), 1));
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "56:78 error: cannot store into a read-only type 'ref<storage, "
- "i32, read>'");
+ // struct S { m : i32 };
+ // @group(0) @binding(0)
+ // var<storage,read> a : S;
+ auto* s = Structure("S", {Member("m", ty.i32())});
+ Global(Source{{12, 34}}, "a", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kRead,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(0),
+ });
+
+ WrapInFunction(Assign(Source{{56, 78}}, MemberAccessor("a", "m"), 1_i));
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "56:78 error: cannot store into a read-only type 'ref<storage, "
+ "i32, read>'");
}
TEST_F(ResolverAssignmentValidationTest, AssignIncompatibleTypes) {
- // {
- // var a : i32 = 2;
- // a = 2.3;
- // }
+ // {
+ // var a : i32 = 2i;
+ // a = 2.3;
+ // }
- auto* var = Var("a", ty.i32(), ast::StorageClass::kNone, Expr(2));
+ auto* var = Var("a", ty.i32(), ast::StorageClass::kNone, Expr(2_i));
- auto* assign = Assign(Source{{12, 34}}, "a", 2.3f);
- WrapInFunction(var, assign);
+ auto* assign = Assign(Source{{12, 34}}, "a", 2.3_f);
+ WrapInFunction(var, assign);
- ASSERT_FALSE(r()->Resolve());
+ ASSERT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), "12:34 error: cannot assign 'f32' to 'i32'");
+ EXPECT_EQ(r()->error(), "12:34 error: cannot assign 'f32' to 'i32'");
}
-TEST_F(ResolverAssignmentValidationTest,
- AssignArraysWithDifferentSizeExpressions_Pass) {
- // let len = 4u;
- // {
- // var a : array<f32, 4>;
- // var b : array<f32, len>;
- // a = b;
- // }
+TEST_F(ResolverAssignmentValidationTest, AssignArraysWithDifferentSizeExpressions_Pass) {
+ // let len = 4u;
+ // {
+ // var a : array<f32, 4u>;
+ // var b : array<f32, len>;
+ // a = b;
+ // }
- GlobalConst("len", nullptr, Expr(4u));
+ GlobalConst("len", nullptr, Expr(4_u));
- auto* a = Var("a", ty.array(ty.f32(), 4));
- auto* b = Var("b", ty.array(ty.f32(), "len"));
+ auto* a = Var("a", ty.array(ty.f32(), 4_u));
+ auto* b = Var("b", ty.array(ty.f32(), "len"));
- auto* assign = Assign(Source{{12, 34}}, "a", "b");
- WrapInFunction(a, b, assign);
+ auto* assign = Assign(Source{{12, 34}}, "a", "b");
+ WrapInFunction(a, b, assign);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
}
-TEST_F(ResolverAssignmentValidationTest,
- AssignArraysWithDifferentSizeExpressions_Fail) {
- // let len = 5u;
- // {
- // var a : array<f32, 4>;
- // var b : array<f32, len>;
- // a = b;
- // }
+TEST_F(ResolverAssignmentValidationTest, AssignArraysWithDifferentSizeExpressions_Fail) {
+ // let len = 5u;
+ // {
+ // var a : array<f32, 4u>;
+ // var b : array<f32, len>;
+ // a = b;
+ // }
- GlobalConst("len", nullptr, Expr(5u));
+ GlobalConst("len", nullptr, Expr(5_u));
- auto* a = Var("a", ty.array(ty.f32(), 4));
- auto* b = Var("b", ty.array(ty.f32(), "len"));
+ auto* a = Var("a", ty.array(ty.f32(), 4_u));
+ auto* b = Var("b", ty.array(ty.f32(), "len"));
- auto* assign = Assign(Source{{12, 34}}, "a", "b");
- WrapInFunction(a, b, assign);
+ auto* assign = Assign(Source{{12, 34}}, "a", "b");
+ WrapInFunction(a, b, assign);
- ASSERT_FALSE(r()->Resolve());
+ ASSERT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: cannot assign 'array<f32, 5>' to 'array<f32, 4>'");
+ EXPECT_EQ(r()->error(), "12:34 error: cannot assign 'array<f32, 5>' to 'array<f32, 4>'");
}
-TEST_F(ResolverAssignmentValidationTest,
- AssignCompatibleTypesInBlockStatement_Pass) {
- // {
- // var a : i32 = 2;
- // a = 2
- // }
- auto* var = Var("a", ty.i32(), ast::StorageClass::kNone, Expr(2));
- WrapInFunction(var, Assign("a", 2));
+TEST_F(ResolverAssignmentValidationTest, AssignCompatibleTypesInBlockStatement_Pass) {
+ // {
+ // var a : i32 = 2i;
+ // a = 2i
+ // }
+ auto* var = Var("a", ty.i32(), ast::StorageClass::kNone, Expr(2_i));
+ WrapInFunction(var, Assign("a", 2_i));
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
}
-TEST_F(ResolverAssignmentValidationTest,
- AssignIncompatibleTypesInBlockStatement_Fail) {
- // {
- // var a : i32 = 2;
- // a = 2.3;
- // }
+TEST_F(ResolverAssignmentValidationTest, AssignIncompatibleTypesInBlockStatement_Fail) {
+ // {
+ // var a : i32 = 2i;
+ // a = 2.3;
+ // }
- auto* var = Var("a", ty.i32(), ast::StorageClass::kNone, Expr(2));
- WrapInFunction(var, Assign(Source{{12, 34}}, "a", 2.3f));
+ auto* var = Var("a", ty.i32(), ast::StorageClass::kNone, Expr(2_i));
+ WrapInFunction(var, Assign(Source{{12, 34}}, "a", 2.3_f));
- ASSERT_FALSE(r()->Resolve());
+ ASSERT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), "12:34 error: cannot assign 'f32' to 'i32'");
+ EXPECT_EQ(r()->error(), "12:34 error: cannot assign 'f32' to 'i32'");
}
-TEST_F(ResolverAssignmentValidationTest,
- AssignIncompatibleTypesInNestedBlockStatement_Fail) {
- // {
- // {
- // var a : i32 = 2;
- // a = 2.3;
- // }
- // }
+TEST_F(ResolverAssignmentValidationTest, AssignIncompatibleTypesInNestedBlockStatement_Fail) {
+ // {
+ // {
+ // var a : i32 = 2i;
+ // a = 2.3;
+ // }
+ // }
- auto* var = Var("a", ty.i32(), ast::StorageClass::kNone, Expr(2));
- auto* inner_block = Block(Decl(var), Assign(Source{{12, 34}}, "a", 2.3f));
- auto* outer_block = Block(inner_block);
- WrapInFunction(outer_block);
+ auto* var = Var("a", ty.i32(), ast::StorageClass::kNone, Expr(2_i));
+ auto* inner_block = Block(Decl(var), Assign(Source{{12, 34}}, "a", 2.3_f));
+ auto* outer_block = Block(inner_block);
+ WrapInFunction(outer_block);
- ASSERT_FALSE(r()->Resolve());
+ ASSERT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), "12:34 error: cannot assign 'f32' to 'i32'");
+ EXPECT_EQ(r()->error(), "12:34 error: cannot assign 'f32' to 'i32'");
}
TEST_F(ResolverAssignmentValidationTest, AssignToScalar_Fail) {
- // var my_var : i32 = 2;
- // 1 = my_var;
+ // var my_var : i32 = 2i;
+ // 1 = my_var;
- auto* var = Var("my_var", ty.i32(), ast::StorageClass::kNone, Expr(2));
- WrapInFunction(var, Assign(Expr(Source{{12, 34}}, 1), "my_var"));
+ WrapInFunction(Var("my_var", ty.i32(), ast::StorageClass::kNone, Expr(2_i)), //
+ Assign(Expr(Source{{12, 34}}, 1_i), "my_var"));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), "12:34 error: cannot assign to value of type 'i32'");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: cannot assign to value of type 'i32'");
}
TEST_F(ResolverAssignmentValidationTest, AssignCompatibleTypes_Pass) {
- // var a : i32 = 2;
- // a = 2
- auto* var = Var("a", ty.i32(), ast::StorageClass::kNone, Expr(2));
- WrapInFunction(var, Assign(Source{{12, 34}}, "a", 2));
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ // var a : i32 = 1i;
+ // a = 2i;
+ // a = 3;
+ WrapInFunction(Var("a", ty.i32(), ast::StorageClass::kNone, Expr(1_i)), //
+ Assign("a", 2_i), //
+ Assign("a", 3_a));
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
-TEST_F(ResolverAssignmentValidationTest,
- AssignCompatibleTypesThroughAlias_Pass) {
- // alias myint = i32;
- // var a : myint = 2;
- // a = 2
- auto* myint = Alias("myint", ty.i32());
- auto* var = Var("a", ty.Of(myint), ast::StorageClass::kNone, Expr(2));
- WrapInFunction(var, Assign(Source{{12, 34}}, "a", 2));
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+TEST_F(ResolverAssignmentValidationTest, AssignCompatibleTypesThroughAlias_Pass) {
+ // alias myint = u32;
+ // var a : myint = 1u;
+ // a = 2u;
+ // a = 3;
+ auto* myint = Alias("myint", ty.u32());
+ WrapInFunction(Var("a", ty.Of(myint), ast::StorageClass::kNone, Expr(1_u)), //
+ Assign("a", 2_u), //
+ Assign("a", 3_a));
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
-TEST_F(ResolverAssignmentValidationTest,
- AssignCompatibleTypesInferRHSLoad_Pass) {
- // var a : i32 = 2;
- // var b : i32 = 3;
- // a = b;
- auto* var_a = Var("a", ty.i32(), ast::StorageClass::kNone, Expr(2));
- auto* var_b = Var("b", ty.i32(), ast::StorageClass::kNone, Expr(3));
- WrapInFunction(var_a, var_b, Assign(Source{{12, 34}}, "a", "b"));
+TEST_F(ResolverAssignmentValidationTest, AssignCompatibleTypesInferRHSLoad_Pass) {
+ // var a : i32 = 2i;
+ // var b : i32 = 3i;
+ // a = b;
+ WrapInFunction(Var("a", ty.i32(), ast::StorageClass::kNone, Expr(2_i)), //
+ Var("b", ty.i32(), ast::StorageClass::kNone, Expr(3_i)), //
+ Assign("a", "b"));
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverAssignmentValidationTest, AssignThroughPointer_Pass) {
- // var a : i32;
- // let b : ptr<function,i32> = &a;
- // *b = 2;
- const auto func = ast::StorageClass::kFunction;
- auto* var_a = Var("a", ty.i32(), func, Expr(2));
- auto* var_b = Const("b", ty.pointer<int>(func), AddressOf(Expr("a")));
- WrapInFunction(var_a, var_b, Assign(Source{{12, 34}}, Deref("b"), 2));
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ // var a : i32;
+ // let b : ptr<function,i32> = &a;
+ // *b = 2i;
+ const auto func = ast::StorageClass::kFunction;
+ WrapInFunction(Var("a", ty.i32(), func, Expr(2_i)), //
+ Let("b", ty.pointer<i32>(func), AddressOf(Expr("a"))), //
+ Assign(Deref("b"), 2_i));
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
-TEST_F(ResolverAssignmentValidationTest, AssignToConstant_Fail) {
- // {
- // let a : i32 = 2;
- // a = 2
- // }
- auto* var = Const("a", ty.i32(), Expr(2));
- WrapInFunction(var, Assign(Expr(Source{{12, 34}}, "a"), 2));
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: cannot assign to const\nnote: 'a' is declared here:");
+TEST_F(ResolverAssignmentValidationTest, AssignMaterializedThroughPointer_Pass) {
+ // var a : i32;
+ // let b : ptr<function,i32> = &a;
+ // *b = 2;
+ const auto func = ast::StorageClass::kFunction;
+ auto* var_a = Var("a", ty.i32(), func, Expr(2_i));
+ auto* var_b = Let("b", ty.pointer<i32>(func), AddressOf(Expr("a")));
+ WrapInFunction(var_a, var_b, Assign(Deref("b"), 2_a));
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+}
+
+TEST_F(ResolverAssignmentValidationTest, AssignToLet_Fail) {
+ // {
+ // let a : i32 = 2i;
+ // a = 2i
+ // }
+ auto* var = Let("a", ty.i32(), Expr(2_i));
+ WrapInFunction(var, Assign(Expr(Source{{12, 34}}, "a"), 2_i));
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: cannot assign to const\nnote: 'a' is declared here:");
}
TEST_F(ResolverAssignmentValidationTest, AssignNonConstructible_Handle) {
- // var a : texture_storage_1d<rgba8unorm, write>;
- // var b : texture_storage_1d<rgba8unorm, write>;
- // a = b;
-
- auto make_type = [&] {
- return ty.storage_texture(ast::TextureDimension::k1d,
- ast::TexelFormat::kRgba8Unorm,
- ast::Access::kWrite);
- };
-
- Global("a", make_type(), ast::StorageClass::kNone,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(0),
- });
- Global("b", make_type(), ast::StorageClass::kNone,
- ast::AttributeList{
- create<ast::BindingAttribute>(1),
- create<ast::GroupAttribute>(0),
- });
-
- WrapInFunction(Assign(Source{{56, 78}}, "a", "b"));
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "56:78 error: storage type of assignment must be constructible");
+ // var a : texture_storage_1d<rgba8unorm, write>;
+ // var b : texture_storage_1d<rgba8unorm, write>;
+ // a = b;
+
+ auto make_type = [&] {
+ return ty.storage_texture(ast::TextureDimension::k1d, ast::TexelFormat::kRgba8Unorm,
+ ast::Access::kWrite);
+ };
+
+ Global("a", make_type(), ast::StorageClass::kNone,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(0),
+ });
+ Global("b", make_type(), ast::StorageClass::kNone,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(1),
+ create<ast::GroupAttribute>(0),
+ });
+
+ WrapInFunction(Assign(Source{{56, 78}}, "a", "b"));
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "56:78 error: storage type of assignment must be constructible");
}
TEST_F(ResolverAssignmentValidationTest, AssignNonConstructible_Atomic) {
- // struct S { a : atomic<i32>; };
- // @group(0) @binding(0) var<storage, read_write> v : S;
- // v.a = v.a;
-
- auto* s = Structure("S", {Member("a", ty.atomic(ty.i32()))});
- Global(Source{{12, 34}}, "v", ty.Of(s), ast::StorageClass::kStorage,
- ast::Access::kReadWrite,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(0),
- });
-
- WrapInFunction(Assign(Source{{56, 78}}, MemberAccessor("v", "a"),
- MemberAccessor("v", "a")));
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "56:78 error: storage type of assignment must be constructible");
+ // struct S { a : atomic<i32>; };
+ // @group(0) @binding(0) var<storage, read_write> v : S;
+ // v.a = v.a;
+
+ auto* s = Structure("S", {Member("a", ty.atomic(ty.i32()))});
+ Global(Source{{12, 34}}, "v", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kReadWrite,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(0),
+ });
+
+ WrapInFunction(Assign(Source{{56, 78}}, MemberAccessor("v", "a"), MemberAccessor("v", "a")));
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "56:78 error: storage type of assignment must be constructible");
}
TEST_F(ResolverAssignmentValidationTest, AssignNonConstructible_RuntimeArray) {
- // struct S { a : array<f32>; };
- // @group(0) @binding(0) var<storage, read_write> v : S;
- // v.a = v.a;
-
- auto* s = Structure("S", {Member("a", ty.array(ty.f32()))});
- Global(Source{{12, 34}}, "v", ty.Of(s), ast::StorageClass::kStorage,
- ast::Access::kReadWrite,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(0),
- });
-
- WrapInFunction(Assign(Source{{56, 78}}, MemberAccessor("v", "a"),
- MemberAccessor("v", "a")));
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "56:78 error: storage type of assignment must be constructible");
+ // struct S { a : array<f32>; };
+ // @group(0) @binding(0) var<storage, read_write> v : S;
+ // v.a = v.a;
+
+ auto* s = Structure("S", {Member("a", ty.array(ty.f32()))});
+ Global(Source{{12, 34}}, "v", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kReadWrite,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(0),
+ });
+
+ WrapInFunction(Assign(Source{{56, 78}}, MemberAccessor("v", "a"), MemberAccessor("v", "a")));
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "56:78 error: storage type of assignment must be constructible");
}
-TEST_F(ResolverAssignmentValidationTest,
- AssignToPhony_NonConstructibleStruct_Fail) {
- // struct S {
- // arr: array<i32>;
- // };
- // @group(0) @binding(0) var<storage, read_write> s : S;
- // fn f() {
- // _ = s;
- // }
- auto* s = Structure("S", {Member("arr", ty.array<i32>())});
- Global("s", ty.Of(s), ast::StorageClass::kStorage, GroupAndBinding(0, 0));
-
- WrapInFunction(Assign(Phony(), Expr(Source{{12, 34}}, "s")));
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: cannot assign 'S' to '_'. "
- "'_' can only be assigned a constructible, pointer, texture or "
- "sampler type");
+TEST_F(ResolverAssignmentValidationTest, AssignToPhony_NonConstructibleStruct_Fail) {
+ // struct S {
+ // arr: array<i32>;
+ // };
+ // @group(0) @binding(0) var<storage, read_write> s : S;
+ // fn f() {
+ // _ = s;
+ // }
+ auto* s = Structure("S", {Member("arr", ty.array<i32>())});
+ Global("s", ty.Of(s), ast::StorageClass::kStorage, GroupAndBinding(0, 0));
+
+ WrapInFunction(Assign(Phony(), Expr(Source{{12, 34}}, "s")));
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: cannot assign 'S' to '_'. "
+ "'_' can only be assigned a constructible, pointer, texture or "
+ "sampler type");
}
TEST_F(ResolverAssignmentValidationTest, AssignToPhony_DynamicArray_Fail) {
- // struct S {
- // arr: array<i32>;
- // };
- // @group(0) @binding(0) var<storage, read_write> s : S;
- // fn f() {
- // _ = s.arr;
- // }
- auto* s = Structure("S", {Member("arr", ty.array<i32>())});
- Global("s", ty.Of(s), ast::StorageClass::kStorage, GroupAndBinding(0, 0));
-
- WrapInFunction(Assign(Phony(), MemberAccessor(Source{{12, 34}}, "s", "arr")));
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- "12:34 error: cannot assign 'array<i32>' to '_'. "
- "'_' can only be assigned a constructible, pointer, texture or sampler "
- "type");
+ // struct S {
+ // arr: array<i32>;
+ // };
+ // @group(0) @binding(0) var<storage, read_write> s : S;
+ // fn f() {
+ // _ = s.arr;
+ // }
+ auto* s = Structure("S", {Member("arr", ty.array<i32>())});
+ Global("s", ty.Of(s), ast::StorageClass::kStorage, GroupAndBinding(0, 0));
+
+ WrapInFunction(Assign(Phony(), MemberAccessor(Source{{12, 34}}, "s", "arr")));
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: cannot assign 'array<i32>' to '_'. "
+ "'_' can only be assigned a constructible, pointer, texture or sampler "
+ "type");
}
TEST_F(ResolverAssignmentValidationTest, AssignToPhony_Pass) {
- // struct S {
- // i: i32;
- // arr: array<i32>;
- // };
- // struct U {
- // i: i32;
- // };
- // @group(0) @binding(0) var tex texture_2d;
- // @group(0) @binding(1) var smp sampler;
- // @group(0) @binding(2) var<uniform> u : U;
- // @group(0) @binding(3) var<storage, read_write> s : S;
- // var<workgroup> wg : array<f32, 10>
- // fn f() {
- // _ = 1;
- // _ = 2u;
- // _ = 3.0;
- // _ = vec2<bool>();
- // _ = tex;
- // _ = smp;
- // _ = &s;
- // _ = s.i;
- // _ = &s.arr;
- // _ = u;
- // _ = u.i;
- // _ = wg;
- // _ = wg[3];
- // }
- auto* S = Structure("S", {
- Member("i", ty.i32()),
- Member("arr", ty.array<i32>()),
- });
- auto* U = Structure("U", {Member("i", ty.i32())});
- Global("tex", ty.sampled_texture(ast::TextureDimension::k2d, ty.f32()),
- GroupAndBinding(0, 0));
- Global("smp", ty.sampler(ast::SamplerKind::kSampler), GroupAndBinding(0, 1));
- Global("u", ty.Of(U), ast::StorageClass::kUniform, GroupAndBinding(0, 2));
- Global("s", ty.Of(S), ast::StorageClass::kStorage, GroupAndBinding(0, 3));
- Global("wg", ty.array<f32, 10>(), ast::StorageClass::kWorkgroup);
-
- WrapInFunction(Assign(Phony(), 1), //
- Assign(Phony(), 2), //
- Assign(Phony(), 3), //
- Assign(Phony(), vec2<bool>()), //
- Assign(Phony(), "tex"), //
- Assign(Phony(), "smp"), //
- Assign(Phony(), AddressOf("s")), //
- Assign(Phony(), MemberAccessor("s", "i")), //
- Assign(Phony(), AddressOf(MemberAccessor("s", "arr"))), //
- Assign(Phony(), "u"), //
- Assign(Phony(), MemberAccessor("u", "i")), //
- Assign(Phony(), "wg"), //
- Assign(Phony(), IndexAccessor("wg", 3)));
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ // struct S {
+ // i: i32;
+ // arr: array<i32>;
+ // };
+ // struct U {
+ // i: i32;
+ // };
+ // @group(0) @binding(0) var tex texture_2d;
+ // @group(0) @binding(1) var smp sampler;
+ // @group(0) @binding(2) var<uniform> u : U;
+ // @group(0) @binding(3) var<storage, read_write> s : S;
+ // var<workgroup> wg : array<f32, 10>
+ // fn f() {
+ // _ = 1i;
+ // _ = 2u;
+ // _ = 3.0f;
+ // _ = 4;
+ // _ = 5.0;
+ // _ = vec2(6);
+ // _ = vec3(7.0);
+ // _ = vec4<bool>();
+ // _ = tex;
+ // _ = smp;
+ // _ = &s;
+ // _ = s.i;
+ // _ = &s.arr;
+ // _ = u;
+ // _ = u.i;
+ // _ = wg;
+ // _ = wg[3i];
+ // }
+ auto* S = Structure("S", {
+ Member("i", ty.i32()),
+ Member("arr", ty.array<i32>()),
+ });
+ auto* U = Structure("U", {Member("i", ty.i32())});
+ Global("tex", ty.sampled_texture(ast::TextureDimension::k2d, ty.f32()), GroupAndBinding(0, 0));
+ Global("smp", ty.sampler(ast::SamplerKind::kSampler), GroupAndBinding(0, 1));
+ Global("u", ty.Of(U), ast::StorageClass::kUniform, GroupAndBinding(0, 2));
+ Global("s", ty.Of(S), ast::StorageClass::kStorage, GroupAndBinding(0, 3));
+ Global("wg", ty.array<f32, 10>(), ast::StorageClass::kWorkgroup);
+
+ WrapInFunction(Assign(Phony(), 1_i), //
+ Assign(Phony(), 2_u), //
+ Assign(Phony(), 3_f), //
+ Assign(Phony(), 4_a), //
+ Assign(Phony(), 5.0_a), //
+ Assign(Phony(), vec(nullptr, 2u, 6_a)), //
+ Assign(Phony(), vec(nullptr, 3u, 7.0_a)), //
+ Assign(Phony(), vec4<bool>()), //
+ Assign(Phony(), "tex"), //
+ Assign(Phony(), "smp"), //
+ Assign(Phony(), AddressOf("s")), //
+ Assign(Phony(), MemberAccessor("s", "i")), //
+ Assign(Phony(), AddressOf(MemberAccessor("s", "arr"))), //
+ Assign(Phony(), "u"), //
+ Assign(Phony(), MemberAccessor("u", "i")), //
+ Assign(Phony(), "wg"), //
+ Assign(Phony(), IndexAccessor("wg", 3_i)));
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/resolver/atomics_test.cc b/chromium/third_party/dawn/src/tint/resolver/atomics_test.cc
index 02f800dd05f..c0fe5ce2995 100644
--- a/chromium/third_party/dawn/src/tint/resolver/atomics_test.cc
+++ b/chromium/third_party/dawn/src/tint/resolver/atomics_test.cc
@@ -14,57 +14,52 @@
#include "src/tint/resolver/resolver.h"
#include "src/tint/resolver/resolver_test_helper.h"
-#include "src/tint/sem/atomic_type.h"
-#include "src/tint/sem/reference_type.h"
+#include "src/tint/sem/atomic.h"
+#include "src/tint/sem/reference.h"
#include "gmock/gmock.h"
namespace tint::resolver {
namespace {
-struct ResolverAtomicTest : public resolver::TestHelper,
- public testing::Test {};
+struct ResolverAtomicTest : public resolver::TestHelper, public testing::Test {};
TEST_F(ResolverAtomicTest, GlobalWorkgroupI32) {
- auto* g = Global("a", ty.atomic(Source{{12, 34}}, ty.i32()),
- ast::StorageClass::kWorkgroup);
+ auto* g = Global("a", ty.atomic(Source{{12, 34}}, ty.i32()), ast::StorageClass::kWorkgroup);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_TRUE(TypeOf(g)->Is<sem::Reference>());
- auto* atomic = TypeOf(g)->UnwrapRef()->As<sem::Atomic>();
- ASSERT_NE(atomic, nullptr);
- EXPECT_TRUE(atomic->Type()->Is<sem::I32>());
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(TypeOf(g)->Is<sem::Reference>());
+ auto* atomic = TypeOf(g)->UnwrapRef()->As<sem::Atomic>();
+ ASSERT_NE(atomic, nullptr);
+ EXPECT_TRUE(atomic->Type()->Is<sem::I32>());
}
TEST_F(ResolverAtomicTest, GlobalWorkgroupU32) {
- auto* g = Global("a", ty.atomic(Source{{12, 34}}, ty.u32()),
- ast::StorageClass::kWorkgroup);
+ auto* g = Global("a", ty.atomic(Source{{12, 34}}, ty.u32()), ast::StorageClass::kWorkgroup);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_TRUE(TypeOf(g)->Is<sem::Reference>());
- auto* atomic = TypeOf(g)->UnwrapRef()->As<sem::Atomic>();
- ASSERT_NE(atomic, nullptr);
- EXPECT_TRUE(atomic->Type()->Is<sem::U32>());
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(TypeOf(g)->Is<sem::Reference>());
+ auto* atomic = TypeOf(g)->UnwrapRef()->As<sem::Atomic>();
+ ASSERT_NE(atomic, nullptr);
+ EXPECT_TRUE(atomic->Type()->Is<sem::U32>());
}
TEST_F(ResolverAtomicTest, GlobalStorageStruct) {
- auto* s =
- Structure("s", {Member("a", ty.atomic(Source{{12, 34}}, ty.i32()))});
- auto* g = Global("g", ty.Of(s), ast::StorageClass::kStorage,
- ast::Access::kReadWrite,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(0),
- });
+ auto* s = Structure("s", {Member("a", ty.atomic(Source{{12, 34}}, ty.i32()))});
+ auto* g = Global("g", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kReadWrite,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(0),
+ });
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_TRUE(TypeOf(g)->Is<sem::Reference>());
- auto* str = TypeOf(g)->UnwrapRef()->As<sem::Struct>();
- ASSERT_NE(str, nullptr);
- ASSERT_EQ(str->Members().size(), 1u);
- auto* atomic = str->Members()[0]->Type()->As<sem::Atomic>();
- ASSERT_NE(atomic, nullptr);
- ASSERT_TRUE(atomic->Type()->Is<sem::I32>());
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(TypeOf(g)->Is<sem::Reference>());
+ auto* str = TypeOf(g)->UnwrapRef()->As<sem::Struct>();
+ ASSERT_NE(str, nullptr);
+ ASSERT_EQ(str->Members().size(), 1u);
+ auto* atomic = str->Members()[0]->Type()->As<sem::Atomic>();
+ ASSERT_NE(atomic, nullptr);
+ ASSERT_TRUE(atomic->Type()->Is<sem::I32>());
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/resolver/atomics_validation_test.cc b/chromium/third_party/dawn/src/tint/resolver/atomics_validation_test.cc
index 5e9668f482c..78c1fb94f71 100644
--- a/chromium/third_party/dawn/src/tint/resolver/atomics_validation_test.cc
+++ b/chromium/third_party/dawn/src/tint/resolver/atomics_validation_test.cc
@@ -14,322 +14,302 @@
#include "src/tint/resolver/resolver.h"
#include "src/tint/resolver/resolver_test_helper.h"
-#include "src/tint/sem/atomic_type.h"
-#include "src/tint/sem/reference_type.h"
+#include "src/tint/sem/atomic.h"
+#include "src/tint/sem/reference.h"
#include "gmock/gmock.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::resolver {
namespace {
-struct ResolverAtomicValidationTest : public resolver::TestHelper,
- public testing::Test {};
+struct ResolverAtomicValidationTest : public resolver::TestHelper, public testing::Test {};
TEST_F(ResolverAtomicValidationTest, StorageClass_WorkGroup) {
- Global("a", ty.atomic(Source{{12, 34}}, ty.i32()),
- ast::StorageClass::kWorkgroup);
+ Global("a", ty.atomic(Source{{12, 34}}, ty.i32()), ast::StorageClass::kWorkgroup);
- EXPECT_TRUE(r()->Resolve());
+ EXPECT_TRUE(r()->Resolve());
}
TEST_F(ResolverAtomicValidationTest, StorageClass_Storage) {
- Global("g", ty.atomic(Source{{12, 34}}, ty.i32()),
- ast::StorageClass::kStorage, ast::Access::kReadWrite,
- GroupAndBinding(0, 0));
+ Global("g", ty.atomic(Source{{12, 34}}, ty.i32()), ast::StorageClass::kStorage,
+ ast::Access::kReadWrite, GroupAndBinding(0, 0));
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverAtomicValidationTest, StorageClass_Storage_Struct) {
- auto* s =
- Structure("s", {Member("a", ty.atomic(Source{{12, 34}}, ty.i32()))});
- Global("g", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kReadWrite,
- GroupAndBinding(0, 0));
+ auto* s = Structure("s", {Member("a", ty.atomic(Source{{12, 34}}, ty.i32()))});
+ Global("g", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kReadWrite,
+ GroupAndBinding(0, 0));
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverAtomicValidationTest, InvalidType) {
- Global("a", ty.atomic(ty.f32(Source{{12, 34}})),
- ast::StorageClass::kWorkgroup);
+ Global("a", ty.atomic(ty.f32(Source{{12, 34}})), ast::StorageClass::kWorkgroup);
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), "12:34 error: atomic only supports i32 or u32 types");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: atomic only supports i32 or u32 types");
}
TEST_F(ResolverAtomicValidationTest, InvalidStorageClass_Simple) {
- Global("a", ty.atomic(Source{{12, 34}}, ty.i32()),
- ast::StorageClass::kPrivate);
+ Global("a", ty.atomic(Source{{12, 34}}, ty.i32()), ast::StorageClass::kPrivate);
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: atomic variables must have <storage> or <workgroup> "
- "storage class");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: atomic variables must have <storage> or <workgroup> "
+ "storage class");
}
TEST_F(ResolverAtomicValidationTest, InvalidStorageClass_Array) {
- Global("a", ty.atomic(Source{{12, 34}}, ty.i32()),
- ast::StorageClass::kPrivate);
+ Global("a", ty.atomic(Source{{12, 34}}, ty.i32()), ast::StorageClass::kPrivate);
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: atomic variables must have <storage> or <workgroup> "
- "storage class");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: atomic variables must have <storage> or <workgroup> "
+ "storage class");
}
TEST_F(ResolverAtomicValidationTest, InvalidStorageClass_Struct) {
- auto* s =
- Structure("s", {Member("a", ty.atomic(Source{{12, 34}}, ty.i32()))});
- Global("g", ty.Of(s), ast::StorageClass::kPrivate);
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "error: atomic variables must have <storage> or <workgroup> "
- "storage class\n"
- "note: atomic sub-type of 's' is declared here");
+ auto* s = Structure("s", {Member("a", ty.atomic(Source{{12, 34}}, ty.i32()))});
+ Global("g", ty.Of(s), ast::StorageClass::kPrivate);
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "error: atomic variables must have <storage> or <workgroup> "
+ "storage class\n"
+ "note: atomic sub-type of 's' is declared here");
}
TEST_F(ResolverAtomicValidationTest, InvalidStorageClass_StructOfStruct) {
- // struct Inner { m : atomic<i32>; };
- // struct Outer { m : array<Inner, 4>; };
- // var<private> g : Outer;
-
- auto* Inner =
- Structure("Inner", {Member("m", ty.atomic(Source{{12, 34}}, ty.i32()))});
- auto* Outer = Structure("Outer", {Member("m", ty.Of(Inner))});
- Global("g", ty.Of(Outer), ast::StorageClass::kPrivate);
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "error: atomic variables must have <storage> or <workgroup> "
- "storage class\n"
- "note: atomic sub-type of 'Outer' is declared here");
+ // struct Inner { m : atomic<i32>; };
+ // struct Outer { m : array<Inner, 4>; };
+ // var<private> g : Outer;
+
+ auto* Inner = Structure("Inner", {Member("m", ty.atomic(Source{{12, 34}}, ty.i32()))});
+ auto* Outer = Structure("Outer", {Member("m", ty.Of(Inner))});
+ Global("g", ty.Of(Outer), ast::StorageClass::kPrivate);
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "error: atomic variables must have <storage> or <workgroup> "
+ "storage class\n"
+ "note: atomic sub-type of 'Outer' is declared here");
}
-TEST_F(ResolverAtomicValidationTest,
- InvalidStorageClass_StructOfStructOfArray) {
- // struct Inner { m : array<atomic<i32>, 4>; };
- // struct Outer { m : array<Inner, 4>; };
- // var<private> g : Outer;
-
- auto* Inner =
- Structure("Inner", {Member(Source{{12, 34}}, "m", ty.atomic(ty.i32()))});
- auto* Outer = Structure("Outer", {Member("m", ty.Of(Inner))});
- Global("g", ty.Of(Outer), ast::StorageClass::kPrivate);
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "error: atomic variables must have <storage> or <workgroup> "
- "storage class\n"
- "12:34 note: atomic sub-type of 'Outer' is declared here");
+TEST_F(ResolverAtomicValidationTest, InvalidStorageClass_StructOfStructOfArray) {
+ // struct Inner { m : array<atomic<i32>, 4>; };
+ // struct Outer { m : array<Inner, 4>; };
+ // var<private> g : Outer;
+
+ auto* Inner = Structure("Inner", {Member(Source{{12, 34}}, "m", ty.atomic(ty.i32()))});
+ auto* Outer = Structure("Outer", {Member("m", ty.Of(Inner))});
+ Global("g", ty.Of(Outer), ast::StorageClass::kPrivate);
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "error: atomic variables must have <storage> or <workgroup> "
+ "storage class\n"
+ "12:34 note: atomic sub-type of 'Outer' is declared here");
}
TEST_F(ResolverAtomicValidationTest, InvalidStorageClass_ArrayOfArray) {
- // type AtomicArray = array<atomic<i32>, 5>;
- // var<private> v: array<s, 5>;
-
- auto* atomic_array = Alias(Source{{12, 34}}, "AtomicArray",
- ty.atomic(Source{{12, 34}}, ty.i32()));
- Global(Source{{56, 78}}, "v", ty.Of(atomic_array),
- ast::StorageClass::kPrivate);
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "error: atomic variables must have <storage> or <workgroup> "
- "storage class");
+ // type AtomicArray = array<atomic<i32>, 5>;
+ // var<private> v: array<s, 5>;
+
+ auto* atomic_array =
+ Alias(Source{{12, 34}}, "AtomicArray", ty.atomic(Source{{12, 34}}, ty.i32()));
+ Global(Source{{56, 78}}, "v", ty.Of(atomic_array), ast::StorageClass::kPrivate);
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "error: atomic variables must have <storage> or <workgroup> "
+ "storage class");
}
TEST_F(ResolverAtomicValidationTest, InvalidStorageClass_ArrayOfStruct) {
- // struct S{
- // m: atomic<u32>;
- // };
- // var<private> v: array<S, 5>;
-
- auto* s = Structure("S", {Member("m", ty.atomic<u32>())});
- Global(Source{{56, 78}}, "v", ty.array(ty.Of(s), 5),
- ast::StorageClass::kPrivate);
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "error: atomic variables must have <storage> or <workgroup> "
- "storage class\n"
- "note: atomic sub-type of 'array<S, 5>' is declared here");
+ // struct S{
+ // m: atomic<u32>;
+ // };
+ // var<private> v: array<S, 5u>;
+
+ auto* s = Structure("S", {Member("m", ty.atomic<u32>())});
+ Global(Source{{56, 78}}, "v", ty.array(ty.Of(s), 5_u), ast::StorageClass::kPrivate);
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "error: atomic variables must have <storage> or <workgroup> "
+ "storage class\n"
+ "note: atomic sub-type of 'array<S, 5>' is declared here");
}
TEST_F(ResolverAtomicValidationTest, InvalidStorageClass_ArrayOfStructOfArray) {
- // type AtomicArray = array<atomic<i32>, 5>;
- // struct S{
- // m: AtomicArray;
- // };
- // var<private> v: array<S, 5>;
-
- auto* atomic_array = Alias(Source{{12, 34}}, "AtomicArray",
- ty.atomic(Source{{12, 34}}, ty.i32()));
- auto* s = Structure("S", {Member("m", ty.Of(atomic_array))});
- Global(Source{{56, 78}}, "v", ty.array(ty.Of(s), 5),
- ast::StorageClass::kPrivate);
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "error: atomic variables must have <storage> or <workgroup> "
- "storage class\n"
- "note: atomic sub-type of 'array<S, 5>' is declared here");
+ // type AtomicArray = array<atomic<i32>, 5u>;
+ // struct S{
+ // m: AtomicArray;
+ // };
+ // var<private> v: array<S, 5u>;
+
+ auto* atomic_array =
+ Alias(Source{{12, 34}}, "AtomicArray", ty.atomic(Source{{12, 34}}, ty.i32()));
+ auto* s = Structure("S", {Member("m", ty.Of(atomic_array))});
+ Global(Source{{56, 78}}, "v", ty.array(ty.Of(s), 5_u), ast::StorageClass::kPrivate);
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "error: atomic variables must have <storage> or <workgroup> "
+ "storage class\n"
+ "note: atomic sub-type of 'array<S, 5>' is declared here");
}
TEST_F(ResolverAtomicValidationTest, InvalidStorageClass_Complex) {
- // type AtomicArray = array<atomic<i32>, 5>;
- // struct S6 { x: array<i32, 4>; };
- // struct S5 { x: S6;
- // y: AtomicArray;
- // z: array<atomic<u32>, 8>; };
- // struct S4 { x: S6;
- // y: S5;
- // z: array<atomic<i32>, 4>; };
- // struct S3 { x: S4; };
- // struct S2 { x: S3; };
- // struct S1 { x: S2; };
- // struct S0 { x: S1; };
- // var<private> g : S0;
-
- auto* atomic_array = Alias(Source{{12, 34}}, "AtomicArray",
- ty.atomic(Source{{12, 34}}, ty.i32()));
- auto* array_i32_4 = ty.array(ty.i32(), 4);
- auto* array_atomic_u32_8 = ty.array(ty.atomic(ty.u32()), 8);
- auto* array_atomic_i32_4 = ty.array(ty.atomic(ty.i32()), 4);
-
- auto* s6 = Structure("S6", {Member("x", array_i32_4)});
- auto* s5 = Structure("S5", {Member("x", ty.Of(s6)), //
- Member("y", ty.Of(atomic_array)), //
- Member("z", array_atomic_u32_8)}); //
- auto* s4 = Structure("S4", {Member("x", ty.Of(s6)), //
- Member("y", ty.Of(s5)), //
- Member("z", array_atomic_i32_4)}); //
- auto* s3 = Structure("S3", {Member("x", ty.Of(s4))});
- auto* s2 = Structure("S2", {Member("x", ty.Of(s3))});
- auto* s1 = Structure("S1", {Member("x", ty.Of(s2))});
- auto* s0 = Structure("S0", {Member("x", ty.Of(s1))});
- Global(Source{{56, 78}}, "g", ty.Of(s0), ast::StorageClass::kPrivate);
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "error: atomic variables must have <storage> or <workgroup> "
- "storage class\n"
- "note: atomic sub-type of 'S0' is declared here");
+ // type AtomicArray = array<atomic<i32>, 5u>;
+ // struct S6 { x: array<i32, 4>; };
+ // struct S5 { x: S6;
+ // y: AtomicArray;
+ // z: array<atomic<u32>, 8u>; };
+ // struct S4 { x: S6;
+ // y: S5;
+ // z: array<atomic<i32>, 4u>; };
+ // struct S3 { x: S4; };
+ // struct S2 { x: S3; };
+ // struct S1 { x: S2; };
+ // struct S0 { x: S1; };
+ // var<private> g : S0;
+
+ auto* atomic_array =
+ Alias(Source{{12, 34}}, "AtomicArray", ty.atomic(Source{{12, 34}}, ty.i32()));
+ auto* array_i32_4 = ty.array(ty.i32(), 4_u);
+ auto* array_atomic_u32_8 = ty.array(ty.atomic(ty.u32()), 8_u);
+ auto* array_atomic_i32_4 = ty.array(ty.atomic(ty.i32()), 4_u);
+
+ auto* s6 = Structure("S6", {Member("x", array_i32_4)});
+ auto* s5 = Structure("S5", {Member("x", ty.Of(s6)), //
+ Member("y", ty.Of(atomic_array)), //
+ Member("z", array_atomic_u32_8)}); //
+ auto* s4 = Structure("S4", {Member("x", ty.Of(s6)), //
+ Member("y", ty.Of(s5)), //
+ Member("z", array_atomic_i32_4)}); //
+ auto* s3 = Structure("S3", {Member("x", ty.Of(s4))});
+ auto* s2 = Structure("S2", {Member("x", ty.Of(s3))});
+ auto* s1 = Structure("S1", {Member("x", ty.Of(s2))});
+ auto* s0 = Structure("S0", {Member("x", ty.Of(s1))});
+ Global(Source{{56, 78}}, "g", ty.Of(s0), ast::StorageClass::kPrivate);
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "error: atomic variables must have <storage> or <workgroup> "
+ "storage class\n"
+ "note: atomic sub-type of 'S0' is declared here");
}
TEST_F(ResolverAtomicValidationTest, Struct_AccessMode_Read) {
- auto* s =
- Structure("s", {Member("a", ty.atomic(Source{{12, 34}}, ty.i32()))});
- Global(Source{{56, 78}}, "g", ty.Of(s), ast::StorageClass::kStorage,
- ast::Access::kRead, GroupAndBinding(0, 0));
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- "error: atomic variables in <storage> storage class must have read_write "
- "access mode\n"
- "note: atomic sub-type of 's' is declared here");
+ auto* s = Structure("s", {Member("a", ty.atomic(Source{{12, 34}}, ty.i32()))});
+ Global(Source{{56, 78}}, "g", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kRead,
+ GroupAndBinding(0, 0));
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "error: atomic variables in <storage> storage class must have read_write "
+ "access mode\n"
+ "note: atomic sub-type of 's' is declared here");
}
TEST_F(ResolverAtomicValidationTest, InvalidAccessMode_Struct) {
- auto* s =
- Structure("s", {Member("a", ty.atomic(Source{{12, 34}}, ty.i32()))});
- Global(Source{{56, 78}}, "g", ty.Of(s), ast::StorageClass::kStorage,
- ast::Access::kRead, GroupAndBinding(0, 0));
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- "error: atomic variables in <storage> storage class must have read_write "
- "access mode\n"
- "note: atomic sub-type of 's' is declared here");
+ auto* s = Structure("s", {Member("a", ty.atomic(Source{{12, 34}}, ty.i32()))});
+ Global(Source{{56, 78}}, "g", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kRead,
+ GroupAndBinding(0, 0));
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "error: atomic variables in <storage> storage class must have read_write "
+ "access mode\n"
+ "note: atomic sub-type of 's' is declared here");
}
TEST_F(ResolverAtomicValidationTest, InvalidAccessMode_StructOfStruct) {
- // struct Inner { m : atomic<i32>; };
- // struct Outer { m : array<Inner, 4>; };
- // var<storage, read> g : Outer;
-
- auto* Inner =
- Structure("Inner", {Member("m", ty.atomic(Source{{12, 34}}, ty.i32()))});
- auto* Outer = Structure("Outer", {Member("m", ty.Of(Inner))});
- Global(Source{{56, 78}}, "g", ty.Of(Outer), ast::StorageClass::kStorage,
- ast::Access::kRead, GroupAndBinding(0, 0));
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- "error: atomic variables in <storage> storage class must have read_write "
- "access mode\n"
- "note: atomic sub-type of 'Outer' is declared here");
+ // struct Inner { m : atomic<i32>; };
+ // struct Outer { m : array<Inner, 4>; };
+ // var<storage, read> g : Outer;
+
+ auto* Inner = Structure("Inner", {Member("m", ty.atomic(Source{{12, 34}}, ty.i32()))});
+ auto* Outer = Structure("Outer", {Member("m", ty.Of(Inner))});
+ Global(Source{{56, 78}}, "g", ty.Of(Outer), ast::StorageClass::kStorage, ast::Access::kRead,
+ GroupAndBinding(0, 0));
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "error: atomic variables in <storage> storage class must have read_write "
+ "access mode\n"
+ "note: atomic sub-type of 'Outer' is declared here");
}
TEST_F(ResolverAtomicValidationTest, InvalidAccessMode_StructOfStructOfArray) {
- // struct Inner { m : array<atomic<i32>, 4>; };
- // struct Outer { m : array<Inner, 4>; };
- // var<storage, read> g : Outer;
-
- auto* Inner =
- Structure("Inner", {Member(Source{{12, 34}}, "m", ty.atomic(ty.i32()))});
- auto* Outer = Structure("Outer", {Member("m", ty.Of(Inner))});
- Global(Source{{56, 78}}, "g", ty.Of(Outer), ast::StorageClass::kStorage,
- ast::Access::kRead, GroupAndBinding(0, 0));
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "error: atomic variables in <storage> storage class must have "
- "read_write access mode\n"
- "12:34 note: atomic sub-type of 'Outer' is declared here");
+ // struct Inner { m : array<atomic<i32>, 4>; };
+ // struct Outer { m : array<Inner, 4>; };
+ // var<storage, read> g : Outer;
+
+ auto* Inner = Structure("Inner", {Member(Source{{12, 34}}, "m", ty.atomic(ty.i32()))});
+ auto* Outer = Structure("Outer", {Member("m", ty.Of(Inner))});
+ Global(Source{{56, 78}}, "g", ty.Of(Outer), ast::StorageClass::kStorage, ast::Access::kRead,
+ GroupAndBinding(0, 0));
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "error: atomic variables in <storage> storage class must have "
+ "read_write access mode\n"
+ "12:34 note: atomic sub-type of 'Outer' is declared here");
}
TEST_F(ResolverAtomicValidationTest, InvalidAccessMode_Complex) {
- // type AtomicArray = array<atomic<i32>, 5>;
- // struct S6 { x: array<i32, 4>; };
- // struct S5 { x: S6;
- // y: AtomicArray;
- // z: array<atomic<u32>, 8>; };
- // struct S4 { x: S6;
- // y: S5;
- // z: array<atomic<i32>, 4>; };
- // struct S3 { x: S4; };
- // struct S2 { x: S3; };
- // struct S1 { x: S2; };
- // struct S0 { x: S1; };
- // var<storage, read> g : S0;
-
- auto* atomic_array = Alias(Source{{12, 34}}, "AtomicArray",
- ty.atomic(Source{{12, 34}}, ty.i32()));
- auto* array_i32_4 = ty.array(ty.i32(), 4);
- auto* array_atomic_u32_8 = ty.array(ty.atomic(ty.u32()), 8);
- auto* array_atomic_i32_4 = ty.array(ty.atomic(ty.i32()), 4);
-
- auto* s6 = Structure("S6", {Member("x", array_i32_4)});
- auto* s5 = Structure("S5", {Member("x", ty.Of(s6)), //
- Member("y", ty.Of(atomic_array)), //
- Member("z", array_atomic_u32_8)}); //
- auto* s4 = Structure("S4", {Member("x", ty.Of(s6)), //
- Member("y", ty.Of(s5)), //
- Member("z", array_atomic_i32_4)}); //
- auto* s3 = Structure("S3", {Member("x", ty.Of(s4))});
- auto* s2 = Structure("S2", {Member("x", ty.Of(s3))});
- auto* s1 = Structure("S1", {Member("x", ty.Of(s2))});
- auto* s0 = Structure("S0", {Member("x", ty.Of(s1))});
- Global(Source{{56, 78}}, "g", ty.Of(s0), ast::StorageClass::kStorage,
- ast::Access::kRead, GroupAndBinding(0, 0));
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "error: atomic variables in <storage> storage class must have "
- "read_write access mode\n"
- "note: atomic sub-type of 'S0' is declared here");
+ // type AtomicArray = array<atomic<i32>, 5>;
+ // struct S6 { x: array<i32, 4u>; };
+ // struct S5 { x: S6;
+ // y: AtomicArray;
+ // z: array<atomic<u32>, 8u>; };
+ // struct S4 { x: S6;
+ // y: S5;
+ // z: array<atomic<i32>, 4u>; };
+ // struct S3 { x: S4; };
+ // struct S2 { x: S3; };
+ // struct S1 { x: S2; };
+ // struct S0 { x: S1; };
+ // var<storage, read> g : S0;
+
+ auto* atomic_array =
+ Alias(Source{{12, 34}}, "AtomicArray", ty.atomic(Source{{12, 34}}, ty.i32()));
+ auto* array_i32_4 = ty.array(ty.i32(), 4_u);
+ auto* array_atomic_u32_8 = ty.array(ty.atomic(ty.u32()), 8_u);
+ auto* array_atomic_i32_4 = ty.array(ty.atomic(ty.i32()), 4_u);
+
+ auto* s6 = Structure("S6", {Member("x", array_i32_4)});
+ auto* s5 = Structure("S5", {Member("x", ty.Of(s6)), //
+ Member("y", ty.Of(atomic_array)), //
+ Member("z", array_atomic_u32_8)}); //
+ auto* s4 = Structure("S4", {Member("x", ty.Of(s6)), //
+ Member("y", ty.Of(s5)), //
+ Member("z", array_atomic_i32_4)}); //
+ auto* s3 = Structure("S3", {Member("x", ty.Of(s4))});
+ auto* s2 = Structure("S2", {Member("x", ty.Of(s3))});
+ auto* s1 = Structure("S1", {Member("x", ty.Of(s2))});
+ auto* s0 = Structure("S0", {Member("x", ty.Of(s1))});
+ Global(Source{{56, 78}}, "g", ty.Of(s0), ast::StorageClass::kStorage, ast::Access::kRead,
+ GroupAndBinding(0, 0));
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "error: atomic variables in <storage> storage class must have "
+ "read_write access mode\n"
+ "note: atomic sub-type of 'S0' is declared here");
}
TEST_F(ResolverAtomicValidationTest, Local) {
- WrapInFunction(Var("a", ty.atomic(Source{{12, 34}}, ty.i32())));
+ WrapInFunction(Var("a", ty.atomic(Source{{12, 34}}, ty.i32())));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: function variable must have a constructible type");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: function variable must have a constructible type");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/resolver/attribute_validation_test.cc b/chromium/third_party/dawn/src/tint/resolver/attribute_validation_test.cc
index 3b8781be8a7..3280ebf46b9 100644
--- a/chromium/third_party/dawn/src/tint/resolver/attribute_validation_test.cc
+++ b/chromium/third_party/dawn/src/tint/resolver/attribute_validation_test.cc
@@ -19,6 +19,8 @@
#include "gmock/gmock.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::resolver {
// Helpers and typedefs
@@ -44,765 +46,722 @@ template <typename T>
using alias2 = builder::alias2<T>;
template <typename T>
using alias3 = builder::alias3<T>;
-using f32 = builder::f32;
-using i32 = builder::i32;
-using u32 = builder::u32;
namespace AttributeTests {
namespace {
enum class AttributeKind {
- kAlign,
- kBinding,
- kBuiltin,
- kGroup,
- kId,
- kInterpolate,
- kInvariant,
- kLocation,
- kOffset,
- kSize,
- kStage,
- kStride,
- kWorkgroup,
-
- kBindingAndGroup,
+ kAlign,
+ kBinding,
+ kBuiltin,
+ kGroup,
+ kId,
+ kInterpolate,
+ kInvariant,
+ kLocation,
+ kOffset,
+ kSize,
+ kStage,
+ kStride,
+ kWorkgroup,
+
+ kBindingAndGroup,
};
static bool IsBindingAttribute(AttributeKind kind) {
- switch (kind) {
- case AttributeKind::kBinding:
- case AttributeKind::kGroup:
- case AttributeKind::kBindingAndGroup:
- return true;
- default:
- return false;
- }
+ switch (kind) {
+ case AttributeKind::kBinding:
+ case AttributeKind::kGroup:
+ case AttributeKind::kBindingAndGroup:
+ return true;
+ default:
+ return false;
+ }
}
struct TestParams {
- AttributeKind kind;
- bool should_pass;
+ AttributeKind kind;
+ bool should_pass;
};
struct TestWithParams : ResolverTestWithParam<TestParams> {};
static ast::AttributeList createAttributes(const Source& source,
ProgramBuilder& builder,
AttributeKind kind) {
- switch (kind) {
- case AttributeKind::kAlign:
- return {builder.create<ast::StructMemberAlignAttribute>(source, 4u)};
- case AttributeKind::kBinding:
- return {builder.create<ast::BindingAttribute>(source, 1u)};
- case AttributeKind::kBuiltin:
- return {builder.Builtin(source, ast::Builtin::kPosition)};
- case AttributeKind::kGroup:
- return {builder.create<ast::GroupAttribute>(source, 1u)};
- case AttributeKind::kId:
- return {builder.create<ast::IdAttribute>(source, 0u)};
- case AttributeKind::kInterpolate:
- return {builder.Interpolate(source, ast::InterpolationType::kLinear,
- ast::InterpolationSampling::kCenter)};
- case AttributeKind::kInvariant:
- return {builder.Invariant(source)};
- case AttributeKind::kLocation:
- return {builder.Location(source, 1)};
- case AttributeKind::kOffset:
- return {builder.create<ast::StructMemberOffsetAttribute>(source, 4u)};
- case AttributeKind::kSize:
- return {builder.create<ast::StructMemberSizeAttribute>(source, 16u)};
- case AttributeKind::kStage:
- return {builder.Stage(source, ast::PipelineStage::kCompute)};
- case AttributeKind::kStride:
- return {builder.create<ast::StrideAttribute>(source, 4u)};
- case AttributeKind::kWorkgroup:
- return {builder.create<ast::WorkgroupAttribute>(source, builder.Expr(1))};
- case AttributeKind::kBindingAndGroup:
- return {builder.create<ast::BindingAttribute>(source, 1u),
- builder.create<ast::GroupAttribute>(source, 1u)};
- }
- return {};
+ switch (kind) {
+ case AttributeKind::kAlign:
+ return {builder.create<ast::StructMemberAlignAttribute>(source, 4u)};
+ case AttributeKind::kBinding:
+ return {builder.create<ast::BindingAttribute>(source, 1u)};
+ case AttributeKind::kBuiltin:
+ return {builder.Builtin(source, ast::Builtin::kPosition)};
+ case AttributeKind::kGroup:
+ return {builder.create<ast::GroupAttribute>(source, 1u)};
+ case AttributeKind::kId:
+ return {builder.create<ast::IdAttribute>(source, 0u)};
+ case AttributeKind::kInterpolate:
+ return {builder.Interpolate(source, ast::InterpolationType::kLinear,
+ ast::InterpolationSampling::kCenter)};
+ case AttributeKind::kInvariant:
+ return {builder.Invariant(source)};
+ case AttributeKind::kLocation:
+ return {builder.Location(source, 1)};
+ case AttributeKind::kOffset:
+ return {builder.create<ast::StructMemberOffsetAttribute>(source, 4u)};
+ case AttributeKind::kSize:
+ return {builder.create<ast::StructMemberSizeAttribute>(source, 16u)};
+ case AttributeKind::kStage:
+ return {builder.Stage(source, ast::PipelineStage::kCompute)};
+ case AttributeKind::kStride:
+ return {builder.create<ast::StrideAttribute>(source, 4u)};
+ case AttributeKind::kWorkgroup:
+ return {builder.create<ast::WorkgroupAttribute>(source, builder.Expr(1_i))};
+ case AttributeKind::kBindingAndGroup:
+ return {builder.create<ast::BindingAttribute>(source, 1_u),
+ builder.create<ast::GroupAttribute>(source, 1_u)};
+ }
+ return {};
}
namespace FunctionInputAndOutputTests {
using FunctionParameterAttributeTest = TestWithParams;
TEST_P(FunctionParameterAttributeTest, IsValid) {
- auto& params = GetParam();
+ auto& params = GetParam();
- Func("main",
- ast::VariableList{Param("a", ty.vec4<f32>(),
- createAttributes({}, *this, params.kind))},
- ty.void_(), {});
+ Func("main",
+ ast::VariableList{Param("a", ty.vec4<f32>(), createAttributes({}, *this, params.kind))},
+ ty.void_(), {});
- if (params.should_pass) {
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- } else {
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "error: attribute is not valid for non-entry point function "
- "parameters");
- }
+ if (params.should_pass) {
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ } else {
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "error: attribute is not valid for non-entry point function "
+ "parameters");
+ }
}
-INSTANTIATE_TEST_SUITE_P(
- ResolverAttributeValidationTest,
- FunctionParameterAttributeTest,
- testing::Values(TestParams{AttributeKind::kAlign, false},
- TestParams{AttributeKind::kBinding, false},
- TestParams{AttributeKind::kBuiltin, false},
- TestParams{AttributeKind::kGroup, false},
- TestParams{AttributeKind::kId, false},
- TestParams{AttributeKind::kInterpolate, false},
- TestParams{AttributeKind::kInvariant, false},
- TestParams{AttributeKind::kLocation, false},
- TestParams{AttributeKind::kOffset, false},
- TestParams{AttributeKind::kSize, false},
- TestParams{AttributeKind::kStage, false},
- TestParams{AttributeKind::kStride, false},
- TestParams{AttributeKind::kWorkgroup, false},
- TestParams{AttributeKind::kBindingAndGroup, false}));
+INSTANTIATE_TEST_SUITE_P(ResolverAttributeValidationTest,
+ FunctionParameterAttributeTest,
+ testing::Values(TestParams{AttributeKind::kAlign, false},
+ TestParams{AttributeKind::kBinding, false},
+ TestParams{AttributeKind::kBuiltin, false},
+ TestParams{AttributeKind::kGroup, false},
+ TestParams{AttributeKind::kId, false},
+ TestParams{AttributeKind::kInterpolate, false},
+ TestParams{AttributeKind::kInvariant, false},
+ TestParams{AttributeKind::kLocation, false},
+ TestParams{AttributeKind::kOffset, false},
+ TestParams{AttributeKind::kSize, false},
+ TestParams{AttributeKind::kStage, false},
+ TestParams{AttributeKind::kStride, false},
+ TestParams{AttributeKind::kWorkgroup, false},
+ TestParams{AttributeKind::kBindingAndGroup, false}));
using FunctionReturnTypeAttributeTest = TestWithParams;
TEST_P(FunctionReturnTypeAttributeTest, IsValid) {
- auto& params = GetParam();
+ auto& params = GetParam();
- Func("main", ast::VariableList{}, ty.f32(), ast::StatementList{Return(1.f)},
- {}, createAttributes({}, *this, params.kind));
+ Func("main", ast::VariableList{}, ty.f32(), ast::StatementList{Return(1_f)}, {},
+ createAttributes({}, *this, params.kind));
- if (params.should_pass) {
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- } else {
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "error: attribute is not valid for non-entry point function "
- "return types");
- }
+ if (params.should_pass) {
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ } else {
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "error: attribute is not valid for non-entry point function "
+ "return types");
+ }
}
-INSTANTIATE_TEST_SUITE_P(
- ResolverAttributeValidationTest,
- FunctionReturnTypeAttributeTest,
- testing::Values(TestParams{AttributeKind::kAlign, false},
- TestParams{AttributeKind::kBinding, false},
- TestParams{AttributeKind::kBuiltin, false},
- TestParams{AttributeKind::kGroup, false},
- TestParams{AttributeKind::kId, false},
- TestParams{AttributeKind::kInterpolate, false},
- TestParams{AttributeKind::kInvariant, false},
- TestParams{AttributeKind::kLocation, false},
- TestParams{AttributeKind::kOffset, false},
- TestParams{AttributeKind::kSize, false},
- TestParams{AttributeKind::kStage, false},
- TestParams{AttributeKind::kStride, false},
- TestParams{AttributeKind::kWorkgroup, false},
- TestParams{AttributeKind::kBindingAndGroup, false}));
+INSTANTIATE_TEST_SUITE_P(ResolverAttributeValidationTest,
+ FunctionReturnTypeAttributeTest,
+ testing::Values(TestParams{AttributeKind::kAlign, false},
+ TestParams{AttributeKind::kBinding, false},
+ TestParams{AttributeKind::kBuiltin, false},
+ TestParams{AttributeKind::kGroup, false},
+ TestParams{AttributeKind::kId, false},
+ TestParams{AttributeKind::kInterpolate, false},
+ TestParams{AttributeKind::kInvariant, false},
+ TestParams{AttributeKind::kLocation, false},
+ TestParams{AttributeKind::kOffset, false},
+ TestParams{AttributeKind::kSize, false},
+ TestParams{AttributeKind::kStage, false},
+ TestParams{AttributeKind::kStride, false},
+ TestParams{AttributeKind::kWorkgroup, false},
+ TestParams{AttributeKind::kBindingAndGroup, false}));
} // namespace FunctionInputAndOutputTests
namespace EntryPointInputAndOutputTests {
using ComputeShaderParameterAttributeTest = TestWithParams;
TEST_P(ComputeShaderParameterAttributeTest, IsValid) {
- auto& params = GetParam();
- auto* p = Param("a", ty.vec4<f32>(),
- createAttributes(Source{{12, 34}}, *this, params.kind));
- Func("main", ast::VariableList{p}, ty.void_(), {},
- {Stage(ast::PipelineStage::kCompute), WorkgroupSize(1)});
+ auto& params = GetParam();
+ auto* p = Param("a", ty.vec4<f32>(), createAttributes(Source{{12, 34}}, *this, params.kind));
+ Func("main", ast::VariableList{p}, ty.void_(), {},
+ {Stage(ast::PipelineStage::kCompute), WorkgroupSize(1_i)});
- if (params.should_pass) {
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- } else {
- EXPECT_FALSE(r()->Resolve());
- if (params.kind == AttributeKind::kBuiltin) {
- EXPECT_EQ(r()->error(),
- "12:34 error: builtin(position) cannot be used in input of "
- "compute pipeline stage");
- } else if (params.kind == AttributeKind::kInterpolate ||
- params.kind == AttributeKind::kLocation ||
- params.kind == AttributeKind::kInvariant) {
- EXPECT_EQ(
- r()->error(),
- "12:34 error: attribute is not valid for compute shader inputs");
+ if (params.should_pass) {
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
} else {
- EXPECT_EQ(r()->error(),
- "12:34 error: attribute is not valid for function parameters");
+ EXPECT_FALSE(r()->Resolve());
+ if (params.kind == AttributeKind::kBuiltin) {
+ EXPECT_EQ(r()->error(),
+ "12:34 error: builtin(position) cannot be used in input of "
+ "compute pipeline stage");
+ } else if (params.kind == AttributeKind::kInterpolate ||
+ params.kind == AttributeKind::kLocation ||
+ params.kind == AttributeKind::kInvariant) {
+ EXPECT_EQ(r()->error(),
+ "12:34 error: attribute is not valid for compute shader inputs");
+ } else {
+ EXPECT_EQ(r()->error(), "12:34 error: attribute is not valid for function parameters");
+ }
}
- }
}
-INSTANTIATE_TEST_SUITE_P(
- ResolverAttributeValidationTest,
- ComputeShaderParameterAttributeTest,
- testing::Values(TestParams{AttributeKind::kAlign, false},
- TestParams{AttributeKind::kBinding, false},
- TestParams{AttributeKind::kBuiltin, false},
- TestParams{AttributeKind::kGroup, false},
- TestParams{AttributeKind::kId, false},
- TestParams{AttributeKind::kInterpolate, false},
- TestParams{AttributeKind::kInvariant, false},
- TestParams{AttributeKind::kLocation, false},
- TestParams{AttributeKind::kOffset, false},
- TestParams{AttributeKind::kSize, false},
- TestParams{AttributeKind::kStage, false},
- TestParams{AttributeKind::kStride, false},
- TestParams{AttributeKind::kWorkgroup, false},
- TestParams{AttributeKind::kBindingAndGroup, false}));
+INSTANTIATE_TEST_SUITE_P(ResolverAttributeValidationTest,
+ ComputeShaderParameterAttributeTest,
+ testing::Values(TestParams{AttributeKind::kAlign, false},
+ TestParams{AttributeKind::kBinding, false},
+ TestParams{AttributeKind::kBuiltin, false},
+ TestParams{AttributeKind::kGroup, false},
+ TestParams{AttributeKind::kId, false},
+ TestParams{AttributeKind::kInterpolate, false},
+ TestParams{AttributeKind::kInvariant, false},
+ TestParams{AttributeKind::kLocation, false},
+ TestParams{AttributeKind::kOffset, false},
+ TestParams{AttributeKind::kSize, false},
+ TestParams{AttributeKind::kStage, false},
+ TestParams{AttributeKind::kStride, false},
+ TestParams{AttributeKind::kWorkgroup, false},
+ TestParams{AttributeKind::kBindingAndGroup, false}));
using FragmentShaderParameterAttributeTest = TestWithParams;
TEST_P(FragmentShaderParameterAttributeTest, IsValid) {
- auto& params = GetParam();
- auto attrs = createAttributes(Source{{12, 34}}, *this, params.kind);
- if (params.kind != AttributeKind::kBuiltin &&
- params.kind != AttributeKind::kLocation) {
- attrs.push_back(Builtin(Source{{34, 56}}, ast::Builtin::kPosition));
- }
- auto* p = Param("a", ty.vec4<f32>(), attrs);
- Func("frag_main", {p}, ty.void_(), {},
- {Stage(ast::PipelineStage::kFragment)});
-
- if (params.should_pass) {
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- } else {
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: attribute is not valid for function parameters");
- }
+ auto& params = GetParam();
+ auto attrs = createAttributes(Source{{12, 34}}, *this, params.kind);
+ if (params.kind != AttributeKind::kBuiltin && params.kind != AttributeKind::kLocation) {
+ attrs.push_back(Builtin(Source{{34, 56}}, ast::Builtin::kPosition));
+ }
+ auto* p = Param("a", ty.vec4<f32>(), attrs);
+ Func("frag_main", {p}, ty.void_(), {}, {Stage(ast::PipelineStage::kFragment)});
+
+ if (params.should_pass) {
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ } else {
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: attribute is not valid for function parameters");
+ }
}
-INSTANTIATE_TEST_SUITE_P(
- ResolverAttributeValidationTest,
- FragmentShaderParameterAttributeTest,
- testing::Values(TestParams{AttributeKind::kAlign, false},
- TestParams{AttributeKind::kBinding, false},
- TestParams{AttributeKind::kBuiltin, true},
- TestParams{AttributeKind::kGroup, false},
- TestParams{AttributeKind::kId, false},
- // kInterpolate tested separately (requires @location)
- TestParams{AttributeKind::kInvariant, true},
- TestParams{AttributeKind::kLocation, true},
- TestParams{AttributeKind::kOffset, false},
- TestParams{AttributeKind::kSize, false},
- TestParams{AttributeKind::kStage, false},
- TestParams{AttributeKind::kStride, false},
- TestParams{AttributeKind::kWorkgroup, false},
- TestParams{AttributeKind::kBindingAndGroup, false}));
+INSTANTIATE_TEST_SUITE_P(ResolverAttributeValidationTest,
+ FragmentShaderParameterAttributeTest,
+ testing::Values(TestParams{AttributeKind::kAlign, false},
+ TestParams{AttributeKind::kBinding, false},
+ TestParams{AttributeKind::kBuiltin, true},
+ TestParams{AttributeKind::kGroup, false},
+ TestParams{AttributeKind::kId, false},
+ // kInterpolate tested separately (requires @location)
+ TestParams{AttributeKind::kInvariant, true},
+ TestParams{AttributeKind::kLocation, true},
+ TestParams{AttributeKind::kOffset, false},
+ TestParams{AttributeKind::kSize, false},
+ TestParams{AttributeKind::kStage, false},
+ TestParams{AttributeKind::kStride, false},
+ TestParams{AttributeKind::kWorkgroup, false},
+ TestParams{AttributeKind::kBindingAndGroup, false}));
using VertexShaderParameterAttributeTest = TestWithParams;
TEST_P(VertexShaderParameterAttributeTest, IsValid) {
- auto& params = GetParam();
- auto attrs = createAttributes(Source{{12, 34}}, *this, params.kind);
- if (params.kind != AttributeKind::kLocation) {
- attrs.push_back(Location(Source{{34, 56}}, 2));
- }
- auto* p = Param("a", ty.vec4<f32>(), attrs);
- Func("vertex_main", ast::VariableList{p}, ty.vec4<f32>(),
- {Return(Construct(ty.vec4<f32>()))},
- {Stage(ast::PipelineStage::kVertex)},
- {Builtin(ast::Builtin::kPosition)});
-
- if (params.should_pass) {
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- } else {
- EXPECT_FALSE(r()->Resolve());
- if (params.kind == AttributeKind::kBuiltin) {
- EXPECT_EQ(r()->error(),
- "12:34 error: builtin(position) cannot be used in input of "
- "vertex pipeline stage");
- } else if (params.kind == AttributeKind::kInvariant) {
- EXPECT_EQ(r()->error(),
- "12:34 error: invariant attribute must only be applied to a "
- "position builtin");
+ auto& params = GetParam();
+ auto attrs = createAttributes(Source{{12, 34}}, *this, params.kind);
+ if (params.kind != AttributeKind::kLocation) {
+ attrs.push_back(Location(Source{{34, 56}}, 2));
+ }
+ auto* p = Param("a", ty.vec4<f32>(), attrs);
+ Func("vertex_main", ast::VariableList{p}, ty.vec4<f32>(), {Return(Construct(ty.vec4<f32>()))},
+ {Stage(ast::PipelineStage::kVertex)}, {Builtin(ast::Builtin::kPosition)});
+
+ if (params.should_pass) {
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
} else {
- EXPECT_EQ(r()->error(),
- "12:34 error: attribute is not valid for function parameters");
+ EXPECT_FALSE(r()->Resolve());
+ if (params.kind == AttributeKind::kBuiltin) {
+ EXPECT_EQ(r()->error(),
+ "12:34 error: builtin(position) cannot be used in input of "
+ "vertex pipeline stage");
+ } else if (params.kind == AttributeKind::kInvariant) {
+ EXPECT_EQ(r()->error(),
+ "12:34 error: invariant attribute must only be applied to a "
+ "position builtin");
+ } else {
+ EXPECT_EQ(r()->error(), "12:34 error: attribute is not valid for function parameters");
+ }
}
- }
}
-INSTANTIATE_TEST_SUITE_P(
- ResolverAttributeValidationTest,
- VertexShaderParameterAttributeTest,
- testing::Values(TestParams{AttributeKind::kAlign, false},
- TestParams{AttributeKind::kBinding, false},
- TestParams{AttributeKind::kBuiltin, false},
- TestParams{AttributeKind::kGroup, false},
- TestParams{AttributeKind::kId, false},
- TestParams{AttributeKind::kInterpolate, true},
- TestParams{AttributeKind::kInvariant, false},
- TestParams{AttributeKind::kLocation, true},
- TestParams{AttributeKind::kOffset, false},
- TestParams{AttributeKind::kSize, false},
- TestParams{AttributeKind::kStage, false},
- TestParams{AttributeKind::kStride, false},
- TestParams{AttributeKind::kWorkgroup, false},
- TestParams{AttributeKind::kBindingAndGroup, false}));
+INSTANTIATE_TEST_SUITE_P(ResolverAttributeValidationTest,
+ VertexShaderParameterAttributeTest,
+ testing::Values(TestParams{AttributeKind::kAlign, false},
+ TestParams{AttributeKind::kBinding, false},
+ TestParams{AttributeKind::kBuiltin, false},
+ TestParams{AttributeKind::kGroup, false},
+ TestParams{AttributeKind::kId, false},
+ TestParams{AttributeKind::kInterpolate, true},
+ TestParams{AttributeKind::kInvariant, false},
+ TestParams{AttributeKind::kLocation, true},
+ TestParams{AttributeKind::kOffset, false},
+ TestParams{AttributeKind::kSize, false},
+ TestParams{AttributeKind::kStage, false},
+ TestParams{AttributeKind::kStride, false},
+ TestParams{AttributeKind::kWorkgroup, false},
+ TestParams{AttributeKind::kBindingAndGroup, false}));
using ComputeShaderReturnTypeAttributeTest = TestWithParams;
TEST_P(ComputeShaderReturnTypeAttributeTest, IsValid) {
- auto& params = GetParam();
- Func("main", ast::VariableList{}, ty.vec4<f32>(),
- {Return(Construct(ty.vec4<f32>(), 1.f))},
- {Stage(ast::PipelineStage::kCompute), WorkgroupSize(1)},
- createAttributes(Source{{12, 34}}, *this, params.kind));
+ auto& params = GetParam();
+ Func("main", ast::VariableList{}, ty.vec4<f32>(), {Return(Construct(ty.vec4<f32>(), 1_f))},
+ {Stage(ast::PipelineStage::kCompute), WorkgroupSize(1_i)},
+ createAttributes(Source{{12, 34}}, *this, params.kind));
- if (params.should_pass) {
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- } else {
- EXPECT_FALSE(r()->Resolve());
- if (params.kind == AttributeKind::kBuiltin) {
- EXPECT_EQ(r()->error(),
- "12:34 error: builtin(position) cannot be used in output of "
- "compute pipeline stage");
- } else if (params.kind == AttributeKind::kInterpolate ||
- params.kind == AttributeKind::kLocation ||
- params.kind == AttributeKind::kInvariant) {
- EXPECT_EQ(
- r()->error(),
- "12:34 error: attribute is not valid for compute shader output");
+ if (params.should_pass) {
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
} else {
- EXPECT_EQ(r()->error(),
- "12:34 error: attribute is not valid for entry point return "
- "types");
+ EXPECT_FALSE(r()->Resolve());
+ if (params.kind == AttributeKind::kBuiltin) {
+ EXPECT_EQ(r()->error(),
+ "12:34 error: builtin(position) cannot be used in output of "
+ "compute pipeline stage");
+ } else if (params.kind == AttributeKind::kInterpolate ||
+ params.kind == AttributeKind::kLocation ||
+ params.kind == AttributeKind::kInvariant) {
+ EXPECT_EQ(r()->error(),
+ "12:34 error: attribute is not valid for compute shader output");
+ } else {
+ EXPECT_EQ(r()->error(),
+ "12:34 error: attribute is not valid for entry point return "
+ "types");
+ }
}
- }
}
-INSTANTIATE_TEST_SUITE_P(
- ResolverAttributeValidationTest,
- ComputeShaderReturnTypeAttributeTest,
- testing::Values(TestParams{AttributeKind::kAlign, false},
- TestParams{AttributeKind::kBinding, false},
- TestParams{AttributeKind::kBuiltin, false},
- TestParams{AttributeKind::kGroup, false},
- TestParams{AttributeKind::kId, false},
- TestParams{AttributeKind::kInterpolate, false},
- TestParams{AttributeKind::kInvariant, false},
- TestParams{AttributeKind::kLocation, false},
- TestParams{AttributeKind::kOffset, false},
- TestParams{AttributeKind::kSize, false},
- TestParams{AttributeKind::kStage, false},
- TestParams{AttributeKind::kStride, false},
- TestParams{AttributeKind::kWorkgroup, false},
- TestParams{AttributeKind::kBindingAndGroup, false}));
+INSTANTIATE_TEST_SUITE_P(ResolverAttributeValidationTest,
+ ComputeShaderReturnTypeAttributeTest,
+ testing::Values(TestParams{AttributeKind::kAlign, false},
+ TestParams{AttributeKind::kBinding, false},
+ TestParams{AttributeKind::kBuiltin, false},
+ TestParams{AttributeKind::kGroup, false},
+ TestParams{AttributeKind::kId, false},
+ TestParams{AttributeKind::kInterpolate, false},
+ TestParams{AttributeKind::kInvariant, false},
+ TestParams{AttributeKind::kLocation, false},
+ TestParams{AttributeKind::kOffset, false},
+ TestParams{AttributeKind::kSize, false},
+ TestParams{AttributeKind::kStage, false},
+ TestParams{AttributeKind::kStride, false},
+ TestParams{AttributeKind::kWorkgroup, false},
+ TestParams{AttributeKind::kBindingAndGroup, false}));
using FragmentShaderReturnTypeAttributeTest = TestWithParams;
TEST_P(FragmentShaderReturnTypeAttributeTest, IsValid) {
- auto& params = GetParam();
- auto attrs = createAttributes(Source{{12, 34}}, *this, params.kind);
- attrs.push_back(Location(Source{{34, 56}}, 2));
- Func("frag_main", {}, ty.vec4<f32>(), {Return(Construct(ty.vec4<f32>()))},
- {Stage(ast::PipelineStage::kFragment)}, attrs);
+ auto& params = GetParam();
+ auto attrs = createAttributes(Source{{12, 34}}, *this, params.kind);
+ attrs.push_back(Location(Source{{34, 56}}, 2));
+ Func("frag_main", {}, ty.vec4<f32>(), {Return(Construct(ty.vec4<f32>()))},
+ {Stage(ast::PipelineStage::kFragment)}, attrs);
- if (params.should_pass) {
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- } else {
- EXPECT_FALSE(r()->Resolve());
- if (params.kind == AttributeKind::kBuiltin) {
- EXPECT_EQ(r()->error(),
- "12:34 error: builtin(position) cannot be used in output of "
- "fragment pipeline stage");
- } else if (params.kind == AttributeKind::kInvariant) {
- EXPECT_EQ(r()->error(),
- "12:34 error: invariant attribute must only be applied to a "
- "position builtin");
- } else if (params.kind == AttributeKind::kLocation) {
- EXPECT_EQ(r()->error(),
- "34:56 error: duplicate location attribute\n"
- "12:34 note: first attribute declared here");
+ if (params.should_pass) {
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
} else {
- EXPECT_EQ(r()->error(),
- "12:34 error: attribute is not valid for entry point return "
- "types");
+ EXPECT_FALSE(r()->Resolve());
+ if (params.kind == AttributeKind::kBuiltin) {
+ EXPECT_EQ(r()->error(),
+ "12:34 error: builtin(position) cannot be used in output of "
+ "fragment pipeline stage");
+ } else if (params.kind == AttributeKind::kInvariant) {
+ EXPECT_EQ(r()->error(),
+ "12:34 error: invariant attribute must only be applied to a "
+ "position builtin");
+ } else if (params.kind == AttributeKind::kLocation) {
+ EXPECT_EQ(r()->error(),
+ "34:56 error: duplicate location attribute\n"
+ "12:34 note: first attribute declared here");
+ } else {
+ EXPECT_EQ(r()->error(),
+ "12:34 error: attribute is not valid for entry point return "
+ "types");
+ }
}
- }
}
-INSTANTIATE_TEST_SUITE_P(
- ResolverAttributeValidationTest,
- FragmentShaderReturnTypeAttributeTest,
- testing::Values(TestParams{AttributeKind::kAlign, false},
- TestParams{AttributeKind::kBinding, false},
- TestParams{AttributeKind::kBuiltin, false},
- TestParams{AttributeKind::kGroup, false},
- TestParams{AttributeKind::kId, false},
- TestParams{AttributeKind::kInterpolate, true},
- TestParams{AttributeKind::kInvariant, false},
- TestParams{AttributeKind::kLocation, false},
- TestParams{AttributeKind::kOffset, false},
- TestParams{AttributeKind::kSize, false},
- TestParams{AttributeKind::kStage, false},
- TestParams{AttributeKind::kStride, false},
- TestParams{AttributeKind::kWorkgroup, false},
- TestParams{AttributeKind::kBindingAndGroup, false}));
+INSTANTIATE_TEST_SUITE_P(ResolverAttributeValidationTest,
+ FragmentShaderReturnTypeAttributeTest,
+ testing::Values(TestParams{AttributeKind::kAlign, false},
+ TestParams{AttributeKind::kBinding, false},
+ TestParams{AttributeKind::kBuiltin, false},
+ TestParams{AttributeKind::kGroup, false},
+ TestParams{AttributeKind::kId, false},
+ TestParams{AttributeKind::kInterpolate, true},
+ TestParams{AttributeKind::kInvariant, false},
+ TestParams{AttributeKind::kLocation, false},
+ TestParams{AttributeKind::kOffset, false},
+ TestParams{AttributeKind::kSize, false},
+ TestParams{AttributeKind::kStage, false},
+ TestParams{AttributeKind::kStride, false},
+ TestParams{AttributeKind::kWorkgroup, false},
+ TestParams{AttributeKind::kBindingAndGroup, false}));
using VertexShaderReturnTypeAttributeTest = TestWithParams;
TEST_P(VertexShaderReturnTypeAttributeTest, IsValid) {
- auto& params = GetParam();
- auto attrs = createAttributes(Source{{12, 34}}, *this, params.kind);
- // a vertex shader must include the 'position' builtin in its return type
- if (params.kind != AttributeKind::kBuiltin) {
- attrs.push_back(Builtin(Source{{34, 56}}, ast::Builtin::kPosition));
- }
- Func("vertex_main", ast::VariableList{}, ty.vec4<f32>(),
- {Return(Construct(ty.vec4<f32>()))},
- {Stage(ast::PipelineStage::kVertex)}, attrs);
-
- if (params.should_pass) {
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- } else {
- EXPECT_FALSE(r()->Resolve());
- if (params.kind == AttributeKind::kLocation) {
- EXPECT_EQ(r()->error(),
- "34:56 error: multiple entry point IO attributes\n"
- "12:34 note: previously consumed location(1)");
+ auto& params = GetParam();
+ auto attrs = createAttributes(Source{{12, 34}}, *this, params.kind);
+ // a vertex shader must include the 'position' builtin in its return type
+ if (params.kind != AttributeKind::kBuiltin) {
+ attrs.push_back(Builtin(Source{{34, 56}}, ast::Builtin::kPosition));
+ }
+ Func("vertex_main", ast::VariableList{}, ty.vec4<f32>(), {Return(Construct(ty.vec4<f32>()))},
+ {Stage(ast::PipelineStage::kVertex)}, attrs);
+
+ if (params.should_pass) {
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
} else {
- EXPECT_EQ(r()->error(),
- "12:34 error: attribute is not valid for entry point return "
- "types");
+ EXPECT_FALSE(r()->Resolve());
+ if (params.kind == AttributeKind::kLocation) {
+ EXPECT_EQ(r()->error(),
+ "34:56 error: multiple entry point IO attributes\n"
+ "12:34 note: previously consumed location(1)");
+ } else {
+ EXPECT_EQ(r()->error(),
+ "12:34 error: attribute is not valid for entry point return "
+ "types");
+ }
}
- }
}
-INSTANTIATE_TEST_SUITE_P(
- ResolverAttributeValidationTest,
- VertexShaderReturnTypeAttributeTest,
- testing::Values(TestParams{AttributeKind::kAlign, false},
- TestParams{AttributeKind::kBinding, false},
- TestParams{AttributeKind::kBuiltin, true},
- TestParams{AttributeKind::kGroup, false},
- TestParams{AttributeKind::kId, false},
- // kInterpolate tested separately (requires @location)
- TestParams{AttributeKind::kInvariant, true},
- TestParams{AttributeKind::kLocation, false},
- TestParams{AttributeKind::kOffset, false},
- TestParams{AttributeKind::kSize, false},
- TestParams{AttributeKind::kStage, false},
- TestParams{AttributeKind::kStride, false},
- TestParams{AttributeKind::kWorkgroup, false},
- TestParams{AttributeKind::kBindingAndGroup, false}));
+INSTANTIATE_TEST_SUITE_P(ResolverAttributeValidationTest,
+ VertexShaderReturnTypeAttributeTest,
+ testing::Values(TestParams{AttributeKind::kAlign, false},
+ TestParams{AttributeKind::kBinding, false},
+ TestParams{AttributeKind::kBuiltin, true},
+ TestParams{AttributeKind::kGroup, false},
+ TestParams{AttributeKind::kId, false},
+ // kInterpolate tested separately (requires @location)
+ TestParams{AttributeKind::kInvariant, true},
+ TestParams{AttributeKind::kLocation, false},
+ TestParams{AttributeKind::kOffset, false},
+ TestParams{AttributeKind::kSize, false},
+ TestParams{AttributeKind::kStage, false},
+ TestParams{AttributeKind::kStride, false},
+ TestParams{AttributeKind::kWorkgroup, false},
+ TestParams{AttributeKind::kBindingAndGroup, false}));
using EntryPointParameterAttributeTest = TestWithParams;
TEST_F(EntryPointParameterAttributeTest, DuplicateAttribute) {
- Func("main", ast::VariableList{}, ty.f32(), ast::StatementList{Return(1.f)},
- {Stage(ast::PipelineStage::kFragment)},
- {
- Location(Source{{12, 34}}, 2),
- Location(Source{{56, 78}}, 3),
- });
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- R"(56:78 error: duplicate location attribute
+ Func("main", ast::VariableList{}, ty.f32(), ast::StatementList{Return(1_f)},
+ {Stage(ast::PipelineStage::kFragment)},
+ {
+ Location(Source{{12, 34}}, 2),
+ Location(Source{{56, 78}}, 3),
+ });
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ R"(56:78 error: duplicate location attribute
12:34 note: first attribute declared here)");
}
TEST_F(EntryPointParameterAttributeTest, DuplicateInternalAttribute) {
- auto* s = Param("s", ty.sampler(ast::SamplerKind::kSampler),
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(0),
- Disable(ast::DisabledValidation::kBindingPointCollision),
- Disable(ast::DisabledValidation::kEntryPointParameter),
- });
- Func("f", {s}, ty.void_(), {}, {Stage(ast::PipelineStage::kFragment)});
+ auto* s = Param("s", ty.sampler(ast::SamplerKind::kSampler),
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(0),
+ Disable(ast::DisabledValidation::kBindingPointCollision),
+ Disable(ast::DisabledValidation::kEntryPointParameter),
+ });
+ Func("f", {s}, ty.void_(), {}, {Stage(ast::PipelineStage::kFragment)});
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
using EntryPointReturnTypeAttributeTest = ResolverTest;
TEST_F(EntryPointReturnTypeAttributeTest, DuplicateAttribute) {
- Func("main", ast::VariableList{}, ty.f32(), ast::StatementList{Return(1.f)},
- ast::AttributeList{Stage(ast::PipelineStage::kFragment)},
- ast::AttributeList{
- Location(Source{{12, 34}}, 2),
- Location(Source{{56, 78}}, 3),
- });
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- R"(56:78 error: duplicate location attribute
+ Func("main", ast::VariableList{}, ty.f32(), ast::StatementList{Return(1_f)},
+ ast::AttributeList{Stage(ast::PipelineStage::kFragment)},
+ ast::AttributeList{
+ Location(Source{{12, 34}}, 2),
+ Location(Source{{56, 78}}, 3),
+ });
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ R"(56:78 error: duplicate location attribute
12:34 note: first attribute declared here)");
}
TEST_F(EntryPointReturnTypeAttributeTest, DuplicateInternalAttribute) {
- Func("f", {}, ty.i32(), {Return(1)}, {Stage(ast::PipelineStage::kFragment)},
- ast::AttributeList{
- Disable(ast::DisabledValidation::kBindingPointCollision),
- Disable(ast::DisabledValidation::kEntryPointParameter),
- });
+ Func("f", {}, ty.i32(), {Return(1_i)}, {Stage(ast::PipelineStage::kFragment)},
+ ast::AttributeList{
+ Disable(ast::DisabledValidation::kBindingPointCollision),
+ Disable(ast::DisabledValidation::kEntryPointParameter),
+ });
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
} // namespace EntryPointInputAndOutputTests
namespace StructAndStructMemberTests {
using StructAttributeTest = TestWithParams;
-using SpirvBlockAttribute =
- transform::AddSpirvBlockAttribute::SpirvBlockAttribute;
+using SpirvBlockAttribute = transform::AddSpirvBlockAttribute::SpirvBlockAttribute;
TEST_P(StructAttributeTest, IsValid) {
- auto& params = GetParam();
+ auto& params = GetParam();
- auto* str = create<ast::Struct>(
- Sym("mystruct"), ast::StructMemberList{Member("a", ty.f32())},
- createAttributes(Source{{12, 34}}, *this, params.kind));
- AST().AddGlobalDeclaration(str);
+ auto* str = create<ast::Struct>(Sym("mystruct"), ast::StructMemberList{Member("a", ty.f32())},
+ createAttributes(Source{{12, 34}}, *this, params.kind));
+ AST().AddGlobalDeclaration(str);
- if (params.should_pass) {
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- } else {
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: attribute is not valid for struct declarations");
- }
+ if (params.should_pass) {
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ } else {
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: attribute is not valid for struct declarations");
+ }
}
-INSTANTIATE_TEST_SUITE_P(
- ResolverAttributeValidationTest,
- StructAttributeTest,
- testing::Values(TestParams{AttributeKind::kAlign, false},
- TestParams{AttributeKind::kBinding, false},
- TestParams{AttributeKind::kBuiltin, false},
- TestParams{AttributeKind::kGroup, false},
- TestParams{AttributeKind::kId, false},
- TestParams{AttributeKind::kInterpolate, false},
- TestParams{AttributeKind::kInvariant, false},
- TestParams{AttributeKind::kLocation, false},
- TestParams{AttributeKind::kOffset, false},
- TestParams{AttributeKind::kSize, false},
- TestParams{AttributeKind::kStage, false},
- TestParams{AttributeKind::kStride, false},
- TestParams{AttributeKind::kWorkgroup, false},
- TestParams{AttributeKind::kBindingAndGroup, false}));
+INSTANTIATE_TEST_SUITE_P(ResolverAttributeValidationTest,
+ StructAttributeTest,
+ testing::Values(TestParams{AttributeKind::kAlign, false},
+ TestParams{AttributeKind::kBinding, false},
+ TestParams{AttributeKind::kBuiltin, false},
+ TestParams{AttributeKind::kGroup, false},
+ TestParams{AttributeKind::kId, false},
+ TestParams{AttributeKind::kInterpolate, false},
+ TestParams{AttributeKind::kInvariant, false},
+ TestParams{AttributeKind::kLocation, false},
+ TestParams{AttributeKind::kOffset, false},
+ TestParams{AttributeKind::kSize, false},
+ TestParams{AttributeKind::kStage, false},
+ TestParams{AttributeKind::kStride, false},
+ TestParams{AttributeKind::kWorkgroup, false},
+ TestParams{AttributeKind::kBindingAndGroup, false}));
using StructMemberAttributeTest = TestWithParams;
TEST_P(StructMemberAttributeTest, IsValid) {
- auto& params = GetParam();
- ast::StructMemberList members;
- if (params.kind == AttributeKind::kBuiltin) {
- members.push_back(
- {Member("a", ty.vec4<f32>(),
- createAttributes(Source{{12, 34}}, *this, params.kind))});
- } else {
- members.push_back(
- {Member("a", ty.f32(),
- createAttributes(Source{{12, 34}}, *this, params.kind))});
- }
- Structure("mystruct", members);
- WrapInFunction();
- if (params.should_pass) {
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- } else {
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: attribute is not valid for structure members");
- }
+ auto& params = GetParam();
+ ast::StructMemberList members;
+ if (params.kind == AttributeKind::kBuiltin) {
+ members.push_back(
+ {Member("a", ty.vec4<f32>(), createAttributes(Source{{12, 34}}, *this, params.kind))});
+ } else {
+ members.push_back(
+ {Member("a", ty.f32(), createAttributes(Source{{12, 34}}, *this, params.kind))});
+ }
+ Structure("mystruct", members);
+ WrapInFunction();
+ if (params.should_pass) {
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ } else {
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: attribute is not valid for structure members");
+ }
}
-INSTANTIATE_TEST_SUITE_P(
- ResolverAttributeValidationTest,
- StructMemberAttributeTest,
- testing::Values(TestParams{AttributeKind::kAlign, true},
- TestParams{AttributeKind::kBinding, false},
- TestParams{AttributeKind::kBuiltin, true},
- TestParams{AttributeKind::kGroup, false},
- TestParams{AttributeKind::kId, false},
- // kInterpolate tested separately (requires @location)
- // kInvariant tested separately (requires position builtin)
- TestParams{AttributeKind::kLocation, true},
- TestParams{AttributeKind::kOffset, true},
- TestParams{AttributeKind::kSize, true},
- TestParams{AttributeKind::kStage, false},
- TestParams{AttributeKind::kStride, false},
- TestParams{AttributeKind::kWorkgroup, false},
- TestParams{AttributeKind::kBindingAndGroup, false}));
+INSTANTIATE_TEST_SUITE_P(ResolverAttributeValidationTest,
+ StructMemberAttributeTest,
+ testing::Values(TestParams{AttributeKind::kAlign, true},
+ TestParams{AttributeKind::kBinding, false},
+ TestParams{AttributeKind::kBuiltin, true},
+ TestParams{AttributeKind::kGroup, false},
+ TestParams{AttributeKind::kId, false},
+ // kInterpolate tested separately (requires @location)
+ // kInvariant tested separately (requires position builtin)
+ TestParams{AttributeKind::kLocation, true},
+ TestParams{AttributeKind::kOffset, true},
+ TestParams{AttributeKind::kSize, true},
+ TestParams{AttributeKind::kStage, false},
+ TestParams{AttributeKind::kStride, false},
+ TestParams{AttributeKind::kWorkgroup, false},
+ TestParams{AttributeKind::kBindingAndGroup, false}));
TEST_F(StructMemberAttributeTest, DuplicateAttribute) {
- Structure(
- "mystruct",
- {
- Member(
- "a", ty.i32(),
+ Structure("mystruct",
{
- create<ast::StructMemberAlignAttribute>(Source{{12, 34}}, 4u),
- create<ast::StructMemberAlignAttribute>(Source{{56, 78}}, 8u),
- }),
- });
- WrapInFunction();
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- R"(56:78 error: duplicate align attribute
+ Member("a", ty.i32(),
+ {
+ create<ast::StructMemberAlignAttribute>(Source{{12, 34}}, 4u),
+ create<ast::StructMemberAlignAttribute>(Source{{56, 78}}, 8u),
+ }),
+ });
+ WrapInFunction();
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ R"(56:78 error: duplicate align attribute
12:34 note: first attribute declared here)");
}
TEST_F(StructMemberAttributeTest, InvariantAttributeWithPosition) {
- Structure("mystruct", {
- Member("a", ty.vec4<f32>(),
- {
- Invariant(),
- Builtin(ast::Builtin::kPosition),
- }),
- });
- WrapInFunction();
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ Structure("mystruct", {
+ Member("a", ty.vec4<f32>(),
+ {
+ Invariant(),
+ Builtin(ast::Builtin::kPosition),
+ }),
+ });
+ WrapInFunction();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(StructMemberAttributeTest, InvariantAttributeWithoutPosition) {
- Structure("mystruct", {
- Member("a", ty.vec4<f32>(),
- {
- Invariant(Source{{12, 34}}),
- }),
- });
- WrapInFunction();
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: invariant attribute must only be applied to a "
- "position builtin");
+ Structure("mystruct", {
+ Member("a", ty.vec4<f32>(),
+ {
+ Invariant(Source{{12, 34}}),
+ }),
+ });
+ WrapInFunction();
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: invariant attribute must only be applied to a "
+ "position builtin");
}
} // namespace StructAndStructMemberTests
using ArrayAttributeTest = TestWithParams;
TEST_P(ArrayAttributeTest, IsValid) {
- auto& params = GetParam();
+ auto& params = GetParam();
- auto* arr = ty.array(ty.f32(), nullptr,
- createAttributes(Source{{12, 34}}, *this, params.kind));
- Structure("mystruct", {
- Member("a", arr),
- });
+ auto* arr = ty.array(ty.f32(), nullptr, createAttributes(Source{{12, 34}}, *this, params.kind));
+ Structure("mystruct", {
+ Member("a", arr),
+ });
- WrapInFunction();
+ WrapInFunction();
- if (params.should_pass) {
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- } else {
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: attribute is not valid for array types");
- }
+ if (params.should_pass) {
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ } else {
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: attribute is not valid for array types");
+ }
}
-INSTANTIATE_TEST_SUITE_P(
- ResolverAttributeValidationTest,
- ArrayAttributeTest,
- testing::Values(TestParams{AttributeKind::kAlign, false},
- TestParams{AttributeKind::kBinding, false},
- TestParams{AttributeKind::kBuiltin, false},
- TestParams{AttributeKind::kGroup, false},
- TestParams{AttributeKind::kId, false},
- TestParams{AttributeKind::kInterpolate, false},
- TestParams{AttributeKind::kInvariant, false},
- TestParams{AttributeKind::kLocation, false},
- TestParams{AttributeKind::kOffset, false},
- TestParams{AttributeKind::kSize, false},
- TestParams{AttributeKind::kStage, false},
- TestParams{AttributeKind::kStride, true},
- TestParams{AttributeKind::kWorkgroup, false},
- TestParams{AttributeKind::kBindingAndGroup, false}));
+INSTANTIATE_TEST_SUITE_P(ResolverAttributeValidationTest,
+ ArrayAttributeTest,
+ testing::Values(TestParams{AttributeKind::kAlign, false},
+ TestParams{AttributeKind::kBinding, false},
+ TestParams{AttributeKind::kBuiltin, false},
+ TestParams{AttributeKind::kGroup, false},
+ TestParams{AttributeKind::kId, false},
+ TestParams{AttributeKind::kInterpolate, false},
+ TestParams{AttributeKind::kInvariant, false},
+ TestParams{AttributeKind::kLocation, false},
+ TestParams{AttributeKind::kOffset, false},
+ TestParams{AttributeKind::kSize, false},
+ TestParams{AttributeKind::kStage, false},
+ TestParams{AttributeKind::kStride, true},
+ TestParams{AttributeKind::kWorkgroup, false},
+ TestParams{AttributeKind::kBindingAndGroup, false}));
using VariableAttributeTest = TestWithParams;
TEST_P(VariableAttributeTest, IsValid) {
- auto& params = GetParam();
+ auto& params = GetParam();
- if (IsBindingAttribute(params.kind)) {
- Global("a", ty.sampler(ast::SamplerKind::kSampler),
- ast::StorageClass::kNone, nullptr,
- createAttributes(Source{{12, 34}}, *this, params.kind));
- } else {
- Global("a", ty.f32(), ast::StorageClass::kPrivate, nullptr,
- createAttributes(Source{{12, 34}}, *this, params.kind));
- }
+ if (IsBindingAttribute(params.kind)) {
+ Global("a", ty.sampler(ast::SamplerKind::kSampler), ast::StorageClass::kNone, nullptr,
+ createAttributes(Source{{12, 34}}, *this, params.kind));
+ } else {
+ Global("a", ty.f32(), ast::StorageClass::kPrivate, nullptr,
+ createAttributes(Source{{12, 34}}, *this, params.kind));
+ }
- WrapInFunction();
+ WrapInFunction();
- if (params.should_pass) {
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- } else {
- EXPECT_FALSE(r()->Resolve());
- if (!IsBindingAttribute(params.kind)) {
- EXPECT_EQ(r()->error(),
- "12:34 error: attribute is not valid for variables");
+ if (params.should_pass) {
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ } else {
+ EXPECT_FALSE(r()->Resolve());
+ if (!IsBindingAttribute(params.kind)) {
+ EXPECT_EQ(r()->error(), "12:34 error: attribute is not valid for variables");
+ }
}
- }
}
-INSTANTIATE_TEST_SUITE_P(
- ResolverAttributeValidationTest,
- VariableAttributeTest,
- testing::Values(TestParams{AttributeKind::kAlign, false},
- TestParams{AttributeKind::kBinding, false},
- TestParams{AttributeKind::kBuiltin, false},
- TestParams{AttributeKind::kGroup, false},
- TestParams{AttributeKind::kId, false},
- TestParams{AttributeKind::kInterpolate, false},
- TestParams{AttributeKind::kInvariant, false},
- TestParams{AttributeKind::kLocation, false},
- TestParams{AttributeKind::kOffset, false},
- TestParams{AttributeKind::kSize, false},
- TestParams{AttributeKind::kStage, false},
- TestParams{AttributeKind::kStride, false},
- TestParams{AttributeKind::kWorkgroup, false},
- TestParams{AttributeKind::kBindingAndGroup, true}));
+INSTANTIATE_TEST_SUITE_P(ResolverAttributeValidationTest,
+ VariableAttributeTest,
+ testing::Values(TestParams{AttributeKind::kAlign, false},
+ TestParams{AttributeKind::kBinding, false},
+ TestParams{AttributeKind::kBuiltin, false},
+ TestParams{AttributeKind::kGroup, false},
+ TestParams{AttributeKind::kId, false},
+ TestParams{AttributeKind::kInterpolate, false},
+ TestParams{AttributeKind::kInvariant, false},
+ TestParams{AttributeKind::kLocation, false},
+ TestParams{AttributeKind::kOffset, false},
+ TestParams{AttributeKind::kSize, false},
+ TestParams{AttributeKind::kStage, false},
+ TestParams{AttributeKind::kStride, false},
+ TestParams{AttributeKind::kWorkgroup, false},
+ TestParams{AttributeKind::kBindingAndGroup, true}));
TEST_F(VariableAttributeTest, DuplicateAttribute) {
- Global("a", ty.sampler(ast::SamplerKind::kSampler),
- ast::AttributeList{
- create<ast::BindingAttribute>(Source{{12, 34}}, 2),
- create<ast::GroupAttribute>(2),
- create<ast::BindingAttribute>(Source{{56, 78}}, 3),
- });
+ Global("a", ty.sampler(ast::SamplerKind::kSampler),
+ ast::AttributeList{
+ create<ast::BindingAttribute>(Source{{12, 34}}, 2),
+ create<ast::GroupAttribute>(2),
+ create<ast::BindingAttribute>(Source{{56, 78}}, 3),
+ });
- WrapInFunction();
+ WrapInFunction();
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- R"(56:78 error: duplicate binding attribute
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ R"(56:78 error: duplicate binding attribute
12:34 note: first attribute declared here)");
}
TEST_F(VariableAttributeTest, LocalVariable) {
- auto* v = Var("a", ty.f32(),
- ast::AttributeList{
- create<ast::BindingAttribute>(Source{{12, 34}}, 2),
- });
+ auto* v = Var("a", ty.f32(),
+ ast::AttributeList{
+ create<ast::BindingAttribute>(Source{{12, 34}}, 2),
+ });
- WrapInFunction(v);
+ WrapInFunction(v);
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: attributes are not valid on local variables");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: attributes are not valid on local variables");
}
using ConstantAttributeTest = TestWithParams;
TEST_P(ConstantAttributeTest, IsValid) {
- auto& params = GetParam();
+ auto& params = GetParam();
- GlobalConst("a", ty.f32(), Expr(1.23f),
- createAttributes(Source{{12, 34}}, *this, params.kind));
+ GlobalConst("a", ty.f32(), Expr(1.23_f),
+ createAttributes(Source{{12, 34}}, *this, params.kind));
- WrapInFunction();
+ WrapInFunction();
- if (params.should_pass) {
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- } else {
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: attribute is not valid for constants");
- }
+ if (params.should_pass) {
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ } else {
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: attribute is not valid for constants");
+ }
}
-INSTANTIATE_TEST_SUITE_P(
- ResolverAttributeValidationTest,
- ConstantAttributeTest,
- testing::Values(TestParams{AttributeKind::kAlign, false},
- TestParams{AttributeKind::kBinding, false},
- TestParams{AttributeKind::kBuiltin, false},
- TestParams{AttributeKind::kGroup, false},
- TestParams{AttributeKind::kId, true},
- TestParams{AttributeKind::kInterpolate, false},
- TestParams{AttributeKind::kInvariant, false},
- TestParams{AttributeKind::kLocation, false},
- TestParams{AttributeKind::kOffset, false},
- TestParams{AttributeKind::kSize, false},
- TestParams{AttributeKind::kStage, false},
- TestParams{AttributeKind::kStride, false},
- TestParams{AttributeKind::kWorkgroup, false},
- TestParams{AttributeKind::kBindingAndGroup, false}));
+INSTANTIATE_TEST_SUITE_P(ResolverAttributeValidationTest,
+ ConstantAttributeTest,
+ testing::Values(TestParams{AttributeKind::kAlign, false},
+ TestParams{AttributeKind::kBinding, false},
+ TestParams{AttributeKind::kBuiltin, false},
+ TestParams{AttributeKind::kGroup, false},
+ TestParams{AttributeKind::kId, true},
+ TestParams{AttributeKind::kInterpolate, false},
+ TestParams{AttributeKind::kInvariant, false},
+ TestParams{AttributeKind::kLocation, false},
+ TestParams{AttributeKind::kOffset, false},
+ TestParams{AttributeKind::kSize, false},
+ TestParams{AttributeKind::kStage, false},
+ TestParams{AttributeKind::kStride, false},
+ TestParams{AttributeKind::kWorkgroup, false},
+ TestParams{AttributeKind::kBindingAndGroup, false}));
TEST_F(ConstantAttributeTest, DuplicateAttribute) {
- GlobalConst("a", ty.f32(), Expr(1.23f),
- ast::AttributeList{
- create<ast::IdAttribute>(Source{{12, 34}}, 0),
- create<ast::IdAttribute>(Source{{56, 78}}, 1),
- });
+ GlobalConst("a", ty.f32(), Expr(1.23_f),
+ ast::AttributeList{
+ create<ast::IdAttribute>(Source{{12, 34}}, 0),
+ create<ast::IdAttribute>(Source{{56, 78}}, 1),
+ });
- WrapInFunction();
+ WrapInFunction();
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- R"(56:78 error: duplicate id attribute
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ R"(56:78 error: duplicate id attribute
12:34 note: first attribute declared here)");
}
@@ -813,46 +772,46 @@ namespace ArrayStrideTests {
namespace {
struct Params {
- builder::ast_type_func_ptr create_el_type;
- uint32_t stride;
- bool should_pass;
+ builder::ast_type_func_ptr create_el_type;
+ uint32_t stride;
+ bool should_pass;
};
template <typename T>
constexpr Params ParamsFor(uint32_t stride, bool should_pass) {
- return Params{DataType<T>::AST, stride, should_pass};
+ return Params{DataType<T>::AST, stride, should_pass};
}
struct TestWithParams : ResolverTestWithParam<Params> {};
using ArrayStrideTest = TestWithParams;
TEST_P(ArrayStrideTest, All) {
- auto& params = GetParam();
- auto* el_ty = params.create_el_type(*this);
+ auto& params = GetParam();
+ auto* el_ty = params.create_el_type(*this);
- std::stringstream ss;
- ss << "el_ty: " << FriendlyName(el_ty) << ", stride: " << params.stride
- << ", should_pass: " << params.should_pass;
- SCOPED_TRACE(ss.str());
+ std::stringstream ss;
+ ss << "el_ty: " << FriendlyName(el_ty) << ", stride: " << params.stride
+ << ", should_pass: " << params.should_pass;
+ SCOPED_TRACE(ss.str());
- auto* arr = ty.array(Source{{12, 34}}, el_ty, 4, params.stride);
+ auto* arr = ty.array(Source{{12, 34}}, el_ty, 4_u, params.stride);
- Global("myarray", arr, ast::StorageClass::kPrivate);
+ Global("myarray", arr, ast::StorageClass::kPrivate);
- if (params.should_pass) {
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- } else {
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: arrays decorated with the stride attribute must "
- "have a stride that is at least the size of the element type, "
- "and be a multiple of the element type's alignment value.");
- }
+ if (params.should_pass) {
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ } else {
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: arrays decorated with the stride attribute must "
+ "have a stride that is at least the size of the element type, "
+ "and be a multiple of the element type's alignment value.");
+ }
}
struct SizeAndAlignment {
- uint32_t size;
- uint32_t align;
+ uint32_t size;
+ uint32_t align;
};
constexpr SizeAndAlignment default_u32 = {4, 4};
constexpr SizeAndAlignment default_i32 = {4, 4};
@@ -864,68 +823,67 @@ constexpr SizeAndAlignment default_mat2x2 = {16, 8};
constexpr SizeAndAlignment default_mat3x3 = {48, 16};
constexpr SizeAndAlignment default_mat4x4 = {64, 16};
-INSTANTIATE_TEST_SUITE_P(
- ResolverAttributeValidationTest,
- ArrayStrideTest,
- testing::Values(
- // Succeed because stride >= element size (while being multiple of
- // element alignment)
- ParamsFor<u32>(default_u32.size, true),
- ParamsFor<i32>(default_i32.size, true),
- ParamsFor<f32>(default_f32.size, true),
- ParamsFor<vec2<f32>>(default_vec2.size, true),
- // vec3's default size is not a multiple of its alignment
- // ParamsFor<vec3<f32>, default_vec3.size, true},
- ParamsFor<vec4<f32>>(default_vec4.size, true),
- ParamsFor<mat2x2<f32>>(default_mat2x2.size, true),
- ParamsFor<mat3x3<f32>>(default_mat3x3.size, true),
- ParamsFor<mat4x4<f32>>(default_mat4x4.size, true),
-
- // Fail because stride is < element size
- ParamsFor<u32>(default_u32.size - 1, false),
- ParamsFor<i32>(default_i32.size - 1, false),
- ParamsFor<f32>(default_f32.size - 1, false),
- ParamsFor<vec2<f32>>(default_vec2.size - 1, false),
- ParamsFor<vec3<f32>>(default_vec3.size - 1, false),
- ParamsFor<vec4<f32>>(default_vec4.size - 1, false),
- ParamsFor<mat2x2<f32>>(default_mat2x2.size - 1, false),
- ParamsFor<mat3x3<f32>>(default_mat3x3.size - 1, false),
- ParamsFor<mat4x4<f32>>(default_mat4x4.size - 1, false),
-
- // Succeed because stride equals multiple of element alignment
- ParamsFor<u32>(default_u32.align * 7, true),
- ParamsFor<i32>(default_i32.align * 7, true),
- ParamsFor<f32>(default_f32.align * 7, true),
- ParamsFor<vec2<f32>>(default_vec2.align * 7, true),
- ParamsFor<vec3<f32>>(default_vec3.align * 7, true),
- ParamsFor<vec4<f32>>(default_vec4.align * 7, true),
- ParamsFor<mat2x2<f32>>(default_mat2x2.align * 7, true),
- ParamsFor<mat3x3<f32>>(default_mat3x3.align * 7, true),
- ParamsFor<mat4x4<f32>>(default_mat4x4.align * 7, true),
-
- // Fail because stride is not multiple of element alignment
- ParamsFor<u32>((default_u32.align - 1) * 7, false),
- ParamsFor<i32>((default_i32.align - 1) * 7, false),
- ParamsFor<f32>((default_f32.align - 1) * 7, false),
- ParamsFor<vec2<f32>>((default_vec2.align - 1) * 7, false),
- ParamsFor<vec3<f32>>((default_vec3.align - 1) * 7, false),
- ParamsFor<vec4<f32>>((default_vec4.align - 1) * 7, false),
- ParamsFor<mat2x2<f32>>((default_mat2x2.align - 1) * 7, false),
- ParamsFor<mat3x3<f32>>((default_mat3x3.align - 1) * 7, false),
- ParamsFor<mat4x4<f32>>((default_mat4x4.align - 1) * 7, false)));
+INSTANTIATE_TEST_SUITE_P(ResolverAttributeValidationTest,
+ ArrayStrideTest,
+ testing::Values(
+ // Succeed because stride >= element size (while being multiple of
+ // element alignment)
+ ParamsFor<u32>(default_u32.size, true),
+ ParamsFor<i32>(default_i32.size, true),
+ ParamsFor<f32>(default_f32.size, true),
+ ParamsFor<vec2<f32>>(default_vec2.size, true),
+ // vec3's default size is not a multiple of its alignment
+ // ParamsFor<vec3<f32>, default_vec3.size, true},
+ ParamsFor<vec4<f32>>(default_vec4.size, true),
+ ParamsFor<mat2x2<f32>>(default_mat2x2.size, true),
+ ParamsFor<mat3x3<f32>>(default_mat3x3.size, true),
+ ParamsFor<mat4x4<f32>>(default_mat4x4.size, true),
+
+ // Fail because stride is < element size
+ ParamsFor<u32>(default_u32.size - 1, false),
+ ParamsFor<i32>(default_i32.size - 1, false),
+ ParamsFor<f32>(default_f32.size - 1, false),
+ ParamsFor<vec2<f32>>(default_vec2.size - 1, false),
+ ParamsFor<vec3<f32>>(default_vec3.size - 1, false),
+ ParamsFor<vec4<f32>>(default_vec4.size - 1, false),
+ ParamsFor<mat2x2<f32>>(default_mat2x2.size - 1, false),
+ ParamsFor<mat3x3<f32>>(default_mat3x3.size - 1, false),
+ ParamsFor<mat4x4<f32>>(default_mat4x4.size - 1, false),
+
+ // Succeed because stride equals multiple of element alignment
+ ParamsFor<u32>(default_u32.align * 7, true),
+ ParamsFor<i32>(default_i32.align * 7, true),
+ ParamsFor<f32>(default_f32.align * 7, true),
+ ParamsFor<vec2<f32>>(default_vec2.align * 7, true),
+ ParamsFor<vec3<f32>>(default_vec3.align * 7, true),
+ ParamsFor<vec4<f32>>(default_vec4.align * 7, true),
+ ParamsFor<mat2x2<f32>>(default_mat2x2.align * 7, true),
+ ParamsFor<mat3x3<f32>>(default_mat3x3.align * 7, true),
+ ParamsFor<mat4x4<f32>>(default_mat4x4.align * 7, true),
+
+ // Fail because stride is not multiple of element alignment
+ ParamsFor<u32>((default_u32.align - 1) * 7, false),
+ ParamsFor<i32>((default_i32.align - 1) * 7, false),
+ ParamsFor<f32>((default_f32.align - 1) * 7, false),
+ ParamsFor<vec2<f32>>((default_vec2.align - 1) * 7, false),
+ ParamsFor<vec3<f32>>((default_vec3.align - 1) * 7, false),
+ ParamsFor<vec4<f32>>((default_vec4.align - 1) * 7, false),
+ ParamsFor<mat2x2<f32>>((default_mat2x2.align - 1) * 7, false),
+ ParamsFor<mat3x3<f32>>((default_mat3x3.align - 1) * 7, false),
+ ParamsFor<mat4x4<f32>>((default_mat4x4.align - 1) * 7, false)));
TEST_F(ArrayStrideTest, DuplicateAttribute) {
- auto* arr = ty.array(Source{{12, 34}}, ty.i32(), 4,
- {
- create<ast::StrideAttribute>(Source{{12, 34}}, 4),
- create<ast::StrideAttribute>(Source{{56, 78}}, 4),
- });
+ auto* arr = ty.array(Source{{12, 34}}, ty.i32(), 4_u,
+ {
+ create<ast::StrideAttribute>(Source{{12, 34}}, 4_i),
+ create<ast::StrideAttribute>(Source{{56, 78}}, 4_i),
+ });
- Global("myarray", arr, ast::StorageClass::kPrivate);
+ Global("myarray", arr, ast::StorageClass::kPrivate);
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- R"(56:78 error: duplicate stride attribute
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ R"(56:78 error: duplicate stride attribute
12:34 note: first attribute declared here)");
}
@@ -937,147 +895,132 @@ namespace {
using ResourceAttributeTest = ResolverTest;
TEST_F(ResourceAttributeTest, UniformBufferMissingBinding) {
- auto* s = Structure("S", {Member("x", ty.i32())});
- Global(Source{{12, 34}}, "G", ty.Of(s), ast::StorageClass::kUniform);
+ auto* s = Structure("S", {Member("x", ty.i32())});
+ Global(Source{{12, 34}}, "G", ty.Of(s), ast::StorageClass::kUniform);
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- R"(12:34 error: resource variables require @group and @binding attributes)");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ R"(12:34 error: resource variables require @group and @binding attributes)");
}
TEST_F(ResourceAttributeTest, StorageBufferMissingBinding) {
- auto* s = Structure("S", {Member("x", ty.i32())});
- Global(Source{{12, 34}}, "G", ty.Of(s), ast::StorageClass::kStorage,
- ast::Access::kRead);
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- R"(12:34 error: resource variables require @group and @binding attributes)");
+ auto* s = Structure("S", {Member("x", ty.i32())});
+ Global(Source{{12, 34}}, "G", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kRead);
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ R"(12:34 error: resource variables require @group and @binding attributes)");
}
TEST_F(ResourceAttributeTest, TextureMissingBinding) {
- Global(Source{{12, 34}}, "G", ty.depth_texture(ast::TextureDimension::k2d),
- ast::StorageClass::kNone);
+ Global(Source{{12, 34}}, "G", ty.depth_texture(ast::TextureDimension::k2d),
+ ast::StorageClass::kNone);
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- R"(12:34 error: resource variables require @group and @binding attributes)");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ R"(12:34 error: resource variables require @group and @binding attributes)");
}
TEST_F(ResourceAttributeTest, SamplerMissingBinding) {
- Global(Source{{12, 34}}, "G", ty.sampler(ast::SamplerKind::kSampler),
- ast::StorageClass::kNone);
+ Global(Source{{12, 34}}, "G", ty.sampler(ast::SamplerKind::kSampler), ast::StorageClass::kNone);
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- R"(12:34 error: resource variables require @group and @binding attributes)");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ R"(12:34 error: resource variables require @group and @binding attributes)");
}
TEST_F(ResourceAttributeTest, BindingPairMissingBinding) {
- Global(Source{{12, 34}}, "G", ty.sampler(ast::SamplerKind::kSampler),
- ast::StorageClass::kNone,
- ast::AttributeList{
- create<ast::GroupAttribute>(1),
- });
+ Global(Source{{12, 34}}, "G", ty.sampler(ast::SamplerKind::kSampler), ast::StorageClass::kNone,
+ ast::AttributeList{
+ create<ast::GroupAttribute>(1),
+ });
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- R"(12:34 error: resource variables require @group and @binding attributes)");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ R"(12:34 error: resource variables require @group and @binding attributes)");
}
TEST_F(ResourceAttributeTest, BindingPairMissingGroup) {
- Global(Source{{12, 34}}, "G", ty.sampler(ast::SamplerKind::kSampler),
- ast::StorageClass::kNone,
- ast::AttributeList{
- create<ast::BindingAttribute>(1),
- });
+ Global(Source{{12, 34}}, "G", ty.sampler(ast::SamplerKind::kSampler), ast::StorageClass::kNone,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(1),
+ });
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- R"(12:34 error: resource variables require @group and @binding attributes)");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ R"(12:34 error: resource variables require @group and @binding attributes)");
}
TEST_F(ResourceAttributeTest, BindingPointUsedTwiceByEntryPoint) {
- Global(Source{{12, 34}}, "A",
- ty.sampled_texture(ast::TextureDimension::k2d, ty.f32()),
- ast::StorageClass::kNone,
- ast::AttributeList{
- create<ast::BindingAttribute>(1),
- create<ast::GroupAttribute>(2),
- });
- Global(Source{{56, 78}}, "B",
- ty.sampled_texture(ast::TextureDimension::k2d, ty.f32()),
- ast::StorageClass::kNone,
- ast::AttributeList{
- create<ast::BindingAttribute>(1),
- create<ast::GroupAttribute>(2),
- });
+ Global(Source{{12, 34}}, "A", ty.sampled_texture(ast::TextureDimension::k2d, ty.f32()),
+ ast::StorageClass::kNone,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(1),
+ create<ast::GroupAttribute>(2),
+ });
+ Global(Source{{56, 78}}, "B", ty.sampled_texture(ast::TextureDimension::k2d, ty.f32()),
+ ast::StorageClass::kNone,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(1),
+ create<ast::GroupAttribute>(2),
+ });
+
+ Func("F", {}, ty.void_(),
+ {
+ Decl(Var("a", ty.vec4<f32>(), ast::StorageClass::kNone,
+ Call("textureLoad", "A", vec2<i32>(1_i, 2_i), 0_i))),
+ Decl(Var("b", ty.vec4<f32>(), ast::StorageClass::kNone,
+ Call("textureLoad", "B", vec2<i32>(1_i, 2_i), 0_i))),
+ },
+ {Stage(ast::PipelineStage::kFragment)});
- Func("F", {}, ty.void_(),
- {
- Decl(Var("a", ty.vec4<f32>(), ast::StorageClass::kNone,
- Call("textureLoad", "A", vec2<i32>(1, 2), 0))),
- Decl(Var("b", ty.vec4<f32>(), ast::StorageClass::kNone,
- Call("textureLoad", "B", vec2<i32>(1, 2), 0))),
- },
- {Stage(ast::PipelineStage::kFragment)});
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- R"(56:78 error: entry point 'F' references multiple variables that use the same resource binding @group(2), @binding(1)
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(
+ r()->error(),
+ R"(56:78 error: entry point 'F' references multiple variables that use the same resource binding @group(2), @binding(1)
12:34 note: first resource binding usage declared here)");
}
TEST_F(ResourceAttributeTest, BindingPointUsedTwiceByDifferentEntryPoints) {
- Global(Source{{12, 34}}, "A",
- ty.sampled_texture(ast::TextureDimension::k2d, ty.f32()),
- ast::StorageClass::kNone,
- ast::AttributeList{
- create<ast::BindingAttribute>(1),
- create<ast::GroupAttribute>(2),
- });
- Global(Source{{56, 78}}, "B",
- ty.sampled_texture(ast::TextureDimension::k2d, ty.f32()),
- ast::StorageClass::kNone,
- ast::AttributeList{
- create<ast::BindingAttribute>(1),
- create<ast::GroupAttribute>(2),
- });
+ Global(Source{{12, 34}}, "A", ty.sampled_texture(ast::TextureDimension::k2d, ty.f32()),
+ ast::StorageClass::kNone,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(1),
+ create<ast::GroupAttribute>(2),
+ });
+ Global(Source{{56, 78}}, "B", ty.sampled_texture(ast::TextureDimension::k2d, ty.f32()),
+ ast::StorageClass::kNone,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(1),
+ create<ast::GroupAttribute>(2),
+ });
+
+ Func("F_A", {}, ty.void_(),
+ {
+ Decl(Var("a", ty.vec4<f32>(), ast::StorageClass::kNone,
+ Call("textureLoad", "A", vec2<i32>(1_i, 2_i), 0_i))),
+ },
+ {Stage(ast::PipelineStage::kFragment)});
+ Func("F_B", {}, ty.void_(),
+ {
+ Decl(Var("b", ty.vec4<f32>(), ast::StorageClass::kNone,
+ Call("textureLoad", "B", vec2<i32>(1_i, 2_i), 0_i))),
+ },
+ {Stage(ast::PipelineStage::kFragment)});
- Func("F_A", {}, ty.void_(),
- {
- Decl(Var("a", ty.vec4<f32>(), ast::StorageClass::kNone,
- Call("textureLoad", "A", vec2<i32>(1, 2), 0))),
- },
- {Stage(ast::PipelineStage::kFragment)});
- Func("F_B", {}, ty.void_(),
- {
- Decl(Var("b", ty.vec4<f32>(), ast::StorageClass::kNone,
- Call("textureLoad", "B", vec2<i32>(1, 2), 0))),
- },
- {Stage(ast::PipelineStage::kFragment)});
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResourceAttributeTest, BindingPointOnNonResource) {
- Global(Source{{12, 34}}, "G", ty.f32(), ast::StorageClass::kPrivate,
- ast::AttributeList{
- create<ast::BindingAttribute>(1),
- create<ast::GroupAttribute>(2),
- });
+ Global(Source{{12, 34}}, "G", ty.f32(), ast::StorageClass::kPrivate,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(1),
+ create<ast::GroupAttribute>(2),
+ });
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- R"(12:34 error: non-resource variables must not have @group or @binding attributes)");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ R"(12:34 error: non-resource variables must not have @group or @binding attributes)");
}
} // namespace
@@ -1087,31 +1030,30 @@ namespace InvariantAttributeTests {
namespace {
using InvariantAttributeTests = ResolverTest;
TEST_F(InvariantAttributeTests, InvariantWithPosition) {
- auto* param = Param("p", ty.vec4<f32>(),
- {Invariant(Source{{12, 34}}),
- Builtin(Source{{56, 78}}, ast::Builtin::kPosition)});
- Func("main", ast::VariableList{param}, ty.vec4<f32>(),
- ast::StatementList{Return(Construct(ty.vec4<f32>()))},
- ast::AttributeList{Stage(ast::PipelineStage::kFragment)},
- ast::AttributeList{
- Location(0),
- });
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ auto* param =
+ Param("p", ty.vec4<f32>(),
+ {Invariant(Source{{12, 34}}), Builtin(Source{{56, 78}}, ast::Builtin::kPosition)});
+ Func("main", ast::VariableList{param}, ty.vec4<f32>(),
+ ast::StatementList{Return(Construct(ty.vec4<f32>()))},
+ ast::AttributeList{Stage(ast::PipelineStage::kFragment)},
+ ast::AttributeList{
+ Location(0),
+ });
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(InvariantAttributeTests, InvariantWithoutPosition) {
- auto* param =
- Param("p", ty.vec4<f32>(), {Invariant(Source{{12, 34}}), Location(0)});
- Func("main", ast::VariableList{param}, ty.vec4<f32>(),
- ast::StatementList{Return(Construct(ty.vec4<f32>()))},
- ast::AttributeList{Stage(ast::PipelineStage::kFragment)},
- ast::AttributeList{
- Location(0),
- });
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: invariant attribute must only be applied to a "
- "position builtin");
+ auto* param = Param("p", ty.vec4<f32>(), {Invariant(Source{{12, 34}}), Location(0)});
+ Func("main", ast::VariableList{param}, ty.vec4<f32>(),
+ ast::StatementList{Return(Construct(ty.vec4<f32>()))},
+ ast::AttributeList{Stage(ast::PipelineStage::kFragment)},
+ ast::AttributeList{
+ Location(0),
+ });
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: invariant attribute must only be applied to a "
+ "position builtin");
}
} // namespace
} // namespace InvariantAttributeTests
@@ -1121,56 +1063,54 @@ namespace {
using WorkgroupAttribute = ResolverTest;
TEST_F(WorkgroupAttribute, ComputeShaderPass) {
- Func("main", {}, ty.void_(), {},
- {Stage(ast::PipelineStage::kCompute),
- create<ast::WorkgroupAttribute>(Source{{12, 34}}, Expr(1))});
+ Func("main", {}, ty.void_(), {},
+ {Stage(ast::PipelineStage::kCompute),
+ create<ast::WorkgroupAttribute>(Source{{12, 34}}, Expr(1_i))});
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(WorkgroupAttribute, Missing) {
- Func(Source{{12, 34}}, "main", {}, ty.void_(), {},
- {Stage(ast::PipelineStage::kCompute)});
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- "12:34 error: a compute shader must include 'workgroup_size' in its "
- "attributes");
+ Func(Source{{12, 34}}, "main", {}, ty.void_(), {}, {Stage(ast::PipelineStage::kCompute)});
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: a compute shader must include 'workgroup_size' in its "
+ "attributes");
}
TEST_F(WorkgroupAttribute, NotAnEntryPoint) {
- Func("main", {}, ty.void_(), {},
- {create<ast::WorkgroupAttribute>(Source{{12, 34}}, Expr(1))});
+ Func("main", {}, ty.void_(), {},
+ {create<ast::WorkgroupAttribute>(Source{{12, 34}}, Expr(1_i))});
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: the workgroup_size attribute is only valid for "
- "compute stages");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: the workgroup_size attribute is only valid for "
+ "compute stages");
}
TEST_F(WorkgroupAttribute, NotAComputeShader) {
- Func("main", {}, ty.void_(), {},
- {Stage(ast::PipelineStage::kFragment),
- create<ast::WorkgroupAttribute>(Source{{12, 34}}, Expr(1))});
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: the workgroup_size attribute is only valid for "
- "compute stages");
+ Func("main", {}, ty.void_(), {},
+ {Stage(ast::PipelineStage::kFragment),
+ create<ast::WorkgroupAttribute>(Source{{12, 34}}, Expr(1_i))});
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: the workgroup_size attribute is only valid for "
+ "compute stages");
}
TEST_F(WorkgroupAttribute, DuplicateAttribute) {
- Func(Source{{12, 34}}, "main", {}, ty.void_(), {},
- {
- Stage(ast::PipelineStage::kCompute),
- WorkgroupSize(Source{{12, 34}}, 1, nullptr, nullptr),
- WorkgroupSize(Source{{56, 78}}, 2, nullptr, nullptr),
- });
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- R"(56:78 error: duplicate workgroup_size attribute
+ Func(Source{{12, 34}}, "main", {}, ty.void_(), {},
+ {
+ Stage(ast::PipelineStage::kCompute),
+ WorkgroupSize(Source{{12, 34}}, 1_i, nullptr, nullptr),
+ WorkgroupSize(Source{{56, 78}}, 2_i, nullptr, nullptr),
+ });
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ R"(56:78 error: duplicate workgroup_size attribute
12:34 note: first attribute declared here)");
}
@@ -1183,184 +1123,157 @@ namespace {
using InterpolateTest = ResolverTest;
struct Params {
- ast::InterpolationType type;
- ast::InterpolationSampling sampling;
- bool should_pass;
+ ast::InterpolationType type;
+ ast::InterpolationSampling sampling;
+ bool should_pass;
};
struct TestWithParams : ResolverTestWithParam<Params> {};
using InterpolateParameterTest = TestWithParams;
TEST_P(InterpolateParameterTest, All) {
- auto& params = GetParam();
+ auto& params = GetParam();
- Func("main",
- ast::VariableList{Param(
- "a", ty.f32(),
- {Location(0),
- Interpolate(Source{{12, 34}}, params.type, params.sampling)})},
- ty.void_(), {},
- ast::AttributeList{Stage(ast::PipelineStage::kFragment)});
+ Func("main",
+ ast::VariableList{
+ Param("a", ty.f32(),
+ {Location(0), Interpolate(Source{{12, 34}}, params.type, params.sampling)})},
+ ty.void_(), {}, ast::AttributeList{Stage(ast::PipelineStage::kFragment)});
- if (params.should_pass) {
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- } else {
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: flat interpolation attribute must not have a "
- "sampling parameter");
- }
+ if (params.should_pass) {
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ } else {
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: flat interpolation attribute must not have a "
+ "sampling parameter");
+ }
}
TEST_P(InterpolateParameterTest, IntegerScalar) {
- auto& params = GetParam();
-
- Func("main",
- ast::VariableList{Param(
- "a", ty.i32(),
- {Location(0),
- Interpolate(Source{{12, 34}}, params.type, params.sampling)})},
- ty.void_(), {},
- ast::AttributeList{Stage(ast::PipelineStage::kFragment)});
-
- if (params.type != ast::InterpolationType::kFlat) {
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: interpolation type must be 'flat' for integral "
- "user-defined IO types");
- } else if (params.should_pass) {
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- } else {
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: flat interpolation attribute must not have a "
- "sampling parameter");
- }
+ auto& params = GetParam();
+
+ Func("main",
+ ast::VariableList{
+ Param("a", ty.i32(),
+ {Location(0), Interpolate(Source{{12, 34}}, params.type, params.sampling)})},
+ ty.void_(), {}, ast::AttributeList{Stage(ast::PipelineStage::kFragment)});
+
+ if (params.type != ast::InterpolationType::kFlat) {
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: interpolation type must be 'flat' for integral "
+ "user-defined IO types");
+ } else if (params.should_pass) {
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ } else {
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: flat interpolation attribute must not have a "
+ "sampling parameter");
+ }
}
TEST_P(InterpolateParameterTest, IntegerVector) {
- auto& params = GetParam();
-
- Func("main",
- ast::VariableList{Param(
- "a", ty.vec4<u32>(),
- {Location(0),
- Interpolate(Source{{12, 34}}, params.type, params.sampling)})},
- ty.void_(), {},
- ast::AttributeList{Stage(ast::PipelineStage::kFragment)});
-
- if (params.type != ast::InterpolationType::kFlat) {
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: interpolation type must be 'flat' for integral "
- "user-defined IO types");
- } else if (params.should_pass) {
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- } else {
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: flat interpolation attribute must not have a "
- "sampling parameter");
- }
+ auto& params = GetParam();
+
+ Func("main",
+ ast::VariableList{
+ Param("a", ty.vec4<u32>(),
+ {Location(0), Interpolate(Source{{12, 34}}, params.type, params.sampling)})},
+ ty.void_(), {}, ast::AttributeList{Stage(ast::PipelineStage::kFragment)});
+
+ if (params.type != ast::InterpolationType::kFlat) {
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: interpolation type must be 'flat' for integral "
+ "user-defined IO types");
+ } else if (params.should_pass) {
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ } else {
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: flat interpolation attribute must not have a "
+ "sampling parameter");
+ }
}
INSTANTIATE_TEST_SUITE_P(
ResolverAttributeValidationTest,
InterpolateParameterTest,
- testing::Values(Params{ast::InterpolationType::kPerspective,
- ast::InterpolationSampling::kNone, true},
- Params{ast::InterpolationType::kPerspective,
- ast::InterpolationSampling::kCenter, true},
- Params{ast::InterpolationType::kPerspective,
- ast::InterpolationSampling::kCentroid, true},
- Params{ast::InterpolationType::kPerspective,
- ast::InterpolationSampling::kSample, true},
- Params{ast::InterpolationType::kLinear,
- ast::InterpolationSampling::kNone, true},
- Params{ast::InterpolationType::kLinear,
- ast::InterpolationSampling::kCenter, true},
- Params{ast::InterpolationType::kLinear,
- ast::InterpolationSampling::kCentroid, true},
- Params{ast::InterpolationType::kLinear,
- ast::InterpolationSampling::kSample, true},
- // flat interpolation must not have a sampling type
- Params{ast::InterpolationType::kFlat,
- ast::InterpolationSampling::kNone, true},
- Params{ast::InterpolationType::kFlat,
- ast::InterpolationSampling::kCenter, false},
- Params{ast::InterpolationType::kFlat,
- ast::InterpolationSampling::kCentroid, false},
- Params{ast::InterpolationType::kFlat,
- ast::InterpolationSampling::kSample, false}));
+ testing::Values(
+ Params{ast::InterpolationType::kPerspective, ast::InterpolationSampling::kNone, true},
+ Params{ast::InterpolationType::kPerspective, ast::InterpolationSampling::kCenter, true},
+ Params{ast::InterpolationType::kPerspective, ast::InterpolationSampling::kCentroid, true},
+ Params{ast::InterpolationType::kPerspective, ast::InterpolationSampling::kSample, true},
+ Params{ast::InterpolationType::kLinear, ast::InterpolationSampling::kNone, true},
+ Params{ast::InterpolationType::kLinear, ast::InterpolationSampling::kCenter, true},
+ Params{ast::InterpolationType::kLinear, ast::InterpolationSampling::kCentroid, true},
+ Params{ast::InterpolationType::kLinear, ast::InterpolationSampling::kSample, true},
+ // flat interpolation must not have a sampling type
+ Params{ast::InterpolationType::kFlat, ast::InterpolationSampling::kNone, true},
+ Params{ast::InterpolationType::kFlat, ast::InterpolationSampling::kCenter, false},
+ Params{ast::InterpolationType::kFlat, ast::InterpolationSampling::kCentroid, false},
+ Params{ast::InterpolationType::kFlat, ast::InterpolationSampling::kSample, false}));
TEST_F(InterpolateTest, FragmentInput_Integer_MissingFlatInterpolation) {
- Func("main",
- ast::VariableList{Param(Source{{12, 34}}, "a", ty.i32(), {Location(0)})},
- ty.void_(), {},
- ast::AttributeList{Stage(ast::PipelineStage::kFragment)});
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- R"(12:34 error: integral user-defined fragment inputs must have a flat interpolation attribute)");
+ Func("main", ast::VariableList{Param(Source{{12, 34}}, "a", ty.i32(), {Location(0)})},
+ ty.void_(), {}, ast::AttributeList{Stage(ast::PipelineStage::kFragment)});
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(
+ r()->error(),
+ R"(12:34 error: integral user-defined fragment inputs must have a flat interpolation attribute)");
}
TEST_F(InterpolateTest, VertexOutput_Integer_MissingFlatInterpolation) {
- auto* s = Structure(
- "S",
- {
- Member("pos", ty.vec4<f32>(), {Builtin(ast::Builtin::kPosition)}),
- Member(Source{{12, 34}}, "u", ty.u32(), {Location(0)}),
- });
- Func("main", {}, ty.Of(s), {Return(Construct(ty.Of(s)))},
- ast::AttributeList{Stage(ast::PipelineStage::kVertex)});
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- R"(12:34 error: integral user-defined vertex outputs must have a flat interpolation attribute
+ auto* s = Structure("S", {
+ Member("pos", ty.vec4<f32>(), {Builtin(ast::Builtin::kPosition)}),
+ Member(Source{{12, 34}}, "u", ty.u32(), {Location(0)}),
+ });
+ Func("main", {}, ty.Of(s), {Return(Construct(ty.Of(s)))},
+ ast::AttributeList{Stage(ast::PipelineStage::kVertex)});
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(
+ r()->error(),
+ R"(12:34 error: integral user-defined vertex outputs must have a flat interpolation attribute
note: while analysing entry point 'main')");
}
TEST_F(InterpolateTest, MissingLocationAttribute_Parameter) {
- Func("main",
- ast::VariableList{
- Param("a", ty.vec4<f32>(),
- {Builtin(ast::Builtin::kPosition),
- Interpolate(Source{{12, 34}}, ast::InterpolationType::kFlat,
- ast::InterpolationSampling::kNone)})},
- ty.void_(), {},
- ast::AttributeList{Stage(ast::PipelineStage::kFragment)});
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- R"(12:34 error: interpolate attribute must only be used with @location)");
+ Func("main",
+ ast::VariableList{Param("a", ty.vec4<f32>(),
+ {Builtin(ast::Builtin::kPosition),
+ Interpolate(Source{{12, 34}}, ast::InterpolationType::kFlat,
+ ast::InterpolationSampling::kNone)})},
+ ty.void_(), {}, ast::AttributeList{Stage(ast::PipelineStage::kFragment)});
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ R"(12:34 error: interpolate attribute must only be used with @location)");
}
TEST_F(InterpolateTest, MissingLocationAttribute_ReturnType) {
- Func("main", {}, ty.vec4<f32>(), {Return(Construct(ty.vec4<f32>()))},
- ast::AttributeList{Stage(ast::PipelineStage::kVertex)},
- {Builtin(ast::Builtin::kPosition),
- Interpolate(Source{{12, 34}}, ast::InterpolationType::kFlat,
- ast::InterpolationSampling::kNone)});
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- R"(12:34 error: interpolate attribute must only be used with @location)");
+ Func("main", {}, ty.vec4<f32>(), {Return(Construct(ty.vec4<f32>()))},
+ ast::AttributeList{Stage(ast::PipelineStage::kVertex)},
+ {Builtin(ast::Builtin::kPosition),
+ Interpolate(Source{{12, 34}}, ast::InterpolationType::kFlat,
+ ast::InterpolationSampling::kNone)});
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ R"(12:34 error: interpolate attribute must only be used with @location)");
}
TEST_F(InterpolateTest, MissingLocationAttribute_Struct) {
- Structure(
- "S", {Member("a", ty.f32(),
- {Interpolate(Source{{12, 34}}, ast::InterpolationType::kFlat,
- ast::InterpolationSampling::kNone)})});
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- R"(12:34 error: interpolate attribute must only be used with @location)");
+ Structure("S", {Member("a", ty.f32(),
+ {Interpolate(Source{{12, 34}}, ast::InterpolationType::kFlat,
+ ast::InterpolationSampling::kNone)})});
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ R"(12:34 error: interpolate attribute must only be used with @location)");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/resolver/bitcast_validation_test.cc b/chromium/third_party/dawn/src/tint/resolver/bitcast_validation_test.cc
index b7b7e9ec974..78223fc87ed 100644
--- a/chromium/third_party/dawn/src/tint/resolver/bitcast_validation_test.cc
+++ b/chromium/third_party/dawn/src/tint/resolver/bitcast_validation_test.cc
@@ -22,36 +22,36 @@ namespace tint::resolver {
namespace {
struct Type {
- template <typename T>
- static constexpr Type Create() {
- return Type{builder::DataType<T>::AST, builder::DataType<T>::Sem,
- builder::DataType<T>::Expr};
- }
-
- builder::ast_type_func_ptr ast;
- builder::sem_type_func_ptr sem;
- builder::ast_expr_func_ptr expr;
+ template <typename T>
+ static constexpr Type Create() {
+ return Type{builder::DataType<T>::AST, builder::DataType<T>::Sem,
+ builder::DataType<T>::Expr};
+ }
+
+ builder::ast_type_func_ptr ast;
+ builder::sem_type_func_ptr sem;
+ builder::ast_expr_func_ptr expr;
};
static constexpr Type kNumericScalars[] = {
- Type::Create<builder::f32>(),
- Type::Create<builder::i32>(),
- Type::Create<builder::u32>(),
+ Type::Create<f32>(),
+ Type::Create<i32>(),
+ Type::Create<u32>(),
};
static constexpr Type kVec2NumericScalars[] = {
- Type::Create<builder::vec2<builder::f32>>(),
- Type::Create<builder::vec2<builder::i32>>(),
- Type::Create<builder::vec2<builder::u32>>(),
+ Type::Create<builder::vec2<f32>>(),
+ Type::Create<builder::vec2<i32>>(),
+ Type::Create<builder::vec2<u32>>(),
};
static constexpr Type kVec3NumericScalars[] = {
- Type::Create<builder::vec3<builder::f32>>(),
- Type::Create<builder::vec3<builder::i32>>(),
- Type::Create<builder::vec3<builder::u32>>(),
+ Type::Create<builder::vec3<f32>>(),
+ Type::Create<builder::vec3<i32>>(),
+ Type::Create<builder::vec3<u32>>(),
};
static constexpr Type kVec4NumericScalars[] = {
- Type::Create<builder::vec4<builder::f32>>(),
- Type::Create<builder::vec4<builder::i32>>(),
- Type::Create<builder::vec4<builder::u32>>(),
+ Type::Create<builder::vec4<f32>>(),
+ Type::Create<builder::vec4<i32>>(),
+ Type::Create<builder::vec4<u32>>(),
};
static constexpr Type kInvalid[] = {
// A non-exhaustive selection of uncastable types
@@ -59,168 +59,152 @@ static constexpr Type kInvalid[] = {
Type::Create<builder::vec2<bool>>(),
Type::Create<builder::vec3<bool>>(),
Type::Create<builder::vec4<bool>>(),
- Type::Create<builder::array<2, builder::i32>>(),
- Type::Create<builder::array<3, builder::u32>>(),
- Type::Create<builder::array<4, builder::f32>>(),
+ Type::Create<builder::array<2, i32>>(),
+ Type::Create<builder::array<3, u32>>(),
+ Type::Create<builder::array<4, f32>>(),
Type::Create<builder::array<5, bool>>(),
- Type::Create<builder::mat2x2<builder::f32>>(),
- Type::Create<builder::mat3x3<builder::f32>>(),
- Type::Create<builder::mat4x4<builder::f32>>(),
- Type::Create<builder::ptr<builder::i32>>(),
- Type::Create<builder::ptr<builder::array<2, builder::i32>>>(),
- Type::Create<builder::ptr<builder::mat2x2<builder::f32>>>(),
+ Type::Create<builder::mat2x2<f32>>(),
+ Type::Create<builder::mat3x3<f32>>(),
+ Type::Create<builder::mat4x4<f32>>(),
+ Type::Create<builder::ptr<i32>>(),
+ Type::Create<builder::ptr<builder::array<2, i32>>>(),
+ Type::Create<builder::ptr<builder::mat2x2<f32>>>(),
};
-using ResolverBitcastValidationTest =
- ResolverTestWithParam<std::tuple<Type, Type>>;
+using ResolverBitcastValidationTest = ResolverTestWithParam<std::tuple<Type, Type>>;
////////////////////////////////////////////////////////////////////////////////
// Valid bitcasts
////////////////////////////////////////////////////////////////////////////////
using ResolverBitcastValidationTestPass = ResolverBitcastValidationTest;
TEST_P(ResolverBitcastValidationTestPass, Test) {
- auto src = std::get<0>(GetParam());
- auto dst = std::get<1>(GetParam());
+ auto src = std::get<0>(GetParam());
+ auto dst = std::get<1>(GetParam());
- auto* cast = Bitcast(dst.ast(*this), src.expr(*this, 0));
- WrapInFunction(cast);
+ auto* cast = Bitcast(dst.ast(*this), src.expr(*this, 0));
+ WrapInFunction(cast);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
- EXPECT_EQ(TypeOf(cast), dst.sem(*this));
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_EQ(TypeOf(cast), dst.sem(*this));
}
INSTANTIATE_TEST_SUITE_P(Scalars,
ResolverBitcastValidationTestPass,
testing::Combine(testing::ValuesIn(kNumericScalars),
testing::ValuesIn(kNumericScalars)));
-INSTANTIATE_TEST_SUITE_P(
- Vec2,
- ResolverBitcastValidationTestPass,
- testing::Combine(testing::ValuesIn(kVec2NumericScalars),
- testing::ValuesIn(kVec2NumericScalars)));
-INSTANTIATE_TEST_SUITE_P(
- Vec3,
- ResolverBitcastValidationTestPass,
- testing::Combine(testing::ValuesIn(kVec3NumericScalars),
- testing::ValuesIn(kVec3NumericScalars)));
-INSTANTIATE_TEST_SUITE_P(
- Vec4,
- ResolverBitcastValidationTestPass,
- testing::Combine(testing::ValuesIn(kVec4NumericScalars),
- testing::ValuesIn(kVec4NumericScalars)));
+INSTANTIATE_TEST_SUITE_P(Vec2,
+ ResolverBitcastValidationTestPass,
+ testing::Combine(testing::ValuesIn(kVec2NumericScalars),
+ testing::ValuesIn(kVec2NumericScalars)));
+INSTANTIATE_TEST_SUITE_P(Vec3,
+ ResolverBitcastValidationTestPass,
+ testing::Combine(testing::ValuesIn(kVec3NumericScalars),
+ testing::ValuesIn(kVec3NumericScalars)));
+INSTANTIATE_TEST_SUITE_P(Vec4,
+ ResolverBitcastValidationTestPass,
+ testing::Combine(testing::ValuesIn(kVec4NumericScalars),
+ testing::ValuesIn(kVec4NumericScalars)));
////////////////////////////////////////////////////////////////////////////////
// Invalid source type for bitcasts
////////////////////////////////////////////////////////////////////////////////
using ResolverBitcastValidationTestInvalidSrcTy = ResolverBitcastValidationTest;
TEST_P(ResolverBitcastValidationTestInvalidSrcTy, Test) {
- auto src = std::get<0>(GetParam());
- auto dst = std::get<1>(GetParam());
+ auto src = std::get<0>(GetParam());
+ auto dst = std::get<1>(GetParam());
- auto* cast = Bitcast(dst.ast(*this), Expr(Source{{12, 34}}, "src"));
- WrapInFunction(Const("src", nullptr, src.expr(*this, 0)), cast);
+ auto* cast = Bitcast(dst.ast(*this), Expr(Source{{12, 34}}, "src"));
+ WrapInFunction(Let("src", nullptr, src.expr(*this, 0)), cast);
- auto expected = "12:34 error: '" + src.sem(*this)->FriendlyName(Symbols()) +
- "' cannot be bitcast";
+ auto expected =
+ "12:34 error: '" + src.sem(*this)->FriendlyName(Symbols()) + "' cannot be bitcast";
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), expected);
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), expected);
}
INSTANTIATE_TEST_SUITE_P(Scalars,
ResolverBitcastValidationTestInvalidSrcTy,
testing::Combine(testing::ValuesIn(kInvalid),
testing::ValuesIn(kNumericScalars)));
-INSTANTIATE_TEST_SUITE_P(
- Vec2,
- ResolverBitcastValidationTestInvalidSrcTy,
- testing::Combine(testing::ValuesIn(kInvalid),
- testing::ValuesIn(kVec2NumericScalars)));
-INSTANTIATE_TEST_SUITE_P(
- Vec3,
- ResolverBitcastValidationTestInvalidSrcTy,
- testing::Combine(testing::ValuesIn(kInvalid),
- testing::ValuesIn(kVec3NumericScalars)));
-INSTANTIATE_TEST_SUITE_P(
- Vec4,
- ResolverBitcastValidationTestInvalidSrcTy,
- testing::Combine(testing::ValuesIn(kInvalid),
- testing::ValuesIn(kVec4NumericScalars)));
+INSTANTIATE_TEST_SUITE_P(Vec2,
+ ResolverBitcastValidationTestInvalidSrcTy,
+ testing::Combine(testing::ValuesIn(kInvalid),
+ testing::ValuesIn(kVec2NumericScalars)));
+INSTANTIATE_TEST_SUITE_P(Vec3,
+ ResolverBitcastValidationTestInvalidSrcTy,
+ testing::Combine(testing::ValuesIn(kInvalid),
+ testing::ValuesIn(kVec3NumericScalars)));
+INSTANTIATE_TEST_SUITE_P(Vec4,
+ ResolverBitcastValidationTestInvalidSrcTy,
+ testing::Combine(testing::ValuesIn(kInvalid),
+ testing::ValuesIn(kVec4NumericScalars)));
////////////////////////////////////////////////////////////////////////////////
// Invalid target type for bitcasts
////////////////////////////////////////////////////////////////////////////////
using ResolverBitcastValidationTestInvalidDstTy = ResolverBitcastValidationTest;
TEST_P(ResolverBitcastValidationTestInvalidDstTy, Test) {
- auto src = std::get<0>(GetParam());
- auto dst = std::get<1>(GetParam());
+ auto src = std::get<0>(GetParam());
+ auto dst = std::get<1>(GetParam());
- // Use an alias so we can put a Source on the bitcast type
- Alias("T", dst.ast(*this));
- WrapInFunction(
- Bitcast(ty.type_name(Source{{12, 34}}, "T"), src.expr(*this, 0)));
+ // Use an alias so we can put a Source on the bitcast type
+ Alias("T", dst.ast(*this));
+ WrapInFunction(Bitcast(ty.type_name(Source{{12, 34}}, "T"), src.expr(*this, 0)));
- auto expected = "12:34 error: cannot bitcast to '" +
- dst.sem(*this)->FriendlyName(Symbols()) + "'";
+ auto expected =
+ "12:34 error: cannot bitcast to '" + dst.sem(*this)->FriendlyName(Symbols()) + "'";
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), expected);
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), expected);
}
INSTANTIATE_TEST_SUITE_P(Scalars,
ResolverBitcastValidationTestInvalidDstTy,
testing::Combine(testing::ValuesIn(kNumericScalars),
testing::ValuesIn(kInvalid)));
-INSTANTIATE_TEST_SUITE_P(
- Vec2,
- ResolverBitcastValidationTestInvalidDstTy,
- testing::Combine(testing::ValuesIn(kVec2NumericScalars),
- testing::ValuesIn(kInvalid)));
-INSTANTIATE_TEST_SUITE_P(
- Vec3,
- ResolverBitcastValidationTestInvalidDstTy,
- testing::Combine(testing::ValuesIn(kVec3NumericScalars),
- testing::ValuesIn(kInvalid)));
-INSTANTIATE_TEST_SUITE_P(
- Vec4,
- ResolverBitcastValidationTestInvalidDstTy,
- testing::Combine(testing::ValuesIn(kVec4NumericScalars),
- testing::ValuesIn(kInvalid)));
+INSTANTIATE_TEST_SUITE_P(Vec2,
+ ResolverBitcastValidationTestInvalidDstTy,
+ testing::Combine(testing::ValuesIn(kVec2NumericScalars),
+ testing::ValuesIn(kInvalid)));
+INSTANTIATE_TEST_SUITE_P(Vec3,
+ ResolverBitcastValidationTestInvalidDstTy,
+ testing::Combine(testing::ValuesIn(kVec3NumericScalars),
+ testing::ValuesIn(kInvalid)));
+INSTANTIATE_TEST_SUITE_P(Vec4,
+ ResolverBitcastValidationTestInvalidDstTy,
+ testing::Combine(testing::ValuesIn(kVec4NumericScalars),
+ testing::ValuesIn(kInvalid)));
////////////////////////////////////////////////////////////////////////////////
// Incompatible bitcast, but both src and dst types are valid
////////////////////////////////////////////////////////////////////////////////
using ResolverBitcastValidationTestIncompatible = ResolverBitcastValidationTest;
TEST_P(ResolverBitcastValidationTestIncompatible, Test) {
- auto src = std::get<0>(GetParam());
- auto dst = std::get<1>(GetParam());
+ auto src = std::get<0>(GetParam());
+ auto dst = std::get<1>(GetParam());
- WrapInFunction(Bitcast(Source{{12, 34}}, dst.ast(*this), src.expr(*this, 0)));
+ WrapInFunction(Bitcast(Source{{12, 34}}, dst.ast(*this), src.expr(*this, 0)));
- auto expected = "12:34 error: cannot bitcast from '" +
- src.sem(*this)->FriendlyName(Symbols()) + "' to '" +
- dst.sem(*this)->FriendlyName(Symbols()) + "'";
+ auto expected = "12:34 error: cannot bitcast from '" + src.sem(*this)->FriendlyName(Symbols()) +
+ "' to '" + dst.sem(*this)->FriendlyName(Symbols()) + "'";
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), expected);
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), expected);
}
-INSTANTIATE_TEST_SUITE_P(
- ScalarToVec2,
- ResolverBitcastValidationTestIncompatible,
- testing::Combine(testing::ValuesIn(kNumericScalars),
- testing::ValuesIn(kVec2NumericScalars)));
-INSTANTIATE_TEST_SUITE_P(
- Vec2ToVec3,
- ResolverBitcastValidationTestIncompatible,
- testing::Combine(testing::ValuesIn(kVec2NumericScalars),
- testing::ValuesIn(kVec3NumericScalars)));
-INSTANTIATE_TEST_SUITE_P(
- Vec3ToVec4,
- ResolverBitcastValidationTestIncompatible,
- testing::Combine(testing::ValuesIn(kVec3NumericScalars),
- testing::ValuesIn(kVec4NumericScalars)));
-INSTANTIATE_TEST_SUITE_P(
- Vec4ToScalar,
- ResolverBitcastValidationTestIncompatible,
- testing::Combine(testing::ValuesIn(kVec4NumericScalars),
- testing::ValuesIn(kNumericScalars)));
+INSTANTIATE_TEST_SUITE_P(ScalarToVec2,
+ ResolverBitcastValidationTestIncompatible,
+ testing::Combine(testing::ValuesIn(kNumericScalars),
+ testing::ValuesIn(kVec2NumericScalars)));
+INSTANTIATE_TEST_SUITE_P(Vec2ToVec3,
+ ResolverBitcastValidationTestIncompatible,
+ testing::Combine(testing::ValuesIn(kVec2NumericScalars),
+ testing::ValuesIn(kVec3NumericScalars)));
+INSTANTIATE_TEST_SUITE_P(Vec3ToVec4,
+ ResolverBitcastValidationTestIncompatible,
+ testing::Combine(testing::ValuesIn(kVec3NumericScalars),
+ testing::ValuesIn(kVec4NumericScalars)));
+INSTANTIATE_TEST_SUITE_P(Vec4ToScalar,
+ ResolverBitcastValidationTestIncompatible,
+ testing::Combine(testing::ValuesIn(kVec4NumericScalars),
+ testing::ValuesIn(kNumericScalars)));
} // namespace
} // namespace tint::resolver
diff --git a/chromium/third_party/dawn/src/tint/resolver/builtin_test.cc b/chromium/third_party/dawn/src/tint/resolver/builtin_test.cc
index 732e70704d3..74a898ca709 100644
--- a/chromium/third_party/dawn/src/tint/resolver/builtin_test.cc
+++ b/chromium/third_party/dawn/src/tint/resolver/builtin_test.cc
@@ -32,13 +32,15 @@
#include "src/tint/sem/call.h"
#include "src/tint/sem/function.h"
#include "src/tint/sem/member_accessor_expression.h"
-#include "src/tint/sem/sampled_texture_type.h"
+#include "src/tint/sem/sampled_texture.h"
#include "src/tint/sem/statement.h"
#include "src/tint/sem/variable.h"
using ::testing::ElementsAre;
using ::testing::HasSubstr;
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::resolver {
namespace {
@@ -48,49 +50,48 @@ using ResolverBuiltinTest = ResolverTest;
using ResolverBuiltinDerivativeTest = ResolverTestWithParam<std::string>;
TEST_P(ResolverBuiltinDerivativeTest, Scalar) {
- auto name = GetParam();
+ auto name = GetParam();
- Global("ident", ty.f32(), ast::StorageClass::kPrivate);
+ Global("ident", ty.f32(), ast::StorageClass::kPrivate);
- auto* expr = Call(name, "ident");
- Func("func", {}, ty.void_(), {Ignore(expr)},
- {create<ast::StageAttribute>(ast::PipelineStage::kFragment)});
+ auto* expr = Call(name, "ident");
+ Func("func", {}, ty.void_(), {Ignore(expr)},
+ {create<ast::StageAttribute>(ast::PipelineStage::kFragment)});
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(expr), nullptr);
- ASSERT_TRUE(TypeOf(expr)->Is<sem::F32>());
+ ASSERT_NE(TypeOf(expr), nullptr);
+ ASSERT_TRUE(TypeOf(expr)->Is<sem::F32>());
}
TEST_P(ResolverBuiltinDerivativeTest, Vector) {
- auto name = GetParam();
- Global("ident", ty.vec4<f32>(), ast::StorageClass::kPrivate);
+ auto name = GetParam();
+ Global("ident", ty.vec4<f32>(), ast::StorageClass::kPrivate);
- auto* expr = Call(name, "ident");
- Func("func", {}, ty.void_(), {Ignore(expr)},
- {create<ast::StageAttribute>(ast::PipelineStage::kFragment)});
+ auto* expr = Call(name, "ident");
+ Func("func", {}, ty.void_(), {Ignore(expr)},
+ {create<ast::StageAttribute>(ast::PipelineStage::kFragment)});
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(expr), nullptr);
- ASSERT_TRUE(TypeOf(expr)->Is<sem::Vector>());
- EXPECT_TRUE(TypeOf(expr)->As<sem::Vector>()->type()->Is<sem::F32>());
- EXPECT_EQ(TypeOf(expr)->As<sem::Vector>()->Width(), 4u);
+ ASSERT_NE(TypeOf(expr), nullptr);
+ ASSERT_TRUE(TypeOf(expr)->Is<sem::Vector>());
+ EXPECT_TRUE(TypeOf(expr)->As<sem::Vector>()->type()->Is<sem::F32>());
+ EXPECT_EQ(TypeOf(expr)->As<sem::Vector>()->Width(), 4u);
}
TEST_P(ResolverBuiltinDerivativeTest, MissingParam) {
- auto name = GetParam();
+ auto name = GetParam();
- auto* expr = Call(name);
- WrapInFunction(expr);
+ auto* expr = Call(name);
+ WrapInFunction(expr);
- EXPECT_FALSE(r()->Resolve());
+ EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), "error: no matching call to " + name +
- "()\n\n"
- "2 candidate functions:\n " +
- name + "(f32) -> f32\n " + name +
- "(vecN<f32>) -> vecN<f32>\n");
+ EXPECT_EQ(r()->error(), "error: no matching call to " + name +
+ "()\n\n"
+ "2 candidate functions:\n " +
+ name + "(f32) -> f32\n " + name + "(vecN<f32>) -> vecN<f32>\n");
}
INSTANTIATE_TEST_SUITE_P(ResolverTest,
@@ -107,30 +108,30 @@ INSTANTIATE_TEST_SUITE_P(ResolverTest,
using ResolverBuiltinTest_BoolMethod = ResolverTestWithParam<std::string>;
TEST_P(ResolverBuiltinTest_BoolMethod, Scalar) {
- auto name = GetParam();
+ auto name = GetParam();
- Global("my_var", ty.bool_(), ast::StorageClass::kPrivate);
+ Global("my_var", ty.bool_(), ast::StorageClass::kPrivate);
- auto* expr = Call(name, "my_var");
- WrapInFunction(expr);
+ auto* expr = Call(name, "my_var");
+ WrapInFunction(expr);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(expr), nullptr);
- EXPECT_TRUE(TypeOf(expr)->Is<sem::Bool>());
+ ASSERT_NE(TypeOf(expr), nullptr);
+ EXPECT_TRUE(TypeOf(expr)->Is<sem::Bool>());
}
TEST_P(ResolverBuiltinTest_BoolMethod, Vector) {
- auto name = GetParam();
+ auto name = GetParam();
- Global("my_var", ty.vec3<bool>(), ast::StorageClass::kPrivate);
+ Global("my_var", ty.vec3<bool>(), ast::StorageClass::kPrivate);
- auto* expr = Call(name, "my_var");
- WrapInFunction(expr);
+ auto* expr = Call(name, "my_var");
+ WrapInFunction(expr);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(expr), nullptr);
- EXPECT_TRUE(TypeOf(expr)->Is<sem::Bool>());
+ ASSERT_NE(TypeOf(expr), nullptr);
+ EXPECT_TRUE(TypeOf(expr)->Is<sem::Bool>());
}
INSTANTIATE_TEST_SUITE_P(ResolverTest,
ResolverBuiltinTest_BoolMethod,
@@ -138,167 +139,161 @@ INSTANTIATE_TEST_SUITE_P(ResolverTest,
enum class Texture { kF32, kI32, kU32 };
inline std::ostream& operator<<(std::ostream& out, Texture data) {
- if (data == Texture::kF32) {
- out << "f32";
- } else if (data == Texture::kI32) {
- out << "i32";
- } else {
- out << "u32";
- }
- return out;
+ if (data == Texture::kF32) {
+ out << "f32";
+ } else if (data == Texture::kI32) {
+ out << "i32";
+ } else {
+ out << "u32";
+ }
+ return out;
}
struct TextureTestParams {
- ast::TextureDimension dim;
- Texture type = Texture::kF32;
- ast::TexelFormat format = ast::TexelFormat::kR32Float;
+ ast::TextureDimension dim;
+ Texture type = Texture::kF32;
+ ast::TexelFormat format = ast::TexelFormat::kR32Float;
};
inline std::ostream& operator<<(std::ostream& out, TextureTestParams data) {
- out << data.dim << "_" << data.type;
- return out;
-}
-
-class ResolverBuiltinTest_TextureOperation
- : public ResolverTestWithParam<TextureTestParams> {
- public:
- /// Gets an appropriate type for the coords parameter depending the the
- /// dimensionality of the texture being sampled.
- /// @param dim dimensionality of the texture being sampled
- /// @param scalar the scalar type
- /// @returns a pointer to a type appropriate for the coord param
- const ast::Type* GetCoordsType(ast::TextureDimension dim,
- const ast::Type* scalar) {
- switch (dim) {
- case ast::TextureDimension::k1d:
- return scalar;
- case ast::TextureDimension::k2d:
- case ast::TextureDimension::k2dArray:
- return ty.vec(scalar, 2);
- case ast::TextureDimension::k3d:
- case ast::TextureDimension::kCube:
- case ast::TextureDimension::kCubeArray:
- return ty.vec(scalar, 3);
- default:
- [=]() { FAIL() << "Unsupported texture dimension: " << dim; }();
+ out << data.dim << "_" << data.type;
+ return out;
+}
+
+class ResolverBuiltinTest_TextureOperation : public ResolverTestWithParam<TextureTestParams> {
+ public:
+ /// Gets an appropriate type for the coords parameter depending on the
+ /// dimensionality of the texture being sampled.
+ /// @param dim dimensionality of the texture being sampled
+ /// @param scalar the scalar type
+ /// @returns a pointer to a type appropriate for the coord param
+ const ast::Type* GetCoordsType(ast::TextureDimension dim, const ast::Type* scalar) {
+ switch (dim) {
+ case ast::TextureDimension::k1d:
+ return scalar;
+ case ast::TextureDimension::k2d:
+ case ast::TextureDimension::k2dArray:
+ return ty.vec(scalar, 2);
+ case ast::TextureDimension::k3d:
+ case ast::TextureDimension::kCube:
+ case ast::TextureDimension::kCubeArray:
+ return ty.vec(scalar, 3);
+ default:
+ [=]() { FAIL() << "Unsupported texture dimension: " << dim; }();
+ }
+ return nullptr;
}
- return nullptr;
- }
-
- void add_call_param(std::string name,
- const ast::Type* type,
- ast::ExpressionList* call_params) {
- if (type->IsAnyOf<ast::Texture, ast::Sampler>()) {
- Global(name, type,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(0),
- });
- } else {
- Global(name, type, ast::StorageClass::kPrivate);
- }
+ void add_call_param(std::string name, const ast::Type* type, ast::ExpressionList* call_params) {
+ if (type->IsAnyOf<ast::Texture, ast::Sampler>()) {
+ Global(name, type,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(0),
+ });
- call_params->push_back(Expr(name));
- }
- const ast::Type* subtype(Texture type) {
- if (type == Texture::kF32) {
- return ty.f32();
+ } else {
+ Global(name, type, ast::StorageClass::kPrivate);
+ }
+
+ call_params->push_back(Expr(name));
}
- if (type == Texture::kI32) {
- return ty.i32();
+ const ast::Type* subtype(Texture type) {
+ if (type == Texture::kF32) {
+ return ty.f32();
+ }
+ if (type == Texture::kI32) {
+ return ty.i32();
+ }
+ return ty.u32();
}
- return ty.u32();
- }
};
-using ResolverBuiltinTest_SampledTextureOperation =
- ResolverBuiltinTest_TextureOperation;
+using ResolverBuiltinTest_SampledTextureOperation = ResolverBuiltinTest_TextureOperation;
TEST_P(ResolverBuiltinTest_SampledTextureOperation, TextureLoadSampled) {
- auto dim = GetParam().dim;
- auto type = GetParam().type;
+ auto dim = GetParam().dim;
+ auto type = GetParam().type;
- auto* s = subtype(type);
- auto* coords_type = GetCoordsType(dim, ty.i32());
- auto* texture_type = ty.sampled_texture(dim, s);
+ auto* s = subtype(type);
+ auto* coords_type = GetCoordsType(dim, ty.i32());
+ auto* texture_type = ty.sampled_texture(dim, s);
- ast::ExpressionList call_params;
+ ast::ExpressionList call_params;
- add_call_param("texture", texture_type, &call_params);
- add_call_param("coords", coords_type, &call_params);
- if (dim == ast::TextureDimension::k2dArray) {
- add_call_param("array_index", ty.i32(), &call_params);
- }
- add_call_param("level", ty.i32(), &call_params);
+ add_call_param("texture", texture_type, &call_params);
+ add_call_param("coords", coords_type, &call_params);
+ if (dim == ast::TextureDimension::k2dArray) {
+ add_call_param("array_index", ty.i32(), &call_params);
+ }
+ add_call_param("level", ty.i32(), &call_params);
- auto* expr = Call("textureLoad", call_params);
- WrapInFunction(expr);
+ auto* expr = Call("textureLoad", call_params);
+ WrapInFunction(expr);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(expr), nullptr);
- ASSERT_TRUE(TypeOf(expr)->Is<sem::Vector>());
- if (type == Texture::kF32) {
- EXPECT_TRUE(TypeOf(expr)->As<sem::Vector>()->type()->Is<sem::F32>());
- } else if (type == Texture::kI32) {
- EXPECT_TRUE(TypeOf(expr)->As<sem::Vector>()->type()->Is<sem::I32>());
- } else {
- EXPECT_TRUE(TypeOf(expr)->As<sem::Vector>()->type()->Is<sem::U32>());
- }
- EXPECT_EQ(TypeOf(expr)->As<sem::Vector>()->Width(), 4u);
+ ASSERT_NE(TypeOf(expr), nullptr);
+ ASSERT_TRUE(TypeOf(expr)->Is<sem::Vector>());
+ if (type == Texture::kF32) {
+ EXPECT_TRUE(TypeOf(expr)->As<sem::Vector>()->type()->Is<sem::F32>());
+ } else if (type == Texture::kI32) {
+ EXPECT_TRUE(TypeOf(expr)->As<sem::Vector>()->type()->Is<sem::I32>());
+ } else {
+ EXPECT_TRUE(TypeOf(expr)->As<sem::Vector>()->type()->Is<sem::U32>());
+ }
+ EXPECT_EQ(TypeOf(expr)->As<sem::Vector>()->Width(), 4u);
}
-INSTANTIATE_TEST_SUITE_P(
- ResolverTest,
- ResolverBuiltinTest_SampledTextureOperation,
- testing::Values(TextureTestParams{ast::TextureDimension::k1d},
- TextureTestParams{ast::TextureDimension::k2d},
- TextureTestParams{ast::TextureDimension::k2dArray},
- TextureTestParams{ast::TextureDimension::k3d}));
+INSTANTIATE_TEST_SUITE_P(ResolverTest,
+ ResolverBuiltinTest_SampledTextureOperation,
+ testing::Values(TextureTestParams{ast::TextureDimension::k1d},
+ TextureTestParams{ast::TextureDimension::k2d},
+ TextureTestParams{ast::TextureDimension::k2dArray},
+ TextureTestParams{ast::TextureDimension::k3d}));
TEST_F(ResolverBuiltinTest, Dot_Vec2) {
- Global("my_var", ty.vec2<f32>(), ast::StorageClass::kPrivate);
+ Global("my_var", ty.vec2<f32>(), ast::StorageClass::kPrivate);
- auto* expr = Call("dot", "my_var", "my_var");
- WrapInFunction(expr);
+ auto* expr = Call("dot", "my_var", "my_var");
+ WrapInFunction(expr);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(expr), nullptr);
- EXPECT_TRUE(TypeOf(expr)->Is<sem::F32>());
+ ASSERT_NE(TypeOf(expr), nullptr);
+ EXPECT_TRUE(TypeOf(expr)->Is<sem::F32>());
}
TEST_F(ResolverBuiltinTest, Dot_Vec3) {
- Global("my_var", ty.vec3<i32>(), ast::StorageClass::kPrivate);
+ Global("my_var", ty.vec3<i32>(), ast::StorageClass::kPrivate);
- auto* expr = Call("dot", "my_var", "my_var");
- WrapInFunction(expr);
+ auto* expr = Call("dot", "my_var", "my_var");
+ WrapInFunction(expr);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(expr), nullptr);
- EXPECT_TRUE(TypeOf(expr)->Is<sem::I32>());
+ ASSERT_NE(TypeOf(expr), nullptr);
+ EXPECT_TRUE(TypeOf(expr)->Is<sem::I32>());
}
TEST_F(ResolverBuiltinTest, Dot_Vec4) {
- Global("my_var", ty.vec4<u32>(), ast::StorageClass::kPrivate);
+ Global("my_var", ty.vec4<u32>(), ast::StorageClass::kPrivate);
- auto* expr = Call("dot", "my_var", "my_var");
- WrapInFunction(expr);
+ auto* expr = Call("dot", "my_var", "my_var");
+ WrapInFunction(expr);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(expr), nullptr);
- EXPECT_TRUE(TypeOf(expr)->Is<sem::U32>());
+ ASSERT_NE(TypeOf(expr), nullptr);
+ EXPECT_TRUE(TypeOf(expr)->Is<sem::U32>());
}
TEST_F(ResolverBuiltinTest, Dot_Error_Scalar) {
- auto* expr = Call("dot", 1.0f, 1.0f);
- WrapInFunction(expr);
+ auto* expr = Call("dot", 1_f, 1_f);
+ WrapInFunction(expr);
- EXPECT_FALSE(r()->Resolve());
+ EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- R"(error: no matching call to dot(f32, f32)
+ EXPECT_EQ(r()->error(),
+ R"(error: no matching call to dot(f32, f32)
1 candidate function:
dot(vecN<T>, vecN<T>) -> T where: T is f32, i32 or u32
@@ -306,29 +301,29 @@ TEST_F(ResolverBuiltinTest, Dot_Error_Scalar) {
}
TEST_F(ResolverBuiltinTest, Select) {
- Global("my_var", ty.vec3<f32>(), ast::StorageClass::kPrivate);
+ Global("my_var", ty.vec3<f32>(), ast::StorageClass::kPrivate);
- Global("bool_var", ty.vec3<bool>(), ast::StorageClass::kPrivate);
+ Global("bool_var", ty.vec3<bool>(), ast::StorageClass::kPrivate);
- auto* expr = Call("select", "my_var", "my_var", "bool_var");
- WrapInFunction(expr);
+ auto* expr = Call("select", "my_var", "my_var", "bool_var");
+ WrapInFunction(expr);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(expr), nullptr);
- EXPECT_TRUE(TypeOf(expr)->Is<sem::Vector>());
- EXPECT_EQ(TypeOf(expr)->As<sem::Vector>()->Width(), 3u);
- EXPECT_TRUE(TypeOf(expr)->As<sem::Vector>()->type()->Is<sem::F32>());
+ ASSERT_NE(TypeOf(expr), nullptr);
+ EXPECT_TRUE(TypeOf(expr)->Is<sem::Vector>());
+ EXPECT_EQ(TypeOf(expr)->As<sem::Vector>()->Width(), 3u);
+ EXPECT_TRUE(TypeOf(expr)->As<sem::Vector>()->type()->Is<sem::F32>());
}
TEST_F(ResolverBuiltinTest, Select_Error_NoParams) {
- auto* expr = Call("select");
- WrapInFunction(expr);
+ auto* expr = Call("select");
+ WrapInFunction(expr);
- EXPECT_FALSE(r()->Resolve());
+ EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- R"(error: no matching call to select()
+ EXPECT_EQ(r()->error(),
+ R"(error: no matching call to select()
3 candidate functions:
select(T, T, bool) -> T where: T is f32, i32, u32 or bool
@@ -338,13 +333,13 @@ TEST_F(ResolverBuiltinTest, Select_Error_NoParams) {
}
TEST_F(ResolverBuiltinTest, Select_Error_SelectorInt) {
- auto* expr = Call("select", 1, 1, 1);
- WrapInFunction(expr);
+ auto* expr = Call("select", 1_i, 1_i, 1_i);
+ WrapInFunction(expr);
- EXPECT_FALSE(r()->Resolve());
+ EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- R"(error: no matching call to select(i32, i32, i32)
+ EXPECT_EQ(r()->error(),
+ R"(error: no matching call to select(i32, i32, i32)
3 candidate functions:
select(T, T, bool) -> T where: T is f32, i32, u32 or bool
@@ -354,15 +349,14 @@ TEST_F(ResolverBuiltinTest, Select_Error_SelectorInt) {
}
TEST_F(ResolverBuiltinTest, Select_Error_Matrix) {
- auto* expr = Call(
- "select", mat2x2<f32>(vec2<f32>(1.0f, 1.0f), vec2<f32>(1.0f, 1.0f)),
- mat2x2<f32>(vec2<f32>(1.0f, 1.0f), vec2<f32>(1.0f, 1.0f)), Expr(true));
- WrapInFunction(expr);
+ auto* expr = Call("select", mat2x2<f32>(vec2<f32>(1_f, 1_f), vec2<f32>(1_f, 1_f)),
+ mat2x2<f32>(vec2<f32>(1_f, 1_f), vec2<f32>(1_f, 1_f)), Expr(true));
+ WrapInFunction(expr);
- EXPECT_FALSE(r()->Resolve());
+ EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- R"(error: no matching call to select(mat2x2<f32>, mat2x2<f32>, bool)
+ EXPECT_EQ(r()->error(),
+ R"(error: no matching call to select(mat2x2<f32>, mat2x2<f32>, bool)
3 candidate functions:
select(T, T, bool) -> T where: T is f32, i32, u32 or bool
@@ -372,13 +366,13 @@ TEST_F(ResolverBuiltinTest, Select_Error_Matrix) {
}
TEST_F(ResolverBuiltinTest, Select_Error_MismatchTypes) {
- auto* expr = Call("select", 1.0f, vec2<f32>(2.0f, 3.0f), Expr(true));
- WrapInFunction(expr);
+ auto* expr = Call("select", 1_f, vec2<f32>(2_f, 3_f), Expr(true));
+ WrapInFunction(expr);
- EXPECT_FALSE(r()->Resolve());
+ EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- R"(error: no matching call to select(f32, vec2<f32>, bool)
+ EXPECT_EQ(r()->error(),
+ R"(error: no matching call to select(f32, vec2<f32>, bool)
3 candidate functions:
select(T, T, bool) -> T where: T is f32, i32, u32 or bool
@@ -388,14 +382,13 @@ TEST_F(ResolverBuiltinTest, Select_Error_MismatchTypes) {
}
TEST_F(ResolverBuiltinTest, Select_Error_MismatchVectorSize) {
- auto* expr = Call("select", vec2<f32>(1.0f, 2.0f),
- vec3<f32>(3.0f, 4.0f, 5.0f), Expr(true));
- WrapInFunction(expr);
+ auto* expr = Call("select", vec2<f32>(1_f, 2_f), vec3<f32>(3_f, 4_f, 5_f), Expr(true));
+ WrapInFunction(expr);
- EXPECT_FALSE(r()->Resolve());
+ EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- R"(error: no matching call to select(vec2<f32>, vec3<f32>, bool)
+ EXPECT_EQ(r()->error(),
+ R"(error: no matching call to select(vec2<f32>, vec3<f32>, bool)
3 candidate functions:
select(T, T, bool) -> T where: T is f32, i32, u32 or bool
@@ -405,258 +398,248 @@ TEST_F(ResolverBuiltinTest, Select_Error_MismatchVectorSize) {
}
struct BuiltinData {
- const char* name;
- BuiltinType builtin;
+ const char* name;
+ BuiltinType builtin;
};
inline std::ostream& operator<<(std::ostream& out, BuiltinData data) {
- out << data.name;
- return out;
+ out << data.name;
+ return out;
}
using ResolverBuiltinTest_Barrier = ResolverTestWithParam<BuiltinData>;
TEST_P(ResolverBuiltinTest_Barrier, InferType) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* call = Call(param.name);
- WrapInFunction(CallStmt(call));
+ auto* call = Call(param.name);
+ WrapInFunction(CallStmt(call));
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(call), nullptr);
- EXPECT_TRUE(TypeOf(call)->Is<sem::Void>());
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_NE(TypeOf(call), nullptr);
+ EXPECT_TRUE(TypeOf(call)->Is<sem::Void>());
}
TEST_P(ResolverBuiltinTest_Barrier, Error_TooManyParams) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* call = Call(param.name, vec4<f32>(1.f, 2.f, 3.f, 4.f), 1.0f);
- WrapInFunction(CallStmt(call));
+ auto* call = Call(param.name, vec4<f32>(1_f, 2_f, 3_f, 4_f), 1_f);
+ WrapInFunction(CallStmt(call));
- EXPECT_FALSE(r()->Resolve());
+ EXPECT_FALSE(r()->Resolve());
- EXPECT_THAT(r()->error(), HasSubstr("error: no matching call to " +
- std::string(param.name)));
+ EXPECT_THAT(r()->error(), HasSubstr("error: no matching call to " + std::string(param.name)));
}
INSTANTIATE_TEST_SUITE_P(
ResolverTest,
ResolverBuiltinTest_Barrier,
testing::Values(BuiltinData{"storageBarrier", BuiltinType::kStorageBarrier},
- BuiltinData{"workgroupBarrier",
- BuiltinType::kWorkgroupBarrier}));
+ BuiltinData{"workgroupBarrier", BuiltinType::kWorkgroupBarrier}));
using ResolverBuiltinTest_DataPacking = ResolverTestWithParam<BuiltinData>;
TEST_P(ResolverBuiltinTest_DataPacking, InferType) {
- auto param = GetParam();
+ auto param = GetParam();
- bool pack4 = param.builtin == BuiltinType::kPack4x8snorm ||
- param.builtin == BuiltinType::kPack4x8unorm;
+ bool pack4 =
+ param.builtin == BuiltinType::kPack4x8snorm || param.builtin == BuiltinType::kPack4x8unorm;
- auto* call = pack4 ? Call(param.name, vec4<f32>(1.f, 2.f, 3.f, 4.f))
- : Call(param.name, vec2<f32>(1.f, 2.f));
- WrapInFunction(call);
+ auto* call = pack4 ? Call(param.name, vec4<f32>(1_f, 2_f, 3_f, 4_f))
+ : Call(param.name, vec2<f32>(1_f, 2_f));
+ WrapInFunction(call);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(call), nullptr);
- EXPECT_TRUE(TypeOf(call)->Is<sem::U32>());
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_NE(TypeOf(call), nullptr);
+ EXPECT_TRUE(TypeOf(call)->Is<sem::U32>());
}
TEST_P(ResolverBuiltinTest_DataPacking, Error_IncorrectParamType) {
- auto param = GetParam();
+ auto param = GetParam();
- bool pack4 = param.builtin == BuiltinType::kPack4x8snorm ||
- param.builtin == BuiltinType::kPack4x8unorm;
+ bool pack4 =
+ param.builtin == BuiltinType::kPack4x8snorm || param.builtin == BuiltinType::kPack4x8unorm;
- auto* call = pack4 ? Call(param.name, vec4<i32>(1, 2, 3, 4))
- : Call(param.name, vec2<i32>(1, 2));
- WrapInFunction(call);
+ auto* call = pack4 ? Call(param.name, vec4<i32>(1_i, 2_i, 3_i, 4_i))
+ : Call(param.name, vec2<i32>(1_i, 2_i));
+ WrapInFunction(call);
- EXPECT_FALSE(r()->Resolve());
+ EXPECT_FALSE(r()->Resolve());
- EXPECT_THAT(r()->error(), HasSubstr("error: no matching call to " +
- std::string(param.name)));
+ EXPECT_THAT(r()->error(), HasSubstr("error: no matching call to " + std::string(param.name)));
}
TEST_P(ResolverBuiltinTest_DataPacking, Error_NoParams) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* call = Call(param.name);
- WrapInFunction(call);
+ auto* call = Call(param.name);
+ WrapInFunction(call);
- EXPECT_FALSE(r()->Resolve());
+ EXPECT_FALSE(r()->Resolve());
- EXPECT_THAT(r()->error(), HasSubstr("error: no matching call to " +
- std::string(param.name)));
+ EXPECT_THAT(r()->error(), HasSubstr("error: no matching call to " + std::string(param.name)));
}
TEST_P(ResolverBuiltinTest_DataPacking, Error_TooManyParams) {
- auto param = GetParam();
+ auto param = GetParam();
- bool pack4 = param.builtin == BuiltinType::kPack4x8snorm ||
- param.builtin == BuiltinType::kPack4x8unorm;
+ bool pack4 =
+ param.builtin == BuiltinType::kPack4x8snorm || param.builtin == BuiltinType::kPack4x8unorm;
- auto* call = pack4 ? Call(param.name, vec4<f32>(1.f, 2.f, 3.f, 4.f), 1.0f)
- : Call(param.name, vec2<f32>(1.f, 2.f), 1.0f);
- WrapInFunction(call);
+ auto* call = pack4 ? Call(param.name, vec4<f32>(1_f, 2_f, 3_f, 4_f), 1_f)
+ : Call(param.name, vec2<f32>(1_f, 2_f), 1_f);
+ WrapInFunction(call);
- EXPECT_FALSE(r()->Resolve());
+ EXPECT_FALSE(r()->Resolve());
- EXPECT_THAT(r()->error(), HasSubstr("error: no matching call to " +
- std::string(param.name)));
+ EXPECT_THAT(r()->error(), HasSubstr("error: no matching call to " + std::string(param.name)));
}
-INSTANTIATE_TEST_SUITE_P(
- ResolverTest,
- ResolverBuiltinTest_DataPacking,
- testing::Values(BuiltinData{"pack4x8snorm", BuiltinType::kPack4x8snorm},
- BuiltinData{"pack4x8unorm", BuiltinType::kPack4x8unorm},
- BuiltinData{"pack2x16snorm", BuiltinType::kPack2x16snorm},
- BuiltinData{"pack2x16unorm", BuiltinType::kPack2x16unorm},
- BuiltinData{"pack2x16float", BuiltinType::kPack2x16float}));
+INSTANTIATE_TEST_SUITE_P(ResolverTest,
+ ResolverBuiltinTest_DataPacking,
+ testing::Values(BuiltinData{"pack4x8snorm", BuiltinType::kPack4x8snorm},
+ BuiltinData{"pack4x8unorm", BuiltinType::kPack4x8unorm},
+ BuiltinData{"pack2x16snorm", BuiltinType::kPack2x16snorm},
+ BuiltinData{"pack2x16unorm", BuiltinType::kPack2x16unorm},
+ BuiltinData{"pack2x16float",
+ BuiltinType::kPack2x16float}));
using ResolverBuiltinTest_DataUnpacking = ResolverTestWithParam<BuiltinData>;
TEST_P(ResolverBuiltinTest_DataUnpacking, InferType) {
- auto param = GetParam();
+ auto param = GetParam();
- bool pack4 = param.builtin == BuiltinType::kUnpack4x8snorm ||
- param.builtin == BuiltinType::kUnpack4x8unorm;
+ bool pack4 = param.builtin == BuiltinType::kUnpack4x8snorm ||
+ param.builtin == BuiltinType::kUnpack4x8unorm;
- auto* call = Call(param.name, 1u);
- WrapInFunction(call);
+ auto* call = Call(param.name, 1_u);
+ WrapInFunction(call);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(call), nullptr);
- EXPECT_TRUE(TypeOf(call)->is_float_vector());
- if (pack4) {
- EXPECT_EQ(TypeOf(call)->As<sem::Vector>()->Width(), 4u);
- } else {
- EXPECT_EQ(TypeOf(call)->As<sem::Vector>()->Width(), 2u);
- }
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_NE(TypeOf(call), nullptr);
+ EXPECT_TRUE(TypeOf(call)->is_float_vector());
+ if (pack4) {
+ EXPECT_EQ(TypeOf(call)->As<sem::Vector>()->Width(), 4u);
+ } else {
+ EXPECT_EQ(TypeOf(call)->As<sem::Vector>()->Width(), 2u);
+ }
}
INSTANTIATE_TEST_SUITE_P(
ResolverTest,
ResolverBuiltinTest_DataUnpacking,
- testing::Values(
- BuiltinData{"unpack4x8snorm", BuiltinType::kUnpack4x8snorm},
- BuiltinData{"unpack4x8unorm", BuiltinType::kUnpack4x8unorm},
- BuiltinData{"unpack2x16snorm", BuiltinType::kUnpack2x16snorm},
- BuiltinData{"unpack2x16unorm", BuiltinType::kUnpack2x16unorm},
- BuiltinData{"unpack2x16float", BuiltinType::kUnpack2x16float}));
+ testing::Values(BuiltinData{"unpack4x8snorm", BuiltinType::kUnpack4x8snorm},
+ BuiltinData{"unpack4x8unorm", BuiltinType::kUnpack4x8unorm},
+ BuiltinData{"unpack2x16snorm", BuiltinType::kUnpack2x16snorm},
+ BuiltinData{"unpack2x16unorm", BuiltinType::kUnpack2x16unorm},
+ BuiltinData{"unpack2x16float", BuiltinType::kUnpack2x16float}));
using ResolverBuiltinTest_SingleParam = ResolverTestWithParam<BuiltinData>;
TEST_P(ResolverBuiltinTest_SingleParam, Scalar) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* call = Call(param.name, 1.f);
- WrapInFunction(call);
+ auto* call = Call(param.name, 1_f);
+ WrapInFunction(call);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(call), nullptr);
- EXPECT_TRUE(TypeOf(call)->is_float_scalar());
+ ASSERT_NE(TypeOf(call), nullptr);
+ EXPECT_TRUE(TypeOf(call)->is_float_scalar());
}
TEST_P(ResolverBuiltinTest_SingleParam, Vector) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* call = Call(param.name, vec3<f32>(1.0f, 1.0f, 3.0f));
- WrapInFunction(call);
+ auto* call = Call(param.name, vec3<f32>(1_f, 1_f, 3_f));
+ WrapInFunction(call);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(call), nullptr);
- EXPECT_TRUE(TypeOf(call)->is_float_vector());
- EXPECT_EQ(TypeOf(call)->As<sem::Vector>()->Width(), 3u);
+ ASSERT_NE(TypeOf(call), nullptr);
+ EXPECT_TRUE(TypeOf(call)->is_float_vector());
+ EXPECT_EQ(TypeOf(call)->As<sem::Vector>()->Width(), 3u);
}
TEST_P(ResolverBuiltinTest_SingleParam, Error_NoParams) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* call = Call(param.name);
- WrapInFunction(call);
+ auto* call = Call(param.name);
+ WrapInFunction(call);
- EXPECT_FALSE(r()->Resolve());
+ EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "error: no matching call to " + std::string(param.name) +
- "()\n\n"
- "2 candidate functions:\n " +
- std::string(param.name) + "(f32) -> f32\n " +
- std::string(param.name) + "(vecN<f32>) -> vecN<f32>\n");
+ EXPECT_EQ(r()->error(), "error: no matching call to " + std::string(param.name) +
+ "()\n\n"
+ "2 candidate functions:\n " +
+ std::string(param.name) + "(f32) -> f32\n " +
+ std::string(param.name) + "(vecN<f32>) -> vecN<f32>\n");
}
TEST_P(ResolverBuiltinTest_SingleParam, Error_TooManyParams) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* call = Call(param.name, 1, 2, 3);
- WrapInFunction(call);
+ auto* call = Call(param.name, 1_i, 2_i, 3_i);
+ WrapInFunction(call);
- EXPECT_FALSE(r()->Resolve());
+ EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "error: no matching call to " + std::string(param.name) +
- "(i32, i32, i32)\n\n"
- "2 candidate functions:\n " +
- std::string(param.name) + "(f32) -> f32\n " +
- std::string(param.name) + "(vecN<f32>) -> vecN<f32>\n");
+ EXPECT_EQ(r()->error(), "error: no matching call to " + std::string(param.name) +
+ "(i32, i32, i32)\n\n"
+ "2 candidate functions:\n " +
+ std::string(param.name) + "(f32) -> f32\n " +
+ std::string(param.name) + "(vecN<f32>) -> vecN<f32>\n");
}
-INSTANTIATE_TEST_SUITE_P(
- ResolverTest,
- ResolverBuiltinTest_SingleParam,
- testing::Values(BuiltinData{"acos", BuiltinType::kAcos},
- BuiltinData{"asin", BuiltinType::kAsin},
- BuiltinData{"atan", BuiltinType::kAtan},
- BuiltinData{"ceil", BuiltinType::kCeil},
- BuiltinData{"cos", BuiltinType::kCos},
- BuiltinData{"cosh", BuiltinType::kCosh},
- BuiltinData{"exp", BuiltinType::kExp},
- BuiltinData{"exp2", BuiltinType::kExp2},
- BuiltinData{"floor", BuiltinType::kFloor},
- BuiltinData{"fract", BuiltinType::kFract},
- BuiltinData{"inverseSqrt", BuiltinType::kInverseSqrt},
- BuiltinData{"log", BuiltinType::kLog},
- BuiltinData{"log2", BuiltinType::kLog2},
- BuiltinData{"round", BuiltinType::kRound},
- BuiltinData{"sign", BuiltinType::kSign},
- BuiltinData{"sin", BuiltinType::kSin},
- BuiltinData{"sinh", BuiltinType::kSinh},
- BuiltinData{"sqrt", BuiltinType::kSqrt},
- BuiltinData{"tan", BuiltinType::kTan},
- BuiltinData{"tanh", BuiltinType::kTanh},
- BuiltinData{"trunc", BuiltinType::kTrunc}));
+INSTANTIATE_TEST_SUITE_P(ResolverTest,
+ ResolverBuiltinTest_SingleParam,
+ testing::Values(BuiltinData{"acos", BuiltinType::kAcos},
+ BuiltinData{"asin", BuiltinType::kAsin},
+ BuiltinData{"atan", BuiltinType::kAtan},
+ BuiltinData{"ceil", BuiltinType::kCeil},
+ BuiltinData{"cos", BuiltinType::kCos},
+ BuiltinData{"cosh", BuiltinType::kCosh},
+ BuiltinData{"exp", BuiltinType::kExp},
+ BuiltinData{"exp2", BuiltinType::kExp2},
+ BuiltinData{"floor", BuiltinType::kFloor},
+ BuiltinData{"fract", BuiltinType::kFract},
+ BuiltinData{"inverseSqrt", BuiltinType::kInverseSqrt},
+ BuiltinData{"log", BuiltinType::kLog},
+ BuiltinData{"log2", BuiltinType::kLog2},
+ BuiltinData{"round", BuiltinType::kRound},
+ BuiltinData{"sign", BuiltinType::kSign},
+ BuiltinData{"sin", BuiltinType::kSin},
+ BuiltinData{"sinh", BuiltinType::kSinh},
+ BuiltinData{"sqrt", BuiltinType::kSqrt},
+ BuiltinData{"tan", BuiltinType::kTan},
+ BuiltinData{"tanh", BuiltinType::kTanh},
+ BuiltinData{"trunc", BuiltinType::kTrunc}));
using ResolverBuiltinDataTest = ResolverTest;
TEST_F(ResolverBuiltinDataTest, ArrayLength_Vector) {
- auto* ary = ty.array<i32>();
- auto* str = Structure("S", {Member("x", ary)});
- Global("a", ty.Of(str), ast::StorageClass::kStorage, ast::Access::kRead,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(0),
- });
+ auto* ary = ty.array<i32>();
+ auto* str = Structure("S", {Member("x", ary)});
+ Global("a", ty.Of(str), ast::StorageClass::kStorage, ast::Access::kRead,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(0),
+ });
- auto* call = Call("arrayLength", AddressOf(MemberAccessor("a", "x")));
- WrapInFunction(call);
+ auto* call = Call("arrayLength", AddressOf(MemberAccessor("a", "x")));
+ WrapInFunction(call);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(call), nullptr);
- EXPECT_TRUE(TypeOf(call)->Is<sem::U32>());
+ ASSERT_NE(TypeOf(call), nullptr);
+ EXPECT_TRUE(TypeOf(call)->Is<sem::U32>());
}
TEST_F(ResolverBuiltinDataTest, ArrayLength_Error_ArraySized) {
- Global("arr", ty.array<int, 4>(), ast::StorageClass::kPrivate);
- auto* call = Call("arrayLength", AddressOf("arr"));
- WrapInFunction(call);
+ Global("arr", ty.array<i32, 4>(), ast::StorageClass::kPrivate);
+ auto* call = Call("arrayLength", AddressOf("arr"));
+ WrapInFunction(call);
- EXPECT_FALSE(r()->Resolve());
+ EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- R"(error: no matching call to arrayLength(ptr<private, array<i32, 4>, read_write>)
+ EXPECT_EQ(r()->error(),
+ R"(error: no matching call to arrayLength(ptr<private, array<i32, 4>, read_write>)
1 candidate function:
arrayLength(ptr<storage, array<T>, A>) -> u32
@@ -664,23 +647,23 @@ TEST_F(ResolverBuiltinDataTest, ArrayLength_Error_ArraySized) {
}
TEST_F(ResolverBuiltinDataTest, Normalize_Vector) {
- auto* call = Call("normalize", vec3<f32>(1.0f, 1.0f, 3.0f));
- WrapInFunction(call);
+ auto* call = Call("normalize", vec3<f32>(1_f, 1_f, 3_f));
+ WrapInFunction(call);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(call), nullptr);
- EXPECT_TRUE(TypeOf(call)->is_float_vector());
- EXPECT_EQ(TypeOf(call)->As<sem::Vector>()->Width(), 3u);
+ ASSERT_NE(TypeOf(call), nullptr);
+ EXPECT_TRUE(TypeOf(call)->is_float_vector());
+ EXPECT_EQ(TypeOf(call)->As<sem::Vector>()->Width(), 3u);
}
TEST_F(ResolverBuiltinDataTest, Normalize_Error_NoParams) {
- auto* call = Call("normalize");
- WrapInFunction(call);
+ auto* call = Call("normalize");
+ WrapInFunction(call);
- EXPECT_FALSE(r()->Resolve());
+ EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), R"(error: no matching call to normalize()
+ EXPECT_EQ(r()->error(), R"(error: no matching call to normalize()
1 candidate function:
normalize(vecN<f32>) -> vecN<f32>
@@ -688,77 +671,76 @@ TEST_F(ResolverBuiltinDataTest, Normalize_Error_NoParams) {
}
TEST_F(ResolverBuiltinDataTest, FrexpScalar) {
- auto* call = Call("frexp", 1.0f);
- WrapInFunction(call);
+ auto* call = Call("frexp", 1_f);
+ WrapInFunction(call);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(call), nullptr);
- auto* ty = TypeOf(call)->As<sem::Struct>();
- ASSERT_NE(ty, nullptr);
- ASSERT_EQ(ty->Members().size(), 2u);
+ ASSERT_NE(TypeOf(call), nullptr);
+ auto* ty = TypeOf(call)->As<sem::Struct>();
+ ASSERT_NE(ty, nullptr);
+ ASSERT_EQ(ty->Members().size(), 2u);
- auto* sig = ty->Members()[0];
- EXPECT_TRUE(sig->Type()->Is<sem::F32>());
- EXPECT_EQ(sig->Offset(), 0u);
- EXPECT_EQ(sig->Size(), 4u);
- EXPECT_EQ(sig->Align(), 4u);
- EXPECT_EQ(sig->Name(), Sym("sig"));
+ auto* sig = ty->Members()[0];
+ EXPECT_TRUE(sig->Type()->Is<sem::F32>());
+ EXPECT_EQ(sig->Offset(), 0u);
+ EXPECT_EQ(sig->Size(), 4u);
+ EXPECT_EQ(sig->Align(), 4u);
+ EXPECT_EQ(sig->Name(), Sym("sig"));
- auto* exp = ty->Members()[1];
- EXPECT_TRUE(exp->Type()->Is<sem::I32>());
- EXPECT_EQ(exp->Offset(), 4u);
- EXPECT_EQ(exp->Size(), 4u);
- EXPECT_EQ(exp->Align(), 4u);
- EXPECT_EQ(exp->Name(), Sym("exp"));
+ auto* exp = ty->Members()[1];
+ EXPECT_TRUE(exp->Type()->Is<sem::I32>());
+ EXPECT_EQ(exp->Offset(), 4u);
+ EXPECT_EQ(exp->Size(), 4u);
+ EXPECT_EQ(exp->Align(), 4u);
+ EXPECT_EQ(exp->Name(), Sym("exp"));
- EXPECT_EQ(ty->Size(), 8u);
- EXPECT_EQ(ty->SizeNoPadding(), 8u);
+ EXPECT_EQ(ty->Size(), 8u);
+ EXPECT_EQ(ty->SizeNoPadding(), 8u);
}
TEST_F(ResolverBuiltinDataTest, FrexpVector) {
- auto* call = Call("frexp", vec3<f32>());
- WrapInFunction(call);
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
-
- ASSERT_NE(TypeOf(call), nullptr);
- auto* ty = TypeOf(call)->As<sem::Struct>();
- ASSERT_NE(ty, nullptr);
- ASSERT_EQ(ty->Members().size(), 2u);
-
- auto* sig = ty->Members()[0];
- ASSERT_TRUE(sig->Type()->Is<sem::Vector>());
- EXPECT_EQ(sig->Type()->As<sem::Vector>()->Width(), 3u);
- EXPECT_TRUE(sig->Type()->As<sem::Vector>()->type()->Is<sem::F32>());
- EXPECT_EQ(sig->Offset(), 0u);
- EXPECT_EQ(sig->Size(), 12u);
- EXPECT_EQ(sig->Align(), 16u);
- EXPECT_EQ(sig->Name(), Sym("sig"));
-
- auto* exp = ty->Members()[1];
- ASSERT_TRUE(exp->Type()->Is<sem::Vector>());
- EXPECT_EQ(exp->Type()->As<sem::Vector>()->Width(), 3u);
- EXPECT_TRUE(exp->Type()->As<sem::Vector>()->type()->Is<sem::I32>());
- EXPECT_EQ(exp->Offset(), 16u);
- EXPECT_EQ(exp->Size(), 12u);
- EXPECT_EQ(exp->Align(), 16u);
- EXPECT_EQ(exp->Name(), Sym("exp"));
-
- EXPECT_EQ(ty->Size(), 32u);
- EXPECT_EQ(ty->SizeNoPadding(), 28u);
+ auto* call = Call("frexp", vec3<f32>());
+ WrapInFunction(call);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+
+ ASSERT_NE(TypeOf(call), nullptr);
+ auto* ty = TypeOf(call)->As<sem::Struct>();
+ ASSERT_NE(ty, nullptr);
+ ASSERT_EQ(ty->Members().size(), 2u);
+
+ auto* sig = ty->Members()[0];
+ ASSERT_TRUE(sig->Type()->Is<sem::Vector>());
+ EXPECT_EQ(sig->Type()->As<sem::Vector>()->Width(), 3u);
+ EXPECT_TRUE(sig->Type()->As<sem::Vector>()->type()->Is<sem::F32>());
+ EXPECT_EQ(sig->Offset(), 0u);
+ EXPECT_EQ(sig->Size(), 12u);
+ EXPECT_EQ(sig->Align(), 16u);
+ EXPECT_EQ(sig->Name(), Sym("sig"));
+
+ auto* exp = ty->Members()[1];
+ ASSERT_TRUE(exp->Type()->Is<sem::Vector>());
+ EXPECT_EQ(exp->Type()->As<sem::Vector>()->Width(), 3u);
+ EXPECT_TRUE(exp->Type()->As<sem::Vector>()->type()->Is<sem::I32>());
+ EXPECT_EQ(exp->Offset(), 16u);
+ EXPECT_EQ(exp->Size(), 12u);
+ EXPECT_EQ(exp->Align(), 16u);
+ EXPECT_EQ(exp->Name(), Sym("exp"));
+
+ EXPECT_EQ(ty->Size(), 32u);
+ EXPECT_EQ(ty->SizeNoPadding(), 28u);
}
TEST_F(ResolverBuiltinDataTest, Frexp_Error_FirstParamInt) {
- Global("v", ty.i32(), ast::StorageClass::kWorkgroup);
- auto* call = Call("frexp", 1, AddressOf("v"));
- WrapInFunction(call);
+ Global("v", ty.i32(), ast::StorageClass::kWorkgroup);
+ auto* call = Call("frexp", 1_i, AddressOf("v"));
+ WrapInFunction(call);
- EXPECT_FALSE(r()->Resolve());
+ EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- R"(error: no matching call to frexp(i32, ptr<workgroup, i32, read_write>)
+ EXPECT_EQ(r()->error(),
+ R"(error: no matching call to frexp(i32, ptr<workgroup, i32, read_write>)
2 candidate functions:
frexp(f32) -> __frexp_result
@@ -767,15 +749,14 @@ TEST_F(ResolverBuiltinDataTest, Frexp_Error_FirstParamInt) {
}
TEST_F(ResolverBuiltinDataTest, Frexp_Error_SecondParamFloatPtr) {
- Global("v", ty.f32(), ast::StorageClass::kWorkgroup);
- auto* call = Call("frexp", 1.0f, AddressOf("v"));
- WrapInFunction(call);
+ Global("v", ty.f32(), ast::StorageClass::kWorkgroup);
+ auto* call = Call("frexp", 1_f, AddressOf("v"));
+ WrapInFunction(call);
- EXPECT_FALSE(r()->Resolve());
+ EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- R"(error: no matching call to frexp(f32, ptr<workgroup, f32, read_write>)
+ EXPECT_EQ(r()->error(),
+ R"(error: no matching call to frexp(f32, ptr<workgroup, f32, read_write>)
2 candidate functions:
frexp(f32) -> __frexp_result
@@ -784,12 +765,12 @@ TEST_F(ResolverBuiltinDataTest, Frexp_Error_SecondParamFloatPtr) {
}
TEST_F(ResolverBuiltinDataTest, Frexp_Error_SecondParamNotAPointer) {
- auto* call = Call("frexp", 1.0f, 1);
- WrapInFunction(call);
+ auto* call = Call("frexp", 1_f, 1_i);
+ WrapInFunction(call);
- EXPECT_FALSE(r()->Resolve());
+ EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), R"(error: no matching call to frexp(f32, i32)
+ EXPECT_EQ(r()->error(), R"(error: no matching call to frexp(f32, i32)
2 candidate functions:
frexp(f32) -> __frexp_result
@@ -798,15 +779,14 @@ TEST_F(ResolverBuiltinDataTest, Frexp_Error_SecondParamNotAPointer) {
}
TEST_F(ResolverBuiltinDataTest, Frexp_Error_VectorSizesDontMatch) {
- Global("v", ty.vec4<i32>(), ast::StorageClass::kWorkgroup);
- auto* call = Call("frexp", vec2<f32>(1.0f, 2.0f), AddressOf("v"));
- WrapInFunction(call);
+ Global("v", ty.vec4<i32>(), ast::StorageClass::kWorkgroup);
+ auto* call = Call("frexp", vec2<f32>(1_f, 2_f), AddressOf("v"));
+ WrapInFunction(call);
- EXPECT_FALSE(r()->Resolve());
+ EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- R"(error: no matching call to frexp(vec2<f32>, ptr<workgroup, vec4<i32>, read_write>)
+ EXPECT_EQ(r()->error(),
+ R"(error: no matching call to frexp(vec2<f32>, ptr<workgroup, vec4<i32>, read_write>)
2 candidate functions:
frexp(vecN<f32>) -> __frexp_result_vecN
@@ -815,77 +795,76 @@ TEST_F(ResolverBuiltinDataTest, Frexp_Error_VectorSizesDontMatch) {
}
TEST_F(ResolverBuiltinDataTest, ModfScalar) {
- auto* call = Call("modf", 1.0f);
- WrapInFunction(call);
+ auto* call = Call("modf", 1_f);
+ WrapInFunction(call);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(call), nullptr);
- auto* ty = TypeOf(call)->As<sem::Struct>();
- ASSERT_NE(ty, nullptr);
- ASSERT_EQ(ty->Members().size(), 2u);
+ ASSERT_NE(TypeOf(call), nullptr);
+ auto* ty = TypeOf(call)->As<sem::Struct>();
+ ASSERT_NE(ty, nullptr);
+ ASSERT_EQ(ty->Members().size(), 2u);
- auto* fract = ty->Members()[0];
- EXPECT_TRUE(fract->Type()->Is<sem::F32>());
- EXPECT_EQ(fract->Offset(), 0u);
- EXPECT_EQ(fract->Size(), 4u);
- EXPECT_EQ(fract->Align(), 4u);
- EXPECT_EQ(fract->Name(), Sym("fract"));
+ auto* fract = ty->Members()[0];
+ EXPECT_TRUE(fract->Type()->Is<sem::F32>());
+ EXPECT_EQ(fract->Offset(), 0u);
+ EXPECT_EQ(fract->Size(), 4u);
+ EXPECT_EQ(fract->Align(), 4u);
+ EXPECT_EQ(fract->Name(), Sym("fract"));
- auto* whole = ty->Members()[1];
- EXPECT_TRUE(whole->Type()->Is<sem::F32>());
- EXPECT_EQ(whole->Offset(), 4u);
- EXPECT_EQ(whole->Size(), 4u);
- EXPECT_EQ(whole->Align(), 4u);
- EXPECT_EQ(whole->Name(), Sym("whole"));
+ auto* whole = ty->Members()[1];
+ EXPECT_TRUE(whole->Type()->Is<sem::F32>());
+ EXPECT_EQ(whole->Offset(), 4u);
+ EXPECT_EQ(whole->Size(), 4u);
+ EXPECT_EQ(whole->Align(), 4u);
+ EXPECT_EQ(whole->Name(), Sym("whole"));
- EXPECT_EQ(ty->Size(), 8u);
- EXPECT_EQ(ty->SizeNoPadding(), 8u);
+ EXPECT_EQ(ty->Size(), 8u);
+ EXPECT_EQ(ty->SizeNoPadding(), 8u);
}
TEST_F(ResolverBuiltinDataTest, ModfVector) {
- auto* call = Call("modf", vec3<f32>());
- WrapInFunction(call);
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
-
- ASSERT_NE(TypeOf(call), nullptr);
- auto* ty = TypeOf(call)->As<sem::Struct>();
- ASSERT_NE(ty, nullptr);
- ASSERT_EQ(ty->Members().size(), 2u);
-
- auto* fract = ty->Members()[0];
- ASSERT_TRUE(fract->Type()->Is<sem::Vector>());
- EXPECT_EQ(fract->Type()->As<sem::Vector>()->Width(), 3u);
- EXPECT_TRUE(fract->Type()->As<sem::Vector>()->type()->Is<sem::F32>());
- EXPECT_EQ(fract->Offset(), 0u);
- EXPECT_EQ(fract->Size(), 12u);
- EXPECT_EQ(fract->Align(), 16u);
- EXPECT_EQ(fract->Name(), Sym("fract"));
-
- auto* whole = ty->Members()[1];
- ASSERT_TRUE(whole->Type()->Is<sem::Vector>());
- EXPECT_EQ(whole->Type()->As<sem::Vector>()->Width(), 3u);
- EXPECT_TRUE(whole->Type()->As<sem::Vector>()->type()->Is<sem::F32>());
- EXPECT_EQ(whole->Offset(), 16u);
- EXPECT_EQ(whole->Size(), 12u);
- EXPECT_EQ(whole->Align(), 16u);
- EXPECT_EQ(whole->Name(), Sym("whole"));
-
- EXPECT_EQ(ty->Size(), 32u);
- EXPECT_EQ(ty->SizeNoPadding(), 28u);
+ auto* call = Call("modf", vec3<f32>());
+ WrapInFunction(call);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+
+ ASSERT_NE(TypeOf(call), nullptr);
+ auto* ty = TypeOf(call)->As<sem::Struct>();
+ ASSERT_NE(ty, nullptr);
+ ASSERT_EQ(ty->Members().size(), 2u);
+
+ auto* fract = ty->Members()[0];
+ ASSERT_TRUE(fract->Type()->Is<sem::Vector>());
+ EXPECT_EQ(fract->Type()->As<sem::Vector>()->Width(), 3u);
+ EXPECT_TRUE(fract->Type()->As<sem::Vector>()->type()->Is<sem::F32>());
+ EXPECT_EQ(fract->Offset(), 0u);
+ EXPECT_EQ(fract->Size(), 12u);
+ EXPECT_EQ(fract->Align(), 16u);
+ EXPECT_EQ(fract->Name(), Sym("fract"));
+
+ auto* whole = ty->Members()[1];
+ ASSERT_TRUE(whole->Type()->Is<sem::Vector>());
+ EXPECT_EQ(whole->Type()->As<sem::Vector>()->Width(), 3u);
+ EXPECT_TRUE(whole->Type()->As<sem::Vector>()->type()->Is<sem::F32>());
+ EXPECT_EQ(whole->Offset(), 16u);
+ EXPECT_EQ(whole->Size(), 12u);
+ EXPECT_EQ(whole->Align(), 16u);
+ EXPECT_EQ(whole->Name(), Sym("whole"));
+
+ EXPECT_EQ(ty->Size(), 32u);
+ EXPECT_EQ(ty->SizeNoPadding(), 28u);
}
TEST_F(ResolverBuiltinDataTest, Modf_Error_FirstParamInt) {
- Global("whole", ty.f32(), ast::StorageClass::kWorkgroup);
- auto* call = Call("modf", 1, AddressOf("whole"));
- WrapInFunction(call);
+ Global("whole", ty.f32(), ast::StorageClass::kWorkgroup);
+ auto* call = Call("modf", 1_i, AddressOf("whole"));
+ WrapInFunction(call);
- EXPECT_FALSE(r()->Resolve());
+ EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- R"(error: no matching call to modf(i32, ptr<workgroup, f32, read_write>)
+ EXPECT_EQ(r()->error(),
+ R"(error: no matching call to modf(i32, ptr<workgroup, f32, read_write>)
2 candidate functions:
modf(f32) -> __modf_result
@@ -894,15 +873,14 @@ TEST_F(ResolverBuiltinDataTest, Modf_Error_FirstParamInt) {
}
TEST_F(ResolverBuiltinDataTest, Modf_Error_SecondParamIntPtr) {
- Global("whole", ty.i32(), ast::StorageClass::kWorkgroup);
- auto* call = Call("modf", 1.0f, AddressOf("whole"));
- WrapInFunction(call);
+ Global("whole", ty.i32(), ast::StorageClass::kWorkgroup);
+ auto* call = Call("modf", 1_f, AddressOf("whole"));
+ WrapInFunction(call);
- EXPECT_FALSE(r()->Resolve());
+ EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- R"(error: no matching call to modf(f32, ptr<workgroup, i32, read_write>)
+ EXPECT_EQ(r()->error(),
+ R"(error: no matching call to modf(f32, ptr<workgroup, i32, read_write>)
2 candidate functions:
modf(f32) -> __modf_result
@@ -911,12 +889,12 @@ TEST_F(ResolverBuiltinDataTest, Modf_Error_SecondParamIntPtr) {
}
TEST_F(ResolverBuiltinDataTest, Modf_Error_SecondParamNotAPointer) {
- auto* call = Call("modf", 1.0f, 1.0f);
- WrapInFunction(call);
+ auto* call = Call("modf", 1_f, 1_f);
+ WrapInFunction(call);
- EXPECT_FALSE(r()->Resolve());
+ EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), R"(error: no matching call to modf(f32, f32)
+ EXPECT_EQ(r()->error(), R"(error: no matching call to modf(f32, f32)
2 candidate functions:
modf(f32) -> __modf_result
@@ -925,15 +903,14 @@ TEST_F(ResolverBuiltinDataTest, Modf_Error_SecondParamNotAPointer) {
}
TEST_F(ResolverBuiltinDataTest, Modf_Error_VectorSizesDontMatch) {
- Global("whole", ty.vec4<f32>(), ast::StorageClass::kWorkgroup);
- auto* call = Call("modf", vec2<f32>(1.0f, 2.0f), AddressOf("whole"));
- WrapInFunction(call);
+ Global("whole", ty.vec4<f32>(), ast::StorageClass::kWorkgroup);
+ auto* call = Call("modf", vec2<f32>(1_f, 2_f), AddressOf("whole"));
+ WrapInFunction(call);
- EXPECT_FALSE(r()->Resolve());
+ EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- R"(error: no matching call to modf(vec2<f32>, ptr<workgroup, vec4<f32>, read_write>)
+ EXPECT_EQ(r()->error(),
+ R"(error: no matching call to modf(vec2<f32>, ptr<workgroup, vec4<f32>, read_write>)
2 candidate functions:
modf(vecN<f32>) -> __modf_result_vecN
@@ -941,234 +918,222 @@ TEST_F(ResolverBuiltinDataTest, Modf_Error_VectorSizesDontMatch) {
)");
}
-using ResolverBuiltinTest_SingleParam_FloatOrInt =
- ResolverTestWithParam<BuiltinData>;
+using ResolverBuiltinTest_SingleParam_FloatOrInt = ResolverTestWithParam<BuiltinData>;
TEST_P(ResolverBuiltinTest_SingleParam_FloatOrInt, Float_Scalar) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* call = Call(param.name, 1.f);
- WrapInFunction(call);
+ auto* call = Call(param.name, 1_f);
+ WrapInFunction(call);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(call), nullptr);
- EXPECT_TRUE(TypeOf(call)->is_float_scalar());
+ ASSERT_NE(TypeOf(call), nullptr);
+ EXPECT_TRUE(TypeOf(call)->is_float_scalar());
}
TEST_P(ResolverBuiltinTest_SingleParam_FloatOrInt, Float_Vector) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* call = Call(param.name, vec3<f32>(1.0f, 1.0f, 3.0f));
- WrapInFunction(call);
+ auto* call = Call(param.name, vec3<f32>(1_f, 1_f, 3_f));
+ WrapInFunction(call);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(call), nullptr);
- EXPECT_TRUE(TypeOf(call)->is_float_vector());
- EXPECT_EQ(TypeOf(call)->As<sem::Vector>()->Width(), 3u);
+ ASSERT_NE(TypeOf(call), nullptr);
+ EXPECT_TRUE(TypeOf(call)->is_float_vector());
+ EXPECT_EQ(TypeOf(call)->As<sem::Vector>()->Width(), 3u);
}
TEST_P(ResolverBuiltinTest_SingleParam_FloatOrInt, Sint_Scalar) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* call = Call(param.name, -1);
- WrapInFunction(call);
+ auto* call = Call(param.name, -1_i);
+ WrapInFunction(call);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(call), nullptr);
- EXPECT_TRUE(TypeOf(call)->Is<sem::I32>());
+ ASSERT_NE(TypeOf(call), nullptr);
+ EXPECT_TRUE(TypeOf(call)->Is<sem::I32>());
}
TEST_P(ResolverBuiltinTest_SingleParam_FloatOrInt, Sint_Vector) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* call = Call(param.name, vec3<i32>(1, 1, 3));
- WrapInFunction(call);
+ auto* call = Call(param.name, vec3<i32>(1_i, 1_i, 3_i));
+ WrapInFunction(call);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(call), nullptr);
- EXPECT_TRUE(TypeOf(call)->is_signed_integer_vector());
- EXPECT_EQ(TypeOf(call)->As<sem::Vector>()->Width(), 3u);
+ ASSERT_NE(TypeOf(call), nullptr);
+ EXPECT_TRUE(TypeOf(call)->is_signed_integer_vector());
+ EXPECT_EQ(TypeOf(call)->As<sem::Vector>()->Width(), 3u);
}
TEST_P(ResolverBuiltinTest_SingleParam_FloatOrInt, Uint_Scalar) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* call = Call(param.name, 1u);
- WrapInFunction(call);
+ auto* call = Call(param.name, 1_u);
+ WrapInFunction(call);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(call), nullptr);
- EXPECT_TRUE(TypeOf(call)->Is<sem::U32>());
+ ASSERT_NE(TypeOf(call), nullptr);
+ EXPECT_TRUE(TypeOf(call)->Is<sem::U32>());
}
TEST_P(ResolverBuiltinTest_SingleParam_FloatOrInt, Uint_Vector) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* call = Call(param.name, vec3<u32>(1u, 1u, 3u));
- WrapInFunction(call);
+ auto* call = Call(param.name, vec3<u32>(1_u, 1_u, 3_u));
+ WrapInFunction(call);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(call), nullptr);
- EXPECT_TRUE(TypeOf(call)->is_unsigned_integer_vector());
- EXPECT_EQ(TypeOf(call)->As<sem::Vector>()->Width(), 3u);
+ ASSERT_NE(TypeOf(call), nullptr);
+ EXPECT_TRUE(TypeOf(call)->is_unsigned_integer_vector());
+ EXPECT_EQ(TypeOf(call)->As<sem::Vector>()->Width(), 3u);
}
TEST_P(ResolverBuiltinTest_SingleParam_FloatOrInt, Error_NoParams) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* call = Call(param.name);
- WrapInFunction(call);
+ auto* call = Call(param.name);
+ WrapInFunction(call);
- EXPECT_FALSE(r()->Resolve());
+ EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "error: no matching call to " + std::string(param.name) +
- "()\n\n"
- "2 candidate functions:\n " +
- std::string(param.name) +
- "(T) -> T where: T is f32, i32 or u32\n " +
- std::string(param.name) +
- "(vecN<T>) -> vecN<T> where: T is f32, i32 or u32\n");
+ EXPECT_EQ(r()->error(),
+ "error: no matching call to " + std::string(param.name) +
+ "()\n\n"
+ "2 candidate functions:\n " +
+ std::string(param.name) + "(T) -> T where: T is f32, i32 or u32\n " +
+ std::string(param.name) + "(vecN<T>) -> vecN<T> where: T is f32, i32 or u32\n");
}
INSTANTIATE_TEST_SUITE_P(ResolverTest,
ResolverBuiltinTest_SingleParam_FloatOrInt,
- testing::Values(BuiltinData{"abs",
- BuiltinType::kAbs}));
+ testing::Values(BuiltinData{"abs", BuiltinType::kAbs}));
TEST_F(ResolverBuiltinTest, Length_Scalar) {
- auto* call = Call("length", 1.f);
- WrapInFunction(call);
+ auto* call = Call("length", 1_f);
+ WrapInFunction(call);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(call), nullptr);
- EXPECT_TRUE(TypeOf(call)->is_float_scalar());
+ ASSERT_NE(TypeOf(call), nullptr);
+ EXPECT_TRUE(TypeOf(call)->is_float_scalar());
}
TEST_F(ResolverBuiltinTest, Length_FloatVector) {
- auto* call = Call("length", vec3<f32>(1.0f, 1.0f, 3.0f));
- WrapInFunction(call);
+ auto* call = Call("length", vec3<f32>(1_f, 1_f, 3_f));
+ WrapInFunction(call);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(call), nullptr);
- EXPECT_TRUE(TypeOf(call)->is_float_scalar());
+ ASSERT_NE(TypeOf(call), nullptr);
+ EXPECT_TRUE(TypeOf(call)->is_float_scalar());
}
using ResolverBuiltinTest_TwoParam = ResolverTestWithParam<BuiltinData>;
TEST_P(ResolverBuiltinTest_TwoParam, Scalar) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* call = Call(param.name, 1.f, 1.f);
- WrapInFunction(call);
+ auto* call = Call(param.name, 1_f, 1_f);
+ WrapInFunction(call);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(call), nullptr);
- EXPECT_TRUE(TypeOf(call)->is_float_scalar());
+ ASSERT_NE(TypeOf(call), nullptr);
+ EXPECT_TRUE(TypeOf(call)->is_float_scalar());
}
TEST_P(ResolverBuiltinTest_TwoParam, Vector) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* call = Call(param.name, vec3<f32>(1.0f, 1.0f, 3.0f),
- vec3<f32>(1.0f, 1.0f, 3.0f));
- WrapInFunction(call);
+ auto* call = Call(param.name, vec3<f32>(1_f, 1_f, 3_f), vec3<f32>(1_f, 1_f, 3_f));
+ WrapInFunction(call);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(call), nullptr);
- EXPECT_TRUE(TypeOf(call)->is_float_vector());
- EXPECT_EQ(TypeOf(call)->As<sem::Vector>()->Width(), 3u);
+ ASSERT_NE(TypeOf(call), nullptr);
+ EXPECT_TRUE(TypeOf(call)->is_float_vector());
+ EXPECT_EQ(TypeOf(call)->As<sem::Vector>()->Width(), 3u);
}
TEST_P(ResolverBuiltinTest_TwoParam, Error_NoTooManyParams) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* call = Call(param.name, 1, 2, 3);
- WrapInFunction(call);
+ auto* call = Call(param.name, 1_i, 2_i, 3_i);
+ WrapInFunction(call);
- EXPECT_FALSE(r()->Resolve());
+ EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "error: no matching call to " + std::string(param.name) +
- "(i32, i32, i32)\n\n"
- "2 candidate functions:\n " +
- std::string(param.name) + "(f32, f32) -> f32\n " +
- std::string(param.name) +
- "(vecN<f32>, vecN<f32>) -> vecN<f32>\n");
+ EXPECT_EQ(r()->error(), "error: no matching call to " + std::string(param.name) +
+ "(i32, i32, i32)\n\n"
+ "2 candidate functions:\n " +
+ std::string(param.name) + "(f32, f32) -> f32\n " +
+ std::string(param.name) + "(vecN<f32>, vecN<f32>) -> vecN<f32>\n");
}
TEST_P(ResolverBuiltinTest_TwoParam, Error_NoParams) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* call = Call(param.name);
- WrapInFunction(call);
+ auto* call = Call(param.name);
+ WrapInFunction(call);
- EXPECT_FALSE(r()->Resolve());
+ EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "error: no matching call to " + std::string(param.name) +
- "()\n\n"
- "2 candidate functions:\n " +
- std::string(param.name) + "(f32, f32) -> f32\n " +
- std::string(param.name) +
- "(vecN<f32>, vecN<f32>) -> vecN<f32>\n");
+ EXPECT_EQ(r()->error(), "error: no matching call to " + std::string(param.name) +
+ "()\n\n"
+ "2 candidate functions:\n " +
+ std::string(param.name) + "(f32, f32) -> f32\n " +
+ std::string(param.name) + "(vecN<f32>, vecN<f32>) -> vecN<f32>\n");
}
-INSTANTIATE_TEST_SUITE_P(
- ResolverTest,
- ResolverBuiltinTest_TwoParam,
- testing::Values(BuiltinData{"atan2", BuiltinType::kAtan2},
- BuiltinData{"pow", BuiltinType::kPow},
- BuiltinData{"step", BuiltinType::kStep}));
+INSTANTIATE_TEST_SUITE_P(ResolverTest,
+ ResolverBuiltinTest_TwoParam,
+ testing::Values(BuiltinData{"atan2", BuiltinType::kAtan2},
+ BuiltinData{"pow", BuiltinType::kPow},
+ BuiltinData{"step", BuiltinType::kStep}));
TEST_F(ResolverBuiltinTest, Distance_Scalar) {
- auto* call = Call("distance", 1.f, 1.f);
- WrapInFunction(call);
+ auto* call = Call("distance", 1_f, 1_f);
+ WrapInFunction(call);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(call), nullptr);
- EXPECT_TRUE(TypeOf(call)->is_float_scalar());
+ ASSERT_NE(TypeOf(call), nullptr);
+ EXPECT_TRUE(TypeOf(call)->is_float_scalar());
}
TEST_F(ResolverBuiltinTest, Distance_Vector) {
- auto* call = Call("distance", vec3<f32>(1.0f, 1.0f, 3.0f),
- vec3<f32>(1.0f, 1.0f, 3.0f));
- WrapInFunction(call);
+ auto* call = Call("distance", vec3<f32>(1_f, 1_f, 3_f), vec3<f32>(1_f, 1_f, 3_f));
+ WrapInFunction(call);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(call), nullptr);
- EXPECT_TRUE(TypeOf(call)->Is<sem::F32>());
+ ASSERT_NE(TypeOf(call), nullptr);
+ EXPECT_TRUE(TypeOf(call)->Is<sem::F32>());
}
TEST_F(ResolverBuiltinTest, Cross) {
- auto* call =
- Call("cross", vec3<f32>(1.0f, 2.0f, 3.0f), vec3<f32>(1.0f, 2.0f, 3.0f));
- WrapInFunction(call);
+ auto* call = Call("cross", vec3<f32>(1_f, 2_f, 3_f), vec3<f32>(1_f, 2_f, 3_f));
+ WrapInFunction(call);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(call), nullptr);
- EXPECT_TRUE(TypeOf(call)->is_float_vector());
- EXPECT_EQ(TypeOf(call)->As<sem::Vector>()->Width(), 3u);
+ ASSERT_NE(TypeOf(call), nullptr);
+ EXPECT_TRUE(TypeOf(call)->is_float_vector());
+ EXPECT_EQ(TypeOf(call)->As<sem::Vector>()->Width(), 3u);
}
TEST_F(ResolverBuiltinTest, Cross_Error_NoArgs) {
- auto* call = Call("cross");
- WrapInFunction(call);
+ auto* call = Call("cross");
+ WrapInFunction(call);
- EXPECT_FALSE(r()->Resolve());
+ EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), R"(error: no matching call to cross()
+ EXPECT_EQ(r()->error(), R"(error: no matching call to cross()
1 candidate function:
cross(vec3<f32>, vec3<f32>) -> vec3<f32>
@@ -1176,12 +1141,12 @@ TEST_F(ResolverBuiltinTest, Cross_Error_NoArgs) {
}
TEST_F(ResolverBuiltinTest, Cross_Error_Scalar) {
- auto* call = Call("cross", 1.0f, 1.0f);
- WrapInFunction(call);
+ auto* call = Call("cross", 1_f, 1_f);
+ WrapInFunction(call);
- EXPECT_FALSE(r()->Resolve());
+ EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), R"(error: no matching call to cross(f32, f32)
+ EXPECT_EQ(r()->error(), R"(error: no matching call to cross(f32, f32)
1 candidate function:
cross(vec3<f32>, vec3<f32>) -> vec3<f32>
@@ -1189,13 +1154,13 @@ TEST_F(ResolverBuiltinTest, Cross_Error_Scalar) {
}
TEST_F(ResolverBuiltinTest, Cross_Error_Vec3Int) {
- auto* call = Call("cross", vec3<i32>(1, 2, 3), vec3<i32>(1, 2, 3));
- WrapInFunction(call);
+ auto* call = Call("cross", vec3<i32>(1_i, 2_i, 3_i), vec3<i32>(1_i, 2_i, 3_i));
+ WrapInFunction(call);
- EXPECT_FALSE(r()->Resolve());
+ EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- R"(error: no matching call to cross(vec3<i32>, vec3<i32>)
+ EXPECT_EQ(r()->error(),
+ R"(error: no matching call to cross(vec3<i32>, vec3<i32>)
1 candidate function:
cross(vec3<f32>, vec3<f32>) -> vec3<f32>
@@ -1203,15 +1168,14 @@ TEST_F(ResolverBuiltinTest, Cross_Error_Vec3Int) {
}
TEST_F(ResolverBuiltinTest, Cross_Error_Vec4) {
- auto* call = Call("cross", vec4<f32>(1.0f, 2.0f, 3.0f, 4.0f),
- vec4<f32>(1.0f, 2.0f, 3.0f, 4.0f));
+ auto* call = Call("cross", vec4<f32>(1_f, 2_f, 3_f, 4_f), vec4<f32>(1_f, 2_f, 3_f, 4_f));
- WrapInFunction(call);
+ WrapInFunction(call);
- EXPECT_FALSE(r()->Resolve());
+ EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- R"(error: no matching call to cross(vec4<f32>, vec4<f32>)
+ EXPECT_EQ(r()->error(),
+ R"(error: no matching call to cross(vec4<f32>, vec4<f32>)
1 candidate function:
cross(vec3<f32>, vec3<f32>) -> vec3<f32>
@@ -1219,38 +1183,38 @@ TEST_F(ResolverBuiltinTest, Cross_Error_Vec4) {
}
TEST_F(ResolverBuiltinTest, Cross_Error_TooManyParams) {
- auto* call = Call("cross", vec3<f32>(1.0f, 2.0f, 3.0f),
- vec3<f32>(1.0f, 2.0f, 3.0f), vec3<f32>(1.0f, 2.0f, 3.0f));
+ auto* call =
+ Call("cross", vec3<f32>(1_f, 2_f, 3_f), vec3<f32>(1_f, 2_f, 3_f), vec3<f32>(1_f, 2_f, 3_f));
- WrapInFunction(call);
+ WrapInFunction(call);
- EXPECT_FALSE(r()->Resolve());
+ EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- R"(error: no matching call to cross(vec3<f32>, vec3<f32>, vec3<f32>)
+ EXPECT_EQ(r()->error(),
+ R"(error: no matching call to cross(vec3<f32>, vec3<f32>, vec3<f32>)
1 candidate function:
cross(vec3<f32>, vec3<f32>) -> vec3<f32>
)");
}
TEST_F(ResolverBuiltinTest, Normalize) {
- auto* call = Call("normalize", vec3<f32>(1.0f, 1.0f, 3.0f));
- WrapInFunction(call);
+ auto* call = Call("normalize", vec3<f32>(1_f, 1_f, 3_f));
+ WrapInFunction(call);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(call), nullptr);
- EXPECT_TRUE(TypeOf(call)->is_float_vector());
- EXPECT_EQ(TypeOf(call)->As<sem::Vector>()->Width(), 3u);
+ ASSERT_NE(TypeOf(call), nullptr);
+ EXPECT_TRUE(TypeOf(call)->is_float_vector());
+ EXPECT_EQ(TypeOf(call)->As<sem::Vector>()->Width(), 3u);
}
TEST_F(ResolverBuiltinTest, Normalize_NoArgs) {
- auto* call = Call("normalize");
- WrapInFunction(call);
+ auto* call = Call("normalize");
+ WrapInFunction(call);
- EXPECT_FALSE(r()->Resolve());
+ EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), R"(error: no matching call to normalize()
+ EXPECT_EQ(r()->error(), R"(error: no matching call to normalize()
1 candidate function:
normalize(vecN<f32>) -> vecN<f32>
@@ -1259,351 +1223,340 @@ TEST_F(ResolverBuiltinTest, Normalize_NoArgs) {
using ResolverBuiltinTest_ThreeParam = ResolverTestWithParam<BuiltinData>;
TEST_P(ResolverBuiltinTest_ThreeParam, Scalar) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* call = Call(param.name, 1.f, 1.f, 1.f);
- WrapInFunction(call);
+ auto* call = Call(param.name, 1_f, 1_f, 1_f);
+ WrapInFunction(call);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(call), nullptr);
- EXPECT_TRUE(TypeOf(call)->is_float_scalar());
+ ASSERT_NE(TypeOf(call), nullptr);
+ EXPECT_TRUE(TypeOf(call)->is_float_scalar());
}
TEST_P(ResolverBuiltinTest_ThreeParam, Vector) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* call = Call(param.name, vec3<f32>(1.0f, 1.0f, 3.0f),
- vec3<f32>(1.0f, 1.0f, 3.0f), vec3<f32>(1.0f, 1.0f, 3.0f));
- WrapInFunction(call);
+ auto* call = Call(param.name, vec3<f32>(1_f, 1_f, 3_f), vec3<f32>(1_f, 1_f, 3_f),
+ vec3<f32>(1_f, 1_f, 3_f));
+ WrapInFunction(call);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(call), nullptr);
- EXPECT_TRUE(TypeOf(call)->is_float_vector());
- EXPECT_EQ(TypeOf(call)->As<sem::Vector>()->Width(), 3u);
+ ASSERT_NE(TypeOf(call), nullptr);
+ EXPECT_TRUE(TypeOf(call)->is_float_vector());
+ EXPECT_EQ(TypeOf(call)->As<sem::Vector>()->Width(), 3u);
}
TEST_P(ResolverBuiltinTest_ThreeParam, Error_NoParams) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* call = Call(param.name);
- WrapInFunction(call);
+ auto* call = Call(param.name);
+ WrapInFunction(call);
- EXPECT_FALSE(r()->Resolve());
+ EXPECT_FALSE(r()->Resolve());
- EXPECT_THAT(r()->error(), HasSubstr("error: no matching call to " +
- std::string(param.name) + "()"));
+ EXPECT_THAT(r()->error(),
+ HasSubstr("error: no matching call to " + std::string(param.name) + "()"));
}
-INSTANTIATE_TEST_SUITE_P(
- ResolverTest,
- ResolverBuiltinTest_ThreeParam,
- testing::Values(BuiltinData{"mix", BuiltinType::kMix},
- BuiltinData{"smoothstep", BuiltinType::kSmoothstep},
- BuiltinData{"smoothStep", BuiltinType::kSmoothStep},
- BuiltinData{"fma", BuiltinType::kFma}));
-
-using ResolverBuiltinTest_ThreeParam_FloatOrInt =
- ResolverTestWithParam<BuiltinData>;
+INSTANTIATE_TEST_SUITE_P(ResolverTest,
+ ResolverBuiltinTest_ThreeParam,
+ testing::Values(BuiltinData{"mix", BuiltinType::kMix},
+ BuiltinData{"smoothstep", BuiltinType::kSmoothstep},
+ BuiltinData{"smoothStep", BuiltinType::kSmoothStep},
+ BuiltinData{"fma", BuiltinType::kFma}));
+
+using ResolverBuiltinTest_ThreeParam_FloatOrInt = ResolverTestWithParam<BuiltinData>;
TEST_P(ResolverBuiltinTest_ThreeParam_FloatOrInt, Float_Scalar) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* call = Call(param.name, 1.f, 1.f, 1.f);
- WrapInFunction(call);
+ auto* call = Call(param.name, 1_f, 1_f, 1_f);
+ WrapInFunction(call);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(call), nullptr);
- EXPECT_TRUE(TypeOf(call)->is_float_scalar());
+ ASSERT_NE(TypeOf(call), nullptr);
+ EXPECT_TRUE(TypeOf(call)->is_float_scalar());
}
TEST_P(ResolverBuiltinTest_ThreeParam_FloatOrInt, Float_Vector) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* call = Call(param.name, vec3<f32>(1.0f, 1.0f, 3.0f),
- vec3<f32>(1.0f, 1.0f, 3.0f), vec3<f32>(1.0f, 1.0f, 3.0f));
- WrapInFunction(call);
+ auto* call = Call(param.name, vec3<f32>(1_f, 1_f, 3_f), vec3<f32>(1_f, 1_f, 3_f),
+ vec3<f32>(1_f, 1_f, 3_f));
+ WrapInFunction(call);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(call), nullptr);
- EXPECT_TRUE(TypeOf(call)->is_float_vector());
- EXPECT_EQ(TypeOf(call)->As<sem::Vector>()->Width(), 3u);
+ ASSERT_NE(TypeOf(call), nullptr);
+ EXPECT_TRUE(TypeOf(call)->is_float_vector());
+ EXPECT_EQ(TypeOf(call)->As<sem::Vector>()->Width(), 3u);
}
TEST_P(ResolverBuiltinTest_ThreeParam_FloatOrInt, Sint_Scalar) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* call = Call(param.name, 1, 1, 1);
- WrapInFunction(call);
+ auto* call = Call(param.name, 1_i, 1_i, 1_i);
+ WrapInFunction(call);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(call), nullptr);
- EXPECT_TRUE(TypeOf(call)->Is<sem::I32>());
+ ASSERT_NE(TypeOf(call), nullptr);
+ EXPECT_TRUE(TypeOf(call)->Is<sem::I32>());
}
TEST_P(ResolverBuiltinTest_ThreeParam_FloatOrInt, Sint_Vector) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* call = Call(param.name, vec3<i32>(1, 1, 3), vec3<i32>(1, 1, 3),
- vec3<i32>(1, 1, 3));
- WrapInFunction(call);
+ auto* call = Call(param.name, vec3<i32>(1_i, 1_i, 3_i), vec3<i32>(1_i, 1_i, 3_i),
+ vec3<i32>(1_i, 1_i, 3_i));
+ WrapInFunction(call);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(call), nullptr);
- EXPECT_TRUE(TypeOf(call)->is_signed_integer_vector());
- EXPECT_EQ(TypeOf(call)->As<sem::Vector>()->Width(), 3u);
+ ASSERT_NE(TypeOf(call), nullptr);
+ EXPECT_TRUE(TypeOf(call)->is_signed_integer_vector());
+ EXPECT_EQ(TypeOf(call)->As<sem::Vector>()->Width(), 3u);
}
TEST_P(ResolverBuiltinTest_ThreeParam_FloatOrInt, Uint_Scalar) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* call = Call(param.name, 1u, 1u, 1u);
- WrapInFunction(call);
+ auto* call = Call(param.name, 1_u, 1_u, 1_u);
+ WrapInFunction(call);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(call), nullptr);
- EXPECT_TRUE(TypeOf(call)->Is<sem::U32>());
+ ASSERT_NE(TypeOf(call), nullptr);
+ EXPECT_TRUE(TypeOf(call)->Is<sem::U32>());
}
TEST_P(ResolverBuiltinTest_ThreeParam_FloatOrInt, Uint_Vector) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* call = Call(param.name, vec3<u32>(1u, 1u, 3u), vec3<u32>(1u, 1u, 3u),
- vec3<u32>(1u, 1u, 3u));
- WrapInFunction(call);
+ auto* call = Call(param.name, vec3<u32>(1_u, 1_u, 3_u), vec3<u32>(1_u, 1_u, 3_u),
+ vec3<u32>(1_u, 1_u, 3_u));
+ WrapInFunction(call);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(call), nullptr);
- EXPECT_TRUE(TypeOf(call)->is_unsigned_integer_vector());
- EXPECT_EQ(TypeOf(call)->As<sem::Vector>()->Width(), 3u);
+ ASSERT_NE(TypeOf(call), nullptr);
+ EXPECT_TRUE(TypeOf(call)->is_unsigned_integer_vector());
+ EXPECT_EQ(TypeOf(call)->As<sem::Vector>()->Width(), 3u);
}
TEST_P(ResolverBuiltinTest_ThreeParam_FloatOrInt, Error_NoParams) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* call = Call(param.name);
- WrapInFunction(call);
+ auto* call = Call(param.name);
+ WrapInFunction(call);
- EXPECT_FALSE(r()->Resolve());
+ EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "error: no matching call to " + std::string(param.name) +
- "()\n\n"
- "2 candidate functions:\n " +
- std::string(param.name) +
- "(T, T, T) -> T where: T is f32, i32 or u32\n " +
- std::string(param.name) +
- "(vecN<T>, vecN<T>, vecN<T>) -> vecN<T> where: T is f32, i32 "
- "or u32\n");
+ EXPECT_EQ(r()->error(), "error: no matching call to " + std::string(param.name) +
+ "()\n\n"
+ "2 candidate functions:\n " +
+ std::string(param.name) +
+ "(T, T, T) -> T where: T is f32, i32 or u32\n " +
+ std::string(param.name) +
+ "(vecN<T>, vecN<T>, vecN<T>) -> vecN<T> where: T is f32, i32 "
+ "or u32\n");
}
INSTANTIATE_TEST_SUITE_P(ResolverTest,
ResolverBuiltinTest_ThreeParam_FloatOrInt,
- testing::Values(BuiltinData{"clamp",
- BuiltinType::kClamp}));
+ testing::Values(BuiltinData{"clamp", BuiltinType::kClamp}));
using ResolverBuiltinTest_Int_SingleParam = ResolverTestWithParam<BuiltinData>;
TEST_P(ResolverBuiltinTest_Int_SingleParam, Scalar) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* call = Call(param.name, 1);
- WrapInFunction(call);
+ auto* call = Call(param.name, 1_i);
+ WrapInFunction(call);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(call), nullptr);
- EXPECT_TRUE(TypeOf(call)->is_integer_scalar());
+ ASSERT_NE(TypeOf(call), nullptr);
+ EXPECT_TRUE(TypeOf(call)->is_integer_scalar());
}
TEST_P(ResolverBuiltinTest_Int_SingleParam, Vector) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* call = Call(param.name, vec3<i32>(1, 1, 3));
- WrapInFunction(call);
+ auto* call = Call(param.name, vec3<i32>(1_i, 1_i, 3_i));
+ WrapInFunction(call);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(call), nullptr);
- EXPECT_TRUE(TypeOf(call)->is_signed_integer_vector());
- EXPECT_EQ(TypeOf(call)->As<sem::Vector>()->Width(), 3u);
+ ASSERT_NE(TypeOf(call), nullptr);
+ EXPECT_TRUE(TypeOf(call)->is_signed_integer_vector());
+ EXPECT_EQ(TypeOf(call)->As<sem::Vector>()->Width(), 3u);
}
TEST_P(ResolverBuiltinTest_Int_SingleParam, Error_NoParams) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* call = Call(param.name);
- WrapInFunction(call);
+ auto* call = Call(param.name);
+ WrapInFunction(call);
- EXPECT_FALSE(r()->Resolve());
+ EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), "error: no matching call to " +
- std::string(param.name) +
- "()\n\n"
- "2 candidate functions:\n " +
- std::string(param.name) +
- "(T) -> T where: T is i32 or u32\n " +
- std::string(param.name) +
- "(vecN<T>) -> vecN<T> where: T is i32 or u32\n");
+ EXPECT_EQ(r()->error(), "error: no matching call to " + std::string(param.name) +
+ "()\n\n"
+ "2 candidate functions:\n " +
+ std::string(param.name) + "(T) -> T where: T is i32 or u32\n " +
+ std::string(param.name) +
+ "(vecN<T>) -> vecN<T> where: T is i32 or u32\n");
}
-INSTANTIATE_TEST_SUITE_P(
- ResolverTest,
- ResolverBuiltinTest_Int_SingleParam,
- testing::Values(BuiltinData{"countOneBits", BuiltinType::kCountOneBits},
- BuiltinData{"reverseBits", BuiltinType::kReverseBits}));
+INSTANTIATE_TEST_SUITE_P(ResolverTest,
+ ResolverBuiltinTest_Int_SingleParam,
+ testing::Values(BuiltinData{"countOneBits", BuiltinType::kCountOneBits},
+ BuiltinData{"reverseBits", BuiltinType::kReverseBits}));
-using ResolverBuiltinTest_FloatOrInt_TwoParam =
- ResolverTestWithParam<BuiltinData>;
+using ResolverBuiltinTest_FloatOrInt_TwoParam = ResolverTestWithParam<BuiltinData>;
TEST_P(ResolverBuiltinTest_FloatOrInt_TwoParam, Scalar_Signed) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* call = Call(param.name, 1, 1);
- WrapInFunction(call);
+ auto* call = Call(param.name, 1_i, 1_i);
+ WrapInFunction(call);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(call), nullptr);
- EXPECT_TRUE(TypeOf(call)->Is<sem::I32>());
+ ASSERT_NE(TypeOf(call), nullptr);
+ EXPECT_TRUE(TypeOf(call)->Is<sem::I32>());
}
TEST_P(ResolverBuiltinTest_FloatOrInt_TwoParam, Scalar_Unsigned) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* call = Call(param.name, 1u, 1u);
- WrapInFunction(call);
+ auto* call = Call(param.name, 1_u, 1_u);
+ WrapInFunction(call);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(call), nullptr);
- EXPECT_TRUE(TypeOf(call)->Is<sem::U32>());
+ ASSERT_NE(TypeOf(call), nullptr);
+ EXPECT_TRUE(TypeOf(call)->Is<sem::U32>());
}
TEST_P(ResolverBuiltinTest_FloatOrInt_TwoParam, Scalar_Float) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* call = Call(param.name, 1.0f, 1.0f);
- WrapInFunction(call);
+ auto* call = Call(param.name, 1_f, 1_f);
+ WrapInFunction(call);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(call), nullptr);
- EXPECT_TRUE(TypeOf(call)->Is<sem::F32>());
+ ASSERT_NE(TypeOf(call), nullptr);
+ EXPECT_TRUE(TypeOf(call)->Is<sem::F32>());
}
TEST_P(ResolverBuiltinTest_FloatOrInt_TwoParam, Vector_Signed) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* call = Call(param.name, vec3<i32>(1, 1, 3), vec3<i32>(1, 1, 3));
- WrapInFunction(call);
+ auto* call = Call(param.name, vec3<i32>(1_i, 1_i, 3_i), vec3<i32>(1_i, 1_i, 3_i));
+ WrapInFunction(call);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(call), nullptr);
- EXPECT_TRUE(TypeOf(call)->is_signed_integer_vector());
- EXPECT_EQ(TypeOf(call)->As<sem::Vector>()->Width(), 3u);
+ ASSERT_NE(TypeOf(call), nullptr);
+ EXPECT_TRUE(TypeOf(call)->is_signed_integer_vector());
+ EXPECT_EQ(TypeOf(call)->As<sem::Vector>()->Width(), 3u);
}
TEST_P(ResolverBuiltinTest_FloatOrInt_TwoParam, Vector_Unsigned) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* call = Call(param.name, vec3<u32>(1u, 1u, 3u), vec3<u32>(1u, 1u, 3u));
- WrapInFunction(call);
+ auto* call = Call(param.name, vec3<u32>(1_u, 1_u, 3_u), vec3<u32>(1_u, 1_u, 3_u));
+ WrapInFunction(call);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(call), nullptr);
- EXPECT_TRUE(TypeOf(call)->is_unsigned_integer_vector());
- EXPECT_EQ(TypeOf(call)->As<sem::Vector>()->Width(), 3u);
+ ASSERT_NE(TypeOf(call), nullptr);
+ EXPECT_TRUE(TypeOf(call)->is_unsigned_integer_vector());
+ EXPECT_EQ(TypeOf(call)->As<sem::Vector>()->Width(), 3u);
}
TEST_P(ResolverBuiltinTest_FloatOrInt_TwoParam, Vector_Float) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* call =
- Call(param.name, vec3<f32>(1.f, 1.f, 3.f), vec3<f32>(1.f, 1.f, 3.f));
- WrapInFunction(call);
+ auto* call = Call(param.name, vec3<f32>(1_f, 1_f, 3_f), vec3<f32>(1_f, 1_f, 3_f));
+ WrapInFunction(call);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(call), nullptr);
- EXPECT_TRUE(TypeOf(call)->is_float_vector());
- EXPECT_EQ(TypeOf(call)->As<sem::Vector>()->Width(), 3u);
+ ASSERT_NE(TypeOf(call), nullptr);
+ EXPECT_TRUE(TypeOf(call)->is_float_vector());
+ EXPECT_EQ(TypeOf(call)->As<sem::Vector>()->Width(), 3u);
}
TEST_P(ResolverBuiltinTest_FloatOrInt_TwoParam, Error_NoParams) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* call = Call(param.name);
- WrapInFunction(call);
+ auto* call = Call(param.name);
+ WrapInFunction(call);
- EXPECT_FALSE(r()->Resolve());
+ EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "error: no matching call to " + std::string(param.name) +
- "()\n\n"
- "2 candidate functions:\n " +
- std::string(param.name) +
- "(T, T) -> T where: T is f32, i32 or u32\n " +
- std::string(param.name) +
- "(vecN<T>, vecN<T>) -> vecN<T> where: T is f32, i32 or u32\n");
+ EXPECT_EQ(r()->error(), "error: no matching call to " + std::string(param.name) +
+ "()\n\n"
+ "2 candidate functions:\n " +
+ std::string(param.name) +
+ "(T, T) -> T where: T is f32, i32 or u32\n " +
+ std::string(param.name) +
+ "(vecN<T>, vecN<T>) -> vecN<T> where: T is f32, i32 or u32\n");
}
INSTANTIATE_TEST_SUITE_P(ResolverTest,
ResolverBuiltinTest_FloatOrInt_TwoParam,
testing::Values(BuiltinData{"min", BuiltinType::kMin},
- BuiltinData{"max",
- BuiltinType::kMax}));
+ BuiltinData{"max", BuiltinType::kMax}));
TEST_F(ResolverBuiltinTest, Determinant_2x2) {
- Global("var", ty.mat2x2<f32>(), ast::StorageClass::kPrivate);
+ Global("var", ty.mat2x2<f32>(), ast::StorageClass::kPrivate);
- auto* call = Call("determinant", "var");
- WrapInFunction(call);
+ auto* call = Call("determinant", "var");
+ WrapInFunction(call);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(call), nullptr);
- EXPECT_TRUE(TypeOf(call)->Is<sem::F32>());
+ ASSERT_NE(TypeOf(call), nullptr);
+ EXPECT_TRUE(TypeOf(call)->Is<sem::F32>());
}
TEST_F(ResolverBuiltinTest, Determinant_3x3) {
- Global("var", ty.mat3x3<f32>(), ast::StorageClass::kPrivate);
+ Global("var", ty.mat3x3<f32>(), ast::StorageClass::kPrivate);
- auto* call = Call("determinant", "var");
- WrapInFunction(call);
+ auto* call = Call("determinant", "var");
+ WrapInFunction(call);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(call), nullptr);
- EXPECT_TRUE(TypeOf(call)->Is<sem::F32>());
+ ASSERT_NE(TypeOf(call), nullptr);
+ EXPECT_TRUE(TypeOf(call)->Is<sem::F32>());
}
TEST_F(ResolverBuiltinTest, Determinant_4x4) {
- Global("var", ty.mat4x4<f32>(), ast::StorageClass::kPrivate);
+ Global("var", ty.mat4x4<f32>(), ast::StorageClass::kPrivate);
- auto* call = Call("determinant", "var");
- WrapInFunction(call);
+ auto* call = Call("determinant", "var");
+ WrapInFunction(call);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(call), nullptr);
- EXPECT_TRUE(TypeOf(call)->Is<sem::F32>());
+ ASSERT_NE(TypeOf(call), nullptr);
+ EXPECT_TRUE(TypeOf(call)->Is<sem::F32>());
}
TEST_F(ResolverBuiltinTest, Determinant_NotSquare) {
- Global("var", ty.mat2x3<f32>(), ast::StorageClass::kPrivate);
+ Global("var", ty.mat2x3<f32>(), ast::StorageClass::kPrivate);
- auto* call = Call("determinant", "var");
- WrapInFunction(call);
+ auto* call = Call("determinant", "var");
+ WrapInFunction(call);
- EXPECT_FALSE(r()->Resolve());
+ EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), R"(error: no matching call to determinant(mat2x3<f32>)
+ EXPECT_EQ(r()->error(), R"(error: no matching call to determinant(mat2x3<f32>)
1 candidate function:
determinant(matNxN<f32>) -> f32
@@ -1611,378 +1564,374 @@ TEST_F(ResolverBuiltinTest, Determinant_NotSquare) {
}
TEST_F(ResolverBuiltinTest, Determinant_NotMatrix) {
- Global("var", ty.f32(), ast::StorageClass::kPrivate);
+ Global("var", ty.f32(), ast::StorageClass::kPrivate);
- auto* call = Call("determinant", "var");
- WrapInFunction(call);
+ auto* call = Call("determinant", "var");
+ WrapInFunction(call);
- EXPECT_FALSE(r()->Resolve());
+ EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), R"(error: no matching call to determinant(f32)
+ EXPECT_EQ(r()->error(), R"(error: no matching call to determinant(f32)
1 candidate function:
determinant(matNxN<f32>) -> f32
)");
}
-using ResolverBuiltinTest_Texture =
- ResolverTestWithParam<ast::builtin::test::TextureOverloadCase>;
+using ResolverBuiltinTest_Texture = ResolverTestWithParam<ast::builtin::test::TextureOverloadCase>;
-INSTANTIATE_TEST_SUITE_P(
- ResolverTest,
- ResolverBuiltinTest_Texture,
- testing::ValuesIn(ast::builtin::test::TextureOverloadCase::ValidCases()));
-
-std::string to_str(const std::string& function,
- const sem::ParameterList& params) {
- std::stringstream out;
- out << function << "(";
- bool first = true;
- for (auto* param : params) {
- if (!first) {
- out << ", ";
+INSTANTIATE_TEST_SUITE_P(ResolverTest,
+ ResolverBuiltinTest_Texture,
+ testing::ValuesIn(ast::builtin::test::TextureOverloadCase::ValidCases()));
+
+std::string to_str(const std::string& function, const sem::ParameterList& params) {
+ std::stringstream out;
+ out << function << "(";
+ bool first = true;
+ for (auto* param : params) {
+ if (!first) {
+ out << ", ";
+ }
+ out << sem::str(param->Usage());
+ first = false;
+ }
+ out << ")";
+ return out.str();
+}
+
+const char* expected_texture_overload(ast::builtin::test::ValidTextureOverload overload) {
+ using ValidTextureOverload = ast::builtin::test::ValidTextureOverload;
+ switch (overload) {
+ case ValidTextureOverload::kDimensions1d:
+ case ValidTextureOverload::kDimensions2d:
+ case ValidTextureOverload::kDimensions2dArray:
+ case ValidTextureOverload::kDimensions3d:
+ case ValidTextureOverload::kDimensionsCube:
+ case ValidTextureOverload::kDimensionsCubeArray:
+ case ValidTextureOverload::kDimensionsMultisampled2d:
+ case ValidTextureOverload::kDimensionsDepth2d:
+ case ValidTextureOverload::kDimensionsDepth2dArray:
+ case ValidTextureOverload::kDimensionsDepthCube:
+ case ValidTextureOverload::kDimensionsDepthCubeArray:
+ case ValidTextureOverload::kDimensionsDepthMultisampled2d:
+ case ValidTextureOverload::kDimensionsStorageWO1d:
+ case ValidTextureOverload::kDimensionsStorageWO2d:
+ case ValidTextureOverload::kDimensionsStorageWO2dArray:
+ case ValidTextureOverload::kDimensionsStorageWO3d:
+ return R"(textureDimensions(texture))";
+ case ValidTextureOverload::kGather2dF32:
+ return R"(textureGather(component, texture, sampler, coords))";
+ case ValidTextureOverload::kGather2dOffsetF32:
+ return R"(textureGather(component, texture, sampler, coords, offset))";
+ case ValidTextureOverload::kGather2dArrayF32:
+ return R"(textureGather(component, texture, sampler, coords, array_index))";
+ case ValidTextureOverload::kGather2dArrayOffsetF32:
+ return R"(textureGather(component, texture, sampler, coords, array_index, offset))";
+ case ValidTextureOverload::kGatherCubeF32:
+ return R"(textureGather(component, texture, sampler, coords))";
+ case ValidTextureOverload::kGatherCubeArrayF32:
+ return R"(textureGather(component, texture, sampler, coords, array_index))";
+ case ValidTextureOverload::kGatherDepth2dF32:
+ return R"(textureGather(texture, sampler, coords))";
+ case ValidTextureOverload::kGatherDepth2dOffsetF32:
+ return R"(textureGather(texture, sampler, coords, offset))";
+ case ValidTextureOverload::kGatherDepth2dArrayF32:
+ return R"(textureGather(texture, sampler, coords, array_index))";
+ case ValidTextureOverload::kGatherDepth2dArrayOffsetF32:
+ return R"(textureGather(texture, sampler, coords, array_index, offset))";
+ case ValidTextureOverload::kGatherDepthCubeF32:
+ return R"(textureGather(texture, sampler, coords))";
+ case ValidTextureOverload::kGatherDepthCubeArrayF32:
+ return R"(textureGather(texture, sampler, coords, array_index))";
+ case ValidTextureOverload::kGatherCompareDepth2dF32:
+ return R"(textureGatherCompare(texture, sampler, coords, depth_ref))";
+ case ValidTextureOverload::kGatherCompareDepth2dOffsetF32:
+ return R"(textureGatherCompare(texture, sampler, coords, depth_ref, offset))";
+ case ValidTextureOverload::kGatherCompareDepth2dArrayF32:
+ return R"(textureGatherCompare(texture, sampler, coords, array_index, depth_ref))";
+ case ValidTextureOverload::kGatherCompareDepth2dArrayOffsetF32:
+ return R"(textureGatherCompare(texture, sampler, coords, array_index, depth_ref, offset))";
+ case ValidTextureOverload::kGatherCompareDepthCubeF32:
+ return R"(textureGatherCompare(texture, sampler, coords, depth_ref))";
+ case ValidTextureOverload::kGatherCompareDepthCubeArrayF32:
+ return R"(textureGatherCompare(texture, sampler, coords, array_index, depth_ref))";
+ case ValidTextureOverload::kNumLayers2dArray:
+ case ValidTextureOverload::kNumLayersCubeArray:
+ case ValidTextureOverload::kNumLayersDepth2dArray:
+ case ValidTextureOverload::kNumLayersDepthCubeArray:
+ case ValidTextureOverload::kNumLayersStorageWO2dArray:
+ return R"(textureNumLayers(texture))";
+ case ValidTextureOverload::kNumLevels2d:
+ case ValidTextureOverload::kNumLevels2dArray:
+ case ValidTextureOverload::kNumLevels3d:
+ case ValidTextureOverload::kNumLevelsCube:
+ case ValidTextureOverload::kNumLevelsCubeArray:
+ case ValidTextureOverload::kNumLevelsDepth2d:
+ case ValidTextureOverload::kNumLevelsDepth2dArray:
+ case ValidTextureOverload::kNumLevelsDepthCube:
+ case ValidTextureOverload::kNumLevelsDepthCubeArray:
+ return R"(textureNumLevels(texture))";
+ case ValidTextureOverload::kNumSamplesDepthMultisampled2d:
+ case ValidTextureOverload::kNumSamplesMultisampled2d:
+ return R"(textureNumSamples(texture))";
+ case ValidTextureOverload::kDimensions2dLevel:
+ case ValidTextureOverload::kDimensions2dArrayLevel:
+ case ValidTextureOverload::kDimensions3dLevel:
+ case ValidTextureOverload::kDimensionsCubeLevel:
+ case ValidTextureOverload::kDimensionsCubeArrayLevel:
+ case ValidTextureOverload::kDimensionsDepth2dLevel:
+ case ValidTextureOverload::kDimensionsDepth2dArrayLevel:
+ case ValidTextureOverload::kDimensionsDepthCubeLevel:
+ case ValidTextureOverload::kDimensionsDepthCubeArrayLevel:
+ return R"(textureDimensions(texture, level))";
+ case ValidTextureOverload::kSample1dF32:
+ return R"(textureSample(texture, sampler, coords))";
+ case ValidTextureOverload::kSample2dF32:
+ return R"(textureSample(texture, sampler, coords))";
+ case ValidTextureOverload::kSample2dOffsetF32:
+ return R"(textureSample(texture, sampler, coords, offset))";
+ case ValidTextureOverload::kSample2dArrayF32:
+ return R"(textureSample(texture, sampler, coords, array_index))";
+ case ValidTextureOverload::kSample2dArrayOffsetF32:
+ return R"(textureSample(texture, sampler, coords, array_index, offset))";
+ case ValidTextureOverload::kSample3dF32:
+ return R"(textureSample(texture, sampler, coords))";
+ case ValidTextureOverload::kSample3dOffsetF32:
+ return R"(textureSample(texture, sampler, coords, offset))";
+ case ValidTextureOverload::kSampleCubeF32:
+ return R"(textureSample(texture, sampler, coords))";
+ case ValidTextureOverload::kSampleCubeArrayF32:
+ return R"(textureSample(texture, sampler, coords, array_index))";
+ case ValidTextureOverload::kSampleDepth2dF32:
+ return R"(textureSample(texture, sampler, coords))";
+ case ValidTextureOverload::kSampleDepth2dOffsetF32:
+ return R"(textureSample(texture, sampler, coords, offset))";
+ case ValidTextureOverload::kSampleDepth2dArrayF32:
+ return R"(textureSample(texture, sampler, coords, array_index))";
+ case ValidTextureOverload::kSampleDepth2dArrayOffsetF32:
+ return R"(textureSample(texture, sampler, coords, array_index, offset))";
+ case ValidTextureOverload::kSampleDepthCubeF32:
+ return R"(textureSample(texture, sampler, coords))";
+ case ValidTextureOverload::kSampleDepthCubeArrayF32:
+ return R"(textureSample(texture, sampler, coords, array_index))";
+ case ValidTextureOverload::kSampleBias2dF32:
+ return R"(textureSampleBias(texture, sampler, coords, bias))";
+ case ValidTextureOverload::kSampleBias2dOffsetF32:
+ return R"(textureSampleBias(texture, sampler, coords, bias, offset))";
+ case ValidTextureOverload::kSampleBias2dArrayF32:
+ return R"(textureSampleBias(texture, sampler, coords, array_index, bias))";
+ case ValidTextureOverload::kSampleBias2dArrayOffsetF32:
+ return R"(textureSampleBias(texture, sampler, coords, array_index, bias, offset))";
+ case ValidTextureOverload::kSampleBias3dF32:
+ return R"(textureSampleBias(texture, sampler, coords, bias))";
+ case ValidTextureOverload::kSampleBias3dOffsetF32:
+ return R"(textureSampleBias(texture, sampler, coords, bias, offset))";
+ case ValidTextureOverload::kSampleBiasCubeF32:
+ return R"(textureSampleBias(texture, sampler, coords, bias))";
+ case ValidTextureOverload::kSampleBiasCubeArrayF32:
+ return R"(textureSampleBias(texture, sampler, coords, array_index, bias))";
+ case ValidTextureOverload::kSampleLevel2dF32:
+ return R"(textureSampleLevel(texture, sampler, coords, level))";
+ case ValidTextureOverload::kSampleLevel2dOffsetF32:
+ return R"(textureSampleLevel(texture, sampler, coords, level, offset))";
+ case ValidTextureOverload::kSampleLevel2dArrayF32:
+ return R"(textureSampleLevel(texture, sampler, coords, array_index, level))";
+ case ValidTextureOverload::kSampleLevel2dArrayOffsetF32:
+ return R"(textureSampleLevel(texture, sampler, coords, array_index, level, offset))";
+ case ValidTextureOverload::kSampleLevel3dF32:
+ return R"(textureSampleLevel(texture, sampler, coords, level))";
+ case ValidTextureOverload::kSampleLevel3dOffsetF32:
+ return R"(textureSampleLevel(texture, sampler, coords, level, offset))";
+ case ValidTextureOverload::kSampleLevelCubeF32:
+ return R"(textureSampleLevel(texture, sampler, coords, level))";
+ case ValidTextureOverload::kSampleLevelCubeArrayF32:
+ return R"(textureSampleLevel(texture, sampler, coords, array_index, level))";
+ case ValidTextureOverload::kSampleLevelDepth2dF32:
+ return R"(textureSampleLevel(texture, sampler, coords, level))";
+ case ValidTextureOverload::kSampleLevelDepth2dOffsetF32:
+ return R"(textureSampleLevel(texture, sampler, coords, level, offset))";
+ case ValidTextureOverload::kSampleLevelDepth2dArrayF32:
+ return R"(textureSampleLevel(texture, sampler, coords, array_index, level))";
+ case ValidTextureOverload::kSampleLevelDepth2dArrayOffsetF32:
+ return R"(textureSampleLevel(texture, sampler, coords, array_index, level, offset))";
+ case ValidTextureOverload::kSampleLevelDepthCubeF32:
+ return R"(textureSampleLevel(texture, sampler, coords, level))";
+ case ValidTextureOverload::kSampleLevelDepthCubeArrayF32:
+ return R"(textureSampleLevel(texture, sampler, coords, array_index, level))";
+ case ValidTextureOverload::kSampleGrad2dF32:
+ return R"(textureSampleGrad(texture, sampler, coords, ddx, ddy))";
+ case ValidTextureOverload::kSampleGrad2dOffsetF32:
+ return R"(textureSampleGrad(texture, sampler, coords, ddx, ddy, offset))";
+ case ValidTextureOverload::kSampleGrad2dArrayF32:
+ return R"(textureSampleGrad(texture, sampler, coords, array_index, ddx, ddy))";
+ case ValidTextureOverload::kSampleGrad2dArrayOffsetF32:
+ return R"(textureSampleGrad(texture, sampler, coords, array_index, ddx, ddy, offset))";
+ case ValidTextureOverload::kSampleGrad3dF32:
+ return R"(textureSampleGrad(texture, sampler, coords, ddx, ddy))";
+ case ValidTextureOverload::kSampleGrad3dOffsetF32:
+ return R"(textureSampleGrad(texture, sampler, coords, ddx, ddy, offset))";
+ case ValidTextureOverload::kSampleGradCubeF32:
+ return R"(textureSampleGrad(texture, sampler, coords, ddx, ddy))";
+ case ValidTextureOverload::kSampleGradCubeArrayF32:
+ return R"(textureSampleGrad(texture, sampler, coords, array_index, ddx, ddy))";
+ case ValidTextureOverload::kSampleCompareDepth2dF32:
+ return R"(textureSampleCompare(texture, sampler, coords, depth_ref))";
+ case ValidTextureOverload::kSampleCompareDepth2dOffsetF32:
+ return R"(textureSampleCompare(texture, sampler, coords, depth_ref, offset))";
+ case ValidTextureOverload::kSampleCompareDepth2dArrayF32:
+ return R"(textureSampleCompare(texture, sampler, coords, array_index, depth_ref))";
+ case ValidTextureOverload::kSampleCompareDepth2dArrayOffsetF32:
+ return R"(textureSampleCompare(texture, sampler, coords, array_index, depth_ref, offset))";
+ case ValidTextureOverload::kSampleCompareDepthCubeF32:
+ return R"(textureSampleCompare(texture, sampler, coords, depth_ref))";
+ case ValidTextureOverload::kSampleCompareDepthCubeArrayF32:
+ return R"(textureSampleCompare(texture, sampler, coords, array_index, depth_ref))";
+ case ValidTextureOverload::kSampleCompareLevelDepth2dF32:
+ return R"(textureSampleCompare(texture, sampler, coords, depth_ref))";
+ case ValidTextureOverload::kSampleCompareLevelDepth2dOffsetF32:
+ return R"(textureSampleCompare(texture, sampler, coords, depth_ref, offset))";
+ case ValidTextureOverload::kSampleCompareLevelDepth2dArrayF32:
+ return R"(textureSampleCompare(texture, sampler, coords, array_index, depth_ref))";
+ case ValidTextureOverload::kSampleCompareLevelDepth2dArrayOffsetF32:
+ return R"(textureSampleCompare(texture, sampler, coords, array_index, depth_ref, offset))";
+ case ValidTextureOverload::kSampleCompareLevelDepthCubeF32:
+ return R"(textureSampleCompare(texture, sampler, coords, depth_ref))";
+ case ValidTextureOverload::kSampleCompareLevelDepthCubeArrayF32:
+ return R"(textureSampleCompare(texture, sampler, coords, array_index, depth_ref))";
+ case ValidTextureOverload::kLoad1dLevelF32:
+ case ValidTextureOverload::kLoad1dLevelU32:
+ case ValidTextureOverload::kLoad1dLevelI32:
+ case ValidTextureOverload::kLoad2dLevelF32:
+ case ValidTextureOverload::kLoad2dLevelU32:
+ case ValidTextureOverload::kLoad2dLevelI32:
+ return R"(textureLoad(texture, coords, level))";
+ case ValidTextureOverload::kLoad2dArrayLevelF32:
+ case ValidTextureOverload::kLoad2dArrayLevelU32:
+ case ValidTextureOverload::kLoad2dArrayLevelI32:
+ return R"(textureLoad(texture, coords, array_index, level))";
+ case ValidTextureOverload::kLoad3dLevelF32:
+ case ValidTextureOverload::kLoad3dLevelU32:
+ case ValidTextureOverload::kLoad3dLevelI32:
+ case ValidTextureOverload::kLoadDepth2dLevelF32:
+ return R"(textureLoad(texture, coords, level))";
+ case ValidTextureOverload::kLoadDepthMultisampled2dF32:
+ case ValidTextureOverload::kLoadMultisampled2dF32:
+ case ValidTextureOverload::kLoadMultisampled2dU32:
+ case ValidTextureOverload::kLoadMultisampled2dI32:
+ return R"(textureLoad(texture, coords, sample_index))";
+ case ValidTextureOverload::kLoadDepth2dArrayLevelF32:
+ return R"(textureLoad(texture, coords, array_index, level))";
+ case ValidTextureOverload::kStoreWO1dRgba32float:
+ case ValidTextureOverload::kStoreWO2dRgba32float:
+ case ValidTextureOverload::kStoreWO3dRgba32float:
+ return R"(textureStore(texture, coords, value))";
+ case ValidTextureOverload::kStoreWO2dArrayRgba32float:
+ return R"(textureStore(texture, coords, array_index, value))";
}
- out << sem::str(param->Usage());
- first = false;
- }
- out << ")";
- return out.str();
-}
-
-const char* expected_texture_overload(
- ast::builtin::test::ValidTextureOverload overload) {
- using ValidTextureOverload = ast::builtin::test::ValidTextureOverload;
- switch (overload) {
- case ValidTextureOverload::kDimensions1d:
- case ValidTextureOverload::kDimensions2d:
- case ValidTextureOverload::kDimensions2dArray:
- case ValidTextureOverload::kDimensions3d:
- case ValidTextureOverload::kDimensionsCube:
- case ValidTextureOverload::kDimensionsCubeArray:
- case ValidTextureOverload::kDimensionsMultisampled2d:
- case ValidTextureOverload::kDimensionsDepth2d:
- case ValidTextureOverload::kDimensionsDepth2dArray:
- case ValidTextureOverload::kDimensionsDepthCube:
- case ValidTextureOverload::kDimensionsDepthCubeArray:
- case ValidTextureOverload::kDimensionsDepthMultisampled2d:
- case ValidTextureOverload::kDimensionsStorageWO1d:
- case ValidTextureOverload::kDimensionsStorageWO2d:
- case ValidTextureOverload::kDimensionsStorageWO2dArray:
- case ValidTextureOverload::kDimensionsStorageWO3d:
- return R"(textureDimensions(texture))";
- case ValidTextureOverload::kGather2dF32:
- return R"(textureGather(component, texture, sampler, coords))";
- case ValidTextureOverload::kGather2dOffsetF32:
- return R"(textureGather(component, texture, sampler, coords, offset))";
- case ValidTextureOverload::kGather2dArrayF32:
- return R"(textureGather(component, texture, sampler, coords, array_index))";
- case ValidTextureOverload::kGather2dArrayOffsetF32:
- return R"(textureGather(component, texture, sampler, coords, array_index, offset))";
- case ValidTextureOverload::kGatherCubeF32:
- return R"(textureGather(component, texture, sampler, coords))";
- case ValidTextureOverload::kGatherCubeArrayF32:
- return R"(textureGather(component, texture, sampler, coords, array_index))";
- case ValidTextureOverload::kGatherDepth2dF32:
- return R"(textureGather(texture, sampler, coords))";
- case ValidTextureOverload::kGatherDepth2dOffsetF32:
- return R"(textureGather(texture, sampler, coords, offset))";
- case ValidTextureOverload::kGatherDepth2dArrayF32:
- return R"(textureGather(texture, sampler, coords, array_index))";
- case ValidTextureOverload::kGatherDepth2dArrayOffsetF32:
- return R"(textureGather(texture, sampler, coords, array_index, offset))";
- case ValidTextureOverload::kGatherDepthCubeF32:
- return R"(textureGather(texture, sampler, coords))";
- case ValidTextureOverload::kGatherDepthCubeArrayF32:
- return R"(textureGather(texture, sampler, coords, array_index))";
- case ValidTextureOverload::kGatherCompareDepth2dF32:
- return R"(textureGatherCompare(texture, sampler, coords, depth_ref))";
- case ValidTextureOverload::kGatherCompareDepth2dOffsetF32:
- return R"(textureGatherCompare(texture, sampler, coords, depth_ref, offset))";
- case ValidTextureOverload::kGatherCompareDepth2dArrayF32:
- return R"(textureGatherCompare(texture, sampler, coords, array_index, depth_ref))";
- case ValidTextureOverload::kGatherCompareDepth2dArrayOffsetF32:
- return R"(textureGatherCompare(texture, sampler, coords, array_index, depth_ref, offset))";
- case ValidTextureOverload::kGatherCompareDepthCubeF32:
- return R"(textureGatherCompare(texture, sampler, coords, depth_ref))";
- case ValidTextureOverload::kGatherCompareDepthCubeArrayF32:
- return R"(textureGatherCompare(texture, sampler, coords, array_index, depth_ref))";
- case ValidTextureOverload::kNumLayers2dArray:
- case ValidTextureOverload::kNumLayersCubeArray:
- case ValidTextureOverload::kNumLayersDepth2dArray:
- case ValidTextureOverload::kNumLayersDepthCubeArray:
- case ValidTextureOverload::kNumLayersStorageWO2dArray:
- return R"(textureNumLayers(texture))";
- case ValidTextureOverload::kNumLevels2d:
- case ValidTextureOverload::kNumLevels2dArray:
- case ValidTextureOverload::kNumLevels3d:
- case ValidTextureOverload::kNumLevelsCube:
- case ValidTextureOverload::kNumLevelsCubeArray:
- case ValidTextureOverload::kNumLevelsDepth2d:
- case ValidTextureOverload::kNumLevelsDepth2dArray:
- case ValidTextureOverload::kNumLevelsDepthCube:
- case ValidTextureOverload::kNumLevelsDepthCubeArray:
- return R"(textureNumLevels(texture))";
- case ValidTextureOverload::kNumSamplesDepthMultisampled2d:
- case ValidTextureOverload::kNumSamplesMultisampled2d:
- return R"(textureNumSamples(texture))";
- case ValidTextureOverload::kDimensions2dLevel:
- case ValidTextureOverload::kDimensions2dArrayLevel:
- case ValidTextureOverload::kDimensions3dLevel:
- case ValidTextureOverload::kDimensionsCubeLevel:
- case ValidTextureOverload::kDimensionsCubeArrayLevel:
- case ValidTextureOverload::kDimensionsDepth2dLevel:
- case ValidTextureOverload::kDimensionsDepth2dArrayLevel:
- case ValidTextureOverload::kDimensionsDepthCubeLevel:
- case ValidTextureOverload::kDimensionsDepthCubeArrayLevel:
- return R"(textureDimensions(texture, level))";
- case ValidTextureOverload::kSample1dF32:
- return R"(textureSample(texture, sampler, coords))";
- case ValidTextureOverload::kSample2dF32:
- return R"(textureSample(texture, sampler, coords))";
- case ValidTextureOverload::kSample2dOffsetF32:
- return R"(textureSample(texture, sampler, coords, offset))";
- case ValidTextureOverload::kSample2dArrayF32:
- return R"(textureSample(texture, sampler, coords, array_index))";
- case ValidTextureOverload::kSample2dArrayOffsetF32:
- return R"(textureSample(texture, sampler, coords, array_index, offset))";
- case ValidTextureOverload::kSample3dF32:
- return R"(textureSample(texture, sampler, coords))";
- case ValidTextureOverload::kSample3dOffsetF32:
- return R"(textureSample(texture, sampler, coords, offset))";
- case ValidTextureOverload::kSampleCubeF32:
- return R"(textureSample(texture, sampler, coords))";
- case ValidTextureOverload::kSampleCubeArrayF32:
- return R"(textureSample(texture, sampler, coords, array_index))";
- case ValidTextureOverload::kSampleDepth2dF32:
- return R"(textureSample(texture, sampler, coords))";
- case ValidTextureOverload::kSampleDepth2dOffsetF32:
- return R"(textureSample(texture, sampler, coords, offset))";
- case ValidTextureOverload::kSampleDepth2dArrayF32:
- return R"(textureSample(texture, sampler, coords, array_index))";
- case ValidTextureOverload::kSampleDepth2dArrayOffsetF32:
- return R"(textureSample(texture, sampler, coords, array_index, offset))";
- case ValidTextureOverload::kSampleDepthCubeF32:
- return R"(textureSample(texture, sampler, coords))";
- case ValidTextureOverload::kSampleDepthCubeArrayF32:
- return R"(textureSample(texture, sampler, coords, array_index))";
- case ValidTextureOverload::kSampleBias2dF32:
- return R"(textureSampleBias(texture, sampler, coords, bias))";
- case ValidTextureOverload::kSampleBias2dOffsetF32:
- return R"(textureSampleBias(texture, sampler, coords, bias, offset))";
- case ValidTextureOverload::kSampleBias2dArrayF32:
- return R"(textureSampleBias(texture, sampler, coords, array_index, bias))";
- case ValidTextureOverload::kSampleBias2dArrayOffsetF32:
- return R"(textureSampleBias(texture, sampler, coords, array_index, bias, offset))";
- case ValidTextureOverload::kSampleBias3dF32:
- return R"(textureSampleBias(texture, sampler, coords, bias))";
- case ValidTextureOverload::kSampleBias3dOffsetF32:
- return R"(textureSampleBias(texture, sampler, coords, bias, offset))";
- case ValidTextureOverload::kSampleBiasCubeF32:
- return R"(textureSampleBias(texture, sampler, coords, bias))";
- case ValidTextureOverload::kSampleBiasCubeArrayF32:
- return R"(textureSampleBias(texture, sampler, coords, array_index, bias))";
- case ValidTextureOverload::kSampleLevel2dF32:
- return R"(textureSampleLevel(texture, sampler, coords, level))";
- case ValidTextureOverload::kSampleLevel2dOffsetF32:
- return R"(textureSampleLevel(texture, sampler, coords, level, offset))";
- case ValidTextureOverload::kSampleLevel2dArrayF32:
- return R"(textureSampleLevel(texture, sampler, coords, array_index, level))";
- case ValidTextureOverload::kSampleLevel2dArrayOffsetF32:
- return R"(textureSampleLevel(texture, sampler, coords, array_index, level, offset))";
- case ValidTextureOverload::kSampleLevel3dF32:
- return R"(textureSampleLevel(texture, sampler, coords, level))";
- case ValidTextureOverload::kSampleLevel3dOffsetF32:
- return R"(textureSampleLevel(texture, sampler, coords, level, offset))";
- case ValidTextureOverload::kSampleLevelCubeF32:
- return R"(textureSampleLevel(texture, sampler, coords, level))";
- case ValidTextureOverload::kSampleLevelCubeArrayF32:
- return R"(textureSampleLevel(texture, sampler, coords, array_index, level))";
- case ValidTextureOverload::kSampleLevelDepth2dF32:
- return R"(textureSampleLevel(texture, sampler, coords, level))";
- case ValidTextureOverload::kSampleLevelDepth2dOffsetF32:
- return R"(textureSampleLevel(texture, sampler, coords, level, offset))";
- case ValidTextureOverload::kSampleLevelDepth2dArrayF32:
- return R"(textureSampleLevel(texture, sampler, coords, array_index, level))";
- case ValidTextureOverload::kSampleLevelDepth2dArrayOffsetF32:
- return R"(textureSampleLevel(texture, sampler, coords, array_index, level, offset))";
- case ValidTextureOverload::kSampleLevelDepthCubeF32:
- return R"(textureSampleLevel(texture, sampler, coords, level))";
- case ValidTextureOverload::kSampleLevelDepthCubeArrayF32:
- return R"(textureSampleLevel(texture, sampler, coords, array_index, level))";
- case ValidTextureOverload::kSampleGrad2dF32:
- return R"(textureSampleGrad(texture, sampler, coords, ddx, ddy))";
- case ValidTextureOverload::kSampleGrad2dOffsetF32:
- return R"(textureSampleGrad(texture, sampler, coords, ddx, ddy, offset))";
- case ValidTextureOverload::kSampleGrad2dArrayF32:
- return R"(textureSampleGrad(texture, sampler, coords, array_index, ddx, ddy))";
- case ValidTextureOverload::kSampleGrad2dArrayOffsetF32:
- return R"(textureSampleGrad(texture, sampler, coords, array_index, ddx, ddy, offset))";
- case ValidTextureOverload::kSampleGrad3dF32:
- return R"(textureSampleGrad(texture, sampler, coords, ddx, ddy))";
- case ValidTextureOverload::kSampleGrad3dOffsetF32:
- return R"(textureSampleGrad(texture, sampler, coords, ddx, ddy, offset))";
- case ValidTextureOverload::kSampleGradCubeF32:
- return R"(textureSampleGrad(texture, sampler, coords, ddx, ddy))";
- case ValidTextureOverload::kSampleGradCubeArrayF32:
- return R"(textureSampleGrad(texture, sampler, coords, array_index, ddx, ddy))";
- case ValidTextureOverload::kSampleCompareDepth2dF32:
- return R"(textureSampleCompare(texture, sampler, coords, depth_ref))";
- case ValidTextureOverload::kSampleCompareDepth2dOffsetF32:
- return R"(textureSampleCompare(texture, sampler, coords, depth_ref, offset))";
- case ValidTextureOverload::kSampleCompareDepth2dArrayF32:
- return R"(textureSampleCompare(texture, sampler, coords, array_index, depth_ref))";
- case ValidTextureOverload::kSampleCompareDepth2dArrayOffsetF32:
- return R"(textureSampleCompare(texture, sampler, coords, array_index, depth_ref, offset))";
- case ValidTextureOverload::kSampleCompareDepthCubeF32:
- return R"(textureSampleCompare(texture, sampler, coords, depth_ref))";
- case ValidTextureOverload::kSampleCompareDepthCubeArrayF32:
- return R"(textureSampleCompare(texture, sampler, coords, array_index, depth_ref))";
- case ValidTextureOverload::kSampleCompareLevelDepth2dF32:
- return R"(textureSampleCompare(texture, sampler, coords, depth_ref))";
- case ValidTextureOverload::kSampleCompareLevelDepth2dOffsetF32:
- return R"(textureSampleCompare(texture, sampler, coords, depth_ref, offset))";
- case ValidTextureOverload::kSampleCompareLevelDepth2dArrayF32:
- return R"(textureSampleCompare(texture, sampler, coords, array_index, depth_ref))";
- case ValidTextureOverload::kSampleCompareLevelDepth2dArrayOffsetF32:
- return R"(textureSampleCompare(texture, sampler, coords, array_index, depth_ref, offset))";
- case ValidTextureOverload::kSampleCompareLevelDepthCubeF32:
- return R"(textureSampleCompare(texture, sampler, coords, depth_ref))";
- case ValidTextureOverload::kSampleCompareLevelDepthCubeArrayF32:
- return R"(textureSampleCompare(texture, sampler, coords, array_index, depth_ref))";
- case ValidTextureOverload::kLoad1dLevelF32:
- case ValidTextureOverload::kLoad1dLevelU32:
- case ValidTextureOverload::kLoad1dLevelI32:
- case ValidTextureOverload::kLoad2dLevelF32:
- case ValidTextureOverload::kLoad2dLevelU32:
- case ValidTextureOverload::kLoad2dLevelI32:
- return R"(textureLoad(texture, coords, level))";
- case ValidTextureOverload::kLoad2dArrayLevelF32:
- case ValidTextureOverload::kLoad2dArrayLevelU32:
- case ValidTextureOverload::kLoad2dArrayLevelI32:
- return R"(textureLoad(texture, coords, array_index, level))";
- case ValidTextureOverload::kLoad3dLevelF32:
- case ValidTextureOverload::kLoad3dLevelU32:
- case ValidTextureOverload::kLoad3dLevelI32:
- case ValidTextureOverload::kLoadDepth2dLevelF32:
- return R"(textureLoad(texture, coords, level))";
- case ValidTextureOverload::kLoadDepthMultisampled2dF32:
- case ValidTextureOverload::kLoadMultisampled2dF32:
- case ValidTextureOverload::kLoadMultisampled2dU32:
- case ValidTextureOverload::kLoadMultisampled2dI32:
- return R"(textureLoad(texture, coords, sample_index))";
- case ValidTextureOverload::kLoadDepth2dArrayLevelF32:
- return R"(textureLoad(texture, coords, array_index, level))";
- case ValidTextureOverload::kStoreWO1dRgba32float:
- case ValidTextureOverload::kStoreWO2dRgba32float:
- case ValidTextureOverload::kStoreWO3dRgba32float:
- return R"(textureStore(texture, coords, value))";
- case ValidTextureOverload::kStoreWO2dArrayRgba32float:
- return R"(textureStore(texture, coords, array_index, value))";
- }
- return "<unmatched texture overload>";
+ return "<unmatched texture overload>";
}
TEST_P(ResolverBuiltinTest_Texture, Call) {
- auto param = GetParam();
-
- param.BuildTextureVariable(this);
- param.BuildSamplerVariable(this);
-
- auto* call = Call(param.function, param.args(this));
- auto* stmt = CallStmt(call);
- Func("func", {}, ty.void_(), {stmt}, {Stage(ast::PipelineStage::kFragment)});
-
- ASSERT_TRUE(r()->Resolve()) << r()->error();
-
- if (std::string(param.function) == "textureDimensions") {
- switch (param.texture_dimension) {
- default:
- FAIL() << "invalid texture dimensions: " << param.texture_dimension;
- case ast::TextureDimension::k1d:
+ auto param = GetParam();
+
+ param.BuildTextureVariable(this);
+ param.BuildSamplerVariable(this);
+
+ auto* call = Call(param.function, param.args(this));
+ auto* stmt = CallStmt(call);
+ Func("func", {}, ty.void_(), {stmt}, {Stage(ast::PipelineStage::kFragment)});
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
+
+ if (std::string(param.function) == "textureDimensions") {
+ switch (param.texture_dimension) {
+ default:
+ FAIL() << "invalid texture dimensions: " << param.texture_dimension;
+ case ast::TextureDimension::k1d:
+ EXPECT_TRUE(TypeOf(call)->Is<sem::I32>());
+ break;
+ case ast::TextureDimension::k2d:
+ case ast::TextureDimension::k2dArray:
+ case ast::TextureDimension::kCube:
+ case ast::TextureDimension::kCubeArray: {
+ auto* vec = As<sem::Vector>(TypeOf(call));
+ ASSERT_NE(vec, nullptr);
+ EXPECT_EQ(vec->Width(), 2u);
+ EXPECT_TRUE(vec->type()->Is<sem::I32>());
+ break;
+ }
+ case ast::TextureDimension::k3d: {
+ auto* vec = As<sem::Vector>(TypeOf(call));
+ ASSERT_NE(vec, nullptr);
+ EXPECT_EQ(vec->Width(), 3u);
+ EXPECT_TRUE(vec->type()->Is<sem::I32>());
+ break;
+ }
+ }
+ } else if (std::string(param.function) == "textureNumLayers") {
+ EXPECT_TRUE(TypeOf(call)->Is<sem::I32>());
+ } else if (std::string(param.function) == "textureNumLevels") {
EXPECT_TRUE(TypeOf(call)->Is<sem::I32>());
- break;
- case ast::TextureDimension::k2d:
- case ast::TextureDimension::k2dArray:
- case ast::TextureDimension::kCube:
- case ast::TextureDimension::kCubeArray: {
+ } else if (std::string(param.function) == "textureNumSamples") {
+ EXPECT_TRUE(TypeOf(call)->Is<sem::I32>());
+ } else if (std::string(param.function) == "textureStore") {
+ EXPECT_TRUE(TypeOf(call)->Is<sem::Void>());
+ } else if (std::string(param.function) == "textureGather") {
auto* vec = As<sem::Vector>(TypeOf(call));
ASSERT_NE(vec, nullptr);
- EXPECT_EQ(vec->Width(), 2u);
- EXPECT_TRUE(vec->type()->Is<sem::I32>());
- break;
- }
- case ast::TextureDimension::k3d: {
+ EXPECT_EQ(vec->Width(), 4u);
+ switch (param.texture_data_type) {
+ case ast::builtin::test::TextureDataType::kF32:
+ EXPECT_TRUE(vec->type()->Is<sem::F32>());
+ break;
+ case ast::builtin::test::TextureDataType::kU32:
+ EXPECT_TRUE(vec->type()->Is<sem::U32>());
+ break;
+ case ast::builtin::test::TextureDataType::kI32:
+ EXPECT_TRUE(vec->type()->Is<sem::I32>());
+ break;
+ }
+ } else if (std::string(param.function) == "textureGatherCompare") {
auto* vec = As<sem::Vector>(TypeOf(call));
ASSERT_NE(vec, nullptr);
- EXPECT_EQ(vec->Width(), 3u);
- EXPECT_TRUE(vec->type()->Is<sem::I32>());
- break;
- }
- }
- } else if (std::string(param.function) == "textureNumLayers") {
- EXPECT_TRUE(TypeOf(call)->Is<sem::I32>());
- } else if (std::string(param.function) == "textureNumLevels") {
- EXPECT_TRUE(TypeOf(call)->Is<sem::I32>());
- } else if (std::string(param.function) == "textureNumSamples") {
- EXPECT_TRUE(TypeOf(call)->Is<sem::I32>());
- } else if (std::string(param.function) == "textureStore") {
- EXPECT_TRUE(TypeOf(call)->Is<sem::Void>());
- } else if (std::string(param.function) == "textureGather") {
- auto* vec = As<sem::Vector>(TypeOf(call));
- ASSERT_NE(vec, nullptr);
- EXPECT_EQ(vec->Width(), 4u);
- switch (param.texture_data_type) {
- case ast::builtin::test::TextureDataType::kF32:
+ EXPECT_EQ(vec->Width(), 4u);
EXPECT_TRUE(vec->type()->Is<sem::F32>());
- break;
- case ast::builtin::test::TextureDataType::kU32:
- EXPECT_TRUE(vec->type()->Is<sem::U32>());
- break;
- case ast::builtin::test::TextureDataType::kI32:
- EXPECT_TRUE(vec->type()->Is<sem::I32>());
- break;
- }
- } else if (std::string(param.function) == "textureGatherCompare") {
- auto* vec = As<sem::Vector>(TypeOf(call));
- ASSERT_NE(vec, nullptr);
- EXPECT_EQ(vec->Width(), 4u);
- EXPECT_TRUE(vec->type()->Is<sem::F32>());
- } else {
- switch (param.texture_kind) {
- case ast::builtin::test::TextureKind::kRegular:
- case ast::builtin::test::TextureKind::kMultisampled:
- case ast::builtin::test::TextureKind::kStorage: {
- auto* vec = TypeOf(call)->As<sem::Vector>();
- ASSERT_NE(vec, nullptr);
- switch (param.texture_data_type) {
- case ast::builtin::test::TextureDataType::kF32:
- EXPECT_TRUE(vec->type()->Is<sem::F32>());
- break;
- case ast::builtin::test::TextureDataType::kU32:
- EXPECT_TRUE(vec->type()->Is<sem::U32>());
- break;
- case ast::builtin::test::TextureDataType::kI32:
- EXPECT_TRUE(vec->type()->Is<sem::I32>());
- break;
+ } else {
+ switch (param.texture_kind) {
+ case ast::builtin::test::TextureKind::kRegular:
+ case ast::builtin::test::TextureKind::kMultisampled:
+ case ast::builtin::test::TextureKind::kStorage: {
+ auto* vec = TypeOf(call)->As<sem::Vector>();
+ ASSERT_NE(vec, nullptr);
+ switch (param.texture_data_type) {
+ case ast::builtin::test::TextureDataType::kF32:
+ EXPECT_TRUE(vec->type()->Is<sem::F32>());
+ break;
+ case ast::builtin::test::TextureDataType::kU32:
+ EXPECT_TRUE(vec->type()->Is<sem::U32>());
+ break;
+ case ast::builtin::test::TextureDataType::kI32:
+ EXPECT_TRUE(vec->type()->Is<sem::I32>());
+ break;
+ }
+ break;
+ }
+ case ast::builtin::test::TextureKind::kDepth:
+ case ast::builtin::test::TextureKind::kDepthMultisampled: {
+ EXPECT_TRUE(TypeOf(call)->Is<sem::F32>());
+ break;
+ }
}
- break;
- }
- case ast::builtin::test::TextureKind::kDepth:
- case ast::builtin::test::TextureKind::kDepthMultisampled: {
- EXPECT_TRUE(TypeOf(call)->Is<sem::F32>());
- break;
- }
}
- }
- auto* call_sem = Sem().Get(call);
- ASSERT_NE(call_sem, nullptr);
- auto* target = call_sem->Target();
- ASSERT_NE(target, nullptr);
+ auto* call_sem = Sem().Get<sem::Call>(call);
+ ASSERT_NE(call_sem, nullptr);
+ auto* target = call_sem->Target();
+ ASSERT_NE(target, nullptr);
- auto got = resolver::to_str(param.function, target->Parameters());
- auto* expected = expected_texture_overload(param.overload);
- EXPECT_EQ(got, expected);
+ auto got = resolver::to_str(param.function, target->Parameters());
+ auto* expected = expected_texture_overload(param.overload);
+ EXPECT_EQ(got, expected);
}
} // namespace
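
Most of the churn in the hunks above is mechanical: the tests switch from plain C++ literals (1.0f, 1, 1u) to the typed literal suffixes 1_f, 1_i and 1_u brought in by the `using namespace tint::number_suffixes; // NOLINT` directive, and the indentation widens from two to four spaces. As a rough illustration of how such suffixes can be built — a minimal sketch with assumed names, not Tint's actual number_suffixes header — user-defined literal operators can wrap each value in a distinct scalar type so that overloaded builder calls resolve to the intended WGSL scalar:

// Minimal, self-contained sketch (assumed names; not Tint's real
// number_suffixes definitions) of typed literal suffixes like 1_f / 1_i / 1_u.
#include <cstdint>
#include <iostream>

namespace demo::number_suffixes {

struct F32 { float value; };     // stand-in for a typed f32 wrapper
struct I32 { int32_t value; };   // stand-in for a typed i32 wrapper
struct U32 { uint32_t value; };  // stand-in for a typed u32 wrapper

// Integer-literal forms give 1_f, 1_i, 1_u; the long-double overload also
// allows fractional forms such as 1.5_f.
constexpr F32 operator""_f(unsigned long long v) { return F32{static_cast<float>(v)}; }
constexpr F32 operator""_f(long double v) { return F32{static_cast<float>(v)}; }
constexpr I32 operator""_i(unsigned long long v) { return I32{static_cast<int32_t>(v)}; }
constexpr U32 operator""_u(unsigned long long v) { return U32{static_cast<uint32_t>(v)}; }

}  // namespace demo::number_suffixes

int main() {
    using namespace demo::number_suffixes;  // mirrors the NOLINT using-directive in the tests
    auto a = 1_f;  // F32{1.0f}
    auto b = 2_i;  // I32{2}
    auto c = 3_u;  // U32{3u}
    std::cout << a.value << " " << b.value << " " << c.value << "\n";
    return 0;
}

Because each suffix yields a distinct wrapper type rather than a bare int or float, an overloaded test builder such as Call() or vec3<f32>() can distinguish an i32 argument from a u32 or f32 one at compile time, which is presumably why the rewritten tests spell every scalar literal with an explicit suffix.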
diff --git a/chromium/third_party/dawn/src/tint/resolver/builtin_validation_test.cc b/chromium/third_party/dawn/src/tint/resolver/builtin_validation_test.cc
index df6a0a93b5d..ab40296c68f 100644
--- a/chromium/third_party/dawn/src/tint/resolver/builtin_validation_test.cc
+++ b/chromium/third_party/dawn/src/tint/resolver/builtin_validation_test.cc
@@ -15,58 +15,56 @@
#include "src/tint/ast/builtin_texture_helper_test.h"
#include "src/tint/resolver/resolver_test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::resolver {
namespace {
using ResolverBuiltinValidationTest = ResolverTest;
-TEST_F(ResolverBuiltinValidationTest,
- FunctionTypeMustMatchReturnStatementType_void_fail) {
- // fn func { return workgroupBarrier(); }
- Func("func", {}, ty.void_(),
- {
- Return(Call(Source{Source::Location{12, 34}}, "workgroupBarrier")),
- });
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: builtin 'workgroupBarrier' does not return a value");
+TEST_F(ResolverBuiltinValidationTest, FunctionTypeMustMatchReturnStatementType_void_fail) {
+ // fn func { return workgroupBarrier(); }
+ Func("func", {}, ty.void_(),
+ {
+ Return(Call(Source{Source::Location{12, 34}}, "workgroupBarrier")),
+ });
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: builtin 'workgroupBarrier' does not return a value");
}
TEST_F(ResolverBuiltinValidationTest, InvalidPipelineStageDirect) {
- // @stage(compute) @workgroup_size(1) fn func { return dpdx(1.0); }
+ // @compute @workgroup_size(1) fn func { return dpdx(1.0); }
- auto* dpdx = create<ast::CallExpression>(Source{{3, 4}}, Expr("dpdx"),
- ast::ExpressionList{Expr(1.0f)});
- Func(Source{{1, 2}}, "func", ast::VariableList{}, ty.void_(),
- {CallStmt(dpdx)},
- {Stage(ast::PipelineStage::kCompute), WorkgroupSize(1)});
+ auto* dpdx =
+ create<ast::CallExpression>(Source{{3, 4}}, Expr("dpdx"), ast::ExpressionList{Expr(1_f)});
+ Func(Source{{1, 2}}, "func", ast::VariableList{}, ty.void_(), {CallStmt(dpdx)},
+ {Stage(ast::PipelineStage::kCompute), WorkgroupSize(1_i)});
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "3:4 error: built-in cannot be used by compute pipeline stage");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "3:4 error: built-in cannot be used by compute pipeline stage");
}
TEST_F(ResolverBuiltinValidationTest, InvalidPipelineStageIndirect) {
- // fn f0 { return dpdx(1.0); }
- // fn f1 { f0(); }
- // fn f2 { f1(); }
- // @stage(compute) @workgroup_size(1) fn main { return f2(); }
+ // fn f0 { return dpdx(1.0); }
+ // fn f1 { f0(); }
+ // fn f2 { f1(); }
+ // @compute @workgroup_size(1) fn main { return f2(); }
- auto* dpdx = create<ast::CallExpression>(Source{{3, 4}}, Expr("dpdx"),
- ast::ExpressionList{Expr(1.0f)});
- Func(Source{{1, 2}}, "f0", {}, ty.void_(), {CallStmt(dpdx)});
+ auto* dpdx =
+ create<ast::CallExpression>(Source{{3, 4}}, Expr("dpdx"), ast::ExpressionList{Expr(1_f)});
+ Func(Source{{1, 2}}, "f0", {}, ty.void_(), {CallStmt(dpdx)});
- Func(Source{{3, 4}}, "f1", {}, ty.void_(), {CallStmt(Call("f0"))});
+ Func(Source{{3, 4}}, "f1", {}, ty.void_(), {CallStmt(Call("f0"))});
- Func(Source{{5, 6}}, "f2", {}, ty.void_(), {CallStmt(Call("f1"))});
+ Func(Source{{5, 6}}, "f2", {}, ty.void_(), {CallStmt(Call("f1"))});
- Func(Source{{7, 8}}, "main", {}, ty.void_(), {CallStmt(Call("f2"))},
- {Stage(ast::PipelineStage::kCompute), WorkgroupSize(1)});
+ Func(Source{{7, 8}}, "main", {}, ty.void_(), {CallStmt(Call("f2"))},
+ {Stage(ast::PipelineStage::kCompute), WorkgroupSize(1_i)});
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- R"(3:4 error: built-in cannot be used by compute pipeline stage
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ R"(3:4 error: built-in cannot be used by compute pipeline stage
1:2 note: called by function 'f0'
3:4 note: called by function 'f1'
5:6 note: called by function 'f2'
@@ -74,49 +72,43 @@ TEST_F(ResolverBuiltinValidationTest, InvalidPipelineStageIndirect) {
}
TEST_F(ResolverBuiltinValidationTest, BuiltinRedeclaredAsFunction) {
- Func(Source{{12, 34}}, "mix", {}, ty.i32(), {});
+ Func(Source{{12, 34}}, "mix", {}, ty.i32(), {});
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- R"(12:34 error: 'mix' is a builtin and cannot be redeclared as a function)");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ R"(12:34 error: 'mix' is a builtin and cannot be redeclared as a function)");
}
TEST_F(ResolverBuiltinValidationTest, BuiltinRedeclaredAsGlobalLet) {
- GlobalConst(Source{{12, 34}}, "mix", ty.i32(), Expr(1));
+ GlobalConst(Source{{12, 34}}, "mix", ty.i32(), Expr(1_i));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- R"(12:34 error: 'mix' is a builtin and cannot be redeclared as a module-scope let)");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ R"(12:34 error: 'mix' is a builtin and cannot be redeclared as a module-scope let)");
}
TEST_F(ResolverBuiltinValidationTest, BuiltinRedeclaredAsGlobalVar) {
- Global(Source{{12, 34}}, "mix", ty.i32(), Expr(1),
- ast::StorageClass::kPrivate);
+ Global(Source{{12, 34}}, "mix", ty.i32(), Expr(1_i), ast::StorageClass::kPrivate);
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- R"(12:34 error: 'mix' is a builtin and cannot be redeclared as a module-scope var)");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ R"(12:34 error: 'mix' is a builtin and cannot be redeclared as a module-scope var)");
}
TEST_F(ResolverBuiltinValidationTest, BuiltinRedeclaredAsAlias) {
- Alias(Source{{12, 34}}, "mix", ty.i32());
+ Alias(Source{{12, 34}}, "mix", ty.i32());
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- R"(12:34 error: 'mix' is a builtin and cannot be redeclared as an alias)");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ R"(12:34 error: 'mix' is a builtin and cannot be redeclared as an alias)");
}
TEST_F(ResolverBuiltinValidationTest, BuiltinRedeclaredAsStruct) {
- Structure(Source{{12, 34}}, "mix", {Member("m", ty.i32())});
+ Structure(Source{{12, 34}}, "mix", {Member("m", ty.i32())});
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- R"(12:34 error: 'mix' is a builtin and cannot be redeclared as a struct)");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ R"(12:34 error: 'mix' is a builtin and cannot be redeclared as a struct)");
}
namespace texture_constexpr_args {
@@ -125,276 +117,319 @@ using TextureOverloadCase = ast::builtin::test::TextureOverloadCase;
using ValidTextureOverload = ast::builtin::test::ValidTextureOverload;
using TextureKind = ast::builtin::test::TextureKind;
using TextureDataType = ast::builtin::test::TextureDataType;
-using u32 = ProgramBuilder::u32;
-using i32 = ProgramBuilder::i32;
-using f32 = ProgramBuilder::f32;
static std::vector<TextureOverloadCase> TextureCases(
std::unordered_set<ValidTextureOverload> overloads) {
- std::vector<TextureOverloadCase> cases;
- for (auto c : TextureOverloadCase::ValidCases()) {
- if (overloads.count(c.overload)) {
- cases.push_back(c);
+ std::vector<TextureOverloadCase> cases;
+ for (auto c : TextureOverloadCase::ValidCases()) {
+ if (overloads.count(c.overload)) {
+ cases.push_back(c);
+ }
}
- }
- return cases;
+ return cases;
}
enum class Position {
- kFirst,
- kLast,
+ kFirst,
+ kLast,
};
struct Parameter {
- const char* const name;
- const Position position;
- int min;
- int max;
+ const char* const name;
+ const Position position;
+ int min;
+ int max;
};
class Constexpr {
- public:
- enum class Kind {
- kScalar,
- kVec2,
- kVec3,
- kVec3_Scalar_Vec2,
- kVec3_Vec2_Scalar,
- kEmptyVec2,
- kEmptyVec3,
- };
-
- Constexpr(int32_t invalid_idx,
- Kind k,
- int32_t x = 0,
- int32_t y = 0,
- int32_t z = 0)
- : invalid_index(invalid_idx), kind(k), values{x, y, z} {}
-
- const ast::Expression* operator()(Source src, ProgramBuilder& b) {
- switch (kind) {
- case Kind::kScalar:
- return b.Expr(src, values[0]);
- case Kind::kVec2:
- return b.Construct(src, b.ty.vec2<i32>(), values[0], values[1]);
- case Kind::kVec3:
- return b.Construct(src, b.ty.vec3<i32>(), values[0], values[1],
- values[2]);
- case Kind::kVec3_Scalar_Vec2:
- return b.Construct(src, b.ty.vec3<i32>(), values[0],
- b.vec2<i32>(values[1], values[2]));
- case Kind::kVec3_Vec2_Scalar:
- return b.Construct(src, b.ty.vec3<i32>(),
- b.vec2<i32>(values[0], values[1]), values[2]);
- case Kind::kEmptyVec2:
- return b.Construct(src, b.ty.vec2<i32>());
- case Kind::kEmptyVec3:
- return b.Construct(src, b.ty.vec3<i32>());
+ public:
+ enum class Kind {
+ kScalar,
+ kVec2,
+ kVec3,
+ kVec3_Scalar_Vec2,
+ kVec3_Vec2_Scalar,
+ kEmptyVec2,
+ kEmptyVec3,
+ };
+
+ Constexpr(int32_t invalid_idx, Kind k, int32_t x = 0, int32_t y = 0, int32_t z = 0)
+ : invalid_index(invalid_idx), kind(k), values{x, y, z} {}
+
+ const ast::Expression* operator()(Source src, ProgramBuilder& b) {
+ switch (kind) {
+ case Kind::kScalar:
+ return b.Expr(src, i32(values[0]));
+ case Kind::kVec2:
+ return b.Construct(src, b.ty.vec2<i32>(), i32(values[0]), i32(values[1]));
+ case Kind::kVec3:
+ return b.Construct(src, b.ty.vec3<i32>(), i32(values[0]), i32(values[1]),
+ i32(values[2]));
+ case Kind::kVec3_Scalar_Vec2:
+ return b.Construct(src, b.ty.vec3<i32>(), i32(values[0]),
+ b.vec2<i32>(i32(values[1]), i32(values[2])));
+ case Kind::kVec3_Vec2_Scalar:
+ return b.Construct(src, b.ty.vec3<i32>(),
+ b.vec2<i32>(i32(values[0]), i32(values[1])), i32(values[2]));
+ case Kind::kEmptyVec2:
+ return b.Construct(src, b.ty.vec2<i32>());
+ case Kind::kEmptyVec3:
+ return b.Construct(src, b.ty.vec3<i32>());
+ }
+ return nullptr;
}
- return nullptr;
- }
- static const constexpr int32_t kValid = -1;
- const int32_t invalid_index; // Expected error value, or kValid
- const Kind kind;
- const std::array<int32_t, 3> values;
+ static const constexpr int32_t kValid = -1;
+ const int32_t invalid_index; // Expected error value, or kValid
+ const Kind kind;
+ const std::array<int32_t, 3> values;
};
static std::ostream& operator<<(std::ostream& out, Parameter param) {
- return out << param.name;
+ return out << param.name;
}
static std::ostream& operator<<(std::ostream& out, Constexpr expr) {
- switch (expr.kind) {
- case Constexpr::Kind::kScalar:
- return out << expr.values[0];
- case Constexpr::Kind::kVec2:
- return out << "vec2(" << expr.values[0] << ", " << expr.values[1] << ")";
- case Constexpr::Kind::kVec3:
- return out << "vec3(" << expr.values[0] << ", " << expr.values[1] << ", "
- << expr.values[2] << ")";
- case Constexpr::Kind::kVec3_Scalar_Vec2:
- return out << "vec3(" << expr.values[0] << ", vec2(" << expr.values[1]
- << ", " << expr.values[2] << "))";
- case Constexpr::Kind::kVec3_Vec2_Scalar:
- return out << "vec3(vec2(" << expr.values[0] << ", " << expr.values[1]
- << "), " << expr.values[2] << ")";
- case Constexpr::Kind::kEmptyVec2:
- return out << "vec2()";
- case Constexpr::Kind::kEmptyVec3:
- return out << "vec3()";
- }
- return out;
+ switch (expr.kind) {
+ case Constexpr::Kind::kScalar:
+ return out << expr.values[0];
+ case Constexpr::Kind::kVec2:
+ return out << "vec2(" << expr.values[0] << ", " << expr.values[1] << ")";
+ case Constexpr::Kind::kVec3:
+ return out << "vec3(" << expr.values[0] << ", " << expr.values[1] << ", "
+ << expr.values[2] << ")";
+ case Constexpr::Kind::kVec3_Scalar_Vec2:
+ return out << "vec3(" << expr.values[0] << ", vec2(" << expr.values[1] << ", "
+ << expr.values[2] << "))";
+ case Constexpr::Kind::kVec3_Vec2_Scalar:
+ return out << "vec3(vec2(" << expr.values[0] << ", " << expr.values[1] << "), "
+ << expr.values[2] << ")";
+ case Constexpr::Kind::kEmptyVec2:
+ return out << "vec2()";
+ case Constexpr::Kind::kEmptyVec3:
+ return out << "vec3()";
+ }
+ return out;
}
-using BuiltinTextureConstExprArgValidationTest = ResolverTestWithParam<
- std::tuple<TextureOverloadCase, Parameter, Constexpr>>;
+using BuiltinTextureConstExprArgValidationTest =
+ ResolverTestWithParam<std::tuple<TextureOverloadCase, Parameter, Constexpr>>;
TEST_P(BuiltinTextureConstExprArgValidationTest, Immediate) {
- auto& p = GetParam();
- auto overload = std::get<0>(p);
- auto param = std::get<1>(p);
- auto expr = std::get<2>(p);
+ auto& p = GetParam();
+ auto overload = std::get<0>(p);
+ auto param = std::get<1>(p);
+ auto expr = std::get<2>(p);
- overload.BuildTextureVariable(this);
- overload.BuildSamplerVariable(this);
+ overload.BuildTextureVariable(this);
+ overload.BuildSamplerVariable(this);
- auto args = overload.args(this);
- auto*& arg_to_replace =
- (param.position == Position::kFirst) ? args.front() : args.back();
+ auto args = overload.args(this);
+ auto*& arg_to_replace = (param.position == Position::kFirst) ? args.front() : args.back();
- // BuildTextureVariable() uses a Literal for scalars, and a CallExpression for
- // a vector constructor.
- bool is_vector = arg_to_replace->Is<ast::CallExpression>();
+ // BuildTextureVariable() uses a Literal for scalars, and a CallExpression for
+ // a vector constructor.
+ bool is_vector = arg_to_replace->Is<ast::CallExpression>();
- // Make the expression to be replaced, reachable. This keeps the resolver
- // happy.
- WrapInFunction(arg_to_replace);
+ // Make the expression to be replaced, reachable. This keeps the resolver
+ // happy.
+ WrapInFunction(arg_to_replace);
- arg_to_replace = expr(Source{{12, 34}}, *this);
+ arg_to_replace = expr(Source{{12, 34}}, *this);
- // Call the builtin with the constexpr argument replaced
- Func("func", {}, ty.void_(), {CallStmt(Call(overload.function, args))},
- {Stage(ast::PipelineStage::kFragment)});
+ // Call the builtin with the constexpr argument replaced
+ Func("func", {}, ty.void_(), {CallStmt(Call(overload.function, args))},
+ {Stage(ast::PipelineStage::kFragment)});
- if (expr.invalid_index == Constexpr::kValid) {
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- } else {
- EXPECT_FALSE(r()->Resolve());
- std::stringstream err;
- if (is_vector) {
- err << "12:34 error: each component of the " << param.name
- << " argument must be at least " << param.min << " and at most "
- << param.max << ". " << param.name << " component "
- << expr.invalid_index << " is "
- << std::to_string(expr.values[expr.invalid_index]);
+ if (expr.invalid_index == Constexpr::kValid) {
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
} else {
- err << "12:34 error: the " << param.name << " argument must be at least "
- << param.min << " and at most " << param.max << ". " << param.name
- << " is " << std::to_string(expr.values[expr.invalid_index]);
+ EXPECT_FALSE(r()->Resolve());
+ std::stringstream err;
+ if (is_vector) {
+ err << "12:34 error: each component of the " << param.name
+ << " argument must be at least " << param.min << " and at most " << param.max
+ << ". " << param.name << " component " << expr.invalid_index << " is "
+ << std::to_string(expr.values[expr.invalid_index]);
+ } else {
+ err << "12:34 error: the " << param.name << " argument must be at least " << param.min
+ << " and at most " << param.max << ". " << param.name << " is "
+ << std::to_string(expr.values[expr.invalid_index]);
+ }
+ EXPECT_EQ(r()->error(), err.str());
}
- EXPECT_EQ(r()->error(), err.str());
- }
}
TEST_P(BuiltinTextureConstExprArgValidationTest, GlobalConst) {
- auto& p = GetParam();
- auto overload = std::get<0>(p);
- auto param = std::get<1>(p);
- auto expr = std::get<2>(p);
+ auto& p = GetParam();
+ auto overload = std::get<0>(p);
+ auto param = std::get<1>(p);
+ auto expr = std::get<2>(p);
- // Build the global texture and sampler variables
- overload.BuildTextureVariable(this);
- overload.BuildSamplerVariable(this);
+ // Build the global texture and sampler variables
+ overload.BuildTextureVariable(this);
+ overload.BuildSamplerVariable(this);
- // Build the module-scope let 'G' with the offset value
- GlobalConst("G", nullptr, expr({}, *this));
+ // Build the module-scope let 'G' with the offset value
+ GlobalConst("G", nullptr, expr({}, *this));
- auto args = overload.args(this);
- auto*& arg_to_replace =
- (param.position == Position::kFirst) ? args.front() : args.back();
+ auto args = overload.args(this);
+ auto*& arg_to_replace = (param.position == Position::kFirst) ? args.front() : args.back();
- // Make the expression to be replaced, reachable. This keeps the resolver
- // happy.
- WrapInFunction(arg_to_replace);
+ // Make the expression to be replaced, reachable. This keeps the resolver
+ // happy.
+ WrapInFunction(arg_to_replace);
- arg_to_replace = Expr(Source{{12, 34}}, "G");
+ arg_to_replace = Expr(Source{{12, 34}}, "G");
- // Call the builtin with the constexpr argument replaced
- Func("func", {}, ty.void_(), {CallStmt(Call(overload.function, args))},
- {Stage(ast::PipelineStage::kFragment)});
+ // Call the builtin with the constexpr argument replaced
+ Func("func", {}, ty.void_(), {CallStmt(Call(overload.function, args))},
+ {Stage(ast::PipelineStage::kFragment)});
- EXPECT_FALSE(r()->Resolve());
- std::stringstream err;
- err << "12:34 error: the " << param.name
- << " argument must be a const_expression";
- EXPECT_EQ(r()->error(), err.str());
+ EXPECT_FALSE(r()->Resolve());
+ std::stringstream err;
+ err << "12:34 error: the " << param.name << " argument must be a const_expression";
+ EXPECT_EQ(r()->error(), err.str());
}
INSTANTIATE_TEST_SUITE_P(
Offset2D,
BuiltinTextureConstExprArgValidationTest,
- testing::Combine(
- testing::ValuesIn(TextureCases({
- ValidTextureOverload::kSample2dOffsetF32,
- ValidTextureOverload::kSample2dArrayOffsetF32,
- ValidTextureOverload::kSampleDepth2dOffsetF32,
- ValidTextureOverload::kSampleDepth2dArrayOffsetF32,
- ValidTextureOverload::kSampleBias2dOffsetF32,
- ValidTextureOverload::kSampleBias2dArrayOffsetF32,
- ValidTextureOverload::kSampleLevel2dOffsetF32,
- ValidTextureOverload::kSampleLevel2dArrayOffsetF32,
- ValidTextureOverload::kSampleLevelDepth2dOffsetF32,
- ValidTextureOverload::kSampleLevelDepth2dArrayOffsetF32,
- ValidTextureOverload::kSampleGrad2dOffsetF32,
- ValidTextureOverload::kSampleGrad2dArrayOffsetF32,
- ValidTextureOverload::kSampleCompareDepth2dOffsetF32,
- ValidTextureOverload::kSampleCompareDepth2dArrayOffsetF32,
- ValidTextureOverload::kSampleCompareLevelDepth2dOffsetF32,
- ValidTextureOverload::kSampleCompareLevelDepth2dArrayOffsetF32,
- })),
- testing::Values(Parameter{"offset", Position::kLast, -8, 7}),
- testing::Values(
- Constexpr{Constexpr::kValid, Constexpr::Kind::kEmptyVec2},
- Constexpr{Constexpr::kValid, Constexpr::Kind::kVec2, -1, 1},
- Constexpr{Constexpr::kValid, Constexpr::Kind::kVec2, 7, -8},
- Constexpr{0, Constexpr::Kind::kVec2, 8, 0},
- Constexpr{1, Constexpr::Kind::kVec2, 0, 8},
- Constexpr{0, Constexpr::Kind::kVec2, -9, 0},
- Constexpr{1, Constexpr::Kind::kVec2, 0, -9},
- Constexpr{0, Constexpr::Kind::kVec2, 8, 8},
- Constexpr{0, Constexpr::Kind::kVec2, -9, -9})));
+ testing::Combine(testing::ValuesIn(TextureCases({
+ ValidTextureOverload::kSample2dOffsetF32,
+ ValidTextureOverload::kSample2dArrayOffsetF32,
+ ValidTextureOverload::kSampleDepth2dOffsetF32,
+ ValidTextureOverload::kSampleDepth2dArrayOffsetF32,
+ ValidTextureOverload::kSampleBias2dOffsetF32,
+ ValidTextureOverload::kSampleBias2dArrayOffsetF32,
+ ValidTextureOverload::kSampleLevel2dOffsetF32,
+ ValidTextureOverload::kSampleLevel2dArrayOffsetF32,
+ ValidTextureOverload::kSampleLevelDepth2dOffsetF32,
+ ValidTextureOverload::kSampleLevelDepth2dArrayOffsetF32,
+ ValidTextureOverload::kSampleGrad2dOffsetF32,
+ ValidTextureOverload::kSampleGrad2dArrayOffsetF32,
+ ValidTextureOverload::kSampleCompareDepth2dOffsetF32,
+ ValidTextureOverload::kSampleCompareDepth2dArrayOffsetF32,
+ ValidTextureOverload::kSampleCompareLevelDepth2dOffsetF32,
+ ValidTextureOverload::kSampleCompareLevelDepth2dArrayOffsetF32,
+ })),
+ testing::Values(Parameter{"offset", Position::kLast, -8, 7}),
+ testing::Values(Constexpr{Constexpr::kValid, Constexpr::Kind::kEmptyVec2},
+ Constexpr{Constexpr::kValid, Constexpr::Kind::kVec2, -1, 1},
+ Constexpr{Constexpr::kValid, Constexpr::Kind::kVec2, 7, -8},
+ Constexpr{0, Constexpr::Kind::kVec2, 8, 0},
+ Constexpr{1, Constexpr::Kind::kVec2, 0, 8},
+ Constexpr{0, Constexpr::Kind::kVec2, -9, 0},
+ Constexpr{1, Constexpr::Kind::kVec2, 0, -9},
+ Constexpr{0, Constexpr::Kind::kVec2, 8, 8},
+ Constexpr{0, Constexpr::Kind::kVec2, -9, -9})));
INSTANTIATE_TEST_SUITE_P(
Offset3D,
BuiltinTextureConstExprArgValidationTest,
- testing::Combine(
- testing::ValuesIn(TextureCases({
- ValidTextureOverload::kSample3dOffsetF32,
- ValidTextureOverload::kSampleBias3dOffsetF32,
- ValidTextureOverload::kSampleLevel3dOffsetF32,
- ValidTextureOverload::kSampleGrad3dOffsetF32,
- })),
- testing::Values(Parameter{"offset", Position::kLast, -8, 7}),
- testing::Values(
- Constexpr{Constexpr::kValid, Constexpr::Kind::kEmptyVec3},
- Constexpr{Constexpr::kValid, Constexpr::Kind::kVec3, 0, 0, 0},
- Constexpr{Constexpr::kValid, Constexpr::Kind::kVec3, 7, -8, 7},
- Constexpr{0, Constexpr::Kind::kVec3, 10, 0, 0},
- Constexpr{1, Constexpr::Kind::kVec3, 0, 10, 0},
- Constexpr{2, Constexpr::Kind::kVec3, 0, 0, 10},
- Constexpr{0, Constexpr::Kind::kVec3, 10, 11, 12},
- Constexpr{0, Constexpr::Kind::kVec3_Scalar_Vec2, 10, 0, 0},
- Constexpr{1, Constexpr::Kind::kVec3_Scalar_Vec2, 0, 10, 0},
- Constexpr{2, Constexpr::Kind::kVec3_Scalar_Vec2, 0, 0, 10},
- Constexpr{0, Constexpr::Kind::kVec3_Scalar_Vec2, 10, 11, 12},
- Constexpr{0, Constexpr::Kind::kVec3_Vec2_Scalar, 10, 0, 0},
- Constexpr{1, Constexpr::Kind::kVec3_Vec2_Scalar, 0, 10, 0},
- Constexpr{2, Constexpr::Kind::kVec3_Vec2_Scalar, 0, 0, 10},
- Constexpr{0, Constexpr::Kind::kVec3_Vec2_Scalar, 10, 11, 12})));
+ testing::Combine(testing::ValuesIn(TextureCases({
+ ValidTextureOverload::kSample3dOffsetF32,
+ ValidTextureOverload::kSampleBias3dOffsetF32,
+ ValidTextureOverload::kSampleLevel3dOffsetF32,
+ ValidTextureOverload::kSampleGrad3dOffsetF32,
+ })),
+ testing::Values(Parameter{"offset", Position::kLast, -8, 7}),
+ testing::Values(Constexpr{Constexpr::kValid, Constexpr::Kind::kEmptyVec3},
+ Constexpr{Constexpr::kValid, Constexpr::Kind::kVec3, 0, 0, 0},
+ Constexpr{Constexpr::kValid, Constexpr::Kind::kVec3, 7, -8, 7},
+ Constexpr{0, Constexpr::Kind::kVec3, 10, 0, 0},
+ Constexpr{1, Constexpr::Kind::kVec3, 0, 10, 0},
+ Constexpr{2, Constexpr::Kind::kVec3, 0, 0, 10},
+ Constexpr{0, Constexpr::Kind::kVec3, 10, 11, 12},
+ Constexpr{0, Constexpr::Kind::kVec3_Scalar_Vec2, 10, 0, 0},
+ Constexpr{1, Constexpr::Kind::kVec3_Scalar_Vec2, 0, 10, 0},
+ Constexpr{2, Constexpr::Kind::kVec3_Scalar_Vec2, 0, 0, 10},
+ Constexpr{0, Constexpr::Kind::kVec3_Scalar_Vec2, 10, 11, 12},
+ Constexpr{0, Constexpr::Kind::kVec3_Vec2_Scalar, 10, 0, 0},
+ Constexpr{1, Constexpr::Kind::kVec3_Vec2_Scalar, 0, 10, 0},
+ Constexpr{2, Constexpr::Kind::kVec3_Vec2_Scalar, 0, 0, 10},
+ Constexpr{0, Constexpr::Kind::kVec3_Vec2_Scalar, 10, 11,
+ 12})));
INSTANTIATE_TEST_SUITE_P(
Component,
BuiltinTextureConstExprArgValidationTest,
- testing::Combine(
- testing::ValuesIn(
- TextureCases({ValidTextureOverload::kGather2dF32,
- ValidTextureOverload::kGather2dOffsetF32,
- ValidTextureOverload::kGather2dArrayF32,
- ValidTextureOverload::kGather2dArrayOffsetF32,
- ValidTextureOverload::kGatherCubeF32,
- ValidTextureOverload::kGatherCubeArrayF32})),
- testing::Values(Parameter{"component", Position::kFirst, 0, 3}),
- testing::Values(
- Constexpr{Constexpr::kValid, Constexpr::Kind::kScalar, 0},
- Constexpr{Constexpr::kValid, Constexpr::Kind::kScalar, 1},
- Constexpr{Constexpr::kValid, Constexpr::Kind::kScalar, 2},
- Constexpr{Constexpr::kValid, Constexpr::Kind::kScalar, 3},
- Constexpr{0, Constexpr::Kind::kScalar, 4},
- Constexpr{0, Constexpr::Kind::kScalar, 123},
- Constexpr{0, Constexpr::Kind::kScalar, -1})));
+ testing::Combine(testing::ValuesIn(TextureCases({ValidTextureOverload::kGather2dF32,
+ ValidTextureOverload::kGather2dOffsetF32,
+ ValidTextureOverload::kGather2dArrayF32,
+ ValidTextureOverload::kGather2dArrayOffsetF32,
+ ValidTextureOverload::kGatherCubeF32,
+ ValidTextureOverload::kGatherCubeArrayF32})),
+ testing::Values(Parameter{"component", Position::kFirst, 0, 3}),
+ testing::Values(Constexpr{Constexpr::kValid, Constexpr::Kind::kScalar, 0},
+ Constexpr{Constexpr::kValid, Constexpr::Kind::kScalar, 1},
+ Constexpr{Constexpr::kValid, Constexpr::Kind::kScalar, 2},
+ Constexpr{Constexpr::kValid, Constexpr::Kind::kScalar, 3},
+ Constexpr{0, Constexpr::Kind::kScalar, 4},
+ Constexpr{0, Constexpr::Kind::kScalar, 123},
+ Constexpr{0, Constexpr::Kind::kScalar, -1})));
} // namespace texture_constexpr_args
+// TODO(crbug.com/tint/1497): Update or remove ResolverDP4aExtensionValidationTest when the
+// experimental extension chromium_experimental_dp4a is not needed.
+using ResolverDP4aExtensionValidationTest = ResolverTest;
+
+TEST_F(ResolverDP4aExtensionValidationTest, Dot4I8PackedWithExtension) {
+ // enable chromium_experimental_dp4a;
+ // fn func { return dot4I8Packed(1u, 2u); }
+ Enable(ast::Extension::kChromiumExperimentalDP4a);
+
+ Func("func", {}, ty.i32(),
+ {
+ Return(Call(Source{Source::Location{12, 34}}, "dot4I8Packed",
+ ast::ExpressionList{Expr(1_u), Expr(2_u)})),
+ });
+
+ EXPECT_TRUE(r()->Resolve());
+}
+
+TEST_F(ResolverDP4aExtensionValidationTest, Dot4I8PackedWithoutExtension) {
+ // fn func { return dot4I8Packed(1u, 2u); }
+ Func("func", {}, ty.i32(),
+ {
+ Return(Call(Source{Source::Location{12, 34}}, "dot4I8Packed",
+ ast::ExpressionList{Expr(1_u), Expr(2_u)})),
+ });
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(
+ r()->error(),
+ R"(12:34 error: cannot call built-in function 'dot4I8Packed' without extension chromium_experimental_dp4a)");
+}
+
+TEST_F(ResolverDP4aExtensionValidationTest, Dot4U8PackedWithExtension) {
+ // enable chromium_experimental_dp4a;
+ // fn func { return dot4U8Packed(1u, 2u); }
+ Enable(ast::Extension::kChromiumExperimentalDP4a);
+
+ Func("func", {}, ty.u32(),
+ {
+ Return(Call(Source{Source::Location{12, 34}}, "dot4U8Packed",
+ ast::ExpressionList{Expr(1_u), Expr(2_u)})),
+ });
+
+ EXPECT_TRUE(r()->Resolve());
+}
+
+TEST_F(ResolverDP4aExtensionValidationTest, Dot4U8PackedWithoutExtension) {
+ // fn func { return dot4U8Packed(1u, 2u); }
+ Func("func", {}, ty.u32(),
+ {
+ Return(Call(Source{Source::Location{12, 34}}, "dot4U8Packed",
+ ast::ExpressionList{Expr(1_u), Expr(2_u)})),
+ });
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(
+ r()->error(),
+ R"(12:34 error: cannot call built-in function 'dot4U8Packed' without extension chromium_experimental_dp4a)");
+}
+
} // namespace
} // namespace tint::resolver
diff --git a/chromium/third_party/dawn/src/tint/resolver/builtins_validation_test.cc b/chromium/third_party/dawn/src/tint/resolver/builtins_validation_test.cc
index 4defd2c33a2..0c5948595e5 100644
--- a/chromium/third_party/dawn/src/tint/resolver/builtins_validation_test.cc
+++ b/chromium/third_party/dawn/src/tint/resolver/builtins_validation_test.cc
@@ -15,6 +15,8 @@
#include "src/tint/ast/call_statement.h"
#include "src/tint/resolver/resolver_test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::resolver {
namespace {
@@ -26,1011 +28,871 @@ template <typename T>
using vec3 = builder::vec3<T>;
template <typename T>
using vec4 = builder::vec4<T>;
-using f32 = builder::f32;
-using i32 = builder::i32;
-using u32 = builder::u32;
-class ResolverBuiltinsValidationTest : public resolver::TestHelper,
- public testing::Test {};
+class ResolverBuiltinsValidationTest : public resolver::TestHelper, public testing::Test {};
namespace StageTest {
struct Params {
- builder::ast_type_func_ptr type;
- ast::Builtin builtin;
- ast::PipelineStage stage;
- bool is_valid;
+ builder::ast_type_func_ptr type;
+ ast::Builtin builtin;
+ ast::PipelineStage stage;
+ bool is_valid;
};
template <typename T>
-constexpr Params ParamsFor(ast::Builtin builtin,
- ast::PipelineStage stage,
- bool is_valid) {
- return Params{DataType<T>::AST, builtin, stage, is_valid};
+constexpr Params ParamsFor(ast::Builtin builtin, ast::PipelineStage stage, bool is_valid) {
+ return Params{DataType<T>::AST, builtin, stage, is_valid};
}
static constexpr Params cases[] = {
- ParamsFor<vec4<f32>>(ast::Builtin::kPosition,
- ast::PipelineStage::kVertex,
- false),
- ParamsFor<vec4<f32>>(ast::Builtin::kPosition,
- ast::PipelineStage::kFragment,
- true),
- ParamsFor<vec4<f32>>(ast::Builtin::kPosition,
- ast::PipelineStage::kCompute,
- false),
-
- ParamsFor<u32>(ast::Builtin::kVertexIndex,
- ast::PipelineStage::kVertex,
- true),
- ParamsFor<u32>(ast::Builtin::kVertexIndex,
- ast::PipelineStage::kFragment,
- false),
- ParamsFor<u32>(ast::Builtin::kVertexIndex,
- ast::PipelineStage::kCompute,
- false),
-
- ParamsFor<u32>(ast::Builtin::kInstanceIndex,
- ast::PipelineStage::kVertex,
- true),
- ParamsFor<u32>(ast::Builtin::kInstanceIndex,
- ast::PipelineStage::kFragment,
- false),
- ParamsFor<u32>(ast::Builtin::kInstanceIndex,
- ast::PipelineStage::kCompute,
- false),
-
- ParamsFor<bool>(ast::Builtin::kFrontFacing,
- ast::PipelineStage::kVertex,
- false),
- ParamsFor<bool>(ast::Builtin::kFrontFacing,
- ast::PipelineStage::kFragment,
- true),
- ParamsFor<bool>(ast::Builtin::kFrontFacing,
- ast::PipelineStage::kCompute,
- false),
-
- ParamsFor<vec3<u32>>(ast::Builtin::kLocalInvocationId,
- ast::PipelineStage::kVertex,
- false),
- ParamsFor<vec3<u32>>(ast::Builtin::kLocalInvocationId,
- ast::PipelineStage::kFragment,
- false),
- ParamsFor<vec3<u32>>(ast::Builtin::kLocalInvocationId,
- ast::PipelineStage::kCompute,
- true),
-
- ParamsFor<u32>(ast::Builtin::kLocalInvocationIndex,
- ast::PipelineStage::kVertex,
- false),
- ParamsFor<u32>(ast::Builtin::kLocalInvocationIndex,
- ast::PipelineStage::kFragment,
- false),
- ParamsFor<u32>(ast::Builtin::kLocalInvocationIndex,
- ast::PipelineStage::kCompute,
- true),
-
- ParamsFor<vec3<u32>>(ast::Builtin::kGlobalInvocationId,
- ast::PipelineStage::kVertex,
- false),
- ParamsFor<vec3<u32>>(ast::Builtin::kGlobalInvocationId,
- ast::PipelineStage::kFragment,
- false),
- ParamsFor<vec3<u32>>(ast::Builtin::kGlobalInvocationId,
- ast::PipelineStage::kCompute,
- true),
-
- ParamsFor<vec3<u32>>(ast::Builtin::kWorkgroupId,
- ast::PipelineStage::kVertex,
- false),
- ParamsFor<vec3<u32>>(ast::Builtin::kWorkgroupId,
- ast::PipelineStage::kFragment,
- false),
- ParamsFor<vec3<u32>>(ast::Builtin::kWorkgroupId,
- ast::PipelineStage::kCompute,
- true),
-
- ParamsFor<vec3<u32>>(ast::Builtin::kNumWorkgroups,
- ast::PipelineStage::kVertex,
- false),
- ParamsFor<vec3<u32>>(ast::Builtin::kNumWorkgroups,
- ast::PipelineStage::kFragment,
- false),
- ParamsFor<vec3<u32>>(ast::Builtin::kNumWorkgroups,
- ast::PipelineStage::kCompute,
- true),
-
- ParamsFor<u32>(ast::Builtin::kSampleIndex,
- ast::PipelineStage::kVertex,
- false),
- ParamsFor<u32>(ast::Builtin::kSampleIndex,
- ast::PipelineStage::kFragment,
- true),
- ParamsFor<u32>(ast::Builtin::kSampleIndex,
- ast::PipelineStage::kCompute,
- false),
-
- ParamsFor<u32>(ast::Builtin::kSampleMask,
- ast::PipelineStage::kVertex,
- false),
- ParamsFor<u32>(ast::Builtin::kSampleMask,
- ast::PipelineStage::kFragment,
- true),
- ParamsFor<u32>(ast::Builtin::kSampleMask,
- ast::PipelineStage::kCompute,
- false),
+ ParamsFor<vec4<f32>>(ast::Builtin::kPosition, ast::PipelineStage::kVertex, false),
+ ParamsFor<vec4<f32>>(ast::Builtin::kPosition, ast::PipelineStage::kFragment, true),
+ ParamsFor<vec4<f32>>(ast::Builtin::kPosition, ast::PipelineStage::kCompute, false),
+
+ ParamsFor<u32>(ast::Builtin::kVertexIndex, ast::PipelineStage::kVertex, true),
+ ParamsFor<u32>(ast::Builtin::kVertexIndex, ast::PipelineStage::kFragment, false),
+ ParamsFor<u32>(ast::Builtin::kVertexIndex, ast::PipelineStage::kCompute, false),
+
+ ParamsFor<u32>(ast::Builtin::kInstanceIndex, ast::PipelineStage::kVertex, true),
+ ParamsFor<u32>(ast::Builtin::kInstanceIndex, ast::PipelineStage::kFragment, false),
+ ParamsFor<u32>(ast::Builtin::kInstanceIndex, ast::PipelineStage::kCompute, false),
+
+ ParamsFor<bool>(ast::Builtin::kFrontFacing, ast::PipelineStage::kVertex, false),
+ ParamsFor<bool>(ast::Builtin::kFrontFacing, ast::PipelineStage::kFragment, true),
+ ParamsFor<bool>(ast::Builtin::kFrontFacing, ast::PipelineStage::kCompute, false),
+
+ ParamsFor<vec3<u32>>(ast::Builtin::kLocalInvocationId, ast::PipelineStage::kVertex, false),
+ ParamsFor<vec3<u32>>(ast::Builtin::kLocalInvocationId, ast::PipelineStage::kFragment, false),
+ ParamsFor<vec3<u32>>(ast::Builtin::kLocalInvocationId, ast::PipelineStage::kCompute, true),
+
+ ParamsFor<u32>(ast::Builtin::kLocalInvocationIndex, ast::PipelineStage::kVertex, false),
+ ParamsFor<u32>(ast::Builtin::kLocalInvocationIndex, ast::PipelineStage::kFragment, false),
+ ParamsFor<u32>(ast::Builtin::kLocalInvocationIndex, ast::PipelineStage::kCompute, true),
+
+ ParamsFor<vec3<u32>>(ast::Builtin::kGlobalInvocationId, ast::PipelineStage::kVertex, false),
+ ParamsFor<vec3<u32>>(ast::Builtin::kGlobalInvocationId, ast::PipelineStage::kFragment, false),
+ ParamsFor<vec3<u32>>(ast::Builtin::kGlobalInvocationId, ast::PipelineStage::kCompute, true),
+
+ ParamsFor<vec3<u32>>(ast::Builtin::kWorkgroupId, ast::PipelineStage::kVertex, false),
+ ParamsFor<vec3<u32>>(ast::Builtin::kWorkgroupId, ast::PipelineStage::kFragment, false),
+ ParamsFor<vec3<u32>>(ast::Builtin::kWorkgroupId, ast::PipelineStage::kCompute, true),
+
+ ParamsFor<vec3<u32>>(ast::Builtin::kNumWorkgroups, ast::PipelineStage::kVertex, false),
+ ParamsFor<vec3<u32>>(ast::Builtin::kNumWorkgroups, ast::PipelineStage::kFragment, false),
+ ParamsFor<vec3<u32>>(ast::Builtin::kNumWorkgroups, ast::PipelineStage::kCompute, true),
+
+ ParamsFor<u32>(ast::Builtin::kSampleIndex, ast::PipelineStage::kVertex, false),
+ ParamsFor<u32>(ast::Builtin::kSampleIndex, ast::PipelineStage::kFragment, true),
+ ParamsFor<u32>(ast::Builtin::kSampleIndex, ast::PipelineStage::kCompute, false),
+
+ ParamsFor<u32>(ast::Builtin::kSampleMask, ast::PipelineStage::kVertex, false),
+ ParamsFor<u32>(ast::Builtin::kSampleMask, ast::PipelineStage::kFragment, true),
+ ParamsFor<u32>(ast::Builtin::kSampleMask, ast::PipelineStage::kCompute, false),
};
using ResolverBuiltinsStageTest = ResolverTestWithParam<Params>;
TEST_P(ResolverBuiltinsStageTest, All_input) {
- const Params& params = GetParam();
-
- auto* p = Global("p", ty.vec4<f32>(), ast::StorageClass::kPrivate);
- auto* input =
- Param("input", params.type(*this),
- ast::AttributeList{Builtin(Source{{12, 34}}, params.builtin)});
- switch (params.stage) {
- case ast::PipelineStage::kVertex:
- Func("main", {input}, ty.vec4<f32>(), {Return(p)},
- {Stage(ast::PipelineStage::kVertex)},
- {Builtin(Source{{12, 34}}, ast::Builtin::kPosition)});
- break;
- case ast::PipelineStage::kFragment:
- Func("main", {input}, ty.void_(), {},
- {Stage(ast::PipelineStage::kFragment)}, {});
- break;
- case ast::PipelineStage::kCompute:
- Func("main", {input}, ty.void_(), {},
- ast::AttributeList{Stage(ast::PipelineStage::kCompute),
- WorkgroupSize(1)});
- break;
- default:
- break;
- }
-
- if (params.is_valid) {
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- } else {
- std::stringstream err;
- err << "12:34 error: builtin(" << params.builtin << ")";
- err << " cannot be used in input of " << params.stage << " pipeline stage";
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), err.str());
- }
+ const Params& params = GetParam();
+
+ auto* p = Global("p", ty.vec4<f32>(), ast::StorageClass::kPrivate);
+ auto* input = Param("input", params.type(*this),
+ ast::AttributeList{Builtin(Source{{12, 34}}, params.builtin)});
+ switch (params.stage) {
+ case ast::PipelineStage::kVertex:
+ Func("main", {input}, ty.vec4<f32>(), {Return(p)}, {Stage(ast::PipelineStage::kVertex)},
+ {Builtin(Source{{12, 34}}, ast::Builtin::kPosition)});
+ break;
+ case ast::PipelineStage::kFragment:
+ Func("main", {input}, ty.void_(), {}, {Stage(ast::PipelineStage::kFragment)}, {});
+ break;
+ case ast::PipelineStage::kCompute:
+ Func("main", {input}, ty.void_(), {},
+ ast::AttributeList{Stage(ast::PipelineStage::kCompute), WorkgroupSize(1_i)});
+ break;
+ default:
+ break;
+ }
+
+ if (params.is_valid) {
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ } else {
+ std::stringstream err;
+ err << "12:34 error: builtin(" << params.builtin << ")";
+ err << " cannot be used in input of " << params.stage << " pipeline stage";
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), err.str());
+ }
}
INSTANTIATE_TEST_SUITE_P(ResolverBuiltinsValidationTest,
ResolverBuiltinsStageTest,
testing::ValuesIn(cases));
TEST_F(ResolverBuiltinsValidationTest, FragDepthIsInput_Fail) {
- // @stage(fragment)
- // fn fs_main(
- // @builtin(frag_depth) fd: f32,
- // ) -> @location(0) f32 { return 1.0; }
- auto* fd = Param(
- "fd", ty.f32(),
- ast::AttributeList{Builtin(Source{{12, 34}}, ast::Builtin::kFragDepth)});
- Func("fs_main", ast::VariableList{fd}, ty.f32(), {Return(1.0f)},
- ast::AttributeList{Stage(ast::PipelineStage::kFragment)}, {Location(0)});
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: builtin(frag_depth) cannot be used in input of "
- "fragment pipeline stage");
+ // @fragment
+ // fn fs_main(
+ // @builtin(frag_depth) fd: f32,
+ // ) -> @location(0) f32 { return 1.0; }
+ auto* fd = Param("fd", ty.f32(),
+ ast::AttributeList{Builtin(Source{{12, 34}}, ast::Builtin::kFragDepth)});
+ Func("fs_main", ast::VariableList{fd}, ty.f32(), {Return(1_f)},
+ ast::AttributeList{Stage(ast::PipelineStage::kFragment)}, {Location(0)});
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: builtin(frag_depth) cannot be used in input of "
+ "fragment pipeline stage");
}
TEST_F(ResolverBuiltinsValidationTest, FragDepthIsInputStruct_Fail) {
- // struct MyInputs {
- // @builtin(frag_depth) ff: f32;
- // };
- // @stage(fragment)
- // fn fragShader(arg: MyInputs) -> @location(0) f32 { return 1.0; }
-
- auto* s = Structure(
- "MyInputs", {Member("frag_depth", ty.f32(),
- ast::AttributeList{Builtin(
- Source{{12, 34}}, ast::Builtin::kFragDepth)})});
-
- Func("fragShader", {Param("arg", ty.Of(s))}, ty.f32(), {Return(1.0f)},
- {Stage(ast::PipelineStage::kFragment)}, {Location(0)});
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: builtin(frag_depth) cannot be used in input of "
- "fragment pipeline stage\n"
- "note: while analysing entry point 'fragShader'");
+ // struct MyInputs {
+ // @builtin(frag_depth) ff: f32;
+ // };
+ // @fragment
+ // fn fragShader(arg: MyInputs) -> @location(0) f32 { return 1.0; }
+
+ auto* s = Structure(
+ "MyInputs",
+ {Member("frag_depth", ty.f32(),
+ ast::AttributeList{Builtin(Source{{12, 34}}, ast::Builtin::kFragDepth)})});
+
+ Func("fragShader", {Param("arg", ty.Of(s))}, ty.f32(), {Return(1_f)},
+ {Stage(ast::PipelineStage::kFragment)}, {Location(0)});
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: builtin(frag_depth) cannot be used in input of "
+ "fragment pipeline stage\n"
+ "note: while analysing entry point 'fragShader'");
}
TEST_F(ResolverBuiltinsValidationTest, StructBuiltinInsideEntryPoint_Ignored) {
- // struct S {
- // @builtin(vertex_index) idx: u32;
- // };
- // @stage(fragment)
- // fn fragShader() { var s : S; }
+ // struct S {
+ // @builtin(vertex_index) idx: u32;
+ // };
+ // @fragment
+ // fn fragShader() { var s : S; }
- Structure("S",
- {Member("idx", ty.u32(), {Builtin(ast::Builtin::kVertexIndex)})});
+ Structure("S", {Member("idx", ty.u32(), {Builtin(ast::Builtin::kVertexIndex)})});
- Func("fragShader", {}, ty.void_(), {Decl(Var("s", ty.type_name("S")))},
- {Stage(ast::PipelineStage::kFragment)});
- EXPECT_TRUE(r()->Resolve());
+ Func("fragShader", {}, ty.void_(), {Decl(Var("s", ty.type_name("S")))},
+ {Stage(ast::PipelineStage::kFragment)});
+ EXPECT_TRUE(r()->Resolve());
}
} // namespace StageTest
TEST_F(ResolverBuiltinsValidationTest, PositionNotF32_Struct_Fail) {
- // struct MyInputs {
- // @builtin(kPosition) p: vec4<u32>;
- // };
- // @stage(fragment)
- // fn fragShader(is_front: MyInputs) -> @location(0) f32 { return 1.0; }
-
- auto* m = Member(
- "position", ty.vec4<u32>(),
- ast::AttributeList{Builtin(Source{{12, 34}}, ast::Builtin::kPosition)});
- auto* s = Structure("MyInputs", {m});
- Func("fragShader", {Param("arg", ty.Of(s))}, ty.f32(), {Return(1.0f)},
- {Stage(ast::PipelineStage::kFragment)}, {Location(0)});
+ // struct MyInputs {
+ // @builtin(kPosition) p: vec4<u32>;
+ // };
+ // @fragment
+ // fn fragShader(is_front: MyInputs) -> @location(0) f32 { return 1.0; }
+
+ auto* m = Member("position", ty.vec4<u32>(),
+ ast::AttributeList{Builtin(Source{{12, 34}}, ast::Builtin::kPosition)});
+ auto* s = Structure("MyInputs", {m});
+ Func("fragShader", {Param("arg", ty.Of(s))}, ty.f32(), {Return(1_f)},
+ {Stage(ast::PipelineStage::kFragment)}, {Location(0)});
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: store type of builtin(position) must be 'vec4<f32>'");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: store type of builtin(position) must be 'vec4<f32>'");
}
TEST_F(ResolverBuiltinsValidationTest, PositionNotF32_ReturnType_Fail) {
- // @stage(vertex)
- // fn main() -> @builtin(position) f32 { return 1.0; }
- Func("main", {}, ty.f32(), {Return(1.0f)},
- {Stage(ast::PipelineStage::kVertex)},
- {Builtin(Source{{12, 34}}, ast::Builtin::kPosition)});
+ // @vertex
+ // fn main() -> @builtin(position) f32 { return 1.0; }
+ Func("main", {}, ty.f32(), {Return(1_f)}, {Stage(ast::PipelineStage::kVertex)},
+ {Builtin(Source{{12, 34}}, ast::Builtin::kPosition)});
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: store type of builtin(position) must be 'vec4<f32>'");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: store type of builtin(position) must be 'vec4<f32>'");
}
TEST_F(ResolverBuiltinsValidationTest, FragDepthNotF32_Struct_Fail) {
- // struct MyInputs {
- // @builtin(kFragDepth) p: i32;
- // };
- // @stage(fragment)
- // fn fragShader(is_front: MyInputs) -> @location(0) f32 { return 1.0; }
+ // struct MyInputs {
+ // @builtin(kFragDepth) p: i32;
+ // };
+ // @fragment
+ // fn fragShader(is_front: MyInputs) -> @location(0) f32 { return 1.0; }
+
+ auto* m = Member("frag_depth", ty.i32(),
+ ast::AttributeList{Builtin(Source{{12, 34}}, ast::Builtin::kFragDepth)});
+ auto* s = Structure("MyInputs", {m});
+ Func("fragShader", {Param("arg", ty.Of(s))}, ty.f32(), {Return(1_f)},
+ {Stage(ast::PipelineStage::kFragment)}, {Location(0)});
- auto* m = Member(
- "frag_depth", ty.i32(),
- ast::AttributeList{Builtin(Source{{12, 34}}, ast::Builtin::kFragDepth)});
- auto* s = Structure("MyInputs", {m});
- Func("fragShader", {Param("arg", ty.Of(s))}, ty.f32(), {Return(1.0f)},
- {Stage(ast::PipelineStage::kFragment)}, {Location(0)});
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: store type of builtin(frag_depth) must be 'f32'");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: store type of builtin(frag_depth) must be 'f32'");
}
TEST_F(ResolverBuiltinsValidationTest, SampleMaskNotU32_Struct_Fail) {
- // struct MyInputs {
- // @builtin(sample_mask) m: f32;
- // };
- // @stage(fragment)
- // fn fragShader(is_front: MyInputs) -> @location(0) f32 { return 1.0; }
-
- auto* s = Structure(
- "MyInputs", {Member("m", ty.f32(),
- ast::AttributeList{Builtin(
- Source{{12, 34}}, ast::Builtin::kSampleMask)})});
- Func("fragShader", {Param("arg", ty.Of(s))}, ty.f32(), {Return(1.0f)},
- {Stage(ast::PipelineStage::kFragment)}, {Location(0)});
+ // struct MyInputs {
+ // @builtin(sample_mask) m: f32;
+ // };
+ // @fragment
+ // fn fragShader(is_front: MyInputs) -> @location(0) f32 { return 1.0; }
+
+ auto* s = Structure(
+ "MyInputs",
+ {Member("m", ty.f32(),
+ ast::AttributeList{Builtin(Source{{12, 34}}, ast::Builtin::kSampleMask)})});
+ Func("fragShader", {Param("arg", ty.Of(s))}, ty.f32(), {Return(1_f)},
+ {Stage(ast::PipelineStage::kFragment)}, {Location(0)});
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: store type of builtin(sample_mask) must be 'u32'");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: store type of builtin(sample_mask) must be 'u32'");
}
TEST_F(ResolverBuiltinsValidationTest, SampleMaskNotU32_ReturnType_Fail) {
- // @stage(fragment)
- // fn main() -> @builtin(sample_mask) i32 { return 1; }
- Func("main", {}, ty.i32(), {Return(1)},
- {Stage(ast::PipelineStage::kFragment)},
- {Builtin(Source{{12, 34}}, ast::Builtin::kSampleMask)});
+ // @fragment
+ // fn main() -> @builtin(sample_mask) i32 { return 1; }
+ Func("main", {}, ty.i32(), {Return(1_i)}, {Stage(ast::PipelineStage::kFragment)},
+ {Builtin(Source{{12, 34}}, ast::Builtin::kSampleMask)});
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: store type of builtin(sample_mask) must be 'u32'");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: store type of builtin(sample_mask) must be 'u32'");
}
TEST_F(ResolverBuiltinsValidationTest, SampleMaskIsNotU32_Fail) {
- // @stage(fragment)
- // fn fs_main(
- // @builtin(sample_mask) arg: bool
- // ) -> @location(0) f32 { return 1.0; }
- auto* arg = Param(
- "arg", ty.bool_(),
- ast::AttributeList{Builtin(Source{{12, 34}}, ast::Builtin::kSampleMask)});
- Func("fs_main", ast::VariableList{arg}, ty.f32(), {Return(1.0f)},
- ast::AttributeList{Stage(ast::PipelineStage::kFragment)}, {Location(0)});
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: store type of builtin(sample_mask) must be 'u32'");
+ // @fragment
+ // fn fs_main(
+ // @builtin(sample_mask) arg: bool
+ // ) -> @location(0) f32 { return 1.0; }
+ auto* arg = Param("arg", ty.bool_(),
+ ast::AttributeList{Builtin(Source{{12, 34}}, ast::Builtin::kSampleMask)});
+ Func("fs_main", ast::VariableList{arg}, ty.f32(), {Return(1_f)},
+ ast::AttributeList{Stage(ast::PipelineStage::kFragment)}, {Location(0)});
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: store type of builtin(sample_mask) must be 'u32'");
}
TEST_F(ResolverBuiltinsValidationTest, SampleIndexIsNotU32_Struct_Fail) {
- // struct MyInputs {
- // @builtin(sample_index) m: f32;
- // };
- // @stage(fragment)
- // fn fragShader(is_front: MyInputs) -> @location(0) f32 { return 1.0; }
+ // struct MyInputs {
+ // @builtin(sample_index) m: f32;
+ // };
+ // @fragment
+ // fn fragShader(is_front: MyInputs) -> @location(0) f32 { return 1.0; }
+
+ auto* s = Structure(
+ "MyInputs",
+ {Member("m", ty.f32(),
+ ast::AttributeList{Builtin(Source{{12, 34}}, ast::Builtin::kSampleIndex)})});
+ Func("fragShader", {Param("arg", ty.Of(s))}, ty.f32(), {Return(1_f)},
+ {Stage(ast::PipelineStage::kFragment)}, {Location(0)});
- auto* s = Structure(
- "MyInputs", {Member("m", ty.f32(),
- ast::AttributeList{Builtin(
- Source{{12, 34}}, ast::Builtin::kSampleIndex)})});
- Func("fragShader", {Param("arg", ty.Of(s))}, ty.f32(), {Return(1.0f)},
- {Stage(ast::PipelineStage::kFragment)}, {Location(0)});
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: store type of builtin(sample_index) must be 'u32'");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: store type of builtin(sample_index) must be 'u32'");
}
TEST_F(ResolverBuiltinsValidationTest, SampleIndexIsNotU32_Fail) {
- // @stage(fragment)
- // fn fs_main(
- // @builtin(sample_index) arg: bool
- // ) -> @location(0) f32 { return 1.0; }
- auto* arg = Param("arg", ty.bool_(),
- ast::AttributeList{
- Builtin(Source{{12, 34}}, ast::Builtin::kSampleIndex)});
- Func("fs_main", ast::VariableList{arg}, ty.f32(), {Return(1.0f)},
- ast::AttributeList{Stage(ast::PipelineStage::kFragment)}, {Location(0)});
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: store type of builtin(sample_index) must be 'u32'");
+ // @fragment
+ // fn fs_main(
+ // @builtin(sample_index) arg: bool
+ // ) -> @location(0) f32 { return 1.0; }
+ auto* arg = Param("arg", ty.bool_(),
+ ast::AttributeList{Builtin(Source{{12, 34}}, ast::Builtin::kSampleIndex)});
+ Func("fs_main", ast::VariableList{arg}, ty.f32(), {Return(1_f)},
+ ast::AttributeList{Stage(ast::PipelineStage::kFragment)}, {Location(0)});
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: store type of builtin(sample_index) must be 'u32'");
}
TEST_F(ResolverBuiltinsValidationTest, PositionIsNotF32_Fail) {
- // @stage(fragment)
- // fn fs_main(
- // @builtin(kPosition) p: vec3<f32>,
- // ) -> @location(0) f32 { return 1.0; }
- auto* p = Param(
- "p", ty.vec3<f32>(),
- ast::AttributeList{Builtin(Source{{12, 34}}, ast::Builtin::kPosition)});
- Func("fs_main", ast::VariableList{p}, ty.f32(), {Return(1.0f)},
- ast::AttributeList{Stage(ast::PipelineStage::kFragment)}, {Location(0)});
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: store type of builtin(position) must be 'vec4<f32>'");
+ // @fragment
+ // fn fs_main(
+ // @builtin(kPosition) p: vec3<f32>,
+ // ) -> @location(0) f32 { return 1.0; }
+ auto* p = Param("p", ty.vec3<f32>(),
+ ast::AttributeList{Builtin(Source{{12, 34}}, ast::Builtin::kPosition)});
+ Func("fs_main", ast::VariableList{p}, ty.f32(), {Return(1_f)},
+ ast::AttributeList{Stage(ast::PipelineStage::kFragment)}, {Location(0)});
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: store type of builtin(position) must be 'vec4<f32>'");
}
TEST_F(ResolverBuiltinsValidationTest, FragDepthIsNotF32_Fail) {
- // @stage(fragment)
- // fn fs_main() -> @builtin(kFragDepth) f32 { var fd: i32; return fd; }
- auto* fd = Var("fd", ty.i32());
- Func("fs_main", {}, ty.i32(), {Decl(fd), Return(fd)},
- ast::AttributeList{Stage(ast::PipelineStage::kFragment)},
- ast::AttributeList{Builtin(Source{{12, 34}}, ast::Builtin::kFragDepth)});
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: store type of builtin(frag_depth) must be 'f32'");
+ // @fragment
+ // fn fs_main() -> @builtin(kFragDepth) f32 { var fd: i32; return fd; }
+ auto* fd = Var("fd", ty.i32());
+ Func("fs_main", {}, ty.i32(), {Decl(fd), Return(fd)},
+ ast::AttributeList{Stage(ast::PipelineStage::kFragment)},
+ ast::AttributeList{Builtin(Source{{12, 34}}, ast::Builtin::kFragDepth)});
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: store type of builtin(frag_depth) must be 'f32'");
}
TEST_F(ResolverBuiltinsValidationTest, VertexIndexIsNotU32_Fail) {
- // @stage(vertex)
- // fn main(
- // @builtin(kVertexIndex) vi : f32,
- // @builtin(kPosition) p :vec4<f32>
- // ) -> @builtin(kPosition) vec4<f32> { return vec4<f32>(); }
- auto* p = Param("p", ty.vec4<f32>(),
- ast::AttributeList{Builtin(ast::Builtin::kPosition)});
- auto* vi = Param("vi", ty.f32(),
- ast::AttributeList{
- Builtin(Source{{12, 34}}, ast::Builtin::kVertexIndex)});
- Func("main", ast::VariableList{vi, p}, ty.vec4<f32>(), {Return(Expr("p"))},
- ast::AttributeList{Stage(ast::PipelineStage::kVertex)},
- ast::AttributeList{Builtin(ast::Builtin::kPosition)});
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: store type of builtin(vertex_index) must be 'u32'");
+ // @vertex
+ // fn main(
+ // @builtin(kVertexIndex) vi : f32,
+ // @builtin(kPosition) p :vec4<f32>
+ // ) -> @builtin(kPosition) vec4<f32> { return vec4<f32>(); }
+ auto* p = Param("p", ty.vec4<f32>(), ast::AttributeList{Builtin(ast::Builtin::kPosition)});
+ auto* vi = Param("vi", ty.f32(),
+ ast::AttributeList{Builtin(Source{{12, 34}}, ast::Builtin::kVertexIndex)});
+ Func("main", ast::VariableList{vi, p}, ty.vec4<f32>(), {Return(Expr("p"))},
+ ast::AttributeList{Stage(ast::PipelineStage::kVertex)},
+ ast::AttributeList{Builtin(ast::Builtin::kPosition)});
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: store type of builtin(vertex_index) must be 'u32'");
}
TEST_F(ResolverBuiltinsValidationTest, InstanceIndexIsNotU32) {
- // @stage(vertex)
- // fn main(
- // @builtin(kInstanceIndex) ii : f32,
- // @builtin(kPosition) p :vec4<f32>
- // ) -> @builtin(kPosition) vec4<f32> { return vec4<f32>(); }
- auto* p = Param("p", ty.vec4<f32>(),
- ast::AttributeList{Builtin(ast::Builtin::kPosition)});
- auto* ii = Param("ii", ty.f32(),
- ast::AttributeList{Builtin(Source{{12, 34}},
- ast::Builtin::kInstanceIndex)});
- Func("main", ast::VariableList{ii, p}, ty.vec4<f32>(), {Return(Expr("p"))},
- ast::AttributeList{Stage(ast::PipelineStage::kVertex)},
- ast::AttributeList{Builtin(ast::Builtin::kPosition)});
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: store type of builtin(instance_index) must be 'u32'");
+ // @vertex
+ // fn main(
+ // @builtin(kInstanceIndex) ii : f32,
+ // @builtin(kPosition) p :vec4<f32>
+ // ) -> @builtin(kPosition) vec4<f32> { return vec4<f32>(); }
+ auto* p = Param("p", ty.vec4<f32>(), ast::AttributeList{Builtin(ast::Builtin::kPosition)});
+ auto* ii = Param("ii", ty.f32(),
+ ast::AttributeList{Builtin(Source{{12, 34}}, ast::Builtin::kInstanceIndex)});
+ Func("main", ast::VariableList{ii, p}, ty.vec4<f32>(), {Return(Expr("p"))},
+ ast::AttributeList{Stage(ast::PipelineStage::kVertex)},
+ ast::AttributeList{Builtin(ast::Builtin::kPosition)});
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: store type of builtin(instance_index) must be 'u32'");
}
TEST_F(ResolverBuiltinsValidationTest, FragmentBuiltin_Pass) {
- // @stage(fragment)
- // fn fs_main(
- // @builtin(kPosition) p: vec4<f32>,
- // @builtin(front_facing) ff: bool,
- // @builtin(sample_index) si: u32,
- // @builtin(sample_mask) sm : u32
- // ) -> @builtin(frag_depth) f32 { var fd: f32; return fd; }
- auto* p = Param("p", ty.vec4<f32>(),
- ast::AttributeList{Builtin(ast::Builtin::kPosition)});
- auto* ff = Param("ff", ty.bool_(),
- ast::AttributeList{Builtin(ast::Builtin::kFrontFacing)});
- auto* si = Param("si", ty.u32(),
- ast::AttributeList{Builtin(ast::Builtin::kSampleIndex)});
- auto* sm = Param("sm", ty.u32(),
- ast::AttributeList{Builtin(ast::Builtin::kSampleMask)});
- auto* var_fd = Var("fd", ty.f32());
- Func("fs_main", ast::VariableList{p, ff, si, sm}, ty.f32(),
- {Decl(var_fd), Return(var_fd)},
- ast::AttributeList{Stage(ast::PipelineStage::kFragment)},
- ast::AttributeList{Builtin(ast::Builtin::kFragDepth)});
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ // @fragment
+ // fn fs_main(
+ // @builtin(kPosition) p: vec4<f32>,
+ // @builtin(front_facing) ff: bool,
+ // @builtin(sample_index) si: u32,
+ // @builtin(sample_mask) sm : u32
+ // ) -> @builtin(frag_depth) f32 { var fd: f32; return fd; }
+ auto* p = Param("p", ty.vec4<f32>(), ast::AttributeList{Builtin(ast::Builtin::kPosition)});
+ auto* ff = Param("ff", ty.bool_(), ast::AttributeList{Builtin(ast::Builtin::kFrontFacing)});
+ auto* si = Param("si", ty.u32(), ast::AttributeList{Builtin(ast::Builtin::kSampleIndex)});
+ auto* sm = Param("sm", ty.u32(), ast::AttributeList{Builtin(ast::Builtin::kSampleMask)});
+ auto* var_fd = Var("fd", ty.f32());
+ Func("fs_main", ast::VariableList{p, ff, si, sm}, ty.f32(), {Decl(var_fd), Return(var_fd)},
+ ast::AttributeList{Stage(ast::PipelineStage::kFragment)},
+ ast::AttributeList{Builtin(ast::Builtin::kFragDepth)});
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverBuiltinsValidationTest, VertexBuiltin_Pass) {
- // @stage(vertex)
- // fn main(
- // @builtin(vertex_index) vi : u32,
- // @builtin(instance_index) ii : u32,
- // ) -> @builtin(position) vec4<f32> { var p :vec4<f32>; return p; }
- auto* vi = Param("vi", ty.u32(),
- ast::AttributeList{
- Builtin(Source{{12, 34}}, ast::Builtin::kVertexIndex)});
-
- auto* ii = Param("ii", ty.u32(),
- ast::AttributeList{Builtin(Source{{12, 34}},
- ast::Builtin::kInstanceIndex)});
- auto* p = Var("p", ty.vec4<f32>());
- Func("main", ast::VariableList{vi, ii}, ty.vec4<f32>(),
- {
- Decl(p),
- Return(p),
- },
- ast::AttributeList{Stage(ast::PipelineStage::kVertex)},
- ast::AttributeList{Builtin(ast::Builtin::kPosition)});
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ // @vertex
+ // fn main(
+ // @builtin(vertex_index) vi : u32,
+ // @builtin(instance_index) ii : u32,
+ // ) -> @builtin(position) vec4<f32> { var p :vec4<f32>; return p; }
+ auto* vi = Param("vi", ty.u32(),
+ ast::AttributeList{Builtin(Source{{12, 34}}, ast::Builtin::kVertexIndex)});
+
+ auto* ii = Param("ii", ty.u32(),
+ ast::AttributeList{Builtin(Source{{12, 34}}, ast::Builtin::kInstanceIndex)});
+ auto* p = Var("p", ty.vec4<f32>());
+ Func("main", ast::VariableList{vi, ii}, ty.vec4<f32>(),
+ {
+ Decl(p),
+ Return(p),
+ },
+ ast::AttributeList{Stage(ast::PipelineStage::kVertex)},
+ ast::AttributeList{Builtin(ast::Builtin::kPosition)});
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverBuiltinsValidationTest, ComputeBuiltin_Pass) {
- // @stage(compute) @workgroup_size(1)
- // fn main(
- // @builtin(local_invocationId) li_id: vec3<u32>,
- // @builtin(local_invocationIndex) li_index: u32,
- // @builtin(global_invocationId) gi: vec3<u32>,
- // @builtin(workgroup_id) wi: vec3<u32>,
- // @builtin(num_workgroups) nwgs: vec3<u32>,
- // ) {}
-
- auto* li_id =
- Param("li_id", ty.vec3<u32>(),
- ast::AttributeList{Builtin(ast::Builtin::kLocalInvocationId)});
- auto* li_index =
- Param("li_index", ty.u32(),
- ast::AttributeList{Builtin(ast::Builtin::kLocalInvocationIndex)});
- auto* gi =
- Param("gi", ty.vec3<u32>(),
- ast::AttributeList{Builtin(ast::Builtin::kGlobalInvocationId)});
- auto* wi = Param("wi", ty.vec3<u32>(),
- ast::AttributeList{Builtin(ast::Builtin::kWorkgroupId)});
- auto* nwgs = Param("nwgs", ty.vec3<u32>(),
- ast::AttributeList{Builtin(ast::Builtin::kNumWorkgroups)});
-
- Func("main", ast::VariableList{li_id, li_index, gi, wi, nwgs}, ty.void_(), {},
- ast::AttributeList{
- Stage(ast::PipelineStage::kCompute),
- WorkgroupSize(Expr(Source{Source::Location{12, 34}}, 2))});
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ // @compute @workgroup_size(1)
+ // fn main(
+ // @builtin(local_invocationId) li_id: vec3<u32>,
+ // @builtin(local_invocationIndex) li_index: u32,
+ // @builtin(global_invocationId) gi: vec3<u32>,
+ // @builtin(workgroup_id) wi: vec3<u32>,
+ // @builtin(num_workgroups) nwgs: vec3<u32>,
+ // ) {}
+
+ auto* li_id = Param("li_id", ty.vec3<u32>(),
+ ast::AttributeList{Builtin(ast::Builtin::kLocalInvocationId)});
+ auto* li_index = Param("li_index", ty.u32(),
+ ast::AttributeList{Builtin(ast::Builtin::kLocalInvocationIndex)});
+ auto* gi =
+ Param("gi", ty.vec3<u32>(), ast::AttributeList{Builtin(ast::Builtin::kGlobalInvocationId)});
+ auto* wi = Param("wi", ty.vec3<u32>(), ast::AttributeList{Builtin(ast::Builtin::kWorkgroupId)});
+ auto* nwgs =
+ Param("nwgs", ty.vec3<u32>(), ast::AttributeList{Builtin(ast::Builtin::kNumWorkgroups)});
+
+ Func("main", ast::VariableList{li_id, li_index, gi, wi, nwgs}, ty.void_(), {},
+ ast::AttributeList{Stage(ast::PipelineStage::kCompute),
+ WorkgroupSize(Expr(Source{Source::Location{12, 34}}, 2_i))});
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverBuiltinsValidationTest, ComputeBuiltin_WorkGroupIdNotVec3U32) {
- auto* wi = Param("wi", ty.f32(),
- ast::AttributeList{
- Builtin(Source{{12, 34}}, ast::Builtin::kWorkgroupId)});
- Func("main", ast::VariableList{wi}, ty.void_(), {},
- ast::AttributeList{
- Stage(ast::PipelineStage::kCompute),
- WorkgroupSize(Expr(Source{Source::Location{12, 34}}, 2))});
+ auto* wi = Param("wi", ty.f32(),
+ ast::AttributeList{Builtin(Source{{12, 34}}, ast::Builtin::kWorkgroupId)});
+ Func("main", ast::VariableList{wi}, ty.void_(), {},
+ ast::AttributeList{Stage(ast::PipelineStage::kCompute),
+ WorkgroupSize(Expr(Source{Source::Location{12, 34}}, 2_i))});
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: store type of builtin(workgroup_id) must be "
- "'vec3<u32>'");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: store type of builtin(workgroup_id) must be "
+ "'vec3<u32>'");
}
TEST_F(ResolverBuiltinsValidationTest, ComputeBuiltin_NumWorkgroupsNotVec3U32) {
- auto* nwgs = Param("nwgs", ty.f32(),
- ast::AttributeList{Builtin(Source{{12, 34}},
- ast::Builtin::kNumWorkgroups)});
- Func("main", ast::VariableList{nwgs}, ty.void_(), {},
- ast::AttributeList{
- Stage(ast::PipelineStage::kCompute),
- WorkgroupSize(Expr(Source{Source::Location{12, 34}}, 2))});
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: store type of builtin(num_workgroups) must be "
- "'vec3<u32>'");
-}
-
-TEST_F(ResolverBuiltinsValidationTest,
- ComputeBuiltin_GlobalInvocationNotVec3U32) {
- auto* gi = Param("gi", ty.vec3<i32>(),
- ast::AttributeList{Builtin(
- Source{{12, 34}}, ast::Builtin::kGlobalInvocationId)});
- Func("main", ast::VariableList{gi}, ty.void_(), {},
- ast::AttributeList{
- Stage(ast::PipelineStage::kCompute),
- WorkgroupSize(Expr(Source{Source::Location{12, 34}}, 2))});
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: store type of builtin(global_invocation_id) must be "
- "'vec3<u32>'");
-}
-
-TEST_F(ResolverBuiltinsValidationTest,
- ComputeBuiltin_LocalInvocationIndexNotU32) {
- auto* li_index =
- Param("li_index", ty.vec3<u32>(),
- ast::AttributeList{Builtin(Source{{12, 34}},
- ast::Builtin::kLocalInvocationIndex)});
- Func("main", ast::VariableList{li_index}, ty.void_(), {},
- ast::AttributeList{
- Stage(ast::PipelineStage::kCompute),
- WorkgroupSize(Expr(Source{Source::Location{12, 34}}, 2))});
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- "12:34 error: store type of builtin(local_invocation_index) must be "
- "'u32'");
-}
-
-TEST_F(ResolverBuiltinsValidationTest,
- ComputeBuiltin_LocalInvocationNotVec3U32) {
- auto* li_id = Param("li_id", ty.vec2<u32>(),
- ast::AttributeList{Builtin(
- Source{{12, 34}}, ast::Builtin::kLocalInvocationId)});
- Func("main", ast::VariableList{li_id}, ty.void_(), {},
- ast::AttributeList{
- Stage(ast::PipelineStage::kCompute),
- WorkgroupSize(Expr(Source{Source::Location{12, 34}}, 2))});
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: store type of builtin(local_invocation_id) must be "
- "'vec3<u32>'");
+ auto* nwgs = Param("nwgs", ty.f32(),
+ ast::AttributeList{Builtin(Source{{12, 34}}, ast::Builtin::kNumWorkgroups)});
+ Func("main", ast::VariableList{nwgs}, ty.void_(), {},
+ ast::AttributeList{Stage(ast::PipelineStage::kCompute),
+ WorkgroupSize(Expr(Source{Source::Location{12, 34}}, 2_i))});
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: store type of builtin(num_workgroups) must be "
+ "'vec3<u32>'");
+}
+
+TEST_F(ResolverBuiltinsValidationTest, ComputeBuiltin_GlobalInvocationNotVec3U32) {
+ auto* gi =
+ Param("gi", ty.vec3<i32>(),
+ ast::AttributeList{Builtin(Source{{12, 34}}, ast::Builtin::kGlobalInvocationId)});
+ Func("main", ast::VariableList{gi}, ty.void_(), {},
+ ast::AttributeList{Stage(ast::PipelineStage::kCompute),
+ WorkgroupSize(Expr(Source{Source::Location{12, 34}}, 2_i))});
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: store type of builtin(global_invocation_id) must be "
+ "'vec3<u32>'");
+}
+
+TEST_F(ResolverBuiltinsValidationTest, ComputeBuiltin_LocalInvocationIndexNotU32) {
+ auto* li_index =
+ Param("li_index", ty.vec3<u32>(),
+ ast::AttributeList{Builtin(Source{{12, 34}}, ast::Builtin::kLocalInvocationIndex)});
+ Func("main", ast::VariableList{li_index}, ty.void_(), {},
+ ast::AttributeList{Stage(ast::PipelineStage::kCompute),
+ WorkgroupSize(Expr(Source{Source::Location{12, 34}}, 2_i))});
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: store type of builtin(local_invocation_index) must be "
+ "'u32'");
+}
+
+TEST_F(ResolverBuiltinsValidationTest, ComputeBuiltin_LocalInvocationNotVec3U32) {
+ auto* li_id =
+ Param("li_id", ty.vec2<u32>(),
+ ast::AttributeList{Builtin(Source{{12, 34}}, ast::Builtin::kLocalInvocationId)});
+ Func("main", ast::VariableList{li_id}, ty.void_(), {},
+ ast::AttributeList{Stage(ast::PipelineStage::kCompute),
+ WorkgroupSize(Expr(Source{Source::Location{12, 34}}, 2_i))});
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: store type of builtin(local_invocation_id) must be "
+ "'vec3<u32>'");
}
TEST_F(ResolverBuiltinsValidationTest, FragmentBuiltinStruct_Pass) {
- // Struct MyInputs {
- // @builtin(kPosition) p: vec4<f32>;
- // @builtin(frag_depth) fd: f32;
- // @builtin(sample_index) si: u32;
- // @builtin(sample_mask) sm : u32;;
- // };
- // @stage(fragment)
- // fn fragShader(arg: MyInputs) -> @location(0) f32 { return 1.0; }
-
- auto* s = Structure(
- "MyInputs",
- {Member("position", ty.vec4<f32>(),
- ast::AttributeList{Builtin(ast::Builtin::kPosition)}),
- Member("front_facing", ty.bool_(),
- ast::AttributeList{Builtin(ast::Builtin::kFrontFacing)}),
- Member("sample_index", ty.u32(),
- ast::AttributeList{Builtin(ast::Builtin::kSampleIndex)}),
- Member("sample_mask", ty.u32(),
- ast::AttributeList{Builtin(ast::Builtin::kSampleMask)})});
- Func("fragShader", {Param("arg", ty.Of(s))}, ty.f32(), {Return(1.0f)},
- {Stage(ast::PipelineStage::kFragment)}, {Location(0)});
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+  // struct MyInputs {
+  //   @builtin(position) position : vec4<f32>;
+  //   @builtin(front_facing) front_facing : bool;
+  //   @builtin(sample_index) sample_index : u32;
+  //   @builtin(sample_mask) sample_mask : u32;
+  // };
+ // @fragment
+ // fn fragShader(arg: MyInputs) -> @location(0) f32 { return 1.0; }
+
+ auto* s = Structure(
+ "MyInputs",
+ {Member("position", ty.vec4<f32>(), ast::AttributeList{Builtin(ast::Builtin::kPosition)}),
+ Member("front_facing", ty.bool_(),
+ ast::AttributeList{Builtin(ast::Builtin::kFrontFacing)}),
+ Member("sample_index", ty.u32(), ast::AttributeList{Builtin(ast::Builtin::kSampleIndex)}),
+ Member("sample_mask", ty.u32(), ast::AttributeList{Builtin(ast::Builtin::kSampleMask)})});
+ Func("fragShader", {Param("arg", ty.Of(s))}, ty.f32(), {Return(1_f)},
+ {Stage(ast::PipelineStage::kFragment)}, {Location(0)});
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverBuiltinsValidationTest, FrontFacingParamIsNotBool_Fail) {
- // @stage(fragment)
- // fn fs_main(
- // @builtin(front_facing) is_front: i32;
- // ) -> @location(0) f32 { return 1.0; }
+ // @fragment
+ // fn fs_main(
+ // @builtin(front_facing) is_front: i32;
+ // ) -> @location(0) f32 { return 1.0; }
- auto* is_front = Param("is_front", ty.i32(),
- ast::AttributeList{Builtin(
- Source{{12, 34}}, ast::Builtin::kFrontFacing)});
- Func("fs_main", ast::VariableList{is_front}, ty.f32(), {Return(1.0f)},
- ast::AttributeList{Stage(ast::PipelineStage::kFragment)}, {Location(0)});
+ auto* is_front =
+ Param("is_front", ty.i32(),
+ ast::AttributeList{Builtin(Source{{12, 34}}, ast::Builtin::kFrontFacing)});
+ Func("fs_main", ast::VariableList{is_front}, ty.f32(), {Return(1_f)},
+ ast::AttributeList{Stage(ast::PipelineStage::kFragment)}, {Location(0)});
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: store type of builtin(front_facing) must be 'bool'");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: store type of builtin(front_facing) must be 'bool'");
}
TEST_F(ResolverBuiltinsValidationTest, FrontFacingMemberIsNotBool_Fail) {
- // struct MyInputs {
- // @builtin(front_facing) pos: f32;
- // };
- // @stage(fragment)
- // fn fragShader(is_front: MyInputs) -> @location(0) f32 { return 1.0; }
-
- auto* s = Structure(
- "MyInputs", {Member("pos", ty.f32(),
- ast::AttributeList{Builtin(
- Source{{12, 34}}, ast::Builtin::kFrontFacing)})});
- Func("fragShader", {Param("is_front", ty.Of(s))}, ty.f32(), {Return(1.0f)},
- {Stage(ast::PipelineStage::kFragment)}, {Location(0)});
+ // struct MyInputs {
+ // @builtin(front_facing) pos: f32;
+ // };
+ // @fragment
+ // fn fragShader(is_front: MyInputs) -> @location(0) f32 { return 1.0; }
+
+ auto* s = Structure(
+ "MyInputs",
+ {Member("pos", ty.f32(),
+ ast::AttributeList{Builtin(Source{{12, 34}}, ast::Builtin::kFrontFacing)})});
+ Func("fragShader", {Param("is_front", ty.Of(s))}, ty.f32(), {Return(1_f)},
+ {Stage(ast::PipelineStage::kFragment)}, {Location(0)});
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: store type of builtin(front_facing) must be 'bool'");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: store type of builtin(front_facing) must be 'bool'");
}
TEST_F(ResolverBuiltinsValidationTest, Length_Float_Scalar) {
- auto* builtin = Call("length", 1.0f);
- WrapInFunction(builtin);
+ auto* builtin = Call("length", 1_f);
+ WrapInFunction(builtin);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverBuiltinsValidationTest, Length_Float_Vec2) {
- auto* builtin = Call("length", vec2<f32>(1.0f, 1.0f));
- WrapInFunction(builtin);
+ auto* builtin = Call("length", vec2<f32>(1_f, 1_f));
+ WrapInFunction(builtin);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverBuiltinsValidationTest, Length_Float_Vec3) {
- auto* builtin = Call("length", vec3<f32>(1.0f, 1.0f, 1.0f));
- WrapInFunction(builtin);
+ auto* builtin = Call("length", vec3<f32>(1_f, 1_f, 1_f));
+ WrapInFunction(builtin);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverBuiltinsValidationTest, Length_Float_Vec4) {
- auto* builtin = Call("length", vec4<f32>(1.0f, 1.0f, 1.0f, 1.0f));
- WrapInFunction(builtin);
+ auto* builtin = Call("length", vec4<f32>(1_f, 1_f, 1_f, 1_f));
+ WrapInFunction(builtin);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverBuiltinsValidationTest, Distance_Float_Scalar) {
- auto* builtin = Call("distance", 1.0f, 1.0f);
- WrapInFunction(builtin);
+ auto* builtin = Call("distance", 1_f, 1_f);
+ WrapInFunction(builtin);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverBuiltinsValidationTest, Distance_Float_Vec2) {
- auto* builtin =
- Call("distance", vec2<f32>(1.0f, 1.0f), vec2<f32>(1.0f, 1.0f));
- WrapInFunction(builtin);
+ auto* builtin = Call("distance", vec2<f32>(1_f, 1_f), vec2<f32>(1_f, 1_f));
+ WrapInFunction(builtin);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverBuiltinsValidationTest, Distance_Float_Vec3) {
- auto* builtin = Call("distance", vec3<f32>(1.0f, 1.0f, 1.0f),
- vec3<f32>(1.0f, 1.0f, 1.0f));
- WrapInFunction(builtin);
+ auto* builtin = Call("distance", vec3<f32>(1_f, 1_f, 1_f), vec3<f32>(1_f, 1_f, 1_f));
+ WrapInFunction(builtin);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverBuiltinsValidationTest, Distance_Float_Vec4) {
- auto* builtin = Call("distance", vec4<f32>(1.0f, 1.0f, 1.0f, 1.0f),
- vec4<f32>(1.0f, 1.0f, 1.0f, 1.0f));
- WrapInFunction(builtin);
+ auto* builtin = Call("distance", vec4<f32>(1_f, 1_f, 1_f, 1_f), vec4<f32>(1_f, 1_f, 1_f, 1_f));
+ WrapInFunction(builtin);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverBuiltinsValidationTest, Determinant_Mat2x2) {
- auto* builtin = Call(
- "determinant", mat2x2<f32>(vec2<f32>(1.0f, 1.0f), vec2<f32>(1.0f, 1.0f)));
- WrapInFunction(builtin);
+ auto* builtin = Call("determinant", mat2x2<f32>(vec2<f32>(1_f, 1_f), vec2<f32>(1_f, 1_f)));
+ WrapInFunction(builtin);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverBuiltinsValidationTest, Determinant_Mat3x3) {
- auto* builtin = Call("determinant", mat3x3<f32>(vec3<f32>(1.0f, 1.0f, 1.0f),
- vec3<f32>(1.0f, 1.0f, 1.0f),
- vec3<f32>(1.0f, 1.0f, 1.0f)));
- WrapInFunction(builtin);
+ auto* builtin = Call(
+ "determinant",
+ mat3x3<f32>(vec3<f32>(1_f, 1_f, 1_f), vec3<f32>(1_f, 1_f, 1_f), vec3<f32>(1_f, 1_f, 1_f)));
+ WrapInFunction(builtin);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverBuiltinsValidationTest, Determinant_Mat4x4) {
- auto* builtin =
- Call("determinant", mat4x4<f32>(vec4<f32>(1.0f, 1.0f, 1.0f, 1.0f),
- vec4<f32>(1.0f, 1.0f, 1.0f, 1.0f),
- vec4<f32>(1.0f, 1.0f, 1.0f, 1.0f),
- vec4<f32>(1.0f, 1.0f, 1.0f, 1.0f)));
- WrapInFunction(builtin);
+ auto* builtin = Call("determinant",
+ mat4x4<f32>(vec4<f32>(1_f, 1_f, 1_f, 1_f), vec4<f32>(1_f, 1_f, 1_f, 1_f),
+ vec4<f32>(1_f, 1_f, 1_f, 1_f), vec4<f32>(1_f, 1_f, 1_f, 1_f)));
+ WrapInFunction(builtin);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverBuiltinsValidationTest, Frexp_Scalar) {
- auto* builtin = Call("frexp", 1.0f);
- WrapInFunction(builtin);
+ auto* builtin = Call("frexp", 1_f);
+ WrapInFunction(builtin);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- auto* res_ty = TypeOf(builtin)->As<sem::Struct>();
- ASSERT_TRUE(res_ty != nullptr);
- auto& members = res_ty->Members();
- ASSERT_EQ(members.size(), 2u);
- EXPECT_TRUE(members[0]->Type()->Is<sem::F32>());
- EXPECT_TRUE(members[1]->Type()->Is<sem::I32>());
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ auto* res_ty = TypeOf(builtin)->As<sem::Struct>();
+ ASSERT_TRUE(res_ty != nullptr);
+ auto& members = res_ty->Members();
+ ASSERT_EQ(members.size(), 2u);
+ EXPECT_TRUE(members[0]->Type()->Is<sem::F32>());
+ EXPECT_TRUE(members[1]->Type()->Is<sem::I32>());
}
TEST_F(ResolverBuiltinsValidationTest, Frexp_Vec2) {
- auto* builtin = Call("frexp", vec2<f32>(1.0f, 1.0f));
- WrapInFunction(builtin);
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- auto* res_ty = TypeOf(builtin)->As<sem::Struct>();
- ASSERT_TRUE(res_ty != nullptr);
- auto& members = res_ty->Members();
- ASSERT_EQ(members.size(), 2u);
- ASSERT_TRUE(members[0]->Type()->Is<sem::Vector>());
- ASSERT_TRUE(members[1]->Type()->Is<sem::Vector>());
- EXPECT_EQ(members[0]->Type()->As<sem::Vector>()->Width(), 2u);
- EXPECT_TRUE(members[0]->Type()->As<sem::Vector>()->type()->Is<sem::F32>());
- EXPECT_EQ(members[1]->Type()->As<sem::Vector>()->Width(), 2u);
- EXPECT_TRUE(members[1]->Type()->As<sem::Vector>()->type()->Is<sem::I32>());
+ auto* builtin = Call("frexp", vec2<f32>(1_f, 1_f));
+ WrapInFunction(builtin);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ auto* res_ty = TypeOf(builtin)->As<sem::Struct>();
+ ASSERT_TRUE(res_ty != nullptr);
+ auto& members = res_ty->Members();
+ ASSERT_EQ(members.size(), 2u);
+ ASSERT_TRUE(members[0]->Type()->Is<sem::Vector>());
+ ASSERT_TRUE(members[1]->Type()->Is<sem::Vector>());
+ EXPECT_EQ(members[0]->Type()->As<sem::Vector>()->Width(), 2u);
+ EXPECT_TRUE(members[0]->Type()->As<sem::Vector>()->type()->Is<sem::F32>());
+ EXPECT_EQ(members[1]->Type()->As<sem::Vector>()->Width(), 2u);
+ EXPECT_TRUE(members[1]->Type()->As<sem::Vector>()->type()->Is<sem::I32>());
}
TEST_F(ResolverBuiltinsValidationTest, Frexp_Vec3) {
- auto* builtin = Call("frexp", vec3<f32>(1.0f, 1.0f, 1.0f));
- WrapInFunction(builtin);
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- auto* res_ty = TypeOf(builtin)->As<sem::Struct>();
- ASSERT_TRUE(res_ty != nullptr);
- auto& members = res_ty->Members();
- ASSERT_EQ(members.size(), 2u);
- ASSERT_TRUE(members[0]->Type()->Is<sem::Vector>());
- ASSERT_TRUE(members[1]->Type()->Is<sem::Vector>());
- EXPECT_EQ(members[0]->Type()->As<sem::Vector>()->Width(), 3u);
- EXPECT_TRUE(members[0]->Type()->As<sem::Vector>()->type()->Is<sem::F32>());
- EXPECT_EQ(members[1]->Type()->As<sem::Vector>()->Width(), 3u);
- EXPECT_TRUE(members[1]->Type()->As<sem::Vector>()->type()->Is<sem::I32>());
+ auto* builtin = Call("frexp", vec3<f32>(1_f, 1_f, 1_f));
+ WrapInFunction(builtin);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ auto* res_ty = TypeOf(builtin)->As<sem::Struct>();
+ ASSERT_TRUE(res_ty != nullptr);
+ auto& members = res_ty->Members();
+ ASSERT_EQ(members.size(), 2u);
+ ASSERT_TRUE(members[0]->Type()->Is<sem::Vector>());
+ ASSERT_TRUE(members[1]->Type()->Is<sem::Vector>());
+ EXPECT_EQ(members[0]->Type()->As<sem::Vector>()->Width(), 3u);
+ EXPECT_TRUE(members[0]->Type()->As<sem::Vector>()->type()->Is<sem::F32>());
+ EXPECT_EQ(members[1]->Type()->As<sem::Vector>()->Width(), 3u);
+ EXPECT_TRUE(members[1]->Type()->As<sem::Vector>()->type()->Is<sem::I32>());
}
TEST_F(ResolverBuiltinsValidationTest, Frexp_Vec4) {
- auto* builtin = Call("frexp", vec4<f32>(1.0f, 1.0f, 1.0f, 1.0f));
- WrapInFunction(builtin);
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- auto* res_ty = TypeOf(builtin)->As<sem::Struct>();
- ASSERT_TRUE(res_ty != nullptr);
- auto& members = res_ty->Members();
- ASSERT_EQ(members.size(), 2u);
- ASSERT_TRUE(members[0]->Type()->Is<sem::Vector>());
- ASSERT_TRUE(members[1]->Type()->Is<sem::Vector>());
- EXPECT_EQ(members[0]->Type()->As<sem::Vector>()->Width(), 4u);
- EXPECT_TRUE(members[0]->Type()->As<sem::Vector>()->type()->Is<sem::F32>());
- EXPECT_EQ(members[1]->Type()->As<sem::Vector>()->Width(), 4u);
- EXPECT_TRUE(members[1]->Type()->As<sem::Vector>()->type()->Is<sem::I32>());
+ auto* builtin = Call("frexp", vec4<f32>(1_f, 1_f, 1_f, 1_f));
+ WrapInFunction(builtin);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ auto* res_ty = TypeOf(builtin)->As<sem::Struct>();
+ ASSERT_TRUE(res_ty != nullptr);
+ auto& members = res_ty->Members();
+ ASSERT_EQ(members.size(), 2u);
+ ASSERT_TRUE(members[0]->Type()->Is<sem::Vector>());
+ ASSERT_TRUE(members[1]->Type()->Is<sem::Vector>());
+ EXPECT_EQ(members[0]->Type()->As<sem::Vector>()->Width(), 4u);
+ EXPECT_TRUE(members[0]->Type()->As<sem::Vector>()->type()->Is<sem::F32>());
+ EXPECT_EQ(members[1]->Type()->As<sem::Vector>()->Width(), 4u);
+ EXPECT_TRUE(members[1]->Type()->As<sem::Vector>()->type()->Is<sem::I32>());
}
TEST_F(ResolverBuiltinsValidationTest, Modf_Scalar) {
- auto* builtin = Call("modf", 1.0f);
- WrapInFunction(builtin);
+ auto* builtin = Call("modf", 1_f);
+ WrapInFunction(builtin);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- auto* res_ty = TypeOf(builtin)->As<sem::Struct>();
- ASSERT_TRUE(res_ty != nullptr);
- auto& members = res_ty->Members();
- ASSERT_EQ(members.size(), 2u);
- EXPECT_TRUE(members[0]->Type()->Is<sem::F32>());
- EXPECT_TRUE(members[1]->Type()->Is<sem::F32>());
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ auto* res_ty = TypeOf(builtin)->As<sem::Struct>();
+ ASSERT_TRUE(res_ty != nullptr);
+ auto& members = res_ty->Members();
+ ASSERT_EQ(members.size(), 2u);
+ EXPECT_TRUE(members[0]->Type()->Is<sem::F32>());
+ EXPECT_TRUE(members[1]->Type()->Is<sem::F32>());
}
TEST_F(ResolverBuiltinsValidationTest, Modf_Vec2) {
- auto* builtin = Call("modf", vec2<f32>(1.0f, 1.0f));
- WrapInFunction(builtin);
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- auto* res_ty = TypeOf(builtin)->As<sem::Struct>();
- ASSERT_TRUE(res_ty != nullptr);
- auto& members = res_ty->Members();
- ASSERT_EQ(members.size(), 2u);
- ASSERT_TRUE(members[0]->Type()->Is<sem::Vector>());
- ASSERT_TRUE(members[1]->Type()->Is<sem::Vector>());
- EXPECT_EQ(members[0]->Type()->As<sem::Vector>()->Width(), 2u);
- EXPECT_TRUE(members[0]->Type()->As<sem::Vector>()->type()->Is<sem::F32>());
- EXPECT_EQ(members[1]->Type()->As<sem::Vector>()->Width(), 2u);
- EXPECT_TRUE(members[1]->Type()->As<sem::Vector>()->type()->Is<sem::F32>());
+ auto* builtin = Call("modf", vec2<f32>(1_f, 1_f));
+ WrapInFunction(builtin);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ auto* res_ty = TypeOf(builtin)->As<sem::Struct>();
+ ASSERT_TRUE(res_ty != nullptr);
+ auto& members = res_ty->Members();
+ ASSERT_EQ(members.size(), 2u);
+ ASSERT_TRUE(members[0]->Type()->Is<sem::Vector>());
+ ASSERT_TRUE(members[1]->Type()->Is<sem::Vector>());
+ EXPECT_EQ(members[0]->Type()->As<sem::Vector>()->Width(), 2u);
+ EXPECT_TRUE(members[0]->Type()->As<sem::Vector>()->type()->Is<sem::F32>());
+ EXPECT_EQ(members[1]->Type()->As<sem::Vector>()->Width(), 2u);
+ EXPECT_TRUE(members[1]->Type()->As<sem::Vector>()->type()->Is<sem::F32>());
}
TEST_F(ResolverBuiltinsValidationTest, Modf_Vec3) {
- auto* builtin = Call("modf", vec3<f32>(1.0f, 1.0f, 1.0f));
- WrapInFunction(builtin);
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- auto* res_ty = TypeOf(builtin)->As<sem::Struct>();
- ASSERT_TRUE(res_ty != nullptr);
- auto& members = res_ty->Members();
- ASSERT_EQ(members.size(), 2u);
- ASSERT_TRUE(members[0]->Type()->Is<sem::Vector>());
- ASSERT_TRUE(members[1]->Type()->Is<sem::Vector>());
- EXPECT_EQ(members[0]->Type()->As<sem::Vector>()->Width(), 3u);
- EXPECT_TRUE(members[0]->Type()->As<sem::Vector>()->type()->Is<sem::F32>());
- EXPECT_EQ(members[1]->Type()->As<sem::Vector>()->Width(), 3u);
- EXPECT_TRUE(members[1]->Type()->As<sem::Vector>()->type()->Is<sem::F32>());
+ auto* builtin = Call("modf", vec3<f32>(1_f, 1_f, 1_f));
+ WrapInFunction(builtin);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ auto* res_ty = TypeOf(builtin)->As<sem::Struct>();
+ ASSERT_TRUE(res_ty != nullptr);
+ auto& members = res_ty->Members();
+ ASSERT_EQ(members.size(), 2u);
+ ASSERT_TRUE(members[0]->Type()->Is<sem::Vector>());
+ ASSERT_TRUE(members[1]->Type()->Is<sem::Vector>());
+ EXPECT_EQ(members[0]->Type()->As<sem::Vector>()->Width(), 3u);
+ EXPECT_TRUE(members[0]->Type()->As<sem::Vector>()->type()->Is<sem::F32>());
+ EXPECT_EQ(members[1]->Type()->As<sem::Vector>()->Width(), 3u);
+ EXPECT_TRUE(members[1]->Type()->As<sem::Vector>()->type()->Is<sem::F32>());
}
TEST_F(ResolverBuiltinsValidationTest, Modf_Vec4) {
- auto* builtin = Call("modf", vec4<f32>(1.0f, 1.0f, 1.0f, 1.0f));
- WrapInFunction(builtin);
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- auto* res_ty = TypeOf(builtin)->As<sem::Struct>();
- ASSERT_TRUE(res_ty != nullptr);
- auto& members = res_ty->Members();
- ASSERT_EQ(members.size(), 2u);
- ASSERT_TRUE(members[0]->Type()->Is<sem::Vector>());
- ASSERT_TRUE(members[1]->Type()->Is<sem::Vector>());
- EXPECT_EQ(members[0]->Type()->As<sem::Vector>()->Width(), 4u);
- EXPECT_TRUE(members[0]->Type()->As<sem::Vector>()->type()->Is<sem::F32>());
- EXPECT_EQ(members[1]->Type()->As<sem::Vector>()->Width(), 4u);
- EXPECT_TRUE(members[1]->Type()->As<sem::Vector>()->type()->Is<sem::F32>());
+ auto* builtin = Call("modf", vec4<f32>(1_f, 1_f, 1_f, 1_f));
+ WrapInFunction(builtin);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ auto* res_ty = TypeOf(builtin)->As<sem::Struct>();
+ ASSERT_TRUE(res_ty != nullptr);
+ auto& members = res_ty->Members();
+ ASSERT_EQ(members.size(), 2u);
+ ASSERT_TRUE(members[0]->Type()->Is<sem::Vector>());
+ ASSERT_TRUE(members[1]->Type()->Is<sem::Vector>());
+ EXPECT_EQ(members[0]->Type()->As<sem::Vector>()->Width(), 4u);
+ EXPECT_TRUE(members[0]->Type()->As<sem::Vector>()->type()->Is<sem::F32>());
+ EXPECT_EQ(members[1]->Type()->As<sem::Vector>()->Width(), 4u);
+ EXPECT_TRUE(members[1]->Type()->As<sem::Vector>()->type()->Is<sem::F32>());
}
TEST_F(ResolverBuiltinsValidationTest, Cross_Float_Vec3) {
- auto* builtin =
- Call("cross", vec3<f32>(1.0f, 1.0f, 1.0f), vec3<f32>(1.0f, 1.0f, 1.0f));
- WrapInFunction(builtin);
+ auto* builtin = Call("cross", vec3<f32>(1_f, 1_f, 1_f), vec3<f32>(1_f, 1_f, 1_f));
+ WrapInFunction(builtin);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverBuiltinsValidationTest, Dot_Float_Vec2) {
- auto* builtin = Call("dot", vec2<f32>(1.0f, 1.0f), vec2<f32>(1.0f, 1.0f));
- WrapInFunction(builtin);
+ auto* builtin = Call("dot", vec2<f32>(1_f, 1_f), vec2<f32>(1_f, 1_f));
+ WrapInFunction(builtin);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverBuiltinsValidationTest, Dot_Float_Vec3) {
- auto* builtin =
- Call("dot", vec3<f32>(1.0f, 1.0f, 1.0f), vec3<f32>(1.0f, 1.0f, 1.0f));
- WrapInFunction(builtin);
+ auto* builtin = Call("dot", vec3<f32>(1_f, 1_f, 1_f), vec3<f32>(1_f, 1_f, 1_f));
+ WrapInFunction(builtin);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverBuiltinsValidationTest, Dot_Float_Vec4) {
- auto* builtin = Call("dot", vec4<f32>(1.0f, 1.0f, 1.0f, 1.0f),
- vec4<f32>(1.0f, 1.0f, 1.0f, 1.0f));
- WrapInFunction(builtin);
+ auto* builtin = Call("dot", vec4<f32>(1_f, 1_f, 1_f, 1_f), vec4<f32>(1_f, 1_f, 1_f, 1_f));
+ WrapInFunction(builtin);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverBuiltinsValidationTest, Select_Float_Scalar) {
- auto* builtin = Call("select", Expr(1.0f), Expr(1.0f), Expr(true));
- WrapInFunction(builtin);
+ auto* builtin = Call("select", Expr(1_f), Expr(1_f), Expr(true));
+ WrapInFunction(builtin);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverBuiltinsValidationTest, Select_Integer_Scalar) {
- auto* builtin = Call("select", Expr(1), Expr(1), Expr(true));
- WrapInFunction(builtin);
+ auto* builtin = Call("select", Expr(1_i), Expr(1_i), Expr(true));
+ WrapInFunction(builtin);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverBuiltinsValidationTest, Select_Boolean_Scalar) {
- auto* builtin = Call("select", Expr(true), Expr(true), Expr(true));
- WrapInFunction(builtin);
+ auto* builtin = Call("select", Expr(true), Expr(true), Expr(true));
+ WrapInFunction(builtin);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverBuiltinsValidationTest, Select_Float_Vec2) {
- auto* builtin = Call("select", vec2<f32>(1.0f, 1.0f), vec2<f32>(1.0f, 1.0f),
- vec2<bool>(true, true));
- WrapInFunction(builtin);
+ auto* builtin =
+ Call("select", vec2<f32>(1_f, 1_f), vec2<f32>(1_f, 1_f), vec2<bool>(true, true));
+ WrapInFunction(builtin);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverBuiltinsValidationTest, Select_Integer_Vec2) {
- auto* builtin =
- Call("select", vec2<int>(1, 1), vec2<int>(1, 1), vec2<bool>(true, true));
- WrapInFunction(builtin);
+ auto* builtin =
+ Call("select", vec2<i32>(1_i, 1_i), vec2<i32>(1_i, 1_i), vec2<bool>(true, true));
+ WrapInFunction(builtin);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverBuiltinsValidationTest, Select_Boolean_Vec2) {
- auto* builtin = Call("select", vec2<bool>(true, true), vec2<bool>(true, true),
- vec2<bool>(true, true));
- WrapInFunction(builtin);
+ auto* builtin =
+ Call("select", vec2<bool>(true, true), vec2<bool>(true, true), vec2<bool>(true, true));
+ WrapInFunction(builtin);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
template <typename T>
-class ResolverBuiltinsValidationTestWithParams
- : public resolver::TestHelper,
- public testing::TestWithParam<T> {};
+class ResolverBuiltinsValidationTestWithParams : public resolver::TestHelper,
+ public testing::TestWithParam<T> {};
using FloatAllMatching =
ResolverBuiltinsValidationTestWithParams<std::tuple<std::string, uint32_t>>;
TEST_P(FloatAllMatching, Scalar) {
- std::string name = std::get<0>(GetParam());
- uint32_t num_params = std::get<1>(GetParam());
+ std::string name = std::get<0>(GetParam());
+ uint32_t num_params = std::get<1>(GetParam());
- ast::ExpressionList params;
- for (uint32_t i = 0; i < num_params; ++i) {
- params.push_back(Expr(1.0f));
- }
- auto* builtin = Call(name, params);
- Func("func", {}, ty.void_(), {CallStmt(builtin)},
- {create<ast::StageAttribute>(ast::PipelineStage::kFragment)});
+ ast::ExpressionList params;
+ for (uint32_t i = 0; i < num_params; ++i) {
+ params.push_back(Expr(1_f));
+ }
+ auto* builtin = Call(name, params);
+ Func("func", {}, ty.void_(), {CallStmt(builtin)},
+ {create<ast::StageAttribute>(ast::PipelineStage::kFragment)});
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- EXPECT_TRUE(TypeOf(builtin)->Is<sem::F32>());
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(TypeOf(builtin)->Is<sem::F32>());
}
TEST_P(FloatAllMatching, Vec2) {
- std::string name = std::get<0>(GetParam());
- uint32_t num_params = std::get<1>(GetParam());
+ std::string name = std::get<0>(GetParam());
+ uint32_t num_params = std::get<1>(GetParam());
- ast::ExpressionList params;
- for (uint32_t i = 0; i < num_params; ++i) {
- params.push_back(vec2<f32>(1.0f, 1.0f));
- }
- auto* builtin = Call(name, params);
- Func("func", {}, ty.void_(), {CallStmt(builtin)},
- {create<ast::StageAttribute>(ast::PipelineStage::kFragment)});
+ ast::ExpressionList params;
+ for (uint32_t i = 0; i < num_params; ++i) {
+ params.push_back(vec2<f32>(1_f, 1_f));
+ }
+ auto* builtin = Call(name, params);
+ Func("func", {}, ty.void_(), {CallStmt(builtin)},
+ {create<ast::StageAttribute>(ast::PipelineStage::kFragment)});
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- EXPECT_TRUE(TypeOf(builtin)->is_float_vector());
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(TypeOf(builtin)->is_float_vector());
}
TEST_P(FloatAllMatching, Vec3) {
- std::string name = std::get<0>(GetParam());
- uint32_t num_params = std::get<1>(GetParam());
+ std::string name = std::get<0>(GetParam());
+ uint32_t num_params = std::get<1>(GetParam());
- ast::ExpressionList params;
- for (uint32_t i = 0; i < num_params; ++i) {
- params.push_back(vec3<f32>(1.0f, 1.0f, 1.0f));
- }
- auto* builtin = Call(name, params);
- Func("func", {}, ty.void_(), {CallStmt(builtin)},
- {create<ast::StageAttribute>(ast::PipelineStage::kFragment)});
+ ast::ExpressionList params;
+ for (uint32_t i = 0; i < num_params; ++i) {
+ params.push_back(vec3<f32>(1_f, 1_f, 1_f));
+ }
+ auto* builtin = Call(name, params);
+ Func("func", {}, ty.void_(), {CallStmt(builtin)},
+ {create<ast::StageAttribute>(ast::PipelineStage::kFragment)});
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- EXPECT_TRUE(TypeOf(builtin)->is_float_vector());
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(TypeOf(builtin)->is_float_vector());
}
TEST_P(FloatAllMatching, Vec4) {
- std::string name = std::get<0>(GetParam());
- uint32_t num_params = std::get<1>(GetParam());
+ std::string name = std::get<0>(GetParam());
+ uint32_t num_params = std::get<1>(GetParam());
- ast::ExpressionList params;
- for (uint32_t i = 0; i < num_params; ++i) {
- params.push_back(vec4<f32>(1.0f, 1.0f, 1.0f, 1.0f));
- }
- auto* builtin = Call(name, params);
- Func("func", {}, ty.void_(), {CallStmt(builtin)},
- {create<ast::StageAttribute>(ast::PipelineStage::kFragment)});
+ ast::ExpressionList params;
+ for (uint32_t i = 0; i < num_params; ++i) {
+ params.push_back(vec4<f32>(1_f, 1_f, 1_f, 1_f));
+ }
+ auto* builtin = Call(name, params);
+ Func("func", {}, ty.void_(), {CallStmt(builtin)},
+ {create<ast::StageAttribute>(ast::PipelineStage::kFragment)});
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- EXPECT_TRUE(TypeOf(builtin)->is_float_vector());
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(TypeOf(builtin)->is_float_vector());
}
INSTANTIATE_TEST_SUITE_P(ResolverBuiltinsValidationTest,
@@ -1069,7 +931,7 @@ INSTANTIATE_TEST_SUITE_P(ResolverBuiltinsValidationTest,
std::make_tuple("sign", 1),
std::make_tuple("sin", 1),
std::make_tuple("sinh", 1),
- std::make_tuple("smoothStep", 3),
+ std::make_tuple("smoothstep", 3),
std::make_tuple("sqrt", 1),
std::make_tuple("step", 2),
std::make_tuple("tan", 1),
@@ -1080,123 +942,123 @@ using IntegerAllMatching =
ResolverBuiltinsValidationTestWithParams<std::tuple<std::string, uint32_t>>;
TEST_P(IntegerAllMatching, ScalarUnsigned) {
- std::string name = std::get<0>(GetParam());
- uint32_t num_params = std::get<1>(GetParam());
+ std::string name = std::get<0>(GetParam());
+ uint32_t num_params = std::get<1>(GetParam());
- ast::ExpressionList params;
- for (uint32_t i = 0; i < num_params; ++i) {
- params.push_back(Construct<uint32_t>(1));
- }
- auto* builtin = Call(name, params);
- WrapInFunction(builtin);
+ ast::ExpressionList params;
+ for (uint32_t i = 0; i < num_params; ++i) {
+ params.push_back(Construct<u32>(1_i));
+ }
+ auto* builtin = Call(name, params);
+ WrapInFunction(builtin);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- EXPECT_TRUE(TypeOf(builtin)->Is<sem::U32>());
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(TypeOf(builtin)->Is<sem::U32>());
}
TEST_P(IntegerAllMatching, Vec2Unsigned) {
- std::string name = std::get<0>(GetParam());
- uint32_t num_params = std::get<1>(GetParam());
+ std::string name = std::get<0>(GetParam());
+ uint32_t num_params = std::get<1>(GetParam());
- ast::ExpressionList params;
- for (uint32_t i = 0; i < num_params; ++i) {
- params.push_back(vec2<uint32_t>(1u, 1u));
- }
- auto* builtin = Call(name, params);
- WrapInFunction(builtin);
+ ast::ExpressionList params;
+ for (uint32_t i = 0; i < num_params; ++i) {
+ params.push_back(vec2<u32>(1_u, 1_u));
+ }
+ auto* builtin = Call(name, params);
+ WrapInFunction(builtin);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- EXPECT_TRUE(TypeOf(builtin)->is_unsigned_integer_vector());
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(TypeOf(builtin)->is_unsigned_integer_vector());
}
TEST_P(IntegerAllMatching, Vec3Unsigned) {
- std::string name = std::get<0>(GetParam());
- uint32_t num_params = std::get<1>(GetParam());
+ std::string name = std::get<0>(GetParam());
+ uint32_t num_params = std::get<1>(GetParam());
- ast::ExpressionList params;
- for (uint32_t i = 0; i < num_params; ++i) {
- params.push_back(vec3<uint32_t>(1u, 1u, 1u));
- }
- auto* builtin = Call(name, params);
- WrapInFunction(builtin);
+ ast::ExpressionList params;
+ for (uint32_t i = 0; i < num_params; ++i) {
+ params.push_back(vec3<u32>(1_u, 1_u, 1_u));
+ }
+ auto* builtin = Call(name, params);
+ WrapInFunction(builtin);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- EXPECT_TRUE(TypeOf(builtin)->is_unsigned_integer_vector());
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(TypeOf(builtin)->is_unsigned_integer_vector());
}
TEST_P(IntegerAllMatching, Vec4Unsigned) {
- std::string name = std::get<0>(GetParam());
- uint32_t num_params = std::get<1>(GetParam());
+ std::string name = std::get<0>(GetParam());
+ uint32_t num_params = std::get<1>(GetParam());
- ast::ExpressionList params;
- for (uint32_t i = 0; i < num_params; ++i) {
- params.push_back(vec4<uint32_t>(1u, 1u, 1u, 1u));
- }
- auto* builtin = Call(name, params);
- WrapInFunction(builtin);
+ ast::ExpressionList params;
+ for (uint32_t i = 0; i < num_params; ++i) {
+ params.push_back(vec4<u32>(1_u, 1_u, 1_u, 1_u));
+ }
+ auto* builtin = Call(name, params);
+ WrapInFunction(builtin);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- EXPECT_TRUE(TypeOf(builtin)->is_unsigned_integer_vector());
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(TypeOf(builtin)->is_unsigned_integer_vector());
}
TEST_P(IntegerAllMatching, ScalarSigned) {
- std::string name = std::get<0>(GetParam());
- uint32_t num_params = std::get<1>(GetParam());
+ std::string name = std::get<0>(GetParam());
+ uint32_t num_params = std::get<1>(GetParam());
- ast::ExpressionList params;
- for (uint32_t i = 0; i < num_params; ++i) {
- params.push_back(Construct<int32_t>(1));
- }
- auto* builtin = Call(name, params);
- WrapInFunction(builtin);
+ ast::ExpressionList params;
+ for (uint32_t i = 0; i < num_params; ++i) {
+ params.push_back(Construct<i32>(1_i));
+ }
+ auto* builtin = Call(name, params);
+ WrapInFunction(builtin);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- EXPECT_TRUE(TypeOf(builtin)->Is<sem::I32>());
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(TypeOf(builtin)->Is<sem::I32>());
}
TEST_P(IntegerAllMatching, Vec2Signed) {
- std::string name = std::get<0>(GetParam());
- uint32_t num_params = std::get<1>(GetParam());
+ std::string name = std::get<0>(GetParam());
+ uint32_t num_params = std::get<1>(GetParam());
- ast::ExpressionList params;
- for (uint32_t i = 0; i < num_params; ++i) {
- params.push_back(vec2<int32_t>(1, 1));
- }
- auto* builtin = Call(name, params);
- WrapInFunction(builtin);
+ ast::ExpressionList params;
+ for (uint32_t i = 0; i < num_params; ++i) {
+ params.push_back(vec2<i32>(1_i, 1_i));
+ }
+ auto* builtin = Call(name, params);
+ WrapInFunction(builtin);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- EXPECT_TRUE(TypeOf(builtin)->is_signed_integer_vector());
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(TypeOf(builtin)->is_signed_integer_vector());
}
TEST_P(IntegerAllMatching, Vec3Signed) {
- std::string name = std::get<0>(GetParam());
- uint32_t num_params = std::get<1>(GetParam());
+ std::string name = std::get<0>(GetParam());
+ uint32_t num_params = std::get<1>(GetParam());
- ast::ExpressionList params;
- for (uint32_t i = 0; i < num_params; ++i) {
- params.push_back(vec3<int32_t>(1, 1, 1));
- }
- auto* builtin = Call(name, params);
- WrapInFunction(builtin);
+ ast::ExpressionList params;
+ for (uint32_t i = 0; i < num_params; ++i) {
+ params.push_back(vec3<i32>(1_i, 1_i, 1_i));
+ }
+ auto* builtin = Call(name, params);
+ WrapInFunction(builtin);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- EXPECT_TRUE(TypeOf(builtin)->is_signed_integer_vector());
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(TypeOf(builtin)->is_signed_integer_vector());
}
TEST_P(IntegerAllMatching, Vec4Signed) {
- std::string name = std::get<0>(GetParam());
- uint32_t num_params = std::get<1>(GetParam());
+ std::string name = std::get<0>(GetParam());
+ uint32_t num_params = std::get<1>(GetParam());
- ast::ExpressionList params;
- for (uint32_t i = 0; i < num_params; ++i) {
- params.push_back(vec4<int32_t>(1, 1, 1, 1));
- }
- auto* builtin = Call(name, params);
- WrapInFunction(builtin);
+ ast::ExpressionList params;
+ for (uint32_t i = 0; i < num_params; ++i) {
+ params.push_back(vec4<i32>(1_i, 1_i, 1_i, 1_i));
+ }
+ auto* builtin = Call(name, params);
+ WrapInFunction(builtin);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- EXPECT_TRUE(TypeOf(builtin)->is_signed_integer_vector());
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(TypeOf(builtin)->is_signed_integer_vector());
}
INSTANTIATE_TEST_SUITE_P(ResolverBuiltinsValidationTest,
@@ -1212,59 +1074,58 @@ using BooleanVectorInput =
ResolverBuiltinsValidationTestWithParams<std::tuple<std::string, uint32_t>>;
TEST_P(BooleanVectorInput, Vec2) {
- std::string name = std::get<0>(GetParam());
- uint32_t num_params = std::get<1>(GetParam());
+ std::string name = std::get<0>(GetParam());
+ uint32_t num_params = std::get<1>(GetParam());
- ast::ExpressionList params;
- for (uint32_t i = 0; i < num_params; ++i) {
- params.push_back(vec2<bool>(true, true));
- }
- auto* builtin = Call(name, params);
- WrapInFunction(builtin);
+ ast::ExpressionList params;
+ for (uint32_t i = 0; i < num_params; ++i) {
+ params.push_back(vec2<bool>(true, true));
+ }
+ auto* builtin = Call(name, params);
+ WrapInFunction(builtin);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_P(BooleanVectorInput, Vec3) {
- std::string name = std::get<0>(GetParam());
- uint32_t num_params = std::get<1>(GetParam());
+ std::string name = std::get<0>(GetParam());
+ uint32_t num_params = std::get<1>(GetParam());
- ast::ExpressionList params;
- for (uint32_t i = 0; i < num_params; ++i) {
- params.push_back(vec3<bool>(true, true, true));
- }
- auto* builtin = Call(name, params);
- WrapInFunction(builtin);
+ ast::ExpressionList params;
+ for (uint32_t i = 0; i < num_params; ++i) {
+ params.push_back(vec3<bool>(true, true, true));
+ }
+ auto* builtin = Call(name, params);
+ WrapInFunction(builtin);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_P(BooleanVectorInput, Vec4) {
- std::string name = std::get<0>(GetParam());
- uint32_t num_params = std::get<1>(GetParam());
+ std::string name = std::get<0>(GetParam());
+ uint32_t num_params = std::get<1>(GetParam());
- ast::ExpressionList params;
- for (uint32_t i = 0; i < num_params; ++i) {
- params.push_back(vec4<bool>(true, true, true, true));
- }
- auto* builtin = Call(name, params);
- WrapInFunction(builtin);
+ ast::ExpressionList params;
+ for (uint32_t i = 0; i < num_params; ++i) {
+ params.push_back(vec4<bool>(true, true, true, true));
+ }
+ auto* builtin = Call(name, params);
+ WrapInFunction(builtin);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
INSTANTIATE_TEST_SUITE_P(ResolverBuiltinsValidationTest,
BooleanVectorInput,
- ::testing::Values(std::make_tuple("all", 1),
- std::make_tuple("any", 1)));
+ ::testing::Values(std::make_tuple("all", 1), std::make_tuple("any", 1)));
using DataPacking4x8 = ResolverBuiltinsValidationTestWithParams<std::string>;
TEST_P(DataPacking4x8, Float_Vec4) {
- auto name = GetParam();
- auto* builtin = Call(name, vec4<f32>(1.0f, 1.0f, 1.0f, 1.0f));
- WrapInFunction(builtin);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ auto name = GetParam();
+ auto* builtin = Call(name, vec4<f32>(1_f, 1_f, 1_f, 1_f));
+ WrapInFunction(builtin);
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
INSTANTIATE_TEST_SUITE_P(ResolverBuiltinsValidationTest,
@@ -1274,17 +1135,15 @@ INSTANTIATE_TEST_SUITE_P(ResolverBuiltinsValidationTest,
using DataPacking2x16 = ResolverBuiltinsValidationTestWithParams<std::string>;
TEST_P(DataPacking2x16, Float_Vec2) {
- auto name = GetParam();
- auto* builtin = Call(name, vec2<f32>(1.0f, 1.0f));
- WrapInFunction(builtin);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ auto name = GetParam();
+ auto* builtin = Call(name, vec2<f32>(1_f, 1_f));
+ WrapInFunction(builtin);
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
INSTANTIATE_TEST_SUITE_P(ResolverBuiltinsValidationTest,
DataPacking2x16,
- ::testing::Values("pack2x16snorm",
- "pack2x16unorm",
- "pack2x16float"));
+ ::testing::Values("pack2x16snorm", "pack2x16unorm", "pack2x16float"));
} // namespace
} // namespace tint::resolver
diff --git a/chromium/third_party/dawn/src/tint/resolver/call_test.cc b/chromium/third_party/dawn/src/tint/resolver/call_test.cc
index 20c80bc8f70..39a6eb490cc 100644
--- a/chromium/third_party/dawn/src/tint/resolver/call_test.cc
+++ b/chromium/third_party/dawn/src/tint/resolver/call_test.cc
@@ -18,6 +18,8 @@
#include "src/tint/ast/call_statement.h"
#include "src/tint/resolver/resolver_test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::resolver {
namespace {
// Helpers and typedefs
@@ -51,20 +53,17 @@ template <typename T>
using alias2 = builder::alias2<T>;
template <typename T>
using alias3 = builder::alias3<T>;
-using f32 = builder::f32;
-using i32 = builder::i32;
-using u32 = builder::u32;
using ResolverCallTest = ResolverTest;
struct Params {
- builder::ast_expr_func_ptr create_value;
- builder::ast_type_func_ptr create_type;
+ builder::ast_expr_func_ptr create_value;
+ builder::ast_type_func_ptr create_type;
};
template <typename T>
constexpr Params ParamsFor() {
- return Params{DataType<T>::Expr, DataType<T>::AST};
+ return Params{DataType<T>::Expr, DataType<T>::AST};
}
static constexpr Params all_param_types[] = {
@@ -82,34 +81,34 @@ static constexpr Params all_param_types[] = {
};
TEST_F(ResolverCallTest, Valid) {
- ast::VariableList params;
- ast::ExpressionList args;
- for (auto& p : all_param_types) {
- params.push_back(Param(Sym(), p.create_type(*this)));
- args.push_back(p.create_value(*this, 0));
- }
-
- auto* func = Func("foo", std::move(params), ty.f32(), {Return(1.23f)});
- auto* call_expr = Call("foo", std::move(args));
- WrapInFunction(call_expr);
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
-
- auto* call = Sem().Get(call_expr);
- EXPECT_NE(call, nullptr);
- EXPECT_EQ(call->Target(), Sem().Get(func));
+ ast::VariableList params;
+ ast::ExpressionList args;
+ for (auto& p : all_param_types) {
+ params.push_back(Param(Sym(), p.create_type(*this)));
+ args.push_back(p.create_value(*this, 0));
+ }
+
+ auto* func = Func("foo", std::move(params), ty.f32(), {Return(1.23_f)});
+ auto* call_expr = Call("foo", std::move(args));
+ WrapInFunction(call_expr);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+
+ auto* call = Sem().Get<sem::Call>(call_expr);
+ EXPECT_NE(call, nullptr);
+ EXPECT_EQ(call->Target(), Sem().Get(func));
}
TEST_F(ResolverCallTest, OutOfOrder) {
- auto* call_expr = Call("b");
- Func("a", {}, ty.void_(), {CallStmt(call_expr)});
- auto* b = Func("b", {}, ty.void_(), {});
+ auto* call_expr = Call("b");
+ Func("a", {}, ty.void_(), {CallStmt(call_expr)});
+ auto* b = Func("b", {}, ty.void_(), {});
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- auto* call = Sem().Get(call_expr);
- EXPECT_NE(call, nullptr);
- EXPECT_EQ(call->Target(), Sem().Get(b));
+ auto* call = Sem().Get<sem::Call>(call_expr);
+ EXPECT_NE(call, nullptr);
+ EXPECT_EQ(call->Target(), Sem().Get(b));
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/resolver/call_validation_test.cc b/chromium/third_party/dawn/src/tint/resolver/call_validation_test.cc
index 56cc3489c3d..4aa63509cff 100644
--- a/chromium/third_party/dawn/src/tint/resolver/call_validation_test.cc
+++ b/chromium/third_party/dawn/src/tint/resolver/call_validation_test.cc
@@ -18,267 +18,252 @@
#include "src/tint/ast/call_statement.h"
#include "src/tint/resolver/resolver_test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::resolver {
namespace {
using ResolverCallValidationTest = ResolverTest;
TEST_F(ResolverCallValidationTest, TooFewArgs) {
- Func("foo", {Param(Sym(), ty.i32()), Param(Sym(), ty.f32())}, ty.void_(),
- {Return()});
- auto* call = Call(Source{{12, 34}}, "foo", 1);
- WrapInFunction(call);
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- "12:34 error: too few arguments in call to 'foo', expected 2, got 1");
+ Func("foo", {Param(Sym(), ty.i32()), Param(Sym(), ty.f32())}, ty.void_(), {Return()});
+ auto* call = Call(Source{{12, 34}}, "foo", 1_i);
+ WrapInFunction(call);
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: too few arguments in call to 'foo', expected 2, got 1");
}
TEST_F(ResolverCallValidationTest, TooManyArgs) {
- Func("foo", {Param(Sym(), ty.i32()), Param(Sym(), ty.f32())}, ty.void_(),
- {Return()});
- auto* call = Call(Source{{12, 34}}, "foo", 1, 1.0f, 1.0f);
- WrapInFunction(call);
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- "12:34 error: too many arguments in call to 'foo', expected 2, got 3");
+ Func("foo", {Param(Sym(), ty.i32()), Param(Sym(), ty.f32())}, ty.void_(), {Return()});
+ auto* call = Call(Source{{12, 34}}, "foo", 1_i, 1_f, 1_f);
+ WrapInFunction(call);
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: too many arguments in call to 'foo', expected 2, got 3");
}
TEST_F(ResolverCallValidationTest, MismatchedArgs) {
- Func("foo", {Param(Sym(), ty.i32()), Param(Sym(), ty.f32())}, ty.void_(),
- {Return()});
- auto* call = Call("foo", Expr(Source{{12, 34}}, true), 1.0f);
- WrapInFunction(call);
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: type mismatch for argument 1 in call to 'foo', "
- "expected 'i32', got 'bool'");
+ Func("foo", {Param(Sym(), ty.i32()), Param(Sym(), ty.f32())}, ty.void_(), {Return()});
+ auto* call = Call("foo", Expr(Source{{12, 34}}, true), 1_f);
+ WrapInFunction(call);
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: type mismatch for argument 1 in call to 'foo', "
+ "expected 'i32', got 'bool'");
}
TEST_F(ResolverCallValidationTest, UnusedRetval) {
- // fn func() -> f32 { return 1.0; }
- // fn main() {func(); return; }
+ // fn func() -> f32 { return 1.0; }
+ // fn main() {func(); return; }
- Func("func", {}, ty.f32(), {Return(Expr(1.0f))}, {});
+ Func("func", {}, ty.f32(), {Return(Expr(1_f))}, {});
- Func("main", {}, ty.void_(),
- {
- CallStmt(Source{{12, 34}}, Call("func")),
- Return(),
- });
+ Func("main", {}, ty.void_(),
+ {
+ CallStmt(Source{{12, 34}}, Call("func")),
+ Return(),
+ });
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverCallValidationTest, PointerArgument_VariableIdentExpr) {
- // fn foo(p: ptr<function, i32>) {}
- // fn main() {
- // var z: i32 = 1;
- // foo(&z);
- // }
- auto* param = Param("p", ty.pointer<i32>(ast::StorageClass::kFunction));
- Func("foo", {param}, ty.void_(), {});
- Func("main", {}, ty.void_(),
- {
- Decl(Var("z", ty.i32(), Expr(1))),
- CallStmt(Call("foo", AddressOf(Source{{12, 34}}, Expr("z")))),
- });
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ // fn foo(p: ptr<function, i32>) {}
+ // fn main() {
+ // var z: i32 = 1i;
+ // foo(&z);
+ // }
+ auto* param = Param("p", ty.pointer<i32>(ast::StorageClass::kFunction));
+ Func("foo", {param}, ty.void_(), {});
+ Func("main", {}, ty.void_(),
+ {
+ Decl(Var("z", ty.i32(), Expr(1_i))),
+ CallStmt(Call("foo", AddressOf(Source{{12, 34}}, Expr("z")))),
+ });
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverCallValidationTest, PointerArgument_ConstIdentExpr) {
- // fn foo(p: ptr<function, i32>) {}
- // fn main() {
- // let z: i32 = 1;
- // foo(&z);
- // }
- auto* param = Param("p", ty.pointer<i32>(ast::StorageClass::kFunction));
- Func("foo", {param}, ty.void_(), {});
- Func("main", {}, ty.void_(),
- {
- Decl(Const("z", ty.i32(), Expr(1))),
- CallStmt(Call("foo", AddressOf(Expr(Source{{12, 34}}, "z")))),
- });
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), "12:34 error: cannot take the address of expression");
+ // fn foo(p: ptr<function, i32>) {}
+ // fn main() {
+ // let z: i32 = 1i;
+ // foo(&z);
+ // }
+ auto* param = Param("p", ty.pointer<i32>(ast::StorageClass::kFunction));
+ Func("foo", {param}, ty.void_(), {});
+ Func("main", {}, ty.void_(),
+ {
+ Decl(Let("z", ty.i32(), Expr(1_i))),
+ CallStmt(Call("foo", AddressOf(Expr(Source{{12, 34}}, "z")))),
+ });
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: cannot take the address of expression");
}
TEST_F(ResolverCallValidationTest, PointerArgument_NotIdentExprVar) {
- // struct S { m: i32; };
- // fn foo(p: ptr<function, i32>) {}
- // fn main() {
- // var v: S;
- // foo(&v.m);
- // }
- auto* S = Structure("S", {Member("m", ty.i32())});
- auto* param = Param("p", ty.pointer<i32>(ast::StorageClass::kFunction));
- Func("foo", {param}, ty.void_(), {});
- Func("main", {}, ty.void_(),
- {
- Decl(Var("v", ty.Of(S))),
- CallStmt(Call(
- "foo", AddressOf(Source{{12, 34}}, MemberAccessor("v", "m")))),
- });
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: expected an address-of expression of a variable "
- "identifier expression or a function parameter");
+ // struct S { m: i32; };
+ // fn foo(p: ptr<function, i32>) {}
+ // fn main() {
+ // var v: S;
+ // foo(&v.m);
+ // }
+ auto* S = Structure("S", {Member("m", ty.i32())});
+ auto* param = Param("p", ty.pointer<i32>(ast::StorageClass::kFunction));
+ Func("foo", {param}, ty.void_(), {});
+ Func("main", {}, ty.void_(),
+ {
+ Decl(Var("v", ty.Of(S))),
+ CallStmt(Call("foo", AddressOf(Source{{12, 34}}, MemberAccessor("v", "m")))),
+ });
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: expected an address-of expression of a variable "
+ "identifier expression or a function parameter");
}
TEST_F(ResolverCallValidationTest, PointerArgument_AddressOfMemberAccessor) {
- // struct S { m: i32; };
- // fn foo(p: ptr<function, i32>) {}
- // fn main() {
- // let v: S = S();
- // foo(&v.m);
- // }
- auto* S = Structure("S", {Member("m", ty.i32())});
- auto* param = Param("p", ty.pointer<i32>(ast::StorageClass::kFunction));
- Func("foo", {param}, ty.void_(), {});
- Func("main", {}, ty.void_(),
- {
- Decl(Const("v", ty.Of(S), Construct(ty.Of(S)))),
- CallStmt(Call("foo", AddressOf(Expr(Source{{12, 34}},
- MemberAccessor("v", "m"))))),
- });
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), "12:34 error: cannot take the address of expression");
+ // struct S { m: i32; };
+ // fn foo(p: ptr<function, i32>) {}
+ // fn main() {
+ // let v: S = S();
+ // foo(&v.m);
+ // }
+ auto* S = Structure("S", {Member("m", ty.i32())});
+ auto* param = Param("p", ty.pointer<i32>(ast::StorageClass::kFunction));
+ Func("foo", {param}, ty.void_(), {});
+ Func("main", {}, ty.void_(),
+ {
+ Decl(Let("v", ty.Of(S), Construct(ty.Of(S)))),
+ CallStmt(Call("foo", AddressOf(MemberAccessor(Source{{12, 34}}, "v", "m")))),
+ });
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: cannot take the address of expression");
}
TEST_F(ResolverCallValidationTest, PointerArgument_FunctionParam) {
- // fn foo(p: ptr<function, i32>) {}
- // fn bar(p: ptr<function, i32>) {
- // foo(p);
- // }
- Func("foo", {Param("p", ty.pointer<i32>(ast::StorageClass::kFunction))},
- ty.void_(), {});
- Func("bar", {Param("p", ty.pointer<i32>(ast::StorageClass::kFunction))},
- ty.void_(), ast::StatementList{CallStmt(Call("foo", Expr("p")))});
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ // fn foo(p: ptr<function, i32>) {}
+ // fn bar(p: ptr<function, i32>) {
+ // foo(p);
+ // }
+ Func("foo", {Param("p", ty.pointer<i32>(ast::StorageClass::kFunction))}, ty.void_(), {});
+ Func("bar", {Param("p", ty.pointer<i32>(ast::StorageClass::kFunction))}, ty.void_(),
+ ast::StatementList{CallStmt(Call("foo", Expr("p")))});
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverCallValidationTest, PointerArgument_FunctionParamWithMain) {
- // fn foo(p: ptr<function, i32>) {}
- // fn bar(p: ptr<function, i32>) {
- // foo(p);
- // }
- // @stage(fragment)
- // fn main() {
- // var v: i32;
- // bar(&v);
- // }
- Func("foo", {Param("p", ty.pointer<i32>(ast::StorageClass::kFunction))},
- ty.void_(), {});
- Func("bar", {Param("p", ty.pointer<i32>(ast::StorageClass::kFunction))},
- ty.void_(), ast::StatementList{CallStmt(Call("foo", Expr("p")))});
- Func("main", ast::VariableList{}, ty.void_(),
- {
- Decl(Var("v", ty.i32(), Expr(1))),
- CallStmt(Call("foo", AddressOf(Expr("v")))),
- },
- {
- Stage(ast::PipelineStage::kFragment),
- });
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ // fn foo(p: ptr<function, i32>) {}
+ // fn bar(p: ptr<function, i32>) {
+ // foo(p);
+ // }
+ // @fragment
+ // fn main() {
+ // var v: i32;
+ // bar(&v);
+ // }
+ Func("foo", {Param("p", ty.pointer<i32>(ast::StorageClass::kFunction))}, ty.void_(), {});
+ Func("bar", {Param("p", ty.pointer<i32>(ast::StorageClass::kFunction))}, ty.void_(),
+ ast::StatementList{CallStmt(Call("foo", Expr("p")))});
+ Func("main", ast::VariableList{}, ty.void_(),
+ {
+ Decl(Var("v", ty.i32(), Expr(1_i))),
+ CallStmt(Call("foo", AddressOf(Expr("v")))),
+ },
+ {
+ Stage(ast::PipelineStage::kFragment),
+ });
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverCallValidationTest, LetPointer) {
- // fn x(p : ptr<function, i32>) -> i32 {}
- // @stage(fragment)
- // fn main() {
- // var v: i32;
- // let p: ptr<function, i32> = &v;
- // var c: i32 = x(p);
- // }
- Func("x", {Param("p", ty.pointer<i32>(ast::StorageClass::kFunction))},
- ty.void_(), {});
- auto* v = Var("v", ty.i32());
- auto* p = Const("p", ty.pointer(ty.i32(), ast::StorageClass::kFunction),
- AddressOf(v));
- auto* c = Var("c", ty.i32(), ast::StorageClass::kNone,
- Call("x", Expr(Source{{12, 34}}, p)));
- Func("main", ast::VariableList{}, ty.void_(),
- {
- Decl(v),
- Decl(p),
- Decl(c),
- },
- {
- Stage(ast::PipelineStage::kFragment),
- });
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: expected an address-of expression of a variable "
- "identifier expression or a function parameter");
+    // fn x(p : ptr<function, i32>) {}
+ // @fragment
+ // fn main() {
+ // var v: i32;
+ // let p: ptr<function, i32> = &v;
+ // var c: i32 = x(p);
+ // }
+ Func("x", {Param("p", ty.pointer<i32>(ast::StorageClass::kFunction))}, ty.void_(), {});
+ auto* v = Var("v", ty.i32());
+ auto* p = Let("p", ty.pointer(ty.i32(), ast::StorageClass::kFunction), AddressOf(v));
+ auto* c = Var("c", ty.i32(), ast::StorageClass::kNone, Call("x", Expr(Source{{12, 34}}, p)));
+ Func("main", ast::VariableList{}, ty.void_(),
+ {
+ Decl(v),
+ Decl(p),
+ Decl(c),
+ },
+ {
+ Stage(ast::PipelineStage::kFragment),
+ });
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: expected an address-of expression of a variable "
+ "identifier expression or a function parameter");
}
TEST_F(ResolverCallValidationTest, LetPointerPrivate) {
- // let p: ptr<private, i32> = &v;
- // fn foo(p : ptr<private, i32>) -> i32 {}
- // var v: i32;
- // @stage(fragment)
- // fn main() {
- // var c: i32 = foo(p);
- // }
- Func("foo", {Param("p", ty.pointer<i32>(ast::StorageClass::kPrivate))},
- ty.void_(), {});
- auto* v = Global("v", ty.i32(), ast::StorageClass::kPrivate);
- auto* p = Const("p", ty.pointer(ty.i32(), ast::StorageClass::kPrivate),
- AddressOf(v));
- auto* c = Var("c", ty.i32(), ast::StorageClass::kNone,
- Call("foo", Expr(Source{{12, 34}}, p)));
- Func("main", ast::VariableList{}, ty.void_(),
- {
- Decl(p),
- Decl(c),
- },
- {
- Stage(ast::PipelineStage::kFragment),
- });
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: expected an address-of expression of a variable "
- "identifier expression or a function parameter");
+ // let p: ptr<private, i32> = &v;
+ // fn foo(p : ptr<private, i32>) -> i32 {}
+ // var v: i32;
+ // @fragment
+ // fn main() {
+ // var c: i32 = foo(p);
+ // }
+ Func("foo", {Param("p", ty.pointer<i32>(ast::StorageClass::kPrivate))}, ty.void_(), {});
+ auto* v = Global("v", ty.i32(), ast::StorageClass::kPrivate);
+ auto* p = Let("p", ty.pointer(ty.i32(), ast::StorageClass::kPrivate), AddressOf(v));
+ auto* c = Var("c", ty.i32(), ast::StorageClass::kNone, Call("foo", Expr(Source{{12, 34}}, p)));
+ Func("main", ast::VariableList{}, ty.void_(),
+ {
+ Decl(p),
+ Decl(c),
+ },
+ {
+ Stage(ast::PipelineStage::kFragment),
+ });
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: expected an address-of expression of a variable "
+ "identifier expression or a function parameter");
}
TEST_F(ResolverCallValidationTest, CallVariable) {
- // var v : i32;
- // fn f() {
- // v();
- // }
- Global("v", ty.i32(), ast::StorageClass::kPrivate);
- Func("f", {}, ty.void_(), {CallStmt(Call(Source{{12, 34}}, "v"))});
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), R"(error: cannot call variable 'v'
+ // var v : i32;
+ // fn f() {
+ // v();
+ // }
+ Global("v", ty.i32(), ast::StorageClass::kPrivate);
+ Func("f", {}, ty.void_(), {CallStmt(Call(Source{{12, 34}}, "v"))});
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), R"(error: cannot call variable 'v'
note: 'v' declared here)");
}
TEST_F(ResolverCallValidationTest, CallVariableShadowsFunction) {
- // fn x() {}
- // fn f() {
- // var x : i32;
- // x();
- // }
- Func("x", {}, ty.void_(), {});
- Func("f", {}, ty.void_(),
- {
- Decl(Var(Source{{56, 78}}, "x", ty.i32())),
- CallStmt(Call(Source{{12, 34}}, "x")),
- });
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), R"(error: cannot call variable 'x'
+ // fn x() {}
+ // fn f() {
+ // var x : i32;
+ // x();
+ // }
+ Func("x", {}, ty.void_(), {});
+ Func("f", {}, ty.void_(),
+ {
+ Decl(Var(Source{{56, 78}}, "x", ty.i32())),
+ CallStmt(Call(Source{{12, 34}}, "x")),
+ });
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), R"(error: cannot call variable 'x'
56:78 note: 'x' declared here)");
}
diff --git a/chromium/third_party/dawn/src/tint/resolver/compound_assignment_validation_test.cc b/chromium/third_party/dawn/src/tint/resolver/compound_assignment_validation_test.cc
index 740643c3df7..1ae70406b45 100644
--- a/chromium/third_party/dawn/src/tint/resolver/compound_assignment_validation_test.cc
+++ b/chromium/third_party/dawn/src/tint/resolver/compound_assignment_validation_test.cc
@@ -16,7 +16,11 @@
#include "gmock/gmock.h"
#include "src/tint/resolver/resolver_test_helper.h"
-#include "src/tint/sem/storage_texture_type.h"
+#include "src/tint/sem/storage_texture.h"
+
+using ::testing::HasSubstr;
+
+using namespace tint::number_suffixes; // NOLINT
namespace tint::resolver {
namespace {
@@ -24,274 +28,249 @@ namespace {
using ResolverCompoundAssignmentValidationTest = ResolverTest;
TEST_F(ResolverCompoundAssignmentValidationTest, CompatibleTypes) {
- // var a : i32 = 2;
- // a += 2
- auto* var = Var("a", ty.i32(), ast::StorageClass::kNone, Expr(2));
- WrapInFunction(var,
- CompoundAssign(Source{{12, 34}}, "a", 2, ast::BinaryOp::kAdd));
+ // var a : i32 = 2;
+ // a += 2
+ auto* var = Var("a", ty.i32(), ast::StorageClass::kNone, Expr(2_i));
+ WrapInFunction(var, CompoundAssign(Source{{12, 34}}, "a", 2_i, ast::BinaryOp::kAdd));
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverCompoundAssignmentValidationTest, CompatibleTypesThroughAlias) {
- // alias myint = i32;
- // var a : myint = 2;
- // a += 2
- auto* myint = Alias("myint", ty.i32());
- auto* var = Var("a", ty.Of(myint), ast::StorageClass::kNone, Expr(2));
- WrapInFunction(var,
- CompoundAssign(Source{{12, 34}}, "a", 2, ast::BinaryOp::kAdd));
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ // alias myint = i32;
+ // var a : myint = 2;
+ // a += 2
+ auto* myint = Alias("myint", ty.i32());
+ auto* var = Var("a", ty.Of(myint), ast::StorageClass::kNone, Expr(2_i));
+ WrapInFunction(var, CompoundAssign(Source{{12, 34}}, "a", 2_i, ast::BinaryOp::kAdd));
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
-TEST_F(ResolverCompoundAssignmentValidationTest,
- CompatibleTypesAssignThroughPointer) {
- // var a : i32;
- // let b : ptr<function,i32> = &a;
- // *b += 2;
- const auto func = ast::StorageClass::kFunction;
- auto* var_a = Var("a", ty.i32(), func, Expr(2));
- auto* var_b = Const("b", ty.pointer<int>(func), AddressOf(Expr("a")));
- WrapInFunction(
- var_a, var_b,
- CompoundAssign(Source{{12, 34}}, Deref("b"), 2, ast::BinaryOp::kAdd));
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+TEST_F(ResolverCompoundAssignmentValidationTest, CompatibleTypesAssignThroughPointer) {
+ // var a : i32;
+ // let b : ptr<function,i32> = &a;
+ // *b += 2;
+ const auto func = ast::StorageClass::kFunction;
+ auto* var_a = Var("a", ty.i32(), func, Expr(2_i));
+ auto* var_b = Let("b", ty.pointer<i32>(func), AddressOf(Expr("a")));
+ WrapInFunction(var_a, var_b,
+ CompoundAssign(Source{{12, 34}}, Deref("b"), 2_i, ast::BinaryOp::kAdd));
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverCompoundAssignmentValidationTest, IncompatibleTypes) {
- // {
- // var a : i32 = 2;
- // a += 2.3;
- // }
+ // {
+ // var a : i32 = 2;
+ // a += 2.3;
+ // }
- auto* var = Var("a", ty.i32(), ast::StorageClass::kNone, Expr(2));
+ auto* var = Var("a", ty.i32(), ast::StorageClass::kNone, Expr(2_i));
- auto* assign =
- CompoundAssign(Source{{12, 34}}, "a", 2.3f, ast::BinaryOp::kAdd);
- WrapInFunction(var, assign);
+ auto* assign = CompoundAssign(Source{{12, 34}}, "a", 2.3_f, ast::BinaryOp::kAdd);
+ WrapInFunction(var, assign);
- ASSERT_FALSE(r()->Resolve());
+ ASSERT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: compound assignment operand types are invalid: i32 "
- "add f32");
+ EXPECT_THAT(r()->error(),
+ HasSubstr("12:34 error: no matching overload for operator += (i32, f32)"));
}
TEST_F(ResolverCompoundAssignmentValidationTest, IncompatibleOp) {
- // {
- // var a : f32 = 1.0;
- // a |= 2.0;
- // }
+ // {
+ // var a : f32 = 1.0;
+ // a |= 2.0;
+ // }
- auto* var = Var("a", ty.f32(), ast::StorageClass::kNone, Expr(1.f));
+ auto* var = Var("a", ty.f32(), ast::StorageClass::kNone, Expr(1_f));
- auto* assign =
- CompoundAssign(Source{{12, 34}}, "a", 2.0f, ast::BinaryOp::kOr);
- WrapInFunction(var, assign);
+ auto* assign = CompoundAssign(Source{{12, 34}}, "a", 2_f, ast::BinaryOp::kOr);
+ WrapInFunction(var, assign);
- ASSERT_FALSE(r()->Resolve());
+ ASSERT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- "12:34 error: compound assignment operand types are invalid: f32 or f32");
+ EXPECT_THAT(r()->error(),
+ HasSubstr("12:34 error: no matching overload for operator |= (f32, f32)"));
}
TEST_F(ResolverCompoundAssignmentValidationTest, VectorScalar_Pass) {
- // {
- // var a : vec4<f32>;
- // a += 1.0;
- // }
+ // {
+ // var a : vec4<f32>;
+ // a += 1.0;
+ // }
- auto* var = Var("a", ty.vec4<f32>(), ast::StorageClass::kNone);
+ auto* var = Var("a", ty.vec4<f32>(), ast::StorageClass::kNone);
- auto* assign =
- CompoundAssign(Source{{12, 34}}, "a", 1.f, ast::BinaryOp::kAdd);
- WrapInFunction(var, assign);
+ auto* assign = CompoundAssign(Source{{12, 34}}, "a", 1_f, ast::BinaryOp::kAdd);
+ WrapInFunction(var, assign);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverCompoundAssignmentValidationTest, ScalarVector_Fail) {
- // {
- // var a : f32;
- // a += vec4<f32>();
- // }
+ // {
+ // var a : f32;
+ // a += vec4<f32>();
+ // }
- auto* var = Var("a", ty.f32(), ast::StorageClass::kNone);
+ auto* var = Var("a", ty.f32(), ast::StorageClass::kNone);
- auto* assign =
- CompoundAssign(Source{{12, 34}}, "a", vec4<f32>(), ast::BinaryOp::kAdd);
- WrapInFunction(var, assign);
+ auto* assign = CompoundAssign(Source{{12, 34}}, "a", vec4<f32>(), ast::BinaryOp::kAdd);
+ WrapInFunction(var, assign);
- ASSERT_FALSE(r()->Resolve());
+ ASSERT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), "12:34 error: cannot assign 'vec4<f32>' to 'f32'");
+ EXPECT_EQ(r()->error(), "12:34 error: cannot assign 'vec4<f32>' to 'f32'");
}
TEST_F(ResolverCompoundAssignmentValidationTest, MatrixScalar_Pass) {
- // {
- // var a : mat4x4<f32>;
- // a *= 2.0;
- // }
+ // {
+ // var a : mat4x4<f32>;
+ // a *= 2.0;
+ // }
- auto* var = Var("a", ty.mat4x4<f32>(), ast::StorageClass::kNone);
+ auto* var = Var("a", ty.mat4x4<f32>(), ast::StorageClass::kNone);
- auto* assign =
- CompoundAssign(Source{{12, 34}}, "a", 2.f, ast::BinaryOp::kMultiply);
- WrapInFunction(var, assign);
+ auto* assign = CompoundAssign(Source{{12, 34}}, "a", 2_f, ast::BinaryOp::kMultiply);
+ WrapInFunction(var, assign);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverCompoundAssignmentValidationTest, ScalarMatrix_Fail) {
- // {
- // var a : f32;
- // a *= mat4x4();
- // }
+ // {
+ // var a : f32;
+ // a *= mat4x4();
+ // }
- auto* var = Var("a", ty.f32(), ast::StorageClass::kNone);
+ auto* var = Var("a", ty.f32(), ast::StorageClass::kNone);
- auto* assign = CompoundAssign(Source{{12, 34}}, "a", mat4x4<f32>(),
- ast::BinaryOp::kMultiply);
- WrapInFunction(var, assign);
+ auto* assign = CompoundAssign(Source{{12, 34}}, "a", mat4x4<f32>(), ast::BinaryOp::kMultiply);
+ WrapInFunction(var, assign);
- ASSERT_FALSE(r()->Resolve());
+ ASSERT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), "12:34 error: cannot assign 'mat4x4<f32>' to 'f32'");
+ EXPECT_EQ(r()->error(), "12:34 error: cannot assign 'mat4x4<f32>' to 'f32'");
}
TEST_F(ResolverCompoundAssignmentValidationTest, VectorMatrix_Pass) {
- // {
- // var a : vec4<f32>;
- // a *= mat4x4();
- // }
+ // {
+ // var a : vec4<f32>;
+ // a *= mat4x4();
+ // }
- auto* var = Var("a", ty.vec4<f32>(), ast::StorageClass::kNone);
+ auto* var = Var("a", ty.vec4<f32>(), ast::StorageClass::kNone);
- auto* assign = CompoundAssign(Source{{12, 34}}, "a", mat4x4<f32>(),
- ast::BinaryOp::kMultiply);
- WrapInFunction(var, assign);
+ auto* assign = CompoundAssign(Source{{12, 34}}, "a", mat4x4<f32>(), ast::BinaryOp::kMultiply);
+ WrapInFunction(var, assign);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverCompoundAssignmentValidationTest, VectorMatrix_ColumnMismatch) {
- // {
- // var a : vec4<f32>;
- // a *= mat4x2();
- // }
+ // {
+ // var a : vec4<f32>;
+ // a *= mat4x2();
+ // }
- auto* var = Var("a", ty.vec4<f32>(), ast::StorageClass::kNone);
+ auto* var = Var("a", ty.vec4<f32>(), ast::StorageClass::kNone);
- auto* assign = CompoundAssign(Source{{12, 34}}, "a", mat4x2<f32>(),
- ast::BinaryOp::kMultiply);
- WrapInFunction(var, assign);
+ auto* assign = CompoundAssign(Source{{12, 34}}, "a", mat4x2<f32>(), ast::BinaryOp::kMultiply);
+ WrapInFunction(var, assign);
- ASSERT_FALSE(r()->Resolve());
+ ASSERT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: compound assignment operand types are invalid: "
- "vec4<f32> multiply mat4x2<f32>");
+ EXPECT_THAT(
+ r()->error(),
+ HasSubstr("12:34 error: no matching overload for operator *= (vec4<f32>, mat4x2<f32>)"));
}
TEST_F(ResolverCompoundAssignmentValidationTest, VectorMatrix_ResultMismatch) {
- // {
- // var a : vec4<f32>;
- // a *= mat2x4();
- // }
+ // {
+ // var a : vec4<f32>;
+ // a *= mat2x4();
+ // }
- auto* var = Var("a", ty.vec4<f32>(), ast::StorageClass::kNone);
+ auto* var = Var("a", ty.vec4<f32>(), ast::StorageClass::kNone);
- auto* assign = CompoundAssign(Source{{12, 34}}, "a", mat2x4<f32>(),
- ast::BinaryOp::kMultiply);
- WrapInFunction(var, assign);
+ auto* assign = CompoundAssign(Source{{12, 34}}, "a", mat2x4<f32>(), ast::BinaryOp::kMultiply);
+ WrapInFunction(var, assign);
- ASSERT_FALSE(r()->Resolve());
+ ASSERT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: cannot assign 'vec2<f32>' to 'vec4<f32>'");
+ EXPECT_EQ(r()->error(), "12:34 error: cannot assign 'vec2<f32>' to 'vec4<f32>'");
}
TEST_F(ResolverCompoundAssignmentValidationTest, MatrixVector_Fail) {
- // {
- // var a : mat4x4<f32>;
- // a *= vec4();
- // }
+ // {
+ // var a : mat4x4<f32>;
+ // a *= vec4();
+ // }
- auto* var = Var("a", ty.mat4x4<f32>(), ast::StorageClass::kNone);
+ auto* var = Var("a", ty.mat4x4<f32>(), ast::StorageClass::kNone);
- auto* assign = CompoundAssign(Source{{12, 34}}, "a", vec4<f32>(),
- ast::BinaryOp::kMultiply);
- WrapInFunction(var, assign);
+ auto* assign = CompoundAssign(Source{{12, 34}}, "a", vec4<f32>(), ast::BinaryOp::kMultiply);
+ WrapInFunction(var, assign);
- ASSERT_FALSE(r()->Resolve());
+ ASSERT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: cannot assign 'vec4<f32>' to 'mat4x4<f32>'");
+ EXPECT_EQ(r()->error(), "12:34 error: cannot assign 'vec4<f32>' to 'mat4x4<f32>'");
}
TEST_F(ResolverCompoundAssignmentValidationTest, Phony) {
- // {
- // _ += 1;
- // }
- WrapInFunction(
- CompoundAssign(Source{{56, 78}}, Phony(), 1, ast::BinaryOp::kAdd));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "56:78 error: compound assignment operand types are invalid: void "
- "add i32");
+ // {
+ // _ += 1i;
+ // }
+ WrapInFunction(CompoundAssign(Source{{56, 78}}, Phony(), 1_i, ast::BinaryOp::kAdd));
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_THAT(r()->error(),
+ HasSubstr("56:78 error: no matching overload for operator += (void, i32)"));
}
TEST_F(ResolverCompoundAssignmentValidationTest, ReadOnlyBuffer) {
- // @group(0) @binding(0) var<storage,read> a : i32;
- // {
- // a += 1;
- // }
- Global(Source{{12, 34}}, "a", ty.i32(), ast::StorageClass::kStorage,
- ast::Access::kRead, GroupAndBinding(0, 0));
- WrapInFunction(CompoundAssign(Source{{56, 78}}, "a", 1, ast::BinaryOp::kAdd));
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "56:78 error: cannot store into a read-only type 'ref<storage, "
- "i32, read>'");
+ // @group(0) @binding(0) var<storage,read> a : i32;
+ // {
+ // a += 1i;
+ // }
+ Global(Source{{12, 34}}, "a", ty.i32(), ast::StorageClass::kStorage, ast::Access::kRead,
+ GroupAndBinding(0, 0));
+ WrapInFunction(CompoundAssign(Source{{56, 78}}, "a", 1_i, ast::BinaryOp::kAdd));
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "56:78 error: cannot store into a read-only type 'ref<storage, i32, read>'");
}
TEST_F(ResolverCompoundAssignmentValidationTest, LhsConstant) {
- // let a = 1;
- // a += 1;
- auto* a = Const(Source{{12, 34}}, "a", nullptr, Expr(1));
- WrapInFunction(
- a, CompoundAssign(Expr(Source{{56, 78}}, "a"), 1, ast::BinaryOp::kAdd));
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), R"(56:78 error: cannot assign to const
+ // let a = 1i;
+ // a += 1i;
+ auto* a = Let(Source{{12, 34}}, "a", nullptr, Expr(1_i));
+ WrapInFunction(a, CompoundAssign(Expr(Source{{56, 78}}, "a"), 1_i, ast::BinaryOp::kAdd));
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), R"(56:78 error: cannot assign to const
12:34 note: 'a' is declared here:)");
}
TEST_F(ResolverCompoundAssignmentValidationTest, LhsLiteral) {
- // 1 += 1;
- WrapInFunction(
- CompoundAssign(Expr(Source{{56, 78}}, 1), 1, ast::BinaryOp::kAdd));
+ // 1i += 1i;
+ WrapInFunction(CompoundAssign(Expr(Source{{56, 78}}, 1_i), 1_i, ast::BinaryOp::kAdd));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), "56:78 error: cannot assign to value of type 'i32'");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "56:78 error: cannot assign to value of type 'i32'");
}
TEST_F(ResolverCompoundAssignmentValidationTest, LhsAtomic) {
- // var<workgroup> a : atomic<i32>;
- // a += a;
- Global(Source{{12, 34}}, "a", ty.atomic(ty.i32()),
- ast::StorageClass::kWorkgroup);
- WrapInFunction(
- CompoundAssign(Source{{56, 78}}, "a", "a", ast::BinaryOp::kAdd));
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "56:78 error: compound assignment operand types are invalid: "
- "atomic<i32> add atomic<i32>");
+ // var<workgroup> a : atomic<i32>;
+ // a += a;
+ Global(Source{{12, 34}}, "a", ty.atomic(ty.i32()), ast::StorageClass::kWorkgroup);
+ WrapInFunction(CompoundAssign(Source{{56, 78}}, "a", "a", ast::BinaryOp::kAdd));
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_THAT(
+ r()->error(),
+ HasSubstr("error: no matching overload for operator += (atomic<i32>, atomic<i32>)"));
}
} // namespace
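
The hunks above consistently replace untyped literals such as Expr(2) and Expr(2.3f) with suffixed literals like Expr(2_i) and Expr(2.3_f), made available through the added `using namespace tint::number_suffixes;` directive. As a rough, self-contained sketch of the underlying C++ mechanism (user-defined literal operators), under the assumption of hypothetical wrapper types I32/F32 rather than Tint's actual Number types:

    // Hypothetical illustration of typed literal suffixes; not Tint's real number.h.
    #include <cstdint>
    #include <iostream>

    struct I32 { int32_t value; };
    struct F32 { float value; };

    // User-defined literal operators: 2_i -> I32{2}, 2.5_f -> F32{2.5f}.
    constexpr I32 operator""_i(unsigned long long v) { return I32{static_cast<int32_t>(v)}; }
    constexpr F32 operator""_f(long double v) { return F32{static_cast<float>(v)}; }

    int main() {
        I32 a = 2_i;    // the literal carries its i32 intent in the type system
        F32 b = 2.5_f;  // distinct from an abstract (unsuffixed) literal
        std::cout << a.value << " " << b.value << "\n";
    }

The point of the suffix types is that a test can state exactly which WGSL literal kind it constructs, instead of relying on implicit int/float promotion.
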
diff --git a/chromium/third_party/dawn/src/tint/resolver/compound_statement_test.cc b/chromium/third_party/dawn/src/tint/resolver/compound_statement_test.cc
index 29c0384b338..0444bd33798 100644
--- a/chromium/third_party/dawn/src/tint/resolver/compound_statement_test.cc
+++ b/chromium/third_party/dawn/src/tint/resolver/compound_statement_test.cc
@@ -22,356 +22,376 @@
#include "src/tint/sem/loop_statement.h"
#include "src/tint/sem/switch_statement.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::resolver {
namespace {
using ResolverCompoundStatementTest = ResolverTest;
TEST_F(ResolverCompoundStatementTest, FunctionBlock) {
- // fn F() {
- // var x : 32;
- // }
- auto* stmt = Decl(Var("x", ty.i32()));
- auto* f = Func("F", {}, ty.void_(), {stmt});
-
- ASSERT_TRUE(r()->Resolve()) << r()->error();
-
- auto* s = Sem().Get(stmt);
- ASSERT_NE(s, nullptr);
- ASSERT_NE(s->Block(), nullptr);
- ASSERT_TRUE(s->Block()->Is<sem::FunctionBlockStatement>());
- EXPECT_EQ(s->Block(), s->FindFirstParent<sem::BlockStatement>());
- EXPECT_EQ(s->Block(), s->FindFirstParent<sem::FunctionBlockStatement>());
- EXPECT_EQ(s->Function()->Declaration(), f);
- EXPECT_EQ(s->Block()->Parent(), nullptr);
-}
+ // fn F() {
+ // var x : 32;
+ // }
+ auto* stmt = Decl(Var("x", ty.i32()));
+ auto* f = Func("F", {}, ty.void_(), {stmt});
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
-TEST_F(ResolverCompoundStatementTest, Block) {
- // fn F() {
- // {
- // var x : 32;
- // }
- // }
- auto* stmt = Decl(Var("x", ty.i32()));
- auto* block = Block(stmt);
- auto* f = Func("F", {}, ty.void_(), {block});
-
- ASSERT_TRUE(r()->Resolve()) << r()->error();
-
- {
- auto* s = Sem().Get(block);
- ASSERT_NE(s, nullptr);
- EXPECT_TRUE(s->Is<sem::BlockStatement>());
- EXPECT_EQ(s, s->Block());
- EXPECT_EQ(s->Parent(), s->FindFirstParent<sem::FunctionBlockStatement>());
- }
- {
auto* s = Sem().Get(stmt);
ASSERT_NE(s, nullptr);
ASSERT_NE(s->Block(), nullptr);
+ ASSERT_TRUE(s->Block()->Is<sem::FunctionBlockStatement>());
EXPECT_EQ(s->Block(), s->FindFirstParent<sem::BlockStatement>());
- EXPECT_EQ(s->Block()->Parent(),
- s->FindFirstParent<sem::FunctionBlockStatement>());
- ASSERT_TRUE(s->Block()->Parent()->Is<sem::FunctionBlockStatement>());
+ EXPECT_EQ(s->Block(), s->FindFirstParent<sem::FunctionBlockStatement>());
EXPECT_EQ(s->Function()->Declaration(), f);
- EXPECT_EQ(s->Block()->Parent()->Parent(), nullptr);
- }
+ EXPECT_EQ(s->Block()->Parent(), nullptr);
}
-TEST_F(ResolverCompoundStatementTest, Loop) {
- // fn F() {
- // loop {
- // break;
- // continuing {
- // stmt;
- // }
- // }
- // }
- auto* brk = Break();
- auto* stmt = Ignore(1);
- auto* loop = Loop(Block(brk), Block(stmt));
- auto* f = Func("F", {}, ty.void_(), {loop});
-
- ASSERT_TRUE(r()->Resolve()) << r()->error();
-
- {
- auto* s = Sem().Get(loop);
- ASSERT_NE(s, nullptr);
- EXPECT_TRUE(s->Is<sem::LoopStatement>());
- EXPECT_EQ(s->Parent(), s->FindFirstParent<sem::FunctionBlockStatement>());
- EXPECT_EQ(s->Parent(), s->Block());
- }
- {
- auto* s = Sem().Get(brk);
- ASSERT_NE(s, nullptr);
- ASSERT_NE(s->Block(), nullptr);
- EXPECT_EQ(s->Parent(), s->Block());
- EXPECT_EQ(s->Parent(), s->FindFirstParent<sem::LoopBlockStatement>());
-
- EXPECT_EQ(s->Parent()->Parent(), s->FindFirstParent<sem::LoopStatement>());
- EXPECT_TRUE(Is<sem::LoopStatement>(s->Parent()->Parent()));
-
- EXPECT_EQ(s->Parent()->Parent()->Parent(),
- s->FindFirstParent<sem::FunctionBlockStatement>());
- EXPECT_TRUE(
- Is<sem::FunctionBlockStatement>(s->Parent()->Parent()->Parent()));
-
- EXPECT_EQ(s->Function()->Declaration(), f);
-
- EXPECT_EQ(s->Parent()->Parent()->Parent()->Parent(), nullptr);
- }
- {
- auto* s = Sem().Get(stmt);
- ASSERT_NE(s, nullptr);
- ASSERT_NE(s->Block(), nullptr);
- EXPECT_EQ(s->Parent(), s->Block());
-
- EXPECT_EQ(s->Parent(),
- s->FindFirstParent<sem::LoopContinuingBlockStatement>());
- EXPECT_TRUE(Is<sem::LoopContinuingBlockStatement>(s->Parent()));
-
- EXPECT_EQ(s->Parent()->Parent(),
- s->FindFirstParent<sem::LoopBlockStatement>());
- EXPECT_TRUE(Is<sem::LoopBlockStatement>(s->Parent()->Parent()));
-
- EXPECT_EQ(s->Parent()->Parent()->Parent(),
- s->FindFirstParent<sem::LoopStatement>());
- EXPECT_TRUE(Is<sem::LoopStatement>(s->Parent()->Parent()->Parent()));
+TEST_F(ResolverCompoundStatementTest, Block) {
+ // fn F() {
+ // {
+ // var x : 32;
+ // }
+ // }
+ auto* stmt = Decl(Var("x", ty.i32()));
+ auto* block = Block(stmt);
+ auto* f = Func("F", {}, ty.void_(), {block});
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
+
+ {
+ auto* s = Sem().Get(block);
+ ASSERT_NE(s, nullptr);
+ EXPECT_TRUE(s->Is<sem::BlockStatement>());
+ EXPECT_EQ(s, s->Block());
+ EXPECT_EQ(s->Parent(), s->FindFirstParent<sem::FunctionBlockStatement>());
+ }
+ {
+ auto* s = Sem().Get(stmt);
+ ASSERT_NE(s, nullptr);
+ ASSERT_NE(s->Block(), nullptr);
+ EXPECT_EQ(s->Block(), s->FindFirstParent<sem::BlockStatement>());
+ EXPECT_EQ(s->Block()->Parent(), s->FindFirstParent<sem::FunctionBlockStatement>());
+ ASSERT_TRUE(s->Block()->Parent()->Is<sem::FunctionBlockStatement>());
+ EXPECT_EQ(s->Function()->Declaration(), f);
+ EXPECT_EQ(s->Block()->Parent()->Parent(), nullptr);
+ }
+}
- EXPECT_EQ(s->Parent()->Parent()->Parent()->Parent(),
- s->FindFirstParent<sem::FunctionBlockStatement>());
- EXPECT_TRUE(Is<sem::FunctionBlockStatement>(
- s->Parent()->Parent()->Parent()->Parent()));
- EXPECT_EQ(s->Function()->Declaration(), f);
+TEST_F(ResolverCompoundStatementTest, Loop) {
+ // fn F() {
+ // loop {
+ // break;
+ // continuing {
+ // stmt;
+ // }
+ // }
+ // }
+ auto* brk = Break();
+ auto* stmt = Ignore(1_i);
+ auto* loop = Loop(Block(brk), Block(stmt));
+ auto* f = Func("F", {}, ty.void_(), {loop});
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
+
+ {
+ auto* s = Sem().Get(loop);
+ ASSERT_NE(s, nullptr);
+ EXPECT_TRUE(s->Is<sem::LoopStatement>());
+ EXPECT_EQ(s->Parent(), s->FindFirstParent<sem::FunctionBlockStatement>());
+ EXPECT_EQ(s->Parent(), s->Block());
+ }
+ {
+ auto* s = Sem().Get(brk);
+ ASSERT_NE(s, nullptr);
+ ASSERT_NE(s->Block(), nullptr);
+ EXPECT_EQ(s->Parent(), s->Block());
+ EXPECT_EQ(s->Parent(), s->FindFirstParent<sem::LoopBlockStatement>());
+
+ EXPECT_EQ(s->Parent()->Parent(), s->FindFirstParent<sem::LoopStatement>());
+ EXPECT_TRUE(Is<sem::LoopStatement>(s->Parent()->Parent()));
+
+ EXPECT_EQ(s->Parent()->Parent()->Parent(),
+ s->FindFirstParent<sem::FunctionBlockStatement>());
+ EXPECT_TRUE(Is<sem::FunctionBlockStatement>(s->Parent()->Parent()->Parent()));
+
+ EXPECT_EQ(s->Function()->Declaration(), f);
+
+ EXPECT_EQ(s->Parent()->Parent()->Parent()->Parent(), nullptr);
+ }
+ {
+ auto* s = Sem().Get(stmt);
+ ASSERT_NE(s, nullptr);
+ ASSERT_NE(s->Block(), nullptr);
+ EXPECT_EQ(s->Parent(), s->Block());
+
+ EXPECT_EQ(s->Parent(), s->FindFirstParent<sem::LoopContinuingBlockStatement>());
+ EXPECT_TRUE(Is<sem::LoopContinuingBlockStatement>(s->Parent()));
+
+ EXPECT_EQ(s->Parent()->Parent(), s->FindFirstParent<sem::LoopBlockStatement>());
+ EXPECT_TRUE(Is<sem::LoopBlockStatement>(s->Parent()->Parent()));
+
+ EXPECT_EQ(s->Parent()->Parent()->Parent(), s->FindFirstParent<sem::LoopStatement>());
+ EXPECT_TRUE(Is<sem::LoopStatement>(s->Parent()->Parent()->Parent()));
+
+ EXPECT_EQ(s->Parent()->Parent()->Parent()->Parent(),
+ s->FindFirstParent<sem::FunctionBlockStatement>());
+ EXPECT_TRUE(Is<sem::FunctionBlockStatement>(s->Parent()->Parent()->Parent()->Parent()));
+ EXPECT_EQ(s->Function()->Declaration(), f);
+
+ EXPECT_EQ(s->Parent()->Parent()->Parent()->Parent()->Parent(), nullptr);
+ }
+}
- EXPECT_EQ(s->Parent()->Parent()->Parent()->Parent()->Parent(), nullptr);
- }
+TEST_F(ResolverCompoundStatementTest, Loop_EmptyContinuing) {
+ // fn F() {
+ // loop {
+ // break;
+ // continuing {
+ // }
+ // }
+ // }
+ auto* brk = Break();
+ auto* loop = Loop(Block(brk), Block());
+ Func("F", {}, ty.void_(), {loop});
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
+
+ {
+ auto* s = Sem().Get(loop);
+ ASSERT_NE(s, nullptr);
+ EXPECT_TRUE(s->Is<sem::LoopStatement>());
+ EXPECT_EQ(s->Parent(), s->FindFirstParent<sem::FunctionBlockStatement>());
+ EXPECT_EQ(s->Parent(), s->Block());
+ }
+ {
+ auto* s = Sem().Get(loop->continuing);
+ ASSERT_NE(s, nullptr);
+ EXPECT_TRUE(Is<sem::LoopContinuingBlockStatement>(s));
+ EXPECT_TRUE(Is<sem::LoopStatement>(s->Parent()->Parent()));
+ }
}
TEST_F(ResolverCompoundStatementTest, ForLoop) {
- // fn F() {
- // for (var i : u32; true; i = i + 1u) {
- // return;
- // }
- // }
- auto* init = Decl(Var("i", ty.u32()));
- auto* cond = Expr(true);
- auto* cont = Assign("i", Add("i", 1u));
- auto* stmt = Return();
- auto* body = Block(stmt);
- auto* for_ = For(init, cond, cont, body);
- auto* f = Func("F", {}, ty.void_(), {for_});
-
- ASSERT_TRUE(r()->Resolve()) << r()->error();
-
- {
- auto* s = Sem().Get(for_);
- ASSERT_NE(s, nullptr);
- EXPECT_TRUE(s->Is<sem::ForLoopStatement>());
- EXPECT_EQ(s->Parent(), s->FindFirstParent<sem::FunctionBlockStatement>());
- EXPECT_EQ(s->Parent(), s->Block());
- }
- {
- auto* s = Sem().Get(init);
- ASSERT_NE(s, nullptr);
- EXPECT_EQ(s->Parent(), s->FindFirstParent<sem::ForLoopStatement>());
- EXPECT_TRUE(Is<sem::ForLoopStatement>(s->Parent()));
- EXPECT_EQ(s->Block(), s->FindFirstParent<sem::FunctionBlockStatement>());
- EXPECT_TRUE(Is<sem::FunctionBlockStatement>(s->Parent()->Parent()));
- }
- { // Condition expression's statement is the for-loop itself
- auto* e = Sem().Get(cond);
- ASSERT_NE(e, nullptr);
- auto* s = e->Stmt();
- ASSERT_NE(s, nullptr);
- ASSERT_TRUE(Is<sem::ForLoopStatement>(s));
- ASSERT_NE(s->Parent(), nullptr);
- EXPECT_EQ(s->Parent(), s->Block());
- EXPECT_EQ(s->Parent(), s->FindFirstParent<sem::FunctionBlockStatement>());
- EXPECT_TRUE(Is<sem::FunctionBlockStatement>(s->Block()));
- }
- {
- auto* s = Sem().Get(cont);
- ASSERT_NE(s, nullptr);
- EXPECT_EQ(s->Parent(), s->FindFirstParent<sem::ForLoopStatement>());
- EXPECT_TRUE(Is<sem::ForLoopStatement>(s->Parent()));
- EXPECT_EQ(s->Block(), s->FindFirstParent<sem::FunctionBlockStatement>());
- EXPECT_TRUE(Is<sem::FunctionBlockStatement>(s->Parent()->Parent()));
- }
- {
- auto* s = Sem().Get(stmt);
- ASSERT_NE(s, nullptr);
- ASSERT_NE(s->Block(), nullptr);
- EXPECT_EQ(s->Parent(), s->Block());
- EXPECT_EQ(s->Block(), s->FindFirstParent<sem::LoopBlockStatement>());
- EXPECT_TRUE(Is<sem::ForLoopStatement>(s->Parent()->Parent()));
- EXPECT_EQ(s->Block()->Parent(),
- s->FindFirstParent<sem::ForLoopStatement>());
- ASSERT_TRUE(
- Is<sem::FunctionBlockStatement>(s->Block()->Parent()->Parent()));
- EXPECT_EQ(s->Block()->Parent()->Parent(),
- s->FindFirstParent<sem::FunctionBlockStatement>());
- EXPECT_EQ(s->Function()->Declaration(), f);
- EXPECT_EQ(s->Block()->Parent()->Parent()->Parent(), nullptr);
- }
+ // fn F() {
+ // for (var i : u32; true; i = i + 1u) {
+ // return;
+ // }
+ // }
+ auto* init = Decl(Var("i", ty.u32()));
+ auto* cond = Expr(true);
+ auto* cont = Assign("i", Add("i", 1_u));
+ auto* stmt = Return();
+ auto* body = Block(stmt);
+ auto* for_ = For(init, cond, cont, body);
+ auto* f = Func("F", {}, ty.void_(), {for_});
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
+
+ {
+ auto* s = Sem().Get(for_);
+ ASSERT_NE(s, nullptr);
+ EXPECT_TRUE(s->Is<sem::ForLoopStatement>());
+ EXPECT_EQ(s->Parent(), s->FindFirstParent<sem::FunctionBlockStatement>());
+ EXPECT_EQ(s->Parent(), s->Block());
+ }
+ {
+ auto* s = Sem().Get(init);
+ ASSERT_NE(s, nullptr);
+ EXPECT_EQ(s->Parent(), s->FindFirstParent<sem::ForLoopStatement>());
+ EXPECT_TRUE(Is<sem::ForLoopStatement>(s->Parent()));
+ EXPECT_EQ(s->Block(), s->FindFirstParent<sem::FunctionBlockStatement>());
+ EXPECT_TRUE(Is<sem::FunctionBlockStatement>(s->Parent()->Parent()));
+ }
+ { // Condition expression's statement is the for-loop itself
+ auto* e = Sem().Get(cond);
+ ASSERT_NE(e, nullptr);
+ auto* s = e->Stmt();
+ ASSERT_NE(s, nullptr);
+ ASSERT_TRUE(Is<sem::ForLoopStatement>(s));
+ ASSERT_NE(s->Parent(), nullptr);
+ EXPECT_EQ(s->Parent(), s->Block());
+ EXPECT_EQ(s->Parent(), s->FindFirstParent<sem::FunctionBlockStatement>());
+ EXPECT_TRUE(Is<sem::FunctionBlockStatement>(s->Block()));
+ }
+ {
+ auto* s = Sem().Get(cont);
+ ASSERT_NE(s, nullptr);
+ EXPECT_EQ(s->Parent(), s->FindFirstParent<sem::ForLoopStatement>());
+ EXPECT_TRUE(Is<sem::ForLoopStatement>(s->Parent()));
+ EXPECT_EQ(s->Block(), s->FindFirstParent<sem::FunctionBlockStatement>());
+ EXPECT_TRUE(Is<sem::FunctionBlockStatement>(s->Parent()->Parent()));
+ }
+ {
+ auto* s = Sem().Get(stmt);
+ ASSERT_NE(s, nullptr);
+ ASSERT_NE(s->Block(), nullptr);
+ EXPECT_EQ(s->Parent(), s->Block());
+ EXPECT_EQ(s->Block(), s->FindFirstParent<sem::LoopBlockStatement>());
+ EXPECT_TRUE(Is<sem::ForLoopStatement>(s->Parent()->Parent()));
+ EXPECT_EQ(s->Block()->Parent(), s->FindFirstParent<sem::ForLoopStatement>());
+ ASSERT_TRUE(Is<sem::FunctionBlockStatement>(s->Block()->Parent()->Parent()));
+ EXPECT_EQ(s->Block()->Parent()->Parent(),
+ s->FindFirstParent<sem::FunctionBlockStatement>());
+ EXPECT_EQ(s->Function()->Declaration(), f);
+ EXPECT_EQ(s->Block()->Parent()->Parent()->Parent(), nullptr);
+ }
}
TEST_F(ResolverCompoundStatementTest, If) {
- // fn F() {
- // if (cond_a) {
- // stat_a;
- // } else if (cond_b) {
- // stat_b;
- // } else {
- // stat_c;
- // }
- // }
-
- auto* cond_a = Expr(true);
- auto* stmt_a = Ignore(1);
- auto* cond_b = Expr(true);
- auto* stmt_b = Ignore(1);
- auto* stmt_c = Ignore(1);
- auto* if_stmt = If(cond_a, Block(stmt_a), Else(cond_b, Block(stmt_b)),
- Else(nullptr, Block(stmt_c)));
- WrapInFunction(if_stmt);
-
- ASSERT_TRUE(r()->Resolve()) << r()->error();
-
- {
- auto* s = Sem().Get(if_stmt);
- ASSERT_NE(s, nullptr);
- EXPECT_TRUE(s->Is<sem::IfStatement>());
- EXPECT_EQ(s->Parent(), s->FindFirstParent<sem::FunctionBlockStatement>());
- EXPECT_EQ(s->Parent(), s->Block());
- }
- {
- auto* e = Sem().Get(cond_a);
- ASSERT_NE(e, nullptr);
- auto* s = e->Stmt();
- ASSERT_NE(s, nullptr);
- EXPECT_TRUE(s->Is<sem::IfStatement>());
- EXPECT_EQ(s->Parent(), s->FindFirstParent<sem::FunctionBlockStatement>());
- EXPECT_EQ(s->Parent(), s->Block());
- }
- {
- auto* s = Sem().Get(stmt_a);
- ASSERT_NE(s, nullptr);
- EXPECT_EQ(s->Parent(), s->FindFirstParent<sem::BlockStatement>());
- EXPECT_EQ(s->Parent(), s->Block());
- EXPECT_EQ(s->Parent()->Parent(), s->FindFirstParent<sem::IfStatement>());
- EXPECT_EQ(s->Parent()->Parent()->Parent(),
- s->FindFirstParent<sem::FunctionBlockStatement>());
- }
- {
- auto* e = Sem().Get(cond_b);
- ASSERT_NE(e, nullptr);
- auto* s = e->Stmt();
- ASSERT_NE(s, nullptr);
- EXPECT_TRUE(s->Is<sem::ElseStatement>());
- EXPECT_EQ(s->Parent(), s->FindFirstParent<sem::IfStatement>());
- EXPECT_EQ(s->Parent()->Parent(),
- s->FindFirstParent<sem::FunctionBlockStatement>());
- EXPECT_EQ(s->Parent()->Parent(), s->Block());
- }
- {
- auto* s = Sem().Get(stmt_b);
- ASSERT_NE(s, nullptr);
- EXPECT_EQ(s->Parent(), s->FindFirstParent<sem::BlockStatement>());
- EXPECT_EQ(s->Parent(), s->Block());
- EXPECT_EQ(s->Parent()->Parent(), s->FindFirstParent<sem::ElseStatement>());
- EXPECT_EQ(s->Parent()->Parent()->Parent(),
- s->FindFirstParent<sem::IfStatement>());
- EXPECT_EQ(s->Parent()->Parent()->Parent()->Parent(),
- s->FindFirstParent<sem::FunctionBlockStatement>());
- }
- {
- auto* s = Sem().Get(stmt_c);
- ASSERT_NE(s, nullptr);
- EXPECT_EQ(s->Parent(), s->FindFirstParent<sem::BlockStatement>());
- EXPECT_EQ(s->Parent(), s->Block());
- EXPECT_EQ(s->Parent()->Parent(), s->FindFirstParent<sem::ElseStatement>());
- EXPECT_EQ(s->Parent()->Parent()->Parent(),
- s->FindFirstParent<sem::IfStatement>());
- EXPECT_EQ(s->Parent()->Parent()->Parent()->Parent(),
- s->FindFirstParent<sem::FunctionBlockStatement>());
- }
+ // fn F() {
+ // if (cond_a) {
+ // stat_a;
+ // } else if (cond_b) {
+ // stat_b;
+ // } else {
+ // stat_c;
+ // }
+ // }
+
+ auto* cond_a = Expr(true);
+ auto* stmt_a = Ignore(1_i);
+ auto* cond_b = Expr(true);
+ auto* stmt_b = Ignore(1_i);
+ auto* stmt_c = Ignore(1_i);
+ auto* if_stmt = If(cond_a, Block(stmt_a), Else(If(cond_b, Block(stmt_b), Else(Block(stmt_c)))));
+ WrapInFunction(if_stmt);
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
+
+ {
+ auto* s = Sem().Get(if_stmt);
+ ASSERT_NE(s, nullptr);
+ EXPECT_TRUE(s->Is<sem::IfStatement>());
+ EXPECT_EQ(s->Parent(), s->FindFirstParent<sem::FunctionBlockStatement>());
+ EXPECT_EQ(s->Parent(), s->Block());
+ }
+ {
+ auto* e = Sem().Get(cond_a);
+ ASSERT_NE(e, nullptr);
+ auto* s = e->Stmt();
+ ASSERT_NE(s, nullptr);
+ EXPECT_TRUE(s->Is<sem::IfStatement>());
+ EXPECT_EQ(s->Parent(), s->FindFirstParent<sem::FunctionBlockStatement>());
+ EXPECT_EQ(s->Parent(), s->Block());
+ }
+ {
+ auto* s = Sem().Get(stmt_a);
+ ASSERT_NE(s, nullptr);
+ EXPECT_EQ(s->Parent(), s->FindFirstParent<sem::BlockStatement>());
+ EXPECT_EQ(s->Parent(), s->Block());
+ EXPECT_EQ(s->Parent()->Parent(), s->FindFirstParent<sem::IfStatement>());
+ EXPECT_EQ(s->Parent()->Parent()->Parent(),
+ s->FindFirstParent<sem::FunctionBlockStatement>());
+ }
+ {
+ auto* e = Sem().Get(cond_b);
+ ASSERT_NE(e, nullptr);
+ auto* s = e->Stmt();
+ ASSERT_NE(s, nullptr);
+ EXPECT_TRUE(s->Is<sem::IfStatement>());
+ EXPECT_EQ(s->Parent(), s->Parent()->FindFirstParent<sem::IfStatement>());
+ EXPECT_EQ(s->Parent()->Parent(), s->FindFirstParent<sem::FunctionBlockStatement>());
+ EXPECT_EQ(s->Parent()->Parent(), s->Block());
+ }
+ {
+ auto* s = Sem().Get(stmt_b);
+ ASSERT_NE(s, nullptr);
+ EXPECT_EQ(s->Parent(), s->FindFirstParent<sem::BlockStatement>());
+ EXPECT_EQ(s->Parent(), s->Block());
+ auto* elseif = s->FindFirstParent<sem::IfStatement>();
+ EXPECT_EQ(s->Parent()->Parent(), elseif);
+ EXPECT_EQ(s->Parent()->Parent()->Parent(),
+ elseif->Parent()->FindFirstParent<sem::IfStatement>());
+ EXPECT_EQ(s->Parent()->Parent()->Parent()->Parent(),
+ s->FindFirstParent<sem::FunctionBlockStatement>());
+ }
+ {
+ auto* s = Sem().Get(stmt_c);
+ ASSERT_NE(s, nullptr);
+ EXPECT_EQ(s->Parent(), s->FindFirstParent<sem::BlockStatement>());
+ EXPECT_EQ(s->Parent(), s->Block());
+ auto* elseif = s->FindFirstParent<sem::IfStatement>();
+ EXPECT_EQ(s->Parent()->Parent(), elseif);
+ EXPECT_EQ(s->Parent()->Parent()->Parent(),
+ elseif->Parent()->FindFirstParent<sem::IfStatement>());
+ EXPECT_EQ(s->Parent()->Parent()->Parent()->Parent(),
+ s->FindFirstParent<sem::FunctionBlockStatement>());
+ }
}
TEST_F(ResolverCompoundStatementTest, Switch) {
- // fn F() {
- // switch (expr) {
- // case 1: {
- // stmt_a;
- // }
- // case 2: {
- // stmt_b;
- // }
- // default: {
- // stmt_c;
- // }
- // }
- // }
-
- auto* expr = Expr(5);
- auto* stmt_a = Ignore(1);
- auto* stmt_b = Ignore(1);
- auto* stmt_c = Ignore(1);
- auto* swi = Switch(expr, Case(Expr(1), Block(stmt_a)),
- Case(Expr(2), Block(stmt_b)), DefaultCase(Block(stmt_c)));
- WrapInFunction(swi);
-
- ASSERT_TRUE(r()->Resolve()) << r()->error();
-
- {
- auto* s = Sem().Get(swi);
- ASSERT_NE(s, nullptr);
- EXPECT_TRUE(s->Is<sem::SwitchStatement>());
- EXPECT_EQ(s->Parent(), s->FindFirstParent<sem::FunctionBlockStatement>());
- EXPECT_EQ(s->Parent(), s->Block());
- }
- {
- auto* e = Sem().Get(expr);
- ASSERT_NE(e, nullptr);
- auto* s = e->Stmt();
- ASSERT_NE(s, nullptr);
- EXPECT_TRUE(s->Is<sem::SwitchStatement>());
- EXPECT_EQ(s->Parent(), s->FindFirstParent<sem::FunctionBlockStatement>());
- EXPECT_EQ(s->Parent(), s->Block());
- }
- {
- auto* s = Sem().Get(stmt_a);
- ASSERT_NE(s, nullptr);
- EXPECT_EQ(s->Parent(), s->FindFirstParent<sem::BlockStatement>());
- EXPECT_EQ(s->Parent(), s->Block());
- EXPECT_EQ(s->Parent()->Parent(), s->FindFirstParent<sem::CaseStatement>());
- EXPECT_EQ(s->Parent()->Parent()->Parent(),
- s->FindFirstParent<sem::SwitchStatement>());
- EXPECT_EQ(s->Parent()->Parent()->Parent()->Parent(),
- s->FindFirstParent<sem::FunctionBlockStatement>());
- }
- {
- auto* s = Sem().Get(stmt_b);
- ASSERT_NE(s, nullptr);
- EXPECT_EQ(s->Parent(), s->FindFirstParent<sem::BlockStatement>());
- EXPECT_EQ(s->Parent(), s->Block());
- EXPECT_EQ(s->Parent()->Parent(), s->FindFirstParent<sem::CaseStatement>());
- EXPECT_EQ(s->Parent()->Parent()->Parent(),
- s->FindFirstParent<sem::SwitchStatement>());
- EXPECT_EQ(s->Parent()->Parent()->Parent()->Parent(),
- s->FindFirstParent<sem::FunctionBlockStatement>());
- }
- {
- auto* s = Sem().Get(stmt_c);
- ASSERT_NE(s, nullptr);
- EXPECT_EQ(s->Parent(), s->FindFirstParent<sem::BlockStatement>());
- EXPECT_EQ(s->Parent(), s->Block());
- EXPECT_EQ(s->Parent()->Parent(), s->FindFirstParent<sem::CaseStatement>());
- EXPECT_EQ(s->Parent()->Parent()->Parent(),
- s->FindFirstParent<sem::SwitchStatement>());
- EXPECT_EQ(s->Parent()->Parent()->Parent()->Parent(),
- s->FindFirstParent<sem::FunctionBlockStatement>());
- }
+ // fn F() {
+ // switch (expr) {
+ // case 1i: {
+ // stmt_a;
+ // }
+ // case 2i: {
+ // stmt_b;
+ // }
+ // default: {
+ // stmt_c;
+ // }
+ // }
+ // }
+
+ auto* expr = Expr(5_i);
+ auto* stmt_a = Ignore(1_i);
+ auto* stmt_b = Ignore(1_i);
+ auto* stmt_c = Ignore(1_i);
+ auto* swi = Switch(expr, Case(Expr(1_i), Block(stmt_a)), Case(Expr(2_i), Block(stmt_b)),
+ DefaultCase(Block(stmt_c)));
+ WrapInFunction(swi);
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
+
+ {
+ auto* s = Sem().Get(swi);
+ ASSERT_NE(s, nullptr);
+ EXPECT_TRUE(s->Is<sem::SwitchStatement>());
+ EXPECT_EQ(s->Parent(), s->FindFirstParent<sem::FunctionBlockStatement>());
+ EXPECT_EQ(s->Parent(), s->Block());
+ }
+ {
+ auto* e = Sem().Get(expr);
+ ASSERT_NE(e, nullptr);
+ auto* s = e->Stmt();
+ ASSERT_NE(s, nullptr);
+ EXPECT_TRUE(s->Is<sem::SwitchStatement>());
+ EXPECT_EQ(s->Parent(), s->FindFirstParent<sem::FunctionBlockStatement>());
+ EXPECT_EQ(s->Parent(), s->Block());
+ }
+ {
+ auto* s = Sem().Get(stmt_a);
+ ASSERT_NE(s, nullptr);
+ EXPECT_EQ(s->Parent(), s->FindFirstParent<sem::BlockStatement>());
+ EXPECT_EQ(s->Parent(), s->Block());
+ EXPECT_EQ(s->Parent()->Parent(), s->FindFirstParent<sem::CaseStatement>());
+ EXPECT_EQ(s->Parent()->Parent()->Parent(), s->FindFirstParent<sem::SwitchStatement>());
+ EXPECT_EQ(s->Parent()->Parent()->Parent()->Parent(),
+ s->FindFirstParent<sem::FunctionBlockStatement>());
+ }
+ {
+ auto* s = Sem().Get(stmt_b);
+ ASSERT_NE(s, nullptr);
+ EXPECT_EQ(s->Parent(), s->FindFirstParent<sem::BlockStatement>());
+ EXPECT_EQ(s->Parent(), s->Block());
+ EXPECT_EQ(s->Parent()->Parent(), s->FindFirstParent<sem::CaseStatement>());
+ EXPECT_EQ(s->Parent()->Parent()->Parent(), s->FindFirstParent<sem::SwitchStatement>());
+ EXPECT_EQ(s->Parent()->Parent()->Parent()->Parent(),
+ s->FindFirstParent<sem::FunctionBlockStatement>());
+ }
+ {
+ auto* s = Sem().Get(stmt_c);
+ ASSERT_NE(s, nullptr);
+ EXPECT_EQ(s->Parent(), s->FindFirstParent<sem::BlockStatement>());
+ EXPECT_EQ(s->Parent(), s->Block());
+ EXPECT_EQ(s->Parent()->Parent(), s->FindFirstParent<sem::CaseStatement>());
+ EXPECT_EQ(s->Parent()->Parent()->Parent(), s->FindFirstParent<sem::SwitchStatement>());
+ EXPECT_EQ(s->Parent()->Parent()->Parent()->Parent(),
+ s->FindFirstParent<sem::FunctionBlockStatement>());
+ }
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/resolver/const_eval.cc b/chromium/third_party/dawn/src/tint/resolver/const_eval.cc
new file mode 100644
index 00000000000..fac2c7db78d
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/resolver/const_eval.cc
@@ -0,0 +1,19 @@
+// Copyright 2022 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/tint/resolver/const_eval.h"
+
+#include "src/tint/sem/constant.h"
+
+namespace tint::resolver::const_eval {} // namespace tint::resolver::const_eval
diff --git a/chromium/third_party/dawn/src/tint/resolver/const_eval.h b/chromium/third_party/dawn/src/tint/resolver/const_eval.h
new file mode 100644
index 00000000000..89bb3da8de4
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/resolver/const_eval.h
@@ -0,0 +1,37 @@
+// Copyright 2022 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef SRC_TINT_RESOLVER_CONST_EVAL_H_
+#define SRC_TINT_RESOLVER_CONST_EVAL_H_
+
+#include <stddef.h>
+
+// Forward declarations
+namespace tint {
+class ProgramBuilder;
+} // namespace tint
+
+// Forward declarations
+namespace tint::sem {
+class Constant;
+} // namespace tint::sem
+
+namespace tint::resolver::const_eval {
+
+/// Typedef for a constant evaluation function
+using Function = sem::Constant(ProgramBuilder& builder, const sem::Constant* args, size_t num_args);
+
+} // namespace tint::resolver::const_eval
+
+#endif // SRC_TINT_RESOLVER_CONST_EVAL_H_
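
The new const_eval.h header above only declares a function-type alias for constant-evaluation routines, sem::Constant(ProgramBuilder&, const sem::Constant*, size_t). Purely as an illustration of that dispatch pattern, a table of evaluator functions keyed by name could look like the following sketch; the Builder, Value, and AddI32 names are hypothetical stand-ins and do not correspond to Tint's actual ProgramBuilder or sem::Constant API:

    // Hypothetical sketch of a constant-evaluation dispatch table (illustrative only).
    #include <cstddef>
    #include <map>
    #include <string>

    struct Builder {};            // stand-in for ProgramBuilder
    struct Value { int i = 0; };  // stand-in for sem::Constant

    // Same shape as the alias: Value(Builder&, const Value*, size_t).
    using EvalFn = Value(Builder& b, const Value* args, size_t num_args);

    Value AddI32(Builder&, const Value* args, size_t num_args) {
        // Fold an integer addition at resolve time.
        return Value{num_args == 2 ? args[0].i + args[1].i : 0};
    }

    int main() {
        Builder b;
        std::map<std::string, EvalFn*> table{{"add_i32", &AddI32}};
        Value args[] = {Value{2}, Value{3}};
        Value folded = table["add_i32"](b, args, 2);  // folded.i == 5
        return folded.i == 5 ? 0 : 1;
    }

A resolver can register one such function per foldable intrinsic or operator and call it whenever all arguments are compile-time constants, which is the role the empty tint::resolver::const_eval namespace is being introduced to fill.
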
diff --git a/chromium/third_party/dawn/src/tint/resolver/control_block_validation_test.cc b/chromium/third_party/dawn/src/tint/resolver/control_block_validation_test.cc
index fdb2059a87d..a5ba7ca0fb9 100644
--- a/chromium/third_party/dawn/src/tint/resolver/control_block_validation_test.cc
+++ b/chromium/third_party/dawn/src/tint/resolver/control_block_validation_test.cc
@@ -18,346 +18,335 @@
#include "src/tint/ast/switch_statement.h"
#include "src/tint/resolver/resolver_test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::resolver {
namespace {
-class ResolverControlBlockValidationTest : public TestHelper,
- public testing::Test {};
+class ResolverControlBlockValidationTest : public TestHelper, public testing::Test {};
-TEST_F(ResolverControlBlockValidationTest,
- SwitchSelectorExpressionNoneIntegerType_Fail) {
- // var a : f32 = 3.14;
- // switch (a) {
- // default: {}
- // }
- auto* var = Var("a", ty.f32(), Expr(3.14f));
+TEST_F(ResolverControlBlockValidationTest, SwitchSelectorExpressionNoneIntegerType_Fail) {
+ // var a : f32 = 3.14;
+ // switch (a) {
+ // default: {}
+ // }
+ auto* var = Var("a", ty.f32(), Expr(3.14_f));
- auto* block = Block(Decl(var), Switch(Expr(Source{{12, 34}}, "a"), //
- DefaultCase()));
+ auto* block = Block(Decl(var), Switch(Expr(Source{{12, 34}}, "a"), //
+ DefaultCase()));
- WrapInFunction(block);
+ WrapInFunction(block);
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: switch statement selector expression must be of a "
- "scalar integer type");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: switch statement selector expression must be of a "
+ "scalar integer type");
}
TEST_F(ResolverControlBlockValidationTest, SwitchWithoutDefault_Fail) {
- // var a : i32 = 2;
- // switch (a) {
- // case 1: {}
- // }
- auto* var = Var("a", ty.i32(), Expr(2));
+ // var a : i32 = 2;
+ // switch (a) {
+ // case 1: {}
+ // }
+ auto* var = Var("a", ty.i32(), Expr(2_i));
- auto* block = Block(Decl(var), //
- Switch(Source{{12, 34}}, "a", //
- Case(Expr(1))));
+ auto* block = Block(Decl(var), //
+ Switch(Source{{12, 34}}, "a", //
+ Case(Expr(1_i))));
- WrapInFunction(block);
+ WrapInFunction(block);
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: switch statement must have a default clause");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: switch statement must have a default clause");
}
TEST_F(ResolverControlBlockValidationTest, SwitchWithTwoDefault_Fail) {
- // var a : i32 = 2;
- // switch (a) {
- // default: {}
- // case 1: {}
- // default: {}
- // }
- auto* var = Var("a", ty.i32(), Expr(2));
-
- auto* block = Block(Decl(var), //
- Switch("a", //
- DefaultCase(), //
- Case(Expr(1)), //
- DefaultCase(Source{{12, 34}})));
-
- WrapInFunction(block);
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- "12:34 error: switch statement must have exactly one default clause");
+ // var a : i32 = 2;
+ // switch (a) {
+ // default: {}
+ // case 1: {}
+ // default: {}
+ // }
+ auto* var = Var("a", ty.i32(), Expr(2_i));
+
+ auto* block = Block(Decl(var), //
+ Switch("a", //
+ DefaultCase(), //
+ Case(Expr(1_i)), //
+ DefaultCase(Source{{12, 34}})));
+
+ WrapInFunction(block);
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: switch statement must have exactly one default clause");
}
TEST_F(ResolverControlBlockValidationTest, UnreachableCode_Loop_continue) {
- // loop {
- // if (false) { break; }
- // var z: i32;
- // continue;
- // z = 1;
- // }
- auto* decl_z = Decl(Var("z", ty.i32()));
- auto* cont = Continue();
- auto* assign_z = Assign(Source{{12, 34}}, "z", 1);
- WrapInFunction(
- Loop(Block(If(false, Block(Break())), decl_z, cont, assign_z)));
-
- ASSERT_TRUE(r()->Resolve()) << r()->error();
- EXPECT_EQ(r()->error(), "12:34 warning: code is unreachable");
- EXPECT_TRUE(Sem().Get(decl_z)->IsReachable());
- EXPECT_TRUE(Sem().Get(cont)->IsReachable());
- EXPECT_FALSE(Sem().Get(assign_z)->IsReachable());
+ // loop {
+ // if (false) { break; }
+ // var z: i32;
+ // continue;
+ // z = 1;
+ // }
+ auto* decl_z = Decl(Var("z", ty.i32()));
+ auto* cont = Continue();
+ auto* assign_z = Assign(Source{{12, 34}}, "z", 1_i);
+ WrapInFunction(Loop(Block(If(false, Block(Break())), decl_z, cont, assign_z)));
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_EQ(r()->error(), "12:34 warning: code is unreachable");
+ EXPECT_TRUE(Sem().Get(decl_z)->IsReachable());
+ EXPECT_TRUE(Sem().Get(cont)->IsReachable());
+ EXPECT_FALSE(Sem().Get(assign_z)->IsReachable());
}
-TEST_F(ResolverControlBlockValidationTest,
- UnreachableCode_Loop_continue_InBlocks) {
- // loop {
- // if (false) { break; }
- // var z: i32;
- // {{{continue;}}}
- // z = 1;
- // }
- auto* decl_z = Decl(Var("z", ty.i32()));
- auto* cont = Continue();
- auto* assign_z = Assign(Source{{12, 34}}, "z", 1);
- WrapInFunction(Loop(Block(If(false, Block(Break())), decl_z,
- Block(Block(Block(cont))), assign_z)));
-
- ASSERT_TRUE(r()->Resolve()) << r()->error();
- EXPECT_EQ(r()->error(), "12:34 warning: code is unreachable");
- EXPECT_TRUE(Sem().Get(decl_z)->IsReachable());
- EXPECT_TRUE(Sem().Get(cont)->IsReachable());
- EXPECT_FALSE(Sem().Get(assign_z)->IsReachable());
+TEST_F(ResolverControlBlockValidationTest, UnreachableCode_Loop_continue_InBlocks) {
+ // loop {
+ // if (false) { break; }
+ // var z: i32;
+ // {{{continue;}}}
+ // z = 1;
+ // }
+ auto* decl_z = Decl(Var("z", ty.i32()));
+ auto* cont = Continue();
+ auto* assign_z = Assign(Source{{12, 34}}, "z", 1_i);
+ WrapInFunction(
+ Loop(Block(If(false, Block(Break())), decl_z, Block(Block(Block(cont))), assign_z)));
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_EQ(r()->error(), "12:34 warning: code is unreachable");
+ EXPECT_TRUE(Sem().Get(decl_z)->IsReachable());
+ EXPECT_TRUE(Sem().Get(cont)->IsReachable());
+ EXPECT_FALSE(Sem().Get(assign_z)->IsReachable());
}
TEST_F(ResolverControlBlockValidationTest, UnreachableCode_ForLoop_continue) {
- // for (;false;) {
- // var z: i32;
- // continue;
- // z = 1;
- // }
- auto* decl_z = Decl(Var("z", ty.i32()));
- auto* cont = Continue();
- auto* assign_z = Assign(Source{{12, 34}}, "z", 1);
- WrapInFunction(For(nullptr, false, nullptr, //
- Block(decl_z, cont, assign_z)));
-
- ASSERT_TRUE(r()->Resolve()) << r()->error();
- EXPECT_EQ(r()->error(), "12:34 warning: code is unreachable");
- EXPECT_TRUE(Sem().Get(decl_z)->IsReachable());
- EXPECT_TRUE(Sem().Get(cont)->IsReachable());
- EXPECT_FALSE(Sem().Get(assign_z)->IsReachable());
+ // for (;false;) {
+ // var z: i32;
+ // continue;
+ // z = 1;
+ // }
+ auto* decl_z = Decl(Var("z", ty.i32()));
+ auto* cont = Continue();
+ auto* assign_z = Assign(Source{{12, 34}}, "z", 1_i);
+ WrapInFunction(For(nullptr, false, nullptr, //
+ Block(decl_z, cont, assign_z)));
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_EQ(r()->error(), "12:34 warning: code is unreachable");
+ EXPECT_TRUE(Sem().Get(decl_z)->IsReachable());
+ EXPECT_TRUE(Sem().Get(cont)->IsReachable());
+ EXPECT_FALSE(Sem().Get(assign_z)->IsReachable());
}
-TEST_F(ResolverControlBlockValidationTest,
- UnreachableCode_ForLoop_continue_InBlocks) {
- // for (;false;) {
- // var z: i32;
- // {{{continue;}}}
- // z = 1;
- // }
- auto* decl_z = Decl(Var("z", ty.i32()));
- auto* cont = Continue();
- auto* assign_z = Assign(Source{{12, 34}}, "z", 1);
- WrapInFunction(For(nullptr, false, nullptr,
- Block(decl_z, Block(Block(Block(cont))), assign_z)));
-
- ASSERT_TRUE(r()->Resolve()) << r()->error();
- EXPECT_EQ(r()->error(), "12:34 warning: code is unreachable");
- EXPECT_TRUE(Sem().Get(decl_z)->IsReachable());
- EXPECT_TRUE(Sem().Get(cont)->IsReachable());
- EXPECT_FALSE(Sem().Get(assign_z)->IsReachable());
+TEST_F(ResolverControlBlockValidationTest, UnreachableCode_ForLoop_continue_InBlocks) {
+ // for (;false;) {
+ // var z: i32;
+ // {{{continue;}}}
+ // z = 1;
+ // }
+ auto* decl_z = Decl(Var("z", ty.i32()));
+ auto* cont = Continue();
+ auto* assign_z = Assign(Source{{12, 34}}, "z", 1_i);
+ WrapInFunction(
+ For(nullptr, false, nullptr, Block(decl_z, Block(Block(Block(cont))), assign_z)));
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_EQ(r()->error(), "12:34 warning: code is unreachable");
+ EXPECT_TRUE(Sem().Get(decl_z)->IsReachable());
+ EXPECT_TRUE(Sem().Get(cont)->IsReachable());
+ EXPECT_FALSE(Sem().Get(assign_z)->IsReachable());
}
TEST_F(ResolverControlBlockValidationTest, UnreachableCode_break) {
- // switch (1) {
- // case 1: {
- // var z: i32;
- // break;
- // z = 1;
- // default: {}
- // }
- auto* decl_z = Decl(Var("z", ty.i32()));
- auto* brk = Break();
- auto* assign_z = Assign(Source{{12, 34}}, "z", 1);
- WrapInFunction( //
- Block(Switch(1, //
- Case(Expr(1), Block(decl_z, brk, assign_z)), //
- DefaultCase())));
-
- ASSERT_TRUE(r()->Resolve()) << r()->error();
- EXPECT_EQ(r()->error(), "12:34 warning: code is unreachable");
- EXPECT_TRUE(Sem().Get(decl_z)->IsReachable());
- EXPECT_TRUE(Sem().Get(brk)->IsReachable());
- EXPECT_FALSE(Sem().Get(assign_z)->IsReachable());
+ // switch (1i) {
+ // case 1i: {
+ // var z: i32;
+ // break;
+ // z = 1i;
+ // default: {}
+ // }
+ auto* decl_z = Decl(Var("z", ty.i32()));
+ auto* brk = Break();
+ auto* assign_z = Assign(Source{{12, 34}}, "z", 1_i);
+ WrapInFunction( //
+ Block(Switch(1_i, //
+ Case(Expr(1_i), Block(decl_z, brk, assign_z)), //
+ DefaultCase())));
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_EQ(r()->error(), "12:34 warning: code is unreachable");
+ EXPECT_TRUE(Sem().Get(decl_z)->IsReachable());
+ EXPECT_TRUE(Sem().Get(brk)->IsReachable());
+ EXPECT_FALSE(Sem().Get(assign_z)->IsReachable());
}
TEST_F(ResolverControlBlockValidationTest, UnreachableCode_break_InBlocks) {
- // loop {
- // switch (1) {
- // case 1: { {{{break;}}} var a : u32 = 2;}
- // default: {}
- // }
- // break;
- // }
- auto* decl_z = Decl(Var("z", ty.i32()));
- auto* brk = Break();
- auto* assign_z = Assign(Source{{12, 34}}, "z", 1);
- WrapInFunction(Loop(Block(
- Switch(1, //
- Case(Expr(1), Block(decl_z, Block(Block(Block(brk))), assign_z)),
- DefaultCase()), //
- Break())));
-
- ASSERT_TRUE(r()->Resolve()) << r()->error();
- EXPECT_EQ(r()->error(), "12:34 warning: code is unreachable");
- EXPECT_TRUE(Sem().Get(decl_z)->IsReachable());
- EXPECT_TRUE(Sem().Get(brk)->IsReachable());
- EXPECT_FALSE(Sem().Get(assign_z)->IsReachable());
+ // loop {
+ // switch (1i) {
+ // case 1i: { {{{break;}}} var a : u32 = 2;}
+ // default: {}
+ // }
+ // break;
+ // }
+ auto* decl_z = Decl(Var("z", ty.i32()));
+ auto* brk = Break();
+ auto* assign_z = Assign(Source{{12, 34}}, "z", 1_i);
+ WrapInFunction(
+ Loop(Block(Switch(1_i, //
+ Case(Expr(1_i), Block(decl_z, Block(Block(Block(brk))), assign_z)),
+ DefaultCase()), //
+ Break())));
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_EQ(r()->error(), "12:34 warning: code is unreachable");
+ EXPECT_TRUE(Sem().Get(decl_z)->IsReachable());
+ EXPECT_TRUE(Sem().Get(brk)->IsReachable());
+ EXPECT_FALSE(Sem().Get(assign_z)->IsReachable());
}
-TEST_F(ResolverControlBlockValidationTest,
- SwitchConditionTypeMustMatchSelectorType2_Fail) {
- // var a : u32 = 2;
- // switch (a) {
- // case 1: {}
- // default: {}
- // }
- auto* var = Var("a", ty.i32(), Expr(2));
-
- auto* block = Block(Decl(var), Switch("a", //
- Case(Source{{12, 34}}, {Expr(1u)}), //
- DefaultCase()));
- WrapInFunction(block);
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: the case selector values must have the same type as "
- "the selector expression.");
+TEST_F(ResolverControlBlockValidationTest, SwitchConditionTypeMustMatchSelectorType2_Fail) {
+ // var a : u32 = 2;
+ // switch (a) {
+ // case 1i: {}
+ // default: {}
+ // }
+ auto* var = Var("a", ty.i32(), Expr(2_i));
+
+ auto* block = Block(Decl(var), Switch("a", //
+ Case(Source{{12, 34}}, {Expr(1_u)}), //
+ DefaultCase()));
+ WrapInFunction(block);
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: the case selector values must have the same type as "
+ "the selector expression.");
}
-TEST_F(ResolverControlBlockValidationTest,
- SwitchConditionTypeMustMatchSelectorType_Fail) {
- // var a : u32 = 2;
- // switch (a) {
- // case -1: {}
- // default: {}
- // }
- auto* var = Var("a", ty.u32(), Expr(2u));
-
- auto* block = Block(Decl(var), //
- Switch("a", //
- Case(Source{{12, 34}}, {Expr(-1)}), //
- DefaultCase()));
- WrapInFunction(block);
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: the case selector values must have the same type as "
- "the selector expression.");
+TEST_F(ResolverControlBlockValidationTest, SwitchConditionTypeMustMatchSelectorType_Fail) {
+ // var a : u32 = 2;
+ // switch (a) {
+ // case -1i: {}
+ // default: {}
+ // }
+ auto* var = Var("a", ty.u32(), Expr(2_u));
+
+ auto* block = Block(Decl(var), //
+ Switch("a", //
+ Case(Source{{12, 34}}, {Expr(-1_i)}), //
+ DefaultCase()));
+ WrapInFunction(block);
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: the case selector values must have the same type as "
+ "the selector expression.");
}
-TEST_F(ResolverControlBlockValidationTest,
- NonUniqueCaseSelectorValueUint_Fail) {
- // var a : u32 = 3;
- // switch (a) {
- // case 0u: {}
- // case 2u, 3u, 2u: {}
- // default: {}
- // }
- auto* var = Var("a", ty.u32(), Expr(3u));
-
- auto* block = Block(Decl(var), //
- Switch("a", //
- Case(Expr(0u)),
- Case({
- Expr(Source{{12, 34}}, 2u),
- Expr(3u),
- Expr(Source{{56, 78}}, 2u),
- }),
- DefaultCase()));
- WrapInFunction(block);
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "56:78 error: duplicate switch case '2'\n"
- "12:34 note: previous case declared here");
+TEST_F(ResolverControlBlockValidationTest, NonUniqueCaseSelectorValueUint_Fail) {
+ // var a : u32 = 3;
+ // switch (a) {
+ // case 0u: {}
+ // case 2u, 3u, 2u: {}
+ // default: {}
+ // }
+ auto* var = Var("a", ty.u32(), Expr(3_u));
+
+ auto* block = Block(Decl(var), //
+ Switch("a", //
+ Case(Expr(0_u)),
+ Case({
+ Expr(Source{{12, 34}}, 2_u),
+ Expr(3_u),
+ Expr(Source{{56, 78}}, 2_u),
+ }),
+ DefaultCase()));
+ WrapInFunction(block);
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "56:78 error: duplicate switch case '2'\n"
+ "12:34 note: previous case declared here");
}
-TEST_F(ResolverControlBlockValidationTest,
- NonUniqueCaseSelectorValueSint_Fail) {
- // var a : i32 = 2;
- // switch (a) {
- // case -10: {}
- // case 0,1,2,-10: {}
- // default: {}
- // }
- auto* var = Var("a", ty.i32(), Expr(2));
-
- auto* block = Block(Decl(var), //
- Switch("a", //
- Case(Expr(Source{{12, 34}}, -10)),
- Case({
- Expr(0),
- Expr(1),
- Expr(2),
- Expr(Source{{56, 78}}, -10),
- }),
- DefaultCase()));
- WrapInFunction(block);
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "56:78 error: duplicate switch case '-10'\n"
- "12:34 note: previous case declared here");
+TEST_F(ResolverControlBlockValidationTest, NonUniqueCaseSelectorValueSint_Fail) {
+ // var a : i32 = 2;
+ // switch (a) {
+ // case -10: {}
+ // case 0,1,2,-10: {}
+ // default: {}
+ // }
+ auto* var = Var("a", ty.i32(), Expr(2_i));
+
+ auto* block = Block(Decl(var), //
+ Switch("a", //
+ Case(Expr(Source{{12, 34}}, -10_i)),
+ Case({
+ Expr(0_i),
+ Expr(1_i),
+ Expr(2_i),
+ Expr(Source{{56, 78}}, -10_i),
+ }),
+ DefaultCase()));
+ WrapInFunction(block);
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "56:78 error: duplicate switch case '-10'\n"
+ "12:34 note: previous case declared here");
}
-TEST_F(ResolverControlBlockValidationTest,
- LastClauseLastStatementIsFallthrough_Fail) {
- // var a : i32 = 2;
- // switch (a) {
- // default: { fallthrough; }
- // }
- auto* var = Var("a", ty.i32(), Expr(2));
- auto* fallthrough = create<ast::FallthroughStatement>(Source{{12, 34}});
- auto* block = Block(Decl(var), //
- Switch("a", //
- DefaultCase(Block(fallthrough))));
- WrapInFunction(block);
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: a fallthrough statement must not be used in the last "
- "switch case");
+TEST_F(ResolverControlBlockValidationTest, LastClauseLastStatementIsFallthrough_Fail) {
+ // var a : i32 = 2;
+ // switch (a) {
+ // default: { fallthrough; }
+ // }
+ auto* var = Var("a", ty.i32(), Expr(2_i));
+ auto* fallthrough = create<ast::FallthroughStatement>(Source{{12, 34}});
+ auto* block = Block(Decl(var), //
+ Switch("a", //
+ DefaultCase(Block(fallthrough))));
+ WrapInFunction(block);
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: a fallthrough statement must not be used in the last "
+ "switch case");
}
TEST_F(ResolverControlBlockValidationTest, SwitchCase_Pass) {
- // var a : i32 = 2;
- // switch (a) {
- // default: {}
- // case 5: {}
- // }
- auto* var = Var("a", ty.i32(), Expr(2));
-
- auto* block = Block(Decl(var), //
- Switch("a", //
- DefaultCase(Source{{12, 34}}), //
- Case(Expr(5))));
- WrapInFunction(block);
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ // var a : i32 = 2;
+ // switch (a) {
+ // default: {}
+ // case 5: {}
+ // }
+ auto* var = Var("a", ty.i32(), Expr(2_i));
+
+ auto* block = Block(Decl(var), //
+ Switch("a", //
+ DefaultCase(Source{{12, 34}}), //
+ Case(Expr(5_i))));
+ WrapInFunction(block);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverControlBlockValidationTest, SwitchCaseAlias_Pass) {
- // type MyInt = u32;
- // var v: MyInt;
- // switch(v){
- // default: {}
- // }
+ // type MyInt = u32;
+    // var a : MyInt = 2u;
+    // switch(a){
+ // default: {}
+ // }
- auto* my_int = Alias("MyInt", ty.u32());
- auto* var = Var("a", ty.Of(my_int), Expr(2u));
- auto* block = Block(Decl(var), //
- Switch("a", DefaultCase(Source{{12, 34}})));
+ auto* my_int = Alias("MyInt", ty.u32());
+ auto* var = Var("a", ty.Of(my_int), Expr(2_u));
+ auto* block = Block(Decl(var), //
+ Switch("a", DefaultCase(Source{{12, 34}})));
- WrapInFunction(block);
+ WrapInFunction(block);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
} // namespace
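For orientation, here is a minimal sketch of the builder pattern these rewritten tests share. It is illustrative only: the test name is hypothetical, and the fixture, builder helpers, and `_u` literal suffix are the ones already used in the hunk above.

    using namespace tint::number_suffixes;  // NOLINT

    TEST_F(ResolverControlBlockValidationTest, SwitchSelectorTypeMatches_Pass) {
        // var a : u32 = 2u;
        // switch (a) {
        //   case 1u: {}
        //   default: {}
        // }
        auto* var = Var("a", ty.u32(), Expr(2_u));
        auto* block = Block(Decl(var),             //
                            Switch("a",            //
                                   Case(Expr(1_u)),
                                   DefaultCase()));
        WrapInFunction(block);
        // Selector and condition are both u32, so resolution succeeds.
        EXPECT_TRUE(r()->Resolve()) << r()->error();
    }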
diff --git a/chromium/third_party/dawn/src/tint/resolver/ctor_conv_intrinsic.cc b/chromium/third_party/dawn/src/tint/resolver/ctor_conv_intrinsic.cc
new file mode 100644
index 00000000000..5618fbab499
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/resolver/ctor_conv_intrinsic.cc
@@ -0,0 +1,70 @@
+// Copyright 2021 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+////////////////////////////////////////////////////////////////////////////////
+// File generated by tools/intrinsic-gen
+// using the template:
+// src/tint/resolver/ctor_conv_intrinsic.cc.tmpl
+// and the intrinsic definition file:
+// src/tint/intrinsics.def
+//
+// Do not modify this file directly
+////////////////////////////////////////////////////////////////////////////////
+
+#include "src/tint/resolver/ctor_conv_intrinsic.h"
+
+namespace tint::resolver {
+
+const char* str(CtorConvIntrinsic i) {
+ switch (i) {
+ case CtorConvIntrinsic::kNone:
+ return "<none>";
+ case CtorConvIntrinsic::kI32:
+ return "i32";
+ case CtorConvIntrinsic::kU32:
+ return "u32";
+ case CtorConvIntrinsic::kF32:
+ return "f32";
+ case CtorConvIntrinsic::kBool:
+ return "bool";
+ case CtorConvIntrinsic::kVec2:
+ return "vec2";
+ case CtorConvIntrinsic::kVec3:
+ return "vec3";
+ case CtorConvIntrinsic::kVec4:
+ return "vec4";
+ case CtorConvIntrinsic::kMat2x2:
+ return "mat2x2";
+ case CtorConvIntrinsic::kMat2x3:
+ return "mat2x3";
+ case CtorConvIntrinsic::kMat2x4:
+ return "mat2x4";
+ case CtorConvIntrinsic::kMat3x2:
+ return "mat3x2";
+ case CtorConvIntrinsic::kMat3x3:
+ return "mat3x3";
+ case CtorConvIntrinsic::kMat3x4:
+ return "mat3x4";
+ case CtorConvIntrinsic::kMat4x2:
+ return "mat4x2";
+ case CtorConvIntrinsic::kMat4x3:
+ return "mat4x3";
+ case CtorConvIntrinsic::kMat4x4:
+ return "mat4x4";
+ }
+ return "<unknown>";
+}
+
+} // namespace tint::resolver
+
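A hedged usage sketch for the generated str() helper above; DescribeCtor and its wording are illustrative, only CtorConvIntrinsic and str() come from the file shown here.

    #include <string>

    #include "src/tint/resolver/ctor_conv_intrinsic.h"

    // Formats a constructor/converter name for a diagnostic message.
    std::string DescribeCtor(tint::resolver::CtorConvIntrinsic ty) {
        // e.g. DescribeCtor(CtorConvIntrinsic::kVec3) yields "type constructor 'vec3'"
        return std::string("type constructor '") + tint::resolver::str(ty) + "'";
    }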
diff --git a/chromium/third_party/dawn/src/tint/resolver/ctor_conv_intrinsic.cc.tmpl b/chromium/third_party/dawn/src/tint/resolver/ctor_conv_intrinsic.cc.tmpl
new file mode 100644
index 00000000000..ac98c4d22e6
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/resolver/ctor_conv_intrinsic.cc.tmpl
@@ -0,0 +1,28 @@
+{{- /*
+--------------------------------------------------------------------------------
+Template file for use with tools/builtin-gen to generate ctor_conv_intrinsic.cc
+
+See:
+* tools/cmd/intrinsic-gen/gen for structures used by this template
+* https://golang.org/pkg/text/template/ for documentation on the template syntax
+--------------------------------------------------------------------------------
+*/ -}}
+
+#include "src/tint/resolver/ctor_conv_intrinsic.h"
+
+namespace tint::resolver {
+
+const char* str(CtorConvIntrinsic i) {
+ switch (i) {
+ case CtorConvIntrinsic::kNone:
+ return "<none>";
+{{- range .Sem.ConstructorsAndConverters }}
+ case CtorConvIntrinsic::k{{Title .Name}}:
+ return "{{.Name}}";
+{{- end }}
+ }
+ return "<unknown>";
+}
+
+} // namespace tint::resolver
+
diff --git a/chromium/third_party/dawn/src/tint/resolver/ctor_conv_intrinsic.h b/chromium/third_party/dawn/src/tint/resolver/ctor_conv_intrinsic.h
new file mode 100644
index 00000000000..7c686583038
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/resolver/ctor_conv_intrinsic.h
@@ -0,0 +1,100 @@
+// Copyright 2021 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+////////////////////////////////////////////////////////////////////////////////
+// File generated by tools/intrinsic-gen
+// using the template:
+// src/tint/resolver/ctor_conv_intrinsic.h.tmpl
+// and the intrinsic definition file:
+// src/tint/intrinsics.def
+//
+// Do not modify this file directly
+////////////////////////////////////////////////////////////////////////////////
+
+#ifndef SRC_TINT_RESOLVER_CTOR_CONV_INTRINSIC_H_
+#define SRC_TINT_RESOLVER_CTOR_CONV_INTRINSIC_H_
+
+#include <cstdint>
+
+namespace tint::resolver {
+
+/// CtorConvIntrinsic is an enumerator of types that have a constructor or converter overload
+/// declared in the intrinsic table.
+enum class CtorConvIntrinsic {
+ kNone = -1,
+ kI32,
+ kU32,
+ kF32,
+ kBool,
+ kVec2,
+ kVec3,
+ kVec4,
+ kMat2x2,
+ kMat2x3,
+ kMat2x4,
+ kMat3x2,
+ kMat3x3,
+ kMat3x4,
+ kMat4x2,
+ kMat4x3,
+ kMat4x4,
+};
+
+/// @returns the name of the type.
+const char* str(CtorConvIntrinsic i);
+
+/// @param n the width of the vector
+/// @return the CtorConvIntrinsic for a vector of width `n`
+inline CtorConvIntrinsic VectorCtorConvIntrinsic(uint32_t n) {
+ switch (n) {
+ case 2:
+ return CtorConvIntrinsic::kVec2;
+ case 3:
+ return CtorConvIntrinsic::kVec3;
+ case 4:
+ return CtorConvIntrinsic::kVec4;
+ }
+ return CtorConvIntrinsic::kNone;
+}
+
+/// @param c the number of columns in the matrix
+/// @param r the number of rows in the matrix
+/// @return the CtorConvIntrinsic for a matrix with `c` columns and `r` rows
+inline CtorConvIntrinsic MatrixCtorConvIntrinsic(uint32_t c, uint32_t r) {
+ switch ((c - 2) * 3 + (r - 2)) {
+ case 0:
+ return CtorConvIntrinsic::kMat2x2;
+ case 1:
+ return CtorConvIntrinsic::kMat2x3;
+ case 2:
+ return CtorConvIntrinsic::kMat2x4;
+ case 3:
+ return CtorConvIntrinsic::kMat3x2;
+ case 4:
+ return CtorConvIntrinsic::kMat3x3;
+ case 5:
+ return CtorConvIntrinsic::kMat3x4;
+ case 6:
+ return CtorConvIntrinsic::kMat4x2;
+ case 7:
+ return CtorConvIntrinsic::kMat4x3;
+ case 8:
+ return CtorConvIntrinsic::kMat4x4;
+ }
+ return CtorConvIntrinsic::kNone;
+}
+
+} // namespace tint::resolver
+
+#endif // SRC_TINT_RESOLVER_CTOR_CONV_INTRINSIC_H_
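The MatrixCtorConvIntrinsic helper above flattens (columns, rows) with index = (c - 2) * 3 + (r - 2), so the nine combinations in 2..4 x 2..4 land on cases 0..8 and anything else falls through to kNone. A small sanity-check sketch (the free function is assumed, not part of the patch):

    #include <cassert>

    #include "src/tint/resolver/ctor_conv_intrinsic.h"

    void CheckMatrixMapping() {
        using tint::resolver::CtorConvIntrinsic;
        using tint::resolver::MatrixCtorConvIntrinsic;
        assert(MatrixCtorConvIntrinsic(2, 2) == CtorConvIntrinsic::kMat2x2);  // (0)*3 + 0 = 0
        assert(MatrixCtorConvIntrinsic(3, 4) == CtorConvIntrinsic::kMat3x4);  // (1)*3 + 2 = 5
        assert(MatrixCtorConvIntrinsic(4, 4) == CtorConvIntrinsic::kMat4x4);  // (2)*3 + 2 = 8
        assert(MatrixCtorConvIntrinsic(5, 2) == CtorConvIntrinsic::kNone);    // 9 is out of range
    }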
diff --git a/chromium/third_party/dawn/src/tint/resolver/ctor_conv_intrinsic.h.tmpl b/chromium/third_party/dawn/src/tint/resolver/ctor_conv_intrinsic.h.tmpl
new file mode 100644
index 00000000000..9c0da258050
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/resolver/ctor_conv_intrinsic.h.tmpl
@@ -0,0 +1,73 @@
+{{- /*
+--------------------------------------------------------------------------------
+Template file for use with tools/builtin-gen to generate ctor_conv_intrinsic.h
+
+See:
+* tools/cmd/intrinsic-gen/gen for structures used by this template
+* https://golang.org/pkg/text/template/ for documentation on the template syntax
+--------------------------------------------------------------------------------
+*/ -}}
+
+#ifndef SRC_TINT_RESOLVER_CTOR_CONV_INTRINSIC_H_
+#define SRC_TINT_RESOLVER_CTOR_CONV_INTRINSIC_H_
+
+#include <cstdint>
+
+namespace tint::resolver {
+
+/// CtorConvIntrinsic is an enumerator of types that have a constructor or converter overload
+/// declared in the intrinsic table.
+enum class CtorConvIntrinsic {
+ kNone = -1,
+{{- range .Sem.ConstructorsAndConverters }}
+ k{{Title .Name}},
+{{- end }}
+};
+
+/// @returns the name of the type.
+const char* str(CtorConvIntrinsic i);
+
+/// @param n the width of the vector
+/// @return the CtorConvIntrinsic for a vector of width `n`
+inline CtorConvIntrinsic VectorCtorConvIntrinsic(uint32_t n) {
+ switch (n) {
+ case 2:
+ return CtorConvIntrinsic::kVec2;
+ case 3:
+ return CtorConvIntrinsic::kVec3;
+ case 4:
+ return CtorConvIntrinsic::kVec4;
+ }
+ return CtorConvIntrinsic::kNone;
+}
+
+/// @param c the number of columns in the matrix
+/// @param r the number of rows in the matrix
+/// @return the CtorConvIntrinsic for a matrix with `c` columns and `r` rows
+inline CtorConvIntrinsic MatrixCtorConvIntrinsic(uint32_t c, uint32_t r) {
+ switch ((c - 2) * 3 + (r - 2)) {
+ case 0:
+ return CtorConvIntrinsic::kMat2x2;
+ case 1:
+ return CtorConvIntrinsic::kMat2x3;
+ case 2:
+ return CtorConvIntrinsic::kMat2x4;
+ case 3:
+ return CtorConvIntrinsic::kMat3x2;
+ case 4:
+ return CtorConvIntrinsic::kMat3x3;
+ case 5:
+ return CtorConvIntrinsic::kMat3x4;
+ case 6:
+ return CtorConvIntrinsic::kMat4x2;
+ case 7:
+ return CtorConvIntrinsic::kMat4x3;
+ case 8:
+ return CtorConvIntrinsic::kMat4x4;
+ }
+ return CtorConvIntrinsic::kNone;
+}
+
+} // namespace tint::resolver
+
+#endif // SRC_TINT_RESOLVER_CTOR_CONV_INTRINSIC_H_
diff --git a/chromium/third_party/dawn/src/tint/resolver/dependency_graph.cc b/chromium/third_party/dawn/src/tint/resolver/dependency_graph.cc
index fd71ebe266c..7e668997c68 100644
--- a/chromium/third_party/dawn/src/tint/resolver/dependency_graph.cc
+++ b/chromium/third_party/dawn/src/tint/resolver/dependency_graph.cc
@@ -40,48 +40,46 @@ struct Global;
/// Dependency describes how one global depends on another global
struct DependencyInfo {
- /// The source of the symbol that forms the dependency
- Source source;
- /// A string describing how the dependency is referenced. e.g. 'calls'
- const char* action = nullptr;
+ /// The source of the symbol that forms the dependency
+ Source source;
+ /// A string describing how the dependency is referenced. e.g. 'calls'
+ const char* action = nullptr;
};
/// DependencyEdge describes the two Globals used to define a dependency
/// relationship.
struct DependencyEdge {
- /// The Global that depends on #to
- const Global* from;
- /// The Global that is depended on by #from
- const Global* to;
+ /// The Global that depends on #to
+ const Global* from;
+ /// The Global that is depended on by #from
+ const Global* to;
};
/// DependencyEdgeCmp implements the contracts of std::equal_to<DependencyEdge>
/// and std::hash<DependencyEdge>.
struct DependencyEdgeCmp {
- /// Equality operator
- bool operator()(const DependencyEdge& lhs, const DependencyEdge& rhs) const {
- return lhs.from == rhs.from && lhs.to == rhs.to;
- }
- /// Hashing operator
- inline std::size_t operator()(const DependencyEdge& d) const {
- return utils::Hash(d.from, d.to);
- }
+ /// Equality operator
+ bool operator()(const DependencyEdge& lhs, const DependencyEdge& rhs) const {
+ return lhs.from == rhs.from && lhs.to == rhs.to;
+ }
+ /// Hashing operator
+ inline std::size_t operator()(const DependencyEdge& d) const {
+ return utils::Hash(d.from, d.to);
+ }
};
/// A map of DependencyEdge to DependencyInfo
-using DependencyEdges = std::unordered_map<DependencyEdge,
- DependencyInfo,
- DependencyEdgeCmp,
- DependencyEdgeCmp>;
+using DependencyEdges =
+ std::unordered_map<DependencyEdge, DependencyInfo, DependencyEdgeCmp, DependencyEdgeCmp>;
/// Global describes a module-scope variable, type or function.
struct Global {
- explicit Global(const ast::Node* n) : node(n) {}
+ explicit Global(const ast::Node* n) : node(n) {}
- /// The declaration ast::Node
- const ast::Node* node;
- /// A list of dependencies that this global depends on
- std::vector<Global*> deps;
+ /// The declaration ast::Node
+ const ast::Node* node;
+ /// A list of dependencies that this global depends on
+ std::vector<Global*> deps;
};
/// A map of global name to Global
@@ -89,638 +87,614 @@ using GlobalMap = std::unordered_map<Symbol, Global*>;
/// Raises an ICE that a global ast::Node type was not handled by this system.
void UnhandledNode(diag::List& diagnostics, const ast::Node* node) {
- TINT_ICE(Resolver, diagnostics)
- << "unhandled node type: " << node->TypeInfo().name;
+ TINT_ICE(Resolver, diagnostics) << "unhandled node type: " << node->TypeInfo().name;
}
/// Raises an error diagnostic with the given message and source.
-void AddError(diag::List& diagnostics,
- const std::string& msg,
- const Source& source) {
- diagnostics.add_error(diag::System::Resolver, msg, source);
+void AddError(diag::List& diagnostics, const std::string& msg, const Source& source) {
+ diagnostics.add_error(diag::System::Resolver, msg, source);
}
/// Raises a note diagnostic with the given message and source.
-void AddNote(diag::List& diagnostics,
- const std::string& msg,
- const Source& source) {
- diagnostics.add_note(diag::System::Resolver, msg, source);
+void AddNote(diag::List& diagnostics, const std::string& msg, const Source& source) {
+ diagnostics.add_note(diag::System::Resolver, msg, source);
}
/// DependencyScanner is used to traverse a module to build the list of
/// global-to-global dependencies.
class DependencyScanner {
- public:
- /// Constructor
- /// @param syms the program symbol table
- /// @param globals_by_name map of global symbol to Global pointer
- /// @param diagnostics diagnostic messages, appended with any errors found
- /// @param graph the dependency graph to populate with resolved symbols
- /// @param edges the map of globals-to-global dependency edges, which will
- /// be populated by calls to Scan()
- DependencyScanner(const SymbolTable& syms,
- const GlobalMap& globals_by_name,
- diag::List& diagnostics,
- DependencyGraph& graph,
- DependencyEdges& edges)
- : symbols_(syms),
- globals_(globals_by_name),
- diagnostics_(diagnostics),
- graph_(graph),
- dependency_edges_(edges) {
- // Register all the globals at global-scope
- for (auto it : globals_by_name) {
- scope_stack_.Set(it.first, it.second->node);
- }
- }
-
- /// Walks the global declarations, resolving symbols, and determining the
- /// dependencies of each global.
- void Scan(Global* global) {
- TINT_SCOPED_ASSIGNMENT(current_global_, global);
- Switch(
- global->node,
- [&](const ast::Struct* str) {
- Declare(str->name, str);
- for (auto* member : str->members) {
- TraverseType(member->type);
- }
- },
- [&](const ast::Alias* alias) {
- Declare(alias->name, alias);
- TraverseType(alias->type);
- },
- [&](const ast::Function* func) {
- Declare(func->symbol, func);
- TraverseAttributes(func->attributes);
- TraverseFunction(func);
- },
- [&](const ast::Variable* var) {
- Declare(var->symbol, var);
- TraverseType(var->type);
- if (var->constructor) {
- TraverseExpression(var->constructor);
- }
- },
- [&](Default) { UnhandledNode(diagnostics_, global->node); });
- }
-
- private:
- /// Traverses the function, performing symbol resolution and determining
- /// global dependencies.
- void TraverseFunction(const ast::Function* func) {
- // Perform symbol resolution on all the parameter types before registering
- // the parameters themselves. This allows the case of declaring a parameter
- // with the same identifier as its type.
- for (auto* param : func->params) {
- TraverseType(param->type);
- }
- // Resolve the return type
- TraverseType(func->return_type);
-
- // Push the scope stack for the parameters and function body.
- scope_stack_.Push();
- TINT_DEFER(scope_stack_.Pop());
-
- for (auto* param : func->params) {
- if (auto* shadows = scope_stack_.Get(param->symbol)) {
- graph_.shadows.emplace(param, shadows);
- }
- Declare(param->symbol, param);
- }
- if (func->body) {
- TraverseStatements(func->body->statements);
+ public:
+ /// Constructor
+ /// @param syms the program symbol table
+ /// @param globals_by_name map of global symbol to Global pointer
+ /// @param diagnostics diagnostic messages, appended with any errors found
+ /// @param graph the dependency graph to populate with resolved symbols
+ /// @param edges the map of globals-to-global dependency edges, which will
+ /// be populated by calls to Scan()
+ DependencyScanner(const SymbolTable& syms,
+ const GlobalMap& globals_by_name,
+ diag::List& diagnostics,
+ DependencyGraph& graph,
+ DependencyEdges& edges)
+ : symbols_(syms),
+ globals_(globals_by_name),
+ diagnostics_(diagnostics),
+ graph_(graph),
+ dependency_edges_(edges) {
+ // Register all the globals at global-scope
+ for (auto it : globals_by_name) {
+ scope_stack_.Set(it.first, it.second->node);
+ }
}
- }
- /// Traverses the statements, performing symbol resolution and determining
- /// global dependencies.
- void TraverseStatements(const ast::StatementList& stmts) {
- for (auto* s : stmts) {
- TraverseStatement(s);
+ /// Walks the global declarations, resolving symbols, and determining the
+ /// dependencies of each global.
+ void Scan(Global* global) {
+ TINT_SCOPED_ASSIGNMENT(current_global_, global);
+ Switch(
+ global->node,
+ [&](const ast::Struct* str) {
+ Declare(str->name, str);
+ for (auto* member : str->members) {
+ TraverseType(member->type);
+ }
+ },
+ [&](const ast::Alias* alias) {
+ Declare(alias->name, alias);
+ TraverseType(alias->type);
+ },
+ [&](const ast::Function* func) {
+ Declare(func->symbol, func);
+ TraverseAttributes(func->attributes);
+ TraverseFunction(func);
+ },
+ [&](const ast::Variable* var) {
+ Declare(var->symbol, var);
+ TraverseType(var->type);
+ if (var->constructor) {
+ TraverseExpression(var->constructor);
+ }
+ },
+ [&](const ast::Enable*) {
+                // Enable directives do not affect the dependency graph.
+ },
+ [&](Default) { UnhandledNode(diagnostics_, global->node); });
}
- }
- /// Traverses the statement, performing symbol resolution and determining
- /// global dependencies.
- void TraverseStatement(const ast::Statement* stmt) {
- if (!stmt) {
- return;
- }
- Switch(
- stmt, //
- [&](const ast::AssignmentStatement* a) {
- TraverseExpression(a->lhs);
- TraverseExpression(a->rhs);
- },
- [&](const ast::BlockStatement* b) {
- scope_stack_.Push();
- TINT_DEFER(scope_stack_.Pop());
- TraverseStatements(b->statements);
- },
- [&](const ast::CallStatement* r) { //
- TraverseExpression(r->expr);
- },
- [&](const ast::CompoundAssignmentStatement* a) {
- TraverseExpression(a->lhs);
- TraverseExpression(a->rhs);
- },
- [&](const ast::ForLoopStatement* l) {
- scope_stack_.Push();
- TINT_DEFER(scope_stack_.Pop());
- TraverseStatement(l->initializer);
- TraverseExpression(l->condition);
- TraverseStatement(l->continuing);
- TraverseStatement(l->body);
- },
- [&](const ast::IncrementDecrementStatement* i) {
- TraverseExpression(i->lhs);
- },
- [&](const ast::LoopStatement* l) {
- scope_stack_.Push();
- TINT_DEFER(scope_stack_.Pop());
- TraverseStatements(l->body->statements);
- TraverseStatement(l->continuing);
- },
- [&](const ast::IfStatement* i) {
- TraverseExpression(i->condition);
- TraverseStatement(i->body);
- for (auto* e : i->else_statements) {
- TraverseExpression(e->condition);
- TraverseStatement(e->body);
- }
- },
- [&](const ast::ReturnStatement* r) { //
- TraverseExpression(r->value);
- },
- [&](const ast::SwitchStatement* s) {
- TraverseExpression(s->condition);
- for (auto* c : s->body) {
- for (auto* sel : c->selectors) {
- TraverseExpression(sel);
+ private:
+ /// Traverses the function, performing symbol resolution and determining
+ /// global dependencies.
+ void TraverseFunction(const ast::Function* func) {
+ // Perform symbol resolution on all the parameter types before registering
+ // the parameters themselves. This allows the case of declaring a parameter
+ // with the same identifier as its type.
+ for (auto* param : func->params) {
+ TraverseType(param->type);
+ }
+ // Resolve the return type
+ TraverseType(func->return_type);
+
+ // Push the scope stack for the parameters and function body.
+ scope_stack_.Push();
+ TINT_DEFER(scope_stack_.Pop());
+
+ for (auto* param : func->params) {
+ if (auto* shadows = scope_stack_.Get(param->symbol)) {
+ graph_.shadows.emplace(param, shadows);
}
- TraverseStatement(c->body);
- }
- },
- [&](const ast::VariableDeclStatement* v) {
- if (auto* shadows = scope_stack_.Get(v->variable->symbol)) {
- graph_.shadows.emplace(v->variable, shadows);
- }
- TraverseType(v->variable->type);
- TraverseExpression(v->variable->constructor);
- Declare(v->variable->symbol, v->variable);
- },
- [&](Default) {
- if (!stmt->IsAnyOf<ast::BreakStatement, ast::ContinueStatement,
- ast::DiscardStatement,
- ast::FallthroughStatement>()) {
- UnhandledNode(diagnostics_, stmt);
- }
- });
- }
-
- /// Adds the symbol definition to the current scope, raising an error if two
- /// symbols collide within the same scope.
- void Declare(Symbol symbol, const ast::Node* node) {
- auto* old = scope_stack_.Set(symbol, node);
- if (old != nullptr && node != old) {
- auto name = symbols_.NameFor(symbol);
- AddError(diagnostics_, "redeclaration of '" + name + "'", node->source);
- AddNote(diagnostics_, "'" + name + "' previously declared here",
- old->source);
+ Declare(param->symbol, param);
+ }
+ if (func->body) {
+ TraverseStatements(func->body->statements);
+ }
}
- }
- /// Traverses the expression, performing symbol resolution and determining
- /// global dependencies.
- void TraverseExpression(const ast::Expression* root) {
- if (!root) {
- return;
+ /// Traverses the statements, performing symbol resolution and determining
+ /// global dependencies.
+ void TraverseStatements(const ast::StatementList& stmts) {
+ for (auto* s : stmts) {
+ TraverseStatement(s);
+ }
}
- ast::TraverseExpressions(
- root, diagnostics_, [&](const ast::Expression* expr) {
- Switch(
- expr,
- [&](const ast::IdentifierExpression* ident) {
- AddDependency(ident, ident->symbol, "identifier", "references");
- },
- [&](const ast::CallExpression* call) {
- if (call->target.name) {
- AddDependency(call->target.name, call->target.name->symbol,
- "function", "calls");
+
+ /// Traverses the statement, performing symbol resolution and determining
+ /// global dependencies.
+ void TraverseStatement(const ast::Statement* stmt) {
+ if (!stmt) {
+ return;
+ }
+ Switch(
+ stmt, //
+ [&](const ast::AssignmentStatement* a) {
+ TraverseExpression(a->lhs);
+ TraverseExpression(a->rhs);
+ },
+ [&](const ast::BlockStatement* b) {
+ scope_stack_.Push();
+ TINT_DEFER(scope_stack_.Pop());
+ TraverseStatements(b->statements);
+ },
+ [&](const ast::CallStatement* r) { //
+ TraverseExpression(r->expr);
+ },
+ [&](const ast::CompoundAssignmentStatement* a) {
+ TraverseExpression(a->lhs);
+ TraverseExpression(a->rhs);
+ },
+ [&](const ast::ForLoopStatement* l) {
+ scope_stack_.Push();
+ TINT_DEFER(scope_stack_.Pop());
+ TraverseStatement(l->initializer);
+ TraverseExpression(l->condition);
+ TraverseStatement(l->continuing);
+ TraverseStatement(l->body);
+ },
+ [&](const ast::IncrementDecrementStatement* i) { TraverseExpression(i->lhs); },
+ [&](const ast::LoopStatement* l) {
+ scope_stack_.Push();
+ TINT_DEFER(scope_stack_.Pop());
+ TraverseStatements(l->body->statements);
+ TraverseStatement(l->continuing);
+ },
+ [&](const ast::IfStatement* i) {
+ TraverseExpression(i->condition);
+ TraverseStatement(i->body);
+ if (i->else_statement) {
+ TraverseStatement(i->else_statement);
}
- if (call->target.type) {
- TraverseType(call->target.type);
+ },
+ [&](const ast::ReturnStatement* r) { //
+ TraverseExpression(r->value);
+ },
+ [&](const ast::SwitchStatement* s) {
+ TraverseExpression(s->condition);
+ for (auto* c : s->body) {
+ for (auto* sel : c->selectors) {
+ TraverseExpression(sel);
+ }
+ TraverseStatement(c->body);
}
- },
- [&](const ast::BitcastExpression* cast) {
- TraverseType(cast->type);
- });
- return ast::TraverseAction::Descend;
- });
- }
+ },
+ [&](const ast::VariableDeclStatement* v) {
+ if (auto* shadows = scope_stack_.Get(v->variable->symbol)) {
+ graph_.shadows.emplace(v->variable, shadows);
+ }
+ TraverseType(v->variable->type);
+ TraverseExpression(v->variable->constructor);
+ Declare(v->variable->symbol, v->variable);
+ },
+ [&](Default) {
+ if (!stmt->IsAnyOf<ast::BreakStatement, ast::ContinueStatement,
+ ast::DiscardStatement, ast::FallthroughStatement>()) {
+ UnhandledNode(diagnostics_, stmt);
+ }
+ });
+ }
- /// Traverses the type node, performing symbol resolution and determining
- /// global dependencies.
- void TraverseType(const ast::Type* ty) {
- if (!ty) {
- return;
+ /// Adds the symbol definition to the current scope, raising an error if two
+ /// symbols collide within the same scope.
+ void Declare(Symbol symbol, const ast::Node* node) {
+ auto* old = scope_stack_.Set(symbol, node);
+ if (old != nullptr && node != old) {
+ auto name = symbols_.NameFor(symbol);
+ AddError(diagnostics_, "redeclaration of '" + name + "'", node->source);
+ AddNote(diagnostics_, "'" + name + "' previously declared here", old->source);
+ }
}
- Switch(
- ty, //
- [&](const ast::Array* arr) {
- TraverseType(arr->type); //
- TraverseExpression(arr->count);
- },
- [&](const ast::Atomic* atomic) { //
- TraverseType(atomic->type);
- },
- [&](const ast::Matrix* mat) { //
- TraverseType(mat->type);
- },
- [&](const ast::Pointer* ptr) { //
- TraverseType(ptr->type);
- },
- [&](const ast::TypeName* tn) { //
- AddDependency(tn, tn->name, "type", "references");
- },
- [&](const ast::Vector* vec) { //
- TraverseType(vec->type);
- },
- [&](const ast::SampledTexture* tex) { //
- TraverseType(tex->type);
- },
- [&](const ast::MultisampledTexture* tex) { //
- TraverseType(tex->type);
- },
- [&](Default) {
- if (!ty->IsAnyOf<ast::Void, ast::Bool, ast::I32, ast::U32, ast::F32,
- ast::DepthTexture, ast::DepthMultisampledTexture,
- ast::StorageTexture, ast::ExternalTexture,
- ast::Sampler>()) {
- UnhandledNode(diagnostics_, ty);
- }
+
+ /// Traverses the expression, performing symbol resolution and determining
+ /// global dependencies.
+ void TraverseExpression(const ast::Expression* root) {
+ if (!root) {
+ return;
+ }
+ ast::TraverseExpressions(root, diagnostics_, [&](const ast::Expression* expr) {
+ Switch(
+ expr,
+ [&](const ast::IdentifierExpression* ident) {
+ AddDependency(ident, ident->symbol, "identifier", "references");
+ },
+ [&](const ast::CallExpression* call) {
+ if (call->target.name) {
+ AddDependency(call->target.name, call->target.name->symbol, "function",
+ "calls");
+ }
+ if (call->target.type) {
+ TraverseType(call->target.type);
+ }
+ },
+ [&](const ast::BitcastExpression* cast) { TraverseType(cast->type); });
+ return ast::TraverseAction::Descend;
});
- }
+ }
- /// Traverses the attribute list, performing symbol resolution and
- /// determining global dependencies.
- void TraverseAttributes(const ast::AttributeList& attrs) {
- for (auto* attr : attrs) {
- TraverseAttribute(attr);
+ /// Traverses the type node, performing symbol resolution and determining
+ /// global dependencies.
+ void TraverseType(const ast::Type* ty) {
+ if (!ty) {
+ return;
+ }
+ Switch(
+ ty, //
+ [&](const ast::Array* arr) {
+ TraverseType(arr->type); //
+ TraverseExpression(arr->count);
+ },
+ [&](const ast::Atomic* atomic) { //
+ TraverseType(atomic->type);
+ },
+ [&](const ast::Matrix* mat) { //
+ TraverseType(mat->type);
+ },
+ [&](const ast::Pointer* ptr) { //
+ TraverseType(ptr->type);
+ },
+ [&](const ast::TypeName* tn) { //
+ AddDependency(tn, tn->name, "type", "references");
+ },
+ [&](const ast::Vector* vec) { //
+ TraverseType(vec->type);
+ },
+ [&](const ast::SampledTexture* tex) { //
+ TraverseType(tex->type);
+ },
+ [&](const ast::MultisampledTexture* tex) { //
+ TraverseType(tex->type);
+ },
+ [&](Default) {
+ if (!ty->IsAnyOf<ast::Void, ast::Bool, ast::I32, ast::U32, ast::F16, ast::F32,
+ ast::DepthTexture, ast::DepthMultisampledTexture,
+ ast::StorageTexture, ast::ExternalTexture, ast::Sampler>()) {
+ UnhandledNode(diagnostics_, ty);
+ }
+ });
+ }
+
+ /// Traverses the attribute list, performing symbol resolution and
+ /// determining global dependencies.
+ void TraverseAttributes(const ast::AttributeList& attrs) {
+ for (auto* attr : attrs) {
+ TraverseAttribute(attr);
+ }
}
- }
-
- /// Traverses the attribute, performing symbol resolution and determining
- /// global dependencies.
- void TraverseAttribute(const ast::Attribute* attr) {
- if (auto* wg = attr->As<ast::WorkgroupAttribute>()) {
- TraverseExpression(wg->x);
- TraverseExpression(wg->y);
- TraverseExpression(wg->z);
- return;
+
+ /// Traverses the attribute, performing symbol resolution and determining
+ /// global dependencies.
+ void TraverseAttribute(const ast::Attribute* attr) {
+ if (auto* wg = attr->As<ast::WorkgroupAttribute>()) {
+ TraverseExpression(wg->x);
+ TraverseExpression(wg->y);
+ TraverseExpression(wg->z);
+ return;
+ }
+ if (attr->IsAnyOf<ast::BindingAttribute, ast::BuiltinAttribute, ast::GroupAttribute,
+ ast::IdAttribute, ast::InternalAttribute, ast::InterpolateAttribute,
+ ast::InvariantAttribute, ast::LocationAttribute, ast::StageAttribute,
+ ast::StrideAttribute, ast::StructMemberAlignAttribute,
+ ast::StructMemberOffsetAttribute, ast::StructMemberSizeAttribute>()) {
+ return;
+ }
+
+ UnhandledNode(diagnostics_, attr);
}
- if (attr->IsAnyOf<
- ast::BindingAttribute, ast::BuiltinAttribute, ast::GroupAttribute,
- ast::IdAttribute, ast::InternalAttribute, ast::InterpolateAttribute,
- ast::InvariantAttribute, ast::LocationAttribute,
- ast::StageAttribute, ast::StrideAttribute,
- ast::StructMemberAlignAttribute, ast::StructMemberOffsetAttribute,
- ast::StructMemberSizeAttribute>()) {
- return;
+
+ /// Adds the dependency from `from` to `to`, erroring if `to` cannot be
+ /// resolved.
+ void AddDependency(const ast::Node* from, Symbol to, const char* use, const char* action) {
+ auto* resolved = scope_stack_.Get(to);
+ if (!resolved) {
+ if (!IsBuiltin(to)) {
+ UnknownSymbol(to, from->source, use);
+ return;
+ }
+ }
+
+ if (auto* global = utils::Lookup(globals_, to); global && global->node == resolved) {
+ if (dependency_edges_
+ .emplace(DependencyEdge{current_global_, global},
+ DependencyInfo{from->source, action})
+ .second) {
+ current_global_->deps.emplace_back(global);
+ }
+ }
+
+ graph_.resolved_symbols.emplace(from, resolved);
}
- UnhandledNode(diagnostics_, attr);
- }
-
- /// Adds the dependency from `from` to `to`, erroring if `to` cannot be
- /// resolved.
- void AddDependency(const ast::Node* from,
- Symbol to,
- const char* use,
- const char* action) {
- auto* resolved = scope_stack_.Get(to);
- if (!resolved) {
- if (!IsBuiltin(to)) {
- UnknownSymbol(to, from->source, use);
- return;
- }
+ /// @returns true if `name` is the name of a builtin function
+ bool IsBuiltin(Symbol name) const {
+ return sem::ParseBuiltinType(symbols_.NameFor(name)) != sem::BuiltinType::kNone;
}
- if (auto* global = utils::Lookup(globals_, to);
- global && global->node == resolved) {
- if (dependency_edges_
- .emplace(DependencyEdge{current_global_, global},
- DependencyInfo{from->source, action})
- .second) {
- current_global_->deps.emplace_back(global);
- }
+ /// Appends an error to the diagnostics that the given symbol cannot be
+ /// resolved.
+ void UnknownSymbol(Symbol name, Source source, const char* use) {
+ AddError(diagnostics_, "unknown " + std::string(use) + ": '" + symbols_.NameFor(name) + "'",
+ source);
}
- graph_.resolved_symbols.emplace(from, resolved);
- }
-
- /// @returns true if `name` is the name of a builtin function
- bool IsBuiltin(Symbol name) const {
- return sem::ParseBuiltinType(symbols_.NameFor(name)) !=
- sem::BuiltinType::kNone;
- }
-
- /// Appends an error to the diagnostics that the given symbol cannot be
- /// resolved.
- void UnknownSymbol(Symbol name, Source source, const char* use) {
- AddError(
- diagnostics_,
- "unknown " + std::string(use) + ": '" + symbols_.NameFor(name) + "'",
- source);
- }
-
- using VariableMap = std::unordered_map<Symbol, const ast::Variable*>;
- const SymbolTable& symbols_;
- const GlobalMap& globals_;
- diag::List& diagnostics_;
- DependencyGraph& graph_;
- DependencyEdges& dependency_edges_;
-
- ScopeStack<const ast::Node*> scope_stack_;
- Global* current_global_ = nullptr;
+ using VariableMap = std::unordered_map<Symbol, const ast::Variable*>;
+ const SymbolTable& symbols_;
+ const GlobalMap& globals_;
+ diag::List& diagnostics_;
+ DependencyGraph& graph_;
+ DependencyEdges& dependency_edges_;
+
+ ScopeStack<Symbol, const ast::Node*> scope_stack_;
+ Global* current_global_ = nullptr;
};
/// The global dependency analysis system
struct DependencyAnalysis {
- public:
- /// Constructor
- DependencyAnalysis(const SymbolTable& symbols,
- diag::List& diagnostics,
- DependencyGraph& graph)
- : symbols_(symbols), diagnostics_(diagnostics), graph_(graph) {}
-
- /// Performs global dependency analysis on the module, emitting any errors to
- /// #diagnostics.
- /// @returns true if analysis found no errors, otherwise false.
- bool Run(const ast::Module& module) {
- // Collect all the named globals from the AST module
- GatherGlobals(module);
-
- // Traverse the named globals to build the dependency graph
- DetermineDependencies();
-
- // Sort the globals into dependency order
- SortGlobals();
-
- // Dump the dependency graph if TINT_DUMP_DEPENDENCY_GRAPH is non-zero
- DumpDependencyGraph();
-
- graph_.ordered_globals = std::move(sorted_);
-
- return !diagnostics_.contains_errors();
- }
-
- private:
- /// @param node the ast::Node of the global declaration
- /// @returns the symbol of the global declaration node
- /// @note will raise an ICE if the node is not a type, function or variable
- /// declaration
- Symbol SymbolOf(const ast::Node* node) const {
- return Switch(
- node, //
- [&](const ast::TypeDecl* td) { return td->name; },
- [&](const ast::Function* func) { return func->symbol; },
- [&](const ast::Variable* var) { return var->symbol; },
- [&](Default) {
- UnhandledNode(diagnostics_, node);
- return Symbol{};
- });
- }
-
- /// @param node the ast::Node of the global declaration
- /// @returns the name of the global declaration node
- /// @note will raise an ICE if the node is not a type, function or variable
- /// declaration
- std::string NameOf(const ast::Node* node) const {
- return symbols_.NameFor(SymbolOf(node));
- }
-
- /// @param node the ast::Node of the global declaration
- /// @returns a string representation of the global declaration kind
- /// @note will raise an ICE if the node is not a type, function or variable
- /// declaration
- std::string KindOf(const ast::Node* node) {
- return Switch(
- node, //
- [&](const ast::Struct*) { return "struct"; },
- [&](const ast::Alias*) { return "alias"; },
- [&](const ast::Function*) { return "function"; },
- [&](const ast::Variable* var) { return var->is_const ? "let" : "var"; },
- [&](Default) {
- UnhandledNode(diagnostics_, node);
- return "<error>";
- });
- }
-
- /// Traverses `module`, collecting all the global declarations and populating
- /// the #globals and #declaration_order fields.
- void GatherGlobals(const ast::Module& module) {
- for (auto* node : module.GlobalDeclarations()) {
- auto* global = allocator_.Create(node);
- globals_.emplace(SymbolOf(node), global);
- declaration_order_.emplace_back(global);
+ public:
+ /// Constructor
+ DependencyAnalysis(const SymbolTable& symbols, diag::List& diagnostics, DependencyGraph& graph)
+ : symbols_(symbols), diagnostics_(diagnostics), graph_(graph) {}
+
+ /// Performs global dependency analysis on the module, emitting any errors to
+ /// #diagnostics.
+ /// @returns true if analysis found no errors, otherwise false.
+ bool Run(const ast::Module& module) {
+ // Collect all the named globals from the AST module
+ GatherGlobals(module);
+
+ // Traverse the named globals to build the dependency graph
+ DetermineDependencies();
+
+ // Sort the globals into dependency order
+ SortGlobals();
+
+ // Dump the dependency graph if TINT_DUMP_DEPENDENCY_GRAPH is non-zero
+ DumpDependencyGraph();
+
+ graph_.ordered_globals = std::move(sorted_);
+
+ return !diagnostics_.contains_errors();
}
- }
-
- /// Walks the global declarations, determining the dependencies of each global
- /// and adding these to each global's Global::deps field.
- void DetermineDependencies() {
- DependencyScanner scanner(symbols_, globals_, diagnostics_, graph_,
- dependency_edges_);
- for (auto* global : declaration_order_) {
- scanner.Scan(global);
+
+ private:
+ /// @param node the ast::Node of the global declaration
+ /// @returns the symbol of the global declaration node
+ /// @note will raise an ICE if the node is not a type, function or variable
+ /// declaration
+ Symbol SymbolOf(const ast::Node* node) const {
+ return Switch(
+ node, //
+ [&](const ast::TypeDecl* td) { return td->name; },
+ [&](const ast::Function* func) { return func->symbol; },
+ [&](const ast::Variable* var) { return var->symbol; },
+ [&](Default) {
+ UnhandledNode(diagnostics_, node);
+ return Symbol{};
+ });
}
- }
-
- /// Performs a depth-first traversal of `root`'s dependencies, calling `enter`
- /// as the function decends into each dependency and `exit` when bubbling back
- /// up towards the root.
- /// @param enter is a function with the signature: `bool(Global*)`. The
- /// `enter` function returns true if TraverseDependencies() should traverse
- /// the dependency, otherwise it will be skipped.
- /// @param exit is a function with the signature: `void(Global*)`. The `exit`
- /// function is only called if the corresponding `enter` call returned true.
- template <typename ENTER, typename EXIT>
- void TraverseDependencies(const Global* root, ENTER&& enter, EXIT&& exit) {
- // Entry is a single entry in the traversal stack. Entry points to a
- // dep_idx'th dependency of Entry::global.
- struct Entry {
- const Global* global; // The parent global
- size_t dep_idx; // The dependency index in `global->deps`
- };
-
- if (!enter(root)) {
- return;
+
+ /// @param node the ast::Node of the global declaration
+ /// @returns the name of the global declaration node
+ /// @note will raise an ICE if the node is not a type, function or variable
+ /// declaration
+ std::string NameOf(const ast::Node* node) const { return symbols_.NameFor(SymbolOf(node)); }
+
+ /// @param node the ast::Node of the global declaration
+ /// @returns a string representation of the global declaration kind
+ /// @note will raise an ICE if the node is not a type, function or variable
+ /// declaration
+ std::string KindOf(const ast::Node* node) {
+ return Switch(
+ node, //
+ [&](const ast::Struct*) { return "struct"; },
+ [&](const ast::Alias*) { return "alias"; },
+ [&](const ast::Function*) { return "function"; },
+ [&](const ast::Variable* var) { return var->is_const ? "let" : "var"; },
+ [&](Default) {
+ UnhandledNode(diagnostics_, node);
+ return "<error>";
+ });
}
- std::vector<Entry> stack{Entry{root, 0}};
- while (true) {
- auto& entry = stack.back();
- // Have we exhausted the dependencies of entry.global?
- if (entry.dep_idx < entry.global->deps.size()) {
- // No, there's more dependencies to traverse.
- auto& dep = entry.global->deps[entry.dep_idx];
- // Does the caller want to enter this dependency?
- if (enter(dep)) { // Yes.
- stack.push_back(Entry{dep, 0}); // Enter the dependency.
- } else {
- entry.dep_idx++; // No. Skip this node.
- }
- } else {
- // Yes. Time to back up.
- // Exit this global, pop the stack, and if there's another parent node,
- // increment its dependency index, and loop again.
- exit(entry.global);
- stack.pop_back();
- if (stack.empty()) {
- return; // All done.
+ /// Traverses `module`, collecting all the global declarations and populating
+ /// the #globals and #declaration_order fields.
+ void GatherGlobals(const ast::Module& module) {
+ for (auto* node : module.GlobalDeclarations()) {
+ auto* global = allocator_.Create(node);
+ // Enable directives do not form a symbol. Skip them.
+ if (!node->Is<ast::Enable>()) {
+ globals_.emplace(SymbolOf(node), global);
+ }
+ declaration_order_.emplace_back(global);
}
- stack.back().dep_idx++;
- }
}
- }
- /// SortGlobals sorts the globals into dependency order, erroring if cyclic
- /// dependencies are found. The sorted dependencies are assigned to #sorted.
- void SortGlobals() {
- if (diagnostics_.contains_errors()) {
- return; // This code assumes there are no undeclared identifiers.
+ /// Walks the global declarations, determining the dependencies of each global
+ /// and adding these to each global's Global::deps field.
+ void DetermineDependencies() {
+ DependencyScanner scanner(symbols_, globals_, diagnostics_, graph_, dependency_edges_);
+ for (auto* global : declaration_order_) {
+ scanner.Scan(global);
+ }
}
- std::unordered_set<const Global*> visited;
- for (auto* global : declaration_order_) {
- utils::UniqueVector<const Global*> stack;
- TraverseDependencies(
- global,
- [&](const Global* g) { // Enter
- if (!stack.add(g)) {
- CyclicDependencyFound(g, stack);
- return false;
- }
- if (sorted_.contains(g->node)) {
- // Visited this global already.
- // stack was pushed, but exit() will not be called when we return
- // false, so pop here.
- stack.pop_back();
- return false;
+ /// Performs a depth-first traversal of `root`'s dependencies, calling `enter`
+    /// as the function descends into each dependency and `exit` when bubbling back
+ /// up towards the root.
+ /// @param enter is a function with the signature: `bool(Global*)`. The
+ /// `enter` function returns true if TraverseDependencies() should traverse
+ /// the dependency, otherwise it will be skipped.
+ /// @param exit is a function with the signature: `void(Global*)`. The `exit`
+ /// function is only called if the corresponding `enter` call returned true.
+ template <typename ENTER, typename EXIT>
+ void TraverseDependencies(const Global* root, ENTER&& enter, EXIT&& exit) {
+ // Entry is a single entry in the traversal stack. Entry points to a
+ // dep_idx'th dependency of Entry::global.
+ struct Entry {
+ const Global* global; // The parent global
+ size_t dep_idx; // The dependency index in `global->deps`
+ };
+
+ if (!enter(root)) {
+ return;
+ }
+
+ std::vector<Entry> stack{Entry{root, 0}};
+ while (true) {
+ auto& entry = stack.back();
+ // Have we exhausted the dependencies of entry.global?
+ if (entry.dep_idx < entry.global->deps.size()) {
+ // No, there's more dependencies to traverse.
+ auto& dep = entry.global->deps[entry.dep_idx];
+ // Does the caller want to enter this dependency?
+ if (enter(dep)) { // Yes.
+ stack.push_back(Entry{dep, 0}); // Enter the dependency.
+ } else {
+ entry.dep_idx++; // No. Skip this node.
+ }
+ } else {
+ // Yes. Time to back up.
+ // Exit this global, pop the stack, and if there's another parent node,
+ // increment its dependency index, and loop again.
+ exit(entry.global);
+ stack.pop_back();
+ if (stack.empty()) {
+ return; // All done.
+ }
+ stack.back().dep_idx++;
}
- return true;
- },
- [&](const Global* g) { // Exit. Only called if Enter returned true.
- sorted_.add(g->node);
- stack.pop_back();
- });
+ }
+ }
- sorted_.add(global->node);
+ /// SortGlobals sorts the globals into dependency order, erroring if cyclic
+ /// dependencies are found. The sorted dependencies are assigned to #sorted.
+ void SortGlobals() {
+ if (diagnostics_.contains_errors()) {
+ return; // This code assumes there are no undeclared identifiers.
+ }
- if (!stack.empty()) {
- // Each stack.push() must have a corresponding stack.pop_back().
- TINT_ICE(Resolver, diagnostics_)
- << "stack not empty after returning from TraverseDependencies()";
- }
- }
- }
-
- /// DepInfoFor() looks up the global dependency information for the dependency
- /// of global `from` depending on `to`.
- /// @note will raise an ICE if the edge is not found.
- DependencyInfo DepInfoFor(const Global* from, const Global* to) const {
- auto it = dependency_edges_.find(DependencyEdge{from, to});
- if (it != dependency_edges_.end()) {
- return it->second;
+ std::unordered_set<const Global*> visited;
+ for (auto* global : declaration_order_) {
+ utils::UniqueVector<const Global*> stack;
+ TraverseDependencies(
+ global,
+ [&](const Global* g) { // Enter
+ if (!stack.add(g)) {
+ CyclicDependencyFound(g, stack);
+ return false;
+ }
+ if (sorted_.contains(g->node)) {
+ // Visited this global already.
+ // stack was pushed, but exit() will not be called when we return
+ // false, so pop here.
+ stack.pop_back();
+ return false;
+ }
+ return true;
+ },
+ [&](const Global* g) { // Exit. Only called if Enter returned true.
+ sorted_.add(g->node);
+ stack.pop_back();
+ });
+
+ sorted_.add(global->node);
+
+ if (!stack.empty()) {
+ // Each stack.push() must have a corresponding stack.pop_back().
+ TINT_ICE(Resolver, diagnostics_)
+ << "stack not empty after returning from TraverseDependencies()";
+ }
+ }
}
- TINT_ICE(Resolver, diagnostics_)
- << "failed to find dependency info for edge: '" << NameOf(from->node)
- << "' -> '" << NameOf(to->node) << "'";
- return {};
- }
-
- /// CyclicDependencyFound() emits an error diagnostic for a cyclic dependency.
- /// @param root is the global that starts the cyclic dependency, which must be
- /// found in `stack`.
- /// @param stack is the global dependency stack that contains a loop.
- void CyclicDependencyFound(const Global* root,
- const std::vector<const Global*>& stack) {
- std::stringstream msg;
- msg << "cyclic dependency found: ";
- constexpr size_t kLoopNotStarted = ~0u;
- size_t loop_start = kLoopNotStarted;
- for (size_t i = 0; i < stack.size(); i++) {
- auto* e = stack[i];
- if (loop_start == kLoopNotStarted && e == root) {
- loop_start = i;
- }
- if (loop_start != kLoopNotStarted) {
- msg << "'" << NameOf(e->node) << "' -> ";
- }
+
+ /// DepInfoFor() looks up the global dependency information for the dependency
+ /// of global `from` depending on `to`.
+ /// @note will raise an ICE if the edge is not found.
+ DependencyInfo DepInfoFor(const Global* from, const Global* to) const {
+ auto it = dependency_edges_.find(DependencyEdge{from, to});
+ if (it != dependency_edges_.end()) {
+ return it->second;
+ }
+ TINT_ICE(Resolver, diagnostics_)
+ << "failed to find dependency info for edge: '" << NameOf(from->node) << "' -> '"
+ << NameOf(to->node) << "'";
+ return {};
}
- msg << "'" << NameOf(root->node) << "'";
- AddError(diagnostics_, msg.str(), root->node->source);
- for (size_t i = loop_start; i < stack.size(); i++) {
- auto* from = stack[i];
- auto* to = (i + 1 < stack.size()) ? stack[i + 1] : stack[loop_start];
- auto info = DepInfoFor(from, to);
- AddNote(diagnostics_,
- KindOf(from->node) + " '" + NameOf(from->node) + "' " +
- info.action + " " + KindOf(to->node) + " '" +
- NameOf(to->node) + "' here",
- info.source);
+
+ /// CyclicDependencyFound() emits an error diagnostic for a cyclic dependency.
+ /// @param root is the global that starts the cyclic dependency, which must be
+ /// found in `stack`.
+ /// @param stack is the global dependency stack that contains a loop.
+ void CyclicDependencyFound(const Global* root, const std::vector<const Global*>& stack) {
+ std::stringstream msg;
+ msg << "cyclic dependency found: ";
+ constexpr size_t kLoopNotStarted = ~0u;
+ size_t loop_start = kLoopNotStarted;
+ for (size_t i = 0; i < stack.size(); i++) {
+ auto* e = stack[i];
+ if (loop_start == kLoopNotStarted && e == root) {
+ loop_start = i;
+ }
+ if (loop_start != kLoopNotStarted) {
+ msg << "'" << NameOf(e->node) << "' -> ";
+ }
+ }
+ msg << "'" << NameOf(root->node) << "'";
+ AddError(diagnostics_, msg.str(), root->node->source);
+ for (size_t i = loop_start; i < stack.size(); i++) {
+ auto* from = stack[i];
+ auto* to = (i + 1 < stack.size()) ? stack[i + 1] : stack[loop_start];
+ auto info = DepInfoFor(from, to);
+ AddNote(diagnostics_,
+ KindOf(from->node) + " '" + NameOf(from->node) + "' " + info.action + " " +
+ KindOf(to->node) + " '" + NameOf(to->node) + "' here",
+ info.source);
+ }
}
- }
- void DumpDependencyGraph() {
+ void DumpDependencyGraph() {
#if TINT_DUMP_DEPENDENCY_GRAPH == 0
- if ((true)) {
- return;
- }
+ if ((true)) {
+ return;
+ }
#endif // TINT_DUMP_DEPENDENCY_GRAPH
- printf("=========================\n");
- printf("------ declaration ------ \n");
- for (auto* global : declaration_order_) {
- printf("%s\n", NameOf(global->node).c_str());
- }
- printf("------ dependencies ------ \n");
- for (auto* node : sorted_) {
- auto symbol = SymbolOf(node);
- auto* global = globals_.at(symbol);
- printf("%s depends on:\n", symbols_.NameFor(symbol).c_str());
- for (auto* dep : global->deps) {
- printf(" %s\n", NameOf(dep->node).c_str());
- }
+ printf("=========================\n");
+ printf("------ declaration ------ \n");
+ for (auto* global : declaration_order_) {
+ printf("%s\n", NameOf(global->node).c_str());
+ }
+ printf("------ dependencies ------ \n");
+ for (auto* node : sorted_) {
+ auto symbol = SymbolOf(node);
+ auto* global = globals_.at(symbol);
+ printf("%s depends on:\n", symbols_.NameFor(symbol).c_str());
+ for (auto* dep : global->deps) {
+ printf(" %s\n", NameOf(dep->node).c_str());
+ }
+ }
+ printf("=========================\n");
}
- printf("=========================\n");
- }
- /// Program symbols
- const SymbolTable& symbols_;
+ /// Program symbols
+ const SymbolTable& symbols_;
- /// Program diagnostics
- diag::List& diagnostics_;
+ /// Program diagnostics
+ diag::List& diagnostics_;
- /// The resulting dependency graph
- DependencyGraph& graph_;
+ /// The resulting dependency graph
+ DependencyGraph& graph_;
- /// Allocator of Globals
- utils::BlockAllocator<Global> allocator_;
+ /// Allocator of Globals
+ utils::BlockAllocator<Global> allocator_;
- /// Global map, keyed by name. Populated by GatherGlobals().
- GlobalMap globals_;
+ /// Global map, keyed by name. Populated by GatherGlobals().
+ GlobalMap globals_;
- /// Map of DependencyEdge to DependencyInfo. Populated by
- /// DetermineDependencies().
- DependencyEdges dependency_edges_;
+ /// Map of DependencyEdge to DependencyInfo. Populated by
+ /// DetermineDependencies().
+ DependencyEdges dependency_edges_;
- /// Globals in declaration order. Populated by GatherGlobals().
- std::vector<Global*> declaration_order_;
+ /// Globals in declaration order. Populated by GatherGlobals().
+ std::vector<Global*> declaration_order_;
- /// Globals in sorted dependency order. Populated by SortGlobals().
- utils::UniqueVector<const ast::Node*> sorted_;
+ /// Globals in sorted dependency order. Populated by SortGlobals().
+ utils::UniqueVector<const ast::Node*> sorted_;
};
} // namespace
@@ -733,8 +707,8 @@ bool DependencyGraph::Build(const ast::Module& module,
const SymbolTable& symbols,
diag::List& diagnostics,
DependencyGraph& output) {
- DependencyAnalysis da{symbols, diagnostics, output};
- return da.Run(module);
+ DependencyAnalysis da{symbols, diagnostics, output};
+ return da.Run(module);
}
} // namespace tint::resolver
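The DependencyEdges map above relies on a single comparator type, DependencyEdgeCmp, supplying both the hash and the key-equality parameters of std::unordered_map. A stand-alone sketch of that pattern, with stand-in types and a simple hash in place of utils::Hash:

    #include <cstddef>
    #include <functional>
    #include <unordered_map>

    struct Edge {
        const void* from;
        const void* to;
    };

    struct EdgeCmp {
        // Key equality: edges match when both endpoints match.
        bool operator()(const Edge& a, const Edge& b) const {
            return a.from == b.from && a.to == b.to;
        }
        // Hash over both endpoints (stand-in for utils::Hash(from, to)).
        std::size_t operator()(const Edge& e) const {
            return std::hash<const void*>{}(e.from) ^ (std::hash<const void*>{}(e.to) << 1);
        }
    };

    // One type serves as both the Hash and the KeyEqual template arguments.
    using EdgeMap = std::unordered_map<Edge, int, EdgeCmp, EdgeCmp>;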
diff --git a/chromium/third_party/dawn/src/tint/resolver/dependency_graph.h b/chromium/third_party/dawn/src/tint/resolver/dependency_graph.h
index e8042f16ed9..0554817ebce 100644
--- a/chromium/third_party/dawn/src/tint/resolver/dependency_graph.h
+++ b/chromium/third_party/dawn/src/tint/resolver/dependency_graph.h
@@ -26,37 +26,37 @@ namespace tint::resolver {
/// DependencyGraph holds information about module-scope declaration dependency
/// analysis and symbol resolutions.
struct DependencyGraph {
- /// Constructor
- DependencyGraph();
- /// Move-constructor
- DependencyGraph(DependencyGraph&&);
- /// Destructor
- ~DependencyGraph();
+ /// Constructor
+ DependencyGraph();
+ /// Move-constructor
+ DependencyGraph(DependencyGraph&&);
+ /// Destructor
+ ~DependencyGraph();
- /// Build() performs symbol resolution and dependency analysis on `module`,
- /// populating `output` with the resulting dependency graph.
- /// @param module the AST module to analyse
- /// @param symbols the symbol table
- /// @param diagnostics the diagnostic list to populate with errors / warnings
- /// @param output the resulting DependencyGraph
- /// @returns true on success, false on error
- static bool Build(const ast::Module& module,
- const SymbolTable& symbols,
- diag::List& diagnostics,
- DependencyGraph& output);
+ /// Build() performs symbol resolution and dependency analysis on `module`,
+ /// populating `output` with the resulting dependency graph.
+ /// @param module the AST module to analyse
+ /// @param symbols the symbol table
+ /// @param diagnostics the diagnostic list to populate with errors / warnings
+ /// @param output the resulting DependencyGraph
+ /// @returns true on success, false on error
+ static bool Build(const ast::Module& module,
+ const SymbolTable& symbols,
+ diag::List& diagnostics,
+ DependencyGraph& output);
- /// All globals in dependency-sorted order.
- std::vector<const ast::Node*> ordered_globals;
+ /// All globals in dependency-sorted order.
+ std::vector<const ast::Node*> ordered_globals;
- /// Map of ast::IdentifierExpression or ast::TypeName to a type, function, or
- /// variable that declares the symbol.
- std::unordered_map<const ast::Node*, const ast::Node*> resolved_symbols;
+ /// Map of ast::IdentifierExpression or ast::TypeName to a type, function, or
+ /// variable that declares the symbol.
+ std::unordered_map<const ast::Node*, const ast::Node*> resolved_symbols;
- /// Map of ast::Variable to a type, function, or variable that is shadowed by
- /// the variable key. A declaration (X) shadows another (Y) if X and Y use
- /// the same symbol, and X is declared in a sub-scope of the scope that
- /// declares Y.
- std::unordered_map<const ast::Variable*, const ast::Node*> shadows;
+ /// Map of ast::Variable to a type, function, or variable that is shadowed by
+ /// the variable key. A declaration (X) shadows another (Y) if X and Y use
+ /// the same symbol, and X is declared in a sub-scope of the scope that
+ /// declares Y.
+ std::unordered_map<const ast::Variable*, const ast::Node*> shadows;
};
} // namespace tint::resolver
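
A minimal usage sketch of the Build() entry point documented in the header above, mirroring how the test fixture in the next file calls it. It assumes a tint::ProgramBuilder named `b` has already been populated with module-scope declarations; that setup, the helper function name, and the include paths are illustrative assumptions rather than a verbatim excerpt from the tree.

#include "src/tint/program_builder.h"
#include "src/tint/resolver/dependency_graph.h"

// Hedged sketch: `b` is a tint::ProgramBuilder already populated with
// module-scope declarations (setup omitted).
void AnalyzeModule(tint::ProgramBuilder& b) {
    tint::resolver::DependencyGraph graph;
    tint::diag::List diagnostics;
    const bool ok =
        tint::resolver::DependencyGraph::Build(b.AST(), b.Symbols(), diagnostics, graph);
    if (!ok) {
        // `diagnostics` now carries the errors, e.g. the cyclic-dependency
        // reports exercised by the tests in the next file.
        return;
    }
    // graph.ordered_globals: module-scope declarations in dependency-sorted order.
    // graph.resolved_symbols: each identifier / type-name use -> its declaring node.
}
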
diff --git a/chromium/third_party/dawn/src/tint/resolver/dependency_graph_test.cc b/chromium/third_party/dawn/src/tint/resolver/dependency_graph_test.cc
index 606fc5963e7..82b0a69b682 100644
--- a/chromium/third_party/dawn/src/tint/resolver/dependency_graph_test.cc
+++ b/chromium/third_party/dawn/src/tint/resolver/dependency_graph_test.cc
@@ -20,6 +20,8 @@
#include "src/tint/resolver/dependency_graph.h"
#include "src/tint/resolver/resolver_test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::resolver {
namespace {
@@ -27,23 +29,22 @@ using ::testing::ElementsAre;
template <typename T>
class ResolverDependencyGraphTestWithParam : public ResolverTestWithParam<T> {
- public:
- DependencyGraph Build(std::string expected_error = "") {
- DependencyGraph graph;
- auto result = DependencyGraph::Build(this->AST(), this->Symbols(),
- this->Diagnostics(), graph);
- if (expected_error.empty()) {
- EXPECT_TRUE(result) << this->Diagnostics().str();
- } else {
- EXPECT_FALSE(result);
- EXPECT_EQ(expected_error, this->Diagnostics().str());
+ public:
+ DependencyGraph Build(std::string expected_error = "") {
+ DependencyGraph graph;
+ auto result =
+ DependencyGraph::Build(this->AST(), this->Symbols(), this->Diagnostics(), graph);
+ if (expected_error.empty()) {
+ EXPECT_TRUE(result) << this->Diagnostics().str();
+ } else {
+ EXPECT_FALSE(result);
+ EXPECT_EQ(expected_error, this->Diagnostics().str());
+ }
+ return graph;
}
- return graph;
- }
};
-using ResolverDependencyGraphTest =
- ResolverDependencyGraphTestWithParam<::testing::Test>;
+using ResolverDependencyGraphTest = ResolverDependencyGraphTestWithParam<::testing::Test>;
////////////////////////////////////////////////////////////////////////////////
// Parameterized test helpers
@@ -52,24 +53,23 @@ using ResolverDependencyGraphTest =
/// SymbolDeclKind is used by parameterized tests to enumerate the different
/// kinds of symbol declarations.
enum class SymbolDeclKind {
- GlobalVar,
- GlobalLet,
- Alias,
- Struct,
- Function,
- Parameter,
- LocalVar,
- LocalLet,
- NestedLocalVar,
- NestedLocalLet,
+ GlobalVar,
+ GlobalConst,
+ Alias,
+ Struct,
+ Function,
+ Parameter,
+ LocalVar,
+ LocalLet,
+ NestedLocalVar,
+ NestedLocalLet,
};
static constexpr SymbolDeclKind kAllSymbolDeclKinds[] = {
- SymbolDeclKind::GlobalVar, SymbolDeclKind::GlobalLet,
- SymbolDeclKind::Alias, SymbolDeclKind::Struct,
- SymbolDeclKind::Function, SymbolDeclKind::Parameter,
- SymbolDeclKind::LocalVar, SymbolDeclKind::LocalLet,
- SymbolDeclKind::NestedLocalVar, SymbolDeclKind::NestedLocalLet,
+ SymbolDeclKind::GlobalVar, SymbolDeclKind::GlobalConst, SymbolDeclKind::Alias,
+ SymbolDeclKind::Struct, SymbolDeclKind::Function, SymbolDeclKind::Parameter,
+ SymbolDeclKind::LocalVar, SymbolDeclKind::LocalLet, SymbolDeclKind::NestedLocalVar,
+ SymbolDeclKind::NestedLocalLet,
};
static constexpr SymbolDeclKind kTypeDeclKinds[] = {
@@ -78,26 +78,24 @@ static constexpr SymbolDeclKind kTypeDeclKinds[] = {
};
static constexpr SymbolDeclKind kValueDeclKinds[] = {
- SymbolDeclKind::GlobalVar, SymbolDeclKind::GlobalLet,
- SymbolDeclKind::Parameter, SymbolDeclKind::LocalVar,
- SymbolDeclKind::LocalLet, SymbolDeclKind::NestedLocalVar,
+ SymbolDeclKind::GlobalVar, SymbolDeclKind::GlobalConst, SymbolDeclKind::Parameter,
+ SymbolDeclKind::LocalVar, SymbolDeclKind::LocalLet, SymbolDeclKind::NestedLocalVar,
SymbolDeclKind::NestedLocalLet,
};
static constexpr SymbolDeclKind kGlobalDeclKinds[] = {
- SymbolDeclKind::GlobalVar, SymbolDeclKind::GlobalLet, SymbolDeclKind::Alias,
+ SymbolDeclKind::GlobalVar, SymbolDeclKind::GlobalConst, SymbolDeclKind::Alias,
SymbolDeclKind::Struct, SymbolDeclKind::Function,
};
static constexpr SymbolDeclKind kLocalDeclKinds[] = {
- SymbolDeclKind::Parameter, SymbolDeclKind::LocalVar,
- SymbolDeclKind::LocalLet, SymbolDeclKind::NestedLocalVar,
- SymbolDeclKind::NestedLocalLet,
+ SymbolDeclKind::Parameter, SymbolDeclKind::LocalVar, SymbolDeclKind::LocalLet,
+ SymbolDeclKind::NestedLocalVar, SymbolDeclKind::NestedLocalLet,
};
static constexpr SymbolDeclKind kGlobalValueDeclKinds[] = {
SymbolDeclKind::GlobalVar,
- SymbolDeclKind::GlobalLet,
+ SymbolDeclKind::GlobalConst,
};
static constexpr SymbolDeclKind kFuncDeclKinds[] = {
@@ -107,37 +105,37 @@ static constexpr SymbolDeclKind kFuncDeclKinds[] = {
/// SymbolUseKind is used by parameterized tests to enumerate the different
/// kinds of symbol uses.
enum class SymbolUseKind {
- GlobalVarType,
- GlobalVarArrayElemType,
- GlobalVarArraySizeValue,
- GlobalVarVectorElemType,
- GlobalVarMatrixElemType,
- GlobalVarSampledTexElemType,
- GlobalVarMultisampledTexElemType,
- GlobalVarValue,
- GlobalLetType,
- GlobalLetArrayElemType,
- GlobalLetArraySizeValue,
- GlobalLetVectorElemType,
- GlobalLetMatrixElemType,
- GlobalLetValue,
- AliasType,
- StructMemberType,
- CallFunction,
- ParameterType,
- LocalVarType,
- LocalVarArrayElemType,
- LocalVarArraySizeValue,
- LocalVarVectorElemType,
- LocalVarMatrixElemType,
- LocalVarValue,
- LocalLetType,
- LocalLetValue,
- NestedLocalVarType,
- NestedLocalVarValue,
- NestedLocalLetType,
- NestedLocalLetValue,
- WorkgroupSizeValue,
+ GlobalVarType,
+ GlobalVarArrayElemType,
+ GlobalVarArraySizeValue,
+ GlobalVarVectorElemType,
+ GlobalVarMatrixElemType,
+ GlobalVarSampledTexElemType,
+ GlobalVarMultisampledTexElemType,
+ GlobalVarValue,
+ GlobalLetType,
+ GlobalLetArrayElemType,
+ GlobalLetArraySizeValue,
+ GlobalLetVectorElemType,
+ GlobalLetMatrixElemType,
+ GlobalLetValue,
+ AliasType,
+ StructMemberType,
+ CallFunction,
+ ParameterType,
+ LocalVarType,
+ LocalVarArrayElemType,
+ LocalVarArraySizeValue,
+ LocalVarVectorElemType,
+ LocalVarMatrixElemType,
+ LocalVarValue,
+ LocalLetType,
+ LocalLetValue,
+ NestedLocalVarType,
+ NestedLocalVarValue,
+ NestedLocalLetType,
+ NestedLocalLetValue,
+ WorkgroupSizeValue,
};
static constexpr SymbolUseKind kTypeUseKinds[] = {
@@ -180,474 +178,466 @@ static constexpr SymbolUseKind kFuncUseKinds[] = {
/// @returns the description of the symbol declaration kind.
/// @note: This differs from the strings used in diagnostic messages.
std::ostream& operator<<(std::ostream& out, SymbolDeclKind kind) {
- switch (kind) {
- case SymbolDeclKind::GlobalVar:
- return out << "global var";
- case SymbolDeclKind::GlobalLet:
- return out << "global let";
- case SymbolDeclKind::Alias:
- return out << "alias";
- case SymbolDeclKind::Struct:
- return out << "struct";
- case SymbolDeclKind::Function:
- return out << "function";
- case SymbolDeclKind::Parameter:
- return out << "parameter";
- case SymbolDeclKind::LocalVar:
- return out << "local var";
- case SymbolDeclKind::LocalLet:
- return out << "local let";
- case SymbolDeclKind::NestedLocalVar:
- return out << "nested local var";
- case SymbolDeclKind::NestedLocalLet:
- return out << "nested local let";
- }
- return out << "<unknown>";
+ switch (kind) {
+ case SymbolDeclKind::GlobalVar:
+ return out << "global var";
+ case SymbolDeclKind::GlobalConst:
+ return out << "global let";
+ case SymbolDeclKind::Alias:
+ return out << "alias";
+ case SymbolDeclKind::Struct:
+ return out << "struct";
+ case SymbolDeclKind::Function:
+ return out << "function";
+ case SymbolDeclKind::Parameter:
+ return out << "parameter";
+ case SymbolDeclKind::LocalVar:
+ return out << "local var";
+ case SymbolDeclKind::LocalLet:
+ return out << "local let";
+ case SymbolDeclKind::NestedLocalVar:
+ return out << "nested local var";
+ case SymbolDeclKind::NestedLocalLet:
+ return out << "nested local let";
+ }
+ return out << "<unknown>";
}
/// @returns the description of the symbol use kind.
/// @note: This differs from the strings used in diagnostic messages.
std::ostream& operator<<(std::ostream& out, SymbolUseKind kind) {
- switch (kind) {
- case SymbolUseKind::GlobalVarType:
- return out << "global var type";
- case SymbolUseKind::GlobalVarValue:
- return out << "global var value";
- case SymbolUseKind::GlobalVarArrayElemType:
- return out << "global var array element type";
- case SymbolUseKind::GlobalVarArraySizeValue:
- return out << "global var array size value";
- case SymbolUseKind::GlobalVarVectorElemType:
- return out << "global var vector element type";
- case SymbolUseKind::GlobalVarMatrixElemType:
- return out << "global var matrix element type";
- case SymbolUseKind::GlobalVarSampledTexElemType:
- return out << "global var sampled_texture element type";
- case SymbolUseKind::GlobalVarMultisampledTexElemType:
- return out << "global var multisampled_texture element type";
- case SymbolUseKind::GlobalLetType:
- return out << "global let type";
- case SymbolUseKind::GlobalLetValue:
- return out << "global let value";
- case SymbolUseKind::GlobalLetArrayElemType:
- return out << "global let array element type";
- case SymbolUseKind::GlobalLetArraySizeValue:
- return out << "global let array size value";
- case SymbolUseKind::GlobalLetVectorElemType:
- return out << "global let vector element type";
- case SymbolUseKind::GlobalLetMatrixElemType:
- return out << "global let matrix element type";
- case SymbolUseKind::AliasType:
- return out << "alias type";
- case SymbolUseKind::StructMemberType:
- return out << "struct member type";
- case SymbolUseKind::CallFunction:
- return out << "call function";
- case SymbolUseKind::ParameterType:
- return out << "parameter type";
- case SymbolUseKind::LocalVarType:
- return out << "local var type";
- case SymbolUseKind::LocalVarArrayElemType:
- return out << "local var array element type";
- case SymbolUseKind::LocalVarArraySizeValue:
- return out << "local var array size value";
- case SymbolUseKind::LocalVarVectorElemType:
- return out << "local var vector element type";
- case SymbolUseKind::LocalVarMatrixElemType:
- return out << "local var matrix element type";
- case SymbolUseKind::LocalVarValue:
- return out << "local var value";
- case SymbolUseKind::LocalLetType:
- return out << "local let type";
- case SymbolUseKind::LocalLetValue:
- return out << "local let value";
- case SymbolUseKind::NestedLocalVarType:
- return out << "nested local var type";
- case SymbolUseKind::NestedLocalVarValue:
- return out << "nested local var value";
- case SymbolUseKind::NestedLocalLetType:
- return out << "nested local let type";
- case SymbolUseKind::NestedLocalLetValue:
- return out << "nested local let value";
- case SymbolUseKind::WorkgroupSizeValue:
- return out << "workgroup size value";
- }
- return out << "<unknown>";
+ switch (kind) {
+ case SymbolUseKind::GlobalVarType:
+ return out << "global var type";
+ case SymbolUseKind::GlobalVarValue:
+ return out << "global var value";
+ case SymbolUseKind::GlobalVarArrayElemType:
+ return out << "global var array element type";
+ case SymbolUseKind::GlobalVarArraySizeValue:
+ return out << "global var array size value";
+ case SymbolUseKind::GlobalVarVectorElemType:
+ return out << "global var vector element type";
+ case SymbolUseKind::GlobalVarMatrixElemType:
+ return out << "global var matrix element type";
+ case SymbolUseKind::GlobalVarSampledTexElemType:
+ return out << "global var sampled_texture element type";
+ case SymbolUseKind::GlobalVarMultisampledTexElemType:
+ return out << "global var multisampled_texture element type";
+ case SymbolUseKind::GlobalLetType:
+ return out << "global let type";
+ case SymbolUseKind::GlobalLetValue:
+ return out << "global let value";
+ case SymbolUseKind::GlobalLetArrayElemType:
+ return out << "global let array element type";
+ case SymbolUseKind::GlobalLetArraySizeValue:
+ return out << "global let array size value";
+ case SymbolUseKind::GlobalLetVectorElemType:
+ return out << "global let vector element type";
+ case SymbolUseKind::GlobalLetMatrixElemType:
+ return out << "global let matrix element type";
+ case SymbolUseKind::AliasType:
+ return out << "alias type";
+ case SymbolUseKind::StructMemberType:
+ return out << "struct member type";
+ case SymbolUseKind::CallFunction:
+ return out << "call function";
+ case SymbolUseKind::ParameterType:
+ return out << "parameter type";
+ case SymbolUseKind::LocalVarType:
+ return out << "local var type";
+ case SymbolUseKind::LocalVarArrayElemType:
+ return out << "local var array element type";
+ case SymbolUseKind::LocalVarArraySizeValue:
+ return out << "local var array size value";
+ case SymbolUseKind::LocalVarVectorElemType:
+ return out << "local var vector element type";
+ case SymbolUseKind::LocalVarMatrixElemType:
+ return out << "local var matrix element type";
+ case SymbolUseKind::LocalVarValue:
+ return out << "local var value";
+ case SymbolUseKind::LocalLetType:
+ return out << "local let type";
+ case SymbolUseKind::LocalLetValue:
+ return out << "local let value";
+ case SymbolUseKind::NestedLocalVarType:
+ return out << "nested local var type";
+ case SymbolUseKind::NestedLocalVarValue:
+ return out << "nested local var value";
+ case SymbolUseKind::NestedLocalLetType:
+ return out << "nested local let type";
+ case SymbolUseKind::NestedLocalLetValue:
+ return out << "nested local let value";
+ case SymbolUseKind::WorkgroupSizeValue:
+ return out << "workgroup size value";
+ }
+ return out << "<unknown>";
}
/// @returns the diagnostic message name used for the given use
std::string DiagString(SymbolUseKind kind) {
- switch (kind) {
- case SymbolUseKind::GlobalVarType:
- case SymbolUseKind::GlobalVarArrayElemType:
- case SymbolUseKind::GlobalVarVectorElemType:
- case SymbolUseKind::GlobalVarMatrixElemType:
- case SymbolUseKind::GlobalVarSampledTexElemType:
- case SymbolUseKind::GlobalVarMultisampledTexElemType:
- case SymbolUseKind::GlobalLetType:
- case SymbolUseKind::GlobalLetArrayElemType:
- case SymbolUseKind::GlobalLetVectorElemType:
- case SymbolUseKind::GlobalLetMatrixElemType:
- case SymbolUseKind::AliasType:
- case SymbolUseKind::StructMemberType:
- case SymbolUseKind::ParameterType:
- case SymbolUseKind::LocalVarType:
- case SymbolUseKind::LocalVarArrayElemType:
- case SymbolUseKind::LocalVarVectorElemType:
- case SymbolUseKind::LocalVarMatrixElemType:
- case SymbolUseKind::LocalLetType:
- case SymbolUseKind::NestedLocalVarType:
- case SymbolUseKind::NestedLocalLetType:
- return "type";
- case SymbolUseKind::GlobalVarValue:
- case SymbolUseKind::GlobalVarArraySizeValue:
- case SymbolUseKind::GlobalLetValue:
- case SymbolUseKind::GlobalLetArraySizeValue:
- case SymbolUseKind::LocalVarValue:
- case SymbolUseKind::LocalVarArraySizeValue:
- case SymbolUseKind::LocalLetValue:
- case SymbolUseKind::NestedLocalVarValue:
- case SymbolUseKind::NestedLocalLetValue:
- case SymbolUseKind::WorkgroupSizeValue:
- return "identifier";
- case SymbolUseKind::CallFunction:
- return "function";
- }
- return "<unknown>";
+ switch (kind) {
+ case SymbolUseKind::GlobalVarType:
+ case SymbolUseKind::GlobalVarArrayElemType:
+ case SymbolUseKind::GlobalVarVectorElemType:
+ case SymbolUseKind::GlobalVarMatrixElemType:
+ case SymbolUseKind::GlobalVarSampledTexElemType:
+ case SymbolUseKind::GlobalVarMultisampledTexElemType:
+ case SymbolUseKind::GlobalLetType:
+ case SymbolUseKind::GlobalLetArrayElemType:
+ case SymbolUseKind::GlobalLetVectorElemType:
+ case SymbolUseKind::GlobalLetMatrixElemType:
+ case SymbolUseKind::AliasType:
+ case SymbolUseKind::StructMemberType:
+ case SymbolUseKind::ParameterType:
+ case SymbolUseKind::LocalVarType:
+ case SymbolUseKind::LocalVarArrayElemType:
+ case SymbolUseKind::LocalVarVectorElemType:
+ case SymbolUseKind::LocalVarMatrixElemType:
+ case SymbolUseKind::LocalLetType:
+ case SymbolUseKind::NestedLocalVarType:
+ case SymbolUseKind::NestedLocalLetType:
+ return "type";
+ case SymbolUseKind::GlobalVarValue:
+ case SymbolUseKind::GlobalVarArraySizeValue:
+ case SymbolUseKind::GlobalLetValue:
+ case SymbolUseKind::GlobalLetArraySizeValue:
+ case SymbolUseKind::LocalVarValue:
+ case SymbolUseKind::LocalVarArraySizeValue:
+ case SymbolUseKind::LocalLetValue:
+ case SymbolUseKind::NestedLocalVarValue:
+ case SymbolUseKind::NestedLocalLetValue:
+ case SymbolUseKind::WorkgroupSizeValue:
+ return "identifier";
+ case SymbolUseKind::CallFunction:
+ return "function";
+ }
+ return "<unknown>";
}
/// @returns the declaration scope depth for the symbol declaration kind.
/// Globals are at depth 0, parameters and locals are at depth 1,
/// nested locals are at depth 2.
int ScopeDepth(SymbolDeclKind kind) {
- switch (kind) {
- case SymbolDeclKind::GlobalVar:
- case SymbolDeclKind::GlobalLet:
- case SymbolDeclKind::Alias:
- case SymbolDeclKind::Struct:
- case SymbolDeclKind::Function:
- return 0;
- case SymbolDeclKind::Parameter:
- case SymbolDeclKind::LocalVar:
- case SymbolDeclKind::LocalLet:
- return 1;
- case SymbolDeclKind::NestedLocalVar:
- case SymbolDeclKind::NestedLocalLet:
- return 2;
- }
- return -1;
+ switch (kind) {
+ case SymbolDeclKind::GlobalVar:
+ case SymbolDeclKind::GlobalConst:
+ case SymbolDeclKind::Alias:
+ case SymbolDeclKind::Struct:
+ case SymbolDeclKind::Function:
+ return 0;
+ case SymbolDeclKind::Parameter:
+ case SymbolDeclKind::LocalVar:
+ case SymbolDeclKind::LocalLet:
+ return 1;
+ case SymbolDeclKind::NestedLocalVar:
+ case SymbolDeclKind::NestedLocalLet:
+ return 2;
+ }
+ return -1;
}
/// @returns the use depth for the symbol use kind.
/// Globals are at depth 0, parameters and locals are at depth 1,
/// nested locals are at depth 2.
int ScopeDepth(SymbolUseKind kind) {
- switch (kind) {
- case SymbolUseKind::GlobalVarType:
- case SymbolUseKind::GlobalVarValue:
- case SymbolUseKind::GlobalVarArrayElemType:
- case SymbolUseKind::GlobalVarArraySizeValue:
- case SymbolUseKind::GlobalVarVectorElemType:
- case SymbolUseKind::GlobalVarMatrixElemType:
- case SymbolUseKind::GlobalVarSampledTexElemType:
- case SymbolUseKind::GlobalVarMultisampledTexElemType:
- case SymbolUseKind::GlobalLetType:
- case SymbolUseKind::GlobalLetValue:
- case SymbolUseKind::GlobalLetArrayElemType:
- case SymbolUseKind::GlobalLetArraySizeValue:
- case SymbolUseKind::GlobalLetVectorElemType:
- case SymbolUseKind::GlobalLetMatrixElemType:
- case SymbolUseKind::AliasType:
- case SymbolUseKind::StructMemberType:
- case SymbolUseKind::WorkgroupSizeValue:
- return 0;
- case SymbolUseKind::CallFunction:
- case SymbolUseKind::ParameterType:
- case SymbolUseKind::LocalVarType:
- case SymbolUseKind::LocalVarArrayElemType:
- case SymbolUseKind::LocalVarArraySizeValue:
- case SymbolUseKind::LocalVarVectorElemType:
- case SymbolUseKind::LocalVarMatrixElemType:
- case SymbolUseKind::LocalVarValue:
- case SymbolUseKind::LocalLetType:
- case SymbolUseKind::LocalLetValue:
- return 1;
- case SymbolUseKind::NestedLocalVarType:
- case SymbolUseKind::NestedLocalVarValue:
- case SymbolUseKind::NestedLocalLetType:
- case SymbolUseKind::NestedLocalLetValue:
- return 2;
- }
- return -1;
+ switch (kind) {
+ case SymbolUseKind::GlobalVarType:
+ case SymbolUseKind::GlobalVarValue:
+ case SymbolUseKind::GlobalVarArrayElemType:
+ case SymbolUseKind::GlobalVarArraySizeValue:
+ case SymbolUseKind::GlobalVarVectorElemType:
+ case SymbolUseKind::GlobalVarMatrixElemType:
+ case SymbolUseKind::GlobalVarSampledTexElemType:
+ case SymbolUseKind::GlobalVarMultisampledTexElemType:
+ case SymbolUseKind::GlobalLetType:
+ case SymbolUseKind::GlobalLetValue:
+ case SymbolUseKind::GlobalLetArrayElemType:
+ case SymbolUseKind::GlobalLetArraySizeValue:
+ case SymbolUseKind::GlobalLetVectorElemType:
+ case SymbolUseKind::GlobalLetMatrixElemType:
+ case SymbolUseKind::AliasType:
+ case SymbolUseKind::StructMemberType:
+ case SymbolUseKind::WorkgroupSizeValue:
+ return 0;
+ case SymbolUseKind::CallFunction:
+ case SymbolUseKind::ParameterType:
+ case SymbolUseKind::LocalVarType:
+ case SymbolUseKind::LocalVarArrayElemType:
+ case SymbolUseKind::LocalVarArraySizeValue:
+ case SymbolUseKind::LocalVarVectorElemType:
+ case SymbolUseKind::LocalVarMatrixElemType:
+ case SymbolUseKind::LocalVarValue:
+ case SymbolUseKind::LocalLetType:
+ case SymbolUseKind::LocalLetValue:
+ return 1;
+ case SymbolUseKind::NestedLocalVarType:
+ case SymbolUseKind::NestedLocalVarValue:
+ case SymbolUseKind::NestedLocalLetType:
+ case SymbolUseKind::NestedLocalLetValue:
+ return 2;
+ }
+ return -1;
}
/// A helper for building programs that exercise symbol declaration tests.
struct SymbolTestHelper {
- /// The program builder
- ProgramBuilder* const builder;
- /// Parameters to a function that may need to be built
- std::vector<const ast::Variable*> parameters;
- /// Shallow function var / let declaration statements
- std::vector<const ast::Statement*> statements;
- /// Nested function local var / let declaration statements
- std::vector<const ast::Statement*> nested_statements;
- /// Function attributes
- ast::AttributeList func_attrs;
-
- /// Constructor
- /// @param builder the program builder
- explicit SymbolTestHelper(ProgramBuilder* builder);
-
- /// Destructor.
- ~SymbolTestHelper();
-
- /// Declares a symbol with the given kind
- /// @param kind the kind of symbol declaration
- /// @param symbol the symbol to use for the declaration
- /// @param source the source of the declaration
- /// @returns the declaration node
- const ast::Node* Add(SymbolDeclKind kind, Symbol symbol, Source source);
-
- /// Declares a use of a symbol with the given kind
- /// @param kind the kind of symbol use
- /// @param symbol the declaration symbol to use
- /// @param source the source of the use
- /// @returns the use node
- const ast::Node* Add(SymbolUseKind kind, Symbol symbol, Source source);
-
- /// Builds a function, if any parameter or local declarations have been added
- void Build();
+ /// The program builder
+ ProgramBuilder* const builder;
+ /// Parameters to a function that may need to be built
+ std::vector<const ast::Variable*> parameters;
+ /// Shallow function var / let declaration statements
+ std::vector<const ast::Statement*> statements;
+ /// Nested function local var / let declaration statements
+ std::vector<const ast::Statement*> nested_statements;
+ /// Function attributes
+ ast::AttributeList func_attrs;
+
+ /// Constructor
+ /// @param builder the program builder
+ explicit SymbolTestHelper(ProgramBuilder* builder);
+
+ /// Destructor.
+ ~SymbolTestHelper();
+
+ /// Declares a symbol with the given kind
+ /// @param kind the kind of symbol declaration
+ /// @param symbol the symbol to use for the declaration
+ /// @param source the source of the declaration
+ /// @returns the declaration node
+ const ast::Node* Add(SymbolDeclKind kind, Symbol symbol, Source source);
+
+ /// Declares a use of a symbol with the given kind
+ /// @param kind the kind of symbol use
+ /// @param symbol the declaration symbol to use
+ /// @param source the source of the use
+ /// @returns the use node
+ const ast::Node* Add(SymbolUseKind kind, Symbol symbol, Source source);
+
+ /// Builds a function, if any parameter or local declarations have been added
+ void Build();
};
SymbolTestHelper::SymbolTestHelper(ProgramBuilder* b) : builder(b) {}
SymbolTestHelper::~SymbolTestHelper() {}
-const ast::Node* SymbolTestHelper::Add(SymbolDeclKind kind,
- Symbol symbol,
- Source source) {
- auto& b = *builder;
- switch (kind) {
- case SymbolDeclKind::GlobalVar:
- return b.Global(source, symbol, b.ty.i32(), ast::StorageClass::kPrivate);
- case SymbolDeclKind::GlobalLet:
- return b.GlobalConst(source, symbol, b.ty.i32(), b.Expr(1));
- case SymbolDeclKind::Alias:
- return b.Alias(source, symbol, b.ty.i32());
- case SymbolDeclKind::Struct:
- return b.Structure(source, symbol, {b.Member("m", b.ty.i32())});
- case SymbolDeclKind::Function:
- return b.Func(source, symbol, {}, b.ty.void_(), {});
- case SymbolDeclKind::Parameter: {
- auto* node = b.Param(source, symbol, b.ty.i32());
- parameters.emplace_back(node);
- return node;
- }
- case SymbolDeclKind::LocalVar: {
- auto* node = b.Var(source, symbol, b.ty.i32());
- statements.emplace_back(b.Decl(node));
- return node;
- }
- case SymbolDeclKind::LocalLet: {
- auto* node = b.Const(source, symbol, b.ty.i32(), b.Expr(1));
- statements.emplace_back(b.Decl(node));
- return node;
- }
- case SymbolDeclKind::NestedLocalVar: {
- auto* node = b.Var(source, symbol, b.ty.i32());
- nested_statements.emplace_back(b.Decl(node));
- return node;
- }
- case SymbolDeclKind::NestedLocalLet: {
- auto* node = b.Const(source, symbol, b.ty.i32(), b.Expr(1));
- nested_statements.emplace_back(b.Decl(node));
- return node;
+const ast::Node* SymbolTestHelper::Add(SymbolDeclKind kind, Symbol symbol, Source source) {
+ auto& b = *builder;
+ switch (kind) {
+ case SymbolDeclKind::GlobalVar:
+ return b.Global(source, symbol, b.ty.i32(), ast::StorageClass::kPrivate);
+ case SymbolDeclKind::GlobalConst:
+ return b.GlobalConst(source, symbol, b.ty.i32(), b.Expr(1_i));
+ case SymbolDeclKind::Alias:
+ return b.Alias(source, symbol, b.ty.i32());
+ case SymbolDeclKind::Struct:
+ return b.Structure(source, symbol, {b.Member("m", b.ty.i32())});
+ case SymbolDeclKind::Function:
+ return b.Func(source, symbol, {}, b.ty.void_(), {});
+ case SymbolDeclKind::Parameter: {
+ auto* node = b.Param(source, symbol, b.ty.i32());
+ parameters.emplace_back(node);
+ return node;
+ }
+ case SymbolDeclKind::LocalVar: {
+ auto* node = b.Var(source, symbol, b.ty.i32());
+ statements.emplace_back(b.Decl(node));
+ return node;
+ }
+ case SymbolDeclKind::LocalLet: {
+ auto* node = b.Let(source, symbol, b.ty.i32(), b.Expr(1_i));
+ statements.emplace_back(b.Decl(node));
+ return node;
+ }
+ case SymbolDeclKind::NestedLocalVar: {
+ auto* node = b.Var(source, symbol, b.ty.i32());
+ nested_statements.emplace_back(b.Decl(node));
+ return node;
+ }
+ case SymbolDeclKind::NestedLocalLet: {
+ auto* node = b.Let(source, symbol, b.ty.i32(), b.Expr(1_i));
+ nested_statements.emplace_back(b.Decl(node));
+ return node;
+ }
}
- }
- return nullptr;
+ return nullptr;
}
-const ast::Node* SymbolTestHelper::Add(SymbolUseKind kind,
- Symbol symbol,
- Source source) {
- auto& b = *builder;
- switch (kind) {
- case SymbolUseKind::GlobalVarType: {
- auto* node = b.ty.type_name(source, symbol);
- b.Global(b.Sym(), node, ast::StorageClass::kPrivate);
- return node;
- }
- case SymbolUseKind::GlobalVarArrayElemType: {
- auto* node = b.ty.type_name(source, symbol);
- b.Global(b.Sym(), b.ty.array(node, 4), ast::StorageClass::kPrivate);
- return node;
- }
- case SymbolUseKind::GlobalVarArraySizeValue: {
- auto* node = b.Expr(source, symbol);
- b.Global(b.Sym(), b.ty.array(b.ty.i32(), node),
- ast::StorageClass::kPrivate);
- return node;
- }
- case SymbolUseKind::GlobalVarVectorElemType: {
- auto* node = b.ty.type_name(source, symbol);
- b.Global(b.Sym(), b.ty.vec3(node), ast::StorageClass::kPrivate);
- return node;
- }
- case SymbolUseKind::GlobalVarMatrixElemType: {
- auto* node = b.ty.type_name(source, symbol);
- b.Global(b.Sym(), b.ty.mat3x4(node), ast::StorageClass::kPrivate);
- return node;
- }
- case SymbolUseKind::GlobalVarSampledTexElemType: {
- auto* node = b.ty.type_name(source, symbol);
- b.Global(b.Sym(), b.ty.sampled_texture(ast::TextureDimension::k2d, node));
- return node;
- }
- case SymbolUseKind::GlobalVarMultisampledTexElemType: {
- auto* node = b.ty.type_name(source, symbol);
- b.Global(b.Sym(),
- b.ty.multisampled_texture(ast::TextureDimension::k2d, node));
- return node;
- }
- case SymbolUseKind::GlobalVarValue: {
- auto* node = b.Expr(source, symbol);
- b.Global(b.Sym(), b.ty.i32(), ast::StorageClass::kPrivate, node);
- return node;
- }
- case SymbolUseKind::GlobalLetType: {
- auto* node = b.ty.type_name(source, symbol);
- b.GlobalConst(b.Sym(), node, b.Expr(1));
- return node;
- }
- case SymbolUseKind::GlobalLetArrayElemType: {
- auto* node = b.ty.type_name(source, symbol);
- b.GlobalConst(b.Sym(), b.ty.array(node, 4), b.Expr(1));
- return node;
- }
- case SymbolUseKind::GlobalLetArraySizeValue: {
- auto* node = b.Expr(source, symbol);
- b.GlobalConst(b.Sym(), b.ty.array(b.ty.i32(), node), b.Expr(1));
- return node;
- }
- case SymbolUseKind::GlobalLetVectorElemType: {
- auto* node = b.ty.type_name(source, symbol);
- b.GlobalConst(b.Sym(), b.ty.vec3(node), b.Expr(1));
- return node;
- }
- case SymbolUseKind::GlobalLetMatrixElemType: {
- auto* node = b.ty.type_name(source, symbol);
- b.GlobalConst(b.Sym(), b.ty.mat3x4(node), b.Expr(1));
- return node;
- }
- case SymbolUseKind::GlobalLetValue: {
- auto* node = b.Expr(source, symbol);
- b.GlobalConst(b.Sym(), b.ty.i32(), node);
- return node;
- }
- case SymbolUseKind::AliasType: {
- auto* node = b.ty.type_name(source, symbol);
- b.Alias(b.Sym(), node);
- return node;
+const ast::Node* SymbolTestHelper::Add(SymbolUseKind kind, Symbol symbol, Source source) {
+ auto& b = *builder;
+ switch (kind) {
+ case SymbolUseKind::GlobalVarType: {
+ auto* node = b.ty.type_name(source, symbol);
+ b.Global(b.Sym(), node, ast::StorageClass::kPrivate);
+ return node;
+ }
+ case SymbolUseKind::GlobalVarArrayElemType: {
+ auto* node = b.ty.type_name(source, symbol);
+ b.Global(b.Sym(), b.ty.array(node, 4_i), ast::StorageClass::kPrivate);
+ return node;
+ }
+ case SymbolUseKind::GlobalVarArraySizeValue: {
+ auto* node = b.Expr(source, symbol);
+ b.Global(b.Sym(), b.ty.array(b.ty.i32(), node), ast::StorageClass::kPrivate);
+ return node;
+ }
+ case SymbolUseKind::GlobalVarVectorElemType: {
+ auto* node = b.ty.type_name(source, symbol);
+ b.Global(b.Sym(), b.ty.vec3(node), ast::StorageClass::kPrivate);
+ return node;
+ }
+ case SymbolUseKind::GlobalVarMatrixElemType: {
+ auto* node = b.ty.type_name(source, symbol);
+ b.Global(b.Sym(), b.ty.mat3x4(node), ast::StorageClass::kPrivate);
+ return node;
+ }
+ case SymbolUseKind::GlobalVarSampledTexElemType: {
+ auto* node = b.ty.type_name(source, symbol);
+ b.Global(b.Sym(), b.ty.sampled_texture(ast::TextureDimension::k2d, node));
+ return node;
+ }
+ case SymbolUseKind::GlobalVarMultisampledTexElemType: {
+ auto* node = b.ty.type_name(source, symbol);
+ b.Global(b.Sym(), b.ty.multisampled_texture(ast::TextureDimension::k2d, node));
+ return node;
+ }
+ case SymbolUseKind::GlobalVarValue: {
+ auto* node = b.Expr(source, symbol);
+ b.Global(b.Sym(), b.ty.i32(), ast::StorageClass::kPrivate, node);
+ return node;
+ }
+ case SymbolUseKind::GlobalLetType: {
+ auto* node = b.ty.type_name(source, symbol);
+ b.GlobalConst(b.Sym(), node, b.Expr(1_i));
+ return node;
+ }
+ case SymbolUseKind::GlobalLetArrayElemType: {
+ auto* node = b.ty.type_name(source, symbol);
+ b.GlobalConst(b.Sym(), b.ty.array(node, 4_i), b.Expr(1_i));
+ return node;
+ }
+ case SymbolUseKind::GlobalLetArraySizeValue: {
+ auto* node = b.Expr(source, symbol);
+ b.GlobalConst(b.Sym(), b.ty.array(b.ty.i32(), node), b.Expr(1_i));
+ return node;
+ }
+ case SymbolUseKind::GlobalLetVectorElemType: {
+ auto* node = b.ty.type_name(source, symbol);
+ b.GlobalConst(b.Sym(), b.ty.vec3(node), b.Expr(1_i));
+ return node;
+ }
+ case SymbolUseKind::GlobalLetMatrixElemType: {
+ auto* node = b.ty.type_name(source, symbol);
+ b.GlobalConst(b.Sym(), b.ty.mat3x4(node), b.Expr(1_i));
+ return node;
+ }
+ case SymbolUseKind::GlobalLetValue: {
+ auto* node = b.Expr(source, symbol);
+ b.GlobalConst(b.Sym(), b.ty.i32(), node);
+ return node;
+ }
+ case SymbolUseKind::AliasType: {
+ auto* node = b.ty.type_name(source, symbol);
+ b.Alias(b.Sym(), node);
+ return node;
+ }
+ case SymbolUseKind::StructMemberType: {
+ auto* node = b.ty.type_name(source, symbol);
+ b.Structure(b.Sym(), {b.Member("m", node)});
+ return node;
+ }
+ case SymbolUseKind::CallFunction: {
+ auto* node = b.Expr(source, symbol);
+ statements.emplace_back(b.CallStmt(b.Call(node)));
+ return node;
+ }
+ case SymbolUseKind::ParameterType: {
+ auto* node = b.ty.type_name(source, symbol);
+ parameters.emplace_back(b.Param(b.Sym(), node));
+ return node;
+ }
+ case SymbolUseKind::LocalVarType: {
+ auto* node = b.ty.type_name(source, symbol);
+ statements.emplace_back(b.Decl(b.Var(b.Sym(), node)));
+ return node;
+ }
+ case SymbolUseKind::LocalVarArrayElemType: {
+ auto* node = b.ty.type_name(source, symbol);
+ statements.emplace_back(b.Decl(b.Var(b.Sym(), b.ty.array(node, 4_u), b.Expr(1_i))));
+ return node;
+ }
+ case SymbolUseKind::LocalVarArraySizeValue: {
+ auto* node = b.Expr(source, symbol);
+ statements.emplace_back(
+ b.Decl(b.Var(b.Sym(), b.ty.array(b.ty.i32(), node), b.Expr(1_i))));
+ return node;
+ }
+ case SymbolUseKind::LocalVarVectorElemType: {
+ auto* node = b.ty.type_name(source, symbol);
+ statements.emplace_back(b.Decl(b.Var(b.Sym(), b.ty.vec3(node))));
+ return node;
+ }
+ case SymbolUseKind::LocalVarMatrixElemType: {
+ auto* node = b.ty.type_name(source, symbol);
+ statements.emplace_back(b.Decl(b.Var(b.Sym(), b.ty.mat3x4(node))));
+ return node;
+ }
+ case SymbolUseKind::LocalVarValue: {
+ auto* node = b.Expr(source, symbol);
+ statements.emplace_back(b.Decl(b.Var(b.Sym(), b.ty.i32(), node)));
+ return node;
+ }
+ case SymbolUseKind::LocalLetType: {
+ auto* node = b.ty.type_name(source, symbol);
+ statements.emplace_back(b.Decl(b.Let(b.Sym(), node, b.Expr(1_i))));
+ return node;
+ }
+ case SymbolUseKind::LocalLetValue: {
+ auto* node = b.Expr(source, symbol);
+ statements.emplace_back(b.Decl(b.Let(b.Sym(), b.ty.i32(), node)));
+ return node;
+ }
+ case SymbolUseKind::NestedLocalVarType: {
+ auto* node = b.ty.type_name(source, symbol);
+ nested_statements.emplace_back(b.Decl(b.Var(b.Sym(), node)));
+ return node;
+ }
+ case SymbolUseKind::NestedLocalVarValue: {
+ auto* node = b.Expr(source, symbol);
+ nested_statements.emplace_back(b.Decl(b.Var(b.Sym(), b.ty.i32(), node)));
+ return node;
+ }
+ case SymbolUseKind::NestedLocalLetType: {
+ auto* node = b.ty.type_name(source, symbol);
+ nested_statements.emplace_back(b.Decl(b.Let(b.Sym(), node, b.Expr(1_i))));
+ return node;
+ }
+ case SymbolUseKind::NestedLocalLetValue: {
+ auto* node = b.Expr(source, symbol);
+ nested_statements.emplace_back(b.Decl(b.Let(b.Sym(), b.ty.i32(), node)));
+ return node;
+ }
+ case SymbolUseKind::WorkgroupSizeValue: {
+ auto* node = b.Expr(source, symbol);
+ func_attrs.emplace_back(b.WorkgroupSize(1_i, node, 2_i));
+ return node;
+ }
}
- case SymbolUseKind::StructMemberType: {
- auto* node = b.ty.type_name(source, symbol);
- b.Structure(b.Sym(), {b.Member("m", node)});
- return node;
- }
- case SymbolUseKind::CallFunction: {
- auto* node = b.Expr(source, symbol);
- statements.emplace_back(b.CallStmt(b.Call(node)));
- return node;
- }
- case SymbolUseKind::ParameterType: {
- auto* node = b.ty.type_name(source, symbol);
- parameters.emplace_back(b.Param(b.Sym(), node));
- return node;
- }
- case SymbolUseKind::LocalVarType: {
- auto* node = b.ty.type_name(source, symbol);
- statements.emplace_back(b.Decl(b.Var(b.Sym(), node)));
- return node;
- }
- case SymbolUseKind::LocalVarArrayElemType: {
- auto* node = b.ty.type_name(source, symbol);
- statements.emplace_back(
- b.Decl(b.Var(b.Sym(), b.ty.array(node, 4), b.Expr(1))));
- return node;
- }
- case SymbolUseKind::LocalVarArraySizeValue: {
- auto* node = b.Expr(source, symbol);
- statements.emplace_back(
- b.Decl(b.Var(b.Sym(), b.ty.array(b.ty.i32(), node), b.Expr(1))));
- return node;
- }
- case SymbolUseKind::LocalVarVectorElemType: {
- auto* node = b.ty.type_name(source, symbol);
- statements.emplace_back(b.Decl(b.Var(b.Sym(), b.ty.vec3(node))));
- return node;
- }
- case SymbolUseKind::LocalVarMatrixElemType: {
- auto* node = b.ty.type_name(source, symbol);
- statements.emplace_back(b.Decl(b.Var(b.Sym(), b.ty.mat3x4(node))));
- return node;
- }
- case SymbolUseKind::LocalVarValue: {
- auto* node = b.Expr(source, symbol);
- statements.emplace_back(b.Decl(b.Var(b.Sym(), b.ty.i32(), node)));
- return node;
- }
- case SymbolUseKind::LocalLetType: {
- auto* node = b.ty.type_name(source, symbol);
- statements.emplace_back(b.Decl(b.Const(b.Sym(), node, b.Expr(1))));
- return node;
- }
- case SymbolUseKind::LocalLetValue: {
- auto* node = b.Expr(source, symbol);
- statements.emplace_back(b.Decl(b.Const(b.Sym(), b.ty.i32(), node)));
- return node;
- }
- case SymbolUseKind::NestedLocalVarType: {
- auto* node = b.ty.type_name(source, symbol);
- nested_statements.emplace_back(b.Decl(b.Var(b.Sym(), node)));
- return node;
- }
- case SymbolUseKind::NestedLocalVarValue: {
- auto* node = b.Expr(source, symbol);
- nested_statements.emplace_back(b.Decl(b.Var(b.Sym(), b.ty.i32(), node)));
- return node;
- }
- case SymbolUseKind::NestedLocalLetType: {
- auto* node = b.ty.type_name(source, symbol);
- nested_statements.emplace_back(b.Decl(b.Const(b.Sym(), node, b.Expr(1))));
- return node;
- }
- case SymbolUseKind::NestedLocalLetValue: {
- auto* node = b.Expr(source, symbol);
- nested_statements.emplace_back(
- b.Decl(b.Const(b.Sym(), b.ty.i32(), node)));
- return node;
- }
- case SymbolUseKind::WorkgroupSizeValue: {
- auto* node = b.Expr(source, symbol);
- func_attrs.emplace_back(b.WorkgroupSize(1, node, 2));
- return node;
- }
- }
- return nullptr;
+ return nullptr;
}
void SymbolTestHelper::Build() {
- auto& b = *builder;
- if (!nested_statements.empty()) {
- statements.emplace_back(b.Block(nested_statements));
- nested_statements.clear();
- }
- if (!parameters.empty() || !statements.empty() || !func_attrs.empty()) {
- b.Func("func", parameters, b.ty.void_(), statements, func_attrs);
- parameters.clear();
- statements.clear();
- func_attrs.clear();
- }
+ auto& b = *builder;
+ if (!nested_statements.empty()) {
+ statements.emplace_back(b.Block(nested_statements));
+ nested_statements.clear();
+ }
+ if (!parameters.empty() || !statements.empty() || !func_attrs.empty()) {
+ b.Func("func", parameters, b.ty.void_(), statements, func_attrs);
+ parameters.clear();
+ statements.clear();
+ func_attrs.clear();
+ }
}
////////////////////////////////////////////////////////////////////////////////
@@ -658,84 +648,81 @@ namespace used_before_decl_tests {
using ResolverDependencyGraphUsedBeforeDeclTest = ResolverDependencyGraphTest;
TEST_F(ResolverDependencyGraphUsedBeforeDeclTest, FuncCall) {
- // fn A() { B(); }
- // fn B() {}
+ // fn A() { B(); }
+ // fn B() {}
- Func("A", {}, ty.void_(), {CallStmt(Call(Expr(Source{{12, 34}}, "B")))});
- Func(Source{{56, 78}}, "B", {}, ty.void_(), {Return()});
+ Func("A", {}, ty.void_(), {CallStmt(Call(Expr(Source{{12, 34}}, "B")))});
+ Func(Source{{56, 78}}, "B", {}, ty.void_(), {Return()});
- Build();
+ Build();
}
TEST_F(ResolverDependencyGraphUsedBeforeDeclTest, TypeConstructed) {
- // fn F() {
- // { _ = T(); }
- // }
- // type T = i32;
+ // fn F() {
+ // { _ = T(); }
+ // }
+ // type T = i32;
- Func("F", {}, ty.void_(),
- {Block(Ignore(Construct(ty.type_name(Source{{12, 34}}, "T"))))});
- Alias(Source{{56, 78}}, "T", ty.i32());
+ Func("F", {}, ty.void_(), {Block(Ignore(Construct(ty.type_name(Source{{12, 34}}, "T"))))});
+ Alias(Source{{56, 78}}, "T", ty.i32());
- Build();
+ Build();
}
TEST_F(ResolverDependencyGraphUsedBeforeDeclTest, TypeUsedByLocal) {
- // fn F() {
- // { var v : T; }
- // }
- // type T = i32;
+ // fn F() {
+ // { var v : T; }
+ // }
+ // type T = i32;
- Func("F", {}, ty.void_(),
- {Block(Decl(Var("v", ty.type_name(Source{{12, 34}}, "T"))))});
- Alias(Source{{56, 78}}, "T", ty.i32());
+ Func("F", {}, ty.void_(), {Block(Decl(Var("v", ty.type_name(Source{{12, 34}}, "T"))))});
+ Alias(Source{{56, 78}}, "T", ty.i32());
- Build();
+ Build();
}
TEST_F(ResolverDependencyGraphUsedBeforeDeclTest, TypeUsedByParam) {
- // fn F(p : T) {}
- // type T = i32;
+ // fn F(p : T) {}
+ // type T = i32;
- Func("F", {Param("p", ty.type_name(Source{{12, 34}}, "T"))}, ty.void_(), {});
- Alias(Source{{56, 78}}, "T", ty.i32());
+ Func("F", {Param("p", ty.type_name(Source{{12, 34}}, "T"))}, ty.void_(), {});
+ Alias(Source{{56, 78}}, "T", ty.i32());
- Build();
+ Build();
}
TEST_F(ResolverDependencyGraphUsedBeforeDeclTest, TypeUsedAsReturnType) {
- // fn F() -> T {}
- // type T = i32;
+ // fn F() -> T {}
+ // type T = i32;
- Func("F", {}, ty.type_name(Source{{12, 34}}, "T"), {});
- Alias(Source{{56, 78}}, "T", ty.i32());
+ Func("F", {}, ty.type_name(Source{{12, 34}}, "T"), {});
+ Alias(Source{{56, 78}}, "T", ty.i32());
- Build();
+ Build();
}
TEST_F(ResolverDependencyGraphUsedBeforeDeclTest, TypeByStructMember) {
- // struct S { m : T };
- // type T = i32;
+ // struct S { m : T };
+ // type T = i32;
- Structure("S", {Member("m", ty.type_name(Source{{12, 34}}, "T"))});
- Alias(Source{{56, 78}}, "T", ty.i32());
+ Structure("S", {Member("m", ty.type_name(Source{{12, 34}}, "T"))});
+ Alias(Source{{56, 78}}, "T", ty.i32());
- Build();
+ Build();
}
TEST_F(ResolverDependencyGraphUsedBeforeDeclTest, VarUsed) {
- // fn F() {
- // { G = 3.14f; }
- // }
- // var G: f32 = 2.1;
+ // fn F() {
+ // { G = 3.14f; }
+ // }
+ // var G: f32 = 2.1;
- Func("F", ast::VariableList{}, ty.void_(),
- {Block(Assign(Expr(Source{{12, 34}}, "G"), 3.14f))});
+ Func("F", ast::VariableList{}, ty.void_(),
+ {Block(Assign(Expr(Source{{12, 34}}, "G"), 3.14_f))});
- Global(Source{{56, 78}}, "G", ty.f32(), ast::StorageClass::kPrivate,
- Expr(2.1f));
+ Global(Source{{56, 78}}, "G", ty.f32(), ast::StorageClass::kPrivate, Expr(2.1_f));
- Build();
+ Build();
}
} // namespace used_before_decl_tests
@@ -749,15 +736,15 @@ using ResolverDependencyGraphUndeclaredSymbolTest =
ResolverDependencyGraphTestWithParam<SymbolUseKind>;
TEST_P(ResolverDependencyGraphUndeclaredSymbolTest, Test) {
- const Symbol symbol = Sym("SYMBOL");
- const auto use_kind = GetParam();
+ const Symbol symbol = Sym("SYMBOL");
+ const auto use_kind = GetParam();
- // Build a use of a non-existent symbol
- SymbolTestHelper helper(this);
- helper.Add(use_kind, symbol, Source{{56, 78}});
- helper.Build();
+ // Build a use of a non-existent symbol
+ SymbolTestHelper helper(this);
+ helper.Add(use_kind, symbol, Source{{56, 78}});
+ helper.Build();
- Build("56:78 error: unknown " + DiagString(use_kind) + ": 'SYMBOL'");
+ Build("56:78 error: unknown " + DiagString(use_kind) + ": 'SYMBOL'");
}
INSTANTIATE_TEST_SUITE_P(Types,
@@ -782,31 +769,29 @@ namespace undeclared_tests {
using ResolverDependencyGraphDeclSelfUse = ResolverDependencyGraphTest;
TEST_F(ResolverDependencyGraphDeclSelfUse, GlobalVar) {
- const Symbol symbol = Sym("SYMBOL");
- Global(symbol, ty.i32(), Mul(Expr(Source{{12, 34}}, symbol), 123));
- Build(R"(error: cyclic dependency found: 'SYMBOL' -> 'SYMBOL'
+ const Symbol symbol = Sym("SYMBOL");
+ Global(symbol, ty.i32(), Mul(Expr(Source{{12, 34}}, symbol), 123_i));
+ Build(R"(error: cyclic dependency found: 'SYMBOL' -> 'SYMBOL'
12:34 note: var 'SYMBOL' references var 'SYMBOL' here)");
}
-TEST_F(ResolverDependencyGraphDeclSelfUse, GlobalLet) {
- const Symbol symbol = Sym("SYMBOL");
- GlobalConst(symbol, ty.i32(), Mul(Expr(Source{{12, 34}}, symbol), 123));
- Build(R"(error: cyclic dependency found: 'SYMBOL' -> 'SYMBOL'
+TEST_F(ResolverDependencyGraphDeclSelfUse, GlobalConst) {
+ const Symbol symbol = Sym("SYMBOL");
+ GlobalConst(symbol, ty.i32(), Mul(Expr(Source{{12, 34}}, symbol), 123_i));
+ Build(R"(error: cyclic dependency found: 'SYMBOL' -> 'SYMBOL'
12:34 note: let 'SYMBOL' references let 'SYMBOL' here)");
}
TEST_F(ResolverDependencyGraphDeclSelfUse, LocalVar) {
- const Symbol symbol = Sym("SYMBOL");
- WrapInFunction(
- Decl(Var(symbol, ty.i32(), Mul(Expr(Source{{12, 34}}, symbol), 123))));
- Build("12:34 error: unknown identifier: 'SYMBOL'");
+ const Symbol symbol = Sym("SYMBOL");
+ WrapInFunction(Decl(Var(symbol, ty.i32(), Mul(Expr(Source{{12, 34}}, symbol), 123_i))));
+ Build("12:34 error: unknown identifier: 'SYMBOL'");
}
TEST_F(ResolverDependencyGraphDeclSelfUse, LocalLet) {
- const Symbol symbol = Sym("SYMBOL");
- WrapInFunction(
- Decl(Const(symbol, ty.i32(), Mul(Expr(Source{{12, 34}}, symbol), 123))));
- Build("12:34 error: unknown identifier: 'SYMBOL'");
+ const Symbol symbol = Sym("SYMBOL");
+ WrapInFunction(Decl(Let(symbol, ty.i32(), Mul(Expr(Source{{12, 34}}, symbol), 123_i))));
+ Build("12:34 error: unknown identifier: 'SYMBOL'");
}
} // namespace undeclared_tests
@@ -819,180 +804,171 @@ namespace recursive_tests {
using ResolverDependencyGraphCyclicRefTest = ResolverDependencyGraphTest;
TEST_F(ResolverDependencyGraphCyclicRefTest, DirectCall) {
- // fn main() { main(); }
+ // fn main() { main(); }
- Func(Source{{12, 34}}, "main", {}, ty.void_(),
- {CallStmt(Call(Expr(Source{{56, 78}}, "main")))});
+ Func(Source{{12, 34}}, "main", {}, ty.void_(),
+ {CallStmt(Call(Expr(Source{{56, 78}}, "main")))});
- Build(R"(12:34 error: cyclic dependency found: 'main' -> 'main'
+ Build(R"(12:34 error: cyclic dependency found: 'main' -> 'main'
56:78 note: function 'main' calls function 'main' here)");
}
TEST_F(ResolverDependencyGraphCyclicRefTest, IndirectCall) {
- // 1: fn a() { b(); }
- // 2: fn e() { }
- // 3: fn d() { e(); b(); }
- // 4: fn c() { d(); }
- // 5: fn b() { c(); }
-
- Func(Source{{1, 1}}, "a", {}, ty.void_(),
- {CallStmt(Call(Expr(Source{{1, 10}}, "b")))});
- Func(Source{{2, 1}}, "e", {}, ty.void_(), {});
- Func(Source{{3, 1}}, "d", {}, ty.void_(),
- {
- CallStmt(Call(Expr(Source{{3, 10}}, "e"))),
- CallStmt(Call(Expr(Source{{3, 10}}, "b"))),
- });
- Func(Source{{4, 1}}, "c", {}, ty.void_(),
- {CallStmt(Call(Expr(Source{{4, 10}}, "d")))});
- Func(Source{{5, 1}}, "b", {}, ty.void_(),
- {CallStmt(Call(Expr(Source{{5, 10}}, "c")))});
-
- Build(R"(5:1 error: cyclic dependency found: 'b' -> 'c' -> 'd' -> 'b'
+ // 1: fn a() { b(); }
+ // 2: fn e() { }
+ // 3: fn d() { e(); b(); }
+ // 4: fn c() { d(); }
+ // 5: fn b() { c(); }
+
+ Func(Source{{1, 1}}, "a", {}, ty.void_(), {CallStmt(Call(Expr(Source{{1, 10}}, "b")))});
+ Func(Source{{2, 1}}, "e", {}, ty.void_(), {});
+ Func(Source{{3, 1}}, "d", {}, ty.void_(),
+ {
+ CallStmt(Call(Expr(Source{{3, 10}}, "e"))),
+ CallStmt(Call(Expr(Source{{3, 10}}, "b"))),
+ });
+ Func(Source{{4, 1}}, "c", {}, ty.void_(), {CallStmt(Call(Expr(Source{{4, 10}}, "d")))});
+ Func(Source{{5, 1}}, "b", {}, ty.void_(), {CallStmt(Call(Expr(Source{{5, 10}}, "c")))});
+
+ Build(R"(5:1 error: cyclic dependency found: 'b' -> 'c' -> 'd' -> 'b'
5:10 note: function 'b' calls function 'c' here
4:10 note: function 'c' calls function 'd' here
3:10 note: function 'd' calls function 'b' here)");
}
TEST_F(ResolverDependencyGraphCyclicRefTest, Alias_Direct) {
- // type T = T;
+ // type T = T;
- Alias(Source{{12, 34}}, "T", ty.type_name(Source{{56, 78}}, "T"));
+ Alias(Source{{12, 34}}, "T", ty.type_name(Source{{56, 78}}, "T"));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- R"(12:34 error: cyclic dependency found: 'T' -> 'T'
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ R"(12:34 error: cyclic dependency found: 'T' -> 'T'
56:78 note: alias 'T' references alias 'T' here)");
}
TEST_F(ResolverDependencyGraphCyclicRefTest, Alias_Indirect) {
- // 1: type Y = Z;
- // 2: type X = Y;
- // 3: type Z = X;
+ // 1: type Y = Z;
+ // 2: type X = Y;
+ // 3: type Z = X;
- Alias(Source{{1, 1}}, "Y", ty.type_name(Source{{1, 10}}, "Z"));
- Alias(Source{{2, 1}}, "X", ty.type_name(Source{{2, 10}}, "Y"));
- Alias(Source{{3, 1}}, "Z", ty.type_name(Source{{3, 10}}, "X"));
+ Alias(Source{{1, 1}}, "Y", ty.type_name(Source{{1, 10}}, "Z"));
+ Alias(Source{{2, 1}}, "X", ty.type_name(Source{{2, 10}}, "Y"));
+ Alias(Source{{3, 1}}, "Z", ty.type_name(Source{{3, 10}}, "X"));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- R"(1:1 error: cyclic dependency found: 'Y' -> 'Z' -> 'X' -> 'Y'
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ R"(1:1 error: cyclic dependency found: 'Y' -> 'Z' -> 'X' -> 'Y'
1:10 note: alias 'Y' references alias 'Z' here
3:10 note: alias 'Z' references alias 'X' here
2:10 note: alias 'X' references alias 'Y' here)");
}
TEST_F(ResolverDependencyGraphCyclicRefTest, Struct_Direct) {
- // struct S {
- // a: S;
- // };
+ // struct S {
+ // a: S;
+ // };
- Structure(Source{{12, 34}}, "S",
- {Member("a", ty.type_name(Source{{56, 78}}, "S"))});
+ Structure(Source{{12, 34}}, "S", {Member("a", ty.type_name(Source{{56, 78}}, "S"))});
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- R"(12:34 error: cyclic dependency found: 'S' -> 'S'
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ R"(12:34 error: cyclic dependency found: 'S' -> 'S'
56:78 note: struct 'S' references struct 'S' here)");
}
TEST_F(ResolverDependencyGraphCyclicRefTest, Struct_Indirect) {
- // 1: struct Y { z: Z; };
- // 2: struct X { y: Y; };
- // 3: struct Z { x: X; };
-
- Structure(Source{{1, 1}}, "Y",
- {Member("z", ty.type_name(Source{{1, 10}}, "Z"))});
- Structure(Source{{2, 1}}, "X",
- {Member("y", ty.type_name(Source{{2, 10}}, "Y"))});
- Structure(Source{{3, 1}}, "Z",
- {Member("x", ty.type_name(Source{{3, 10}}, "X"))});
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- R"(1:1 error: cyclic dependency found: 'Y' -> 'Z' -> 'X' -> 'Y'
+ // 1: struct Y { z: Z; };
+ // 2: struct X { y: Y; };
+ // 3: struct Z { x: X; };
+
+ Structure(Source{{1, 1}}, "Y", {Member("z", ty.type_name(Source{{1, 10}}, "Z"))});
+ Structure(Source{{2, 1}}, "X", {Member("y", ty.type_name(Source{{2, 10}}, "Y"))});
+ Structure(Source{{3, 1}}, "Z", {Member("x", ty.type_name(Source{{3, 10}}, "X"))});
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ R"(1:1 error: cyclic dependency found: 'Y' -> 'Z' -> 'X' -> 'Y'
1:10 note: struct 'Y' references struct 'Z' here
3:10 note: struct 'Z' references struct 'X' here
2:10 note: struct 'X' references struct 'Y' here)");
}
TEST_F(ResolverDependencyGraphCyclicRefTest, GlobalVar_Direct) {
- // var<private> V : i32 = V;
+ // var<private> V : i32 = V;
- Global(Source{{12, 34}}, "V", ty.i32(), Expr(Source{{56, 78}}, "V"));
+ Global(Source{{12, 34}}, "V", ty.i32(), Expr(Source{{56, 78}}, "V"));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- R"(12:34 error: cyclic dependency found: 'V' -> 'V'
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ R"(12:34 error: cyclic dependency found: 'V' -> 'V'
56:78 note: var 'V' references var 'V' here)");
}
TEST_F(ResolverDependencyGraphCyclicRefTest, GlobalLet_Direct) {
- // let V : i32 = V;
+ // let V : i32 = V;
- GlobalConst(Source{{12, 34}}, "V", ty.i32(), Expr(Source{{56, 78}}, "V"));
+ GlobalConst(Source{{12, 34}}, "V", ty.i32(), Expr(Source{{56, 78}}, "V"));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- R"(12:34 error: cyclic dependency found: 'V' -> 'V'
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ R"(12:34 error: cyclic dependency found: 'V' -> 'V'
56:78 note: let 'V' references let 'V' here)");
}
TEST_F(ResolverDependencyGraphCyclicRefTest, GlobalVar_Indirect) {
- // 1: var<private> Y : i32 = Z;
- // 2: var<private> X : i32 = Y;
- // 3: var<private> Z : i32 = X;
+ // 1: var<private> Y : i32 = Z;
+ // 2: var<private> X : i32 = Y;
+ // 3: var<private> Z : i32 = X;
- Global(Source{{1, 1}}, "Y", ty.i32(), Expr(Source{{1, 10}}, "Z"));
- Global(Source{{2, 1}}, "X", ty.i32(), Expr(Source{{2, 10}}, "Y"));
- Global(Source{{3, 1}}, "Z", ty.i32(), Expr(Source{{3, 10}}, "X"));
+ Global(Source{{1, 1}}, "Y", ty.i32(), Expr(Source{{1, 10}}, "Z"));
+ Global(Source{{2, 1}}, "X", ty.i32(), Expr(Source{{2, 10}}, "Y"));
+ Global(Source{{3, 1}}, "Z", ty.i32(), Expr(Source{{3, 10}}, "X"));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- R"(1:1 error: cyclic dependency found: 'Y' -> 'Z' -> 'X' -> 'Y'
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ R"(1:1 error: cyclic dependency found: 'Y' -> 'Z' -> 'X' -> 'Y'
1:10 note: var 'Y' references var 'Z' here
3:10 note: var 'Z' references var 'X' here
2:10 note: var 'X' references var 'Y' here)");
}
TEST_F(ResolverDependencyGraphCyclicRefTest, GlobalLet_Indirect) {
- // 1: let Y : i32 = Z;
- // 2: let X : i32 = Y;
- // 3: let Z : i32 = X;
+ // 1: let Y : i32 = Z;
+ // 2: let X : i32 = Y;
+ // 3: let Z : i32 = X;
- GlobalConst(Source{{1, 1}}, "Y", ty.i32(), Expr(Source{{1, 10}}, "Z"));
- GlobalConst(Source{{2, 1}}, "X", ty.i32(), Expr(Source{{2, 10}}, "Y"));
- GlobalConst(Source{{3, 1}}, "Z", ty.i32(), Expr(Source{{3, 10}}, "X"));
+ GlobalConst(Source{{1, 1}}, "Y", ty.i32(), Expr(Source{{1, 10}}, "Z"));
+ GlobalConst(Source{{2, 1}}, "X", ty.i32(), Expr(Source{{2, 10}}, "Y"));
+ GlobalConst(Source{{3, 1}}, "Z", ty.i32(), Expr(Source{{3, 10}}, "X"));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- R"(1:1 error: cyclic dependency found: 'Y' -> 'Z' -> 'X' -> 'Y'
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ R"(1:1 error: cyclic dependency found: 'Y' -> 'Z' -> 'X' -> 'Y'
1:10 note: let 'Y' references let 'Z' here
3:10 note: let 'Z' references let 'X' here
2:10 note: let 'X' references let 'Y' here)");
}
TEST_F(ResolverDependencyGraphCyclicRefTest, Mixed_RecursiveDependencies) {
- // 1: fn F() -> R { return Z; }
- // 2: type A = S;
- // 3: struct S { a : A };
- // 4: var Z = L;
- // 5: type R = A;
- // 6: let L : S = Z;
-
- Func(Source{{1, 1}}, "F", {}, ty.type_name(Source{{1, 5}}, "R"),
- {Return(Expr(Source{{1, 10}}, "Z"))});
- Alias(Source{{2, 1}}, "A", ty.type_name(Source{{2, 10}}, "S"));
- Structure(Source{{3, 1}}, "S",
- {Member("a", ty.type_name(Source{{3, 10}}, "A"))});
- Global(Source{{4, 1}}, "Z", nullptr, Expr(Source{{4, 10}}, "L"));
- Alias(Source{{5, 1}}, "R", ty.type_name(Source{{5, 10}}, "A"));
- GlobalConst(Source{{6, 1}}, "L", ty.type_name(Source{{5, 5}}, "S"),
- Expr(Source{{5, 10}}, "Z"));
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- R"(2:1 error: cyclic dependency found: 'A' -> 'S' -> 'A'
+ // 1: fn F() -> R { return Z; }
+ // 2: type A = S;
+ // 3: struct S { a : A };
+ // 4: var Z = L;
+ // 5: type R = A;
+ // 6: let L : S = Z;
+
+ Func(Source{{1, 1}}, "F", {}, ty.type_name(Source{{1, 5}}, "R"),
+ {Return(Expr(Source{{1, 10}}, "Z"))});
+ Alias(Source{{2, 1}}, "A", ty.type_name(Source{{2, 10}}, "S"));
+ Structure(Source{{3, 1}}, "S", {Member("a", ty.type_name(Source{{3, 10}}, "A"))});
+ Global(Source{{4, 1}}, "Z", nullptr, Expr(Source{{4, 10}}, "L"));
+ Alias(Source{{5, 1}}, "R", ty.type_name(Source{{5, 10}}, "A"));
+ GlobalConst(Source{{6, 1}}, "L", ty.type_name(Source{{5, 5}}, "S"), Expr(Source{{5, 10}}, "Z"));
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ R"(2:1 error: cyclic dependency found: 'A' -> 'S' -> 'A'
2:10 note: alias 'A' references struct 'S' here
3:10 note: struct 'S' references alias 'A' here
4:1 error: cyclic dependency found: 'Z' -> 'L' -> 'Z'
@@ -1008,40 +984,37 @@ TEST_F(ResolverDependencyGraphCyclicRefTest, Mixed_RecursiveDependencies) {
namespace redeclaration_tests {
using ResolverDependencyGraphRedeclarationTest =
- ResolverDependencyGraphTestWithParam<
- std::tuple<SymbolDeclKind, SymbolDeclKind>>;
+ ResolverDependencyGraphTestWithParam<std::tuple<SymbolDeclKind, SymbolDeclKind>>;
TEST_P(ResolverDependencyGraphRedeclarationTest, Test) {
- const auto symbol = Sym("SYMBOL");
+ const auto symbol = Sym("SYMBOL");
- auto a_kind = std::get<0>(GetParam());
- auto b_kind = std::get<1>(GetParam());
+ auto a_kind = std::get<0>(GetParam());
+ auto b_kind = std::get<1>(GetParam());
- auto a_source = Source{{12, 34}};
- auto b_source = Source{{56, 78}};
+ auto a_source = Source{{12, 34}};
+ auto b_source = Source{{56, 78}};
- if (a_kind != SymbolDeclKind::Parameter &&
- b_kind == SymbolDeclKind::Parameter) {
- std::swap(a_source, b_source); // Parameters are declared before locals
- }
+ if (a_kind != SymbolDeclKind::Parameter && b_kind == SymbolDeclKind::Parameter) {
+ std::swap(a_source, b_source); // Parameters are declared before locals
+ }
- SymbolTestHelper helper(this);
- helper.Add(a_kind, symbol, a_source);
- helper.Add(b_kind, symbol, b_source);
- helper.Build();
+ SymbolTestHelper helper(this);
+ helper.Add(a_kind, symbol, a_source);
+ helper.Add(b_kind, symbol, b_source);
+ helper.Build();
- bool error = ScopeDepth(a_kind) == ScopeDepth(b_kind);
+ bool error = ScopeDepth(a_kind) == ScopeDepth(b_kind);
- Build(error ? R"(56:78 error: redeclaration of 'SYMBOL'
+ Build(error ? R"(56:78 error: redeclaration of 'SYMBOL'
12:34 note: 'SYMBOL' previously declared here)"
- : "");
+ : "");
}
-INSTANTIATE_TEST_SUITE_P(
- ResolverTest,
- ResolverDependencyGraphRedeclarationTest,
- testing::Combine(testing::ValuesIn(kAllSymbolDeclKinds),
- testing::ValuesIn(kAllSymbolDeclKinds)));
+INSTANTIATE_TEST_SUITE_P(ResolverTest,
+ ResolverDependencyGraphRedeclarationTest,
+ testing::Combine(testing::ValuesIn(kAllSymbolDeclKinds),
+ testing::ValuesIn(kAllSymbolDeclKinds)));
} // namespace redeclaration_tests
@@ -1051,45 +1024,44 @@ INSTANTIATE_TEST_SUITE_P(
namespace ordered_globals {
using ResolverDependencyGraphOrderedGlobalsTest =
- ResolverDependencyGraphTestWithParam<
- std::tuple<SymbolDeclKind, SymbolUseKind>>;
+ ResolverDependencyGraphTestWithParam<std::tuple<SymbolDeclKind, SymbolUseKind>>;
TEST_P(ResolverDependencyGraphOrderedGlobalsTest, InOrder) {
- const Symbol symbol = Sym("SYMBOL");
- const auto decl_kind = std::get<0>(GetParam());
- const auto use_kind = std::get<1>(GetParam());
+ const Symbol symbol = Sym("SYMBOL");
+ const auto decl_kind = std::get<0>(GetParam());
+ const auto use_kind = std::get<1>(GetParam());
- // Declaration before use
- SymbolTestHelper helper(this);
- helper.Add(decl_kind, symbol, Source{{12, 34}});
- helper.Add(use_kind, symbol, Source{{56, 78}});
- helper.Build();
+ // Declaration before use
+ SymbolTestHelper helper(this);
+ helper.Add(decl_kind, symbol, Source{{12, 34}});
+ helper.Add(use_kind, symbol, Source{{56, 78}});
+ helper.Build();
- ASSERT_EQ(AST().GlobalDeclarations().size(), 2u);
+ ASSERT_EQ(AST().GlobalDeclarations().size(), 2u);
- auto* decl = AST().GlobalDeclarations()[0];
- auto* use = AST().GlobalDeclarations()[1];
- EXPECT_THAT(Build().ordered_globals, ElementsAre(decl, use));
+ auto* decl = AST().GlobalDeclarations()[0];
+ auto* use = AST().GlobalDeclarations()[1];
+ EXPECT_THAT(Build().ordered_globals, ElementsAre(decl, use));
}
TEST_P(ResolverDependencyGraphOrderedGlobalsTest, OutOfOrder) {
- const Symbol symbol = Sym("SYMBOL");
- const auto decl_kind = std::get<0>(GetParam());
- const auto use_kind = std::get<1>(GetParam());
-
- // Use before declaration
- SymbolTestHelper helper(this);
- helper.Add(use_kind, symbol, Source{{56, 78}});
- helper.Build(); // If the use is in a function, then ensure this function is
- // built before the symbol declaration
- helper.Add(decl_kind, symbol, Source{{12, 34}});
- helper.Build();
-
- ASSERT_EQ(AST().GlobalDeclarations().size(), 2u);
-
- auto* use = AST().GlobalDeclarations()[0];
- auto* decl = AST().GlobalDeclarations()[1];
- EXPECT_THAT(Build().ordered_globals, ElementsAre(decl, use));
+ const Symbol symbol = Sym("SYMBOL");
+ const auto decl_kind = std::get<0>(GetParam());
+ const auto use_kind = std::get<1>(GetParam());
+
+ // Use before declaration
+ SymbolTestHelper helper(this);
+ helper.Add(use_kind, symbol, Source{{56, 78}});
+ helper.Build(); // If the use is in a function, then ensure this function is
+ // built before the symbol declaration
+ helper.Add(decl_kind, symbol, Source{{12, 34}});
+ helper.Build();
+
+ ASSERT_EQ(AST().GlobalDeclarations().size(), 2u);
+
+ auto* use = AST().GlobalDeclarations()[0];
+ auto* decl = AST().GlobalDeclarations()[1];
+ EXPECT_THAT(Build().ordered_globals, ElementsAre(decl, use));
}
INSTANTIATE_TEST_SUITE_P(Types,
@@ -1097,11 +1069,10 @@ INSTANTIATE_TEST_SUITE_P(Types,
testing::Combine(testing::ValuesIn(kTypeDeclKinds),
testing::ValuesIn(kTypeUseKinds)));
-INSTANTIATE_TEST_SUITE_P(
- Values,
- ResolverDependencyGraphOrderedGlobalsTest,
- testing::Combine(testing::ValuesIn(kGlobalValueDeclKinds),
- testing::ValuesIn(kValueUseKinds)));
+INSTANTIATE_TEST_SUITE_P(Values,
+ ResolverDependencyGraphOrderedGlobalsTest,
+ testing::Combine(testing::ValuesIn(kGlobalValueDeclKinds),
+ testing::ValuesIn(kValueUseKinds)));
INSTANTIATE_TEST_SUITE_P(Functions,
ResolverDependencyGraphOrderedGlobalsTest,
@@ -1115,35 +1086,32 @@ INSTANTIATE_TEST_SUITE_P(Functions,
namespace resolved_symbols {
using ResolverDependencyGraphResolvedSymbolTest =
- ResolverDependencyGraphTestWithParam<
- std::tuple<SymbolDeclKind, SymbolUseKind>>;
+ ResolverDependencyGraphTestWithParam<std::tuple<SymbolDeclKind, SymbolUseKind>>;
TEST_P(ResolverDependencyGraphResolvedSymbolTest, Test) {
- const Symbol symbol = Sym("SYMBOL");
- const auto decl_kind = std::get<0>(GetParam());
- const auto use_kind = std::get<1>(GetParam());
-
- // Build a symbol declaration and a use of that symbol
- SymbolTestHelper helper(this);
- auto* decl = helper.Add(decl_kind, symbol, Source{{12, 34}});
- auto* use = helper.Add(use_kind, symbol, Source{{56, 78}});
- helper.Build();
-
- // If the declaration is visible to the use, then we expect the analysis to
- // succeed.
- bool expect_pass = ScopeDepth(decl_kind) <= ScopeDepth(use_kind);
- auto graph =
- Build(expect_pass ? "" : "56:78 error: unknown identifier: 'SYMBOL'");
-
- if (expect_pass) {
- // Check that the use resolves to the declaration
- auto* resolved_symbol = graph.resolved_symbols[use];
- EXPECT_EQ(resolved_symbol, decl)
- << "resolved: "
- << (resolved_symbol ? resolved_symbol->TypeInfo().name : "<null>")
- << "\n"
- << "decl: " << decl->TypeInfo().name;
- }
+ const Symbol symbol = Sym("SYMBOL");
+ const auto decl_kind = std::get<0>(GetParam());
+ const auto use_kind = std::get<1>(GetParam());
+
+ // Build a symbol declaration and a use of that symbol
+ SymbolTestHelper helper(this);
+ auto* decl = helper.Add(decl_kind, symbol, Source{{12, 34}});
+ auto* use = helper.Add(use_kind, symbol, Source{{56, 78}});
+ helper.Build();
+
+ // If the declaration is visible to the use, then we expect the analysis to
+ // succeed.
+ bool expect_pass = ScopeDepth(decl_kind) <= ScopeDepth(use_kind);
+ auto graph = Build(expect_pass ? "" : "56:78 error: unknown identifier: 'SYMBOL'");
+
+ if (expect_pass) {
+ // Check that the use resolves to the declaration
+ auto* resolved_symbol = graph.resolved_symbols[use];
+ EXPECT_EQ(resolved_symbol, decl)
+ << "resolved: " << (resolved_symbol ? resolved_symbol->TypeInfo().name : "<null>")
+ << "\n"
+ << "decl: " << decl->TypeInfo().name;
+ }
}
INSTANTIATE_TEST_SUITE_P(Types,
@@ -1168,30 +1136,26 @@ INSTANTIATE_TEST_SUITE_P(Functions,
////////////////////////////////////////////////////////////////////////////////
namespace shadowing {
-using ResolverDependencyShadowTest = ResolverDependencyGraphTestWithParam<
- std::tuple<SymbolDeclKind, SymbolDeclKind>>;
+using ResolverDependencyShadowTest =
+ ResolverDependencyGraphTestWithParam<std::tuple<SymbolDeclKind, SymbolDeclKind>>;
TEST_P(ResolverDependencyShadowTest, Test) {
- const Symbol symbol = Sym("SYMBOL");
- const auto outer_kind = std::get<0>(GetParam());
- const auto inner_kind = std::get<1>(GetParam());
-
- // Build a symbol declaration and a use of that symbol
- SymbolTestHelper helper(this);
- auto* outer = helper.Add(outer_kind, symbol, Source{{12, 34}});
- helper.Add(inner_kind, symbol, Source{{56, 78}});
- auto* inner_var = helper.nested_statements.size()
- ? helper.nested_statements[0]
- ->As<ast::VariableDeclStatement>()
- ->variable
- : helper.statements.size()
- ? helper.statements[0]
- ->As<ast::VariableDeclStatement>()
- ->variable
- : helper.parameters[0];
- helper.Build();
-
- EXPECT_EQ(Build().shadows[inner_var], outer);
+ const Symbol symbol = Sym("SYMBOL");
+ const auto outer_kind = std::get<0>(GetParam());
+ const auto inner_kind = std::get<1>(GetParam());
+
+ // Build a symbol declaration and a use of that symbol
+ SymbolTestHelper helper(this);
+ auto* outer = helper.Add(outer_kind, symbol, Source{{12, 34}});
+ helper.Add(inner_kind, symbol, Source{{56, 78}});
+ auto* inner_var = helper.nested_statements.size()
+ ? helper.nested_statements[0]->As<ast::VariableDeclStatement>()->variable
+ : helper.statements.size()
+ ? helper.statements[0]->As<ast::VariableDeclStatement>()->variable
+ : helper.parameters[0];
+ helper.Build();
+
+ EXPECT_EQ(Build().shadows[inner_var], outer);
}
INSTANTIATE_TEST_SUITE_P(LocalShadowGlobal,
@@ -1199,14 +1163,13 @@ INSTANTIATE_TEST_SUITE_P(LocalShadowGlobal,
testing::Combine(testing::ValuesIn(kGlobalDeclKinds),
testing::ValuesIn(kLocalDeclKinds)));
-INSTANTIATE_TEST_SUITE_P(
- NestedLocalShadowLocal,
- ResolverDependencyShadowTest,
- testing::Combine(testing::Values(SymbolDeclKind::Parameter,
- SymbolDeclKind::LocalVar,
- SymbolDeclKind::LocalLet),
- testing::Values(SymbolDeclKind::NestedLocalVar,
- SymbolDeclKind::NestedLocalLet)));
+INSTANTIATE_TEST_SUITE_P(NestedLocalShadowLocal,
+ ResolverDependencyShadowTest,
+ testing::Combine(testing::Values(SymbolDeclKind::Parameter,
+ SymbolDeclKind::LocalVar,
+ SymbolDeclKind::LocalLet),
+ testing::Values(SymbolDeclKind::NestedLocalVar,
+ SymbolDeclKind::NestedLocalLet)));
} // namespace shadowing
@@ -1218,120 +1181,116 @@ namespace ast_traversal {
using ResolverDependencyGraphTraversalTest = ResolverDependencyGraphTest;
TEST_F(ResolverDependencyGraphTraversalTest, SymbolsReached) {
- const auto value_sym = Sym("VALUE");
- const auto type_sym = Sym("TYPE");
- const auto func_sym = Sym("FUNC");
-
- const auto* value_decl =
- Global(value_sym, ty.i32(), ast::StorageClass::kPrivate);
- const auto* type_decl = Alias(type_sym, ty.i32());
- const auto* func_decl = Func(func_sym, {}, ty.void_(), {});
-
- struct SymbolUse {
- const ast::Node* decl = nullptr;
- const ast::Node* use = nullptr;
- std::string where;
- };
-
- std::vector<SymbolUse> symbol_uses;
-
- auto add_use = [&](const ast::Node* decl, auto* use, int line,
- const char* kind) {
- symbol_uses.emplace_back(SymbolUse{
- decl, use,
- std::string(__FILE__) + ":" + std::to_string(line) + ": " + kind});
- return use;
- };
+ const auto value_sym = Sym("VALUE");
+ const auto type_sym = Sym("TYPE");
+ const auto func_sym = Sym("FUNC");
+
+ const auto* value_decl = Global(value_sym, ty.i32(), ast::StorageClass::kPrivate);
+ const auto* type_decl = Alias(type_sym, ty.i32());
+ const auto* func_decl = Func(func_sym, {}, ty.void_(), {});
+
+ struct SymbolUse {
+ const ast::Node* decl = nullptr;
+ const ast::Node* use = nullptr;
+ std::string where;
+ };
+
+ std::vector<SymbolUse> symbol_uses;
+
+ auto add_use = [&](const ast::Node* decl, auto* use, int line, const char* kind) {
+ symbol_uses.emplace_back(
+ SymbolUse{decl, use, std::string(__FILE__) + ":" + std::to_string(line) + ": " + kind});
+ return use;
+ };
#define V add_use(value_decl, Expr(value_sym), __LINE__, "V()")
#define T add_use(type_decl, ty.type_name(type_sym), __LINE__, "T()")
#define F add_use(func_decl, Expr(func_sym), __LINE__, "F()")
- Alias(Sym(), T);
- Structure(Sym(), {Member(Sym(), T)});
- Global(Sym(), T, V);
- GlobalConst(Sym(), T, V);
- Func(Sym(), //
- {Param(Sym(), T)}, //
- T, // Return type
- {
- Decl(Var(Sym(), T, V)), //
- Decl(Const(Sym(), T, V)), //
- CallStmt(Call(F, V)), //
- Block( //
- Assign(V, V)), //
- If(V, //
- Block(Assign(V, V)), //
- Else(V, //
- Block(Assign(V, V)))), //
- Ignore(Bitcast(T, V)), //
- For(Decl(Var(Sym(), T, V)), //
- Equal(V, V), //
- Assign(V, V), //
- Block( //
- Assign(V, V))), //
- Loop(Block(Assign(V, V)), //
- Block(Assign(V, V))), //
- Switch(V, //
- Case(Expr(1), //
- Block(Assign(V, V))), //
- Case(Expr(2), //
- Block(Fallthrough())), //
- DefaultCase(Block(Assign(V, V)))), //
- Return(V), //
- Break(), //
- Discard(), //
- }); //
- // Exercise type traversal
- Global(Sym(), ty.atomic(T));
- Global(Sym(), ty.bool_());
- Global(Sym(), ty.i32());
- Global(Sym(), ty.u32());
- Global(Sym(), ty.f32());
- Global(Sym(), ty.array(T, V, 4));
- Global(Sym(), ty.vec3(T));
- Global(Sym(), ty.mat3x2(T));
- Global(Sym(), ty.pointer(T, ast::StorageClass::kPrivate));
- Global(Sym(), ty.sampled_texture(ast::TextureDimension::k2d, T));
- Global(Sym(), ty.depth_texture(ast::TextureDimension::k2d));
- Global(Sym(), ty.depth_multisampled_texture(ast::TextureDimension::k2d));
- Global(Sym(), ty.external_texture());
- Global(Sym(), ty.multisampled_texture(ast::TextureDimension::k2d, T));
- Global(Sym(), ty.storage_texture(ast::TextureDimension::k2d,
- ast::TexelFormat::kR32Float,
- ast::Access::kRead)); //
- Global(Sym(), ty.sampler(ast::SamplerKind::kSampler));
- Func(Sym(), {}, ty.void_(), {});
+ Alias(Sym(), T);
+ Structure(Sym(), {Member(Sym(), T)});
+ Global(Sym(), T, V);
+ GlobalConst(Sym(), T, V);
+ Func(Sym(), //
+ {Param(Sym(), T)}, //
+ T, // Return type
+ {
+ Decl(Var(Sym(), T, V)), //
+ Decl(Let(Sym(), T, V)), //
+ CallStmt(Call(F, V)), //
+ Block( //
+ Assign(V, V)), //
+ If(V, //
+ Block(Assign(V, V)), //
+ Else(If(V, //
+ Block(Assign(V, V))))), //
+ Ignore(Bitcast(T, V)), //
+ For(Decl(Var(Sym(), T, V)), //
+ Equal(V, V), //
+ Assign(V, V), //
+ Block( //
+ Assign(V, V))), //
+ Loop(Block(Assign(V, V)), //
+ Block(Assign(V, V))), //
+ Switch(V, //
+ Case(Expr(1_i), //
+ Block(Assign(V, V))), //
+ Case(Expr(2_i), //
+ Block(Fallthrough())), //
+ DefaultCase(Block(Assign(V, V)))), //
+ Return(V), //
+ Break(), //
+ Discard(), //
+ }); //
+ // Exercise type traversal
+ Global(Sym(), ty.atomic(T));
+ Global(Sym(), ty.bool_());
+ Global(Sym(), ty.i32());
+ Global(Sym(), ty.u32());
+ Global(Sym(), ty.f32());
+ Global(Sym(), ty.array(T, V, 4));
+ Global(Sym(), ty.vec3(T));
+ Global(Sym(), ty.mat3x2(T));
+ Global(Sym(), ty.pointer(T, ast::StorageClass::kPrivate));
+ Global(Sym(), ty.sampled_texture(ast::TextureDimension::k2d, T));
+ Global(Sym(), ty.depth_texture(ast::TextureDimension::k2d));
+ Global(Sym(), ty.depth_multisampled_texture(ast::TextureDimension::k2d));
+ Global(Sym(), ty.external_texture());
+ Global(Sym(), ty.multisampled_texture(ast::TextureDimension::k2d, T));
+ Global(Sym(), ty.storage_texture(ast::TextureDimension::k2d, ast::TexelFormat::kR32Float,
+ ast::Access::kRead)); //
+ Global(Sym(), ty.sampler(ast::SamplerKind::kSampler));
+ Func(Sym(), {}, ty.void_(), {});
#undef V
#undef T
#undef F
- auto graph = Build();
- for (auto use : symbol_uses) {
- auto* resolved_symbol = graph.resolved_symbols[use.use];
- EXPECT_EQ(resolved_symbol, use.decl) << use.where;
- }
+ auto graph = Build();
+ for (auto use : symbol_uses) {
+ auto* resolved_symbol = graph.resolved_symbols[use.use];
+ EXPECT_EQ(resolved_symbol, use.decl) << use.where;
+ }
}
TEST_F(ResolverDependencyGraphTraversalTest, InferredType) {
- // Check that the nullptr of the var / let type doesn't make things explode
- Global("a", nullptr, Expr(1));
- GlobalConst("b", nullptr, Expr(1));
- WrapInFunction(Var("c", nullptr, Expr(1)), //
- Const("d", nullptr, Expr(1)));
- Build();
+ // Check that the nullptr of the var / let type doesn't make things explode
+ Global("a", nullptr, Expr(1_i));
+ GlobalConst("b", nullptr, Expr(1_i));
+ WrapInFunction(Var("c", nullptr, Expr(1_i)), //
+ Let("d", nullptr, Expr(1_i)));
+ Build();
}
// Reproduces an unbalanced stack push / pop bug in
// DependencyAnalysis::SortGlobals(), found by clusterfuzz.
// See: crbug.com/chromium/1273451
TEST_F(ResolverDependencyGraphTraversalTest, chromium_1273451) {
- Structure("A", {Member("a", ty.i32())});
- Structure("B", {Member("b", ty.i32())});
- Func("f", {Param("a", ty.type_name("A"))}, ty.type_name("B"),
- {
- Return(Construct(ty.type_name("B"))),
- });
- Build();
+ Structure("A", {Member("a", ty.i32())});
+ Structure("B", {Member("b", ty.i32())});
+ Func("f", {Param("a", ty.type_name("A"))}, ty.type_name("B"),
+ {
+ Return(Construct(ty.type_name("B"))),
+ });
+ Build();
}
} // namespace ast_traversal
diff --git a/chromium/third_party/dawn/src/tint/resolver/entry_point_validation_test.cc b/chromium/third_party/dawn/src/tint/resolver/entry_point_validation_test.cc
index 8458a10ca29..5e5df15e3fd 100644
--- a/chromium/third_party/dawn/src/tint/resolver/entry_point_validation_test.cc
+++ b/chromium/third_party/dawn/src/tint/resolver/entry_point_validation_test.cc
@@ -21,6 +21,8 @@
#include "gmock/gmock.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::resolver {
namespace {
@@ -41,311 +43,278 @@ template <typename T>
using mat4x4 = builder::mat4x4<T>;
template <typename T>
using alias = builder::alias<T>;
-using f32 = builder::f32;
-using i32 = builder::i32;
-using u32 = builder::u32;
-class ResolverEntryPointValidationTest : public TestHelper,
- public testing::Test {};
+class ResolverEntryPointValidationTest : public TestHelper, public testing::Test {};
TEST_F(ResolverEntryPointValidationTest, ReturnTypeAttribute_Location) {
- // @stage(fragment)
- // fn main() -> @location(0) f32 { return 1.0; }
- Func(Source{{12, 34}}, "main", {}, ty.f32(), {Return(1.0f)},
- {Stage(ast::PipelineStage::kFragment)}, {Location(0)});
+ // @fragment
+ // fn main() -> @location(0) f32 { return 1.0; }
+ Func(Source{{12, 34}}, "main", {}, ty.f32(), {Return(1_f)},
+ {Stage(ast::PipelineStage::kFragment)}, {Location(0)});
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverEntryPointValidationTest, ReturnTypeAttribute_Builtin) {
- // @stage(vertex)
- // fn main() -> @builtin(position) vec4<f32> { return vec4<f32>(); }
- Func(Source{{12, 34}}, "main", {}, ty.vec4<f32>(),
- {Return(Construct(ty.vec4<f32>()))},
- {Stage(ast::PipelineStage::kVertex)},
- {Builtin(ast::Builtin::kPosition)});
+ // @vertex
+ // fn main() -> @builtin(position) vec4<f32> { return vec4<f32>(); }
+ Func(Source{{12, 34}}, "main", {}, ty.vec4<f32>(), {Return(Construct(ty.vec4<f32>()))},
+ {Stage(ast::PipelineStage::kVertex)}, {Builtin(ast::Builtin::kPosition)});
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverEntryPointValidationTest, ReturnTypeAttribute_Missing) {
- // @stage(vertex)
- // fn main() -> f32 {
- // return 1.0;
- // }
- Func(Source{{12, 34}}, "main", {}, ty.vec4<f32>(),
- {Return(Construct(ty.vec4<f32>()))},
- {Stage(ast::PipelineStage::kVertex)});
+ // @vertex
+ // fn main() -> f32 {
+ // return 1.0;
+ // }
+ Func(Source{{12, 34}}, "main", {}, ty.vec4<f32>(), {Return(Construct(ty.vec4<f32>()))},
+ {Stage(ast::PipelineStage::kVertex)});
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: missing entry point IO attribute on return type");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: missing entry point IO attribute on return type");
}
TEST_F(ResolverEntryPointValidationTest, ReturnTypeAttribute_Multiple) {
- // @stage(vertex)
- // fn main() -> @location(0) @builtin(position) vec4<f32> {
- // return vec4<f32>();
- // }
- Func(Source{{12, 34}}, "main", {}, ty.vec4<f32>(),
- {Return(Construct(ty.vec4<f32>()))},
- {Stage(ast::PipelineStage::kVertex)},
- {Location(Source{{13, 43}}, 0),
- Builtin(Source{{14, 52}}, ast::Builtin::kPosition)});
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), R"(14:52 error: multiple entry point IO attributes
+ // @vertex
+ // fn main() -> @location(0) @builtin(position) vec4<f32> {
+ // return vec4<f32>();
+ // }
+ Func(Source{{12, 34}}, "main", {}, ty.vec4<f32>(), {Return(Construct(ty.vec4<f32>()))},
+ {Stage(ast::PipelineStage::kVertex)},
+ {Location(Source{{13, 43}}, 0), Builtin(Source{{14, 52}}, ast::Builtin::kPosition)});
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), R"(14:52 error: multiple entry point IO attributes
13:43 note: previously consumed location(0))");
}
TEST_F(ResolverEntryPointValidationTest, ReturnType_Struct_Valid) {
- // struct Output {
- // @location(0) a : f32;
- // @builtin(frag_depth) b : f32;
- // };
- // @stage(fragment)
- // fn main() -> Output {
- // return Output();
- // }
- auto* output = Structure(
- "Output", {Member("a", ty.f32(), {Location(0)}),
- Member("b", ty.f32(), {Builtin(ast::Builtin::kFragDepth)})});
- Func(Source{{12, 34}}, "main", {}, ty.Of(output),
- {Return(Construct(ty.Of(output)))},
- {Stage(ast::PipelineStage::kFragment)});
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
-}
-
-TEST_F(ResolverEntryPointValidationTest,
- ReturnType_Struct_MemberMultipleAttributes) {
- // struct Output {
- // @location(0) @builtin(frag_depth) a : f32;
- // };
- // @stage(fragment)
- // fn main() -> Output {
- // return Output();
- // }
- auto* output = Structure(
- "Output",
- {Member("a", ty.f32(),
- {Location(Source{{13, 43}}, 0),
- Builtin(Source{{14, 52}}, ast::Builtin::kFragDepth)})});
- Func(Source{{12, 34}}, "main", {}, ty.Of(output),
- {Return(Construct(ty.Of(output)))},
- {Stage(ast::PipelineStage::kFragment)});
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), R"(14:52 error: multiple entry point IO attributes
+ // struct Output {
+ // @location(0) a : f32;
+ // @builtin(frag_depth) b : f32;
+ // };
+ // @fragment
+ // fn main() -> Output {
+ // return Output();
+ // }
+ auto* output =
+ Structure("Output", {Member("a", ty.f32(), {Location(0)}),
+ Member("b", ty.f32(), {Builtin(ast::Builtin::kFragDepth)})});
+ Func(Source{{12, 34}}, "main", {}, ty.Of(output), {Return(Construct(ty.Of(output)))},
+ {Stage(ast::PipelineStage::kFragment)});
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+}
+
+TEST_F(ResolverEntryPointValidationTest, ReturnType_Struct_MemberMultipleAttributes) {
+ // struct Output {
+ // @location(0) @builtin(frag_depth) a : f32;
+ // };
+ // @fragment
+ // fn main() -> Output {
+ // return Output();
+ // }
+ auto* output =
+ Structure("Output", {Member("a", ty.f32(),
+ {Location(Source{{13, 43}}, 0),
+ Builtin(Source{{14, 52}}, ast::Builtin::kFragDepth)})});
+ Func(Source{{12, 34}}, "main", {}, ty.Of(output), {Return(Construct(ty.Of(output)))},
+ {Stage(ast::PipelineStage::kFragment)});
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), R"(14:52 error: multiple entry point IO attributes
13:43 note: previously consumed location(0)
12:34 note: while analysing entry point 'main')");
}
-TEST_F(ResolverEntryPointValidationTest,
- ReturnType_Struct_MemberMissingAttribute) {
- // struct Output {
- // @location(0) a : f32;
- // b : f32;
- // };
- // @stage(fragment)
- // fn main() -> Output {
- // return Output();
- // }
- auto* output = Structure(
- "Output", {Member(Source{{13, 43}}, "a", ty.f32(), {Location(0)}),
- Member(Source{{14, 52}}, "b", ty.f32(), {})});
- Func(Source{{12, 34}}, "main", {}, ty.Of(output),
- {Return(Construct(ty.Of(output)))},
- {Stage(ast::PipelineStage::kFragment)});
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- R"(14:52 error: missing entry point IO attribute
+TEST_F(ResolverEntryPointValidationTest, ReturnType_Struct_MemberMissingAttribute) {
+ // struct Output {
+ // @location(0) a : f32;
+ // b : f32;
+ // };
+ // @fragment
+ // fn main() -> Output {
+ // return Output();
+ // }
+ auto* output = Structure("Output", {Member(Source{{13, 43}}, "a", ty.f32(), {Location(0)}),
+ Member(Source{{14, 52}}, "b", ty.f32(), {})});
+ Func(Source{{12, 34}}, "main", {}, ty.Of(output), {Return(Construct(ty.Of(output)))},
+ {Stage(ast::PipelineStage::kFragment)});
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ R"(14:52 error: missing entry point IO attribute
12:34 note: while analysing entry point 'main')");
}
TEST_F(ResolverEntryPointValidationTest, ReturnType_Struct_DuplicateBuiltins) {
- // struct Output {
- // @builtin(frag_depth) a : f32;
- // @builtin(frag_depth) b : f32;
- // };
- // @stage(fragment)
- // fn main() -> Output {
- // return Output();
- // }
- auto* output = Structure(
- "Output", {Member("a", ty.f32(), {Builtin(ast::Builtin::kFragDepth)}),
- Member("b", ty.f32(), {Builtin(ast::Builtin::kFragDepth)})});
- Func(Source{{12, 34}}, "main", {}, ty.Of(output),
- {Return(Construct(ty.Of(output)))},
- {Stage(ast::PipelineStage::kFragment)});
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- R"(12:34 error: builtin(frag_depth) attribute appears multiple times as pipeline output
+ // struct Output {
+ // @builtin(frag_depth) a : f32;
+ // @builtin(frag_depth) b : f32;
+ // };
+ // @fragment
+ // fn main() -> Output {
+ // return Output();
+ // }
+ auto* output =
+ Structure("Output", {Member("a", ty.f32(), {Builtin(ast::Builtin::kFragDepth)}),
+ Member("b", ty.f32(), {Builtin(ast::Builtin::kFragDepth)})});
+ Func(Source{{12, 34}}, "main", {}, ty.Of(output), {Return(Construct(ty.Of(output)))},
+ {Stage(ast::PipelineStage::kFragment)});
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(
+ r()->error(),
+ R"(12:34 error: builtin(frag_depth) attribute appears multiple times as pipeline output
12:34 note: while analysing entry point 'main')");
}
TEST_F(ResolverEntryPointValidationTest, ParameterAttribute_Location) {
- // @stage(fragment)
- // fn main(@location(0) param : f32) {}
- auto* param = Param("param", ty.f32(), {Location(0)});
- Func(Source{{12, 34}}, "main", {param}, ty.void_(), {},
- {Stage(ast::PipelineStage::kFragment)});
+ // @fragment
+ // fn main(@location(0) param : f32) {}
+ auto* param = Param("param", ty.f32(), {Location(0)});
+ Func(Source{{12, 34}}, "main", {param}, ty.void_(), {}, {Stage(ast::PipelineStage::kFragment)});
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverEntryPointValidationTest, ParameterAttribute_Missing) {
- // @stage(fragment)
- // fn main(param : f32) {}
- auto* param = Param(Source{{13, 43}}, "param", ty.vec4<f32>());
- Func(Source{{12, 34}}, "main", {param}, ty.void_(), {},
- {Stage(ast::PipelineStage::kFragment)});
+ // @fragment
+ // fn main(param : f32) {}
+ auto* param = Param(Source{{13, 43}}, "param", ty.vec4<f32>());
+ Func(Source{{12, 34}}, "main", {param}, ty.void_(), {}, {Stage(ast::PipelineStage::kFragment)});
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "13:43 error: missing entry point IO attribute on parameter");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "13:43 error: missing entry point IO attribute on parameter");
}
TEST_F(ResolverEntryPointValidationTest, ParameterAttribute_Multiple) {
- // @stage(fragment)
- // fn main(@location(0) @builtin(sample_index) param : u32) {}
- auto* param = Param("param", ty.u32(),
- {Location(Source{{13, 43}}, 0),
- Builtin(Source{{14, 52}}, ast::Builtin::kSampleIndex)});
- Func(Source{{12, 34}}, "main", {param}, ty.void_(), {},
- {Stage(ast::PipelineStage::kFragment)});
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), R"(14:52 error: multiple entry point IO attributes
+ // @fragment
+ // fn main(@location(0) @builtin(sample_index) param : u32) {}
+ auto* param = Param(
+ "param", ty.u32(),
+ {Location(Source{{13, 43}}, 0), Builtin(Source{{14, 52}}, ast::Builtin::kSampleIndex)});
+ Func(Source{{12, 34}}, "main", {param}, ty.void_(), {}, {Stage(ast::PipelineStage::kFragment)});
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), R"(14:52 error: multiple entry point IO attributes
13:43 note: previously consumed location(0))");
}
TEST_F(ResolverEntryPointValidationTest, Parameter_Struct_Valid) {
- // struct Input {
- // @location(0) a : f32;
- // @builtin(sample_index) b : u32;
- // };
- // @stage(fragment)
- // fn main(param : Input) {}
- auto* input = Structure(
- "Input", {Member("a", ty.f32(), {Location(0)}),
- Member("b", ty.u32(), {Builtin(ast::Builtin::kSampleIndex)})});
- auto* param = Param("param", ty.Of(input));
- Func(Source{{12, 34}}, "main", {param}, ty.void_(), {},
- {Stage(ast::PipelineStage::kFragment)});
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
-}
-
-TEST_F(ResolverEntryPointValidationTest,
- Parameter_Struct_MemberMultipleAttributes) {
- // struct Input {
- // @location(0) @builtin(sample_index) a : u32;
- // };
- // @stage(fragment)
- // fn main(param : Input) {}
- auto* input = Structure(
- "Input",
- {Member("a", ty.u32(),
- {Location(Source{{13, 43}}, 0),
- Builtin(Source{{14, 52}}, ast::Builtin::kSampleIndex)})});
- auto* param = Param("param", ty.Of(input));
- Func(Source{{12, 34}}, "main", {param}, ty.void_(), {},
- {Stage(ast::PipelineStage::kFragment)});
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), R"(14:52 error: multiple entry point IO attributes
+ // struct Input {
+ // @location(0) a : f32;
+ // @builtin(sample_index) b : u32;
+ // };
+ // @fragment
+ // fn main(param : Input) {}
+ auto* input =
+ Structure("Input", {Member("a", ty.f32(), {Location(0)}),
+ Member("b", ty.u32(), {Builtin(ast::Builtin::kSampleIndex)})});
+ auto* param = Param("param", ty.Of(input));
+ Func(Source{{12, 34}}, "main", {param}, ty.void_(), {}, {Stage(ast::PipelineStage::kFragment)});
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+}
+
+TEST_F(ResolverEntryPointValidationTest, Parameter_Struct_MemberMultipleAttributes) {
+ // struct Input {
+ // @location(0) @builtin(sample_index) a : u32;
+ // };
+ // @fragment
+ // fn main(param : Input) {}
+ auto* input =
+ Structure("Input", {Member("a", ty.u32(),
+ {Location(Source{{13, 43}}, 0),
+ Builtin(Source{{14, 52}}, ast::Builtin::kSampleIndex)})});
+ auto* param = Param("param", ty.Of(input));
+ Func(Source{{12, 34}}, "main", {param}, ty.void_(), {}, {Stage(ast::PipelineStage::kFragment)});
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), R"(14:52 error: multiple entry point IO attributes
13:43 note: previously consumed location(0)
12:34 note: while analysing entry point 'main')");
}
-TEST_F(ResolverEntryPointValidationTest,
- Parameter_Struct_MemberMissingAttribute) {
- // struct Input {
- // @location(0) a : f32;
- // b : f32;
- // };
- // @stage(fragment)
- // fn main(param : Input) {}
- auto* input = Structure(
- "Input", {Member(Source{{13, 43}}, "a", ty.f32(), {Location(0)}),
- Member(Source{{14, 52}}, "b", ty.f32(), {})});
- auto* param = Param("param", ty.Of(input));
- Func(Source{{12, 34}}, "main", {param}, ty.void_(), {},
- {Stage(ast::PipelineStage::kFragment)});
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), R"(14:52 error: missing entry point IO attribute
+TEST_F(ResolverEntryPointValidationTest, Parameter_Struct_MemberMissingAttribute) {
+ // struct Input {
+ // @location(0) a : f32;
+ // b : f32;
+ // };
+ // @fragment
+ // fn main(param : Input) {}
+ auto* input = Structure("Input", {Member(Source{{13, 43}}, "a", ty.f32(), {Location(0)}),
+ Member(Source{{14, 52}}, "b", ty.f32(), {})});
+ auto* param = Param("param", ty.Of(input));
+ Func(Source{{12, 34}}, "main", {param}, ty.void_(), {}, {Stage(ast::PipelineStage::kFragment)});
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), R"(14:52 error: missing entry point IO attribute
12:34 note: while analysing entry point 'main')");
}
TEST_F(ResolverEntryPointValidationTest, Parameter_DuplicateBuiltins) {
- // @stage(fragment)
- // fn main(@builtin(sample_index) param_a : u32,
- // @builtin(sample_index) param_b : u32) {}
- auto* param_a =
- Param("param_a", ty.u32(), {Builtin(ast::Builtin::kSampleIndex)});
- auto* param_b =
- Param("param_b", ty.u32(), {Builtin(ast::Builtin::kSampleIndex)});
- Func(Source{{12, 34}}, "main", {param_a, param_b}, ty.void_(), {},
- {Stage(ast::PipelineStage::kFragment)});
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- "12:34 error: builtin(sample_index) attribute appears multiple times as "
- "pipeline input");
+ // @fragment
+ // fn main(@builtin(sample_index) param_a : u32,
+ // @builtin(sample_index) param_b : u32) {}
+ auto* param_a = Param("param_a", ty.u32(), {Builtin(ast::Builtin::kSampleIndex)});
+ auto* param_b = Param("param_b", ty.u32(), {Builtin(ast::Builtin::kSampleIndex)});
+ Func(Source{{12, 34}}, "main", {param_a, param_b}, ty.void_(), {},
+ {Stage(ast::PipelineStage::kFragment)});
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: builtin(sample_index) attribute appears multiple times as "
+ "pipeline input");
}
TEST_F(ResolverEntryPointValidationTest, Parameter_Struct_DuplicateBuiltins) {
- // struct InputA {
- // @builtin(sample_index) a : u32;
- // };
- // struct InputB {
- // @builtin(sample_index) a : u32;
- // };
- // @stage(fragment)
- // fn main(param_a : InputA, param_b : InputB) {}
- auto* input_a = Structure(
- "InputA", {Member("a", ty.u32(), {Builtin(ast::Builtin::kSampleIndex)})});
- auto* input_b = Structure(
- "InputB", {Member("a", ty.u32(), {Builtin(ast::Builtin::kSampleIndex)})});
- auto* param_a = Param("param_a", ty.Of(input_a));
- auto* param_b = Param("param_b", ty.Of(input_b));
- Func(Source{{12, 34}}, "main", {param_a, param_b}, ty.void_(), {},
- {Stage(ast::PipelineStage::kFragment)});
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- R"(12:34 error: builtin(sample_index) attribute appears multiple times as pipeline input
+ // struct InputA {
+ // @builtin(sample_index) a : u32;
+ // };
+ // struct InputB {
+ // @builtin(sample_index) a : u32;
+ // };
+ // @fragment
+ // fn main(param_a : InputA, param_b : InputB) {}
+ auto* input_a =
+ Structure("InputA", {Member("a", ty.u32(), {Builtin(ast::Builtin::kSampleIndex)})});
+ auto* input_b =
+ Structure("InputB", {Member("a", ty.u32(), {Builtin(ast::Builtin::kSampleIndex)})});
+ auto* param_a = Param("param_a", ty.Of(input_a));
+ auto* param_b = Param("param_b", ty.Of(input_b));
+ Func(Source{{12, 34}}, "main", {param_a, param_b}, ty.void_(), {},
+ {Stage(ast::PipelineStage::kFragment)});
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(
+ r()->error(),
+ R"(12:34 error: builtin(sample_index) attribute appears multiple times as pipeline input
12:34 note: while analysing entry point 'main')");
}
TEST_F(ResolverEntryPointValidationTest, VertexShaderMustReturnPosition) {
- // @stage(vertex)
- // fn main() {}
- Func(Source{{12, 34}}, "main", {}, ty.void_(), {},
- {Stage(ast::PipelineStage::kVertex)});
+ // @vertex
+ // fn main() {}
+ Func(Source{{12, 34}}, "main", {}, ty.void_(), {}, {Stage(ast::PipelineStage::kVertex)});
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: a vertex shader must include the 'position' builtin "
- "in its return type");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: a vertex shader must include the 'position' builtin "
+ "in its return type");
}
namespace TypeValidationTests {
struct Params {
- builder::ast_type_func_ptr create_ast_type;
- bool is_valid;
+ builder::ast_type_func_ptr create_ast_type;
+ bool is_valid;
};
template <typename T>
constexpr Params ParamsFor(bool is_valid) {
- return Params{DataType<T>::AST, is_valid};
+ return Params{DataType<T>::AST, is_valid};
}
using TypeValidationTest = resolver::ResolverTestWithParam<Params>;
@@ -368,77 +337,73 @@ static constexpr Params cases[] = {
};
TEST_P(TypeValidationTest, BareInputs) {
- // @stage(fragment)
- // fn main(@location(0) @interpolate(flat) a : *) {}
- auto params = GetParam();
- auto* a = Param("a", params.create_ast_type(*this), {Location(0), Flat()});
- Func(Source{{12, 34}}, "main", {a}, ty.void_(), {},
- {Stage(ast::PipelineStage::kFragment)});
-
- if (params.is_valid) {
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- } else {
- EXPECT_FALSE(r()->Resolve());
- }
+ // @fragment
+ // fn main(@location(0) @interpolate(flat) a : *) {}
+ auto params = GetParam();
+ auto* a = Param("a", params.create_ast_type(*this), {Location(0), Flat()});
+ Func(Source{{12, 34}}, "main", {a}, ty.void_(), {}, {Stage(ast::PipelineStage::kFragment)});
+
+ if (params.is_valid) {
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ } else {
+ EXPECT_FALSE(r()->Resolve());
+ }
}
TEST_P(TypeValidationTest, StructInputs) {
- // struct Input {
- // @location(0) @interpolate(flat) a : *;
- // };
- // @stage(fragment)
- // fn main(a : Input) {}
- auto params = GetParam();
- auto* input = Structure("Input", {Member("a", params.create_ast_type(*this),
- {Location(0), Flat()})});
- auto* a = Param("a", ty.Of(input), {});
- Func(Source{{12, 34}}, "main", {a}, ty.void_(), {},
- {Stage(ast::PipelineStage::kFragment)});
-
- if (params.is_valid) {
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- } else {
- EXPECT_FALSE(r()->Resolve());
- }
+ // struct Input {
+ // @location(0) @interpolate(flat) a : *;
+ // };
+ // @fragment
+ // fn main(a : Input) {}
+ auto params = GetParam();
+ auto* input =
+ Structure("Input", {Member("a", params.create_ast_type(*this), {Location(0), Flat()})});
+ auto* a = Param("a", ty.Of(input), {});
+ Func(Source{{12, 34}}, "main", {a}, ty.void_(), {}, {Stage(ast::PipelineStage::kFragment)});
+
+ if (params.is_valid) {
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ } else {
+ EXPECT_FALSE(r()->Resolve());
+ }
}
TEST_P(TypeValidationTest, BareOutputs) {
- // @stage(fragment)
- // fn main() -> @location(0) * {
- // return *();
- // }
- auto params = GetParam();
- Func(Source{{12, 34}}, "main", {}, params.create_ast_type(*this),
- {Return(Construct(params.create_ast_type(*this)))},
- {Stage(ast::PipelineStage::kFragment)}, {Location(0)});
-
- if (params.is_valid) {
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- } else {
- EXPECT_FALSE(r()->Resolve());
- }
+ // @fragment
+ // fn main() -> @location(0) * {
+ // return *();
+ // }
+ auto params = GetParam();
+ Func(Source{{12, 34}}, "main", {}, params.create_ast_type(*this),
+ {Return(Construct(params.create_ast_type(*this)))}, {Stage(ast::PipelineStage::kFragment)},
+ {Location(0)});
+
+ if (params.is_valid) {
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ } else {
+ EXPECT_FALSE(r()->Resolve());
+ }
}
TEST_P(TypeValidationTest, StructOutputs) {
- // struct Output {
- // @location(0) a : *;
- // };
- // @stage(fragment)
- // fn main() -> Output {
- // return Output();
- // }
- auto params = GetParam();
- auto* output = Structure(
- "Output", {Member("a", params.create_ast_type(*this), {Location(0)})});
- Func(Source{{12, 34}}, "main", {}, ty.Of(output),
- {Return(Construct(ty.Of(output)))},
- {Stage(ast::PipelineStage::kFragment)});
-
- if (params.is_valid) {
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- } else {
- EXPECT_FALSE(r()->Resolve());
- }
+ // struct Output {
+ // @location(0) a : *;
+ // };
+ // @fragment
+ // fn main() -> Output {
+ // return Output();
+ // }
+ auto params = GetParam();
+ auto* output = Structure("Output", {Member("a", params.create_ast_type(*this), {Location(0)})});
+ Func(Source{{12, 34}}, "main", {}, ty.Of(output), {Return(Construct(ty.Of(output)))},
+ {Stage(ast::PipelineStage::kFragment)});
+
+ if (params.is_valid) {
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ } else {
+ EXPECT_FALSE(r()->Resolve());
+ }
}
INSTANTIATE_TEST_SUITE_P(ResolverEntryPointValidationTest,
TypeValidationTest,
@@ -451,341 +416,317 @@ namespace {
using LocationAttributeTests = ResolverTest;
TEST_F(LocationAttributeTests, Pass) {
- // @stage(fragment)
- // fn frag_main(@location(0) @interpolate(flat) a: i32) {}
+ // @fragment
+ // fn frag_main(@location(0) @interpolate(flat) a: i32) {}
- auto* p = Param(Source{{12, 34}}, "a", ty.i32(), {Location(0), Flat()});
- Func("frag_main", {p}, ty.void_(), {},
- {Stage(ast::PipelineStage::kFragment)});
+ auto* p = Param(Source{{12, 34}}, "a", ty.i32(), {Location(0), Flat()});
+ Func("frag_main", {p}, ty.void_(), {}, {Stage(ast::PipelineStage::kFragment)});
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(LocationAttributeTests, BadType_Input_bool) {
- // @stage(fragment)
- // fn frag_main(@location(0) a: bool) {}
+ // @fragment
+ // fn frag_main(@location(0) a: bool) {}
- auto* p =
- Param(Source{{12, 34}}, "a", ty.bool_(), {Location(Source{{34, 56}}, 0)});
- Func("frag_main", {p}, ty.void_(), {},
- {Stage(ast::PipelineStage::kFragment)});
+ auto* p = Param(Source{{12, 34}}, "a", ty.bool_(), {Location(Source{{34, 56}}, 0)});
+ Func("frag_main", {p}, ty.void_(), {}, {Stage(ast::PipelineStage::kFragment)});
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: cannot apply 'location' attribute to declaration of "
- "type 'bool'\n"
- "34:56 note: 'location' attribute must only be applied to "
- "declarations of numeric scalar or numeric vector type");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: cannot apply 'location' attribute to declaration of "
+ "type 'bool'\n"
+ "34:56 note: 'location' attribute must only be applied to "
+ "declarations of numeric scalar or numeric vector type");
}
TEST_F(LocationAttributeTests, BadType_Output_Array) {
- // @stage(fragment)
- // fn frag_main()->@location(0) array<f32, 2> { return array<f32, 2>(); }
+ // @fragment
+ // fn frag_main()->@location(0) array<f32, 2> { return array<f32, 2>(); }
- Func(Source{{12, 34}}, "frag_main", {}, ty.array<f32, 2>(),
- {Return(Construct(ty.array<f32, 2>()))},
- {Stage(ast::PipelineStage::kFragment)}, {Location(Source{{34, 56}}, 0)});
+ Func(Source{{12, 34}}, "frag_main", {}, ty.array<f32, 2>(),
+ {Return(Construct(ty.array<f32, 2>()))}, {Stage(ast::PipelineStage::kFragment)},
+ {Location(Source{{34, 56}}, 0)});
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: cannot apply 'location' attribute to declaration of "
- "type 'array<f32, 2>'\n"
- "34:56 note: 'location' attribute must only be applied to "
- "declarations of numeric scalar or numeric vector type");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: cannot apply 'location' attribute to declaration of "
+ "type 'array<f32, 2>'\n"
+ "34:56 note: 'location' attribute must only be applied to "
+ "declarations of numeric scalar or numeric vector type");
}
TEST_F(LocationAttributeTests, BadType_Input_Struct) {
- // struct Input {
- // a : f32;
- // };
- // @stage(fragment)
- // fn main(@location(0) param : Input) {}
- auto* input = Structure("Input", {Member("a", ty.f32())});
- auto* param = Param(Source{{12, 34}}, "param", ty.Of(input),
- {Location(Source{{13, 43}}, 0)});
- Func(Source{{12, 34}}, "main", {param}, ty.void_(), {},
- {Stage(ast::PipelineStage::kFragment)});
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: cannot apply 'location' attribute to declaration of "
- "type 'Input'\n"
- "13:43 note: 'location' attribute must only be applied to "
- "declarations of numeric scalar or numeric vector type");
+ // struct Input {
+ // a : f32;
+ // };
+ // @fragment
+ // fn main(@location(0) param : Input) {}
+ auto* input = Structure("Input", {Member("a", ty.f32())});
+ auto* param = Param(Source{{12, 34}}, "param", ty.Of(input), {Location(Source{{13, 43}}, 0)});
+ Func(Source{{12, 34}}, "main", {param}, ty.void_(), {}, {Stage(ast::PipelineStage::kFragment)});
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: cannot apply 'location' attribute to declaration of "
+ "type 'Input'\n"
+ "13:43 note: 'location' attribute must only be applied to "
+ "declarations of numeric scalar or numeric vector type");
}
TEST_F(LocationAttributeTests, BadType_Input_Struct_NestedStruct) {
- // struct Inner {
- // @location(0) b : f32;
- // };
- // struct Input {
- // a : Inner;
- // };
- // @stage(fragment)
- // fn main(param : Input) {}
- auto* inner = Structure(
- "Inner", {Member(Source{{13, 43}}, "a", ty.f32(), {Location(0)})});
- auto* input =
- Structure("Input", {Member(Source{{14, 52}}, "a", ty.Of(inner))});
- auto* param = Param("param", ty.Of(input));
- Func(Source{{12, 34}}, "main", {param}, ty.void_(), {},
- {Stage(ast::PipelineStage::kFragment)});
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "14:52 error: nested structures cannot be used for entry point IO\n"
- "12:34 note: while analysing entry point 'main'");
+ // struct Inner {
+ // @location(0) b : f32;
+ // };
+ // struct Input {
+ // a : Inner;
+ // };
+ // @fragment
+ // fn main(param : Input) {}
+ auto* inner = Structure("Inner", {Member(Source{{13, 43}}, "a", ty.f32(), {Location(0)})});
+ auto* input = Structure("Input", {Member(Source{{14, 52}}, "a", ty.Of(inner))});
+ auto* param = Param("param", ty.Of(input));
+ Func(Source{{12, 34}}, "main", {param}, ty.void_(), {}, {Stage(ast::PipelineStage::kFragment)});
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "14:52 error: nested structures cannot be used for entry point IO\n"
+ "12:34 note: while analysing entry point 'main'");
}
TEST_F(LocationAttributeTests, BadType_Input_Struct_RuntimeArray) {
- // struct Input {
- // @location(0) a : array<f32>;
- // };
- // @stage(fragment)
- // fn main(param : Input) {}
- auto* input = Structure("Input", {Member(Source{{13, 43}}, "a",
- ty.array<float>(), {Location(0)})});
- auto* param = Param("param", ty.Of(input));
- Func(Source{{12, 34}}, "main", {param}, ty.void_(), {},
- {Stage(ast::PipelineStage::kFragment)});
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "13:43 error: cannot apply 'location' attribute to declaration of "
- "type 'array<f32>'\n"
- "note: 'location' attribute must only be applied to declarations "
- "of numeric scalar or numeric vector type");
+ // struct Input {
+ // @location(0) a : array<f32>;
+ // };
+ // @fragment
+ // fn main(param : Input) {}
+ auto* input =
+ Structure("Input", {Member(Source{{13, 43}}, "a", ty.array<f32>(), {Location(0)})});
+ auto* param = Param("param", ty.Of(input));
+ Func(Source{{12, 34}}, "main", {param}, ty.void_(), {}, {Stage(ast::PipelineStage::kFragment)});
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "13:43 error: cannot apply 'location' attribute to declaration of "
+ "type 'array<f32>'\n"
+ "note: 'location' attribute must only be applied to declarations "
+ "of numeric scalar or numeric vector type");
}
TEST_F(LocationAttributeTests, BadMemberType_Input) {
- // struct S { @location(0) m: array<i32>; };
- // @stage(fragment)
- // fn frag_main( a: S) {}
+ // struct S { @location(0) m: array<i32>; };
+ // @fragment
+ // fn frag_main( a: S) {}
- auto* m = Member(Source{{34, 56}}, "m", ty.array<i32>(),
- ast::AttributeList{Location(Source{{12, 34}}, 0u)});
- auto* s = Structure("S", {m});
- auto* p = Param("a", ty.Of(s));
+ auto* m = Member(Source{{34, 56}}, "m", ty.array<i32>(),
+ ast::AttributeList{Location(Source{{12, 34}}, 0u)});
+ auto* s = Structure("S", {m});
+ auto* p = Param("a", ty.Of(s));
- Func("frag_main", {p}, ty.void_(), {},
- {Stage(ast::PipelineStage::kFragment)});
+ Func("frag_main", {p}, ty.void_(), {}, {Stage(ast::PipelineStage::kFragment)});
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "34:56 error: cannot apply 'location' attribute to declaration of "
- "type 'array<i32>'\n"
- "12:34 note: 'location' attribute must only be applied to "
- "declarations of numeric scalar or numeric vector type");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "34:56 error: cannot apply 'location' attribute to declaration of "
+ "type 'array<i32>'\n"
+ "12:34 note: 'location' attribute must only be applied to "
+ "declarations of numeric scalar or numeric vector type");
}
TEST_F(LocationAttributeTests, BadMemberType_Output) {
- // struct S { @location(0) m: atomic<i32>; };
- // @stage(fragment)
- // fn frag_main() -> S {}
- auto* m = Member(Source{{34, 56}}, "m", ty.atomic<i32>(),
- ast::AttributeList{Location(Source{{12, 34}}, 0u)});
- auto* s = Structure("S", {m});
+ // struct S { @location(0) m: atomic<i32>; };
+ // @fragment
+ // fn frag_main() -> S {}
+ auto* m = Member(Source{{34, 56}}, "m", ty.atomic<i32>(),
+ ast::AttributeList{Location(Source{{12, 34}}, 0u)});
+ auto* s = Structure("S", {m});
- Func("frag_main", {}, ty.Of(s), {Return(Construct(ty.Of(s)))},
- {Stage(ast::PipelineStage::kFragment)}, {});
+ Func("frag_main", {}, ty.Of(s), {Return(Construct(ty.Of(s)))},
+ {Stage(ast::PipelineStage::kFragment)}, {});
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "34:56 error: cannot apply 'location' attribute to declaration of "
- "type 'atomic<i32>'\n"
- "12:34 note: 'location' attribute must only be applied to "
- "declarations of numeric scalar or numeric vector type");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "34:56 error: cannot apply 'location' attribute to declaration of "
+ "type 'atomic<i32>'\n"
+ "12:34 note: 'location' attribute must only be applied to "
+ "declarations of numeric scalar or numeric vector type");
}
TEST_F(LocationAttributeTests, BadMemberType_Unused) {
- // struct S { @location(0) m: mat3x2<f32>; };
+ // struct S { @location(0) m: mat3x2<f32>; };
- auto* m = Member(Source{{34, 56}}, "m", ty.mat3x2<f32>(),
- ast::AttributeList{Location(Source{{12, 34}}, 0u)});
- Structure("S", {m});
+ auto* m = Member(Source{{34, 56}}, "m", ty.mat3x2<f32>(),
+ ast::AttributeList{Location(Source{{12, 34}}, 0u)});
+ Structure("S", {m});
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "34:56 error: cannot apply 'location' attribute to declaration of "
- "type 'mat3x2<f32>'\n"
- "12:34 note: 'location' attribute must only be applied to "
- "declarations of numeric scalar or numeric vector type");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "34:56 error: cannot apply 'location' attribute to declaration of "
+ "type 'mat3x2<f32>'\n"
+ "12:34 note: 'location' attribute must only be applied to "
+ "declarations of numeric scalar or numeric vector type");
}
TEST_F(LocationAttributeTests, ReturnType_Struct_Valid) {
- // struct Output {
- // @location(0) a : f32;
- // @builtin(frag_depth) b : f32;
- // };
- // @stage(fragment)
- // fn main() -> Output {
- // return Output();
- // }
- auto* output = Structure(
- "Output", {Member("a", ty.f32(), {Location(0)}),
- Member("b", ty.f32(), {Builtin(ast::Builtin::kFragDepth)})});
- Func(Source{{12, 34}}, "main", {}, ty.Of(output),
- {Return(Construct(ty.Of(output)))},
- {Stage(ast::PipelineStage::kFragment)});
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ // struct Output {
+ // @location(0) a : f32;
+ // @builtin(frag_depth) b : f32;
+ // };
+ // @fragment
+ // fn main() -> Output {
+ // return Output();
+ // }
+ auto* output =
+ Structure("Output", {Member("a", ty.f32(), {Location(0)}),
+ Member("b", ty.f32(), {Builtin(ast::Builtin::kFragDepth)})});
+ Func(Source{{12, 34}}, "main", {}, ty.Of(output), {Return(Construct(ty.Of(output)))},
+ {Stage(ast::PipelineStage::kFragment)});
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(LocationAttributeTests, ReturnType_Struct) {
- // struct Output {
- // a : f32;
- // };
- // @stage(vertex)
- // fn main() -> @location(0) Output {
- // return Output();
- // }
- auto* output = Structure("Output", {Member("a", ty.f32())});
- Func(Source{{12, 34}}, "main", {}, ty.Of(output),
- {Return(Construct(ty.Of(output)))}, {Stage(ast::PipelineStage::kVertex)},
- {Location(Source{{13, 43}}, 0)});
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: cannot apply 'location' attribute to declaration of "
- "type 'Output'\n"
- "13:43 note: 'location' attribute must only be applied to "
- "declarations of numeric scalar or numeric vector type");
+ // struct Output {
+ // a : f32;
+ // };
+ // @vertex
+ // fn main() -> @location(0) Output {
+ // return Output();
+ // }
+ auto* output = Structure("Output", {Member("a", ty.f32())});
+ Func(Source{{12, 34}}, "main", {}, ty.Of(output), {Return(Construct(ty.Of(output)))},
+ {Stage(ast::PipelineStage::kVertex)}, {Location(Source{{13, 43}}, 0)});
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: cannot apply 'location' attribute to declaration of "
+ "type 'Output'\n"
+ "13:43 note: 'location' attribute must only be applied to "
+ "declarations of numeric scalar or numeric vector type");
}
TEST_F(LocationAttributeTests, ReturnType_Struct_NestedStruct) {
- // struct Inner {
- // @location(0) b : f32;
- // };
- // struct Output {
- // a : Inner;
- // };
- // @stage(fragment)
- // fn main() -> Output { return Output(); }
- auto* inner = Structure(
- "Inner", {Member(Source{{13, 43}}, "a", ty.f32(), {Location(0)})});
- auto* output =
- Structure("Output", {Member(Source{{14, 52}}, "a", ty.Of(inner))});
- Func(Source{{12, 34}}, "main", {}, ty.Of(output),
- {Return(Construct(ty.Of(output)))},
- {Stage(ast::PipelineStage::kFragment)});
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "14:52 error: nested structures cannot be used for entry point IO\n"
- "12:34 note: while analysing entry point 'main'");
+ // struct Inner {
+ // @location(0) b : f32;
+ // };
+ // struct Output {
+ // a : Inner;
+ // };
+ // @fragment
+ // fn main() -> Output { return Output(); }
+ auto* inner = Structure("Inner", {Member(Source{{13, 43}}, "a", ty.f32(), {Location(0)})});
+ auto* output = Structure("Output", {Member(Source{{14, 52}}, "a", ty.Of(inner))});
+ Func(Source{{12, 34}}, "main", {}, ty.Of(output), {Return(Construct(ty.Of(output)))},
+ {Stage(ast::PipelineStage::kFragment)});
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "14:52 error: nested structures cannot be used for entry point IO\n"
+ "12:34 note: while analysing entry point 'main'");
}
TEST_F(LocationAttributeTests, ReturnType_Struct_RuntimeArray) {
- // struct Output {
- // @location(0) a : array<f32>;
- // };
- // @stage(fragment)
- // fn main() -> Output {
- // return Output();
- // }
- auto* output =
- Structure("Output", {Member(Source{{13, 43}}, "a", ty.array<float>(),
- {Location(Source{{12, 34}}, 0)})});
- Func(Source{{12, 34}}, "main", {}, ty.Of(output),
- {Return(Construct(ty.Of(output)))},
- {Stage(ast::PipelineStage::kFragment)});
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "13:43 error: cannot apply 'location' attribute to declaration of "
- "type 'array<f32>'\n"
- "12:34 note: 'location' attribute must only be applied to "
- "declarations of numeric scalar or numeric vector type");
+ // struct Output {
+ // @location(0) a : array<f32>;
+ // };
+ // @fragment
+ // fn main() -> Output {
+ // return Output();
+ // }
+ auto* output = Structure("Output", {Member(Source{{13, 43}}, "a", ty.array<f32>(),
+ {Location(Source{{12, 34}}, 0)})});
+ Func(Source{{12, 34}}, "main", {}, ty.Of(output), {Return(Construct(ty.Of(output)))},
+ {Stage(ast::PipelineStage::kFragment)});
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "13:43 error: cannot apply 'location' attribute to declaration of "
+ "type 'array<f32>'\n"
+ "12:34 note: 'location' attribute must only be applied to "
+ "declarations of numeric scalar or numeric vector type");
}
TEST_F(LocationAttributeTests, ComputeShaderLocation_Input) {
- Func("main", {}, ty.i32(), {Return(Expr(1))},
- {Stage(ast::PipelineStage::kCompute),
- create<ast::WorkgroupAttribute>(Source{{12, 34}}, Expr(1))},
- ast::AttributeList{Location(Source{{12, 34}}, 1)});
+ Func("main", {}, ty.i32(), {Return(Expr(1_i))},
+ {Stage(ast::PipelineStage::kCompute),
+ create<ast::WorkgroupAttribute>(Source{{12, 34}}, Expr(1_i))},
+ ast::AttributeList{Location(Source{{12, 34}}, 1)});
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: attribute is not valid for compute shader output");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: attribute is not valid for compute shader output");
}
TEST_F(LocationAttributeTests, ComputeShaderLocation_Output) {
- auto* input = Param("input", ty.i32(),
- ast::AttributeList{Location(Source{{12, 34}}, 0u)});
- Func("main", {input}, ty.void_(), {},
- {Stage(ast::PipelineStage::kCompute),
- create<ast::WorkgroupAttribute>(Source{{12, 34}}, Expr(1))});
+ auto* input = Param("input", ty.i32(), ast::AttributeList{Location(Source{{12, 34}}, 0u)});
+ Func("main", {input}, ty.void_(), {},
+ {Stage(ast::PipelineStage::kCompute),
+ create<ast::WorkgroupAttribute>(Source{{12, 34}}, Expr(1_i))});
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: attribute is not valid for compute shader inputs");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: attribute is not valid for compute shader inputs");
}
TEST_F(LocationAttributeTests, ComputeShaderLocationStructMember_Output) {
- auto* m =
- Member("m", ty.i32(), ast::AttributeList{Location(Source{{12, 34}}, 0u)});
- auto* s = Structure("S", {m});
- Func(Source{{56, 78}}, "main", {}, ty.Of(s),
- ast::StatementList{Return(Expr(Construct(ty.Of(s))))},
- {Stage(ast::PipelineStage::kCompute),
- create<ast::WorkgroupAttribute>(Source{{12, 34}}, Expr(1))});
+ auto* m = Member("m", ty.i32(), ast::AttributeList{Location(Source{{12, 34}}, 0u)});
+ auto* s = Structure("S", {m});
+ Func(Source{{56, 78}}, "main", {}, ty.Of(s),
+ ast::StatementList{Return(Expr(Construct(ty.Of(s))))},
+ {Stage(ast::PipelineStage::kCompute),
+ create<ast::WorkgroupAttribute>(Source{{12, 34}}, Expr(1_i))});
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: attribute is not valid for compute shader output\n"
- "56:78 note: while analysing entry point 'main'");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: attribute is not valid for compute shader output\n"
+ "56:78 note: while analysing entry point 'main'");
}
TEST_F(LocationAttributeTests, ComputeShaderLocationStructMember_Input) {
- auto* m =
- Member("m", ty.i32(), ast::AttributeList{Location(Source{{12, 34}}, 0u)});
- auto* s = Structure("S", {m});
- auto* input = Param("input", ty.Of(s));
- Func(Source{{56, 78}}, "main", {input}, ty.void_(), {},
- {Stage(ast::PipelineStage::kCompute),
- create<ast::WorkgroupAttribute>(Source{{12, 34}}, Expr(1))});
+ auto* m = Member("m", ty.i32(), ast::AttributeList{Location(Source{{12, 34}}, 0u)});
+ auto* s = Structure("S", {m});
+ auto* input = Param("input", ty.Of(s));
+ Func(Source{{56, 78}}, "main", {input}, ty.void_(), {},
+ {Stage(ast::PipelineStage::kCompute),
+ create<ast::WorkgroupAttribute>(Source{{12, 34}}, Expr(1_i))});
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: attribute is not valid for compute shader inputs\n"
- "56:78 note: while analysing entry point 'main'");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: attribute is not valid for compute shader inputs\n"
+ "56:78 note: while analysing entry point 'main'");
}
TEST_F(LocationAttributeTests, Duplicate_input) {
- // @stage(fragment)
- // fn main(@location(1) param_a : f32,
- // @location(1) param_b : f32) {}
- auto* param_a = Param("param_a", ty.f32(), {Location(1)});
- auto* param_b = Param("param_b", ty.f32(), {Location(Source{{12, 34}}, 1)});
- Func(Source{{12, 34}}, "main", {param_a, param_b}, ty.void_(), {},
- {Stage(ast::PipelineStage::kFragment)});
+ // @fragment
+ // fn main(@location(1) param_a : f32,
+ // @location(1) param_b : f32) {}
+ auto* param_a = Param("param_a", ty.f32(), {Location(1)});
+ auto* param_b = Param("param_b", ty.f32(), {Location(Source{{12, 34}}, 1)});
+ Func(Source{{12, 34}}, "main", {param_a, param_b}, ty.void_(), {},
+ {Stage(ast::PipelineStage::kFragment)});
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: location(1) attribute appears multiple times");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: location(1) attribute appears multiple times");
}
TEST_F(LocationAttributeTests, Duplicate_struct) {
- // struct InputA {
- // @location(1) a : f32;
- // };
- // struct InputB {
- // @location(1) a : f32;
- // };
- // @stage(fragment)
- // fn main(param_a : InputA, param_b : InputB) {}
- auto* input_a = Structure("InputA", {Member("a", ty.f32(), {Location(1)})});
- auto* input_b = Structure(
- "InputB", {Member("a", ty.f32(), {Location(Source{{34, 56}}, 1)})});
- auto* param_a = Param("param_a", ty.Of(input_a));
- auto* param_b = Param("param_b", ty.Of(input_b));
- Func(Source{{12, 34}}, "main", {param_a, param_b}, ty.void_(), {},
- {Stage(ast::PipelineStage::kFragment)});
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "34:56 error: location(1) attribute appears multiple times\n"
- "12:34 note: while analysing entry point 'main'");
+ // struct InputA {
+ // @location(1) a : f32;
+ // };
+ // struct InputB {
+ // @location(1) a : f32;
+ // };
+ // @fragment
+ // fn main(param_a : InputA, param_b : InputB) {}
+ auto* input_a = Structure("InputA", {Member("a", ty.f32(), {Location(1)})});
+ auto* input_b = Structure("InputB", {Member("a", ty.f32(), {Location(Source{{34, 56}}, 1)})});
+ auto* param_a = Param("param_a", ty.Of(input_a));
+ auto* param_b = Param("param_b", ty.Of(input_b));
+ Func(Source{{12, 34}}, "main", {param_a, param_b}, ty.void_(), {},
+ {Stage(ast::PipelineStage::kFragment)});
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "34:56 error: location(1) attribute appears multiple times\n"
+ "12:34 note: while analysing entry point 'main'");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/resolver/function_validation_test.cc b/chromium/third_party/dawn/src/tint/resolver/function_validation_test.cc
index d694f0ea1b8..cf8ddabb273 100644
--- a/chromium/third_party/dawn/src/tint/resolver/function_validation_test.cc
+++ b/chromium/third_party/dawn/src/tint/resolver/function_validation_test.cc
@@ -20,811 +20,788 @@
#include "gmock/gmock.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::resolver {
namespace {
-class ResolverFunctionValidationTest : public TestHelper,
- public testing::Test {};
+class ResolverFunctionValidationTest : public TestHelper, public testing::Test {};
TEST_F(ResolverFunctionValidationTest, DuplicateParameterName) {
- // fn func_a(common_name : f32) { }
- // fn func_b(common_name : f32) { }
- Func("func_a", {Param("common_name", ty.f32())}, ty.void_(), {});
- Func("func_b", {Param("common_name", ty.f32())}, ty.void_(), {});
+ // fn func_a(common_name : f32) { }
+ // fn func_b(common_name : f32) { }
+ Func("func_a", {Param("common_name", ty.f32())}, ty.void_(), {});
+ Func("func_b", {Param("common_name", ty.f32())}, ty.void_(), {});
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverFunctionValidationTest, ParameterMayShadowGlobal) {
- // var<private> common_name : f32;
- // fn func(common_name : f32) { }
- Global("common_name", ty.f32(), ast::StorageClass::kPrivate);
- Func("func", {Param("common_name", ty.f32())}, ty.void_(), {});
+ // var<private> common_name : f32;
+ // fn func(common_name : f32) { }
+ Global("common_name", ty.f32(), ast::StorageClass::kPrivate);
+ Func("func", {Param("common_name", ty.f32())}, ty.void_(), {});
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverFunctionValidationTest, LocalConflictsWithParameter) {
- // fn func(common_name : f32) {
- // let common_name = 1;
- // }
- Func("func", {Param(Source{{12, 34}}, "common_name", ty.f32())}, ty.void_(),
- {Decl(Const(Source{{56, 78}}, "common_name", nullptr, Expr(1)))});
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), R"(56:78 error: redeclaration of 'common_name'
+ // fn func(common_name : f32) {
+ // let common_name = 1i;
+ // }
+ Func("func", {Param(Source{{12, 34}}, "common_name", ty.f32())}, ty.void_(),
+ {Decl(Let(Source{{56, 78}}, "common_name", nullptr, Expr(1_i)))});
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), R"(56:78 error: redeclaration of 'common_name'
12:34 note: 'common_name' previously declared here)");
}
TEST_F(ResolverFunctionValidationTest, NestedLocalMayShadowParameter) {
- // fn func(common_name : f32) {
- // {
- // let common_name = 1;
- // }
- // }
- Func("func", {Param(Source{{12, 34}}, "common_name", ty.f32())}, ty.void_(),
- {Block(Decl(Const(Source{{56, 78}}, "common_name", nullptr, Expr(1))))});
+ // fn func(common_name : f32) {
+ // {
+ // let common_name = 1i;
+ // }
+ // }
+ Func("func", {Param(Source{{12, 34}}, "common_name", ty.f32())}, ty.void_(),
+ {Block(Decl(Let(Source{{56, 78}}, "common_name", nullptr, Expr(1_i))))});
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
}
-TEST_F(ResolverFunctionValidationTest,
- VoidFunctionEndWithoutReturnStatement_Pass) {
- // fn func { var a:i32 = 2; }
- auto* var = Var("a", ty.i32(), Expr(2));
+TEST_F(ResolverFunctionValidationTest, VoidFunctionEndWithoutReturnStatement_Pass) {
+ // fn func { var a:i32 = 2i; }
+ auto* var = Var("a", ty.i32(), Expr(2_i));
- Func(Source{{12, 34}}, "func", ast::VariableList{}, ty.void_(),
- ast::StatementList{
- Decl(var),
- });
+ Func(Source{{12, 34}}, "func", {}, ty.void_(),
+ {
+ Decl(var),
+ });
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverFunctionValidationTest, FunctionUsingSameVariableName_Pass) {
- // fn func() -> i32 {
- // var func:i32 = 0;
- // return func;
- // }
+ // fn func() -> i32 {
+ // var func:i32 = 0i;
+ // return func;
+ // }
+
+ auto* var = Var("func", ty.i32(), Expr(0_i));
+ Func("func", {}, ty.i32(),
+ {
+ Decl(var),
+ Return(Source{{12, 34}}, Expr("func")),
+ });
- auto* var = Var("func", ty.i32(), Expr(0));
- Func("func", ast::VariableList{}, ty.i32(),
- ast::StatementList{
- Decl(var),
- Return(Source{{12, 34}}, Expr("func")),
- },
- ast::AttributeList{});
-
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
}
-TEST_F(ResolverFunctionValidationTest,
- FunctionNameSameAsFunctionScopeVariableName_Pass) {
- // fn a() -> void { var b:i32 = 0; }
- // fn b() -> i32 { return 2; }
+TEST_F(ResolverFunctionValidationTest, FunctionNameSameAsFunctionScopeVariableName_Pass) {
+ // fn a() -> void { var b:i32 = 0i; }
+ // fn b() -> i32 { return 2; }
- auto* var = Var("b", ty.i32(), Expr(0));
- Func("a", ast::VariableList{}, ty.void_(),
- ast::StatementList{
- Decl(var),
- },
- ast::AttributeList{});
+ auto* var = Var("b", ty.i32(), Expr(0_i));
+ Func("a", {}, ty.void_(),
+ {
+ Decl(var),
+ });
- Func(Source{{12, 34}}, "b", ast::VariableList{}, ty.i32(),
- ast::StatementList{
- Return(2),
- },
- ast::AttributeList{});
+ Func(Source{{12, 34}}, "b", {}, ty.i32(),
+ {
+ Return(2_i),
+ });
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverFunctionValidationTest, UnreachableCode_return) {
- // fn func() -> {
- // var a : i32;
- // return;
- // a = 2;
- //}
+ // fn func() -> {
+ // var a : i32;
+ // return;
+ // a = 2i;
+ //}
- auto* decl_a = Decl(Var("a", ty.i32()));
- auto* ret = Return();
- auto* assign_a = Assign(Source{{12, 34}}, "a", 2);
+ auto* decl_a = Decl(Var("a", ty.i32()));
+ auto* ret = Return();
+ auto* assign_a = Assign(Source{{12, 34}}, "a", 2_i);
- Func("func", ast::VariableList{}, ty.void_(), {decl_a, ret, assign_a});
+ Func("func", {}, ty.void_(), {decl_a, ret, assign_a});
- ASSERT_TRUE(r()->Resolve());
+ ASSERT_TRUE(r()->Resolve());
- EXPECT_EQ(r()->error(), "12:34 warning: code is unreachable");
- EXPECT_TRUE(Sem().Get(decl_a)->IsReachable());
- EXPECT_TRUE(Sem().Get(ret)->IsReachable());
- EXPECT_FALSE(Sem().Get(assign_a)->IsReachable());
+ EXPECT_EQ(r()->error(), "12:34 warning: code is unreachable");
+ EXPECT_TRUE(Sem().Get(decl_a)->IsReachable());
+ EXPECT_TRUE(Sem().Get(ret)->IsReachable());
+ EXPECT_FALSE(Sem().Get(assign_a)->IsReachable());
}
TEST_F(ResolverFunctionValidationTest, UnreachableCode_return_InBlocks) {
- // fn func() -> {
- // var a : i32;
- // {{{return;}}}
- // a = 2;
- //}
+ // fn func() -> {
+ // var a : i32;
+ // {{{return;}}}
+ // a = 2i;
+ //}
- auto* decl_a = Decl(Var("a", ty.i32()));
- auto* ret = Return();
- auto* assign_a = Assign(Source{{12, 34}}, "a", 2);
+ auto* decl_a = Decl(Var("a", ty.i32()));
+ auto* ret = Return();
+ auto* assign_a = Assign(Source{{12, 34}}, "a", 2_i);
- Func("func", ast::VariableList{}, ty.void_(),
- {decl_a, Block(Block(Block(ret))), assign_a});
+ Func("func", {}, ty.void_(), {decl_a, Block(Block(Block(ret))), assign_a});
- ASSERT_TRUE(r()->Resolve());
- EXPECT_EQ(r()->error(), "12:34 warning: code is unreachable");
- EXPECT_TRUE(Sem().Get(decl_a)->IsReachable());
- EXPECT_TRUE(Sem().Get(ret)->IsReachable());
- EXPECT_FALSE(Sem().Get(assign_a)->IsReachable());
+ ASSERT_TRUE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 warning: code is unreachable");
+ EXPECT_TRUE(Sem().Get(decl_a)->IsReachable());
+ EXPECT_TRUE(Sem().Get(ret)->IsReachable());
+ EXPECT_FALSE(Sem().Get(assign_a)->IsReachable());
}
TEST_F(ResolverFunctionValidationTest, UnreachableCode_discard) {
- // fn func() -> {
- // var a : i32;
- // discard;
- // a = 2;
- //}
+ // fn func() -> {
+ // var a : i32;
+ // discard;
+ // a = 2i;
+ //}
- auto* decl_a = Decl(Var("a", ty.i32()));
- auto* discard = Discard();
- auto* assign_a = Assign(Source{{12, 34}}, "a", 2);
+ auto* decl_a = Decl(Var("a", ty.i32()));
+ auto* discard = Discard();
+ auto* assign_a = Assign(Source{{12, 34}}, "a", 2_i);
- Func("func", ast::VariableList{}, ty.void_(), {decl_a, discard, assign_a});
+ Func("func", {}, ty.void_(), {decl_a, discard, assign_a});
- ASSERT_TRUE(r()->Resolve());
- EXPECT_EQ(r()->error(), "12:34 warning: code is unreachable");
- EXPECT_TRUE(Sem().Get(decl_a)->IsReachable());
- EXPECT_TRUE(Sem().Get(discard)->IsReachable());
- EXPECT_FALSE(Sem().Get(assign_a)->IsReachable());
+ ASSERT_TRUE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 warning: code is unreachable");
+ EXPECT_TRUE(Sem().Get(decl_a)->IsReachable());
+ EXPECT_TRUE(Sem().Get(discard)->IsReachable());
+ EXPECT_FALSE(Sem().Get(assign_a)->IsReachable());
}
TEST_F(ResolverFunctionValidationTest, UnreachableCode_discard_InBlocks) {
- // fn func() -> {
- // var a : i32;
- // {{{discard;}}}
- // a = 2;
- //}
+ // fn func() -> {
+ // var a : i32;
+ // {{{discard;}}}
+ // a = 2i;
+ //}
- auto* decl_a = Decl(Var("a", ty.i32()));
- auto* discard = Discard();
- auto* assign_a = Assign(Source{{12, 34}}, "a", 2);
+ auto* decl_a = Decl(Var("a", ty.i32()));
+ auto* discard = Discard();
+ auto* assign_a = Assign(Source{{12, 34}}, "a", 2_i);
- Func("func", ast::VariableList{}, ty.void_(),
- {decl_a, Block(Block(Block(discard))), assign_a});
+ Func("func", {}, ty.void_(), {decl_a, Block(Block(Block(discard))), assign_a});
- ASSERT_TRUE(r()->Resolve());
- EXPECT_EQ(r()->error(), "12:34 warning: code is unreachable");
- EXPECT_TRUE(Sem().Get(decl_a)->IsReachable());
- EXPECT_TRUE(Sem().Get(discard)->IsReachable());
- EXPECT_FALSE(Sem().Get(assign_a)->IsReachable());
+ ASSERT_TRUE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 warning: code is unreachable");
+ EXPECT_TRUE(Sem().Get(decl_a)->IsReachable());
+ EXPECT_TRUE(Sem().Get(discard)->IsReachable());
+ EXPECT_FALSE(Sem().Get(assign_a)->IsReachable());
}
TEST_F(ResolverFunctionValidationTest, FunctionEndWithoutReturnStatement_Fail) {
- // fn func() -> int { var a:i32 = 2; }
+ // fn func() -> int { var a:i32 = 2i; }
+
+ auto* var = Var("a", ty.i32(), Expr(2_i));
+
+ Func(Source{{12, 34}}, "func", {}, ty.i32(),
+ {
+ Decl(var),
+ });
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: missing return at end of function");
+}
- auto* var = Var("a", ty.i32(), Expr(2));
+TEST_F(ResolverFunctionValidationTest, VoidFunctionEndWithoutReturnStatementEmptyBody_Pass) {
+ // fn func {}
- Func(Source{{12, 34}}, "func", ast::VariableList{}, ty.i32(),
- ast::StatementList{
- Decl(var),
- },
- ast::AttributeList{});
+ Func(Source{{12, 34}}, "func", {}, ty.void_(), {});
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), "12:34 error: missing return at end of function");
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
}
-TEST_F(ResolverFunctionValidationTest,
- VoidFunctionEndWithoutReturnStatementEmptyBody_Pass) {
- // fn func {}
+TEST_F(ResolverFunctionValidationTest, FunctionEndWithoutReturnStatementEmptyBody_Fail) {
+ // fn func() -> int {}
- Func(Source{{12, 34}}, "func", ast::VariableList{}, ty.void_(),
- ast::StatementList{});
+ Func(Source{{12, 34}}, "func", {}, ty.i32(), {});
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: missing return at end of function");
}
-TEST_F(ResolverFunctionValidationTest,
- FunctionEndWithoutReturnStatementEmptyBody_Fail) {
- // fn func() -> int {}
+TEST_F(ResolverFunctionValidationTest, FunctionTypeMustMatchReturnStatementType_Pass) {
+ // fn func { return; }
+
+ Func("func", {}, ty.void_(), {Return()});
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
+}
- Func(Source{{12, 34}}, "func", ast::VariableList{}, ty.i32(),
- ast::StatementList{}, ast::AttributeList{});
+TEST_F(ResolverFunctionValidationTest, VoidFunctionReturnsAInt) {
+ // fn func { return 2; }
+ Func("func", {}, ty.void_(), {Return(Source{{12, 34}}, Expr(2_a))});
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), "12:34 error: missing return at end of function");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: return statement type must match its function return "
+ "type, returned 'abstract-int', expected 'void'");
}
-TEST_F(ResolverFunctionValidationTest,
- FunctionTypeMustMatchReturnStatementType_Pass) {
- // fn func { return; }
+TEST_F(ResolverFunctionValidationTest, VoidFunctionReturnsAFloat) {
+ // fn func { return 2.0; }
+ Func("func", {}, ty.void_(), {Return(Source{{12, 34}}, Expr(2.0_a))});
- Func("func", ast::VariableList{}, ty.void_(),
- ast::StatementList{
- Return(),
- });
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: return statement type must match its function return "
+ "type, returned 'abstract-float', expected 'void'");
+}
+
+TEST_F(ResolverFunctionValidationTest, VoidFunctionReturnsI32) {
+ // fn func { return 2i; }
+ Func("func", {}, ty.void_(), {Return(Source{{12, 34}}, Expr(2_i))});
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: return statement type must match its function return "
+ "type, returned 'i32', expected 'void'");
}
-TEST_F(ResolverFunctionValidationTest,
- FunctionTypeMustMatchReturnStatementType_fail) {
- // fn func { return 2; }
- Func("func", ast::VariableList{}, ty.void_(),
- ast::StatementList{
- Return(Source{{12, 34}}, Expr(2)),
- },
- ast::AttributeList{});
+TEST_F(ResolverFunctionValidationTest, FunctionTypeMustMatchReturnStatementType_void_fail) {
+ // fn v { return; }
+ // fn func { return v(); }
+ Func("v", {}, ty.void_(), {Return()});
+ Func("func", {}, ty.void_(),
+ {
+ Return(Call(Source{{12, 34}}, "v")),
+ });
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: return statement type must match its function return "
- "type, returned 'i32', expected 'void'");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: function 'v' does not return a value");
}
-TEST_F(ResolverFunctionValidationTest,
- FunctionTypeMustMatchReturnStatementType_void_fail) {
- // fn v { return; }
- // fn func { return v(); }
- Func("v", {}, ty.void_(), {Return()});
- Func("func", {}, ty.void_(),
- {
- Return(Call(Source{{12, 34}}, "v")),
- });
+TEST_F(ResolverFunctionValidationTest, FunctionTypeMustMatchReturnStatementTypeMissing_fail) {
+ // fn func() -> f32 { return; }
+ Func("func", {}, ty.f32(),
+ {
+ Return(Source{{12, 34}}, nullptr),
+ });
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), "12:34 error: function 'v' does not return a value");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: return statement type must match its function return "
+ "type, returned 'void', expected 'f32'");
}
-TEST_F(ResolverFunctionValidationTest,
- FunctionTypeMustMatchReturnStatementTypeMissing_fail) {
- // fn func() -> f32 { return; }
- Func("func", ast::VariableList{}, ty.f32(),
- ast::StatementList{
- Return(Source{{12, 34}}, nullptr),
- },
- ast::AttributeList{});
+TEST_F(ResolverFunctionValidationTest, FunctionTypeMustMatchReturnStatementTypeF32_pass) {
+ // fn func() -> f32 { return 2.0; }
+ Func("func", {}, ty.f32(),
+ {
+ Return(Source{{12, 34}}, Expr(2_f)),
+ });
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: return statement type must match its function return "
- "type, returned 'void', expected 'f32'");
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
}
-TEST_F(ResolverFunctionValidationTest,
- FunctionTypeMustMatchReturnStatementTypeF32_pass) {
- // fn func() -> f32 { return 2.0; }
- Func("func", ast::VariableList{}, ty.f32(),
- ast::StatementList{
- Return(Source{{12, 34}}, Expr(2.f)),
- },
- ast::AttributeList{});
+TEST_F(ResolverFunctionValidationTest, FunctionTypeMustMatchReturnStatementTypeF32_fail) {
+ // fn func() -> f32 { return 2i; }
+ Func("func", {}, ty.f32(),
+ {
+ Return(Source{{12, 34}}, Expr(2_i)),
+ });
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: return statement type must match its function return "
+ "type, returned 'i32', expected 'f32'");
}
-TEST_F(ResolverFunctionValidationTest,
- FunctionTypeMustMatchReturnStatementTypeF32_fail) {
- // fn func() -> f32 { return 2; }
- Func("func", ast::VariableList{}, ty.f32(),
- ast::StatementList{
- Return(Source{{12, 34}}, Expr(2)),
- },
- ast::AttributeList{});
+TEST_F(ResolverFunctionValidationTest, FunctionTypeMustMatchReturnStatementTypeF32Alias_pass) {
+ // type myf32 = f32;
+ // fn func() -> myf32 { return 2.0; }
+ auto* myf32 = Alias("myf32", ty.f32());
+ Func("func", {}, ty.Of(myf32),
+ {
+ Return(Source{{12, 34}}, Expr(2_f)),
+ });
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: return statement type must match its function return "
- "type, returned 'i32', expected 'f32'");
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
}
-TEST_F(ResolverFunctionValidationTest,
- FunctionTypeMustMatchReturnStatementTypeF32Alias_pass) {
- // type myf32 = f32;
- // fn func() -> myf32 { return 2.0; }
- auto* myf32 = Alias("myf32", ty.f32());
- Func("func", ast::VariableList{}, ty.Of(myf32),
- ast::StatementList{
- Return(Source{{12, 34}}, Expr(2.f)),
- },
- ast::AttributeList{});
-
- ASSERT_TRUE(r()->Resolve()) << r()->error();
-}
-
-TEST_F(ResolverFunctionValidationTest,
- FunctionTypeMustMatchReturnStatementTypeF32Alias_fail) {
- // type myf32 = f32;
- // fn func() -> myf32 { return 2; }
- auto* myf32 = Alias("myf32", ty.f32());
- Func("func", ast::VariableList{}, ty.Of(myf32),
- ast::StatementList{
- Return(Source{{12, 34}}, Expr(2u)),
- },
- ast::AttributeList{});
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: return statement type must match its function return "
- "type, returned 'u32', expected 'f32'");
+TEST_F(ResolverFunctionValidationTest, FunctionTypeMustMatchReturnStatementTypeF32Alias_fail) {
+ // type myf32 = f32;
+ // fn func() -> myf32 { return 2u; }
+ auto* myf32 = Alias("myf32", ty.f32());
+ Func("func", {}, ty.Of(myf32),
+ {
+ Return(Source{{12, 34}}, Expr(2_u)),
+ });
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: return statement type must match its function return "
+ "type, returned 'u32', expected 'f32'");
}
TEST_F(ResolverFunctionValidationTest, CannotCallEntryPoint) {
- // @stage(compute) @workgroup_size(1) fn entrypoint() {}
- // fn func() { return entrypoint(); }
- Func("entrypoint", ast::VariableList{}, ty.void_(), {},
- {Stage(ast::PipelineStage::kCompute), WorkgroupSize(1)});
+ // @compute @workgroup_size(1) fn entrypoint() {}
+ // fn func() { return entrypoint(); }
+ Func("entrypoint", {}, ty.void_(), {},
+ {Stage(ast::PipelineStage::kCompute), WorkgroupSize(1_i)});
- Func("func", ast::VariableList{}, ty.void_(),
- {
- CallStmt(Call(Source{{12, 34}}, "entrypoint")),
- });
+ Func("func", {}, ty.void_(),
+ {
+ CallStmt(Call(Source{{12, 34}}, "entrypoint")),
+ });
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
- R"(12:34 error: entry point functions cannot be the target of a function call)");
+ R"(12:34 error: entry point functions cannot be the target of a function call)");
}
TEST_F(ResolverFunctionValidationTest, PipelineStage_MustBeUnique_Fail) {
- // @stage(fragment)
- // @stage(vertex)
- // fn main() { return; }
- Func(Source{{12, 34}}, "main", ast::VariableList{}, ty.void_(),
- ast::StatementList{
- Return(),
- },
- ast::AttributeList{
- Stage(Source{{12, 34}}, ast::PipelineStage::kVertex),
- Stage(Source{{56, 78}}, ast::PipelineStage::kFragment),
- });
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- R"(56:78 error: duplicate stage attribute
+ // @fragment
+ // @vertex
+ // fn main() { return; }
+ Func(Source{{12, 34}}, "main", {}, ty.void_(),
+ {
+ Return(),
+ },
+ ast::AttributeList{
+ Stage(Source{{12, 34}}, ast::PipelineStage::kVertex),
+ Stage(Source{{56, 78}}, ast::PipelineStage::kFragment),
+ });
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ R"(56:78 error: duplicate stage attribute
12:34 note: first attribute declared here)");
}
TEST_F(ResolverFunctionValidationTest, NoPipelineEntryPoints) {
- Func("vtx_func", ast::VariableList{}, ty.void_(),
- ast::StatementList{
- Return(),
- },
- ast::AttributeList{});
+ Func("vtx_func", {}, ty.void_(),
+ {
+ Return(),
+ });
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverFunctionValidationTest, FunctionVarInitWithParam) {
- // fn foo(bar : f32){
- // var baz : f32 = bar;
- // }
+ // fn foo(bar : f32){
+ // var baz : f32 = bar;
+ // }
- auto* bar = Param("bar", ty.f32());
- auto* baz = Var("baz", ty.f32(), Expr("bar"));
+ auto* bar = Param("bar", ty.f32());
+ auto* baz = Var("baz", ty.f32(), Expr("bar"));
- Func("foo", ast::VariableList{bar}, ty.void_(), ast::StatementList{Decl(baz)},
- ast::AttributeList{});
+ Func("foo", ast::VariableList{bar}, ty.void_(), {Decl(baz)});
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverFunctionValidationTest, FunctionConstInitWithParam) {
- // fn foo(bar : f32){
- // let baz : f32 = bar;
- // }
+ // fn foo(bar : f32){
+ // let baz : f32 = bar;
+ // }
- auto* bar = Param("bar", ty.f32());
- auto* baz = Const("baz", ty.f32(), Expr("bar"));
+ auto* bar = Param("bar", ty.f32());
+ auto* baz = Let("baz", ty.f32(), Expr("bar"));
- Func("foo", ast::VariableList{bar}, ty.void_(), ast::StatementList{Decl(baz)},
- ast::AttributeList{});
+ Func("foo", ast::VariableList{bar}, ty.void_(), {Decl(baz)});
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverFunctionValidationTest, FunctionParamsConst) {
- Func("foo", {Param(Sym("arg"), ty.i32())}, ty.void_(),
- {Assign(Expr(Source{{12, 34}}, "arg"), Expr(1)), Return()});
+ Func("foo", {Param(Sym("arg"), ty.i32())}, ty.void_(),
+ {Assign(Expr(Source{{12, 34}}, "arg"), Expr(1_i)), Return()});
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: cannot assign to function parameter\nnote: 'arg' is "
- "declared here:");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: cannot assign to function parameter\nnote: 'arg' is "
+ "declared here:");
}
TEST_F(ResolverFunctionValidationTest, WorkgroupSize_GoodType_ConstU32) {
- // let x = 4u;
- // let x = 8u;
- // @stage(compute) @workgroup_size(x, y, 16u)
- // fn main() {}
- auto* x = GlobalConst("x", ty.u32(), Expr(4u));
- auto* y = GlobalConst("y", ty.u32(), Expr(8u));
- auto* func = Func("main", {}, ty.void_(), {},
- {Stage(ast::PipelineStage::kCompute),
- WorkgroupSize(Expr("x"), Expr("y"), Expr(16u))});
+ // let x = 4u;
+ // let y = 8u;
+ // @compute @workgroup_size(x, y, 16u)
+ // fn main() {}
+ auto* x = GlobalConst("x", ty.u32(), Expr(4_u));
+ auto* y = GlobalConst("y", ty.u32(), Expr(8_u));
+ auto* func = Func("main", {}, ty.void_(), {},
+ {Stage(ast::PipelineStage::kCompute), WorkgroupSize("x", "y", 16_u)});
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* sem_func = Sem().Get(func);
- auto* sem_x = Sem().Get<sem::GlobalVariable>(x);
- auto* sem_y = Sem().Get<sem::GlobalVariable>(y);
+ auto* sem_func = Sem().Get(func);
+ auto* sem_x = Sem().Get<sem::GlobalVariable>(x);
+ auto* sem_y = Sem().Get<sem::GlobalVariable>(y);
- ASSERT_NE(sem_func, nullptr);
- ASSERT_NE(sem_x, nullptr);
- ASSERT_NE(sem_y, nullptr);
+ ASSERT_NE(sem_func, nullptr);
+ ASSERT_NE(sem_x, nullptr);
+ ASSERT_NE(sem_y, nullptr);
- EXPECT_TRUE(sem_func->DirectlyReferencedGlobals().contains(sem_x));
- EXPECT_TRUE(sem_func->DirectlyReferencedGlobals().contains(sem_y));
+ EXPECT_TRUE(sem_func->DirectlyReferencedGlobals().contains(sem_x));
+ EXPECT_TRUE(sem_func->DirectlyReferencedGlobals().contains(sem_y));
+}
+
+TEST_F(ResolverFunctionValidationTest, WorkgroupSize_GoodType_I32) {
+ // @compute @workgroup_size(1i, 2i, 3i)
+ // fn main() {}
+
+ Func("main", {}, ty.void_(), {},
+ {Stage(ast::PipelineStage::kCompute), WorkgroupSize(Source{{12, 34}}, 1_i, 2_i, 3_i)});
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverFunctionValidationTest, WorkgroupSize_GoodType_U32) {
- // @stage(compute) @workgroup_size(1u, 2u, 3u)
- // fn main() {}
+ // @compute @workgroup_size(1u, 2u, 3u)
+ // fn main() {}
- Func("main", {}, ty.void_(), {},
- {Stage(ast::PipelineStage::kCompute),
- WorkgroupSize(Source{{12, 34}}, Expr(1u), Expr(2u), Expr(3u))});
+ Func("main", {}, ty.void_(), {},
+ {Stage(ast::PipelineStage::kCompute), WorkgroupSize(Source{{12, 34}}, 1_u, 2_u, 3_u)});
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
}
-TEST_F(ResolverFunctionValidationTest, WorkgroupSize_MismatchTypeU32) {
- // @stage(compute) @workgroup_size(1u, 2u, 3)
- // fn main() {}
+TEST_F(ResolverFunctionValidationTest, WorkgroupSize_GoodType_I32_AInt) {
+ // @compute @workgroup_size(1, 2i, 3)
+ // fn main() {}
- Func("main", {}, ty.void_(), {},
- {Stage(ast::PipelineStage::kCompute),
- WorkgroupSize(Expr(1u), Expr(2u), Expr(Source{{12, 34}}, 3))});
+ Func("main", {}, ty.void_(), {},
+ {Stage(ast::PipelineStage::kCompute), WorkgroupSize(Source{{12, 34}}, 1_a, 2_i, 3_a)});
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: workgroup_size arguments must be of the same type, "
- "either i32 or u32");
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
+}
+
+TEST_F(ResolverFunctionValidationTest, WorkgroupSize_GoodType_U32_AInt) {
+ // @compute @workgroup_size(1u, 2, 3u)
+ // fn main() {}
+
+ Func("main", {}, ty.void_(), {},
+ {Stage(ast::PipelineStage::kCompute), WorkgroupSize(Source{{12, 34}}, 1_u, 2_a, 3_u)});
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
+}
+
+TEST_F(ResolverFunctionValidationTest, WorkgroupSize_MismatchType_U32) {
+ // @compute @workgroup_size(1u, 2, 3_i)
+ // fn main() {}
+
+ Func("main", {}, ty.void_(), {},
+ {Stage(ast::PipelineStage::kCompute), WorkgroupSize(Source{{12, 34}}, 1_u, 2_a, 3_i)});
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: workgroup_size arguments must be of the same type, either i32 or u32");
}
-TEST_F(ResolverFunctionValidationTest, WorkgroupSize_MismatchTypeI32) {
- // @stage(compute) @workgroup_size(1, 2u, 3)
- // fn main() {}
+TEST_F(ResolverFunctionValidationTest, WorkgroupSize_MismatchType_I32) {
+ // @compute @workgroup_size(1_i, 2u, 3)
+ // fn main() {}
- Func("main", {}, ty.void_(), {},
- {Stage(ast::PipelineStage::kCompute),
- WorkgroupSize(Expr(1), Expr(Source{{12, 34}}, 2u), Expr(3))});
+ Func("main", {}, ty.void_(), {},
+ {Stage(ast::PipelineStage::kCompute), WorkgroupSize(Source{{12, 34}}, 1_i, 2_u, 3_a)});
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: workgroup_size arguments must be of the same type, "
- "either i32 or u32");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: workgroup_size arguments must be of the same type, either i32 or u32");
}
TEST_F(ResolverFunctionValidationTest, WorkgroupSize_Const_TypeMismatch) {
- // let x = 64u;
- // @stage(compute) @workgroup_size(1, x)
- // fn main() {}
- GlobalConst("x", ty.u32(), Expr(64u));
- Func("main", {}, ty.void_(), {},
- {Stage(ast::PipelineStage::kCompute),
- WorkgroupSize(Expr(1), Expr(Source{{12, 34}}, "x"))});
+ // let x = 64u;
+ // @compute @workgroup_size(1i, x)
+ // fn main() {}
+ GlobalConst("x", ty.u32(), Expr(64_u));
+ Func("main", {}, ty.void_(), {},
+ {Stage(ast::PipelineStage::kCompute), WorkgroupSize(Source{{12, 34}}, 1_i, "x")});
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: workgroup_size arguments must be of the same type, "
- "either i32 or u32");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: workgroup_size arguments must be of the same type, either i32 or u32");
}
TEST_F(ResolverFunctionValidationTest, WorkgroupSize_Const_TypeMismatch2) {
- // let x = 64u;
- // let y = 32;
- // @stage(compute) @workgroup_size(x, y)
- // fn main() {}
- GlobalConst("x", ty.u32(), Expr(64u));
- GlobalConst("y", ty.i32(), Expr(32));
- Func("main", {}, ty.void_(), {},
- {Stage(ast::PipelineStage::kCompute),
- WorkgroupSize(Expr("x"), Expr(Source{{12, 34}}, "y"))});
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: workgroup_size arguments must be of the same type, "
- "either i32 or u32");
+ // let x = 64u;
+ // let y = 32i;
+ // @compute @workgroup_size(x, y)
+ // fn main() {}
+ GlobalConst("x", ty.u32(), Expr(64_u));
+ GlobalConst("y", ty.i32(), Expr(32_i));
+ Func("main", {}, ty.void_(), {},
+ {Stage(ast::PipelineStage::kCompute), WorkgroupSize(Source{{12, 34}}, "x", "y")});
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: workgroup_size arguments must be of the same type, either i32 or u32");
}
TEST_F(ResolverFunctionValidationTest, WorkgroupSize_Mismatch_ConstU32) {
- // let x = 4u;
- // let x = 8u;
- // @stage(compute) @workgroup_size(x, y, 16
- // fn main() {}
- GlobalConst("x", ty.u32(), Expr(4u));
- GlobalConst("y", ty.u32(), Expr(8u));
- Func("main", {}, ty.void_(), {},
- {Stage(ast::PipelineStage::kCompute),
- WorkgroupSize(Expr("x"), Expr("y"), Expr(Source{{12, 34}}, 16))});
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: workgroup_size arguments must be of the same type, "
- "either i32 or u32");
+ // let x = 4u;
+ // let y = 8u;
+ // @compute @workgroup_size(x, y, 16i)
+ // fn main() {}
+ GlobalConst("x", ty.u32(), Expr(4_u));
+ GlobalConst("y", ty.u32(), Expr(8_u));
+ Func("main", {}, ty.void_(), {},
+ {Stage(ast::PipelineStage::kCompute), WorkgroupSize(Source{{12, 34}}, "x", "y", 16_i)});
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: workgroup_size arguments must be of the same type, either i32 or u32");
}
TEST_F(ResolverFunctionValidationTest, WorkgroupSize_Literal_BadType) {
- // @stage(compute) @workgroup_size(64.0)
- // fn main() {}
+ // @compute @workgroup_size(64.0)
+ // fn main() {}
- Func("main", {}, ty.void_(), {},
- {Stage(ast::PipelineStage::kCompute),
- WorkgroupSize(Expr(Source{{12, 34}}, 64.f))});
+ Func("main", {}, ty.void_(), {},
+ {Stage(ast::PipelineStage::kCompute), WorkgroupSize(Expr(Source{{12, 34}}, 64_f))});
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: workgroup_size argument must be either literal or "
- "module-scope constant of type i32 or u32");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: workgroup_size argument must be either literal or "
+ "module-scope constant of type i32 or u32");
}
TEST_F(ResolverFunctionValidationTest, WorkgroupSize_Literal_Negative) {
- // @stage(compute) @workgroup_size(-2)
- // fn main() {}
+ // @compute @workgroup_size(-2i)
+ // fn main() {}
- Func("main", {}, ty.void_(), {},
- {Stage(ast::PipelineStage::kCompute),
- WorkgroupSize(Expr(Source{{12, 34}}, -2))});
+ Func("main", {}, ty.void_(), {},
+ {Stage(ast::PipelineStage::kCompute), WorkgroupSize(Expr(Source{{12, 34}}, -2_i))});
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: workgroup_size argument must be at least 1");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: workgroup_size argument must be at least 1");
}
TEST_F(ResolverFunctionValidationTest, WorkgroupSize_Literal_Zero) {
- // @stage(compute) @workgroup_size(0)
- // fn main() {}
+ // @compute @workgroup_size(0i)
+ // fn main() {}
- Func("main", {}, ty.void_(), {},
- {Stage(ast::PipelineStage::kCompute),
- WorkgroupSize(Expr(Source{{12, 34}}, 0))});
+ Func("main", {}, ty.void_(), {},
+ {Stage(ast::PipelineStage::kCompute), WorkgroupSize(Expr(Source{{12, 34}}, 0_i))});
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: workgroup_size argument must be at least 1");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: workgroup_size argument must be at least 1");
}
TEST_F(ResolverFunctionValidationTest, WorkgroupSize_Const_BadType) {
- // let x = 64.0;
- // @stage(compute) @workgroup_size(x)
- // fn main() {}
- GlobalConst("x", ty.f32(), Expr(64.f));
- Func("main", {}, ty.void_(), {},
- {Stage(ast::PipelineStage::kCompute),
- WorkgroupSize(Expr(Source{{12, 34}}, "x"))});
+ // let x = 64.0;
+ // @compute @workgroup_size(x)
+ // fn main() {}
+ GlobalConst("x", ty.f32(), Expr(64_f));
+ Func("main", {}, ty.void_(), {},
+ {Stage(ast::PipelineStage::kCompute), WorkgroupSize(Expr(Source{{12, 34}}, "x"))});
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: workgroup_size argument must be either literal or "
- "module-scope constant of type i32 or u32");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: workgroup_size argument must be either literal or "
+ "module-scope constant of type i32 or u32");
}
TEST_F(ResolverFunctionValidationTest, WorkgroupSize_Const_Negative) {
- // let x = -2;
- // @stage(compute) @workgroup_size(x)
- // fn main() {}
- GlobalConst("x", ty.i32(), Expr(-2));
- Func("main", {}, ty.void_(), {},
- {Stage(ast::PipelineStage::kCompute),
- WorkgroupSize(Expr(Source{{12, 34}}, "x"))});
+ // let x = -2i;
+ // @compute @workgroup_size(x)
+ // fn main() {}
+ GlobalConst("x", ty.i32(), Expr(-2_i));
+ Func("main", {}, ty.void_(), {},
+ {Stage(ast::PipelineStage::kCompute), WorkgroupSize(Expr(Source{{12, 34}}, "x"))});
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: workgroup_size argument must be at least 1");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: workgroup_size argument must be at least 1");
}
TEST_F(ResolverFunctionValidationTest, WorkgroupSize_Const_Zero) {
- // let x = 0;
- // @stage(compute) @workgroup_size(x)
- // fn main() {}
- GlobalConst("x", ty.i32(), Expr(0));
- Func("main", {}, ty.void_(), {},
- {Stage(ast::PipelineStage::kCompute),
- WorkgroupSize(Expr(Source{{12, 34}}, "x"))});
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: workgroup_size argument must be at least 1");
-}
-
-TEST_F(ResolverFunctionValidationTest,
- WorkgroupSize_Const_NestedZeroValueConstructor) {
- // let x = i32(i32(i32()));
- // @stage(compute) @workgroup_size(x)
- // fn main() {}
- GlobalConst("x", ty.i32(),
- Construct(ty.i32(), Construct(ty.i32(), Construct(ty.i32()))));
- Func("main", {}, ty.void_(), {},
- {Stage(ast::PipelineStage::kCompute),
- WorkgroupSize(Expr(Source{{12, 34}}, "x"))});
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: workgroup_size argument must be at least 1");
+ // let x = 0i;
+ // @compute @workgroup_size(x)
+ // fn main() {}
+ GlobalConst("x", ty.i32(), Expr(0_i));
+ Func("main", {}, ty.void_(), {},
+ {Stage(ast::PipelineStage::kCompute), WorkgroupSize(Expr(Source{{12, 34}}, "x"))});
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: workgroup_size argument must be at least 1");
+}
+
+TEST_F(ResolverFunctionValidationTest, WorkgroupSize_Const_NestedZeroValueConstructor) {
+ // let x = i32(i32(i32()));
+ // @compute @workgroup_size(x)
+ // fn main() {}
+ GlobalConst("x", ty.i32(), Construct(ty.i32(), Construct(ty.i32(), Construct(ty.i32()))));
+ Func("main", {}, ty.void_(), {},
+ {Stage(ast::PipelineStage::kCompute), WorkgroupSize(Expr(Source{{12, 34}}, "x"))});
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: workgroup_size argument must be at least 1");
}
TEST_F(ResolverFunctionValidationTest, WorkgroupSize_NonConst) {
- // var<private> x = 0;
- // @stage(compute) @workgroup_size(x)
- // fn main() {}
- Global("x", ty.i32(), ast::StorageClass::kPrivate, Expr(64));
- Func("main", {}, ty.void_(), {},
- {Stage(ast::PipelineStage::kCompute),
- WorkgroupSize(Expr(Source{{12, 34}}, "x"))});
+ // var<private> x = 64i;
+ // @compute @workgroup_size(x)
+ // fn main() {}
+ Global("x", ty.i32(), ast::StorageClass::kPrivate, Expr(64_i));
+ Func("main", {}, ty.void_(), {},
+ {Stage(ast::PipelineStage::kCompute), WorkgroupSize(Expr(Source{{12, 34}}, "x"))});
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: workgroup_size argument must be either literal or "
- "module-scope constant of type i32 or u32");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: workgroup_size argument must be either literal or "
+ "module-scope constant of type i32 or u32");
}
TEST_F(ResolverFunctionValidationTest, WorkgroupSize_InvalidExpr) {
- // @stage(compute) @workgroup_size(i32(1))
- // fn main() {}
- Func("main", {}, ty.void_(), {},
- {Stage(ast::PipelineStage::kCompute),
- WorkgroupSize(Construct(Source{{12, 34}}, ty.i32(), 1))});
+ // @compute @workgroup_size(i32(1))
+ // fn main() {}
+ Func("main", {}, ty.void_(), {},
+ {Stage(ast::PipelineStage::kCompute),
+ WorkgroupSize(Construct(Source{{12, 34}}, ty.i32(), 1_i))});
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: workgroup_size argument must be either a literal or "
- "a module-scope constant");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: workgroup_size argument must be either a literal or "
+ "a module-scope constant");
}
TEST_F(ResolverFunctionValidationTest, ReturnIsConstructible_NonPlain) {
- auto* ret_type =
- ty.pointer(Source{{12, 34}}, ty.i32(), ast::StorageClass::kFunction);
- Func("f", {}, ret_type, {});
+ auto* ret_type = ty.pointer(Source{{12, 34}}, ty.i32(), ast::StorageClass::kFunction);
+ Func("f", {}, ret_type, {});
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: function return type must be a constructible type");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: function return type must be a constructible type");
}
TEST_F(ResolverFunctionValidationTest, ReturnIsConstructible_AtomicInt) {
- auto* ret_type = ty.atomic(Source{{12, 34}}, ty.i32());
- Func("f", {}, ret_type, {});
+ auto* ret_type = ty.atomic(Source{{12, 34}}, ty.i32());
+ Func("f", {}, ret_type, {});
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: function return type must be a constructible type");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: function return type must be a constructible type");
}
TEST_F(ResolverFunctionValidationTest, ReturnIsConstructible_ArrayOfAtomic) {
- auto* ret_type = ty.array(Source{{12, 34}}, ty.atomic(ty.i32()), 10);
- Func("f", {}, ret_type, {});
+ auto* ret_type = ty.array(Source{{12, 34}}, ty.atomic(ty.i32()), 10_u);
+ Func("f", {}, ret_type, {});
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: function return type must be a constructible type");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: function return type must be a constructible type");
}
TEST_F(ResolverFunctionValidationTest, ReturnIsConstructible_StructOfAtomic) {
- Structure("S", {Member("m", ty.atomic(ty.i32()))});
- auto* ret_type = ty.type_name(Source{{12, 34}}, "S");
- Func("f", {}, ret_type, {});
+ Structure("S", {Member("m", ty.atomic(ty.i32()))});
+ auto* ret_type = ty.type_name(Source{{12, 34}}, "S");
+ Func("f", {}, ret_type, {});
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: function return type must be a constructible type");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: function return type must be a constructible type");
}
TEST_F(ResolverFunctionValidationTest, ReturnIsConstructible_RuntimeArray) {
- auto* ret_type = ty.array(Source{{12, 34}}, ty.i32());
- Func("f", {}, ret_type, {});
+ auto* ret_type = ty.array(Source{{12, 34}}, ty.i32());
+ Func("f", {}, ret_type, {});
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: function return type must be a constructible type");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: function return type must be a constructible type");
}
TEST_F(ResolverFunctionValidationTest, ParameterStoreType_NonAtomicFree) {
- Structure("S", {Member("m", ty.atomic(ty.i32()))});
- auto* ret_type = ty.type_name(Source{{12, 34}}, "S");
- auto* bar = Param(Source{{12, 34}}, "bar", ret_type);
- Func("f", ast::VariableList{bar}, ty.void_(), {});
+ Structure("S", {Member("m", ty.atomic(ty.i32()))});
+ auto* ret_type = ty.type_name(Source{{12, 34}}, "S");
+ auto* bar = Param(Source{{12, 34}}, "bar", ret_type);
+ Func("f", ast::VariableList{bar}, ty.void_(), {});
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: store type of function parameter must be a "
- "constructible type");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: store type of function parameter must be a "
+ "constructible type");
}
TEST_F(ResolverFunctionValidationTest, ParameterStoreType_AtomicFree) {
- Structure("S", {Member("m", ty.i32())});
- auto* ret_type = ty.type_name(Source{{12, 34}}, "S");
- auto* bar = Param(Source{{12, 34}}, "bar", ret_type);
- Func("f", ast::VariableList{bar}, ty.void_(), {});
+ Structure("S", {Member("m", ty.i32())});
+ auto* ret_type = ty.type_name(Source{{12, 34}}, "S");
+ auto* bar = Param(Source{{12, 34}}, "bar", ret_type);
+ Func("f", ast::VariableList{bar}, ty.void_(), {});
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverFunctionValidationTest, ParametersAtLimit) {
- ast::VariableList params;
- for (int i = 0; i < 255; i++) {
- params.emplace_back(Param("param_" + std::to_string(i), ty.i32()));
- }
- Func(Source{{12, 34}}, "f", params, ty.void_(), {});
+ ast::VariableList params;
+ for (int i = 0; i < 255; i++) {
+ params.emplace_back(Param("param_" + std::to_string(i), ty.i32()));
+ }
+ Func(Source{{12, 34}}, "f", params, ty.void_(), {});
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverFunctionValidationTest, ParametersOverLimit) {
- ast::VariableList params;
- for (int i = 0; i < 256; i++) {
- params.emplace_back(Param("param_" + std::to_string(i), ty.i32()));
- }
- Func(Source{{12, 34}}, "f", params, ty.void_(), {});
+ ast::VariableList params;
+ for (int i = 0; i < 256; i++) {
+ params.emplace_back(Param("param_" + std::to_string(i), ty.i32()));
+ }
+ Func(Source{{12, 34}}, "f", params, ty.void_(), {});
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: functions may declare at most 255 parameters");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: functions may declare at most 255 parameters");
}
TEST_F(ResolverFunctionValidationTest, ParameterVectorNoType) {
- // fn f(p : vec3) {}
+ // fn f(p : vec3) {}
- Func(Source{{12, 34}}, "f",
- {Param("p", create<ast::Vector>(Source{{12, 34}}, nullptr, 3))},
- ty.void_(), {});
+ Func(Source{{12, 34}}, "f", {Param("p", create<ast::Vector>(Source{{12, 34}}, nullptr, 3))},
+ ty.void_(), {});
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), "12:34 error: missing vector element type");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: missing vector element type");
}
TEST_F(ResolverFunctionValidationTest, ParameterMatrixNoType) {
- // fn f(p : vec3) {}
+ // fn f(p : mat3x3) {}
- Func(Source{{12, 34}}, "f",
- {Param("p", create<ast::Matrix>(Source{{12, 34}}, nullptr, 3, 3))},
- ty.void_(), {});
+ Func(Source{{12, 34}}, "f", {Param("p", create<ast::Matrix>(Source{{12, 34}}, nullptr, 3, 3))},
+ ty.void_(), {});
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), "12:34 error: missing matrix element type");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: missing matrix element type");
}
struct TestParams {
- ast::StorageClass storage_class;
- bool should_pass;
+ ast::StorageClass storage_class;
+ bool should_pass;
};
struct TestWithParams : ResolverTestWithParam<TestParams> {};
using ResolverFunctionParameterValidationTest = TestWithParams;
TEST_P(ResolverFunctionParameterValidationTest, StorageClass) {
- auto& param = GetParam();
- auto* ptr_type = ty.pointer(Source{{12, 34}}, ty.i32(), param.storage_class);
- auto* arg = Param(Source{{12, 34}}, "p", ptr_type);
- Func("f", ast::VariableList{arg}, ty.void_(), {});
-
- if (param.should_pass) {
- ASSERT_TRUE(r()->Resolve()) << r()->error();
- } else {
- std::stringstream ss;
- ss << param.storage_class;
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: function parameter of pointer type cannot be in '" +
- ss.str() + "' storage class");
- }
-}
-INSTANTIATE_TEST_SUITE_P(
- ResolverTest,
- ResolverFunctionParameterValidationTest,
- testing::Values(TestParams{ast::StorageClass::kNone, false},
- TestParams{ast::StorageClass::kInput, false},
- TestParams{ast::StorageClass::kOutput, false},
- TestParams{ast::StorageClass::kUniform, false},
- TestParams{ast::StorageClass::kWorkgroup, true},
- TestParams{ast::StorageClass::kUniformConstant, false},
- TestParams{ast::StorageClass::kStorage, false},
- TestParams{ast::StorageClass::kPrivate, true},
- TestParams{ast::StorageClass::kFunction, true}));
+ auto& param = GetParam();
+ auto* ptr_type = ty.pointer(Source{{12, 34}}, ty.i32(), param.storage_class);
+ auto* arg = Param(Source{{12, 34}}, "p", ptr_type);
+ Func("f", ast::VariableList{arg}, ty.void_(), {});
+
+ if (param.should_pass) {
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
+ } else {
+ std::stringstream ss;
+ ss << param.storage_class;
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: function parameter of pointer type cannot be in '" +
+ ss.str() + "' storage class");
+ }
+}
+INSTANTIATE_TEST_SUITE_P(ResolverTest,
+ ResolverFunctionParameterValidationTest,
+ testing::Values(TestParams{ast::StorageClass::kNone, false},
+ TestParams{ast::StorageClass::kInput, false},
+ TestParams{ast::StorageClass::kOutput, false},
+ TestParams{ast::StorageClass::kUniform, false},
+ TestParams{ast::StorageClass::kWorkgroup, true},
+ TestParams{ast::StorageClass::kHandle, false},
+ TestParams{ast::StorageClass::kStorage, false},
+ TestParams{ast::StorageClass::kPrivate, true},
+ TestParams{ast::StorageClass::kFunction, true}));
} // namespace
} // namespace tint::resolver
diff --git a/chromium/third_party/dawn/src/tint/resolver/host_shareable_validation_test.cc b/chromium/third_party/dawn/src/tint/resolver/host_shareable_validation_test.cc
index 80254f67ef7..01fbfb0a58c 100644
--- a/chromium/third_party/dawn/src/tint/resolver/host_shareable_validation_test.cc
+++ b/chromium/third_party/dawn/src/tint/resolver/host_shareable_validation_test.cc
@@ -24,82 +24,78 @@ namespace {
using ResolverHostShareableValidationTest = ResolverTest;
TEST_F(ResolverHostShareableValidationTest, BoolMember) {
- auto* s = Structure("S", {Member(Source{{12, 34}}, "x", ty.bool_())});
+ auto* s = Structure("S", {Member(Source{{12, 34}}, "x", ty.bool_())});
- Global(Source{{56, 78}}, "g", ty.Of(s), ast::StorageClass::kStorage,
- ast::Access::kRead,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(0),
- });
+ Global(Source{{56, 78}}, "g", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kRead,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(0),
+ });
- ASSERT_FALSE(r()->Resolve());
+ ASSERT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- R"(56:78 error: Type 'bool' cannot be used in storage class 'storage' as it is non-host-shareable
+ EXPECT_EQ(
+ r()->error(),
+ R"(56:78 error: Type 'bool' cannot be used in storage class 'storage' as it is non-host-shareable
12:34 note: while analysing structure member S.x
56:78 note: while instantiating variable g)");
}
TEST_F(ResolverHostShareableValidationTest, BoolVectorMember) {
- auto* s = Structure("S", {Member(Source{{12, 34}}, "x", ty.vec3<bool>())});
+ auto* s = Structure("S", {Member(Source{{12, 34}}, "x", ty.vec3<bool>())});
- Global(Source{{56, 78}}, "g", ty.Of(s), ast::StorageClass::kStorage,
- ast::Access::kRead,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(0),
- });
+ Global(Source{{56, 78}}, "g", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kRead,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(0),
+ });
- ASSERT_FALSE(r()->Resolve());
+ ASSERT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- R"(56:78 error: Type 'vec3<bool>' cannot be used in storage class 'storage' as it is non-host-shareable
+ EXPECT_EQ(
+ r()->error(),
+ R"(56:78 error: Type 'vec3<bool>' cannot be used in storage class 'storage' as it is non-host-shareable
12:34 note: while analysing structure member S.x
56:78 note: while instantiating variable g)");
}
TEST_F(ResolverHostShareableValidationTest, Aliases) {
- auto* a1 = Alias("a1", ty.bool_());
- auto* s = Structure("S", {Member(Source{{12, 34}}, "x", ty.Of(a1))});
- auto* a2 = Alias("a2", ty.Of(s));
- Global(Source{{56, 78}}, "g", ty.Of(a2), ast::StorageClass::kStorage,
- ast::Access::kRead,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(0),
- });
-
- ASSERT_FALSE(r()->Resolve());
-
- EXPECT_EQ(
- r()->error(),
- R"(56:78 error: Type 'bool' cannot be used in storage class 'storage' as it is non-host-shareable
+ auto* a1 = Alias("a1", ty.bool_());
+ auto* s = Structure("S", {Member(Source{{12, 34}}, "x", ty.Of(a1))});
+ auto* a2 = Alias("a2", ty.Of(s));
+ Global(Source{{56, 78}}, "g", ty.Of(a2), ast::StorageClass::kStorage, ast::Access::kRead,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(0),
+ });
+
+ ASSERT_FALSE(r()->Resolve());
+
+ EXPECT_EQ(
+ r()->error(),
+ R"(56:78 error: Type 'bool' cannot be used in storage class 'storage' as it is non-host-shareable
12:34 note: while analysing structure member S.x
56:78 note: while instantiating variable g)");
}
TEST_F(ResolverHostShareableValidationTest, NestedStructures) {
- auto* i1 = Structure("I1", {Member(Source{{1, 2}}, "x", ty.bool_())});
- auto* i2 = Structure("I2", {Member(Source{{3, 4}}, "y", ty.Of(i1))});
- auto* i3 = Structure("I3", {Member(Source{{5, 6}}, "z", ty.Of(i2))});
+ auto* i1 = Structure("I1", {Member(Source{{1, 2}}, "x", ty.bool_())});
+ auto* i2 = Structure("I2", {Member(Source{{3, 4}}, "y", ty.Of(i1))});
+ auto* i3 = Structure("I3", {Member(Source{{5, 6}}, "z", ty.Of(i2))});
- auto* s = Structure("S", {Member(Source{{7, 8}}, "m", ty.Of(i3))});
+ auto* s = Structure("S", {Member(Source{{7, 8}}, "m", ty.Of(i3))});
- Global(Source{{9, 10}}, "g", ty.Of(s), ast::StorageClass::kStorage,
- ast::Access::kRead,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(0),
- });
+ Global(Source{{9, 10}}, "g", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kRead,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(0),
+ });
- ASSERT_FALSE(r()->Resolve());
+ ASSERT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- R"(9:10 error: Type 'bool' cannot be used in storage class 'storage' as it is non-host-shareable
+ EXPECT_EQ(
+ r()->error(),
+ R"(9:10 error: Type 'bool' cannot be used in storage class 'storage' as it is non-host-shareable
1:2 note: while analysing structure member I1.x
3:4 note: while analysing structure member I2.y
5:6 note: while analysing structure member I3.z
@@ -108,35 +104,33 @@ TEST_F(ResolverHostShareableValidationTest, NestedStructures) {
}
TEST_F(ResolverHostShareableValidationTest, NoError) {
- auto* i1 =
- Structure("I1", {
- Member(Source{{1, 1}}, "x1", ty.f32()),
- Member(Source{{2, 1}}, "y1", ty.vec3<f32>()),
- Member(Source{{3, 1}}, "z1", ty.array<i32, 4>()),
- });
- auto* a1 = Alias("a1", ty.Of(i1));
- auto* i2 = Structure("I2", {
- Member(Source{{4, 1}}, "x2", ty.mat2x2<f32>()),
- Member(Source{{5, 1}}, "y2", ty.Of(i1)),
- });
- auto* a2 = Alias("a2", ty.Of(i2));
- auto* i3 = Structure("I3", {
- Member(Source{{4, 1}}, "x3", ty.Of(a1)),
- Member(Source{{5, 1}}, "y3", ty.Of(i2)),
- Member(Source{{6, 1}}, "z3", ty.Of(a2)),
- });
-
- auto* s = Structure("S", {Member(Source{{7, 8}}, "m", ty.Of(i3))});
-
- Global(Source{{9, 10}}, "g", ty.Of(s), ast::StorageClass::kStorage,
- ast::Access::kRead,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(0),
- });
- WrapInFunction();
-
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ auto* i1 = Structure("I1", {
+ Member(Source{{1, 1}}, "x1", ty.f32()),
+ Member(Source{{2, 1}}, "y1", ty.vec3<f32>()),
+ Member(Source{{3, 1}}, "z1", ty.array<i32, 4>()),
+ });
+ auto* a1 = Alias("a1", ty.Of(i1));
+ auto* i2 = Structure("I2", {
+ Member(Source{{4, 1}}, "x2", ty.mat2x2<f32>()),
+ Member(Source{{5, 1}}, "y2", ty.Of(i1)),
+ });
+ auto* a2 = Alias("a2", ty.Of(i2));
+ auto* i3 = Structure("I3", {
+ Member(Source{{4, 1}}, "x3", ty.Of(a1)),
+ Member(Source{{5, 1}}, "y3", ty.Of(i2)),
+ Member(Source{{6, 1}}, "z3", ty.Of(a2)),
+ });
+
+ auto* s = Structure("S", {Member(Source{{7, 8}}, "m", ty.Of(i3))});
+
+ Global(Source{{9, 10}}, "g", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kRead,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(0),
+ });
+ WrapInFunction();
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/resolver/increment_decrement_validation_test.cc b/chromium/third_party/dawn/src/tint/resolver/increment_decrement_validation_test.cc
index d97facf4485..e03352e97b9 100644
--- a/chromium/third_party/dawn/src/tint/resolver/increment_decrement_validation_test.cc
+++ b/chromium/third_party/dawn/src/tint/resolver/increment_decrement_validation_test.cc
@@ -17,217 +17,213 @@
#include "gmock/gmock.h"
#include "src/tint/resolver/resolver_test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::resolver {
namespace {
using ResolverIncrementDecrementValidationTest = ResolverTest;
TEST_F(ResolverIncrementDecrementValidationTest, Increment_Signed) {
- // var a : i32 = 2;
- // a++;
- auto* var = Var("a", ty.i32(), ast::StorageClass::kNone, Expr(2));
- WrapInFunction(var, Increment(Source{{12, 34}}, "a"));
+ // var a : i32 = 2;
+ // a++;
+ auto* var = Var("a", ty.i32(), ast::StorageClass::kNone, Expr(2_i));
+ WrapInFunction(var, Increment(Source{{12, 34}}, "a"));
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverIncrementDecrementValidationTest, Decrement_Signed) {
- // var a : i32 = 2;
- // a--;
- auto* var = Var("a", ty.i32(), ast::StorageClass::kNone, Expr(2));
- WrapInFunction(var, Decrement(Source{{12, 34}}, "a"));
+ // var a : i32 = 2;
+ // a--;
+ auto* var = Var("a", ty.i32(), ast::StorageClass::kNone, Expr(2_i));
+ WrapInFunction(var, Decrement(Source{{12, 34}}, "a"));
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverIncrementDecrementValidationTest, Increment_Unsigned) {
- // var a : u32 = 2u;
- // a++;
- auto* var = Var("a", ty.u32(), ast::StorageClass::kNone, Expr(2u));
- WrapInFunction(var, Increment(Source{{12, 34}}, "a"));
+ // var a : u32 = 2u;
+ // a++;
+ auto* var = Var("a", ty.u32(), ast::StorageClass::kNone, Expr(2_u));
+ WrapInFunction(var, Increment(Source{{12, 34}}, "a"));
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverIncrementDecrementValidationTest, Decrement_Unsigned) {
- // var a : u32 = 2u;
- // a--;
- auto* var = Var("a", ty.u32(), ast::StorageClass::kNone, Expr(2u));
- WrapInFunction(var, Decrement(Source{{12, 34}}, "a"));
+ // var a : u32 = 2u;
+ // a--;
+ auto* var = Var("a", ty.u32(), ast::StorageClass::kNone, Expr(2_u));
+ WrapInFunction(var, Decrement(Source{{12, 34}}, "a"));
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverIncrementDecrementValidationTest, ThroughPointer) {
- // var a : i32;
- // let b : ptr<function,i32> = &a;
- // *b++;
- auto* var_a = Var("a", ty.i32(), ast::StorageClass::kFunction);
- auto* var_b = Const("b", ty.pointer<int>(ast::StorageClass::kFunction),
- AddressOf(Expr("a")));
- WrapInFunction(var_a, var_b, Increment(Source{{12, 34}}, Deref("b")));
+ // var a : i32;
+ // let b : ptr<function,i32> = &a;
+ // *b++;
+ auto* var_a = Var("a", ty.i32(), ast::StorageClass::kFunction);
+ auto* var_b = Let("b", ty.pointer<i32>(ast::StorageClass::kFunction), AddressOf(Expr("a")));
+ WrapInFunction(var_a, var_b, Increment(Source{{12, 34}}, Deref("b")));
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverIncrementDecrementValidationTest, ThroughArray) {
- // var a : array<i32, 4>;
- // a[1]++;
- auto* var_a = Var("a", ty.array(ty.i32(), 4), ast::StorageClass::kNone);
- WrapInFunction(var_a, Increment(Source{{12, 34}}, IndexAccessor("a", 1)));
+ // var a : array<i32, 4_u>;
+ // a[1i]++;
+ auto* var_a = Var("a", ty.array(ty.i32(), 4_u), ast::StorageClass::kNone);
+ WrapInFunction(var_a, Increment(Source{{12, 34}}, IndexAccessor("a", 1_i)));
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverIncrementDecrementValidationTest, ThroughVector_Index) {
- // var a : vec4<i32>;
- // a.y++;
- auto* var_a = Var("a", ty.vec4(ty.i32()), ast::StorageClass::kNone);
- WrapInFunction(var_a, Increment(Source{{12, 34}}, IndexAccessor("a", 1)));
+ // var a : vec4<i32>;
+ // a[1i]++;
+ auto* var_a = Var("a", ty.vec4(ty.i32()), ast::StorageClass::kNone);
+ WrapInFunction(var_a, Increment(Source{{12, 34}}, IndexAccessor("a", 1_i)));
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverIncrementDecrementValidationTest, ThroughVector_Member) {
- // var a : vec4<i32>;
- // a.y++;
- auto* var_a = Var("a", ty.vec4(ty.i32()), ast::StorageClass::kNone);
- WrapInFunction(var_a, Increment(Source{{12, 34}}, MemberAccessor("a", "y")));
+ // var a : vec4<i32>;
+ // a.y++;
+ auto* var_a = Var("a", ty.vec4(ty.i32()), ast::StorageClass::kNone);
+ WrapInFunction(var_a, Increment(Source{{12, 34}}, MemberAccessor("a", "y")));
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverIncrementDecrementValidationTest, Float) {
- // var a : f32 = 2.0;
- // a++;
- auto* var = Var("a", ty.f32(), ast::StorageClass::kNone, Expr(2.f));
- auto* inc = Increment(Expr(Source{{12, 34}}, "a"));
- WrapInFunction(var, inc);
+ // var a : f32 = 2.0;
+ // a++;
+ auto* var = Var("a", ty.f32(), ast::StorageClass::kNone, Expr(2_f));
+ auto* inc = Increment(Expr(Source{{12, 34}}, "a"));
+ WrapInFunction(var, inc);
- ASSERT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: increment statement can only be applied to an "
- "integer scalar");
+ ASSERT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: increment statement can only be applied to an "
+ "integer scalar");
}
TEST_F(ResolverIncrementDecrementValidationTest, Vector) {
- // var a : vec4<f32>;
- // a++;
- auto* var = Var("a", ty.vec4<i32>(), ast::StorageClass::kNone);
- auto* inc = Increment(Expr(Source{{12, 34}}, "a"));
- WrapInFunction(var, inc);
+    // var a : vec4<i32>;
+ // a++;
+ auto* var = Var("a", ty.vec4<i32>(), ast::StorageClass::kNone);
+ auto* inc = Increment(Expr(Source{{12, 34}}, "a"));
+ WrapInFunction(var, inc);
- ASSERT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: increment statement can only be applied to an "
- "integer scalar");
+ ASSERT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: increment statement can only be applied to an "
+ "integer scalar");
}
TEST_F(ResolverIncrementDecrementValidationTest, Atomic) {
- // var<workgroup> a : atomic<i32>;
- // a++;
- Global(Source{{12, 34}}, "a", ty.atomic(ty.i32()),
- ast::StorageClass::kWorkgroup);
- WrapInFunction(Increment(Expr(Source{{56, 78}}, "a")));
+ // var<workgroup> a : atomic<i32>;
+ // a++;
+ Global(Source{{12, 34}}, "a", ty.atomic(ty.i32()), ast::StorageClass::kWorkgroup);
+ WrapInFunction(Increment(Expr(Source{{56, 78}}, "a")));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "56:78 error: increment statement can only be applied to an "
- "integer scalar");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "56:78 error: increment statement can only be applied to an "
+ "integer scalar");
}
TEST_F(ResolverIncrementDecrementValidationTest, Literal) {
- // 1++;
- WrapInFunction(Increment(Expr(Source{{56, 78}}, 1)));
+ // 1++;
+ WrapInFunction(Increment(Expr(Source{{56, 78}}, 1_i)));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), "56:78 error: cannot modify value of type 'i32'");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "56:78 error: cannot modify value of type 'i32'");
}
TEST_F(ResolverIncrementDecrementValidationTest, Constant) {
- // let a = 1;
- // a++;
- auto* a = Const(Source{{12, 34}}, "a", nullptr, Expr(1));
- WrapInFunction(a, Increment(Expr(Source{{56, 78}}, "a")));
+ // let a = 1;
+ // a++;
+ auto* a = Let(Source{{12, 34}}, "a", nullptr, Expr(1_i));
+ WrapInFunction(a, Increment(Expr(Source{{56, 78}}, "a")));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), R"(56:78 error: cannot modify constant value
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), R"(56:78 error: cannot modify constant value
12:34 note: 'a' is declared here:)");
}
TEST_F(ResolverIncrementDecrementValidationTest, Parameter) {
- // fn func(a : i32)
- // {
- // a++;
- // }
- auto* a = Param(Source{{12, 34}}, "a", ty.i32());
- Func("func", {a}, ty.void_(), {Increment(Expr(Source{{56, 78}}, "a"))});
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), R"(56:78 error: cannot modify function parameter
+ // fn func(a : i32)
+ // {
+ // a++;
+ // }
+ auto* a = Param(Source{{12, 34}}, "a", ty.i32());
+ Func("func", {a}, ty.void_(), {Increment(Expr(Source{{56, 78}}, "a"))});
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), R"(56:78 error: cannot modify function parameter
12:34 note: 'a' is declared here:)");
}
TEST_F(ResolverIncrementDecrementValidationTest, ReturnValue) {
- // fn func() -> i32 {
- // return 0;
- // }
- // {
- // a++;
- // }
- Func("func", {}, ty.i32(), {Return(0)});
- WrapInFunction(Increment(Call(Source{{56, 78}}, "func")));
+ // fn func() -> i32 {
+ // return 0;
+ // }
+ // {
+ // a++;
+ // }
+ Func("func", {}, ty.i32(), {Return(0_i)});
+ WrapInFunction(Increment(Call(Source{{56, 78}}, "func")));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), R"(56:78 error: cannot modify value of type 'i32')");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), R"(56:78 error: cannot modify value of type 'i32')");
}
TEST_F(ResolverIncrementDecrementValidationTest, ReadOnlyBuffer) {
- // @group(0) @binding(0) var<storage,read> a : i32;
- // {
- // a++;
- // }
- Global(Source{{12, 34}}, "a", ty.i32(), ast::StorageClass::kStorage,
- ast::Access::kRead, GroupAndBinding(0, 0));
- WrapInFunction(Increment(Source{{56, 78}}, "a"));
+ // @group(0) @binding(0) var<storage,read> a : i32;
+ // {
+ // a++;
+ // }
+ Global(Source{{12, 34}}, "a", ty.i32(), ast::StorageClass::kStorage, ast::Access::kRead,
+ GroupAndBinding(0, 0));
+ WrapInFunction(Increment(Source{{56, 78}}, "a"));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- "56:78 error: cannot modify read-only type 'ref<storage, i32, read>'");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "56:78 error: cannot modify read-only type 'ref<storage, i32, read>'");
}
TEST_F(ResolverIncrementDecrementValidationTest, Phony) {
- // _++;
- WrapInFunction(Increment(Phony(Source{{56, 78}})));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), "56:78 error: cannot modify value of type 'void'");
+ // _++;
+ WrapInFunction(Increment(Phony(Source{{56, 78}})));
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "56:78 error: cannot modify value of type 'void'");
}
TEST_F(ResolverIncrementDecrementValidationTest, InForLoopInit) {
- // var a : i32 = 2;
- // for (a++; ; ) {
- // break;
- // }
- auto* a = Var("a", ty.i32(), ast::StorageClass::kNone, Expr(2));
- auto* loop =
- For(Increment(Source{{56, 78}}, "a"), nullptr, nullptr, Block(Break()));
- WrapInFunction(a, loop);
+ // var a : i32 = 2;
+ // for (a++; ; ) {
+ // break;
+ // }
+ auto* a = Var("a", ty.i32(), ast::StorageClass::kNone, Expr(2_i));
+ auto* loop = For(Increment(Source{{56, 78}}, "a"), nullptr, nullptr, Block(Break()));
+ WrapInFunction(a, loop);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverIncrementDecrementValidationTest, InForLoopCont) {
- // var a : i32 = 2;
- // for (; ; a++) {
- // break;
- // }
- auto* a = Var("a", ty.i32(), ast::StorageClass::kNone, Expr(2));
- auto* loop =
- For(nullptr, nullptr, Increment(Source{{56, 78}}, "a"), Block(Break()));
- WrapInFunction(a, loop);
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ // var a : i32 = 2;
+ // for (; ; a++) {
+ // break;
+ // }
+ auto* a = Var("a", ty.i32(), ast::StorageClass::kNone, Expr(2_i));
+ auto* loop = For(nullptr, nullptr, Increment(Source{{56, 78}}, "a"), Block(Break()));
+ WrapInFunction(a, loop);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/resolver/inferred_type_test.cc b/chromium/third_party/dawn/src/tint/resolver/inferred_type_test.cc
index 32ed284e6d6..d8cc649ffa8 100644
--- a/chromium/third_party/dawn/src/tint/resolver/inferred_type_test.cc
+++ b/chromium/third_party/dawn/src/tint/resolver/inferred_type_test.cc
@@ -17,6 +17,8 @@
#include "gmock/gmock.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::resolver {
namespace {
@@ -37,21 +39,17 @@ template <typename T>
using mat4x4 = builder::mat4x4<T>;
template <typename T>
using alias = builder::alias<T>;
-using f32 = builder::f32;
-using i32 = builder::i32;
-using u32 = builder::u32;
-struct ResolverInferredTypeTest : public resolver::TestHelper,
- public testing::Test {};
+struct ResolverInferredTypeTest : public resolver::TestHelper, public testing::Test {};
struct Params {
- builder::ast_expr_func_ptr create_value;
- builder::sem_type_func_ptr create_expected_type;
+ builder::ast_expr_func_ptr create_value;
+ builder::sem_type_func_ptr create_expected_type;
};
template <typename T>
constexpr Params ParamsFor() {
- return Params{DataType<T>::Expr, DataType<T>::Sem};
+ return Params{DataType<T>::Expr, DataType<T>::Sem};
}
Params all_cases[] = {
@@ -78,95 +76,90 @@ Params all_cases[] = {
using ResolverInferredTypeParamTest = ResolverTestWithParam<Params>;
TEST_P(ResolverInferredTypeParamTest, GlobalLet_Pass) {
- auto& params = GetParam();
+ auto& params = GetParam();
- auto* expected_type = params.create_expected_type(*this);
+ auto* expected_type = params.create_expected_type(*this);
- // let a = <type constructor>;
- auto* ctor_expr = params.create_value(*this, 0);
- auto* var = GlobalConst("a", nullptr, ctor_expr);
- WrapInFunction();
+ // let a = <type constructor>;
+ auto* ctor_expr = params.create_value(*this, 0);
+ auto* var = GlobalConst("a", nullptr, ctor_expr);
+ WrapInFunction();
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- EXPECT_EQ(TypeOf(var), expected_type);
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_EQ(TypeOf(var), expected_type);
}
TEST_P(ResolverInferredTypeParamTest, GlobalVar_Fail) {
- auto& params = GetParam();
+ auto& params = GetParam();
- // var a = <type constructor>;
- auto* ctor_expr = params.create_value(*this, 0);
- Global(Source{{12, 34}}, "a", nullptr, ast::StorageClass::kPrivate,
- ctor_expr);
- WrapInFunction();
+ // var a = <type constructor>;
+ auto* ctor_expr = params.create_value(*this, 0);
+ Global(Source{{12, 34}}, "a", nullptr, ast::StorageClass::kPrivate, ctor_expr);
+ WrapInFunction();
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: global var declaration must specify a type");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: global var declaration must specify a type");
}
TEST_P(ResolverInferredTypeParamTest, LocalLet_Pass) {
- auto& params = GetParam();
+ auto& params = GetParam();
- auto* expected_type = params.create_expected_type(*this);
+ auto* expected_type = params.create_expected_type(*this);
- // let a = <type constructor>;
- auto* ctor_expr = params.create_value(*this, 0);
- auto* var = Const("a", nullptr, ctor_expr);
- WrapInFunction(var);
+ // let a = <type constructor>;
+ auto* ctor_expr = params.create_value(*this, 0);
+ auto* var = Let("a", nullptr, ctor_expr);
+ WrapInFunction(var);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- EXPECT_EQ(TypeOf(var), expected_type);
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_EQ(TypeOf(var), expected_type);
}
TEST_P(ResolverInferredTypeParamTest, LocalVar_Pass) {
- auto& params = GetParam();
+ auto& params = GetParam();
- auto* expected_type = params.create_expected_type(*this);
+ auto* expected_type = params.create_expected_type(*this);
- // var a = <type constructor>;
- auto* ctor_expr = params.create_value(*this, 0);
- auto* var = Var("a", nullptr, ast::StorageClass::kFunction, ctor_expr);
- WrapInFunction(var);
+ // var a = <type constructor>;
+ auto* ctor_expr = params.create_value(*this, 0);
+ auto* var = Var("a", nullptr, ast::StorageClass::kFunction, ctor_expr);
+ WrapInFunction(var);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- EXPECT_EQ(TypeOf(var)->UnwrapRef(), expected_type);
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_EQ(TypeOf(var)->UnwrapRef(), expected_type);
}
-INSTANTIATE_TEST_SUITE_P(ResolverTest,
- ResolverInferredTypeParamTest,
- testing::ValuesIn(all_cases));
+INSTANTIATE_TEST_SUITE_P(ResolverTest, ResolverInferredTypeParamTest, testing::ValuesIn(all_cases));
TEST_F(ResolverInferredTypeTest, InferArray_Pass) {
- auto* type = ty.array(ty.u32(), 10);
- auto* expected_type =
- create<sem::Array>(create<sem::U32>(), 10u, 4u, 4u * 10u, 4u, 4u);
+ auto* type = ty.array(ty.u32(), 10_u);
+ auto* expected_type = create<sem::Array>(create<sem::U32>(), 10u, 4u, 4u * 10u, 4u, 4u);
- auto* ctor_expr = Construct(type);
- auto* var = Var("a", nullptr, ast::StorageClass::kFunction, ctor_expr);
- WrapInFunction(var);
+ auto* ctor_expr = Construct(type);
+ auto* var = Var("a", nullptr, ast::StorageClass::kFunction, ctor_expr);
+ WrapInFunction(var);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- EXPECT_EQ(TypeOf(var)->UnwrapRef(), expected_type);
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_EQ(TypeOf(var)->UnwrapRef(), expected_type);
}
TEST_F(ResolverInferredTypeTest, InferStruct_Pass) {
- auto* member = Member("x", ty.i32());
- auto* str = Structure("S", {member});
+ auto* member = Member("x", ty.i32());
+ auto* str = Structure("S", {member});
- auto* expected_type = create<sem::Struct>(
- str, str->name,
- sem::StructMemberList{create<sem::StructMember>(
- member, member->symbol, create<sem::I32>(), 0u, 0u, 0u, 4u)},
- 0u, 4u, 4u);
+ auto* expected_type =
+ create<sem::Struct>(str, str->name,
+ sem::StructMemberList{create<sem::StructMember>(
+ member, member->symbol, create<sem::I32>(), 0u, 0u, 0u, 4u)},
+ 0u, 4u, 4u);
- auto* ctor_expr = Construct(ty.Of(str));
+ auto* ctor_expr = Construct(ty.Of(str));
- auto* var = Var("a", nullptr, ast::StorageClass::kFunction, ctor_expr);
- WrapInFunction(var);
+ auto* var = Var("a", nullptr, ast::StorageClass::kFunction, ctor_expr);
+ WrapInFunction(var);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- EXPECT_EQ(TypeOf(var)->UnwrapRef(), expected_type);
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_EQ(TypeOf(var)->UnwrapRef(), expected_type);
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/resolver/intrinsic_table.cc b/chromium/third_party/dawn/src/tint/resolver/intrinsic_table.cc
new file mode 100644
index 00000000000..eb8f30e47a8
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/resolver/intrinsic_table.cc
@@ -0,0 +1,1649 @@
+// Copyright 2021 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/tint/resolver/intrinsic_table.h"
+
+#include <algorithm>
+#include <limits>
+#include <unordered_map>
+#include <utility>
+
+#include "src/tint/program_builder.h"
+#include "src/tint/sem/abstract_float.h"
+#include "src/tint/sem/abstract_int.h"
+#include "src/tint/sem/abstract_numeric.h"
+#include "src/tint/sem/atomic.h"
+#include "src/tint/sem/depth_multisampled_texture.h"
+#include "src/tint/sem/depth_texture.h"
+#include "src/tint/sem/external_texture.h"
+#include "src/tint/sem/multisampled_texture.h"
+#include "src/tint/sem/pipeline_stage_set.h"
+#include "src/tint/sem/sampled_texture.h"
+#include "src/tint/sem/storage_texture.h"
+#include "src/tint/sem/type_constructor.h"
+#include "src/tint/sem/type_conversion.h"
+#include "src/tint/utils/hash.h"
+#include "src/tint/utils/map.h"
+#include "src/tint/utils/math.h"
+#include "src/tint/utils/scoped_assignment.h"
+
+namespace tint::resolver {
+namespace {
+
+// Forward declarations
+struct OverloadInfo;
+class Matchers;
+class NumberMatcher;
+class TypeMatcher;
+
+/// A special type that matches all TypeMatchers
+class Any final : public Castable<Any, sem::Type> {
+ public:
+ Any() = default;
+ ~Any() override = default;
+
+ // Stub implementations for sem::Type conformance.
+ size_t Hash() const override { return 0; }
+ bool Equals(const sem::Type&) const override { return false; }
+ std::string FriendlyName(const SymbolTable&) const override { return "<any>"; }
+};
+
+/// Number is a 32-bit unsigned integer, which can be in one of three states:
+/// * Invalid - Number has not been assigned a value
+/// * Valid - a fixed integer value
+/// * Any - matches any other non-invalid number
+struct Number {
+ static const Number any;
+ static const Number invalid;
+
+    /// Constructs a valid number with the value v
+ explicit Number(uint32_t v) : value_(v), state_(kValid) {}
+
+ /// @returns the value of the number
+ inline uint32_t Value() const { return value_; }
+
+    /// @returns true if the number is valid
+ inline bool IsValid() const { return state_ == kValid; }
+
+    /// @returns true if the number is 'any'
+ inline bool IsAny() const { return state_ == kAny; }
+
+ /// Assignment operator.
+ /// The number becomes valid, with the value n
+ inline Number& operator=(uint32_t n) {
+ value_ = n;
+ state_ = kValid;
+ return *this;
+ }
+
+ private:
+ enum State {
+ kInvalid,
+ kValid,
+ kAny,
+ };
+
+ constexpr explicit Number(State state) : state_(state) {}
+
+ uint32_t value_ = 0;
+ State state_ = kInvalid;
+};
+
+const Number Number::any{Number::kAny};
+const Number Number::invalid{Number::kInvalid};
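+
+// Illustrative sketch (editorial addition, not part of the upstream file): a small,
+// never-called self-check of the three Number states documented above. It relies
+// only on the definitions in this file.
+[[maybe_unused]] bool NumberStateExample() {
+    Number n{3u};  // valid, fixed value 3
+    bool ok = n.IsValid() && !n.IsAny() && n.Value() == 3u;
+    n = 7u;  // assignment keeps the number valid, with the new value
+    ok = ok && n.IsValid() && n.Value() == 7u;
+    // The sentinels: 'any' matches any valid number, 'invalid' matches nothing.
+    return ok && Number::any.IsAny() && !Number::invalid.IsValid() && !Number::invalid.IsAny();
+}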
+
+/// TemplateState holds the state of the template numbers and types.
+/// Used by the MatchState.
+class TemplateState {
+ public:
+    /// If the template type with index `idx` is undefined, then it is defined as `ty` and
+ /// Type() returns `ty`.
+ /// If the template type is defined, and `ty` can be converted to the template type then the
+ /// template type is returned.
+ /// If the template type is defined, and the template type can be converted to `ty`, then the
+ /// template type is replaced with `ty`, and `ty` is returned.
+ /// If none of the above applies, then `ty` is a type mismatch for the template type, and
+ /// nullptr is returned.
+ const sem::Type* Type(size_t idx, const sem::Type* ty) {
+ auto res = types_.emplace(idx, ty);
+ if (res.second) {
+ return ty;
+ }
+ auto* existing = res.first->second;
+ if (existing == ty) {
+ return ty;
+ }
+ ty = sem::Type::Common({existing, ty});
+ if (ty) {
+ res.first->second = ty;
+ }
+ return ty;
+ }
+
+ /// If the number with index `idx` is undefined, then it is defined with the number `number` and
+    /// Num() returns true. If the number is already defined, then `Num()` returns true iff it is
+    /// equal to `number`.
+ bool Num(size_t idx, Number number) {
+ auto res = numbers_.emplace(idx, number.Value());
+ return res.second || res.first->second == number.Value();
+ }
+
+ /// Type returns the template type with index `idx`, or nullptr if the type was not defined.
+ const sem::Type* Type(size_t idx) const {
+ auto it = types_.find(idx);
+ return (it != types_.end()) ? it->second : nullptr;
+ }
+
+ /// SetType replaces the template type with index `idx` with type `ty`.
+ void SetType(size_t idx, const sem::Type* ty) { types_[idx] = ty; }
+
+    /// Num returns the number with index `idx`, or an invalid Number if it was not defined.
+ Number Num(size_t idx) const {
+ auto it = numbers_.find(idx);
+ return (it != numbers_.end()) ? Number(it->second) : Number::invalid;
+ }
+
+ private:
+ std::unordered_map<size_t, const sem::Type*> types_;
+ std::unordered_map<size_t, uint32_t> numbers_;
+};
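+
+// Illustrative sketch (editorial addition): the define-then-refine behaviour of
+// TemplateState described above, assuming hypothetical sem::Type pointers `ai_ty`
+// (abstract-int) and `f32_ty` obtained from a ProgramBuilder.
+//
+//   TemplateState t;
+//   t.Type(0, ai_ty);       // T0 undefined: T0 becomes abstract-int, ai_ty returned
+//   t.Type(0, f32_ty);      // abstract-int converts to f32: T0 refined to f32, f32 returned
+//   t.Num(1, Number(4u));   // N1 undefined: N1 becomes 4, returns true
+//   t.Num(1, Number(3u));   // N1 is already 4: mismatch, returns false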
+
+/// Index type used for matcher indices
+using MatcherIndex = uint8_t;
+
+/// Index value used for template types / numbers that do not have a constraint
+constexpr MatcherIndex kNoMatcher = std::numeric_limits<MatcherIndex>::max();
+
+/// MatchState holds the state used to match an overload.
+class MatchState {
+ public:
+ MatchState(ProgramBuilder& b,
+ TemplateState& t,
+ const Matchers& m,
+ const OverloadInfo* o,
+ MatcherIndex const* matcher_indices)
+ : builder(b), templates(t), matchers(m), overload(o), matcher_indices_(matcher_indices) {}
+
+ /// The program builder
+ ProgramBuilder& builder;
+ /// The template types and numbers
+ TemplateState& templates;
+ /// The type and number matchers
+ Matchers const& matchers;
+ /// The current overload being evaluated
+ OverloadInfo const* overload;
+
+ /// Type uses the next TypeMatcher from the matcher indices to match the type
+ /// `ty`. If the type matches, the canonical expected type is returned. If the
+ /// type `ty` does not match, then nullptr is returned.
+ /// @note: The matcher indices are progressed on calling.
+ const sem::Type* Type(const sem::Type* ty);
+
+ /// Num uses the next NumMatcher from the matcher indices to match the number
+ /// `num`. If the number matches, the canonical expected number is returned.
+ /// If the number `num` does not match, then an invalid number is returned.
+ /// @note: The matcher indices are progressed on calling.
+ Number Num(Number num);
+
+ /// @returns a string representation of the next TypeMatcher from the matcher
+ /// indices.
+ /// @note: The matcher indices are progressed on calling.
+ std::string TypeName();
+
+ /// @returns a string representation of the next NumberMatcher from the
+ /// matcher indices.
+ /// @note: The matcher indices are progressed on calling.
+ std::string NumName();
+
+ private:
+ MatcherIndex const* matcher_indices_ = nullptr;
+};
+
+/// A TypeMatcher is the interface used to match a type used as part of an
+/// overload's parameter or return type.
+class TypeMatcher {
+ public:
+ /// Destructor
+ virtual ~TypeMatcher() = default;
+
+ /// Checks whether the given type matches the matcher rules, and returns the
+ /// expected, canonicalized type on success.
+ /// Match may define and refine the template types and numbers in state.
+ /// @param type the type to match
+ /// @returns the canonicalized type on match, otherwise nullptr
+ virtual const sem::Type* Match(MatchState& state, const sem::Type* type) const = 0;
+
+ /// @return a string representation of the matcher. Used for printing error
+ /// messages when no overload is found.
+ virtual std::string String(MatchState* state) const = 0;
+};
+
+/// A NumberMatcher is the interface used to match a number or enumerator used
+/// as part of an overload's parameter or return type.
+class NumberMatcher {
+ public:
+ /// Destructor
+ virtual ~NumberMatcher() = default;
+
+ /// Checks whether the given number matches the matcher rules.
+ /// Match may define template numbers in state.
+ /// @param number the number to match
+    /// @returns the matched number on success, otherwise an invalid Number.
+ virtual Number Match(MatchState& state, Number number) const = 0;
+
+ /// @return a string representation of the matcher. Used for printing error
+ /// messages when no overload is found.
+ virtual std::string String(MatchState* state) const = 0;
+};
+
+/// TemplateTypeMatcher is a Matcher for a template type.
+/// The TemplateTypeMatcher will initially match against any type, and then will only be further
+/// constrained based on the conversion rules defined at https://www.w3.org/TR/WGSL/#conversion-rank
+class TemplateTypeMatcher : public TypeMatcher {
+ public:
+ /// Constructor
+ explicit TemplateTypeMatcher(size_t index) : index_(index) {}
+
+ const sem::Type* Match(MatchState& state, const sem::Type* type) const override {
+ if (type->Is<Any>()) {
+ return state.templates.Type(index_);
+ }
+ if (auto* templates = state.templates.Type(index_, type)) {
+ return templates;
+ }
+ return nullptr;
+ }
+
+ std::string String(MatchState* state) const override;
+
+ private:
+ size_t index_;
+};
+
+/// TemplateNumberMatcher is a Matcher for a template number.
+/// The TemplateNumberMatcher will match against any number (so long as it is
+/// consistent for all uses in the overload).
+class TemplateNumberMatcher : public NumberMatcher {
+ public:
+ explicit TemplateNumberMatcher(size_t index) : index_(index) {}
+
+ Number Match(MatchState& state, Number number) const override {
+ if (number.IsAny()) {
+ return state.templates.Num(index_);
+ }
+ return state.templates.Num(index_, number) ? number : Number::invalid;
+ }
+
+ std::string String(MatchState* state) const override;
+
+ private:
+ size_t index_;
+};
+
+////////////////////////////////////////////////////////////////////////////////
+// Binding functions for use in the generated builtin_table.inl
+// TODO(bclayton): See if we can move more of this hand-rolled code to the
+// template
+////////////////////////////////////////////////////////////////////////////////
+using TexelFormat = ast::TexelFormat;
+using Access = ast::Access;
+using StorageClass = ast::StorageClass;
+using ParameterUsage = sem::ParameterUsage;
+using PipelineStage = ast::PipelineStage;
+
+/// Unique flag bits for overloads
+enum class OverloadFlag {
+ kIsBuiltin, // The overload is a builtin ('fn')
+ kIsOperator, // The overload is an operator ('op')
+ kIsConstructor, // The overload is a type constructor ('ctor')
+ kIsConverter, // The overload is a type converter ('conv')
+ kSupportsVertexPipeline, // The overload can be used in vertex shaders
+ kSupportsFragmentPipeline, // The overload can be used in fragment shaders
+ kSupportsComputePipeline, // The overload can be used in compute shaders
+ kIsDeprecated, // The overload is deprecated
+};
+
+// An enum set of OverloadFlag, used by OverloadInfo
+using OverloadFlags = utils::EnumSet<OverloadFlag>;
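+
+// Illustrative sketch (editorial addition): how an OverloadFlags set is typically
+// built and queried. utils::EnumSet provides Add() and Contains(), as used later
+// in this file for pipeline-stage sets.
+//
+//   OverloadFlags flags;
+//   flags.Add(OverloadFlag::kIsBuiltin);
+//   flags.Add(OverloadFlag::kSupportsFragmentPipeline);
+//   flags.Add(OverloadFlag::kSupportsComputePipeline);
+//   if (flags.Contains(OverloadFlag::kIsDeprecated)) { /* emit a deprecation diagnostic */ }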
+
+bool match_bool(const sem::Type* ty) {
+ return ty->IsAnyOf<Any, sem::Bool>();
+}
+
+const sem::AbstractFloat* build_af(MatchState& state) {
+ return state.builder.create<sem::AbstractFloat>();
+}
+
+bool match_af(const sem::Type* ty) {
+ return ty->IsAnyOf<Any, sem::AbstractFloat>();
+}
+
+const sem::AbstractInt* build_ai(MatchState& state) {
+ return state.builder.create<sem::AbstractInt>();
+}
+
+bool match_ai(const sem::Type* ty) {
+ return ty->IsAnyOf<Any, sem::AbstractInt>();
+}
+
+const sem::Bool* build_bool(MatchState& state) {
+ return state.builder.create<sem::Bool>();
+}
+
+const sem::F32* build_f32(MatchState& state) {
+ return state.builder.create<sem::F32>();
+}
+
+bool match_f32(const sem::Type* ty) {
+ return ty->IsAnyOf<Any, sem::F32, sem::AbstractNumeric>();
+}
+
+const sem::I32* build_i32(MatchState& state) {
+ return state.builder.create<sem::I32>();
+}
+
+bool match_i32(const sem::Type* ty) {
+ return ty->IsAnyOf<Any, sem::I32, sem::AbstractInt>();
+}
+
+const sem::U32* build_u32(MatchState& state) {
+ return state.builder.create<sem::U32>();
+}
+
+bool match_u32(const sem::Type* ty) {
+ return ty->IsAnyOf<Any, sem::U32, sem::AbstractInt>();
+}
+
+bool match_vec(const sem::Type* ty, Number& N, const sem::Type*& T) {
+ if (ty->Is<Any>()) {
+ N = Number::any;
+ T = ty;
+ return true;
+ }
+
+ if (auto* v = ty->As<sem::Vector>()) {
+ N = v->Width();
+ T = v->type();
+ return true;
+ }
+ return false;
+}
+
+template <uint32_t N>
+bool match_vec(const sem::Type* ty, const sem::Type*& T) {
+ if (ty->Is<Any>()) {
+ T = ty;
+ return true;
+ }
+
+ if (auto* v = ty->As<sem::Vector>()) {
+ if (v->Width() == N) {
+ T = v->type();
+ return true;
+ }
+ }
+ return false;
+}
+
+const sem::Vector* build_vec(MatchState& state, Number N, const sem::Type* el) {
+ return state.builder.create<sem::Vector>(el, N.Value());
+}
+
+template <uint32_t N>
+const sem::Vector* build_vec(MatchState& state, const sem::Type* el) {
+ return state.builder.create<sem::Vector>(el, N);
+}
+
+constexpr auto match_vec2 = match_vec<2>;
+constexpr auto match_vec3 = match_vec<3>;
+constexpr auto match_vec4 = match_vec<4>;
+
+constexpr auto build_vec2 = build_vec<2>;
+constexpr auto build_vec3 = build_vec<3>;
+constexpr auto build_vec4 = build_vec<4>;
+
+bool match_mat(const sem::Type* ty, Number& M, Number& N, const sem::Type*& T) {
+ if (ty->Is<Any>()) {
+ M = Number::any;
+ N = Number::any;
+ T = ty;
+ return true;
+ }
+ if (auto* m = ty->As<sem::Matrix>()) {
+ M = m->columns();
+ N = m->ColumnType()->Width();
+ T = m->type();
+ return true;
+ }
+ return false;
+}
+
+template <uint32_t C, uint32_t R>
+bool match_mat(const sem::Type* ty, const sem::Type*& T) {
+ if (ty->Is<Any>()) {
+ T = ty;
+ return true;
+ }
+ if (auto* m = ty->As<sem::Matrix>()) {
+ if (m->columns() == C && m->rows() == R) {
+ T = m->type();
+ return true;
+ }
+ }
+ return false;
+}
+
+const sem::Matrix* build_mat(MatchState& state, Number C, Number R, const sem::Type* T) {
+ auto* column_type = state.builder.create<sem::Vector>(T, R.Value());
+ return state.builder.create<sem::Matrix>(column_type, C.Value());
+}
+
+template <uint32_t C, uint32_t R>
+const sem::Matrix* build_mat(MatchState& state, const sem::Type* T) {
+ auto* column_type = state.builder.create<sem::Vector>(T, R);
+ return state.builder.create<sem::Matrix>(column_type, C);
+}
+
+constexpr auto build_mat2x2 = build_mat<2, 2>;
+constexpr auto build_mat2x3 = build_mat<2, 3>;
+constexpr auto build_mat2x4 = build_mat<2, 4>;
+constexpr auto build_mat3x2 = build_mat<3, 2>;
+constexpr auto build_mat3x3 = build_mat<3, 3>;
+constexpr auto build_mat3x4 = build_mat<3, 4>;
+constexpr auto build_mat4x2 = build_mat<4, 2>;
+constexpr auto build_mat4x3 = build_mat<4, 3>;
+constexpr auto build_mat4x4 = build_mat<4, 4>;
+
+constexpr auto match_mat2x2 = match_mat<2, 2>;
+constexpr auto match_mat2x3 = match_mat<2, 3>;
+constexpr auto match_mat2x4 = match_mat<2, 4>;
+constexpr auto match_mat3x2 = match_mat<3, 2>;
+constexpr auto match_mat3x3 = match_mat<3, 3>;
+constexpr auto match_mat3x4 = match_mat<3, 4>;
+constexpr auto match_mat4x2 = match_mat<4, 2>;
+constexpr auto match_mat4x3 = match_mat<4, 3>;
+constexpr auto match_mat4x4 = match_mat<4, 4>;
+
+bool match_array(const sem::Type* ty, const sem::Type*& T) {
+ if (ty->Is<Any>()) {
+ T = ty;
+ return true;
+ }
+
+ if (auto* a = ty->As<sem::Array>()) {
+ if (a->Count() == 0) {
+ T = a->ElemType();
+ return true;
+ }
+ }
+ return false;
+}
+
+const sem::Array* build_array(MatchState& state, const sem::Type* el) {
+ return state.builder.create<sem::Array>(el,
+ /* count */ 0u,
+ /* align */ 0u,
+ /* size */ 0u,
+ /* stride */ 0u,
+ /* stride_implicit */ 0u);
+}
+
+bool match_ptr(const sem::Type* ty, Number& S, const sem::Type*& T, Number& A) {
+ if (ty->Is<Any>()) {
+ S = Number::any;
+ T = ty;
+ A = Number::any;
+ return true;
+ }
+
+ if (auto* p = ty->As<sem::Pointer>()) {
+ S = Number(static_cast<uint32_t>(p->StorageClass()));
+ T = p->StoreType();
+ A = Number(static_cast<uint32_t>(p->Access()));
+ return true;
+ }
+ return false;
+}
+
+const sem::Pointer* build_ptr(MatchState& state, Number S, const sem::Type* T, Number& A) {
+ return state.builder.create<sem::Pointer>(T, static_cast<ast::StorageClass>(S.Value()),
+ static_cast<ast::Access>(A.Value()));
+}
+
+bool match_atomic(const sem::Type* ty, const sem::Type*& T) {
+ if (ty->Is<Any>()) {
+ T = ty;
+ return true;
+ }
+
+ if (auto* a = ty->As<sem::Atomic>()) {
+ T = a->Type();
+ return true;
+ }
+ return false;
+}
+
+const sem::Atomic* build_atomic(MatchState& state, const sem::Type* T) {
+ return state.builder.create<sem::Atomic>(T);
+}
+
+bool match_sampler(const sem::Type* ty) {
+ if (ty->Is<Any>()) {
+ return true;
+ }
+ return ty->Is([](const sem::Sampler* s) { return s->kind() == ast::SamplerKind::kSampler; });
+}
+
+const sem::Sampler* build_sampler(MatchState& state) {
+ return state.builder.create<sem::Sampler>(ast::SamplerKind::kSampler);
+}
+
+bool match_sampler_comparison(const sem::Type* ty) {
+ if (ty->Is<Any>()) {
+ return true;
+ }
+ return ty->Is(
+ [](const sem::Sampler* s) { return s->kind() == ast::SamplerKind::kComparisonSampler; });
+}
+
+const sem::Sampler* build_sampler_comparison(MatchState& state) {
+ return state.builder.create<sem::Sampler>(ast::SamplerKind::kComparisonSampler);
+}
+
+bool match_texture(const sem::Type* ty, ast::TextureDimension dim, const sem::Type*& T) {
+ if (ty->Is<Any>()) {
+ T = ty;
+ return true;
+ }
+ if (auto* v = ty->As<sem::SampledTexture>()) {
+ if (v->dim() == dim) {
+ T = v->type();
+ return true;
+ }
+ }
+ return false;
+}
+
+#define JOIN(a, b) a##b
+
+#define DECLARE_SAMPLED_TEXTURE(suffix, dim) \
+ bool JOIN(match_texture_, suffix)(const sem::Type* ty, const sem::Type*& T) { \
+ return match_texture(ty, dim, T); \
+ } \
+ const sem::SampledTexture* JOIN(build_texture_, suffix)(MatchState & state, \
+ const sem::Type* T) { \
+ return state.builder.create<sem::SampledTexture>(dim, T); \
+ }
+
+DECLARE_SAMPLED_TEXTURE(1d, ast::TextureDimension::k1d)
+DECLARE_SAMPLED_TEXTURE(2d, ast::TextureDimension::k2d)
+DECLARE_SAMPLED_TEXTURE(2d_array, ast::TextureDimension::k2dArray)
+DECLARE_SAMPLED_TEXTURE(3d, ast::TextureDimension::k3d)
+DECLARE_SAMPLED_TEXTURE(cube, ast::TextureDimension::kCube)
+DECLARE_SAMPLED_TEXTURE(cube_array, ast::TextureDimension::kCubeArray)
+#undef DECLARE_SAMPLED_TEXTURE
+
+bool match_texture_multisampled(const sem::Type* ty,
+ ast::TextureDimension dim,
+ const sem::Type*& T) {
+ if (ty->Is<Any>()) {
+ T = ty;
+ return true;
+ }
+ if (auto* v = ty->As<sem::MultisampledTexture>()) {
+ if (v->dim() == dim) {
+ T = v->type();
+ return true;
+ }
+ }
+ return false;
+}
+
+#define DECLARE_MULTISAMPLED_TEXTURE(suffix, dim) \
+ bool JOIN(match_texture_multisampled_, suffix)(const sem::Type* ty, const sem::Type*& T) { \
+ return match_texture_multisampled(ty, dim, T); \
+ } \
+ const sem::MultisampledTexture* JOIN(build_texture_multisampled_, suffix)( \
+ MatchState & state, const sem::Type* T) { \
+ return state.builder.create<sem::MultisampledTexture>(dim, T); \
+ }
+
+DECLARE_MULTISAMPLED_TEXTURE(2d, ast::TextureDimension::k2d)
+#undef DECLARE_MULTISAMPLED_TEXTURE
+
+bool match_texture_depth(const sem::Type* ty, ast::TextureDimension dim) {
+ if (ty->Is<Any>()) {
+ return true;
+ }
+ return ty->Is([&](const sem::DepthTexture* t) { return t->dim() == dim; });
+}
+
+#define DECLARE_DEPTH_TEXTURE(suffix, dim) \
+ bool JOIN(match_texture_depth_, suffix)(const sem::Type* ty) { \
+ return match_texture_depth(ty, dim); \
+ } \
+ const sem::DepthTexture* JOIN(build_texture_depth_, suffix)(MatchState & state) { \
+ return state.builder.create<sem::DepthTexture>(dim); \
+ }
+
+DECLARE_DEPTH_TEXTURE(2d, ast::TextureDimension::k2d)
+DECLARE_DEPTH_TEXTURE(2d_array, ast::TextureDimension::k2dArray)
+DECLARE_DEPTH_TEXTURE(cube, ast::TextureDimension::kCube)
+DECLARE_DEPTH_TEXTURE(cube_array, ast::TextureDimension::kCubeArray)
+#undef DECLARE_DEPTH_TEXTURE
+
+bool match_texture_depth_multisampled_2d(const sem::Type* ty) {
+ if (ty->Is<Any>()) {
+ return true;
+ }
+ return ty->Is([&](const sem::DepthMultisampledTexture* t) {
+ return t->dim() == ast::TextureDimension::k2d;
+ });
+}
+
+sem::DepthMultisampledTexture* build_texture_depth_multisampled_2d(MatchState& state) {
+ return state.builder.create<sem::DepthMultisampledTexture>(ast::TextureDimension::k2d);
+}
+
+bool match_texture_storage(const sem::Type* ty, ast::TextureDimension dim, Number& F, Number& A) {
+ if (ty->Is<Any>()) {
+ F = Number::any;
+ A = Number::any;
+ return true;
+ }
+ if (auto* v = ty->As<sem::StorageTexture>()) {
+ if (v->dim() == dim) {
+ F = Number(static_cast<uint32_t>(v->texel_format()));
+ A = Number(static_cast<uint32_t>(v->access()));
+ return true;
+ }
+ }
+ return false;
+}
+
+#define DECLARE_STORAGE_TEXTURE(suffix, dim) \
+ bool JOIN(match_texture_storage_, suffix)(const sem::Type* ty, Number& F, Number& A) { \
+ return match_texture_storage(ty, dim, F, A); \
+ } \
+ const sem::StorageTexture* JOIN(build_texture_storage_, suffix)(MatchState & state, Number F, \
+ Number A) { \
+ auto format = static_cast<TexelFormat>(F.Value()); \
+ auto access = static_cast<Access>(A.Value()); \
+ auto* T = sem::StorageTexture::SubtypeFor(format, state.builder.Types()); \
+ return state.builder.create<sem::StorageTexture>(dim, format, access, T); \
+ }
+
+DECLARE_STORAGE_TEXTURE(1d, ast::TextureDimension::k1d)
+DECLARE_STORAGE_TEXTURE(2d, ast::TextureDimension::k2d)
+DECLARE_STORAGE_TEXTURE(2d_array, ast::TextureDimension::k2dArray)
+DECLARE_STORAGE_TEXTURE(3d, ast::TextureDimension::k3d)
+#undef DECLARE_STORAGE_TEXTURE
+
+bool match_texture_external(const sem::Type* ty) {
+ return ty->IsAnyOf<Any, sem::ExternalTexture>();
+}
+
+const sem::ExternalTexture* build_texture_external(MatchState& state) {
+ return state.builder.create<sem::ExternalTexture>();
+}
+
+// Builtin types starting with a _ prefix cannot be declared in WGSL, so they
+// can only be used as return types. Because of this, they must only match Any,
+// which is used as the return type matcher.
+bool match_modf_result(const sem::Type* ty) {
+ return ty->Is<Any>();
+}
+bool match_modf_result_vec(const sem::Type* ty, Number& N) {
+ if (!ty->Is<Any>()) {
+ return false;
+ }
+ N = Number::any;
+ return true;
+}
+bool match_frexp_result(const sem::Type* ty) {
+ return ty->Is<Any>();
+}
+bool match_frexp_result_vec(const sem::Type* ty, Number& N) {
+ if (!ty->Is<Any>()) {
+ return false;
+ }
+ N = Number::any;
+ return true;
+}
+
+bool match_atomic_compare_exchange_result(const sem::Type* ty, const sem::Type*& T) {
+ if (ty->Is<Any>()) {
+ T = ty;
+ return true;
+ }
+ return false;
+}
+
+struct NameAndType {
+ std::string name;
+ sem::Type* type;
+};
+const sem::Struct* build_struct(MatchState& state,
+ std::string name,
+ std::initializer_list<NameAndType> member_names_and_types) {
+ uint32_t offset = 0;
+ uint32_t max_align = 0;
+ sem::StructMemberList members;
+ for (auto& m : member_names_and_types) {
+ uint32_t align = m.type->Align();
+ uint32_t size = m.type->Size();
+ offset = utils::RoundUp(align, offset);
+ max_align = std::max(max_align, align);
+ members.emplace_back(state.builder.create<sem::StructMember>(
+ /* declaration */ nullptr,
+ /* name */ state.builder.Sym(m.name),
+ /* type */ m.type,
+ /* index */ static_cast<uint32_t>(members.size()),
+ /* offset */ offset,
+ /* align */ align,
+ /* size */ size));
+ offset += size;
+ }
+ uint32_t size_without_padding = offset;
+ uint32_t size_with_padding = utils::RoundUp(max_align, offset);
+ return state.builder.create<sem::Struct>(
+ /* declaration */ nullptr,
+ /* name */ state.builder.Sym(name),
+ /* members */ members,
+ /* align */ max_align,
+ /* size */ size_with_padding,
+ /* size_no_padding */ size_without_padding);
+}
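+
+// Worked example (editorial addition), assuming the usual WGSL layout of vec3<f32>
+// (align 16, size 12): for the struct {fract : vec3<f32>, whole : vec3<f32>} built
+// by build_modf_result_vec below with n == 3, 'fract' is placed at offset 0,
+// 'whole' at RoundUp(16, 12) == 16, the unpadded size is 16 + 12 == 28, and the
+// padded size is RoundUp(16, 28) == 32.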
+
+const sem::Struct* build_modf_result(MatchState& state) {
+ auto* f32 = state.builder.create<sem::F32>();
+ return build_struct(state, "__modf_result", {{"fract", f32}, {"whole", f32}});
+}
+const sem::Struct* build_modf_result_vec(MatchState& state, Number& n) {
+ auto* vec_f32 = state.builder.create<sem::Vector>(state.builder.create<sem::F32>(), n.Value());
+ return build_struct(state, "__modf_result_vec" + std::to_string(n.Value()),
+ {{"fract", vec_f32}, {"whole", vec_f32}});
+}
+const sem::Struct* build_frexp_result(MatchState& state) {
+ auto* f32 = state.builder.create<sem::F32>();
+ auto* i32 = state.builder.create<sem::I32>();
+ return build_struct(state, "__frexp_result", {{"sig", f32}, {"exp", i32}});
+}
+const sem::Struct* build_frexp_result_vec(MatchState& state, Number& n) {
+ auto* vec_f32 = state.builder.create<sem::Vector>(state.builder.create<sem::F32>(), n.Value());
+ auto* vec_i32 = state.builder.create<sem::Vector>(state.builder.create<sem::I32>(), n.Value());
+ return build_struct(state, "__frexp_result_vec" + std::to_string(n.Value()),
+ {{"sig", vec_f32}, {"exp", vec_i32}});
+}
+
+const sem::Struct* build_atomic_compare_exchange_result(MatchState& state, const sem::Type* ty) {
+ return build_struct(
+ state, "__atomic_compare_exchange_result" + ty->FriendlyName(state.builder.Symbols()),
+ {{"old_value", const_cast<sem::Type*>(ty)},
+ {"exchanged", state.builder.create<sem::Bool>()}});
+}
+
+/// ParameterInfo describes a parameter
+struct ParameterInfo {
+ /// The parameter usage (parameter name in definition file)
+ const ParameterUsage usage;
+
+ /// Pointer to a list of indices that are used to match the parameter type.
+ /// The matcher indices index on Matchers::type and / or Matchers::number.
+ /// These indices are consumed by the matchers themselves.
+ /// The first index is always a TypeMatcher.
+ MatcherIndex const* const matcher_indices;
+};
+
+/// TemplateTypeInfo describes a template type
+struct TemplateTypeInfo {
+ /// Name of the template type (e.g. 'T')
+ const char* name;
+ /// Optional type matcher constraint.
+ /// Either an index in Matchers::type, or kNoMatcher
+ const MatcherIndex matcher_index;
+};
+
+/// TemplateNumberInfo describes a template number
+struct TemplateNumberInfo {
+ /// Name of the template number (e.g. 'N')
+ const char* name;
+ /// Optional number matcher constraint.
+ /// Either an index in Matchers::number, or kNoMatcher
+ const MatcherIndex matcher_index;
+};
+
+/// OverloadInfo describes a single function overload
+struct OverloadInfo {
+ /// Total number of parameters for the overload
+ const uint8_t num_parameters;
+ /// Total number of template types for the overload
+ const uint8_t num_template_types;
+ /// Total number of template numbers for the overload
+ const uint8_t num_template_numbers;
+ /// Pointer to the first template type
+ TemplateTypeInfo const* const template_types;
+ /// Pointer to the first template number
+ TemplateNumberInfo const* const template_numbers;
+ /// Pointer to the first parameter
+ ParameterInfo const* const parameters;
+ /// Pointer to a list of matcher indices that index on Matchers::type and
+ /// Matchers::number, used to build the return type. If the function has no
+ /// return type then this is null
+ MatcherIndex const* const return_matcher_indices;
+ /// The flags for the overload
+ OverloadFlags flags;
+ /// The function used to evaluate the overload at shader-creation time.
+ const_eval::Function* const const_eval_fn;
+};
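+
+// Illustrative sketch (editorial addition, hypothetical encoding): a builtin overload
+// such as `fn clamp<T>(T, T, T) -> T` would be described by an OverloadInfo with
+// num_parameters == 3, num_template_types == 1 (a TemplateTypeInfo named "T",
+// possibly constrained by a type matcher), num_template_numbers == 0, three
+// ParameterInfo entries whose matcher indices resolve to T, return_matcher_indices
+// that also resolve to T, and kIsBuiltin set in flags. The concrete tables are
+// generated into intrinsic_table.inl.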
+
+/// IntrinsicInfo describes a builtin function or operator overload
+struct IntrinsicInfo {
+ /// Number of overloads of the intrinsic
+ const uint8_t num_overloads;
+ /// Pointer to the start of the overloads for the function
+ OverloadInfo const* const overloads;
+};
+
+#include "intrinsic_table.inl"
+
+/// IntrinsicPrototype describes a fully matched intrinsic.
+struct IntrinsicPrototype {
+ /// Parameter describes a single parameter
+ struct Parameter {
+ /// Parameter type
+ const sem::Type* const type;
+ /// Parameter usage
+ ParameterUsage const usage = ParameterUsage::kNone;
+ };
+
+ /// Hasher provides a hash function for the IntrinsicPrototype
+ struct Hasher {
+ /// @param i the IntrinsicPrototype to create a hash for
+ /// @return the hash value
+ inline std::size_t operator()(const IntrinsicPrototype& i) const {
+ size_t hash = utils::Hash(i.parameters.size());
+ for (auto& p : i.parameters) {
+ utils::HashCombine(&hash, p.type, p.usage);
+ }
+ return utils::Hash(hash, i.overload, i.return_type);
+ }
+ };
+
+ const OverloadInfo* overload = nullptr;
+ sem::Type const* return_type = nullptr;
+ std::vector<Parameter> parameters;
+};
+
+/// Equality operator for IntrinsicPrototype
+bool operator==(const IntrinsicPrototype& a, const IntrinsicPrototype& b) {
+ if (a.overload != b.overload || a.return_type != b.return_type ||
+ a.parameters.size() != b.parameters.size()) {
+ return false;
+ }
+ for (size_t i = 0; i < a.parameters.size(); i++) {
+ auto& pa = a.parameters[i];
+ auto& pb = b.parameters[i];
+ if (pa.type != pb.type || pa.usage != pb.usage) {
+ return false;
+ }
+ }
+ return true;
+}
+
+/// Impl is the private implementation of the IntrinsicTable interface.
+class Impl : public IntrinsicTable {
+ public:
+ explicit Impl(ProgramBuilder& builder);
+
+ Builtin Lookup(sem::BuiltinType builtin_type,
+ const std::vector<const sem::Type*>& args,
+ const Source& source) override;
+
+ UnaryOperator Lookup(ast::UnaryOp op, const sem::Type* arg, const Source& source) override;
+
+ BinaryOperator Lookup(ast::BinaryOp op,
+ const sem::Type* lhs,
+ const sem::Type* rhs,
+ const Source& source,
+ bool is_compound) override;
+
+ const sem::CallTarget* Lookup(CtorConvIntrinsic type,
+ const sem::Type* template_arg,
+ const std::vector<const sem::Type*>& args,
+ const Source& source) override;
+
+ private:
+ /// Candidate holds information about an overload evaluated for resolution.
+ struct Candidate {
+ /// The candidate overload
+ const OverloadInfo* overload;
+ /// The template types and numbers
+ TemplateState templates;
+ /// The parameter types for the candidate overload
+ std::vector<IntrinsicPrototype::Parameter> parameters;
+ /// The match-score of the candidate overload.
+ /// A score of zero indicates an exact match.
+ /// Non-zero scores are used for diagnostics when no overload matches.
+ /// Lower scores are displayed first (top-most).
+ size_t score;
+ };
+
+ /// A list of candidates
+ using Candidates = std::vector<Candidate>;
+
+ /// Callback function when no overloads match.
+ using OnNoMatch = std::function<void(Candidates)>;
+
+ /// Sorts the candidates based on their score, with the lowest (best-ranking) scores first.
+ static inline void SortCandidates(Candidates& candidates) {
+ std::stable_sort(candidates.begin(), candidates.end(),
+ [&](const Candidate& a, const Candidate& b) { return a.score < b.score; });
+ }
+
+ /// Attempts to find a single intrinsic overload that matches the provided argument types.
+ /// @param intrinsic the intrinsic being called
+ /// @param intrinsic_name the name of the intrinsic
+ /// @param args the argument types
+ /// @param templates initial template state. This may contain explicitly specified template
+ /// arguments. For example `vec3<f32>()` would have the first template-type
+ /// defined as `f32`.
+ /// @param on_no_match an error callback when no intrinsic overloads matched the provided
+ /// arguments.
+ /// @returns the matched intrinsic. If no intrinsic could be matched then IntrinsicPrototype
+ /// will hold nullptrs for IntrinsicPrototype::overload and
+ /// IntrinsicPrototype::return_type.
+ IntrinsicPrototype MatchIntrinsic(const IntrinsicInfo& intrinsic,
+ const char* intrinsic_name,
+ const std::vector<const sem::Type*>& args,
+ TemplateState templates,
+ OnNoMatch on_no_match) const;
+
+ /// Evaluates the single overload for the provided argument types.
+ /// @param overload the overload being considered
+ /// @param args the argument types
+ /// @param templates initial template state. This may contain explicitly specified template
+ /// arguments. For example `vec3<f32>()` would have the first template-type
+    ///                  defined as `f32`.
+ /// @returns the evaluated Candidate information.
+ Candidate ScoreOverload(const OverloadInfo* overload,
+ const std::vector<const sem::Type*>& args,
+ TemplateState templates) const;
+
+ /// Performs overload resolution given the list of candidates, by ranking the conversions of
+    /// arguments to each of the candidate's parameter types.
+ /// @param candidates the list of candidate overloads
+ /// @param intrinsic_name the name of the intrinsic
+ /// @param args the argument types
+ /// @param templates initial template state. This may contain explicitly specified template
+ /// arguments. For example `vec3<f32>()` would have the first template-type
+    ///                  defined as `f32`.
+ /// @see https://www.w3.org/TR/WGSL/#overload-resolution-section
+ /// @returns the resolved Candidate.
+ Candidate ResolveCandidate(Candidates&& candidates,
+ const char* intrinsic_name,
+ const std::vector<const sem::Type*>& args,
+ TemplateState templates) const;
+
+ /// Match constructs a new MatchState
+ /// @param templates the template state used for matcher evaluation
+ /// @param overload the overload being evaluated
+ /// @param matcher_indices pointer to a list of matcher indices
+ MatchState Match(TemplateState& templates,
+ const OverloadInfo* overload,
+ MatcherIndex const* matcher_indices) const;
+
+ // Prints the overload for emitting diagnostics
+ void PrintOverload(std::ostream& ss,
+ const OverloadInfo* overload,
+ const char* intrinsic_name) const;
+
+ // Prints the list of candidates for emitting diagnostics
+ void PrintCandidates(std::ostream& ss,
+ const Candidates& candidates,
+ const char* intrinsic_name) const;
+
+ /// Raises an error when no overload is a clear winner of overload resolution
+ void ErrAmbiguousOverload(const char* intrinsic_name,
+ const std::vector<const sem::Type*>& args,
+ TemplateState templates,
+ Candidates candidates) const;
+
+ ProgramBuilder& builder;
+ Matchers matchers;
+ std::unordered_map<IntrinsicPrototype, sem::Builtin*, IntrinsicPrototype::Hasher> builtins;
+ std::unordered_map<IntrinsicPrototype, sem::TypeConstructor*, IntrinsicPrototype::Hasher>
+ constructors;
+ std::unordered_map<IntrinsicPrototype, sem::TypeConversion*, IntrinsicPrototype::Hasher>
+ converters;
+};
+
+/// @return a string representing a call to a builtin with the given argument
+/// types.
+std::string CallSignature(ProgramBuilder& builder,
+ const char* intrinsic_name,
+ const std::vector<const sem::Type*>& args,
+ const sem::Type* template_arg = nullptr) {
+ std::stringstream ss;
+ ss << intrinsic_name;
+ if (template_arg) {
+ ss << "<" << template_arg->FriendlyName(builder.Symbols()) << ">";
+ }
+ ss << "(";
+ {
+ bool first = true;
+ for (auto* arg : args) {
+ if (!first) {
+ ss << ", ";
+ }
+ first = false;
+ ss << arg->UnwrapRef()->FriendlyName(builder.Symbols());
+ }
+ }
+ ss << ")";
+
+ return ss.str();
+}
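+
+// Illustrative sketch (editorial addition): the strings produced by CallSignature are
+// what the "no matching call" diagnostics below quote. For hypothetical argument
+// types f32 and vec2<f32>:
+//
+//   CallSignature(builder, "max", {f32, vec2_f32});        // "max(f32, vec2<f32>)"
+//   CallSignature(builder, "vec3", {f32, f32, f32}, f32);  // "vec3<f32>(f32, f32, f32)"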
+
+std::string TemplateTypeMatcher::String(MatchState* state) const {
+ return state->overload->template_types[index_].name;
+}
+
+std::string TemplateNumberMatcher::String(MatchState* state) const {
+ return state->overload->template_numbers[index_].name;
+}
+
+Impl::Impl(ProgramBuilder& b) : builder(b) {}
+
+Impl::Builtin Impl::Lookup(sem::BuiltinType builtin_type,
+ const std::vector<const sem::Type*>& args,
+ const Source& source) {
+ const char* intrinsic_name = sem::str(builtin_type);
+
+ // Generates an error when no overloads match the provided arguments
+ auto on_no_match = [&](Candidates candidates) {
+ std::stringstream ss;
+ ss << "no matching call to " << CallSignature(builder, intrinsic_name, args) << std::endl;
+ if (!candidates.empty()) {
+ ss << std::endl
+ << candidates.size() << " candidate function" << (candidates.size() > 1 ? "s:" : ":")
+ << std::endl;
+ PrintCandidates(ss, candidates, intrinsic_name);
+ }
+ builder.Diagnostics().add_error(diag::System::Resolver, ss.str(), source);
+ };
+
+ // Resolve the intrinsic overload
+ auto match = MatchIntrinsic(kBuiltins[static_cast<size_t>(builtin_type)], intrinsic_name, args,
+ TemplateState{}, on_no_match);
+ if (!match.overload) {
+ return {};
+ }
+
+ // De-duplicate builtins that are identical.
+ auto* sem = utils::GetOrCreate(builtins, match, [&] {
+ std::vector<sem::Parameter*> params;
+ params.reserve(match.parameters.size());
+ for (auto& p : match.parameters) {
+ params.emplace_back(builder.create<sem::Parameter>(
+ nullptr, static_cast<uint32_t>(params.size()), p.type, ast::StorageClass::kNone,
+ ast::Access::kUndefined, p.usage));
+ }
+ sem::PipelineStageSet supported_stages;
+ if (match.overload->flags.Contains(OverloadFlag::kSupportsVertexPipeline)) {
+ supported_stages.Add(ast::PipelineStage::kVertex);
+ }
+ if (match.overload->flags.Contains(OverloadFlag::kSupportsFragmentPipeline)) {
+ supported_stages.Add(ast::PipelineStage::kFragment);
+ }
+ if (match.overload->flags.Contains(OverloadFlag::kSupportsComputePipeline)) {
+ supported_stages.Add(ast::PipelineStage::kCompute);
+ }
+ return builder.create<sem::Builtin>(
+ builtin_type, match.return_type, std::move(params), supported_stages,
+ match.overload->flags.Contains(OverloadFlag::kIsDeprecated));
+ });
+ return Builtin{sem, match.overload->const_eval_fn};
+}
+
+IntrinsicTable::UnaryOperator Impl::Lookup(ast::UnaryOp op,
+ const sem::Type* arg,
+ const Source& source) {
+ auto [intrinsic_index, intrinsic_name] = [&]() -> std::pair<size_t, const char*> {
+ switch (op) {
+ case ast::UnaryOp::kComplement:
+ return {kUnaryOperatorComplement, "operator ~ "};
+ case ast::UnaryOp::kNegation:
+ return {kUnaryOperatorMinus, "operator - "};
+ case ast::UnaryOp::kNot:
+ return {kUnaryOperatorNot, "operator ! "};
+ default:
+ return {0, "<unknown>"};
+ }
+ }();
+
+ // Generates an error when no overloads match the provided arguments
+ auto on_no_match = [&, name = intrinsic_name](Candidates candidates) {
+ std::stringstream ss;
+ ss << "no matching overload for " << CallSignature(builder, name, {arg}) << std::endl;
+ if (!candidates.empty()) {
+ ss << std::endl
+ << candidates.size() << " candidate operator" << (candidates.size() > 1 ? "s:" : ":")
+ << std::endl;
+ PrintCandidates(ss, candidates, name);
+ }
+ builder.Diagnostics().add_error(diag::System::Resolver, ss.str(), source);
+ };
+
+ // Resolve the intrinsic overload
+ auto match = MatchIntrinsic(kUnaryOperators[intrinsic_index], intrinsic_name, {arg},
+ TemplateState{}, on_no_match);
+ if (!match.overload) {
+ return {};
+ }
+
+ return UnaryOperator{match.return_type, match.parameters[0].type};
+}
+
+IntrinsicTable::BinaryOperator Impl::Lookup(ast::BinaryOp op,
+ const sem::Type* lhs,
+ const sem::Type* rhs,
+ const Source& source,
+ bool is_compound) {
+ auto [intrinsic_index, intrinsic_name] = [&]() -> std::pair<size_t, const char*> {
+ switch (op) {
+ case ast::BinaryOp::kAnd:
+ return {kBinaryOperatorAnd, is_compound ? "operator &= " : "operator & "};
+ case ast::BinaryOp::kOr:
+ return {kBinaryOperatorOr, is_compound ? "operator |= " : "operator | "};
+ case ast::BinaryOp::kXor:
+ return {kBinaryOperatorXor, is_compound ? "operator ^= " : "operator ^ "};
+ case ast::BinaryOp::kLogicalAnd:
+ return {kBinaryOperatorLogicalAnd, "operator && "};
+ case ast::BinaryOp::kLogicalOr:
+ return {kBinaryOperatorLogicalOr, "operator || "};
+ case ast::BinaryOp::kEqual:
+ return {kBinaryOperatorEqual, "operator == "};
+ case ast::BinaryOp::kNotEqual:
+ return {kBinaryOperatorNotEqual, "operator != "};
+ case ast::BinaryOp::kLessThan:
+ return {kBinaryOperatorLessThan, "operator < "};
+ case ast::BinaryOp::kGreaterThan:
+ return {kBinaryOperatorGreaterThan, "operator > "};
+ case ast::BinaryOp::kLessThanEqual:
+ return {kBinaryOperatorLessThanEqual, "operator <= "};
+ case ast::BinaryOp::kGreaterThanEqual:
+ return {kBinaryOperatorGreaterThanEqual, "operator >= "};
+ case ast::BinaryOp::kShiftLeft:
+ return {kBinaryOperatorShiftLeft, is_compound ? "operator <<= " : "operator << "};
+ case ast::BinaryOp::kShiftRight:
+ return {kBinaryOperatorShiftRight, is_compound ? "operator >>= " : "operator >> "};
+ case ast::BinaryOp::kAdd:
+ return {kBinaryOperatorPlus, is_compound ? "operator += " : "operator + "};
+ case ast::BinaryOp::kSubtract:
+ return {kBinaryOperatorMinus, is_compound ? "operator -= " : "operator - "};
+ case ast::BinaryOp::kMultiply:
+ return {kBinaryOperatorStar, is_compound ? "operator *= " : "operator * "};
+ case ast::BinaryOp::kDivide:
+ return {kBinaryOperatorDivide, is_compound ? "operator /= " : "operator / "};
+ case ast::BinaryOp::kModulo:
+ return {kBinaryOperatorModulo, is_compound ? "operator %= " : "operator % "};
+ default:
+ return {0, "<unknown>"};
+ }
+ }();
+
+ // Generates an error when no overloads match the provided arguments
+ auto on_no_match = [&, name = intrinsic_name](Candidates candidates) {
+ std::stringstream ss;
+ ss << "no matching overload for " << CallSignature(builder, name, {lhs, rhs}) << std::endl;
+ if (!candidates.empty()) {
+ ss << std::endl
+ << candidates.size() << " candidate operator" << (candidates.size() > 1 ? "s:" : ":")
+ << std::endl;
+ PrintCandidates(ss, candidates, name);
+ }
+ builder.Diagnostics().add_error(diag::System::Resolver, ss.str(), source);
+ };
+
+ // Resolve the intrinsic overload
+ auto match = MatchIntrinsic(kBinaryOperators[intrinsic_index], intrinsic_name, {lhs, rhs},
+ TemplateState{}, on_no_match);
+ if (!match.overload) {
+ return {};
+ }
+
+ return BinaryOperator{match.return_type, match.parameters[0].type, match.parameters[1].type};
+}
+
+const sem::CallTarget* Impl::Lookup(CtorConvIntrinsic type,
+ const sem::Type* template_arg,
+ const std::vector<const sem::Type*>& args,
+ const Source& source) {
+ auto name = str(type);
+
+ // Generates an error when no overloads match the provided arguments
+ auto on_no_match = [&](Candidates candidates) {
+ std::stringstream ss;
+ ss << "no matching constructor for " << CallSignature(builder, name, args, template_arg)
+ << std::endl;
+ Candidates ctor, conv;
+ for (auto candidate : candidates) {
+ if (candidate.overload->flags.Contains(OverloadFlag::kIsConstructor)) {
+ ctor.emplace_back(candidate);
+ } else {
+ conv.emplace_back(candidate);
+ }
+ }
+ if (!ctor.empty()) {
+ ss << std::endl
+ << ctor.size() << " candidate constructor" << (ctor.size() > 1 ? "s:" : ":")
+ << std::endl;
+ PrintCandidates(ss, ctor, name);
+ }
+ if (!conv.empty()) {
+ ss << std::endl
+ << conv.size() << " candidate conversion" << (conv.size() > 1 ? "s:" : ":")
+ << std::endl;
+ PrintCandidates(ss, conv, name);
+ }
+ builder.Diagnostics().add_error(diag::System::Resolver, ss.str(), source);
+ };
+
+ // If a template type was provided, then close the 0'th type with this.
+ TemplateState templates;
+ if (template_arg) {
+ templates.Type(0, template_arg);
+ }
+
+ // Resolve the intrinsic overload
+ auto match = MatchIntrinsic(kConstructorsAndConverters[static_cast<size_t>(type)], name, args,
+ templates, on_no_match);
+ if (!match.overload) {
+ return {};
+ }
+
+ // Was this overload a constructor or conversion?
+ if (match.overload->flags.Contains(OverloadFlag::kIsConstructor)) {
+ sem::ParameterList params;
+ params.reserve(match.parameters.size());
+ for (auto& p : match.parameters) {
+ params.emplace_back(builder.create<sem::Parameter>(
+ nullptr, static_cast<uint32_t>(params.size()), p.type, ast::StorageClass::kNone,
+ ast::Access::kUndefined, p.usage));
+ }
+ return utils::GetOrCreate(constructors, match, [&]() {
+ return builder.create<sem::TypeConstructor>(match.return_type, std::move(params));
+ });
+ }
+
+ // Conversion.
+ return utils::GetOrCreate(converters, match, [&]() {
+ auto param = builder.create<sem::Parameter>(
+ nullptr, 0, match.parameters[0].type, ast::StorageClass::kNone, ast::Access::kUndefined,
+ match.parameters[0].usage);
+ return builder.create<sem::TypeConversion>(match.return_type, param);
+ });
+}
+
+IntrinsicPrototype Impl::MatchIntrinsic(const IntrinsicInfo& intrinsic,
+ const char* intrinsic_name,
+ const std::vector<const sem::Type*>& args,
+ TemplateState templates,
+ OnNoMatch on_no_match) const {
+ size_t num_matched = 0;
+ size_t match_idx = 0;
+ Candidates candidates;
+ candidates.reserve(intrinsic.num_overloads);
+ for (size_t overload_idx = 0; overload_idx < static_cast<size_t>(intrinsic.num_overloads);
+ overload_idx++) {
+ auto candidate = ScoreOverload(&intrinsic.overloads[overload_idx], args, templates);
+ if (candidate.score == 0) {
+ match_idx = overload_idx;
+ num_matched++;
+ }
+ candidates.emplace_back(std::move(candidate));
+ }
+
+ // How many candidates matched?
+ if (num_matched == 0) {
+ // Sort the candidates with the most promising first
+ SortCandidates(candidates);
+ on_no_match(std::move(candidates));
+ return {};
+ }
+
+ Candidate match;
+
+ if (num_matched == 1) {
+ match = std::move(candidates[match_idx]);
+ } else {
+ match = ResolveCandidate(std::move(candidates), intrinsic_name, args, std::move(templates));
+ if (!match.overload) {
+ // Ambiguous overload. ResolveCandidate() will have already raised an error diagnostic.
+ return {};
+ }
+ }
+
+ // Build the return type
+ const sem::Type* return_type = nullptr;
+ if (auto* indices = match.overload->return_matcher_indices) {
+ Any any;
+ return_type = Match(match.templates, match.overload, indices).Type(&any);
+ if (!return_type) {
+ TINT_ICE(Resolver, builder.Diagnostics()) << "MatchState.Match() returned null";
+ return {};
+ }
+ } else {
+ return_type = builder.create<sem::Void>();
+ }
+
+ return IntrinsicPrototype{match.overload, return_type, std::move(match.parameters)};
+}
+
+Impl::Candidate Impl::ScoreOverload(const OverloadInfo* overload,
+ const std::vector<const sem::Type*>& args,
+ TemplateState templates) const {
+ // Penalty weights for overload mismatching.
+    // This scoring is only used to order the suggested overloads in the diagnostic emitted on
+    // overload mismatch; it has no impact on a correct program.
+ // The overloads with the lowest score will be displayed first (top-most).
+ constexpr int kMismatchedParamCountPenalty = 3;
+ constexpr int kMismatchedParamTypePenalty = 2;
+ constexpr int kMismatchedTemplateTypePenalty = 1;
+ constexpr int kMismatchedTemplateNumberPenalty = 1;
+
+ size_t num_parameters = static_cast<size_t>(overload->num_parameters);
+ size_t num_arguments = static_cast<size_t>(args.size());
+
+ size_t score = 0;
+
+ if (num_parameters != num_arguments) {
+ score += kMismatchedParamCountPenalty * (std::max(num_parameters, num_arguments) -
+ std::min(num_parameters, num_arguments));
+ }
+
+ // Invoke the matchers for each parameter <-> argument pair.
+ // If any arguments cannot be matched, then `score` will be increased.
+ // If the overload has any template types or numbers then these will be set based on the
+    // argument types. Template types may be refined by constraining with later argument types. For
+    // example, calling `F<T>(T, T)` with the argument types (abstract-int, i32) will first set T
+    // to abstract-int when matching the first argument, and then constrain T down to i32 when
+    // matching the second argument.
+ // Note that inferred template types are not tested against their matchers at this point.
+ auto num_params = std::min(num_parameters, num_arguments);
+ for (size_t p = 0; p < num_params; p++) {
+ auto& parameter = overload->parameters[p];
+ auto* indices = parameter.matcher_indices;
+ if (!Match(templates, overload, indices).Type(args[p]->UnwrapRef())) {
+ score += kMismatchedParamTypePenalty;
+ }
+ }
+
+ if (score == 0) {
+ // Check all constrained template types matched their constraint matchers.
+ // If the template type *does not* match any of the types in the constraint matcher, then
+ // `score` is incremented. If the template type *does* match a type, then the template type
+ // is replaced with the first matching type. The order of types in the template matcher is
+ // important here, which can be controlled with the [[precedence(N)]] decorations on the
+ // types in intrinsics.def.
+ for (size_t ot = 0; ot < overload->num_template_types; ot++) {
+ auto* matcher_index = &overload->template_types[ot].matcher_index;
+ if (*matcher_index != kNoMatcher) {
+ if (auto* template_type = templates.Type(ot)) {
+ if (auto* ty = Match(templates, overload, matcher_index).Type(template_type)) {
+ // Template type matched one of the types in the template type's matcher.
+ // Replace the template type with this type.
+ templates.SetType(ot, ty);
+ continue;
+ }
+ }
+ score += kMismatchedTemplateTypePenalty;
+ }
+ }
+ }
+
+ if (score == 0) {
+        // Check that all constrained template numbers matched.
+        // Unlike template types, template numbers are not refined here; we only check that each
+        // inferred number satisfies its constraint matcher on the overload. Increments `score`
+        // if a template number does not match its constraint matcher.
+ for (size_t on = 0; on < overload->num_template_numbers; on++) {
+ auto* matcher_index = &overload->template_numbers[on].matcher_index;
+ if (*matcher_index != kNoMatcher) {
+ auto template_num = templates.Num(on);
+ if (!template_num.IsValid() ||
+ !Match(templates, overload, matcher_index).Num(template_num).IsValid()) {
+ score += kMismatchedTemplateNumberPenalty;
+ }
+ }
+ }
+ }
+
+ // Now that all the template types have been finalized, we can construct the parameters.
+ std::vector<IntrinsicPrototype::Parameter> parameters;
+ if (score == 0) {
+ parameters.reserve(num_params);
+ for (size_t p = 0; p < num_params; p++) {
+ auto& parameter = overload->parameters[p];
+ auto* indices = parameter.matcher_indices;
+ auto* ty = Match(templates, overload, indices).Type(args[p]->UnwrapRef());
+ parameters.emplace_back(IntrinsicPrototype::Parameter{ty, parameter.usage});
+ }
+ }
+
+ return Candidate{overload, templates, parameters, score};
+}
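
ScoreOverload() never rejects a candidate outright; it accumulates penalty points so that near-misses sort towards the top of the diagnostic, and only a score of zero counts as a viable match. A standalone sketch of the same penalty idea, with plain type-name strings standing in for the matcher machinery (not Tint code; Overload and Score are hypothetical names):

    #include <algorithm>
    #include <cstddef>
    #include <string>
    #include <vector>

    namespace sketch {

    constexpr int kMismatchedParamCountPenalty = 3;  // worst: wrong number of arguments
    constexpr int kMismatchedParamTypePenalty = 2;   // an argument that fails its matcher

    // Hypothetical, simplified overload: just a list of parameter type names.
    struct Overload {
        std::vector<std::string> params;
    };

    // Returns 0 for a match; larger values mean "less promising" in the diagnostic.
    size_t Score(const Overload& overload, const std::vector<std::string>& args) {
        size_t score = 0;
        const size_t np = overload.params.size();
        const size_t na = args.size();
        if (np != na) {
            score += kMismatchedParamCountPenalty * (std::max(np, na) - std::min(np, na));
        }
        for (size_t i = 0; i < std::min(np, na); i++) {
            if (overload.params[i] != args[i]) {  // stands in for Match(...).Type(...)
                score += kMismatchedParamTypePenalty;
            }
        }
        return score;
    }

    }  // namespace sketch
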
+
+Impl::Candidate Impl::ResolveCandidate(Impl::Candidates&& candidates,
+ const char* intrinsic_name,
+ const std::vector<const sem::Type*>& args,
+ TemplateState templates) const {
+ std::vector<uint32_t> best_ranks(args.size(), 0xffffffff);
+ size_t num_matched = 0;
+ Candidate* best = nullptr;
+ for (auto& candidate : candidates) {
+ if (candidate.score > 0) {
+ continue; // Candidate has already been ruled out.
+ }
+        bool some_won = false;   // An argument ranked better (lower) than the 'best' overload's argument
+        bool some_lost = false;  // An argument ranked worse (higher) than the 'best' overload's argument
+ for (size_t i = 0; i < args.size(); i++) {
+ auto rank = sem::Type::ConversionRank(args[i], candidate.parameters[i].type);
+ if (best_ranks[i] > rank) {
+ best_ranks[i] = rank;
+ some_won = true;
+ } else if (best_ranks[i] < rank) {
+ some_lost = true;
+ }
+ }
+ // If no arguments of this candidate ranked worse than the previous best candidate, then
+ // this candidate becomes the new best candidate.
+ // If no arguments of this candidate ranked better than the previous best candidate, then
+ // this candidate is removed from the list of matches.
+ // If neither of the above apply, then we have two candidates with no clear winner, which
+ // results in an ambiguous overload error. In this situation the loop ends with
+ // `num_matched > 1`.
+ if (some_won) {
+ // One or more arguments of this candidate ranked better than the previous best
+ // candidate's argument(s).
+ num_matched++;
+ if (!some_lost) {
+                // All arguments ranked as good as or better than the previous best.
+ if (best) {
+ // Mark the previous best candidate as no longer being in the running, by
+                    // setting its score to a non-zero value. We pick 1 as that is as close to 0
+                    // (a match) as we can get.
+ best->score = 1;
+ num_matched--;
+ }
+ // This candidate is the new best.
+ best = &candidate;
+ }
+ } else {
+ // No arguments ranked better than the current best.
+ // Change the score of this candidate to a non-zero value, so that it's not considered a
+ // match.
+ candidate.score = 1;
+ }
+ }
+
+ if (num_matched > 1) {
+ // Re-sort the candidates with the most promising first
+ SortCandidates(candidates);
+ // Raise an error
+ ErrAmbiguousOverload(intrinsic_name, args, templates, candidates);
+ return {};
+ }
+
+ return std::move(*best);
+}
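
ResolveCandidate() breaks ties between several zero-score candidates by comparing implicit-conversion ranks argument by argument: a candidate only replaces the current best if at least one argument converts more cheaply and none convert less cheaply; otherwise the call is ambiguous. A standalone sketch of that comparison (not Tint code; Rank, Candidate and PickBest are hypothetical, with lower ranks meaning cheaper conversions):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    namespace sketch {

    using Rank = uint32_t;  // lower is a cheaper conversion

    struct Candidate {
        std::vector<Rank> ranks;  // one conversion rank per argument
        bool viable = true;       // corresponds to score == 0
    };

    // Returns the single best candidate, or nullptr if the call is ambiguous.
    Candidate* PickBest(std::vector<Candidate>& candidates, size_t num_args) {
        std::vector<Rank> best_ranks(num_args, 0xffffffffu);
        Candidate* best = nullptr;
        size_t num_matched = 0;
        for (auto& c : candidates) {
            if (!c.viable) {
                continue;
            }
            bool some_won = false;
            bool some_lost = false;
            for (size_t i = 0; i < num_args; i++) {
                if (c.ranks[i] < best_ranks[i]) {
                    best_ranks[i] = c.ranks[i];
                    some_won = true;
                } else if (c.ranks[i] > best_ranks[i]) {
                    some_lost = true;
                }
            }
            if (some_won) {
                num_matched++;
                if (!some_lost) {
                    if (best) {
                        best->viable = false;  // knocked out by the new best
                        num_matched--;
                    }
                    best = &c;
                }
            } else {
                c.viable = false;  // never better than the current best
            }
        }
        return (num_matched == 1) ? best : nullptr;
    }

    }  // namespace sketch
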
+
+MatchState Impl::Match(TemplateState& templates,
+ const OverloadInfo* overload,
+ MatcherIndex const* matcher_indices) const {
+ return MatchState(builder, templates, matchers, overload, matcher_indices);
+}
+
+void Impl::PrintOverload(std::ostream& ss,
+ const OverloadInfo* overload,
+ const char* intrinsic_name) const {
+ TemplateState templates;
+
+ ss << intrinsic_name << "(";
+ for (size_t p = 0; p < overload->num_parameters; p++) {
+ auto& parameter = overload->parameters[p];
+ if (p > 0) {
+ ss << ", ";
+ }
+ if (parameter.usage != ParameterUsage::kNone) {
+ ss << sem::str(parameter.usage) << ": ";
+ }
+ auto* indices = parameter.matcher_indices;
+ ss << Match(templates, overload, indices).TypeName();
+ }
+ ss << ")";
+ if (overload->return_matcher_indices) {
+ ss << " -> ";
+ auto* indices = overload->return_matcher_indices;
+ ss << Match(templates, overload, indices).TypeName();
+ }
+
+ bool first = true;
+ auto separator = [&] {
+ ss << (first ? " where: " : ", ");
+ first = false;
+ };
+ for (size_t i = 0; i < overload->num_template_types; i++) {
+ auto& template_type = overload->template_types[i];
+ if (template_type.matcher_index != kNoMatcher) {
+ separator();
+ ss << template_type.name;
+ auto* index = &template_type.matcher_index;
+ ss << " is " << Match(templates, overload, index).TypeName();
+ }
+ }
+ for (size_t i = 0; i < overload->num_template_numbers; i++) {
+ auto& template_number = overload->template_numbers[i];
+ if (template_number.matcher_index != kNoMatcher) {
+ separator();
+ ss << template_number.name;
+ auto* index = &template_number.matcher_index;
+ ss << " is " << Match(templates, overload, index).NumName();
+ }
+ }
+}
+
+void Impl::PrintCandidates(std::ostream& ss,
+ const Candidates& candidates,
+ const char* intrinsic_name) const {
+ for (auto& candidate : candidates) {
+ ss << " ";
+ PrintOverload(ss, candidate.overload, intrinsic_name);
+ ss << std::endl;
+ }
+}
+
+const sem::Type* MatchState::Type(const sem::Type* ty) {
+ MatcherIndex matcher_index = *matcher_indices_++;
+ auto* matcher = matchers.type[matcher_index];
+ return matcher->Match(*this, ty);
+}
+
+Number MatchState::Num(Number number) {
+ MatcherIndex matcher_index = *matcher_indices_++;
+ auto* matcher = matchers.number[matcher_index];
+ return matcher->Match(*this, number);
+}
+
+std::string MatchState::TypeName() {
+ MatcherIndex matcher_index = *matcher_indices_++;
+ auto* matcher = matchers.type[matcher_index];
+ return matcher->String(this);
+}
+
+std::string MatchState::NumName() {
+ MatcherIndex matcher_index = *matcher_indices_++;
+ auto* matcher = matchers.number[matcher_index];
+ return matcher->String(this);
+}
+
+void Impl::ErrAmbiguousOverload(const char* intrinsic_name,
+ const std::vector<const sem::Type*>& args,
+ TemplateState templates,
+ Candidates candidates) const {
+ std::stringstream ss;
+ ss << "ambiguous overload while attempting to match " << intrinsic_name;
+ for (size_t i = 0; i < std::numeric_limits<size_t>::max(); i++) {
+ if (auto* ty = templates.Type(i)) {
+ ss << ((i == 0) ? "<" : ", ") << ty->FriendlyName(builder.Symbols());
+ } else {
+ if (i > 0) {
+ ss << ">";
+ }
+ break;
+ }
+ }
+ ss << "(";
+ bool first = true;
+ for (auto* arg : args) {
+ if (!first) {
+ ss << ", ";
+ }
+ first = false;
+ ss << arg->FriendlyName(builder.Symbols());
+ }
+ ss << "):\n";
+ for (auto& candidate : candidates) {
+ if (candidate.score == 0) {
+ ss << " ";
+ PrintOverload(ss, candidate.overload, intrinsic_name);
+ ss << std::endl;
+ }
+ }
+ TINT_ICE(Resolver, builder.Diagnostics()) << ss.str();
+}
+
+} // namespace
+
+std::unique_ptr<IntrinsicTable> IntrinsicTable::Create(ProgramBuilder& builder) {
+ return std::make_unique<Impl>(builder);
+}
+
+IntrinsicTable::~IntrinsicTable() = default;
+
+} // namespace tint::resolver
+
+/// TypeInfo for the Any type declared in the anonymous namespace above
+TINT_INSTANTIATE_TYPEINFO(tint::resolver::Any);
diff --git a/chromium/third_party/dawn/src/tint/resolver/intrinsic_table.h b/chromium/third_party/dawn/src/tint/resolver/intrinsic_table.h
new file mode 100644
index 00000000000..058b253f849
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/resolver/intrinsic_table.h
@@ -0,0 +1,118 @@
+// Copyright 2021 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef SRC_TINT_RESOLVER_INTRINSIC_TABLE_H_
+#define SRC_TINT_RESOLVER_INTRINSIC_TABLE_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "src/tint/resolver/const_eval.h"
+#include "src/tint/resolver/ctor_conv_intrinsic.h"
+#include "src/tint/sem/builtin.h"
+
+// Forward declarations
+namespace tint {
+class ProgramBuilder;
+} // namespace tint
+
+namespace tint::resolver {
+
+/// IntrinsicTable is a lookup table of all the WGSL builtin functions and intrinsic operators
+class IntrinsicTable {
+ public:
+ /// @param builder the program builder
+ /// @return a pointer to a newly created IntrinsicTable
+ static std::unique_ptr<IntrinsicTable> Create(ProgramBuilder& builder);
+
+ /// Destructor
+ virtual ~IntrinsicTable();
+
+ /// Builtin describes a resolved builtin function
+ struct Builtin {
+ /// The semantic info for the builtin
+ const sem::Builtin* sem = nullptr;
+ /// The constant evaluation function
+ const_eval::Function* const_eval_fn = nullptr;
+ };
+
+ /// UnaryOperator describes a resolved unary operator
+ struct UnaryOperator {
+ /// The result type of the unary operator
+ const sem::Type* result = nullptr;
+ /// The type of the parameter of the unary operator
+ const sem::Type* parameter = nullptr;
+ };
+
+ /// BinaryOperator describes a resolved binary operator
+ struct BinaryOperator {
+ /// The result type of the binary operator
+ const sem::Type* result = nullptr;
+ /// The type of LHS parameter of the binary operator
+ const sem::Type* lhs = nullptr;
+ /// The type of RHS parameter of the binary operator
+ const sem::Type* rhs = nullptr;
+ };
+
+ /// Lookup looks for the builtin overload with the given signature, raising an error diagnostic
+ /// if the builtin was not found.
+ /// @param type the builtin type
+ /// @param args the argument types passed to the builtin function
+ /// @param source the source of the builtin call
+ /// @return the semantic builtin if found, otherwise nullptr
+ virtual Builtin Lookup(sem::BuiltinType type,
+ const std::vector<const sem::Type*>& args,
+ const Source& source) = 0;
+
+ /// Lookup looks for the unary op overload with the given signature, raising an error
+ /// diagnostic if the operator was not found.
+ /// @param op the unary operator
+ /// @param arg the type of the expression passed to the operator
+ /// @param source the source of the operator call
+ /// @return the operator call target signature. If the operator was not found
+ /// UnaryOperator::result will be nullptr.
+ virtual UnaryOperator Lookup(ast::UnaryOp op, const sem::Type* arg, const Source& source) = 0;
+
+ /// Lookup looks for the binary op overload with the given signature, raising an error
+ /// diagnostic if the operator was not found.
+ /// @param op the binary operator
+ /// @param lhs the LHS value type passed to the operator
+ /// @param rhs the RHS value type passed to the operator
+ /// @param source the source of the operator call
+ /// @param is_compound true if the binary operator is being used as a compound assignment
+ /// @return the operator call target signature. If the operator was not found
+ /// BinaryOperator::result will be nullptr.
+ virtual BinaryOperator Lookup(ast::BinaryOp op,
+ const sem::Type* lhs,
+ const sem::Type* rhs,
+ const Source& source,
+ bool is_compound) = 0;
+
+ /// Lookup looks for the type constructor or conversion overload for the given
+ /// CtorConvIntrinsic.
+ /// @param type the type being constructed or converted
+ /// @param template_arg the optional template argument
+ /// @param args the argument types passed to the constructor / conversion call
+ /// @param source the source of the call
+ /// @return a sem::TypeConstructor, sem::TypeConversion or nullptr if nothing matched
+ virtual const sem::CallTarget* Lookup(CtorConvIntrinsic type,
+ const sem::Type* template_arg,
+ const std::vector<const sem::Type*>& args,
+ const Source& source) = 0;
+};
+
+} // namespace tint::resolver
+
+#endif // SRC_TINT_RESOLVER_INTRINSIC_TABLE_H_
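
The class above is the whole resolver-facing surface: Create() plus four Lookup() overloads that either return a resolved call target or add an error to the builder's diagnostics. A hedged usage sketch against this API; the ProgramBuilder::create<sem::F32>() call, the sem::BuiltinType::kClamp enumerator and the include paths are assumptions based on the surrounding Tint tree rather than anything shown in this patch:

    #include "src/tint/program_builder.h"
    #include "src/tint/resolver/intrinsic_table.h"

    // Sketch: resolve clamp(f32, f32, f32) and inspect the result.
    void LookupClampExample() {
        tint::ProgramBuilder b;
        auto table = tint::resolver::IntrinsicTable::Create(b);
        auto* f32 = b.create<tint::sem::F32>();
        auto builtin = table->Lookup(tint::sem::BuiltinType::kClamp,
                                     {f32, f32, f32}, tint::Source{});
        if (builtin.sem) {
            // Matched: builtin.sem describes the clamp(f32, f32, f32) -> f32 overload.
        } else {
            // No match: an error diagnostic was added to b.Diagnostics().
        }
    }
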
diff --git a/chromium/third_party/dawn/src/tint/resolver/intrinsic_table.inl b/chromium/third_party/dawn/src/tint/resolver/intrinsic_table.inl
new file mode 100644
index 00000000000..521e5e14f7a
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/resolver/intrinsic_table.inl
@@ -0,0 +1,14214 @@
+// Copyright 2021 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+////////////////////////////////////////////////////////////////////////////////
+// File generated by tools/intrinsic-gen
+// using the template:
+// src/tint/resolver/intrinsic_table.inl.tmpl
+// and the intrinsic definition file:
+// src/tint/intrinsics.def
+//
+// Do not modify this file directly
+////////////////////////////////////////////////////////////////////////////////
+
+// clang-format off
+
+/// TypeMatcher for 'type bool'
+/// @see src/tint/intrinsics.def:73:6
+class Bool : public TypeMatcher {
+ public:
+ /// Checks whether the given type matches the matcher rules.
+ /// Match may define and refine the template types and numbers in state.
+ /// @param state the MatchState
+ /// @param type the type to match
+ /// @returns the canonicalized type on match, otherwise nullptr
+ const sem::Type* Match(MatchState& state,
+ const sem::Type* type) const override;
+ /// @param state the MatchState
+ /// @return a string representation of the matcher.
+ std::string String(MatchState* state) const override;
+};
+
+const sem::Type* Bool::Match(MatchState& state, const sem::Type* ty) const {
+ if (!match_bool(ty)) {
+ return nullptr;
+ }
+ return build_bool(state);
+}
+
+std::string Bool::String(MatchState*) const {
+ return "bool";
+}
+
+/// TypeMatcher for 'type af'
+/// @see src/tint/intrinsics.def:74:48
+class Af : public TypeMatcher {
+ public:
+ /// Checks whether the given type matches the matcher rules.
+ /// Match may define and refine the template types and numbers in state.
+ /// @param state the MatchState
+ /// @param type the type to match
+ /// @returns the canonicalized type on match, otherwise nullptr
+ const sem::Type* Match(MatchState& state,
+ const sem::Type* type) const override;
+ /// @param state the MatchState
+ /// @return a string representation of the matcher.
+ std::string String(MatchState* state) const override;
+};
+
+const sem::Type* Af::Match(MatchState& state, const sem::Type* ty) const {
+ if (!match_af(ty)) {
+ return nullptr;
+ }
+ return build_af(state);
+}
+
+std::string Af::String(MatchState*) const {
+ std::stringstream ss;
+ ss << "abstract-float";
+ return ss.str();
+}
+
+/// TypeMatcher for 'type ai'
+/// @see src/tint/intrinsics.def:75:48
+class Ai : public TypeMatcher {
+ public:
+ /// Checks whether the given type matches the matcher rules.
+ /// Match may define and refine the template types and numbers in state.
+ /// @param state the MatchState
+ /// @param type the type to match
+ /// @returns the canonicalized type on match, otherwise nullptr
+ const sem::Type* Match(MatchState& state,
+ const sem::Type* type) const override;
+ /// @param state the MatchState
+ /// @return a string representation of the matcher.
+ std::string String(MatchState* state) const override;
+};
+
+const sem::Type* Ai::Match(MatchState& state, const sem::Type* ty) const {
+ if (!match_ai(ty)) {
+ return nullptr;
+ }
+ return build_ai(state);
+}
+
+std::string Ai::String(MatchState*) const {
+ std::stringstream ss;
+ ss << "abstract-int";
+ return ss.str();
+}
+
+/// TypeMatcher for 'type i32'
+/// @see src/tint/intrinsics.def:76:21
+class I32 : public TypeMatcher {
+ public:
+ /// Checks whether the given type matches the matcher rules.
+ /// Match may define and refine the template types and numbers in state.
+ /// @param state the MatchState
+ /// @param type the type to match
+ /// @returns the canonicalized type on match, otherwise nullptr
+ const sem::Type* Match(MatchState& state,
+ const sem::Type* type) const override;
+ /// @param state the MatchState
+ /// @return a string representation of the matcher.
+ std::string String(MatchState* state) const override;
+};
+
+const sem::Type* I32::Match(MatchState& state, const sem::Type* ty) const {
+ if (!match_i32(ty)) {
+ return nullptr;
+ }
+ return build_i32(state);
+}
+
+std::string I32::String(MatchState*) const {
+ return "i32";
+}
+
+/// TypeMatcher for 'type u32'
+/// @see src/tint/intrinsics.def:77:21
+class U32 : public TypeMatcher {
+ public:
+ /// Checks whether the given type matches the matcher rules.
+ /// Match may define and refine the template types and numbers in state.
+ /// @param state the MatchState
+ /// @param type the type to match
+ /// @returns the canonicalized type on match, otherwise nullptr
+ const sem::Type* Match(MatchState& state,
+ const sem::Type* type) const override;
+ /// @param state the MatchState
+ /// @return a string representation of the matcher.
+ std::string String(MatchState* state) const override;
+};
+
+const sem::Type* U32::Match(MatchState& state, const sem::Type* ty) const {
+ if (!match_u32(ty)) {
+ return nullptr;
+ }
+ return build_u32(state);
+}
+
+std::string U32::String(MatchState*) const {
+ return "u32";
+}
+
+/// TypeMatcher for 'type f32'
+/// @see src/tint/intrinsics.def:78:21
+class F32 : public TypeMatcher {
+ public:
+ /// Checks whether the given type matches the matcher rules.
+ /// Match may define and refine the template types and numbers in state.
+ /// @param state the MatchState
+ /// @param type the type to match
+ /// @returns the canonicalized type on match, otherwise nullptr
+ const sem::Type* Match(MatchState& state,
+ const sem::Type* type) const override;
+ /// @param state the MatchState
+ /// @return a string representation of the matcher.
+ std::string String(MatchState* state) const override;
+};
+
+const sem::Type* F32::Match(MatchState& state, const sem::Type* ty) const {
+ if (!match_f32(ty)) {
+ return nullptr;
+ }
+ return build_f32(state);
+}
+
+std::string F32::String(MatchState*) const {
+ return "f32";
+}
+
+/// TypeMatcher for 'type vec2'
+/// @see src/tint/intrinsics.def:79:6
+class Vec2 : public TypeMatcher {
+ public:
+ /// Checks whether the given type matches the matcher rules.
+ /// Match may define and refine the template types and numbers in state.
+ /// @param state the MatchState
+ /// @param type the type to match
+ /// @returns the canonicalized type on match, otherwise nullptr
+ const sem::Type* Match(MatchState& state,
+ const sem::Type* type) const override;
+ /// @param state the MatchState
+ /// @return a string representation of the matcher.
+ std::string String(MatchState* state) const override;
+};
+
+const sem::Type* Vec2::Match(MatchState& state, const sem::Type* ty) const {
+ const sem::Type* T = nullptr;
+ if (!match_vec2(ty, T)) {
+ return nullptr;
+ }
+ T = state.Type(T);
+ if (T == nullptr) {
+ return nullptr;
+ }
+ return build_vec2(state, T);
+}
+
+std::string Vec2::String(MatchState* state) const {
+ const std::string T = state->TypeName();
+ return "vec2<" + T + ">";
+}
+
+/// TypeMatcher for 'type vec3'
+/// @see src/tint/intrinsics.def:80:6
+class Vec3 : public TypeMatcher {
+ public:
+ /// Checks whether the given type matches the matcher rules.
+ /// Match may define and refine the template types and numbers in state.
+ /// @param state the MatchState
+ /// @param type the type to match
+ /// @returns the canonicalized type on match, otherwise nullptr
+ const sem::Type* Match(MatchState& state,
+ const sem::Type* type) const override;
+ /// @param state the MatchState
+ /// @return a string representation of the matcher.
+ std::string String(MatchState* state) const override;
+};
+
+const sem::Type* Vec3::Match(MatchState& state, const sem::Type* ty) const {
+ const sem::Type* T = nullptr;
+ if (!match_vec3(ty, T)) {
+ return nullptr;
+ }
+ T = state.Type(T);
+ if (T == nullptr) {
+ return nullptr;
+ }
+ return build_vec3(state, T);
+}
+
+std::string Vec3::String(MatchState* state) const {
+ const std::string T = state->TypeName();
+ return "vec3<" + T + ">";
+}
+
+/// TypeMatcher for 'type vec4'
+/// @see src/tint/intrinsics.def:81:6
+class Vec4 : public TypeMatcher {
+ public:
+ /// Checks whether the given type matches the matcher rules.
+ /// Match may define and refine the template types and numbers in state.
+ /// @param state the MatchState
+ /// @param type the type to match
+ /// @returns the canonicalized type on match, otherwise nullptr
+ const sem::Type* Match(MatchState& state,
+ const sem::Type* type) const override;
+ /// @param state the MatchState
+ /// @return a string representation of the matcher.
+ std::string String(MatchState* state) const override;
+};
+
+const sem::Type* Vec4::Match(MatchState& state, const sem::Type* ty) const {
+ const sem::Type* T = nullptr;
+ if (!match_vec4(ty, T)) {
+ return nullptr;
+ }
+ T = state.Type(T);
+ if (T == nullptr) {
+ return nullptr;
+ }
+ return build_vec4(state, T);
+}
+
+std::string Vec4::String(MatchState* state) const {
+ const std::string T = state->TypeName();
+ return "vec4<" + T + ">";
+}
+
+/// TypeMatcher for 'type mat2x2'
+/// @see src/tint/intrinsics.def:82:6
+class Mat2X2 : public TypeMatcher {
+ public:
+ /// Checks whether the given type matches the matcher rules.
+ /// Match may define and refine the template types and numbers in state.
+ /// @param state the MatchState
+ /// @param type the type to match
+ /// @returns the canonicalized type on match, otherwise nullptr
+ const sem::Type* Match(MatchState& state,
+ const sem::Type* type) const override;
+ /// @param state the MatchState
+ /// @return a string representation of the matcher.
+ std::string String(MatchState* state) const override;
+};
+
+const sem::Type* Mat2X2::Match(MatchState& state, const sem::Type* ty) const {
+ const sem::Type* T = nullptr;
+ if (!match_mat2x2(ty, T)) {
+ return nullptr;
+ }
+ T = state.Type(T);
+ if (T == nullptr) {
+ return nullptr;
+ }
+ return build_mat2x2(state, T);
+}
+
+std::string Mat2X2::String(MatchState* state) const {
+ const std::string T = state->TypeName();
+ return "mat2x2<" + T + ">";
+}
+
+/// TypeMatcher for 'type mat2x3'
+/// @see src/tint/intrinsics.def:83:6
+class Mat2X3 : public TypeMatcher {
+ public:
+ /// Checks whether the given type matches the matcher rules.
+ /// Match may define and refine the template types and numbers in state.
+ /// @param state the MatchState
+ /// @param type the type to match
+ /// @returns the canonicalized type on match, otherwise nullptr
+ const sem::Type* Match(MatchState& state,
+ const sem::Type* type) const override;
+ /// @param state the MatchState
+ /// @return a string representation of the matcher.
+ std::string String(MatchState* state) const override;
+};
+
+const sem::Type* Mat2X3::Match(MatchState& state, const sem::Type* ty) const {
+ const sem::Type* T = nullptr;
+ if (!match_mat2x3(ty, T)) {
+ return nullptr;
+ }
+ T = state.Type(T);
+ if (T == nullptr) {
+ return nullptr;
+ }
+ return build_mat2x3(state, T);
+}
+
+std::string Mat2X3::String(MatchState* state) const {
+ const std::string T = state->TypeName();
+ return "mat2x3<" + T + ">";
+}
+
+/// TypeMatcher for 'type mat2x4'
+/// @see src/tint/intrinsics.def:84:6
+class Mat2X4 : public TypeMatcher {
+ public:
+ /// Checks whether the given type matches the matcher rules.
+ /// Match may define and refine the template types and numbers in state.
+ /// @param state the MatchState
+ /// @param type the type to match
+ /// @returns the canonicalized type on match, otherwise nullptr
+ const sem::Type* Match(MatchState& state,
+ const sem::Type* type) const override;
+ /// @param state the MatchState
+ /// @return a string representation of the matcher.
+ std::string String(MatchState* state) const override;
+};
+
+const sem::Type* Mat2X4::Match(MatchState& state, const sem::Type* ty) const {
+ const sem::Type* T = nullptr;
+ if (!match_mat2x4(ty, T)) {
+ return nullptr;
+ }
+ T = state.Type(T);
+ if (T == nullptr) {
+ return nullptr;
+ }
+ return build_mat2x4(state, T);
+}
+
+std::string Mat2X4::String(MatchState* state) const {
+ const std::string T = state->TypeName();
+ return "mat2x4<" + T + ">";
+}
+
+/// TypeMatcher for 'type mat3x2'
+/// @see src/tint/intrinsics.def:85:6
+class Mat3X2 : public TypeMatcher {
+ public:
+ /// Checks whether the given type matches the matcher rules.
+ /// Match may define and refine the template types and numbers in state.
+ /// @param state the MatchState
+ /// @param type the type to match
+ /// @returns the canonicalized type on match, otherwise nullptr
+ const sem::Type* Match(MatchState& state,
+ const sem::Type* type) const override;
+ /// @param state the MatchState
+ /// @return a string representation of the matcher.
+ std::string String(MatchState* state) const override;
+};
+
+const sem::Type* Mat3X2::Match(MatchState& state, const sem::Type* ty) const {
+ const sem::Type* T = nullptr;
+ if (!match_mat3x2(ty, T)) {
+ return nullptr;
+ }
+ T = state.Type(T);
+ if (T == nullptr) {
+ return nullptr;
+ }
+ return build_mat3x2(state, T);
+}
+
+std::string Mat3X2::String(MatchState* state) const {
+ const std::string T = state->TypeName();
+ return "mat3x2<" + T + ">";
+}
+
+/// TypeMatcher for 'type mat3x3'
+/// @see src/tint/intrinsics.def:86:6
+class Mat3X3 : public TypeMatcher {
+ public:
+ /// Checks whether the given type matches the matcher rules.
+ /// Match may define and refine the template types and numbers in state.
+ /// @param state the MatchState
+ /// @param type the type to match
+ /// @returns the canonicalized type on match, otherwise nullptr
+ const sem::Type* Match(MatchState& state,
+ const sem::Type* type) const override;
+ /// @param state the MatchState
+ /// @return a string representation of the matcher.
+ std::string String(MatchState* state) const override;
+};
+
+const sem::Type* Mat3X3::Match(MatchState& state, const sem::Type* ty) const {
+ const sem::Type* T = nullptr;
+ if (!match_mat3x3(ty, T)) {
+ return nullptr;
+ }
+ T = state.Type(T);
+ if (T == nullptr) {
+ return nullptr;
+ }
+ return build_mat3x3(state, T);
+}
+
+std::string Mat3X3::String(MatchState* state) const {
+ const std::string T = state->TypeName();
+ return "mat3x3<" + T + ">";
+}
+
+/// TypeMatcher for 'type mat3x4'
+/// @see src/tint/intrinsics.def:87:6
+class Mat3X4 : public TypeMatcher {
+ public:
+ /// Checks whether the given type matches the matcher rules.
+ /// Match may define and refine the template types and numbers in state.
+ /// @param state the MatchState
+ /// @param type the type to match
+ /// @returns the canonicalized type on match, otherwise nullptr
+ const sem::Type* Match(MatchState& state,
+ const sem::Type* type) const override;
+ /// @param state the MatchState
+ /// @return a string representation of the matcher.
+ std::string String(MatchState* state) const override;
+};
+
+const sem::Type* Mat3X4::Match(MatchState& state, const sem::Type* ty) const {
+ const sem::Type* T = nullptr;
+ if (!match_mat3x4(ty, T)) {
+ return nullptr;
+ }
+ T = state.Type(T);
+ if (T == nullptr) {
+ return nullptr;
+ }
+ return build_mat3x4(state, T);
+}
+
+std::string Mat3X4::String(MatchState* state) const {
+ const std::string T = state->TypeName();
+ return "mat3x4<" + T + ">";
+}
+
+/// TypeMatcher for 'type mat4x2'
+/// @see src/tint/intrinsics.def:88:6
+class Mat4X2 : public TypeMatcher {
+ public:
+ /// Checks whether the given type matches the matcher rules.
+ /// Match may define and refine the template types and numbers in state.
+ /// @param state the MatchState
+ /// @param type the type to match
+ /// @returns the canonicalized type on match, otherwise nullptr
+ const sem::Type* Match(MatchState& state,
+ const sem::Type* type) const override;
+ /// @param state the MatchState
+ /// @return a string representation of the matcher.
+ std::string String(MatchState* state) const override;
+};
+
+const sem::Type* Mat4X2::Match(MatchState& state, const sem::Type* ty) const {
+ const sem::Type* T = nullptr;
+ if (!match_mat4x2(ty, T)) {
+ return nullptr;
+ }
+ T = state.Type(T);
+ if (T == nullptr) {
+ return nullptr;
+ }
+ return build_mat4x2(state, T);
+}
+
+std::string Mat4X2::String(MatchState* state) const {
+ const std::string T = state->TypeName();
+ return "mat4x2<" + T + ">";
+}
+
+/// TypeMatcher for 'type mat4x3'
+/// @see src/tint/intrinsics.def:89:6
+class Mat4X3 : public TypeMatcher {
+ public:
+ /// Checks whether the given type matches the matcher rules.
+ /// Match may define and refine the template types and numbers in state.
+ /// @param state the MatchState
+ /// @param type the type to match
+ /// @returns the canonicalized type on match, otherwise nullptr
+ const sem::Type* Match(MatchState& state,
+ const sem::Type* type) const override;
+ /// @param state the MatchState
+ /// @return a string representation of the matcher.
+ std::string String(MatchState* state) const override;
+};
+
+const sem::Type* Mat4X3::Match(MatchState& state, const sem::Type* ty) const {
+ const sem::Type* T = nullptr;
+ if (!match_mat4x3(ty, T)) {
+ return nullptr;
+ }
+ T = state.Type(T);
+ if (T == nullptr) {
+ return nullptr;
+ }
+ return build_mat4x3(state, T);
+}
+
+std::string Mat4X3::String(MatchState* state) const {
+ const std::string T = state->TypeName();
+ return "mat4x3<" + T + ">";
+}
+
+/// TypeMatcher for 'type mat4x4'
+/// @see src/tint/intrinsics.def:90:6
+class Mat4X4 : public TypeMatcher {
+ public:
+ /// Checks whether the given type matches the matcher rules.
+ /// Match may define and refine the template types and numbers in state.
+ /// @param state the MatchState
+ /// @param type the type to match
+ /// @returns the canonicalized type on match, otherwise nullptr
+ const sem::Type* Match(MatchState& state,
+ const sem::Type* type) const override;
+ /// @param state the MatchState
+ /// @return a string representation of the matcher.
+ std::string String(MatchState* state) const override;
+};
+
+const sem::Type* Mat4X4::Match(MatchState& state, const sem::Type* ty) const {
+ const sem::Type* T = nullptr;
+ if (!match_mat4x4(ty, T)) {
+ return nullptr;
+ }
+ T = state.Type(T);
+ if (T == nullptr) {
+ return nullptr;
+ }
+ return build_mat4x4(state, T);
+}
+
+std::string Mat4X4::String(MatchState* state) const {
+ const std::string T = state->TypeName();
+ return "mat4x4<" + T + ">";
+}
+
+/// TypeMatcher for 'type vec'
+/// @see src/tint/intrinsics.def:91:34
+class Vec : public TypeMatcher {
+ public:
+ /// Checks whether the given type matches the matcher rules.
+ /// Match may define and refine the template types and numbers in state.
+ /// @param state the MatchState
+ /// @param type the type to match
+ /// @returns the canonicalized type on match, otherwise nullptr
+ const sem::Type* Match(MatchState& state,
+ const sem::Type* type) const override;
+ /// @param state the MatchState
+ /// @return a string representation of the matcher.
+ std::string String(MatchState* state) const override;
+};
+
+const sem::Type* Vec::Match(MatchState& state, const sem::Type* ty) const {
+ Number N = Number::invalid;
+ const sem::Type* T = nullptr;
+ if (!match_vec(ty, N, T)) {
+ return nullptr;
+ }
+ N = state.Num(N);
+ if (!N.IsValid()) {
+ return nullptr;
+ }
+ T = state.Type(T);
+ if (T == nullptr) {
+ return nullptr;
+ }
+ return build_vec(state, N, T);
+}
+
+std::string Vec::String(MatchState* state) const {
+ const std::string N = state->NumName();
+ const std::string T = state->TypeName();
+ std::stringstream ss;
+ ss << "vec" << N << "<" << T << ">";
+ return ss.str();
+}
+
+/// TypeMatcher for 'type mat'
+/// @see src/tint/intrinsics.def:92:34
+class Mat : public TypeMatcher {
+ public:
+ /// Checks whether the given type matches the matcher rules.
+ /// Match may define and refine the template types and numbers in state.
+ /// @param state the MatchState
+ /// @param type the type to match
+ /// @returns the canonicalized type on match, otherwise nullptr
+ const sem::Type* Match(MatchState& state,
+ const sem::Type* type) const override;
+ /// @param state the MatchState
+ /// @return a string representation of the matcher.
+ std::string String(MatchState* state) const override;
+};
+
+const sem::Type* Mat::Match(MatchState& state, const sem::Type* ty) const {
+ Number N = Number::invalid;
+ Number M = Number::invalid;
+ const sem::Type* T = nullptr;
+ if (!match_mat(ty, N, M, T)) {
+ return nullptr;
+ }
+ N = state.Num(N);
+ if (!N.IsValid()) {
+ return nullptr;
+ }
+ M = state.Num(M);
+ if (!M.IsValid()) {
+ return nullptr;
+ }
+ T = state.Type(T);
+ if (T == nullptr) {
+ return nullptr;
+ }
+ return build_mat(state, N, M, T);
+}
+
+std::string Mat::String(MatchState* state) const {
+ const std::string N = state->NumName();
+ const std::string M = state->NumName();
+ const std::string T = state->TypeName();
+ std::stringstream ss;
+ ss << "mat" << N << "x" << M << "<" << T << ">";
+ return ss.str();
+}
+
+/// TypeMatcher for 'type ptr'
+/// @see src/tint/intrinsics.def:93:6
+class Ptr : public TypeMatcher {
+ public:
+ /// Checks whether the given type matches the matcher rules.
+ /// Match may define and refine the template types and numbers in state.
+ /// @param state the MatchState
+ /// @param type the type to match
+ /// @returns the canonicalized type on match, otherwise nullptr
+ const sem::Type* Match(MatchState& state,
+ const sem::Type* type) const override;
+ /// @param state the MatchState
+ /// @return a string representation of the matcher.
+ std::string String(MatchState* state) const override;
+};
+
+const sem::Type* Ptr::Match(MatchState& state, const sem::Type* ty) const {
+ Number S = Number::invalid;
+ const sem::Type* T = nullptr;
+ Number A = Number::invalid;
+ if (!match_ptr(ty, S, T, A)) {
+ return nullptr;
+ }
+ S = state.Num(S);
+ if (!S.IsValid()) {
+ return nullptr;
+ }
+ T = state.Type(T);
+ if (T == nullptr) {
+ return nullptr;
+ }
+ A = state.Num(A);
+ if (!A.IsValid()) {
+ return nullptr;
+ }
+ return build_ptr(state, S, T, A);
+}
+
+std::string Ptr::String(MatchState* state) const {
+ const std::string S = state->NumName();
+ const std::string T = state->TypeName();
+ const std::string A = state->NumName();
+ return "ptr<" + S + ", " + T + ", " + A + ">";
+}
+
+/// TypeMatcher for 'type atomic'
+/// @see src/tint/intrinsics.def:94:6
+class Atomic : public TypeMatcher {
+ public:
+ /// Checks whether the given type matches the matcher rules.
+ /// Match may define and refine the template types and numbers in state.
+ /// @param state the MatchState
+ /// @param type the type to match
+ /// @returns the canonicalized type on match, otherwise nullptr
+ const sem::Type* Match(MatchState& state,
+ const sem::Type* type) const override;
+ /// @param state the MatchState
+ /// @return a string representation of the matcher.
+ std::string String(MatchState* state) const override;
+};
+
+const sem::Type* Atomic::Match(MatchState& state, const sem::Type* ty) const {
+ const sem::Type* T = nullptr;
+ if (!match_atomic(ty, T)) {
+ return nullptr;
+ }
+ T = state.Type(T);
+ if (T == nullptr) {
+ return nullptr;
+ }
+ return build_atomic(state, T);
+}
+
+std::string Atomic::String(MatchState* state) const {
+ const std::string T = state->TypeName();
+ return "atomic<" + T + ">";
+}
+
+/// TypeMatcher for 'type array'
+/// @see src/tint/intrinsics.def:95:6
+class Array : public TypeMatcher {
+ public:
+ /// Checks whether the given type matches the matcher rules.
+ /// Match may define and refine the template types and numbers in state.
+ /// @param state the MatchState
+ /// @param type the type to match
+ /// @returns the canonicalized type on match, otherwise nullptr
+ const sem::Type* Match(MatchState& state,
+ const sem::Type* type) const override;
+ /// @param state the MatchState
+ /// @return a string representation of the matcher.
+ std::string String(MatchState* state) const override;
+};
+
+const sem::Type* Array::Match(MatchState& state, const sem::Type* ty) const {
+ const sem::Type* T = nullptr;
+ if (!match_array(ty, T)) {
+ return nullptr;
+ }
+ T = state.Type(T);
+ if (T == nullptr) {
+ return nullptr;
+ }
+ return build_array(state, T);
+}
+
+std::string Array::String(MatchState* state) const {
+ const std::string T = state->TypeName();
+ return "array<" + T + ">";
+}
+
+/// TypeMatcher for 'type sampler'
+/// @see src/tint/intrinsics.def:96:6
+class Sampler : public TypeMatcher {
+ public:
+ /// Checks whether the given type matches the matcher rules.
+ /// Match may define and refine the template types and numbers in state.
+ /// @param state the MatchState
+ /// @param type the type to match
+ /// @returns the canonicalized type on match, otherwise nullptr
+ const sem::Type* Match(MatchState& state,
+ const sem::Type* type) const override;
+ /// @param state the MatchState
+ /// @return a string representation of the matcher.
+ std::string String(MatchState* state) const override;
+};
+
+const sem::Type* Sampler::Match(MatchState& state, const sem::Type* ty) const {
+ if (!match_sampler(ty)) {
+ return nullptr;
+ }
+ return build_sampler(state);
+}
+
+std::string Sampler::String(MatchState*) const {
+ return "sampler";
+}
+
+/// TypeMatcher for 'type sampler_comparison'
+/// @see src/tint/intrinsics.def:97:6
+class SamplerComparison : public TypeMatcher {
+ public:
+ /// Checks whether the given type matches the matcher rules.
+ /// Match may define and refine the template types and numbers in state.
+ /// @param state the MatchState
+ /// @param type the type to match
+ /// @returns the canonicalized type on match, otherwise nullptr
+ const sem::Type* Match(MatchState& state,
+ const sem::Type* type) const override;
+ /// @param state the MatchState
+ /// @return a string representation of the matcher.
+ std::string String(MatchState* state) const override;
+};
+
+const sem::Type* SamplerComparison::Match(MatchState& state, const sem::Type* ty) const {
+ if (!match_sampler_comparison(ty)) {
+ return nullptr;
+ }
+ return build_sampler_comparison(state);
+}
+
+std::string SamplerComparison::String(MatchState*) const {
+ return "sampler_comparison";
+}
+
+/// TypeMatcher for 'type texture_1d'
+/// @see src/tint/intrinsics.def:98:6
+class Texture1D : public TypeMatcher {
+ public:
+ /// Checks whether the given type matches the matcher rules.
+ /// Match may define and refine the template types and numbers in state.
+ /// @param state the MatchState
+ /// @param type the type to match
+ /// @returns the canonicalized type on match, otherwise nullptr
+ const sem::Type* Match(MatchState& state,
+ const sem::Type* type) const override;
+ /// @param state the MatchState
+ /// @return a string representation of the matcher.
+ std::string String(MatchState* state) const override;
+};
+
+const sem::Type* Texture1D::Match(MatchState& state, const sem::Type* ty) const {
+ const sem::Type* T = nullptr;
+ if (!match_texture_1d(ty, T)) {
+ return nullptr;
+ }
+ T = state.Type(T);
+ if (T == nullptr) {
+ return nullptr;
+ }
+ return build_texture_1d(state, T);
+}
+
+std::string Texture1D::String(MatchState* state) const {
+ const std::string T = state->TypeName();
+ return "texture_1d<" + T + ">";
+}
+
+/// TypeMatcher for 'type texture_2d'
+/// @see src/tint/intrinsics.def:99:6
+class Texture2D : public TypeMatcher {
+ public:
+ /// Checks whether the given type matches the matcher rules.
+ /// Match may define and refine the template types and numbers in state.
+ /// @param state the MatchState
+ /// @param type the type to match
+ /// @returns the canonicalized type on match, otherwise nullptr
+ const sem::Type* Match(MatchState& state,
+ const sem::Type* type) const override;
+ /// @param state the MatchState
+ /// @return a string representation of the matcher.
+ std::string String(MatchState* state) const override;
+};
+
+const sem::Type* Texture2D::Match(MatchState& state, const sem::Type* ty) const {
+ const sem::Type* T = nullptr;
+ if (!match_texture_2d(ty, T)) {
+ return nullptr;
+ }
+ T = state.Type(T);
+ if (T == nullptr) {
+ return nullptr;
+ }
+ return build_texture_2d(state, T);
+}
+
+std::string Texture2D::String(MatchState* state) const {
+ const std::string T = state->TypeName();
+ return "texture_2d<" + T + ">";
+}
+
+/// TypeMatcher for 'type texture_2d_array'
+/// @see src/tint/intrinsics.def:100:6
+class Texture2DArray : public TypeMatcher {
+ public:
+ /// Checks whether the given type matches the matcher rules.
+ /// Match may define and refine the template types and numbers in state.
+ /// @param state the MatchState
+ /// @param type the type to match
+ /// @returns the canonicalized type on match, otherwise nullptr
+ const sem::Type* Match(MatchState& state,
+ const sem::Type* type) const override;
+ /// @param state the MatchState
+ /// @return a string representation of the matcher.
+ std::string String(MatchState* state) const override;
+};
+
+const sem::Type* Texture2DArray::Match(MatchState& state, const sem::Type* ty) const {
+ const sem::Type* T = nullptr;
+ if (!match_texture_2d_array(ty, T)) {
+ return nullptr;
+ }
+ T = state.Type(T);
+ if (T == nullptr) {
+ return nullptr;
+ }
+ return build_texture_2d_array(state, T);
+}
+
+std::string Texture2DArray::String(MatchState* state) const {
+ const std::string T = state->TypeName();
+ return "texture_2d_array<" + T + ">";
+}
+
+/// TypeMatcher for 'type texture_3d'
+/// @see src/tint/intrinsics.def:101:6
+class Texture3D : public TypeMatcher {
+ public:
+ /// Checks whether the given type matches the matcher rules.
+ /// Match may define and refine the template types and numbers in state.
+ /// @param state the MatchState
+ /// @param type the type to match
+ /// @returns the canonicalized type on match, otherwise nullptr
+ const sem::Type* Match(MatchState& state,
+ const sem::Type* type) const override;
+ /// @param state the MatchState
+ /// @return a string representation of the matcher.
+ std::string String(MatchState* state) const override;
+};
+
+const sem::Type* Texture3D::Match(MatchState& state, const sem::Type* ty) const {
+ const sem::Type* T = nullptr;
+ if (!match_texture_3d(ty, T)) {
+ return nullptr;
+ }
+ T = state.Type(T);
+ if (T == nullptr) {
+ return nullptr;
+ }
+ return build_texture_3d(state, T);
+}
+
+std::string Texture3D::String(MatchState* state) const {
+ const std::string T = state->TypeName();
+ return "texture_3d<" + T + ">";
+}
+
+/// TypeMatcher for 'type texture_cube'
+/// @see src/tint/intrinsics.def:102:6
+class TextureCube : public TypeMatcher {
+ public:
+ /// Checks whether the given type matches the matcher rules.
+ /// Match may define and refine the template types and numbers in state.
+ /// @param state the MatchState
+ /// @param type the type to match
+ /// @returns the canonicalized type on match, otherwise nullptr
+ const sem::Type* Match(MatchState& state,
+ const sem::Type* type) const override;
+ /// @param state the MatchState
+ /// @return a string representation of the matcher.
+ std::string String(MatchState* state) const override;
+};
+
+const sem::Type* TextureCube::Match(MatchState& state, const sem::Type* ty) const {
+ const sem::Type* T = nullptr;
+ if (!match_texture_cube(ty, T)) {
+ return nullptr;
+ }
+ T = state.Type(T);
+ if (T == nullptr) {
+ return nullptr;
+ }
+ return build_texture_cube(state, T);
+}
+
+std::string TextureCube::String(MatchState* state) const {
+ const std::string T = state->TypeName();
+ return "texture_cube<" + T + ">";
+}
+
+/// TypeMatcher for 'type texture_cube_array'
+/// @see src/tint/intrinsics.def:103:6
+class TextureCubeArray : public TypeMatcher {
+ public:
+ /// Checks whether the given type matches the matcher rules.
+ /// Match may define and refine the template types and numbers in state.
+ /// @param state the MatchState
+ /// @param type the type to match
+ /// @returns the canonicalized type on match, otherwise nullptr
+ const sem::Type* Match(MatchState& state,
+ const sem::Type* type) const override;
+ /// @param state the MatchState
+ /// @return a string representation of the matcher.
+ std::string String(MatchState* state) const override;
+};
+
+const sem::Type* TextureCubeArray::Match(MatchState& state, const sem::Type* ty) const {
+ const sem::Type* T = nullptr;
+ if (!match_texture_cube_array(ty, T)) {
+ return nullptr;
+ }
+ T = state.Type(T);
+ if (T == nullptr) {
+ return nullptr;
+ }
+ return build_texture_cube_array(state, T);
+}
+
+std::string TextureCubeArray::String(MatchState* state) const {
+ const std::string T = state->TypeName();
+ return "texture_cube_array<" + T + ">";
+}
+
+/// TypeMatcher for 'type texture_multisampled_2d'
+/// @see src/tint/intrinsics.def:104:6
+class TextureMultisampled2D : public TypeMatcher {
+ public:
+ /// Checks whether the given type matches the matcher rules.
+ /// Match may define and refine the template types and numbers in state.
+ /// @param state the MatchState
+ /// @param type the type to match
+ /// @returns the canonicalized type on match, otherwise nullptr
+ const sem::Type* Match(MatchState& state,
+ const sem::Type* type) const override;
+ /// @param state the MatchState
+ /// @return a string representation of the matcher.
+ std::string String(MatchState* state) const override;
+};
+
+const sem::Type* TextureMultisampled2D::Match(MatchState& state, const sem::Type* ty) const {
+ const sem::Type* T = nullptr;
+ if (!match_texture_multisampled_2d(ty, T)) {
+ return nullptr;
+ }
+ T = state.Type(T);
+ if (T == nullptr) {
+ return nullptr;
+ }
+ return build_texture_multisampled_2d(state, T);
+}
+
+std::string TextureMultisampled2D::String(MatchState* state) const {
+ const std::string T = state->TypeName();
+ return "texture_multisampled_2d<" + T + ">";
+}
+
+/// TypeMatcher for 'type texture_depth_2d'
+/// @see src/tint/intrinsics.def:105:6
+class TextureDepth2D : public TypeMatcher {
+ public:
+ /// Checks whether the given type matches the matcher rules.
+ /// Match may define and refine the template types and numbers in state.
+ /// @param state the MatchState
+ /// @param type the type to match
+ /// @returns the canonicalized type on match, otherwise nullptr
+ const sem::Type* Match(MatchState& state,
+ const sem::Type* type) const override;
+ /// @param state the MatchState
+ /// @return a string representation of the matcher.
+ std::string String(MatchState* state) const override;
+};
+
+const sem::Type* TextureDepth2D::Match(MatchState& state, const sem::Type* ty) const {
+ if (!match_texture_depth_2d(ty)) {
+ return nullptr;
+ }
+ return build_texture_depth_2d(state);
+}
+
+std::string TextureDepth2D::String(MatchState*) const {
+ return "texture_depth_2d";
+}
+
+/// TypeMatcher for 'type texture_depth_2d_array'
+/// @see src/tint/intrinsics.def:106:6
+class TextureDepth2DArray : public TypeMatcher {
+ public:
+ /// Checks whether the given type matches the matcher rules.
+ /// Match may define and refine the template types and numbers in state.
+ /// @param state the MatchState
+ /// @param type the type to match
+ /// @returns the canonicalized type on match, otherwise nullptr
+ const sem::Type* Match(MatchState& state,
+ const sem::Type* type) const override;
+ /// @param state the MatchState
+ /// @return a string representation of the matcher.
+ std::string String(MatchState* state) const override;
+};
+
+const sem::Type* TextureDepth2DArray::Match(MatchState& state, const sem::Type* ty) const {
+ if (!match_texture_depth_2d_array(ty)) {
+ return nullptr;
+ }
+ return build_texture_depth_2d_array(state);
+}
+
+std::string TextureDepth2DArray::String(MatchState*) const {
+ return "texture_depth_2d_array";
+}
+
+/// TypeMatcher for 'type texture_depth_cube'
+/// @see src/tint/intrinsics.def:107:6
+class TextureDepthCube : public TypeMatcher {
+ public:
+ /// Checks whether the given type matches the matcher rules.
+ /// Match may define and refine the template types and numbers in state.
+ /// @param state the MatchState
+ /// @param type the type to match
+ /// @returns the canonicalized type on match, otherwise nullptr
+ const sem::Type* Match(MatchState& state,
+ const sem::Type* type) const override;
+ /// @param state the MatchState
+ /// @return a string representation of the matcher.
+ std::string String(MatchState* state) const override;
+};
+
+const sem::Type* TextureDepthCube::Match(MatchState& state, const sem::Type* ty) const {
+ if (!match_texture_depth_cube(ty)) {
+ return nullptr;
+ }
+ return build_texture_depth_cube(state);
+}
+
+std::string TextureDepthCube::String(MatchState*) const {
+ return "texture_depth_cube";
+}
+
+/// TypeMatcher for 'type texture_depth_cube_array'
+/// @see src/tint/intrinsics.def:108:6
+class TextureDepthCubeArray : public TypeMatcher {
+ public:
+ /// Checks whether the given type matches the matcher rules.
+ /// Match may define and refine the template types and numbers in state.
+ /// @param state the MatchState
+ /// @param type the type to match
+ /// @returns the canonicalized type on match, otherwise nullptr
+ const sem::Type* Match(MatchState& state,
+ const sem::Type* type) const override;
+ /// @param state the MatchState
+ /// @return a string representation of the matcher.
+ std::string String(MatchState* state) const override;
+};
+
+const sem::Type* TextureDepthCubeArray::Match(MatchState& state, const sem::Type* ty) const {
+ if (!match_texture_depth_cube_array(ty)) {
+ return nullptr;
+ }
+ return build_texture_depth_cube_array(state);
+}
+
+std::string TextureDepthCubeArray::String(MatchState*) const {
+ return "texture_depth_cube_array";
+}
+
+/// TypeMatcher for 'type texture_depth_multisampled_2d'
+/// @see src/tint/intrinsics.def:109:6
+class TextureDepthMultisampled2D : public TypeMatcher {
+ public:
+ /// Checks whether the given type matches the matcher rules.
+ /// Match may define and refine the template types and numbers in state.
+ /// @param state the MatchState
+ /// @param type the type to match
+ /// @returns the canonicalized type on match, otherwise nullptr
+ const sem::Type* Match(MatchState& state,
+ const sem::Type* type) const override;
+ /// @param state the MatchState
+ /// @return a string representation of the matcher.
+ std::string String(MatchState* state) const override;
+};
+
+const sem::Type* TextureDepthMultisampled2D::Match(MatchState& state, const sem::Type* ty) const {
+ if (!match_texture_depth_multisampled_2d(ty)) {
+ return nullptr;
+ }
+ return build_texture_depth_multisampled_2d(state);
+}
+
+std::string TextureDepthMultisampled2D::String(MatchState*) const {
+ return "texture_depth_multisampled_2d";
+}
+
+/// TypeMatcher for 'type texture_storage_1d'
+/// @see src/tint/intrinsics.def:110:6
+class TextureStorage1D : public TypeMatcher {
+ public:
+ /// Checks whether the given type matches the matcher rules.
+ /// Match may define and refine the template types and numbers in state.
+ /// @param state the MatchState
+ /// @param type the type to match
+ /// @returns the canonicalized type on match, otherwise nullptr
+ const sem::Type* Match(MatchState& state,
+ const sem::Type* type) const override;
+ /// @param state the MatchState
+ /// @return a string representation of the matcher.
+ std::string String(MatchState* state) const override;
+};
+
+const sem::Type* TextureStorage1D::Match(MatchState& state, const sem::Type* ty) const {
+ Number F = Number::invalid;
+ Number A = Number::invalid;
+ if (!match_texture_storage_1d(ty, F, A)) {
+ return nullptr;
+ }
+ F = state.Num(F);
+ if (!F.IsValid()) {
+ return nullptr;
+ }
+ A = state.Num(A);
+ if (!A.IsValid()) {
+ return nullptr;
+ }
+ return build_texture_storage_1d(state, F, A);
+}
+
+std::string TextureStorage1D::String(MatchState* state) const {
+ const std::string F = state->NumName();
+ const std::string A = state->NumName();
+ return "texture_storage_1d<" + F + ", " + A + ">";
+}
+
+/// TypeMatcher for 'type texture_storage_2d'
+/// @see src/tint/intrinsics.def:111:6
+class TextureStorage2D : public TypeMatcher {
+ public:
+ /// Checks whether the given type matches the matcher rules.
+ /// Match may define and refine the template types and numbers in state.
+ /// @param state the MatchState
+ /// @param type the type to match
+ /// @returns the canonicalized type on match, otherwise nullptr
+ const sem::Type* Match(MatchState& state,
+ const sem::Type* type) const override;
+ /// @param state the MatchState
+ /// @return a string representation of the matcher.
+ std::string String(MatchState* state) const override;
+};
+
+const sem::Type* TextureStorage2D::Match(MatchState& state, const sem::Type* ty) const {
+ Number F = Number::invalid;
+ Number A = Number::invalid;
+ if (!match_texture_storage_2d(ty, F, A)) {
+ return nullptr;
+ }
+ F = state.Num(F);
+ if (!F.IsValid()) {
+ return nullptr;
+ }
+ A = state.Num(A);
+ if (!A.IsValid()) {
+ return nullptr;
+ }
+ return build_texture_storage_2d(state, F, A);
+}
+
+std::string TextureStorage2D::String(MatchState* state) const {
+ const std::string F = state->NumName();
+ const std::string A = state->NumName();
+ return "texture_storage_2d<" + F + ", " + A + ">";
+}
+
+/// TypeMatcher for 'type texture_storage_2d_array'
+/// @see src/tint/intrinsics.def:112:6
+class TextureStorage2DArray : public TypeMatcher {
+ public:
+ /// Checks whether the given type matches the matcher rules.
+ /// Match may define and refine the template types and numbers in state.
+ /// @param state the MatchState
+ /// @param type the type to match
+ /// @returns the canonicalized type on match, otherwise nullptr
+ const sem::Type* Match(MatchState& state,
+ const sem::Type* type) const override;
+ /// @param state the MatchState
+ /// @return a string representation of the matcher.
+ std::string String(MatchState* state) const override;
+};
+
+const sem::Type* TextureStorage2DArray::Match(MatchState& state, const sem::Type* ty) const {
+ Number F = Number::invalid;
+ Number A = Number::invalid;
+ if (!match_texture_storage_2d_array(ty, F, A)) {
+ return nullptr;
+ }
+ F = state.Num(F);
+ if (!F.IsValid()) {
+ return nullptr;
+ }
+ A = state.Num(A);
+ if (!A.IsValid()) {
+ return nullptr;
+ }
+ return build_texture_storage_2d_array(state, F, A);
+}
+
+std::string TextureStorage2DArray::String(MatchState* state) const {
+ const std::string F = state->NumName();
+ const std::string A = state->NumName();
+ return "texture_storage_2d_array<" + F + ", " + A + ">";
+}
+
+/// TypeMatcher for 'type texture_storage_3d'
+/// @see src/tint/intrinsics.def:113:6
+class TextureStorage3D : public TypeMatcher {
+ public:
+ /// Checks whether the given type matches the matcher rules.
+ /// Match may define and refine the template types and numbers in state.
+ /// @param state the MatchState
+ /// @param type the type to match
+ /// @returns the canonicalized type on match, otherwise nullptr
+ const sem::Type* Match(MatchState& state,
+ const sem::Type* type) const override;
+ /// @param state the MatchState
+ /// @return a string representation of the matcher.
+ std::string String(MatchState* state) const override;
+};
+
+const sem::Type* TextureStorage3D::Match(MatchState& state, const sem::Type* ty) const {
+ Number F = Number::invalid;
+ Number A = Number::invalid;
+ if (!match_texture_storage_3d(ty, F, A)) {
+ return nullptr;
+ }
+ F = state.Num(F);
+ if (!F.IsValid()) {
+ return nullptr;
+ }
+ A = state.Num(A);
+ if (!A.IsValid()) {
+ return nullptr;
+ }
+ return build_texture_storage_3d(state, F, A);
+}
+
+std::string TextureStorage3D::String(MatchState* state) const {
+ const std::string F = state->NumName();
+ const std::string A = state->NumName();
+ return "texture_storage_3d<" + F + ", " + A + ">";
+}
+
+/// TypeMatcher for 'type texture_external'
+/// @see src/tint/intrinsics.def:114:6
+class TextureExternal : public TypeMatcher {
+ public:
+ /// Checks whether the given type matches the matcher rules.
+ /// Match may define and refine the template types and numbers in state.
+ /// @param state the MatchState
+ /// @param type the type to match
+ /// @returns the canonicalized type on match, otherwise nullptr
+ const sem::Type* Match(MatchState& state,
+ const sem::Type* type) const override;
+ /// @param state the MatchState
+ /// @return a string representation of the matcher.
+ std::string String(MatchState* state) const override;
+};
+
+const sem::Type* TextureExternal::Match(MatchState& state, const sem::Type* ty) const {
+ if (!match_texture_external(ty)) {
+ return nullptr;
+ }
+ return build_texture_external(state);
+}
+
+std::string TextureExternal::String(MatchState*) const {
+ return "texture_external";
+}
+
+/// TypeMatcher for 'type __modf_result'
+/// @see src/tint/intrinsics.def:116:6
+class ModfResult : public TypeMatcher {
+ public:
+ /// Checks whether the given type matches the matcher rules.
+ /// Match may define and refine the template types and numbers in state.
+ /// @param state the MatchState
+ /// @param type the type to match
+ /// @returns the canonicalized type on match, otherwise nullptr
+ const sem::Type* Match(MatchState& state,
+ const sem::Type* type) const override;
+ /// @param state the MatchState
+ /// @return a string representation of the matcher.
+ std::string String(MatchState* state) const override;
+};
+
+const sem::Type* ModfResult::Match(MatchState& state, const sem::Type* ty) const {
+ if (!match_modf_result(ty)) {
+ return nullptr;
+ }
+ return build_modf_result(state);
+}
+
+std::string ModfResult::String(MatchState*) const {
+ return "__modf_result";
+}
+
+/// TypeMatcher for 'type __modf_result_vec'
+/// @see src/tint/intrinsics.def:117:39
+class ModfResultVec : public TypeMatcher {
+ public:
+ /// Checks whether the given type matches the matcher rules.
+ /// Match may define and refine the template types and numbers in state.
+ /// @param state the MatchState
+ /// @param type the type to match
+ /// @returns the canonicalized type on match, otherwise nullptr
+ const sem::Type* Match(MatchState& state,
+ const sem::Type* type) const override;
+ /// @param state the MatchState
+ /// @return a string representation of the matcher.
+ std::string String(MatchState* state) const override;
+};
+
+const sem::Type* ModfResultVec::Match(MatchState& state, const sem::Type* ty) const {
+ Number N = Number::invalid;
+ if (!match_modf_result_vec(ty, N)) {
+ return nullptr;
+ }
+ N = state.Num(N);
+ if (!N.IsValid()) {
+ return nullptr;
+ }
+ return build_modf_result_vec(state, N);
+}
+
+std::string ModfResultVec::String(MatchState* state) const {
+ const std::string N = state->NumName();
+ std::stringstream ss;
+ ss << "__modf_result_vec" << N;
+ return ss.str();
+}
+
+/// TypeMatcher for 'type __frexp_result'
+/// @see src/tint/intrinsics.def:118:6
+class FrexpResult : public TypeMatcher {
+ public:
+ /// Checks whether the given type matches the matcher rules.
+ /// Match may define and refine the template types and numbers in state.
+ /// @param state the MatchState
+ /// @param type the type to match
+ /// @returns the canonicalized type on match, otherwise nullptr
+ const sem::Type* Match(MatchState& state,
+ const sem::Type* type) const override;
+ /// @param state the MatchState
+ /// @return a string representation of the matcher.
+ std::string String(MatchState* state) const override;
+};
+
+const sem::Type* FrexpResult::Match(MatchState& state, const sem::Type* ty) const {
+ if (!match_frexp_result(ty)) {
+ return nullptr;
+ }
+ return build_frexp_result(state);
+}
+
+std::string FrexpResult::String(MatchState*) const {
+ return "__frexp_result";
+}
+
+/// TypeMatcher for 'type __frexp_result_vec'
+/// @see src/tint/intrinsics.def:119:40
+class FrexpResultVec : public TypeMatcher {
+ public:
+ /// Checks whether the given type matches the matcher rules.
+ /// Match may define and refine the template types and numbers in state.
+ /// @param state the MatchState
+ /// @param type the type to match
+ /// @returns the canonicalized type on match, otherwise nullptr
+ const sem::Type* Match(MatchState& state,
+ const sem::Type* type) const override;
+ /// @param state the MatchState
+ /// @return a string representation of the matcher.
+ std::string String(MatchState* state) const override;
+};
+
+const sem::Type* FrexpResultVec::Match(MatchState& state, const sem::Type* ty) const {
+ Number N = Number::invalid;
+ if (!match_frexp_result_vec(ty, N)) {
+ return nullptr;
+ }
+ N = state.Num(N);
+ if (!N.IsValid()) {
+ return nullptr;
+ }
+ return build_frexp_result_vec(state, N);
+}
+
+std::string FrexpResultVec::String(MatchState* state) const {
+ const std::string N = state->NumName();
+ std::stringstream ss;
+ ss << "__frexp_result_vec" << N;
+ return ss.str();
+}
+
+/// TypeMatcher for 'type __atomic_compare_exchange_result'
+/// @see src/tint/intrinsics.def:121:6
+class AtomicCompareExchangeResult : public TypeMatcher {
+ public:
+ /// Checks whether the given type matches the matcher rules.
+ /// Match may define and refine the template types and numbers in state.
+ /// @param state the MatchState
+ /// @param type the type to match
+ /// @returns the canonicalized type on match, otherwise nullptr
+ const sem::Type* Match(MatchState& state,
+ const sem::Type* type) const override;
+ /// @param state the MatchState
+ /// @return a string representation of the matcher.
+ std::string String(MatchState* state) const override;
+};
+
+const sem::Type* AtomicCompareExchangeResult::Match(MatchState& state, const sem::Type* ty) const {
+ const sem::Type* T = nullptr;
+ if (!match_atomic_compare_exchange_result(ty, T)) {
+ return nullptr;
+ }
+ T = state.Type(T);
+ if (T == nullptr) {
+ return nullptr;
+ }
+ return build_atomic_compare_exchange_result(state, T);
+}
+
+std::string AtomicCompareExchangeResult::String(MatchState* state) const {
+ const std::string T = state->TypeName();
+ return "__atomic_compare_exchange_result<" + T + ">";
+}
+
+/// TypeMatcher for 'match fiu32'
+/// @see src/tint/intrinsics.def:129:7
+class Fiu32 : public TypeMatcher {
+ public:
+ /// Checks whether the given type matches the matcher rules, and returns the
+ /// expected, canonicalized type on success.
+ /// Match may define and refine the template types and numbers in state.
+ /// @param state the MatchState
+ /// @param type the type to match
+ /// @returns the canonicalized type on match, otherwise nullptr
+ const sem::Type* Match(MatchState& state,
+ const sem::Type* type) const override;
+ /// @param state the MatchState
+ /// @return a string representation of the matcher.
+ std::string String(MatchState* state) const override;
+};
+
+const sem::Type* Fiu32::Match(MatchState& state, const sem::Type* ty) const {
+ if (match_i32(ty)) {
+ return build_i32(state);
+ }
+ if (match_u32(ty)) {
+ return build_u32(state);
+ }
+ if (match_f32(ty)) {
+ return build_f32(state);
+ }
+ return nullptr;
+}
+
+std::string Fiu32::String(MatchState*) const {
+ std::stringstream ss;
+ // Note: We pass nullptr to the TypeMatcher::String() functions, as matchers do not support
+ // template arguments, nor can they match sub-types. As such, they have no use for the MatchState.
+ ss << F32().String(nullptr) << ", " << I32().String(nullptr) << " or " << U32().String(nullptr);
+ return ss.str();
+}
+
+/// TypeMatcher for 'match fi32'
+/// @see src/tint/intrinsics.def:130:7
+class Fi32 : public TypeMatcher {
+ public:
+ /// Checks whether the given type matches the matcher rules, and returns the
+ /// expected, canonicalized type on success.
+ /// Match may define and refine the template types and numbers in state.
+ /// @param state the MatchState
+ /// @param type the type to match
+ /// @returns the canonicalized type on match, otherwise nullptr
+ const sem::Type* Match(MatchState& state,
+ const sem::Type* type) const override;
+ /// @param state the MatchState
+ /// @return a string representation of the matcher.
+ std::string String(MatchState* state) const override;
+};
+
+const sem::Type* Fi32::Match(MatchState& state, const sem::Type* ty) const {
+ if (match_i32(ty)) {
+ return build_i32(state);
+ }
+ if (match_f32(ty)) {
+ return build_f32(state);
+ }
+ return nullptr;
+}
+
+std::string Fi32::String(MatchState*) const {
+ std::stringstream ss;
+ // Note: We pass nullptr to the TypeMatcher::String() functions, as matchers do not support
+ // template arguments, nor can they match sub-types. As such, they have no use for the MatchState.
+ ss << F32().String(nullptr) << " or " << I32().String(nullptr);
+ return ss.str();
+}
+
+/// TypeMatcher for 'match iu32'
+/// @see src/tint/intrinsics.def:131:7
+class Iu32 : public TypeMatcher {
+ public:
+ /// Checks whether the given type matches the matcher rules, and returns the
+ /// expected, canonicalized type on success.
+ /// Match may define and refine the template types and numbers in state.
+ /// @param state the MatchState
+ /// @param type the type to match
+ /// @returns the canonicalized type on match, otherwise nullptr
+ const sem::Type* Match(MatchState& state,
+ const sem::Type* type) const override;
+ /// @param state the MatchState
+ /// @return a string representation of the matcher.
+ std::string String(MatchState* state) const override;
+};
+
+const sem::Type* Iu32::Match(MatchState& state, const sem::Type* ty) const {
+ if (match_i32(ty)) {
+ return build_i32(state);
+ }
+ if (match_u32(ty)) {
+ return build_u32(state);
+ }
+ return nullptr;
+}
+
+std::string Iu32::String(MatchState*) const {
+ std::stringstream ss;
+ // Note: We pass nullptr to the TypeMatcher::String() functions, as matchers do not support
+ // template arguments, nor can they match sub-types. As such, they have no use for the MatchState.
+ ss << I32().String(nullptr) << " or " << U32().String(nullptr);
+ return ss.str();
+}
+
+/// TypeMatcher for 'match scalar'
+/// @see src/tint/intrinsics.def:132:7
+class Scalar : public TypeMatcher {
+ public:
+ /// Checks whether the given type matches the matcher rules, and returns the
+ /// expected, canonicalized type on success.
+ /// Match may define and refine the template types and numbers in state.
+ /// @param state the MatchState
+ /// @param type the type to match
+ /// @returns the canonicalized type on match, otherwise nullptr
+ const sem::Type* Match(MatchState& state,
+ const sem::Type* type) const override;
+ /// @param state the MatchState
+ /// @return a string representation of the matcher.
+ std::string String(MatchState* state) const override;
+};
+
+const sem::Type* Scalar::Match(MatchState& state, const sem::Type* ty) const {
+ if (match_i32(ty)) {
+ return build_i32(state);
+ }
+ if (match_u32(ty)) {
+ return build_u32(state);
+ }
+ if (match_f32(ty)) {
+ return build_f32(state);
+ }
+ if (match_bool(ty)) {
+ return build_bool(state);
+ }
+ return nullptr;
+}
+
+std::string Scalar::String(MatchState*) const {
+ std::stringstream ss;
+ // Note: We pass nullptr to the TypeMatcher::String() functions, as matchers do not support
+ // template arguments, nor can they match sub-types. As such, they have no use for the MatchState.
+ ss << F32().String(nullptr) << ", " << I32().String(nullptr) << ", " << U32().String(nullptr) << " or " << Bool().String(nullptr);
+ return ss.str();
+}
+
+/// TypeMatcher for 'match abstract_or_scalar'
+/// @see src/tint/intrinsics.def:133:7
+class AbstractOrScalar : public TypeMatcher {
+ public:
+ /// Checks whether the given type matches the matcher rules, and returns the
+ /// expected, canonicalized type on success.
+ /// Match may define and refine the template types and numbers in state.
+ /// @param state the MatchState
+ /// @param type the type to match
+ /// @returns the canonicalized type on match, otherwise nullptr
+ const sem::Type* Match(MatchState& state,
+ const sem::Type* type) const override;
+ /// @param state the MatchState
+ /// @return a string representation of the matcher.
+ std::string String(MatchState* state) const override;
+};
+
+const sem::Type* AbstractOrScalar::Match(MatchState& state, const sem::Type* ty) const {
+ if (match_af(ty)) {
+ return build_af(state);
+ }
+ if (match_ai(ty)) {
+ return build_ai(state);
+ }
+ if (match_i32(ty)) {
+ return build_i32(state);
+ }
+ if (match_u32(ty)) {
+ return build_u32(state);
+ }
+ if (match_f32(ty)) {
+ return build_f32(state);
+ }
+ if (match_bool(ty)) {
+ return build_bool(state);
+ }
+ return nullptr;
+}
+
+std::string AbstractOrScalar::String(MatchState*) const {
+ std::stringstream ss;
+ // Note: We pass nullptr to the TypeMatcher::String() functions, as matchers do not support
+ // template arguments, nor can they match sub-types. As such, they have no use for the MatchState.
+ ss << Ai().String(nullptr) << ", " << Af().String(nullptr) << ", " << F32().String(nullptr) << ", " << I32().String(nullptr) << ", " << U32().String(nullptr) << " or " << Bool().String(nullptr);
+ return ss.str();
+}
+
+/// TypeMatcher for 'match af_f32'
+/// @see src/tint/intrinsics.def:134:7
+class AfF32 : public TypeMatcher {
+ public:
+ /// Checks whether the given type matches the matcher rules, and returns the
+ /// expected, canonicalized type on success.
+ /// Match may define and refine the template types and numbers in state.
+ /// @param state the MatchState
+ /// @param type the type to match
+ /// @returns the canonicalized type on match, otherwise nullptr
+ const sem::Type* Match(MatchState& state,
+ const sem::Type* type) const override;
+ /// @param state the MatchState
+ /// @return a string representation of the matcher.
+ std::string String(MatchState* state) const override;
+};
+
+const sem::Type* AfF32::Match(MatchState& state, const sem::Type* ty) const {
+ if (match_af(ty)) {
+ return build_af(state);
+ }
+ if (match_f32(ty)) {
+ return build_f32(state);
+ }
+ return nullptr;
+}
+
+std::string AfF32::String(MatchState*) const {
+ std::stringstream ss;
+ // Note: We pass nullptr to the TypeMatcher::String() functions, as matchers do not support
+ // template arguments, nor can they match sub-types. As such, they have no use for the MatchState.
+ ss << Af().String(nullptr) << " or " << F32().String(nullptr);
+ return ss.str();
+}
+
+/// TypeMatcher for 'match scalar_no_f32'
+/// @see src/tint/intrinsics.def:135:7
+class ScalarNoF32 : public TypeMatcher {
+ public:
+ /// Checks whether the given type matches the matcher rules, and returns the
+ /// expected, canonicalized type on success.
+ /// Match may define and refine the template types and numbers in state.
+ /// @param state the MatchState
+ /// @param type the type to match
+ /// @returns the canonicalized type on match, otherwise nullptr
+ const sem::Type* Match(MatchState& state,
+ const sem::Type* type) const override;
+ /// @param state the MatchState
+ /// @return a string representation of the matcher.
+ std::string String(MatchState* state) const override;
+};
+
+const sem::Type* ScalarNoF32::Match(MatchState& state, const sem::Type* ty) const {
+ if (match_i32(ty)) {
+ return build_i32(state);
+ }
+ if (match_u32(ty)) {
+ return build_u32(state);
+ }
+ if (match_bool(ty)) {
+ return build_bool(state);
+ }
+ return nullptr;
+}
+
+std::string ScalarNoF32::String(MatchState*) const {
+ std::stringstream ss;
+ // Note: We pass nullptr to the TypeMatcher::String() functions, as matchers do not support
+ // template arguments, nor can they match sub-types. As such, they have no use for the MatchState.
+ ss << I32().String(nullptr) << ", " << U32().String(nullptr) << " or " << Bool().String(nullptr);
+ return ss.str();
+}
+
+/// TypeMatcher for 'match scalar_no_i32'
+/// @see src/tint/intrinsics.def:136:7
+class ScalarNoI32 : public TypeMatcher {
+ public:
+ /// Checks whether the given type matches the matcher rules, and returns the
+ /// expected, canonicalized type on success.
+ /// Match may define and refine the template types and numbers in state.
+ /// @param state the MatchState
+ /// @param type the type to match
+ /// @returns the canonicalized type on match, otherwise nullptr
+ const sem::Type* Match(MatchState& state,
+ const sem::Type* type) const override;
+ /// @param state the MatchState
+ /// @return a string representation of the matcher.
+ std::string String(MatchState* state) const override;
+};
+
+const sem::Type* ScalarNoI32::Match(MatchState& state, const sem::Type* ty) const {
+ if (match_u32(ty)) {
+ return build_u32(state);
+ }
+ if (match_f32(ty)) {
+ return build_f32(state);
+ }
+ if (match_bool(ty)) {
+ return build_bool(state);
+ }
+ return nullptr;
+}
+
+std::string ScalarNoI32::String(MatchState*) const {
+ std::stringstream ss;
+ // Note: We pass nullptr to the TypeMatcher::String() functions, as matchers do not support
+ // template arguments, nor can they match sub-types. As such, they have no use for the MatchState.
+ ss << F32().String(nullptr) << ", " << U32().String(nullptr) << " or " << Bool().String(nullptr);
+ return ss.str();
+}
+
+/// TypeMatcher for 'match scalar_no_u32'
+/// @see src/tint/intrinsics.def:137:7
+class ScalarNoU32 : public TypeMatcher {
+ public:
+ /// Checks whether the given type matches the matcher rules, and returns the
+ /// expected, canonicalized type on success.
+ /// Match may define and refine the template types and numbers in state.
+ /// @param state the MatchState
+ /// @param type the type to match
+ /// @returns the canonicalized type on match, otherwise nullptr
+ const sem::Type* Match(MatchState& state,
+ const sem::Type* type) const override;
+ /// @param state the MatchState
+ /// @return a string representation of the matcher.
+ std::string String(MatchState* state) const override;
+};
+
+const sem::Type* ScalarNoU32::Match(MatchState& state, const sem::Type* ty) const {
+ if (match_i32(ty)) {
+ return build_i32(state);
+ }
+ if (match_f32(ty)) {
+ return build_f32(state);
+ }
+ if (match_bool(ty)) {
+ return build_bool(state);
+ }
+ return nullptr;
+}
+
+std::string ScalarNoU32::String(MatchState*) const {
+ std::stringstream ss;
+ // Note: We pass nullptr to the TypeMatcher::String() functions, as matchers do not support
+ // template arguments, nor can they match sub-types. As such, they have no use for the MatchState.
+ ss << F32().String(nullptr) << ", " << I32().String(nullptr) << " or " << Bool().String(nullptr);
+ return ss.str();
+}
+
+/// TypeMatcher for 'match scalar_no_bool'
+/// @see src/tint/intrinsics.def:138:7
+class ScalarNoBool : public TypeMatcher {
+ public:
+ /// Checks whether the given type matches the matcher rules, and returns the
+ /// expected, canonicalized type on success.
+ /// Match may define and refine the template types and numbers in state.
+ /// @param state the MatchState
+ /// @param type the type to match
+ /// @returns the canonicalized type on match, otherwise nullptr
+ const sem::Type* Match(MatchState& state,
+ const sem::Type* type) const override;
+ /// @param state the MatchState
+ /// @return a string representation of the matcher.
+ std::string String(MatchState* state) const override;
+};
+
+const sem::Type* ScalarNoBool::Match(MatchState& state, const sem::Type* ty) const {
+ if (match_i32(ty)) {
+ return build_i32(state);
+ }
+ if (match_u32(ty)) {
+ return build_u32(state);
+ }
+ if (match_f32(ty)) {
+ return build_f32(state);
+ }
+ return nullptr;
+}
+
+std::string ScalarNoBool::String(MatchState*) const {
+ std::stringstream ss;
+ // Note: We pass nullptr to the TypeMatcher::String() functions, as matchers do not support
+ // template arguments, nor can they match sub-types. As such, they have no use for the MatchState.
+ ss << F32().String(nullptr) << ", " << I32().String(nullptr) << " or " << U32().String(nullptr);
+ return ss.str();
+}
+
+/// EnumMatcher for 'match f32_texel_format'
+/// @see src/tint/intrinsics.def:149:7
+class F32TexelFormat : public NumberMatcher {
+ public:
+ /// Checks whether the given number matches the enum matcher rules.
+ /// Match may define template numbers in state.
+ /// @param state the MatchState
+ /// @param number the enum value as a Number
+ /// @returns the canonicalized number on match, otherwise Number::invalid
+ Number Match(MatchState& state, Number number) const override;
+ /// @param state the MatchState
+ /// @return a string representation of the matcher.
+ std::string String(MatchState* state) const override;
+};
+
+Number F32TexelFormat::Match(MatchState&, Number number) const {
+ switch (static_cast<TexelFormat>(number.Value())) {
+ case TexelFormat::kRgba8Unorm:
+ case TexelFormat::kRgba8Snorm:
+ case TexelFormat::kRgba16Float:
+ case TexelFormat::kR32Float:
+ case TexelFormat::kRg32Float:
+ case TexelFormat::kRgba32Float:
+ return number;
+ default:
+ return Number::invalid;
+ }
+}
+
+std::string F32TexelFormat::String(MatchState*) const {
+ return "rgba8unorm, rgba8snorm, rgba16float, r32float, rg32float or rgba32float";
+}
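+
+// Usage sketch (assumes only the declarations above; `state` stands for a
+// caller's MatchState): enum matchers validate a Number carrying an enum value
+// and return it, canonicalized, on success, or Number::invalid otherwise.
+//
+//   F32TexelFormat fmt;
+//   Number n = fmt.Match(state, Number(static_cast<uint32_t>(TexelFormat::kR32Float)));
+//   // n.IsValid() is true; an integer format such as TexelFormat::kR32Sint
+//   // would yield Number::invalid instead.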
+
+/// EnumMatcher for 'match i32_texel_format'
+/// @see src/tint/intrinsics.def:151:7
+class I32TexelFormat : public NumberMatcher {
+ public:
+ /// Checks whether the given number matches the enum matcher rules.
+ /// Match may define template numbers in state.
+ /// @param state the MatchState
+ /// @param number the enum value as a Number
+ /// @returns the canonicalized number on match, otherwise Number::invalid
+ Number Match(MatchState& state, Number number) const override;
+ /// @param state the MatchState
+ /// @return a string representation of the matcher.
+ std::string String(MatchState* state) const override;
+};
+
+Number I32TexelFormat::Match(MatchState&, Number number) const {
+ switch (static_cast<TexelFormat>(number.Value())) {
+ case TexelFormat::kRgba8Sint:
+ case TexelFormat::kRgba16Sint:
+ case TexelFormat::kR32Sint:
+ case TexelFormat::kRg32Sint:
+ case TexelFormat::kRgba32Sint:
+ return number;
+ default:
+ return Number::invalid;
+ }
+}
+
+std::string I32TexelFormat::String(MatchState*) const {
+ return "rgba8sint, rgba16sint, r32sint, rg32sint or rgba32sint";
+}
+
+/// EnumMatcher for 'match u32_texel_format'
+/// @see src/tint/intrinsics.def:153:7
+class U32TexelFormat : public NumberMatcher {
+ public:
+ /// Checks whether the given number matches the enum matcher rules.
+ /// Match may define template numbers in state.
+ /// @param state the MatchState
+ /// @param number the enum value as a Number
+ /// @returns the canonicalized number on match, otherwise Number::invalid
+ Number Match(MatchState& state, Number number) const override;
+ /// @param state the MatchState
+ /// @return a string representation of the matcher.
+ std::string String(MatchState* state) const override;
+};
+
+Number U32TexelFormat::Match(MatchState&, Number number) const {
+ switch (static_cast<TexelFormat>(number.Value())) {
+ case TexelFormat::kRgba8Uint:
+ case TexelFormat::kRgba16Uint:
+ case TexelFormat::kR32Uint:
+ case TexelFormat::kRg32Uint:
+ case TexelFormat::kRgba32Uint:
+ return number;
+ default:
+ return Number::invalid;
+ }
+}
+
+std::string U32TexelFormat::String(MatchState*) const {
+ return "rgba8uint, rgba16uint, r32uint, rg32uint or rgba32uint";
+}
+
+/// EnumMatcher for 'match write_only'
+/// @see src/tint/intrinsics.def:156:7
+class WriteOnly : public NumberMatcher {
+ public:
+ /// Checks whether the given number matches the enum matcher rules.
+ /// Match may define template numbers in state.
+ /// @param state the MatchState
+ /// @param number the enum value as a Number
+ /// @returns the canonicalized number on match, otherwise Number::invalid
+ Number Match(MatchState& state, Number number) const override;
+ /// @param state the MatchState
+ /// @return a string representation of the matcher.
+ std::string String(MatchState* state) const override;
+};
+
+Number WriteOnly::Match(MatchState&, Number number) const {
+ if (number.IsAny() || number.Value() == static_cast<uint32_t>(Access::kWrite)) {
+ return Number(static_cast<uint32_t>(Access::kWrite));
+ }
+ return Number::invalid;
+}
+
+std::string WriteOnly::String(MatchState*) const {
+ return "write";
+}
+
+/// EnumMatcher for 'match function_private_workgroup'
+/// @see src/tint/intrinsics.def:158:7
+class FunctionPrivateWorkgroup : public NumberMatcher {
+ public:
+ /// Checks whether the given number matches the enum matcher rules.
+ /// Match may define template numbers in state.
+ /// @param state the MatchState
+ /// @param number the enum value as a Number
+ /// @returns the canonicalized number on match, otherwise Number::invalid
+ Number Match(MatchState& state, Number number) const override;
+ /// @param state the MatchState
+ /// @return a string representation of the matcher.
+ std::string String(MatchState* state) const override;
+};
+
+Number FunctionPrivateWorkgroup::Match(MatchState&, Number number) const {
+ switch (static_cast<StorageClass>(number.Value())) {
+ case StorageClass::kFunction:
+ case StorageClass::kPrivate:
+ case StorageClass::kWorkgroup:
+ return number;
+ default:
+ return Number::invalid;
+ }
+}
+
+std::string FunctionPrivateWorkgroup::String(MatchState*) const {
+ return "function, private or workgroup";
+}
+
+/// EnumMatcher for 'match workgroup_or_storage'
+/// @see src/tint/intrinsics.def:159:7
+class WorkgroupOrStorage : public NumberMatcher {
+ public:
+ /// Checks whether the given number matches the enum matcher rules.
+ /// Match may define template numbers in state.
+ /// @param state the MatchState
+ /// @param number the enum value as a Number
+ /// @returns the canonicalized number on match, otherwise Number::invalid
+ Number Match(MatchState& state, Number number) const override;
+ /// @param state the MatchState
+ /// @return a string representation of the matcher.
+ std::string String(MatchState* state) const override;
+};
+
+Number WorkgroupOrStorage::Match(MatchState&, Number number) const {
+ switch (static_cast<StorageClass>(number.Value())) {
+ case StorageClass::kWorkgroup:
+ case StorageClass::kStorage:
+ return number;
+ default:
+ return Number::invalid;
+ }
+}
+
+std::string WorkgroupOrStorage::String(MatchState*) const {
+ return "workgroup or storage";
+}
+
+/// EnumMatcher for 'match storage'
+class Storage : public NumberMatcher {
+ public:
+ /// Checks whether the given number matches the enum matcher rules.
+ /// Match may define template numbers in state.
+ /// @param state the MatchState
+ /// @param number the enum value as a Number
+ /// @returns the canonicalized number on match, otherwise Number::invalid
+ Number Match(MatchState& state, Number number) const override;
+ /// @param state the MatchState
+ /// @return a string representation of the matcher.
+ std::string String(MatchState* state) const override;
+};
+
+Number Storage::Match(MatchState&, Number number) const {
+ if (number.IsAny() || number.Value() == static_cast<uint32_t>(StorageClass::kStorage)) {
+ return Number(static_cast<uint32_t>(StorageClass::kStorage));
+ }
+ return Number::invalid;
+}
+
+std::string Storage::String(MatchState*) const {
+ return "storage";
+}
+
+/// EnumMatcher for 'match write'
+class Write : public NumberMatcher {
+ public:
+ /// Checks whether the given number matches the enum matcher rules.
+ /// Match may define template numbers in state.
+ /// @param state the MatchState
+ /// @param number the enum value as a Number
+ /// @returns the canonicalized number on match, otherwise Number::invalid
+ Number Match(MatchState& state, Number number) const override;
+ /// @param state the MatchState
+ /// @return a string representation of the matcher.
+ std::string String(MatchState* state) const override;
+};
+
+Number Write::Match(MatchState&, Number number) const {
+ if (number.IsAny() || number.Value() == static_cast<uint32_t>(Access::kWrite)) {
+ return Number(static_cast<uint32_t>(Access::kWrite));
+ }
+ return Number::invalid;
+}
+
+std::string Write::String(MatchState*) const {
+ return "write";
+}
+
+/// EnumMatcher for 'match read_write'
+class ReadWrite : public NumberMatcher {
+ public:
+ /// Checks whether the given number matches the enum matcher rules.
+ /// Match may define template numbers in state.
+ /// @param state the MatchState
+ /// @param number the enum value as a Number
+ /// @returns the canonicalized number on match, otherwise Number::invalid
+ Number Match(MatchState& state, Number number) const override;
+ /// @param state the MatchState
+ /// @return a string representation of the matcher.
+ std::string String(MatchState* state) const override;
+};
+
+Number ReadWrite::Match(MatchState&, Number number) const {
+ if (number.IsAny() || number.Value() == static_cast<uint32_t>(Access::kReadWrite)) {
+ return Number(static_cast<uint32_t>(Access::kReadWrite));
+ }
+ return Number::invalid;
+}
+
+std::string ReadWrite::String(MatchState*) const {
+ return "read_write";
+}
+
+/// Matchers holds type and number matchers
+class Matchers {
+ private:
+ TemplateTypeMatcher template_type_0_{0};
+ TemplateTypeMatcher template_type_1_{1};
+ TemplateNumberMatcher template_number_0_{0};
+ TemplateNumberMatcher template_number_1_{1};
+ TemplateNumberMatcher template_number_2_{2};
+ Bool Bool_;
+ Af Af_;
+ Ai Ai_;
+ I32 I32_;
+ U32 U32_;
+ F32 F32_;
+ Vec2 Vec2_;
+ Vec3 Vec3_;
+ Vec4 Vec4_;
+ Mat2X2 Mat2X2_;
+ Mat2X3 Mat2X3_;
+ Mat2X4 Mat2X4_;
+ Mat3X2 Mat3X2_;
+ Mat3X3 Mat3X3_;
+ Mat3X4 Mat3X4_;
+ Mat4X2 Mat4X2_;
+ Mat4X3 Mat4X3_;
+ Mat4X4 Mat4X4_;
+ Vec Vec_;
+ Mat Mat_;
+ Ptr Ptr_;
+ Atomic Atomic_;
+ Array Array_;
+ Sampler Sampler_;
+ SamplerComparison SamplerComparison_;
+ Texture1D Texture1D_;
+ Texture2D Texture2D_;
+ Texture2DArray Texture2DArray_;
+ Texture3D Texture3D_;
+ TextureCube TextureCube_;
+ TextureCubeArray TextureCubeArray_;
+ TextureMultisampled2D TextureMultisampled2D_;
+ TextureDepth2D TextureDepth2D_;
+ TextureDepth2DArray TextureDepth2DArray_;
+ TextureDepthCube TextureDepthCube_;
+ TextureDepthCubeArray TextureDepthCubeArray_;
+ TextureDepthMultisampled2D TextureDepthMultisampled2D_;
+ TextureStorage1D TextureStorage1D_;
+ TextureStorage2D TextureStorage2D_;
+ TextureStorage2DArray TextureStorage2DArray_;
+ TextureStorage3D TextureStorage3D_;
+ TextureExternal TextureExternal_;
+ ModfResult ModfResult_;
+ ModfResultVec ModfResultVec_;
+ FrexpResult FrexpResult_;
+ FrexpResultVec FrexpResultVec_;
+ AtomicCompareExchangeResult AtomicCompareExchangeResult_;
+ Fiu32 Fiu32_;
+ Fi32 Fi32_;
+ Iu32 Iu32_;
+ Scalar Scalar_;
+ AbstractOrScalar AbstractOrScalar_;
+ AfF32 AfF32_;
+ ScalarNoF32 ScalarNoF32_;
+ ScalarNoI32 ScalarNoI32_;
+ ScalarNoU32 ScalarNoU32_;
+ ScalarNoBool ScalarNoBool_;
+ F32TexelFormat F32TexelFormat_;
+ I32TexelFormat I32TexelFormat_;
+ U32TexelFormat U32TexelFormat_;
+ WriteOnly WriteOnly_;
+ FunctionPrivateWorkgroup FunctionPrivateWorkgroup_;
+ WorkgroupOrStorage WorkgroupOrStorage_;
+ Storage Storage_;
+ Write Write_;
+ ReadWrite ReadWrite_;
+
+ public:
+ /// Constructor
+ Matchers();
+ /// Destructor
+ ~Matchers();
+
+ /// The template types, types, and type matchers
+ TypeMatcher const* const type[59] = {
+ /* [0] */ &template_type_0_,
+ /* [1] */ &template_type_1_,
+ /* [2] */ &Bool_,
+ /* [3] */ &Af_,
+ /* [4] */ &Ai_,
+ /* [5] */ &I32_,
+ /* [6] */ &U32_,
+ /* [7] */ &F32_,
+ /* [8] */ &Vec2_,
+ /* [9] */ &Vec3_,
+ /* [10] */ &Vec4_,
+ /* [11] */ &Mat2X2_,
+ /* [12] */ &Mat2X3_,
+ /* [13] */ &Mat2X4_,
+ /* [14] */ &Mat3X2_,
+ /* [15] */ &Mat3X3_,
+ /* [16] */ &Mat3X4_,
+ /* [17] */ &Mat4X2_,
+ /* [18] */ &Mat4X3_,
+ /* [19] */ &Mat4X4_,
+ /* [20] */ &Vec_,
+ /* [21] */ &Mat_,
+ /* [22] */ &Ptr_,
+ /* [23] */ &Atomic_,
+ /* [24] */ &Array_,
+ /* [25] */ &Sampler_,
+ /* [26] */ &SamplerComparison_,
+ /* [27] */ &Texture1D_,
+ /* [28] */ &Texture2D_,
+ /* [29] */ &Texture2DArray_,
+ /* [30] */ &Texture3D_,
+ /* [31] */ &TextureCube_,
+ /* [32] */ &TextureCubeArray_,
+ /* [33] */ &TextureMultisampled2D_,
+ /* [34] */ &TextureDepth2D_,
+ /* [35] */ &TextureDepth2DArray_,
+ /* [36] */ &TextureDepthCube_,
+ /* [37] */ &TextureDepthCubeArray_,
+ /* [38] */ &TextureDepthMultisampled2D_,
+ /* [39] */ &TextureStorage1D_,
+ /* [40] */ &TextureStorage2D_,
+ /* [41] */ &TextureStorage2DArray_,
+ /* [42] */ &TextureStorage3D_,
+ /* [43] */ &TextureExternal_,
+ /* [44] */ &ModfResult_,
+ /* [45] */ &ModfResultVec_,
+ /* [46] */ &FrexpResult_,
+ /* [47] */ &FrexpResultVec_,
+ /* [48] */ &AtomicCompareExchangeResult_,
+ /* [49] */ &Fiu32_,
+ /* [50] */ &Fi32_,
+ /* [51] */ &Iu32_,
+ /* [52] */ &Scalar_,
+ /* [53] */ &AbstractOrScalar_,
+ /* [54] */ &AfF32_,
+ /* [55] */ &ScalarNoF32_,
+ /* [56] */ &ScalarNoI32_,
+ /* [57] */ &ScalarNoU32_,
+ /* [58] */ &ScalarNoBool_,
+ };
+
+ /// The template numbers and number matchers
+ NumberMatcher const* const number[12] = {
+ /* [0] */ &template_number_0_,
+ /* [1] */ &template_number_1_,
+ /* [2] */ &template_number_2_,
+ /* [3] */ &F32TexelFormat_,
+ /* [4] */ &I32TexelFormat_,
+ /* [5] */ &U32TexelFormat_,
+ /* [6] */ &WriteOnly_,
+ /* [7] */ &FunctionPrivateWorkgroup_,
+ /* [8] */ &WorkgroupOrStorage_,
+ /* [9] */ &Storage_,
+ /* [10] */ &Write_,
+ /* [11] */ &ReadWrite_,
+ };
+};
+
+Matchers::Matchers() = default;
+Matchers::~Matchers() = default;
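+
+// Usage sketch (assumes only the members declared above): the `type` and
+// `number` arrays give every matcher a stable index, and the tables that
+// follow (kMatcherIndices, kParameters) refer to matchers by these indices.
+//
+//   Matchers matchers;
+//   const TypeMatcher* t = matchers.type[31];     // &TextureCube_ per the table above
+//   const NumberMatcher* n = matchers.number[3];  // &F32TexelFormat_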
+
+constexpr MatcherIndex kMatcherIndices[] = {
+ /* [0] */ 22,
+ /* [1] */ 0,
+ /* [2] */ 23,
+ /* [3] */ 0,
+ /* [4] */ 11,
+ /* [5] */ 7,
+ /* [6] */ 22,
+ /* [7] */ 9,
+ /* [8] */ 24,
+ /* [9] */ 0,
+ /* [10] */ 0,
+ /* [11] */ 21,
+ /* [12] */ 0,
+ /* [13] */ 1,
+ /* [14] */ 7,
+ /* [15] */ 21,
+ /* [16] */ 0,
+ /* [17] */ 0,
+ /* [18] */ 7,
+ /* [19] */ 21,
+ /* [20] */ 0,
+ /* [21] */ 2,
+ /* [22] */ 7,
+ /* [23] */ 21,
+ /* [24] */ 1,
+ /* [25] */ 0,
+ /* [26] */ 7,
+ /* [27] */ 21,
+ /* [28] */ 1,
+ /* [29] */ 2,
+ /* [30] */ 7,
+ /* [31] */ 20,
+ /* [32] */ 0,
+ /* [33] */ 7,
+ /* [34] */ 41,
+ /* [35] */ 0,
+ /* [36] */ 1,
+ /* [37] */ 20,
+ /* [38] */ 0,
+ /* [39] */ 0,
+ /* [40] */ 20,
+ /* [41] */ 0,
+ /* [42] */ 2,
+ /* [43] */ 42,
+ /* [44] */ 5,
+ /* [45] */ 10,
+ /* [46] */ 7,
+ /* [47] */ 41,
+ /* [48] */ 5,
+ /* [49] */ 10,
+ /* [50] */ 0,
+ /* [51] */ 40,
+ /* [52] */ 5,
+ /* [53] */ 10,
+ /* [54] */ 1,
+ /* [55] */ 39,
+ /* [56] */ 5,
+ /* [57] */ 10,
+ /* [58] */ 5,
+ /* [59] */ 42,
+ /* [60] */ 4,
+ /* [61] */ 10,
+ /* [62] */ 6,
+ /* [63] */ 41,
+ /* [64] */ 4,
+ /* [65] */ 10,
+ /* [66] */ 2,
+ /* [67] */ 40,
+ /* [68] */ 4,
+ /* [69] */ 10,
+ /* [70] */ 39,
+ /* [71] */ 4,
+ /* [72] */ 10,
+ /* [73] */ 42,
+ /* [74] */ 3,
+ /* [75] */ 10,
+ /* [76] */ 20,
+ /* [77] */ 1,
+ /* [78] */ 7,
+ /* [79] */ 42,
+ /* [80] */ 0,
+ /* [81] */ 1,
+ /* [82] */ 40,
+ /* [83] */ 0,
+ /* [84] */ 1,
+ /* [85] */ 39,
+ /* [86] */ 0,
+ /* [87] */ 1,
+ /* [88] */ 41,
+ /* [89] */ 3,
+ /* [90] */ 10,
+ /* [91] */ 40,
+ /* [92] */ 3,
+ /* [93] */ 10,
+ /* [94] */ 39,
+ /* [95] */ 3,
+ /* [96] */ 10,
+ /* [97] */ 20,
+ /* [98] */ 0,
+ /* [99] */ 5,
+ /* [100] */ 20,
+ /* [101] */ 0,
+ /* [102] */ 6,
+ /* [103] */ 8,
+ /* [104] */ 7,
+ /* [105] */ 8,
+ /* [106] */ 0,
+ /* [107] */ 8,
+ /* [108] */ 1,
+ /* [109] */ 8,
+ /* [110] */ 5,
+ /* [111] */ 8,
+ /* [112] */ 6,
+ /* [113] */ 8,
+ /* [114] */ 2,
+ /* [115] */ 9,
+ /* [116] */ 0,
+ /* [117] */ 45,
+ /* [118] */ 0,
+ /* [119] */ 9,
+ /* [120] */ 1,
+ /* [121] */ 9,
+ /* [122] */ 7,
+ /* [123] */ 9,
+ /* [124] */ 5,
+ /* [125] */ 9,
+ /* [126] */ 6,
+ /* [127] */ 9,
+ /* [128] */ 2,
+ /* [129] */ 27,
+ /* [130] */ 0,
+ /* [131] */ 28,
+ /* [132] */ 0,
+ /* [133] */ 29,
+ /* [134] */ 0,
+ /* [135] */ 47,
+ /* [136] */ 0,
+ /* [137] */ 19,
+ /* [138] */ 0,
+ /* [139] */ 30,
+ /* [140] */ 0,
+ /* [141] */ 31,
+ /* [142] */ 0,
+ /* [143] */ 32,
+ /* [144] */ 0,
+ /* [145] */ 33,
+ /* [146] */ 0,
+ /* [147] */ 11,
+ /* [148] */ 0,
+ /* [149] */ 12,
+ /* [150] */ 7,
+ /* [151] */ 12,
+ /* [152] */ 0,
+ /* [153] */ 13,
+ /* [154] */ 7,
+ /* [155] */ 13,
+ /* [156] */ 0,
+ /* [157] */ 14,
+ /* [158] */ 7,
+ /* [159] */ 14,
+ /* [160] */ 0,
+ /* [161] */ 15,
+ /* [162] */ 7,
+ /* [163] */ 15,
+ /* [164] */ 0,
+ /* [165] */ 16,
+ /* [166] */ 7,
+ /* [167] */ 16,
+ /* [168] */ 0,
+ /* [169] */ 17,
+ /* [170] */ 7,
+ /* [171] */ 17,
+ /* [172] */ 0,
+ /* [173] */ 48,
+ /* [174] */ 0,
+ /* [175] */ 18,
+ /* [176] */ 7,
+ /* [177] */ 18,
+ /* [178] */ 0,
+ /* [179] */ 27,
+ /* [180] */ 7,
+ /* [181] */ 28,
+ /* [182] */ 7,
+ /* [183] */ 29,
+ /* [184] */ 7,
+ /* [185] */ 19,
+ /* [186] */ 7,
+ /* [187] */ 30,
+ /* [188] */ 7,
+ /* [189] */ 31,
+ /* [190] */ 7,
+ /* [191] */ 32,
+ /* [192] */ 7,
+ /* [193] */ 25,
+ /* [194] */ 26,
+ /* [195] */ 37,
+ /* [196] */ 36,
+ /* [197] */ 35,
+ /* [198] */ 34,
+ /* [199] */ 43,
+ /* [200] */ 38,
+ /* [201] */ 44,
+ /* [202] */ 46,
+};
+
+// Assert that the MatcherIndex is big enough to index all the matchers, plus
+// kNoMatcher.
+static_assert(static_cast<int>(sizeof(kMatcherIndices) / sizeof(kMatcherIndices[0])) <
+ static_cast<int>(std::numeric_limits<MatcherIndex>::max() - 1),
+ "MatcherIndex is not large enough to index kMatcherIndices");
+
+constexpr ParameterInfo kParameters[] = {
+ {
+ /* [0] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [1] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [2] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [3] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [4] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [5] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [6] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [7] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [8] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [9] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [10] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [11] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [12] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [13] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [14] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [15] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [16] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [17] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [18] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [19] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [20] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [21] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [22] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [23] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [24] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [25] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [26] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [27] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [28] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [29] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [30] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [31] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [32] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [33] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [34] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [35] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [36] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [37] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [38] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [39] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [40] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [41] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [42] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [43] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [44] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [45] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [46] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [47] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [48] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [49] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [50] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [51] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [52] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [53] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [54] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [55] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [56] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [57] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [58] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [59] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [60] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [61] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [62] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [63] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [64] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [65] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[183],
+ },
+ {
+ /* [66] */
+ /* usage */ ParameterUsage::kSampler,
+ /* matcher indices */ &kMatcherIndices[193],
+ },
+ {
+ /* [67] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[103],
+ },
+ {
+ /* [68] */
+ /* usage */ ParameterUsage::kArrayIndex,
+ /* matcher indices */ &kMatcherIndices[44],
+ },
+ {
+ /* [69] */
+ /* usage */ ParameterUsage::kDdx,
+ /* matcher indices */ &kMatcherIndices[103],
+ },
+ {
+ /* [70] */
+ /* usage */ ParameterUsage::kDdy,
+ /* matcher indices */ &kMatcherIndices[103],
+ },
+ {
+ /* [71] */
+ /* usage */ ParameterUsage::kOffset,
+ /* matcher indices */ &kMatcherIndices[109],
+ },
+ {
+ /* [72] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[197],
+ },
+ {
+ /* [73] */
+ /* usage */ ParameterUsage::kSampler,
+ /* matcher indices */ &kMatcherIndices[193],
+ },
+ {
+ /* [74] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[103],
+ },
+ {
+ /* [75] */
+ /* usage */ ParameterUsage::kArrayIndex,
+ /* matcher indices */ &kMatcherIndices[44],
+ },
+ {
+ /* [76] */
+ /* usage */ ParameterUsage::kLevel,
+ /* matcher indices */ &kMatcherIndices[44],
+ },
+ {
+ /* [77] */
+ /* usage */ ParameterUsage::kOffset,
+ /* matcher indices */ &kMatcherIndices[109],
+ },
+ {
+ /* [78] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [79] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [80] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [81] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [82] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [83] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [84] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[181],
+ },
+ {
+ /* [85] */
+ /* usage */ ParameterUsage::kSampler,
+ /* matcher indices */ &kMatcherIndices[193],
+ },
+ {
+ /* [86] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[103],
+ },
+ {
+ /* [87] */
+ /* usage */ ParameterUsage::kDdx,
+ /* matcher indices */ &kMatcherIndices[103],
+ },
+ {
+ /* [88] */
+ /* usage */ ParameterUsage::kDdy,
+ /* matcher indices */ &kMatcherIndices[103],
+ },
+ {
+ /* [89] */
+ /* usage */ ParameterUsage::kOffset,
+ /* matcher indices */ &kMatcherIndices[109],
+ },
+ {
+ /* [90] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [91] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [92] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [93] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [94] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [95] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [96] */
+ /* usage */ ParameterUsage::kComponent,
+ /* matcher indices */ &kMatcherIndices[44],
+ },
+ {
+ /* [97] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[133],
+ },
+ {
+ /* [98] */
+ /* usage */ ParameterUsage::kSampler,
+ /* matcher indices */ &kMatcherIndices[193],
+ },
+ {
+ /* [99] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[103],
+ },
+ {
+ /* [100] */
+ /* usage */ ParameterUsage::kArrayIndex,
+ /* matcher indices */ &kMatcherIndices[44],
+ },
+ {
+ /* [101] */
+ /* usage */ ParameterUsage::kOffset,
+ /* matcher indices */ &kMatcherIndices[109],
+ },
+ {
+ /* [102] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[183],
+ },
+ {
+ /* [103] */
+ /* usage */ ParameterUsage::kSampler,
+ /* matcher indices */ &kMatcherIndices[193],
+ },
+ {
+ /* [104] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[103],
+ },
+ {
+ /* [105] */
+ /* usage */ ParameterUsage::kArrayIndex,
+ /* matcher indices */ &kMatcherIndices[44],
+ },
+ {
+ /* [106] */
+ /* usage */ ParameterUsage::kDdx,
+ /* matcher indices */ &kMatcherIndices[103],
+ },
+ {
+ /* [107] */
+ /* usage */ ParameterUsage::kDdy,
+ /* matcher indices */ &kMatcherIndices[103],
+ },
+ {
+ /* [108] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[197],
+ },
+ {
+ /* [109] */
+ /* usage */ ParameterUsage::kSampler,
+ /* matcher indices */ &kMatcherIndices[194],
+ },
+ {
+ /* [110] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[103],
+ },
+ {
+ /* [111] */
+ /* usage */ ParameterUsage::kArrayIndex,
+ /* matcher indices */ &kMatcherIndices[44],
+ },
+ {
+ /* [112] */
+ /* usage */ ParameterUsage::kDepthRef,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [113] */
+ /* usage */ ParameterUsage::kOffset,
+ /* matcher indices */ &kMatcherIndices[109],
+ },
+ {
+ /* [114] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[187],
+ },
+ {
+ /* [115] */
+ /* usage */ ParameterUsage::kSampler,
+ /* matcher indices */ &kMatcherIndices[193],
+ },
+ {
+ /* [116] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[121],
+ },
+ {
+ /* [117] */
+ /* usage */ ParameterUsage::kDdx,
+ /* matcher indices */ &kMatcherIndices[121],
+ },
+ {
+ /* [118] */
+ /* usage */ ParameterUsage::kDdy,
+ /* matcher indices */ &kMatcherIndices[121],
+ },
+ {
+ /* [119] */
+ /* usage */ ParameterUsage::kOffset,
+ /* matcher indices */ &kMatcherIndices[123],
+ },
+ {
+ /* [120] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[191],
+ },
+ {
+ /* [121] */
+ /* usage */ ParameterUsage::kSampler,
+ /* matcher indices */ &kMatcherIndices[193],
+ },
+ {
+ /* [122] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[121],
+ },
+ {
+ /* [123] */
+ /* usage */ ParameterUsage::kArrayIndex,
+ /* matcher indices */ &kMatcherIndices[44],
+ },
+ {
+ /* [124] */
+ /* usage */ ParameterUsage::kDdx,
+ /* matcher indices */ &kMatcherIndices[121],
+ },
+ {
+ /* [125] */
+ /* usage */ ParameterUsage::kDdy,
+ /* matcher indices */ &kMatcherIndices[121],
+ },
+ {
+ /* [126] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[183],
+ },
+ {
+ /* [127] */
+ /* usage */ ParameterUsage::kSampler,
+ /* matcher indices */ &kMatcherIndices[193],
+ },
+ {
+ /* [128] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[103],
+ },
+ {
+ /* [129] */
+ /* usage */ ParameterUsage::kArrayIndex,
+ /* matcher indices */ &kMatcherIndices[44],
+ },
+ {
+ /* [130] */
+ /* usage */ ParameterUsage::kLevel,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [131] */
+ /* usage */ ParameterUsage::kOffset,
+ /* matcher indices */ &kMatcherIndices[109],
+ },
+ {
+ /* [132] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[183],
+ },
+ {
+ /* [133] */
+ /* usage */ ParameterUsage::kSampler,
+ /* matcher indices */ &kMatcherIndices[193],
+ },
+ {
+ /* [134] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[103],
+ },
+ {
+ /* [135] */
+ /* usage */ ParameterUsage::kArrayIndex,
+ /* matcher indices */ &kMatcherIndices[44],
+ },
+ {
+ /* [136] */
+ /* usage */ ParameterUsage::kBias,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [137] */
+ /* usage */ ParameterUsage::kOffset,
+ /* matcher indices */ &kMatcherIndices[109],
+ },
+ {
+ /* [138] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[197],
+ },
+ {
+ /* [139] */
+ /* usage */ ParameterUsage::kSampler,
+ /* matcher indices */ &kMatcherIndices[194],
+ },
+ {
+ /* [140] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[103],
+ },
+ {
+ /* [141] */
+ /* usage */ ParameterUsage::kArrayIndex,
+ /* matcher indices */ &kMatcherIndices[44],
+ },
+ {
+ /* [142] */
+ /* usage */ ParameterUsage::kDepthRef,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [143] */
+ /* usage */ ParameterUsage::kOffset,
+ /* matcher indices */ &kMatcherIndices[109],
+ },
+ {
+ /* [144] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[197],
+ },
+ {
+ /* [145] */
+ /* usage */ ParameterUsage::kSampler,
+ /* matcher indices */ &kMatcherIndices[194],
+ },
+ {
+ /* [146] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[103],
+ },
+ {
+ /* [147] */
+ /* usage */ ParameterUsage::kArrayIndex,
+ /* matcher indices */ &kMatcherIndices[44],
+ },
+ {
+ /* [148] */
+ /* usage */ ParameterUsage::kDepthRef,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [149] */
+ /* usage */ ParameterUsage::kOffset,
+ /* matcher indices */ &kMatcherIndices[109],
+ },
+ {
+ /* [150] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[197],
+ },
+ {
+ /* [151] */
+ /* usage */ ParameterUsage::kSampler,
+ /* matcher indices */ &kMatcherIndices[193],
+ },
+ {
+ /* [152] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[103],
+ },
+ {
+ /* [153] */
+ /* usage */ ParameterUsage::kArrayIndex,
+ /* matcher indices */ &kMatcherIndices[44],
+ },
+ {
+ /* [154] */
+ /* usage */ ParameterUsage::kLevel,
+ /* matcher indices */ &kMatcherIndices[44],
+ },
+ {
+ /* [155] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[198],
+ },
+ {
+ /* [156] */
+ /* usage */ ParameterUsage::kSampler,
+ /* matcher indices */ &kMatcherIndices[193],
+ },
+ {
+ /* [157] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[103],
+ },
+ {
+ /* [158] */
+ /* usage */ ParameterUsage::kLevel,
+ /* matcher indices */ &kMatcherIndices[44],
+ },
+ {
+ /* [159] */
+ /* usage */ ParameterUsage::kOffset,
+ /* matcher indices */ &kMatcherIndices[109],
+ },
+ {
+ /* [160] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[195],
+ },
+ {
+ /* [161] */
+ /* usage */ ParameterUsage::kSampler,
+ /* matcher indices */ &kMatcherIndices[194],
+ },
+ {
+ /* [162] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[121],
+ },
+ {
+ /* [163] */
+ /* usage */ ParameterUsage::kArrayIndex,
+ /* matcher indices */ &kMatcherIndices[44],
+ },
+ {
+ /* [164] */
+ /* usage */ ParameterUsage::kDepthRef,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [165] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[191],
+ },
+ {
+ /* [166] */
+ /* usage */ ParameterUsage::kSampler,
+ /* matcher indices */ &kMatcherIndices[193],
+ },
+ {
+ /* [167] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[121],
+ },
+ {
+ /* [168] */
+ /* usage */ ParameterUsage::kArrayIndex,
+ /* matcher indices */ &kMatcherIndices[44],
+ },
+ {
+ /* [169] */
+ /* usage */ ParameterUsage::kLevel,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [170] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[195],
+ },
+ {
+ /* [171] */
+ /* usage */ ParameterUsage::kSampler,
+ /* matcher indices */ &kMatcherIndices[193],
+ },
+ {
+ /* [172] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[121],
+ },
+ {
+ /* [173] */
+ /* usage */ ParameterUsage::kArrayIndex,
+ /* matcher indices */ &kMatcherIndices[44],
+ },
+ {
+ /* [174] */
+ /* usage */ ParameterUsage::kLevel,
+ /* matcher indices */ &kMatcherIndices[44],
+ },
+ {
+ /* [175] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[187],
+ },
+ {
+ /* [176] */
+ /* usage */ ParameterUsage::kSampler,
+ /* matcher indices */ &kMatcherIndices[193],
+ },
+ {
+ /* [177] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[121],
+ },
+ {
+ /* [178] */
+ /* usage */ ParameterUsage::kLevel,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [179] */
+ /* usage */ ParameterUsage::kOffset,
+ /* matcher indices */ &kMatcherIndices[123],
+ },
+ {
+ /* [180] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[181],
+ },
+ {
+ /* [181] */
+ /* usage */ ParameterUsage::kSampler,
+ /* matcher indices */ &kMatcherIndices[193],
+ },
+ {
+ /* [182] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[103],
+ },
+ {
+ /* [183] */
+ /* usage */ ParameterUsage::kBias,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [184] */
+ /* usage */ ParameterUsage::kOffset,
+ /* matcher indices */ &kMatcherIndices[109],
+ },
+ {
+ /* [185] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[197],
+ },
+ {
+ /* [186] */
+ /* usage */ ParameterUsage::kSampler,
+ /* matcher indices */ &kMatcherIndices[194],
+ },
+ {
+ /* [187] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[103],
+ },
+ {
+ /* [188] */
+ /* usage */ ParameterUsage::kArrayIndex,
+ /* matcher indices */ &kMatcherIndices[44],
+ },
+ {
+ /* [189] */
+ /* usage */ ParameterUsage::kDepthRef,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [190] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[183],
+ },
+ {
+ /* [191] */
+ /* usage */ ParameterUsage::kSampler,
+ /* matcher indices */ &kMatcherIndices[193],
+ },
+ {
+ /* [192] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[103],
+ },
+ {
+ /* [193] */
+ /* usage */ ParameterUsage::kArrayIndex,
+ /* matcher indices */ &kMatcherIndices[44],
+ },
+ {
+ /* [194] */
+ /* usage */ ParameterUsage::kLevel,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [195] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[181],
+ },
+ {
+ /* [196] */
+ /* usage */ ParameterUsage::kSampler,
+ /* matcher indices */ &kMatcherIndices[193],
+ },
+ {
+ /* [197] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[103],
+ },
+ {
+ /* [198] */
+ /* usage */ ParameterUsage::kLevel,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [199] */
+ /* usage */ ParameterUsage::kOffset,
+ /* matcher indices */ &kMatcherIndices[109],
+ },
+ {
+ /* [200] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[197],
+ },
+ {
+ /* [201] */
+ /* usage */ ParameterUsage::kSampler,
+ /* matcher indices */ &kMatcherIndices[193],
+ },
+ {
+ /* [202] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[103],
+ },
+ {
+ /* [203] */
+ /* usage */ ParameterUsage::kArrayIndex,
+ /* matcher indices */ &kMatcherIndices[44],
+ },
+ {
+ /* [204] */
+ /* usage */ ParameterUsage::kOffset,
+ /* matcher indices */ &kMatcherIndices[109],
+ },
+ {
+ /* [205] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[198],
+ },
+ {
+ /* [206] */
+ /* usage */ ParameterUsage::kSampler,
+ /* matcher indices */ &kMatcherIndices[194],
+ },
+ {
+ /* [207] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[103],
+ },
+ {
+ /* [208] */
+ /* usage */ ParameterUsage::kDepthRef,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [209] */
+ /* usage */ ParameterUsage::kOffset,
+ /* matcher indices */ &kMatcherIndices[109],
+ },
+ {
+ /* [210] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[189],
+ },
+ {
+ /* [211] */
+ /* usage */ ParameterUsage::kSampler,
+ /* matcher indices */ &kMatcherIndices[193],
+ },
+ {
+ /* [212] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[121],
+ },
+ {
+ /* [213] */
+ /* usage */ ParameterUsage::kDdx,
+ /* matcher indices */ &kMatcherIndices[121],
+ },
+ {
+ /* [214] */
+ /* usage */ ParameterUsage::kDdy,
+ /* matcher indices */ &kMatcherIndices[121],
+ },
+ {
+ /* [215] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[183],
+ },
+ {
+ /* [216] */
+ /* usage */ ParameterUsage::kSampler,
+ /* matcher indices */ &kMatcherIndices[193],
+ },
+ {
+ /* [217] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[103],
+ },
+ {
+ /* [218] */
+ /* usage */ ParameterUsage::kArrayIndex,
+ /* matcher indices */ &kMatcherIndices[44],
+ },
+ {
+ /* [219] */
+ /* usage */ ParameterUsage::kOffset,
+ /* matcher indices */ &kMatcherIndices[109],
+ },
+ {
+ /* [220] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[187],
+ },
+ {
+ /* [221] */
+ /* usage */ ParameterUsage::kSampler,
+ /* matcher indices */ &kMatcherIndices[193],
+ },
+ {
+ /* [222] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[121],
+ },
+ {
+ /* [223] */
+ /* usage */ ParameterUsage::kDdx,
+ /* matcher indices */ &kMatcherIndices[121],
+ },
+ {
+ /* [224] */
+ /* usage */ ParameterUsage::kDdy,
+ /* matcher indices */ &kMatcherIndices[121],
+ },
+ {
+ /* [225] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[197],
+ },
+ {
+ /* [226] */
+ /* usage */ ParameterUsage::kSampler,
+ /* matcher indices */ &kMatcherIndices[193],
+ },
+ {
+ /* [227] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[103],
+ },
+ {
+ /* [228] */
+ /* usage */ ParameterUsage::kArrayIndex,
+ /* matcher indices */ &kMatcherIndices[44],
+ },
+ {
+ /* [229] */
+ /* usage */ ParameterUsage::kOffset,
+ /* matcher indices */ &kMatcherIndices[109],
+ },
+ {
+ /* [230] */
+ /* usage */ ParameterUsage::kComponent,
+ /* matcher indices */ &kMatcherIndices[44],
+ },
+ {
+ /* [231] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[143],
+ },
+ {
+ /* [232] */
+ /* usage */ ParameterUsage::kSampler,
+ /* matcher indices */ &kMatcherIndices[193],
+ },
+ {
+ /* [233] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[121],
+ },
+ {
+ /* [234] */
+ /* usage */ ParameterUsage::kArrayIndex,
+ /* matcher indices */ &kMatcherIndices[44],
+ },
+ {
+ /* [235] */
+ /* usage */ ParameterUsage::kComponent,
+ /* matcher indices */ &kMatcherIndices[44],
+ },
+ {
+ /* [236] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[133],
+ },
+ {
+ /* [237] */
+ /* usage */ ParameterUsage::kSampler,
+ /* matcher indices */ &kMatcherIndices[193],
+ },
+ {
+ /* [238] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[103],
+ },
+ {
+ /* [239] */
+ /* usage */ ParameterUsage::kArrayIndex,
+ /* matcher indices */ &kMatcherIndices[44],
+ },
+ {
+ /* [240] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[181],
+ },
+ {
+ /* [241] */
+ /* usage */ ParameterUsage::kSampler,
+ /* matcher indices */ &kMatcherIndices[193],
+ },
+ {
+ /* [242] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[103],
+ },
+ {
+ /* [243] */
+ /* usage */ ParameterUsage::kDdx,
+ /* matcher indices */ &kMatcherIndices[103],
+ },
+ {
+ /* [244] */
+ /* usage */ ParameterUsage::kDdy,
+ /* matcher indices */ &kMatcherIndices[103],
+ },
+ {
+ /* [245] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[195],
+ },
+ {
+ /* [246] */
+ /* usage */ ParameterUsage::kSampler,
+ /* matcher indices */ &kMatcherIndices[194],
+ },
+ {
+ /* [247] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[121],
+ },
+ {
+ /* [248] */
+ /* usage */ ParameterUsage::kArrayIndex,
+ /* matcher indices */ &kMatcherIndices[44],
+ },
+ {
+ /* [249] */
+ /* usage */ ParameterUsage::kDepthRef,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [250] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[187],
+ },
+ {
+ /* [251] */
+ /* usage */ ParameterUsage::kSampler,
+ /* matcher indices */ &kMatcherIndices[193],
+ },
+ {
+ /* [252] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[121],
+ },
+ {
+ /* [253] */
+ /* usage */ ParameterUsage::kBias,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [254] */
+ /* usage */ ParameterUsage::kOffset,
+ /* matcher indices */ &kMatcherIndices[123],
+ },
+ {
+ /* [255] */
+ /* usage */ ParameterUsage::kComponent,
+ /* matcher indices */ &kMatcherIndices[44],
+ },
+ {
+ /* [256] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[131],
+ },
+ {
+ /* [257] */
+ /* usage */ ParameterUsage::kSampler,
+ /* matcher indices */ &kMatcherIndices[193],
+ },
+ {
+ /* [258] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[103],
+ },
+ {
+ /* [259] */
+ /* usage */ ParameterUsage::kOffset,
+ /* matcher indices */ &kMatcherIndices[109],
+ },
+ {
+ /* [260] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[197],
+ },
+ {
+ /* [261] */
+ /* usage */ ParameterUsage::kSampler,
+ /* matcher indices */ &kMatcherIndices[194],
+ },
+ {
+ /* [262] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[103],
+ },
+ {
+ /* [263] */
+ /* usage */ ParameterUsage::kArrayIndex,
+ /* matcher indices */ &kMatcherIndices[44],
+ },
+ {
+ /* [264] */
+ /* usage */ ParameterUsage::kDepthRef,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [265] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[198],
+ },
+ {
+ /* [266] */
+ /* usage */ ParameterUsage::kSampler,
+ /* matcher indices */ &kMatcherIndices[194],
+ },
+ {
+ /* [267] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[103],
+ },
+ {
+ /* [268] */
+ /* usage */ ParameterUsage::kDepthRef,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [269] */
+ /* usage */ ParameterUsage::kOffset,
+ /* matcher indices */ &kMatcherIndices[109],
+ },
+ {
+ /* [270] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[191],
+ },
+ {
+ /* [271] */
+ /* usage */ ParameterUsage::kSampler,
+ /* matcher indices */ &kMatcherIndices[193],
+ },
+ {
+ /* [272] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[121],
+ },
+ {
+ /* [273] */
+ /* usage */ ParameterUsage::kArrayIndex,
+ /* matcher indices */ &kMatcherIndices[44],
+ },
+ {
+ /* [274] */
+ /* usage */ ParameterUsage::kBias,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [275] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[195],
+ },
+ {
+ /* [276] */
+ /* usage */ ParameterUsage::kSampler,
+ /* matcher indices */ &kMatcherIndices[194],
+ },
+ {
+ /* [277] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[121],
+ },
+ {
+ /* [278] */
+ /* usage */ ParameterUsage::kArrayIndex,
+ /* matcher indices */ &kMatcherIndices[44],
+ },
+ {
+ /* [279] */
+ /* usage */ ParameterUsage::kDepthRef,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [280] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[198],
+ },
+ {
+ /* [281] */
+ /* usage */ ParameterUsage::kSampler,
+ /* matcher indices */ &kMatcherIndices[194],
+ },
+ {
+ /* [282] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[103],
+ },
+ {
+ /* [283] */
+ /* usage */ ParameterUsage::kDepthRef,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [284] */
+ /* usage */ ParameterUsage::kOffset,
+ /* matcher indices */ &kMatcherIndices[109],
+ },
+ {
+ /* [285] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[197],
+ },
+ {
+ /* [286] */
+ /* usage */ ParameterUsage::kSampler,
+ /* matcher indices */ &kMatcherIndices[194],
+ },
+ {
+ /* [287] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[103],
+ },
+ {
+ /* [288] */
+ /* usage */ ParameterUsage::kArrayIndex,
+ /* matcher indices */ &kMatcherIndices[44],
+ },
+ {
+ /* [289] */
+ /* usage */ ParameterUsage::kDepthRef,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [290] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[183],
+ },
+ {
+ /* [291] */
+ /* usage */ ParameterUsage::kSampler,
+ /* matcher indices */ &kMatcherIndices[193],
+ },
+ {
+ /* [292] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[103],
+ },
+ {
+ /* [293] */
+ /* usage */ ParameterUsage::kArrayIndex,
+ /* matcher indices */ &kMatcherIndices[44],
+ },
+ {
+ /* [294] */
+ /* usage */ ParameterUsage::kBias,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [295] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[196],
+ },
+ {
+ /* [296] */
+ /* usage */ ParameterUsage::kSampler,
+ /* matcher indices */ &kMatcherIndices[194],
+ },
+ {
+ /* [297] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[121],
+ },
+ {
+ /* [298] */
+ /* usage */ ParameterUsage::kDepthRef,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [299] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[198],
+ },
+ {
+ /* [300] */
+ /* usage */ ParameterUsage::kSampler,
+ /* matcher indices */ &kMatcherIndices[194],
+ },
+ {
+ /* [301] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[103],
+ },
+ {
+ /* [302] */
+ /* usage */ ParameterUsage::kDepthRef,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [303] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[198],
+ },
+ {
+ /* [304] */
+ /* usage */ ParameterUsage::kSampler,
+ /* matcher indices */ &kMatcherIndices[194],
+ },
+ {
+ /* [305] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[103],
+ },
+ {
+ /* [306] */
+ /* usage */ ParameterUsage::kDepthRef,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [307] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[189],
+ },
+ {
+ /* [308] */
+ /* usage */ ParameterUsage::kSampler,
+ /* matcher indices */ &kMatcherIndices[193],
+ },
+ {
+ /* [309] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[121],
+ },
+ {
+ /* [310] */
+ /* usage */ ParameterUsage::kBias,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [311] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[196],
+ },
+ {
+ /* [312] */
+ /* usage */ ParameterUsage::kSampler,
+ /* matcher indices */ &kMatcherIndices[194],
+ },
+ {
+ /* [313] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[121],
+ },
+ {
+ /* [314] */
+ /* usage */ ParameterUsage::kDepthRef,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [315] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[187],
+ },
+ {
+ /* [316] */
+ /* usage */ ParameterUsage::kSampler,
+ /* matcher indices */ &kMatcherIndices[193],
+ },
+ {
+ /* [317] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[121],
+ },
+ {
+ /* [318] */
+ /* usage */ ParameterUsage::kBias,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [319] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[181],
+ },
+ {
+ /* [320] */
+ /* usage */ ParameterUsage::kSampler,
+ /* matcher indices */ &kMatcherIndices[193],
+ },
+ {
+ /* [321] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[103],
+ },
+ {
+ /* [322] */
+ /* usage */ ParameterUsage::kLevel,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [323] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[133],
+ },
+ {
+ /* [324] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[109],
+ },
+ {
+ /* [325] */
+ /* usage */ ParameterUsage::kArrayIndex,
+ /* matcher indices */ &kMatcherIndices[44],
+ },
+ {
+ /* [326] */
+ /* usage */ ParameterUsage::kLevel,
+ /* matcher indices */ &kMatcherIndices[44],
+ },
+ {
+ /* [327] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[187],
+ },
+ {
+ /* [328] */
+ /* usage */ ParameterUsage::kSampler,
+ /* matcher indices */ &kMatcherIndices[193],
+ },
+ {
+ /* [329] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[121],
+ },
+ {
+ /* [330] */
+ /* usage */ ParameterUsage::kLevel,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [331] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[181],
+ },
+ {
+ /* [332] */
+ /* usage */ ParameterUsage::kSampler,
+ /* matcher indices */ &kMatcherIndices[193],
+ },
+ {
+ /* [333] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[103],
+ },
+ {
+ /* [334] */
+ /* usage */ ParameterUsage::kBias,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [335] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[195],
+ },
+ {
+ /* [336] */
+ /* usage */ ParameterUsage::kSampler,
+ /* matcher indices */ &kMatcherIndices[193],
+ },
+ {
+ /* [337] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[121],
+ },
+ {
+ /* [338] */
+ /* usage */ ParameterUsage::kArrayIndex,
+ /* matcher indices */ &kMatcherIndices[44],
+ },
+ {
+ /* [339] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[189],
+ },
+ {
+ /* [340] */
+ /* usage */ ParameterUsage::kSampler,
+ /* matcher indices */ &kMatcherIndices[193],
+ },
+ {
+ /* [341] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[121],
+ },
+ {
+ /* [342] */
+ /* usage */ ParameterUsage::kLevel,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [343] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[197],
+ },
+ {
+ /* [344] */
+ /* usage */ ParameterUsage::kSampler,
+ /* matcher indices */ &kMatcherIndices[193],
+ },
+ {
+ /* [345] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[103],
+ },
+ {
+ /* [346] */
+ /* usage */ ParameterUsage::kArrayIndex,
+ /* matcher indices */ &kMatcherIndices[44],
+ },
+ {
+ /* [347] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[198],
+ },
+ {
+ /* [348] */
+ /* usage */ ParameterUsage::kSampler,
+ /* matcher indices */ &kMatcherIndices[193],
+ },
+ {
+ /* [349] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[103],
+ },
+ {
+ /* [350] */
+ /* usage */ ParameterUsage::kOffset,
+ /* matcher indices */ &kMatcherIndices[109],
+ },
+ {
+ /* [351] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[191],
+ },
+ {
+ /* [352] */
+ /* usage */ ParameterUsage::kSampler,
+ /* matcher indices */ &kMatcherIndices[193],
+ },
+ {
+ /* [353] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[121],
+ },
+ {
+ /* [354] */
+ /* usage */ ParameterUsage::kArrayIndex,
+ /* matcher indices */ &kMatcherIndices[44],
+ },
+ {
+ /* [355] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[187],
+ },
+ {
+ /* [356] */
+ /* usage */ ParameterUsage::kSampler,
+ /* matcher indices */ &kMatcherIndices[193],
+ },
+ {
+ /* [357] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[121],
+ },
+ {
+ /* [358] */
+ /* usage */ ParameterUsage::kOffset,
+ /* matcher indices */ &kMatcherIndices[123],
+ },
+ {
+ /* [359] */
+ /* usage */ ParameterUsage::kComponent,
+ /* matcher indices */ &kMatcherIndices[44],
+ },
+ {
+ /* [360] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[131],
+ },
+ {
+ /* [361] */
+ /* usage */ ParameterUsage::kSampler,
+ /* matcher indices */ &kMatcherIndices[193],
+ },
+ {
+ /* [362] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[103],
+ },
+ {
+ /* [363] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[183],
+ },
+ {
+ /* [364] */
+ /* usage */ ParameterUsage::kSampler,
+ /* matcher indices */ &kMatcherIndices[193],
+ },
+ {
+ /* [365] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[103],
+ },
+ {
+ /* [366] */
+ /* usage */ ParameterUsage::kArrayIndex,
+ /* matcher indices */ &kMatcherIndices[44],
+ },
+ {
+ /* [367] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[181],
+ },
+ {
+ /* [368] */
+ /* usage */ ParameterUsage::kSampler,
+ /* matcher indices */ &kMatcherIndices[193],
+ },
+ {
+ /* [369] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[103],
+ },
+ {
+ /* [370] */
+ /* usage */ ParameterUsage::kOffset,
+ /* matcher indices */ &kMatcherIndices[109],
+ },
+ {
+ /* [371] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[198],
+ },
+ {
+ /* [372] */
+ /* usage */ ParameterUsage::kSampler,
+ /* matcher indices */ &kMatcherIndices[193],
+ },
+ {
+ /* [373] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[103],
+ },
+ {
+ /* [374] */
+ /* usage */ ParameterUsage::kLevel,
+ /* matcher indices */ &kMatcherIndices[44],
+ },
+ {
+ /* [375] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[196],
+ },
+ {
+ /* [376] */
+ /* usage */ ParameterUsage::kSampler,
+ /* matcher indices */ &kMatcherIndices[194],
+ },
+ {
+ /* [377] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[121],
+ },
+ {
+ /* [378] */
+ /* usage */ ParameterUsage::kDepthRef,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [379] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[196],
+ },
+ {
+ /* [380] */
+ /* usage */ ParameterUsage::kSampler,
+ /* matcher indices */ &kMatcherIndices[193],
+ },
+ {
+ /* [381] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[121],
+ },
+ {
+ /* [382] */
+ /* usage */ ParameterUsage::kLevel,
+ /* matcher indices */ &kMatcherIndices[44],
+ },
+ {
+ /* [383] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[88],
+ },
+ {
+ /* [384] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[109],
+ },
+ {
+ /* [385] */
+ /* usage */ ParameterUsage::kArrayIndex,
+ /* matcher indices */ &kMatcherIndices[44],
+ },
+ {
+ /* [386] */
+ /* usage */ ParameterUsage::kValue,
+ /* matcher indices */ &kMatcherIndices[45],
+ },
+ {
+ /* [387] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[63],
+ },
+ {
+ /* [388] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[109],
+ },
+ {
+ /* [389] */
+ /* usage */ ParameterUsage::kArrayIndex,
+ /* matcher indices */ &kMatcherIndices[44],
+ },
+ {
+ /* [390] */
+ /* usage */ ParameterUsage::kValue,
+ /* matcher indices */ &kMatcherIndices[57],
+ },
+ {
+ /* [391] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[198],
+ },
+ {
+ /* [392] */
+ /* usage */ ParameterUsage::kSampler,
+ /* matcher indices */ &kMatcherIndices[194],
+ },
+ {
+ /* [393] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[103],
+ },
+ {
+ /* [394] */
+ /* usage */ ParameterUsage::kDepthRef,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [395] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[195],
+ },
+ {
+ /* [396] */
+ /* usage */ ParameterUsage::kSampler,
+ /* matcher indices */ &kMatcherIndices[193],
+ },
+ {
+ /* [397] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[121],
+ },
+ {
+ /* [398] */
+ /* usage */ ParameterUsage::kArrayIndex,
+ /* matcher indices */ &kMatcherIndices[44],
+ },
+ {
+ /* [399] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[47],
+ },
+ {
+ /* [400] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[109],
+ },
+ {
+ /* [401] */
+ /* usage */ ParameterUsage::kArrayIndex,
+ /* matcher indices */ &kMatcherIndices[44],
+ },
+ {
+ /* [402] */
+ /* usage */ ParameterUsage::kValue,
+ /* matcher indices */ &kMatcherIndices[61],
+ },
+ {
+ /* [403] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[37],
+ },
+ {
+ /* [404] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[37],
+ },
+ {
+ /* [405] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[62],
+ },
+ {
+ /* [406] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[62],
+ },
+ {
+ /* [407] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[197],
+ },
+ {
+ /* [408] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[109],
+ },
+ {
+ /* [409] */
+ /* usage */ ParameterUsage::kArrayIndex,
+ /* matcher indices */ &kMatcherIndices[44],
+ },
+ {
+ /* [410] */
+ /* usage */ ParameterUsage::kLevel,
+ /* matcher indices */ &kMatcherIndices[44],
+ },
+ {
+ /* [411] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[197],
+ },
+ {
+ /* [412] */
+ /* usage */ ParameterUsage::kSampler,
+ /* matcher indices */ &kMatcherIndices[193],
+ },
+ {
+ /* [413] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[103],
+ },
+ {
+ /* [414] */
+ /* usage */ ParameterUsage::kArrayIndex,
+ /* matcher indices */ &kMatcherIndices[44],
+ },
+ {
+ /* [415] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[198],
+ },
+ {
+ /* [416] */
+ /* usage */ ParameterUsage::kSampler,
+ /* matcher indices */ &kMatcherIndices[193],
+ },
+ {
+ /* [417] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[103],
+ },
+ {
+ /* [418] */
+ /* usage */ ParameterUsage::kOffset,
+ /* matcher indices */ &kMatcherIndices[109],
+ },
+ {
+ /* [419] */
+ /* usage */ ParameterUsage::kX,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [420] */
+ /* usage */ ParameterUsage::kY,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [421] */
+ /* usage */ ParameterUsage::kZ,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [422] */
+ /* usage */ ParameterUsage::kW,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [423] */
+ /* usage */ ParameterUsage::kComponent,
+ /* matcher indices */ &kMatcherIndices[44],
+ },
+ {
+ /* [424] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[141],
+ },
+ {
+ /* [425] */
+ /* usage */ ParameterUsage::kSampler,
+ /* matcher indices */ &kMatcherIndices[193],
+ },
+ {
+ /* [426] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[121],
+ },
+ {
+ /* [427] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [428] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [429] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [430] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [431] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[105],
+ },
+ {
+ /* [432] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[105],
+ },
+ {
+ /* [433] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[105],
+ },
+ {
+ /* [434] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[105],
+ },
+ {
+ /* [435] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[115],
+ },
+ {
+ /* [436] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[115],
+ },
+ {
+ /* [437] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[115],
+ },
+ {
+ /* [438] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[115],
+ },
+ {
+ /* [439] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [440] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [441] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[62],
+ },
+ {
+ /* [442] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[62],
+ },
+ {
+ /* [443] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[49],
+ },
+ {
+ /* [444] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[49],
+ },
+ {
+ /* [445] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[49],
+ },
+ {
+ /* [446] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[49],
+ },
+ {
+ /* [447] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[198],
+ },
+ {
+ /* [448] */
+ /* usage */ ParameterUsage::kSampler,
+ /* matcher indices */ &kMatcherIndices[193],
+ },
+ {
+ /* [449] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[103],
+ },
+ {
+ /* [450] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[37],
+ },
+ {
+ /* [451] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[37],
+ },
+ {
+ /* [452] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[37],
+ },
+ {
+ /* [453] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [454] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [455] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [456] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[199],
+ },
+ {
+ /* [457] */
+ /* usage */ ParameterUsage::kSampler,
+ /* matcher indices */ &kMatcherIndices[193],
+ },
+ {
+ /* [458] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[103],
+ },
+ {
+ /* [459] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [460] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [461] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [462] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[31],
+ },
+ {
+ /* [463] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[31],
+ },
+ {
+ /* [464] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[31],
+ },
+ {
+ /* [465] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[31],
+ },
+ {
+ /* [466] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[31],
+ },
+ {
+ /* [467] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [468] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[49],
+ },
+ {
+ /* [469] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[49],
+ },
+ {
+ /* [470] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[49],
+ },
+ {
+ /* [471] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[115],
+ },
+ {
+ /* [472] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[115],
+ },
+ {
+ /* [473] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[115],
+ },
+ {
+ /* [474] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[105],
+ },
+ {
+ /* [475] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[105],
+ },
+ {
+ /* [476] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[105],
+ },
+ {
+ /* [477] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[94],
+ },
+ {
+ /* [478] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[44],
+ },
+ {
+ /* [479] */
+ /* usage */ ParameterUsage::kValue,
+ /* matcher indices */ &kMatcherIndices[45],
+ },
+ {
+ /* [480] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[91],
+ },
+ {
+ /* [481] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[109],
+ },
+ {
+ /* [482] */
+ /* usage */ ParameterUsage::kValue,
+ /* matcher indices */ &kMatcherIndices[45],
+ },
+ {
+ /* [483] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[73],
+ },
+ {
+ /* [484] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[123],
+ },
+ {
+ /* [485] */
+ /* usage */ ParameterUsage::kValue,
+ /* matcher indices */ &kMatcherIndices[45],
+ },
+ {
+ /* [486] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[70],
+ },
+ {
+ /* [487] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[44],
+ },
+ {
+ /* [488] */
+ /* usage */ ParameterUsage::kValue,
+ /* matcher indices */ &kMatcherIndices[57],
+ },
+ {
+ /* [489] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[67],
+ },
+ {
+ /* [490] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[109],
+ },
+ {
+ /* [491] */
+ /* usage */ ParameterUsage::kValue,
+ /* matcher indices */ &kMatcherIndices[57],
+ },
+ {
+ /* [492] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[59],
+ },
+ {
+ /* [493] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[123],
+ },
+ {
+ /* [494] */
+ /* usage */ ParameterUsage::kValue,
+ /* matcher indices */ &kMatcherIndices[57],
+ },
+ {
+ /* [495] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [496] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[62],
+ },
+ {
+ /* [497] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[62],
+ },
+ {
+ /* [498] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[37],
+ },
+ {
+ /* [499] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[62],
+ },
+ {
+ /* [500] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[62],
+ },
+ {
+ /* [501] */
+ /* usage */ ParameterUsage::kX,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [502] */
+ /* usage */ ParameterUsage::kY,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [503] */
+ /* usage */ ParameterUsage::kZw,
+ /* matcher indices */ &kMatcherIndices[105],
+ },
+ {
+ /* [504] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[31],
+ },
+ {
+ /* [505] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[31],
+ },
+ {
+ /* [506] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[31],
+ },
+ {
+ /* [507] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[31],
+ },
+ {
+ /* [508] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[31],
+ },
+ {
+ /* [509] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [510] */
+ /* usage */ ParameterUsage::kX,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [511] */
+ /* usage */ ParameterUsage::kYz,
+ /* matcher indices */ &kMatcherIndices[105],
+ },
+ {
+ /* [512] */
+ /* usage */ ParameterUsage::kW,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [513] */
+ /* usage */ ParameterUsage::kXy,
+ /* matcher indices */ &kMatcherIndices[105],
+ },
+ {
+ /* [514] */
+ /* usage */ ParameterUsage::kZ,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [515] */
+ /* usage */ ParameterUsage::kW,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [516] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[55],
+ },
+ {
+ /* [517] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[44],
+ },
+ {
+ /* [518] */
+ /* usage */ ParameterUsage::kValue,
+ /* matcher indices */ &kMatcherIndices[61],
+ },
+ {
+ /* [519] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[51],
+ },
+ {
+ /* [520] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[109],
+ },
+ {
+ /* [521] */
+ /* usage */ ParameterUsage::kValue,
+ /* matcher indices */ &kMatcherIndices[61],
+ },
+ {
+ /* [522] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [523] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [524] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[21],
+ },
+ {
+ /* [525] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[37],
+ },
+ {
+ /* [526] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[37],
+ },
+ {
+ /* [527] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[21],
+ },
+ {
+ /* [528] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[37],
+ },
+ {
+ /* [529] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[37],
+ },
+ {
+ /* [530] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[40],
+ },
+ {
+ /* [531] */
+ /* usage */ ParameterUsage::kX,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [532] */
+ /* usage */ ParameterUsage::kY,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [533] */
+ /* usage */ ParameterUsage::kZ,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [534] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[196],
+ },
+ {
+ /* [535] */
+ /* usage */ ParameterUsage::kSampler,
+ /* matcher indices */ &kMatcherIndices[193],
+ },
+ {
+ /* [536] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[121],
+ },
+ {
+ /* [537] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[43],
+ },
+ {
+ /* [538] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[123],
+ },
+ {
+ /* [539] */
+ /* usage */ ParameterUsage::kValue,
+ /* matcher indices */ &kMatcherIndices[61],
+ },
+ {
+ /* [540] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[129],
+ },
+ {
+ /* [541] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[44],
+ },
+ {
+ /* [542] */
+ /* usage */ ParameterUsage::kLevel,
+ /* matcher indices */ &kMatcherIndices[44],
+ },
+ {
+ /* [543] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[131],
+ },
+ {
+ /* [544] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[109],
+ },
+ {
+ /* [545] */
+ /* usage */ ParameterUsage::kLevel,
+ /* matcher indices */ &kMatcherIndices[44],
+ },
+ {
+ /* [546] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[198],
+ },
+ {
+ /* [547] */
+ /* usage */ ParameterUsage::kSampler,
+ /* matcher indices */ &kMatcherIndices[193],
+ },
+ {
+ /* [548] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[103],
+ },
+ {
+ /* [549] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [550] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [551] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [552] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[31],
+ },
+ {
+ /* [553] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[31],
+ },
+ {
+ /* [554] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[31],
+ },
+ {
+ /* [555] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [556] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [557] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [558] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[31],
+ },
+ {
+ /* [559] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[31],
+ },
+ {
+ /* [560] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[31],
+ },
+ {
+ /* [561] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [562] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [563] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [564] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[189],
+ },
+ {
+ /* [565] */
+ /* usage */ ParameterUsage::kSampler,
+ /* matcher indices */ &kMatcherIndices[193],
+ },
+ {
+ /* [566] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[121],
+ },
+ {
+ /* [567] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[31],
+ },
+ {
+ /* [568] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[31],
+ },
+ {
+ /* [569] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[31],
+ },
+ {
+ /* [570] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[187],
+ },
+ {
+ /* [571] */
+ /* usage */ ParameterUsage::kSampler,
+ /* matcher indices */ &kMatcherIndices[193],
+ },
+ {
+ /* [572] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[121],
+ },
+ {
+ /* [573] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[139],
+ },
+ {
+ /* [574] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[123],
+ },
+ {
+ /* [575] */
+ /* usage */ ParameterUsage::kLevel,
+ /* matcher indices */ &kMatcherIndices[44],
+ },
+ {
+ /* [576] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[145],
+ },
+ {
+ /* [577] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[109],
+ },
+ {
+ /* [578] */
+ /* usage */ ParameterUsage::kSampleIndex,
+ /* matcher indices */ &kMatcherIndices[44],
+ },
+ {
+ /* [579] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[198],
+ },
+ {
+ /* [580] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[109],
+ },
+ {
+ /* [581] */
+ /* usage */ ParameterUsage::kLevel,
+ /* matcher indices */ &kMatcherIndices[44],
+ },
+ {
+ /* [582] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[181],
+ },
+ {
+ /* [583] */
+ /* usage */ ParameterUsage::kSampler,
+ /* matcher indices */ &kMatcherIndices[193],
+ },
+ {
+ /* [584] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[103],
+ },
+ {
+ /* [585] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[179],
+ },
+ {
+ /* [586] */
+ /* usage */ ParameterUsage::kSampler,
+ /* matcher indices */ &kMatcherIndices[193],
+ },
+ {
+ /* [587] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [588] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[200],
+ },
+ {
+ /* [589] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[109],
+ },
+ {
+ /* [590] */
+ /* usage */ ParameterUsage::kSampleIndex,
+ /* matcher indices */ &kMatcherIndices[44],
+ },
+ {
+ /* [591] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[0],
+ },
+ {
+ /* [592] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [593] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [594] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[196],
+ },
+ {
+ /* [595] */
+ /* usage */ ParameterUsage::kSampler,
+ /* matcher indices */ &kMatcherIndices[193],
+ },
+ {
+ /* [596] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[121],
+ },
+ {
+ /* [597] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[62],
+ },
+ {
+ /* [598] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[62],
+ },
+ {
+ /* [599] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[37],
+ },
+ {
+ /* [600] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[37],
+ },
+ {
+ /* [601] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [602] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [603] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[21],
+ },
+ {
+ /* [604] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[21],
+ },
+ {
+ /* [605] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[21],
+ },
+ {
+ /* [606] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[21],
+ },
+ {
+ /* [607] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[37],
+ },
+ {
+ /* [608] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[37],
+ },
+ {
+ /* [609] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [610] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [611] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[129],
+ },
+ {
+ /* [612] */
+ /* usage */ ParameterUsage::kLevel,
+ /* matcher indices */ &kMatcherIndices[44],
+ },
+ {
+ /* [613] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[40],
+ },
+ {
+ /* [614] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[40],
+ },
+ {
+ /* [615] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[131],
+ },
+ {
+ /* [616] */
+ /* usage */ ParameterUsage::kLevel,
+ /* matcher indices */ &kMatcherIndices[44],
+ },
+ {
+ /* [617] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[21],
+ },
+ {
+ /* [618] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[21],
+ },
+ {
+ /* [619] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[133],
+ },
+ {
+ /* [620] */
+ /* usage */ ParameterUsage::kLevel,
+ /* matcher indices */ &kMatcherIndices[44],
+ },
+ {
+ /* [621] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[37],
+ },
+ {
+ /* [622] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[37],
+ },
+ {
+ /* [623] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[139],
+ },
+ {
+ /* [624] */
+ /* usage */ ParameterUsage::kLevel,
+ /* matcher indices */ &kMatcherIndices[44],
+ },
+ {
+ /* [625] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [626] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [627] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[141],
+ },
+ {
+ /* [628] */
+ /* usage */ ParameterUsage::kLevel,
+ /* matcher indices */ &kMatcherIndices[44],
+ },
+ {
+ /* [629] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[40],
+ },
+ {
+ /* [630] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[40],
+ },
+ {
+ /* [631] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[143],
+ },
+ {
+ /* [632] */
+ /* usage */ ParameterUsage::kLevel,
+ /* matcher indices */ &kMatcherIndices[44],
+ },
+ {
+ /* [633] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[21],
+ },
+ {
+ /* [634] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[21],
+ },
+ {
+ /* [635] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[37],
+ },
+ {
+ /* [636] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[37],
+ },
+ {
+ /* [637] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[198],
+ },
+ {
+ /* [638] */
+ /* usage */ ParameterUsage::kLevel,
+ /* matcher indices */ &kMatcherIndices[44],
+ },
+ {
+ /* [639] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [640] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [641] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[197],
+ },
+ {
+ /* [642] */
+ /* usage */ ParameterUsage::kLevel,
+ /* matcher indices */ &kMatcherIndices[44],
+ },
+ {
+ /* [643] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [644] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[37],
+ },
+ {
+ /* [645] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[196],
+ },
+ {
+ /* [646] */
+ /* usage */ ParameterUsage::kLevel,
+ /* matcher indices */ &kMatcherIndices[44],
+ },
+ {
+ /* [647] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[37],
+ },
+ {
+ /* [648] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [649] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[195],
+ },
+ {
+ /* [650] */
+ /* usage */ ParameterUsage::kLevel,
+ /* matcher indices */ &kMatcherIndices[44],
+ },
+ {
+ /* [651] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[37],
+ },
+ {
+ /* [652] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[37],
+ },
+ {
+ /* [653] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [654] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [655] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [656] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[37],
+ },
+ {
+ /* [657] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[37],
+ },
+ {
+ /* [658] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [659] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[37],
+ },
+ {
+ /* [660] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[37],
+ },
+ {
+ /* [661] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [662] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [663] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[19],
+ },
+ {
+ /* [664] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[23],
+ },
+ {
+ /* [665] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[76],
+ },
+ {
+ /* [666] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[11],
+ },
+ {
+ /* [667] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[11],
+ },
+ {
+ /* [668] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[31],
+ },
+ {
+ /* [669] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[11],
+ },
+ {
+ /* [670] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [671] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [672] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[11],
+ },
+ {
+ /* [673] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [674] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[37],
+ },
+ {
+ /* [675] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [676] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [677] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [678] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[44],
+ },
+ {
+ /* [679] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[37],
+ },
+ {
+ /* [680] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [681] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[37],
+ },
+ {
+ /* [682] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[37],
+ },
+ {
+ /* [683] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[37],
+ },
+ {
+ /* [684] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[37],
+ },
+ {
+ /* [685] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [686] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [687] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[11],
+ },
+ {
+ /* [688] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[11],
+ },
+ {
+ /* [689] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [690] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[37],
+ },
+ {
+ /* [691] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[37],
+ },
+ {
+ /* [692] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [693] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[37],
+ },
+ {
+ /* [694] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[37],
+ },
+ {
+ /* [695] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [696] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [697] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[11],
+ },
+ {
+ /* [698] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[11],
+ },
+ {
+ /* [699] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [700] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[37],
+ },
+ {
+ /* [701] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[37],
+ },
+ {
+ /* [702] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [703] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[37],
+ },
+ {
+ /* [704] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[37],
+ },
+ {
+ /* [705] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [706] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [707] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [708] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [709] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[0],
+ },
+ {
+ /* [710] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [711] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[0],
+ },
+ {
+ /* [712] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [713] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[0],
+ },
+ {
+ /* [714] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [715] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[0],
+ },
+ {
+ /* [716] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [717] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[0],
+ },
+ {
+ /* [718] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [719] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[0],
+ },
+ {
+ /* [720] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [721] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[0],
+ },
+ {
+ /* [722] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [723] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[0],
+ },
+ {
+ /* [724] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [725] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[0],
+ },
+ {
+ /* [726] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [727] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[199],
+ },
+ {
+ /* [728] */
+ /* usage */ ParameterUsage::kCoords,
+ /* matcher indices */ &kMatcherIndices[109],
+ },
+ {
+ /* [729] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[37],
+ },
+ {
+ /* [730] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[37],
+ },
+ {
+ /* [731] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [732] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [733] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [734] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [735] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[37],
+ },
+ {
+ /* [736] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[37],
+ },
+ {
+ /* [737] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [738] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [739] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [740] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [741] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[37],
+ },
+ {
+ /* [742] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[37],
+ },
+ {
+ /* [743] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[31],
+ },
+ {
+ /* [744] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[31],
+ },
+ {
+ /* [745] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [746] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [747] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [748] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [749] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[37],
+ },
+ {
+ /* [750] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[37],
+ },
+ {
+ /* [751] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [752] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[62],
+ },
+ {
+ /* [753] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[37],
+ },
+ {
+ /* [754] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[100],
+ },
+ {
+ /* [755] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [756] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[62],
+ },
+ {
+ /* [757] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[37],
+ },
+ {
+ /* [758] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[100],
+ },
+ {
+ /* [759] */
+ /* usage */ ParameterUsage::kX,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [760] */
+ /* usage */ ParameterUsage::kY,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [761] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[31],
+ },
+ {
+ /* [762] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[31],
+ },
+ {
+ /* [763] */
+ /* usage */ ParameterUsage::kXy,
+ /* matcher indices */ &kMatcherIndices[105],
+ },
+ {
+ /* [764] */
+ /* usage */ ParameterUsage::kZ,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [765] */
+ /* usage */ ParameterUsage::kX,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [766] */
+ /* usage */ ParameterUsage::kYz,
+ /* matcher indices */ &kMatcherIndices[105],
+ },
+ {
+ /* [767] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[31],
+ },
+ {
+ /* [768] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[31],
+ },
+ {
+ /* [769] */
+ /* usage */ ParameterUsage::kXy,
+ /* matcher indices */ &kMatcherIndices[105],
+ },
+ {
+ /* [770] */
+ /* usage */ ParameterUsage::kZw,
+ /* matcher indices */ &kMatcherIndices[105],
+ },
+ {
+ /* [771] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[31],
+ },
+ {
+ /* [772] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[31],
+ },
+ {
+ /* [773] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [774] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [775] */
+ /* usage */ ParameterUsage::kXyz,
+ /* matcher indices */ &kMatcherIndices[115],
+ },
+ {
+ /* [776] */
+ /* usage */ ParameterUsage::kW,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [777] */
+ /* usage */ ParameterUsage::kX,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [778] */
+ /* usage */ ParameterUsage::kZyw,
+ /* matcher indices */ &kMatcherIndices[115],
+ },
+ {
+ /* [779] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[105],
+ },
+ {
+ /* [780] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[105],
+ },
+ {
+ /* [781] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[115],
+ },
+ {
+ /* [782] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[115],
+ },
+ {
+ /* [783] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[49],
+ },
+ {
+ /* [784] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[49],
+ },
+ {
+ /* [785] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[37],
+ },
+ {
+ /* [786] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[37],
+ },
+ {
+ /* [787] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [788] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [789] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[37],
+ },
+ {
+ /* [790] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[37],
+ },
+ {
+ /* [791] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[121],
+ },
+ {
+ /* [792] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[121],
+ },
+ {
+ /* [793] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [794] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [795] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[31],
+ },
+ {
+ /* [796] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[31],
+ },
+ {
+ /* [797] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[37],
+ },
+ {
+ /* [798] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[37],
+ },
+ {
+ /* [799] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[62],
+ },
+ {
+ /* [800] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[62],
+ },
+ {
+ /* [801] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[31],
+ },
+ {
+ /* [802] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[97],
+ },
+ {
+ /* [803] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [804] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[31],
+ },
+ {
+ /* [805] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [806] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[31],
+ },
+ {
+ /* [807] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [808] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[31],
+ },
+ {
+ /* [809] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[15],
+ },
+ {
+ /* [810] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[31],
+ },
+ {
+ /* [811] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [812] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [813] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[37],
+ },
+ {
+ /* [814] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [815] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[37],
+ },
+ {
+ /* [816] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [817] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[37],
+ },
+ {
+ /* [818] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [819] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[31],
+ },
+ {
+ /* [820] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [821] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[31],
+ },
+ {
+ /* [822] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [823] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[31],
+ },
+ {
+ /* [824] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [825] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[31],
+ },
+ {
+ /* [826] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [827] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[31],
+ },
+ {
+ /* [828] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[31],
+ },
+ {
+ /* [829] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [830] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[31],
+ },
+ {
+ /* [831] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [832] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [833] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[31],
+ },
+ {
+ /* [834] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [835] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[37],
+ },
+ {
+ /* [836] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [837] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[37],
+ },
+ {
+ /* [838] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [839] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[31],
+ },
+ {
+ /* [840] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[34],
+ },
+ {
+ /* [841] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [842] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[31],
+ },
+ {
+ /* [843] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [844] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[200],
+ },
+ {
+ /* [845] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[145],
+ },
+ {
+ /* [846] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[195],
+ },
+ {
+ /* [847] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[0],
+ },
+ {
+ /* [848] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[196],
+ },
+ {
+ /* [849] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[197],
+ },
+ {
+ /* [850] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[198],
+ },
+ {
+ /* [851] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[143],
+ },
+ {
+ /* [852] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[141],
+ },
+ {
+ /* [853] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[139],
+ },
+ {
+ /* [854] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[133],
+ },
+ {
+ /* [855] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[131],
+ },
+ {
+ /* [856] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[129],
+ },
+ {
+ /* [857] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[34],
+ },
+ {
+ /* [858] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[21],
+ },
+ {
+ /* [859] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[40],
+ },
+ {
+ /* [860] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [861] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[37],
+ },
+ {
+ /* [862] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [863] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[37],
+ },
+ {
+ /* [864] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[195],
+ },
+ {
+ /* [865] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[197],
+ },
+ {
+ /* [866] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[143],
+ },
+ {
+ /* [867] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[133],
+ },
+ {
+ /* [868] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[31],
+ },
+ {
+ /* [869] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [870] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[31],
+ },
+ {
+ /* [871] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [872] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[31],
+ },
+ {
+ /* [873] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [874] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[31],
+ },
+ {
+ /* [875] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [876] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[31],
+ },
+ {
+ /* [877] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[31],
+ },
+ {
+ /* [878] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [879] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[31],
+ },
+ {
+ /* [880] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [881] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[31],
+ },
+ {
+ /* [882] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [883] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[199],
+ },
+ {
+ /* [884] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[79],
+ },
+ {
+ /* [885] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[31],
+ },
+ {
+ /* [886] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[82],
+ },
+ {
+ /* [887] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[85],
+ },
+ {
+ /* [888] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[200],
+ },
+ {
+ /* [889] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[195],
+ },
+ {
+ /* [890] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[196],
+ },
+ {
+ /* [891] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[197],
+ },
+ {
+ /* [892] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[198],
+ },
+ {
+ /* [893] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[145],
+ },
+ {
+ /* [894] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[143],
+ },
+ {
+ /* [895] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[141],
+ },
+ {
+ /* [896] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[139],
+ },
+ {
+ /* [897] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[133],
+ },
+ {
+ /* [898] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[131],
+ },
+ {
+ /* [899] */
+ /* usage */ ParameterUsage::kTexture,
+ /* matcher indices */ &kMatcherIndices[129],
+ },
+ {
+ /* [900] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[37],
+ },
+ {
+ /* [901] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[62],
+ },
+ {
+ /* [902] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[62],
+ },
+ {
+ /* [903] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[62],
+ },
+ {
+ /* [904] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[62],
+ },
+ {
+ /* [905] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[31],
+ },
+ {
+ /* [906] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[31],
+ },
+ {
+ /* [907] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [908] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[11],
+ },
+ {
+ /* [909] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[31],
+ },
+ {
+ /* [910] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [911] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [912] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [913] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[31],
+ },
+ {
+ /* [914] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [915] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[31],
+ },
+ {
+ /* [916] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [917] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[31],
+ },
+ {
+ /* [918] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [919] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[185],
+ },
+ {
+ /* [920] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[44],
+ },
+ {
+ /* [921] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [922] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [923] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[62],
+ },
+ {
+ /* [924] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [925] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[31],
+ },
+ {
+ /* [926] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [927] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [928] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [929] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[21],
+ },
+ {
+ /* [930] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [931] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[175],
+ },
+ {
+ /* [932] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[105],
+ },
+ {
+ /* [933] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [934] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[31],
+ },
+ {
+ /* [935] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[107],
+ },
+ {
+ /* [936] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[107],
+ },
+ {
+ /* [937] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[107],
+ },
+ {
+ /* [938] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[107],
+ },
+ {
+ /* [939] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[21],
+ },
+ {
+ /* [940] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[115],
+ },
+ {
+ /* [941] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [942] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [943] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[31],
+ },
+ {
+ /* [944] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [945] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[119],
+ },
+ {
+ /* [946] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[119],
+ },
+ {
+ /* [947] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[119],
+ },
+ {
+ /* [948] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[119],
+ },
+ {
+ /* [949] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[40],
+ },
+ {
+ /* [950] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[49],
+ },
+ {
+ /* [951] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [952] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[31],
+ },
+ {
+ /* [953] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[37],
+ },
+ {
+ /* [954] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [955] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[31],
+ },
+ {
+ /* [956] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [957] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [958] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[45],
+ },
+ {
+ /* [959] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[53],
+ },
+ {
+ /* [960] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[53],
+ },
+ {
+ /* [961] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[53],
+ },
+ {
+ /* [962] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[53],
+ },
+ {
+ /* [963] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [964] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[4],
+ },
+ {
+ /* [965] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [966] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [967] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[103],
+ },
+ {
+ /* [968] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[169],
+ },
+ {
+ /* [969] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[149],
+ },
+ {
+ /* [970] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [971] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[31],
+ },
+ {
+ /* [972] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[103],
+ },
+ {
+ /* [973] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [974] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[153],
+ },
+ {
+ /* [975] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [976] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[5],
+ },
+ {
+ /* [977] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[103],
+ },
+ {
+ /* [978] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[21],
+ },
+ {
+ /* [979] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[157],
+ },
+ {
+ /* [980] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [981] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[6],
+ },
+ {
+ /* [982] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[31],
+ },
+ {
+ /* [983] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [984] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[161],
+ },
+ {
+ /* [985] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[1],
+ },
+ {
+ /* [986] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[40],
+ },
+ {
+ /* [987] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[62],
+ },
+ {
+ /* [988] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[165],
+ },
+ {
+ /* [989] */
+ /* usage */ ParameterUsage::kNone,
+ /* matcher indices */ &kMatcherIndices[45],
+ },
+};
+
+constexpr TemplateTypeInfo kTemplateTypes[] = {
+ {
+ /* [0] */
+ /* name */ "T",
+ /* matcher index */ 2,
+ },
+ {
+ /* [1] */
+ /* name */ "U",
+ /* matcher index */ 58,
+ },
+ {
+ /* [2] */
+ /* name */ "T",
+ /* matcher index */ 7,
+ },
+ {
+ /* [3] */
+ /* name */ "U",
+ /* matcher index */ 55,
+ },
+ {
+ /* [4] */
+ /* name */ "T",
+ /* matcher index */ 5,
+ },
+ {
+ /* [5] */
+ /* name */ "U",
+ /* matcher index */ 56,
+ },
+ {
+ /* [6] */
+ /* name */ "T",
+ /* matcher index */ 6,
+ },
+ {
+ /* [7] */
+ /* name */ "U",
+ /* matcher index */ 57,
+ },
+ {
+ /* [8] */
+ /* name */ "T",
+ /* matcher index */ 49,
+ },
+ {
+ /* [9] */
+ /* name */ "f32",
+ /* matcher index */ kNoMatcher,
+ },
+ {
+ /* [10] */
+ /* name */ "T",
+ /* matcher index */ 54,
+ },
+ {
+ /* [11] */
+ /* name */ "T",
+ /* matcher index */ 51,
+ },
+ {
+ /* [12] */
+ /* name */ "T",
+ /* matcher index */ 53,
+ },
+ {
+ /* [13] */
+ /* name */ "T",
+ /* matcher index */ 52,
+ },
+ {
+ /* [14] */
+ /* name */ "T",
+ /* matcher index */ kNoMatcher,
+ },
+ {
+ /* [15] */
+ /* name */ "T",
+ /* matcher index */ 58,
+ },
+ {
+ /* [16] */
+ /* name */ "T",
+ /* matcher index */ 55,
+ },
+ {
+ /* [17] */
+ /* name */ "T",
+ /* matcher index */ 57,
+ },
+ {
+ /* [18] */
+ /* name */ "T",
+ /* matcher index */ 56,
+ },
+ {
+ /* [19] */
+ /* name */ "T",
+ /* matcher index */ 50,
+ },
+};
+
+constexpr TemplateNumberInfo kTemplateNumbers[] = {
+ {
+ /* [0] */
+ /* name */ "K",
+ /* matcher index */ kNoMatcher,
+ },
+ {
+ /* [1] */
+ /* name */ "C",
+ /* matcher index */ kNoMatcher,
+ },
+ {
+ /* [2] */
+ /* name */ "R",
+ /* matcher index */ kNoMatcher,
+ },
+ {
+ /* [3] */
+ /* name */ "F",
+ /* matcher index */ kNoMatcher,
+ },
+ {
+ /* [4] */
+ /* name */ "A",
+ /* matcher index */ 6,
+ },
+ {
+ /* [5] */
+ /* name */ "M",
+ /* matcher index */ kNoMatcher,
+ },
+ {
+ /* [6] */
+ /* name */ "N",
+ /* matcher index */ kNoMatcher,
+ },
+ {
+ /* [7] */
+ /* name */ "M",
+ /* matcher index */ kNoMatcher,
+ },
+ {
+ /* [8] */
+ /* name */ "A",
+ /* matcher index */ kNoMatcher,
+ },
+ {
+ /* [9] */
+ /* name */ "S",
+ /* matcher index */ 8,
+ },
+};
+
+constexpr OverloadInfo kOverloads[] = {
+ {
+ /* [0] */
+ /* num parameters */ 1,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[8],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[899],
+ /* return matcher indices */ &kMatcherIndices[44],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [1] */
+ /* num parameters */ 2,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[8],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[611],
+ /* return matcher indices */ &kMatcherIndices[44],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [2] */
+ /* num parameters */ 1,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[8],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[898],
+ /* return matcher indices */ &kMatcherIndices[109],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [3] */
+ /* num parameters */ 2,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[8],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[615],
+ /* return matcher indices */ &kMatcherIndices[109],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [4] */
+ /* num parameters */ 1,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[8],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[897],
+ /* return matcher indices */ &kMatcherIndices[109],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [5] */
+ /* num parameters */ 2,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[8],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[619],
+ /* return matcher indices */ &kMatcherIndices[109],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [6] */
+ /* num parameters */ 1,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[8],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[896],
+ /* return matcher indices */ &kMatcherIndices[123],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [7] */
+ /* num parameters */ 2,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[8],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[623],
+ /* return matcher indices */ &kMatcherIndices[123],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [8] */
+ /* num parameters */ 1,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[8],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[895],
+ /* return matcher indices */ &kMatcherIndices[109],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [9] */
+ /* num parameters */ 2,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[8],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[627],
+ /* return matcher indices */ &kMatcherIndices[109],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [10] */
+ /* num parameters */ 1,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[8],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[894],
+ /* return matcher indices */ &kMatcherIndices[109],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [11] */
+ /* num parameters */ 2,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[8],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[631],
+ /* return matcher indices */ &kMatcherIndices[109],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [12] */
+ /* num parameters */ 1,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[8],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[893],
+ /* return matcher indices */ &kMatcherIndices[109],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [13] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[892],
+ /* return matcher indices */ &kMatcherIndices[109],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [14] */
+ /* num parameters */ 2,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[637],
+ /* return matcher indices */ &kMatcherIndices[109],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [15] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[891],
+ /* return matcher indices */ &kMatcherIndices[109],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [16] */
+ /* num parameters */ 2,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[641],
+ /* return matcher indices */ &kMatcherIndices[109],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [17] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[890],
+ /* return matcher indices */ &kMatcherIndices[109],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [18] */
+ /* num parameters */ 2,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[645],
+ /* return matcher indices */ &kMatcherIndices[109],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [19] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[889],
+ /* return matcher indices */ &kMatcherIndices[109],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [20] */
+ /* num parameters */ 2,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[649],
+ /* return matcher indices */ &kMatcherIndices[109],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [21] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[888],
+ /* return matcher indices */ &kMatcherIndices[109],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [22] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 2,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[3],
+ /* parameters */ &kParameters[887],
+ /* return matcher indices */ &kMatcherIndices[44],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [23] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 2,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[3],
+ /* parameters */ &kParameters[886],
+ /* return matcher indices */ &kMatcherIndices[109],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [24] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 2,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[3],
+ /* parameters */ &kParameters[840],
+ /* return matcher indices */ &kMatcherIndices[109],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [25] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 2,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[3],
+ /* parameters */ &kParameters[884],
+ /* return matcher indices */ &kMatcherIndices[123],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [26] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[883],
+ /* return matcher indices */ &kMatcherIndices[109],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [27] */
+ /* num parameters */ 3,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[585],
+ /* return matcher indices */ &kMatcherIndices[45],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsFragmentPipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [28] */
+ /* num parameters */ 3,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[582],
+ /* return matcher indices */ &kMatcherIndices[45],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsFragmentPipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [29] */
+ /* num parameters */ 4,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[367],
+ /* return matcher indices */ &kMatcherIndices[45],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsFragmentPipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [30] */
+ /* num parameters */ 4,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[363],
+ /* return matcher indices */ &kMatcherIndices[45],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsFragmentPipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [31] */
+ /* num parameters */ 5,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[215],
+ /* return matcher indices */ &kMatcherIndices[45],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsFragmentPipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [32] */
+ /* num parameters */ 3,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[570],
+ /* return matcher indices */ &kMatcherIndices[45],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsFragmentPipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [33] */
+ /* num parameters */ 4,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[355],
+ /* return matcher indices */ &kMatcherIndices[45],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsFragmentPipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [34] */
+ /* num parameters */ 3,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[564],
+ /* return matcher indices */ &kMatcherIndices[45],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsFragmentPipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [35] */
+ /* num parameters */ 4,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[351],
+ /* return matcher indices */ &kMatcherIndices[45],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsFragmentPipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [36] */
+ /* num parameters */ 3,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[546],
+ /* return matcher indices */ &kMatcherIndices[5],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsFragmentPipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [37] */
+ /* num parameters */ 4,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[347],
+ /* return matcher indices */ &kMatcherIndices[5],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsFragmentPipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [38] */
+ /* num parameters */ 4,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[343],
+ /* return matcher indices */ &kMatcherIndices[5],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsFragmentPipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [39] */
+ /* num parameters */ 5,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[200],
+ /* return matcher indices */ &kMatcherIndices[5],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsFragmentPipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [40] */
+ /* num parameters */ 3,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[534],
+ /* return matcher indices */ &kMatcherIndices[5],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsFragmentPipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [41] */
+ /* num parameters */ 4,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[335],
+ /* return matcher indices */ &kMatcherIndices[5],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsFragmentPipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [42] */
+ /* num parameters */ 4,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[319],
+ /* return matcher indices */ &kMatcherIndices[45],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [43] */
+ /* num parameters */ 5,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[195],
+ /* return matcher indices */ &kMatcherIndices[45],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [44] */
+ /* num parameters */ 5,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[190],
+ /* return matcher indices */ &kMatcherIndices[45],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [45] */
+ /* num parameters */ 6,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[126],
+ /* return matcher indices */ &kMatcherIndices[45],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [46] */
+ /* num parameters */ 4,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[327],
+ /* return matcher indices */ &kMatcherIndices[45],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [47] */
+ /* num parameters */ 5,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[175],
+ /* return matcher indices */ &kMatcherIndices[45],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [48] */
+ /* num parameters */ 4,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[339],
+ /* return matcher indices */ &kMatcherIndices[45],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [49] */
+ /* num parameters */ 5,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[165],
+ /* return matcher indices */ &kMatcherIndices[45],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [50] */
+ /* num parameters */ 4,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[371],
+ /* return matcher indices */ &kMatcherIndices[5],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [51] */
+ /* num parameters */ 5,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[155],
+ /* return matcher indices */ &kMatcherIndices[5],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [52] */
+ /* num parameters */ 5,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[150],
+ /* return matcher indices */ &kMatcherIndices[5],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [53] */
+ /* num parameters */ 6,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[72],
+ /* return matcher indices */ &kMatcherIndices[5],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [54] */
+ /* num parameters */ 4,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[379],
+ /* return matcher indices */ &kMatcherIndices[5],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [55] */
+ /* num parameters */ 5,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[170],
+ /* return matcher indices */ &kMatcherIndices[5],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [56] */
+ /* num parameters */ 3,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[456],
+ /* return matcher indices */ &kMatcherIndices[45],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [57] */
+ /* num parameters */ 0,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[13],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[990],
+ /* return matcher indices */ &kMatcherIndices[49],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConstructor, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [58] */
+ /* num parameters */ 1,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[13],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[950],
+ /* return matcher indices */ &kMatcherIndices[49],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConstructor, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [59] */
+ /* num parameters */ 1,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[12],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[951],
+ /* return matcher indices */ &kMatcherIndices[49],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConstructor, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [60] */
+ /* num parameters */ 4,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[12],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[419],
+ /* return matcher indices */ &kMatcherIndices[49],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConstructor, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [61] */
+ /* num parameters */ 3,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[12],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[513],
+ /* return matcher indices */ &kMatcherIndices[49],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConstructor, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [62] */
+ /* num parameters */ 3,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[12],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[510],
+ /* return matcher indices */ &kMatcherIndices[49],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConstructor, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [63] */
+ /* num parameters */ 3,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[12],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[501],
+ /* return matcher indices */ &kMatcherIndices[49],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConstructor, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [64] */
+ /* num parameters */ 2,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[12],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[769],
+ /* return matcher indices */ &kMatcherIndices[49],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConstructor, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [65] */
+ /* num parameters */ 2,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[12],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[775],
+ /* return matcher indices */ &kMatcherIndices[49],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConstructor, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [66] */
+ /* num parameters */ 2,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[12],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[777],
+ /* return matcher indices */ &kMatcherIndices[49],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConstructor, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [67] */
+ /* num parameters */ 1,
+ /* num template types */ 2,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[2],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[959],
+ /* return matcher indices */ &kMatcherIndices[45],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConverter, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [68] */
+ /* num parameters */ 1,
+ /* num template types */ 2,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[4],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[960],
+ /* return matcher indices */ &kMatcherIndices[57],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConverter, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [69] */
+ /* num parameters */ 1,
+ /* num template types */ 2,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[6],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[961],
+ /* return matcher indices */ &kMatcherIndices[61],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConverter, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [70] */
+ /* num parameters */ 1,
+ /* num template types */ 2,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[0],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[962],
+ /* return matcher indices */ &kMatcherIndices[65],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConverter, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [71] */
+ /* num parameters */ 4,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[8],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[359],
+ /* return matcher indices */ &kMatcherIndices[49],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [72] */
+ /* num parameters */ 5,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[8],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[255],
+ /* return matcher indices */ &kMatcherIndices[49],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [73] */
+ /* num parameters */ 5,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[8],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[235],
+ /* return matcher indices */ &kMatcherIndices[49],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [74] */
+ /* num parameters */ 6,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[8],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[96],
+ /* return matcher indices */ &kMatcherIndices[49],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [75] */
+ /* num parameters */ 4,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[8],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[423],
+ /* return matcher indices */ &kMatcherIndices[49],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [76] */
+ /* num parameters */ 5,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[8],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[230],
+ /* return matcher indices */ &kMatcherIndices[49],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [77] */
+ /* num parameters */ 3,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[447],
+ /* return matcher indices */ &kMatcherIndices[45],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [78] */
+ /* num parameters */ 4,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[415],
+ /* return matcher indices */ &kMatcherIndices[45],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [79] */
+ /* num parameters */ 4,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[411],
+ /* return matcher indices */ &kMatcherIndices[45],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [80] */
+ /* num parameters */ 5,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[225],
+ /* return matcher indices */ &kMatcherIndices[45],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [81] */
+ /* num parameters */ 3,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[594],
+ /* return matcher indices */ &kMatcherIndices[45],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [82] */
+ /* num parameters */ 4,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[395],
+ /* return matcher indices */ &kMatcherIndices[45],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [83] */
+ /* num parameters */ 3,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[477],
+ /* return matcher indices */ nullptr,
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [84] */
+ /* num parameters */ 3,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[480],
+ /* return matcher indices */ nullptr,
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [85] */
+ /* num parameters */ 4,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[383],
+ /* return matcher indices */ nullptr,
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [86] */
+ /* num parameters */ 3,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[483],
+ /* return matcher indices */ nullptr,
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [87] */
+ /* num parameters */ 3,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[486],
+ /* return matcher indices */ nullptr,
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [88] */
+ /* num parameters */ 3,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[489],
+ /* return matcher indices */ nullptr,
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [89] */
+ /* num parameters */ 4,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[387],
+ /* return matcher indices */ nullptr,
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [90] */
+ /* num parameters */ 3,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[492],
+ /* return matcher indices */ nullptr,
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [91] */
+ /* num parameters */ 3,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[516],
+ /* return matcher indices */ nullptr,
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [92] */
+ /* num parameters */ 3,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[519],
+ /* return matcher indices */ nullptr,
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [93] */
+ /* num parameters */ 4,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[399],
+ /* return matcher indices */ nullptr,
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [94] */
+ /* num parameters */ 3,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[537],
+ /* return matcher indices */ nullptr,
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [95] */
+ /* num parameters */ 0,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[13],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[990],
+ /* return matcher indices */ &kMatcherIndices[115],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConstructor, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [96] */
+ /* num parameters */ 1,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[13],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[940],
+ /* return matcher indices */ &kMatcherIndices[115],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConstructor, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [97] */
+ /* num parameters */ 1,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[12],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[941],
+ /* return matcher indices */ &kMatcherIndices[115],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConstructor, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [98] */
+ /* num parameters */ 3,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[12],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[531],
+ /* return matcher indices */ &kMatcherIndices[115],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConstructor, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [99] */
+ /* num parameters */ 2,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[12],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[763],
+ /* return matcher indices */ &kMatcherIndices[115],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConstructor, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [100] */
+ /* num parameters */ 2,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[12],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[765],
+ /* return matcher indices */ &kMatcherIndices[115],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConstructor, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [101] */
+ /* num parameters */ 1,
+ /* num template types */ 2,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[2],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[945],
+ /* return matcher indices */ &kMatcherIndices[121],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConverter, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [102] */
+ /* num parameters */ 1,
+ /* num template types */ 2,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[4],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[946],
+ /* return matcher indices */ &kMatcherIndices[123],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConverter, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [103] */
+ /* num parameters */ 1,
+ /* num template types */ 2,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[6],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[947],
+ /* return matcher indices */ &kMatcherIndices[125],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConverter, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [104] */
+ /* num parameters */ 1,
+ /* num template types */ 2,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[0],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[948],
+ /* return matcher indices */ &kMatcherIndices[127],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConverter, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [105] */
+ /* num parameters */ 1,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[8],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[856],
+ /* return matcher indices */ &kMatcherIndices[44],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [106] */
+ /* num parameters */ 1,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[8],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[855],
+ /* return matcher indices */ &kMatcherIndices[44],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [107] */
+ /* num parameters */ 1,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[8],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[854],
+ /* return matcher indices */ &kMatcherIndices[44],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [108] */
+ /* num parameters */ 1,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[8],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[853],
+ /* return matcher indices */ &kMatcherIndices[44],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [109] */
+ /* num parameters */ 1,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[8],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[852],
+ /* return matcher indices */ &kMatcherIndices[44],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [110] */
+ /* num parameters */ 1,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[8],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[851],
+ /* return matcher indices */ &kMatcherIndices[44],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [111] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[850],
+ /* return matcher indices */ &kMatcherIndices[44],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [112] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[849],
+ /* return matcher indices */ &kMatcherIndices[44],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [113] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[848],
+ /* return matcher indices */ &kMatcherIndices[44],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [114] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[846],
+ /* return matcher indices */ &kMatcherIndices[44],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [115] */
+ /* num parameters */ 3,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[8],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[540],
+ /* return matcher indices */ &kMatcherIndices[49],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [116] */
+ /* num parameters */ 3,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[8],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[543],
+ /* return matcher indices */ &kMatcherIndices[49],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [117] */
+ /* num parameters */ 4,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[8],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[323],
+ /* return matcher indices */ &kMatcherIndices[49],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [118] */
+ /* num parameters */ 3,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[8],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[573],
+ /* return matcher indices */ &kMatcherIndices[49],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [119] */
+ /* num parameters */ 3,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[8],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[576],
+ /* return matcher indices */ &kMatcherIndices[49],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [120] */
+ /* num parameters */ 3,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[579],
+ /* return matcher indices */ &kMatcherIndices[5],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [121] */
+ /* num parameters */ 4,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[407],
+ /* return matcher indices */ &kMatcherIndices[5],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [122] */
+ /* num parameters */ 3,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[588],
+ /* return matcher indices */ &kMatcherIndices[5],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [123] */
+ /* num parameters */ 2,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[727],
+ /* return matcher indices */ &kMatcherIndices[45],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [124] */
+ /* num parameters */ 2,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[8],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[685],
+ /* return matcher indices */ &kMatcherIndices[1],
+ /* flags */ OverloadFlags(OverloadFlag::kIsOperator, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [125] */
+ /* num parameters */ 2,
+ /* num template types */ 1,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[8],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[681],
+ /* return matcher indices */ &kMatcherIndices[37],
+ /* flags */ OverloadFlags(OverloadFlag::kIsOperator, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [126] */
+ /* num parameters */ 2,
+ /* num template types */ 1,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[8],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[679],
+ /* return matcher indices */ &kMatcherIndices[37],
+ /* flags */ OverloadFlags(OverloadFlag::kIsOperator, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [127] */
+ /* num parameters */ 2,
+ /* num template types */ 1,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[8],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[673],
+ /* return matcher indices */ &kMatcherIndices[37],
+ /* flags */ OverloadFlags(OverloadFlag::kIsOperator, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [128] */
+ /* num parameters */ 2,
+ /* num template types */ 0,
+ /* num template numbers */ 2,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[671],
+ /* return matcher indices */ &kMatcherIndices[11],
+ /* flags */ OverloadFlags(OverloadFlag::kIsOperator, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [129] */
+ /* num parameters */ 2,
+ /* num template types */ 0,
+ /* num template numbers */ 2,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[669],
+ /* return matcher indices */ &kMatcherIndices[11],
+ /* flags */ OverloadFlags(OverloadFlag::kIsOperator, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [130] */
+ /* num parameters */ 2,
+ /* num template types */ 0,
+ /* num template numbers */ 2,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[1],
+ /* parameters */ &kParameters[667],
+ /* return matcher indices */ &kMatcherIndices[76],
+ /* flags */ OverloadFlags(OverloadFlag::kIsOperator, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [131] */
+ /* num parameters */ 2,
+ /* num template types */ 0,
+ /* num template numbers */ 2,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[1],
+ /* parameters */ &kParameters[665],
+ /* return matcher indices */ &kMatcherIndices[31],
+ /* flags */ OverloadFlags(OverloadFlag::kIsOperator, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [132] */
+ /* num parameters */ 2,
+ /* num template types */ 0,
+ /* num template numbers */ 3,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[0],
+ /* parameters */ &kParameters[663],
+ /* return matcher indices */ &kMatcherIndices[27],
+ /* flags */ OverloadFlags(OverloadFlag::kIsOperator, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [133] */
+ /* num parameters */ 4,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[331],
+ /* return matcher indices */ &kMatcherIndices[45],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsFragmentPipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [134] */
+ /* num parameters */ 5,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[180],
+ /* return matcher indices */ &kMatcherIndices[45],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsFragmentPipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [135] */
+ /* num parameters */ 5,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[290],
+ /* return matcher indices */ &kMatcherIndices[45],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsFragmentPipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [136] */
+ /* num parameters */ 6,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[132],
+ /* return matcher indices */ &kMatcherIndices[45],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsFragmentPipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [137] */
+ /* num parameters */ 4,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[315],
+ /* return matcher indices */ &kMatcherIndices[45],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsFragmentPipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [138] */
+ /* num parameters */ 5,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[250],
+ /* return matcher indices */ &kMatcherIndices[45],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsFragmentPipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [139] */
+ /* num parameters */ 4,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[307],
+ /* return matcher indices */ &kMatcherIndices[45],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsFragmentPipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [140] */
+ /* num parameters */ 5,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[270],
+ /* return matcher indices */ &kMatcherIndices[45],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsFragmentPipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [141] */
+ /* num parameters */ 5,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[240],
+ /* return matcher indices */ &kMatcherIndices[45],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [142] */
+ /* num parameters */ 6,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[84],
+ /* return matcher indices */ &kMatcherIndices[45],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [143] */
+ /* num parameters */ 6,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[102],
+ /* return matcher indices */ &kMatcherIndices[45],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [144] */
+ /* num parameters */ 7,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[65],
+ /* return matcher indices */ &kMatcherIndices[45],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [145] */
+ /* num parameters */ 5,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[220],
+ /* return matcher indices */ &kMatcherIndices[45],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [146] */
+ /* num parameters */ 6,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[114],
+ /* return matcher indices */ &kMatcherIndices[45],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [147] */
+ /* num parameters */ 5,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[210],
+ /* return matcher indices */ &kMatcherIndices[45],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [148] */
+ /* num parameters */ 6,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[120],
+ /* return matcher indices */ &kMatcherIndices[45],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [149] */
+ /* num parameters */ 0,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[13],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[990],
+ /* return matcher indices */ &kMatcherIndices[105],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConstructor, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [150] */
+ /* num parameters */ 1,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[13],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[932],
+ /* return matcher indices */ &kMatcherIndices[105],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConstructor, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [151] */
+ /* num parameters */ 1,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[12],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[933],
+ /* return matcher indices */ &kMatcherIndices[105],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConstructor, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [152] */
+ /* num parameters */ 2,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[12],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[759],
+ /* return matcher indices */ &kMatcherIndices[105],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConstructor, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [153] */
+ /* num parameters */ 1,
+ /* num template types */ 2,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[2],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[935],
+ /* return matcher indices */ &kMatcherIndices[103],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConverter, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [154] */
+ /* num parameters */ 1,
+ /* num template types */ 2,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[4],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[936],
+ /* return matcher indices */ &kMatcherIndices[109],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConverter, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [155] */
+ /* num parameters */ 1,
+ /* num template types */ 2,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[6],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[937],
+ /* return matcher indices */ &kMatcherIndices[111],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConverter, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [156] */
+ /* num parameters */ 1,
+ /* num template types */ 2,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[0],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[938],
+ /* return matcher indices */ &kMatcherIndices[113],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConverter, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [157] */
+ /* num parameters */ 4,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[391],
+ /* return matcher indices */ &kMatcherIndices[45],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [158] */
+ /* num parameters */ 5,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[205],
+ /* return matcher indices */ &kMatcherIndices[45],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [159] */
+ /* num parameters */ 5,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[185],
+ /* return matcher indices */ &kMatcherIndices[45],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [160] */
+ /* num parameters */ 6,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[138],
+ /* return matcher indices */ &kMatcherIndices[45],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [161] */
+ /* num parameters */ 4,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[375],
+ /* return matcher indices */ &kMatcherIndices[45],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [162] */
+ /* num parameters */ 5,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[160],
+ /* return matcher indices */ &kMatcherIndices[45],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [163] */
+ /* num parameters */ 4,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[303],
+ /* return matcher indices */ &kMatcherIndices[5],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [164] */
+ /* num parameters */ 5,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[265],
+ /* return matcher indices */ &kMatcherIndices[5],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [165] */
+ /* num parameters */ 5,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[260],
+ /* return matcher indices */ &kMatcherIndices[5],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [166] */
+ /* num parameters */ 6,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[144],
+ /* return matcher indices */ &kMatcherIndices[5],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [167] */
+ /* num parameters */ 4,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[311],
+ /* return matcher indices */ &kMatcherIndices[5],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [168] */
+ /* num parameters */ 5,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[245],
+ /* return matcher indices */ &kMatcherIndices[5],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [169] */
+ /* num parameters */ 4,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[299],
+ /* return matcher indices */ &kMatcherIndices[5],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsFragmentPipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [170] */
+ /* num parameters */ 5,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[280],
+ /* return matcher indices */ &kMatcherIndices[5],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsFragmentPipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [171] */
+ /* num parameters */ 5,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[285],
+ /* return matcher indices */ &kMatcherIndices[5],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsFragmentPipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [172] */
+ /* num parameters */ 6,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[108],
+ /* return matcher indices */ &kMatcherIndices[5],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsFragmentPipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [173] */
+ /* num parameters */ 4,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[295],
+ /* return matcher indices */ &kMatcherIndices[5],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsFragmentPipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [174] */
+ /* num parameters */ 5,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[275],
+ /* return matcher indices */ &kMatcherIndices[5],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsFragmentPipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [175] */
+ /* num parameters */ 0,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[990],
+ /* return matcher indices */ &kMatcherIndices[149],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConstructor, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [176] */
+ /* num parameters */ 1,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[9],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[969],
+ /* return matcher indices */ &kMatcherIndices[149],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConstructor, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [177] */
+ /* num parameters */ 1,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[10],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[970],
+ /* return matcher indices */ &kMatcherIndices[151],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConstructor, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [178] */
+ /* num parameters */ 6,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[10],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[90],
+ /* return matcher indices */ &kMatcherIndices[151],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConstructor, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [179] */
+ /* num parameters */ 2,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[10],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[781],
+ /* return matcher indices */ &kMatcherIndices[151],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConstructor, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [180] */
+ /* num parameters */ 0,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[990],
+ /* return matcher indices */ &kMatcherIndices[165],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConstructor, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [181] */
+ /* num parameters */ 1,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[9],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[988],
+ /* return matcher indices */ &kMatcherIndices[165],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConstructor, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [182] */
+ /* num parameters */ 1,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[10],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[983],
+ /* return matcher indices */ &kMatcherIndices[167],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConstructor, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [183] */
+ /* num parameters */ 12,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[10],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[16],
+ /* return matcher indices */ &kMatcherIndices[167],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConstructor, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [184] */
+ /* num parameters */ 3,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[10],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[468],
+ /* return matcher indices */ &kMatcherIndices[167],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConstructor, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [185] */
+ /* num parameters */ 0,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[990],
+ /* return matcher indices */ &kMatcherIndices[169],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConstructor, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [186] */
+ /* num parameters */ 1,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[9],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[968],
+ /* return matcher indices */ &kMatcherIndices[169],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConstructor, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [187] */
+ /* num parameters */ 1,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[10],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[957],
+ /* return matcher indices */ &kMatcherIndices[171],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConstructor, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [188] */
+ /* num parameters */ 8,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[10],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[57],
+ /* return matcher indices */ &kMatcherIndices[171],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConstructor, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [189] */
+ /* num parameters */ 4,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[10],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[431],
+ /* return matcher indices */ &kMatcherIndices[171],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConstructor, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [190] */
+ /* num parameters */ 0,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[990],
+ /* return matcher indices */ &kMatcherIndices[4],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConstructor, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [191] */
+ /* num parameters */ 1,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[9],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[964],
+ /* return matcher indices */ &kMatcherIndices[4],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConstructor, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [192] */
+ /* num parameters */ 1,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[10],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[965],
+ /* return matcher indices */ &kMatcherIndices[147],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConstructor, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [193] */
+ /* num parameters */ 4,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[10],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[427],
+ /* return matcher indices */ &kMatcherIndices[147],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConstructor, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [194] */
+ /* num parameters */ 2,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[10],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[779],
+ /* return matcher indices */ &kMatcherIndices[147],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConstructor, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [195] */
+ /* num parameters */ 0,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[990],
+ /* return matcher indices */ &kMatcherIndices[157],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConstructor, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [196] */
+ /* num parameters */ 1,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[9],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[979],
+ /* return matcher indices */ &kMatcherIndices[157],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConstructor, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [197] */
+ /* num parameters */ 1,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[10],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[980],
+ /* return matcher indices */ &kMatcherIndices[159],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConstructor, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [198] */
+ /* num parameters */ 6,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[10],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[78],
+ /* return matcher indices */ &kMatcherIndices[159],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConstructor, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [199] */
+ /* num parameters */ 3,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[10],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[474],
+ /* return matcher indices */ &kMatcherIndices[159],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConstructor, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [200] */
+ /* num parameters */ 0,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[990],
+ /* return matcher indices */ &kMatcherIndices[153],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConstructor, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [201] */
+ /* num parameters */ 1,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[9],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[974],
+ /* return matcher indices */ &kMatcherIndices[153],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConstructor, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [202] */
+ /* num parameters */ 1,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[10],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[975],
+ /* return matcher indices */ &kMatcherIndices[155],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConstructor, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [203] */
+ /* num parameters */ 8,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[10],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[49],
+ /* return matcher indices */ &kMatcherIndices[155],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConstructor, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [204] */
+ /* num parameters */ 2,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[10],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[783],
+ /* return matcher indices */ &kMatcherIndices[155],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConstructor, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [205] */
+ /* num parameters */ 1,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[8],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[867],
+ /* return matcher indices */ &kMatcherIndices[44],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [206] */
+ /* num parameters */ 1,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[8],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[866],
+ /* return matcher indices */ &kMatcherIndices[44],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [207] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[865],
+ /* return matcher indices */ &kMatcherIndices[44],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [208] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[864],
+ /* return matcher indices */ &kMatcherIndices[44],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [209] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 2,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[3],
+ /* parameters */ &kParameters[857],
+ /* return matcher indices */ &kMatcherIndices[44],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [210] */
+ /* num parameters */ 0,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[990],
+ /* return matcher indices */ &kMatcherIndices[175],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConstructor, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [211] */
+ /* num parameters */ 1,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[9],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[931],
+ /* return matcher indices */ &kMatcherIndices[175],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConstructor, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [212] */
+ /* num parameters */ 1,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[10],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[928],
+ /* return matcher indices */ &kMatcherIndices[177],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConstructor, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [213] */
+ /* num parameters */ 12,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[10],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[28],
+ /* return matcher indices */ &kMatcherIndices[177],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConstructor, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [214] */
+ /* num parameters */ 4,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[10],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[435],
+ /* return matcher indices */ &kMatcherIndices[177],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConstructor, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [215] */
+ /* num parameters */ 2,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[8],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[695],
+ /* return matcher indices */ &kMatcherIndices[1],
+ /* flags */ OverloadFlags(OverloadFlag::kIsOperator, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [216] */
+ /* num parameters */ 2,
+ /* num template types */ 1,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[8],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[693],
+ /* return matcher indices */ &kMatcherIndices[37],
+ /* flags */ OverloadFlags(OverloadFlag::kIsOperator, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [217] */
+ /* num parameters */ 2,
+ /* num template types */ 1,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[8],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[691],
+ /* return matcher indices */ &kMatcherIndices[37],
+ /* flags */ OverloadFlags(OverloadFlag::kIsOperator, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [218] */
+ /* num parameters */ 2,
+ /* num template types */ 1,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[8],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[689],
+ /* return matcher indices */ &kMatcherIndices[37],
+ /* flags */ OverloadFlags(OverloadFlag::kIsOperator, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [219] */
+ /* num parameters */ 2,
+ /* num template types */ 0,
+ /* num template numbers */ 2,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[687],
+ /* return matcher indices */ &kMatcherIndices[11],
+ /* flags */ OverloadFlags(OverloadFlag::kIsOperator, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [220] */
+ /* num parameters */ 2,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[8],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[705],
+ /* return matcher indices */ &kMatcherIndices[1],
+ /* flags */ OverloadFlags(OverloadFlag::kIsOperator, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [221] */
+ /* num parameters */ 2,
+ /* num template types */ 1,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[8],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[703],
+ /* return matcher indices */ &kMatcherIndices[37],
+ /* flags */ OverloadFlags(OverloadFlag::kIsOperator, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [222] */
+ /* num parameters */ 2,
+ /* num template types */ 1,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[8],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[701],
+ /* return matcher indices */ &kMatcherIndices[37],
+ /* flags */ OverloadFlags(OverloadFlag::kIsOperator, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [223] */
+ /* num parameters */ 2,
+ /* num template types */ 1,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[8],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[699],
+ /* return matcher indices */ &kMatcherIndices[37],
+ /* flags */ OverloadFlags(OverloadFlag::kIsOperator, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [224] */
+ /* num parameters */ 2,
+ /* num template types */ 0,
+ /* num template numbers */ 2,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[697],
+ /* return matcher indices */ &kMatcherIndices[11],
+ /* flags */ OverloadFlags(OverloadFlag::kIsOperator, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [225] */
+ /* num parameters */ 0,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[990],
+ /* return matcher indices */ &kMatcherIndices[185],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConstructor, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [226] */
+ /* num parameters */ 1,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[9],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[919],
+ /* return matcher indices */ &kMatcherIndices[185],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConstructor, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [227] */
+ /* num parameters */ 1,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[10],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[912],
+ /* return matcher indices */ &kMatcherIndices[137],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConstructor, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [228] */
+ /* num parameters */ 16,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[10],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[0],
+ /* return matcher indices */ &kMatcherIndices[137],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConstructor, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [229] */
+ /* num parameters */ 4,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[10],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[443],
+ /* return matcher indices */ &kMatcherIndices[137],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConstructor, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [230] */
+ /* num parameters */ 0,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[990],
+ /* return matcher indices */ &kMatcherIndices[161],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConstructor, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [231] */
+ /* num parameters */ 1,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[9],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[984],
+ /* return matcher indices */ &kMatcherIndices[161],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConstructor, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [232] */
+ /* num parameters */ 1,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[10],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[985],
+ /* return matcher indices */ &kMatcherIndices[163],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConstructor, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [233] */
+ /* num parameters */ 9,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[10],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[40],
+ /* return matcher indices */ &kMatcherIndices[163],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConstructor, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [234] */
+ /* num parameters */ 3,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[10],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[471],
+ /* return matcher indices */ &kMatcherIndices[163],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConstructor, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [235] */
+ /* num parameters */ 2,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[633],
+ /* return matcher indices */ &kMatcherIndices[21],
+ /* flags */ OverloadFlags(OverloadFlag::kIsOperator, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [236] */
+ /* num parameters */ 2,
+ /* num template types */ 0,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[629],
+ /* return matcher indices */ &kMatcherIndices[40],
+ /* flags */ OverloadFlags(OverloadFlag::kIsOperator, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [237] */
+ /* num parameters */ 2,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[11],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[625],
+ /* return matcher indices */ &kMatcherIndices[1],
+ /* flags */ OverloadFlags(OverloadFlag::kIsOperator, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [238] */
+ /* num parameters */ 2,
+ /* num template types */ 1,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[11],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[621],
+ /* return matcher indices */ &kMatcherIndices[37],
+ /* flags */ OverloadFlags(OverloadFlag::kIsOperator, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [239] */
+ /* num parameters */ 2,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[8],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[653],
+ /* return matcher indices */ &kMatcherIndices[1],
+ /* flags */ OverloadFlags(OverloadFlag::kIsOperator, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [240] */
+ /* num parameters */ 2,
+ /* num template types */ 1,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[8],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[651],
+ /* return matcher indices */ &kMatcherIndices[37],
+ /* flags */ OverloadFlags(OverloadFlag::kIsOperator, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [241] */
+ /* num parameters */ 2,
+ /* num template types */ 1,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[8],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[647],
+ /* return matcher indices */ &kMatcherIndices[37],
+ /* flags */ OverloadFlags(OverloadFlag::kIsOperator, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [242] */
+ /* num parameters */ 2,
+ /* num template types */ 1,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[8],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[643],
+ /* return matcher indices */ &kMatcherIndices[37],
+ /* flags */ OverloadFlags(OverloadFlag::kIsOperator, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [243] */
+ /* num parameters */ 2,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[8],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[661],
+ /* return matcher indices */ &kMatcherIndices[1],
+ /* flags */ OverloadFlags(OverloadFlag::kIsOperator, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [244] */
+ /* num parameters */ 2,
+ /* num template types */ 1,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[8],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[659],
+ /* return matcher indices */ &kMatcherIndices[37],
+ /* flags */ OverloadFlags(OverloadFlag::kIsOperator, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [245] */
+ /* num parameters */ 2,
+ /* num template types */ 1,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[8],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[657],
+ /* return matcher indices */ &kMatcherIndices[37],
+ /* flags */ OverloadFlags(OverloadFlag::kIsOperator, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [246] */
+ /* num parameters */ 2,
+ /* num template types */ 1,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[8],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[655],
+ /* return matcher indices */ &kMatcherIndices[37],
+ /* flags */ OverloadFlags(OverloadFlag::kIsOperator, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [247] */
+ /* num parameters */ 2,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[617],
+ /* return matcher indices */ &kMatcherIndices[21],
+ /* flags */ OverloadFlags(OverloadFlag::kIsOperator, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [248] */
+ /* num parameters */ 2,
+ /* num template types */ 0,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[613],
+ /* return matcher indices */ &kMatcherIndices[40],
+ /* flags */ OverloadFlags(OverloadFlag::kIsOperator, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [249] */
+ /* num parameters */ 2,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[11],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[609],
+ /* return matcher indices */ &kMatcherIndices[1],
+ /* flags */ OverloadFlags(OverloadFlag::kIsOperator, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [250] */
+ /* num parameters */ 2,
+ /* num template types */ 1,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[11],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[607],
+ /* return matcher indices */ &kMatcherIndices[37],
+ /* flags */ OverloadFlags(OverloadFlag::kIsOperator, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [251] */
+ /* num parameters */ 0,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[990],
+ /* return matcher indices */ &kMatcherIndices[44],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConstructor, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [252] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[920],
+ /* return matcher indices */ &kMatcherIndices[44],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConstructor, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [253] */
+ /* num parameters */ 1,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[18],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[921],
+ /* return matcher indices */ &kMatcherIndices[44],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConverter, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [254] */
+ /* num parameters */ 0,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[990],
+ /* return matcher indices */ &kMatcherIndices[21],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConstructor, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [255] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[929],
+ /* return matcher indices */ &kMatcherIndices[21],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConstructor, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [256] */
+ /* num parameters */ 1,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[15],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[930],
+ /* return matcher indices */ &kMatcherIndices[21],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConverter, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [257] */
+ /* num parameters */ 3,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[13],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[522],
+ /* return matcher indices */ &kMatcherIndices[1],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [258] */
+ /* num parameters */ 3,
+ /* num template types */ 1,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[13],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[525],
+ /* return matcher indices */ &kMatcherIndices[37],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [259] */
+ /* num parameters */ 3,
+ /* num template types */ 1,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[13],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[528],
+ /* return matcher indices */ &kMatcherIndices[37],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [260] */
+ /* num parameters */ 0,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[990],
+ /* return matcher indices */ &kMatcherIndices[62],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConstructor, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [261] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[923],
+ /* return matcher indices */ &kMatcherIndices[62],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConstructor, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [262] */
+ /* num parameters */ 1,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[17],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[924],
+ /* return matcher indices */ &kMatcherIndices[62],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConverter, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [263] */
+ /* num parameters */ 0,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[990],
+ /* return matcher indices */ &kMatcherIndices[5],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConstructor, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [264] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[926],
+ /* return matcher indices */ &kMatcherIndices[5],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConstructor, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [265] */
+ /* num parameters */ 1,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[16],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[927],
+ /* return matcher indices */ &kMatcherIndices[5],
+ /* flags */ OverloadFlags(OverloadFlag::kIsConverter, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [266] */
+ /* num parameters */ 3,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[459],
+ /* return matcher indices */ &kMatcherIndices[5],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [267] */
+ /* num parameters */ 3,
+ /* num template types */ 0,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[462],
+ /* return matcher indices */ &kMatcherIndices[31],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [268] */
+ /* num parameters */ 3,
+ /* num template types */ 0,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[465],
+ /* return matcher indices */ &kMatcherIndices[31],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [269] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[858],
+ /* return matcher indices */ &kMatcherIndices[21],
+ /* flags */ OverloadFlags(OverloadFlag::kIsOperator, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [270] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[859],
+ /* return matcher indices */ &kMatcherIndices[40],
+ /* flags */ OverloadFlags(OverloadFlag::kIsOperator, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [271] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[871],
+ /* return matcher indices */ &kMatcherIndices[5],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsFragmentPipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [272] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[872],
+ /* return matcher indices */ &kMatcherIndices[31],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsFragmentPipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [273] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[869],
+ /* return matcher indices */ &kMatcherIndices[5],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsFragmentPipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [274] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[870],
+ /* return matcher indices */ &kMatcherIndices[31],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsFragmentPipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [275] */
+ /* num parameters */ 4,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[11],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[439],
+ /* return matcher indices */ &kMatcherIndices[1],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [276] */
+ /* num parameters */ 4,
+ /* num template types */ 1,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[11],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[403],
+ /* return matcher indices */ &kMatcherIndices[37],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [277] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[875],
+ /* return matcher indices */ &kMatcherIndices[5],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [278] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[876],
+ /* return matcher indices */ &kMatcherIndices[31],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [279] */
+ /* num parameters */ 2,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[677],
+ /* return matcher indices */ &kMatcherIndices[5],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [280] */
+ /* num parameters */ 2,
+ /* num template types */ 0,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[801],
+ /* return matcher indices */ &kMatcherIndices[31],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [281] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[878],
+ /* return matcher indices */ &kMatcherIndices[5],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [282] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[879],
+ /* return matcher indices */ &kMatcherIndices[5],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [283] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[880],
+ /* return matcher indices */ &kMatcherIndices[5],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [284] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[881],
+ /* return matcher indices */ &kMatcherIndices[31],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [285] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[882],
+ /* return matcher indices */ &kMatcherIndices[5],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [286] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[885],
+ /* return matcher indices */ &kMatcherIndices[31],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [287] */
+ /* num parameters */ 2,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[8],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[737],
+ /* return matcher indices */ &kMatcherIndices[1],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [288] */
+ /* num parameters */ 2,
+ /* num template types */ 1,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[8],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[789],
+ /* return matcher indices */ &kMatcherIndices[37],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [289] */
+ /* num parameters */ 2,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[8],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[787],
+ /* return matcher indices */ &kMatcherIndices[1],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [290] */
+ /* num parameters */ 2,
+ /* num template types */ 1,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[8],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[785],
+ /* return matcher indices */ &kMatcherIndices[37],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [291] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[843],
+ /* return matcher indices */ &kMatcherIndices[202],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [292] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[868],
+ /* return matcher indices */ &kMatcherIndices[135],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [293] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[973],
+ /* return matcher indices */ &kMatcherIndices[201],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [294] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[905],
+ /* return matcher indices */ &kMatcherIndices[117],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [295] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[841],
+ /* return matcher indices */ &kMatcherIndices[5],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [296] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[842],
+ /* return matcher indices */ &kMatcherIndices[31],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [297] */
+ /* num parameters */ 3,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[561],
+ /* return matcher indices */ &kMatcherIndices[5],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [298] */
+ /* num parameters */ 3,
+ /* num template types */ 0,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[567],
+ /* return matcher indices */ &kMatcherIndices[31],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [299] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[873],
+ /* return matcher indices */ &kMatcherIndices[5],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsFragmentPipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [300] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[874],
+ /* return matcher indices */ &kMatcherIndices[31],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsFragmentPipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [301] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[838],
+ /* return matcher indices */ &kMatcherIndices[5],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [302] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[839],
+ /* return matcher indices */ &kMatcherIndices[31],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [303] */
+ /* num parameters */ 1,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[11],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[836],
+ /* return matcher indices */ &kMatcherIndices[1],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [304] */
+ /* num parameters */ 1,
+ /* num template types */ 1,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[11],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[837],
+ /* return matcher indices */ &kMatcherIndices[37],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [305] */
+ /* num parameters */ 1,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[11],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[834],
+ /* return matcher indices */ &kMatcherIndices[1],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [306] */
+ /* num parameters */ 1,
+ /* num template types */ 1,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[11],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[835],
+ /* return matcher indices */ &kMatcherIndices[37],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [307] */
+ /* num parameters */ 2,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[773],
+ /* return matcher indices */ &kMatcherIndices[5],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [308] */
+ /* num parameters */ 2,
+ /* num template types */ 0,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[771],
+ /* return matcher indices */ &kMatcherIndices[31],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [309] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[956],
+ /* return matcher indices */ &kMatcherIndices[5],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [310] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[955],
+ /* return matcher indices */ &kMatcherIndices[31],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [311] */
+ /* num parameters */ 3,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[11],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[495],
+ /* return matcher indices */ &kMatcherIndices[1],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [312] */
+ /* num parameters */ 3,
+ /* num template types */ 1,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[11],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[498],
+ /* return matcher indices */ &kMatcherIndices[37],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [313] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[831],
+ /* return matcher indices */ &kMatcherIndices[5],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [314] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[833],
+ /* return matcher indices */ &kMatcherIndices[31],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [315] */
+ /* num parameters */ 1,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[11],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[954],
+ /* return matcher indices */ &kMatcherIndices[1],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [316] */
+ /* num parameters */ 1,
+ /* num template types */ 1,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[11],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[953],
+ /* return matcher indices */ &kMatcherIndices[37],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [317] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[944],
+ /* return matcher indices */ &kMatcherIndices[5],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [318] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[943],
+ /* return matcher indices */ &kMatcherIndices[31],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [319] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[829],
+ /* return matcher indices */ &kMatcherIndices[5],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [320] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[830],
+ /* return matcher indices */ &kMatcherIndices[31],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [321] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[942],
+ /* return matcher indices */ &kMatcherIndices[5],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [322] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[934],
+ /* return matcher indices */ &kMatcherIndices[31],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [323] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[918],
+ /* return matcher indices */ &kMatcherIndices[5],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [324] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[917],
+ /* return matcher indices */ &kMatcherIndices[31],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [325] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[916],
+ /* return matcher indices */ &kMatcherIndices[5],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [326] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[915],
+ /* return matcher indices */ &kMatcherIndices[31],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [327] */
+ /* num parameters */ 3,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[549],
+ /* return matcher indices */ &kMatcherIndices[5],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [328] */
+ /* num parameters */ 3,
+ /* num template types */ 0,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[552],
+ /* return matcher indices */ &kMatcherIndices[31],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [329] */
+ /* num parameters */ 3,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[555],
+ /* return matcher indices */ &kMatcherIndices[5],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline, OverloadFlag::kIsDeprecated),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [330] */
+ /* num parameters */ 3,
+ /* num template types */ 0,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[558],
+ /* return matcher indices */ &kMatcherIndices[31],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline, OverloadFlag::kIsDeprecated),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [331] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[914],
+ /* return matcher indices */ &kMatcherIndices[5],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [332] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[913],
+ /* return matcher indices */ &kMatcherIndices[31],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [333] */
+ /* num parameters */ 2,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[745],
+ /* return matcher indices */ &kMatcherIndices[5],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [334] */
+ /* num parameters */ 2,
+ /* num template types */ 0,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[743],
+ /* return matcher indices */ &kMatcherIndices[31],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [335] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[826],
+ /* return matcher indices */ &kMatcherIndices[5],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsFragmentPipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [336] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[827],
+ /* return matcher indices */ &kMatcherIndices[31],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsFragmentPipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [337] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[911],
+ /* return matcher indices */ &kMatcherIndices[5],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [338] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[877],
+ /* return matcher indices */ &kMatcherIndices[31],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [339] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[910],
+ /* return matcher indices */ &kMatcherIndices[5],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [340] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[909],
+ /* return matcher indices */ &kMatcherIndices[31],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [341] */
+ /* num parameters */ 2,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[11],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[755],
+ /* return matcher indices */ &kMatcherIndices[1],
+ /* flags */ OverloadFlags(OverloadFlag::kIsOperator, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [342] */
+ /* num parameters */ 2,
+ /* num template types */ 1,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[11],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[757],
+ /* return matcher indices */ &kMatcherIndices[37],
+ /* flags */ OverloadFlags(OverloadFlag::kIsOperator, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [343] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[907],
+ /* return matcher indices */ &kMatcherIndices[5],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [344] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[906],
+ /* return matcher indices */ &kMatcherIndices[31],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [345] */
+ /* num parameters */ 2,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[11],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[751],
+ /* return matcher indices */ &kMatcherIndices[1],
+ /* flags */ OverloadFlags(OverloadFlag::kIsOperator, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [346] */
+ /* num parameters */ 2,
+ /* num template types */ 1,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[11],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[753],
+ /* return matcher indices */ &kMatcherIndices[37],
+ /* flags */ OverloadFlags(OverloadFlag::kIsOperator, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [347] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[824],
+ /* return matcher indices */ &kMatcherIndices[5],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsFragmentPipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [348] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[825],
+ /* return matcher indices */ &kMatcherIndices[31],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsFragmentPipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [349] */
+ /* num parameters */ 2,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[8],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[739],
+ /* return matcher indices */ &kMatcherIndices[21],
+ /* flags */ OverloadFlags(OverloadFlag::kIsOperator, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [350] */
+ /* num parameters */ 2,
+ /* num template types */ 1,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[8],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[741],
+ /* return matcher indices */ &kMatcherIndices[40],
+ /* flags */ OverloadFlags(OverloadFlag::kIsOperator, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [351] */
+ /* num parameters */ 2,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[8],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[733],
+ /* return matcher indices */ &kMatcherIndices[21],
+ /* flags */ OverloadFlags(OverloadFlag::kIsOperator, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [352] */
+ /* num parameters */ 2,
+ /* num template types */ 1,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[8],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[735],
+ /* return matcher indices */ &kMatcherIndices[40],
+ /* flags */ OverloadFlags(OverloadFlag::kIsOperator, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [353] */
+ /* num parameters */ 2,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[8],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[707],
+ /* return matcher indices */ &kMatcherIndices[21],
+ /* flags */ OverloadFlags(OverloadFlag::kIsOperator, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [354] */
+ /* num parameters */ 2,
+ /* num template types */ 1,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[8],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[729],
+ /* return matcher indices */ &kMatcherIndices[40],
+ /* flags */ OverloadFlags(OverloadFlag::kIsOperator, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [355] */
+ /* num parameters */ 2,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[13],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[675],
+ /* return matcher indices */ &kMatcherIndices[21],
+ /* flags */ OverloadFlags(OverloadFlag::kIsOperator, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [356] */
+ /* num parameters */ 2,
+ /* num template types */ 1,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[13],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[683],
+ /* return matcher indices */ &kMatcherIndices[40],
+ /* flags */ OverloadFlags(OverloadFlag::kIsOperator, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [357] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[812],
+ /* return matcher indices */ &kMatcherIndices[5],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsFragmentPipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [358] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[823],
+ /* return matcher indices */ &kMatcherIndices[31],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsFragmentPipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [359] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[807],
+ /* return matcher indices */ &kMatcherIndices[5],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsFragmentPipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [360] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[808],
+ /* return matcher indices */ &kMatcherIndices[31],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsFragmentPipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [361] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[805],
+ /* return matcher indices */ &kMatcherIndices[5],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsFragmentPipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [362] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[806],
+ /* return matcher indices */ &kMatcherIndices[31],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsFragmentPipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [363] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[803],
+ /* return matcher indices */ &kMatcherIndices[5],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsFragmentPipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [364] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[804],
+ /* return matcher indices */ &kMatcherIndices[31],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsFragmentPipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [365] */
+ /* num parameters */ 2,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[793],
+ /* return matcher indices */ &kMatcherIndices[5],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [366] */
+ /* num parameters */ 2,
+ /* num template types */ 0,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[795],
+ /* return matcher indices */ &kMatcherIndices[5],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [367] */
+ /* num parameters */ 1,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[8],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[845],
+ /* return matcher indices */ &kMatcherIndices[44],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [368] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[844],
+ /* return matcher indices */ &kMatcherIndices[44],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [369] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[811],
+ /* return matcher indices */ &kMatcherIndices[5],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [370] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[810],
+ /* return matcher indices */ &kMatcherIndices[31],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [371] */
+ /* num parameters */ 1,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[11],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[814],
+ /* return matcher indices */ &kMatcherIndices[1],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [372] */
+ /* num parameters */ 1,
+ /* num template types */ 1,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[11],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[813],
+ /* return matcher indices */ &kMatcherIndices[37],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [373] */
+ /* num parameters */ 2,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[13],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[601],
+ /* return matcher indices */ &kMatcherIndices[21],
+ /* flags */ OverloadFlags(OverloadFlag::kIsOperator, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [374] */
+ /* num parameters */ 2,
+ /* num template types */ 1,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[13],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[599],
+ /* return matcher indices */ &kMatcherIndices[40],
+ /* flags */ OverloadFlags(OverloadFlag::kIsOperator, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [375] */
+ /* num parameters */ 1,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[11],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[816],
+ /* return matcher indices */ &kMatcherIndices[1],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [376] */
+ /* num parameters */ 1,
+ /* num template types */ 1,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[11],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[815],
+ /* return matcher indices */ &kMatcherIndices[37],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [377] */
+ /* num parameters */ 1,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[11],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[818],
+ /* return matcher indices */ &kMatcherIndices[1],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [378] */
+ /* num parameters */ 1,
+ /* num template types */ 1,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[11],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[817],
+ /* return matcher indices */ &kMatcherIndices[37],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [379] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[820],
+ /* return matcher indices */ &kMatcherIndices[5],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [380] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[819],
+ /* return matcher indices */ &kMatcherIndices[31],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [381] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[822],
+ /* return matcher indices */ &kMatcherIndices[5],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [382] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[821],
+ /* return matcher indices */ &kMatcherIndices[31],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [383] */
+ /* num parameters */ 3,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[8],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[453],
+ /* return matcher indices */ &kMatcherIndices[1],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [384] */
+ /* num parameters */ 3,
+ /* num template types */ 1,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[8],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[450],
+ /* return matcher indices */ &kMatcherIndices[37],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [385] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[832],
+ /* return matcher indices */ &kMatcherIndices[5],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [386] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[828],
+ /* return matcher indices */ &kMatcherIndices[31],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [387] */
+ /* num parameters */ 2,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[731],
+ /* return matcher indices */ &kMatcherIndices[5],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [388] */
+ /* num parameters */ 2,
+ /* num template types */ 0,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[761],
+ /* return matcher indices */ &kMatcherIndices[31],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [389] */
+ /* num parameters */ 2,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[11],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[639],
+ /* return matcher indices */ &kMatcherIndices[1],
+ /* flags */ OverloadFlags(OverloadFlag::kIsOperator, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [390] */
+ /* num parameters */ 2,
+ /* num template types */ 1,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[11],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[635],
+ /* return matcher indices */ &kMatcherIndices[37],
+ /* flags */ OverloadFlags(OverloadFlag::kIsOperator, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [391] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[966],
+ /* return matcher indices */ &kMatcherIndices[5],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [392] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[952],
+ /* return matcher indices */ &kMatcherIndices[31],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [393] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[976],
+ /* return matcher indices */ &kMatcherIndices[5],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [394] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[971],
+ /* return matcher indices */ &kMatcherIndices[31],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [395] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[978],
+ /* return matcher indices */ &kMatcherIndices[21],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [396] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[986],
+ /* return matcher indices */ &kMatcherIndices[21],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [397] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[939],
+ /* return matcher indices */ &kMatcherIndices[21],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [398] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[949],
+ /* return matcher indices */ &kMatcherIndices[21],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [399] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[922],
+ /* return matcher indices */ &kMatcherIndices[5],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [400] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[925],
+ /* return matcher indices */ &kMatcherIndices[31],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [401] */
+ /* num parameters */ 1,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[19],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[862],
+ /* return matcher indices */ &kMatcherIndices[1],
+ /* flags */ OverloadFlags(OverloadFlag::kIsOperator, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [402] */
+ /* num parameters */ 1,
+ /* num template types */ 1,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[19],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[863],
+ /* return matcher indices */ &kMatcherIndices[37],
+ /* flags */ OverloadFlags(OverloadFlag::kIsOperator, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [403] */
+ /* num parameters */ 1,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[11],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[860],
+ /* return matcher indices */ &kMatcherIndices[1],
+ /* flags */ OverloadFlags(OverloadFlag::kIsOperator, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [404] */
+ /* num parameters */ 1,
+ /* num template types */ 1,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[11],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[861],
+ /* return matcher indices */ &kMatcherIndices[37],
+ /* flags */ OverloadFlags(OverloadFlag::kIsOperator, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [405] */
+ /* num parameters */ 1,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[8],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[963],
+ /* return matcher indices */ &kMatcherIndices[1],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [406] */
+ /* num parameters */ 1,
+ /* num template types */ 1,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[8],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[900],
+ /* return matcher indices */ &kMatcherIndices[37],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [407] */
+ /* num parameters */ 2,
+ /* num template types */ 1,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[8],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[747],
+ /* return matcher indices */ &kMatcherIndices[21],
+ /* flags */ OverloadFlags(OverloadFlag::kIsOperator, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [408] */
+ /* num parameters */ 2,
+ /* num template types */ 1,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[8],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[749],
+ /* return matcher indices */ &kMatcherIndices[40],
+ /* flags */ OverloadFlags(OverloadFlag::kIsOperator, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [409] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[904],
+ /* return matcher indices */ &kMatcherIndices[103],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [410] */
+ /* num parameters */ 2,
+ /* num template types */ 1,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[11],
+ /* template numbers */ &kTemplateNumbers[9],
+ /* parameters */ &kParameters[711],
+ /* return matcher indices */ &kMatcherIndices[1],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [411] */
+ /* num parameters */ 2,
+ /* num template types */ 1,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[11],
+ /* template numbers */ &kTemplateNumbers[9],
+ /* parameters */ &kParameters[713],
+ /* return matcher indices */ &kMatcherIndices[1],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [412] */
+ /* num parameters */ 2,
+ /* num template types */ 1,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[11],
+ /* template numbers */ &kTemplateNumbers[9],
+ /* parameters */ &kParameters[715],
+ /* return matcher indices */ &kMatcherIndices[1],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [413] */
+ /* num parameters */ 2,
+ /* num template types */ 1,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[11],
+ /* template numbers */ &kTemplateNumbers[9],
+ /* parameters */ &kParameters[717],
+ /* return matcher indices */ &kMatcherIndices[1],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [414] */
+ /* num parameters */ 2,
+ /* num template types */ 1,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[11],
+ /* template numbers */ &kTemplateNumbers[9],
+ /* parameters */ &kParameters[719],
+ /* return matcher indices */ &kMatcherIndices[1],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [415] */
+ /* num parameters */ 2,
+ /* num template types */ 1,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[11],
+ /* template numbers */ &kTemplateNumbers[9],
+ /* parameters */ &kParameters[721],
+ /* return matcher indices */ &kMatcherIndices[1],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [416] */
+ /* num parameters */ 2,
+ /* num template types */ 1,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[11],
+ /* template numbers */ &kTemplateNumbers[9],
+ /* parameters */ &kParameters[723],
+ /* return matcher indices */ &kMatcherIndices[1],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [417] */
+ /* num parameters */ 2,
+ /* num template types */ 1,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[11],
+ /* template numbers */ &kTemplateNumbers[9],
+ /* parameters */ &kParameters[725],
+ /* return matcher indices */ nullptr,
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [418] */
+ /* num parameters */ 1,
+ /* num template types */ 1,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[11],
+ /* template numbers */ &kTemplateNumbers[9],
+ /* parameters */ &kParameters[847],
+ /* return matcher indices */ &kMatcherIndices[1],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [419] */
+ /* num parameters */ 2,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[605],
+ /* return matcher indices */ &kMatcherIndices[21],
+ /* flags */ OverloadFlags(OverloadFlag::kIsOperator, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [420] */
+ /* num parameters */ 2,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[603],
+ /* return matcher indices */ &kMatcherIndices[21],
+ /* flags */ OverloadFlags(OverloadFlag::kIsOperator, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [421] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[967],
+ /* return matcher indices */ &kMatcherIndices[62],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [422] */
+ /* num parameters */ 0,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[990],
+ /* return matcher indices */ nullptr,
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [423] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[901],
+ /* return matcher indices */ &kMatcherIndices[45],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [424] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[902],
+ /* return matcher indices */ &kMatcherIndices[45],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [425] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[903],
+ /* return matcher indices */ &kMatcherIndices[103],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [426] */
+ /* num parameters */ 2,
+ /* num template types */ 1,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[11],
+ /* template numbers */ &kTemplateNumbers[9],
+ /* parameters */ &kParameters[709],
+ /* return matcher indices */ &kMatcherIndices[1],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [427] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[987],
+ /* return matcher indices */ &kMatcherIndices[103],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [428] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 2,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[5],
+ /* parameters */ &kParameters[908],
+ /* return matcher indices */ &kMatcherIndices[23],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [429] */
+ /* num parameters */ 0,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[990],
+ /* return matcher indices */ nullptr,
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [430] */
+ /* num parameters */ 3,
+ /* num template types */ 0,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[507],
+ /* return matcher indices */ &kMatcherIndices[31],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [431] */
+ /* num parameters */ 2,
+ /* num template types */ 0,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[767],
+ /* return matcher indices */ &kMatcherIndices[31],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [432] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[989],
+ /* return matcher indices */ &kMatcherIndices[62],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [433] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[958],
+ /* return matcher indices */ &kMatcherIndices[62],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [434] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[972],
+ /* return matcher indices */ &kMatcherIndices[62],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [435] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[977],
+ /* return matcher indices */ &kMatcherIndices[62],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [436] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[982],
+ /* return matcher indices */ &kMatcherIndices[31],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [437] */
+ /* num parameters */ 3,
+ /* num template types */ 0,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[504],
+ /* return matcher indices */ &kMatcherIndices[31],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [438] */
+ /* num parameters */ 2,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[597],
+ /* return matcher indices */ &kMatcherIndices[62],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [439] */
+ /* num parameters */ 2,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[799],
+ /* return matcher indices */ &kMatcherIndices[44],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [440] */
+ /* num parameters */ 2,
+ /* num template types */ 1,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[8],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[797],
+ /* return matcher indices */ &kMatcherIndices[1],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [441] */
+ /* num parameters */ 1,
+ /* num template types */ 0,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[6],
+ /* parameters */ &kParameters[809],
+ /* return matcher indices */ &kMatcherIndices[5],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [442] */
+ /* num parameters */ 2,
+ /* num template types */ 0,
+ /* num template numbers */ 0,
+ /* template types */ &kTemplateTypes[20],
+ /* template numbers */ &kTemplateNumbers[10],
+ /* parameters */ &kParameters[791],
+ /* return matcher indices */ &kMatcherIndices[121],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [443] */
+ /* num parameters */ 1,
+ /* num template types */ 1,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[14],
+ /* template numbers */ &kTemplateNumbers[8],
+ /* parameters */ &kParameters[981],
+ /* return matcher indices */ &kMatcherIndices[62],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+ {
+ /* [444] */
+ /* num parameters */ 3,
+ /* num template types */ 1,
+ /* num template numbers */ 1,
+ /* template types */ &kTemplateTypes[11],
+ /* template numbers */ &kTemplateNumbers[9],
+ /* parameters */ &kParameters[591],
+ /* return matcher indices */ &kMatcherIndices[173],
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* const eval */ nullptr,
+ },
+};
+
+constexpr IntrinsicInfo kBuiltins[] = {
+ {
+ /* [0] */
+ /* fn abs<T : fiu32>(T) -> T */
+ /* fn abs<N : num, T : fiu32>(vec<N, T>) -> vec<N, T> */
+ /* num overloads */ 2,
+ /* overloads */ &kOverloads[405],
+ },
+ {
+ /* [1] */
+ /* fn acos(f32) -> f32 */
+ /* fn acos<N : num>(vec<N, f32>) -> vec<N, f32> */
+ /* num overloads */ 2,
+ /* overloads */ &kOverloads[399],
+ },
+ {
+ /* [2] */
+ /* fn all(bool) -> bool */
+ /* fn all<N : num>(vec<N, bool>) -> bool */
+ /* num overloads */ 2,
+ /* overloads */ &kOverloads[397],
+ },
+ {
+ /* [3] */
+ /* fn any(bool) -> bool */
+ /* fn any<N : num>(vec<N, bool>) -> bool */
+ /* num overloads */ 2,
+ /* overloads */ &kOverloads[395],
+ },
+ {
+ /* [4] */
+ /* fn arrayLength<T, A : access>(ptr<storage, array<T>, A>) -> u32 */
+ /* num overloads */ 1,
+ /* overloads */ &kOverloads[443],
+ },
+ {
+ /* [5] */
+ /* fn asin(f32) -> f32 */
+ /* fn asin<N : num>(vec<N, f32>) -> vec<N, f32> */
+ /* num overloads */ 2,
+ /* overloads */ &kOverloads[393],
+ },
+ {
+ /* [6] */
+ /* fn atan(f32) -> f32 */
+ /* fn atan<N : num>(vec<N, f32>) -> vec<N, f32> */
+ /* num overloads */ 2,
+ /* overloads */ &kOverloads[391],
+ },
+ {
+ /* [7] */
+ /* fn atan2(f32, f32) -> f32 */
+ /* fn atan2<N : num>(vec<N, f32>, vec<N, f32>) -> vec<N, f32> */
+ /* num overloads */ 2,
+ /* overloads */ &kOverloads[387],
+ },
+ {
+ /* [8] */
+ /* fn ceil(f32) -> f32 */
+ /* fn ceil<N : num>(vec<N, f32>) -> vec<N, f32> */
+ /* num overloads */ 2,
+ /* overloads */ &kOverloads[385],
+ },
+ {
+ /* [9] */
+ /* fn clamp<T : fiu32>(T, T, T) -> T */
+ /* fn clamp<N : num, T : fiu32>(vec<N, T>, vec<N, T>, vec<N, T>) -> vec<N, T> */
+ /* num overloads */ 2,
+ /* overloads */ &kOverloads[383],
+ },
+ {
+ /* [10] */
+ /* fn cos(f32) -> f32 */
+ /* fn cos<N : num>(vec<N, f32>) -> vec<N, f32> */
+ /* num overloads */ 2,
+ /* overloads */ &kOverloads[381],
+ },
+ {
+ /* [11] */
+ /* fn cosh(f32) -> f32 */
+ /* fn cosh<N : num>(vec<N, f32>) -> vec<N, f32> */
+ /* num overloads */ 2,
+ /* overloads */ &kOverloads[379],
+ },
+ {
+ /* [12] */
+ /* fn countLeadingZeros<T : iu32>(T) -> T */
+ /* fn countLeadingZeros<N : num, T : iu32>(vec<N, T>) -> vec<N, T> */
+ /* num overloads */ 2,
+ /* overloads */ &kOverloads[377],
+ },
+ {
+ /* [13] */
+ /* fn countOneBits<T : iu32>(T) -> T */
+ /* fn countOneBits<N : num, T : iu32>(vec<N, T>) -> vec<N, T> */
+ /* num overloads */ 2,
+ /* overloads */ &kOverloads[375],
+ },
+ {
+ /* [14] */
+ /* fn countTrailingZeros<T : iu32>(T) -> T */
+ /* fn countTrailingZeros<N : num, T : iu32>(vec<N, T>) -> vec<N, T> */
+ /* num overloads */ 2,
+ /* overloads */ &kOverloads[371],
+ },
+ {
+ /* [15] */
+ /* fn cross(vec3<f32>, vec3<f32>) -> vec3<f32> */
+ /* num overloads */ 1,
+ /* overloads */ &kOverloads[442],
+ },
+ {
+ /* [16] */
+ /* fn degrees(f32) -> f32 */
+ /* fn degrees<N : num>(vec<N, f32>) -> vec<N, f32> */
+ /* num overloads */ 2,
+ /* overloads */ &kOverloads[369],
+ },
+ {
+ /* [17] */
+ /* fn determinant<N : num>(mat<N, N, f32>) -> f32 */
+ /* num overloads */ 1,
+ /* overloads */ &kOverloads[441],
+ },
+ {
+ /* [18] */
+ /* fn distance(f32, f32) -> f32 */
+ /* fn distance<N : num>(vec<N, f32>, vec<N, f32>) -> f32 */
+ /* num overloads */ 2,
+ /* overloads */ &kOverloads[365],
+ },
+ {
+ /* [19] */
+ /* fn dot<N : num, T : fiu32>(vec<N, T>, vec<N, T>) -> T */
+ /* num overloads */ 1,
+ /* overloads */ &kOverloads[440],
+ },
+ {
+ /* [20] */
+ /* fn dot4I8Packed(u32, u32) -> i32 */
+ /* num overloads */ 1,
+ /* overloads */ &kOverloads[439],
+ },
+ {
+ /* [21] */
+ /* fn dot4U8Packed(u32, u32) -> u32 */
+ /* num overloads */ 1,
+ /* overloads */ &kOverloads[438],
+ },
+ {
+ /* [22] */
+ /* fn dpdx(f32) -> f32 */
+ /* fn dpdx<N : num>(vec<N, f32>) -> vec<N, f32> */
+ /* num overloads */ 2,
+ /* overloads */ &kOverloads[363],
+ },
+ {
+ /* [23] */
+ /* fn dpdxCoarse(f32) -> f32 */
+ /* fn dpdxCoarse<N : num>(vec<N, f32>) -> vec<N, f32> */
+ /* num overloads */ 2,
+ /* overloads */ &kOverloads[361],
+ },
+ {
+ /* [24] */
+ /* fn dpdxFine(f32) -> f32 */
+ /* fn dpdxFine<N : num>(vec<N, f32>) -> vec<N, f32> */
+ /* num overloads */ 2,
+ /* overloads */ &kOverloads[359],
+ },
+ {
+ /* [25] */
+ /* fn dpdy(f32) -> f32 */
+ /* fn dpdy<N : num>(vec<N, f32>) -> vec<N, f32> */
+ /* num overloads */ 2,
+ /* overloads */ &kOverloads[357],
+ },
+ {
+ /* [26] */
+ /* fn dpdyCoarse(f32) -> f32 */
+ /* fn dpdyCoarse<N : num>(vec<N, f32>) -> vec<N, f32> */
+ /* num overloads */ 2,
+ /* overloads */ &kOverloads[347],
+ },
+ {
+ /* [27] */
+ /* fn dpdyFine(f32) -> f32 */
+ /* fn dpdyFine<N : num>(vec<N, f32>) -> vec<N, f32> */
+ /* num overloads */ 2,
+ /* overloads */ &kOverloads[335],
+ },
+ {
+ /* [28] */
+ /* fn exp(f32) -> f32 */
+ /* fn exp<N : num>(vec<N, f32>) -> vec<N, f32> */
+ /* num overloads */ 2,
+ /* overloads */ &kOverloads[319],
+ },
+ {
+ /* [29] */
+ /* fn exp2(f32) -> f32 */
+ /* fn exp2<N : num>(vec<N, f32>) -> vec<N, f32> */
+ /* num overloads */ 2,
+ /* overloads */ &kOverloads[313],
+ },
+ {
+ /* [30] */
+ /* fn extractBits<T : iu32>(T, u32, u32) -> T */
+ /* fn extractBits<N : num, T : iu32>(vec<N, T>, u32, u32) -> vec<N, T> */
+ /* num overloads */ 2,
+ /* overloads */ &kOverloads[311],
+ },
+ {
+ /* [31] */
+ /* fn faceForward<N : num>(vec<N, f32>, vec<N, f32>, vec<N, f32>) -> vec<N, f32> */
+ /* num overloads */ 1,
+ /* overloads */ &kOverloads[437],
+ },
+ {
+ /* [32] */
+ /* fn firstLeadingBit<T : iu32>(T) -> T */
+ /* fn firstLeadingBit<N : num, T : iu32>(vec<N, T>) -> vec<N, T> */
+ /* num overloads */ 2,
+ /* overloads */ &kOverloads[305],
+ },
+ {
+ /* [33] */
+ /* fn firstTrailingBit<T : iu32>(T) -> T */
+ /* fn firstTrailingBit<N : num, T : iu32>(vec<N, T>) -> vec<N, T> */
+ /* num overloads */ 2,
+ /* overloads */ &kOverloads[303],
+ },
+ {
+ /* [34] */
+ /* fn floor(f32) -> f32 */
+ /* fn floor<N : num>(vec<N, f32>) -> vec<N, f32> */
+ /* num overloads */ 2,
+ /* overloads */ &kOverloads[301],
+ },
+ {
+ /* [35] */
+ /* fn fma(f32, f32, f32) -> f32 */
+ /* fn fma<N : num>(vec<N, f32>, vec<N, f32>, vec<N, f32>) -> vec<N, f32> */
+ /* num overloads */ 2,
+ /* overloads */ &kOverloads[297],
+ },
+ {
+ /* [36] */
+ /* fn fract(f32) -> f32 */
+ /* fn fract<N : num>(vec<N, f32>) -> vec<N, f32> */
+ /* num overloads */ 2,
+ /* overloads */ &kOverloads[295],
+ },
+ {
+ /* [37] */
+ /* fn frexp(f32) -> __frexp_result */
+ /* fn frexp<N : num>(vec<N, f32>) -> __frexp_result_vec<N> */
+ /* num overloads */ 2,
+ /* overloads */ &kOverloads[291],
+ },
+ {
+ /* [38] */
+ /* fn fwidth(f32) -> f32 */
+ /* fn fwidth<N : num>(vec<N, f32>) -> vec<N, f32> */
+ /* num overloads */ 2,
+ /* overloads */ &kOverloads[273],
+ },
+ {
+ /* [39] */
+ /* fn fwidthCoarse(f32) -> f32 */
+ /* fn fwidthCoarse<N : num>(vec<N, f32>) -> vec<N, f32> */
+ /* num overloads */ 2,
+ /* overloads */ &kOverloads[271],
+ },
+ {
+ /* [40] */
+ /* fn fwidthFine(f32) -> f32 */
+ /* fn fwidthFine<N : num>(vec<N, f32>) -> vec<N, f32> */
+ /* num overloads */ 2,
+ /* overloads */ &kOverloads[299],
+ },
+ {
+ /* [41] */
+ /* fn insertBits<T : iu32>(T, T, u32, u32) -> T */
+ /* fn insertBits<N : num, T : iu32>(vec<N, T>, vec<N, T>, u32, u32) -> vec<N, T> */
+ /* num overloads */ 2,
+ /* overloads */ &kOverloads[275],
+ },
+ {
+ /* [42] */
+ /* fn inverseSqrt(f32) -> f32 */
+ /* fn inverseSqrt<N : num>(vec<N, f32>) -> vec<N, f32> */
+ /* num overloads */ 2,
+ /* overloads */ &kOverloads[277],
+ },
+ {
+ /* [43] */
+ /* fn ldexp(f32, i32) -> f32 */
+ /* fn ldexp<N : num>(vec<N, f32>, vec<N, i32>) -> vec<N, f32> */
+ /* num overloads */ 2,
+ /* overloads */ &kOverloads[279],
+ },
+ {
+ /* [44] */
+ /* fn length(f32) -> f32 */
+ /* fn length<N : num>(vec<N, f32>) -> f32 */
+ /* num overloads */ 2,
+ /* overloads */ &kOverloads[281],
+ },
+ {
+ /* [45] */
+ /* fn log(f32) -> f32 */
+ /* fn log<N : num>(vec<N, f32>) -> vec<N, f32> */
+ /* num overloads */ 2,
+ /* overloads */ &kOverloads[283],
+ },
+ {
+ /* [46] */
+ /* fn log2(f32) -> f32 */
+ /* fn log2<N : num>(vec<N, f32>) -> vec<N, f32> */
+ /* num overloads */ 2,
+ /* overloads */ &kOverloads[285],
+ },
+ {
+ /* [47] */
+ /* fn max<T : fiu32>(T, T) -> T */
+ /* fn max<N : num, T : fiu32>(vec<N, T>, vec<N, T>) -> vec<N, T> */
+ /* num overloads */ 2,
+ /* overloads */ &kOverloads[287],
+ },
+ {
+ /* [48] */
+ /* fn min<T : fiu32>(T, T) -> T */
+ /* fn min<N : num, T : fiu32>(vec<N, T>, vec<N, T>) -> vec<N, T> */
+ /* num overloads */ 2,
+ /* overloads */ &kOverloads[289],
+ },
+ {
+ /* [49] */
+ /* fn mix(f32, f32, f32) -> f32 */
+ /* fn mix<N : num>(vec<N, f32>, vec<N, f32>, vec<N, f32>) -> vec<N, f32> */
+ /* fn mix<N : num>(vec<N, f32>, vec<N, f32>, f32) -> vec<N, f32> */
+ /* num overloads */ 3,
+ /* overloads */ &kOverloads[266],
+ },
+ {
+ /* [50] */
+ /* fn modf(f32) -> __modf_result */
+ /* fn modf<N : num>(vec<N, f32>) -> __modf_result_vec<N> */
+ /* num overloads */ 2,
+ /* overloads */ &kOverloads[293],
+ },
+ {
+ /* [51] */
+ /* fn normalize<N : num>(vec<N, f32>) -> vec<N, f32> */
+ /* num overloads */ 1,
+ /* overloads */ &kOverloads[436],
+ },
+ {
+ /* [52] */
+ /* fn pack2x16float(vec2<f32>) -> u32 */
+ /* num overloads */ 1,
+ /* overloads */ &kOverloads[435],
+ },
+ {
+ /* [53] */
+ /* fn pack2x16snorm(vec2<f32>) -> u32 */
+ /* num overloads */ 1,
+ /* overloads */ &kOverloads[434],
+ },
+ {
+ /* [54] */
+ /* fn pack2x16unorm(vec2<f32>) -> u32 */
+ /* num overloads */ 1,
+ /* overloads */ &kOverloads[421],
+ },
+ {
+ /* [55] */
+ /* fn pack4x8snorm(vec4<f32>) -> u32 */
+ /* num overloads */ 1,
+ /* overloads */ &kOverloads[433],
+ },
+ {
+ /* [56] */
+ /* fn pack4x8unorm(vec4<f32>) -> u32 */
+ /* num overloads */ 1,
+ /* overloads */ &kOverloads[432],
+ },
+ {
+ /* [57] */
+ /* fn pow(f32, f32) -> f32 */
+ /* fn pow<N : num>(vec<N, f32>, vec<N, f32>) -> vec<N, f32> */
+ /* num overloads */ 2,
+ /* overloads */ &kOverloads[307],
+ },
+ {
+ /* [58] */
+ /* fn radians(f32) -> f32 */
+ /* fn radians<N : num>(vec<N, f32>) -> vec<N, f32> */
+ /* num overloads */ 2,
+ /* overloads */ &kOverloads[309],
+ },
+ {
+ /* [59] */
+ /* fn reflect<N : num>(vec<N, f32>, vec<N, f32>) -> vec<N, f32> */
+ /* num overloads */ 1,
+ /* overloads */ &kOverloads[431],
+ },
+ {
+ /* [60] */
+ /* fn refract<N : num>(vec<N, f32>, vec<N, f32>, f32) -> vec<N, f32> */
+ /* num overloads */ 1,
+ /* overloads */ &kOverloads[430],
+ },
+ {
+ /* [61] */
+ /* fn reverseBits<T : iu32>(T) -> T */
+ /* fn reverseBits<N : num, T : iu32>(vec<N, T>) -> vec<N, T> */
+ /* num overloads */ 2,
+ /* overloads */ &kOverloads[315],
+ },
+ {
+ /* [62] */
+ /* fn round(f32) -> f32 */
+ /* fn round<N : num>(vec<N, f32>) -> vec<N, f32> */
+ /* num overloads */ 2,
+ /* overloads */ &kOverloads[317],
+ },
+ {
+ /* [63] */
+ /* fn select<T : scalar>(T, T, bool) -> T */
+ /* fn select<T : scalar, N : num>(vec<N, T>, vec<N, T>, bool) -> vec<N, T> */
+ /* fn select<N : num, T : scalar>(vec<N, T>, vec<N, T>, vec<N, bool>) -> vec<N, T> */
+ /* num overloads */ 3,
+ /* overloads */ &kOverloads[257],
+ },
+ {
+ /* [64] */
+ /* fn sign(f32) -> f32 */
+ /* fn sign<N : num>(vec<N, f32>) -> vec<N, f32> */
+ /* num overloads */ 2,
+ /* overloads */ &kOverloads[321],
+ },
+ {
+ /* [65] */
+ /* fn sin(f32) -> f32 */
+ /* fn sin<N : num>(vec<N, f32>) -> vec<N, f32> */
+ /* num overloads */ 2,
+ /* overloads */ &kOverloads[323],
+ },
+ {
+ /* [66] */
+ /* fn sinh(f32) -> f32 */
+ /* fn sinh<N : num>(vec<N, f32>) -> vec<N, f32> */
+ /* num overloads */ 2,
+ /* overloads */ &kOverloads[325],
+ },
+ {
+ /* [67] */
+ /* fn smoothstep(f32, f32, f32) -> f32 */
+ /* fn smoothstep<N : num>(vec<N, f32>, vec<N, f32>, vec<N, f32>) -> vec<N, f32> */
+ /* num overloads */ 2,
+ /* overloads */ &kOverloads[327],
+ },
+ {
+ /* [68] */
+ /* fn smoothStep(f32, f32, f32) -> f32 */
+ /* fn smoothStep<N : num>(vec<N, f32>, vec<N, f32>, vec<N, f32>) -> vec<N, f32> */
+ /* num overloads */ 2,
+ /* overloads */ &kOverloads[329],
+ },
+ {
+ /* [69] */
+ /* fn sqrt(f32) -> f32 */
+ /* fn sqrt<N : num>(vec<N, f32>) -> vec<N, f32> */
+ /* num overloads */ 2,
+ /* overloads */ &kOverloads[331],
+ },
+ {
+ /* [70] */
+ /* fn step(f32, f32) -> f32 */
+ /* fn step<N : num>(vec<N, f32>, vec<N, f32>) -> vec<N, f32> */
+ /* num overloads */ 2,
+ /* overloads */ &kOverloads[333],
+ },
+ {
+ /* [71] */
+ /* fn storageBarrier() */
+ /* num overloads */ 1,
+ /* overloads */ &kOverloads[429],
+ },
+ {
+ /* [72] */
+ /* fn tan(f32) -> f32 */
+ /* fn tan<N : num>(vec<N, f32>) -> vec<N, f32> */
+ /* num overloads */ 2,
+ /* overloads */ &kOverloads[337],
+ },
+ {
+ /* [73] */
+ /* fn tanh(f32) -> f32 */
+ /* fn tanh<N : num>(vec<N, f32>) -> vec<N, f32> */
+ /* num overloads */ 2,
+ /* overloads */ &kOverloads[339],
+ },
+ {
+ /* [74] */
+ /* fn transpose<M : num, N : num>(mat<M, N, f32>) -> mat<N, M, f32> */
+ /* num overloads */ 1,
+ /* overloads */ &kOverloads[428],
+ },
+ {
+ /* [75] */
+ /* fn trunc(f32) -> f32 */
+ /* fn trunc<N : num>(vec<N, f32>) -> vec<N, f32> */
+ /* num overloads */ 2,
+ /* overloads */ &kOverloads[343],
+ },
+ {
+ /* [76] */
+ /* fn unpack2x16float(u32) -> vec2<f32> */
+ /* num overloads */ 1,
+ /* overloads */ &kOverloads[427],
+ },
+ {
+ /* [77] */
+ /* fn unpack2x16snorm(u32) -> vec2<f32> */
+ /* num overloads */ 1,
+ /* overloads */ &kOverloads[409],
+ },
+ {
+ /* [78] */
+ /* fn unpack2x16unorm(u32) -> vec2<f32> */
+ /* num overloads */ 1,
+ /* overloads */ &kOverloads[425],
+ },
+ {
+ /* [79] */
+ /* fn unpack4x8snorm(u32) -> vec4<f32> */
+ /* num overloads */ 1,
+ /* overloads */ &kOverloads[424],
+ },
+ {
+ /* [80] */
+ /* fn unpack4x8unorm(u32) -> vec4<f32> */
+ /* num overloads */ 1,
+ /* overloads */ &kOverloads[423],
+ },
+ {
+ /* [81] */
+ /* fn workgroupBarrier() */
+ /* num overloads */ 1,
+ /* overloads */ &kOverloads[422],
+ },
+ {
+ /* [82] */
+ /* fn textureDimensions<T : fiu32>(texture: texture_1d<T>) -> i32 */
+ /* fn textureDimensions<T : fiu32>(texture: texture_1d<T>, level: i32) -> i32 */
+ /* fn textureDimensions<T : fiu32>(texture: texture_2d<T>) -> vec2<i32> */
+ /* fn textureDimensions<T : fiu32>(texture: texture_2d<T>, level: i32) -> vec2<i32> */
+ /* fn textureDimensions<T : fiu32>(texture: texture_2d_array<T>) -> vec2<i32> */
+ /* fn textureDimensions<T : fiu32>(texture: texture_2d_array<T>, level: i32) -> vec2<i32> */
+ /* fn textureDimensions<T : fiu32>(texture: texture_3d<T>) -> vec3<i32> */
+ /* fn textureDimensions<T : fiu32>(texture: texture_3d<T>, level: i32) -> vec3<i32> */
+ /* fn textureDimensions<T : fiu32>(texture: texture_cube<T>) -> vec2<i32> */
+ /* fn textureDimensions<T : fiu32>(texture: texture_cube<T>, level: i32) -> vec2<i32> */
+ /* fn textureDimensions<T : fiu32>(texture: texture_cube_array<T>) -> vec2<i32> */
+ /* fn textureDimensions<T : fiu32>(texture: texture_cube_array<T>, level: i32) -> vec2<i32> */
+ /* fn textureDimensions<T : fiu32>(texture: texture_multisampled_2d<T>) -> vec2<i32> */
+ /* fn textureDimensions(texture: texture_depth_2d) -> vec2<i32> */
+ /* fn textureDimensions(texture: texture_depth_2d, level: i32) -> vec2<i32> */
+ /* fn textureDimensions(texture: texture_depth_2d_array) -> vec2<i32> */
+ /* fn textureDimensions(texture: texture_depth_2d_array, level: i32) -> vec2<i32> */
+ /* fn textureDimensions(texture: texture_depth_cube) -> vec2<i32> */
+ /* fn textureDimensions(texture: texture_depth_cube, level: i32) -> vec2<i32> */
+ /* fn textureDimensions(texture: texture_depth_cube_array) -> vec2<i32> */
+ /* fn textureDimensions(texture: texture_depth_cube_array, level: i32) -> vec2<i32> */
+ /* fn textureDimensions(texture: texture_depth_multisampled_2d) -> vec2<i32> */
+ /* fn textureDimensions<F : texel_format, A : write_only>(texture: texture_storage_1d<F, A>) -> i32 */
+ /* fn textureDimensions<F : texel_format, A : write_only>(texture: texture_storage_2d<F, A>) -> vec2<i32> */
+ /* fn textureDimensions<F : texel_format, A : write_only>(texture: texture_storage_2d_array<F, A>) -> vec2<i32> */
+ /* fn textureDimensions<F : texel_format, A : write_only>(texture: texture_storage_3d<F, A>) -> vec3<i32> */
+ /* fn textureDimensions(texture: texture_external) -> vec2<i32> */
+ /* num overloads */ 27,
+ /* overloads */ &kOverloads[0],
+ },
+ {
+ /* [83] */
+ /* fn textureGather<T : fiu32>(@const component: i32, texture: texture_2d<T>, sampler: sampler, coords: vec2<f32>) -> vec4<T> */
+ /* fn textureGather<T : fiu32>(@const component: i32, texture: texture_2d<T>, sampler: sampler, coords: vec2<f32>, @const offset: vec2<i32>) -> vec4<T> */
+ /* fn textureGather<T : fiu32>(@const component: i32, texture: texture_2d_array<T>, sampler: sampler, coords: vec2<f32>, array_index: i32) -> vec4<T> */
+ /* fn textureGather<T : fiu32>(@const component: i32, texture: texture_2d_array<T>, sampler: sampler, coords: vec2<f32>, array_index: i32, @const offset: vec2<i32>) -> vec4<T> */
+ /* fn textureGather<T : fiu32>(@const component: i32, texture: texture_cube<T>, sampler: sampler, coords: vec3<f32>) -> vec4<T> */
+ /* fn textureGather<T : fiu32>(@const component: i32, texture: texture_cube_array<T>, sampler: sampler, coords: vec3<f32>, array_index: i32) -> vec4<T> */
+ /* fn textureGather(texture: texture_depth_2d, sampler: sampler, coords: vec2<f32>) -> vec4<f32> */
+ /* fn textureGather(texture: texture_depth_2d, sampler: sampler, coords: vec2<f32>, @const offset: vec2<i32>) -> vec4<f32> */
+ /* fn textureGather(texture: texture_depth_2d_array, sampler: sampler, coords: vec2<f32>, array_index: i32) -> vec4<f32> */
+ /* fn textureGather(texture: texture_depth_2d_array, sampler: sampler, coords: vec2<f32>, array_index: i32, @const offset: vec2<i32>) -> vec4<f32> */
+ /* fn textureGather(texture: texture_depth_cube, sampler: sampler, coords: vec3<f32>) -> vec4<f32> */
+ /* fn textureGather(texture: texture_depth_cube_array, sampler: sampler, coords: vec3<f32>, array_index: i32) -> vec4<f32> */
+ /* num overloads */ 12,
+ /* overloads */ &kOverloads[71],
+ },
+ {
+ /* [84] */
+ /* fn textureGatherCompare(texture: texture_depth_2d, sampler: sampler_comparison, coords: vec2<f32>, depth_ref: f32) -> vec4<f32> */
+ /* fn textureGatherCompare(texture: texture_depth_2d, sampler: sampler_comparison, coords: vec2<f32>, depth_ref: f32, @const offset: vec2<i32>) -> vec4<f32> */
+ /* fn textureGatherCompare(texture: texture_depth_2d_array, sampler: sampler_comparison, coords: vec2<f32>, array_index: i32, depth_ref: f32) -> vec4<f32> */
+ /* fn textureGatherCompare(texture: texture_depth_2d_array, sampler: sampler_comparison, coords: vec2<f32>, array_index: i32, depth_ref: f32, @const offset: vec2<i32>) -> vec4<f32> */
+ /* fn textureGatherCompare(texture: texture_depth_cube, sampler: sampler_comparison, coords: vec3<f32>, depth_ref: f32) -> vec4<f32> */
+ /* fn textureGatherCompare(texture: texture_depth_cube_array, sampler: sampler_comparison, coords: vec3<f32>, array_index: i32, depth_ref: f32) -> vec4<f32> */
+ /* num overloads */ 6,
+ /* overloads */ &kOverloads[157],
+ },
+ {
+ /* [85] */
+ /* fn textureNumLayers<T : fiu32>(texture: texture_2d_array<T>) -> i32 */
+ /* fn textureNumLayers<T : fiu32>(texture: texture_cube_array<T>) -> i32 */
+ /* fn textureNumLayers(texture: texture_depth_2d_array) -> i32 */
+ /* fn textureNumLayers(texture: texture_depth_cube_array) -> i32 */
+ /* fn textureNumLayers<F : texel_format, A : write_only>(texture: texture_storage_2d_array<F, A>) -> i32 */
+ /* num overloads */ 5,
+ /* overloads */ &kOverloads[205],
+ },
+ {
+ /* [86] */
+ /* fn textureNumLevels<T : fiu32>(texture: texture_1d<T>) -> i32 */
+ /* fn textureNumLevels<T : fiu32>(texture: texture_2d<T>) -> i32 */
+ /* fn textureNumLevels<T : fiu32>(texture: texture_2d_array<T>) -> i32 */
+ /* fn textureNumLevels<T : fiu32>(texture: texture_3d<T>) -> i32 */
+ /* fn textureNumLevels<T : fiu32>(texture: texture_cube<T>) -> i32 */
+ /* fn textureNumLevels<T : fiu32>(texture: texture_cube_array<T>) -> i32 */
+ /* fn textureNumLevels(texture: texture_depth_2d) -> i32 */
+ /* fn textureNumLevels(texture: texture_depth_2d_array) -> i32 */
+ /* fn textureNumLevels(texture: texture_depth_cube) -> i32 */
+ /* fn textureNumLevels(texture: texture_depth_cube_array) -> i32 */
+ /* num overloads */ 10,
+ /* overloads */ &kOverloads[105],
+ },
+ {
+ /* [87] */
+ /* fn textureNumSamples<T : fiu32>(texture: texture_multisampled_2d<T>) -> i32 */
+ /* fn textureNumSamples(texture: texture_depth_multisampled_2d) -> i32 */
+ /* num overloads */ 2,
+ /* overloads */ &kOverloads[367],
+ },
+ {
+ /* [88] */
+ /* fn textureSample(texture: texture_1d<f32>, sampler: sampler, coords: f32) -> vec4<f32> */
+ /* fn textureSample(texture: texture_2d<f32>, sampler: sampler, coords: vec2<f32>) -> vec4<f32> */
+ /* fn textureSample(texture: texture_2d<f32>, sampler: sampler, coords: vec2<f32>, @const offset: vec2<i32>) -> vec4<f32> */
+ /* fn textureSample(texture: texture_2d_array<f32>, sampler: sampler, coords: vec2<f32>, array_index: i32) -> vec4<f32> */
+ /* fn textureSample(texture: texture_2d_array<f32>, sampler: sampler, coords: vec2<f32>, array_index: i32, @const offset: vec2<i32>) -> vec4<f32> */
+ /* fn textureSample(texture: texture_3d<f32>, sampler: sampler, coords: vec3<f32>) -> vec4<f32> */
+ /* fn textureSample(texture: texture_3d<f32>, sampler: sampler, coords: vec3<f32>, @const offset: vec3<i32>) -> vec4<f32> */
+ /* fn textureSample(texture: texture_cube<f32>, sampler: sampler, coords: vec3<f32>) -> vec4<f32> */
+ /* fn textureSample(texture: texture_cube_array<f32>, sampler: sampler, coords: vec3<f32>, array_index: i32) -> vec4<f32> */
+ /* fn textureSample(texture: texture_depth_2d, sampler: sampler, coords: vec2<f32>) -> f32 */
+ /* fn textureSample(texture: texture_depth_2d, sampler: sampler, coords: vec2<f32>, @const offset: vec2<i32>) -> f32 */
+ /* fn textureSample(texture: texture_depth_2d_array, sampler: sampler, coords: vec2<f32>, array_index: i32) -> f32 */
+ /* fn textureSample(texture: texture_depth_2d_array, sampler: sampler, coords: vec2<f32>, array_index: i32, @const offset: vec2<i32>) -> f32 */
+ /* fn textureSample(texture: texture_depth_cube, sampler: sampler, coords: vec3<f32>) -> f32 */
+ /* fn textureSample(texture: texture_depth_cube_array, sampler: sampler, coords: vec3<f32>, array_index: i32) -> f32 */
+ /* num overloads */ 15,
+ /* overloads */ &kOverloads[27],
+ },
+ {
+ /* [89] */
+ /* fn textureSampleBias(texture: texture_2d<f32>, sampler: sampler, coords: vec2<f32>, bias: f32) -> vec4<f32> */
+ /* fn textureSampleBias(texture: texture_2d<f32>, sampler: sampler, coords: vec2<f32>, bias: f32, @const offset: vec2<i32>) -> vec4<f32> */
+ /* fn textureSampleBias(texture: texture_2d_array<f32>, sampler: sampler, coords: vec2<f32>, array_index: i32, bias: f32) -> vec4<f32> */
+ /* fn textureSampleBias(texture: texture_2d_array<f32>, sampler: sampler, coords: vec2<f32>, array_index: i32, bias: f32, @const offset: vec2<i32>) -> vec4<f32> */
+ /* fn textureSampleBias(texture: texture_3d<f32>, sampler: sampler, coords: vec3<f32>, bias: f32) -> vec4<f32> */
+ /* fn textureSampleBias(texture: texture_3d<f32>, sampler: sampler, coords: vec3<f32>, bias: f32, @const offset: vec3<i32>) -> vec4<f32> */
+ /* fn textureSampleBias(texture: texture_cube<f32>, sampler: sampler, coords: vec3<f32>, bias: f32) -> vec4<f32> */
+ /* fn textureSampleBias(texture: texture_cube_array<f32>, sampler: sampler, coords: vec3<f32>, array_index: i32, bias: f32) -> vec4<f32> */
+ /* num overloads */ 8,
+ /* overloads */ &kOverloads[133],
+ },
+ {
+ /* [90] */
+ /* fn textureSampleCompare(texture: texture_depth_2d, sampler: sampler_comparison, coords: vec2<f32>, depth_ref: f32) -> f32 */
+ /* fn textureSampleCompare(texture: texture_depth_2d, sampler: sampler_comparison, coords: vec2<f32>, depth_ref: f32, @const offset: vec2<i32>) -> f32 */
+ /* fn textureSampleCompare(texture: texture_depth_2d_array, sampler: sampler_comparison, coords: vec2<f32>, array_index: i32, depth_ref: f32) -> f32 */
+ /* fn textureSampleCompare(texture: texture_depth_2d_array, sampler: sampler_comparison, coords: vec2<f32>, array_index: i32, depth_ref: f32, @const offset: vec2<i32>) -> f32 */
+ /* fn textureSampleCompare(texture: texture_depth_cube, sampler: sampler_comparison, coords: vec3<f32>, depth_ref: f32) -> f32 */
+ /* fn textureSampleCompare(texture: texture_depth_cube_array, sampler: sampler_comparison, coords: vec3<f32>, array_index: i32, depth_ref: f32) -> f32 */
+ /* num overloads */ 6,
+ /* overloads */ &kOverloads[169],
+ },
+ {
+ /* [91] */
+ /* fn textureSampleCompareLevel(texture: texture_depth_2d, sampler: sampler_comparison, coords: vec2<f32>, depth_ref: f32) -> f32 */
+ /* fn textureSampleCompareLevel(texture: texture_depth_2d, sampler: sampler_comparison, coords: vec2<f32>, depth_ref: f32, @const offset: vec2<i32>) -> f32 */
+ /* fn textureSampleCompareLevel(texture: texture_depth_2d_array, sampler: sampler_comparison, coords: vec2<f32>, array_index: i32, depth_ref: f32) -> f32 */
+ /* fn textureSampleCompareLevel(texture: texture_depth_2d_array, sampler: sampler_comparison, coords: vec2<f32>, array_index: i32, depth_ref: f32, @const offset: vec2<i32>) -> f32 */
+ /* fn textureSampleCompareLevel(texture: texture_depth_cube, sampler: sampler_comparison, coords: vec3<f32>, depth_ref: f32) -> f32 */
+ /* fn textureSampleCompareLevel(texture: texture_depth_cube_array, sampler: sampler_comparison, coords: vec3<f32>, array_index: i32, depth_ref: f32) -> f32 */
+ /* num overloads */ 6,
+ /* overloads */ &kOverloads[163],
+ },
+ {
+ /* [92] */
+ /* fn textureSampleGrad(texture: texture_2d<f32>, sampler: sampler, coords: vec2<f32>, ddx: vec2<f32>, ddy: vec2<f32>) -> vec4<f32> */
+ /* fn textureSampleGrad(texture: texture_2d<f32>, sampler: sampler, coords: vec2<f32>, ddx: vec2<f32>, ddy: vec2<f32>, @const offset: vec2<i32>) -> vec4<f32> */
+ /* fn textureSampleGrad(texture: texture_2d_array<f32>, sampler: sampler, coords: vec2<f32>, array_index: i32, ddx: vec2<f32>, ddy: vec2<f32>) -> vec4<f32> */
+ /* fn textureSampleGrad(texture: texture_2d_array<f32>, sampler: sampler, coords: vec2<f32>, array_index: i32, ddx: vec2<f32>, ddy: vec2<f32>, @const offset: vec2<i32>) -> vec4<f32> */
+ /* fn textureSampleGrad(texture: texture_3d<f32>, sampler: sampler, coords: vec3<f32>, ddx: vec3<f32>, ddy: vec3<f32>) -> vec4<f32> */
+ /* fn textureSampleGrad(texture: texture_3d<f32>, sampler: sampler, coords: vec3<f32>, ddx: vec3<f32>, ddy: vec3<f32>, @const offset: vec3<i32>) -> vec4<f32> */
+ /* fn textureSampleGrad(texture: texture_cube<f32>, sampler: sampler, coords: vec3<f32>, ddx: vec3<f32>, ddy: vec3<f32>) -> vec4<f32> */
+ /* fn textureSampleGrad(texture: texture_cube_array<f32>, sampler: sampler, coords: vec3<f32>, array_index: i32, ddx: vec3<f32>, ddy: vec3<f32>) -> vec4<f32> */
+ /* num overloads */ 8,
+ /* overloads */ &kOverloads[141],
+ },
+ {
+ /* [93] */
+ /* fn textureSampleLevel(texture: texture_2d<f32>, sampler: sampler, coords: vec2<f32>, level: f32) -> vec4<f32> */
+ /* fn textureSampleLevel(texture: texture_2d<f32>, sampler: sampler, coords: vec2<f32>, level: f32, @const offset: vec2<i32>) -> vec4<f32> */
+ /* fn textureSampleLevel(texture: texture_2d_array<f32>, sampler: sampler, coords: vec2<f32>, array_index: i32, level: f32) -> vec4<f32> */
+ /* fn textureSampleLevel(texture: texture_2d_array<f32>, sampler: sampler, coords: vec2<f32>, array_index: i32, level: f32, @const offset: vec2<i32>) -> vec4<f32> */
+ /* fn textureSampleLevel(texture: texture_3d<f32>, sampler: sampler, coords: vec3<f32>, level: f32) -> vec4<f32> */
+ /* fn textureSampleLevel(texture: texture_3d<f32>, sampler: sampler, coords: vec3<f32>, level: f32, @const offset: vec3<i32>) -> vec4<f32> */
+ /* fn textureSampleLevel(texture: texture_cube<f32>, sampler: sampler, coords: vec3<f32>, level: f32) -> vec4<f32> */
+ /* fn textureSampleLevel(texture: texture_cube_array<f32>, sampler: sampler, coords: vec3<f32>, array_index: i32, level: f32) -> vec4<f32> */
+ /* fn textureSampleLevel(texture: texture_depth_2d, sampler: sampler, coords: vec2<f32>, level: i32) -> f32 */
+ /* fn textureSampleLevel(texture: texture_depth_2d, sampler: sampler, coords: vec2<f32>, level: i32, @const offset: vec2<i32>) -> f32 */
+ /* fn textureSampleLevel(texture: texture_depth_2d_array, sampler: sampler, coords: vec2<f32>, array_index: i32, level: i32) -> f32 */
+ /* fn textureSampleLevel(texture: texture_depth_2d_array, sampler: sampler, coords: vec2<f32>, array_index: i32, level: i32, @const offset: vec2<i32>) -> f32 */
+ /* fn textureSampleLevel(texture: texture_depth_cube, sampler: sampler, coords: vec3<f32>, level: i32) -> f32 */
+ /* fn textureSampleLevel(texture: texture_depth_cube_array, sampler: sampler, coords: vec3<f32>, array_index: i32, level: i32) -> f32 */
+ /* fn textureSampleLevel(texture: texture_external, sampler: sampler, coords: vec2<f32>) -> vec4<f32> */
+ /* num overloads */ 15,
+ /* overloads */ &kOverloads[42],
+ },
+ {
+ /* [94] */
+ /* fn textureStore(texture: texture_storage_1d<f32_texel_format, write>, coords: i32, value: vec4<f32>) */
+ /* fn textureStore(texture: texture_storage_2d<f32_texel_format, write>, coords: vec2<i32>, value: vec4<f32>) */
+ /* fn textureStore(texture: texture_storage_2d_array<f32_texel_format, write>, coords: vec2<i32>, array_index: i32, value: vec4<f32>) */
+ /* fn textureStore(texture: texture_storage_3d<f32_texel_format, write>, coords: vec3<i32>, value: vec4<f32>) */
+ /* fn textureStore(texture: texture_storage_1d<i32_texel_format, write>, coords: i32, value: vec4<i32>) */
+ /* fn textureStore(texture: texture_storage_2d<i32_texel_format, write>, coords: vec2<i32>, value: vec4<i32>) */
+ /* fn textureStore(texture: texture_storage_2d_array<i32_texel_format, write>, coords: vec2<i32>, array_index: i32, value: vec4<i32>) */
+ /* fn textureStore(texture: texture_storage_3d<i32_texel_format, write>, coords: vec3<i32>, value: vec4<i32>) */
+ /* fn textureStore(texture: texture_storage_1d<u32_texel_format, write>, coords: i32, value: vec4<u32>) */
+ /* fn textureStore(texture: texture_storage_2d<u32_texel_format, write>, coords: vec2<i32>, value: vec4<u32>) */
+ /* fn textureStore(texture: texture_storage_2d_array<u32_texel_format, write>, coords: vec2<i32>, array_index: i32, value: vec4<u32>) */
+ /* fn textureStore(texture: texture_storage_3d<u32_texel_format, write>, coords: vec3<i32>, value: vec4<u32>) */
+ /* num overloads */ 12,
+ /* overloads */ &kOverloads[83],
+ },
+ {
+ /* [95] */
+ /* fn textureLoad<T : fiu32>(texture: texture_1d<T>, coords: i32, level: i32) -> vec4<T> */
+ /* fn textureLoad<T : fiu32>(texture: texture_2d<T>, coords: vec2<i32>, level: i32) -> vec4<T> */
+ /* fn textureLoad<T : fiu32>(texture: texture_2d_array<T>, coords: vec2<i32>, array_index: i32, level: i32) -> vec4<T> */
+ /* fn textureLoad<T : fiu32>(texture: texture_3d<T>, coords: vec3<i32>, level: i32) -> vec4<T> */
+ /* fn textureLoad<T : fiu32>(texture: texture_multisampled_2d<T>, coords: vec2<i32>, sample_index: i32) -> vec4<T> */
+ /* fn textureLoad(texture: texture_depth_2d, coords: vec2<i32>, level: i32) -> f32 */
+ /* fn textureLoad(texture: texture_depth_2d_array, coords: vec2<i32>, array_index: i32, level: i32) -> f32 */
+ /* fn textureLoad(texture: texture_depth_multisampled_2d, coords: vec2<i32>, sample_index: i32) -> f32 */
+ /* fn textureLoad(texture: texture_external, coords: vec2<i32>) -> vec4<f32> */
+ /* num overloads */ 9,
+ /* overloads */ &kOverloads[115],
+ },
+ {
+ /* [96] */
+ /* fn atomicLoad<T : iu32, S : workgroup_or_storage>(ptr<S, atomic<T>, read_write>) -> T */
+ /* num overloads */ 1,
+ /* overloads */ &kOverloads[418],
+ },
+ {
+ /* [97] */
+ /* fn atomicStore<T : iu32, S : workgroup_or_storage>(ptr<S, atomic<T>, read_write>, T) */
+ /* num overloads */ 1,
+ /* overloads */ &kOverloads[417],
+ },
+ {
+ /* [98] */
+ /* fn atomicAdd<T : iu32, S : workgroup_or_storage>(ptr<S, atomic<T>, read_write>, T) -> T */
+ /* num overloads */ 1,
+ /* overloads */ &kOverloads[416],
+ },
+ {
+ /* [99] */
+ /* fn atomicSub<T : iu32, S : workgroup_or_storage>(ptr<S, atomic<T>, read_write>, T) -> T */
+ /* num overloads */ 1,
+ /* overloads */ &kOverloads[415],
+ },
+ {
+ /* [100] */
+ /* fn atomicMax<T : iu32, S : workgroup_or_storage>(ptr<S, atomic<T>, read_write>, T) -> T */
+ /* num overloads */ 1,
+ /* overloads */ &kOverloads[414],
+ },
+ {
+ /* [101] */
+ /* fn atomicMin<T : iu32, S : workgroup_or_storage>(ptr<S, atomic<T>, read_write>, T) -> T */
+ /* num overloads */ 1,
+ /* overloads */ &kOverloads[413],
+ },
+ {
+ /* [102] */
+ /* fn atomicAnd<T : iu32, S : workgroup_or_storage>(ptr<S, atomic<T>, read_write>, T) -> T */
+ /* num overloads */ 1,
+ /* overloads */ &kOverloads[412],
+ },
+ {
+ /* [103] */
+ /* fn atomicOr<T : iu32, S : workgroup_or_storage>(ptr<S, atomic<T>, read_write>, T) -> T */
+ /* num overloads */ 1,
+ /* overloads */ &kOverloads[411],
+ },
+ {
+ /* [104] */
+ /* fn atomicXor<T : iu32, S : workgroup_or_storage>(ptr<S, atomic<T>, read_write>, T) -> T */
+ /* num overloads */ 1,
+ /* overloads */ &kOverloads[410],
+ },
+ {
+ /* [105] */
+ /* fn atomicExchange<T : iu32, S : workgroup_or_storage>(ptr<S, atomic<T>, read_write>, T) -> T */
+ /* num overloads */ 1,
+ /* overloads */ &kOverloads[426],
+ },
+ {
+ /* [106] */
+ /* fn atomicCompareExchangeWeak<T : iu32, S : workgroup_or_storage>(ptr<S, atomic<T>, read_write>, T, T) -> __atomic_compare_exchange_result<T> */
+ /* num overloads */ 1,
+ /* overloads */ &kOverloads[444],
+ },
+};
+
+constexpr IntrinsicInfo kUnaryOperators[] = {
+ {
+ /* [0] */
+ /* op !(bool) -> bool */
+ /* op !<N : num>(vec<N, bool>) -> vec<N, bool> */
+ /* num overloads */ 2,
+ /* overloads */ &kOverloads[269],
+ },
+ {
+ /* [1] */
+ /* op ~<T : iu32>(T) -> T */
+ /* op ~<T : iu32, N : num>(vec<N, T>) -> vec<N, T> */
+ /* num overloads */ 2,
+ /* overloads */ &kOverloads[403],
+ },
+ {
+ /* [2] */
+ /* op -<T : fi32>(T) -> T */
+ /* op -<T : fi32, N : num>(vec<N, T>) -> vec<N, T> */
+ /* num overloads */ 2,
+ /* overloads */ &kOverloads[401],
+ },
+};
+constexpr uint8_t kUnaryOperatorNot = 0;
+constexpr uint8_t kUnaryOperatorComplement = 1;
+constexpr uint8_t kUnaryOperatorMinus = 2;
+
+constexpr IntrinsicInfo kBinaryOperators[] = {
+ {
+ /* [0] */
+ /* op +<T : fiu32>(T, T) -> T */
+ /* op +<T : fiu32, N : num>(vec<N, T>, vec<N, T>) -> vec<N, T> */
+ /* op +<T : fiu32, N : num>(vec<N, T>, T) -> vec<N, T> */
+ /* op +<T : fiu32, N : num>(T, vec<N, T>) -> vec<N, T> */
+ /* op +<N : num, M : num>(mat<N, M, f32>, mat<N, M, f32>) -> mat<N, M, f32> */
+ /* num overloads */ 5,
+ /* overloads */ &kOverloads[220],
+ },
+ {
+ /* [1] */
+ /* op -<T : fiu32>(T, T) -> T */
+ /* op -<T : fiu32, N : num>(vec<N, T>, vec<N, T>) -> vec<N, T> */
+ /* op -<T : fiu32, N : num>(vec<N, T>, T) -> vec<N, T> */
+ /* op -<T : fiu32, N : num>(T, vec<N, T>) -> vec<N, T> */
+ /* op -<N : num, M : num>(mat<N, M, f32>, mat<N, M, f32>) -> mat<N, M, f32> */
+ /* num overloads */ 5,
+ /* overloads */ &kOverloads[215],
+ },
+ {
+ /* [2] */
+ /* op *<T : fiu32>(T, T) -> T */
+ /* op *<T : fiu32, N : num>(vec<N, T>, vec<N, T>) -> vec<N, T> */
+ /* op *<T : fiu32, N : num>(vec<N, T>, T) -> vec<N, T> */
+ /* op *<T : fiu32, N : num>(T, vec<N, T>) -> vec<N, T> */
+ /* op *<N : num, M : num>(f32, mat<N, M, f32>) -> mat<N, M, f32> */
+ /* op *<N : num, M : num>(mat<N, M, f32>, f32) -> mat<N, M, f32> */
+ /* op *<C : num, R : num>(mat<C, R, f32>, vec<C, f32>) -> vec<R, f32> */
+ /* op *<C : num, R : num>(vec<R, f32>, mat<C, R, f32>) -> vec<C, f32> */
+ /* op *<K : num, C : num, R : num>(mat<K, R, f32>, mat<C, K, f32>) -> mat<C, R, f32> */
+ /* num overloads */ 9,
+ /* overloads */ &kOverloads[124],
+ },
+ {
+ /* [3] */
+ /* op /<T : fiu32>(T, T) -> T */
+ /* op /<T : fiu32, N : num>(vec<N, T>, vec<N, T>) -> vec<N, T> */
+ /* op /<T : fiu32, N : num>(vec<N, T>, T) -> vec<N, T> */
+ /* op /<T : fiu32, N : num>(T, vec<N, T>) -> vec<N, T> */
+ /* num overloads */ 4,
+ /* overloads */ &kOverloads[243],
+ },
+ {
+ /* [4] */
+ /* op %<T : fiu32>(T, T) -> T */
+ /* op %<T : fiu32, N : num>(vec<N, T>, vec<N, T>) -> vec<N, T> */
+ /* op %<T : fiu32, N : num>(vec<N, T>, T) -> vec<N, T> */
+ /* op %<T : fiu32, N : num>(T, vec<N, T>) -> vec<N, T> */
+ /* num overloads */ 4,
+ /* overloads */ &kOverloads[239],
+ },
+ {
+ /* [5] */
+ /* op ^<T : iu32>(T, T) -> T */
+ /* op ^<T : iu32, N : num>(vec<N, T>, vec<N, T>) -> vec<N, T> */
+ /* num overloads */ 2,
+ /* overloads */ &kOverloads[389],
+ },
+ {
+ /* [6] */
+ /* op &(bool, bool) -> bool */
+ /* op &<N : num>(vec<N, bool>, vec<N, bool>) -> vec<N, bool> */
+ /* op &<T : iu32>(T, T) -> T */
+ /* op &<T : iu32, N : num>(vec<N, T>, vec<N, T>) -> vec<N, T> */
+ /* num overloads */ 4,
+ /* overloads */ &kOverloads[235],
+ },
+ {
+ /* [7] */
+ /* op |(bool, bool) -> bool */
+ /* op |<N : num>(vec<N, bool>, vec<N, bool>) -> vec<N, bool> */
+ /* op |<T : iu32>(T, T) -> T */
+ /* op |<T : iu32, N : num>(vec<N, T>, vec<N, T>) -> vec<N, T> */
+ /* num overloads */ 4,
+ /* overloads */ &kOverloads[247],
+ },
+ {
+ /* [8] */
+ /* op &&(bool, bool) -> bool */
+ /* num overloads */ 1,
+ /* overloads */ &kOverloads[419],
+ },
+ {
+ /* [9] */
+ /* op ||(bool, bool) -> bool */
+ /* num overloads */ 1,
+ /* overloads */ &kOverloads[420],
+ },
+ {
+ /* [10] */
+ /* op ==<T : scalar>(T, T) -> bool */
+ /* op ==<T : scalar, N : num>(vec<N, T>, vec<N, T>) -> vec<N, bool> */
+ /* num overloads */ 2,
+ /* overloads */ &kOverloads[373],
+ },
+ {
+ /* [11] */
+ /* op !=<T : scalar>(T, T) -> bool */
+ /* op !=<T : scalar, N : num>(vec<N, T>, vec<N, T>) -> vec<N, bool> */
+ /* num overloads */ 2,
+ /* overloads */ &kOverloads[355],
+ },
+ {
+ /* [12] */
+ /* op <<T : fiu32>(T, T) -> bool */
+ /* op <<T : fiu32, N : num>(vec<N, T>, vec<N, T>) -> vec<N, bool> */
+ /* num overloads */ 2,
+ /* overloads */ &kOverloads[353],
+ },
+ {
+ /* [13] */
+ /* op ><T : fiu32>(T, T) -> bool */
+ /* op ><T : fiu32, N : num>(vec<N, T>, vec<N, T>) -> vec<N, bool> */
+ /* num overloads */ 2,
+ /* overloads */ &kOverloads[351],
+ },
+ {
+ /* [14] */
+ /* op <=<T : fiu32>(T, T) -> bool */
+ /* op <=<T : fiu32, N : num>(vec<N, T>, vec<N, T>) -> vec<N, bool> */
+ /* num overloads */ 2,
+ /* overloads */ &kOverloads[349],
+ },
+ {
+ /* [15] */
+ /* op >=<T : fiu32>(T, T) -> bool */
+ /* op >=<T : fiu32, N : num>(vec<N, T>, vec<N, T>) -> vec<N, bool> */
+ /* num overloads */ 2,
+ /* overloads */ &kOverloads[407],
+ },
+ {
+ /* [16] */
+ /* op <<<T : iu32>(T, u32) -> T */
+ /* op <<<T : iu32, N : num>(vec<N, T>, vec<N, u32>) -> vec<N, T> */
+ /* num overloads */ 2,
+ /* overloads */ &kOverloads[345],
+ },
+ {
+ /* [17] */
+ /* op >><T : iu32>(T, u32) -> T */
+ /* op >><T : iu32, N : num>(vec<N, T>, vec<N, u32>) -> vec<N, T> */
+ /* num overloads */ 2,
+ /* overloads */ &kOverloads[341],
+ },
+};
+constexpr uint8_t kBinaryOperatorPlus = 0;
+constexpr uint8_t kBinaryOperatorMinus = 1;
+constexpr uint8_t kBinaryOperatorStar = 2;
+constexpr uint8_t kBinaryOperatorDivide = 3;
+constexpr uint8_t kBinaryOperatorModulo = 4;
+constexpr uint8_t kBinaryOperatorXor = 5;
+constexpr uint8_t kBinaryOperatorAnd = 6;
+constexpr uint8_t kBinaryOperatorOr = 7;
+constexpr uint8_t kBinaryOperatorLogicalAnd = 8;
+constexpr uint8_t kBinaryOperatorLogicalOr = 9;
+constexpr uint8_t kBinaryOperatorEqual = 10;
+constexpr uint8_t kBinaryOperatorNotEqual = 11;
+constexpr uint8_t kBinaryOperatorLessThan = 12;
+constexpr uint8_t kBinaryOperatorGreaterThan = 13;
+constexpr uint8_t kBinaryOperatorLessThanEqual = 14;
+constexpr uint8_t kBinaryOperatorGreaterThanEqual = 15;
+constexpr uint8_t kBinaryOperatorShiftLeft = 16;
+constexpr uint8_t kBinaryOperatorShiftRight = 17;
+
+constexpr IntrinsicInfo kConstructorsAndConverters[] = {
+ {
+ /* [0] */
+ /* ctor i32() -> i32 */
+ /* ctor i32(i32) -> i32 */
+ /* conv i32<T : scalar_no_i32>(T) -> i32 */
+ /* num overloads */ 3,
+ /* overloads */ &kOverloads[251],
+ },
+ {
+ /* [1] */
+ /* ctor u32() -> u32 */
+ /* ctor u32(u32) -> u32 */
+ /* conv u32<T : scalar_no_u32>(T) -> u32 */
+ /* num overloads */ 3,
+ /* overloads */ &kOverloads[260],
+ },
+ {
+ /* [2] */
+ /* ctor f32() -> f32 */
+ /* ctor f32(f32) -> f32 */
+ /* conv f32<T : scalar_no_f32>(T) -> f32 */
+ /* num overloads */ 3,
+ /* overloads */ &kOverloads[263],
+ },
+ {
+ /* [3] */
+ /* ctor bool() -> bool */
+ /* ctor bool(bool) -> bool */
+ /* conv bool<T : scalar_no_bool>(T) -> bool */
+ /* num overloads */ 3,
+ /* overloads */ &kOverloads[254],
+ },
+ {
+ /* [4] */
+ /* ctor vec2<T : scalar>() -> vec2<T> */
+ /* ctor vec2<T : scalar>(vec2<T>) -> vec2<T> */
+ /* ctor vec2<T : abstract_or_scalar>(T) -> vec2<T> */
+ /* ctor vec2<T : abstract_or_scalar>(x: T, y: T) -> vec2<T> */
+ /* conv vec2<T : f32, U : scalar_no_f32>(vec2<U>) -> vec2<f32> */
+ /* conv vec2<T : i32, U : scalar_no_i32>(vec2<U>) -> vec2<i32> */
+ /* conv vec2<T : u32, U : scalar_no_u32>(vec2<U>) -> vec2<u32> */
+ /* conv vec2<T : bool, U : scalar_no_bool>(vec2<U>) -> vec2<bool> */
+ /* num overloads */ 8,
+ /* overloads */ &kOverloads[149],
+ },
+ {
+ /* [5] */
+ /* ctor vec3<T : scalar>() -> vec3<T> */
+ /* ctor vec3<T : scalar>(vec3<T>) -> vec3<T> */
+ /* ctor vec3<T : abstract_or_scalar>(T) -> vec3<T> */
+ /* ctor vec3<T : abstract_or_scalar>(x: T, y: T, z: T) -> vec3<T> */
+ /* ctor vec3<T : abstract_or_scalar>(xy: vec2<T>, z: T) -> vec3<T> */
+ /* ctor vec3<T : abstract_or_scalar>(x: T, yz: vec2<T>) -> vec3<T> */
+ /* conv vec3<T : f32, U : scalar_no_f32>(vec3<U>) -> vec3<f32> */
+ /* conv vec3<T : i32, U : scalar_no_i32>(vec3<U>) -> vec3<i32> */
+ /* conv vec3<T : u32, U : scalar_no_u32>(vec3<U>) -> vec3<u32> */
+ /* conv vec3<T : bool, U : scalar_no_bool>(vec3<U>) -> vec3<bool> */
+ /* num overloads */ 10,
+ /* overloads */ &kOverloads[95],
+ },
+ {
+ /* [6] */
+ /* ctor vec4<T : scalar>() -> vec4<T> */
+ /* ctor vec4<T : scalar>(vec4<T>) -> vec4<T> */
+ /* ctor vec4<T : abstract_or_scalar>(T) -> vec4<T> */
+ /* ctor vec4<T : abstract_or_scalar>(x: T, y: T, z: T, w: T) -> vec4<T> */
+ /* ctor vec4<T : abstract_or_scalar>(xy: vec2<T>, z: T, w: T) -> vec4<T> */
+ /* ctor vec4<T : abstract_or_scalar>(x: T, yz: vec2<T>, w: T) -> vec4<T> */
+ /* ctor vec4<T : abstract_or_scalar>(x: T, y: T, zw: vec2<T>) -> vec4<T> */
+ /* ctor vec4<T : abstract_or_scalar>(xy: vec2<T>, zw: vec2<T>) -> vec4<T> */
+ /* ctor vec4<T : abstract_or_scalar>(xyz: vec3<T>, w: T) -> vec4<T> */
+  /* ctor vec4<T : abstract_or_scalar>(x: T, yzw: vec3<T>) -> vec4<T> */
+ /* conv vec4<T : f32, U : scalar_no_f32>(vec4<U>) -> vec4<f32> */
+ /* conv vec4<T : i32, U : scalar_no_i32>(vec4<U>) -> vec4<i32> */
+ /* conv vec4<T : u32, U : scalar_no_u32>(vec4<U>) -> vec4<u32> */
+ /* conv vec4<T : bool, U : scalar_no_bool>(vec4<U>) -> vec4<bool> */
+ /* num overloads */ 14,
+ /* overloads */ &kOverloads[57],
+ },
+ {
+ /* [7] */
+ /* ctor mat2x2() -> mat2x2<f32> */
+ /* ctor mat2x2<f32>(mat2x2<f32>) -> mat2x2<f32> */
+ /* ctor mat2x2<T : af_f32>(T) -> mat2x2<T> */
+ /* ctor mat2x2<T : af_f32>(T, T, T, T) -> mat2x2<T> */
+ /* ctor mat2x2<T : af_f32>(vec2<T>, vec2<T>) -> mat2x2<T> */
+ /* num overloads */ 5,
+ /* overloads */ &kOverloads[190],
+ },
+ {
+ /* [8] */
+ /* ctor mat2x3() -> mat2x3<f32> */
+ /* ctor mat2x3<f32>(mat2x3<f32>) -> mat2x3<f32> */
+ /* ctor mat2x3<T : af_f32>(T) -> mat2x3<T> */
+ /* ctor mat2x3<T : af_f32>(T, T, T, T, T, T) -> mat2x3<T> */
+ /* ctor mat2x3<T : af_f32>(vec3<T>, vec3<T>) -> mat2x3<T> */
+ /* num overloads */ 5,
+ /* overloads */ &kOverloads[175],
+ },
+ {
+ /* [9] */
+ /* ctor mat2x4() -> mat2x4<f32> */
+ /* ctor mat2x4<f32>(mat2x4<f32>) -> mat2x4<f32> */
+ /* ctor mat2x4<T : af_f32>(T) -> mat2x4<T> */
+ /* ctor mat2x4<T : af_f32>(T, T, T, T, T, T, T, T) -> mat2x4<T> */
+ /* ctor mat2x4<T : af_f32>(vec4<T>, vec4<T>) -> mat2x4<T> */
+ /* num overloads */ 5,
+ /* overloads */ &kOverloads[200],
+ },
+ {
+ /* [10] */
+ /* ctor mat3x2() -> mat3x2<f32> */
+ /* ctor mat3x2<f32>(mat3x2<f32>) -> mat3x2<f32> */
+ /* ctor mat3x2<T : af_f32>(T) -> mat3x2<T> */
+ /* ctor mat3x2<T : af_f32>(T, T, T, T, T, T) -> mat3x2<T> */
+ /* ctor mat3x2<T : af_f32>(vec2<T>, vec2<T>, vec2<T>) -> mat3x2<T> */
+ /* num overloads */ 5,
+ /* overloads */ &kOverloads[195],
+ },
+ {
+ /* [11] */
+ /* ctor mat3x3() -> mat3x3<f32> */
+ /* ctor mat3x3<f32>(mat3x3<f32>) -> mat3x3<f32> */
+ /* ctor mat3x3<T : af_f32>(T) -> mat3x3<T> */
+ /* ctor mat3x3<T : af_f32>(T, T, T, T, T, T, T, T, T) -> mat3x3<T> */
+ /* ctor mat3x3<T : af_f32>(vec3<T>, vec3<T>, vec3<T>) -> mat3x3<T> */
+ /* num overloads */ 5,
+ /* overloads */ &kOverloads[230],
+ },
+ {
+ /* [12] */
+ /* ctor mat3x4() -> mat3x4<f32> */
+ /* ctor mat3x4<f32>(mat3x4<f32>) -> mat3x4<f32> */
+ /* ctor mat3x4<T : af_f32>(T) -> mat3x4<T> */
+ /* ctor mat3x4<T : af_f32>(T, T, T, T, T, T, T, T, T, T, T, T) -> mat3x4<T> */
+ /* ctor mat3x4<T : af_f32>(vec4<T>, vec4<T>, vec4<T>) -> mat3x4<T> */
+ /* num overloads */ 5,
+ /* overloads */ &kOverloads[180],
+ },
+ {
+ /* [13] */
+ /* ctor mat4x2() -> mat4x2<f32> */
+ /* ctor mat4x2<f32>(mat4x2<f32>) -> mat4x2<f32> */
+ /* ctor mat4x2<T : af_f32>(T) -> mat4x2<T> */
+ /* ctor mat4x2<T : af_f32>(T, T, T, T, T, T, T, T) -> mat4x2<T> */
+ /* ctor mat4x2<T : af_f32>(vec2<T>, vec2<T>, vec2<T>, vec2<T>) -> mat4x2<T> */
+ /* num overloads */ 5,
+ /* overloads */ &kOverloads[185],
+ },
+ {
+ /* [14] */
+ /* ctor mat4x3() -> mat4x3<f32> */
+ /* ctor mat4x3<f32>(mat4x3<f32>) -> mat4x3<f32> */
+ /* ctor mat4x3<T : af_f32>(T) -> mat4x3<T> */
+ /* ctor mat4x3<T : af_f32>(T, T, T, T, T, T, T, T, T, T, T, T) -> mat4x3<T> */
+ /* ctor mat4x3<T : af_f32>(vec3<T>, vec3<T>, vec3<T>, vec3<T>) -> mat4x3<T> */
+ /* num overloads */ 5,
+ /* overloads */ &kOverloads[210],
+ },
+ {
+ /* [15] */
+ /* ctor mat4x4() -> mat4x4<f32> */
+ /* ctor mat4x4<f32>(mat4x4<f32>) -> mat4x4<f32> */
+ /* ctor mat4x4<T : af_f32>(T) -> mat4x4<T> */
+ /* ctor mat4x4<T : af_f32>(T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T) -> mat4x4<T> */
+ /* ctor mat4x4<T : af_f32>(vec4<T>, vec4<T>, vec4<T>, vec4<T>) -> mat4x4<T> */
+ /* num overloads */ 5,
+ /* overloads */ &kOverloads[225],
+ },
+};
+
+// clang-format on
diff --git a/chromium/third_party/dawn/src/tint/builtin_table.inl.tmpl b/chromium/third_party/dawn/src/tint/resolver/intrinsic_table.inl.tmpl
index b7604b84a9b..663013ed7f1 100644
--- a/chromium/third_party/dawn/src/tint/builtin_table.inl.tmpl
+++ b/chromium/third_party/dawn/src/tint/resolver/intrinsic_table.inl.tmpl
@@ -4,7 +4,7 @@ Template file for use with tools/builtin-gen to generate builtin_table.inl
Used by BuiltinTable.cc for builtin overload resolution.
See:
-* tools/cmd/builtin-gen/gen for structures used by this template
+* tools/cmd/intrinsic-gen/gen for structures used by this template
* https://golang.org/pkg/text/template/ for documentation on the template syntax
--------------------------------------------------------------------------------
*/ -}}
@@ -23,7 +23,7 @@ See:
{{ end -}}
{{- end -}}
-{{- with BuiltinTable -}}
+{{- with IntrinsicTable -}}
{{- template "Matchers" . }}
constexpr MatcherIndex kMatcherIndices[] = {
@@ -51,8 +51,8 @@ constexpr ParameterInfo kParameters[] = {
{{- end }}
};
-constexpr OpenTypeInfo kOpenTypes[] = {
-{{- range $i, $o := .OpenTypes }}
+constexpr TemplateTypeInfo kTemplateTypes[] = {
+{{- range $i, $o := .TemplateTypes }}
{
/* [{{$i}}] */
/* name */ "{{$o.Name}}",
@@ -64,8 +64,8 @@ constexpr OpenTypeInfo kOpenTypes[] = {
{{- end }}
};
-constexpr OpenNumberInfo kOpenNumbers[] = {
-{{- range $i, $o := .OpenNumbers }}
+constexpr TemplateNumberInfo kTemplateNumbers[] = {
+{{- range $i, $o := .TemplateNumbers }}
{
/* [{{$i}}] */
/* name */ "{{$o.Name}}",
@@ -82,39 +82,90 @@ constexpr OverloadInfo kOverloads[] = {
{
/* [{{$i}}] */
/* num parameters */ {{$o.NumParameters}},
- /* num open types */ {{$o.NumOpenTypes}},
- /* num open numbers */ {{$o.NumOpenNumbers}},
- /* open types */
-{{- if $o.OpenTypesOffset }} &kOpenTypes[{{$o.OpenTypesOffset}}],
-{{- else }} nullptr,
+ /* num template types */ {{$o.NumTemplateTypes}},
+ /* num template numbers */ {{$o.NumTemplateNumbers}},
+ /* template types */
+{{- if $o.TemplateTypesOffset }} &kTemplateTypes[{{$o.TemplateTypesOffset}}],
+{{- else }} nullptr,
{{- end }}
- /* open numbers */
-{{- if $o.OpenNumbersOffset }} &kOpenNumbers[{{$o.OpenNumbersOffset}}]
-{{- else }} nullptr
+ /* template numbers */
+{{- if $o.TemplateNumbersOffset }} &kTemplateNumbers[{{$o.TemplateNumbersOffset}}]
+{{- else }} nullptr
{{- end }},
/* parameters */ &kParameters[{{$o.ParametersOffset}}],
/* return matcher indices */
{{- if $o.ReturnMatcherIndicesOffset }} &kMatcherIndices[{{$o.ReturnMatcherIndicesOffset}}]
{{- else }} nullptr
{{- end }},
- /* supported_stages */ PipelineStageSet(
+ /* flags */ OverloadFlags(OverloadFlag::kIs{{Title $o.Kind}}
{{- range $i, $u := $o.CanBeUsedInStage.List -}}
-{{- if $i -}}, {{end}}PipelineStage::k{{Title $u}}
-{{- end }}),
- /* is_deprecated */ {{$o.IsDeprecated}},
+ , OverloadFlag::kSupports{{Title $u}}Pipeline
+{{- end }}
+{{- if $o.IsDeprecated}}, OverloadFlag::kIsDeprecated{{end }}),
+ /* const eval */
+{{- if $o.ConstEvalFunction }} const_eval::{{$o.ConstEvalFunction}},
+{{- else }} nullptr,
+{{- end }}
+ },
+{{- end }}
+};
+
+constexpr IntrinsicInfo kBuiltins[] = {
+{{- range $i, $b := .Builtins }}
+ {
+ /* [{{$i}}] */
+{{- range $b.OverloadDescriptions }}
+ /* {{.}} */
+{{- end }}
+ /* num overloads */ {{$b.NumOverloads}},
+ /* overloads */ &kOverloads[{{$b.OverloadsOffset}}],
},
{{- end }}
};
-constexpr BuiltinInfo kBuiltins[] = {
-{{- range $i, $f := .Functions }}
+constexpr IntrinsicInfo kUnaryOperators[] = {
+{{- range $i, $o := .UnaryOperators }}
+ {
+ /* [{{$i}}] */
+{{- range $o.OverloadDescriptions }}
+ /* {{.}} */
+{{- end }}
+ /* num overloads */ {{$o.NumOverloads}},
+ /* overloads */ &kOverloads[{{$o.OverloadsOffset}}],
+ },
+{{- end }}
+};
+
+{{- range $i, $o := .UnaryOperators }}
+constexpr uint8_t kUnaryOperator{{template "OperatorName" $o.Name}} = {{$i}};
+{{- end }}
+
+constexpr IntrinsicInfo kBinaryOperators[] = {
+{{- range $i, $o := .BinaryOperators }}
{
/* [{{$i}}] */
-{{- range $f.OverloadDescriptions }}
+{{- range $o.OverloadDescriptions }}
/* {{.}} */
{{- end }}
- /* num overloads */ {{$f.NumOverloads}},
- /* overloads */ &kOverloads[{{$f.OverloadsOffset}}],
+ /* num overloads */ {{$o.NumOverloads}},
+ /* overloads */ &kOverloads[{{$o.OverloadsOffset}}],
+ },
+{{- end }}
+};
+
+{{- range $i, $o := .BinaryOperators }}
+constexpr uint8_t kBinaryOperator{{template "OperatorName" $o.Name}} = {{$i}};
+{{- end }}
+
+constexpr IntrinsicInfo kConstructorsAndConverters[] = {
+{{- range $i, $o := .ConstructorsAndConverters }}
+ {
+ /* [{{$i}}] */
+{{- range $o.OverloadDescriptions }}
+ /* {{.}} */
+{{- end }}
+ /* num overloads */ {{$o.NumOverloads}},
+ /* overloads */ &kOverloads[{{$o.OverloadsOffset}}],
},
{{- end }}
};
@@ -133,7 +184,7 @@ constexpr BuiltinInfo kBuiltins[] = {
class {{$class}} : public TypeMatcher {
public:
/// Checks whether the given type matches the matcher rules.
- /// Match may close open types and numbers in state.
+ /// Match may define and refine the template types and numbers in state.
/// @param state the MatchState
/// @param type the type to match
/// @returns the canonicalized type on match, otherwise nullptr
@@ -141,7 +192,7 @@ class {{$class}} : public TypeMatcher {
const sem::Type* type) const override;
/// @param state the MatchState
/// @return a string representation of the matcher.
- std::string String(MatchState& state) const override;
+ std::string String(MatchState* state) const override;
};
const sem::Type* {{$class}}::Match(MatchState& state, const sem::Type* ty) const {
@@ -160,7 +211,7 @@ const sem::Type* {{$class}}::Match(MatchState& state, const sem::Type* ty) const
return build_{{TrimLeft .Name "_"}}(state{{range .TemplateParams}}, {{.GetName}}{{end}});
}
-std::string {{$class}}::String(MatchState&{{if .TemplateParams}} state{{end}}) const {
+std::string {{$class}}::String(MatchState*{{if .TemplateParams}} state{{end}}) const {
{{- range .TemplateParams }}
{{- template "DeclareLocalTemplateParamName" . }}
{{- end }}
@@ -189,7 +240,7 @@ class {{$class}} : public TypeMatcher {
public:
/// Checks whether the given type matches the matcher rules, and returns the
/// expected, canonicalized type on success.
- /// Match may close open types and numbers in state.
+ /// Match may define and refine the template types and numbers in state.
/// @param state the MatchState
/// @param type the type to match
/// @returns the canonicalized type on match, otherwise nullptr
@@ -197,11 +248,11 @@ class {{$class}} : public TypeMatcher {
const sem::Type* type) const override;
/// @param state the MatchState
/// @return a string representation of the matcher.
- std::string String(MatchState& state) const override;
+ std::string String(MatchState* state) const override;
};
const sem::Type* {{$class}}::Match(MatchState& state, const sem::Type* ty) const {
-{{- range .Types }}
+{{- range .PrecedenceSortedTypes }}
if (match_{{.Name}}(ty)) {
return build_{{.Name}}(state);
}
@@ -209,15 +260,18 @@ const sem::Type* {{$class}}::Match(MatchState& state, const sem::Type* ty) const
return nullptr;
}
-std::string {{$class}}::String(MatchState&) const {
- return "
+std::string {{$class}}::String(MatchState*) const {
+ std::stringstream ss;
+ // Note: We pass nullptr to the TypeMatcher::String() functions, as 'matcher's do not support
+ // template arguments, nor can they match sub-types. As such, they have no use for the MatchState.
+ ss
{{- range .Types -}}
-{{- if IsFirstIn . $.Types }}{{.Name}}
-{{- else if IsLastIn . $.Types }} or {{.Name}}
-{{- else }}, {{.Name}}
+{{- if IsFirstIn . $.Types }} << {{PascalCase .Name}}().String(nullptr)
+{{- else if IsLastIn . $.Types }} << " or " << {{PascalCase .Name}}().String(nullptr)
+{{- else }} << ", " << {{PascalCase .Name}}().String(nullptr)
{{- end -}}
-{{- end -}}
- ";
+{{- end -}};
+ return ss.str();
}
{{ end -}}
@@ -233,14 +287,14 @@ std::string {{$class}}::String(MatchState&) const {
class {{$class}} : public NumberMatcher {
public:
/// Checks whether the given number matches the enum matcher rules.
- /// Match may close open types and numbers in state.
+ /// Match may define template numbers in state.
/// @param state the MatchState
/// @param number the enum value as a Number
/// @return true if the enum value matches the set
Number Match(MatchState& state, Number number) const override;
/// @param state the MatchState
/// @return a string representation of the matcher.
- std::string String(MatchState& state) const override;
+ std::string String(MatchState* state) const override;
};
{{ if eq 1 (len .Options) -}}
@@ -265,7 +319,7 @@ Number {{$class}}::Match(MatchState&, Number number) const {
}
{{- end }}
-std::string {{$class}}::String(MatchState&) const {
+std::string {{$class}}::String(MatchState*) const {
return "
{{- range .Options -}}
{{- if IsFirstIn . $.Options }}{{.Name}}
@@ -285,15 +339,15 @@ class Matchers {
private:
{{- $t_names := Map -}}
{{- $n_names := Map -}}
-{{- range Iterate .Sem.MaxOpenTypes -}}
-{{- $name := printf "open_type_%v" . -}}
+{{- range Iterate .Sem.MaxTemplateTypes -}}
+{{- $name := printf "template_type_%v" . -}}
{{- $t_names.Put . $name }}
- OpenTypeMatcher {{$name}}_{ {{- . -}} };
+ TemplateTypeMatcher {{$name}}_{ {{- . -}} };
{{- end }}
-{{- range Iterate .Sem.MaxOpenNumbers -}}
-{{- $name := printf "open_number_%v" . -}}
+{{- range Iterate .Sem.MaxTemplateNumbers -}}
+{{- $name := printf "template_number_%v" . -}}
{{- $n_names.Put . $name }}
- OpenNumberMatcher {{$name}}_{ {{- . -}} };
+ TemplateNumberMatcher {{$name}}_{ {{- . -}} };
{{- end }}
{{- range .Sem.Types -}}
{{- $name := PascalCase .Name -}}
@@ -317,7 +371,7 @@ class Matchers {
/// Destructor
~Matchers();
- /// The open-types, types, and type matchers
+ /// The template types, types, and type matchers
TypeMatcher const* const type[{{len .TMatchers}}] = {
{{- range $i, $m := .TMatchers }}
/* [{{$i}}] */
@@ -327,7 +381,7 @@ class Matchers {
{{- end }}
};
- /// The open-numbers, and number matchers
+ /// The template numbers, and number matchers
NumberMatcher const* const number[{{len .NMatchers}}] = {
{{- range $i, $m := .NMatchers }}
/* [{{$i}}] */
@@ -358,11 +412,11 @@ Matchers::~Matchers() = default;
{{- define "DeclareLocalTemplateParamName" -}}
{{- /* ------------------------------------------------------------------ */ -}}
{{- if IsTemplateTypeParam . }}
- const std::string {{.Name}} = state.TypeName();
+ const std::string {{.Name}} = state->TypeName();
{{- else if IsTemplateNumberParam . }}
- const std::string {{.Name}} = state.NumName();
+ const std::string {{.Name}} = state->NumName();
{{- else if IsTemplateEnumParam . }}
- const std::string {{.Name}} = state.NumName();
+ const std::string {{.Name}} = state->NumName();
{{- end -}}
{{- end -}}
@@ -399,3 +453,31 @@ Matchers::~Matchers() = default;
{{- end -}}
{{- end -}}
{{- end -}}
+
+{{- /* ------------------------------------------------------------------ */ -}}
+{{- define "OperatorName" -}}
+{{- /* ------------------------------------------------------------------ */ -}}
+{{- if eq . "<<" -}}ShiftLeft
+{{- else if eq . "&" -}}And
+{{- else if eq . "|" -}}Or
+{{- else if eq . "^" -}}Xor
+{{- else if eq . "&&" -}}LogicalAnd
+{{- else if eq . "||" -}}LogicalOr
+{{- else if eq . "==" -}}Equal
+{{- else if eq . "!" -}}Not
+{{- else if eq . "!=" -}}NotEqual
+{{- else if eq . "~" -}}Complement
+{{- else if eq . "<" -}}LessThan
+{{- else if eq . ">" -}}GreaterThan
+{{- else if eq . "<=" -}}LessThanEqual
+{{- else if eq . ">=" -}}GreaterThanEqual
+{{- else if eq . "<<" -}}ShiftLeft
+{{- else if eq . ">>" -}}ShiftRight
+{{- else if eq . "+" -}}Plus
+{{- else if eq . "-" -}}Minus
+{{- else if eq . "*" -}}Star
+{{- else if eq . "/" -}}Divide
+{{- else if eq . "%" -}}Modulo
+{{- else -}}<unknown-{{.}}>
+{{- end -}}
+{{- end -}}
diff --git a/chromium/third_party/dawn/src/tint/resolver/intrinsic_table_test.cc b/chromium/third_party/dawn/src/tint/resolver/intrinsic_table_test.cc
new file mode 100644
index 00000000000..e2c651e8745
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/resolver/intrinsic_table_test.cc
@@ -0,0 +1,1254 @@
+// Copyright 2021 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/tint/resolver/intrinsic_table.h"
+
+#include <utility>
+
+#include "gmock/gmock.h"
+#include "src/tint/program_builder.h"
+#include "src/tint/resolver/resolver_test_helper.h"
+#include "src/tint/sem/atomic.h"
+#include "src/tint/sem/depth_multisampled_texture.h"
+#include "src/tint/sem/depth_texture.h"
+#include "src/tint/sem/external_texture.h"
+#include "src/tint/sem/multisampled_texture.h"
+#include "src/tint/sem/reference.h"
+#include "src/tint/sem/sampled_texture.h"
+#include "src/tint/sem/storage_texture.h"
+#include "src/tint/sem/test_helper.h"
+#include "src/tint/sem/type_constructor.h"
+#include "src/tint/sem/type_conversion.h"
+
+namespace tint::resolver {
+namespace {
+
+using ::testing::HasSubstr;
+
+using BuiltinType = sem::BuiltinType;
+using Parameter = sem::Parameter;
+using ParameterUsage = sem::ParameterUsage;
+
+using AFloatV = builder::vec<3, AFloat>;
+using AIntV = builder::vec<3, AInt>;
+using f32V = builder::vec<3, f32>;
+using i32V = builder::vec<3, i32>;
+using u32V = builder::vec<3, u32>;
+
+class IntrinsicTableTest : public testing::Test, public ProgramBuilder {
+ public:
+ std::unique_ptr<IntrinsicTable> table = IntrinsicTable::Create(*this);
+};
+
+TEST_F(IntrinsicTableTest, MatchF32) {
+ auto* f32 = create<sem::F32>();
+ auto result = table->Lookup(BuiltinType::kCos, {f32}, Source{});
+ ASSERT_NE(result.sem, nullptr) << Diagnostics().str();
+ ASSERT_EQ(Diagnostics().str(), "");
+ EXPECT_EQ(result.sem->Type(), BuiltinType::kCos);
+ EXPECT_EQ(result.sem->ReturnType(), f32);
+ ASSERT_EQ(result.sem->Parameters().size(), 1u);
+ EXPECT_EQ(result.sem->Parameters()[0]->Type(), f32);
+}
+
+TEST_F(IntrinsicTableTest, MismatchF32) {
+ auto* i32 = create<sem::I32>();
+ auto result = table->Lookup(BuiltinType::kCos, {i32}, Source{});
+ ASSERT_EQ(result.sem, nullptr);
+ ASSERT_THAT(Diagnostics().str(), HasSubstr("no matching call"));
+}
+
+TEST_F(IntrinsicTableTest, MatchU32) {
+ auto* f32 = create<sem::F32>();
+ auto* u32 = create<sem::U32>();
+ auto* vec2_f32 = create<sem::Vector>(f32, 2u);
+ auto result = table->Lookup(BuiltinType::kUnpack2x16float, {u32}, Source{});
+ ASSERT_NE(result.sem, nullptr) << Diagnostics().str();
+ ASSERT_EQ(Diagnostics().str(), "");
+ EXPECT_EQ(result.sem->Type(), BuiltinType::kUnpack2x16float);
+ EXPECT_EQ(result.sem->ReturnType(), vec2_f32);
+ ASSERT_EQ(result.sem->Parameters().size(), 1u);
+ EXPECT_EQ(result.sem->Parameters()[0]->Type(), u32);
+}
+
+TEST_F(IntrinsicTableTest, MismatchU32) {
+ auto* f32 = create<sem::F32>();
+ auto result = table->Lookup(BuiltinType::kUnpack2x16float, {f32}, Source{});
+ ASSERT_EQ(result.sem, nullptr);
+ ASSERT_THAT(Diagnostics().str(), HasSubstr("no matching call"));
+}
+
+TEST_F(IntrinsicTableTest, MatchI32) {
+ auto* f32 = create<sem::F32>();
+ auto* i32 = create<sem::I32>();
+ auto* vec4_f32 = create<sem::Vector>(f32, 4u);
+ auto* tex = create<sem::SampledTexture>(ast::TextureDimension::k1d, f32);
+ auto result = table->Lookup(BuiltinType::kTextureLoad, {tex, i32, i32}, Source{});
+ ASSERT_NE(result.sem, nullptr) << Diagnostics().str();
+ ASSERT_EQ(Diagnostics().str(), "");
+ EXPECT_EQ(result.sem->Type(), BuiltinType::kTextureLoad);
+ EXPECT_EQ(result.sem->ReturnType(), vec4_f32);
+ ASSERT_EQ(result.sem->Parameters().size(), 3u);
+ EXPECT_EQ(result.sem->Parameters()[0]->Type(), tex);
+ EXPECT_EQ(result.sem->Parameters()[0]->Usage(), ParameterUsage::kTexture);
+ EXPECT_EQ(result.sem->Parameters()[1]->Type(), i32);
+ EXPECT_EQ(result.sem->Parameters()[1]->Usage(), ParameterUsage::kCoords);
+ EXPECT_EQ(result.sem->Parameters()[2]->Type(), i32);
+ EXPECT_EQ(result.sem->Parameters()[2]->Usage(), ParameterUsage::kLevel);
+}
+
+TEST_F(IntrinsicTableTest, MismatchI32) {
+ auto* f32 = create<sem::F32>();
+ auto* tex = create<sem::SampledTexture>(ast::TextureDimension::k1d, f32);
+ auto result = table->Lookup(BuiltinType::kTextureLoad, {tex, f32}, Source{});
+ ASSERT_EQ(result.sem, nullptr);
+ ASSERT_THAT(Diagnostics().str(), HasSubstr("no matching call"));
+}
+
+TEST_F(IntrinsicTableTest, MatchIU32AsI32) {
+ auto* i32 = create<sem::I32>();
+ auto result = table->Lookup(BuiltinType::kCountOneBits, {i32}, Source{});
+ ASSERT_NE(result.sem, nullptr) << Diagnostics().str();
+ ASSERT_EQ(Diagnostics().str(), "");
+ EXPECT_EQ(result.sem->Type(), BuiltinType::kCountOneBits);
+ EXPECT_EQ(result.sem->ReturnType(), i32);
+ ASSERT_EQ(result.sem->Parameters().size(), 1u);
+ EXPECT_EQ(result.sem->Parameters()[0]->Type(), i32);
+}
+
+TEST_F(IntrinsicTableTest, MatchIU32AsU32) {
+ auto* u32 = create<sem::U32>();
+ auto result = table->Lookup(BuiltinType::kCountOneBits, {u32}, Source{});
+ ASSERT_NE(result.sem, nullptr) << Diagnostics().str();
+ ASSERT_EQ(Diagnostics().str(), "");
+ EXPECT_EQ(result.sem->Type(), BuiltinType::kCountOneBits);
+ EXPECT_EQ(result.sem->ReturnType(), u32);
+ ASSERT_EQ(result.sem->Parameters().size(), 1u);
+ EXPECT_EQ(result.sem->Parameters()[0]->Type(), u32);
+}
+
+TEST_F(IntrinsicTableTest, MismatchIU32) {
+ auto* f32 = create<sem::F32>();
+ auto result = table->Lookup(BuiltinType::kCountOneBits, {f32}, Source{});
+ ASSERT_EQ(result.sem, nullptr);
+ ASSERT_THAT(Diagnostics().str(), HasSubstr("no matching call"));
+}
+
+TEST_F(IntrinsicTableTest, MatchFIU32AsI32) {
+ auto* i32 = create<sem::I32>();
+ auto result = table->Lookup(BuiltinType::kClamp, {i32, i32, i32}, Source{});
+ ASSERT_NE(result.sem, nullptr) << Diagnostics().str();
+ ASSERT_EQ(Diagnostics().str(), "");
+ EXPECT_EQ(result.sem->Type(), BuiltinType::kClamp);
+ EXPECT_EQ(result.sem->ReturnType(), i32);
+ ASSERT_EQ(result.sem->Parameters().size(), 3u);
+ EXPECT_EQ(result.sem->Parameters()[0]->Type(), i32);
+ EXPECT_EQ(result.sem->Parameters()[1]->Type(), i32);
+ EXPECT_EQ(result.sem->Parameters()[2]->Type(), i32);
+}
+
+TEST_F(IntrinsicTableTest, MatchFIU32AsU32) {
+ auto* u32 = create<sem::U32>();
+ auto result = table->Lookup(BuiltinType::kClamp, {u32, u32, u32}, Source{});
+ ASSERT_NE(result.sem, nullptr) << Diagnostics().str();
+ ASSERT_EQ(Diagnostics().str(), "");
+ EXPECT_EQ(result.sem->Type(), BuiltinType::kClamp);
+ EXPECT_EQ(result.sem->ReturnType(), u32);
+ ASSERT_EQ(result.sem->Parameters().size(), 3u);
+ EXPECT_EQ(result.sem->Parameters()[0]->Type(), u32);
+ EXPECT_EQ(result.sem->Parameters()[1]->Type(), u32);
+ EXPECT_EQ(result.sem->Parameters()[2]->Type(), u32);
+}
+
+TEST_F(IntrinsicTableTest, MatchFIU32AsF32) {
+ auto* f32 = create<sem::F32>();
+ auto result = table->Lookup(BuiltinType::kClamp, {f32, f32, f32}, Source{});
+ ASSERT_NE(result.sem, nullptr) << Diagnostics().str();
+ ASSERT_EQ(Diagnostics().str(), "");
+ EXPECT_EQ(result.sem->Type(), BuiltinType::kClamp);
+ EXPECT_EQ(result.sem->ReturnType(), f32);
+ ASSERT_EQ(result.sem->Parameters().size(), 3u);
+ EXPECT_EQ(result.sem->Parameters()[0]->Type(), f32);
+ EXPECT_EQ(result.sem->Parameters()[1]->Type(), f32);
+ EXPECT_EQ(result.sem->Parameters()[2]->Type(), f32);
+}
+
+TEST_F(IntrinsicTableTest, MismatchFIU32) {
+ auto* bool_ = create<sem::Bool>();
+ auto result = table->Lookup(BuiltinType::kClamp, {bool_, bool_, bool_}, Source{});
+ ASSERT_EQ(result.sem, nullptr);
+ ASSERT_THAT(Diagnostics().str(), HasSubstr("no matching call"));
+}
+
+TEST_F(IntrinsicTableTest, MatchBool) {
+ auto* f32 = create<sem::F32>();
+ auto* bool_ = create<sem::Bool>();
+ auto result = table->Lookup(BuiltinType::kSelect, {f32, f32, bool_}, Source{});
+ ASSERT_NE(result.sem, nullptr) << Diagnostics().str();
+ ASSERT_EQ(Diagnostics().str(), "");
+ EXPECT_EQ(result.sem->Type(), BuiltinType::kSelect);
+ EXPECT_EQ(result.sem->ReturnType(), f32);
+ ASSERT_EQ(result.sem->Parameters().size(), 3u);
+ EXPECT_EQ(result.sem->Parameters()[0]->Type(), f32);
+ EXPECT_EQ(result.sem->Parameters()[1]->Type(), f32);
+ EXPECT_EQ(result.sem->Parameters()[2]->Type(), bool_);
+}
+
+TEST_F(IntrinsicTableTest, MismatchBool) {
+ auto* f32 = create<sem::F32>();
+ auto result = table->Lookup(BuiltinType::kSelect, {f32, f32, f32}, Source{});
+ ASSERT_EQ(result.sem, nullptr);
+ ASSERT_THAT(Diagnostics().str(), HasSubstr("no matching call"));
+}
+
+TEST_F(IntrinsicTableTest, MatchPointer) {
+ auto* i32 = create<sem::I32>();
+ auto* atomicI32 = create<sem::Atomic>(i32);
+ auto* ptr =
+ create<sem::Pointer>(atomicI32, ast::StorageClass::kWorkgroup, ast::Access::kReadWrite);
+ auto result = table->Lookup(BuiltinType::kAtomicLoad, {ptr}, Source{});
+ ASSERT_NE(result.sem, nullptr) << Diagnostics().str();
+ ASSERT_EQ(Diagnostics().str(), "");
+ EXPECT_EQ(result.sem->Type(), BuiltinType::kAtomicLoad);
+ EXPECT_EQ(result.sem->ReturnType(), i32);
+ ASSERT_EQ(result.sem->Parameters().size(), 1u);
+ EXPECT_EQ(result.sem->Parameters()[0]->Type(), ptr);
+}
+
+TEST_F(IntrinsicTableTest, MismatchPointer) {
+ auto* i32 = create<sem::I32>();
+ auto* atomicI32 = create<sem::Atomic>(i32);
+ auto result = table->Lookup(BuiltinType::kAtomicLoad, {atomicI32}, Source{});
+ ASSERT_EQ(result.sem, nullptr);
+ ASSERT_THAT(Diagnostics().str(), HasSubstr("no matching call"));
+}
+
+TEST_F(IntrinsicTableTest, MatchArray) {
+ auto* arr = create<sem::Array>(create<sem::U32>(), 0u, 4u, 4u, 4u, 4u);
+ auto* arr_ptr = create<sem::Pointer>(arr, ast::StorageClass::kStorage, ast::Access::kReadWrite);
+ auto result = table->Lookup(BuiltinType::kArrayLength, {arr_ptr}, Source{});
+ ASSERT_NE(result.sem, nullptr) << Diagnostics().str();
+ ASSERT_EQ(Diagnostics().str(), "");
+ EXPECT_EQ(result.sem->Type(), BuiltinType::kArrayLength);
+ EXPECT_TRUE(result.sem->ReturnType()->Is<sem::U32>());
+ ASSERT_EQ(result.sem->Parameters().size(), 1u);
+ auto* param_type = result.sem->Parameters()[0]->Type();
+ ASSERT_TRUE(param_type->Is<sem::Pointer>());
+ EXPECT_TRUE(param_type->As<sem::Pointer>()->StoreType()->Is<sem::Array>());
+}
+
+TEST_F(IntrinsicTableTest, MismatchArray) {
+ auto* f32 = create<sem::F32>();
+ auto result = table->Lookup(BuiltinType::kArrayLength, {f32}, Source{});
+ ASSERT_EQ(result.sem, nullptr);
+ ASSERT_THAT(Diagnostics().str(), HasSubstr("no matching call"));
+}
+
+TEST_F(IntrinsicTableTest, MatchSampler) {
+ auto* f32 = create<sem::F32>();
+ auto* vec2_f32 = create<sem::Vector>(f32, 2u);
+ auto* vec4_f32 = create<sem::Vector>(f32, 4u);
+ auto* tex = create<sem::SampledTexture>(ast::TextureDimension::k2d, f32);
+ auto* sampler = create<sem::Sampler>(ast::SamplerKind::kSampler);
+ auto result = table->Lookup(BuiltinType::kTextureSample, {tex, sampler, vec2_f32}, Source{});
+ ASSERT_NE(result.sem, nullptr) << Diagnostics().str();
+ ASSERT_EQ(Diagnostics().str(), "");
+ EXPECT_EQ(result.sem->Type(), BuiltinType::kTextureSample);
+ EXPECT_EQ(result.sem->ReturnType(), vec4_f32);
+ ASSERT_EQ(result.sem->Parameters().size(), 3u);
+ EXPECT_EQ(result.sem->Parameters()[0]->Type(), tex);
+ EXPECT_EQ(result.sem->Parameters()[0]->Usage(), ParameterUsage::kTexture);
+ EXPECT_EQ(result.sem->Parameters()[1]->Type(), sampler);
+ EXPECT_EQ(result.sem->Parameters()[1]->Usage(), ParameterUsage::kSampler);
+ EXPECT_EQ(result.sem->Parameters()[2]->Type(), vec2_f32);
+ EXPECT_EQ(result.sem->Parameters()[2]->Usage(), ParameterUsage::kCoords);
+}
+
+TEST_F(IntrinsicTableTest, MismatchSampler) {
+ auto* f32 = create<sem::F32>();
+ auto* vec2_f32 = create<sem::Vector>(f32, 2u);
+ auto* tex = create<sem::SampledTexture>(ast::TextureDimension::k2d, f32);
+ auto result = table->Lookup(BuiltinType::kTextureSample, {tex, f32, vec2_f32}, Source{});
+ ASSERT_EQ(result.sem, nullptr);
+ ASSERT_THAT(Diagnostics().str(), HasSubstr("no matching call"));
+}
+
+TEST_F(IntrinsicTableTest, MatchSampledTexture) {
+ auto* i32 = create<sem::I32>();
+ auto* f32 = create<sem::F32>();
+ auto* vec2_i32 = create<sem::Vector>(i32, 2u);
+ auto* vec4_f32 = create<sem::Vector>(f32, 4u);
+ auto* tex = create<sem::SampledTexture>(ast::TextureDimension::k2d, f32);
+ auto result = table->Lookup(BuiltinType::kTextureLoad, {tex, vec2_i32, i32}, Source{});
+ ASSERT_NE(result.sem, nullptr) << Diagnostics().str();
+ ASSERT_EQ(Diagnostics().str(), "");
+ EXPECT_EQ(result.sem->Type(), BuiltinType::kTextureLoad);
+ EXPECT_EQ(result.sem->ReturnType(), vec4_f32);
+ ASSERT_EQ(result.sem->Parameters().size(), 3u);
+ EXPECT_EQ(result.sem->Parameters()[0]->Type(), tex);
+ EXPECT_EQ(result.sem->Parameters()[0]->Usage(), ParameterUsage::kTexture);
+ EXPECT_EQ(result.sem->Parameters()[1]->Type(), vec2_i32);
+ EXPECT_EQ(result.sem->Parameters()[1]->Usage(), ParameterUsage::kCoords);
+ EXPECT_EQ(result.sem->Parameters()[2]->Type(), i32);
+ EXPECT_EQ(result.sem->Parameters()[2]->Usage(), ParameterUsage::kLevel);
+}
+
+TEST_F(IntrinsicTableTest, MatchMultisampledTexture) {
+ auto* i32 = create<sem::I32>();
+ auto* f32 = create<sem::F32>();
+ auto* vec2_i32 = create<sem::Vector>(i32, 2u);
+ auto* vec4_f32 = create<sem::Vector>(f32, 4u);
+ auto* tex = create<sem::MultisampledTexture>(ast::TextureDimension::k2d, f32);
+ auto result = table->Lookup(BuiltinType::kTextureLoad, {tex, vec2_i32, i32}, Source{});
+ ASSERT_NE(result.sem, nullptr) << Diagnostics().str();
+ ASSERT_EQ(Diagnostics().str(), "");
+ EXPECT_EQ(result.sem->Type(), BuiltinType::kTextureLoad);
+ EXPECT_EQ(result.sem->ReturnType(), vec4_f32);
+ ASSERT_EQ(result.sem->Parameters().size(), 3u);
+ EXPECT_EQ(result.sem->Parameters()[0]->Type(), tex);
+ EXPECT_EQ(result.sem->Parameters()[0]->Usage(), ParameterUsage::kTexture);
+ EXPECT_EQ(result.sem->Parameters()[1]->Type(), vec2_i32);
+ EXPECT_EQ(result.sem->Parameters()[1]->Usage(), ParameterUsage::kCoords);
+ EXPECT_EQ(result.sem->Parameters()[2]->Type(), i32);
+ EXPECT_EQ(result.sem->Parameters()[2]->Usage(), ParameterUsage::kSampleIndex);
+}
+
+TEST_F(IntrinsicTableTest, MatchDepthTexture) {
+ auto* f32 = create<sem::F32>();
+ auto* i32 = create<sem::I32>();
+ auto* vec2_i32 = create<sem::Vector>(i32, 2u);
+ auto* tex = create<sem::DepthTexture>(ast::TextureDimension::k2d);
+ auto result = table->Lookup(BuiltinType::kTextureLoad, {tex, vec2_i32, i32}, Source{});
+ ASSERT_NE(result.sem, nullptr) << Diagnostics().str();
+ ASSERT_EQ(Diagnostics().str(), "");
+ EXPECT_EQ(result.sem->Type(), BuiltinType::kTextureLoad);
+ EXPECT_EQ(result.sem->ReturnType(), f32);
+ ASSERT_EQ(result.sem->Parameters().size(), 3u);
+ EXPECT_EQ(result.sem->Parameters()[0]->Type(), tex);
+ EXPECT_EQ(result.sem->Parameters()[0]->Usage(), ParameterUsage::kTexture);
+ EXPECT_EQ(result.sem->Parameters()[1]->Type(), vec2_i32);
+ EXPECT_EQ(result.sem->Parameters()[1]->Usage(), ParameterUsage::kCoords);
+ EXPECT_EQ(result.sem->Parameters()[2]->Type(), i32);
+ EXPECT_EQ(result.sem->Parameters()[2]->Usage(), ParameterUsage::kLevel);
+}
+
+TEST_F(IntrinsicTableTest, MatchDepthMultisampledTexture) {
+ auto* f32 = create<sem::F32>();
+ auto* i32 = create<sem::I32>();
+ auto* vec2_i32 = create<sem::Vector>(i32, 2u);
+ auto* tex = create<sem::DepthMultisampledTexture>(ast::TextureDimension::k2d);
+ auto result = table->Lookup(BuiltinType::kTextureLoad, {tex, vec2_i32, i32}, Source{});
+ ASSERT_NE(result.sem, nullptr) << Diagnostics().str();
+ ASSERT_EQ(Diagnostics().str(), "");
+ EXPECT_EQ(result.sem->Type(), BuiltinType::kTextureLoad);
+ EXPECT_EQ(result.sem->ReturnType(), f32);
+ ASSERT_EQ(result.sem->Parameters().size(), 3u);
+ EXPECT_EQ(result.sem->Parameters()[0]->Type(), tex);
+ EXPECT_EQ(result.sem->Parameters()[0]->Usage(), ParameterUsage::kTexture);
+ EXPECT_EQ(result.sem->Parameters()[1]->Type(), vec2_i32);
+ EXPECT_EQ(result.sem->Parameters()[1]->Usage(), ParameterUsage::kCoords);
+ EXPECT_EQ(result.sem->Parameters()[2]->Type(), i32);
+ EXPECT_EQ(result.sem->Parameters()[2]->Usage(), ParameterUsage::kSampleIndex);
+}
+
+TEST_F(IntrinsicTableTest, MatchExternalTexture) {
+ auto* f32 = create<sem::F32>();
+ auto* i32 = create<sem::I32>();
+ auto* vec2_i32 = create<sem::Vector>(i32, 2u);
+ auto* vec4_f32 = create<sem::Vector>(f32, 4u);
+ auto* tex = create<sem::ExternalTexture>();
+ auto result = table->Lookup(BuiltinType::kTextureLoad, {tex, vec2_i32}, Source{});
+ ASSERT_NE(result.sem, nullptr) << Diagnostics().str();
+ ASSERT_EQ(Diagnostics().str(), "");
+ EXPECT_EQ(result.sem->Type(), BuiltinType::kTextureLoad);
+ EXPECT_EQ(result.sem->ReturnType(), vec4_f32);
+ ASSERT_EQ(result.sem->Parameters().size(), 2u);
+ EXPECT_EQ(result.sem->Parameters()[0]->Type(), tex);
+ EXPECT_EQ(result.sem->Parameters()[0]->Usage(), ParameterUsage::kTexture);
+ EXPECT_EQ(result.sem->Parameters()[1]->Type(), vec2_i32);
+ EXPECT_EQ(result.sem->Parameters()[1]->Usage(), ParameterUsage::kCoords);
+}
+
+TEST_F(IntrinsicTableTest, MatchWOStorageTexture) {
+ auto* f32 = create<sem::F32>();
+ auto* i32 = create<sem::I32>();
+ auto* vec2_i32 = create<sem::Vector>(i32, 2u);
+ auto* vec4_f32 = create<sem::Vector>(f32, 4u);
+ auto* subtype = sem::StorageTexture::SubtypeFor(ast::TexelFormat::kR32Float, Types());
+ auto* tex = create<sem::StorageTexture>(ast::TextureDimension::k2d, ast::TexelFormat::kR32Float,
+ ast::Access::kWrite, subtype);
+
+ auto result = table->Lookup(BuiltinType::kTextureStore, {tex, vec2_i32, vec4_f32}, Source{});
+ ASSERT_NE(result.sem, nullptr) << Diagnostics().str();
+ ASSERT_EQ(Diagnostics().str(), "");
+ EXPECT_EQ(result.sem->Type(), BuiltinType::kTextureStore);
+ EXPECT_TRUE(result.sem->ReturnType()->Is<sem::Void>());
+ ASSERT_EQ(result.sem->Parameters().size(), 3u);
+ EXPECT_EQ(result.sem->Parameters()[0]->Type(), tex);
+ EXPECT_EQ(result.sem->Parameters()[0]->Usage(), ParameterUsage::kTexture);
+ EXPECT_EQ(result.sem->Parameters()[1]->Type(), vec2_i32);
+ EXPECT_EQ(result.sem->Parameters()[1]->Usage(), ParameterUsage::kCoords);
+ EXPECT_EQ(result.sem->Parameters()[2]->Type(), vec4_f32);
+ EXPECT_EQ(result.sem->Parameters()[2]->Usage(), ParameterUsage::kValue);
+}
+
+TEST_F(IntrinsicTableTest, MismatchTexture) {
+ auto* f32 = create<sem::F32>();
+ auto* i32 = create<sem::I32>();
+ auto* vec2_i32 = create<sem::Vector>(i32, 2u);
+ auto result = table->Lookup(BuiltinType::kTextureLoad, {f32, vec2_i32}, Source{});
+ ASSERT_EQ(result.sem, nullptr);
+ ASSERT_THAT(Diagnostics().str(), HasSubstr("no matching call"));
+}
+
+TEST_F(IntrinsicTableTest, ImplicitLoadOnReference) {
+ auto* f32 = create<sem::F32>();
+ auto result = table->Lookup(
+ BuiltinType::kCos,
+ {create<sem::Reference>(f32, ast::StorageClass::kFunction, ast::Access::kReadWrite)},
+ Source{});
+ ASSERT_NE(result.sem, nullptr) << Diagnostics().str();
+ ASSERT_EQ(Diagnostics().str(), "");
+ EXPECT_EQ(result.sem->Type(), BuiltinType::kCos);
+ EXPECT_EQ(result.sem->ReturnType(), f32);
+ ASSERT_EQ(result.sem->Parameters().size(), 1u);
+ EXPECT_EQ(result.sem->Parameters()[0]->Type(), f32);
+}
+
+TEST_F(IntrinsicTableTest, MatchTemplateType) {
+ auto* f32 = create<sem::F32>();
+ auto result = table->Lookup(BuiltinType::kClamp, {f32, f32, f32}, Source{});
+ ASSERT_NE(result.sem, nullptr) << Diagnostics().str();
+ ASSERT_EQ(Diagnostics().str(), "");
+ EXPECT_EQ(result.sem->Type(), BuiltinType::kClamp);
+ EXPECT_EQ(result.sem->ReturnType(), f32);
+ EXPECT_EQ(result.sem->Parameters()[0]->Type(), f32);
+ EXPECT_EQ(result.sem->Parameters()[1]->Type(), f32);
+ EXPECT_EQ(result.sem->Parameters()[2]->Type(), f32);
+}
+
+TEST_F(IntrinsicTableTest, MismatchTemplateType) {
+ auto* f32 = create<sem::F32>();
+ auto* u32 = create<sem::U32>();
+ auto result = table->Lookup(BuiltinType::kClamp, {f32, u32, f32}, Source{});
+ ASSERT_EQ(result.sem, nullptr);
+ ASSERT_THAT(Diagnostics().str(), HasSubstr("no matching call"));
+}
+
+TEST_F(IntrinsicTableTest, MatchOpenSizeVector) {
+ auto* f32 = create<sem::F32>();
+ auto* vec2_f32 = create<sem::Vector>(f32, 2u);
+ auto result = table->Lookup(BuiltinType::kClamp, {vec2_f32, vec2_f32, vec2_f32}, Source{});
+ ASSERT_NE(result.sem, nullptr) << Diagnostics().str();
+ ASSERT_EQ(Diagnostics().str(), "");
+ EXPECT_EQ(result.sem->Type(), BuiltinType::kClamp);
+ EXPECT_EQ(result.sem->ReturnType(), vec2_f32);
+ ASSERT_EQ(result.sem->Parameters().size(), 3u);
+ EXPECT_EQ(result.sem->Parameters()[0]->Type(), vec2_f32);
+ EXPECT_EQ(result.sem->Parameters()[1]->Type(), vec2_f32);
+ EXPECT_EQ(result.sem->Parameters()[2]->Type(), vec2_f32);
+}
+
+TEST_F(IntrinsicTableTest, MismatchOpenSizeVector) {
+ auto* f32 = create<sem::F32>();
+ auto* u32 = create<sem::U32>();
+ auto* vec2_f32 = create<sem::Vector>(f32, 2u);
+ auto result = table->Lookup(BuiltinType::kClamp, {vec2_f32, u32, vec2_f32}, Source{});
+ ASSERT_EQ(result.sem, nullptr);
+ ASSERT_THAT(Diagnostics().str(), HasSubstr("no matching call"));
+}
+
+TEST_F(IntrinsicTableTest, MatchOpenSizeMatrix) {
+ auto* f32 = create<sem::F32>();
+ auto* vec3_f32 = create<sem::Vector>(f32, 3u);
+ auto* mat3_f32 = create<sem::Matrix>(vec3_f32, 3u);
+ auto result = table->Lookup(BuiltinType::kDeterminant, {mat3_f32}, Source{});
+ ASSERT_NE(result.sem, nullptr) << Diagnostics().str();
+ ASSERT_EQ(Diagnostics().str(), "");
+ EXPECT_EQ(result.sem->Type(), BuiltinType::kDeterminant);
+ EXPECT_EQ(result.sem->ReturnType(), f32);
+ ASSERT_EQ(result.sem->Parameters().size(), 1u);
+ EXPECT_EQ(result.sem->Parameters()[0]->Type(), mat3_f32);
+}
+
+TEST_F(IntrinsicTableTest, MismatchOpenSizeMatrix) {
+ auto* f32 = create<sem::F32>();
+ auto* vec2_f32 = create<sem::Vector>(f32, 2u);
+ auto* mat3x2_f32 = create<sem::Matrix>(vec2_f32, 3u);
+ auto result = table->Lookup(BuiltinType::kDeterminant, {mat3x2_f32}, Source{});
+ ASSERT_EQ(result.sem, nullptr);
+ ASSERT_THAT(Diagnostics().str(), HasSubstr("no matching call"));
+}
+
+TEST_F(IntrinsicTableTest, OverloadOrderByNumberOfParameters) {
+ // None of the arguments match, so expect the overloads with 2 parameters to
+ // come first
+ auto* bool_ = create<sem::Bool>();
+ table->Lookup(BuiltinType::kTextureDimensions, {bool_, bool_}, Source{});
+ ASSERT_EQ(Diagnostics().str(),
+ R"(error: no matching call to textureDimensions(bool, bool)
+
+27 candidate functions:
+ textureDimensions(texture: texture_1d<T>, level: i32) -> i32 where: T is f32, i32 or u32
+ textureDimensions(texture: texture_2d<T>, level: i32) -> vec2<i32> where: T is f32, i32 or u32
+ textureDimensions(texture: texture_2d_array<T>, level: i32) -> vec2<i32> where: T is f32, i32 or u32
+ textureDimensions(texture: texture_3d<T>, level: i32) -> vec3<i32> where: T is f32, i32 or u32
+ textureDimensions(texture: texture_cube<T>, level: i32) -> vec2<i32> where: T is f32, i32 or u32
+ textureDimensions(texture: texture_cube_array<T>, level: i32) -> vec2<i32> where: T is f32, i32 or u32
+ textureDimensions(texture: texture_depth_2d, level: i32) -> vec2<i32>
+ textureDimensions(texture: texture_depth_2d_array, level: i32) -> vec2<i32>
+ textureDimensions(texture: texture_depth_cube, level: i32) -> vec2<i32>
+ textureDimensions(texture: texture_depth_cube_array, level: i32) -> vec2<i32>
+ textureDimensions(texture: texture_1d<T>) -> i32 where: T is f32, i32 or u32
+ textureDimensions(texture: texture_2d<T>) -> vec2<i32> where: T is f32, i32 or u32
+ textureDimensions(texture: texture_2d_array<T>) -> vec2<i32> where: T is f32, i32 or u32
+ textureDimensions(texture: texture_3d<T>) -> vec3<i32> where: T is f32, i32 or u32
+ textureDimensions(texture: texture_cube<T>) -> vec2<i32> where: T is f32, i32 or u32
+ textureDimensions(texture: texture_cube_array<T>) -> vec2<i32> where: T is f32, i32 or u32
+ textureDimensions(texture: texture_multisampled_2d<T>) -> vec2<i32> where: T is f32, i32 or u32
+ textureDimensions(texture: texture_depth_2d) -> vec2<i32>
+ textureDimensions(texture: texture_depth_2d_array) -> vec2<i32>
+ textureDimensions(texture: texture_depth_cube) -> vec2<i32>
+ textureDimensions(texture: texture_depth_cube_array) -> vec2<i32>
+ textureDimensions(texture: texture_depth_multisampled_2d) -> vec2<i32>
+ textureDimensions(texture: texture_storage_1d<F, A>) -> i32 where: A is write
+ textureDimensions(texture: texture_storage_2d<F, A>) -> vec2<i32> where: A is write
+ textureDimensions(texture: texture_storage_2d_array<F, A>) -> vec2<i32> where: A is write
+ textureDimensions(texture: texture_storage_3d<F, A>) -> vec3<i32> where: A is write
+ textureDimensions(texture: texture_external) -> vec2<i32>
+)");
+}
+
+TEST_F(IntrinsicTableTest, OverloadOrderByMatchingParameter) {
+ auto* tex = create<sem::DepthTexture>(ast::TextureDimension::k2d);
+ auto* bool_ = create<sem::Bool>();
+ table->Lookup(BuiltinType::kTextureDimensions, {tex, bool_}, Source{});
+ ASSERT_EQ(Diagnostics().str(),
+ R"(error: no matching call to textureDimensions(texture_depth_2d, bool)
+
+27 candidate functions:
+ textureDimensions(texture: texture_depth_2d, level: i32) -> vec2<i32>
+ textureDimensions(texture: texture_depth_2d) -> vec2<i32>
+ textureDimensions(texture: texture_1d<T>, level: i32) -> i32 where: T is f32, i32 or u32
+ textureDimensions(texture: texture_2d<T>, level: i32) -> vec2<i32> where: T is f32, i32 or u32
+ textureDimensions(texture: texture_2d_array<T>, level: i32) -> vec2<i32> where: T is f32, i32 or u32
+ textureDimensions(texture: texture_3d<T>, level: i32) -> vec3<i32> where: T is f32, i32 or u32
+ textureDimensions(texture: texture_cube<T>, level: i32) -> vec2<i32> where: T is f32, i32 or u32
+ textureDimensions(texture: texture_cube_array<T>, level: i32) -> vec2<i32> where: T is f32, i32 or u32
+ textureDimensions(texture: texture_depth_2d_array, level: i32) -> vec2<i32>
+ textureDimensions(texture: texture_depth_cube, level: i32) -> vec2<i32>
+ textureDimensions(texture: texture_depth_cube_array, level: i32) -> vec2<i32>
+ textureDimensions(texture: texture_1d<T>) -> i32 where: T is f32, i32 or u32
+ textureDimensions(texture: texture_2d<T>) -> vec2<i32> where: T is f32, i32 or u32
+ textureDimensions(texture: texture_2d_array<T>) -> vec2<i32> where: T is f32, i32 or u32
+ textureDimensions(texture: texture_3d<T>) -> vec3<i32> where: T is f32, i32 or u32
+ textureDimensions(texture: texture_cube<T>) -> vec2<i32> where: T is f32, i32 or u32
+ textureDimensions(texture: texture_cube_array<T>) -> vec2<i32> where: T is f32, i32 or u32
+ textureDimensions(texture: texture_multisampled_2d<T>) -> vec2<i32> where: T is f32, i32 or u32
+ textureDimensions(texture: texture_depth_2d_array) -> vec2<i32>
+ textureDimensions(texture: texture_depth_cube) -> vec2<i32>
+ textureDimensions(texture: texture_depth_cube_array) -> vec2<i32>
+ textureDimensions(texture: texture_depth_multisampled_2d) -> vec2<i32>
+ textureDimensions(texture: texture_storage_1d<F, A>) -> i32 where: A is write
+ textureDimensions(texture: texture_storage_2d<F, A>) -> vec2<i32> where: A is write
+ textureDimensions(texture: texture_storage_2d_array<F, A>) -> vec2<i32> where: A is write
+ textureDimensions(texture: texture_storage_3d<F, A>) -> vec3<i32> where: A is write
+ textureDimensions(texture: texture_external) -> vec2<i32>
+)");
+}
+
+TEST_F(IntrinsicTableTest, SameOverloadReturnsSameBuiltinPointer) {
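+ // Two lookups with identical argument types should return the same builtin
+ // object (pointer equality), while a different overload yields a distinct one.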
+ auto* f32 = create<sem::F32>();
+ auto* vec2_f32 = create<sem::Vector>(create<sem::F32>(), 2u);
+ auto* bool_ = create<sem::Bool>();
+ auto a = table->Lookup(BuiltinType::kSelect, {f32, f32, bool_}, Source{});
+ ASSERT_NE(a.sem, nullptr) << Diagnostics().str();
+
+ auto b = table->Lookup(BuiltinType::kSelect, {f32, f32, bool_}, Source{});
+ ASSERT_NE(b.sem, nullptr) << Diagnostics().str();
+ ASSERT_EQ(Diagnostics().str(), "");
+
+ auto c = table->Lookup(BuiltinType::kSelect, {vec2_f32, vec2_f32, bool_}, Source{});
+ ASSERT_NE(c.sem, nullptr) << Diagnostics().str();
+ ASSERT_EQ(Diagnostics().str(), "");
+
+ EXPECT_EQ(a.sem, b.sem);
+ EXPECT_NE(a.sem, c.sem);
+ EXPECT_NE(b.sem, c.sem);
+}
+
+TEST_F(IntrinsicTableTest, MatchUnaryOp) {
+ auto* i32 = create<sem::I32>();
+ auto* vec3_i32 = create<sem::Vector>(i32, 3u);
+ auto result = table->Lookup(ast::UnaryOp::kNegation, vec3_i32, Source{{12, 34}});
+ EXPECT_EQ(result.result, vec3_i32);
+ EXPECT_EQ(Diagnostics().str(), "");
+}
+
+TEST_F(IntrinsicTableTest, MismatchUnaryOp) {
+ auto* bool_ = create<sem::Bool>();
+ auto result = table->Lookup(ast::UnaryOp::kNegation, bool_, Source{{12, 34}});
+ ASSERT_EQ(result.result, nullptr);
+ EXPECT_EQ(Diagnostics().str(), R"(12:34 error: no matching overload for operator - (bool)
+
+2 candidate operators:
+ operator - (T) -> T where: T is f32 or i32
+ operator - (vecN<T>) -> vecN<T> where: T is f32 or i32
+)");
+}
+
+TEST_F(IntrinsicTableTest, MatchBinaryOp) {
+ auto* i32 = create<sem::I32>();
+ auto* vec3_i32 = create<sem::Vector>(i32, 3u);
+ auto result = table->Lookup(ast::BinaryOp::kMultiply, i32, vec3_i32, Source{{12, 34}},
+ /* is_compound */ false);
+ EXPECT_EQ(result.result, vec3_i32);
+ EXPECT_EQ(result.lhs, i32);
+ EXPECT_EQ(result.rhs, vec3_i32);
+ EXPECT_EQ(Diagnostics().str(), "");
+}
+
+TEST_F(IntrinsicTableTest, MismatchBinaryOp) {
+ auto* f32 = create<sem::F32>();
+ auto* bool_ = create<sem::Bool>();
+ auto result = table->Lookup(ast::BinaryOp::kMultiply, f32, bool_, Source{{12, 34}},
+ /* is_compound */ false);
+ ASSERT_EQ(result.result, nullptr);
+ EXPECT_EQ(Diagnostics().str(), R"(12:34 error: no matching overload for operator * (f32, bool)
+
+9 candidate operators:
+ operator * (T, T) -> T where: T is f32, i32 or u32
+ operator * (vecN<T>, T) -> vecN<T> where: T is f32, i32 or u32
+ operator * (T, vecN<T>) -> vecN<T> where: T is f32, i32 or u32
+ operator * (f32, matNxM<f32>) -> matNxM<f32>
+ operator * (vecN<T>, vecN<T>) -> vecN<T> where: T is f32, i32 or u32
+ operator * (matNxM<f32>, f32) -> matNxM<f32>
+ operator * (matCxR<f32>, vecC<f32>) -> vecR<f32>
+ operator * (vecR<f32>, matCxR<f32>) -> vecC<f32>
+ operator * (matKxR<f32>, matCxK<f32>) -> matCxR<f32>
+)");
+}
+
+TEST_F(IntrinsicTableTest, MatchCompoundOp) {
+ auto* i32 = create<sem::I32>();
+ auto* vec3_i32 = create<sem::Vector>(i32, 3u);
+ auto result = table->Lookup(ast::BinaryOp::kMultiply, i32, vec3_i32, Source{{12, 34}},
+ /* is_compound */ true);
+ EXPECT_EQ(result.result, vec3_i32);
+ EXPECT_EQ(result.lhs, i32);
+ EXPECT_EQ(result.rhs, vec3_i32);
+ EXPECT_EQ(Diagnostics().str(), "");
+}
+
+TEST_F(IntrinsicTableTest, MismatchCompoundOp) {
+ auto* f32 = create<sem::F32>();
+ auto* bool_ = create<sem::Bool>();
+ auto result = table->Lookup(ast::BinaryOp::kMultiply, f32, bool_, Source{{12, 34}},
+ /* is_compound */ true);
+ ASSERT_EQ(result.result, nullptr);
+ EXPECT_EQ(Diagnostics().str(), R"(12:34 error: no matching overload for operator *= (f32, bool)
+
+9 candidate operators:
+ operator *= (T, T) -> T where: T is f32, i32 or u32
+ operator *= (vecN<T>, T) -> vecN<T> where: T is f32, i32 or u32
+ operator *= (T, vecN<T>) -> vecN<T> where: T is f32, i32 or u32
+ operator *= (f32, matNxM<f32>) -> matNxM<f32>
+ operator *= (vecN<T>, vecN<T>) -> vecN<T> where: T is f32, i32 or u32
+ operator *= (matNxM<f32>, f32) -> matNxM<f32>
+ operator *= (matCxR<f32>, vecC<f32>) -> vecR<f32>
+ operator *= (vecR<f32>, matCxR<f32>) -> vecC<f32>
+ operator *= (matKxR<f32>, matCxK<f32>) -> matCxR<f32>
+)");
+}
+
+TEST_F(IntrinsicTableTest, MatchTypeConstructorImplicit) {
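+ // The element type argument is nullptr, i.e. `vec3(...)` with no explicit
+ // element type; it should be inferred as i32 from the i32 arguments.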
+ auto* i32 = create<sem::I32>();
+ auto* vec3_i32 = create<sem::Vector>(i32, 3u);
+ auto* result =
+ table->Lookup(CtorConvIntrinsic::kVec3, nullptr, {i32, i32, i32}, Source{{12, 34}});
+ ASSERT_NE(result, nullptr);
+ EXPECT_EQ(result->ReturnType(), vec3_i32);
+ EXPECT_TRUE(result->Is<sem::TypeConstructor>());
+ ASSERT_EQ(result->Parameters().size(), 3u);
+ EXPECT_EQ(result->Parameters()[0]->Type(), i32);
+ EXPECT_EQ(result->Parameters()[1]->Type(), i32);
+ EXPECT_EQ(result->Parameters()[2]->Type(), i32);
+}
+
+TEST_F(IntrinsicTableTest, MatchTypeConstructorExplicit) {
+ auto* i32 = create<sem::I32>();
+ auto* vec3_i32 = create<sem::Vector>(i32, 3u);
+ auto* result = table->Lookup(CtorConvIntrinsic::kVec3, i32, {i32, i32, i32}, Source{{12, 34}});
+ ASSERT_NE(result, nullptr);
+ EXPECT_EQ(result->ReturnType(), vec3_i32);
+ EXPECT_TRUE(result->Is<sem::TypeConstructor>());
+ ASSERT_EQ(result->Parameters().size(), 3u);
+ EXPECT_EQ(result->Parameters()[0]->Type(), i32);
+ EXPECT_EQ(result->Parameters()[1]->Type(), i32);
+ EXPECT_EQ(result->Parameters()[2]->Type(), i32);
+}
+
+TEST_F(IntrinsicTableTest, MismatchTypeConstructorImplicit) {
+ auto* i32 = create<sem::I32>();
+ auto* f32 = create<sem::F32>();
+ auto* result =
+ table->Lookup(CtorConvIntrinsic::kVec3, nullptr, {i32, f32, i32}, Source{{12, 34}});
+ ASSERT_EQ(result, nullptr);
+ EXPECT_EQ(Diagnostics().str(), R"(12:34 error: no matching constructor for vec3(i32, f32, i32)
+
+6 candidate constructors:
+ vec3(x: T, y: T, z: T) -> vec3<T> where: T is abstract-int, abstract-float, f32, i32, u32 or bool
+ vec3(xy: vec2<T>, z: T) -> vec3<T> where: T is abstract-int, abstract-float, f32, i32, u32 or bool
+ vec3(x: T, yz: vec2<T>) -> vec3<T> where: T is abstract-int, abstract-float, f32, i32, u32 or bool
+ vec3(T) -> vec3<T> where: T is abstract-int, abstract-float, f32, i32, u32 or bool
+ vec3(vec3<T>) -> vec3<T> where: T is f32, i32, u32 or bool
+ vec3() -> vec3<T> where: T is f32, i32, u32 or bool
+
+4 candidate conversions:
+ vec3(vec3<U>) -> vec3<f32> where: T is f32, U is i32, u32 or bool
+ vec3(vec3<U>) -> vec3<i32> where: T is i32, U is f32, u32 or bool
+ vec3(vec3<U>) -> vec3<u32> where: T is u32, U is f32, i32 or bool
+ vec3(vec3<U>) -> vec3<bool> where: T is bool, U is f32, i32 or u32
+)");
+}
+
+TEST_F(IntrinsicTableTest, MismatchTypeConstructorExplicit) {
+ auto* i32 = create<sem::I32>();
+ auto* f32 = create<sem::F32>();
+ auto* result = table->Lookup(CtorConvIntrinsic::kVec3, i32, {i32, f32, i32}, Source{{12, 34}});
+ ASSERT_EQ(result, nullptr);
+ EXPECT_EQ(Diagnostics().str(),
+ R"(12:34 error: no matching constructor for vec3<i32>(i32, f32, i32)
+
+6 candidate constructors:
+ vec3(x: T, y: T, z: T) -> vec3<T> where: T is abstract-int, abstract-float, f32, i32, u32 or bool
+ vec3(x: T, yz: vec2<T>) -> vec3<T> where: T is abstract-int, abstract-float, f32, i32, u32 or bool
+ vec3(T) -> vec3<T> where: T is abstract-int, abstract-float, f32, i32, u32 or bool
+ vec3(xy: vec2<T>, z: T) -> vec3<T> where: T is abstract-int, abstract-float, f32, i32, u32 or bool
+ vec3(vec3<T>) -> vec3<T> where: T is f32, i32, u32 or bool
+ vec3() -> vec3<T> where: T is f32, i32, u32 or bool
+
+4 candidate conversions:
+ vec3(vec3<U>) -> vec3<f32> where: T is f32, U is i32, u32 or bool
+ vec3(vec3<U>) -> vec3<i32> where: T is i32, U is f32, u32 or bool
+ vec3(vec3<U>) -> vec3<u32> where: T is u32, U is f32, i32 or bool
+ vec3(vec3<U>) -> vec3<bool> where: T is bool, U is f32, i32 or u32
+)");
+}
+
+TEST_F(IntrinsicTableTest, MatchTypeConversion) {
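+ // vec3<i32>(vec3<f32>) should resolve to a sem::TypeConversion rather than
+ // a sem::TypeConstructor.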
+ auto* i32 = create<sem::I32>();
+ auto* vec3_i32 = create<sem::Vector>(i32, 3u);
+ auto* f32 = create<sem::F32>();
+ auto* vec3_f32 = create<sem::Vector>(f32, 3u);
+ auto* result = table->Lookup(CtorConvIntrinsic::kVec3, i32, {vec3_f32}, Source{{12, 34}});
+ ASSERT_NE(result, nullptr);
+ EXPECT_EQ(result->ReturnType(), vec3_i32);
+ EXPECT_TRUE(result->Is<sem::TypeConversion>());
+ ASSERT_EQ(result->Parameters().size(), 1u);
+ EXPECT_EQ(result->Parameters()[0]->Type(), vec3_f32);
+}
+
+TEST_F(IntrinsicTableTest, MismatchTypeConversion) {
+ auto* arr = create<sem::Array>(create<sem::U32>(), 0u, 4u, 4u, 4u, 4u);
+ auto* f32 = create<sem::F32>();
+ auto* result = table->Lookup(CtorConvIntrinsic::kVec3, f32, {arr}, Source{{12, 34}});
+ ASSERT_EQ(result, nullptr);
+ EXPECT_EQ(Diagnostics().str(),
+ R"(12:34 error: no matching constructor for vec3<f32>(array<u32>)
+
+6 candidate constructors:
+ vec3(vec3<T>) -> vec3<T> where: T is f32, i32, u32 or bool
+ vec3(T) -> vec3<T> where: T is abstract-int, abstract-float, f32, i32, u32 or bool
+ vec3() -> vec3<T> where: T is f32, i32, u32 or bool
+ vec3(xy: vec2<T>, z: T) -> vec3<T> where: T is abstract-int, abstract-float, f32, i32, u32 or bool
+ vec3(x: T, yz: vec2<T>) -> vec3<T> where: T is abstract-int, abstract-float, f32, i32, u32 or bool
+ vec3(x: T, y: T, z: T) -> vec3<T> where: T is abstract-int, abstract-float, f32, i32, u32 or bool
+
+4 candidate conversions:
+ vec3(vec3<U>) -> vec3<f32> where: T is f32, U is i32, u32 or bool
+ vec3(vec3<U>) -> vec3<i32> where: T is i32, U is f32, u32 or bool
+ vec3(vec3<U>) -> vec3<u32> where: T is u32, U is f32, i32 or bool
+ vec3(vec3<U>) -> vec3<bool> where: T is bool, U is f32, i32 or u32
+)");
+}
+
+TEST_F(IntrinsicTableTest, Err257Arguments) { // crbug.com/1323605
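+ // Passing far more arguments than any abs() overload accepts should fail
+ // with a normal diagnostic rather than crashing (presumably the regression
+ // covered by the bug referenced above).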
+ auto* f32 = create<sem::F32>();
+ std::vector<const sem::Type*> arg_tys(257, f32);
+ auto result = table->Lookup(BuiltinType::kAbs, std::move(arg_tys), Source{});
+ ASSERT_EQ(result.sem, nullptr);
+ ASSERT_THAT(Diagnostics().str(), HasSubstr("no matching call"));
+}
+
+TEST_F(IntrinsicTableTest, OverloadResolution) {
+ // i32(abstract-int) produces candidates for both:
+ // ctor i32(i32) -> i32
+ // conv i32<T: scalar_no_i32>(T) -> i32
+ // The first should win overload resolution.
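+ // Illustrative WGSL equivalent (not part of this test): `let x = i32(1);`,
+ // where the literal `1` is an abstract-int.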
+ auto* ai = create<sem::AbstractInt>();
+ auto* i32 = create<sem::I32>();
+ auto result = table->Lookup(CtorConvIntrinsic::kI32, nullptr, {ai}, Source{});
+ ASSERT_NE(result, nullptr);
+ EXPECT_EQ(result->ReturnType(), i32);
+ EXPECT_EQ(result->Parameters().size(), 1u);
+ EXPECT_EQ(result->Parameters()[0]->Type(), i32);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// AbstractBinaryTests
+////////////////////////////////////////////////////////////////////////////////
+namespace AbstractBinaryTests {
+
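+// Parameterized case for a binary operator lookup: the expected result and
+// parameter types, followed by the argument types. Cases constructed with
+// `match = false` use `void` placeholders for the expected types.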
+struct Case {
+ template <typename RESULT,
+ typename PARAM_LHS,
+ typename PARAM_RHS,
+ typename ARG_LHS,
+ typename ARG_RHS>
+ static Case Create(bool match = true) {
+ return {
+ match, //
+ builder::DataType<RESULT>::Sem, //
+ builder::DataType<PARAM_LHS>::Sem, //
+ builder::DataType<PARAM_RHS>::Sem, //
+ builder::DataType<ARG_LHS>::Sem, //
+ builder::DataType<ARG_RHS>::Sem, //
+ };
+ }
+ bool expected_match;
+ builder::sem_type_func_ptr expected_result;
+ builder::sem_type_func_ptr expected_param_lhs;
+ builder::sem_type_func_ptr expected_param_rhs;
+ builder::sem_type_func_ptr arg_lhs;
+ builder::sem_type_func_ptr arg_rhs;
+};
+
+struct IntrinsicTableAbstractBinaryTest : public ResolverTestWithParam<Case> {
+ std::unique_ptr<IntrinsicTable> table = IntrinsicTable::Create(*this);
+};
+
+TEST_P(IntrinsicTableAbstractBinaryTest, MatchAdd) {
+ auto* arg_lhs = GetParam().arg_lhs(*this);
+ auto* arg_rhs = GetParam().arg_rhs(*this);
+ auto result = table->Lookup(ast::BinaryOp::kAdd, arg_lhs, arg_rhs, Source{{12, 34}},
+ /* is_compound */ false);
+
+ bool matched = result.result != nullptr;
+ bool expected_match = GetParam().expected_match;
+ EXPECT_EQ(matched, expected_match) << Diagnostics().str();
+
+ auto* expected_result = GetParam().expected_result(*this);
+ EXPECT_TYPE(result.result, expected_result);
+
+ auto* expected_param_lhs = GetParam().expected_param_lhs(*this);
+ EXPECT_TYPE(result.lhs, expected_param_lhs);
+
+ auto* expected_param_rhs = GetParam().expected_param_rhs(*this);
+ EXPECT_TYPE(result.rhs, expected_param_rhs);
+}
+
+INSTANTIATE_TEST_SUITE_P(AFloat_AInt,
+ IntrinsicTableAbstractBinaryTest,
+ testing::Values( // clang-format off
+// result | param lhs | param rhs | arg lhs | arg rhs
+Case::Create<f32, f32, f32, AFloat, AFloat>(),
+Case::Create<f32, f32, f32, AFloat, AInt>(),
+Case::Create<f32, f32, f32, AInt, AFloat>(),
+Case::Create<i32, i32, i32, AInt, AInt>()
+ )); // clang-format on
+
+INSTANTIATE_TEST_SUITE_P(VecAFloat_VecAInt,
+ IntrinsicTableAbstractBinaryTest,
+ testing::Values( // clang-format off
+// result | param lhs | param rhs | arg lhs | arg rhs
+Case::Create<f32V, f32V, f32V, AFloatV, AFloatV>(),
+Case::Create<f32V, f32V, f32V, AFloatV, AIntV>(),
+Case::Create<f32V, f32V, f32V, AIntV, AFloatV>(),
+Case::Create<i32V, i32V, i32V, AIntV, AIntV>()
+ )); // clang-format on
+
+INSTANTIATE_TEST_SUITE_P(AFloat_f32,
+ IntrinsicTableAbstractBinaryTest,
+ testing::Values( // clang-format off
+// result | param lhs | param rhs | arg lhs | arg rhs
+Case::Create<f32, f32, f32, AFloat, f32>(),
+Case::Create<f32, f32, f32, f32, AFloat>()
+ )); // clang-format on
+
+INSTANTIATE_TEST_SUITE_P(VecAFloat_Vecf32,
+ IntrinsicTableAbstractBinaryTest,
+ testing::Values( // clang-format off
+// result | param lhs | param rhs | arg lhs | arg rhs
+Case::Create<f32V, f32V, f32V, AFloatV, f32V>(),
+Case::Create<f32V, f32V, f32V, f32V, AFloatV>()
+ )); // clang-format on
+
+INSTANTIATE_TEST_SUITE_P(
+ AFloat_i32,
+ IntrinsicTableAbstractBinaryTest,
+ testing::Values( // clang-format off
+// result | param lhs | param rhs | arg lhs | arg rhs
+Case::Create<void, void, void, AFloat, i32>(false),
+Case::Create<void, void, void, i32, AFloat>(false)
+ )); // clang-format on
+
+INSTANTIATE_TEST_SUITE_P(
+ VecAFloat_Veci32,
+ IntrinsicTableAbstractBinaryTest,
+ testing::Values( // clang-format off
+// result | param lhs | param rhs | arg lhs | arg rhs
+Case::Create<void, void, void, AFloatV, i32V>(false),
+Case::Create<void, void, void, i32V, AFloatV>(false)
+ )); // clang-format on
+
+INSTANTIATE_TEST_SUITE_P(
+ AFloat_u32,
+ IntrinsicTableAbstractBinaryTest,
+ testing::Values( // clang-format off
+// result | param lhs | param rhs | arg lhs | arg rhs
+Case::Create<void, void, void, AFloat, u32>(false),
+Case::Create<void, void, void, u32, AFloat>(false)
+ )); // clang-format on
+
+INSTANTIATE_TEST_SUITE_P(
+ VecAFloat_Vecu32,
+ IntrinsicTableAbstractBinaryTest,
+ testing::Values( // clang-format off
+// result | param lhs | param rhs | arg lhs | arg rhs
+Case::Create<void, void, void, AFloatV, u32V>(false),
+Case::Create<void, void, void, u32V, AFloatV>(false)
+ )); // clang-format on
+
+INSTANTIATE_TEST_SUITE_P(AInt_f32,
+ IntrinsicTableAbstractBinaryTest,
+ testing::Values( // clang-format off
+// result | param lhs | param rhs | arg lhs | arg rhs
+Case::Create<f32, f32, f32, AInt, f32>(),
+Case::Create<f32, f32, f32, f32, AInt>()
+ )); // clang-format on
+
+INSTANTIATE_TEST_SUITE_P(VecAInt_Vecf32,
+ IntrinsicTableAbstractBinaryTest,
+ testing::Values( // clang-format off
+// result | param lhs | param rhs | arg lhs | arg rhs
+Case::Create<f32V, f32V, f32V, AIntV, f32V>(),
+Case::Create<f32V, f32V, f32V, f32V, AIntV>()
+ )); // clang-format on
+
+INSTANTIATE_TEST_SUITE_P(AInt_i32,
+ IntrinsicTableAbstractBinaryTest,
+ testing::Values( // clang-format off
+// result | param lhs | param rhs | arg lhs | arg rhs
+Case::Create<i32, i32, i32, AInt, i32>(),
+Case::Create<i32, i32, i32, i32, AInt>()
+ )); // clang-format on
+
+INSTANTIATE_TEST_SUITE_P(VecAInt_Veci32,
+ IntrinsicTableAbstractBinaryTest,
+ testing::Values( // clang-format off
+// result | param lhs | param rhs | arg lhs | arg rhs
+Case::Create<i32V, i32V, i32V, AIntV, i32V>(),
+Case::Create<i32V, i32V, i32V, i32V, AIntV>()
+ )); // clang-format on
+
+INSTANTIATE_TEST_SUITE_P(AInt_u32,
+ IntrinsicTableAbstractBinaryTest,
+ testing::Values( // clang-format off
+// result | param lhs | param rhs | arg lhs | arg rhs
+Case::Create<u32, u32, u32, AInt, u32>(),
+Case::Create<u32, u32, u32, u32, AInt>()
+ )); // clang-format on
+
+INSTANTIATE_TEST_SUITE_P(VecAInt_Vecu32,
+ IntrinsicTableAbstractBinaryTest,
+ testing::Values( // clang-format off
+// result | param lhs | param rhs | arg lhs | arg rhs
+Case::Create<u32V, u32V, u32V, AIntV, u32V>(),
+Case::Create<u32V, u32V, u32V, u32V, AIntV>()
+ )); // clang-format on
+
+} // namespace AbstractBinaryTests
+
+////////////////////////////////////////////////////////////////////////////////
+// AbstractTernaryTests
+////////////////////////////////////////////////////////////////////////////////
+namespace AbstractTernaryTests {
+
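+// As above, but for the three-argument clamp() builtin: expected result and
+// parameter types a/b/c, followed by the argument types; `void` marks cases
+// that are expected not to match.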
+struct Case {
+ template <typename RESULT,
+ typename PARAM_A,
+ typename PARAM_B,
+ typename PARAM_C,
+ typename ARG_A,
+ typename ARG_B,
+ typename ARG_C>
+ static Case Create(bool match = true) {
+ return {
+ match,
+ builder::DataType<RESULT>::Sem, //
+ builder::DataType<PARAM_A>::Sem, //
+ builder::DataType<PARAM_B>::Sem, //
+ builder::DataType<PARAM_C>::Sem, //
+ builder::DataType<ARG_A>::Sem, //
+ builder::DataType<ARG_B>::Sem, //
+ builder::DataType<ARG_C>::Sem, //
+ };
+ }
+ bool expected_match;
+ builder::sem_type_func_ptr expected_result;
+ builder::sem_type_func_ptr expected_param_a;
+ builder::sem_type_func_ptr expected_param_b;
+ builder::sem_type_func_ptr expected_param_c;
+ builder::sem_type_func_ptr arg_a;
+ builder::sem_type_func_ptr arg_b;
+ builder::sem_type_func_ptr arg_c;
+};
+
+struct IntrinsicTableAbstractTernaryTest : public ResolverTestWithParam<Case> {
+ std::unique_ptr<IntrinsicTable> table = IntrinsicTable::Create(*this);
+};
+
+TEST_P(IntrinsicTableAbstractTernaryTest, MatchClamp) {
+ auto* arg_a = GetParam().arg_a(*this);
+ auto* arg_b = GetParam().arg_b(*this);
+ auto* arg_c = GetParam().arg_c(*this);
+ auto builtin = table->Lookup(sem::BuiltinType::kClamp, {arg_a, arg_b, arg_c}, Source{{12, 34}});
+
+ bool matched = builtin.sem != nullptr;
+ bool expected_match = GetParam().expected_match;
+ EXPECT_EQ(matched, expected_match) << Diagnostics().str();
+
+ auto* result = builtin.sem ? builtin.sem->ReturnType() : nullptr;
+ auto* expected_result = GetParam().expected_result(*this);
+ EXPECT_TYPE(result, expected_result);
+
+ auto* param_a = builtin.sem ? builtin.sem->Parameters()[0]->Type() : nullptr;
+ auto* expected_param_a = GetParam().expected_param_a(*this);
+ EXPECT_TYPE(param_a, expected_param_a);
+
+ auto* param_b = builtin.sem ? builtin.sem->Parameters()[1]->Type() : nullptr;
+ auto* expected_param_b = GetParam().expected_param_b(*this);
+ EXPECT_TYPE(param_b, expected_param_b);
+
+ auto* param_c = builtin.sem ? builtin.sem->Parameters()[2]->Type() : nullptr;
+ auto* expected_param_c = GetParam().expected_param_c(*this);
+ EXPECT_TYPE(param_c, expected_param_c);
+}
+
+INSTANTIATE_TEST_SUITE_P(
+ AFloat_AInt,
+ IntrinsicTableAbstractTernaryTest,
+ testing::Values( // clang-format off
+// result | param a | param b | param c | arg a | arg b | arg c
+Case::Create<f32, f32, f32, f32, AFloat, AFloat, AFloat>(),
+Case::Create<f32, f32, f32, f32, AFloat, AFloat, AInt>(),
+Case::Create<f32, f32, f32, f32, AFloat, AInt, AFloat>(),
+Case::Create<f32, f32, f32, f32, AFloat, AInt, AInt>(),
+Case::Create<f32, f32, f32, f32, AInt, AFloat, AFloat>(),
+Case::Create<f32, f32, f32, f32, AInt, AFloat, AInt>(),
+Case::Create<f32, f32, f32, f32, AInt, AInt, AFloat>(),
+Case::Create<i32, i32, i32, i32, AInt, AInt, AInt>()
+ // clang-format on
+ ));
+
+INSTANTIATE_TEST_SUITE_P(
+ VecAFloat_VecAInt,
+ IntrinsicTableAbstractTernaryTest,
+ testing::Values( // clang-format off
+// result | param a | param b | param c | arg a | arg b | arg c
+Case::Create<f32V, f32V, f32V, f32V, AFloatV, AFloatV, AFloatV>(),
+Case::Create<f32V, f32V, f32V, f32V, AFloatV, AFloatV, AIntV>(),
+Case::Create<f32V, f32V, f32V, f32V, AFloatV, AIntV, AFloatV>(),
+Case::Create<f32V, f32V, f32V, f32V, AFloatV, AIntV, AIntV>(),
+Case::Create<f32V, f32V, f32V, f32V, AIntV, AFloatV, AFloatV>(),
+Case::Create<f32V, f32V, f32V, f32V, AIntV, AFloatV, AIntV>(),
+Case::Create<f32V, f32V, f32V, f32V, AIntV, AIntV, AFloatV>(),
+Case::Create<i32V, i32V, i32V, i32V, AIntV, AIntV, AIntV>()
+ // clang-format on
+ ));
+
+INSTANTIATE_TEST_SUITE_P(
+ AFloat_f32,
+ IntrinsicTableAbstractTernaryTest,
+ testing::Values( // clang-format off
+// result | param a | param b | param c | arg a | arg b | arg c
+Case::Create<f32, f32, f32, f32, AFloat, AFloat, f32>(),
+Case::Create<f32, f32, f32, f32, AFloat, f32, AFloat>(),
+Case::Create<f32, f32, f32, f32, AFloat, f32, f32>(),
+Case::Create<f32, f32, f32, f32, f32, AFloat, AFloat>(),
+Case::Create<f32, f32, f32, f32, f32, AFloat, f32>(),
+Case::Create<f32, f32, f32, f32, f32, f32, AFloat>()
+ // clang-format on
+ ));
+
+INSTANTIATE_TEST_SUITE_P(
+ VecAFloat_Vecf32,
+ IntrinsicTableAbstractTernaryTest,
+ testing::Values( // clang-format off
+// result | param a | param b | param c | arg a | arg b | arg c
+Case::Create<f32V, f32V, f32V, f32V, AFloatV, AFloatV, f32V>(),
+Case::Create<f32V, f32V, f32V, f32V, AFloatV, f32V, AFloatV>(),
+Case::Create<f32V, f32V, f32V, f32V, AFloatV, f32V, f32V>(),
+Case::Create<f32V, f32V, f32V, f32V, f32V, AFloatV, AFloatV>(),
+Case::Create<f32V, f32V, f32V, f32V, f32V, AFloatV, f32V>(),
+Case::Create<f32V, f32V, f32V, f32V, f32V, f32V, AFloatV> ()
+ // clang-format on
+ ));
+
+INSTANTIATE_TEST_SUITE_P(
+ AFloat_i32,
+ IntrinsicTableAbstractTernaryTest,
+ testing::Values( // clang-format off
+// result | param a | param b | param c | arg a | arg b | arg c
+Case::Create<void, void, void, void, AFloat, AFloat, i32>(false),
+Case::Create<void, void, void, void, AFloat, i32, AFloat>(false),
+Case::Create<void, void, void, void, AFloat, i32, i32>(false),
+Case::Create<void, void, void, void, i32, AFloat, AFloat>(false),
+Case::Create<void, void, void, void, i32, AFloat, i32>(false),
+Case::Create<void, void, void, void, i32, i32, AFloat>(false)
+ // clang-format on
+ ));
+
+INSTANTIATE_TEST_SUITE_P(
+ VecAFloat_Veci32,
+ IntrinsicTableAbstractTernaryTest,
+ testing::Values( // clang-format off
+// result | param a | param b | param c | arg a | arg b | arg c
+Case::Create<void, void, void, void, AFloatV, AFloatV, i32V>(false),
+Case::Create<void, void, void, void, AFloatV, i32V, AFloatV>(false),
+Case::Create<void, void, void, void, AFloatV, i32V, i32V>(false),
+Case::Create<void, void, void, void, i32V, AFloatV, AFloatV>(false),
+Case::Create<void, void, void, void, i32V, AFloatV, i32V>(false),
+Case::Create<void, void, void, void, i32V, i32V, AFloatV>(false)
+ // clang-format on
+ ));
+
+INSTANTIATE_TEST_SUITE_P(
+ AFloat_u32,
+ IntrinsicTableAbstractTernaryTest,
+ testing::Values( // clang-format off
+// result | param a | param b | param c | arg a | arg b | arg c
+Case::Create<void, void, void, void, AFloat, AFloat, u32>(false),
+Case::Create<void, void, void, void, AFloat, u32, AFloat>(false),
+Case::Create<void, void, void, void, AFloat, u32, u32>(false),
+Case::Create<void, void, void, void, u32, AFloat, AFloat>(false),
+Case::Create<void, void, void, void, u32, AFloat, u32>(false),
+Case::Create<void, void, void, void, u32, u32, AFloat>(false)
+ // clang-format on
+ ));
+
+INSTANTIATE_TEST_SUITE_P(
+ VecAFloat_Vecu32,
+ IntrinsicTableAbstractTernaryTest,
+ testing::Values( // clang-format off
+// result | param a | param b | param c | arg a | arg b | arg c
+Case::Create<void, void, void, void, AFloatV, AFloatV, u32V>(false),
+Case::Create<void, void, void, void, AFloatV, u32V, AFloatV>(false),
+Case::Create<void, void, void, void, AFloatV, u32V, u32V>(false),
+Case::Create<void, void, void, void, u32V, AFloatV, AFloatV>(false),
+Case::Create<void, void, void, void, u32V, AFloatV, u32V>(false),
+Case::Create<void, void, void, void, u32V, u32V, AFloatV>(false)
+ // clang-format on
+ ));
+
+INSTANTIATE_TEST_SUITE_P(
+ AInt_f32,
+ IntrinsicTableAbstractTernaryTest,
+ testing::Values( // clang-format off
+// result | param a | param b | param c | arg a | arg b | arg c
+Case::Create<f32, f32, f32, f32, AInt, AInt, f32>(),
+Case::Create<f32, f32, f32, f32, AInt, f32, AInt>(),
+Case::Create<f32, f32, f32, f32, AInt, f32, f32>(),
+Case::Create<f32, f32, f32, f32, f32, AInt, AInt>(),
+Case::Create<f32, f32, f32, f32, f32, AInt, f32>(),
+Case::Create<f32, f32, f32, f32, f32, f32, AInt>()
+ // clang-format on
+ ));
+
+INSTANTIATE_TEST_SUITE_P(
+ VecAInt_Vecf32,
+ IntrinsicTableAbstractTernaryTest,
+ testing::Values( // clang-format off
+// result | param a | param b | param c | arg a | arg b | arg c
+Case::Create<f32V, f32V, f32V, f32V, AIntV, AIntV, f32V>(),
+Case::Create<f32V, f32V, f32V, f32V, AIntV, f32V, AIntV>(),
+Case::Create<f32V, f32V, f32V, f32V, AIntV, f32V, f32V>(),
+Case::Create<f32V, f32V, f32V, f32V, f32V, AIntV, AIntV>(),
+Case::Create<f32V, f32V, f32V, f32V, f32V, AIntV, f32V>(),
+Case::Create<f32V, f32V, f32V, f32V, f32V, f32V, AIntV>()
+ // clang-format on
+ ));
+
+INSTANTIATE_TEST_SUITE_P(
+ AInt_i32,
+ IntrinsicTableAbstractTernaryTest,
+ testing::Values( // clang-format off
+// result | param a | param b | param c | arg a | arg b | arg c
+Case::Create<i32, i32, i32, i32, AInt, AInt, i32>(),
+Case::Create<i32, i32, i32, i32, AInt, i32, AInt>(),
+Case::Create<i32, i32, i32, i32, AInt, i32, i32>(),
+Case::Create<i32, i32, i32, i32, i32, AInt, AInt>(),
+Case::Create<i32, i32, i32, i32, i32, AInt, i32>(),
+Case::Create<i32, i32, i32, i32, i32, i32, AInt>()
+ // clang-format on
+ ));
+
+INSTANTIATE_TEST_SUITE_P(
+ VecAInt_Veci32,
+ IntrinsicTableAbstractTernaryTest,
+ testing::Values( // clang-format off
+// result | param a | param b | param c | arg a | arg b | arg c
+Case::Create<i32V, i32V, i32V, i32V, AIntV, AIntV, i32V>(),
+Case::Create<i32V, i32V, i32V, i32V, AIntV, i32V, AIntV>(),
+Case::Create<i32V, i32V, i32V, i32V, AIntV, i32V, i32V>(),
+Case::Create<i32V, i32V, i32V, i32V, i32V, AIntV, AIntV>(),
+Case::Create<i32V, i32V, i32V, i32V, i32V, AIntV, i32V>(),
+Case::Create<i32V, i32V, i32V, i32V, i32V, i32V, AIntV>()
+ // clang-format on
+ ));
+
+INSTANTIATE_TEST_SUITE_P(
+ AInt_u32,
+ IntrinsicTableAbstractTernaryTest,
+ testing::Values( // clang-format off
+// result | param a | param b | param c | arg a | arg b | arg c
+Case::Create<u32, u32, u32, u32, AInt, AInt, u32>(),
+Case::Create<u32, u32, u32, u32, AInt, u32, AInt>(),
+Case::Create<u32, u32, u32, u32, AInt, u32, u32>(),
+Case::Create<u32, u32, u32, u32, u32, AInt, AInt>(),
+Case::Create<u32, u32, u32, u32, u32, AInt, u32>(),
+Case::Create<u32, u32, u32, u32, u32, u32, AInt>()
+ // clang-format on
+ ));
+
+INSTANTIATE_TEST_SUITE_P(
+ VecAInt_Vecu32,
+ IntrinsicTableAbstractTernaryTest,
+ testing::Values( // clang-format off
+// result | param a | param b | param c | arg a | arg b | arg c
+Case::Create<u32V, u32V, u32V, u32V, AIntV, AIntV, u32V>(),
+Case::Create<u32V, u32V, u32V, u32V, AIntV, u32V, AIntV>(),
+Case::Create<u32V, u32V, u32V, u32V, AIntV, u32V, u32V>(),
+Case::Create<u32V, u32V, u32V, u32V, u32V, AIntV, AIntV>(),
+Case::Create<u32V, u32V, u32V, u32V, u32V, AIntV, u32V>(),
+Case::Create<u32V, u32V, u32V, u32V, u32V, u32V, AIntV>()
+ // clang-format on
+ ));
+
+} // namespace AbstractTernaryTests
+
+} // namespace
+} // namespace tint::resolver
diff --git a/chromium/third_party/dawn/src/tint/resolver/is_host_shareable_test.cc b/chromium/third_party/dawn/src/tint/resolver/is_host_shareable_test.cc
index df28e72db2f..a1679033066 100644
--- a/chromium/third_party/dawn/src/tint/resolver/is_host_shareable_test.cc
+++ b/chromium/third_party/dawn/src/tint/resolver/is_host_shareable_test.cc
@@ -16,7 +16,7 @@
#include "gmock/gmock.h"
#include "src/tint/resolver/resolver_test_helper.h"
-#include "src/tint/sem/atomic_type.h"
+#include "src/tint/sem/atomic.h"
namespace tint::resolver {
namespace {
@@ -24,96 +24,78 @@ namespace {
using ResolverIsHostShareable = ResolverTest;
TEST_F(ResolverIsHostShareable, Void) {
- EXPECT_FALSE(r()->IsHostShareable(create<sem::Void>()));
+ EXPECT_FALSE(r()->IsHostShareable(create<sem::Void>()));
}
TEST_F(ResolverIsHostShareable, Bool) {
- EXPECT_FALSE(r()->IsHostShareable(create<sem::Bool>()));
+ EXPECT_FALSE(r()->IsHostShareable(create<sem::Bool>()));
}
TEST_F(ResolverIsHostShareable, NumericScalar) {
- EXPECT_TRUE(r()->IsHostShareable(create<sem::I32>()));
- EXPECT_TRUE(r()->IsHostShareable(create<sem::U32>()));
- EXPECT_TRUE(r()->IsHostShareable(create<sem::F32>()));
+ EXPECT_TRUE(r()->IsHostShareable(create<sem::I32>()));
+ EXPECT_TRUE(r()->IsHostShareable(create<sem::U32>()));
+ EXPECT_TRUE(r()->IsHostShareable(create<sem::F32>()));
}
TEST_F(ResolverIsHostShareable, NumericVector) {
- EXPECT_TRUE(
- r()->IsHostShareable(create<sem::Vector>(create<sem::I32>(), 2u)));
- EXPECT_TRUE(
- r()->IsHostShareable(create<sem::Vector>(create<sem::I32>(), 3u)));
- EXPECT_TRUE(
- r()->IsHostShareable(create<sem::Vector>(create<sem::I32>(), 4u)));
- EXPECT_TRUE(
- r()->IsHostShareable(create<sem::Vector>(create<sem::U32>(), 2u)));
- EXPECT_TRUE(
- r()->IsHostShareable(create<sem::Vector>(create<sem::U32>(), 3u)));
- EXPECT_TRUE(
- r()->IsHostShareable(create<sem::Vector>(create<sem::U32>(), 4u)));
- EXPECT_TRUE(
- r()->IsHostShareable(create<sem::Vector>(create<sem::F32>(), 2u)));
- EXPECT_TRUE(
- r()->IsHostShareable(create<sem::Vector>(create<sem::F32>(), 3u)));
- EXPECT_TRUE(
- r()->IsHostShareable(create<sem::Vector>(create<sem::F32>(), 4u)));
+ EXPECT_TRUE(r()->IsHostShareable(create<sem::Vector>(create<sem::I32>(), 2u)));
+ EXPECT_TRUE(r()->IsHostShareable(create<sem::Vector>(create<sem::I32>(), 3u)));
+ EXPECT_TRUE(r()->IsHostShareable(create<sem::Vector>(create<sem::I32>(), 4u)));
+ EXPECT_TRUE(r()->IsHostShareable(create<sem::Vector>(create<sem::U32>(), 2u)));
+ EXPECT_TRUE(r()->IsHostShareable(create<sem::Vector>(create<sem::U32>(), 3u)));
+ EXPECT_TRUE(r()->IsHostShareable(create<sem::Vector>(create<sem::U32>(), 4u)));
+ EXPECT_TRUE(r()->IsHostShareable(create<sem::Vector>(create<sem::F32>(), 2u)));
+ EXPECT_TRUE(r()->IsHostShareable(create<sem::Vector>(create<sem::F32>(), 3u)));
+ EXPECT_TRUE(r()->IsHostShareable(create<sem::Vector>(create<sem::F32>(), 4u)));
}
TEST_F(ResolverIsHostShareable, BoolVector) {
- EXPECT_FALSE(
- r()->IsHostShareable(create<sem::Vector>(create<sem::Bool>(), 2u)));
- EXPECT_FALSE(
- r()->IsHostShareable(create<sem::Vector>(create<sem::Bool>(), 3u)));
- EXPECT_FALSE(
- r()->IsHostShareable(create<sem::Vector>(create<sem::Bool>(), 4u)));
- EXPECT_FALSE(
- r()->IsHostShareable(create<sem::Vector>(create<sem::Bool>(), 2u)));
- EXPECT_FALSE(
- r()->IsHostShareable(create<sem::Vector>(create<sem::Bool>(), 3u)));
- EXPECT_FALSE(
- r()->IsHostShareable(create<sem::Vector>(create<sem::Bool>(), 4u)));
- EXPECT_FALSE(
- r()->IsHostShareable(create<sem::Vector>(create<sem::Bool>(), 2u)));
- EXPECT_FALSE(
- r()->IsHostShareable(create<sem::Vector>(create<sem::Bool>(), 3u)));
- EXPECT_FALSE(
- r()->IsHostShareable(create<sem::Vector>(create<sem::Bool>(), 4u)));
+ EXPECT_FALSE(r()->IsHostShareable(create<sem::Vector>(create<sem::Bool>(), 2u)));
+ EXPECT_FALSE(r()->IsHostShareable(create<sem::Vector>(create<sem::Bool>(), 3u)));
+ EXPECT_FALSE(r()->IsHostShareable(create<sem::Vector>(create<sem::Bool>(), 4u)));
+ EXPECT_FALSE(r()->IsHostShareable(create<sem::Vector>(create<sem::Bool>(), 2u)));
+ EXPECT_FALSE(r()->IsHostShareable(create<sem::Vector>(create<sem::Bool>(), 3u)));
+ EXPECT_FALSE(r()->IsHostShareable(create<sem::Vector>(create<sem::Bool>(), 4u)));
+ EXPECT_FALSE(r()->IsHostShareable(create<sem::Vector>(create<sem::Bool>(), 2u)));
+ EXPECT_FALSE(r()->IsHostShareable(create<sem::Vector>(create<sem::Bool>(), 3u)));
+ EXPECT_FALSE(r()->IsHostShareable(create<sem::Vector>(create<sem::Bool>(), 4u)));
}
TEST_F(ResolverIsHostShareable, Matrix) {
- auto* vec2 = create<sem::Vector>(create<sem::F32>(), 2u);
- auto* vec3 = create<sem::Vector>(create<sem::F32>(), 3u);
- auto* vec4 = create<sem::Vector>(create<sem::F32>(), 4u);
-
- EXPECT_TRUE(r()->IsHostShareable(create<sem::Matrix>(vec2, 2u)));
- EXPECT_TRUE(r()->IsHostShareable(create<sem::Matrix>(vec2, 3u)));
- EXPECT_TRUE(r()->IsHostShareable(create<sem::Matrix>(vec2, 4u)));
- EXPECT_TRUE(r()->IsHostShareable(create<sem::Matrix>(vec3, 2u)));
- EXPECT_TRUE(r()->IsHostShareable(create<sem::Matrix>(vec3, 3u)));
- EXPECT_TRUE(r()->IsHostShareable(create<sem::Matrix>(vec3, 4u)));
- EXPECT_TRUE(r()->IsHostShareable(create<sem::Matrix>(vec4, 2u)));
- EXPECT_TRUE(r()->IsHostShareable(create<sem::Matrix>(vec4, 3u)));
- EXPECT_TRUE(r()->IsHostShareable(create<sem::Matrix>(vec4, 4u)));
+ auto* vec2 = create<sem::Vector>(create<sem::F32>(), 2u);
+ auto* vec3 = create<sem::Vector>(create<sem::F32>(), 3u);
+ auto* vec4 = create<sem::Vector>(create<sem::F32>(), 4u);
+
+ EXPECT_TRUE(r()->IsHostShareable(create<sem::Matrix>(vec2, 2u)));
+ EXPECT_TRUE(r()->IsHostShareable(create<sem::Matrix>(vec2, 3u)));
+ EXPECT_TRUE(r()->IsHostShareable(create<sem::Matrix>(vec2, 4u)));
+ EXPECT_TRUE(r()->IsHostShareable(create<sem::Matrix>(vec3, 2u)));
+ EXPECT_TRUE(r()->IsHostShareable(create<sem::Matrix>(vec3, 3u)));
+ EXPECT_TRUE(r()->IsHostShareable(create<sem::Matrix>(vec3, 4u)));
+ EXPECT_TRUE(r()->IsHostShareable(create<sem::Matrix>(vec4, 2u)));
+ EXPECT_TRUE(r()->IsHostShareable(create<sem::Matrix>(vec4, 3u)));
+ EXPECT_TRUE(r()->IsHostShareable(create<sem::Matrix>(vec4, 4u)));
}
TEST_F(ResolverIsHostShareable, Pointer) {
- auto* ptr = create<sem::Pointer>(
- create<sem::I32>(), ast::StorageClass::kPrivate, ast::Access::kReadWrite);
- EXPECT_FALSE(r()->IsHostShareable(ptr));
+ auto* ptr = create<sem::Pointer>(create<sem::I32>(), ast::StorageClass::kPrivate,
+ ast::Access::kReadWrite);
+ EXPECT_FALSE(r()->IsHostShareable(ptr));
}
TEST_F(ResolverIsHostShareable, Atomic) {
- EXPECT_TRUE(r()->IsHostShareable(create<sem::Atomic>(create<sem::I32>())));
- EXPECT_TRUE(r()->IsHostShareable(create<sem::Atomic>(create<sem::U32>())));
+ EXPECT_TRUE(r()->IsHostShareable(create<sem::Atomic>(create<sem::I32>())));
+ EXPECT_TRUE(r()->IsHostShareable(create<sem::Atomic>(create<sem::U32>())));
}
TEST_F(ResolverIsHostShareable, ArraySizedOfHostShareable) {
- auto* arr = create<sem::Array>(create<sem::I32>(), 5u, 4u, 20u, 4u, 4u);
- EXPECT_TRUE(r()->IsHostShareable(arr));
+ auto* arr = create<sem::Array>(create<sem::I32>(), 5u, 4u, 20u, 4u, 4u);
+ EXPECT_TRUE(r()->IsHostShareable(arr));
}
TEST_F(ResolverIsHostShareable, ArrayUnsizedOfHostShareable) {
- auto* arr = create<sem::Array>(create<sem::I32>(), 0u, 4u, 4u, 4u, 4u);
- EXPECT_TRUE(r()->IsHostShareable(arr));
+ auto* arr = create<sem::Array>(create<sem::I32>(), 0u, 4u, 4u, 4u, 4u);
+ EXPECT_TRUE(r()->IsHostShareable(arr));
}
// Note: Structure tests covered in host_shareable_validation_test.cc
diff --git a/chromium/third_party/dawn/src/tint/resolver/is_storeable_test.cc b/chromium/third_party/dawn/src/tint/resolver/is_storeable_test.cc
index c9691f7d497..2d37d8b6e31 100644
--- a/chromium/third_party/dawn/src/tint/resolver/is_storeable_test.cc
+++ b/chromium/third_party/dawn/src/tint/resolver/is_storeable_test.cc
@@ -16,7 +16,7 @@
#include "gmock/gmock.h"
#include "src/tint/resolver/resolver_test_helper.h"
-#include "src/tint/sem/atomic_type.h"
+#include "src/tint/sem/atomic.h"
namespace tint::resolver {
namespace {
@@ -24,114 +24,113 @@ namespace {
using ResolverIsStorableTest = ResolverTest;
TEST_F(ResolverIsStorableTest, Void) {
- EXPECT_FALSE(r()->IsStorable(create<sem::Void>()));
+ EXPECT_FALSE(r()->IsStorable(create<sem::Void>()));
}
TEST_F(ResolverIsStorableTest, Scalar) {
- EXPECT_TRUE(r()->IsStorable(create<sem::Bool>()));
- EXPECT_TRUE(r()->IsStorable(create<sem::I32>()));
- EXPECT_TRUE(r()->IsStorable(create<sem::U32>()));
- EXPECT_TRUE(r()->IsStorable(create<sem::F32>()));
+ EXPECT_TRUE(r()->IsStorable(create<sem::Bool>()));
+ EXPECT_TRUE(r()->IsStorable(create<sem::I32>()));
+ EXPECT_TRUE(r()->IsStorable(create<sem::U32>()));
+ EXPECT_TRUE(r()->IsStorable(create<sem::F32>()));
}
TEST_F(ResolverIsStorableTest, Vector) {
- EXPECT_TRUE(r()->IsStorable(create<sem::Vector>(create<sem::I32>(), 2u)));
- EXPECT_TRUE(r()->IsStorable(create<sem::Vector>(create<sem::I32>(), 3u)));
- EXPECT_TRUE(r()->IsStorable(create<sem::Vector>(create<sem::I32>(), 4u)));
- EXPECT_TRUE(r()->IsStorable(create<sem::Vector>(create<sem::U32>(), 2u)));
- EXPECT_TRUE(r()->IsStorable(create<sem::Vector>(create<sem::U32>(), 3u)));
- EXPECT_TRUE(r()->IsStorable(create<sem::Vector>(create<sem::U32>(), 4u)));
- EXPECT_TRUE(r()->IsStorable(create<sem::Vector>(create<sem::F32>(), 2u)));
- EXPECT_TRUE(r()->IsStorable(create<sem::Vector>(create<sem::F32>(), 3u)));
- EXPECT_TRUE(r()->IsStorable(create<sem::Vector>(create<sem::F32>(), 4u)));
+ EXPECT_TRUE(r()->IsStorable(create<sem::Vector>(create<sem::I32>(), 2u)));
+ EXPECT_TRUE(r()->IsStorable(create<sem::Vector>(create<sem::I32>(), 3u)));
+ EXPECT_TRUE(r()->IsStorable(create<sem::Vector>(create<sem::I32>(), 4u)));
+ EXPECT_TRUE(r()->IsStorable(create<sem::Vector>(create<sem::U32>(), 2u)));
+ EXPECT_TRUE(r()->IsStorable(create<sem::Vector>(create<sem::U32>(), 3u)));
+ EXPECT_TRUE(r()->IsStorable(create<sem::Vector>(create<sem::U32>(), 4u)));
+ EXPECT_TRUE(r()->IsStorable(create<sem::Vector>(create<sem::F32>(), 2u)));
+ EXPECT_TRUE(r()->IsStorable(create<sem::Vector>(create<sem::F32>(), 3u)));
+ EXPECT_TRUE(r()->IsStorable(create<sem::Vector>(create<sem::F32>(), 4u)));
}
TEST_F(ResolverIsStorableTest, Matrix) {
- auto* vec2 = create<sem::Vector>(create<sem::F32>(), 2u);
- auto* vec3 = create<sem::Vector>(create<sem::F32>(), 3u);
- auto* vec4 = create<sem::Vector>(create<sem::F32>(), 4u);
- EXPECT_TRUE(r()->IsStorable(create<sem::Matrix>(vec2, 2u)));
- EXPECT_TRUE(r()->IsStorable(create<sem::Matrix>(vec2, 3u)));
- EXPECT_TRUE(r()->IsStorable(create<sem::Matrix>(vec2, 4u)));
- EXPECT_TRUE(r()->IsStorable(create<sem::Matrix>(vec3, 2u)));
- EXPECT_TRUE(r()->IsStorable(create<sem::Matrix>(vec3, 3u)));
- EXPECT_TRUE(r()->IsStorable(create<sem::Matrix>(vec3, 4u)));
- EXPECT_TRUE(r()->IsStorable(create<sem::Matrix>(vec4, 2u)));
- EXPECT_TRUE(r()->IsStorable(create<sem::Matrix>(vec4, 3u)));
- EXPECT_TRUE(r()->IsStorable(create<sem::Matrix>(vec4, 4u)));
+ auto* vec2 = create<sem::Vector>(create<sem::F32>(), 2u);
+ auto* vec3 = create<sem::Vector>(create<sem::F32>(), 3u);
+ auto* vec4 = create<sem::Vector>(create<sem::F32>(), 4u);
+ EXPECT_TRUE(r()->IsStorable(create<sem::Matrix>(vec2, 2u)));
+ EXPECT_TRUE(r()->IsStorable(create<sem::Matrix>(vec2, 3u)));
+ EXPECT_TRUE(r()->IsStorable(create<sem::Matrix>(vec2, 4u)));
+ EXPECT_TRUE(r()->IsStorable(create<sem::Matrix>(vec3, 2u)));
+ EXPECT_TRUE(r()->IsStorable(create<sem::Matrix>(vec3, 3u)));
+ EXPECT_TRUE(r()->IsStorable(create<sem::Matrix>(vec3, 4u)));
+ EXPECT_TRUE(r()->IsStorable(create<sem::Matrix>(vec4, 2u)));
+ EXPECT_TRUE(r()->IsStorable(create<sem::Matrix>(vec4, 3u)));
+ EXPECT_TRUE(r()->IsStorable(create<sem::Matrix>(vec4, 4u)));
}
TEST_F(ResolverIsStorableTest, Pointer) {
- auto* ptr = create<sem::Pointer>(
- create<sem::I32>(), ast::StorageClass::kPrivate, ast::Access::kReadWrite);
- EXPECT_FALSE(r()->IsStorable(ptr));
+ auto* ptr = create<sem::Pointer>(create<sem::I32>(), ast::StorageClass::kPrivate,
+ ast::Access::kReadWrite);
+ EXPECT_FALSE(r()->IsStorable(ptr));
}
TEST_F(ResolverIsStorableTest, Atomic) {
- EXPECT_TRUE(r()->IsStorable(create<sem::Atomic>(create<sem::I32>())));
- EXPECT_TRUE(r()->IsStorable(create<sem::Atomic>(create<sem::U32>())));
+ EXPECT_TRUE(r()->IsStorable(create<sem::Atomic>(create<sem::I32>())));
+ EXPECT_TRUE(r()->IsStorable(create<sem::Atomic>(create<sem::U32>())));
}
TEST_F(ResolverIsStorableTest, ArraySizedOfStorable) {
- auto* arr = create<sem::Array>(create<sem::I32>(), 5u, 4u, 20u, 4u, 4u);
- EXPECT_TRUE(r()->IsStorable(arr));
+ auto* arr = create<sem::Array>(create<sem::I32>(), 5u, 4u, 20u, 4u, 4u);
+ EXPECT_TRUE(r()->IsStorable(arr));
}
TEST_F(ResolverIsStorableTest, ArrayUnsizedOfStorable) {
- auto* arr = create<sem::Array>(create<sem::I32>(), 0u, 4u, 4u, 4u, 4u);
- EXPECT_TRUE(r()->IsStorable(arr));
+ auto* arr = create<sem::Array>(create<sem::I32>(), 0u, 4u, 4u, 4u, 4u);
+ EXPECT_TRUE(r()->IsStorable(arr));
}
TEST_F(ResolverIsStorableTest, Struct_AllMembersStorable) {
- Structure("S", {
- Member("a", ty.i32()),
- Member("b", ty.f32()),
- });
+ Structure("S", {
+ Member("a", ty.i32()),
+ Member("b", ty.f32()),
+ });
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverIsStorableTest, Struct_SomeMembersNonStorable) {
- Structure("S", {
- Member("a", ty.i32()),
- Member("b", ty.pointer<i32>(ast::StorageClass::kPrivate)),
- });
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- R"(error: ptr<private, i32, read_write> cannot be used as the type of a structure member)");
+ Structure("S", {
+ Member("a", ty.i32()),
+ Member("b", ty.pointer<i32>(ast::StorageClass::kPrivate)),
+ });
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(
+ r()->error(),
+ R"(error: ptr<private, i32, read_write> cannot be used as the type of a structure member)");
}
TEST_F(ResolverIsStorableTest, Struct_NestedStorable) {
- auto* storable = Structure("Storable", {
- Member("a", ty.i32()),
- Member("b", ty.f32()),
- });
- Structure("S", {
- Member("a", ty.i32()),
- Member("b", ty.Of(storable)),
- });
-
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ auto* storable = Structure("Storable", {
+ Member("a", ty.i32()),
+ Member("b", ty.f32()),
+ });
+ Structure("S", {
+ Member("a", ty.i32()),
+ Member("b", ty.Of(storable)),
+ });
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverIsStorableTest, Struct_NestedNonStorable) {
- auto* non_storable =
- Structure("nonstorable",
- {
- Member("a", ty.i32()),
- Member("b", ty.pointer<i32>(ast::StorageClass::kPrivate)),
- });
- Structure("S", {
- Member("a", ty.i32()),
- Member("b", ty.Of(non_storable)),
- });
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- R"(error: ptr<private, i32, read_write> cannot be used as the type of a structure member)");
+ auto* non_storable =
+ Structure("nonstorable", {
+ Member("a", ty.i32()),
+ Member("b", ty.pointer<i32>(ast::StorageClass::kPrivate)),
+ });
+ Structure("S", {
+ Member("a", ty.i32()),
+ Member("b", ty.Of(non_storable)),
+ });
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(
+ r()->error(),
+ R"(error: ptr<private, i32, read_write> cannot be used as the type of a structure member)");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/resolver/materialize_test.cc b/chromium/third_party/dawn/src/tint/resolver/materialize_test.cc
new file mode 100644
index 00000000000..22d5606321c
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/resolver/materialize_test.cc
@@ -0,0 +1,963 @@
+// Copyright 2022 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/tint/sem/materialize.h"
+
+#include "src/tint/resolver/resolver.h"
+#include "src/tint/resolver/resolver_test_helper.h"
+#include "src/tint/sem/test_helper.h"
+
+#include "gmock/gmock.h"
+
+using namespace tint::number_suffixes; // NOLINT
+
+namespace tint::resolver {
+namespace {
+
+using AFloatV = builder::vec<3, AFloat>;
+using AFloatM = builder::mat<3, 2, AFloat>;
+using AIntV = builder::vec<3, AInt>;
+using f32V = builder::vec<3, f32>;
+using f16V = builder::vec<3, f16>;
+using i32V = builder::vec<3, i32>;
+using u32V = builder::vec<3, u32>;
+using f32M = builder::mat<3, 2, f32>;
+
+constexpr double kHighestU32 = static_cast<double>(u32::kHighest);
+constexpr double kLowestU32 = static_cast<double>(u32::kLowest);
+constexpr double kHighestI32 = static_cast<double>(i32::kHighest);
+constexpr double kLowestI32 = static_cast<double>(i32::kLowest);
+constexpr double kHighestF32 = static_cast<double>(f32::kHighest);
+constexpr double kLowestF32 = static_cast<double>(f32::kLowest);
+constexpr double kTooBigF32 = static_cast<double>(3.5e+38);
+constexpr double kPiF64 = 3.141592653589793;
+constexpr double kPiF32 = 3.1415927410125732; // kPiF64 quantized to f32
+
+constexpr double kSubnormalF32 = 0x1.0p-128;
+
+enum class Expectation {
+ kMaterialize,
+ kNoMaterialize,
+ kInvalidConversion,
+ kValueCannotBeRepresented,
+};
+
+static std::ostream& operator<<(std::ostream& o, Expectation m) {
+ switch (m) {
+ case Expectation::kMaterialize:
+ return o << "materialize";
+ case Expectation::kNoMaterialize:
+ return o << "no-materialize";
+ case Expectation::kInvalidConversion:
+ return o << "invalid-conversion";
+ case Expectation::kValueCannotBeRepresented:
+ return o << "value cannot be represented";
+ }
+ return o << "<unknown>";
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+// MaterializeAbstractNumericToConcreteType
+// Tests that an abstract-numeric expression materializes to the expected concrete type
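+// (e.g., illustrative WGSL: `var a : f32 = 1.0;` materializes the
+// abstract-float literal `1.0` to f32)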
+////////////////////////////////////////////////////////////////////////////////////////////////////
+namespace materialize_abstract_numeric_to_concrete_type {
+
+// How should the materialization occur?
+enum class Method {
+ // var a : target_type = abstract_expr;
+ kVar,
+
+ // let a : target_type = abstract_expr;
+ kLet,
+
+ // var a : target_type;
+ // a = abstract_expr;
+ kAssign,
+
+ // _ = abstract_expr;
+ kPhonyAssign,
+
+ // fn F(v : target_type) {}
+ // fn x() {
+ // F(abstract_expr);
+ // }
+ kFnArg,
+
+ // min(target_expr, abstract_expr);
+ kBuiltinArg,
+
+ // fn F() : target_type {
+ // return abstract_expr;
+ // }
+ kReturn,
+
+ // array<target_type, 1>(abstract_expr);
+ kArray,
+
+ // struct S {
+ // v : target_type
+ // };
+ // fn x() {
+ // _ = S(abstract_expr)
+ // }
+ kStruct,
+
+ // target_expr + abstract_expr
+ kBinaryOp,
+
+ // switch (abstract_expr) {
+ // case target_expr: {}
+ // default: {}
+ // }
+ kSwitchCond,
+
+ // switch (target_expr) {
+ // case abstract_expr: {}
+ // default: {}
+ // }
+ kSwitchCase,
+
+ // switch (abstract_expr) {
+ // case 123: {}
+ // case target_expr: {}
+ // default: {}
+ // }
+ kSwitchCondWithAbstractCase,
+
+ // switch (target_expr) {
+ // case 123: {}
+ // case abstract_expr: {}
+ // default: {}
+ // }
+ kSwitchCaseWithAbstractCase,
+
+ // @workgroup_size(target_expr, abstract_expr, 123)
+ // @compute
+ // fn f() {}
+ kWorkgroupSize
+};
+
+static std::ostream& operator<<(std::ostream& o, Method m) {
+ switch (m) {
+ case Method::kVar:
+ return o << "var";
+ case Method::kLet:
+ return o << "let";
+ case Method::kAssign:
+ return o << "assign";
+ case Method::kPhonyAssign:
+ return o << "phony-assign";
+ case Method::kFnArg:
+ return o << "fn-arg";
+ case Method::kBuiltinArg:
+ return o << "builtin-arg";
+ case Method::kReturn:
+ return o << "return";
+ case Method::kArray:
+ return o << "array";
+ case Method::kStruct:
+ return o << "struct";
+ case Method::kBinaryOp:
+ return o << "binary-op";
+ case Method::kSwitchCond:
+ return o << "switch-cond";
+ case Method::kSwitchCase:
+ return o << "switch-case";
+ case Method::kSwitchCondWithAbstractCase:
+ return o << "switch-cond-with-abstract";
+ case Method::kSwitchCaseWithAbstractCase:
+ return o << "switch-case-with-abstract";
+ case Method::kWorkgroupSize:
+ return o << "workgroup-size";
+ }
+ return o << "<unknown>";
+}
+
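+// One data point for the parameterized tests below: the concrete target type,
+// the abstract source type, the expected constant after materialization, and
+// the double literal used to build the abstract expression.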
+struct Data {
+ std::string target_type_name;
+ std::string target_element_type_name;
+ builder::ast_type_func_ptr target_ast_ty;
+ builder::sem_type_func_ptr target_sem_ty;
+ builder::ast_expr_func_ptr target_expr;
+ std::string abstract_type_name;
+ builder::ast_expr_func_ptr abstract_expr;
+ std::variant<AInt, AFloat> materialized_value;
+ double literal_value;
+};
+
+template <typename TARGET_TYPE, typename ABSTRACT_TYPE, typename MATERIALIZED_TYPE>
+Data Types(MATERIALIZED_TYPE materialized_value, double literal_value) {
+ using TargetDataType = builder::DataType<TARGET_TYPE>;
+ using AbstractDataType = builder::DataType<ABSTRACT_TYPE>;
+ using TargetElementDataType = builder::DataType<typename TargetDataType::ElementType>;
+ return {
+ TargetDataType::Name(), // target_type_name
+ TargetElementDataType::Name(), // target_element_type_name
+ TargetDataType::AST, // target_ast_ty
+ TargetDataType::Sem, // target_sem_ty
+ TargetDataType::Expr, // target_expr
+ AbstractDataType::Name(), // abstract_type_name
+ AbstractDataType::Expr, // abstract_expr
+ materialized_value,
+ literal_value,
+ };
+}
+
+template <typename TARGET_TYPE, typename ABSTRACT_TYPE>
+Data Types() {
+ using TargetDataType = builder::DataType<TARGET_TYPE>;
+ using AbstractDataType = builder::DataType<ABSTRACT_TYPE>;
+ using TargetElementDataType = builder::DataType<typename TargetDataType::ElementType>;
+ return {
+ TargetDataType::Name(), // target_type_name
+ TargetElementDataType::Name(), // target_element_type_name
+ TargetDataType::AST, // target_ast_ty
+ TargetDataType::Sem, // target_sem_ty
+ TargetDataType::Expr, // target_expr
+ AbstractDataType::Name(), // abstract_type_name
+ AbstractDataType::Expr, // abstract_expr
+ 0_a,
+ 0.0,
+ };
+}
+
+static std::ostream& operator<<(std::ostream& o, const Data& c) {
+ auto print_value = [&](auto&& v) { o << v; };
+ o << "[" << c.target_type_name << " <- " << c.abstract_type_name << "] [";
+ std::visit(print_value, c.materialized_value);
+ o << " <- " << c.literal_value << "]";
+ return o;
+}
+
+using MaterializeAbstractNumericToConcreteType =
+ resolver::ResolverTestWithParam<std::tuple<Expectation, Method, Data>>;
+
+TEST_P(MaterializeAbstractNumericToConcreteType, Test) {
+ // Once F16 is properly supported, we'll need to enable this:
+ // Enable(ast::Extension::kF16);
+
+ const auto& param = GetParam();
+ const auto& expectation = std::get<0>(param);
+ const auto& method = std::get<1>(param);
+ const auto& data = std::get<2>(param);
+
+ auto target_ty = [&] { return data.target_ast_ty(*this); };
+ auto target_expr = [&] { return data.target_expr(*this, 42); };
+ auto* abstract_expr = data.abstract_expr(*this, data.literal_value);
+ switch (method) {
+ case Method::kVar:
+ WrapInFunction(Decl(Var("a", target_ty(), abstract_expr)));
+ break;
+ case Method::kLet:
+ WrapInFunction(Decl(Let("a", target_ty(), abstract_expr)));
+ break;
+ case Method::kAssign:
+ WrapInFunction(Decl(Var("a", target_ty(), nullptr)), Assign("a", abstract_expr));
+ break;
+ case Method::kPhonyAssign:
+ WrapInFunction(Assign(Phony(), abstract_expr));
+ break;
+ case Method::kFnArg:
+ Func("F", {Param("P", target_ty())}, ty.void_(), {});
+ WrapInFunction(CallStmt(Call("F", abstract_expr)));
+ break;
+ case Method::kBuiltinArg:
+ WrapInFunction(CallStmt(Call("min", target_expr(), abstract_expr)));
+ break;
+ case Method::kReturn:
+ Func("F", {}, target_ty(), {Return(abstract_expr)});
+ break;
+ case Method::kArray:
+ WrapInFunction(Construct(ty.array(target_ty(), 1_i), abstract_expr));
+ break;
+ case Method::kStruct:
+ Structure("S", {Member("v", target_ty())});
+ WrapInFunction(Construct(ty.type_name("S"), abstract_expr));
+ break;
+ case Method::kBinaryOp:
+ WrapInFunction(Add(target_expr(), abstract_expr));
+ break;
+ case Method::kSwitchCond:
+ WrapInFunction(Switch(abstract_expr, //
+ Case(target_expr()->As<ast::IntLiteralExpression>()), //
+ DefaultCase()));
+ break;
+ case Method::kSwitchCase:
+ WrapInFunction(Switch(target_expr(), //
+ Case(abstract_expr->As<ast::IntLiteralExpression>()), //
+ DefaultCase()));
+ break;
+ case Method::kSwitchCondWithAbstractCase:
+ WrapInFunction(Switch(abstract_expr, //
+ Case(Expr(123_a)), //
+ Case(target_expr()->As<ast::IntLiteralExpression>()), //
+ DefaultCase()));
+ break;
+ case Method::kSwitchCaseWithAbstractCase:
+ WrapInFunction(Switch(target_expr(), //
+ Case(Expr(123_a)), //
+ Case(abstract_expr->As<ast::IntLiteralExpression>()), //
+ DefaultCase()));
+ break;
+ case Method::kWorkgroupSize:
+ Func("f", {}, ty.void_(), {},
+ {WorkgroupSize(target_expr(), abstract_expr, Expr(123_a)),
+ Stage(ast::PipelineStage::kCompute)});
+ break;
+ }
+
+ auto check_types_and_values = [&](const sem::Expression* expr) {
+ auto* target_sem_ty = data.target_sem_ty(*this);
+
+ EXPECT_TYPE(expr->Type(), target_sem_ty);
+ EXPECT_TYPE(expr->ConstantValue().Type(), target_sem_ty);
+
+ uint32_t num_elems = 0;
+ const sem::Type* target_sem_el_ty = sem::Type::ElementOf(target_sem_ty, &num_elems);
+ EXPECT_TYPE(expr->ConstantValue().ElementType(), target_sem_el_ty);
+ expr->ConstantValue().WithElements([&](auto&& vec) {
+ using VEC_TY = std::decay_t<decltype(vec)>;
+ using EL_TY = typename VEC_TY::value_type;
+ ASSERT_TRUE(std::holds_alternative<EL_TY>(data.materialized_value));
+ VEC_TY expected(num_elems, std::get<EL_TY>(data.materialized_value));
+ EXPECT_EQ(vec, expected);
+ });
+ };
+
+ switch (expectation) {
+ case Expectation::kMaterialize: {
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
+ auto* materialize = Sem().Get<sem::Materialize>(abstract_expr);
+ ASSERT_NE(materialize, nullptr);
+ check_types_and_values(materialize);
+ break;
+ }
+ case Expectation::kNoMaterialize: {
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
+ auto* sem = Sem().Get(abstract_expr);
+ ASSERT_NE(sem, nullptr);
+ EXPECT_FALSE(sem->Is<sem::Materialize>());
+ check_types_and_values(sem);
+ break;
+ }
+ case Expectation::kInvalidConversion: {
+ ASSERT_FALSE(r()->Resolve());
+ std::string expect;
+ switch (method) {
+ case Method::kBuiltinArg:
+ expect = "error: no matching call to min(" + data.target_type_name + ", " +
+ data.abstract_type_name + ")";
+ break;
+ case Method::kBinaryOp:
+ expect = "error: no matching overload for operator + (" +
+ data.target_type_name + ", " + data.abstract_type_name + ")";
+ break;
+ default:
+ expect = "error: cannot convert value of type '" + data.abstract_type_name +
+ "' to type '" + data.target_type_name + "'";
+ break;
+ }
+ EXPECT_THAT(r()->error(), testing::StartsWith(expect));
+ break;
+ }
+ case Expectation::kValueCannotBeRepresented:
+ ASSERT_FALSE(r()->Resolve());
+ EXPECT_THAT(r()->error(), testing::HasSubstr("cannot be represented as '" +
+ data.target_element_type_name + "'"));
+ break;
+ }
+}
+
+/// Methods that support scalar materialization
+constexpr Method kScalarMethods[] = {
+ Method::kLet, Method::kVar, Method::kAssign, Method::kFnArg, Method::kBuiltinArg,
+ Method::kReturn, Method::kArray, Method::kStruct, Method::kBinaryOp,
+};
+
+/// Methods that support vector materialization
+constexpr Method kVectorMethods[] = {
+ Method::kLet, Method::kVar, Method::kAssign, Method::kFnArg, Method::kBuiltinArg,
+ Method::kReturn, Method::kArray, Method::kStruct, Method::kBinaryOp,
+};
+
+/// Methods that support matrix materialization
+constexpr Method kMatrixMethods[] = {
+ Method::kLet, Method::kVar, Method::kAssign, Method::kFnArg,
+ Method::kReturn, Method::kArray, Method::kStruct, Method::kBinaryOp,
+};
+
+/// Methods that support materialization for switch cases
+constexpr Method kSwitchMethods[] = {
+ Method::kSwitchCond,
+ Method::kSwitchCase,
+ Method::kSwitchCondWithAbstractCase,
+ Method::kSwitchCaseWithAbstractCase,
+};
+
+/// Methods that do not materialize
+constexpr Method kNoMaterializeMethods[] = {
+ Method::kPhonyAssign,
+ // TODO(crbug.com/tint/1504): Enable once we have abstract overloads of builtins / binary ops:
+ // Method::kBuiltinArg, Method::kBinaryOp,
+};
+INSTANTIATE_TEST_SUITE_P(
+ MaterializeScalar,
+ MaterializeAbstractNumericToConcreteType,
+ testing::Combine(testing::Values(Expectation::kMaterialize),
+ testing::ValuesIn(kScalarMethods),
+ testing::ValuesIn(std::vector<Data>{
+ Types<i32, AInt>(0_a, 0.0), //
+ Types<i32, AInt>(1_a, 1.0), //
+ Types<i32, AInt>(-1_a, -1.0), //
+ Types<i32, AInt>(AInt(kHighestI32), kHighestI32), //
+ Types<i32, AInt>(AInt(kLowestI32), kLowestI32), //
+ Types<u32, AInt>(0_a, 0.0), //
+ Types<u32, AInt>(1_a, 1.0), //
+ Types<u32, AInt>(AInt(kHighestU32), kHighestU32), //
+ Types<u32, AInt>(AInt(kLowestU32), kLowestU32), //
+ Types<f32, AFloat>(0.0_a, 0.0), //
+ Types<f32, AFloat>(AFloat(kHighestF32), kHighestF32), //
+ Types<f32, AFloat>(AFloat(kLowestF32), kLowestF32), //
+ Types<f32, AFloat>(AFloat(kPiF32), kPiF64), //
+ Types<f32, AFloat>(AFloat(kSubnormalF32), kSubnormalF32), //
+ Types<f32, AFloat>(AFloat(-kSubnormalF32), -kSubnormalF32), //
+ /* Types<f16, AFloat>(1.0_a), */ //
+ /* Types<f16, AFloat>(1.0_a), */ //
+ })));
+
+INSTANTIATE_TEST_SUITE_P(
+ MaterializeVector,
+ MaterializeAbstractNumericToConcreteType,
+ testing::Combine(testing::Values(Expectation::kMaterialize),
+ testing::ValuesIn(kVectorMethods),
+ testing::ValuesIn(std::vector<Data>{
+ Types<i32V, AIntV>(0_a, 0.0), //
+ Types<i32V, AIntV>(1_a, 1.0), //
+ Types<i32V, AIntV>(-1_a, -1.0), //
+ Types<i32V, AIntV>(AInt(kHighestI32), kHighestI32), //
+ Types<i32V, AIntV>(AInt(kLowestI32), kLowestI32), //
+ Types<u32V, AIntV>(0_a, 0.0), //
+ Types<u32V, AIntV>(1_a, 1.0), //
+ Types<u32V, AIntV>(AInt(kHighestU32), kHighestU32), //
+ Types<u32V, AIntV>(AInt(kLowestU32), kLowestU32), //
+ Types<f32V, AFloatV>(0.0_a, 0.0), //
+ Types<f32V, AFloatV>(1.0_a, 1.0), //
+ Types<f32V, AFloatV>(-1.0_a, -1.0), //
+ Types<f32V, AFloatV>(AFloat(kHighestF32), kHighestF32), //
+ Types<f32V, AFloatV>(AFloat(kLowestF32), kLowestF32), //
+ Types<f32V, AFloatV>(AFloat(kPiF32), kPiF64), //
+ Types<f32V, AFloatV>(AFloat(kSubnormalF32), kSubnormalF32), //
+ Types<f32V, AFloatV>(AFloat(-kSubnormalF32), -kSubnormalF32), //
+ /* Types<f16V, AFloatV>(1.0_a), */ //
+ /* Types<f16V, AFloatV>(1.0_a), */ //
+ })));
+
+INSTANTIATE_TEST_SUITE_P(
+ MaterializeMatrix,
+ MaterializeAbstractNumericToConcreteType,
+ testing::Combine(testing::Values(Expectation::kMaterialize),
+ testing::ValuesIn(kMatrixMethods),
+ testing::ValuesIn(std::vector<Data>{
+ Types<f32M, AFloatM>(0.0_a, 0.0), //
+ Types<f32M, AFloatM>(1.0_a, 1.0), //
+ Types<f32M, AFloatM>(-1.0_a, -1.0), //
+ Types<f32M, AFloatM>(AFloat(kHighestF32), kHighestF32), //
+ Types<f32M, AFloatM>(AFloat(kLowestF32), kLowestF32), //
+ Types<f32M, AFloatM>(AFloat(kPiF32), kPiF64), //
+ Types<f32M, AFloatM>(AFloat(kSubnormalF32), kSubnormalF32), //
+ Types<f32M, AFloatM>(AFloat(-kSubnormalF32), -kSubnormalF32), //
+                                 /* Types<f16M, AFloatM>(1.0_a), */                           //
+ })));
+
+INSTANTIATE_TEST_SUITE_P(MaterializeSwitch,
+ MaterializeAbstractNumericToConcreteType,
+ testing::Combine(testing::Values(Expectation::kMaterialize),
+ testing::ValuesIn(kSwitchMethods),
+ testing::ValuesIn(std::vector<Data>{
+ Types<i32, AInt>(0_a, 0.0), //
+ Types<i32, AInt>(1_a, 1.0), //
+ Types<i32, AInt>(-1_a, -1.0), //
+ Types<i32, AInt>(AInt(kHighestI32), kHighestI32), //
+ Types<i32, AInt>(AInt(kLowestI32), kLowestI32), //
+ Types<u32, AInt>(0_a, 0.0), //
+ Types<u32, AInt>(1_a, 1.0), //
+ Types<u32, AInt>(AInt(kHighestU32), kHighestU32), //
+ Types<u32, AInt>(AInt(kLowestU32), kLowestU32), //
+ })));
+
+INSTANTIATE_TEST_SUITE_P(MaterializeWorkgroupSize,
+ MaterializeAbstractNumericToConcreteType,
+ testing::Combine(testing::Values(Expectation::kMaterialize),
+ testing::Values(Method::kWorkgroupSize),
+ testing::ValuesIn(std::vector<Data>{
+ Types<i32, AInt>(1_a, 1.0), //
+ Types<i32, AInt>(10_a, 10.0), //
+ Types<i32, AInt>(65535_a, 65535.0), //
+ Types<u32, AInt>(1_a, 1.0), //
+ Types<u32, AInt>(10_a, 10.0), //
+ Types<u32, AInt>(65535_a, 65535.0), //
+ })));
+
+INSTANTIATE_TEST_SUITE_P(NoMaterialize,
+ MaterializeAbstractNumericToConcreteType,
+ testing::Combine(testing::Values(Expectation::kNoMaterialize),
+ testing::ValuesIn(kNoMaterializeMethods),
+ testing::ValuesIn(std::vector<Data>{
+ Types<AInt, AInt>(1_a, 1_a), //
+ Types<AIntV, AIntV>(1_a, 1_a), //
+ Types<AFloat, AFloat>(1.0_a, 1.0_a), //
+ Types<AFloatV, AFloatV>(1.0_a, 1.0_a), //
+ Types<AFloatM, AFloatM>(1.0_a, 1.0_a), //
+ })));
+
+INSTANTIATE_TEST_SUITE_P(InvalidConversion,
+ MaterializeAbstractNumericToConcreteType,
+ testing::Combine(testing::Values(Expectation::kInvalidConversion),
+ testing::ValuesIn(kScalarMethods),
+ testing::ValuesIn(std::vector<Data>{
+ Types<i32, AFloat>(), //
+ Types<u32, AFloat>(), //
+ Types<i32V, AFloatV>(), //
+ Types<u32V, AFloatV>(), //
+ })));
+
+INSTANTIATE_TEST_SUITE_P(ScalarValueCannotBeRepresented,
+ MaterializeAbstractNumericToConcreteType,
+ testing::Combine(testing::Values(Expectation::kValueCannotBeRepresented),
+ testing::ValuesIn(kScalarMethods),
+ testing::ValuesIn(std::vector<Data>{
+ Types<i32, AInt>(0_a, kHighestI32 + 1), //
+ Types<i32, AInt>(0_a, kLowestI32 - 1), //
+ Types<u32, AInt>(0_a, kHighestU32 + 1), //
+ Types<u32, AInt>(0_a, kLowestU32 - 1), //
+ Types<f32, AFloat>(0.0_a, kTooBigF32), //
+ Types<f32, AFloat>(0.0_a, -kTooBigF32), //
+ /* Types<f16, AFloat>(), */ //
+ /* Types<f16, AFloat>(), */ //
+ })));
+
+INSTANTIATE_TEST_SUITE_P(VectorValueCannotBeRepresented,
+ MaterializeAbstractNumericToConcreteType,
+ testing::Combine(testing::Values(Expectation::kValueCannotBeRepresented),
+ testing::ValuesIn(kVectorMethods),
+ testing::ValuesIn(std::vector<Data>{
+ Types<i32V, AIntV>(0_a, kHighestI32 + 1), //
+ Types<i32V, AIntV>(0_a, kLowestI32 - 1), //
+ Types<u32V, AIntV>(0_a, kHighestU32 + 1), //
+ Types<u32V, AIntV>(0_a, kLowestU32 - 1), //
+ Types<f32V, AFloatV>(0.0_a, kTooBigF32), //
+ Types<f32V, AFloatV>(0.0_a, -kTooBigF32), //
+ /* Types<f16V, AFloatV>(), */ //
+ /* Types<f16V, AFloatV>(), */ //
+ })));
+
+INSTANTIATE_TEST_SUITE_P(MatrixValueCannotBeRepresented,
+ MaterializeAbstractNumericToConcreteType,
+ testing::Combine(testing::Values(Expectation::kValueCannotBeRepresented),
+ testing::ValuesIn(kMatrixMethods),
+ testing::ValuesIn(std::vector<Data>{
+ Types<f32M, AFloatM>(0.0_a, kTooBigF32), //
+ Types<f32M, AFloatM>(0.0_a, -kTooBigF32), //
+ /* Types<f16M, AFloatM>(), */ //
+ /* Types<f16M, AFloatM>(), */ //
+ })));
+
+} // namespace materialize_abstract_numeric_to_concrete_type
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+// Tests that in the absence of a 'target type' an abstract-int will materialize to i32, and an
+// abstract-float will materialize to f32.
+////////////////////////////////////////////////////////////////////////////////////////////////////
+namespace materialize_abstract_numeric_to_default_type {
+
+// How should the materialization occur?
+enum class Method {
+ // var a = abstract_expr;
+ kVar,
+
+ // let a = abstract_expr;
+ kLet,
+
+ // min(abstract_expr, abstract_expr)
+ kBuiltinArg,
+
+ // bitcast<f32>(abstract_expr)
+ kBitcastF32Arg,
+
+ // bitcast<vec3<f32>>(abstract_expr)
+ kBitcastVec3F32Arg,
+
+ // array<i32, abstract_expr>()
+ kArrayLength,
+
+ // switch (abstract_expr) {
+ // case abstract_expr: {}
+ // default: {}
+ // }
+ kSwitch,
+
+ // @workgroup_size(abstract_expr)
+ // @compute
+ // fn f() {}
+ kWorkgroupSize,
+
+ // arr[abstract_expr]
+ kIndex,
+};
+
+static std::ostream& operator<<(std::ostream& o, Method m) {
+ switch (m) {
+ case Method::kVar:
+ return o << "var";
+ case Method::kLet:
+ return o << "let";
+ case Method::kBuiltinArg:
+ return o << "builtin-arg";
+ case Method::kBitcastF32Arg:
+ return o << "bitcast-f32-arg";
+ case Method::kBitcastVec3F32Arg:
+ return o << "bitcast-vec3-f32-arg";
+ case Method::kArrayLength:
+ return o << "array-length";
+ case Method::kSwitch:
+ return o << "switch";
+ case Method::kWorkgroupSize:
+ return o << "workgroup-size";
+ case Method::kIndex:
+ return o << "index";
+ }
+ return o << "<unknown>";
+}
+
+struct Data {
+ std::string expected_type_name;
+ std::string expected_element_type_name;
+ builder::sem_type_func_ptr expected_sem_ty;
+ std::string abstract_type_name;
+ builder::ast_expr_func_ptr abstract_expr;
+ std::variant<AInt, AFloat> materialized_value;
+ double literal_value;
+};
+
+template <typename EXPECTED_TYPE, typename ABSTRACT_TYPE, typename MATERIALIZED_TYPE>
+Data Types(MATERIALIZED_TYPE materialized_value, double literal_value) {
+ using ExpectedDataType = builder::DataType<EXPECTED_TYPE>;
+ using AbstractDataType = builder::DataType<ABSTRACT_TYPE>;
+ using TargetElementDataType = builder::DataType<typename ExpectedDataType::ElementType>;
+ return {
+ ExpectedDataType::Name(), // expected_type_name
+ TargetElementDataType::Name(), // expected_element_type_name
+ ExpectedDataType::Sem, // expected_sem_ty
+ AbstractDataType::Name(), // abstract_type_name
+ AbstractDataType::Expr, // abstract_expr
+ materialized_value,
+ literal_value,
+ };
+}
+
+static std::ostream& operator<<(std::ostream& o, const Data& c) {
+ auto print_value = [&](auto&& v) { o << v; };
+ o << "[" << c.expected_type_name << " <- " << c.abstract_type_name << "] [";
+ std::visit(print_value, c.materialized_value);
+ o << " <- " << c.literal_value << "]";
+ return o;
+}
+
+using MaterializeAbstractNumericToDefaultType =
+ resolver::ResolverTestWithParam<std::tuple<Expectation, Method, Data>>;
+
+TEST_P(MaterializeAbstractNumericToDefaultType, Test) {
+ // Once F16 is properly supported, we'll need to enable this:
+ // Enable(ast::Extension::kF16);
+
+ const auto& param = GetParam();
+ const auto& expectation = std::get<0>(param);
+ const auto& method = std::get<1>(param);
+ const auto& data = std::get<2>(param);
+
+ ast::ExpressionList abstract_exprs;
+ auto abstract_expr = [&] {
+ auto* expr = data.abstract_expr(*this, data.literal_value);
+ abstract_exprs.emplace_back(expr);
+ return expr;
+ };
+ switch (method) {
+ case Method::kVar:
+ WrapInFunction(Decl(Var("a", nullptr, abstract_expr())));
+ break;
+ case Method::kLet:
+ WrapInFunction(Decl(Let("a", nullptr, abstract_expr())));
+ break;
+ case Method::kBuiltinArg:
+ WrapInFunction(CallStmt(Call("min", abstract_expr(), abstract_expr())));
+ break;
+ case Method::kBitcastF32Arg:
+ WrapInFunction(Bitcast<f32>(abstract_expr()));
+ break;
+ case Method::kBitcastVec3F32Arg:
+ WrapInFunction(Bitcast(ty.vec3<f32>(), abstract_expr()));
+ break;
+ case Method::kArrayLength:
+ WrapInFunction(Construct(ty.array(ty.i32(), abstract_expr())));
+ break;
+ case Method::kSwitch:
+ WrapInFunction(Switch(abstract_expr(),
+ Case(abstract_expr()->As<ast::IntLiteralExpression>()),
+ DefaultCase()));
+ break;
+ case Method::kWorkgroupSize:
+ Func("f", {}, ty.void_(), {},
+ {WorkgroupSize(abstract_expr()), Stage(ast::PipelineStage::kCompute)});
+ break;
+ case Method::kIndex:
+ Global("arr", ty.array<i32, 4>(), ast::StorageClass::kPrivate);
+ WrapInFunction(IndexAccessor("arr", abstract_expr()));
+ break;
+ }
+
+ auto check_types_and_values = [&](const sem::Expression* expr) {
+ auto* expected_sem_ty = data.expected_sem_ty(*this);
+
+ EXPECT_TYPE(expr->Type(), expected_sem_ty);
+ EXPECT_TYPE(expr->ConstantValue().Type(), expected_sem_ty);
+
+ uint32_t num_elems = 0;
+ const sem::Type* expected_sem_el_ty = sem::Type::ElementOf(expected_sem_ty, &num_elems);
+ EXPECT_TYPE(expr->ConstantValue().ElementType(), expected_sem_el_ty);
+ expr->ConstantValue().WithElements([&](auto&& vec) {
+ using VEC_TY = std::decay_t<decltype(vec)>;
+ using EL_TY = typename VEC_TY::value_type;
+ ASSERT_TRUE(std::holds_alternative<EL_TY>(data.materialized_value));
+ VEC_TY expected(num_elems, std::get<EL_TY>(data.materialized_value));
+ EXPECT_EQ(vec, expected);
+ });
+ };
+
+ switch (expectation) {
+ case Expectation::kMaterialize: {
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
+ for (auto* expr : abstract_exprs) {
+ auto* materialize = Sem().Get<sem::Materialize>(expr);
+ ASSERT_NE(materialize, nullptr);
+ check_types_and_values(materialize);
+ }
+ break;
+ }
+ case Expectation::kInvalidConversion: {
+ ASSERT_FALSE(r()->Resolve());
+ std::string expect;
+ switch (method) {
+ case Method::kBuiltinArg:
+ expect = "error: no matching call to min(" + data.abstract_type_name + ", " +
+ data.abstract_type_name + ")";
+ break;
+ default:
+ expect = "error: cannot convert value of type '" + data.abstract_type_name +
+ "' to type '" + data.expected_type_name + "'";
+ break;
+ }
+ EXPECT_THAT(r()->error(), testing::StartsWith(expect));
+ break;
+ }
+ case Expectation::kValueCannotBeRepresented:
+ ASSERT_FALSE(r()->Resolve());
+ EXPECT_THAT(r()->error(), testing::HasSubstr("cannot be represented as '" +
+ data.expected_element_type_name + "'"));
+ break;
+ default:
+ FAIL() << "unhandled expectation: " << expectation;
+ }
+}
+
+/// Methods that support scalar materialization
+constexpr Method kScalarMethods[] = {
+ Method::kLet,
+ Method::kVar,
+ Method::kBuiltinArg,
+ Method::kBitcastF32Arg,
+};
+
+/// Methods that support abstract-integer materialization
+/// Note: Doesn't contain kWorkgroupSize or kArrayLength as they have tighter constraints on the
+/// range of allowed integer values.
+constexpr Method kAIntMethods[] = {
+ Method::kSwitch,
+ Method::kIndex,
+};
+
+/// Methods that support vector materialization
+constexpr Method kVectorMethods[] = {
+ Method::kLet,
+ Method::kVar,
+ Method::kBuiltinArg,
+ Method::kBitcastVec3F32Arg,
+};
+
+/// Methods that support matrix materialization
+constexpr Method kMatrixMethods[] = {
+ Method::kLet,
+ Method::kVar,
+};
+
+INSTANTIATE_TEST_SUITE_P(
+ MaterializeScalar,
+ MaterializeAbstractNumericToDefaultType,
+ testing::Combine(testing::Values(Expectation::kMaterialize),
+ testing::ValuesIn(kScalarMethods),
+ testing::ValuesIn(std::vector<Data>{
+ Types<i32, AInt>(0_a, 0.0), //
+ Types<i32, AInt>(1_a, 1.0), //
+ Types<i32, AInt>(-1_a, -1.0), //
+ Types<i32, AInt>(AInt(kHighestI32), kHighestI32), //
+ Types<i32, AInt>(AInt(kLowestI32), kLowestI32), //
+ Types<f32, AFloat>(0.0_a, 0.0), //
+ Types<f32, AFloat>(AFloat(kHighestF32), kHighestF32), //
+ Types<f32, AFloat>(AFloat(kLowestF32), kLowestF32), //
+ Types<f32, AFloat>(AFloat(kPiF32), kPiF64), //
+ Types<f32, AFloat>(AFloat(kSubnormalF32), kSubnormalF32), //
+ Types<f32, AFloat>(AFloat(-kSubnormalF32), -kSubnormalF32), //
+ })));
+
+INSTANTIATE_TEST_SUITE_P(
+ MaterializeVector,
+ MaterializeAbstractNumericToDefaultType,
+ testing::Combine(testing::Values(Expectation::kMaterialize),
+ testing::ValuesIn(kVectorMethods),
+ testing::ValuesIn(std::vector<Data>{
+ Types<i32V, AIntV>(0_a, 0.0), //
+ Types<i32V, AIntV>(1_a, 1.0), //
+ Types<i32V, AIntV>(-1_a, -1.0), //
+ Types<i32V, AIntV>(AInt(kHighestI32), kHighestI32), //
+ Types<i32V, AIntV>(AInt(kLowestI32), kLowestI32), //
+ Types<f32V, AFloatV>(0.0_a, 0.0), //
+ Types<f32V, AFloatV>(1.0_a, 1.0), //
+ Types<f32V, AFloatV>(-1.0_a, -1.0), //
+ Types<f32V, AFloatV>(AFloat(kHighestF32), kHighestF32), //
+ Types<f32V, AFloatV>(AFloat(kLowestF32), kLowestF32), //
+ Types<f32V, AFloatV>(AFloat(kPiF32), kPiF64), //
+ Types<f32V, AFloatV>(AFloat(kSubnormalF32), kSubnormalF32), //
+ Types<f32V, AFloatV>(AFloat(-kSubnormalF32), -kSubnormalF32), //
+ })));
+
+INSTANTIATE_TEST_SUITE_P(
+ MaterializeMatrix,
+ MaterializeAbstractNumericToDefaultType,
+ testing::Combine(testing::Values(Expectation::kMaterialize),
+ testing::ValuesIn(kMatrixMethods),
+ testing::ValuesIn(std::vector<Data>{
+ Types<f32M, AFloatM>(0.0_a, 0.0), //
+ Types<f32M, AFloatM>(1.0_a, 1.0), //
+ Types<f32M, AFloatM>(-1.0_a, -1.0), //
+ Types<f32M, AFloatM>(AFloat(kHighestF32), kHighestF32), //
+ Types<f32M, AFloatM>(AFloat(kLowestF32), kLowestF32), //
+ Types<f32M, AFloatM>(AFloat(kPiF32), kPiF64), //
+ Types<f32M, AFloatM>(AFloat(kSubnormalF32), kSubnormalF32), //
+ Types<f32M, AFloatM>(AFloat(-kSubnormalF32), -kSubnormalF32), //
+ })));
+
+INSTANTIATE_TEST_SUITE_P(MaterializeAInt,
+ MaterializeAbstractNumericToDefaultType,
+ testing::Combine(testing::Values(Expectation::kMaterialize),
+ testing::ValuesIn(kAIntMethods),
+ testing::ValuesIn(std::vector<Data>{
+ Types<i32, AInt>(0_a, 0.0), //
+ Types<i32, AInt>(10_a, 10.0), //
+ Types<i32, AInt>(AInt(kHighestI32), kHighestI32), //
+ Types<i32, AInt>(AInt(kLowestI32), kLowestI32), //
+ })));
+
+INSTANTIATE_TEST_SUITE_P(
+ MaterializeArrayLength,
+ MaterializeAbstractNumericToDefaultType,
+ testing::Combine(testing::Values(Expectation::kMaterialize),
+ testing::Values(Method::kArrayLength),
+ testing::ValuesIn(std::vector<Data>{
+ Types<i32, AInt>(1_a, 1.0), //
+ Types<i32, AInt>(10_a, 10.0), //
+ Types<i32, AInt>(1000_a, 1000.0), //
+ // Note: kHighestI32 cannot be used due to max-byte-size validation
+ })));
+
+INSTANTIATE_TEST_SUITE_P(MaterializeWorkgroupSize,
+ MaterializeAbstractNumericToDefaultType,
+ testing::Combine(testing::Values(Expectation::kMaterialize),
+ testing::Values(Method::kWorkgroupSize),
+ testing::ValuesIn(std::vector<Data>{
+ Types<i32, AInt>(1_a, 1.0), //
+ Types<i32, AInt>(10_a, 10.0), //
+ Types<i32, AInt>(65535_a, 65535.0), //
+ })));
+
+INSTANTIATE_TEST_SUITE_P(ScalarValueCannotBeRepresented,
+ MaterializeAbstractNumericToDefaultType,
+ testing::Combine(testing::Values(Expectation::kValueCannotBeRepresented),
+ testing::ValuesIn(kScalarMethods),
+ testing::ValuesIn(std::vector<Data>{
+ Types<i32, AInt>(0_a, kHighestI32 + 1), //
+ Types<i32, AInt>(0_a, kLowestI32 - 1), //
+ Types<f32, AFloat>(0.0_a, kTooBigF32), //
+ Types<f32, AFloat>(0.0_a, -kTooBigF32), //
+ })));
+
+INSTANTIATE_TEST_SUITE_P(VectorValueCannotBeRepresented,
+ MaterializeAbstractNumericToDefaultType,
+ testing::Combine(testing::Values(Expectation::kValueCannotBeRepresented),
+ testing::ValuesIn(kVectorMethods),
+ testing::ValuesIn(std::vector<Data>{
+ Types<i32V, AIntV>(0_a, kHighestI32 + 1), //
+ Types<i32V, AIntV>(0_a, kLowestI32 - 1), //
+ Types<i32V, AIntV>(0_a, kHighestU32 + 1), //
+ Types<f32V, AFloatV>(0.0_a, kTooBigF32), //
+ Types<f32V, AFloatV>(0.0_a, -kTooBigF32), //
+ })));
+
+INSTANTIATE_TEST_SUITE_P(MatrixValueCannotBeRepresented,
+ MaterializeAbstractNumericToDefaultType,
+ testing::Combine(testing::Values(Expectation::kValueCannotBeRepresented),
+ testing::ValuesIn(kMatrixMethods),
+ testing::ValuesIn(std::vector<Data>{
+ Types<f32M, AFloatM>(0.0_a, kTooBigF32), //
+ Types<f32M, AFloatM>(0.0_a, -kTooBigF32), //
+ })));
+
+INSTANTIATE_TEST_SUITE_P(AIntValueCannotBeRepresented,
+ MaterializeAbstractNumericToDefaultType,
+ testing::Combine(testing::Values(Expectation::kValueCannotBeRepresented),
+ testing::ValuesIn(kAIntMethods),
+ testing::ValuesIn(std::vector<Data>{
+ Types<i32, AInt>(0_a, kHighestI32 + 1), //
+ Types<i32, AInt>(0_a, kLowestI32 - 1), //
+ })));
+
+INSTANTIATE_TEST_SUITE_P(WorkgroupSizeValueCannotBeRepresented,
+ MaterializeAbstractNumericToDefaultType,
+ testing::Combine(testing::Values(Expectation::kValueCannotBeRepresented),
+ testing::Values(Method::kWorkgroupSize),
+ testing::ValuesIn(std::vector<Data>{
+ Types<i32, AInt>(0_a, kHighestI32 + 1), //
+ Types<i32, AInt>(0_a, kLowestI32 - 1), //
+ })));
+
+INSTANTIATE_TEST_SUITE_P(ArrayLengthValueCannotBeRepresented,
+ MaterializeAbstractNumericToDefaultType,
+ testing::Combine(testing::Values(Expectation::kValueCannotBeRepresented),
+ testing::Values(Method::kArrayLength),
+ testing::ValuesIn(std::vector<Data>{
+ Types<i32, AInt>(0_a, kHighestI32 + 1), //
+ })));
+
+} // namespace materialize_abstract_numeric_to_default_type
+
+} // namespace
+} // namespace tint::resolver
diff --git a/chromium/third_party/dawn/src/tint/resolver/pipeline_overridable_constant_test.cc b/chromium/third_party/dawn/src/tint/resolver/pipeline_overridable_constant_test.cc
index 222a5c00d70..035936c3f3e 100644
--- a/chromium/third_party/dawn/src/tint/resolver/pipeline_overridable_constant_test.cc
+++ b/chromium/third_party/dawn/src/tint/resolver/pipeline_overridable_constant_test.cc
@@ -16,90 +16,91 @@
#include "src/tint/resolver/resolver_test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::resolver {
namespace {
class ResolverPipelineOverridableConstantTest : public ResolverTest {
- protected:
- /// Verify that the AST node `var` was resolved to an overridable constant
- /// with an ID equal to `id`.
- /// @param var the overridable constant AST node
- /// @param id the expected constant ID
- void ExpectConstantId(const ast::Variable* var, uint16_t id) {
- auto* sem = Sem().Get<sem::GlobalVariable>(var);
- ASSERT_NE(sem, nullptr);
- EXPECT_EQ(sem->Declaration(), var);
- EXPECT_TRUE(sem->IsOverridable());
- EXPECT_EQ(sem->ConstantId(), id);
- EXPECT_FALSE(sem->ConstantValue());
- }
+ protected:
+ /// Verify that the AST node `var` was resolved to an overridable constant
+ /// with an ID equal to `id`.
+ /// @param var the overridable constant AST node
+ /// @param id the expected constant ID
+ void ExpectConstantId(const ast::Variable* var, uint16_t id) {
+ auto* sem = Sem().Get<sem::GlobalVariable>(var);
+ ASSERT_NE(sem, nullptr);
+ EXPECT_EQ(sem->Declaration(), var);
+ EXPECT_TRUE(sem->IsOverridable());
+ EXPECT_EQ(sem->ConstantId(), id);
+ EXPECT_FALSE(sem->ConstantValue());
+ }
};
TEST_F(ResolverPipelineOverridableConstantTest, NonOverridable) {
- auto* a = GlobalConst("a", ty.f32(), Expr(1.f));
+ auto* a = GlobalConst("a", ty.f32(), Expr(1_f));
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- auto* sem_a = Sem().Get<sem::GlobalVariable>(a);
- ASSERT_NE(sem_a, nullptr);
- EXPECT_EQ(sem_a->Declaration(), a);
- EXPECT_FALSE(sem_a->IsOverridable());
- EXPECT_TRUE(sem_a->ConstantValue());
+ auto* sem_a = Sem().Get<sem::GlobalVariable>(a);
+ ASSERT_NE(sem_a, nullptr);
+ EXPECT_EQ(sem_a->Declaration(), a);
+ EXPECT_FALSE(sem_a->IsOverridable());
+ EXPECT_TRUE(sem_a->ConstantValue());
}
TEST_F(ResolverPipelineOverridableConstantTest, WithId) {
- auto* a = Override("a", ty.f32(), Expr(1.f), {Id(7u)});
+ auto* a = Override("a", ty.f32(), Expr(1_f), {Id(7u)});
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ExpectConstantId(a, 7u);
+ ExpectConstantId(a, 7u);
}
TEST_F(ResolverPipelineOverridableConstantTest, WithoutId) {
- auto* a = Override("a", ty.f32(), Expr(1.f));
+ auto* a = Override("a", ty.f32(), Expr(1_f));
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ExpectConstantId(a, 0u);
+ ExpectConstantId(a, 0u);
}
TEST_F(ResolverPipelineOverridableConstantTest, WithAndWithoutIds) {
- std::vector<ast::Variable*> variables;
- auto* a = Override("a", ty.f32(), Expr(1.f));
- auto* b = Override("b", ty.f32(), Expr(1.f));
- auto* c = Override("c", ty.f32(), Expr(1.f), {Id(2u)});
- auto* d = Override("d", ty.f32(), Expr(1.f), {Id(4u)});
- auto* e = Override("e", ty.f32(), Expr(1.f));
- auto* f = Override("f", ty.f32(), Expr(1.f), {Id(1u)});
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
-
- // Verify that constant id allocation order is deterministic.
- ExpectConstantId(a, 0u);
- ExpectConstantId(b, 3u);
- ExpectConstantId(c, 2u);
- ExpectConstantId(d, 4u);
- ExpectConstantId(e, 5u);
- ExpectConstantId(f, 1u);
+ std::vector<ast::Variable*> variables;
+ auto* a = Override("a", ty.f32(), Expr(1_f));
+ auto* b = Override("b", ty.f32(), Expr(1_f));
+ auto* c = Override("c", ty.f32(), Expr(1_f), {Id(2u)});
+ auto* d = Override("d", ty.f32(), Expr(1_f), {Id(4u)});
+ auto* e = Override("e", ty.f32(), Expr(1_f));
+ auto* f = Override("f", ty.f32(), Expr(1_f), {Id(1u)});
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+
+ // Verify that constant id allocation order is deterministic.
+ ExpectConstantId(a, 0u);
+ ExpectConstantId(b, 3u);
+ ExpectConstantId(c, 2u);
+ ExpectConstantId(d, 4u);
+ ExpectConstantId(e, 5u);
+ ExpectConstantId(f, 1u);
}
TEST_F(ResolverPipelineOverridableConstantTest, DuplicateIds) {
- Override("a", ty.f32(), Expr(1.f), {Id(Source{{12, 34}}, 7u)});
- Override("b", ty.f32(), Expr(1.f), {Id(Source{{56, 78}}, 7u)});
+ Override("a", ty.f32(), Expr(1_f), {Id(Source{{12, 34}}, 7u)});
+ Override("b", ty.f32(), Expr(1_f), {Id(Source{{56, 78}}, 7u)});
- EXPECT_FALSE(r()->Resolve());
+ EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), R"(56:78 error: pipeline constant IDs must be unique
+ EXPECT_EQ(r()->error(), R"(56:78 error: pipeline constant IDs must be unique
12:34 note: a pipeline constant with an ID of 7 was previously declared here:)");
}
TEST_F(ResolverPipelineOverridableConstantTest, IdTooLarge) {
- Override("a", ty.f32(), Expr(1.f), {Id(Source{{12, 34}}, 65536u)});
+ Override("a", ty.f32(), Expr(1_f), {Id(Source{{12, 34}}, 65536u)});
- EXPECT_FALSE(r()->Resolve());
+ EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: pipeline constant IDs must be between 0 and 65535");
+ EXPECT_EQ(r()->error(), "12:34 error: pipeline constant IDs must be between 0 and 65535");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/resolver/ptr_ref_test.cc b/chromium/third_party/dawn/src/tint/resolver/ptr_ref_test.cc
index f19f9649ed7..69b7e54051f 100644
--- a/chromium/third_party/dawn/src/tint/resolver/ptr_ref_test.cc
+++ b/chromium/third_party/dawn/src/tint/resolver/ptr_ref_test.cc
@@ -14,108 +14,95 @@
#include "src/tint/resolver/resolver.h"
#include "src/tint/resolver/resolver_test_helper.h"
-#include "src/tint/sem/reference_type.h"
+#include "src/tint/sem/reference.h"
#include "gmock/gmock.h"
namespace tint::resolver {
namespace {
-struct ResolverPtrRefTest : public resolver::TestHelper,
- public testing::Test {};
+struct ResolverPtrRefTest : public resolver::TestHelper, public testing::Test {};
TEST_F(ResolverPtrRefTest, AddressOf) {
- // var v : i32;
- // &v
+ // var v : i32;
+ // &v
- auto* v = Var("v", ty.i32(), ast::StorageClass::kNone);
- auto* expr = AddressOf(v);
+ auto* v = Var("v", ty.i32(), ast::StorageClass::kNone);
+ auto* expr = AddressOf(v);
- WrapInFunction(v, expr);
+ WrapInFunction(v, expr);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_TRUE(TypeOf(expr)->Is<sem::Pointer>());
- EXPECT_TRUE(TypeOf(expr)->As<sem::Pointer>()->StoreType()->Is<sem::I32>());
- EXPECT_EQ(TypeOf(expr)->As<sem::Pointer>()->StorageClass(),
- ast::StorageClass::kFunction);
+ ASSERT_TRUE(TypeOf(expr)->Is<sem::Pointer>());
+ EXPECT_TRUE(TypeOf(expr)->As<sem::Pointer>()->StoreType()->Is<sem::I32>());
+ EXPECT_EQ(TypeOf(expr)->As<sem::Pointer>()->StorageClass(), ast::StorageClass::kFunction);
}
TEST_F(ResolverPtrRefTest, AddressOfThenDeref) {
- // var v : i32;
- // *(&v)
+ // var v : i32;
+ // *(&v)
- auto* v = Var("v", ty.i32(), ast::StorageClass::kNone);
- auto* expr = Deref(AddressOf(v));
+ auto* v = Var("v", ty.i32(), ast::StorageClass::kNone);
+ auto* expr = Deref(AddressOf(v));
- WrapInFunction(v, expr);
+ WrapInFunction(v, expr);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_TRUE(TypeOf(expr)->Is<sem::Reference>());
- EXPECT_TRUE(TypeOf(expr)->As<sem::Reference>()->StoreType()->Is<sem::I32>());
+ ASSERT_TRUE(TypeOf(expr)->Is<sem::Reference>());
+ EXPECT_TRUE(TypeOf(expr)->As<sem::Reference>()->StoreType()->Is<sem::I32>());
}
TEST_F(ResolverPtrRefTest, DefaultPtrStorageClass) {
- // https://gpuweb.github.io/gpuweb/wgsl/#storage-class
-
- auto* buf = Structure("S", {Member("m", ty.i32())});
- auto* function = Var("f", ty.i32());
- auto* private_ = Global("p", ty.i32(), ast::StorageClass::kPrivate);
- auto* workgroup = Global("w", ty.i32(), ast::StorageClass::kWorkgroup);
- auto* uniform = Global("ub", ty.Of(buf), ast::StorageClass::kUniform,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(0),
- });
- auto* storage = Global("sb", ty.Of(buf), ast::StorageClass::kStorage,
- ast::AttributeList{
- create<ast::BindingAttribute>(1),
- create<ast::GroupAttribute>(0),
- });
-
- auto* function_ptr =
- Const("f_ptr", ty.pointer(ty.i32(), ast::StorageClass::kFunction),
- AddressOf(function));
- auto* private_ptr =
- Const("p_ptr", ty.pointer(ty.i32(), ast::StorageClass::kPrivate),
- AddressOf(private_));
- auto* workgroup_ptr =
- Const("w_ptr", ty.pointer(ty.i32(), ast::StorageClass::kWorkgroup),
- AddressOf(workgroup));
- auto* uniform_ptr =
- Const("ub_ptr", ty.pointer(ty.Of(buf), ast::StorageClass::kUniform),
- AddressOf(uniform));
- auto* storage_ptr =
- Const("sb_ptr", ty.pointer(ty.Of(buf), ast::StorageClass::kStorage),
- AddressOf(storage));
-
- WrapInFunction(function, function_ptr, private_ptr, workgroup_ptr,
- uniform_ptr, storage_ptr);
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
-
- ASSERT_TRUE(TypeOf(function_ptr)->Is<sem::Pointer>())
- << "function_ptr is " << TypeOf(function_ptr)->TypeInfo().name;
- ASSERT_TRUE(TypeOf(private_ptr)->Is<sem::Pointer>())
- << "private_ptr is " << TypeOf(private_ptr)->TypeInfo().name;
- ASSERT_TRUE(TypeOf(workgroup_ptr)->Is<sem::Pointer>())
- << "workgroup_ptr is " << TypeOf(workgroup_ptr)->TypeInfo().name;
- ASSERT_TRUE(TypeOf(uniform_ptr)->Is<sem::Pointer>())
- << "uniform_ptr is " << TypeOf(uniform_ptr)->TypeInfo().name;
- ASSERT_TRUE(TypeOf(storage_ptr)->Is<sem::Pointer>())
- << "storage_ptr is " << TypeOf(storage_ptr)->TypeInfo().name;
-
- EXPECT_EQ(TypeOf(function_ptr)->As<sem::Pointer>()->Access(),
- ast::Access::kReadWrite);
- EXPECT_EQ(TypeOf(private_ptr)->As<sem::Pointer>()->Access(),
- ast::Access::kReadWrite);
- EXPECT_EQ(TypeOf(workgroup_ptr)->As<sem::Pointer>()->Access(),
- ast::Access::kReadWrite);
- EXPECT_EQ(TypeOf(uniform_ptr)->As<sem::Pointer>()->Access(),
- ast::Access::kRead);
- EXPECT_EQ(TypeOf(storage_ptr)->As<sem::Pointer>()->Access(),
- ast::Access::kRead);
+ // https://gpuweb.github.io/gpuweb/wgsl/#storage-class
+
+ auto* buf = Structure("S", {Member("m", ty.i32())});
+ auto* function = Var("f", ty.i32());
+ auto* private_ = Global("p", ty.i32(), ast::StorageClass::kPrivate);
+ auto* workgroup = Global("w", ty.i32(), ast::StorageClass::kWorkgroup);
+ auto* uniform = Global("ub", ty.Of(buf), ast::StorageClass::kUniform,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(0),
+ });
+ auto* storage = Global("sb", ty.Of(buf), ast::StorageClass::kStorage,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(1),
+ create<ast::GroupAttribute>(0),
+ });
+
+ auto* function_ptr =
+ Let("f_ptr", ty.pointer(ty.i32(), ast::StorageClass::kFunction), AddressOf(function));
+ auto* private_ptr =
+ Let("p_ptr", ty.pointer(ty.i32(), ast::StorageClass::kPrivate), AddressOf(private_));
+ auto* workgroup_ptr =
+ Let("w_ptr", ty.pointer(ty.i32(), ast::StorageClass::kWorkgroup), AddressOf(workgroup));
+ auto* uniform_ptr =
+ Let("ub_ptr", ty.pointer(ty.Of(buf), ast::StorageClass::kUniform), AddressOf(uniform));
+ auto* storage_ptr =
+ Let("sb_ptr", ty.pointer(ty.Of(buf), ast::StorageClass::kStorage), AddressOf(storage));
+
+ WrapInFunction(function, function_ptr, private_ptr, workgroup_ptr, uniform_ptr, storage_ptr);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+
+ ASSERT_TRUE(TypeOf(function_ptr)->Is<sem::Pointer>())
+ << "function_ptr is " << TypeOf(function_ptr)->TypeInfo().name;
+ ASSERT_TRUE(TypeOf(private_ptr)->Is<sem::Pointer>())
+ << "private_ptr is " << TypeOf(private_ptr)->TypeInfo().name;
+ ASSERT_TRUE(TypeOf(workgroup_ptr)->Is<sem::Pointer>())
+ << "workgroup_ptr is " << TypeOf(workgroup_ptr)->TypeInfo().name;
+ ASSERT_TRUE(TypeOf(uniform_ptr)->Is<sem::Pointer>())
+ << "uniform_ptr is " << TypeOf(uniform_ptr)->TypeInfo().name;
+ ASSERT_TRUE(TypeOf(storage_ptr)->Is<sem::Pointer>())
+ << "storage_ptr is " << TypeOf(storage_ptr)->TypeInfo().name;
+
+ EXPECT_EQ(TypeOf(function_ptr)->As<sem::Pointer>()->Access(), ast::Access::kReadWrite);
+ EXPECT_EQ(TypeOf(private_ptr)->As<sem::Pointer>()->Access(), ast::Access::kReadWrite);
+ EXPECT_EQ(TypeOf(workgroup_ptr)->As<sem::Pointer>()->Access(), ast::Access::kReadWrite);
+ EXPECT_EQ(TypeOf(uniform_ptr)->As<sem::Pointer>()->Access(), ast::Access::kRead);
+ EXPECT_EQ(TypeOf(storage_ptr)->As<sem::Pointer>()->Access(), ast::Access::kRead);
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/resolver/ptr_ref_validation_test.cc b/chromium/third_party/dawn/src/tint/resolver/ptr_ref_validation_test.cc
index f86f6e28d58..b011dd03821 100644
--- a/chromium/third_party/dawn/src/tint/resolver/ptr_ref_validation_test.cc
+++ b/chromium/third_party/dawn/src/tint/resolver/ptr_ref_validation_test.cc
@@ -15,157 +15,149 @@
#include "src/tint/ast/bitcast_expression.h"
#include "src/tint/resolver/resolver.h"
#include "src/tint/resolver/resolver_test_helper.h"
-#include "src/tint/sem/reference_type.h"
+#include "src/tint/sem/reference.h"
#include "gmock/gmock.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::resolver {
namespace {
-struct ResolverPtrRefValidationTest : public resolver::TestHelper,
- public testing::Test {};
+struct ResolverPtrRefValidationTest : public resolver::TestHelper, public testing::Test {};
TEST_F(ResolverPtrRefValidationTest, AddressOfLiteral) {
- // &1
+ // &1
- auto* expr = AddressOf(Expr(Source{{12, 34}}, 1));
+ auto* expr = AddressOf(Expr(Source{{12, 34}}, 1_i));
- WrapInFunction(expr);
+ WrapInFunction(expr);
- EXPECT_FALSE(r()->Resolve());
+ EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), "12:34 error: cannot take the address of expression");
+ EXPECT_EQ(r()->error(), "12:34 error: cannot take the address of expression");
}
TEST_F(ResolverPtrRefValidationTest, AddressOfLet) {
- // let l : i32 = 1;
- // &l
- auto* l = Const("l", ty.i32(), Expr(1));
- auto* expr = AddressOf(Expr(Source{{12, 34}}, "l"));
+ // let l : i32 = 1;
+ // &l
+ auto* l = Let("l", ty.i32(), Expr(1_i));
+ auto* expr = AddressOf(Expr(Source{{12, 34}}, "l"));
- WrapInFunction(l, expr);
+ WrapInFunction(l, expr);
- EXPECT_FALSE(r()->Resolve());
+ EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), "12:34 error: cannot take the address of expression");
+ EXPECT_EQ(r()->error(), "12:34 error: cannot take the address of expression");
}
TEST_F(ResolverPtrRefValidationTest, AddressOfHandle) {
- // @group(0) @binding(0) var t: texture_3d<f32>;
- // &t
- Global("t", ty.sampled_texture(ast::TextureDimension::k3d, ty.f32()),
- GroupAndBinding(0u, 0u));
- auto* expr = AddressOf(Expr(Source{{12, 34}}, "t"));
- WrapInFunction(expr);
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: cannot take the address of expression in handle "
- "storage class");
+ // @group(0) @binding(0) var t: texture_3d<f32>;
+ // &t
+ Global("t", ty.sampled_texture(ast::TextureDimension::k3d, ty.f32()), GroupAndBinding(0u, 0u));
+ auto* expr = AddressOf(Expr(Source{{12, 34}}, "t"));
+ WrapInFunction(expr);
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: cannot take the address of expression in handle "
+ "storage class");
}
TEST_F(ResolverPtrRefValidationTest, AddressOfVectorComponent_MemberAccessor) {
- // var v : vec4<i32>;
- // &v.y
- auto* v = Var("v", ty.vec4<i32>());
- auto* expr = AddressOf(MemberAccessor(Source{{12, 34}}, "v", "y"));
+ // var v : vec4<i32>;
+ // &v.y
+ auto* v = Var("v", ty.vec4<i32>());
+ auto* expr = AddressOf(MemberAccessor(Source{{12, 34}}, "v", "y"));
- WrapInFunction(v, expr);
+ WrapInFunction(v, expr);
- EXPECT_FALSE(r()->Resolve());
+ EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: cannot take the address of a vector component");
+ EXPECT_EQ(r()->error(), "12:34 error: cannot take the address of a vector component");
}
TEST_F(ResolverPtrRefValidationTest, AddressOfVectorComponent_IndexAccessor) {
- // var v : vec4<i32>;
- // &v[2]
- auto* v = Var("v", ty.vec4<i32>());
- auto* expr = AddressOf(IndexAccessor(Source{{12, 34}}, "v", 2));
+ // var v : vec4<i32>;
+ // &v[2i]
+ auto* v = Var("v", ty.vec4<i32>());
+ auto* expr = AddressOf(IndexAccessor(Source{{12, 34}}, "v", 2_i));
- WrapInFunction(v, expr);
+ WrapInFunction(v, expr);
- EXPECT_FALSE(r()->Resolve());
+ EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: cannot take the address of a vector component");
+ EXPECT_EQ(r()->error(), "12:34 error: cannot take the address of a vector component");
}
TEST_F(ResolverPtrRefValidationTest, IndirectOfAddressOfHandle) {
- // @group(0) @binding(0) var t: texture_3d<f32>;
- // *&t
- Global("t", ty.sampled_texture(ast::TextureDimension::k3d, ty.f32()),
- GroupAndBinding(0u, 0u));
- auto* expr = Deref(AddressOf(Expr(Source{{12, 34}}, "t")));
- WrapInFunction(expr);
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: cannot take the address of expression in handle "
- "storage class");
+ // @group(0) @binding(0) var t: texture_3d<f32>;
+ // *&t
+ Global("t", ty.sampled_texture(ast::TextureDimension::k3d, ty.f32()), GroupAndBinding(0u, 0u));
+ auto* expr = Deref(AddressOf(Expr(Source{{12, 34}}, "t")));
+ WrapInFunction(expr);
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: cannot take the address of expression in handle "
+ "storage class");
}
TEST_F(ResolverPtrRefValidationTest, DerefOfLiteral) {
- // *1
+ // *1
- auto* expr = Deref(Expr(Source{{12, 34}}, 1));
+ auto* expr = Deref(Expr(Source{{12, 34}}, 1_i));
- WrapInFunction(expr);
+ WrapInFunction(expr);
- EXPECT_FALSE(r()->Resolve());
+ EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: cannot dereference expression of type 'i32'");
+ EXPECT_EQ(r()->error(), "12:34 error: cannot dereference expression of type 'i32'");
}
TEST_F(ResolverPtrRefValidationTest, DerefOfVar) {
- // var v : i32 = 1;
- // *1
- auto* v = Var("v", ty.i32());
- auto* expr = Deref(Expr(Source{{12, 34}}, "v"));
+ // var v : i32;
+ // *v
+ auto* v = Var("v", ty.i32());
+ auto* expr = Deref(Expr(Source{{12, 34}}, "v"));
- WrapInFunction(v, expr);
+ WrapInFunction(v, expr);
- EXPECT_FALSE(r()->Resolve());
+ EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: cannot dereference expression of type 'i32'");
+ EXPECT_EQ(r()->error(), "12:34 error: cannot dereference expression of type 'i32'");
}
TEST_F(ResolverPtrRefValidationTest, InferredPtrAccessMismatch) {
- // struct Inner {
- // arr: array<i32, 4>;
- // }
- // struct S {
- // inner: Inner;
- // }
- // @group(0) @binding(0) var<storage, read_write> s : S;
- // fn f() {
- // let p : pointer<storage, i32> = &s.inner.arr[2];
- // }
- auto* inner = Structure("Inner", {Member("arr", ty.array<i32, 4>())});
- auto* buf = Structure("S", {Member("inner", ty.Of(inner))});
- auto* storage = Global("s", ty.Of(buf), ast::StorageClass::kStorage,
- ast::Access::kReadWrite,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(0),
- });
-
- auto* expr =
- IndexAccessor(MemberAccessor(MemberAccessor(storage, "inner"), "arr"), 4);
- auto* ptr =
- Const(Source{{12, 34}}, "p", ty.pointer<i32>(ast::StorageClass::kStorage),
- AddressOf(expr));
-
- WrapInFunction(ptr);
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: cannot initialize let of type "
- "'ptr<storage, i32, read>' with value of type "
- "'ptr<storage, i32, read_write>'");
+ // struct Inner {
+ // arr: array<i32, 4u>;
+ // }
+ // struct S {
+ // inner: Inner;
+ // }
+ // @group(0) @binding(0) var<storage, read_write> s : S;
+ // fn f() {
+ // let p : pointer<storage, i32> = &s.inner.arr[2i];
+ // }
+ auto* inner = Structure("Inner", {Member("arr", ty.array<i32, 4>())});
+ auto* buf = Structure("S", {Member("inner", ty.Of(inner))});
+ auto* storage = Global("s", ty.Of(buf), ast::StorageClass::kStorage, ast::Access::kReadWrite,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(0),
+ });
+
+ auto* expr = IndexAccessor(MemberAccessor(MemberAccessor(storage, "inner"), "arr"), 2_i);
+ auto* ptr =
+ Let(Source{{12, 34}}, "p", ty.pointer<i32>(ast::StorageClass::kStorage), AddressOf(expr));
+
+ WrapInFunction(ptr);
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: cannot initialize let of type "
+ "'ptr<storage, i32, read>' with value of type "
+ "'ptr<storage, i32, read_write>'");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/resolver/resolver.cc b/chromium/third_party/dawn/src/tint/resolver/resolver.cc
index 9d125e7a2a0..90ae4b08223 100644
--- a/chromium/third_party/dawn/src/tint/resolver/resolver.cc
+++ b/chromium/third_party/dawn/src/tint/resolver/resolver.cc
@@ -50,24 +50,28 @@
#include "src/tint/ast/variable_decl_statement.h"
#include "src/tint/ast/vector.h"
#include "src/tint/ast/workgroup_attribute.h"
+#include "src/tint/resolver/uniformity.h"
+#include "src/tint/sem/abstract_float.h"
+#include "src/tint/sem/abstract_int.h"
#include "src/tint/sem/array.h"
-#include "src/tint/sem/atomic_type.h"
+#include "src/tint/sem/atomic.h"
#include "src/tint/sem/call.h"
-#include "src/tint/sem/depth_multisampled_texture_type.h"
-#include "src/tint/sem/depth_texture_type.h"
+#include "src/tint/sem/depth_multisampled_texture.h"
+#include "src/tint/sem/depth_texture.h"
#include "src/tint/sem/for_loop_statement.h"
#include "src/tint/sem/function.h"
#include "src/tint/sem/if_statement.h"
#include "src/tint/sem/loop_statement.h"
+#include "src/tint/sem/materialize.h"
#include "src/tint/sem/member_accessor_expression.h"
#include "src/tint/sem/module.h"
-#include "src/tint/sem/multisampled_texture_type.h"
-#include "src/tint/sem/pointer_type.h"
-#include "src/tint/sem/reference_type.h"
-#include "src/tint/sem/sampled_texture_type.h"
-#include "src/tint/sem/sampler_type.h"
+#include "src/tint/sem/multisampled_texture.h"
+#include "src/tint/sem/pointer.h"
+#include "src/tint/sem/reference.h"
+#include "src/tint/sem/sampled_texture.h"
+#include "src/tint/sem/sampler.h"
#include "src/tint/sem/statement.h"
-#include "src/tint/sem/storage_texture_type.h"
+#include "src/tint/sem/storage_texture.h"
#include "src/tint/sem/struct.h"
#include "src/tint/sem/switch_statement.h"
#include "src/tint/sem/type_constructor.h"
@@ -84,2847 +88,2569 @@ namespace tint::resolver {
Resolver::Resolver(ProgramBuilder* builder)
: builder_(builder),
diagnostics_(builder->Diagnostics()),
- builtin_table_(BuiltinTable::Create(*builder)) {}
+ intrinsic_table_(IntrinsicTable::Create(*builder)),
+ sem_(builder, dependencies_),
+ validator_(builder, sem_) {}
Resolver::~Resolver() = default;
bool Resolver::Resolve() {
- if (builder_->Diagnostics().contains_errors()) {
- return false;
- }
+ if (builder_->Diagnostics().contains_errors()) {
+ return false;
+ }
- if (!DependencyGraph::Build(builder_->AST(), builder_->Symbols(),
- builder_->Diagnostics(), dependencies_)) {
- return false;
- }
+ if (!DependencyGraph::Build(builder_->AST(), builder_->Symbols(), builder_->Diagnostics(),
+ dependencies_)) {
+ return false;
+ }
- // Create the semantic module
- builder_->Sem().SetModule(
- builder_->create<sem::Module>(dependencies_.ordered_globals));
+ bool result = ResolveInternal();
- bool result = ResolveInternal();
+ if (!result && !diagnostics_.contains_errors()) {
+ TINT_ICE(Resolver, diagnostics_) << "resolving failed, but no error was raised";
+ return false;
+ }
- if (!result && !diagnostics_.contains_errors()) {
- TINT_ICE(Resolver, diagnostics_)
- << "resolving failed, but no error was raised";
- return false;
- }
+ // Create the semantic module
+ builder_->Sem().SetModule(builder_->create<sem::Module>(
+ std::move(dependencies_.ordered_globals), std::move(enabled_extensions_)));
- return result;
+ return result;
}
bool Resolver::ResolveInternal() {
- Mark(&builder_->AST());
-
- // Process all module-scope declarations in dependency order.
- for (auto* decl : dependencies_.ordered_globals) {
- Mark(decl);
- if (!Switch(
- decl, //
- [&](const ast::TypeDecl* td) { return TypeDecl(td); },
- [&](const ast::Function* func) { return Function(func); },
- [&](const ast::Variable* var) { return GlobalVariable(var); },
- [&](Default) {
- TINT_UNREACHABLE(Resolver, diagnostics_)
- << "unhandled global declaration: " << decl->TypeInfo().name;
- return nullptr;
- })) {
- return false;
+ Mark(&builder_->AST());
+
+ // Process all module-scope declarations in dependency order.
+ for (auto* decl : dependencies_.ordered_globals) {
+ Mark(decl);
+ if (!Switch<bool>(
+ decl, //
+ [&](const ast::Enable* e) { return Enable(e); },
+ [&](const ast::TypeDecl* td) { return TypeDecl(td); },
+ [&](const ast::Function* func) { return Function(func); },
+ [&](const ast::Variable* var) { return GlobalVariable(var); },
+ [&](Default) {
+ TINT_UNREACHABLE(Resolver, diagnostics_)
+ << "unhandled global declaration: " << decl->TypeInfo().name;
+ return false;
+ })) {
+ return false;
+ }
}
- }
- AllocateOverridableConstantIds();
+ AllocateOverridableConstantIds();
- SetShadows();
+ SetShadows();
- if (!ValidatePipelineStages()) {
- return false;
- }
+ if (!validator_.PipelineStages(entry_points_)) {
+ return false;
+ }
- bool result = true;
- for (auto* node : builder_->ASTNodes().Objects()) {
- if (marked_.count(node) == 0) {
- TINT_ICE(Resolver, diagnostics_) << "AST node '" << node->TypeInfo().name
- << "' was not reached by the resolver\n"
- << "At: " << node->source << "\n"
- << "Pointer: " << node;
- result = false;
+ if (!enabled_extensions_.contains(ast::Extension::kChromiumDisableUniformityAnalysis)) {
+ if (!AnalyzeUniformity(builder_, dependencies_)) {
+ // TODO(jrprice): Reject programs that fail uniformity analysis.
+ }
}
- }
- return result;
+ bool result = true;
+ for (auto* node : builder_->ASTNodes().Objects()) {
+ if (marked_.count(node) == 0) {
+ TINT_ICE(Resolver, diagnostics_)
+ << "AST node '" << node->TypeInfo().name << "' was not reached by the resolver\n"
+ << "At: " << node->source << "\n"
+ << "Pointer: " << node;
+ result = false;
+ }
+ }
+
+ return result;
}
sem::Type* Resolver::Type(const ast::Type* ty) {
- Mark(ty);
- auto* s = Switch(
- ty, //
- [&](const ast::Void*) { return builder_->create<sem::Void>(); },
- [&](const ast::Bool*) { return builder_->create<sem::Bool>(); },
- [&](const ast::I32*) { return builder_->create<sem::I32>(); },
- [&](const ast::U32*) { return builder_->create<sem::U32>(); },
- [&](const ast::F32*) { return builder_->create<sem::F32>(); },
- [&](const ast::Vector* t) -> sem::Vector* {
- if (!t->type) {
- AddError("missing vector element type", t->source.End());
- return nullptr;
- }
- if (auto* el = Type(t->type)) {
- if (auto* vector = builder_->create<sem::Vector>(el, t->width)) {
- if (ValidateVector(vector, t->source)) {
- return vector;
+ Mark(ty);
+ auto* s = Switch(
+ ty, //
+ [&](const ast::Void*) { return builder_->create<sem::Void>(); },
+ [&](const ast::Bool*) { return builder_->create<sem::Bool>(); },
+ [&](const ast::I32*) { return builder_->create<sem::I32>(); },
+ [&](const ast::U32*) { return builder_->create<sem::U32>(); },
+ [&](const ast::F16* t) -> sem::F16* {
+ // Validate if f16 type is allowed.
+ if (!enabled_extensions_.contains(ast::Extension::kF16)) {
+ AddError("f16 used without 'f16' extension enabled", t->source);
+ return nullptr;
}
- }
- }
- return nullptr;
- },
- [&](const ast::Matrix* t) -> sem::Matrix* {
- if (!t->type) {
- AddError("missing matrix element type", t->source.End());
- return nullptr;
- }
- if (auto* el = Type(t->type)) {
- if (auto* column_type = builder_->create<sem::Vector>(el, t->rows)) {
- if (auto* matrix =
- builder_->create<sem::Matrix>(column_type, t->columns)) {
- if (ValidateMatrix(matrix, t->source)) {
- return matrix;
- }
+ return builder_->create<sem::F16>();
+ },
+ [&](const ast::F32*) { return builder_->create<sem::F32>(); },
+ [&](const ast::Vector* t) -> sem::Vector* {
+ if (!t->type) {
+ AddError("missing vector element type", t->source.End());
+ return nullptr;
+ }
+ if (auto* el = Type(t->type)) {
+ if (auto* vector = builder_->create<sem::Vector>(el, t->width)) {
+ if (validator_.Vector(vector, t->source)) {
+ return vector;
+ }
+ }
}
- }
- }
- return nullptr;
- },
- [&](const ast::Array* t) { return Array(t); },
- [&](const ast::Atomic* t) -> sem::Atomic* {
- if (auto* el = Type(t->type)) {
- auto* a = builder_->create<sem::Atomic>(el);
- if (!ValidateAtomic(t, a)) {
return nullptr;
- }
- return a;
- }
- return nullptr;
- },
- [&](const ast::Pointer* t) -> sem::Pointer* {
- if (auto* el = Type(t->type)) {
- auto access = t->access;
- if (access == ast::kUndefined) {
- access = DefaultAccessForStorageClass(t->storage_class);
- }
- return builder_->create<sem::Pointer>(el, t->storage_class, access);
- }
- return nullptr;
- },
- [&](const ast::Sampler* t) {
- return builder_->create<sem::Sampler>(t->kind);
- },
- [&](const ast::SampledTexture* t) -> sem::SampledTexture* {
- if (auto* el = Type(t->type)) {
- return builder_->create<sem::SampledTexture>(t->dim, el);
- }
- return nullptr;
- },
- [&](const ast::MultisampledTexture* t) -> sem::MultisampledTexture* {
- if (auto* el = Type(t->type)) {
- return builder_->create<sem::MultisampledTexture>(t->dim, el);
- }
- return nullptr;
- },
- [&](const ast::DepthTexture* t) {
- return builder_->create<sem::DepthTexture>(t->dim);
- },
- [&](const ast::DepthMultisampledTexture* t) {
- return builder_->create<sem::DepthMultisampledTexture>(t->dim);
- },
- [&](const ast::StorageTexture* t) -> sem::StorageTexture* {
- if (auto* el = Type(t->type)) {
- if (!ValidateStorageTexture(t)) {
+ },
+ [&](const ast::Matrix* t) -> sem::Matrix* {
+ if (!t->type) {
+ AddError("missing matrix element type", t->source.End());
+ return nullptr;
+ }
+ if (auto* el = Type(t->type)) {
+ if (auto* column_type = builder_->create<sem::Vector>(el, t->rows)) {
+ if (auto* matrix = builder_->create<sem::Matrix>(column_type, t->columns)) {
+ if (validator_.Matrix(matrix, t->source)) {
+ return matrix;
+ }
+ }
+ }
+ }
return nullptr;
- }
- return builder_->create<sem::StorageTexture>(t->dim, t->format,
- t->access, el);
- }
- return nullptr;
- },
- [&](const ast::ExternalTexture*) {
- return builder_->create<sem::ExternalTexture>();
- },
- [&](Default) {
- auto* resolved = ResolvedSymbol(ty);
- return Switch(
- resolved, //
- [&](sem::Type* type) { return type; },
- [&](sem::Variable* var) {
- auto name =
- builder_->Symbols().NameFor(var->Declaration()->symbol);
- AddError("cannot use variable '" + name + "' as type",
- ty->source);
- AddNote("'" + name + "' declared here",
- var->Declaration()->source);
- return nullptr;
- },
- [&](sem::Function* func) {
- auto name =
- builder_->Symbols().NameFor(func->Declaration()->symbol);
- AddError("cannot use function '" + name + "' as type",
- ty->source);
- AddNote("'" + name + "' declared here",
- func->Declaration()->source);
- return nullptr;
- },
- [&](Default) {
- if (auto* tn = ty->As<ast::TypeName>()) {
- if (IsBuiltin(tn->name)) {
- auto name = builder_->Symbols().NameFor(tn->name);
- AddError("cannot use builtin '" + name + "' as type",
- ty->source);
- return nullptr;
+ },
+ [&](const ast::Array* t) { return Array(t); },
+ [&](const ast::Atomic* t) -> sem::Atomic* {
+ if (auto* el = Type(t->type)) {
+ auto* a = builder_->create<sem::Atomic>(el);
+ if (!validator_.Atomic(t, a)) {
+ return nullptr;
}
- }
- TINT_UNREACHABLE(Resolver, diagnostics_)
- << "Unhandled resolved type '"
- << (resolved ? resolved->TypeInfo().name : "<null>")
- << "' resolved from ast::Type '" << ty->TypeInfo().name
- << "'";
- return nullptr;
- });
- });
+ return a;
+ }
+ return nullptr;
+ },
+ [&](const ast::Pointer* t) -> sem::Pointer* {
+ if (auto* el = Type(t->type)) {
+ auto access = t->access;
+ if (access == ast::kUndefined) {
+ access = DefaultAccessForStorageClass(t->storage_class);
+ }
+ return builder_->create<sem::Pointer>(el, t->storage_class, access);
+ }
+ return nullptr;
+ },
+ [&](const ast::Sampler* t) { return builder_->create<sem::Sampler>(t->kind); },
+ [&](const ast::SampledTexture* t) -> sem::SampledTexture* {
+ if (auto* el = Type(t->type)) {
+ return builder_->create<sem::SampledTexture>(t->dim, el);
+ }
+ return nullptr;
+ },
+ [&](const ast::MultisampledTexture* t) -> sem::MultisampledTexture* {
+ if (auto* el = Type(t->type)) {
+ return builder_->create<sem::MultisampledTexture>(t->dim, el);
+ }
+ return nullptr;
+ },
+ [&](const ast::DepthTexture* t) { return builder_->create<sem::DepthTexture>(t->dim); },
+ [&](const ast::DepthMultisampledTexture* t) {
+ return builder_->create<sem::DepthMultisampledTexture>(t->dim);
+ },
+ [&](const ast::StorageTexture* t) -> sem::StorageTexture* {
+ if (auto* el = Type(t->type)) {
+ if (!validator_.StorageTexture(t)) {
+ return nullptr;
+ }
+ return builder_->create<sem::StorageTexture>(t->dim, t->format, t->access, el);
+ }
+ return nullptr;
+ },
+ [&](const ast::ExternalTexture*) { return builder_->create<sem::ExternalTexture>(); },
+ [&](Default) {
+ auto* resolved = sem_.ResolvedSymbol(ty);
+ return Switch(
+ resolved, //
+ [&](sem::Type* type) { return type; },
+ [&](sem::Variable* var) {
+ auto name = builder_->Symbols().NameFor(var->Declaration()->symbol);
+ AddError("cannot use variable '" + name + "' as type", ty->source);
+ AddNote("'" + name + "' declared here", var->Declaration()->source);
+ return nullptr;
+ },
+ [&](sem::Function* func) {
+ auto name = builder_->Symbols().NameFor(func->Declaration()->symbol);
+ AddError("cannot use function '" + name + "' as type", ty->source);
+ AddNote("'" + name + "' declared here", func->Declaration()->source);
+ return nullptr;
+ },
+ [&](Default) {
+ if (auto* tn = ty->As<ast::TypeName>()) {
+ if (IsBuiltin(tn->name)) {
+ auto name = builder_->Symbols().NameFor(tn->name);
+ AddError("cannot use builtin '" + name + "' as type", ty->source);
+ return nullptr;
+ }
+ }
+ TINT_UNREACHABLE(Resolver, diagnostics_)
+ << "Unhandled resolved type '"
+ << (resolved ? resolved->TypeInfo().name : "<null>")
+ << "' resolved from ast::Type '" << ty->TypeInfo().name << "'";
+ return nullptr;
+ });
+ });
- if (s) {
- builder_->Sem().Add(ty, s);
- }
- return s;
+ if (s) {
+ builder_->Sem().Add(ty, s);
+ }
+ return s;
}
sem::Variable* Resolver::Variable(const ast::Variable* var,
VariableKind kind,
uint32_t index /* = 0 */) {
- const sem::Type* storage_ty = nullptr;
+ const sem::Type* storage_ty = nullptr;
- // If the variable has a declared type, resolve it.
- if (auto* ty = var->type) {
- storage_ty = Type(ty);
- if (!storage_ty) {
- return nullptr;
+ // If the variable has a declared type, resolve it.
+ if (auto* ty = var->type) {
+ storage_ty = Type(ty);
+ if (!storage_ty) {
+ return nullptr;
+ }
}
- }
- const sem::Expression* rhs = nullptr;
+ const sem::Expression* rhs = nullptr;
+
+ // Does the variable have a constructor?
+ if (var->constructor) {
+ rhs = Materialize(Expression(var->constructor), storage_ty);
+ if (!rhs) {
+ return nullptr;
+ }
- // Does the variable have a constructor?
- if (var->constructor) {
- rhs = Expression(var->constructor);
- if (!rhs) {
- return nullptr;
+ // If the variable has no declared type, infer it from the RHS
+ if (!storage_ty) {
+ if (!var->is_const && kind == VariableKind::kGlobal) {
+ AddError("global var declaration must specify a type", var->source);
+ return nullptr;
+ }
+
+ storage_ty = rhs->Type()->UnwrapRef(); // Implicit load of RHS
+ }
+ } else if (var->is_const && !var->is_overridable && kind != VariableKind::kParameter) {
+ AddError("let declaration must have an initializer", var->source);
+ return nullptr;
+ } else if (!var->type) {
+ AddError((kind == VariableKind::kGlobal)
+ ? "module scope var declaration requires a type and initializer"
+ : "function scope var declaration requires a type or initializer",
+ var->source);
+ return nullptr;
}
- // If the variable has no declared type, infer it from the RHS
if (!storage_ty) {
- if (!var->is_const && kind == VariableKind::kGlobal) {
- AddError("global var declaration must specify a type", var->source);
+ TINT_ICE(Resolver, diagnostics_) << "failed to determine storage type for variable '" +
+ builder_->Symbols().NameFor(var->symbol) + "'\n"
+ << "Source: " << var->source;
return nullptr;
- }
+ }
- storage_ty = rhs->Type()->UnwrapRef(); // Implicit load of RHS
+ auto storage_class = var->declared_storage_class;
+ if (storage_class == ast::StorageClass::kNone && !var->is_const) {
+ // No declared storage class. Infer from usage / type.
+ if (kind == VariableKind::kLocal) {
+ storage_class = ast::StorageClass::kFunction;
+ } else if (storage_ty->UnwrapRef()->is_handle()) {
+ // https://gpuweb.github.io/gpuweb/wgsl/#module-scope-variables
+ // If the store type is a texture type or a sampler type, then the
+ // variable declaration must not have a storage class attribute. The
+ // storage class will always be handle.
+ storage_class = ast::StorageClass::kHandle;
+ }
}
- } else if (var->is_const && !var->is_overridable &&
- kind != VariableKind::kParameter) {
- AddError("let declaration must have an initializer", var->source);
- return nullptr;
- } else if (!var->type) {
- AddError(
- (kind == VariableKind::kGlobal)
- ? "module scope var declaration requires a type and initializer"
- : "function scope var declaration requires a type or initializer",
- var->source);
- return nullptr;
- }
- if (!storage_ty) {
- TINT_ICE(Resolver, diagnostics_)
- << "failed to determine storage type for variable '" +
- builder_->Symbols().NameFor(var->symbol) + "'\n"
- << "Source: " << var->source;
- return nullptr;
- }
-
- auto storage_class = var->declared_storage_class;
- if (storage_class == ast::StorageClass::kNone && !var->is_const) {
- // No declared storage class. Infer from usage / type.
- if (kind == VariableKind::kLocal) {
- storage_class = ast::StorageClass::kFunction;
- } else if (storage_ty->UnwrapRef()->is_handle()) {
- // https://gpuweb.github.io/gpuweb/wgsl/#module-scope-variables
- // If the store type is a texture type or a sampler type, then the
- // variable declaration must not have a storage class attribute. The
- // storage class will always be handle.
- storage_class = ast::StorageClass::kUniformConstant;
- }
- }
-
- if (kind == VariableKind::kLocal && !var->is_const &&
- storage_class != ast::StorageClass::kFunction &&
- IsValidationEnabled(var->attributes,
- ast::DisabledValidation::kIgnoreStorageClass)) {
- AddError("function variable has a non-function storage class", var->source);
- return nullptr;
- }
-
- auto access = var->declared_access;
- if (access == ast::Access::kUndefined) {
- access = DefaultAccessForStorageClass(storage_class);
- }
-
- auto* var_ty = storage_ty;
- if (!var->is_const) {
- // Variable declaration. Unlike `let`, `var` has storage.
- // Variables are always of a reference type to the declared storage type.
- var_ty =
- builder_->create<sem::Reference>(storage_ty, storage_class, access);
- }
-
- if (rhs && !ValidateVariableConstructorOrCast(var, storage_class, storage_ty,
- rhs->Type())) {
- return nullptr;
- }
-
- if (!ApplyStorageClassUsageToType(
- storage_class, const_cast<sem::Type*>(var_ty), var->source)) {
- AddNote(
- std::string("while instantiating ") +
- ((kind == VariableKind::kParameter) ? "parameter " : "variable ") +
- builder_->Symbols().NameFor(var->symbol),
- var->source);
- return nullptr;
- }
-
- if (kind == VariableKind::kParameter) {
- if (auto* ptr = var_ty->As<sem::Pointer>()) {
- // For MSL, we push module-scope variables into the entry point as pointer
- // parameters, so we also need to handle their store type.
- if (!ApplyStorageClassUsageToType(
- ptr->StorageClass(), const_cast<sem::Type*>(ptr->StoreType()),
- var->source)) {
- AddNote("while instantiating parameter " +
+ if (kind == VariableKind::kLocal && !var->is_const &&
+ storage_class != ast::StorageClass::kFunction &&
+ validator_.IsValidationEnabled(var->attributes,
+ ast::DisabledValidation::kIgnoreStorageClass)) {
+ AddError("function variable has a non-function storage class", var->source);
+ return nullptr;
+ }
+
+ auto access = var->declared_access;
+ if (access == ast::Access::kUndefined) {
+ access = DefaultAccessForStorageClass(storage_class);
+ }
+
+ auto* var_ty = storage_ty;
+ if (!var->is_const) {
+ // Variable declaration. Unlike `let`, `var` has storage.
+ // Variables are always of a reference type to the declared storage type.
+ var_ty = builder_->create<sem::Reference>(storage_ty, storage_class, access);
+ }
+
+ if (rhs && !validator_.VariableConstructorOrCast(var, storage_class, storage_ty, rhs->Type())) {
+ return nullptr;
+ }
+
+ if (!ApplyStorageClassUsageToType(storage_class, const_cast<sem::Type*>(var_ty), var->source)) {
+ AddNote(std::string("while instantiating ") +
+ ((kind == VariableKind::kParameter) ? "parameter " : "variable ") +
builder_->Symbols().NameFor(var->symbol),
var->source);
return nullptr;
- }
- }
- }
-
- switch (kind) {
- case VariableKind::kGlobal: {
- sem::BindingPoint binding_point;
- if (auto bp = var->BindingPoint()) {
- binding_point = {bp.group->value, bp.binding->value};
- }
-
- bool has_const_val = rhs && var->is_const && !var->is_overridable;
- auto* global = builder_->create<sem::GlobalVariable>(
- var, var_ty, storage_class, access,
- has_const_val ? rhs->ConstantValue() : sem::Constant{},
- binding_point);
-
- if (var->is_overridable) {
- global->SetIsOverridable();
- if (auto* id = ast::GetAttribute<ast::IdAttribute>(var->attributes)) {
- global->SetConstantId(static_cast<uint16_t>(id->value));
- }
- }
-
- global->SetConstructor(rhs);
-
- builder_->Sem().Add(var, global);
- return global;
- }
- case VariableKind::kLocal: {
- auto* local = builder_->create<sem::LocalVariable>(
- var, var_ty, storage_class, access, current_statement_,
- (rhs && var->is_const) ? rhs->ConstantValue() : sem::Constant{});
- builder_->Sem().Add(var, local);
- local->SetConstructor(rhs);
- return local;
- }
- case VariableKind::kParameter: {
- auto* param = builder_->create<sem::Parameter>(var, index, var_ty,
- storage_class, access);
- builder_->Sem().Add(var, param);
- return param;
- }
- }
-
- TINT_UNREACHABLE(Resolver, diagnostics_)
- << "unhandled VariableKind " << static_cast<int>(kind);
- return nullptr;
+ }
+
+ if (kind == VariableKind::kParameter) {
+ if (auto* ptr = var_ty->As<sem::Pointer>()) {
+ // For MSL, we push module-scope variables into the entry point as pointer
+ // parameters, so we also need to handle their store type.
+ if (!ApplyStorageClassUsageToType(
+ ptr->StorageClass(), const_cast<sem::Type*>(ptr->StoreType()), var->source)) {
+ AddNote("while instantiating parameter " + builder_->Symbols().NameFor(var->symbol),
+ var->source);
+ return nullptr;
+ }
+ }
+ }
+
+ switch (kind) {
+ case VariableKind::kGlobal: {
+ sem::BindingPoint binding_point;
+ if (auto bp = var->BindingPoint()) {
+ binding_point = {bp.group->value, bp.binding->value};
+ }
+
+ bool has_const_val = rhs && var->is_const && !var->is_overridable;
+ auto* global = builder_->create<sem::GlobalVariable>(
+ var, var_ty, storage_class, access,
+ has_const_val ? rhs->ConstantValue() : sem::Constant{}, binding_point);
+
+ if (var->is_overridable) {
+ global->SetIsOverridable();
+ if (auto* id = ast::GetAttribute<ast::IdAttribute>(var->attributes)) {
+ global->SetConstantId(static_cast<uint16_t>(id->value));
+ }
+ }
+
+ global->SetConstructor(rhs);
+
+ builder_->Sem().Add(var, global);
+ return global;
+ }
+ case VariableKind::kLocal: {
+ auto* local = builder_->create<sem::LocalVariable>(
+ var, var_ty, storage_class, access, current_statement_,
+ (rhs && var->is_const) ? rhs->ConstantValue() : sem::Constant{});
+ builder_->Sem().Add(var, local);
+ local->SetConstructor(rhs);
+ return local;
+ }
+ case VariableKind::kParameter: {
+ auto* param =
+ builder_->create<sem::Parameter>(var, index, var_ty, storage_class, access);
+ builder_->Sem().Add(var, param);
+ return param;
+ }
+ }
+
+ TINT_UNREACHABLE(Resolver, diagnostics_) << "unhandled VariableKind " << static_cast<int>(kind);
+ return nullptr;
}
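
The inference above boils down to a few rules: a function-scope `var` with no declared storage class defaults to `function`, handle types (textures and samplers) always end up in `handle`, and a `var` (unlike `let`) is given a reference type to its store type. A minimal standalone sketch of the storage-class part; the enums and the InferStorageClass helper below are simplified stand-ins, not Tint's real ast/sem types.

// Minimal sketch of the storage-class inference in Resolver::Variable(); simplified stand-ins only.
#include <cassert>

enum class StorageClass { kNone, kFunction, kHandle, kPrivate };
enum class VariableKind { kGlobal, kLocal, kParameter };

// Mirrors the "No declared storage class. Infer from usage / type." branch above,
// ignoring 'let'/'override' declarations, which never receive a storage class.
StorageClass InferStorageClass(StorageClass declared, VariableKind kind, bool is_handle_type) {
    if (declared != StorageClass::kNone) {
        return declared;                 // an explicit storage class always wins
    }
    if (kind == VariableKind::kLocal) {
        return StorageClass::kFunction;  // function-scope 'var' defaults to 'function'
    }
    if (is_handle_type) {
        return StorageClass::kHandle;    // textures and samplers always use 'handle'
    }
    return StorageClass::kNone;          // left for the module-scope validation to reject
}

int main() {
    assert(InferStorageClass(StorageClass::kNone, VariableKind::kLocal, false) ==
           StorageClass::kFunction);
    assert(InferStorageClass(StorageClass::kNone, VariableKind::kGlobal, true) ==
           StorageClass::kHandle);
    assert(InferStorageClass(StorageClass::kPrivate, VariableKind::kGlobal, false) ==
           StorageClass::kPrivate);
    return 0;
}
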
-ast::Access Resolver::DefaultAccessForStorageClass(
- ast::StorageClass storage_class) {
- // https://gpuweb.github.io/gpuweb/wgsl/#storage-class
- switch (storage_class) {
- case ast::StorageClass::kStorage:
- case ast::StorageClass::kUniform:
- case ast::StorageClass::kUniformConstant:
- return ast::Access::kRead;
- default:
- break;
- }
- return ast::Access::kReadWrite;
+ast::Access Resolver::DefaultAccessForStorageClass(ast::StorageClass storage_class) {
+ // https://gpuweb.github.io/gpuweb/wgsl/#storage-class
+ switch (storage_class) {
+ case ast::StorageClass::kStorage:
+ case ast::StorageClass::kUniform:
+ case ast::StorageClass::kHandle:
+ return ast::Access::kRead;
+ default:
+ break;
+ }
+ return ast::Access::kReadWrite;
}
void Resolver::AllocateOverridableConstantIds() {
- // The next pipeline constant ID to try to allocate.
- uint16_t next_constant_id = 0;
-
- // Allocate constant IDs in global declaration order, so that they are
- // deterministic.
- // TODO(crbug.com/tint/1192): If a transform changes the order or removes an
- // unused constant, the allocation may change on the next Resolver pass.
- for (auto* decl : builder_->AST().GlobalDeclarations()) {
- auto* var = decl->As<ast::Variable>();
- if (!var || !var->is_overridable) {
- continue;
- }
-
- uint16_t constant_id;
- if (auto* id_attr = ast::GetAttribute<ast::IdAttribute>(var->attributes)) {
- constant_id = static_cast<uint16_t>(id_attr->value);
- } else {
- // No ID was specified, so allocate the next available ID.
- constant_id = next_constant_id;
- while (constant_ids_.count(constant_id)) {
- if (constant_id == UINT16_MAX) {
- TINT_ICE(Resolver, builder_->Diagnostics())
- << "no more pipeline constant IDs available";
- return;
- }
- constant_id++;
- }
- next_constant_id = constant_id + 1;
- }
-
- auto* sem = Sem<sem::GlobalVariable>(var);
- const_cast<sem::GlobalVariable*>(sem)->SetConstantId(constant_id);
- }
+ // The next pipeline constant ID to try to allocate.
+ uint16_t next_constant_id = 0;
+
+ // Allocate constant IDs in global declaration order, so that they are
+ // deterministic.
+ // TODO(crbug.com/tint/1192): If a transform changes the order or removes an
+ // unused constant, the allocation may change on the next Resolver pass.
+ for (auto* decl : builder_->AST().GlobalDeclarations()) {
+ auto* var = decl->As<ast::Variable>();
+ if (!var || !var->is_overridable) {
+ continue;
+ }
+
+ uint16_t constant_id;
+ if (auto* id_attr = ast::GetAttribute<ast::IdAttribute>(var->attributes)) {
+ constant_id = static_cast<uint16_t>(id_attr->value);
+ } else {
+ // No ID was specified, so allocate the next available ID.
+ constant_id = next_constant_id;
+ while (constant_ids_.count(constant_id)) {
+ if (constant_id == UINT16_MAX) {
+ TINT_ICE(Resolver, builder_->Diagnostics())
+ << "no more pipeline constant IDs available";
+ return;
+ }
+ constant_id++;
+ }
+ next_constant_id = constant_id + 1;
+ }
+
+ auto* sem = sem_.Get<sem::GlobalVariable>(var);
+ const_cast<sem::GlobalVariable*>(sem)->SetConstantId(constant_id);
+ }
}
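
The allocation above is deterministic: IDs given explicitly via @id() are recorded while the globals are resolved, and every unattributed overridable constant then takes the lowest ID not yet in use, in global declaration order. A minimal sketch, with plain std containers standing in for constant_ids_ and the semantic nodes.

// Sketch of the pipeline-constant ID allocation above; not Tint's real API.
#include <cassert>
#include <cstdint>
#include <optional>
#include <set>
#include <vector>

// Each entry is an overridable constant in declaration order; a value means an explicit
// @id() attribute, nullopt means "allocate the next free ID".
std::vector<uint16_t> AllocateIds(const std::vector<std::optional<uint16_t>>& decls) {
    std::set<uint16_t> used;
    for (auto& d : decls) {
        if (d) {
            used.insert(*d);  // explicit IDs are reserved up front
        }
    }
    std::vector<uint16_t> out;
    uint16_t next = 0;
    for (auto& d : decls) {
        if (d) {
            out.push_back(*d);
            continue;
        }
        while (used.count(next)) {  // skip over IDs taken by explicit attributes
            next++;
        }
        used.insert(next);
        out.push_back(next++);
    }
    return out;
}

int main() {
    // Explicit IDs 0 and 2; the two unattributed constants receive 1 and 3.
    std::vector<std::optional<uint16_t>> decls = {uint16_t{0}, std::nullopt, uint16_t{2},
                                                  std::nullopt};
    assert((AllocateIds(decls) == std::vector<uint16_t>{0, 1, 2, 3}));
    return 0;
}
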
void Resolver::SetShadows() {
- for (auto it : dependencies_.shadows) {
- Switch(
- Sem(it.first), //
- [&](sem::LocalVariable* local) { local->SetShadows(Sem(it.second)); },
- [&](sem::Parameter* param) { param->SetShadows(Sem(it.second)); });
- }
+ for (auto it : dependencies_.shadows) {
+ Switch(
+ sem_.Get(it.first), //
+ [&](sem::LocalVariable* local) { local->SetShadows(sem_.Get(it.second)); },
+ [&](sem::Parameter* param) { param->SetShadows(sem_.Get(it.second)); });
+ }
}
sem::GlobalVariable* Resolver::GlobalVariable(const ast::Variable* var) {
- auto* sem = Variable(var, VariableKind::kGlobal);
- if (!sem) {
- return nullptr;
- }
+ auto* sem = Variable(var, VariableKind::kGlobal);
+ if (!sem) {
+ return nullptr;
+ }
- auto storage_class = sem->StorageClass();
- if (!var->is_const && storage_class == ast::StorageClass::kNone) {
- AddError("global variables must have a storage class", var->source);
- return nullptr;
- }
- if (var->is_const && storage_class != ast::StorageClass::kNone) {
- AddError("global constants shouldn't have a storage class", var->source);
- return nullptr;
- }
+ auto storage_class = sem->StorageClass();
+ if (!var->is_const && storage_class == ast::StorageClass::kNone) {
+ AddError("global variables must have a storage class", var->source);
+ return nullptr;
+ }
+ if (var->is_const && storage_class != ast::StorageClass::kNone) {
+ AddError("global constants shouldn't have a storage class", var->source);
+ return nullptr;
+ }
- for (auto* attr : var->attributes) {
- Mark(attr);
+ for (auto* attr : var->attributes) {
+ Mark(attr);
- if (auto* id_attr = attr->As<ast::IdAttribute>()) {
- // Track the constant IDs that are specified in the shader.
- constant_ids_.emplace(id_attr->value, sem);
+ if (auto* id_attr = attr->As<ast::IdAttribute>()) {
+ // Track the constant IDs that are specified in the shader.
+ constant_ids_.emplace(id_attr->value, sem);
+ }
}
- }
- if (!ValidateNoDuplicateAttributes(var->attributes)) {
- return nullptr;
- }
+ if (!validator_.NoDuplicateAttributes(var->attributes)) {
+ return nullptr;
+ }
- if (!ValidateGlobalVariable(sem)) {
- return nullptr;
- }
+ if (!validator_.GlobalVariable(sem, constant_ids_, atomic_composite_info_)) {
+ return nullptr;
+ }
- // TODO(bclayton): Call this at the end of resolve on all uniform and storage
- // referenced structs
- if (!ValidateStorageClassLayout(sem)) {
- return nullptr;
- }
+ // TODO(bclayton): Call this at the end of resolve on all uniform and storage
+ // referenced structs
+ if (!validator_.StorageClassLayout(sem, valid_type_storage_layouts_)) {
+ return nullptr;
+ }
- return sem->As<sem::GlobalVariable>();
+ return sem->As<sem::GlobalVariable>();
}
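
Module-scope declarations are held to two complementary rules here: a `var` must end up with a storage class (declared or inferred), while a `let`/`override` must not have one. A tiny sketch of just that check; the enum and error strings are simplified paraphrases of the calls above, not Tint's diagnostics API.

// Sketch of the two module-scope checks above; simplified stand-ins only.
#include <cassert>
#include <string>

enum class StorageClass { kNone, kPrivate, kStorage, kUniform };

// Returns an empty string when the declaration is acceptable.
std::string CheckGlobal(bool is_const, StorageClass storage_class) {
    if (!is_const && storage_class == StorageClass::kNone) {
        return "global variables must have a storage class";
    }
    if (is_const && storage_class != StorageClass::kNone) {
        return "global constants shouldn't have a storage class";
    }
    return "";
}

int main() {
    assert(!CheckGlobal(false, StorageClass::kNone).empty());    // var without a storage class
    assert(!CheckGlobal(true, StorageClass::kPrivate).empty());  // let/override with one
    assert(CheckGlobal(false, StorageClass::kStorage).empty());  // var<storage> is fine
    return 0;
}
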
sem::Function* Resolver::Function(const ast::Function* decl) {
- uint32_t parameter_index = 0;
- std::unordered_map<Symbol, Source> parameter_names;
- std::vector<sem::Parameter*> parameters;
-
- // Resolve all the parameters
- for (auto* param : decl->params) {
- Mark(param);
-
- { // Check the parameter name is unique for the function
- auto emplaced = parameter_names.emplace(param->symbol, param->source);
- if (!emplaced.second) {
- auto name = builder_->Symbols().NameFor(param->symbol);
- AddError("redefinition of parameter '" + name + "'", param->source);
- AddNote("previous definition is here", emplaced.first->second);
+ uint32_t parameter_index = 0;
+ std::unordered_map<Symbol, Source> parameter_names;
+ std::vector<sem::Parameter*> parameters;
+
+ // Resolve all the parameters
+ for (auto* param : decl->params) {
+ Mark(param);
+
+ { // Check the parameter name is unique for the function
+ auto emplaced = parameter_names.emplace(param->symbol, param->source);
+ if (!emplaced.second) {
+ auto name = builder_->Symbols().NameFor(param->symbol);
+ AddError("redefinition of parameter '" + name + "'", param->source);
+ AddNote("previous definition is here", emplaced.first->second);
+ return nullptr;
+ }
+ }
+
+ auto* var =
+ As<sem::Parameter>(Variable(param, VariableKind::kParameter, parameter_index++));
+ if (!var) {
+ return nullptr;
+ }
+
+ for (auto* attr : param->attributes) {
+ Mark(attr);
+ }
+ if (!validator_.NoDuplicateAttributes(param->attributes)) {
+ return nullptr;
+ }
+
+ parameters.emplace_back(var);
+
+ auto* var_ty = const_cast<sem::Type*>(var->Type());
+ if (auto* str = var_ty->As<sem::Struct>()) {
+ switch (decl->PipelineStage()) {
+ case ast::PipelineStage::kVertex:
+ str->AddUsage(sem::PipelineStageUsage::kVertexInput);
+ break;
+ case ast::PipelineStage::kFragment:
+ str->AddUsage(sem::PipelineStageUsage::kFragmentInput);
+ break;
+ case ast::PipelineStage::kCompute:
+ str->AddUsage(sem::PipelineStageUsage::kComputeInput);
+ break;
+ case ast::PipelineStage::kNone:
+ break;
+ }
+ }
+ }
+
+ // Resolve the return type
+ sem::Type* return_type = nullptr;
+ if (auto* ty = decl->return_type) {
+ return_type = Type(ty);
+ if (!return_type) {
+ return nullptr;
+ }
+ } else {
+ return_type = builder_->create<sem::Void>();
+ }
+
+ if (auto* str = return_type->As<sem::Struct>()) {
+ if (!ApplyStorageClassUsageToType(ast::StorageClass::kNone, str, decl->source)) {
+ AddNote(
+ "while instantiating return type for " + builder_->Symbols().NameFor(decl->symbol),
+ decl->source);
+ return nullptr;
+ }
+
+ switch (decl->PipelineStage()) {
+ case ast::PipelineStage::kVertex:
+ str->AddUsage(sem::PipelineStageUsage::kVertexOutput);
+ break;
+ case ast::PipelineStage::kFragment:
+ str->AddUsage(sem::PipelineStageUsage::kFragmentOutput);
+ break;
+ case ast::PipelineStage::kCompute:
+ str->AddUsage(sem::PipelineStageUsage::kComputeOutput);
+ break;
+ case ast::PipelineStage::kNone:
+ break;
+ }
+ }
+
+ auto* func = builder_->create<sem::Function>(decl, return_type, parameters);
+ builder_->Sem().Add(decl, func);
+
+ TINT_SCOPED_ASSIGNMENT(current_function_, func);
+
+ if (!WorkgroupSize(decl)) {
return nullptr;
- }
- }
-
- auto* var = As<sem::Parameter>(
- Variable(param, VariableKind::kParameter, parameter_index++));
- if (!var) {
- return nullptr;
- }
-
- for (auto* attr : param->attributes) {
- Mark(attr);
- }
- if (!ValidateNoDuplicateAttributes(param->attributes)) {
- return nullptr;
- }
-
- parameters.emplace_back(var);
-
- auto* var_ty = const_cast<sem::Type*>(var->Type());
- if (auto* str = var_ty->As<sem::Struct>()) {
- switch (decl->PipelineStage()) {
- case ast::PipelineStage::kVertex:
- str->AddUsage(sem::PipelineStageUsage::kVertexInput);
- break;
- case ast::PipelineStage::kFragment:
- str->AddUsage(sem::PipelineStageUsage::kFragmentInput);
- break;
- case ast::PipelineStage::kCompute:
- str->AddUsage(sem::PipelineStageUsage::kComputeInput);
- break;
- case ast::PipelineStage::kNone:
- break;
- }
- }
- }
-
- // Resolve the return type
- sem::Type* return_type = nullptr;
- if (auto* ty = decl->return_type) {
- return_type = Type(ty);
- if (!return_type) {
- return nullptr;
- }
- } else {
- return_type = builder_->create<sem::Void>();
- }
-
- if (auto* str = return_type->As<sem::Struct>()) {
- if (!ApplyStorageClassUsageToType(ast::StorageClass::kNone, str,
- decl->source)) {
- AddNote("while instantiating return type for " +
- builder_->Symbols().NameFor(decl->symbol),
- decl->source);
- return nullptr;
- }
-
- switch (decl->PipelineStage()) {
- case ast::PipelineStage::kVertex:
- str->AddUsage(sem::PipelineStageUsage::kVertexOutput);
- break;
- case ast::PipelineStage::kFragment:
- str->AddUsage(sem::PipelineStageUsage::kFragmentOutput);
- break;
- case ast::PipelineStage::kCompute:
- str->AddUsage(sem::PipelineStageUsage::kComputeOutput);
- break;
- case ast::PipelineStage::kNone:
- break;
- }
- }
-
- auto* func = builder_->create<sem::Function>(decl, return_type, parameters);
- builder_->Sem().Add(decl, func);
-
- TINT_SCOPED_ASSIGNMENT(current_function_, func);
-
- if (!WorkgroupSize(decl)) {
- return nullptr;
- }
-
- if (decl->IsEntryPoint()) {
- entry_points_.emplace_back(func);
- }
-
- if (decl->body) {
- Mark(decl->body);
- if (current_compound_statement_) {
- TINT_ICE(Resolver, diagnostics_)
- << "Resolver::Function() called with a current compound statement";
- return nullptr;
- }
- auto* body = StatementScope(
- decl->body, builder_->create<sem::FunctionBlockStatement>(func),
- [&] { return Statements(decl->body->statements); });
- if (!body) {
- return nullptr;
- }
- func->Behaviors() = body->Behaviors();
- if (func->Behaviors().Contains(sem::Behavior::kReturn)) {
- // https://www.w3.org/TR/WGSL/#behaviors-rules
- // We assign a behavior to each function: it is its body’s behavior
- // (treating the body as a regular statement), with any "Return" replaced
- // by "Next".
- func->Behaviors().Remove(sem::Behavior::kReturn);
- func->Behaviors().Add(sem::Behavior::kNext);
- }
- }
-
- for (auto* attr : decl->attributes) {
- Mark(attr);
- }
- if (!ValidateNoDuplicateAttributes(decl->attributes)) {
- return nullptr;
- }
+ }
- for (auto* attr : decl->return_type_attributes) {
- Mark(attr);
- }
- if (!ValidateNoDuplicateAttributes(decl->return_type_attributes)) {
- return nullptr;
- }
+ if (decl->IsEntryPoint()) {
+ entry_points_.emplace_back(func);
+ }
- if (!ValidateFunction(func)) {
- return nullptr;
- }
+ if (decl->body) {
+ Mark(decl->body);
+ if (current_compound_statement_) {
+ TINT_ICE(Resolver, diagnostics_)
+ << "Resolver::Function() called with a current compound statement";
+ return nullptr;
+ }
+ auto* body = StatementScope(decl->body, builder_->create<sem::FunctionBlockStatement>(func),
+ [&] { return Statements(decl->body->statements); });
+ if (!body) {
+ return nullptr;
+ }
+ func->Behaviors() = body->Behaviors();
+ if (func->Behaviors().Contains(sem::Behavior::kReturn)) {
+ // https://www.w3.org/TR/WGSL/#behaviors-rules
+ // We assign a behavior to each function: it is its body’s behavior
+ // (treating the body as a regular statement), with any "Return" replaced
+ // by "Next".
+ func->Behaviors().Remove(sem::Behavior::kReturn);
+ func->Behaviors().Add(sem::Behavior::kNext);
+ }
+ }
- // If this is an entry point, mark all transitively called functions as being
- // used by this entry point.
- if (decl->IsEntryPoint()) {
- for (auto* f : func->TransitivelyCalledFunctions()) {
- const_cast<sem::Function*>(f)->AddAncestorEntryPoint(func);
+ for (auto* attr : decl->attributes) {
+ Mark(attr);
+ }
+ if (!validator_.NoDuplicateAttributes(decl->attributes)) {
+ return nullptr;
}
- }
- return func;
-}
+ for (auto* attr : decl->return_type_attributes) {
+ Mark(attr);
+ }
+ if (!validator_.NoDuplicateAttributes(decl->return_type_attributes)) {
+ return nullptr;
+ }
-bool Resolver::WorkgroupSize(const ast::Function* func) {
- // Set work-group size defaults.
- sem::WorkgroupSize ws;
- for (int i = 0; i < 3; i++) {
- ws[i].value = 1;
- ws[i].overridable_const = nullptr;
- }
-
- auto* attr = ast::GetAttribute<ast::WorkgroupAttribute>(func->attributes);
- if (!attr) {
- return true;
- }
+ auto stage = current_function_ ? current_function_->Declaration()->PipelineStage()
+ : ast::PipelineStage::kNone;
+ if (!validator_.Function(func, stage)) {
+ return nullptr;
+ }
+
+ // If this is an entry point, mark all transitively called functions as being
+ // used by this entry point.
+ if (decl->IsEntryPoint()) {
+ for (auto* f : func->TransitivelyCalledFunctions()) {
+ const_cast<sem::Function*>(f)->AddAncestorEntryPoint(func);
+ }
+ }
- auto values = attr->Values();
- auto any_i32 = false;
- auto any_u32 = false;
- for (int i = 0; i < 3; i++) {
- // Each argument to this attribute can either be a literal, an
- // identifier for a module-scope constants, or nullptr if not specified.
+ return func;
+}
- auto* expr = values[i];
- if (!expr) {
- // Not specified, just use the default.
- continue;
+bool Resolver::WorkgroupSize(const ast::Function* func) {
+ // Set work-group size defaults.
+ sem::WorkgroupSize ws;
+ for (int i = 0; i < 3; i++) {
+ ws[i].value = 1;
+ ws[i].overridable_const = nullptr;
}
- auto* expr_sem = Expression(expr);
- if (!expr_sem) {
- return false;
+ auto* attr = ast::GetAttribute<ast::WorkgroupAttribute>(func->attributes);
+ if (!attr) {
+ return true;
}
+ auto values = attr->Values();
+ std::array<const sem::Expression*, 3> args = {};
+ std::array<const sem::Type*, 3> arg_tys = {};
+ size_t arg_count = 0;
+
constexpr const char* kErrBadType =
- "workgroup_size argument must be either literal or module-scope "
- "constant of type i32 or u32";
- constexpr const char* kErrInconsistentType =
- "workgroup_size arguments must be of the same type, either i32 "
+ "workgroup_size argument must be either literal or module-scope constant of type i32 "
"or u32";
- auto* ty = TypeOf(expr);
- bool is_i32 = ty->UnwrapRef()->Is<sem::I32>();
- bool is_u32 = ty->UnwrapRef()->Is<sem::U32>();
- if (!is_i32 && !is_u32) {
- AddError(kErrBadType, expr->source);
- return false;
- }
+ for (int i = 0; i < 3; i++) {
+ // Each argument to this attribute can either be a literal, an identifier for a module-scope
+        // constant, or nullptr if not specified.
+ auto* value = values[i];
+ if (!value) {
+ break;
+ }
+ const auto* expr = Expression(value);
+ if (!expr) {
+ return false;
+ }
+ auto* ty = expr->Type();
+ if (!ty->IsAnyOf<sem::I32, sem::U32, sem::AbstractInt>()) {
+ AddError(kErrBadType, value->source);
+ return false;
+ }
- any_i32 = any_i32 || is_i32;
- any_u32 = any_u32 || is_u32;
- if (any_i32 && any_u32) {
- AddError(kErrInconsistentType, expr->source);
- return false;
+ args[i] = expr;
+ arg_tys[i] = ty;
+ arg_count++;
}
- sem::Constant value;
-
- if (auto* user = Sem(expr)->As<sem::VariableUser>()) {
- // We have an variable of a module-scope constant.
- auto* decl = user->Variable()->Declaration();
- if (!decl->is_const) {
- AddError(kErrBadType, expr->source);
+ auto* common_ty = sem::Type::Common(arg_tys.data(), arg_count);
+ if (!common_ty) {
+ AddError("workgroup_size arguments must be of the same type, either i32 or u32",
+ attr->source);
return false;
- }
- // Capture the constant if it is pipeline-overridable.
- if (decl->is_overridable) {
- ws[i].overridable_const = decl;
- }
-
- if (decl->constructor) {
- value = Sem(decl->constructor)->ConstantValue();
- } else {
- // No constructor means this value must be overriden by the user.
- ws[i].value = 0;
- continue;
- }
- } else if (expr->Is<ast::LiteralExpression>()) {
- value = Sem(expr)->ConstantValue();
- } else {
- AddError(
- "workgroup_size argument must be either a literal or a "
- "module-scope constant",
- values[i]->source);
- return false;
}
- if (!value) {
- TINT_ICE(Resolver, diagnostics_)
- << "could not resolve constant workgroup_size constant value";
- continue;
- }
- // Validate and set the default value for this dimension.
- if (is_i32 ? value.Elements()[0].i32 < 1 : value.Elements()[0].u32 < 1) {
- AddError("workgroup_size argument must be at least 1", values[i]->source);
- return false;
+ // If all arguments are abstract-integers, then materialize to i32.
+ if (common_ty->Is<sem::AbstractInt>()) {
+ common_ty = builder_->create<sem::I32>();
}
- ws[i].value = is_i32 ? static_cast<uint32_t>(value.Elements()[0].i32)
- : value.Elements()[0].u32;
- }
+ for (size_t i = 0; i < arg_count; i++) {
+ auto* materialized = Materialize(args[i], common_ty);
+ if (!materialized) {
+ return false;
+ }
+
+ sem::Constant value;
+
+ if (auto* user = args[i]->As<sem::VariableUser>()) {
+            // The expression is a use of a module-scope constant.
+ auto* decl = user->Variable()->Declaration();
+ if (!decl->is_const) {
+ AddError(kErrBadType, values[i]->source);
+ return false;
+ }
+ // Capture the constant if it is pipeline-overridable.
+ if (decl->is_overridable) {
+ ws[i].overridable_const = decl;
+ }
+
+ if (decl->constructor) {
+ value = sem_.Get(decl->constructor)->ConstantValue();
+ } else {
+                // No constructor means this value must be overridden by the user.
+ ws[i].value = 0;
+ continue;
+ }
+ } else if (values[i]->Is<ast::LiteralExpression>()) {
+ value = materialized->ConstantValue();
+ } else {
+ AddError(
+ "workgroup_size argument must be either a literal or a "
+ "module-scope constant",
+ values[i]->source);
+ return false;
+ }
+
+ if (!value) {
+ TINT_ICE(Resolver, diagnostics_)
+ << "could not resolve constant workgroup_size constant value";
+ continue;
+ }
+        // Validate and set the default value for this dimension.
+ if (value.Element<AInt>(0).value < 1) {
+ AddError("workgroup_size argument must be at least 1", values[i]->source);
+ return false;
+ }
+
+ ws[i].value = value.Element<uint32_t>(0);
+ }
- current_function_->SetWorkgroupSize(std::move(ws));
- return true;
+ current_function_->SetWorkgroupSize(std::move(ws));
+ return true;
}
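
The @workgroup_size handling above has three steps: resolve up to three arguments, find a common type across them (mixing i32 and u32 is an error, and all-abstract-int defaults to i32), then materialize each argument to that type and require a value of at least 1. A sketch of the common-type step; the Ty enum and CommonWorkgroupSizeType stand in for sem::Type and sem::Type::Common(), they are not Tint's API.

// Sketch of the common-type rule for @workgroup_size arguments.
#include <cassert>
#include <optional>
#include <vector>

enum class Ty { kAbstractInt, kI32, kU32 };

// Returns the concrete common type, or nullopt when the arguments mix i32 and u32.
// Abstract integers are compatible with either, and default to i32 on their own.
std::optional<Ty> CommonWorkgroupSizeType(const std::vector<Ty>& args) {
    bool any_i32 = false;
    bool any_u32 = false;
    for (auto t : args) {
        any_i32 = any_i32 || (t == Ty::kI32);
        any_u32 = any_u32 || (t == Ty::kU32);
    }
    if (any_i32 && any_u32) {
        return std::nullopt;  // "arguments must be of the same type, either i32 or u32"
    }
    if (any_i32) {
        return Ty::kI32;
    }
    if (any_u32) {
        return Ty::kU32;
    }
    return Ty::kI32;  // all abstract-int: materialize to i32
}

int main() {
    assert(CommonWorkgroupSizeType({Ty::kAbstractInt, Ty::kI32}) == Ty::kI32);
    assert(CommonWorkgroupSizeType({Ty::kAbstractInt}) == Ty::kI32);
    assert(!CommonWorkgroupSizeType({Ty::kI32, Ty::kU32}).has_value());
    return 0;
}
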
bool Resolver::Statements(const ast::StatementList& stmts) {
- sem::Behaviors behaviors{sem::Behavior::kNext};
+ sem::Behaviors behaviors{sem::Behavior::kNext};
- bool reachable = true;
- for (auto* stmt : stmts) {
- Mark(stmt);
- auto* sem = Statement(stmt);
- if (!sem) {
- return false;
- }
- // s1 s2:(B1∖{Next}) ∪ B2
- sem->SetIsReachable(reachable);
- if (reachable) {
- behaviors = (behaviors - sem::Behavior::kNext) + sem->Behaviors();
+ bool reachable = true;
+ for (auto* stmt : stmts) {
+ Mark(stmt);
+ auto* sem = Statement(stmt);
+ if (!sem) {
+ return false;
+ }
+        // Statement sequence rule: behaviors of 's1 s2' are (B1 ∖ {Next}) ∪ B2.
+ sem->SetIsReachable(reachable);
+ if (reachable) {
+ behaviors = (behaviors - sem::Behavior::kNext) + sem->Behaviors();
+ }
+ reachable = reachable && sem->Behaviors().Contains(sem::Behavior::kNext);
}
- reachable = reachable && sem->Behaviors().Contains(sem::Behavior::kNext);
- }
- current_statement_->Behaviors() = behaviors;
+ current_statement_->Behaviors() = behaviors;
- if (!ValidateStatements(stmts)) {
- return false;
- }
+ if (!validator_.Statements(stmts)) {
+ return false;
+ }
- return true;
+ return true;
}
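
The loop above implements the WGSL statement-sequence rule noted in the comment, s1 s2 : (B1 ∖ {Next}) ∪ B2 (set difference then union), plus reachability: a statement is only reachable while every earlier statement can fall through via Next. A sketch with a plain bitmask standing in for sem::Behaviors; the names below are illustrative, not Tint's API.

// Sketch of the statement-sequence behavior rule.
#include <cassert>
#include <cstdint>
#include <vector>

enum Behavior : uint32_t { kNext = 1, kReturn = 2, kBreak = 4, kContinue = 8 };

// Combine the behaviors of a statement list, mirroring Resolver::Statements().
uint32_t SequenceBehaviors(const std::vector<uint32_t>& stmts) {
    uint32_t behaviors = kNext;
    bool reachable = true;
    for (uint32_t b : stmts) {
        if (reachable) {
            behaviors = (behaviors & ~kNext) | b;  // (B1 \ {Next}) ∪ B2
        }
        reachable = reachable && (b & kNext);  // later statements need fall-through
    }
    return behaviors;
}

int main() {
    // { foo(); return; }  ->  {Return}; anything after the return is unreachable.
    assert(SequenceBehaviors({kNext, kReturn}) == kReturn);
    assert(SequenceBehaviors({kNext, kReturn, kNext}) == kReturn);
    // An empty block behaves as {Next}.
    assert(SequenceBehaviors({}) == kNext);
    return 0;
}
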
sem::Statement* Resolver::Statement(const ast::Statement* stmt) {
- return Switch(
- stmt,
- // Compound statements. These create their own sem::CompoundStatement
- // bindings.
- [&](const ast::BlockStatement* b) { return BlockStatement(b); },
- [&](const ast::ForLoopStatement* l) { return ForLoopStatement(l); },
- [&](const ast::LoopStatement* l) { return LoopStatement(l); },
- [&](const ast::IfStatement* i) { return IfStatement(i); },
- [&](const ast::SwitchStatement* s) { return SwitchStatement(s); },
-
- // Non-Compound statements
- [&](const ast::AssignmentStatement* a) { return AssignmentStatement(a); },
- [&](const ast::BreakStatement* b) { return BreakStatement(b); },
- [&](const ast::CallStatement* c) { return CallStatement(c); },
- [&](const ast::CompoundAssignmentStatement* c) {
- return CompoundAssignmentStatement(c);
- },
- [&](const ast::ContinueStatement* c) { return ContinueStatement(c); },
- [&](const ast::DiscardStatement* d) { return DiscardStatement(d); },
- [&](const ast::FallthroughStatement* f) {
- return FallthroughStatement(f);
- },
- [&](const ast::IncrementDecrementStatement* i) {
- return IncrementDecrementStatement(i);
- },
- [&](const ast::ReturnStatement* r) { return ReturnStatement(r); },
- [&](const ast::VariableDeclStatement* v) {
- return VariableDeclStatement(v);
- },
-
- // Error cases
- [&](const ast::CaseStatement*) {
- AddError("case statement can only be used inside a switch statement",
- stmt->source);
- return nullptr;
- },
- [&](const ast::ElseStatement*) {
- TINT_ICE(Resolver, diagnostics_)
- << "Resolver::Statement() encountered an Else statement. Else "
- "statements are embedded in If statements, so should never be "
- "encountered as top-level statements";
- return nullptr;
- },
- [&](Default) {
- AddError(
- "unknown statement type: " + std::string(stmt->TypeInfo().name),
- stmt->source);
- return nullptr;
- });
+ return Switch(
+ stmt,
+ // Compound statements. These create their own sem::CompoundStatement
+ // bindings.
+ [&](const ast::BlockStatement* b) { return BlockStatement(b); },
+ [&](const ast::ForLoopStatement* l) { return ForLoopStatement(l); },
+ [&](const ast::LoopStatement* l) { return LoopStatement(l); },
+ [&](const ast::IfStatement* i) { return IfStatement(i); },
+ [&](const ast::SwitchStatement* s) { return SwitchStatement(s); },
+
+ // Non-Compound statements
+ [&](const ast::AssignmentStatement* a) { return AssignmentStatement(a); },
+ [&](const ast::BreakStatement* b) { return BreakStatement(b); },
+ [&](const ast::CallStatement* c) { return CallStatement(c); },
+ [&](const ast::CompoundAssignmentStatement* c) { return CompoundAssignmentStatement(c); },
+ [&](const ast::ContinueStatement* c) { return ContinueStatement(c); },
+ [&](const ast::DiscardStatement* d) { return DiscardStatement(d); },
+ [&](const ast::FallthroughStatement* f) { return FallthroughStatement(f); },
+ [&](const ast::IncrementDecrementStatement* i) { return IncrementDecrementStatement(i); },
+ [&](const ast::ReturnStatement* r) { return ReturnStatement(r); },
+ [&](const ast::VariableDeclStatement* v) { return VariableDeclStatement(v); },
+
+ // Error cases
+ [&](const ast::CaseStatement*) {
+ AddError("case statement can only be used inside a switch statement", stmt->source);
+ return nullptr;
+ },
+ [&](Default) {
+ AddError("unknown statement type: " + std::string(stmt->TypeInfo().name), stmt->source);
+ return nullptr;
+ });
}
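
Statement() is a pure type-dispatch: each concrete ast::Statement kind is forwarded to its dedicated resolver, case statements are rejected outside of a switch, and a Default branch catches unknown node types. A rough sketch of the idiom, with dynamic_cast standing in for Tint's Castable-based Switch(); the types and strings below are illustrative only.

// Sketch of the type-dispatch idiom used by Resolver::Statement().
#include <cassert>
#include <string>

struct Stmt { virtual ~Stmt() = default; };
struct ReturnStmt : Stmt {};
struct BreakStmt : Stmt {};
struct CaseStmt : Stmt {};

std::string Resolve(const Stmt* stmt) {
    if (dynamic_cast<const ReturnStmt*>(stmt)) {
        return "return";  // would call ReturnStatement(r)
    }
    if (dynamic_cast<const BreakStmt*>(stmt)) {
        return "break";   // would call BreakStatement(b)
    }
    if (dynamic_cast<const CaseStmt*>(stmt)) {
        // mirrors the error branch above
        return "error: case statement can only be used inside a switch statement";
    }
    return "error: unknown statement type";  // the Default branch
}

int main() {
    ReturnStmt r;
    CaseStmt c;
    assert(Resolve(&r) == "return");
    assert(Resolve(&c).rfind("error:", 0) == 0);
    return 0;
}
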
sem::CaseStatement* Resolver::CaseStatement(const ast::CaseStatement* stmt) {
- auto* sem = builder_->create<sem::CaseStatement>(
- stmt, current_compound_statement_, current_function_);
- return StatementScope(stmt, sem, [&] {
- for (auto* sel : stmt->selectors) {
- Mark(sel);
- }
- Mark(stmt->body);
- auto* body = BlockStatement(stmt->body);
- if (!body) {
- return false;
- }
- sem->SetBlock(body);
- sem->Behaviors() = body->Behaviors();
- return true;
- });
+ auto* sem =
+ builder_->create<sem::CaseStatement>(stmt, current_compound_statement_, current_function_);
+ return StatementScope(stmt, sem, [&] {
+ sem->Selectors().reserve(stmt->selectors.size());
+ for (auto* sel : stmt->selectors) {
+ auto* expr = Expression(sel);
+ if (!expr) {
+ return false;
+ }
+ sem->Selectors().emplace_back(expr);
+ }
+ Mark(stmt->body);
+ auto* body = BlockStatement(stmt->body);
+ if (!body) {
+ return false;
+ }
+ sem->SetBlock(body);
+ sem->Behaviors() = body->Behaviors();
+ return true;
+ });
}
sem::IfStatement* Resolver::IfStatement(const ast::IfStatement* stmt) {
- auto* sem = builder_->create<sem::IfStatement>(
- stmt, current_compound_statement_, current_function_);
- return StatementScope(stmt, sem, [&] {
- auto* cond = Expression(stmt->condition);
- if (!cond) {
- return false;
- }
- sem->SetCondition(cond);
- sem->Behaviors() = cond->Behaviors();
- sem->Behaviors().Remove(sem::Behavior::kNext);
-
- Mark(stmt->body);
- auto* body = builder_->create<sem::BlockStatement>(
- stmt->body, current_compound_statement_, current_function_);
- if (!StatementScope(stmt->body, body,
- [&] { return Statements(stmt->body->statements); })) {
- return false;
- }
- sem->Behaviors().Add(body->Behaviors());
-
- for (auto* else_stmt : stmt->else_statements) {
- Mark(else_stmt);
- auto* else_sem = ElseStatement(else_stmt);
- if (!else_sem) {
- return false;
- }
- sem->Behaviors().Add(else_sem->Behaviors());
- }
-
- if (stmt->else_statements.empty() ||
- stmt->else_statements.back()->condition != nullptr) {
- // https://www.w3.org/TR/WGSL/#behaviors-rules
- // if statements without an else branch are treated as if they had an
- // empty else branch (which adds Next to their behavior)
- sem->Behaviors().Add(sem::Behavior::kNext);
- }
+ auto* sem =
+ builder_->create<sem::IfStatement>(stmt, current_compound_statement_, current_function_);
+ return StatementScope(stmt, sem, [&] {
+ auto* cond = Expression(stmt->condition);
+ if (!cond) {
+ return false;
+ }
+ sem->SetCondition(cond);
+ sem->Behaviors() = cond->Behaviors();
+ sem->Behaviors().Remove(sem::Behavior::kNext);
+
+ Mark(stmt->body);
+ auto* body = builder_->create<sem::BlockStatement>(stmt->body, current_compound_statement_,
+ current_function_);
+ if (!StatementScope(stmt->body, body, [&] { return Statements(stmt->body->statements); })) {
+ return false;
+ }
+ sem->Behaviors().Add(body->Behaviors());
- return ValidateIfStatement(sem);
- });
-}
+ if (stmt->else_statement) {
+ Mark(stmt->else_statement);
+ auto* else_sem = Statement(stmt->else_statement);
+ if (!else_sem) {
+ return false;
+ }
+ sem->Behaviors().Add(else_sem->Behaviors());
+ } else {
+ // https://www.w3.org/TR/WGSL/#behaviors-rules
+ // if statements without an else branch are treated as if they had an
+ // empty else branch (which adds Next to their behavior)
+ sem->Behaviors().Add(sem::Behavior::kNext);
+ }
-sem::ElseStatement* Resolver::ElseStatement(const ast::ElseStatement* stmt) {
- auto* sem = builder_->create<sem::ElseStatement>(
- stmt, current_compound_statement_->As<sem::IfStatement>(),
- current_function_);
- return StatementScope(stmt, sem, [&] {
- if (auto* cond_expr = stmt->condition) {
- auto* cond = Expression(cond_expr);
- if (!cond) {
- return false;
- }
- sem->SetCondition(cond);
- // https://www.w3.org/TR/WGSL/#behaviors-rules
- // if statements with else if branches are treated as if they were nested
- // simple if/else statements
- sem->Behaviors() = cond->Behaviors();
- }
- sem->Behaviors().Remove(sem::Behavior::kNext);
-
- Mark(stmt->body);
- auto* body = builder_->create<sem::BlockStatement>(
- stmt->body, current_compound_statement_, current_function_);
- if (!StatementScope(stmt->body, body,
- [&] { return Statements(stmt->body->statements); })) {
- return false;
- }
- sem->Behaviors().Add(body->Behaviors());
-
- return ValidateElseStatement(sem);
- });
+ return validator_.IfStatement(sem);
+ });
}
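
The behavior computation above follows the WGSL rule cited in the comments: an if-statement's behaviors are the condition's (minus Next) plus the body's, plus either the else branch's or an implicit Next when there is no else. A short sketch, reusing the same bitmask stand-in as the sketch after Statements(); not Tint's API.

// Sketch of the if-statement behavior rule.
#include <cassert>
#include <cstdint>

enum Behavior : uint32_t { kNext = 1, kReturn = 2, kBreak = 4, kContinue = 8 };

uint32_t IfBehaviors(uint32_t cond, uint32_t body, const uint32_t* else_stmt) {
    uint32_t b = cond & ~kNext;      // condition evaluation, minus fall-through
    b |= body;                       // then-branch
    b |= else_stmt ? *else_stmt      // else branch, if present
                   : kNext;          // a missing else behaves like an empty block
    return b;
}

int main() {
    uint32_t ret = kReturn;
    // 'if (c) { return; }' can still fall through, so it keeps Next.
    assert(IfBehaviors(kNext, kReturn, nullptr) == (kReturn | kNext));
    // 'if (c) { return; } else { return; }' cannot.
    assert(IfBehaviors(kNext, kReturn, &ret) == kReturn);
    return 0;
}
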
sem::BlockStatement* Resolver::BlockStatement(const ast::BlockStatement* stmt) {
- auto* sem = builder_->create<sem::BlockStatement>(
- stmt->As<ast::BlockStatement>(), current_compound_statement_,
- current_function_);
- return StatementScope(stmt, sem,
- [&] { return Statements(stmt->statements); });
+ auto* sem = builder_->create<sem::BlockStatement>(
+ stmt->As<ast::BlockStatement>(), current_compound_statement_, current_function_);
+ return StatementScope(stmt, sem, [&] { return Statements(stmt->statements); });
}
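
BlockStatement(), like the other statement resolvers above, funnels its work through StatementScope() so that current_compound_statement_ and current_statement_ point at the right semantic node while nested statements are resolved, and are restored afterwards; Function() uses TINT_SCOPED_ASSIGNMENT for the same reason. A minimal RAII sketch of that save-and-restore pattern, assuming none of Tint's actual helpers.

// Minimal scoped-assignment sketch; Tint's TINT_SCOPED_ASSIGNMENT / StatementScope()
// follow the same save-set-restore shape but carry extra semantic bookkeeping.
#include <cassert>
#include <utility>

template <typename T>
class ScopedAssignment {
  public:
    ScopedAssignment(T& ref, T new_value) : ref_(ref), old_(std::move(ref)) {
        ref_ = std::move(new_value);
    }
    ~ScopedAssignment() { ref_ = std::move(old_); }

  private:
    T& ref_;
    T old_;
};

int main() {
    int current_statement = 1;
    {
        ScopedAssignment<int> scope(current_statement, 2);  // enter a nested statement
        assert(current_statement == 2);
    }  // leaving the scope restores the previous statement
    assert(current_statement == 1);
    return 0;
}
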
sem::LoopStatement* Resolver::LoopStatement(const ast::LoopStatement* stmt) {
- auto* sem = builder_->create<sem::LoopStatement>(
- stmt, current_compound_statement_, current_function_);
- return StatementScope(stmt, sem, [&] {
- Mark(stmt->body);
-
- auto* body = builder_->create<sem::LoopBlockStatement>(
- stmt->body, current_compound_statement_, current_function_);
- return StatementScope(stmt->body, body, [&] {
- if (!Statements(stmt->body->statements)) {
- return false;
- }
- auto& behaviors = sem->Behaviors();
- behaviors = body->Behaviors();
-
- if (stmt->continuing) {
- Mark(stmt->continuing);
- if (!stmt->continuing->Empty()) {
- auto* continuing = StatementScope(
- stmt->continuing,
- builder_->create<sem::LoopContinuingBlockStatement>(
- stmt->continuing, current_compound_statement_,
- current_function_),
- [&] { return Statements(stmt->continuing->statements); });
- if (!continuing) {
+ auto* sem =
+ builder_->create<sem::LoopStatement>(stmt, current_compound_statement_, current_function_);
+ return StatementScope(stmt, sem, [&] {
+ Mark(stmt->body);
+
+ auto* body = builder_->create<sem::LoopBlockStatement>(
+ stmt->body, current_compound_statement_, current_function_);
+ return StatementScope(stmt->body, body, [&] {
+ if (!Statements(stmt->body->statements)) {
+ return false;
+ }
+ auto& behaviors = sem->Behaviors();
+ behaviors = body->Behaviors();
+
+ if (stmt->continuing) {
+ Mark(stmt->continuing);
+ auto* continuing = StatementScope(
+ stmt->continuing,
+ builder_->create<sem::LoopContinuingBlockStatement>(
+ stmt->continuing, current_compound_statement_, current_function_),
+ [&] { return Statements(stmt->continuing->statements); });
+ if (!continuing) {
+ return false;
+ }
+ behaviors.Add(continuing->Behaviors());
+ }
+
+ if (behaviors.Contains(sem::Behavior::kBreak)) { // Does the loop exit?
+ behaviors.Add(sem::Behavior::kNext);
+ } else {
+ behaviors.Remove(sem::Behavior::kNext);
+ }
+ behaviors.Remove(sem::Behavior::kBreak, sem::Behavior::kContinue);
+
+ return validator_.LoopStatement(sem);
+ });
+ });
+}
+
+sem::ForLoopStatement* Resolver::ForLoopStatement(const ast::ForLoopStatement* stmt) {
+ auto* sem = builder_->create<sem::ForLoopStatement>(stmt, current_compound_statement_,
+ current_function_);
+ return StatementScope(stmt, sem, [&] {
+ auto& behaviors = sem->Behaviors();
+ if (auto* initializer = stmt->initializer) {
+ Mark(initializer);
+ auto* init = Statement(initializer);
+ if (!init) {
+ return false;
+ }
+ behaviors.Add(init->Behaviors());
+ }
+
+ if (auto* cond_expr = stmt->condition) {
+ auto* cond = Expression(cond_expr);
+ if (!cond) {
+ return false;
+ }
+ sem->SetCondition(cond);
+ behaviors.Add(cond->Behaviors());
+ }
+
+ if (auto* continuing = stmt->continuing) {
+ Mark(continuing);
+ auto* cont = Statement(continuing);
+ if (!cont) {
+ return false;
+ }
+ behaviors.Add(cont->Behaviors());
+ }
+
+ Mark(stmt->body);
+
+ auto* body = builder_->create<sem::LoopBlockStatement>(
+ stmt->body, current_compound_statement_, current_function_);
+ if (!StatementScope(stmt->body, body, [&] { return Statements(stmt->body->statements); })) {
return false;
- }
- behaviors.Add(continuing->Behaviors());
}
- }
- if (behaviors.Contains(sem::Behavior::kBreak)) { // Does the loop exit?
- behaviors.Add(sem::Behavior::kNext);
- } else {
- behaviors.Remove(sem::Behavior::kNext);
- }
- behaviors.Remove(sem::Behavior::kBreak, sem::Behavior::kContinue);
+ behaviors.Add(body->Behaviors());
+ if (stmt->condition || behaviors.Contains(sem::Behavior::kBreak)) { // Does the loop exit?
+ behaviors.Add(sem::Behavior::kNext);
+ } else {
+ behaviors.Remove(sem::Behavior::kNext);
+ }
+ behaviors.Remove(sem::Behavior::kBreak, sem::Behavior::kContinue);
- return ValidateLoopStatement(sem);
+ return validator_.ForLoopStatement(sem);
});
- });
}
-sem::ForLoopStatement* Resolver::ForLoopStatement(
- const ast::ForLoopStatement* stmt) {
- auto* sem = builder_->create<sem::ForLoopStatement>(
- stmt, current_compound_statement_, current_function_);
- return StatementScope(stmt, sem, [&] {
- auto& behaviors = sem->Behaviors();
- if (auto* initializer = stmt->initializer) {
- Mark(initializer);
- auto* init = Statement(initializer);
- if (!init) {
- return false;
- }
- behaviors.Add(init->Behaviors());
+sem::Expression* Resolver::Expression(const ast::Expression* root) {
+ std::vector<const ast::Expression*> sorted;
+ constexpr size_t kMaxExpressionDepth = 512U;
+ bool failed = false;
+ if (!ast::TraverseExpressions<ast::TraverseOrder::RightToLeft>(
+ root, diagnostics_, [&](const ast::Expression* expr, size_t depth) {
+ if (depth > kMaxExpressionDepth) {
+ AddError(
+ "reached max expression depth of " + std::to_string(kMaxExpressionDepth),
+ expr->source);
+ failed = true;
+ return ast::TraverseAction::Stop;
+ }
+ if (!Mark(expr)) {
+ failed = true;
+ return ast::TraverseAction::Stop;
+ }
+ sorted.emplace_back(expr);
+ return ast::TraverseAction::Descend;
+ })) {
+ return nullptr;
}
- if (auto* cond_expr = stmt->condition) {
- auto* cond = Expression(cond_expr);
- if (!cond) {
- return false;
- }
- sem->SetCondition(cond);
- behaviors.Add(cond->Behaviors());
+ if (failed) {
+ return nullptr;
}
- if (auto* continuing = stmt->continuing) {
- Mark(continuing);
- auto* cont = Statement(continuing);
- if (!cont) {
- return false;
- }
- behaviors.Add(cont->Behaviors());
+ for (auto* expr : utils::Reverse(sorted)) {
+ auto* sem_expr = Switch(
+ expr,
+ [&](const ast::IndexAccessorExpression* array) -> sem::Expression* {
+ return IndexAccessor(array);
+ },
+ [&](const ast::BinaryExpression* bin_op) -> sem::Expression* { return Binary(bin_op); },
+ [&](const ast::BitcastExpression* bitcast) -> sem::Expression* {
+ return Bitcast(bitcast);
+ },
+ [&](const ast::CallExpression* call) -> sem::Expression* { return Call(call); },
+ [&](const ast::IdentifierExpression* ident) -> sem::Expression* {
+ return Identifier(ident);
+ },
+ [&](const ast::LiteralExpression* literal) -> sem::Expression* {
+ return Literal(literal);
+ },
+ [&](const ast::MemberAccessorExpression* member) -> sem::Expression* {
+ return MemberAccessor(member);
+ },
+ [&](const ast::UnaryOpExpression* unary) -> sem::Expression* { return UnaryOp(unary); },
+ [&](const ast::PhonyExpression*) -> sem::Expression* {
+ return builder_->create<sem::Expression>(expr, builder_->create<sem::Void>(),
+ current_statement_, sem::Constant{},
+ /* has_side_effects */ false);
+ },
+ [&](Default) {
+ TINT_ICE(Resolver, diagnostics_)
+ << "unhandled expression type: " << expr->TypeInfo().name;
+ return nullptr;
+ });
+ if (!sem_expr) {
+ return nullptr;
+ }
+
+ builder_->Sem().Add(expr, sem_expr);
+ if (expr == root) {
+ return sem_expr;
+ }
}
- Mark(stmt->body);
+ TINT_ICE(Resolver, diagnostics_) << "Expression() did not find root node";
+ return nullptr;
+}
- auto* body = builder_->create<sem::LoopBlockStatement>(
- stmt->body, current_compound_statement_, current_function_);
- if (!StatementScope(stmt->body, body,
- [&] { return Statements(stmt->body->statements); })) {
- return false;
+const sem::Expression* Resolver::Materialize(const sem::Expression* expr,
+ const sem::Type* target_type /* = nullptr */) {
+ if (!expr) {
+ return nullptr; // Allow for Materialize(Expression(blah))
}
- behaviors.Add(body->Behaviors());
- if (stmt->condition ||
- behaviors.Contains(sem::Behavior::kBreak)) { // Does the loop exit?
- behaviors.Add(sem::Behavior::kNext);
- } else {
- behaviors.Remove(sem::Behavior::kNext);
- }
- behaviors.Remove(sem::Behavior::kBreak, sem::Behavior::kContinue);
+    // Helper for actually creating the materialize node, performing the constant cast, updating
+ // the ast -> sem binding, and performing validation.
+ auto materialize = [&](const sem::Type* target_ty) -> sem::Materialize* {
+ auto* decl = expr->Declaration();
+ auto expr_val = EvaluateConstantValue(decl, expr->Type());
+ if (!expr_val) {
+ return nullptr;
+ }
+ if (!expr_val->IsValid()) {
+ TINT_ICE(Resolver, builder_->Diagnostics())
+ << decl->source
+ << "EvaluateConstantValue() returned invalid value for materialized value of type: "
+ << builder_->FriendlyName(expr->Type());
+ return nullptr;
+ }
+ auto materialized_val = ConvertValue(expr_val.Get(), target_ty, decl->source);
+ if (!materialized_val) {
+ return nullptr;
+ }
+ if (!materialized_val->IsValid()) {
+ TINT_ICE(Resolver, builder_->Diagnostics())
+ << decl->source << "ConvertValue(" << builder_->FriendlyName(expr_val->Type())
+ << " -> " << builder_->FriendlyName(target_ty) << ") returned invalid value";
+ return nullptr;
+ }
+ auto* m =
+ builder_->create<sem::Materialize>(expr, current_statement_, materialized_val.Get());
+ m->Behaviors() = expr->Behaviors();
+ builder_->Sem().Replace(decl, m);
+ return validator_.Materialize(m) ? m : nullptr;
+ };
- return ValidateForLoopStatement(sem);
- });
+ // Helpers for constructing semantic types
+ auto i32 = [&] { return builder_->create<sem::I32>(); };
+ auto f32 = [&] { return builder_->create<sem::F32>(); };
+ auto i32v = [&](uint32_t width) { return builder_->create<sem::Vector>(i32(), width); };
+ auto f32v = [&](uint32_t width) { return builder_->create<sem::Vector>(f32(), width); };
+ auto f32m = [&](uint32_t columns, uint32_t rows) {
+ return builder_->create<sem::Matrix>(f32v(rows), columns);
+ };
+
+ // Type dispatch based on the expression type
+ return Switch<sem::Expression*>(
+ expr->Type(), //
+ [&](const sem::AbstractInt*) { return materialize(target_type ? target_type : i32()); },
+ [&](const sem::AbstractFloat*) { return materialize(target_type ? target_type : f32()); },
+ [&](const sem::Vector* v) {
+ return Switch(
+ v->type(), //
+ [&](const sem::AbstractInt*) {
+ return materialize(target_type ? target_type : i32v(v->Width()));
+ },
+ [&](const sem::AbstractFloat*) {
+ return materialize(target_type ? target_type : f32v(v->Width()));
+ },
+ [&](Default) { return expr; });
+ },
+ [&](const sem::Matrix* m) {
+ return Switch(
+ m->type(), //
+ [&](const sem::AbstractFloat*) {
+ return materialize(target_type ? target_type : f32m(m->columns(), m->rows()));
+ },
+ [&](Default) { return expr; });
+ },
+ [&](Default) { return expr; });
}
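
Materialize() concretizes abstract-int and abstract-float expressions: either to the explicitly requested target type, or to the defaults i32 and f32 (element-wise for vectors and matrices), folding the constant value through ConvertValue() along the way. A sketch of just the type-selection step; the Ty struct below is a simplified stand-in for sem::Type (scalars and vectors only), not Tint's API.

// Sketch of the default-materialization rule in Materialize().
#include <cassert>
#include <cstdint>

enum class Scalar { kAbstractInt, kAbstractFloat, kI32, kU32, kF32 };

struct Ty {
    Scalar element;
    uint32_t width;  // 1 = scalar, >1 = vector
    bool operator==(const Ty& o) const { return element == o.element && width == o.width; }
};

// With no explicit target type, abstract-int becomes i32 and abstract-float becomes f32,
// preserving the vector width; concrete types are returned unchanged.
Ty DefaultMaterializedType(Ty ty) {
    if (ty.element == Scalar::kAbstractInt) {
        ty.element = Scalar::kI32;
    }
    if (ty.element == Scalar::kAbstractFloat) {
        ty.element = Scalar::kF32;
    }
    return ty;
}

int main() {
    assert((DefaultMaterializedType({Scalar::kAbstractInt, 1}) == Ty{Scalar::kI32, 1}));
    assert((DefaultMaterializedType({Scalar::kAbstractFloat, 4}) == Ty{Scalar::kF32, 4}));
    assert((DefaultMaterializedType({Scalar::kU32, 2}) == Ty{Scalar::kU32, 2}));
    return 0;
}
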
-sem::Expression* Resolver::Expression(const ast::Expression* root) {
- std::vector<const ast::Expression*> sorted;
- bool mark_failed = false;
- if (!ast::TraverseExpressions<ast::TraverseOrder::RightToLeft>(
- root, diagnostics_, [&](const ast::Expression* expr) {
- if (!Mark(expr)) {
- mark_failed = true;
- return ast::TraverseAction::Stop;
+bool Resolver::MaterializeArguments(std::vector<const sem::Expression*>& args,
+ const sem::CallTarget* target) {
+ for (size_t i = 0, n = std::min(args.size(), target->Parameters().size()); i < n; i++) {
+ const auto* param_ty = target->Parameters()[i]->Type();
+ if (ShouldMaterializeArgument(param_ty)) {
+ auto* materialized = Materialize(args[i], param_ty);
+ if (!materialized) {
+ return false;
}
- sorted.emplace_back(expr);
- return ast::TraverseAction::Descend;
- })) {
- return nullptr;
- }
+ args[i] = materialized;
+ }
+ }
+ return true;
+}
- if (mark_failed) {
- return nullptr;
- }
+bool Resolver::ShouldMaterializeArgument(const sem::Type* parameter_ty) const {
+ const auto* param_el_ty = sem::Type::ElementOf(parameter_ty);
+ return param_el_ty && !param_el_ty->Is<sem::AbstractNumeric>();
+}
- for (auto* expr : utils::Reverse(sorted)) {
- auto* sem_expr = Switch(
- expr,
- [&](const ast::IndexAccessorExpression* array) -> sem::Expression* {
- return IndexAccessor(array);
- },
- [&](const ast::BinaryExpression* bin_op) -> sem::Expression* {
- return Binary(bin_op);
- },
- [&](const ast::BitcastExpression* bitcast) -> sem::Expression* {
- return Bitcast(bitcast);
- },
- [&](const ast::CallExpression* call) -> sem::Expression* {
- return Call(call);
- },
- [&](const ast::IdentifierExpression* ident) -> sem::Expression* {
- return Identifier(ident);
- },
- [&](const ast::LiteralExpression* literal) -> sem::Expression* {
- return Literal(literal);
- },
- [&](const ast::MemberAccessorExpression* member) -> sem::Expression* {
- return MemberAccessor(member);
- },
- [&](const ast::UnaryOpExpression* unary) -> sem::Expression* {
- return UnaryOp(unary);
- },
- [&](const ast::PhonyExpression*) -> sem::Expression* {
- return builder_->create<sem::Expression>(
- expr, builder_->create<sem::Void>(), current_statement_,
- sem::Constant{}, /* has_side_effects */ false);
+sem::Expression* Resolver::IndexAccessor(const ast::IndexAccessorExpression* expr) {
+ auto* idx = Materialize(sem_.Get(expr->index));
+ if (!idx) {
+ return nullptr;
+ }
+ auto* obj = sem_.Get(expr->object);
+ auto* obj_raw_ty = obj->Type();
+ auto* obj_ty = obj_raw_ty->UnwrapRef();
+ auto* ty = Switch(
+ obj_ty, //
+ [&](const sem::Array* arr) { return arr->ElemType(); },
+ [&](const sem::Vector* vec) { return vec->type(); },
+ [&](const sem::Matrix* mat) {
+ return builder_->create<sem::Vector>(mat->type(), mat->rows());
},
[&](Default) {
- TINT_ICE(Resolver, diagnostics_)
- << "unhandled expression type: " << expr->TypeInfo().name;
- return nullptr;
+ AddError("cannot index type '" + sem_.TypeNameOf(obj_ty) + "'", expr->source);
+ return nullptr;
});
- if (!sem_expr) {
- return nullptr;
+ if (ty == nullptr) {
+ return nullptr;
}
- builder_->Sem().Add(expr, sem_expr);
- if (expr == root) {
- return sem_expr;
+ auto* idx_ty = idx->Type()->UnwrapRef();
+ if (!idx_ty->IsAnyOf<sem::I32, sem::U32>()) {
+ AddError("index must be of type 'i32' or 'u32', found: '" + sem_.TypeNameOf(idx_ty) + "'",
+ idx->Declaration()->source);
+ return nullptr;
}
- }
- TINT_ICE(Resolver, diagnostics_) << "Expression() did not find root node";
- return nullptr;
-}
+ // If we're extracting from a reference, we return a reference.
+ if (auto* ref = obj_raw_ty->As<sem::Reference>()) {
+ ty = builder_->create<sem::Reference>(ty, ref->StorageClass(), ref->Access());
+ }
-sem::Expression* Resolver::IndexAccessor(
- const ast::IndexAccessorExpression* expr) {
- auto* idx = Sem(expr->index);
- auto* obj = Sem(expr->object);
- auto* obj_raw_ty = obj->Type();
- auto* obj_ty = obj_raw_ty->UnwrapRef();
- auto* ty = Switch(
- obj_ty, //
- [&](const sem::Array* arr) { return arr->ElemType(); },
- [&](const sem::Vector* vec) { return vec->type(); },
- [&](const sem::Matrix* mat) {
- return builder_->create<sem::Vector>(mat->type(), mat->rows());
- },
- [&](Default) {
- AddError("cannot index type '" + TypeNameOf(obj_ty) + "'",
- expr->source);
+ auto val = EvaluateConstantValue(expr, ty);
+ if (!val) {
return nullptr;
- });
- if (ty == nullptr) {
- return nullptr;
- }
-
- auto* idx_ty = idx->Type()->UnwrapRef();
- if (!idx_ty->IsAnyOf<sem::I32, sem::U32>()) {
- AddError("index must be of type 'i32' or 'u32', found: '" +
- TypeNameOf(idx_ty) + "'",
- idx->Declaration()->source);
- return nullptr;
- }
-
- // If we're extracting from a reference, we return a reference.
- if (auto* ref = obj_raw_ty->As<sem::Reference>()) {
- ty = builder_->create<sem::Reference>(ty, ref->StorageClass(),
- ref->Access());
- }
-
- auto val = EvaluateConstantValue(expr, ty);
- bool has_side_effects = idx->HasSideEffects() || obj->HasSideEffects();
- auto* sem = builder_->create<sem::Expression>(expr, ty, current_statement_,
- val, has_side_effects);
- sem->Behaviors() = idx->Behaviors() + obj->Behaviors();
- return sem;
+ }
+ bool has_side_effects = idx->HasSideEffects() || obj->HasSideEffects();
+ auto* sem = builder_->create<sem::Expression>(expr, ty, current_statement_, val.Get(),
+ has_side_effects, obj->SourceVariable());
+ sem->Behaviors() = idx->Behaviors() + obj->Behaviors();
+ return sem;
}
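
IndexAccessor() derives the result type from the object being indexed: the element type for arrays and vectors, a column vector for matrices, with the materialized index required to be i32 or u32, and with reference-ness preserved so that indexing a reference yields a reference with the same storage class and access. A small sketch of the type computation; the ObjectType struct and strings below stand in for the semantic types and are not Tint's API.

// Sketch of the result-type rule in IndexAccessor().
#include <cassert>
#include <string>

struct ObjectType {
    std::string kind;     // "array", "vector" or "matrix"
    std::string element;  // element (or column element) type name
    int rows = 0;         // only used for matrices
    bool is_reference = false;
};

// Result of obj[i]: arrays/vectors yield their element type, matrices yield a column
// vector; if the object is a reference, so is the result.
std::string IndexResultType(const ObjectType& obj) {
    std::string ty;
    if (obj.kind == "array" || obj.kind == "vector") {
        ty = obj.element;
    } else {  // matrix
        ty = "vec" + std::to_string(obj.rows) + "<" + obj.element + ">";
    }
    return obj.is_reference ? "ref<" + ty + ">" : ty;
}

int main() {
    assert(IndexResultType({"array", "f32", 0, false}) == "f32");
    assert(IndexResultType({"matrix", "f32", 3, false}) == "vec3<f32>");
    assert(IndexResultType({"vector", "u32", 0, true}) == "ref<u32>");
    return 0;
}
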
sem::Expression* Resolver::Bitcast(const ast::BitcastExpression* expr) {
- auto* inner = Sem(expr->expr);
- auto* ty = Type(expr->type);
- if (!ty) {
- return nullptr;
- }
+ auto* inner = Materialize(sem_.Get(expr->expr));
+ if (!inner) {
+ return nullptr;
+ }
+ auto* ty = Type(expr->type);
+ if (!ty) {
+ return nullptr;
+ }
- auto val = EvaluateConstantValue(expr, ty);
- auto* sem = builder_->create<sem::Expression>(expr, ty, current_statement_,
- val, inner->HasSideEffects());
+ auto val = EvaluateConstantValue(expr, ty);
+ if (!val) {
+ return nullptr;
+ }
+ auto* sem = builder_->create<sem::Expression>(expr, ty, current_statement_, val.Get(),
+ inner->HasSideEffects());
- sem->Behaviors() = inner->Behaviors();
+ sem->Behaviors() = inner->Behaviors();
- if (!ValidateBitcast(expr, ty)) {
- return nullptr;
- }
+ if (!validator_.Bitcast(expr, ty)) {
+ return nullptr;
+ }
- return sem;
+ return sem;
}
sem::Call* Resolver::Call(const ast::CallExpression* expr) {
- std::vector<const sem::Expression*> args(expr->args.size());
- std::vector<const sem::Type*> arg_tys(args.size());
- sem::Behaviors arg_behaviors;
-
- // The element type of all the arguments. Nullptr if argument types are
- // different.
- const sem::Type* arg_el_ty = nullptr;
-
- for (size_t i = 0; i < expr->args.size(); i++) {
- auto* arg = Sem(expr->args[i]);
- if (!arg) {
- return nullptr;
- }
- args[i] = arg;
- arg_tys[i] = args[i]->Type();
- arg_behaviors.Add(arg->Behaviors());
-
- // Determine the common argument element type
- auto* el_ty = arg_tys[i]->UnwrapRef();
- if (auto* vec = el_ty->As<sem::Vector>()) {
- el_ty = vec->type();
- } else if (auto* mat = el_ty->As<sem::Matrix>()) {
- el_ty = mat->type();
- }
- if (i == 0) {
- arg_el_ty = el_ty;
- } else if (arg_el_ty != el_ty) {
- arg_el_ty = nullptr;
- }
- }
-
- arg_behaviors.Remove(sem::Behavior::kNext);
-
- auto type_ctor_or_conv = [&](const sem::Type* ty) -> sem::Call* {
- // The call has resolved to a type constructor or cast.
- if (args.size() == 1) {
- auto* target = ty;
- auto* source = args[0]->Type()->UnwrapRef();
- if ((source != target) && //
- ((source->is_scalar() && target->is_scalar()) ||
- (source->Is<sem::Vector>() && target->Is<sem::Vector>()) ||
- (source->Is<sem::Matrix>() && target->Is<sem::Matrix>()))) {
- // Note: Matrix types currently cannot be converted (the element type
- // must only be f32). We implement this for the day we support other
- // matrix element types.
- return TypeConversion(expr, ty, args[0], arg_tys[0]);
- }
- }
- return TypeConstructor(expr, ty, std::move(args), std::move(arg_tys));
- };
-
- // Resolve the target of the CallExpression to determine whether this is a
- // function call, cast or type constructor expression.
- if (expr->target.type) {
- const sem::Type* ty = nullptr;
-
- auto err_cannot_infer_el_ty = [&](std::string name) {
- AddError(
- "cannot infer " + name +
- " element type, as constructor arguments have different types",
- expr->source);
- for (size_t i = 0; i < args.size(); i++) {
- auto* arg = args[i];
- AddNote("argument " + std::to_string(i) + " has type " +
- arg->Type()->FriendlyName(builder_->Symbols()),
- arg->Declaration()->source);
- }
- };
-
- if (!expr->args.empty()) {
- // vecN() without explicit element type?
- // Try to infer element type from args
- if (auto* vec = expr->target.type->As<ast::Vector>()) {
- if (!vec->type) {
- if (!arg_el_ty) {
- err_cannot_infer_el_ty("vector");
+ // A CallExpression can resolve to one of:
+ // * A function call.
+ // * A builtin call.
+ // * A type constructor.
+ // * A type conversion.
+
+ // Resolve all of the arguments, their types and the set of behaviors.
+ std::vector<const sem::Expression*> args(expr->args.size());
+ sem::Behaviors arg_behaviors;
+ for (size_t i = 0; i < expr->args.size(); i++) {
+ auto* arg = sem_.Get(expr->args[i]);
+ if (!arg) {
return nullptr;
- }
+ }
+ args[i] = arg;
+ arg_behaviors.Add(arg->Behaviors());
+ }
+ arg_behaviors.Remove(sem::Behavior::kNext);
+
+ // Did any arguments have side effects?
+ bool has_side_effects =
+ std::any_of(args.begin(), args.end(), [](auto* e) { return e->HasSideEffects(); });
- Mark(vec);
- auto* v = builder_->create<sem::Vector>(
- arg_el_ty, static_cast<uint32_t>(vec->width));
- if (!ValidateVector(v, vec->source)) {
+ // ct_ctor_or_conv is a helper for building either a sem::TypeConstructor or sem::TypeConversion
+ // call for a CtorConvIntrinsic with an optional template argument type.
+ auto ct_ctor_or_conv = [&](CtorConvIntrinsic ty, const sem::Type* template_arg) -> sem::Call* {
+ auto arg_tys = utils::Transform(args, [](auto* arg) { return arg->Type(); });
+ auto* call_target = intrinsic_table_->Lookup(ty, template_arg, arg_tys, expr->source);
+ if (!call_target) {
return nullptr;
- }
- builder_->Sem().Add(vec, v);
- ty = v;
- }
- }
-
- // matNxM() without explicit element type?
- // Try to infer element type from args
- if (auto* mat = expr->target.type->As<ast::Matrix>()) {
- if (!mat->type) {
- if (!arg_el_ty) {
- err_cannot_infer_el_ty("matrix");
+ }
+ if (!MaterializeArguments(args, call_target)) {
return nullptr;
- }
-
- Mark(mat);
- auto* column_type =
- builder_->create<sem::Vector>(arg_el_ty, mat->rows);
- auto* m = builder_->create<sem::Matrix>(column_type, mat->columns);
- if (!ValidateMatrix(m, mat->source)) {
+ }
+ auto val = EvaluateConstantValue(expr, call_target->ReturnType());
+ if (!val) {
return nullptr;
- }
- builder_->Sem().Add(mat, m);
- ty = m;
}
- }
+ return builder_->create<sem::Call>(expr, call_target, std::move(args), current_statement_,
+ val.Get(), has_side_effects);
+ };
+
+    // ty_ctor_or_conv is a helper for building either a sem::TypeConstructor or sem::TypeConversion
+    // call for the given semantic type.
+ auto ty_ctor_or_conv = [&](const sem::Type* ty) {
+ return Switch(
+ ty, //
+ [&](const sem::Vector* v) {
+ return ct_ctor_or_conv(VectorCtorConvIntrinsic(v->Width()), v->type());
+ },
+ [&](const sem::Matrix* m) {
+ return ct_ctor_or_conv(MatrixCtorConvIntrinsic(m->columns(), m->rows()), m->type());
+ },
+ [&](const sem::I32*) { return ct_ctor_or_conv(CtorConvIntrinsic::kI32, nullptr); },
+ [&](const sem::U32*) { return ct_ctor_or_conv(CtorConvIntrinsic::kU32, nullptr); },
+ [&](const sem::F32*) { return ct_ctor_or_conv(CtorConvIntrinsic::kF32, nullptr); },
+ [&](const sem::Bool*) { return ct_ctor_or_conv(CtorConvIntrinsic::kBool, nullptr); },
+ [&](const sem::Array* arr) -> sem::Call* {
+ auto* call_target = utils::GetOrCreate(
+ array_ctors_, ArrayConstructorSig{{arr, args.size()}},
+ [&]() -> sem::TypeConstructor* {
+ sem::ParameterList params(args.size());
+ for (size_t i = 0; i < args.size(); i++) {
+ params[i] = builder_->create<sem::Parameter>(
+ nullptr, // declaration
+ static_cast<uint32_t>(i), // index
+ arr->ElemType(), // type
+ ast::StorageClass::kNone, // storage_class
+ ast::Access::kUndefined); // access
+ }
+ return builder_->create<sem::TypeConstructor>(arr, std::move(params));
+ });
+ if (!MaterializeArguments(args, call_target)) {
+ return nullptr;
+ }
+ auto val = EvaluateConstantValue(expr, call_target->ReturnType());
+ if (!val) {
+ return nullptr;
+ }
+ return builder_->create<sem::Call>(expr, call_target, std::move(args),
+ current_statement_, val.Get(), has_side_effects);
+ },
+ [&](const sem::Struct* str) -> sem::Call* {
+ auto* call_target = utils::GetOrCreate(
+ struct_ctors_, StructConstructorSig{{str, args.size()}},
+ [&]() -> sem::TypeConstructor* {
+ sem::ParameterList params(std::min(args.size(), str->Members().size()));
+ for (size_t i = 0, n = params.size(); i < n; i++) {
+ params[i] = builder_->create<sem::Parameter>(
+ nullptr, // declaration
+ static_cast<uint32_t>(i), // index
+ str->Members()[i]->Type(), // type
+ ast::StorageClass::kNone, // storage_class
+ ast::Access::kUndefined); // access
+ }
+ return builder_->create<sem::TypeConstructor>(str, std::move(params));
+ });
+ if (!MaterializeArguments(args, call_target)) {
+ return nullptr;
+ }
+ auto val = EvaluateConstantValue(expr, call_target->ReturnType());
+ if (!val) {
+ return nullptr;
+ }
+ return builder_->create<sem::Call>(expr, call_target, std::move(args),
+ current_statement_, val.Get(), has_side_effects);
+ },
+ [&](Default) {
+ AddError("type is not constructible", expr->source);
+ return nullptr;
+ });
+ };
+
+ // ast::CallExpression has a target which is either an ast::Type or an ast::IdentifierExpression
+ sem::Call* call = nullptr;
+ if (expr->target.type) {
+ // ast::CallExpression has an ast::Type as the target.
+ // This call is either a type constructor or type conversion.
+ call = Switch(
+ expr->target.type,
+ [&](const ast::Vector* v) -> sem::Call* {
+ Mark(v);
+ // vector element type must be inferred if it was not specified.
+ sem::Type* template_arg = nullptr;
+ if (v->type) {
+ template_arg = Type(v->type);
+ if (!template_arg) {
+ return nullptr;
+ }
+ }
+ if (auto* c = ct_ctor_or_conv(VectorCtorConvIntrinsic(v->width), template_arg)) {
+ builder_->Sem().Add(expr->target.type, c->Target()->ReturnType());
+ return c;
+ }
+ return nullptr;
+ },
+ [&](const ast::Matrix* m) -> sem::Call* {
+ Mark(m);
+ // matrix element type must be inferred if it was not specified.
+ sem::Type* template_arg = nullptr;
+ if (m->type) {
+ template_arg = Type(m->type);
+ if (!template_arg) {
+ return nullptr;
+ }
+ }
+ if (auto* c = ct_ctor_or_conv(MatrixCtorConvIntrinsic(m->columns, m->rows),
+ template_arg)) {
+ builder_->Sem().Add(expr->target.type, c->Target()->ReturnType());
+ return c;
+ }
+ return nullptr;
+ },
+ [&](const ast::Type* ast) -> sem::Call* {
+ // Handler for AST types that do not have an optional element type.
+ if (auto* ty = Type(ast)) {
+ return ty_ctor_or_conv(ty);
+ }
+ return nullptr;
+ },
+ [&](Default) {
+ TINT_ICE(Resolver, diagnostics_)
+ << expr->source << " unhandled CallExpression target:\n"
+ << "type: "
+ << (expr->target.type ? expr->target.type->TypeInfo().name : "<null>");
+ return nullptr;
+ });
+ } else {
+ // ast::CallExpression has an ast::IdentifierExpression as the target.
+ // This call is either a function call, builtin call, type constructor or type conversion.
+ auto* ident = expr->target.name;
+ Mark(ident);
+ auto* resolved = sem_.ResolvedSymbol(ident);
+ call = Switch<sem::Call*>(
+ resolved, //
+ [&](sem::Type* ty) {
+                // A type constructor or a type conversion.
+ // Note: Unlike the code path where we're resolving the call target from an
+ // ast::Type, all types must already have the element type explicitly specified, so
+ // there's no need to infer element types.
+ return ty_ctor_or_conv(ty);
+ },
+ [&](sem::Function* func) {
+ return FunctionCall(expr, func, std::move(args), arg_behaviors);
+ },
+ [&](sem::Variable* var) {
+ auto name = builder_->Symbols().NameFor(var->Declaration()->symbol);
+ AddError("cannot call variable '" + name + "'", ident->source);
+ AddNote("'" + name + "' declared here", var->Declaration()->source);
+ return nullptr;
+ },
+ [&](Default) -> sem::Call* {
+ auto name = builder_->Symbols().NameFor(ident->symbol);
+ auto builtin_type = sem::ParseBuiltinType(name);
+ if (builtin_type != sem::BuiltinType::kNone) {
+ return BuiltinCall(expr, builtin_type, std::move(args));
+ }
+
+ TINT_ICE(Resolver, diagnostics_)
+ << expr->source << " unhandled CallExpression target:\n"
+ << "resolved: " << (resolved ? resolved->TypeInfo().name : "<null>") << "\n"
+ << "name: " << builder_->Symbols().NameFor(ident->symbol);
+ return nullptr;
+ });
}
- if (ty == nullptr) {
- ty = Type(expr->target.type);
- if (!ty) {
+ if (!call) {
return nullptr;
- }
- }
-
- return type_ctor_or_conv(ty);
- }
-
- auto* ident = expr->target.name;
- Mark(ident);
-
- auto* resolved = ResolvedSymbol(ident);
- return Switch(
- resolved, //
- [&](sem::Type* type) { return type_ctor_or_conv(type); },
- [&](sem::Function* func) {
- return FunctionCall(expr, func, std::move(args), arg_behaviors);
- },
- [&](sem::Variable* var) {
- auto name = builder_->Symbols().NameFor(var->Declaration()->symbol);
- AddError("cannot call variable '" + name + "'", ident->source);
- AddNote("'" + name + "' declared here", var->Declaration()->source);
- return nullptr;
- },
- [&](Default) -> sem::Call* {
- auto name = builder_->Symbols().NameFor(ident->symbol);
- auto builtin_type = sem::ParseBuiltinType(name);
- if (builtin_type != sem::BuiltinType::kNone) {
- return BuiltinCall(expr, builtin_type, std::move(args),
- std::move(arg_tys));
- }
-
- TINT_ICE(Resolver, diagnostics_)
- << expr->source << " unresolved CallExpression target:\n"
- << "resolved: " << (resolved ? resolved->TypeInfo().name : "<null>")
- << "\n"
- << "name: " << builder_->Symbols().NameFor(ident->symbol);
- return nullptr;
- });
+ }
+
+ return validator_.Call(call, current_statement_) ? call : nullptr;
}
sem::Call* Resolver::BuiltinCall(const ast::CallExpression* expr,
sem::BuiltinType builtin_type,
- const std::vector<const sem::Expression*> args,
- const std::vector<const sem::Type*> arg_tys) {
- auto* builtin =
- builtin_table_->Lookup(builtin_type, std::move(arg_tys), expr->source);
- if (!builtin) {
- return nullptr;
- }
+ std::vector<const sem::Expression*> args) {
+ IntrinsicTable::Builtin builtin;
+ {
+ auto arg_tys = utils::Transform(args, [](auto* arg) { return arg->Type(); });
+ builtin = intrinsic_table_->Lookup(builtin_type, arg_tys, expr->source);
+ if (!builtin.sem) {
+ return nullptr;
+ }
+ }
+
+ if (!MaterializeArguments(args, builtin.sem)) {
+ return nullptr;
+ }
+
+ if (builtin.sem->IsDeprecated()) {
+ AddWarning("use of deprecated builtin", expr->source);
+ }
+
+ // If the builtin is @const, and all arguments have constant values, evaluate the builtin now.
+ sem::Constant constant;
+ if (builtin.const_eval_fn) {
+ std::vector<sem::Constant> values(args.size());
+ bool is_const = true; // all arguments have constant values
+ for (size_t i = 0; i < values.size(); i++) {
+ if (auto v = args[i]->ConstantValue()) {
+ values[i] = std::move(v);
+ } else {
+ is_const = false;
+ break;
+ }
+ }
+ if (is_const) {
+ constant = builtin.const_eval_fn(*builder_, values.data(), args.size());
+ }
+ }
+
+ bool has_side_effects =
+ builtin.sem->HasSideEffects() ||
+ std::any_of(args.begin(), args.end(), [](auto* e) { return e->HasSideEffects(); });
+ auto* call = builder_->create<sem::Call>(expr, builtin.sem, std::move(args), current_statement_,
+ constant, has_side_effects);
- if (builtin->IsDeprecated()) {
- AddWarning("use of deprecated builtin", expr->source);
- }
+ current_function_->AddDirectlyCalledBuiltin(builtin.sem);
- bool has_side_effects = builtin->HasSideEffects() ||
- std::any_of(args.begin(), args.end(), [](auto* e) {
- return e->HasSideEffects();
- });
- auto* call = builder_->create<sem::Call>(expr, builtin, std::move(args),
- current_statement_, sem::Constant{},
- has_side_effects);
+ if (!validator_.RequiredExtensionForBuiltinFunction(call, enabled_extensions_)) {
+ return nullptr;
+ }
- current_function_->AddDirectlyCalledBuiltin(builtin);
+ if (IsTextureBuiltin(builtin_type)) {
+ if (!validator_.TextureBuiltinFunction(call)) {
+ return nullptr;
+ }
+ CollectTextureSamplerPairs(builtin.sem, call->Arguments());
+ }
- if (IsTextureBuiltin(builtin_type)) {
- if (!ValidateTextureBuiltinFunction(call)) {
- return nullptr;
+ if (!validator_.BuiltinCall(call)) {
+ return nullptr;
}
+
+ current_function_->AddDirectCall(call);
+
+ return call;
+}
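
An aside on the @const path above: the builtin is only folded at resolve time when every argument already carries a constant value; a single non-constant argument falls back to runtime evaluation. A minimal standalone sketch of that gate, using a placeholder Constant type rather than Tint's sem::Constant:

    #include <optional>
    #include <vector>

    // Stand-in for a resolved constant value; Tint's sem::Constant is richer.
    using Constant = int;

    // Run a builtin's compile-time evaluator only when every argument already
    // has a constant value, mirroring the `is_const` gate in BuiltinCall above.
    std::optional<Constant> TryConstEval(
        const std::vector<std::optional<Constant>>& arg_values,
        Constant (*const_eval_fn)(const std::vector<Constant>&)) {
        std::vector<Constant> values;
        values.reserve(arg_values.size());
        for (const auto& v : arg_values) {
            if (!v) {
                return std::nullopt;  // a non-constant argument disables const-eval
            }
            values.push_back(*v);
        }
        return const_eval_fn(values);
    }
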
+
+void Resolver::CollectTextureSamplerPairs(const sem::Builtin* builtin,
+ const std::vector<const sem::Expression*>& args) const {
// Collect a texture/sampler pair for this builtin.
const auto& signature = builtin->Signature();
int texture_index = signature.IndexOf(sem::ParameterUsage::kTexture);
if (texture_index == -1) {
- TINT_ICE(Resolver, diagnostics_)
- << "texture builtin without texture parameter";
+ TINT_ICE(Resolver, diagnostics_) << "texture builtin without texture parameter";
}
-
auto* texture = args[texture_index]->As<sem::VariableUser>()->Variable();
if (!texture->Type()->UnwrapRef()->Is<sem::StorageTexture>()) {
- int sampler_index = signature.IndexOf(sem::ParameterUsage::kSampler);
- const sem::Variable* sampler =
- sampler_index != -1
- ? args[sampler_index]->As<sem::VariableUser>()->Variable()
- : nullptr;
- current_function_->AddTextureSamplerPair(texture, sampler);
+ int sampler_index = signature.IndexOf(sem::ParameterUsage::kSampler);
+ const sem::Variable* sampler =
+ sampler_index != -1 ? args[sampler_index]->As<sem::VariableUser>()->Variable()
+ : nullptr;
+ current_function_->AddTextureSamplerPair(texture, sampler);
+ }
+}
+
+sem::Call* Resolver::FunctionCall(const ast::CallExpression* expr,
+ sem::Function* target,
+ std::vector<const sem::Expression*> args,
+ sem::Behaviors arg_behaviors) {
+ auto sym = expr->target.name->symbol;
+ auto name = builder_->Symbols().NameFor(sym);
+
+ if (!MaterializeArguments(args, target)) {
+ return nullptr;
}
- }
- if (!ValidateBuiltinCall(call)) {
- return nullptr;
- }
+ // TODO(crbug.com/tint/1420): For now, assume all function calls have side
+ // effects.
+ bool has_side_effects = true;
+ auto* call = builder_->create<sem::Call>(expr, target, std::move(args), current_statement_,
+ sem::Constant{}, has_side_effects);
- current_function_->AddDirectCall(call);
+ target->AddCallSite(call);
- return call;
-}
+ call->Behaviors() = arg_behaviors + target->Behaviors();
-sem::Call* Resolver::FunctionCall(
- const ast::CallExpression* expr,
- sem::Function* target,
- const std::vector<const sem::Expression*> args,
- sem::Behaviors arg_behaviors) {
- auto sym = expr->target.name->symbol;
- auto name = builder_->Symbols().NameFor(sym);
-
- // TODO(crbug.com/tint/1420): For now, assume all function calls have side
- // effects.
- bool has_side_effects = true;
- auto* call = builder_->create<sem::Call>(expr, target, std::move(args),
- current_statement_, sem::Constant{},
- has_side_effects);
-
- if (current_function_) {
- // Note: Requires called functions to be resolved first.
- // This is currently guaranteed as functions must be declared before
- // use.
- current_function_->AddTransitivelyCalledFunction(target);
- current_function_->AddDirectCall(call);
- for (auto* transitive_call : target->TransitivelyCalledFunctions()) {
- current_function_->AddTransitivelyCalledFunction(transitive_call);
+ if (!validator_.FunctionCall(call, current_statement_)) {
+ return nullptr;
}
- // We inherit any referenced variables from the callee.
- for (auto* var : target->TransitivelyReferencedGlobals()) {
- current_function_->AddTransitivelyReferencedGlobal(var);
+ if (current_function_) {
+ // Note: Requires called functions to be resolved first.
+ // This is currently guaranteed as functions must be declared before
+ // use.
+ current_function_->AddTransitivelyCalledFunction(target);
+ current_function_->AddDirectCall(call);
+ for (auto* transitive_call : target->TransitivelyCalledFunctions()) {
+ current_function_->AddTransitivelyCalledFunction(transitive_call);
+ }
+
+ // We inherit any referenced variables from the callee.
+ for (auto* var : target->TransitivelyReferencedGlobals()) {
+ current_function_->AddTransitivelyReferencedGlobal(var);
+ }
+
+ // Note: Validation *must* be performed before calling this method.
+ CollectTextureSamplerPairs(target, call->Arguments());
}
+ return call;
+}
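
The transitive bookkeeping in FunctionCall relies on WGSL's declare-before-use rule: a callee is fully resolved before any caller, so its transitive sets are already complete and one level of copying keeps the caller's sets closed. A standalone illustration of that incremental closure, with a hypothetical Function struct rather than sem::Function:

    #include <unordered_set>

    // Hypothetical stand-in for a resolved function.
    struct Function {
        std::unordered_set<const Function*> directly_called;
        std::unordered_set<const Function*> transitively_called;
    };

    // Record that `caller` calls `callee`. Because callees are resolved first,
    // callee->transitively_called is already complete, so copying it once keeps
    // caller->transitively_called transitively closed.
    void AddCall(Function* caller, const Function* callee) {
        caller->directly_called.insert(callee);
        caller->transitively_called.insert(callee);
        for (const Function* f : callee->transitively_called) {
            caller->transitively_called.insert(f);
        }
    }
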
+
+void Resolver::CollectTextureSamplerPairs(sem::Function* func,
+ const std::vector<const sem::Expression*>& args) const {
// Map all texture/sampler pairs from the target function to the
// current function. These can only be global or parameter
// variables. Resolve any parameter variables to the corresponding
// argument passed to the current function. Leave global variables
// as-is. Then add the mapped pair to the current function's list of
// texture/sampler pairs.
- for (sem::VariablePair pair : target->TextureSamplerPairs()) {
- const sem::Variable* texture = pair.first;
- const sem::Variable* sampler = pair.second;
- if (auto* param = texture->As<sem::Parameter>()) {
- texture = args[param->Index()]->As<sem::VariableUser>()->Variable();
- }
- if (sampler) {
- if (auto* param = sampler->As<sem::Parameter>()) {
- sampler = args[param->Index()]->As<sem::VariableUser>()->Variable();
+ for (sem::VariablePair pair : func->TextureSamplerPairs()) {
+ const sem::Variable* texture = pair.first;
+ const sem::Variable* sampler = pair.second;
+ if (auto* param = texture->As<sem::Parameter>()) {
+ texture = args[param->Index()]->As<sem::VariableUser>()->Variable();
+ }
+ if (sampler) {
+ if (auto* param = sampler->As<sem::Parameter>()) {
+ sampler = args[param->Index()]->As<sem::VariableUser>()->Variable();
+ }
}
- }
- current_function_->AddTextureSamplerPair(texture, sampler);
+ current_function_->AddTextureSamplerPair(texture, sampler);
}
- }
-
- target->AddCallSite(call);
+}
- call->Behaviors() = arg_behaviors + target->Behaviors();
+sem::Expression* Resolver::Literal(const ast::LiteralExpression* literal) {
+ auto* ty = Switch(
+ literal,
+ [&](const ast::IntLiteralExpression* i) -> sem::Type* {
+ switch (i->suffix) {
+ case ast::IntLiteralExpression::Suffix::kNone:
+ return builder_->create<sem::AbstractInt>();
+ case ast::IntLiteralExpression::Suffix::kI:
+ return builder_->create<sem::I32>();
+ case ast::IntLiteralExpression::Suffix::kU:
+ return builder_->create<sem::U32>();
+ }
+ return nullptr;
+ },
+ [&](const ast::FloatLiteralExpression* f) -> sem::Type* {
+ if (f->suffix == ast::FloatLiteralExpression::Suffix::kNone) {
+ return builder_->create<sem::AbstractFloat>();
+ }
+ return builder_->create<sem::F32>();
+ },
+ [&](const ast::BoolLiteralExpression*) { return builder_->create<sem::Bool>(); },
+ [&](Default) { return nullptr; });
- if (!ValidateFunctionCall(call)) {
- return nullptr;
- }
+ if (ty == nullptr) {
+ TINT_UNREACHABLE(Resolver, builder_->Diagnostics())
+ << "Unhandled literal type: " << literal->TypeInfo().name;
+ return nullptr;
+ }
- return call;
+ auto val = EvaluateConstantValue(literal, ty);
+ if (!val) {
+ return nullptr;
+ }
+ return builder_->create<sem::Expression>(literal, ty, current_statement_, val.Get(),
+ /* has_side_effects */ false);
}
-sem::Call* Resolver::TypeConversion(const ast::CallExpression* expr,
- const sem::Type* target,
- const sem::Expression* arg,
- const sem::Type* source) {
- // It is not valid to have a type-cast call expression inside a call
- // statement.
- if (IsCallStatement(expr)) {
- AddError("type cast evaluated but not used", expr->source);
- return nullptr;
- }
-
- auto* call_target = utils::GetOrCreate(
- type_conversions_, TypeConversionSig{target, source},
- [&]() -> sem::TypeConversion* {
- // Now that the argument types have been determined, make sure that
- // they obey the conversion rules laid out in
- // https://gpuweb.github.io/gpuweb/wgsl/#conversion-expr.
- bool ok = Switch(
- target,
- [&](const sem::Vector* vec_type) {
- return ValidateVectorConstructorOrCast(expr, vec_type);
- },
- [&](const sem::Matrix* mat_type) {
- // Note: Matrix types currently cannot be converted (the element
- // type must only be f32). We implement this for the day we
- // support other matrix element types.
- return ValidateMatrixConstructorOrCast(expr, mat_type);
- },
- [&](const sem::Array* arr_type) {
- return ValidateArrayConstructorOrCast(expr, arr_type);
- },
- [&](const sem::Struct* struct_type) {
- return ValidateStructureConstructorOrCast(expr, struct_type);
- },
- [&](Default) {
- if (target->is_scalar()) {
- return ValidateScalarConstructorOrCast(expr, target);
- }
- AddError("type is not constructible", expr->source);
- return false;
- });
- if (!ok) {
- return nullptr;
+sem::Expression* Resolver::Identifier(const ast::IdentifierExpression* expr) {
+ auto symbol = expr->symbol;
+ auto* resolved = sem_.ResolvedSymbol(expr);
+ if (auto* var = As<sem::Variable>(resolved)) {
+ auto* user = builder_->create<sem::VariableUser>(expr, current_statement_, var);
+
+ if (current_statement_) {
+ // If identifier is part of a loop continuing block, make sure it
+ // doesn't refer to a variable that is bypassed by a continue statement
+ // in the loop's body block.
+ if (auto* continuing_block =
+ current_statement_->FindFirstParent<sem::LoopContinuingBlockStatement>()) {
+ auto* loop_block = continuing_block->FindFirstParent<sem::LoopBlockStatement>();
+ if (loop_block->FirstContinue()) {
+ auto& decls = loop_block->Decls();
+                    // If our identifier is one of loop_block's declarations, make sure it was
+                    // declared before the first continue statement
+ auto iter = std::find_if(decls.begin(), decls.end(),
+ [&symbol](auto* v) { return v->symbol == symbol; });
+ if (iter != decls.end()) {
+ auto var_decl_index =
+ static_cast<size_t>(std::distance(decls.begin(), iter));
+ if (var_decl_index >= loop_block->NumDeclsAtFirstContinue()) {
+ AddError("continue statement bypasses declaration of '" +
+ builder_->Symbols().NameFor(symbol) + "'",
+ loop_block->FirstContinue()->source);
+ AddNote("identifier '" + builder_->Symbols().NameFor(symbol) +
+ "' declared here",
+ (*iter)->source);
+ AddNote("identifier '" + builder_->Symbols().NameFor(symbol) +
+ "' referenced in continuing block here",
+ expr->source);
+ return nullptr;
+ }
+ }
+ }
+ }
}
- auto* param = builder_->create<sem::Parameter>(
- nullptr, // declaration
- 0, // index
- source->UnwrapRef(), // type
- ast::StorageClass::kNone, // storage_class
- ast::Access::kUndefined); // access
- return builder_->create<sem::TypeConversion>(target, param);
- });
+ if (current_function_) {
+ if (auto* global = var->As<sem::GlobalVariable>()) {
+ current_function_->AddDirectlyReferencedGlobal(global);
+ }
+ }
- if (!call_target) {
- return nullptr;
- }
+ var->AddUser(user);
+ return user;
+ }
- auto val = EvaluateConstantValue(expr, target);
- bool has_side_effects = arg->HasSideEffects();
- return builder_->create<sem::Call>(expr, call_target,
- std::vector<const sem::Expression*>{arg},
- current_statement_, val, has_side_effects);
-}
+ if (Is<sem::Function>(resolved)) {
+ AddError("missing '(' for function call", expr->source.End());
+ return nullptr;
+ }
-sem::Call* Resolver::TypeConstructor(
- const ast::CallExpression* expr,
- const sem::Type* ty,
- const std::vector<const sem::Expression*> args,
- const std::vector<const sem::Type*> arg_tys) {
- // It is not valid to have a type-constructor call expression as a call
- // statement.
- if (IsCallStatement(expr)) {
- AddError("type constructor evaluated but not used", expr->source);
- return nullptr;
- }
-
- auto* call_target = utils::GetOrCreate(
- type_ctors_, TypeConstructorSig{ty, arg_tys},
- [&]() -> sem::TypeConstructor* {
- // Now that the argument types have been determined, make sure that
- // they obey the constructor type rules laid out in
- // https://gpuweb.github.io/gpuweb/wgsl/#type-constructor-expr.
- bool ok = Switch(
- ty,
- [&](const sem::Vector* vec_type) {
- return ValidateVectorConstructorOrCast(expr, vec_type);
- },
- [&](const sem::Matrix* mat_type) {
- return ValidateMatrixConstructorOrCast(expr, mat_type);
- },
- [&](const sem::Array* arr_type) {
- return ValidateArrayConstructorOrCast(expr, arr_type);
- },
- [&](const sem::Struct* struct_type) {
- return ValidateStructureConstructorOrCast(expr, struct_type);
- },
- [&](Default) {
- if (ty->is_scalar()) {
- return ValidateScalarConstructorOrCast(expr, ty);
- }
- AddError("type is not constructible", expr->source);
- return false;
- });
- if (!ok) {
- return nullptr;
- }
-
- return builder_->create<sem::TypeConstructor>(
- ty, utils::Transform(
- arg_tys,
- [&](const sem::Type* t, size_t i) -> const sem::Parameter* {
- return builder_->create<sem::Parameter>(
- nullptr, // declaration
- static_cast<uint32_t>(i), // index
- t->UnwrapRef(), // type
- ast::StorageClass::kNone, // storage_class
- ast::Access::kUndefined); // access
- }));
- });
-
- if (!call_target) {
- return nullptr;
- }
+ if (IsBuiltin(symbol)) {
+ AddError("missing '(' for builtin call", expr->source.End());
+ return nullptr;
+ }
- auto val = EvaluateConstantValue(expr, ty);
- bool has_side_effects = std::any_of(
- args.begin(), args.end(), [](auto* e) { return e->HasSideEffects(); });
- return builder_->create<sem::Call>(expr, call_target, std::move(args),
- current_statement_, val, has_side_effects);
-}
+ if (resolved->Is<sem::Type>()) {
+ AddError("missing '(' for type constructor or cast", expr->source.End());
+ return nullptr;
+ }
-sem::Expression* Resolver::Literal(const ast::LiteralExpression* literal) {
- auto* ty = TypeOf(literal);
- if (!ty) {
+ TINT_ICE(Resolver, diagnostics_)
+ << expr->source << " unresolved identifier:\n"
+ << "resolved: " << (resolved ? resolved->TypeInfo().name : "<null>") << "\n"
+ << "name: " << builder_->Symbols().NameFor(symbol);
return nullptr;
- }
-
- auto val = EvaluateConstantValue(literal, ty);
- return builder_->create<sem::Expression>(literal, ty, current_statement_, val,
- /* has_side_effects */ false);
}
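
The loop-continuing check in Identifier above rejects code where a continue statement can skip the declaration of a variable that the continuing block then reads. Reduced to its core, the test compares the declaration's index against the number of declarations seen at the first continue; a standalone version of that comparison (hypothetical helper, not Tint API):

    #include <algorithm>
    #include <cstddef>
    #include <string>
    #include <vector>

    // Returns true if `name`, referenced in the loop's continuing block, is
    // declared in the loop body only after the first `continue`, i.e. the
    // continue statement bypasses the declaration.
    bool ContinueBypassesDecl(const std::vector<std::string>& loop_body_decls,
                              size_t num_decls_at_first_continue,
                              const std::string& name) {
        auto it = std::find(loop_body_decls.begin(), loop_body_decls.end(), name);
        if (it == loop_body_decls.end()) {
            return false;  // not declared in the loop body; nothing to bypass
        }
        auto index = static_cast<size_t>(std::distance(loop_body_decls.begin(), it));
        return index >= num_decls_at_first_continue;
    }
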
-sem::Expression* Resolver::Identifier(const ast::IdentifierExpression* expr) {
- auto symbol = expr->symbol;
- auto* resolved = ResolvedSymbol(expr);
- if (auto* var = As<sem::Variable>(resolved)) {
- auto* user =
- builder_->create<sem::VariableUser>(expr, current_statement_, var);
-
- if (current_statement_) {
- // If identifier is part of a loop continuing block, make sure it
- // doesn't refer to a variable that is bypassed by a continue statement
- // in the loop's body block.
- if (auto* continuing_block =
- current_statement_
- ->FindFirstParent<sem::LoopContinuingBlockStatement>()) {
- auto* loop_block =
- continuing_block->FindFirstParent<sem::LoopBlockStatement>();
- if (loop_block->FirstContinue()) {
- auto& decls = loop_block->Decls();
- // If our identifier is in loop_block->decls, make sure its index is
- // less than first_continue
- auto iter =
- std::find_if(decls.begin(), decls.end(),
- [&symbol](auto* v) { return v->symbol == symbol; });
- if (iter != decls.end()) {
- auto var_decl_index =
- static_cast<size_t>(std::distance(decls.begin(), iter));
- if (var_decl_index >= loop_block->NumDeclsAtFirstContinue()) {
- AddError("continue statement bypasses declaration of '" +
- builder_->Symbols().NameFor(symbol) + "'",
- loop_block->FirstContinue()->source);
- AddNote("identifier '" + builder_->Symbols().NameFor(symbol) +
- "' declared here",
- (*iter)->source);
- AddNote("identifier '" + builder_->Symbols().NameFor(symbol) +
- "' referenced in continuing block here",
- expr->source);
- return nullptr;
+sem::Expression* Resolver::MemberAccessor(const ast::MemberAccessorExpression* expr) {
+ auto* structure = sem_.TypeOf(expr->structure);
+ auto* storage_ty = structure->UnwrapRef();
+ auto* source_var = sem_.Get(expr->structure)->SourceVariable();
+
+ const sem::Type* ret = nullptr;
+ std::vector<uint32_t> swizzle;
+
+ // Structure may be a side-effecting expression (e.g. function call).
+ auto* sem_structure = sem_.Get(expr->structure);
+ bool has_side_effects = sem_structure && sem_structure->HasSideEffects();
+
+ if (auto* str = storage_ty->As<sem::Struct>()) {
+ Mark(expr->member);
+ auto symbol = expr->member->symbol;
+
+ const sem::StructMember* member = nullptr;
+ for (auto* m : str->Members()) {
+ if (m->Name() == symbol) {
+ ret = m->Type();
+ member = m;
+ break;
}
- }
}
- }
- }
- if (current_function_) {
- if (auto* global = var->As<sem::GlobalVariable>()) {
- current_function_->AddDirectlyReferencedGlobal(global);
- }
- }
+ if (ret == nullptr) {
+ AddError("struct member " + builder_->Symbols().NameFor(symbol) + " not found",
+ expr->source);
+ return nullptr;
+ }
- var->AddUser(user);
- return user;
- }
+ // If we're extracting from a reference, we return a reference.
+ if (auto* ref = structure->As<sem::Reference>()) {
+ ret = builder_->create<sem::Reference>(ret, ref->StorageClass(), ref->Access());
+ }
- if (Is<sem::Function>(resolved)) {
- AddError("missing '(' for function call", expr->source.End());
- return nullptr;
- }
+ return builder_->create<sem::StructMemberAccess>(expr, ret, current_statement_, member,
+ has_side_effects, source_var);
+ }
+
+ if (auto* vec = storage_ty->As<sem::Vector>()) {
+ Mark(expr->member);
+ std::string s = builder_->Symbols().NameFor(expr->member->symbol);
+ auto size = s.size();
+ swizzle.reserve(s.size());
+
+ for (auto c : s) {
+ switch (c) {
+ case 'x':
+ case 'r':
+ swizzle.emplace_back(0);
+ break;
+ case 'y':
+ case 'g':
+ swizzle.emplace_back(1);
+ break;
+ case 'z':
+ case 'b':
+ swizzle.emplace_back(2);
+ break;
+ case 'w':
+ case 'a':
+ swizzle.emplace_back(3);
+ break;
+ default:
+ AddError("invalid vector swizzle character",
+ expr->member->source.Begin() + swizzle.size());
+ return nullptr;
+ }
- if (IsBuiltin(symbol)) {
- AddError("missing '(' for builtin call", expr->source.End());
- return nullptr;
- }
+ if (swizzle.back() >= vec->Width()) {
+ AddError("invalid vector swizzle member", expr->member->source);
+ return nullptr;
+ }
+ }
- if (resolved->Is<sem::Type>()) {
- AddError("missing '(' for type constructor or cast", expr->source.End());
- return nullptr;
- }
-
- TINT_ICE(Resolver, diagnostics_)
- << expr->source << " unresolved identifier:\n"
- << "resolved: " << (resolved ? resolved->TypeInfo().name : "<null>")
- << "\n"
- << "name: " << builder_->Symbols().NameFor(symbol);
- return nullptr;
-}
+ if (size < 1 || size > 4) {
+ AddError("invalid vector swizzle size", expr->member->source);
+ return nullptr;
+ }
-sem::Expression* Resolver::MemberAccessor(
- const ast::MemberAccessorExpression* expr) {
- auto* structure = TypeOf(expr->structure);
- auto* storage_ty = structure->UnwrapRef();
+        // All characters are valid; check that rgba and xyzw characters are not mixed
+ auto is_rgba = [](char c) { return c == 'r' || c == 'g' || c == 'b' || c == 'a'; };
+ auto is_xyzw = [](char c) { return c == 'x' || c == 'y' || c == 'z' || c == 'w'; };
+ if (!std::all_of(s.begin(), s.end(), is_rgba) &&
+ !std::all_of(s.begin(), s.end(), is_xyzw)) {
+ AddError("invalid mixing of vector swizzle characters rgba with xyzw",
+ expr->member->source);
+ return nullptr;
+ }
- const sem::Type* ret = nullptr;
- std::vector<uint32_t> swizzle;
+ if (size == 1) {
+ // A single element swizzle is just the type of the vector.
+ ret = vec->type();
+ // If we're extracting from a reference, we return a reference.
+ if (auto* ref = structure->As<sem::Reference>()) {
+ ret = builder_->create<sem::Reference>(ret, ref->StorageClass(), ref->Access());
+ }
+ } else {
+ // The vector will have a number of components equal to the length of
+ // the swizzle.
+ ret = builder_->create<sem::Vector>(vec->type(), static_cast<uint32_t>(size));
+ }
+ return builder_->create<sem::Swizzle>(expr, ret, current_statement_, std::move(swizzle),
+ has_side_effects, source_var);
+ }
- // Structure may be a side-effecting expression (e.g. function call).
- auto* sem_structure = Sem(expr->structure);
- bool has_side_effects = sem_structure && sem_structure->HasSideEffects();
+ AddError("invalid member accessor expression. Expected vector or struct, got '" +
+ sem_.TypeNameOf(storage_ty) + "'",
+ expr->structure->source);
+ return nullptr;
+}
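
The swizzle handling in MemberAccessor encodes three rules: each character maps to a component index (x/r -> 0, y/g -> 1, z/b -> 2, w/a -> 3), every selected component must exist in the vector, and rgba characters may not be mixed with xyzw. A compact standalone checker expressing the same rules (illustrative sketch, not Tint API):

    #include <algorithm>
    #include <cstdint>
    #include <optional>
    #include <string>
    #include <vector>

    // Maps a WGSL swizzle string (e.g. "xyz" or "rgb") to component indices,
    // or returns std::nullopt if any of the rules above is violated.
    std::optional<std::vector<uint32_t>> ParseSwizzle(const std::string& s, uint32_t vec_width) {
        if (s.empty() || s.size() > 4) {
            return std::nullopt;  // a swizzle selects between 1 and 4 components
        }
        auto is_rgba = [](char c) { return c == 'r' || c == 'g' || c == 'b' || c == 'a'; };
        auto is_xyzw = [](char c) { return c == 'x' || c == 'y' || c == 'z' || c == 'w'; };
        if (!std::all_of(s.begin(), s.end(), is_rgba) &&
            !std::all_of(s.begin(), s.end(), is_xyzw)) {
            return std::nullopt;  // mixing rgba with xyzw is invalid
        }
        std::vector<uint32_t> indices;
        for (char c : s) {
            uint32_t i = 0;
            switch (c) {
                case 'x': case 'r': i = 0; break;
                case 'y': case 'g': i = 1; break;
                case 'z': case 'b': i = 2; break;
                case 'w': case 'a': i = 3; break;
                default: return std::nullopt;  // invalid swizzle character
            }
            if (i >= vec_width) {
                return std::nullopt;  // component does not exist in this vector
            }
            indices.push_back(i);
        }
        return indices;
    }
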
- if (auto* str = storage_ty->As<sem::Struct>()) {
- Mark(expr->member);
- auto symbol = expr->member->symbol;
+sem::Expression* Resolver::Binary(const ast::BinaryExpression* expr) {
+ const auto* lhs = sem_.Get(expr->lhs);
+ const auto* rhs = sem_.Get(expr->rhs);
+ auto* lhs_ty = lhs->Type()->UnwrapRef();
+ auto* rhs_ty = rhs->Type()->UnwrapRef();
- const sem::StructMember* member = nullptr;
- for (auto* m : str->Members()) {
- if (m->Name() == symbol) {
- ret = m->Type();
- member = m;
- break;
- }
+ auto op = intrinsic_table_->Lookup(expr->op, lhs_ty, rhs_ty, expr->source, false);
+ if (!op.result) {
+ return nullptr;
+ }
+ if (ShouldMaterializeArgument(op.lhs)) {
+ lhs = Materialize(lhs, op.lhs);
+ if (!lhs) {
+ return nullptr;
+ }
+ }
+ if (ShouldMaterializeArgument(op.rhs)) {
+ rhs = Materialize(rhs, op.rhs);
+ if (!rhs) {
+ return nullptr;
+ }
}
- if (ret == nullptr) {
- AddError(
- "struct member " + builder_->Symbols().NameFor(symbol) + " not found",
- expr->source);
- return nullptr;
+ auto val = EvaluateConstantValue(expr, op.result);
+ if (!val) {
+ return nullptr;
}
+ bool has_side_effects = lhs->HasSideEffects() || rhs->HasSideEffects();
+ auto* sem = builder_->create<sem::Expression>(expr, op.result, current_statement_, val.Get(),
+ has_side_effects);
+ sem->Behaviors() = lhs->Behaviors() + rhs->Behaviors();
- // If we're extracting from a reference, we return a reference.
- if (auto* ref = structure->As<sem::Reference>()) {
- ret = builder_->create<sem::Reference>(ret, ref->StorageClass(),
- ref->Access());
- }
-
- return builder_->create<sem::StructMemberAccess>(
- expr, ret, current_statement_, member, has_side_effects);
- }
-
- if (auto* vec = storage_ty->As<sem::Vector>()) {
- Mark(expr->member);
- std::string s = builder_->Symbols().NameFor(expr->member->symbol);
- auto size = s.size();
- swizzle.reserve(s.size());
-
- for (auto c : s) {
- switch (c) {
- case 'x':
- case 'r':
- swizzle.emplace_back(0);
- break;
- case 'y':
- case 'g':
- swizzle.emplace_back(1);
- break;
- case 'z':
- case 'b':
- swizzle.emplace_back(2);
- break;
- case 'w':
- case 'a':
- swizzle.emplace_back(3);
- break;
- default:
- AddError("invalid vector swizzle character",
- expr->member->source.Begin() + swizzle.size());
- return nullptr;
- }
+ return sem;
+}
- if (swizzle.back() >= vec->Width()) {
- AddError("invalid vector swizzle member", expr->member->source);
+sem::Expression* Resolver::UnaryOp(const ast::UnaryOpExpression* unary) {
+ const auto* expr = sem_.Get(unary->expr);
+ auto* expr_ty = expr->Type();
+ if (!expr_ty) {
return nullptr;
- }
}
- if (size < 1 || size > 4) {
- AddError("invalid vector swizzle size", expr->member->source);
- return nullptr;
- }
+ const sem::Type* ty = nullptr;
+ const sem::Variable* source_var = nullptr;
+
+ switch (unary->op) {
+ case ast::UnaryOp::kAddressOf:
+ if (auto* ref = expr_ty->As<sem::Reference>()) {
+ if (ref->StoreType()->UnwrapRef()->is_handle()) {
+ AddError("cannot take the address of expression in handle storage class",
+ unary->expr->source);
+ return nullptr;
+ }
- // All characters are valid, check if they're being mixed
- auto is_rgba = [](char c) {
- return c == 'r' || c == 'g' || c == 'b' || c == 'a';
- };
- auto is_xyzw = [](char c) {
- return c == 'x' || c == 'y' || c == 'z' || c == 'w';
- };
- if (!std::all_of(s.begin(), s.end(), is_rgba) &&
- !std::all_of(s.begin(), s.end(), is_xyzw)) {
- AddError("invalid mixing of vector swizzle characters rgba with xyzw",
- expr->member->source);
- return nullptr;
- }
-
- if (size == 1) {
- // A single element swizzle is just the type of the vector.
- ret = vec->type();
- // If we're extracting from a reference, we return a reference.
- if (auto* ref = structure->As<sem::Reference>()) {
- ret = builder_->create<sem::Reference>(ret, ref->StorageClass(),
- ref->Access());
- }
- } else {
- // The vector will have a number of components equal to the length of
- // the swizzle.
- ret = builder_->create<sem::Vector>(vec->type(),
- static_cast<uint32_t>(size));
- }
- return builder_->create<sem::Swizzle>(expr, ret, current_statement_,
- std::move(swizzle), has_side_effects);
- }
-
- AddError(
- "invalid member accessor expression. Expected vector or struct, got '" +
- TypeNameOf(storage_ty) + "'",
- expr->structure->source);
- return nullptr;
-}
+ auto* array = unary->expr->As<ast::IndexAccessorExpression>();
+ auto* member = unary->expr->As<ast::MemberAccessorExpression>();
+ if ((array && sem_.TypeOf(array->object)->UnwrapRef()->Is<sem::Vector>()) ||
+ (member && sem_.TypeOf(member->structure)->UnwrapRef()->Is<sem::Vector>())) {
+ AddError("cannot take the address of a vector component", unary->expr->source);
+ return nullptr;
+ }
-sem::Expression* Resolver::Binary(const ast::BinaryExpression* expr) {
- auto* lhs = Sem(expr->lhs);
- auto* rhs = Sem(expr->rhs);
- auto* lhs_ty = lhs->Type()->UnwrapRef();
- auto* rhs_ty = rhs->Type()->UnwrapRef();
-
- auto* ty = BinaryOpType(lhs_ty, rhs_ty, expr->op);
- if (!ty) {
- AddError(
- "Binary expression operand types are invalid for this operation: " +
- TypeNameOf(lhs_ty) + " " + FriendlyName(expr->op) + " " +
- TypeNameOf(rhs_ty),
- expr->source);
- return nullptr;
- }
+ ty = builder_->create<sem::Pointer>(ref->StoreType(), ref->StorageClass(),
+ ref->Access());
- auto val = EvaluateConstantValue(expr, ty);
- bool has_side_effects = lhs->HasSideEffects() || rhs->HasSideEffects();
- auto* sem = builder_->create<sem::Expression>(expr, ty, current_statement_,
- val, has_side_effects);
- sem->Behaviors() = lhs->Behaviors() + rhs->Behaviors();
+ source_var = expr->SourceVariable();
+ } else {
+ AddError("cannot take the address of expression", unary->expr->source);
+ return nullptr;
+ }
+ break;
+
+ case ast::UnaryOp::kIndirection:
+ if (auto* ptr = expr_ty->As<sem::Pointer>()) {
+ ty = builder_->create<sem::Reference>(ptr->StoreType(), ptr->StorageClass(),
+ ptr->Access());
+ source_var = expr->SourceVariable();
+ } else {
+ AddError("cannot dereference expression of type '" + sem_.TypeNameOf(expr_ty) + "'",
+ unary->expr->source);
+ return nullptr;
+ }
+ break;
- return sem;
-}
+ default: {
+ auto op = intrinsic_table_->Lookup(unary->op, expr_ty, unary->source);
+ if (!op.result) {
+ return nullptr;
+ }
+ if (ShouldMaterializeArgument(op.parameter)) {
+ expr = Materialize(expr, op.parameter);
+ if (!expr) {
+ return nullptr;
+ }
+ }
+ ty = op.result;
+ break;
+ }
+ }
-const sem::Type* Resolver::BinaryOpType(const sem::Type* lhs_ty,
- const sem::Type* rhs_ty,
- ast::BinaryOp op) {
- using Bool = sem::Bool;
- using F32 = sem::F32;
- using I32 = sem::I32;
- using U32 = sem::U32;
- using Matrix = sem::Matrix;
- using Vector = sem::Vector;
-
- auto* lhs_vec = lhs_ty->As<Vector>();
- auto* lhs_vec_elem_type = lhs_vec ? lhs_vec->type() : nullptr;
- auto* rhs_vec = rhs_ty->As<Vector>();
- auto* rhs_vec_elem_type = rhs_vec ? rhs_vec->type() : nullptr;
-
- const bool matching_vec_elem_types =
- lhs_vec_elem_type && rhs_vec_elem_type &&
- (lhs_vec_elem_type == rhs_vec_elem_type) &&
- (lhs_vec->Width() == rhs_vec->Width());
-
- const bool matching_types = matching_vec_elem_types || (lhs_ty == rhs_ty);
-
- // Binary logical expressions
- if (op == ast::BinaryOp::kLogicalAnd || op == ast::BinaryOp::kLogicalOr) {
- if (matching_types && lhs_ty->Is<Bool>()) {
- return lhs_ty;
- }
- }
- if (op == ast::BinaryOp::kOr || op == ast::BinaryOp::kAnd) {
- if (matching_types && lhs_ty->Is<Bool>()) {
- return lhs_ty;
- }
- if (matching_types && lhs_vec_elem_type && lhs_vec_elem_type->Is<Bool>()) {
- return lhs_ty;
- }
- }
-
- // Arithmetic expressions
- if (ast::IsArithmetic(op)) {
- // Binary arithmetic expressions over scalars
- if (matching_types && lhs_ty->is_numeric_scalar()) {
- return lhs_ty;
- }
-
- // Binary arithmetic expressions over vectors
- if (matching_types && lhs_vec_elem_type &&
- lhs_vec_elem_type->is_numeric_scalar()) {
- return lhs_ty;
- }
-
- // Binary arithmetic expressions with mixed scalar and vector operands
- if (lhs_vec_elem_type && (lhs_vec_elem_type == rhs_ty) &&
- rhs_ty->is_numeric_scalar()) {
- return lhs_ty;
- }
- if (rhs_vec_elem_type && (rhs_vec_elem_type == lhs_ty) &&
- lhs_ty->is_numeric_scalar()) {
- return rhs_ty;
- }
- }
-
- // Matrix arithmetic
- auto* lhs_mat = lhs_ty->As<Matrix>();
- auto* lhs_mat_elem_type = lhs_mat ? lhs_mat->type() : nullptr;
- auto* rhs_mat = rhs_ty->As<Matrix>();
- auto* rhs_mat_elem_type = rhs_mat ? rhs_mat->type() : nullptr;
- // Addition and subtraction of float matrices
- if ((op == ast::BinaryOp::kAdd || op == ast::BinaryOp::kSubtract) &&
- lhs_mat_elem_type && lhs_mat_elem_type->Is<F32>() && rhs_mat_elem_type &&
- rhs_mat_elem_type->Is<F32>() &&
- (lhs_mat->columns() == rhs_mat->columns()) &&
- (lhs_mat->rows() == rhs_mat->rows())) {
- return rhs_ty;
- }
- if (op == ast::BinaryOp::kMultiply) {
- // Multiplication of a matrix and a scalar
- if (lhs_ty->Is<F32>() && rhs_mat_elem_type &&
- rhs_mat_elem_type->Is<F32>()) {
- return rhs_ty;
- }
- if (lhs_mat_elem_type && lhs_mat_elem_type->Is<F32>() &&
- rhs_ty->Is<F32>()) {
- return lhs_ty;
- }
-
- // Vector times matrix
- if (lhs_vec_elem_type && lhs_vec_elem_type->Is<F32>() &&
- rhs_mat_elem_type && rhs_mat_elem_type->Is<F32>() &&
- (lhs_vec->Width() == rhs_mat->rows())) {
- return builder_->create<sem::Vector>(lhs_vec->type(), rhs_mat->columns());
- }
-
- // Matrix times vector
- if (lhs_mat_elem_type && lhs_mat_elem_type->Is<F32>() &&
- rhs_vec_elem_type && rhs_vec_elem_type->Is<F32>() &&
- (lhs_mat->columns() == rhs_vec->Width())) {
- return builder_->create<sem::Vector>(rhs_vec->type(), lhs_mat->rows());
- }
-
- // Matrix times matrix
- if (lhs_mat_elem_type && lhs_mat_elem_type->Is<F32>() &&
- rhs_mat_elem_type && rhs_mat_elem_type->Is<F32>() &&
- (lhs_mat->columns() == rhs_mat->rows())) {
- return builder_->create<sem::Matrix>(
- builder_->create<sem::Vector>(lhs_mat_elem_type, lhs_mat->rows()),
- rhs_mat->columns());
- }
- }
-
- // Comparison expressions
- if (ast::IsComparison(op)) {
- if (matching_types) {
- // Special case for bools: only == and !=
- if (lhs_ty->Is<Bool>() &&
- (op == ast::BinaryOp::kEqual || op == ast::BinaryOp::kNotEqual)) {
- return builder_->create<sem::Bool>();
- }
-
- // For the rest, we can compare i32, u32, and f32
- if (lhs_ty->IsAnyOf<I32, U32, F32>()) {
- return builder_->create<sem::Bool>();
- }
- }
-
- // Same for vectors
- if (matching_vec_elem_types) {
- if (lhs_vec_elem_type->Is<Bool>() &&
- (op == ast::BinaryOp::kEqual || op == ast::BinaryOp::kNotEqual)) {
- return builder_->create<sem::Vector>(builder_->create<sem::Bool>(),
- lhs_vec->Width());
- }
-
- if (lhs_vec_elem_type->is_numeric_scalar()) {
- return builder_->create<sem::Vector>(builder_->create<sem::Bool>(),
- lhs_vec->Width());
- }
- }
- }
-
- // Binary bitwise operations
- if (ast::IsBitwise(op)) {
- if (matching_types && lhs_ty->is_integer_scalar_or_vector()) {
- return lhs_ty;
- }
- }
-
- // Bit shift expressions
- if (ast::IsBitshift(op)) {
- // Type validation rules are the same for left or right shift, despite
- // differences in computation rules (i.e. right shift can be arithmetic or
- // logical depending on lhs type).
-
- if (lhs_ty->IsAnyOf<I32, U32>() && rhs_ty->Is<U32>()) {
- return lhs_ty;
- }
-
- if (lhs_vec_elem_type && lhs_vec_elem_type->IsAnyOf<I32, U32>() &&
- rhs_vec_elem_type && rhs_vec_elem_type->Is<U32>()) {
- return lhs_ty;
- }
- }
-
- return nullptr;
+ auto val = EvaluateConstantValue(unary, ty);
+ if (!val) {
+ return nullptr;
+ }
+ auto* sem = builder_->create<sem::Expression>(unary, ty, current_statement_, val.Get(),
+ expr->HasSideEffects(), source_var);
+ sem->Behaviors() = expr->Behaviors();
+ return sem;
}
-sem::Expression* Resolver::UnaryOp(const ast::UnaryOpExpression* unary) {
- auto* expr = Sem(unary->expr);
- auto* expr_ty = expr->Type();
- if (!expr_ty) {
- return nullptr;
- }
-
- const sem::Type* ty = nullptr;
-
- switch (unary->op) {
- case ast::UnaryOp::kNot:
- // Result type matches the deref'd inner type.
- ty = expr_ty->UnwrapRef();
- if (!ty->Is<sem::Bool>() && !ty->is_bool_vector()) {
- AddError(
- "cannot logical negate expression of type '" + TypeNameOf(expr_ty),
- unary->expr->source);
- return nullptr;
- }
- break;
-
- case ast::UnaryOp::kComplement:
- // Result type matches the deref'd inner type.
- ty = expr_ty->UnwrapRef();
- if (!ty->is_integer_scalar_or_vector()) {
- AddError("cannot bitwise complement expression of type '" +
- TypeNameOf(expr_ty),
- unary->expr->source);
- return nullptr;
- }
- break;
-
- case ast::UnaryOp::kNegation:
- // Result type matches the deref'd inner type.
- ty = expr_ty->UnwrapRef();
- if (!(ty->IsAnyOf<sem::F32, sem::I32>() ||
- ty->is_signed_integer_vector() || ty->is_float_vector())) {
- AddError("cannot negate expression of type '" + TypeNameOf(expr_ty),
- unary->expr->source);
- return nullptr;
- }
- break;
-
- case ast::UnaryOp::kAddressOf:
- if (auto* ref = expr_ty->As<sem::Reference>()) {
- if (ref->StoreType()->UnwrapRef()->is_handle()) {
- AddError(
- "cannot take the address of expression in handle storage class",
- unary->expr->source);
- return nullptr;
- }
-
- auto* array = unary->expr->As<ast::IndexAccessorExpression>();
- auto* member = unary->expr->As<ast::MemberAccessorExpression>();
- if ((array && TypeOf(array->object)->UnwrapRef()->Is<sem::Vector>()) ||
- (member &&
- TypeOf(member->structure)->UnwrapRef()->Is<sem::Vector>())) {
- AddError("cannot take the address of a vector component",
- unary->expr->source);
- return nullptr;
- }
-
- ty = builder_->create<sem::Pointer>(ref->StoreType(),
- ref->StorageClass(), ref->Access());
- } else {
- AddError("cannot take the address of expression", unary->expr->source);
- return nullptr;
- }
- break;
-
- case ast::UnaryOp::kIndirection:
- if (auto* ptr = expr_ty->As<sem::Pointer>()) {
- ty = builder_->create<sem::Reference>(
- ptr->StoreType(), ptr->StorageClass(), ptr->Access());
- } else {
- AddError("cannot dereference expression of type '" +
- TypeNameOf(expr_ty) + "'",
- unary->expr->source);
- return nullptr;
- }
- break;
- }
-
- auto val = EvaluateConstantValue(unary, ty);
- auto* sem = builder_->create<sem::Expression>(unary, ty, current_statement_,
- val, expr->HasSideEffects());
- sem->Behaviors() = expr->Behaviors();
- return sem;
+bool Resolver::Enable(const ast::Enable* enable) {
+ enabled_extensions_.add(enable->extension);
+ return true;
}
sem::Type* Resolver::TypeDecl(const ast::TypeDecl* named_type) {
- sem::Type* result = nullptr;
- if (auto* alias = named_type->As<ast::Alias>()) {
- result = Alias(alias);
- } else if (auto* str = named_type->As<ast::Struct>()) {
- result = Structure(str);
- } else {
- TINT_UNREACHABLE(Resolver, diagnostics_) << "Unhandled TypeDecl";
- }
-
- if (!result) {
- return nullptr;
- }
-
- builder_->Sem().Add(named_type, result);
- return result;
-}
+ sem::Type* result = nullptr;
+ if (auto* alias = named_type->As<ast::Alias>()) {
+ result = Alias(alias);
+ } else if (auto* str = named_type->As<ast::Struct>()) {
+ result = Structure(str);
+ } else {
+ TINT_UNREACHABLE(Resolver, diagnostics_) << "Unhandled TypeDecl";
+ }
-sem::Type* Resolver::TypeOf(const ast::Expression* expr) {
- auto* sem = Sem(expr);
- return sem ? const_cast<sem::Type*>(sem->Type()) : nullptr;
-}
+ if (!result) {
+ return nullptr;
+ }
-std::string Resolver::TypeNameOf(const sem::Type* ty) {
- return RawTypeNameOf(ty->UnwrapRef());
+ builder_->Sem().Add(named_type, result);
+ return result;
}
-std::string Resolver::RawTypeNameOf(const sem::Type* ty) {
- return ty->FriendlyName(builder_->Symbols());
-}
+sem::Array* Resolver::Array(const ast::Array* arr) {
+ auto source = arr->source;
-sem::Type* Resolver::TypeOf(const ast::LiteralExpression* lit) {
- return Switch(
- lit,
- [&](const ast::SintLiteralExpression*) {
- return builder_->create<sem::I32>();
- },
- [&](const ast::UintLiteralExpression*) {
- return builder_->create<sem::U32>();
- },
- [&](const ast::FloatLiteralExpression*) {
- return builder_->create<sem::F32>();
- },
- [&](const ast::BoolLiteralExpression*) {
- return builder_->create<sem::Bool>();
- },
- [&](Default) {
- TINT_UNREACHABLE(Resolver, diagnostics_)
- << "Unhandled literal type: " << lit->TypeInfo().name;
+ auto* elem_type = Type(arr->type);
+ if (!elem_type) {
return nullptr;
- });
-}
+ }
-sem::Array* Resolver::Array(const ast::Array* arr) {
- auto source = arr->source;
+ if (!validator_.IsPlain(elem_type)) { // Check must come before GetDefaultAlignAndSize()
+ AddError(sem_.TypeNameOf(elem_type) + " cannot be used as an element type of an array",
+ source);
+ return nullptr;
+ }
- auto* elem_type = Type(arr->type);
- if (!elem_type) {
- return nullptr;
- }
+ uint32_t el_align = elem_type->Align();
+ uint32_t el_size = elem_type->Size();
- if (!IsPlain(elem_type)) { // Check must come before GetDefaultAlignAndSize()
- AddError(TypeNameOf(elem_type) +
- " cannot be used as an element type of an array",
- source);
- return nullptr;
- }
+ if (!validator_.NoDuplicateAttributes(arr->attributes)) {
+ return nullptr;
+ }
- uint32_t el_align = elem_type->Align();
- uint32_t el_size = elem_type->Size();
+ // Look for explicit stride via @stride(n) attribute
+ uint32_t explicit_stride = 0;
+ for (auto* attr : arr->attributes) {
+ Mark(attr);
+ if (auto* sd = attr->As<ast::StrideAttribute>()) {
+ explicit_stride = sd->stride;
+ if (!validator_.ArrayStrideAttribute(sd, el_size, el_align, source)) {
+ return nullptr;
+ }
+ continue;
+ }
- if (!ValidateNoDuplicateAttributes(arr->attributes)) {
- return nullptr;
- }
-
- // Look for explicit stride via @stride(n) attribute
- uint32_t explicit_stride = 0;
- for (auto* attr : arr->attributes) {
- Mark(attr);
- if (auto* sd = attr->As<ast::StrideAttribute>()) {
- explicit_stride = sd->stride;
- if (!ValidateArrayStrideAttribute(sd, el_size, el_align, source)) {
+ AddError("attribute is not valid for array types", attr->source);
return nullptr;
- }
- continue;
}
- AddError("attribute is not valid for array types", attr->source);
- return nullptr;
- }
+ // Calculate implicit stride
+ uint64_t implicit_stride = utils::RoundUp<uint64_t>(el_align, el_size);
+
+ uint64_t stride = explicit_stride ? explicit_stride : implicit_stride;
- // Calculate implicit stride
- uint64_t implicit_stride = utils::RoundUp<uint64_t>(el_align, el_size);
+ // Evaluate the constant array size expression.
+ // sem::Array uses a size of 0 for a runtime-sized array.
+ uint32_t count = 0;
+ if (auto* count_expr = arr->count) {
+ const auto* count_sem = Materialize(Expression(count_expr));
+ if (!count_sem) {
+ return nullptr;
+ }
- uint64_t stride = explicit_stride ? explicit_stride : implicit_stride;
+ auto size_source = count_expr->source;
- // Evaluate the constant array size expression.
- // sem::Array uses a size of 0 for a runtime-sized array.
- uint32_t count = 0;
- if (auto* count_expr = arr->count) {
- auto* count_sem = Expression(count_expr);
- if (!count_sem) {
- return nullptr;
- }
+ auto* ty = count_sem->Type()->UnwrapRef();
+ if (!ty->is_integer_scalar()) {
+ AddError("array size must be integer scalar", size_source);
+ return nullptr;
+ }
+
+ if (auto* ident = count_expr->As<ast::IdentifierExpression>()) {
+ // Make sure the identifier is a non-overridable module-scope constant.
+ auto* var = sem_.ResolvedSymbol<sem::GlobalVariable>(ident);
+ if (!var || !var->Declaration()->is_const) {
+ AddError("array size identifier must be a module-scope constant", size_source);
+ return nullptr;
+ }
+ if (var->IsOverridable()) {
+ AddError("array size expression must not be pipeline-overridable", size_source);
+ return nullptr;
+ }
+
+ count_expr = var->Declaration()->constructor;
+ } else if (!count_expr->Is<ast::LiteralExpression>()) {
+ AddError(
+ "array size expression must be either a literal or a module-scope "
+ "constant",
+ size_source);
+ return nullptr;
+ }
- auto size_source = count_expr->source;
+ auto count_val = count_sem->ConstantValue();
+ if (!count_val) {
+ TINT_ICE(Resolver, diagnostics_) << "could not resolve array size expression";
+ return nullptr;
+ }
+
+ if (count_val.Element<AInt>(0).value < 1) {
+ AddError("array size must be at least 1", size_source);
+ return nullptr;
+ }
- auto* ty = count_sem->Type()->UnwrapRef();
- if (!ty->is_integer_scalar()) {
- AddError("array size must be integer scalar", size_source);
- return nullptr;
+ count = count_val.Element<uint32_t>(0);
}
- if (auto* ident = count_expr->As<ast::IdentifierExpression>()) {
- // Make sure the identifier is a non-overridable module-scope constant.
- auto* var = ResolvedSymbol<sem::GlobalVariable>(ident);
- if (!var || !var->Declaration()->is_const) {
- AddError("array size identifier must be a module-scope constant",
- size_source);
+ auto size = std::max<uint64_t>(count, 1) * stride;
+ if (size > std::numeric_limits<uint32_t>::max()) {
+ std::stringstream msg;
+ msg << "array size in bytes must not exceed 0x" << std::hex
+ << std::numeric_limits<uint32_t>::max() << ", but is 0x" << std::hex << size;
+ AddError(msg.str(), arr->source);
return nullptr;
- }
- if (var->IsOverridable()) {
- AddError("array size expression must not be pipeline-overridable",
- size_source);
+ }
+ if (stride > std::numeric_limits<uint32_t>::max() ||
+ implicit_stride > std::numeric_limits<uint32_t>::max()) {
+ TINT_ICE(Resolver, diagnostics_) << "calculated array stride exceeds uint32";
return nullptr;
- }
+ }
+ auto* out = builder_->create<sem::Array>(
+ elem_type, count, el_align, static_cast<uint32_t>(size), static_cast<uint32_t>(stride),
+ static_cast<uint32_t>(implicit_stride));
- count_expr = var->Declaration()->constructor;
- } else if (!count_expr->Is<ast::LiteralExpression>()) {
- AddError(
- "array size expression must be either a literal or a module-scope "
- "constant",
- size_source);
- return nullptr;
+ if (!validator_.Array(out, source)) {
+ return nullptr;
}
- auto count_val = count_sem->ConstantValue();
- if (!count_val) {
- TINT_ICE(Resolver, diagnostics_)
- << "could not resolve array size expression";
- return nullptr;
+ if (elem_type->Is<sem::Atomic>()) {
+ atomic_composite_info_.emplace(out, arr->type->source);
+ } else {
+ auto found = atomic_composite_info_.find(elem_type);
+ if (found != atomic_composite_info_.end()) {
+ atomic_composite_info_.emplace(out, found->second);
+ }
}
- if (ty->is_signed_integer_scalar() ? count_val.Elements()[0].i32 < 1
- : count_val.Elements()[0].u32 < 1u) {
- AddError("array size must be at least 1", size_source);
- return nullptr;
+ return out;
+}
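
A worked example of the stride and size arithmetic in Array, assuming the usual WGSL layout of vec3<f32> (size 12, align 16): the implicit stride is RoundUp(16, 12) = 16, so array<vec3<f32>, 4> occupies 4 * 16 = 64 bytes, and a runtime-sized array (count == 0) is sized as a single stride. The same arithmetic as standalone helpers (not Tint's utils::RoundUp):

    #include <algorithm>
    #include <cstdint>

    // Round `value` up to the next multiple of `alignment` (alignment > 0).
    uint64_t RoundUp(uint64_t alignment, uint64_t value) {
        return ((value + alignment - 1) / alignment) * alignment;
    }

    // Byte size of an array, mirroring the logic above: the stride defaults to
    // RoundUp(element_align, element_size), and a runtime-sized array
    // (count == 0) is sized as one element's stride.
    uint64_t ArraySizeInBytes(uint64_t element_size, uint64_t element_align,
                              uint64_t count, uint64_t explicit_stride = 0) {
        uint64_t implicit_stride = RoundUp(element_align, element_size);
        uint64_t stride = explicit_stride ? explicit_stride : implicit_stride;
        return std::max<uint64_t>(count, 1) * stride;
    }

    // Example: ArraySizeInBytes(12, 16, 4) == 64 for array<vec3<f32>, 4>.
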
+
+sem::Type* Resolver::Alias(const ast::Alias* alias) {
+ auto* ty = Type(alias->type);
+ if (!ty) {
+ return nullptr;
}
+ if (!validator_.Alias(alias)) {
+ return nullptr;
+ }
+ return ty;
+}
- count = count_val.Elements()[0].u32;
- }
+sem::Struct* Resolver::Structure(const ast::Struct* str) {
+ if (!validator_.NoDuplicateAttributes(str->attributes)) {
+ return nullptr;
+ }
+ for (auto* attr : str->attributes) {
+ Mark(attr);
+ }
+
+ sem::StructMemberList sem_members;
+ sem_members.reserve(str->members.size());
+
+ // Calculate the effective size and alignment of each field, and the overall
+ // size of the structure.
+ // For size, use the size attribute if provided, otherwise use the default
+ // size for the type.
+ // For alignment, use the alignment attribute if provided, otherwise use the
+ // default alignment for the member type.
+ // Diagnostic errors are raised if a basic rule is violated.
+ // Validation of storage-class rules requires analysing the actual variable
+ // usage of the structure, and so is performed as part of the variable
+ // validation.
+ uint64_t struct_size = 0;
+ uint64_t struct_align = 1;
+ std::unordered_map<Symbol, const ast::StructMember*> member_map;
+
+ for (auto* member : str->members) {
+ Mark(member);
+ auto result = member_map.emplace(member->symbol, member);
+ if (!result.second) {
+ AddError("redefinition of '" + builder_->Symbols().NameFor(member->symbol) + "'",
+ member->source);
+ AddNote("previous definition is here", result.first->second->source);
+ return nullptr;
+ }
- auto size = std::max<uint64_t>(count, 1) * stride;
- if (size > std::numeric_limits<uint32_t>::max()) {
- std::stringstream msg;
- msg << "array size in bytes must not exceed 0x" << std::hex
- << std::numeric_limits<uint32_t>::max() << ", but is 0x" << std::hex
- << size;
- AddError(msg.str(), arr->source);
- return nullptr;
- }
- if (stride > std::numeric_limits<uint32_t>::max() ||
- implicit_stride > std::numeric_limits<uint32_t>::max()) {
- TINT_ICE(Resolver, diagnostics_)
- << "calculated array stride exceeds uint32";
- return nullptr;
- }
- auto* out = builder_->create<sem::Array>(
- elem_type, count, el_align, static_cast<uint32_t>(size),
- static_cast<uint32_t>(stride), static_cast<uint32_t>(implicit_stride));
+ // Resolve member type
+ auto* type = Type(member->type);
+ if (!type) {
+ return nullptr;
+ }
- if (!ValidateArray(out, source)) {
- return nullptr;
- }
+            // Validate member type
+ if (!validator_.IsPlain(type)) {
+ AddError(sem_.TypeNameOf(type) + " cannot be used as the type of a structure member",
+ member->source);
+ return nullptr;
+ }
+
+ uint64_t offset = struct_size;
+ uint64_t align = type->Align();
+ uint64_t size = type->Size();
+
+ if (!validator_.NoDuplicateAttributes(member->attributes)) {
+ return nullptr;
+ }
+
+ bool has_offset_attr = false;
+ bool has_align_attr = false;
+ bool has_size_attr = false;
+ for (auto* attr : member->attributes) {
+ Mark(attr);
+ if (auto* o = attr->As<ast::StructMemberOffsetAttribute>()) {
+ // Offset attributes are not part of the WGSL spec, but are emitted
+ // by the SPIR-V reader.
+ if (o->offset < struct_size) {
+ AddError("offsets must be in ascending order", o->source);
+ return nullptr;
+ }
+ offset = o->offset;
+ align = 1;
+ has_offset_attr = true;
+ } else if (auto* a = attr->As<ast::StructMemberAlignAttribute>()) {
+ if (a->align <= 0 || !utils::IsPowerOfTwo(a->align)) {
+ AddError("align value must be a positive, power-of-two integer", a->source);
+ return nullptr;
+ }
+ align = a->align;
+ has_align_attr = true;
+ } else if (auto* s = attr->As<ast::StructMemberSizeAttribute>()) {
+ if (s->size < size) {
+ AddError("size must be at least as big as the type's size (" +
+ std::to_string(size) + ")",
+ s->source);
+ return nullptr;
+ }
+ size = s->size;
+ has_size_attr = true;
+ }
+ }
+
+ if (has_offset_attr && (has_align_attr || has_size_attr)) {
+ AddError("offset attributes cannot be used with align or size attributes",
+ member->source);
+ return nullptr;
+ }
- if (elem_type->Is<sem::Atomic>()) {
- atomic_composite_info_.emplace(out, arr->type->source);
- } else {
- auto found = atomic_composite_info_.find(elem_type);
- if (found != atomic_composite_info_.end()) {
- atomic_composite_info_.emplace(out, found->second);
+ offset = utils::RoundUp(align, offset);
+ if (offset > std::numeric_limits<uint32_t>::max()) {
+ std::stringstream msg;
+ msg << "struct member has byte offset 0x" << std::hex << offset
+ << ", but must not exceed 0x" << std::hex << std::numeric_limits<uint32_t>::max();
+ AddError(msg.str(), member->source);
+ return nullptr;
+ }
+
+ auto* sem_member = builder_->create<sem::StructMember>(
+ member, member->symbol, type, static_cast<uint32_t>(sem_members.size()),
+ static_cast<uint32_t>(offset), static_cast<uint32_t>(align),
+ static_cast<uint32_t>(size));
+ builder_->Sem().Add(member, sem_member);
+ sem_members.emplace_back(sem_member);
+
+ struct_size = offset + size;
+ struct_align = std::max(struct_align, align);
}
- }
- return out;
-}
+ uint64_t size_no_padding = struct_size;
+ struct_size = utils::RoundUp(struct_align, struct_size);
-sem::Type* Resolver::Alias(const ast::Alias* alias) {
- auto* ty = Type(alias->type);
- if (!ty) {
- return nullptr;
- }
- if (!ValidateAlias(alias)) {
- return nullptr;
- }
- return ty;
-}
+ if (struct_size > std::numeric_limits<uint32_t>::max()) {
+ std::stringstream msg;
+ msg << "struct size in bytes must not exceed 0x" << std::hex
+ << std::numeric_limits<uint32_t>::max() << ", but is 0x" << std::hex << struct_size;
+ AddError(msg.str(), str->source);
+ return nullptr;
+ }
+ if (struct_align > std::numeric_limits<uint32_t>::max()) {
+        TINT_ICE(Resolver, diagnostics_) << "calculated struct alignment exceeds uint32";
+ return nullptr;
+ }
-sem::Struct* Resolver::Structure(const ast::Struct* str) {
- if (!ValidateNoDuplicateAttributes(str->attributes)) {
- return nullptr;
- }
- for (auto* attr : str->attributes) {
- Mark(attr);
- }
-
- sem::StructMemberList sem_members;
- sem_members.reserve(str->members.size());
-
- // Calculate the effective size and alignment of each field, and the overall
- // size of the structure.
- // For size, use the size attribute if provided, otherwise use the default
- // size for the type.
- // For alignment, use the alignment attribute if provided, otherwise use the
- // default alignment for the member type.
- // Diagnostic errors are raised if a basic rule is violated.
- // Validation of storage-class rules requires analysing the actual variable
- // usage of the structure, and so is performed as part of the variable
- // validation.
- uint64_t struct_size = 0;
- uint64_t struct_align = 1;
- std::unordered_map<Symbol, const ast::StructMember*> member_map;
-
- for (auto* member : str->members) {
- Mark(member);
- auto result = member_map.emplace(member->symbol, member);
- if (!result.second) {
- AddError("redefinition of '" +
- builder_->Symbols().NameFor(member->symbol) + "'",
- member->source);
- AddNote("previous definition is here", result.first->second->source);
- return nullptr;
- }
-
- // Resolve member type
- auto* type = Type(member->type);
- if (!type) {
- return nullptr;
- }
-
- // Validate member type
- if (!IsPlain(type)) {
- AddError(TypeNameOf(type) +
- " cannot be used as the type of a structure member",
- member->source);
- return nullptr;
- }
-
- uint64_t offset = struct_size;
- uint64_t align = type->Align();
- uint64_t size = type->Size();
-
- if (!ValidateNoDuplicateAttributes(member->attributes)) {
- return nullptr;
- }
-
- bool has_offset_attr = false;
- bool has_align_attr = false;
- bool has_size_attr = false;
- for (auto* attr : member->attributes) {
- Mark(attr);
- if (auto* o = attr->As<ast::StructMemberOffsetAttribute>()) {
- // Offset attributes are not part of the WGSL spec, but are emitted
- // by the SPIR-V reader.
- if (o->offset < struct_size) {
- AddError("offsets must be in ascending order", o->source);
- return nullptr;
- }
- offset = o->offset;
- align = 1;
- has_offset_attr = true;
- } else if (auto* a = attr->As<ast::StructMemberAlignAttribute>()) {
- if (a->align <= 0 || !utils::IsPowerOfTwo(a->align)) {
- AddError("align value must be a positive, power-of-two integer",
- a->source);
- return nullptr;
- }
- align = a->align;
- has_align_attr = true;
- } else if (auto* s = attr->As<ast::StructMemberSizeAttribute>()) {
- if (s->size < size) {
- AddError("size must be at least as big as the type's size (" +
- std::to_string(size) + ")",
- s->source);
- return nullptr;
- }
- size = s->size;
- has_size_attr = true;
- }
- }
-
- if (has_offset_attr && (has_align_attr || has_size_attr)) {
- AddError("offset attributes cannot be used with align or size attributes",
- member->source);
- return nullptr;
- }
-
- offset = utils::RoundUp(align, offset);
- if (offset > std::numeric_limits<uint32_t>::max()) {
- std::stringstream msg;
- msg << "struct member has byte offset 0x" << std::hex << offset
- << ", but must not exceed 0x" << std::hex
- << std::numeric_limits<uint32_t>::max();
- AddError(msg.str(), member->source);
- return nullptr;
- }
-
- auto* sem_member = builder_->create<sem::StructMember>(
- member, member->symbol, type, static_cast<uint32_t>(sem_members.size()),
- static_cast<uint32_t>(offset), static_cast<uint32_t>(align),
- static_cast<uint32_t>(size));
- builder_->Sem().Add(member, sem_member);
- sem_members.emplace_back(sem_member);
-
- struct_size = offset + size;
- struct_align = std::max(struct_align, align);
- }
-
- uint64_t size_no_padding = struct_size;
- struct_size = utils::RoundUp(struct_align, struct_size);
-
- if (struct_size > std::numeric_limits<uint32_t>::max()) {
- std::stringstream msg;
- msg << "struct size in bytes must not exceed 0x" << std::hex
- << std::numeric_limits<uint32_t>::max() << ", but is 0x" << std::hex
- << struct_size;
- AddError(msg.str(), str->source);
- return nullptr;
- }
- if (struct_align > std::numeric_limits<uint32_t>::max()) {
- TINT_ICE(Resolver, diagnostics_)
- << "calculated struct stride exceeds uint32";
- return nullptr;
- }
-
- auto* out = builder_->create<sem::Struct>(
- str, str->name, sem_members, static_cast<uint32_t>(struct_align),
- static_cast<uint32_t>(struct_size),
- static_cast<uint32_t>(size_no_padding));
-
- for (size_t i = 0; i < sem_members.size(); i++) {
- auto* mem_type = sem_members[i]->Type();
- if (mem_type->Is<sem::Atomic>()) {
- atomic_composite_info_.emplace(out,
- sem_members[i]->Declaration()->source);
- break;
- } else {
- auto found = atomic_composite_info_.find(mem_type);
- if (found != atomic_composite_info_.end()) {
- atomic_composite_info_.emplace(out, found->second);
- break;
- }
+ auto* out = builder_->create<sem::Struct>(
+ str, str->name, sem_members, static_cast<uint32_t>(struct_align),
+ static_cast<uint32_t>(struct_size), static_cast<uint32_t>(size_no_padding));
+
+ for (size_t i = 0; i < sem_members.size(); i++) {
+ auto* mem_type = sem_members[i]->Type();
+ if (mem_type->Is<sem::Atomic>()) {
+ atomic_composite_info_.emplace(out, sem_members[i]->Declaration()->source);
+ break;
+ } else {
+ auto found = atomic_composite_info_.find(mem_type);
+ if (found != atomic_composite_info_.end()) {
+ atomic_composite_info_.emplace(out, found->second);
+ break;
+ }
+ }
}
- }
- if (!ValidateStructure(out)) {
- return nullptr;
- }
+ auto stage = current_function_ ? current_function_->Declaration()->PipelineStage()
+ : ast::PipelineStage::kNone;
+ if (!validator_.Structure(out, stage)) {
+ return nullptr;
+ }
- return out;
+ return out;
}
sem::Statement* Resolver::ReturnStatement(const ast::ReturnStatement* stmt) {
- auto* sem = builder_->create<sem::Statement>(
- stmt, current_compound_statement_, current_function_);
- return StatementScope(stmt, sem, [&] {
- auto& behaviors = current_statement_->Behaviors();
- behaviors = sem::Behavior::kReturn;
-
- if (auto* value = stmt->value) {
- auto* expr = Expression(value);
- if (!expr) {
- return false;
- }
- behaviors.Add(expr->Behaviors() - sem::Behavior::kNext);
- }
+ auto* sem =
+ builder_->create<sem::Statement>(stmt, current_compound_statement_, current_function_);
+ return StatementScope(stmt, sem, [&] {
+ auto& behaviors = current_statement_->Behaviors();
+ behaviors = sem::Behavior::kReturn;
+
+ const sem::Type* value_ty = nullptr;
+ if (auto* value = stmt->value) {
+ const auto* expr = Expression(value);
+ if (!expr) {
+ return false;
+ }
+ if (auto* ret_ty = current_function_->ReturnType(); !ret_ty->Is<sem::Void>()) {
+ expr = Materialize(expr, ret_ty);
+ if (!expr) {
+ return false;
+ }
+ }
+ behaviors.Add(expr->Behaviors() - sem::Behavior::kNext);
+ value_ty = expr->Type()->UnwrapRef();
+ } else {
+ value_ty = builder_->create<sem::Void>();
+ }
- // Validate after processing the return value expression so that its type
- // is available for validation.
- return ValidateReturn(stmt);
- });
+ // Validate after processing the return value expression so that its type
+ // is available for validation.
+ return validator_.Return(stmt, current_function_->ReturnType(), value_ty,
+ current_statement_);
+ });
}
-sem::SwitchStatement* Resolver::SwitchStatement(
- const ast::SwitchStatement* stmt) {
- auto* sem = builder_->create<sem::SwitchStatement>(
- stmt, current_compound_statement_, current_function_);
- return StatementScope(stmt, sem, [&] {
- auto& behaviors = sem->Behaviors();
+sem::SwitchStatement* Resolver::SwitchStatement(const ast::SwitchStatement* stmt) {
+ auto* sem = builder_->create<sem::SwitchStatement>(stmt, current_compound_statement_,
+ current_function_);
+ return StatementScope(stmt, sem, [&] {
+ auto& behaviors = sem->Behaviors();
- auto* cond = Expression(stmt->condition);
- if (!cond) {
- return false;
- }
- behaviors = cond->Behaviors() - sem::Behavior::kNext;
+ const auto* cond = Expression(stmt->condition);
+ if (!cond) {
+ return false;
+ }
+ behaviors = cond->Behaviors() - sem::Behavior::kNext;
- for (auto* case_stmt : stmt->body) {
- Mark(case_stmt);
- auto* c = CaseStatement(case_stmt);
- if (!c) {
- return false;
- }
- behaviors.Add(c->Behaviors());
- }
+ auto* cond_ty = cond->Type()->UnwrapRef();
- if (behaviors.Contains(sem::Behavior::kBreak)) {
- behaviors.Add(sem::Behavior::kNext);
- }
- behaviors.Remove(sem::Behavior::kBreak, sem::Behavior::kFallthrough);
+ utils::UniqueVector<const sem::Type*> types;
+ types.add(cond_ty);
- return ValidateSwitch(stmt);
- });
+ std::vector<sem::CaseStatement*> cases;
+ cases.reserve(stmt->body.size());
+ for (auto* case_stmt : stmt->body) {
+ Mark(case_stmt);
+ auto* c = CaseStatement(case_stmt);
+ if (!c) {
+ return false;
+ }
+ for (auto* expr : c->Selectors()) {
+ types.add(expr->Type()->UnwrapRef());
+ }
+ cases.emplace_back(c);
+ behaviors.Add(c->Behaviors());
+ sem->Cases().emplace_back(c);
+ }
+
+ // Determine the common type across all selectors and the switch expression
+ // This must materialize to an integer scalar (non-abstract).
+ auto* common_ty = sem::Type::Common(types.data(), types.size());
+ if (!common_ty || !common_ty->is_integer_scalar()) {
+ // No common type found or the common type was abstract.
+ // Pick i32 and let validation deal with any mismatches.
+ common_ty = builder_->create<sem::I32>();
+ }
+ cond = Materialize(cond, common_ty);
+ if (!cond) {
+ return false;
+ }
+ for (auto* c : cases) {
+ for (auto*& sel : c->Selectors()) { // Note: pointer reference
+ sel = Materialize(sel, common_ty);
+ if (!sel) {
+ return false;
+ }
+ }
+ }
+
+ if (behaviors.Contains(sem::Behavior::kBreak)) {
+ behaviors.Add(sem::Behavior::kNext);
+ }
+ behaviors.Remove(sem::Behavior::kBreak, sem::Behavior::kFallthrough);
+
+ return validator_.SwitchStatement(stmt);
+ });
}
-sem::Statement* Resolver::VariableDeclStatement(
- const ast::VariableDeclStatement* stmt) {
- auto* sem = builder_->create<sem::Statement>(
- stmt, current_compound_statement_, current_function_);
- return StatementScope(stmt, sem, [&] {
- Mark(stmt->variable);
+sem::Statement* Resolver::VariableDeclStatement(const ast::VariableDeclStatement* stmt) {
+ auto* sem =
+ builder_->create<sem::Statement>(stmt, current_compound_statement_, current_function_);
+ return StatementScope(stmt, sem, [&] {
+ Mark(stmt->variable);
- auto* var = Variable(stmt->variable, VariableKind::kLocal);
- if (!var) {
- return false;
- }
+ auto* var = Variable(stmt->variable, VariableKind::kLocal);
+ if (!var) {
+ return false;
+ }
- for (auto* attr : stmt->variable->attributes) {
- Mark(attr);
- if (!attr->Is<ast::InternalAttribute>()) {
- AddError("attributes are not valid on local variables", attr->source);
- return false;
- }
- }
+ for (auto* attr : stmt->variable->attributes) {
+ Mark(attr);
+ if (!attr->Is<ast::InternalAttribute>()) {
+ AddError("attributes are not valid on local variables", attr->source);
+ return false;
+ }
+ }
- if (current_block_) { // Not all statements are inside a block
- current_block_->AddDecl(stmt->variable);
- }
+ if (current_block_) { // Not all statements are inside a block
+ current_block_->AddDecl(stmt->variable);
+ }
- if (auto* ctor = var->Constructor()) {
- sem->Behaviors() = ctor->Behaviors();
- }
+ if (auto* ctor = var->Constructor()) {
+ sem->Behaviors() = ctor->Behaviors();
+ }
- return ValidateVariable(var);
- });
+ return validator_.Variable(var);
+ });
}
-sem::Statement* Resolver::AssignmentStatement(
- const ast::AssignmentStatement* stmt) {
- auto* sem = builder_->create<sem::Statement>(
- stmt, current_compound_statement_, current_function_);
- return StatementScope(stmt, sem, [&] {
- auto* lhs = Expression(stmt->lhs);
- if (!lhs) {
- return false;
- }
+sem::Statement* Resolver::AssignmentStatement(const ast::AssignmentStatement* stmt) {
+ auto* sem =
+ builder_->create<sem::Statement>(stmt, current_compound_statement_, current_function_);
+ return StatementScope(stmt, sem, [&] {
+ auto* lhs = Expression(stmt->lhs);
+ if (!lhs) {
+ return false;
+ }
- auto* rhs = Expression(stmt->rhs);
- if (!rhs) {
- return false;
- }
+ const bool is_phony_assignment = stmt->lhs->Is<ast::PhonyExpression>();
- auto& behaviors = sem->Behaviors();
- behaviors = rhs->Behaviors();
- if (!stmt->lhs->Is<ast::PhonyExpression>()) {
- behaviors.Add(lhs->Behaviors());
- }
+ const auto* rhs = Expression(stmt->rhs);
+ if (!rhs) {
+ return false;
+ }
- return ValidateAssignment(stmt, TypeOf(stmt->rhs));
- });
+ if (!is_phony_assignment) {
+ rhs = Materialize(rhs, lhs->Type()->UnwrapRef());
+ if (!rhs) {
+ return false;
+ }
+ }
+
+ auto& behaviors = sem->Behaviors();
+ behaviors = rhs->Behaviors();
+ if (!is_phony_assignment) {
+ behaviors.Add(lhs->Behaviors());
+ }
+
+ return validator_.Assignment(stmt, sem_.TypeOf(stmt->rhs));
+ });
}
sem::Statement* Resolver::BreakStatement(const ast::BreakStatement* stmt) {
- auto* sem = builder_->create<sem::Statement>(
- stmt, current_compound_statement_, current_function_);
- return StatementScope(stmt, sem, [&] {
- sem->Behaviors() = sem::Behavior::kBreak;
+ auto* sem =
+ builder_->create<sem::Statement>(stmt, current_compound_statement_, current_function_);
+ return StatementScope(stmt, sem, [&] {
+ sem->Behaviors() = sem::Behavior::kBreak;
- return ValidateBreakStatement(sem);
- });
+ return validator_.BreakStatement(sem, current_statement_);
+ });
}
sem::Statement* Resolver::CallStatement(const ast::CallStatement* stmt) {
- auto* sem = builder_->create<sem::Statement>(
- stmt, current_compound_statement_, current_function_);
- return StatementScope(stmt, sem, [&] {
- if (auto* expr = Expression(stmt->expr)) {
- sem->Behaviors() = expr->Behaviors();
- return true;
- }
- return false;
- });
+ auto* sem =
+ builder_->create<sem::Statement>(stmt, current_compound_statement_, current_function_);
+ return StatementScope(stmt, sem, [&] {
+ if (auto* expr = Expression(stmt->expr)) {
+ sem->Behaviors() = expr->Behaviors();
+ return true;
+ }
+ return false;
+ });
}
sem::Statement* Resolver::CompoundAssignmentStatement(
const ast::CompoundAssignmentStatement* stmt) {
- auto* sem = builder_->create<sem::Statement>(
- stmt, current_compound_statement_, current_function_);
- return StatementScope(stmt, sem, [&] {
- auto* lhs = Expression(stmt->lhs);
- if (!lhs) {
- return false;
- }
+ auto* sem =
+ builder_->create<sem::Statement>(stmt, current_compound_statement_, current_function_);
+ return StatementScope(stmt, sem, [&] {
+ auto* lhs = Expression(stmt->lhs);
+ if (!lhs) {
+ return false;
+ }
- auto* rhs = Expression(stmt->rhs);
- if (!rhs) {
- return false;
- }
+ auto* rhs = Expression(stmt->rhs);
+ if (!rhs) {
+ return false;
+ }
- sem->Behaviors() = rhs->Behaviors() + lhs->Behaviors();
+ sem->Behaviors() = rhs->Behaviors() + lhs->Behaviors();
- auto* lhs_ty = lhs->Type()->UnwrapRef();
- auto* rhs_ty = rhs->Type()->UnwrapRef();
- auto* ty = BinaryOpType(lhs_ty, rhs_ty, stmt->op);
- if (!ty) {
- AddError("compound assignment operand types are invalid: " +
- TypeNameOf(lhs_ty) + " " + FriendlyName(stmt->op) + " " +
- TypeNameOf(rhs_ty),
- stmt->source);
- return false;
- }
- return ValidateAssignment(stmt, ty);
- });
+ auto* lhs_ty = lhs->Type()->UnwrapRef();
+ auto* rhs_ty = rhs->Type()->UnwrapRef();
+ auto* ty = intrinsic_table_->Lookup(stmt->op, lhs_ty, rhs_ty, stmt->source, true).result;
+ if (!ty) {
+ return false;
+ }
+ return validator_.Assignment(stmt, ty);
+ });
}
-sem::Statement* Resolver::ContinueStatement(
- const ast::ContinueStatement* stmt) {
- auto* sem = builder_->create<sem::Statement>(
- stmt, current_compound_statement_, current_function_);
- return StatementScope(stmt, sem, [&] {
- sem->Behaviors() = sem::Behavior::kContinue;
+sem::Statement* Resolver::ContinueStatement(const ast::ContinueStatement* stmt) {
+ auto* sem =
+ builder_->create<sem::Statement>(stmt, current_compound_statement_, current_function_);
+ return StatementScope(stmt, sem, [&] {
+ sem->Behaviors() = sem::Behavior::kContinue;
- // Set if we've hit the first continue statement in our parent loop
- if (auto* block = sem->FindFirstParent<sem::LoopBlockStatement>()) {
- if (!block->FirstContinue()) {
- const_cast<sem::LoopBlockStatement*>(block)->SetFirstContinue(
- stmt, block->Decls().size());
- }
- }
+ // Set if we've hit the first continue statement in our parent loop
+ if (auto* block = sem->FindFirstParent<sem::LoopBlockStatement>()) {
+ if (!block->FirstContinue()) {
+ const_cast<sem::LoopBlockStatement*>(block)->SetFirstContinue(
+ stmt, block->Decls().size());
+ }
+ }
- return ValidateContinueStatement(sem);
- });
+ return validator_.ContinueStatement(sem, current_statement_);
+ });
}
sem::Statement* Resolver::DiscardStatement(const ast::DiscardStatement* stmt) {
- auto* sem = builder_->create<sem::Statement>(
- stmt, current_compound_statement_, current_function_);
- return StatementScope(stmt, sem, [&] {
- sem->Behaviors() = sem::Behavior::kDiscard;
- current_function_->SetHasDiscard();
-
- return ValidateDiscardStatement(sem);
- });
+ auto* sem =
+ builder_->create<sem::Statement>(stmt, current_compound_statement_, current_function_);
+ return StatementScope(stmt, sem, [&] {
+ sem->Behaviors() = sem::Behavior::kDiscard;
+ current_function_->SetHasDiscard();
+
+ return validator_.DiscardStatement(sem, current_statement_);
+ });
}
-sem::Statement* Resolver::FallthroughStatement(
- const ast::FallthroughStatement* stmt) {
- auto* sem = builder_->create<sem::Statement>(
- stmt, current_compound_statement_, current_function_);
- return StatementScope(stmt, sem, [&] {
- sem->Behaviors() = sem::Behavior::kFallthrough;
+sem::Statement* Resolver::FallthroughStatement(const ast::FallthroughStatement* stmt) {
+ auto* sem =
+ builder_->create<sem::Statement>(stmt, current_compound_statement_, current_function_);
+ return StatementScope(stmt, sem, [&] {
+ sem->Behaviors() = sem::Behavior::kFallthrough;
- return ValidateFallthroughStatement(sem);
- });
+ return validator_.FallthroughStatement(sem);
+ });
}
sem::Statement* Resolver::IncrementDecrementStatement(
const ast::IncrementDecrementStatement* stmt) {
- auto* sem = builder_->create<sem::Statement>(
- stmt, current_compound_statement_, current_function_);
- return StatementScope(stmt, sem, [&] {
- auto* lhs = Expression(stmt->lhs);
- if (!lhs) {
- return false;
- }
- sem->Behaviors() = lhs->Behaviors();
+ auto* sem =
+ builder_->create<sem::Statement>(stmt, current_compound_statement_, current_function_);
+ return StatementScope(stmt, sem, [&] {
+ auto* lhs = Expression(stmt->lhs);
+ if (!lhs) {
+ return false;
+ }
+ sem->Behaviors() = lhs->Behaviors();
- return ValidateIncrementDecrementStatement(stmt);
- });
+ return validator_.IncrementDecrementStatement(stmt);
+ });
}
bool Resolver::ApplyStorageClassUsageToType(ast::StorageClass sc,
sem::Type* ty,
const Source& usage) {
- ty = const_cast<sem::Type*>(ty->UnwrapRef());
+ ty = const_cast<sem::Type*>(ty->UnwrapRef());
+
+ if (auto* str = ty->As<sem::Struct>()) {
+ if (str->StorageClassUsage().count(sc)) {
+ return true; // Already applied
+ }
+
+ str->AddUsage(sc);
- if (auto* str = ty->As<sem::Struct>()) {
- if (str->StorageClassUsage().count(sc)) {
- return true; // Already applied
+ for (auto* member : str->Members()) {
+ if (!ApplyStorageClassUsageToType(sc, member->Type(), usage)) {
+ std::stringstream err;
+ err << "while analysing structure member " << sem_.TypeNameOf(str) << "."
+ << builder_->Symbols().NameFor(member->Declaration()->symbol);
+ AddNote(err.str(), member->Declaration()->source);
+ return false;
+ }
+ }
+ return true;
}
- str->AddUsage(sc);
+ if (auto* arr = ty->As<sem::Array>()) {
+ if (arr->IsRuntimeSized() && sc != ast::StorageClass::kStorage) {
+ AddError(
+ "runtime-sized arrays can only be used in the <storage> storage "
+ "class",
+ usage);
+ return false;
+ }
+
+ return ApplyStorageClassUsageToType(sc, const_cast<sem::Type*>(arr->ElemType()), usage);
+ }
- for (auto* member : str->Members()) {
- if (!ApplyStorageClassUsageToType(sc, member->Type(), usage)) {
+ if (ast::IsHostShareable(sc) && !validator_.IsHostShareable(ty)) {
std::stringstream err;
- err << "while analysing structure member " << TypeNameOf(str) << "."
- << builder_->Symbols().NameFor(member->Declaration()->symbol);
- AddNote(err.str(), member->Declaration()->source);
+ err << "Type '" << sem_.TypeNameOf(ty) << "' cannot be used in storage class '" << sc
+ << "' as it is non-host-shareable";
+ AddError(err.str(), usage);
return false;
- }
}
- return true;
- }
-
- if (auto* arr = ty->As<sem::Array>()) {
- if (arr->IsRuntimeSized() && sc != ast::StorageClass::kStorage) {
- AddError(
- "runtime-sized arrays can only be used in the <storage> storage "
- "class",
- usage);
- return false;
- }
-
- return ApplyStorageClassUsageToType(
- sc, const_cast<sem::Type*>(arr->ElemType()), usage);
- }
-
- if (ast::IsHostShareable(sc) && !IsHostShareable(ty)) {
- std::stringstream err;
- err << "Type '" << TypeNameOf(ty) << "' cannot be used in storage class '"
- << sc << "' as it is non-host-shareable";
- AddError(err.str(), usage);
- return false;
- }
- return true;
+ return true;
}
template <typename SEM, typename F>
-SEM* Resolver::StatementScope(const ast::Statement* ast,
- SEM* sem,
- F&& callback) {
- builder_->Sem().Add(ast, sem);
-
- auto* as_compound =
- As<sem::CompoundStatement, CastFlags::kDontErrorOnImpossibleCast>(sem);
- auto* as_block =
- As<sem::BlockStatement, CastFlags::kDontErrorOnImpossibleCast>(sem);
-
- TINT_SCOPED_ASSIGNMENT(current_statement_, sem);
- TINT_SCOPED_ASSIGNMENT(
- current_compound_statement_,
- as_compound ? as_compound : current_compound_statement_);
- TINT_SCOPED_ASSIGNMENT(current_block_, as_block ? as_block : current_block_);
-
- if (!callback()) {
- return nullptr;
- }
+SEM* Resolver::StatementScope(const ast::Statement* ast, SEM* sem, F&& callback) {
+ builder_->Sem().Add(ast, sem);
- return sem;
-}
+ auto* as_compound = As<sem::CompoundStatement, CastFlags::kDontErrorOnImpossibleCast>(sem);
+ auto* as_block = As<sem::BlockStatement, CastFlags::kDontErrorOnImpossibleCast>(sem);
+
+ TINT_SCOPED_ASSIGNMENT(current_statement_, sem);
+ TINT_SCOPED_ASSIGNMENT(current_compound_statement_,
+ as_compound ? as_compound : current_compound_statement_);
+ TINT_SCOPED_ASSIGNMENT(current_block_, as_block ? as_block : current_block_);
-std::string Resolver::VectorPretty(uint32_t size,
- const sem::Type* element_type) {
- sem::Vector vec_type(element_type, size);
- return vec_type.FriendlyName(builder_->Symbols());
+ if (!callback()) {
+ return nullptr;
+ }
+
+ return sem;
}
bool Resolver::Mark(const ast::Node* node) {
- if (node == nullptr) {
- TINT_ICE(Resolver, diagnostics_) << "Resolver::Mark() called with nullptr";
+ if (node == nullptr) {
+ TINT_ICE(Resolver, diagnostics_) << "Resolver::Mark() called with nullptr";
+ return false;
+ }
+ if (marked_.emplace(node).second) {
+ return true;
+ }
+ TINT_ICE(Resolver, diagnostics_) << "AST node '" << node->TypeInfo().name
+ << "' was encountered twice in the same AST of a Program\n"
+ << "At: " << node->source << "\n"
+ << "Pointer: " << node;
return false;
- }
- if (marked_.emplace(node).second) {
- return true;
- }
- TINT_ICE(Resolver, diagnostics_)
- << "AST node '" << node->TypeInfo().name
- << "' was encountered twice in the same AST of a Program\n"
- << "At: " << node->source << "\n"
- << "Pointer: " << node;
- return false;
}
void Resolver::AddError(const std::string& msg, const Source& source) const {
- diagnostics_.add_error(diag::System::Resolver, msg, source);
+ diagnostics_.add_error(diag::System::Resolver, msg, source);
}
void Resolver::AddWarning(const std::string& msg, const Source& source) const {
- diagnostics_.add_warning(diag::System::Resolver, msg, source);
+ diagnostics_.add_warning(diag::System::Resolver, msg, source);
}
void Resolver::AddNote(const std::string& msg, const Source& source) const {
- diagnostics_.add_note(diag::System::Resolver, msg, source);
-}
-
-// https://gpuweb.github.io/gpuweb/wgsl/#plain-types-section
-bool Resolver::IsPlain(const sem::Type* type) const {
- return type->is_scalar() ||
- type->IsAnyOf<sem::Atomic, sem::Vector, sem::Matrix, sem::Array,
- sem::Struct>();
-}
-
-// https://gpuweb.github.io/gpuweb/wgsl/#fixed-footprint-types
-bool Resolver::IsFixedFootprint(const sem::Type* type) const {
- return Switch(
- type, //
- [&](const sem::Vector*) { return true; }, //
- [&](const sem::Matrix*) { return true; }, //
- [&](const sem::Atomic*) { return true; },
- [&](const sem::Array* arr) {
- return !arr->IsRuntimeSized() && IsFixedFootprint(arr->ElemType());
- },
- [&](const sem::Struct* str) {
- for (auto* member : str->Members()) {
- if (!IsFixedFootprint(member->Type())) {
- return false;
- }
- }
- return true;
- },
- [&](Default) { return type->is_scalar(); });
-}
-
-// https://gpuweb.github.io/gpuweb/wgsl.html#storable-types
-bool Resolver::IsStorable(const sem::Type* type) const {
- return IsPlain(type) || type->IsAnyOf<sem::Texture, sem::Sampler>();
-}
-
-// https://gpuweb.github.io/gpuweb/wgsl.html#host-shareable-types
-bool Resolver::IsHostShareable(const sem::Type* type) const {
- if (type->IsAnyOf<sem::I32, sem::U32, sem::F32>()) {
- return true;
- }
- return Switch(
- type, //
- [&](const sem::Vector* vec) { return IsHostShareable(vec->type()); },
- [&](const sem::Matrix* mat) { return IsHostShareable(mat->type()); },
- [&](const sem::Array* arr) { return IsHostShareable(arr->ElemType()); },
- [&](const sem::Struct* str) {
- for (auto* member : str->Members()) {
- if (!IsHostShareable(member->Type())) {
- return false;
- }
- }
- return true;
- },
- [&](const sem::Atomic* atomic) {
- return IsHostShareable(atomic->Type());
- });
+ diagnostics_.add_note(diag::System::Resolver, msg, source);
}
bool Resolver::IsBuiltin(Symbol symbol) const {
- std::string name = builder_->Symbols().NameFor(symbol);
- return sem::ParseBuiltinType(name) != sem::BuiltinType::kNone;
-}
-
-bool Resolver::IsCallStatement(const ast::Expression* expr) const {
- return current_statement_ &&
- Is<ast::CallStatement>(current_statement_->Declaration(),
- [&](auto* stmt) { return stmt->expr == expr; });
-}
-
-const ast::Statement* Resolver::ClosestContinuing(bool stop_at_loop) const {
- for (const auto* s = current_statement_; s != nullptr; s = s->Parent()) {
- if (stop_at_loop && s->Is<sem::LoopStatement>()) {
- break;
- }
- if (s->Is<sem::LoopContinuingBlockStatement>()) {
- return s->Declaration();
- }
- if (auto* f = As<sem::ForLoopStatement>(s->Parent())) {
- if (f->Declaration()->continuing == s->Declaration()) {
- return s->Declaration();
- }
- if (stop_at_loop) {
- break;
- }
- }
- }
- return nullptr;
-}
-
-////////////////////////////////////////////////////////////////////////////////
-// Resolver::TypeConversionSig
-////////////////////////////////////////////////////////////////////////////////
-bool Resolver::TypeConversionSig::operator==(
- const TypeConversionSig& rhs) const {
- return target == rhs.target && source == rhs.source;
-}
-std::size_t Resolver::TypeConversionSig::Hasher::operator()(
- const TypeConversionSig& sig) const {
- return utils::Hash(sig.target, sig.source);
-}
-
-////////////////////////////////////////////////////////////////////////////////
-// Resolver::TypeConstructorSig
-////////////////////////////////////////////////////////////////////////////////
-Resolver::TypeConstructorSig::TypeConstructorSig(
- const sem::Type* ty,
- const std::vector<const sem::Type*> params)
- : type(ty), parameters(params) {}
-Resolver::TypeConstructorSig::TypeConstructorSig(const TypeConstructorSig&) =
- default;
-Resolver::TypeConstructorSig::~TypeConstructorSig() = default;
-
-bool Resolver::TypeConstructorSig::operator==(
- const TypeConstructorSig& rhs) const {
- return type == rhs.type && parameters == rhs.parameters;
-}
-std::size_t Resolver::TypeConstructorSig::Hasher::operator()(
- const TypeConstructorSig& sig) const {
- return utils::Hash(sig.type, sig.parameters);
+ std::string name = builder_->Symbols().NameFor(symbol);
+ return sem::ParseBuiltinType(name) != sem::BuiltinType::kNone;
}
} // namespace tint::resolver
diff --git a/chromium/third_party/dawn/src/tint/resolver/resolver.h b/chromium/third_party/dawn/src/tint/resolver/resolver.h
index ff879b08ce8..99996517d74 100644
--- a/chromium/third_party/dawn/src/tint/resolver/resolver.h
+++ b/chromium/third_party/dawn/src/tint/resolver/resolver.h
@@ -16,23 +16,25 @@
#define SRC_TINT_RESOLVER_RESOLVER_H_
#include <memory>
-#include <set>
#include <string>
+#include <tuple>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>
-#include "src/tint/builtin_table.h"
#include "src/tint/program_builder.h"
#include "src/tint/resolver/dependency_graph.h"
+#include "src/tint/resolver/intrinsic_table.h"
+#include "src/tint/resolver/sem_helper.h"
+#include "src/tint/resolver/validator.h"
#include "src/tint/scope_stack.h"
#include "src/tint/sem/binding_point.h"
#include "src/tint/sem/block_statement.h"
#include "src/tint/sem/constant.h"
#include "src/tint/sem/function.h"
#include "src/tint/sem/struct.h"
-#include "src/tint/utils/map.h"
+#include "src/tint/utils/result.h"
#include "src/tint/utils/unique_vector.h"
// Forward declarations
@@ -59,7 +61,6 @@ class Atomic;
class BlockStatement;
class Builtin;
class CaseStatement;
-class ElseStatement;
class ForLoopStatement;
class IfStatement;
class LoopStatement;
@@ -72,481 +73,329 @@ namespace tint::resolver {
/// Resolves types for all items in the given tint program
class Resolver {
- public:
- /// Constructor
- /// @param builder the program builder
- explicit Resolver(ProgramBuilder* builder);
-
- /// Destructor
- ~Resolver();
-
- /// @returns error messages from the resolver
- std::string error() const { return diagnostics_.str(); }
-
- /// @returns true if the resolver was successful
- bool Resolve();
-
- /// @param type the given type
- /// @returns true if the given type is a plain type
- bool IsPlain(const sem::Type* type) const;
-
- /// @param type the given type
- /// @returns true if the given type is a fixed-footprint type
- bool IsFixedFootprint(const sem::Type* type) const;
-
- /// @param type the given type
- /// @returns true if the given type is storable
- bool IsStorable(const sem::Type* type) const;
-
- /// @param type the given type
- /// @returns true if the given type is host-shareable
- bool IsHostShareable(const sem::Type* type) const;
-
- private:
- /// Describes the context in which a variable is declared
- enum class VariableKind { kParameter, kLocal, kGlobal };
-
- std::set<std::pair<const sem::Type*, ast::StorageClass>>
- valid_type_storage_layouts_;
-
- /// Structure holding semantic information about a block (i.e. scope), such as
- /// parent block and variables declared in the block.
- /// Used to validate variable scoping rules.
- struct BlockInfo {
- enum class Type { kGeneric, kLoop, kLoopContinuing, kSwitchCase };
-
- BlockInfo(const ast::BlockStatement* block, Type type, BlockInfo* parent);
- ~BlockInfo();
-
- template <typename Pred>
- BlockInfo* FindFirstParent(Pred&& pred) {
- BlockInfo* curr = this;
- while (curr && !pred(curr)) {
- curr = curr->parent;
- }
- return curr;
- }
-
- BlockInfo* FindFirstParent(BlockInfo::Type ty) {
- return FindFirstParent(
- [ty](auto* block_info) { return block_info->type == ty; });
- }
-
- ast::BlockStatement const* const block;
- const Type type;
- BlockInfo* const parent;
- std::vector<const ast::Variable*> decls;
-
- // first_continue is set to the index of the first variable in decls
- // declared after the first continue statement in a loop block, if any.
- constexpr static size_t kNoContinue = size_t(~0);
- size_t first_continue = kNoContinue;
- };
-
- // Structure holding information for a TypeDecl
- struct TypeDeclInfo {
- ast::TypeDecl const* const ast;
- sem::Type* const sem;
- };
-
- /// Resolves the program, without creating final the semantic nodes.
- /// @returns true on success, false on error
- bool ResolveInternal();
-
- bool ValidatePipelineStages();
-
- /// Creates the nodes and adds them to the sem::Info mappings of the
- /// ProgramBuilder.
- void CreateSemanticNodes() const;
-
- /// Retrieves information for the requested import.
- /// @param src the source of the import
- /// @param path the import path
- /// @param name the method name to get information on
- /// @param params the parameters to the method call
- /// @param id out parameter for the external call ID. Must not be a nullptr.
- /// @returns the return type of `name` in `path` or nullptr on error.
- sem::Type* GetImportData(const Source& src,
- const std::string& path,
- const std::string& name,
- const ast::ExpressionList& params,
- uint32_t* id);
-
- //////////////////////////////////////////////////////////////////////////////
- // AST and Type traversal methods
- //////////////////////////////////////////////////////////////////////////////
-
- // Expression resolving methods
- // Returns the semantic node pointer on success, nullptr on failure.
- sem::Expression* IndexAccessor(const ast::IndexAccessorExpression*);
- sem::Expression* Binary(const ast::BinaryExpression*);
- sem::Expression* Bitcast(const ast::BitcastExpression*);
- sem::Call* Call(const ast::CallExpression*);
- sem::Expression* Expression(const ast::Expression*);
- sem::Function* Function(const ast::Function*);
- sem::Call* FunctionCall(const ast::CallExpression*,
- sem::Function* target,
- const std::vector<const sem::Expression*> args,
- sem::Behaviors arg_behaviors);
- sem::Expression* Identifier(const ast::IdentifierExpression*);
- sem::Call* BuiltinCall(const ast::CallExpression*,
- sem::BuiltinType,
- const std::vector<const sem::Expression*> args,
- const std::vector<const sem::Type*> arg_tys);
- sem::Expression* Literal(const ast::LiteralExpression*);
- sem::Expression* MemberAccessor(const ast::MemberAccessorExpression*);
- sem::Call* TypeConversion(const ast::CallExpression* expr,
- const sem::Type* ty,
- const sem::Expression* arg,
- const sem::Type* arg_ty);
- sem::Call* TypeConstructor(const ast::CallExpression* expr,
- const sem::Type* ty,
- const std::vector<const sem::Expression*> args,
- const std::vector<const sem::Type*> arg_tys);
- sem::Expression* UnaryOp(const ast::UnaryOpExpression*);
-
- // Statement resolving methods
- // Each return true on success, false on failure.
- sem::Statement* AssignmentStatement(const ast::AssignmentStatement*);
- sem::BlockStatement* BlockStatement(const ast::BlockStatement*);
- sem::Statement* BreakStatement(const ast::BreakStatement*);
- sem::Statement* CallStatement(const ast::CallStatement*);
- sem::CaseStatement* CaseStatement(const ast::CaseStatement*);
- sem::Statement* CompoundAssignmentStatement(
- const ast::CompoundAssignmentStatement*);
- sem::Statement* ContinueStatement(const ast::ContinueStatement*);
- sem::Statement* DiscardStatement(const ast::DiscardStatement*);
- sem::ElseStatement* ElseStatement(const ast::ElseStatement*);
- sem::Statement* FallthroughStatement(const ast::FallthroughStatement*);
- sem::ForLoopStatement* ForLoopStatement(const ast::ForLoopStatement*);
- sem::GlobalVariable* GlobalVariable(const ast::Variable*);
- sem::Statement* Parameter(const ast::Variable*);
- sem::IfStatement* IfStatement(const ast::IfStatement*);
- sem::Statement* IncrementDecrementStatement(
- const ast::IncrementDecrementStatement*);
- sem::LoopStatement* LoopStatement(const ast::LoopStatement*);
- sem::Statement* ReturnStatement(const ast::ReturnStatement*);
- sem::Statement* Statement(const ast::Statement*);
- sem::SwitchStatement* SwitchStatement(const ast::SwitchStatement* s);
- sem::Statement* VariableDeclStatement(const ast::VariableDeclStatement*);
- bool Statements(const ast::StatementList&);
-
- // Resolve the result type of a binary operator.
- // Returns nullptr if the types are not valid for this operator.
- const sem::Type* BinaryOpType(const sem::Type* lhs_ty,
- const sem::Type* rhs_ty,
- ast::BinaryOp op);
-
- // AST and Type validation methods
- // Each return true on success, false on failure.
- bool ValidateAlias(const ast::Alias*);
- bool ValidateArray(const sem::Array* arr, const Source& source);
- bool ValidateArrayStrideAttribute(const ast::StrideAttribute* attr,
- uint32_t el_size,
- uint32_t el_align,
- const Source& source);
- bool ValidateAtomic(const ast::Atomic* a, const sem::Atomic* s);
- bool ValidateAtomicVariable(const sem::Variable* var);
- bool ValidateAssignment(const ast::Statement* a, const sem::Type* rhs_ty);
- bool ValidateBitcast(const ast::BitcastExpression* cast, const sem::Type* to);
- bool ValidateBreakStatement(const sem::Statement* stmt);
- bool ValidateBuiltinAttribute(const ast::BuiltinAttribute* attr,
- const sem::Type* storage_type,
- const bool is_input);
- bool ValidateContinueStatement(const sem::Statement* stmt);
- bool ValidateDiscardStatement(const sem::Statement* stmt);
- bool ValidateElseStatement(const sem::ElseStatement* stmt);
- bool ValidateEntryPoint(const sem::Function* func);
- bool ValidateForLoopStatement(const sem::ForLoopStatement* stmt);
- bool ValidateFallthroughStatement(const sem::Statement* stmt);
- bool ValidateFunction(const sem::Function* func);
- bool ValidateFunctionCall(const sem::Call* call);
- bool ValidateGlobalVariable(const sem::Variable* var);
- bool ValidateIfStatement(const sem::IfStatement* stmt);
- bool ValidateIncrementDecrementStatement(
- const ast::IncrementDecrementStatement* stmt);
- bool ValidateInterpolateAttribute(const ast::InterpolateAttribute* attr,
- const sem::Type* storage_type);
- bool ValidateBuiltinCall(const sem::Call* call);
- bool ValidateLocationAttribute(const ast::LocationAttribute* location,
- const sem::Type* type,
- std::unordered_set<uint32_t>& locations,
- const Source& source,
- const bool is_input = false);
- bool ValidateLoopStatement(const sem::LoopStatement* stmt);
- bool ValidateMatrix(const sem::Matrix* ty, const Source& source);
- bool ValidateFunctionParameter(const ast::Function* func,
- const sem::Variable* var);
- bool ValidateParameter(const ast::Function* func, const sem::Variable* var);
- bool ValidateReturn(const ast::ReturnStatement* ret);
- bool ValidateStatements(const ast::StatementList& stmts);
- bool ValidateStorageTexture(const ast::StorageTexture* t);
- bool ValidateStructure(const sem::Struct* str);
- bool ValidateStructureConstructorOrCast(const ast::CallExpression* ctor,
- const sem::Struct* struct_type);
- bool ValidateSwitch(const ast::SwitchStatement* s);
- bool ValidateVariable(const sem::Variable* var);
- bool ValidateVariableConstructorOrCast(const ast::Variable* var,
- ast::StorageClass storage_class,
- const sem::Type* storage_type,
- const sem::Type* rhs_type);
- bool ValidateVector(const sem::Vector* ty, const Source& source);
- bool ValidateVectorConstructorOrCast(const ast::CallExpression* ctor,
- const sem::Vector* vec_type);
- bool ValidateMatrixConstructorOrCast(const ast::CallExpression* ctor,
- const sem::Matrix* matrix_type);
- bool ValidateScalarConstructorOrCast(const ast::CallExpression* ctor,
- const sem::Type* type);
- bool ValidateArrayConstructorOrCast(const ast::CallExpression* ctor,
- const sem::Array* arr_type);
- bool ValidateTextureBuiltinFunction(const sem::Call* call);
- bool ValidateNoDuplicateAttributes(const ast::AttributeList& attributes);
- bool ValidateStorageClassLayout(const sem::Type* type,
- ast::StorageClass sc,
- Source source);
- bool ValidateStorageClassLayout(const sem::Variable* var);
-
- /// @returns true if the attribute list contains a
- /// ast::DisableValidationAttribute with the validation mode equal to
- /// `validation`
- bool IsValidationDisabled(const ast::AttributeList& attributes,
- ast::DisabledValidation validation) const;
-
- /// @returns true if the attribute list does not contains a
- /// ast::DisableValidationAttribute with the validation mode equal to
- /// `validation`
- bool IsValidationEnabled(const ast::AttributeList& attributes,
- ast::DisabledValidation validation) const;
-
- /// Resolves the WorkgroupSize for the given function, assigning it to
- /// current_function_
- bool WorkgroupSize(const ast::Function*);
-
- /// @returns the sem::Type for the ast::Type `ty`, building it if it
- /// hasn't been constructed already. If an error is raised, nullptr is
- /// returned.
- /// @param ty the ast::Type
- sem::Type* Type(const ast::Type* ty);
-
- /// @param named_type the named type to resolve
- /// @returns the resolved semantic type
- sem::Type* TypeDecl(const ast::TypeDecl* named_type);
-
- /// Builds and returns the semantic information for the array `arr`.
- /// This method does not mark the ast::Array node, nor attach the generated
- /// semantic information to the AST node.
- /// @returns the semantic Array information, or nullptr if an error is
- /// raised.
- /// @param arr the Array to get semantic information for
- sem::Array* Array(const ast::Array* arr);
-
- /// Builds and returns the semantic information for the alias `alias`.
- /// This method does not mark the ast::Alias node, nor attach the generated
- /// semantic information to the AST node.
- /// @returns the aliased type, or nullptr if an error is raised.
- sem::Type* Alias(const ast::Alias* alias);
-
- /// Builds and returns the semantic information for the structure `str`.
- /// This method does not mark the ast::Struct node, nor attach the generated
- /// semantic information to the AST node.
- /// @returns the semantic Struct information, or nullptr if an error is
- /// raised.
- sem::Struct* Structure(const ast::Struct* str);
-
- /// @returns the semantic info for the variable `var`. If an error is
- /// raised, nullptr is returned.
- /// @note this method does not resolve the attributes as these are
- /// context-dependent (global, local, parameter)
- /// @param var the variable to create or return the `VariableInfo` for
- /// @param kind what kind of variable we are declaring
- /// @param index the index of the parameter, if this variable is a parameter
- sem::Variable* Variable(const ast::Variable* var,
- VariableKind kind,
- uint32_t index = 0);
-
- /// Records the storage class usage for the given type, and any transient
- /// dependencies of the type. Validates that the type can be used for the
- /// given storage class, erroring if it cannot.
- /// @param sc the storage class to apply to the type and transitent types
- /// @param ty the type to apply the storage class on
- /// @param usage the Source of the root variable declaration that uses the
- /// given type and storage class. Used for generating sensible error
- /// messages.
- /// @returns true on success, false on error
- bool ApplyStorageClassUsageToType(ast::StorageClass sc,
- sem::Type* ty,
- const Source& usage);
-
- /// @param storage_class the storage class
- /// @returns the default access control for the given storage class
- ast::Access DefaultAccessForStorageClass(ast::StorageClass storage_class);
-
- /// Allocate constant IDs for pipeline-overridable constants.
- void AllocateOverridableConstantIds();
-
- /// Set the shadowing information on variable declarations.
- /// @note this method must only be called after all semantic nodes are built.
- void SetShadows();
-
- /// @returns the resolved type of the ast::Expression `expr`
- /// @param expr the expression
- sem::Type* TypeOf(const ast::Expression* expr);
-
- /// @returns the type name of the given semantic type, unwrapping
- /// references.
- std::string TypeNameOf(const sem::Type* ty);
-
- /// @returns the type name of the given semantic type, without unwrapping
- /// references.
- std::string RawTypeNameOf(const sem::Type* ty);
-
- /// @returns the semantic type of the AST literal `lit`
- /// @param lit the literal
- sem::Type* TypeOf(const ast::LiteralExpression* lit);
-
- /// StatementScope() does the following:
- /// * Creates the AST -> SEM mapping.
- /// * Assigns `sem` to #current_statement_
- /// * Assigns `sem` to #current_compound_statement_ if `sem` derives from
- /// sem::CompoundStatement.
- /// * Assigns `sem` to #current_block_ if `sem` derives from
- /// sem::BlockStatement.
- /// * Then calls `callback`.
- /// * Before returning #current_statement_, #current_compound_statement_, and
- /// #current_block_ are restored to their original values.
- /// @returns `sem` if `callback` returns true, otherwise `nullptr`.
- template <typename SEM, typename F>
- SEM* StatementScope(const ast::Statement* ast, SEM* sem, F&& callback);
-
- /// Returns a human-readable string representation of the vector type name
- /// with the given parameters.
- /// @param size the vector dimension
- /// @param element_type scalar vector sub-element type
- /// @return pretty string representation
- std::string VectorPretty(uint32_t size, const sem::Type* element_type);
-
- /// Mark records that the given AST node has been visited, and asserts that
- /// the given node has not already been seen. Diamonds in the AST are
- /// illegal.
- /// @param node the AST node.
- /// @returns true on success, false on error
- bool Mark(const ast::Node* node);
-
- /// Adds the given error message to the diagnostics
- void AddError(const std::string& msg, const Source& source) const;
-
- /// Adds the given warning message to the diagnostics
- void AddWarning(const std::string& msg, const Source& source) const;
-
- /// Adds the given note message to the diagnostics
- void AddNote(const std::string& msg, const Source& source) const;
-
- //////////////////////////////////////////////////////////////////////////////
- /// Constant value evaluation methods
- //////////////////////////////////////////////////////////////////////////////
- /// Cast `Value` to `target_type`
- /// @return the casted value
- sem::Constant ConstantCast(const sem::Constant& value,
- const sem::Type* target_elem_type);
-
- sem::Constant EvaluateConstantValue(const ast::Expression* expr,
- const sem::Type* type);
- sem::Constant EvaluateConstantValue(const ast::LiteralExpression* literal,
- const sem::Type* type);
- sem::Constant EvaluateConstantValue(const ast::CallExpression* call,
- const sem::Type* type);
-
- /// Sem is a helper for obtaining the semantic node for the given AST node.
- template <typename SEM = sem::Info::InferFromAST,
- typename AST_OR_TYPE = CastableBase>
- auto* Sem(const AST_OR_TYPE* ast) {
- using T = sem::Info::GetResultType<SEM, AST_OR_TYPE>;
- auto* sem = builder_->Sem().Get(ast);
- if (!sem) {
- TINT_ICE(Resolver, diagnostics_)
- << "AST node '" << ast->TypeInfo().name << "' had no semantic info\n"
- << "At: " << ast->source << "\n"
- << "Pointer: " << ast;
- }
- return const_cast<T*>(As<T>(sem));
- }
-
- /// @returns true if the symbol is the name of a builtin function.
- bool IsBuiltin(Symbol) const;
-
- /// @returns true if `expr` is the current CallStatement's CallExpression
- bool IsCallStatement(const ast::Expression* expr) const;
-
- /// Searches the current statement and up through parents of the current
- /// statement looking for a loop or for-loop continuing statement.
- /// @returns the closest continuing statement to the current statement that
- /// (transitively) owns the current statement.
- /// @param stop_at_loop if true then the function will return nullptr if a
- /// loop or for-loop was found before the continuing.
- const ast::Statement* ClosestContinuing(bool stop_at_loop) const;
-
- /// @returns the resolved symbol (function, type or variable) for the given
- /// ast::Identifier or ast::TypeName cast to the given semantic type.
- template <typename SEM = sem::Node>
- SEM* ResolvedSymbol(const ast::Node* node) {
- auto* resolved = utils::Lookup(dependencies_.resolved_symbols, node);
- return resolved ? const_cast<SEM*>(builder_->Sem().Get<SEM>(resolved))
- : nullptr;
- }
-
- struct TypeConversionSig {
- const sem::Type* target;
- const sem::Type* source;
-
- bool operator==(const TypeConversionSig&) const;
-
- /// Hasher provides a hash function for the TypeConversionSig
- struct Hasher {
- /// @param sig the TypeConversionSig to create a hash for
- /// @return the hash value
- std::size_t operator()(const TypeConversionSig& sig) const;
+ public:
+ /// Constructor
+ /// @param builder the program builder
+ explicit Resolver(ProgramBuilder* builder);
+
+ /// Destructor
+ ~Resolver();
+
+ /// @returns error messages from the resolver
+ std::string error() const { return diagnostics_.str(); }
+
+ /// @returns true if the resolver was successful
+ bool Resolve();
+
+ /// @param type the given type
+ /// @returns true if the given type is a plain type
+ bool IsPlain(const sem::Type* type) const { return validator_.IsPlain(type); }
+
+ /// @param type the given type
+ /// @returns true if the given type is a fixed-footprint type
+ bool IsFixedFootprint(const sem::Type* type) const { return validator_.IsFixedFootprint(type); }
+
+ /// @param type the given type
+ /// @returns true if the given type is storable
+ bool IsStorable(const sem::Type* type) const { return validator_.IsStorable(type); }
+
+ /// @param type the given type
+ /// @returns true if the given type is host-shareable
+ bool IsHostShareable(const sem::Type* type) const { return validator_.IsHostShareable(type); }
+
+ /// @returns the validator for testing
+ const Validator* GetValidatorForTesting() const { return &validator_; }
+
+ private:
+ /// Describes the context in which a variable is declared
+ enum class VariableKind { kParameter, kLocal, kGlobal };
+
+ Validator::ValidTypeStorageLayouts valid_type_storage_layouts_;
+
+ /// Structure holding semantic information about a block (i.e. scope), such as
+ /// parent block and variables declared in the block.
+ /// Used to validate variable scoping rules.
+ struct BlockInfo {
+ enum class Type { kGeneric, kLoop, kLoopContinuing, kSwitchCase };
+
+ BlockInfo(const ast::BlockStatement* block, Type type, BlockInfo* parent);
+ ~BlockInfo();
+
+ template <typename Pred>
+ BlockInfo* FindFirstParent(Pred&& pred) {
+ BlockInfo* curr = this;
+ while (curr && !pred(curr)) {
+ curr = curr->parent;
+ }
+ return curr;
+ }
+
+ BlockInfo* FindFirstParent(BlockInfo::Type ty) {
+ return FindFirstParent([ty](auto* block_info) { return block_info->type == ty; });
+ }
+
+ ast::BlockStatement const* const block;
+ const Type type;
+ BlockInfo* const parent;
+ std::vector<const ast::Variable*> decls;
+
+ // first_continue is set to the index of the first variable in decls
+ // declared after the first continue statement in a loop block, if any.
+ constexpr static size_t kNoContinue = size_t(~0);
+ size_t first_continue = kNoContinue;
};
- };
-
- struct TypeConstructorSig {
- const sem::Type* type;
- const std::vector<const sem::Type*> parameters;
-
- TypeConstructorSig(const sem::Type* ty,
- const std::vector<const sem::Type*> params);
- TypeConstructorSig(const TypeConstructorSig&);
- ~TypeConstructorSig();
- bool operator==(const TypeConstructorSig&) const;
-
- /// Hasher provides a hash function for the TypeConstructorSig
- struct Hasher {
- /// @param sig the TypeConstructorSig to create a hash for
- /// @return the hash value
- std::size_t operator()(const TypeConstructorSig& sig) const;
+
+ // Structure holding information for a TypeDecl
+ struct TypeDeclInfo {
+ ast::TypeDecl const* const ast;
+ sem::Type* const sem;
};
- };
-
- ProgramBuilder* const builder_;
- diag::List& diagnostics_;
- std::unique_ptr<BuiltinTable> const builtin_table_;
- DependencyGraph dependencies_;
- std::vector<sem::Function*> entry_points_;
- std::unordered_map<const sem::Type*, const Source&> atomic_composite_info_;
- std::unordered_set<const ast::Node*> marked_;
- std::unordered_map<uint32_t, const sem::Variable*> constant_ids_;
- std::unordered_map<TypeConversionSig,
- sem::CallTarget*,
- TypeConversionSig::Hasher>
- type_conversions_;
- std::unordered_map<TypeConstructorSig,
- sem::CallTarget*,
- TypeConstructorSig::Hasher>
- type_ctors_;
-
- sem::Function* current_function_ = nullptr;
- sem::Statement* current_statement_ = nullptr;
- sem::CompoundStatement* current_compound_statement_ = nullptr;
- sem::BlockStatement* current_block_ = nullptr;
+
+    /// Resolves the program, without creating the final semantic nodes.
+ /// @returns true on success, false on error
+ bool ResolveInternal();
+
+ /// Creates the nodes and adds them to the sem::Info mappings of the
+ /// ProgramBuilder.
+ void CreateSemanticNodes() const;
+
+ /// Retrieves information for the requested import.
+ /// @param src the source of the import
+ /// @param path the import path
+ /// @param name the method name to get information on
+ /// @param params the parameters to the method call
+ /// @param id out parameter for the external call ID. Must not be a nullptr.
+ /// @returns the return type of `name` in `path` or nullptr on error.
+ sem::Type* GetImportData(const Source& src,
+ const std::string& path,
+ const std::string& name,
+ const ast::ExpressionList& params,
+ uint32_t* id);
+
+ //////////////////////////////////////////////////////////////////////////////
+ // AST and Type traversal methods
+ //////////////////////////////////////////////////////////////////////////////
+
+ // Expression resolving methods
+ // Returns the semantic node pointer on success, nullptr on failure.
+ sem::Expression* IndexAccessor(const ast::IndexAccessorExpression*);
+ sem::Expression* Binary(const ast::BinaryExpression*);
+ sem::Expression* Bitcast(const ast::BitcastExpression*);
+ sem::Call* Call(const ast::CallExpression*);
+ sem::Expression* Expression(const ast::Expression*);
+ sem::Function* Function(const ast::Function*);
+ sem::Call* FunctionCall(const ast::CallExpression*,
+ sem::Function* target,
+ std::vector<const sem::Expression*> args,
+ sem::Behaviors arg_behaviors);
+ sem::Expression* Identifier(const ast::IdentifierExpression*);
+ sem::Call* BuiltinCall(const ast::CallExpression*,
+ sem::BuiltinType,
+ std::vector<const sem::Expression*> args);
+ sem::Expression* Literal(const ast::LiteralExpression*);
+ sem::Expression* MemberAccessor(const ast::MemberAccessorExpression*);
+ sem::Expression* UnaryOp(const ast::UnaryOpExpression*);
+
+ /// If `expr` is not of an abstract-numeric type, then Materialize() will just return `expr`.
+ /// If `expr` is of an abstract-numeric type:
+ /// * Materialize will create and return a sem::Materialize node wrapping `expr`.
+ /// * The AST -> Sem binding will be updated to point to the new sem::Materialize node.
+ /// * The sem::Materialize node will have a new concrete type, which will be `target_type` if
+ /// not nullptr, otherwise:
+    ///       * a type with the element type of `i32` (e.g. `i32`, `vec2<i32>`) if `expr` has an
+    ///         element type of abstract-integer...
+    ///       * ... or a type with the element type of `f32` (e.g. `f32`, `vec3<f32>`, `mat2x3<f32>`)
+    ///         if `expr` has an element type of abstract-float.
+ /// * The sem::Materialize constant value will be the value of `expr` value-converted to the
+ /// materialized type.
+ /// If `expr` is nullptr, then Materialize() will also return nullptr.
+ const sem::Expression* Materialize(const sem::Expression* expr,
+ const sem::Type* target_type = nullptr);
+
+ /// Materializes all the arguments in `args` to the parameter types of `target`.
+ /// @returns true on success, false on failure.
+ bool MaterializeArguments(std::vector<const sem::Expression*>& args,
+ const sem::CallTarget* target);
+
+ /// @returns true if an argument of an abstract numeric type passed to a parameter of type
+ /// `parameter_ty` should be materialized.
+ bool ShouldMaterializeArgument(const sem::Type* parameter_ty) const;
+
+ // Statement resolving methods
+ // Each returns the resolved semantic node on success, or nullptr / false on failure.
+ sem::Statement* AssignmentStatement(const ast::AssignmentStatement*);
+ sem::BlockStatement* BlockStatement(const ast::BlockStatement*);
+ sem::Statement* BreakStatement(const ast::BreakStatement*);
+ sem::Statement* CallStatement(const ast::CallStatement*);
+ sem::CaseStatement* CaseStatement(const ast::CaseStatement*);
+ sem::Statement* CompoundAssignmentStatement(const ast::CompoundAssignmentStatement*);
+ sem::Statement* ContinueStatement(const ast::ContinueStatement*);
+ sem::Statement* DiscardStatement(const ast::DiscardStatement*);
+ sem::Statement* FallthroughStatement(const ast::FallthroughStatement*);
+ sem::ForLoopStatement* ForLoopStatement(const ast::ForLoopStatement*);
+ sem::GlobalVariable* GlobalVariable(const ast::Variable*);
+ sem::Statement* Parameter(const ast::Variable*);
+ sem::IfStatement* IfStatement(const ast::IfStatement*);
+ sem::Statement* IncrementDecrementStatement(const ast::IncrementDecrementStatement*);
+ sem::LoopStatement* LoopStatement(const ast::LoopStatement*);
+ sem::Statement* ReturnStatement(const ast::ReturnStatement*);
+ sem::Statement* Statement(const ast::Statement*);
+ sem::SwitchStatement* SwitchStatement(const ast::SwitchStatement* s);
+ sem::Statement* VariableDeclStatement(const ast::VariableDeclStatement*);
+ bool Statements(const ast::StatementList&);
+
+ // CollectTextureSamplerPairs() collects all the texture/sampler pairs from the target function
+ // or builtin, and records these on the current function by calling AddTextureSamplerPair().
+ void CollectTextureSamplerPairs(sem::Function* func,
+ const std::vector<const sem::Expression*>& args) const;
+ void CollectTextureSamplerPairs(const sem::Builtin* builtin,
+ const std::vector<const sem::Expression*>& args) const;
+
+ /// Resolves the WorkgroupSize for the given function, assigning it to
+ /// current_function_
+ bool WorkgroupSize(const ast::Function*);
+
+ /// @returns the sem::Type for the ast::Type `ty`, building it if it
+ /// hasn't been constructed already. If an error is raised, nullptr is
+ /// returned.
+ /// @param ty the ast::Type
+ sem::Type* Type(const ast::Type* ty);
+
+ /// @param enable the enable declaration
+ /// @returns true if the extension was resolved successfully, false on error
+ bool Enable(const ast::Enable* enable);
+
+ /// @param named_type the named type to resolve
+ /// @returns the resolved semantic type
+ sem::Type* TypeDecl(const ast::TypeDecl* named_type);
+
+ /// Builds and returns the semantic information for the array `arr`.
+ /// This method does not mark the ast::Array node, nor attach the generated
+ /// semantic information to the AST node.
+ /// @returns the semantic Array information, or nullptr if an error is
+ /// raised.
+ /// @param arr the Array to get semantic information for
+ sem::Array* Array(const ast::Array* arr);
+
+ /// Builds and returns the semantic information for the alias `alias`.
+ /// This method does not mark the ast::Alias node, nor attach the generated
+ /// semantic information to the AST node.
+ /// @returns the aliased type, or nullptr if an error is raised.
+ sem::Type* Alias(const ast::Alias* alias);
+
+ /// Builds and returns the semantic information for the structure `str`.
+ /// This method does not mark the ast::Struct node, nor attach the generated
+ /// semantic information to the AST node.
+ /// @returns the semantic Struct information, or nullptr if an error is
+ /// raised.
+ sem::Struct* Structure(const ast::Struct* str);
+
+ /// @returns the semantic info for the variable `var`. If an error is
+ /// raised, nullptr is returned.
+ /// @note this method does not resolve the attributes as these are
+ /// context-dependent (global, local, parameter)
+ /// @param var the variable to create or return the `sem::Variable` for
+ /// @param kind what kind of variable we are declaring
+ /// @param index the index of the parameter, if this variable is a parameter
+ sem::Variable* Variable(const ast::Variable* var, VariableKind kind, uint32_t index = 0);
+
+ /// Records the storage class usage for the given type, and any transient
+ /// dependencies of the type. Validates that the type can be used for the
+ /// given storage class, erroring if it cannot.
+ /// @param sc the storage class to apply to the type and transient types
+ /// @param ty the type to apply the storage class on
+ /// @param usage the Source of the root variable declaration that uses the
+ /// given type and storage class. Used for generating sensible error
+ /// messages.
+ /// @returns true on success, false on error
+ bool ApplyStorageClassUsageToType(ast::StorageClass sc, sem::Type* ty, const Source& usage);
+
+ /// @param storage_class the storage class
+ /// @returns the default access control for the given storage class
+ ast::Access DefaultAccessForStorageClass(ast::StorageClass storage_class);
+
+ /// Allocate constant IDs for pipeline-overridable constants.
+ void AllocateOverridableConstantIds();
+
+ /// Set the shadowing information on variable declarations.
+ /// @note this method must only be called after all semantic nodes are built.
+ void SetShadows();
+ /// StatementScope() does the following:
+ /// * Creates the AST -> SEM mapping.
+ /// * Assigns `sem` to #current_statement_
+ /// * Assigns `sem` to #current_compound_statement_ if `sem` derives from
+ /// sem::CompoundStatement.
+ /// * Assigns `sem` to #current_block_ if `sem` derives from
+ /// sem::BlockStatement.
+ /// * Then calls `callback`.
+ ///   * Before returning, #current_statement_, #current_compound_statement_, and
+ /// #current_block_ are restored to their original values.
+ /// @returns `sem` if `callback` returns true, otherwise `nullptr`.
+ template <typename SEM, typename F>
+ SEM* StatementScope(const ast::Statement* ast, SEM* sem, F&& callback);
+
+ /// Mark records that the given AST node has been visited, and asserts that
+ /// the given node has not already been seen. Diamonds in the AST are
+ /// illegal.
+ /// @param node the AST node.
+ /// @returns true on success, false on error
+ bool Mark(const ast::Node* node);
+
+ /// Adds the given error message to the diagnostics
+ void AddError(const std::string& msg, const Source& source) const;
+
+ /// Adds the given warning message to the diagnostics
+ void AddWarning(const std::string& msg, const Source& source) const;
+
+ /// Adds the given note message to the diagnostics
+ void AddNote(const std::string& msg, const Source& source) const;
+
+ //////////////////////////////////////////////////////////////////////////////
+ /// Constant value evaluation methods
+ //////////////////////////////////////////////////////////////////////////////
+ /// The result type of a ConstantEvaluation method. Holds the constant value and a boolean,
+ /// which is true on success, false on error.
+ using ConstantResult = utils::Result<sem::Constant>;
+
+ /// Converts the `value` to `target_type`
+ /// @returns the converted value
+ ConstantResult ConvertValue(const sem::Constant& value,
+ const sem::Type* target_type,
+ const Source& source);
+ ConstantResult EvaluateConstantValue(const ast::Expression* expr, const sem::Type* type);
+ ConstantResult EvaluateConstantValue(const ast::LiteralExpression* literal,
+ const sem::Type* type);
+ ConstantResult EvaluateConstantValue(const ast::CallExpression* call, const sem::Type* type);
+
+ /// @returns true if the symbol is the name of a builtin function.
+ bool IsBuiltin(Symbol) const;
+
+ // ArrayConstructorSig represents a unique array constructor signature.
+ // It is a tuple of the array type and number of arguments provided.
+ using ArrayConstructorSig = utils::UnorderedKeyWrapper<std::tuple<const sem::Array*, size_t>>;
+
+ // StructConstructorSig represents a unique structure constructor signature.
+ // It is a tuple of the structure type and number of arguments provided.
+ using StructConstructorSig = utils::UnorderedKeyWrapper<std::tuple<const sem::Struct*, size_t>>;
+
+ ProgramBuilder* const builder_;
+ diag::List& diagnostics_;
+ std::unique_ptr<IntrinsicTable> const intrinsic_table_;
+ DependencyGraph dependencies_;
+ SemHelper sem_;
+ Validator validator_;
+ ast::Extensions enabled_extensions_;
+ std::vector<sem::Function*> entry_points_;
+ std::unordered_map<const sem::Type*, const Source&> atomic_composite_info_;
+ std::unordered_set<const ast::Node*> marked_;
+ std::unordered_map<uint32_t, const sem::Variable*> constant_ids_;
+ std::unordered_map<ArrayConstructorSig, sem::CallTarget*> array_ctors_;
+ std::unordered_map<StructConstructorSig, sem::CallTarget*> struct_ctors_;
+
+ sem::Function* current_function_ = nullptr;
+ sem::Statement* current_statement_ = nullptr;
+ sem::CompoundStatement* current_compound_statement_ = nullptr;
+ sem::BlockStatement* current_block_ = nullptr;
};
} // namespace tint::resolver
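
Note: the Materialize() documentation in the resolver.h hunk above describes how abstract-numeric
expressions are concretized. The following standalone C++ sketch (not Tint code; ElemKind and
Concretize are hypothetical names invented only for this illustration) shows the same rule in
isolation: an explicit target type wins, otherwise abstract-int becomes i32 and abstract-float
becomes f32, and already-concrete types pass through unchanged.

    #include <cassert>
    #include <optional>

    enum class ElemKind { kAbstractInt, kAbstractFloat, kI32, kU32, kF32, kBool };

    // Returns the concrete element kind an abstract-numeric expression materializes to.
    // If a target is supplied it wins; otherwise abstract-int -> i32, abstract-float -> f32.
    ElemKind Concretize(ElemKind expr, std::optional<ElemKind> target = std::nullopt) {
        if (expr != ElemKind::kAbstractInt && expr != ElemKind::kAbstractFloat) {
            return expr;  // Already concrete: Materialize() would return `expr` unchanged.
        }
        if (target.has_value()) {
            return *target;
        }
        return expr == ElemKind::kAbstractInt ? ElemKind::kI32 : ElemKind::kF32;
    }

    int main() {
        assert(Concretize(ElemKind::kAbstractInt) == ElemKind::kI32);
        assert(Concretize(ElemKind::kAbstractFloat) == ElemKind::kF32);
        assert(Concretize(ElemKind::kAbstractInt, ElemKind::kU32) == ElemKind::kU32);
        assert(Concretize(ElemKind::kI32) == ElemKind::kI32);  // no-op for concrete types
        return 0;
    }
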
diff --git a/chromium/third_party/dawn/src/tint/resolver/resolver_behavior_test.cc b/chromium/third_party/dawn/src/tint/resolver/resolver_behavior_test.cc
index af0c9ce879e..3a3b14f4c63 100644
--- a/chromium/third_party/dawn/src/tint/resolver/resolver_behavior_test.cc
+++ b/chromium/third_party/dawn/src/tint/resolver/resolver_behavior_test.cc
@@ -19,638 +19,608 @@
#include "src/tint/sem/expression.h"
#include "src/tint/sem/for_loop_statement.h"
#include "src/tint/sem/if_statement.h"
+#include "src/tint/sem/switch_statement.h"
+
+using namespace tint::number_suffixes; // NOLINT
namespace tint::resolver {
namespace {
class ResolverBehaviorTest : public ResolverTest {
- protected:
- void SetUp() override {
- // Create a function called 'DiscardOrNext' which returns an i32, and has
- // the behavior of {Discard, Return}, which when called, will have the
- // behavior {Discard, Next}.
- Func("DiscardOrNext", {}, ty.i32(),
- {
- If(true, Block(Discard())),
- Return(1),
- });
- }
+ protected:
+ void SetUp() override {
+ // Create a function called 'DiscardOrNext' which returns an i32, and has
+ // the behavior of {Discard, Return}, which when called, will have the
+ // behavior {Discard, Next}.
+ Func("DiscardOrNext", {}, ty.i32(),
+ {
+ If(true, Block(Discard())),
+ Return(1_i),
+ });
+ }
};
TEST_F(ResolverBehaviorTest, ExprBinaryOp_LHS) {
- auto* stmt = Decl(Var("lhs", ty.i32(), Add(Call("DiscardOrNext"), 1)));
- WrapInFunction(stmt);
+ auto* stmt = Decl(Var("lhs", ty.i32(), Add(Call("DiscardOrNext"), 1_i)));
+ WrapInFunction(stmt);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = Sem().Get(stmt);
- EXPECT_EQ(sem->Behaviors(),
- sem::Behaviors(sem::Behavior::kDiscard, sem::Behavior::kNext));
+ auto* sem = Sem().Get(stmt);
+ EXPECT_EQ(sem->Behaviors(), sem::Behaviors(sem::Behavior::kDiscard, sem::Behavior::kNext));
}
TEST_F(ResolverBehaviorTest, ExprBinaryOp_RHS) {
- auto* stmt = Decl(Var("lhs", ty.i32(), Add(1, Call("DiscardOrNext"))));
- WrapInFunction(stmt);
+ auto* stmt = Decl(Var("lhs", ty.i32(), Add(1_i, Call("DiscardOrNext"))));
+ WrapInFunction(stmt);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = Sem().Get(stmt);
- EXPECT_EQ(sem->Behaviors(),
- sem::Behaviors(sem::Behavior::kDiscard, sem::Behavior::kNext));
+ auto* sem = Sem().Get(stmt);
+ EXPECT_EQ(sem->Behaviors(), sem::Behaviors(sem::Behavior::kDiscard, sem::Behavior::kNext));
}
TEST_F(ResolverBehaviorTest, ExprBitcastOp) {
- auto* stmt = Decl(Var("lhs", ty.u32(), Bitcast<u32>(Call("DiscardOrNext"))));
- WrapInFunction(stmt);
+ auto* stmt = Decl(Var("lhs", ty.u32(), Bitcast<u32>(Call("DiscardOrNext"))));
+ WrapInFunction(stmt);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = Sem().Get(stmt);
- EXPECT_EQ(sem->Behaviors(),
- sem::Behaviors(sem::Behavior::kDiscard, sem::Behavior::kNext));
+ auto* sem = Sem().Get(stmt);
+ EXPECT_EQ(sem->Behaviors(), sem::Behaviors(sem::Behavior::kDiscard, sem::Behavior::kNext));
}
TEST_F(ResolverBehaviorTest, ExprIndex_Arr) {
- Func("ArrayDiscardOrNext", {}, ty.array<i32, 4>(),
- {
- If(true, Block(Discard())),
- Return(Construct(ty.array<i32, 4>())),
- });
+ Func("ArrayDiscardOrNext", {}, ty.array<i32, 4>(),
+ {
+ If(true, Block(Discard())),
+ Return(Construct(ty.array<i32, 4>())),
+ });
- auto* stmt =
- Decl(Var("lhs", ty.i32(), IndexAccessor(Call("ArrayDiscardOrNext"), 1)));
- WrapInFunction(stmt);
+ auto* stmt = Decl(Var("lhs", ty.i32(), IndexAccessor(Call("ArrayDiscardOrNext"), 1_i)));
+ WrapInFunction(stmt);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = Sem().Get(stmt);
- EXPECT_EQ(sem->Behaviors(),
- sem::Behaviors(sem::Behavior::kDiscard, sem::Behavior::kNext));
+ auto* sem = Sem().Get(stmt);
+ EXPECT_EQ(sem->Behaviors(), sem::Behaviors(sem::Behavior::kDiscard, sem::Behavior::kNext));
}
TEST_F(ResolverBehaviorTest, ExprIndex_Idx) {
- auto* stmt =
- Decl(Var("lhs", ty.i32(), IndexAccessor("arr", Call("DiscardOrNext"))));
- WrapInFunction(Decl(Var("arr", ty.array<i32, 4>())), //
- stmt);
+ auto* stmt = Decl(Var("lhs", ty.i32(), IndexAccessor("arr", Call("DiscardOrNext"))));
+ WrapInFunction(Decl(Var("arr", ty.array<i32, 4>())), //
+ stmt);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = Sem().Get(stmt);
- EXPECT_EQ(sem->Behaviors(),
- sem::Behaviors(sem::Behavior::kDiscard, sem::Behavior::kNext));
+ auto* sem = Sem().Get(stmt);
+ EXPECT_EQ(sem->Behaviors(), sem::Behaviors(sem::Behavior::kDiscard, sem::Behavior::kNext));
}
TEST_F(ResolverBehaviorTest, ExprUnaryOp) {
- auto* stmt = Decl(Var("lhs", ty.i32(),
- create<ast::UnaryOpExpression>(
- ast::UnaryOp::kComplement, Call("DiscardOrNext"))));
- WrapInFunction(stmt);
+ auto* stmt =
+ Decl(Var("lhs", ty.i32(),
+ create<ast::UnaryOpExpression>(ast::UnaryOp::kComplement, Call("DiscardOrNext"))));
+ WrapInFunction(stmt);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = Sem().Get(stmt);
- EXPECT_EQ(sem->Behaviors(),
- sem::Behaviors(sem::Behavior::kDiscard, sem::Behavior::kNext));
+ auto* sem = Sem().Get(stmt);
+ EXPECT_EQ(sem->Behaviors(), sem::Behaviors(sem::Behavior::kDiscard, sem::Behavior::kNext));
}
TEST_F(ResolverBehaviorTest, StmtAssign) {
- auto* stmt = Assign("lhs", "rhs");
- WrapInFunction(Decl(Var("lhs", ty.i32())), //
- Decl(Var("rhs", ty.i32())), //
- stmt);
+ auto* stmt = Assign("lhs", "rhs");
+ WrapInFunction(Decl(Var("lhs", ty.i32())), //
+ Decl(Var("rhs", ty.i32())), //
+ stmt);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = Sem().Get(stmt);
- EXPECT_EQ(sem->Behaviors(), sem::Behavior::kNext);
+ auto* sem = Sem().Get(stmt);
+ EXPECT_EQ(sem->Behaviors(), sem::Behavior::kNext);
}
TEST_F(ResolverBehaviorTest, StmtAssign_LHSDiscardOrNext) {
- auto* stmt = Assign(IndexAccessor("lhs", Call("DiscardOrNext")), 1);
- WrapInFunction(Decl(Var("lhs", ty.array<i32, 4>())), //
- stmt);
+ auto* stmt = Assign(IndexAccessor("lhs", Call("DiscardOrNext")), 1_i);
+ WrapInFunction(Decl(Var("lhs", ty.array<i32, 4>())), //
+ stmt);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = Sem().Get(stmt);
- EXPECT_EQ(sem->Behaviors(),
- sem::Behaviors(sem::Behavior::kDiscard, sem::Behavior::kNext));
+ auto* sem = Sem().Get(stmt);
+ EXPECT_EQ(sem->Behaviors(), sem::Behaviors(sem::Behavior::kDiscard, sem::Behavior::kNext));
}
TEST_F(ResolverBehaviorTest, StmtAssign_RHSDiscardOrNext) {
- auto* stmt = Assign("lhs", Call("DiscardOrNext"));
- WrapInFunction(Decl(Var("lhs", ty.i32())), //
- stmt);
+ auto* stmt = Assign("lhs", Call("DiscardOrNext"));
+ WrapInFunction(Decl(Var("lhs", ty.i32())), //
+ stmt);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = Sem().Get(stmt);
- EXPECT_EQ(sem->Behaviors(),
- sem::Behaviors(sem::Behavior::kDiscard, sem::Behavior::kNext));
+ auto* sem = Sem().Get(stmt);
+ EXPECT_EQ(sem->Behaviors(), sem::Behaviors(sem::Behavior::kDiscard, sem::Behavior::kNext));
}
TEST_F(ResolverBehaviorTest, StmtBlockEmpty) {
- auto* stmt = Block();
- WrapInFunction(stmt);
+ auto* stmt = Block();
+ WrapInFunction(stmt);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = Sem().Get(stmt);
- EXPECT_EQ(sem->Behaviors(), sem::Behavior::kNext);
+ auto* sem = Sem().Get(stmt);
+ EXPECT_EQ(sem->Behaviors(), sem::Behavior::kNext);
}
TEST_F(ResolverBehaviorTest, StmtBlockSingleStmt) {
- auto* stmt = Block(Discard());
- WrapInFunction(stmt);
+ auto* stmt = Block(Discard());
+ WrapInFunction(stmt);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = Sem().Get(stmt);
- EXPECT_EQ(sem->Behaviors(), sem::Behavior::kDiscard);
+ auto* sem = Sem().Get(stmt);
+ EXPECT_EQ(sem->Behaviors(), sem::Behavior::kDiscard);
}
TEST_F(ResolverBehaviorTest, StmtCallReturn) {
- Func("f", {}, ty.void_(), {Return()});
- auto* stmt = CallStmt(Call("f"));
- WrapInFunction(stmt);
+ Func("f", {}, ty.void_(), {Return()});
+ auto* stmt = CallStmt(Call("f"));
+ WrapInFunction(stmt);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = Sem().Get(stmt);
- EXPECT_EQ(sem->Behaviors(), sem::Behavior::kNext);
+ auto* sem = Sem().Get(stmt);
+ EXPECT_EQ(sem->Behaviors(), sem::Behavior::kNext);
}
TEST_F(ResolverBehaviorTest, StmtCallFuncDiscard) {
- Func("f", {}, ty.void_(), {Discard()});
- auto* stmt = CallStmt(Call("f"));
- WrapInFunction(stmt);
+ Func("f", {}, ty.void_(), {Discard()});
+ auto* stmt = CallStmt(Call("f"));
+ WrapInFunction(stmt);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = Sem().Get(stmt);
- EXPECT_EQ(sem->Behaviors(), sem::Behavior::kDiscard);
+ auto* sem = Sem().Get(stmt);
+ EXPECT_EQ(sem->Behaviors(), sem::Behavior::kDiscard);
}
TEST_F(ResolverBehaviorTest, StmtCallFuncMayDiscard) {
- auto* stmt = For(Decl(Var("v", ty.i32(), Call("DiscardOrNext"))), nullptr,
- nullptr, Block(Break()));
- WrapInFunction(stmt);
+ auto* stmt =
+ For(Decl(Var("v", ty.i32(), Call("DiscardOrNext"))), nullptr, nullptr, Block(Break()));
+ WrapInFunction(stmt);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = Sem().Get(stmt);
- EXPECT_EQ(sem->Behaviors(),
- sem::Behaviors(sem::Behavior::kDiscard, sem::Behavior::kNext));
+ auto* sem = Sem().Get(stmt);
+ EXPECT_EQ(sem->Behaviors(), sem::Behaviors(sem::Behavior::kDiscard, sem::Behavior::kNext));
}
TEST_F(ResolverBehaviorTest, StmtBreak) {
- auto* stmt = Break();
- WrapInFunction(Loop(Block(stmt)));
+ auto* stmt = Break();
+ WrapInFunction(Loop(Block(stmt)));
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = Sem().Get(stmt);
- EXPECT_EQ(sem->Behaviors(), sem::Behavior::kBreak);
+ auto* sem = Sem().Get(stmt);
+ EXPECT_EQ(sem->Behaviors(), sem::Behavior::kBreak);
}
TEST_F(ResolverBehaviorTest, StmtContinue) {
- auto* stmt = Continue();
- WrapInFunction(Loop(Block(If(true, Block(Break())), //
- stmt)));
+ auto* stmt = Continue();
+ WrapInFunction(Loop(Block(If(true, Block(Break())), //
+ stmt)));
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = Sem().Get(stmt);
- EXPECT_EQ(sem->Behaviors(), sem::Behavior::kContinue);
+ auto* sem = Sem().Get(stmt);
+ EXPECT_EQ(sem->Behaviors(), sem::Behavior::kContinue);
}
TEST_F(ResolverBehaviorTest, StmtDiscard) {
- auto* stmt = Discard();
- WrapInFunction(stmt);
+ auto* stmt = Discard();
+ WrapInFunction(stmt);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = Sem().Get(stmt);
- EXPECT_EQ(sem->Behaviors(), sem::Behavior::kDiscard);
+ auto* sem = Sem().Get(stmt);
+ EXPECT_EQ(sem->Behaviors(), sem::Behavior::kDiscard);
}
TEST_F(ResolverBehaviorTest, StmtForLoopEmpty_NoExit) {
- auto* stmt = For(Source{{12, 34}}, nullptr, nullptr, nullptr, Block());
- WrapInFunction(stmt);
+ auto* stmt = For(Source{{12, 34}}, nullptr, nullptr, nullptr, Block());
+ WrapInFunction(stmt);
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), "12:34 error: for-loop does not exit");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: for-loop does not exit");
}
TEST_F(ResolverBehaviorTest, StmtForLoopBreak) {
- auto* stmt = For(nullptr, nullptr, nullptr, Block(Break()));
- WrapInFunction(stmt);
+ auto* stmt = For(nullptr, nullptr, nullptr, Block(Break()));
+ WrapInFunction(stmt);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = Sem().Get(stmt);
- EXPECT_EQ(sem->Behaviors(), sem::Behavior::kNext);
+ auto* sem = Sem().Get(stmt);
+ EXPECT_EQ(sem->Behaviors(), sem::Behavior::kNext);
}
TEST_F(ResolverBehaviorTest, StmtForLoopContinue_NoExit) {
- auto* stmt =
- For(Source{{12, 34}}, nullptr, nullptr, nullptr, Block(Continue()));
- WrapInFunction(stmt);
+ auto* stmt = For(Source{{12, 34}}, nullptr, nullptr, nullptr, Block(Continue()));
+ WrapInFunction(stmt);
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), "12:34 error: for-loop does not exit");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: for-loop does not exit");
}
TEST_F(ResolverBehaviorTest, StmtForLoopDiscard) {
- auto* stmt = For(nullptr, nullptr, nullptr, Block(Discard()));
- WrapInFunction(stmt);
+ auto* stmt = For(nullptr, nullptr, nullptr, Block(Discard()));
+ WrapInFunction(stmt);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = Sem().Get(stmt);
- EXPECT_EQ(sem->Behaviors(), sem::Behavior::kDiscard);
+ auto* sem = Sem().Get(stmt);
+ EXPECT_EQ(sem->Behaviors(), sem::Behavior::kDiscard);
}
TEST_F(ResolverBehaviorTest, StmtForLoopReturn) {
- auto* stmt = For(nullptr, nullptr, nullptr, Block(Return()));
- WrapInFunction(stmt);
+ auto* stmt = For(nullptr, nullptr, nullptr, Block(Return()));
+ WrapInFunction(stmt);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = Sem().Get(stmt);
- EXPECT_EQ(sem->Behaviors(), sem::Behavior::kReturn);
+ auto* sem = Sem().Get(stmt);
+ EXPECT_EQ(sem->Behaviors(), sem::Behavior::kReturn);
}
TEST_F(ResolverBehaviorTest, StmtForLoopBreak_InitCallFuncMayDiscard) {
- auto* stmt = For(Decl(Var("v", ty.i32(), Call("DiscardOrNext"))), nullptr,
- nullptr, Block(Break()));
- WrapInFunction(stmt);
+ auto* stmt =
+ For(Decl(Var("v", ty.i32(), Call("DiscardOrNext"))), nullptr, nullptr, Block(Break()));
+ WrapInFunction(stmt);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = Sem().Get(stmt);
- EXPECT_EQ(sem->Behaviors(),
- sem::Behaviors(sem::Behavior::kDiscard, sem::Behavior::kNext));
+ auto* sem = Sem().Get(stmt);
+ EXPECT_EQ(sem->Behaviors(), sem::Behaviors(sem::Behavior::kDiscard, sem::Behavior::kNext));
}
TEST_F(ResolverBehaviorTest, StmtForLoopEmpty_InitCallFuncMayDiscard) {
- auto* stmt = For(Decl(Var("v", ty.i32(), Call("DiscardOrNext"))), nullptr,
- nullptr, Block());
- WrapInFunction(stmt);
+ auto* stmt = For(Decl(Var("v", ty.i32(), Call("DiscardOrNext"))), nullptr, nullptr, Block());
+ WrapInFunction(stmt);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = Sem().Get(stmt);
- EXPECT_EQ(sem->Behaviors(), sem::Behavior::kDiscard);
+ auto* sem = Sem().Get(stmt);
+ EXPECT_EQ(sem->Behaviors(), sem::Behavior::kDiscard);
}
TEST_F(ResolverBehaviorTest, StmtForLoopEmpty_CondTrue) {
- auto* stmt = For(nullptr, true, nullptr, Block());
- WrapInFunction(stmt);
+ auto* stmt = For(nullptr, true, nullptr, Block());
+ WrapInFunction(stmt);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = Sem().Get(stmt);
- EXPECT_EQ(sem->Behaviors(), sem::Behaviors(sem::Behavior::kNext));
+ auto* sem = Sem().Get(stmt);
+ EXPECT_EQ(sem->Behaviors(), sem::Behaviors(sem::Behavior::kNext));
}
TEST_F(ResolverBehaviorTest, StmtForLoopEmpty_CondCallFuncMayDiscard) {
- auto* stmt = For(nullptr, Equal(Call("DiscardOrNext"), 1), nullptr, Block());
- WrapInFunction(stmt);
+ auto* stmt = For(nullptr, Equal(Call("DiscardOrNext"), 1_i), nullptr, Block());
+ WrapInFunction(stmt);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = Sem().Get(stmt);
- EXPECT_EQ(sem->Behaviors(),
- sem::Behaviors(sem::Behavior::kDiscard, sem::Behavior::kNext));
+ auto* sem = Sem().Get(stmt);
+ EXPECT_EQ(sem->Behaviors(), sem::Behaviors(sem::Behavior::kDiscard, sem::Behavior::kNext));
}
TEST_F(ResolverBehaviorTest, StmtIfTrue_ThenEmptyBlock) {
- auto* stmt = If(true, Block());
- WrapInFunction(stmt);
+ auto* stmt = If(true, Block());
+ WrapInFunction(stmt);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = Sem().Get(stmt);
- EXPECT_EQ(sem->Behaviors(), sem::Behavior::kNext);
+ auto* sem = Sem().Get(stmt);
+ EXPECT_EQ(sem->Behaviors(), sem::Behavior::kNext);
}
TEST_F(ResolverBehaviorTest, StmtIfTrue_ThenDiscard) {
- auto* stmt = If(true, Block(Discard()));
- WrapInFunction(stmt);
+ auto* stmt = If(true, Block(Discard()));
+ WrapInFunction(stmt);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = Sem().Get(stmt);
- EXPECT_EQ(sem->Behaviors(),
- sem::Behaviors(sem::Behavior::kDiscard, sem::Behavior::kNext));
+ auto* sem = Sem().Get(stmt);
+ EXPECT_EQ(sem->Behaviors(), sem::Behaviors(sem::Behavior::kDiscard, sem::Behavior::kNext));
}
TEST_F(ResolverBehaviorTest, StmtIfTrue_ThenEmptyBlock_ElseDiscard) {
- auto* stmt = If(true, Block(), Else(Block(Discard())));
- WrapInFunction(stmt);
+ auto* stmt = If(true, Block(), Else(Block(Discard())));
+ WrapInFunction(stmt);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = Sem().Get(stmt);
- EXPECT_EQ(sem->Behaviors(),
- sem::Behaviors(sem::Behavior::kDiscard, sem::Behavior::kNext));
+ auto* sem = Sem().Get(stmt);
+ EXPECT_EQ(sem->Behaviors(), sem::Behaviors(sem::Behavior::kDiscard, sem::Behavior::kNext));
}
TEST_F(ResolverBehaviorTest, StmtIfTrue_ThenDiscard_ElseDiscard) {
- auto* stmt = If(true, Block(Discard()), Else(Block(Discard())));
- WrapInFunction(stmt);
+ auto* stmt = If(true, Block(Discard()), Else(Block(Discard())));
+ WrapInFunction(stmt);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = Sem().Get(stmt);
- EXPECT_EQ(sem->Behaviors(), sem::Behavior::kDiscard);
+ auto* sem = Sem().Get(stmt);
+ EXPECT_EQ(sem->Behaviors(), sem::Behavior::kDiscard);
}
TEST_F(ResolverBehaviorTest, StmtIfCallFuncMayDiscard_ThenEmptyBlock) {
- auto* stmt = If(Equal(Call("DiscardOrNext"), 1), Block());
- WrapInFunction(stmt);
+ auto* stmt = If(Equal(Call("DiscardOrNext"), 1_i), Block());
+ WrapInFunction(stmt);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = Sem().Get(stmt);
- EXPECT_EQ(sem->Behaviors(),
- sem::Behaviors(sem::Behavior::kDiscard, sem::Behavior::kNext));
+ auto* sem = Sem().Get(stmt);
+ EXPECT_EQ(sem->Behaviors(), sem::Behaviors(sem::Behavior::kDiscard, sem::Behavior::kNext));
}
TEST_F(ResolverBehaviorTest, StmtIfTrue_ThenEmptyBlock_ElseCallFuncMayDiscard) {
- auto* stmt = If(true, Block(), //
- Else(Equal(Call("DiscardOrNext"), 1), Block()));
- WrapInFunction(stmt);
+ auto* stmt = If(true, Block(), //
+ Else(If(Equal(Call("DiscardOrNext"), 1_i), Block())));
+ WrapInFunction(stmt);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = Sem().Get(stmt);
- EXPECT_EQ(sem->Behaviors(),
- sem::Behaviors(sem::Behavior::kDiscard, sem::Behavior::kNext));
+ auto* sem = Sem().Get(stmt);
+ EXPECT_EQ(sem->Behaviors(), sem::Behaviors(sem::Behavior::kDiscard, sem::Behavior::kNext));
}
TEST_F(ResolverBehaviorTest, StmtLetDecl) {
- auto* stmt = Decl(Const("v", ty.i32(), Expr(1)));
- WrapInFunction(stmt);
+ auto* stmt = Decl(Let("v", ty.i32(), Expr(1_i)));
+ WrapInFunction(stmt);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = Sem().Get(stmt);
- EXPECT_EQ(sem->Behaviors(), sem::Behavior::kNext);
+ auto* sem = Sem().Get(stmt);
+ EXPECT_EQ(sem->Behaviors(), sem::Behavior::kNext);
}
TEST_F(ResolverBehaviorTest, StmtLetDecl_RHSDiscardOrNext) {
- auto* stmt = Decl(Const("lhs", ty.i32(), Call("DiscardOrNext")));
- WrapInFunction(stmt);
+ auto* stmt = Decl(Let("lhs", ty.i32(), Call("DiscardOrNext")));
+ WrapInFunction(stmt);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = Sem().Get(stmt);
- EXPECT_EQ(sem->Behaviors(),
- sem::Behaviors(sem::Behavior::kDiscard, sem::Behavior::kNext));
+ auto* sem = Sem().Get(stmt);
+ EXPECT_EQ(sem->Behaviors(), sem::Behaviors(sem::Behavior::kDiscard, sem::Behavior::kNext));
}
TEST_F(ResolverBehaviorTest, StmtLoopEmpty_NoExit) {
- auto* stmt = Loop(Source{{12, 34}}, Block());
- WrapInFunction(stmt);
+ auto* stmt = Loop(Source{{12, 34}}, Block());
+ WrapInFunction(stmt);
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), "12:34 error: loop does not exit");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: loop does not exit");
}
TEST_F(ResolverBehaviorTest, StmtLoopBreak) {
- auto* stmt = Loop(Block(Break()));
- WrapInFunction(stmt);
+ auto* stmt = Loop(Block(Break()));
+ WrapInFunction(stmt);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = Sem().Get(stmt);
- EXPECT_EQ(sem->Behaviors(), sem::Behavior::kNext);
+ auto* sem = Sem().Get(stmt);
+ EXPECT_EQ(sem->Behaviors(), sem::Behavior::kNext);
}
TEST_F(ResolverBehaviorTest, StmtLoopContinue_NoExit) {
- auto* stmt = Loop(Source{{12, 34}}, Block(Continue()));
- WrapInFunction(stmt);
+ auto* stmt = Loop(Source{{12, 34}}, Block(Continue()));
+ WrapInFunction(stmt);
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), "12:34 error: loop does not exit");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: loop does not exit");
}
TEST_F(ResolverBehaviorTest, StmtLoopDiscard) {
- auto* stmt = Loop(Block(Discard()));
- WrapInFunction(stmt);
+ auto* stmt = Loop(Block(Discard()));
+ WrapInFunction(stmt);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = Sem().Get(stmt);
- EXPECT_EQ(sem->Behaviors(), sem::Behavior::kDiscard);
+ auto* sem = Sem().Get(stmt);
+ EXPECT_EQ(sem->Behaviors(), sem::Behavior::kDiscard);
}
TEST_F(ResolverBehaviorTest, StmtLoopReturn) {
- auto* stmt = Loop(Block(Return()));
- WrapInFunction(stmt);
+ auto* stmt = Loop(Block(Return()));
+ WrapInFunction(stmt);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = Sem().Get(stmt);
- EXPECT_EQ(sem->Behaviors(), sem::Behavior::kReturn);
+ auto* sem = Sem().Get(stmt);
+ EXPECT_EQ(sem->Behaviors(), sem::Behavior::kReturn);
}
TEST_F(ResolverBehaviorTest, StmtLoopEmpty_ContEmpty_NoExit) {
- auto* stmt = Loop(Source{{12, 34}}, Block(), Block());
- WrapInFunction(stmt);
+ auto* stmt = Loop(Source{{12, 34}}, Block(), Block());
+ WrapInFunction(stmt);
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), "12:34 error: loop does not exit");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: loop does not exit");
}
TEST_F(ResolverBehaviorTest, StmtLoopEmpty_ContIfTrueBreak) {
- auto* stmt = Loop(Block(), Block(If(true, Block(Break()))));
- WrapInFunction(stmt);
+ auto* stmt = Loop(Block(), Block(If(true, Block(Break()))));
+ WrapInFunction(stmt);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = Sem().Get(stmt);
- EXPECT_EQ(sem->Behaviors(), sem::Behavior::kNext);
+ auto* sem = Sem().Get(stmt);
+ EXPECT_EQ(sem->Behaviors(), sem::Behavior::kNext);
}
TEST_F(ResolverBehaviorTest, StmtReturn) {
- auto* stmt = Return();
- WrapInFunction(stmt);
+ auto* stmt = Return();
+ WrapInFunction(stmt);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = Sem().Get(stmt);
- EXPECT_EQ(sem->Behaviors(), sem::Behavior::kReturn);
+ auto* sem = Sem().Get(stmt);
+ EXPECT_EQ(sem->Behaviors(), sem::Behavior::kReturn);
}
TEST_F(ResolverBehaviorTest, StmtReturn_DiscardOrNext) {
- auto* stmt = Return(Call("DiscardOrNext"));
- Func("F", {}, ty.i32(), {stmt});
+ auto* stmt = Return(Call("DiscardOrNext"));
+ Func("F", {}, ty.i32(), {stmt});
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = Sem().Get(stmt);
- EXPECT_EQ(sem->Behaviors(),
- sem::Behaviors(sem::Behavior::kReturn, sem::Behavior::kDiscard));
+ auto* sem = Sem().Get(stmt);
+ EXPECT_EQ(sem->Behaviors(), sem::Behaviors(sem::Behavior::kReturn, sem::Behavior::kDiscard));
}
TEST_F(ResolverBehaviorTest, StmtSwitch_CondTrue_DefaultEmpty) {
- auto* stmt = Switch(1, DefaultCase(Block()));
- WrapInFunction(stmt);
+ auto* stmt = Switch(1_i, DefaultCase(Block()));
+ WrapInFunction(stmt);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = Sem().Get(stmt);
- EXPECT_EQ(sem->Behaviors(), sem::Behavior::kNext);
+ auto* sem = Sem().Get(stmt);
+ EXPECT_EQ(sem->Behaviors(), sem::Behavior::kNext);
}
TEST_F(ResolverBehaviorTest, StmtSwitch_CondLiteral_DefaultEmpty) {
- auto* stmt = Switch(1, DefaultCase(Block()));
- WrapInFunction(stmt);
+ auto* stmt = Switch(1_i, DefaultCase(Block()));
+ WrapInFunction(stmt);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = Sem().Get(stmt);
- EXPECT_EQ(sem->Behaviors(), sem::Behavior::kNext);
+ auto* sem = Sem().Get(stmt);
+ EXPECT_EQ(sem->Behaviors(), sem::Behavior::kNext);
}
TEST_F(ResolverBehaviorTest, StmtSwitch_CondLiteral_DefaultDiscard) {
- auto* stmt = Switch(1, DefaultCase(Block(Discard())));
- WrapInFunction(stmt);
+ auto* stmt = Switch(1_i, DefaultCase(Block(Discard())));
+ WrapInFunction(stmt);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = Sem().Get(stmt);
- EXPECT_EQ(sem->Behaviors(), sem::Behavior::kDiscard);
+ auto* sem = Sem().Get(stmt);
+ EXPECT_EQ(sem->Behaviors(), sem::Behavior::kDiscard);
}
TEST_F(ResolverBehaviorTest, StmtSwitch_CondLiteral_DefaultReturn) {
- auto* stmt = Switch(1, DefaultCase(Block(Return())));
- WrapInFunction(stmt);
+ auto* stmt = Switch(1_i, DefaultCase(Block(Return())));
+ WrapInFunction(stmt);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = Sem().Get(stmt);
- EXPECT_EQ(sem->Behaviors(), sem::Behavior::kReturn);
+ auto* sem = Sem().Get(stmt);
+ EXPECT_EQ(sem->Behaviors(), sem::Behavior::kReturn);
}
TEST_F(ResolverBehaviorTest, StmtSwitch_CondLiteral_Case0Empty_DefaultEmpty) {
- auto* stmt = Switch(1, Case(Expr(0), Block()), DefaultCase(Block()));
- WrapInFunction(stmt);
+ auto* stmt = Switch(1_i, Case(Expr(0_i), Block()), DefaultCase(Block()));
+ WrapInFunction(stmt);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = Sem().Get(stmt);
- EXPECT_EQ(sem->Behaviors(), sem::Behavior::kNext);
+ auto* sem = Sem().Get(stmt);
+ EXPECT_EQ(sem->Behaviors(), sem::Behavior::kNext);
}
TEST_F(ResolverBehaviorTest, StmtSwitch_CondLiteral_Case0Empty_DefaultDiscard) {
- auto* stmt = Switch(1, Case(Expr(0), Block()), DefaultCase(Block(Discard())));
- WrapInFunction(stmt);
+ auto* stmt = Switch(1_i, Case(Expr(0_i), Block()), DefaultCase(Block(Discard())));
+ WrapInFunction(stmt);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = Sem().Get(stmt);
- EXPECT_EQ(sem->Behaviors(),
- sem::Behaviors(sem::Behavior::kNext, sem::Behavior::kDiscard));
+ auto* sem = Sem().Get(stmt);
+ EXPECT_EQ(sem->Behaviors(), sem::Behaviors(sem::Behavior::kNext, sem::Behavior::kDiscard));
}
TEST_F(ResolverBehaviorTest, StmtSwitch_CondLiteral_Case0Empty_DefaultReturn) {
- auto* stmt = Switch(1, Case(Expr(0), Block()), DefaultCase(Block(Return())));
- WrapInFunction(stmt);
+ auto* stmt = Switch(1_i, Case(Expr(0_i), Block()), DefaultCase(Block(Return())));
+ WrapInFunction(stmt);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = Sem().Get(stmt);
- EXPECT_EQ(sem->Behaviors(),
- sem::Behaviors(sem::Behavior::kNext, sem::Behavior::kReturn));
+ auto* sem = Sem().Get(stmt);
+ EXPECT_EQ(sem->Behaviors(), sem::Behaviors(sem::Behavior::kNext, sem::Behavior::kReturn));
}
TEST_F(ResolverBehaviorTest, StmtSwitch_CondLiteral_Case0Discard_DefaultEmpty) {
- auto* stmt = Switch(1, Case(Expr(0), Block(Discard())), DefaultCase(Block()));
- WrapInFunction(stmt);
+ auto* stmt = Switch(1_i, Case(Expr(0_i), Block(Discard())), DefaultCase(Block()));
+ WrapInFunction(stmt);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = Sem().Get(stmt);
- EXPECT_EQ(sem->Behaviors(),
- sem::Behaviors(sem::Behavior::kDiscard, sem::Behavior::kNext));
+ auto* sem = Sem().Get(stmt);
+ EXPECT_EQ(sem->Behaviors(), sem::Behaviors(sem::Behavior::kDiscard, sem::Behavior::kNext));
}
-TEST_F(ResolverBehaviorTest,
- StmtSwitch_CondLiteral_Case0Discard_DefaultDiscard) {
- auto* stmt =
- Switch(1, Case(Expr(0), Block(Discard())), DefaultCase(Block(Discard())));
- WrapInFunction(stmt);
+TEST_F(ResolverBehaviorTest, StmtSwitch_CondLiteral_Case0Discard_DefaultDiscard) {
+ auto* stmt = Switch(1_i, Case(Expr(0_i), Block(Discard())), DefaultCase(Block(Discard())));
+ WrapInFunction(stmt);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = Sem().Get(stmt);
- EXPECT_EQ(sem->Behaviors(), sem::Behavior::kDiscard);
+ auto* sem = Sem().Get(stmt);
+ EXPECT_EQ(sem->Behaviors(), sem::Behavior::kDiscard);
}
-TEST_F(ResolverBehaviorTest,
- StmtSwitch_CondLiteral_Case0Discard_DefaultReturn) {
- auto* stmt =
- Switch(1, Case(Expr(0), Block(Discard())), DefaultCase(Block(Return())));
- WrapInFunction(stmt);
+TEST_F(ResolverBehaviorTest, StmtSwitch_CondLiteral_Case0Discard_DefaultReturn) {
+ auto* stmt = Switch(1_i, Case(Expr(0_i), Block(Discard())), DefaultCase(Block(Return())));
+ WrapInFunction(stmt);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = Sem().Get(stmt);
- EXPECT_EQ(sem->Behaviors(),
- sem::Behaviors(sem::Behavior::kDiscard, sem::Behavior::kReturn));
+ auto* sem = Sem().Get(stmt);
+ EXPECT_EQ(sem->Behaviors(), sem::Behaviors(sem::Behavior::kDiscard, sem::Behavior::kReturn));
}
-TEST_F(ResolverBehaviorTest,
- StmtSwitch_CondLiteral_Case0Discard_Case1Return_DefaultEmpty) {
- auto* stmt = Switch(1, //
- Case(Expr(0), Block(Discard())), //
- Case(Expr(1), Block(Return())), //
- DefaultCase(Block()));
- WrapInFunction(stmt);
+TEST_F(ResolverBehaviorTest, StmtSwitch_CondLiteral_Case0Discard_Case1Return_DefaultEmpty) {
+ auto* stmt = Switch(1_i, //
+ Case(Expr(0_i), Block(Discard())), //
+ Case(Expr(1_i), Block(Return())), //
+ DefaultCase(Block()));
+ WrapInFunction(stmt);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = Sem().Get(stmt);
- EXPECT_EQ(sem->Behaviors(),
- sem::Behaviors(sem::Behavior::kDiscard, sem::Behavior::kNext,
- sem::Behavior::kReturn));
+ auto* sem = Sem().Get(stmt);
+ EXPECT_EQ(sem->Behaviors(), sem::Behaviors(sem::Behavior::kDiscard, sem::Behavior::kNext,
+ sem::Behavior::kReturn));
}
TEST_F(ResolverBehaviorTest, StmtSwitch_CondCallFuncMayDiscard_DefaultEmpty) {
- auto* stmt = Switch(Call("DiscardOrNext"), DefaultCase(Block()));
- WrapInFunction(stmt);
+ auto* stmt = Switch(Call("DiscardOrNext"), DefaultCase(Block()));
+ WrapInFunction(stmt);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = Sem().Get(stmt);
- EXPECT_EQ(sem->Behaviors(),
- sem::Behaviors(sem::Behavior::kDiscard, sem::Behavior::kNext));
+ auto* sem = Sem().Get(stmt);
+ EXPECT_EQ(sem->Behaviors(), sem::Behaviors(sem::Behavior::kDiscard, sem::Behavior::kNext));
}
TEST_F(ResolverBehaviorTest, StmtVarDecl) {
- auto* stmt = Decl(Var("v", ty.i32()));
- WrapInFunction(stmt);
+ auto* stmt = Decl(Var("v", ty.i32()));
+ WrapInFunction(stmt);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = Sem().Get(stmt);
- EXPECT_EQ(sem->Behaviors(), sem::Behavior::kNext);
+ auto* sem = Sem().Get(stmt);
+ EXPECT_EQ(sem->Behaviors(), sem::Behavior::kNext);
}
TEST_F(ResolverBehaviorTest, StmtVarDecl_RHSDiscardOrNext) {
- auto* stmt = Decl(Var("lhs", ty.i32(), Call("DiscardOrNext")));
- WrapInFunction(stmt);
+ auto* stmt = Decl(Var("lhs", ty.i32(), Call("DiscardOrNext")));
+ WrapInFunction(stmt);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = Sem().Get(stmt);
- EXPECT_EQ(sem->Behaviors(),
- sem::Behaviors(sem::Behavior::kDiscard, sem::Behavior::kNext));
+ auto* sem = Sem().Get(stmt);
+ EXPECT_EQ(sem->Behaviors(), sem::Behaviors(sem::Behavior::kDiscard, sem::Behavior::kNext));
}
} // namespace
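
Note: the tests above build on the 'DiscardOrNext' fixture, whose body has the behavior set
{Discard, Return} but which contributes {Discard, Next} at a call site. The following standalone
C++ sketch (not Tint code; Behavior and CallSiteBehaviors are hypothetical names invented only for
this illustration) shows that call-site mapping: a callee's Return means control continues after
the call, so Return is replaced with Next, while every other behavior propagates as-is.

    #include <cassert>
    #include <cstdint>

    enum Behavior : uint8_t {
        kNext = 1 << 0,
        kDiscard = 1 << 1,
        kReturn = 1 << 2,
        kBreak = 1 << 3,
        kContinue = 1 << 4,
    };

    // Maps a callee's behavior set to the behaviors observed at a call site.
    uint8_t CallSiteBehaviors(uint8_t callee_behaviors) {
        uint8_t result = callee_behaviors;
        if (result & kReturn) {
            result = static_cast<uint8_t>((result & ~kReturn) | kNext);
        }
        return result;
    }

    int main() {
        // The DiscardOrNext body has {Discard, Return}; calling it yields {Discard, Next}.
        assert(CallSiteBehaviors(kDiscard | kReturn) == (kDiscard | kNext));
        return 0;
    }
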
diff --git a/chromium/third_party/dawn/src/tint/resolver/resolver_constants.cc b/chromium/third_party/dawn/src/tint/resolver/resolver_constants.cc
index 85f0abdcfc4..b281f918eae 100644
--- a/chromium/third_party/dawn/src/tint/resolver/resolver_constants.cc
+++ b/chromium/third_party/dawn/src/tint/resolver/resolver_constants.cc
@@ -14,129 +14,262 @@
#include "src/tint/resolver/resolver.h"
+#include <cmath>
+// TODO(https://crbug.com/dawn/1379) Update cpplint and remove NOLINT
+#include <optional>  // NOLINT(build/include_order)
+
+#include "src/tint/sem/abstract_float.h"
+#include "src/tint/sem/abstract_int.h"
#include "src/tint/sem/constant.h"
#include "src/tint/sem/type_constructor.h"
+#include "src/tint/utils/compiler_macros.h"
#include "src/tint/utils/map.h"
+#include "src/tint/utils/transform.h"
+
+using namespace tint::number_suffixes; // NOLINT
namespace tint::resolver {
+
namespace {
-using i32 = ProgramBuilder::i32;
-using u32 = ProgramBuilder::u32;
-using f32 = ProgramBuilder::f32;
+/// Converts and returns all the element values of `elements_in` to the type `T`, using the
+/// converter function `CONVERTER`.
+/// @param elements_in the vector of elements to be converted
+/// @param converter a function-like with the signature `void(TO&, FROM)`
+/// @returns the elements converted to type T.
+template <typename T, typename ELEMENTS_IN, typename CONVERTER>
+sem::Constant::Elements Transform(const ELEMENTS_IN& elements_in, CONVERTER&& converter) {
+ TINT_BEGIN_DISABLE_WARNING(UNREACHABLE_CODE);
-} // namespace
+ return utils::Transform(elements_in, [&](auto value_in) {
+ if constexpr (std::is_same_v<UnwrapNumber<T>, bool>) {
+ return AInt(value_in != 0);
+ } else {
+ T converted{};
+ converter(converted, value_in);
+ if constexpr (IsFloatingPoint<UnwrapNumber<T>>) {
+ return AFloat(converted);
+ } else {
+ return AInt(converted);
+ }
+ }
+ });
+
+ TINT_END_DISABLE_WARNING(UNREACHABLE_CODE);
+}
-sem::Constant Resolver::EvaluateConstantValue(const ast::Expression* expr,
- const sem::Type* type) {
- if (auto* e = expr->As<ast::LiteralExpression>()) {
- return EvaluateConstantValue(e, type);
- }
- if (auto* e = expr->As<ast::CallExpression>()) {
- return EvaluateConstantValue(e, type);
- }
- return {};
+/// Converts and returns all the element values of `in` to the semantic type `el_ty`, using the
+/// converter function `CONVERTER`.
+/// @param in the constant to convert
+/// @param el_ty the target element type
+/// @param converter a function-like with the signature `void(TO&, FROM)`
+/// @returns the elements converted to `el_ty`
+template <typename CONVERTER>
+sem::Constant::Elements Transform(const sem::Constant::Elements& in,
+ const sem::Type* el_ty,
+ CONVERTER&& converter) {
+ return std::visit(
+ [&](auto&& v) {
+ return Switch(
+ el_ty, //
+ [&](const sem::AbstractInt*) { return Transform<AInt>(v, converter); },
+ [&](const sem::AbstractFloat*) { return Transform<AFloat>(v, converter); },
+ [&](const sem::I32*) { return Transform<i32>(v, converter); },
+ [&](const sem::U32*) { return Transform<u32>(v, converter); },
+ [&](const sem::F32*) { return Transform<f32>(v, converter); },
+ [&](const sem::F16*) { return Transform<f16>(v, converter); },
+ [&](const sem::Bool*) { return Transform<bool>(v, converter); },
+ [&](Default) -> sem::Constant::Elements {
+ diag::List diags;
+ TINT_UNREACHABLE(Semantic, diags)
+ << "invalid element type " << el_ty->TypeInfo().name;
+ return {};
+ });
+ },
+ in);
}
-sem::Constant Resolver::EvaluateConstantValue(
- const ast::LiteralExpression* literal,
- const sem::Type* type) {
- if (auto* lit = literal->As<ast::SintLiteralExpression>()) {
- return {type, {lit->ValueAsI32()}};
- }
- if (auto* lit = literal->As<ast::UintLiteralExpression>()) {
- return {type, {lit->ValueAsU32()}};
- }
- if (auto* lit = literal->As<ast::FloatLiteralExpression>()) {
- return {type, {lit->value}};
- }
- if (auto* lit = literal->As<ast::BoolLiteralExpression>()) {
- return {type, {lit->value}};
- }
- TINT_UNREACHABLE(Resolver, builder_->Diagnostics());
- return {};
+/// Converts and returns all the elements in `in` to the type `el_ty`.
+/// If the value does not fit in the target type, and:
+/// * the target type is an integer type, then the resulting value will be clamped to the integer's
+/// highest or lowest value.
+/// * the target type is a float type, then the resulting value will be either positive or
+/// negative infinity, based on the sign of the input value.
+/// @param in the input elements
+/// @param el_ty the target element type
+/// @returns the elements converted to `el_ty`
+sem::Constant::Elements ConvertElements(const sem::Constant::Elements& in, const sem::Type* el_ty) {
+ return Transform(in, el_ty, [](auto& el_out, auto el_in) {
+ using OUT = std::decay_t<decltype(el_out)>;
+ if (auto conv = CheckedConvert<OUT>(el_in)) {
+ el_out = conv.Get();
+ } else {
+ constexpr auto kInf = std::numeric_limits<double>::infinity();
+ switch (conv.Failure()) {
+ case ConversionFailure::kExceedsNegativeLimit:
+ el_out = IsFloatingPoint<UnwrapNumber<OUT>> ? OUT(-kInf) : OUT::kLowest;
+ break;
+ case ConversionFailure::kExceedsPositiveLimit:
+ el_out = IsFloatingPoint<UnwrapNumber<OUT>> ? OUT(kInf) : OUT::kHighest;
+ break;
+ }
+ }
+ });
}
-sem::Constant Resolver::EvaluateConstantValue(const ast::CallExpression* call,
- const sem::Type* type) {
- auto* vec = type->As<sem::Vector>();
+/// Converts and returns all the elements in `in` to the type `el_ty`, by performing a
+/// `CheckedConvert` on each element value. A single error diagnostic will be raised if an element
+/// value cannot be represented by the target type.
+/// @param in the input elements
+/// @param el_ty the target element type
+/// @returns the elements converted to `el_ty`, or a Failure if some elements could not be
+/// represented by the target type.
+utils::Result<sem::Constant::Elements> MaterializeElements(const sem::Constant::Elements& in,
+ const sem::Type* el_ty,
+ ProgramBuilder& builder,
+ Source source) {
+ std::optional<std::string> failure;
- // For now, only fold scalars and vectors
- if (!type->is_scalar() && !vec) {
- return {};
- }
+ auto out = Transform(in, el_ty, [&](auto& el_out, auto el_in) {
+ using OUT = std::decay_t<decltype(el_out)>;
+ if (auto conv = CheckedConvert<OUT>(el_in)) {
+ el_out = conv.Get();
+ } else if (!failure.has_value()) {
+ std::stringstream ss;
+ ss << "value " << el_in << " cannot be represented as ";
+ ss << "'" << builder.FriendlyName(el_ty) << "'";
+ failure = ss.str();
+ }
+ });
- auto* elem_type = vec ? vec->type() : type;
- int result_size = vec ? static_cast<int>(vec->Width()) : 1;
+ if (failure.has_value()) {
+ builder.Diagnostics().add_error(diag::System::Resolver, std::move(failure.value()), source);
+ return utils::Failure;
+ }
+
+ return out;
+}
- // For zero value init, return 0s
- if (call->args.empty()) {
- if (elem_type->Is<sem::I32>()) {
- return sem::Constant(type, sem::Constant::Scalars(result_size, 0));
+} // namespace
+
+utils::Result<sem::Constant> Resolver::EvaluateConstantValue(const ast::Expression* expr,
+ const sem::Type* type) {
+ if (auto* e = expr->As<ast::LiteralExpression>()) {
+ return EvaluateConstantValue(e, type);
}
- if (elem_type->Is<sem::U32>()) {
- return sem::Constant(type, sem::Constant::Scalars(result_size, 0u));
+ if (auto* e = expr->As<ast::CallExpression>()) {
+ return EvaluateConstantValue(e, type);
}
- if (elem_type->Is<sem::F32>()) {
- return sem::Constant(type, sem::Constant::Scalars(result_size, 0.f));
+ return sem::Constant{};
+}
+
+utils::Result<sem::Constant> Resolver::EvaluateConstantValue(const ast::LiteralExpression* literal,
+ const sem::Type* type) {
+ return Switch(
+ literal,
+ [&](const ast::BoolLiteralExpression* lit) {
+ return sem::Constant{type, {AInt(lit->value ? 1 : 0)}};
+ },
+ [&](const ast::IntLiteralExpression* lit) {
+ return sem::Constant{type, {AInt(lit->value)}};
+ },
+ [&](const ast::FloatLiteralExpression* lit) {
+ return sem::Constant{type, {AFloat(lit->value)}};
+ });
+}
+
+utils::Result<sem::Constant> Resolver::EvaluateConstantValue(const ast::CallExpression* call,
+ const sem::Type* ty) {
+ uint32_t result_size = 0;
+ auto* el_ty = sem::Type::ElementOf(ty, &result_size);
+ if (!el_ty) {
+ return sem::Constant{};
}
- if (elem_type->Is<sem::Bool>()) {
- return sem::Constant(type, sem::Constant::Scalars(result_size, false));
+
+ // ElementOf() will also return the element type of array, which we do not support.
+ if (ty->Is<sem::Array>()) {
+ return sem::Constant{};
}
- }
-
- // Build value for type_ctor from each child value by casting to
- // type_ctor's type.
- sem::Constant::Scalars elems;
- for (auto* expr : call->args) {
- auto* arg = builder_->Sem().Get(expr);
- if (!arg || !arg->ConstantValue()) {
- return {};
+
+ // For zero value init, return 0s
+ if (call->args.empty()) {
+ return Switch(
+ el_ty,
+ [&](const sem::AbstractInt*) {
+ return sem::Constant(ty, std::vector(result_size, AInt(0)));
+ },
+ [&](const sem::AbstractFloat*) {
+ return sem::Constant(ty, std::vector(result_size, AFloat(0)));
+ },
+ [&](const sem::I32*) { return sem::Constant(ty, std::vector(result_size, AInt(0))); },
+ [&](const sem::U32*) { return sem::Constant(ty, std::vector(result_size, AInt(0))); },
+ [&](const sem::F32*) { return sem::Constant(ty, std::vector(result_size, AFloat(0))); },
+ [&](const sem::F16*) { return sem::Constant(ty, std::vector(result_size, AFloat(0))); },
+ [&](const sem::Bool*) { return sem::Constant(ty, std::vector(result_size, AInt(0))); });
}
- auto cast = ConstantCast(arg->ConstantValue(), elem_type);
- elems.insert(elems.end(), cast.Elements().begin(), cast.Elements().end());
- }
-
- // Splat single-value initializers
- if (elems.size() == 1) {
- for (int i = 0; i < result_size - 1; ++i) {
- elems.emplace_back(elems[0]);
+
+ // Build value for type_ctor from each child value by converting to type_ctor's type.
+ std::optional<sem::Constant::Elements> elements;
+ for (auto* expr : call->args) {
+ auto* arg = builder_->Sem().Get(expr);
+ if (!arg) {
+ return sem::Constant{};
+ }
+ auto value = arg->ConstantValue();
+ if (!value) {
+ return sem::Constant{};
+ }
+
+ // Convert the elements to the desired type.
+ auto converted = ConvertElements(value.GetElements(), el_ty);
+
+ if (elements.has_value()) {
+ // Append the converted vector to elements
+ std::visit(
+ [&](auto&& dst) {
+ using VEC_TY = std::decay_t<decltype(dst)>;
+ const auto& src = std::get<VEC_TY>(converted);
+ dst.insert(dst.end(), src.begin(), src.end());
+ },
+ elements.value());
+ } else {
+ elements = std::move(converted);
+ }
}
- }
- return sem::Constant(type, std::move(elems));
+ // Splat single-value initializers
+ std::visit(
+ [&](auto&& v) {
+ if (v.size() == 1) {
+ for (uint32_t i = 0; i < result_size - 1; ++i) {
+ v.emplace_back(v[0]);
+ }
+ }
+ },
+ elements.value());
+
+ return sem::Constant(ty, std::move(elements.value()));
}
-sem::Constant Resolver::ConstantCast(const sem::Constant& value,
- const sem::Type* target_elem_type) {
- if (value.ElementType() == target_elem_type) {
- return value;
- }
-
- sem::Constant::Scalars elems;
- for (size_t i = 0; i < value.Elements().size(); ++i) {
- if (target_elem_type->Is<sem::I32>()) {
- elems.emplace_back(
- value.WithScalarAt(i, [](auto&& s) { return static_cast<i32>(s); }));
- } else if (target_elem_type->Is<sem::U32>()) {
- elems.emplace_back(
- value.WithScalarAt(i, [](auto&& s) { return static_cast<u32>(s); }));
- } else if (target_elem_type->Is<sem::F32>()) {
- elems.emplace_back(
- value.WithScalarAt(i, [](auto&& s) { return static_cast<f32>(s); }));
- } else if (target_elem_type->Is<sem::Bool>()) {
- elems.emplace_back(
- value.WithScalarAt(i, [](auto&& s) { return static_cast<bool>(s); }));
+utils::Result<sem::Constant> Resolver::ConvertValue(const sem::Constant& value,
+ const sem::Type* ty,
+ const Source& source) {
+ if (value.Type() == ty) {
+ return value;
}
- }
- auto* target_type =
- value.Type()->Is<sem::Vector>()
- ? builder_->create<sem::Vector>(target_elem_type,
- static_cast<uint32_t>(elems.size()))
- : target_elem_type;
+ auto* el_ty = sem::Type::ElementOf(ty);
+ if (el_ty == nullptr) {
+ return sem::Constant{};
+ }
+ if (value.ElementType() == el_ty) {
+ return sem::Constant(ty, value.GetElements());
+ }
- return sem::Constant(target_type, elems);
+ if (auto res = MaterializeElements(value.GetElements(), el_ty, *builder_, source)) {
+ return sem::Constant(ty, std::move(res.Get()));
+ }
+ return utils::Failure;
}
} // namespace tint::resolver
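
Note: the ConvertElements() documentation in the resolver_constants.cc hunk above specifies the
out-of-range policy: integer targets clamp to their highest or lowest value, float targets become
positive or negative infinity based on the sign of the input. The following standalone C++ sketch
(not Tint code; ConvertClampedToI32 and ConvertToF32 are hypothetical helpers invented only for
this illustration, whereas Tint itself goes through CheckedConvert) demonstrates that policy with
plain C++ types.

    #include <cassert>
    #include <cmath>
    #include <cstdint>
    #include <limits>

    // Converts a double to int32_t, clamping values outside the target's range,
    // mirroring the "clamped to the integer's highest or lowest value" rule.
    int32_t ConvertClampedToI32(double v) {
        if (v < static_cast<double>(std::numeric_limits<int32_t>::min())) {
            return std::numeric_limits<int32_t>::min();
        }
        if (v > static_cast<double>(std::numeric_limits<int32_t>::max())) {
            return std::numeric_limits<int32_t>::max();
        }
        return static_cast<int32_t>(v);
    }

    // Converts a double to float; values beyond float's range become +/- infinity,
    // mirroring the "positive or negative infinity, based on the sign" rule.
    float ConvertToF32(double v) {
        if (v > static_cast<double>(std::numeric_limits<float>::max())) {
            return std::numeric_limits<float>::infinity();
        }
        if (v < -static_cast<double>(std::numeric_limits<float>::max())) {
            return -std::numeric_limits<float>::infinity();
        }
        return static_cast<float>(v);
    }

    int main() {
        assert(ConvertClampedToI32(1e12) == std::numeric_limits<int32_t>::max());
        assert(ConvertClampedToI32(-1e12) == std::numeric_limits<int32_t>::min());
        assert(std::isinf(ConvertToF32(1e40)) && ConvertToF32(1e40) > 0);
        assert(std::isinf(ConvertToF32(-1e40)) && ConvertToF32(-1e40) < 0);
        return 0;
    }
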
diff --git a/chromium/third_party/dawn/src/tint/resolver/resolver_constants_test.cc b/chromium/third_party/dawn/src/tint/resolver/resolver_constants_test.cc
index b3a1be737e4..bbdbfeafd4f 100644
--- a/chromium/third_party/dawn/src/tint/resolver/resolver_constants_test.cc
+++ b/chromium/third_party/dawn/src/tint/resolver/resolver_constants_test.cc
@@ -18,413 +18,497 @@
#include "src/tint/resolver/resolver_test_helper.h"
#include "src/tint/sem/expression.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::resolver {
namespace {
-using Scalar = sem::Constant::Scalar;
-
using ResolverConstantsTest = ResolverTest;
TEST_F(ResolverConstantsTest, Scalar_i32) {
- auto* expr = Expr(99);
- WrapInFunction(expr);
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
-
- auto* sem = Sem().Get(expr);
- EXPECT_NE(sem, nullptr);
- EXPECT_TRUE(sem->Type()->Is<sem::I32>());
- EXPECT_EQ(sem->ConstantValue().Type(), sem->Type());
- EXPECT_EQ(sem->ConstantValue().ElementType(), sem->Type());
- ASSERT_EQ(sem->ConstantValue().Elements().size(), 1u);
- EXPECT_EQ(sem->ConstantValue().Elements()[0].i32, 99);
+ auto* expr = Expr(99_i);
+ WrapInFunction(expr);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+
+ auto* sem = Sem().Get(expr);
+ EXPECT_NE(sem, nullptr);
+ EXPECT_TRUE(sem->Type()->Is<sem::I32>());
+ EXPECT_EQ(sem->ConstantValue().Type(), sem->Type());
+ EXPECT_EQ(sem->ConstantValue().ElementType(), sem->Type());
+ ASSERT_EQ(sem->ConstantValue().ElementCount(), 1u);
+ EXPECT_EQ(sem->ConstantValue().Element<AInt>(0).value, 99);
}
TEST_F(ResolverConstantsTest, Scalar_u32) {
- auto* expr = Expr(99u);
- WrapInFunction(expr);
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
-
- auto* sem = Sem().Get(expr);
- EXPECT_NE(sem, nullptr);
- EXPECT_TRUE(sem->Type()->Is<sem::U32>());
- EXPECT_EQ(sem->ConstantValue().Type(), sem->Type());
- EXPECT_EQ(sem->ConstantValue().ElementType(), sem->Type());
- ASSERT_EQ(sem->ConstantValue().Elements().size(), 1u);
- EXPECT_EQ(sem->ConstantValue().Elements()[0].u32, 99u);
+ auto* expr = Expr(99_u);
+ WrapInFunction(expr);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+
+ auto* sem = Sem().Get(expr);
+ EXPECT_NE(sem, nullptr);
+ EXPECT_TRUE(sem->Type()->Is<sem::U32>());
+ EXPECT_EQ(sem->ConstantValue().Type(), sem->Type());
+ EXPECT_EQ(sem->ConstantValue().ElementType(), sem->Type());
+ ASSERT_EQ(sem->ConstantValue().ElementCount(), 1u);
+ EXPECT_EQ(sem->ConstantValue().Element<AInt>(0).value, 99u);
}
TEST_F(ResolverConstantsTest, Scalar_f32) {
- auto* expr = Expr(9.9f);
- WrapInFunction(expr);
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
-
- auto* sem = Sem().Get(expr);
- EXPECT_NE(sem, nullptr);
- EXPECT_TRUE(sem->Type()->Is<sem::F32>());
- EXPECT_EQ(sem->ConstantValue().Type(), sem->Type());
- EXPECT_EQ(sem->ConstantValue().ElementType(), sem->Type());
- ASSERT_EQ(sem->ConstantValue().Elements().size(), 1u);
- EXPECT_EQ(sem->ConstantValue().Elements()[0].f32, 9.9f);
+ auto* expr = Expr(9.9_f);
+ WrapInFunction(expr);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+
+ auto* sem = Sem().Get(expr);
+ EXPECT_NE(sem, nullptr);
+ EXPECT_TRUE(sem->Type()->Is<sem::F32>());
+ EXPECT_EQ(sem->ConstantValue().Type(), sem->Type());
+ EXPECT_EQ(sem->ConstantValue().ElementType(), sem->Type());
+ ASSERT_EQ(sem->ConstantValue().ElementCount(), 1u);
+ EXPECT_EQ(sem->ConstantValue().Element<AFloat>(0).value, 9.9f);
}
TEST_F(ResolverConstantsTest, Scalar_bool) {
- auto* expr = Expr(true);
- WrapInFunction(expr);
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
-
- auto* sem = Sem().Get(expr);
- EXPECT_NE(sem, nullptr);
- EXPECT_TRUE(sem->Type()->Is<sem::Bool>());
- EXPECT_EQ(sem->ConstantValue().Type(), sem->Type());
- EXPECT_EQ(sem->ConstantValue().ElementType(), sem->Type());
- ASSERT_EQ(sem->ConstantValue().Elements().size(), 1u);
- EXPECT_EQ(sem->ConstantValue().Elements()[0].bool_, true);
+ auto* expr = Expr(true);
+ WrapInFunction(expr);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+
+ auto* sem = Sem().Get(expr);
+ EXPECT_NE(sem, nullptr);
+ EXPECT_TRUE(sem->Type()->Is<sem::Bool>());
+ EXPECT_EQ(sem->ConstantValue().Type(), sem->Type());
+ EXPECT_EQ(sem->ConstantValue().ElementType(), sem->Type());
+ ASSERT_EQ(sem->ConstantValue().ElementCount(), 1u);
+ EXPECT_EQ(sem->ConstantValue().Element<bool>(0), true);
}
TEST_F(ResolverConstantsTest, Vec3_ZeroInit_i32) {
- auto* expr = vec3<i32>();
- WrapInFunction(expr);
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
-
- auto* sem = Sem().Get(expr);
- EXPECT_NE(sem, nullptr);
- ASSERT_TRUE(sem->Type()->Is<sem::Vector>());
- EXPECT_TRUE(sem->Type()->As<sem::Vector>()->type()->Is<sem::I32>());
- EXPECT_EQ(sem->Type()->As<sem::Vector>()->Width(), 3u);
- EXPECT_EQ(sem->ConstantValue().Type(), sem->Type());
- EXPECT_TRUE(sem->ConstantValue().ElementType()->Is<sem::I32>());
- ASSERT_EQ(sem->ConstantValue().Elements().size(), 3u);
- EXPECT_EQ(sem->ConstantValue().Elements()[0].i32, 0);
- EXPECT_EQ(sem->ConstantValue().Elements()[1].i32, 0);
- EXPECT_EQ(sem->ConstantValue().Elements()[2].i32, 0);
+ auto* expr = vec3<i32>();
+ WrapInFunction(expr);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+
+ auto* sem = Sem().Get(expr);
+ EXPECT_NE(sem, nullptr);
+ ASSERT_TRUE(sem->Type()->Is<sem::Vector>());
+ EXPECT_TRUE(sem->Type()->As<sem::Vector>()->type()->Is<sem::I32>());
+ EXPECT_EQ(sem->Type()->As<sem::Vector>()->Width(), 3u);
+ EXPECT_EQ(sem->ConstantValue().Type(), sem->Type());
+ EXPECT_TRUE(sem->ConstantValue().ElementType()->Is<sem::I32>());
+ ASSERT_EQ(sem->ConstantValue().ElementCount(), 3u);
+ EXPECT_EQ(sem->ConstantValue().Element<AInt>(0).value, 0);
+ EXPECT_EQ(sem->ConstantValue().Element<AInt>(1).value, 0);
+ EXPECT_EQ(sem->ConstantValue().Element<AInt>(2).value, 0);
}
TEST_F(ResolverConstantsTest, Vec3_ZeroInit_u32) {
- auto* expr = vec3<u32>();
- WrapInFunction(expr);
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
-
- auto* sem = Sem().Get(expr);
- EXPECT_NE(sem, nullptr);
- ASSERT_TRUE(sem->Type()->Is<sem::Vector>());
- EXPECT_TRUE(sem->Type()->As<sem::Vector>()->type()->Is<sem::U32>());
- EXPECT_EQ(sem->Type()->As<sem::Vector>()->Width(), 3u);
- EXPECT_EQ(sem->ConstantValue().Type(), sem->Type());
- EXPECT_TRUE(sem->ConstantValue().ElementType()->Is<sem::U32>());
- ASSERT_EQ(sem->ConstantValue().Elements().size(), 3u);
- EXPECT_EQ(sem->ConstantValue().Elements()[0].u32, 0u);
- EXPECT_EQ(sem->ConstantValue().Elements()[1].u32, 0u);
- EXPECT_EQ(sem->ConstantValue().Elements()[2].u32, 0u);
+ auto* expr = vec3<u32>();
+ WrapInFunction(expr);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+
+ auto* sem = Sem().Get(expr);
+ EXPECT_NE(sem, nullptr);
+ ASSERT_TRUE(sem->Type()->Is<sem::Vector>());
+ EXPECT_TRUE(sem->Type()->As<sem::Vector>()->type()->Is<sem::U32>());
+ EXPECT_EQ(sem->Type()->As<sem::Vector>()->Width(), 3u);
+ EXPECT_EQ(sem->ConstantValue().Type(), sem->Type());
+ EXPECT_TRUE(sem->ConstantValue().ElementType()->Is<sem::U32>());
+ ASSERT_EQ(sem->ConstantValue().ElementCount(), 3u);
+ EXPECT_EQ(sem->ConstantValue().Element<AInt>(0).value, 0u);
+ EXPECT_EQ(sem->ConstantValue().Element<AInt>(1).value, 0u);
+ EXPECT_EQ(sem->ConstantValue().Element<AInt>(2).value, 0u);
}
TEST_F(ResolverConstantsTest, Vec3_ZeroInit_f32) {
- auto* expr = vec3<f32>();
- WrapInFunction(expr);
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
-
- auto* sem = Sem().Get(expr);
- EXPECT_NE(sem, nullptr);
- ASSERT_TRUE(sem->Type()->Is<sem::Vector>());
- EXPECT_TRUE(sem->Type()->As<sem::Vector>()->type()->Is<sem::F32>());
- EXPECT_EQ(sem->Type()->As<sem::Vector>()->Width(), 3u);
- EXPECT_EQ(sem->ConstantValue().Type(), sem->Type());
- EXPECT_TRUE(sem->ConstantValue().ElementType()->Is<sem::F32>());
- ASSERT_EQ(sem->ConstantValue().Elements().size(), 3u);
- EXPECT_EQ(sem->ConstantValue().Elements()[0].f32, 0u);
- EXPECT_EQ(sem->ConstantValue().Elements()[1].f32, 0u);
- EXPECT_EQ(sem->ConstantValue().Elements()[2].f32, 0u);
+ auto* expr = vec3<f32>();
+ WrapInFunction(expr);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+
+ auto* sem = Sem().Get(expr);
+ EXPECT_NE(sem, nullptr);
+ ASSERT_TRUE(sem->Type()->Is<sem::Vector>());
+ EXPECT_TRUE(sem->Type()->As<sem::Vector>()->type()->Is<sem::F32>());
+ EXPECT_EQ(sem->Type()->As<sem::Vector>()->Width(), 3u);
+ EXPECT_EQ(sem->ConstantValue().Type(), sem->Type());
+ EXPECT_TRUE(sem->ConstantValue().ElementType()->Is<sem::F32>());
+ ASSERT_EQ(sem->ConstantValue().ElementCount(), 3u);
+ EXPECT_EQ(sem->ConstantValue().Element<AFloat>(0).value, 0.0);
+ EXPECT_EQ(sem->ConstantValue().Element<AFloat>(1).value, 0.0);
+ EXPECT_EQ(sem->ConstantValue().Element<AFloat>(2).value, 0.0);
}
TEST_F(ResolverConstantsTest, Vec3_ZeroInit_bool) {
- auto* expr = vec3<bool>();
- WrapInFunction(expr);
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
-
- auto* sem = Sem().Get(expr);
- EXPECT_NE(sem, nullptr);
- ASSERT_TRUE(sem->Type()->Is<sem::Vector>());
- EXPECT_TRUE(sem->Type()->As<sem::Vector>()->type()->Is<sem::Bool>());
- EXPECT_EQ(sem->Type()->As<sem::Vector>()->Width(), 3u);
- EXPECT_EQ(sem->ConstantValue().Type(), sem->Type());
- EXPECT_TRUE(sem->ConstantValue().ElementType()->Is<sem::Bool>());
- ASSERT_EQ(sem->ConstantValue().Elements().size(), 3u);
- EXPECT_EQ(sem->ConstantValue().Elements()[0].bool_, false);
- EXPECT_EQ(sem->ConstantValue().Elements()[1].bool_, false);
- EXPECT_EQ(sem->ConstantValue().Elements()[2].bool_, false);
+ auto* expr = vec3<bool>();
+ WrapInFunction(expr);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+
+ auto* sem = Sem().Get(expr);
+ EXPECT_NE(sem, nullptr);
+ ASSERT_TRUE(sem->Type()->Is<sem::Vector>());
+ EXPECT_TRUE(sem->Type()->As<sem::Vector>()->type()->Is<sem::Bool>());
+ EXPECT_EQ(sem->Type()->As<sem::Vector>()->Width(), 3u);
+ EXPECT_EQ(sem->ConstantValue().Type(), sem->Type());
+ EXPECT_TRUE(sem->ConstantValue().ElementType()->Is<sem::Bool>());
+ ASSERT_EQ(sem->ConstantValue().ElementCount(), 3u);
+ EXPECT_EQ(sem->ConstantValue().Element<bool>(0), false);
+ EXPECT_EQ(sem->ConstantValue().Element<bool>(1), false);
+ EXPECT_EQ(sem->ConstantValue().Element<bool>(2), false);
}
TEST_F(ResolverConstantsTest, Vec3_Splat_i32) {
- auto* expr = vec3<i32>(99);
- WrapInFunction(expr);
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
-
- auto* sem = Sem().Get(expr);
- EXPECT_NE(sem, nullptr);
- ASSERT_TRUE(sem->Type()->Is<sem::Vector>());
- EXPECT_TRUE(sem->Type()->As<sem::Vector>()->type()->Is<sem::I32>());
- EXPECT_EQ(sem->Type()->As<sem::Vector>()->Width(), 3u);
- EXPECT_EQ(sem->ConstantValue().Type(), sem->Type());
- EXPECT_TRUE(sem->ConstantValue().ElementType()->Is<sem::I32>());
- ASSERT_EQ(sem->ConstantValue().Elements().size(), 3u);
- EXPECT_EQ(sem->ConstantValue().Elements()[0].i32, 99);
- EXPECT_EQ(sem->ConstantValue().Elements()[1].i32, 99);
- EXPECT_EQ(sem->ConstantValue().Elements()[2].i32, 99);
+ auto* expr = vec3<i32>(99_i);
+ WrapInFunction(expr);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+
+ auto* sem = Sem().Get(expr);
+ EXPECT_NE(sem, nullptr);
+ ASSERT_TRUE(sem->Type()->Is<sem::Vector>());
+ EXPECT_TRUE(sem->Type()->As<sem::Vector>()->type()->Is<sem::I32>());
+ EXPECT_EQ(sem->Type()->As<sem::Vector>()->Width(), 3u);
+ EXPECT_EQ(sem->ConstantValue().Type(), sem->Type());
+ EXPECT_TRUE(sem->ConstantValue().ElementType()->Is<sem::I32>());
+ ASSERT_EQ(sem->ConstantValue().ElementCount(), 3u);
+ EXPECT_EQ(sem->ConstantValue().Element<AInt>(0).value, 99);
+ EXPECT_EQ(sem->ConstantValue().Element<AInt>(1).value, 99);
+ EXPECT_EQ(sem->ConstantValue().Element<AInt>(2).value, 99);
}
TEST_F(ResolverConstantsTest, Vec3_Splat_u32) {
- auto* expr = vec3<u32>(99u);
- WrapInFunction(expr);
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
-
- auto* sem = Sem().Get(expr);
- EXPECT_NE(sem, nullptr);
- ASSERT_TRUE(sem->Type()->Is<sem::Vector>());
- EXPECT_TRUE(sem->Type()->As<sem::Vector>()->type()->Is<sem::U32>());
- EXPECT_EQ(sem->Type()->As<sem::Vector>()->Width(), 3u);
- EXPECT_EQ(sem->ConstantValue().Type(), sem->Type());
- EXPECT_TRUE(sem->ConstantValue().ElementType()->Is<sem::U32>());
- ASSERT_EQ(sem->ConstantValue().Elements().size(), 3u);
- EXPECT_EQ(sem->ConstantValue().Elements()[0].u32, 99u);
- EXPECT_EQ(sem->ConstantValue().Elements()[1].u32, 99u);
- EXPECT_EQ(sem->ConstantValue().Elements()[2].u32, 99u);
+ auto* expr = vec3<u32>(99_u);
+ WrapInFunction(expr);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+
+ auto* sem = Sem().Get(expr);
+ EXPECT_NE(sem, nullptr);
+ ASSERT_TRUE(sem->Type()->Is<sem::Vector>());
+ EXPECT_TRUE(sem->Type()->As<sem::Vector>()->type()->Is<sem::U32>());
+ EXPECT_EQ(sem->Type()->As<sem::Vector>()->Width(), 3u);
+ EXPECT_EQ(sem->ConstantValue().Type(), sem->Type());
+ EXPECT_TRUE(sem->ConstantValue().ElementType()->Is<sem::U32>());
+ ASSERT_EQ(sem->ConstantValue().ElementCount(), 3u);
+ EXPECT_EQ(sem->ConstantValue().Element<AInt>(0).value, 99u);
+ EXPECT_EQ(sem->ConstantValue().Element<AInt>(1).value, 99u);
+ EXPECT_EQ(sem->ConstantValue().Element<AInt>(2).value, 99u);
}
TEST_F(ResolverConstantsTest, Vec3_Splat_f32) {
- auto* expr = vec3<f32>(9.9f);
- WrapInFunction(expr);
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
-
- auto* sem = Sem().Get(expr);
- EXPECT_NE(sem, nullptr);
- ASSERT_TRUE(sem->Type()->Is<sem::Vector>());
- EXPECT_TRUE(sem->Type()->As<sem::Vector>()->type()->Is<sem::F32>());
- EXPECT_EQ(sem->Type()->As<sem::Vector>()->Width(), 3u);
- EXPECT_EQ(sem->ConstantValue().Type(), sem->Type());
- EXPECT_TRUE(sem->ConstantValue().ElementType()->Is<sem::F32>());
- ASSERT_EQ(sem->ConstantValue().Elements().size(), 3u);
- EXPECT_EQ(sem->ConstantValue().Elements()[0].f32, 9.9f);
- EXPECT_EQ(sem->ConstantValue().Elements()[1].f32, 9.9f);
- EXPECT_EQ(sem->ConstantValue().Elements()[2].f32, 9.9f);
+ auto* expr = vec3<f32>(9.9_f);
+ WrapInFunction(expr);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+
+ auto* sem = Sem().Get(expr);
+ EXPECT_NE(sem, nullptr);
+ ASSERT_TRUE(sem->Type()->Is<sem::Vector>());
+ EXPECT_TRUE(sem->Type()->As<sem::Vector>()->type()->Is<sem::F32>());
+ EXPECT_EQ(sem->Type()->As<sem::Vector>()->Width(), 3u);
+ EXPECT_EQ(sem->ConstantValue().Type(), sem->Type());
+ EXPECT_TRUE(sem->ConstantValue().ElementType()->Is<sem::F32>());
+ ASSERT_EQ(sem->ConstantValue().ElementCount(), 3u);
+ EXPECT_EQ(sem->ConstantValue().Element<AFloat>(0).value, 9.9f);
+ EXPECT_EQ(sem->ConstantValue().Element<AFloat>(1).value, 9.9f);
+ EXPECT_EQ(sem->ConstantValue().Element<AFloat>(2).value, 9.9f);
}
TEST_F(ResolverConstantsTest, Vec3_Splat_bool) {
- auto* expr = vec3<bool>(true);
- WrapInFunction(expr);
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
-
- auto* sem = Sem().Get(expr);
- EXPECT_NE(sem, nullptr);
- ASSERT_TRUE(sem->Type()->Is<sem::Vector>());
- EXPECT_TRUE(sem->Type()->As<sem::Vector>()->type()->Is<sem::Bool>());
- EXPECT_EQ(sem->Type()->As<sem::Vector>()->Width(), 3u);
- EXPECT_EQ(sem->ConstantValue().Type(), sem->Type());
- EXPECT_TRUE(sem->ConstantValue().ElementType()->Is<sem::Bool>());
- ASSERT_EQ(sem->ConstantValue().Elements().size(), 3u);
- EXPECT_EQ(sem->ConstantValue().Elements()[0].bool_, true);
- EXPECT_EQ(sem->ConstantValue().Elements()[1].bool_, true);
- EXPECT_EQ(sem->ConstantValue().Elements()[2].bool_, true);
+ auto* expr = vec3<bool>(true);
+ WrapInFunction(expr);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+
+ auto* sem = Sem().Get(expr);
+ EXPECT_NE(sem, nullptr);
+ ASSERT_TRUE(sem->Type()->Is<sem::Vector>());
+ EXPECT_TRUE(sem->Type()->As<sem::Vector>()->type()->Is<sem::Bool>());
+ EXPECT_EQ(sem->Type()->As<sem::Vector>()->Width(), 3u);
+ EXPECT_EQ(sem->ConstantValue().Type(), sem->Type());
+ EXPECT_TRUE(sem->ConstantValue().ElementType()->Is<sem::Bool>());
+ ASSERT_EQ(sem->ConstantValue().ElementCount(), 3u);
+ EXPECT_EQ(sem->ConstantValue().Element<bool>(0), true);
+ EXPECT_EQ(sem->ConstantValue().Element<bool>(1), true);
+ EXPECT_EQ(sem->ConstantValue().Element<bool>(2), true);
}
TEST_F(ResolverConstantsTest, Vec3_FullConstruct_i32) {
- auto* expr = vec3<i32>(1, 2, 3);
- WrapInFunction(expr);
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
-
- auto* sem = Sem().Get(expr);
- EXPECT_NE(sem, nullptr);
- ASSERT_TRUE(sem->Type()->Is<sem::Vector>());
- EXPECT_TRUE(sem->Type()->As<sem::Vector>()->type()->Is<sem::I32>());
- EXPECT_EQ(sem->Type()->As<sem::Vector>()->Width(), 3u);
- EXPECT_EQ(sem->ConstantValue().Type(), sem->Type());
- EXPECT_TRUE(sem->ConstantValue().ElementType()->Is<sem::I32>());
- ASSERT_EQ(sem->ConstantValue().Elements().size(), 3u);
- EXPECT_EQ(sem->ConstantValue().Elements()[0].i32, 1);
- EXPECT_EQ(sem->ConstantValue().Elements()[1].i32, 2);
- EXPECT_EQ(sem->ConstantValue().Elements()[2].i32, 3);
+ auto* expr = vec3<i32>(1_i, 2_i, 3_i);
+ WrapInFunction(expr);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+
+ auto* sem = Sem().Get(expr);
+ EXPECT_NE(sem, nullptr);
+ ASSERT_TRUE(sem->Type()->Is<sem::Vector>());
+ EXPECT_TRUE(sem->Type()->As<sem::Vector>()->type()->Is<sem::I32>());
+ EXPECT_EQ(sem->Type()->As<sem::Vector>()->Width(), 3u);
+ EXPECT_EQ(sem->ConstantValue().Type(), sem->Type());
+ EXPECT_TRUE(sem->ConstantValue().ElementType()->Is<sem::I32>());
+ ASSERT_EQ(sem->ConstantValue().ElementCount(), 3u);
+ EXPECT_EQ(sem->ConstantValue().Element<AInt>(0).value, 1);
+ EXPECT_EQ(sem->ConstantValue().Element<AInt>(1).value, 2);
+ EXPECT_EQ(sem->ConstantValue().Element<AInt>(2).value, 3);
}
TEST_F(ResolverConstantsTest, Vec3_FullConstruct_u32) {
- auto* expr = vec3<u32>(1u, 2u, 3u);
- WrapInFunction(expr);
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
-
- auto* sem = Sem().Get(expr);
- EXPECT_NE(sem, nullptr);
- ASSERT_TRUE(sem->Type()->Is<sem::Vector>());
- EXPECT_TRUE(sem->Type()->As<sem::Vector>()->type()->Is<sem::U32>());
- EXPECT_EQ(sem->Type()->As<sem::Vector>()->Width(), 3u);
- EXPECT_EQ(sem->ConstantValue().Type(), sem->Type());
- EXPECT_TRUE(sem->ConstantValue().ElementType()->Is<sem::U32>());
- ASSERT_EQ(sem->ConstantValue().Elements().size(), 3u);
- EXPECT_EQ(sem->ConstantValue().Elements()[0].u32, 1u);
- EXPECT_EQ(sem->ConstantValue().Elements()[1].u32, 2u);
- EXPECT_EQ(sem->ConstantValue().Elements()[2].u32, 3u);
+ auto* expr = vec3<u32>(1_u, 2_u, 3_u);
+ WrapInFunction(expr);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+
+ auto* sem = Sem().Get(expr);
+ EXPECT_NE(sem, nullptr);
+ ASSERT_TRUE(sem->Type()->Is<sem::Vector>());
+ EXPECT_TRUE(sem->Type()->As<sem::Vector>()->type()->Is<sem::U32>());
+ EXPECT_EQ(sem->Type()->As<sem::Vector>()->Width(), 3u);
+ EXPECT_EQ(sem->ConstantValue().Type(), sem->Type());
+ EXPECT_TRUE(sem->ConstantValue().ElementType()->Is<sem::U32>());
+ ASSERT_EQ(sem->ConstantValue().ElementCount(), 3u);
+ EXPECT_EQ(sem->ConstantValue().Element<AInt>(0).value, 1);
+ EXPECT_EQ(sem->ConstantValue().Element<AInt>(1).value, 2);
+ EXPECT_EQ(sem->ConstantValue().Element<AInt>(2).value, 3);
}
TEST_F(ResolverConstantsTest, Vec3_FullConstruct_f32) {
- auto* expr = vec3<f32>(1.f, 2.f, 3.f);
- WrapInFunction(expr);
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
-
- auto* sem = Sem().Get(expr);
- EXPECT_NE(sem, nullptr);
- ASSERT_TRUE(sem->Type()->Is<sem::Vector>());
- EXPECT_TRUE(sem->Type()->As<sem::Vector>()->type()->Is<sem::F32>());
- EXPECT_EQ(sem->Type()->As<sem::Vector>()->Width(), 3u);
- EXPECT_EQ(sem->ConstantValue().Type(), sem->Type());
- EXPECT_TRUE(sem->ConstantValue().ElementType()->Is<sem::F32>());
- ASSERT_EQ(sem->ConstantValue().Elements().size(), 3u);
- EXPECT_EQ(sem->ConstantValue().Elements()[0].f32, 1.f);
- EXPECT_EQ(sem->ConstantValue().Elements()[1].f32, 2.f);
- EXPECT_EQ(sem->ConstantValue().Elements()[2].f32, 3.f);
+ auto* expr = vec3<f32>(1_f, 2_f, 3_f);
+ WrapInFunction(expr);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+
+ auto* sem = Sem().Get(expr);
+ EXPECT_NE(sem, nullptr);
+ ASSERT_TRUE(sem->Type()->Is<sem::Vector>());
+ EXPECT_TRUE(sem->Type()->As<sem::Vector>()->type()->Is<sem::F32>());
+ EXPECT_EQ(sem->Type()->As<sem::Vector>()->Width(), 3u);
+ EXPECT_EQ(sem->ConstantValue().Type(), sem->Type());
+ EXPECT_TRUE(sem->ConstantValue().ElementType()->Is<sem::F32>());
+ ASSERT_EQ(sem->ConstantValue().ElementCount(), 3u);
+ EXPECT_EQ(sem->ConstantValue().Element<AFloat>(0).value, 1.f);
+ EXPECT_EQ(sem->ConstantValue().Element<AFloat>(1).value, 2.f);
+ EXPECT_EQ(sem->ConstantValue().Element<AFloat>(2).value, 3.f);
}
TEST_F(ResolverConstantsTest, Vec3_FullConstruct_bool) {
- auto* expr = vec3<bool>(true, false, true);
- WrapInFunction(expr);
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
-
- auto* sem = Sem().Get(expr);
- EXPECT_NE(sem, nullptr);
- ASSERT_TRUE(sem->Type()->Is<sem::Vector>());
- EXPECT_TRUE(sem->Type()->As<sem::Vector>()->type()->Is<sem::Bool>());
- EXPECT_EQ(sem->Type()->As<sem::Vector>()->Width(), 3u);
- EXPECT_EQ(sem->ConstantValue().Type(), sem->Type());
- EXPECT_TRUE(sem->ConstantValue().ElementType()->Is<sem::Bool>());
- ASSERT_EQ(sem->ConstantValue().Elements().size(), 3u);
- EXPECT_EQ(sem->ConstantValue().Elements()[0].bool_, true);
- EXPECT_EQ(sem->ConstantValue().Elements()[1].bool_, false);
- EXPECT_EQ(sem->ConstantValue().Elements()[2].bool_, true);
+ auto* expr = vec3<bool>(true, false, true);
+ WrapInFunction(expr);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+
+ auto* sem = Sem().Get(expr);
+ EXPECT_NE(sem, nullptr);
+ ASSERT_TRUE(sem->Type()->Is<sem::Vector>());
+ EXPECT_TRUE(sem->Type()->As<sem::Vector>()->type()->Is<sem::Bool>());
+ EXPECT_EQ(sem->Type()->As<sem::Vector>()->Width(), 3u);
+ EXPECT_EQ(sem->ConstantValue().Type(), sem->Type());
+ EXPECT_TRUE(sem->ConstantValue().ElementType()->Is<sem::Bool>());
+ ASSERT_EQ(sem->ConstantValue().ElementCount(), 3u);
+ EXPECT_EQ(sem->ConstantValue().Element<bool>(0), true);
+ EXPECT_EQ(sem->ConstantValue().Element<bool>(1), false);
+ EXPECT_EQ(sem->ConstantValue().Element<bool>(2), true);
}
TEST_F(ResolverConstantsTest, Vec3_MixConstruct_i32) {
- auto* expr = vec3<i32>(1, vec2<i32>(2, 3));
- WrapInFunction(expr);
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
-
- auto* sem = Sem().Get(expr);
- EXPECT_NE(sem, nullptr);
- ASSERT_TRUE(sem->Type()->Is<sem::Vector>());
- EXPECT_TRUE(sem->Type()->As<sem::Vector>()->type()->Is<sem::I32>());
- EXPECT_EQ(sem->Type()->As<sem::Vector>()->Width(), 3u);
- EXPECT_EQ(sem->ConstantValue().Type(), sem->Type());
- EXPECT_TRUE(sem->ConstantValue().ElementType()->Is<sem::I32>());
- ASSERT_EQ(sem->ConstantValue().Elements().size(), 3u);
- EXPECT_EQ(sem->ConstantValue().Elements()[0].i32, 1);
- EXPECT_EQ(sem->ConstantValue().Elements()[1].i32, 2);
- EXPECT_EQ(sem->ConstantValue().Elements()[2].i32, 3);
+ auto* expr = vec3<i32>(1_i, vec2<i32>(2_i, 3_i));
+ WrapInFunction(expr);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+
+ auto* sem = Sem().Get(expr);
+ EXPECT_NE(sem, nullptr);
+ ASSERT_TRUE(sem->Type()->Is<sem::Vector>());
+ EXPECT_TRUE(sem->Type()->As<sem::Vector>()->type()->Is<sem::I32>());
+ EXPECT_EQ(sem->Type()->As<sem::Vector>()->Width(), 3u);
+ EXPECT_EQ(sem->ConstantValue().Type(), sem->Type());
+ EXPECT_TRUE(sem->ConstantValue().ElementType()->Is<sem::I32>());
+ ASSERT_EQ(sem->ConstantValue().ElementCount(), 3u);
+ EXPECT_EQ(sem->ConstantValue().Element<AInt>(0).value, 1);
+ EXPECT_EQ(sem->ConstantValue().Element<AInt>(1).value, 2);
+ EXPECT_EQ(sem->ConstantValue().Element<AInt>(2).value, 3);
}
TEST_F(ResolverConstantsTest, Vec3_MixConstruct_u32) {
- auto* expr = vec3<u32>(vec2<u32>(1u, 2u), 3u);
- WrapInFunction(expr);
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
-
- auto* sem = Sem().Get(expr);
- EXPECT_NE(sem, nullptr);
- ASSERT_TRUE(sem->Type()->Is<sem::Vector>());
- EXPECT_TRUE(sem->Type()->As<sem::Vector>()->type()->Is<sem::U32>());
- EXPECT_EQ(sem->Type()->As<sem::Vector>()->Width(), 3u);
- EXPECT_EQ(sem->ConstantValue().Type(), sem->Type());
- EXPECT_TRUE(sem->ConstantValue().ElementType()->Is<sem::U32>());
- ASSERT_EQ(sem->ConstantValue().Elements().size(), 3u);
- EXPECT_EQ(sem->ConstantValue().Elements()[0].u32, 1u);
- EXPECT_EQ(sem->ConstantValue().Elements()[1].u32, 2u);
- EXPECT_EQ(sem->ConstantValue().Elements()[2].u32, 3u);
+ auto* expr = vec3<u32>(vec2<u32>(1_u, 2_u), 3_u);
+ WrapInFunction(expr);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+
+ auto* sem = Sem().Get(expr);
+ EXPECT_NE(sem, nullptr);
+ ASSERT_TRUE(sem->Type()->Is<sem::Vector>());
+ EXPECT_TRUE(sem->Type()->As<sem::Vector>()->type()->Is<sem::U32>());
+ EXPECT_EQ(sem->Type()->As<sem::Vector>()->Width(), 3u);
+ EXPECT_EQ(sem->ConstantValue().Type(), sem->Type());
+ EXPECT_TRUE(sem->ConstantValue().ElementType()->Is<sem::U32>());
+ ASSERT_EQ(sem->ConstantValue().ElementCount(), 3u);
+ EXPECT_EQ(sem->ConstantValue().Element<AInt>(0).value, 1);
+ EXPECT_EQ(sem->ConstantValue().Element<AInt>(1).value, 2);
+ EXPECT_EQ(sem->ConstantValue().Element<AInt>(2).value, 3);
}
TEST_F(ResolverConstantsTest, Vec3_MixConstruct_f32) {
- auto* expr = vec3<f32>(1.f, vec2<f32>(2.f, 3.f));
- WrapInFunction(expr);
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
-
- auto* sem = Sem().Get(expr);
- EXPECT_NE(sem, nullptr);
- ASSERT_TRUE(sem->Type()->Is<sem::Vector>());
- EXPECT_TRUE(sem->Type()->As<sem::Vector>()->type()->Is<sem::F32>());
- EXPECT_EQ(sem->Type()->As<sem::Vector>()->Width(), 3u);
- EXPECT_EQ(sem->ConstantValue().Type(), sem->Type());
- EXPECT_TRUE(sem->ConstantValue().ElementType()->Is<sem::F32>());
- ASSERT_EQ(sem->ConstantValue().Elements().size(), 3u);
- EXPECT_EQ(sem->ConstantValue().Elements()[0].f32, 1.f);
- EXPECT_EQ(sem->ConstantValue().Elements()[1].f32, 2.f);
- EXPECT_EQ(sem->ConstantValue().Elements()[2].f32, 3.f);
+ auto* expr = vec3<f32>(1_f, vec2<f32>(2_f, 3_f));
+ WrapInFunction(expr);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+
+ auto* sem = Sem().Get(expr);
+ EXPECT_NE(sem, nullptr);
+ ASSERT_TRUE(sem->Type()->Is<sem::Vector>());
+ EXPECT_TRUE(sem->Type()->As<sem::Vector>()->type()->Is<sem::F32>());
+ EXPECT_EQ(sem->Type()->As<sem::Vector>()->Width(), 3u);
+ EXPECT_EQ(sem->ConstantValue().Type(), sem->Type());
+ EXPECT_TRUE(sem->ConstantValue().ElementType()->Is<sem::F32>());
+ ASSERT_EQ(sem->ConstantValue().ElementCount(), 3u);
+ EXPECT_EQ(sem->ConstantValue().Element<AFloat>(0).value, 1.f);
+ EXPECT_EQ(sem->ConstantValue().Element<AFloat>(1).value, 2.f);
+ EXPECT_EQ(sem->ConstantValue().Element<AFloat>(2).value, 3.f);
}
TEST_F(ResolverConstantsTest, Vec3_MixConstruct_bool) {
- auto* expr = vec3<bool>(vec2<bool>(true, false), true);
- WrapInFunction(expr);
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
-
- auto* sem = Sem().Get(expr);
- EXPECT_NE(sem, nullptr);
- ASSERT_TRUE(sem->Type()->Is<sem::Vector>());
- EXPECT_TRUE(sem->Type()->As<sem::Vector>()->type()->Is<sem::Bool>());
- EXPECT_EQ(sem->Type()->As<sem::Vector>()->Width(), 3u);
- EXPECT_EQ(sem->ConstantValue().Type(), sem->Type());
- EXPECT_TRUE(sem->ConstantValue().ElementType()->Is<sem::Bool>());
- ASSERT_EQ(sem->ConstantValue().Elements().size(), 3u);
- EXPECT_EQ(sem->ConstantValue().Elements()[0].bool_, true);
- EXPECT_EQ(sem->ConstantValue().Elements()[1].bool_, false);
- EXPECT_EQ(sem->ConstantValue().Elements()[2].bool_, true);
+ auto* expr = vec3<bool>(vec2<bool>(true, false), true);
+ WrapInFunction(expr);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+
+ auto* sem = Sem().Get(expr);
+ EXPECT_NE(sem, nullptr);
+ ASSERT_TRUE(sem->Type()->Is<sem::Vector>());
+ EXPECT_TRUE(sem->Type()->As<sem::Vector>()->type()->Is<sem::Bool>());
+ EXPECT_EQ(sem->Type()->As<sem::Vector>()->Width(), 3u);
+ EXPECT_EQ(sem->ConstantValue().Type(), sem->Type());
+ EXPECT_TRUE(sem->ConstantValue().ElementType()->Is<sem::Bool>());
+ ASSERT_EQ(sem->ConstantValue().ElementCount(), 3u);
+ EXPECT_EQ(sem->ConstantValue().Element<bool>(0), true);
+ EXPECT_EQ(sem->ConstantValue().Element<bool>(1), false);
+ EXPECT_EQ(sem->ConstantValue().Element<bool>(2), true);
+}
+
+TEST_F(ResolverConstantsTest, Vec3_Convert_f32_to_i32) {
+ auto* expr = vec3<i32>(vec3<f32>(1.1_f, 2.2_f, 3.3_f));
+ WrapInFunction(expr);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+
+ auto* sem = Sem().Get(expr);
+ EXPECT_NE(sem, nullptr);
+ ASSERT_TRUE(sem->Type()->Is<sem::Vector>());
+ EXPECT_TRUE(sem->Type()->As<sem::Vector>()->type()->Is<sem::I32>());
+ EXPECT_EQ(sem->Type()->As<sem::Vector>()->Width(), 3u);
+ EXPECT_EQ(sem->ConstantValue().Type(), sem->Type());
+ EXPECT_TRUE(sem->ConstantValue().ElementType()->Is<sem::I32>());
+ ASSERT_EQ(sem->ConstantValue().ElementCount(), 3u);
+ EXPECT_EQ(sem->ConstantValue().Element<AInt>(0).value, 1);
+ EXPECT_EQ(sem->ConstantValue().Element<AInt>(1).value, 2);
+ EXPECT_EQ(sem->ConstantValue().Element<AInt>(2).value, 3);
+}
+
+TEST_F(ResolverConstantsTest, Vec3_Convert_u32_to_f32) {
+ auto* expr = vec3<f32>(vec3<u32>(10_u, 20_u, 30_u));
+ WrapInFunction(expr);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+
+ auto* sem = Sem().Get(expr);
+ EXPECT_NE(sem, nullptr);
+ ASSERT_TRUE(sem->Type()->Is<sem::Vector>());
+ EXPECT_TRUE(sem->Type()->As<sem::Vector>()->type()->Is<sem::F32>());
+ EXPECT_EQ(sem->Type()->As<sem::Vector>()->Width(), 3u);
+ EXPECT_EQ(sem->ConstantValue().Type(), sem->Type());
+ EXPECT_TRUE(sem->ConstantValue().ElementType()->Is<sem::F32>());
+ ASSERT_EQ(sem->ConstantValue().ElementCount(), 3u);
+ EXPECT_EQ(sem->ConstantValue().Element<AFloat>(0).value, 10.f);
+ EXPECT_EQ(sem->ConstantValue().Element<AFloat>(1).value, 20.f);
+ EXPECT_EQ(sem->ConstantValue().Element<AFloat>(2).value, 30.f);
+}
+
+TEST_F(ResolverConstantsTest, Vec3_Convert_Large_f32_to_i32) {
+ auto* expr = vec3<i32>(vec3<f32>(1e10_f, -1e20_f, 1e30_f));
+ WrapInFunction(expr);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+
+ auto* sem = Sem().Get(expr);
+ EXPECT_NE(sem, nullptr);
+ ASSERT_TRUE(sem->Type()->Is<sem::Vector>());
+ EXPECT_TRUE(sem->Type()->As<sem::Vector>()->type()->Is<sem::I32>());
+ EXPECT_EQ(sem->Type()->As<sem::Vector>()->Width(), 3u);
+ EXPECT_EQ(sem->ConstantValue().Type(), sem->Type());
+ EXPECT_TRUE(sem->ConstantValue().ElementType()->Is<sem::I32>());
+ ASSERT_EQ(sem->ConstantValue().ElementCount(), 3u);
+ EXPECT_EQ(sem->ConstantValue().Element<AInt>(0).value, i32::kHighest);
+ EXPECT_EQ(sem->ConstantValue().Element<AInt>(1).value, i32::kLowest);
+ EXPECT_EQ(sem->ConstantValue().Element<AInt>(2).value, i32::kHighest);
+}
+
+TEST_F(ResolverConstantsTest, Vec3_Convert_Large_f32_to_u32) {
+ auto* expr = vec3<u32>(vec3<f32>(1e10_f, -1e20_f, 1e30_f));
+ WrapInFunction(expr);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+
+ auto* sem = Sem().Get(expr);
+ EXPECT_NE(sem, nullptr);
+ ASSERT_TRUE(sem->Type()->Is<sem::Vector>());
+ EXPECT_TRUE(sem->Type()->As<sem::Vector>()->type()->Is<sem::U32>());
+ EXPECT_EQ(sem->Type()->As<sem::Vector>()->Width(), 3u);
+ EXPECT_EQ(sem->ConstantValue().Type(), sem->Type());
+ EXPECT_TRUE(sem->ConstantValue().ElementType()->Is<sem::U32>());
+ ASSERT_EQ(sem->ConstantValue().ElementCount(), 3u);
+ EXPECT_EQ(sem->ConstantValue().Element<AInt>(0).value, u32::kHighest);
+ EXPECT_EQ(sem->ConstantValue().Element<AInt>(1).value, u32::kLowest);
+ EXPECT_EQ(sem->ConstantValue().Element<AInt>(2).value, u32::kHighest);
}
-TEST_F(ResolverConstantsTest, Vec3_Cast_f32_to_32) {
- auto* expr = vec3<i32>(vec3<f32>(1.1f, 2.2f, 3.3f));
- WrapInFunction(expr);
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
-
- auto* sem = Sem().Get(expr);
- EXPECT_NE(sem, nullptr);
- ASSERT_TRUE(sem->Type()->Is<sem::Vector>());
- EXPECT_TRUE(sem->Type()->As<sem::Vector>()->type()->Is<sem::I32>());
- EXPECT_EQ(sem->Type()->As<sem::Vector>()->Width(), 3u);
- EXPECT_EQ(sem->ConstantValue().Type(), sem->Type());
- EXPECT_TRUE(sem->ConstantValue().ElementType()->Is<sem::I32>());
- ASSERT_EQ(sem->ConstantValue().Elements().size(), 3u);
- EXPECT_EQ(sem->ConstantValue().Elements()[0].i32, 1);
- EXPECT_EQ(sem->ConstantValue().Elements()[1].i32, 2);
- EXPECT_EQ(sem->ConstantValue().Elements()[2].i32, 3);
+// TODO(crbug.com/tint/1502): Enable when f16 overloads are implemented
+TEST_F(ResolverConstantsTest, DISABLED_Vec3_Convert_Large_f32_to_f16) {
+ Enable(ast::Extension::kF16);
+
+ auto* expr = vec3<f16>(vec3<f32>(1e10_f, -1e20_f, 1e30_f));
+ WrapInFunction(expr);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+
+ constexpr auto kInf = std::numeric_limits<double>::infinity();
+
+ auto* sem = Sem().Get(expr);
+ EXPECT_NE(sem, nullptr);
+ ASSERT_TRUE(sem->Type()->Is<sem::Vector>());
+ EXPECT_TRUE(sem->Type()->As<sem::Vector>()->type()->Is<sem::F16>());
+ EXPECT_EQ(sem->Type()->As<sem::Vector>()->Width(), 3u);
+ EXPECT_EQ(sem->ConstantValue().Type(), sem->Type());
+ EXPECT_TRUE(sem->ConstantValue().ElementType()->Is<sem::F16>());
+ ASSERT_EQ(sem->ConstantValue().ElementCount(), 3u);
+ EXPECT_EQ(sem->ConstantValue().Element<AFloat>(0).value, kInf);
+ EXPECT_EQ(sem->ConstantValue().Element<AFloat>(1).value, -kInf);
+ EXPECT_EQ(sem->ConstantValue().Element<AFloat>(2).value, kInf);
}
-TEST_F(ResolverConstantsTest, Vec3_Cast_u32_to_f32) {
- auto* expr = vec3<f32>(vec3<u32>(10u, 20u, 30u));
- WrapInFunction(expr);
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
-
- auto* sem = Sem().Get(expr);
- EXPECT_NE(sem, nullptr);
- ASSERT_TRUE(sem->Type()->Is<sem::Vector>());
- EXPECT_TRUE(sem->Type()->As<sem::Vector>()->type()->Is<sem::F32>());
- EXPECT_EQ(sem->Type()->As<sem::Vector>()->Width(), 3u);
- EXPECT_EQ(sem->ConstantValue().Type(), sem->Type());
- EXPECT_TRUE(sem->ConstantValue().ElementType()->Is<sem::F32>());
- ASSERT_EQ(sem->ConstantValue().Elements().size(), 3u);
- EXPECT_EQ(sem->ConstantValue().Elements()[0].f32, 10.f);
- EXPECT_EQ(sem->ConstantValue().Elements()[1].f32, 20.f);
- EXPECT_EQ(sem->ConstantValue().Elements()[2].f32, 30.f);
+// TODO(crbug.com/tint/1502): Enable when f16 overloads are implemented
+TEST_F(ResolverConstantsTest, DISABLED_Vec3_Convert_Small_f32_to_f16) {
+ Enable(ast::Extension::kF16);
+
+ auto* expr = vec3<f16>(vec3<f32>(1e-10_f, -1e-20_f, 1e-30_f));
+ WrapInFunction(expr);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+
+ auto* sem = Sem().Get(expr);
+ EXPECT_NE(sem, nullptr);
+ ASSERT_TRUE(sem->Type()->Is<sem::Vector>());
+ EXPECT_TRUE(sem->Type()->As<sem::Vector>()->type()->Is<sem::F16>());
+ EXPECT_EQ(sem->Type()->As<sem::Vector>()->Width(), 3u);
+ EXPECT_EQ(sem->ConstantValue().Type(), sem->Type());
+ EXPECT_TRUE(sem->ConstantValue().ElementType()->Is<sem::F16>());
+ ASSERT_EQ(sem->ConstantValue().ElementCount(), 3u);
+ EXPECT_EQ(sem->ConstantValue().Element<AFloat>(0).value, 0.0);
+ EXPECT_EQ(sem->ConstantValue().Element<AFloat>(1).value, -0.0);
+ EXPECT_EQ(sem->ConstantValue().Element<AFloat>(2).value, 0.0);
}
} // namespace
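The test updates above replace the old union-style accessors (Elements()[i].i32, .u32, .f32, .bool_) with ElementCount() and the templated Element<AInt>/Element<AFloat>/Element<bool> accessors, reflecting a move to storing constant elements as abstract-width numbers. A simplified stand-in model of that accessor shape (AInt/AFloat here are plain aliases and the class is not the real sem::Constant):

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <utility>
#include <variant>
#include <vector>

// Simplified model: elements are held in a small set of canonical scalar
// representations and read back through a typed accessor rather than through
// named union members.
using AInt = std::int64_t;
using AFloat = double;

class Constant {
  public:
    using Scalar = std::variant<AInt, AFloat, bool>;

    explicit Constant(std::vector<Scalar> elements) : elements_(std::move(elements)) {}

    std::size_t ElementCount() const { return elements_.size(); }

    template <typename T>
    T Element(std::size_t index) const {
        return std::get<T>(elements_.at(index));
    }

  private:
    std::vector<Scalar> elements_;
};

int main() {
    Constant c({AInt(1), AInt(2), AInt(3)});
    assert(c.ElementCount() == 3u);
    assert(c.Element<AInt>(0) == 1);
    assert(c.Element<AInt>(2) == 3);
    return 0;
}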
diff --git a/chromium/third_party/dawn/src/tint/resolver/resolver_is_storeable_test.cc b/chromium/third_party/dawn/src/tint/resolver/resolver_is_storeable_test.cc
new file mode 100644
index 00000000000..f4cde70cbc1
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/resolver/resolver_is_storeable_test.cc
@@ -0,0 +1,78 @@
+// Copyright 2021 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/tint/resolver/resolver.h"
+
+#include "gmock/gmock.h"
+#include "src/tint/resolver/resolver_test_helper.h"
+#include "src/tint/sem/atomic.h"
+
+namespace tint::resolver {
+namespace {
+
+using ResolverIsStorableTest = ResolverTest;
+
+TEST_F(ResolverIsStorableTest, Struct_AllMembersStorable) {
+ Structure("S", {
+ Member("a", ty.i32()),
+ Member("b", ty.f32()),
+ });
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
+}
+
+TEST_F(ResolverIsStorableTest, Struct_SomeMembersNonStorable) {
+ Structure("S", {
+ Member("a", ty.i32()),
+ Member("b", ty.pointer<i32>(ast::StorageClass::kPrivate)),
+ });
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(
+ r()->error(),
+ R"(error: ptr<private, i32, read_write> cannot be used as the type of a structure member)");
+}
+
+TEST_F(ResolverIsStorableTest, Struct_NestedStorable) {
+ auto* storable = Structure("Storable", {
+ Member("a", ty.i32()),
+ Member("b", ty.f32()),
+ });
+ Structure("S", {
+ Member("a", ty.i32()),
+ Member("b", ty.Of(storable)),
+ });
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
+}
+
+TEST_F(ResolverIsStorableTest, Struct_NestedNonStorable) {
+ auto* non_storable =
+ Structure("nonstorable", {
+ Member("a", ty.i32()),
+ Member("b", ty.pointer<i32>(ast::StorageClass::kPrivate)),
+ });
+ Structure("S", {
+ Member("a", ty.i32()),
+ Member("b", ty.Of(non_storable)),
+ });
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(
+ r()->error(),
+ R"(error: ptr<private, i32, read_write> cannot be used as the type of a structure member)");
+}
+
+} // namespace
+} // namespace tint::resolver
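The new resolver_is_storeable_test.cc above checks that a structure is storable only when every member is, that pointer members are rejected, and that the rule applies to nested structures. A toy model of that recursive rule, using stand-in types rather than Tint's sem classes:

#include <iostream>
#include <vector>

// Stand-in type model: scalars are storable, pointers are not, and a struct
// is storable only if all of its members are (checked recursively).
struct Type {
    enum class Kind { Scalar, Pointer, Struct } kind;
    std::vector<const Type*> members;  // used only when kind == Struct
};

bool IsStorable(const Type& ty) {
    switch (ty.kind) {
        case Type::Kind::Scalar:
            return true;
        case Type::Kind::Pointer:
            return false;  // a pointer cannot be the type of a structure member
        case Type::Kind::Struct:
            for (const Type* m : ty.members) {
                if (!IsStorable(*m)) {
                    return false;  // one non-storable member poisons the struct
                }
            }
            return true;
    }
    return false;
}

int main() {
    Type scalar{Type::Kind::Scalar, {}};
    Type pointer{Type::Kind::Pointer, {}};
    Type storable{Type::Kind::Struct, {&scalar, &scalar}};
    Type non_storable{Type::Kind::Struct, {&scalar, &pointer}};
    Type nested{Type::Kind::Struct, {&scalar, &non_storable}};

    std::cout << IsStorable(storable) << IsStorable(non_storable) << IsStorable(nested) << "\n";  // 100
}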
diff --git a/chromium/third_party/dawn/src/tint/resolver/resolver_test.cc b/chromium/third_party/dawn/src/tint/resolver/resolver_test.cc
index e7c2fb4d1c7..16725ba88a4 100644
--- a/chromium/third_party/dawn/src/tint/resolver/resolver_test.cc
+++ b/chromium/third_party/dawn/src/tint/resolver/resolver_test.cc
@@ -39,14 +39,17 @@
#include "src/tint/sem/function.h"
#include "src/tint/sem/member_accessor_expression.h"
#include "src/tint/sem/module.h"
-#include "src/tint/sem/reference_type.h"
-#include "src/tint/sem/sampled_texture_type.h"
+#include "src/tint/sem/reference.h"
+#include "src/tint/sem/sampled_texture.h"
#include "src/tint/sem/statement.h"
+#include "src/tint/sem/switch_statement.h"
#include "src/tint/sem/variable.h"
using ::testing::ElementsAre;
using ::testing::HasSubstr;
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::resolver {
namespace {
@@ -81,1169 +84,1155 @@ template <typename T>
using alias2 = builder::alias2<T>;
template <typename T>
using alias3 = builder::alias3<T>;
-using f32 = builder::f32;
-using i32 = builder::i32;
-using u32 = builder::u32;
using Op = ast::BinaryOp;
TEST_F(ResolverTest, Stmt_Assign) {
- auto* v = Var("v", ty.f32());
- auto* lhs = Expr("v");
- auto* rhs = Expr(2.3f);
+ auto* v = Var("v", ty.f32());
+ auto* lhs = Expr("v");
+ auto* rhs = Expr(2.3_f);
- auto* assign = Assign(lhs, rhs);
- WrapInFunction(v, assign);
+ auto* assign = Assign(lhs, rhs);
+ WrapInFunction(v, assign);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(lhs), nullptr);
- ASSERT_NE(TypeOf(rhs), nullptr);
+ ASSERT_NE(TypeOf(lhs), nullptr);
+ ASSERT_NE(TypeOf(rhs), nullptr);
- EXPECT_TRUE(TypeOf(lhs)->UnwrapRef()->Is<sem::F32>());
- EXPECT_TRUE(TypeOf(rhs)->Is<sem::F32>());
- EXPECT_EQ(StmtOf(lhs), assign);
- EXPECT_EQ(StmtOf(rhs), assign);
+ EXPECT_TRUE(TypeOf(lhs)->UnwrapRef()->Is<sem::F32>());
+ EXPECT_TRUE(TypeOf(rhs)->Is<sem::F32>());
+ EXPECT_EQ(StmtOf(lhs), assign);
+ EXPECT_EQ(StmtOf(rhs), assign);
}
TEST_F(ResolverTest, Stmt_Case) {
- auto* v = Var("v", ty.f32());
- auto* lhs = Expr("v");
- auto* rhs = Expr(2.3f);
-
- auto* assign = Assign(lhs, rhs);
- auto* block = Block(assign);
- ast::CaseSelectorList lit;
- lit.push_back(create<ast::SintLiteralExpression>(3));
- auto* cse = create<ast::CaseStatement>(lit, block);
- auto* cond_var = Var("c", ty.i32());
- auto* sw = Switch(cond_var, cse, DefaultCase());
- WrapInFunction(v, cond_var, sw);
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
-
- ASSERT_NE(TypeOf(lhs), nullptr);
- ASSERT_NE(TypeOf(rhs), nullptr);
- EXPECT_TRUE(TypeOf(lhs)->UnwrapRef()->Is<sem::F32>());
- EXPECT_TRUE(TypeOf(rhs)->Is<sem::F32>());
- EXPECT_EQ(StmtOf(lhs), assign);
- EXPECT_EQ(StmtOf(rhs), assign);
- EXPECT_EQ(BlockOf(assign), block);
+ auto* v = Var("v", ty.f32());
+ auto* lhs = Expr("v");
+ auto* rhs = Expr(2.3_f);
+
+ auto* assign = Assign(lhs, rhs);
+ auto* block = Block(assign);
+ auto* sel = Expr(3_i);
+ auto* cse = Case(sel, block);
+ auto* def = DefaultCase();
+ auto* cond_var = Var("c", ty.i32());
+ auto* sw = Switch(cond_var, cse, def);
+ WrapInFunction(v, cond_var, sw);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+
+ ASSERT_NE(TypeOf(lhs), nullptr);
+ ASSERT_NE(TypeOf(rhs), nullptr);
+ EXPECT_TRUE(TypeOf(lhs)->UnwrapRef()->Is<sem::F32>());
+ EXPECT_TRUE(TypeOf(rhs)->Is<sem::F32>());
+ EXPECT_EQ(StmtOf(lhs), assign);
+ EXPECT_EQ(StmtOf(rhs), assign);
+ EXPECT_EQ(BlockOf(assign), block);
+ auto* sem = Sem().Get(sw);
+ ASSERT_EQ(sem->Cases().size(), 2u);
+ EXPECT_EQ(sem->Cases()[0]->Declaration(), cse);
+ ASSERT_EQ(sem->Cases()[0]->Selectors().size(), 1u);
+ EXPECT_EQ(sem->Cases()[0]->Selectors()[0]->Declaration(), sel);
+ EXPECT_EQ(sem->Cases()[1]->Declaration(), def);
+ EXPECT_EQ(sem->Cases()[1]->Selectors().size(), 0u);
}
TEST_F(ResolverTest, Stmt_Block) {
- auto* v = Var("v", ty.f32());
- auto* lhs = Expr("v");
- auto* rhs = Expr(2.3f);
+ auto* v = Var("v", ty.f32());
+ auto* lhs = Expr("v");
+ auto* rhs = Expr(2.3_f);
- auto* assign = Assign(lhs, rhs);
- auto* block = Block(assign);
- WrapInFunction(v, block);
+ auto* assign = Assign(lhs, rhs);
+ auto* block = Block(assign);
+ WrapInFunction(v, block);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(lhs), nullptr);
- ASSERT_NE(TypeOf(rhs), nullptr);
- EXPECT_TRUE(TypeOf(lhs)->UnwrapRef()->Is<sem::F32>());
- EXPECT_TRUE(TypeOf(rhs)->Is<sem::F32>());
- EXPECT_EQ(StmtOf(lhs), assign);
- EXPECT_EQ(StmtOf(rhs), assign);
- EXPECT_EQ(BlockOf(lhs), block);
- EXPECT_EQ(BlockOf(rhs), block);
- EXPECT_EQ(BlockOf(assign), block);
+ ASSERT_NE(TypeOf(lhs), nullptr);
+ ASSERT_NE(TypeOf(rhs), nullptr);
+ EXPECT_TRUE(TypeOf(lhs)->UnwrapRef()->Is<sem::F32>());
+ EXPECT_TRUE(TypeOf(rhs)->Is<sem::F32>());
+ EXPECT_EQ(StmtOf(lhs), assign);
+ EXPECT_EQ(StmtOf(rhs), assign);
+ EXPECT_EQ(BlockOf(lhs), block);
+ EXPECT_EQ(BlockOf(rhs), block);
+ EXPECT_EQ(BlockOf(assign), block);
}
TEST_F(ResolverTest, Stmt_If) {
- auto* v = Var("v", ty.f32());
- auto* else_lhs = Expr("v");
- auto* else_rhs = Expr(2.3f);
-
- auto* else_body = Block(Assign(else_lhs, else_rhs));
-
- auto* else_cond = Expr(true);
- auto* else_stmt = create<ast::ElseStatement>(else_cond, else_body);
-
- auto* lhs = Expr("v");
- auto* rhs = Expr(2.3f);
-
- auto* assign = Assign(lhs, rhs);
- auto* body = Block(assign);
- auto* cond = Expr(true);
- auto* stmt =
- create<ast::IfStatement>(cond, body, ast::ElseStatementList{else_stmt});
- WrapInFunction(v, stmt);
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
-
- ASSERT_NE(TypeOf(stmt->condition), nullptr);
- ASSERT_NE(TypeOf(else_lhs), nullptr);
- ASSERT_NE(TypeOf(else_rhs), nullptr);
- ASSERT_NE(TypeOf(lhs), nullptr);
- ASSERT_NE(TypeOf(rhs), nullptr);
- EXPECT_TRUE(TypeOf(stmt->condition)->Is<sem::Bool>());
- EXPECT_TRUE(TypeOf(else_lhs)->UnwrapRef()->Is<sem::F32>());
- EXPECT_TRUE(TypeOf(else_rhs)->Is<sem::F32>());
- EXPECT_TRUE(TypeOf(lhs)->UnwrapRef()->Is<sem::F32>());
- EXPECT_TRUE(TypeOf(rhs)->Is<sem::F32>());
- EXPECT_EQ(StmtOf(lhs), assign);
- EXPECT_EQ(StmtOf(rhs), assign);
- EXPECT_EQ(StmtOf(cond), stmt);
- EXPECT_EQ(StmtOf(else_cond), else_stmt);
- EXPECT_EQ(BlockOf(lhs), body);
- EXPECT_EQ(BlockOf(rhs), body);
- EXPECT_EQ(BlockOf(else_lhs), else_body);
- EXPECT_EQ(BlockOf(else_rhs), else_body);
+ auto* v = Var("v", ty.f32());
+ auto* else_lhs = Expr("v");
+ auto* else_rhs = Expr(2.3_f);
+
+ auto* else_body = Block(Assign(else_lhs, else_rhs));
+
+ auto* else_cond = Expr(true);
+ auto* else_stmt = If(else_cond, else_body);
+
+ auto* lhs = Expr("v");
+ auto* rhs = Expr(2.3_f);
+
+ auto* assign = Assign(lhs, rhs);
+ auto* body = Block(assign);
+ auto* cond = Expr(true);
+ auto* stmt = If(cond, body, Else(else_stmt));
+ WrapInFunction(v, stmt);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+
+ ASSERT_NE(TypeOf(stmt->condition), nullptr);
+ ASSERT_NE(TypeOf(else_lhs), nullptr);
+ ASSERT_NE(TypeOf(else_rhs), nullptr);
+ ASSERT_NE(TypeOf(lhs), nullptr);
+ ASSERT_NE(TypeOf(rhs), nullptr);
+ EXPECT_TRUE(TypeOf(stmt->condition)->Is<sem::Bool>());
+ EXPECT_TRUE(TypeOf(else_lhs)->UnwrapRef()->Is<sem::F32>());
+ EXPECT_TRUE(TypeOf(else_rhs)->Is<sem::F32>());
+ EXPECT_TRUE(TypeOf(lhs)->UnwrapRef()->Is<sem::F32>());
+ EXPECT_TRUE(TypeOf(rhs)->Is<sem::F32>());
+ EXPECT_EQ(StmtOf(lhs), assign);
+ EXPECT_EQ(StmtOf(rhs), assign);
+ EXPECT_EQ(StmtOf(cond), stmt);
+ EXPECT_EQ(StmtOf(else_cond), else_stmt);
+ EXPECT_EQ(BlockOf(lhs), body);
+ EXPECT_EQ(BlockOf(rhs), body);
+ EXPECT_EQ(BlockOf(else_lhs), else_body);
+ EXPECT_EQ(BlockOf(else_rhs), else_body);
}
TEST_F(ResolverTest, Stmt_Loop) {
- auto* v = Var("v", ty.f32());
- auto* body_lhs = Expr("v");
- auto* body_rhs = Expr(2.3f);
-
- auto* body = Block(Assign(body_lhs, body_rhs), Break());
- auto* continuing_lhs = Expr("v");
- auto* continuing_rhs = Expr(2.3f);
-
- auto* continuing = Block(Assign(continuing_lhs, continuing_rhs));
- auto* stmt = Loop(body, continuing);
- WrapInFunction(v, stmt);
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
-
- ASSERT_NE(TypeOf(body_lhs), nullptr);
- ASSERT_NE(TypeOf(body_rhs), nullptr);
- ASSERT_NE(TypeOf(continuing_lhs), nullptr);
- ASSERT_NE(TypeOf(continuing_rhs), nullptr);
- EXPECT_TRUE(TypeOf(body_lhs)->UnwrapRef()->Is<sem::F32>());
- EXPECT_TRUE(TypeOf(body_rhs)->Is<sem::F32>());
- EXPECT_TRUE(TypeOf(continuing_lhs)->UnwrapRef()->Is<sem::F32>());
- EXPECT_TRUE(TypeOf(continuing_rhs)->Is<sem::F32>());
- EXPECT_EQ(BlockOf(body_lhs), body);
- EXPECT_EQ(BlockOf(body_rhs), body);
- EXPECT_EQ(BlockOf(continuing_lhs), continuing);
- EXPECT_EQ(BlockOf(continuing_rhs), continuing);
+ auto* v = Var("v", ty.f32());
+ auto* body_lhs = Expr("v");
+ auto* body_rhs = Expr(2.3_f);
+
+ auto* body = Block(Assign(body_lhs, body_rhs), Break());
+ auto* continuing_lhs = Expr("v");
+ auto* continuing_rhs = Expr(2.3_f);
+
+ auto* continuing = Block(Assign(continuing_lhs, continuing_rhs));
+ auto* stmt = Loop(body, continuing);
+ WrapInFunction(v, stmt);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+
+ ASSERT_NE(TypeOf(body_lhs), nullptr);
+ ASSERT_NE(TypeOf(body_rhs), nullptr);
+ ASSERT_NE(TypeOf(continuing_lhs), nullptr);
+ ASSERT_NE(TypeOf(continuing_rhs), nullptr);
+ EXPECT_TRUE(TypeOf(body_lhs)->UnwrapRef()->Is<sem::F32>());
+ EXPECT_TRUE(TypeOf(body_rhs)->Is<sem::F32>());
+ EXPECT_TRUE(TypeOf(continuing_lhs)->UnwrapRef()->Is<sem::F32>());
+ EXPECT_TRUE(TypeOf(continuing_rhs)->Is<sem::F32>());
+ EXPECT_EQ(BlockOf(body_lhs), body);
+ EXPECT_EQ(BlockOf(body_rhs), body);
+ EXPECT_EQ(BlockOf(continuing_lhs), continuing);
+ EXPECT_EQ(BlockOf(continuing_rhs), continuing);
}
TEST_F(ResolverTest, Stmt_Return) {
- auto* cond = Expr(2);
+ auto* cond = Expr(2_i);
- auto* ret = Return(cond);
- Func("test", {}, ty.i32(), {ret}, {});
+ auto* ret = Return(cond);
+ Func("test", {}, ty.i32(), {ret}, {});
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(cond), nullptr);
- EXPECT_TRUE(TypeOf(cond)->Is<sem::I32>());
+ ASSERT_NE(TypeOf(cond), nullptr);
+ EXPECT_TRUE(TypeOf(cond)->Is<sem::I32>());
}
TEST_F(ResolverTest, Stmt_Return_WithoutValue) {
- auto* ret = Return();
- WrapInFunction(ret);
+ auto* ret = Return();
+ WrapInFunction(ret);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverTest, Stmt_Switch) {
- auto* v = Var("v", ty.f32());
- auto* lhs = Expr("v");
- auto* rhs = Expr(2.3f);
- auto* case_block = Block(Assign(lhs, rhs));
- auto* stmt = Switch(Expr(2), Case(Expr(3), case_block), DefaultCase());
- WrapInFunction(v, stmt);
+ auto* v = Var("v", ty.f32());
+ auto* lhs = Expr("v");
+ auto* rhs = Expr(2.3_f);
+ auto* case_block = Block(Assign(lhs, rhs));
+ auto* stmt = Switch(Expr(2_i), Case(Expr(3_i), case_block), DefaultCase());
+ WrapInFunction(v, stmt);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(stmt->condition), nullptr);
- ASSERT_NE(TypeOf(lhs), nullptr);
- ASSERT_NE(TypeOf(rhs), nullptr);
+ ASSERT_NE(TypeOf(stmt->condition), nullptr);
+ ASSERT_NE(TypeOf(lhs), nullptr);
+ ASSERT_NE(TypeOf(rhs), nullptr);
- EXPECT_TRUE(TypeOf(stmt->condition)->Is<sem::I32>());
- EXPECT_TRUE(TypeOf(lhs)->UnwrapRef()->Is<sem::F32>());
- EXPECT_TRUE(TypeOf(rhs)->Is<sem::F32>());
- EXPECT_EQ(BlockOf(lhs), case_block);
- EXPECT_EQ(BlockOf(rhs), case_block);
+ EXPECT_TRUE(TypeOf(stmt->condition)->Is<sem::I32>());
+ EXPECT_TRUE(TypeOf(lhs)->UnwrapRef()->Is<sem::F32>());
+ EXPECT_TRUE(TypeOf(rhs)->Is<sem::F32>());
+ EXPECT_EQ(BlockOf(lhs), case_block);
+ EXPECT_EQ(BlockOf(rhs), case_block);
}
TEST_F(ResolverTest, Stmt_Call) {
- ast::VariableList params;
- Func("my_func", params, ty.void_(), {Return()}, ast::AttributeList{});
+ ast::VariableList params;
+ Func("my_func", params, ty.void_(), {Return()}, ast::AttributeList{});
- auto* expr = Call("my_func");
+ auto* expr = Call("my_func");
- auto* call = CallStmt(expr);
- WrapInFunction(call);
+ auto* call = CallStmt(expr);
+ WrapInFunction(call);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(expr), nullptr);
- EXPECT_TRUE(TypeOf(expr)->Is<sem::Void>());
- EXPECT_EQ(StmtOf(expr), call);
+ ASSERT_NE(TypeOf(expr), nullptr);
+ EXPECT_TRUE(TypeOf(expr)->Is<sem::Void>());
+ EXPECT_EQ(StmtOf(expr), call);
}
TEST_F(ResolverTest, Stmt_VariableDecl) {
- auto* var = Var("my_var", ty.i32(), ast::StorageClass::kNone, Expr(2));
- auto* init = var->constructor;
+ auto* var = Var("my_var", ty.i32(), ast::StorageClass::kNone, Expr(2_i));
+ auto* init = var->constructor;
- auto* decl = Decl(var);
- WrapInFunction(decl);
+ auto* decl = Decl(var);
+ WrapInFunction(decl);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(init), nullptr);
- EXPECT_TRUE(TypeOf(init)->Is<sem::I32>());
+ ASSERT_NE(TypeOf(init), nullptr);
+ EXPECT_TRUE(TypeOf(init)->Is<sem::I32>());
}
TEST_F(ResolverTest, Stmt_VariableDecl_Alias) {
- auto* my_int = Alias("MyInt", ty.i32());
- auto* var = Var("my_var", ty.Of(my_int), ast::StorageClass::kNone, Expr(2));
- auto* init = var->constructor;
+ auto* my_int = Alias("MyInt", ty.i32());
+ auto* var = Var("my_var", ty.Of(my_int), ast::StorageClass::kNone, Expr(2_i));
+ auto* init = var->constructor;
- auto* decl = Decl(var);
- WrapInFunction(decl);
+ auto* decl = Decl(var);
+ WrapInFunction(decl);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(init), nullptr);
- EXPECT_TRUE(TypeOf(init)->Is<sem::I32>());
+ ASSERT_NE(TypeOf(init), nullptr);
+ EXPECT_TRUE(TypeOf(init)->Is<sem::I32>());
}
TEST_F(ResolverTest, Stmt_VariableDecl_ModuleScope) {
- auto* init = Expr(2);
- Global("my_var", ty.i32(), ast::StorageClass::kPrivate, init);
+ auto* init = Expr(2_i);
+ Global("my_var", ty.i32(), ast::StorageClass::kPrivate, init);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(init), nullptr);
- EXPECT_TRUE(TypeOf(init)->Is<sem::I32>());
- EXPECT_EQ(StmtOf(init), nullptr);
+ ASSERT_NE(TypeOf(init), nullptr);
+ EXPECT_TRUE(TypeOf(init)->Is<sem::I32>());
+ EXPECT_EQ(StmtOf(init), nullptr);
}
TEST_F(ResolverTest, Stmt_VariableDecl_OuterScopeAfterInnerScope) {
- // fn func_i32() {
- // {
- // var foo : i32 = 2;
- // var bar : i32 = foo;
- // }
- // var foo : f32 = 2.0;
- // var bar : f32 = foo;
- // }
-
- ast::VariableList params;
-
- // Declare i32 "foo" inside a block
- auto* foo_i32 = Var("foo", ty.i32(), ast::StorageClass::kNone, Expr(2));
- auto* foo_i32_init = foo_i32->constructor;
- auto* foo_i32_decl = Decl(foo_i32);
-
- // Reference "foo" inside the block
- auto* bar_i32 = Var("bar", ty.i32(), ast::StorageClass::kNone, Expr("foo"));
- auto* bar_i32_init = bar_i32->constructor;
- auto* bar_i32_decl = Decl(bar_i32);
-
- auto* inner = Block(foo_i32_decl, bar_i32_decl);
-
- // Declare f32 "foo" at function scope
- auto* foo_f32 = Var("foo", ty.f32(), ast::StorageClass::kNone, Expr(2.f));
- auto* foo_f32_init = foo_f32->constructor;
- auto* foo_f32_decl = Decl(foo_f32);
-
- // Reference "foo" at function scope
- auto* bar_f32 = Var("bar", ty.f32(), ast::StorageClass::kNone, Expr("foo"));
- auto* bar_f32_init = bar_f32->constructor;
- auto* bar_f32_decl = Decl(bar_f32);
-
- Func("func", params, ty.void_(), {inner, foo_f32_decl, bar_f32_decl},
- ast::AttributeList{});
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(foo_i32_init), nullptr);
- EXPECT_TRUE(TypeOf(foo_i32_init)->Is<sem::I32>());
- ASSERT_NE(TypeOf(foo_f32_init), nullptr);
- EXPECT_TRUE(TypeOf(foo_f32_init)->Is<sem::F32>());
- ASSERT_NE(TypeOf(bar_i32_init), nullptr);
- EXPECT_TRUE(TypeOf(bar_i32_init)->UnwrapRef()->Is<sem::I32>());
- ASSERT_NE(TypeOf(bar_f32_init), nullptr);
- EXPECT_TRUE(TypeOf(bar_f32_init)->UnwrapRef()->Is<sem::F32>());
- EXPECT_EQ(StmtOf(foo_i32_init), foo_i32_decl);
- EXPECT_EQ(StmtOf(bar_i32_init), bar_i32_decl);
- EXPECT_EQ(StmtOf(foo_f32_init), foo_f32_decl);
- EXPECT_EQ(StmtOf(bar_f32_init), bar_f32_decl);
- EXPECT_TRUE(CheckVarUsers(foo_i32, {bar_i32->constructor}));
- EXPECT_TRUE(CheckVarUsers(foo_f32, {bar_f32->constructor}));
- ASSERT_NE(VarOf(bar_i32->constructor), nullptr);
- EXPECT_EQ(VarOf(bar_i32->constructor)->Declaration(), foo_i32);
- ASSERT_NE(VarOf(bar_f32->constructor), nullptr);
- EXPECT_EQ(VarOf(bar_f32->constructor)->Declaration(), foo_f32);
+ // fn func_i32() {
+ // {
+ // var foo : i32 = 2;
+ // var bar : i32 = foo;
+ // }
+ // var foo : f32 = 2.0;
+ // var bar : f32 = foo;
+ // }
+
+ ast::VariableList params;
+
+ // Declare i32 "foo" inside a block
+ auto* foo_i32 = Var("foo", ty.i32(), ast::StorageClass::kNone, Expr(2_i));
+ auto* foo_i32_init = foo_i32->constructor;
+ auto* foo_i32_decl = Decl(foo_i32);
+
+ // Reference "foo" inside the block
+ auto* bar_i32 = Var("bar", ty.i32(), ast::StorageClass::kNone, Expr("foo"));
+ auto* bar_i32_init = bar_i32->constructor;
+ auto* bar_i32_decl = Decl(bar_i32);
+
+ auto* inner = Block(foo_i32_decl, bar_i32_decl);
+
+ // Declare f32 "foo" at function scope
+ auto* foo_f32 = Var("foo", ty.f32(), ast::StorageClass::kNone, Expr(2_f));
+ auto* foo_f32_init = foo_f32->constructor;
+ auto* foo_f32_decl = Decl(foo_f32);
+
+ // Reference "foo" at function scope
+ auto* bar_f32 = Var("bar", ty.f32(), ast::StorageClass::kNone, Expr("foo"));
+ auto* bar_f32_init = bar_f32->constructor;
+ auto* bar_f32_decl = Decl(bar_f32);
+
+ Func("func", params, ty.void_(), {inner, foo_f32_decl, bar_f32_decl}, ast::AttributeList{});
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_NE(TypeOf(foo_i32_init), nullptr);
+ EXPECT_TRUE(TypeOf(foo_i32_init)->Is<sem::I32>());
+ ASSERT_NE(TypeOf(foo_f32_init), nullptr);
+ EXPECT_TRUE(TypeOf(foo_f32_init)->Is<sem::F32>());
+ ASSERT_NE(TypeOf(bar_i32_init), nullptr);
+ EXPECT_TRUE(TypeOf(bar_i32_init)->UnwrapRef()->Is<sem::I32>());
+ ASSERT_NE(TypeOf(bar_f32_init), nullptr);
+ EXPECT_TRUE(TypeOf(bar_f32_init)->UnwrapRef()->Is<sem::F32>());
+ EXPECT_EQ(StmtOf(foo_i32_init), foo_i32_decl);
+ EXPECT_EQ(StmtOf(bar_i32_init), bar_i32_decl);
+ EXPECT_EQ(StmtOf(foo_f32_init), foo_f32_decl);
+ EXPECT_EQ(StmtOf(bar_f32_init), bar_f32_decl);
+ EXPECT_TRUE(CheckVarUsers(foo_i32, {bar_i32->constructor}));
+ EXPECT_TRUE(CheckVarUsers(foo_f32, {bar_f32->constructor}));
+ ASSERT_NE(VarOf(bar_i32->constructor), nullptr);
+ EXPECT_EQ(VarOf(bar_i32->constructor)->Declaration(), foo_i32);
+ ASSERT_NE(VarOf(bar_f32->constructor), nullptr);
+ EXPECT_EQ(VarOf(bar_f32->constructor)->Declaration(), foo_f32);
}
TEST_F(ResolverTest, Stmt_VariableDecl_ModuleScopeAfterFunctionScope) {
- // fn func_i32() {
- // var foo : i32 = 2;
- // }
- // var foo : f32 = 2.0;
- // fn func_f32() {
- // var bar : f32 = foo;
- // }
-
- ast::VariableList params;
-
- // Declare i32 "foo" inside a function
- auto* fn_i32 = Var("foo", ty.i32(), ast::StorageClass::kNone, Expr(2));
- auto* fn_i32_init = fn_i32->constructor;
- auto* fn_i32_decl = Decl(fn_i32);
- Func("func_i32", params, ty.void_(), {fn_i32_decl}, ast::AttributeList{});
-
- // Declare f32 "foo" at module scope
- auto* mod_f32 = Var("foo", ty.f32(), ast::StorageClass::kPrivate, Expr(2.f));
- auto* mod_init = mod_f32->constructor;
- AST().AddGlobalVariable(mod_f32);
-
- // Reference "foo" in another function
- auto* fn_f32 = Var("bar", ty.f32(), ast::StorageClass::kNone, Expr("foo"));
- auto* fn_f32_init = fn_f32->constructor;
- auto* fn_f32_decl = Decl(fn_f32);
- Func("func_f32", params, ty.void_(), {fn_f32_decl}, ast::AttributeList{});
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(mod_init), nullptr);
- EXPECT_TRUE(TypeOf(mod_init)->Is<sem::F32>());
- ASSERT_NE(TypeOf(fn_i32_init), nullptr);
- EXPECT_TRUE(TypeOf(fn_i32_init)->Is<sem::I32>());
- ASSERT_NE(TypeOf(fn_f32_init), nullptr);
- EXPECT_TRUE(TypeOf(fn_f32_init)->UnwrapRef()->Is<sem::F32>());
- EXPECT_EQ(StmtOf(fn_i32_init), fn_i32_decl);
- EXPECT_EQ(StmtOf(mod_init), nullptr);
- EXPECT_EQ(StmtOf(fn_f32_init), fn_f32_decl);
- EXPECT_TRUE(CheckVarUsers(fn_i32, {}));
- EXPECT_TRUE(CheckVarUsers(mod_f32, {fn_f32->constructor}));
- ASSERT_NE(VarOf(fn_f32->constructor), nullptr);
- EXPECT_EQ(VarOf(fn_f32->constructor)->Declaration(), mod_f32);
+ // fn func_i32() {
+ // var foo : i32 = 2;
+ // }
+ // var foo : f32 = 2.0;
+ // fn func_f32() {
+ // var bar : f32 = foo;
+ // }
+
+ ast::VariableList params;
+
+ // Declare i32 "foo" inside a function
+ auto* fn_i32 = Var("foo", ty.i32(), ast::StorageClass::kNone, Expr(2_i));
+ auto* fn_i32_init = fn_i32->constructor;
+ auto* fn_i32_decl = Decl(fn_i32);
+ Func("func_i32", params, ty.void_(), {fn_i32_decl}, ast::AttributeList{});
+
+ // Declare f32 "foo" at module scope
+ auto* mod_f32 = Var("foo", ty.f32(), ast::StorageClass::kPrivate, Expr(2_f));
+ auto* mod_init = mod_f32->constructor;
+ AST().AddGlobalVariable(mod_f32);
+
+ // Reference "foo" in another function
+ auto* fn_f32 = Var("bar", ty.f32(), ast::StorageClass::kNone, Expr("foo"));
+ auto* fn_f32_init = fn_f32->constructor;
+ auto* fn_f32_decl = Decl(fn_f32);
+ Func("func_f32", params, ty.void_(), {fn_f32_decl}, ast::AttributeList{});
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_NE(TypeOf(mod_init), nullptr);
+ EXPECT_TRUE(TypeOf(mod_init)->Is<sem::F32>());
+ ASSERT_NE(TypeOf(fn_i32_init), nullptr);
+ EXPECT_TRUE(TypeOf(fn_i32_init)->Is<sem::I32>());
+ ASSERT_NE(TypeOf(fn_f32_init), nullptr);
+ EXPECT_TRUE(TypeOf(fn_f32_init)->UnwrapRef()->Is<sem::F32>());
+ EXPECT_EQ(StmtOf(fn_i32_init), fn_i32_decl);
+ EXPECT_EQ(StmtOf(mod_init), nullptr);
+ EXPECT_EQ(StmtOf(fn_f32_init), fn_f32_decl);
+ EXPECT_TRUE(CheckVarUsers(fn_i32, {}));
+ EXPECT_TRUE(CheckVarUsers(mod_f32, {fn_f32->constructor}));
+ ASSERT_NE(VarOf(fn_f32->constructor), nullptr);
+ EXPECT_EQ(VarOf(fn_f32->constructor)->Declaration(), mod_f32);
}
TEST_F(ResolverTest, ArraySize_UnsignedLiteral) {
- // var<private> a : array<f32, 10u>;
- auto* a =
- Global("a", ty.array(ty.f32(), Expr(10u)), ast::StorageClass::kPrivate);
+ // var<private> a : array<f32, 10u>;
+ auto* a = Global("a", ty.array(ty.f32(), Expr(10_u)), ast::StorageClass::kPrivate);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(a), nullptr);
- auto* ref = TypeOf(a)->As<sem::Reference>();
- ASSERT_NE(ref, nullptr);
- auto* ary = ref->StoreType()->As<sem::Array>();
- EXPECT_EQ(ary->Count(), 10u);
+ ASSERT_NE(TypeOf(a), nullptr);
+ auto* ref = TypeOf(a)->As<sem::Reference>();
+ ASSERT_NE(ref, nullptr);
+ auto* ary = ref->StoreType()->As<sem::Array>();
+ EXPECT_EQ(ary->Count(), 10u);
}
TEST_F(ResolverTest, ArraySize_SignedLiteral) {
- // var<private> a : array<f32, 10>;
- auto* a =
- Global("a", ty.array(ty.f32(), Expr(10)), ast::StorageClass::kPrivate);
+ // var<private> a : array<f32, 10i>;
+ auto* a = Global("a", ty.array(ty.f32(), Expr(10_i)), ast::StorageClass::kPrivate);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(a), nullptr);
- auto* ref = TypeOf(a)->As<sem::Reference>();
- ASSERT_NE(ref, nullptr);
- auto* ary = ref->StoreType()->As<sem::Array>();
- EXPECT_EQ(ary->Count(), 10u);
+ ASSERT_NE(TypeOf(a), nullptr);
+ auto* ref = TypeOf(a)->As<sem::Reference>();
+ ASSERT_NE(ref, nullptr);
+ auto* ary = ref->StoreType()->As<sem::Array>();
+ EXPECT_EQ(ary->Count(), 10u);
}
TEST_F(ResolverTest, ArraySize_UnsignedConstant) {
- // let size = 0u;
- // var<private> a : array<f32, 10u>;
- GlobalConst("size", nullptr, Expr(10u));
- auto* a = Global("a", ty.array(ty.f32(), Expr("size")),
- ast::StorageClass::kPrivate);
+    // let size = 10u;
+ // var<private> a : array<f32, size>;
+ GlobalConst("size", nullptr, Expr(10_u));
+ auto* a = Global("a", ty.array(ty.f32(), Expr("size")), ast::StorageClass::kPrivate);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(a), nullptr);
- auto* ref = TypeOf(a)->As<sem::Reference>();
- ASSERT_NE(ref, nullptr);
- auto* ary = ref->StoreType()->As<sem::Array>();
- EXPECT_EQ(ary->Count(), 10u);
+ ASSERT_NE(TypeOf(a), nullptr);
+ auto* ref = TypeOf(a)->As<sem::Reference>();
+ ASSERT_NE(ref, nullptr);
+ auto* ary = ref->StoreType()->As<sem::Array>();
+ EXPECT_EQ(ary->Count(), 10u);
}
TEST_F(ResolverTest, ArraySize_SignedConstant) {
- // let size = 0;
- // var<private> a : array<f32, 10>;
- GlobalConst("size", nullptr, Expr(10));
- auto* a = Global("a", ty.array(ty.f32(), Expr("size")),
- ast::StorageClass::kPrivate);
+    // let size = 10i;
+ // var<private> a : array<f32, size>;
+ GlobalConst("size", nullptr, Expr(10_i));
+ auto* a = Global("a", ty.array(ty.f32(), Expr("size")), ast::StorageClass::kPrivate);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(a), nullptr);
- auto* ref = TypeOf(a)->As<sem::Reference>();
- ASSERT_NE(ref, nullptr);
- auto* ary = ref->StoreType()->As<sem::Array>();
- EXPECT_EQ(ary->Count(), 10u);
+ ASSERT_NE(TypeOf(a), nullptr);
+ auto* ref = TypeOf(a)->As<sem::Reference>();
+ ASSERT_NE(ref, nullptr);
+ auto* ary = ref->StoreType()->As<sem::Array>();
+ EXPECT_EQ(ary->Count(), 10u);
}
TEST_F(ResolverTest, Expr_Bitcast) {
- Global("name", ty.f32(), ast::StorageClass::kPrivate);
+ Global("name", ty.f32(), ast::StorageClass::kPrivate);
- auto* bitcast = create<ast::BitcastExpression>(ty.f32(), Expr("name"));
- WrapInFunction(bitcast);
+ auto* bitcast = create<ast::BitcastExpression>(ty.f32(), Expr("name"));
+ WrapInFunction(bitcast);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(bitcast), nullptr);
- EXPECT_TRUE(TypeOf(bitcast)->Is<sem::F32>());
+ ASSERT_NE(TypeOf(bitcast), nullptr);
+ EXPECT_TRUE(TypeOf(bitcast)->Is<sem::F32>());
}
TEST_F(ResolverTest, Expr_Call) {
- ast::VariableList params;
- Func("my_func", params, ty.f32(), {Return(0.0f)}, ast::AttributeList{});
+ ast::VariableList params;
+ Func("my_func", params, ty.f32(), {Return(0_f)}, ast::AttributeList{});
- auto* call = Call("my_func");
- WrapInFunction(call);
+ auto* call = Call("my_func");
+ WrapInFunction(call);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(call), nullptr);
- EXPECT_TRUE(TypeOf(call)->Is<sem::F32>());
+ ASSERT_NE(TypeOf(call), nullptr);
+ EXPECT_TRUE(TypeOf(call)->Is<sem::F32>());
}
TEST_F(ResolverTest, Expr_Call_InBinaryOp) {
- ast::VariableList params;
- Func("func", params, ty.f32(), {Return(0.0f)}, ast::AttributeList{});
+ ast::VariableList params;
+ Func("func", params, ty.f32(), {Return(0_f)}, ast::AttributeList{});
- auto* expr = Add(Call("func"), Call("func"));
- WrapInFunction(expr);
+ auto* expr = Add(Call("func"), Call("func"));
+ WrapInFunction(expr);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(expr), nullptr);
- EXPECT_TRUE(TypeOf(expr)->Is<sem::F32>());
+ ASSERT_NE(TypeOf(expr), nullptr);
+ EXPECT_TRUE(TypeOf(expr)->Is<sem::F32>());
}
TEST_F(ResolverTest, Expr_Call_WithParams) {
- Func("my_func", {Param(Sym(), ty.f32())}, ty.f32(),
- {
- Return(1.2f),
- });
+ Func("my_func", {Param(Sym(), ty.f32())}, ty.f32(),
+ {
+ Return(1.2_f),
+ });
- auto* param = Expr(2.4f);
+ auto* param = Expr(2.4_f);
- auto* call = Call("my_func", param);
- WrapInFunction(call);
+ auto* call = Call("my_func", param);
+ WrapInFunction(call);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(param), nullptr);
- EXPECT_TRUE(TypeOf(param)->Is<sem::F32>());
+ ASSERT_NE(TypeOf(param), nullptr);
+ EXPECT_TRUE(TypeOf(param)->Is<sem::F32>());
}
TEST_F(ResolverTest, Expr_Call_Builtin) {
- auto* call = Call("round", 2.4f);
- WrapInFunction(call);
+ auto* call = Call("round", 2.4_f);
+ WrapInFunction(call);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(call), nullptr);
- EXPECT_TRUE(TypeOf(call)->Is<sem::F32>());
+ ASSERT_NE(TypeOf(call), nullptr);
+ EXPECT_TRUE(TypeOf(call)->Is<sem::F32>());
}
TEST_F(ResolverTest, Expr_Cast) {
- Global("name", ty.f32(), ast::StorageClass::kPrivate);
+ Global("name", ty.f32(), ast::StorageClass::kPrivate);
- auto* cast = Construct(ty.f32(), "name");
- WrapInFunction(cast);
+ auto* cast = Construct(ty.f32(), "name");
+ WrapInFunction(cast);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(cast), nullptr);
- EXPECT_TRUE(TypeOf(cast)->Is<sem::F32>());
+ ASSERT_NE(TypeOf(cast), nullptr);
+ EXPECT_TRUE(TypeOf(cast)->Is<sem::F32>());
}
TEST_F(ResolverTest, Expr_Constructor_Scalar) {
- auto* s = Expr(1.0f);
- WrapInFunction(s);
+ auto* s = Expr(1_f);
+ WrapInFunction(s);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(s), nullptr);
- EXPECT_TRUE(TypeOf(s)->Is<sem::F32>());
+ ASSERT_NE(TypeOf(s), nullptr);
+ EXPECT_TRUE(TypeOf(s)->Is<sem::F32>());
}
TEST_F(ResolverTest, Expr_Constructor_Type_Vec2) {
- auto* tc = vec2<f32>(1.0f, 1.0f);
- WrapInFunction(tc);
+ auto* tc = vec2<f32>(1_f, 1_f);
+ WrapInFunction(tc);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(tc), nullptr);
- ASSERT_TRUE(TypeOf(tc)->Is<sem::Vector>());
- EXPECT_TRUE(TypeOf(tc)->As<sem::Vector>()->type()->Is<sem::F32>());
- EXPECT_EQ(TypeOf(tc)->As<sem::Vector>()->Width(), 2u);
+ ASSERT_NE(TypeOf(tc), nullptr);
+ ASSERT_TRUE(TypeOf(tc)->Is<sem::Vector>());
+ EXPECT_TRUE(TypeOf(tc)->As<sem::Vector>()->type()->Is<sem::F32>());
+ EXPECT_EQ(TypeOf(tc)->As<sem::Vector>()->Width(), 2u);
}
TEST_F(ResolverTest, Expr_Constructor_Type_Vec3) {
- auto* tc = vec3<f32>(1.0f, 1.0f, 1.0f);
- WrapInFunction(tc);
+ auto* tc = vec3<f32>(1_f, 1_f, 1_f);
+ WrapInFunction(tc);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(tc), nullptr);
- ASSERT_TRUE(TypeOf(tc)->Is<sem::Vector>());
- EXPECT_TRUE(TypeOf(tc)->As<sem::Vector>()->type()->Is<sem::F32>());
- EXPECT_EQ(TypeOf(tc)->As<sem::Vector>()->Width(), 3u);
+ ASSERT_NE(TypeOf(tc), nullptr);
+ ASSERT_TRUE(TypeOf(tc)->Is<sem::Vector>());
+ EXPECT_TRUE(TypeOf(tc)->As<sem::Vector>()->type()->Is<sem::F32>());
+ EXPECT_EQ(TypeOf(tc)->As<sem::Vector>()->Width(), 3u);
}
TEST_F(ResolverTest, Expr_Constructor_Type_Vec4) {
- auto* tc = vec4<f32>(1.0f, 1.0f, 1.0f, 1.0f);
- WrapInFunction(tc);
+ auto* tc = vec4<f32>(1_f, 1_f, 1_f, 1_f);
+ WrapInFunction(tc);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(tc), nullptr);
- ASSERT_TRUE(TypeOf(tc)->Is<sem::Vector>());
- EXPECT_TRUE(TypeOf(tc)->As<sem::Vector>()->type()->Is<sem::F32>());
- EXPECT_EQ(TypeOf(tc)->As<sem::Vector>()->Width(), 4u);
+ ASSERT_NE(TypeOf(tc), nullptr);
+ ASSERT_TRUE(TypeOf(tc)->Is<sem::Vector>());
+ EXPECT_TRUE(TypeOf(tc)->As<sem::Vector>()->type()->Is<sem::F32>());
+ EXPECT_EQ(TypeOf(tc)->As<sem::Vector>()->Width(), 4u);
}
TEST_F(ResolverTest, Expr_Identifier_GlobalVariable) {
- auto* my_var = Global("my_var", ty.f32(), ast::StorageClass::kPrivate);
+ auto* my_var = Global("my_var", ty.f32(), ast::StorageClass::kPrivate);
- auto* ident = Expr("my_var");
- WrapInFunction(ident);
+ auto* ident = Expr("my_var");
+ WrapInFunction(ident);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(ident), nullptr);
- ASSERT_TRUE(TypeOf(ident)->Is<sem::Reference>());
- EXPECT_TRUE(TypeOf(ident)->UnwrapRef()->Is<sem::F32>());
- EXPECT_TRUE(CheckVarUsers(my_var, {ident}));
- ASSERT_NE(VarOf(ident), nullptr);
- EXPECT_EQ(VarOf(ident)->Declaration(), my_var);
+ ASSERT_NE(TypeOf(ident), nullptr);
+ ASSERT_TRUE(TypeOf(ident)->Is<sem::Reference>());
+ EXPECT_TRUE(TypeOf(ident)->UnwrapRef()->Is<sem::F32>());
+ EXPECT_TRUE(CheckVarUsers(my_var, {ident}));
+ ASSERT_NE(VarOf(ident), nullptr);
+ EXPECT_EQ(VarOf(ident)->Declaration(), my_var);
}
TEST_F(ResolverTest, Expr_Identifier_GlobalConstant) {
- auto* my_var = GlobalConst("my_var", ty.f32(), Construct(ty.f32()));
+ auto* my_var = GlobalConst("my_var", ty.f32(), Construct(ty.f32()));
- auto* ident = Expr("my_var");
- WrapInFunction(ident);
+ auto* ident = Expr("my_var");
+ WrapInFunction(ident);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(ident), nullptr);
- EXPECT_TRUE(TypeOf(ident)->Is<sem::F32>());
- EXPECT_TRUE(CheckVarUsers(my_var, {ident}));
- ASSERT_NE(VarOf(ident), nullptr);
- EXPECT_EQ(VarOf(ident)->Declaration(), my_var);
+ ASSERT_NE(TypeOf(ident), nullptr);
+ EXPECT_TRUE(TypeOf(ident)->Is<sem::F32>());
+ EXPECT_TRUE(CheckVarUsers(my_var, {ident}));
+ ASSERT_NE(VarOf(ident), nullptr);
+ EXPECT_EQ(VarOf(ident)->Declaration(), my_var);
}
TEST_F(ResolverTest, Expr_Identifier_FunctionVariable_Const) {
- auto* my_var_a = Expr("my_var");
- auto* var = Const("my_var", ty.f32(), Construct(ty.f32()));
- auto* decl = Decl(Var("b", ty.f32(), ast::StorageClass::kNone, my_var_a));
+ auto* my_var_a = Expr("my_var");
+ auto* var = Let("my_var", ty.f32(), Construct(ty.f32()));
+ auto* decl = Decl(Var("b", ty.f32(), ast::StorageClass::kNone, my_var_a));
- Func("my_func", ast::VariableList{}, ty.void_(),
- {
- Decl(var),
- decl,
- },
- ast::AttributeList{});
+ Func("my_func", ast::VariableList{}, ty.void_(),
+ {
+ Decl(var),
+ decl,
+ },
+ ast::AttributeList{});
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(my_var_a), nullptr);
- EXPECT_TRUE(TypeOf(my_var_a)->Is<sem::F32>());
- EXPECT_EQ(StmtOf(my_var_a), decl);
- EXPECT_TRUE(CheckVarUsers(var, {my_var_a}));
- ASSERT_NE(VarOf(my_var_a), nullptr);
- EXPECT_EQ(VarOf(my_var_a)->Declaration(), var);
+ ASSERT_NE(TypeOf(my_var_a), nullptr);
+ EXPECT_TRUE(TypeOf(my_var_a)->Is<sem::F32>());
+ EXPECT_EQ(StmtOf(my_var_a), decl);
+ EXPECT_TRUE(CheckVarUsers(var, {my_var_a}));
+ ASSERT_NE(VarOf(my_var_a), nullptr);
+ EXPECT_EQ(VarOf(my_var_a)->Declaration(), var);
}
TEST_F(ResolverTest, IndexAccessor_Dynamic_Ref_F32) {
- // var a : array<bool, 10> = 0;
- // var idx : f32 = f32();
- // var f : f32 = a[idx];
- auto* a = Var("a", ty.array<bool, 10>(), array<bool, 10>());
- auto* idx = Var("idx", ty.f32(), Construct(ty.f32()));
- auto* f = Var("f", ty.f32(), IndexAccessor("a", Expr(Source{{12, 34}}, idx)));
- Func("my_func", ast::VariableList{}, ty.void_(),
- {
- Decl(a),
- Decl(idx),
- Decl(f),
- },
- ast::AttributeList{});
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: index must be of type 'i32' or 'u32', found: 'f32'");
+    // var a : array<bool, 10u> = array<bool, 10u>();
+ // var idx : f32 = f32();
+ // var f : f32 = a[idx];
+ auto* a = Var("a", ty.array<bool, 10>(), array<bool, 10>());
+ auto* idx = Var("idx", ty.f32(), Construct(ty.f32()));
+ auto* f = Var("f", ty.f32(), IndexAccessor("a", Expr(Source{{12, 34}}, idx)));
+ Func("my_func", ast::VariableList{}, ty.void_(),
+ {
+ Decl(a),
+ Decl(idx),
+ Decl(f),
+ },
+ ast::AttributeList{});
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: index must be of type 'i32' or 'u32', found: 'f32'");
}
TEST_F(ResolverTest, Expr_Identifier_FunctionVariable) {
- auto* my_var_a = Expr("my_var");
- auto* my_var_b = Expr("my_var");
- auto* assign = Assign(my_var_a, my_var_b);
-
- auto* var = Var("my_var", ty.f32());
-
- Func("my_func", ast::VariableList{}, ty.void_(),
- {
- Decl(var),
- assign,
- },
- ast::AttributeList{});
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
-
- ASSERT_NE(TypeOf(my_var_a), nullptr);
- ASSERT_TRUE(TypeOf(my_var_a)->Is<sem::Reference>());
- EXPECT_TRUE(TypeOf(my_var_a)->UnwrapRef()->Is<sem::F32>());
- EXPECT_EQ(StmtOf(my_var_a), assign);
- ASSERT_NE(TypeOf(my_var_b), nullptr);
- ASSERT_TRUE(TypeOf(my_var_b)->Is<sem::Reference>());
- EXPECT_TRUE(TypeOf(my_var_b)->UnwrapRef()->Is<sem::F32>());
- EXPECT_EQ(StmtOf(my_var_b), assign);
- EXPECT_TRUE(CheckVarUsers(var, {my_var_a, my_var_b}));
- ASSERT_NE(VarOf(my_var_a), nullptr);
- EXPECT_EQ(VarOf(my_var_a)->Declaration(), var);
- ASSERT_NE(VarOf(my_var_b), nullptr);
- EXPECT_EQ(VarOf(my_var_b)->Declaration(), var);
+ auto* my_var_a = Expr("my_var");
+ auto* my_var_b = Expr("my_var");
+ auto* assign = Assign(my_var_a, my_var_b);
+
+ auto* var = Var("my_var", ty.f32());
+
+ Func("my_func", ast::VariableList{}, ty.void_(),
+ {
+ Decl(var),
+ assign,
+ },
+ ast::AttributeList{});
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+
+ ASSERT_NE(TypeOf(my_var_a), nullptr);
+ ASSERT_TRUE(TypeOf(my_var_a)->Is<sem::Reference>());
+ EXPECT_TRUE(TypeOf(my_var_a)->UnwrapRef()->Is<sem::F32>());
+ EXPECT_EQ(StmtOf(my_var_a), assign);
+ ASSERT_NE(TypeOf(my_var_b), nullptr);
+ ASSERT_TRUE(TypeOf(my_var_b)->Is<sem::Reference>());
+ EXPECT_TRUE(TypeOf(my_var_b)->UnwrapRef()->Is<sem::F32>());
+ EXPECT_EQ(StmtOf(my_var_b), assign);
+ EXPECT_TRUE(CheckVarUsers(var, {my_var_a, my_var_b}));
+ ASSERT_NE(VarOf(my_var_a), nullptr);
+ EXPECT_EQ(VarOf(my_var_a)->Declaration(), var);
+ ASSERT_NE(VarOf(my_var_b), nullptr);
+ EXPECT_EQ(VarOf(my_var_b)->Declaration(), var);
}
TEST_F(ResolverTest, Expr_Identifier_Function_Ptr) {
- auto* v = Expr("v");
- auto* p = Expr("p");
- auto* v_decl = Decl(Var("v", ty.f32()));
- auto* p_decl = Decl(
- Const("p", ty.pointer<f32>(ast::StorageClass::kFunction), AddressOf(v)));
- auto* assign = Assign(Deref(p), 1.23f);
- Func("my_func", ast::VariableList{}, ty.void_(),
- {
- v_decl,
- p_decl,
- assign,
- },
- ast::AttributeList{});
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
-
- ASSERT_NE(TypeOf(v), nullptr);
- ASSERT_TRUE(TypeOf(v)->Is<sem::Reference>());
- EXPECT_TRUE(TypeOf(v)->UnwrapRef()->Is<sem::F32>());
- EXPECT_EQ(StmtOf(v), p_decl);
- ASSERT_NE(TypeOf(p), nullptr);
- ASSERT_TRUE(TypeOf(p)->Is<sem::Pointer>());
- EXPECT_TRUE(TypeOf(p)->UnwrapPtr()->Is<sem::F32>());
- EXPECT_EQ(StmtOf(p), assign);
+ auto* v = Expr("v");
+ auto* p = Expr("p");
+ auto* v_decl = Decl(Var("v", ty.f32()));
+ auto* p_decl = Decl(Let("p", ty.pointer<f32>(ast::StorageClass::kFunction), AddressOf(v)));
+ auto* assign = Assign(Deref(p), 1.23_f);
+ Func("my_func", ast::VariableList{}, ty.void_(),
+ {
+ v_decl,
+ p_decl,
+ assign,
+ },
+ ast::AttributeList{});
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+
+ ASSERT_NE(TypeOf(v), nullptr);
+ ASSERT_TRUE(TypeOf(v)->Is<sem::Reference>());
+ EXPECT_TRUE(TypeOf(v)->UnwrapRef()->Is<sem::F32>());
+ EXPECT_EQ(StmtOf(v), p_decl);
+ ASSERT_NE(TypeOf(p), nullptr);
+ ASSERT_TRUE(TypeOf(p)->Is<sem::Pointer>());
+ EXPECT_TRUE(TypeOf(p)->UnwrapPtr()->Is<sem::F32>());
+ EXPECT_EQ(StmtOf(p), assign);
}
TEST_F(ResolverTest, Expr_Call_Function) {
- Func("my_func", ast::VariableList{}, ty.f32(), {Return(0.0f)},
- ast::AttributeList{});
+ Func("my_func", ast::VariableList{}, ty.f32(), {Return(0_f)}, ast::AttributeList{});
- auto* call = Call("my_func");
- WrapInFunction(call);
+ auto* call = Call("my_func");
+ WrapInFunction(call);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(call), nullptr);
- EXPECT_TRUE(TypeOf(call)->Is<sem::F32>());
+ ASSERT_NE(TypeOf(call), nullptr);
+ EXPECT_TRUE(TypeOf(call)->Is<sem::F32>());
}
TEST_F(ResolverTest, Expr_Identifier_Unknown) {
- auto* a = Expr("a");
- WrapInFunction(a);
+ auto* a = Expr("a");
+ WrapInFunction(a);
- EXPECT_FALSE(r()->Resolve());
+ EXPECT_FALSE(r()->Resolve());
}
TEST_F(ResolverTest, Function_Parameters) {
- auto* param_a = Param("a", ty.f32());
- auto* param_b = Param("b", ty.i32());
- auto* param_c = Param("c", ty.u32());
-
- auto* func = Func("my_func",
- ast::VariableList{
- param_a,
- param_b,
- param_c,
- },
- ty.void_(), {});
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
-
- auto* func_sem = Sem().Get(func);
- ASSERT_NE(func_sem, nullptr);
- EXPECT_EQ(func_sem->Parameters().size(), 3u);
- EXPECT_TRUE(func_sem->Parameters()[0]->Type()->Is<sem::F32>());
- EXPECT_TRUE(func_sem->Parameters()[1]->Type()->Is<sem::I32>());
- EXPECT_TRUE(func_sem->Parameters()[2]->Type()->Is<sem::U32>());
- EXPECT_EQ(func_sem->Parameters()[0]->Declaration(), param_a);
- EXPECT_EQ(func_sem->Parameters()[1]->Declaration(), param_b);
- EXPECT_EQ(func_sem->Parameters()[2]->Declaration(), param_c);
- EXPECT_TRUE(func_sem->ReturnType()->Is<sem::Void>());
+ auto* param_a = Param("a", ty.f32());
+ auto* param_b = Param("b", ty.i32());
+ auto* param_c = Param("c", ty.u32());
+
+ auto* func = Func("my_func",
+ ast::VariableList{
+ param_a,
+ param_b,
+ param_c,
+ },
+ ty.void_(), {});
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+
+ auto* func_sem = Sem().Get(func);
+ ASSERT_NE(func_sem, nullptr);
+ EXPECT_EQ(func_sem->Parameters().size(), 3u);
+ EXPECT_TRUE(func_sem->Parameters()[0]->Type()->Is<sem::F32>());
+ EXPECT_TRUE(func_sem->Parameters()[1]->Type()->Is<sem::I32>());
+ EXPECT_TRUE(func_sem->Parameters()[2]->Type()->Is<sem::U32>());
+ EXPECT_EQ(func_sem->Parameters()[0]->Declaration(), param_a);
+ EXPECT_EQ(func_sem->Parameters()[1]->Declaration(), param_b);
+ EXPECT_EQ(func_sem->Parameters()[2]->Declaration(), param_c);
+ EXPECT_TRUE(func_sem->ReturnType()->Is<sem::Void>());
}
TEST_F(ResolverTest, Function_RegisterInputOutputVariables) {
- auto* s = Structure("S", {Member("m", ty.u32())});
-
- auto* sb_var = Global("sb_var", ty.Of(s), ast::StorageClass::kStorage,
- ast::Access::kReadWrite,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(0),
- });
- auto* wg_var = Global("wg_var", ty.f32(), ast::StorageClass::kWorkgroup);
- auto* priv_var = Global("priv_var", ty.f32(), ast::StorageClass::kPrivate);
-
- auto* func = Func("my_func", ast::VariableList{}, ty.void_(),
- {
- Assign("wg_var", "wg_var"),
- Assign("sb_var", "sb_var"),
- Assign("priv_var", "priv_var"),
- });
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
-
- auto* func_sem = Sem().Get(func);
- ASSERT_NE(func_sem, nullptr);
- EXPECT_EQ(func_sem->Parameters().size(), 0u);
- EXPECT_TRUE(func_sem->ReturnType()->Is<sem::Void>());
-
- const auto& vars = func_sem->TransitivelyReferencedGlobals();
- ASSERT_EQ(vars.size(), 3u);
- EXPECT_EQ(vars[0]->Declaration(), wg_var);
- EXPECT_EQ(vars[1]->Declaration(), sb_var);
- EXPECT_EQ(vars[2]->Declaration(), priv_var);
+ auto* s = Structure("S", {Member("m", ty.u32())});
+
+ auto* sb_var = Global("sb_var", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kReadWrite,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(0),
+ });
+ auto* wg_var = Global("wg_var", ty.f32(), ast::StorageClass::kWorkgroup);
+ auto* priv_var = Global("priv_var", ty.f32(), ast::StorageClass::kPrivate);
+
+ auto* func = Func("my_func", ast::VariableList{}, ty.void_(),
+ {
+ Assign("wg_var", "wg_var"),
+ Assign("sb_var", "sb_var"),
+ Assign("priv_var", "priv_var"),
+ });
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+
+ auto* func_sem = Sem().Get(func);
+ ASSERT_NE(func_sem, nullptr);
+ EXPECT_EQ(func_sem->Parameters().size(), 0u);
+ EXPECT_TRUE(func_sem->ReturnType()->Is<sem::Void>());
+
+ const auto& vars = func_sem->TransitivelyReferencedGlobals();
+ ASSERT_EQ(vars.size(), 3u);
+ EXPECT_EQ(vars[0]->Declaration(), wg_var);
+ EXPECT_EQ(vars[1]->Declaration(), sb_var);
+ EXPECT_EQ(vars[2]->Declaration(), priv_var);
}
TEST_F(ResolverTest, Function_RegisterInputOutputVariables_SubFunction) {
- auto* s = Structure("S", {Member("m", ty.u32())});
-
- auto* sb_var = Global("sb_var", ty.Of(s), ast::StorageClass::kStorage,
- ast::Access::kReadWrite,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(0),
- });
- auto* wg_var = Global("wg_var", ty.f32(), ast::StorageClass::kWorkgroup);
- auto* priv_var = Global("priv_var", ty.f32(), ast::StorageClass::kPrivate);
-
- Func("my_func", ast::VariableList{}, ty.f32(),
- {Assign("wg_var", "wg_var"), Assign("sb_var", "sb_var"),
- Assign("priv_var", "priv_var"), Return(0.0f)},
- ast::AttributeList{});
-
- auto* func2 = Func("func", ast::VariableList{}, ty.void_(),
- {
- WrapInStatement(Call("my_func")),
- },
- ast::AttributeList{});
+ auto* s = Structure("S", {Member("m", ty.u32())});
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ auto* sb_var = Global("sb_var", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kReadWrite,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(0),
+ });
+ auto* wg_var = Global("wg_var", ty.f32(), ast::StorageClass::kWorkgroup);
+ auto* priv_var = Global("priv_var", ty.f32(), ast::StorageClass::kPrivate);
- auto* func2_sem = Sem().Get(func2);
- ASSERT_NE(func2_sem, nullptr);
- EXPECT_EQ(func2_sem->Parameters().size(), 0u);
+ Func("my_func", ast::VariableList{}, ty.f32(),
+ {Assign("wg_var", "wg_var"), Assign("sb_var", "sb_var"), Assign("priv_var", "priv_var"),
+ Return(0_f)},
+ ast::AttributeList{});
- const auto& vars = func2_sem->TransitivelyReferencedGlobals();
- ASSERT_EQ(vars.size(), 3u);
- EXPECT_EQ(vars[0]->Declaration(), wg_var);
- EXPECT_EQ(vars[1]->Declaration(), sb_var);
- EXPECT_EQ(vars[2]->Declaration(), priv_var);
+ auto* func2 = Func("func", ast::VariableList{}, ty.void_(),
+ {
+ WrapInStatement(Call("my_func")),
+ },
+ ast::AttributeList{});
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+
+ auto* func2_sem = Sem().Get(func2);
+ ASSERT_NE(func2_sem, nullptr);
+ EXPECT_EQ(func2_sem->Parameters().size(), 0u);
+
+ const auto& vars = func2_sem->TransitivelyReferencedGlobals();
+ ASSERT_EQ(vars.size(), 3u);
+ EXPECT_EQ(vars[0]->Declaration(), wg_var);
+ EXPECT_EQ(vars[1]->Declaration(), sb_var);
+ EXPECT_EQ(vars[2]->Declaration(), priv_var);
}
TEST_F(ResolverTest, Function_NotRegisterFunctionVariable) {
- auto* func = Func("my_func", ast::VariableList{}, ty.void_(),
- {
- Decl(Var("var", ty.f32())),
- Assign("var", 1.f),
- });
+ auto* func = Func("my_func", ast::VariableList{}, ty.void_(),
+ {
+ Decl(Var("var", ty.f32())),
+ Assign("var", 1_f),
+ });
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- auto* func_sem = Sem().Get(func);
- ASSERT_NE(func_sem, nullptr);
+ auto* func_sem = Sem().Get(func);
+ ASSERT_NE(func_sem, nullptr);
- EXPECT_EQ(func_sem->TransitivelyReferencedGlobals().size(), 0u);
- EXPECT_TRUE(func_sem->ReturnType()->Is<sem::Void>());
+ EXPECT_EQ(func_sem->TransitivelyReferencedGlobals().size(), 0u);
+ EXPECT_TRUE(func_sem->ReturnType()->Is<sem::Void>());
}
TEST_F(ResolverTest, Function_NotRegisterFunctionConstant) {
- auto* func = Func("my_func", ast::VariableList{}, ty.void_(),
- {
- Decl(Const("var", ty.f32(), Construct(ty.f32()))),
- });
+ auto* func = Func("my_func", ast::VariableList{}, ty.void_(),
+ {
+ Decl(Let("var", ty.f32(), Construct(ty.f32()))),
+ });
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- auto* func_sem = Sem().Get(func);
- ASSERT_NE(func_sem, nullptr);
+ auto* func_sem = Sem().Get(func);
+ ASSERT_NE(func_sem, nullptr);
- EXPECT_EQ(func_sem->TransitivelyReferencedGlobals().size(), 0u);
- EXPECT_TRUE(func_sem->ReturnType()->Is<sem::Void>());
+ EXPECT_EQ(func_sem->TransitivelyReferencedGlobals().size(), 0u);
+ EXPECT_TRUE(func_sem->ReturnType()->Is<sem::Void>());
}
TEST_F(ResolverTest, Function_NotRegisterFunctionParams) {
- auto* func = Func("my_func", {Const("var", ty.f32(), Construct(ty.f32()))},
- ty.void_(), {});
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ auto* func = Func("my_func", {Let("var", ty.f32(), Construct(ty.f32()))}, ty.void_(), {});
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- auto* func_sem = Sem().Get(func);
- ASSERT_NE(func_sem, nullptr);
+ auto* func_sem = Sem().Get(func);
+ ASSERT_NE(func_sem, nullptr);
- EXPECT_EQ(func_sem->TransitivelyReferencedGlobals().size(), 0u);
- EXPECT_TRUE(func_sem->ReturnType()->Is<sem::Void>());
+ EXPECT_EQ(func_sem->TransitivelyReferencedGlobals().size(), 0u);
+ EXPECT_TRUE(func_sem->ReturnType()->Is<sem::Void>());
}
TEST_F(ResolverTest, Function_CallSites) {
- auto* foo = Func("foo", ast::VariableList{}, ty.void_(), {});
+ auto* foo = Func("foo", ast::VariableList{}, ty.void_(), {});
- auto* call_1 = Call("foo");
- auto* call_2 = Call("foo");
- auto* bar = Func("bar", ast::VariableList{}, ty.void_(),
- {
- CallStmt(call_1),
- CallStmt(call_2),
- });
+ auto* call_1 = Call("foo");
+ auto* call_2 = Call("foo");
+ auto* bar = Func("bar", ast::VariableList{}, ty.void_(),
+ {
+ CallStmt(call_1),
+ CallStmt(call_2),
+ });
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- auto* foo_sem = Sem().Get(foo);
- ASSERT_NE(foo_sem, nullptr);
- ASSERT_EQ(foo_sem->CallSites().size(), 2u);
- EXPECT_EQ(foo_sem->CallSites()[0]->Declaration(), call_1);
- EXPECT_EQ(foo_sem->CallSites()[1]->Declaration(), call_2);
+ auto* foo_sem = Sem().Get(foo);
+ ASSERT_NE(foo_sem, nullptr);
+ ASSERT_EQ(foo_sem->CallSites().size(), 2u);
+ EXPECT_EQ(foo_sem->CallSites()[0]->Declaration(), call_1);
+ EXPECT_EQ(foo_sem->CallSites()[1]->Declaration(), call_2);
- auto* bar_sem = Sem().Get(bar);
- ASSERT_NE(bar_sem, nullptr);
- EXPECT_EQ(bar_sem->CallSites().size(), 0u);
+ auto* bar_sem = Sem().Get(bar);
+ ASSERT_NE(bar_sem, nullptr);
+ EXPECT_EQ(bar_sem->CallSites().size(), 0u);
}
TEST_F(ResolverTest, Function_WorkgroupSize_NotSet) {
- // @stage(compute) @workgroup_size(1)
- // fn main() {}
- auto* func = Func("main", ast::VariableList{}, ty.void_(), {}, {});
+ // @compute @workgroup_size(1)
+ // fn main() {}
+ auto* func = Func("main", ast::VariableList{}, ty.void_(), {}, {});
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- auto* func_sem = Sem().Get(func);
- ASSERT_NE(func_sem, nullptr);
+ auto* func_sem = Sem().Get(func);
+ ASSERT_NE(func_sem, nullptr);
- EXPECT_EQ(func_sem->WorkgroupSize()[0].value, 1u);
- EXPECT_EQ(func_sem->WorkgroupSize()[1].value, 1u);
- EXPECT_EQ(func_sem->WorkgroupSize()[2].value, 1u);
- EXPECT_EQ(func_sem->WorkgroupSize()[0].overridable_const, nullptr);
- EXPECT_EQ(func_sem->WorkgroupSize()[1].overridable_const, nullptr);
- EXPECT_EQ(func_sem->WorkgroupSize()[2].overridable_const, nullptr);
+ EXPECT_EQ(func_sem->WorkgroupSize()[0].value, 1u);
+ EXPECT_EQ(func_sem->WorkgroupSize()[1].value, 1u);
+ EXPECT_EQ(func_sem->WorkgroupSize()[2].value, 1u);
+ EXPECT_EQ(func_sem->WorkgroupSize()[0].overridable_const, nullptr);
+ EXPECT_EQ(func_sem->WorkgroupSize()[1].overridable_const, nullptr);
+ EXPECT_EQ(func_sem->WorkgroupSize()[2].overridable_const, nullptr);
}
TEST_F(ResolverTest, Function_WorkgroupSize_Literals) {
- // @stage(compute) @workgroup_size(8, 2, 3)
- // fn main() {}
- auto* func =
- Func("main", ast::VariableList{}, ty.void_(), {},
- {Stage(ast::PipelineStage::kCompute), WorkgroupSize(8, 2, 3)});
+ // @compute @workgroup_size(8, 2, 3)
+ // fn main() {}
+ auto* func = Func("main", ast::VariableList{}, ty.void_(), {},
+ {Stage(ast::PipelineStage::kCompute), WorkgroupSize(8_i, 2_i, 3_i)});
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- auto* func_sem = Sem().Get(func);
- ASSERT_NE(func_sem, nullptr);
+ auto* func_sem = Sem().Get(func);
+ ASSERT_NE(func_sem, nullptr);
- EXPECT_EQ(func_sem->WorkgroupSize()[0].value, 8u);
- EXPECT_EQ(func_sem->WorkgroupSize()[1].value, 2u);
- EXPECT_EQ(func_sem->WorkgroupSize()[2].value, 3u);
- EXPECT_EQ(func_sem->WorkgroupSize()[0].overridable_const, nullptr);
- EXPECT_EQ(func_sem->WorkgroupSize()[1].overridable_const, nullptr);
- EXPECT_EQ(func_sem->WorkgroupSize()[2].overridable_const, nullptr);
+ EXPECT_EQ(func_sem->WorkgroupSize()[0].value, 8u);
+ EXPECT_EQ(func_sem->WorkgroupSize()[1].value, 2u);
+ EXPECT_EQ(func_sem->WorkgroupSize()[2].value, 3u);
+ EXPECT_EQ(func_sem->WorkgroupSize()[0].overridable_const, nullptr);
+ EXPECT_EQ(func_sem->WorkgroupSize()[1].overridable_const, nullptr);
+ EXPECT_EQ(func_sem->WorkgroupSize()[2].overridable_const, nullptr);
}
TEST_F(ResolverTest, Function_WorkgroupSize_Consts) {
- // let width = 16;
- // let height = 8;
- // let depth = 2;
- // @stage(compute) @workgroup_size(width, height, depth)
- // fn main() {}
- GlobalConst("width", ty.i32(), Expr(16));
- GlobalConst("height", ty.i32(), Expr(8));
- GlobalConst("depth", ty.i32(), Expr(2));
- auto* func = Func("main", ast::VariableList{}, ty.void_(), {},
- {Stage(ast::PipelineStage::kCompute),
- WorkgroupSize("width", "height", "depth")});
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
-
- auto* func_sem = Sem().Get(func);
- ASSERT_NE(func_sem, nullptr);
-
- EXPECT_EQ(func_sem->WorkgroupSize()[0].value, 16u);
- EXPECT_EQ(func_sem->WorkgroupSize()[1].value, 8u);
- EXPECT_EQ(func_sem->WorkgroupSize()[2].value, 2u);
- EXPECT_EQ(func_sem->WorkgroupSize()[0].overridable_const, nullptr);
- EXPECT_EQ(func_sem->WorkgroupSize()[1].overridable_const, nullptr);
- EXPECT_EQ(func_sem->WorkgroupSize()[2].overridable_const, nullptr);
+ // let width = 16i;
+ // let height = 8i;
+ // let depth = 2i;
+ // @compute @workgroup_size(width, height, depth)
+ // fn main() {}
+ GlobalConst("width", ty.i32(), Expr(16_i));
+ GlobalConst("height", ty.i32(), Expr(8_i));
+ GlobalConst("depth", ty.i32(), Expr(2_i));
+ auto* func =
+ Func("main", ast::VariableList{}, ty.void_(), {},
+ {Stage(ast::PipelineStage::kCompute), WorkgroupSize("width", "height", "depth")});
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+
+ auto* func_sem = Sem().Get(func);
+ ASSERT_NE(func_sem, nullptr);
+
+ EXPECT_EQ(func_sem->WorkgroupSize()[0].value, 16u);
+ EXPECT_EQ(func_sem->WorkgroupSize()[1].value, 8u);
+ EXPECT_EQ(func_sem->WorkgroupSize()[2].value, 2u);
+ EXPECT_EQ(func_sem->WorkgroupSize()[0].overridable_const, nullptr);
+ EXPECT_EQ(func_sem->WorkgroupSize()[1].overridable_const, nullptr);
+ EXPECT_EQ(func_sem->WorkgroupSize()[2].overridable_const, nullptr);
}
TEST_F(ResolverTest, Function_WorkgroupSize_Consts_NestedInitializer) {
- // let width = i32(i32(i32(8)));
- // let height = i32(i32(i32(4)));
- // @stage(compute) @workgroup_size(width, height)
- // fn main() {}
- GlobalConst("width", ty.i32(),
- Construct(ty.i32(), Construct(ty.i32(), Construct(ty.i32(), 8))));
- GlobalConst("height", ty.i32(),
- Construct(ty.i32(), Construct(ty.i32(), Construct(ty.i32(), 4))));
- auto* func = Func(
- "main", ast::VariableList{}, ty.void_(), {},
- {Stage(ast::PipelineStage::kCompute), WorkgroupSize("width", "height")});
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
-
- auto* func_sem = Sem().Get(func);
- ASSERT_NE(func_sem, nullptr);
-
- EXPECT_EQ(func_sem->WorkgroupSize()[0].value, 8u);
- EXPECT_EQ(func_sem->WorkgroupSize()[1].value, 4u);
- EXPECT_EQ(func_sem->WorkgroupSize()[2].value, 1u);
- EXPECT_EQ(func_sem->WorkgroupSize()[0].overridable_const, nullptr);
- EXPECT_EQ(func_sem->WorkgroupSize()[1].overridable_const, nullptr);
- EXPECT_EQ(func_sem->WorkgroupSize()[2].overridable_const, nullptr);
+ // let width = i32(i32(i32(8i)));
+ // let height = i32(i32(i32(4i)));
+ // @compute @workgroup_size(width, height)
+ // fn main() {}
+ GlobalConst("width", ty.i32(),
+ Construct(ty.i32(), Construct(ty.i32(), Construct(ty.i32(), 8_i))));
+ GlobalConst("height", ty.i32(),
+ Construct(ty.i32(), Construct(ty.i32(), Construct(ty.i32(), 4_i))));
+ auto* func = Func("main", ast::VariableList{}, ty.void_(), {},
+ {Stage(ast::PipelineStage::kCompute), WorkgroupSize("width", "height")});
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+
+ auto* func_sem = Sem().Get(func);
+ ASSERT_NE(func_sem, nullptr);
+
+ EXPECT_EQ(func_sem->WorkgroupSize()[0].value, 8u);
+ EXPECT_EQ(func_sem->WorkgroupSize()[1].value, 4u);
+ EXPECT_EQ(func_sem->WorkgroupSize()[2].value, 1u);
+ EXPECT_EQ(func_sem->WorkgroupSize()[0].overridable_const, nullptr);
+ EXPECT_EQ(func_sem->WorkgroupSize()[1].overridable_const, nullptr);
+ EXPECT_EQ(func_sem->WorkgroupSize()[2].overridable_const, nullptr);
}
TEST_F(ResolverTest, Function_WorkgroupSize_OverridableConsts) {
- // @id(0) override width = 16;
- // @id(1) override height = 8;
- // @id(2) override depth = 2;
- // @stage(compute) @workgroup_size(width, height, depth)
- // fn main() {}
- auto* width = Override("width", ty.i32(), Expr(16), {Id(0)});
- auto* height = Override("height", ty.i32(), Expr(8), {Id(1)});
- auto* depth = Override("depth", ty.i32(), Expr(2), {Id(2)});
- auto* func = Func("main", ast::VariableList{}, ty.void_(), {},
- {Stage(ast::PipelineStage::kCompute),
- WorkgroupSize("width", "height", "depth")});
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
-
- auto* func_sem = Sem().Get(func);
- ASSERT_NE(func_sem, nullptr);
-
- EXPECT_EQ(func_sem->WorkgroupSize()[0].value, 16u);
- EXPECT_EQ(func_sem->WorkgroupSize()[1].value, 8u);
- EXPECT_EQ(func_sem->WorkgroupSize()[2].value, 2u);
- EXPECT_EQ(func_sem->WorkgroupSize()[0].overridable_const, width);
- EXPECT_EQ(func_sem->WorkgroupSize()[1].overridable_const, height);
- EXPECT_EQ(func_sem->WorkgroupSize()[2].overridable_const, depth);
+ // @id(0) override width = 16i;
+ // @id(1) override height = 8i;
+ // @id(2) override depth = 2i;
+ // @compute @workgroup_size(width, height, depth)
+ // fn main() {}
+ auto* width = Override("width", ty.i32(), Expr(16_i), {Id(0)});
+ auto* height = Override("height", ty.i32(), Expr(8_i), {Id(1)});
+ auto* depth = Override("depth", ty.i32(), Expr(2_i), {Id(2)});
+ auto* func =
+ Func("main", ast::VariableList{}, ty.void_(), {},
+ {Stage(ast::PipelineStage::kCompute), WorkgroupSize("width", "height", "depth")});
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+
+ auto* func_sem = Sem().Get(func);
+ ASSERT_NE(func_sem, nullptr);
+
+ EXPECT_EQ(func_sem->WorkgroupSize()[0].value, 16u);
+ EXPECT_EQ(func_sem->WorkgroupSize()[1].value, 8u);
+ EXPECT_EQ(func_sem->WorkgroupSize()[2].value, 2u);
+ EXPECT_EQ(func_sem->WorkgroupSize()[0].overridable_const, width);
+ EXPECT_EQ(func_sem->WorkgroupSize()[1].overridable_const, height);
+ EXPECT_EQ(func_sem->WorkgroupSize()[2].overridable_const, depth);
}
TEST_F(ResolverTest, Function_WorkgroupSize_OverridableConsts_NoInit) {
- // @id(0) override width : i32;
- // @id(1) override height : i32;
- // @id(2) override depth : i32;
- // @stage(compute) @workgroup_size(width, height, depth)
- // fn main() {}
- auto* width = Override("width", ty.i32(), nullptr, {Id(0)});
- auto* height = Override("height", ty.i32(), nullptr, {Id(1)});
- auto* depth = Override("depth", ty.i32(), nullptr, {Id(2)});
- auto* func = Func("main", ast::VariableList{}, ty.void_(), {},
- {Stage(ast::PipelineStage::kCompute),
- WorkgroupSize("width", "height", "depth")});
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
-
- auto* func_sem = Sem().Get(func);
- ASSERT_NE(func_sem, nullptr);
-
- EXPECT_EQ(func_sem->WorkgroupSize()[0].value, 0u);
- EXPECT_EQ(func_sem->WorkgroupSize()[1].value, 0u);
- EXPECT_EQ(func_sem->WorkgroupSize()[2].value, 0u);
- EXPECT_EQ(func_sem->WorkgroupSize()[0].overridable_const, width);
- EXPECT_EQ(func_sem->WorkgroupSize()[1].overridable_const, height);
- EXPECT_EQ(func_sem->WorkgroupSize()[2].overridable_const, depth);
+ // @id(0) override width : i32;
+ // @id(1) override height : i32;
+ // @id(2) override depth : i32;
+ // @compute @workgroup_size(width, height, depth)
+ // fn main() {}
+ auto* width = Override("width", ty.i32(), nullptr, {Id(0)});
+ auto* height = Override("height", ty.i32(), nullptr, {Id(1)});
+ auto* depth = Override("depth", ty.i32(), nullptr, {Id(2)});
+ auto* func =
+ Func("main", ast::VariableList{}, ty.void_(), {},
+ {Stage(ast::PipelineStage::kCompute), WorkgroupSize("width", "height", "depth")});
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+
+ auto* func_sem = Sem().Get(func);
+ ASSERT_NE(func_sem, nullptr);
+
+ EXPECT_EQ(func_sem->WorkgroupSize()[0].value, 0u);
+ EXPECT_EQ(func_sem->WorkgroupSize()[1].value, 0u);
+ EXPECT_EQ(func_sem->WorkgroupSize()[2].value, 0u);
+ EXPECT_EQ(func_sem->WorkgroupSize()[0].overridable_const, width);
+ EXPECT_EQ(func_sem->WorkgroupSize()[1].overridable_const, height);
+ EXPECT_EQ(func_sem->WorkgroupSize()[2].overridable_const, depth);
}
TEST_F(ResolverTest, Function_WorkgroupSize_Mixed) {
- // @id(1) override height = 2;
- // let depth = 3;
- // @stage(compute) @workgroup_size(8, height, depth)
- // fn main() {}
- auto* height = Override("height", ty.i32(), Expr(2), {Id(0)});
- GlobalConst("depth", ty.i32(), Expr(3));
- auto* func = Func("main", ast::VariableList{}, ty.void_(), {},
- {Stage(ast::PipelineStage::kCompute),
- WorkgroupSize(8, "height", "depth")});
+    // @id(0) override height = 2i;
+ // let depth = 3i;
+ // @compute @workgroup_size(8, height, depth)
+ // fn main() {}
+ auto* height = Override("height", ty.i32(), Expr(2_i), {Id(0)});
+ GlobalConst("depth", ty.i32(), Expr(3_i));
+ auto* func = Func("main", ast::VariableList{}, ty.void_(), {},
+ {Stage(ast::PipelineStage::kCompute), WorkgroupSize(8_i, "height", "depth")});
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- auto* func_sem = Sem().Get(func);
- ASSERT_NE(func_sem, nullptr);
+ auto* func_sem = Sem().Get(func);
+ ASSERT_NE(func_sem, nullptr);
- EXPECT_EQ(func_sem->WorkgroupSize()[0].value, 8u);
- EXPECT_EQ(func_sem->WorkgroupSize()[1].value, 2u);
- EXPECT_EQ(func_sem->WorkgroupSize()[2].value, 3u);
- EXPECT_EQ(func_sem->WorkgroupSize()[0].overridable_const, nullptr);
- EXPECT_EQ(func_sem->WorkgroupSize()[1].overridable_const, height);
- EXPECT_EQ(func_sem->WorkgroupSize()[2].overridable_const, nullptr);
+ EXPECT_EQ(func_sem->WorkgroupSize()[0].value, 8u);
+ EXPECT_EQ(func_sem->WorkgroupSize()[1].value, 2u);
+ EXPECT_EQ(func_sem->WorkgroupSize()[2].value, 3u);
+ EXPECT_EQ(func_sem->WorkgroupSize()[0].overridable_const, nullptr);
+ EXPECT_EQ(func_sem->WorkgroupSize()[1].overridable_const, height);
+ EXPECT_EQ(func_sem->WorkgroupSize()[2].overridable_const, nullptr);
}
TEST_F(ResolverTest, Expr_MemberAccessor_Struct) {
- auto* st = Structure("S", {Member("first_member", ty.i32()),
- Member("second_member", ty.f32())});
- Global("my_struct", ty.Of(st), ast::StorageClass::kPrivate);
+ auto* st =
+ Structure("S", {Member("first_member", ty.i32()), Member("second_member", ty.f32())});
+ Global("my_struct", ty.Of(st), ast::StorageClass::kPrivate);
- auto* mem = MemberAccessor("my_struct", "second_member");
- WrapInFunction(mem);
+ auto* mem = MemberAccessor("my_struct", "second_member");
+ WrapInFunction(mem);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(mem), nullptr);
- ASSERT_TRUE(TypeOf(mem)->Is<sem::Reference>());
+ ASSERT_NE(TypeOf(mem), nullptr);
+ ASSERT_TRUE(TypeOf(mem)->Is<sem::Reference>());
- auto* ref = TypeOf(mem)->As<sem::Reference>();
- EXPECT_TRUE(ref->StoreType()->Is<sem::F32>());
- auto* sma = Sem().Get(mem)->As<sem::StructMemberAccess>();
- ASSERT_NE(sma, nullptr);
- EXPECT_TRUE(sma->Member()->Type()->Is<sem::F32>());
- EXPECT_EQ(sma->Member()->Index(), 1u);
- EXPECT_EQ(sma->Member()->Declaration()->symbol,
- Symbols().Get("second_member"));
+ auto* ref = TypeOf(mem)->As<sem::Reference>();
+ EXPECT_TRUE(ref->StoreType()->Is<sem::F32>());
+ auto* sma = Sem().Get(mem)->As<sem::StructMemberAccess>();
+ ASSERT_NE(sma, nullptr);
+ EXPECT_TRUE(sma->Member()->Type()->Is<sem::F32>());
+ EXPECT_EQ(sma->Member()->Index(), 1u);
+ EXPECT_EQ(sma->Member()->Declaration()->symbol, Symbols().Get("second_member"));
}
TEST_F(ResolverTest, Expr_MemberAccessor_Struct_Alias) {
- auto* st = Structure("S", {Member("first_member", ty.i32()),
- Member("second_member", ty.f32())});
- auto* alias = Alias("alias", ty.Of(st));
- Global("my_struct", ty.Of(alias), ast::StorageClass::kPrivate);
+ auto* st =
+ Structure("S", {Member("first_member", ty.i32()), Member("second_member", ty.f32())});
+ auto* alias = Alias("alias", ty.Of(st));
+ Global("my_struct", ty.Of(alias), ast::StorageClass::kPrivate);
- auto* mem = MemberAccessor("my_struct", "second_member");
- WrapInFunction(mem);
+ auto* mem = MemberAccessor("my_struct", "second_member");
+ WrapInFunction(mem);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(mem), nullptr);
- ASSERT_TRUE(TypeOf(mem)->Is<sem::Reference>());
+ ASSERT_NE(TypeOf(mem), nullptr);
+ ASSERT_TRUE(TypeOf(mem)->Is<sem::Reference>());
- auto* ref = TypeOf(mem)->As<sem::Reference>();
- EXPECT_TRUE(ref->StoreType()->Is<sem::F32>());
- auto* sma = Sem().Get(mem)->As<sem::StructMemberAccess>();
- ASSERT_NE(sma, nullptr);
- EXPECT_TRUE(sma->Member()->Type()->Is<sem::F32>());
- EXPECT_EQ(sma->Member()->Index(), 1u);
+ auto* ref = TypeOf(mem)->As<sem::Reference>();
+ EXPECT_TRUE(ref->StoreType()->Is<sem::F32>());
+ auto* sma = Sem().Get(mem)->As<sem::StructMemberAccess>();
+ ASSERT_NE(sma, nullptr);
+ EXPECT_TRUE(sma->Member()->Type()->Is<sem::F32>());
+ EXPECT_EQ(sma->Member()->Index(), 1u);
}
TEST_F(ResolverTest, Expr_MemberAccessor_VectorSwizzle) {
- Global("my_vec", ty.vec4<f32>(), ast::StorageClass::kPrivate);
+ Global("my_vec", ty.vec4<f32>(), ast::StorageClass::kPrivate);
- auto* mem = MemberAccessor("my_vec", "xzyw");
- WrapInFunction(mem);
+ auto* mem = MemberAccessor("my_vec", "xzyw");
+ WrapInFunction(mem);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(mem), nullptr);
- ASSERT_TRUE(TypeOf(mem)->Is<sem::Vector>());
- EXPECT_TRUE(TypeOf(mem)->As<sem::Vector>()->type()->Is<sem::F32>());
- EXPECT_EQ(TypeOf(mem)->As<sem::Vector>()->Width(), 4u);
- ASSERT_TRUE(Sem().Get(mem)->Is<sem::Swizzle>());
- EXPECT_THAT(Sem().Get(mem)->As<sem::Swizzle>()->Indices(),
- ElementsAre(0, 2, 1, 3));
+ ASSERT_NE(TypeOf(mem), nullptr);
+ ASSERT_TRUE(TypeOf(mem)->Is<sem::Vector>());
+ EXPECT_TRUE(TypeOf(mem)->As<sem::Vector>()->type()->Is<sem::F32>());
+ EXPECT_EQ(TypeOf(mem)->As<sem::Vector>()->Width(), 4u);
+ ASSERT_TRUE(Sem().Get(mem)->Is<sem::Swizzle>());
+ EXPECT_THAT(Sem().Get(mem)->As<sem::Swizzle>()->Indices(), ElementsAre(0, 2, 1, 3));
}
TEST_F(ResolverTest, Expr_MemberAccessor_VectorSwizzle_SingleElement) {
- Global("my_vec", ty.vec3<f32>(), ast::StorageClass::kPrivate);
+ Global("my_vec", ty.vec3<f32>(), ast::StorageClass::kPrivate);
- auto* mem = MemberAccessor("my_vec", "b");
- WrapInFunction(mem);
+ auto* mem = MemberAccessor("my_vec", "b");
+ WrapInFunction(mem);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(mem), nullptr);
- ASSERT_TRUE(TypeOf(mem)->Is<sem::Reference>());
+ ASSERT_NE(TypeOf(mem), nullptr);
+ ASSERT_TRUE(TypeOf(mem)->Is<sem::Reference>());
- auto* ref = TypeOf(mem)->As<sem::Reference>();
- ASSERT_TRUE(ref->StoreType()->Is<sem::F32>());
- ASSERT_TRUE(Sem().Get(mem)->Is<sem::Swizzle>());
- EXPECT_THAT(Sem().Get(mem)->As<sem::Swizzle>()->Indices(), ElementsAre(2));
+ auto* ref = TypeOf(mem)->As<sem::Reference>();
+ ASSERT_TRUE(ref->StoreType()->Is<sem::F32>());
+ ASSERT_TRUE(Sem().Get(mem)->Is<sem::Swizzle>());
+ EXPECT_THAT(Sem().Get(mem)->As<sem::Swizzle>()->Indices(), ElementsAre(2));
}
TEST_F(ResolverTest, Expr_Accessor_MultiLevel) {
- // struct b {
- // vec4<f32> foo
- // }
- // struct A {
- // array<b, 3> mem
- // }
- // var c : A
- // c.mem[0].foo.yx
- // -> vec2<f32>
- //
- // fn f() {
- // c.mem[0].foo
- // }
- //
-
- auto* stB = Structure("B", {Member("foo", ty.vec4<f32>())});
- auto* stA = Structure("A", {Member("mem", ty.array(ty.Of(stB), 3))});
- Global("c", ty.Of(stA), ast::StorageClass::kPrivate);
-
- auto* mem = MemberAccessor(
- MemberAccessor(IndexAccessor(MemberAccessor("c", "mem"), 0), "foo"),
- "yx");
- WrapInFunction(mem);
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
-
- ASSERT_NE(TypeOf(mem), nullptr);
- ASSERT_TRUE(TypeOf(mem)->Is<sem::Vector>());
- EXPECT_TRUE(TypeOf(mem)->As<sem::Vector>()->type()->Is<sem::F32>());
- EXPECT_EQ(TypeOf(mem)->As<sem::Vector>()->Width(), 2u);
- ASSERT_TRUE(Sem().Get(mem)->Is<sem::Swizzle>());
+ // struct b {
+ // vec4<f32> foo
+ // }
+ // struct A {
+    //   array<b, 3i> mem
+ // }
+ // var c : A
+ // c.mem[0].foo.yx
+ // -> vec2<f32>
+ //
+ // fn f() {
+ // c.mem[0].foo
+ // }
+ //
+
+ auto* stB = Structure("B", {Member("foo", ty.vec4<f32>())});
+ auto* stA = Structure("A", {Member("mem", ty.array(ty.Of(stB), 3_i))});
+ Global("c", ty.Of(stA), ast::StorageClass::kPrivate);
+
+ auto* mem =
+ MemberAccessor(MemberAccessor(IndexAccessor(MemberAccessor("c", "mem"), 0_i), "foo"), "yx");
+ WrapInFunction(mem);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+
+ ASSERT_NE(TypeOf(mem), nullptr);
+ ASSERT_TRUE(TypeOf(mem)->Is<sem::Vector>());
+ EXPECT_TRUE(TypeOf(mem)->As<sem::Vector>()->type()->Is<sem::F32>());
+ EXPECT_EQ(TypeOf(mem)->As<sem::Vector>()->Width(), 2u);
+ ASSERT_TRUE(Sem().Get(mem)->Is<sem::Swizzle>());
}
TEST_F(ResolverTest, Expr_MemberAccessor_InBinaryOp) {
- auto* st = Structure("S", {Member("first_member", ty.f32()),
- Member("second_member", ty.f32())});
- Global("my_struct", ty.Of(st), ast::StorageClass::kPrivate);
+ auto* st =
+ Structure("S", {Member("first_member", ty.f32()), Member("second_member", ty.f32())});
+ Global("my_struct", ty.Of(st), ast::StorageClass::kPrivate);
- auto* expr = Add(MemberAccessor("my_struct", "first_member"),
- MemberAccessor("my_struct", "second_member"));
- WrapInFunction(expr);
+ auto* expr = Add(MemberAccessor("my_struct", "first_member"),
+ MemberAccessor("my_struct", "second_member"));
+ WrapInFunction(expr);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(expr), nullptr);
- EXPECT_TRUE(TypeOf(expr)->Is<sem::F32>());
+ ASSERT_NE(TypeOf(expr), nullptr);
+ EXPECT_TRUE(TypeOf(expr)->Is<sem::F32>());
}
namespace ExprBinaryTest {
template <typename T, int ID>
struct Aliased {
- using type = alias<T, ID>;
+ using type = alias<T, ID>;
};
template <int N, typename T, int ID>
struct Aliased<vec<N, T>, ID> {
- using type = vec<N, alias<T, ID>>;
+ using type = vec<N, alias<T, ID>>;
};
template <int N, int M, typename T, int ID>
struct Aliased<mat<N, M, T>, ID> {
- using type = mat<N, M, alias<T, ID>>;
+ using type = mat<N, M, alias<T, ID>>;
};
struct Params {
- ast::BinaryOp op;
- builder::ast_type_func_ptr create_lhs_type;
- builder::ast_type_func_ptr create_rhs_type;
- builder::ast_type_func_ptr create_lhs_alias_type;
- builder::ast_type_func_ptr create_rhs_alias_type;
- builder::sem_type_func_ptr create_result_type;
+ ast::BinaryOp op;
+ builder::ast_type_func_ptr create_lhs_type;
+ builder::ast_type_func_ptr create_rhs_type;
+ builder::ast_type_func_ptr create_lhs_alias_type;
+ builder::ast_type_func_ptr create_rhs_alias_type;
+ builder::sem_type_func_ptr create_result_type;
};
template <typename LHS, typename RHS, typename RES>
constexpr Params ParamsFor(ast::BinaryOp op) {
- return Params{op,
- DataType<LHS>::AST,
- DataType<RHS>::AST,
- DataType<typename Aliased<LHS, 0>::type>::AST,
- DataType<typename Aliased<RHS, 1>::type>::AST,
- DataType<RES>::Sem};
+ return Params{op,
+ DataType<LHS>::AST,
+ DataType<RHS>::AST,
+ DataType<typename Aliased<LHS, 0>::type>::AST,
+ DataType<typename Aliased<RHS, 1>::type>::AST,
+ DataType<RES>::Sem};
}
static constexpr ast::BinaryOp all_ops[] = {
@@ -1491,174 +1480,150 @@ static constexpr Params all_valid_cases[] = {
using Expr_Binary_Test_Valid = ResolverTestWithParam<Params>;
TEST_P(Expr_Binary_Test_Valid, All) {
- auto& params = GetParam();
+ auto& params = GetParam();
- auto* lhs_type = params.create_lhs_type(*this);
- auto* rhs_type = params.create_rhs_type(*this);
- auto* result_type = params.create_result_type(*this);
+ auto* lhs_type = params.create_lhs_type(*this);
+ auto* rhs_type = params.create_rhs_type(*this);
+ auto* result_type = params.create_result_type(*this);
- std::stringstream ss;
- ss << FriendlyName(lhs_type) << " " << params.op << " "
- << FriendlyName(rhs_type);
- SCOPED_TRACE(ss.str());
+ std::stringstream ss;
+ ss << FriendlyName(lhs_type) << " " << params.op << " " << FriendlyName(rhs_type);
+ SCOPED_TRACE(ss.str());
- Global("lhs", lhs_type, ast::StorageClass::kPrivate);
- Global("rhs", rhs_type, ast::StorageClass::kPrivate);
+ Global("lhs", lhs_type, ast::StorageClass::kPrivate);
+ Global("rhs", rhs_type, ast::StorageClass::kPrivate);
- auto* expr =
- create<ast::BinaryExpression>(params.op, Expr("lhs"), Expr("rhs"));
- WrapInFunction(expr);
+ auto* expr = create<ast::BinaryExpression>(params.op, Expr("lhs"), Expr("rhs"));
+ WrapInFunction(expr);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(expr), nullptr);
- ASSERT_TRUE(TypeOf(expr) == result_type);
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_NE(TypeOf(expr), nullptr);
+ ASSERT_TRUE(TypeOf(expr) == result_type);
}
-INSTANTIATE_TEST_SUITE_P(ResolverTest,
- Expr_Binary_Test_Valid,
- testing::ValuesIn(all_valid_cases));
+INSTANTIATE_TEST_SUITE_P(ResolverTest, Expr_Binary_Test_Valid, testing::ValuesIn(all_valid_cases));
enum class BinaryExprSide { Left, Right, Both };
-using Expr_Binary_Test_WithAlias_Valid =
- ResolverTestWithParam<std::tuple<Params, BinaryExprSide>>;
+using Expr_Binary_Test_WithAlias_Valid = ResolverTestWithParam<std::tuple<Params, BinaryExprSide>>;
TEST_P(Expr_Binary_Test_WithAlias_Valid, All) {
- const Params& params = std::get<0>(GetParam());
- BinaryExprSide side = std::get<1>(GetParam());
-
- auto* create_lhs_type =
- (side == BinaryExprSide::Left || side == BinaryExprSide::Both)
- ? params.create_lhs_alias_type
- : params.create_lhs_type;
- auto* create_rhs_type =
- (side == BinaryExprSide::Right || side == BinaryExprSide::Both)
- ? params.create_rhs_alias_type
- : params.create_rhs_type;
-
- auto* lhs_type = create_lhs_type(*this);
- auto* rhs_type = create_rhs_type(*this);
-
- std::stringstream ss;
- ss << FriendlyName(lhs_type) << " " << params.op << " "
- << FriendlyName(rhs_type);
-
- ss << ", After aliasing: " << FriendlyName(lhs_type) << " " << params.op
- << " " << FriendlyName(rhs_type);
- SCOPED_TRACE(ss.str());
-
- Global("lhs", lhs_type, ast::StorageClass::kPrivate);
- Global("rhs", rhs_type, ast::StorageClass::kPrivate);
-
- auto* expr =
- create<ast::BinaryExpression>(params.op, Expr("lhs"), Expr("rhs"));
- WrapInFunction(expr);
-
- ASSERT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(expr), nullptr);
- // TODO(amaiorano): Bring this back once we have a way to get the canonical
- // type
- // auto* *result_type = params.create_result_type(*this);
- // ASSERT_TRUE(TypeOf(expr) == result_type);
-}
-INSTANTIATE_TEST_SUITE_P(
- ResolverTest,
- Expr_Binary_Test_WithAlias_Valid,
- testing::Combine(testing::ValuesIn(all_valid_cases),
- testing::Values(BinaryExprSide::Left,
- BinaryExprSide::Right,
- BinaryExprSide::Both)));
+ const Params& params = std::get<0>(GetParam());
+ BinaryExprSide side = std::get<1>(GetParam());
+
+ auto* create_lhs_type = (side == BinaryExprSide::Left || side == BinaryExprSide::Both)
+ ? params.create_lhs_alias_type
+ : params.create_lhs_type;
+ auto* create_rhs_type = (side == BinaryExprSide::Right || side == BinaryExprSide::Both)
+ ? params.create_rhs_alias_type
+ : params.create_rhs_type;
+
+ auto* lhs_type = create_lhs_type(*this);
+ auto* rhs_type = create_rhs_type(*this);
+
+ std::stringstream ss;
+ ss << FriendlyName(lhs_type) << " " << params.op << " " << FriendlyName(rhs_type);
+
+ ss << ", After aliasing: " << FriendlyName(lhs_type) << " " << params.op << " "
+ << FriendlyName(rhs_type);
+ SCOPED_TRACE(ss.str());
+
+ Global("lhs", lhs_type, ast::StorageClass::kPrivate);
+ Global("rhs", rhs_type, ast::StorageClass::kPrivate);
+
+ auto* expr = create<ast::BinaryExpression>(params.op, Expr("lhs"), Expr("rhs"));
+ WrapInFunction(expr);
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_NE(TypeOf(expr), nullptr);
+ // TODO(amaiorano): Bring this back once we have a way to get the canonical
+ // type
+    // auto* result_type = params.create_result_type(*this);
+ // ASSERT_TRUE(TypeOf(expr) == result_type);
+}
+INSTANTIATE_TEST_SUITE_P(ResolverTest,
+ Expr_Binary_Test_WithAlias_Valid,
+ testing::Combine(testing::ValuesIn(all_valid_cases),
+ testing::Values(BinaryExprSide::Left,
+ BinaryExprSide::Right,
+ BinaryExprSide::Both)));
// This test works by taking the cartesian product of all possible
// (type * type * op), and processing only the triplets that are not found in
// the `all_valid_cases` table.
-using Expr_Binary_Test_Invalid =
- ResolverTestWithParam<std::tuple<builder::ast_type_func_ptr,
- builder::ast_type_func_ptr,
- ast::BinaryOp>>;
+using Expr_Binary_Test_Invalid = ResolverTestWithParam<
+ std::tuple<builder::ast_type_func_ptr, builder::ast_type_func_ptr, ast::BinaryOp>>;
TEST_P(Expr_Binary_Test_Invalid, All) {
- const builder::ast_type_func_ptr& lhs_create_type_func =
- std::get<0>(GetParam());
- const builder::ast_type_func_ptr& rhs_create_type_func =
- std::get<1>(GetParam());
- const ast::BinaryOp op = std::get<2>(GetParam());
-
- // Skip if valid case
- // TODO(amaiorano): replace linear lookup with O(1) if too slow
- for (auto& c : all_valid_cases) {
- if (c.create_lhs_type == lhs_create_type_func &&
- c.create_rhs_type == rhs_create_type_func && c.op == op) {
- return;
+ const builder::ast_type_func_ptr& lhs_create_type_func = std::get<0>(GetParam());
+ const builder::ast_type_func_ptr& rhs_create_type_func = std::get<1>(GetParam());
+ const ast::BinaryOp op = std::get<2>(GetParam());
+
+ // Skip if valid case
+ // TODO(amaiorano): replace linear lookup with O(1) if too slow
+ for (auto& c : all_valid_cases) {
+ if (c.create_lhs_type == lhs_create_type_func &&
+ c.create_rhs_type == rhs_create_type_func && c.op == op) {
+ return;
+ }
}
- }
- auto* lhs_type = lhs_create_type_func(*this);
- auto* rhs_type = rhs_create_type_func(*this);
+ auto* lhs_type = lhs_create_type_func(*this);
+ auto* rhs_type = rhs_create_type_func(*this);
- std::stringstream ss;
- ss << FriendlyName(lhs_type) << " " << op << " " << FriendlyName(rhs_type);
- SCOPED_TRACE(ss.str());
+ std::stringstream ss;
+ ss << FriendlyName(lhs_type) << " " << op << " " << FriendlyName(rhs_type);
+ SCOPED_TRACE(ss.str());
- Global("lhs", lhs_type, ast::StorageClass::kPrivate);
- Global("rhs", rhs_type, ast::StorageClass::kPrivate);
+ Global("lhs", lhs_type, ast::StorageClass::kPrivate);
+ Global("rhs", rhs_type, ast::StorageClass::kPrivate);
- auto* expr = create<ast::BinaryExpression>(Source{{12, 34}}, op, Expr("lhs"),
- Expr("rhs"));
- WrapInFunction(expr);
+ auto* expr = create<ast::BinaryExpression>(Source{{12, 34}}, op, Expr("lhs"), Expr("rhs"));
+ WrapInFunction(expr);
- ASSERT_FALSE(r()->Resolve());
- ASSERT_EQ(r()->error(),
- "12:34 error: Binary expression operand types are invalid for "
- "this operation: " +
- FriendlyName(lhs_type) + " " + ast::FriendlyName(expr->op) +
- " " + FriendlyName(rhs_type));
+ ASSERT_FALSE(r()->Resolve());
+ EXPECT_THAT(r()->error(), HasSubstr("12:34 error: no matching overload for operator "));
}
-INSTANTIATE_TEST_SUITE_P(
- ResolverTest,
- Expr_Binary_Test_Invalid,
- testing::Combine(testing::ValuesIn(all_create_type_funcs),
- testing::ValuesIn(all_create_type_funcs),
- testing::ValuesIn(all_ops)));
+INSTANTIATE_TEST_SUITE_P(ResolverTest,
+ Expr_Binary_Test_Invalid,
+ testing::Combine(testing::ValuesIn(all_create_type_funcs),
+ testing::ValuesIn(all_create_type_funcs),
+ testing::ValuesIn(all_ops)));
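The TODO near the top of Expr_Binary_Test_Invalid mentions replacing the linear scan over `all_valid_cases` with an O(1) lookup if it ever becomes too slow. A minimal standalone sketch of one way that could look, assuming the valid triplets are reduced to integer indices and packed into a single key (the names and packing below are illustrative, not Tint API):

    #include <cstdint>
    #include <unordered_set>

    // Pack (lhs index, rhs index, op) into one 64-bit key. The indices would be
    // positions in a table such as all_create_type_funcs; 20 bits per field is
    // far more than needed.
    inline uint64_t PackTriplet(uint32_t lhs_index, uint32_t rhs_index, uint32_t op) {
        return (uint64_t(lhs_index) << 40) | (uint64_t(rhs_index) << 20) | uint64_t(op);
    }

    // Built once from all_valid_cases; the per-case skip check in the test body
    // then becomes a single hash probe instead of a loop:
    //   if (valid_triplets.count(PackTriplet(lhs, rhs, op)) != 0) { return; }
    std::unordered_set<uint64_t> valid_triplets;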
using Expr_Binary_Test_Invalid_VectorMatrixMultiply =
ResolverTestWithParam<std::tuple<bool, uint32_t, uint32_t, uint32_t>>;
TEST_P(Expr_Binary_Test_Invalid_VectorMatrixMultiply, All) {
- bool vec_by_mat = std::get<0>(GetParam());
- uint32_t vec_size = std::get<1>(GetParam());
- uint32_t mat_rows = std::get<2>(GetParam());
- uint32_t mat_cols = std::get<3>(GetParam());
-
- const ast::Type* lhs_type = nullptr;
- const ast::Type* rhs_type = nullptr;
- const sem::Type* result_type = nullptr;
- bool is_valid_expr;
-
- if (vec_by_mat) {
- lhs_type = ty.vec<f32>(vec_size);
- rhs_type = ty.mat<f32>(mat_cols, mat_rows);
- result_type = create<sem::Vector>(create<sem::F32>(), mat_cols);
- is_valid_expr = vec_size == mat_rows;
- } else {
- lhs_type = ty.mat<f32>(mat_cols, mat_rows);
- rhs_type = ty.vec<f32>(vec_size);
- result_type = create<sem::Vector>(create<sem::F32>(), mat_rows);
- is_valid_expr = vec_size == mat_cols;
- }
-
- Global("lhs", lhs_type, ast::StorageClass::kPrivate);
- Global("rhs", rhs_type, ast::StorageClass::kPrivate);
-
- auto* expr = Mul(Source{{12, 34}}, Expr("lhs"), Expr("rhs"));
- WrapInFunction(expr);
-
- if (is_valid_expr) {
- ASSERT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_TRUE(TypeOf(expr) == result_type);
- } else {
- ASSERT_FALSE(r()->Resolve());
- ASSERT_EQ(r()->error(),
- "12:34 error: Binary expression operand types are invalid for "
- "this operation: " +
- FriendlyName(lhs_type) + " " + ast::FriendlyName(expr->op) +
- " " + FriendlyName(rhs_type));
- }
+ bool vec_by_mat = std::get<0>(GetParam());
+ uint32_t vec_size = std::get<1>(GetParam());
+ uint32_t mat_rows = std::get<2>(GetParam());
+ uint32_t mat_cols = std::get<3>(GetParam());
+
+ const ast::Type* lhs_type = nullptr;
+ const ast::Type* rhs_type = nullptr;
+ const sem::Type* result_type = nullptr;
+ bool is_valid_expr;
+
+ if (vec_by_mat) {
+ lhs_type = ty.vec<f32>(vec_size);
+ rhs_type = ty.mat<f32>(mat_cols, mat_rows);
+ result_type = create<sem::Vector>(create<sem::F32>(), mat_cols);
+ is_valid_expr = vec_size == mat_rows;
+ } else {
+ lhs_type = ty.mat<f32>(mat_cols, mat_rows);
+ rhs_type = ty.vec<f32>(vec_size);
+ result_type = create<sem::Vector>(create<sem::F32>(), mat_rows);
+ is_valid_expr = vec_size == mat_cols;
+ }
+
+ Global("lhs", lhs_type, ast::StorageClass::kPrivate);
+ Global("rhs", rhs_type, ast::StorageClass::kPrivate);
+
+ auto* expr = Mul(Source{{12, 34}}, Expr("lhs"), Expr("rhs"));
+ WrapInFunction(expr);
+
+ if (is_valid_expr) {
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(TypeOf(expr) == result_type);
+ } else {
+ ASSERT_FALSE(r()->Resolve());
+ EXPECT_THAT(r()->error(), HasSubstr("no matching overload for operator *"));
+ }
}
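For reference, the is_valid_expr conditions above are the standard dimension rules, reading ty.mat<f32>(C, R) as a matrix with C columns and R rows (which is how this test constructs its operands): a vector times a matrix multiplies as a row vector, and a matrix times a vector multiplies as a column vector,

$$v_{1\times R}\,M_{R\times C}=w_{1\times C}\ \Rightarrow\ vec\_size = mat\_rows, \qquad M_{R\times C}\,v_{C\times 1}=w_{R\times 1}\ \Rightarrow\ vec\_size = mat\_cols,$$

with result width C in the first case and R in the second, matching the result_type constructed above.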
auto all_dimension_values = testing::Values(2u, 3u, 4u);
INSTANTIATE_TEST_SUITE_P(ResolverTest,
@@ -1671,36 +1636,32 @@ INSTANTIATE_TEST_SUITE_P(ResolverTest,
using Expr_Binary_Test_Invalid_MatrixMatrixMultiply =
ResolverTestWithParam<std::tuple<uint32_t, uint32_t, uint32_t, uint32_t>>;
TEST_P(Expr_Binary_Test_Invalid_MatrixMatrixMultiply, All) {
- uint32_t lhs_mat_rows = std::get<0>(GetParam());
- uint32_t lhs_mat_cols = std::get<1>(GetParam());
- uint32_t rhs_mat_rows = std::get<2>(GetParam());
- uint32_t rhs_mat_cols = std::get<3>(GetParam());
-
- auto* lhs_type = ty.mat<f32>(lhs_mat_cols, lhs_mat_rows);
- auto* rhs_type = ty.mat<f32>(rhs_mat_cols, rhs_mat_rows);
-
- auto* f32 = create<sem::F32>();
- auto* col = create<sem::Vector>(f32, lhs_mat_rows);
- auto* result_type = create<sem::Matrix>(col, rhs_mat_cols);
-
- Global("lhs", lhs_type, ast::StorageClass::kPrivate);
- Global("rhs", rhs_type, ast::StorageClass::kPrivate);
-
- auto* expr = Mul(Source{{12, 34}}, Expr("lhs"), Expr("rhs"));
- WrapInFunction(expr);
-
- bool is_valid_expr = lhs_mat_cols == rhs_mat_rows;
- if (is_valid_expr) {
- ASSERT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_TRUE(TypeOf(expr) == result_type);
- } else {
- ASSERT_FALSE(r()->Resolve());
- ASSERT_EQ(r()->error(),
- "12:34 error: Binary expression operand types are invalid for "
- "this operation: " +
- FriendlyName(lhs_type) + " " + ast::FriendlyName(expr->op) +
- " " + FriendlyName(rhs_type));
- }
+ uint32_t lhs_mat_rows = std::get<0>(GetParam());
+ uint32_t lhs_mat_cols = std::get<1>(GetParam());
+ uint32_t rhs_mat_rows = std::get<2>(GetParam());
+ uint32_t rhs_mat_cols = std::get<3>(GetParam());
+
+ auto* lhs_type = ty.mat<f32>(lhs_mat_cols, lhs_mat_rows);
+ auto* rhs_type = ty.mat<f32>(rhs_mat_cols, rhs_mat_rows);
+
+ auto* f32 = create<sem::F32>();
+ auto* col = create<sem::Vector>(f32, lhs_mat_rows);
+ auto* result_type = create<sem::Matrix>(col, rhs_mat_cols);
+
+ Global("lhs", lhs_type, ast::StorageClass::kPrivate);
+ Global("rhs", rhs_type, ast::StorageClass::kPrivate);
+
+ auto* expr = Mul(Source{{12, 34}}, Expr("lhs"), Expr("rhs"));
+ WrapInFunction(expr);
+
+ bool is_valid_expr = lhs_mat_cols == rhs_mat_rows;
+ if (is_valid_expr) {
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(TypeOf(expr) == result_type);
+ } else {
+ ASSERT_FALSE(r()->Resolve());
+ EXPECT_THAT(r()->error(), HasSubstr("12:34 error: no matching overload for operator * "));
+ }
}
INSTANTIATE_TEST_SUITE_P(ResolverTest,
Expr_Binary_Test_Invalid_MatrixMatrixMultiply,
@@ -1713,30 +1674,30 @@ INSTANTIATE_TEST_SUITE_P(ResolverTest,
using UnaryOpExpressionTest = ResolverTestWithParam<ast::UnaryOp>;
TEST_P(UnaryOpExpressionTest, Expr_UnaryOp) {
- auto op = GetParam();
-
- if (op == ast::UnaryOp::kNot) {
- Global("ident", ty.vec4<bool>(), ast::StorageClass::kPrivate);
- } else if (op == ast::UnaryOp::kNegation || op == ast::UnaryOp::kComplement) {
- Global("ident", ty.vec4<i32>(), ast::StorageClass::kPrivate);
- } else {
- Global("ident", ty.vec4<f32>(), ast::StorageClass::kPrivate);
- }
- auto* der = create<ast::UnaryOpExpression>(op, Expr("ident"));
- WrapInFunction(der);
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
-
- ASSERT_NE(TypeOf(der), nullptr);
- ASSERT_TRUE(TypeOf(der)->Is<sem::Vector>());
- if (op == ast::UnaryOp::kNot) {
- EXPECT_TRUE(TypeOf(der)->As<sem::Vector>()->type()->Is<sem::Bool>());
- } else if (op == ast::UnaryOp::kNegation || op == ast::UnaryOp::kComplement) {
- EXPECT_TRUE(TypeOf(der)->As<sem::Vector>()->type()->Is<sem::I32>());
- } else {
- EXPECT_TRUE(TypeOf(der)->As<sem::Vector>()->type()->Is<sem::F32>());
- }
- EXPECT_EQ(TypeOf(der)->As<sem::Vector>()->Width(), 4u);
+ auto op = GetParam();
+
+ if (op == ast::UnaryOp::kNot) {
+ Global("ident", ty.vec4<bool>(), ast::StorageClass::kPrivate);
+ } else if (op == ast::UnaryOp::kNegation || op == ast::UnaryOp::kComplement) {
+ Global("ident", ty.vec4<i32>(), ast::StorageClass::kPrivate);
+ } else {
+ Global("ident", ty.vec4<f32>(), ast::StorageClass::kPrivate);
+ }
+ auto* der = create<ast::UnaryOpExpression>(op, Expr("ident"));
+ WrapInFunction(der);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+
+ ASSERT_NE(TypeOf(der), nullptr);
+ ASSERT_TRUE(TypeOf(der)->Is<sem::Vector>());
+ if (op == ast::UnaryOp::kNot) {
+ EXPECT_TRUE(TypeOf(der)->As<sem::Vector>()->type()->Is<sem::Bool>());
+ } else if (op == ast::UnaryOp::kNegation || op == ast::UnaryOp::kComplement) {
+ EXPECT_TRUE(TypeOf(der)->As<sem::Vector>()->type()->Is<sem::I32>());
+ } else {
+ EXPECT_TRUE(TypeOf(der)->As<sem::Vector>()->type()->Is<sem::F32>());
+ }
+ EXPECT_EQ(TypeOf(der)->As<sem::Vector>()->Width(), 4u);
}
INSTANTIATE_TEST_SUITE_P(ResolverTest,
UnaryOpExpressionTest,
@@ -1745,436 +1706,432 @@ INSTANTIATE_TEST_SUITE_P(ResolverTest,
ast::UnaryOp::kNot));
TEST_F(ResolverTest, StorageClass_SetsIfMissing) {
- auto* var = Var("var", ty.i32());
+ auto* var = Var("var", ty.i32());
- auto* stmt = Decl(var);
- Func("func", ast::VariableList{}, ty.void_(), {stmt}, ast::AttributeList{});
+ auto* stmt = Decl(var);
+ Func("func", ast::VariableList{}, ty.void_(), {stmt}, ast::AttributeList{});
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- EXPECT_EQ(Sem().Get(var)->StorageClass(), ast::StorageClass::kFunction);
+ EXPECT_EQ(Sem().Get(var)->StorageClass(), ast::StorageClass::kFunction);
}
TEST_F(ResolverTest, StorageClass_SetForSampler) {
- auto* t = ty.sampler(ast::SamplerKind::kSampler);
- auto* var = Global("var", t,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(0),
- });
+ auto* t = ty.sampler(ast::SamplerKind::kSampler);
+ auto* var = Global("var", t,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(0),
+ });
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- EXPECT_EQ(Sem().Get(var)->StorageClass(),
- ast::StorageClass::kUniformConstant);
+ EXPECT_EQ(Sem().Get(var)->StorageClass(), ast::StorageClass::kHandle);
}
TEST_F(ResolverTest, StorageClass_SetForTexture) {
- auto* t = ty.sampled_texture(ast::TextureDimension::k1d, ty.f32());
- auto* var = Global("var", t,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(0),
- });
+ auto* t = ty.sampled_texture(ast::TextureDimension::k1d, ty.f32());
+ auto* var = Global("var", t,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(0),
+ });
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- EXPECT_EQ(Sem().Get(var)->StorageClass(),
- ast::StorageClass::kUniformConstant);
+ EXPECT_EQ(Sem().Get(var)->StorageClass(), ast::StorageClass::kHandle);
}
TEST_F(ResolverTest, StorageClass_DoesNotSetOnConst) {
- auto* var = Const("var", ty.i32(), Construct(ty.i32()));
- auto* stmt = Decl(var);
- Func("func", ast::VariableList{}, ty.void_(), {stmt}, ast::AttributeList{});
+ auto* var = Let("var", ty.i32(), Construct(ty.i32()));
+ auto* stmt = Decl(var);
+ Func("func", ast::VariableList{}, ty.void_(), {stmt}, ast::AttributeList{});
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- EXPECT_EQ(Sem().Get(var)->StorageClass(), ast::StorageClass::kNone);
+ EXPECT_EQ(Sem().Get(var)->StorageClass(), ast::StorageClass::kNone);
}
TEST_F(ResolverTest, Access_SetForStorageBuffer) {
- // struct S { x : i32 };
- // var<storage> g : S;
- auto* s = Structure("S", {Member(Source{{12, 34}}, "x", ty.i32())});
- auto* var =
- Global(Source{{56, 78}}, "g", ty.Of(s), ast::StorageClass::kStorage,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(0),
- });
+ // struct S { x : i32 };
+ // var<storage> g : S;
+ auto* s = Structure("S", {Member(Source{{12, 34}}, "x", ty.i32())});
+ auto* var = Global(Source{{56, 78}}, "g", ty.Of(s), ast::StorageClass::kStorage,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(0),
+ });
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- EXPECT_EQ(Sem().Get(var)->Access(), ast::Access::kRead);
+ EXPECT_EQ(Sem().Get(var)->Access(), ast::Access::kRead);
}
TEST_F(ResolverTest, BindingPoint_SetForResources) {
- // @group(1) @binding(2) var s1 : sampler;
- // @group(3) @binding(4) var s2 : sampler;
- auto* s1 = Global(Sym(), ty.sampler(ast::SamplerKind::kSampler),
- ast::AttributeList{create<ast::GroupAttribute>(1),
- create<ast::BindingAttribute>(2)});
- auto* s2 = Global(Sym(), ty.sampler(ast::SamplerKind::kSampler),
- ast::AttributeList{create<ast::GroupAttribute>(3),
- create<ast::BindingAttribute>(4)});
+ // @group(1) @binding(2) var s1 : sampler;
+ // @group(3) @binding(4) var s2 : sampler;
+ auto* s1 = Global(
+ Sym(), ty.sampler(ast::SamplerKind::kSampler),
+ ast::AttributeList{create<ast::GroupAttribute>(1), create<ast::BindingAttribute>(2)});
+ auto* s2 = Global(
+ Sym(), ty.sampler(ast::SamplerKind::kSampler),
+ ast::AttributeList{create<ast::GroupAttribute>(3), create<ast::BindingAttribute>(4)});
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- EXPECT_EQ(Sem().Get<sem::GlobalVariable>(s1)->BindingPoint(),
- (sem::BindingPoint{1u, 2u}));
- EXPECT_EQ(Sem().Get<sem::GlobalVariable>(s2)->BindingPoint(),
- (sem::BindingPoint{3u, 4u}));
+ EXPECT_EQ(Sem().Get<sem::GlobalVariable>(s1)->BindingPoint(), (sem::BindingPoint{1u, 2u}));
+ EXPECT_EQ(Sem().Get<sem::GlobalVariable>(s2)->BindingPoint(), (sem::BindingPoint{3u, 4u}));
}
TEST_F(ResolverTest, Function_EntryPoints_StageAttribute) {
- // fn b() {}
- // fn c() { b(); }
- // fn a() { c(); }
- // fn ep_1() { a(); b(); }
- // fn ep_2() { c();}
- //
- // c -> {ep_1, ep_2}
- // a -> {ep_1}
- // b -> {ep_1, ep_2}
- // ep_1 -> {}
- // ep_2 -> {}
-
- Global("first", ty.f32(), ast::StorageClass::kPrivate);
- Global("second", ty.f32(), ast::StorageClass::kPrivate);
- Global("call_a", ty.f32(), ast::StorageClass::kPrivate);
- Global("call_b", ty.f32(), ast::StorageClass::kPrivate);
- Global("call_c", ty.f32(), ast::StorageClass::kPrivate);
-
- ast::VariableList params;
- auto* func_b =
- Func("b", params, ty.f32(), {Return(0.0f)}, ast::AttributeList{});
- auto* func_c =
- Func("c", params, ty.f32(), {Assign("second", Call("b")), Return(0.0f)},
- ast::AttributeList{});
-
- auto* func_a =
- Func("a", params, ty.f32(), {Assign("first", Call("c")), Return(0.0f)},
- ast::AttributeList{});
-
- auto* ep_1 = Func("ep_1", params, ty.void_(),
- {
- Assign("call_a", Call("a")),
- Assign("call_b", Call("b")),
- },
- ast::AttributeList{Stage(ast::PipelineStage::kCompute),
- WorkgroupSize(1)});
-
- auto* ep_2 = Func("ep_2", params, ty.void_(),
- {
- Assign("call_c", Call("c")),
- },
- ast::AttributeList{Stage(ast::PipelineStage::kCompute),
- WorkgroupSize(1)});
-
- ASSERT_TRUE(r()->Resolve()) << r()->error();
-
- auto* func_b_sem = Sem().Get(func_b);
- auto* func_a_sem = Sem().Get(func_a);
- auto* func_c_sem = Sem().Get(func_c);
- auto* ep_1_sem = Sem().Get(ep_1);
- auto* ep_2_sem = Sem().Get(ep_2);
- ASSERT_NE(func_b_sem, nullptr);
- ASSERT_NE(func_a_sem, nullptr);
- ASSERT_NE(func_c_sem, nullptr);
- ASSERT_NE(ep_1_sem, nullptr);
- ASSERT_NE(ep_2_sem, nullptr);
-
- EXPECT_EQ(func_b_sem->Parameters().size(), 0u);
- EXPECT_EQ(func_a_sem->Parameters().size(), 0u);
- EXPECT_EQ(func_c_sem->Parameters().size(), 0u);
-
- const auto& b_eps = func_b_sem->AncestorEntryPoints();
- ASSERT_EQ(2u, b_eps.size());
- EXPECT_EQ(Symbols().Register("ep_1"), b_eps[0]->Declaration()->symbol);
- EXPECT_EQ(Symbols().Register("ep_2"), b_eps[1]->Declaration()->symbol);
-
- const auto& a_eps = func_a_sem->AncestorEntryPoints();
- ASSERT_EQ(1u, a_eps.size());
- EXPECT_EQ(Symbols().Register("ep_1"), a_eps[0]->Declaration()->symbol);
-
- const auto& c_eps = func_c_sem->AncestorEntryPoints();
- ASSERT_EQ(2u, c_eps.size());
- EXPECT_EQ(Symbols().Register("ep_1"), c_eps[0]->Declaration()->symbol);
- EXPECT_EQ(Symbols().Register("ep_2"), c_eps[1]->Declaration()->symbol);
-
- EXPECT_TRUE(ep_1_sem->AncestorEntryPoints().empty());
- EXPECT_TRUE(ep_2_sem->AncestorEntryPoints().empty());
+ // fn b() {}
+ // fn c() { b(); }
+ // fn a() { c(); }
+ // fn ep_1() { a(); b(); }
+ // fn ep_2() { c();}
+ //
+ // c -> {ep_1, ep_2}
+ // a -> {ep_1}
+ // b -> {ep_1, ep_2}
+ // ep_1 -> {}
+ // ep_2 -> {}
+
+ Global("first", ty.f32(), ast::StorageClass::kPrivate);
+ Global("second", ty.f32(), ast::StorageClass::kPrivate);
+ Global("call_a", ty.f32(), ast::StorageClass::kPrivate);
+ Global("call_b", ty.f32(), ast::StorageClass::kPrivate);
+ Global("call_c", ty.f32(), ast::StorageClass::kPrivate);
+
+ ast::VariableList params;
+ auto* func_b = Func("b", params, ty.f32(), {Return(0_f)}, ast::AttributeList{});
+ auto* func_c = Func("c", params, ty.f32(), {Assign("second", Call("b")), Return(0_f)},
+ ast::AttributeList{});
+
+ auto* func_a = Func("a", params, ty.f32(), {Assign("first", Call("c")), Return(0_f)},
+ ast::AttributeList{});
+
+ auto* ep_1 = Func("ep_1", params, ty.void_(),
+ {
+ Assign("call_a", Call("a")),
+ Assign("call_b", Call("b")),
+ },
+ ast::AttributeList{Stage(ast::PipelineStage::kCompute), WorkgroupSize(1_i)});
+
+ auto* ep_2 = Func("ep_2", params, ty.void_(),
+ {
+ Assign("call_c", Call("c")),
+ },
+ ast::AttributeList{Stage(ast::PipelineStage::kCompute), WorkgroupSize(1_i)});
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
+
+ auto* func_b_sem = Sem().Get(func_b);
+ auto* func_a_sem = Sem().Get(func_a);
+ auto* func_c_sem = Sem().Get(func_c);
+ auto* ep_1_sem = Sem().Get(ep_1);
+ auto* ep_2_sem = Sem().Get(ep_2);
+ ASSERT_NE(func_b_sem, nullptr);
+ ASSERT_NE(func_a_sem, nullptr);
+ ASSERT_NE(func_c_sem, nullptr);
+ ASSERT_NE(ep_1_sem, nullptr);
+ ASSERT_NE(ep_2_sem, nullptr);
+
+ EXPECT_EQ(func_b_sem->Parameters().size(), 0u);
+ EXPECT_EQ(func_a_sem->Parameters().size(), 0u);
+ EXPECT_EQ(func_c_sem->Parameters().size(), 0u);
+
+ const auto& b_eps = func_b_sem->AncestorEntryPoints();
+ ASSERT_EQ(2u, b_eps.size());
+ EXPECT_EQ(Symbols().Register("ep_1"), b_eps[0]->Declaration()->symbol);
+ EXPECT_EQ(Symbols().Register("ep_2"), b_eps[1]->Declaration()->symbol);
+
+ const auto& a_eps = func_a_sem->AncestorEntryPoints();
+ ASSERT_EQ(1u, a_eps.size());
+ EXPECT_EQ(Symbols().Register("ep_1"), a_eps[0]->Declaration()->symbol);
+
+ const auto& c_eps = func_c_sem->AncestorEntryPoints();
+ ASSERT_EQ(2u, c_eps.size());
+ EXPECT_EQ(Symbols().Register("ep_1"), c_eps[0]->Declaration()->symbol);
+ EXPECT_EQ(Symbols().Register("ep_2"), c_eps[1]->Declaration()->symbol);
+
+ EXPECT_TRUE(ep_1_sem->AncestorEntryPoints().empty());
+ EXPECT_TRUE(ep_2_sem->AncestorEntryPoints().empty());
}
// Check for linear-time traversal of functions reachable from entry points.
// See: crbug.com/tint/245
TEST_F(ResolverTest, Function_EntryPoints_LinearTime) {
- // fn lNa() { }
- // fn lNb() { }
- // ...
- // fn l2a() { l3a(); l3b(); }
- // fn l2b() { l3a(); l3b(); }
- // fn l1a() { l2a(); l2b(); }
- // fn l1b() { l2a(); l2b(); }
- // fn main() { l1a(); l1b(); }
-
- static constexpr int levels = 64;
-
- auto fn_a = [](int level) { return "l" + std::to_string(level + 1) + "a"; };
- auto fn_b = [](int level) { return "l" + std::to_string(level + 1) + "b"; };
-
- Func(fn_a(levels), {}, ty.void_(), {}, {});
- Func(fn_b(levels), {}, ty.void_(), {}, {});
+ // fn lNa() { }
+ // fn lNb() { }
+ // ...
+ // fn l2a() { l3a(); l3b(); }
+ // fn l2b() { l3a(); l3b(); }
+ // fn l1a() { l2a(); l2b(); }
+ // fn l1b() { l2a(); l2b(); }
+ // fn main() { l1a(); l1b(); }
+
+ static constexpr int levels = 64;
+
+ auto fn_a = [](int level) { return "l" + std::to_string(level + 1) + "a"; };
+ auto fn_b = [](int level) { return "l" + std::to_string(level + 1) + "b"; };
+
+ Func(fn_a(levels), {}, ty.void_(), {}, {});
+ Func(fn_b(levels), {}, ty.void_(), {}, {});
+
+ for (int i = levels - 1; i >= 0; i--) {
+ Func(fn_a(i), {}, ty.void_(),
+ {
+ CallStmt(Call(fn_a(i + 1))),
+ CallStmt(Call(fn_b(i + 1))),
+ },
+ {});
+ Func(fn_b(i), {}, ty.void_(),
+ {
+ CallStmt(Call(fn_a(i + 1))),
+ CallStmt(Call(fn_b(i + 1))),
+ },
+ {});
+ }
- for (int i = levels - 1; i >= 0; i--) {
- Func(fn_a(i), {}, ty.void_(),
- {
- CallStmt(Call(fn_a(i + 1))),
- CallStmt(Call(fn_b(i + 1))),
- },
- {});
- Func(fn_b(i), {}, ty.void_(),
+ Func("main", {}, ty.void_(),
{
- CallStmt(Call(fn_a(i + 1))),
- CallStmt(Call(fn_b(i + 1))),
+ CallStmt(Call(fn_a(0))),
+ CallStmt(Call(fn_b(0))),
},
- {});
- }
+ {Stage(ast::PipelineStage::kCompute), WorkgroupSize(1_i)});
- Func("main", {}, ty.void_(),
- {
- CallStmt(Call(fn_a(0))),
- CallStmt(Call(fn_b(0))),
- },
- {Stage(ast::PipelineStage::kCompute), WorkgroupSize(1)});
-
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
}
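The crbug.com/tint/245 comment above is about traversal cost: this program declares only about 130 functions (two per level plus main), but the number of distinct call paths from main doubles at every level, on the order of 2^64 here, so a resolver that re-walked each path would blow up. A minimal standalone sketch, using illustrative names rather than the resolver's actual data structures, of the visited-set walk that keeps the traversal linear in functions and call edges:

    #include <string>
    #include <unordered_map>
    #include <unordered_set>
    #include <vector>

    using CallGraph = std::unordered_map<std::string, std::vector<std::string>>;

    // Visit every function reachable from `fn`, processing each one exactly once.
    // The visited set is what makes this O(functions + call edges) rather than
    // O(distinct call paths), which is the property the test above guards.
    void VisitReachable(const CallGraph& graph,
                        const std::string& fn,
                        std::unordered_set<std::string>& visited) {
        if (!visited.insert(fn).second) {
            return;  // already handled via another path through the DAG
        }
        auto it = graph.find(fn);
        if (it == graph.end()) {
            return;  // leaf function with no callees
        }
        for (const auto& callee : it->second) {
            VisitReachable(graph, callee, visited);
        }
    }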
// Test for crbug.com/tint/728
TEST_F(ResolverTest, ASTNodesAreReached) {
- Structure("A", {Member("x", ty.array<f32, 4>(4))});
- Structure("B", {Member("x", ty.array<f32, 4>(4))});
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ Structure("A", {Member("x", ty.array<f32, 4>(4))});
+ Structure("B", {Member("x", ty.array<f32, 4>(4))});
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverTest, ASTNodeNotReached) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b;
- b.Expr("expr");
- Resolver(&b).Resolve();
- },
- "internal compiler error: AST node 'tint::ast::IdentifierExpression' was "
- "not reached by the resolver");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b;
+ b.Expr("expr");
+ Resolver(&b).Resolve();
+ },
+ "internal compiler error: AST node 'tint::ast::IdentifierExpression' was not reached by "
+ "the resolver");
}
TEST_F(ResolverTest, ASTNodeReachedTwice) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b;
- auto* expr = b.Expr(1);
- b.Global("a", b.ty.i32(), ast::StorageClass::kPrivate, expr);
- b.Global("b", b.ty.i32(), ast::StorageClass::kPrivate, expr);
- Resolver(&b).Resolve();
- },
- "internal compiler error: AST node 'tint::ast::SintLiteralExpression' "
- "was encountered twice in the same AST of a Program");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b;
+ auto* expr = b.Expr(1_i);
+ b.Global("a", b.ty.i32(), ast::StorageClass::kPrivate, expr);
+ b.Global("b", b.ty.i32(), ast::StorageClass::kPrivate, expr);
+ Resolver(&b).Resolve();
+ },
+ "internal compiler error: AST node 'tint::ast::IntLiteralExpression' was encountered twice "
+ "in the same AST of a Program");
}
TEST_F(ResolverTest, UnaryOp_Not) {
- Global("ident", ty.vec4<f32>(), ast::StorageClass::kPrivate);
- auto* der = create<ast::UnaryOpExpression>(ast::UnaryOp::kNot,
- Expr(Source{{12, 34}}, "ident"));
- WrapInFunction(der);
+ Global("ident", ty.vec4<f32>(), ast::StorageClass::kPrivate);
+ auto* der = create<ast::UnaryOpExpression>(ast::UnaryOp::kNot, Expr(Source{{12, 34}}, "ident"));
+ WrapInFunction(der);
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: cannot logical negate expression of type 'vec4<f32>");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_THAT(r()->error(), HasSubstr("error: no matching overload for operator ! (vec4<f32>)"));
}
TEST_F(ResolverTest, UnaryOp_Complement) {
- Global("ident", ty.vec4<f32>(), ast::StorageClass::kPrivate);
- auto* der = create<ast::UnaryOpExpression>(ast::UnaryOp::kComplement,
- Expr(Source{{12, 34}}, "ident"));
- WrapInFunction(der);
+ Global("ident", ty.vec4<f32>(), ast::StorageClass::kPrivate);
+ auto* der =
+ create<ast::UnaryOpExpression>(ast::UnaryOp::kComplement, Expr(Source{{12, 34}}, "ident"));
+ WrapInFunction(der);
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- "12:34 error: cannot bitwise complement expression of type 'vec4<f32>");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_THAT(r()->error(), HasSubstr("error: no matching overload for operator ~ (vec4<f32>)"));
}
TEST_F(ResolverTest, UnaryOp_Negation) {
- Global("ident", ty.u32(), ast::StorageClass::kPrivate);
- auto* der = create<ast::UnaryOpExpression>(ast::UnaryOp::kNegation,
- Expr(Source{{12, 34}}, "ident"));
- WrapInFunction(der);
+ Global("ident", ty.u32(), ast::StorageClass::kPrivate);
+ auto* der =
+ create<ast::UnaryOpExpression>(ast::UnaryOp::kNegation, Expr(Source{{12, 34}}, "ident"));
+ WrapInFunction(der);
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), "12:34 error: cannot negate expression of type 'u32");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_THAT(r()->error(), HasSubstr("error: no matching overload for operator - (u32)"));
}
TEST_F(ResolverTest, TextureSampler_TextureSample) {
- Global("t", ty.sampled_texture(ast::TextureDimension::k2d, ty.f32()),
- GroupAndBinding(1, 1));
- Global("s", ty.sampler(ast::SamplerKind::kSampler), GroupAndBinding(1, 2));
+ Global("t", ty.sampled_texture(ast::TextureDimension::k2d, ty.f32()), GroupAndBinding(1, 1));
+ Global("s", ty.sampler(ast::SamplerKind::kSampler), GroupAndBinding(1, 2));
- auto* call = CallStmt(Call("textureSample", "t", "s", vec2<f32>(1.0f, 2.0f)));
- const ast::Function* f = Func("test_function", {}, ty.void_(), {call},
- {Stage(ast::PipelineStage::kFragment)});
+ auto* call = CallStmt(Call("textureSample", "t", "s", vec2<f32>(1_f, 2_f)));
+ const ast::Function* f =
+ Func("test_function", {}, ty.void_(), {call}, {Stage(ast::PipelineStage::kFragment)});
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- const sem::Function* sf = Sem().Get(f);
- auto pairs = sf->TextureSamplerPairs();
- ASSERT_EQ(pairs.size(), 1u);
- EXPECT_TRUE(pairs[0].first != nullptr);
- EXPECT_TRUE(pairs[0].second != nullptr);
+ const sem::Function* sf = Sem().Get(f);
+ auto pairs = sf->TextureSamplerPairs();
+ ASSERT_EQ(pairs.size(), 1u);
+ EXPECT_TRUE(pairs[0].first != nullptr);
+ EXPECT_TRUE(pairs[0].second != nullptr);
}
TEST_F(ResolverTest, TextureSampler_TextureSampleInFunction) {
- Global("t", ty.sampled_texture(ast::TextureDimension::k2d, ty.f32()),
- GroupAndBinding(1, 1));
- Global("s", ty.sampler(ast::SamplerKind::kSampler), GroupAndBinding(1, 2));
+ Global("t", ty.sampled_texture(ast::TextureDimension::k2d, ty.f32()), GroupAndBinding(1, 1));
+ Global("s", ty.sampler(ast::SamplerKind::kSampler), GroupAndBinding(1, 2));
- auto* inner_call =
- CallStmt(Call("textureSample", "t", "s", vec2<f32>(1.0f, 2.0f)));
- const ast::Function* inner_func =
- Func("inner_func", {}, ty.void_(), {inner_call});
- auto* outer_call = CallStmt(Call("inner_func"));
- const ast::Function* outer_func =
- Func("outer_func", {}, ty.void_(), {outer_call},
- {Stage(ast::PipelineStage::kFragment)});
+ auto* inner_call = CallStmt(Call("textureSample", "t", "s", vec2<f32>(1_f, 2_f)));
+ const ast::Function* inner_func = Func("inner_func", {}, ty.void_(), {inner_call});
+ auto* outer_call = CallStmt(Call("inner_func"));
+ const ast::Function* outer_func =
+ Func("outer_func", {}, ty.void_(), {outer_call}, {Stage(ast::PipelineStage::kFragment)});
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- auto inner_pairs = Sem().Get(inner_func)->TextureSamplerPairs();
- ASSERT_EQ(inner_pairs.size(), 1u);
- EXPECT_TRUE(inner_pairs[0].first != nullptr);
- EXPECT_TRUE(inner_pairs[0].second != nullptr);
+ auto inner_pairs = Sem().Get(inner_func)->TextureSamplerPairs();
+ ASSERT_EQ(inner_pairs.size(), 1u);
+ EXPECT_TRUE(inner_pairs[0].first != nullptr);
+ EXPECT_TRUE(inner_pairs[0].second != nullptr);
- auto outer_pairs = Sem().Get(outer_func)->TextureSamplerPairs();
- ASSERT_EQ(outer_pairs.size(), 1u);
- EXPECT_TRUE(outer_pairs[0].first != nullptr);
- EXPECT_TRUE(outer_pairs[0].second != nullptr);
+ auto outer_pairs = Sem().Get(outer_func)->TextureSamplerPairs();
+ ASSERT_EQ(outer_pairs.size(), 1u);
+ EXPECT_TRUE(outer_pairs[0].first != nullptr);
+ EXPECT_TRUE(outer_pairs[0].second != nullptr);
}
TEST_F(ResolverTest, TextureSampler_TextureSampleFunctionDiamondSameVariables) {
- Global("t", ty.sampled_texture(ast::TextureDimension::k2d, ty.f32()),
- GroupAndBinding(1, 1));
- Global("s", ty.sampler(ast::SamplerKind::kSampler), GroupAndBinding(1, 2));
-
- auto* inner_call_1 =
- CallStmt(Call("textureSample", "t", "s", vec2<f32>(1.0f, 2.0f)));
- const ast::Function* inner_func_1 =
- Func("inner_func_1", {}, ty.void_(), {inner_call_1});
- auto* inner_call_2 =
- CallStmt(Call("textureSample", "t", "s", vec2<f32>(3.0f, 4.0f)));
- const ast::Function* inner_func_2 =
- Func("inner_func_2", {}, ty.void_(), {inner_call_2});
- auto* outer_call_1 = CallStmt(Call("inner_func_1"));
- auto* outer_call_2 = CallStmt(Call("inner_func_2"));
- const ast::Function* outer_func =
- Func("outer_func", {}, ty.void_(), {outer_call_1, outer_call_2},
- {Stage(ast::PipelineStage::kFragment)});
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
-
- auto inner_pairs_1 = Sem().Get(inner_func_1)->TextureSamplerPairs();
- ASSERT_EQ(inner_pairs_1.size(), 1u);
- EXPECT_TRUE(inner_pairs_1[0].first != nullptr);
- EXPECT_TRUE(inner_pairs_1[0].second != nullptr);
-
- auto inner_pairs_2 = Sem().Get(inner_func_2)->TextureSamplerPairs();
- ASSERT_EQ(inner_pairs_1.size(), 1u);
- EXPECT_TRUE(inner_pairs_2[0].first != nullptr);
- EXPECT_TRUE(inner_pairs_2[0].second != nullptr);
-
- auto outer_pairs = Sem().Get(outer_func)->TextureSamplerPairs();
- ASSERT_EQ(outer_pairs.size(), 1u);
- EXPECT_TRUE(outer_pairs[0].first != nullptr);
- EXPECT_TRUE(outer_pairs[0].second != nullptr);
-}
-
-TEST_F(ResolverTest,
- TextureSampler_TextureSampleFunctionDiamondDifferentVariables) {
- Global("t1", ty.sampled_texture(ast::TextureDimension::k2d, ty.f32()),
- GroupAndBinding(1, 1));
- Global("t2", ty.sampled_texture(ast::TextureDimension::k2d, ty.f32()),
- GroupAndBinding(1, 2));
- Global("s", ty.sampler(ast::SamplerKind::kSampler), GroupAndBinding(1, 3));
-
- auto* inner_call_1 =
- CallStmt(Call("textureSample", "t1", "s", vec2<f32>(1.0f, 2.0f)));
- const ast::Function* inner_func_1 =
- Func("inner_func_1", {}, ty.void_(), {inner_call_1});
- auto* inner_call_2 =
- CallStmt(Call("textureSample", "t2", "s", vec2<f32>(3.0f, 4.0f)));
- const ast::Function* inner_func_2 =
- Func("inner_func_2", {}, ty.void_(), {inner_call_2});
- auto* outer_call_1 = CallStmt(Call("inner_func_1"));
- auto* outer_call_2 = CallStmt(Call("inner_func_2"));
- const ast::Function* outer_func =
- Func("outer_func", {}, ty.void_(), {outer_call_1, outer_call_2},
- {Stage(ast::PipelineStage::kFragment)});
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
-
- auto inner_pairs_1 = Sem().Get(inner_func_1)->TextureSamplerPairs();
- ASSERT_EQ(inner_pairs_1.size(), 1u);
- EXPECT_TRUE(inner_pairs_1[0].first != nullptr);
- EXPECT_TRUE(inner_pairs_1[0].second != nullptr);
-
- auto inner_pairs_2 = Sem().Get(inner_func_2)->TextureSamplerPairs();
- ASSERT_EQ(inner_pairs_2.size(), 1u);
- EXPECT_TRUE(inner_pairs_2[0].first != nullptr);
- EXPECT_TRUE(inner_pairs_2[0].second != nullptr);
-
- auto outer_pairs = Sem().Get(outer_func)->TextureSamplerPairs();
- ASSERT_EQ(outer_pairs.size(), 2u);
- EXPECT_TRUE(outer_pairs[0].first == inner_pairs_1[0].first);
- EXPECT_TRUE(outer_pairs[0].second == inner_pairs_1[0].second);
- EXPECT_TRUE(outer_pairs[1].first == inner_pairs_2[0].first);
- EXPECT_TRUE(outer_pairs[1].second == inner_pairs_2[0].second);
+ Global("t", ty.sampled_texture(ast::TextureDimension::k2d, ty.f32()), GroupAndBinding(1, 1));
+ Global("s", ty.sampler(ast::SamplerKind::kSampler), GroupAndBinding(1, 2));
+
+ auto* inner_call_1 = CallStmt(Call("textureSample", "t", "s", vec2<f32>(1_f, 2_f)));
+ const ast::Function* inner_func_1 = Func("inner_func_1", {}, ty.void_(), {inner_call_1});
+ auto* inner_call_2 = CallStmt(Call("textureSample", "t", "s", vec2<f32>(3_f, 4_f)));
+ const ast::Function* inner_func_2 = Func("inner_func_2", {}, ty.void_(), {inner_call_2});
+ auto* outer_call_1 = CallStmt(Call("inner_func_1"));
+ auto* outer_call_2 = CallStmt(Call("inner_func_2"));
+ const ast::Function* outer_func =
+ Func("outer_func", {}, ty.void_(), {outer_call_1, outer_call_2},
+ {Stage(ast::PipelineStage::kFragment)});
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+
+ auto inner_pairs_1 = Sem().Get(inner_func_1)->TextureSamplerPairs();
+ ASSERT_EQ(inner_pairs_1.size(), 1u);
+ EXPECT_TRUE(inner_pairs_1[0].first != nullptr);
+ EXPECT_TRUE(inner_pairs_1[0].second != nullptr);
+
+ auto inner_pairs_2 = Sem().Get(inner_func_2)->TextureSamplerPairs();
+    ASSERT_EQ(inner_pairs_2.size(), 1u);
+ EXPECT_TRUE(inner_pairs_2[0].first != nullptr);
+ EXPECT_TRUE(inner_pairs_2[0].second != nullptr);
+
+ auto outer_pairs = Sem().Get(outer_func)->TextureSamplerPairs();
+ ASSERT_EQ(outer_pairs.size(), 1u);
+ EXPECT_TRUE(outer_pairs[0].first != nullptr);
+ EXPECT_TRUE(outer_pairs[0].second != nullptr);
+}
+
+TEST_F(ResolverTest, TextureSampler_TextureSampleFunctionDiamondDifferentVariables) {
+ Global("t1", ty.sampled_texture(ast::TextureDimension::k2d, ty.f32()), GroupAndBinding(1, 1));
+ Global("t2", ty.sampled_texture(ast::TextureDimension::k2d, ty.f32()), GroupAndBinding(1, 2));
+ Global("s", ty.sampler(ast::SamplerKind::kSampler), GroupAndBinding(1, 3));
+
+ auto* inner_call_1 = CallStmt(Call("textureSample", "t1", "s", vec2<f32>(1_f, 2_f)));
+ const ast::Function* inner_func_1 = Func("inner_func_1", {}, ty.void_(), {inner_call_1});
+ auto* inner_call_2 = CallStmt(Call("textureSample", "t2", "s", vec2<f32>(3_f, 4_f)));
+ const ast::Function* inner_func_2 = Func("inner_func_2", {}, ty.void_(), {inner_call_2});
+ auto* outer_call_1 = CallStmt(Call("inner_func_1"));
+ auto* outer_call_2 = CallStmt(Call("inner_func_2"));
+ const ast::Function* outer_func =
+ Func("outer_func", {}, ty.void_(), {outer_call_1, outer_call_2},
+ {Stage(ast::PipelineStage::kFragment)});
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+
+ auto inner_pairs_1 = Sem().Get(inner_func_1)->TextureSamplerPairs();
+ ASSERT_EQ(inner_pairs_1.size(), 1u);
+ EXPECT_TRUE(inner_pairs_1[0].first != nullptr);
+ EXPECT_TRUE(inner_pairs_1[0].second != nullptr);
+
+ auto inner_pairs_2 = Sem().Get(inner_func_2)->TextureSamplerPairs();
+ ASSERT_EQ(inner_pairs_2.size(), 1u);
+ EXPECT_TRUE(inner_pairs_2[0].first != nullptr);
+ EXPECT_TRUE(inner_pairs_2[0].second != nullptr);
+
+ auto outer_pairs = Sem().Get(outer_func)->TextureSamplerPairs();
+ ASSERT_EQ(outer_pairs.size(), 2u);
+ EXPECT_TRUE(outer_pairs[0].first == inner_pairs_1[0].first);
+ EXPECT_TRUE(outer_pairs[0].second == inner_pairs_1[0].second);
+ EXPECT_TRUE(outer_pairs[1].first == inner_pairs_2[0].first);
+ EXPECT_TRUE(outer_pairs[1].second == inner_pairs_2[0].second);
}
TEST_F(ResolverTest, TextureSampler_TextureDimensions) {
- Global("t", ty.sampled_texture(ast::TextureDimension::k2d, ty.f32()),
- GroupAndBinding(1, 2));
+ Global("t", ty.sampled_texture(ast::TextureDimension::k2d, ty.f32()), GroupAndBinding(1, 2));
- auto* call = Call("textureDimensions", "t");
- const ast::Function* f = WrapInFunction(call);
+ auto* call = Call("textureDimensions", "t");
+ const ast::Function* f = WrapInFunction(call);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- const sem::Function* sf = Sem().Get(f);
- auto pairs = sf->TextureSamplerPairs();
- ASSERT_EQ(pairs.size(), 1u);
- EXPECT_TRUE(pairs[0].first != nullptr);
- EXPECT_TRUE(pairs[0].second == nullptr);
+ const sem::Function* sf = Sem().Get(f);
+ auto pairs = sf->TextureSamplerPairs();
+ ASSERT_EQ(pairs.size(), 1u);
+ EXPECT_TRUE(pairs[0].first != nullptr);
+ EXPECT_TRUE(pairs[0].second == nullptr);
}
TEST_F(ResolverTest, ModuleDependencyOrderedDeclarations) {
- auto* f0 = Func("f0", {}, ty.void_(), {});
- auto* v0 = Global("v0", ty.i32(), ast::StorageClass::kPrivate);
- auto* a0 = Alias("a0", ty.i32());
- auto* s0 = Structure("s0", {Member("m", ty.i32())});
- auto* f1 = Func("f1", {}, ty.void_(), {});
- auto* v1 = Global("v1", ty.i32(), ast::StorageClass::kPrivate);
- auto* a1 = Alias("a1", ty.i32());
- auto* s1 = Structure("s1", {Member("m", ty.i32())});
- auto* f2 = Func("f2", {}, ty.void_(), {});
- auto* v2 = Global("v2", ty.i32(), ast::StorageClass::kPrivate);
- auto* a2 = Alias("a2", ty.i32());
- auto* s2 = Structure("s2", {Member("m", ty.i32())});
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
-
- ASSERT_NE(Sem().Module(), nullptr);
- EXPECT_THAT(Sem().Module()->DependencyOrderedDeclarations(),
- ElementsAre(f0, v0, a0, s0, f1, v1, a1, s1, f2, v2, a2, s2));
+ auto* f0 = Func("f0", {}, ty.void_(), {});
+ auto* v0 = Global("v0", ty.i32(), ast::StorageClass::kPrivate);
+ auto* a0 = Alias("a0", ty.i32());
+ auto* s0 = Structure("s0", {Member("m", ty.i32())});
+ auto* f1 = Func("f1", {}, ty.void_(), {});
+ auto* v1 = Global("v1", ty.i32(), ast::StorageClass::kPrivate);
+ auto* a1 = Alias("a1", ty.i32());
+ auto* s1 = Structure("s1", {Member("m", ty.i32())});
+ auto* f2 = Func("f2", {}, ty.void_(), {});
+ auto* v2 = Global("v2", ty.i32(), ast::StorageClass::kPrivate);
+ auto* a2 = Alias("a2", ty.i32());
+ auto* s2 = Structure("s2", {Member("m", ty.i32())});
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+
+ ASSERT_NE(Sem().Module(), nullptr);
+ EXPECT_THAT(Sem().Module()->DependencyOrderedDeclarations(),
+ ElementsAre(f0, v0, a0, s0, f1, v1, a1, s1, f2, v2, a2, s2));
+}
+
+constexpr size_t kMaxExpressionDepth = 512U;
+
+TEST_F(ResolverTest, MaxExpressionDepth_Pass) {
+ auto* b = Var("b", ty.i32());
+ const ast::Expression* chain = nullptr;
+ for (size_t i = 0; i < kMaxExpressionDepth; ++i) {
+ chain = Add(chain ? chain : Expr("b"), Expr("b"));
+ }
+ auto* a = Let("a", nullptr, chain);
+ WrapInFunction(b, a);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+}
+
+TEST_F(ResolverTest, MaxExpressionDepth_Fail) {
+ auto* b = Var("b", ty.i32());
+ const ast::Expression* chain = nullptr;
+ for (size_t i = 0; i < kMaxExpressionDepth + 1; ++i) {
+ chain = Add(chain ? chain : Expr("b"), Expr("b"));
+ }
+ auto* a = Let("a", nullptr, chain);
+ WrapInFunction(b, a);
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_THAT(r()->error(), HasSubstr("error: reached max expression depth of " +
+ std::to_string(kMaxExpressionDepth)));
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/resolver/resolver_test_helper.h b/chromium/third_party/dawn/src/tint/resolver/resolver_test_helper.h
index 236b854b799..a0d71e55fe1 100644
--- a/chromium/third_party/dawn/src/tint/resolver/resolver_test_helper.h
+++ b/chromium/third_party/dawn/src/tint/resolver/resolver_test_helper.h
@@ -15,6 +15,7 @@
#ifndef SRC_TINT_RESOLVER_RESOLVER_TEST_HELPER_H_
#define SRC_TINT_RESOLVER_RESOLVER_TEST_HELPER_H_
+#include <functional>
#include <memory>
#include <string>
#include <vector>
@@ -22,6 +23,8 @@
#include "gtest/gtest.h"
#include "src/tint/program_builder.h"
#include "src/tint/resolver/resolver.h"
+#include "src/tint/sem/abstract_float.h"
+#include "src/tint/sem/abstract_int.h"
#include "src/tint/sem/expression.h"
#include "src/tint/sem/statement.h"
#include "src/tint/sem/variable.h"
@@ -30,101 +33,95 @@ namespace tint::resolver {
/// Helper class for testing
class TestHelper : public ProgramBuilder {
- public:
- /// Constructor
- TestHelper();
-
- /// Destructor
- ~TestHelper() override;
-
- /// @return a pointer to the Resolver
- Resolver* r() const { return resolver_.get(); }
-
- /// Returns the statement that holds the given expression.
- /// @param expr the ast::Expression
- /// @return the ast::Statement of the ast::Expression, or nullptr if the
- /// expression is not owned by a statement.
- const ast::Statement* StmtOf(const ast::Expression* expr) {
- auto* sem_stmt = Sem().Get(expr)->Stmt();
- return sem_stmt ? sem_stmt->Declaration() : nullptr;
- }
-
- /// Returns the BlockStatement that holds the given statement.
- /// @param stmt the ast::Statement
- /// @return the ast::BlockStatement that holds the ast::Statement, or nullptr
- /// if the statement is not owned by a BlockStatement.
- const ast::BlockStatement* BlockOf(const ast::Statement* stmt) {
- auto* sem_stmt = Sem().Get(stmt);
- return sem_stmt ? sem_stmt->Block()->Declaration() : nullptr;
- }
-
- /// Returns the BlockStatement that holds the given expression.
- /// @param expr the ast::Expression
- /// @return the ast::Statement of the ast::Expression, or nullptr if the
- /// expression is not indirectly owned by a BlockStatement.
- const ast::BlockStatement* BlockOf(const ast::Expression* expr) {
- auto* sem_stmt = Sem().Get(expr)->Stmt();
- return sem_stmt ? sem_stmt->Block()->Declaration() : nullptr;
- }
-
- /// Returns the semantic variable for the given identifier expression.
- /// @param expr the identifier expression
- /// @return the resolved sem::Variable of the identifier, or nullptr if
- /// the expression did not resolve to a variable.
- const sem::Variable* VarOf(const ast::Expression* expr) {
- auto* sem_ident = Sem().Get(expr);
- auto* var_user = sem_ident ? sem_ident->As<sem::VariableUser>() : nullptr;
- return var_user ? var_user->Variable() : nullptr;
- }
-
- /// Checks that all the users of the given variable are as expected
- /// @param var the variable to check
- /// @param expected_users the expected users of the variable
- /// @return true if all users are as expected
- bool CheckVarUsers(const ast::Variable* var,
- std::vector<const ast::Expression*>&& expected_users) {
- auto& var_users = Sem().Get(var)->Users();
- if (var_users.size() != expected_users.size()) {
- return false;
- }
- for (size_t i = 0; i < var_users.size(); i++) {
- if (var_users[i]->Declaration() != expected_users[i]) {
- return false;
- }
- }
- return true;
- }
-
- /// @param type a type
- /// @returns the name for `type` that closely resembles how it would be
- /// declared in WGSL.
- std::string FriendlyName(const ast::Type* type) {
- return type->FriendlyName(Symbols());
- }
-
- /// @param type a type
- /// @returns the name for `type` that closely resembles how it would be
- /// declared in WGSL.
- std::string FriendlyName(const sem::Type* type) {
- return type->FriendlyName(Symbols());
- }
-
- private:
- std::unique_ptr<Resolver> resolver_;
+ public:
+ /// Constructor
+ TestHelper();
+
+ /// Destructor
+ ~TestHelper() override;
+
+ /// @return a pointer to the Resolver
+ Resolver* r() const { return resolver_.get(); }
+
+ /// @return a pointer to the validator
+ const Validator* v() const { return resolver_->GetValidatorForTesting(); }
+
+ /// Returns the statement that holds the given expression.
+ /// @param expr the ast::Expression
+ /// @return the ast::Statement of the ast::Expression, or nullptr if the
+ /// expression is not owned by a statement.
+ const ast::Statement* StmtOf(const ast::Expression* expr) {
+ auto* sem_stmt = Sem().Get(expr)->Stmt();
+ return sem_stmt ? sem_stmt->Declaration() : nullptr;
+ }
+
+ /// Returns the BlockStatement that holds the given statement.
+ /// @param stmt the ast::Statement
+ /// @return the ast::BlockStatement that holds the ast::Statement, or nullptr
+ /// if the statement is not owned by a BlockStatement.
+ const ast::BlockStatement* BlockOf(const ast::Statement* stmt) {
+ auto* sem_stmt = Sem().Get(stmt);
+ return sem_stmt ? sem_stmt->Block()->Declaration() : nullptr;
+ }
+
+ /// Returns the BlockStatement that holds the given expression.
+ /// @param expr the ast::Expression
+ /// @return the ast::Statement of the ast::Expression, or nullptr if the
+ /// expression is not indirectly owned by a BlockStatement.
+ const ast::BlockStatement* BlockOf(const ast::Expression* expr) {
+ auto* sem_stmt = Sem().Get(expr)->Stmt();
+ return sem_stmt ? sem_stmt->Block()->Declaration() : nullptr;
+ }
+
+ /// Returns the semantic variable for the given identifier expression.
+ /// @param expr the identifier expression
+ /// @return the resolved sem::Variable of the identifier, or nullptr if
+ /// the expression did not resolve to a variable.
+ const sem::Variable* VarOf(const ast::Expression* expr) {
+ auto* sem_ident = Sem().Get(expr);
+ auto* var_user = sem_ident ? sem_ident->As<sem::VariableUser>() : nullptr;
+ return var_user ? var_user->Variable() : nullptr;
+ }
+
+ /// Checks that all the users of the given variable are as expected
+ /// @param var the variable to check
+ /// @param expected_users the expected users of the variable
+ /// @return true if all users are as expected
+ bool CheckVarUsers(const ast::Variable* var,
+ std::vector<const ast::Expression*>&& expected_users) {
+ auto& var_users = Sem().Get(var)->Users();
+ if (var_users.size() != expected_users.size()) {
+ return false;
+ }
+ for (size_t i = 0; i < var_users.size(); i++) {
+ if (var_users[i]->Declaration() != expected_users[i]) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ /// @param type a type
+ /// @returns the name for `type` that closely resembles how it would be
+ /// declared in WGSL.
+ std::string FriendlyName(const ast::Type* type) { return type->FriendlyName(Symbols()); }
+
+ /// @param type a type
+ /// @returns the name for `type` that closely resembles how it would be
+ /// declared in WGSL.
+ std::string FriendlyName(const sem::Type* type) { return type->FriendlyName(Symbols()); }
+
+ private:
+ std::unique_ptr<Resolver> resolver_;
};
class ResolverTest : public TestHelper, public testing::Test {};
template <typename T>
-class ResolverTestWithParam : public TestHelper,
- public testing::TestWithParam<T> {};
+class ResolverTestWithParam : public TestHelper, public testing::TestWithParam<T> {};
namespace builder {
-using i32 = ProgramBuilder::i32;
-using u32 = ProgramBuilder::u32;
-using f32 = ProgramBuilder::f32;
-
template <uint32_t N, typename T>
struct vec {};
@@ -174,310 +171,426 @@ template <typename TO>
struct ptr {};
using ast_type_func_ptr = const ast::Type* (*)(ProgramBuilder& b);
-using ast_expr_func_ptr = const ast::Expression* (*)(ProgramBuilder& b,
- int elem_value);
+using ast_expr_func_ptr = const ast::Expression* (*)(ProgramBuilder& b, double elem_value);
using sem_type_func_ptr = const sem::Type* (*)(ProgramBuilder& b);
template <typename T>
struct DataType {};
+/// Helper that represents no-type. Returns nullptr for all static methods.
+template <>
+struct DataType<void> {
+ /// @return nullptr
+ static inline const ast::Type* AST(ProgramBuilder&) { return nullptr; }
+ /// @return nullptr
+ static inline const sem::Type* Sem(ProgramBuilder&) { return nullptr; }
+};
+
/// Helper for building bool types and expressions
template <>
struct DataType<bool> {
- /// false as bool is not a composite type
- static constexpr bool is_composite = false;
-
- /// @param b the ProgramBuilder
- /// @return a new AST bool type
- static inline const ast::Type* AST(ProgramBuilder& b) { return b.ty.bool_(); }
- /// @param b the ProgramBuilder
- /// @return the semantic bool type
- static inline const sem::Type* Sem(ProgramBuilder& b) {
- return b.create<sem::Bool>();
- }
- /// @param b the ProgramBuilder
- /// @param elem_value the b
- /// @return a new AST expression of the bool type
- static inline const ast::Expression* Expr(ProgramBuilder& b, int elem_value) {
- return b.Expr(elem_value == 0);
- }
+ /// The element type
+ using ElementType = bool;
+
+ /// false as bool is not a composite type
+ static constexpr bool is_composite = false;
+
+ /// @param b the ProgramBuilder
+ /// @return a new AST bool type
+ static inline const ast::Type* AST(ProgramBuilder& b) { return b.ty.bool_(); }
+ /// @param b the ProgramBuilder
+ /// @return the semantic bool type
+ static inline const sem::Type* Sem(ProgramBuilder& b) { return b.create<sem::Bool>(); }
+ /// @param b the ProgramBuilder
+    /// @param elem_value the value the bool will be initialized with (true when elem_value is 0)
+ /// @return a new AST expression of the bool type
+ static inline const ast::Expression* Expr(ProgramBuilder& b, double elem_value) {
+ return b.Expr(std::equal_to<double>()(elem_value, 0));
+ }
+ /// @returns the WGSL name for the type
+ static inline std::string Name() { return "bool"; }
};
/// Helper for building i32 types and expressions
template <>
struct DataType<i32> {
- /// false as i32 is not a composite type
- static constexpr bool is_composite = false;
-
- /// @param b the ProgramBuilder
- /// @return a new AST i32 type
- static inline const ast::Type* AST(ProgramBuilder& b) { return b.ty.i32(); }
- /// @param b the ProgramBuilder
- /// @return the semantic i32 type
- static inline const sem::Type* Sem(ProgramBuilder& b) {
- return b.create<sem::I32>();
- }
- /// @param b the ProgramBuilder
- /// @param elem_value the value i32 will be initialized with
- /// @return a new AST i32 literal value expression
- static inline const ast::Expression* Expr(ProgramBuilder& b, int elem_value) {
- return b.Expr(static_cast<i32>(elem_value));
- }
+ /// The element type
+ using ElementType = i32;
+
+ /// false as i32 is not a composite type
+ static constexpr bool is_composite = false;
+
+ /// @param b the ProgramBuilder
+ /// @return a new AST i32 type
+ static inline const ast::Type* AST(ProgramBuilder& b) { return b.ty.i32(); }
+ /// @param b the ProgramBuilder
+ /// @return the semantic i32 type
+ static inline const sem::Type* Sem(ProgramBuilder& b) { return b.create<sem::I32>(); }
+ /// @param b the ProgramBuilder
+ /// @param elem_value the value i32 will be initialized with
+ /// @return a new AST i32 literal value expression
+ static inline const ast::Expression* Expr(ProgramBuilder& b, double elem_value) {
+ return b.Expr(static_cast<i32>(elem_value));
+ }
+ /// @returns the WGSL name for the type
+ static inline std::string Name() { return "i32"; }
};
/// Helper for building u32 types and expressions
template <>
struct DataType<u32> {
- /// false as u32 is not a composite type
- static constexpr bool is_composite = false;
-
- /// @param b the ProgramBuilder
- /// @return a new AST u32 type
- static inline const ast::Type* AST(ProgramBuilder& b) { return b.ty.u32(); }
- /// @param b the ProgramBuilder
- /// @return the semantic u32 type
- static inline const sem::Type* Sem(ProgramBuilder& b) {
- return b.create<sem::U32>();
- }
- /// @param b the ProgramBuilder
- /// @param elem_value the value u32 will be initialized with
- /// @return a new AST u32 literal value expression
- static inline const ast::Expression* Expr(ProgramBuilder& b, int elem_value) {
- return b.Expr(static_cast<u32>(elem_value));
- }
+ /// The element type
+ using ElementType = u32;
+
+ /// false as u32 is not a composite type
+ static constexpr bool is_composite = false;
+
+ /// @param b the ProgramBuilder
+ /// @return a new AST u32 type
+ static inline const ast::Type* AST(ProgramBuilder& b) { return b.ty.u32(); }
+ /// @param b the ProgramBuilder
+ /// @return the semantic u32 type
+ static inline const sem::Type* Sem(ProgramBuilder& b) { return b.create<sem::U32>(); }
+ /// @param b the ProgramBuilder
+ /// @param elem_value the value u32 will be initialized with
+ /// @return a new AST u32 literal value expression
+ static inline const ast::Expression* Expr(ProgramBuilder& b, double elem_value) {
+ return b.Expr(static_cast<u32>(elem_value));
+ }
+ /// @returns the WGSL name for the type
+ static inline std::string Name() { return "u32"; }
};
/// Helper for building f32 types and expressions
template <>
struct DataType<f32> {
- /// false as f32 is not a composite type
- static constexpr bool is_composite = false;
-
- /// @param b the ProgramBuilder
- /// @return a new AST f32 type
- static inline const ast::Type* AST(ProgramBuilder& b) { return b.ty.f32(); }
- /// @param b the ProgramBuilder
- /// @return the semantic f32 type
- static inline const sem::Type* Sem(ProgramBuilder& b) {
- return b.create<sem::F32>();
- }
- /// @param b the ProgramBuilder
- /// @param elem_value the value f32 will be initialized with
- /// @return a new AST f32 literal value expression
- static inline const ast::Expression* Expr(ProgramBuilder& b, int elem_value) {
- return b.Expr(static_cast<f32>(elem_value));
- }
+ /// The element type
+ using ElementType = f32;
+
+ /// false as f32 is not a composite type
+ static constexpr bool is_composite = false;
+
+ /// @param b the ProgramBuilder
+ /// @return a new AST f32 type
+ static inline const ast::Type* AST(ProgramBuilder& b) { return b.ty.f32(); }
+ /// @param b the ProgramBuilder
+ /// @return the semantic f32 type
+ static inline const sem::Type* Sem(ProgramBuilder& b) { return b.create<sem::F32>(); }
+ /// @param b the ProgramBuilder
+ /// @param elem_value the value f32 will be initialized with
+ /// @return a new AST f32 literal value expression
+ static inline const ast::Expression* Expr(ProgramBuilder& b, double elem_value) {
+ return b.Expr(static_cast<f32>(elem_value));
+ }
+ /// @returns the WGSL name for the type
+ static inline std::string Name() { return "f32"; }
+};
+
+/// Helper for building f16 types and expressions
+template <>
+struct DataType<f16> {
+ /// The element type
+ using ElementType = f16;
+
+ /// false as f16 is not a composite type
+ static constexpr bool is_composite = false;
+
+ /// @param b the ProgramBuilder
+ /// @return a new AST f16 type
+ static inline const ast::Type* AST(ProgramBuilder& b) { return b.ty.f16(); }
+ /// @param b the ProgramBuilder
+ /// @return the semantic f16 type
+ static inline const sem::Type* Sem(ProgramBuilder& b) { return b.create<sem::F16>(); }
+ /// @param b the ProgramBuilder
+ /// @param elem_value the value f16 will be initialized with
+ /// @return a new AST f16 literal value expression
+ static inline const ast::Expression* Expr(ProgramBuilder& b, double elem_value) {
+ return b.Expr(static_cast<f16>(elem_value));
+ }
+ /// @returns the WGSL name for the type
+ static inline std::string Name() { return "f16"; }
+};
+
+/// Helper for building abstract float types and expressions
+template <>
+struct DataType<AFloat> {
+ /// The element type
+ using ElementType = AFloat;
+
+ /// false as AFloat is not a composite type
+ static constexpr bool is_composite = false;
+
+ /// @returns nullptr, as abstract floats are un-typeable
+ static inline const ast::Type* AST(ProgramBuilder&) { return nullptr; }
+ /// @param b the ProgramBuilder
+ /// @return the semantic abstract-float type
+ static inline const sem::Type* Sem(ProgramBuilder& b) { return b.create<sem::AbstractFloat>(); }
+ /// @param b the ProgramBuilder
+ /// @param elem_value the value the abstract-float literal will be constructed with
+ /// @return a new AST abstract-float literal value expression
+ static inline const ast::Expression* Expr(ProgramBuilder& b, double elem_value) {
+ return b.Expr(AFloat(elem_value));
+ }
+ /// @returns the WGSL name for the type
+ static inline std::string Name() { return "abstract-float"; }
+};
+
+/// Helper for building abstract integer types and expressions
+template <>
+struct DataType<AInt> {
+ /// The element type
+ using ElementType = AInt;
+
+    /// false as AInt is not a composite type
+ static constexpr bool is_composite = false;
+
+ /// @returns nullptr, as abstract integers are un-typeable
+ static inline const ast::Type* AST(ProgramBuilder&) { return nullptr; }
+ /// @param b the ProgramBuilder
+ /// @return the semantic abstract-int type
+ static inline const sem::Type* Sem(ProgramBuilder& b) { return b.create<sem::AbstractInt>(); }
+ /// @param b the ProgramBuilder
+ /// @param elem_value the value the abstract-int literal will be constructed with
+ /// @return a new AST abstract-int literal value expression
+ static inline const ast::Expression* Expr(ProgramBuilder& b, double elem_value) {
+ return b.Expr(AInt(elem_value));
+ }
+ /// @returns the WGSL name for the type
+ static inline std::string Name() { return "abstract-int"; }
};
/// Helper for building vector types and expressions
template <uint32_t N, typename T>
struct DataType<vec<N, T>> {
- /// true as vectors are a composite type
- static constexpr bool is_composite = true;
-
- /// @param b the ProgramBuilder
- /// @return a new AST vector type
- static inline const ast::Type* AST(ProgramBuilder& b) {
- return b.ty.vec(DataType<T>::AST(b), N);
- }
- /// @param b the ProgramBuilder
- /// @return the semantic vector type
- static inline const sem::Type* Sem(ProgramBuilder& b) {
- return b.create<sem::Vector>(DataType<T>::Sem(b), N);
- }
- /// @param b the ProgramBuilder
- /// @param elem_value the value each element in the vector will be initialized
- /// with
- /// @return a new AST vector value expression
- static inline const ast::Expression* Expr(ProgramBuilder& b, int elem_value) {
- return b.Construct(AST(b), ExprArgs(b, elem_value));
- }
-
- /// @param b the ProgramBuilder
- /// @param elem_value the value each element will be initialized with
- /// @return the list of expressions that are used to construct the vector
- static inline ast::ExpressionList ExprArgs(ProgramBuilder& b,
- int elem_value) {
- ast::ExpressionList args;
- for (uint32_t i = 0; i < N; i++) {
- args.emplace_back(DataType<T>::Expr(b, elem_value));
- }
- return args;
- }
+ /// The element type
+ using ElementType = T;
+
+ /// true as vectors are a composite type
+ static constexpr bool is_composite = true;
+
+ /// @param b the ProgramBuilder
+ /// @return a new AST vector type
+ static inline const ast::Type* AST(ProgramBuilder& b) {
+ return b.ty.vec(DataType<T>::AST(b), N);
+ }
+ /// @param b the ProgramBuilder
+ /// @return the semantic vector type
+ static inline const sem::Type* Sem(ProgramBuilder& b) {
+ return b.create<sem::Vector>(DataType<T>::Sem(b), N);
+ }
+ /// @param b the ProgramBuilder
+ /// @param elem_value the value each element in the vector will be initialized
+ /// with
+ /// @return a new AST vector value expression
+ static inline const ast::Expression* Expr(ProgramBuilder& b, double elem_value) {
+ return b.Construct(AST(b), ExprArgs(b, elem_value));
+ }
+
+ /// @param b the ProgramBuilder
+ /// @param elem_value the value each element will be initialized with
+ /// @return the list of expressions that are used to construct the vector
+ static inline ast::ExpressionList ExprArgs(ProgramBuilder& b, double elem_value) {
+ ast::ExpressionList args;
+ for (uint32_t i = 0; i < N; i++) {
+ args.emplace_back(DataType<T>::Expr(b, elem_value));
+ }
+ return args;
+ }
+ /// @returns the WGSL name for the type
+ static inline std::string Name() {
+ return "vec" + std::to_string(N) + "<" + DataType<T>::Name() + ">";
+ }
};
/// Helper for building matrix types and expressions
template <uint32_t N, uint32_t M, typename T>
struct DataType<mat<N, M, T>> {
- /// true as matrices are a composite type
- static constexpr bool is_composite = true;
-
- /// @param b the ProgramBuilder
- /// @return a new AST matrix type
- static inline const ast::Type* AST(ProgramBuilder& b) {
- return b.ty.mat(DataType<T>::AST(b), N, M);
- }
- /// @param b the ProgramBuilder
- /// @return the semantic matrix type
- static inline const sem::Type* Sem(ProgramBuilder& b) {
- auto* column_type = b.create<sem::Vector>(DataType<T>::Sem(b), M);
- return b.create<sem::Matrix>(column_type, N);
- }
- /// @param b the ProgramBuilder
- /// @param elem_value the value each element in the matrix will be initialized
- /// with
- /// @return a new AST matrix value expression
- static inline const ast::Expression* Expr(ProgramBuilder& b, int elem_value) {
- return b.Construct(AST(b), ExprArgs(b, elem_value));
- }
-
- /// @param b the ProgramBuilder
- /// @param elem_value the value each element will be initialized with
- /// @return the list of expressions that are used to construct the matrix
- static inline ast::ExpressionList ExprArgs(ProgramBuilder& b,
- int elem_value) {
- ast::ExpressionList args;
- for (uint32_t i = 0; i < N; i++) {
- args.emplace_back(DataType<vec<M, T>>::Expr(b, elem_value));
- }
- return args;
- }
+ /// The element type
+ using ElementType = T;
+
+ /// true as matrices are a composite type
+ static constexpr bool is_composite = true;
+
+ /// @param b the ProgramBuilder
+ /// @return a new AST matrix type
+ static inline const ast::Type* AST(ProgramBuilder& b) {
+ return b.ty.mat(DataType<T>::AST(b), N, M);
+ }
+ /// @param b the ProgramBuilder
+ /// @return the semantic matrix type
+ static inline const sem::Type* Sem(ProgramBuilder& b) {
+ auto* column_type = b.create<sem::Vector>(DataType<T>::Sem(b), M);
+ return b.create<sem::Matrix>(column_type, N);
+ }
+ /// @param b the ProgramBuilder
+ /// @param elem_value the value each element in the matrix will be initialized
+ /// with
+ /// @return a new AST matrix value expression
+ static inline const ast::Expression* Expr(ProgramBuilder& b, double elem_value) {
+ return b.Construct(AST(b), ExprArgs(b, elem_value));
+ }
+
+ /// @param b the ProgramBuilder
+ /// @param elem_value the value each element will be initialized with
+ /// @return the list of expressions that are used to construct the matrix
+ static inline ast::ExpressionList ExprArgs(ProgramBuilder& b, double elem_value) {
+ ast::ExpressionList args;
+ for (uint32_t i = 0; i < N; i++) {
+ args.emplace_back(DataType<vec<M, T>>::Expr(b, elem_value));
+ }
+ return args;
+ }
+ /// @returns the WGSL name for the type
+ static inline std::string Name() {
+ return "mat" + std::to_string(N) + "x" + std::to_string(M) + "<" + DataType<T>::Name() +
+ ">";
+ }
};
/// Helper for building alias types and expressions
template <typename T, int ID>
struct DataType<alias<T, ID>> {
- /// true if the aliased type is a composite type
- static constexpr bool is_composite = DataType<T>::is_composite;
-
- /// @param b the ProgramBuilder
- /// @return a new AST alias type
- static inline const ast::Type* AST(ProgramBuilder& b) {
- auto name = b.Symbols().Register("alias_" + std::to_string(ID));
- if (!b.AST().LookupType(name)) {
- auto* type = DataType<T>::AST(b);
- b.AST().AddTypeDecl(b.ty.alias(name, type));
- }
- return b.create<ast::TypeName>(name);
- }
- /// @param b the ProgramBuilder
- /// @return the semantic aliased type
- static inline const sem::Type* Sem(ProgramBuilder& b) {
- return DataType<T>::Sem(b);
- }
-
- /// @param b the ProgramBuilder
- /// @param elem_value the value nested elements will be initialized with
- /// @return a new AST expression of the alias type
- template <bool IS_COMPOSITE = is_composite>
- static inline traits::EnableIf<!IS_COMPOSITE, const ast::Expression*> Expr(
- ProgramBuilder& b,
- int elem_value) {
- // Cast
- return b.Construct(AST(b), DataType<T>::Expr(b, elem_value));
- }
-
- /// @param b the ProgramBuilder
- /// @param elem_value the value nested elements will be initialized with
- /// @return a new AST expression of the alias type
- template <bool IS_COMPOSITE = is_composite>
- static inline traits::EnableIf<IS_COMPOSITE, const ast::Expression*> Expr(
- ProgramBuilder& b,
- int elem_value) {
- // Construct
- return b.Construct(AST(b), DataType<T>::ExprArgs(b, elem_value));
- }
+ /// The element type
+ using ElementType = T;
+
+ /// true if the aliased type is a composite type
+ static constexpr bool is_composite = DataType<T>::is_composite;
+
+ /// @param b the ProgramBuilder
+ /// @return a new AST alias type
+ static inline const ast::Type* AST(ProgramBuilder& b) {
+ auto name = b.Symbols().Register("alias_" + std::to_string(ID));
+ if (!b.AST().LookupType(name)) {
+ auto* type = DataType<T>::AST(b);
+ b.AST().AddTypeDecl(b.ty.alias(name, type));
+ }
+ return b.create<ast::TypeName>(name);
+ }
+ /// @param b the ProgramBuilder
+ /// @return the semantic aliased type
+ static inline const sem::Type* Sem(ProgramBuilder& b) { return DataType<T>::Sem(b); }
+
+ /// @param b the ProgramBuilder
+ /// @param elem_value the value nested elements will be initialized with
+ /// @return a new AST expression of the alias type
+ template <bool IS_COMPOSITE = is_composite>
+ static inline traits::EnableIf<!IS_COMPOSITE, const ast::Expression*> Expr(ProgramBuilder& b,
+ double elem_value) {
+ // Cast
+ return b.Construct(AST(b), DataType<T>::Expr(b, elem_value));
+ }
+
+ /// @param b the ProgramBuilder
+ /// @param elem_value the value nested elements will be initialized with
+ /// @return a new AST expression of the alias type
+ template <bool IS_COMPOSITE = is_composite>
+ static inline traits::EnableIf<IS_COMPOSITE, const ast::Expression*> Expr(ProgramBuilder& b,
+ double elem_value) {
+ // Construct
+ return b.Construct(AST(b), DataType<T>::ExprArgs(b, elem_value));
+ }
+ /// @returns the WGSL name for the type
+ static inline std::string Name() { return "alias_" + std::to_string(ID); }
};
/// Helper for building pointer types and expressions
template <typename T>
struct DataType<ptr<T>> {
- /// true if the pointer type is a composite type
- static constexpr bool is_composite = false;
-
- /// @param b the ProgramBuilder
- /// @return a new AST alias type
- static inline const ast::Type* AST(ProgramBuilder& b) {
- return b.create<ast::Pointer>(DataType<T>::AST(b),
- ast::StorageClass::kPrivate,
- ast::Access::kReadWrite);
- }
- /// @param b the ProgramBuilder
- /// @return the semantic aliased type
- static inline const sem::Type* Sem(ProgramBuilder& b) {
- return b.create<sem::Pointer>(DataType<T>::Sem(b),
- ast::StorageClass::kPrivate,
- ast::Access::kReadWrite);
- }
-
- /// @param b the ProgramBuilder
- /// @return a new AST expression of the alias type
- static inline const ast::Expression* Expr(ProgramBuilder& b, int /*unused*/) {
- auto sym = b.Symbols().New("global_for_ptr");
- b.Global(sym, DataType<T>::AST(b), ast::StorageClass::kPrivate);
- return b.AddressOf(sym);
- }
+ /// The element type
+ using ElementType = T;
+
+ /// true if the pointer type is a composite type
+ static constexpr bool is_composite = false;
+
+ /// @param b the ProgramBuilder
+ /// @return a new AST pointer type
+ static inline const ast::Type* AST(ProgramBuilder& b) {
+ return b.create<ast::Pointer>(DataType<T>::AST(b), ast::StorageClass::kPrivate,
+ ast::Access::kReadWrite);
+ }
+ /// @param b the ProgramBuilder
+ /// @return the semantic pointer type
+ static inline const sem::Type* Sem(ProgramBuilder& b) {
+ return b.create<sem::Pointer>(DataType<T>::Sem(b), ast::StorageClass::kPrivate,
+ ast::Access::kReadWrite);
+ }
+
+ /// @param b the ProgramBuilder
+ /// @return a new AST expression of the pointer type
+ static inline const ast::Expression* Expr(ProgramBuilder& b, double /*unused*/) {
+ auto sym = b.Symbols().New("global_for_ptr");
+ b.Global(sym, DataType<T>::AST(b), ast::StorageClass::kPrivate);
+ return b.AddressOf(sym);
+ }
+ /// @returns the WGSL name for the type
+ static inline std::string Name() { return "ptr<" + DataType<T>::Name() + ">"; }
};
/// Helper for building array types and expressions
template <uint32_t N, typename T>
struct DataType<array<N, T>> {
- /// true as arrays are a composite type
- static constexpr bool is_composite = true;
-
- /// @param b the ProgramBuilder
- /// @return a new AST array type
- static inline const ast::Type* AST(ProgramBuilder& b) {
- return b.ty.array(DataType<T>::AST(b), N);
- }
- /// @param b the ProgramBuilder
- /// @return the semantic array type
- static inline const sem::Type* Sem(ProgramBuilder& b) {
- auto* el = DataType<T>::Sem(b);
- return b.create<sem::Array>(
- /* element */ el,
- /* count */ N,
- /* align */ el->Align(),
- /* size */ el->Size(),
- /* stride */ el->Align(),
- /* implicit_stride */ el->Align());
- }
- /// @param b the ProgramBuilder
- /// @param elem_value the value each element in the array will be initialized
- /// with
- /// @return a new AST array value expression
- static inline const ast::Expression* Expr(ProgramBuilder& b, int elem_value) {
- return b.Construct(AST(b), ExprArgs(b, elem_value));
- }
-
- /// @param b the ProgramBuilder
- /// @param elem_value the value each element will be initialized with
- /// @return the list of expressions that are used to construct the array
- static inline ast::ExpressionList ExprArgs(ProgramBuilder& b,
- int elem_value) {
- ast::ExpressionList args;
- for (uint32_t i = 0; i < N; i++) {
- args.emplace_back(DataType<T>::Expr(b, elem_value));
- }
- return args;
- }
+ /// The element type
+ using ElementType = T;
+
+ /// true as arrays are a composite type
+ static constexpr bool is_composite = true;
+
+ /// @param b the ProgramBuilder
+ /// @return a new AST array type
+ static inline const ast::Type* AST(ProgramBuilder& b) {
+ return b.ty.array(DataType<T>::AST(b), u32(N));
+ }
+ /// @param b the ProgramBuilder
+ /// @return the semantic array type
+ static inline const sem::Type* Sem(ProgramBuilder& b) {
+ auto* el = DataType<T>::Sem(b);
+ return b.create<sem::Array>(
+ /* element */ el,
+ /* count */ N,
+ /* align */ el->Align(),
+ /* size */ el->Size(),
+ /* stride */ el->Align(),
+ /* implicit_stride */ el->Align());
+ }
+ /// @param b the ProgramBuilder
+ /// @param elem_value the value each element in the array will be initialized
+ /// with
+ /// @return a new AST array value expression
+ static inline const ast::Expression* Expr(ProgramBuilder& b, double elem_value) {
+ return b.Construct(AST(b), ExprArgs(b, elem_value));
+ }
+
+ /// @param b the ProgramBuilder
+ /// @param elem_value the value each element will be initialized with
+ /// @return the list of expressions that are used to construct the array
+ static inline ast::ExpressionList ExprArgs(ProgramBuilder& b, double elem_value) {
+ ast::ExpressionList args;
+ for (uint32_t i = 0; i < N; i++) {
+ args.emplace_back(DataType<T>::Expr(b, elem_value));
+ }
+ return args;
+ }
+ /// @returns the WGSL name for the type
+ static inline std::string Name() {
+ return "array<" + DataType<T>::Name() + ", " + std::to_string(N) + ">";
+ }
};
/// Struct of all creation pointer types
struct CreatePtrs {
- /// ast node type create function
- ast_type_func_ptr ast;
- /// ast expression type create function
- ast_expr_func_ptr expr;
- /// sem type create function
- sem_type_func_ptr sem;
+ /// ast node type create function
+ ast_type_func_ptr ast;
+ /// ast expression type create function
+ ast_expr_func_ptr expr;
+ /// sem type create function
+ sem_type_func_ptr sem;
};
/// Returns a CreatePtrs struct instance with all creation pointer types for
/// type `T`
template <typename T>
constexpr CreatePtrs CreatePtrsFor() {
- return {DataType<T>::AST, DataType<T>::Expr, DataType<T>::Sem};
+ return {DataType<T>::AST, DataType<T>::Expr, DataType<T>::Sem};
}
} // namespace builder
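A minimal usage sketch of the DataType<T> specializations and CreatePtrsFor<T>() shown above. It assumes ProgramBuilder, the vec<N, T> tag type and the f32 element helper declared earlier in this test-helper header; it is illustrative only, not part of the commit.

    ProgramBuilder b;
    using V3F = vec<3, f32>;  // tag type, dispatched through DataType<>

    // Build the AST type, a value expression and the WGSL spelling for vec3<f32>.
    const ast::Type* ast_ty = DataType<V3F>::AST(b);            // vec3<f32> AST node
    const ast::Expression* val = DataType<V3F>::Expr(b, 1.0);   // vec3<f32>(1.0, 1.0, 1.0)
    std::string wgsl = DataType<V3F>::Name();                   // "vec3<f32>"

    // CreatePtrsFor<T>() bundles the three factory functions so parameterized
    // tests can table-drive many types with a single entry.
    constexpr CreatePtrs ptrs = CreatePtrsFor<V3F>();
    const ast::Type* same_ty = ptrs.ast(b);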
diff --git a/chromium/third_party/dawn/src/tint/resolver/resolver_validation.cc b/chromium/third_party/dawn/src/tint/resolver/resolver_validation.cc
deleted file mode 100644
index 708a4fab940..00000000000
--- a/chromium/third_party/dawn/src/tint/resolver/resolver_validation.cc
+++ /dev/null
@@ -1,2428 +0,0 @@
-// Copyright 2020 The Tint Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "src/tint/resolver/resolver.h"
-
-#include <algorithm>
-#include <limits>
-#include <utility>
-
-#include "src/tint/ast/alias.h"
-#include "src/tint/ast/array.h"
-#include "src/tint/ast/assignment_statement.h"
-#include "src/tint/ast/bitcast_expression.h"
-#include "src/tint/ast/break_statement.h"
-#include "src/tint/ast/call_statement.h"
-#include "src/tint/ast/continue_statement.h"
-#include "src/tint/ast/depth_texture.h"
-#include "src/tint/ast/disable_validation_attribute.h"
-#include "src/tint/ast/discard_statement.h"
-#include "src/tint/ast/fallthrough_statement.h"
-#include "src/tint/ast/for_loop_statement.h"
-#include "src/tint/ast/id_attribute.h"
-#include "src/tint/ast/if_statement.h"
-#include "src/tint/ast/internal_attribute.h"
-#include "src/tint/ast/interpolate_attribute.h"
-#include "src/tint/ast/loop_statement.h"
-#include "src/tint/ast/matrix.h"
-#include "src/tint/ast/pointer.h"
-#include "src/tint/ast/return_statement.h"
-#include "src/tint/ast/sampled_texture.h"
-#include "src/tint/ast/sampler.h"
-#include "src/tint/ast/storage_texture.h"
-#include "src/tint/ast/switch_statement.h"
-#include "src/tint/ast/traverse_expressions.h"
-#include "src/tint/ast/type_name.h"
-#include "src/tint/ast/unary_op_expression.h"
-#include "src/tint/ast/variable_decl_statement.h"
-#include "src/tint/ast/vector.h"
-#include "src/tint/ast/workgroup_attribute.h"
-#include "src/tint/sem/array.h"
-#include "src/tint/sem/atomic_type.h"
-#include "src/tint/sem/call.h"
-#include "src/tint/sem/depth_multisampled_texture_type.h"
-#include "src/tint/sem/depth_texture_type.h"
-#include "src/tint/sem/for_loop_statement.h"
-#include "src/tint/sem/function.h"
-#include "src/tint/sem/if_statement.h"
-#include "src/tint/sem/loop_statement.h"
-#include "src/tint/sem/member_accessor_expression.h"
-#include "src/tint/sem/multisampled_texture_type.h"
-#include "src/tint/sem/pointer_type.h"
-#include "src/tint/sem/reference_type.h"
-#include "src/tint/sem/sampled_texture_type.h"
-#include "src/tint/sem/sampler_type.h"
-#include "src/tint/sem/statement.h"
-#include "src/tint/sem/storage_texture_type.h"
-#include "src/tint/sem/struct.h"
-#include "src/tint/sem/switch_statement.h"
-#include "src/tint/sem/type_constructor.h"
-#include "src/tint/sem/type_conversion.h"
-#include "src/tint/sem/variable.h"
-#include "src/tint/utils/defer.h"
-#include "src/tint/utils/map.h"
-#include "src/tint/utils/math.h"
-#include "src/tint/utils/reverse.h"
-#include "src/tint/utils/scoped_assignment.h"
-#include "src/tint/utils/transform.h"
-
-namespace tint::resolver {
-namespace {
-
-bool IsValidStorageTextureDimension(ast::TextureDimension dim) {
- switch (dim) {
- case ast::TextureDimension::k1d:
- case ast::TextureDimension::k2d:
- case ast::TextureDimension::k2dArray:
- case ast::TextureDimension::k3d:
- return true;
- default:
- return false;
- }
-}
-
-bool IsValidStorageTextureTexelFormat(ast::TexelFormat format) {
- switch (format) {
- case ast::TexelFormat::kR32Uint:
- case ast::TexelFormat::kR32Sint:
- case ast::TexelFormat::kR32Float:
- case ast::TexelFormat::kRg32Uint:
- case ast::TexelFormat::kRg32Sint:
- case ast::TexelFormat::kRg32Float:
- case ast::TexelFormat::kRgba8Unorm:
- case ast::TexelFormat::kRgba8Snorm:
- case ast::TexelFormat::kRgba8Uint:
- case ast::TexelFormat::kRgba8Sint:
- case ast::TexelFormat::kRgba16Uint:
- case ast::TexelFormat::kRgba16Sint:
- case ast::TexelFormat::kRgba16Float:
- case ast::TexelFormat::kRgba32Uint:
- case ast::TexelFormat::kRgba32Sint:
- case ast::TexelFormat::kRgba32Float:
- return true;
- default:
- return false;
- }
-}
-
-// Helper to stringify a pipeline IO attribute.
-std::string attr_to_str(const ast::Attribute* attr) {
- std::stringstream str;
- if (auto* builtin = attr->As<ast::BuiltinAttribute>()) {
- str << "builtin(" << builtin->builtin << ")";
- } else if (auto* location = attr->As<ast::LocationAttribute>()) {
- str << "location(" << location->value << ")";
- }
- return str.str();
-}
-
-template <typename CALLBACK>
-void TraverseCallChain(diag::List& diagnostics,
- const sem::Function* from,
- const sem::Function* to,
- CALLBACK&& callback) {
- for (auto* f : from->TransitivelyCalledFunctions()) {
- if (f == to) {
- callback(f);
- return;
- }
- if (f->TransitivelyCalledFunctions().contains(to)) {
- TraverseCallChain(diagnostics, f, to, callback);
- callback(f);
- return;
- }
- }
- TINT_ICE(Resolver, diagnostics)
- << "TraverseCallChain() 'from' does not transitively call 'to'";
-}
-
-} // namespace
-
-bool Resolver::ValidateAtomic(const ast::Atomic* a, const sem::Atomic* s) {
- // https://gpuweb.github.io/gpuweb/wgsl/#atomic-types
- // T must be either u32 or i32.
- if (!s->Type()->IsAnyOf<sem::U32, sem::I32>()) {
- AddError("atomic only supports i32 or u32 types",
- a->type ? a->type->source : a->source);
- return false;
- }
- return true;
-}
-
-bool Resolver::ValidateStorageTexture(const ast::StorageTexture* t) {
- switch (t->access) {
- case ast::Access::kWrite:
- break;
- case ast::Access::kUndefined:
- AddError("storage texture missing access control", t->source);
- return false;
- default:
- AddError("storage textures currently only support 'write' access control",
- t->source);
- return false;
- }
-
- if (!IsValidStorageTextureDimension(t->dim)) {
- AddError("cube dimensions for storage textures are not supported",
- t->source);
- return false;
- }
-
- if (!IsValidStorageTextureTexelFormat(t->format)) {
- AddError(
- "image format must be one of the texel formats specified for storage "
- "textures in https://gpuweb.github.io/gpuweb/wgsl/#texel-formats",
- t->source);
- return false;
- }
- return true;
-}
-
-bool Resolver::ValidateVariableConstructorOrCast(
- const ast::Variable* var,
- ast::StorageClass storage_class,
- const sem::Type* storage_ty,
- const sem::Type* rhs_ty) {
- auto* value_type = rhs_ty->UnwrapRef(); // Implicit load of RHS
-
- // Value type has to match storage type
- if (storage_ty != value_type) {
- std::string decl = var->is_const ? "let" : "var";
- AddError("cannot initialize " + decl + " of type '" +
- TypeNameOf(storage_ty) + "' with value of type '" +
- TypeNameOf(rhs_ty) + "'",
- var->source);
- return false;
- }
-
- if (!var->is_const) {
- switch (storage_class) {
- case ast::StorageClass::kPrivate:
- case ast::StorageClass::kFunction:
- break; // Allowed an initializer
- default:
- // https://gpuweb.github.io/gpuweb/wgsl/#var-and-let
- // Optionally has an initializer expression, if the variable is in the
- // private or function storage classes.
- AddError("var of storage class '" +
- std::string(ast::ToString(storage_class)) +
- "' cannot have an initializer. var initializers are only "
- "supported for the storage classes "
- "'private' and 'function'",
- var->source);
- return false;
- }
- }
-
- return true;
-}
-
-bool Resolver::ValidateStorageClassLayout(const sem::Type* store_ty,
- ast::StorageClass sc,
- Source source) {
- // https://gpuweb.github.io/gpuweb/wgsl/#storage-class-layout-constraints
-
- auto is_uniform_struct_or_array = [sc](const sem::Type* ty) {
- return sc == ast::StorageClass::kUniform &&
- ty->IsAnyOf<sem::Array, sem::Struct>();
- };
-
- auto is_uniform_struct = [sc](const sem::Type* ty) {
- return sc == ast::StorageClass::kUniform && ty->Is<sem::Struct>();
- };
-
- auto required_alignment_of = [&](const sem::Type* ty) {
- uint32_t actual_align = ty->Align();
- uint32_t required_align = actual_align;
- if (is_uniform_struct_or_array(ty)) {
- required_align = utils::RoundUp(16u, actual_align);
- }
- return required_align;
- };
-
- auto member_name_of = [this](const sem::StructMember* sm) {
- return builder_->Symbols().NameFor(sm->Declaration()->symbol);
- };
-
- // Cache result of type + storage class pair.
- if (!valid_type_storage_layouts_.emplace(store_ty, sc).second) {
- return true;
- }
-
- if (!ast::IsHostShareable(sc)) {
- return true;
- }
-
- if (auto* str = store_ty->As<sem::Struct>()) {
- for (size_t i = 0; i < str->Members().size(); ++i) {
- auto* const m = str->Members()[i];
- uint32_t required_align = required_alignment_of(m->Type());
-
- // Recurse into the member type.
- if (!ValidateStorageClassLayout(m->Type(), sc,
- m->Declaration()->type->source)) {
- AddNote("see layout of struct:\n" + str->Layout(builder_->Symbols()),
- str->Declaration()->source);
- return false;
- }
-
- // Validate that member is at a valid byte offset
- if (m->Offset() % required_align != 0) {
- AddError("the offset of a struct member of type '" +
- m->Type()->UnwrapRef()->FriendlyName(builder_->Symbols()) +
- "' in storage class '" + ast::ToString(sc) +
- "' must be a multiple of " +
- std::to_string(required_align) + " bytes, but '" +
- member_name_of(m) + "' is currently at offset " +
- std::to_string(m->Offset()) +
- ". Consider setting @align(" +
- std::to_string(required_align) + ") on this member",
- m->Declaration()->source);
-
- AddNote("see layout of struct:\n" + str->Layout(builder_->Symbols()),
- str->Declaration()->source);
-
- if (auto* member_str = m->Type()->As<sem::Struct>()) {
- AddNote("and layout of struct member:\n" +
- member_str->Layout(builder_->Symbols()),
- member_str->Declaration()->source);
- }
-
- return false;
- }
-
- // For uniform buffers, validate that the number of bytes between the
- // previous member of type struct and the current is a multiple of 16
- // bytes.
- auto* const prev_member = (i == 0) ? nullptr : str->Members()[i - 1];
- if (prev_member && is_uniform_struct(prev_member->Type())) {
- const uint32_t prev_to_curr_offset =
- m->Offset() - prev_member->Offset();
- if (prev_to_curr_offset % 16 != 0) {
- AddError(
- "uniform storage requires that the number of bytes between the "
- "start of the previous member of type struct and the current "
- "member be a multiple of 16 bytes, but there are currently " +
- std::to_string(prev_to_curr_offset) + " bytes between '" +
- member_name_of(prev_member) + "' and '" + member_name_of(m) +
- "'. Consider setting @align(16) on this member",
- m->Declaration()->source);
-
- AddNote("see layout of struct:\n" + str->Layout(builder_->Symbols()),
- str->Declaration()->source);
-
- auto* prev_member_str = prev_member->Type()->As<sem::Struct>();
- AddNote("and layout of previous member struct:\n" +
- prev_member_str->Layout(builder_->Symbols()),
- prev_member_str->Declaration()->source);
- return false;
- }
- }
- }
- }
-
- // For uniform buffer array members, validate that array elements are
- // aligned to 16 bytes
- if (auto* arr = store_ty->As<sem::Array>()) {
- // Recurse into the element type.
- // TODO(crbug.com/tint/1388): Ideally we'd pass the source for nested
- // element type here, but we can't easily get that from the semantic node.
- // We should consider recursing through the AST type nodes instead.
- if (!ValidateStorageClassLayout(arr->ElemType(), sc, source)) {
- return false;
- }
-
- if (sc == ast::StorageClass::kUniform) {
- // We already validated that this array member is itself aligned to 16
- // bytes above, so we only need to validate that stride is a multiple
- // of 16 bytes.
- if (arr->Stride() % 16 != 0) {
- // Since WGSL has no stride attribute, try to provide a useful hint
- // for how the shader author can resolve the issue.
- std::string hint;
- if (arr->ElemType()->is_scalar()) {
- hint =
- "Consider using a vector or struct as the element type "
- "instead.";
- } else if (auto* vec = arr->ElemType()->As<sem::Vector>();
- vec && vec->type()->Size() == 4) {
- hint = "Consider using a vec4 instead.";
- } else if (arr->ElemType()->Is<sem::Struct>()) {
- hint =
- "Consider using the @size attribute on the last struct "
- "member.";
- } else {
- hint =
- "Consider wrapping the element type in a struct and using "
- "the "
- "@size attribute.";
- }
- AddError(
- "uniform storage requires that array elements be aligned to 16 "
- "bytes, but array element alignment is currently " +
- std::to_string(arr->Stride()) + ". " + hint,
- source);
- return false;
- }
- }
- }
-
- return true;
-}
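A rough worked example of the uniform-layout rule this deleted function enforced (a sketch only; RoundUp below is a stand-in for utils::RoundUp, not the Tint implementation): in the uniform storage class a struct or array member must start at a multiple of RoundUp(16, its natural alignment), and array strides must be multiples of 16.

    // Stand-in helper: round `value` up to the next multiple of `multiple` (multiple > 0).
    static uint32_t RoundUp(uint32_t multiple, uint32_t value) {
        return ((value + multiple - 1) / multiple) * multiple;
    }
    // A member whose natural alignment is 8 therefore needs a 16-byte offset inside a
    // uniform buffer, since RoundUp(16u, 8u) == 16; the diagnostics above suggest
    // @align(16) on the member as the fix.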
-
-bool Resolver::ValidateStorageClassLayout(const sem::Variable* var) {
- if (auto* str = var->Type()->UnwrapRef()->As<sem::Struct>()) {
- if (!ValidateStorageClassLayout(str, var->StorageClass(),
- str->Declaration()->source)) {
- AddNote("see declaration of variable", var->Declaration()->source);
- return false;
- }
- } else {
- Source source = var->Declaration()->source;
- if (var->Declaration()->type) {
- source = var->Declaration()->type->source;
- }
- if (!ValidateStorageClassLayout(var->Type()->UnwrapRef(),
- var->StorageClass(), source)) {
- return false;
- }
- }
-
- return true;
-}
-
-bool Resolver::ValidateGlobalVariable(const sem::Variable* var) {
- auto* decl = var->Declaration();
- if (!ValidateNoDuplicateAttributes(decl->attributes)) {
- return false;
- }
-
- for (auto* attr : decl->attributes) {
- if (decl->is_const) {
- if (auto* id_attr = attr->As<ast::IdAttribute>()) {
- uint32_t id = id_attr->value;
- auto it = constant_ids_.find(id);
- if (it != constant_ids_.end() && it->second != var) {
- AddError("pipeline constant IDs must be unique", attr->source);
- AddNote("a pipeline constant with an ID of " + std::to_string(id) +
- " was previously declared "
- "here:",
- ast::GetAttribute<ast::IdAttribute>(
- it->second->Declaration()->attributes)
- ->source);
- return false;
- }
- if (id > 65535) {
- AddError("pipeline constant IDs must be between 0 and 65535",
- attr->source);
- return false;
- }
- } else {
- AddError("attribute is not valid for constants", attr->source);
- return false;
- }
- } else {
- bool is_shader_io_attribute =
- attr->IsAnyOf<ast::BuiltinAttribute, ast::InterpolateAttribute,
- ast::InvariantAttribute, ast::LocationAttribute>();
- bool has_io_storage_class =
- var->StorageClass() == ast::StorageClass::kInput ||
- var->StorageClass() == ast::StorageClass::kOutput;
- if (!(attr->IsAnyOf<ast::BindingAttribute, ast::GroupAttribute,
- ast::InternalAttribute>()) &&
- (!is_shader_io_attribute || !has_io_storage_class)) {
- AddError("attribute is not valid for variables", attr->source);
- return false;
- }
- }
- }
-
- if (var->StorageClass() == ast::StorageClass::kFunction) {
- AddError(
- "variables declared at module scope must not be in the function "
- "storage class",
- decl->source);
- return false;
- }
-
- auto binding_point = decl->BindingPoint();
- switch (var->StorageClass()) {
- case ast::StorageClass::kUniform:
- case ast::StorageClass::kStorage:
- case ast::StorageClass::kUniformConstant: {
- // https://gpuweb.github.io/gpuweb/wgsl/#resource-interface
- // Each resource variable must be declared with both group and binding
- // attributes.
- if (!binding_point) {
- AddError(
- "resource variables require @group and @binding "
- "attributes",
- decl->source);
- return false;
- }
- break;
- }
- default:
- if (binding_point.binding || binding_point.group) {
- // https://gpuweb.github.io/gpuweb/wgsl/#attribute-binding
- // Must only be applied to a resource variable
- AddError(
- "non-resource variables must not have @group or @binding "
- "attributes",
- decl->source);
- return false;
- }
- }
-
- // https://gpuweb.github.io/gpuweb/wgsl/#variable-declaration
- // The access mode always has a default, and except for variables in the
- // storage storage class, must not be written.
- if (var->StorageClass() != ast::StorageClass::kStorage &&
- decl->declared_access != ast::Access::kUndefined) {
- AddError(
- "only variables in <storage> storage class may declare an access mode",
- decl->source);
- return false;
- }
-
- if (!decl->is_const) {
- if (!ValidateAtomicVariable(var)) {
- return false;
- }
- }
-
- return ValidateVariable(var);
-}
-
-// https://gpuweb.github.io/gpuweb/wgsl/#atomic-types
-// Atomic types may only be instantiated by variables in the workgroup storage
-// class or by storage buffer variables with a read_write access mode.
-bool Resolver::ValidateAtomicVariable(const sem::Variable* var) {
- auto sc = var->StorageClass();
- auto* decl = var->Declaration();
- auto access = var->Access();
- auto* type = var->Type()->UnwrapRef();
- auto source = decl->type ? decl->type->source : decl->source;
-
- if (type->Is<sem::Atomic>()) {
- if (sc != ast::StorageClass::kWorkgroup &&
- sc != ast::StorageClass::kStorage) {
- AddError(
- "atomic variables must have <storage> or <workgroup> storage class",
- source);
- return false;
- }
- } else if (type->IsAnyOf<sem::Struct, sem::Array>()) {
- auto found = atomic_composite_info_.find(type);
- if (found != atomic_composite_info_.end()) {
- if (sc != ast::StorageClass::kStorage &&
- sc != ast::StorageClass::kWorkgroup) {
- AddError(
- "atomic variables must have <storage> or <workgroup> storage class",
- source);
- AddNote(
- "atomic sub-type of '" + TypeNameOf(type) + "' is declared here",
- found->second);
- return false;
- } else if (sc == ast::StorageClass::kStorage &&
- access != ast::Access::kReadWrite) {
- AddError(
- "atomic variables in <storage> storage class must have read_write "
- "access mode",
- source);
- AddNote(
- "atomic sub-type of '" + TypeNameOf(type) + "' is declared here",
- found->second);
- return false;
- }
- }
- }
-
- return true;
-}
-
-bool Resolver::ValidateVariable(const sem::Variable* var) {
- auto* decl = var->Declaration();
- auto* storage_ty = var->Type()->UnwrapRef();
-
- if (var->Is<sem::GlobalVariable>()) {
- auto name = builder_->Symbols().NameFor(decl->symbol);
- if (sem::ParseBuiltinType(name) != sem::BuiltinType::kNone) {
- auto* kind = var->Declaration()->is_const ? "let" : "var";
- AddError(
- "'" + name +
- "' is a builtin and cannot be redeclared as a module-scope " +
- kind,
- decl->source);
- return false;
- }
- }
-
- if (!decl->is_const && !IsStorable(storage_ty)) {
- AddError(TypeNameOf(storage_ty) + " cannot be used as the type of a var",
- decl->source);
- return false;
- }
-
- if (decl->is_const && !var->Is<sem::Parameter>() &&
- !(storage_ty->IsConstructible() || storage_ty->Is<sem::Pointer>())) {
- AddError(TypeNameOf(storage_ty) + " cannot be used as the type of a let",
- decl->source);
- return false;
- }
-
- if (auto* r = storage_ty->As<sem::MultisampledTexture>()) {
- if (r->dim() != ast::TextureDimension::k2d) {
- AddError("only 2d multisampled textures are supported", decl->source);
- return false;
- }
-
- if (!r->type()->UnwrapRef()->is_numeric_scalar()) {
- AddError("texture_multisampled_2d<type>: type must be f32, i32 or u32",
- decl->source);
- return false;
- }
- }
-
- if (var->Is<sem::LocalVariable>() && !decl->is_const &&
- IsValidationEnabled(decl->attributes,
- ast::DisabledValidation::kIgnoreStorageClass)) {
- if (!var->Type()->UnwrapRef()->IsConstructible()) {
- AddError("function variable must have a constructible type",
- decl->type ? decl->type->source : decl->source);
- return false;
- }
- }
-
- if (storage_ty->is_handle() &&
- decl->declared_storage_class != ast::StorageClass::kNone) {
- // https://gpuweb.github.io/gpuweb/wgsl/#module-scope-variables
- // If the store type is a texture type or a sampler type, then the
- // variable declaration must not have a storage class attribute. The
- // storage class will always be handle.
- AddError("variables of type '" + TypeNameOf(storage_ty) +
- "' must not have a storage class",
- decl->source);
- return false;
- }
-
- if (IsValidationEnabled(decl->attributes,
- ast::DisabledValidation::kIgnoreStorageClass) &&
- (decl->declared_storage_class == ast::StorageClass::kInput ||
- decl->declared_storage_class == ast::StorageClass::kOutput)) {
- AddError("invalid use of input/output storage class", decl->source);
- return false;
- }
- return true;
-}
-
-bool Resolver::ValidateFunctionParameter(const ast::Function* func,
- const sem::Variable* var) {
- if (!ValidateVariable(var)) {
- return false;
- }
-
- auto* decl = var->Declaration();
-
- for (auto* attr : decl->attributes) {
- if (!func->IsEntryPoint() && !attr->Is<ast::InternalAttribute>()) {
- AddError("attribute is not valid for non-entry point function parameters",
- attr->source);
- return false;
- } else if (!attr->IsAnyOf<ast::BuiltinAttribute, ast::InvariantAttribute,
- ast::LocationAttribute, ast::InterpolateAttribute,
- ast::InternalAttribute>() &&
- (IsValidationEnabled(
- decl->attributes,
- ast::DisabledValidation::kEntryPointParameter) &&
- IsValidationEnabled(
- decl->attributes,
- ast::DisabledValidation::
- kIgnoreConstructibleFunctionParameter))) {
- AddError("attribute is not valid for function parameters", attr->source);
- return false;
- }
- }
-
- if (auto* ref = var->Type()->As<sem::Pointer>()) {
- auto sc = ref->StorageClass();
- if (!(sc == ast::StorageClass::kFunction ||
- sc == ast::StorageClass::kPrivate ||
- sc == ast::StorageClass::kWorkgroup) &&
- IsValidationEnabled(decl->attributes,
- ast::DisabledValidation::kIgnoreStorageClass)) {
- std::stringstream ss;
- ss << "function parameter of pointer type cannot be in '" << sc
- << "' storage class";
- AddError(ss.str(), decl->source);
- return false;
- }
- }
-
- if (IsPlain(var->Type())) {
- if (!var->Type()->IsConstructible() &&
- IsValidationEnabled(
- decl->attributes,
- ast::DisabledValidation::kIgnoreConstructibleFunctionParameter)) {
- AddError("store type of function parameter must be a constructible type",
- decl->source);
- return false;
- }
- } else if (!var->Type()
- ->IsAnyOf<sem::Texture, sem::Sampler, sem::Pointer>()) {
- AddError(
- "store type of function parameter cannot be " + TypeNameOf(var->Type()),
- decl->source);
- return false;
- }
-
- return true;
-}
-
-bool Resolver::ValidateBuiltinAttribute(const ast::BuiltinAttribute* attr,
- const sem::Type* storage_ty,
- const bool is_input) {
- auto* type = storage_ty->UnwrapRef();
- const auto stage = current_function_
- ? current_function_->Declaration()->PipelineStage()
- : ast::PipelineStage::kNone;
- std::stringstream stage_name;
- stage_name << stage;
- bool is_stage_mismatch = false;
- bool is_output = !is_input;
- switch (attr->builtin) {
- case ast::Builtin::kPosition:
- if (stage != ast::PipelineStage::kNone &&
- !((is_input && stage == ast::PipelineStage::kFragment) ||
- (is_output && stage == ast::PipelineStage::kVertex))) {
- is_stage_mismatch = true;
- }
- if (!(type->is_float_vector() && type->As<sem::Vector>()->Width() == 4)) {
- AddError("store type of " + attr_to_str(attr) + " must be 'vec4<f32>'",
- attr->source);
- return false;
- }
- break;
- case ast::Builtin::kGlobalInvocationId:
- case ast::Builtin::kLocalInvocationId:
- case ast::Builtin::kNumWorkgroups:
- case ast::Builtin::kWorkgroupId:
- if (stage != ast::PipelineStage::kNone &&
- !(stage == ast::PipelineStage::kCompute && is_input)) {
- is_stage_mismatch = true;
- }
- if (!(type->is_unsigned_integer_vector() &&
- type->As<sem::Vector>()->Width() == 3)) {
- AddError("store type of " + attr_to_str(attr) + " must be 'vec3<u32>'",
- attr->source);
- return false;
- }
- break;
- case ast::Builtin::kFragDepth:
- if (stage != ast::PipelineStage::kNone &&
- !(stage == ast::PipelineStage::kFragment && !is_input)) {
- is_stage_mismatch = true;
- }
- if (!type->Is<sem::F32>()) {
- AddError("store type of " + attr_to_str(attr) + " must be 'f32'",
- attr->source);
- return false;
- }
- break;
- case ast::Builtin::kFrontFacing:
- if (stage != ast::PipelineStage::kNone &&
- !(stage == ast::PipelineStage::kFragment && is_input)) {
- is_stage_mismatch = true;
- }
- if (!type->Is<sem::Bool>()) {
- AddError("store type of " + attr_to_str(attr) + " must be 'bool'",
- attr->source);
- return false;
- }
- break;
- case ast::Builtin::kLocalInvocationIndex:
- if (stage != ast::PipelineStage::kNone &&
- !(stage == ast::PipelineStage::kCompute && is_input)) {
- is_stage_mismatch = true;
- }
- if (!type->Is<sem::U32>()) {
- AddError("store type of " + attr_to_str(attr) + " must be 'u32'",
- attr->source);
- return false;
- }
- break;
- case ast::Builtin::kVertexIndex:
- case ast::Builtin::kInstanceIndex:
- if (stage != ast::PipelineStage::kNone &&
- !(stage == ast::PipelineStage::kVertex && is_input)) {
- is_stage_mismatch = true;
- }
- if (!type->Is<sem::U32>()) {
- AddError("store type of " + attr_to_str(attr) + " must be 'u32'",
- attr->source);
- return false;
- }
- break;
- case ast::Builtin::kSampleMask:
- if (stage != ast::PipelineStage::kNone &&
- !(stage == ast::PipelineStage::kFragment)) {
- is_stage_mismatch = true;
- }
- if (!type->Is<sem::U32>()) {
- AddError("store type of " + attr_to_str(attr) + " must be 'u32'",
- attr->source);
- return false;
- }
- break;
- case ast::Builtin::kSampleIndex:
- if (stage != ast::PipelineStage::kNone &&
- !(stage == ast::PipelineStage::kFragment && is_input)) {
- is_stage_mismatch = true;
- }
- if (!type->Is<sem::U32>()) {
- AddError("store type of " + attr_to_str(attr) + " must be 'u32'",
- attr->source);
- return false;
- }
- break;
- default:
- break;
- }
-
- if (is_stage_mismatch) {
- AddError(attr_to_str(attr) + " cannot be used in " +
- (is_input ? "input of " : "output of ") + stage_name.str() +
- " pipeline stage",
- attr->source);
- return false;
- }
-
- return true;
-}
-
-bool Resolver::ValidateInterpolateAttribute(
- const ast::InterpolateAttribute* attr,
- const sem::Type* storage_ty) {
- auto* type = storage_ty->UnwrapRef();
-
- if (type->is_integer_scalar_or_vector() &&
- attr->type != ast::InterpolationType::kFlat) {
- AddError(
- "interpolation type must be 'flat' for integral user-defined IO types",
- attr->source);
- return false;
- }
-
- if (attr->type == ast::InterpolationType::kFlat &&
- attr->sampling != ast::InterpolationSampling::kNone) {
- AddError("flat interpolation attribute must not have a sampling parameter",
- attr->source);
- return false;
- }
-
- return true;
-}
-
-bool Resolver::ValidateFunction(const sem::Function* func) {
- auto* decl = func->Declaration();
-
- auto name = builder_->Symbols().NameFor(decl->symbol);
- if (sem::ParseBuiltinType(name) != sem::BuiltinType::kNone) {
- AddError(
- "'" + name + "' is a builtin and cannot be redeclared as a function",
- decl->source);
- return false;
- }
-
- auto workgroup_attr_count = 0;
- for (auto* attr : decl->attributes) {
- if (attr->Is<ast::WorkgroupAttribute>()) {
- workgroup_attr_count++;
- if (decl->PipelineStage() != ast::PipelineStage::kCompute) {
- AddError(
- "the workgroup_size attribute is only valid for compute stages",
- attr->source);
- return false;
- }
- } else if (!attr->IsAnyOf<ast::StageAttribute, ast::InternalAttribute>()) {
- AddError("attribute is not valid for functions", attr->source);
- return false;
- }
- }
-
- if (decl->params.size() > 255) {
- AddError("functions may declare at most 255 parameters", decl->source);
- return false;
- }
-
- for (size_t i = 0; i < decl->params.size(); i++) {
- if (!ValidateFunctionParameter(decl, func->Parameters()[i])) {
- return false;
- }
- }
-
- if (!func->ReturnType()->Is<sem::Void>()) {
- if (!func->ReturnType()->IsConstructible()) {
- AddError("function return type must be a constructible type",
- decl->return_type->source);
- return false;
- }
-
- if (decl->body) {
- sem::Behaviors behaviors{sem::Behavior::kNext};
- if (auto* last = decl->body->Last()) {
- behaviors = Sem(last)->Behaviors();
- }
- if (behaviors.Contains(sem::Behavior::kNext)) {
- AddError("missing return at end of function", decl->source);
- return false;
- }
- } else if (IsValidationEnabled(
- decl->attributes,
- ast::DisabledValidation::kFunctionHasNoBody)) {
- TINT_ICE(Resolver, diagnostics_)
- << "Function " << builder_->Symbols().NameFor(decl->symbol)
- << " has no body";
- }
-
- for (auto* attr : decl->return_type_attributes) {
- if (!decl->IsEntryPoint()) {
- AddError(
- "attribute is not valid for non-entry point function return types",
- attr->source);
- return false;
- }
- if (!attr->IsAnyOf<ast::BuiltinAttribute, ast::InternalAttribute,
- ast::LocationAttribute, ast::InterpolateAttribute,
- ast::InvariantAttribute>() &&
- (IsValidationEnabled(decl->attributes,
- ast::DisabledValidation::kEntryPointParameter) &&
- IsValidationEnabled(decl->attributes,
- ast::DisabledValidation::
- kIgnoreConstructibleFunctionParameter))) {
- AddError("attribute is not valid for entry point return types",
- attr->source);
- return false;
- }
- }
- }
-
- if (decl->IsEntryPoint()) {
- if (!ValidateEntryPoint(func)) {
- return false;
- }
- }
-
- // https://www.w3.org/TR/WGSL/#behaviors-rules
- // a function behavior is always one of {}, {Next}, {Discard}, or
- // {Next, Discard}.
- if (func->Behaviors() != sem::Behaviors{} && // NOLINT: bad warning
- func->Behaviors() != sem::Behavior::kNext &&
- func->Behaviors() != sem::Behavior::kDiscard &&
- func->Behaviors() != sem::Behaviors{sem::Behavior::kNext, //
- sem::Behavior::kDiscard}) {
- TINT_ICE(Resolver, diagnostics_)
- << "function '" << name << "' behaviors are: " << func->Behaviors();
- }
-
- return true;
-}
-
-bool Resolver::ValidateEntryPoint(const sem::Function* func) {
- auto* decl = func->Declaration();
-
- // Use a lambda to validate the entry point attributes for a type.
- // Persistent state is used to track which builtins and locations have
- // already been seen, in order to catch conflicts.
- // TODO(jrprice): This state could be stored in sem::Function instead, and
- // then passed to sem::Function since it would be useful there too.
- std::unordered_set<ast::Builtin> builtins;
- std::unordered_set<uint32_t> locations;
- enum class ParamOrRetType {
- kParameter,
- kReturnType,
- };
-
- // Inner lambda that is applied to a type and all of its members.
- auto validate_entry_point_attributes_inner = [&](const ast::AttributeList&
- attrs,
- const sem::Type* ty,
- Source source,
- ParamOrRetType param_or_ret,
- bool is_struct_member) {
- // Scan attributes for pipeline IO attributes.
- // Check for overlap with attributes that have been seen previously.
- const ast::Attribute* pipeline_io_attribute = nullptr;
- const ast::InterpolateAttribute* interpolate_attribute = nullptr;
- const ast::InvariantAttribute* invariant_attribute = nullptr;
- for (auto* attr : attrs) {
- auto is_invalid_compute_shader_attribute = false;
- if (auto* builtin = attr->As<ast::BuiltinAttribute>()) {
- if (pipeline_io_attribute) {
- AddError("multiple entry point IO attributes", attr->source);
- AddNote("previously consumed " + attr_to_str(pipeline_io_attribute),
- pipeline_io_attribute->source);
- return false;
- }
- pipeline_io_attribute = attr;
-
- if (builtins.count(builtin->builtin)) {
- AddError(attr_to_str(builtin) +
- " attribute appears multiple times as pipeline " +
- (param_or_ret == ParamOrRetType::kParameter ? "input"
- : "output"),
- decl->source);
- return false;
- }
-
- if (!ValidateBuiltinAttribute(
- builtin, ty,
- /* is_input */ param_or_ret == ParamOrRetType::kParameter)) {
- return false;
- }
- builtins.emplace(builtin->builtin);
- } else if (auto* location = attr->As<ast::LocationAttribute>()) {
- if (pipeline_io_attribute) {
- AddError("multiple entry point IO attributes", attr->source);
- AddNote("previously consumed " + attr_to_str(pipeline_io_attribute),
- pipeline_io_attribute->source);
- return false;
- }
- pipeline_io_attribute = attr;
-
- bool is_input = param_or_ret == ParamOrRetType::kParameter;
- if (!ValidateLocationAttribute(location, ty, locations, source,
- is_input)) {
- return false;
- }
- } else if (auto* interpolate = attr->As<ast::InterpolateAttribute>()) {
- if (decl->PipelineStage() == ast::PipelineStage::kCompute) {
- is_invalid_compute_shader_attribute = true;
- } else if (!ValidateInterpolateAttribute(interpolate, ty)) {
- return false;
- }
- interpolate_attribute = interpolate;
- } else if (auto* invariant = attr->As<ast::InvariantAttribute>()) {
- if (decl->PipelineStage() == ast::PipelineStage::kCompute) {
- is_invalid_compute_shader_attribute = true;
- }
- invariant_attribute = invariant;
- }
- if (is_invalid_compute_shader_attribute) {
- std::string input_or_output =
- param_or_ret == ParamOrRetType::kParameter ? "inputs" : "output";
- AddError("attribute is not valid for compute shader " + input_or_output,
- attr->source);
- return false;
- }
- }
-
- if (IsValidationEnabled(attrs,
- ast::DisabledValidation::kEntryPointParameter)) {
- if (is_struct_member && ty->Is<sem::Struct>()) {
- AddError("nested structures cannot be used for entry point IO", source);
- return false;
- }
-
- if (!ty->Is<sem::Struct>() && !pipeline_io_attribute) {
- std::string err = "missing entry point IO attribute";
- if (!is_struct_member) {
- err +=
- (param_or_ret == ParamOrRetType::kParameter ? " on parameter"
- : " on return type");
- }
- AddError(err, source);
- return false;
- }
-
- if (pipeline_io_attribute &&
- pipeline_io_attribute->Is<ast::LocationAttribute>()) {
- if (ty->is_integer_scalar_or_vector() && !interpolate_attribute) {
- if (decl->PipelineStage() == ast::PipelineStage::kVertex &&
- param_or_ret == ParamOrRetType::kReturnType) {
- AddError(
- "integral user-defined vertex outputs must have a flat "
- "interpolation attribute",
- source);
- return false;
- }
- if (decl->PipelineStage() == ast::PipelineStage::kFragment &&
- param_or_ret == ParamOrRetType::kParameter) {
- AddError(
- "integral user-defined fragment inputs must have a flat "
- "interpolation attribute",
- source);
- return false;
- }
- }
- }
-
- if (interpolate_attribute) {
- if (!pipeline_io_attribute ||
- !pipeline_io_attribute->Is<ast::LocationAttribute>()) {
- AddError("interpolate attribute must only be used with @location",
- interpolate_attribute->source);
- return false;
- }
- }
-
- if (invariant_attribute) {
- bool has_position = false;
- if (pipeline_io_attribute) {
- if (auto* builtin =
- pipeline_io_attribute->As<ast::BuiltinAttribute>()) {
- has_position = (builtin->builtin == ast::Builtin::kPosition);
- }
- }
- if (!has_position) {
- AddError(
- "invariant attribute must only be applied to a position "
- "builtin",
- invariant_attribute->source);
- return false;
- }
- }
- }
- return true;
- };
-
- // Outer lambda for validating the entry point attributes for a type.
- auto validate_entry_point_attributes = [&](const ast::AttributeList& attrs,
- const sem::Type* ty, Source source,
- ParamOrRetType param_or_ret) {
- if (!validate_entry_point_attributes_inner(attrs, ty, source, param_or_ret,
- /*is_struct_member*/ false)) {
- return false;
- }
-
- if (auto* str = ty->As<sem::Struct>()) {
- for (auto* member : str->Members()) {
- if (!validate_entry_point_attributes_inner(
- member->Declaration()->attributes, member->Type(),
- member->Declaration()->source, param_or_ret,
- /*is_struct_member*/ true)) {
- AddNote("while analysing entry point '" +
- builder_->Symbols().NameFor(decl->symbol) + "'",
- decl->source);
- return false;
- }
- }
- }
-
- return true;
- };
-
- for (auto* param : func->Parameters()) {
- auto* param_decl = param->Declaration();
- if (!validate_entry_point_attributes(param_decl->attributes, param->Type(),
- param_decl->source,
- ParamOrRetType::kParameter)) {
- return false;
- }
- }
-
- // Clear IO sets after parameter validation. Builtin and location attributes
- // in return types should be validated independently from those used in
- // parameters.
- builtins.clear();
- locations.clear();
-
- if (!func->ReturnType()->Is<sem::Void>()) {
- if (!validate_entry_point_attributes(decl->return_type_attributes,
- func->ReturnType(), decl->source,
- ParamOrRetType::kReturnType)) {
- return false;
- }
- }
-
- if (decl->PipelineStage() == ast::PipelineStage::kVertex &&
- builtins.count(ast::Builtin::kPosition) == 0) {
- // Check module-scope variables, as the SPIR-V sanitizer generates these.
- bool found = false;
- for (auto* global : func->TransitivelyReferencedGlobals()) {
- if (auto* builtin = ast::GetAttribute<ast::BuiltinAttribute>(
- global->Declaration()->attributes)) {
- if (builtin->builtin == ast::Builtin::kPosition) {
- found = true;
- break;
- }
- }
- }
- if (!found) {
- AddError(
- "a vertex shader must include the 'position' builtin in its return "
- "type",
- decl->source);
- return false;
- }
- }
-
- if (decl->PipelineStage() == ast::PipelineStage::kCompute) {
- if (!ast::HasAttribute<ast::WorkgroupAttribute>(decl->attributes)) {
- AddError(
- "a compute shader must include 'workgroup_size' in its "
- "attributes",
- decl->source);
- return false;
- }
- }
-
- // Validate there are no resource variable binding collisions
- std::unordered_map<sem::BindingPoint, const ast::Variable*> binding_points;
- for (auto* var : func->TransitivelyReferencedGlobals()) {
- auto* var_decl = var->Declaration();
- if (!var_decl->BindingPoint()) {
- continue;
- }
- auto bp = var->BindingPoint();
- auto res = binding_points.emplace(bp, var_decl);
- if (!res.second &&
- IsValidationEnabled(decl->attributes,
- ast::DisabledValidation::kBindingPointCollision) &&
- IsValidationEnabled(res.first->second->attributes,
- ast::DisabledValidation::kBindingPointCollision)) {
- // https://gpuweb.github.io/gpuweb/wgsl/#resource-interface
- // Bindings must not alias within a shader stage: two different
- // variables in the resource interface of a given shader must not have
- // the same group and binding values, when considered as a pair of
- // values.
- auto func_name = builder_->Symbols().NameFor(decl->symbol);
- AddError("entry point '" + func_name +
- "' references multiple variables that use the "
- "same resource binding @group(" +
- std::to_string(bp.group) + "), @binding(" +
- std::to_string(bp.binding) + ")",
- var_decl->source);
- AddNote("first resource binding usage declared here",
- res.first->second->source);
- return false;
- }
- }
-
- return true;
-}
-
-bool Resolver::ValidateStatements(const ast::StatementList& stmts) {
- for (auto* stmt : stmts) {
- if (!Sem(stmt)->IsReachable()) {
- /// TODO(https://github.com/gpuweb/gpuweb/issues/2378): This may need to
- /// become an error.
- AddWarning("code is unreachable", stmt->source);
- break;
- }
- }
- return true;
-}
-
-bool Resolver::ValidateBitcast(const ast::BitcastExpression* cast,
- const sem::Type* to) {
- auto* from = TypeOf(cast->expr)->UnwrapRef();
- if (!from->is_numeric_scalar_or_vector()) {
- AddError("'" + TypeNameOf(from) + "' cannot be bitcast",
- cast->expr->source);
- return false;
- }
- if (!to->is_numeric_scalar_or_vector()) {
- AddError("cannot bitcast to '" + TypeNameOf(to) + "'", cast->type->source);
- return false;
- }
-
- auto width = [&](const sem::Type* ty) {
- if (auto* vec = ty->As<sem::Vector>()) {
- return vec->Width();
- }
- return 1u;
- };
-
- if (width(from) != width(to)) {
- AddError("cannot bitcast from '" + TypeNameOf(from) + "' to '" +
- TypeNameOf(to) + "'",
- cast->source);
- return false;
- }
-
- return true;
-}
-
-bool Resolver::ValidateBreakStatement(const sem::Statement* stmt) {
- if (!stmt->FindFirstParent<sem::LoopBlockStatement, sem::CaseStatement>()) {
- AddError("break statement must be in a loop or switch case",
- stmt->Declaration()->source);
- return false;
- }
- if (auto* continuing = ClosestContinuing(/*stop_at_loop*/ true)) {
- auto fail = [&](const char* note_msg, const Source& note_src) {
- constexpr const char* kErrorMsg =
- "break statement in a continuing block must be the single statement "
- "of an if statement's true or false block, and that if statement "
- "must be the last statement of the continuing block";
- AddError(kErrorMsg, stmt->Declaration()->source);
- AddNote(note_msg, note_src);
- return false;
- };
-
- if (auto* block = stmt->Parent()->As<sem::BlockStatement>()) {
- auto* block_parent = block->Parent();
- auto* if_stmt = block_parent->As<sem::IfStatement>();
- auto* el_stmt = block_parent->As<sem::ElseStatement>();
- if (el_stmt) {
- if_stmt = el_stmt->Parent();
- }
- if (!if_stmt) {
- return fail("break statement is not directly in if statement block",
- stmt->Declaration()->source);
- }
- if (block->Declaration()->statements.size() != 1) {
- return fail("if statement block contains multiple statements",
- block->Declaration()->source);
- }
- for (auto* el : if_stmt->Declaration()->else_statements) {
- if (el->condition) {
- return fail("else has condition", el->condition->source);
- }
- bool el_contains_break = el_stmt && el == el_stmt->Declaration();
- if (el_contains_break) {
- if (auto* true_block = if_stmt->Declaration()->body;
- !true_block->Empty()) {
- return fail("non-empty true block", true_block->source);
- }
- } else {
- if (!el->body->Empty()) {
- return fail("non-empty false block", el->body->source);
- }
- }
- }
- if (if_stmt->Parent()->Declaration() != continuing) {
- return fail(
- "if statement containing break statement is not directly in "
- "continuing block",
- if_stmt->Declaration()->source);
- }
- if (auto* cont_block = continuing->As<ast::BlockStatement>()) {
- if (if_stmt->Declaration() != cont_block->Last()) {
- return fail(
- "if statement containing break statement is not the last "
- "statement of the continuing block",
- if_stmt->Declaration()->source);
- }
- }
- }
- }
- return true;
-}
-
-bool Resolver::ValidateContinueStatement(const sem::Statement* stmt) {
- if (auto* continuing = ClosestContinuing(/*stop_at_loop*/ true)) {
- AddError("continuing blocks must not contain a continue statement",
- stmt->Declaration()->source);
- if (continuing != stmt->Declaration() &&
- continuing != stmt->Parent()->Declaration()) {
- AddNote("see continuing block here", continuing->source);
- }
- return false;
- }
-
- if (!stmt->FindFirstParent<sem::LoopBlockStatement>()) {
- AddError("continue statement must be in a loop",
- stmt->Declaration()->source);
- return false;
- }
-
- return true;
-}
-
-bool Resolver::ValidateDiscardStatement(const sem::Statement* stmt) {
- if (auto* continuing = ClosestContinuing(/*stop_at_loop*/ false)) {
- AddError("continuing blocks must not contain a discard statement",
- stmt->Declaration()->source);
- if (continuing != stmt->Declaration() &&
- continuing != stmt->Parent()->Declaration()) {
- AddNote("see continuing block here", continuing->source);
- }
- return false;
- }
- return true;
-}
-
-bool Resolver::ValidateFallthroughStatement(const sem::Statement* stmt) {
- if (auto* block = As<sem::BlockStatement>(stmt->Parent())) {
- if (auto* c = As<sem::CaseStatement>(block->Parent())) {
- if (block->Declaration()->Last() == stmt->Declaration()) {
- if (auto* s = As<sem::SwitchStatement>(c->Parent())) {
- if (c->Declaration() != s->Declaration()->body.back()) {
- return true;
- }
- AddError(
- "a fallthrough statement must not be used in the last switch "
- "case",
- stmt->Declaration()->source);
- return false;
- }
- }
- }
- }
- AddError(
- "fallthrough must only be used as the last statement of a case block",
- stmt->Declaration()->source);
- return false;
-}
-
-bool Resolver::ValidateElseStatement(const sem::ElseStatement* stmt) {
- if (auto* cond = stmt->Condition()) {
- auto* cond_ty = cond->Type()->UnwrapRef();
- if (!cond_ty->Is<sem::Bool>()) {
- AddError(
- "else statement condition must be bool, got " + TypeNameOf(cond_ty),
- stmt->Condition()->Declaration()->source);
- return false;
- }
- }
- return true;
-}
-
-bool Resolver::ValidateLoopStatement(const sem::LoopStatement* stmt) {
- if (stmt->Behaviors().Empty()) {
- AddError("loop does not exit", stmt->Declaration()->source.Begin());
- return false;
- }
- return true;
-}
-
-bool Resolver::ValidateForLoopStatement(const sem::ForLoopStatement* stmt) {
- if (stmt->Behaviors().Empty()) {
- AddError("for-loop does not exit", stmt->Declaration()->source.Begin());
- return false;
- }
- if (auto* cond = stmt->Condition()) {
- auto* cond_ty = cond->Type()->UnwrapRef();
- if (!cond_ty->Is<sem::Bool>()) {
- AddError("for-loop condition must be bool, got " + TypeNameOf(cond_ty),
- stmt->Condition()->Declaration()->source);
- return false;
- }
- }
- return true;
-}
-
-bool Resolver::ValidateIfStatement(const sem::IfStatement* stmt) {
- auto* cond_ty = stmt->Condition()->Type()->UnwrapRef();
- if (!cond_ty->Is<sem::Bool>()) {
- AddError("if statement condition must be bool, got " + TypeNameOf(cond_ty),
- stmt->Condition()->Declaration()->source);
- return false;
- }
- return true;
-}
-
-bool Resolver::ValidateBuiltinCall(const sem::Call* call) {
- if (call->Type()->Is<sem::Void>()) {
- bool is_call_statement = false;
- if (auto* call_stmt = As<ast::CallStatement>(call->Stmt()->Declaration())) {
- if (call_stmt->expr == call->Declaration()) {
- is_call_statement = true;
- }
- }
- if (!is_call_statement) {
- // https://gpuweb.github.io/gpuweb/wgsl/#function-call-expr
- // If the called function does not return a value, a function call
- // statement should be used instead.
- auto* ident = call->Declaration()->target.name;
- auto name = builder_->Symbols().NameFor(ident->symbol);
- AddError("builtin '" + name + "' does not return a value",
- call->Declaration()->source);
- return false;
- }
- }
-
- return true;
-}
-
-bool Resolver::ValidateTextureBuiltinFunction(const sem::Call* call) {
- auto* builtin = call->Target()->As<sem::Builtin>();
- if (!builtin) {
- return false;
- }
-
- std::string func_name = builtin->str();
- auto& signature = builtin->Signature();
-
- auto check_arg_is_constexpr = [&](sem::ParameterUsage usage, int min,
- int max) {
- auto index = signature.IndexOf(usage);
- if (index < 0) {
- return true;
- }
- std::string name = sem::str(usage);
- auto* arg = call->Arguments()[index];
- if (auto values = arg->ConstantValue()) {
- // Assert that the constant values are of the expected type.
- if (!values.Type()->IsAnyOf<sem::I32, sem::Vector>() ||
- !values.ElementType()->Is<sem::I32>()) {
- TINT_ICE(Resolver, diagnostics_)
- << "failed to resolve '" + func_name + "' " << name
- << " parameter type";
- return false;
- }
-
- // Currently const_expr is restricted to literals and type constructors.
- // Check that that's all we have for the parameter.
- bool is_const_expr = true;
- ast::TraverseExpressions(
- arg->Declaration(), diagnostics_, [&](const ast::Expression* e) {
- if (e->IsAnyOf<ast::LiteralExpression, ast::CallExpression>()) {
- return ast::TraverseAction::Descend;
- }
- is_const_expr = false;
- return ast::TraverseAction::Stop;
- });
- if (is_const_expr) {
- auto vector = builtin->Parameters()[index]->Type()->Is<sem::Vector>();
- for (size_t i = 0; i < values.Elements().size(); i++) {
- auto value = values.Elements()[i].i32;
- if (value < min || value > max) {
- if (vector) {
- AddError("each component of the " + name +
- " argument must be at least " + std::to_string(min) +
- " and at most " + std::to_string(max) + ". " + name +
- " component " + std::to_string(i) + " is " +
- std::to_string(value),
- arg->Declaration()->source);
- } else {
- AddError("the " + name + " argument must be at least " +
- std::to_string(min) + " and at most " +
- std::to_string(max) + ". " + name + " is " +
- std::to_string(value),
- arg->Declaration()->source);
- }
- return false;
- }
- }
- return true;
- }
- }
- AddError("the " + name + " argument must be a const_expression",
- arg->Declaration()->source);
- return false;
- };
-
- return check_arg_is_constexpr(sem::ParameterUsage::kOffset, -8, 7) &&
- check_arg_is_constexpr(sem::ParameterUsage::kComponent, 0, 3);
-}
-
-bool Resolver::ValidateFunctionCall(const sem::Call* call) {
- auto* decl = call->Declaration();
- auto* target = call->Target()->As<sem::Function>();
- auto sym = decl->target.name->symbol;
- auto name = builder_->Symbols().NameFor(sym);
-
- if (target->Declaration()->IsEntryPoint()) {
- // https://www.w3.org/TR/WGSL/#function-restriction
- // An entry point must never be the target of a function call.
- AddError("entry point functions cannot be the target of a function call",
- decl->source);
- return false;
- }
-
- if (decl->args.size() != target->Parameters().size()) {
- bool more = decl->args.size() > target->Parameters().size();
- AddError("too " + (more ? std::string("many") : std::string("few")) +
- " arguments in call to '" + name + "', expected " +
- std::to_string(target->Parameters().size()) + ", got " +
- std::to_string(call->Arguments().size()),
- decl->source);
- return false;
- }
-
- for (size_t i = 0; i < call->Arguments().size(); ++i) {
- const sem::Variable* param = target->Parameters()[i];
- const ast::Expression* arg_expr = decl->args[i];
- auto* param_type = param->Type();
- auto* arg_type = TypeOf(arg_expr)->UnwrapRef();
-
- if (param_type != arg_type) {
- AddError("type mismatch for argument " + std::to_string(i + 1) +
- " in call to '" + name + "', expected '" +
- TypeNameOf(param_type) + "', got '" + TypeNameOf(arg_type) +
- "'",
- arg_expr->source);
- return false;
- }
-
- if (param_type->Is<sem::Pointer>()) {
- auto is_valid = false;
- if (auto* ident_expr = arg_expr->As<ast::IdentifierExpression>()) {
- auto* var = ResolvedSymbol<sem::Variable>(ident_expr);
- if (!var) {
- TINT_ICE(Resolver, diagnostics_) << "failed to resolve identifier";
- return false;
- }
- if (var->Is<sem::Parameter>()) {
- is_valid = true;
- }
- } else if (auto* unary = arg_expr->As<ast::UnaryOpExpression>()) {
- if (unary->op == ast::UnaryOp::kAddressOf) {
- if (auto* ident_unary =
- unary->expr->As<ast::IdentifierExpression>()) {
- auto* var = ResolvedSymbol<sem::Variable>(ident_unary);
- if (!var) {
- TINT_ICE(Resolver, diagnostics_)
- << "failed to resolve identifier";
- return false;
- }
- if (var->Declaration()->is_const) {
- TINT_ICE(Resolver, diagnostics_)
- << "Resolver::FunctionCall() encountered an address-of "
- "expression of a constant identifier expression";
- return false;
- }
- is_valid = true;
- }
- }
- }
-
- if (!is_valid &&
- IsValidationEnabled(
- param->Declaration()->attributes,
- ast::DisabledValidation::kIgnoreInvalidPointerArgument)) {
- AddError(
- "expected an address-of expression of a variable identifier "
- "expression or a function parameter",
- arg_expr->source);
- return false;
- }
- }
- }
-
- if (call->Type()->Is<sem::Void>()) {
- bool is_call_statement = false;
- if (auto* call_stmt = As<ast::CallStatement>(call->Stmt()->Declaration())) {
- if (call_stmt->expr == call->Declaration()) {
- is_call_statement = true;
- }
- }
- if (!is_call_statement) {
- // https://gpuweb.github.io/gpuweb/wgsl/#function-call-expr
- // If the called function does not return a value, a function call
- // statement should be used instead.
- AddError("function '" + name + "' does not return a value", decl->source);
- return false;
- }
- }
-
- if (call->Behaviors().Contains(sem::Behavior::kDiscard)) {
- if (auto* continuing = ClosestContinuing(/*stop_at_loop*/ false)) {
- AddError(
- "cannot call a function that may discard inside a continuing block",
- call->Declaration()->source);
- if (continuing != call->Stmt()->Declaration() &&
- continuing != call->Stmt()->Parent()->Declaration()) {
- AddNote("see continuing block here", continuing->source);
- }
- return false;
- }
- }
-
- return true;
-}
-
-bool Resolver::ValidateStructureConstructorOrCast(
- const ast::CallExpression* ctor,
- const sem::Struct* struct_type) {
- if (!struct_type->IsConstructible()) {
- AddError("struct constructor has non-constructible type", ctor->source);
- return false;
- }
-
- if (ctor->args.size() > 0) {
- if (ctor->args.size() != struct_type->Members().size()) {
- std::string fm =
- ctor->args.size() < struct_type->Members().size() ? "few" : "many";
- AddError("struct constructor has too " + fm + " inputs: expected " +
- std::to_string(struct_type->Members().size()) + ", found " +
- std::to_string(ctor->args.size()),
- ctor->source);
- return false;
- }
- for (auto* member : struct_type->Members()) {
- auto* value = ctor->args[member->Index()];
- auto* value_ty = TypeOf(value);
- if (member->Type() != value_ty->UnwrapRef()) {
- AddError(
- "type in struct constructor does not match struct member type: "
- "expected '" +
- TypeNameOf(member->Type()) + "', found '" +
- TypeNameOf(value_ty) + "'",
- value->source);
- return false;
- }
- }
- }
- return true;
-}
-
-bool Resolver::ValidateArrayConstructorOrCast(const ast::CallExpression* ctor,
- const sem::Array* array_type) {
- auto& values = ctor->args;
- auto* elem_ty = array_type->ElemType();
- for (auto* value : values) {
- auto* value_ty = TypeOf(value)->UnwrapRef();
- if (value_ty != elem_ty) {
- AddError(
- "type in array constructor does not match array type: "
- "expected '" +
- TypeNameOf(elem_ty) + "', found '" + TypeNameOf(value_ty) + "'",
- value->source);
- return false;
- }
- }
-
- if (array_type->IsRuntimeSized()) {
- AddError("cannot init a runtime-sized array", ctor->source);
- return false;
- } else if (!elem_ty->IsConstructible()) {
- AddError("array constructor has non-constructible element type",
- ctor->source);
- return false;
- } else if (!values.empty() && (values.size() != array_type->Count())) {
- std::string fm = values.size() < array_type->Count() ? "few" : "many";
- AddError("array constructor has too " + fm + " elements: expected " +
- std::to_string(array_type->Count()) + ", found " +
- std::to_string(values.size()),
- ctor->source);
- return false;
- } else if (values.size() > array_type->Count()) {
- AddError("array constructor has too many elements: expected " +
- std::to_string(array_type->Count()) + ", found " +
- std::to_string(values.size()),
- ctor->source);
- return false;
- }
- return true;
-}
-
-bool Resolver::ValidateVectorConstructorOrCast(const ast::CallExpression* ctor,
- const sem::Vector* vec_type) {
- auto& values = ctor->args;
- auto* elem_ty = vec_type->type();
- size_t value_cardinality_sum = 0;
- for (auto* value : values) {
- auto* value_ty = TypeOf(value)->UnwrapRef();
- if (value_ty->is_scalar()) {
- if (elem_ty != value_ty) {
- AddError(
- "type in vector constructor does not match vector type: "
- "expected '" +
- TypeNameOf(elem_ty) + "', found '" + TypeNameOf(value_ty) + "'",
- value->source);
- return false;
- }
-
- value_cardinality_sum++;
- } else if (auto* value_vec = value_ty->As<sem::Vector>()) {
- auto* value_elem_ty = value_vec->type();
- // A mismatch of vector type parameter T is only an error if multiple
- // arguments are present. A single argument constructor constitutes a
- // type conversion expression.
- if (elem_ty != value_elem_ty && values.size() > 1u) {
- AddError(
- "type in vector constructor does not match vector type: "
- "expected '" +
- TypeNameOf(elem_ty) + "', found '" + TypeNameOf(value_elem_ty) +
- "'",
- value->source);
- return false;
- }
-
- value_cardinality_sum += value_vec->Width();
- } else {
- // A vector constructor can only accept vectors and scalars.
- AddError("expected vector or scalar type in vector constructor; found: " +
- TypeNameOf(value_ty),
- value->source);
- return false;
- }
- }
-
- // A correct vector constructor must either be a zero-value expression,
- // a single-value initializer (splat) expression, or the number of components
- // of all constructor arguments must add up to the vector cardinality.
- if (value_cardinality_sum > 1 && value_cardinality_sum != vec_type->Width()) {
- if (values.empty()) {
- TINT_ICE(Resolver, diagnostics_)
- << "constructor arguments expected to be non-empty!";
- }
- const Source& values_start = values[0]->source;
- const Source& values_end = values[values.size() - 1]->source;
- AddError("attempted to construct '" + TypeNameOf(vec_type) + "' with " +
- std::to_string(value_cardinality_sum) + " component(s)",
- Source::Combine(values_start, values_end));
- return false;
- }
- return true;
-}
-
-bool Resolver::ValidateVector(const sem::Vector* ty, const Source& source) {
- if (!ty->type()->is_scalar()) {
- AddError("vector element type must be 'bool', 'f32', 'i32' or 'u32'",
- source);
- return false;
- }
- return true;
-}
-
-bool Resolver::ValidateMatrix(const sem::Matrix* ty, const Source& source) {
- if (!ty->is_float_matrix()) {
- AddError("matrix element type must be 'f32'", source);
- return false;
- }
- return true;
-}
-
-bool Resolver::ValidateMatrixConstructorOrCast(const ast::CallExpression* ctor,
- const sem::Matrix* matrix_ty) {
- auto& values = ctor->args;
- // Zero Value expression
- if (values.empty()) {
- return true;
- }
-
- if (!ValidateMatrix(matrix_ty, ctor->source)) {
- return false;
- }
-
- std::vector<const sem::Type*> arg_tys;
- arg_tys.reserve(values.size());
- for (auto* value : values) {
- arg_tys.emplace_back(TypeOf(value)->UnwrapRef());
- }
-
- auto* elem_type = matrix_ty->type();
- auto num_elements = matrix_ty->columns() * matrix_ty->rows();
-
- // Print a generic error for an invalid matrix constructor, showing the
- // available overloads.
- auto print_error = [&]() {
- const Source& values_start = values[0]->source;
- const Source& values_end = values[values.size() - 1]->source;
- auto type_name = TypeNameOf(matrix_ty);
- auto elem_type_name = TypeNameOf(elem_type);
- std::stringstream ss;
- ss << "no matching constructor " + type_name << "(";
- for (size_t i = 0; i < values.size(); i++) {
- if (i > 0) {
- ss << ", ";
- }
- ss << arg_tys[i]->FriendlyName(builder_->Symbols());
- }
- ss << ")" << std::endl << std::endl;
- ss << "3 candidates available:" << std::endl;
- ss << " " << type_name << "()" << std::endl;
- ss << " " << type_name << "(" << elem_type_name << ",...,"
- << elem_type_name << ")"
- << " // " << std::to_string(num_elements) << " arguments" << std::endl;
- ss << " " << type_name << "(";
- for (uint32_t c = 0; c < matrix_ty->columns(); c++) {
- if (c > 0) {
- ss << ", ";
- }
- ss << VectorPretty(matrix_ty->rows(), elem_type);
- }
- ss << ")" << std::endl;
- AddError(ss.str(), Source::Combine(values_start, values_end));
- };
-
- const sem::Type* expected_arg_type = nullptr;
- if (num_elements == values.size()) {
- // Column-major construction from scalar elements.
- expected_arg_type = matrix_ty->type();
- } else if (matrix_ty->columns() == values.size()) {
- // Column-by-column construction from vectors.
- expected_arg_type = matrix_ty->ColumnType();
- } else {
- print_error();
- return false;
- }
-
- for (auto* arg_ty : arg_tys) {
- if (arg_ty != expected_arg_type) {
- print_error();
- return false;
- }
- }
-
- return true;
-}
-
-bool Resolver::ValidateScalarConstructorOrCast(const ast::CallExpression* ctor,
- const sem::Type* ty) {
- if (ctor->args.size() == 0) {
- return true;
- }
- if (ctor->args.size() > 1) {
- AddError("expected zero or one value in constructor, got " +
- std::to_string(ctor->args.size()),
- ctor->source);
- return false;
- }
-
- // Validate constructor
- auto* value = ctor->args[0];
- auto* value_ty = TypeOf(value)->UnwrapRef();
-
- using Bool = sem::Bool;
- using I32 = sem::I32;
- using U32 = sem::U32;
- using F32 = sem::F32;
-
- const bool is_valid = (ty->Is<Bool>() && value_ty->is_scalar()) ||
- (ty->Is<I32>() && value_ty->is_scalar()) ||
- (ty->Is<U32>() && value_ty->is_scalar()) ||
- (ty->Is<F32>() && value_ty->is_scalar());
- if (!is_valid) {
- AddError("cannot construct '" + TypeNameOf(ty) +
- "' with a value of type '" + TypeNameOf(value_ty) + "'",
- ctor->source);
-
- return false;
- }
-
- return true;
-}
-
-bool Resolver::ValidatePipelineStages() {
- auto check_workgroup_storage = [&](const sem::Function* func,
- const sem::Function* entry_point) {
- auto stage = entry_point->Declaration()->PipelineStage();
- if (stage != ast::PipelineStage::kCompute) {
- for (auto* var : func->DirectlyReferencedGlobals()) {
- if (var->StorageClass() == ast::StorageClass::kWorkgroup) {
- std::stringstream stage_name;
- stage_name << stage;
- for (auto* user : var->Users()) {
- if (func == user->Stmt()->Function()) {
- AddError("workgroup memory cannot be used by " +
- stage_name.str() + " pipeline stage",
- user->Declaration()->source);
- break;
- }
- }
- AddNote("variable is declared here", var->Declaration()->source);
- if (func != entry_point) {
- TraverseCallChain(diagnostics_, entry_point, func,
- [&](const sem::Function* f) {
- AddNote("called by function '" +
- builder_->Symbols().NameFor(
- f->Declaration()->symbol) +
- "'",
- f->Declaration()->source);
- });
- AddNote("called by entry point '" +
- builder_->Symbols().NameFor(
- entry_point->Declaration()->symbol) +
- "'",
- entry_point->Declaration()->source);
- }
- return false;
- }
- }
- }
- return true;
- };
-
- for (auto* entry_point : entry_points_) {
- if (!check_workgroup_storage(entry_point, entry_point)) {
- return false;
- }
- for (auto* func : entry_point->TransitivelyCalledFunctions()) {
- if (!check_workgroup_storage(func, entry_point)) {
- return false;
- }
- }
- }
-
- auto check_builtin_calls = [&](const sem::Function* func,
- const sem::Function* entry_point) {
- auto stage = entry_point->Declaration()->PipelineStage();
- for (auto* builtin : func->DirectlyCalledBuiltins()) {
- if (!builtin->SupportedStages().Contains(stage)) {
- auto* call = func->FindDirectCallTo(builtin);
- std::stringstream err;
- err << "built-in cannot be used by " << stage << " pipeline stage";
- AddError(err.str(), call ? call->Declaration()->source
- : func->Declaration()->source);
- if (func != entry_point) {
- TraverseCallChain(
- diagnostics_, entry_point, func, [&](const sem::Function* f) {
- AddNote(
- "called by function '" +
- builder_->Symbols().NameFor(f->Declaration()->symbol) +
- "'",
- f->Declaration()->source);
- });
- AddNote("called by entry point '" +
- builder_->Symbols().NameFor(
- entry_point->Declaration()->symbol) +
- "'",
- entry_point->Declaration()->source);
- }
- return false;
- }
- }
- return true;
- };
-
- for (auto* entry_point : entry_points_) {
- if (!check_builtin_calls(entry_point, entry_point)) {
- return false;
- }
- for (auto* func : entry_point->TransitivelyCalledFunctions()) {
- if (!check_builtin_calls(func, entry_point)) {
- return false;
- }
- }
- }
- return true;
-}
-
-bool Resolver::ValidateArray(const sem::Array* arr, const Source& source) {
- auto* el_ty = arr->ElemType();
-
- if (!IsFixedFootprint(el_ty)) {
- AddError("an array element type cannot contain a runtime-sized array",
- source);
- return false;
- }
- return true;
-}
-
-bool Resolver::ValidateArrayStrideAttribute(const ast::StrideAttribute* attr,
- uint32_t el_size,
- uint32_t el_align,
- const Source& source) {
- auto stride = attr->stride;
- bool is_valid_stride =
- (stride >= el_size) && (stride >= el_align) && (stride % el_align == 0);
- if (!is_valid_stride) {
- // https://gpuweb.github.io/gpuweb/wgsl/#array-layout-rules
- // Arrays decorated with the stride attribute must have a stride that is
- // at least the size of the element type, and be a multiple of the
- // element type's alignment value.
- AddError(
- "arrays decorated with the stride attribute must have a stride "
- "that is at least the size of the element type, and be a multiple "
- "of the element type's alignment value.",
- source);
- return false;
- }
- return true;
-}
-
-bool Resolver::ValidateAlias(const ast::Alias* alias) {
- auto name = builder_->Symbols().NameFor(alias->name);
- if (sem::ParseBuiltinType(name) != sem::BuiltinType::kNone) {
- AddError("'" + name + "' is a builtin and cannot be redeclared as an alias",
- alias->source);
- return false;
- }
-
- return true;
-}
-
-bool Resolver::ValidateStructure(const sem::Struct* str) {
- auto name = builder_->Symbols().NameFor(str->Declaration()->name);
- if (sem::ParseBuiltinType(name) != sem::BuiltinType::kNone) {
- AddError("'" + name + "' is a builtin and cannot be redeclared as a struct",
- str->Declaration()->source);
- return false;
- }
-
- if (str->Members().empty()) {
- AddError("structures must have at least one member",
- str->Declaration()->source);
- return false;
- }
-
- std::unordered_set<uint32_t> locations;
- for (auto* member : str->Members()) {
- if (auto* r = member->Type()->As<sem::Array>()) {
- if (r->IsRuntimeSized()) {
- if (member != str->Members().back()) {
- AddError(
- "runtime arrays may only appear as the last member of a struct",
- member->Declaration()->source);
- return false;
- }
- }
- } else if (!IsFixedFootprint(member->Type())) {
- AddError(
- "a struct that contains a runtime array cannot be nested inside "
- "another struct",
- member->Declaration()->source);
- return false;
- }
-
- auto has_location = false;
- auto has_position = false;
- const ast::InvariantAttribute* invariant_attribute = nullptr;
- const ast::InterpolateAttribute* interpolate_attribute = nullptr;
- for (auto* attr : member->Declaration()->attributes) {
- if (!attr->IsAnyOf<ast::BuiltinAttribute, //
- ast::InternalAttribute, //
- ast::InterpolateAttribute, //
- ast::InvariantAttribute, //
- ast::LocationAttribute, //
- ast::StructMemberOffsetAttribute, //
- ast::StructMemberSizeAttribute, //
- ast::StructMemberAlignAttribute>()) {
- if (attr->Is<ast::StrideAttribute>() &&
- IsValidationDisabled(
- member->Declaration()->attributes,
- ast::DisabledValidation::kIgnoreStrideAttribute)) {
- continue;
- }
- AddError("attribute is not valid for structure members", attr->source);
- return false;
- }
-
- if (auto* invariant = attr->As<ast::InvariantAttribute>()) {
- invariant_attribute = invariant;
- } else if (auto* location = attr->As<ast::LocationAttribute>()) {
- has_location = true;
- if (!ValidateLocationAttribute(location, member->Type(), locations,
- member->Declaration()->source)) {
- return false;
- }
- } else if (auto* builtin = attr->As<ast::BuiltinAttribute>()) {
- if (!ValidateBuiltinAttribute(builtin, member->Type(),
- /* is_input */ false)) {
- return false;
- }
- if (builtin->builtin == ast::Builtin::kPosition) {
- has_position = true;
- }
- } else if (auto* interpolate = attr->As<ast::InterpolateAttribute>()) {
- interpolate_attribute = interpolate;
- if (!ValidateInterpolateAttribute(interpolate, member->Type())) {
- return false;
- }
- }
- }
-
- if (invariant_attribute && !has_position) {
- AddError("invariant attribute must only be applied to a position builtin",
- invariant_attribute->source);
- return false;
- }
-
- if (interpolate_attribute && !has_location) {
- AddError("interpolate attribute must only be used with @location",
- interpolate_attribute->source);
- return false;
- }
- }
-
- for (auto* attr : str->Declaration()->attributes) {
- if (!(attr->IsAnyOf<ast::InternalAttribute>())) {
- AddError("attribute is not valid for struct declarations", attr->source);
- return false;
- }
- }
-
- return true;
-}
-
-bool Resolver::ValidateLocationAttribute(
- const ast::LocationAttribute* location,
- const sem::Type* type,
- std::unordered_set<uint32_t>& locations,
- const Source& source,
- const bool is_input) {
- std::string inputs_or_output = is_input ? "inputs" : "output";
- if (current_function_ && current_function_->Declaration()->PipelineStage() ==
- ast::PipelineStage::kCompute) {
- AddError("attribute is not valid for compute shader " + inputs_or_output,
- location->source);
- return false;
- }
-
- if (!type->is_numeric_scalar_or_vector()) {
- std::string invalid_type = TypeNameOf(type);
- AddError("cannot apply 'location' attribute to declaration of type '" +
- invalid_type + "'",
- source);
- AddNote(
- "'location' attribute must only be applied to declarations of "
- "numeric scalar or numeric vector type",
- location->source);
- return false;
- }
-
- if (locations.count(location->value)) {
- AddError(attr_to_str(location) + " attribute appears multiple times",
- location->source);
- return false;
- }
- locations.emplace(location->value);
-
- return true;
-}
-
-bool Resolver::ValidateReturn(const ast::ReturnStatement* ret) {
- auto* func_type = current_function_->ReturnType();
-
- auto* ret_type = ret->value ? TypeOf(ret->value)->UnwrapRef()
- : builder_->create<sem::Void>();
-
- if (func_type->UnwrapRef() != ret_type) {
- AddError(
- "return statement type must match its function "
- "return type, returned '" +
- TypeNameOf(ret_type) + "', expected '" + TypeNameOf(func_type) +
- "'",
- ret->source);
- return false;
- }
-
- auto* sem = Sem(ret);
- if (auto* continuing = ClosestContinuing(/*stop_at_loop*/ false)) {
- AddError("continuing blocks must not contain a return statement",
- ret->source);
- if (continuing != sem->Declaration() &&
- continuing != sem->Parent()->Declaration()) {
- AddNote("see continuing block here", continuing->source);
- }
- return false;
- }
-
- return true;
-}
-
-bool Resolver::ValidateSwitch(const ast::SwitchStatement* s) {
- auto* cond_ty = TypeOf(s->condition)->UnwrapRef();
- if (!cond_ty->is_integer_scalar()) {
- AddError(
- "switch statement selector expression must be of a "
- "scalar integer type",
- s->condition->source);
- return false;
- }
-
- bool has_default = false;
- std::unordered_map<uint32_t, Source> selectors;
-
- for (auto* case_stmt : s->body) {
- if (case_stmt->IsDefault()) {
- if (has_default) {
- // More than one default clause
- AddError("switch statement must have exactly one default clause",
- case_stmt->source);
- return false;
- }
- has_default = true;
- }
-
- for (auto* selector : case_stmt->selectors) {
- if (cond_ty != TypeOf(selector)) {
- AddError(
- "the case selector values must have the same "
- "type as the selector expression.",
- case_stmt->source);
- return false;
- }
-
- auto v = selector->ValueAsU32();
- auto it = selectors.find(v);
- if (it != selectors.end()) {
- auto val = selector->Is<ast::IntLiteralExpression>()
- ? std::to_string(selector->ValueAsI32())
- : std::to_string(selector->ValueAsU32());
- AddError("duplicate switch case '" + val + "'", selector->source);
- AddNote("previous case declared here", it->second);
- return false;
- }
- selectors.emplace(v, selector->source);
- }
- }
-
- if (!has_default) {
- // No default clause
- AddError("switch statement must have a default clause", s->source);
- return false;
- }
-
- return true;
-}
-
-bool Resolver::ValidateAssignment(const ast::Statement* a,
- const sem::Type* rhs_ty) {
- const ast::Expression* lhs;
- const ast::Expression* rhs;
- if (auto* assign = a->As<ast::AssignmentStatement>()) {
- lhs = assign->lhs;
- rhs = assign->rhs;
- } else if (auto* compound = a->As<ast::CompoundAssignmentStatement>()) {
- lhs = compound->lhs;
- rhs = compound->rhs;
- } else {
- TINT_ICE(Resolver, diagnostics_) << "invalid assignment statement";
- return false;
- }
-
- if (lhs->Is<ast::PhonyExpression>()) {
- // https://www.w3.org/TR/WGSL/#phony-assignment-section
- auto* ty = rhs_ty->UnwrapRef();
- if (!ty->IsConstructible() &&
- !ty->IsAnyOf<sem::Pointer, sem::Texture, sem::Sampler>()) {
- AddError(
- "cannot assign '" + TypeNameOf(rhs_ty) +
- "' to '_'. '_' can only be assigned a constructible, pointer, "
- "texture or sampler type",
- rhs->source);
- return false;
- }
- return true; // RHS can be anything.
- }
-
- // https://gpuweb.github.io/gpuweb/wgsl/#assignment-statement
- auto const* lhs_ty = TypeOf(lhs);
-
- if (auto* var = ResolvedSymbol<sem::Variable>(lhs)) {
- auto* decl = var->Declaration();
- if (var->Is<sem::Parameter>()) {
- AddError("cannot assign to function parameter", lhs->source);
- AddNote("'" + builder_->Symbols().NameFor(decl->symbol) +
- "' is declared here:",
- decl->source);
- return false;
- }
- if (decl->is_const) {
- AddError("cannot assign to const", lhs->source);
- AddNote("'" + builder_->Symbols().NameFor(decl->symbol) +
- "' is declared here:",
- decl->source);
- return false;
- }
- }
-
- auto* lhs_ref = lhs_ty->As<sem::Reference>();
- if (!lhs_ref) {
- // LHS is not a reference, so it has no storage.
- AddError("cannot assign to value of type '" + TypeNameOf(lhs_ty) + "'",
- lhs->source);
- return false;
- }
-
- auto* storage_ty = lhs_ref->StoreType();
- auto* value_type = rhs_ty->UnwrapRef(); // Implicit load of RHS
-
- // Value type has to match storage type
- if (storage_ty != value_type) {
- AddError("cannot assign '" + TypeNameOf(rhs_ty) + "' to '" +
- TypeNameOf(lhs_ty) + "'",
- a->source);
- return false;
- }
- if (!storage_ty->IsConstructible()) {
- AddError("storage type of assignment must be constructible", a->source);
- return false;
- }
- if (lhs_ref->Access() == ast::Access::kRead) {
- AddError(
- "cannot store into a read-only type '" + RawTypeNameOf(lhs_ty) + "'",
- a->source);
- return false;
- }
- return true;
-}
-
-bool Resolver::ValidateIncrementDecrementStatement(
- const ast::IncrementDecrementStatement* inc) {
- const ast::Expression* lhs = inc->lhs;
-
- // https://gpuweb.github.io/gpuweb/wgsl/#increment-decrement
-
- if (auto* var = ResolvedSymbol<sem::Variable>(lhs)) {
- auto* decl = var->Declaration();
- if (var->Is<sem::Parameter>()) {
- AddError("cannot modify function parameter", lhs->source);
- AddNote("'" + builder_->Symbols().NameFor(decl->symbol) +
- "' is declared here:",
- decl->source);
- return false;
- }
- if (decl->is_const) {
- AddError("cannot modify constant value", lhs->source);
- AddNote("'" + builder_->Symbols().NameFor(decl->symbol) +
- "' is declared here:",
- decl->source);
- return false;
- }
- }
-
- auto const* lhs_ty = TypeOf(lhs);
- auto* lhs_ref = lhs_ty->As<sem::Reference>();
- if (!lhs_ref) {
- // LHS is not a reference, so it has no storage.
- AddError("cannot modify value of type '" + TypeNameOf(lhs_ty) + "'",
- lhs->source);
- return false;
- }
-
- if (!lhs_ref->StoreType()->is_integer_scalar()) {
- const std::string kind = inc->increment ? "increment" : "decrement";
- AddError(kind + " statement can only be applied to an integer scalar",
- lhs->source);
- return false;
- }
-
- if (lhs_ref->Access() == ast::Access::kRead) {
- AddError("cannot modify read-only type '" + RawTypeNameOf(lhs_ty) + "'",
- inc->source);
- return false;
- }
- return true;
-}
-
-bool Resolver::ValidateNoDuplicateAttributes(
- const ast::AttributeList& attributes) {
- std::unordered_map<const TypeInfo*, Source> seen;
- for (auto* d : attributes) {
- auto res = seen.emplace(&d->TypeInfo(), d->source);
- if (!res.second && !d->Is<ast::InternalAttribute>()) {
- AddError("duplicate " + d->Name() + " attribute", d->source);
- AddNote("first attribute declared here", res.first->second);
- return false;
- }
- }
- return true;
-}
-
-bool Resolver::IsValidationDisabled(const ast::AttributeList& attributes,
- ast::DisabledValidation validation) const {
- for (auto* attribute : attributes) {
- if (auto* dv = attribute->As<ast::DisableValidationAttribute>()) {
- if (dv->validation == validation) {
- return true;
- }
- }
- }
- return false;
-}
-
-bool Resolver::IsValidationEnabled(const ast::AttributeList& attributes,
- ast::DisabledValidation validation) const {
- return !IsValidationDisabled(attributes, validation);
-}
-
-} // namespace tint::resolver
diff --git a/chromium/third_party/dawn/src/tint/resolver/sem_helper.cc b/chromium/third_party/dawn/src/tint/resolver/sem_helper.cc
new file mode 100644
index 00000000000..01a54806265
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/resolver/sem_helper.cc
@@ -0,0 +1,39 @@
+// Copyright 2022 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/tint/resolver/sem_helper.h"
+
+#include "src/tint/sem/expression.h"
+
+namespace tint::resolver {
+
+SemHelper::SemHelper(ProgramBuilder* builder, DependencyGraph& dependencies)
+ : builder_(builder), dependencies_(dependencies) {}
+
+SemHelper::~SemHelper() = default;
+
+std::string SemHelper::TypeNameOf(const sem::Type* ty) const {
+ return RawTypeNameOf(ty->UnwrapRef());
+}
+
+std::string SemHelper::RawTypeNameOf(const sem::Type* ty) const {
+ return ty->FriendlyName(builder_->Symbols());
+}
+
+sem::Type* SemHelper::TypeOf(const ast::Expression* expr) const {
+ auto* sem = Get(expr);
+ return sem ? const_cast<sem::Type*>(sem->Type()) : nullptr;
+}
+
+} // namespace tint::resolver
diff --git a/chromium/third_party/dawn/src/tint/resolver/sem_helper.h b/chromium/third_party/dawn/src/tint/resolver/sem_helper.h
new file mode 100644
index 00000000000..9b0967bc579
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/resolver/sem_helper.h
@@ -0,0 +1,82 @@
+// Copyright 2022 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef SRC_TINT_RESOLVER_SEM_HELPER_H_
+#define SRC_TINT_RESOLVER_SEM_HELPER_H_
+
+#include <string>
+
+#include "src/tint/diagnostic/diagnostic.h"
+#include "src/tint/program_builder.h"
+#include "src/tint/resolver/dependency_graph.h"
+#include "src/tint/utils/map.h"
+
+namespace tint::resolver {
+
+/// Helper class to retrieve sem information.
+class SemHelper {
+ public:
+ /// Constructor
+ /// @param builder the program builder
+ /// @param dependencies the program dependency graph
+ explicit SemHelper(ProgramBuilder* builder, DependencyGraph& dependencies);
+ ~SemHelper();
+
+ /// Get is a helper for obtaining the semantic node for the given AST node.
+ /// @param ast the ast node to get the sem for
+ /// @returns the sem node for the provided |ast|
+ template <typename SEM = sem::Info::InferFromAST, typename AST_OR_TYPE = CastableBase>
+ auto* Get(const AST_OR_TYPE* ast) const {
+ using T = sem::Info::GetResultType<SEM, AST_OR_TYPE>;
+ auto* sem = builder_->Sem().Get(ast);
+ if (!sem) {
+ TINT_ICE(Resolver, builder_->Diagnostics())
+ << "AST node '" << ast->TypeInfo().name << "' had no semantic info\n"
+ << "At: " << ast->source << "\n"
+ << "Pointer: " << ast;
+ }
+ return const_cast<T*>(As<T>(sem));
+ }
+
+ /// @returns the resolved symbol (function, type or variable) for the given
+ /// ast::Identifier or ast::TypeName cast to the given semantic type.
+ /// @param node the node to retrieve
+ template <typename SEM = sem::Node>
+ SEM* ResolvedSymbol(const ast::Node* node) const {
+ auto* resolved = utils::Lookup(dependencies_.resolved_symbols, node);
+ return resolved ? const_cast<SEM*>(builder_->Sem().Get<SEM>(resolved)) : nullptr;
+ }
+
+ /// @returns the resolved type of the ast::Expression `expr`
+ /// @param expr the expression
+ sem::Type* TypeOf(const ast::Expression* expr) const;
+
+ /// @returns the type name of the given semantic type, unwrapping
+ /// references.
+ /// @param ty the type to look up
+ std::string TypeNameOf(const sem::Type* ty) const;
+
+ /// @returns the type name of the given semantic type, without unwrapping
+ /// references.
+ /// @param ty the type to look up
+ std::string RawTypeNameOf(const sem::Type* ty) const;
+
+ private:
+ ProgramBuilder* builder_;
+ DependencyGraph& dependencies_;
+};
+
+} // namespace tint::resolver
+
+#endif // SRC_TINT_RESOLVER_SEM_HELPER_H_
diff --git a/chromium/third_party/dawn/src/tint/resolver/side_effects_test.cc b/chromium/third_party/dawn/src/tint/resolver/side_effects_test.cc
index 50f4e99d9d4..a50d9045f4e 100644
--- a/chromium/third_party/dawn/src/tint/resolver/side_effects_test.cc
+++ b/chromium/third_party/dawn/src/tint/resolver/side_effects_test.cc
@@ -19,364 +19,366 @@
#include "src/tint/sem/expression.h"
#include "src/tint/sem/member_accessor_expression.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::resolver {
namespace {
struct SideEffectsTest : ResolverTest {
- template <typename T>
- void MakeSideEffectFunc(const char* name) {
- auto global = Sym();
- Global(global, ty.Of<T>(), ast::StorageClass::kPrivate);
- auto local = Sym();
- Func(name, {}, ty.Of<T>(),
- {
- Decl(Var(local, ty.Of<T>())),
- Assign(global, local),
- Return(global),
- });
- }
-
- template <typename MAKE_TYPE_FUNC>
- void MakeSideEffectFunc(const char* name, MAKE_TYPE_FUNC make_type) {
- auto global = Sym();
- Global(global, make_type(), ast::StorageClass::kPrivate);
- auto local = Sym();
- Func(name, {}, make_type(),
- {
- Decl(Var(local, make_type())),
- Assign(global, local),
- Return(global),
- });
- }
+ template <typename T>
+ void MakeSideEffectFunc(const char* name) {
+ auto global = Sym();
+ Global(global, ty.Of<T>(), ast::StorageClass::kPrivate);
+ auto local = Sym();
+ Func(name, {}, ty.Of<T>(),
+ {
+ Decl(Var(local, ty.Of<T>())),
+ Assign(global, local),
+ Return(global),
+ });
+ }
+
+ template <typename MAKE_TYPE_FUNC>
+ void MakeSideEffectFunc(const char* name, MAKE_TYPE_FUNC make_type) {
+ auto global = Sym();
+ Global(global, make_type(), ast::StorageClass::kPrivate);
+ auto local = Sym();
+ Func(name, {}, make_type(),
+ {
+ Decl(Var(local, make_type())),
+ Assign(global, local),
+ Return(global),
+ });
+ }
};
TEST_F(SideEffectsTest, Phony) {
- auto* expr = Phony();
- auto* body = Assign(expr, 1);
- WrapInFunction(body);
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = Sem().Get(expr);
- ASSERT_NE(sem, nullptr);
- EXPECT_FALSE(sem->HasSideEffects());
+ auto* expr = Phony();
+ auto* body = Assign(expr, 1_i);
+ WrapInFunction(body);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ auto* sem = Sem().Get(expr);
+ ASSERT_NE(sem, nullptr);
+ EXPECT_FALSE(sem->HasSideEffects());
}
TEST_F(SideEffectsTest, Literal) {
- auto* expr = Expr(1);
- WrapInFunction(expr);
+ auto* expr = Expr(1_i);
+ WrapInFunction(expr);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = Sem().Get(expr);
- ASSERT_NE(sem, nullptr);
- EXPECT_FALSE(sem->HasSideEffects());
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ auto* sem = Sem().Get(expr);
+ ASSERT_NE(sem, nullptr);
+ EXPECT_FALSE(sem->HasSideEffects());
}
TEST_F(SideEffectsTest, VariableUser) {
- auto* var = Decl(Var("a", ty.i32()));
- auto* expr = Expr("a");
- WrapInFunction(var, expr);
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = Sem().Get(expr);
- ASSERT_NE(sem, nullptr);
- EXPECT_TRUE(sem->Is<sem::VariableUser>());
- EXPECT_FALSE(sem->HasSideEffects());
+ auto* var = Decl(Var("a", ty.i32()));
+ auto* expr = Expr("a");
+ WrapInFunction(var, expr);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ auto* sem = Sem().Get(expr);
+ ASSERT_NE(sem, nullptr);
+ EXPECT_TRUE(sem->Is<sem::VariableUser>());
+ EXPECT_FALSE(sem->HasSideEffects());
}
TEST_F(SideEffectsTest, Call_Builtin_NoSE) {
- Global("a", ty.f32(), ast::StorageClass::kPrivate);
- auto* expr = Call("dpdx", "a");
- Func("f", {}, ty.void_(), {Ignore(expr)},
- {create<ast::StageAttribute>(ast::PipelineStage::kFragment)});
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = Sem().Get(expr);
- ASSERT_NE(sem, nullptr);
- EXPECT_TRUE(sem->Is<sem::Call>());
- EXPECT_FALSE(sem->HasSideEffects());
+ Global("a", ty.f32(), ast::StorageClass::kPrivate);
+ auto* expr = Call("dpdx", "a");
+ Func("f", {}, ty.void_(), {Ignore(expr)},
+ {create<ast::StageAttribute>(ast::PipelineStage::kFragment)});
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ auto* sem = Sem().Get(expr);
+ ASSERT_NE(sem, nullptr);
+ EXPECT_TRUE(sem->Is<sem::Call>());
+ EXPECT_FALSE(sem->HasSideEffects());
}
TEST_F(SideEffectsTest, Call_Builtin_NoSE_WithSEArg) {
- MakeSideEffectFunc<f32>("se");
- auto* expr = Call("dpdx", Call("se"));
- Func("f", {}, ty.void_(), {Ignore(expr)},
- {create<ast::StageAttribute>(ast::PipelineStage::kFragment)});
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = Sem().Get(expr);
- ASSERT_NE(sem, nullptr);
- EXPECT_TRUE(sem->Is<sem::Call>());
- EXPECT_TRUE(sem->HasSideEffects());
+ MakeSideEffectFunc<f32>("se");
+ auto* expr = Call("dpdx", Call("se"));
+ Func("f", {}, ty.void_(), {Ignore(expr)},
+ {create<ast::StageAttribute>(ast::PipelineStage::kFragment)});
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ auto* sem = Sem().Get(expr);
+ ASSERT_NE(sem, nullptr);
+ EXPECT_TRUE(sem->Is<sem::Call>());
+ EXPECT_TRUE(sem->HasSideEffects());
}
TEST_F(SideEffectsTest, Call_Builtin_SE) {
- Global("a", ty.atomic(ty.i32()), ast::StorageClass::kWorkgroup);
- auto* expr = Call("atomicAdd", AddressOf("a"), 1);
- WrapInFunction(expr);
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = Sem().Get(expr);
- ASSERT_NE(sem, nullptr);
- EXPECT_TRUE(sem->Is<sem::Call>());
- EXPECT_TRUE(sem->HasSideEffects());
+ Global("a", ty.atomic(ty.i32()), ast::StorageClass::kWorkgroup);
+ auto* expr = Call("atomicAdd", AddressOf("a"), 1_i);
+ WrapInFunction(expr);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ auto* sem = Sem().Get(expr);
+ ASSERT_NE(sem, nullptr);
+ EXPECT_TRUE(sem->Is<sem::Call>());
+ EXPECT_TRUE(sem->HasSideEffects());
}
TEST_F(SideEffectsTest, Call_Function) {
- Func("f", {}, ty.i32(), {Return(1)});
- auto* expr = Call("f");
- WrapInFunction(expr);
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = Sem().Get(expr);
- ASSERT_NE(sem, nullptr);
- EXPECT_TRUE(sem->Is<sem::Call>());
- EXPECT_TRUE(sem->HasSideEffects());
+ Func("f", {}, ty.i32(), {Return(1_i)});
+ auto* expr = Call("f");
+ WrapInFunction(expr);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ auto* sem = Sem().Get(expr);
+ ASSERT_NE(sem, nullptr);
+ EXPECT_TRUE(sem->Is<sem::Call>());
+ EXPECT_TRUE(sem->HasSideEffects());
}
TEST_F(SideEffectsTest, Call_TypeConversion_NoSE) {
- auto* var = Decl(Var("a", ty.i32()));
- auto* expr = Construct(ty.f32(), "a");
- WrapInFunction(var, expr);
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = Sem().Get(expr);
- ASSERT_NE(sem, nullptr);
- EXPECT_TRUE(sem->Is<sem::Call>());
- EXPECT_FALSE(sem->HasSideEffects());
+ auto* var = Decl(Var("a", ty.i32()));
+ auto* expr = Construct(ty.f32(), "a");
+ WrapInFunction(var, expr);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ auto* sem = Sem().Get(expr);
+ ASSERT_NE(sem, nullptr);
+ EXPECT_TRUE(sem->Is<sem::Call>());
+ EXPECT_FALSE(sem->HasSideEffects());
}
TEST_F(SideEffectsTest, Call_TypeConversion_SE) {
- MakeSideEffectFunc<i32>("se");
- auto* expr = Construct(ty.f32(), Call("se"));
- WrapInFunction(expr);
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = Sem().Get(expr);
- ASSERT_NE(sem, nullptr);
- EXPECT_TRUE(sem->Is<sem::Call>());
- EXPECT_TRUE(sem->HasSideEffects());
+ MakeSideEffectFunc<i32>("se");
+ auto* expr = Construct(ty.f32(), Call("se"));
+ WrapInFunction(expr);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ auto* sem = Sem().Get(expr);
+ ASSERT_NE(sem, nullptr);
+ EXPECT_TRUE(sem->Is<sem::Call>());
+ EXPECT_TRUE(sem->HasSideEffects());
}
TEST_F(SideEffectsTest, Call_TypeConstructor_NoSE) {
- auto* var = Decl(Var("a", ty.f32()));
- auto* expr = Construct(ty.f32(), "a");
- WrapInFunction(var, expr);
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = Sem().Get(expr);
- ASSERT_NE(sem, nullptr);
- EXPECT_TRUE(sem->Is<sem::Call>());
- EXPECT_FALSE(sem->HasSideEffects());
+ auto* var = Decl(Var("a", ty.f32()));
+ auto* expr = Construct(ty.f32(), "a");
+ WrapInFunction(var, expr);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ auto* sem = Sem().Get(expr);
+ ASSERT_NE(sem, nullptr);
+ EXPECT_TRUE(sem->Is<sem::Call>());
+ EXPECT_FALSE(sem->HasSideEffects());
}
TEST_F(SideEffectsTest, Call_TypeConstructor_SE) {
- MakeSideEffectFunc<f32>("se");
- auto* expr = Construct(ty.f32(), Call("se"));
- WrapInFunction(expr);
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = Sem().Get(expr);
- ASSERT_NE(sem, nullptr);
- EXPECT_TRUE(sem->Is<sem::Call>());
- EXPECT_TRUE(sem->HasSideEffects());
+ MakeSideEffectFunc<f32>("se");
+ auto* expr = Construct(ty.f32(), Call("se"));
+ WrapInFunction(expr);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ auto* sem = Sem().Get(expr);
+ ASSERT_NE(sem, nullptr);
+ EXPECT_TRUE(sem->Is<sem::Call>());
+ EXPECT_TRUE(sem->HasSideEffects());
}
TEST_F(SideEffectsTest, MemberAccessor_Struct_NoSE) {
- auto* s = Structure("S", {Member("m", ty.i32())});
- auto* var = Decl(Var("a", ty.Of(s)));
- auto* expr = MemberAccessor("a", "m");
- WrapInFunction(var, expr);
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = Sem().Get(expr);
- ASSERT_NE(sem, nullptr);
- EXPECT_FALSE(sem->HasSideEffects());
+ auto* s = Structure("S", {Member("m", ty.i32())});
+ auto* var = Decl(Var("a", ty.Of(s)));
+ auto* expr = MemberAccessor("a", "m");
+ WrapInFunction(var, expr);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ auto* sem = Sem().Get(expr);
+ ASSERT_NE(sem, nullptr);
+ EXPECT_FALSE(sem->HasSideEffects());
}
TEST_F(SideEffectsTest, MemberAccessor_Struct_SE) {
- auto* s = Structure("S", {Member("m", ty.i32())});
- MakeSideEffectFunc("se", [&] { return ty.Of(s); });
- auto* expr = MemberAccessor(Call("se"), "m");
- WrapInFunction(expr);
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = Sem().Get(expr);
- ASSERT_NE(sem, nullptr);
- EXPECT_TRUE(sem->HasSideEffects());
+ auto* s = Structure("S", {Member("m", ty.i32())});
+ MakeSideEffectFunc("se", [&] { return ty.Of(s); });
+ auto* expr = MemberAccessor(Call("se"), "m");
+ WrapInFunction(expr);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ auto* sem = Sem().Get(expr);
+ ASSERT_NE(sem, nullptr);
+ EXPECT_TRUE(sem->HasSideEffects());
}
TEST_F(SideEffectsTest, MemberAccessor_Vector) {
- auto* var = Decl(Var("a", ty.vec4<f32>()));
- auto* expr = MemberAccessor("a", "x");
- WrapInFunction(var, expr);
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = Sem().Get(expr);
- EXPECT_TRUE(sem->Is<sem::MemberAccessorExpression>());
- ASSERT_NE(sem, nullptr);
- EXPECT_FALSE(sem->HasSideEffects());
+ auto* var = Decl(Var("a", ty.vec4<f32>()));
+ auto* expr = MemberAccessor("a", "x");
+ WrapInFunction(var, expr);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ auto* sem = Sem().Get(expr);
+ EXPECT_TRUE(sem->Is<sem::MemberAccessorExpression>());
+ ASSERT_NE(sem, nullptr);
+ EXPECT_FALSE(sem->HasSideEffects());
}
TEST_F(SideEffectsTest, MemberAccessor_VectorSwizzleNoSE) {
- auto* var = Decl(Var("a", ty.vec4<f32>()));
- auto* expr = MemberAccessor("a", "xzyw");
- WrapInFunction(var, expr);
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = Sem().Get(expr);
- EXPECT_TRUE(sem->Is<sem::Swizzle>());
- ASSERT_NE(sem, nullptr);
- EXPECT_FALSE(sem->HasSideEffects());
+ auto* var = Decl(Var("a", ty.vec4<f32>()));
+ auto* expr = MemberAccessor("a", "xzyw");
+ WrapInFunction(var, expr);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ auto* sem = Sem().Get(expr);
+ EXPECT_TRUE(sem->Is<sem::Swizzle>());
+ ASSERT_NE(sem, nullptr);
+ EXPECT_FALSE(sem->HasSideEffects());
}
TEST_F(SideEffectsTest, MemberAccessor_VectorSwizzleSE) {
- MakeSideEffectFunc("se", [&] { return ty.vec4<f32>(); });
- auto* expr = MemberAccessor(Call("se"), "xzyw");
- WrapInFunction(expr);
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = Sem().Get(expr);
- EXPECT_TRUE(sem->Is<sem::Swizzle>());
- ASSERT_NE(sem, nullptr);
- EXPECT_TRUE(sem->HasSideEffects());
+ MakeSideEffectFunc("se", [&] { return ty.vec4<f32>(); });
+ auto* expr = MemberAccessor(Call("se"), "xzyw");
+ WrapInFunction(expr);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ auto* sem = Sem().Get(expr);
+ EXPECT_TRUE(sem->Is<sem::Swizzle>());
+ ASSERT_NE(sem, nullptr);
+ EXPECT_TRUE(sem->HasSideEffects());
}
TEST_F(SideEffectsTest, Binary_NoSE) {
- auto* a = Decl(Var("a", ty.i32()));
- auto* b = Decl(Var("b", ty.i32()));
- auto* expr = Add("a", "b");
- WrapInFunction(a, b, expr);
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = Sem().Get(expr);
- ASSERT_NE(sem, nullptr);
- EXPECT_FALSE(sem->HasSideEffects());
+ auto* a = Decl(Var("a", ty.i32()));
+ auto* b = Decl(Var("b", ty.i32()));
+ auto* expr = Add("a", "b");
+ WrapInFunction(a, b, expr);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ auto* sem = Sem().Get(expr);
+ ASSERT_NE(sem, nullptr);
+ EXPECT_FALSE(sem->HasSideEffects());
}
TEST_F(SideEffectsTest, Binary_LeftSE) {
- MakeSideEffectFunc<i32>("se");
- auto* b = Decl(Var("b", ty.i32()));
- auto* expr = Add(Call("se"), "b");
- WrapInFunction(b, expr);
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = Sem().Get(expr);
- ASSERT_NE(sem, nullptr);
- EXPECT_TRUE(sem->HasSideEffects());
+ MakeSideEffectFunc<i32>("se");
+ auto* b = Decl(Var("b", ty.i32()));
+ auto* expr = Add(Call("se"), "b");
+ WrapInFunction(b, expr);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ auto* sem = Sem().Get(expr);
+ ASSERT_NE(sem, nullptr);
+ EXPECT_TRUE(sem->HasSideEffects());
}
TEST_F(SideEffectsTest, Binary_RightSE) {
- MakeSideEffectFunc<i32>("se");
- auto* a = Decl(Var("a", ty.i32()));
- auto* expr = Add("a", Call("se"));
- WrapInFunction(a, expr);
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = Sem().Get(expr);
- ASSERT_NE(sem, nullptr);
- EXPECT_TRUE(sem->HasSideEffects());
+ MakeSideEffectFunc<i32>("se");
+ auto* a = Decl(Var("a", ty.i32()));
+ auto* expr = Add("a", Call("se"));
+ WrapInFunction(a, expr);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ auto* sem = Sem().Get(expr);
+ ASSERT_NE(sem, nullptr);
+ EXPECT_TRUE(sem->HasSideEffects());
}
TEST_F(SideEffectsTest, Binary_BothSE) {
- MakeSideEffectFunc<i32>("se1");
- MakeSideEffectFunc<i32>("se2");
- auto* expr = Add(Call("se1"), Call("se2"));
- WrapInFunction(expr);
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = Sem().Get(expr);
- ASSERT_NE(sem, nullptr);
- EXPECT_TRUE(sem->HasSideEffects());
+ MakeSideEffectFunc<i32>("se1");
+ MakeSideEffectFunc<i32>("se2");
+ auto* expr = Add(Call("se1"), Call("se2"));
+ WrapInFunction(expr);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ auto* sem = Sem().Get(expr);
+ ASSERT_NE(sem, nullptr);
+ EXPECT_TRUE(sem->HasSideEffects());
}
TEST_F(SideEffectsTest, Unary_NoSE) {
- auto* var = Decl(Var("a", ty.bool_()));
- auto* expr = Not("a");
- WrapInFunction(var, expr);
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = Sem().Get(expr);
- ASSERT_NE(sem, nullptr);
- EXPECT_FALSE(sem->HasSideEffects());
+ auto* var = Decl(Var("a", ty.bool_()));
+ auto* expr = Not("a");
+ WrapInFunction(var, expr);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ auto* sem = Sem().Get(expr);
+ ASSERT_NE(sem, nullptr);
+ EXPECT_FALSE(sem->HasSideEffects());
}
TEST_F(SideEffectsTest, Unary_SE) {
- MakeSideEffectFunc<bool>("se");
- auto* expr = Not(Call("se"));
- WrapInFunction(expr);
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = Sem().Get(expr);
- ASSERT_NE(sem, nullptr);
- EXPECT_TRUE(sem->HasSideEffects());
+ MakeSideEffectFunc<bool>("se");
+ auto* expr = Not(Call("se"));
+ WrapInFunction(expr);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ auto* sem = Sem().Get(expr);
+ ASSERT_NE(sem, nullptr);
+ EXPECT_TRUE(sem->HasSideEffects());
}
TEST_F(SideEffectsTest, IndexAccessor_NoSE) {
- auto* var = Decl(Var("a", ty.array<i32, 10>()));
- auto* expr = IndexAccessor("a", 0);
- WrapInFunction(var, expr);
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = Sem().Get(expr);
- ASSERT_NE(sem, nullptr);
- EXPECT_FALSE(sem->HasSideEffects());
+ auto* var = Decl(Var("a", ty.array<i32, 10>()));
+ auto* expr = IndexAccessor("a", 0_i);
+ WrapInFunction(var, expr);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ auto* sem = Sem().Get(expr);
+ ASSERT_NE(sem, nullptr);
+ EXPECT_FALSE(sem->HasSideEffects());
}
TEST_F(SideEffectsTest, IndexAccessor_ObjSE) {
- MakeSideEffectFunc("se", [&] { return ty.array<i32, 10>(); });
- auto* expr = IndexAccessor(Call("se"), 0);
- WrapInFunction(expr);
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = Sem().Get(expr);
- ASSERT_NE(sem, nullptr);
- EXPECT_TRUE(sem->HasSideEffects());
+ MakeSideEffectFunc("se", [&] { return ty.array<i32, 10>(); });
+ auto* expr = IndexAccessor(Call("se"), 0_i);
+ WrapInFunction(expr);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ auto* sem = Sem().Get(expr);
+ ASSERT_NE(sem, nullptr);
+ EXPECT_TRUE(sem->HasSideEffects());
}
TEST_F(SideEffectsTest, IndexAccessor_IndexSE) {
- MakeSideEffectFunc<i32>("se");
- auto* var = Decl(Var("a", ty.array<i32, 10>()));
- auto* expr = IndexAccessor("a", Call("se"));
- WrapInFunction(var, expr);
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = Sem().Get(expr);
- ASSERT_NE(sem, nullptr);
- EXPECT_TRUE(sem->HasSideEffects());
+ MakeSideEffectFunc<i32>("se");
+ auto* var = Decl(Var("a", ty.array<i32, 10>()));
+ auto* expr = IndexAccessor("a", Call("se"));
+ WrapInFunction(var, expr);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ auto* sem = Sem().Get(expr);
+ ASSERT_NE(sem, nullptr);
+ EXPECT_TRUE(sem->HasSideEffects());
}
TEST_F(SideEffectsTest, IndexAccessor_BothSE) {
- MakeSideEffectFunc("se1", [&] { return ty.array<i32, 10>(); });
- MakeSideEffectFunc<i32>("se2");
- auto* expr = IndexAccessor(Call("se1"), Call("se2"));
- WrapInFunction(expr);
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = Sem().Get(expr);
- ASSERT_NE(sem, nullptr);
- EXPECT_TRUE(sem->HasSideEffects());
+ MakeSideEffectFunc("se1", [&] { return ty.array<i32, 10>(); });
+ MakeSideEffectFunc<i32>("se2");
+ auto* expr = IndexAccessor(Call("se1"), Call("se2"));
+ WrapInFunction(expr);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ auto* sem = Sem().Get(expr);
+ ASSERT_NE(sem, nullptr);
+ EXPECT_TRUE(sem->HasSideEffects());
}
TEST_F(SideEffectsTest, Bitcast_NoSE) {
- auto* var = Decl(Var("a", ty.i32()));
- auto* expr = Bitcast<f32>("a");
- WrapInFunction(var, expr);
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = Sem().Get(expr);
- ASSERT_NE(sem, nullptr);
- EXPECT_FALSE(sem->HasSideEffects());
+ auto* var = Decl(Var("a", ty.i32()));
+ auto* expr = Bitcast<f32>("a");
+ WrapInFunction(var, expr);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ auto* sem = Sem().Get(expr);
+ ASSERT_NE(sem, nullptr);
+ EXPECT_FALSE(sem->HasSideEffects());
}
TEST_F(SideEffectsTest, Bitcast_SE) {
- MakeSideEffectFunc<i32>("se");
- auto* expr = Bitcast<f32>(Call("se"));
- WrapInFunction(expr);
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = Sem().Get(expr);
- ASSERT_NE(sem, nullptr);
- EXPECT_TRUE(sem->HasSideEffects());
+ MakeSideEffectFunc<i32>("se");
+ auto* expr = Bitcast<f32>(Call("se"));
+ WrapInFunction(expr);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ auto* sem = Sem().Get(expr);
+ ASSERT_NE(sem, nullptr);
+ EXPECT_TRUE(sem->HasSideEffects());
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/resolver/source_variable_test.cc b/chromium/third_party/dawn/src/tint/resolver/source_variable_test.cc
new file mode 100644
index 00000000000..cd943f3635f
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/resolver/source_variable_test.cc
@@ -0,0 +1,291 @@
+// Copyright 2022 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/tint/resolver/resolver.h"
+
+#include "src/tint/resolver/resolver_test_helper.h"
+#include "src/tint/sem/member_accessor_expression.h"
+
+using namespace tint::number_suffixes; // NOLINT
+
+namespace tint::resolver {
+namespace {
+
+class ResolverSourceVariableTest : public ResolverTest {};
+
+TEST_F(ResolverSourceVariableTest, GlobalPrivateVar) {
+ auto* a = Global("a", ty.f32(), ast::StorageClass::kPrivate);
+ auto* expr = Expr(a);
+ WrapInFunction(expr);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+
+ auto* sem_a = Sem().Get(a);
+ EXPECT_EQ(Sem().Get(expr)->SourceVariable(), sem_a);
+}
+
+TEST_F(ResolverSourceVariableTest, GlobalWorkgroupVar) {
+ auto* a = Global("a", ty.f32(), ast::StorageClass::kWorkgroup);
+ auto* expr = Expr(a);
+ WrapInFunction(expr);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+
+ auto* sem_a = Sem().Get(a);
+ EXPECT_EQ(Sem().Get(expr)->SourceVariable(), sem_a);
+}
+
+TEST_F(ResolverSourceVariableTest, GlobalStorageVar) {
+ auto* a = Global("a", ty.f32(), ast::StorageClass::kStorage, GroupAndBinding(0, 0));
+ auto* expr = Expr(a);
+ WrapInFunction(expr);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+
+ auto* sem_a = Sem().Get(a);
+ EXPECT_EQ(Sem().Get(expr)->SourceVariable(), sem_a);
+}
+
+TEST_F(ResolverSourceVariableTest, GlobalUniformVar) {
+ auto* a = Global("a", ty.f32(), ast::StorageClass::kUniform, GroupAndBinding(0, 0));
+ auto* expr = Expr(a);
+ WrapInFunction(expr);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+
+ auto* sem_a = Sem().Get(a);
+ EXPECT_EQ(Sem().Get(expr)->SourceVariable(), sem_a);
+}
+
+TEST_F(ResolverSourceVariableTest, GlobalTextureVar) {
+ auto* a = Global("a", ty.sampled_texture(ast::TextureDimension::k2d, ty.f32()),
+ ast::StorageClass::kNone, GroupAndBinding(0, 0));
+ auto* expr = Expr(a);
+ WrapInFunction(Call("textureDimensions", expr));
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+
+ auto* sem_a = Sem().Get(a);
+ EXPECT_EQ(Sem().Get(expr)->SourceVariable(), sem_a);
+}
+
+TEST_F(ResolverSourceVariableTest, GlobalOverride) {
+ auto* a = Override("a", ty.f32(), Expr(1_f));
+ auto* expr = Expr(a);
+ WrapInFunction(expr);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+
+ auto* sem_a = Sem().Get(a);
+ EXPECT_EQ(Sem().Get(expr)->SourceVariable(), sem_a);
+}
+
+TEST_F(ResolverSourceVariableTest, GlobalConst) {
+ auto* a = GlobalConst("a", ty.f32(), Expr(1_f));
+ auto* expr = Expr(a);
+ WrapInFunction(expr);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+
+ auto* sem_a = Sem().Get(a);
+ EXPECT_EQ(Sem().Get(expr)->SourceVariable(), sem_a);
+}
+
+TEST_F(ResolverSourceVariableTest, FunctionVar) {
+ auto* a = Var("a", ty.f32(), ast::StorageClass::kNone);
+ auto* expr = Expr(a);
+ WrapInFunction(a, expr);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+
+ auto* sem_a = Sem().Get(a);
+ EXPECT_EQ(Sem().Get(expr)->SourceVariable(), sem_a);
+}
+
+TEST_F(ResolverSourceVariableTest, FunctionLet) {
+ auto* a = Let("a", ty.f32(), Expr(1_f));
+ auto* expr = Expr(a);
+ WrapInFunction(a, expr);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+
+ auto* sem_a = Sem().Get(a);
+ EXPECT_EQ(Sem().Get(expr)->SourceVariable(), sem_a);
+}
+
+TEST_F(ResolverSourceVariableTest, Parameter) {
+ auto* a = Param("a", ty.f32());
+ auto* expr = Expr(a);
+ Func("foo", {a}, ty.void_(), {WrapInStatement(expr)});
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+
+ auto* sem_a = Sem().Get(a);
+ EXPECT_EQ(Sem().Get(expr)->SourceVariable(), sem_a);
+}
+
+TEST_F(ResolverSourceVariableTest, PointerParameter) {
+ // fn foo(a : ptr<function, f32>)
+ // {
+ // let b = a;
+ // }
+ auto* param = Param("a", ty.pointer(ty.f32(), ast::StorageClass::kFunction));
+ auto* expr_param = Expr(param);
+ auto* let = Let("b", nullptr, expr_param);
+ auto* expr_let = Expr("b");
+ Func("foo", {param}, ty.void_(), {WrapInStatement(let), WrapInStatement(expr_let)});
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+
+ auto* sem_param = Sem().Get(param);
+ EXPECT_EQ(Sem().Get(expr_param)->SourceVariable(), sem_param);
+ EXPECT_EQ(Sem().Get(expr_let)->SourceVariable(), sem_param);
+}
+
+TEST_F(ResolverSourceVariableTest, VarCopyVar) {
+ // {
+ // var a : f32;
+ // var b = a;
+ // }
+ auto* a = Var("a", ty.f32(), ast::StorageClass::kNone);
+ auto* expr_a = Expr(a);
+ auto* b = Var("b", ty.f32(), ast::StorageClass::kNone, expr_a);
+ auto* expr_b = Expr(b);
+ WrapInFunction(a, b, expr_b);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+
+ auto* sem_a = Sem().Get(a);
+ auto* sem_b = Sem().Get(b);
+ EXPECT_EQ(Sem().Get(expr_a)->SourceVariable(), sem_a);
+ EXPECT_EQ(Sem().Get(expr_b)->SourceVariable(), sem_b);
+}
+
+TEST_F(ResolverSourceVariableTest, LetCopyVar) {
+ // {
+ // var a : f32;
+ // let b = a;
+ // }
+ auto* a = Var("a", ty.f32(), ast::StorageClass::kNone);
+ auto* expr_a = Expr(a);
+ auto* b = Let("b", ty.f32(), expr_a);
+ auto* expr_b = Expr(b);
+ WrapInFunction(a, b, expr_b);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+
+ auto* sem_a = Sem().Get(a);
+ auto* sem_b = Sem().Get(b);
+ EXPECT_EQ(Sem().Get(expr_a)->SourceVariable(), sem_a);
+ EXPECT_EQ(Sem().Get(expr_b)->SourceVariable(), sem_b);
+}
+
+TEST_F(ResolverSourceVariableTest, ThroughIndexAccessor) {
+ // var<private> a : array<f32, 4u>;
+ // {
+ // a[2i]
+ // }
+ auto* a = Global("a", ty.array(ty.f32(), 4_u), ast::StorageClass::kPrivate);
+ auto* expr = IndexAccessor(a, 2_i);
+ WrapInFunction(expr);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+
+ auto* sem_a = Sem().Get(a);
+ EXPECT_EQ(Sem().Get(expr)->SourceVariable(), sem_a);
+}
+
+TEST_F(ResolverSourceVariableTest, ThroughMemberAccessor) {
+ // struct S { f : f32 }
+ // var<private> a : S;
+ // {
+ // a.f
+ // }
+ auto* S = Structure("S", {Member("f", ty.f32())});
+ auto* a = Global("a", ty.Of(S), ast::StorageClass::kPrivate);
+ auto* expr = MemberAccessor(a, "f");
+ WrapInFunction(expr);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+
+ auto* sem_a = Sem().Get(a);
+ EXPECT_EQ(Sem().Get(expr)->SourceVariable(), sem_a);
+}
+
+TEST_F(ResolverSourceVariableTest, ThroughPointers) {
+ // var<private> a : f32;
+ // {
+ // let a_ptr1 = &*&a;
+ // let a_ptr2 = &*a_ptr1;
+ // }
+ auto* a = Global("a", ty.f32(), ast::StorageClass::kPrivate);
+ auto* address_of_1 = AddressOf(a);
+ auto* deref_1 = Deref(address_of_1);
+ auto* address_of_2 = AddressOf(deref_1);
+ auto* a_ptr1 = Let("a_ptr1", nullptr, address_of_2);
+ auto* deref_2 = Deref(a_ptr1);
+ auto* address_of_3 = AddressOf(deref_2);
+ auto* a_ptr2 = Let("a_ptr2", nullptr, address_of_3);
+ WrapInFunction(a_ptr1, a_ptr2);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+
+ auto* sem_a = Sem().Get(a);
+ EXPECT_EQ(Sem().Get(address_of_1)->SourceVariable(), sem_a);
+ EXPECT_EQ(Sem().Get(address_of_2)->SourceVariable(), sem_a);
+ EXPECT_EQ(Sem().Get(address_of_3)->SourceVariable(), sem_a);
+ EXPECT_EQ(Sem().Get(deref_1)->SourceVariable(), sem_a);
+ EXPECT_EQ(Sem().Get(deref_2)->SourceVariable(), sem_a);
+}
+
+TEST_F(ResolverSourceVariableTest, Literal) {
+ auto* expr = Expr(1_f);
+ WrapInFunction(expr);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+
+ EXPECT_EQ(Sem().Get(expr)->SourceVariable(), nullptr);
+}
+
+TEST_F(ResolverSourceVariableTest, FunctionReturnValue) {
+ auto* expr = Call("min", 1_f, 2_f);
+ WrapInFunction(expr);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+
+ EXPECT_EQ(Sem().Get(expr)->SourceVariable(), nullptr);
+}
+
+TEST_F(ResolverSourceVariableTest, BinaryExpression) {
+ auto* a = Var("a", ty.f32(), ast::StorageClass::kNone);
+ auto* expr = Add(a, Expr(1_f));
+ WrapInFunction(a, expr);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+
+ EXPECT_EQ(Sem().Get(expr)->SourceVariable(), nullptr);
+}
+
+TEST_F(ResolverSourceVariableTest, UnaryExpression) {
+ auto* a = Var("a", ty.f32(), ast::StorageClass::kNone);
+ auto* expr = create<ast::UnaryOpExpression>(ast::UnaryOp::kNegation, Expr(a));
+ WrapInFunction(a, expr);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+
+ EXPECT_EQ(Sem().Get(expr)->SourceVariable(), nullptr);
+}
+
+} // namespace
+} // namespace tint::resolver
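
Editor's note: the new file above repeatedly asserts one idea — Sem().Get(expr)->SourceVariable() walks back to the declaring variable, even through address-of/dereference chains, and is null for literals and call results. The condensed sketch below restates that pattern using only the builder and fixture names introduced in this file; the test name "Sketch_PointerChain" is illustrative, not part of the patch.

    TEST_F(ResolverSourceVariableTest, Sketch_PointerChain) {
        // var<private> a : f32;  let p = &*&a;
        auto* a = Global("a", ty.f32(), ast::StorageClass::kPrivate);
        auto* addr = AddressOf(a);   // &a
        auto* deref = Deref(addr);   // *&a
        WrapInFunction(Let("p", nullptr, AddressOf(deref)));

        EXPECT_TRUE(r()->Resolve()) << r()->error();

        // Both intermediate expressions resolve back to the declaration of 'a'.
        EXPECT_EQ(Sem().Get(addr)->SourceVariable(), Sem().Get(a));
        EXPECT_EQ(Sem().Get(deref)->SourceVariable(), Sem().Get(a));
    }
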
diff --git a/chromium/third_party/dawn/src/tint/resolver/storage_class_layout_validation_test.cc b/chromium/third_party/dawn/src/tint/resolver/storage_class_layout_validation_test.cc
index 23d4c87bc40..db379a062bf 100644
--- a/chromium/third_party/dawn/src/tint/resolver/storage_class_layout_validation_test.cc
+++ b/chromium/third_party/dawn/src/tint/resolver/storage_class_layout_validation_test.cc
@@ -17,32 +17,33 @@
#include "gmock/gmock.h"
#include "src/tint/resolver/resolver_test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::resolver {
namespace {
using ResolverStorageClassLayoutValidationTest = ResolverTest;
// Detect unaligned member for storage buffers
-TEST_F(ResolverStorageClassLayoutValidationTest,
- StorageBuffer_UnalignedMember) {
- // struct S {
- // @size(5) a : f32;
- // @align(1) b : f32;
- // };
- // @group(0) @binding(0)
- // var<storage> a : S;
-
- Structure(Source{{12, 34}}, "S",
- {Member("a", ty.f32(), {MemberSize(5)}),
- Member(Source{{34, 56}}, "b", ty.f32(), {MemberAlign(1)})});
-
- Global(Source{{78, 90}}, "a", ty.type_name("S"), ast::StorageClass::kStorage,
- GroupAndBinding(0, 0));
-
- ASSERT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- R"(34:56 error: the offset of a struct member of type 'f32' in storage class 'storage' must be a multiple of 4 bytes, but 'b' is currently at offset 5. Consider setting @align(4) on this member
+TEST_F(ResolverStorageClassLayoutValidationTest, StorageBuffer_UnalignedMember) {
+ // struct S {
+ // @size(5) a : f32;
+ // @align(1) b : f32;
+ // };
+ // @group(0) @binding(0)
+ // var<storage> a : S;
+
+ Structure(Source{{12, 34}}, "S",
+ {Member("a", ty.f32(), {MemberSize(5)}),
+ Member(Source{{34, 56}}, "b", ty.f32(), {MemberAlign(1)})});
+
+ Global(Source{{78, 90}}, "a", ty.type_name("S"), ast::StorageClass::kStorage,
+ GroupAndBinding(0, 0));
+
+ ASSERT_FALSE(r()->Resolve());
+ EXPECT_EQ(
+ r()->error(),
+ R"(34:56 error: the offset of a struct member of type 'f32' in storage class 'storage' must be a multiple of 4 bytes, but 'b' is currently at offset 5. Consider setting @align(4) on this member
12:34 note: see layout of struct:
/* align(4) size(12) */ struct S {
/* offset(0) align(4) size( 5) */ a : f32;
@@ -52,55 +53,53 @@ TEST_F(ResolverStorageClassLayoutValidationTest,
78:90 note: see declaration of variable)");
}
-TEST_F(ResolverStorageClassLayoutValidationTest,
- StorageBuffer_UnalignedMember_SuggestedFix) {
- // struct S {
- // @size(5) a : f32;
- // @align(4) b : f32;
- // };
- // @group(0) @binding(0)
- // var<storage> a : S;
-
- Structure(Source{{12, 34}}, "S",
- {Member("a", ty.f32(), {MemberSize(5)}),
- Member(Source{{34, 56}}, "b", ty.f32(), {MemberAlign(4)})});
-
- Global(Source{{78, 90}}, "a", ty.type_name("S"), ast::StorageClass::kStorage,
- GroupAndBinding(0, 0));
-
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+TEST_F(ResolverStorageClassLayoutValidationTest, StorageBuffer_UnalignedMember_SuggestedFix) {
+ // struct S {
+ // @size(5) a : f32;
+ // @align(4) b : f32;
+ // };
+ // @group(0) @binding(0)
+ // var<storage> a : S;
+
+ Structure(Source{{12, 34}}, "S",
+ {Member("a", ty.f32(), {MemberSize(5)}),
+ Member(Source{{34, 56}}, "b", ty.f32(), {MemberAlign(4)})});
+
+ Global(Source{{78, 90}}, "a", ty.type_name("S"), ast::StorageClass::kStorage,
+ GroupAndBinding(0, 0));
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
}
// Detect unaligned struct member for uniform buffers
-TEST_F(ResolverStorageClassLayoutValidationTest,
- UniformBuffer_UnalignedMember_Struct) {
- // struct Inner {
- // scalar : i32;
- // };
- //
- // struct Outer {
- // scalar : f32;
- // inner : Inner;
- // };
- //
- // @group(0) @binding(0)
- // var<uniform> a : Outer;
-
- Structure(Source{{12, 34}}, "Inner", {Member("scalar", ty.i32())});
-
- Structure(Source{{34, 56}}, "Outer",
- {
- Member("scalar", ty.f32()),
- Member(Source{{56, 78}}, "inner", ty.type_name("Inner")),
- });
-
- Global(Source{{78, 90}}, "a", ty.type_name("Outer"),
- ast::StorageClass::kUniform, GroupAndBinding(0, 0));
-
- ASSERT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- R"(56:78 error: the offset of a struct member of type 'Inner' in storage class 'uniform' must be a multiple of 16 bytes, but 'inner' is currently at offset 4. Consider setting @align(16) on this member
+TEST_F(ResolverStorageClassLayoutValidationTest, UniformBuffer_UnalignedMember_Struct) {
+ // struct Inner {
+ // scalar : i32;
+ // };
+ //
+ // struct Outer {
+ // scalar : f32;
+ // inner : Inner;
+ // };
+ //
+ // @group(0) @binding(0)
+ // var<uniform> a : Outer;
+
+ Structure(Source{{12, 34}}, "Inner", {Member("scalar", ty.i32())});
+
+ Structure(Source{{34, 56}}, "Outer",
+ {
+ Member("scalar", ty.f32()),
+ Member(Source{{56, 78}}, "inner", ty.type_name("Inner")),
+ });
+
+ Global(Source{{78, 90}}, "a", ty.type_name("Outer"), ast::StorageClass::kUniform,
+ GroupAndBinding(0, 0));
+
+ ASSERT_FALSE(r()->Resolve());
+ EXPECT_EQ(
+ r()->error(),
+ R"(56:78 error: the offset of a struct member of type 'Inner' in storage class 'uniform' must be a multiple of 16 bytes, but 'inner' is currently at offset 4. Consider setting @align(16) on this member
34:56 note: see layout of struct:
/* align(4) size(8) */ struct Outer {
/* offset(0) align(4) size(4) */ scalar : f32;
@@ -115,60 +114,58 @@ TEST_F(ResolverStorageClassLayoutValidationTest,
TEST_F(ResolverStorageClassLayoutValidationTest,
UniformBuffer_UnalignedMember_Struct_SuggestedFix) {
- // struct Inner {
- // scalar : i32;
- // };
- //
- // struct Outer {
- // scalar : f32;
- // @align(16) inner : Inner;
- // };
- //
- // @group(0) @binding(0)
- // var<uniform> a : Outer;
-
- Structure(Source{{12, 34}}, "Inner", {Member("scalar", ty.i32())});
-
- Structure(Source{{34, 56}}, "Outer",
- {
- Member("scalar", ty.f32()),
- Member(Source{{56, 78}}, "inner", ty.type_name("Inner"),
- {MemberAlign(16)}),
- });
-
- Global(Source{{78, 90}}, "a", ty.type_name("Outer"),
- ast::StorageClass::kUniform, GroupAndBinding(0, 0));
-
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ // struct Inner {
+ // scalar : i32;
+ // };
+ //
+ // struct Outer {
+ // scalar : f32;
+ // @align(16) inner : Inner;
+ // };
+ //
+ // @group(0) @binding(0)
+ // var<uniform> a : Outer;
+
+ Structure(Source{{12, 34}}, "Inner", {Member("scalar", ty.i32())});
+
+ Structure(Source{{34, 56}}, "Outer",
+ {
+ Member("scalar", ty.f32()),
+ Member(Source{{56, 78}}, "inner", ty.type_name("Inner"), {MemberAlign(16)}),
+ });
+
+ Global(Source{{78, 90}}, "a", ty.type_name("Outer"), ast::StorageClass::kUniform,
+ GroupAndBinding(0, 0));
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
}
// Detect unaligned array member for uniform buffers
-TEST_F(ResolverStorageClassLayoutValidationTest,
- UniformBuffer_UnalignedMember_Array) {
- // type Inner = @stride(16) array<f32, 10>;
- //
- // struct Outer {
- // scalar : f32;
- // inner : Inner;
- // };
- //
- // @group(0) @binding(0)
- // var<uniform> a : Outer;
- Alias("Inner", ty.array(ty.f32(), 10, 16));
-
- Structure(Source{{12, 34}}, "Outer",
- {
- Member("scalar", ty.f32()),
- Member(Source{{56, 78}}, "inner", ty.type_name("Inner")),
- });
-
- Global(Source{{78, 90}}, "a", ty.type_name("Outer"),
- ast::StorageClass::kUniform, GroupAndBinding(0, 0));
-
- ASSERT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- R"(56:78 error: the offset of a struct member of type '@stride(16) array<f32, 10>' in storage class 'uniform' must be a multiple of 16 bytes, but 'inner' is currently at offset 4. Consider setting @align(16) on this member
+TEST_F(ResolverStorageClassLayoutValidationTest, UniformBuffer_UnalignedMember_Array) {
+ // type Inner = @stride(16) array<f32, 10u>;
+ //
+ // struct Outer {
+ // scalar : f32;
+ // inner : Inner;
+ // };
+ //
+ // @group(0) @binding(0)
+ // var<uniform> a : Outer;
+ Alias("Inner", ty.array(ty.f32(), 10_u, 16));
+
+ Structure(Source{{12, 34}}, "Outer",
+ {
+ Member("scalar", ty.f32()),
+ Member(Source{{56, 78}}, "inner", ty.type_name("Inner")),
+ });
+
+ Global(Source{{78, 90}}, "a", ty.type_name("Outer"), ast::StorageClass::kUniform,
+ GroupAndBinding(0, 0));
+
+ ASSERT_FALSE(r()->Resolve());
+ EXPECT_EQ(
+ r()->error(),
+ R"(56:78 error: the offset of a struct member of type '@stride(16) array<f32, 10>' in storage class 'uniform' must be a multiple of 16 bytes, but 'inner' is currently at offset 4. Consider setting @align(16) on this member
12:34 note: see layout of struct:
/* align(4) size(164) */ struct Outer {
/* offset( 0) align(4) size( 4) */ scalar : f32;
@@ -177,64 +174,61 @@ TEST_F(ResolverStorageClassLayoutValidationTest,
78:90 note: see declaration of variable)");
}
-TEST_F(ResolverStorageClassLayoutValidationTest,
- UniformBuffer_UnalignedMember_Array_SuggestedFix) {
- // type Inner = @stride(16) array<f32, 10>;
- //
- // struct Outer {
- // scalar : f32;
- // @align(16) inner : Inner;
- // };
- //
- // @group(0) @binding(0)
- // var<uniform> a : Outer;
- Alias("Inner", ty.array(ty.f32(), 10, 16));
-
- Structure(Source{{12, 34}}, "Outer",
- {
- Member("scalar", ty.f32()),
- Member(Source{{34, 56}}, "inner", ty.type_name("Inner"),
- {MemberAlign(16)}),
- });
-
- Global(Source{{78, 90}}, "a", ty.type_name("Outer"),
- ast::StorageClass::kUniform, GroupAndBinding(0, 0));
-
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+TEST_F(ResolverStorageClassLayoutValidationTest, UniformBuffer_UnalignedMember_Array_SuggestedFix) {
+ // type Inner = @stride(16) array<f32, 10u>;
+ //
+ // struct Outer {
+ // scalar : f32;
+ // @align(16) inner : Inner;
+ // };
+ //
+ // @group(0) @binding(0)
+ // var<uniform> a : Outer;
+ Alias("Inner", ty.array(ty.f32(), 10_u, 16));
+
+ Structure(Source{{12, 34}}, "Outer",
+ {
+ Member("scalar", ty.f32()),
+ Member(Source{{34, 56}}, "inner", ty.type_name("Inner"), {MemberAlign(16)}),
+ });
+
+ Global(Source{{78, 90}}, "a", ty.type_name("Outer"), ast::StorageClass::kUniform,
+ GroupAndBinding(0, 0));
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
}
// Detect uniform buffers with byte offset between 2 members that is not a
// multiple of 16 bytes
-TEST_F(ResolverStorageClassLayoutValidationTest,
- UniformBuffer_MembersOffsetNotMultipleOf16) {
- // struct Inner {
- // @align(1) @size(5) scalar : i32;
- // };
- //
- // struct Outer {
- // inner : Inner;
- // scalar : i32;
- // };
- //
- // @group(0) @binding(0)
- // var<uniform> a : Outer;
-
- Structure(Source{{12, 34}}, "Inner",
- {Member("scalar", ty.i32(), {MemberAlign(1), MemberSize(5)})});
-
- Structure(Source{{34, 56}}, "Outer",
- {
- Member(Source{{56, 78}}, "inner", ty.type_name("Inner")),
- Member(Source{{78, 90}}, "scalar", ty.i32()),
- });
-
- Global(Source{{22, 24}}, "a", ty.type_name("Outer"),
- ast::StorageClass::kUniform, GroupAndBinding(0, 0));
-
- ASSERT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- R"(78:90 error: uniform storage requires that the number of bytes between the start of the previous member of type struct and the current member be a multiple of 16 bytes, but there are currently 8 bytes between 'inner' and 'scalar'. Consider setting @align(16) on this member
+TEST_F(ResolverStorageClassLayoutValidationTest, UniformBuffer_MembersOffsetNotMultipleOf16) {
+ // struct Inner {
+ // @align(1) @size(5) scalar : i32;
+ // };
+ //
+ // struct Outer {
+ // inner : Inner;
+ // scalar : i32;
+ // };
+ //
+ // @group(0) @binding(0)
+ // var<uniform> a : Outer;
+
+ Structure(Source{{12, 34}}, "Inner",
+ {Member("scalar", ty.i32(), {MemberAlign(1), MemberSize(5)})});
+
+ Structure(Source{{34, 56}}, "Outer",
+ {
+ Member(Source{{56, 78}}, "inner", ty.type_name("Inner")),
+ Member(Source{{78, 90}}, "scalar", ty.i32()),
+ });
+
+ Global(Source{{22, 24}}, "a", ty.type_name("Outer"), ast::StorageClass::kUniform,
+ GroupAndBinding(0, 0));
+
+ ASSERT_FALSE(r()->Resolve());
+ EXPECT_EQ(
+ r()->error(),
+ R"(78:90 error: uniform storage requires that the number of bytes between the start of the previous member of type struct and the current member be a multiple of 16 bytes, but there are currently 8 bytes between 'inner' and 'scalar'. Consider setting @align(16) on this member
34:56 note: see layout of struct:
/* align(4) size(12) */ struct Outer {
/* offset( 0) align(1) size( 5) */ inner : Inner;
@@ -251,42 +245,42 @@ TEST_F(ResolverStorageClassLayoutValidationTest,
// See https://crbug.com/tint/1344
TEST_F(ResolverStorageClassLayoutValidationTest,
UniformBuffer_MembersOffsetNotMultipleOf16_InnerMoreMembersThanOuter) {
- // struct Inner {
- // a : i32;
- // b : i32;
- // c : i32;
- // @align(1) @size(5) scalar : i32;
- // };
- //
- // struct Outer {
- // inner : Inner;
- // scalar : i32;
- // };
- //
- // @group(0) @binding(0)
- // var<uniform> a : Outer;
-
- Structure(Source{{12, 34}}, "Inner",
- {
- Member("a", ty.i32()),
- Member("b", ty.i32()),
- Member("c", ty.i32()),
- Member("scalar", ty.i32(), {MemberAlign(1), MemberSize(5)}),
- });
-
- Structure(Source{{34, 56}}, "Outer",
- {
- Member(Source{{56, 78}}, "inner", ty.type_name("Inner")),
- Member(Source{{78, 90}}, "scalar", ty.i32()),
- });
-
- Global(Source{{22, 24}}, "a", ty.type_name("Outer"),
- ast::StorageClass::kUniform, GroupAndBinding(0, 0));
-
- ASSERT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- R"(78:90 error: uniform storage requires that the number of bytes between the start of the previous member of type struct and the current member be a multiple of 16 bytes, but there are currently 20 bytes between 'inner' and 'scalar'. Consider setting @align(16) on this member
+ // struct Inner {
+ // a : i32;
+ // b : i32;
+ // c : i32;
+ // @align(1) @size(5) scalar : i32;
+ // };
+ //
+ // struct Outer {
+ // inner : Inner;
+ // scalar : i32;
+ // };
+ //
+ // @group(0) @binding(0)
+ // var<uniform> a : Outer;
+
+ Structure(Source{{12, 34}}, "Inner",
+ {
+ Member("a", ty.i32()),
+ Member("b", ty.i32()),
+ Member("c", ty.i32()),
+ Member("scalar", ty.i32(), {MemberAlign(1), MemberSize(5)}),
+ });
+
+ Structure(Source{{34, 56}}, "Outer",
+ {
+ Member(Source{{56, 78}}, "inner", ty.type_name("Inner")),
+ Member(Source{{78, 90}}, "scalar", ty.i32()),
+ });
+
+ Global(Source{{22, 24}}, "a", ty.type_name("Outer"), ast::StorageClass::kUniform,
+ GroupAndBinding(0, 0));
+
+ ASSERT_FALSE(r()->Resolve());
+ EXPECT_EQ(
+ r()->error(),
+ R"(78:90 error: uniform storage requires that the number of bytes between the start of the previous member of type struct and the current member be a multiple of 16 bytes, but there are currently 20 bytes between 'inner' and 'scalar'. Consider setting @align(16) on this member
34:56 note: see layout of struct:
/* align(4) size(24) */ struct Outer {
/* offset( 0) align(4) size(20) */ inner : Inner;
@@ -305,83 +299,81 @@ TEST_F(ResolverStorageClassLayoutValidationTest,
TEST_F(ResolverStorageClassLayoutValidationTest,
UniformBuffer_MembersOffsetNotMultipleOf16_SuggestedFix) {
- // struct Inner {
- // @align(1) @size(5) scalar : i32;
- // };
- //
- // struct Outer {
- // @align(16) inner : Inner;
- // scalar : i32;
- // };
- //
- // @group(0) @binding(0)
- // var<uniform> a : Outer;
-
- Structure(Source{{12, 34}}, "Inner",
- {Member("scalar", ty.i32(), {MemberAlign(1), MemberSize(5)})});
-
- Structure(Source{{34, 56}}, "Outer",
- {
- Member(Source{{56, 78}}, "inner", ty.type_name("Inner")),
- Member(Source{{78, 90}}, "scalar", ty.i32(), {MemberAlign(16)}),
- });
-
- Global(Source{{22, 34}}, "a", ty.type_name("Outer"),
- ast::StorageClass::kUniform, GroupAndBinding(0, 0));
-
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ // struct Inner {
+ // @align(1) @size(5) scalar : i32;
+ // };
+ //
+ // struct Outer {
+ // @align(16) inner : Inner;
+ // scalar : i32;
+ // };
+ //
+ // @group(0) @binding(0)
+ // var<uniform> a : Outer;
+
+ Structure(Source{{12, 34}}, "Inner",
+ {Member("scalar", ty.i32(), {MemberAlign(1), MemberSize(5)})});
+
+ Structure(Source{{34, 56}}, "Outer",
+ {
+ Member(Source{{56, 78}}, "inner", ty.type_name("Inner")),
+ Member(Source{{78, 90}}, "scalar", ty.i32(), {MemberAlign(16)}),
+ });
+
+ Global(Source{{22, 34}}, "a", ty.type_name("Outer"), ast::StorageClass::kUniform,
+ GroupAndBinding(0, 0));
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
}
// Make sure that this doesn't fail validation because vec3's align is 16, but
// size is 12. 's' should be at offset 12, which is okay here.
-TEST_F(ResolverStorageClassLayoutValidationTest,
- UniformBuffer_Vec3MemberOffset_NoFail) {
- // struct ScalarPackedAtEndOfVec3 {
- // v : vec3<f32>;
- // s : f32;
- // };
- // @group(0) @binding(0)
- // var<uniform> a : ScalarPackedAtEndOfVec3;
-
- Structure("ScalarPackedAtEndOfVec3", {
- Member("v", ty.vec3(ty.f32())),
- Member("s", ty.f32()),
- });
-
- Global(Source{{78, 90}}, "a", ty.type_name("ScalarPackedAtEndOfVec3"),
- ast::StorageClass::kUniform, GroupAndBinding(0, 0));
-
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+TEST_F(ResolverStorageClassLayoutValidationTest, UniformBuffer_Vec3MemberOffset_NoFail) {
+ // struct ScalarPackedAtEndOfVec3 {
+ // v : vec3<f32>;
+ // s : f32;
+ // };
+ // @group(0) @binding(0)
+ // var<uniform> a : ScalarPackedAtEndOfVec3;
+
+ Structure("ScalarPackedAtEndOfVec3", {
+ Member("v", ty.vec3(ty.f32())),
+ Member("s", ty.f32()),
+ });
+
+ Global(Source{{78, 90}}, "a", ty.type_name("ScalarPackedAtEndOfVec3"),
+ ast::StorageClass::kUniform, GroupAndBinding(0, 0));
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
}
// Detect array stride must be a multiple of 16 bytes for uniform buffers
-TEST_F(ResolverStorageClassLayoutValidationTest,
- UniformBuffer_InvalidArrayStride_Scalar) {
- // type Inner = array<f32, 10>;
- //
- // struct Outer {
- // inner : Inner;
- // scalar : i32;
- // };
- //
- // @group(0) @binding(0)
- // var<uniform> a : Outer;
-
- Alias("Inner", ty.array(ty.f32(), 10));
-
- Structure(Source{{12, 34}}, "Outer",
- {
- Member("inner", ty.type_name(Source{{34, 56}}, "Inner")),
- Member("scalar", ty.i32()),
- });
-
- Global(Source{{78, 90}}, "a", ty.type_name("Outer"),
- ast::StorageClass::kUniform, GroupAndBinding(0, 0));
-
- ASSERT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- R"(34:56 error: uniform storage requires that array elements be aligned to 16 bytes, but array element alignment is currently 4. Consider using a vector or struct as the element type instead.
+TEST_F(ResolverStorageClassLayoutValidationTest, UniformBuffer_InvalidArrayStride_Scalar) {
+ // type Inner = array<f32, 10u>;
+ //
+ // struct Outer {
+ // inner : Inner;
+ // scalar : i32;
+ // };
+ //
+ // @group(0) @binding(0)
+ // var<uniform> a : Outer;
+
+ Alias("Inner", ty.array(ty.f32(), 10_u));
+
+ Structure(Source{{12, 34}}, "Outer",
+ {
+ Member("inner", ty.type_name(Source{{34, 56}}, "Inner")),
+ Member("scalar", ty.i32()),
+ });
+
+ Global(Source{{78, 90}}, "a", ty.type_name("Outer"), ast::StorageClass::kUniform,
+ GroupAndBinding(0, 0));
+
+ ASSERT_FALSE(r()->Resolve());
+ EXPECT_EQ(
+ r()->error(),
+ R"(34:56 error: uniform storage requires that array elements be aligned to 16 bytes, but array element alignment is currently 4. Consider using a vector or struct as the element type instead.
12:34 note: see layout of struct:
/* align(4) size(44) */ struct Outer {
/* offset( 0) align(4) size(40) */ inner : array<f32, 10>;
@@ -390,33 +382,32 @@ TEST_F(ResolverStorageClassLayoutValidationTest,
78:90 note: see declaration of variable)");
}
-TEST_F(ResolverStorageClassLayoutValidationTest,
- UniformBuffer_InvalidArrayStride_Vector) {
- // type Inner = array<vec2<f32>, 10>;
- //
- // struct Outer {
- // inner : Inner;
- // scalar : i32;
- // };
- //
- // @group(0) @binding(0)
- // var<uniform> a : Outer;
-
- Alias("Inner", ty.array(ty.vec2<f32>(), 10));
-
- Structure(Source{{12, 34}}, "Outer",
- {
- Member("inner", ty.type_name(Source{{34, 56}}, "Inner")),
- Member("scalar", ty.i32()),
- });
-
- Global(Source{{78, 90}}, "a", ty.type_name("Outer"),
- ast::StorageClass::kUniform, GroupAndBinding(0, 0));
-
- ASSERT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- R"(34:56 error: uniform storage requires that array elements be aligned to 16 bytes, but array element alignment is currently 8. Consider using a vec4 instead.
+TEST_F(ResolverStorageClassLayoutValidationTest, UniformBuffer_InvalidArrayStride_Vector) {
+ // type Inner = array<vec2<f32>, 10u>;
+ //
+ // struct Outer {
+ // inner : Inner;
+ // scalar : i32;
+ // };
+ //
+ // @group(0) @binding(0)
+ // var<uniform> a : Outer;
+
+ Alias("Inner", ty.array(ty.vec2<f32>(), 10_u));
+
+ Structure(Source{{12, 34}}, "Outer",
+ {
+ Member("inner", ty.type_name(Source{{34, 56}}, "Inner")),
+ Member("scalar", ty.i32()),
+ });
+
+ Global(Source{{78, 90}}, "a", ty.type_name("Outer"), ast::StorageClass::kUniform,
+ GroupAndBinding(0, 0));
+
+ ASSERT_FALSE(r()->Resolve());
+ EXPECT_EQ(
+ r()->error(),
+ R"(34:56 error: uniform storage requires that array elements be aligned to 16 bytes, but array element alignment is currently 8. Consider using a vec4 instead.
12:34 note: see layout of struct:
/* align(8) size(88) */ struct Outer {
/* offset( 0) align(8) size(80) */ inner : array<vec2<f32>, 10>;
@@ -426,41 +417,40 @@ TEST_F(ResolverStorageClassLayoutValidationTest,
78:90 note: see declaration of variable)");
}
-TEST_F(ResolverStorageClassLayoutValidationTest,
- UniformBuffer_InvalidArrayStride_Struct) {
- // struct ArrayElem {
- // a : f32;
- // b : i32;
- // }
- // type Inner = array<ArrayElem, 10>;
- //
- // struct Outer {
- // inner : Inner;
- // scalar : i32;
- // };
- //
- // @group(0) @binding(0)
- // var<uniform> a : Outer;
-
- auto* array_elem = Structure("ArrayElem", {
- Member("a", ty.f32()),
- Member("b", ty.i32()),
- });
- Alias("Inner", ty.array(ty.Of(array_elem), 10));
-
- Structure(Source{{12, 34}}, "Outer",
- {
- Member("inner", ty.type_name(Source{{34, 56}}, "Inner")),
- Member("scalar", ty.i32()),
- });
-
- Global(Source{{78, 90}}, "a", ty.type_name("Outer"),
- ast::StorageClass::kUniform, GroupAndBinding(0, 0));
-
- ASSERT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- R"(34:56 error: uniform storage requires that array elements be aligned to 16 bytes, but array element alignment is currently 8. Consider using the @size attribute on the last struct member.
+TEST_F(ResolverStorageClassLayoutValidationTest, UniformBuffer_InvalidArrayStride_Struct) {
+ // struct ArrayElem {
+ // a : f32;
+ // b : i32;
+ // }
+ // type Inner = array<ArrayElem, 10u>;
+ //
+ // struct Outer {
+ // inner : Inner;
+ // scalar : i32;
+ // };
+ //
+ // @group(0) @binding(0)
+ // var<uniform> a : Outer;
+
+ auto* array_elem = Structure("ArrayElem", {
+ Member("a", ty.f32()),
+ Member("b", ty.i32()),
+ });
+ Alias("Inner", ty.array(ty.Of(array_elem), 10_u));
+
+ Structure(Source{{12, 34}}, "Outer",
+ {
+ Member("inner", ty.type_name(Source{{34, 56}}, "Inner")),
+ Member("scalar", ty.i32()),
+ });
+
+ Global(Source{{78, 90}}, "a", ty.type_name("Outer"), ast::StorageClass::kUniform,
+ GroupAndBinding(0, 0));
+
+ ASSERT_FALSE(r()->Resolve());
+ EXPECT_EQ(
+ r()->error(),
+ R"(34:56 error: uniform storage requires that array elements be aligned to 16 bytes, but array element alignment is currently 8. Consider using the @size attribute on the last struct member.
12:34 note: see layout of struct:
/* align(4) size(84) */ struct Outer {
/* offset( 0) align(4) size(80) */ inner : array<ArrayElem, 10>;
@@ -469,41 +459,38 @@ TEST_F(ResolverStorageClassLayoutValidationTest,
78:90 note: see declaration of variable)");
}
-TEST_F(ResolverStorageClassLayoutValidationTest,
- UniformBuffer_InvalidArrayStride_TopLevelArray) {
- // @group(0) @binding(0)
- // var<uniform> a : array<f32, 4>;
- Global(Source{{78, 90}}, "a", ty.array(Source{{34, 56}}, ty.f32(), 4),
- ast::StorageClass::kUniform, GroupAndBinding(0, 0));
-
- ASSERT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- R"(34:56 error: uniform storage requires that array elements be aligned to 16 bytes, but array element alignment is currently 4. Consider using a vector or struct as the element type instead.)");
+TEST_F(ResolverStorageClassLayoutValidationTest, UniformBuffer_InvalidArrayStride_TopLevelArray) {
+ // @group(0) @binding(0)
+ // var<uniform> a : array<f32, 4u>;
+ Global(Source{{78, 90}}, "a", ty.array(Source{{34, 56}}, ty.f32(), 4_u),
+ ast::StorageClass::kUniform, GroupAndBinding(0, 0));
+
+ ASSERT_FALSE(r()->Resolve());
+ EXPECT_EQ(
+ r()->error(),
+ R"(34:56 error: uniform storage requires that array elements be aligned to 16 bytes, but array element alignment is currently 4. Consider using a vector or struct as the element type instead.)");
}
-TEST_F(ResolverStorageClassLayoutValidationTest,
- UniformBuffer_InvalidArrayStride_NestedArray) {
- // struct Outer {
- // inner : array<array<f32, 4>, 4>
- // };
- //
- // @group(0) @binding(0)
- // var<uniform> a : array<Outer, 4>;
-
- Structure(
- Source{{12, 34}}, "Outer",
- {
- Member("inner", ty.array(Source{{34, 56}}, ty.array(ty.f32(), 4), 4)),
- });
-
- Global(Source{{78, 90}}, "a", ty.type_name("Outer"),
- ast::StorageClass::kUniform, GroupAndBinding(0, 0));
-
- ASSERT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- R"(34:56 error: uniform storage requires that array elements be aligned to 16 bytes, but array element alignment is currently 4. Consider using a vector or struct as the element type instead.
+TEST_F(ResolverStorageClassLayoutValidationTest, UniformBuffer_InvalidArrayStride_NestedArray) {
+ // struct Outer {
+ // inner : array<array<f32, 4u>, 4u>
+ // };
+ //
+ // @group(0) @binding(0)
+ // var<uniform> a : array<Outer, 4u>;
+
+ Structure(Source{{12, 34}}, "Outer",
+ {
+ Member("inner", ty.array(Source{{34, 56}}, ty.array(ty.f32(), 4_u), 4_u)),
+ });
+
+ Global(Source{{78, 90}}, "a", ty.type_name("Outer"), ast::StorageClass::kUniform,
+ GroupAndBinding(0, 0));
+
+ ASSERT_FALSE(r()->Resolve());
+ EXPECT_EQ(
+ r()->error(),
+ R"(34:56 error: uniform storage requires that array elements be aligned to 16 bytes, but array element alignment is currently 4. Consider using a vector or struct as the element type instead.
12:34 note: see layout of struct:
/* align(4) size(64) */ struct Outer {
/* offset( 0) align(4) size(64) */ inner : array<array<f32, 4>, 4>;
@@ -511,30 +498,29 @@ TEST_F(ResolverStorageClassLayoutValidationTest,
78:90 note: see declaration of variable)");
}
-TEST_F(ResolverStorageClassLayoutValidationTest,
- UniformBuffer_InvalidArrayStride_SuggestedFix) {
- // type Inner = @stride(16) array<f32, 10>;
- //
- // struct Outer {
- // inner : Inner;
- // scalar : i32;
- // };
- //
- // @group(0) @binding(0)
- // var<uniform> a : Outer;
-
- Alias("Inner", ty.array(ty.f32(), 10, 16));
-
- Structure(Source{{12, 34}}, "Outer",
- {
- Member("inner", ty.type_name(Source{{34, 56}}, "Inner")),
- Member("scalar", ty.i32()),
- });
-
- Global(Source{{78, 90}}, "a", ty.type_name("Outer"),
- ast::StorageClass::kUniform, GroupAndBinding(0, 0));
-
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+TEST_F(ResolverStorageClassLayoutValidationTest, UniformBuffer_InvalidArrayStride_SuggestedFix) {
+ // type Inner = @stride(16) array<f32, 10u>;
+ //
+ // struct Outer {
+ // inner : Inner;
+ // scalar : i32;
+ // };
+ //
+ // @group(0) @binding(0)
+ // var<uniform> a : Outer;
+
+ Alias("Inner", ty.array(ty.f32(), 10_u, 16));
+
+ Structure(Source{{12, 34}}, "Outer",
+ {
+ Member("inner", ty.type_name(Source{{34, 56}}, "Inner")),
+ Member("scalar", ty.i32()),
+ });
+
+ Global(Source{{78, 90}}, "a", ty.type_name("Outer"), ast::StorageClass::kUniform,
+ GroupAndBinding(0, 0));
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
}
} // namespace
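
Editor's note: the layout-validation tests above follow one recurring shape — declare a uniform-buffer struct whose member misses the 16-byte alignment rule, expect the diagnostic, then show that the suggested @align(16) attribute makes the same layout resolve. A minimal sketch of the passing half, built only from calls that appear in this file ("Sketch_AlignFixesUniformMember" is an illustrative name, not in the patch):

    TEST_F(ResolverStorageClassLayoutValidationTest, Sketch_AlignFixesUniformMember) {
        // struct Inner { scalar : i32; };
        // struct Outer { scalar : f32; @align(16) inner : Inner; };
        // @group(0) @binding(0) var<uniform> a : Outer;
        Structure("Inner", {Member("scalar", ty.i32())});
        Structure("Outer", {
                               Member("scalar", ty.f32()),
                               Member("inner", ty.type_name("Inner"), {MemberAlign(16)}),
                           });
        Global("a", ty.type_name("Outer"), ast::StorageClass::kUniform, GroupAndBinding(0, 0));

        // With @align(16) the nested struct starts on a 16-byte boundary, so this resolves.
        ASSERT_TRUE(r()->Resolve()) << r()->error();
    }
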
diff --git a/chromium/third_party/dawn/src/tint/resolver/storage_class_validation_test.cc b/chromium/third_party/dawn/src/tint/resolver/storage_class_validation_test.cc
index 1173578dcd2..2d75167ea83 100644
--- a/chromium/third_party/dawn/src/tint/resolver/storage_class_validation_test.cc
+++ b/chromium/third_party/dawn/src/tint/resolver/storage_class_validation_test.cc
@@ -18,344 +18,331 @@
#include "src/tint/resolver/resolver_test_helper.h"
#include "src/tint/sem/struct.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::resolver {
namespace {
using ResolverStorageClassValidationTest = ResolverTest;
TEST_F(ResolverStorageClassValidationTest, GlobalVariableNoStorageClass_Fail) {
- // var g : f32;
- Global(Source{{12, 34}}, "g", ty.f32(), ast::StorageClass::kNone);
+ // var g : f32;
+ Global(Source{{12, 34}}, "g", ty.f32(), ast::StorageClass::kNone);
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: global variables must have a storage class");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: global variables must have a storage class");
}
-TEST_F(ResolverStorageClassValidationTest,
- GlobalVariableFunctionStorageClass_Fail) {
- // var<function> g : f32;
- Global(Source{{12, 34}}, "g", ty.f32(), ast::StorageClass::kFunction);
+TEST_F(ResolverStorageClassValidationTest, GlobalVariableFunctionStorageClass_Fail) {
+ // var<function> g : f32;
+ Global(Source{{12, 34}}, "g", ty.f32(), ast::StorageClass::kFunction);
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: variables declared at module scope must not be in "
- "the function storage class");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: variables declared at module scope must not be in "
+ "the function storage class");
}
TEST_F(ResolverStorageClassValidationTest, Private_RuntimeArray) {
- Global(Source{{12, 34}}, "v", ty.array(ty.i32()),
- ast::StorageClass::kPrivate);
+ Global(Source{{12, 34}}, "v", ty.array(ty.i32()), ast::StorageClass::kPrivate);
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- R"(12:34 error: runtime-sized arrays can only be used in the <storage> storage class
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ R"(12:34 error: runtime-sized arrays can only be used in the <storage> storage class
12:34 note: while instantiating variable v)");
}
TEST_F(ResolverStorageClassValidationTest, Private_RuntimeArrayInStruct) {
- auto* s = Structure("S", {Member("m", ty.array(ty.i32()))});
- Global(Source{{12, 34}}, "v", ty.Of(s), ast::StorageClass::kPrivate);
+ auto* s = Structure("S", {Member("m", ty.array(ty.i32()))});
+ Global(Source{{12, 34}}, "v", ty.Of(s), ast::StorageClass::kPrivate);
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- R"(12:34 error: runtime-sized arrays can only be used in the <storage> storage class
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ R"(12:34 error: runtime-sized arrays can only be used in the <storage> storage class
note: while analysing structure member S.m
12:34 note: while instantiating variable v)");
}
TEST_F(ResolverStorageClassValidationTest, Workgroup_RuntimeArray) {
- Global(Source{{12, 34}}, "v", ty.array(ty.i32()),
- ast::StorageClass::kWorkgroup);
+ Global(Source{{12, 34}}, "v", ty.array(ty.i32()), ast::StorageClass::kWorkgroup);
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- R"(12:34 error: runtime-sized arrays can only be used in the <storage> storage class
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ R"(12:34 error: runtime-sized arrays can only be used in the <storage> storage class
12:34 note: while instantiating variable v)");
}
TEST_F(ResolverStorageClassValidationTest, Workgroup_RuntimeArrayInStruct) {
- auto* s = Structure("S", {Member("m", ty.array(ty.i32()))});
- Global(Source{{12, 34}}, "v", ty.Of(s), ast::StorageClass::kWorkgroup);
+ auto* s = Structure("S", {Member("m", ty.array(ty.i32()))});
+ Global(Source{{12, 34}}, "v", ty.Of(s), ast::StorageClass::kWorkgroup);
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- R"(12:34 error: runtime-sized arrays can only be used in the <storage> storage class
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ R"(12:34 error: runtime-sized arrays can only be used in the <storage> storage class
note: while analysing structure member S.m
12:34 note: while instantiating variable v)");
}
TEST_F(ResolverStorageClassValidationTest, StorageBufferBool) {
- // var<storage> g : bool;
- Global(Source{{56, 78}}, "g", ty.bool_(), ast::StorageClass::kStorage,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(0),
- });
-
- ASSERT_FALSE(r()->Resolve());
-
- EXPECT_EQ(
- r()->error(),
- R"(56:78 error: Type 'bool' cannot be used in storage class 'storage' as it is non-host-shareable
+ // var<storage> g : bool;
+ Global(Source{{56, 78}}, "g", ty.bool_(), ast::StorageClass::kStorage,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(0),
+ });
+
+ ASSERT_FALSE(r()->Resolve());
+
+ EXPECT_EQ(
+ r()->error(),
+ R"(56:78 error: Type 'bool' cannot be used in storage class 'storage' as it is non-host-shareable
56:78 note: while instantiating variable g)");
}
TEST_F(ResolverStorageClassValidationTest, StorageBufferPointer) {
- // var<storage> g : ptr<private, f32>;
- Global(Source{{56, 78}}, "g",
- ty.pointer(ty.f32(), ast::StorageClass::kPrivate),
- ast::StorageClass::kStorage,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(0),
- });
-
- ASSERT_FALSE(r()->Resolve());
-
- EXPECT_EQ(
- r()->error(),
- R"(56:78 error: Type 'ptr<private, f32, read_write>' cannot be used in storage class 'storage' as it is non-host-shareable
+ // var<storage> g : ptr<private, f32>;
+ Global(Source{{56, 78}}, "g", ty.pointer(ty.f32(), ast::StorageClass::kPrivate),
+ ast::StorageClass::kStorage,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(0),
+ });
+
+ ASSERT_FALSE(r()->Resolve());
+
+ EXPECT_EQ(
+ r()->error(),
+ R"(56:78 error: Type 'ptr<private, f32, read_write>' cannot be used in storage class 'storage' as it is non-host-shareable
56:78 note: while instantiating variable g)");
}
TEST_F(ResolverStorageClassValidationTest, StorageBufferIntScalar) {
- // var<storage> g : i32;
- Global(Source{{56, 78}}, "g", ty.i32(), ast::StorageClass::kStorage,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(0),
- });
-
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ // var<storage> g : i32;
+ Global(Source{{56, 78}}, "g", ty.i32(), ast::StorageClass::kStorage,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(0),
+ });
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverStorageClassValidationTest, StorageBufferVector) {
- // var<storage> g : vec4<f32>;
- Global(Source{{56, 78}}, "g", ty.vec4<f32>(), ast::StorageClass::kStorage,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(0),
- });
-
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ // var<storage> g : vec4<f32>;
+ Global(Source{{56, 78}}, "g", ty.vec4<f32>(), ast::StorageClass::kStorage,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(0),
+ });
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverStorageClassValidationTest, StorageBufferArray) {
- // var<storage, read> g : array<S, 3>;
- auto* s = Structure("S", {Member("a", ty.f32())});
- auto* a = ty.array(ty.Of(s), 3);
- Global(Source{{56, 78}}, "g", a, ast::StorageClass::kStorage,
- ast::Access::kRead,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(0),
- });
-
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ // var<storage, read> g : array<S, 3u>;
+ auto* s = Structure("S", {Member("a", ty.f32())});
+ auto* a = ty.array(ty.Of(s), 3_u);
+ Global(Source{{56, 78}}, "g", a, ast::StorageClass::kStorage, ast::Access::kRead,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(0),
+ });
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverStorageClassValidationTest, StorageBufferBoolAlias) {
- // type a = bool;
- // var<storage, read> g : a;
- auto* a = Alias("a", ty.bool_());
- Global(Source{{56, 78}}, "g", ty.Of(a), ast::StorageClass::kStorage,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(0),
- });
-
- ASSERT_FALSE(r()->Resolve());
-
- EXPECT_EQ(
- r()->error(),
- R"(56:78 error: Type 'bool' cannot be used in storage class 'storage' as it is non-host-shareable
+ // type a = bool;
+ // var<storage, read> g : a;
+ auto* a = Alias("a", ty.bool_());
+ Global(Source{{56, 78}}, "g", ty.Of(a), ast::StorageClass::kStorage,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(0),
+ });
+
+ ASSERT_FALSE(r()->Resolve());
+
+ EXPECT_EQ(
+ r()->error(),
+ R"(56:78 error: Type 'bool' cannot be used in storage class 'storage' as it is non-host-shareable
56:78 note: while instantiating variable g)");
}
TEST_F(ResolverStorageClassValidationTest, NotStorage_AccessMode) {
- // var<private, read> g : a;
- Global(Source{{56, 78}}, "g", ty.i32(), ast::StorageClass::kPrivate,
- ast::Access::kRead);
+ // var<private, read> g : a;
+ Global(Source{{56, 78}}, "g", ty.i32(), ast::StorageClass::kPrivate, ast::Access::kRead);
- ASSERT_FALSE(r()->Resolve());
+ ASSERT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- R"(56:78 error: only variables in <storage> storage class may declare an access mode)");
+ EXPECT_EQ(
+ r()->error(),
+ R"(56:78 error: only variables in <storage> storage class may declare an access mode)");
}
TEST_F(ResolverStorageClassValidationTest, StorageBufferNoError_Basic) {
- // struct S { x : i32 };
- // var<storage, read> g : S;
- auto* s = Structure("S", {Member(Source{{12, 34}}, "x", ty.i32())});
- Global(Source{{56, 78}}, "g", ty.Of(s), ast::StorageClass::kStorage,
- ast::Access::kRead,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(0),
- });
-
- ASSERT_TRUE(r()->Resolve());
+ // struct S { x : i32 };
+ // var<storage, read> g : S;
+ auto* s = Structure("S", {Member(Source{{12, 34}}, "x", ty.i32())});
+ Global(Source{{56, 78}}, "g", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kRead,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(0),
+ });
+
+ ASSERT_TRUE(r()->Resolve());
}
TEST_F(ResolverStorageClassValidationTest, StorageBufferNoError_Aliases) {
- // struct S { x : i32 };
- // type a1 = S;
- // var<storage, read> g : a1;
- auto* s = Structure("S", {Member(Source{{12, 34}}, "x", ty.i32())});
- auto* a1 = Alias("a1", ty.Of(s));
- auto* a2 = Alias("a2", ty.Of(a1));
- Global(Source{{56, 78}}, "g", ty.Of(a2), ast::StorageClass::kStorage,
- ast::Access::kRead,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(0),
- });
-
- ASSERT_TRUE(r()->Resolve());
+ // struct S { x : i32 };
+ // type a1 = S;
+ // var<storage, read> g : a1;
+ auto* s = Structure("S", {Member(Source{{12, 34}}, "x", ty.i32())});
+ auto* a1 = Alias("a1", ty.Of(s));
+ auto* a2 = Alias("a2", ty.Of(a1));
+ Global(Source{{56, 78}}, "g", ty.Of(a2), ast::StorageClass::kStorage, ast::Access::kRead,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(0),
+ });
+
+ ASSERT_TRUE(r()->Resolve());
}
TEST_F(ResolverStorageClassValidationTest, UniformBuffer_Struct_Runtime) {
- // struct S { m: array<f32>; };
- // @group(0) @binding(0) var<uniform, > svar : S;
+ // struct S { m: array<f32>; };
+ // @group(0) @binding(0) var<uniform, > svar : S;
- auto* s = Structure(Source{{12, 34}}, "S", {Member("m", ty.array<i32>())});
+ auto* s = Structure(Source{{12, 34}}, "S", {Member("m", ty.array<i32>())});
- Global(Source{{56, 78}}, "svar", ty.Of(s), ast::StorageClass::kUniform,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(0),
- });
+ Global(Source{{56, 78}}, "svar", ty.Of(s), ast::StorageClass::kUniform,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(0),
+ });
- ASSERT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- R"(56:78 error: runtime-sized arrays can only be used in the <storage> storage class
+ ASSERT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ R"(56:78 error: runtime-sized arrays can only be used in the <storage> storage class
note: while analysing structure member S.m
56:78 note: while instantiating variable svar)");
}
TEST_F(ResolverStorageClassValidationTest, UniformBufferBool) {
- // var<uniform> g : bool;
- Global(Source{{56, 78}}, "g", ty.bool_(), ast::StorageClass::kUniform,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(0),
- });
-
- ASSERT_FALSE(r()->Resolve());
-
- EXPECT_EQ(
- r()->error(),
- R"(56:78 error: Type 'bool' cannot be used in storage class 'uniform' as it is non-host-shareable
+ // var<uniform> g : bool;
+ Global(Source{{56, 78}}, "g", ty.bool_(), ast::StorageClass::kUniform,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(0),
+ });
+
+ ASSERT_FALSE(r()->Resolve());
+
+ EXPECT_EQ(
+ r()->error(),
+ R"(56:78 error: Type 'bool' cannot be used in storage class 'uniform' as it is non-host-shareable
56:78 note: while instantiating variable g)");
}
TEST_F(ResolverStorageClassValidationTest, UniformBufferPointer) {
- // var<uniform> g : ptr<private, f32>;
- Global(Source{{56, 78}}, "g",
- ty.pointer(ty.f32(), ast::StorageClass::kPrivate),
- ast::StorageClass::kUniform,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(0),
- });
-
- ASSERT_FALSE(r()->Resolve());
-
- EXPECT_EQ(
- r()->error(),
- R"(56:78 error: Type 'ptr<private, f32, read_write>' cannot be used in storage class 'uniform' as it is non-host-shareable
+ // var<uniform> g : ptr<private, f32>;
+ Global(Source{{56, 78}}, "g", ty.pointer(ty.f32(), ast::StorageClass::kPrivate),
+ ast::StorageClass::kUniform,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(0),
+ });
+
+ ASSERT_FALSE(r()->Resolve());
+
+ EXPECT_EQ(
+ r()->error(),
+ R"(56:78 error: Type 'ptr<private, f32, read_write>' cannot be used in storage class 'uniform' as it is non-host-shareable
56:78 note: while instantiating variable g)");
}
TEST_F(ResolverStorageClassValidationTest, UniformBufferIntScalar) {
- // var<uniform> g : i32;
- Global(Source{{56, 78}}, "g", ty.i32(), ast::StorageClass::kUniform,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(0),
- });
-
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ // var<uniform> g : i32;
+ Global(Source{{56, 78}}, "g", ty.i32(), ast::StorageClass::kUniform,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(0),
+ });
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverStorageClassValidationTest, UniformBufferVector) {
- // var<uniform> g : vec4<f32>;
- Global(Source{{56, 78}}, "g", ty.vec4<f32>(), ast::StorageClass::kUniform,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(0),
- });
-
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ // var<uniform> g : vec4<f32>;
+ Global(Source{{56, 78}}, "g", ty.vec4<f32>(), ast::StorageClass::kUniform,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(0),
+ });
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverStorageClassValidationTest, UniformBufferArray) {
- // struct S {
- // @size(16) f : f32;
- // }
- // var<uniform> g : array<S, 3>;
- auto* s = Structure("S", {Member("a", ty.f32(), {MemberSize(16)})});
- auto* a = ty.array(ty.Of(s), 3);
- Global(Source{{56, 78}}, "g", a, ast::StorageClass::kUniform,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(0),
- });
-
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ // struct S {
+ // @size(16) f : f32;
+ // }
+ // var<uniform> g : array<S, 3u>;
+ auto* s = Structure("S", {Member("a", ty.f32(), {MemberSize(16)})});
+ auto* a = ty.array(ty.Of(s), 3_u);
+ Global(Source{{56, 78}}, "g", a, ast::StorageClass::kUniform,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(0),
+ });
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverStorageClassValidationTest, UniformBufferBoolAlias) {
- // type a = bool;
- // var<uniform> g : a;
- auto* a = Alias("a", ty.bool_());
- Global(Source{{56, 78}}, "g", ty.Of(a), ast::StorageClass::kUniform,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(0),
- });
-
- ASSERT_FALSE(r()->Resolve());
-
- EXPECT_EQ(
- r()->error(),
- R"(56:78 error: Type 'bool' cannot be used in storage class 'uniform' as it is non-host-shareable
+ // type a = bool;
+ // var<uniform> g : a;
+ auto* a = Alias("a", ty.bool_());
+ Global(Source{{56, 78}}, "g", ty.Of(a), ast::StorageClass::kUniform,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(0),
+ });
+
+ ASSERT_FALSE(r()->Resolve());
+
+ EXPECT_EQ(
+ r()->error(),
+ R"(56:78 error: Type 'bool' cannot be used in storage class 'uniform' as it is non-host-shareable
56:78 note: while instantiating variable g)");
}
TEST_F(ResolverStorageClassValidationTest, UniformBufferNoError_Basic) {
- // struct S { x : i32 };
- // var<uniform> g : S;
- auto* s = Structure("S", {Member(Source{{12, 34}}, "x", ty.i32())});
- Global(Source{{56, 78}}, "g", ty.Of(s), ast::StorageClass::kUniform,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(0),
- });
-
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ // struct S { x : i32 };
+ // var<uniform> g : S;
+ auto* s = Structure("S", {Member(Source{{12, 34}}, "x", ty.i32())});
+ Global(Source{{56, 78}}, "g", ty.Of(s), ast::StorageClass::kUniform,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(0),
+ });
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverStorageClassValidationTest, UniformBufferNoError_Aliases) {
- // struct S { x : i32 };
- // type a1 = S;
- // var<uniform> g : a1;
- auto* s = Structure("S", {Member(Source{{12, 34}}, "x", ty.i32())});
- auto* a1 = Alias("a1", ty.Of(s));
- Global(Source{{56, 78}}, "g", ty.Of(a1), ast::StorageClass::kUniform,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(0),
- });
-
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ // struct S { x : i32 };
+ // type a1 = S;
+ // var<uniform> g : a1;
+ auto* s = Structure("S", {Member(Source{{12, 34}}, "x", ty.i32())});
+ auto* a1 = Alias("a1", ty.Of(s));
+ Global(Source{{56, 78}}, "g", ty.Of(a1), ast::StorageClass::kUniform,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(0),
+ });
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/resolver/struct_layout_test.cc b/chromium/third_party/dawn/src/tint/resolver/struct_layout_test.cc
index 7c2f4504a3b..854e87ae3ea 100644
--- a/chromium/third_party/dawn/src/tint/resolver/struct_layout_test.cc
+++ b/chromium/third_party/dawn/src/tint/resolver/struct_layout_test.cc
@@ -18,385 +18,387 @@
#include "src/tint/resolver/resolver_test_helper.h"
#include "src/tint/sem/struct.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::resolver {
namespace {
using ResolverStructLayoutTest = ResolverTest;
TEST_F(ResolverStructLayoutTest, Scalars) {
- auto* s = Structure("S", {
- Member("a", ty.f32()),
- Member("b", ty.u32()),
- Member("c", ty.i32()),
- });
-
- ASSERT_TRUE(r()->Resolve()) << r()->error();
-
- auto* sem = TypeOf(s)->As<sem::Struct>();
- ASSERT_NE(sem, nullptr);
- EXPECT_EQ(sem->Size(), 12u);
- EXPECT_EQ(sem->SizeNoPadding(), 12u);
- EXPECT_EQ(sem->Align(), 4u);
- ASSERT_EQ(sem->Members().size(), 3u);
- EXPECT_EQ(sem->Members()[0]->Offset(), 0u);
- EXPECT_EQ(sem->Members()[0]->Align(), 4u);
- EXPECT_EQ(sem->Members()[0]->Size(), 4u);
- EXPECT_EQ(sem->Members()[1]->Offset(), 4u);
- EXPECT_EQ(sem->Members()[1]->Align(), 4u);
- EXPECT_EQ(sem->Members()[1]->Size(), 4u);
- EXPECT_EQ(sem->Members()[2]->Offset(), 8u);
- EXPECT_EQ(sem->Members()[2]->Align(), 4u);
- EXPECT_EQ(sem->Members()[2]->Size(), 4u);
+ auto* s = Structure("S", {
+ Member("a", ty.f32()),
+ Member("b", ty.u32()),
+ Member("c", ty.i32()),
+ });
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
+
+ auto* sem = TypeOf(s)->As<sem::Struct>();
+ ASSERT_NE(sem, nullptr);
+ EXPECT_EQ(sem->Size(), 12u);
+ EXPECT_EQ(sem->SizeNoPadding(), 12u);
+ EXPECT_EQ(sem->Align(), 4u);
+ ASSERT_EQ(sem->Members().size(), 3u);
+ EXPECT_EQ(sem->Members()[0]->Offset(), 0u);
+ EXPECT_EQ(sem->Members()[0]->Align(), 4u);
+ EXPECT_EQ(sem->Members()[0]->Size(), 4u);
+ EXPECT_EQ(sem->Members()[1]->Offset(), 4u);
+ EXPECT_EQ(sem->Members()[1]->Align(), 4u);
+ EXPECT_EQ(sem->Members()[1]->Size(), 4u);
+ EXPECT_EQ(sem->Members()[2]->Offset(), 8u);
+ EXPECT_EQ(sem->Members()[2]->Align(), 4u);
+ EXPECT_EQ(sem->Members()[2]->Size(), 4u);
}
TEST_F(ResolverStructLayoutTest, Alias) {
- auto* alias_a = Alias("a", ty.f32());
- auto* alias_b = Alias("b", ty.f32());
-
- auto* s = Structure("S", {
- Member("a", ty.Of(alias_a)),
- Member("b", ty.Of(alias_b)),
- });
-
- ASSERT_TRUE(r()->Resolve()) << r()->error();
-
- auto* sem = TypeOf(s)->As<sem::Struct>();
- ASSERT_NE(sem, nullptr);
- EXPECT_EQ(sem->Size(), 8u);
- EXPECT_EQ(sem->SizeNoPadding(), 8u);
- EXPECT_EQ(sem->Align(), 4u);
- ASSERT_EQ(sem->Members().size(), 2u);
- EXPECT_EQ(sem->Members()[0]->Offset(), 0u);
- EXPECT_EQ(sem->Members()[0]->Align(), 4u);
- EXPECT_EQ(sem->Members()[0]->Size(), 4u);
- EXPECT_EQ(sem->Members()[1]->Offset(), 4u);
- EXPECT_EQ(sem->Members()[1]->Align(), 4u);
- EXPECT_EQ(sem->Members()[1]->Size(), 4u);
+ auto* alias_a = Alias("a", ty.f32());
+ auto* alias_b = Alias("b", ty.f32());
+
+ auto* s = Structure("S", {
+ Member("a", ty.Of(alias_a)),
+ Member("b", ty.Of(alias_b)),
+ });
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
+
+ auto* sem = TypeOf(s)->As<sem::Struct>();
+ ASSERT_NE(sem, nullptr);
+ EXPECT_EQ(sem->Size(), 8u);
+ EXPECT_EQ(sem->SizeNoPadding(), 8u);
+ EXPECT_EQ(sem->Align(), 4u);
+ ASSERT_EQ(sem->Members().size(), 2u);
+ EXPECT_EQ(sem->Members()[0]->Offset(), 0u);
+ EXPECT_EQ(sem->Members()[0]->Align(), 4u);
+ EXPECT_EQ(sem->Members()[0]->Size(), 4u);
+ EXPECT_EQ(sem->Members()[1]->Offset(), 4u);
+ EXPECT_EQ(sem->Members()[1]->Align(), 4u);
+ EXPECT_EQ(sem->Members()[1]->Size(), 4u);
}
TEST_F(ResolverStructLayoutTest, ImplicitStrideArrayStaticSize) {
- auto* s = Structure("S", {
- Member("a", ty.array<i32, 3>()),
- Member("b", ty.array<f32, 5>()),
- Member("c", ty.array<f32, 1>()),
- });
-
- ASSERT_TRUE(r()->Resolve()) << r()->error();
-
- auto* sem = TypeOf(s)->As<sem::Struct>();
- ASSERT_NE(sem, nullptr);
- EXPECT_EQ(sem->Size(), 36u);
- EXPECT_EQ(sem->SizeNoPadding(), 36u);
- EXPECT_EQ(sem->Align(), 4u);
- ASSERT_EQ(sem->Members().size(), 3u);
- EXPECT_EQ(sem->Members()[0]->Offset(), 0u);
- EXPECT_EQ(sem->Members()[0]->Align(), 4u);
- EXPECT_EQ(sem->Members()[0]->Size(), 12u);
- EXPECT_EQ(sem->Members()[1]->Offset(), 12u);
- EXPECT_EQ(sem->Members()[1]->Align(), 4u);
- EXPECT_EQ(sem->Members()[1]->Size(), 20u);
- EXPECT_EQ(sem->Members()[2]->Offset(), 32u);
- EXPECT_EQ(sem->Members()[2]->Align(), 4u);
- EXPECT_EQ(sem->Members()[2]->Size(), 4u);
+ auto* s = Structure("S", {
+ Member("a", ty.array<i32, 3>()),
+ Member("b", ty.array<f32, 5>()),
+ Member("c", ty.array<f32, 1>()),
+ });
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
+
+ auto* sem = TypeOf(s)->As<sem::Struct>();
+ ASSERT_NE(sem, nullptr);
+ EXPECT_EQ(sem->Size(), 36u);
+ EXPECT_EQ(sem->SizeNoPadding(), 36u);
+ EXPECT_EQ(sem->Align(), 4u);
+ ASSERT_EQ(sem->Members().size(), 3u);
+ EXPECT_EQ(sem->Members()[0]->Offset(), 0u);
+ EXPECT_EQ(sem->Members()[0]->Align(), 4u);
+ EXPECT_EQ(sem->Members()[0]->Size(), 12u);
+ EXPECT_EQ(sem->Members()[1]->Offset(), 12u);
+ EXPECT_EQ(sem->Members()[1]->Align(), 4u);
+ EXPECT_EQ(sem->Members()[1]->Size(), 20u);
+ EXPECT_EQ(sem->Members()[2]->Offset(), 32u);
+ EXPECT_EQ(sem->Members()[2]->Align(), 4u);
+ EXPECT_EQ(sem->Members()[2]->Size(), 4u);
}
TEST_F(ResolverStructLayoutTest, ExplicitStrideArrayStaticSize) {
- auto* s = Structure("S", {
- Member("a", ty.array<i32, 3>(/*stride*/ 8)),
- Member("b", ty.array<f32, 5>(/*stride*/ 16)),
- Member("c", ty.array<f32, 1>(/*stride*/ 32)),
- });
-
- ASSERT_TRUE(r()->Resolve()) << r()->error();
-
- auto* sem = TypeOf(s)->As<sem::Struct>();
- ASSERT_NE(sem, nullptr);
- EXPECT_EQ(sem->Size(), 136u);
- EXPECT_EQ(sem->SizeNoPadding(), 136u);
- EXPECT_EQ(sem->Align(), 4u);
- ASSERT_EQ(sem->Members().size(), 3u);
- EXPECT_EQ(sem->Members()[0]->Offset(), 0u);
- EXPECT_EQ(sem->Members()[0]->Align(), 4u);
- EXPECT_EQ(sem->Members()[0]->Size(), 24u);
- EXPECT_EQ(sem->Members()[1]->Offset(), 24u);
- EXPECT_EQ(sem->Members()[1]->Align(), 4u);
- EXPECT_EQ(sem->Members()[1]->Size(), 80u);
- EXPECT_EQ(sem->Members()[2]->Offset(), 104u);
- EXPECT_EQ(sem->Members()[2]->Align(), 4u);
- EXPECT_EQ(sem->Members()[2]->Size(), 32u);
+ auto* s = Structure("S", {
+ Member("a", ty.array<i32, 3>(/*stride*/ 8)),
+ Member("b", ty.array<f32, 5>(/*stride*/ 16)),
+ Member("c", ty.array<f32, 1>(/*stride*/ 32)),
+ });
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
+
+ auto* sem = TypeOf(s)->As<sem::Struct>();
+ ASSERT_NE(sem, nullptr);
+ EXPECT_EQ(sem->Size(), 136u);
+ EXPECT_EQ(sem->SizeNoPadding(), 136u);
+ EXPECT_EQ(sem->Align(), 4u);
+ ASSERT_EQ(sem->Members().size(), 3u);
+ EXPECT_EQ(sem->Members()[0]->Offset(), 0u);
+ EXPECT_EQ(sem->Members()[0]->Align(), 4u);
+ EXPECT_EQ(sem->Members()[0]->Size(), 24u);
+ EXPECT_EQ(sem->Members()[1]->Offset(), 24u);
+ EXPECT_EQ(sem->Members()[1]->Align(), 4u);
+ EXPECT_EQ(sem->Members()[1]->Size(), 80u);
+ EXPECT_EQ(sem->Members()[2]->Offset(), 104u);
+ EXPECT_EQ(sem->Members()[2]->Align(), 4u);
+ EXPECT_EQ(sem->Members()[2]->Size(), 32u);
}
TEST_F(ResolverStructLayoutTest, ImplicitStrideArrayRuntimeSized) {
- auto* s = Structure("S", {
- Member("c", ty.array<f32>()),
- });
-
- ASSERT_TRUE(r()->Resolve()) << r()->error();
-
- auto* sem = TypeOf(s)->As<sem::Struct>();
- ASSERT_NE(sem, nullptr);
- EXPECT_EQ(sem->Size(), 4u);
- EXPECT_EQ(sem->SizeNoPadding(), 4u);
- EXPECT_EQ(sem->Align(), 4u);
- ASSERT_EQ(sem->Members().size(), 1u);
- EXPECT_EQ(sem->Members()[0]->Offset(), 0u);
- EXPECT_EQ(sem->Members()[0]->Align(), 4u);
- EXPECT_EQ(sem->Members()[0]->Size(), 4u);
+ auto* s = Structure("S", {
+ Member("c", ty.array<f32>()),
+ });
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
+
+ auto* sem = TypeOf(s)->As<sem::Struct>();
+ ASSERT_NE(sem, nullptr);
+ EXPECT_EQ(sem->Size(), 4u);
+ EXPECT_EQ(sem->SizeNoPadding(), 4u);
+ EXPECT_EQ(sem->Align(), 4u);
+ ASSERT_EQ(sem->Members().size(), 1u);
+ EXPECT_EQ(sem->Members()[0]->Offset(), 0u);
+ EXPECT_EQ(sem->Members()[0]->Align(), 4u);
+ EXPECT_EQ(sem->Members()[0]->Size(), 4u);
}
TEST_F(ResolverStructLayoutTest, ExplicitStrideArrayRuntimeSized) {
- auto* s = Structure("S", {
- Member("c", ty.array<f32>(/*stride*/ 32)),
- });
-
- ASSERT_TRUE(r()->Resolve()) << r()->error();
-
- auto* sem = TypeOf(s)->As<sem::Struct>();
- ASSERT_NE(sem, nullptr);
- EXPECT_EQ(sem->Size(), 32u);
- EXPECT_EQ(sem->SizeNoPadding(), 32u);
- EXPECT_EQ(sem->Align(), 4u);
- ASSERT_EQ(sem->Members().size(), 1u);
- EXPECT_EQ(sem->Members()[0]->Offset(), 0u);
- EXPECT_EQ(sem->Members()[0]->Align(), 4u);
- EXPECT_EQ(sem->Members()[0]->Size(), 32u);
+ auto* s = Structure("S", {
+ Member("c", ty.array<f32>(/*stride*/ 32)),
+ });
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
+
+ auto* sem = TypeOf(s)->As<sem::Struct>();
+ ASSERT_NE(sem, nullptr);
+ EXPECT_EQ(sem->Size(), 32u);
+ EXPECT_EQ(sem->SizeNoPadding(), 32u);
+ EXPECT_EQ(sem->Align(), 4u);
+ ASSERT_EQ(sem->Members().size(), 1u);
+ EXPECT_EQ(sem->Members()[0]->Offset(), 0u);
+ EXPECT_EQ(sem->Members()[0]->Align(), 4u);
+ EXPECT_EQ(sem->Members()[0]->Size(), 32u);
}
TEST_F(ResolverStructLayoutTest, ImplicitStrideArrayOfExplicitStrideArray) {
- auto* inner = ty.array<i32, 2>(/*stride*/ 16); // size: 32
- auto* outer = ty.array(inner, 12); // size: 12 * 32
- auto* s = Structure("S", {
- Member("c", outer),
- });
-
- ASSERT_TRUE(r()->Resolve()) << r()->error();
-
- auto* sem = TypeOf(s)->As<sem::Struct>();
- ASSERT_NE(sem, nullptr);
- EXPECT_EQ(sem->Size(), 384u);
- EXPECT_EQ(sem->SizeNoPadding(), 384u);
- EXPECT_EQ(sem->Align(), 4u);
- ASSERT_EQ(sem->Members().size(), 1u);
- EXPECT_EQ(sem->Members()[0]->Offset(), 0u);
- EXPECT_EQ(sem->Members()[0]->Align(), 4u);
- EXPECT_EQ(sem->Members()[0]->Size(), 384u);
+ auto* inner = ty.array<i32, 2>(/*stride*/ 16); // size: 32
+ auto* outer = ty.array(inner, 12_u); // size: 12 * 32
+ auto* s = Structure("S", {
+ Member("c", outer),
+ });
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
+
+ auto* sem = TypeOf(s)->As<sem::Struct>();
+ ASSERT_NE(sem, nullptr);
+ EXPECT_EQ(sem->Size(), 384u);
+ EXPECT_EQ(sem->SizeNoPadding(), 384u);
+ EXPECT_EQ(sem->Align(), 4u);
+ ASSERT_EQ(sem->Members().size(), 1u);
+ EXPECT_EQ(sem->Members()[0]->Offset(), 0u);
+ EXPECT_EQ(sem->Members()[0]->Align(), 4u);
+ EXPECT_EQ(sem->Members()[0]->Size(), 384u);
}
TEST_F(ResolverStructLayoutTest, ImplicitStrideArrayOfStructure) {
- auto* inner = Structure("Inner", {
- Member("a", ty.vec2<i32>()),
- Member("b", ty.vec3<i32>()),
- Member("c", ty.vec4<i32>()),
- }); // size: 48
- auto* outer = ty.array(ty.Of(inner), 12); // size: 12 * 48
- auto* s = Structure("S", {
- Member("c", outer),
- });
-
- ASSERT_TRUE(r()->Resolve()) << r()->error();
-
- auto* sem = TypeOf(s)->As<sem::Struct>();
- ASSERT_NE(sem, nullptr);
- EXPECT_EQ(sem->Size(), 576u);
- EXPECT_EQ(sem->SizeNoPadding(), 576u);
- EXPECT_EQ(sem->Align(), 16u);
- ASSERT_EQ(sem->Members().size(), 1u);
- EXPECT_EQ(sem->Members()[0]->Offset(), 0u);
- EXPECT_EQ(sem->Members()[0]->Align(), 16u);
- EXPECT_EQ(sem->Members()[0]->Size(), 576u);
+ auto* inner = Structure("Inner", {
+ Member("a", ty.vec2<i32>()),
+ Member("b", ty.vec3<i32>()),
+ Member("c", ty.vec4<i32>()),
+ }); // size: 48
+ auto* outer = ty.array(ty.Of(inner), 12_u); // size: 12 * 48
+ auto* s = Structure("S", {
+ Member("c", outer),
+ });
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
+
+ auto* sem = TypeOf(s)->As<sem::Struct>();
+ ASSERT_NE(sem, nullptr);
+ EXPECT_EQ(sem->Size(), 576u);
+ EXPECT_EQ(sem->SizeNoPadding(), 576u);
+ EXPECT_EQ(sem->Align(), 16u);
+ ASSERT_EQ(sem->Members().size(), 1u);
+ EXPECT_EQ(sem->Members()[0]->Offset(), 0u);
+ EXPECT_EQ(sem->Members()[0]->Align(), 16u);
+ EXPECT_EQ(sem->Members()[0]->Size(), 576u);
}
TEST_F(ResolverStructLayoutTest, Vector) {
- auto* s = Structure("S", {
- Member("a", ty.vec2<i32>()),
- Member("b", ty.vec3<i32>()),
- Member("c", ty.vec4<i32>()),
- });
-
- ASSERT_TRUE(r()->Resolve()) << r()->error();
-
- auto* sem = TypeOf(s)->As<sem::Struct>();
- ASSERT_NE(sem, nullptr);
- EXPECT_EQ(sem->Size(), 48u);
- EXPECT_EQ(sem->SizeNoPadding(), 48u);
- EXPECT_EQ(sem->Align(), 16u);
- ASSERT_EQ(sem->Members().size(), 3u);
- EXPECT_EQ(sem->Members()[0]->Offset(), 0u); // vec2
- EXPECT_EQ(sem->Members()[0]->Align(), 8u);
- EXPECT_EQ(sem->Members()[0]->Size(), 8u);
- EXPECT_EQ(sem->Members()[1]->Offset(), 16u); // vec3
- EXPECT_EQ(sem->Members()[1]->Align(), 16u);
- EXPECT_EQ(sem->Members()[1]->Size(), 12u);
- EXPECT_EQ(sem->Members()[2]->Offset(), 32u); // vec4
- EXPECT_EQ(sem->Members()[2]->Align(), 16u);
- EXPECT_EQ(sem->Members()[2]->Size(), 16u);
+ auto* s = Structure("S", {
+ Member("a", ty.vec2<i32>()),
+ Member("b", ty.vec3<i32>()),
+ Member("c", ty.vec4<i32>()),
+ });
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
+
+ auto* sem = TypeOf(s)->As<sem::Struct>();
+ ASSERT_NE(sem, nullptr);
+ EXPECT_EQ(sem->Size(), 48u);
+ EXPECT_EQ(sem->SizeNoPadding(), 48u);
+ EXPECT_EQ(sem->Align(), 16u);
+ ASSERT_EQ(sem->Members().size(), 3u);
+ EXPECT_EQ(sem->Members()[0]->Offset(), 0u); // vec2
+ EXPECT_EQ(sem->Members()[0]->Align(), 8u);
+ EXPECT_EQ(sem->Members()[0]->Size(), 8u);
+ EXPECT_EQ(sem->Members()[1]->Offset(), 16u); // vec3
+ EXPECT_EQ(sem->Members()[1]->Align(), 16u);
+ EXPECT_EQ(sem->Members()[1]->Size(), 12u);
+ EXPECT_EQ(sem->Members()[2]->Offset(), 32u); // vec4
+ EXPECT_EQ(sem->Members()[2]->Align(), 16u);
+ EXPECT_EQ(sem->Members()[2]->Size(), 16u);
}
TEST_F(ResolverStructLayoutTest, Matrix) {
- auto* s = Structure("S", {
- Member("a", ty.mat2x2<f32>()),
- Member("b", ty.mat2x3<f32>()),
- Member("c", ty.mat2x4<f32>()),
- Member("d", ty.mat3x2<f32>()),
- Member("e", ty.mat3x3<f32>()),
- Member("f", ty.mat3x4<f32>()),
- Member("g", ty.mat4x2<f32>()),
- Member("h", ty.mat4x3<f32>()),
- Member("i", ty.mat4x4<f32>()),
- });
-
- ASSERT_TRUE(r()->Resolve()) << r()->error();
-
- auto* sem = TypeOf(s)->As<sem::Struct>();
- ASSERT_NE(sem, nullptr);
- EXPECT_EQ(sem->Size(), 368u);
- EXPECT_EQ(sem->SizeNoPadding(), 368u);
- EXPECT_EQ(sem->Align(), 16u);
- ASSERT_EQ(sem->Members().size(), 9u);
- EXPECT_EQ(sem->Members()[0]->Offset(), 0u); // mat2x2
- EXPECT_EQ(sem->Members()[0]->Align(), 8u);
- EXPECT_EQ(sem->Members()[0]->Size(), 16u);
- EXPECT_EQ(sem->Members()[1]->Offset(), 16u); // mat2x3
- EXPECT_EQ(sem->Members()[1]->Align(), 16u);
- EXPECT_EQ(sem->Members()[1]->Size(), 32u);
- EXPECT_EQ(sem->Members()[2]->Offset(), 48u); // mat2x4
- EXPECT_EQ(sem->Members()[2]->Align(), 16u);
- EXPECT_EQ(sem->Members()[2]->Size(), 32u);
- EXPECT_EQ(sem->Members()[3]->Offset(), 80u); // mat3x2
- EXPECT_EQ(sem->Members()[3]->Align(), 8u);
- EXPECT_EQ(sem->Members()[3]->Size(), 24u);
- EXPECT_EQ(sem->Members()[4]->Offset(), 112u); // mat3x3
- EXPECT_EQ(sem->Members()[4]->Align(), 16u);
- EXPECT_EQ(sem->Members()[4]->Size(), 48u);
- EXPECT_EQ(sem->Members()[5]->Offset(), 160u); // mat3x4
- EXPECT_EQ(sem->Members()[5]->Align(), 16u);
- EXPECT_EQ(sem->Members()[5]->Size(), 48u);
- EXPECT_EQ(sem->Members()[6]->Offset(), 208u); // mat4x2
- EXPECT_EQ(sem->Members()[6]->Align(), 8u);
- EXPECT_EQ(sem->Members()[6]->Size(), 32u);
- EXPECT_EQ(sem->Members()[7]->Offset(), 240u); // mat4x3
- EXPECT_EQ(sem->Members()[7]->Align(), 16u);
- EXPECT_EQ(sem->Members()[7]->Size(), 64u);
- EXPECT_EQ(sem->Members()[8]->Offset(), 304u); // mat4x4
- EXPECT_EQ(sem->Members()[8]->Align(), 16u);
- EXPECT_EQ(sem->Members()[8]->Size(), 64u);
+ auto* s = Structure("S", {
+ Member("a", ty.mat2x2<f32>()),
+ Member("b", ty.mat2x3<f32>()),
+ Member("c", ty.mat2x4<f32>()),
+ Member("d", ty.mat3x2<f32>()),
+ Member("e", ty.mat3x3<f32>()),
+ Member("f", ty.mat3x4<f32>()),
+ Member("g", ty.mat4x2<f32>()),
+ Member("h", ty.mat4x3<f32>()),
+ Member("i", ty.mat4x4<f32>()),
+ });
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
+
+ auto* sem = TypeOf(s)->As<sem::Struct>();
+ ASSERT_NE(sem, nullptr);
+ EXPECT_EQ(sem->Size(), 368u);
+ EXPECT_EQ(sem->SizeNoPadding(), 368u);
+ EXPECT_EQ(sem->Align(), 16u);
+ ASSERT_EQ(sem->Members().size(), 9u);
+ EXPECT_EQ(sem->Members()[0]->Offset(), 0u); // mat2x2
+ EXPECT_EQ(sem->Members()[0]->Align(), 8u);
+ EXPECT_EQ(sem->Members()[0]->Size(), 16u);
+ EXPECT_EQ(sem->Members()[1]->Offset(), 16u); // mat2x3
+ EXPECT_EQ(sem->Members()[1]->Align(), 16u);
+ EXPECT_EQ(sem->Members()[1]->Size(), 32u);
+ EXPECT_EQ(sem->Members()[2]->Offset(), 48u); // mat2x4
+ EXPECT_EQ(sem->Members()[2]->Align(), 16u);
+ EXPECT_EQ(sem->Members()[2]->Size(), 32u);
+ EXPECT_EQ(sem->Members()[3]->Offset(), 80u); // mat3x2
+ EXPECT_EQ(sem->Members()[3]->Align(), 8u);
+ EXPECT_EQ(sem->Members()[3]->Size(), 24u);
+ EXPECT_EQ(sem->Members()[4]->Offset(), 112u); // mat3x3
+ EXPECT_EQ(sem->Members()[4]->Align(), 16u);
+ EXPECT_EQ(sem->Members()[4]->Size(), 48u);
+ EXPECT_EQ(sem->Members()[5]->Offset(), 160u); // mat3x4
+ EXPECT_EQ(sem->Members()[5]->Align(), 16u);
+ EXPECT_EQ(sem->Members()[5]->Size(), 48u);
+ EXPECT_EQ(sem->Members()[6]->Offset(), 208u); // mat4x2
+ EXPECT_EQ(sem->Members()[6]->Align(), 8u);
+ EXPECT_EQ(sem->Members()[6]->Size(), 32u);
+ EXPECT_EQ(sem->Members()[7]->Offset(), 240u); // mat4x3
+ EXPECT_EQ(sem->Members()[7]->Align(), 16u);
+ EXPECT_EQ(sem->Members()[7]->Size(), 64u);
+ EXPECT_EQ(sem->Members()[8]->Offset(), 304u); // mat4x4
+ EXPECT_EQ(sem->Members()[8]->Align(), 16u);
+ EXPECT_EQ(sem->Members()[8]->Size(), 64u);
}
TEST_F(ResolverStructLayoutTest, NestedStruct) {
- auto* inner = Structure("Inner", {
- Member("a", ty.mat3x3<f32>()),
- });
- auto* s = Structure("S", {
- Member("a", ty.i32()),
- Member("b", ty.Of(inner)),
- Member("c", ty.i32()),
- });
-
- ASSERT_TRUE(r()->Resolve()) << r()->error();
-
- auto* sem = TypeOf(s)->As<sem::Struct>();
- ASSERT_NE(sem, nullptr);
- EXPECT_EQ(sem->Size(), 80u);
- EXPECT_EQ(sem->SizeNoPadding(), 68u);
- EXPECT_EQ(sem->Align(), 16u);
- ASSERT_EQ(sem->Members().size(), 3u);
- EXPECT_EQ(sem->Members()[0]->Offset(), 0u);
- EXPECT_EQ(sem->Members()[0]->Align(), 4u);
- EXPECT_EQ(sem->Members()[0]->Size(), 4u);
- EXPECT_EQ(sem->Members()[1]->Offset(), 16u);
- EXPECT_EQ(sem->Members()[1]->Align(), 16u);
- EXPECT_EQ(sem->Members()[1]->Size(), 48u);
- EXPECT_EQ(sem->Members()[2]->Offset(), 64u);
- EXPECT_EQ(sem->Members()[2]->Align(), 4u);
- EXPECT_EQ(sem->Members()[2]->Size(), 4u);
+ auto* inner = Structure("Inner", {
+ Member("a", ty.mat3x3<f32>()),
+ });
+ auto* s = Structure("S", {
+ Member("a", ty.i32()),
+ Member("b", ty.Of(inner)),
+ Member("c", ty.i32()),
+ });
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
+
+ auto* sem = TypeOf(s)->As<sem::Struct>();
+ ASSERT_NE(sem, nullptr);
+ EXPECT_EQ(sem->Size(), 80u);
+ EXPECT_EQ(sem->SizeNoPadding(), 68u);
+ EXPECT_EQ(sem->Align(), 16u);
+ ASSERT_EQ(sem->Members().size(), 3u);
+ EXPECT_EQ(sem->Members()[0]->Offset(), 0u);
+ EXPECT_EQ(sem->Members()[0]->Align(), 4u);
+ EXPECT_EQ(sem->Members()[0]->Size(), 4u);
+ EXPECT_EQ(sem->Members()[1]->Offset(), 16u);
+ EXPECT_EQ(sem->Members()[1]->Align(), 16u);
+ EXPECT_EQ(sem->Members()[1]->Size(), 48u);
+ EXPECT_EQ(sem->Members()[2]->Offset(), 64u);
+ EXPECT_EQ(sem->Members()[2]->Align(), 4u);
+ EXPECT_EQ(sem->Members()[2]->Size(), 4u);
}
TEST_F(ResolverStructLayoutTest, SizeAttributes) {
- auto* inner = Structure("Inner", {
- Member("a", ty.f32(), {MemberSize(8)}),
- Member("b", ty.f32(), {MemberSize(16)}),
- Member("c", ty.f32(), {MemberSize(8)}),
- });
- auto* s = Structure("S", {
- Member("a", ty.f32(), {MemberSize(4)}),
- Member("b", ty.u32(), {MemberSize(8)}),
- Member("c", ty.Of(inner)),
- Member("d", ty.i32(), {MemberSize(32)}),
- });
-
- ASSERT_TRUE(r()->Resolve()) << r()->error();
-
- auto* sem = TypeOf(s)->As<sem::Struct>();
- ASSERT_NE(sem, nullptr);
- EXPECT_EQ(sem->Size(), 76u);
- EXPECT_EQ(sem->SizeNoPadding(), 76u);
- EXPECT_EQ(sem->Align(), 4u);
- ASSERT_EQ(sem->Members().size(), 4u);
- EXPECT_EQ(sem->Members()[0]->Offset(), 0u);
- EXPECT_EQ(sem->Members()[0]->Align(), 4u);
- EXPECT_EQ(sem->Members()[0]->Size(), 4u);
- EXPECT_EQ(sem->Members()[1]->Offset(), 4u);
- EXPECT_EQ(sem->Members()[1]->Align(), 4u);
- EXPECT_EQ(sem->Members()[1]->Size(), 8u);
- EXPECT_EQ(sem->Members()[2]->Offset(), 12u);
- EXPECT_EQ(sem->Members()[2]->Align(), 4u);
- EXPECT_EQ(sem->Members()[2]->Size(), 32u);
- EXPECT_EQ(sem->Members()[3]->Offset(), 44u);
- EXPECT_EQ(sem->Members()[3]->Align(), 4u);
- EXPECT_EQ(sem->Members()[3]->Size(), 32u);
+ auto* inner = Structure("Inner", {
+ Member("a", ty.f32(), {MemberSize(8)}),
+ Member("b", ty.f32(), {MemberSize(16)}),
+ Member("c", ty.f32(), {MemberSize(8)}),
+ });
+ auto* s = Structure("S", {
+ Member("a", ty.f32(), {MemberSize(4)}),
+ Member("b", ty.u32(), {MemberSize(8)}),
+ Member("c", ty.Of(inner)),
+ Member("d", ty.i32(), {MemberSize(32)}),
+ });
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
+
+ auto* sem = TypeOf(s)->As<sem::Struct>();
+ ASSERT_NE(sem, nullptr);
+ EXPECT_EQ(sem->Size(), 76u);
+ EXPECT_EQ(sem->SizeNoPadding(), 76u);
+ EXPECT_EQ(sem->Align(), 4u);
+ ASSERT_EQ(sem->Members().size(), 4u);
+ EXPECT_EQ(sem->Members()[0]->Offset(), 0u);
+ EXPECT_EQ(sem->Members()[0]->Align(), 4u);
+ EXPECT_EQ(sem->Members()[0]->Size(), 4u);
+ EXPECT_EQ(sem->Members()[1]->Offset(), 4u);
+ EXPECT_EQ(sem->Members()[1]->Align(), 4u);
+ EXPECT_EQ(sem->Members()[1]->Size(), 8u);
+ EXPECT_EQ(sem->Members()[2]->Offset(), 12u);
+ EXPECT_EQ(sem->Members()[2]->Align(), 4u);
+ EXPECT_EQ(sem->Members()[2]->Size(), 32u);
+ EXPECT_EQ(sem->Members()[3]->Offset(), 44u);
+ EXPECT_EQ(sem->Members()[3]->Align(), 4u);
+ EXPECT_EQ(sem->Members()[3]->Size(), 32u);
}
TEST_F(ResolverStructLayoutTest, AlignAttributes) {
- auto* inner = Structure("Inner", {
- Member("a", ty.f32(), {MemberAlign(8)}),
- Member("b", ty.f32(), {MemberAlign(16)}),
- Member("c", ty.f32(), {MemberAlign(4)}),
- });
- auto* s = Structure("S", {
- Member("a", ty.f32(), {MemberAlign(4)}),
- Member("b", ty.u32(), {MemberAlign(8)}),
- Member("c", ty.Of(inner)),
- Member("d", ty.i32(), {MemberAlign(32)}),
- });
-
- ASSERT_TRUE(r()->Resolve()) << r()->error();
-
- auto* sem = TypeOf(s)->As<sem::Struct>();
- ASSERT_NE(sem, nullptr);
- EXPECT_EQ(sem->Size(), 96u);
- EXPECT_EQ(sem->SizeNoPadding(), 68u);
- EXPECT_EQ(sem->Align(), 32u);
- ASSERT_EQ(sem->Members().size(), 4u);
- EXPECT_EQ(sem->Members()[0]->Offset(), 0u);
- EXPECT_EQ(sem->Members()[0]->Align(), 4u);
- EXPECT_EQ(sem->Members()[0]->Size(), 4u);
- EXPECT_EQ(sem->Members()[1]->Offset(), 8u);
- EXPECT_EQ(sem->Members()[1]->Align(), 8u);
- EXPECT_EQ(sem->Members()[1]->Size(), 4u);
- EXPECT_EQ(sem->Members()[2]->Offset(), 16u);
- EXPECT_EQ(sem->Members()[2]->Align(), 16u);
- EXPECT_EQ(sem->Members()[2]->Size(), 32u);
- EXPECT_EQ(sem->Members()[3]->Offset(), 64u);
- EXPECT_EQ(sem->Members()[3]->Align(), 32u);
- EXPECT_EQ(sem->Members()[3]->Size(), 4u);
+ auto* inner = Structure("Inner", {
+ Member("a", ty.f32(), {MemberAlign(8)}),
+ Member("b", ty.f32(), {MemberAlign(16)}),
+ Member("c", ty.f32(), {MemberAlign(4)}),
+ });
+ auto* s = Structure("S", {
+ Member("a", ty.f32(), {MemberAlign(4)}),
+ Member("b", ty.u32(), {MemberAlign(8)}),
+ Member("c", ty.Of(inner)),
+ Member("d", ty.i32(), {MemberAlign(32)}),
+ });
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
+
+ auto* sem = TypeOf(s)->As<sem::Struct>();
+ ASSERT_NE(sem, nullptr);
+ EXPECT_EQ(sem->Size(), 96u);
+ EXPECT_EQ(sem->SizeNoPadding(), 68u);
+ EXPECT_EQ(sem->Align(), 32u);
+ ASSERT_EQ(sem->Members().size(), 4u);
+ EXPECT_EQ(sem->Members()[0]->Offset(), 0u);
+ EXPECT_EQ(sem->Members()[0]->Align(), 4u);
+ EXPECT_EQ(sem->Members()[0]->Size(), 4u);
+ EXPECT_EQ(sem->Members()[1]->Offset(), 8u);
+ EXPECT_EQ(sem->Members()[1]->Align(), 8u);
+ EXPECT_EQ(sem->Members()[1]->Size(), 4u);
+ EXPECT_EQ(sem->Members()[2]->Offset(), 16u);
+ EXPECT_EQ(sem->Members()[2]->Align(), 16u);
+ EXPECT_EQ(sem->Members()[2]->Size(), 32u);
+ EXPECT_EQ(sem->Members()[3]->Offset(), 64u);
+ EXPECT_EQ(sem->Members()[3]->Align(), 32u);
+ EXPECT_EQ(sem->Members()[3]->Size(), 4u);
}
TEST_F(ResolverStructLayoutTest, StructWithLotsOfPadding) {
- auto* s = Structure("S", {
- Member("a", ty.i32(), {MemberAlign(1024)}),
- });
-
- ASSERT_TRUE(r()->Resolve()) << r()->error();
-
- auto* sem = TypeOf(s)->As<sem::Struct>();
- ASSERT_NE(sem, nullptr);
- EXPECT_EQ(sem->Size(), 1024u);
- EXPECT_EQ(sem->SizeNoPadding(), 4u);
- EXPECT_EQ(sem->Align(), 1024u);
- ASSERT_EQ(sem->Members().size(), 1u);
- EXPECT_EQ(sem->Members()[0]->Offset(), 0u);
- EXPECT_EQ(sem->Members()[0]->Align(), 1024u);
- EXPECT_EQ(sem->Members()[0]->Size(), 4u);
+ auto* s = Structure("S", {
+ Member("a", ty.i32(), {MemberAlign(1024)}),
+ });
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
+
+ auto* sem = TypeOf(s)->As<sem::Struct>();
+ ASSERT_NE(sem, nullptr);
+ EXPECT_EQ(sem->Size(), 1024u);
+ EXPECT_EQ(sem->SizeNoPadding(), 4u);
+ EXPECT_EQ(sem->Align(), 1024u);
+ ASSERT_EQ(sem->Members().size(), 1u);
+ EXPECT_EQ(sem->Members()[0]->Offset(), 0u);
+ EXPECT_EQ(sem->Members()[0]->Align(), 1024u);
+ EXPECT_EQ(sem->Members()[0]->Size(), 4u);
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/resolver/struct_pipeline_stage_use_test.cc b/chromium/third_party/dawn/src/tint/resolver/struct_pipeline_stage_use_test.cc
index b843cead415..1ca80dffecd 100644
--- a/chromium/third_party/dawn/src/tint/resolver/struct_pipeline_stage_use_test.cc
+++ b/chromium/third_party/dawn/src/tint/resolver/struct_pipeline_stage_use_test.cc
@@ -21,168 +21,163 @@
using ::testing::UnorderedElementsAre;
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::resolver {
namespace {
using ResolverPipelineStageUseTest = ResolverTest;
TEST_F(ResolverPipelineStageUseTest, UnusedStruct) {
- auto* s = Structure("S", {Member("a", ty.f32(), {Location(0)})});
+ auto* s = Structure("S", {Member("a", ty.f32(), {Location(0)})});
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = TypeOf(s)->As<sem::Struct>();
- ASSERT_NE(sem, nullptr);
- EXPECT_TRUE(sem->PipelineStageUses().empty());
+ auto* sem = TypeOf(s)->As<sem::Struct>();
+ ASSERT_NE(sem, nullptr);
+ EXPECT_TRUE(sem->PipelineStageUses().empty());
}
TEST_F(ResolverPipelineStageUseTest, StructUsedAsNonEntryPointParam) {
- auto* s = Structure("S", {Member("a", ty.f32(), {Location(0)})});
+ auto* s = Structure("S", {Member("a", ty.f32(), {Location(0)})});
- Func("foo", {Param("param", ty.Of(s))}, ty.void_(), {}, {});
+ Func("foo", {Param("param", ty.Of(s))}, ty.void_(), {}, {});
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = TypeOf(s)->As<sem::Struct>();
- ASSERT_NE(sem, nullptr);
- EXPECT_TRUE(sem->PipelineStageUses().empty());
+ auto* sem = TypeOf(s)->As<sem::Struct>();
+ ASSERT_NE(sem, nullptr);
+ EXPECT_TRUE(sem->PipelineStageUses().empty());
}
TEST_F(ResolverPipelineStageUseTest, StructUsedAsNonEntryPointReturnType) {
- auto* s = Structure("S", {Member("a", ty.f32(), {Location(0)})});
+ auto* s = Structure("S", {Member("a", ty.f32(), {Location(0)})});
- Func("foo", {}, ty.Of(s), {Return(Construct(ty.Of(s), Expr(0.f)))}, {});
+ Func("foo", {}, ty.Of(s), {Return(Construct(ty.Of(s), Expr(0_f)))}, {});
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = TypeOf(s)->As<sem::Struct>();
- ASSERT_NE(sem, nullptr);
- EXPECT_TRUE(sem->PipelineStageUses().empty());
+ auto* sem = TypeOf(s)->As<sem::Struct>();
+ ASSERT_NE(sem, nullptr);
+ EXPECT_TRUE(sem->PipelineStageUses().empty());
}
TEST_F(ResolverPipelineStageUseTest, StructUsedAsVertexShaderParam) {
- auto* s = Structure("S", {Member("a", ty.f32(), {Location(0)})});
+ auto* s = Structure("S", {Member("a", ty.f32(), {Location(0)})});
- Func("main", {Param("param", ty.Of(s))}, ty.vec4<f32>(),
- {Return(Construct(ty.vec4<f32>()))},
- {Stage(ast::PipelineStage::kVertex)},
- {Builtin(ast::Builtin::kPosition)});
+ Func("main", {Param("param", ty.Of(s))}, ty.vec4<f32>(), {Return(Construct(ty.vec4<f32>()))},
+ {Stage(ast::PipelineStage::kVertex)}, {Builtin(ast::Builtin::kPosition)});
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = TypeOf(s)->As<sem::Struct>();
- ASSERT_NE(sem, nullptr);
- EXPECT_THAT(sem->PipelineStageUses(),
- UnorderedElementsAre(sem::PipelineStageUsage::kVertexInput));
+ auto* sem = TypeOf(s)->As<sem::Struct>();
+ ASSERT_NE(sem, nullptr);
+ EXPECT_THAT(sem->PipelineStageUses(),
+ UnorderedElementsAre(sem::PipelineStageUsage::kVertexInput));
}
TEST_F(ResolverPipelineStageUseTest, StructUsedAsVertexShaderReturnType) {
- auto* s = Structure(
- "S", {Member("a", ty.vec4<f32>(), {Builtin(ast::Builtin::kPosition)})});
+ auto* s = Structure("S", {Member("a", ty.vec4<f32>(), {Builtin(ast::Builtin::kPosition)})});
- Func("main", {}, ty.Of(s), {Return(Construct(ty.Of(s)))},
- {Stage(ast::PipelineStage::kVertex)});
+ Func("main", {}, ty.Of(s), {Return(Construct(ty.Of(s)))}, {Stage(ast::PipelineStage::kVertex)});
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = TypeOf(s)->As<sem::Struct>();
- ASSERT_NE(sem, nullptr);
- EXPECT_THAT(sem->PipelineStageUses(),
- UnorderedElementsAre(sem::PipelineStageUsage::kVertexOutput));
+ auto* sem = TypeOf(s)->As<sem::Struct>();
+ ASSERT_NE(sem, nullptr);
+ EXPECT_THAT(sem->PipelineStageUses(),
+ UnorderedElementsAre(sem::PipelineStageUsage::kVertexOutput));
}
TEST_F(ResolverPipelineStageUseTest, StructUsedAsFragmentShaderParam) {
- auto* s = Structure("S", {Member("a", ty.f32(), {Location(0)})});
+ auto* s = Structure("S", {Member("a", ty.f32(), {Location(0)})});
- Func("main", {Param("param", ty.Of(s))}, ty.void_(), {},
- {Stage(ast::PipelineStage::kFragment)});
+ Func("main", {Param("param", ty.Of(s))}, ty.void_(), {},
+ {Stage(ast::PipelineStage::kFragment)});
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = TypeOf(s)->As<sem::Struct>();
- ASSERT_NE(sem, nullptr);
- EXPECT_THAT(sem->PipelineStageUses(),
- UnorderedElementsAre(sem::PipelineStageUsage::kFragmentInput));
+ auto* sem = TypeOf(s)->As<sem::Struct>();
+ ASSERT_NE(sem, nullptr);
+ EXPECT_THAT(sem->PipelineStageUses(),
+ UnorderedElementsAre(sem::PipelineStageUsage::kFragmentInput));
}
TEST_F(ResolverPipelineStageUseTest, StructUsedAsFragmentShaderReturnType) {
- auto* s = Structure("S", {Member("a", ty.f32(), {Location(0)})});
+ auto* s = Structure("S", {Member("a", ty.f32(), {Location(0)})});
- Func("main", {}, ty.Of(s), {Return(Construct(ty.Of(s), Expr(0.f)))},
- {Stage(ast::PipelineStage::kFragment)});
+ Func("main", {}, ty.Of(s), {Return(Construct(ty.Of(s), Expr(0_f)))},
+ {Stage(ast::PipelineStage::kFragment)});
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = TypeOf(s)->As<sem::Struct>();
- ASSERT_NE(sem, nullptr);
- EXPECT_THAT(sem->PipelineStageUses(),
- UnorderedElementsAre(sem::PipelineStageUsage::kFragmentOutput));
+ auto* sem = TypeOf(s)->As<sem::Struct>();
+ ASSERT_NE(sem, nullptr);
+ EXPECT_THAT(sem->PipelineStageUses(),
+ UnorderedElementsAre(sem::PipelineStageUsage::kFragmentOutput));
}
TEST_F(ResolverPipelineStageUseTest, StructUsedAsComputeShaderParam) {
- auto* s = Structure(
- "S",
- {Member("a", ty.u32(), {Builtin(ast::Builtin::kLocalInvocationIndex)})});
+ auto* s =
+ Structure("S", {Member("a", ty.u32(), {Builtin(ast::Builtin::kLocalInvocationIndex)})});
- Func("main", {Param("param", ty.Of(s))}, ty.void_(), {},
- {Stage(ast::PipelineStage::kCompute), WorkgroupSize(1)});
+ Func("main", {Param("param", ty.Of(s))}, ty.void_(), {},
+ {Stage(ast::PipelineStage::kCompute), WorkgroupSize(1_i)});
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = TypeOf(s)->As<sem::Struct>();
- ASSERT_NE(sem, nullptr);
- EXPECT_THAT(sem->PipelineStageUses(),
- UnorderedElementsAre(sem::PipelineStageUsage::kComputeInput));
+ auto* sem = TypeOf(s)->As<sem::Struct>();
+ ASSERT_NE(sem, nullptr);
+ EXPECT_THAT(sem->PipelineStageUses(),
+ UnorderedElementsAre(sem::PipelineStageUsage::kComputeInput));
}
TEST_F(ResolverPipelineStageUseTest, StructUsedMultipleStages) {
- auto* s = Structure(
- "S", {Member("a", ty.vec4<f32>(), {Builtin(ast::Builtin::kPosition)})});
+ auto* s = Structure("S", {Member("a", ty.vec4<f32>(), {Builtin(ast::Builtin::kPosition)})});
- Func("vert_main", {}, ty.Of(s), {Return(Construct(ty.Of(s)))},
- {Stage(ast::PipelineStage::kVertex)});
+ Func("vert_main", {}, ty.Of(s), {Return(Construct(ty.Of(s)))},
+ {Stage(ast::PipelineStage::kVertex)});
- Func("frag_main", {Param("param", ty.Of(s))}, ty.void_(), {},
- {Stage(ast::PipelineStage::kFragment)});
+ Func("frag_main", {Param("param", ty.Of(s))}, ty.void_(), {},
+ {Stage(ast::PipelineStage::kFragment)});
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = TypeOf(s)->As<sem::Struct>();
- ASSERT_NE(sem, nullptr);
- EXPECT_THAT(sem->PipelineStageUses(),
- UnorderedElementsAre(sem::PipelineStageUsage::kVertexOutput,
- sem::PipelineStageUsage::kFragmentInput));
+ auto* sem = TypeOf(s)->As<sem::Struct>();
+ ASSERT_NE(sem, nullptr);
+ EXPECT_THAT(sem->PipelineStageUses(),
+ UnorderedElementsAre(sem::PipelineStageUsage::kVertexOutput,
+ sem::PipelineStageUsage::kFragmentInput));
}
TEST_F(ResolverPipelineStageUseTest, StructUsedAsShaderParamViaAlias) {
- auto* s = Structure("S", {Member("a", ty.f32(), {Location(0)})});
- auto* s_alias = Alias("S_alias", ty.Of(s));
+ auto* s = Structure("S", {Member("a", ty.f32(), {Location(0)})});
+ auto* s_alias = Alias("S_alias", ty.Of(s));
- Func("main", {Param("param", ty.Of(s_alias))}, ty.void_(), {},
- {Stage(ast::PipelineStage::kFragment)});
+ Func("main", {Param("param", ty.Of(s_alias))}, ty.void_(), {},
+ {Stage(ast::PipelineStage::kFragment)});
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = TypeOf(s)->As<sem::Struct>();
- ASSERT_NE(sem, nullptr);
- EXPECT_THAT(sem->PipelineStageUses(),
- UnorderedElementsAre(sem::PipelineStageUsage::kFragmentInput));
+ auto* sem = TypeOf(s)->As<sem::Struct>();
+ ASSERT_NE(sem, nullptr);
+ EXPECT_THAT(sem->PipelineStageUses(),
+ UnorderedElementsAre(sem::PipelineStageUsage::kFragmentInput));
}
TEST_F(ResolverPipelineStageUseTest, StructUsedAsShaderReturnTypeViaAlias) {
- auto* s = Structure("S", {Member("a", ty.f32(), {Location(0)})});
- auto* s_alias = Alias("S_alias", ty.Of(s));
+ auto* s = Structure("S", {Member("a", ty.f32(), {Location(0)})});
+ auto* s_alias = Alias("S_alias", ty.Of(s));
- Func("main", {}, ty.Of(s_alias),
- {Return(Construct(ty.Of(s_alias), Expr(0.f)))},
- {Stage(ast::PipelineStage::kFragment)});
+ Func("main", {}, ty.Of(s_alias), {Return(Construct(ty.Of(s_alias), Expr(0_f)))},
+ {Stage(ast::PipelineStage::kFragment)});
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = TypeOf(s)->As<sem::Struct>();
- ASSERT_NE(sem, nullptr);
- EXPECT_THAT(sem->PipelineStageUses(),
- UnorderedElementsAre(sem::PipelineStageUsage::kFragmentOutput));
+ auto* sem = TypeOf(s)->As<sem::Struct>();
+ ASSERT_NE(sem, nullptr);
+ EXPECT_THAT(sem->PipelineStageUses(),
+ UnorderedElementsAre(sem::PipelineStageUsage::kFragmentOutput));
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/resolver/struct_storage_class_use_test.cc b/chromium/third_party/dawn/src/tint/resolver/struct_storage_class_use_test.cc
index bc7e7a090ae..72ed546fccf 100644
--- a/chromium/third_party/dawn/src/tint/resolver/struct_storage_class_use_test.cc
+++ b/chromium/third_party/dawn/src/tint/resolver/struct_storage_class_use_test.cc
@@ -20,173 +20,164 @@
using ::testing::UnorderedElementsAre;
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::resolver {
namespace {
using ResolverStorageClassUseTest = ResolverTest;
TEST_F(ResolverStorageClassUseTest, UnreachableStruct) {
- auto* s = Structure("S", {Member("a", ty.f32())});
+ auto* s = Structure("S", {Member("a", ty.f32())});
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = TypeOf(s)->As<sem::Struct>();
- ASSERT_NE(sem, nullptr);
- EXPECT_TRUE(sem->StorageClassUsage().empty());
+ auto* sem = TypeOf(s)->As<sem::Struct>();
+ ASSERT_NE(sem, nullptr);
+ EXPECT_TRUE(sem->StorageClassUsage().empty());
}
TEST_F(ResolverStorageClassUseTest, StructReachableFromParameter) {
- auto* s = Structure("S", {Member("a", ty.f32())});
+ auto* s = Structure("S", {Member("a", ty.f32())});
- Func("f", {Param("param", ty.Of(s))}, ty.void_(), {}, {});
+ Func("f", {Param("param", ty.Of(s))}, ty.void_(), {}, {});
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = TypeOf(s)->As<sem::Struct>();
- ASSERT_NE(sem, nullptr);
- EXPECT_THAT(sem->StorageClassUsage(),
- UnorderedElementsAre(ast::StorageClass::kNone));
+ auto* sem = TypeOf(s)->As<sem::Struct>();
+ ASSERT_NE(sem, nullptr);
+ EXPECT_THAT(sem->StorageClassUsage(), UnorderedElementsAre(ast::StorageClass::kNone));
}
TEST_F(ResolverStorageClassUseTest, StructReachableFromReturnType) {
- auto* s = Structure("S", {Member("a", ty.f32())});
+ auto* s = Structure("S", {Member("a", ty.f32())});
- Func("f", {}, ty.Of(s), {Return(Construct(ty.Of(s)))}, {});
+ Func("f", {}, ty.Of(s), {Return(Construct(ty.Of(s)))}, {});
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = TypeOf(s)->As<sem::Struct>();
- ASSERT_NE(sem, nullptr);
- EXPECT_THAT(sem->StorageClassUsage(),
- UnorderedElementsAre(ast::StorageClass::kNone));
+ auto* sem = TypeOf(s)->As<sem::Struct>();
+ ASSERT_NE(sem, nullptr);
+ EXPECT_THAT(sem->StorageClassUsage(), UnorderedElementsAre(ast::StorageClass::kNone));
}
TEST_F(ResolverStorageClassUseTest, StructReachableFromGlobal) {
- auto* s = Structure("S", {Member("a", ty.f32())});
+ auto* s = Structure("S", {Member("a", ty.f32())});
- Global("g", ty.Of(s), ast::StorageClass::kPrivate);
+ Global("g", ty.Of(s), ast::StorageClass::kPrivate);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = TypeOf(s)->As<sem::Struct>();
- ASSERT_NE(sem, nullptr);
- EXPECT_THAT(sem->StorageClassUsage(),
- UnorderedElementsAre(ast::StorageClass::kPrivate));
+ auto* sem = TypeOf(s)->As<sem::Struct>();
+ ASSERT_NE(sem, nullptr);
+ EXPECT_THAT(sem->StorageClassUsage(), UnorderedElementsAre(ast::StorageClass::kPrivate));
}
TEST_F(ResolverStorageClassUseTest, StructReachableViaGlobalAlias) {
- auto* s = Structure("S", {Member("a", ty.f32())});
- auto* a = Alias("A", ty.Of(s));
- Global("g", ty.Of(a), ast::StorageClass::kPrivate);
+ auto* s = Structure("S", {Member("a", ty.f32())});
+ auto* a = Alias("A", ty.Of(s));
+ Global("g", ty.Of(a), ast::StorageClass::kPrivate);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = TypeOf(s)->As<sem::Struct>();
- ASSERT_NE(sem, nullptr);
- EXPECT_THAT(sem->StorageClassUsage(),
- UnorderedElementsAre(ast::StorageClass::kPrivate));
+ auto* sem = TypeOf(s)->As<sem::Struct>();
+ ASSERT_NE(sem, nullptr);
+ EXPECT_THAT(sem->StorageClassUsage(), UnorderedElementsAre(ast::StorageClass::kPrivate));
}
TEST_F(ResolverStorageClassUseTest, StructReachableViaGlobalStruct) {
- auto* s = Structure("S", {Member("a", ty.f32())});
- auto* o = Structure("O", {Member("a", ty.Of(s))});
- Global("g", ty.Of(o), ast::StorageClass::kPrivate);
+ auto* s = Structure("S", {Member("a", ty.f32())});
+ auto* o = Structure("O", {Member("a", ty.Of(s))});
+ Global("g", ty.Of(o), ast::StorageClass::kPrivate);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = TypeOf(s)->As<sem::Struct>();
- ASSERT_NE(sem, nullptr);
- EXPECT_THAT(sem->StorageClassUsage(),
- UnorderedElementsAre(ast::StorageClass::kPrivate));
+ auto* sem = TypeOf(s)->As<sem::Struct>();
+ ASSERT_NE(sem, nullptr);
+ EXPECT_THAT(sem->StorageClassUsage(), UnorderedElementsAre(ast::StorageClass::kPrivate));
}
TEST_F(ResolverStorageClassUseTest, StructReachableViaGlobalArray) {
- auto* s = Structure("S", {Member("a", ty.f32())});
- auto* a = ty.array(ty.Of(s), 3);
- Global("g", a, ast::StorageClass::kPrivate);
+ auto* s = Structure("S", {Member("a", ty.f32())});
+ auto* a = ty.array(ty.Of(s), 3_u);
+ Global("g", a, ast::StorageClass::kPrivate);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = TypeOf(s)->As<sem::Struct>();
- ASSERT_NE(sem, nullptr);
- EXPECT_THAT(sem->StorageClassUsage(),
- UnorderedElementsAre(ast::StorageClass::kPrivate));
+ auto* sem = TypeOf(s)->As<sem::Struct>();
+ ASSERT_NE(sem, nullptr);
+ EXPECT_THAT(sem->StorageClassUsage(), UnorderedElementsAre(ast::StorageClass::kPrivate));
}
TEST_F(ResolverStorageClassUseTest, StructReachableFromLocal) {
- auto* s = Structure("S", {Member("a", ty.f32())});
+ auto* s = Structure("S", {Member("a", ty.f32())});
- WrapInFunction(Var("g", ty.Of(s)));
+ WrapInFunction(Var("g", ty.Of(s)));
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = TypeOf(s)->As<sem::Struct>();
- ASSERT_NE(sem, nullptr);
- EXPECT_THAT(sem->StorageClassUsage(),
- UnorderedElementsAre(ast::StorageClass::kFunction));
+ auto* sem = TypeOf(s)->As<sem::Struct>();
+ ASSERT_NE(sem, nullptr);
+ EXPECT_THAT(sem->StorageClassUsage(), UnorderedElementsAre(ast::StorageClass::kFunction));
}
TEST_F(ResolverStorageClassUseTest, StructReachableViaLocalAlias) {
- auto* s = Structure("S", {Member("a", ty.f32())});
- auto* a = Alias("A", ty.Of(s));
- WrapInFunction(Var("g", ty.Of(a)));
+ auto* s = Structure("S", {Member("a", ty.f32())});
+ auto* a = Alias("A", ty.Of(s));
+ WrapInFunction(Var("g", ty.Of(a)));
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = TypeOf(s)->As<sem::Struct>();
- ASSERT_NE(sem, nullptr);
- EXPECT_THAT(sem->StorageClassUsage(),
- UnorderedElementsAre(ast::StorageClass::kFunction));
+ auto* sem = TypeOf(s)->As<sem::Struct>();
+ ASSERT_NE(sem, nullptr);
+ EXPECT_THAT(sem->StorageClassUsage(), UnorderedElementsAre(ast::StorageClass::kFunction));
}
TEST_F(ResolverStorageClassUseTest, StructReachableViaLocalStruct) {
- auto* s = Structure("S", {Member("a", ty.f32())});
- auto* o = Structure("O", {Member("a", ty.Of(s))});
- WrapInFunction(Var("g", ty.Of(o)));
+ auto* s = Structure("S", {Member("a", ty.f32())});
+ auto* o = Structure("O", {Member("a", ty.Of(s))});
+ WrapInFunction(Var("g", ty.Of(o)));
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = TypeOf(s)->As<sem::Struct>();
- ASSERT_NE(sem, nullptr);
- EXPECT_THAT(sem->StorageClassUsage(),
- UnorderedElementsAre(ast::StorageClass::kFunction));
+ auto* sem = TypeOf(s)->As<sem::Struct>();
+ ASSERT_NE(sem, nullptr);
+ EXPECT_THAT(sem->StorageClassUsage(), UnorderedElementsAre(ast::StorageClass::kFunction));
}
TEST_F(ResolverStorageClassUseTest, StructReachableViaLocalArray) {
- auto* s = Structure("S", {Member("a", ty.f32())});
- auto* a = ty.array(ty.Of(s), 3);
- WrapInFunction(Var("g", a));
+ auto* s = Structure("S", {Member("a", ty.f32())});
+ auto* a = ty.array(ty.Of(s), 3_u);
+ WrapInFunction(Var("g", a));
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* sem = TypeOf(s)->As<sem::Struct>();
- ASSERT_NE(sem, nullptr);
- EXPECT_THAT(sem->StorageClassUsage(),
- UnorderedElementsAre(ast::StorageClass::kFunction));
+ auto* sem = TypeOf(s)->As<sem::Struct>();
+ ASSERT_NE(sem, nullptr);
+ EXPECT_THAT(sem->StorageClassUsage(), UnorderedElementsAre(ast::StorageClass::kFunction));
}
TEST_F(ResolverStorageClassUseTest, StructMultipleStorageClassUses) {
- auto* s = Structure("S", {Member("a", ty.f32())});
- Global("x", ty.Of(s), ast::StorageClass::kUniform,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(0),
- });
- Global("y", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kRead,
- ast::AttributeList{
- create<ast::BindingAttribute>(1),
- create<ast::GroupAttribute>(0),
- });
- WrapInFunction(Var("g", ty.Of(s)));
-
- ASSERT_TRUE(r()->Resolve()) << r()->error();
-
- auto* sem = TypeOf(s)->As<sem::Struct>();
- ASSERT_NE(sem, nullptr);
- EXPECT_THAT(sem->StorageClassUsage(),
- UnorderedElementsAre(ast::StorageClass::kUniform,
- ast::StorageClass::kStorage,
- ast::StorageClass::kFunction));
+ auto* s = Structure("S", {Member("a", ty.f32())});
+ Global("x", ty.Of(s), ast::StorageClass::kUniform,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(0),
+ });
+ Global("y", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kRead,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(1),
+ create<ast::GroupAttribute>(0),
+ });
+ WrapInFunction(Var("g", ty.Of(s)));
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
+
+ auto* sem = TypeOf(s)->As<sem::Struct>();
+ ASSERT_NE(sem, nullptr);
+ EXPECT_THAT(sem->StorageClassUsage(),
+ UnorderedElementsAre(ast::StorageClass::kUniform, ast::StorageClass::kStorage,
+ ast::StorageClass::kFunction));
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/resolver/type_constructor_validation_test.cc b/chromium/third_party/dawn/src/tint/resolver/type_constructor_validation_test.cc
index 07bc95e86ce..3277ee854d3 100644
--- a/chromium/third_party/dawn/src/tint/resolver/type_constructor_validation_test.cc
+++ b/chromium/third_party/dawn/src/tint/resolver/type_constructor_validation_test.cc
@@ -14,10 +14,12 @@
#include "gmock/gmock.h"
#include "src/tint/resolver/resolver_test_helper.h"
-#include "src/tint/sem/reference_type.h"
+#include "src/tint/sem/reference.h"
#include "src/tint/sem/type_constructor.h"
#include "src/tint/sem/type_conversion.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::resolver {
namespace {
@@ -31,79 +33,70 @@ using builder::alias3;
using builder::CreatePtrs;
using builder::CreatePtrsFor;
using builder::DataType;
-using builder::f32;
-using builder::i32;
using builder::mat2x2;
using builder::mat2x3;
using builder::mat3x2;
using builder::mat3x3;
using builder::mat4x4;
-using builder::u32;
using builder::vec2;
using builder::vec3;
using builder::vec4;
-class ResolverTypeConstructorValidationTest : public resolver::TestHelper,
- public testing::Test {};
+class ResolverTypeConstructorValidationTest : public resolver::TestHelper, public testing::Test {};
namespace InferTypeTest {
struct Params {
- builder::ast_type_func_ptr create_rhs_ast_type;
- builder::ast_expr_func_ptr create_rhs_ast_value;
- builder::sem_type_func_ptr create_rhs_sem_type;
+ builder::ast_type_func_ptr create_rhs_ast_type;
+ builder::ast_expr_func_ptr create_rhs_ast_value;
+ builder::sem_type_func_ptr create_rhs_sem_type;
};
template <typename T>
constexpr Params ParamsFor() {
- return Params{DataType<T>::AST, DataType<T>::Expr, DataType<T>::Sem};
+ return Params{DataType<T>::AST, DataType<T>::Expr, DataType<T>::Sem};
}
TEST_F(ResolverTypeConstructorValidationTest, InferTypeTest_Simple) {
- // var a = 1;
- // var b = a;
- auto* a = Var("a", nullptr, ast::StorageClass::kNone, Expr(1));
- auto* b = Var("b", nullptr, ast::StorageClass::kNone, Expr("a"));
- auto* a_ident = Expr("a");
- auto* b_ident = Expr("b");
-
- WrapInFunction(a, b, Assign(a_ident, "a"), Assign(b_ident, "b"));
-
- ASSERT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_TRUE(TypeOf(a_ident)->Is<sem::Reference>());
- EXPECT_TRUE(
- TypeOf(a_ident)->As<sem::Reference>()->StoreType()->Is<sem::I32>());
- EXPECT_EQ(TypeOf(a_ident)->As<sem::Reference>()->StorageClass(),
- ast::StorageClass::kFunction);
- ASSERT_TRUE(TypeOf(b_ident)->Is<sem::Reference>());
- EXPECT_TRUE(
- TypeOf(b_ident)->As<sem::Reference>()->StoreType()->Is<sem::I32>());
- EXPECT_EQ(TypeOf(b_ident)->As<sem::Reference>()->StorageClass(),
- ast::StorageClass::kFunction);
+ // var a = 1i;
+ // var b = a;
+ auto* a = Var("a", nullptr, ast::StorageClass::kNone, Expr(1_i));
+ auto* b = Var("b", nullptr, ast::StorageClass::kNone, Expr("a"));
+ auto* a_ident = Expr("a");
+ auto* b_ident = Expr("b");
+
+ WrapInFunction(a, b, Assign(a_ident, "a"), Assign(b_ident, "b"));
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(TypeOf(a_ident)->Is<sem::Reference>());
+ EXPECT_TRUE(TypeOf(a_ident)->As<sem::Reference>()->StoreType()->Is<sem::I32>());
+ EXPECT_EQ(TypeOf(a_ident)->As<sem::Reference>()->StorageClass(), ast::StorageClass::kFunction);
+ ASSERT_TRUE(TypeOf(b_ident)->Is<sem::Reference>());
+ EXPECT_TRUE(TypeOf(b_ident)->As<sem::Reference>()->StoreType()->Is<sem::I32>());
+ EXPECT_EQ(TypeOf(b_ident)->As<sem::Reference>()->StorageClass(), ast::StorageClass::kFunction);
}
using InferTypeTest_FromConstructorExpression = ResolverTestWithParam<Params>;
TEST_P(InferTypeTest_FromConstructorExpression, All) {
- // e.g. for vec3<f32>
- // {
- // var a = vec3<f32>(0.0, 0.0, 0.0)
- // }
- auto& params = GetParam();
+ // e.g. for vec3<f32>
+ // {
+ // var a = vec3<f32>(0.0, 0.0, 0.0)
+ // }
+ auto& params = GetParam();
- auto* constructor_expr = params.create_rhs_ast_value(*this, 0);
+ auto* constructor_expr = params.create_rhs_ast_value(*this, 0);
- auto* a = Var("a", nullptr, ast::StorageClass::kNone, constructor_expr);
- // Self-assign 'a' to force the expression to be resolved so we can test its
- // type below
- auto* a_ident = Expr("a");
- WrapInFunction(Decl(a), Assign(a_ident, "a"));
+ auto* a = Var("a", nullptr, ast::StorageClass::kNone, constructor_expr);
+ // Self-assign 'a' to force the expression to be resolved so we can test its
+ // type below
+ auto* a_ident = Expr("a");
+ WrapInFunction(Decl(a), Assign(a_ident, "a"));
- ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* got = TypeOf(a_ident);
- auto* expected = create<sem::Reference>(params.create_rhs_sem_type(*this),
- ast::StorageClass::kFunction,
- ast::Access::kReadWrite);
- ASSERT_EQ(got, expected) << "got: " << FriendlyName(got) << "\n"
- << "expected: " << FriendlyName(expected) << "\n";
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
+ auto* got = TypeOf(a_ident);
+ auto* expected = create<sem::Reference>(params.create_rhs_sem_type(*this),
+ ast::StorageClass::kFunction, ast::Access::kReadWrite);
+ ASSERT_EQ(got, expected) << "got: " << FriendlyName(got) << "\n"
+ << "expected: " << FriendlyName(expected) << "\n";
}
static constexpr Params from_constructor_expression_cases[] = {
@@ -130,40 +123,40 @@ INSTANTIATE_TEST_SUITE_P(ResolverTypeConstructorValidationTest,
using InferTypeTest_FromArithmeticExpression = ResolverTestWithParam<Params>;
TEST_P(InferTypeTest_FromArithmeticExpression, All) {
- // e.g. for vec3<f32>
- // {
- // var a = vec3<f32>(2.0, 2.0, 2.0) * 3.0;
- // }
- auto& params = GetParam();
-
- auto* arith_lhs_expr = params.create_rhs_ast_value(*this, 2);
- auto* arith_rhs_expr = params.create_rhs_ast_value(*this, 3);
- auto* constructor_expr = Mul(arith_lhs_expr, arith_rhs_expr);
-
- auto* a = Var("a", nullptr, constructor_expr);
- // Self-assign 'a' to force the expression to be resolved so we can test its
- // type below
- auto* a_ident = Expr("a");
- WrapInFunction(Decl(a), Assign(a_ident, "a"));
-
- ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* got = TypeOf(a_ident);
- auto* expected = create<sem::Reference>(params.create_rhs_sem_type(*this),
- ast::StorageClass::kFunction,
- ast::Access::kReadWrite);
- ASSERT_EQ(got, expected) << "got: " << FriendlyName(got) << "\n"
- << "expected: " << FriendlyName(expected) << "\n";
+ // e.g. for vec3<f32>
+ // {
+ // var a = vec3<f32>(2.0, 2.0, 2.0) * 3.0;
+ // }
+ auto& params = GetParam();
+
+ auto* arith_lhs_expr = params.create_rhs_ast_value(*this, 2);
+ auto* arith_rhs_expr = params.create_rhs_ast_value(*this, 3);
+ auto* constructor_expr = Mul(arith_lhs_expr, arith_rhs_expr);
+
+ auto* a = Var("a", nullptr, constructor_expr);
+ // Self-assign 'a' to force the expression to be resolved so we can test its
+ // type below
+ auto* a_ident = Expr("a");
+ WrapInFunction(Decl(a), Assign(a_ident, "a"));
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
+ auto* got = TypeOf(a_ident);
+ auto* expected = create<sem::Reference>(params.create_rhs_sem_type(*this),
+ ast::StorageClass::kFunction, ast::Access::kReadWrite);
+ ASSERT_EQ(got, expected) << "got: " << FriendlyName(got) << "\n"
+ << "expected: " << FriendlyName(expected) << "\n";
}
static constexpr Params from_arithmetic_expression_cases[] = {
- ParamsFor<i32>(), ParamsFor<u32>(), ParamsFor<f32>(),
- ParamsFor<vec3<f32>>(), ParamsFor<mat3x3<f32>>(),
-
- // TODO(amaiorano): Uncomment once https://crbug.com/tint/680 is fixed
- // ParamsFor<alias<ty_i32>>(),
- // ParamsFor<alias<ty_u32>>(),
- // ParamsFor<alias<ty_f32>>(),
- // ParamsFor<alias<ty_vec3<f32>>>(),
- // ParamsFor<alias<ty_mat3x3<f32>>>(),
+ ParamsFor<i32>(),
+ ParamsFor<u32>(),
+ ParamsFor<f32>(),
+ ParamsFor<vec3<f32>>(),
+ ParamsFor<mat3x3<f32>>(),
+ ParamsFor<alias<i32>>(),
+ ParamsFor<alias<u32>>(),
+ ParamsFor<alias<f32>>(),
+ ParamsFor<alias<vec3<f32>>>(),
+ ParamsFor<alias<mat3x3<f32>>>(),
};
INSTANTIATE_TEST_SUITE_P(ResolverTypeConstructorValidationTest,
InferTypeTest_FromArithmeticExpression,
@@ -171,34 +164,33 @@ INSTANTIATE_TEST_SUITE_P(ResolverTypeConstructorValidationTest,
using InferTypeTest_FromCallExpression = ResolverTestWithParam<Params>;
TEST_P(InferTypeTest_FromCallExpression, All) {
- // e.g. for vec3<f32>
- //
- // fn foo() -> vec3<f32> {
- // return vec3<f32>();
- // }
- //
- // fn bar()
- // {
- // var a = foo();
- // }
- auto& params = GetParam();
-
- Func("foo", {}, params.create_rhs_ast_type(*this),
- {Return(Construct(params.create_rhs_ast_type(*this)))}, {});
-
- auto* a = Var("a", nullptr, Call("foo"));
- // Self-assign 'a' to force the expression to be resolved so we can test its
- // type below
- auto* a_ident = Expr("a");
- WrapInFunction(Decl(a), Assign(a_ident, "a"));
-
- ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* got = TypeOf(a_ident);
- auto* expected = create<sem::Reference>(params.create_rhs_sem_type(*this),
- ast::StorageClass::kFunction,
- ast::Access::kReadWrite);
- ASSERT_EQ(got, expected) << "got: " << FriendlyName(got) << "\n"
- << "expected: " << FriendlyName(expected) << "\n";
+ // e.g. for vec3<f32>
+ //
+ // fn foo() -> vec3<f32> {
+ // return vec3<f32>();
+ // }
+ //
+ // fn bar()
+ // {
+ // var a = foo();
+ // }
+ auto& params = GetParam();
+
+ Func("foo", {}, params.create_rhs_ast_type(*this),
+ {Return(Construct(params.create_rhs_ast_type(*this)))}, {});
+
+ auto* a = Var("a", nullptr, Call("foo"));
+ // Self-assign 'a' to force the expression to be resolved so we can test its
+ // type below
+ auto* a_ident = Expr("a");
+ WrapInFunction(Decl(a), Assign(a_ident, "a"));
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
+ auto* got = TypeOf(a_ident);
+ auto* expected = create<sem::Reference>(params.create_rhs_sem_type(*this),
+ ast::StorageClass::kFunction, ast::Access::kReadWrite);
+ ASSERT_EQ(got, expected) << "got: " << FriendlyName(got) << "\n"
+ << "expected: " << FriendlyName(expected) << "\n";
}
static constexpr Params from_call_expression_cases[] = {
ParamsFor<bool>(),
@@ -226,33 +218,35 @@ INSTANTIATE_TEST_SUITE_P(ResolverTypeConstructorValidationTest,
namespace ConversionConstructTest {
enum class Kind {
- Construct,
- Conversion,
+ Construct,
+ Conversion,
};
struct Params {
- Kind kind;
- builder::ast_type_func_ptr lhs_type;
- builder::ast_type_func_ptr rhs_type;
- builder::ast_expr_func_ptr rhs_value_expr;
+ Kind kind;
+ builder::ast_type_func_ptr lhs_type;
+ builder::ast_type_func_ptr rhs_type;
+ builder::ast_expr_func_ptr rhs_value_expr;
};
template <typename LhsType, typename RhsType>
constexpr Params ParamsFor(Kind kind) {
- return Params{kind, DataType<LhsType>::AST, DataType<RhsType>::AST,
- DataType<RhsType>::Expr};
+ return Params{kind, DataType<LhsType>::AST, DataType<RhsType>::AST, DataType<RhsType>::Expr};
}
static constexpr Params valid_cases[] = {
- // Direct init (non-conversions)
- ParamsFor<bool, bool>(Kind::Construct), //
- ParamsFor<i32, i32>(Kind::Construct), //
- ParamsFor<u32, u32>(Kind::Construct), //
- ParamsFor<f32, f32>(Kind::Construct), //
- ParamsFor<vec3<bool>, vec3<bool>>(Kind::Construct), //
- ParamsFor<vec3<i32>, vec3<i32>>(Kind::Construct), //
- ParamsFor<vec3<u32>, vec3<u32>>(Kind::Construct), //
- ParamsFor<vec3<f32>, vec3<f32>>(Kind::Construct), //
+ // Identity
+ ParamsFor<bool, bool>(Kind::Construct), //
+ ParamsFor<i32, i32>(Kind::Construct), //
+ ParamsFor<u32, u32>(Kind::Construct), //
+ ParamsFor<f32, f32>(Kind::Construct), //
+ ParamsFor<vec3<bool>, vec3<bool>>(Kind::Construct), //
+ ParamsFor<vec3<i32>, vec3<i32>>(Kind::Construct), //
+ ParamsFor<vec3<u32>, vec3<u32>>(Kind::Construct), //
+ ParamsFor<vec3<f32>, vec3<f32>>(Kind::Construct), //
+ ParamsFor<mat3x3<f32>, mat3x3<f32>>(Kind::Construct), //
+ ParamsFor<mat2x3<f32>, mat2x3<f32>>(Kind::Construct), //
+ ParamsFor<mat3x2<f32>, mat3x2<f32>>(Kind::Construct), //
// Splat
ParamsFor<vec3<bool>, bool>(Kind::Construct), //
@@ -260,6 +254,10 @@ static constexpr Params valid_cases[] = {
ParamsFor<vec3<u32>, u32>(Kind::Construct), //
ParamsFor<vec3<f32>, f32>(Kind::Construct), //
+ ParamsFor<mat3x3<f32>, f32>(Kind::Construct), //
+ ParamsFor<mat2x3<f32>, f32>(Kind::Construct), //
+ ParamsFor<mat3x2<f32>, f32>(Kind::Construct), //
+
// Conversion
ParamsFor<bool, u32>(Kind::Conversion), //
ParamsFor<bool, i32>(Kind::Conversion), //
@@ -296,50 +294,50 @@ static constexpr Params valid_cases[] = {
using ConversionConstructorValidTest = ResolverTestWithParam<Params>;
TEST_P(ConversionConstructorValidTest, All) {
- auto& params = GetParam();
-
- // var a : <lhs_type1> = <lhs_type2>(<rhs_type>(<rhs_value_expr>));
- auto* lhs_type1 = params.lhs_type(*this);
- auto* lhs_type2 = params.lhs_type(*this);
- auto* rhs_type = params.rhs_type(*this);
- auto* rhs_value_expr = params.rhs_value_expr(*this, 0);
-
- std::stringstream ss;
- ss << FriendlyName(lhs_type1) << " = " << FriendlyName(lhs_type2) << "("
- << FriendlyName(rhs_type) << "(<rhs value expr>))";
- SCOPED_TRACE(ss.str());
-
- auto* arg = Construct(rhs_type, rhs_value_expr);
- auto* tc = Construct(lhs_type2, arg);
- auto* a = Var("a", lhs_type1, ast::StorageClass::kNone, tc);
-
- // Self-assign 'a' to force the expression to be resolved so we can test its
- // type below
- auto* a_ident = Expr("a");
- WrapInFunction(Decl(a), Assign(a_ident, "a"));
-
- ASSERT_TRUE(r()->Resolve()) << r()->error();
-
- auto* call = Sem().Get(tc);
- ASSERT_NE(call, nullptr);
- switch (params.kind) {
- case Kind::Construct: {
- auto* ctor = call->Target()->As<sem::TypeConstructor>();
- ASSERT_NE(ctor, nullptr);
- EXPECT_EQ(call->Type(), ctor->ReturnType());
- ASSERT_EQ(ctor->Parameters().size(), 1u);
- EXPECT_EQ(ctor->Parameters()[0]->Type(), TypeOf(arg));
- break;
+ auto& params = GetParam();
+
+ // var a : <lhs_type1> = <lhs_type2>(<rhs_type>(<rhs_value_expr>));
+ auto* lhs_type1 = params.lhs_type(*this);
+ auto* lhs_type2 = params.lhs_type(*this);
+ auto* rhs_type = params.rhs_type(*this);
+ auto* rhs_value_expr = params.rhs_value_expr(*this, 0);
+
+ std::stringstream ss;
+ ss << FriendlyName(lhs_type1) << " = " << FriendlyName(lhs_type2) << "("
+ << FriendlyName(rhs_type) << "(<rhs value expr>))";
+ SCOPED_TRACE(ss.str());
+
+ auto* arg = Construct(rhs_type, rhs_value_expr);
+ auto* tc = Construct(lhs_type2, arg);
+ auto* a = Var("a", lhs_type1, ast::StorageClass::kNone, tc);
+
+ // Self-assign 'a' to force the expression to be resolved so we can test its
+ // type below
+ auto* a_ident = Expr("a");
+ WrapInFunction(Decl(a), Assign(a_ident, "a"));
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
+
+ auto* call = Sem().Get<sem::Call>(tc);
+ ASSERT_NE(call, nullptr);
+ switch (params.kind) {
+ case Kind::Construct: {
+ auto* ctor = call->Target()->As<sem::TypeConstructor>();
+ ASSERT_NE(ctor, nullptr);
+ EXPECT_EQ(call->Type(), ctor->ReturnType());
+ ASSERT_EQ(ctor->Parameters().size(), 1u);
+ EXPECT_EQ(ctor->Parameters()[0]->Type(), TypeOf(arg));
+ break;
+ }
+ case Kind::Conversion: {
+ auto* conv = call->Target()->As<sem::TypeConversion>();
+ ASSERT_NE(conv, nullptr);
+ EXPECT_EQ(call->Type(), conv->ReturnType());
+ ASSERT_EQ(conv->Parameters().size(), 1u);
+ EXPECT_EQ(conv->Parameters()[0]->Type(), TypeOf(arg));
+ break;
+ }
}
- case Kind::Conversion: {
- auto* conv = call->Target()->As<sem::TypeConversion>();
- ASSERT_NE(conv, nullptr);
- EXPECT_EQ(call->Type(), conv->ReturnType());
- ASSERT_EQ(conv->Parameters().size(), 1u);
- EXPECT_EQ(conv->Parameters()[0]->Type(), TypeOf(arg));
- break;
- }
- }
}
INSTANTIATE_TEST_SUITE_P(ResolverTypeConstructorValidationTest,
ConversionConstructorValidTest,
@@ -365,272 +363,257 @@ constexpr CreatePtrs all_types[] = {
CreatePtrsFor<mat3x2<f32>>() //
};
-using ConversionConstructorInvalidTest =
- ResolverTestWithParam<std::tuple<CreatePtrs, // lhs
- CreatePtrs // rhs
- >>;
+using ConversionConstructorInvalidTest = ResolverTestWithParam<std::tuple<CreatePtrs, // lhs
+ CreatePtrs // rhs
+ >>;
TEST_P(ConversionConstructorInvalidTest, All) {
- auto& params = GetParam();
+ auto& params = GetParam();
- auto& lhs_params = std::get<0>(params);
- auto& rhs_params = std::get<1>(params);
+ auto& lhs_params = std::get<0>(params);
+ auto& rhs_params = std::get<1>(params);
- // Skip test for valid cases
- for (auto& v : valid_cases) {
- if (v.lhs_type == lhs_params.ast && v.rhs_type == rhs_params.ast &&
- v.rhs_value_expr == rhs_params.expr) {
- return;
+ // Skip test for valid cases
+ for (auto& v : valid_cases) {
+ if (v.lhs_type == lhs_params.ast && v.rhs_type == rhs_params.ast &&
+ v.rhs_value_expr == rhs_params.expr) {
+ return;
+ }
+ }
+ // Skip non-conversions
+ if (lhs_params.ast == rhs_params.ast) {
+ return;
}
- }
- // Skip non-conversions
- if (lhs_params.ast == rhs_params.ast) {
- return;
- }
- // var a : <lhs_type1> = <lhs_type2>(<rhs_type>(<rhs_value_expr>));
- auto* lhs_type1 = lhs_params.ast(*this);
- auto* lhs_type2 = lhs_params.ast(*this);
- auto* rhs_type = rhs_params.ast(*this);
- auto* rhs_value_expr = rhs_params.expr(*this, 0);
+ // var a : <lhs_type1> = <lhs_type2>(<rhs_type>(<rhs_value_expr>));
+ auto* lhs_type1 = lhs_params.ast(*this);
+ auto* lhs_type2 = lhs_params.ast(*this);
+ auto* rhs_type = rhs_params.ast(*this);
+ auto* rhs_value_expr = rhs_params.expr(*this, 0);
- std::stringstream ss;
- ss << FriendlyName(lhs_type1) << " = " << FriendlyName(lhs_type2) << "("
- << FriendlyName(rhs_type) << "(<rhs value expr>))";
- SCOPED_TRACE(ss.str());
+ std::stringstream ss;
+ ss << FriendlyName(lhs_type1) << " = " << FriendlyName(lhs_type2) << "("
+ << FriendlyName(rhs_type) << "(<rhs value expr>))";
+ SCOPED_TRACE(ss.str());
- auto* a = Var("a", lhs_type1, ast::StorageClass::kNone,
- Construct(lhs_type2, Construct(rhs_type, rhs_value_expr)));
+ auto* a = Var("a", lhs_type1, ast::StorageClass::kNone,
+ Construct(lhs_type2, Construct(rhs_type, rhs_value_expr)));
- // Self-assign 'a' to force the expression to be resolved so we can test its
- // type below
- auto* a_ident = Expr("a");
- WrapInFunction(Decl(a), Assign(a_ident, "a"));
+ // Self-assign 'a' to force the expression to be resolved so we can test its
+ // type below
+ auto* a_ident = Expr("a");
+ WrapInFunction(Decl(a), Assign(a_ident, "a"));
- ASSERT_FALSE(r()->Resolve());
+ ASSERT_FALSE(r()->Resolve());
}
INSTANTIATE_TEST_SUITE_P(ResolverTypeConstructorValidationTest,
ConversionConstructorInvalidTest,
testing::Combine(testing::ValuesIn(all_types),
testing::ValuesIn(all_types)));
-TEST_F(ResolverTypeConstructorValidationTest,
- ConversionConstructorInvalid_TooManyInitializers) {
- auto* a = Var("a", ty.f32(), ast::StorageClass::kNone,
- Construct(Source{{12, 34}}, ty.f32(), Expr(1.0f), Expr(2.0f)));
- WrapInFunction(a);
+TEST_F(ResolverTypeConstructorValidationTest, ConversionConstructorInvalid_TooManyInitializers) {
+ auto* a = Var("a", ty.f32(), ast::StorageClass::kNone,
+ Construct(Source{{12, 34}}, ty.f32(), Expr(1_f), Expr(2_f)));
+ WrapInFunction(a);
- ASSERT_FALSE(r()->Resolve());
- ASSERT_EQ(r()->error(),
- "12:34 error: expected zero or one value in constructor, got 2");
+ ASSERT_FALSE(r()->Resolve());
+ EXPECT_THAT(r()->error(), HasSubstr("12:34 error: no matching constructor for f32(f32, f32)"));
}
-TEST_F(ResolverTypeConstructorValidationTest,
- ConversionConstructorInvalid_InvalidInitializer) {
- auto* a =
- Var("a", ty.f32(), ast::StorageClass::kNone,
- Construct(Source{{12, 34}}, ty.f32(), Construct(ty.array<f32, 4>())));
- WrapInFunction(a);
+TEST_F(ResolverTypeConstructorValidationTest, ConversionConstructorInvalid_InvalidInitializer) {
+ auto* a = Var("a", ty.f32(), ast::StorageClass::kNone,
+ Construct(Source{{12, 34}}, ty.f32(), Construct(ty.array<f32, 4>())));
+ WrapInFunction(a);
- ASSERT_FALSE(r()->Resolve());
- ASSERT_EQ(r()->error(),
- "12:34 error: cannot construct 'f32' with a value of type "
- "'array<f32, 4>'");
+ ASSERT_FALSE(r()->Resolve());
+ EXPECT_THAT(r()->error(),
+ HasSubstr("12:34 error: no matching constructor for f32(array<f32, 4>)"));
}
} // namespace ConversionConstructTest
namespace ArrayConstructor {
-TEST_F(ResolverTypeConstructorValidationTest,
- Expr_Constructor_Array_ZeroValue_Pass) {
- // array<u32, 10>();
- auto* tc = array<u32, 10>();
- WrapInFunction(tc);
+TEST_F(ResolverTypeConstructorValidationTest, Expr_Constructor_Array_ZeroValue_Pass) {
+ // array<u32, 10u>();
+ auto* tc = array<u32, 10>();
+ WrapInFunction(tc);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* call = Sem().Get(tc);
- ASSERT_NE(call, nullptr);
- EXPECT_TRUE(call->Type()->Is<sem::Array>());
- auto* ctor = call->Target()->As<sem::TypeConstructor>();
- ASSERT_NE(ctor, nullptr);
- EXPECT_EQ(call->Type(), ctor->ReturnType());
- ASSERT_EQ(ctor->Parameters().size(), 0u);
+ auto* call = Sem().Get<sem::Call>(tc);
+ ASSERT_NE(call, nullptr);
+ EXPECT_TRUE(call->Type()->Is<sem::Array>());
+ auto* ctor = call->Target()->As<sem::TypeConstructor>();
+ ASSERT_NE(ctor, nullptr);
+ EXPECT_EQ(call->Type(), ctor->ReturnType());
+ ASSERT_EQ(ctor->Parameters().size(), 0u);
}
-TEST_F(ResolverTypeConstructorValidationTest,
- Expr_Constructor_Array_type_match) {
- // array<u32, 3>(0u, 10u. 20u);
- auto* tc = array<u32, 3>(Expr(0u), Expr(10u), Expr(20u));
- WrapInFunction(tc);
+TEST_F(ResolverTypeConstructorValidationTest, Expr_Constructor_Array_type_match) {
+ // array<u32, 3u>(0u, 10u, 20u);
+ auto* tc = array<u32, 3>(Expr(0_u), Expr(10_u), Expr(20_u));
+ WrapInFunction(tc);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* call = Sem().Get(tc);
- ASSERT_NE(call, nullptr);
- EXPECT_TRUE(call->Type()->Is<sem::Array>());
- auto* ctor = call->Target()->As<sem::TypeConstructor>();
- ASSERT_NE(ctor, nullptr);
- EXPECT_EQ(call->Type(), ctor->ReturnType());
- ASSERT_EQ(ctor->Parameters().size(), 3u);
- EXPECT_TRUE(ctor->Parameters()[0]->Type()->Is<sem::U32>());
- EXPECT_TRUE(ctor->Parameters()[1]->Type()->Is<sem::U32>());
- EXPECT_TRUE(ctor->Parameters()[2]->Type()->Is<sem::U32>());
+ auto* call = Sem().Get<sem::Call>(tc);
+ ASSERT_NE(call, nullptr);
+ EXPECT_TRUE(call->Type()->Is<sem::Array>());
+ auto* ctor = call->Target()->As<sem::TypeConstructor>();
+ ASSERT_NE(ctor, nullptr);
+ EXPECT_EQ(call->Type(), ctor->ReturnType());
+ ASSERT_EQ(ctor->Parameters().size(), 3u);
+ EXPECT_TRUE(ctor->Parameters()[0]->Type()->Is<sem::U32>());
+ EXPECT_TRUE(ctor->Parameters()[1]->Type()->Is<sem::U32>());
+ EXPECT_TRUE(ctor->Parameters()[2]->Type()->Is<sem::U32>());
}
-TEST_F(ResolverTypeConstructorValidationTest,
- Expr_Constructor_Array_type_Mismatch_U32F32) {
- // array<u32, 3>(0u, 1.0f, 20u);
- auto* tc = array<u32, 3>(Expr(0u), Expr(Source{{12, 34}}, 1.0f), Expr(20u));
- WrapInFunction(tc);
+TEST_F(ResolverTypeConstructorValidationTest, Expr_Constructor_Array_type_Mismatch_U32F32) {
+ // array<u32, 3u>(0u, 1.0f, 20u);
+ auto* tc = array<u32, 3>(Expr(0_u), Expr(Source{{12, 34}}, 1_f), Expr(20_u));
+ WrapInFunction(tc);
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: type in array constructor does not match array type: "
- "expected 'u32', found 'f32'");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: type in array constructor does not match array type: "
+ "expected 'u32', found 'f32'");
}
TEST_F(ResolverTypeConstructorValidationTest,
Expr_Constructor_Array_ScalarArgumentTypeMismatch_F32I32) {
- // array<f32, 1>(1);
- auto* tc = array<f32, 1>(Expr(Source{{12, 34}}, 1));
- WrapInFunction(tc);
+ // array<f32, 1u>(1i);
+ auto* tc = array<f32, 1>(Expr(Source{{12, 34}}, 1_i));
+ WrapInFunction(tc);
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: type in array constructor does not match array type: "
- "expected 'f32', found 'i32'");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: type in array constructor does not match array type: "
+ "expected 'f32', found 'i32'");
}
TEST_F(ResolverTypeConstructorValidationTest,
Expr_Constructor_Array_ScalarArgumentTypeMismatch_U32I32) {
- // array<u32, 6>(1, 0u, 0u, 0u, 0u, 0u);
- auto* tc = array<u32, 1>(Expr(Source{{12, 34}}, 1), Expr(0u), Expr(0u),
- Expr(0u), Expr(0u));
- WrapInFunction(tc);
+ // array<u32, 1u>(1i, 0u, 0u, 0u, 0u);
+ auto* tc =
+ array<u32, 1>(Expr(Source{{12, 34}}, 1_i), Expr(0_u), Expr(0_u), Expr(0_u), Expr(0_u));
+ WrapInFunction(tc);
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: type in array constructor does not match array type: "
- "expected 'u32', found 'i32'");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: type in array constructor does not match array type: "
+ "expected 'u32', found 'i32'");
}
TEST_F(ResolverTypeConstructorValidationTest,
Expr_Constructor_Array_ScalarArgumentTypeMismatch_Vec2) {
- // array<i32, 3>(1, vec2<i32>());
- auto* tc =
- array<i32, 3>(Expr(1), Construct(Source{{12, 34}}, ty.vec2<i32>()));
- WrapInFunction(tc);
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: type in array constructor does not match array type: "
- "expected 'i32', found 'vec2<i32>'");
+ // array<i32, 3u>(1i, vec2<i32>());
+ auto* tc = array<i32, 3>(Expr(1_i), Construct(Source{{12, 34}}, ty.vec2<i32>()));
+ WrapInFunction(tc);
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: type in array constructor does not match array type: "
+ "expected 'i32', found 'vec2<i32>'");
}
TEST_F(ResolverTypeConstructorValidationTest,
Expr_Constructor_ArrayOfVector_SubElemTypeMismatch_I32U32) {
- // array<vec3<i32>, 2>(vec3<i32>(), vec3<u32>());
- auto* e0 = vec3<i32>();
- SetSource(Source::Location({12, 34}));
- auto* e1 = vec3<u32>();
- auto* t = Construct(ty.array(ty.vec3<i32>(), 2), e0, e1);
- WrapInFunction(t);
+ // array<vec3<i32>, 2u>(vec3<i32>(), vec3<u32>());
+ auto* e0 = vec3<i32>();
+ SetSource(Source::Location({12, 34}));
+ auto* e1 = vec3<u32>();
+ auto* t = Construct(ty.array(ty.vec3<i32>(), 2_i), e0, e1);
+ WrapInFunction(t);
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: type in array constructor does not match array type: "
- "expected 'vec3<i32>', found 'vec3<u32>'");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: type in array constructor does not match array type: "
+ "expected 'vec3<i32>', found 'vec3<u32>'");
}
TEST_F(ResolverTypeConstructorValidationTest,
Expr_Constructor_ArrayOfVector_SubElemTypeMismatch_I32Bool) {
- // array<vec3<i32>, 2>(vec3<i32>(), vec3<bool>(true, true, false));
- SetSource(Source::Location({12, 34}));
- auto* e0 = vec3<bool>(true, true, false);
- auto* e1 = vec3<i32>();
- auto* t = Construct(ty.array(ty.vec3<i32>(), 2), e0, e1);
- WrapInFunction(t);
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: type in array constructor does not match array type: "
- "expected 'vec3<i32>', found 'vec3<bool>'");
-}
-
-TEST_F(ResolverTypeConstructorValidationTest,
- Expr_Constructor_ArrayOfArray_SubElemSizeMismatch) {
- // array<array<i32, 2>, 2>(array<i32, 3>(), array<i32, 2>());
- SetSource(Source::Location({12, 34}));
- auto* e0 = array<i32, 3>();
- auto* e1 = array<i32, 2>();
- auto* t = Construct(ty.array(ty.array<i32, 2>(), 2), e0, e1);
- WrapInFunction(t);
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: type in array constructor does not match array type: "
- "expected 'array<i32, 2>', found 'array<i32, 3>'");
-}
-
-TEST_F(ResolverTypeConstructorValidationTest,
- Expr_Constructor_ArrayOfArray_SubElemTypeMismatch) {
- // array<array<i32, 2>, 2>(array<i32, 2>(), array<u32, 2>());
- auto* e0 = array<i32, 2>();
- SetSource(Source::Location({12, 34}));
- auto* e1 = array<u32, 2>();
- auto* t = Construct(ty.array(ty.array<i32, 2>(), 2), e0, e1);
- WrapInFunction(t);
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: type in array constructor does not match array type: "
- "expected 'array<i32, 2>', found 'array<u32, 2>'");
-}
-
-TEST_F(ResolverTypeConstructorValidationTest,
- Expr_Constructor_Array_TooFewElements) {
- // array<i32, 4>(1, 2, 3);
- SetSource(Source::Location({12, 34}));
- auto* tc = array<i32, 4>(Expr(1), Expr(2), Expr(3));
- WrapInFunction(tc);
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: array constructor has too few elements: expected 4, "
- "found 3");
-}
-
-TEST_F(ResolverTypeConstructorValidationTest,
- Expr_Constructor_Array_TooManyElements) {
- // array<i32, 4>(1, 2, 3, 4, 5);
- SetSource(Source::Location({12, 34}));
- auto* tc = array<i32, 4>(Expr(1), Expr(2), Expr(3), Expr(4), Expr(5));
- WrapInFunction(tc);
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: array constructor has too many "
- "elements: expected 4, "
- "found 5");
+ // array<vec3<i32>, 2u>(vec3<bool>(true, true, false), vec3<i32>());
+ SetSource(Source::Location({12, 34}));
+ auto* e0 = vec3<bool>(true, true, false);
+ auto* e1 = vec3<i32>();
+ auto* t = Construct(ty.array(ty.vec3<i32>(), 2_i), e0, e1);
+ WrapInFunction(t);
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: type in array constructor does not match array type: "
+ "expected 'vec3<i32>', found 'vec3<bool>'");
+}
+
+TEST_F(ResolverTypeConstructorValidationTest, Expr_Constructor_ArrayOfArray_SubElemSizeMismatch) {
+ // array<array<i32, 2u>, 2u>(array<i32, 3u>(), array<i32, 2u>());
+ SetSource(Source::Location({12, 34}));
+ auto* e0 = array<i32, 3>();
+ auto* e1 = array<i32, 2>();
+ auto* t = Construct(ty.array(ty.array<i32, 2>(), 2_i), e0, e1);
+ WrapInFunction(t);
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: type in array constructor does not match array type: "
+ "expected 'array<i32, 2>', found 'array<i32, 3>'");
+}
+
+TEST_F(ResolverTypeConstructorValidationTest, Expr_Constructor_ArrayOfArray_SubElemTypeMismatch) {
+ // array<array<i32, 2u>, 2u>(array<i32, 2u>(), array<u32, 2u>());
+ auto* e0 = array<i32, 2>();
+ SetSource(Source::Location({12, 34}));
+ auto* e1 = array<u32, 2>();
+ auto* t = Construct(ty.array(ty.array<i32, 2>(), 2_i), e0, e1);
+ WrapInFunction(t);
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: type in array constructor does not match array type: "
+ "expected 'array<i32, 2>', found 'array<u32, 2>'");
+}
+
+TEST_F(ResolverTypeConstructorValidationTest, Expr_Constructor_Array_TooFewElements) {
+ // array<i32, 4u>(1i, 2i, 3i);
+ SetSource(Source::Location({12, 34}));
+ auto* tc = array<i32, 4>(Expr(1_i), Expr(2_i), Expr(3_i));
+ WrapInFunction(tc);
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: array constructor has too few elements: expected 4, "
+ "found 3");
+}
+
+TEST_F(ResolverTypeConstructorValidationTest, Expr_Constructor_Array_TooManyElements) {
+ // array<i32, 4u>(1i, 2i, 3i, 4i, 5i);
+ SetSource(Source::Location({12, 34}));
+ auto* tc = array<i32, 4>(Expr(1_i), Expr(2_i), Expr(3_i), Expr(4_i), Expr(5_i));
+ WrapInFunction(tc);
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: array constructor has too many "
+ "elements: expected 4, "
+ "found 5");
}
TEST_F(ResolverTypeConstructorValidationTest, Expr_Constructor_Array_Runtime) {
- // array<i32>(1);
- auto* tc = array(ty.i32(), nullptr, Expr(Source{{12, 34}}, 1));
- WrapInFunction(tc);
+ // array<i32>(1i);
+ auto* tc = array(ty.i32(), nullptr, Expr(Source{{12, 34}}, 1_i));
+ WrapInFunction(tc);
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), "error: cannot init a runtime-sized array");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "error: cannot init a runtime-sized array");
}
-TEST_F(ResolverTypeConstructorValidationTest,
- Expr_Constructor_Array_RuntimeZeroValue) {
- // array<i32>();
- auto* tc = array(ty.i32(), nullptr);
- WrapInFunction(tc);
+TEST_F(ResolverTypeConstructorValidationTest, Expr_Constructor_Array_RuntimeZeroValue) {
+ // array<i32>();
+ auto* tc = array(ty.i32(), nullptr);
+ WrapInFunction(tc);
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), "error: cannot init a runtime-sized array");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "error: cannot init a runtime-sized array");
}
} // namespace ArrayConstructor
@@ -638,111 +621,111 @@ TEST_F(ResolverTypeConstructorValidationTest,
namespace ScalarConstructor {
TEST_F(ResolverTypeConstructorValidationTest, Expr_Construct_i32_Success) {
- auto* expr = Construct<i32>(Expr(123));
- WrapInFunction(expr);
+ auto* expr = Construct<i32>(Expr(123_i));
+ WrapInFunction(expr);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(expr), nullptr);
- ASSERT_TRUE(TypeOf(expr)->Is<sem::I32>());
+ ASSERT_NE(TypeOf(expr), nullptr);
+ ASSERT_TRUE(TypeOf(expr)->Is<sem::I32>());
- auto* call = Sem().Get(expr);
- ASSERT_NE(call, nullptr);
- auto* ctor = call->Target()->As<sem::TypeConstructor>();
- ASSERT_NE(ctor, nullptr);
- EXPECT_EQ(call->Type(), ctor->ReturnType());
- ASSERT_EQ(ctor->Parameters().size(), 1u);
- EXPECT_TRUE(ctor->Parameters()[0]->Type()->Is<sem::I32>());
+ auto* call = Sem().Get<sem::Call>(expr);
+ ASSERT_NE(call, nullptr);
+ auto* ctor = call->Target()->As<sem::TypeConstructor>();
+ ASSERT_NE(ctor, nullptr);
+ EXPECT_EQ(call->Type(), ctor->ReturnType());
+ ASSERT_EQ(ctor->Parameters().size(), 1u);
+ EXPECT_TRUE(ctor->Parameters()[0]->Type()->Is<sem::I32>());
}
TEST_F(ResolverTypeConstructorValidationTest, Expr_Construct_u32_Success) {
- auto* expr = Construct<u32>(Expr(123u));
- WrapInFunction(expr);
+ auto* expr = Construct<u32>(Expr(123_u));
+ WrapInFunction(expr);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(expr), nullptr);
- ASSERT_TRUE(TypeOf(expr)->Is<sem::U32>());
+ ASSERT_NE(TypeOf(expr), nullptr);
+ ASSERT_TRUE(TypeOf(expr)->Is<sem::U32>());
- auto* call = Sem().Get(expr);
- ASSERT_NE(call, nullptr);
- auto* ctor = call->Target()->As<sem::TypeConstructor>();
- ASSERT_NE(ctor, nullptr);
- EXPECT_EQ(call->Type(), ctor->ReturnType());
- ASSERT_EQ(ctor->Parameters().size(), 1u);
- EXPECT_TRUE(ctor->Parameters()[0]->Type()->Is<sem::U32>());
+ auto* call = Sem().Get<sem::Call>(expr);
+ ASSERT_NE(call, nullptr);
+ auto* ctor = call->Target()->As<sem::TypeConstructor>();
+ ASSERT_NE(ctor, nullptr);
+ EXPECT_EQ(call->Type(), ctor->ReturnType());
+ ASSERT_EQ(ctor->Parameters().size(), 1u);
+ EXPECT_TRUE(ctor->Parameters()[0]->Type()->Is<sem::U32>());
}
TEST_F(ResolverTypeConstructorValidationTest, Expr_Construct_f32_Success) {
- auto* expr = Construct<f32>(Expr(1.23f));
- WrapInFunction(expr);
+ auto* expr = Construct<f32>(Expr(1.23_f));
+ WrapInFunction(expr);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(expr), nullptr);
- ASSERT_TRUE(TypeOf(expr)->Is<sem::F32>());
+ ASSERT_NE(TypeOf(expr), nullptr);
+ ASSERT_TRUE(TypeOf(expr)->Is<sem::F32>());
- auto* call = Sem().Get(expr);
- ASSERT_NE(call, nullptr);
- auto* ctor = call->Target()->As<sem::TypeConstructor>();
- ASSERT_NE(ctor, nullptr);
- EXPECT_EQ(call->Type(), ctor->ReturnType());
- ASSERT_EQ(ctor->Parameters().size(), 1u);
- EXPECT_TRUE(ctor->Parameters()[0]->Type()->Is<sem::F32>());
+ auto* call = Sem().Get<sem::Call>(expr);
+ ASSERT_NE(call, nullptr);
+ auto* ctor = call->Target()->As<sem::TypeConstructor>();
+ ASSERT_NE(ctor, nullptr);
+ EXPECT_EQ(call->Type(), ctor->ReturnType());
+ ASSERT_EQ(ctor->Parameters().size(), 1u);
+ EXPECT_TRUE(ctor->Parameters()[0]->Type()->Is<sem::F32>());
}
TEST_F(ResolverTypeConstructorValidationTest, Expr_Convert_f32_to_i32_Success) {
- auto* expr = Construct<i32>(1.23f);
- WrapInFunction(expr);
+ auto* expr = Construct<i32>(1.23_f);
+ WrapInFunction(expr);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(expr), nullptr);
- ASSERT_TRUE(TypeOf(expr)->Is<sem::I32>());
+ ASSERT_NE(TypeOf(expr), nullptr);
+ ASSERT_TRUE(TypeOf(expr)->Is<sem::I32>());
- auto* call = Sem().Get(expr);
- ASSERT_NE(call, nullptr);
- auto* ctor = call->Target()->As<sem::TypeConversion>();
- ASSERT_NE(ctor, nullptr);
- EXPECT_EQ(call->Type(), ctor->ReturnType());
- ASSERT_EQ(ctor->Parameters().size(), 1u);
- EXPECT_TRUE(ctor->Parameters()[0]->Type()->Is<sem::F32>());
+ auto* call = Sem().Get<sem::Call>(expr);
+ ASSERT_NE(call, nullptr);
+ auto* ctor = call->Target()->As<sem::TypeConversion>();
+ ASSERT_NE(ctor, nullptr);
+ EXPECT_EQ(call->Type(), ctor->ReturnType());
+ ASSERT_EQ(ctor->Parameters().size(), 1u);
+ EXPECT_TRUE(ctor->Parameters()[0]->Type()->Is<sem::F32>());
}
TEST_F(ResolverTypeConstructorValidationTest, Expr_Convert_i32_to_u32_Success) {
- auto* expr = Construct<u32>(123);
- WrapInFunction(expr);
+ auto* expr = Construct<u32>(123_i);
+ WrapInFunction(expr);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(expr), nullptr);
- ASSERT_TRUE(TypeOf(expr)->Is<sem::U32>());
+ ASSERT_NE(TypeOf(expr), nullptr);
+ ASSERT_TRUE(TypeOf(expr)->Is<sem::U32>());
- auto* call = Sem().Get(expr);
- ASSERT_NE(call, nullptr);
- auto* ctor = call->Target()->As<sem::TypeConversion>();
- ASSERT_NE(ctor, nullptr);
- EXPECT_EQ(call->Type(), ctor->ReturnType());
- ASSERT_EQ(ctor->Parameters().size(), 1u);
- EXPECT_TRUE(ctor->Parameters()[0]->Type()->Is<sem::I32>());
+ auto* call = Sem().Get<sem::Call>(expr);
+ ASSERT_NE(call, nullptr);
+ auto* ctor = call->Target()->As<sem::TypeConversion>();
+ ASSERT_NE(ctor, nullptr);
+ EXPECT_EQ(call->Type(), ctor->ReturnType());
+ ASSERT_EQ(ctor->Parameters().size(), 1u);
+ EXPECT_TRUE(ctor->Parameters()[0]->Type()->Is<sem::I32>());
}
TEST_F(ResolverTypeConstructorValidationTest, Expr_Convert_u32_to_f32_Success) {
- auto* expr = Construct<f32>(123u);
- WrapInFunction(expr);
+ auto* expr = Construct<f32>(123_u);
+ WrapInFunction(expr);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(expr), nullptr);
- ASSERT_TRUE(TypeOf(expr)->Is<sem::F32>());
+ ASSERT_NE(TypeOf(expr), nullptr);
+ ASSERT_TRUE(TypeOf(expr)->Is<sem::F32>());
- auto* call = Sem().Get(expr);
- ASSERT_NE(call, nullptr);
- auto* ctor = call->Target()->As<sem::TypeConversion>();
- ASSERT_NE(ctor, nullptr);
- EXPECT_EQ(call->Type(), ctor->ReturnType());
- ASSERT_EQ(ctor->Parameters().size(), 1u);
- EXPECT_TRUE(ctor->Parameters()[0]->Type()->Is<sem::U32>());
+ auto* call = Sem().Get<sem::Call>(expr);
+ ASSERT_NE(call, nullptr);
+ auto* ctor = call->Target()->As<sem::TypeConversion>();
+ ASSERT_NE(ctor, nullptr);
+ EXPECT_EQ(call->Type(), ctor->ReturnType());
+ ASSERT_EQ(ctor->Parameters().size(), 1u);
+ EXPECT_TRUE(ctor->Parameters()[0]->Type()->Is<sem::U32>());
}
} // namespace ScalarConstructor
@@ -751,1951 +734,1734 @@ namespace VectorConstructor {
TEST_F(ResolverTypeConstructorValidationTest,
Expr_Constructor_Vec2F32_Error_ScalarArgumentTypeMismatch) {
- auto* tc = vec2<f32>(Expr(Source{{12, 34}}, 1), 1.0f);
- WrapInFunction(tc);
+ WrapInFunction(Construct(Source{{12, 34}}, ty.vec2<f32>(), 1_i, 2_f));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: type in vector constructor does not match vector "
- "type: expected 'f32', found 'i32'");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_THAT(r()->error(),
+ HasSubstr("12:34 error: no matching constructor for vec2<f32>(i32, f32)"));
}
TEST_F(ResolverTypeConstructorValidationTest,
Expr_Constructor_Vec2U32_Error_ScalarArgumentTypeMismatch) {
- auto* tc = vec2<u32>(1u, Expr(Source{{12, 34}}, 1));
- WrapInFunction(tc);
+ WrapInFunction(Construct(Source{{12, 34}}, ty.vec2<u32>(), 1_u, 2_i));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: type in vector constructor does not match vector "
- "type: expected 'u32', found 'i32'");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_THAT(r()->error(),
+ HasSubstr("12:34 error: no matching constructor for vec2<u32>(u32, i32)"));
}
TEST_F(ResolverTypeConstructorValidationTest,
Expr_Constructor_Vec2I32_Error_ScalarArgumentTypeMismatch) {
- auto* tc = vec2<i32>(Expr(Source{{12, 34}}, 1u), 1);
- WrapInFunction(tc);
+ WrapInFunction(Construct(Source{{12, 34}}, ty.vec2<i32>(), 1_u, 2_i));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: type in vector constructor does not match vector "
- "type: expected 'i32', found 'u32'");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_THAT(r()->error(),
+ HasSubstr("12:34 error: no matching constructor for vec2<i32>(u32, i32)"));
}
TEST_F(ResolverTypeConstructorValidationTest,
Expr_Constructor_Vec2Bool_Error_ScalarArgumentTypeMismatch) {
- auto* tc = vec2<bool>(true, Expr(Source{{12, 34}}, 1));
- WrapInFunction(tc);
+ WrapInFunction(Construct(Source{{12, 34}}, ty.vec2<bool>(), true, 1_i));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: type in vector constructor does not match vector "
- "type: expected 'bool', found 'i32'");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_THAT(r()->error(),
+ HasSubstr("12:34 error: no matching constructor for vec2<bool>(bool, i32)"));
}
TEST_F(ResolverTypeConstructorValidationTest,
Expr_Constructor_Vec2_Error_Vec3ArgumentCardinalityTooLarge) {
- auto* tc = vec2<f32>(Construct(Source{{12, 34}}, ty.vec3<f32>()));
- WrapInFunction(tc);
+ WrapInFunction(Construct(Source{{12, 34}}, ty.vec2<f32>(), vec3<f32>()));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- "12:34 error: attempted to construct 'vec2<f32>' with 3 component(s)");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_THAT(r()->error(),
+ HasSubstr("12:34 error: no matching constructor for vec2<f32>(vec3<f32>)"));
}
TEST_F(ResolverTypeConstructorValidationTest,
Expr_Constructor_Vec2_Error_Vec4ArgumentCardinalityTooLarge) {
- auto* tc = vec2<f32>(Construct(Source{{12, 34}}, ty.vec4<f32>()));
- WrapInFunction(tc);
+ WrapInFunction(Construct(Source{{12, 34}}, ty.vec2<f32>(), vec4<f32>()));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- "12:34 error: attempted to construct 'vec2<f32>' with 4 component(s)");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_THAT(r()->error(),
+ HasSubstr("12:34 error: no matching constructor for vec2<f32>(vec4<f32>)"));
}
-TEST_F(ResolverTypeConstructorValidationTest,
- Expr_Constructor_Vec2_Error_TooManyArgumentsScalar) {
- auto* tc =
- vec2<f32>(Expr(Source{{12, 34}}, 1.0f), Expr(Source{{12, 40}}, 1.0f),
- Expr(Source{{12, 46}}, 1.0f));
- WrapInFunction(tc);
+TEST_F(ResolverTypeConstructorValidationTest, Expr_Constructor_Vec2_Error_TooManyArgumentsScalar) {
+ WrapInFunction(Construct(Source{{12, 34}}, ty.vec2<f32>(), 1_f, 2_f, 3_f));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- "12:34 error: attempted to construct 'vec2<f32>' with 3 component(s)");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_THAT(r()->error(),
+ HasSubstr("12:34 error: no matching constructor for vec2<f32>(f32, f32, f32)"));
}
-TEST_F(ResolverTypeConstructorValidationTest,
- Expr_Constructor_Vec2_Error_TooManyArgumentsVector) {
- auto* tc = vec2<f32>(Construct(Source{{12, 34}}, ty.vec2<f32>()),
- Construct(Source{{12, 40}}, ty.vec2<f32>()));
- WrapInFunction(tc);
+TEST_F(ResolverTypeConstructorValidationTest, Expr_Constructor_Vec2_Error_TooManyArgumentsVector) {
+ WrapInFunction(Construct(Source{{12, 34}}, ty.vec2<f32>(), vec2<f32>(), vec2<f32>()));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- "12:34 error: attempted to construct 'vec2<f32>' with 4 component(s)");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_THAT(
+ r()->error(),
+ HasSubstr("12:34 error: no matching constructor for vec2<f32>(vec2<f32>, vec2<f32>)"));
}
TEST_F(ResolverTypeConstructorValidationTest,
Expr_Constructor_Vec2_Error_TooManyArgumentsVectorAndScalar) {
- auto* tc = vec2<f32>(Construct(Source{{12, 34}}, ty.vec2<f32>()),
- Expr(Source{{12, 40}}, 1.0f));
- WrapInFunction(tc);
+ WrapInFunction(Construct(Source{{12, 34}}, ty.vec2<f32>(), vec2<f32>(), 1_f));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- "12:34 error: attempted to construct 'vec2<f32>' with 3 component(s)");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_THAT(r()->error(),
+ HasSubstr("12:34 error: no matching constructor for vec2<f32>(vec2<f32>, f32)"));
}
-TEST_F(ResolverTypeConstructorValidationTest,
- Expr_Constructor_Vec2_Error_InvalidArgumentType) {
- auto* tc = vec2<f32>(Construct(Source{{12, 34}}, ty.mat2x2<f32>()));
- WrapInFunction(tc);
+TEST_F(ResolverTypeConstructorValidationTest, Expr_Constructor_Vec2_Error_InvalidArgumentType) {
+ WrapInFunction(Construct(Source{{12, 34}}, ty.vec2<f32>(), mat2x2<f32>()));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: expected vector or scalar type in vector "
- "constructor; found: mat2x2<f32>");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_THAT(r()->error(),
+ HasSubstr("12:34 error: no matching constructor for vec2<f32>(mat2x2<f32>)"));
}
-TEST_F(ResolverTypeConstructorValidationTest,
- Expr_Constructor_Vec2_Success_ZeroValue) {
- auto* tc = vec2<f32>();
- WrapInFunction(tc);
+TEST_F(ResolverTypeConstructorValidationTest, Expr_Constructor_Vec2_Success_ZeroValue) {
+ auto* tc = vec2<f32>();
+ WrapInFunction(tc);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(tc), nullptr);
- ASSERT_TRUE(TypeOf(tc)->Is<sem::Vector>());
- EXPECT_TRUE(TypeOf(tc)->As<sem::Vector>()->type()->Is<sem::F32>());
- EXPECT_EQ(TypeOf(tc)->As<sem::Vector>()->Width(), 2u);
+ ASSERT_NE(TypeOf(tc), nullptr);
+ ASSERT_TRUE(TypeOf(tc)->Is<sem::Vector>());
+ EXPECT_TRUE(TypeOf(tc)->As<sem::Vector>()->type()->Is<sem::F32>());
+ EXPECT_EQ(TypeOf(tc)->As<sem::Vector>()->Width(), 2u);
- auto* call = Sem().Get(tc);
- ASSERT_NE(call, nullptr);
- auto* ctor = call->Target()->As<sem::TypeConstructor>();
- ASSERT_NE(ctor, nullptr);
- EXPECT_EQ(call->Type(), ctor->ReturnType());
- ASSERT_EQ(ctor->Parameters().size(), 0u);
+ auto* call = Sem().Get<sem::Call>(tc);
+ ASSERT_NE(call, nullptr);
+ auto* ctor = call->Target()->As<sem::TypeConstructor>();
+ ASSERT_NE(ctor, nullptr);
+ EXPECT_EQ(call->Type(), ctor->ReturnType());
+ ASSERT_EQ(ctor->Parameters().size(), 0u);
}
-TEST_F(ResolverTypeConstructorValidationTest,
- Expr_Constructor_Vec2F32_Success_Scalar) {
- auto* tc = vec2<f32>(1.0f, 1.0f);
- WrapInFunction(tc);
+TEST_F(ResolverTypeConstructorValidationTest, Expr_Constructor_Vec2F32_Success_Scalar) {
+ auto* tc = vec2<f32>(1_f, 1_f);
+ WrapInFunction(tc);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(tc), nullptr);
- ASSERT_TRUE(TypeOf(tc)->Is<sem::Vector>());
- EXPECT_TRUE(TypeOf(tc)->As<sem::Vector>()->type()->Is<sem::F32>());
- EXPECT_EQ(TypeOf(tc)->As<sem::Vector>()->Width(), 2u);
+ ASSERT_NE(TypeOf(tc), nullptr);
+ ASSERT_TRUE(TypeOf(tc)->Is<sem::Vector>());
+ EXPECT_TRUE(TypeOf(tc)->As<sem::Vector>()->type()->Is<sem::F32>());
+ EXPECT_EQ(TypeOf(tc)->As<sem::Vector>()->Width(), 2u);
- auto* call = Sem().Get(tc);
- ASSERT_NE(call, nullptr);
- auto* ctor = call->Target()->As<sem::TypeConstructor>();
- ASSERT_NE(ctor, nullptr);
- EXPECT_EQ(call->Type(), ctor->ReturnType());
- ASSERT_EQ(ctor->Parameters().size(), 2u);
- EXPECT_TRUE(ctor->Parameters()[0]->Type()->Is<sem::F32>());
- EXPECT_TRUE(ctor->Parameters()[1]->Type()->Is<sem::F32>());
+ auto* call = Sem().Get<sem::Call>(tc);
+ ASSERT_NE(call, nullptr);
+ auto* ctor = call->Target()->As<sem::TypeConstructor>();
+ ASSERT_NE(ctor, nullptr);
+ EXPECT_EQ(call->Type(), ctor->ReturnType());
+ ASSERT_EQ(ctor->Parameters().size(), 2u);
+ EXPECT_TRUE(ctor->Parameters()[0]->Type()->Is<sem::F32>());
+ EXPECT_TRUE(ctor->Parameters()[1]->Type()->Is<sem::F32>());
}
-TEST_F(ResolverTypeConstructorValidationTest,
- Expr_Constructor_Vec2U32_Success_Scalar) {
- auto* tc = vec2<u32>(1u, 1u);
- WrapInFunction(tc);
+TEST_F(ResolverTypeConstructorValidationTest, Expr_Constructor_Vec2U32_Success_Scalar) {
+ auto* tc = vec2<u32>(1_u, 1_u);
+ WrapInFunction(tc);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(tc), nullptr);
- ASSERT_TRUE(TypeOf(tc)->Is<sem::Vector>());
- EXPECT_TRUE(TypeOf(tc)->As<sem::Vector>()->type()->Is<sem::U32>());
- EXPECT_EQ(TypeOf(tc)->As<sem::Vector>()->Width(), 2u);
+ ASSERT_NE(TypeOf(tc), nullptr);
+ ASSERT_TRUE(TypeOf(tc)->Is<sem::Vector>());
+ EXPECT_TRUE(TypeOf(tc)->As<sem::Vector>()->type()->Is<sem::U32>());
+ EXPECT_EQ(TypeOf(tc)->As<sem::Vector>()->Width(), 2u);
- auto* call = Sem().Get(tc);
- ASSERT_NE(call, nullptr);
- auto* ctor = call->Target()->As<sem::TypeConstructor>();
- ASSERT_NE(ctor, nullptr);
- EXPECT_EQ(call->Type(), ctor->ReturnType());
- ASSERT_EQ(ctor->Parameters().size(), 2u);
- EXPECT_TRUE(ctor->Parameters()[0]->Type()->Is<sem::U32>());
- EXPECT_TRUE(ctor->Parameters()[1]->Type()->Is<sem::U32>());
+ auto* call = Sem().Get<sem::Call>(tc);
+ ASSERT_NE(call, nullptr);
+ auto* ctor = call->Target()->As<sem::TypeConstructor>();
+ ASSERT_NE(ctor, nullptr);
+ EXPECT_EQ(call->Type(), ctor->ReturnType());
+ ASSERT_EQ(ctor->Parameters().size(), 2u);
+ EXPECT_TRUE(ctor->Parameters()[0]->Type()->Is<sem::U32>());
+ EXPECT_TRUE(ctor->Parameters()[1]->Type()->Is<sem::U32>());
}
-TEST_F(ResolverTypeConstructorValidationTest,
- Expr_Constructor_Vec2I32_Success_Scalar) {
- auto* tc = vec2<i32>(1, 1);
- WrapInFunction(tc);
+TEST_F(ResolverTypeConstructorValidationTest, Expr_Constructor_Vec2I32_Success_Scalar) {
+ auto* tc = vec2<i32>(1_i, 1_i);
+ WrapInFunction(tc);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(tc), nullptr);
- ASSERT_TRUE(TypeOf(tc)->Is<sem::Vector>());
- EXPECT_TRUE(TypeOf(tc)->As<sem::Vector>()->type()->Is<sem::I32>());
- EXPECT_EQ(TypeOf(tc)->As<sem::Vector>()->Width(), 2u);
+ ASSERT_NE(TypeOf(tc), nullptr);
+ ASSERT_TRUE(TypeOf(tc)->Is<sem::Vector>());
+ EXPECT_TRUE(TypeOf(tc)->As<sem::Vector>()->type()->Is<sem::I32>());
+ EXPECT_EQ(TypeOf(tc)->As<sem::Vector>()->Width(), 2u);
- auto* call = Sem().Get(tc);
- ASSERT_NE(call, nullptr);
- auto* ctor = call->Target()->As<sem::TypeConstructor>();
- ASSERT_NE(ctor, nullptr);
- EXPECT_EQ(call->Type(), ctor->ReturnType());
- ASSERT_EQ(ctor->Parameters().size(), 2u);
- EXPECT_TRUE(ctor->Parameters()[0]->Type()->Is<sem::I32>());
- EXPECT_TRUE(ctor->Parameters()[1]->Type()->Is<sem::I32>());
+ auto* call = Sem().Get<sem::Call>(tc);
+ ASSERT_NE(call, nullptr);
+ auto* ctor = call->Target()->As<sem::TypeConstructor>();
+ ASSERT_NE(ctor, nullptr);
+ EXPECT_EQ(call->Type(), ctor->ReturnType());
+ ASSERT_EQ(ctor->Parameters().size(), 2u);
+ EXPECT_TRUE(ctor->Parameters()[0]->Type()->Is<sem::I32>());
+ EXPECT_TRUE(ctor->Parameters()[1]->Type()->Is<sem::I32>());
}
-TEST_F(ResolverTypeConstructorValidationTest,
- Expr_Constructor_Vec2Bool_Success_Scalar) {
- auto* tc = vec2<bool>(true, false);
- WrapInFunction(tc);
+TEST_F(ResolverTypeConstructorValidationTest, Expr_Constructor_Vec2Bool_Success_Scalar) {
+ auto* tc = vec2<bool>(true, false);
+ WrapInFunction(tc);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(tc), nullptr);
- ASSERT_TRUE(TypeOf(tc)->Is<sem::Vector>());
- EXPECT_TRUE(TypeOf(tc)->As<sem::Vector>()->type()->Is<sem::Bool>());
- EXPECT_EQ(TypeOf(tc)->As<sem::Vector>()->Width(), 2u);
+ ASSERT_NE(TypeOf(tc), nullptr);
+ ASSERT_TRUE(TypeOf(tc)->Is<sem::Vector>());
+ EXPECT_TRUE(TypeOf(tc)->As<sem::Vector>()->type()->Is<sem::Bool>());
+ EXPECT_EQ(TypeOf(tc)->As<sem::Vector>()->Width(), 2u);
- auto* call = Sem().Get(tc);
- ASSERT_NE(call, nullptr);
- auto* ctor = call->Target()->As<sem::TypeConstructor>();
- ASSERT_NE(ctor, nullptr);
- EXPECT_EQ(call->Type(), ctor->ReturnType());
- ASSERT_EQ(ctor->Parameters().size(), 2u);
- EXPECT_TRUE(ctor->Parameters()[0]->Type()->Is<sem::Bool>());
- EXPECT_TRUE(ctor->Parameters()[1]->Type()->Is<sem::Bool>());
+ auto* call = Sem().Get<sem::Call>(tc);
+ ASSERT_NE(call, nullptr);
+ auto* ctor = call->Target()->As<sem::TypeConstructor>();
+ ASSERT_NE(ctor, nullptr);
+ EXPECT_EQ(call->Type(), ctor->ReturnType());
+ ASSERT_EQ(ctor->Parameters().size(), 2u);
+ EXPECT_TRUE(ctor->Parameters()[0]->Type()->Is<sem::Bool>());
+ EXPECT_TRUE(ctor->Parameters()[1]->Type()->Is<sem::Bool>());
}
-TEST_F(ResolverTypeConstructorValidationTest,
- Expr_Constructor_Vec2_Success_Identity) {
- auto* tc = vec2<f32>(vec2<f32>());
- WrapInFunction(tc);
+TEST_F(ResolverTypeConstructorValidationTest, Expr_Constructor_Vec2_Success_Identity) {
+ auto* tc = vec2<f32>(vec2<f32>());
+ WrapInFunction(tc);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(tc), nullptr);
- ASSERT_TRUE(TypeOf(tc)->Is<sem::Vector>());
- EXPECT_TRUE(TypeOf(tc)->As<sem::Vector>()->type()->Is<sem::F32>());
- EXPECT_EQ(TypeOf(tc)->As<sem::Vector>()->Width(), 2u);
+ ASSERT_NE(TypeOf(tc), nullptr);
+ ASSERT_TRUE(TypeOf(tc)->Is<sem::Vector>());
+ EXPECT_TRUE(TypeOf(tc)->As<sem::Vector>()->type()->Is<sem::F32>());
+ EXPECT_EQ(TypeOf(tc)->As<sem::Vector>()->Width(), 2u);
- auto* call = Sem().Get(tc);
- ASSERT_NE(call, nullptr);
- auto* ctor = call->Target()->As<sem::TypeConstructor>();
- ASSERT_NE(ctor, nullptr);
- EXPECT_EQ(call->Type(), ctor->ReturnType());
- ASSERT_EQ(ctor->Parameters().size(), 1u);
- EXPECT_TRUE(ctor->Parameters()[0]->Type()->Is<sem::Vector>());
+ auto* call = Sem().Get<sem::Call>(tc);
+ ASSERT_NE(call, nullptr);
+ auto* ctor = call->Target()->As<sem::TypeConstructor>();
+ ASSERT_NE(ctor, nullptr);
+ EXPECT_EQ(call->Type(), ctor->ReturnType());
+ ASSERT_EQ(ctor->Parameters().size(), 1u);
+ EXPECT_TRUE(ctor->Parameters()[0]->Type()->Is<sem::Vector>());
}
-TEST_F(ResolverTypeConstructorValidationTest,
- Expr_Constructor_Vec2_Success_Vec2TypeConversion) {
- auto* tc = vec2<f32>(vec2<i32>());
- WrapInFunction(tc);
+TEST_F(ResolverTypeConstructorValidationTest, Expr_Constructor_Vec2_Success_Vec2TypeConversion) {
+ auto* tc = vec2<f32>(vec2<i32>());
+ WrapInFunction(tc);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(tc), nullptr);
- ASSERT_TRUE(TypeOf(tc)->Is<sem::Vector>());
- EXPECT_TRUE(TypeOf(tc)->As<sem::Vector>()->type()->Is<sem::F32>());
- EXPECT_EQ(TypeOf(tc)->As<sem::Vector>()->Width(), 2u);
+ ASSERT_NE(TypeOf(tc), nullptr);
+ ASSERT_TRUE(TypeOf(tc)->Is<sem::Vector>());
+ EXPECT_TRUE(TypeOf(tc)->As<sem::Vector>()->type()->Is<sem::F32>());
+ EXPECT_EQ(TypeOf(tc)->As<sem::Vector>()->Width(), 2u);
- auto* call = Sem().Get(tc);
- ASSERT_NE(call, nullptr);
- auto* ctor = call->Target()->As<sem::TypeConversion>();
- ASSERT_NE(ctor, nullptr);
- EXPECT_EQ(call->Type(), ctor->ReturnType());
- ASSERT_EQ(ctor->Parameters().size(), 1u);
- EXPECT_TRUE(ctor->Parameters()[0]->Type()->Is<sem::Vector>());
+ auto* call = Sem().Get<sem::Call>(tc);
+ ASSERT_NE(call, nullptr);
+ auto* ctor = call->Target()->As<sem::TypeConversion>();
+ ASSERT_NE(ctor, nullptr);
+ EXPECT_EQ(call->Type(), ctor->ReturnType());
+ ASSERT_EQ(ctor->Parameters().size(), 1u);
+ EXPECT_TRUE(ctor->Parameters()[0]->Type()->Is<sem::Vector>());
}
TEST_F(ResolverTypeConstructorValidationTest,
Expr_Constructor_Vec3F32_Error_ScalarArgumentTypeMismatch) {
- auto* tc = vec3<f32>(1.0f, 1.0f, Expr(Source{{12, 34}}, 1));
- WrapInFunction(tc);
+ WrapInFunction(Construct(Source{{12, 34}}, ty.vec3<f32>(), 1_f, 2_f, 3_i));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: type in vector constructor does not match vector "
- "type: expected 'f32', found 'i32'");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_THAT(r()->error(),
+ HasSubstr("12:34 error: no matching constructor for vec3<f32>(f32, f32, i32)"));
}
TEST_F(ResolverTypeConstructorValidationTest,
Expr_Constructor_Vec3U32_Error_ScalarArgumentTypeMismatch) {
- auto* tc = vec3<u32>(1u, Expr(Source{{12, 34}}, 1), 1u);
- WrapInFunction(tc);
+ WrapInFunction(Construct(Source{{12, 34}}, ty.vec3<u32>(), 1_u, 2_i, 3_u));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: type in vector constructor does not match vector "
- "type: expected 'u32', found 'i32'");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_THAT(r()->error(),
+ HasSubstr("12:34 error: no matching constructor for vec3<u32>(u32, i32, u32)"));
}
TEST_F(ResolverTypeConstructorValidationTest,
Expr_Constructor_Vec3I32_Error_ScalarArgumentTypeMismatch) {
- auto* tc = vec3<i32>(1, Expr(Source{{12, 34}}, 1u), 1);
- WrapInFunction(tc);
+ WrapInFunction(Construct(Source{{12, 34}}, ty.vec3<i32>(), 1_i, 2_u, 3_i));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: type in vector constructor does not match vector "
- "type: expected 'i32', found 'u32'");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_THAT(r()->error(),
+ HasSubstr("12:34 error: no matching constructor for vec3<i32>(i32, u32, i32)"));
}
TEST_F(ResolverTypeConstructorValidationTest,
Expr_Constructor_Vec3Bool_Error_ScalarArgumentTypeMismatch) {
- auto* tc = vec3<bool>(true, Expr(Source{{12, 34}}, 1), false);
- WrapInFunction(tc);
+ WrapInFunction(Construct(Source{{12, 34}}, ty.vec3<bool>(), false, 1_i, true));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: type in vector constructor does not match vector "
- "type: expected 'bool', found 'i32'");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_THAT(r()->error(),
+ HasSubstr("12:34 error: no matching constructor for vec3<bool>(bool, i32, bool)"));
}
TEST_F(ResolverTypeConstructorValidationTest,
Expr_Constructor_Vec3_Error_Vec4ArgumentCardinalityTooLarge) {
- auto* tc = vec3<f32>(Construct(Source{{12, 34}}, ty.vec4<f32>()));
- WrapInFunction(tc);
+ WrapInFunction(Construct(Source{{12, 34}}, ty.vec3<f32>(), vec4<f32>()));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- "12:34 error: attempted to construct 'vec3<f32>' with 4 component(s)");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_THAT(r()->error(),
+ HasSubstr("12:34 error: no matching constructor for vec3<f32>(vec4<f32>)"));
}
-TEST_F(ResolverTypeConstructorValidationTest,
- Expr_Constructor_Vec3_Error_TooFewArgumentsScalar) {
- auto* tc =
- vec3<f32>(Expr(Source{{12, 34}}, 1.0f), Expr(Source{{12, 40}}, 1.0f));
- WrapInFunction(tc);
+TEST_F(ResolverTypeConstructorValidationTest, Expr_Constructor_Vec3_Error_TooFewArgumentsScalar) {
+ WrapInFunction(Construct(Source{{12, 34}}, ty.vec3<f32>(), 1_f, 2_f));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- "12:34 error: attempted to construct 'vec3<f32>' with 2 component(s)");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_THAT(r()->error(),
+ HasSubstr("12:34 error: no matching constructor for vec3<f32>(f32, f32)"));
}
-TEST_F(ResolverTypeConstructorValidationTest,
- Expr_Constructor_Vec3_Error_TooManyArgumentsScalar) {
- auto* tc =
- vec3<f32>(Expr(Source{{12, 34}}, 1.0f), Expr(Source{{12, 40}}, 1.0f),
- Expr(Source{{12, 46}}, 1.0f), Expr(Source{{12, 52}}, 1.0f));
- WrapInFunction(tc);
+TEST_F(ResolverTypeConstructorValidationTest, Expr_Constructor_Vec3_Error_TooManyArgumentsScalar) {
+ WrapInFunction(Construct(Source{{12, 34}}, ty.vec3<f32>(), 1_f, 2_f, 3_f, 4_f));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- "12:34 error: attempted to construct 'vec3<f32>' with 4 component(s)");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_THAT(
+ r()->error(),
+ HasSubstr("12:34 error: no matching constructor for vec3<f32>(f32, f32, f32, f32)"));
}
-TEST_F(ResolverTypeConstructorValidationTest,
- Expr_Constructor_Vec3_Error_TooFewArgumentsVec2) {
- auto* tc = vec3<f32>(Construct(Source{{12, 34}}, ty.vec2<f32>()));
- WrapInFunction(tc);
+TEST_F(ResolverTypeConstructorValidationTest, Expr_Constructor_Vec3_Error_TooFewArgumentsVec2) {
+ WrapInFunction(Construct(Source{{12, 34}}, ty.vec3<f32>(), vec2<f32>()));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- "12:34 error: attempted to construct 'vec3<f32>' with 2 component(s)");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_THAT(r()->error(),
+ HasSubstr("12:34 error: no matching constructor for vec3<f32>(vec2<f32>)"));
}
-TEST_F(ResolverTypeConstructorValidationTest,
- Expr_Constructor_Vec3_Error_TooManyArgumentsVec2) {
- auto* tc = vec3<f32>(Construct(Source{{12, 34}}, ty.vec2<f32>()),
- Construct(Source{{12, 40}}, ty.vec2<f32>()));
- WrapInFunction(tc);
+TEST_F(ResolverTypeConstructorValidationTest, Expr_Constructor_Vec3_Error_TooManyArgumentsVec2) {
+ WrapInFunction(Construct(Source{{12, 34}}, ty.vec3<f32>(), vec2<f32>(), vec2<f32>()));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- "12:34 error: attempted to construct 'vec3<f32>' with 4 component(s)");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_THAT(
+ r()->error(),
+ HasSubstr("12:34 error: no matching constructor for vec3<f32>(vec2<f32>, vec2<f32>)"));
}
TEST_F(ResolverTypeConstructorValidationTest,
Expr_Constructor_Vec3_Error_TooManyArgumentsVec2AndScalar) {
- auto* tc =
- vec3<f32>(Construct(Source{{12, 34}}, ty.vec2<f32>()),
- Expr(Source{{12, 40}}, 1.0f), Expr(Source{{12, 46}}, 1.0f));
- WrapInFunction(tc);
+ WrapInFunction(Construct(Source{{12, 34}}, ty.vec3<f32>(), vec2<f32>(), 1_f, 1_f));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- "12:34 error: attempted to construct 'vec3<f32>' with 4 component(s)");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_THAT(
+ r()->error(),
+ HasSubstr("12:34 error: no matching constructor for vec3<f32>(vec2<f32>, f32, f32)"));
}
-TEST_F(ResolverTypeConstructorValidationTest,
- Expr_Constructor_Vec3_Error_TooManyArgumentsVec3) {
- auto* tc = vec3<f32>(Construct(Source{{12, 34}}, ty.vec3<f32>()),
- Expr(Source{{12, 40}}, 1.0f));
- WrapInFunction(tc);
+TEST_F(ResolverTypeConstructorValidationTest, Expr_Constructor_Vec3_Error_TooManyArgumentsVec3) {
+ WrapInFunction(Construct(Source{{12, 34}}, ty.vec3<f32>(), vec3<f32>(), 1_f));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- "12:34 error: attempted to construct 'vec3<f32>' with 4 component(s)");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_THAT(r()->error(),
+ HasSubstr("12:34 error: no matching constructor for vec3<f32>(vec3<f32>, f32)"));
}
-TEST_F(ResolverTypeConstructorValidationTest,
- Expr_Constructor_Vec3_Error_InvalidArgumentType) {
- auto* tc = vec3<f32>(Construct(Source{{12, 34}}, ty.mat2x2<f32>()));
- WrapInFunction(tc);
+TEST_F(ResolverTypeConstructorValidationTest, Expr_Constructor_Vec3_Error_InvalidArgumentType) {
+ WrapInFunction(Construct(Source{{12, 34}}, ty.vec3<f32>(), mat2x2<f32>()));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: expected vector or scalar type in vector "
- "constructor; found: mat2x2<f32>");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_THAT(r()->error(),
+ HasSubstr("12:34 error: no matching constructor for vec3<f32>(mat2x2<f32>)"));
}
-TEST_F(ResolverTypeConstructorValidationTest,
- Expr_Constructor_Vec3_Success_ZeroValue) {
- auto* tc = vec3<f32>();
- WrapInFunction(tc);
+TEST_F(ResolverTypeConstructorValidationTest, Expr_Constructor_Vec3_Success_ZeroValue) {
+ auto* tc = vec3<f32>();
+ WrapInFunction(tc);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(tc), nullptr);
- ASSERT_TRUE(TypeOf(tc)->Is<sem::Vector>());
- EXPECT_TRUE(TypeOf(tc)->As<sem::Vector>()->type()->Is<sem::F32>());
- EXPECT_EQ(TypeOf(tc)->As<sem::Vector>()->Width(), 3u);
+ ASSERT_NE(TypeOf(tc), nullptr);
+ ASSERT_TRUE(TypeOf(tc)->Is<sem::Vector>());
+ EXPECT_TRUE(TypeOf(tc)->As<sem::Vector>()->type()->Is<sem::F32>());
+ EXPECT_EQ(TypeOf(tc)->As<sem::Vector>()->Width(), 3u);
- auto* call = Sem().Get(tc);
- ASSERT_NE(call, nullptr);
- auto* ctor = call->Target()->As<sem::TypeConstructor>();
- ASSERT_NE(ctor, nullptr);
- EXPECT_EQ(call->Type(), ctor->ReturnType());
- ASSERT_EQ(ctor->Parameters().size(), 0u);
+ auto* call = Sem().Get<sem::Call>(tc);
+ ASSERT_NE(call, nullptr);
+ auto* ctor = call->Target()->As<sem::TypeConstructor>();
+ ASSERT_NE(ctor, nullptr);
+ EXPECT_EQ(call->Type(), ctor->ReturnType());
+ ASSERT_EQ(ctor->Parameters().size(), 0u);
}
-TEST_F(ResolverTypeConstructorValidationTest,
- Expr_Constructor_Vec3F32_Success_Scalar) {
- auto* tc = vec3<f32>(1.0f, 1.0f, 1.0f);
- WrapInFunction(tc);
+TEST_F(ResolverTypeConstructorValidationTest, Expr_Constructor_Vec3F32_Success_Scalar) {
+ auto* tc = vec3<f32>(1_f, 1_f, 1_f);
+ WrapInFunction(tc);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(tc), nullptr);
- ASSERT_TRUE(TypeOf(tc)->Is<sem::Vector>());
- EXPECT_TRUE(TypeOf(tc)->As<sem::Vector>()->type()->Is<sem::F32>());
- EXPECT_EQ(TypeOf(tc)->As<sem::Vector>()->Width(), 3u);
+ ASSERT_NE(TypeOf(tc), nullptr);
+ ASSERT_TRUE(TypeOf(tc)->Is<sem::Vector>());
+ EXPECT_TRUE(TypeOf(tc)->As<sem::Vector>()->type()->Is<sem::F32>());
+ EXPECT_EQ(TypeOf(tc)->As<sem::Vector>()->Width(), 3u);
- auto* call = Sem().Get(tc);
- ASSERT_NE(call, nullptr);
- auto* ctor = call->Target()->As<sem::TypeConstructor>();
- ASSERT_NE(ctor, nullptr);
- EXPECT_EQ(call->Type(), ctor->ReturnType());
- ASSERT_EQ(ctor->Parameters().size(), 3u);
- EXPECT_TRUE(ctor->Parameters()[0]->Type()->Is<sem::F32>());
- EXPECT_TRUE(ctor->Parameters()[1]->Type()->Is<sem::F32>());
- EXPECT_TRUE(ctor->Parameters()[2]->Type()->Is<sem::F32>());
+ auto* call = Sem().Get<sem::Call>(tc);
+ ASSERT_NE(call, nullptr);
+ auto* ctor = call->Target()->As<sem::TypeConstructor>();
+ ASSERT_NE(ctor, nullptr);
+ EXPECT_EQ(call->Type(), ctor->ReturnType());
+ ASSERT_EQ(ctor->Parameters().size(), 3u);
+ EXPECT_TRUE(ctor->Parameters()[0]->Type()->Is<sem::F32>());
+ EXPECT_TRUE(ctor->Parameters()[1]->Type()->Is<sem::F32>());
+ EXPECT_TRUE(ctor->Parameters()[2]->Type()->Is<sem::F32>());
}
-TEST_F(ResolverTypeConstructorValidationTest,
- Expr_Constructor_Vec3U32_Success_Scalar) {
- auto* tc = vec3<u32>(1u, 1u, 1u);
- WrapInFunction(tc);
+TEST_F(ResolverTypeConstructorValidationTest, Expr_Constructor_Vec3U32_Success_Scalar) {
+ auto* tc = vec3<u32>(1_u, 1_u, 1_u);
+ WrapInFunction(tc);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(tc), nullptr);
- ASSERT_TRUE(TypeOf(tc)->Is<sem::Vector>());
- EXPECT_TRUE(TypeOf(tc)->As<sem::Vector>()->type()->Is<sem::U32>());
- EXPECT_EQ(TypeOf(tc)->As<sem::Vector>()->Width(), 3u);
+ ASSERT_NE(TypeOf(tc), nullptr);
+ ASSERT_TRUE(TypeOf(tc)->Is<sem::Vector>());
+ EXPECT_TRUE(TypeOf(tc)->As<sem::Vector>()->type()->Is<sem::U32>());
+ EXPECT_EQ(TypeOf(tc)->As<sem::Vector>()->Width(), 3u);
- auto* call = Sem().Get(tc);
- ASSERT_NE(call, nullptr);
- auto* ctor = call->Target()->As<sem::TypeConstructor>();
- ASSERT_NE(ctor, nullptr);
- EXPECT_EQ(call->Type(), ctor->ReturnType());
- ASSERT_EQ(ctor->Parameters().size(), 3u);
- EXPECT_TRUE(ctor->Parameters()[0]->Type()->Is<sem::U32>());
- EXPECT_TRUE(ctor->Parameters()[1]->Type()->Is<sem::U32>());
- EXPECT_TRUE(ctor->Parameters()[2]->Type()->Is<sem::U32>());
+ auto* call = Sem().Get<sem::Call>(tc);
+ ASSERT_NE(call, nullptr);
+ auto* ctor = call->Target()->As<sem::TypeConstructor>();
+ ASSERT_NE(ctor, nullptr);
+ EXPECT_EQ(call->Type(), ctor->ReturnType());
+ ASSERT_EQ(ctor->Parameters().size(), 3u);
+ EXPECT_TRUE(ctor->Parameters()[0]->Type()->Is<sem::U32>());
+ EXPECT_TRUE(ctor->Parameters()[1]->Type()->Is<sem::U32>());
+ EXPECT_TRUE(ctor->Parameters()[2]->Type()->Is<sem::U32>());
}
-TEST_F(ResolverTypeConstructorValidationTest,
- Expr_Constructor_Vec3I32_Success_Scalar) {
- auto* tc = vec3<i32>(1, 1, 1);
- WrapInFunction(tc);
+TEST_F(ResolverTypeConstructorValidationTest, Expr_Constructor_Vec3I32_Success_Scalar) {
+ auto* tc = vec3<i32>(1_i, 1_i, 1_i);
+ WrapInFunction(tc);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(tc), nullptr);
- ASSERT_TRUE(TypeOf(tc)->Is<sem::Vector>());
- EXPECT_TRUE(TypeOf(tc)->As<sem::Vector>()->type()->Is<sem::I32>());
- EXPECT_EQ(TypeOf(tc)->As<sem::Vector>()->Width(), 3u);
+ ASSERT_NE(TypeOf(tc), nullptr);
+ ASSERT_TRUE(TypeOf(tc)->Is<sem::Vector>());
+ EXPECT_TRUE(TypeOf(tc)->As<sem::Vector>()->type()->Is<sem::I32>());
+ EXPECT_EQ(TypeOf(tc)->As<sem::Vector>()->Width(), 3u);
- auto* call = Sem().Get(tc);
- ASSERT_NE(call, nullptr);
- auto* ctor = call->Target()->As<sem::TypeConstructor>();
- ASSERT_NE(ctor, nullptr);
- EXPECT_EQ(call->Type(), ctor->ReturnType());
- ASSERT_EQ(ctor->Parameters().size(), 3u);
- EXPECT_TRUE(ctor->Parameters()[0]->Type()->Is<sem::I32>());
- EXPECT_TRUE(ctor->Parameters()[1]->Type()->Is<sem::I32>());
- EXPECT_TRUE(ctor->Parameters()[2]->Type()->Is<sem::I32>());
+ auto* call = Sem().Get<sem::Call>(tc);
+ ASSERT_NE(call, nullptr);
+ auto* ctor = call->Target()->As<sem::TypeConstructor>();
+ ASSERT_NE(ctor, nullptr);
+ EXPECT_EQ(call->Type(), ctor->ReturnType());
+ ASSERT_EQ(ctor->Parameters().size(), 3u);
+ EXPECT_TRUE(ctor->Parameters()[0]->Type()->Is<sem::I32>());
+ EXPECT_TRUE(ctor->Parameters()[1]->Type()->Is<sem::I32>());
+ EXPECT_TRUE(ctor->Parameters()[2]->Type()->Is<sem::I32>());
}
-TEST_F(ResolverTypeConstructorValidationTest,
- Expr_Constructor_Vec3Bool_Success_Scalar) {
- auto* tc = vec3<bool>(true, false, true);
- WrapInFunction(tc);
+TEST_F(ResolverTypeConstructorValidationTest, Expr_Constructor_Vec3Bool_Success_Scalar) {
+ auto* tc = vec3<bool>(true, false, true);
+ WrapInFunction(tc);
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_NE(TypeOf(tc), nullptr);
+ ASSERT_TRUE(TypeOf(tc)->Is<sem::Vector>());
+ EXPECT_TRUE(TypeOf(tc)->As<sem::Vector>()->type()->Is<sem::Bool>());
+ EXPECT_EQ(TypeOf(tc)->As<sem::Vector>()->Width(), 3u);
- ASSERT_NE(TypeOf(tc), nullptr);
- ASSERT_TRUE(TypeOf(tc)->Is<sem::Vector>());
- EXPECT_TRUE(TypeOf(tc)->As<sem::Vector>()->type()->Is<sem::Bool>());
- EXPECT_EQ(TypeOf(tc)->As<sem::Vector>()->Width(), 3u);
+ auto* call = Sem().Get<sem::Call>(tc);
+ ASSERT_NE(call, nullptr);
+ auto* ctor = call->Target()->As<sem::TypeConstructor>();
+ ASSERT_NE(ctor, nullptr);
+ EXPECT_EQ(call->Type(), ctor->ReturnType());
+ ASSERT_EQ(ctor->Parameters().size(), 3u);
+ EXPECT_TRUE(ctor->Parameters()[0]->Type()->Is<sem::Bool>());
+ EXPECT_TRUE(ctor->Parameters()[1]->Type()->Is<sem::Bool>());
+ EXPECT_TRUE(ctor->Parameters()[2]->Type()->Is<sem::Bool>());
+}
+
+TEST_F(ResolverTypeConstructorValidationTest, Expr_Constructor_Vec3_Success_Vec2AndScalar) {
+ auto* tc = vec3<f32>(vec2<f32>(), 1_f);
+ WrapInFunction(tc);
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
+
+ ASSERT_NE(TypeOf(tc), nullptr);
+ ASSERT_TRUE(TypeOf(tc)->Is<sem::Vector>());
+ EXPECT_TRUE(TypeOf(tc)->As<sem::Vector>()->type()->Is<sem::F32>());
+ EXPECT_EQ(TypeOf(tc)->As<sem::Vector>()->Width(), 3u);
- auto* call = Sem().Get(tc);
- ASSERT_NE(call, nullptr);
- auto* ctor = call->Target()->As<sem::TypeConstructor>();
- ASSERT_NE(ctor, nullptr);
- EXPECT_EQ(call->Type(), ctor->ReturnType());
- ASSERT_EQ(ctor->Parameters().size(), 3u);
- EXPECT_TRUE(ctor->Parameters()[0]->Type()->Is<sem::Bool>());
- EXPECT_TRUE(ctor->Parameters()[1]->Type()->Is<sem::Bool>());
- EXPECT_TRUE(ctor->Parameters()[2]->Type()->Is<sem::Bool>());
-}
-
-TEST_F(ResolverTypeConstructorValidationTest,
- Expr_Constructor_Vec3_Success_Vec2AndScalar) {
- auto* tc = vec3<f32>(vec2<f32>(), 1.0f);
- WrapInFunction(tc);
+ auto* call = Sem().Get<sem::Call>(tc);
+ ASSERT_NE(call, nullptr);
+ auto* ctor = call->Target()->As<sem::TypeConstructor>();
+ ASSERT_NE(ctor, nullptr);
+ EXPECT_EQ(call->Type(), ctor->ReturnType());
+ ASSERT_EQ(ctor->Parameters().size(), 2u);
+ EXPECT_TRUE(ctor->Parameters()[0]->Type()->Is<sem::Vector>());
+ EXPECT_TRUE(ctor->Parameters()[1]->Type()->Is<sem::F32>());
+}
+
+TEST_F(ResolverTypeConstructorValidationTest, Expr_Constructor_Vec3_Success_ScalarAndVec2) {
+ auto* tc = vec3<f32>(1_f, vec2<f32>());
+ WrapInFunction(tc);
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
+
+ ASSERT_NE(TypeOf(tc), nullptr);
+ ASSERT_TRUE(TypeOf(tc)->Is<sem::Vector>());
+ EXPECT_TRUE(TypeOf(tc)->As<sem::Vector>()->type()->Is<sem::F32>());
+ EXPECT_EQ(TypeOf(tc)->As<sem::Vector>()->Width(), 3u);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ auto* call = Sem().Get<sem::Call>(tc);
+ ASSERT_NE(call, nullptr);
+ auto* ctor = call->Target()->As<sem::TypeConstructor>();
+ ASSERT_NE(ctor, nullptr);
+ EXPECT_EQ(call->Type(), ctor->ReturnType());
+ ASSERT_EQ(ctor->Parameters().size(), 2u);
+ EXPECT_TRUE(ctor->Parameters()[0]->Type()->Is<sem::F32>());
+ EXPECT_TRUE(ctor->Parameters()[1]->Type()->Is<sem::Vector>());
+}
+
+TEST_F(ResolverTypeConstructorValidationTest, Expr_Constructor_Vec3_Success_Identity) {
+ auto* tc = vec3<f32>(vec3<f32>());
+ WrapInFunction(tc);
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
+
+ ASSERT_NE(TypeOf(tc), nullptr);
+ ASSERT_TRUE(TypeOf(tc)->Is<sem::Vector>());
+ EXPECT_TRUE(TypeOf(tc)->As<sem::Vector>()->type()->Is<sem::F32>());
+ EXPECT_EQ(TypeOf(tc)->As<sem::Vector>()->Width(), 3u);
- ASSERT_NE(TypeOf(tc), nullptr);
- ASSERT_TRUE(TypeOf(tc)->Is<sem::Vector>());
- EXPECT_TRUE(TypeOf(tc)->As<sem::Vector>()->type()->Is<sem::F32>());
- EXPECT_EQ(TypeOf(tc)->As<sem::Vector>()->Width(), 3u);
-
- auto* call = Sem().Get(tc);
- ASSERT_NE(call, nullptr);
- auto* ctor = call->Target()->As<sem::TypeConstructor>();
- ASSERT_NE(ctor, nullptr);
- EXPECT_EQ(call->Type(), ctor->ReturnType());
- ASSERT_EQ(ctor->Parameters().size(), 2u);
- EXPECT_TRUE(ctor->Parameters()[0]->Type()->Is<sem::Vector>());
- EXPECT_TRUE(ctor->Parameters()[1]->Type()->Is<sem::F32>());
+ auto* call = Sem().Get<sem::Call>(tc);
+ ASSERT_NE(call, nullptr);
+ auto* ctor = call->Target()->As<sem::TypeConstructor>();
+ ASSERT_NE(ctor, nullptr);
+ EXPECT_EQ(call->Type(), ctor->ReturnType());
+ ASSERT_EQ(ctor->Parameters().size(), 1u);
+ EXPECT_TRUE(ctor->Parameters()[0]->Type()->Is<sem::Vector>());
}
-TEST_F(ResolverTypeConstructorValidationTest,
- Expr_Constructor_Vec3_Success_ScalarAndVec2) {
- auto* tc = vec3<f32>(1.0f, vec2<f32>());
- WrapInFunction(tc);
+TEST_F(ResolverTypeConstructorValidationTest, Expr_Constructor_Vec3_Success_Vec3TypeConversion) {
+ auto* tc = vec3<f32>(vec3<i32>());
+ WrapInFunction(tc);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
+
+ ASSERT_NE(TypeOf(tc), nullptr);
+ ASSERT_TRUE(TypeOf(tc)->Is<sem::Vector>());
+ EXPECT_TRUE(TypeOf(tc)->As<sem::Vector>()->type()->Is<sem::F32>());
+ EXPECT_EQ(TypeOf(tc)->As<sem::Vector>()->Width(), 3u);
- ASSERT_NE(TypeOf(tc), nullptr);
- ASSERT_TRUE(TypeOf(tc)->Is<sem::Vector>());
- EXPECT_TRUE(TypeOf(tc)->As<sem::Vector>()->type()->Is<sem::F32>());
- EXPECT_EQ(TypeOf(tc)->As<sem::Vector>()->Width(), 3u);
-
- auto* call = Sem().Get(tc);
- ASSERT_NE(call, nullptr);
- auto* ctor = call->Target()->As<sem::TypeConstructor>();
- ASSERT_NE(ctor, nullptr);
- EXPECT_EQ(call->Type(), ctor->ReturnType());
- ASSERT_EQ(ctor->Parameters().size(), 2u);
- EXPECT_TRUE(ctor->Parameters()[0]->Type()->Is<sem::F32>());
- EXPECT_TRUE(ctor->Parameters()[1]->Type()->Is<sem::Vector>());
-}
-
-TEST_F(ResolverTypeConstructorValidationTest,
- Expr_Constructor_Vec3_Success_Identity) {
- auto* tc = vec3<f32>(vec3<f32>());
- WrapInFunction(tc);
-
- ASSERT_TRUE(r()->Resolve()) << r()->error();
-
- ASSERT_NE(TypeOf(tc), nullptr);
- ASSERT_TRUE(TypeOf(tc)->Is<sem::Vector>());
- EXPECT_TRUE(TypeOf(tc)->As<sem::Vector>()->type()->Is<sem::F32>());
- EXPECT_EQ(TypeOf(tc)->As<sem::Vector>()->Width(), 3u);
-
- auto* call = Sem().Get(tc);
- ASSERT_NE(call, nullptr);
- auto* ctor = call->Target()->As<sem::TypeConstructor>();
- ASSERT_NE(ctor, nullptr);
- EXPECT_EQ(call->Type(), ctor->ReturnType());
- ASSERT_EQ(ctor->Parameters().size(), 1u);
- EXPECT_TRUE(ctor->Parameters()[0]->Type()->Is<sem::Vector>());
-}
-
-TEST_F(ResolverTypeConstructorValidationTest,
- Expr_Constructor_Vec3_Success_Vec3TypeConversion) {
- auto* tc = vec3<f32>(vec3<i32>());
- WrapInFunction(tc);
-
- ASSERT_TRUE(r()->Resolve()) << r()->error();
-
- ASSERT_NE(TypeOf(tc), nullptr);
- ASSERT_TRUE(TypeOf(tc)->Is<sem::Vector>());
- EXPECT_TRUE(TypeOf(tc)->As<sem::Vector>()->type()->Is<sem::F32>());
- EXPECT_EQ(TypeOf(tc)->As<sem::Vector>()->Width(), 3u);
-
- auto* call = Sem().Get(tc);
- ASSERT_NE(call, nullptr);
- auto* ctor = call->Target()->As<sem::TypeConversion>();
- ASSERT_NE(ctor, nullptr);
- EXPECT_EQ(call->Type(), ctor->ReturnType());
- ASSERT_EQ(ctor->Parameters().size(), 1u);
- EXPECT_TRUE(ctor->Parameters()[0]->Type()->Is<sem::Vector>());
+ auto* call = Sem().Get<sem::Call>(tc);
+ ASSERT_NE(call, nullptr);
+ auto* ctor = call->Target()->As<sem::TypeConversion>();
+ ASSERT_NE(ctor, nullptr);
+ EXPECT_EQ(call->Type(), ctor->ReturnType());
+ ASSERT_EQ(ctor->Parameters().size(), 1u);
+ EXPECT_TRUE(ctor->Parameters()[0]->Type()->Is<sem::Vector>());
}
TEST_F(ResolverTypeConstructorValidationTest,
Expr_Constructor_Vec4F32_Error_ScalarArgumentTypeMismatch) {
- auto* tc = vec4<f32>(1.0f, 1.0f, Expr(Source{{12, 34}}, 1), 1.0f);
- WrapInFunction(tc);
+ WrapInFunction(Construct(Source{{12, 34}}, ty.vec4<f32>(), 1_f, 1_f, 1_i, 1_f));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: type in vector constructor does not match vector "
- "type: expected 'f32', found 'i32'");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_THAT(
+ r()->error(),
+ HasSubstr("12:34 error: no matching constructor for vec4<f32>(f32, f32, i32, f32)"));
}
TEST_F(ResolverTypeConstructorValidationTest,
Expr_Constructor_Vec4U32_Error_ScalarArgumentTypeMismatch) {
- auto* tc = vec4<u32>(1u, 1u, Expr(Source{{12, 34}}, 1), 1u);
- WrapInFunction(tc);
+ WrapInFunction(Construct(Source{{12, 34}}, ty.vec4<u32>(), 1_u, 1_u, 1_i, 1_u));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: type in vector constructor does not match vector "
- "type: expected 'u32', found 'i32'");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_THAT(
+ r()->error(),
+ HasSubstr("12:34 error: no matching constructor for vec4<u32>(u32, u32, i32, u32)"));
}
TEST_F(ResolverTypeConstructorValidationTest,
Expr_Constructor_Vec4I32_Error_ScalarArgumentTypeMismatch) {
- auto* tc = vec4<i32>(1, 1, Expr(Source{{12, 34}}, 1u), 1);
- WrapInFunction(tc);
+ WrapInFunction(Construct(Source{{12, 34}}, ty.vec4<i32>(), 1_i, 1_i, 1_u, 1_i));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: type in vector constructor does not match vector "
- "type: expected 'i32', found 'u32'");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_THAT(
+ r()->error(),
+ HasSubstr("12:34 error: no matching constructor for vec4<i32>(i32, i32, u32, i32)"));
}
TEST_F(ResolverTypeConstructorValidationTest,
Expr_Constructor_Vec4Bool_Error_ScalarArgumentTypeMismatch) {
- auto* tc = vec4<bool>(true, false, Expr(Source{{12, 34}}, 1), true);
- WrapInFunction(tc);
+ WrapInFunction(Construct(Source{{12, 34}}, ty.vec4<bool>(), true, false, 1_i, true));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: type in vector constructor does not match vector "
- "type: expected 'bool', found 'i32'");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_THAT(
+ r()->error(),
+ HasSubstr("12:34 error: no matching constructor for vec4<bool>(bool, bool, i32, bool)"));
}
-TEST_F(ResolverTypeConstructorValidationTest,
- Expr_Constructor_Vec4_Error_TooFewArgumentsScalar) {
- auto* tc =
- vec4<f32>(Expr(Source{{12, 34}}, 1.0f), Expr(Source{{12, 40}}, 1.0f),
- Expr(Source{{12, 46}}, 1.0f));
- WrapInFunction(tc);
+TEST_F(ResolverTypeConstructorValidationTest, Expr_Constructor_Vec4_Error_TooFewArgumentsScalar) {
+ WrapInFunction(Construct(Source{{12, 34}}, ty.vec4<f32>(), 1_f, 2_f, 3_f));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- "12:34 error: attempted to construct 'vec4<f32>' with 3 component(s)");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_THAT(r()->error(),
+ HasSubstr("12:34 error: no matching constructor for vec4<f32>(f32, f32, f32)"));
}
-TEST_F(ResolverTypeConstructorValidationTest,
- Expr_Constructor_Vec4_Error_TooManyArgumentsScalar) {
- auto* tc =
- vec4<f32>(Expr(Source{{12, 34}}, 1.0f), Expr(Source{{12, 40}}, 1.0f),
- Expr(Source{{12, 46}}, 1.0f), Expr(Source{{12, 52}}, 1.0f),
- Expr(Source{{12, 58}}, 1.0f));
- WrapInFunction(tc);
+TEST_F(ResolverTypeConstructorValidationTest, Expr_Constructor_Vec4_Error_TooManyArgumentsScalar) {
+ WrapInFunction(Construct(Source{{12, 34}}, ty.vec4<f32>(), 1_f, 2_f, 3_f, 4_f, 5_f));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- "12:34 error: attempted to construct 'vec4<f32>' with 5 component(s)");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_THAT(
+ r()->error(),
+ HasSubstr("12:34 error: no matching constructor for vec4<f32>(f32, f32, f32, f32, f32)"));
}
TEST_F(ResolverTypeConstructorValidationTest,
Expr_Constructor_Vec4_Error_TooFewArgumentsVec2AndScalar) {
- auto* tc = vec4<f32>(Construct(Source{{12, 34}}, ty.vec2<f32>()),
- Expr(Source{{12, 40}}, 1.0f));
- WrapInFunction(tc);
+ WrapInFunction(Construct(Source{{12, 34}}, ty.vec4<f32>(), vec2<f32>(), 1_f));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- "12:34 error: attempted to construct 'vec4<f32>' with 3 component(s)");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_THAT(r()->error(),
+ HasSubstr("12:34 error: no matching constructor for vec4<f32>(vec2<f32>, f32)"));
}
TEST_F(ResolverTypeConstructorValidationTest,
Expr_Constructor_Vec4_Error_TooManyArgumentsVec2AndScalars) {
- auto* tc = vec4<f32>(
- Construct(Source{{12, 34}}, ty.vec2<f32>()), Expr(Source{{12, 40}}, 1.0f),
- Expr(Source{{12, 46}}, 1.0f), Expr(Source{{12, 52}}, 1.0f));
- WrapInFunction(tc);
+ WrapInFunction(Construct(Source{{12, 34}}, ty.vec4<f32>(), vec2<f32>(), 1_f, 2_f, 3_f));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- "12:34 error: attempted to construct 'vec4<f32>' with 5 component(s)");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_THAT(
+ r()->error(),
+ HasSubstr("12:34 error: no matching constructor for vec4<f32>(vec2<f32>, f32, f32, f32)"));
}
TEST_F(ResolverTypeConstructorValidationTest,
Expr_Constructor_Vec4_Error_TooManyArgumentsVec2Vec2Scalar) {
- auto* tc = vec4<f32>(Construct(Source{{12, 34}}, ty.vec2<f32>()),
- Construct(Source{{12, 40}}, ty.vec2<f32>()),
- Expr(Source{{12, 46}}, 1.0f));
- WrapInFunction(tc);
+ WrapInFunction(Construct(Source{{12, 34}}, ty.vec4<f32>(), vec2<f32>(), vec2<f32>(), 1_f));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- "12:34 error: attempted to construct 'vec4<f32>' with 5 component(s)");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_THAT(
+ r()->error(),
+ HasSubstr("12:34 error: no matching constructor for vec4<f32>(vec2<f32>, vec2<f32>, f32)"));
}
TEST_F(ResolverTypeConstructorValidationTest,
Expr_Constructor_Vec4_Error_TooManyArgumentsVec2Vec2Vec2) {
- auto* tc = vec4<f32>(Construct(Source{{12, 34}}, ty.vec2<f32>()),
- Construct(Source{{12, 40}}, ty.vec2<f32>()),
- Construct(Source{{12, 40}}, ty.vec2<f32>()));
- WrapInFunction(tc);
+ WrapInFunction(
+ Construct(Source{{12, 34}}, ty.vec4<f32>(), vec2<f32>(), vec2<f32>(), vec2<f32>()));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- "12:34 error: attempted to construct 'vec4<f32>' with 6 component(s)");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_THAT(
+ r()->error(),
+ HasSubstr(
+ "12:34 error: no matching constructor for vec4<f32>(vec2<f32>, vec2<f32>, vec2<f32>)"));
}
-TEST_F(ResolverTypeConstructorValidationTest,
- Expr_Constructor_Vec4_Error_TooFewArgumentsVec3) {
- auto* tc = vec4<f32>(Construct(Source{{12, 34}}, ty.vec3<f32>()));
- WrapInFunction(tc);
+TEST_F(ResolverTypeConstructorValidationTest, Expr_Constructor_Vec4_Error_TooFewArgumentsVec3) {
+ WrapInFunction(Construct(Source{{12, 34}}, ty.vec4<f32>(), vec3<f32>()));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- "12:34 error: attempted to construct 'vec4<f32>' with 3 component(s)");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_THAT(r()->error(),
+ HasSubstr("12:34 error: no matching constructor for vec4<f32>(vec3<f32>)"));
}
TEST_F(ResolverTypeConstructorValidationTest,
Expr_Constructor_Vec4_Error_TooManyArgumentsVec3AndScalars) {
- auto* tc =
- vec4<f32>(Construct(Source{{12, 34}}, ty.vec3<f32>()),
- Expr(Source{{12, 40}}, 1.0f), Expr(Source{{12, 46}}, 1.0f));
- WrapInFunction(tc);
+ WrapInFunction(Construct(Source{{12, 34}}, ty.vec4<f32>(), vec3<f32>(), 1_f, 2_f));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- "12:34 error: attempted to construct 'vec4<f32>' with 5 component(s)");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_THAT(
+ r()->error(),
+ HasSubstr("12:34 error: no matching constructor for vec4<f32>(vec3<f32>, f32, f32)"));
}
TEST_F(ResolverTypeConstructorValidationTest,
Expr_Constructor_Vec4_Error_TooManyArgumentsVec3AndVec2) {
- auto* tc = vec4<f32>(Construct(Source{{12, 34}}, ty.vec3<f32>()),
- Construct(Source{{12, 40}}, ty.vec2<f32>()));
- WrapInFunction(tc);
+ WrapInFunction(Construct(Source{{12, 34}}, ty.vec4<f32>(), vec3<f32>(), vec2<f32>()));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- "12:34 error: attempted to construct 'vec4<f32>' with 5 component(s)");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_THAT(
+ r()->error(),
+ HasSubstr("12:34 error: no matching constructor for vec4<f32>(vec3<f32>, vec2<f32>)"));
}
TEST_F(ResolverTypeConstructorValidationTest,
Expr_Constructor_Vec4_Error_TooManyArgumentsVec2AndVec3) {
- auto* tc = vec4<f32>(Construct(Source{{12, 34}}, ty.vec2<f32>()),
- Construct(Source{{12, 40}}, ty.vec3<f32>()));
- WrapInFunction(tc);
+ WrapInFunction(Construct(Source{{12, 34}}, ty.vec4<f32>(), vec2<f32>(), vec3<f32>()));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- "12:34 error: attempted to construct 'vec4<f32>' with 5 component(s)");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_THAT(
+ r()->error(),
+ HasSubstr("12:34 error: no matching constructor for vec4<f32>(vec2<f32>, vec3<f32>)"));
}
TEST_F(ResolverTypeConstructorValidationTest,
Expr_Constructor_Vec4_Error_TooManyArgumentsVec3AndVec3) {
- auto* tc = vec4<f32>(Construct(Source{{12, 34}}, ty.vec3<f32>()),
- Construct(Source{{12, 40}}, ty.vec3<f32>()));
- WrapInFunction(tc);
+ WrapInFunction(Construct(Source{{12, 34}}, ty.vec4<f32>(), vec3<f32>(), vec3<f32>()));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- "12:34 error: attempted to construct 'vec4<f32>' with 6 component(s)");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_THAT(
+ r()->error(),
+ HasSubstr("12:34 error: no matching constructor for vec4<f32>(vec3<f32>, vec3<f32>)"));
}
-TEST_F(ResolverTypeConstructorValidationTest,
- Expr_Constructor_Vec4_Error_InvalidArgumentType) {
- auto* tc = vec4<f32>(Construct(Source{{12, 34}}, ty.mat2x2<f32>()));
- WrapInFunction(tc);
+TEST_F(ResolverTypeConstructorValidationTest, Expr_Constructor_Vec4_Error_InvalidArgumentType) {
+ WrapInFunction(Construct(Source{{12, 34}}, ty.vec4<f32>(), mat2x2<f32>()));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: expected vector or scalar type in vector "
- "constructor; found: mat2x2<f32>");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_THAT(r()->error(),
+ HasSubstr("12:34 error: no matching constructor for vec4<f32>(mat2x2<f32>)"));
}
-TEST_F(ResolverTypeConstructorValidationTest,
- Expr_Constructor_Vec4_Success_ZeroValue) {
- auto* tc = vec4<f32>();
- WrapInFunction(tc);
+TEST_F(ResolverTypeConstructorValidationTest, Expr_Constructor_Vec4_Success_ZeroValue) {
+ auto* tc = vec4<f32>();
+ WrapInFunction(tc);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(tc), nullptr);
- ASSERT_TRUE(TypeOf(tc)->Is<sem::Vector>());
- EXPECT_TRUE(TypeOf(tc)->As<sem::Vector>()->type()->Is<sem::F32>());
- EXPECT_EQ(TypeOf(tc)->As<sem::Vector>()->Width(), 4u);
+ ASSERT_NE(TypeOf(tc), nullptr);
+ ASSERT_TRUE(TypeOf(tc)->Is<sem::Vector>());
+ EXPECT_TRUE(TypeOf(tc)->As<sem::Vector>()->type()->Is<sem::F32>());
+ EXPECT_EQ(TypeOf(tc)->As<sem::Vector>()->Width(), 4u);
}
-TEST_F(ResolverTypeConstructorValidationTest,
- Expr_Constructor_Vec4F32_Success_Scalar) {
- auto* tc = vec4<f32>(1.0f, 1.0f, 1.0f, 1.0f);
- WrapInFunction(tc);
+TEST_F(ResolverTypeConstructorValidationTest, Expr_Constructor_Vec4F32_Success_Scalar) {
+ auto* tc = vec4<f32>(1_f, 1_f, 1_f, 1_f);
+ WrapInFunction(tc);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(tc), nullptr);
- ASSERT_TRUE(TypeOf(tc)->Is<sem::Vector>());
- EXPECT_TRUE(TypeOf(tc)->As<sem::Vector>()->type()->Is<sem::F32>());
- EXPECT_EQ(TypeOf(tc)->As<sem::Vector>()->Width(), 4u);
+ ASSERT_NE(TypeOf(tc), nullptr);
+ ASSERT_TRUE(TypeOf(tc)->Is<sem::Vector>());
+ EXPECT_TRUE(TypeOf(tc)->As<sem::Vector>()->type()->Is<sem::F32>());
+ EXPECT_EQ(TypeOf(tc)->As<sem::Vector>()->Width(), 4u);
}
-TEST_F(ResolverTypeConstructorValidationTest,
- Expr_Constructor_Vec4U32_Success_Scalar) {
- auto* tc = vec4<u32>(1u, 1u, 1u, 1u);
- WrapInFunction(tc);
+TEST_F(ResolverTypeConstructorValidationTest, Expr_Constructor_Vec4U32_Success_Scalar) {
+ auto* tc = vec4<u32>(1_u, 1_u, 1_u, 1_u);
+ WrapInFunction(tc);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(tc), nullptr);
- ASSERT_TRUE(TypeOf(tc)->Is<sem::Vector>());
- EXPECT_TRUE(TypeOf(tc)->As<sem::Vector>()->type()->Is<sem::U32>());
- EXPECT_EQ(TypeOf(tc)->As<sem::Vector>()->Width(), 4u);
+ ASSERT_NE(TypeOf(tc), nullptr);
+ ASSERT_TRUE(TypeOf(tc)->Is<sem::Vector>());
+ EXPECT_TRUE(TypeOf(tc)->As<sem::Vector>()->type()->Is<sem::U32>());
+ EXPECT_EQ(TypeOf(tc)->As<sem::Vector>()->Width(), 4u);
}
-TEST_F(ResolverTypeConstructorValidationTest,
- Expr_Constructor_Vec4I32_Success_Scalar) {
- auto* tc = vec4<i32>(1, 1, 1, 1);
- WrapInFunction(tc);
+TEST_F(ResolverTypeConstructorValidationTest, Expr_Constructor_Vec4I32_Success_Scalar) {
+ auto* tc = vec4<i32>(1_i, 1_i, 1_i, 1_i);
+ WrapInFunction(tc);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(tc), nullptr);
- ASSERT_TRUE(TypeOf(tc)->Is<sem::Vector>());
- EXPECT_TRUE(TypeOf(tc)->As<sem::Vector>()->type()->Is<sem::I32>());
- EXPECT_EQ(TypeOf(tc)->As<sem::Vector>()->Width(), 4u);
+ ASSERT_NE(TypeOf(tc), nullptr);
+ ASSERT_TRUE(TypeOf(tc)->Is<sem::Vector>());
+ EXPECT_TRUE(TypeOf(tc)->As<sem::Vector>()->type()->Is<sem::I32>());
+ EXPECT_EQ(TypeOf(tc)->As<sem::Vector>()->Width(), 4u);
}
-TEST_F(ResolverTypeConstructorValidationTest,
- Expr_Constructor_Vec4Bool_Success_Scalar) {
- auto* tc = vec4<bool>(true, false, true, false);
- WrapInFunction(tc);
+TEST_F(ResolverTypeConstructorValidationTest, Expr_Constructor_Vec4Bool_Success_Scalar) {
+ auto* tc = vec4<bool>(true, false, true, false);
+ WrapInFunction(tc);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(tc), nullptr);
- ASSERT_TRUE(TypeOf(tc)->Is<sem::Vector>());
- EXPECT_TRUE(TypeOf(tc)->As<sem::Vector>()->type()->Is<sem::Bool>());
- EXPECT_EQ(TypeOf(tc)->As<sem::Vector>()->Width(), 4u);
+ ASSERT_NE(TypeOf(tc), nullptr);
+ ASSERT_TRUE(TypeOf(tc)->Is<sem::Vector>());
+ EXPECT_TRUE(TypeOf(tc)->As<sem::Vector>()->type()->Is<sem::Bool>());
+ EXPECT_EQ(TypeOf(tc)->As<sem::Vector>()->Width(), 4u);
}
-TEST_F(ResolverTypeConstructorValidationTest,
- Expr_Constructor_Vec4_Success_Vec2ScalarScalar) {
- auto* tc = vec4<f32>(vec2<f32>(), 1.0f, 1.0f);
- WrapInFunction(tc);
+TEST_F(ResolverTypeConstructorValidationTest, Expr_Constructor_Vec4_Success_Vec2ScalarScalar) {
+ auto* tc = vec4<f32>(vec2<f32>(), 1_f, 1_f);
+ WrapInFunction(tc);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(tc), nullptr);
- ASSERT_TRUE(TypeOf(tc)->Is<sem::Vector>());
- EXPECT_TRUE(TypeOf(tc)->As<sem::Vector>()->type()->Is<sem::F32>());
- EXPECT_EQ(TypeOf(tc)->As<sem::Vector>()->Width(), 4u);
+ ASSERT_NE(TypeOf(tc), nullptr);
+ ASSERT_TRUE(TypeOf(tc)->Is<sem::Vector>());
+ EXPECT_TRUE(TypeOf(tc)->As<sem::Vector>()->type()->Is<sem::F32>());
+ EXPECT_EQ(TypeOf(tc)->As<sem::Vector>()->Width(), 4u);
}
-TEST_F(ResolverTypeConstructorValidationTest,
- Expr_Constructor_Vec4_Success_ScalarVec2Scalar) {
- auto* tc = vec4<f32>(1.0f, vec2<f32>(), 1.0f);
- WrapInFunction(tc);
+TEST_F(ResolverTypeConstructorValidationTest, Expr_Constructor_Vec4_Success_ScalarVec2Scalar) {
+ auto* tc = vec4<f32>(1_f, vec2<f32>(), 1_f);
+ WrapInFunction(tc);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(tc), nullptr);
- ASSERT_TRUE(TypeOf(tc)->Is<sem::Vector>());
- EXPECT_TRUE(TypeOf(tc)->As<sem::Vector>()->type()->Is<sem::F32>());
- EXPECT_EQ(TypeOf(tc)->As<sem::Vector>()->Width(), 4u);
+ ASSERT_NE(TypeOf(tc), nullptr);
+ ASSERT_TRUE(TypeOf(tc)->Is<sem::Vector>());
+ EXPECT_TRUE(TypeOf(tc)->As<sem::Vector>()->type()->Is<sem::F32>());
+ EXPECT_EQ(TypeOf(tc)->As<sem::Vector>()->Width(), 4u);
}
-TEST_F(ResolverTypeConstructorValidationTest,
- Expr_Constructor_Vec4_Success_ScalarScalarVec2) {
- auto* tc = vec4<f32>(1.0f, 1.0f, vec2<f32>());
- WrapInFunction(tc);
+TEST_F(ResolverTypeConstructorValidationTest, Expr_Constructor_Vec4_Success_ScalarScalarVec2) {
+ auto* tc = vec4<f32>(1_f, 1_f, vec2<f32>());
+ WrapInFunction(tc);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(tc), nullptr);
- ASSERT_TRUE(TypeOf(tc)->Is<sem::Vector>());
- EXPECT_TRUE(TypeOf(tc)->As<sem::Vector>()->type()->Is<sem::F32>());
- EXPECT_EQ(TypeOf(tc)->As<sem::Vector>()->Width(), 4u);
+ ASSERT_NE(TypeOf(tc), nullptr);
+ ASSERT_TRUE(TypeOf(tc)->Is<sem::Vector>());
+ EXPECT_TRUE(TypeOf(tc)->As<sem::Vector>()->type()->Is<sem::F32>());
+ EXPECT_EQ(TypeOf(tc)->As<sem::Vector>()->Width(), 4u);
}
-TEST_F(ResolverTypeConstructorValidationTest,
- Expr_Constructor_Vec4_Success_Vec2AndVec2) {
- auto* tc = vec4<f32>(vec2<f32>(), vec2<f32>());
- WrapInFunction(tc);
+TEST_F(ResolverTypeConstructorValidationTest, Expr_Constructor_Vec4_Success_Vec2AndVec2) {
+ auto* tc = vec4<f32>(vec2<f32>(), vec2<f32>());
+ WrapInFunction(tc);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(tc), nullptr);
- ASSERT_TRUE(TypeOf(tc)->Is<sem::Vector>());
- EXPECT_TRUE(TypeOf(tc)->As<sem::Vector>()->type()->Is<sem::F32>());
- EXPECT_EQ(TypeOf(tc)->As<sem::Vector>()->Width(), 4u);
+ ASSERT_NE(TypeOf(tc), nullptr);
+ ASSERT_TRUE(TypeOf(tc)->Is<sem::Vector>());
+ EXPECT_TRUE(TypeOf(tc)->As<sem::Vector>()->type()->Is<sem::F32>());
+ EXPECT_EQ(TypeOf(tc)->As<sem::Vector>()->Width(), 4u);
}
-TEST_F(ResolverTypeConstructorValidationTest,
- Expr_Constructor_Vec4_Success_Vec3AndScalar) {
- auto* tc = vec4<f32>(vec3<f32>(), 1.0f);
- WrapInFunction(tc);
+TEST_F(ResolverTypeConstructorValidationTest, Expr_Constructor_Vec4_Success_Vec3AndScalar) {
+ auto* tc = vec4<f32>(vec3<f32>(), 1_f);
+ WrapInFunction(tc);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(tc), nullptr);
- ASSERT_TRUE(TypeOf(tc)->Is<sem::Vector>());
- EXPECT_TRUE(TypeOf(tc)->As<sem::Vector>()->type()->Is<sem::F32>());
- EXPECT_EQ(TypeOf(tc)->As<sem::Vector>()->Width(), 4u);
+ ASSERT_NE(TypeOf(tc), nullptr);
+ ASSERT_TRUE(TypeOf(tc)->Is<sem::Vector>());
+ EXPECT_TRUE(TypeOf(tc)->As<sem::Vector>()->type()->Is<sem::F32>());
+ EXPECT_EQ(TypeOf(tc)->As<sem::Vector>()->Width(), 4u);
}
-TEST_F(ResolverTypeConstructorValidationTest,
- Expr_Constructor_Vec4_Success_ScalarAndVec3) {
- auto* tc = vec4<f32>(1.0f, vec3<f32>());
- WrapInFunction(tc);
+TEST_F(ResolverTypeConstructorValidationTest, Expr_Constructor_Vec4_Success_ScalarAndVec3) {
+ auto* tc = vec4<f32>(1_f, vec3<f32>());
+ WrapInFunction(tc);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(tc), nullptr);
- ASSERT_TRUE(TypeOf(tc)->Is<sem::Vector>());
- EXPECT_TRUE(TypeOf(tc)->As<sem::Vector>()->type()->Is<sem::F32>());
- EXPECT_EQ(TypeOf(tc)->As<sem::Vector>()->Width(), 4u);
+ ASSERT_NE(TypeOf(tc), nullptr);
+ ASSERT_TRUE(TypeOf(tc)->Is<sem::Vector>());
+ EXPECT_TRUE(TypeOf(tc)->As<sem::Vector>()->type()->Is<sem::F32>());
+ EXPECT_EQ(TypeOf(tc)->As<sem::Vector>()->Width(), 4u);
}
-TEST_F(ResolverTypeConstructorValidationTest,
- Expr_Constructor_Vec4_Success_Identity) {
- auto* tc = vec4<f32>(vec4<f32>());
- WrapInFunction(tc);
+TEST_F(ResolverTypeConstructorValidationTest, Expr_Constructor_Vec4_Success_Identity) {
+ auto* tc = vec4<f32>(vec4<f32>());
+ WrapInFunction(tc);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(tc), nullptr);
- ASSERT_TRUE(TypeOf(tc)->Is<sem::Vector>());
- EXPECT_TRUE(TypeOf(tc)->As<sem::Vector>()->type()->Is<sem::F32>());
- EXPECT_EQ(TypeOf(tc)->As<sem::Vector>()->Width(), 4u);
+ ASSERT_NE(TypeOf(tc), nullptr);
+ ASSERT_TRUE(TypeOf(tc)->Is<sem::Vector>());
+ EXPECT_TRUE(TypeOf(tc)->As<sem::Vector>()->type()->Is<sem::F32>());
+ EXPECT_EQ(TypeOf(tc)->As<sem::Vector>()->Width(), 4u);
}
-TEST_F(ResolverTypeConstructorValidationTest,
- Expr_Constructor_Vec4_Success_Vec4TypeConversion) {
- auto* tc = vec4<f32>(vec4<i32>());
- WrapInFunction(tc);
+TEST_F(ResolverTypeConstructorValidationTest, Expr_Constructor_Vec4_Success_Vec4TypeConversion) {
+ auto* tc = vec4<f32>(vec4<i32>());
+ WrapInFunction(tc);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(tc), nullptr);
- ASSERT_TRUE(TypeOf(tc)->Is<sem::Vector>());
- EXPECT_TRUE(TypeOf(tc)->As<sem::Vector>()->type()->Is<sem::F32>());
- EXPECT_EQ(TypeOf(tc)->As<sem::Vector>()->Width(), 4u);
+ ASSERT_NE(TypeOf(tc), nullptr);
+ ASSERT_TRUE(TypeOf(tc)->Is<sem::Vector>());
+ EXPECT_TRUE(TypeOf(tc)->As<sem::Vector>()->type()->Is<sem::F32>());
+ EXPECT_EQ(TypeOf(tc)->As<sem::Vector>()->Width(), 4u);
}
TEST_F(ResolverTypeConstructorValidationTest,
Expr_Constructor_NestedVectorConstructors_InnerError) {
- auto* tc = vec4<f32>(vec4<f32>(1.0f, 1.0f,
- vec3<f32>(Expr(Source{{12, 34}}, 1.0f),
- Expr(Source{{12, 34}}, 1.0f))),
- 1.0f);
- WrapInFunction(tc);
+ WrapInFunction(vec4<f32>(vec4<f32>(1_f, 1_f, //
+ Construct(Source{{12, 34}}, ty.vec3<f32>(), 1_f, 1_f)),
+ 1_f));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- "12:34 error: attempted to construct 'vec3<f32>' with 2 component(s)");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_THAT(r()->error(),
+ HasSubstr("12:34 error: no matching constructor for vec3<f32>(f32, f32)"));
}
-TEST_F(ResolverTypeConstructorValidationTest,
- Expr_Constructor_NestedVectorConstructors_Success) {
- auto* tc = vec4<f32>(vec3<f32>(vec2<f32>(1.0f, 1.0f), 1.0f), 1.0f);
- WrapInFunction(tc);
+TEST_F(ResolverTypeConstructorValidationTest, Expr_Constructor_NestedVectorConstructors_Success) {
+ auto* tc = vec4<f32>(vec3<f32>(vec2<f32>(1_f, 1_f), 1_f), 1_f);
+ WrapInFunction(tc);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(tc), nullptr);
- ASSERT_TRUE(TypeOf(tc)->Is<sem::Vector>());
- EXPECT_TRUE(TypeOf(tc)->As<sem::Vector>()->type()->Is<sem::F32>());
- EXPECT_EQ(TypeOf(tc)->As<sem::Vector>()->Width(), 4u);
+ ASSERT_NE(TypeOf(tc), nullptr);
+ ASSERT_TRUE(TypeOf(tc)->Is<sem::Vector>());
+ EXPECT_TRUE(TypeOf(tc)->As<sem::Vector>()->type()->Is<sem::F32>());
+ EXPECT_EQ(TypeOf(tc)->As<sem::Vector>()->Width(), 4u);
}
-TEST_F(ResolverTypeConstructorValidationTest,
- Expr_Constructor_Vector_Alias_Argument_Error) {
- auto* alias = Alias("UnsignedInt", ty.u32());
- Global("uint_var", ty.Of(alias), ast::StorageClass::kPrivate);
+TEST_F(ResolverTypeConstructorValidationTest, Expr_Constructor_Vector_Alias_Argument_Error) {
+ auto* alias = Alias("UnsignedInt", ty.u32());
+ Global("uint_var", ty.Of(alias), ast::StorageClass::kPrivate);
- auto* tc = vec2<f32>(Expr(Source{{12, 34}}, "uint_var"));
- WrapInFunction(tc);
+ auto* tc = Construct(Source{{12, 34}}, ty.vec2<f32>(), "uint_var");
+ WrapInFunction(tc);
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: type in vector constructor does not match vector "
- "type: expected 'f32', found 'u32'");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_THAT(r()->error(), HasSubstr("12:34 error: no matching constructor for vec2<f32>(u32)"));
}
-TEST_F(ResolverTypeConstructorValidationTest,
- Expr_Constructor_Vector_Alias_Argument_Success) {
- auto* f32_alias = Alias("Float32", ty.f32());
- auto* vec2_alias = Alias("VectorFloat2", ty.vec2<f32>());
- Global("my_f32", ty.Of(f32_alias), ast::StorageClass::kPrivate);
- Global("my_vec2", ty.Of(vec2_alias), ast::StorageClass::kPrivate);
+TEST_F(ResolverTypeConstructorValidationTest, Expr_Constructor_Vector_Alias_Argument_Success) {
+ auto* f32_alias = Alias("Float32", ty.f32());
+ auto* vec2_alias = Alias("VectorFloat2", ty.vec2<f32>());
+ Global("my_f32", ty.Of(f32_alias), ast::StorageClass::kPrivate);
+ Global("my_vec2", ty.Of(vec2_alias), ast::StorageClass::kPrivate);
- auto* tc = vec3<f32>("my_vec2", "my_f32");
- WrapInFunction(tc);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ auto* tc = vec3<f32>("my_vec2", "my_f32");
+ WrapInFunction(tc);
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
}
-TEST_F(ResolverTypeConstructorValidationTest,
- Expr_Constructor_Vector_ElementTypeAlias_Error) {
- auto* f32_alias = Alias("Float32", ty.f32());
+TEST_F(ResolverTypeConstructorValidationTest, Expr_Constructor_Vector_ElementTypeAlias_Error) {
+ auto* f32_alias = Alias("Float32", ty.f32());
- // vec2<Float32>(1.0f, 1u)
- auto* vec_type = ty.vec(ty.Of(f32_alias), 2);
- auto* tc =
- Construct(Source{{12, 34}}, vec_type, 1.0f, Expr(Source{{12, 40}}, 1u));
- WrapInFunction(tc);
+ // vec2<Float32>(1.0f, 1u)
+ auto* vec_type = ty.vec(ty.Of(f32_alias), 2);
+ WrapInFunction(Construct(Source{{12, 34}}, vec_type, 1_f, 1_u));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:40 error: type in vector constructor does not match vector "
- "type: expected 'f32', found 'u32'");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_THAT(r()->error(),
+ HasSubstr("12:34 error: no matching constructor for vec2<f32>(f32, u32)"));
}
-TEST_F(ResolverTypeConstructorValidationTest,
- Expr_Constructor_Vector_ElementTypeAlias_Success) {
- auto* f32_alias = Alias("Float32", ty.f32());
+TEST_F(ResolverTypeConstructorValidationTest, Expr_Constructor_Vector_ElementTypeAlias_Success) {
+ auto* f32_alias = Alias("Float32", ty.f32());
- // vec2<Float32>(1.0f, 1.0f)
- auto* vec_type = ty.vec(ty.Of(f32_alias), 2);
- auto* tc = Construct(Source{{12, 34}}, vec_type, 1.0f, 1.0f);
- WrapInFunction(tc);
+ // vec2<Float32>(1.0f, 1.0f)
+ auto* vec_type = ty.vec(ty.Of(f32_alias), 2);
+ auto* tc = Construct(Source{{12, 34}}, vec_type, 1_f, 1_f);
+ WrapInFunction(tc);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverTypeConstructorValidationTest,
Expr_Constructor_Vector_ArgumentElementTypeAlias_Error) {
- auto* f32_alias = Alias("Float32", ty.f32());
+ auto* f32_alias = Alias("Float32", ty.f32());
- // vec3<u32>(vec<Float32>(), 1.0f)
- auto* vec_type = ty.vec(ty.Of(f32_alias), 2);
- auto* tc = vec3<u32>(Construct(Source{{12, 34}}, vec_type), 1.0f);
- WrapInFunction(tc);
+ // vec3<u32>(vec<Float32>(), 1.0f)
+ auto* vec_type = ty.vec(ty.Of(f32_alias), 2);
+ WrapInFunction(Construct(Source{{12, 34}}, ty.vec3<u32>(), Construct(vec_type), 1_f));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: type in vector constructor does not match vector "
- "type: expected 'u32', found 'f32'");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_THAT(r()->error(),
+ HasSubstr("12:34 error: no matching constructor for vec3<u32>(vec2<f32>, f32)"));
}
TEST_F(ResolverTypeConstructorValidationTest,
Expr_Constructor_Vector_ArgumentElementTypeAlias_Success) {
- auto* f32_alias = Alias("Float32", ty.f32());
+ auto* f32_alias = Alias("Float32", ty.f32());
- // vec3<f32>(vec<Float32>(), 1.0f)
- auto* vec_type = ty.vec(ty.Of(f32_alias), 2);
- auto* tc = vec3<f32>(Construct(Source{{12, 34}}, vec_type), 1.0f);
- WrapInFunction(tc);
+ // vec3<f32>(vec<Float32>(), 1.0f)
+ auto* vec_type = ty.vec(ty.Of(f32_alias), 2);
+ auto* tc = vec3<f32>(Construct(Source{{12, 34}}, vec_type), 1_f);
+ WrapInFunction(tc);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverTypeConstructorValidationTest, InferVec2ElementTypeFromScalars) {
- auto* vec2_bool =
- Construct(create<ast::Vector>(nullptr, 2), Expr(true), Expr(false));
- auto* vec2_i32 = Construct(create<ast::Vector>(nullptr, 2), Expr(1), Expr(2));
- auto* vec2_u32 =
- Construct(create<ast::Vector>(nullptr, 2), Expr(1u), Expr(2u));
- auto* vec2_f32 =
- Construct(create<ast::Vector>(nullptr, 2), Expr(1.0f), Expr(2.0f));
- WrapInFunction(vec2_bool, vec2_i32, vec2_u32, vec2_f32);
-
- ASSERT_TRUE(r()->Resolve()) << r()->error();
-
- ASSERT_TRUE(TypeOf(vec2_bool)->Is<sem::Vector>());
- ASSERT_TRUE(TypeOf(vec2_i32)->Is<sem::Vector>());
- ASSERT_TRUE(TypeOf(vec2_u32)->Is<sem::Vector>());
- ASSERT_TRUE(TypeOf(vec2_f32)->Is<sem::Vector>());
- EXPECT_TRUE(TypeOf(vec2_bool)->As<sem::Vector>()->type()->Is<sem::Bool>());
- EXPECT_TRUE(TypeOf(vec2_i32)->As<sem::Vector>()->type()->Is<sem::I32>());
- EXPECT_TRUE(TypeOf(vec2_u32)->As<sem::Vector>()->type()->Is<sem::U32>());
- EXPECT_TRUE(TypeOf(vec2_f32)->As<sem::Vector>()->type()->Is<sem::F32>());
- EXPECT_EQ(TypeOf(vec2_bool)->As<sem::Vector>()->Width(), 2u);
- EXPECT_EQ(TypeOf(vec2_i32)->As<sem::Vector>()->Width(), 2u);
- EXPECT_EQ(TypeOf(vec2_u32)->As<sem::Vector>()->Width(), 2u);
- EXPECT_EQ(TypeOf(vec2_f32)->As<sem::Vector>()->Width(), 2u);
- EXPECT_EQ(TypeOf(vec2_bool), TypeOf(vec2_bool->target.type));
- EXPECT_EQ(TypeOf(vec2_i32), TypeOf(vec2_i32->target.type));
- EXPECT_EQ(TypeOf(vec2_u32), TypeOf(vec2_u32->target.type));
- EXPECT_EQ(TypeOf(vec2_f32), TypeOf(vec2_f32->target.type));
+ auto* vec2_bool = Construct(create<ast::Vector>(nullptr, 2), Expr(true), Expr(false));
+ auto* vec2_i32 = Construct(create<ast::Vector>(nullptr, 2), Expr(1_i), Expr(2_i));
+ auto* vec2_u32 = Construct(create<ast::Vector>(nullptr, 2), Expr(1_u), Expr(2_u));
+ auto* vec2_f32 = Construct(create<ast::Vector>(nullptr, 2), Expr(1_f), Expr(2_f));
+ WrapInFunction(vec2_bool, vec2_i32, vec2_u32, vec2_f32);
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
+
+ ASSERT_TRUE(TypeOf(vec2_bool)->Is<sem::Vector>());
+ ASSERT_TRUE(TypeOf(vec2_i32)->Is<sem::Vector>());
+ ASSERT_TRUE(TypeOf(vec2_u32)->Is<sem::Vector>());
+ ASSERT_TRUE(TypeOf(vec2_f32)->Is<sem::Vector>());
+ EXPECT_TRUE(TypeOf(vec2_bool)->As<sem::Vector>()->type()->Is<sem::Bool>());
+ EXPECT_TRUE(TypeOf(vec2_i32)->As<sem::Vector>()->type()->Is<sem::I32>());
+ EXPECT_TRUE(TypeOf(vec2_u32)->As<sem::Vector>()->type()->Is<sem::U32>());
+ EXPECT_TRUE(TypeOf(vec2_f32)->As<sem::Vector>()->type()->Is<sem::F32>());
+ EXPECT_EQ(TypeOf(vec2_bool)->As<sem::Vector>()->Width(), 2u);
+ EXPECT_EQ(TypeOf(vec2_i32)->As<sem::Vector>()->Width(), 2u);
+ EXPECT_EQ(TypeOf(vec2_u32)->As<sem::Vector>()->Width(), 2u);
+ EXPECT_EQ(TypeOf(vec2_f32)->As<sem::Vector>()->Width(), 2u);
+ EXPECT_EQ(TypeOf(vec2_bool), TypeOf(vec2_bool->target.type));
+ EXPECT_EQ(TypeOf(vec2_i32), TypeOf(vec2_i32->target.type));
+ EXPECT_EQ(TypeOf(vec2_u32), TypeOf(vec2_u32->target.type));
+ EXPECT_EQ(TypeOf(vec2_f32), TypeOf(vec2_f32->target.type));
}
TEST_F(ResolverTypeConstructorValidationTest, InferVec2ElementTypeFromVec2) {
- auto* vec2_bool =
- Construct(create<ast::Vector>(nullptr, 2), vec2<bool>(true, false));
- auto* vec2_i32 = Construct(create<ast::Vector>(nullptr, 2), vec2<i32>(1, 2));
- auto* vec2_u32 =
- Construct(create<ast::Vector>(nullptr, 2), vec2<u32>(1u, 2u));
- auto* vec2_f32 =
- Construct(create<ast::Vector>(nullptr, 2), vec2<f32>(1.0f, 2.0f));
- WrapInFunction(vec2_bool, vec2_i32, vec2_u32, vec2_f32);
-
- ASSERT_TRUE(r()->Resolve()) << r()->error();
-
- ASSERT_TRUE(TypeOf(vec2_bool)->Is<sem::Vector>());
- ASSERT_TRUE(TypeOf(vec2_i32)->Is<sem::Vector>());
- ASSERT_TRUE(TypeOf(vec2_u32)->Is<sem::Vector>());
- ASSERT_TRUE(TypeOf(vec2_f32)->Is<sem::Vector>());
- EXPECT_TRUE(TypeOf(vec2_bool)->As<sem::Vector>()->type()->Is<sem::Bool>());
- EXPECT_TRUE(TypeOf(vec2_i32)->As<sem::Vector>()->type()->Is<sem::I32>());
- EXPECT_TRUE(TypeOf(vec2_u32)->As<sem::Vector>()->type()->Is<sem::U32>());
- EXPECT_TRUE(TypeOf(vec2_f32)->As<sem::Vector>()->type()->Is<sem::F32>());
- EXPECT_EQ(TypeOf(vec2_bool)->As<sem::Vector>()->Width(), 2u);
- EXPECT_EQ(TypeOf(vec2_i32)->As<sem::Vector>()->Width(), 2u);
- EXPECT_EQ(TypeOf(vec2_u32)->As<sem::Vector>()->Width(), 2u);
- EXPECT_EQ(TypeOf(vec2_f32)->As<sem::Vector>()->Width(), 2u);
- EXPECT_EQ(TypeOf(vec2_bool), TypeOf(vec2_bool->target.type));
- EXPECT_EQ(TypeOf(vec2_i32), TypeOf(vec2_i32->target.type));
- EXPECT_EQ(TypeOf(vec2_u32), TypeOf(vec2_u32->target.type));
- EXPECT_EQ(TypeOf(vec2_f32), TypeOf(vec2_f32->target.type));
+ auto* vec2_bool = Construct(create<ast::Vector>(nullptr, 2), vec2<bool>(true, false));
+ auto* vec2_i32 = Construct(create<ast::Vector>(nullptr, 2), vec2<i32>(1_i, 2_i));
+ auto* vec2_u32 = Construct(create<ast::Vector>(nullptr, 2), vec2<u32>(1_u, 2_u));
+ auto* vec2_f32 = Construct(create<ast::Vector>(nullptr, 2), vec2<f32>(1_f, 2_f));
+ WrapInFunction(vec2_bool, vec2_i32, vec2_u32, vec2_f32);
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
+
+ ASSERT_TRUE(TypeOf(vec2_bool)->Is<sem::Vector>());
+ ASSERT_TRUE(TypeOf(vec2_i32)->Is<sem::Vector>());
+ ASSERT_TRUE(TypeOf(vec2_u32)->Is<sem::Vector>());
+ ASSERT_TRUE(TypeOf(vec2_f32)->Is<sem::Vector>());
+ EXPECT_TRUE(TypeOf(vec2_bool)->As<sem::Vector>()->type()->Is<sem::Bool>());
+ EXPECT_TRUE(TypeOf(vec2_i32)->As<sem::Vector>()->type()->Is<sem::I32>());
+ EXPECT_TRUE(TypeOf(vec2_u32)->As<sem::Vector>()->type()->Is<sem::U32>());
+ EXPECT_TRUE(TypeOf(vec2_f32)->As<sem::Vector>()->type()->Is<sem::F32>());
+ EXPECT_EQ(TypeOf(vec2_bool)->As<sem::Vector>()->Width(), 2u);
+ EXPECT_EQ(TypeOf(vec2_i32)->As<sem::Vector>()->Width(), 2u);
+ EXPECT_EQ(TypeOf(vec2_u32)->As<sem::Vector>()->Width(), 2u);
+ EXPECT_EQ(TypeOf(vec2_f32)->As<sem::Vector>()->Width(), 2u);
+ EXPECT_EQ(TypeOf(vec2_bool), TypeOf(vec2_bool->target.type));
+ EXPECT_EQ(TypeOf(vec2_i32), TypeOf(vec2_i32->target.type));
+ EXPECT_EQ(TypeOf(vec2_u32), TypeOf(vec2_u32->target.type));
+ EXPECT_EQ(TypeOf(vec2_f32), TypeOf(vec2_f32->target.type));
}
TEST_F(ResolverTypeConstructorValidationTest, InferVec3ElementTypeFromScalars) {
- auto* vec3_bool = Construct(create<ast::Vector>(nullptr, 3), Expr(true),
- Expr(false), Expr(true));
- auto* vec3_i32 =
- Construct(create<ast::Vector>(nullptr, 3), Expr(1), Expr(2), Expr(3));
- auto* vec3_u32 =
- Construct(create<ast::Vector>(nullptr, 3), Expr(1u), Expr(2u), Expr(3u));
- auto* vec3_f32 = Construct(create<ast::Vector>(nullptr, 3), Expr(1.0f),
- Expr(2.0f), Expr(3.0f));
- WrapInFunction(vec3_bool, vec3_i32, vec3_u32, vec3_f32);
-
- ASSERT_TRUE(r()->Resolve()) << r()->error();
-
- ASSERT_TRUE(TypeOf(vec3_bool)->Is<sem::Vector>());
- ASSERT_TRUE(TypeOf(vec3_i32)->Is<sem::Vector>());
- ASSERT_TRUE(TypeOf(vec3_u32)->Is<sem::Vector>());
- ASSERT_TRUE(TypeOf(vec3_f32)->Is<sem::Vector>());
- EXPECT_TRUE(TypeOf(vec3_bool)->As<sem::Vector>()->type()->Is<sem::Bool>());
- EXPECT_TRUE(TypeOf(vec3_i32)->As<sem::Vector>()->type()->Is<sem::I32>());
- EXPECT_TRUE(TypeOf(vec3_u32)->As<sem::Vector>()->type()->Is<sem::U32>());
- EXPECT_TRUE(TypeOf(vec3_f32)->As<sem::Vector>()->type()->Is<sem::F32>());
- EXPECT_EQ(TypeOf(vec3_bool)->As<sem::Vector>()->Width(), 3u);
- EXPECT_EQ(TypeOf(vec3_i32)->As<sem::Vector>()->Width(), 3u);
- EXPECT_EQ(TypeOf(vec3_u32)->As<sem::Vector>()->Width(), 3u);
- EXPECT_EQ(TypeOf(vec3_f32)->As<sem::Vector>()->Width(), 3u);
- EXPECT_EQ(TypeOf(vec3_bool), TypeOf(vec3_bool->target.type));
- EXPECT_EQ(TypeOf(vec3_i32), TypeOf(vec3_i32->target.type));
- EXPECT_EQ(TypeOf(vec3_u32), TypeOf(vec3_u32->target.type));
- EXPECT_EQ(TypeOf(vec3_f32), TypeOf(vec3_f32->target.type));
+ auto* vec3_bool =
+ Construct(create<ast::Vector>(nullptr, 3), Expr(true), Expr(false), Expr(true));
+ auto* vec3_i32 = Construct(create<ast::Vector>(nullptr, 3), Expr(1_i), Expr(2_i), Expr(3_i));
+ auto* vec3_u32 = Construct(create<ast::Vector>(nullptr, 3), Expr(1_u), Expr(2_u), Expr(3_u));
+ auto* vec3_f32 = Construct(create<ast::Vector>(nullptr, 3), Expr(1_f), Expr(2_f), Expr(3_f));
+ WrapInFunction(vec3_bool, vec3_i32, vec3_u32, vec3_f32);
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
+
+ ASSERT_TRUE(TypeOf(vec3_bool)->Is<sem::Vector>());
+ ASSERT_TRUE(TypeOf(vec3_i32)->Is<sem::Vector>());
+ ASSERT_TRUE(TypeOf(vec3_u32)->Is<sem::Vector>());
+ ASSERT_TRUE(TypeOf(vec3_f32)->Is<sem::Vector>());
+ EXPECT_TRUE(TypeOf(vec3_bool)->As<sem::Vector>()->type()->Is<sem::Bool>());
+ EXPECT_TRUE(TypeOf(vec3_i32)->As<sem::Vector>()->type()->Is<sem::I32>());
+ EXPECT_TRUE(TypeOf(vec3_u32)->As<sem::Vector>()->type()->Is<sem::U32>());
+ EXPECT_TRUE(TypeOf(vec3_f32)->As<sem::Vector>()->type()->Is<sem::F32>());
+ EXPECT_EQ(TypeOf(vec3_bool)->As<sem::Vector>()->Width(), 3u);
+ EXPECT_EQ(TypeOf(vec3_i32)->As<sem::Vector>()->Width(), 3u);
+ EXPECT_EQ(TypeOf(vec3_u32)->As<sem::Vector>()->Width(), 3u);
+ EXPECT_EQ(TypeOf(vec3_f32)->As<sem::Vector>()->Width(), 3u);
+ EXPECT_EQ(TypeOf(vec3_bool), TypeOf(vec3_bool->target.type));
+ EXPECT_EQ(TypeOf(vec3_i32), TypeOf(vec3_i32->target.type));
+ EXPECT_EQ(TypeOf(vec3_u32), TypeOf(vec3_u32->target.type));
+ EXPECT_EQ(TypeOf(vec3_f32), TypeOf(vec3_f32->target.type));
}
TEST_F(ResolverTypeConstructorValidationTest, InferVec3ElementTypeFromVec3) {
- auto* vec3_bool =
- Construct(create<ast::Vector>(nullptr, 3), vec3<bool>(true, false, true));
- auto* vec3_i32 =
- Construct(create<ast::Vector>(nullptr, 3), vec3<i32>(1, 2, 3));
- auto* vec3_u32 =
- Construct(create<ast::Vector>(nullptr, 3), vec3<u32>(1u, 2u, 3u));
- auto* vec3_f32 =
- Construct(create<ast::Vector>(nullptr, 3), vec3<f32>(1.0f, 2.0f, 3.0f));
- WrapInFunction(vec3_bool, vec3_i32, vec3_u32, vec3_f32);
-
- ASSERT_TRUE(r()->Resolve()) << r()->error();
-
- ASSERT_TRUE(TypeOf(vec3_bool)->Is<sem::Vector>());
- ASSERT_TRUE(TypeOf(vec3_i32)->Is<sem::Vector>());
- ASSERT_TRUE(TypeOf(vec3_u32)->Is<sem::Vector>());
- ASSERT_TRUE(TypeOf(vec3_f32)->Is<sem::Vector>());
- EXPECT_TRUE(TypeOf(vec3_bool)->As<sem::Vector>()->type()->Is<sem::Bool>());
- EXPECT_TRUE(TypeOf(vec3_i32)->As<sem::Vector>()->type()->Is<sem::I32>());
- EXPECT_TRUE(TypeOf(vec3_u32)->As<sem::Vector>()->type()->Is<sem::U32>());
- EXPECT_TRUE(TypeOf(vec3_f32)->As<sem::Vector>()->type()->Is<sem::F32>());
- EXPECT_EQ(TypeOf(vec3_bool)->As<sem::Vector>()->Width(), 3u);
- EXPECT_EQ(TypeOf(vec3_i32)->As<sem::Vector>()->Width(), 3u);
- EXPECT_EQ(TypeOf(vec3_u32)->As<sem::Vector>()->Width(), 3u);
- EXPECT_EQ(TypeOf(vec3_f32)->As<sem::Vector>()->Width(), 3u);
- EXPECT_EQ(TypeOf(vec3_bool), TypeOf(vec3_bool->target.type));
- EXPECT_EQ(TypeOf(vec3_i32), TypeOf(vec3_i32->target.type));
- EXPECT_EQ(TypeOf(vec3_u32), TypeOf(vec3_u32->target.type));
- EXPECT_EQ(TypeOf(vec3_f32), TypeOf(vec3_f32->target.type));
-}
-
-TEST_F(ResolverTypeConstructorValidationTest,
- InferVec3ElementTypeFromScalarAndVec2) {
- auto* vec3_bool = Construct(create<ast::Vector>(nullptr, 3), Expr(true),
- vec2<bool>(false, true));
- auto* vec3_i32 =
- Construct(create<ast::Vector>(nullptr, 3), Expr(1), vec2<i32>(2, 3));
- auto* vec3_u32 =
- Construct(create<ast::Vector>(nullptr, 3), Expr(1u), vec2<u32>(2u, 3u));
- auto* vec3_f32 = Construct(create<ast::Vector>(nullptr, 3), Expr(1.0f),
- vec2<f32>(2.0f, 3.0f));
- WrapInFunction(vec3_bool, vec3_i32, vec3_u32, vec3_f32);
-
- ASSERT_TRUE(r()->Resolve()) << r()->error();
-
- ASSERT_TRUE(TypeOf(vec3_bool)->Is<sem::Vector>());
- ASSERT_TRUE(TypeOf(vec3_i32)->Is<sem::Vector>());
- ASSERT_TRUE(TypeOf(vec3_u32)->Is<sem::Vector>());
- ASSERT_TRUE(TypeOf(vec3_f32)->Is<sem::Vector>());
- EXPECT_TRUE(TypeOf(vec3_bool)->As<sem::Vector>()->type()->Is<sem::Bool>());
- EXPECT_TRUE(TypeOf(vec3_i32)->As<sem::Vector>()->type()->Is<sem::I32>());
- EXPECT_TRUE(TypeOf(vec3_u32)->As<sem::Vector>()->type()->Is<sem::U32>());
- EXPECT_TRUE(TypeOf(vec3_f32)->As<sem::Vector>()->type()->Is<sem::F32>());
- EXPECT_EQ(TypeOf(vec3_bool)->As<sem::Vector>()->Width(), 3u);
- EXPECT_EQ(TypeOf(vec3_i32)->As<sem::Vector>()->Width(), 3u);
- EXPECT_EQ(TypeOf(vec3_u32)->As<sem::Vector>()->Width(), 3u);
- EXPECT_EQ(TypeOf(vec3_f32)->As<sem::Vector>()->Width(), 3u);
- EXPECT_EQ(TypeOf(vec3_bool), TypeOf(vec3_bool->target.type));
- EXPECT_EQ(TypeOf(vec3_i32), TypeOf(vec3_i32->target.type));
- EXPECT_EQ(TypeOf(vec3_u32), TypeOf(vec3_u32->target.type));
- EXPECT_EQ(TypeOf(vec3_f32), TypeOf(vec3_f32->target.type));
+ auto* vec3_bool = Construct(create<ast::Vector>(nullptr, 3), vec3<bool>(true, false, true));
+ auto* vec3_i32 = Construct(create<ast::Vector>(nullptr, 3), vec3<i32>(1_i, 2_i, 3_i));
+ auto* vec3_u32 = Construct(create<ast::Vector>(nullptr, 3), vec3<u32>(1_u, 2_u, 3_u));
+ auto* vec3_f32 = Construct(create<ast::Vector>(nullptr, 3), vec3<f32>(1_f, 2_f, 3_f));
+ WrapInFunction(vec3_bool, vec3_i32, vec3_u32, vec3_f32);
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
+
+ ASSERT_TRUE(TypeOf(vec3_bool)->Is<sem::Vector>());
+ ASSERT_TRUE(TypeOf(vec3_i32)->Is<sem::Vector>());
+ ASSERT_TRUE(TypeOf(vec3_u32)->Is<sem::Vector>());
+ ASSERT_TRUE(TypeOf(vec3_f32)->Is<sem::Vector>());
+ EXPECT_TRUE(TypeOf(vec3_bool)->As<sem::Vector>()->type()->Is<sem::Bool>());
+ EXPECT_TRUE(TypeOf(vec3_i32)->As<sem::Vector>()->type()->Is<sem::I32>());
+ EXPECT_TRUE(TypeOf(vec3_u32)->As<sem::Vector>()->type()->Is<sem::U32>());
+ EXPECT_TRUE(TypeOf(vec3_f32)->As<sem::Vector>()->type()->Is<sem::F32>());
+ EXPECT_EQ(TypeOf(vec3_bool)->As<sem::Vector>()->Width(), 3u);
+ EXPECT_EQ(TypeOf(vec3_i32)->As<sem::Vector>()->Width(), 3u);
+ EXPECT_EQ(TypeOf(vec3_u32)->As<sem::Vector>()->Width(), 3u);
+ EXPECT_EQ(TypeOf(vec3_f32)->As<sem::Vector>()->Width(), 3u);
+ EXPECT_EQ(TypeOf(vec3_bool), TypeOf(vec3_bool->target.type));
+ EXPECT_EQ(TypeOf(vec3_i32), TypeOf(vec3_i32->target.type));
+ EXPECT_EQ(TypeOf(vec3_u32), TypeOf(vec3_u32->target.type));
+ EXPECT_EQ(TypeOf(vec3_f32), TypeOf(vec3_f32->target.type));
+}
+
+TEST_F(ResolverTypeConstructorValidationTest, InferVec3ElementTypeFromScalarAndVec2) {
+ auto* vec3_bool =
+ Construct(create<ast::Vector>(nullptr, 3), Expr(true), vec2<bool>(false, true));
+ auto* vec3_i32 = Construct(create<ast::Vector>(nullptr, 3), Expr(1_i), vec2<i32>(2_i, 3_i));
+ auto* vec3_u32 = Construct(create<ast::Vector>(nullptr, 3), Expr(1_u), vec2<u32>(2_u, 3_u));
+ auto* vec3_f32 = Construct(create<ast::Vector>(nullptr, 3), Expr(1_f), vec2<f32>(2_f, 3_f));
+ WrapInFunction(vec3_bool, vec3_i32, vec3_u32, vec3_f32);
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
+
+ ASSERT_TRUE(TypeOf(vec3_bool)->Is<sem::Vector>());
+ ASSERT_TRUE(TypeOf(vec3_i32)->Is<sem::Vector>());
+ ASSERT_TRUE(TypeOf(vec3_u32)->Is<sem::Vector>());
+ ASSERT_TRUE(TypeOf(vec3_f32)->Is<sem::Vector>());
+ EXPECT_TRUE(TypeOf(vec3_bool)->As<sem::Vector>()->type()->Is<sem::Bool>());
+ EXPECT_TRUE(TypeOf(vec3_i32)->As<sem::Vector>()->type()->Is<sem::I32>());
+ EXPECT_TRUE(TypeOf(vec3_u32)->As<sem::Vector>()->type()->Is<sem::U32>());
+ EXPECT_TRUE(TypeOf(vec3_f32)->As<sem::Vector>()->type()->Is<sem::F32>());
+ EXPECT_EQ(TypeOf(vec3_bool)->As<sem::Vector>()->Width(), 3u);
+ EXPECT_EQ(TypeOf(vec3_i32)->As<sem::Vector>()->Width(), 3u);
+ EXPECT_EQ(TypeOf(vec3_u32)->As<sem::Vector>()->Width(), 3u);
+ EXPECT_EQ(TypeOf(vec3_f32)->As<sem::Vector>()->Width(), 3u);
+ EXPECT_EQ(TypeOf(vec3_bool), TypeOf(vec3_bool->target.type));
+ EXPECT_EQ(TypeOf(vec3_i32), TypeOf(vec3_i32->target.type));
+ EXPECT_EQ(TypeOf(vec3_u32), TypeOf(vec3_u32->target.type));
+ EXPECT_EQ(TypeOf(vec3_f32), TypeOf(vec3_f32->target.type));
}
TEST_F(ResolverTypeConstructorValidationTest, InferVec4ElementTypeFromScalars) {
- auto* vec4_bool = Construct(create<ast::Vector>(nullptr, 4), Expr(true),
- Expr(false), Expr(true), Expr(false));
- auto* vec4_i32 = Construct(create<ast::Vector>(nullptr, 4), Expr(1), Expr(2),
- Expr(3), Expr(4));
- auto* vec4_u32 = Construct(create<ast::Vector>(nullptr, 4), Expr(1u),
- Expr(2u), Expr(3u), Expr(4u));
- auto* vec4_f32 = Construct(create<ast::Vector>(nullptr, 4), Expr(1.0f),
- Expr(2.0f), Expr(3.0f), Expr(4.0f));
- WrapInFunction(vec4_bool, vec4_i32, vec4_u32, vec4_f32);
-
- ASSERT_TRUE(r()->Resolve()) << r()->error();
-
- ASSERT_TRUE(TypeOf(vec4_bool)->Is<sem::Vector>());
- ASSERT_TRUE(TypeOf(vec4_i32)->Is<sem::Vector>());
- ASSERT_TRUE(TypeOf(vec4_u32)->Is<sem::Vector>());
- ASSERT_TRUE(TypeOf(vec4_f32)->Is<sem::Vector>());
- EXPECT_TRUE(TypeOf(vec4_bool)->As<sem::Vector>()->type()->Is<sem::Bool>());
- EXPECT_TRUE(TypeOf(vec4_i32)->As<sem::Vector>()->type()->Is<sem::I32>());
- EXPECT_TRUE(TypeOf(vec4_u32)->As<sem::Vector>()->type()->Is<sem::U32>());
- EXPECT_TRUE(TypeOf(vec4_f32)->As<sem::Vector>()->type()->Is<sem::F32>());
- EXPECT_EQ(TypeOf(vec4_bool)->As<sem::Vector>()->Width(), 4u);
- EXPECT_EQ(TypeOf(vec4_i32)->As<sem::Vector>()->Width(), 4u);
- EXPECT_EQ(TypeOf(vec4_u32)->As<sem::Vector>()->Width(), 4u);
- EXPECT_EQ(TypeOf(vec4_f32)->As<sem::Vector>()->Width(), 4u);
- EXPECT_EQ(TypeOf(vec4_bool), TypeOf(vec4_bool->target.type));
- EXPECT_EQ(TypeOf(vec4_i32), TypeOf(vec4_i32->target.type));
- EXPECT_EQ(TypeOf(vec4_u32), TypeOf(vec4_u32->target.type));
- EXPECT_EQ(TypeOf(vec4_f32), TypeOf(vec4_f32->target.type));
+ auto* vec4_bool = Construct(create<ast::Vector>(nullptr, 4), Expr(true), Expr(false),
+ Expr(true), Expr(false));
+ auto* vec4_i32 =
+ Construct(create<ast::Vector>(nullptr, 4), Expr(1_i), Expr(2_i), Expr(3_i), Expr(4_i));
+ auto* vec4_u32 =
+ Construct(create<ast::Vector>(nullptr, 4), Expr(1_u), Expr(2_u), Expr(3_u), Expr(4_u));
+ auto* vec4_f32 =
+ Construct(create<ast::Vector>(nullptr, 4), Expr(1_f), Expr(2_f), Expr(3_f), Expr(4_f));
+ WrapInFunction(vec4_bool, vec4_i32, vec4_u32, vec4_f32);
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
+
+ ASSERT_TRUE(TypeOf(vec4_bool)->Is<sem::Vector>());
+ ASSERT_TRUE(TypeOf(vec4_i32)->Is<sem::Vector>());
+ ASSERT_TRUE(TypeOf(vec4_u32)->Is<sem::Vector>());
+ ASSERT_TRUE(TypeOf(vec4_f32)->Is<sem::Vector>());
+ EXPECT_TRUE(TypeOf(vec4_bool)->As<sem::Vector>()->type()->Is<sem::Bool>());
+ EXPECT_TRUE(TypeOf(vec4_i32)->As<sem::Vector>()->type()->Is<sem::I32>());
+ EXPECT_TRUE(TypeOf(vec4_u32)->As<sem::Vector>()->type()->Is<sem::U32>());
+ EXPECT_TRUE(TypeOf(vec4_f32)->As<sem::Vector>()->type()->Is<sem::F32>());
+ EXPECT_EQ(TypeOf(vec4_bool)->As<sem::Vector>()->Width(), 4u);
+ EXPECT_EQ(TypeOf(vec4_i32)->As<sem::Vector>()->Width(), 4u);
+ EXPECT_EQ(TypeOf(vec4_u32)->As<sem::Vector>()->Width(), 4u);
+ EXPECT_EQ(TypeOf(vec4_f32)->As<sem::Vector>()->Width(), 4u);
+ EXPECT_EQ(TypeOf(vec4_bool), TypeOf(vec4_bool->target.type));
+ EXPECT_EQ(TypeOf(vec4_i32), TypeOf(vec4_i32->target.type));
+ EXPECT_EQ(TypeOf(vec4_u32), TypeOf(vec4_u32->target.type));
+ EXPECT_EQ(TypeOf(vec4_f32), TypeOf(vec4_f32->target.type));
}
TEST_F(ResolverTypeConstructorValidationTest, InferVec4ElementTypeFromVec4) {
- auto* vec4_bool = Construct(create<ast::Vector>(nullptr, 4),
- vec4<bool>(true, false, true, false));
- auto* vec4_i32 =
- Construct(create<ast::Vector>(nullptr, 4), vec4<i32>(1, 2, 3, 4));
- auto* vec4_u32 =
- Construct(create<ast::Vector>(nullptr, 4), vec4<u32>(1u, 2u, 3u, 4u));
- auto* vec4_f32 = Construct(create<ast::Vector>(nullptr, 4),
- vec4<f32>(1.0f, 2.0f, 3.0f, 4.0f));
- WrapInFunction(vec4_bool, vec4_i32, vec4_u32, vec4_f32);
-
- ASSERT_TRUE(r()->Resolve()) << r()->error();
-
- ASSERT_TRUE(TypeOf(vec4_bool)->Is<sem::Vector>());
- ASSERT_TRUE(TypeOf(vec4_i32)->Is<sem::Vector>());
- ASSERT_TRUE(TypeOf(vec4_u32)->Is<sem::Vector>());
- ASSERT_TRUE(TypeOf(vec4_f32)->Is<sem::Vector>());
- EXPECT_TRUE(TypeOf(vec4_bool)->As<sem::Vector>()->type()->Is<sem::Bool>());
- EXPECT_TRUE(TypeOf(vec4_i32)->As<sem::Vector>()->type()->Is<sem::I32>());
- EXPECT_TRUE(TypeOf(vec4_u32)->As<sem::Vector>()->type()->Is<sem::U32>());
- EXPECT_TRUE(TypeOf(vec4_f32)->As<sem::Vector>()->type()->Is<sem::F32>());
- EXPECT_EQ(TypeOf(vec4_bool)->As<sem::Vector>()->Width(), 4u);
- EXPECT_EQ(TypeOf(vec4_i32)->As<sem::Vector>()->Width(), 4u);
- EXPECT_EQ(TypeOf(vec4_u32)->As<sem::Vector>()->Width(), 4u);
- EXPECT_EQ(TypeOf(vec4_f32)->As<sem::Vector>()->Width(), 4u);
- EXPECT_EQ(TypeOf(vec4_bool), TypeOf(vec4_bool->target.type));
- EXPECT_EQ(TypeOf(vec4_i32), TypeOf(vec4_i32->target.type));
- EXPECT_EQ(TypeOf(vec4_u32), TypeOf(vec4_u32->target.type));
- EXPECT_EQ(TypeOf(vec4_f32), TypeOf(vec4_f32->target.type));
-}
+ auto* vec4_bool =
+ Construct(create<ast::Vector>(nullptr, 4), vec4<bool>(true, false, true, false));
+ auto* vec4_i32 = Construct(create<ast::Vector>(nullptr, 4), vec4<i32>(1_i, 2_i, 3_i, 4_i));
+ auto* vec4_u32 = Construct(create<ast::Vector>(nullptr, 4), vec4<u32>(1_u, 2_u, 3_u, 4_u));
+ auto* vec4_f32 = Construct(create<ast::Vector>(nullptr, 4), vec4<f32>(1_f, 2_f, 3_f, 4_f));
+ WrapInFunction(vec4_bool, vec4_i32, vec4_u32, vec4_f32);
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
+
+ ASSERT_TRUE(TypeOf(vec4_bool)->Is<sem::Vector>());
+ ASSERT_TRUE(TypeOf(vec4_i32)->Is<sem::Vector>());
+ ASSERT_TRUE(TypeOf(vec4_u32)->Is<sem::Vector>());
+ ASSERT_TRUE(TypeOf(vec4_f32)->Is<sem::Vector>());
+ EXPECT_TRUE(TypeOf(vec4_bool)->As<sem::Vector>()->type()->Is<sem::Bool>());
+ EXPECT_TRUE(TypeOf(vec4_i32)->As<sem::Vector>()->type()->Is<sem::I32>());
+ EXPECT_TRUE(TypeOf(vec4_u32)->As<sem::Vector>()->type()->Is<sem::U32>());
+ EXPECT_TRUE(TypeOf(vec4_f32)->As<sem::Vector>()->type()->Is<sem::F32>());
+ EXPECT_EQ(TypeOf(vec4_bool)->As<sem::Vector>()->Width(), 4u);
+ EXPECT_EQ(TypeOf(vec4_i32)->As<sem::Vector>()->Width(), 4u);
+ EXPECT_EQ(TypeOf(vec4_u32)->As<sem::Vector>()->Width(), 4u);
+ EXPECT_EQ(TypeOf(vec4_f32)->As<sem::Vector>()->Width(), 4u);
+ EXPECT_EQ(TypeOf(vec4_bool), TypeOf(vec4_bool->target.type));
+ EXPECT_EQ(TypeOf(vec4_i32), TypeOf(vec4_i32->target.type));
+ EXPECT_EQ(TypeOf(vec4_u32), TypeOf(vec4_u32->target.type));
+ EXPECT_EQ(TypeOf(vec4_f32), TypeOf(vec4_f32->target.type));
+}
+
+TEST_F(ResolverTypeConstructorValidationTest, InferVec4ElementTypeFromScalarAndVec3) {
+ auto* vec4_bool =
+ Construct(create<ast::Vector>(nullptr, 4), Expr(true), vec3<bool>(false, true, false));
+ auto* vec4_i32 =
+ Construct(create<ast::Vector>(nullptr, 4), Expr(1_i), vec3<i32>(2_i, 3_i, 4_i));
+ auto* vec4_u32 =
+ Construct(create<ast::Vector>(nullptr, 4), Expr(1_u), vec3<u32>(2_u, 3_u, 4_u));
+ auto* vec4_f32 =
+ Construct(create<ast::Vector>(nullptr, 4), Expr(1_f), vec3<f32>(2_f, 3_f, 4_f));
+ WrapInFunction(vec4_bool, vec4_i32, vec4_u32, vec4_f32);
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
+
+ ASSERT_TRUE(TypeOf(vec4_bool)->Is<sem::Vector>());
+ ASSERT_TRUE(TypeOf(vec4_i32)->Is<sem::Vector>());
+ ASSERT_TRUE(TypeOf(vec4_u32)->Is<sem::Vector>());
+ ASSERT_TRUE(TypeOf(vec4_f32)->Is<sem::Vector>());
+ EXPECT_TRUE(TypeOf(vec4_bool)->As<sem::Vector>()->type()->Is<sem::Bool>());
+ EXPECT_TRUE(TypeOf(vec4_i32)->As<sem::Vector>()->type()->Is<sem::I32>());
+ EXPECT_TRUE(TypeOf(vec4_u32)->As<sem::Vector>()->type()->Is<sem::U32>());
+ EXPECT_TRUE(TypeOf(vec4_f32)->As<sem::Vector>()->type()->Is<sem::F32>());
+ EXPECT_EQ(TypeOf(vec4_bool)->As<sem::Vector>()->Width(), 4u);
+ EXPECT_EQ(TypeOf(vec4_i32)->As<sem::Vector>()->Width(), 4u);
+ EXPECT_EQ(TypeOf(vec4_u32)->As<sem::Vector>()->Width(), 4u);
+ EXPECT_EQ(TypeOf(vec4_f32)->As<sem::Vector>()->Width(), 4u);
+ EXPECT_EQ(TypeOf(vec4_bool), TypeOf(vec4_bool->target.type));
+ EXPECT_EQ(TypeOf(vec4_i32), TypeOf(vec4_i32->target.type));
+ EXPECT_EQ(TypeOf(vec4_u32), TypeOf(vec4_u32->target.type));
+ EXPECT_EQ(TypeOf(vec4_f32), TypeOf(vec4_f32->target.type));
+}
+
+TEST_F(ResolverTypeConstructorValidationTest, InferVec4ElementTypeFromVec2AndVec2) {
+ auto* vec4_bool = Construct(create<ast::Vector>(nullptr, 4), vec2<bool>(true, false),
+ vec2<bool>(true, false));
+ auto* vec4_i32 =
+ Construct(create<ast::Vector>(nullptr, 4), vec2<i32>(1_i, 2_i), vec2<i32>(3_i, 4_i));
+ auto* vec4_u32 =
+ Construct(create<ast::Vector>(nullptr, 4), vec2<u32>(1_u, 2_u), vec2<u32>(3_u, 4_u));
+ auto* vec4_f32 =
+ Construct(create<ast::Vector>(nullptr, 4), vec2<f32>(1_f, 2_f), vec2<f32>(3_f, 4_f));
+ WrapInFunction(vec4_bool, vec4_i32, vec4_u32, vec4_f32);
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
+
+ ASSERT_TRUE(TypeOf(vec4_bool)->Is<sem::Vector>());
+ ASSERT_TRUE(TypeOf(vec4_i32)->Is<sem::Vector>());
+ ASSERT_TRUE(TypeOf(vec4_u32)->Is<sem::Vector>());
+ ASSERT_TRUE(TypeOf(vec4_f32)->Is<sem::Vector>());
+ EXPECT_TRUE(TypeOf(vec4_bool)->As<sem::Vector>()->type()->Is<sem::Bool>());
+ EXPECT_TRUE(TypeOf(vec4_i32)->As<sem::Vector>()->type()->Is<sem::I32>());
+ EXPECT_TRUE(TypeOf(vec4_u32)->As<sem::Vector>()->type()->Is<sem::U32>());
+ EXPECT_TRUE(TypeOf(vec4_f32)->As<sem::Vector>()->type()->Is<sem::F32>());
+ EXPECT_EQ(TypeOf(vec4_bool)->As<sem::Vector>()->Width(), 4u);
+ EXPECT_EQ(TypeOf(vec4_i32)->As<sem::Vector>()->Width(), 4u);
+ EXPECT_EQ(TypeOf(vec4_u32)->As<sem::Vector>()->Width(), 4u);
+ EXPECT_EQ(TypeOf(vec4_f32)->As<sem::Vector>()->Width(), 4u);
+ EXPECT_EQ(TypeOf(vec4_bool), TypeOf(vec4_bool->target.type));
+ EXPECT_EQ(TypeOf(vec4_i32), TypeOf(vec4_i32->target.type));
+ EXPECT_EQ(TypeOf(vec4_u32), TypeOf(vec4_u32->target.type));
+ EXPECT_EQ(TypeOf(vec4_f32), TypeOf(vec4_f32->target.type));
+}
+
+TEST_F(ResolverTypeConstructorValidationTest, CannotInferVectorElementTypeWithoutArgs) {
+ WrapInFunction(Construct(Source{{12, 34}}, create<ast::Vector>(nullptr, 3)));
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_THAT(r()->error(), HasSubstr("12:34 error: no matching constructor for vec3()"));
+}
+
+TEST_F(ResolverTypeConstructorValidationTest, CannotInferVec2ElementTypeFromScalarsMismatch) {
+ WrapInFunction(Construct(Source{{1, 1}}, create<ast::Vector>(nullptr, 2),
+ Expr(Source{{1, 2}}, 1_i), //
+ Expr(Source{{1, 3}}, 2_u)));
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_THAT(r()->error(), HasSubstr("1:1 error: no matching constructor for vec2(i32, u32)"));
+}
+
+TEST_F(ResolverTypeConstructorValidationTest, CannotInferVec3ElementTypeFromScalarsMismatch) {
+ WrapInFunction(Construct(Source{{1, 1}}, create<ast::Vector>(nullptr, 3),
+ Expr(Source{{1, 2}}, 1_i), //
+ Expr(Source{{1, 3}}, 2_u), //
+ Expr(Source{{1, 4}}, 3_i)));
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_THAT(r()->error(),
+ HasSubstr("1:1 error: no matching constructor for vec3(i32, u32, i32)"));
+}
+
+TEST_F(ResolverTypeConstructorValidationTest, CannotInferVec3ElementTypeFromScalarAndVec2Mismatch) {
+ WrapInFunction(Construct(Source{{1, 1}}, create<ast::Vector>(nullptr, 3),
+ Expr(Source{{1, 2}}, 1_i), //
+ Construct(Source{{1, 3}}, ty.vec2<f32>(), 2_f, 3_f)));
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_THAT(r()->error(),
+ HasSubstr("1:1 error: no matching constructor for vec3(i32, vec2<f32>)"));
+}
+
+TEST_F(ResolverTypeConstructorValidationTest, CannotInferVec4ElementTypeFromScalarsMismatch) {
+ WrapInFunction(Construct(Source{{1, 1}}, create<ast::Vector>(nullptr, 4),
+ Expr(Source{{1, 2}}, 1_i), //
+ Expr(Source{{1, 3}}, 2_i), //
+ Expr(Source{{1, 4}}, 3_f), //
+ Expr(Source{{1, 5}}, 4_i)));
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_THAT(r()->error(),
+ HasSubstr("1:1 error: no matching constructor for vec4(i32, i32, f32, i32)"));
+}
+
+TEST_F(ResolverTypeConstructorValidationTest, CannotInferVec4ElementTypeFromScalarAndVec3Mismatch) {
+ WrapInFunction(Construct(Source{{1, 1}}, create<ast::Vector>(nullptr, 4),
+ Expr(Source{{1, 2}}, 1_i), //
+ Construct(Source{{1, 3}}, ty.vec3<u32>(), 2_u, 3_u, 4_u)));
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_THAT(r()->error(),
+ HasSubstr("1:1 error: no matching constructor for vec4(i32, vec3<u32>)"));
+}
-TEST_F(ResolverTypeConstructorValidationTest,
- InferVec4ElementTypeFromScalarAndVec3) {
- auto* vec4_bool = Construct(create<ast::Vector>(nullptr, 4), Expr(true),
- vec3<bool>(false, true, false));
- auto* vec4_i32 =
- Construct(create<ast::Vector>(nullptr, 4), Expr(1), vec3<i32>(2, 3, 4));
- auto* vec4_u32 = Construct(create<ast::Vector>(nullptr, 4), Expr(1u),
- vec3<u32>(2u, 3u, 4u));
- auto* vec4_f32 = Construct(create<ast::Vector>(nullptr, 4), Expr(1.0f),
- vec3<f32>(2.0f, 3.0f, 4.0f));
- WrapInFunction(vec4_bool, vec4_i32, vec4_u32, vec4_f32);
-
- ASSERT_TRUE(r()->Resolve()) << r()->error();
-
- ASSERT_TRUE(TypeOf(vec4_bool)->Is<sem::Vector>());
- ASSERT_TRUE(TypeOf(vec4_i32)->Is<sem::Vector>());
- ASSERT_TRUE(TypeOf(vec4_u32)->Is<sem::Vector>());
- ASSERT_TRUE(TypeOf(vec4_f32)->Is<sem::Vector>());
- EXPECT_TRUE(TypeOf(vec4_bool)->As<sem::Vector>()->type()->Is<sem::Bool>());
- EXPECT_TRUE(TypeOf(vec4_i32)->As<sem::Vector>()->type()->Is<sem::I32>());
- EXPECT_TRUE(TypeOf(vec4_u32)->As<sem::Vector>()->type()->Is<sem::U32>());
- EXPECT_TRUE(TypeOf(vec4_f32)->As<sem::Vector>()->type()->Is<sem::F32>());
- EXPECT_EQ(TypeOf(vec4_bool)->As<sem::Vector>()->Width(), 4u);
- EXPECT_EQ(TypeOf(vec4_i32)->As<sem::Vector>()->Width(), 4u);
- EXPECT_EQ(TypeOf(vec4_u32)->As<sem::Vector>()->Width(), 4u);
- EXPECT_EQ(TypeOf(vec4_f32)->As<sem::Vector>()->Width(), 4u);
- EXPECT_EQ(TypeOf(vec4_bool), TypeOf(vec4_bool->target.type));
- EXPECT_EQ(TypeOf(vec4_i32), TypeOf(vec4_i32->target.type));
- EXPECT_EQ(TypeOf(vec4_u32), TypeOf(vec4_u32->target.type));
- EXPECT_EQ(TypeOf(vec4_f32), TypeOf(vec4_f32->target.type));
-}
+TEST_F(ResolverTypeConstructorValidationTest, CannotInferVec4ElementTypeFromVec2AndVec2Mismatch) {
+ WrapInFunction(Construct(Source{{1, 1}}, create<ast::Vector>(nullptr, 4),
+ Construct(Source{{1, 2}}, ty.vec2<i32>(), 3_i, 4_i), //
+ Construct(Source{{1, 3}}, ty.vec2<u32>(), 3_u, 4_u)));
-TEST_F(ResolverTypeConstructorValidationTest,
- InferVec4ElementTypeFromVec2AndVec2) {
- auto* vec4_bool = Construct(create<ast::Vector>(nullptr, 4),
- vec2<bool>(true, false), vec2<bool>(true, false));
- auto* vec4_i32 = Construct(create<ast::Vector>(nullptr, 4), vec2<i32>(1, 2),
- vec2<i32>(3, 4));
- auto* vec4_u32 = Construct(create<ast::Vector>(nullptr, 4), vec2<u32>(1u, 2u),
- vec2<u32>(3u, 4u));
- auto* vec4_f32 = Construct(create<ast::Vector>(nullptr, 4),
- vec2<f32>(1.0f, 2.0f), vec2<f32>(3.0f, 4.0f));
- WrapInFunction(vec4_bool, vec4_i32, vec4_u32, vec4_f32);
-
- ASSERT_TRUE(r()->Resolve()) << r()->error();
-
- ASSERT_TRUE(TypeOf(vec4_bool)->Is<sem::Vector>());
- ASSERT_TRUE(TypeOf(vec4_i32)->Is<sem::Vector>());
- ASSERT_TRUE(TypeOf(vec4_u32)->Is<sem::Vector>());
- ASSERT_TRUE(TypeOf(vec4_f32)->Is<sem::Vector>());
- EXPECT_TRUE(TypeOf(vec4_bool)->As<sem::Vector>()->type()->Is<sem::Bool>());
- EXPECT_TRUE(TypeOf(vec4_i32)->As<sem::Vector>()->type()->Is<sem::I32>());
- EXPECT_TRUE(TypeOf(vec4_u32)->As<sem::Vector>()->type()->Is<sem::U32>());
- EXPECT_TRUE(TypeOf(vec4_f32)->As<sem::Vector>()->type()->Is<sem::F32>());
- EXPECT_EQ(TypeOf(vec4_bool)->As<sem::Vector>()->Width(), 4u);
- EXPECT_EQ(TypeOf(vec4_i32)->As<sem::Vector>()->Width(), 4u);
- EXPECT_EQ(TypeOf(vec4_u32)->As<sem::Vector>()->Width(), 4u);
- EXPECT_EQ(TypeOf(vec4_f32)->As<sem::Vector>()->Width(), 4u);
- EXPECT_EQ(TypeOf(vec4_bool), TypeOf(vec4_bool->target.type));
- EXPECT_EQ(TypeOf(vec4_i32), TypeOf(vec4_i32->target.type));
- EXPECT_EQ(TypeOf(vec4_u32), TypeOf(vec4_u32->target.type));
- EXPECT_EQ(TypeOf(vec4_f32), TypeOf(vec4_f32->target.type));
-}
-
-TEST_F(ResolverTypeConstructorValidationTest,
- CannotInferVectorElementTypeWithoutArgs) {
- WrapInFunction(Construct(create<ast::Vector>(Source{{12, 34}}, nullptr, 3)));
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), "12:34 error: missing vector element type");
-}
-
-TEST_F(ResolverTypeConstructorValidationTest,
- CannotInferVec2ElementTypeFromScalarsMismatch) {
- WrapInFunction(Construct(Source{{1, 1}}, create<ast::Vector>(nullptr, 2),
- Expr(Source{{1, 2}}, 1), //
- Expr(Source{{1, 3}}, 2u)));
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- R"(1:1 error: cannot infer vector element type, as constructor arguments have different types
-1:2 note: argument 0 has type i32
-1:3 note: argument 1 has type u32)");
-}
-
-TEST_F(ResolverTypeConstructorValidationTest,
- CannotInferVec3ElementTypeFromScalarsMismatch) {
- WrapInFunction(Construct(Source{{1, 1}}, create<ast::Vector>(nullptr, 3),
- Expr(Source{{1, 2}}, 1), //
- Expr(Source{{1, 3}}, 2u), //
- Expr(Source{{1, 4}}, 3)));
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- R"(1:1 error: cannot infer vector element type, as constructor arguments have different types
-1:2 note: argument 0 has type i32
-1:3 note: argument 1 has type u32
-1:4 note: argument 2 has type i32)");
-}
-
-TEST_F(ResolverTypeConstructorValidationTest,
- CannotInferVec3ElementTypeFromScalarAndVec2Mismatch) {
- WrapInFunction(
- Construct(Source{{1, 1}}, create<ast::Vector>(nullptr, 3),
- Expr(Source{{1, 2}}, 1), //
- Construct(Source{{1, 3}}, ty.vec2<f32>(), 2.0f, 3.0f)));
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- R"(1:1 error: cannot infer vector element type, as constructor arguments have different types
-1:2 note: argument 0 has type i32
-1:3 note: argument 1 has type vec2<f32>)");
-}
-
-TEST_F(ResolverTypeConstructorValidationTest,
- CannotInferVec4ElementTypeFromScalarsMismatch) {
- WrapInFunction(Construct(Source{{1, 1}}, create<ast::Vector>(nullptr, 4),
- Expr(Source{{1, 2}}, 1), //
- Expr(Source{{1, 3}}, 2), //
- Expr(Source{{1, 4}}, 3.0f), //
- Expr(Source{{1, 5}}, 4)));
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- R"(1:1 error: cannot infer vector element type, as constructor arguments have different types
-1:2 note: argument 0 has type i32
-1:3 note: argument 1 has type i32
-1:4 note: argument 2 has type f32
-1:5 note: argument 3 has type i32)");
-}
-
-TEST_F(ResolverTypeConstructorValidationTest,
- CannotInferVec4ElementTypeFromScalarAndVec3Mismatch) {
- WrapInFunction(
- Construct(Source{{1, 1}}, create<ast::Vector>(nullptr, 4),
- Expr(Source{{1, 2}}, 1), //
- Construct(Source{{1, 3}}, ty.vec3<u32>(), 2u, 3u, 4u)));
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- R"(1:1 error: cannot infer vector element type, as constructor arguments have different types
-1:2 note: argument 0 has type i32
-1:3 note: argument 1 has type vec3<u32>)");
-}
-
-TEST_F(ResolverTypeConstructorValidationTest,
- CannotInferVec4ElementTypeFromVec2AndVec2Mismatch) {
- WrapInFunction(Construct(Source{{1, 1}}, create<ast::Vector>(nullptr, 4),
- Construct(Source{{1, 2}}, ty.vec2<i32>(), 3, 4), //
- Construct(Source{{1, 3}}, ty.vec2<u32>(), 3u, 4u)));
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- R"(1:1 error: cannot infer vector element type, as constructor arguments have different types
-1:2 note: argument 0 has type vec2<i32>
-1:3 note: argument 1 has type vec2<u32>)");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_THAT(r()->error(),
+ HasSubstr("1:1 error: no matching constructor for vec4(vec2<i32>, vec2<u32>)"));
}
} // namespace VectorConstructor
namespace MatrixConstructor {
struct MatrixDimensions {
- uint32_t rows;
- uint32_t columns;
+ uint32_t rows;
+ uint32_t columns;
};
static std::string MatrixStr(const MatrixDimensions& dimensions) {
- return "mat" + std::to_string(dimensions.columns) + "x" +
- std::to_string(dimensions.rows) + "<f32>";
+ return "mat" + std::to_string(dimensions.columns) + "x" + std::to_string(dimensions.rows) +
+ "<f32>";
}
using MatrixConstructorTest = ResolverTestWithParam<MatrixDimensions>;
TEST_P(MatrixConstructorTest, Expr_ColumnConstructor_Error_TooFewArguments) {
- // matNxM<f32>(vecM<f32>(), ...); with N - 1 arguments
-
- const auto param = GetParam();
-
- std::stringstream args_tys;
- ast::ExpressionList args;
- for (uint32_t i = 1; i <= param.columns - 1; i++) {
- auto* vec_type = ty.vec<f32>(param.rows);
- args.push_back(Construct(Source{{12, i}}, vec_type));
- if (i > 1) {
- args_tys << ", ";
+ // matNxM<f32>(vecM<f32>(), ...); with N - 1 arguments
+
+ const auto param = GetParam();
+
+ std::stringstream args_tys;
+ ast::ExpressionList args;
+ for (uint32_t i = 0; i < param.columns - 1; i++) {
+ auto* vec_type = ty.vec<f32>(param.rows);
+ args.push_back(Construct(vec_type));
+ if (i > 0) {
+ args_tys << ", ";
+ }
+ args_tys << "vec" << param.rows << "<f32>";
}
- args_tys << "vec" << param.rows << "<f32>";
- }
- auto* matrix_type = ty.mat<f32>(param.columns, param.rows);
- auto* tc = Construct(Source{}, matrix_type, std::move(args));
- WrapInFunction(tc);
+ auto* matrix_type = ty.mat<f32>(param.columns, param.rows);
+ auto* tc = Construct(Source{{12, 34}}, matrix_type, std::move(args));
+ WrapInFunction(tc);
- EXPECT_FALSE(r()->Resolve());
- EXPECT_THAT(r()->error(), HasSubstr("12:1 error: no matching constructor " +
- MatrixStr(param) + "(" + args_tys.str() +
- ")\n\n3 candidates available:"));
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_THAT(r()->error(), HasSubstr("12:34 error: no matching constructor for " +
+ MatrixStr(param) + "(" + args_tys.str() + ")"));
}
TEST_P(MatrixConstructorTest, Expr_ElementConstructor_Error_TooFewArguments) {
- // matNxM<f32>(f32,...,f32); with N*M - 1 arguments
-
- const auto param = GetParam();
-
- std::stringstream args_tys;
- ast::ExpressionList args;
- for (uint32_t i = 1; i <= param.columns * param.rows - 1; i++) {
- args.push_back(Construct(Source{{12, i}}, ty.f32()));
- if (i > 1) {
- args_tys << ", ";
+ // matNxM<f32>(f32,...,f32); with N*M - 1 arguments
+
+ const auto param = GetParam();
+
+ std::stringstream args_tys;
+ ast::ExpressionList args;
+ for (uint32_t i = 0; i < param.columns * param.rows - 1; i++) {
+ args.push_back(Construct(ty.f32()));
+ if (i > 0) {
+ args_tys << ", ";
+ }
+ args_tys << "f32";
}
- args_tys << "f32";
- }
- auto* matrix_type = ty.mat<f32>(param.columns, param.rows);
- auto* tc = Construct(Source{}, matrix_type, std::move(args));
- WrapInFunction(tc);
+ auto* matrix_type = ty.mat<f32>(param.columns, param.rows);
+ auto* tc = Construct(Source{{12, 34}}, matrix_type, std::move(args));
+ WrapInFunction(tc);
- EXPECT_FALSE(r()->Resolve());
- EXPECT_THAT(r()->error(), HasSubstr("12:1 error: no matching constructor " +
- MatrixStr(param) + "(" + args_tys.str() +
- ")\n\n3 candidates available:"));
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_THAT(r()->error(), HasSubstr("12:34 error: no matching constructor for " +
+ MatrixStr(param) + "(" + args_tys.str() + ")"));
}
TEST_P(MatrixConstructorTest, Expr_ColumnConstructor_Error_TooManyArguments) {
- // matNxM<f32>(vecM<f32>(), ...); with N + 1 arguments
+ // matNxM<f32>(vecM<f32>(), ...); with N + 1 arguments
+
+ const auto param = GetParam();
+
+ std::stringstream args_tys;
+ ast::ExpressionList args;
+ for (uint32_t i = 0; i < param.columns + 1; i++) {
+ auto* vec_type = ty.vec<f32>(param.rows);
+ args.push_back(Construct(vec_type));
+ if (i > 0) {
+ args_tys << ", ";
+ }
+ args_tys << "vec" << param.rows << "<f32>";
+ }
- const auto param = GetParam();
+ auto* matrix_type = ty.mat<f32>(param.columns, param.rows);
+ auto* tc = Construct(Source{{12, 34}}, matrix_type, std::move(args));
+ WrapInFunction(tc);
- std::stringstream args_tys;
- ast::ExpressionList args;
- for (uint32_t i = 1; i <= param.columns + 1; i++) {
- auto* vec_type = ty.vec<f32>(param.rows);
- args.push_back(Construct(Source{{12, i}}, vec_type));
- if (i > 1) {
- args_tys << ", ";
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_THAT(r()->error(), HasSubstr("12:34 error: no matching constructor for " +
+ MatrixStr(param) + "(" + args_tys.str() + ")"));
+}
+
+TEST_P(MatrixConstructorTest, Expr_ElementConstructor_Error_TooManyArguments) {
+ // matNxM<f32>(f32,...,f32); with N*M + 1 arguments
+
+ const auto param = GetParam();
+
+ std::stringstream args_tys;
+ ast::ExpressionList args;
+ for (uint32_t i = 0; i < param.columns * param.rows + 1; i++) {
+ args.push_back(Construct(ty.f32()));
+ if (i > 0) {
+ args_tys << ", ";
+ }
+ args_tys << "f32";
}
- args_tys << "vec" << param.rows << "<f32>";
- }
- auto* matrix_type = ty.mat<f32>(param.columns, param.rows);
- auto* tc = Construct(Source{}, matrix_type, std::move(args));
- WrapInFunction(tc);
+ auto* matrix_type = ty.mat<f32>(param.columns, param.rows);
+ auto* tc = Construct(Source{{12, 34}}, matrix_type, std::move(args));
+ WrapInFunction(tc);
- EXPECT_FALSE(r()->Resolve());
- EXPECT_THAT(r()->error(), HasSubstr("12:1 error: no matching constructor " +
- MatrixStr(param) + "(" + args_tys.str() +
- ")\n\n3 candidates available:"));
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_THAT(r()->error(), HasSubstr("12:34 error: no matching constructor for " +
+ MatrixStr(param) + "(" + args_tys.str() + ")"));
}
-TEST_P(MatrixConstructorTest, Expr_ElementConstructor_Error_TooManyArguments) {
- // matNxM<f32>(f32,...,f32); with N*M + 1 arguments
+TEST_P(MatrixConstructorTest, Expr_ColumnConstructor_Error_InvalidArgumentType) {
+ // matNxM<f32>(vec<u32>, vec<u32>, ...); N arguments
- const auto param = GetParam();
+ const auto param = GetParam();
- std::stringstream args_tys;
- ast::ExpressionList args;
- for (uint32_t i = 1; i <= param.columns * param.rows + 1; i++) {
- args.push_back(Construct(Source{{12, i}}, ty.f32()));
- if (i > 1) {
- args_tys << ", ";
+ std::stringstream args_tys;
+ ast::ExpressionList args;
+ for (uint32_t i = 0; i < param.columns; i++) {
+ auto* vec_type = ty.vec<u32>(param.rows);
+ args.push_back(Construct(vec_type));
+ if (i > 0) {
+ args_tys << ", ";
+ }
+ args_tys << "vec" << param.rows << "<u32>";
}
- args_tys << "f32";
- }
- auto* matrix_type = ty.mat<f32>(param.columns, param.rows);
- auto* tc = Construct(Source{}, matrix_type, std::move(args));
- WrapInFunction(tc);
+ auto* matrix_type = ty.mat<f32>(param.columns, param.rows);
+ auto* tc = Construct(Source{{12, 34}}, matrix_type, std::move(args));
+ WrapInFunction(tc);
- EXPECT_FALSE(r()->Resolve());
- EXPECT_THAT(r()->error(), HasSubstr("12:1 error: no matching constructor " +
- MatrixStr(param) + "(" + args_tys.str() +
- ")\n\n3 candidates available:"));
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_THAT(r()->error(), HasSubstr("12:34 error: no matching constructor for " +
+ MatrixStr(param) + "(" + args_tys.str() + ")"));
}
-TEST_P(MatrixConstructorTest,
- Expr_ColumnConstructor_Error_InvalidArgumentType) {
- // matNxM<f32>(vec<u32>, vec<u32>, ...); N arguments
+TEST_P(MatrixConstructorTest, Expr_ElementConstructor_Error_InvalidArgumentType) {
+    // matNxM<f32>(u32, u32, ...); N arguments
- const auto param = GetParam();
+ const auto param = GetParam();
- std::stringstream args_tys;
- ast::ExpressionList args;
- for (uint32_t i = 1; i <= param.columns; i++) {
- auto* vec_type = ty.vec<u32>(param.rows);
- args.push_back(Construct(Source{{12, i}}, vec_type));
- if (i > 1) {
- args_tys << ", ";
+ std::stringstream args_tys;
+ ast::ExpressionList args;
+ for (uint32_t i = 0; i < param.columns; i++) {
+ args.push_back(Expr(1_u));
+ if (i > 0) {
+ args_tys << ", ";
+ }
+ args_tys << "u32";
}
- args_tys << "vec" << param.rows << "<u32>";
- }
- auto* matrix_type = ty.mat<f32>(param.columns, param.rows);
- auto* tc = Construct(Source{}, matrix_type, std::move(args));
- WrapInFunction(tc);
+ auto* matrix_type = ty.mat<f32>(param.columns, param.rows);
+ auto* tc = Construct(Source{{12, 34}}, matrix_type, std::move(args));
+ WrapInFunction(tc);
- EXPECT_FALSE(r()->Resolve());
- EXPECT_THAT(r()->error(), HasSubstr("12:1 error: no matching constructor " +
- MatrixStr(param) + "(" + args_tys.str() +
- ")\n\n3 candidates available:"));
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_THAT(r()->error(), HasSubstr("12:34 error: no matching constructor for " +
+ MatrixStr(param) + "(" + args_tys.str() + ")"));
}
-TEST_P(MatrixConstructorTest,
- Expr_ElementConstructor_Error_InvalidArgumentType) {
- // matNxM<f32>(u32, u32, ...); N*M arguments
+TEST_P(MatrixConstructorTest, Expr_ColumnConstructor_Error_TooFewRowsInVectorArgument) {
+ // matNxM<f32>(vecM<f32>(),...,vecM-1<f32>());
- const auto param = GetParam();
+ const auto param = GetParam();
- std::stringstream args_tys;
- ast::ExpressionList args;
- for (uint32_t i = 1; i <= param.columns; i++) {
- args.push_back(Expr(Source{{12, i}}, 1u));
- if (i > 1) {
- args_tys << ", ";
+ // Skip the test if parameters would have resulted in an invalid vec1 type.
+ if (param.rows == 2) {
+ return;
}
- args_tys << "u32";
- }
- auto* matrix_type = ty.mat<f32>(param.columns, param.rows);
- auto* tc = Construct(Source{}, matrix_type, std::move(args));
- WrapInFunction(tc);
+ std::stringstream args_tys;
+ ast::ExpressionList args;
+ for (uint32_t i = 0; i < param.columns; i++) {
+ auto* valid_vec_type = ty.vec<f32>(param.rows);
+ args.push_back(Construct(valid_vec_type));
+ if (i > 0) {
+ args_tys << ", ";
+ }
+ args_tys << "vec" << param.rows << "<f32>";
+ }
+    const size_t kInvalidLoc = 2 * (param.columns - 1);  // source column for the mis-sized argument
+ auto* invalid_vec_type = ty.vec<f32>(param.rows - 1);
+ args.push_back(Construct(Source{{12, kInvalidLoc}}, invalid_vec_type));
+ args_tys << ", vec" << (param.rows - 1) << "<f32>";
- EXPECT_FALSE(r()->Resolve());
- EXPECT_THAT(r()->error(), HasSubstr("12:1 error: no matching constructor " +
- MatrixStr(param) + "(" + args_tys.str() +
- ")\n\n3 candidates available:"));
-}
+ auto* matrix_type = ty.mat<f32>(param.columns, param.rows);
+ auto* tc = Construct(Source{{12, 34}}, matrix_type, std::move(args));
+ WrapInFunction(tc);
-TEST_P(MatrixConstructorTest,
- Expr_ColumnConstructor_Error_TooFewRowsInVectorArgument) {
- // matNxM<f32>(vecM<f32>(),...,vecM-1<f32>());
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_THAT(r()->error(), HasSubstr("12:34 error: no matching constructor for " +
+ MatrixStr(param) + "(" + args_tys.str() + ")"));
+}
- const auto param = GetParam();
+TEST_P(MatrixConstructorTest, Expr_ColumnConstructor_Error_TooManyRowsInVectorArgument) {
+ // matNxM<f32>(vecM<f32>(),...,vecM+1<f32>());
- // Skip the test if parameters would have resulted in an invalid vec1 type.
- if (param.rows == 2) {
- return;
- }
+ const auto param = GetParam();
- std::stringstream args_tys;
- ast::ExpressionList args;
- for (uint32_t i = 1; i <= param.columns - 1; i++) {
- auto* valid_vec_type = ty.vec<f32>(param.rows);
- args.push_back(Construct(Source{{12, i}}, valid_vec_type));
- if (i > 1) {
- args_tys << ", ";
+ // Skip the test if parameters would have resulted in an invalid vec5 type.
+ if (param.rows == 4) {
+ return;
}
- args_tys << "vec" << param.rows << "<f32>";
- }
- const size_t kInvalidLoc = 2 * (param.columns - 1);
- auto* invalid_vec_type = ty.vec<f32>(param.rows - 1);
- args.push_back(Construct(Source{{12, kInvalidLoc}}, invalid_vec_type));
- args_tys << ", vec" << (param.rows - 1) << "<f32>";
-
- auto* matrix_type = ty.mat<f32>(param.columns, param.rows);
- auto* tc = Construct(Source{}, matrix_type, std::move(args));
- WrapInFunction(tc);
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_THAT(r()->error(), HasSubstr("12:1 error: no matching constructor " +
- MatrixStr(param) + "(" + args_tys.str() +
- ")\n\n3 candidates available:"));
-}
-
-TEST_P(MatrixConstructorTest,
- Expr_ColumnConstructor_Error_TooManyRowsInVectorArgument) {
- // matNxM<f32>(vecM<f32>(),...,vecM+1<f32>());
-
- const auto param = GetParam();
-
- // Skip the test if parameters would have resulted in an invalid vec5 type.
- if (param.rows == 4) {
- return;
- }
-
- std::stringstream args_tys;
- ast::ExpressionList args;
- for (uint32_t i = 1; i <= param.columns - 1; i++) {
- auto* valid_vec_type = ty.vec<f32>(param.rows);
- args.push_back(Construct(Source{{12, i}}, valid_vec_type));
- if (i > 1) {
- args_tys << ", ";
+
+ std::stringstream args_tys;
+ ast::ExpressionList args;
+ for (uint32_t i = 0; i < param.columns; i++) {
+ auto* valid_vec_type = ty.vec<f32>(param.rows);
+ args.push_back(Construct(valid_vec_type));
+ if (i > 0) {
+ args_tys << ", ";
+ }
+ args_tys << "vec" << param.rows << "<f32>";
}
- args_tys << "vec" << param.rows << "<f32>";
- }
- const size_t kInvalidLoc = 2 * (param.columns - 1);
- auto* invalid_vec_type = ty.vec<f32>(param.rows + 1);
- args.push_back(Construct(Source{{12, kInvalidLoc}}, invalid_vec_type));
- args_tys << ", vec" << (param.rows + 1) << "<f32>";
+ auto* invalid_vec_type = ty.vec<f32>(param.rows + 1);
+ args.push_back(Construct(invalid_vec_type));
+ args_tys << ", vec" << (param.rows + 1) << "<f32>";
- auto* matrix_type = ty.mat<f32>(param.columns, param.rows);
- auto* tc = Construct(Source{}, matrix_type, std::move(args));
- WrapInFunction(tc);
+ auto* matrix_type = ty.mat<f32>(param.columns, param.rows);
+ auto* tc = Construct(Source{{12, 34}}, matrix_type, std::move(args));
+ WrapInFunction(tc);
- EXPECT_FALSE(r()->Resolve());
- EXPECT_THAT(r()->error(), HasSubstr("12:1 error: no matching constructor " +
- MatrixStr(param) + "(" + args_tys.str() +
- ")\n\n3 candidates available:"));
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_THAT(r()->error(), HasSubstr("12:34 error: no matching constructor for " +
+ MatrixStr(param) + "(" + args_tys.str() + ")"));
}
TEST_P(MatrixConstructorTest, Expr_Constructor_ZeroValue_Success) {
- // matNxM<f32>();
+ // matNxM<f32>();
- const auto param = GetParam();
- auto* matrix_type = ty.mat<f32>(param.columns, param.rows);
- auto* tc = Construct(Source{{12, 40}}, matrix_type);
- WrapInFunction(tc);
+ const auto param = GetParam();
+ auto* matrix_type = ty.mat<f32>(param.columns, param.rows);
+ auto* tc = Construct(Source{{12, 40}}, matrix_type);
+ WrapInFunction(tc);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
}
TEST_P(MatrixConstructorTest, Expr_Constructor_WithColumns_Success) {
- // matNxM<f32>(vecM<f32>(), ...); with N arguments
+ // matNxM<f32>(vecM<f32>(), ...); with N arguments
- const auto param = GetParam();
+ const auto param = GetParam();
- ast::ExpressionList args;
- for (uint32_t i = 1; i <= param.columns; i++) {
- auto* vec_type = ty.vec<f32>(param.rows);
- args.push_back(Construct(Source{{12, i}}, vec_type));
- }
+ ast::ExpressionList args;
+ for (uint32_t i = 0; i < param.columns; i++) {
+ auto* vec_type = ty.vec<f32>(param.rows);
+ args.push_back(Construct(vec_type));
+ }
- auto* matrix_type = ty.mat<f32>(param.columns, param.rows);
- auto* tc = Construct(Source{}, matrix_type, std::move(args));
- WrapInFunction(tc);
+ auto* matrix_type = ty.mat<f32>(param.columns, param.rows);
+ auto* tc = Construct(matrix_type, std::move(args));
+ WrapInFunction(tc);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
}
TEST_P(MatrixConstructorTest, Expr_Constructor_WithElements_Success) {
- // matNxM<f32>(f32,...,f32); with N*M arguments
+ // matNxM<f32>(f32,...,f32); with N*M arguments
- const auto param = GetParam();
+ const auto param = GetParam();
- ast::ExpressionList args;
- for (uint32_t i = 1; i <= param.columns * param.rows; i++) {
- args.push_back(Construct(Source{{12, i}}, ty.f32()));
- }
+ ast::ExpressionList args;
+ for (uint32_t i = 0; i < param.columns * param.rows; i++) {
+ args.push_back(Construct(ty.f32()));
+ }
- auto* matrix_type = ty.mat<f32>(param.columns, param.rows);
- auto* tc = Construct(Source{}, matrix_type, std::move(args));
- WrapInFunction(tc);
+ auto* matrix_type = ty.mat<f32>(param.columns, param.rows);
+ auto* tc = Construct(matrix_type, std::move(args));
+ WrapInFunction(tc);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
}
TEST_P(MatrixConstructorTest, Expr_Constructor_ElementTypeAlias_Error) {
- // matNxM<Float32>(vecM<u32>(), ...); with N arguments
-
- const auto param = GetParam();
- auto* f32_alias = Alias("Float32", ty.f32());
-
- std::stringstream args_tys;
- ast::ExpressionList args;
- for (uint32_t i = 1; i <= param.columns; i++) {
- auto* vec_type = ty.vec(ty.u32(), param.rows);
- args.push_back(Construct(Source{{12, i}}, vec_type));
- if (i > 1) {
- args_tys << ", ";
+ // matNxM<Float32>(vecM<u32>(), ...); with N arguments
+
+ const auto param = GetParam();
+ auto* f32_alias = Alias("Float32", ty.f32());
+
+ std::stringstream args_tys;
+ ast::ExpressionList args;
+ for (uint32_t i = 0; i < param.columns; i++) {
+ auto* vec_type = ty.vec(ty.u32(), param.rows);
+ args.push_back(Construct(vec_type));
+ if (i > 0) {
+ args_tys << ", ";
+ }
+ args_tys << "vec" << param.rows << "<u32>";
}
- args_tys << "vec" << param.rows << "<u32>";
- }
- auto* matrix_type = ty.mat(ty.Of(f32_alias), param.columns, param.rows);
- auto* tc = Construct(Source{}, matrix_type, std::move(args));
- WrapInFunction(tc);
+ auto* matrix_type = ty.mat(ty.Of(f32_alias), param.columns, param.rows);
+ auto* tc = Construct(Source{{12, 34}}, matrix_type, std::move(args));
+ WrapInFunction(tc);
- EXPECT_FALSE(r()->Resolve());
- EXPECT_THAT(r()->error(), HasSubstr("12:1 error: no matching constructor " +
- MatrixStr(param) + "(" + args_tys.str() +
- ")\n\n3 candidates available:"));
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_THAT(r()->error(), HasSubstr("12:34 error: no matching constructor for " +
+ MatrixStr(param) + "(" + args_tys.str() + ")"));
}
TEST_P(MatrixConstructorTest, Expr_Constructor_ElementTypeAlias_Success) {
- // matNxM<Float32>(vecM<f32>(), ...); with N arguments
+ // matNxM<Float32>(vecM<f32>(), ...); with N arguments
- const auto param = GetParam();
- auto* f32_alias = Alias("Float32", ty.f32());
+ const auto param = GetParam();
+ auto* f32_alias = Alias("Float32", ty.f32());
- ast::ExpressionList args;
- for (uint32_t i = 1; i <= param.columns; i++) {
- auto* vec_type = ty.vec<f32>(param.rows);
- args.push_back(Construct(Source{{12, i}}, vec_type));
- }
+ ast::ExpressionList args;
+ for (uint32_t i = 0; i < param.columns; i++) {
+ auto* vec_type = ty.vec<f32>(param.rows);
+ args.push_back(Construct(vec_type));
+ }
- auto* matrix_type = ty.mat(ty.Of(f32_alias), param.columns, param.rows);
- auto* tc = Construct(Source{}, matrix_type, std::move(args));
- WrapInFunction(tc);
+ auto* matrix_type = ty.mat(ty.Of(f32_alias), param.columns, param.rows);
+ auto* tc = Construct(Source{}, matrix_type, std::move(args));
+ WrapInFunction(tc);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
}
-TEST_F(ResolverTypeConstructorValidationTest,
- Expr_MatrixConstructor_ArgumentTypeAlias_Error) {
- auto* alias = Alias("VectorUnsigned2", ty.vec2<u32>());
- auto* tc =
- mat2x2<f32>(Construct(Source{{12, 34}}, ty.Of(alias)), vec2<f32>());
- WrapInFunction(tc);
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- R"(12:34 error: no matching constructor mat2x2<f32>(vec2<u32>, vec2<f32>)
+TEST_F(ResolverTypeConstructorValidationTest, Expr_MatrixConstructor_ArgumentTypeAlias_Error) {
+ auto* alias = Alias("VectorUnsigned2", ty.vec2<u32>());
+ auto* tc = Construct(Source{{12, 34}}, ty.mat2x2<f32>(), Construct(ty.Of(alias)), vec2<f32>());
+ WrapInFunction(tc);
-3 candidates available:
- mat2x2<f32>()
- mat2x2<f32>(f32,...,f32) // 4 arguments
- mat2x2<f32>(vec2<f32>, vec2<f32>)
-)");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_THAT(
+ r()->error(),
+ HasSubstr("12:34 error: no matching constructor for mat2x2<f32>(vec2<u32>, vec2<f32>)"));
}
TEST_P(MatrixConstructorTest, Expr_Constructor_ArgumentTypeAlias_Success) {
- const auto param = GetParam();
- auto* matrix_type = ty.mat<f32>(param.columns, param.rows);
- auto* vec_type = ty.vec<f32>(param.rows);
- auto* vec_alias = Alias("VectorFloat2", vec_type);
+ const auto param = GetParam();
+ auto* matrix_type = ty.mat<f32>(param.columns, param.rows);
+ auto* vec_type = ty.vec<f32>(param.rows);
+ auto* vec_alias = Alias("VectorFloat2", vec_type);
- ast::ExpressionList args;
- for (uint32_t i = 1; i <= param.columns; i++) {
- args.push_back(Construct(Source{{12, i}}, ty.Of(vec_alias)));
- }
+ ast::ExpressionList args;
+ for (uint32_t i = 0; i < param.columns; i++) {
+ args.push_back(Construct(ty.Of(vec_alias)));
+ }
- auto* tc = Construct(Source{}, matrix_type, std::move(args));
- WrapInFunction(tc);
+ auto* tc = Construct(Source{}, matrix_type, std::move(args));
+ WrapInFunction(tc);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
}
TEST_P(MatrixConstructorTest, Expr_Constructor_ArgumentElementTypeAlias_Error) {
- const auto param = GetParam();
- auto* matrix_type = ty.mat<f32>(param.columns, param.rows);
- auto* f32_alias = Alias("UnsignedInt", ty.u32());
-
- std::stringstream args_tys;
- ast::ExpressionList args;
- for (uint32_t i = 1; i <= param.columns; i++) {
- auto* vec_type = ty.vec(ty.Of(f32_alias), param.rows);
- args.push_back(Construct(Source{{12, i}}, vec_type));
- if (i > 1) {
- args_tys << ", ";
+ const auto param = GetParam();
+ auto* matrix_type = ty.mat<f32>(param.columns, param.rows);
+ auto* f32_alias = Alias("UnsignedInt", ty.u32());
+
+ std::stringstream args_tys;
+ ast::ExpressionList args;
+ for (uint32_t i = 0; i < param.columns; i++) {
+ auto* vec_type = ty.vec(ty.Of(f32_alias), param.rows);
+ args.push_back(Construct(vec_type));
+ if (i > 0) {
+ args_tys << ", ";
+ }
+ args_tys << "vec" << param.rows << "<u32>";
}
- args_tys << "vec" << param.rows << "<u32>";
- }
- auto* tc = Construct(Source{}, matrix_type, std::move(args));
- WrapInFunction(tc);
+ auto* tc = Construct(Source{{12, 34}}, matrix_type, std::move(args));
+ WrapInFunction(tc);
- EXPECT_FALSE(r()->Resolve());
- EXPECT_THAT(r()->error(), HasSubstr("12:1 error: no matching constructor " +
- MatrixStr(param) + "(" + args_tys.str() +
- ")\n\n3 candidates available:"));
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_THAT(r()->error(), HasSubstr("12:34 error: no matching constructor for " +
+ MatrixStr(param) + "(" + args_tys.str() + ")"));
}
-TEST_P(MatrixConstructorTest,
- Expr_Constructor_ArgumentElementTypeAlias_Success) {
- const auto param = GetParam();
- auto* f32_alias = Alias("Float32", ty.f32());
+TEST_P(MatrixConstructorTest, Expr_Constructor_ArgumentElementTypeAlias_Success) {
+ const auto param = GetParam();
+ auto* f32_alias = Alias("Float32", ty.f32());
- ast::ExpressionList args;
- for (uint32_t i = 1; i <= param.columns; i++) {
- auto* vec_type = ty.vec(ty.Of(f32_alias), param.rows);
- args.push_back(Construct(Source{{12, i}}, vec_type));
- }
+ ast::ExpressionList args;
+ for (uint32_t i = 0; i < param.columns; i++) {
+ auto* vec_type = ty.vec(ty.Of(f32_alias), param.rows);
+ args.push_back(Construct(vec_type));
+ }
- auto* matrix_type = ty.mat<f32>(param.columns, param.rows);
- auto* tc = Construct(Source{}, matrix_type, std::move(args));
- WrapInFunction(tc);
+ auto* matrix_type = ty.mat<f32>(param.columns, param.rows);
+ auto* tc = Construct(Source{}, matrix_type, std::move(args));
+ WrapInFunction(tc);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
}
TEST_P(MatrixConstructorTest, InferElementTypeFromVectors) {
- const auto param = GetParam();
+ const auto param = GetParam();
- ast::ExpressionList args;
- for (uint32_t i = 1; i <= param.columns; i++) {
- args.push_back(Construct(ty.vec<f32>(param.rows)));
- }
+ ast::ExpressionList args;
+ for (uint32_t i = 0; i < param.columns; i++) {
+ args.push_back(Construct(ty.vec<f32>(param.rows)));
+ }
- auto* matrix_type = create<ast::Matrix>(nullptr, param.rows, param.columns);
- auto* tc = Construct(Source{}, matrix_type, std::move(args));
- WrapInFunction(tc);
+ auto* matrix_type = create<ast::Matrix>(nullptr, param.rows, param.columns);
+ auto* tc = Construct(Source{}, matrix_type, std::move(args));
+ WrapInFunction(tc);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
}
TEST_P(MatrixConstructorTest, InferElementTypeFromScalars) {
- const auto param = GetParam();
+ const auto param = GetParam();
- ast::ExpressionList args;
- for (uint32_t i = 0; i < param.rows * param.columns; i++) {
- args.push_back(Expr(static_cast<f32>(i)));
- }
+ ast::ExpressionList args;
+ for (uint32_t i = 0; i < param.rows * param.columns; i++) {
+ args.push_back(Expr(static_cast<f32>(i)));
+ }
- auto* matrix_type = create<ast::Matrix>(nullptr, param.rows, param.columns);
- WrapInFunction(Construct(Source{{12, 34}}, matrix_type, std::move(args)));
+ auto* matrix_type = create<ast::Matrix>(nullptr, param.rows, param.columns);
+ WrapInFunction(Construct(Source{{12, 34}}, matrix_type, std::move(args)));
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
}
TEST_P(MatrixConstructorTest, CannotInferElementTypeFromVectors_Mismatch) {
- const auto param = GetParam();
-
- std::stringstream err;
- err << "12:34 error: cannot infer matrix element type, as constructor "
- "arguments have different types";
-
- ast::ExpressionList args;
- for (uint32_t i = 0; i < param.columns; i++) {
- err << "\n";
- auto src = Source{{1, 10 + i}};
- if (i == 1) {
- // Odd one out
- args.push_back(Construct(src, ty.vec<i32>(param.rows)));
- err << src << " note: argument " << i << " has type vec" << param.rows
- << "<i32>";
- } else {
- args.push_back(Construct(src, ty.vec<f32>(param.rows)));
- err << src << " note: argument " << i << " has type vec" << param.rows
- << "<f32>";
+ const auto param = GetParam();
+
+ std::stringstream err;
+ err << "12:34 error: no matching constructor for mat" << param.columns << "x" << param.rows
+ << "(";
+
+ ast::ExpressionList args;
+ for (uint32_t i = 0; i < param.columns; i++) {
+ if (i > 0) {
+ err << ", ";
+ }
+ if (i == 1) {
+ // Odd one out
+ args.push_back(Construct(ty.vec<i32>(param.rows)));
+ err << "vec" << param.rows << "<i32>";
+ } else {
+ args.push_back(Construct(ty.vec<f32>(param.rows)));
+ err << "vec" << param.rows << "<f32>";
+ }
}
- }
- auto* matrix_type = create<ast::Matrix>(nullptr, param.rows, param.columns);
- WrapInFunction(Construct(Source{{12, 34}}, matrix_type, std::move(args)));
+ auto* matrix_type = create<ast::Matrix>(nullptr, param.rows, param.columns);
+ WrapInFunction(Construct(Source{{12, 34}}, matrix_type, std::move(args)));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_THAT(r()->error(), err.str());
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_THAT(r()->error(), HasSubstr(err.str()));
}
TEST_P(MatrixConstructorTest, CannotInferElementTypeFromScalars_Mismatch) {
- const auto param = GetParam();
-
- std::stringstream err;
- err << "12:34 error: cannot infer matrix element type, as constructor "
- "arguments have different types";
- ast::ExpressionList args;
- for (uint32_t i = 0; i < param.rows * param.columns; i++) {
- err << "\n";
- auto src = Source{{1, 10 + i}};
- if (i == 3) {
- args.push_back(Expr(src, static_cast<i32>(i))); // The odd one out
- err << src << " note: argument " << i << " has type i32";
- } else {
- args.push_back(Expr(src, static_cast<f32>(i)));
- err << src << " note: argument " << i << " has type f32";
+ const auto param = GetParam();
+
+ std::stringstream err;
+ err << "12:34 error: no matching constructor for mat" << param.columns << "x" << param.rows
+ << "(";
+
+ ast::ExpressionList args;
+ for (uint32_t i = 0; i < param.rows * param.columns; i++) {
+ if (i > 0) {
+ err << ", ";
+ }
+ if (i == 3) {
+ args.push_back(Expr(static_cast<i32>(i))); // The odd one out
+ err << "i32";
+ } else {
+ args.push_back(Expr(static_cast<f32>(i)));
+ err << "f32";
+ }
}
- }
- auto* matrix_type = create<ast::Matrix>(nullptr, param.rows, param.columns);
- WrapInFunction(Construct(Source{{12, 34}}, matrix_type, std::move(args)));
+ err << ")";
- EXPECT_FALSE(r()->Resolve());
- EXPECT_THAT(r()->error(), err.str());
+ auto* matrix_type = create<ast::Matrix>(nullptr, param.rows, param.columns);
+ WrapInFunction(Construct(Source{{12, 34}}, matrix_type, std::move(args)));
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_THAT(r()->error(), HasSubstr(err.str()));
}
INSTANTIATE_TEST_SUITE_P(ResolverTypeConstructorValidationTest,
@@ -2714,12 +2480,9 @@ INSTANTIATE_TEST_SUITE_P(ResolverTypeConstructorValidationTest,
namespace StructConstructor {
using builder::CreatePtrs;
using builder::CreatePtrsFor;
-using builder::f32;
-using builder::i32;
using builder::mat2x2;
using builder::mat3x3;
using builder::mat4x4;
-using builder::u32;
using builder::vec2;
using builder::vec3;
using builder::vec4;
@@ -2744,94 +2507,91 @@ using StructConstructorInputsTest =
ResolverTestWithParam<std::tuple<CreatePtrs, // struct member type
uint32_t>>; // number of struct members
TEST_P(StructConstructorInputsTest, TooFew) {
- auto& param = GetParam();
- auto& str_params = std::get<0>(param);
- uint32_t N = std::get<1>(param);
-
- ast::StructMemberList members;
- ast::ExpressionList values;
- for (uint32_t i = 0; i < N; i++) {
- auto* struct_type = str_params.ast(*this);
- members.push_back(Member("member_" + std::to_string(i), struct_type));
- if (i < N - 1) {
- auto* ctor_value_expr = str_params.expr(*this, 0);
- values.push_back(ctor_value_expr);
+ auto& param = GetParam();
+ auto& str_params = std::get<0>(param);
+ uint32_t N = std::get<1>(param);
+
+ ast::StructMemberList members;
+ ast::ExpressionList values;
+ for (uint32_t i = 0; i < N; i++) {
+ auto* struct_type = str_params.ast(*this);
+ members.push_back(Member("member_" + std::to_string(i), struct_type));
+ if (i < N - 1) {
+ auto* ctor_value_expr = str_params.expr(*this, 0);
+ values.push_back(ctor_value_expr);
+ }
}
- }
- auto* s = Structure("s", members);
- auto* tc = Construct(Source{{12, 34}}, ty.Of(s), values);
- WrapInFunction(tc);
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: struct constructor has too few inputs: expected " +
- std::to_string(N) + ", found " + std::to_string(N - 1));
+ auto* s = Structure("s", members);
+ auto* tc = Construct(Source{{12, 34}}, ty.Of(s), values);
+ WrapInFunction(tc);
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: struct constructor has too few inputs: expected " +
+ std::to_string(N) + ", found " + std::to_string(N - 1));
}
TEST_P(StructConstructorInputsTest, TooMany) {
- auto& param = GetParam();
- auto& str_params = std::get<0>(param);
- uint32_t N = std::get<1>(param);
-
- ast::StructMemberList members;
- ast::ExpressionList values;
- for (uint32_t i = 0; i < N + 1; i++) {
- if (i < N) {
- auto* struct_type = str_params.ast(*this);
- members.push_back(Member("member_" + std::to_string(i), struct_type));
+ auto& param = GetParam();
+ auto& str_params = std::get<0>(param);
+ uint32_t N = std::get<1>(param);
+
+ ast::StructMemberList members;
+ ast::ExpressionList values;
+ for (uint32_t i = 0; i < N + 1; i++) {
+ if (i < N) {
+ auto* struct_type = str_params.ast(*this);
+ members.push_back(Member("member_" + std::to_string(i), struct_type));
+ }
+ auto* ctor_value_expr = str_params.expr(*this, 0);
+ values.push_back(ctor_value_expr);
}
- auto* ctor_value_expr = str_params.expr(*this, 0);
- values.push_back(ctor_value_expr);
- }
- auto* s = Structure("s", members);
- auto* tc = Construct(Source{{12, 34}}, ty.Of(s), values);
- WrapInFunction(tc);
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: struct constructor has too many inputs: expected " +
- std::to_string(N) + ", found " + std::to_string(N + 1));
+ auto* s = Structure("s", members);
+ auto* tc = Construct(Source{{12, 34}}, ty.Of(s), values);
+ WrapInFunction(tc);
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: struct constructor has too many inputs: expected " +
+ std::to_string(N) + ", found " + std::to_string(N + 1));
}
INSTANTIATE_TEST_SUITE_P(ResolverTypeConstructorValidationTest,
StructConstructorInputsTest,
- testing::Combine(testing::ValuesIn(all_types),
- number_of_members));
+ testing::Combine(testing::ValuesIn(all_types), number_of_members));
using StructConstructorTypeTest =
ResolverTestWithParam<std::tuple<CreatePtrs, // struct member type
CreatePtrs, // constructor value type
uint32_t>>; // number of struct members
TEST_P(StructConstructorTypeTest, AllTypes) {
- auto& param = GetParam();
- auto& str_params = std::get<0>(param);
- auto& ctor_params = std::get<1>(param);
- uint32_t N = std::get<2>(param);
-
- if (str_params.ast == ctor_params.ast) {
- return;
- }
-
- ast::StructMemberList members;
- ast::ExpressionList values;
- // make the last value of the constructor to have a different type
- uint32_t constructor_value_with_different_type = N - 1;
- for (uint32_t i = 0; i < N; i++) {
- auto* struct_type = str_params.ast(*this);
- members.push_back(Member("member_" + std::to_string(i), struct_type));
- auto* ctor_value_expr = (i == constructor_value_with_different_type)
- ? ctor_params.expr(*this, 0)
- : str_params.expr(*this, 0);
- values.push_back(ctor_value_expr);
- }
- auto* s = Structure("s", members);
- auto* tc = Construct(ty.Of(s), values);
- WrapInFunction(tc);
-
- std::string found = FriendlyName(ctor_params.ast(*this));
- std::string expected = FriendlyName(str_params.ast(*this));
- std::stringstream err;
- err << "error: type in struct constructor does not match struct member ";
- err << "type: expected '" << expected << "', found '" << found << "'";
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), err.str());
+ auto& param = GetParam();
+ auto& str_params = std::get<0>(param);
+ auto& ctor_params = std::get<1>(param);
+ uint32_t N = std::get<2>(param);
+
+ if (str_params.ast == ctor_params.ast) {
+ return;
+ }
+
+ ast::StructMemberList members;
+ ast::ExpressionList values;
+    // make the last value of the constructor have a different type
+ uint32_t constructor_value_with_different_type = N - 1;
+ for (uint32_t i = 0; i < N; i++) {
+ auto* struct_type = str_params.ast(*this);
+ members.push_back(Member("member_" + std::to_string(i), struct_type));
+ auto* ctor_value_expr = (i == constructor_value_with_different_type)
+ ? ctor_params.expr(*this, 0)
+ : str_params.expr(*this, 0);
+ values.push_back(ctor_value_expr);
+ }
+ auto* s = Structure("s", members);
+ auto* tc = Construct(ty.Of(s), values);
+ WrapInFunction(tc);
+
+ std::string found = FriendlyName(ctor_params.ast(*this));
+ std::string expected = FriendlyName(str_params.ast(*this));
+ std::stringstream err;
+ err << "error: type in struct constructor does not match struct member ";
+ err << "type: expected '" << expected << "', found '" << found << "'";
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), err.str());
}
INSTANTIATE_TEST_SUITE_P(ResolverTypeConstructorValidationTest,
@@ -2841,94 +2601,85 @@ INSTANTIATE_TEST_SUITE_P(ResolverTypeConstructorValidationTest,
number_of_members));
TEST_F(ResolverTypeConstructorValidationTest, Expr_Constructor_Struct_Nested) {
- auto* inner_m = Member("m", ty.i32());
- auto* inner_s = Structure("inner_s", {inner_m});
+ auto* inner_m = Member("m", ty.i32());
+ auto* inner_s = Structure("inner_s", {inner_m});
- auto* m0 = Member("m0", ty.i32());
- auto* m1 = Member("m1", ty.Of(inner_s));
- auto* m2 = Member("m2", ty.i32());
- auto* s = Structure("s", {m0, m1, m2});
+ auto* m0 = Member("m0", ty.i32());
+ auto* m1 = Member("m1", ty.Of(inner_s));
+ auto* m2 = Member("m2", ty.i32());
+ auto* s = Structure("s", {m0, m1, m2});
- auto* tc = Construct(Source{{12, 34}}, ty.Of(s), 1, 1, 1);
- WrapInFunction(tc);
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "error: type in struct constructor does not match struct member "
- "type: expected 'inner_s', found 'i32'");
+ auto* tc = Construct(Source{{12, 34}}, ty.Of(s), 1_i, 1_i, 1_i);
+ WrapInFunction(tc);
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "error: type in struct constructor does not match struct member "
+ "type: expected 'inner_s', found 'i32'");
}
TEST_F(ResolverTypeConstructorValidationTest, Expr_Constructor_Struct) {
- auto* m = Member("m", ty.i32());
- auto* s = Structure("MyInputs", {m});
- auto* tc = Construct(Source{{12, 34}}, ty.Of(s));
- WrapInFunction(tc);
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ auto* m = Member("m", ty.i32());
+ auto* s = Structure("MyInputs", {m});
+ auto* tc = Construct(Source{{12, 34}}, ty.Of(s));
+ WrapInFunction(tc);
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverTypeConstructorValidationTest, Expr_Constructor_Struct_Empty) {
- auto* str = Structure("S", {
- Member("a", ty.i32()),
- Member("b", ty.f32()),
- Member("c", ty.vec3<i32>()),
- });
+ auto* str = Structure("S", {
+ Member("a", ty.i32()),
+ Member("b", ty.f32()),
+ Member("c", ty.vec3<i32>()),
+ });
- WrapInFunction(Construct(ty.Of(str)));
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ WrapInFunction(Construct(ty.Of(str)));
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
}
} // namespace StructConstructor
TEST_F(ResolverTypeConstructorValidationTest, NonConstructibleType_Atomic) {
- WrapInFunction(
- Assign(Phony(), Construct(Source{{12, 34}}, ty.atomic(ty.i32()))));
+ WrapInFunction(Assign(Phony(), Construct(Source{{12, 34}}, ty.atomic(ty.i32()))));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), "12:34 error: type is not constructible");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: type is not constructible");
}
-TEST_F(ResolverTypeConstructorValidationTest,
- NonConstructibleType_AtomicArray) {
- WrapInFunction(Assign(
- Phony(), Construct(Source{{12, 34}}, ty.array(ty.atomic(ty.i32()), 4))));
+TEST_F(ResolverTypeConstructorValidationTest, NonConstructibleType_AtomicArray) {
+ WrapInFunction(
+ Assign(Phony(), Construct(Source{{12, 34}}, ty.array(ty.atomic(ty.i32()), 4_i))));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- "12:34 error: array constructor has non-constructible element type");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: array constructor has non-constructible element type");
}
-TEST_F(ResolverTypeConstructorValidationTest,
- NonConstructibleType_AtomicStructMember) {
- auto* str = Structure("S", {Member("a", ty.atomic(ty.i32()))});
- WrapInFunction(Assign(Phony(), Construct(Source{{12, 34}}, ty.Of(str))));
+TEST_F(ResolverTypeConstructorValidationTest, NonConstructibleType_AtomicStructMember) {
+ auto* str = Structure("S", {Member("a", ty.atomic(ty.i32()))});
+ WrapInFunction(Assign(Phony(), Construct(Source{{12, 34}}, ty.Of(str))));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: struct constructor has non-constructible type");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: struct constructor has non-constructible type");
}
TEST_F(ResolverTypeConstructorValidationTest, NonConstructibleType_Sampler) {
- WrapInFunction(Assign(
- Phony(),
- Construct(Source{{12, 34}}, ty.sampler(ast::SamplerKind::kSampler))));
+ WrapInFunction(
+ Assign(Phony(), Construct(Source{{12, 34}}, ty.sampler(ast::SamplerKind::kSampler))));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), "12:34 error: type is not constructible");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: type is not constructible");
}
TEST_F(ResolverTypeConstructorValidationTest, TypeConstructorAsStatement) {
- WrapInFunction(
- CallStmt(Construct(Source{{12, 34}}, ty.vec2<f32>(), 1.f, 2.f)));
+ WrapInFunction(CallStmt(Construct(Source{{12, 34}}, ty.vec2<f32>(), 1_f, 2_f)));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: type constructor evaluated but not used");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: type constructor evaluated but not used");
}
TEST_F(ResolverTypeConstructorValidationTest, TypeConversionAsStatement) {
- WrapInFunction(CallStmt(Construct(Source{{12, 34}}, ty.f32(), 1)));
+ WrapInFunction(CallStmt(Construct(Source{{12, 34}}, ty.f32(), 1_i)));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), "12:34 error: type cast evaluated but not used");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: type conversion evaluated but not used");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/resolver/type_validation_test.cc b/chromium/third_party/dawn/src/tint/resolver/type_validation_test.cc
index dadd7d2174f..a5e68cef343 100644
--- a/chromium/third_party/dawn/src/tint/resolver/type_validation_test.cc
+++ b/chromium/third_party/dawn/src/tint/resolver/type_validation_test.cc
@@ -17,11 +17,13 @@
#include "src/tint/ast/stage_attribute.h"
#include "src/tint/resolver/resolver.h"
#include "src/tint/resolver/resolver_test_helper.h"
-#include "src/tint/sem/multisampled_texture_type.h"
-#include "src/tint/sem/storage_texture_type.h"
+#include "src/tint/sem/multisampled_texture.h"
+#include "src/tint/sem/storage_texture.h"
#include "gmock/gmock.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::resolver {
namespace {
@@ -50,677 +52,656 @@ template <typename T>
using alias2 = builder::alias2<T>;
template <typename T>
using alias3 = builder::alias3<T>;
-using f32 = builder::f32;
-using i32 = builder::i32;
-using u32 = builder::u32;
-class ResolverTypeValidationTest : public resolver::TestHelper,
- public testing::Test {};
+class ResolverTypeValidationTest : public resolver::TestHelper, public testing::Test {};
TEST_F(ResolverTypeValidationTest, VariableDeclNoConstructor_Pass) {
- // {
- // var a :i32;
- // a = 2;
- // }
- auto* var = Var("a", ty.i32(), ast::StorageClass::kNone, nullptr);
- auto* lhs = Expr("a");
- auto* rhs = Expr(2);
+ // {
+ // var a :i32;
+ // a = 2;
+ // }
+ auto* var = Var("a", ty.i32(), ast::StorageClass::kNone, nullptr);
+ auto* lhs = Expr("a");
+ auto* rhs = Expr(2_i);
- auto* body =
- Block(Decl(var), Assign(Source{Source::Location{12, 34}}, lhs, rhs));
+ auto* body = Block(Decl(var), Assign(Source{Source::Location{12, 34}}, lhs, rhs));
- WrapInFunction(body);
+ WrapInFunction(body);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_NE(TypeOf(lhs), nullptr);
- ASSERT_NE(TypeOf(rhs), nullptr);
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_NE(TypeOf(lhs), nullptr);
+ ASSERT_NE(TypeOf(rhs), nullptr);
}
-TEST_F(ResolverTypeValidationTest, GlobalConstantNoConstructor_Pass) {
- // @id(0) override a :i32;
- Override(Source{{12, 34}}, "a", ty.i32(), nullptr, ast::AttributeList{Id(0)});
+TEST_F(ResolverTypeValidationTest, GlobalOverrideNoConstructor_Pass) {
+ // @id(0) override a :i32;
+ Override(Source{{12, 34}}, "a", ty.i32(), nullptr, ast::AttributeList{Id(0)});
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverTypeValidationTest, GlobalVariableWithStorageClass_Pass) {
- // var<private> global_var: f32;
- Global(Source{{12, 34}}, "global_var", ty.f32(), ast::StorageClass::kPrivate);
+ // var<private> global_var: f32;
+ Global(Source{{12, 34}}, "global_var", ty.f32(), ast::StorageClass::kPrivate);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
-TEST_F(ResolverTypeValidationTest, GlobalConstantWithStorageClass_Fail) {
- // const<private> global_var: f32;
- AST().AddGlobalVariable(create<ast::Variable>(
- Source{{12, 34}}, Symbols().Register("global_var"),
- ast::StorageClass::kPrivate, ast::Access::kUndefined, ty.f32(), true,
- false, Expr(1.23f), ast::AttributeList{}));
+TEST_F(ResolverTypeValidationTest, GlobalLetWithStorageClass_Fail) {
+ // let<private> global_var: f32;
+ AST().AddGlobalVariable(create<ast::Variable>(
+ Source{{12, 34}}, Symbols().Register("global_var"), ast::StorageClass::kPrivate,
+ ast::Access::kUndefined, ty.f32(), true, false, Expr(1.23_f), ast::AttributeList{}));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: global constants shouldn't have a storage class");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: global constants shouldn't have a storage class");
}
TEST_F(ResolverTypeValidationTest, GlobalConstNoStorageClass_Pass) {
- // let global_var: f32;
- GlobalConst(Source{{12, 34}}, "global_var", ty.f32(), Construct(ty.f32()));
+ // let global_var: f32;
+ GlobalConst(Source{{12, 34}}, "global_var", ty.f32(), Construct(ty.f32()));
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverTypeValidationTest, GlobalVariableUnique_Pass) {
- // var global_var0 : f32 = 0.1;
- // var global_var1 : i32 = 0;
+ // var global_var0 : f32 = 0.1;
+ // var global_var1 : i32 = 0;
- Global("global_var0", ty.f32(), ast::StorageClass::kPrivate, Expr(0.1f));
+ Global("global_var0", ty.f32(), ast::StorageClass::kPrivate, Expr(0.1_f));
- Global(Source{{12, 34}}, "global_var1", ty.f32(), ast::StorageClass::kPrivate,
- Expr(1.0f));
+ Global(Source{{12, 34}}, "global_var1", ty.f32(), ast::StorageClass::kPrivate, Expr(1_f));
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
-TEST_F(ResolverTypeValidationTest,
- GlobalVariableFunctionVariableNotUnique_Pass) {
- // fn my_func() {
- // var a: f32 = 2.0;
- // }
- // var a: f32 = 2.1;
+TEST_F(ResolverTypeValidationTest, GlobalVariableFunctionVariableNotUnique_Pass) {
+ // fn my_func() {
+ // var a: f32 = 2.0;
+ // }
+ // var a: f32 = 2.1;
- auto* var = Var("a", ty.f32(), ast::StorageClass::kNone, Expr(2.0f));
+ auto* var = Var("a", ty.f32(), ast::StorageClass::kNone, Expr(2_f));
- Func("my_func", ast::VariableList{}, ty.void_(), {Decl(var)});
+ Func("my_func", ast::VariableList{}, ty.void_(), {Decl(var)});
- Global("a", ty.f32(), ast::StorageClass::kPrivate, Expr(2.1f));
+ Global("a", ty.f32(), ast::StorageClass::kPrivate, Expr(2.1_f));
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverTypeValidationTest, RedeclaredIdentifierInnerScope_Pass) {
- // {
- // if (true) { var a : f32 = 2.0; }
- // var a : f32 = 3.14;
- // }
- auto* var = Var("a", ty.f32(), ast::StorageClass::kNone, Expr(2.0f));
+ // {
+ // if (true) { var a : f32 = 2.0; }
+ // var a : f32 = 3.14;
+ // }
+ auto* var = Var("a", ty.f32(), ast::StorageClass::kNone, Expr(2_f));
- auto* cond = Expr(true);
- auto* body = Block(Decl(var));
+ auto* cond = Expr(true);
+ auto* body = Block(Decl(var));
- auto* var_a_float = Var("a", ty.f32(), ast::StorageClass::kNone, Expr(3.1f));
+ auto* var_a_float = Var("a", ty.f32(), ast::StorageClass::kNone, Expr(3.1_f));
- auto* outer_body =
- Block(create<ast::IfStatement>(cond, body, ast::ElseStatementList{}),
- Decl(Source{{12, 34}}, var_a_float));
+ auto* outer_body = Block(If(cond, body), Decl(Source{{12, 34}}, var_a_float));
- WrapInFunction(outer_body);
+ WrapInFunction(outer_body);
- EXPECT_TRUE(r()->Resolve());
+ EXPECT_TRUE(r()->Resolve());
}
TEST_F(ResolverTypeValidationTest, RedeclaredIdentifierInnerScopeBlock_Pass) {
- // {
- // { var a : f32; }
- // var a : f32;
- // }
- auto* var_inner = Var("a", ty.f32(), ast::StorageClass::kNone);
- auto* inner = Block(Decl(Source{{12, 34}}, var_inner));
+ // {
+ // { var a : f32; }
+ // var a : f32;
+ // }
+ auto* var_inner = Var("a", ty.f32(), ast::StorageClass::kNone);
+ auto* inner = Block(Decl(Source{{12, 34}}, var_inner));
- auto* var_outer = Var("a", ty.f32(), ast::StorageClass::kNone);
- auto* outer_body = Block(inner, Decl(var_outer));
+ auto* var_outer = Var("a", ty.f32(), ast::StorageClass::kNone);
+ auto* outer_body = Block(inner, Decl(var_outer));
- WrapInFunction(outer_body);
+ WrapInFunction(outer_body);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
-TEST_F(ResolverTypeValidationTest,
- RedeclaredIdentifierDifferentFunctions_Pass) {
- // func0 { var a : f32 = 2.0; return; }
- // func1 { var a : f32 = 3.0; return; }
- auto* var0 = Var("a", ty.f32(), ast::StorageClass::kNone, Expr(2.0f));
+TEST_F(ResolverTypeValidationTest, RedeclaredIdentifierDifferentFunctions_Pass) {
+ // func0 { var a : f32 = 2.0; return; }
+ // func1 { var a : f32 = 3.0; return; }
+ auto* var0 = Var("a", ty.f32(), ast::StorageClass::kNone, Expr(2_f));
- auto* var1 = Var("a", ty.f32(), ast::StorageClass::kNone, Expr(1.0f));
+ auto* var1 = Var("a", ty.f32(), ast::StorageClass::kNone, Expr(1_f));
- Func("func0", ast::VariableList{}, ty.void_(),
- ast::StatementList{
- Decl(Source{{12, 34}}, var0),
- Return(),
- },
- ast::AttributeList{});
+ Func("func0", ast::VariableList{}, ty.void_(),
+ ast::StatementList{
+ Decl(Source{{12, 34}}, var0),
+ Return(),
+ },
+ ast::AttributeList{});
- Func("func1", ast::VariableList{}, ty.void_(),
- ast::StatementList{
- Decl(Source{{13, 34}}, var1),
- Return(),
- });
+ Func("func1", ast::VariableList{}, ty.void_(),
+ ast::StatementList{
+ Decl(Source{{13, 34}}, var1),
+ Return(),
+ });
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+}
+
+TEST_F(ResolverTypeValidationTest, ArraySize_AIntLiteral_Pass) {
+ // var<private> a : array<f32, 4>;
+ Global("a", ty.array(ty.f32(), Expr(Source{{12, 34}}, 4_a)), ast::StorageClass::kPrivate);
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverTypeValidationTest, ArraySize_UnsignedLiteral_Pass) {
- // var<private> a : array<f32, 4u>;
- Global("a", ty.array(ty.f32(), Expr(Source{{12, 34}}, 4u)),
- ast::StorageClass::kPrivate);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ // var<private> a : array<f32, 4u>;
+ Global("a", ty.array(ty.f32(), Expr(Source{{12, 34}}, 4_u)), ast::StorageClass::kPrivate);
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverTypeValidationTest, ArraySize_SignedLiteral_Pass) {
- // var<private> a : array<f32, 4>;
- Global("a", ty.array(ty.f32(), Expr(Source{{12, 34}}, 4)),
- ast::StorageClass::kPrivate);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ // var<private> a : array<f32, 4i>;
+ Global("a", ty.array(ty.f32(), Expr(Source{{12, 34}}, 4_i)), ast::StorageClass::kPrivate);
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
-TEST_F(ResolverTypeValidationTest, ArraySize_UnsignedConstant_Pass) {
- // let size = 4u;
- // var<private> a : array<f32, size>;
- GlobalConst("size", nullptr, Expr(4u));
- Global("a", ty.array(ty.f32(), Expr(Source{{12, 34}}, "size")),
- ast::StorageClass::kPrivate);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+TEST_F(ResolverTypeValidationTest, ArraySize_UnsignedLet_Pass) {
+ // let size = 4u;
+ // var<private> a : array<f32, size>;
+ GlobalConst("size", nullptr, Expr(4_u));
+ Global("a", ty.array(ty.f32(), Expr(Source{{12, 34}}, "size")), ast::StorageClass::kPrivate);
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+}
+
+TEST_F(ResolverTypeValidationTest, ArraySize_SignedLet_Pass) {
+ // let size = 4i;
+ // var<private> a : array<f32, size>;
+ GlobalConst("size", nullptr, Expr(4_i));
+ Global("a", ty.array(ty.f32(), Expr(Source{{12, 34}}, "size")), ast::StorageClass::kPrivate);
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
-TEST_F(ResolverTypeValidationTest, ArraySize_SignedConstant_Pass) {
- // let size = 4;
- // var<private> a : array<f32, size>;
- GlobalConst("size", nullptr, Expr(4));
- Global("a", ty.array(ty.f32(), Expr(Source{{12, 34}}, "size")),
- ast::StorageClass::kPrivate);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+TEST_F(ResolverTypeValidationTest, ArraySize_AIntLiteral_Zero) {
+ // var<private> a : array<f32, 0>;
+ Global("a", ty.array(ty.f32(), Expr(Source{{12, 34}}, 0_a)), ast::StorageClass::kPrivate);
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: array size must be at least 1");
}
TEST_F(ResolverTypeValidationTest, ArraySize_UnsignedLiteral_Zero) {
- // var<private> a : array<f32, 0u>;
- Global("a", ty.array(ty.f32(), Expr(Source{{12, 34}}, 0u)),
- ast::StorageClass::kPrivate);
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), "12:34 error: array size must be at least 1");
+ // var<private> a : array<f32, 0u>;
+ Global("a", ty.array(ty.f32(), Expr(Source{{12, 34}}, 0_u)), ast::StorageClass::kPrivate);
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: array size must be at least 1");
}
TEST_F(ResolverTypeValidationTest, ArraySize_SignedLiteral_Zero) {
- // var<private> a : array<f32, 0>;
- Global("a", ty.array(ty.f32(), Expr(Source{{12, 34}}, 0)),
- ast::StorageClass::kPrivate);
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), "12:34 error: array size must be at least 1");
+ // var<private> a : array<f32, 0i>;
+ Global("a", ty.array(ty.f32(), Expr(Source{{12, 34}}, 0_i)), ast::StorageClass::kPrivate);
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: array size must be at least 1");
}
TEST_F(ResolverTypeValidationTest, ArraySize_SignedLiteral_Negative) {
- // var<private> a : array<f32, -10>;
- Global("a", ty.array(ty.f32(), Expr(Source{{12, 34}}, -10)),
- ast::StorageClass::kPrivate);
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), "12:34 error: array size must be at least 1");
-}
-
-TEST_F(ResolverTypeValidationTest, ArraySize_UnsignedConstant_Zero) {
- // let size = 0u;
- // var<private> a : array<f32, size>;
- GlobalConst("size", nullptr, Expr(0u));
- Global("a", ty.array(ty.f32(), Expr(Source{{12, 34}}, "size")),
- ast::StorageClass::kPrivate);
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), "12:34 error: array size must be at least 1");
-}
-
-TEST_F(ResolverTypeValidationTest, ArraySize_SignedConstant_Zero) {
- // let size = 0;
- // var<private> a : array<f32, size>;
- GlobalConst("size", nullptr, Expr(0));
- Global("a", ty.array(ty.f32(), Expr(Source{{12, 34}}, "size")),
- ast::StorageClass::kPrivate);
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), "12:34 error: array size must be at least 1");
-}
-
-TEST_F(ResolverTypeValidationTest, ArraySize_SignedConstant_Negative) {
- // let size = -10;
- // var<private> a : array<f32, size>;
- GlobalConst("size", nullptr, Expr(-10));
- Global("a", ty.array(ty.f32(), Expr(Source{{12, 34}}, "size")),
- ast::StorageClass::kPrivate);
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), "12:34 error: array size must be at least 1");
+ // var<private> a : array<f32, -10i>;
+ Global("a", ty.array(ty.f32(), Expr(Source{{12, 34}}, -10_i)), ast::StorageClass::kPrivate);
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: array size must be at least 1");
+}
+
+TEST_F(ResolverTypeValidationTest, ArraySize_UnsignedLet_Zero) {
+ // let size = 0u;
+ // var<private> a : array<f32, size>;
+ GlobalConst("size", nullptr, Expr(0_u));
+ Global("a", ty.array(ty.f32(), Expr(Source{{12, 34}}, "size")), ast::StorageClass::kPrivate);
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: array size must be at least 1");
+}
+
+TEST_F(ResolverTypeValidationTest, ArraySize_SignedLet_Zero) {
+ // let size = 0i;
+ // var<private> a : array<f32, size>;
+ GlobalConst("size", nullptr, Expr(0_i));
+ Global("a", ty.array(ty.f32(), Expr(Source{{12, 34}}, "size")), ast::StorageClass::kPrivate);
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: array size must be at least 1");
+}
+
+TEST_F(ResolverTypeValidationTest, ArraySize_SignedLet_Negative) {
+ // let size = -10i;
+ // var<private> a : array<f32, size>;
+ GlobalConst("size", nullptr, Expr(-10_i));
+ Global("a", ty.array(ty.f32(), Expr(Source{{12, 34}}, "size")), ast::StorageClass::kPrivate);
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: array size must be at least 1");
}
TEST_F(ResolverTypeValidationTest, ArraySize_FloatLiteral) {
- // var<private> a : array<f32, 10.0>;
- Global("a", ty.array(ty.f32(), Expr(Source{{12, 34}}, 10.f)),
- ast::StorageClass::kPrivate);
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), "12:34 error: array size must be integer scalar");
+ // var<private> a : array<f32, 10.0>;
+ Global("a", ty.array(ty.f32(), Expr(Source{{12, 34}}, 10_f)), ast::StorageClass::kPrivate);
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: array size must be integer scalar");
}
TEST_F(ResolverTypeValidationTest, ArraySize_IVecLiteral) {
- // var<private> a : array<f32, vec2<i32>(10, 10)>;
- Global(
- "a",
- ty.array(ty.f32(), Construct(Source{{12, 34}}, ty.vec2<i32>(), 10, 10)),
- ast::StorageClass::kPrivate);
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), "12:34 error: array size must be integer scalar");
-}
-
-TEST_F(ResolverTypeValidationTest, ArraySize_FloatConstant) {
- // let size = 10.0;
- // var<private> a : array<f32, size>;
- GlobalConst("size", nullptr, Expr(10.f));
- Global("a", ty.array(ty.f32(), Expr(Source{{12, 34}}, "size")),
- ast::StorageClass::kPrivate);
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), "12:34 error: array size must be integer scalar");
-}
-
-TEST_F(ResolverTypeValidationTest, ArraySize_IVecConstant) {
- // let size = vec2<i32>(100, 100);
- // var<private> a : array<f32, size>;
- GlobalConst("size", nullptr, Construct(ty.vec2<i32>(), 100, 100));
- Global("a", ty.array(ty.f32(), Expr(Source{{12, 34}}, "size")),
- ast::StorageClass::kPrivate);
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), "12:34 error: array size must be integer scalar");
+ // var<private> a : array<f32, vec2<i32>(10, 10)>;
+ Global("a", ty.array(ty.f32(), Construct(Source{{12, 34}}, ty.vec2<i32>(), 10_i, 10_i)),
+ ast::StorageClass::kPrivate);
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: array size must be integer scalar");
+}
+
+TEST_F(ResolverTypeValidationTest, ArraySize_FloatLet) {
+ // let size = 10.0;
+ // var<private> a : array<f32, size>;
+ GlobalConst("size", nullptr, Expr(10_f));
+ Global("a", ty.array(ty.f32(), Expr(Source{{12, 34}}, "size")), ast::StorageClass::kPrivate);
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: array size must be integer scalar");
+}
+
+TEST_F(ResolverTypeValidationTest, ArraySize_IVecLet) {
+ // let size = vec2<i32>(100, 100);
+ // var<private> a : array<f32, size>;
+ GlobalConst("size", nullptr, Construct(ty.vec2<i32>(), 100_i, 100_i));
+ Global("a", ty.array(ty.f32(), Expr(Source{{12, 34}}, "size")), ast::StorageClass::kPrivate);
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: array size must be integer scalar");
}
TEST_F(ResolverTypeValidationTest, ArraySize_TooBig_ImplicitStride) {
- // var<private> a : array<f32, 0x40000000>;
- Global("a", ty.array(Source{{12, 34}}, ty.f32(), 0x40000000),
- ast::StorageClass::kPrivate);
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: array size in bytes must not exceed 0xffffffff, but "
- "is 0x100000000");
+ // var<private> a : array<f32, 0x40000000u>;
+ Global("a", ty.array(Source{{12, 34}}, ty.f32(), 0x40000000_u), ast::StorageClass::kPrivate);
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: array size in bytes must not exceed 0xffffffff, but "
+ "is 0x100000000");
}
TEST_F(ResolverTypeValidationTest, ArraySize_TooBig_ExplicitStride) {
- // var<private> a : @stride(8) array<f32, 0x20000000>;
- Global("a", ty.array(Source{{12, 34}}, ty.f32(), 0x20000000, 8),
- ast::StorageClass::kPrivate);
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: array size in bytes must not exceed 0xffffffff, but "
- "is 0x100000000");
-}
-
-TEST_F(ResolverTypeValidationTest, ArraySize_OverridableConstant) {
- // override size = 10;
- // var<private> a : array<f32, size>;
- Override("size", nullptr, Expr(10));
- Global("a", ty.array(ty.f32(), Expr(Source{{12, 34}}, "size")),
- ast::StorageClass::kPrivate);
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- "12:34 error: array size expression must not be pipeline-overridable");
+ // var<private> a : @stride(8) array<f32, 0x20000000u>;
+ Global("a", ty.array(Source{{12, 34}}, ty.f32(), 0x20000000_u, 8), ast::StorageClass::kPrivate);
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: array size in bytes must not exceed 0xffffffff, but "
+ "is 0x100000000");
+}
+
+TEST_F(ResolverTypeValidationTest, ArraySize_Overridable) {
+ // override size = 10i;
+ // var<private> a : array<f32, size>;
+ Override("size", nullptr, Expr(10_i));
+ Global("a", ty.array(ty.f32(), Expr(Source{{12, 34}}, "size")), ast::StorageClass::kPrivate);
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: array size expression must not be pipeline-overridable");
}
TEST_F(ResolverTypeValidationTest, ArraySize_ModuleVar) {
- // var<private> size : i32 = 10;
- // var<private> a : array<f32, size>;
- Global("size", ty.i32(), Expr(10), ast::StorageClass::kPrivate);
- Global("a", ty.array(ty.f32(), Expr(Source{{12, 34}}, "size")),
- ast::StorageClass::kPrivate);
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- "12:34 error: array size identifier must be a module-scope constant");
-}
-
-TEST_F(ResolverTypeValidationTest, ArraySize_FunctionConstant) {
- // {
- // let size = 10;
- // var a : array<f32, size>;
- // }
- auto* size = Const("size", nullptr, Expr(10));
- auto* a = Var("a", ty.array(ty.f32(), Expr(Source{{12, 34}}, "size")));
- WrapInFunction(Block(Decl(size), Decl(a)));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- "12:34 error: array size identifier must be a module-scope constant");
+ // var<private> size : i32 = 10i;
+ // var<private> a : array<f32, size>;
+ Global("size", ty.i32(), Expr(10_i), ast::StorageClass::kPrivate);
+ Global("a", ty.array(ty.f32(), Expr(Source{{12, 34}}, "size")), ast::StorageClass::kPrivate);
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: array size identifier must be a module-scope constant");
+}
+
+TEST_F(ResolverTypeValidationTest, ArraySize_FunctionLet) {
+ // {
+ // let size = 10;
+ // var a : array<f32, size>;
+ // }
+ auto* size = Let("size", nullptr, Expr(10_i));
+ auto* a = Var("a", ty.array(ty.f32(), Expr(Source{{12, 34}}, "size")));
+ WrapInFunction(Block(Decl(size), Decl(a)));
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: array size identifier must be a module-scope constant");
}
TEST_F(ResolverTypeValidationTest, ArraySize_InvalidExpr) {
- // var a : array<f32, i32(4)>;
- auto* size = Const("size", nullptr, Expr(10));
- auto* a =
- Var("a", ty.array(ty.f32(), Construct(Source{{12, 34}}, ty.i32(), 4)));
- WrapInFunction(Block(Decl(size), Decl(a)));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: array size expression must be either a literal or a "
- "module-scope constant");
+ // var a : array<f32, i32(4i)>;
+ auto* a = Var("a", ty.array(ty.f32(), Construct(Source{{12, 34}}, ty.i32(), 4_i)));
+ WrapInFunction(Block(Decl(a)));
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: array size expression must be either a literal or a "
+ "module-scope constant");
}
TEST_F(ResolverTypeValidationTest, RuntimeArrayInFunction_Fail) {
- /// @stage(vertex)
- // fn func() { var a : array<i32>; }
-
- auto* var =
- Var(Source{{12, 34}}, "a", ty.array<i32>(), ast::StorageClass::kNone);
-
- Func("func", ast::VariableList{}, ty.void_(),
- ast::StatementList{
- Decl(var),
- },
- ast::AttributeList{
- Stage(ast::PipelineStage::kVertex),
- });
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- R"(12:34 error: runtime-sized arrays can only be used in the <storage> storage class
+ /// @vertex
+ // fn func() { var a : array<i32>; }
+
+ auto* var = Var(Source{{12, 34}}, "a", ty.array<i32>(), ast::StorageClass::kNone);
+
+ Func("func", ast::VariableList{}, ty.void_(),
+ ast::StatementList{
+ Decl(var),
+ },
+ ast::AttributeList{
+ Stage(ast::PipelineStage::kVertex),
+ });
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ R"(12:34 error: runtime-sized arrays can only be used in the <storage> storage class
12:34 note: while instantiating variable a)");
}
TEST_F(ResolverTypeValidationTest, Struct_Member_VectorNoType) {
- // struct S {
- // a: vec3;
- // };
+ // struct S {
+ // a: vec3;
+ // };
- Structure("S",
- {Member("a", create<ast::Vector>(Source{{12, 34}}, nullptr, 3))});
+ Structure("S", {Member("a", create<ast::Vector>(Source{{12, 34}}, nullptr, 3))});
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), "12:34 error: missing vector element type");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: missing vector element type");
}
TEST_F(ResolverTypeValidationTest, Struct_Member_MatrixNoType) {
- // struct S {
- // a: mat3x3;
- // };
- Structure(
- "S", {Member("a", create<ast::Matrix>(Source{{12, 34}}, nullptr, 3, 3))});
+ // struct S {
+ // a: mat3x3;
+ // };
+ Structure("S", {Member("a", create<ast::Matrix>(Source{{12, 34}}, nullptr, 3, 3))});
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), "12:34 error: missing matrix element type");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: missing matrix element type");
}
TEST_F(ResolverTypeValidationTest, Struct_TooBig) {
- // struct Foo {
- // a: array<f32, 0x20000000>;
- // b: array<f32, 0x20000000>;
- // };
+ // struct Foo {
+ // a: array<f32, 0x20000000>;
+ // b: array<f32, 0x20000000>;
+ // };
- Structure(Source{{12, 34}}, "Foo",
- {
- Member("a", ty.array<f32, 0x20000000>()),
- Member("b", ty.array<f32, 0x20000000>()),
- });
+ Structure(Source{{12, 34}}, "Foo",
+ {
+ Member("a", ty.array<f32, 0x20000000>()),
+ Member("b", ty.array<f32, 0x20000000>()),
+ });
- WrapInFunction();
+ WrapInFunction();
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: struct size in bytes must not exceed 0xffffffff, but "
- "is 0x100000000");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: struct size in bytes must not exceed 0xffffffff, but "
+ "is 0x100000000");
}
TEST_F(ResolverTypeValidationTest, Struct_MemberOffset_TooBig) {
- // struct Foo {
- // a: array<f32, 0x3fffffff>;
- // b: f32;
- // c: f32;
- // };
-
- Structure("Foo", {
- Member("a", ty.array<f32, 0x3fffffff>()),
- Member("b", ty.f32()),
- Member(Source{{12, 34}}, "c", ty.f32()),
- });
+ // struct Foo {
+ // a: array<f32, 0x3fffffff>;
+ // b: f32;
+ // c: f32;
+ // };
+
+ Structure("Foo", {
+ Member("a", ty.array<f32, 0x3fffffff>()),
+ Member("b", ty.f32()),
+ Member(Source{{12, 34}}, "c", ty.f32()),
+ });
- WrapInFunction();
+ WrapInFunction();
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: struct member has byte offset 0x100000000, but must "
- "not exceed 0xffffffff");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: struct member has byte offset 0x100000000, but must "
+ "not exceed 0xffffffff");
}
TEST_F(ResolverTypeValidationTest, RuntimeArrayIsLast_Pass) {
- // struct Foo {
- // vf: f32;
- // rt: array<f32>;
- // };
-
- Structure("Foo", {
- Member("vf", ty.f32()),
- Member("rt", ty.array<f32>()),
- });
+ // struct Foo {
+ // vf: f32;
+ // rt: array<f32>;
+ // };
+
+ Structure("Foo", {
+ Member("vf", ty.f32()),
+ Member("rt", ty.array<f32>()),
+ });
- WrapInFunction();
+ WrapInFunction();
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverTypeValidationTest, RuntimeArrayInArray) {
- // struct Foo {
- // rt : array<array<f32>, 4>;
- // };
+ // struct Foo {
+ // rt : array<array<f32>, 4u>;
+ // };
- Structure("Foo",
- {Member("rt", ty.array(Source{{12, 34}}, ty.array<f32>(), 4))});
+ Structure("Foo", {Member("rt", ty.array(Source{{12, 34}}, ty.array<f32>(), 4_u))});
- EXPECT_FALSE(r()->Resolve()) << r()->error();
- EXPECT_EQ(r()->error(),
- "12:34 error: an array element type cannot contain a runtime-sized "
- "array");
+ EXPECT_FALSE(r()->Resolve()) << r()->error();
+ EXPECT_EQ(r()->error(),
+ "12:34 error: an array element type cannot contain a runtime-sized "
+ "array");
}
TEST_F(ResolverTypeValidationTest, RuntimeArrayInStructInArray) {
- // struct Foo {
- // rt : array<f32>;
- // };
- // var<private> a : array<Foo, 4>;
+ // struct Foo {
+ // rt : array<f32>;
+ // };
+ // var<private> a : array<Foo, 4>;
- auto* foo = Structure("Foo", {Member("rt", ty.array<f32>())});
- Global("v", ty.array(Source{{12, 34}}, ty.Of(foo), 4),
- ast::StorageClass::kPrivate);
+ auto* foo = Structure("Foo", {Member("rt", ty.array<f32>())});
+ Global("v", ty.array(Source{{12, 34}}, ty.Of(foo), 4_u), ast::StorageClass::kPrivate);
- EXPECT_FALSE(r()->Resolve()) << r()->error();
- EXPECT_EQ(r()->error(),
- "12:34 error: an array element type cannot contain a runtime-sized "
- "array");
+ EXPECT_FALSE(r()->Resolve()) << r()->error();
+ EXPECT_EQ(r()->error(),
+ "12:34 error: an array element type cannot contain a runtime-sized "
+ "array");
}
TEST_F(ResolverTypeValidationTest, RuntimeArrayInStructInStruct) {
- // struct Foo {
- // rt : array<f32>;
- // };
- // struct Outer {
- // inner : Foo;
- // };
+ // struct Foo {
+ // rt : array<f32>;
+ // };
+ // struct Outer {
+ // inner : Foo;
+ // };
- auto* foo = Structure("Foo", {Member("rt", ty.array<f32>())});
- Structure("Outer", {Member(Source{{12, 34}}, "inner", ty.Of(foo))});
+ auto* foo = Structure("Foo", {Member("rt", ty.array<f32>())});
+ Structure("Outer", {Member(Source{{12, 34}}, "inner", ty.Of(foo))});
- EXPECT_FALSE(r()->Resolve()) << r()->error();
- EXPECT_EQ(r()->error(),
- "12:34 error: a struct that contains a runtime array cannot be "
- "nested inside another struct");
+ EXPECT_FALSE(r()->Resolve()) << r()->error();
+ EXPECT_EQ(r()->error(),
+ "12:34 error: a struct that contains a runtime array cannot be "
+ "nested inside another struct");
}
TEST_F(ResolverTypeValidationTest, RuntimeArrayIsNotLast_Fail) {
- // struct Foo {
- // rt: array<f32>;
- // vf: f32;
- // };
-
- Structure("Foo", {
- Member(Source{{12, 34}}, "rt", ty.array<f32>()),
- Member("vf", ty.f32()),
- });
+ // struct Foo {
+ // rt: array<f32>;
+ // vf: f32;
+ // };
- WrapInFunction();
+ Structure("Foo", {
+ Member(Source{{12, 34}}, "rt", ty.array<f32>()),
+ Member("vf", ty.f32()),
+ });
- EXPECT_FALSE(r()->Resolve()) << r()->error();
- EXPECT_EQ(
- r()->error(),
- R"(12:34 error: runtime arrays may only appear as the last member of a struct)");
+ WrapInFunction();
+
+ EXPECT_FALSE(r()->Resolve()) << r()->error();
+ EXPECT_EQ(r()->error(),
+ R"(12:34 error: runtime arrays may only appear as the last member of a struct)");
}
TEST_F(ResolverTypeValidationTest, RuntimeArrayAsGlobalVariable) {
- Global(Source{{56, 78}}, "g", ty.array<i32>(), ast::StorageClass::kPrivate);
+ Global(Source{{56, 78}}, "g", ty.array<i32>(), ast::StorageClass::kPrivate);
- ASSERT_FALSE(r()->Resolve());
+ ASSERT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- R"(56:78 error: runtime-sized arrays can only be used in the <storage> storage class
+ EXPECT_EQ(r()->error(),
+ R"(56:78 error: runtime-sized arrays can only be used in the <storage> storage class
56:78 note: while instantiating variable g)");
}
TEST_F(ResolverTypeValidationTest, RuntimeArrayAsLocalVariable) {
- auto* v = Var(Source{{56, 78}}, "g", ty.array<i32>());
- WrapInFunction(v);
+ auto* v = Var(Source{{56, 78}}, "g", ty.array<i32>());
+ WrapInFunction(v);
- ASSERT_FALSE(r()->Resolve());
+ ASSERT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- R"(56:78 error: runtime-sized arrays can only be used in the <storage> storage class
+ EXPECT_EQ(r()->error(),
+ R"(56:78 error: runtime-sized arrays can only be used in the <storage> storage class
56:78 note: while instantiating variable g)");
}
TEST_F(ResolverTypeValidationTest, RuntimeArrayAsParameter_Fail) {
- // fn func(a : array<u32>) {}
- // @stage(vertex) fn main() {}
-
- auto* param = Param(Source{{12, 34}}, "a", ty.array<i32>());
-
- Func("func", ast::VariableList{param}, ty.void_(),
- ast::StatementList{
- Return(),
- },
- ast::AttributeList{});
-
- Func("main", ast::VariableList{}, ty.void_(),
- ast::StatementList{
- Return(),
- },
- ast::AttributeList{
- Stage(ast::PipelineStage::kVertex),
- });
-
- EXPECT_FALSE(r()->Resolve()) << r()->error();
- EXPECT_EQ(
- r()->error(),
- R"(12:34 error: runtime-sized arrays can only be used in the <storage> storage class
+ // fn func(a : array<u32>) {}
+ // @vertex fn main() {}
+
+ auto* param = Param(Source{{12, 34}}, "a", ty.array<i32>());
+
+ Func("func", ast::VariableList{param}, ty.void_(),
+ ast::StatementList{
+ Return(),
+ },
+ ast::AttributeList{});
+
+ Func("main", ast::VariableList{}, ty.void_(),
+ ast::StatementList{
+ Return(),
+ },
+ ast::AttributeList{
+ Stage(ast::PipelineStage::kVertex),
+ });
+
+ EXPECT_FALSE(r()->Resolve()) << r()->error();
+ EXPECT_EQ(r()->error(),
+ R"(12:34 error: runtime-sized arrays can only be used in the <storage> storage class
12:34 note: while instantiating parameter a)");
}
TEST_F(ResolverTypeValidationTest, PtrToRuntimeArrayAsParameter_Fail) {
- // fn func(a : ptr<workgroup, array<u32>>) {}
-
- auto* param =
- Param(Source{{12, 34}}, "a",
- ty.pointer(ty.array<i32>(), ast::StorageClass::kWorkgroup));
-
- Func("func", ast::VariableList{param}, ty.void_(),
- ast::StatementList{
- Return(),
- },
- ast::AttributeList{});
-
- EXPECT_FALSE(r()->Resolve()) << r()->error();
- EXPECT_EQ(
- r()->error(),
- R"(12:34 error: runtime-sized arrays can only be used in the <storage> storage class
+ // fn func(a : ptr<workgroup, array<u32>>) {}
+
+ auto* param =
+ Param(Source{{12, 34}}, "a", ty.pointer(ty.array<i32>(), ast::StorageClass::kWorkgroup));
+
+ Func("func", ast::VariableList{param}, ty.void_(),
+ ast::StatementList{
+ Return(),
+ },
+ ast::AttributeList{});
+
+ EXPECT_FALSE(r()->Resolve()) << r()->error();
+ EXPECT_EQ(r()->error(),
+ R"(12:34 error: runtime-sized arrays can only be used in the <storage> storage class
12:34 note: while instantiating parameter a)");
}
TEST_F(ResolverTypeValidationTest, AliasRuntimeArrayIsNotLast_Fail) {
- // type RTArr = array<u32>;
- // struct s {
- // b: RTArr;
- // a: u32;
- //}
-
- auto* alias = Alias("RTArr", ty.array<u32>());
- Structure("s", {
- Member(Source{{12, 34}}, "b", ty.Of(alias)),
- Member("a", ty.u32()),
- });
+ // type RTArr = array<u32>;
+ // struct s {
+ // b: RTArr;
+ // a: u32;
+ //}
+
+ auto* alias = Alias("RTArr", ty.array<u32>());
+ Structure("s", {
+ Member(Source{{12, 34}}, "b", ty.Of(alias)),
+ Member("a", ty.u32()),
+ });
- WrapInFunction();
+ WrapInFunction();
- EXPECT_FALSE(r()->Resolve()) << r()->error();
- EXPECT_EQ(r()->error(),
- "12:34 error: runtime arrays may only appear as the last member of "
- "a struct");
+ EXPECT_FALSE(r()->Resolve()) << r()->error();
+ EXPECT_EQ(r()->error(),
+ "12:34 error: runtime arrays may only appear as the last member of "
+ "a struct");
}
TEST_F(ResolverTypeValidationTest, AliasRuntimeArrayIsLast_Pass) {
- // type RTArr = array<u32>;
- // struct s {
- // a: u32;
- // b: RTArr;
- //}
-
- auto* alias = Alias("RTArr", ty.array<u32>());
- Structure("s", {
- Member("a", ty.u32()),
- Member("b", ty.Of(alias)),
- });
+ // type RTArr = array<u32>;
+ // struct s {
+ // a: u32;
+ // b: RTArr;
+ //}
+
+ auto* alias = Alias("RTArr", ty.array<u32>());
+ Structure("s", {
+ Member("a", ty.u32()),
+ Member("b", ty.Of(alias)),
+ });
- WrapInFunction();
+ WrapInFunction();
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverTypeValidationTest, ArrayOfNonStorableType) {
- auto* tex_ty = ty.sampled_texture(ast::TextureDimension::k2d, ty.f32());
- Global("arr", ty.array(Source{{12, 34}}, tex_ty, 4),
- ast::StorageClass::kPrivate);
+ auto* tex_ty = ty.sampled_texture(ast::TextureDimension::k2d, ty.f32());
+ Global("arr", ty.array(Source{{12, 34}}, tex_ty, 4_i), ast::StorageClass::kPrivate);
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: texture_2d<f32> cannot be used as an element type of "
- "an array");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: texture_2d<f32> cannot be used as an element type of "
+ "an array");
}
TEST_F(ResolverTypeValidationTest, VariableAsType) {
- // var<private> a : i32;
- // var<private> b : a;
- Global("a", ty.i32(), ast::StorageClass::kPrivate);
- Global("b", ty.type_name("a"), ast::StorageClass::kPrivate);
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- R"(error: cannot use variable 'a' as type
+ // var<private> a : i32;
+ // var<private> b : a;
+ Global("a", ty.i32(), ast::StorageClass::kPrivate);
+ Global("b", ty.type_name("a"), ast::StorageClass::kPrivate);
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ R"(error: cannot use variable 'a' as type
note: 'a' declared here)");
}
TEST_F(ResolverTypeValidationTest, FunctionAsType) {
- // fn f() {}
- // var<private> v : f;
- Func("f", {}, ty.void_(), {});
- Global("v", ty.type_name("f"), ast::StorageClass::kPrivate);
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- R"(error: cannot use function 'f' as type
+ // fn f() {}
+ // var<private> v : f;
+ Func("f", {}, ty.void_(), {});
+ Global("v", ty.type_name("f"), ast::StorageClass::kPrivate);
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ R"(error: cannot use function 'f' as type
note: 'f' declared here)");
}
TEST_F(ResolverTypeValidationTest, BuiltinAsType) {
- // var<private> v : max;
- Global("v", ty.type_name("max"), ast::StorageClass::kPrivate);
+ // var<private> v : max;
+ Global("v", ty.type_name("max"), ast::StorageClass::kPrivate);
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "error: cannot use builtin 'max' as type");
+}
+
+TEST_F(ResolverTypeValidationTest, F16TypeUsedWithExtension) {
+ // enable f16;
+ // var<private> v : f16;
+ Enable(ast::Extension::kF16);
+
+ Global("v", ty.f16(), ast::StorageClass::kPrivate);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+}
+
+TEST_F(ResolverTypeValidationTest, F16TypeUsedWithoutExtension) {
+ // var<private> v : f16;
+ Global("v", ty.f16(), ast::StorageClass::kPrivate);
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), "error: cannot use builtin 'max' as type");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "error: f16 used without 'f16' extension enabled");
}
namespace GetCanonicalTests {
struct Params {
- builder::ast_type_func_ptr create_ast_type;
- builder::sem_type_func_ptr create_sem_type;
+ builder::ast_type_func_ptr create_ast_type;
+ builder::sem_type_func_ptr create_sem_type;
};
template <typename T>
constexpr Params ParamsFor() {
- return Params{DataType<T>::AST, DataType<T>::Sem};
+ return Params{DataType<T>::AST, DataType<T>::Sem};
}
static constexpr Params cases[] = {
@@ -749,69 +730,81 @@ static constexpr Params cases[] = {
using CanonicalTest = ResolverTestWithParam<Params>;
TEST_P(CanonicalTest, All) {
- auto& params = GetParam();
+ auto& params = GetParam();
- auto* type = params.create_ast_type(*this);
+ auto* type = params.create_ast_type(*this);
- auto* var = Var("v", type);
- auto* expr = Expr("v");
- WrapInFunction(var, expr);
+ auto* var = Var("v", type);
+ auto* expr = Expr("v");
+ WrapInFunction(var, expr);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
- auto* got = TypeOf(expr)->UnwrapRef();
- auto* expected = params.create_sem_type(*this);
+ auto* got = TypeOf(expr)->UnwrapRef();
+ auto* expected = params.create_sem_type(*this);
- EXPECT_EQ(got, expected) << "got: " << FriendlyName(got) << "\n"
- << "expected: " << FriendlyName(expected) << "\n";
+ EXPECT_EQ(got, expected) << "got: " << FriendlyName(got) << "\n"
+ << "expected: " << FriendlyName(expected) << "\n";
}
-INSTANTIATE_TEST_SUITE_P(ResolverTypeValidationTest,
- CanonicalTest,
- testing::ValuesIn(cases));
+INSTANTIATE_TEST_SUITE_P(ResolverTypeValidationTest, CanonicalTest, testing::ValuesIn(cases));
} // namespace GetCanonicalTests
-namespace MultisampledTextureTests {
+namespace SampledTextureTests {
struct DimensionParams {
- ast::TextureDimension dim;
- bool is_valid;
+ ast::TextureDimension dim;
+ bool is_valid;
};
-static constexpr DimensionParams dimension_cases[] = {
- DimensionParams{ast::TextureDimension::k1d, false},
- DimensionParams{ast::TextureDimension::k2d, true},
- DimensionParams{ast::TextureDimension::k2dArray, false},
- DimensionParams{ast::TextureDimension::k3d, false},
- DimensionParams{ast::TextureDimension::kCube, false},
- DimensionParams{ast::TextureDimension::kCubeArray, false}};
+using SampledTextureDimensionTest = ResolverTestWithParam<DimensionParams>;
+TEST_P(SampledTextureDimensionTest, All) {
+ auto& params = GetParam();
+ Global(Source{{12, 34}}, "a", ty.sampled_texture(params.dim, ty.i32()),
+ ast::StorageClass::kNone, nullptr, ast::AttributeList{GroupAndBinding(0, 0)});
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+}
+INSTANTIATE_TEST_SUITE_P(ResolverTypeValidationTest,
+ SampledTextureDimensionTest,
+ testing::Values( //
+ DimensionParams{ast::TextureDimension::k1d, true},
+ DimensionParams{ast::TextureDimension::k2d, true},
+ DimensionParams{ast::TextureDimension::k2dArray, true},
+ DimensionParams{ast::TextureDimension::k3d, true},
+ DimensionParams{ast::TextureDimension::kCube, true},
+ DimensionParams{ast::TextureDimension::kCubeArray, true}));
using MultisampledTextureDimensionTest = ResolverTestWithParam<DimensionParams>;
TEST_P(MultisampledTextureDimensionTest, All) {
- auto& params = GetParam();
- Global(Source{{12, 34}}, "a", ty.multisampled_texture(params.dim, ty.i32()),
- ast::StorageClass::kNone, nullptr,
- ast::AttributeList{GroupAndBinding(0, 0)});
+ auto& params = GetParam();
+ Global(Source{{12, 34}}, "a", ty.multisampled_texture(params.dim, ty.i32()),
+ ast::StorageClass::kNone, nullptr, ast::AttributeList{GroupAndBinding(0, 0)});
- if (params.is_valid) {
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- } else {
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: only 2d multisampled textures are supported");
- }
+ if (params.is_valid) {
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ } else {
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: only 2d multisampled textures are supported");
+ }
}
INSTANTIATE_TEST_SUITE_P(ResolverTypeValidationTest,
MultisampledTextureDimensionTest,
- testing::ValuesIn(dimension_cases));
+ testing::Values( //
+ DimensionParams{ast::TextureDimension::k1d, false},
+ DimensionParams{ast::TextureDimension::k2d, true},
+ DimensionParams{ast::TextureDimension::k2dArray, false},
+ DimensionParams{ast::TextureDimension::k3d, false},
+ DimensionParams{ast::TextureDimension::kCube, false},
+ DimensionParams{ast::TextureDimension::kCubeArray, false}));
struct TypeParams {
- builder::ast_type_func_ptr type_func;
- bool is_valid;
+ builder::ast_type_func_ptr type_func;
+ bool is_valid;
};
template <typename T>
constexpr TypeParams TypeParamsFor(bool is_valid) {
- return TypeParams{DataType<T>::AST, is_valid};
+ return TypeParams{DataType<T>::AST, is_valid};
}
static constexpr TypeParams type_cases[] = {
@@ -832,34 +825,52 @@ static constexpr TypeParams type_cases[] = {
TypeParamsFor<alias<mat3x3<f32>>>(false),
};
+using SampledTextureTypeTest = ResolverTestWithParam<TypeParams>;
+TEST_P(SampledTextureTypeTest, All) {
+ auto& params = GetParam();
+ Global(Source{{12, 34}}, "a",
+ ty.sampled_texture(ast::TextureDimension::k2d, params.type_func(*this)),
+ ast::StorageClass::kNone, nullptr, ast::AttributeList{GroupAndBinding(0, 0)});
+
+ if (params.is_valid) {
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ } else {
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: texture_2d<type>: type must be f32, "
+ "i32 or u32");
+ }
+}
+INSTANTIATE_TEST_SUITE_P(ResolverTypeValidationTest,
+ SampledTextureTypeTest,
+ testing::ValuesIn(type_cases));
+
using MultisampledTextureTypeTest = ResolverTestWithParam<TypeParams>;
TEST_P(MultisampledTextureTypeTest, All) {
- auto& params = GetParam();
- Global(Source{{12, 34}}, "a",
- ty.multisampled_texture(ast::TextureDimension::k2d,
- params.type_func(*this)),
- ast::StorageClass::kNone, nullptr,
- ast::AttributeList{GroupAndBinding(0, 0)});
-
- if (params.is_valid) {
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- } else {
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: texture_multisampled_2d<type>: type must be f32, "
- "i32 or u32");
- }
+ auto& params = GetParam();
+ Global(Source{{12, 34}}, "a",
+ ty.multisampled_texture(ast::TextureDimension::k2d, params.type_func(*this)),
+ ast::StorageClass::kNone, nullptr, ast::AttributeList{GroupAndBinding(0, 0)});
+
+ if (params.is_valid) {
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ } else {
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: texture_multisampled_2d<type>: type must be f32, "
+ "i32 or u32");
+ }
}
INSTANTIATE_TEST_SUITE_P(ResolverTypeValidationTest,
MultisampledTextureTypeTest,
testing::ValuesIn(type_cases));
-} // namespace MultisampledTextureTests
+} // namespace SampledTextureTests
namespace StorageTextureTests {
struct DimensionParams {
- ast::TextureDimension dim;
- bool is_valid;
+ ast::TextureDimension dim;
+ bool is_valid;
};
static constexpr DimensionParams Dimension_cases[] = {
@@ -872,94 +883,85 @@ static constexpr DimensionParams Dimension_cases[] = {
using StorageTextureDimensionTest = ResolverTestWithParam<DimensionParams>;
TEST_P(StorageTextureDimensionTest, All) {
- // @group(0) @binding(0)
- // var a : texture_storage_*<ru32int, write>;
- auto& params = GetParam();
+ // @group(0) @binding(0)
+ // var a : texture_storage_*<ru32int, write>;
+ auto& params = GetParam();
- auto* st =
- ty.storage_texture(Source{{12, 34}}, params.dim,
- ast::TexelFormat::kR32Uint, ast::Access::kWrite);
+ auto* st = ty.storage_texture(Source{{12, 34}}, params.dim, ast::TexelFormat::kR32Uint,
+ ast::Access::kWrite);
- Global("a", st, ast::StorageClass::kNone,
- ast::AttributeList{GroupAndBinding(0, 0)});
+ Global("a", st, ast::StorageClass::kNone, ast::AttributeList{GroupAndBinding(0, 0)});
- if (params.is_valid) {
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- } else {
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: cube dimensions for storage textures are not "
- "supported");
- }
+ if (params.is_valid) {
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ } else {
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: cube dimensions for storage textures are not "
+ "supported");
+ }
}
INSTANTIATE_TEST_SUITE_P(ResolverTypeValidationTest,
StorageTextureDimensionTest,
testing::ValuesIn(Dimension_cases));
struct FormatParams {
- ast::TexelFormat format;
- bool is_valid;
+ ast::TexelFormat format;
+ bool is_valid;
};
-static constexpr FormatParams format_cases[] = {
- FormatParams{ast::TexelFormat::kR32Float, true},
- FormatParams{ast::TexelFormat::kR32Sint, true},
- FormatParams{ast::TexelFormat::kR32Uint, true},
- FormatParams{ast::TexelFormat::kRg32Float, true},
- FormatParams{ast::TexelFormat::kRg32Sint, true},
- FormatParams{ast::TexelFormat::kRg32Uint, true},
- FormatParams{ast::TexelFormat::kRgba16Float, true},
- FormatParams{ast::TexelFormat::kRgba16Sint, true},
- FormatParams{ast::TexelFormat::kRgba16Uint, true},
- FormatParams{ast::TexelFormat::kRgba32Float, true},
- FormatParams{ast::TexelFormat::kRgba32Sint, true},
- FormatParams{ast::TexelFormat::kRgba32Uint, true},
- FormatParams{ast::TexelFormat::kRgba8Sint, true},
- FormatParams{ast::TexelFormat::kRgba8Snorm, true},
- FormatParams{ast::TexelFormat::kRgba8Uint, true},
- FormatParams{ast::TexelFormat::kRgba8Unorm, true}};
+static constexpr FormatParams format_cases[] = {FormatParams{ast::TexelFormat::kR32Float, true},
+ FormatParams{ast::TexelFormat::kR32Sint, true},
+ FormatParams{ast::TexelFormat::kR32Uint, true},
+ FormatParams{ast::TexelFormat::kRg32Float, true},
+ FormatParams{ast::TexelFormat::kRg32Sint, true},
+ FormatParams{ast::TexelFormat::kRg32Uint, true},
+ FormatParams{ast::TexelFormat::kRgba16Float, true},
+ FormatParams{ast::TexelFormat::kRgba16Sint, true},
+ FormatParams{ast::TexelFormat::kRgba16Uint, true},
+ FormatParams{ast::TexelFormat::kRgba32Float, true},
+ FormatParams{ast::TexelFormat::kRgba32Sint, true},
+ FormatParams{ast::TexelFormat::kRgba32Uint, true},
+ FormatParams{ast::TexelFormat::kRgba8Sint, true},
+ FormatParams{ast::TexelFormat::kRgba8Snorm, true},
+ FormatParams{ast::TexelFormat::kRgba8Uint, true},
+ FormatParams{ast::TexelFormat::kRgba8Unorm, true}};
using StorageTextureFormatTest = ResolverTestWithParam<FormatParams>;
TEST_P(StorageTextureFormatTest, All) {
- auto& params = GetParam();
- // @group(0) @binding(0)
- // var a : texture_storage_1d<*, write>;
- // @group(0) @binding(1)
- // var b : texture_storage_2d<*, write>;
- // @group(0) @binding(2)
- // var c : texture_storage_2d_array<*, write>;
- // @group(0) @binding(3)
- // var d : texture_storage_3d<*, write>;
-
- auto* st_a = ty.storage_texture(Source{{12, 34}}, ast::TextureDimension::k1d,
- params.format, ast::Access::kWrite);
- Global("a", st_a, ast::StorageClass::kNone,
- ast::AttributeList{GroupAndBinding(0, 0)});
-
- auto* st_b = ty.storage_texture(ast::TextureDimension::k2d, params.format,
- ast::Access::kWrite);
- Global("b", st_b, ast::StorageClass::kNone,
- ast::AttributeList{GroupAndBinding(0, 1)});
-
- auto* st_c = ty.storage_texture(ast::TextureDimension::k2dArray,
- params.format, ast::Access::kWrite);
- Global("c", st_c, ast::StorageClass::kNone,
- ast::AttributeList{GroupAndBinding(0, 2)});
-
- auto* st_d = ty.storage_texture(ast::TextureDimension::k3d, params.format,
- ast::Access::kWrite);
- Global("d", st_d, ast::StorageClass::kNone,
- ast::AttributeList{GroupAndBinding(0, 3)});
-
- if (params.is_valid) {
- EXPECT_TRUE(r()->Resolve()) << r()->error();
- } else {
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: image format must be one of the texel formats "
- "specified for storage textues in "
- "https://gpuweb.github.io/gpuweb/wgsl/#texel-formats");
- }
+ auto& params = GetParam();
+ // @group(0) @binding(0)
+ // var a : texture_storage_1d<*, write>;
+ // @group(0) @binding(1)
+ // var b : texture_storage_2d<*, write>;
+ // @group(0) @binding(2)
+ // var c : texture_storage_2d_array<*, write>;
+ // @group(0) @binding(3)
+ // var d : texture_storage_3d<*, write>;
+
+ auto* st_a = ty.storage_texture(Source{{12, 34}}, ast::TextureDimension::k1d, params.format,
+ ast::Access::kWrite);
+ Global("a", st_a, ast::StorageClass::kNone, ast::AttributeList{GroupAndBinding(0, 0)});
+
+ auto* st_b = ty.storage_texture(ast::TextureDimension::k2d, params.format, ast::Access::kWrite);
+ Global("b", st_b, ast::StorageClass::kNone, ast::AttributeList{GroupAndBinding(0, 1)});
+
+ auto* st_c =
+ ty.storage_texture(ast::TextureDimension::k2dArray, params.format, ast::Access::kWrite);
+ Global("c", st_c, ast::StorageClass::kNone, ast::AttributeList{GroupAndBinding(0, 2)});
+
+ auto* st_d = ty.storage_texture(ast::TextureDimension::k3d, params.format, ast::Access::kWrite);
+ Global("d", st_d, ast::StorageClass::kNone, ast::AttributeList{GroupAndBinding(0, 3)});
+
+ if (params.is_valid) {
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+ } else {
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: image format must be one of the texel formats "
+ "specified for storage textues in "
+ "https://gpuweb.github.io/gpuweb/wgsl/#texel-formats");
+ }
}
INSTANTIATE_TEST_SUITE_P(ResolverTypeValidationTest,
StorageTextureFormatTest,
@@ -968,89 +970,81 @@ INSTANTIATE_TEST_SUITE_P(ResolverTypeValidationTest,
using StorageTextureAccessTest = ResolverTest;
TEST_F(StorageTextureAccessTest, MissingAccess_Fail) {
- // @group(0) @binding(0)
- // var a : texture_storage_1d<ru32int>;
+ // @group(0) @binding(0)
+ // var a : texture_storage_1d<ru32int>;
- auto* st =
- ty.storage_texture(Source{{12, 34}}, ast::TextureDimension::k1d,
- ast::TexelFormat::kR32Uint, ast::Access::kUndefined);
+ auto* st = ty.storage_texture(Source{{12, 34}}, ast::TextureDimension::k1d,
+ ast::TexelFormat::kR32Uint, ast::Access::kUndefined);
- Global("a", st, ast::StorageClass::kNone,
- ast::AttributeList{GroupAndBinding(0, 0)});
+ Global("a", st, ast::StorageClass::kNone, ast::AttributeList{GroupAndBinding(0, 0)});
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: storage texture missing access control");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: storage texture missing access control");
}
TEST_F(StorageTextureAccessTest, RWAccess_Fail) {
- // @group(0) @binding(0)
- // var a : texture_storage_1d<ru32int, read_write>;
+ // @group(0) @binding(0)
+ // var a : texture_storage_1d<ru32int, read_write>;
- auto* st =
- ty.storage_texture(Source{{12, 34}}, ast::TextureDimension::k1d,
- ast::TexelFormat::kR32Uint, ast::Access::kReadWrite);
+ auto* st = ty.storage_texture(Source{{12, 34}}, ast::TextureDimension::k1d,
+ ast::TexelFormat::kR32Uint, ast::Access::kReadWrite);
- Global("a", st, ast::StorageClass::kNone, nullptr,
- ast::AttributeList{GroupAndBinding(0, 0)});
+ Global("a", st, ast::StorageClass::kNone, nullptr, ast::AttributeList{GroupAndBinding(0, 0)});
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: storage textures currently only support 'write' "
- "access control");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: storage textures currently only support 'write' "
+ "access control");
}
TEST_F(StorageTextureAccessTest, ReadOnlyAccess_Fail) {
- // @group(0) @binding(0)
- // var a : texture_storage_1d<ru32int, read>;
+ // @group(0) @binding(0)
+ // var a : texture_storage_1d<ru32int, read>;
- auto* st = ty.storage_texture(Source{{12, 34}}, ast::TextureDimension::k1d,
- ast::TexelFormat::kR32Uint, ast::Access::kRead);
+ auto* st = ty.storage_texture(Source{{12, 34}}, ast::TextureDimension::k1d,
+ ast::TexelFormat::kR32Uint, ast::Access::kRead);
- Global("a", st, ast::StorageClass::kNone, nullptr,
- ast::AttributeList{GroupAndBinding(0, 0)});
+ Global("a", st, ast::StorageClass::kNone, nullptr, ast::AttributeList{GroupAndBinding(0, 0)});
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: storage textures currently only support 'write' "
- "access control");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: storage textures currently only support 'write' "
+ "access control");
}
TEST_F(StorageTextureAccessTest, WriteOnlyAccess_Pass) {
- // @group(0) @binding(0)
- // var a : texture_storage_1d<ru32int, write>;
+ // @group(0) @binding(0)
+ // var a : texture_storage_1d<ru32int, write>;
- auto* st =
- ty.storage_texture(ast::TextureDimension::k1d, ast::TexelFormat::kR32Uint,
- ast::Access::kWrite);
+ auto* st = ty.storage_texture(ast::TextureDimension::k1d, ast::TexelFormat::kR32Uint,
+ ast::Access::kWrite);
- Global("a", st, ast::StorageClass::kNone, nullptr,
- ast::AttributeList{GroupAndBinding(0, 0)});
+ Global("a", st, ast::StorageClass::kNone, nullptr, ast::AttributeList{GroupAndBinding(0, 0)});
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
} // namespace StorageTextureTests
namespace MatrixTests {
struct Params {
- uint32_t columns;
- uint32_t rows;
- builder::ast_type_func_ptr elem_ty;
+ uint32_t columns;
+ uint32_t rows;
+ builder::ast_type_func_ptr elem_ty;
};
template <typename T>
constexpr Params ParamsFor(uint32_t columns, uint32_t rows) {
- return Params{columns, rows, DataType<T>::AST};
+ return Params{columns, rows, DataType<T>::AST};
}
using ValidMatrixTypes = ResolverTestWithParam<Params>;
TEST_P(ValidMatrixTypes, Okay) {
- // var a : matNxM<EL_TY>;
- auto& params = GetParam();
- Global("a", ty.mat(params.elem_ty(*this), params.columns, params.rows),
- ast::StorageClass::kPrivate);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ // var a : matNxM<EL_TY>;
+ auto& params = GetParam();
+ Global("a", ty.mat(params.elem_ty(*this), params.columns, params.rows),
+ ast::StorageClass::kPrivate);
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
INSTANTIATE_TEST_SUITE_P(ResolverTypeValidationTest,
ValidMatrixTypes,
@@ -1069,14 +1063,12 @@ INSTANTIATE_TEST_SUITE_P(ResolverTypeValidationTest,
using InvalidMatrixElementTypes = ResolverTestWithParam<Params>;
TEST_P(InvalidMatrixElementTypes, InvalidElementType) {
- // var a : matNxM<EL_TY>;
- auto& params = GetParam();
- Global("a",
- ty.mat(Source{{12, 34}}, params.elem_ty(*this), params.columns,
- params.rows),
- ast::StorageClass::kPrivate);
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), "12:34 error: matrix element type must be 'f32'");
+ // var a : matNxM<EL_TY>;
+ auto& params = GetParam();
+ Global("a", ty.mat(Source{{12, 34}}, params.elem_ty(*this), params.columns, params.rows),
+ ast::StorageClass::kPrivate);
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: matrix element type must be 'f32'");
}
INSTANTIATE_TEST_SUITE_P(ResolverTypeValidationTest,
InvalidMatrixElementTypes,
@@ -1094,22 +1086,21 @@ INSTANTIATE_TEST_SUITE_P(ResolverTypeValidationTest,
namespace VectorTests {
struct Params {
- uint32_t width;
- builder::ast_type_func_ptr elem_ty;
+ uint32_t width;
+ builder::ast_type_func_ptr elem_ty;
};
template <typename T>
constexpr Params ParamsFor(uint32_t width) {
- return Params{width, DataType<T>::AST};
+ return Params{width, DataType<T>::AST};
}
using ValidVectorTypes = ResolverTestWithParam<Params>;
TEST_P(ValidVectorTypes, Okay) {
- // var a : vecN<EL_TY>;
- auto& params = GetParam();
- Global("a", ty.vec(params.elem_ty(*this), params.width),
- ast::StorageClass::kPrivate);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ // var a : vecN<EL_TY>;
+ auto& params = GetParam();
+ Global("a", ty.vec(params.elem_ty(*this), params.width), ast::StorageClass::kPrivate);
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
INSTANTIATE_TEST_SUITE_P(ResolverTypeValidationTest,
ValidVectorTypes,
@@ -1132,14 +1123,14 @@ INSTANTIATE_TEST_SUITE_P(ResolverTypeValidationTest,
using InvalidVectorElementTypes = ResolverTestWithParam<Params>;
TEST_P(InvalidVectorElementTypes, InvalidElementType) {
- // var a : vecN<EL_TY>;
- auto& params = GetParam();
- Global("a", ty.vec(Source{{12, 34}}, params.elem_ty(*this), params.width),
- ast::StorageClass::kPrivate);
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: vector element type must be 'bool', 'f32', 'i32' "
- "or 'u32'");
+ // var a : vecN<EL_TY>;
+ auto& params = GetParam();
+ Global("a", ty.vec(Source{{12, 34}}, params.elem_ty(*this), params.width),
+ ast::StorageClass::kPrivate);
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: vector element type must be 'bool', 'f32', 'i32' "
+ "or 'u32'");
}
INSTANTIATE_TEST_SUITE_P(ResolverTypeValidationTest,
InvalidVectorElementTypes,
diff --git a/chromium/third_party/dawn/src/tint/resolver/uniformity.cc b/chromium/third_party/dawn/src/tint/resolver/uniformity.cc
new file mode 100644
index 00000000000..97612a40100
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/resolver/uniformity.cc
@@ -0,0 +1,1567 @@
+// Copyright 2022 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/tint/resolver/uniformity.h"
+
+#include <limits>
+#include <string>
+#include <unordered_map>
+#include <unordered_set>
+#include <utility>
+#include <vector>
+
+#include "src/tint/program_builder.h"
+#include "src/tint/resolver/dependency_graph.h"
+#include "src/tint/scope_stack.h"
+#include "src/tint/sem/block_statement.h"
+#include "src/tint/sem/for_loop_statement.h"
+#include "src/tint/sem/function.h"
+#include "src/tint/sem/if_statement.h"
+#include "src/tint/sem/info.h"
+#include "src/tint/sem/loop_statement.h"
+#include "src/tint/sem/statement.h"
+#include "src/tint/sem/switch_statement.h"
+#include "src/tint/sem/type_constructor.h"
+#include "src/tint/sem/type_conversion.h"
+#include "src/tint/sem/variable.h"
+#include "src/tint/utils/block_allocator.h"
+#include "src/tint/utils/map.h"
+#include "src/tint/utils/unique_vector.h"
+
+// Set to `1` to dump the uniformity graph for each function in graphviz format.
+#define TINT_DUMP_UNIFORMITY_GRAPH 0
+
+namespace tint::resolver {
+
+namespace {
+
+/// CallSiteTag describes the uniformity requirements on the call sites of a function.
+enum CallSiteTag {
+ CallSiteRequiredToBeUniform,
+ CallSiteNoRestriction,
+};
+
+/// FunctionTag describes a function's effects on uniformity.
+enum FunctionTag {
+ SubsequentControlFlowMayBeNonUniform,
+ ReturnValueMayBeNonUniform,
+ NoRestriction,
+};
+
+/// ParameterTag describes the uniformity requirements of values passed to a function parameter.
+enum ParameterTag {
+ ParameterRequiredToBeUniform,
+ ParameterRequiredToBeUniformForSubsequentControlFlow,
+ ParameterRequiredToBeUniformForReturnValue,
+ ParameterNoRestriction,
+};
+
+/// Node represents a node in the graph of control flow and value nodes within the analysis of a
+/// single function.
+struct Node {
+ /// Constructor
+ /// @param a the corresponding AST node
+ explicit Node(const ast::Node* a) : ast(a) {}
+
+#if TINT_DUMP_UNIFORMITY_GRAPH
+ /// The node tag.
+ std::string tag;
+#endif
+
+ /// Type describes the type of the node, which is used to determine additional diagnostic
+ /// information.
+ enum Type {
+ kRegular,
+ kFunctionCallArgument,
+ kFunctionCallPointerArgumentResult,
+ kFunctionCallReturnValue,
+ };
+
+ /// The type of the node.
+ Type type = kRegular;
+
+ /// `true` if this node represents a potential control flow change.
+ bool affects_control_flow = false;
+
+ /// The corresponding AST node, or nullptr.
+ const ast::Node* ast = nullptr;
+
+ /// The function call argument index, if applicable.
+ uint32_t arg_index;
+
+ /// The set of edges from this node to other nodes in the graph.
+ utils::UniqueVector<Node*> edges;
+
+ /// The node that this node was visited from, or nullptr if not visited.
+ Node* visited_from = nullptr;
+
+ /// Add an edge to the `to` node.
+ /// @param to the destination node
+ void AddEdge(Node* to) { edges.add(to); }
+};
+
+/// ParameterInfo holds information about the uniformity requirements and effects for a particular
+/// function parameter.
+struct ParameterInfo {
+ /// The semantic node that corresponds to this parameter.
+ const sem::Parameter* sem;
+ /// The parameter's uniformity requirements.
+ ParameterTag tag = ParameterNoRestriction;
+ /// Will be `true` if this function may cause the contents of this pointer parameter to become
+ /// non-uniform.
+ bool pointer_may_become_non_uniform = false;
+ /// The parameters that are required to be uniform for the contents of this pointer parameter to
+ /// be uniform at function exit.
+ std::vector<const sem::Parameter*> pointer_param_output_sources;
+ /// The node in the graph that corresponds to this parameter's initial value.
+ Node* init_value;
+ /// The node in the graph that corresponds to this parameter's output value (or nullptr).
+ Node* pointer_return_value = nullptr;
+};
+
+/// FunctionInfo holds information about the uniformity requirements and effects for a particular
+/// function, as well as the control flow graph.
+struct FunctionInfo {
+ /// Constructor
+ /// @param func the AST function
+ /// @param builder the program builder
+ FunctionInfo(const ast::Function* func, const ProgramBuilder* builder) {
+ name = builder->Symbols().NameFor(func->symbol);
+ callsite_tag = CallSiteNoRestriction;
+ function_tag = NoRestriction;
+
+ // Create special nodes.
+ required_to_be_uniform = CreateNode("RequiredToBeUniform");
+ may_be_non_uniform = CreateNode("MayBeNonUniform");
+ cf_start = CreateNode("CF_start");
+ cf_return = CreateNode("CF_return");
+ if (func->return_type) {
+ value_return = CreateNode("Value_return");
+ }
+
+ // Create nodes for parameters.
+ parameters.resize(func->params.size());
+ for (size_t i = 0; i < func->params.size(); i++) {
+ auto* param = func->params[i];
+ auto param_name = builder->Symbols().NameFor(param->symbol);
+ auto* sem = builder->Sem().Get<sem::Parameter>(param);
+ parameters[i].sem = sem;
+
+ Node* node_init;
+ if (sem->Type()->Is<sem::Pointer>()) {
+                node_init = CreateNode("ptrparam_" + param_name + "_init");
+                parameters[i].pointer_return_value =
+                    CreateNode("ptrparam_" + param_name + "_return");
+ local_var_decls.insert(sem);
+ } else {
+                node_init = CreateNode("param_" + param_name);
+ }
+ parameters[i].init_value = node_init;
+ variables.Set(sem, node_init);
+ }
+ }
+
+ /// The name of the function.
+ std::string name;
+
+ /// The call site uniformity requirements.
+ CallSiteTag callsite_tag;
+ /// The function's uniformity effects.
+ FunctionTag function_tag;
+ /// The uniformity requirements of the function's parameters.
+ std::vector<ParameterInfo> parameters;
+
+ /// The control flow graph.
+ utils::BlockAllocator<Node> nodes;
+
+ /// Special `RequiredToBeUniform` node.
+ Node* required_to_be_uniform;
+ /// Special `MayBeNonUniform` node.
+ Node* may_be_non_uniform;
+ /// Special `CF_start` node.
+ Node* cf_start;
+ /// Special `CF_return` node.
+ Node* cf_return;
+ /// Special `Value_return` node.
+ Node* value_return;
+
+ /// Map from variables to their value nodes in the graph, scoped with respect to control flow.
+ ScopeStack<const sem::Variable*, Node*> variables;
+
+    /// The set of local read-write variables that are in scope at any given point in the process.
+ /// Includes pointer parameters.
+ std::unordered_set<const sem::Variable*> local_var_decls;
+
+ /// LoopSwitchInfo tracks information about the value of variables for a control flow construct.
+ struct LoopSwitchInfo {
+ /// The type of this control flow construct.
+ std::string type;
+ /// The input values for local variables at the start of this construct.
+ std::unordered_map<const sem::Variable*, Node*> var_in_nodes;
+ /// The exit values for local variables at the end of this construct.
+ std::unordered_map<const sem::Variable*, Node*> var_exit_nodes;
+ };
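+
+    // For example (illustrative only): in `for (var i = 0; i < n; i = i + 1) { x = y; }`, a local
+    // variable `x` declared before the loop gets an "x_value_forloop_in" node capturing its value
+    // at the start of each iteration and an "x_value_forloop_exit" node capturing its value when
+    // the loop exits.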
+
+ /// Map from control flow statements to the corresponding LoopSwitchInfo structure.
+ std::unordered_map<const sem::Statement*, LoopSwitchInfo> loop_switch_infos;
+
+ /// Create a new node.
+ /// @param tag a tag used to identify the node for debugging purposes
+ /// @param ast the optional AST node that this node corresponds to
+ /// @returns the new node
+ Node* CreateNode([[maybe_unused]] std::string tag, const ast::Node* ast = nullptr) {
+ auto* node = nodes.Create(ast);
+
+#if TINT_DUMP_UNIFORMITY_GRAPH
+ // Make the tag unique and set it.
+ // This only matters if we're dumping the graph.
+ std::string unique_tag = tag;
+ int suffix = 0;
+ while (tags_.count(unique_tag)) {
+ unique_tag = tag + "_$" + std::to_string(++suffix);
+ }
+ tags_.insert(unique_tag);
+ node->tag = name + "." + unique_tag;
+#endif
+
+ return node;
+ }
+
+ /// Reset the visited status of every node in the graph.
+ void ResetVisited() {
+ for (auto* node : nodes.Objects()) {
+ node->visited_from = nullptr;
+ }
+ }
+
+ private:
+ /// A list of tags that have already been used within the current function.
+ std::unordered_set<std::string> tags_;
+};
+
+/// UniformityGraph is used to analyze the uniformity requirements and effects of functions in a
+/// module.
+class UniformityGraph {
+ public:
+ /// Constructor.
+ /// @param builder the program to analyze
+ explicit UniformityGraph(ProgramBuilder* builder)
+ : builder_(builder), sem_(builder->Sem()), diagnostics_(builder->Diagnostics()) {}
+
+ /// Destructor.
+ ~UniformityGraph() {}
+
+ /// Build and analyze the graph to determine whether the program satisfies the uniformity
+ /// constraints of WGSL.
+ /// @param dependency_graph the dependency-ordered module-scope declarations
+    /// @returns true if all uniformity constraints are satisfied, otherwise false
+ bool Build(const DependencyGraph& dependency_graph) {
+#if TINT_DUMP_UNIFORMITY_GRAPH
+ std::cout << "digraph G {\n";
+ std::cout << "rankdir=BT\n";
+#endif
+
+ // Process all functions in the module.
+ bool success = true;
+ for (auto* decl : dependency_graph.ordered_globals) {
+ if (auto* func = decl->As<ast::Function>()) {
+ if (!ProcessFunction(func)) {
+ success = false;
+ break;
+ }
+ }
+ }
+
+#if TINT_DUMP_UNIFORMITY_GRAPH
+ std::cout << "\n}\n";
+#endif
+
+ return success;
+ }
+
+ private:
+ const ProgramBuilder* builder_;
+ const sem::Info& sem_;
+ diag::List& diagnostics_;
+
+ /// Map of analyzed function results.
+ std::unordered_map<const ast::Function*, FunctionInfo> functions_;
+
+ /// The function currently being analyzed.
+ FunctionInfo* current_function_;
+
+ /// Create a new node.
+ /// @param tag a tag used to identify the node for debugging purposes.
+ /// @param ast the optional AST node that this node corresponds to
+ /// @returns the new node
+ Node* CreateNode(std::string tag, const ast::Node* ast = nullptr) {
+ return current_function_->CreateNode(tag, ast);
+ }
+
+ /// Process a function.
+ /// @param func the function to process
+ /// @returns true if there are no uniformity issues, false otherwise
+ bool ProcessFunction(const ast::Function* func) {
+ functions_.emplace(func, FunctionInfo(func, builder_));
+ current_function_ = &functions_.at(func);
+
+ // Process function body.
+ if (func->body) {
+ auto* cf = ProcessStatement(current_function_->cf_start, func->body);
+ current_function_->cf_return->AddEdge(cf);
+ }
+
+#if TINT_DUMP_UNIFORMITY_GRAPH
+ // Dump the graph for this function as a subgraph.
+ std::cout << "\nsubgraph cluster_" << current_function_->name << " {\n";
+ std::cout << " label=" << current_function_->name << ";";
+ for (auto* node : current_function_->nodes.Objects()) {
+ std::cout << "\n \"" << node->tag << "\";";
+ for (auto* edge : node->edges) {
+ std::cout << "\n \"" << node->tag << "\" -> \"" << edge->tag << "\";";
+ }
+ }
+ std::cout << "\n}\n";
+#endif
+
+ // Look at which nodes are reachable from "RequiredToBeUniform".
+ {
+ utils::UniqueVector<Node*> reachable;
+ Traverse(current_function_->required_to_be_uniform, &reachable);
+ if (reachable.contains(current_function_->may_be_non_uniform)) {
+ MakeError(*current_function_, current_function_->may_be_non_uniform);
+ return false;
+ }
+ if (reachable.contains(current_function_->cf_start)) {
+ current_function_->callsite_tag = CallSiteRequiredToBeUniform;
+ }
+
+ // Set the parameter tag to ParameterRequiredToBeUniform for each parameter node that
+ // was reachable.
+ for (size_t i = 0; i < func->params.size(); i++) {
+ auto* param = func->params[i];
+ if (reachable.contains(current_function_->variables.Get(sem_.Get(param)))) {
+ current_function_->parameters[i].tag = ParameterRequiredToBeUniform;
+ }
+ }
+ }
+
+        // Look at which nodes are reachable from "CF_return".
+ {
+ utils::UniqueVector<Node*> reachable;
+ Traverse(current_function_->cf_return, &reachable);
+ if (reachable.contains(current_function_->may_be_non_uniform)) {
+ current_function_->function_tag = SubsequentControlFlowMayBeNonUniform;
+ }
+
+ // Set the parameter tag to ParameterRequiredToBeUniformForSubsequentControlFlow for
+ // each parameter node that was reachable.
+ for (size_t i = 0; i < func->params.size(); i++) {
+ auto* param = func->params[i];
+ if (reachable.contains(current_function_->variables.Get(sem_.Get(param)))) {
+ current_function_->parameters[i].tag =
+ ParameterRequiredToBeUniformForSubsequentControlFlow;
+ }
+ }
+ }
+
+ // If "Value_return" exists, look at which nodes are reachable from it
+ if (current_function_->value_return) {
+ utils::UniqueVector<Node*> reachable;
+ Traverse(current_function_->value_return, &reachable);
+ if (reachable.contains(current_function_->may_be_non_uniform)) {
+ current_function_->function_tag = ReturnValueMayBeNonUniform;
+ }
+
+ // Set the parameter tag to ParameterRequiredToBeUniformForReturnValue for each
+ // parameter node that was reachable.
+ for (size_t i = 0; i < func->params.size(); i++) {
+ auto* param = func->params[i];
+ if (reachable.contains(current_function_->variables.Get(sem_.Get(param)))) {
+ current_function_->parameters[i].tag =
+ ParameterRequiredToBeUniformForReturnValue;
+ }
+ }
+ }
+
+ // Traverse the graph for each pointer parameter.
+ for (size_t i = 0; i < func->params.size(); i++) {
+ if (current_function_->parameters[i].pointer_return_value == nullptr) {
+ continue;
+ }
+
+ // Reset "visited" state for all nodes.
+ current_function_->ResetVisited();
+
+ utils::UniqueVector<Node*> reachable;
+ Traverse(current_function_->parameters[i].pointer_return_value, &reachable);
+ if (reachable.contains(current_function_->may_be_non_uniform)) {
+ current_function_->parameters[i].pointer_may_become_non_uniform = true;
+ }
+
+            // Check every parameter to see if it feeds into this parameter's final value.
+ for (size_t j = 0; j < func->params.size(); j++) {
+ auto* param_source = sem_.Get<sem::Parameter>(func->params[j]);
+ if (reachable.contains(current_function_->parameters[j].init_value)) {
+ current_function_->parameters[i].pointer_param_output_sources.push_back(
+ param_source);
+ }
+ }
+ }
+
+ return true;
+ }
+
+ /// Process a statement, returning the new control flow node.
+ /// @param cf the input control flow node
+    /// @param stmt the statement to process
+ /// @returns the new control flow node
+ Node* ProcessStatement(Node* cf, const ast::Statement* stmt) {
+ return Switch(
+ stmt,
+
+ [&](const ast::AssignmentStatement* a) {
+ auto [cf1, v1] = ProcessExpression(cf, a->rhs);
+ if (a->lhs->Is<ast::PhonyExpression>()) {
+ return cf1;
+ } else {
+ auto [cf2, l2] = ProcessLValueExpression(cf1, a->lhs);
+ l2->AddEdge(v1);
+ return cf2;
+ }
+ },
+
+ [&](const ast::BlockStatement* b) {
+ std::unordered_map<const sem::Variable*, Node*> scoped_assignments;
+ {
+ // Push a new scope for variable assignments in the block.
+ current_function_->variables.Push();
+ TINT_DEFER(current_function_->variables.Pop());
+
+ for (auto* s : b->statements) {
+ cf = ProcessStatement(cf, s);
+ if (!sem_.Get(s)->Behaviors().Contains(sem::Behavior::kNext)) {
+ break;
+ }
+ }
+
+ if (sem_.Get<sem::FunctionBlockStatement>(b)) {
+ // We've reached the end of the function body.
+ // Add edges from pointer parameter outputs to their current value.
+ for (auto param : current_function_->parameters) {
+ if (param.pointer_return_value) {
+ param.pointer_return_value->AddEdge(
+ current_function_->variables.Get(param.sem));
+ }
+ }
+ }
+
+ scoped_assignments = std::move(current_function_->variables.Top());
+ }
+
+                // Propagate all variable assignments to the containing scope if the behavior is
+ // either 'Next' or 'Fallthrough'.
+ auto& behaviors = sem_.Get(b)->Behaviors();
+ if (behaviors.Contains(sem::Behavior::kNext) ||
+ behaviors.Contains(sem::Behavior::kFallthrough)) {
+ for (auto var : scoped_assignments) {
+ current_function_->variables.Set(var.first, var.second);
+ }
+ }
+
+ // Remove any variables declared in this scope from the set of in-scope variables.
+ for (auto* d : sem_.Get<sem::BlockStatement>(b)->Decls()) {
+ current_function_->local_var_decls.erase(sem_.Get<sem::LocalVariable>(d));
+ }
+
+ return cf;
+ },
+
+ [&](const ast::BreakStatement* b) {
+ // Find the loop or switch statement that we are in.
+ auto* parent = sem_.Get(b)
+ ->FindFirstParent<sem::SwitchStatement, sem::LoopStatement,
+ sem::ForLoopStatement>();
+ TINT_ASSERT(Resolver, current_function_->loop_switch_infos.count(parent));
+ auto& info = current_function_->loop_switch_infos.at(parent);
+
+ // Propagate variable values to the loop/switch exit nodes.
+ for (auto* var : current_function_->local_var_decls) {
+ // Skip variables that were declared inside this loop/switch.
+ if (auto* lv = var->As<sem::LocalVariable>();
+ lv &&
+ lv->Statement()->FindFirstParent([&](auto* s) { return s == parent; })) {
+ continue;
+ }
+
+ // Add an edge from the variable exit node to its value at this point.
+ auto* exit_node = utils::GetOrCreate(info.var_exit_nodes, var, [&]() {
+ auto name = builder_->Symbols().NameFor(var->Declaration()->symbol);
+ return CreateNode(name + "_value_" + info.type + "_exit");
+ });
+ exit_node->AddEdge(current_function_->variables.Get(var));
+ }
+
+ return cf;
+ },
+
+ [&](const ast::CallStatement* c) {
+ auto [cf1, _] = ProcessCall(cf, c->expr);
+ return cf1;
+ },
+
+ [&](const ast::CompoundAssignmentStatement* c) {
+ // The compound assignment statement `a += b` is equivalent to `a = a + b`.
+ auto [cf1, v1] = ProcessExpression(cf, c->lhs);
+ auto [cf2, v2] = ProcessExpression(cf1, c->rhs);
+ auto* result = CreateNode("binary_expr_result");
+ result->AddEdge(v1);
+ result->AddEdge(v2);
+
+ auto [cf3, l3] = ProcessLValueExpression(cf2, c->lhs);
+ l3->AddEdge(result);
+ return cf3;
+ },
+
+ [&](const ast::ContinueStatement* c) {
+ // Find the loop statement that we are in.
+ auto* parent =
+ sem_.Get(c)->FindFirstParent<sem::LoopStatement, sem::ForLoopStatement>();
+ TINT_ASSERT(Resolver, current_function_->loop_switch_infos.count(parent));
+ auto& info = current_function_->loop_switch_infos.at(parent);
+
+ // Propagate assignments to the loop input nodes.
+ for (auto* var : current_function_->local_var_decls) {
+ // Skip variables that were declared inside this loop.
+ if (auto* lv = var->As<sem::LocalVariable>();
+ lv &&
+ lv->Statement()->FindFirstParent([&](auto* s) { return s == parent; })) {
+ continue;
+ }
+
+ // Add an edge from the variable's loop input node to its value at this point.
+ TINT_ASSERT(Resolver, info.var_in_nodes.count(var));
+ auto* in_node = info.var_in_nodes.at(var);
+ auto* out_node = current_function_->variables.Get(var);
+ if (out_node != in_node) {
+ in_node->AddEdge(out_node);
+ }
+ }
+ return cf;
+ },
+
+ [&](const ast::DiscardStatement*) { return cf; },
+
+ [&](const ast::FallthroughStatement*) { return cf; },
+
+ [&](const ast::ForLoopStatement* f) {
+ auto* sem_loop = sem_.Get(f);
+ auto* cfx = CreateNode("loop_start");
+
+ // Insert the initializer before the loop.
+ auto* cf_init = cf;
+ if (f->initializer) {
+ cf_init = ProcessStatement(cf, f->initializer);
+ }
+ auto* cf_start = cf_init;
+
+ auto& info = current_function_->loop_switch_infos[sem_loop];
+ info.type = "forloop";
+
+ // Create input nodes for any variables declared before this loop.
+ for (auto* v : current_function_->local_var_decls) {
+ auto name = builder_->Symbols().NameFor(v->Declaration()->symbol);
+ auto* in_node = CreateNode(name + "_value_forloop_in");
+ in_node->AddEdge(current_function_->variables.Get(v));
+ info.var_in_nodes[v] = in_node;
+ current_function_->variables.Set(v, in_node);
+ }
+
+ // Insert the condition at the start of the loop body.
+ if (f->condition) {
+ auto [cf_cond, v] = ProcessExpression(cfx, f->condition);
+ auto* cf_condition_end = CreateNode("for_condition_CFend", f);
+ cf_condition_end->affects_control_flow = true;
+ cf_condition_end->AddEdge(v);
+ cf_start = cf_condition_end;
+
+ // Propagate assignments to the loop exit nodes.
+ for (auto* var : current_function_->local_var_decls) {
+ auto* exit_node = utils::GetOrCreate(info.var_exit_nodes, var, [&]() {
+ auto name = builder_->Symbols().NameFor(var->Declaration()->symbol);
+ return CreateNode(name + "_value_" + info.type + "_exit");
+ });
+ exit_node->AddEdge(current_function_->variables.Get(var));
+ }
+ }
+ auto* cf1 = ProcessStatement(cf_start, f->body);
+
+ // Insert the continuing statement at the end of the loop body.
+ if (f->continuing) {
+ auto* cf2 = ProcessStatement(cf1, f->continuing);
+ cfx->AddEdge(cf2);
+ } else {
+ cfx->AddEdge(cf1);
+ }
+ cfx->AddEdge(cf);
+
+ // Add edges from variable loop input nodes to their values at the end of the loop.
+ for (auto v : info.var_in_nodes) {
+ auto* in_node = v.second;
+ auto* out_node = current_function_->variables.Get(v.first);
+ if (out_node != in_node) {
+ in_node->AddEdge(out_node);
+ }
+ }
+
+ // Set each variable's exit node as its value in the outer scope.
+ for (auto v : info.var_exit_nodes) {
+ current_function_->variables.Set(v.first, v.second);
+ }
+
+ current_function_->loop_switch_infos.erase(sem_loop);
+
+ if (sem_loop->Behaviors() == sem::Behaviors{sem::Behavior::kNext}) {
+ return cf;
+ } else {
+ return cfx;
+ }
+ },
+
+ [&](const ast::IfStatement* i) {
+ auto* sem_if = sem_.Get(i);
+ auto [_, v_cond] = ProcessExpression(cf, i->condition);
+
+ // Add a diagnostic node to capture the control flow change.
+ auto* v = current_function_->CreateNode("if_stmt", i);
+ v->affects_control_flow = true;
+ v->AddEdge(v_cond);
+
+ std::unordered_map<const sem::Variable*, Node*> true_vars;
+ std::unordered_map<const sem::Variable*, Node*> false_vars;
+
+ // Helper to process a statement with a new scope for variable assignments.
+ // Populates `assigned_vars` with new nodes for any variables that are assigned in
+ // this statement.
+ auto process_in_scope =
+ [&](Node* cf_in, const ast::Statement* s,
+ std::unordered_map<const sem::Variable*, Node*>& assigned_vars) {
+ // Push a new scope for variable assignments.
+ current_function_->variables.Push();
+
+ // Process the statement.
+ auto* cf_out = ProcessStatement(cf_in, s);
+
+ assigned_vars = current_function_->variables.Top();
+
+ // Pop the scope and return.
+ current_function_->variables.Pop();
+ return cf_out;
+ };
+
+ auto* cf1 = process_in_scope(v, i->body, true_vars);
+
+ bool true_has_next = sem_.Get(i->body)->Behaviors().Contains(sem::Behavior::kNext);
+ bool false_has_next = true;
+
+ Node* cf2 = nullptr;
+ if (i->else_statement) {
+ cf2 = process_in_scope(v, i->else_statement, false_vars);
+
+ false_has_next =
+ sem_.Get(i->else_statement)->Behaviors().Contains(sem::Behavior::kNext);
+ }
+
+ // Update values for any variables assigned in the if or else blocks.
+ for (auto* var : current_function_->local_var_decls) {
+ // Skip variables not assigned in either block.
+ if (true_vars.count(var) == 0 && false_vars.count(var) == 0) {
+ continue;
+ }
+
+ // Create an exit node for the variable.
+ auto name = builder_->Symbols().NameFor(var->Declaration()->symbol);
+ auto* out_node = CreateNode(name + "_value_if_exit");
+
+ // Add edges to the assigned value or the initial value.
+ // Only add edges if the behavior for that block contains 'Next'.
+ if (true_has_next) {
+ if (true_vars.count(var)) {
+ out_node->AddEdge(true_vars.at(var));
+ } else {
+ out_node->AddEdge(current_function_->variables.Get(var));
+ }
+ }
+ if (false_has_next) {
+ if (false_vars.count(var)) {
+ out_node->AddEdge(false_vars.at(var));
+ } else {
+ out_node->AddEdge(current_function_->variables.Get(var));
+ }
+ }
+
+ current_function_->variables.Set(var, out_node);
+ }
+
+ if (sem_if->Behaviors() != sem::Behaviors{sem::Behavior::kNext}) {
+ auto* cf_end = CreateNode("if_CFend");
+ cf_end->AddEdge(cf1);
+ if (cf2) {
+ cf_end->AddEdge(cf2);
+ }
+ return cf_end;
+ }
+ return cf;
+ },
+
+ [&](const ast::IncrementDecrementStatement* i) {
+ // The increment/decrement statement `i++` is equivalent to `i = i + 1`.
+ auto [cf1, v1] = ProcessExpression(cf, i->lhs);
+ auto* result = CreateNode("incdec_result");
+ result->AddEdge(v1);
+ result->AddEdge(cf1);
+
+ auto [cf2, l2] = ProcessLValueExpression(cf1, i->lhs);
+ l2->AddEdge(result);
+ return cf2;
+ },
+
+ [&](const ast::LoopStatement* l) {
+ auto* sem_loop = sem_.Get(l);
+ auto* cfx = CreateNode("loop_start");
+
+ auto& info = current_function_->loop_switch_infos[sem_loop];
+ info.type = "loop";
+
+ // Create input nodes for any variables declared before this loop.
+ for (auto* v : current_function_->local_var_decls) {
+ auto name = builder_->Symbols().NameFor(v->Declaration()->symbol);
+ auto* in_node = CreateNode(name + "_value_loop_in");
+ in_node->AddEdge(current_function_->variables.Get(v));
+ info.var_in_nodes[v] = in_node;
+ current_function_->variables.Set(v, in_node);
+ }
+
+ auto* cf1 = ProcessStatement(cfx, l->body);
+ if (l->continuing) {
+ auto* cf2 = ProcessStatement(cf1, l->continuing);
+ cfx->AddEdge(cf2);
+ } else {
+ cfx->AddEdge(cf1);
+ }
+ cfx->AddEdge(cf);
+
+ // Add edges from variable loop input nodes to their values at the end of the loop.
+ for (auto v : info.var_in_nodes) {
+ auto* in_node = v.second;
+ auto* out_node = current_function_->variables.Get(v.first);
+ if (out_node != in_node) {
+ in_node->AddEdge(out_node);
+ }
+ }
+
+ // Set each variable's exit node as its value in the outer scope.
+ for (auto v : info.var_exit_nodes) {
+ current_function_->variables.Set(v.first, v.second);
+ }
+
+ current_function_->loop_switch_infos.erase(sem_loop);
+
+ if (sem_loop->Behaviors() == sem::Behaviors{sem::Behavior::kNext}) {
+ return cf;
+ } else {
+ return cfx;
+ }
+ },
+ [&](const ast::ReturnStatement* r) {
+ Node* cf_ret;
+ if (r->value) {
+ auto [cf1, v] = ProcessExpression(cf, r->value);
+ current_function_->cf_return->AddEdge(cf1);
+ current_function_->value_return->AddEdge(v);
+ cf_ret = cf1;
+ } else {
+ TINT_ASSERT(Resolver, cf != nullptr);
+ current_function_->cf_return->AddEdge(cf);
+ cf_ret = cf;
+ }
+
+ // Add edges from each pointer parameter output to its current value.
+ for (auto param : current_function_->parameters) {
+ if (param.pointer_return_value) {
+ param.pointer_return_value->AddEdge(
+ current_function_->variables.Get(param.sem));
+ }
+ }
+
+ return cf_ret;
+ },
+ [&](const ast::SwitchStatement* s) {
+ auto* sem_switch = sem_.Get(s);
+ auto [cfx, v_cond] = ProcessExpression(cf, s->condition);
+
+ // Add a diagnostic node to capture the control flow change.
+ auto* v = current_function_->CreateNode("switch_stmt", s);
+ v->affects_control_flow = true;
+ v->AddEdge(v_cond);
+
+ Node* cf_end = nullptr;
+ if (sem_switch->Behaviors() != sem::Behaviors{sem::Behavior::kNext}) {
+ cf_end = CreateNode("switch_CFend");
+ }
+
+ auto& info = current_function_->loop_switch_infos[sem_switch];
+ info.type = "switch";
+
+ auto* cf_n = v;
+ bool previous_case_has_fallthrough = false;
+ for (auto* c : s->body) {
+ auto* sem_case = sem_.Get(c);
+
+ if (previous_case_has_fallthrough) {
+ cf_n = ProcessStatement(cf_n, c->body);
+ } else {
+ current_function_->variables.Push();
+ cf_n = ProcessStatement(v, c->body);
+ }
+
+ if (cf_end) {
+ cf_end->AddEdge(cf_n);
+ }
+
+ bool has_fallthrough =
+ sem_case->Behaviors().Contains(sem::Behavior::kFallthrough);
+ if (!has_fallthrough) {
+ if (sem_case->Behaviors().Contains(sem::Behavior::kNext)) {
+ // Propagate variable values to the switch exit nodes.
+ for (auto* var : current_function_->local_var_decls) {
+ // Skip variables that were declared inside the switch.
+ if (auto* lv = var->As<sem::LocalVariable>();
+ lv && lv->Statement()->FindFirstParent(
+ [&](auto* st) { return st == sem_switch; })) {
+ continue;
+ }
+
+ // Add an edge from the variable exit node to its new value.
+ auto* exit_node =
+ utils::GetOrCreate(info.var_exit_nodes, var, [&]() {
+ auto name =
+ builder_->Symbols().NameFor(var->Declaration()->symbol);
+ return CreateNode(name + "_value_" + info.type + "_exit");
+ });
+ exit_node->AddEdge(current_function_->variables.Get(var));
+ }
+ }
+ current_function_->variables.Pop();
+ }
+ previous_case_has_fallthrough = has_fallthrough;
+ }
+
+ // Update nodes for any variables assigned in the switch statement.
+ for (auto var : info.var_exit_nodes) {
+ current_function_->variables.Set(var.first, var.second);
+ }
+
+ return cf_end ? cf_end : cf;
+ },
+ [&](const ast::VariableDeclStatement* decl) {
+ Node* node;
+ if (decl->variable->constructor) {
+ auto [cf1, v] = ProcessExpression(cf, decl->variable->constructor);
+ cf = cf1;
+ node = v;
+ } else {
+ node = cf;
+ }
+ current_function_->variables.Set(sem_.Get(decl->variable), node);
+
+ if (!decl->variable->is_const) {
+ current_function_->local_var_decls.insert(
+ sem_.Get<sem::LocalVariable>(decl->variable));
+ }
+
+ return cf;
+ },
+ [&](Default) {
+ TINT_ICE(Resolver, diagnostics_)
+ << "unknown statement type: " << std::string(stmt->TypeInfo().name);
+ return nullptr;
+ });
+ }
+
+ /// Process an identifier expression.
+ /// @param cf the input control flow node
+ /// @param ident the identifier expression to process
+ /// @returns a pair of (control flow node, value node)
+ std::pair<Node*, Node*> ProcessIdentExpression(Node* cf,
+ const ast::IdentifierExpression* ident) {
+ // Helper to check if the entry point attribute of `obj` indicates non-uniformity.
+ auto has_nonuniform_entry_point_attribute = [](auto* obj) {
+ // Only the num_workgroups and workgroup_id builtins are uniform.
+ if (auto* builtin = ast::GetAttribute<ast::BuiltinAttribute>(obj->attributes)) {
+ if (builtin->builtin == ast::Builtin::kNumWorkgroups ||
+ builtin->builtin == ast::Builtin::kWorkgroupId) {
+ return false;
+ }
+ }
+ return true;
+ };
+
+ auto name = builder_->Symbols().NameFor(ident->symbol);
+ auto* sem = sem_.Get<sem::VariableUser>(ident)->Variable();
+ auto* node = CreateNode(name + "_ident_expr", ident);
+ return Switch(
+ sem,
+
+ [&](const sem::Parameter* param) {
+ auto* user_func = param->Owner()->As<sem::Function>();
+ if (user_func && user_func->Declaration()->IsEntryPoint()) {
+ if (auto* str = param->Type()->As<sem::Struct>()) {
+ // We consider the whole struct to be non-uniform if any one of its members
+ // is non-uniform.
+ bool uniform = true;
+ for (auto* member : str->Members()) {
+ if (has_nonuniform_entry_point_attribute(member->Declaration())) {
+ uniform = false;
+ }
+ }
+ node->AddEdge(uniform ? cf : current_function_->may_be_non_uniform);
+ return std::make_pair(cf, node);
+ } else {
+ if (has_nonuniform_entry_point_attribute(param->Declaration())) {
+ node->AddEdge(current_function_->may_be_non_uniform);
+ } else {
+ node->AddEdge(cf);
+ }
+ return std::make_pair(cf, node);
+ }
+ } else {
+ auto* x = current_function_->variables.Get(param);
+ node->AddEdge(cf);
+ node->AddEdge(x);
+ return std::make_pair(cf, node);
+ }
+ },
+
+ [&](const sem::GlobalVariable* global) {
+ if (global->Declaration()->is_const || global->Access() == ast::Access::kRead) {
+ node->AddEdge(cf);
+ } else {
+ node->AddEdge(current_function_->may_be_non_uniform);
+ }
+ return std::make_pair(cf, node);
+ },
+
+ [&](const sem::LocalVariable* local) {
+ node->AddEdge(cf);
+ if (auto* x = current_function_->variables.Get(local)) {
+ node->AddEdge(x);
+ }
+ return std::make_pair(cf, node);
+ },
+
+ [&](Default) {
+ TINT_ICE(Resolver, diagnostics_)
+ << "unknown identifier expression type: " << std::string(sem->TypeInfo().name);
+ return std::pair<Node*, Node*>(nullptr, nullptr);
+ });
+ }
+
+ /// Process an expression.
+ /// @param cf the input control flow node
+ /// @param expr the expression to process
+ /// @returns a pair of (control flow node, value node)
+ std::pair<Node*, Node*> ProcessExpression(Node* cf, const ast::Expression* expr) {
+ return Switch(
+ expr,
+
+ [&](const ast::BinaryExpression* b) {
+ if (b->IsLogical()) {
+ // Short-circuiting binary operators are a special case.
+ auto [cf1, v1] = ProcessExpression(cf, b->lhs);
+
+ // Add a diagnostic node to capture the control flow change.
+ auto* v1_cf = current_function_->CreateNode("short_circuit_op", b);
+ v1_cf->affects_control_flow = true;
+ v1_cf->AddEdge(v1);
+
+ auto [cf2, v2] = ProcessExpression(v1_cf, b->rhs);
+
+ if (sem_.Get(b)->Behaviors() == sem::Behaviors{sem::Behavior::kNext}) {
+ return std::pair<Node*, Node*>(cf, v2);
+ }
+ return std::pair<Node*, Node*>(cf2, v2);
+ } else {
+ auto [cf1, v1] = ProcessExpression(cf, b->lhs);
+ auto [cf2, v2] = ProcessExpression(cf1, b->rhs);
+ auto* result = CreateNode("binary_expr_result");
+ result->AddEdge(v1);
+ result->AddEdge(v2);
+ return std::pair<Node*, Node*>(cf2, result);
+ }
+ },
+
+ [&](const ast::BitcastExpression* b) { return ProcessExpression(cf, b->expr); },
+
+ [&](const ast::CallExpression* c) { return ProcessCall(cf, c); },
+
+ [&](const ast::IdentifierExpression* i) { return ProcessIdentExpression(cf, i); },
+
+ [&](const ast::IndexAccessorExpression* i) {
+ auto [cf1, v1] = ProcessExpression(cf, i->object);
+ auto [cf2, v2] = ProcessExpression(cf1, i->index);
+ auto* result = CreateNode("index_accessor_result");
+ result->AddEdge(v1);
+ result->AddEdge(v2);
+ return std::pair<Node*, Node*>(cf2, result);
+ },
+
+ [&](const ast::LiteralExpression*) { return std::make_pair(cf, cf); },
+
+ [&](const ast::MemberAccessorExpression* m) {
+ return ProcessExpression(cf, m->structure);
+ },
+
+ [&](const ast::UnaryOpExpression* u) {
+ if (u->op == ast::UnaryOp::kIndirection) {
+ // Cut the analysis short, since we only need to know the originating variable
+ // which is being accessed.
+ auto* source_var = sem_.Get(u)->SourceVariable();
+ auto* value = current_function_->variables.Get(source_var);
+ if (!value) {
+ value = cf;
+ }
+ return std::pair<Node*, Node*>(cf, value);
+ }
+ return ProcessExpression(cf, u->expr);
+ },
+
+ [&](Default) {
+ TINT_ICE(Resolver, diagnostics_)
+ << "unknown expression type: " << std::string(expr->TypeInfo().name);
+ return std::pair<Node*, Node*>(nullptr, nullptr);
+ });
+ }
+
+ /// Process an LValue expression.
+ /// @param cf the input control flow node
+ /// @param expr the expression to process
+ /// @returns a pair of (control flow node, variable node)
+ std::pair<Node*, Node*> ProcessLValueExpression(Node* cf, const ast::Expression* expr) {
+ return Switch(
+ expr,
+
+ [&](const ast::IdentifierExpression* i) {
+ auto name = builder_->Symbols().NameFor(i->symbol);
+ auto* sem = sem_.Get<sem::VariableUser>(i);
+ if (sem->Variable()->Is<sem::GlobalVariable>()) {
+ return std::make_pair(cf, current_function_->may_be_non_uniform);
+ } else if (auto* local = sem->Variable()->As<sem::LocalVariable>()) {
+ // Create a new value node for this variable.
+ auto* value = CreateNode(name + "_lvalue");
+ auto* old_value = current_function_->variables.Set(local, value);
+
+ // Aggregate values link back to their previous value, as they can never become
+ // uniform again.
+ if (!local->Type()->UnwrapRef()->is_scalar() && old_value) {
+ value->AddEdge(old_value);
+ }
+
+ return std::make_pair(cf, value);
+ } else {
+ TINT_ICE(Resolver, diagnostics_)
+ << "unknown lvalue identifier expression type: "
+ << std::string(sem->Variable()->TypeInfo().name);
+ return std::pair<Node*, Node*>(nullptr, nullptr);
+ }
+ },
+
+ [&](const ast::IndexAccessorExpression* i) {
+ auto [cf1, l1] = ProcessLValueExpression(cf, i->object);
+ auto [cf2, v2] = ProcessExpression(cf1, i->index);
+ l1->AddEdge(v2);
+ return std::pair<Node*, Node*>(cf2, l1);
+ },
+
+ [&](const ast::MemberAccessorExpression* m) {
+ return ProcessLValueExpression(cf, m->structure);
+ },
+
+ [&](const ast::UnaryOpExpression* u) {
+ if (u->op == ast::UnaryOp::kIndirection) {
+ // Cut the analysis short, since we only need to know the originating variable
+ // that is being written to.
+ auto* source_var = sem_.Get(u)->SourceVariable();
+ auto name = builder_->Symbols().NameFor(source_var->Declaration()->symbol);
+ auto* deref = CreateNode(name + "_deref");
+ auto* old_value = current_function_->variables.Set(source_var, deref);
+
+ // Aggregate values link back to their previous value, as they can never become
+ // uniform again.
+ if (!source_var->Type()->UnwrapRef()->UnwrapPtr()->is_scalar() && old_value) {
+ deref->AddEdge(old_value);
+ }
+
+ return std::pair<Node*, Node*>(cf, deref);
+ }
+ return ProcessLValueExpression(cf, u->expr);
+ },
+
+ [&](Default) {
+ TINT_ICE(Resolver, diagnostics_)
+ << "unknown lvalue expression type: " << std::string(expr->TypeInfo().name);
+ return std::pair<Node*, Node*>(nullptr, nullptr);
+ });
+ }
+
+ /// Process a function call expression.
+ /// @param cf the input control flow node
+ /// @param call the function call to process
+ /// @returns a pair of (control flow node, value node)
+ std::pair<Node*, Node*> ProcessCall(Node* cf, const ast::CallExpression* call) {
+ std::string name;
+ if (call->target.name) {
+ name = builder_->Symbols().NameFor(call->target.name->symbol);
+ } else {
+ name = call->target.type->FriendlyName(builder_->Symbols());
+ }
+
+ // Process call arguments
+ Node* cf_last_arg = cf;
+ std::vector<Node*> args;
+ for (size_t i = 0; i < call->args.size(); i++) {
+ auto [cf_i, arg_i] = ProcessExpression(cf_last_arg, call->args[i]);
+
+ // Capture the index of this argument in a new node.
+ // Note: This is an additional node that isn't described in the specification, for the
+ // purpose of providing diagnostic information.
+ Node* arg_node = CreateNode(name + "_arg_" + std::to_string(i), call);
+ arg_node->type = Node::kFunctionCallArgument;
+ arg_node->arg_index = static_cast<uint32_t>(i);
+ arg_node->AddEdge(arg_i);
+
+ cf_last_arg = cf_i;
+ args.push_back(arg_node);
+ }
+
+ // Note: This is an additional node that isn't described in the specification, for the
+ // purpose of providing diagnostic information.
+ Node* call_node = CreateNode(name + "_call", call);
+ call_node->AddEdge(cf_last_arg);
+
+ Node* result = CreateNode(name + "_return_value", call);
+ result->type = Node::kFunctionCallReturnValue;
+ Node* cf_after = CreateNode("CF_after_" + name, call);
+
+ // Get tags for the callee.
+ CallSiteTag callsite_tag = CallSiteNoRestriction;
+ FunctionTag function_tag = NoRestriction;
+ auto* sem = SemCall(call);
+ const FunctionInfo* func_info = nullptr;
+ Switch(
+ sem->Target(),
+ [&](const sem::Builtin* builtin) {
+ // Most builtins have no restrictions. The exceptions are barriers, derivatives, and
+ // some texture sampling builtins.
+ if (builtin->IsBarrier()) {
+ callsite_tag = CallSiteRequiredToBeUniform;
+ } else if (builtin->IsDerivative() ||
+ builtin->Type() == sem::BuiltinType::kTextureSample ||
+ builtin->Type() == sem::BuiltinType::kTextureSampleBias ||
+ builtin->Type() == sem::BuiltinType::kTextureSampleCompare) {
+ callsite_tag = CallSiteRequiredToBeUniform;
+ function_tag = ReturnValueMayBeNonUniform;
+ } else {
+ callsite_tag = CallSiteNoRestriction;
+ function_tag = NoRestriction;
+ }
+ },
+ [&](const sem::Function* func) {
+ // We must have already analyzed the user-defined function since we process
+ // functions in dependency order.
+ TINT_ASSERT(Resolver, functions_.count(func->Declaration()));
+ auto& info = functions_.at(func->Declaration());
+ callsite_tag = info.callsite_tag;
+ function_tag = info.function_tag;
+ func_info = &info;
+ },
+ [&](const sem::TypeConstructor*) {
+ callsite_tag = CallSiteNoRestriction;
+ function_tag = NoRestriction;
+ },
+ [&](const sem::TypeConversion*) {
+ callsite_tag = CallSiteNoRestriction;
+ function_tag = NoRestriction;
+ },
+ [&](Default) {
+ TINT_ICE(Resolver, diagnostics_) << "unhandled function call target: " << name;
+ });
+
+ if (callsite_tag == CallSiteRequiredToBeUniform) {
+ current_function_->required_to_be_uniform->AddEdge(call_node);
+ }
+ cf_after->AddEdge(call_node);
+
+ if (function_tag == SubsequentControlFlowMayBeNonUniform) {
+ cf_after->AddEdge(current_function_->may_be_non_uniform);
+ cf_after->affects_control_flow = true;
+ } else if (function_tag == ReturnValueMayBeNonUniform) {
+ result->AddEdge(current_function_->may_be_non_uniform);
+ }
+
+ result->AddEdge(cf_after);
+
+ // For each argument, add edges based on parameter tags.
+ for (size_t i = 0; i < args.size(); i++) {
+ if (func_info) {
+ switch (func_info->parameters[i].tag) {
+ case ParameterRequiredToBeUniform:
+ current_function_->required_to_be_uniform->AddEdge(args[i]);
+ break;
+ case ParameterRequiredToBeUniformForSubsequentControlFlow:
+ cf_after->AddEdge(args[i]);
+ args[i]->affects_control_flow = true;
+ break;
+ case ParameterRequiredToBeUniformForReturnValue:
+ result->AddEdge(args[i]);
+ break;
+ case ParameterNoRestriction:
+ break;
+ }
+
+ auto* sem_arg = sem_.Get(call->args[i]);
+ if (sem_arg->Type()->Is<sem::Pointer>()) {
+ auto* ptr_result =
+ CreateNode(name + "_ptrarg_" + std::to_string(i) + "_result", call);
+ ptr_result->type = Node::kFunctionCallPointerArgumentResult;
+ ptr_result->arg_index = static_cast<uint32_t>(i);
+ if (func_info->parameters[i].pointer_may_become_non_uniform) {
+ ptr_result->AddEdge(current_function_->may_be_non_uniform);
+ } else {
+ // Add edges from the resulting pointer value to any other arguments that
+ // feed it.
+ for (auto* source : func_info->parameters[i].pointer_param_output_sources) {
+ ptr_result->AddEdge(args[source->Index()]);
+ }
+ }
+
+ // Update the current stored value for this pointer argument.
+ auto* source_var = sem_arg->SourceVariable();
+ TINT_ASSERT(Resolver, source_var);
+ current_function_->variables.Set(source_var, ptr_result);
+ }
+ } else {
+ // All builtin function parameters are RequiredToBeUniformForReturnValue, as are
+ // parameters for type constructors and type conversions.
+ // The arrayLength() builtin is a special case, as there is currently no way for it
+ // to have a non-uniform return value.
+ auto* builtin = sem->Target()->As<sem::Builtin>();
+ if (!builtin || builtin->Type() != sem::BuiltinType::kArrayLength) {
+ result->AddEdge(args[i]);
+ }
+ }
+ }
+
+ return {cf_after, result};
+ }
+
+ /// Traverse a graph starting at `source`, inserting all visited nodes into `reachable` and
+ /// recording which node they were reached from.
+ /// @param source the starting node
+ /// @param reachable the set of reachable nodes to populate, if required
+ void Traverse(Node* source, utils::UniqueVector<Node*>* reachable = nullptr) {
+ std::vector<Node*> to_visit{source};
+
+ while (!to_visit.empty()) {
+ auto* node = to_visit.back();
+ to_visit.pop_back();
+
+ if (reachable) {
+ reachable->add(node);
+ }
+ for (auto* to : node->edges) {
+ if (to->visited_from == nullptr) {
+ to->visited_from = node;
+ to_visit.push_back(to);
+ }
+ }
+ }
+ }
+
+ /// Trace back along a path from `start` until finding a node that matches a predicate.
+ /// @param start the starting node
+ /// @param pred the predicate function
+ /// @returns the first node found that matches the predicate, or nullptr
+ template <typename F>
+ Node* TraceBackAlongPathUntil(Node* start, F&& pred) {
+ auto* current = start;
+ while (current) {
+ if (pred(current)) {
+ break;
+ }
+ current = current->visited_from;
+ }
+ return current;
+ }
+
+ /// Recursively descend through the function called by `call` and the functions that it calls in
+ /// order to find a call to a builtin function that requires uniformity.
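+    /// @param call the call expression to start descending from
+    /// @returns the call to the builtin that requires uniformity, or nullptr if none is found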
+ const ast::CallExpression* FindBuiltinThatRequiresUniformity(const ast::CallExpression* call) {
+ auto* target = SemCall(call)->Target();
+ if (target->Is<sem::Builtin>()) {
+ // This is a call to a builtin, so we must be done.
+ return call;
+ } else if (auto* user = target->As<sem::Function>()) {
+ // This is a call to a user-defined function, so inspect the functions called by that
+ // function and look for one whose node has an edge from the RequiredToBeUniform node.
+ auto& target_info = functions_.at(user->Declaration());
+ for (auto* call_node : target_info.required_to_be_uniform->edges) {
+ if (call_node->type == Node::kRegular) {
+ auto* child_call = call_node->ast->As<ast::CallExpression>();
+ return FindBuiltinThatRequiresUniformity(child_call);
+ }
+ }
+ TINT_ASSERT(Resolver, false && "unable to find child call with uniformity requirement");
+ } else {
+ TINT_ASSERT(Resolver, false && "unexpected call expression type");
+ }
+ return nullptr;
+ }
+
+ /// Add diagnostic notes to show where control flow became non-uniform on the way to a node.
+ /// @param function the function being analyzed
+ /// @param required_to_be_uniform the node to traverse from
+ /// @param may_be_non_uniform the node to traverse to
+ void ShowCauseOfNonUniformity(FunctionInfo& function,
+ Node* required_to_be_uniform,
+ Node* may_be_non_uniform) {
+ // Traverse the graph to generate a path from the node to the source of non-uniformity.
+ function.ResetVisited();
+ Traverse(required_to_be_uniform);
+
+ // Get the source of the non-uniform value.
+ auto* non_uniform_source = may_be_non_uniform->visited_from;
+ TINT_ASSERT(Resolver, non_uniform_source);
+
+ // Show where the non-uniform value results in non-uniform control flow.
+ auto* control_flow = TraceBackAlongPathUntil(
+ non_uniform_source, [](Node* node) { return node->affects_control_flow; });
+ if (control_flow) {
+ if (auto* call = control_flow->ast->As<ast::CallExpression>()) {
+ if (control_flow->type == Node::kFunctionCallArgument) {
+ auto idx = control_flow->arg_index;
+ diagnostics_.add_note(diag::System::Resolver,
+ "non-uniform function call argument causes subsequent "
+ "control flow to be non-uniform",
+ call->args[idx]->source);
+
+ // Recurse into the target function.
+ if (auto* user = SemCall(call)->Target()->As<sem::Function>()) {
+ auto& callee = functions_.at(user->Declaration());
+ ShowCauseOfNonUniformity(callee, callee.cf_return,
+ callee.parameters[idx].init_value);
+ }
+ }
+ } else {
+ diagnostics_.add_note(diag::System::Resolver,
+ "control flow depends on non-uniform value",
+ control_flow->ast->source);
+ }
+ // TODO(jrprice): There are cases where the function with uniformity requirements is not
+ // actually inside this control flow construct, for example:
+ // - A conditional interrupt (e.g. break), with a barrier elsewhere in the loop
+ // - A conditional assignment to a variable, which is later used to guard a barrier
+ // In these cases, the diagnostics are not entirely accurate as they may not highlight
+ // the actual cause of divergence.
+ }
+
+ // Show the source of the non-uniform value.
+ Switch(
+ non_uniform_source->ast,
+ [&](const ast::IdentifierExpression* ident) {
+ std::string var_type = "";
+ auto* var = sem_.Get<sem::VariableUser>(ident)->Variable();
+ switch (var->StorageClass()) {
+ case ast::StorageClass::kStorage:
+ var_type = "read_write storage buffer ";
+ break;
+ case ast::StorageClass::kWorkgroup:
+ var_type = "workgroup storage variable ";
+ break;
+ case ast::StorageClass::kPrivate:
+ var_type = "module-scope private variable ";
+ break;
+ default:
+ if (ast::HasAttribute<ast::BuiltinAttribute>(
+ var->Declaration()->attributes)) {
+ var_type = "builtin ";
+ } else if (ast::HasAttribute<ast::LocationAttribute>(
+ var->Declaration()->attributes)) {
+ var_type = "user-defined input ";
+ } else {
+ // TODO(jrprice): Provide more info for this case.
+ }
+ break;
+ }
+ diagnostics_.add_note(diag::System::Resolver,
+ "reading from " + var_type + "'" +
+ builder_->Symbols().NameFor(ident->symbol) +
+ "' may result in a non-uniform value",
+ ident->source);
+ },
+ [&](const ast::CallExpression* c) {
+ auto target_name = builder_->Symbols().NameFor(
+ c->target.name->As<ast::IdentifierExpression>()->symbol);
+ switch (non_uniform_source->type) {
+ case Node::kRegular: {
+ diagnostics_.add_note(
+ diag::System::Resolver,
+ "calling '" + target_name +
+ "' may cause subsequent control flow to be non-uniform",
+ c->source);
+
+ // Recurse into the target function.
+ if (auto* user = SemCall(c)->Target()->As<sem::Function>()) {
+ auto& callee = functions_.at(user->Declaration());
+ ShowCauseOfNonUniformity(callee, callee.cf_return,
+ callee.may_be_non_uniform);
+ }
+ break;
+ }
+ case Node::kFunctionCallReturnValue: {
+ diagnostics_.add_note(
+ diag::System::Resolver,
+ "return value of '" + target_name + "' may be non-uniform", c->source);
+ break;
+ }
+ case Node::kFunctionCallPointerArgumentResult: {
+ diagnostics_.add_note(
+ diag::System::Resolver,
+ "pointer contents may become non-uniform after calling '" +
+ target_name + "'",
+ c->args[non_uniform_source->arg_index]->source);
+ break;
+ }
+ default: {
+ TINT_ICE(Resolver, diagnostics_) << "unhandled source of non-uniformity";
+ break;
+ }
+ }
+ },
+ [&](Default) {
+ TINT_ICE(Resolver, diagnostics_) << "unhandled source of non-uniformity";
+ });
+ }
+
+ /// Generate an error message for a uniformity issue.
+ /// @param function the function that the diagnostic is being produced for
+ /// @param source_node the node that has caused a uniformity issue in `function`
+ /// @param note `true` if the diagnostic should be emitted as a note
+ void MakeError(FunctionInfo& function, Node* source_node, bool note = false) {
+ // Helper to produce a diagnostic message with the severity required by this invocation of
+ // the `MakeError` function.
+ auto report = [&](Source source, std::string msg) {
+ // TODO(jrprice): Switch to error instead of warning when feedback has settled.
+ diag::Diagnostic error{};
+ error.severity = note ? diag::Severity::Note : diag::Severity::Warning;
+ error.system = diag::System::Resolver;
+ error.source = source;
+ error.message = msg;
+ diagnostics_.add(std::move(error));
+ };
+
+ // Traverse the graph to generate a path from RequiredToBeUniform to the source node.
+ function.ResetVisited();
+ Traverse(function.required_to_be_uniform);
+ TINT_ASSERT(Resolver, source_node->visited_from);
+
+ // Find a node that is required to be uniform that has a path to the source node.
+ auto* cause = TraceBackAlongPathUntil(source_node, [&](Node* node) {
+ return node->visited_from == function.required_to_be_uniform;
+ });
+
+ // The node will always have a corresponding call expression.
+ auto* call = cause->ast->As<ast::CallExpression>();
+ TINT_ASSERT(Resolver, call);
+ auto* target = SemCall(call)->Target();
+
+ std::string func_name;
+ if (auto* builtin = target->As<sem::Builtin>()) {
+ func_name = builtin->str();
+ } else if (auto* user = target->As<sem::Function>()) {
+ func_name = builder_->Symbols().NameFor(user->Declaration()->symbol);
+ }
+
+ if (cause->type == Node::kFunctionCallArgument) {
+ // The requirement was on a function parameter.
+ auto param_name = builder_->Symbols().NameFor(
+ target->Parameters()[cause->arg_index]->Declaration()->symbol);
+ report(call->args[cause->arg_index]->source,
+ "parameter '" + param_name + "' of '" + func_name + "' must be uniform");
+
+ // If this is a call to a user-defined function, add a note to show the reason that the
+ // parameter is required to be uniform.
+ if (auto* user = target->As<sem::Function>()) {
+ auto& next_function = functions_.at(user->Declaration());
+ Node* next_cause = next_function.parameters[cause->arg_index].init_value;
+ MakeError(next_function, next_cause, true);
+ }
+ } else {
+ // The requirement was on a function callsite.
+ report(call->source,
+ "'" + func_name + "' must only be called from uniform control flow");
+
+ // If this is a call to a user-defined function, add a note to show the builtin that
+ // causes the uniformity requirement.
+ auto* innermost_call = FindBuiltinThatRequiresUniformity(call);
+ if (innermost_call != call) {
+ auto* sem_call = SemCall(call);
+ auto* sem_innermost_call = SemCall(innermost_call);
+
+ // Determine whether the builtin is being called directly or indirectly.
+ bool indirect = false;
+ if (sem_call->Target()->As<sem::Function>() !=
+ sem_innermost_call->Stmt()->Function()) {
+ indirect = true;
+ }
+
+ auto* builtin = sem_innermost_call->Target()->As<sem::Builtin>();
+ diagnostics_.add_note(diag::System::Resolver,
+ "'" + func_name + "' requires uniformity because it " +
+ (indirect ? "indirectly " : "") + "calls " +
+ builtin->str(),
+ innermost_call->source);
+ }
+ }
+
+ // Show the cause of non-uniformity (starting at the top-level error).
+ if (!note) {
+ ShowCauseOfNonUniformity(function, function.required_to_be_uniform,
+ function.may_be_non_uniform);
+ }
+ }
+
+ // Helper for obtaining the sem::Call node for the ast::CallExpression
+ const sem::Call* SemCall(const ast::CallExpression* expr) const {
+ return sem_.Get(expr)->UnwrapMaterialize()->As<sem::Call>();
+ }
+};
+
+} // namespace
+
+bool AnalyzeUniformity(ProgramBuilder* builder, const DependencyGraph& dependency_graph) {
+ UniformityGraph graph(builder);
+ return graph.Build(dependency_graph);
+}
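+
+// Usage sketch (illustrative only; the actual call site lives elsewhere in the resolver):
+//
+//   bool ok = AnalyzeUniformity(builder, dependency_graph);
+//
+// A return value of false indicates that a uniformity violation was found. Note that the
+// corresponding diagnostic is currently emitted as a warning rather than an error (see
+// MakeError above).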
+
+} // namespace tint::resolver
diff --git a/chromium/third_party/dawn/src/tint/resolver/uniformity.h b/chromium/third_party/dawn/src/tint/resolver/uniformity.h
new file mode 100644
index 00000000000..39827cf5200
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/resolver/uniformity.h
@@ -0,0 +1,36 @@
+// Copyright 2022 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef SRC_TINT_RESOLVER_UNIFORMITY_H_
+#define SRC_TINT_RESOLVER_UNIFORMITY_H_
+
+// Forward declarations.
+namespace tint {
+namespace resolver {
+struct DependencyGraph;
+} // namespace resolver
+class ProgramBuilder;
+} // namespace tint
+
+namespace tint::resolver {
+
+/// Analyze the uniformity of a program.
+/// @param builder the program to analyze
+/// @param dependency_graph the dependency-ordered module-scope declarations
+/// @returns true if there are no uniformity issues, false otherwise
+bool AnalyzeUniformity(ProgramBuilder* builder, const resolver::DependencyGraph& dependency_graph);
+
+} // namespace tint::resolver
+
+#endif // SRC_TINT_RESOLVER_UNIFORMITY_H_
diff --git a/chromium/third_party/dawn/src/tint/resolver/uniformity_test.cc b/chromium/third_party/dawn/src/tint/resolver/uniformity_test.cc
new file mode 100644
index 00000000000..7ed4a6b5c84
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/resolver/uniformity_test.cc
@@ -0,0 +1,6422 @@
+// Copyright 2022 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <memory>
+#include <string>
+#include <tuple>
+#include <utility>
+
+#include "src/tint/program_builder.h"
+#include "src/tint/reader/wgsl/parser.h"
+#include "src/tint/resolver/uniformity.h"
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+
+using namespace tint::number_suffixes; // NOLINT
+
+namespace tint::resolver {
+namespace {
+
+class UniformityAnalysisTestBase {
+ protected:
+ /// Parse and resolve a WGSL shader.
+ /// @param src the WGSL source code
+ /// @param should_pass true if `src` should pass the analysis, otherwise false
+ void RunTest(std::string src, bool should_pass) {
+ auto file = std::make_unique<Source::File>("test", src);
+ auto program = reader::wgsl::Parse(file.get());
+
+ diag::Formatter::Style style;
+ style.print_newline_at_end = false;
+ error_ = diag::Formatter(style).format(program.Diagnostics());
+
+ bool valid = program.IsValid();
+ if (should_pass) {
+ EXPECT_TRUE(valid) << error_;
+ if (program.Diagnostics().count() == 1u) {
+ EXPECT_THAT(program.Diagnostics().str(), ::testing::HasSubstr("unreachable"));
+ } else {
+ EXPECT_EQ(program.Diagnostics().count(), 0u) << error_;
+ }
+ } else {
+ // TODO(jrprice): expect false when uniformity issues become errors.
+ EXPECT_TRUE(valid) << error_;
+ }
+ }
+
+ /// Build and resolve a program from a ProgramBuilder object.
+ /// @param builder the program builder
+ /// @returns true on success, false on failure
+ bool RunTest(ProgramBuilder&& builder) {
+ auto program = Program(std::move(builder));
+
+ diag::Formatter::Style style;
+ style.print_newline_at_end = false;
+ error_ = diag::Formatter(style).format(program.Diagnostics());
+
+ return program.IsValid();
+ }
+
+ /// The error message from the parser or resolver, if any.
+ std::string error_;
+};
+
+class UniformityAnalysisTest : public UniformityAnalysisTestBase, public ::testing::Test {};
+
+class BasicTest : public UniformityAnalysisTestBase,
+ public ::testing::TestWithParam<std::tuple<int, int>> {
+ public:
+ /// Enum for the if-statement condition guarding a function call.
+ enum Condition {
+ // Uniform conditions:
+ kTrue,
+ kFalse,
+ kLiteral,
+ kModuleLet,
+ kPipelineOverridable,
+ kFuncLetUniformRhs,
+ kFuncVarUniform,
+ kFuncUniformRetVal,
+ kUniformBuffer,
+ kROStorageBuffer,
+ kLastUniformCondition = kROStorageBuffer,
+ // MayBeNonUniform conditions:
+ kFuncLetNonUniformRhs,
+ kFuncVarNonUniform,
+ kFuncNonUniformRetVal,
+ kRWStorageBuffer,
+ // End of range marker:
+ kEndOfConditionRange,
+ };
+
+ /// Enum for the function call statement.
+ enum Function {
+ // NoRestrictionFunctions:
+ kUserNoRestriction,
+ kMin,
+ kTextureSampleLevel,
+ kLastNoRestrictionFunction = kTextureSampleLevel,
+ // RequiredToBeUniform functions:
+ kUserRequiredToBeUniform,
+ kWorkgroupBarrier,
+ kStorageBarrier,
+ kTextureSample,
+ kTextureSampleBias,
+ kTextureSampleCompare,
+ kDpdx,
+ kDpdxCoarse,
+ kDpdxFine,
+ kDpdy,
+ kDpdyCoarse,
+ kDpdyFine,
+ kFwidth,
+ kFwidthCoarse,
+ kFwidthFine,
+ // End of range marker:
+ kEndOfFunctionRange,
+ };
+
+ /// Convert a condition to its string representation.
+ static std::string ConditionToStr(Condition c) {
+ switch (c) {
+ case kTrue:
+ return "true";
+ case kFalse:
+ return "false";
+ case kLiteral:
+ return "7 == 7";
+ case kModuleLet:
+ return "module_let == 0";
+ case kPipelineOverridable:
+ return "pipeline_overridable == 0";
+ case kFuncLetUniformRhs:
+ return "let_uniform_rhs == 0";
+ case kFuncVarUniform:
+ return "func_uniform == 0";
+ case kFuncUniformRetVal:
+ return "func_uniform_retval() == 0";
+ case kUniformBuffer:
+ return "u == 0";
+ case kROStorageBuffer:
+ return "ro == 0";
+ case kFuncLetNonUniformRhs:
+ return "let_nonuniform_rhs == 0";
+ case kFuncVarNonUniform:
+ return "func_non_uniform == 0";
+ case kFuncNonUniformRetVal:
+ return "func_nonuniform_retval() == 0";
+ case kRWStorageBuffer:
+ return "rw == 0";
+ case kEndOfConditionRange:
+ return "<invalid>";
+ }
+ return "<invalid>";
+ }
+
+ /// Convert a function call to its string representation.
+ static std::string FunctionToStr(Function f) {
+ switch (f) {
+ case kUserNoRestriction:
+ return "user_no_restriction()";
+ case kMin:
+ return "min(1, 1)";
+ case kTextureSampleLevel:
+ return "textureSampleLevel(t, s, vec2(0.5, 0.5), 0.0)";
+ case kUserRequiredToBeUniform:
+ return "user_required_to_be_uniform()";
+ case kWorkgroupBarrier:
+ return "workgroupBarrier()";
+ case kStorageBarrier:
+ return "storageBarrier()";
+ case kTextureSample:
+ return "textureSample(t, s, vec2(0.5, 0.5))";
+ case kTextureSampleBias:
+ return "textureSampleBias(t, s, vec2(0.5, 0.5), 2.0)";
+ case kTextureSampleCompare:
+ return "textureSampleCompare(td, sc, vec2(0.5, 0.5), 0.5)";
+ case kDpdx:
+ return "dpdx(1.0)";
+ case kDpdxCoarse:
+ return "dpdxCoarse(1.0)";
+ case kDpdxFine:
+ return "dpdxFine(1.0)";
+ case kDpdy:
+ return "dpdy(1.0)";
+ case kDpdyCoarse:
+ return "dpdyCoarse(1.0)";
+ case kDpdyFine:
+ return "dpdyFine(1.0)";
+ case kFwidth:
+ return "fwidth(1.0)";
+ case kFwidthCoarse:
+ return "fwidthCoarse(1.0)";
+ case kFwidthFine:
+ return "fwidthFine(1.0)";
+ case kEndOfFunctionRange:
+ return "<invalid>";
+ }
+ return "<invalid>";
+ }
+
+ /// @returns true if `c` is a condition that may be non-uniform.
+ static bool MayBeNonUniform(Condition c) { return c > kLastUniformCondition; }
+
+ /// @returns true if `f` is a function call that is required to be uniform.
+ static bool RequiredToBeUniform(Function f) { return f > kLastNoRestrictionFunction; }
+
+ /// Convert a test parameter pair of condition+function to a string that can be used as part of
+ /// a test name.
+ static std::string ParamsToName(::testing::TestParamInfo<ParamType> params) {
+ Condition c = static_cast<Condition>(std::get<0>(params.param));
+ Function f = static_cast<Function>(std::get<1>(params.param));
+ std::string name;
+#define CASE(c) \
+ case c: \
+ name += #c; \
+ break
+
+ switch (c) {
+ CASE(kTrue);
+ CASE(kFalse);
+ CASE(kLiteral);
+ CASE(kModuleLet);
+ CASE(kPipelineOverridable);
+ CASE(kFuncLetUniformRhs);
+ CASE(kFuncVarUniform);
+ CASE(kFuncUniformRetVal);
+ CASE(kUniformBuffer);
+ CASE(kROStorageBuffer);
+ CASE(kFuncLetNonUniformRhs);
+ CASE(kFuncVarNonUniform);
+ CASE(kFuncNonUniformRetVal);
+ CASE(kRWStorageBuffer);
+ case kEndOfConditionRange:
+ break;
+ }
+ name += "_";
+ switch (f) {
+ CASE(kUserNoRestriction);
+ CASE(kMin);
+ CASE(kTextureSampleLevel);
+ CASE(kUserRequiredToBeUniform);
+ CASE(kWorkgroupBarrier);
+ CASE(kStorageBarrier);
+ CASE(kTextureSample);
+ CASE(kTextureSampleBias);
+ CASE(kTextureSampleCompare);
+ CASE(kDpdx);
+ CASE(kDpdxCoarse);
+ CASE(kDpdxFine);
+ CASE(kDpdy);
+ CASE(kDpdyCoarse);
+ CASE(kDpdyFine);
+ CASE(kFwidth);
+ CASE(kFwidthCoarse);
+ CASE(kFwidthFine);
+ case kEndOfFunctionRange:
+ break;
+ }
+#undef CASE
+
+ return name;
+ }
+};
+
+// Test the uniformity constraints for a function call inside a conditional statement.
+TEST_P(BasicTest, ConditionalFunctionCall) {
+ auto condition = static_cast<Condition>(std::get<0>(GetParam()));
+ auto function = static_cast<Function>(std::get<1>(GetParam()));
+ std::string src = R"(
+var<private> p : i32;
+var<workgroup> w : i32;
+@group(0) @binding(0) var<uniform> u : i32;
+@group(0) @binding(0) var<storage, read> ro : i32;
+@group(0) @binding(0) var<storage, read_write> rw : i32;
+
+@group(1) @binding(0) var t : texture_2d<f32>;
+@group(1) @binding(1) var td : texture_depth_2d;
+@group(1) @binding(2) var s : sampler;
+@group(1) @binding(3) var sc : sampler_comparison;
+
+let module_let : i32 = 42;
+@id(42) override pipeline_overridable : i32;
+
+fn user_no_restriction() {}
+fn user_required_to_be_uniform() { workgroupBarrier(); }
+
+fn func_uniform_retval() -> i32 { return u; }
+fn func_nonuniform_retval() -> i32 { return rw; }
+
+fn foo() {
+ let let_uniform_rhs = 7;
+ let let_nonuniform_rhs = rw;
+
+ var func_uniform = 7;
+ var func_non_uniform = 7;
+ func_non_uniform = rw;
+
+ if ()" + ConditionToStr(condition) +
+ R"() {
+ )" + FunctionToStr(function) +
+ R"(;
+ }
+}
+)";
+
+ bool should_pass = !(MayBeNonUniform(condition) && RequiredToBeUniform(function));
+ RunTest(src, should_pass);
+ if (!should_pass) {
+ EXPECT_THAT(error_, ::testing::StartsWith("test:31:5 warning: "));
+ EXPECT_THAT(error_, ::testing::HasSubstr("must only be called from uniform control flow"));
+ }
+}
+
+INSTANTIATE_TEST_SUITE_P(
+ UniformityAnalysisTest,
+ BasicTest,
+ ::testing::Combine(::testing::Range<int>(0, BasicTest::kEndOfConditionRange),
+ ::testing::Range<int>(0, BasicTest::kEndOfFunctionRange)),
+ BasicTest::ParamsToName);
+
+////////////////////////////////////////////////////////////////////////////////
+/// Test specific function and parameter tags that are not tested above.
+////////////////////////////////////////////////////////////////////////////////
+
+TEST_F(UniformityAnalysisTest, SubsequentControlFlowMayBeNonUniform_Pass) {
+ // Call a function that causes subsequent control flow to be non-uniform, and then call another
+ // function that doesn't require uniformity.
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> rw : i32;
+
+var<private> p : i32;
+
+fn foo() {
+ if (rw == 0) {
+ p = 42;
+ return;
+ }
+ p = 5;
+ return;
+}
+
+fn bar() {
+ if (p == 42) {
+ p = 7;
+ }
+}
+
+fn main() {
+ foo();
+ bar();
+}
+)";
+
+ RunTest(src, true);
+}
+
+TEST_F(UniformityAnalysisTest, SubsequentControlFlowMayBeNonUniform_Fail) {
+ // Call a function that causes subsequent control flow to be non-uniform, and then call another
+ // function that requires uniformity.
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> rw : i32;
+
+var<private> p : i32;
+
+fn foo() {
+ if (rw == 0) {
+ p = 42;
+ return;
+ }
+ p = 5;
+ return;
+}
+
+fn main() {
+ foo();
+ workgroupBarrier();
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:17:3 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:16:3 note: calling 'foo' may cause subsequent control flow to be non-uniform
+ foo();
+ ^^^
+
+test:7:3 note: control flow depends on non-uniform value
+ if (rw == 0) {
+ ^^
+
+test:7:7 note: reading from read_write storage buffer 'rw' may result in a non-uniform value
+ if (rw == 0) {
+ ^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, SubsequentControlFlowMayBeNonUniform_Nested_Fail) {
+ // Indirectly call a function that causes subsequent control flow to be non-uniform, and then
+ // call another function that requires uniformity.
+    // The lack of a return statement in `foo()` requires that we implicitly add an edge from
+    // CF_return to the last control flow node of the function.
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> rw : i32;
+
+var<private> p : i32;
+
+fn bar() {
+ if (rw == 0) {
+ p = 42;
+ return;
+ }
+ p = 5;
+ return;
+}
+
+fn foo() {
+ bar();
+}
+
+fn main() {
+ foo();
+ workgroupBarrier();
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:21:3 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:20:3 note: calling 'foo' may cause subsequent control flow to be non-uniform
+ foo();
+ ^^^
+
+test:16:3 note: calling 'bar' may cause subsequent control flow to be non-uniform
+ bar();
+ ^^^
+
+test:7:3 note: control flow depends on non-uniform value
+ if (rw == 0) {
+ ^^
+
+test:7:7 note: reading from read_write storage buffer 'rw' may result in a non-uniform value
+ if (rw == 0) {
+ ^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, ParameterNoRestriction_Pass) {
+ // Pass a non-uniform value as an argument, and then try to use the return value for
+ // control-flow guarding a barrier.
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> rw : i32;
+
+var<private> p : i32;
+
+fn foo(i : i32) -> i32 {
+ if (i == 0) {
+ // This assignment is non-uniform, but shouldn't affect the return value.
+ p = 42;
+ }
+ return 7;
+}
+
+fn bar() {
+ let x = foo(rw);
+ if (x == 7) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, true);
+}
+
+TEST_F(UniformityAnalysisTest, ParameterRequiredToBeUniform_Pass) {
+ // Pass a uniform value as an argument to a function that uses that parameter for control-flow
+ // guarding a barrier.
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read> ro : i32;
+
+fn foo(i : i32) {
+ if (i == 0) {
+ workgroupBarrier();
+ }
+}
+
+fn bar() {
+ foo(ro);
+}
+)";
+
+ RunTest(src, true);
+}
+
+TEST_F(UniformityAnalysisTest, ParameterRequiredToBeUniform_Fail) {
+ // Pass a non-uniform value as an argument to a function that uses that parameter for
+ // control-flow guarding a barrier.
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> rw : i32;
+
+fn foo(i : i32) {
+ if (i == 0) {
+ workgroupBarrier();
+ }
+}
+
+fn bar() {
+ foo(rw);
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:11:7 warning: parameter 'i' of 'foo' must be uniform
+ foo(rw);
+ ^^
+
+test:6:5 note: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:11:7 note: reading from read_write storage buffer 'rw' may result in a non-uniform value
+ foo(rw);
+ ^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, ParameterRequiredToBeUniformForReturnValue_Pass) {
+ // Pass a uniform value as an argument to a function that uses that parameter to produce the
+ // return value, and then use the return value for control-flow guarding a barrier.
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read> ro : i32;
+
+fn foo(i : i32) -> i32 {
+ return 1 + i;
+}
+
+fn bar() {
+ if (foo(ro) == 7) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, true);
+}
+
+TEST_F(UniformityAnalysisTest, ParameterRequiredToBeUniformForReturnValue_Fail) {
+ // Pass a non-uniform value as an argument to a function that uses that parameter to produce the
+ // return value, and then use the return value for control-flow guarding a barrier.
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> rw : i32;
+
+fn foo(i : i32) -> i32 {
+ return 1 + i;
+}
+
+fn bar() {
+ if (foo(rw) == 7) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:10:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:9:3 note: control flow depends on non-uniform value
+ if (foo(rw) == 7) {
+ ^^
+
+test:9:11 note: reading from read_write storage buffer 'rw' may result in a non-uniform value
+ if (foo(rw) == 7) {
+ ^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, ParameterRequiredToBeUniformForSubsequentControlFlow_Pass) {
+    // Pass a uniform value as an argument to a function that uses that parameter to return
+    // early, and then invoke a barrier after calling that function.
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read> ro : i32;
+
+var<private> p : i32;
+
+fn foo(i : i32) {
+ if (i == 0) {
+ p = 42;
+ return;
+ }
+ p = 5;
+ return;
+}
+
+fn bar() {
+ foo(ro);
+ workgroupBarrier();
+}
+)";
+
+ RunTest(src, true);
+}
+
+TEST_F(UniformityAnalysisTest, ParameterRequiredToBeUniformForSubsequentControlFlow_Fail) {
+    // Pass a non-uniform value as an argument to a function that uses that parameter to return
+    // early, and then invoke a barrier after calling that function.
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> rw : i32;
+
+var<private> p : i32;
+
+fn foo(i : i32) {
+ if (i == 0) {
+ p = 42;
+ return;
+ }
+ p = 5;
+ return;
+}
+
+fn bar() {
+ foo(rw);
+ workgroupBarrier();
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:17:3 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:16:7 note: non-uniform function call argument causes subsequent control flow to be non-uniform
+ foo(rw);
+ ^^
+
+test:7:3 note: control flow depends on non-uniform value
+ if (i == 0) {
+ ^^
+
+test:7:7 note: reading from 'i' may result in a non-uniform value
+ if (i == 0) {
+ ^
+
+test:16:7 note: reading from read_write storage buffer 'rw' may result in a non-uniform value
+ foo(rw);
+ ^^
+)");
+}
+
+////////////////////////////////////////////////////////////////////////////////
+/// Test shader IO attributes.
+////////////////////////////////////////////////////////////////////////////////
+
+struct BuiltinEntry {
+ std::string name;
+ std::string type;
+ bool uniform;
+ BuiltinEntry(std::string n, std::string t, bool u) : name(n), type(t), uniform(u) {}
+};
+
+class ComputeBuiltin : public UniformityAnalysisTestBase,
+ public ::testing::TestWithParam<BuiltinEntry> {};
+TEST_P(ComputeBuiltin, AsParam) {
+ std::string src = R"(
+@compute @workgroup_size(64)
+fn main(@builtin()" + GetParam().name +
+ R"() b : )" + GetParam().type + R"() {
+ if (all(vec3(b) == vec3(0u))) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ bool should_pass = GetParam().uniform;
+ RunTest(src, should_pass);
+ if (!should_pass) {
+ EXPECT_EQ(
+ error_,
+ R"(test:5:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:4:3 note: control flow depends on non-uniform value
+ if (all(vec3(b) == vec3(0u))) {
+ ^^
+
+test:4:16 note: reading from builtin 'b' may result in a non-uniform value
+ if (all(vec3(b) == vec3(0u))) {
+ ^
+)");
+ }
+}
+
+TEST_P(ComputeBuiltin, InStruct) {
+ std::string src = R"(
+struct S {
+ @builtin()" + GetParam().name +
+ R"() b : )" + GetParam().type + R"(
+}
+
+@compute @workgroup_size(64)
+fn main(s : S) {
+ if (all(vec3(s.b) == vec3(0u))) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ bool should_pass = GetParam().uniform;
+ RunTest(src, should_pass);
+ if (!should_pass) {
+ EXPECT_EQ(
+ error_,
+ R"(test:9:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:8:3 note: control flow depends on non-uniform value
+ if (all(vec3(s.b) == vec3(0u))) {
+ ^^
+
+test:8:16 note: reading from 's' may result in a non-uniform value
+ if (all(vec3(s.b) == vec3(0u))) {
+ ^
+)");
+ }
+}
+
+INSTANTIATE_TEST_SUITE_P(UniformityAnalysisTest,
+ ComputeBuiltin,
+ ::testing::Values(BuiltinEntry{"local_invocation_id", "vec3<u32>", false},
+ BuiltinEntry{"local_invocation_index", "u32", false},
+ BuiltinEntry{"global_invocation_id", "vec3<u32>", false},
+ BuiltinEntry{"workgroup_id", "vec3<u32>", true},
+ BuiltinEntry{"num_workgroups", "vec3<u32>", true}),
+ [](const ::testing::TestParamInfo<ComputeBuiltin::ParamType>& p) {
+ return p.param.name;
+ });
+
+TEST_F(UniformityAnalysisTest, ComputeBuiltin_MixedAttributesInStruct) {
+    // Mix both non-uniform and uniform shader IO attributes in the same structure. Even accessing
+    // just the uniform member causes non-uniformity in this case.
+ std::string src = R"(
+struct S {
+ @builtin(num_workgroups) num_groups : vec3<u32>,
+ @builtin(local_invocation_index) idx : u32,
+}
+
+@compute @workgroup_size(64)
+fn main(s : S) {
+ if (s.num_groups.x == 0u) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:10:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:9:3 note: control flow depends on non-uniform value
+ if (s.num_groups.x == 0u) {
+ ^^
+
+test:9:7 note: reading from 's' may result in a non-uniform value
+ if (s.num_groups.x == 0u) {
+ ^
+)");
+}
+
+class FragmentBuiltin : public UniformityAnalysisTestBase,
+ public ::testing::TestWithParam<BuiltinEntry> {};
+TEST_P(FragmentBuiltin, AsParam) {
+ std::string src = R"(
+@fragment
+fn main(@builtin()" + GetParam().name +
+ R"() b : )" + GetParam().type + R"() {
+ if (u32(vec4(b).x) == 0u) {
+ dpdx(0.5);
+ }
+}
+)";
+
+ bool should_pass = GetParam().uniform;
+ RunTest(src, should_pass);
+ if (!should_pass) {
+ EXPECT_EQ(error_,
+ R"(test:5:5 warning: 'dpdx' must only be called from uniform control flow
+ dpdx(0.5);
+ ^^^^
+
+test:4:3 note: control flow depends on non-uniform value
+ if (u32(vec4(b).x) == 0u) {
+ ^^
+
+test:4:16 note: reading from builtin 'b' may result in a non-uniform value
+ if (u32(vec4(b).x) == 0u) {
+ ^
+)");
+ }
+}
+
+TEST_P(FragmentBuiltin, InStruct) {
+ std::string src = R"(
+struct S {
+ @builtin()" + GetParam().name +
+ R"() b : )" + GetParam().type + R"(
+}
+
+@fragment
+fn main(s : S) {
+ if (u32(vec4(s.b).x) == 0u) {
+ dpdx(0.5);
+ }
+}
+)";
+
+ bool should_pass = GetParam().uniform;
+ RunTest(src, should_pass);
+ if (!should_pass) {
+ EXPECT_EQ(error_,
+ R"(test:9:5 warning: 'dpdx' must only be called from uniform control flow
+ dpdx(0.5);
+ ^^^^
+
+test:8:3 note: control flow depends on non-uniform value
+ if (u32(vec4(s.b).x) == 0u) {
+ ^^
+
+test:8:16 note: reading from 's' may result in a non-uniform value
+ if (u32(vec4(s.b).x) == 0u) {
+ ^
+)");
+ }
+}
+
+INSTANTIATE_TEST_SUITE_P(UniformityAnalysisTest,
+ FragmentBuiltin,
+ ::testing::Values(BuiltinEntry{"position", "vec4<f32>", false},
+ BuiltinEntry{"front_facing", "bool", false},
+ BuiltinEntry{"sample_index", "u32", false},
+ BuiltinEntry{"sample_mask", "u32", false}),
+ [](const ::testing::TestParamInfo<FragmentBuiltin::ParamType>& p) {
+ return p.param.name;
+ });
+
+TEST_F(UniformityAnalysisTest, FragmentLocation) {
+ std::string src = R"(
+@fragment
+fn main(@location(0) l : f32) {
+ if (l == 0.0) {
+ dpdx(0.5);
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:5:5 warning: 'dpdx' must only be called from uniform control flow
+ dpdx(0.5);
+ ^^^^
+
+test:4:3 note: control flow depends on non-uniform value
+ if (l == 0.0) {
+ ^^
+
+test:4:7 note: reading from user-defined input 'l' may result in a non-uniform value
+ if (l == 0.0) {
+ ^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, FragmentLocation_InStruct) {
+ std::string src = R"(
+struct S {
+ @location(0) l : f32
+}
+
+@fragment
+fn main(s : S) {
+ if (s.l == 0.0) {
+ dpdx(0.5);
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:9:5 warning: 'dpdx' must only be called from uniform control flow
+ dpdx(0.5);
+ ^^^^
+
+test:8:3 note: control flow depends on non-uniform value
+ if (s.l == 0.0) {
+ ^^
+
+test:8:7 note: reading from 's' may result in a non-uniform value
+ if (s.l == 0.0) {
+ ^
+)");
+}
+
+////////////////////////////////////////////////////////////////////////////////
+/// Test loop conditions and conditional break/continue statements.
+////////////////////////////////////////////////////////////////////////////////
+
+namespace LoopTest {
+
+enum ControlFlowInterrupt {
+ kBreak,
+ kContinue,
+ kReturn,
+ kDiscard,
+};
+enum Condition {
+ kNone,
+ kUniform,
+ kNonUniform,
+};
+
+using LoopTestParams = std::tuple<int, int>;
+
+static std::string ToStr(ControlFlowInterrupt interrupt) {
+ switch (interrupt) {
+ case kBreak:
+ return "break";
+ case kContinue:
+ return "continue";
+ case kReturn:
+ return "return";
+ case kDiscard:
+ return "discard";
+ }
+ return "";
+}
+
+static std::string ToStr(Condition condition) {
+    switch (condition) {
+        case kNone:
+            return "unconditional";
+        case kUniform:
+            return "uniform";
+        case kNonUniform:
+            return "nonuniform";
+    }
+    return "";
+}
+
+class LoopTest : public UniformityAnalysisTestBase,
+ public ::testing::TestWithParam<LoopTestParams> {
+ protected:
+ std::string MakeInterrupt(ControlFlowInterrupt interrupt, Condition condition) {
+ switch (condition) {
+ case kNone:
+ return ToStr(interrupt);
+ case kUniform:
+ return "if (uniform_var == 42) { " + ToStr(interrupt) + "; }";
+ case kNonUniform:
+ return "if (nonuniform_var == 42) { " + ToStr(interrupt) + "; }";
+ }
+ return "<invalid>";
+ }
+};
+
+INSTANTIATE_TEST_SUITE_P(UniformityAnalysisTest,
+ LoopTest,
+ ::testing::Combine(::testing::Range<int>(0, kDiscard + 1),
+ ::testing::Range<int>(0, kNonUniform + 1)),
+ [](const ::testing::TestParamInfo<LoopTestParams>& p) {
+ ControlFlowInterrupt interrupt =
+ static_cast<ControlFlowInterrupt>(std::get<0>(p.param));
+ auto condition = static_cast<Condition>(std::get<1>(p.param));
+ return ToStr(interrupt) + "_" + ToStr(condition);
+ });
+
+TEST_P(LoopTest, CallInBody_InterruptAfter) {
+ // Test control-flow interrupt in a loop after a function call that requires uniform control
+ // flow.
+ auto interrupt = static_cast<ControlFlowInterrupt>(std::get<0>(GetParam()));
+ auto condition = static_cast<Condition>(std::get<1>(GetParam()));
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read> uniform_var : i32;
+@group(0) @binding(0) var<storage, read_write> nonuniform_var : i32;
+
+fn foo() {
+ loop {
+ // Pretend that this isn't an infinite loop, in case the interrupt is a
+ // continue statement.
+ if (false) {
+ break;
+ }
+
+ workgroupBarrier();
+ )" + MakeInterrupt(interrupt, condition) +
+ R"(;
+ }
+}
+)";
+
+ if (condition == kNonUniform) {
+ RunTest(src, false);
+ EXPECT_THAT(
+ error_,
+ ::testing::StartsWith(
+ R"(test:13:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();)"));
+ EXPECT_THAT(error_,
+ ::testing::HasSubstr("test:14:9 note: reading from read_write storage buffer "
+ "'nonuniform_var' may result in a non-uniform value"));
+ } else {
+ RunTest(src, true);
+ }
+}
+
+TEST_P(LoopTest, CallInBody_InterruptBefore) {
+ // Test control-flow interrupt in a loop before a function call that requires uniform control
+ // flow.
+ auto interrupt = static_cast<ControlFlowInterrupt>(std::get<0>(GetParam()));
+ auto condition = static_cast<Condition>(std::get<1>(GetParam()));
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read> uniform_var : i32;
+@group(0) @binding(0) var<storage, read_write> nonuniform_var : i32;
+
+fn foo() {
+ loop {
+ // Pretend that this isn't an infinite loop, in case the interrupt is a
+ // continue statement.
+ if (false) {
+ break;
+ }
+
+ )" + MakeInterrupt(interrupt, condition) +
+ R"(;
+ workgroupBarrier();
+ }
+}
+)";
+
+ if (condition == kNonUniform) {
+ RunTest(src, false);
+
+ EXPECT_THAT(
+ error_,
+ ::testing::StartsWith(
+ R"(test:14:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();)"));
+ EXPECT_THAT(error_,
+ ::testing::HasSubstr("test:13:9 note: reading from read_write storage buffer "
+ "'nonuniform_var' may result in a non-uniform value"));
+ } else {
+ RunTest(src, true);
+ }
+}
+
+TEST_P(LoopTest, CallInContinuing_InterruptInBody) {
+ // Test control-flow interrupt in a loop with a function call that requires uniform control flow
+ // in the continuing statement.
+ auto interrupt = static_cast<ControlFlowInterrupt>(std::get<0>(GetParam()));
+ auto condition = static_cast<Condition>(std::get<1>(GetParam()));
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read> uniform_var : i32;
+@group(0) @binding(0) var<storage, read_write> nonuniform_var : i32;
+
+fn foo() {
+ loop {
+ // Pretend that this isn't an infinite loop, in case the interrupt is a
+ // continue statement.
+ if (false) {
+ break;
+ }
+
+ )" + MakeInterrupt(interrupt, condition) +
+ R"(;
+ continuing {
+ workgroupBarrier();
+ }
+ }
+}
+)";
+
+ if (condition == kNonUniform) {
+ RunTest(src, false);
+ EXPECT_THAT(
+ error_,
+ ::testing::StartsWith(
+ R"(test:15:7 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();)"));
+ EXPECT_THAT(error_,
+ ::testing::HasSubstr("test:13:9 note: reading from read_write storage buffer "
+ "'nonuniform_var' may result in a non-uniform value"));
+ } else {
+ RunTest(src, true);
+ }
+}
+
+TEST_F(UniformityAnalysisTest, Loop_CallInBody_UniformBreakInContinuing) {
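+    // Call workgroupBarrier() unconditionally in the loop body, with a break in the continuing
+    // statement that is guarded by a uniform value.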
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read> n : i32;
+
+fn foo() {
+ var i = 0;
+ loop {
+ workgroupBarrier();
+ continuing {
+ i = i + 1;
+ if (i == n) {
+ break;
+ }
+ }
+ }
+}
+)";
+
+ RunTest(src, true);
+}
+
+TEST_F(UniformityAnalysisTest, Loop_CallInBody_NonUniformBreakInContinuing) {
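+    // Call workgroupBarrier() unconditionally in the loop body, with a break in the continuing
+    // statement that is guarded by a non-uniform value.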
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> n : i32;
+
+fn foo() {
+ var i = 0;
+ loop {
+ workgroupBarrier();
+ continuing {
+ i = i + 1;
+ if (i == n) {
+ break;
+ }
+ }
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:7:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:10:7 note: control flow depends on non-uniform value
+ if (i == n) {
+ ^^
+
+test:10:16 note: reading from read_write storage buffer 'n' may result in a non-uniform value
+ if (i == n) {
+ ^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, Loop_CallInContinuing_UniformBreakInContinuing) {
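+    // Call workgroupBarrier() in the continuing statement, before a break that is guarded by a
+    // uniform value.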
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read> n : i32;
+
+fn foo() {
+ var i = 0;
+ loop {
+ continuing {
+ workgroupBarrier();
+ i = i + 1;
+ if (i == n) {
+ break;
+ }
+ }
+ }
+}
+)";
+
+ RunTest(src, true);
+}
+
+TEST_F(UniformityAnalysisTest, Loop_CallInContinuing_NonUniformBreakInContinuing) {
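+    // Call workgroupBarrier() in the continuing statement, before a break that is guarded by a
+    // non-uniform value.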
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> n : i32;
+
+fn foo() {
+ var i = 0;
+ loop {
+ continuing {
+ workgroupBarrier();
+ i = i + 1;
+ if (i == n) {
+ break;
+ }
+ }
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:8:7 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:10:7 note: control flow depends on non-uniform value
+ if (i == n) {
+ ^^
+
+test:10:16 note: reading from read_write storage buffer 'n' may result in a non-uniform value
+ if (i == n) {
+ ^
+)");
+}
+
+class LoopDeadCodeTest : public UniformityAnalysisTestBase, public ::testing::TestWithParam<int> {};
+
+INSTANTIATE_TEST_SUITE_P(UniformityAnalysisTest,
+ LoopDeadCodeTest,
+ ::testing::Range<int>(0, kDiscard + 1),
+ [](const ::testing::TestParamInfo<LoopDeadCodeTest::ParamType>& p) {
+ return ToStr(static_cast<ControlFlowInterrupt>(p.param));
+ });
+
+TEST_P(LoopDeadCodeTest, AfterInterrupt) {
+ // Dead code after a control-flow interrupt in a loop shouldn't cause uniformity errors.
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> n : i32;
+
+fn foo() {
+ loop {
+ )" + ToStr(static_cast<ControlFlowInterrupt>(GetParam())) +
+ R"(;
+ if (n == 42) {
+ workgroupBarrier();
+ }
+ continuing {
+ // Pretend that this isn't an infinite loop, in case the interrupt is a
+ // continue statement.
+ if (false) {
+ break;
+ }
+ }
+ }
+}
+)";
+
+ RunTest(src, true);
+}
+
+TEST_F(UniformityAnalysisTest, Loop_VarBecomesNonUniformInLoopAfterBarrier) {
+ // Use a variable for a conditional barrier in a loop, and then assign a non-uniform value to
+ // that variable later in that loop.
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn foo() {
+ var v = 0;
+ loop {
+ if (v == 0) {
+ workgroupBarrier();
+ break;
+ }
+
+ v = non_uniform;
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:8:7 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:7:5 note: control flow depends on non-uniform value
+ if (v == 0) {
+ ^^
+
+test:12:9 note: reading from read_write storage buffer 'non_uniform' may result in a non-uniform value
+ v = non_uniform;
+ ^^^^^^^^^^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, Loop_VarBecomesNonUniformInLoopAfterBarrier_BreakAtEnd) {
+ // Use a variable for a conditional barrier in a loop, and then assign a non-uniform value to
+ // that variable later in that loop. End the loop with a break statement to prevent the
+ // non-uniform value from causing an issue.
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn foo() {
+ var v = 0;
+ loop {
+ if (v == 0) {
+ workgroupBarrier();
+ }
+
+ v = non_uniform;
+ break;
+ }
+}
+)";
+
+ RunTest(src, true);
+}
+
+TEST_F(UniformityAnalysisTest, Loop_ConditionalAssignNonUniformWithBreak_BarrierInLoop) {
+ // In a conditional block, assign a non-uniform value and then break, then use a variable for a
+ // conditional barrier later in the loop.
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn foo() {
+ var v = 0;
+ loop {
+ if (true) {
+ v = non_uniform;
+ break;
+ }
+ if (v == 0) {
+ workgroupBarrier();
+ }
+ }
+}
+)";
+
+ RunTest(src, true);
+}
+
+TEST_F(UniformityAnalysisTest, Loop_ConditionalAssignNonUniformWithConditionalBreak_BarrierInLoop) {
+ // In a conditional block, assign a non-uniform value and then conditionally break, then use a
+ // variable for a conditional barrier later in the loop.
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn foo() {
+ var v = 0;
+ loop {
+ if (true) {
+ v = non_uniform;
+ if (true) {
+ break;
+ }
+ }
+ if (v == 0) {
+ workgroupBarrier();
+ }
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:14:7 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:13:5 note: control flow depends on non-uniform value
+ if (v == 0) {
+ ^^
+
+test:8:11 note: reading from read_write storage buffer 'non_uniform' may result in a non-uniform value
+ v = non_uniform;
+ ^^^^^^^^^^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, Loop_ConditionalAssignNonUniformWithBreak_BarrierAfterLoop) {
+ // In a conditional block, assign a non-uniform value and then break, then use a variable for a
+ // conditional barrier after the loop.
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn foo() {
+ var v = 0;
+ loop {
+ if (true) {
+ v = non_uniform;
+ break;
+ }
+ v = 5;
+ }
+
+ if (v == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:15:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:14:3 note: control flow depends on non-uniform value
+ if (v == 0) {
+ ^^
+
+test:8:11 note: reading from read_write storage buffer 'non_uniform' may result in a non-uniform value
+ v = non_uniform;
+ ^^^^^^^^^^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, Loop_VarBecomesUniformBeforeSomeExits_BarrierAfterLoop) {
+    // Assign a non-uniform value, have two exit points, only one of which assigns a uniform
+    // value, then use a variable for a conditional barrier after the loop.
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn foo() {
+ var v = 0;
+ loop {
+ if (true) {
+ break;
+ }
+
+ v = non_uniform;
+
+ if (false) {
+ v = 6;
+ break;
+ }
+ }
+
+ if (v == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:20:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:19:3 note: control flow depends on non-uniform value
+ if (v == 0) {
+ ^^
+
+test:11:9 note: reading from read_write storage buffer 'non_uniform' may result in a non-uniform value
+ v = non_uniform;
+ ^^^^^^^^^^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, Loop_VarBecomesUniformBeforeAllExits_BarrierAfterLoop) {
+    // Assign a non-uniform value, have two exit points, both of which assign a uniform value,
+    // then use a variable for a conditional barrier after the loop.
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn foo() {
+ var v = 0;
+ loop {
+ if (true) {
+ v = 5;
+ break;
+ }
+
+ v = non_uniform;
+
+ if (false) {
+ v = 6;
+ break;
+ }
+ }
+
+ if (v == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, true);
+}
+
+TEST_F(UniformityAnalysisTest, Loop_AssignNonUniformBeforeConditionalBreak_BarrierAfterLoop) {
+ // Assign a non-uniform value and then break in a conditional block, then use a variable for a
+ // conditional barrier after the loop.
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn foo() {
+ var v = 0;
+ loop {
+ v = non_uniform;
+ if (true) {
+ if (false) {
+ v = 5;
+ } else {
+ break;
+ }
+ v = 5;
+ }
+ v = 5;
+ }
+
+ if (v == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:20:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:19:3 note: control flow depends on non-uniform value
+ if (v == 0) {
+ ^^
+
+test:7:9 note: reading from read_write storage buffer 'non_uniform' may result in a non-uniform value
+ v = non_uniform;
+ ^^^^^^^^^^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, Loop_VarBecomesNonUniformBeforeConditionalContinue_BarrierAtStart) {
+ // Use a variable for a conditional barrier in a loop, assign a non-uniform value to
+ // that variable later in that loop, then perform a conditional continue before assigning a
+ // uniform value to that variable.
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn foo() {
+ var v = 0;
+ loop {
+ if (v == 0) {
+ workgroupBarrier();
+ break;
+ }
+
+ v = non_uniform;
+ if (true) {
+ continue;
+ }
+
+ v = 5;
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:8:7 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:7:5 note: control flow depends on non-uniform value
+ if (v == 0) {
+ ^^
+
+test:12:9 note: reading from read_write storage buffer 'non_uniform' may result in a non-uniform value
+ v = non_uniform;
+ ^^^^^^^^^^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest,
+ Loop_VarBecomesUniformBeforeConditionalContinue_BarrierInContinuing) {
+    // Use a variable for a conditional barrier in the continuing statement of a loop, assign a
+    // non-uniform value to that variable in the loop body, then conditionally assign a uniform
+    // value before continuing.
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn foo() {
+ var v = 0;
+ loop {
+ v = non_uniform;
+
+ if (false) {
+ v = 5;
+ continue;
+ }
+
+ continuing {
+ if (v == 0) {
+ workgroupBarrier();
+ }
+ if (true) {
+ break;
+ }
+ }
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:16:9 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:15:7 note: control flow depends on non-uniform value
+ if (v == 0) {
+ ^^
+
+test:7:9 note: reading from read_write storage buffer 'non_uniform' may result in a non-uniform value
+ v = non_uniform;
+ ^^^^^^^^^^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, Loop_VarBecomesNonUniformBeforeConditionalContinue) {
+ // Use a variable for a conditional barrier in a loop, assign a non-uniform value to
+ // that variable later in that loop, then perform a conditional continue before assigning a
+ // uniform value to that variable.
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn foo() {
+ var v = 0;
+ loop {
+ if (v == 0) {
+ workgroupBarrier();
+ break;
+ }
+
+ v = non_uniform;
+ if (true) {
+ continue;
+ }
+
+ v = 5;
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:8:7 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:7:5 note: control flow depends on non-uniform value
+ if (v == 0) {
+ ^^
+
+test:12:9 note: reading from read_write storage buffer 'non_uniform' may result in a non-uniform value
+ v = non_uniform;
+ ^^^^^^^^^^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, Loop_VarBecomesNonUniformInNestedLoopWithBreak_BarrierInLoop) {
+ // Use a variable for a conditional barrier in a loop, then conditionally assign a non-uniform
+ // value to that variable followed by a break in a nested loop.
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn foo() {
+ var v = 0;
+ loop {
+ if (v == 0) {
+ workgroupBarrier();
+ break;
+ }
+
+ loop {
+ if (true) {
+ v = non_uniform;
+ break;
+ }
+ v = 5;
+ }
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:8:7 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:7:5 note: control flow depends on non-uniform value
+ if (v == 0) {
+ ^^
+
+test:14:13 note: reading from read_write storage buffer 'non_uniform' may result in a non-uniform value
+ v = non_uniform;
+ ^^^^^^^^^^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest,
+ Loop_VarBecomesNonUniformInNestedLoopWithBreak_BecomesUniformAgain_BarrierAfterLoop) {
+ // Conditionally assign a non-uniform value followed by a break in a nested loop, assign a
+ // uniform value in the outer loop, and then use a variable for a conditional barrier after the
+ // loop.
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn foo() {
+ var v = 0;
+ loop {
+ if (false) {
+ break;
+ }
+
+ loop {
+ if (true) {
+ v = non_uniform;
+ break;
+ }
+ }
+ v = 5;
+ }
+ if (v == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, true);
+}
+
+TEST_F(UniformityAnalysisTest, Loop_NonUniformValueNeverReachesContinuing) {
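+    // The loop body returns before reaching the continuing statement, so the non-uniform value
+    // assigned to `v` never reaches the conditional barrier in the continuing statement.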
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn foo() {
+ loop {
+ var v = non_uniform;
+ return;
+
+ continuing {
+ if (v == 0) {
+ workgroupBarrier();
+ }
+ }
+ }
+}
+)";
+
+ RunTest(src, true);
+}
+
+TEST_F(UniformityAnalysisTest, Loop_NonUniformBreakInBody_Reconverge) {
+ // Loops reconverge at exit, so test that we can call workgroupBarrier() after a loop that
+ // contains a non-uniform conditional break.
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> n : i32;
+
+fn foo() {
+ var i = 0;
+ loop {
+ if (i == n) {
+ break;
+ }
+ i = i + 1;
+ }
+ workgroupBarrier();
+}
+)";
+
+ RunTest(src, true);
+}
+
+TEST_F(UniformityAnalysisTest, Loop_NonUniformFunctionInBody_Reconverge) {
+ // Loops reconverge at exit, so test that we can call workgroupBarrier() after a loop that
+ // contains a call to a function that causes non-uniform control flow.
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> n : i32;
+
+fn bar() {
+ if (n == 42) {
+ return;
+ } else {
+ return;
+ }
+}
+
+fn foo() {
+ loop {
+ bar();
+ break;
+ }
+ workgroupBarrier();
+}
+)";
+
+ RunTest(src, true);
+}
+
+TEST_F(UniformityAnalysisTest, Loop_NonUniformFunctionDiscard_NoReconvergence) {
+ // Loops should not reconverge after non-uniform discard statements.
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> n : i32;
+
+fn bar() {
+ if (n == 42) {
+ discard;
+ }
+}
+
+fn foo() {
+ loop {
+ bar();
+ break;
+ }
+ workgroupBarrier();
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:15:3 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:12:5 note: calling 'bar' may cause subsequent control flow to be non-uniform
+ bar();
+ ^^^
+
+test:5:3 note: control flow depends on non-uniform value
+ if (n == 42) {
+ ^^
+
+test:5:7 note: reading from read_write storage buffer 'n' may result in a non-uniform value
+ if (n == 42) {
+ ^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, ForLoop_CallInside_UniformCondition) {
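+    // Call workgroupBarrier() inside a for-loop whose condition is uniform.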
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read> n : i32;
+
+fn foo() {
+ for (var i = 0; i < n; i = i + 1) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, true);
+}
+
+TEST_F(UniformityAnalysisTest, ForLoop_CallInside_NonUniformCondition) {
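+    // Call workgroupBarrier() inside a for-loop whose condition is non-uniform.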
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> n : i32;
+
+fn foo() {
+ for (var i = 0; i < n; i = i + 1) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:6:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:5:3 note: control flow depends on non-uniform value
+ for (var i = 0; i < n; i = i + 1) {
+ ^^^
+
+test:5:23 note: reading from read_write storage buffer 'n' may result in a non-uniform value
+ for (var i = 0; i < n; i = i + 1) {
+ ^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, ForLoop_CallInside_InitializerCausesNonUniformFlow) {
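+    // Call workgroupBarrier() inside a for-loop whose initializer calls a function that causes
+    // subsequent control flow to be non-uniform.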
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> n : i32;
+
+fn bar() -> i32 {
+ if (n == 42) {
+ return 1;
+ } else {
+ return 2;
+ }
+}
+
+fn foo() {
+ for (var i = bar(); i < 10; i = i + 1) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:14:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:13:16 note: calling 'bar' may cause subsequent control flow to be non-uniform
+ for (var i = bar(); i < 10; i = i + 1) {
+ ^^^
+
+test:5:3 note: control flow depends on non-uniform value
+ if (n == 42) {
+ ^^
+
+test:5:7 note: reading from read_write storage buffer 'n' may result in a non-uniform value
+ if (n == 42) {
+ ^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, ForLoop_CallInside_ContinuingCausesNonUniformFlow) {
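+    // Call workgroupBarrier() inside a for-loop whose continuing statement calls a function that
+    // causes subsequent control flow to be non-uniform.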
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> n : i32;
+
+fn bar() -> i32 {
+ if (n == 42) {
+ return 1;
+ } else {
+ return 2;
+ }
+}
+
+fn foo() {
+ for (var i = 0; i < 10; i = i + bar()) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:14:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:13:35 note: calling 'bar' may cause subsequent control flow to be non-uniform
+ for (var i = 0; i < 10; i = i + bar()) {
+ ^^^
+
+test:5:3 note: control flow depends on non-uniform value
+ if (n == 42) {
+ ^^
+
+test:5:7 note: reading from read_write storage buffer 'n' may result in a non-uniform value
+ if (n == 42) {
+ ^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, ForLoop_VarBecomesNonUniformInContinuing_BarrierInLoop) {
+ // Use a variable for a conditional barrier in a loop, and then assign a non-uniform value to
+ // that variable in the continuing statement.
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn foo() {
+ var v = 0;
+ for (var i = 0; i < 10; v = non_uniform) {
+ if (v == 0) {
+ workgroupBarrier();
+ break;
+ }
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:8:7 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:7:5 note: control flow depends on non-uniform value
+ if (v == 0) {
+ ^^
+
+test:6:31 note: reading from read_write storage buffer 'non_uniform' may result in a non-uniform value
+ for (var i = 0; i < 10; v = non_uniform) {
+ ^^^^^^^^^^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, ForLoop_VarBecomesUniformInContinuing_BarrierInLoop) {
+ // Use a variable for a conditional barrier in a loop, and then assign a uniform value to that
+ // variable in the continuing statement.
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn foo() {
+ var v = 0;
+ for (var i = 0; i < 10; v = 5) {
+ if (v == 0) {
+ workgroupBarrier();
+ break;
+ }
+
+ v = non_uniform;
+ }
+}
+)";
+
+ RunTest(src, true);
+}
+
+TEST_F(UniformityAnalysisTest, ForLoop_VarBecomesNonUniformInContinuing_BarrierAfterLoop) {
+ // Use a variable for a conditional barrier after a loop, and assign a non-uniform value to
+ // that variable in the continuing statement.
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn foo() {
+ var v = 0;
+ for (var i = 0; i < 10; v = non_uniform) {
+ v = 5;
+ }
+ if (v == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:10:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:9:3 note: control flow depends on non-uniform value
+ if (v == 0) {
+ ^^
+
+test:6:31 note: reading from read_write storage buffer 'non_uniform' may result in a non-uniform value
+ for (var i = 0; i < 10; v = non_uniform) {
+ ^^^^^^^^^^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, ForLoop_VarBecomesUniformInContinuing_BarrierAfterLoop) {
+ // Use a variable for a conditional barrier after a loop, and assign a uniform value to that
+ // variable in the continuing statement.
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn foo() {
+ var v = 0;
+ for (var i = 0; i < 10; v = 5) {
+ v = non_uniform;
+ }
+ if (v == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, true);
+}
+
+TEST_F(UniformityAnalysisTest, ForLoop_VarBecomesNonUniformInLoopAfterBarrier) {
+ // Use a variable for a conditional barrier in a loop, and then assign a non-uniform value to
+ // that variable later in that loop.
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn foo() {
+ var v = 0;
+ for (var i = 0; i < 10; i++) {
+ if (v == 0) {
+ workgroupBarrier();
+ break;
+ }
+
+ v = non_uniform;
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:8:7 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:7:5 note: control flow depends on non-uniform value
+ if (v == 0) {
+ ^^
+
+test:12:9 note: reading from read_write storage buffer 'non_uniform' may result in a non-uniform value
+ v = non_uniform;
+ ^^^^^^^^^^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, ForLoop_ConditionalAssignNonUniformWithBreak_BarrierInLoop) {
+ // In a conditional block, assign a non-uniform value and then break, then use a variable for a
+ // conditional barrier later in the loop.
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn foo() {
+ var v = 0;
+ for (var i = 0; i < 10; i++) {
+ if (true) {
+ v = non_uniform;
+ break;
+ }
+ if (v == 0) {
+ workgroupBarrier();
+ }
+ }
+}
+)";
+
+ RunTest(src, true);
+}
+
+TEST_F(UniformityAnalysisTest, ForLoop_ConditionalAssignNonUniformWithBreak_BarrierAfterLoop) {
+ // In a conditional block, assign a non-uniform value and then break, then use a variable for a
+ // conditional barrier after the loop.
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn foo() {
+ var v = 0;
+ for (var i = 0; i < 10; i++) {
+ if (true) {
+ v = non_uniform;
+ break;
+ }
+ v = 5;
+ }
+
+ if (v == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:15:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:14:3 note: control flow depends on non-uniform value
+ if (v == 0) {
+ ^^
+
+test:8:11 note: reading from read_write storage buffer 'non_uniform' may result in a non-uniform value
+ v = non_uniform;
+ ^^^^^^^^^^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, ForLoop_VarRemainsNonUniformAtLoopEnd_BarrierAfterLoop) {
+ // Assign a non-uniform value, assign a uniform value before all explicit break points but leave
+ // the value non-uniform at loop exit, then use a variable for a conditional barrier after the
+ // loop.
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn foo() {
+ var v = 0;
+ for (var i = 0; i < 10; i++) {
+ if (true) {
+ v = 5;
+ break;
+ }
+
+ v = non_uniform;
+
+ if (true) {
+ v = 6;
+ break;
+ }
+ }
+
+ if (v == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:21:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:20:3 note: control flow depends on non-uniform value
+ if (v == 0) {
+ ^^
+
+test:12:9 note: reading from read_write storage buffer 'non_uniform' may result in a non-uniform value
+ v = non_uniform;
+ ^^^^^^^^^^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest,
+ ForLoop_VarBecomesNonUniformBeforeConditionalContinue_BarrierAtStart) {
+ // Use a variable for a conditional barrier in a loop, assign a non-uniform value to
+ // that variable later in that loop, then perform a conditional continue before assigning a
+ // uniform value to that variable.
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn foo() {
+ var v = 0;
+ for (var i = 0; i < 10; i++) {
+ if (v == 0) {
+ workgroupBarrier();
+ break;
+ }
+
+ v = non_uniform;
+ if (true) {
+ continue;
+ }
+
+ v = 5;
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:8:7 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:7:5 note: control flow depends on non-uniform value
+ if (v == 0) {
+ ^^
+
+test:12:9 note: reading from read_write storage buffer 'non_uniform' may result in a non-uniform value
+ v = non_uniform;
+ ^^^^^^^^^^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, ForLoop_VarBecomesNonUniformBeforeConditionalContinue) {
+ // Use a variable for a conditional barrier in a loop, assign a non-uniform value to
+ // that variable later in that loop, then perform a conditional continue before assigning a
+ // uniform value to that variable.
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn foo() {
+ var v = 0;
+ for (var i = 0; i < 10; i++) {
+ if (v == 0) {
+ workgroupBarrier();
+ break;
+ }
+
+ v = non_uniform;
+ if (true) {
+ continue;
+ }
+
+ v = 5;
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:8:7 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:7:5 note: control flow depends on non-uniform value
+ if (v == 0) {
+ ^^
+
+test:12:9 note: reading from read_write storage buffer 'non_uniform' may result in a non-uniform value
+ v = non_uniform;
+ ^^^^^^^^^^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, ForLoop_NonUniformCondition_Reconverge) {
+ // Loops reconverge at exit, so test that we can call workgroupBarrier() after a loop that has a
+ // non-uniform condition.
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> n : i32;
+
+fn foo() {
+ for (var i = 0; i < n; i = i + 1) {
+ }
+ workgroupBarrier();
+}
+)";
+
+ RunTest(src, true);
+}
+
+} // namespace LoopTest
+
+////////////////////////////////////////////////////////////////////////////////
+/// If-else statement tests.
+////////////////////////////////////////////////////////////////////////////////
+
+TEST_F(UniformityAnalysisTest, IfElse_UniformCondition_BarrierInTrueBlock) {
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read> uniform_global : i32;
+
+fn foo() {
+ if (uniform_global == 42) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, true);
+}
+
+TEST_F(UniformityAnalysisTest, IfElse_UniformCondition_BarrierInElseBlock) {
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read> uniform_global : i32;
+
+fn foo() {
+ if (uniform_global == 42) {
+ } else {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, true);
+}
+
+TEST_F(UniformityAnalysisTest, IfElse_UniformCondition_BarrierInElseIfBlock) {
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read> uniform_global : i32;
+
+fn foo() {
+ if (uniform_global == 42) {
+ } else if (true) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, true);
+}
+
+TEST_F(UniformityAnalysisTest, IfElse_NonUniformCondition_BarrierInTrueBlock) {
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn foo() {
+ if (non_uniform == 42) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:6:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:5:3 note: control flow depends on non-uniform value
+ if (non_uniform == 42) {
+ ^^
+
+test:5:7 note: reading from read_write storage buffer 'non_uniform' may result in a non-uniform value
+ if (non_uniform == 42) {
+ ^^^^^^^^^^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, IfElse_NonUniformCondition_BarrierInElseBlock) {
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn foo() {
+ if (non_uniform == 42) {
+ } else {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:7:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:5:3 note: control flow depends on non-uniform value
+ if (non_uniform == 42) {
+ ^^
+
+test:5:7 note: reading from read_write storage buffer 'non_uniform' may result in a non-uniform value
+ if (non_uniform == 42) {
+ ^^^^^^^^^^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, IfElse_ShortCircuitingCondition_NonUniformLHS_And) {
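+    // Use a non-uniform value on the left-hand side of a short-circuiting '&&' condition that
+    // guards a barrier.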
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform_global : i32;
+
+var<private> p : i32;
+
+fn main() {
+ if ((non_uniform_global == 42) && false) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:8:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:7:34 note: control flow depends on non-uniform value
+ if ((non_uniform_global == 42) && false) {
+ ^^
+
+test:7:8 note: reading from read_write storage buffer 'non_uniform_global' may result in a non-uniform value
+ if ((non_uniform_global == 42) && false) {
+ ^^^^^^^^^^^^^^^^^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, IfElse_ShortCircuitingCondition_NonUniformRHS_And) {
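+    // Use a non-uniform value on the right-hand side of a short-circuiting '&&' condition that
+    // guards a barrier.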
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform_global : i32;
+
+var<private> p : i32;
+
+fn main() {
+ if (false && (non_uniform_global == 42)) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:8:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:7:3 note: control flow depends on non-uniform value
+ if (false && (non_uniform_global == 42)) {
+ ^^
+
+test:7:17 note: reading from read_write storage buffer 'non_uniform_global' may result in a non-uniform value
+ if (false && (non_uniform_global == 42)) {
+ ^^^^^^^^^^^^^^^^^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, IfElse_ShortCircuitingCondition_NonUniformLHS_Or) {
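+    // Use a non-uniform value on the left-hand side of a short-circuiting '||' condition that
+    // guards a barrier.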
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform_global : i32;
+
+var<private> p : i32;
+
+fn main() {
+ if ((non_uniform_global == 42) || true) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:8:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:7:34 note: control flow depends on non-uniform value
+ if ((non_uniform_global == 42) || true) {
+ ^^
+
+test:7:8 note: reading from read_write storage buffer 'non_uniform_global' may result in a non-uniform value
+ if ((non_uniform_global == 42) || true) {
+ ^^^^^^^^^^^^^^^^^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, IfElse_ShortCircuitingCondition_NonUniformRHS_Or) {
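+    // Use a non-uniform value on the right-hand side of a short-circuiting '||' condition that
+    // guards a barrier.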
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform_global : i32;
+
+var<private> p : i32;
+
+fn main() {
+ if (true || (non_uniform_global == 42)) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:8:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:7:3 note: control flow depends on non-uniform value
+ if (true || (non_uniform_global == 42)) {
+ ^^
+
+test:7:16 note: reading from read_write storage buffer 'non_uniform_global' may result in a non-uniform value
+ if (true || (non_uniform_global == 42)) {
+ ^^^^^^^^^^^^^^^^^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, IfElse_NonUniformCondition_BarrierInElseIfBlock) {
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn foo() {
+ if (non_uniform == 42) {
+ } else if (true) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:7:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:5:3 note: control flow depends on non-uniform value
+ if (non_uniform == 42) {
+ ^^
+
+test:5:7 note: reading from read_write storage buffer 'non_uniform' may result in a non-uniform value
+ if (non_uniform == 42) {
+ ^^^^^^^^^^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, IfElse_VarBecomesNonUniform_BeforeCondition) {
+ // Use a function-scope variable for control-flow guarding a barrier, and then assign to that
+ // variable before checking the condition.
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> rw : i32;
+
+fn foo() {
+ var v = 0;
+ v = rw;
+ if (v == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:8:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:7:3 note: control flow depends on non-uniform value
+ if (v == 0) {
+ ^^
+
+test:6:7 note: reading from read_write storage buffer 'rw' may result in a non-uniform value
+ v = rw;
+ ^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, IfElse_VarBecomesNonUniform_AfterCondition) {
+ // Use a function-scope variable for control-flow guarding a barrier, and then assign to that
+ // variable after checking the condition.
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> rw : i32;
+
+fn foo() {
+ var v = 0;
+ if (v == 0) {
+ v = rw;
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, true);
+}
+
+TEST_F(UniformityAnalysisTest, IfElse_VarBecomesNonUniformInIf_BarrierInElse) {
+ // Assign a non-uniform value to a variable in an if-block, and then use that variable for a
+ // conditional barrier in the else block.
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn foo() {
+ var v = 0;
+ if (true) {
+ v = non_uniform;
+ } else {
+ if (v == 0) {
+ workgroupBarrier();
+ }
+ }
+}
+)";
+
+ RunTest(src, true);
+}
+
+TEST_F(UniformityAnalysisTest, IfElse_AssignNonUniformInIf_AssignUniformInElse) {
+ // Assign a non-uniform value to a variable in an if-block and a uniform value in the else
+ // block, and then use that variable for a conditional barrier after the if-else statement.
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn foo() {
+ var v = 0;
+ if (true) {
+ if (true) {
+ v = non_uniform;
+ } else {
+ v = 5;
+ }
+ }
+
+ if (v == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:15:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:14:3 note: control flow depends on non-uniform value
+ if (v == 0) {
+ ^^
+
+test:8:11 note: reading from read_write storage buffer 'non_uniform' may result in a non-uniform value
+ v = non_uniform;
+ ^^^^^^^^^^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, IfElse_AssignNonUniformInIfWithReturn) {
+ // Assign a non-uniform value to a variable in an if-block followed by a return, and then use
+ // that variable for a conditional barrier after the if-else statement.
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn foo() {
+ var v = 0;
+ if (true) {
+ v = non_uniform;
+ return;
+ }
+
+ if (v == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, true);
+}
+
+TEST_F(UniformityAnalysisTest, IfElse_AssignNonUniformBeforeIf_BothBranchesAssignUniform) {
+    // Assign a non-uniform value to a variable before an if-else statement, assign uniform values
+    // in both branches of the if-else, and then use that variable for a conditional barrier after
+ // the if-else statement.
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn foo() {
+ var v = 0;
+ v = non_uniform;
+ if (true) {
+ v = 5;
+ } else {
+ v = 6;
+ }
+
+ if (v == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, true);
+}
+
+TEST_F(UniformityAnalysisTest, IfElse_AssignNonUniformBeforeIf_OnlyTrueBranchAssignsUniform) {
+    // Assign a non-uniform value to a variable before an if-else statement, assign a uniform value
+ // in the true branch of the if-else, and then use that variable for a conditional barrier after
+ // the if-else statement.
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn foo() {
+ var v = 0;
+ v = non_uniform;
+ if (true) {
+ v = 5;
+ }
+
+ if (v == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:12:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:11:3 note: control flow depends on non-uniform value
+ if (v == 0) {
+ ^^
+
+test:6:7 note: reading from read_write storage buffer 'non_uniform' may result in a non-uniform value
+ v = non_uniform;
+ ^^^^^^^^^^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, IfElse_AssignNonUniformBeforeIf_OnlyFalseBranchAssignsUniform) {
+    // Assign a non-uniform value to a variable before an if-else statement, assign a uniform value
+ // in the false branch of the if-else, and then use that variable for a conditional barrier
+ // after the if-else statement.
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn foo() {
+ var v = 0;
+ v = non_uniform;
+ if (true) {
+ } else {
+ v = 5;
+ }
+
+ if (v == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:13:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:12:3 note: control flow depends on non-uniform value
+ if (v == 0) {
+ ^^
+
+test:6:7 note: reading from read_write storage buffer 'non_uniform' may result in a non-uniform value
+ v = non_uniform;
+ ^^^^^^^^^^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest,
+ IfElse_AssignNonUniformBeforeIf_OnlyTrueBranchAssignsUniform_FalseBranchReturns) {
+    // Assign a non-uniform value to a variable before an if-else statement, assign a uniform value
+ // in the true branch of the if-else, leave the variable untouched in the false branch and just
+ // return, and then use that variable for a conditional barrier after the if-else statement.
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn foo() {
+ var v = 0;
+ v = non_uniform;
+ if (true) {
+ v = 5;
+ } else {
+ return;
+ }
+
+ if (v == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, true);
+}
+
+TEST_F(UniformityAnalysisTest,
+ IfElse_AssignNonUniformBeforeIf_OnlyFalseBranchAssignsUniform_TrueBranchReturns) {
+    // Assign a non-uniform value to a variable before an if-else statement, assign a uniform value
+ // in the false branch of the if-else, leave the variable untouched in the true branch and just
+ // return, and then use that variable for a conditional barrier after the if-else statement.
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn foo() {
+ var v = 0;
+ v = non_uniform;
+ if (true) {
+ return;
+ } else {
+ v = 5;
+ }
+
+ if (v == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, true);
+}
+
+TEST_F(UniformityAnalysisTest, IfElse_NonUniformCondition_Reconverge) {
+ // If statements reconverge at exit, so test that we can call workgroupBarrier() after an if
+ // statement with a non-uniform condition.
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn foo() {
+ if (non_uniform == 42) {
+ } else {
+ }
+ workgroupBarrier();
+}
+)";
+
+ RunTest(src, true);
+}
+
+TEST_F(UniformityAnalysisTest, IfElse_ShortCircuitingNonUniformConditionLHS_Reconverge) {
+ // If statements reconverge at exit, so test that we can call workgroupBarrier() after an if
+ // statement with a non-uniform condition that uses short-circuiting.
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn foo() {
+ if (non_uniform == 42 || true) {
+ }
+ workgroupBarrier();
+}
+)";
+
+ RunTest(src, true);
+}
+
+TEST_F(UniformityAnalysisTest, IfElse_ShortCircuitingNonUniformConditionRHS_Reconverge) {
+ // If statements reconverge at exit, so test that we can call workgroupBarrier() after an if
+ // statement with a non-uniform condition that uses short-circuiting.
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn foo() {
+ if (false && non_uniform == 42) {
+ }
+ workgroupBarrier();
+}
+)";
+
+ RunTest(src, true);
+}
+
+TEST_F(UniformityAnalysisTest, IfElse_NonUniformFunctionCall_Reconverge) {
+    // If statements reconverge at exit, so test that we can call workgroupBarrier() after an if
+    // statement with a non-uniform condition whose body calls a function that itself contains
+    // non-uniform control flow.
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn bar() {
+ if (non_uniform == 42) {
+ return;
+ } else {
+ return;
+ }
+}
+
+fn foo() {
+ if (non_uniform == 42) {
+ bar();
+ } else {
+ }
+ workgroupBarrier();
+}
+)";
+
+ RunTest(src, true);
+}
+
+TEST_F(UniformityAnalysisTest, IfElse_NonUniformReturn_NoReconverge) {
+ // If statements should not reconverge after non-uniform returns.
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn foo() {
+ if (non_uniform == 42) {
+ return;
+ } else {
+ }
+ workgroupBarrier();
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:9:3 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:5:3 note: control flow depends on non-uniform value
+ if (non_uniform == 42) {
+ ^^
+
+test:5:7 note: reading from read_write storage buffer 'non_uniform' may result in a non-uniform value
+ if (non_uniform == 42) {
+ ^^^^^^^^^^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, IfElse_NonUniformDiscard_NoReconverge) {
+ // If statements should not reconverge after non-uniform discards.
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn foo() {
+ if (non_uniform == 42) {
+ discard;
+ } else {
+ }
+ workgroupBarrier();
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:9:3 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:5:3 note: control flow depends on non-uniform value
+ if (non_uniform == 42) {
+ ^^
+
+test:5:7 note: reading from read_write storage buffer 'non_uniform' may result in a non-uniform value
+ if (non_uniform == 42) {
+ ^^^^^^^^^^^
+)");
+}
+
+////////////////////////////////////////////////////////////////////////////////
+/// Switch statement tests.
+////////////////////////////////////////////////////////////////////////////////
+
+TEST_F(UniformityAnalysisTest, Switch_NonUniformCondition_BarrierInCase) {
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn foo() {
+ switch (non_uniform) {
+ case 42: {
+ workgroupBarrier();
+ break;
+ }
+ default: {
+ break;
+ }
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:7:7 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:5:3 note: control flow depends on non-uniform value
+ switch (non_uniform) {
+ ^^^^^^
+
+test:5:11 note: reading from read_write storage buffer 'non_uniform' may result in a non-uniform value
+ switch (non_uniform) {
+ ^^^^^^^^^^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, Switch_NonUniformCondition_BarrierInDefault) {
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn foo() {
+ switch (non_uniform) {
+ default: {
+ workgroupBarrier();
+ break;
+ }
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:7:7 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:5:3 note: control flow depends on non-uniform value
+ switch (non_uniform) {
+ ^^^^^^
+
+test:5:11 note: reading from read_write storage buffer 'non_uniform' may result in a non-uniform value
+ switch (non_uniform) {
+ ^^^^^^^^^^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, Switch_NonUniformBreak) {
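+    // A break guarded by a non-uniform condition makes the statements that follow it in the same
+    // case block non-uniform.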
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+@group(0) @binding(0) var<uniform> condition : i32;
+
+fn foo() {
+ switch (condition) {
+ case 42: {
+ if (non_uniform == 42) {
+ break;
+ }
+ workgroupBarrier();
+ }
+ default: {
+ }
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:11:7 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:8:7 note: control flow depends on non-uniform value
+ if (non_uniform == 42) {
+ ^^
+
+test:8:11 note: reading from read_write storage buffer 'non_uniform' may result in a non-uniform value
+ if (non_uniform == 42) {
+ ^^^^^^^^^^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, Switch_NonUniformBreakInDifferentCase) {
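+    // Without fallthrough, a non-uniform break in one case does not affect the uniformity of a
+    // barrier in a different case.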
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+@group(0) @binding(0) var<uniform> condition : i32;
+
+fn foo() {
+ switch (condition) {
+ case 0: {
+ if (non_uniform == 42) {
+ break;
+ }
+ }
+ case 42: {
+ workgroupBarrier();
+ }
+ default: {
+ }
+ }
+}
+)";
+
+ RunTest(src, true);
+}
+
+TEST_F(UniformityAnalysisTest, Switch_NonUniformBreakInDifferentCase_Fallthrough) {
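+    // With fallthrough, the non-uniform break in the previous case does affect the barrier in the
+    // next case.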
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+@group(0) @binding(0) var<uniform> condition : i32;
+
+fn foo() {
+ switch (condition) {
+ case 0: {
+ if (non_uniform == 42) {
+ break;
+ }
+ fallthrough;
+ }
+ case 42: {
+ workgroupBarrier();
+ }
+ default: {
+ }
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:14:7 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:8:7 note: control flow depends on non-uniform value
+ if (non_uniform == 42) {
+ ^^
+
+test:8:11 note: reading from read_write storage buffer 'non_uniform' may result in a non-uniform value
+ if (non_uniform == 42) {
+ ^^^^^^^^^^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, Switch_VarBecomesNonUniformInDifferentCase_WithBreak) {
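+    // Without fallthrough, the non-uniform assignment in one case cannot reach the condition that
+    // guards the barrier in a different case.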
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+@group(0) @binding(0) var<uniform> condition : i32;
+
+fn foo() {
+ var x = 0;
+ switch (condition) {
+ case 0: {
+ x = non_uniform;
+ break;
+ }
+ case 42: {
+ if (x == 0) {
+ workgroupBarrier();
+ }
+ }
+ default: {
+ }
+ }
+}
+)";
+
+ RunTest(src, true);
+}
+
+TEST_F(UniformityAnalysisTest, Switch_VarBecomesNonUniformInDifferentCase_WithFallthrough) {
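+    // With fallthrough, the non-uniform assignment in the previous case reaches the condition that
+    // guards the barrier in the next case.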
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+@group(0) @binding(0) var<uniform> condition : i32;
+
+fn foo() {
+ var x = 0;
+ switch (condition) {
+ case 0: {
+ x = non_uniform;
+ fallthrough;
+ }
+ case 42: {
+ if (x == 0) {
+ workgroupBarrier();
+ }
+ }
+ default: {
+ }
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:14:9 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:13:7 note: control flow depends on non-uniform value
+ if (x == 0) {
+ ^^
+
+test:9:11 note: reading from read_write storage buffer 'non_uniform' may result in a non-uniform value
+ x = non_uniform;
+ ^^^^^^^^^^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, Switch_VarBecomesUniformInDifferentCase_WithBreak) {
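+    // The uniform assignment in case 0 does not reach case 42 (no fallthrough), so 'x' is still
+    // non-uniform at the condition that guards the barrier.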
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+@group(0) @binding(0) var<uniform> condition : i32;
+
+fn foo() {
+ var x = non_uniform;
+ switch (condition) {
+ case 0: {
+ x = 5;
+ break;
+ }
+ case 42: {
+ if (x == 0) {
+ workgroupBarrier();
+ }
+ }
+ default: {
+ }
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:14:9 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:13:7 note: control flow depends on non-uniform value
+ if (x == 0) {
+ ^^
+
+test:6:11 note: reading from read_write storage buffer 'non_uniform' may result in a non-uniform value
+ var x = non_uniform;
+ ^^^^^^^^^^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, Switch_VarBecomesUniformInDifferentCase_WithFallthrough) {
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+@group(0) @binding(0) var<uniform> condition : i32;
+
+fn foo() {
+ var x = non_uniform;
+ switch (condition) {
+ case 0: {
+ x = 5;
+ fallthrough;
+ }
+ case 42: {
+ if (x == 0) {
+ workgroupBarrier();
+ }
+ }
+ default: {
+ }
+ }
+}
+)";
+
+ RunTest(src, true);
+}
+
+TEST_F(UniformityAnalysisTest, Switch_VarBecomesNonUniformInCase_BarrierAfter) {
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+@group(0) @binding(0) var<uniform> condition : i32;
+
+fn foo() {
+ var x = 0;
+ switch (condition) {
+ case 0: {
+ x = non_uniform;
+ }
+ case 42: {
+ x = 5;
+ }
+ default: {
+ x = 6;
+ }
+ }
+ if (x == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:19:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:18:3 note: control flow depends on non-uniform value
+ if (x == 0) {
+ ^^
+
+test:9:11 note: reading from read_write storage buffer 'non_uniform' may result in a non-uniform value
+ x = non_uniform;
+ ^^^^^^^^^^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, Switch_VarBecomesUniformInAllCases_BarrierAfter) {
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+@group(0) @binding(0) var<uniform> condition : i32;
+
+fn foo() {
+ var x = non_uniform;
+ switch (condition) {
+ case 0: {
+ x = 4;
+ }
+ case 42: {
+ x = 5;
+ }
+ default: {
+ x = 6;
+ }
+ }
+ if (x == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, true);
+}
+
+TEST_F(UniformityAnalysisTest, Switch_VarBecomesUniformInSomeCases_BarrierAfter) {
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+@group(0) @binding(0) var<uniform> condition : i32;
+
+fn foo() {
+ var x = non_uniform;
+ switch (condition) {
+ case 0: {
+ x = 4;
+ }
+ case 42: {
+ }
+ default: {
+ x = 6;
+ }
+ }
+ if (x == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:18:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:17:3 note: control flow depends on non-uniform value
+ if (x == 0) {
+ ^^
+
+test:6:11 note: reading from read_write storage buffer 'non_uniform' may result in a non-uniform value
+ var x = non_uniform;
+ ^^^^^^^^^^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, Switch_VarBecomesUniformInCasesThatDontReturn_BarrierAfter) {
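+    // The case that returns never reaches the code after the switch, and every other case assigns
+    // a uniform value to 'x', so 'x' is uniform at the barrier.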
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+@group(0) @binding(0) var<uniform> condition : i32;
+
+fn foo() {
+ var x = non_uniform;
+ switch (condition) {
+ case 0: {
+ x = 4;
+ }
+ case 42: {
+ return;
+ }
+ default: {
+ x = 6;
+ }
+ }
+ if (x == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, true);
+}
+
+TEST_F(UniformityAnalysisTest, Switch_VarBecomesUniformAfterConditionalBreak_BarrierAfter) {
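+    // The conditional break in the default case may skip the uniform assignment, so 'x' may still
+    // be non-uniform after the switch.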
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+@group(0) @binding(0) var<uniform> condition : i32;
+
+fn foo() {
+ var x = non_uniform;
+ switch (condition) {
+ case 0: {
+ x = 4;
+ }
+ case 42: {
+ }
+ default: {
+ if (false) {
+ break;
+ }
+ x = 6;
+ }
+ }
+ if (x == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:21:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:20:3 note: control flow depends on non-uniform value
+ if (x == 0) {
+ ^^
+
+test:6:11 note: reading from read_write storage buffer 'non_uniform' may result in a non-uniform value
+ var x = non_uniform;
+ ^^^^^^^^^^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, Switch_NestedInLoop_VarBecomesNonUniformWithBreak_BarrierInLoop) {
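+    // The break only exits the switch, not the loop, so the non-uniform assignment can reach the
+    // condition that guards the barrier on the next loop iteration.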
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+@group(0) @binding(0) var<uniform> condition : i32;
+
+fn foo() {
+ var x = 0;
+ loop {
+ if (x == 0) {
+ workgroupBarrier();
+ break;
+ }
+
+ switch (condition) {
+ case 0: {
+ x = non_uniform;
+ break;
+ }
+ default: {
+ x = 6;
+ }
+ }
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:9:7 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:8:5 note: control flow depends on non-uniform value
+ if (x == 0) {
+ ^^
+
+test:15:13 note: reading from read_write storage buffer 'non_uniform' may result in a non-uniform value
+ x = non_uniform;
+ ^^^^^^^^^^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, Switch_NestedInLoop_VarBecomesNonUniformWithBreak_BarrierAfterLoop) {
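+    // 'x' is overwritten with a uniform value after the switch on every iteration, so it is
+    // uniform at the barrier after the loop.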
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+@group(0) @binding(0) var<uniform> condition : i32;
+
+fn foo() {
+ var x = 0;
+ loop {
+ if (false) {
+ break;
+ }
+ switch (condition) {
+ case 0: {
+ x = non_uniform;
+ break;
+ }
+ default: {
+ x = 6;
+ }
+ }
+ x = 5;
+ }
+ if (x == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, true);
+}
+
+TEST_F(UniformityAnalysisTest, Switch_NonUniformCondition_Reconverge) {
+    // Switch statements reconverge at exit, so test that we can call workgroupBarrier() after a
+    // switch statement with a non-uniform selector.
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn foo() {
+ switch (non_uniform) {
+ default: {
+ break;
+ }
+ }
+ workgroupBarrier();
+}
+)";
+
+ RunTest(src, true);
+}
+
+TEST_F(UniformityAnalysisTest, Switch_NonUniformBreak_Reconverge) {
+ // Switch statements reconverge at exit, so test that we can call workgroupBarrier() after a
+ // switch statement that contains a non-uniform conditional break.
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn foo() {
+ switch (42) {
+ default: {
+ if (non_uniform == 0) {
+ break;
+ }
+ break;
+ }
+ }
+ workgroupBarrier();
+}
+)";
+
+ RunTest(src, true);
+}
+
+TEST_F(UniformityAnalysisTest, Switch_NonUniformFunctionCall_Reconverge) {
+ // Switch statements reconverge at exit, so test that we can call workgroupBarrier() after a
+ // switch statement that contains a call to a function that causes non-uniform control flow.
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> n : i32;
+
+fn bar() {
+ if (n == 42) {
+ return;
+ } else {
+ return;
+ }
+}
+
+fn foo() {
+ switch (42) {
+ default: {
+ bar();
+ break;
+ }
+ }
+ workgroupBarrier();
+}
+)";
+
+ RunTest(src, true);
+}
+
+TEST_F(UniformityAnalysisTest, Switch_NonUniformFunctionDiscard_NoReconvergence) {
+ // Switch statements should not reconverge after non-uniform discards.
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> n : i32;
+
+fn bar() {
+ if (n == 42) {
+ discard;
+ }
+}
+
+fn foo() {
+ switch (42) {
+ default: {
+ bar();
+ break;
+ }
+ }
+ workgroupBarrier();
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:17:3 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:13:7 note: calling 'bar' may cause subsequent control flow to be non-uniform
+ bar();
+ ^^^
+
+test:5:3 note: control flow depends on non-uniform value
+ if (n == 42) {
+ ^^
+
+test:5:7 note: reading from read_write storage buffer 'n' may result in a non-uniform value
+ if (n == 42) {
+ ^
+)");
+}
+
+////////////////////////////////////////////////////////////////////////////////
+/// Pointer tests.
+////////////////////////////////////////////////////////////////////////////////
+
+TEST_F(UniformityAnalysisTest, AssignNonUniformThroughPointer) {
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn foo() {
+ var v = 0;
+ *&v = non_uniform;
+ if (v == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:8:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:7:3 note: control flow depends on non-uniform value
+ if (v == 0) {
+ ^^
+
+test:6:9 note: reading from read_write storage buffer 'non_uniform' may result in a non-uniform value
+ *&v = non_uniform;
+ ^^^^^^^^^^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, AssignNonUniformThroughCapturedPointer) {
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn foo() {
+ var v = 0;
+ let pv = &v;
+ *pv = non_uniform;
+ if (v == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:9:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:8:3 note: control flow depends on non-uniform value
+ if (v == 0) {
+ ^^
+
+test:7:9 note: reading from read_write storage buffer 'non_uniform' may result in a non-uniform value
+ *pv = non_uniform;
+ ^^^^^^^^^^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, AssignUniformThroughPointer) {
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn foo() {
+ var v = non_uniform;
+ *&v = 42;
+ if (v == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, true);
+}
+
+TEST_F(UniformityAnalysisTest, AssignUniformThroughCapturedPointer) {
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn foo() {
+ var v = non_uniform;
+ let pv = &v;
+ *pv = 42;
+ if (v == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, true);
+}
+
+TEST_F(UniformityAnalysisTest, AssignUniformThroughCapturedPointer_InNonUniformControlFlow) {
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn foo() {
+ var v = 0;
+ let pv = &v;
+ if (non_uniform == 0) {
+ *pv = 42;
+ }
+ if (v == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:11:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:7:3 note: control flow depends on non-uniform value
+ if (non_uniform == 0) {
+ ^^
+
+test:7:7 note: reading from read_write storage buffer 'non_uniform' may result in a non-uniform value
+ if (non_uniform == 0) {
+ ^^^^^^^^^^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, LoadNonUniformThroughPointer) {
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn foo() {
+ var v = non_uniform;
+ if (*&v == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:7:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:6:3 note: control flow depends on non-uniform value
+ if (*&v == 0) {
+ ^^
+
+test:5:11 note: reading from read_write storage buffer 'non_uniform' may result in a non-uniform value
+ var v = non_uniform;
+ ^^^^^^^^^^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, LoadNonUniformThroughCapturedPointer) {
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn foo() {
+ var v = non_uniform;
+ let pv = &v;
+ if (*pv == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:8:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:7:3 note: control flow depends on non-uniform value
+ if (*pv == 0) {
+ ^^
+
+test:5:11 note: reading from read_write storage buffer 'non_uniform' may result in a non-uniform value
+ var v = non_uniform;
+ ^^^^^^^^^^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, LoadNonUniformThroughPointerParameter) {
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn bar(p : ptr<function, i32>) {
+ if (*p == 0) {
+ workgroupBarrier();
+ }
+}
+
+fn foo() {
+ var v = non_uniform;
+ bar(&v);
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:12:7 warning: parameter 'p' of 'bar' must be uniform
+ bar(&v);
+ ^
+
+test:6:5 note: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:11:11 note: reading from read_write storage buffer 'non_uniform' may result in a non-uniform value
+ var v = non_uniform;
+ ^^^^^^^^^^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, LoadUniformThroughPointer) {
+ std::string src = R"(
+fn foo() {
+ var v = 42;
+ if (*&v == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, true);
+}
+
+TEST_F(UniformityAnalysisTest, LoadUniformThroughCapturedPointer) {
+ std::string src = R"(
+fn foo() {
+ var v = 42;
+ let pv = &v;
+ if (*pv == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, true);
+}
+
+TEST_F(UniformityAnalysisTest, LoadUniformThroughPointerParameter) {
+ std::string src = R"(
+fn bar(p : ptr<function, i32>) {
+ if (*p == 0) {
+ workgroupBarrier();
+ }
+}
+
+fn foo() {
+ var v = 42;
+ bar(&v);
+}
+)";
+
+ RunTest(src, true);
+}
+
+TEST_F(UniformityAnalysisTest, StoreNonUniformAfterCapturingPointer) {
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn foo() {
+ var v = 0;
+ let pv = &v;
+ v = non_uniform;
+ if (*pv == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:9:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:8:3 note: control flow depends on non-uniform value
+ if (*pv == 0) {
+ ^^
+
+test:7:7 note: reading from read_write storage buffer 'non_uniform' may result in a non-uniform value
+ v = non_uniform;
+ ^^^^^^^^^^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, StoreUniformAfterCapturingPointer) {
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn foo() {
+ var v = non_uniform;
+ let pv = &v;
+ v = 42;
+ if (*pv == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, true);
+}
+
+TEST_F(UniformityAnalysisTest, AssignNonUniformThroughLongChainOfPointers) {
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn foo() {
+ var v = 0;
+ let pv1 = &*&v;
+ let pv2 = &*&*pv1;
+ *&*&*pv2 = non_uniform;
+ if (v == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:10:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:9:3 note: control flow depends on non-uniform value
+ if (v == 0) {
+ ^^
+
+test:8:14 note: reading from read_write storage buffer 'non_uniform' may result in a non-uniform value
+ *&*&*pv2 = non_uniform;
+ ^^^^^^^^^^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, LoadNonUniformThroughLongChainOfPointers) {
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn foo() {
+ var v = non_uniform;
+ let pv1 = &*&v;
+ let pv2 = &*&*pv1;
+ if (*&*&*pv2 == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:9:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:8:3 note: control flow depends on non-uniform value
+ if (*&*&*pv2 == 0) {
+ ^^
+
+test:5:11 note: reading from read_write storage buffer 'non_uniform' may result in a non-uniform value
+ var v = non_uniform;
+ ^^^^^^^^^^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, AssignUniformThenNonUniformThroughDifferentPointer) {
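+    // Both pointers alias the same variable, so the non-uniform store through 'pv2' is visible
+    // through 'pv1' at the condition.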
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn foo() {
+ var v = 0;
+ let pv1 = &v;
+ let pv2 = &v;
+ *pv1 = 42;
+ *pv2 = non_uniform;
+ if (*pv1 == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:11:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:10:3 note: control flow depends on non-uniform value
+ if (*pv1 == 0) {
+ ^^
+
+test:9:10 note: reading from read_write storage buffer 'non_uniform' may result in a non-uniform value
+ *pv2 = non_uniform;
+ ^^^^^^^^^^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, AssignNonUniformThenUniformThroughDifferentPointer) {
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn foo() {
+ var v = 0;
+ let pv1 = &v;
+ let pv2 = &v;
+ *pv1 = non_uniform;
+ *pv2 = 42;
+ if (*pv1 == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, true);
+}
+
+TEST_F(UniformityAnalysisTest, UnmodifiedPointerParameterNonUniform) {
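+    // Passing a pointer to a function that never writes through it leaves the pointee's
+    // non-uniform value unchanged.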
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn bar(p : ptr<function, i32>) {
+}
+
+fn foo() {
+ var v = non_uniform;
+ bar(&v);
+ if (v == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:11:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:10:3 note: control flow depends on non-uniform value
+ if (v == 0) {
+ ^^
+
+test:8:11 note: reading from read_write storage buffer 'non_uniform' may result in a non-uniform value
+ var v = non_uniform;
+ ^^^^^^^^^^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, UnmodifiedPointerParameterUniform) {
+ std::string src = R"(
+fn bar(p : ptr<function, i32>) {
+}
+
+fn foo() {
+ var v = 42;
+ bar(&v);
+ if (v == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, true);
+}
+
+TEST_F(UniformityAnalysisTest, AssignNonUniformThroughPointerInFunctionCall) {
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn bar(p : ptr<function, i32>) {
+ *p = non_uniform;
+}
+
+fn foo() {
+ var v = 0;
+ bar(&v);
+ if (v == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:12:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:11:3 note: control flow depends on non-uniform value
+ if (v == 0) {
+ ^^
+
+test:10:7 note: pointer contents may become non-uniform after calling 'bar'
+ bar(&v);
+ ^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, AssignUniformThroughPointerInFunctionCall) {
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn bar(p : ptr<function, i32>) {
+ *p = 42;
+}
+
+fn foo() {
+ var v = non_uniform;
+ bar(&v);
+ if (v == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, true);
+}
+
+TEST_F(UniformityAnalysisTest, AssignNonUniformThroughPointerInFunctionCallViaArg) {
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn bar(p : ptr<function, i32>, a : i32) {
+ *p = a;
+}
+
+fn foo() {
+ var v = 0;
+ bar(&v, non_uniform);
+ if (v == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:12:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:11:3 note: control flow depends on non-uniform value
+ if (v == 0) {
+ ^^
+
+test:10:11 note: reading from read_write storage buffer 'non_uniform' may result in a non-uniform value
+ bar(&v, non_uniform);
+ ^^^^^^^^^^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, AssignNonUniformThroughPointerInFunctionCallViaPointerArg) {
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn bar(p : ptr<function, i32>, a : ptr<function, i32>) {
+ *p = *a;
+}
+
+fn foo() {
+ var v = 0;
+ var a = non_uniform;
+ bar(&v, &a);
+ if (v == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:13:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:12:3 note: control flow depends on non-uniform value
+ if (v == 0) {
+ ^^
+
+test:10:11 note: reading from read_write storage buffer 'non_uniform' may result in a non-uniform value
+ var a = non_uniform;
+ ^^^^^^^^^^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, AssignUniformThroughPointerInFunctionCallViaArg) {
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn bar(p : ptr<function, i32>, a : i32) {
+ *p = a;
+}
+
+fn foo() {
+ var v = non_uniform;
+ bar(&v, 42);
+ if (v == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, true);
+}
+
+TEST_F(UniformityAnalysisTest, AssignUniformThroughPointerInFunctionCallViaPointerArg) {
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn bar(p : ptr<function, i32>, a : ptr<function, i32>) {
+ *p = *a;
+}
+
+fn foo() {
+ var v = non_uniform;
+ var a = 42;
+ bar(&v, &a);
+ if (v == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, true);
+}
+
+TEST_F(UniformityAnalysisTest, AssignNonUniformThroughPointerInFunctionCallChain) {
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn f3(p : ptr<function, i32>, a : ptr<function, i32>) {
+ *p = *a;
+}
+
+fn f2(p : ptr<function, i32>, a : ptr<function, i32>) {
+ f3(p, a);
+}
+
+fn f1(p : ptr<function, i32>, a : ptr<function, i32>) {
+ f2(p, a);
+}
+
+fn foo() {
+ var v = 0;
+ var a = non_uniform;
+ f1(&v, &a);
+ if (v == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:21:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:20:3 note: control flow depends on non-uniform value
+ if (v == 0) {
+ ^^
+
+test:18:11 note: reading from read_write storage buffer 'non_uniform' may result in a non-uniform value
+ var a = non_uniform;
+ ^^^^^^^^^^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, AssignUniformThroughPointerInFunctionCallChain) {
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn f3(p : ptr<function, i32>, a : ptr<function, i32>) {
+ *p = *a;
+}
+
+fn f2(p : ptr<function, i32>, a : ptr<function, i32>) {
+ f3(p, a);
+}
+
+fn f1(p : ptr<function, i32>, a : ptr<function, i32>) {
+ f2(p, a);
+}
+
+fn foo() {
+ var v = non_uniform;
+ var a = 42;
+ f1(&v, &a);
+ if (v == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, true);
+}
+
+TEST_F(UniformityAnalysisTest, MakePointerParamUniformInReturnExpression) {
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn zoo(p : ptr<function, i32>) -> i32 {
+ *p = 5;
+ return 6;
+}
+
+fn bar(p : ptr<function, i32>) -> i32 {
+ *p = non_uniform;
+ return zoo(p);
+}
+
+fn foo() {
+ var v = 0;
+ bar(&v);
+ if (v == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, true);
+}
+
+TEST_F(UniformityAnalysisTest, MakePointerParamNonUniformInReturnExpression) {
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn zoo(p : ptr<function, i32>) -> i32 {
+ *p = non_uniform;
+ return 6;
+}
+
+fn bar(p : ptr<function, i32>) -> i32 {
+ *p = 5;
+ return zoo(p);
+}
+
+fn foo() {
+ var v = 0;
+ bar(&v);
+ if (v == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:18:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:17:3 note: control flow depends on non-uniform value
+ if (v == 0) {
+ ^^
+
+test:16:7 note: pointer contents may become non-uniform after calling 'bar'
+ bar(&v);
+ ^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, PointerParamAssignNonUniformInTrueAndUniformInFalse) {
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn bar(p : ptr<function, i32>) {
+ if (true) {
+ *p = non_uniform;
+ } else {
+ *p = 5;
+ }
+}
+
+fn foo() {
+ var v = 0;
+ bar(&v);
+ if (v == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:16:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:15:3 note: control flow depends on non-uniform value
+ if (v == 0) {
+ ^^
+
+test:14:7 note: pointer contents may become non-uniform after calling 'bar'
+ bar(&v);
+ ^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, ConditionalAssignNonUniformToPointerParamAndReturn) {
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn bar(p : ptr<function, i32>) {
+ if (true) {
+ *p = non_uniform;
+ return;
+ }
+ *p = 5;
+}
+
+fn foo() {
+ var v = 0;
+ bar(&v);
+ if (v == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:16:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:15:3 note: control flow depends on non-uniform value
+ if (v == 0) {
+ ^^
+
+test:14:7 note: pointer contents may become non-uniform after calling 'bar'
+ bar(&v);
+ ^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, ConditionalAssignNonUniformToPointerParamAndBreakFromSwitch) {
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+@group(0) @binding(1) var<uniform> condition : i32;
+
+fn bar(p : ptr<function, i32>) {
+ switch (condition) {
+ case 0 {
+ if (true) {
+ *p = non_uniform;
+ break;
+ }
+ *p = 5;
+ }
+ default {
+ *p = 6;
+ }
+ }
+}
+
+fn foo() {
+ var v = 0;
+ bar(&v);
+ if (v == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:24:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:23:3 note: control flow depends on non-uniform value
+ if (v == 0) {
+ ^^
+
+test:22:7 note: pointer contents may become non-uniform after calling 'bar'
+ bar(&v);
+ ^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, ConditionalAssignNonUniformToPointerParamAndBreakFromLoop) {
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn bar(p : ptr<function, i32>) {
+ loop {
+ if (true) {
+ *p = non_uniform;
+ break;
+ }
+ *p = 5;
+ }
+}
+
+fn foo() {
+ var v = 0;
+ bar(&v);
+ if (v == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:18:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:17:3 note: control flow depends on non-uniform value
+ if (v == 0) {
+ ^^
+
+test:16:7 note: pointer contents may become non-uniform after calling 'bar'
+ bar(&v);
+ ^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, ConditionalAssignNonUniformToPointerParamAndContinue) {
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn foo(p : ptr<function, i32>) {
+ loop {
+ if (*p == 0) {
+ workgroupBarrier();
+ break;
+ }
+
+ if (true) {
+ *p = non_uniform;
+ continue;
+ }
+ *p = 5;
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:7:7 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:6:5 note: control flow depends on non-uniform value
+ if (*p == 0) {
+ ^^
+
+test:12:12 note: reading from read_write storage buffer 'non_uniform' may result in a non-uniform value
+ *p = non_uniform;
+ ^^^^^^^^^^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, PointerParamMaybeBecomesUniform) {
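+    // The callee only assigns a uniform value to the pointee on one path, so the analysis
+    // conservatively treats the pointee as still non-uniform after the call.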
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn bar(p : ptr<function, i32>) {
+ if (true) {
+ *p = 5;
+ return;
+ }
+}
+
+fn foo() {
+ var v = non_uniform;
+ bar(&v);
+ if (v == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:15:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:14:3 note: control flow depends on non-uniform value
+ if (v == 0) {
+ ^^
+
+test:12:11 note: reading from read_write storage buffer 'non_uniform' may result in a non-uniform value
+ var v = non_uniform;
+ ^^^^^^^^^^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, NonUniformPointerParameterBecomesUniform_AfterUse) {
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn bar(a : ptr<function, i32>, b : ptr<function, i32>) {
+ *b = *a;
+ *a = 0;
+}
+
+fn foo() {
+ var a = non_uniform;
+ var b = 0;
+ bar(&a, &b);
+ if (b == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:14:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:13:3 note: control flow depends on non-uniform value
+ if (b == 0) {
+ ^^
+
+test:10:11 note: reading from read_write storage buffer 'non_uniform' may result in a non-uniform value
+ var a = non_uniform;
+ ^^^^^^^^^^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, NonUniformPointerParameterBecomesUniform_BeforeUse) {
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn bar(a : ptr<function, i32>, b : ptr<function, i32>) {
+ *a = 0;
+ *b = *a;
+}
+
+fn foo() {
+ var a = non_uniform;
+ var b = 0;
+ bar(&a, &b);
+ if (b == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, true);
+}
+
+TEST_F(UniformityAnalysisTest, UniformPointerParameterBecomesNonUniform_BeforeUse) {
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn bar(a : ptr<function, i32>, b : ptr<function, i32>) {
+ *a = non_uniform;
+ *b = *a;
+}
+
+fn foo() {
+ var a = 0;
+ var b = 0;
+ bar(&a, &b);
+ if (b == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:14:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:13:3 note: control flow depends on non-uniform value
+ if (b == 0) {
+ ^^
+
+test:12:11 note: pointer contents may become non-uniform after calling 'bar'
+ bar(&a, &b);
+ ^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, UniformPointerParameterBecomesNonUniform_AfterUse) {
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn bar(a : ptr<function, i32>, b : ptr<function, i32>) {
+ *b = *a;
+ *a = non_uniform;
+}
+
+fn foo() {
+ var a = 0;
+ var b = 0;
+ bar(&a, &b);
+ if (b == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, true);
+}
+
+TEST_F(UniformityAnalysisTest, NonUniformPointerParameterUpdatedInPlace) {
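+    // An in-place update of the pointee depends on its previous (non-uniform) value, so it
+    // remains non-uniform.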
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn bar(p : ptr<function, i32>) {
+ (*p)++;
+}
+
+fn foo() {
+ var v = non_uniform;
+ bar(&v);
+ if (v == 1) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:12:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:11:3 note: control flow depends on non-uniform value
+ if (v == 1) {
+ ^^
+
+test:9:11 note: reading from read_write storage buffer 'non_uniform' may result in a non-uniform value
+ var v = non_uniform;
+ ^^^^^^^^^^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, MultiplePointerParametersBecomeNonUniform) {
+ // The analysis traverses the tree for each pointer parameter, and we need to make sure that we
+ // reset the "visited" state of nodes in between these traversals to properly capture each of
+ // their uniformity states.
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn bar(a : ptr<function, i32>, b : ptr<function, i32>) {
+ *a = non_uniform;
+ *b = non_uniform;
+}
+
+fn foo() {
+ var a = 0;
+ var b = 0;
+ bar(&a, &b);
+ if (b == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:14:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:13:3 note: control flow depends on non-uniform value
+ if (b == 0) {
+ ^^
+
+test:12:11 note: pointer contents may become non-uniform after calling 'bar'
+ bar(&a, &b);
+ ^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, MultiplePointerParametersWithEdgesToEachOther) {
+ // The analysis traverses the tree for each pointer parameter, and we need to make sure that we
+ // reset the "visited" state of nodes in between these traversals to properly capture each of
+ // their uniformity states.
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn bar(a : ptr<function, i32>, b : ptr<function, i32>, c : ptr<function, i32>) {
+ *a = *a;
+ *b = *b;
+ *c = *a + *b;
+}
+
+fn foo() {
+ var a = non_uniform;
+ var b = 0;
+ var c = 0;
+ bar(&a, &b, &c);
+ if (c == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:16:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:15:3 note: control flow depends on non-uniform value
+ if (c == 0) {
+ ^^
+
+test:11:11 note: reading from read_write storage buffer 'non_uniform' may result in a non-uniform value
+ var a = non_uniform;
+ ^^^^^^^^^^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, MaximumNumberOfPointerParameters) {
+ // Create a function with the maximum number of parameters, all pointers, to stress the
+ // quadratic nature of the analysis.
+ ProgramBuilder b;
+ auto& ty = b.ty;
+
+ // fn foo(p0 : ptr<function, i32>, p1 : ptr<function, i32>, ...) {
+ // let rhs = *p0 + *p1 + ... + *p244;
+ // *p1 = rhs;
+ // *p2 = rhs;
+ // ...
+ // *p254 = rhs;
+ // }
+ ast::VariableList params;
+ ast::StatementList foo_body;
+ const ast::Expression* rhs_init = b.Deref("p0");
+ for (int i = 1; i < 255; i++) {
+ rhs_init = b.Add(rhs_init, b.Deref("p" + std::to_string(i)));
+ }
+ foo_body.push_back(b.Decl(b.Let("rhs", nullptr, rhs_init)));
+ for (int i = 0; i < 255; i++) {
+ params.push_back(
+ b.Param("p" + std::to_string(i), ty.pointer(ty.i32(), ast::StorageClass::kFunction)));
+ if (i > 0) {
+ foo_body.push_back(b.Assign(b.Deref("p" + std::to_string(i)), "rhs"));
+ }
+ }
+ b.Func("foo", std::move(params), ty.void_(), foo_body);
+
+ // var<private> non_uniform_global : i32;
+ // fn main() {
+ // var v0 : i32;
+ // var v1 : i32;
+ // ...
+ // var v254 : i32;
+ // v0 = non_uniform_global;
+ // foo(&v0, &v1, ..., &v254);
+ // if (v254 == 0) {
+ // workgroupBarrier();
+ // }
+ // }
+ b.Global("non_uniform_global", ty.i32(), ast::StorageClass::kPrivate);
+ ast::StatementList main_body;
+ ast::ExpressionList args;
+ for (int i = 0; i < 255; i++) {
+ auto name = "v" + std::to_string(i);
+ main_body.push_back(b.Decl(b.Var(name, ty.i32())));
+ args.push_back(b.AddressOf(name));
+ }
+ main_body.push_back(b.Assign("v0", "non_uniform_global"));
+ main_body.push_back(b.CallStmt(b.create<ast::CallExpression>(b.Expr("foo"), args)));
+ main_body.push_back(
+ b.If(b.Equal("v254", 0_i), b.Block(b.CallStmt(b.Call("workgroupBarrier")))));
+ b.Func("main", {}, ty.void_(), main_body);
+
+ // TODO(jrprice): Expect false when uniformity issues become errors.
+ EXPECT_TRUE(RunTest(std::move(b))) << error_;
+ EXPECT_EQ(error_,
+ R"(warning: 'workgroupBarrier' must only be called from uniform control flow
+note: control flow depends on non-uniform value
+note: reading from module-scope private variable 'non_uniform_global' may result in a non-uniform value)");
+}
+
+////////////////////////////////////////////////////////////////////////////////
+/// Tests to cover access to aggregate types.
+////////////////////////////////////////////////////////////////////////////////
+
+TEST_F(UniformityAnalysisTest, VectorElement_Uniform) {
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read> v : vec4<i32>;
+
+fn foo() {
+ if (v[2] == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, true);
+}
+
+TEST_F(UniformityAnalysisTest, VectorElement_NonUniform) {
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> v : vec4<i32>;
+
+fn foo() {
+ if (v[2] == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:6:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:5:3 note: control flow depends on non-uniform value
+ if (v[2] == 0) {
+ ^^
+
+test:5:7 note: reading from read_write storage buffer 'v' may result in a non-uniform value
+ if (v[2] == 0) {
+ ^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, VectorElement_BecomesNonUniform_BeforeCondition) {
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> rw : i32;
+
+fn foo() {
+ var v : vec4<i32>;
+ v[2] = rw;
+ if (v[2] == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:8:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:7:3 note: control flow depends on non-uniform value
+ if (v[2] == 0) {
+ ^^
+
+test:6:10 note: reading from read_write storage buffer 'rw' may result in a non-uniform value
+ v[2] = rw;
+ ^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, VectorElement_BecomesNonUniform_AfterCondition) {
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> rw : i32;
+
+fn foo() {
+ var v : vec4<i32>;
+ if (v[2] == 0) {
+ v[2] = rw;
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, true);
+}
+
+TEST_F(UniformityAnalysisTest, VectorElement_DifferentElementBecomesNonUniform) {
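+    // The analysis does not track uniformity per vector element, so assigning a non-uniform value
+    // to one element makes accesses to any element non-uniform.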
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> rw : i32;
+
+fn foo() {
+ var v : vec4<i32>;
+ v[1] = rw;
+ if (v[2] == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:8:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:7:3 note: control flow depends on non-uniform value
+ if (v[2] == 0) {
+ ^^
+
+test:6:10 note: reading from read_write storage buffer 'rw' may result in a non-uniform value
+ v[1] = rw;
+ ^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, VectorElement_ElementBecomesUniform) {
+ // For aggregate types, we conservatively consider them to be forever non-uniform once they
+ // become non-uniform. Test that after assigning a uniform value to an element, that element is
+ // still considered to be non-uniform.
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> rw : i32;
+
+fn foo() {
+ var v : vec4<i32>;
+ v[1] = rw;
+ v[1] = 42;
+ if (v[1] == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:9:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:8:3 note: control flow depends on non-uniform value
+ if (v[1] == 0) {
+ ^^
+
+test:6:10 note: reading from read_write storage buffer 'rw' may result in a non-uniform value
+ v[1] = rw;
+ ^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, VectorElement_DifferentElementBecomesUniform) {
+ // For aggregate types, we conservatively consider them to be forever non-uniform once they
+ // become non-uniform. Test that after assigning a uniform value to an element, the whole vector
+ // is still considered to be non-uniform.
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> rw : i32;
+
+fn foo() {
+ var v : vec4<i32>;
+ v[1] = rw;
+ v[2] = 42;
+ if (v[1] == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:9:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:8:3 note: control flow depends on non-uniform value
+ if (v[1] == 0) {
+ ^^
+
+test:6:10 note: reading from read_write storage buffer 'rw' may result in a non-uniform value
+ v[1] = rw;
+ ^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, VectorElement_NonUniform_AnyBuiltin) {
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform_global : i32;
+
+fn foo() {
+ var v : vec4<i32>;
+ v[1] = non_uniform_global;
+ if (any(v == vec4(42))) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:8:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:7:3 note: control flow depends on non-uniform value
+ if (any(v == vec4(42))) {
+ ^^
+
+test:6:10 note: reading from read_write storage buffer 'non_uniform_global' may result in a non-uniform value
+ v[1] = non_uniform_global;
+ ^^^^^^^^^^^^^^^^^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, StructMember_Uniform) {
+ std::string src = R"(
+struct S {
+ a : i32,
+ b : i32,
+}
+@group(0) @binding(0) var<storage, read> s : S;
+
+fn foo() {
+ if (s.b == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, true);
+}
+
+TEST_F(UniformityAnalysisTest, StructMember_NonUniform) {
+ std::string src = R"(
+struct S {
+ a : i32,
+ b : i32,
+}
+@group(0) @binding(0) var<storage, read_write> s : S;
+
+fn foo() {
+ if (s.b == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:10:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:9:3 note: control flow depends on non-uniform value
+ if (s.b == 0) {
+ ^^
+
+test:9:7 note: reading from read_write storage buffer 's' may result in a non-uniform value
+ if (s.b == 0) {
+ ^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, StructMember_BecomesNonUniform_BeforeCondition) {
+ std::string src = R"(
+struct S {
+ a : i32,
+ b : i32,
+}
+@group(0) @binding(0) var<storage, read_write> rw : i32;
+
+fn foo() {
+ var s : S;
+ s.b = rw;
+ if (s.b == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:12:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:11:3 note: control flow depends on non-uniform value
+ if (s.b == 0) {
+ ^^
+
+test:10:9 note: reading from read_write storage buffer 'rw' may result in a non-uniform value
+ s.b = rw;
+ ^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, StructMember_BecomesNonUniform_AfterCondition) {
+ std::string src = R"(
+struct S {
+ a : i32,
+ b : i32,
+}
+@group(0) @binding(0) var<storage, read_write> rw : i32;
+
+fn foo() {
+ var s : S;
+ if (s.b == 0) {
+ s.b = rw;
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, true);
+}
+
+TEST_F(UniformityAnalysisTest, StructMember_DifferentMemberBecomesNonUniform) {
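+    // The analysis does not track uniformity per struct member, so a non-uniform assignment to
+    // one member affects accesses to all members.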
+ std::string src = R"(
+struct S {
+ a : i32,
+ b : i32,
+}
+@group(0) @binding(0) var<storage, read_write> rw : i32;
+
+fn foo() {
+ var s : S;
+ s.a = rw;
+ if (s.b == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:12:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:11:3 note: control flow depends on non-uniform value
+ if (s.b == 0) {
+ ^^
+
+test:10:9 note: reading from read_write storage buffer 'rw' may result in a non-uniform value
+ s.a = rw;
+ ^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, StructMember_MemberBecomesUniform) {
+ // For aggregate types, we conservatively consider them to be forever non-uniform once they
+ // become non-uniform. Test that after assigning a uniform value to a member, that member is
+ // still considered to be non-uniform.
+ std::string src = R"(
+struct S {
+ a : i32,
+ b : i32,
+}
+@group(0) @binding(0) var<storage, read_write> rw : i32;
+
+fn foo() {
+ var s : S;
+ s.a = rw;
+ s.a = 0;
+ if (s.a == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:13:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:12:3 note: control flow depends on non-uniform value
+ if (s.a == 0) {
+ ^^
+
+test:10:9 note: reading from read_write storage buffer 'rw' may result in a non-uniform value
+ s.a = rw;
+ ^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, StructMember_DifferentMemberBecomesUniform) {
+ // For aggregate types, we conservatively consider them to be forever non-uniform once they
+ // become non-uniform. Test that after assigning a uniform value to a member, the whole struct
+ // is still considered to be non-uniform.
+ std::string src = R"(
+struct S {
+ a : i32,
+ b : i32,
+}
+@group(0) @binding(0) var<storage, read_write> rw : i32;
+
+fn foo() {
+ var s : S;
+ s.a = rw;
+ s.b = 0;
+ if (s.a == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:13:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:12:3 note: control flow depends on non-uniform value
+ if (s.a == 0) {
+ ^^
+
+test:10:9 note: reading from read_write storage buffer 'rw' may result in a non-uniform value
+ s.a = rw;
+ ^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, ArrayElement_Uniform) {
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read> arr : array<i32>;
+
+fn foo() {
+ if (arr[7] == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, true);
+}
+
+TEST_F(UniformityAnalysisTest, ArrayElement_NonUniform) {
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> arr : array<i32>;
+
+fn foo() {
+ if (arr[7] == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:6:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:5:3 note: control flow depends on non-uniform value
+ if (arr[7] == 0) {
+ ^^
+
+test:5:7 note: reading from read_write storage buffer 'arr' may result in a non-uniform value
+ if (arr[7] == 0) {
+ ^^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, ArrayElement_BecomesNonUniform_BeforeCondition) {
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> rw : i32;
+
+fn foo() {
+ var arr : array<i32, 4>;
+ arr[2] = rw;
+ if (arr[2] == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:8:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:7:3 note: control flow depends on non-uniform value
+ if (arr[2] == 0) {
+ ^^
+
+test:6:12 note: reading from read_write storage buffer 'rw' may result in a non-uniform value
+ arr[2] = rw;
+ ^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, ArrayElement_BecomesNonUniform_AfterCondition) {
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> rw : i32;
+
+fn foo() {
+ var arr : array<i32, 4>;
+ if (arr[2] == 0) {
+ arr[2] = rw;
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, true);
+}
+
+TEST_F(UniformityAnalysisTest, ArrayElement_DifferentElementBecomesNonUniform) {
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> rw : i32;
+
+fn foo() {
+ var arr : array<i32, 4>;
+ arr[1] = rw;
+ if (arr[2] == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:8:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:7:3 note: control flow depends on non-uniform value
+ if (arr[2] == 0) {
+ ^^
+
+test:6:12 note: reading from read_write storage buffer 'rw' may result in a non-uniform value
+ arr[1] = rw;
+ ^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, ArrayElement_DifferentElementBecomesNonUniformThroughPointer) {
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> rw : i32;
+
+fn foo() {
+ var arr : array<i32, 4>;
+ let pa = &arr[1];
+ *pa = rw;
+ if (arr[2] == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:9:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:8:3 note: control flow depends on non-uniform value
+ if (arr[2] == 0) {
+ ^^
+
+test:7:9 note: reading from read_write storage buffer 'rw' may result in a non-uniform value
+ *pa = rw;
+ ^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, ArrayElement_ElementBecomesUniform) {
+ // For aggregate types, we conservatively consider them to be forever non-uniform once they
+ // become non-uniform. Test that after assigning a uniform value to an element, that element is
+ // still considered to be non-uniform.
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> rw : i32;
+
+fn foo() {
+ var arr : array<i32, 4>;
+ arr[1] = rw;
+ arr[1] = 42;
+ if (arr[1] == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:9:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:8:3 note: control flow depends on non-uniform value
+ if (arr[1] == 0) {
+ ^^
+
+test:6:12 note: reading from read_write storage buffer 'rw' may result in a non-uniform value
+ arr[1] = rw;
+ ^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, ArrayElement_DifferentElementBecomesUniform) {
+ // For aggregate types, we conservatively consider them to be forever non-uniform once they
+ // become non-uniform. Test that after assigning a uniform value to an element, the whole array
+ // is still considered to be non-uniform.
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> rw : i32;
+
+fn foo() {
+ var arr : array<i32, 4>;
+ arr[1] = rw;
+ arr[2] = 42;
+ if (arr[1] == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:9:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:8:3 note: control flow depends on non-uniform value
+ if (arr[1] == 0) {
+ ^^
+
+test:6:12 note: reading from read_write storage buffer 'rw' may result in a non-uniform value
+ arr[1] = rw;
+ ^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, ArrayElement_ElementBecomesUniformThroughPointer) {
+ // For aggregate types, we conservatively consider them to be forever non-uniform once they
+ // become non-uniform. Test that after assigning a uniform value to an element through a
+ // pointer, the whole array is still considered to be non-uniform.
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> rw : i32;
+
+fn foo() {
+ var arr : array<i32, 4>;
+ let pa = &arr[2];
+ arr[1] = rw;
+ *pa = 42;
+ if (arr[1] == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:10:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:9:3 note: control flow depends on non-uniform value
+ if (arr[1] == 0) {
+ ^^
+
+test:7:12 note: reading from read_write storage buffer 'rw' may result in a non-uniform value
+ arr[1] = rw;
+ ^^
+)");
+}
+
+////////////////////////////////////////////////////////////////////////////////
+/// Miscellaneous statement and expression tests.
+////////////////////////////////////////////////////////////////////////////////
+
+TEST_F(UniformityAnalysisTest, FunctionRequiresUniformFlowAndCausesNonUniformFlow) {
+ // Test that a function that requires uniform flow and then causes non-uniform flow can be
+ // called without error.
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform_global : i32;
+
+fn foo() {
+ _ = dpdx(0.5);
+
+ if (non_uniform_global == 0) {
+ discard;
+ }
+}
+
+@fragment
+fn main() {
+ foo();
+}
+)";
+
+ RunTest(src, true);
+}
+
+TEST_F(UniformityAnalysisTest, TypeConstructor) {
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform_global : i32;
+
+fn foo() {
+ if (i32(non_uniform_global) == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:6:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:5:3 note: control flow depends on non-uniform value
+ if (i32(non_uniform_global) == 0) {
+ ^^
+
+test:5:11 note: reading from read_write storage buffer 'non_uniform_global' may result in a non-uniform value
+ if (i32(non_uniform_global) == 0) {
+ ^^^^^^^^^^^^^^^^^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, Conversion) {
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform_global : i32;
+
+fn foo() {
+ if (f32(non_uniform_global) == 0.0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:6:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:5:3 note: control flow depends on non-uniform value
+ if (f32(non_uniform_global) == 0.0) {
+ ^^
+
+test:5:11 note: reading from read_write storage buffer 'non_uniform_global' may result in a non-uniform value
+ if (f32(non_uniform_global) == 0.0) {
+ ^^^^^^^^^^^^^^^^^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, Bitcast) {
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform_global : i32;
+
+fn foo() {
+ if (bitcast<f32>(non_uniform_global) == 0.0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:6:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:5:3 note: control flow depends on non-uniform value
+ if (bitcast<f32>(non_uniform_global) == 0.0) {
+ ^^
+
+test:5:20 note: reading from read_write storage buffer 'non_uniform_global' may result in a non-uniform value
+ if (bitcast<f32>(non_uniform_global) == 0.0) {
+ ^^^^^^^^^^^^^^^^^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, CompoundAssignment_NonUniformRHS) {
+ // Use compound assignment with a non-uniform RHS on a variable.
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> rw : i32;
+
+fn foo() {
+ var v = 0;
+ v += rw;
+ if (v == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:8:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:7:3 note: control flow depends on non-uniform value
+ if (v == 0) {
+ ^^
+
+test:6:8 note: reading from read_write storage buffer 'rw' may result in a non-uniform value
+ v += rw;
+ ^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, CompoundAssignment_UniformRHS_StillNonUniform) {
+ // Use compound assignment with a uniform RHS on a variable that is already non-uniform.
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> rw : i32;
+
+fn foo() {
+ var v = rw;
+ v += 1;
+ if (v == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:8:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:7:3 note: control flow depends on non-uniform value
+ if (v == 0) {
+ ^^
+
+test:5:11 note: reading from read_write storage buffer 'rw' may result in a non-uniform value
+ var v = rw;
+ ^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, PhonyAssignment_LhsCausesNonUniformControlFlow) {
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> nonuniform_var : i32;
+
+fn bar() -> i32 {
+ if (nonuniform_var == 42) {
+ return 1;
+ } else {
+ return 2;
+ }
+}
+
+fn foo() {
+ _ = bar();
+ workgroupBarrier();
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:14:3 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:13:7 note: calling 'bar' may cause subsequent control flow to be non-uniform
+ _ = bar();
+ ^^^
+
+test:5:3 note: control flow depends on non-uniform value
+ if (nonuniform_var == 42) {
+ ^^
+
+test:5:7 note: reading from read_write storage buffer 'nonuniform_var' may result in a non-uniform value
+ if (nonuniform_var == 42) {
+ ^^^^^^^^^^^^^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, ShortCircuiting_NoReconvergeLHS) {
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform_global : i32;
+
+var<private> p : i32;
+
+fn non_uniform_discard_func() -> bool {
+ if (non_uniform_global == 42) {
+ discard;
+ }
+ return false;
+}
+
+fn main() {
+ let b = non_uniform_discard_func() && false;
+ workgroupBarrier();
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:15:3 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:14:11 note: calling 'non_uniform_discard_func' may cause subsequent control flow to be non-uniform
+ let b = non_uniform_discard_func() && false;
+ ^^^^^^^^^^^^^^^^^^^^^^^^
+
+test:7:3 note: control flow depends on non-uniform value
+ if (non_uniform_global == 42) {
+ ^^
+
+test:7:7 note: reading from read_write storage buffer 'non_uniform_global' may result in a non-uniform value
+ if (non_uniform_global == 42) {
+ ^^^^^^^^^^^^^^^^^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, ShortCircuiting_NoReconvergeRHS) {
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform_global : i32;
+
+var<private> p : i32;
+
+fn non_uniform_discard_func() -> bool {
+ if (non_uniform_global == 42) {
+ discard;
+ }
+ return false;
+}
+
+fn main() {
+ let b = false && non_uniform_discard_func();
+ workgroupBarrier();
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:15:3 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:14:20 note: calling 'non_uniform_discard_func' may cause subsequent control flow to be non-uniform
+ let b = false && non_uniform_discard_func();
+ ^^^^^^^^^^^^^^^^^^^^^^^^
+
+test:7:3 note: control flow depends on non-uniform value
+ if (non_uniform_global == 42) {
+ ^^
+
+test:7:7 note: reading from read_write storage buffer 'non_uniform_global' may result in a non-uniform value
+ if (non_uniform_global == 42) {
+ ^^^^^^^^^^^^^^^^^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, ShortCircuiting_NoReconvergeBoth) {
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform_global : i32;
+
+var<private> p : i32;
+
+fn non_uniform_discard_func() -> bool {
+ if (non_uniform_global == 42) {
+ discard;
+ }
+ return false;
+}
+
+fn main() {
+ let b = non_uniform_discard_func() && non_uniform_discard_func();
+ workgroupBarrier();
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:15:3 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:14:41 note: calling 'non_uniform_discard_func' may cause subsequent control flow to be non-uniform
+ let b = non_uniform_discard_func() && non_uniform_discard_func();
+ ^^^^^^^^^^^^^^^^^^^^^^^^
+
+test:7:3 note: control flow depends on non-uniform value
+ if (non_uniform_global == 42) {
+ ^^
+
+test:7:7 note: reading from read_write storage buffer 'non_uniform_global' may result in a non-uniform value
+ if (non_uniform_global == 42) {
+ ^^^^^^^^^^^^^^^^^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, ShortCircuiting_ReconvergeLHS) {
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform_global : i32;
+
+var<private> p : i32;
+
+fn uniform_discard_func() -> bool {
+ if (true) {
+ discard;
+ }
+ return false;
+}
+
+fn main() {
+ let b = uniform_discard_func() && false;
+ workgroupBarrier();
+}
+)";
+
+ RunTest(src, true);
+}
+
+TEST_F(UniformityAnalysisTest, ShortCircuiting_ReconvergeRHS) {
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform_global : i32;
+
+var<private> p : i32;
+
+fn uniform_discard_func() -> bool {
+ if (true) {
+ discard;
+ }
+ return false;
+}
+
+fn main() {
+ let b = false && uniform_discard_func();
+ workgroupBarrier();
+}
+)";
+
+ RunTest(src, true);
+}
+
+TEST_F(UniformityAnalysisTest, ShortCircuiting_ReconvergeBoth) {
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform_global : i32;
+
+var<private> p : i32;
+
+fn uniform_discard_func() -> bool {
+ if (true) {
+ discard;
+ }
+ return false;
+}
+
+fn main() {
+ let b = uniform_discard_func() && uniform_discard_func();
+ workgroupBarrier();
+}
+)";
+
+ RunTest(src, true);
+}
+
+TEST_F(UniformityAnalysisTest, DeadCode_AfterReturn) {
+ // Dead code after a return statement shouldn't cause uniformity errors.
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn foo() {
+ return;
+ if (non_uniform == 42) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, true);
+}
+
+TEST_F(UniformityAnalysisTest, DeadCode_AfterDiscard) {
+ // Dead code after a discard statement shouldn't cause uniformity errors.
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn foo() {
+ discard;
+ if (non_uniform == 42) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, true);
+}
+
+TEST_F(UniformityAnalysisTest, ArrayLength) {
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> arr : array<f32>;
+
+fn foo() {
+ for (var i = 0u; i < arrayLength(&arr); i++) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, true);
+}
+
+TEST_F(UniformityAnalysisTest, WorkgroupAtomics) {
+ std::string src = R"(
+var<workgroup> a : atomic<i32>;
+
+fn foo() {
+ if (atomicAdd(&a, 1) == 1) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:6:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:5:3 note: control flow depends on non-uniform value
+ if (atomicAdd(&a, 1) == 1) {
+ ^^
+
+test:5:18 note: reading from workgroup storage variable 'a' may result in a non-uniform value
+ if (atomicAdd(&a, 1) == 1) {
+ ^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, StorageAtomics) {
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> a : atomic<i32>;
+
+fn foo() {
+ if (atomicAdd(&a, 1) == 1) {
+ storageBarrier();
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:6:5 warning: 'storageBarrier' must only be called from uniform control flow
+ storageBarrier();
+ ^^^^^^^^^^^^^^
+
+test:5:3 note: control flow depends on non-uniform value
+ if (atomicAdd(&a, 1) == 1) {
+ ^^
+
+test:5:18 note: reading from read_write storage buffer 'a' may result in a non-uniform value
+ if (atomicAdd(&a, 1) == 1) {
+ ^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, DisableAnalysisWithExtension) {
+ std::string src = R"(
+enable chromium_disable_uniformity_analysis;
+
+@group(0) @binding(0) var<storage, read_write> rw : i32;
+
+fn foo() {
+ if (rw == 0) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, true);
+}
+
+TEST_F(UniformityAnalysisTest, StressGraphTraversalDepth) {
+ // Create a function with a very long sequence of variable declarations and assignments to
+ // test traversals of very deep graphs. This requires a non-recursive traversal algorithm.
+ ProgramBuilder b;
+ auto& ty = b.ty;
+
+ // var<private> v0 : i32 = 0i;
+ // fn foo() {
+ // let v1 = v0;
+ // let v2 = v1;
+ // ...
+ // let v{N} = v{N-1};
+ // if (v{N} == 0) {
+ // workgroupBarrier();
+ // }
+ // }
+ b.Global("v0", ty.i32(), ast::StorageClass::kPrivate, b.Expr(0_i));
+ ast::StatementList foo_body;
+ std::string v_last = "v0";
+ for (int i = 1; i < 100000; i++) {
+ auto v = "v" + std::to_string(i);
+ foo_body.push_back(b.Decl(b.Var(v, nullptr, b.Expr(v_last))));
+ v_last = v;
+ }
+ foo_body.push_back(b.If(b.Equal(v_last, 0_i), b.Block(b.CallStmt(b.Call("workgroupBarrier")))));
+ b.Func("foo", {}, ty.void_(), foo_body);
+
+ // TODO(jrprice): Expect false when uniformity issues become errors.
+ EXPECT_TRUE(RunTest(std::move(b))) << error_;
+ EXPECT_EQ(error_,
+ R"(warning: 'workgroupBarrier' must only be called from uniform control flow
+note: control flow depends on non-uniform value
+note: reading from module-scope private variable 'v0' may result in a non-uniform value)");
+}
+
+////////////////////////////////////////////////////////////////////////////////
+/// Tests for the quality of the error messages produced by the analysis.
+////////////////////////////////////////////////////////////////////////////////
+
+TEST_F(UniformityAnalysisTest, Error_CallUserThatCallsBuiltinDirectly) {
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn foo() {
+ workgroupBarrier();
+}
+
+fn main() {
+ if (non_uniform == 42) {
+ foo();
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:10:5 warning: 'foo' must only be called from uniform control flow
+ foo();
+ ^^^
+
+test:5:3 note: 'foo' requires uniformity because it calls workgroupBarrier
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:9:3 note: control flow depends on non-uniform value
+ if (non_uniform == 42) {
+ ^^
+
+test:9:7 note: reading from read_write storage buffer 'non_uniform' may result in a non-uniform value
+ if (non_uniform == 42) {
+ ^^^^^^^^^^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, Error_CallUserThatCallsBuiltinIndirectly) {
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn zoo() {
+ workgroupBarrier();
+}
+
+fn bar() {
+ zoo();
+}
+
+fn foo() {
+ bar();
+}
+
+fn main() {
+ if (non_uniform == 42) {
+ foo();
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:18:5 warning: 'foo' must only be called from uniform control flow
+ foo();
+ ^^^
+
+test:5:3 note: 'foo' requires uniformity because it indirectly calls workgroupBarrier
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:17:3 note: control flow depends on non-uniform value
+ if (non_uniform == 42) {
+ ^^
+
+test:17:7 note: reading from read_write storage buffer 'non_uniform' may result in a non-uniform value
+ if (non_uniform == 42) {
+ ^^^^^^^^^^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, Error_ParametersRequireUniformityInChain) {
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn zoo(a : i32) {
+ if (a == 42) {
+ workgroupBarrier();
+ }
+}
+
+fn bar(b : i32) {
+ zoo(b);
+}
+
+fn foo(c : i32) {
+ bar(c);
+}
+
+fn main() {
+ foo(non_uniform);
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:19:7 warning: parameter 'c' of 'foo' must be uniform
+ foo(non_uniform);
+ ^^^^^^^^^^^
+
+test:15:7 note: parameter 'b' of 'bar' must be uniform
+ bar(c);
+ ^
+
+test:11:7 note: parameter 'a' of 'zoo' must be uniform
+ zoo(b);
+ ^
+
+test:6:5 note: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:19:7 note: reading from read_write storage buffer 'non_uniform' may result in a non-uniform value
+ foo(non_uniform);
+ ^^^^^^^^^^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, Error_ReturnValueMayBeNonUniformChain) {
+ std::string src = R"(
+@group(0) @binding(0) var<storage, read_write> non_uniform : i32;
+
+fn zoo() -> i32 {
+ return non_uniform;
+}
+
+fn bar() -> i32 {
+ return zoo();
+}
+
+fn foo() -> i32 {
+ return bar();
+}
+
+fn main() {
+ if (foo() == 42) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:18:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:17:3 note: control flow depends on non-uniform value
+ if (foo() == 42) {
+ ^^
+
+test:17:7 note: return value of 'foo' may be non-uniform
+ if (foo() == 42) {
+ ^^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, Error_SubsequentControlFlowMayBeNonUniform) {
+ // Make sure we correctly identify the function call as the source of non-uniform control flow
+ // and not the if statement with the uniform condition.
+ std::string src = R"(
+@group(0) @binding(0) var<uniform> uniform_value : i32;
+@group(0) @binding(1) var<storage, read_write> non_uniform_value : i32;
+
+fn foo() -> i32 {
+ if (non_uniform_value == 0) {
+ return 5;
+ }
+ return 6;
+}
+
+fn main() {
+ foo();
+ if (uniform_value == 42) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:15:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:13:3 note: calling 'foo' may cause subsequent control flow to be non-uniform
+ foo();
+ ^^^
+
+test:6:3 note: control flow depends on non-uniform value
+ if (non_uniform_value == 0) {
+ ^^
+
+test:6:7 note: reading from read_write storage buffer 'non_uniform_value' may result in a non-uniform value
+ if (non_uniform_value == 0) {
+ ^^^^^^^^^^^^^^^^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, Error_ParameterRequiredToBeUniformForSubsequentControlFlow) {
+ // Make sure we correctly identify the function call as the source of non-uniform control flow
+ // and not the if statement with the uniform condition.
+ std::string src = R"(
+@group(0) @binding(0) var<uniform> uniform_value : i32;
+@group(0) @binding(1) var<storage, read_write> non_uniform_value : i32;
+
+fn foo(x : i32) -> i32 {
+ if (x == 0) {
+ return 5;
+ }
+ return 6;
+}
+
+fn main() {
+ foo(non_uniform_value);
+ if (uniform_value == 42) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:15:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:13:7 note: non-uniform function call argument causes subsequent control flow to be non-uniform
+ foo(non_uniform_value);
+ ^^^^^^^^^^^^^^^^^
+
+test:6:3 note: control flow depends on non-uniform value
+ if (x == 0) {
+ ^^
+
+test:6:7 note: reading from 'x' may result in a non-uniform value
+ if (x == 0) {
+ ^
+
+test:13:7 note: reading from read_write storage buffer 'non_uniform_value' may result in a non-uniform value
+ foo(non_uniform_value);
+ ^^^^^^^^^^^^^^^^^
+)");
+}
+
+TEST_F(UniformityAnalysisTest, Error_ShortCircuitingExprCausesNonUniformControlFlow) {
+ // Make sure we correctly identify the short-circuit as the source of non-uniform control flow
+ // and not the if statement with the uniform condition.
+ std::string src = R"(
+@group(0) @binding(0) var<uniform> uniform_value : i32;
+@group(0) @binding(1) var<storage, read_write> non_uniform_value : i32;
+
+fn non_uniform_discard_func() -> bool {
+ if (non_uniform_value == 42) {
+ discard;
+ }
+ return false;
+}
+
+fn main() {
+ let b = non_uniform_discard_func() && true;
+ if (uniform_value == 42) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ RunTest(src, false);
+ EXPECT_EQ(error_,
+ R"(test:15:5 warning: 'workgroupBarrier' must only be called from uniform control flow
+ workgroupBarrier();
+ ^^^^^^^^^^^^^^^^
+
+test:13:11 note: calling 'non_uniform_discard_func' may cause subsequent control flow to be non-uniform
+ let b = non_uniform_discard_func() && true;
+ ^^^^^^^^^^^^^^^^^^^^^^^^
+
+test:6:3 note: control flow depends on non-uniform value
+ if (non_uniform_value == 42) {
+ ^^
+
+test:6:7 note: reading from read_write storage buffer 'non_uniform_value' may result in a non-uniform value
+ if (non_uniform_value == 42) {
+ ^^^^^^^^^^^^^^^^^
+)");
+}
+
+} // namespace
+} // namespace tint::resolver
diff --git a/chromium/third_party/dawn/src/tint/resolver/validation_test.cc b/chromium/third_party/dawn/src/tint/resolver/validation_test.cc
index 8b89f99f5ad..4fe5b2bfe3b 100644
--- a/chromium/third_party/dawn/src/tint/resolver/validation_test.cc
+++ b/chromium/third_party/dawn/src/tint/resolver/validation_test.cc
@@ -34,69 +34,69 @@
#include "src/tint/sem/call.h"
#include "src/tint/sem/function.h"
#include "src/tint/sem/member_accessor_expression.h"
-#include "src/tint/sem/sampled_texture_type.h"
+#include "src/tint/sem/sampled_texture.h"
#include "src/tint/sem/statement.h"
#include "src/tint/sem/variable.h"
using ::testing::ElementsAre;
using ::testing::HasSubstr;
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::resolver {
namespace {
using ResolverValidationTest = ResolverTest;
class FakeStmt final : public Castable<FakeStmt, ast::Statement> {
- public:
- FakeStmt(ProgramID pid, Source src) : Base(pid, src) {}
- FakeStmt* Clone(CloneContext*) const override { return nullptr; }
+ public:
+ FakeStmt(ProgramID pid, Source src) : Base(pid, src) {}
+ FakeStmt* Clone(CloneContext*) const override { return nullptr; }
};
class FakeExpr final : public Castable<FakeExpr, ast::Expression> {
- public:
- FakeExpr(ProgramID pid, Source src) : Base(pid, src) {}
- FakeExpr* Clone(CloneContext*) const override { return nullptr; }
+ public:
+ FakeExpr(ProgramID pid, Source src) : Base(pid, src) {}
+ FakeExpr* Clone(CloneContext*) const override { return nullptr; }
};
TEST_F(ResolverValidationTest, WorkgroupMemoryUsedInVertexStage) {
- Global(Source{{1, 2}}, "wg", ty.vec4<f32>(), ast::StorageClass::kWorkgroup);
- Global("dst", ty.vec4<f32>(), ast::StorageClass::kPrivate);
- auto* stmt = Assign(Expr("dst"), Expr(Source{{3, 4}}, "wg"));
+ Global(Source{{1, 2}}, "wg", ty.vec4<f32>(), ast::StorageClass::kWorkgroup);
+ Global("dst", ty.vec4<f32>(), ast::StorageClass::kPrivate);
+ auto* stmt = Assign(Expr("dst"), Expr(Source{{3, 4}}, "wg"));
- Func(Source{{9, 10}}, "f0", ast::VariableList{}, ty.vec4<f32>(),
- {stmt, Return(Expr("dst"))},
- ast::AttributeList{Stage(ast::PipelineStage::kVertex)},
- ast::AttributeList{Builtin(ast::Builtin::kPosition)});
+ Func(Source{{9, 10}}, "f0", ast::VariableList{}, ty.vec4<f32>(), {stmt, Return(Expr("dst"))},
+ ast::AttributeList{Stage(ast::PipelineStage::kVertex)},
+ ast::AttributeList{Builtin(ast::Builtin::kPosition)});
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "3:4 error: workgroup memory cannot be used by vertex pipeline "
- "stage\n1:2 note: variable is declared here");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "3:4 error: workgroup memory cannot be used by vertex pipeline "
+ "stage\n1:2 note: variable is declared here");
}
TEST_F(ResolverValidationTest, WorkgroupMemoryUsedInFragmentStage) {
- // var<workgroup> wg : vec4<f32>;
- // var<workgroup> dst : vec4<f32>;
- // fn f2(){ dst = wg; }
- // fn f1() { f2(); }
- // @stage(fragment)
- // fn f0() {
- // f1();
- //}
-
- Global(Source{{1, 2}}, "wg", ty.vec4<f32>(), ast::StorageClass::kWorkgroup);
- Global("dst", ty.vec4<f32>(), ast::StorageClass::kPrivate);
- auto* stmt = Assign(Expr("dst"), Expr(Source{{3, 4}}, "wg"));
-
- Func(Source{{5, 6}}, "f2", {}, ty.void_(), {stmt});
- Func(Source{{7, 8}}, "f1", {}, ty.void_(), {CallStmt(Call("f2"))});
- Func(Source{{9, 10}}, "f0", {}, ty.void_(), {CallStmt(Call("f1"))},
- ast::AttributeList{Stage(ast::PipelineStage::kFragment)});
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- R"(3:4 error: workgroup memory cannot be used by fragment pipeline stage
+ // var<workgroup> wg : vec4<f32>;
+ // var<workgroup> dst : vec4<f32>;
+ // fn f2(){ dst = wg; }
+ // fn f1() { f2(); }
+ // @fragment
+ // fn f0() {
+ // f1();
+ //}
+
+ Global(Source{{1, 2}}, "wg", ty.vec4<f32>(), ast::StorageClass::kWorkgroup);
+ Global("dst", ty.vec4<f32>(), ast::StorageClass::kPrivate);
+ auto* stmt = Assign(Expr("dst"), Expr(Source{{3, 4}}, "wg"));
+
+ Func(Source{{5, 6}}, "f2", {}, ty.void_(), {stmt});
+ Func(Source{{7, 8}}, "f1", {}, ty.void_(), {CallStmt(Call("f2"))});
+ Func(Source{{9, 10}}, "f0", {}, ty.void_(), {CallStmt(Call("f1"))},
+ ast::AttributeList{Stage(ast::PipelineStage::kFragment)});
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ R"(3:4 error: workgroup memory cannot be used by fragment pipeline stage
1:2 note: variable is declared here
5:6 note: called by function 'f2'
7:8 note: called by function 'f1'
@@ -104,1211 +104,1150 @@ TEST_F(ResolverValidationTest, WorkgroupMemoryUsedInFragmentStage) {
}
TEST_F(ResolverValidationTest, UnhandledStmt) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b;
- b.WrapInFunction(b.create<FakeStmt>());
- Program(std::move(b));
- },
- "internal compiler error: unhandled node type: tint::resolver::FakeStmt");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b;
+ b.WrapInFunction(b.create<FakeStmt>());
+ Program(std::move(b));
+ },
+ "internal compiler error: unhandled node type: tint::resolver::FakeStmt");
}
TEST_F(ResolverValidationTest, Stmt_If_NonBool) {
- // if (1.23f) {}
+ // if (1.23f) {}
- WrapInFunction(If(Expr(Source{{12, 34}}, 1.23f), Block()));
+ WrapInFunction(If(Expr(Source{{12, 34}}, 1.23_f), Block()));
- EXPECT_FALSE(r()->Resolve());
+ EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: if statement condition must be bool, got f32");
+ EXPECT_EQ(r()->error(), "12:34 error: if statement condition must be bool, got f32");
}
-TEST_F(ResolverValidationTest, Stmt_Else_NonBool) {
- // else (1.23f) {}
+TEST_F(ResolverValidationTest, Stmt_ElseIf_NonBool) {
+ // else if (1.23f) {}
- WrapInFunction(
- If(Expr(true), Block(), Else(Expr(Source{{12, 34}}, 1.23f), Block())));
+ WrapInFunction(If(Expr(true), Block(), Else(If(Expr(Source{{12, 34}}, 1.23_f), Block()))));
- EXPECT_FALSE(r()->Resolve());
+ EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: else statement condition must be bool, got f32");
+ EXPECT_EQ(r()->error(), "12:34 error: if statement condition must be bool, got f32");
}
TEST_F(ResolverValidationTest, Expr_ErrUnknownExprType) {
- EXPECT_FATAL_FAILURE(
- {
- ProgramBuilder b;
- b.WrapInFunction(b.create<FakeExpr>());
- Resolver(&b).Resolve();
- },
- "internal compiler error: unhandled expression type: "
- "tint::resolver::FakeExpr");
+ EXPECT_FATAL_FAILURE(
+ {
+ ProgramBuilder b;
+ b.WrapInFunction(b.create<FakeExpr>());
+ Resolver(&b).Resolve();
+ },
+ "internal compiler error: unhandled expression type: "
+ "tint::resolver::FakeExpr");
}
TEST_F(ResolverValidationTest, Expr_DontCall_Function) {
- Func("func", {}, ty.void_(), {}, {});
- WrapInFunction(Expr(Source{{{3, 3}, {3, 8}}}, "func"));
+ Func("func", {}, ty.void_(), {}, {});
+ WrapInFunction(Expr(Source{{{3, 3}, {3, 8}}}, "func"));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), "3:8 error: missing '(' for function call");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "3:8 error: missing '(' for function call");
}
TEST_F(ResolverValidationTest, Expr_DontCall_Builtin) {
- WrapInFunction(Expr(Source{{{3, 3}, {3, 8}}}, "round"));
+ WrapInFunction(Expr(Source{{{3, 3}, {3, 8}}}, "round"));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), "3:8 error: missing '(' for builtin call");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "3:8 error: missing '(' for builtin call");
}
TEST_F(ResolverValidationTest, Expr_DontCall_Type) {
- Alias("T", ty.u32());
- WrapInFunction(Expr(Source{{{3, 3}, {3, 8}}}, "T"));
+ Alias("T", ty.u32());
+ WrapInFunction(Expr(Source{{{3, 3}, {3, 8}}}, "T"));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "3:8 error: missing '(' for type constructor or cast");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "3:8 error: missing '(' for type constructor or cast");
}
TEST_F(ResolverValidationTest, AssignmentStmt_InvalidLHS_BuiltinFunctionName) {
- // normalize = 2;
+ // normalize = 2;
- auto* lhs = Expr(Source{{12, 34}}, "normalize");
- auto* rhs = Expr(2);
- auto* assign = Assign(lhs, rhs);
- WrapInFunction(assign);
+ auto* lhs = Expr(Source{{12, 34}}, "normalize");
+ auto* rhs = Expr(2_i);
+ auto* assign = Assign(lhs, rhs);
+ WrapInFunction(assign);
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), "12:34 error: missing '(' for builtin call");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: missing '(' for builtin call");
}
TEST_F(ResolverValidationTest, UsingUndefinedVariable_Fail) {
- // b = 2;
+ // b = 2;
- auto* lhs = Expr(Source{{12, 34}}, "b");
- auto* rhs = Expr(2);
- auto* assign = Assign(lhs, rhs);
- WrapInFunction(assign);
+ auto* lhs = Expr(Source{{12, 34}}, "b");
+ auto* rhs = Expr(2_i);
+ auto* assign = Assign(lhs, rhs);
+ WrapInFunction(assign);
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), "12:34 error: unknown identifier: 'b'");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: unknown identifier: 'b'");
}
TEST_F(ResolverValidationTest, UsingUndefinedVariableInBlockStatement_Fail) {
- // {
- // b = 2;
- // }
+ // {
+ // b = 2;
+ // }
- auto* lhs = Expr(Source{{12, 34}}, "b");
- auto* rhs = Expr(2);
+ auto* lhs = Expr(Source{{12, 34}}, "b");
+ auto* rhs = Expr(2_i);
- auto* body = Block(Assign(lhs, rhs));
- WrapInFunction(body);
+ auto* body = Block(Assign(lhs, rhs));
+ WrapInFunction(body);
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), "12:34 error: unknown identifier: 'b'");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: unknown identifier: 'b'");
}
TEST_F(ResolverValidationTest, UsingUndefinedVariableGlobalVariable_Pass) {
- // var global_var: f32 = 2.1;
- // fn my_func() {
- // global_var = 3.14;
- // return;
- // }
-
- Global("global_var", ty.f32(), ast::StorageClass::kPrivate, Expr(2.1f));
-
- Func("my_func", ast::VariableList{}, ty.void_(),
- {
- Assign(Expr(Source{{12, 34}}, "global_var"), 3.14f),
- Return(),
- });
+ // var global_var: f32 = 2.1;
+ // fn my_func() {
+ // global_var = 3.14;
+ // return;
+ // }
+
+ Global("global_var", ty.f32(), ast::StorageClass::kPrivate, Expr(2.1_f));
+
+ Func("my_func", ast::VariableList{}, ty.void_(),
+ {
+ Assign(Expr(Source{{12, 34}}, "global_var"), 3.14_f),
+ Return(),
+ });
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverValidationTest, UsingUndefinedVariableInnerScope_Fail) {
- // {
- // if (true) { var a : f32 = 2.0; }
- // a = 3.14;
- // }
- auto* var = Var("a", ty.f32(), ast::StorageClass::kNone, Expr(2.0f));
+ // {
+ // if (true) { var a : f32 = 2.0; }
+ // a = 3.14;
+ // }
+ auto* var = Var("a", ty.f32(), ast::StorageClass::kNone, Expr(2_f));
- auto* cond = Expr(true);
- auto* body = Block(Decl(var));
+ auto* cond = Expr(true);
+ auto* body = Block(Decl(var));
- SetSource(Source{{12, 34}});
- auto* lhs = Expr(Source{{12, 34}}, "a");
- auto* rhs = Expr(3.14f);
+ SetSource(Source{{12, 34}});
+ auto* lhs = Expr(Source{{12, 34}}, "a");
+ auto* rhs = Expr(3.14_f);
- auto* outer_body =
- Block(create<ast::IfStatement>(cond, body, ast::ElseStatementList{}),
- Assign(lhs, rhs));
+ auto* outer_body = Block(If(cond, body), Assign(lhs, rhs));
- WrapInFunction(outer_body);
+ WrapInFunction(outer_body);
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), "12:34 error: unknown identifier: 'a'");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: unknown identifier: 'a'");
}
TEST_F(ResolverValidationTest, UsingUndefinedVariableOuterScope_Pass) {
- // {
- // var a : f32 = 2.0;
- // if (true) { a = 3.14; }
- // }
- auto* var = Var("a", ty.f32(), ast::StorageClass::kNone, Expr(2.0f));
+ // {
+ // var a : f32 = 2.0;
+ // if (true) { a = 3.14; }
+ // }
+ auto* var = Var("a", ty.f32(), ast::StorageClass::kNone, Expr(2_f));
- auto* lhs = Expr(Source{{12, 34}}, "a");
- auto* rhs = Expr(3.14f);
+ auto* lhs = Expr(Source{{12, 34}}, "a");
+ auto* rhs = Expr(3.14_f);
- auto* cond = Expr(true);
- auto* body = Block(Assign(lhs, rhs));
+ auto* cond = Expr(true);
+ auto* body = Block(Assign(lhs, rhs));
- auto* outer_body =
- Block(Decl(var),
- create<ast::IfStatement>(cond, body, ast::ElseStatementList{}));
+ auto* outer_body = Block(Decl(var), If(cond, body));
- WrapInFunction(outer_body);
+ WrapInFunction(outer_body);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverValidationTest, UsingUndefinedVariableDifferentScope_Fail) {
- // {
- // { var a : f32 = 2.0; }
- // { a = 3.14; }
- // }
- auto* var = Var("a", ty.f32(), ast::StorageClass::kNone, Expr(2.0f));
- auto* first_body = Block(Decl(var));
+ // {
+ // { var a : f32 = 2.0; }
+ // { a = 3.14; }
+ // }
+ auto* var = Var("a", ty.f32(), ast::StorageClass::kNone, Expr(2_f));
+ auto* first_body = Block(Decl(var));
- auto* lhs = Expr(Source{{12, 34}}, "a");
- auto* rhs = Expr(3.14f);
- auto* second_body = Block(Assign(lhs, rhs));
+ auto* lhs = Expr(Source{{12, 34}}, "a");
+ auto* rhs = Expr(3.14_f);
+ auto* second_body = Block(Assign(lhs, rhs));
- auto* outer_body = Block(first_body, second_body);
+ auto* outer_body = Block(first_body, second_body);
- WrapInFunction(outer_body);
+ WrapInFunction(outer_body);
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), "12:34 error: unknown identifier: 'a'");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: unknown identifier: 'a'");
}
TEST_F(ResolverValidationTest, StorageClass_FunctionVariableWorkgroupClass) {
- auto* var = Var("var", ty.i32(), ast::StorageClass::kWorkgroup);
+ auto* var = Var("var", ty.i32(), ast::StorageClass::kWorkgroup);
- auto* stmt = Decl(var);
- Func("func", ast::VariableList{}, ty.void_(), {stmt}, ast::AttributeList{});
+ auto* stmt = Decl(var);
+ Func("func", ast::VariableList{}, ty.void_(), {stmt}, ast::AttributeList{});
- EXPECT_FALSE(r()->Resolve());
+ EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "error: function variable has a non-function storage class");
+ EXPECT_EQ(r()->error(), "error: function variable has a non-function storage class");
}
TEST_F(ResolverValidationTest, StorageClass_FunctionVariableI32) {
- auto* var = Var("s", ty.i32(), ast::StorageClass::kPrivate);
+ auto* var = Var("s", ty.i32(), ast::StorageClass::kPrivate);
- auto* stmt = Decl(var);
- Func("func", ast::VariableList{}, ty.void_(), {stmt}, ast::AttributeList{});
+ auto* stmt = Decl(var);
+ Func("func", ast::VariableList{}, ty.void_(), {stmt}, ast::AttributeList{});
- EXPECT_FALSE(r()->Resolve());
+ EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "error: function variable has a non-function storage class");
+ EXPECT_EQ(r()->error(), "error: function variable has a non-function storage class");
}
TEST_F(ResolverValidationTest, StorageClass_SamplerExplicitStorageClass) {
- auto* t = ty.sampler(ast::SamplerKind::kSampler);
- Global(Source{{12, 34}}, "var", t, ast::StorageClass::kUniformConstant,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(0),
- });
+ auto* t = ty.sampler(ast::SamplerKind::kSampler);
+ Global(Source{{12, 34}}, "var", t, ast::StorageClass::kHandle,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(0),
+ });
- EXPECT_FALSE(r()->Resolve());
+ EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- R"(12:34 error: variables of type 'sampler' must not have a storage class)");
+ EXPECT_EQ(r()->error(),
+ R"(12:34 error: variables of type 'sampler' must not have a storage class)");
}
TEST_F(ResolverValidationTest, StorageClass_TextureExplicitStorageClass) {
- auto* t = ty.sampled_texture(ast::TextureDimension::k1d, ty.f32());
- Global(Source{{12, 34}}, "var", t, ast::StorageClass::kUniformConstant,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(0),
- });
+ auto* t = ty.sampled_texture(ast::TextureDimension::k1d, ty.f32());
+ Global(Source{{12, 34}}, "var", t, ast::StorageClass::kHandle,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(0),
+ });
- EXPECT_FALSE(r()->Resolve()) << r()->error();
+ EXPECT_FALSE(r()->Resolve()) << r()->error();
- EXPECT_EQ(
- r()->error(),
- R"(12:34 error: variables of type 'texture_1d<f32>' must not have a storage class)");
+ EXPECT_EQ(r()->error(),
+ R"(12:34 error: variables of type 'texture_1d<f32>' must not have a storage class)");
}
TEST_F(ResolverValidationTest, Expr_MemberAccessor_VectorSwizzle_BadChar) {
- Global("my_vec", ty.vec3<f32>(), ast::StorageClass::kPrivate);
+ Global("my_vec", ty.vec3<f32>(), ast::StorageClass::kPrivate);
- auto* ident = Expr(Source{{{3, 3}, {3, 7}}}, "xyqz");
+ auto* ident = Expr(Source{{{3, 3}, {3, 7}}}, "xyqz");
- auto* mem = MemberAccessor("my_vec", ident);
- WrapInFunction(mem);
+ auto* mem = MemberAccessor("my_vec", ident);
+ WrapInFunction(mem);
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), "3:5 error: invalid vector swizzle character");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "3:5 error: invalid vector swizzle character");
}
TEST_F(ResolverValidationTest, Expr_MemberAccessor_VectorSwizzle_MixedChars) {
- Global("my_vec", ty.vec4<f32>(), ast::StorageClass::kPrivate);
+ Global("my_vec", ty.vec4<f32>(), ast::StorageClass::kPrivate);
- auto* ident = Expr(Source{{{3, 3}, {3, 7}}}, "rgyw");
+ auto* ident = Expr(Source{{{3, 3}, {3, 7}}}, "rgyw");
- auto* mem = MemberAccessor("my_vec", ident);
- WrapInFunction(mem);
+ auto* mem = MemberAccessor("my_vec", ident);
+ WrapInFunction(mem);
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- "3:3 error: invalid mixing of vector swizzle characters rgba with xyzw");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "3:3 error: invalid mixing of vector swizzle characters rgba with xyzw");
}
TEST_F(ResolverValidationTest, Expr_MemberAccessor_VectorSwizzle_BadLength) {
- Global("my_vec", ty.vec3<f32>(), ast::StorageClass::kPrivate);
+ Global("my_vec", ty.vec3<f32>(), ast::StorageClass::kPrivate);
- auto* ident = Expr(Source{{{3, 3}, {3, 8}}}, "zzzzz");
- auto* mem = MemberAccessor("my_vec", ident);
- WrapInFunction(mem);
+ auto* ident = Expr(Source{{{3, 3}, {3, 8}}}, "zzzzz");
+ auto* mem = MemberAccessor("my_vec", ident);
+ WrapInFunction(mem);
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), "3:3 error: invalid vector swizzle size");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "3:3 error: invalid vector swizzle size");
}
TEST_F(ResolverValidationTest, Expr_MemberAccessor_VectorSwizzle_BadIndex) {
- Global("my_vec", ty.vec2<f32>(), ast::StorageClass::kPrivate);
+ Global("my_vec", ty.vec2<f32>(), ast::StorageClass::kPrivate);
- auto* ident = Expr(Source{{3, 3}}, "z");
- auto* mem = MemberAccessor("my_vec", ident);
- WrapInFunction(mem);
+ auto* ident = Expr(Source{{3, 3}}, "z");
+ auto* mem = MemberAccessor("my_vec", ident);
+ WrapInFunction(mem);
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), "3:3 error: invalid vector swizzle member");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "3:3 error: invalid vector swizzle member");
}
TEST_F(ResolverValidationTest, Expr_MemberAccessor_BadParent) {
- // var param: vec4<f32>
- // let ret: f32 = *(&param).x;
- auto* param = Var("param", ty.vec4<f32>());
- auto* x = Expr(Source{{{3, 3}, {3, 8}}}, "x");
+ // var param: vec4<f32>
+ // let ret: f32 = *(&param).x;
+ auto* param = Var("param", ty.vec4<f32>());
+ auto* x = Expr(Source{{{3, 3}, {3, 8}}}, "x");
- auto* addressOf_expr = AddressOf(Source{{12, 34}}, param);
- auto* accessor_expr = MemberAccessor(addressOf_expr, x);
- auto* star_p = Deref(accessor_expr);
- auto* ret = Var("r", ty.f32(), star_p);
- WrapInFunction(Decl(param), Decl(ret));
+ auto* addressOf_expr = AddressOf(Source{{12, 34}}, param);
+ auto* accessor_expr = MemberAccessor(addressOf_expr, x);
+ auto* star_p = Deref(accessor_expr);
+ auto* ret = Var("r", ty.f32(), star_p);
+ WrapInFunction(Decl(param), Decl(ret));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: invalid member accessor expression. Expected vector "
- "or struct, got 'ptr<function, vec4<f32>, read_write>'");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: invalid member accessor expression. Expected vector "
+ "or struct, got 'ptr<function, vec4<f32>, read_write>'");
}
TEST_F(ResolverValidationTest, EXpr_MemberAccessor_FuncGoodParent) {
- // fn func(p: ptr<function, vec4<f32>>) -> f32 {
- // let x: f32 = (*p).z;
- // return x;
- // }
- auto* p =
- Param("p", ty.pointer(ty.vec4<f32>(), ast::StorageClass::kFunction));
- auto* star_p = Deref(p);
- auto* z = Expr(Source{{{3, 3}, {3, 8}}}, "z");
- auto* accessor_expr = MemberAccessor(star_p, z);
- auto* x = Var("x", ty.f32(), accessor_expr);
- Func("func", {p}, ty.f32(), {Decl(x), Return(x)});
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ // fn func(p: ptr<function, vec4<f32>>) -> f32 {
+ // let x: f32 = (*p).z;
+ // return x;
+ // }
+ auto* p = Param("p", ty.pointer(ty.vec4<f32>(), ast::StorageClass::kFunction));
+ auto* star_p = Deref(p);
+ auto* z = Expr(Source{{{3, 3}, {3, 8}}}, "z");
+ auto* accessor_expr = MemberAccessor(star_p, z);
+ auto* x = Var("x", ty.f32(), accessor_expr);
+ Func("func", {p}, ty.f32(), {Decl(x), Return(x)});
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverValidationTest, EXpr_MemberAccessor_FuncBadParent) {
- // fn func(p: ptr<function, vec4<f32>>) -> f32 {
- // let x: f32 = *p.z;
- // return x;
- // }
- auto* p =
- Param("p", ty.pointer(ty.vec4<f32>(), ast::StorageClass::kFunction));
- auto* z = Expr(Source{{{3, 3}, {3, 8}}}, "z");
- auto* accessor_expr = MemberAccessor(p, z);
- auto* star_p = Deref(accessor_expr);
- auto* x = Var("x", ty.f32(), star_p);
- Func("func", {p}, ty.f32(), {Decl(x), Return(x)});
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- "error: invalid member accessor expression. "
- "Expected vector or struct, got 'ptr<function, vec4<f32>, read_write>'");
+ // fn func(p: ptr<function, vec4<f32>>) -> f32 {
+ // let x: f32 = *p.z;
+ // return x;
+ // }
+ auto* p = Param("p", ty.pointer(ty.vec4<f32>(), ast::StorageClass::kFunction));
+ auto* z = Expr(Source{{{3, 3}, {3, 8}}}, "z");
+ auto* accessor_expr = MemberAccessor(p, z);
+ auto* star_p = Deref(accessor_expr);
+ auto* x = Var("x", ty.f32(), star_p);
+ Func("func", {p}, ty.f32(), {Decl(x), Return(x)});
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "error: invalid member accessor expression. "
+ "Expected vector or struct, got 'ptr<function, vec4<f32>, read_write>'");
}
TEST_F(ResolverValidationTest,
Stmt_Loop_ContinueInLoopBodyBeforeDeclAndAfterDecl_UsageInContinuing) {
- // loop {
- // continue; // Bypasses z decl
- // var z : i32; // unreachable
- //
- // continuing {
- // z = 2;
- // }
- // }
-
- auto error_loc = Source{{12, 34}};
- auto* body =
- Block(Continue(),
- Decl(error_loc, Var("z", ty.i32(), ast::StorageClass::kNone)));
- auto* continuing = Block(Assign(Expr("z"), 2));
- auto* loop_stmt = Loop(body, continuing);
- WrapInFunction(loop_stmt);
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- R"(12:34 warning: code is unreachable
+ // loop {
+ // continue; // Bypasses z decl
+ // var z : i32; // unreachable
+ //
+ // continuing {
+ // z = 2;
+ // }
+ // }
+
+ auto error_loc = Source{{12, 34}};
+ auto* body = Block(Continue(), Decl(error_loc, Var("z", ty.i32(), ast::StorageClass::kNone)));
+ auto* continuing = Block(Assign(Expr("z"), 2_i));
+ auto* loop_stmt = Loop(body, continuing);
+ WrapInFunction(loop_stmt);
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ R"(12:34 warning: code is unreachable
error: continue statement bypasses declaration of 'z'
note: identifier 'z' declared here
note: identifier 'z' referenced in continuing block here)");
}
-TEST_F(ResolverValidationTest,
- Stmt_Loop_ContinueInLoopBodyAfterDecl_UsageInContinuing_InBlocks) {
- // loop {
- // if (false) { break; }
- // var z : i32;
- // {{{continue;}}}
- // continue; // Ok
- //
- // continuing {
- // z = 2;
- // }
- // }
-
- auto* body = Block(If(false, Block(Break())), //
- Decl(Var("z", ty.i32(), ast::StorageClass::kNone)),
- Block(Block(Block(Continue()))));
- auto* continuing = Block(Assign(Expr("z"), 2));
- auto* loop_stmt = Loop(body, continuing);
- WrapInFunction(loop_stmt);
-
- ASSERT_TRUE(r()->Resolve()) << r()->error();
-}
-
-TEST_F(ResolverValidationTest,
- Stmt_Loop_ContinueInLoopBodySubscopeBeforeDecl_UsageInContinuing) {
- // loop {
- // if (true) {
- // continue; // Still bypasses z decl (if we reach here)
- // }
- // var z : i32;
- // continuing {
- // z = 2;
- // }
- // }
-
- auto cont_loc = Source{{12, 34}};
- auto decl_loc = Source{{56, 78}};
- auto ref_loc = Source{{90, 12}};
- auto* body =
- Block(If(Expr(true), Block(Continue(cont_loc))),
- Decl(Var(decl_loc, "z", ty.i32(), ast::StorageClass::kNone)));
- auto* continuing = Block(Assign(Expr(ref_loc, "z"), 2));
- auto* loop_stmt = Loop(body, continuing);
- WrapInFunction(loop_stmt);
-
- EXPECT_FALSE(r()->Resolve()) << r()->error();
- EXPECT_EQ(r()->error(),
- R"(12:34 error: continue statement bypasses declaration of 'z'
+TEST_F(ResolverValidationTest, Stmt_Loop_ContinueInLoopBodyAfterDecl_UsageInContinuing_InBlocks) {
+ // loop {
+ // if (false) { break; }
+ // var z : i32;
+ // {{{continue;}}}
+ // continue; // Ok
+ //
+ // continuing {
+ // z = 2i;
+ // }
+ // }
+
+ auto* body =
+ Block(If(false, Block(Break())), //
+ Decl(Var("z", ty.i32(), ast::StorageClass::kNone)), Block(Block(Block(Continue()))));
+ auto* continuing = Block(Assign(Expr("z"), 2_i));
+ auto* loop_stmt = Loop(body, continuing);
+ WrapInFunction(loop_stmt);
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
+}
+
+TEST_F(ResolverValidationTest, Stmt_Loop_ContinueInLoopBodySubscopeBeforeDecl_UsageInContinuing) {
+ // loop {
+ // if (true) {
+ // continue; // Still bypasses z decl (if we reach here)
+ // }
+ // var z : i32;
+ // continuing {
+ // z = 2i;
+ // }
+ // }
+
+ auto cont_loc = Source{{12, 34}};
+ auto decl_loc = Source{{56, 78}};
+ auto ref_loc = Source{{90, 12}};
+ auto* body = Block(If(Expr(true), Block(Continue(cont_loc))),
+ Decl(Var(decl_loc, "z", ty.i32(), ast::StorageClass::kNone)));
+ auto* continuing = Block(Assign(Expr(ref_loc, "z"), 2_i));
+ auto* loop_stmt = Loop(body, continuing);
+ WrapInFunction(loop_stmt);
+
+ EXPECT_FALSE(r()->Resolve()) << r()->error();
+ EXPECT_EQ(r()->error(),
+ R"(12:34 error: continue statement bypasses declaration of 'z'
56:78 note: identifier 'z' declared here
90:12 note: identifier 'z' referenced in continuing block here)");
}
-TEST_F(
- ResolverValidationTest,
- Stmt_Loop_ContinueInLoopBodySubscopeBeforeDecl_UsageInContinuingSubscope) {
- // loop {
- // if (true) {
- // continue; // Still bypasses z decl (if we reach here)
- // }
- // var z : i32;
- // continuing {
- // if (true) {
- // z = 2; // Must fail even if z is in a sub-scope
- // }
- // }
- // }
-
- auto cont_loc = Source{{12, 34}};
- auto decl_loc = Source{{56, 78}};
- auto ref_loc = Source{{90, 12}};
- auto* body =
- Block(If(Expr(true), Block(Continue(cont_loc))),
- Decl(Var(decl_loc, "z", ty.i32(), ast::StorageClass::kNone)));
-
- auto* continuing =
- Block(If(Expr(true), Block(Assign(Expr(ref_loc, "z"), 2))));
- auto* loop_stmt = Loop(body, continuing);
- WrapInFunction(loop_stmt);
-
- EXPECT_FALSE(r()->Resolve()) << r()->error();
- EXPECT_EQ(r()->error(),
- R"(12:34 error: continue statement bypasses declaration of 'z'
+TEST_F(ResolverValidationTest,
+ Stmt_Loop_ContinueInLoopBodySubscopeBeforeDecl_UsageInContinuingSubscope) {
+ // loop {
+ // if (true) {
+ // continue; // Still bypasses z decl (if we reach here)
+ // }
+ // var z : i32;
+ // continuing {
+ // if (true) {
+ // z = 2i; // Must fail even if z is in a sub-scope
+ // }
+ // }
+ // }
+
+ auto cont_loc = Source{{12, 34}};
+ auto decl_loc = Source{{56, 78}};
+ auto ref_loc = Source{{90, 12}};
+ auto* body = Block(If(Expr(true), Block(Continue(cont_loc))),
+ Decl(Var(decl_loc, "z", ty.i32(), ast::StorageClass::kNone)));
+
+ auto* continuing = Block(If(Expr(true), Block(Assign(Expr(ref_loc, "z"), 2_i))));
+ auto* loop_stmt = Loop(body, continuing);
+ WrapInFunction(loop_stmt);
+
+ EXPECT_FALSE(r()->Resolve()) << r()->error();
+ EXPECT_EQ(r()->error(),
+ R"(12:34 error: continue statement bypasses declaration of 'z'
56:78 note: identifier 'z' declared here
90:12 note: identifier 'z' referenced in continuing block here)");
}
-TEST_F(ResolverValidationTest,
- Stmt_Loop_ContinueInLoopBodySubscopeBeforeDecl_UsageOutsideBlock) {
- // loop {
- // if (true) {
- // continue; // bypasses z decl (if we reach here)
- // }
- // var z : i32;
- // continuing {
- // // Must fail even if z is used in an expression that isn't
- // // directly contained inside a block.
- // if (z < 2) {
- // }
- // }
- // }
-
- auto cont_loc = Source{{12, 34}};
- auto decl_loc = Source{{56, 78}};
- auto ref_loc = Source{{90, 12}};
- auto* body =
- Block(If(Expr(true), Block(Continue(cont_loc))),
- Decl(Var(decl_loc, "z", ty.i32(), ast::StorageClass::kNone)));
- auto* compare = create<ast::BinaryExpression>(ast::BinaryOp::kLessThan,
- Expr(ref_loc, "z"), Expr(2));
- auto* continuing = Block(If(compare, Block()));
- auto* loop_stmt = Loop(body, continuing);
- WrapInFunction(loop_stmt);
-
- EXPECT_FALSE(r()->Resolve()) << r()->error();
- EXPECT_EQ(r()->error(),
- R"(12:34 error: continue statement bypasses declaration of 'z'
+TEST_F(ResolverValidationTest, Stmt_Loop_ContinueInLoopBodySubscopeBeforeDecl_UsageOutsideBlock) {
+ // loop {
+ // if (true) {
+ // continue; // bypasses z decl (if we reach here)
+ // }
+ // var z : i32;
+ // continuing {
+ // // Must fail even if z is used in an expression that isn't
+ // // directly contained inside a block.
+ // if (z < 2i) {
+ // }
+ // }
+ // }
+
+ auto cont_loc = Source{{12, 34}};
+ auto decl_loc = Source{{56, 78}};
+ auto ref_loc = Source{{90, 12}};
+ auto* body = Block(If(Expr(true), Block(Continue(cont_loc))),
+ Decl(Var(decl_loc, "z", ty.i32(), ast::StorageClass::kNone)));
+ auto* compare =
+ create<ast::BinaryExpression>(ast::BinaryOp::kLessThan, Expr(ref_loc, "z"), Expr(2_i));
+ auto* continuing = Block(If(compare, Block()));
+ auto* loop_stmt = Loop(body, continuing);
+ WrapInFunction(loop_stmt);
+
+ EXPECT_FALSE(r()->Resolve()) << r()->error();
+ EXPECT_EQ(r()->error(),
+ R"(12:34 error: continue statement bypasses declaration of 'z'
56:78 note: identifier 'z' declared here
90:12 note: identifier 'z' referenced in continuing block here)");
}
TEST_F(ResolverValidationTest,
Stmt_Loop_ContinueInLoopBodySubscopeBeforeDecl_UsageInContinuingLoop) {
- // loop {
- // if (true) {
- // continue; // Still bypasses z decl (if we reach here)
- // }
- // var z : i32;
- // continuing {
- // loop {
- // z = 2; // Must fail even if z is in a sub-scope
- // }
- // }
- // }
-
- auto cont_loc = Source{{12, 34}};
- auto decl_loc = Source{{56, 78}};
- auto ref_loc = Source{{90, 12}};
- auto* body =
- Block(If(Expr(true), Block(Continue(cont_loc))),
- Decl(Var(decl_loc, "z", ty.i32(), ast::StorageClass::kNone)));
-
- auto* continuing = Block(Loop(Block(Assign(Expr(ref_loc, "z"), 2))));
- auto* loop_stmt = Loop(body, continuing);
- WrapInFunction(loop_stmt);
-
- EXPECT_FALSE(r()->Resolve()) << r()->error();
- EXPECT_EQ(r()->error(),
- R"(12:34 error: continue statement bypasses declaration of 'z'
+ // loop {
+ // if (true) {
+ // continue; // Still bypasses z decl (if we reach here)
+ // }
+ // var z : i32;
+ // continuing {
+ // loop {
+ // z = 2i; // Must fail even if z is in a sub-scope
+ // }
+ // }
+ // }
+
+ auto cont_loc = Source{{12, 34}};
+ auto decl_loc = Source{{56, 78}};
+ auto ref_loc = Source{{90, 12}};
+ auto* body = Block(If(Expr(true), Block(Continue(cont_loc))),
+ Decl(Var(decl_loc, "z", ty.i32(), ast::StorageClass::kNone)));
+
+ auto* continuing = Block(Loop(Block(Assign(Expr(ref_loc, "z"), 2_i))));
+ auto* loop_stmt = Loop(body, continuing);
+ WrapInFunction(loop_stmt);
+
+ EXPECT_FALSE(r()->Resolve()) << r()->error();
+ EXPECT_EQ(r()->error(),
+ R"(12:34 error: continue statement bypasses declaration of 'z'
56:78 note: identifier 'z' declared here
90:12 note: identifier 'z' referenced in continuing block here)");
}
-TEST_F(ResolverValidationTest,
- Stmt_Loop_ContinueInNestedLoopBodyBeforeDecl_UsageInContinuing) {
- // loop {
- // loop {
- // if (true) { continue; } // OK: not part of the outer loop
- // break;
- // }
- // var z : i32;
- // break;
- // continuing {
- // z = 2;
- // }
- // }
-
- auto* inner_loop = Loop(Block( //
- If(true, Block(Continue())), //
- Break()));
- auto* body = Block(inner_loop, //
- Decl(Var("z", ty.i32(), ast::StorageClass::kNone)), //
- Break());
- auto* continuing = Block(Assign("z", 2));
- auto* loop_stmt = Loop(body, continuing);
- WrapInFunction(loop_stmt);
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+TEST_F(ResolverValidationTest, Stmt_Loop_ContinueInNestedLoopBodyBeforeDecl_UsageInContinuing) {
+ // loop {
+ // loop {
+ // if (true) { continue; } // OK: not part of the outer loop
+ // break;
+ // }
+ // var z : i32;
+ // break;
+ // continuing {
+ // z = 2i;
+ // }
+ // }
+
+ auto* inner_loop = Loop(Block( //
+ If(true, Block(Continue())), //
+ Break()));
+ auto* body = Block(inner_loop, //
+ Decl(Var("z", ty.i32(), ast::StorageClass::kNone)), //
+ Break());
+ auto* continuing = Block(Assign("z", 2_i));
+ auto* loop_stmt = Loop(body, continuing);
+ WrapInFunction(loop_stmt);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverValidationTest,
Stmt_Loop_ContinueInNestedLoopBodyBeforeDecl_UsageInContinuingSubscope) {
- // loop {
- // loop {
- // if (true) { continue; } // OK: not part of the outer loop
- // break;
- // }
- // var z : i32;
- // break;
- // continuing {
- // if (true) {
- // z = 2;
- // }
- // }
- // }
-
- auto* inner_loop = Loop(Block(If(true, Block(Continue())), //
- Break()));
- auto* body = Block(inner_loop, //
- Decl(Var("z", ty.i32(), ast::StorageClass::kNone)), //
- Break());
- auto* continuing = Block(If(Expr(true), Block(Assign("z", 2))));
- auto* loop_stmt = Loop(body, continuing);
- WrapInFunction(loop_stmt);
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
-}
-
-TEST_F(ResolverValidationTest,
- Stmt_Loop_ContinueInNestedLoopBodyBeforeDecl_UsageInContinuingLoop) {
- // loop {
- // loop {
- // if (true) { continue; } // OK: not part of the outer loop
- // break;
- // }
- // var z : i32;
- // break;
- // continuing {
- // loop {
- // z = 2;
- // break;
- // }
- // }
- // }
-
- auto* inner_loop = Loop(Block(If(true, Block(Continue())), //
- Break()));
- auto* body = Block(inner_loop, //
- Decl(Var("z", ty.i32(), ast::StorageClass::kNone)), //
- Break());
- auto* continuing = Block(Loop(Block(Assign("z", 2), //
- Break())));
- auto* loop_stmt = Loop(body, continuing);
- WrapInFunction(loop_stmt);
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ // loop {
+ // loop {
+ // if (true) { continue; } // OK: not part of the outer loop
+ // break;
+ // }
+ // var z : i32;
+ // break;
+ // continuing {
+ // if (true) {
+ // z = 2i;
+ // }
+ // }
+ // }
+
+ auto* inner_loop = Loop(Block(If(true, Block(Continue())), //
+ Break()));
+ auto* body = Block(inner_loop, //
+ Decl(Var("z", ty.i32(), ast::StorageClass::kNone)), //
+ Break());
+ auto* continuing = Block(If(Expr(true), Block(Assign("z", 2_i))));
+ auto* loop_stmt = Loop(body, continuing);
+ WrapInFunction(loop_stmt);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
+}
+
+TEST_F(ResolverValidationTest, Stmt_Loop_ContinueInNestedLoopBodyBeforeDecl_UsageInContinuingLoop) {
+ // loop {
+ // loop {
+ // if (true) { continue; } // OK: not part of the outer loop
+ // break;
+ // }
+ // var z : i32;
+ // break;
+ // continuing {
+ // loop {
+ // z = 2i;
+ // break;
+ // }
+ // }
+ // }
+
+ auto* inner_loop = Loop(Block(If(true, Block(Continue())), //
+ Break()));
+ auto* body = Block(inner_loop, //
+ Decl(Var("z", ty.i32(), ast::StorageClass::kNone)), //
+ Break());
+ auto* continuing = Block(Loop(Block(Assign("z", 2_i), //
+ Break())));
+ auto* loop_stmt = Loop(body, continuing);
+ WrapInFunction(loop_stmt);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverTest, Stmt_Loop_ContinueInLoopBodyAfterDecl_UsageInContinuing) {
- // loop {
- // var z : i32;
- // if (true) { continue; }
- // break;
- // continuing {
- // z = 2;
- // }
- // }
-
- auto error_loc = Source{{12, 34}};
- auto* body = Block(Decl(Var("z", ty.i32(), ast::StorageClass::kNone)),
- If(true, Block(Continue())), //
- Break());
- auto* continuing = Block(Assign(Expr(error_loc, "z"), 2));
- auto* loop_stmt = Loop(body, continuing);
- WrapInFunction(loop_stmt);
-
- EXPECT_TRUE(r()->Resolve());
+ // loop {
+ // var z : i32;
+ // if (true) { continue; }
+ // break;
+ // continuing {
+ // z = 2i;
+ // }
+ // }
+
+ auto error_loc = Source{{12, 34}};
+ auto* body =
+ Block(Decl(Var("z", ty.i32(), ast::StorageClass::kNone)), If(true, Block(Continue())), //
+ Break());
+ auto* continuing = Block(Assign(Expr(error_loc, "z"), 2_i));
+ auto* loop_stmt = Loop(body, continuing);
+ WrapInFunction(loop_stmt);
+
+ EXPECT_TRUE(r()->Resolve());
}
TEST_F(ResolverTest, Stmt_Loop_ReturnInContinuing_Direct) {
- // loop {
- // continuing {
- // return;
- // }
- // }
+ // loop {
+ // continuing {
+ // return;
+ // }
+ // }
- WrapInFunction(Loop( // loop
- Block(), // loop block
- Block( // loop continuing block
- Return(Source{{12, 34}}))));
+ WrapInFunction(Loop( // loop
+ Block(), // loop block
+ Block( // loop continuing block
+ Return(Source{{12, 34}}))));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- R"(12:34 error: continuing blocks must not contain a return statement)");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ R"(12:34 error: continuing blocks must not contain a return statement)");
}
TEST_F(ResolverTest, Stmt_Loop_ReturnInContinuing_Indirect) {
- // loop {
- // if (false) { break; }
- // continuing {
- // loop {
- // return;
- // }
- // }
- // }
-
- WrapInFunction(Loop( // outer loop
- Block(If(false, Block(Break()))), // outer loop block
- Block(Source{{56, 78}}, // outer loop continuing block
- Loop( // inner loop
- Block( // inner loop block
- Return(Source{{12, 34}}))))));
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- R"(12:34 error: continuing blocks must not contain a return statement
+ // loop {
+ // if (false) { break; }
+ // continuing {
+ // loop {
+ // return;
+ // }
+ // }
+ // }
+
+ WrapInFunction(Loop( // outer loop
+ Block(If(false, Block(Break()))), // outer loop block
+ Block(Source{{56, 78}}, // outer loop continuing block
+ Loop( // inner loop
+ Block( // inner loop block
+ Return(Source{{12, 34}}))))));
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ R"(12:34 error: continuing blocks must not contain a return statement
56:78 note: see continuing block here)");
}
TEST_F(ResolverTest, Stmt_Loop_DiscardInContinuing_Direct) {
- // loop {
- // continuing {
- // discard;
- // }
- // }
+ // loop {
+ // continuing {
+ // discard;
+ // }
+ // }
- WrapInFunction(Loop( // loop
- Block(), // loop block
- Block( // loop continuing block
- Discard(Source{{12, 34}}))));
+ WrapInFunction(Loop( // loop
+ Block(), // loop block
+ Block( // loop continuing block
+ Discard(Source{{12, 34}}))));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- R"(12:34 error: continuing blocks must not contain a discard statement)");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ R"(12:34 error: continuing blocks must not contain a discard statement)");
}
TEST_F(ResolverTest, Stmt_Loop_DiscardInContinuing_Indirect) {
- // loop {
- // if (false) { break; }
- // continuing {
- // loop { discard; }
- // }
- // }
-
- WrapInFunction(Loop( // outer loop
- Block(If(false, Block(Break()))), // outer loop block
- Block(Source{{56, 78}}, // outer loop continuing block
- Loop( // inner loop
- Block( // inner loop block
- Discard(Source{{12, 34}}))))));
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- R"(12:34 error: continuing blocks must not contain a discard statement
+ // loop {
+ // if (false) { break; }
+ // continuing {
+ // loop { discard; }
+ // }
+ // }
+
+ WrapInFunction(Loop( // outer loop
+ Block(If(false, Block(Break()))), // outer loop block
+ Block(Source{{56, 78}}, // outer loop continuing block
+ Loop( // inner loop
+ Block( // inner loop block
+ Discard(Source{{12, 34}}))))));
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ R"(12:34 error: continuing blocks must not contain a discard statement
56:78 note: see continuing block here)");
}
TEST_F(ResolverTest, Stmt_Loop_DiscardInContinuing_Indirect_ViaCall) {
- // fn MayDiscard() { if (true) { discard; } }
- // fn F() { MayDiscard(); }
- // loop {
- // continuing {
- // loop { F(); }
- // }
- // }
-
- Func("MayDiscard", {}, ty.void_(), {If(true, Block(Discard()))});
- Func("SomeFunc", {}, ty.void_(), {CallStmt(Call("MayDiscard"))});
-
- WrapInFunction(Loop( // outer loop
- Block(), // outer loop block
- Block(Source{{56, 78}}, // outer loop continuing block
- Loop( // inner loop
- Block( // inner loop block
- CallStmt(Call(Source{{12, 34}}, "SomeFunc")))))));
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- R"(12:34 error: cannot call a function that may discard inside a continuing block
+ // fn MayDiscard() { if (true) { discard; } }
+ // fn F() { MayDiscard(); }
+ // loop {
+ // continuing {
+ // loop { F(); }
+ // }
+ // }
+
+ Func("MayDiscard", {}, ty.void_(), {If(true, Block(Discard()))});
+ Func("SomeFunc", {}, ty.void_(), {CallStmt(Call("MayDiscard"))});
+
+ WrapInFunction(Loop( // outer loop
+ Block(), // outer loop block
+ Block(Source{{56, 78}}, // outer loop continuing block
+ Loop( // inner loop
+ Block( // inner loop block
+ CallStmt(Call(Source{{12, 34}}, "SomeFunc")))))));
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ R"(12:34 error: cannot call a function that may discard inside a continuing block
56:78 note: see continuing block here)");
}
TEST_F(ResolverTest, Stmt_Loop_ContinueInContinuing_Direct) {
- // loop {
- // continuing {
- // continue;
- // }
- // }
+ // loop {
+ // continuing {
+ // continue;
+ // }
+ // }
- WrapInFunction(Loop( // loop
- Block(), // loop block
- Block(Source{{56, 78}}, // loop continuing block
- Continue(Source{{12, 34}}))));
+ WrapInFunction(Loop( // loop
+ Block(), // loop block
+ Block(Source{{56, 78}}, // loop continuing block
+ Continue(Source{{12, 34}}))));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- "12:34 error: continuing blocks must not contain a continue statement");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: continuing blocks must not contain a continue statement");
}
TEST_F(ResolverTest, Stmt_Loop_ContinueInContinuing_Indirect) {
- // loop {
- // if (false) { break; }
- // continuing {
- // loop {
- // if (false) { break; }
- // continue;
- // }
- // }
- // }
-
- WrapInFunction(Loop( // outer loop
- Block( // outer loop block
- If(false, Block(Break()))), // if (false) { break; }
- Block( // outer loop continuing block
- Loop( // inner loop
- Block( // inner loop block
- If(false, Block(Break())), // if (false) { break; }
- Continue(Source{{12, 34}})))))); // continue
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ // loop {
+ // if (false) { break; }
+ // continuing {
+ // loop {
+ // if (false) { break; }
+ // continue;
+ // }
+ // }
+ // }
+
+ WrapInFunction(Loop( // outer loop
+ Block( // outer loop block
+ If(false, Block(Break()))), // if (false) { break; }
+ Block( // outer loop continuing block
+ Loop( // inner loop
+ Block( // inner loop block
+ If(false, Block(Break())), // if (false) { break; }
+ Continue(Source{{12, 34}})))))); // continue
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverTest, Stmt_ForLoop_ReturnInContinuing_Direct) {
- // for(;; return) {
- // break;
- // }
+ // for(;; return) {
+ // break;
+ // }
- WrapInFunction(For(nullptr, nullptr, Return(Source{{12, 34}}), //
- Block(Break())));
+ WrapInFunction(For(nullptr, nullptr, Return(Source{{12, 34}}), //
+ Block(Break())));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- R"(12:34 error: continuing blocks must not contain a return statement)");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ R"(12:34 error: continuing blocks must not contain a return statement)");
}
TEST_F(ResolverTest, Stmt_ForLoop_ReturnInContinuing_Indirect) {
- // for(;; loop { return }) {
- // break;
- // }
-
- WrapInFunction(For(nullptr, nullptr,
- Loop(Source{{56, 78}}, //
- Block(Return(Source{{12, 34}}))), //
- Block(Break())));
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- R"(12:34 error: continuing blocks must not contain a return statement
+ // for(;; loop { return }) {
+ // break;
+ // }
+
+ WrapInFunction(For(nullptr, nullptr,
+ Loop(Source{{56, 78}}, //
+ Block(Return(Source{{12, 34}}))), //
+ Block(Break())));
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ R"(12:34 error: continuing blocks must not contain a return statement
56:78 note: see continuing block here)");
}
TEST_F(ResolverTest, Stmt_ForLoop_DiscardInContinuing_Direct) {
- // for(;; discard) {
- // break;
- // }
+ // for(;; discard) {
+ // break;
+ // }
- WrapInFunction(For(nullptr, nullptr, Discard(Source{{12, 34}}), //
- Block(Break())));
+ WrapInFunction(For(nullptr, nullptr, Discard(Source{{12, 34}}), //
+ Block(Break())));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- R"(12:34 error: continuing blocks must not contain a discard statement)");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ R"(12:34 error: continuing blocks must not contain a discard statement)");
}
TEST_F(ResolverTest, Stmt_ForLoop_DiscardInContinuing_Indirect) {
- // for(;; loop { discard }) {
- // break;
- // }
-
- WrapInFunction(For(nullptr, nullptr,
- Loop(Source{{56, 78}}, //
- Block(Discard(Source{{12, 34}}))), //
- Block(Break())));
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- R"(12:34 error: continuing blocks must not contain a discard statement
+ // for(;; loop { discard }) {
+ // break;
+ // }
+
+ WrapInFunction(For(nullptr, nullptr,
+ Loop(Source{{56, 78}}, //
+ Block(Discard(Source{{12, 34}}))), //
+ Block(Break())));
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ R"(12:34 error: continuing blocks must not contain a discard statement
56:78 note: see continuing block here)");
}
TEST_F(ResolverTest, Stmt_ForLoop_DiscardInContinuing_Indirect_ViaCall) {
- // fn MayDiscard() { if (true) { discard; } }
- // fn F() { MayDiscard(); }
- // for(;; loop { F() }) {
- // break;
- // }
-
- Func("MayDiscard", {}, ty.void_(), {If(true, Block(Discard()))});
- Func("F", {}, ty.void_(), {CallStmt(Call("MayDiscard"))});
-
- WrapInFunction(For(nullptr, nullptr,
- Loop(Source{{56, 78}}, //
- Block(CallStmt(Call(Source{{12, 34}}, "F")))), //
- Block(Break())));
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- R"(12:34 error: cannot call a function that may discard inside a continuing block
+ // fn MayDiscard() { if (true) { discard; } }
+ // fn F() { MayDiscard(); }
+ // for(;; loop { F() }) {
+ // break;
+ // }
+
+ Func("MayDiscard", {}, ty.void_(), {If(true, Block(Discard()))});
+ Func("F", {}, ty.void_(), {CallStmt(Call("MayDiscard"))});
+
+ WrapInFunction(For(nullptr, nullptr,
+ Loop(Source{{56, 78}}, //
+ Block(CallStmt(Call(Source{{12, 34}}, "F")))), //
+ Block(Break())));
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ R"(12:34 error: cannot call a function that may discard inside a continuing block
56:78 note: see continuing block here)");
}
TEST_F(ResolverTest, Stmt_ForLoop_ContinueInContinuing_Direct) {
- // for(;; continue) {
- // break;
- // }
+ // for(;; continue) {
+ // break;
+ // }
- WrapInFunction(For(nullptr, nullptr, Continue(Source{{12, 34}}), //
- Block(Break())));
+ WrapInFunction(For(nullptr, nullptr, Continue(Source{{12, 34}}), //
+ Block(Break())));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- "12:34 error: continuing blocks must not contain a continue statement");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: continuing blocks must not contain a continue statement");
}
TEST_F(ResolverTest, Stmt_ForLoop_ContinueInContinuing_Indirect) {
- // for(;; loop { if (false) { break; } continue }) {
- // break;
- // }
+ // for(;; loop { if (false) { break; } continue }) {
+ // break;
+ // }
- WrapInFunction(For(nullptr, nullptr,
- Loop( //
- Block(If(false, Block(Break())), //
- Continue(Source{{12, 34}}))), //
- Block(Break())));
+ WrapInFunction(For(nullptr, nullptr,
+ Loop( //
+ Block(If(false, Block(Break())), //
+ Continue(Source{{12, 34}}))), //
+ Block(Break())));
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverTest, Stmt_ForLoop_CondIsBoolRef) {
- // var cond : bool = true;
- // for (; cond; ) {
- // }
+ // var cond : bool = true;
+ // for (; cond; ) {
+ // }
- auto* cond = Var("cond", ty.bool_(), Expr(true));
- WrapInFunction(Decl(cond), For(nullptr, "cond", nullptr, Block()));
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ auto* cond = Var("cond", ty.bool_(), Expr(true));
+ WrapInFunction(Decl(cond), For(nullptr, "cond", nullptr, Block()));
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverTest, Stmt_ForLoop_CondIsNotBool) {
- // for (; 1.0f; ) {
- // }
+ // for (; 1.0f; ) {
+ // }
- WrapInFunction(For(nullptr, Expr(Source{{12, 34}}, 1.0f), nullptr, Block()));
+ WrapInFunction(For(nullptr, Expr(Source{{12, 34}}, 1_f), nullptr, Block()));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: for-loop condition must be bool, got f32");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: for-loop condition must be bool, got f32");
}
TEST_F(ResolverValidationTest, Stmt_ContinueInLoop) {
- WrapInFunction(Loop(Block(If(false, Block(Break())), //
- Continue(Source{{12, 34}}))));
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ WrapInFunction(Loop(Block(If(false, Block(Break())), //
+ Continue(Source{{12, 34}}))));
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverValidationTest, Stmt_ContinueNotInLoop) {
- WrapInFunction(Continue(Source{{12, 34}}));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), "12:34 error: continue statement must be in a loop");
+ WrapInFunction(Continue(Source{{12, 34}}));
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: continue statement must be in a loop");
}
TEST_F(ResolverValidationTest, Stmt_BreakInLoop) {
- WrapInFunction(Loop(Block(Break(Source{{12, 34}}))));
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ WrapInFunction(Loop(Block(Break(Source{{12, 34}}))));
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverValidationTest, Stmt_BreakInSwitch) {
- WrapInFunction(Loop(Block(Switch(Expr(1), //
- Case(Expr(1), //
- Block(Break())), //
- DefaultCase()), //
- Break()))); //
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ WrapInFunction(Loop(Block(Switch(Expr(1_i), //
+ Case(Expr(1_i), //
+ Block(Break())), //
+ DefaultCase()), //
+ Break()))); //
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverValidationTest, Stmt_BreakInIfTrueInContinuing) {
- auto* cont = Block( // continuing {
- If(true, Block( // if(true) {
- Break(Source{{12, 34}})))); // break;
- // }
- // }
- WrapInFunction(Loop(Block(), cont));
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ auto* cont = Block( // continuing {
+ If(true, Block( // if(true) {
+ Break(Source{{12, 34}})))); // break;
+ // }
+ // }
+ WrapInFunction(Loop(Block(), cont));
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverValidationTest, Stmt_BreakInIfElseInContinuing) {
- auto* cont = Block( // continuing {
- If(true, Block(), // if(true) {
- Else(Block( // } else {
- Break(Source{{12, 34}}))))); // break;
- // }
- // }
- WrapInFunction(Loop(Block(), cont));
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ auto* cont = Block( // continuing {
+ If(true, Block(), // if(true) {
+ Else(Block( // } else {
+ Break(Source{{12, 34}}))))); // break;
+ // }
+ // }
+ WrapInFunction(Loop(Block(), cont));
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverValidationTest, Stmt_BreakInContinuing) {
- auto* cont = Block( // continuing {
- Block(Break(Source{{12, 34}}))); // break;
- // }
- WrapInFunction(Loop(Block(), cont));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- "12:34 error: break statement in a continuing block must be the single "
- "statement of an if statement's true or false block, and that if "
- "statement must be the last statement of the continuing block\n"
- "12:34 note: break statement is not directly in if statement block");
+ auto* cont = Block( // continuing {
+ Block(Break(Source{{12, 34}}))); // break;
+ // }
+ WrapInFunction(Loop(Block(), cont));
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: break statement in a continuing block must be the single "
+ "statement of an if statement's true or false block, and that if "
+ "statement must be the last statement of the continuing block\n"
+ "12:34 note: break statement is not directly in if statement block");
}
TEST_F(ResolverValidationTest, Stmt_BreakInIfInIfInContinuing) {
- auto* cont = Block( // continuing {
- If(true, Block( // if(true) {
- If(Source{{56, 78}}, true, // if(true) {
- Block(Break(Source{{12, 34}})))))); // break;
- // }
- // }
- // }
- WrapInFunction(Loop(Block(), cont));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- "12:34 error: break statement in a continuing block must be the single "
- "statement of an if statement's true or false block, and that if "
- "statement must be the last statement of the continuing block\n"
- "56:78 note: if statement containing break statement is not directly in "
- "continuing block");
+ auto* cont = Block( // continuing {
+ If(true, Block( // if(true) {
+ If(Source{{56, 78}}, true, // if(true) {
+ Block(Break(Source{{12, 34}})))))); // break;
+ // }
+ // }
+ // }
+ WrapInFunction(Loop(Block(), cont));
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: break statement in a continuing block must be the single "
+ "statement of an if statement's true or false block, and that if "
+ "statement must be the last statement of the continuing block\n"
+ "56:78 note: if statement containing break statement is not directly in "
+ "continuing block");
}
TEST_F(ResolverValidationTest, Stmt_BreakInIfTrueMultipleStmtsInContinuing) {
- auto* cont = Block( // continuing {
- If(true, Block(Source{{56, 78}}, // if(true) {
- Assign(Phony(), 1), // _ = 1;
- Break(Source{{12, 34}})))); // break;
- // }
- // }
- WrapInFunction(Loop(Block(), cont));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- "12:34 error: break statement in a continuing block must be the single "
- "statement of an if statement's true or false block, and that if "
- "statement must be the last statement of the continuing block\n"
- "56:78 note: if statement block contains multiple statements");
+ auto* cont = Block( // continuing {
+ If(true, Block(Source{{56, 78}}, // if(true) {
+ Assign(Phony(), 1_i), // _ = 1i;
+ Break(Source{{12, 34}})))); // break;
+ // }
+ // }
+ WrapInFunction(Loop(Block(), cont));
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: break statement in a continuing block must be the single "
+ "statement of an if statement's true or false block, and that if "
+ "statement must be the last statement of the continuing block\n"
+ "56:78 note: if statement block contains multiple statements");
}
TEST_F(ResolverValidationTest, Stmt_BreakInIfElseMultipleStmtsInContinuing) {
- auto* cont = Block( // continuing {
- If(true, Block(), // if(true) {
- Else(Block(Source{{56, 78}}, // } else {
- Assign(Phony(), 1), // _ = 1;
- Break(Source{{12, 34}}))))); // break;
- // }
- // }
- WrapInFunction(Loop(Block(), cont));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- "12:34 error: break statement in a continuing block must be the single "
- "statement of an if statement's true or false block, and that if "
- "statement must be the last statement of the continuing block\n"
- "56:78 note: if statement block contains multiple statements");
+ auto* cont = Block( // continuing {
+ If(true, Block(), // if(true) {
+ Else(Block(Source{{56, 78}}, // } else {
+ Assign(Phony(), 1_i), // _ = 1i;
+ Break(Source{{12, 34}}))))); // break;
+ // }
+ // }
+ WrapInFunction(Loop(Block(), cont));
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: break statement in a continuing block must be the single "
+ "statement of an if statement's true or false block, and that if "
+ "statement must be the last statement of the continuing block\n"
+ "56:78 note: if statement block contains multiple statements");
}
TEST_F(ResolverValidationTest, Stmt_BreakInIfElseIfInContinuing) {
- auto* cont = Block( // continuing {
- If(true, Block(), // if(true) {
- Else(Expr(Source{{56, 78}}, true), // } else if (true) {
- Block(Break(Source{{12, 34}}))))); // break;
- // }
- // }
- WrapInFunction(Loop(Block(), cont));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- "12:34 error: break statement in a continuing block must be the single "
- "statement of an if statement's true or false block, and that if "
- "statement must be the last statement of the continuing block\n"
- "56:78 note: else has condition");
+ auto* cont = Block( // continuing {
+ If(true, Block(), // if(true) {
+ Else(If(Source{{56, 78}}, Expr(true), // } else if (true) {
+ Block(Break(Source{{12, 34}})))))); // break;
+ // }
+ // }
+ WrapInFunction(Loop(Block(), cont));
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: break statement in a continuing block must be the single "
+ "statement of an if statement's true or false block, and that if "
+ "statement must be the last statement of the continuing block\n"
+ "56:78 note: else has condition");
}
TEST_F(ResolverValidationTest, Stmt_BreakInIfNonEmptyElseInContinuing) {
- auto* cont = Block( // continuing {
- If(true, // if(true) {
- Block(Break(Source{{12, 34}})), // break;
- Else(Block(Source{{56, 78}}, // } else {
- Assign(Phony(), 1))))); // _ = 1;
- // }
- // }
- WrapInFunction(Loop(Block(), cont));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- "12:34 error: break statement in a continuing block must be the single "
- "statement of an if statement's true or false block, and that if "
- "statement must be the last statement of the continuing block\n"
- "56:78 note: non-empty false block");
+ auto* cont = Block( // continuing {
+ If(true, // if(true) {
+ Block(Break(Source{{12, 34}})), // break;
+ Else(Block(Source{{56, 78}}, // } else {
+ Assign(Phony(), 1_i))))); // _ = 1i;
+ // }
+ // }
+ WrapInFunction(Loop(Block(), cont));
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: break statement in a continuing block must be the single "
+ "statement of an if statement's true or false block, and that if "
+ "statement must be the last statement of the continuing block\n"
+ "56:78 note: non-empty false block");
}
TEST_F(ResolverValidationTest, Stmt_BreakInIfElseNonEmptyTrueInContinuing) {
- auto* cont = Block( // continuing {
- If(true, // if(true) {
- Block(Source{{56, 78}}, Assign(Phony(), 1)), // _ = 1;
- Else(Block( // } else {
- Break(Source{{12, 34}}))))); // break;
- // }
- // }
- WrapInFunction(Loop(Block(), cont));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- "12:34 error: break statement in a continuing block must be the single "
- "statement of an if statement's true or false block, and that if "
- "statement must be the last statement of the continuing block\n"
- "56:78 note: non-empty true block");
+ auto* cont = Block( // continuing {
+ If(true, // if(true) {
+ Block(Source{{56, 78}}, Assign(Phony(), 1_i)), // _ = 1i;
+ Else(Block( // } else {
+ Break(Source{{12, 34}}))))); // break;
+ // }
+ // }
+ WrapInFunction(Loop(Block(), cont));
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: break statement in a continuing block must be the single "
+ "statement of an if statement's true or false block, and that if "
+ "statement must be the last statement of the continuing block\n"
+ "56:78 note: non-empty true block");
}
TEST_F(ResolverValidationTest, Stmt_BreakInIfInContinuingNotLast) {
- auto* cont = Block( // continuing {
- If(Source{{56, 78}}, true, // if(true) {
- Block(Break(Source{{12, 34}}))), // break;
- // }
- Assign(Phony(), 1)); // _ = 1;
- // }
- WrapInFunction(Loop(Block(), cont));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- "12:34 error: break statement in a continuing block must be the single "
- "statement of an if statement's true or false block, and that if "
- "statement must be the last statement of the continuing block\n"
- "56:78 note: if statement containing break statement is not the last "
- "statement of the continuing block");
+ auto* cont = Block( // continuing {
+ If(Source{{56, 78}}, true, // if(true) {
+ Block(Break(Source{{12, 34}}))), // break;
+ // }
+ Assign(Phony(), 1_i)); // _ = 1i;
+ // }
+ WrapInFunction(Loop(Block(), cont));
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: break statement in a continuing block must be the single "
+ "statement of an if statement's true or false block, and that if "
+ "statement must be the last statement of the continuing block\n"
+ "56:78 note: if statement containing break statement is not the last "
+ "statement of the continuing block");
}
TEST_F(ResolverValidationTest, Stmt_BreakNotInLoopOrSwitch) {
- WrapInFunction(Break(Source{{12, 34}}));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: break statement must be in a loop or switch case");
+ WrapInFunction(Break(Source{{12, 34}}));
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: break statement must be in a loop or switch case");
}
TEST_F(ResolverValidationTest, StructMemberDuplicateName) {
- Structure("S", {Member(Source{{12, 34}}, "a", ty.i32()),
- Member(Source{{56, 78}}, "a", ty.i32())});
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "56:78 error: redefinition of 'a'\n12:34 note: previous definition "
- "is here");
+ Structure("S",
+ {Member(Source{{12, 34}}, "a", ty.i32()), Member(Source{{56, 78}}, "a", ty.i32())});
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "56:78 error: redefinition of 'a'\n12:34 note: previous definition "
+ "is here");
}
TEST_F(ResolverValidationTest, StructMemberDuplicateNameDifferentTypes) {
- Structure("S", {Member(Source{{12, 34}}, "a", ty.bool_()),
- Member(Source{{12, 34}}, "a", ty.vec3<f32>())});
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: redefinition of 'a'\n12:34 note: previous definition "
- "is here");
+ Structure("S", {Member(Source{{12, 34}}, "a", ty.bool_()),
+ Member(Source{{12, 34}}, "a", ty.vec3<f32>())});
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: redefinition of 'a'\n12:34 note: previous definition "
+ "is here");
}
TEST_F(ResolverValidationTest, StructMemberDuplicateNamePass) {
- Structure("S", {Member("a", ty.i32()), Member("b", ty.f32())});
- Structure("S1", {Member("a", ty.i32()), Member("b", ty.f32())});
- EXPECT_TRUE(r()->Resolve());
+ Structure("S", {Member("a", ty.i32()), Member("b", ty.f32())});
+ Structure("S1", {Member("a", ty.i32()), Member("b", ty.f32())});
+ EXPECT_TRUE(r()->Resolve());
}
TEST_F(ResolverValidationTest, NonPOTStructMemberAlignAttribute) {
- Structure("S", {
- Member("a", ty.f32(), {MemberAlign(Source{{12, 34}}, 3)}),
- });
+ Structure("S", {
+ Member("a", ty.f32(), {MemberAlign(Source{{12, 34}}, 3)}),
+ });
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- "12:34 error: align value must be a positive, power-of-two integer");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: align value must be a positive, power-of-two integer");
}
TEST_F(ResolverValidationTest, ZeroStructMemberAlignAttribute) {
- Structure("S", {
- Member("a", ty.f32(), {MemberAlign(Source{{12, 34}}, 0)}),
- });
+ Structure("S", {
+ Member("a", ty.f32(), {MemberAlign(Source{{12, 34}}, 0)}),
+ });
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- "12:34 error: align value must be a positive, power-of-two integer");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: align value must be a positive, power-of-two integer");
}
TEST_F(ResolverValidationTest, ZeroStructMemberSizeAttribute) {
- Structure("S", {
- Member("a", ty.f32(), {MemberSize(Source{{12, 34}}, 0)}),
- });
+ Structure("S", {
+ Member("a", ty.f32(), {MemberSize(Source{{12, 34}}, 0)}),
+ });
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: size must be at least as big as the type's size (4)");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: size must be at least as big as the type's size (4)");
}
TEST_F(ResolverValidationTest, OffsetAndSizeAttribute) {
- Structure("S", {
- Member(Source{{12, 34}}, "a", ty.f32(),
- {MemberOffset(0), MemberSize(4)}),
- });
+ Structure("S", {
+ Member(Source{{12, 34}}, "a", ty.f32(), {MemberOffset(0), MemberSize(4)}),
+ });
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: offset attributes cannot be used with align or size "
- "attributes");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: offset attributes cannot be used with align or size "
+ "attributes");
}
TEST_F(ResolverValidationTest, OffsetAndAlignAttribute) {
- Structure("S", {
- Member(Source{{12, 34}}, "a", ty.f32(),
- {MemberOffset(0), MemberAlign(4)}),
- });
+ Structure("S", {
+ Member(Source{{12, 34}}, "a", ty.f32(), {MemberOffset(0), MemberAlign(4)}),
+ });
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: offset attributes cannot be used with align or size "
- "attributes");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: offset attributes cannot be used with align or size "
+ "attributes");
}
TEST_F(ResolverValidationTest, OffsetAndAlignAndSizeAttribute) {
- Structure("S", {
- Member(Source{{12, 34}}, "a", ty.f32(),
- {MemberOffset(0), MemberAlign(4), MemberSize(4)}),
- });
+ Structure("S", {
+ Member(Source{{12, 34}}, "a", ty.f32(),
+ {MemberOffset(0), MemberAlign(4), MemberSize(4)}),
+ });
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: offset attributes cannot be used with align or size "
- "attributes");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: offset attributes cannot be used with align or size "
+ "attributes");
}
TEST_F(ResolverTest, Expr_Constructor_Cast_Pointer) {
- auto* vf = Var("vf", ty.f32());
- auto* c =
- Construct(Source{{12, 34}}, ty.pointer<i32>(ast::StorageClass::kFunction),
- ExprList(vf));
- auto* ip = Const("ip", ty.pointer<i32>(ast::StorageClass::kFunction), c);
- WrapInFunction(Decl(vf), Decl(ip));
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), "12:34 error: type is not constructible");
+ auto* vf = Var("vf", ty.f32());
+ auto* c =
+ Construct(Source{{12, 34}}, ty.pointer<i32>(ast::StorageClass::kFunction), ExprList(vf));
+ auto* ip = Let("ip", ty.pointer<i32>(ast::StorageClass::kFunction), c);
+ WrapInFunction(Decl(vf), Decl(ip));
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: type is not constructible");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/resolver/validator.cc b/chromium/third_party/dawn/src/tint/resolver/validator.cc
new file mode 100644
index 00000000000..2b4d8a99d3d
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/resolver/validator.cc
@@ -0,0 +1,2277 @@
+// Copyright 2020 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/tint/resolver/validator.h"
+
+#include <algorithm>
+#include <limits>
+#include <utility>
+
+#include "src/tint/ast/alias.h"
+#include "src/tint/ast/array.h"
+#include "src/tint/ast/assignment_statement.h"
+#include "src/tint/ast/bitcast_expression.h"
+#include "src/tint/ast/break_statement.h"
+#include "src/tint/ast/call_statement.h"
+#include "src/tint/ast/continue_statement.h"
+#include "src/tint/ast/depth_texture.h"
+#include "src/tint/ast/disable_validation_attribute.h"
+#include "src/tint/ast/discard_statement.h"
+#include "src/tint/ast/fallthrough_statement.h"
+#include "src/tint/ast/for_loop_statement.h"
+#include "src/tint/ast/id_attribute.h"
+#include "src/tint/ast/if_statement.h"
+#include "src/tint/ast/internal_attribute.h"
+#include "src/tint/ast/interpolate_attribute.h"
+#include "src/tint/ast/loop_statement.h"
+#include "src/tint/ast/matrix.h"
+#include "src/tint/ast/pointer.h"
+#include "src/tint/ast/return_statement.h"
+#include "src/tint/ast/sampled_texture.h"
+#include "src/tint/ast/sampler.h"
+#include "src/tint/ast/storage_texture.h"
+#include "src/tint/ast/switch_statement.h"
+#include "src/tint/ast/traverse_expressions.h"
+#include "src/tint/ast/type_name.h"
+#include "src/tint/ast/unary_op_expression.h"
+#include "src/tint/ast/variable_decl_statement.h"
+#include "src/tint/ast/vector.h"
+#include "src/tint/ast/workgroup_attribute.h"
+#include "src/tint/sem/abstract_numeric.h"
+#include "src/tint/sem/array.h"
+#include "src/tint/sem/atomic.h"
+#include "src/tint/sem/call.h"
+#include "src/tint/sem/depth_multisampled_texture.h"
+#include "src/tint/sem/depth_texture.h"
+#include "src/tint/sem/for_loop_statement.h"
+#include "src/tint/sem/function.h"
+#include "src/tint/sem/if_statement.h"
+#include "src/tint/sem/loop_statement.h"
+#include "src/tint/sem/materialize.h"
+#include "src/tint/sem/member_accessor_expression.h"
+#include "src/tint/sem/multisampled_texture.h"
+#include "src/tint/sem/pointer.h"
+#include "src/tint/sem/reference.h"
+#include "src/tint/sem/sampled_texture.h"
+#include "src/tint/sem/sampler.h"
+#include "src/tint/sem/statement.h"
+#include "src/tint/sem/storage_texture.h"
+#include "src/tint/sem/struct.h"
+#include "src/tint/sem/switch_statement.h"
+#include "src/tint/sem/type_constructor.h"
+#include "src/tint/sem/type_conversion.h"
+#include "src/tint/sem/variable.h"
+#include "src/tint/utils/defer.h"
+#include "src/tint/utils/map.h"
+#include "src/tint/utils/math.h"
+#include "src/tint/utils/reverse.h"
+#include "src/tint/utils/scoped_assignment.h"
+#include "src/tint/utils/transform.h"
+
+namespace tint::resolver {
+namespace {
+
+bool IsValidStorageTextureDimension(ast::TextureDimension dim) {
+ switch (dim) {
+ case ast::TextureDimension::k1d:
+ case ast::TextureDimension::k2d:
+ case ast::TextureDimension::k2dArray:
+ case ast::TextureDimension::k3d:
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool IsValidStorageTextureTexelFormat(ast::TexelFormat format) {
+ switch (format) {
+ case ast::TexelFormat::kR32Uint:
+ case ast::TexelFormat::kR32Sint:
+ case ast::TexelFormat::kR32Float:
+ case ast::TexelFormat::kRg32Uint:
+ case ast::TexelFormat::kRg32Sint:
+ case ast::TexelFormat::kRg32Float:
+ case ast::TexelFormat::kRgba8Unorm:
+ case ast::TexelFormat::kRgba8Snorm:
+ case ast::TexelFormat::kRgba8Uint:
+ case ast::TexelFormat::kRgba8Sint:
+ case ast::TexelFormat::kRgba16Uint:
+ case ast::TexelFormat::kRgba16Sint:
+ case ast::TexelFormat::kRgba16Float:
+ case ast::TexelFormat::kRgba32Uint:
+ case ast::TexelFormat::kRgba32Sint:
+ case ast::TexelFormat::kRgba32Float:
+ return true;
+ default:
+ return false;
+ }
+}
+
+// Helper to stringify a pipeline IO attribute.
+std::string attr_to_str(const ast::Attribute* attr) {
+ std::stringstream str;
+ if (auto* builtin = attr->As<ast::BuiltinAttribute>()) {
+ str << "builtin(" << builtin->builtin << ")";
+ } else if (auto* location = attr->As<ast::LocationAttribute>()) {
+ str << "location(" << location->value << ")";
+ }
+ return str.str();
+}
+
+template <typename CALLBACK>
+void TraverseCallChain(diag::List& diagnostics,
+ const sem::Function* from,
+ const sem::Function* to,
+ CALLBACK&& callback) {
+ for (auto* f : from->TransitivelyCalledFunctions()) {
+ if (f == to) {
+ callback(f);
+ return;
+ }
+ if (f->TransitivelyCalledFunctions().contains(to)) {
+ TraverseCallChain(diagnostics, f, to, callback);
+ callback(f);
+ return;
+ }
+ }
+ TINT_ICE(Resolver, diagnostics) << "TraverseCallChain() 'from' does not transitively call 'to'";
+}
+
+} // namespace
+
+Validator::Validator(ProgramBuilder* builder, SemHelper& sem)
+ : symbols_(builder->Symbols()), diagnostics_(builder->Diagnostics()), sem_(sem) {}
+
+Validator::~Validator() = default;
+
+void Validator::AddError(const std::string& msg, const Source& source) const {
+ diagnostics_.add_error(diag::System::Resolver, msg, source);
+}
+
+void Validator::AddWarning(const std::string& msg, const Source& source) const {
+ diagnostics_.add_warning(diag::System::Resolver, msg, source);
+}
+
+void Validator::AddNote(const std::string& msg, const Source& source) const {
+ diagnostics_.add_note(diag::System::Resolver, msg, source);
+}
+
+// https://gpuweb.github.io/gpuweb/wgsl/#plain-types-section
+bool Validator::IsPlain(const sem::Type* type) const {
+ return type->is_scalar() ||
+ type->IsAnyOf<sem::Atomic, sem::Vector, sem::Matrix, sem::Array, sem::Struct>();
+}
+
+// https://gpuweb.github.io/gpuweb/wgsl/#fixed-footprint-types
+bool Validator::IsFixedFootprint(const sem::Type* type) const {
+ return Switch(
+ type, //
+ [&](const sem::Vector*) { return true; }, //
+ [&](const sem::Matrix*) { return true; }, //
+ [&](const sem::Atomic*) { return true; },
+ [&](const sem::Array* arr) {
+ return !arr->IsRuntimeSized() && IsFixedFootprint(arr->ElemType());
+ },
+ [&](const sem::Struct* str) {
+ for (auto* member : str->Members()) {
+ if (!IsFixedFootprint(member->Type())) {
+ return false;
+ }
+ }
+ return true;
+ },
+ [&](Default) { return type->is_scalar(); });
+}
+
+// https://gpuweb.github.io/gpuweb/wgsl.html#host-shareable-types
+bool Validator::IsHostShareable(const sem::Type* type) const {
+ if (type->IsAnyOf<sem::I32, sem::U32, sem::F32, sem::F16>()) {
+ return true;
+ }
+ return Switch(
+ type, //
+ [&](const sem::Vector* vec) { return IsHostShareable(vec->type()); },
+ [&](const sem::Matrix* mat) { return IsHostShareable(mat->type()); },
+ [&](const sem::Array* arr) { return IsHostShareable(arr->ElemType()); },
+ [&](const sem::Struct* str) {
+ for (auto* member : str->Members()) {
+ if (!IsHostShareable(member->Type())) {
+ return false;
+ }
+ }
+ return true;
+ },
+ [&](const sem::Atomic* atomic) { return IsHostShareable(atomic->Type()); });
+}
+
+// https://gpuweb.github.io/gpuweb/wgsl.html#storable-types
+bool Validator::IsStorable(const sem::Type* type) const {
+ return IsPlain(type) || type->IsAnyOf<sem::Texture, sem::Sampler>();
+}
+
+const ast::Statement* Validator::ClosestContinuing(bool stop_at_loop,
+ sem::Statement* current_statement) const {
+ for (const auto* s = current_statement; s != nullptr; s = s->Parent()) {
+ if (stop_at_loop && s->Is<sem::LoopStatement>()) {
+ break;
+ }
+ if (s->Is<sem::LoopContinuingBlockStatement>()) {
+ return s->Declaration();
+ }
+ if (auto* f = As<sem::ForLoopStatement>(s->Parent())) {
+ if (f->Declaration()->continuing == s->Declaration()) {
+ return s->Declaration();
+ }
+ if (stop_at_loop) {
+ break;
+ }
+ }
+ }
+ return nullptr;
+}
+
+bool Validator::Atomic(const ast::Atomic* a, const sem::Atomic* s) const {
+ // https://gpuweb.github.io/gpuweb/wgsl/#atomic-types
+ // T must be either u32 or i32.
+ if (!s->Type()->IsAnyOf<sem::U32, sem::I32>()) {
+ AddError("atomic only supports i32 or u32 types", a->type ? a->type->source : a->source);
+ return false;
+ }
+ return true;
+}
+
+bool Validator::StorageTexture(const ast::StorageTexture* t) const {
+ switch (t->access) {
+ case ast::Access::kWrite:
+ break;
+ case ast::Access::kUndefined:
+ AddError("storage texture missing access control", t->source);
+ return false;
+ default:
+ AddError("storage textures currently only support 'write' access control", t->source);
+ return false;
+ }
+
+ if (!IsValidStorageTextureDimension(t->dim)) {
+ AddError("cube dimensions for storage textures are not supported", t->source);
+ return false;
+ }
+
+ if (!IsValidStorageTextureTexelFormat(t->format)) {
+ AddError(
+ "image format must be one of the texel formats specified for storage "
+ "textues in https://gpuweb.github.io/gpuweb/wgsl/#texel-formats",
+ t->source);
+ return false;
+ }
+ return true;
+}
+
+bool Validator::Materialize(const sem::Materialize* m) const {
+ auto* from = m->Expr()->Type();
+ auto* to = m->Type();
+
+ if (sem::Type::ConversionRank(from, to) == sem::Type::kNoConversion) {
+ AddError("cannot convert value of type '" + sem_.TypeNameOf(from) + "' to type '" +
+ sem_.TypeNameOf(to) + "'",
+ m->Expr()->Declaration()->source);
+ return false;
+ }
+ return true;
+}
+
+bool Validator::VariableConstructorOrCast(const ast::Variable* var,
+ ast::StorageClass storage_class,
+ const sem::Type* storage_ty,
+ const sem::Type* rhs_ty) const {
+ auto* value_type = rhs_ty->UnwrapRef(); // Implicit load of RHS
+
+ // Value type has to match storage type
+ if (storage_ty != value_type) {
+ std::string decl = var->is_const ? "let" : "var";
+ AddError("cannot initialize " + decl + " of type '" + sem_.TypeNameOf(storage_ty) +
+ "' with value of type '" + sem_.TypeNameOf(rhs_ty) + "'",
+ var->source);
+ return false;
+ }
+
+ if (!var->is_const) {
+ switch (storage_class) {
+ case ast::StorageClass::kPrivate:
+ case ast::StorageClass::kFunction:
+ break; // Allowed an initializer
+ default:
+ // https://gpuweb.github.io/gpuweb/wgsl/#var-and-let
+ // Optionally has an initializer expression, if the variable is in the
+ // private or function storage classes.
+ AddError("var of storage class '" + std::string(ast::ToString(storage_class)) +
+ "' cannot have an initializer. var initializers are only "
+ "supported for the storage classes "
+ "'private' and 'function'",
+ var->source);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool Validator::StorageClassLayout(const sem::Type* store_ty,
+ ast::StorageClass sc,
+ Source source,
+ ValidTypeStorageLayouts& layouts) const {
+ // https://gpuweb.github.io/gpuweb/wgsl/#storage-class-layout-constraints
+
+ auto is_uniform_struct_or_array = [sc](const sem::Type* ty) {
+ return sc == ast::StorageClass::kUniform && ty->IsAnyOf<sem::Array, sem::Struct>();
+ };
+
+ auto is_uniform_struct = [sc](const sem::Type* ty) {
+ return sc == ast::StorageClass::kUniform && ty->Is<sem::Struct>();
+ };
+
+ auto required_alignment_of = [&](const sem::Type* ty) {
+ uint32_t actual_align = ty->Align();
+ uint32_t required_align = actual_align;
+ if (is_uniform_struct_or_array(ty)) {
+ required_align = utils::RoundUp(16u, actual_align);
+ }
+ return required_align;
+ };
+
+ auto member_name_of = [this](const sem::StructMember* sm) {
+ return symbols_.NameFor(sm->Declaration()->symbol);
+ };
+
+ // Cache result of type + storage class pair.
+ if (!layouts.emplace(store_ty, sc).second) {
+ return true;
+ }
+
+ if (!ast::IsHostShareable(sc)) {
+ return true;
+ }
+
+ if (auto* str = store_ty->As<sem::Struct>()) {
+ for (size_t i = 0; i < str->Members().size(); ++i) {
+ auto* const m = str->Members()[i];
+ uint32_t required_align = required_alignment_of(m->Type());
+
+ // Recurse into the member type.
+ if (!StorageClassLayout(m->Type(), sc, m->Declaration()->type->source, layouts)) {
+ AddNote("see layout of struct:\n" + str->Layout(symbols_),
+ str->Declaration()->source);
+ return false;
+ }
+
+ // Validate that member is at a valid byte offset
+ if (m->Offset() % required_align != 0) {
+ AddError("the offset of a struct member of type '" +
+ m->Type()->UnwrapRef()->FriendlyName(symbols_) +
+ "' in storage class '" + ast::ToString(sc) +
+ "' must be a multiple of " + std::to_string(required_align) +
+ " bytes, but '" + member_name_of(m) + "' is currently at offset " +
+ std::to_string(m->Offset()) + ". Consider setting @align(" +
+ std::to_string(required_align) + ") on this member",
+ m->Declaration()->source);
+
+ AddNote("see layout of struct:\n" + str->Layout(symbols_),
+ str->Declaration()->source);
+
+ if (auto* member_str = m->Type()->As<sem::Struct>()) {
+ AddNote("and layout of struct member:\n" + member_str->Layout(symbols_),
+ member_str->Declaration()->source);
+ }
+
+ return false;
+ }
+
+ // For uniform buffers, validate that the number of bytes between the
+ // previous member of type struct and the current is a multiple of 16
+ // bytes.
+ auto* const prev_member = (i == 0) ? nullptr : str->Members()[i - 1];
+ if (prev_member && is_uniform_struct(prev_member->Type())) {
+ const uint32_t prev_to_curr_offset = m->Offset() - prev_member->Offset();
+ if (prev_to_curr_offset % 16 != 0) {
+ AddError(
+ "uniform storage requires that the number of bytes between the "
+ "start of the previous member of type struct and the current "
+ "member be a multiple of 16 bytes, but there are currently " +
+ std::to_string(prev_to_curr_offset) + " bytes between '" +
+ member_name_of(prev_member) + "' and '" + member_name_of(m) +
+ "'. Consider setting @align(16) on this member",
+ m->Declaration()->source);
+
+ AddNote("see layout of struct:\n" + str->Layout(symbols_),
+ str->Declaration()->source);
+
+ auto* prev_member_str = prev_member->Type()->As<sem::Struct>();
+ AddNote("and layout of previous member struct:\n" +
+ prev_member_str->Layout(symbols_),
+ prev_member_str->Declaration()->source);
+ return false;
+ }
+ }
+ }
+ }
+
+ // For uniform buffer array members, validate that array elements are
+ // aligned to 16 bytes
+ if (auto* arr = store_ty->As<sem::Array>()) {
+ // Recurse into the element type.
+ // TODO(crbug.com/tint/1388): Ideally we'd pass the source for nested
+ // element type here, but we can't easily get that from the semantic node.
+ // We should consider recursing through the AST type nodes instead.
+ if (!StorageClassLayout(arr->ElemType(), sc, source, layouts)) {
+ return false;
+ }
+
+ if (sc == ast::StorageClass::kUniform) {
+ // We already validated that this array member is itself aligned to 16
+ // bytes above, so we only need to validate that stride is a multiple
+ // of 16 bytes.
+ if (arr->Stride() % 16 != 0) {
+ // Since WGSL has no stride attribute, try to provide a useful hint
+ // for how the shader author can resolve the issue.
+ std::string hint;
+ if (arr->ElemType()->is_scalar()) {
+ hint =
+ "Consider using a vector or struct as the element type "
+ "instead.";
+ } else if (auto* vec = arr->ElemType()->As<sem::Vector>();
+ vec && vec->type()->Size() == 4) {
+ hint = "Consider using a vec4 instead.";
+ } else if (arr->ElemType()->Is<sem::Struct>()) {
+ hint =
+ "Consider using the @size attribute on the last struct "
+ "member.";
+ } else {
+ hint =
+ "Consider wrapping the element type in a struct and using "
+ "the "
+ "@size attribute.";
+ }
+ AddError(
+ "uniform storage requires that array elements be aligned to 16 "
+ "bytes, but array element alignment is currently " +
+ std::to_string(arr->Stride()) + ". " + hint,
+ source);
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
+bool Validator::StorageClassLayout(const sem::Variable* var,
+ ValidTypeStorageLayouts& layouts) const {
+ if (auto* str = var->Type()->UnwrapRef()->As<sem::Struct>()) {
+ if (!StorageClassLayout(str, var->StorageClass(), str->Declaration()->source, layouts)) {
+ AddNote("see declaration of variable", var->Declaration()->source);
+ return false;
+ }
+ } else {
+ Source source = var->Declaration()->source;
+ if (var->Declaration()->type) {
+ source = var->Declaration()->type->source;
+ }
+ if (!StorageClassLayout(var->Type()->UnwrapRef(), var->StorageClass(), source, layouts)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool Validator::GlobalVariable(
+ const sem::Variable* var,
+ std::unordered_map<uint32_t, const sem::Variable*> constant_ids,
+ std::unordered_map<const sem::Type*, const Source&> atomic_composite_info) const {
+ auto* decl = var->Declaration();
+ if (!NoDuplicateAttributes(decl->attributes)) {
+ return false;
+ }
+
+ for (auto* attr : decl->attributes) {
+ if (decl->is_const) {
+ if (auto* id_attr = attr->As<ast::IdAttribute>()) {
+ uint32_t id = id_attr->value;
+ auto it = constant_ids.find(id);
+ if (it != constant_ids.end() && it->second != var) {
+ AddError("pipeline constant IDs must be unique", attr->source);
+ AddNote(
+ "a pipeline constant with an ID of " + std::to_string(id) +
+ " was previously declared "
+ "here:",
+ ast::GetAttribute<ast::IdAttribute>(it->second->Declaration()->attributes)
+ ->source);
+ return false;
+ }
+ if (id > 65535) {
+ AddError("pipeline constant IDs must be between 0 and 65535", attr->source);
+ return false;
+ }
+ } else {
+ AddError("attribute is not valid for constants", attr->source);
+ return false;
+ }
+ } else {
+ bool is_shader_io_attribute =
+ attr->IsAnyOf<ast::BuiltinAttribute, ast::InterpolateAttribute,
+ ast::InvariantAttribute, ast::LocationAttribute>();
+ bool has_io_storage_class = var->StorageClass() == ast::StorageClass::kInput ||
+ var->StorageClass() == ast::StorageClass::kOutput;
+ if (!(attr->IsAnyOf<ast::BindingAttribute, ast::GroupAttribute,
+ ast::InternalAttribute>()) &&
+ (!is_shader_io_attribute || !has_io_storage_class)) {
+ AddError("attribute is not valid for variables", attr->source);
+ return false;
+ }
+ }
+ }
+
+ if (var->StorageClass() == ast::StorageClass::kFunction) {
+ AddError(
+ "variables declared at module scope must not be in the function "
+ "storage class",
+ decl->source);
+ return false;
+ }
+
+ auto binding_point = decl->BindingPoint();
+ switch (var->StorageClass()) {
+ case ast::StorageClass::kUniform:
+ case ast::StorageClass::kStorage:
+ case ast::StorageClass::kHandle: {
+ // https://gpuweb.github.io/gpuweb/wgsl/#resource-interface
+ // Each resource variable must be declared with both group and binding
+ // attributes.
+ if (!binding_point) {
+ AddError(
+ "resource variables require @group and @binding "
+ "attributes",
+ decl->source);
+ return false;
+ }
+ break;
+ }
+ default:
+ if (binding_point.binding || binding_point.group) {
+ // https://gpuweb.github.io/gpuweb/wgsl/#attribute-binding
+ // Must only be applied to a resource variable
+ AddError(
+ "non-resource variables must not have @group or @binding "
+ "attributes",
+ decl->source);
+ return false;
+ }
+ }
+
+ // https://gpuweb.github.io/gpuweb/wgsl/#variable-declaration
+ // The access mode always has a default, and except for variables in the
+ // storage storage class, must not be written.
+ if (var->StorageClass() != ast::StorageClass::kStorage &&
+ decl->declared_access != ast::Access::kUndefined) {
+ AddError("only variables in <storage> storage class may declare an access mode",
+ decl->source);
+ return false;
+ }
+
+ if (!decl->is_const) {
+ if (!AtomicVariable(var, atomic_composite_info)) {
+ return false;
+ }
+ }
+
+ return Variable(var);
+}
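The pipeline-constant checks above boil down to two conditions: an ID must be at most 65535 and must not be reused by a different constant. A hedged standalone sketch, where the map stands in for 'constant_ids' and all names are illustrative:

    #include <cstdint>
    #include <optional>
    #include <string>
    #include <unordered_map>

    // Returns an error message on failure, std::nullopt on success.
    std::optional<std::string> CheckOverrideId(
        std::unordered_map<uint32_t, std::string>& seen_ids,
        uint32_t id,
        const std::string& var_name) {
        if (id > 65535u) {
            return "pipeline constant IDs must be between 0 and 65535";
        }
        auto [it, inserted] = seen_ids.emplace(id, var_name);
        if (!inserted && it->second != var_name) {
            return "pipeline constant ID " + std::to_string(id) +
                   " already used by '" + it->second + "'";
        }
        return std::nullopt;
    }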
+
+// https://gpuweb.github.io/gpuweb/wgsl/#atomic-types
+// Atomic types may only be instantiated by variables in the workgroup storage
+// class or by storage buffer variables with a read_write access mode.
+bool Validator::AtomicVariable(
+ const sem::Variable* var,
+ std::unordered_map<const sem::Type*, const Source&> atomic_composite_info) const {
+ auto sc = var->StorageClass();
+ auto* decl = var->Declaration();
+ auto access = var->Access();
+ auto* type = var->Type()->UnwrapRef();
+ auto source = decl->type ? decl->type->source : decl->source;
+
+ if (type->Is<sem::Atomic>()) {
+ if (sc != ast::StorageClass::kWorkgroup && sc != ast::StorageClass::kStorage) {
+ AddError("atomic variables must have <storage> or <workgroup> storage class", source);
+ return false;
+ }
+ } else if (type->IsAnyOf<sem::Struct, sem::Array>()) {
+ auto found = atomic_composite_info.find(type);
+ if (found != atomic_composite_info.end()) {
+ if (sc != ast::StorageClass::kStorage && sc != ast::StorageClass::kWorkgroup) {
+ AddError("atomic variables must have <storage> or <workgroup> storage class",
+ source);
+ AddNote("atomic sub-type of '" + sem_.TypeNameOf(type) + "' is declared here",
+ found->second);
+ return false;
+ } else if (sc == ast::StorageClass::kStorage && access != ast::Access::kReadWrite) {
+ AddError(
+ "atomic variables in <storage> storage class must have read_write "
+ "access mode",
+ source);
+ AddNote("atomic sub-type of '" + sem_.TypeNameOf(type) + "' is declared here",
+ found->second);
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
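The atomic placement rule quoted above reduces to a small predicate. A standalone sketch with stand-in enums (the real code uses ast::StorageClass and ast::Access):

    enum class StorageClass { kStorage, kWorkgroup, kPrivate, kFunction };
    enum class Access { kRead, kWrite, kReadWrite };

    // Atomics, or composites containing them, are only valid in workgroup
    // storage or in storage buffers declared with read_write access.
    bool AtomicPlacementOk(StorageClass sc, Access access) {
        if (sc == StorageClass::kWorkgroup) {
            return true;
        }
        return sc == StorageClass::kStorage && access == Access::kReadWrite;
    }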
+
+bool Validator::Variable(const sem::Variable* var) const {
+ auto* decl = var->Declaration();
+ auto* storage_ty = var->Type()->UnwrapRef();
+
+ if (var->Is<sem::GlobalVariable>()) {
+ auto name = symbols_.NameFor(decl->symbol);
+ if (sem::ParseBuiltinType(name) != sem::BuiltinType::kNone) {
+ auto* kind = var->Declaration()->is_const ? "let" : "var";
+ AddError(
+ "'" + name + "' is a builtin and cannot be redeclared as a module-scope " + kind,
+ decl->source);
+ return false;
+ }
+ }
+
+ if (!decl->is_const && !IsStorable(storage_ty)) {
+ AddError(sem_.TypeNameOf(storage_ty) + " cannot be used as the type of a var",
+ decl->source);
+ return false;
+ }
+
+ if (decl->is_const && !var->Is<sem::Parameter>() &&
+ !(storage_ty->IsConstructible() || storage_ty->Is<sem::Pointer>())) {
+ AddError(sem_.TypeNameOf(storage_ty) + " cannot be used as the type of a let",
+ decl->source);
+ return false;
+ }
+
+ if (auto* r = storage_ty->As<sem::SampledTexture>()) {
+ if (!r->type()->UnwrapRef()->is_numeric_scalar()) {
+ AddError("texture_2d<type>: type must be f32, i32 or u32", decl->source);
+ return false;
+ }
+ }
+
+ if (auto* r = storage_ty->As<sem::MultisampledTexture>()) {
+ if (r->dim() != ast::TextureDimension::k2d) {
+ AddError("only 2d multisampled textures are supported", decl->source);
+ return false;
+ }
+
+ if (!r->type()->UnwrapRef()->is_numeric_scalar()) {
+ AddError("texture_multisampled_2d<type>: type must be f32, i32 or u32", decl->source);
+ return false;
+ }
+ }
+
+ if (var->Is<sem::LocalVariable>() && !decl->is_const &&
+ IsValidationEnabled(decl->attributes, ast::DisabledValidation::kIgnoreStorageClass)) {
+ if (!var->Type()->UnwrapRef()->IsConstructible()) {
+ AddError("function variable must have a constructible type",
+ decl->type ? decl->type->source : decl->source);
+ return false;
+ }
+ }
+
+ if (storage_ty->is_handle() && decl->declared_storage_class != ast::StorageClass::kNone) {
+ // https://gpuweb.github.io/gpuweb/wgsl/#module-scope-variables
+ // If the store type is a texture type or a sampler type, then the
+ // variable declaration must not have a storage class attribute. The
+ // storage class will always be handle.
+ AddError(
+ "variables of type '" + sem_.TypeNameOf(storage_ty) + "' must not have a storage class",
+ decl->source);
+ return false;
+ }
+
+ if (IsValidationEnabled(decl->attributes, ast::DisabledValidation::kIgnoreStorageClass) &&
+ (decl->declared_storage_class == ast::StorageClass::kInput ||
+ decl->declared_storage_class == ast::StorageClass::kOutput)) {
+ AddError("invalid use of input/output storage class", decl->source);
+ return false;
+ }
+ return true;
+}
+
+bool Validator::FunctionParameter(const ast::Function* func, const sem::Variable* var) const {
+ if (!Variable(var)) {
+ return false;
+ }
+
+ auto* decl = var->Declaration();
+
+ for (auto* attr : decl->attributes) {
+ if (!func->IsEntryPoint() && !attr->Is<ast::InternalAttribute>()) {
+ AddError("attribute is not valid for non-entry point function parameters",
+ attr->source);
+ return false;
+ } else if (!attr->IsAnyOf<ast::BuiltinAttribute, ast::InvariantAttribute,
+ ast::LocationAttribute, ast::InterpolateAttribute,
+ ast::InternalAttribute>() &&
+ (IsValidationEnabled(decl->attributes,
+ ast::DisabledValidation::kEntryPointParameter) &&
+ IsValidationEnabled(
+ decl->attributes,
+ ast::DisabledValidation::kIgnoreConstructibleFunctionParameter))) {
+ AddError("attribute is not valid for function parameters", attr->source);
+ return false;
+ }
+ }
+
+ if (auto* ref = var->Type()->As<sem::Pointer>()) {
+ auto sc = ref->StorageClass();
+ if (!(sc == ast::StorageClass::kFunction || sc == ast::StorageClass::kPrivate ||
+ sc == ast::StorageClass::kWorkgroup) &&
+ IsValidationEnabled(decl->attributes, ast::DisabledValidation::kIgnoreStorageClass)) {
+ std::stringstream ss;
+ ss << "function parameter of pointer type cannot be in '" << sc << "' storage class";
+ AddError(ss.str(), decl->source);
+ return false;
+ }
+ }
+
+ if (IsPlain(var->Type())) {
+ if (!var->Type()->IsConstructible() &&
+ IsValidationEnabled(decl->attributes,
+ ast::DisabledValidation::kIgnoreConstructibleFunctionParameter)) {
+ AddError("store type of function parameter must be a constructible type", decl->source);
+ return false;
+ }
+ } else if (!var->Type()->IsAnyOf<sem::Texture, sem::Sampler, sem::Pointer>()) {
+ AddError("store type of function parameter cannot be " + sem_.TypeNameOf(var->Type()),
+ decl->source);
+ return false;
+ }
+
+ return true;
+}
+
+bool Validator::BuiltinAttribute(const ast::BuiltinAttribute* attr,
+ const sem::Type* storage_ty,
+ ast::PipelineStage stage,
+ const bool is_input) const {
+ auto* type = storage_ty->UnwrapRef();
+ std::stringstream stage_name;
+ stage_name << stage;
+ bool is_stage_mismatch = false;
+ bool is_output = !is_input;
+ switch (attr->builtin) {
+ case ast::Builtin::kPosition:
+ if (stage != ast::PipelineStage::kNone &&
+ !((is_input && stage == ast::PipelineStage::kFragment) ||
+ (is_output && stage == ast::PipelineStage::kVertex))) {
+ is_stage_mismatch = true;
+ }
+ if (!(type->is_float_vector() && type->As<sem::Vector>()->Width() == 4)) {
+ AddError("store type of " + attr_to_str(attr) + " must be 'vec4<f32>'",
+ attr->source);
+ return false;
+ }
+ break;
+ case ast::Builtin::kGlobalInvocationId:
+ case ast::Builtin::kLocalInvocationId:
+ case ast::Builtin::kNumWorkgroups:
+ case ast::Builtin::kWorkgroupId:
+ if (stage != ast::PipelineStage::kNone &&
+ !(stage == ast::PipelineStage::kCompute && is_input)) {
+ is_stage_mismatch = true;
+ }
+ if (!(type->is_unsigned_integer_vector() && type->As<sem::Vector>()->Width() == 3)) {
+ AddError("store type of " + attr_to_str(attr) + " must be 'vec3<u32>'",
+ attr->source);
+ return false;
+ }
+ break;
+ case ast::Builtin::kFragDepth:
+ if (stage != ast::PipelineStage::kNone &&
+ !(stage == ast::PipelineStage::kFragment && !is_input)) {
+ is_stage_mismatch = true;
+ }
+ if (!type->Is<sem::F32>()) {
+ AddError("store type of " + attr_to_str(attr) + " must be 'f32'", attr->source);
+ return false;
+ }
+ break;
+ case ast::Builtin::kFrontFacing:
+ if (stage != ast::PipelineStage::kNone &&
+ !(stage == ast::PipelineStage::kFragment && is_input)) {
+ is_stage_mismatch = true;
+ }
+ if (!type->Is<sem::Bool>()) {
+ AddError("store type of " + attr_to_str(attr) + " must be 'bool'", attr->source);
+ return false;
+ }
+ break;
+ case ast::Builtin::kLocalInvocationIndex:
+ if (stage != ast::PipelineStage::kNone &&
+ !(stage == ast::PipelineStage::kCompute && is_input)) {
+ is_stage_mismatch = true;
+ }
+ if (!type->Is<sem::U32>()) {
+ AddError("store type of " + attr_to_str(attr) + " must be 'u32'", attr->source);
+ return false;
+ }
+ break;
+ case ast::Builtin::kVertexIndex:
+ case ast::Builtin::kInstanceIndex:
+ if (stage != ast::PipelineStage::kNone &&
+ !(stage == ast::PipelineStage::kVertex && is_input)) {
+ is_stage_mismatch = true;
+ }
+ if (!type->Is<sem::U32>()) {
+ AddError("store type of " + attr_to_str(attr) + " must be 'u32'", attr->source);
+ return false;
+ }
+ break;
+ case ast::Builtin::kSampleMask:
+ if (stage != ast::PipelineStage::kNone && !(stage == ast::PipelineStage::kFragment)) {
+ is_stage_mismatch = true;
+ }
+ if (!type->Is<sem::U32>()) {
+ AddError("store type of " + attr_to_str(attr) + " must be 'u32'", attr->source);
+ return false;
+ }
+ break;
+ case ast::Builtin::kSampleIndex:
+ if (stage != ast::PipelineStage::kNone &&
+ !(stage == ast::PipelineStage::kFragment && is_input)) {
+ is_stage_mismatch = true;
+ }
+ if (!type->Is<sem::U32>()) {
+ AddError("store type of " + attr_to_str(attr) + " must be 'u32'", attr->source);
+ return false;
+ }
+ break;
+ default:
+ break;
+ }
+
+ if (is_stage_mismatch) {
+ AddError(attr_to_str(attr) + " cannot be used in " +
+ (is_input ? "input of " : "output of ") + stage_name.str() + " pipeline stage",
+ attr->source);
+ return false;
+ }
+
+ return true;
+}
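The switch above pairs each builtin with a required store type (plus a stage restriction, omitted here). A compact illustrative lookup of just the type half, using plain strings rather than Tint's enums:

    #include <string>

    std::string RequiredBuiltinStoreType(const std::string& builtin) {
        if (builtin == "position") {
            return "vec4<f32>";
        }
        if (builtin == "global_invocation_id" || builtin == "local_invocation_id" ||
            builtin == "num_workgroups" || builtin == "workgroup_id") {
            return "vec3<u32>";
        }
        if (builtin == "frag_depth") {
            return "f32";
        }
        if (builtin == "front_facing") {
            return "bool";
        }
        // local_invocation_index, vertex_index, instance_index,
        // sample_index and sample_mask all require u32.
        return "u32";
    }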
+
+bool Validator::InterpolateAttribute(const ast::InterpolateAttribute* attr,
+ const sem::Type* storage_ty) const {
+ auto* type = storage_ty->UnwrapRef();
+
+ if (type->is_integer_scalar_or_vector() && attr->type != ast::InterpolationType::kFlat) {
+ AddError("interpolation type must be 'flat' for integral user-defined IO types",
+ attr->source);
+ return false;
+ }
+
+ if (attr->type == ast::InterpolationType::kFlat &&
+ attr->sampling != ast::InterpolationSampling::kNone) {
+ AddError("flat interpolation attribute must not have a sampling parameter", attr->source);
+ return false;
+ }
+
+ return true;
+}
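The two interpolation rules above can be read as one small predicate. A standalone sketch with stand-in enums for ast::InterpolationType and ast::InterpolationSampling:

    enum class InterpolationType { kPerspective, kLinear, kFlat };
    enum class InterpolationSampling { kNone, kCenter, kCentroid, kSample };

    // Integral user-defined IO must use 'flat', and 'flat' must not specify a
    // sampling mode.
    bool InterpolateOk(bool type_is_integral,
                       InterpolationType type,
                       InterpolationSampling sampling) {
        if (type_is_integral && type != InterpolationType::kFlat) {
            return false;
        }
        if (type == InterpolationType::kFlat &&
            sampling != InterpolationSampling::kNone) {
            return false;
        }
        return true;
    }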
+
+bool Validator::Function(const sem::Function* func, ast::PipelineStage stage) const {
+ auto* decl = func->Declaration();
+
+ auto name = symbols_.NameFor(decl->symbol);
+ if (sem::ParseBuiltinType(name) != sem::BuiltinType::kNone) {
+ AddError("'" + name + "' is a builtin and cannot be redeclared as a function",
+ decl->source);
+ return false;
+ }
+
+ for (auto* attr : decl->attributes) {
+ if (attr->Is<ast::WorkgroupAttribute>()) {
+ if (decl->PipelineStage() != ast::PipelineStage::kCompute) {
+ AddError("the workgroup_size attribute is only valid for compute stages",
+ attr->source);
+ return false;
+ }
+ } else if (!attr->IsAnyOf<ast::StageAttribute, ast::InternalAttribute>()) {
+ AddError("attribute is not valid for functions", attr->source);
+ return false;
+ }
+ }
+
+ if (decl->params.size() > 255) {
+ AddError("functions may declare at most 255 parameters", decl->source);
+ return false;
+ }
+
+ for (size_t i = 0; i < decl->params.size(); i++) {
+ if (!FunctionParameter(decl, func->Parameters()[i])) {
+ return false;
+ }
+ }
+
+ if (!func->ReturnType()->Is<sem::Void>()) {
+ if (!func->ReturnType()->IsConstructible()) {
+ AddError("function return type must be a constructible type",
+ decl->return_type->source);
+ return false;
+ }
+
+ if (decl->body) {
+ sem::Behaviors behaviors{sem::Behavior::kNext};
+ if (auto* last = decl->body->Last()) {
+ behaviors = sem_.Get(last)->Behaviors();
+ }
+ if (behaviors.Contains(sem::Behavior::kNext)) {
+ AddError("missing return at end of function", decl->source);
+ return false;
+ }
+ } else if (IsValidationEnabled(decl->attributes,
+ ast::DisabledValidation::kFunctionHasNoBody)) {
+ TINT_ICE(Resolver, diagnostics_)
+ << "Function " << symbols_.NameFor(decl->symbol) << " has no body";
+ }
+
+ for (auto* attr : decl->return_type_attributes) {
+ if (!decl->IsEntryPoint()) {
+ AddError("attribute is not valid for non-entry point function return types",
+ attr->source);
+ return false;
+ }
+ if (!attr->IsAnyOf<ast::BuiltinAttribute, ast::InternalAttribute,
+ ast::LocationAttribute, ast::InterpolateAttribute,
+ ast::InvariantAttribute>() &&
+ (IsValidationEnabled(decl->attributes,
+ ast::DisabledValidation::kEntryPointParameter) &&
+ IsValidationEnabled(
+ decl->attributes,
+ ast::DisabledValidation::kIgnoreConstructibleFunctionParameter))) {
+ AddError("attribute is not valid for entry point return types", attr->source);
+ return false;
+ }
+ }
+ }
+
+ if (decl->IsEntryPoint()) {
+ if (!EntryPoint(func, stage)) {
+ return false;
+ }
+ }
+
+ // https://www.w3.org/TR/WGSL/#behaviors-rules
+ // a function behavior is always one of {}, {Next}, {Discard}, or
+ // {Next, Discard}.
+ if (func->Behaviors() != sem::Behaviors{} && // NOLINT: bad warning
+ func->Behaviors() != sem::Behavior::kNext && func->Behaviors() != sem::Behavior::kDiscard &&
+ func->Behaviors() != sem::Behaviors{sem::Behavior::kNext, //
+ sem::Behavior::kDiscard}) {
+ TINT_ICE(Resolver, diagnostics_)
+ << "function '" << name << "' behaviors are: " << func->Behaviors();
+ }
+
+ return true;
+}
+
+bool Validator::EntryPoint(const sem::Function* func, ast::PipelineStage stage) const {
+ auto* decl = func->Declaration();
+
+ // Use a lambda to validate the entry point attributes for a type.
+ // Persistent state is used to track which builtins and locations have
+ // already been seen, in order to catch conflicts.
+    // TODO(jrprice): This state could be stored in sem::Function instead,
+    // since it would be useful there too.
+    // TODO(jrprice): This state could be stored in sem::Function instead,
+    // since it would be useful there too.
+ std::unordered_set<ast::Builtin> builtins;
+ std::unordered_set<uint32_t> locations;
+ enum class ParamOrRetType {
+ kParameter,
+ kReturnType,
+ };
+
+ // Inner lambda that is applied to a type and all of its members.
+ auto validate_entry_point_attributes_inner = [&](const ast::AttributeList& attrs,
+ const sem::Type* ty, Source source,
+ ParamOrRetType param_or_ret,
+ bool is_struct_member) {
+ // Scan attributes for pipeline IO attributes.
+ // Check for overlap with attributes that have been seen previously.
+ const ast::Attribute* pipeline_io_attribute = nullptr;
+ const ast::InterpolateAttribute* interpolate_attribute = nullptr;
+ const ast::InvariantAttribute* invariant_attribute = nullptr;
+ for (auto* attr : attrs) {
+ auto is_invalid_compute_shader_attribute = false;
+
+ if (auto* builtin = attr->As<ast::BuiltinAttribute>()) {
+ if (pipeline_io_attribute) {
+ AddError("multiple entry point IO attributes", attr->source);
+ AddNote("previously consumed " + attr_to_str(pipeline_io_attribute),
+ pipeline_io_attribute->source);
+ return false;
+ }
+ pipeline_io_attribute = attr;
+
+ if (builtins.count(builtin->builtin)) {
+ AddError(attr_to_str(builtin) +
+ " attribute appears multiple times as pipeline " +
+ (param_or_ret == ParamOrRetType::kParameter ? "input" : "output"),
+ decl->source);
+ return false;
+ }
+
+ if (!BuiltinAttribute(builtin, ty, stage,
+ /* is_input */ param_or_ret == ParamOrRetType::kParameter)) {
+ return false;
+ }
+ builtins.emplace(builtin->builtin);
+ } else if (auto* location = attr->As<ast::LocationAttribute>()) {
+ if (pipeline_io_attribute) {
+ AddError("multiple entry point IO attributes", attr->source);
+ AddNote("previously consumed " + attr_to_str(pipeline_io_attribute),
+ pipeline_io_attribute->source);
+ return false;
+ }
+ pipeline_io_attribute = attr;
+
+ bool is_input = param_or_ret == ParamOrRetType::kParameter;
+
+ if (!LocationAttribute(location, ty, locations, stage, source, is_input)) {
+ return false;
+ }
+ } else if (auto* interpolate = attr->As<ast::InterpolateAttribute>()) {
+ if (decl->PipelineStage() == ast::PipelineStage::kCompute) {
+ is_invalid_compute_shader_attribute = true;
+ } else if (!InterpolateAttribute(interpolate, ty)) {
+ return false;
+ }
+ interpolate_attribute = interpolate;
+ } else if (auto* invariant = attr->As<ast::InvariantAttribute>()) {
+ if (decl->PipelineStage() == ast::PipelineStage::kCompute) {
+ is_invalid_compute_shader_attribute = true;
+ }
+ invariant_attribute = invariant;
+ }
+ if (is_invalid_compute_shader_attribute) {
+ std::string input_or_output =
+ param_or_ret == ParamOrRetType::kParameter ? "inputs" : "output";
+ AddError("attribute is not valid for compute shader " + input_or_output,
+ attr->source);
+ return false;
+ }
+ }
+
+ if (IsValidationEnabled(attrs, ast::DisabledValidation::kEntryPointParameter)) {
+ if (is_struct_member && ty->Is<sem::Struct>()) {
+ AddError("nested structures cannot be used for entry point IO", source);
+ return false;
+ }
+
+ if (!ty->Is<sem::Struct>() && !pipeline_io_attribute) {
+ std::string err = "missing entry point IO attribute";
+ if (!is_struct_member) {
+ err += (param_or_ret == ParamOrRetType::kParameter ? " on parameter"
+ : " on return type");
+ }
+ AddError(err, source);
+ return false;
+ }
+
+ if (pipeline_io_attribute && pipeline_io_attribute->Is<ast::LocationAttribute>()) {
+ if (ty->is_integer_scalar_or_vector() && !interpolate_attribute) {
+ if (decl->PipelineStage() == ast::PipelineStage::kVertex &&
+ param_or_ret == ParamOrRetType::kReturnType) {
+ AddError(
+ "integral user-defined vertex outputs must have a flat "
+ "interpolation attribute",
+ source);
+ return false;
+ }
+ if (decl->PipelineStage() == ast::PipelineStage::kFragment &&
+ param_or_ret == ParamOrRetType::kParameter) {
+ AddError(
+ "integral user-defined fragment inputs must have a flat "
+ "interpolation attribute",
+ source);
+ return false;
+ }
+ }
+ }
+
+ if (interpolate_attribute) {
+ if (!pipeline_io_attribute ||
+ !pipeline_io_attribute->Is<ast::LocationAttribute>()) {
+ AddError("interpolate attribute must only be used with @location",
+ interpolate_attribute->source);
+ return false;
+ }
+ }
+
+ if (invariant_attribute) {
+ bool has_position = false;
+ if (pipeline_io_attribute) {
+ if (auto* builtin = pipeline_io_attribute->As<ast::BuiltinAttribute>()) {
+ has_position = (builtin->builtin == ast::Builtin::kPosition);
+ }
+ }
+ if (!has_position) {
+ AddError(
+ "invariant attribute must only be applied to a position "
+ "builtin",
+ invariant_attribute->source);
+ return false;
+ }
+ }
+ }
+ return true;
+ };
+
+ // Outer lambda for validating the entry point attributes for a type.
+ auto validate_entry_point_attributes = [&](const ast::AttributeList& attrs, const sem::Type* ty,
+ Source source, ParamOrRetType param_or_ret) {
+ if (!validate_entry_point_attributes_inner(attrs, ty, source, param_or_ret,
+ /*is_struct_member*/ false)) {
+ return false;
+ }
+
+ if (auto* str = ty->As<sem::Struct>()) {
+ for (auto* member : str->Members()) {
+ if (!validate_entry_point_attributes_inner(
+ member->Declaration()->attributes, member->Type(),
+ member->Declaration()->source, param_or_ret,
+ /*is_struct_member*/ true)) {
+ AddNote("while analysing entry point '" + symbols_.NameFor(decl->symbol) + "'",
+ decl->source);
+ return false;
+ }
+ }
+ }
+
+ return true;
+ };
+
+ for (auto* param : func->Parameters()) {
+ auto* param_decl = param->Declaration();
+ if (!validate_entry_point_attributes(param_decl->attributes, param->Type(),
+ param_decl->source, ParamOrRetType::kParameter)) {
+ return false;
+ }
+ }
+
+ // Clear IO sets after parameter validation. Builtin and location attributes
+ // in return types should be validated independently from those used in
+ // parameters.
+ builtins.clear();
+ locations.clear();
+
+ if (!func->ReturnType()->Is<sem::Void>()) {
+ if (!validate_entry_point_attributes(decl->return_type_attributes, func->ReturnType(),
+ decl->source, ParamOrRetType::kReturnType)) {
+ return false;
+ }
+ }
+
+ if (decl->PipelineStage() == ast::PipelineStage::kVertex &&
+ builtins.count(ast::Builtin::kPosition) == 0) {
+ // Check module-scope variables, as the SPIR-V sanitizer generates these.
+ bool found = false;
+ for (auto* global : func->TransitivelyReferencedGlobals()) {
+ if (auto* builtin =
+ ast::GetAttribute<ast::BuiltinAttribute>(global->Declaration()->attributes)) {
+ if (builtin->builtin == ast::Builtin::kPosition) {
+ found = true;
+ break;
+ }
+ }
+ }
+ if (!found) {
+ AddError(
+ "a vertex shader must include the 'position' builtin in its return "
+ "type",
+ decl->source);
+ return false;
+ }
+ }
+
+ if (decl->PipelineStage() == ast::PipelineStage::kCompute) {
+ if (!ast::HasAttribute<ast::WorkgroupAttribute>(decl->attributes)) {
+ AddError(
+ "a compute shader must include 'workgroup_size' in its "
+ "attributes",
+ decl->source);
+ return false;
+ }
+ }
+
+ // Validate there are no resource variable binding collisions
+ std::unordered_map<sem::BindingPoint, const ast::Variable*> binding_points;
+ for (auto* var : func->TransitivelyReferencedGlobals()) {
+ auto* var_decl = var->Declaration();
+ if (!var_decl->BindingPoint()) {
+ continue;
+ }
+ auto bp = var->BindingPoint();
+ auto res = binding_points.emplace(bp, var_decl);
+ if (!res.second &&
+ IsValidationEnabled(decl->attributes,
+ ast::DisabledValidation::kBindingPointCollision) &&
+ IsValidationEnabled(res.first->second->attributes,
+ ast::DisabledValidation::kBindingPointCollision)) {
+ // https://gpuweb.github.io/gpuweb/wgsl/#resource-interface
+ // Bindings must not alias within a shader stage: two different
+ // variables in the resource interface of a given shader must not have
+ // the same group and binding values, when considered as a pair of
+ // values.
+ auto func_name = symbols_.NameFor(decl->symbol);
+ AddError("entry point '" + func_name +
+ "' references multiple variables that use the "
+ "same resource binding @group(" +
+ std::to_string(bp.group) + "), @binding(" + std::to_string(bp.binding) +
+ ")",
+ var_decl->source);
+ AddNote("first resource binding usage declared here", res.first->second->source);
+ return false;
+ }
+ }
+
+ return true;
+}
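The binding-collision loop at the end of EntryPoint keys each resource by its (group, binding) pair and reports the first conflicting pair. A standalone sketch of that bookkeeping (std::map avoids writing a hash for the pair key; Tint itself uses sem::BindingPoint):

    #include <cstdint>
    #include <map>
    #include <string>
    #include <utility>

    using BindingTable = std::map<std::pair<uint32_t, uint32_t>, std::string>;

    // Returns true if the (group, binding) pair was free; false means the
    // collision that the validator diagnoses above.
    bool RegisterBinding(BindingTable& seen,
                         uint32_t group,
                         uint32_t binding,
                         const std::string& var_name) {
        return seen.emplace(std::make_pair(group, binding), var_name).second;
    }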
+
+bool Validator::Statements(const ast::StatementList& stmts) const {
+ for (auto* stmt : stmts) {
+ if (!sem_.Get(stmt)->IsReachable()) {
+ /// TODO(https://github.com/gpuweb/gpuweb/issues/2378): This may need to
+ /// become an error.
+ AddWarning("code is unreachable", stmt->source);
+ break;
+ }
+ }
+ return true;
+}
+
+bool Validator::Bitcast(const ast::BitcastExpression* cast, const sem::Type* to) const {
+ auto* from = sem_.TypeOf(cast->expr)->UnwrapRef();
+ if (!from->is_numeric_scalar_or_vector()) {
+ AddError("'" + sem_.TypeNameOf(from) + "' cannot be bitcast", cast->expr->source);
+ return false;
+ }
+ if (!to->is_numeric_scalar_or_vector()) {
+ AddError("cannot bitcast to '" + sem_.TypeNameOf(to) + "'", cast->type->source);
+ return false;
+ }
+
+ auto width = [&](const sem::Type* ty) {
+ if (auto* vec = ty->As<sem::Vector>()) {
+ return vec->Width();
+ }
+ return 1u;
+ };
+
+ if (width(from) != width(to)) {
+ AddError(
+ "cannot bitcast from '" + sem_.TypeNameOf(from) + "' to '" + sem_.TypeNameOf(to) + "'",
+ cast->source);
+ return false;
+ }
+
+ return true;
+}
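The bitcast rule above only needs two facts about each side: whether it is a numeric scalar or vector, and its component count. A standalone sketch (TypeSummary is illustrative, not a Tint type):

    #include <cstdint>

    struct TypeSummary {
        bool numeric_scalar_or_vector;
        uint32_t components;  // 1 for scalars, N for vecN
    };

    bool BitcastOk(TypeSummary from, TypeSummary to) {
        return from.numeric_scalar_or_vector && to.numeric_scalar_or_vector &&
               from.components == to.components;
    }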
+
+bool Validator::BreakStatement(const sem::Statement* stmt,
+ sem::Statement* current_statement) const {
+ if (!stmt->FindFirstParent<sem::LoopBlockStatement, sem::CaseStatement>()) {
+ AddError("break statement must be in a loop or switch case", stmt->Declaration()->source);
+ return false;
+ }
+ if (auto* continuing = ClosestContinuing(/*stop_at_loop*/ true, current_statement)) {
+ auto fail = [&](const char* note_msg, const Source& note_src) {
+ constexpr const char* kErrorMsg =
+ "break statement in a continuing block must be the single statement "
+ "of an if statement's true or false block, and that if statement "
+ "must be the last statement of the continuing block";
+ AddError(kErrorMsg, stmt->Declaration()->source);
+ AddNote(note_msg, note_src);
+ return false;
+ };
+
+ if (auto* block = stmt->Parent()->As<sem::BlockStatement>()) {
+ auto* block_parent = block->Parent();
+ auto* if_stmt = block_parent->As<sem::IfStatement>();
+ if (!if_stmt) {
+ return fail("break statement is not directly in if statement block",
+ stmt->Declaration()->source);
+ }
+ if (block->Declaration()->statements.size() != 1) {
+ return fail("if statement block contains multiple statements",
+ block->Declaration()->source);
+ }
+
+ if (if_stmt->Parent()->Is<sem::IfStatement>()) {
+ return fail("else has condition", if_stmt->Declaration()->source);
+ }
+
+ bool el_contains_break = block->Declaration() == if_stmt->Declaration()->else_statement;
+ if (el_contains_break) {
+ if (auto* true_block = if_stmt->Declaration()->body; !true_block->Empty()) {
+ return fail("non-empty true block", true_block->source);
+ }
+ } else {
+ auto* else_stmt = if_stmt->Declaration()->else_statement;
+ if (else_stmt) {
+ return fail("non-empty false block", else_stmt->source);
+ }
+ }
+
+ if (if_stmt->Parent()->Declaration() != continuing) {
+ return fail(
+ "if statement containing break statement is not directly in "
+ "continuing block",
+ if_stmt->Declaration()->source);
+ }
+ if (auto* cont_block = continuing->As<ast::BlockStatement>()) {
+ if (if_stmt->Declaration() != cont_block->Last()) {
+ return fail(
+ "if statement containing break statement is not the last "
+ "statement of the continuing block",
+ if_stmt->Declaration()->source);
+ }
+ }
+ }
+ }
+ return true;
+}
+
+bool Validator::ContinueStatement(const sem::Statement* stmt,
+ sem::Statement* current_statement) const {
+ if (auto* continuing = ClosestContinuing(/*stop_at_loop*/ true, current_statement)) {
+ AddError("continuing blocks must not contain a continue statement",
+ stmt->Declaration()->source);
+ if (continuing != stmt->Declaration() && continuing != stmt->Parent()->Declaration()) {
+ AddNote("see continuing block here", continuing->source);
+ }
+ return false;
+ }
+
+ if (!stmt->FindFirstParent<sem::LoopBlockStatement>()) {
+ AddError("continue statement must be in a loop", stmt->Declaration()->source);
+ return false;
+ }
+
+ return true;
+}
+
+bool Validator::Call(const sem::Call* call, sem::Statement* current_statement) const {
+ auto* expr = call->Declaration();
+ bool is_call_stmt =
+ current_statement && Is<ast::CallStatement>(current_statement->Declaration(),
+ [&](auto* stmt) { return stmt->expr == expr; });
+
+ return Switch(
+ call->Target(), //
+ [&](const sem::TypeConversion*) {
+ if (is_call_stmt) {
+ AddError("type conversion evaluated but not used", call->Declaration()->source);
+ return false;
+ }
+ return true;
+ },
+ [&](const sem::TypeConstructor* ctor) {
+ if (is_call_stmt) {
+ AddError("type constructor evaluated but not used", call->Declaration()->source);
+ return false;
+ }
+ return Switch(
+ ctor->ReturnType(), //
+ [&](const sem::Array* arr) { return ArrayConstructor(expr, arr); },
+ [&](const sem::Struct* str) { return StructureConstructor(expr, str); },
+ [&](Default) { return true; });
+ },
+ [&](Default) { return true; });
+}
+
+bool Validator::DiscardStatement(const sem::Statement* stmt,
+ sem::Statement* current_statement) const {
+ if (auto* continuing = ClosestContinuing(/*stop_at_loop*/ false, current_statement)) {
+ AddError("continuing blocks must not contain a discard statement",
+ stmt->Declaration()->source);
+ if (continuing != stmt->Declaration() && continuing != stmt->Parent()->Declaration()) {
+ AddNote("see continuing block here", continuing->source);
+ }
+ return false;
+ }
+ return true;
+}
+
+bool Validator::FallthroughStatement(const sem::Statement* stmt) const {
+ if (auto* block = As<sem::BlockStatement>(stmt->Parent())) {
+ if (auto* c = As<sem::CaseStatement>(block->Parent())) {
+ if (block->Declaration()->Last() == stmt->Declaration()) {
+ if (auto* s = As<sem::SwitchStatement>(c->Parent())) {
+ if (c->Declaration() != s->Declaration()->body.back()) {
+ return true;
+ }
+ AddError(
+ "a fallthrough statement must not be used in the last switch "
+ "case",
+ stmt->Declaration()->source);
+ return false;
+ }
+ }
+ }
+ }
+ AddError("fallthrough must only be used as the last statement of a case block",
+ stmt->Declaration()->source);
+ return false;
+}
+
+bool Validator::LoopStatement(const sem::LoopStatement* stmt) const {
+ if (stmt->Behaviors().Empty()) {
+ AddError("loop does not exit", stmt->Declaration()->source.Begin());
+ return false;
+ }
+ return true;
+}
+
+bool Validator::ForLoopStatement(const sem::ForLoopStatement* stmt) const {
+ if (stmt->Behaviors().Empty()) {
+ AddError("for-loop does not exit", stmt->Declaration()->source.Begin());
+ return false;
+ }
+ if (auto* cond = stmt->Condition()) {
+ auto* cond_ty = cond->Type()->UnwrapRef();
+ if (!cond_ty->Is<sem::Bool>()) {
+ AddError("for-loop condition must be bool, got " + sem_.TypeNameOf(cond_ty),
+ stmt->Condition()->Declaration()->source);
+ return false;
+ }
+ }
+ return true;
+}
+
+bool Validator::IfStatement(const sem::IfStatement* stmt) const {
+ auto* cond_ty = stmt->Condition()->Type()->UnwrapRef();
+ if (!cond_ty->Is<sem::Bool>()) {
+ AddError("if statement condition must be bool, got " + sem_.TypeNameOf(cond_ty),
+ stmt->Condition()->Declaration()->source);
+ return false;
+ }
+ return true;
+}
+
+bool Validator::BuiltinCall(const sem::Call* call) const {
+ if (call->Type()->Is<sem::Void>()) {
+ bool is_call_statement = false;
+ if (auto* call_stmt = As<ast::CallStatement>(call->Stmt()->Declaration())) {
+ if (call_stmt->expr == call->Declaration()) {
+ is_call_statement = true;
+ }
+ }
+ if (!is_call_statement) {
+ // https://gpuweb.github.io/gpuweb/wgsl/#function-call-expr
+ // If the called function does not return a value, a function call
+ // statement should be used instead.
+ auto* ident = call->Declaration()->target.name;
+ auto name = symbols_.NameFor(ident->symbol);
+ AddError("builtin '" + name + "' does not return a value", call->Declaration()->source);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool Validator::TextureBuiltinFunction(const sem::Call* call) const {
+ auto* builtin = call->Target()->As<sem::Builtin>();
+ if (!builtin) {
+ return false;
+ }
+
+ std::string func_name = builtin->str();
+ auto& signature = builtin->Signature();
+
+ auto check_arg_is_constexpr = [&](sem::ParameterUsage usage, int min, int max) {
+ auto index = signature.IndexOf(usage);
+ if (index < 0) {
+ return true;
+ }
+ std::string name = sem::str(usage);
+ auto* arg = call->Arguments()[index];
+ if (auto values = arg->ConstantValue()) {
+ // Assert that the constant values are of the expected type.
+ if (!values.Type()->IsAnyOf<sem::I32, sem::Vector>() ||
+ !values.ElementType()->Is<sem::I32>()) {
+ TINT_ICE(Resolver, diagnostics_)
+ << "failed to resolve '" + func_name + "' " << name << " parameter type";
+ return false;
+ }
+
+ // Currently const_expr is restricted to literals and type constructors.
+ // Check that that's all we have for the parameter.
+ bool is_const_expr = true;
+ ast::TraverseExpressions(
+ arg->Declaration(), diagnostics_, [&](const ast::Expression* e) {
+ if (e->IsAnyOf<ast::LiteralExpression, ast::CallExpression>()) {
+ return ast::TraverseAction::Descend;
+ }
+ is_const_expr = false;
+ return ast::TraverseAction::Stop;
+ });
+ if (is_const_expr) {
+ auto vector = builtin->Parameters()[index]->Type()->Is<sem::Vector>();
+ for (size_t i = 0, n = values.ElementCount(); i < n; i++) {
+ auto value = values.Element<AInt>(i).value;
+ if (value < min || value > max) {
+ if (vector) {
+ AddError("each component of the " + name +
+ " argument must be at least " + std::to_string(min) +
+ " and at most " + std::to_string(max) + ". " + name +
+ " component " + std::to_string(i) + " is " +
+ std::to_string(value),
+ arg->Declaration()->source);
+ } else {
+ AddError("the " + name + " argument must be at least " +
+ std::to_string(min) + " and at most " +
+ std::to_string(max) + ". " + name + " is " +
+ std::to_string(value),
+ arg->Declaration()->source);
+ }
+ return false;
+ }
+ }
+ return true;
+ }
+ }
+ AddError("the " + name + " argument must be a const_expression",
+ arg->Declaration()->source);
+ return false;
+ };
+
+ return check_arg_is_constexpr(sem::ParameterUsage::kOffset, -8, 7) &&
+ check_arg_is_constexpr(sem::ParameterUsage::kComponent, 0, 3);
+}
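The constant-expression arguments checked above are ultimately validated against a closed range per component: [-8, 7] for 'offset' and [0, 3] for 'component'. A standalone sketch of that range walk:

    #include <cstdint>
    #include <vector>

    bool ComponentsInRange(const std::vector<int64_t>& components,
                           int64_t min, int64_t max) {
        for (int64_t v : components) {
            if (v < min || v > max) {
                return false;
            }
        }
        return true;
    }

    // e.g. ComponentsInRange(offset_values, -8, 7) or
    //      ComponentsInRange({component_value}, 0, 3)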
+
+bool Validator::RequiredExtensionForBuiltinFunction(
+ const sem::Call* call,
+ const ast::Extensions& enabled_extensions) const {
+ const auto* builtin = call->Target()->As<sem::Builtin>();
+ if (!builtin) {
+ return true;
+ }
+
+ const auto extension = builtin->RequiredExtension();
+ if (extension == ast::Extension::kNone) {
+ return true;
+ }
+
+ if (!enabled_extensions.contains(extension)) {
+ AddError("cannot call built-in function '" + std::string(builtin->str()) +
+ "' without extension " + ast::str(extension),
+ call->Declaration()->source);
+ return false;
+ }
+
+ return true;
+}
+
+bool Validator::FunctionCall(const sem::Call* call, sem::Statement* current_statement) const {
+ auto* decl = call->Declaration();
+ auto* target = call->Target()->As<sem::Function>();
+ auto sym = decl->target.name->symbol;
+ auto name = symbols_.NameFor(sym);
+
+ if (target->Declaration()->IsEntryPoint()) {
+ // https://www.w3.org/TR/WGSL/#function-restriction
+ // An entry point must never be the target of a function call.
+ AddError("entry point functions cannot be the target of a function call", decl->source);
+ return false;
+ }
+
+ if (decl->args.size() != target->Parameters().size()) {
+ bool more = decl->args.size() > target->Parameters().size();
+ AddError("too " + (more ? std::string("many") : std::string("few")) +
+ " arguments in call to '" + name + "', expected " +
+ std::to_string(target->Parameters().size()) + ", got " +
+ std::to_string(call->Arguments().size()),
+ decl->source);
+ return false;
+ }
+
+ for (size_t i = 0; i < call->Arguments().size(); ++i) {
+ const sem::Variable* param = target->Parameters()[i];
+ const ast::Expression* arg_expr = decl->args[i];
+ auto* param_type = param->Type();
+ auto* arg_type = sem_.TypeOf(arg_expr)->UnwrapRef();
+
+ if (param_type != arg_type) {
+ AddError("type mismatch for argument " + std::to_string(i + 1) + " in call to '" +
+ name + "', expected '" + sem_.TypeNameOf(param_type) + "', got '" +
+ sem_.TypeNameOf(arg_type) + "'",
+ arg_expr->source);
+ return false;
+ }
+
+ if (param_type->Is<sem::Pointer>()) {
+ auto is_valid = false;
+ if (auto* ident_expr = arg_expr->As<ast::IdentifierExpression>()) {
+ auto* var = sem_.ResolvedSymbol<sem::Variable>(ident_expr);
+ if (!var) {
+ TINT_ICE(Resolver, diagnostics_) << "failed to resolve identifier";
+ return false;
+ }
+ if (var->Is<sem::Parameter>()) {
+ is_valid = true;
+ }
+ } else if (auto* unary = arg_expr->As<ast::UnaryOpExpression>()) {
+ if (unary->op == ast::UnaryOp::kAddressOf) {
+ if (auto* ident_unary = unary->expr->As<ast::IdentifierExpression>()) {
+ auto* var = sem_.ResolvedSymbol<sem::Variable>(ident_unary);
+ if (!var) {
+ TINT_ICE(Resolver, diagnostics_) << "failed to resolve identifier";
+ return false;
+ }
+ if (var->Declaration()->is_const) {
+ TINT_ICE(Resolver, diagnostics_)
+ << "Resolver::FunctionCall() encountered an address-of "
+ "expression of a constant identifier expression";
+ return false;
+ }
+ is_valid = true;
+ }
+ }
+ }
+
+ if (!is_valid &&
+ IsValidationEnabled(param->Declaration()->attributes,
+ ast::DisabledValidation::kIgnoreInvalidPointerArgument)) {
+ AddError(
+ "expected an address-of expression of a variable identifier "
+ "expression or a function parameter",
+ arg_expr->source);
+ return false;
+ }
+ }
+ }
+
+ if (call->Type()->Is<sem::Void>()) {
+ bool is_call_statement = false;
+ if (auto* call_stmt = As<ast::CallStatement>(call->Stmt()->Declaration())) {
+ if (call_stmt->expr == call->Declaration()) {
+ is_call_statement = true;
+ }
+ }
+ if (!is_call_statement) {
+ // https://gpuweb.github.io/gpuweb/wgsl/#function-call-expr
+ // If the called function does not return a value, a function call
+ // statement should be used instead.
+ AddError("function '" + name + "' does not return a value", decl->source);
+ return false;
+ }
+ }
+
+ if (call->Behaviors().Contains(sem::Behavior::kDiscard)) {
+ if (auto* continuing = ClosestContinuing(/*stop_at_loop*/ false, current_statement)) {
+ AddError("cannot call a function that may discard inside a continuing block",
+ call->Declaration()->source);
+ if (continuing != call->Stmt()->Declaration() &&
+ continuing != call->Stmt()->Parent()->Declaration()) {
+ AddNote("see continuing block here", continuing->source);
+ }
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool Validator::StructureConstructor(const ast::CallExpression* ctor,
+ const sem::Struct* struct_type) const {
+ if (!struct_type->IsConstructible()) {
+ AddError("struct constructor has non-constructible type", ctor->source);
+ return false;
+ }
+
+ if (ctor->args.size() > 0) {
+ if (ctor->args.size() != struct_type->Members().size()) {
+ std::string fm = ctor->args.size() < struct_type->Members().size() ? "few" : "many";
+ AddError("struct constructor has too " + fm + " inputs: expected " +
+ std::to_string(struct_type->Members().size()) + ", found " +
+ std::to_string(ctor->args.size()),
+ ctor->source);
+ return false;
+ }
+ for (auto* member : struct_type->Members()) {
+ auto* value = ctor->args[member->Index()];
+ auto* value_ty = sem_.TypeOf(value);
+ if (member->Type() != value_ty->UnwrapRef()) {
+ AddError(
+ "type in struct constructor does not match struct member type: "
+ "expected '" +
+ sem_.TypeNameOf(member->Type()) + "', found '" + sem_.TypeNameOf(value_ty) +
+ "'",
+ value->source);
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+bool Validator::ArrayConstructor(const ast::CallExpression* ctor,
+ const sem::Array* array_type) const {
+ auto& values = ctor->args;
+ auto* elem_ty = array_type->ElemType();
+ for (auto* value : values) {
+ auto* value_ty = sem_.TypeOf(value)->UnwrapRef();
+ if (value_ty != elem_ty) {
+ AddError(
+ "type in array constructor does not match array type: "
+ "expected '" +
+ sem_.TypeNameOf(elem_ty) + "', found '" + sem_.TypeNameOf(value_ty) + "'",
+ value->source);
+ return false;
+ }
+ }
+
+ if (array_type->IsRuntimeSized()) {
+ AddError("cannot init a runtime-sized array", ctor->source);
+ return false;
+ } else if (!elem_ty->IsConstructible()) {
+ AddError("array constructor has non-constructible element type", ctor->source);
+ return false;
+ } else if (!values.empty() && (values.size() != array_type->Count())) {
+ std::string fm = values.size() < array_type->Count() ? "few" : "many";
+ AddError("array constructor has too " + fm + " elements: expected " +
+ std::to_string(array_type->Count()) + ", found " +
+ std::to_string(values.size()),
+ ctor->source);
+ return false;
+ } else if (values.size() > array_type->Count()) {
+ AddError("array constructor has too many elements: expected " +
+ std::to_string(array_type->Count()) + ", found " +
+ std::to_string(values.size()),
+ ctor->source);
+ return false;
+ }
+ return true;
+}
+
+bool Validator::Vector(const sem::Vector* ty, const Source& source) const {
+ if (!ty->type()->is_scalar()) {
+ AddError("vector element type must be 'bool', 'f32', 'i32' or 'u32'", source);
+ return false;
+ }
+ return true;
+}
+
+bool Validator::Matrix(const sem::Matrix* ty, const Source& source) const {
+ if (!ty->is_float_matrix()) {
+ AddError("matrix element type must be 'f32'", source);
+ return false;
+ }
+ return true;
+}
+
+bool Validator::PipelineStages(const std::vector<sem::Function*>& entry_points) const {
+ auto check_workgroup_storage = [&](const sem::Function* func,
+ const sem::Function* entry_point) {
+ auto stage = entry_point->Declaration()->PipelineStage();
+ if (stage != ast::PipelineStage::kCompute) {
+ for (auto* var : func->DirectlyReferencedGlobals()) {
+ if (var->StorageClass() == ast::StorageClass::kWorkgroup) {
+ std::stringstream stage_name;
+ stage_name << stage;
+ for (auto* user : var->Users()) {
+ if (func == user->Stmt()->Function()) {
+ AddError("workgroup memory cannot be used by " + stage_name.str() +
+ " pipeline stage",
+ user->Declaration()->source);
+ break;
+ }
+ }
+ AddNote("variable is declared here", var->Declaration()->source);
+ if (func != entry_point) {
+ TraverseCallChain(
+ diagnostics_, entry_point, func, [&](const sem::Function* f) {
+ AddNote("called by function '" +
+ symbols_.NameFor(f->Declaration()->symbol) + "'",
+ f->Declaration()->source);
+ });
+ AddNote("called by entry point '" +
+ symbols_.NameFor(entry_point->Declaration()->symbol) + "'",
+ entry_point->Declaration()->source);
+ }
+ return false;
+ }
+ }
+ }
+ return true;
+ };
+
+ for (auto* entry_point : entry_points) {
+ if (!check_workgroup_storage(entry_point, entry_point)) {
+ return false;
+ }
+ for (auto* func : entry_point->TransitivelyCalledFunctions()) {
+ if (!check_workgroup_storage(func, entry_point)) {
+ return false;
+ }
+ }
+ }
+
+ auto check_builtin_calls = [&](const sem::Function* func, const sem::Function* entry_point) {
+ auto stage = entry_point->Declaration()->PipelineStage();
+ for (auto* builtin : func->DirectlyCalledBuiltins()) {
+ if (!builtin->SupportedStages().Contains(stage)) {
+ auto* call = func->FindDirectCallTo(builtin);
+ std::stringstream err;
+ err << "built-in cannot be used by " << stage << " pipeline stage";
+ AddError(err.str(),
+ call ? call->Declaration()->source : func->Declaration()->source);
+ if (func != entry_point) {
+ TraverseCallChain(diagnostics_, entry_point, func, [&](const sem::Function* f) {
+ AddNote("called by function '" +
+ symbols_.NameFor(f->Declaration()->symbol) + "'",
+ f->Declaration()->source);
+ });
+ AddNote("called by entry point '" +
+ symbols_.NameFor(entry_point->Declaration()->symbol) + "'",
+ entry_point->Declaration()->source);
+ }
+ return false;
+ }
+ }
+ return true;
+ };
+
+ for (auto* entry_point : entry_points) {
+ if (!check_builtin_calls(entry_point, entry_point)) {
+ return false;
+ }
+ for (auto* func : entry_point->TransitivelyCalledFunctions()) {
+ if (!check_builtin_calls(func, entry_point)) {
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+bool Validator::Array(const sem::Array* arr, const Source& source) const {
+ auto* el_ty = arr->ElemType();
+
+ if (!IsFixedFootprint(el_ty)) {
+ AddError("an array element type cannot contain a runtime-sized array", source);
+ return false;
+ }
+ return true;
+}
+
+bool Validator::ArrayStrideAttribute(const ast::StrideAttribute* attr,
+ uint32_t el_size,
+ uint32_t el_align,
+ const Source& source) const {
+ auto stride = attr->stride;
+ bool is_valid_stride = (stride >= el_size) && (stride >= el_align) && (stride % el_align == 0);
+ if (!is_valid_stride) {
+ // https://gpuweb.github.io/gpuweb/wgsl/#array-layout-rules
+ // Arrays decorated with the stride attribute must have a stride that is
+ // at least the size of the element type, and be a multiple of the
+ // element type's alignment value.
+ AddError(
+ "arrays decorated with the stride attribute must have a stride "
+ "that is at least the size of the element type, and be a multiple "
+ "of the element type's alignment value.",
+ source);
+ return false;
+ }
+ return true;
+}
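The @stride legality test above is three comparisons on the element's size and alignment. As a standalone helper:

    #include <cstdint>

    // Stride must cover the element size, cover its alignment, and be a
    // multiple of that alignment.
    bool StrideAttributeOk(uint32_t stride, uint32_t el_size, uint32_t el_align) {
        return stride >= el_size && stride >= el_align && stride % el_align == 0u;
    }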
+
+bool Validator::Alias(const ast::Alias* alias) const {
+ auto name = symbols_.NameFor(alias->name);
+ if (sem::ParseBuiltinType(name) != sem::BuiltinType::kNone) {
+ AddError("'" + name + "' is a builtin and cannot be redeclared as an alias", alias->source);
+ return false;
+ }
+
+ return true;
+}
+
+bool Validator::Structure(const sem::Struct* str, ast::PipelineStage stage) const {
+ auto name = symbols_.NameFor(str->Declaration()->name);
+ if (sem::ParseBuiltinType(name) != sem::BuiltinType::kNone) {
+ AddError("'" + name + "' is a builtin and cannot be redeclared as a struct",
+ str->Declaration()->source);
+ return false;
+ }
+
+ if (str->Members().empty()) {
+ AddError("structures must have at least one member", str->Declaration()->source);
+ return false;
+ }
+
+ std::unordered_set<uint32_t> locations;
+ for (auto* member : str->Members()) {
+ if (auto* r = member->Type()->As<sem::Array>()) {
+ if (r->IsRuntimeSized()) {
+ if (member != str->Members().back()) {
+ AddError("runtime arrays may only appear as the last member of a struct",
+ member->Declaration()->source);
+ return false;
+ }
+ }
+ } else if (!IsFixedFootprint(member->Type())) {
+ AddError(
+ "a struct that contains a runtime array cannot be nested inside "
+ "another struct",
+ member->Declaration()->source);
+ return false;
+ }
+
+ auto has_location = false;
+ auto has_position = false;
+ const ast::InvariantAttribute* invariant_attribute = nullptr;
+ const ast::InterpolateAttribute* interpolate_attribute = nullptr;
+ for (auto* attr : member->Declaration()->attributes) {
+ if (!attr->IsAnyOf<ast::BuiltinAttribute, //
+ ast::InternalAttribute, //
+ ast::InterpolateAttribute, //
+ ast::InvariantAttribute, //
+ ast::LocationAttribute, //
+ ast::StructMemberOffsetAttribute, //
+ ast::StructMemberSizeAttribute, //
+ ast::StructMemberAlignAttribute>()) {
+ if (attr->Is<ast::StrideAttribute>() &&
+ IsValidationDisabled(member->Declaration()->attributes,
+ ast::DisabledValidation::kIgnoreStrideAttribute)) {
+ continue;
+ }
+ AddError("attribute is not valid for structure members", attr->source);
+ return false;
+ }
+
+ if (auto* invariant = attr->As<ast::InvariantAttribute>()) {
+ invariant_attribute = invariant;
+ } else if (auto* location = attr->As<ast::LocationAttribute>()) {
+ has_location = true;
+ if (!LocationAttribute(location, member->Type(), locations, stage,
+ member->Declaration()->source)) {
+ return false;
+ }
+ } else if (auto* builtin = attr->As<ast::BuiltinAttribute>()) {
+ if (!BuiltinAttribute(builtin, member->Type(), stage,
+ /* is_input */ false)) {
+ return false;
+ }
+ if (builtin->builtin == ast::Builtin::kPosition) {
+ has_position = true;
+ }
+ } else if (auto* interpolate = attr->As<ast::InterpolateAttribute>()) {
+ interpolate_attribute = interpolate;
+ if (!InterpolateAttribute(interpolate, member->Type())) {
+ return false;
+ }
+ }
+ }
+
+ if (invariant_attribute && !has_position) {
+ AddError("invariant attribute must only be applied to a position builtin",
+ invariant_attribute->source);
+ return false;
+ }
+
+ if (interpolate_attribute && !has_location) {
+ AddError("interpolate attribute must only be used with @location",
+ interpolate_attribute->source);
+ return false;
+ }
+ }
+
+ for (auto* attr : str->Declaration()->attributes) {
+ if (!(attr->IsAnyOf<ast::InternalAttribute>())) {
+ AddError("attribute is not valid for struct declarations", attr->source);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool Validator::LocationAttribute(const ast::LocationAttribute* location,
+ const sem::Type* type,
+ std::unordered_set<uint32_t>& locations,
+ ast::PipelineStage stage,
+ const Source& source,
+ const bool is_input) const {
+ std::string inputs_or_output = is_input ? "inputs" : "output";
+ if (stage == ast::PipelineStage::kCompute) {
+ AddError("attribute is not valid for compute shader " + inputs_or_output, location->source);
+ return false;
+ }
+
+ if (!type->is_numeric_scalar_or_vector()) {
+ std::string invalid_type = sem_.TypeNameOf(type);
+ AddError("cannot apply 'location' attribute to declaration of type '" + invalid_type + "'",
+ source);
+ AddNote(
+ "'location' attribute must only be applied to declarations of "
+ "numeric scalar or numeric vector type",
+ location->source);
+ return false;
+ }
+
+ if (locations.count(location->value)) {
+ AddError(attr_to_str(location) + " attribute appears multiple times", location->source);
+ return false;
+ }
+ locations.emplace(location->value);
+
+ return true;
+}
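Duplicate @location detection above is just set insertion keyed by the location value. A standalone sketch of the same bookkeeping:

    #include <cstdint>
    #include <unordered_set>

    // Returns false when the same location value is seen twice within one
    // entry point interface, mirroring the 'locations' set used above.
    bool RegisterLocation(std::unordered_set<uint32_t>& used, uint32_t location) {
        return used.insert(location).second;
    }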
+
+bool Validator::Return(const ast::ReturnStatement* ret,
+ const sem::Type* func_type,
+ const sem::Type* ret_type,
+ sem::Statement* current_statement) const {
+ if (func_type->UnwrapRef() != ret_type) {
+ AddError(
+ "return statement type must match its function "
+ "return type, returned '" +
+ sem_.TypeNameOf(ret_type) + "', expected '" + sem_.TypeNameOf(func_type) + "'",
+ ret->source);
+ return false;
+ }
+
+ auto* sem = sem_.Get(ret);
+ if (auto* continuing = ClosestContinuing(/*stop_at_loop*/ false, current_statement)) {
+ AddError("continuing blocks must not contain a return statement", ret->source);
+ if (continuing != sem->Declaration() && continuing != sem->Parent()->Declaration()) {
+ AddNote("see continuing block here", continuing->source);
+ }
+ return false;
+ }
+
+ return true;
+}
+
+bool Validator::SwitchStatement(const ast::SwitchStatement* s) {
+ auto* cond_ty = sem_.TypeOf(s->condition)->UnwrapRef();
+ if (!cond_ty->is_integer_scalar()) {
+ AddError(
+ "switch statement selector expression must be of a "
+ "scalar integer type",
+ s->condition->source);
+ return false;
+ }
+
+ bool has_default = false;
+ std::unordered_map<int64_t, Source> selectors;
+
+ for (auto* case_stmt : s->body) {
+ if (case_stmt->IsDefault()) {
+ if (has_default) {
+ // More than one default clause
+ AddError("switch statement must have exactly one default clause",
+ case_stmt->source);
+ return false;
+ }
+ has_default = true;
+ }
+
+ for (auto* selector : case_stmt->selectors) {
+ if (cond_ty != sem_.TypeOf(selector)) {
+ AddError(
+ "the case selector values must have the same "
+ "type as the selector expression.",
+ case_stmt->source);
+ return false;
+ }
+
+ auto it = selectors.find(selector->value);
+ if (it != selectors.end()) {
+ auto val = std::to_string(selector->value);
+ AddError("duplicate switch case '" + val + "'", selector->source);
+ AddNote("previous case declared here", it->second);
+ return false;
+ }
+ selectors.emplace(selector->value, selector->source);
+ }
+ }
+
+ if (!has_default) {
+ // No default clause
+ AddError("switch statement must have a default clause", s->source);
+ return false;
+ }
+
+ return true;
+}
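The switch checks above require selector values to be unique across all cases and exactly one default clause. A standalone sketch that collapses the same bookkeeping (selector sources and types omitted):

    #include <cstdint>
    #include <unordered_set>
    #include <vector>

    bool SwitchSelectorsOk(const std::vector<int64_t>& selectors,
                           int default_clause_count) {
        std::unordered_set<int64_t> seen;
        for (int64_t v : selectors) {
            if (!seen.insert(v).second) {
                return false;  // duplicate switch case value
            }
        }
        return default_clause_count == 1;
    }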
+
+bool Validator::Assignment(const ast::Statement* a, const sem::Type* rhs_ty) const {
+ const ast::Expression* lhs;
+ const ast::Expression* rhs;
+ if (auto* assign = a->As<ast::AssignmentStatement>()) {
+ lhs = assign->lhs;
+ rhs = assign->rhs;
+ } else if (auto* compound = a->As<ast::CompoundAssignmentStatement>()) {
+ lhs = compound->lhs;
+ rhs = compound->rhs;
+ } else {
+ TINT_ICE(Resolver, diagnostics_) << "invalid assignment statement";
+ return false;
+ }
+
+ if (lhs->Is<ast::PhonyExpression>()) {
+ // https://www.w3.org/TR/WGSL/#phony-assignment-section
+ auto* ty = rhs_ty->UnwrapRef();
+ if (!ty->IsConstructible() &&
+ !ty->IsAnyOf<sem::Pointer, sem::Texture, sem::Sampler, sem::AbstractNumeric>()) {
+ AddError("cannot assign '" + sem_.TypeNameOf(rhs_ty) +
+ "' to '_'. '_' can only be assigned a constructible, pointer, "
+ "texture or sampler type",
+ rhs->source);
+ return false;
+ }
+ return true; // RHS can be anything.
+ }
+
+ // https://gpuweb.github.io/gpuweb/wgsl/#assignment-statement
+ auto const* lhs_ty = sem_.TypeOf(lhs);
+
+ if (auto* var = sem_.ResolvedSymbol<sem::Variable>(lhs)) {
+ auto* decl = var->Declaration();
+ if (var->Is<sem::Parameter>()) {
+ AddError("cannot assign to function parameter", lhs->source);
+ AddNote("'" + symbols_.NameFor(decl->symbol) + "' is declared here:", decl->source);
+ return false;
+ }
+ if (decl->is_const) {
+ AddError("cannot assign to const", lhs->source);
+ AddNote("'" + symbols_.NameFor(decl->symbol) + "' is declared here:", decl->source);
+ return false;
+ }
+ }
+
+ auto* lhs_ref = lhs_ty->As<sem::Reference>();
+ if (!lhs_ref) {
+ // LHS is not a reference, so it has no storage.
+ AddError("cannot assign to value of type '" + sem_.TypeNameOf(lhs_ty) + "'", lhs->source);
+ return false;
+ }
+
+ auto* storage_ty = lhs_ref->StoreType();
+ auto* value_type = rhs_ty->UnwrapRef(); // Implicit load of RHS
+
+ // Value type has to match storage type
+ if (storage_ty != value_type) {
+ AddError(
+ "cannot assign '" + sem_.TypeNameOf(rhs_ty) + "' to '" + sem_.TypeNameOf(lhs_ty) + "'",
+ a->source);
+ return false;
+ }
+ if (!storage_ty->IsConstructible()) {
+ AddError("storage type of assignment must be constructible", a->source);
+ return false;
+ }
+ if (lhs_ref->Access() == ast::Access::kRead) {
+ AddError("cannot store into a read-only type '" + sem_.RawTypeNameOf(lhs_ty) + "'",
+ a->source);
+ return false;
+ }
+ return true;
+}
+
+bool Validator::IncrementDecrementStatement(const ast::IncrementDecrementStatement* inc) const {
+ const ast::Expression* lhs = inc->lhs;
+
+ // https://gpuweb.github.io/gpuweb/wgsl/#increment-decrement
+
+ if (auto* var = sem_.ResolvedSymbol<sem::Variable>(lhs)) {
+ auto* decl = var->Declaration();
+ if (var->Is<sem::Parameter>()) {
+ AddError("cannot modify function parameter", lhs->source);
+ AddNote("'" + symbols_.NameFor(decl->symbol) + "' is declared here:", decl->source);
+ return false;
+ }
+ if (decl->is_const) {
+ AddError("cannot modify constant value", lhs->source);
+ AddNote("'" + symbols_.NameFor(decl->symbol) + "' is declared here:", decl->source);
+ return false;
+ }
+ }
+
+ auto const* lhs_ty = sem_.TypeOf(lhs);
+ auto* lhs_ref = lhs_ty->As<sem::Reference>();
+ if (!lhs_ref) {
+ // LHS is not a reference, so it has no storage.
+ AddError("cannot modify value of type '" + sem_.TypeNameOf(lhs_ty) + "'", lhs->source);
+ return false;
+ }
+
+ if (!lhs_ref->StoreType()->is_integer_scalar()) {
+ const std::string kind = inc->increment ? "increment" : "decrement";
+ AddError(kind + " statement can only be applied to an integer scalar", lhs->source);
+ return false;
+ }
+
+ if (lhs_ref->Access() == ast::Access::kRead) {
+ AddError("cannot modify read-only type '" + sem_.RawTypeNameOf(lhs_ty) + "'", inc->source);
+ return false;
+ }
+ return true;
+}
+
+bool Validator::NoDuplicateAttributes(const ast::AttributeList& attributes) const {
+ std::unordered_map<const TypeInfo*, Source> seen;
+ for (auto* d : attributes) {
+ auto res = seen.emplace(&d->TypeInfo(), d->source);
+ if (!res.second && !d->Is<ast::InternalAttribute>()) {
+ AddError("duplicate " + d->Name() + " attribute", d->source);
+ AddNote("first attribute declared here", res.first->second);
+ return false;
+ }
+ }
+ return true;
+}
+
+bool Validator::IsValidationDisabled(const ast::AttributeList& attributes,
+ ast::DisabledValidation validation) const {
+ for (auto* attribute : attributes) {
+ if (auto* dv = attribute->As<ast::DisableValidationAttribute>()) {
+ if (dv->validation == validation) {
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+bool Validator::IsValidationEnabled(const ast::AttributeList& attributes,
+ ast::DisabledValidation validation) const {
+ return !IsValidationDisabled(attributes, validation);
+}
+
+std::string Validator::VectorPretty(uint32_t size, const sem::Type* element_type) const {
+ sem::Vector vec_type(element_type, size);
+ return vec_type.FriendlyName(symbols_);
+}
+
+} // namespace tint::resolver
diff --git a/chromium/third_party/dawn/src/tint/resolver/validator.h b/chromium/third_party/dawn/src/tint/resolver/validator.h
new file mode 100644
index 00000000000..b30fdc72a14
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/resolver/validator.h
@@ -0,0 +1,438 @@
+// Copyright 2020 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef SRC_TINT_RESOLVER_VALIDATOR_H_
+#define SRC_TINT_RESOLVER_VALIDATOR_H_
+
+#include <set>
+#include <string>
+#include <unordered_map>
+#include <unordered_set>
+#include <utility>
+#include <vector>
+
+#include "src/tint/ast/pipeline_stage.h"
+#include "src/tint/program_builder.h"
+#include "src/tint/resolver/sem_helper.h"
+#include "src/tint/source.h"
+
+// Forward declarations
+namespace tint::ast {
+class IndexAccessorExpression;
+class BinaryExpression;
+class BitcastExpression;
+class CallExpression;
+class CallStatement;
+class CaseStatement;
+class ForLoopStatement;
+class Function;
+class IdentifierExpression;
+class LoopStatement;
+class MemberAccessorExpression;
+class ReturnStatement;
+class SwitchStatement;
+class UnaryOpExpression;
+class Variable;
+} // namespace tint::ast
+namespace tint::sem {
+class Array;
+class Atomic;
+class BlockStatement;
+class Builtin;
+class CaseStatement;
+class ForLoopStatement;
+class IfStatement;
+class LoopStatement;
+class Materialize;
+class Statement;
+class SwitchStatement;
+class TypeConstructor;
+} // namespace tint::sem
+
+namespace tint::resolver {
+
+/// Validation logic for various ast nodes. The validations in general should
+/// be shallow and depend on the resolver to call on children. The validations
+/// also assume that sem changes have already been made. The validation checks
+/// should not alter the AST or SEM trees.
+class Validator {
+ public:
+ /// The valid type storage layouts typedef
+ using ValidTypeStorageLayouts = std::set<std::pair<const sem::Type*, ast::StorageClass>>;
+
+ /// Constructor
+ /// @param builder the program builder
+ /// @param helper the SEM helper to validate with
+ Validator(ProgramBuilder* builder, SemHelper& helper);
+ ~Validator();
+
+ /// Adds the given error message to the diagnostics
+ /// @param msg the error message
+ /// @param source the error source
+ void AddError(const std::string& msg, const Source& source) const;
+
+ /// Adds the given warning message to the diagnostics
+ /// @param msg the warning message
+ /// @param source the warning source
+ void AddWarning(const std::string& msg, const Source& source) const;
+
+ /// Adds the given note message to the diagnostics
+ /// @param msg the note message
+ /// @param source the note source
+ void AddNote(const std::string& msg, const Source& source) const;
+
+ /// @param type the given type
+ /// @returns true if the given type is a plain type
+ bool IsPlain(const sem::Type* type) const;
+
+ /// @param type the given type
+ /// @returns true if the given type is a fixed-footprint type
+ bool IsFixedFootprint(const sem::Type* type) const;
+
+ /// @param type the given type
+ /// @returns true if the given type is storable
+ bool IsStorable(const sem::Type* type) const;
+
+ /// @param type the given type
+ /// @returns true if the given type is host-shareable
+ bool IsHostShareable(const sem::Type* type) const;
+
+ /// Validates pipeline stages
+ /// @param entry_points the entry points to the module
+ /// @returns true on success, false otherwise.
+ bool PipelineStages(const std::vector<sem::Function*>& entry_points) const;
+
+ /// Validates aliases
+ /// @param alias the alias to validate
+ /// @returns true on success, false otherwise.
+ bool Alias(const ast::Alias* alias) const;
+
+ /// Validates the array
+ /// @param arr the array to validate
+ /// @param source the source of the array
+ /// @returns true on success, false otherwise.
+ bool Array(const sem::Array* arr, const Source& source) const;
+
+ /// Validates an array stride attribute
+ /// @param attr the stride attribute to validate
+ /// @param el_size the element size
+ /// @param el_align the element alignment
+ /// @param source the source of the attribute
+ /// @returns true on success, false otherwise
+ bool ArrayStrideAttribute(const ast::StrideAttribute* attr,
+ uint32_t el_size,
+ uint32_t el_align,
+ const Source& source) const;
+
+ /// Validates an atomic
+ /// @param a the atomic ast node to validate
+ /// @param s the atomic sem node
+ /// @returns true on success, false otherwise.
+ bool Atomic(const ast::Atomic* a, const sem::Atomic* s) const;
+
+    /// Validates an atomic variable
+    /// @param var the variable to validate
+    /// @param atomic_composite_info the atomic composite info of the module
+ /// @returns true on success, false otherwise.
+ bool AtomicVariable(
+ const sem::Variable* var,
+ std::unordered_map<const sem::Type*, const Source&> atomic_composite_info) const;
+
+ /// Validates an assignment
+ /// @param a the assignment statement
+ /// @param rhs_ty the type of the right hand side
+ /// @returns true on success, false otherwise.
+ bool Assignment(const ast::Statement* a, const sem::Type* rhs_ty) const;
+
+    /// Validates a bitcast
+ /// @param cast the bitcast expression
+ /// @param to the destination type
+ /// @returns true on success, false otherwise
+ bool Bitcast(const ast::BitcastExpression* cast, const sem::Type* to) const;
+
+ /// Validates a break statement
+ /// @param stmt the break statement to validate
+ /// @param current_statement the current statement being resolved
+ /// @returns true on success, false otherwise.
+ bool BreakStatement(const sem::Statement* stmt, sem::Statement* current_statement) const;
+
+ /// Validates a builtin attribute
+ /// @param attr the attribute to validate
+ /// @param storage_type the attribute storage type
+ /// @param stage the current pipeline stage
+ /// @param is_input true if this is an input attribute
+ /// @returns true on success, false otherwise.
+ bool BuiltinAttribute(const ast::BuiltinAttribute* attr,
+ const sem::Type* storage_type,
+ ast::PipelineStage stage,
+ const bool is_input) const;
+
+ /// Validates a continue statement
+ /// @param stmt the continue statement to validate
+ /// @param current_statement the current statement being resolved
+ /// @returns true on success, false otherwise
+ bool ContinueStatement(const sem::Statement* stmt, sem::Statement* current_statement) const;
+
+ /// Validates a call
+ /// @param call the call
+ /// @param current_statement the current statement being resolved
+ /// @returns true on success, false otherwise
+ bool Call(const sem::Call* call, sem::Statement* current_statement) const;
+
+ /// Validates a discard statement
+ /// @param stmt the statement to validate
+ /// @param current_statement the current statement being resolved
+ /// @returns true on success, false otherwise
+ bool DiscardStatement(const sem::Statement* stmt, sem::Statement* current_statement) const;
+
+ /// Validates an entry point
+ /// @param func the entry point function to validate
+ /// @param stage the pipeline stage for the entry point
+ /// @returns true on success, false otherwise
+ bool EntryPoint(const sem::Function* func, ast::PipelineStage stage) const;
+
+ /// Validates a for loop
+ /// @param stmt the for loop statement to validate
+ /// @returns true on success, false otherwise
+ bool ForLoopStatement(const sem::ForLoopStatement* stmt) const;
+
+ /// Validates a fallthrough statement
+ /// @param stmt the fallthrough to validate
+ /// @returns true on success, false otherwise
+ bool FallthroughStatement(const sem::Statement* stmt) const;
+
+ /// Validates a function
+ /// @param func the function to validate
+ /// @param stage the current pipeline stage
+ /// @returns true on success, false otherwise.
+ bool Function(const sem::Function* func, ast::PipelineStage stage) const;
+
+ /// Validates a function call
+ /// @param call the function call to validate
+ /// @param current_statement the current statement being resolved
+ /// @returns true on success, false otherwise
+ bool FunctionCall(const sem::Call* call, sem::Statement* current_statement) const;
+
+ /// Validates a global variable
+ /// @param var the global variable to validate
+ /// @param constant_ids the set of constant ids in the module
+ /// @param atomic_composite_info atomic composite info in the module
+ /// @returns true on success, false otherwise
+ bool GlobalVariable(
+ const sem::Variable* var,
+ std::unordered_map<uint32_t, const sem::Variable*> constant_ids,
+ std::unordered_map<const sem::Type*, const Source&> atomic_composite_info) const;
+
+ /// Validates an if statement
+ /// @param stmt the statement to validate
+ /// @returns true on success, false otherwise
+ bool IfStatement(const sem::IfStatement* stmt) const;
+
+ /// Validates an increment or decrement statement
+ /// @param stmt the statement to validate
+ /// @returns true on success, false otherwise
+ bool IncrementDecrementStatement(const ast::IncrementDecrementStatement* stmt) const;
+
+ /// Validates an interpolate attribute
+ /// @param attr the interpolation attribute to validate
+ /// @param storage_type the storage type of the attached variable
+    /// @returns true on success, false otherwise
+ bool InterpolateAttribute(const ast::InterpolateAttribute* attr,
+ const sem::Type* storage_type) const;
+
+ /// Validates a builtin call
+ /// @param call the builtin call to validate
+ /// @returns true on success, false otherwise.
+ bool BuiltinCall(const sem::Call* call) const;
+
+ /// Validates a location attribute
+ /// @param location the location attribute to validate
+ /// @param type the variable type
+ /// @param locations the set of locations in the module
+ /// @param stage the current pipeline stage
+ /// @param source the source of the attribute
+ /// @param is_input true if this is an input variable
+ /// @returns true on success, false otherwise.
+ bool LocationAttribute(const ast::LocationAttribute* location,
+ const sem::Type* type,
+ std::unordered_set<uint32_t>& locations,
+ ast::PipelineStage stage,
+ const Source& source,
+ const bool is_input = false) const;
+
+ /// Validates a loop statement
+ /// @param stmt the loop statement
+ /// @returns true on success, false otherwise.
+ bool LoopStatement(const sem::LoopStatement* stmt) const;
+
+ /// Validates a materialize of an abstract numeric value
+ /// @param m the materialize to validate
+ /// @returns true on success, false otherwise
+ bool Materialize(const sem::Materialize* m) const;
+
+ /// Validates a matrix
+ /// @param ty the matrix to validate
+ /// @param source the source of the matrix
+ /// @returns true on success, false otherwise
+ bool Matrix(const sem::Matrix* ty, const Source& source) const;
+
+ /// Validates a function parameter
+ /// @param func the function the variable is for
+ /// @param var the variable to validate
+ /// @returns true on success, false otherwise
+ bool FunctionParameter(const ast::Function* func, const sem::Variable* var) const;
+
+ /// Validates a return
+ /// @param ret the return statement to validate
+    /// @param func_type the return type of the current function
+ /// @param ret_type the return type
+ /// @param current_statement the current statement being resolved
+ /// @returns true on success, false otherwise
+ bool Return(const ast::ReturnStatement* ret,
+ const sem::Type* func_type,
+ const sem::Type* ret_type,
+ sem::Statement* current_statement) const;
+
+ /// Validates a list of statements
+ /// @param stmts the statements to validate
+ /// @returns true on success, false otherwise
+ bool Statements(const ast::StatementList& stmts) const;
+
+ /// Validates a storage texture
+ /// @param t the texture to validate
+ /// @returns true on success, false otherwise
+ bool StorageTexture(const ast::StorageTexture* t) const;
+
+ /// Validates a structure
+ /// @param str the structure to validate
+ /// @param stage the current pipeline stage
+ /// @returns true on success, false otherwise.
+ bool Structure(const sem::Struct* str, ast::PipelineStage stage) const;
+
+ /// Validates a structure constructor
+ /// @param ctor the call expression to validate
+ /// @param struct_type the type of the structure
+ /// @returns true on success, false otherwise
+ bool StructureConstructor(const ast::CallExpression* ctor,
+ const sem::Struct* struct_type) const;
+
+ /// Validates a switch statement
+ /// @param s the switch to validate
+ /// @returns true on success, false otherwise
+ bool SwitchStatement(const ast::SwitchStatement* s);
+
+ /// Validates a variable
+ /// @param var the variable to validate
+ /// @returns true on success, false otherwise.
+ bool Variable(const sem::Variable* var) const;
+
+ /// Validates a variable constructor or cast
+ /// @param var the variable to validate
+ /// @param storage_class the storage class of the variable
+ /// @param storage_type the type of the storage
+    /// @param rhs_type the type of the right-hand side expression
+    /// @returns true on success, false otherwise
+ bool VariableConstructorOrCast(const ast::Variable* var,
+ ast::StorageClass storage_class,
+ const sem::Type* storage_type,
+ const sem::Type* rhs_type) const;
+
+ /// Validates a vector
+ /// @param ty the vector to validate
+ /// @param source the source of the vector
+ /// @returns true on success, false otherwise
+ bool Vector(const sem::Vector* ty, const Source& source) const;
+
+ /// Validates an array constructor
+    /// @param ctor the call expression to validate
+ /// @param arr_type the type of the array
+ /// @returns true on success, false otherwise
+ bool ArrayConstructor(const ast::CallExpression* ctor, const sem::Array* arr_type) const;
+
+ /// Validates a texture builtin function
+ /// @param call the builtin call to validate
+ /// @returns true on success, false otherwise
+ bool TextureBuiltinFunction(const sem::Call* call) const;
+
+ /// Validates an optional builtin function and its required extension.
+ /// @param call the builtin call to validate
+ /// @param enabled_extensions all the extensions declared in current module
+ /// @returns true on success, false otherwise
+ bool RequiredExtensionForBuiltinFunction(const sem::Call* call,
+ const ast::Extensions& enabled_extensions) const;
+
+ /// Validates there are no duplicate attributes
+ /// @param attributes the list of attributes to validate
+ /// @returns true on success, false otherwise.
+ bool NoDuplicateAttributes(const ast::AttributeList& attributes) const;
+
+ /// Validates a storage class layout
+ /// @param type the type to validate
+ /// @param sc the storage class
+ /// @param source the source of the type
+ /// @param layouts previously validated storage layouts
+ /// @returns true on success, false otherwise
+ bool StorageClassLayout(const sem::Type* type,
+ ast::StorageClass sc,
+ Source source,
+ ValidTypeStorageLayouts& layouts) const;
+
+ /// Validates a storage class layout
+ /// @param var the variable to validate
+ /// @param layouts previously validated storage layouts
+ /// @returns true on success, false otherwise.
+ bool StorageClassLayout(const sem::Variable* var, ValidTypeStorageLayouts& layouts) const;
+
+    /// @returns true if the attribute list contains an
+ /// ast::DisableValidationAttribute with the validation mode equal to
+ /// `validation`
+ /// @param attributes the attribute list to check
+ /// @param validation the validation mode to check
+ bool IsValidationDisabled(const ast::AttributeList& attributes,
+ ast::DisabledValidation validation) const;
+
+    /// @returns true if the attribute list does not contain an
+ /// ast::DisableValidationAttribute with the validation mode equal to
+ /// `validation`
+ /// @param attributes the attribute list to check
+ /// @param validation the validation mode to check
+ bool IsValidationEnabled(const ast::AttributeList& attributes,
+ ast::DisabledValidation validation) const;
+
+ private:
+ /// Searches the current statement and up through parents of the current
+ /// statement looking for a loop or for-loop continuing statement.
+ /// @returns the closest continuing statement to the current statement that
+ /// (transitively) owns the current statement.
+ /// @param stop_at_loop if true then the function will return nullptr if a
+ /// loop or for-loop was found before the continuing.
+ /// @param current_statement the current statement being resolved
+ const ast::Statement* ClosestContinuing(bool stop_at_loop,
+ sem::Statement* current_statement) const;
+
+ /// Returns a human-readable string representation of the vector type name
+ /// with the given parameters.
+ /// @param size the vector dimension
+ /// @param element_type scalar vector sub-element type
+ /// @return pretty string representation
+ std::string VectorPretty(uint32_t size, const sem::Type* element_type) const;
+
+ SymbolTable& symbols_;
+ diag::List& diagnostics_;
+ SemHelper& sem_;
+};
+
+} // namespace tint::resolver
+
+#endif // SRC_TINT_RESOLVER_VALIDATOR_H_
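
The header above exposes NoDuplicateAttributes, whose implementation (shown earlier in validator.cc) keeps the first occurrence of each attribute kind in a map and reports any later occurrence against that first one. A small self-contained sketch of this first-seen-wins pattern is given below; it substitutes plain strings and line numbers for Tint's TypeInfo and Source, so it is illustrative only and not Tint code.

    #include <iostream>
    #include <string>
    #include <unordered_map>
    #include <vector>

    struct Attribute {
        std::string kind;  // stand-in for &attribute->TypeInfo()
        int line;          // stand-in for attribute->source
    };

    // Returns false and reports the clash on the first duplicated attribute kind.
    bool NoDuplicateAttributes(const std::vector<Attribute>& attributes) {
        std::unordered_map<std::string, int> seen;  // kind -> line of first declaration
        for (const auto& a : attributes) {
            auto res = seen.emplace(a.kind, a.line);
            if (!res.second) {
                std::cerr << "error: duplicate " << a.kind << " attribute at line " << a.line
                          << "\nnote: first attribute declared at line " << res.first->second
                          << "\n";
                return false;
            }
        }
        return true;
    }

    int main() {
        std::vector<Attribute> attrs = {{"location", 3}, {"builtin", 4}, {"location", 7}};
        return NoDuplicateAttributes(attrs) ? 0 : 1;
    }

Keying the map on the attribute's type identity rather than its spelled name is what lets the real validator treat differently-parameterized attributes of the same kind as duplicates while still exempting internal attributes.
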
diff --git a/chromium/third_party/dawn/src/tint/resolver/validator_is_storeable_test.cc b/chromium/third_party/dawn/src/tint/resolver/validator_is_storeable_test.cc
new file mode 100644
index 00000000000..a5f612c9050
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/resolver/validator_is_storeable_test.cc
@@ -0,0 +1,86 @@
+// Copyright 2021 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/tint/resolver/validator.h"
+
+#include "gmock/gmock.h"
+#include "src/tint/resolver/resolver_test_helper.h"
+#include "src/tint/sem/atomic.h"
+
+namespace tint::resolver {
+namespace {
+
+using ValidatorIsStorableTest = ResolverTest;
+
+TEST_F(ValidatorIsStorableTest, Void) {
+ EXPECT_FALSE(v()->IsStorable(create<sem::Void>()));
+}
+
+TEST_F(ValidatorIsStorableTest, Scalar) {
+ EXPECT_TRUE(v()->IsStorable(create<sem::Bool>()));
+ EXPECT_TRUE(v()->IsStorable(create<sem::I32>()));
+ EXPECT_TRUE(v()->IsStorable(create<sem::U32>()));
+ EXPECT_TRUE(v()->IsStorable(create<sem::F32>()));
+}
+
+TEST_F(ValidatorIsStorableTest, Vector) {
+ EXPECT_TRUE(v()->IsStorable(create<sem::Vector>(create<sem::I32>(), 2u)));
+ EXPECT_TRUE(v()->IsStorable(create<sem::Vector>(create<sem::I32>(), 3u)));
+ EXPECT_TRUE(v()->IsStorable(create<sem::Vector>(create<sem::I32>(), 4u)));
+ EXPECT_TRUE(v()->IsStorable(create<sem::Vector>(create<sem::U32>(), 2u)));
+ EXPECT_TRUE(v()->IsStorable(create<sem::Vector>(create<sem::U32>(), 3u)));
+ EXPECT_TRUE(v()->IsStorable(create<sem::Vector>(create<sem::U32>(), 4u)));
+ EXPECT_TRUE(v()->IsStorable(create<sem::Vector>(create<sem::F32>(), 2u)));
+ EXPECT_TRUE(v()->IsStorable(create<sem::Vector>(create<sem::F32>(), 3u)));
+ EXPECT_TRUE(v()->IsStorable(create<sem::Vector>(create<sem::F32>(), 4u)));
+}
+
+TEST_F(ValidatorIsStorableTest, Matrix) {
+ auto* vec2 = create<sem::Vector>(create<sem::F32>(), 2u);
+ auto* vec3 = create<sem::Vector>(create<sem::F32>(), 3u);
+ auto* vec4 = create<sem::Vector>(create<sem::F32>(), 4u);
+ EXPECT_TRUE(v()->IsStorable(create<sem::Matrix>(vec2, 2u)));
+ EXPECT_TRUE(v()->IsStorable(create<sem::Matrix>(vec2, 3u)));
+ EXPECT_TRUE(v()->IsStorable(create<sem::Matrix>(vec2, 4u)));
+ EXPECT_TRUE(v()->IsStorable(create<sem::Matrix>(vec3, 2u)));
+ EXPECT_TRUE(v()->IsStorable(create<sem::Matrix>(vec3, 3u)));
+ EXPECT_TRUE(v()->IsStorable(create<sem::Matrix>(vec3, 4u)));
+ EXPECT_TRUE(v()->IsStorable(create<sem::Matrix>(vec4, 2u)));
+ EXPECT_TRUE(v()->IsStorable(create<sem::Matrix>(vec4, 3u)));
+ EXPECT_TRUE(v()->IsStorable(create<sem::Matrix>(vec4, 4u)));
+}
+
+TEST_F(ValidatorIsStorableTest, Pointer) {
+ auto* ptr = create<sem::Pointer>(create<sem::I32>(), ast::StorageClass::kPrivate,
+ ast::Access::kReadWrite);
+ EXPECT_FALSE(v()->IsStorable(ptr));
+}
+
+TEST_F(ValidatorIsStorableTest, Atomic) {
+ EXPECT_TRUE(v()->IsStorable(create<sem::Atomic>(create<sem::I32>())));
+ EXPECT_TRUE(v()->IsStorable(create<sem::Atomic>(create<sem::U32>())));
+}
+
+TEST_F(ValidatorIsStorableTest, ArraySizedOfStorable) {
+ auto* arr = create<sem::Array>(create<sem::I32>(), 5u, 4u, 20u, 4u, 4u);
+ EXPECT_TRUE(v()->IsStorable(arr));
+}
+
+TEST_F(ValidatorIsStorableTest, ArrayUnsizedOfStorable) {
+ auto* arr = create<sem::Array>(create<sem::I32>(), 0u, 4u, 4u, 4u, 4u);
+ EXPECT_TRUE(v()->IsStorable(arr));
+}
+
+} // namespace
+} // namespace tint::resolver
diff --git a/chromium/third_party/dawn/src/tint/resolver/var_let_test.cc b/chromium/third_party/dawn/src/tint/resolver/var_let_test.cc
index 2ace9332a38..43067365396 100644
--- a/chromium/third_party/dawn/src/tint/resolver/var_let_test.cc
+++ b/chromium/third_party/dawn/src/tint/resolver/var_let_test.cc
@@ -14,681 +14,662 @@
#include "src/tint/resolver/resolver.h"
#include "src/tint/resolver/resolver_test_helper.h"
-#include "src/tint/sem/reference_type.h"
+#include "src/tint/sem/reference.h"
#include "gmock/gmock.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::resolver {
namespace {
-struct ResolverVarLetTest : public resolver::TestHelper,
- public testing::Test {};
+struct ResolverVarLetTest : public resolver::TestHelper, public testing::Test {};
TEST_F(ResolverVarLetTest, VarDeclWithoutConstructor) {
- // struct S { i : i32; }
- // alias A = S;
- // fn F(){
- // var i : i32;
- // var u : u32;
- // var f : f32;
- // var b : bool;
- // var s : S;
- // var a : A;
- // }
-
- auto* S = Structure("S", {Member("i", ty.i32())});
- auto* A = Alias("A", ty.Of(S));
-
- auto* i = Var("i", ty.i32(), ast::StorageClass::kNone);
- auto* u = Var("u", ty.u32(), ast::StorageClass::kNone);
- auto* f = Var("f", ty.f32(), ast::StorageClass::kNone);
- auto* b = Var("b", ty.bool_(), ast::StorageClass::kNone);
- auto* s = Var("s", ty.Of(S), ast::StorageClass::kNone);
- auto* a = Var("a", ty.Of(A), ast::StorageClass::kNone);
-
- Func("F", {}, ty.void_(),
- {
- Decl(i),
- Decl(u),
- Decl(f),
- Decl(b),
- Decl(s),
- Decl(a),
- });
-
- ASSERT_TRUE(r()->Resolve()) << r()->error();
-
- // `var` declarations are always of reference type
- ASSERT_TRUE(TypeOf(i)->Is<sem::Reference>());
- ASSERT_TRUE(TypeOf(u)->Is<sem::Reference>());
- ASSERT_TRUE(TypeOf(f)->Is<sem::Reference>());
- ASSERT_TRUE(TypeOf(b)->Is<sem::Reference>());
- ASSERT_TRUE(TypeOf(s)->Is<sem::Reference>());
- ASSERT_TRUE(TypeOf(a)->Is<sem::Reference>());
-
- EXPECT_TRUE(TypeOf(i)->As<sem::Reference>()->StoreType()->Is<sem::I32>());
- EXPECT_TRUE(TypeOf(u)->As<sem::Reference>()->StoreType()->Is<sem::U32>());
- EXPECT_TRUE(TypeOf(f)->As<sem::Reference>()->StoreType()->Is<sem::F32>());
- EXPECT_TRUE(TypeOf(b)->As<sem::Reference>()->StoreType()->Is<sem::Bool>());
- EXPECT_TRUE(TypeOf(s)->As<sem::Reference>()->StoreType()->Is<sem::Struct>());
- EXPECT_TRUE(TypeOf(a)->As<sem::Reference>()->StoreType()->Is<sem::Struct>());
-
- EXPECT_EQ(Sem().Get(i)->Constructor(), nullptr);
- EXPECT_EQ(Sem().Get(u)->Constructor(), nullptr);
- EXPECT_EQ(Sem().Get(f)->Constructor(), nullptr);
- EXPECT_EQ(Sem().Get(b)->Constructor(), nullptr);
- EXPECT_EQ(Sem().Get(s)->Constructor(), nullptr);
- EXPECT_EQ(Sem().Get(a)->Constructor(), nullptr);
+ // struct S { i : i32; }
+ // alias A = S;
+ // fn F(){
+ // var i : i32;
+ // var u : u32;
+ // var f : f32;
+ // var b : bool;
+ // var s : S;
+ // var a : A;
+ // }
+
+ auto* S = Structure("S", {Member("i", ty.i32())});
+ auto* A = Alias("A", ty.Of(S));
+
+ auto* i = Var("i", ty.i32(), ast::StorageClass::kNone);
+ auto* u = Var("u", ty.u32(), ast::StorageClass::kNone);
+ auto* f = Var("f", ty.f32(), ast::StorageClass::kNone);
+ auto* b = Var("b", ty.bool_(), ast::StorageClass::kNone);
+ auto* s = Var("s", ty.Of(S), ast::StorageClass::kNone);
+ auto* a = Var("a", ty.Of(A), ast::StorageClass::kNone);
+
+ Func("F", {}, ty.void_(),
+ {
+ Decl(i),
+ Decl(u),
+ Decl(f),
+ Decl(b),
+ Decl(s),
+ Decl(a),
+ });
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
+
+ // `var` declarations are always of reference type
+ ASSERT_TRUE(TypeOf(i)->Is<sem::Reference>());
+ ASSERT_TRUE(TypeOf(u)->Is<sem::Reference>());
+ ASSERT_TRUE(TypeOf(f)->Is<sem::Reference>());
+ ASSERT_TRUE(TypeOf(b)->Is<sem::Reference>());
+ ASSERT_TRUE(TypeOf(s)->Is<sem::Reference>());
+ ASSERT_TRUE(TypeOf(a)->Is<sem::Reference>());
+
+ EXPECT_TRUE(TypeOf(i)->As<sem::Reference>()->StoreType()->Is<sem::I32>());
+ EXPECT_TRUE(TypeOf(u)->As<sem::Reference>()->StoreType()->Is<sem::U32>());
+ EXPECT_TRUE(TypeOf(f)->As<sem::Reference>()->StoreType()->Is<sem::F32>());
+ EXPECT_TRUE(TypeOf(b)->As<sem::Reference>()->StoreType()->Is<sem::Bool>());
+ EXPECT_TRUE(TypeOf(s)->As<sem::Reference>()->StoreType()->Is<sem::Struct>());
+ EXPECT_TRUE(TypeOf(a)->As<sem::Reference>()->StoreType()->Is<sem::Struct>());
+
+ EXPECT_EQ(Sem().Get(i)->Constructor(), nullptr);
+ EXPECT_EQ(Sem().Get(u)->Constructor(), nullptr);
+ EXPECT_EQ(Sem().Get(f)->Constructor(), nullptr);
+ EXPECT_EQ(Sem().Get(b)->Constructor(), nullptr);
+ EXPECT_EQ(Sem().Get(s)->Constructor(), nullptr);
+ EXPECT_EQ(Sem().Get(a)->Constructor(), nullptr);
}
TEST_F(ResolverVarLetTest, VarDeclWithConstructor) {
- // struct S { i : i32; }
- // alias A = S;
- // fn F(){
- // var i : i32 = 1;
- // var u : u32 = 1u;
- // var f : f32 = 1.f;
- // var b : bool = true;
- // var s : S = S(1);
- // var a : A = A(1);
- // }
-
- auto* S = Structure("S", {Member("i", ty.i32())});
- auto* A = Alias("A", ty.Of(S));
-
- auto* i_c = Expr(1);
- auto* u_c = Expr(1u);
- auto* f_c = Expr(1.f);
- auto* b_c = Expr(true);
- auto* s_c = Construct(ty.Of(S), Expr(1));
- auto* a_c = Construct(ty.Of(A), Expr(1));
-
- auto* i = Var("i", ty.i32(), ast::StorageClass::kNone, i_c);
- auto* u = Var("u", ty.u32(), ast::StorageClass::kNone, u_c);
- auto* f = Var("f", ty.f32(), ast::StorageClass::kNone, f_c);
- auto* b = Var("b", ty.bool_(), ast::StorageClass::kNone, b_c);
- auto* s = Var("s", ty.Of(S), ast::StorageClass::kNone, s_c);
- auto* a = Var("a", ty.Of(A), ast::StorageClass::kNone, a_c);
-
- Func("F", {}, ty.void_(),
- {
- Decl(i),
- Decl(u),
- Decl(f),
- Decl(b),
- Decl(s),
- Decl(a),
- });
-
- ASSERT_TRUE(r()->Resolve()) << r()->error();
-
- // `var` declarations are always of reference type
- ASSERT_TRUE(TypeOf(i)->Is<sem::Reference>());
- ASSERT_TRUE(TypeOf(u)->Is<sem::Reference>());
- ASSERT_TRUE(TypeOf(f)->Is<sem::Reference>());
- ASSERT_TRUE(TypeOf(b)->Is<sem::Reference>());
- ASSERT_TRUE(TypeOf(s)->Is<sem::Reference>());
- ASSERT_TRUE(TypeOf(a)->Is<sem::Reference>());
-
- EXPECT_TRUE(TypeOf(i)->As<sem::Reference>()->StoreType()->Is<sem::I32>());
- EXPECT_TRUE(TypeOf(u)->As<sem::Reference>()->StoreType()->Is<sem::U32>());
- EXPECT_TRUE(TypeOf(f)->As<sem::Reference>()->StoreType()->Is<sem::F32>());
- EXPECT_TRUE(TypeOf(b)->As<sem::Reference>()->StoreType()->Is<sem::Bool>());
- EXPECT_TRUE(TypeOf(s)->As<sem::Reference>()->StoreType()->Is<sem::Struct>());
- EXPECT_TRUE(TypeOf(a)->As<sem::Reference>()->StoreType()->Is<sem::Struct>());
-
- EXPECT_EQ(Sem().Get(i)->Constructor()->Declaration(), i_c);
- EXPECT_EQ(Sem().Get(u)->Constructor()->Declaration(), u_c);
- EXPECT_EQ(Sem().Get(f)->Constructor()->Declaration(), f_c);
- EXPECT_EQ(Sem().Get(b)->Constructor()->Declaration(), b_c);
- EXPECT_EQ(Sem().Get(s)->Constructor()->Declaration(), s_c);
- EXPECT_EQ(Sem().Get(a)->Constructor()->Declaration(), a_c);
+ // struct S { i : i32; }
+ // alias A = S;
+ // fn F(){
+ // var i : i32 = 1i;
+ // var u : u32 = 1u;
+ // var f : f32 = 1.f;
+ // var b : bool = true;
+ // var s : S = S(1);
+ // var a : A = A(1);
+ // }
+
+ auto* S = Structure("S", {Member("i", ty.i32())});
+ auto* A = Alias("A", ty.Of(S));
+
+ auto* i_c = Expr(1_i);
+ auto* u_c = Expr(1_u);
+ auto* f_c = Expr(1_f);
+ auto* b_c = Expr(true);
+ auto* s_c = Construct(ty.Of(S), Expr(1_i));
+ auto* a_c = Construct(ty.Of(A), Expr(1_i));
+
+ auto* i = Var("i", ty.i32(), ast::StorageClass::kNone, i_c);
+ auto* u = Var("u", ty.u32(), ast::StorageClass::kNone, u_c);
+ auto* f = Var("f", ty.f32(), ast::StorageClass::kNone, f_c);
+ auto* b = Var("b", ty.bool_(), ast::StorageClass::kNone, b_c);
+ auto* s = Var("s", ty.Of(S), ast::StorageClass::kNone, s_c);
+ auto* a = Var("a", ty.Of(A), ast::StorageClass::kNone, a_c);
+
+ Func("F", {}, ty.void_(),
+ {
+ Decl(i),
+ Decl(u),
+ Decl(f),
+ Decl(b),
+ Decl(s),
+ Decl(a),
+ });
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
+
+ // `var` declarations are always of reference type
+ ASSERT_TRUE(TypeOf(i)->Is<sem::Reference>());
+ ASSERT_TRUE(TypeOf(u)->Is<sem::Reference>());
+ ASSERT_TRUE(TypeOf(f)->Is<sem::Reference>());
+ ASSERT_TRUE(TypeOf(b)->Is<sem::Reference>());
+ ASSERT_TRUE(TypeOf(s)->Is<sem::Reference>());
+ ASSERT_TRUE(TypeOf(a)->Is<sem::Reference>());
+
+ EXPECT_TRUE(TypeOf(i)->As<sem::Reference>()->StoreType()->Is<sem::I32>());
+ EXPECT_TRUE(TypeOf(u)->As<sem::Reference>()->StoreType()->Is<sem::U32>());
+ EXPECT_TRUE(TypeOf(f)->As<sem::Reference>()->StoreType()->Is<sem::F32>());
+ EXPECT_TRUE(TypeOf(b)->As<sem::Reference>()->StoreType()->Is<sem::Bool>());
+ EXPECT_TRUE(TypeOf(s)->As<sem::Reference>()->StoreType()->Is<sem::Struct>());
+ EXPECT_TRUE(TypeOf(a)->As<sem::Reference>()->StoreType()->Is<sem::Struct>());
+
+ EXPECT_EQ(Sem().Get(i)->Constructor()->Declaration(), i_c);
+ EXPECT_EQ(Sem().Get(u)->Constructor()->Declaration(), u_c);
+ EXPECT_EQ(Sem().Get(f)->Constructor()->Declaration(), f_c);
+ EXPECT_EQ(Sem().Get(b)->Constructor()->Declaration(), b_c);
+ EXPECT_EQ(Sem().Get(s)->Constructor()->Declaration(), s_c);
+ EXPECT_EQ(Sem().Get(a)->Constructor()->Declaration(), a_c);
}
TEST_F(ResolverVarLetTest, LetDecl) {
- // struct S { i : i32; }
- // fn F(){
- // var v : i32;
- // let i : i32 = 1;
- // let u : u32 = 1u;
- // let f : f32 = 1.;
- // let b : bool = true;
- // let s : S = S(1);
- // let a : A = A(1);
- // let p : pointer<function, i32> = &v;
- // }
-
- auto* S = Structure("S", {Member("i", ty.i32())});
- auto* A = Alias("A", ty.Of(S));
- auto* v = Var("v", ty.i32(), ast::StorageClass::kNone);
-
- auto* i_c = Expr(1);
- auto* u_c = Expr(1u);
- auto* f_c = Expr(1.f);
- auto* b_c = Expr(true);
- auto* s_c = Construct(ty.Of(S), Expr(1));
- auto* a_c = Construct(ty.Of(A), Expr(1));
- auto* p_c = AddressOf(v);
-
- auto* i = Const("i", ty.i32(), i_c);
- auto* u = Const("u", ty.u32(), u_c);
- auto* f = Const("f", ty.f32(), f_c);
- auto* b = Const("b", ty.bool_(), b_c);
- auto* s = Const("s", ty.Of(S), s_c);
- auto* a = Const("a", ty.Of(A), a_c);
- auto* p = Const("p", ty.pointer<i32>(ast::StorageClass::kFunction), p_c);
-
- Func("F", {}, ty.void_(),
- {
- Decl(v),
- Decl(i),
- Decl(u),
- Decl(f),
- Decl(b),
- Decl(s),
- Decl(a),
- Decl(p),
- });
-
- ASSERT_TRUE(r()->Resolve()) << r()->error();
-
- // `let` declarations are always of the storage type
- ASSERT_TRUE(TypeOf(i)->Is<sem::I32>());
- ASSERT_TRUE(TypeOf(u)->Is<sem::U32>());
- ASSERT_TRUE(TypeOf(f)->Is<sem::F32>());
- ASSERT_TRUE(TypeOf(b)->Is<sem::Bool>());
- ASSERT_TRUE(TypeOf(s)->Is<sem::Struct>());
- ASSERT_TRUE(TypeOf(a)->Is<sem::Struct>());
- ASSERT_TRUE(TypeOf(p)->Is<sem::Pointer>());
- ASSERT_TRUE(TypeOf(p)->As<sem::Pointer>()->StoreType()->Is<sem::I32>());
-
- EXPECT_EQ(Sem().Get(i)->Constructor()->Declaration(), i_c);
- EXPECT_EQ(Sem().Get(u)->Constructor()->Declaration(), u_c);
- EXPECT_EQ(Sem().Get(f)->Constructor()->Declaration(), f_c);
- EXPECT_EQ(Sem().Get(b)->Constructor()->Declaration(), b_c);
- EXPECT_EQ(Sem().Get(s)->Constructor()->Declaration(), s_c);
- EXPECT_EQ(Sem().Get(a)->Constructor()->Declaration(), a_c);
- EXPECT_EQ(Sem().Get(p)->Constructor()->Declaration(), p_c);
+ // struct S { i : i32; }
+ // fn F(){
+ // var v : i32;
+ // let i : i32 = 1i;
+ // let u : u32 = 1u;
+ // let f : f32 = 1.;
+ // let b : bool = true;
+ // let s : S = S(1);
+ // let a : A = A(1);
+ // let p : pointer<function, i32> = &v;
+ // }
+
+ auto* S = Structure("S", {Member("i", ty.i32())});
+ auto* A = Alias("A", ty.Of(S));
+ auto* v = Var("v", ty.i32(), ast::StorageClass::kNone);
+
+ auto* i_c = Expr(1_i);
+ auto* u_c = Expr(1_u);
+ auto* f_c = Expr(1_f);
+ auto* b_c = Expr(true);
+ auto* s_c = Construct(ty.Of(S), Expr(1_i));
+ auto* a_c = Construct(ty.Of(A), Expr(1_i));
+ auto* p_c = AddressOf(v);
+
+ auto* i = Let("i", ty.i32(), i_c);
+ auto* u = Let("u", ty.u32(), u_c);
+ auto* f = Let("f", ty.f32(), f_c);
+ auto* b = Let("b", ty.bool_(), b_c);
+ auto* s = Let("s", ty.Of(S), s_c);
+ auto* a = Let("a", ty.Of(A), a_c);
+ auto* p = Let("p", ty.pointer<i32>(ast::StorageClass::kFunction), p_c);
+
+ Func("F", {}, ty.void_(),
+ {
+ Decl(v),
+ Decl(i),
+ Decl(u),
+ Decl(f),
+ Decl(b),
+ Decl(s),
+ Decl(a),
+ Decl(p),
+ });
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
+
+ // `let` declarations are always of the storage type
+ ASSERT_TRUE(TypeOf(i)->Is<sem::I32>());
+ ASSERT_TRUE(TypeOf(u)->Is<sem::U32>());
+ ASSERT_TRUE(TypeOf(f)->Is<sem::F32>());
+ ASSERT_TRUE(TypeOf(b)->Is<sem::Bool>());
+ ASSERT_TRUE(TypeOf(s)->Is<sem::Struct>());
+ ASSERT_TRUE(TypeOf(a)->Is<sem::Struct>());
+ ASSERT_TRUE(TypeOf(p)->Is<sem::Pointer>());
+ ASSERT_TRUE(TypeOf(p)->As<sem::Pointer>()->StoreType()->Is<sem::I32>());
+
+ EXPECT_EQ(Sem().Get(i)->Constructor()->Declaration(), i_c);
+ EXPECT_EQ(Sem().Get(u)->Constructor()->Declaration(), u_c);
+ EXPECT_EQ(Sem().Get(f)->Constructor()->Declaration(), f_c);
+ EXPECT_EQ(Sem().Get(b)->Constructor()->Declaration(), b_c);
+ EXPECT_EQ(Sem().Get(s)->Constructor()->Declaration(), s_c);
+ EXPECT_EQ(Sem().Get(a)->Constructor()->Declaration(), a_c);
+ EXPECT_EQ(Sem().Get(p)->Constructor()->Declaration(), p_c);
}
TEST_F(ResolverVarLetTest, DefaultVarStorageClass) {
- // https://gpuweb.github.io/gpuweb/wgsl/#storage-class
-
- auto* buf = Structure("S", {Member("m", ty.i32())});
- auto* function = Var("f", ty.i32());
- auto* private_ = Global("p", ty.i32(), ast::StorageClass::kPrivate);
- auto* workgroup = Global("w", ty.i32(), ast::StorageClass::kWorkgroup);
- auto* uniform = Global("ub", ty.Of(buf), ast::StorageClass::kUniform,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(0),
- });
- auto* storage = Global("sb", ty.Of(buf), ast::StorageClass::kStorage,
- ast::AttributeList{
- create<ast::BindingAttribute>(1),
- create<ast::GroupAttribute>(0),
- });
- auto* handle = Global("h", ty.depth_texture(ast::TextureDimension::k2d),
- ast::AttributeList{
- create<ast::BindingAttribute>(2),
- create<ast::GroupAttribute>(0),
- });
-
- WrapInFunction(function);
-
- ASSERT_TRUE(r()->Resolve()) << r()->error();
-
- ASSERT_TRUE(TypeOf(function)->Is<sem::Reference>());
- ASSERT_TRUE(TypeOf(private_)->Is<sem::Reference>());
- ASSERT_TRUE(TypeOf(workgroup)->Is<sem::Reference>());
- ASSERT_TRUE(TypeOf(uniform)->Is<sem::Reference>());
- ASSERT_TRUE(TypeOf(storage)->Is<sem::Reference>());
- ASSERT_TRUE(TypeOf(handle)->Is<sem::Reference>());
-
- EXPECT_EQ(TypeOf(function)->As<sem::Reference>()->Access(),
- ast::Access::kReadWrite);
- EXPECT_EQ(TypeOf(private_)->As<sem::Reference>()->Access(),
- ast::Access::kReadWrite);
- EXPECT_EQ(TypeOf(workgroup)->As<sem::Reference>()->Access(),
- ast::Access::kReadWrite);
- EXPECT_EQ(TypeOf(uniform)->As<sem::Reference>()->Access(),
- ast::Access::kRead);
- EXPECT_EQ(TypeOf(storage)->As<sem::Reference>()->Access(),
- ast::Access::kRead);
- EXPECT_EQ(TypeOf(handle)->As<sem::Reference>()->Access(), ast::Access::kRead);
+ // https://gpuweb.github.io/gpuweb/wgsl/#storage-class
+
+ auto* buf = Structure("S", {Member("m", ty.i32())});
+ auto* function = Var("f", ty.i32());
+ auto* private_ = Global("p", ty.i32(), ast::StorageClass::kPrivate);
+ auto* workgroup = Global("w", ty.i32(), ast::StorageClass::kWorkgroup);
+ auto* uniform = Global("ub", ty.Of(buf), ast::StorageClass::kUniform,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(0),
+ });
+ auto* storage = Global("sb", ty.Of(buf), ast::StorageClass::kStorage,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(1),
+ create<ast::GroupAttribute>(0),
+ });
+ auto* handle = Global("h", ty.depth_texture(ast::TextureDimension::k2d),
+ ast::AttributeList{
+ create<ast::BindingAttribute>(2),
+ create<ast::GroupAttribute>(0),
+ });
+
+ WrapInFunction(function);
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
+
+ ASSERT_TRUE(TypeOf(function)->Is<sem::Reference>());
+ ASSERT_TRUE(TypeOf(private_)->Is<sem::Reference>());
+ ASSERT_TRUE(TypeOf(workgroup)->Is<sem::Reference>());
+ ASSERT_TRUE(TypeOf(uniform)->Is<sem::Reference>());
+ ASSERT_TRUE(TypeOf(storage)->Is<sem::Reference>());
+ ASSERT_TRUE(TypeOf(handle)->Is<sem::Reference>());
+
+ EXPECT_EQ(TypeOf(function)->As<sem::Reference>()->Access(), ast::Access::kReadWrite);
+ EXPECT_EQ(TypeOf(private_)->As<sem::Reference>()->Access(), ast::Access::kReadWrite);
+ EXPECT_EQ(TypeOf(workgroup)->As<sem::Reference>()->Access(), ast::Access::kReadWrite);
+ EXPECT_EQ(TypeOf(uniform)->As<sem::Reference>()->Access(), ast::Access::kRead);
+ EXPECT_EQ(TypeOf(storage)->As<sem::Reference>()->Access(), ast::Access::kRead);
+ EXPECT_EQ(TypeOf(handle)->As<sem::Reference>()->Access(), ast::Access::kRead);
}
TEST_F(ResolverVarLetTest, ExplicitVarStorageClass) {
- // https://gpuweb.github.io/gpuweb/wgsl/#storage-class
+ // https://gpuweb.github.io/gpuweb/wgsl/#storage-class
- auto* buf = Structure("S", {Member("m", ty.i32())});
- auto* storage = Global("sb", ty.Of(buf), ast::StorageClass::kStorage,
- ast::Access::kReadWrite,
- ast::AttributeList{
- create<ast::BindingAttribute>(1),
- create<ast::GroupAttribute>(0),
- });
+ auto* buf = Structure("S", {Member("m", ty.i32())});
+ auto* storage = Global("sb", ty.Of(buf), ast::StorageClass::kStorage, ast::Access::kReadWrite,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(1),
+ create<ast::GroupAttribute>(0),
+ });
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- ASSERT_TRUE(TypeOf(storage)->Is<sem::Reference>());
+ ASSERT_TRUE(TypeOf(storage)->Is<sem::Reference>());
- EXPECT_EQ(TypeOf(storage)->As<sem::Reference>()->Access(),
- ast::Access::kReadWrite);
+ EXPECT_EQ(TypeOf(storage)->As<sem::Reference>()->Access(), ast::Access::kReadWrite);
}
TEST_F(ResolverVarLetTest, LetInheritsAccessFromOriginatingVariable) {
- // struct Inner {
- // arr: array<i32, 4>;
- // }
- // struct S {
- // inner: Inner;
- // }
- // @group(0) @binding(0) var<storage, read_write> s : S;
- // fn f() {
- // let p = &s.inner.arr[2];
- // }
- auto* inner = Structure("Inner", {Member("arr", ty.array<i32, 4>())});
- auto* buf = Structure("S", {Member("inner", ty.Of(inner))});
- auto* storage = Global("s", ty.Of(buf), ast::StorageClass::kStorage,
- ast::Access::kReadWrite,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(0),
- });
-
- auto* expr =
- IndexAccessor(MemberAccessor(MemberAccessor(storage, "inner"), "arr"), 4);
- auto* ptr = Const("p", nullptr, AddressOf(expr));
-
- WrapInFunction(ptr);
-
- ASSERT_TRUE(r()->Resolve()) << r()->error();
-
- ASSERT_TRUE(TypeOf(expr)->Is<sem::Reference>());
- ASSERT_TRUE(TypeOf(ptr)->Is<sem::Pointer>());
-
- EXPECT_EQ(TypeOf(expr)->As<sem::Reference>()->Access(),
- ast::Access::kReadWrite);
- EXPECT_EQ(TypeOf(ptr)->As<sem::Pointer>()->Access(), ast::Access::kReadWrite);
+ // struct Inner {
+ // arr: array<i32, 4>;
+ // }
+ // struct S {
+ // inner: Inner;
+ // }
+ // @group(0) @binding(0) var<storage, read_write> s : S;
+ // fn f() {
+ // let p = &s.inner.arr[4];
+ // }
+ auto* inner = Structure("Inner", {Member("arr", ty.array<i32, 4>())});
+ auto* buf = Structure("S", {Member("inner", ty.Of(inner))});
+ auto* storage = Global("s", ty.Of(buf), ast::StorageClass::kStorage, ast::Access::kReadWrite,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(0),
+ });
+
+ auto* expr = IndexAccessor(MemberAccessor(MemberAccessor(storage, "inner"), "arr"), 4_i);
+ auto* ptr = Let("p", nullptr, AddressOf(expr));
+
+ WrapInFunction(ptr);
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
+
+ ASSERT_TRUE(TypeOf(expr)->Is<sem::Reference>());
+ ASSERT_TRUE(TypeOf(ptr)->Is<sem::Pointer>());
+
+ EXPECT_EQ(TypeOf(expr)->As<sem::Reference>()->Access(), ast::Access::kReadWrite);
+ EXPECT_EQ(TypeOf(ptr)->As<sem::Pointer>()->Access(), ast::Access::kReadWrite);
}
TEST_F(ResolverVarLetTest, LocalShadowsAlias) {
- // type a = i32;
- //
- // fn X() {
- // var a = false;
- // }
- //
- // fn Y() {
- // let a = true;
- // }
-
- auto* t = Alias("a", ty.i32());
- auto* v = Var("a", nullptr, Expr(false));
- auto* l = Const("a", nullptr, Expr(false));
- Func("X", {}, ty.void_(), {Decl(v)});
- Func("Y", {}, ty.void_(), {Decl(l)});
-
- ASSERT_TRUE(r()->Resolve()) << r()->error();
-
- auto* type_t = Sem().Get(t);
- auto* local_v = Sem().Get<sem::LocalVariable>(v);
- auto* local_l = Sem().Get<sem::LocalVariable>(l);
-
- ASSERT_NE(local_v, nullptr);
- ASSERT_NE(local_l, nullptr);
-
- EXPECT_EQ(local_v->Shadows(), type_t);
- EXPECT_EQ(local_l->Shadows(), type_t);
+ // type a = i32;
+ //
+ // fn X() {
+ // var a = false;
+ // }
+ //
+ // fn Y() {
+ // let a = true;
+ // }
+
+ auto* t = Alias("a", ty.i32());
+ auto* v = Var("a", nullptr, Expr(false));
+ auto* l = Let("a", nullptr, Expr(false));
+ Func("X", {}, ty.void_(), {Decl(v)});
+ Func("Y", {}, ty.void_(), {Decl(l)});
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
+
+ auto* type_t = Sem().Get(t);
+ auto* local_v = Sem().Get<sem::LocalVariable>(v);
+ auto* local_l = Sem().Get<sem::LocalVariable>(l);
+
+ ASSERT_NE(local_v, nullptr);
+ ASSERT_NE(local_l, nullptr);
+
+ EXPECT_EQ(local_v->Shadows(), type_t);
+ EXPECT_EQ(local_l->Shadows(), type_t);
}
TEST_F(ResolverVarLetTest, LocalShadowsStruct) {
- // struct a {
- // m : i32;
- // };
- //
- // fn X() {
- // var a = true;
- // }
- //
- // fn Y() {
- // let a = false;
- // }
-
- auto* t = Structure("a", {Member("m", ty.i32())});
- auto* v = Var("a", nullptr, Expr(false));
- auto* l = Const("a", nullptr, Expr(false));
- Func("X", {}, ty.void_(), {Decl(v)});
- Func("Y", {}, ty.void_(), {Decl(l)});
-
- ASSERT_TRUE(r()->Resolve()) << r()->error();
-
- auto* type_t = Sem().Get(t);
- auto* local_v = Sem().Get<sem::LocalVariable>(v);
- auto* local_l = Sem().Get<sem::LocalVariable>(l);
-
- ASSERT_NE(local_v, nullptr);
- ASSERT_NE(local_l, nullptr);
-
- EXPECT_EQ(local_v->Shadows(), type_t);
- EXPECT_EQ(local_l->Shadows(), type_t);
+ // struct a {
+ // m : i32;
+ // };
+ //
+ // fn X() {
+ // var a = true;
+ // }
+ //
+ // fn Y() {
+ // let a = false;
+ // }
+
+ auto* t = Structure("a", {Member("m", ty.i32())});
+ auto* v = Var("a", nullptr, Expr(false));
+ auto* l = Let("a", nullptr, Expr(false));
+ Func("X", {}, ty.void_(), {Decl(v)});
+ Func("Y", {}, ty.void_(), {Decl(l)});
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
+
+ auto* type_t = Sem().Get(t);
+ auto* local_v = Sem().Get<sem::LocalVariable>(v);
+ auto* local_l = Sem().Get<sem::LocalVariable>(l);
+
+ ASSERT_NE(local_v, nullptr);
+ ASSERT_NE(local_l, nullptr);
+
+ EXPECT_EQ(local_v->Shadows(), type_t);
+ EXPECT_EQ(local_l->Shadows(), type_t);
}
TEST_F(ResolverVarLetTest, LocalShadowsFunction) {
- // fn a() {
- // var a = true;
- // }
- //
- // fn b() {
- // let b = false;
- // }
-
- auto* v = Var("a", nullptr, Expr(false));
- auto* l = Const("b", nullptr, Expr(false));
- auto* fa = Func("a", {}, ty.void_(), {Decl(v)});
- auto* fb = Func("b", {}, ty.void_(), {Decl(l)});
-
- ASSERT_TRUE(r()->Resolve()) << r()->error();
-
- auto* local_v = Sem().Get<sem::LocalVariable>(v);
- auto* local_l = Sem().Get<sem::LocalVariable>(l);
- auto* func_a = Sem().Get(fa);
- auto* func_b = Sem().Get(fb);
-
- ASSERT_NE(local_v, nullptr);
- ASSERT_NE(local_l, nullptr);
- ASSERT_NE(func_a, nullptr);
- ASSERT_NE(func_b, nullptr);
-
- EXPECT_EQ(local_v->Shadows(), func_a);
- EXPECT_EQ(local_l->Shadows(), func_b);
+ // fn a() {
+ // var a = true;
+ // }
+ //
+ // fn b() {
+ // let b = false;
+ // }
+
+ auto* v = Var("a", nullptr, Expr(false));
+ auto* l = Let("b", nullptr, Expr(false));
+ auto* fa = Func("a", {}, ty.void_(), {Decl(v)});
+ auto* fb = Func("b", {}, ty.void_(), {Decl(l)});
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
+
+ auto* local_v = Sem().Get<sem::LocalVariable>(v);
+ auto* local_l = Sem().Get<sem::LocalVariable>(l);
+ auto* func_a = Sem().Get(fa);
+ auto* func_b = Sem().Get(fb);
+
+ ASSERT_NE(local_v, nullptr);
+ ASSERT_NE(local_l, nullptr);
+ ASSERT_NE(func_a, nullptr);
+ ASSERT_NE(func_b, nullptr);
+
+ EXPECT_EQ(local_v->Shadows(), func_a);
+ EXPECT_EQ(local_l->Shadows(), func_b);
}
TEST_F(ResolverVarLetTest, LocalShadowsGlobalVar) {
- // var<private> a : i32;
- //
- // fn X() {
- // var a = a;
- // }
- //
- // fn Y() {
- // let a = a;
- // }
-
- auto* g = Global("a", ty.i32(), ast::StorageClass::kPrivate);
- auto* v = Var("a", nullptr, Expr("a"));
- auto* l = Const("a", nullptr, Expr("a"));
- Func("X", {}, ty.void_(), {Decl(v)});
- Func("Y", {}, ty.void_(), {Decl(l)});
-
- ASSERT_TRUE(r()->Resolve()) << r()->error();
-
- auto* global = Sem().Get(g);
- auto* local_v = Sem().Get<sem::LocalVariable>(v);
- auto* local_l = Sem().Get<sem::LocalVariable>(l);
-
- ASSERT_NE(local_v, nullptr);
- ASSERT_NE(local_l, nullptr);
-
- EXPECT_EQ(local_v->Shadows(), global);
- EXPECT_EQ(local_l->Shadows(), global);
-
- auto* user_v =
- Sem().Get<sem::VariableUser>(local_v->Declaration()->constructor);
- auto* user_l =
- Sem().Get<sem::VariableUser>(local_l->Declaration()->constructor);
-
- ASSERT_NE(user_v, nullptr);
- ASSERT_NE(user_l, nullptr);
-
- EXPECT_EQ(user_v->Variable(), global);
- EXPECT_EQ(user_l->Variable(), global);
+ // var<private> a : i32;
+ //
+ // fn X() {
+ // var a = a;
+ // }
+ //
+ // fn Y() {
+ // let a = a;
+ // }
+
+ auto* g = Global("a", ty.i32(), ast::StorageClass::kPrivate);
+ auto* v = Var("a", nullptr, Expr("a"));
+ auto* l = Let("a", nullptr, Expr("a"));
+ Func("X", {}, ty.void_(), {Decl(v)});
+ Func("Y", {}, ty.void_(), {Decl(l)});
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
+
+ auto* global = Sem().Get(g);
+ auto* local_v = Sem().Get<sem::LocalVariable>(v);
+ auto* local_l = Sem().Get<sem::LocalVariable>(l);
+
+ ASSERT_NE(local_v, nullptr);
+ ASSERT_NE(local_l, nullptr);
+
+ EXPECT_EQ(local_v->Shadows(), global);
+ EXPECT_EQ(local_l->Shadows(), global);
+
+ auto* user_v = Sem().Get<sem::VariableUser>(local_v->Declaration()->constructor);
+ auto* user_l = Sem().Get<sem::VariableUser>(local_l->Declaration()->constructor);
+
+ ASSERT_NE(user_v, nullptr);
+ ASSERT_NE(user_l, nullptr);
+
+ EXPECT_EQ(user_v->Variable(), global);
+ EXPECT_EQ(user_l->Variable(), global);
}
TEST_F(ResolverVarLetTest, LocalShadowsGlobalLet) {
- // let a : i32 = 1;
- //
- // fn X() {
- // var a = (a == 123);
- // }
- //
- // fn Y() {
- // let a = (a == 321);
- // }
-
- auto* g = GlobalConst("a", ty.i32(), Expr(1));
- auto* v = Var("a", nullptr, Expr("a"));
- auto* l = Const("a", nullptr, Expr("a"));
- Func("X", {}, ty.void_(), {Decl(v)});
- Func("Y", {}, ty.void_(), {Decl(l)});
-
- ASSERT_TRUE(r()->Resolve()) << r()->error();
-
- auto* global = Sem().Get(g);
- auto* local_v = Sem().Get<sem::LocalVariable>(v);
- auto* local_l = Sem().Get<sem::LocalVariable>(l);
-
- ASSERT_NE(local_v, nullptr);
- ASSERT_NE(local_l, nullptr);
-
- EXPECT_EQ(local_v->Shadows(), global);
- EXPECT_EQ(local_l->Shadows(), global);
-
- auto* user_v =
- Sem().Get<sem::VariableUser>(local_v->Declaration()->constructor);
- auto* user_l =
- Sem().Get<sem::VariableUser>(local_l->Declaration()->constructor);
-
- ASSERT_NE(user_v, nullptr);
- ASSERT_NE(user_l, nullptr);
-
- EXPECT_EQ(user_v->Variable(), global);
- EXPECT_EQ(user_l->Variable(), global);
+ // let a : i32 = 1;
+ //
+ // fn X() {
+ // var a = (a == 123);
+ // }
+ //
+ // fn Y() {
+ // let a = (a == 321);
+ // }
+
+ auto* g = GlobalConst("a", ty.i32(), Expr(1_i));
+ auto* v = Var("a", nullptr, Expr("a"));
+ auto* l = Let("a", nullptr, Expr("a"));
+ Func("X", {}, ty.void_(), {Decl(v)});
+ Func("Y", {}, ty.void_(), {Decl(l)});
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
+
+ auto* global = Sem().Get(g);
+ auto* local_v = Sem().Get<sem::LocalVariable>(v);
+ auto* local_l = Sem().Get<sem::LocalVariable>(l);
+
+ ASSERT_NE(local_v, nullptr);
+ ASSERT_NE(local_l, nullptr);
+
+ EXPECT_EQ(local_v->Shadows(), global);
+ EXPECT_EQ(local_l->Shadows(), global);
+
+ auto* user_v = Sem().Get<sem::VariableUser>(local_v->Declaration()->constructor);
+ auto* user_l = Sem().Get<sem::VariableUser>(local_l->Declaration()->constructor);
+
+ ASSERT_NE(user_v, nullptr);
+ ASSERT_NE(user_l, nullptr);
+
+ EXPECT_EQ(user_v->Variable(), global);
+ EXPECT_EQ(user_l->Variable(), global);
}
TEST_F(ResolverVarLetTest, LocalShadowsLocalVar) {
- // fn X() {
- // var a : i32;
- // {
- // var a = a;
- // }
- // {
- // let a = a;
- // }
- // }
-
- auto* s = Var("a", ty.i32(), Expr(1));
- auto* v = Var("a", nullptr, Expr("a"));
- auto* l = Const("a", nullptr, Expr("a"));
- Func("X", {}, ty.void_(), {Decl(s), Block(Decl(v)), Block(Decl(l))});
-
- ASSERT_TRUE(r()->Resolve()) << r()->error();
-
- auto* local_s = Sem().Get<sem::LocalVariable>(s);
- auto* local_v = Sem().Get<sem::LocalVariable>(v);
- auto* local_l = Sem().Get<sem::LocalVariable>(l);
-
- ASSERT_NE(local_s, nullptr);
- ASSERT_NE(local_v, nullptr);
- ASSERT_NE(local_l, nullptr);
-
- EXPECT_EQ(local_v->Shadows(), local_s);
- EXPECT_EQ(local_l->Shadows(), local_s);
-
- auto* user_v =
- Sem().Get<sem::VariableUser>(local_v->Declaration()->constructor);
- auto* user_l =
- Sem().Get<sem::VariableUser>(local_l->Declaration()->constructor);
-
- ASSERT_NE(user_v, nullptr);
- ASSERT_NE(user_l, nullptr);
-
- EXPECT_EQ(user_v->Variable(), local_s);
- EXPECT_EQ(user_l->Variable(), local_s);
+ // fn X() {
+ // var a : i32;
+ // {
+ // var a = a;
+ // }
+ // {
+ // let a = a;
+ // }
+ // }
+
+ auto* s = Var("a", ty.i32(), Expr(1_i));
+ auto* v = Var("a", nullptr, Expr("a"));
+ auto* l = Let("a", nullptr, Expr("a"));
+ Func("X", {}, ty.void_(), {Decl(s), Block(Decl(v)), Block(Decl(l))});
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
+
+ auto* local_s = Sem().Get<sem::LocalVariable>(s);
+ auto* local_v = Sem().Get<sem::LocalVariable>(v);
+ auto* local_l = Sem().Get<sem::LocalVariable>(l);
+
+ ASSERT_NE(local_s, nullptr);
+ ASSERT_NE(local_v, nullptr);
+ ASSERT_NE(local_l, nullptr);
+
+ EXPECT_EQ(local_v->Shadows(), local_s);
+ EXPECT_EQ(local_l->Shadows(), local_s);
+
+ auto* user_v = Sem().Get<sem::VariableUser>(local_v->Declaration()->constructor);
+ auto* user_l = Sem().Get<sem::VariableUser>(local_l->Declaration()->constructor);
+
+ ASSERT_NE(user_v, nullptr);
+ ASSERT_NE(user_l, nullptr);
+
+ EXPECT_EQ(user_v->Variable(), local_s);
+ EXPECT_EQ(user_l->Variable(), local_s);
}
TEST_F(ResolverVarLetTest, LocalShadowsLocalLet) {
- // fn X() {
- // let a = 1;
- // {
- // var a = (a == 123);
- // }
- // {
- // let a = (a == 321);
- // }
- // }
-
- auto* s = Const("a", ty.i32(), Expr(1));
- auto* v = Var("a", nullptr, Expr("a"));
- auto* l = Const("a", nullptr, Expr("a"));
- Func("X", {}, ty.void_(), {Decl(s), Block(Decl(v)), Block(Decl(l))});
-
- ASSERT_TRUE(r()->Resolve()) << r()->error();
-
- auto* local_s = Sem().Get<sem::LocalVariable>(s);
- auto* local_v = Sem().Get<sem::LocalVariable>(v);
- auto* local_l = Sem().Get<sem::LocalVariable>(l);
-
- ASSERT_NE(local_s, nullptr);
- ASSERT_NE(local_v, nullptr);
- ASSERT_NE(local_l, nullptr);
-
- EXPECT_EQ(local_v->Shadows(), local_s);
- EXPECT_EQ(local_l->Shadows(), local_s);
-
- auto* user_v =
- Sem().Get<sem::VariableUser>(local_v->Declaration()->constructor);
- auto* user_l =
- Sem().Get<sem::VariableUser>(local_l->Declaration()->constructor);
-
- ASSERT_NE(user_v, nullptr);
- ASSERT_NE(user_l, nullptr);
-
- EXPECT_EQ(user_v->Variable(), local_s);
- EXPECT_EQ(user_l->Variable(), local_s);
+ // fn X() {
+ // let a = 1;
+ // {
+ // var a = (a == 123);
+ // }
+ // {
+ // let a = (a == 321);
+ // }
+ // }
+
+ auto* s = Let("a", ty.i32(), Expr(1_i));
+ auto* v = Var("a", nullptr, Expr("a"));
+ auto* l = Let("a", nullptr, Expr("a"));
+ Func("X", {}, ty.void_(), {Decl(s), Block(Decl(v)), Block(Decl(l))});
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
+
+ auto* local_s = Sem().Get<sem::LocalVariable>(s);
+ auto* local_v = Sem().Get<sem::LocalVariable>(v);
+ auto* local_l = Sem().Get<sem::LocalVariable>(l);
+
+ ASSERT_NE(local_s, nullptr);
+ ASSERT_NE(local_v, nullptr);
+ ASSERT_NE(local_l, nullptr);
+
+ EXPECT_EQ(local_v->Shadows(), local_s);
+ EXPECT_EQ(local_l->Shadows(), local_s);
+
+ auto* user_v = Sem().Get<sem::VariableUser>(local_v->Declaration()->constructor);
+ auto* user_l = Sem().Get<sem::VariableUser>(local_l->Declaration()->constructor);
+
+ ASSERT_NE(user_v, nullptr);
+ ASSERT_NE(user_l, nullptr);
+
+ EXPECT_EQ(user_v->Variable(), local_s);
+ EXPECT_EQ(user_l->Variable(), local_s);
}
TEST_F(ResolverVarLetTest, LocalShadowsParam) {
- // fn F(a : i32) {
- // {
- // var a = a;
- // }
- // {
- // let a = a;
- // }
- // }
-
- auto* p = Param("a", ty.i32());
- auto* v = Var("a", nullptr, Expr("a"));
- auto* l = Const("a", nullptr, Expr("a"));
- Func("X", {p}, ty.void_(), {Block(Decl(v)), Block(Decl(l))});
-
- ASSERT_TRUE(r()->Resolve()) << r()->error();
-
- auto* param = Sem().Get<sem::Parameter>(p);
- auto* local_v = Sem().Get<sem::LocalVariable>(v);
- auto* local_l = Sem().Get<sem::LocalVariable>(l);
-
- ASSERT_NE(param, nullptr);
- ASSERT_NE(local_v, nullptr);
- ASSERT_NE(local_l, nullptr);
-
- EXPECT_EQ(local_v->Shadows(), param);
- EXPECT_EQ(local_l->Shadows(), param);
-
- auto* user_v =
- Sem().Get<sem::VariableUser>(local_v->Declaration()->constructor);
- auto* user_l =
- Sem().Get<sem::VariableUser>(local_l->Declaration()->constructor);
-
- ASSERT_NE(user_v, nullptr);
- ASSERT_NE(user_l, nullptr);
-
- EXPECT_EQ(user_v->Variable(), param);
- EXPECT_EQ(user_l->Variable(), param);
+ // fn F(a : i32) {
+ // {
+ // var a = a;
+ // }
+ // {
+ // let a = a;
+ // }
+ // }
+
+ auto* p = Param("a", ty.i32());
+ auto* v = Var("a", nullptr, Expr("a"));
+ auto* l = Let("a", nullptr, Expr("a"));
+ Func("X", {p}, ty.void_(), {Block(Decl(v)), Block(Decl(l))});
+
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
+
+ auto* param = Sem().Get<sem::Parameter>(p);
+ auto* local_v = Sem().Get<sem::LocalVariable>(v);
+ auto* local_l = Sem().Get<sem::LocalVariable>(l);
+
+ ASSERT_NE(param, nullptr);
+ ASSERT_NE(local_v, nullptr);
+ ASSERT_NE(local_l, nullptr);
+
+ EXPECT_EQ(local_v->Shadows(), param);
+ EXPECT_EQ(local_l->Shadows(), param);
+
+ auto* user_v = Sem().Get<sem::VariableUser>(local_v->Declaration()->constructor);
+ auto* user_l = Sem().Get<sem::VariableUser>(local_l->Declaration()->constructor);
+
+ ASSERT_NE(user_v, nullptr);
+ ASSERT_NE(user_l, nullptr);
+
+ EXPECT_EQ(user_v->Variable(), param);
+ EXPECT_EQ(user_l->Variable(), param);
}
TEST_F(ResolverVarLetTest, ParamShadowsFunction) {
- // fn a(a : bool) {
- // }
+ // fn a(a : bool) {
+ // }
- auto* p = Param("a", ty.bool_());
- auto* f = Func("a", {p}, ty.void_(), {});
+ auto* p = Param("a", ty.bool_());
+ auto* f = Func("a", {p}, ty.void_(), {});
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* func = Sem().Get(f);
- auto* param = Sem().Get<sem::Parameter>(p);
+ auto* func = Sem().Get(f);
+ auto* param = Sem().Get<sem::Parameter>(p);
- ASSERT_NE(func, nullptr);
- ASSERT_NE(param, nullptr);
+ ASSERT_NE(func, nullptr);
+ ASSERT_NE(param, nullptr);
- EXPECT_EQ(param->Shadows(), func);
+ EXPECT_EQ(param->Shadows(), func);
}
TEST_F(ResolverVarLetTest, ParamShadowsGlobalVar) {
- // var<private> a : i32;
- //
- // fn F(a : bool) {
- // }
+ // var<private> a : i32;
+ //
+ // fn F(a : bool) {
+ // }
- auto* g = Global("a", ty.i32(), ast::StorageClass::kPrivate);
- auto* p = Param("a", ty.bool_());
- Func("F", {p}, ty.void_(), {});
+ auto* g = Global("a", ty.i32(), ast::StorageClass::kPrivate);
+ auto* p = Param("a", ty.bool_());
+ Func("F", {p}, ty.void_(), {});
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* global = Sem().Get(g);
- auto* param = Sem().Get<sem::Parameter>(p);
+ auto* global = Sem().Get(g);
+ auto* param = Sem().Get<sem::Parameter>(p);
- ASSERT_NE(global, nullptr);
- ASSERT_NE(param, nullptr);
+ ASSERT_NE(global, nullptr);
+ ASSERT_NE(param, nullptr);
- EXPECT_EQ(param->Shadows(), global);
+ EXPECT_EQ(param->Shadows(), global);
}
TEST_F(ResolverVarLetTest, ParamShadowsGlobalLet) {
- // let a : i32 = 1;
- //
- // fn F(a : bool) {
- // }
+ // let a : i32 = 1;
+ //
+ // fn F(a : bool) {
+ // }
- auto* g = GlobalConst("a", ty.i32(), Expr(1));
- auto* p = Param("a", ty.bool_());
- Func("F", {p}, ty.void_(), {});
+ auto* g = GlobalConst("a", ty.i32(), Expr(1_i));
+ auto* p = Param("a", ty.bool_());
+ Func("F", {p}, ty.void_(), {});
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* global = Sem().Get(g);
- auto* param = Sem().Get<sem::Parameter>(p);
+ auto* global = Sem().Get(g);
+ auto* param = Sem().Get<sem::Parameter>(p);
- ASSERT_NE(global, nullptr);
- ASSERT_NE(param, nullptr);
+ ASSERT_NE(global, nullptr);
+ ASSERT_NE(param, nullptr);
- EXPECT_EQ(param->Shadows(), global);
+ EXPECT_EQ(param->Shadows(), global);
}
TEST_F(ResolverVarLetTest, ParamShadowsAlias) {
- // type a = i32;
- //
- // fn F(a : a) {
- // }
+ // type a = i32;
+ //
+ // fn F(a : a) {
+ // }
- auto* a = Alias("a", ty.i32());
- auto* p = Param("a", ty.type_name("a"));
- Func("F", {p}, ty.void_(), {});
+ auto* a = Alias("a", ty.i32());
+ auto* p = Param("a", ty.type_name("a"));
+ Func("F", {p}, ty.void_(), {});
- ASSERT_TRUE(r()->Resolve()) << r()->error();
+ ASSERT_TRUE(r()->Resolve()) << r()->error();
- auto* alias = Sem().Get(a);
- auto* param = Sem().Get<sem::Parameter>(p);
+ auto* alias = Sem().Get(a);
+ auto* param = Sem().Get<sem::Parameter>(p);
- ASSERT_NE(alias, nullptr);
- ASSERT_NE(param, nullptr);
+ ASSERT_NE(alias, nullptr);
+ ASSERT_NE(param, nullptr);
- EXPECT_EQ(param->Shadows(), alias);
- EXPECT_EQ(param->Type(), alias);
+ EXPECT_EQ(param->Shadows(), alias);
+ EXPECT_EQ(param->Type(), alias);
}
} // namespace
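
Note: the shadowing tests in the file above all follow the same shape: build the AST with the ProgramBuilder DSL, run the resolver, then query the semantic nodes for what each declaration shadows. A condensed sketch of that pattern, relying on the same ResolverVarLetTest fixture declared earlier in the file (the test name is illustrative only):

// Sketch of the common shape of the shadowing tests in this file.
TEST_F(ResolverVarLetTest, SketchShadowing) {
    // fn f(a : i32) { var a = a; }
    auto* p = Param("a", ty.i32());
    auto* v = Var("a", nullptr, Expr("a"));
    Func("f", {p}, ty.void_(), {Block(Decl(v))});

    ASSERT_TRUE(r()->Resolve()) << r()->error();

    // The local's semantic node records the parameter it shadows, and the
    // initializer expression resolves to a user of that parameter.
    auto* param = Sem().Get<sem::Parameter>(p);
    auto* local = Sem().Get<sem::LocalVariable>(v);
    EXPECT_EQ(local->Shadows(), param);
    EXPECT_EQ(Sem().Get<sem::VariableUser>(v->constructor)->Variable(), param);
}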
diff --git a/chromium/third_party/dawn/src/tint/resolver/var_let_validation_test.cc b/chromium/third_party/dawn/src/tint/resolver/var_let_validation_test.cc
index 6b8652959de..e1dc3435a4a 100644
--- a/chromium/third_party/dawn/src/tint/resolver/var_let_validation_test.cc
+++ b/chromium/third_party/dawn/src/tint/resolver/var_let_validation_test.cc
@@ -17,330 +17,302 @@
#include "gmock/gmock.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::resolver {
namespace {
-struct ResolverVarLetValidationTest : public resolver::TestHelper,
- public testing::Test {};
+struct ResolverVarLetValidationTest : public resolver::TestHelper, public testing::Test {};
TEST_F(ResolverVarLetValidationTest, LetNoInitializer) {
- // let a : i32;
- WrapInFunction(Const(Source{{12, 34}}, "a", ty.i32(), nullptr));
+ // let a : i32;
+ WrapInFunction(Let(Source{{12, 34}}, "a", ty.i32(), nullptr));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: let declaration must have an initializer");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: let declaration must have an initializer");
}
TEST_F(ResolverVarLetValidationTest, GlobalLetNoInitializer) {
- // let a : i32;
- GlobalConst(Source{{12, 34}}, "a", ty.i32(), nullptr);
+ // let a : i32;
+ GlobalConst(Source{{12, 34}}, "a", ty.i32(), nullptr);
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: let declaration must have an initializer");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: let declaration must have an initializer");
}
TEST_F(ResolverVarLetValidationTest, VarNoInitializerNoType) {
- // var a;
- WrapInFunction(Var(Source{{12, 34}}, "a", nullptr));
+ // var a;
+ WrapInFunction(Var(Source{{12, 34}}, "a", nullptr));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: function scope var declaration requires a type or "
- "initializer");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: function scope var declaration requires a type or "
+ "initializer");
}
TEST_F(ResolverVarLetValidationTest, GlobalVarNoInitializerNoType) {
- // var a;
- Global(Source{{12, 34}}, "a", nullptr);
+ // var a;
+ Global(Source{{12, 34}}, "a", nullptr);
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: module scope var declaration requires a type and "
- "initializer");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: module scope var declaration requires a type and "
+ "initializer");
}
TEST_F(ResolverVarLetValidationTest, VarTypeNotStorable) {
- // var i : i32;
- // var p : pointer<function, i32> = &v;
- auto* i = Var("i", ty.i32(), ast::StorageClass::kNone);
- auto* p =
- Var(Source{{56, 78}}, "a", ty.pointer<i32>(ast::StorageClass::kFunction),
- ast::StorageClass::kNone, AddressOf(Source{{12, 34}}, "i"));
- WrapInFunction(i, p);
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "56:78 error: ptr<function, i32, read_write> cannot be used as the "
- "type of a var");
+ // var i : i32;
+    // var p : pointer<function, i32> = &i;
+ auto* i = Var("i", ty.i32(), ast::StorageClass::kNone);
+ auto* p = Var(Source{{56, 78}}, "a", ty.pointer<i32>(ast::StorageClass::kFunction),
+ ast::StorageClass::kNone, AddressOf(Source{{12, 34}}, "i"));
+ WrapInFunction(i, p);
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "56:78 error: ptr<function, i32, read_write> cannot be used as the "
+ "type of a var");
}
TEST_F(ResolverVarLetValidationTest, LetTypeNotConstructible) {
- // @group(0) @binding(0) var t1 : texture_2d<f32>;
- // let t2 : t1;
- auto* t1 =
- Global("t1", ty.sampled_texture(ast::TextureDimension::k2d, ty.f32()),
- GroupAndBinding(0, 0));
- auto* t2 = Const(Source{{56, 78}}, "t2", nullptr, Expr(t1));
- WrapInFunction(t2);
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "56:78 error: texture_2d<f32> cannot be used as the type of a let");
+ // @group(0) @binding(0) var t1 : texture_2d<f32>;
+ // let t2 : t1;
+ auto* t1 = Global("t1", ty.sampled_texture(ast::TextureDimension::k2d, ty.f32()),
+ GroupAndBinding(0, 0));
+ auto* t2 = Let(Source{{56, 78}}, "t2", nullptr, Expr(t1));
+ WrapInFunction(t2);
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "56:78 error: texture_2d<f32> cannot be used as the type of a let");
}
TEST_F(ResolverVarLetValidationTest, LetConstructorWrongType) {
- // var v : i32 = 2u
- WrapInFunction(Const(Source{{3, 3}}, "v", ty.i32(), Expr(2u)));
+    // let v : i32 = 2u
+ WrapInFunction(Let(Source{{3, 3}}, "v", ty.i32(), Expr(2_u)));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- R"(3:3 error: cannot initialize let of type 'i32' with value of type 'u32')");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ R"(3:3 error: cannot initialize let of type 'i32' with value of type 'u32')");
}
TEST_F(ResolverVarLetValidationTest, VarConstructorWrongType) {
- // var v : i32 = 2u
- WrapInFunction(
- Var(Source{{3, 3}}, "v", ty.i32(), ast::StorageClass::kNone, Expr(2u)));
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- R"(3:3 error: cannot initialize var of type 'i32' with value of type 'u32')");
+ // var v : i32 = 2u
+ WrapInFunction(Var(Source{{3, 3}}, "v", ty.i32(), ast::StorageClass::kNone, Expr(2_u)));
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ R"(3:3 error: cannot initialize var of type 'i32' with value of type 'u32')");
}
TEST_F(ResolverVarLetValidationTest, LetConstructorWrongTypeViaAlias) {
- auto* a = Alias("I32", ty.i32());
- WrapInFunction(Const(Source{{3, 3}}, "v", ty.Of(a), Expr(2u)));
+ auto* a = Alias("I32", ty.i32());
+ WrapInFunction(Let(Source{{3, 3}}, "v", ty.Of(a), Expr(2_u)));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- R"(3:3 error: cannot initialize let of type 'i32' with value of type 'u32')");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ R"(3:3 error: cannot initialize let of type 'i32' with value of type 'u32')");
}
TEST_F(ResolverVarLetValidationTest, VarConstructorWrongTypeViaAlias) {
- auto* a = Alias("I32", ty.i32());
- WrapInFunction(
- Var(Source{{3, 3}}, "v", ty.Of(a), ast::StorageClass::kNone, Expr(2u)));
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- R"(3:3 error: cannot initialize var of type 'i32' with value of type 'u32')");
+ auto* a = Alias("I32", ty.i32());
+ WrapInFunction(Var(Source{{3, 3}}, "v", ty.Of(a), ast::StorageClass::kNone, Expr(2_u)));
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ R"(3:3 error: cannot initialize var of type 'i32' with value of type 'u32')");
}
TEST_F(ResolverVarLetValidationTest, LetOfPtrConstructedWithRef) {
- // var a : f32;
- // let b : ptr<function,f32> = a;
- const auto priv = ast::StorageClass::kFunction;
- auto* var_a = Var("a", ty.f32(), priv);
- auto* var_b =
- Const(Source{{12, 34}}, "b", ty.pointer<float>(priv), Expr("a"), {});
- WrapInFunction(var_a, var_b);
-
- ASSERT_FALSE(r()->Resolve());
-
- EXPECT_EQ(
- r()->error(),
- R"(12:34 error: cannot initialize let of type 'ptr<function, f32, read_write>' with value of type 'f32')");
+ // var a : f32;
+ // let b : ptr<function,f32> = a;
+ const auto priv = ast::StorageClass::kFunction;
+ auto* var_a = Var("a", ty.f32(), priv);
+ auto* var_b = Let(Source{{12, 34}}, "b", ty.pointer<f32>(priv), Expr("a"), {});
+ WrapInFunction(var_a, var_b);
+
+ ASSERT_FALSE(r()->Resolve());
+
+ EXPECT_EQ(
+ r()->error(),
+ R"(12:34 error: cannot initialize let of type 'ptr<function, f32, read_write>' with value of type 'f32')");
}
TEST_F(ResolverVarLetValidationTest, LocalLetRedeclared) {
- // let l : f32 = 1.;
- // let l : i32 = 0;
- auto* l1 = Const("l", ty.f32(), Expr(1.f));
- auto* l2 = Const(Source{{12, 34}}, "l", ty.i32(), Expr(0));
- WrapInFunction(l1, l2);
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- "12:34 error: redeclaration of 'l'\nnote: 'l' previously declared here");
+ // let l : f32 = 1.;
+ // let l : i32 = 0;
+ auto* l1 = Let("l", ty.f32(), Expr(1_f));
+ auto* l2 = Let(Source{{12, 34}}, "l", ty.i32(), Expr(0_i));
+ WrapInFunction(l1, l2);
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: redeclaration of 'l'\nnote: 'l' previously declared here");
}
TEST_F(ResolverVarLetValidationTest, GlobalVarRedeclaredAsLocal) {
- // var v : f32 = 2.1;
- // fn my_func() {
- // var v : f32 = 2.0;
- // return 0;
- // }
+ // var v : f32 = 2.1;
+ // fn my_func() {
+ // var v : f32 = 2.0;
+ // return 0;
+ // }
- Global("v", ty.f32(), ast::StorageClass::kPrivate, Expr(2.1f));
+ Global("v", ty.f32(), ast::StorageClass::kPrivate, Expr(2.1_f));
- WrapInFunction(Var(Source{{12, 34}}, "v", ty.f32(), ast::StorageClass::kNone,
- Expr(2.0f)));
+ WrapInFunction(Var(Source{{12, 34}}, "v", ty.f32(), ast::StorageClass::kNone, Expr(2_f)));
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverVarLetValidationTest, VarRedeclaredInInnerBlock) {
- // {
- // var v : f32;
- // { var v : f32; }
- // }
- auto* var_outer = Var("v", ty.f32(), ast::StorageClass::kNone);
- auto* var_inner =
- Var(Source{{12, 34}}, "v", ty.f32(), ast::StorageClass::kNone);
- auto* inner = Block(Decl(var_inner));
- auto* outer_body = Block(Decl(var_outer), inner);
-
- WrapInFunction(outer_body);
-
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ // {
+ // var v : f32;
+ // { var v : f32; }
+ // }
+ auto* var_outer = Var("v", ty.f32(), ast::StorageClass::kNone);
+ auto* var_inner = Var(Source{{12, 34}}, "v", ty.f32(), ast::StorageClass::kNone);
+ auto* inner = Block(Decl(var_inner));
+ auto* outer_body = Block(Decl(var_outer), inner);
+
+ WrapInFunction(outer_body);
+
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverVarLetValidationTest, VarRedeclaredInIfBlock) {
- // {
- // var v : f32 = 3.14;
- // if (true) { var v : f32 = 2.0; }
- // }
- auto* var_a_float = Var("v", ty.f32(), ast::StorageClass::kNone, Expr(3.1f));
+ // {
+    // var v : f32 = 3.1;
+ // if (true) { var v : f32 = 2.0; }
+ // }
+ auto* var_a_float = Var("v", ty.f32(), ast::StorageClass::kNone, Expr(3.1_f));
- auto* var = Var(Source{{12, 34}}, "v", ty.f32(), ast::StorageClass::kNone,
- Expr(2.0f));
+ auto* var = Var(Source{{12, 34}}, "v", ty.f32(), ast::StorageClass::kNone, Expr(2_f));
- auto* cond = Expr(true);
- auto* body = Block(Decl(var));
+ auto* cond = Expr(true);
+ auto* body = Block(Decl(var));
- auto* outer_body =
- Block(Decl(var_a_float),
- create<ast::IfStatement>(cond, body, ast::ElseStatementList{}));
+ auto* outer_body = Block(Decl(var_a_float), If(cond, body));
- WrapInFunction(outer_body);
+ WrapInFunction(outer_body);
- EXPECT_TRUE(r()->Resolve()) << r()->error();
+ EXPECT_TRUE(r()->Resolve()) << r()->error();
}
TEST_F(ResolverVarLetValidationTest, InferredPtrStorageAccessMismatch) {
- // struct Inner {
- // arr: array<i32, 4>;
- // }
- // struct S {
- // inner: Inner;
- // }
- // @group(0) @binding(0) var<storage> s : S;
- // fn f() {
- // let p : pointer<storage, i32, read_write> = &s.inner.arr[2];
- // }
- auto* inner = Structure("Inner", {Member("arr", ty.array<i32, 4>())});
- auto* buf = Structure("S", {Member("inner", ty.Of(inner))});
- auto* storage = Global("s", ty.Of(buf), ast::StorageClass::kStorage,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(0),
- });
-
- auto* expr =
- IndexAccessor(MemberAccessor(MemberAccessor(storage, "inner"), "arr"), 4);
- auto* ptr = Const(
- Source{{12, 34}}, "p",
- ty.pointer<i32>(ast::StorageClass::kStorage, ast::Access::kReadWrite),
- AddressOf(expr));
-
- WrapInFunction(ptr);
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: cannot initialize let of type "
- "'ptr<storage, i32, read_write>' with value of type "
- "'ptr<storage, i32, read>'");
+ // struct Inner {
+ // arr: array<i32, 4>;
+ // }
+ // struct S {
+ // inner: Inner;
+ // }
+ // @group(0) @binding(0) var<storage> s : S;
+ // fn f() {
+ // let p : pointer<storage, i32, read_write> = &s.inner.arr[2i];
+ // }
+ auto* inner = Structure("Inner", {Member("arr", ty.array<i32, 4>())});
+ auto* buf = Structure("S", {Member("inner", ty.Of(inner))});
+ auto* storage = Global("s", ty.Of(buf), ast::StorageClass::kStorage,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(0),
+ });
+
+ auto* expr = IndexAccessor(MemberAccessor(MemberAccessor(storage, "inner"), "arr"), 2_i);
+ auto* ptr =
+ Let(Source{{12, 34}}, "p",
+ ty.pointer<i32>(ast::StorageClass::kStorage, ast::Access::kReadWrite), AddressOf(expr));
+
+ WrapInFunction(ptr);
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: cannot initialize let of type "
+ "'ptr<storage, i32, read_write>' with value of type "
+ "'ptr<storage, i32, read>'");
}
TEST_F(ResolverVarLetValidationTest, NonConstructibleType_Atomic) {
- auto* v = Var("v", ty.atomic(Source{{12, 34}}, ty.i32()));
- WrapInFunction(v);
+ auto* v = Var("v", ty.atomic(Source{{12, 34}}, ty.i32()));
+ WrapInFunction(v);
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: function variable must have a constructible type");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: function variable must have a constructible type");
}
TEST_F(ResolverVarLetValidationTest, NonConstructibleType_RuntimeArray) {
- auto* s = Structure("S", {Member(Source{{56, 78}}, "m", ty.array(ty.i32()))});
- auto* v = Var(Source{{12, 34}}, "v", ty.Of(s));
- WrapInFunction(v);
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(
- r()->error(),
- R"(12:34 error: runtime-sized arrays can only be used in the <storage> storage class
+ auto* s = Structure("S", {Member(Source{{56, 78}}, "m", ty.array(ty.i32()))});
+ auto* v = Var(Source{{12, 34}}, "v", ty.Of(s));
+ WrapInFunction(v);
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ R"(12:34 error: runtime-sized arrays can only be used in the <storage> storage class
56:78 note: while analysing structure member S.m
12:34 note: while instantiating variable v)");
}
TEST_F(ResolverVarLetValidationTest, NonConstructibleType_Struct_WithAtomic) {
- auto* s = Structure("S", {Member("m", ty.atomic(ty.i32()))});
- auto* v = Var("v", ty.Of(s));
- WrapInFunction(v);
+ auto* s = Structure("S", {Member("m", ty.atomic(ty.i32()))});
+ auto* v = Var("v", ty.Of(s));
+ WrapInFunction(v);
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "error: function variable must have a constructible type");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "error: function variable must have a constructible type");
}
TEST_F(ResolverVarLetValidationTest, NonConstructibleType_InferredType) {
- // @group(0) @binding(0) var s : sampler;
- // fn foo() {
- // var v = s;
- // }
- Global("s", ty.sampler(ast::SamplerKind::kSampler), GroupAndBinding(0, 0));
- auto* v = Var(Source{{12, 34}}, "v", nullptr, Expr("s"));
- WrapInFunction(v);
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: function variable must have a constructible type");
+ // @group(0) @binding(0) var s : sampler;
+ // fn foo() {
+ // var v = s;
+ // }
+ Global("s", ty.sampler(ast::SamplerKind::kSampler), GroupAndBinding(0, 0));
+ auto* v = Var(Source{{12, 34}}, "v", nullptr, Expr("s"));
+ WrapInFunction(v);
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: function variable must have a constructible type");
}
TEST_F(ResolverVarLetValidationTest, InvalidStorageClassForInitializer) {
- // var<workgroup> v : f32 = 1.23;
- Global(Source{{12, 34}}, "v", ty.f32(), ast::StorageClass::kWorkgroup,
- Expr(1.23f));
-
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(),
- "12:34 error: var of storage class 'workgroup' cannot have "
- "an initializer. var initializers are only supported for the "
- "storage classes 'private' and 'function'");
+ // var<workgroup> v : f32 = 1.23;
+ Global(Source{{12, 34}}, "v", ty.f32(), ast::StorageClass::kWorkgroup, Expr(1.23_f));
+
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(),
+ "12:34 error: var of storage class 'workgroup' cannot have "
+ "an initializer. var initializers are only supported for the "
+ "storage classes 'private' and 'function'");
}
TEST_F(ResolverVarLetValidationTest, VectorLetNoType) {
- // let a : mat3x3 = mat3x3<f32>();
- WrapInFunction(Const("a", create<ast::Vector>(Source{{12, 34}}, nullptr, 3),
- vec3<f32>()));
+    // let a : vec3 = vec3<f32>();
+ WrapInFunction(Let("a", create<ast::Vector>(Source{{12, 34}}, nullptr, 3), vec3<f32>()));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), "12:34 error: missing vector element type");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: missing vector element type");
}
TEST_F(ResolverVarLetValidationTest, VectorVarNoType) {
- // var a : mat3x3;
- WrapInFunction(Var("a", create<ast::Vector>(Source{{12, 34}}, nullptr, 3)));
+    // var a : vec3;
+ WrapInFunction(Var("a", create<ast::Vector>(Source{{12, 34}}, nullptr, 3)));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), "12:34 error: missing vector element type");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: missing vector element type");
}
TEST_F(ResolverVarLetValidationTest, MatrixLetNoType) {
- // let a : mat3x3 = mat3x3<f32>();
- WrapInFunction(Const("a",
- create<ast::Matrix>(Source{{12, 34}}, nullptr, 3, 3),
- mat3x3<f32>()));
+ // let a : mat3x3 = mat3x3<f32>();
+ WrapInFunction(Let("a", create<ast::Matrix>(Source{{12, 34}}, nullptr, 3, 3), mat3x3<f32>()));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), "12:34 error: missing matrix element type");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: missing matrix element type");
}
TEST_F(ResolverVarLetValidationTest, MatrixVarNoType) {
- // var a : mat3x3;
- WrapInFunction(
- Var("a", create<ast::Matrix>(Source{{12, 34}}, nullptr, 3, 3)));
+ // var a : mat3x3;
+ WrapInFunction(Var("a", create<ast::Matrix>(Source{{12, 34}}, nullptr, 3, 3)));
- EXPECT_FALSE(r()->Resolve());
- EXPECT_EQ(r()->error(), "12:34 error: missing matrix element type");
+ EXPECT_FALSE(r()->Resolve());
+ EXPECT_EQ(r()->error(), "12:34 error: missing matrix element type");
}
} // namespace
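
Note: two recurring changes in the validation-test diff above are the rename of the builder call Const() to Let() for immutable bindings, and the replacement of bare C++ literals (2u, 1.f, 0) with Tint's typed number suffixes (2_u, 1_f, 0_i), which become available through the added `using namespace tint::number_suffixes`. A minimal sketch under those assumptions, using the same ProgramBuilder DSL as the tests above (the test name is illustrative only):

// Sketch only: shows the typed literal suffixes and the Const() -> Let() rename.
TEST_F(ResolverVarLetValidationTest, SketchTypedLiterals) {
    // let a : i32 = 1i;  var b : u32 = 2u;  var c : f32 = 1.5;
    auto* a = Let("a", ty.i32(), Expr(1_i));  // 1_i builds an i32 literal node
    auto* b = Var("b", ty.u32(), ast::StorageClass::kNone, Expr(2_u));
    auto* c = Var("c", ty.f32(), ast::StorageClass::kNone, Expr(1.5_f));
    WrapInFunction(a, b, c);

    // All declarations are well-typed, so resolution succeeds.
    EXPECT_TRUE(r()->Resolve()) << r()->error();
}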
diff --git a/chromium/third_party/dawn/src/tint/scope_stack.h b/chromium/third_party/dawn/src/tint/scope_stack.h
index 12445845e8c..6838f5b77ce 100644
--- a/chromium/third_party/dawn/src/tint/scope_stack.h
+++ b/chromium/third_party/dawn/src/tint/scope_stack.h
@@ -24,55 +24,65 @@ namespace tint {
/// Used to store a stack of scope information.
/// The stack starts with a global scope which can not be popped.
-template <class T>
+template <class K, class V>
class ScopeStack {
- public:
- /// Constructor
- ScopeStack() {
- // Push global bucket
- stack_.push_back({});
- }
- /// Copy Constructor
- ScopeStack(const ScopeStack&) = default;
- ~ScopeStack() = default;
+ public:
+ /// Constructor
+ ScopeStack() {
+ // Push global bucket
+ stack_.push_back({});
+ }
+ /// Copy Constructor
+ ScopeStack(const ScopeStack&) = default;
+ ~ScopeStack() = default;
+
+ /// Push a new scope on to the stack
+ void Push() { stack_.push_back({}); }
- /// Push a new scope on to the stack
- void Push() { stack_.push_back({}); }
+ /// Pop the scope off the top of the stack
+ void Pop() {
+ if (stack_.size() > 1) {
+ stack_.pop_back();
+ }
+ }
- /// Pop the scope off the top of the stack
- void Pop() {
- if (stack_.size() > 1) {
- stack_.pop_back();
+ /// Assigns the value into the top most scope of the stack.
+ /// @param key the key of the value
+ /// @param val the value
+ /// @returns the old value if there was an existing key at the top of the
+    /// stack, otherwise the zero initializer for type V.
+ V Set(const K& key, V val) {
+ std::swap(val, stack_.back()[key]);
+ return val;
}
- }
- /// Assigns the value into the top most scope of the stack.
- /// @param symbol the symbol of the value
- /// @param val the value
- /// @returns the old value if there was an existing symbol at the top of the
- /// stack, otherwise the zero initializer for type T.
- T Set(const Symbol& symbol, T val) {
- std::swap(val, stack_.back()[symbol]);
- return val;
- }
+ /// Retrieves a value from the stack
+ /// @param key the key to look for
+ /// @returns the value, or the zero initializer if the value was not found
+ V Get(const K& key) const {
+ for (auto iter = stack_.rbegin(); iter != stack_.rend(); ++iter) {
+ auto& map = *iter;
+ auto val = map.find(key);
+ if (val != map.end()) {
+ return val->second;
+ }
+ }
- /// Retrieves a value from the stack
- /// @param symbol the symbol to look for
- /// @returns the value, or the zero initializer if the value was not found
- T Get(const Symbol& symbol) const {
- for (auto iter = stack_.rbegin(); iter != stack_.rend(); ++iter) {
- auto& map = *iter;
- auto val = map.find(symbol);
- if (val != map.end()) {
- return val->second;
- }
+ return V{};
}
- return T{};
- }
+ /// Return the top scope of the stack.
+ /// @returns the top scope of the stack
+ const std::unordered_map<K, V>& Top() const { return stack_.back(); }
+
+ /// Clear the scope stack.
+ void Clear() {
+ stack_.clear();
+ stack_.push_back({});
+ }
- private:
- std::vector<std::unordered_map<Symbol, T>> stack_;
+ private:
+ std::vector<std::unordered_map<K, V>> stack_;
};
} // namespace tint
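
Note: the scope_stack.h change above generalizes the container from a Symbol-keyed stack to arbitrary key/value types and adds Top() and Clear(). A minimal usage sketch against the interface shown in the diff; the free function and symbol parameters are illustrative, and the symbols are assumed to come from an existing SymbolTable:

#include <cstdint>

#include "src/tint/scope_stack.h"
#include "src/tint/symbol.h"

// Sketch of the generalized ScopeStack<K, V> API shown above.
void ScopeStackSketch(tint::Symbol a, tint::Symbol b) {
    tint::ScopeStack<tint::Symbol, uint32_t> stack;

    stack.Set(a, 1u);  // written into the global scope; returns 0u (no prior value)

    stack.Push();      // open a nested scope
    stack.Set(a, 2u);  // shadows the global entry
    stack.Set(b, 3u);

    uint32_t inner = stack.Get(a);  // 2u: the innermost binding wins
    const auto& top = stack.Top();  // map holding only the current scope's entries

    stack.Pop();                    // back to the global scope
    uint32_t outer = stack.Get(a);  // 1u again; b is no longer visible (0u)

    stack.Clear();                  // drop everything, keep a single global scope
    // stack.Get(a) == 0u now (the zero initializer for V)

    (void)inner;
    (void)outer;
    (void)top;
}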
diff --git a/chromium/third_party/dawn/src/tint/scope_stack_test.cc b/chromium/third_party/dawn/src/tint/scope_stack_test.cc
index 3754a41cb22..aeb7e73214f 100644
--- a/chromium/third_party/dawn/src/tint/scope_stack_test.cc
+++ b/chromium/third_party/dawn/src/tint/scope_stack_test.cc
@@ -22,49 +22,69 @@ namespace {
class ScopeStackTest : public ProgramBuilder, public testing::Test {};
TEST_F(ScopeStackTest, Get) {
- ScopeStack<uint32_t> s;
- Symbol a(1, ID());
- Symbol b(3, ID());
- s.Push();
- s.Set(a, 5u);
- s.Set(b, 10u);
+ ScopeStack<Symbol, uint32_t> s;
+ Symbol a(1, ID());
+ Symbol b(3, ID());
+ s.Push();
+ s.Set(a, 5u);
+ s.Set(b, 10u);
- EXPECT_EQ(s.Get(a), 5u);
- EXPECT_EQ(s.Get(b), 10u);
+ EXPECT_EQ(s.Get(a), 5u);
+ EXPECT_EQ(s.Get(b), 10u);
- s.Push();
+ s.Push();
- s.Set(a, 15u);
- EXPECT_EQ(s.Get(a), 15u);
- EXPECT_EQ(s.Get(b), 10u);
+ s.Set(a, 15u);
+ EXPECT_EQ(s.Get(a), 15u);
+ EXPECT_EQ(s.Get(b), 10u);
- s.Pop();
- EXPECT_EQ(s.Get(a), 5u);
- EXPECT_EQ(s.Get(b), 10u);
+ s.Pop();
+ EXPECT_EQ(s.Get(a), 5u);
+ EXPECT_EQ(s.Get(b), 10u);
}
TEST_F(ScopeStackTest, Get_MissingSymbol) {
- ScopeStack<uint32_t> s;
- Symbol sym(1, ID());
- EXPECT_EQ(s.Get(sym), 0u);
+ ScopeStack<Symbol, uint32_t> s;
+ Symbol sym(1, ID());
+ EXPECT_EQ(s.Get(sym), 0u);
}
TEST_F(ScopeStackTest, Set) {
- ScopeStack<uint32_t> s;
- Symbol a(1, ID());
- Symbol b(2, ID());
+ ScopeStack<Symbol, uint32_t> s;
+ Symbol a(1, ID());
+ Symbol b(2, ID());
- EXPECT_EQ(s.Set(a, 5u), 0u);
- EXPECT_EQ(s.Get(a), 5u);
+ EXPECT_EQ(s.Set(a, 5u), 0u);
+ EXPECT_EQ(s.Get(a), 5u);
- EXPECT_EQ(s.Set(b, 10u), 0u);
- EXPECT_EQ(s.Get(b), 10u);
+ EXPECT_EQ(s.Set(b, 10u), 0u);
+ EXPECT_EQ(s.Get(b), 10u);
- EXPECT_EQ(s.Set(a, 20u), 5u);
- EXPECT_EQ(s.Get(a), 20u);
+ EXPECT_EQ(s.Set(a, 20u), 5u);
+ EXPECT_EQ(s.Get(a), 20u);
- EXPECT_EQ(s.Set(b, 25u), 10u);
- EXPECT_EQ(s.Get(b), 25u);
+ EXPECT_EQ(s.Set(b, 25u), 10u);
+ EXPECT_EQ(s.Get(b), 25u);
+}
+
+TEST_F(ScopeStackTest, Clear) {
+ ScopeStack<Symbol, uint32_t> s;
+ Symbol a(1, ID());
+ Symbol b(2, ID());
+
+ EXPECT_EQ(s.Set(a, 5u), 0u);
+ EXPECT_EQ(s.Get(a), 5u);
+
+ s.Push();
+
+ EXPECT_EQ(s.Set(b, 10u), 0u);
+ EXPECT_EQ(s.Get(b), 10u);
+
+ s.Push();
+
+ s.Clear();
+ EXPECT_EQ(s.Get(a), 0u);
+ EXPECT_EQ(s.Get(b), 0u);
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/sem/abstract_float.cc b/chromium/third_party/dawn/src/tint/sem/abstract_float.cc
new file mode 100644
index 00000000000..6f32e998e52
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/sem/abstract_float.cc
@@ -0,0 +1,40 @@
+// Copyright 2022 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/tint/sem/abstract_float.h"
+
+#include "src/tint/program_builder.h"
+#include "src/tint/utils/hash.h"
+
+TINT_INSTANTIATE_TYPEINFO(tint::sem::AbstractFloat);
+
+namespace tint::sem {
+
+AbstractFloat::AbstractFloat() = default;
+AbstractFloat::AbstractFloat(AbstractFloat&&) = default;
+AbstractFloat::~AbstractFloat() = default;
+
+size_t AbstractFloat::Hash() const {
+ return utils::Hash(TypeInfo::Of<AbstractFloat>().full_hashcode);
+}
+
+bool AbstractFloat::Equals(const sem::Type& other) const {
+ return other.Is<AbstractFloat>();
+}
+
+std::string AbstractFloat::FriendlyName(const SymbolTable&) const {
+ return "abstract-float";
+}
+
+} // namespace tint::sem
diff --git a/chromium/third_party/dawn/src/tint/sem/abstract_float.h b/chromium/third_party/dawn/src/tint/sem/abstract_float.h
new file mode 100644
index 00000000000..77b8a78b17b
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/sem/abstract_float.h
@@ -0,0 +1,49 @@
+// Copyright 2022 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef SRC_TINT_SEM_ABSTRACT_FLOAT_H_
+#define SRC_TINT_SEM_ABSTRACT_FLOAT_H_
+
+#include <string>
+
+#include "src/tint/sem/abstract_numeric.h"
+
+namespace tint::sem {
+
+/// An abstract-float type.
+/// @see https://www.w3.org/TR/WGSL/#abstractFloat
+class AbstractFloat final : public Castable<AbstractFloat, AbstractNumeric> {
+ public:
+ /// Constructor
+ AbstractFloat();
+
+ /// Move constructor
+ AbstractFloat(AbstractFloat&&);
+ ~AbstractFloat() override;
+
+ /// @returns a hash of the type.
+ size_t Hash() const override;
+
+ /// @param other the other type to compare against
+ /// @returns true if this type is equal to the given type
+ bool Equals(const Type& other) const override;
+
+ /// @param symbols the program's symbol table
+ /// @returns the name for this type when printed in diagnostics.
+ std::string FriendlyName(const SymbolTable& symbols) const override;
+};
+
+} // namespace tint::sem
+
+#endif // SRC_TINT_SEM_ABSTRACT_FLOAT_H_
diff --git a/chromium/third_party/dawn/src/tint/sem/abstract_int.cc b/chromium/third_party/dawn/src/tint/sem/abstract_int.cc
new file mode 100644
index 00000000000..682c50aaaff
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/sem/abstract_int.cc
@@ -0,0 +1,40 @@
+// Copyright 2022 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/tint/sem/abstract_int.h"
+
+#include "src/tint/program_builder.h"
+#include "src/tint/utils/hash.h"
+
+TINT_INSTANTIATE_TYPEINFO(tint::sem::AbstractInt);
+
+namespace tint::sem {
+
+AbstractInt::AbstractInt() = default;
+AbstractInt::AbstractInt(AbstractInt&&) = default;
+AbstractInt::~AbstractInt() = default;
+
+size_t AbstractInt::Hash() const {
+ return utils::Hash(TypeInfo::Of<AbstractInt>().full_hashcode);
+}
+
+bool AbstractInt::Equals(const sem::Type& other) const {
+ return other.Is<AbstractInt>();
+}
+
+std::string AbstractInt::FriendlyName(const SymbolTable&) const {
+ return "abstract-int";
+}
+
+} // namespace tint::sem
diff --git a/chromium/third_party/dawn/src/tint/sem/abstract_int.h b/chromium/third_party/dawn/src/tint/sem/abstract_int.h
new file mode 100644
index 00000000000..91a62997bd2
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/sem/abstract_int.h
@@ -0,0 +1,49 @@
+// Copyright 2022 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef SRC_TINT_SEM_ABSTRACT_INT_H_
+#define SRC_TINT_SEM_ABSTRACT_INT_H_
+
+#include <string>
+
+#include "src/tint/sem/abstract_numeric.h"
+
+namespace tint::sem {
+
+/// An abstract-int type.
+/// @see https://www.w3.org/TR/WGSL/#abstractint
+class AbstractInt final : public Castable<AbstractInt, AbstractNumeric> {
+ public:
+ /// Constructor
+ AbstractInt();
+
+ /// Move constructor
+ AbstractInt(AbstractInt&&);
+ ~AbstractInt() override;
+
+ /// @returns a hash of the type.
+ size_t Hash() const override;
+
+ /// @param other the other type to compare against
+    /// @returns true if this type is equal to the given type
+ bool Equals(const Type& other) const override;
+
+ /// @param symbols the program's symbol table
+ /// @returns the name for this type when printed in diagnostics.
+ std::string FriendlyName(const SymbolTable& symbols) const override;
+};
+
+} // namespace tint::sem
+
+#endif // SRC_TINT_SEM_ABSTRACT_INT_H_
diff --git a/chromium/third_party/dawn/src/tint/sem/abstract_numeric.cc b/chromium/third_party/dawn/src/tint/sem/abstract_numeric.cc
new file mode 100644
index 00000000000..6481a433ae2
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/sem/abstract_numeric.cc
@@ -0,0 +1,37 @@
+// Copyright 2022 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/tint/sem/abstract_numeric.h"
+
+TINT_INSTANTIATE_TYPEINFO(tint::sem::AbstractNumeric);
+
+namespace tint::sem {
+
+AbstractNumeric::AbstractNumeric() = default;
+AbstractNumeric::AbstractNumeric(AbstractNumeric&&) = default;
+AbstractNumeric::~AbstractNumeric() = default;
+
+uint32_t AbstractNumeric::Size() const {
+ return 0;
+}
+
+uint32_t AbstractNumeric::Align() const {
+ return 0;
+}
+
+bool AbstractNumeric::IsConstructible() const {
+ return false;
+}
+
+} // namespace tint::sem
diff --git a/chromium/third_party/dawn/src/tint/sem/abstract_numeric.h b/chromium/third_party/dawn/src/tint/sem/abstract_numeric.h
new file mode 100644
index 00000000000..0b38448e055
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/sem/abstract_numeric.h
@@ -0,0 +1,47 @@
+// Copyright 2022 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef SRC_TINT_SEM_ABSTRACT_NUMERIC_H_
+#define SRC_TINT_SEM_ABSTRACT_NUMERIC_H_
+
+#include <string>
+
+#include "src/tint/sem/type.h"
+
+namespace tint::sem {
+
+/// The base class for abstract-int and abstract-float types.
+/// @see https://www.w3.org/TR/WGSL/#types-for-creation-time-constants
+class AbstractNumeric : public Castable<AbstractNumeric, Type> {
+ public:
+ /// Constructor
+ AbstractNumeric();
+
+ /// Move constructor
+ AbstractNumeric(AbstractNumeric&&);
+ ~AbstractNumeric() override;
+
+ /// @returns 0, as the type is abstract.
+ uint32_t Size() const override;
+
+ /// @returns 0, as the type is abstract.
+ uint32_t Align() const override;
+
+    /// @returns false, as the type is abstract.
+ bool IsConstructible() const override;
+};
+
+} // namespace tint::sem
+
+#endif // SRC_TINT_SEM_ABSTRACT_NUMERIC_H_
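
Note: the three new files above introduce sem::AbstractNumeric and its concrete subclasses, which model WGSL's abstract-int and abstract-float creation-time types: zero size and alignment, not constructible, and named "abstract-int" / "abstract-float" in diagnostics. A minimal sketch of that behaviour, assuming the same sem::TestHelper fixture and create<> helper used by the type tests elsewhere in this change (the test name is illustrative only):

#include "src/tint/sem/abstract_float.h"
#include "src/tint/sem/abstract_int.h"
#include "src/tint/sem/test_helper.h"

namespace tint::sem {
namespace {

using AbstractNumericSketch = TestHelper;

// Sketch only: exercises the behaviour declared in the headers above.
TEST_F(AbstractNumericSketch, Basics) {
    auto* af = create<AbstractFloat>();
    auto* ai = create<AbstractInt>();

    // Abstract numerics exist only during constant evaluation: no size,
    // no alignment, and they cannot be constructed in WGSL source.
    EXPECT_EQ(af->Size(), 0u);
    EXPECT_EQ(af->Align(), 0u);
    EXPECT_FALSE(ai->IsConstructible());

    EXPECT_EQ(af->FriendlyName(Symbols()), "abstract-float");
    EXPECT_EQ(ai->FriendlyName(Symbols()), "abstract-int");
    EXPECT_FALSE(af->Equals(*ai));
}

}  // namespace
}  // namespace tint::sem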
diff --git a/chromium/third_party/dawn/src/tint/sem/array.cc b/chromium/third_party/dawn/src/tint/sem/array.cc
index 296979c3558..624623ade68 100644
--- a/chromium/third_party/dawn/src/tint/sem/array.cc
+++ b/chromium/third_party/dawn/src/tint/sem/array.cc
@@ -37,47 +37,46 @@ Array::Array(const Type* element,
implicit_stride_(implicit_stride),
constructible_(count > 0 // Runtime-sized arrays are not constructible
&& element->IsConstructible()) {
- TINT_ASSERT(Semantic, element_);
+ TINT_ASSERT(Semantic, element_);
}
size_t Array::Hash() const {
- return utils::Hash(TypeInfo::Of<Array>().full_hashcode, count_, align_, size_,
- stride_);
+ return utils::Hash(TypeInfo::Of<Array>().full_hashcode, count_, align_, size_, stride_);
}
bool Array::Equals(const sem::Type& other) const {
- if (auto* o = other.As<Array>()) {
- // Note: implicit_stride is not part of the type_name string as this is
- // derived from the element type
- return o->element_ == element_ && o->count_ == count_ &&
- o->align_ == align_ && o->size_ == size_ && o->stride_ == stride_;
- }
- return false;
+ if (auto* o = other.As<Array>()) {
+ // Note: implicit_stride is not part of the type_name string as this is
+ // derived from the element type
+ return o->element_ == element_ && o->count_ == count_ && o->align_ == align_ &&
+ o->size_ == size_ && o->stride_ == stride_;
+ }
+ return false;
}
bool Array::IsConstructible() const {
- return constructible_;
+ return constructible_;
}
std::string Array::FriendlyName(const SymbolTable& symbols) const {
- std::ostringstream out;
- if (!IsStrideImplicit()) {
- out << "@stride(" << stride_ << ") ";
- }
- out << "array<" << element_->FriendlyName(symbols);
- if (!IsRuntimeSized()) {
- out << ", " << count_;
- }
- out << ">";
- return out.str();
+ std::ostringstream out;
+ if (!IsStrideImplicit()) {
+ out << "@stride(" << stride_ << ") ";
+ }
+ out << "array<" << element_->FriendlyName(symbols);
+ if (!IsRuntimeSized()) {
+ out << ", " << count_;
+ }
+ out << ">";
+ return out.str();
}
uint32_t Array::Align() const {
- return align_;
+ return align_;
}
uint32_t Array::Size() const {
- return size_;
+ return size_;
}
} // namespace tint::sem
diff --git a/chromium/third_party/dawn/src/tint/sem/array.h b/chromium/third_party/dawn/src/tint/sem/array.h
index e10777b67b8..7f72d8a8a1a 100644
--- a/chromium/third_party/dawn/src/tint/sem/array.h
+++ b/chromium/third_party/dawn/src/tint/sem/array.h
@@ -21,91 +21,86 @@
#include "src/tint/sem/node.h"
#include "src/tint/sem/type.h"
-// Forward declarations
-namespace tint::ast {
-class Array;
-} // namespace tint::ast
-
namespace tint::sem {
/// Array holds the semantic information for Array nodes.
class Array final : public Castable<Array, Type> {
- public:
- /// Constructor
- /// @param element the array element type
- /// @param count the number of elements in the array. 0 represents a
- /// runtime-sized array.
- /// @param align the byte alignment of the array
- /// @param size the byte size of the array
- /// @param stride the number of bytes from the start of one element of the
- /// array to the start of the next element
- /// @param implicit_stride the number of bytes from the start of one element
- /// of the array to the start of the next element, if there was no `@stride`
- /// attribute applied.
- Array(Type const* element,
- uint32_t count,
- uint32_t align,
- uint32_t size,
- uint32_t stride,
- uint32_t implicit_stride);
-
- /// @returns a hash of the type.
- size_t Hash() const override;
-
- /// @param other the other type to compare against
- /// @returns true if the this type is equal to the given type
- bool Equals(const Type& other) const override;
-
- /// @return the array element type
- Type const* ElemType() const { return element_; }
-
- /// @returns the number of elements in the array. 0 represents a runtime-sized
- /// array.
- uint32_t Count() const { return count_; }
-
- /// @returns the byte alignment of the array
- /// @note this may differ from the alignment of a structure member of this
- /// array type, if the member is annotated with the `@align(n)` attribute.
- uint32_t Align() const override;
-
- /// @returns the byte size of the array
- /// @note this may differ from the size of a structure member of this array
- /// type, if the member is annotated with the `@size(n)` attribute.
- uint32_t Size() const override;
-
- /// @returns the number of bytes from the start of one element of the
- /// array to the start of the next element
- uint32_t Stride() const { return stride_; }
-
- /// @returns the number of bytes from the start of one element of the
- /// array to the start of the next element, if there was no `@stride`
- /// attribute applied
- uint32_t ImplicitStride() const { return implicit_stride_; }
-
- /// @returns true if the value returned by Stride() matches the element's
- /// natural stride
- bool IsStrideImplicit() const { return stride_ == implicit_stride_; }
-
- /// @returns true if this array is runtime sized
- bool IsRuntimeSized() const { return count_ == 0; }
-
- /// @returns true if constructible as per
- /// https://gpuweb.github.io/gpuweb/wgsl/#constructible-types
- bool IsConstructible() const override;
-
- /// @param symbols the program's symbol table
- /// @returns the name for this type that closely resembles how it would be
- /// declared in WGSL.
- std::string FriendlyName(const SymbolTable& symbols) const override;
-
- private:
- Type const* const element_;
- const uint32_t count_;
- const uint32_t align_;
- const uint32_t size_;
- const uint32_t stride_;
- const uint32_t implicit_stride_;
- const bool constructible_;
+ public:
+ /// Constructor
+ /// @param element the array element type
+ /// @param count the number of elements in the array. 0 represents a
+ /// runtime-sized array.
+ /// @param align the byte alignment of the array
+ /// @param size the byte size of the array
+ /// @param stride the number of bytes from the start of one element of the
+ /// array to the start of the next element
+ /// @param implicit_stride the number of bytes from the start of one element
+ /// of the array to the start of the next element, if there was no `@stride`
+ /// attribute applied.
+ Array(Type const* element,
+ uint32_t count,
+ uint32_t align,
+ uint32_t size,
+ uint32_t stride,
+ uint32_t implicit_stride);
+
+ /// @returns a hash of the type.
+ size_t Hash() const override;
+
+ /// @param other the other type to compare against
+    /// @returns true if this type is equal to the given type
+ bool Equals(const Type& other) const override;
+
+ /// @return the array element type
+ Type const* ElemType() const { return element_; }
+
+ /// @returns the number of elements in the array. 0 represents a runtime-sized
+ /// array.
+ uint32_t Count() const { return count_; }
+
+ /// @returns the byte alignment of the array
+ /// @note this may differ from the alignment of a structure member of this
+ /// array type, if the member is annotated with the `@align(n)` attribute.
+ uint32_t Align() const override;
+
+ /// @returns the byte size of the array
+ /// @note this may differ from the size of a structure member of this array
+ /// type, if the member is annotated with the `@size(n)` attribute.
+ uint32_t Size() const override;
+
+ /// @returns the number of bytes from the start of one element of the
+ /// array to the start of the next element
+ uint32_t Stride() const { return stride_; }
+
+ /// @returns the number of bytes from the start of one element of the
+ /// array to the start of the next element, if there was no `@stride`
+ /// attribute applied
+ uint32_t ImplicitStride() const { return implicit_stride_; }
+
+ /// @returns true if the value returned by Stride() matches the element's
+ /// natural stride
+ bool IsStrideImplicit() const { return stride_ == implicit_stride_; }
+
+ /// @returns true if this array is runtime sized
+ bool IsRuntimeSized() const { return count_ == 0; }
+
+ /// @returns true if constructible as per
+ /// https://gpuweb.github.io/gpuweb/wgsl/#constructible-types
+ bool IsConstructible() const override;
+
+ /// @param symbols the program's symbol table
+ /// @returns the name for this type that closely resembles how it would be
+ /// declared in WGSL.
+ std::string FriendlyName(const SymbolTable& symbols) const override;
+
+ private:
+ Type const* const element_;
+ const uint32_t count_;
+ const uint32_t align_;
+ const uint32_t size_;
+ const uint32_t stride_;
+ const uint32_t implicit_stride_;
+ const bool constructible_;
};
} // namespace tint::sem
diff --git a/chromium/third_party/dawn/src/tint/sem/atomic_type.cc b/chromium/third_party/dawn/src/tint/sem/atomic.cc
index 6376a7baac4..52951f3c818 100644
--- a/chromium/third_party/dawn/src/tint/sem/atomic_type.cc
+++ b/chromium/third_party/dawn/src/tint/sem/atomic.cc
@@ -12,10 +12,10 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-#include "src/tint/sem/atomic_type.h"
+#include "src/tint/sem/atomic.h"
#include "src/tint/program_builder.h"
-#include "src/tint/sem/reference_type.h"
+#include "src/tint/sem/reference.h"
#include "src/tint/utils/hash.h"
TINT_INSTANTIATE_TYPEINFO(tint::sem::Atomic);
@@ -23,36 +23,36 @@ TINT_INSTANTIATE_TYPEINFO(tint::sem::Atomic);
namespace tint::sem {
Atomic::Atomic(const sem::Type* subtype) : subtype_(subtype) {
- TINT_ASSERT(AST, !subtype->Is<Reference>());
+ TINT_ASSERT(AST, !subtype->Is<Reference>());
}
size_t Atomic::Hash() const {
- return utils::Hash(TypeInfo::Of<Atomic>().full_hashcode, subtype_);
+ return utils::Hash(TypeInfo::Of<Atomic>().full_hashcode, subtype_);
}
bool Atomic::Equals(const sem::Type& other) const {
- if (auto* o = other.As<Atomic>()) {
- return o->subtype_ == subtype_;
- }
- return false;
+ if (auto* o = other.As<Atomic>()) {
+ return o->subtype_ == subtype_;
+ }
+ return false;
}
std::string Atomic::FriendlyName(const SymbolTable& symbols) const {
- std::ostringstream out;
- out << "atomic<" << subtype_->FriendlyName(symbols) << ">";
- return out.str();
+ std::ostringstream out;
+ out << "atomic<" << subtype_->FriendlyName(symbols) << ">";
+ return out.str();
}
uint32_t Atomic::Size() const {
- return subtype_->Size();
+ return subtype_->Size();
}
uint32_t Atomic::Align() const {
- return subtype_->Align();
+ return subtype_->Align();
}
bool Atomic::IsConstructible() const {
- return false;
+ return false;
}
Atomic::Atomic(Atomic&&) = default;
diff --git a/chromium/third_party/dawn/src/tint/sem/atomic.h b/chromium/third_party/dawn/src/tint/sem/atomic.h
new file mode 100644
index 00000000000..7f6c814f693
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/sem/atomic.h
@@ -0,0 +1,66 @@
+// Copyright 2021 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef SRC_TINT_SEM_ATOMIC_H_
+#define SRC_TINT_SEM_ATOMIC_H_
+
+#include <string>
+
+#include "src/tint/sem/type.h"
+
+namespace tint::sem {
+
+/// An atomic type.
+class Atomic final : public Castable<Atomic, Type> {
+ public:
+ /// Constructor
+ /// @param subtype the atomic type
+ explicit Atomic(const sem::Type* subtype);
+
+ /// Move constructor
+ Atomic(Atomic&&);
+ ~Atomic() override;
+
+ /// @returns a hash of the type.
+ size_t Hash() const override;
+
+ /// @param other the other type to compare against
+    /// @returns true if this type is equal to the given type
+ bool Equals(const Type& other) const override;
+
+ /// @returns the atomic type
+ const sem::Type* Type() const { return subtype_; }
+
+ /// @param symbols the program's symbol table
+ /// @returns the name for this type that closely resembles how it would be
+ /// declared in WGSL.
+ std::string FriendlyName(const SymbolTable& symbols) const override;
+
+ /// @returns the size in bytes of the type.
+ uint32_t Size() const override;
+
+ /// @returns the alignment in bytes of the type.
+ uint32_t Align() const override;
+
+ /// @returns true if constructible as per
+ /// https://gpuweb.github.io/gpuweb/wgsl/#constructible-types
+ bool IsConstructible() const override;
+
+ private:
+ sem::Type const* const subtype_;
+};
+
+} // namespace tint::sem
+
+#endif // SRC_TINT_SEM_ATOMIC_H_
diff --git a/chromium/third_party/dawn/src/tint/sem/atomic_type_test.cc b/chromium/third_party/dawn/src/tint/sem/atomic_test.cc
index 15aa484ce64..fbf9bc3b552 100644
--- a/chromium/third_party/dawn/src/tint/sem/atomic_type_test.cc
+++ b/chromium/third_party/dawn/src/tint/sem/atomic_test.cc
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-#include "src/tint/sem/atomic_type.h"
+#include "src/tint/sem/atomic.h"
#include "src/tint/sem/test_helper.h"
@@ -22,34 +22,34 @@ namespace {
using AtomicTest = TestHelper;
TEST_F(AtomicTest, Creation) {
- auto* a = create<Atomic>(create<I32>());
- auto* b = create<Atomic>(create<I32>());
- auto* c = create<Atomic>(create<U32>());
- EXPECT_TRUE(a->Type()->Is<sem::I32>());
- EXPECT_EQ(a, b);
- EXPECT_NE(a, c);
+ auto* a = create<Atomic>(create<I32>());
+ auto* b = create<Atomic>(create<I32>());
+ auto* c = create<Atomic>(create<U32>());
+ EXPECT_TRUE(a->Type()->Is<sem::I32>());
+ EXPECT_EQ(a, b);
+ EXPECT_NE(a, c);
}
TEST_F(AtomicTest, Hash) {
- auto* a = create<Atomic>(create<I32>());
- auto* b = create<Atomic>(create<I32>());
- auto* c = create<Atomic>(create<U32>());
- EXPECT_EQ(a->Hash(), b->Hash());
- EXPECT_NE(a->Hash(), c->Hash());
+ auto* a = create<Atomic>(create<I32>());
+ auto* b = create<Atomic>(create<I32>());
+ auto* c = create<Atomic>(create<U32>());
+ EXPECT_EQ(a->Hash(), b->Hash());
+ EXPECT_NE(a->Hash(), c->Hash());
}
TEST_F(AtomicTest, Equals) {
- auto* a = create<Atomic>(create<I32>());
- auto* b = create<Atomic>(create<I32>());
- auto* c = create<Atomic>(create<U32>());
- EXPECT_TRUE(a->Equals(*b));
- EXPECT_FALSE(a->Equals(*c));
- EXPECT_FALSE(a->Equals(Void{}));
+ auto* a = create<Atomic>(create<I32>());
+ auto* b = create<Atomic>(create<I32>());
+ auto* c = create<Atomic>(create<U32>());
+ EXPECT_TRUE(a->Equals(*b));
+ EXPECT_FALSE(a->Equals(*c));
+ EXPECT_FALSE(a->Equals(Void{}));
}
TEST_F(AtomicTest, FriendlyName) {
- auto* a = create<Atomic>(create<I32>());
- EXPECT_EQ(a->FriendlyName(Symbols()), "atomic<i32>");
+ auto* a = create<Atomic>(create<I32>());
+ EXPECT_EQ(a->FriendlyName(Symbols()), "atomic<i32>");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/sem/atomic_type.h b/chromium/third_party/dawn/src/tint/sem/atomic_type.h
deleted file mode 100644
index e0dcb15396e..00000000000
--- a/chromium/third_party/dawn/src/tint/sem/atomic_type.h
+++ /dev/null
@@ -1,66 +0,0 @@
-// Copyright 2021 The Tint Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef SRC_TINT_SEM_ATOMIC_TYPE_H_
-#define SRC_TINT_SEM_ATOMIC_TYPE_H_
-
-#include <string>
-
-#include "src/tint/sem/type.h"
-
-namespace tint::sem {
-
-/// A atomic type.
-class Atomic final : public Castable<Atomic, Type> {
- public:
- /// Constructor
- /// @param subtype the atomic type
- explicit Atomic(const sem::Type* subtype);
-
- /// Move constructor
- Atomic(Atomic&&);
- ~Atomic() override;
-
- /// @returns a hash of the type.
- size_t Hash() const override;
-
- /// @param other the other type to compare against
- /// @returns true if the this type is equal to the given type
- bool Equals(const Type& other) const override;
-
- /// @returns the atomic type
- const sem::Type* Type() const { return subtype_; }
-
- /// @param symbols the program's symbol table
- /// @returns the name for this type that closely resembles how it would be
- /// declared in WGSL.
- std::string FriendlyName(const SymbolTable& symbols) const override;
-
- /// @returns the size in bytes of the type.
- uint32_t Size() const override;
-
- /// @returns the alignment in bytes of the type.
- uint32_t Align() const override;
-
- /// @returns true if constructible as per
- /// https://gpuweb.github.io/gpuweb/wgsl/#constructible-typesd
- bool IsConstructible() const override;
-
- private:
- sem::Type const* const subtype_;
-};
-
-} // namespace tint::sem
-
-#endif // SRC_TINT_SEM_ATOMIC_TYPE_H_
diff --git a/chromium/third_party/dawn/src/tint/sem/behavior.cc b/chromium/third_party/dawn/src/tint/sem/behavior.cc
index 628aa60ac01..617794fc1ad 100644
--- a/chromium/third_party/dawn/src/tint/sem/behavior.cc
+++ b/chromium/third_party/dawn/src/tint/sem/behavior.cc
@@ -17,21 +17,21 @@
namespace tint::sem {
std::ostream& operator<<(std::ostream& out, Behavior behavior) {
- switch (behavior) {
- case Behavior::kReturn:
- return out << "Return";
- case Behavior::kDiscard:
- return out << "Discard";
- case Behavior::kBreak:
- return out << "Break";
- case Behavior::kContinue:
- return out << "Continue";
- case Behavior::kFallthrough:
- return out << "Fallthrough";
- case Behavior::kNext:
- return out << "Next";
- }
- return out << "<unknown>";
+ switch (behavior) {
+ case Behavior::kReturn:
+ return out << "Return";
+ case Behavior::kDiscard:
+ return out << "Discard";
+ case Behavior::kBreak:
+ return out << "Break";
+ case Behavior::kContinue:
+ return out << "Continue";
+ case Behavior::kFallthrough:
+ return out << "Fallthrough";
+ case Behavior::kNext:
+ return out << "Next";
+ }
+ return out << "<unknown>";
}
} // namespace tint::sem
diff --git a/chromium/third_party/dawn/src/tint/sem/behavior.h b/chromium/third_party/dawn/src/tint/sem/behavior.h
index e8d158d75d7..4acb8d5a22a 100644
--- a/chromium/third_party/dawn/src/tint/sem/behavior.h
+++ b/chromium/third_party/dawn/src/tint/sem/behavior.h
@@ -22,12 +22,12 @@ namespace tint::sem {
/// Behavior enumerates the possible behaviors of an expression or statement.
/// @see https://www.w3.org/TR/WGSL/#behaviors
enum class Behavior {
- kReturn,
- kDiscard,
- kBreak,
- kContinue,
- kFallthrough,
- kNext,
+ kReturn,
+ kDiscard,
+ kBreak,
+ kContinue,
+ kFallthrough,
+ kNext,
};
/// Behaviors is a set of Behavior
diff --git a/chromium/third_party/dawn/src/tint/sem/binding_point.h b/chromium/third_party/dawn/src/tint/sem/binding_point.h
index 8e8c6c7382f..993fb5edada 100644
--- a/chromium/third_party/dawn/src/tint/sem/binding_point.h
+++ b/chromium/third_party/dawn/src/tint/sem/binding_point.h
@@ -25,24 +25,22 @@ namespace tint::sem {
/// BindingPoint holds a group and binding index.
struct BindingPoint {
- /// The `@group` part of the binding point
- uint32_t group = 0;
- /// The `@binding` part of the binding point
- uint32_t binding = 0;
+ /// The `@group` part of the binding point
+ uint32_t group = 0;
+ /// The `@binding` part of the binding point
+ uint32_t binding = 0;
- /// Equality operator
- /// @param rhs the BindingPoint to compare against
- /// @returns true if this BindingPoint is equal to `rhs`
- inline bool operator==(const BindingPoint& rhs) const {
- return group == rhs.group && binding == rhs.binding;
- }
+ /// Equality operator
+ /// @param rhs the BindingPoint to compare against
+ /// @returns true if this BindingPoint is equal to `rhs`
+ inline bool operator==(const BindingPoint& rhs) const {
+ return group == rhs.group && binding == rhs.binding;
+ }
- /// Inequality operator
- /// @param rhs the BindingPoint to compare against
- /// @returns true if this BindingPoint is not equal to `rhs`
- inline bool operator!=(const BindingPoint& rhs) const {
- return !(*this == rhs);
- }
+ /// Inequality operator
+ /// @param rhs the BindingPoint to compare against
+ /// @returns true if this BindingPoint is not equal to `rhs`
+ inline bool operator!=(const BindingPoint& rhs) const { return !(*this == rhs); }
};
} // namespace tint::sem
@@ -54,13 +52,12 @@ namespace std {
/// std::unordered_set.
template <>
class hash<tint::sem::BindingPoint> {
- public:
- /// @param binding_point the binding point to create a hash for
- /// @return the hash value
- inline std::size_t operator()(
- const tint::sem::BindingPoint& binding_point) const {
- return tint::utils::Hash(binding_point.group, binding_point.binding);
- }
+ public:
+ /// @param binding_point the binding point to create a hash for
+ /// @return the hash value
+ inline std::size_t operator()(const tint::sem::BindingPoint& binding_point) const {
+ return tint::utils::Hash(binding_point.group, binding_point.binding);
+ }
};
} // namespace std
diff --git a/chromium/third_party/dawn/src/tint/sem/block_statement.cc b/chromium/third_party/dawn/src/tint/sem/block_statement.cc
index 707448301c9..51bad0f0586 100644
--- a/chromium/third_party/dawn/src/tint/sem/block_statement.cc
+++ b/chromium/third_party/dawn/src/tint/sem/block_statement.cc
@@ -32,16 +32,16 @@ BlockStatement::BlockStatement(const ast::BlockStatement* declaration,
BlockStatement::~BlockStatement() = default;
const ast::BlockStatement* BlockStatement::Declaration() const {
- return Base::Declaration()->As<ast::BlockStatement>();
+ return Base::Declaration()->As<ast::BlockStatement>();
}
void BlockStatement::AddDecl(const ast::Variable* var) {
- decls_.push_back(var);
+ decls_.push_back(var);
}
FunctionBlockStatement::FunctionBlockStatement(const sem::Function* function)
: Base(function->Declaration()->body, nullptr, function) {
- TINT_ASSERT(Semantic, function);
+ TINT_ASSERT(Semantic, function);
}
FunctionBlockStatement::~FunctionBlockStatement() = default;
@@ -50,16 +50,15 @@ LoopBlockStatement::LoopBlockStatement(const ast::BlockStatement* declaration,
const CompoundStatement* parent,
const sem::Function* function)
: Base(declaration, parent, function) {
- TINT_ASSERT(Semantic, parent);
- TINT_ASSERT(Semantic, function);
+ TINT_ASSERT(Semantic, parent);
+ TINT_ASSERT(Semantic, function);
}
LoopBlockStatement::~LoopBlockStatement() = default;
-void LoopBlockStatement::SetFirstContinue(
- const ast::ContinueStatement* first_continue,
- size_t num_decls) {
- first_continue_ = first_continue;
- num_decls_at_first_continue_ = num_decls;
+void LoopBlockStatement::SetFirstContinue(const ast::ContinueStatement* first_continue,
+ size_t num_decls) {
+ first_continue_ = first_continue;
+ num_decls_at_first_continue_ = num_decls;
}
} // namespace tint::sem
diff --git a/chromium/third_party/dawn/src/tint/sem/block_statement.h b/chromium/third_party/dawn/src/tint/sem/block_statement.h
index 6b71baf3850..4f12122dbdb 100644
--- a/chromium/third_party/dawn/src/tint/sem/block_statement.h
+++ b/chromium/third_party/dawn/src/tint/sem/block_statement.h
@@ -24,7 +24,6 @@
namespace tint::ast {
class BlockStatement;
class ContinueStatement;
-class Function;
class Variable;
} // namespace tint::ast
@@ -33,85 +32,78 @@ namespace tint::sem {
/// Holds semantic information about a block, such as parent block and variables
/// declared in the block.
class BlockStatement : public Castable<BlockStatement, CompoundStatement> {
- public:
- /// Constructor
- /// @param declaration the AST node for this block statement
- /// @param parent the owning statement
- /// @param function the owning function
- BlockStatement(const ast::BlockStatement* declaration,
- const CompoundStatement* parent,
- const sem::Function* function);
-
- /// Destructor
- ~BlockStatement() override;
-
- /// @returns the AST block statement associated with this semantic block
- /// statement
- const ast::BlockStatement* Declaration() const;
-
- /// @returns the declarations associated with this block
- const std::vector<const ast::Variable*>& Decls() const { return decls_; }
-
- /// Associates a declaration with this block.
- /// @param var a variable declaration to be added to the block
- void AddDecl(const ast::Variable* var);
-
- private:
- std::vector<const ast::Variable*> decls_;
+ public:
+ /// Constructor
+ /// @param declaration the AST node for this block statement
+ /// @param parent the owning statement
+ /// @param function the owning function
+ BlockStatement(const ast::BlockStatement* declaration,
+ const CompoundStatement* parent,
+ const sem::Function* function);
+
+ /// Destructor
+ ~BlockStatement() override;
+
+ /// @returns the AST block statement associated with this semantic block
+ /// statement
+ const ast::BlockStatement* Declaration() const;
+
+ /// @returns the declarations associated with this block
+ const std::vector<const ast::Variable*>& Decls() const { return decls_; }
+
+ /// Associates a declaration with this block.
+ /// @param var a variable declaration to be added to the block
+ void AddDecl(const ast::Variable* var);
+
+ private:
+ std::vector<const ast::Variable*> decls_;
};
/// The root block statement for a function
-class FunctionBlockStatement final
- : public Castable<FunctionBlockStatement, BlockStatement> {
- public:
- /// Constructor
- /// @param function the owning function
- explicit FunctionBlockStatement(const sem::Function* function);
-
- /// Destructor
- ~FunctionBlockStatement() override;
+class FunctionBlockStatement final : public Castable<FunctionBlockStatement, BlockStatement> {
+ public:
+ /// Constructor
+ /// @param function the owning function
+ explicit FunctionBlockStatement(const sem::Function* function);
+
+ /// Destructor
+ ~FunctionBlockStatement() override;
};
/// Holds semantic information about a loop body block or for-loop body block
-class LoopBlockStatement final
- : public Castable<LoopBlockStatement, BlockStatement> {
- public:
- /// Constructor
- /// @param declaration the AST node for this block statement
- /// @param parent the owning statement
- /// @param function the owning function
- LoopBlockStatement(const ast::BlockStatement* declaration,
- const CompoundStatement* parent,
- const sem::Function* function);
-
- /// Destructor
- ~LoopBlockStatement() override;
-
- /// @returns the first continue statement in this loop block, or nullptr if
- /// there are no continue statements in the block
- const ast::ContinueStatement* FirstContinue() const {
- return first_continue_;
- }
-
- /// @returns the number of variables declared before the first continue
- /// statement
- size_t NumDeclsAtFirstContinue() const {
- return num_decls_at_first_continue_;
- }
-
- /// Allows the resolver to record the first continue statement in the block
- /// and the number of variables declared prior to that statement.
- /// @param first_continue the first continue statement in the block
- /// @param num_decls the number of variable declarations before that continue
- void SetFirstContinue(const ast::ContinueStatement* first_continue,
- size_t num_decls);
-
- private:
- /// The first continue statement in this loop block.
- const ast::ContinueStatement* first_continue_ = nullptr;
-
- /// The number of variables declared before the first continue statement.
- size_t num_decls_at_first_continue_ = 0;
+class LoopBlockStatement final : public Castable<LoopBlockStatement, BlockStatement> {
+ public:
+ /// Constructor
+ /// @param declaration the AST node for this block statement
+ /// @param parent the owning statement
+ /// @param function the owning function
+ LoopBlockStatement(const ast::BlockStatement* declaration,
+ const CompoundStatement* parent,
+ const sem::Function* function);
+
+ /// Destructor
+ ~LoopBlockStatement() override;
+
+ /// @returns the first continue statement in this loop block, or nullptr if
+ /// there are no continue statements in the block
+ const ast::ContinueStatement* FirstContinue() const { return first_continue_; }
+
+ /// @returns the number of variables declared before the first continue
+ /// statement
+ size_t NumDeclsAtFirstContinue() const { return num_decls_at_first_continue_; }
+
+ /// Allows the resolver to record the first continue statement in the block
+ /// and the number of variables declared prior to that statement.
+ /// @param first_continue the first continue statement in the block
+ /// @param num_decls the number of variable declarations before that continue
+ void SetFirstContinue(const ast::ContinueStatement* first_continue, size_t num_decls);
+
+ private:
+ /// The first continue statement in this loop block.
+ const ast::ContinueStatement* first_continue_ = nullptr;
+
+ /// The number of variables declared before the first continue statement.
+ size_t num_decls_at_first_continue_ = 0;
};
} // namespace tint::sem
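
The LoopBlockStatement accessors above let the resolver record the first `continue` in a loop body together with the number of declarations that precede it; later checks can then, for example, flag uses in the continuing construct of variables declared after that point. A standalone sketch of that bookkeeping, using hypothetical Stmt/Kind/LoopInfo types rather than the tint AST:

    // Standalone sketch, not tint code.
    #include <cstddef>
    #include <vector>

    enum class Kind { kDecl, kContinue, kOther };
    struct Stmt { Kind kind; };

    struct LoopInfo {
        const Stmt* first_continue = nullptr;          // mirrors FirstContinue()
        std::size_t num_decls_at_first_continue = 0;   // mirrors NumDeclsAtFirstContinue()
    };

    LoopInfo Analyze(const std::vector<Stmt>& body) {
        LoopInfo info;
        std::size_t decls = 0;
        for (const Stmt& s : body) {
            if (s.kind == Kind::kDecl) {
                ++decls;
            } else if (s.kind == Kind::kContinue && !info.first_continue) {
                info.first_continue = &s;                  // only the first continue is recorded
                info.num_decls_at_first_continue = decls;  // declarations seen before it
            }
        }
        return info;
    }
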
diff --git a/chromium/third_party/dawn/src/tint/sem/bool_type.cc b/chromium/third_party/dawn/src/tint/sem/bool.cc
index 7a20768d43a..938a93550a9 100644
--- a/chromium/third_party/dawn/src/tint/sem/bool_type.cc
+++ b/chromium/third_party/dawn/src/tint/sem/bool.cc
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-#include "src/tint/sem/bool_type.h"
+#include "src/tint/sem/bool.h"
#include "src/tint/program_builder.h"
@@ -27,27 +27,27 @@ Bool::Bool(Bool&&) = default;
Bool::~Bool() = default;
size_t Bool::Hash() const {
- return static_cast<size_t>(TypeInfo::Of<Bool>().full_hashcode);
+ return static_cast<size_t>(TypeInfo::Of<Bool>().full_hashcode);
}
bool Bool::Equals(const Type& other) const {
- return other.Is<Bool>();
+ return other.Is<Bool>();
}
std::string Bool::FriendlyName(const SymbolTable&) const {
- return "bool";
+ return "bool";
}
bool Bool::IsConstructible() const {
- return true;
+ return true;
}
uint32_t Bool::Size() const {
- return 4;
+ return 4;
}
uint32_t Bool::Align() const {
- return 4;
+ return 4;
}
} // namespace tint::sem
diff --git a/chromium/third_party/dawn/src/tint/sem/bool.h b/chromium/third_party/dawn/src/tint/sem/bool.h
new file mode 100644
index 00000000000..aae48d8dd7d
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/sem/bool.h
@@ -0,0 +1,68 @@
+// Copyright 2020 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef SRC_TINT_SEM_BOOL_H_
+#define SRC_TINT_SEM_BOOL_H_
+
+#include <string>
+
+#include "src/tint/sem/type.h"
+
+// X11 likes to #define Bool leading to confusing error messages.
+// If it's defined, undefine it.
+#ifdef Bool
+#undef Bool
+#endif
+
+namespace tint::sem {
+
+/// A boolean type
+class Bool final : public Castable<Bool, Type> {
+ public:
+ /// Constructor
+ Bool();
+ /// Move constructor
+ Bool(Bool&&);
+ ~Bool() override;
+
+ /// @returns a hash of the type.
+ size_t Hash() const override;
+
+ /// @param other the other type to compare against
+ /// @returns true if this type is equal to the given type
+ bool Equals(const Type& other) const override;
+
+ /// @param symbols the program's symbol table
+ /// @returns the name for this type that closely resembles how it would be
+ /// declared in WGSL.
+ std::string FriendlyName(const SymbolTable& symbols) const override;
+
+ /// @returns true if constructible as per
+ /// https://gpuweb.github.io/gpuweb/wgsl/#constructible-types
+ bool IsConstructible() const override;
+
+ /// @returns the size in bytes of the type.
+ /// @note: booleans are not host-sharable, but still may exist in workgroup
+ /// storage.
+ uint32_t Size() const override;
+
+ /// @returns the alignment in bytes of the type.
+ /// @note: booleans are not host-sharable, but still may exist in workgroup
+ /// storage.
+ uint32_t Align() const override;
+};
+
+} // namespace tint::sem
+
+#endif // SRC_TINT_SEM_BOOL_H_
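
The `#ifdef Bool` guard carried over into the new bool.h exists because X11 headers (Xlib.h) effectively do `#define Bool int`, which would otherwise rewrite the class declaration before the compiler sees it. A minimal reproduction of the problem and of the guard, with a local macro standing in for the X11 header:

    // Standalone illustration; the #define stands in for including Xlib.h.
    #define Bool int  // what the X11 headers effectively do

    // The guard used by the header above: drop the macro before declaring the class.
    #ifdef Bool
    #undef Bool
    #endif

    // Without the #undef, the preprocessor would turn this into `class int { ... };`
    // and produce a confusing error.
    class Bool {
      public:
        bool value = false;
    };
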
diff --git a/chromium/third_party/dawn/src/tint/sem/bool_type_test.cc b/chromium/third_party/dawn/src/tint/sem/bool_test.cc
index 390de43f1a4..bbd7f74901f 100644
--- a/chromium/third_party/dawn/src/tint/sem/bool_type_test.cc
+++ b/chromium/third_party/dawn/src/tint/sem/bool_test.cc
@@ -13,7 +13,7 @@
// limitations under the License.
#include "src/tint/sem/test_helper.h"
-#include "src/tint/sem/texture_type.h"
+#include "src/tint/sem/texture.h"
namespace tint::sem {
namespace {
@@ -21,27 +21,27 @@ namespace {
using BoolTest = TestHelper;
TEST_F(BoolTest, Creation) {
- auto* a = create<Bool>();
- auto* b = create<Bool>();
- EXPECT_EQ(a, b);
+ auto* a = create<Bool>();
+ auto* b = create<Bool>();
+ EXPECT_EQ(a, b);
}
TEST_F(BoolTest, Hash) {
- auto* a = create<Bool>();
- auto* b = create<Bool>();
- EXPECT_EQ(a->Hash(), b->Hash());
+ auto* a = create<Bool>();
+ auto* b = create<Bool>();
+ EXPECT_EQ(a->Hash(), b->Hash());
}
TEST_F(BoolTest, Equals) {
- auto* a = create<Bool>();
- auto* b = create<Bool>();
- EXPECT_TRUE(a->Equals(*b));
- EXPECT_FALSE(a->Equals(Void{}));
+ auto* a = create<Bool>();
+ auto* b = create<Bool>();
+ EXPECT_TRUE(a->Equals(*b));
+ EXPECT_FALSE(a->Equals(Void{}));
}
TEST_F(BoolTest, FriendlyName) {
- Bool b;
- EXPECT_EQ(b.FriendlyName(Symbols()), "bool");
+ Bool b;
+ EXPECT_EQ(b.FriendlyName(Symbols()), "bool");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/sem/bool_type.h b/chromium/third_party/dawn/src/tint/sem/bool_type.h
deleted file mode 100644
index 9e949bdfe43..00000000000
--- a/chromium/third_party/dawn/src/tint/sem/bool_type.h
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2020 The Tint Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef SRC_TINT_SEM_BOOL_TYPE_H_
-#define SRC_TINT_SEM_BOOL_TYPE_H_
-
-#include <string>
-
-#include "src/tint/sem/type.h"
-
-// X11 likes to #define Bool leading to confusing error messages.
-// If its defined, undefine it.
-#ifdef Bool
-#undef Bool
-#endif
-
-namespace tint::sem {
-
-/// A boolean type
-class Bool final : public Castable<Bool, Type> {
- public:
- /// Constructor
- Bool();
- /// Move constructor
- Bool(Bool&&);
- ~Bool() override;
-
- /// @returns a hash of the type.
- size_t Hash() const override;
-
- /// @param other the other type to compare against
- /// @returns true if the this type is equal to the given type
- bool Equals(const Type& other) const override;
-
- /// @param symbols the program's symbol table
- /// @returns the name for this type that closely resembles how it would be
- /// declared in WGSL.
- std::string FriendlyName(const SymbolTable& symbols) const override;
-
- /// @returns true if constructible as per
- /// https://gpuweb.github.io/gpuweb/wgsl/#constructible-types
- bool IsConstructible() const override;
-
- /// @returns the size in bytes of the type.
- /// @note: booleans are not host-sharable, but still may exist in workgroup
- /// storage.
- uint32_t Size() const override;
-
- /// @returns the alignment in bytes of the type.
- /// @note: booleans are not host-sharable, but still may exist in workgroup
- /// storage.
- uint32_t Align() const override;
-};
-
-} // namespace tint::sem
-
-#endif // SRC_TINT_SEM_BOOL_TYPE_H_
diff --git a/chromium/third_party/dawn/src/tint/sem/builtin.cc b/chromium/third_party/dawn/src/tint/sem/builtin.cc
index a822be5fcd4..bb2878be3a5 100644
--- a/chromium/third_party/dawn/src/tint/sem/builtin.cc
+++ b/chromium/third_party/dawn/src/tint/sem/builtin.cc
@@ -26,76 +26,65 @@ TINT_INSTANTIATE_TYPEINFO(tint::sem::Builtin);
namespace tint::sem {
const char* Builtin::str() const {
- return sem::str(type_);
+ return sem::str(type_);
}
bool IsCoarseDerivativeBuiltin(BuiltinType i) {
- return i == BuiltinType::kDpdxCoarse || i == BuiltinType::kDpdyCoarse ||
- i == BuiltinType::kFwidthCoarse;
+ return i == BuiltinType::kDpdxCoarse || i == BuiltinType::kDpdyCoarse ||
+ i == BuiltinType::kFwidthCoarse;
}
bool IsFineDerivativeBuiltin(BuiltinType i) {
- return i == BuiltinType::kDpdxFine || i == BuiltinType::kDpdyFine ||
- i == BuiltinType::kFwidthFine;
+ return i == BuiltinType::kDpdxFine || i == BuiltinType::kDpdyFine ||
+ i == BuiltinType::kFwidthFine;
}
bool IsDerivativeBuiltin(BuiltinType i) {
- return i == BuiltinType::kDpdx || i == BuiltinType::kDpdy ||
- i == BuiltinType::kFwidth || IsCoarseDerivativeBuiltin(i) ||
- IsFineDerivativeBuiltin(i);
+ return i == BuiltinType::kDpdx || i == BuiltinType::kDpdy || i == BuiltinType::kFwidth ||
+ IsCoarseDerivativeBuiltin(i) || IsFineDerivativeBuiltin(i);
}
bool IsTextureBuiltin(BuiltinType i) {
- return IsImageQueryBuiltin(i) || i == BuiltinType::kTextureLoad ||
- i == BuiltinType::kTextureGather ||
- i == BuiltinType::kTextureGatherCompare ||
- i == BuiltinType::kTextureSample ||
- i == BuiltinType::kTextureSampleLevel ||
- i == BuiltinType::kTextureSampleBias ||
- i == BuiltinType::kTextureSampleCompare ||
- i == BuiltinType::kTextureSampleCompareLevel ||
- i == BuiltinType::kTextureSampleGrad ||
- i == BuiltinType::kTextureStore;
+ return IsImageQueryBuiltin(i) || i == BuiltinType::kTextureLoad ||
+ i == BuiltinType::kTextureGather || i == BuiltinType::kTextureGatherCompare ||
+ i == BuiltinType::kTextureSample || i == BuiltinType::kTextureSampleLevel ||
+ i == BuiltinType::kTextureSampleBias || i == BuiltinType::kTextureSampleCompare ||
+ i == BuiltinType::kTextureSampleCompareLevel || i == BuiltinType::kTextureSampleGrad ||
+ i == BuiltinType::kTextureStore;
}
bool IsImageQueryBuiltin(BuiltinType i) {
- return i == BuiltinType::kTextureDimensions ||
- i == BuiltinType::kTextureNumLayers ||
- i == BuiltinType::kTextureNumLevels ||
- i == BuiltinType::kTextureNumSamples;
+ return i == BuiltinType::kTextureDimensions || i == BuiltinType::kTextureNumLayers ||
+ i == BuiltinType::kTextureNumLevels || i == BuiltinType::kTextureNumSamples;
}
bool IsDataPackingBuiltin(BuiltinType i) {
- return i == BuiltinType::kPack4x8snorm || i == BuiltinType::kPack4x8unorm ||
- i == BuiltinType::kPack2x16snorm || i == BuiltinType::kPack2x16unorm ||
- i == BuiltinType::kPack2x16float;
+ return i == BuiltinType::kPack4x8snorm || i == BuiltinType::kPack4x8unorm ||
+ i == BuiltinType::kPack2x16snorm || i == BuiltinType::kPack2x16unorm ||
+ i == BuiltinType::kPack2x16float;
}
bool IsDataUnpackingBuiltin(BuiltinType i) {
- return i == BuiltinType::kUnpack4x8snorm ||
- i == BuiltinType::kUnpack4x8unorm ||
- i == BuiltinType::kUnpack2x16snorm ||
- i == BuiltinType::kUnpack2x16unorm ||
- i == BuiltinType::kUnpack2x16float;
+ return i == BuiltinType::kUnpack4x8snorm || i == BuiltinType::kUnpack4x8unorm ||
+ i == BuiltinType::kUnpack2x16snorm || i == BuiltinType::kUnpack2x16unorm ||
+ i == BuiltinType::kUnpack2x16float;
}
bool IsBarrierBuiltin(BuiltinType i) {
- return i == BuiltinType::kWorkgroupBarrier ||
- i == BuiltinType::kStorageBarrier;
+ return i == BuiltinType::kWorkgroupBarrier || i == BuiltinType::kStorageBarrier;
}
bool IsAtomicBuiltin(BuiltinType i) {
- return i == sem::BuiltinType::kAtomicLoad ||
- i == sem::BuiltinType::kAtomicStore ||
- i == sem::BuiltinType::kAtomicAdd ||
- i == sem::BuiltinType::kAtomicSub ||
- i == sem::BuiltinType::kAtomicMax ||
- i == sem::BuiltinType::kAtomicMin ||
- i == sem::BuiltinType::kAtomicAnd ||
- i == sem::BuiltinType::kAtomicOr ||
- i == sem::BuiltinType::kAtomicXor ||
- i == sem::BuiltinType::kAtomicExchange ||
- i == sem::BuiltinType::kAtomicCompareExchangeWeak;
+ return i == sem::BuiltinType::kAtomicLoad || i == sem::BuiltinType::kAtomicStore ||
+ i == sem::BuiltinType::kAtomicAdd || i == sem::BuiltinType::kAtomicSub ||
+ i == sem::BuiltinType::kAtomicMax || i == sem::BuiltinType::kAtomicMin ||
+ i == sem::BuiltinType::kAtomicAnd || i == sem::BuiltinType::kAtomicOr ||
+ i == sem::BuiltinType::kAtomicXor || i == sem::BuiltinType::kAtomicExchange ||
+ i == sem::BuiltinType::kAtomicCompareExchangeWeak;
+}
+
+bool IsDP4aBuiltin(BuiltinType i) {
+ return i == sem::BuiltinType::kDot4I8Packed || i == sem::BuiltinType::kDot4U8Packed;
}
Builtin::Builtin(BuiltinType type,
@@ -107,57 +96,68 @@ Builtin::Builtin(BuiltinType type,
type_(type),
supported_stages_(supported_stages),
is_deprecated_(is_deprecated) {
- for (auto* parameter : parameters) {
- parameter->SetOwner(this);
- }
+ for (auto* parameter : parameters) {
+ parameter->SetOwner(this);
+ }
}
Builtin::~Builtin() = default;
bool Builtin::IsCoarseDerivative() const {
- return IsCoarseDerivativeBuiltin(type_);
+ return IsCoarseDerivativeBuiltin(type_);
}
bool Builtin::IsFineDerivative() const {
- return IsFineDerivativeBuiltin(type_);
+ return IsFineDerivativeBuiltin(type_);
}
bool Builtin::IsDerivative() const {
- return IsDerivativeBuiltin(type_);
+ return IsDerivativeBuiltin(type_);
}
bool Builtin::IsTexture() const {
- return IsTextureBuiltin(type_);
+ return IsTextureBuiltin(type_);
}
bool Builtin::IsImageQuery() const {
- return IsImageQueryBuiltin(type_);
+ return IsImageQueryBuiltin(type_);
}
bool Builtin::IsDataPacking() const {
- return IsDataPackingBuiltin(type_);
+ return IsDataPackingBuiltin(type_);
}
bool Builtin::IsDataUnpacking() const {
- return IsDataUnpackingBuiltin(type_);
+ return IsDataUnpackingBuiltin(type_);
}
bool Builtin::IsBarrier() const {
- return IsBarrierBuiltin(type_);
+ return IsBarrierBuiltin(type_);
}
bool Builtin::IsAtomic() const {
- return IsAtomicBuiltin(type_);
+ return IsAtomicBuiltin(type_);
+}
+
+bool Builtin::IsDP4a() const {
+ return IsDP4aBuiltin(type_);
}
bool Builtin::HasSideEffects() const {
- if (IsAtomic() && type_ != sem::BuiltinType::kAtomicLoad) {
- return true;
- }
- if (type_ == sem::BuiltinType::kTextureStore) {
- return true;
- }
- return false;
+ if (IsAtomic() && type_ != sem::BuiltinType::kAtomicLoad) {
+ return true;
+ }
+ if (type_ == sem::BuiltinType::kTextureStore) {
+ return true;
+ }
+ return false;
+}
+
+ast::Extension Builtin::RequiredExtension() const {
+ if (IsDP4a()) {
+ return ast::Extension::kChromiumExperimentalDP4a;
+ }
+ return ast::Extension::kNone;
}
} // namespace tint::sem
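
The new IsDP4aBuiltin() predicate and Builtin::RequiredExtension() in the hunk above follow the same pattern as the existing classification helpers: a free function over the builtin enum, plus a member that maps that classification to the WGSL extension the call requires. A standalone sketch of the pattern with hypothetical enum values (not the tint definitions):

    // Standalone sketch, not tint code.
    enum class BuiltinKind { kDot, kDot4I8Packed, kDot4U8Packed };
    enum class Extension { kNone, kChromiumExperimentalDP4a };

    // Free predicate over the enum, analogous to IsDP4aBuiltin().
    bool IsDP4a(BuiltinKind k) {
        return k == BuiltinKind::kDot4I8Packed || k == BuiltinKind::kDot4U8Packed;
    }

    // Analogous to Builtin::RequiredExtension(): a resolver can use this to
    // reject DP4a calls unless the module enables the experimental extension.
    Extension RequiredExtension(BuiltinKind k) {
        return IsDP4a(k) ? Extension::kChromiumExperimentalDP4a : Extension::kNone;
    }
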
diff --git a/chromium/third_party/dawn/src/tint/sem/builtin.h b/chromium/third_party/dawn/src/tint/sem/builtin.h
index 61589eeed82..1dc61ad9d58 100644
--- a/chromium/third_party/dawn/src/tint/sem/builtin.h
+++ b/chromium/third_party/dawn/src/tint/sem/builtin.h
@@ -18,6 +18,7 @@
#include <string>
#include <vector>
+#include "src/tint/ast/extension.h"
#include "src/tint/sem/builtin_type.h"
#include "src/tint/sem/call_target.h"
#include "src/tint/sem/pipeline_stage_set.h"
@@ -70,74 +71,87 @@ bool IsBarrierBuiltin(BuiltinType i);
/// @returns true if the given `i` is an atomic builtin
bool IsAtomicBuiltin(BuiltinType i);
+/// Determines if the given `i` is a DP4a builtin
+/// @param i the builtin
+/// @returns true if the given `i` is a DP4a builtin
+bool IsDP4aBuiltin(BuiltinType i);
+
/// Builtin holds the semantic information for a builtin function.
class Builtin final : public Castable<Builtin, CallTarget> {
- public:
- /// Constructor
- /// @param type the builtin type
- /// @param return_type the return type for the builtin call
- /// @param parameters the parameters for the builtin overload
- /// @param supported_stages the pipeline stages that this builtin can be
- /// used in
- /// @param is_deprecated true if the particular overload is considered
- /// deprecated
- Builtin(BuiltinType type,
- const sem::Type* return_type,
- std::vector<Parameter*> parameters,
- PipelineStageSet supported_stages,
- bool is_deprecated);
+ public:
+ /// Constructor
+ /// @param type the builtin type
+ /// @param return_type the return type for the builtin call
+ /// @param parameters the parameters for the builtin overload
+ /// @param supported_stages the pipeline stages that this builtin can be
+ /// used in
+ /// @param is_deprecated true if the particular overload is considered
+ /// deprecated
+ Builtin(BuiltinType type,
+ const sem::Type* return_type,
+ std::vector<Parameter*> parameters,
+ PipelineStageSet supported_stages,
+ bool is_deprecated);
+
+ /// Destructor
+ ~Builtin() override;
+
+ /// @return the type of the builtin
+ BuiltinType Type() const { return type_; }
- /// Destructor
- ~Builtin() override;
+ /// @return the pipeline stages that this builtin can be used in
+ PipelineStageSet SupportedStages() const { return supported_stages_; }
- /// @return the type of the builtin
- BuiltinType Type() const { return type_; }
+ /// @return true if the builtin overload is considered deprecated
+ bool IsDeprecated() const { return is_deprecated_; }
- /// @return the pipeline stages that this builtin can be used in
- PipelineStageSet SupportedStages() const { return supported_stages_; }
+ /// @returns the name of the builtin function type. The spelling, including
+ /// case, matches the name in the WGSL spec.
+ const char* str() const;
- /// @return true if the builtin overload is considered deprecated
- bool IsDeprecated() const { return is_deprecated_; }
+ /// @returns true if builtin is a coarse derivative builtin
+ bool IsCoarseDerivative() const;
- /// @returns the name of the builtin function type. The spelling, including
- /// case, matches the name in the WGSL spec.
- const char* str() const;
+ /// @returns true if builtin is a fine derivative builtin
+ bool IsFineDerivative() const;
- /// @returns true if builtin is a coarse derivative builtin
- bool IsCoarseDerivative() const;
+ /// @returns true if builtin is a derivative builtin
+ bool IsDerivative() const;
- /// @returns true if builtin is a fine a derivative builtin
- bool IsFineDerivative() const;
+ /// @returns true if builtin is a texture operation builtin
+ bool IsTexture() const;
- /// @returns true if builtin is a derivative builtin
- bool IsDerivative() const;
+ /// @returns true if builtin is an image query builtin
+ bool IsImageQuery() const;
- /// @returns true if builtin is a texture operation builtin
- bool IsTexture() const;
+ /// @returns true if builtin is a data packing builtin
+ bool IsDataPacking() const;
- /// @returns true if builtin is a image query builtin
- bool IsImageQuery() const;
+ /// @returns true if builtin is a data unpacking builtin
+ bool IsDataUnpacking() const;
- /// @returns true if builtin is a data packing builtin
- bool IsDataPacking() const;
+ /// @returns true if builtin is a barrier builtin
+ bool IsBarrier() const;
- /// @returns true if builtin is a data unpacking builtin
- bool IsDataUnpacking() const;
+ /// @returns true if builtin is an atomic builtin
+ bool IsAtomic() const;
- /// @returns true if builtin is a barrier builtin
- bool IsBarrier() const;
+ /// @returns true if builtin is a DP4a builtin (defined in the extension
+ /// chromium_experimental_DP4a)
+ bool IsDP4a() const;
- /// @returns true if builtin is a atomic builtin
- bool IsAtomic() const;
+ /// @returns true if intrinsic may have side-effects (i.e. writes to at least
+ /// one of its inputs)
+ bool HasSideEffects() const;
- /// @returns true if intrinsic may have side-effects (i.e. writes to at least
- /// one of its inputs)
- bool HasSideEffects() const;
+ /// @returns the required extension of this builtin function. Returns
+ /// ast::Extension::kNone if no extension is required.
+ ast::Extension RequiredExtension() const;
- private:
- const BuiltinType type_;
- const PipelineStageSet supported_stages_;
- const bool is_deprecated_;
+ private:
+ const BuiltinType type_;
+ const PipelineStageSet supported_stages_;
+ const bool is_deprecated_;
};
/// Constant value used by the degrees() builtin
@@ -153,13 +167,13 @@ namespace std {
/// Custom std::hash specialization for tint::sem::Builtin
template <>
class hash<tint::sem::Builtin> {
- public:
- /// @param i the Builtin to create a hash for
- /// @return the hash value
- inline std::size_t operator()(const tint::sem::Builtin& i) const {
- return tint::utils::Hash(i.Type(), i.SupportedStages(), i.ReturnType(),
- i.Parameters(), i.IsDeprecated());
- }
+ public:
+ /// @param i the Builtin to create a hash for
+ /// @return the hash value
+ inline std::size_t operator()(const tint::sem::Builtin& i) const {
+ return tint::utils::Hash(i.Type(), i.SupportedStages(), i.ReturnType(), i.Parameters(),
+ i.IsDeprecated());
+ }
};
} // namespace std
diff --git a/chromium/third_party/dawn/src/tint/sem/builtin_test.cc b/chromium/third_party/dawn/src/tint/sem/builtin_test.cc
index d931bcf5127..cf14565c07d 100644
--- a/chromium/third_party/dawn/src/tint/sem/builtin_test.cc
+++ b/chromium/third_party/dawn/src/tint/sem/builtin_test.cc
@@ -20,106 +20,107 @@ namespace tint::sem {
namespace {
struct BuiltinData {
- const char* name;
- BuiltinType builtin;
+ const char* name;
+ BuiltinType builtin;
};
inline std::ostream& operator<<(std::ostream& out, BuiltinData data) {
- out << data.name;
- return out;
+ out << data.name;
+ return out;
}
using BuiltinTypeTest = testing::TestWithParam<BuiltinData>;
TEST_P(BuiltinTypeTest, Parse) {
- auto param = GetParam();
- EXPECT_EQ(ParseBuiltinType(param.name), param.builtin);
+ auto param = GetParam();
+ EXPECT_EQ(ParseBuiltinType(param.name), param.builtin);
}
INSTANTIATE_TEST_SUITE_P(
BuiltinTypeTest,
BuiltinTypeTest,
- testing::Values(
- BuiltinData{"abs", BuiltinType::kAbs},
- BuiltinData{"acos", BuiltinType::kAcos},
- BuiltinData{"all", BuiltinType::kAll},
- BuiltinData{"any", BuiltinType::kAny},
- BuiltinData{"arrayLength", BuiltinType::kArrayLength},
- BuiltinData{"asin", BuiltinType::kAsin},
- BuiltinData{"atan", BuiltinType::kAtan},
- BuiltinData{"atan2", BuiltinType::kAtan2},
- BuiltinData{"ceil", BuiltinType::kCeil},
- BuiltinData{"clamp", BuiltinType::kClamp},
- BuiltinData{"cos", BuiltinType::kCos},
- BuiltinData{"cosh", BuiltinType::kCosh},
- BuiltinData{"countOneBits", BuiltinType::kCountOneBits},
- BuiltinData{"cross", BuiltinType::kCross},
- BuiltinData{"determinant", BuiltinType::kDeterminant},
- BuiltinData{"distance", BuiltinType::kDistance},
- BuiltinData{"dot", BuiltinType::kDot},
- BuiltinData{"dpdx", BuiltinType::kDpdx},
- BuiltinData{"dpdxCoarse", BuiltinType::kDpdxCoarse},
- BuiltinData{"dpdxFine", BuiltinType::kDpdxFine},
- BuiltinData{"dpdy", BuiltinType::kDpdy},
- BuiltinData{"dpdyCoarse", BuiltinType::kDpdyCoarse},
- BuiltinData{"dpdyFine", BuiltinType::kDpdyFine},
- BuiltinData{"exp", BuiltinType::kExp},
- BuiltinData{"exp2", BuiltinType::kExp2},
- BuiltinData{"faceForward", BuiltinType::kFaceForward},
- BuiltinData{"floor", BuiltinType::kFloor},
- BuiltinData{"fma", BuiltinType::kFma},
- BuiltinData{"fract", BuiltinType::kFract},
- BuiltinData{"frexp", BuiltinType::kFrexp},
- BuiltinData{"fwidth", BuiltinType::kFwidth},
- BuiltinData{"fwidthCoarse", BuiltinType::kFwidthCoarse},
- BuiltinData{"fwidthFine", BuiltinType::kFwidthFine},
- BuiltinData{"inverseSqrt", BuiltinType::kInverseSqrt},
- BuiltinData{"ldexp", BuiltinType::kLdexp},
- BuiltinData{"length", BuiltinType::kLength},
- BuiltinData{"log", BuiltinType::kLog},
- BuiltinData{"log2", BuiltinType::kLog2},
- BuiltinData{"max", BuiltinType::kMax},
- BuiltinData{"min", BuiltinType::kMin},
- BuiltinData{"mix", BuiltinType::kMix},
- BuiltinData{"modf", BuiltinType::kModf},
- BuiltinData{"normalize", BuiltinType::kNormalize},
- BuiltinData{"pow", BuiltinType::kPow},
- BuiltinData{"reflect", BuiltinType::kReflect},
- BuiltinData{"reverseBits", BuiltinType::kReverseBits},
- BuiltinData{"round", BuiltinType::kRound},
- BuiltinData{"select", BuiltinType::kSelect},
- BuiltinData{"sign", BuiltinType::kSign},
- BuiltinData{"sin", BuiltinType::kSin},
- BuiltinData{"sinh", BuiltinType::kSinh},
- BuiltinData{"smoothstep", BuiltinType::kSmoothstep},
- BuiltinData{"smoothStep", BuiltinType::kSmoothStep},
- BuiltinData{"sqrt", BuiltinType::kSqrt},
- BuiltinData{"step", BuiltinType::kStep},
- BuiltinData{"storageBarrier", BuiltinType::kStorageBarrier},
- BuiltinData{"tan", BuiltinType::kTan},
- BuiltinData{"tanh", BuiltinType::kTanh},
- BuiltinData{"textureDimensions", BuiltinType::kTextureDimensions},
- BuiltinData{"textureLoad", BuiltinType::kTextureLoad},
- BuiltinData{"textureNumLayers", BuiltinType::kTextureNumLayers},
- BuiltinData{"textureNumLevels", BuiltinType::kTextureNumLevels},
- BuiltinData{"textureNumSamples", BuiltinType::kTextureNumSamples},
- BuiltinData{"textureSample", BuiltinType::kTextureSample},
- BuiltinData{"textureSampleBias", BuiltinType::kTextureSampleBias},
- BuiltinData{"textureSampleCompare", BuiltinType::kTextureSampleCompare},
- BuiltinData{"textureSampleCompareLevel",
- BuiltinType::kTextureSampleCompareLevel},
- BuiltinData{"textureSampleGrad", BuiltinType::kTextureSampleGrad},
- BuiltinData{"textureSampleLevel", BuiltinType::kTextureSampleLevel},
- BuiltinData{"trunc", BuiltinType::kTrunc},
- BuiltinData{"unpack2x16float", BuiltinType::kUnpack2x16float},
- BuiltinData{"unpack2x16snorm", BuiltinType::kUnpack2x16snorm},
- BuiltinData{"unpack2x16unorm", BuiltinType::kUnpack2x16unorm},
- BuiltinData{"unpack4x8snorm", BuiltinType::kUnpack4x8snorm},
- BuiltinData{"unpack4x8unorm", BuiltinType::kUnpack4x8unorm},
- BuiltinData{"workgroupBarrier", BuiltinType::kWorkgroupBarrier}));
+ testing::Values(BuiltinData{"abs", BuiltinType::kAbs},
+ BuiltinData{"acos", BuiltinType::kAcos},
+ BuiltinData{"all", BuiltinType::kAll},
+ BuiltinData{"any", BuiltinType::kAny},
+ BuiltinData{"arrayLength", BuiltinType::kArrayLength},
+ BuiltinData{"asin", BuiltinType::kAsin},
+ BuiltinData{"atan", BuiltinType::kAtan},
+ BuiltinData{"atan2", BuiltinType::kAtan2},
+ BuiltinData{"ceil", BuiltinType::kCeil},
+ BuiltinData{"clamp", BuiltinType::kClamp},
+ BuiltinData{"cos", BuiltinType::kCos},
+ BuiltinData{"cosh", BuiltinType::kCosh},
+ BuiltinData{"countOneBits", BuiltinType::kCountOneBits},
+ BuiltinData{"cross", BuiltinType::kCross},
+ BuiltinData{"determinant", BuiltinType::kDeterminant},
+ BuiltinData{"distance", BuiltinType::kDistance},
+ BuiltinData{"dot", BuiltinType::kDot},
+ BuiltinData{"dot4I8Packed", BuiltinType::kDot4I8Packed},
+ BuiltinData{"dot4U8Packed", BuiltinType::kDot4U8Packed},
+ BuiltinData{"dpdx", BuiltinType::kDpdx},
+ BuiltinData{"dpdxCoarse", BuiltinType::kDpdxCoarse},
+ BuiltinData{"dpdxFine", BuiltinType::kDpdxFine},
+ BuiltinData{"dpdy", BuiltinType::kDpdy},
+ BuiltinData{"dpdyCoarse", BuiltinType::kDpdyCoarse},
+ BuiltinData{"dpdyFine", BuiltinType::kDpdyFine},
+ BuiltinData{"exp", BuiltinType::kExp},
+ BuiltinData{"exp2", BuiltinType::kExp2},
+ BuiltinData{"faceForward", BuiltinType::kFaceForward},
+ BuiltinData{"floor", BuiltinType::kFloor},
+ BuiltinData{"fma", BuiltinType::kFma},
+ BuiltinData{"fract", BuiltinType::kFract},
+ BuiltinData{"frexp", BuiltinType::kFrexp},
+ BuiltinData{"fwidth", BuiltinType::kFwidth},
+ BuiltinData{"fwidthCoarse", BuiltinType::kFwidthCoarse},
+ BuiltinData{"fwidthFine", BuiltinType::kFwidthFine},
+ BuiltinData{"inverseSqrt", BuiltinType::kInverseSqrt},
+ BuiltinData{"ldexp", BuiltinType::kLdexp},
+ BuiltinData{"length", BuiltinType::kLength},
+ BuiltinData{"log", BuiltinType::kLog},
+ BuiltinData{"log2", BuiltinType::kLog2},
+ BuiltinData{"max", BuiltinType::kMax},
+ BuiltinData{"min", BuiltinType::kMin},
+ BuiltinData{"mix", BuiltinType::kMix},
+ BuiltinData{"modf", BuiltinType::kModf},
+ BuiltinData{"normalize", BuiltinType::kNormalize},
+ BuiltinData{"pow", BuiltinType::kPow},
+ BuiltinData{"reflect", BuiltinType::kReflect},
+ BuiltinData{"reverseBits", BuiltinType::kReverseBits},
+ BuiltinData{"round", BuiltinType::kRound},
+ BuiltinData{"select", BuiltinType::kSelect},
+ BuiltinData{"sign", BuiltinType::kSign},
+ BuiltinData{"sin", BuiltinType::kSin},
+ BuiltinData{"sinh", BuiltinType::kSinh},
+ BuiltinData{"smoothstep", BuiltinType::kSmoothstep},
+ BuiltinData{"smoothStep", BuiltinType::kSmoothStep},
+ BuiltinData{"sqrt", BuiltinType::kSqrt},
+ BuiltinData{"step", BuiltinType::kStep},
+ BuiltinData{"storageBarrier", BuiltinType::kStorageBarrier},
+ BuiltinData{"tan", BuiltinType::kTan},
+ BuiltinData{"tanh", BuiltinType::kTanh},
+ BuiltinData{"textureDimensions", BuiltinType::kTextureDimensions},
+ BuiltinData{"textureLoad", BuiltinType::kTextureLoad},
+ BuiltinData{"textureNumLayers", BuiltinType::kTextureNumLayers},
+ BuiltinData{"textureNumLevels", BuiltinType::kTextureNumLevels},
+ BuiltinData{"textureNumSamples", BuiltinType::kTextureNumSamples},
+ BuiltinData{"textureSample", BuiltinType::kTextureSample},
+ BuiltinData{"textureSampleBias", BuiltinType::kTextureSampleBias},
+ BuiltinData{"textureSampleCompare", BuiltinType::kTextureSampleCompare},
+ BuiltinData{"textureSampleCompareLevel",
+ BuiltinType::kTextureSampleCompareLevel},
+ BuiltinData{"textureSampleGrad", BuiltinType::kTextureSampleGrad},
+ BuiltinData{"textureSampleLevel", BuiltinType::kTextureSampleLevel},
+ BuiltinData{"trunc", BuiltinType::kTrunc},
+ BuiltinData{"unpack2x16float", BuiltinType::kUnpack2x16float},
+ BuiltinData{"unpack2x16snorm", BuiltinType::kUnpack2x16snorm},
+ BuiltinData{"unpack2x16unorm", BuiltinType::kUnpack2x16unorm},
+ BuiltinData{"unpack4x8snorm", BuiltinType::kUnpack4x8snorm},
+ BuiltinData{"unpack4x8unorm", BuiltinType::kUnpack4x8unorm},
+ BuiltinData{"workgroupBarrier", BuiltinType::kWorkgroupBarrier}));
TEST_F(BuiltinTypeTest, ParseNoMatch) {
- EXPECT_EQ(ParseBuiltinType("not_builtin"), BuiltinType::kNone);
+ EXPECT_EQ(ParseBuiltinType("not_builtin"), BuiltinType::kNone);
}
} // namespace
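
The test file above uses GoogleTest's value-parameterized tests: one TEST_P body is instantiated once per BuiltinData entry passed to INSTANTIATE_TEST_SUITE_P. A self-contained sketch of the same pattern, using a hypothetical ParseColor() in place of ParseBuiltinType() so it does not depend on tint (link against gtest_main to run):

    // Standalone sketch of the value-parameterized test pattern.
    #include <string>
    #include "gtest/gtest.h"

    enum class Color { kNone, kRed, kGreen };

    Color ParseColor(const std::string& name) {
        if (name == "red") return Color::kRed;
        if (name == "green") return Color::kGreen;
        return Color::kNone;
    }

    struct ColorCase {
        const char* name;
        Color color;
    };

    using ParseColorTest = testing::TestWithParam<ColorCase>;

    // Runs once per ColorCase supplied below.
    TEST_P(ParseColorTest, Parse) {
        auto param = GetParam();
        EXPECT_EQ(ParseColor(param.name), param.color);
    }

    INSTANTIATE_TEST_SUITE_P(AllColors,
                             ParseColorTest,
                             testing::Values(ColorCase{"red", Color::kRed},
                                             ColorCase{"green", Color::kGreen}));
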
diff --git a/chromium/third_party/dawn/src/tint/sem/builtin_type.cc b/chromium/third_party/dawn/src/tint/sem/builtin_type.cc
index eb22a2f6482..870736953e3 100644
--- a/chromium/third_party/dawn/src/tint/sem/builtin_type.cc
+++ b/chromium/third_party/dawn/src/tint/sem/builtin_type.cc
@@ -13,11 +13,11 @@
// limitations under the License.
////////////////////////////////////////////////////////////////////////////////
-// File generated by tools/builtin-gen
+// File generated by tools/intrinsic-gen
// using the template:
// src/tint/sem/builtin_type.cc.tmpl
-// and the builtin defintion file:
-// src/tint/builtins.def
+// and the intrinsic definition file:
+// src/tint/intrinsics.def
//
// Do not modify this file directly
////////////////////////////////////////////////////////////////////////////////
@@ -29,545 +29,555 @@
namespace tint::sem {
BuiltinType ParseBuiltinType(const std::string& name) {
- if (name == "abs") {
- return BuiltinType::kAbs;
- }
- if (name == "acos") {
- return BuiltinType::kAcos;
- }
- if (name == "all") {
- return BuiltinType::kAll;
- }
- if (name == "any") {
- return BuiltinType::kAny;
- }
- if (name == "arrayLength") {
- return BuiltinType::kArrayLength;
- }
- if (name == "asin") {
- return BuiltinType::kAsin;
- }
- if (name == "atan") {
- return BuiltinType::kAtan;
- }
- if (name == "atan2") {
- return BuiltinType::kAtan2;
- }
- if (name == "ceil") {
- return BuiltinType::kCeil;
- }
- if (name == "clamp") {
- return BuiltinType::kClamp;
- }
- if (name == "cos") {
- return BuiltinType::kCos;
- }
- if (name == "cosh") {
- return BuiltinType::kCosh;
- }
- if (name == "countLeadingZeros") {
- return BuiltinType::kCountLeadingZeros;
- }
- if (name == "countOneBits") {
- return BuiltinType::kCountOneBits;
- }
- if (name == "countTrailingZeros") {
- return BuiltinType::kCountTrailingZeros;
- }
- if (name == "cross") {
- return BuiltinType::kCross;
- }
- if (name == "degrees") {
- return BuiltinType::kDegrees;
- }
- if (name == "determinant") {
- return BuiltinType::kDeterminant;
- }
- if (name == "distance") {
- return BuiltinType::kDistance;
- }
- if (name == "dot") {
- return BuiltinType::kDot;
- }
- if (name == "dpdx") {
- return BuiltinType::kDpdx;
- }
- if (name == "dpdxCoarse") {
- return BuiltinType::kDpdxCoarse;
- }
- if (name == "dpdxFine") {
- return BuiltinType::kDpdxFine;
- }
- if (name == "dpdy") {
- return BuiltinType::kDpdy;
- }
- if (name == "dpdyCoarse") {
- return BuiltinType::kDpdyCoarse;
- }
- if (name == "dpdyFine") {
- return BuiltinType::kDpdyFine;
- }
- if (name == "exp") {
- return BuiltinType::kExp;
- }
- if (name == "exp2") {
- return BuiltinType::kExp2;
- }
- if (name == "extractBits") {
- return BuiltinType::kExtractBits;
- }
- if (name == "faceForward") {
- return BuiltinType::kFaceForward;
- }
- if (name == "firstLeadingBit") {
- return BuiltinType::kFirstLeadingBit;
- }
- if (name == "firstTrailingBit") {
- return BuiltinType::kFirstTrailingBit;
- }
- if (name == "floor") {
- return BuiltinType::kFloor;
- }
- if (name == "fma") {
- return BuiltinType::kFma;
- }
- if (name == "fract") {
- return BuiltinType::kFract;
- }
- if (name == "frexp") {
- return BuiltinType::kFrexp;
- }
- if (name == "fwidth") {
- return BuiltinType::kFwidth;
- }
- if (name == "fwidthCoarse") {
- return BuiltinType::kFwidthCoarse;
- }
- if (name == "fwidthFine") {
- return BuiltinType::kFwidthFine;
- }
- if (name == "insertBits") {
- return BuiltinType::kInsertBits;
- }
- if (name == "inverseSqrt") {
- return BuiltinType::kInverseSqrt;
- }
- if (name == "ldexp") {
- return BuiltinType::kLdexp;
- }
- if (name == "length") {
- return BuiltinType::kLength;
- }
- if (name == "log") {
- return BuiltinType::kLog;
- }
- if (name == "log2") {
- return BuiltinType::kLog2;
- }
- if (name == "max") {
- return BuiltinType::kMax;
- }
- if (name == "min") {
- return BuiltinType::kMin;
- }
- if (name == "mix") {
- return BuiltinType::kMix;
- }
- if (name == "modf") {
- return BuiltinType::kModf;
- }
- if (name == "normalize") {
- return BuiltinType::kNormalize;
- }
- if (name == "pack2x16float") {
- return BuiltinType::kPack2x16float;
- }
- if (name == "pack2x16snorm") {
- return BuiltinType::kPack2x16snorm;
- }
- if (name == "pack2x16unorm") {
- return BuiltinType::kPack2x16unorm;
- }
- if (name == "pack4x8snorm") {
- return BuiltinType::kPack4x8snorm;
- }
- if (name == "pack4x8unorm") {
- return BuiltinType::kPack4x8unorm;
- }
- if (name == "pow") {
- return BuiltinType::kPow;
- }
- if (name == "radians") {
- return BuiltinType::kRadians;
- }
- if (name == "reflect") {
- return BuiltinType::kReflect;
- }
- if (name == "refract") {
- return BuiltinType::kRefract;
- }
- if (name == "reverseBits") {
- return BuiltinType::kReverseBits;
- }
- if (name == "round") {
- return BuiltinType::kRound;
- }
- if (name == "select") {
- return BuiltinType::kSelect;
- }
- if (name == "sign") {
- return BuiltinType::kSign;
- }
- if (name == "sin") {
- return BuiltinType::kSin;
- }
- if (name == "sinh") {
- return BuiltinType::kSinh;
- }
- if (name == "smoothstep") {
- return BuiltinType::kSmoothstep;
- }
- if (name == "smoothStep") {
- return BuiltinType::kSmoothStep;
- }
- if (name == "sqrt") {
- return BuiltinType::kSqrt;
- }
- if (name == "step") {
- return BuiltinType::kStep;
- }
- if (name == "storageBarrier") {
- return BuiltinType::kStorageBarrier;
- }
- if (name == "tan") {
- return BuiltinType::kTan;
- }
- if (name == "tanh") {
- return BuiltinType::kTanh;
- }
- if (name == "transpose") {
- return BuiltinType::kTranspose;
- }
- if (name == "trunc") {
- return BuiltinType::kTrunc;
- }
- if (name == "unpack2x16float") {
- return BuiltinType::kUnpack2x16float;
- }
- if (name == "unpack2x16snorm") {
- return BuiltinType::kUnpack2x16snorm;
- }
- if (name == "unpack2x16unorm") {
- return BuiltinType::kUnpack2x16unorm;
- }
- if (name == "unpack4x8snorm") {
- return BuiltinType::kUnpack4x8snorm;
- }
- if (name == "unpack4x8unorm") {
- return BuiltinType::kUnpack4x8unorm;
- }
- if (name == "workgroupBarrier") {
- return BuiltinType::kWorkgroupBarrier;
- }
- if (name == "textureDimensions") {
- return BuiltinType::kTextureDimensions;
- }
- if (name == "textureGather") {
- return BuiltinType::kTextureGather;
- }
- if (name == "textureGatherCompare") {
- return BuiltinType::kTextureGatherCompare;
- }
- if (name == "textureNumLayers") {
- return BuiltinType::kTextureNumLayers;
- }
- if (name == "textureNumLevels") {
- return BuiltinType::kTextureNumLevels;
- }
- if (name == "textureNumSamples") {
- return BuiltinType::kTextureNumSamples;
- }
- if (name == "textureSample") {
- return BuiltinType::kTextureSample;
- }
- if (name == "textureSampleBias") {
- return BuiltinType::kTextureSampleBias;
- }
- if (name == "textureSampleCompare") {
- return BuiltinType::kTextureSampleCompare;
- }
- if (name == "textureSampleCompareLevel") {
- return BuiltinType::kTextureSampleCompareLevel;
- }
- if (name == "textureSampleGrad") {
- return BuiltinType::kTextureSampleGrad;
- }
- if (name == "textureSampleLevel") {
- return BuiltinType::kTextureSampleLevel;
- }
- if (name == "textureStore") {
- return BuiltinType::kTextureStore;
- }
- if (name == "textureLoad") {
- return BuiltinType::kTextureLoad;
- }
- if (name == "atomicLoad") {
- return BuiltinType::kAtomicLoad;
- }
- if (name == "atomicStore") {
- return BuiltinType::kAtomicStore;
- }
- if (name == "atomicAdd") {
- return BuiltinType::kAtomicAdd;
- }
- if (name == "atomicSub") {
- return BuiltinType::kAtomicSub;
- }
- if (name == "atomicMax") {
- return BuiltinType::kAtomicMax;
- }
- if (name == "atomicMin") {
- return BuiltinType::kAtomicMin;
- }
- if (name == "atomicAnd") {
- return BuiltinType::kAtomicAnd;
- }
- if (name == "atomicOr") {
- return BuiltinType::kAtomicOr;
- }
- if (name == "atomicXor") {
- return BuiltinType::kAtomicXor;
- }
- if (name == "atomicExchange") {
- return BuiltinType::kAtomicExchange;
- }
- if (name == "atomicCompareExchangeWeak") {
- return BuiltinType::kAtomicCompareExchangeWeak;
- }
- return BuiltinType::kNone;
+ if (name == "abs") {
+ return BuiltinType::kAbs;
+ }
+ if (name == "acos") {
+ return BuiltinType::kAcos;
+ }
+ if (name == "all") {
+ return BuiltinType::kAll;
+ }
+ if (name == "any") {
+ return BuiltinType::kAny;
+ }
+ if (name == "arrayLength") {
+ return BuiltinType::kArrayLength;
+ }
+ if (name == "asin") {
+ return BuiltinType::kAsin;
+ }
+ if (name == "atan") {
+ return BuiltinType::kAtan;
+ }
+ if (name == "atan2") {
+ return BuiltinType::kAtan2;
+ }
+ if (name == "ceil") {
+ return BuiltinType::kCeil;
+ }
+ if (name == "clamp") {
+ return BuiltinType::kClamp;
+ }
+ if (name == "cos") {
+ return BuiltinType::kCos;
+ }
+ if (name == "cosh") {
+ return BuiltinType::kCosh;
+ }
+ if (name == "countLeadingZeros") {
+ return BuiltinType::kCountLeadingZeros;
+ }
+ if (name == "countOneBits") {
+ return BuiltinType::kCountOneBits;
+ }
+ if (name == "countTrailingZeros") {
+ return BuiltinType::kCountTrailingZeros;
+ }
+ if (name == "cross") {
+ return BuiltinType::kCross;
+ }
+ if (name == "degrees") {
+ return BuiltinType::kDegrees;
+ }
+ if (name == "determinant") {
+ return BuiltinType::kDeterminant;
+ }
+ if (name == "distance") {
+ return BuiltinType::kDistance;
+ }
+ if (name == "dot") {
+ return BuiltinType::kDot;
+ }
+ if (name == "dot4I8Packed") {
+ return BuiltinType::kDot4I8Packed;
+ }
+ if (name == "dot4U8Packed") {
+ return BuiltinType::kDot4U8Packed;
+ }
+ if (name == "dpdx") {
+ return BuiltinType::kDpdx;
+ }
+ if (name == "dpdxCoarse") {
+ return BuiltinType::kDpdxCoarse;
+ }
+ if (name == "dpdxFine") {
+ return BuiltinType::kDpdxFine;
+ }
+ if (name == "dpdy") {
+ return BuiltinType::kDpdy;
+ }
+ if (name == "dpdyCoarse") {
+ return BuiltinType::kDpdyCoarse;
+ }
+ if (name == "dpdyFine") {
+ return BuiltinType::kDpdyFine;
+ }
+ if (name == "exp") {
+ return BuiltinType::kExp;
+ }
+ if (name == "exp2") {
+ return BuiltinType::kExp2;
+ }
+ if (name == "extractBits") {
+ return BuiltinType::kExtractBits;
+ }
+ if (name == "faceForward") {
+ return BuiltinType::kFaceForward;
+ }
+ if (name == "firstLeadingBit") {
+ return BuiltinType::kFirstLeadingBit;
+ }
+ if (name == "firstTrailingBit") {
+ return BuiltinType::kFirstTrailingBit;
+ }
+ if (name == "floor") {
+ return BuiltinType::kFloor;
+ }
+ if (name == "fma") {
+ return BuiltinType::kFma;
+ }
+ if (name == "fract") {
+ return BuiltinType::kFract;
+ }
+ if (name == "frexp") {
+ return BuiltinType::kFrexp;
+ }
+ if (name == "fwidth") {
+ return BuiltinType::kFwidth;
+ }
+ if (name == "fwidthCoarse") {
+ return BuiltinType::kFwidthCoarse;
+ }
+ if (name == "fwidthFine") {
+ return BuiltinType::kFwidthFine;
+ }
+ if (name == "insertBits") {
+ return BuiltinType::kInsertBits;
+ }
+ if (name == "inverseSqrt") {
+ return BuiltinType::kInverseSqrt;
+ }
+ if (name == "ldexp") {
+ return BuiltinType::kLdexp;
+ }
+ if (name == "length") {
+ return BuiltinType::kLength;
+ }
+ if (name == "log") {
+ return BuiltinType::kLog;
+ }
+ if (name == "log2") {
+ return BuiltinType::kLog2;
+ }
+ if (name == "max") {
+ return BuiltinType::kMax;
+ }
+ if (name == "min") {
+ return BuiltinType::kMin;
+ }
+ if (name == "mix") {
+ return BuiltinType::kMix;
+ }
+ if (name == "modf") {
+ return BuiltinType::kModf;
+ }
+ if (name == "normalize") {
+ return BuiltinType::kNormalize;
+ }
+ if (name == "pack2x16float") {
+ return BuiltinType::kPack2x16float;
+ }
+ if (name == "pack2x16snorm") {
+ return BuiltinType::kPack2x16snorm;
+ }
+ if (name == "pack2x16unorm") {
+ return BuiltinType::kPack2x16unorm;
+ }
+ if (name == "pack4x8snorm") {
+ return BuiltinType::kPack4x8snorm;
+ }
+ if (name == "pack4x8unorm") {
+ return BuiltinType::kPack4x8unorm;
+ }
+ if (name == "pow") {
+ return BuiltinType::kPow;
+ }
+ if (name == "radians") {
+ return BuiltinType::kRadians;
+ }
+ if (name == "reflect") {
+ return BuiltinType::kReflect;
+ }
+ if (name == "refract") {
+ return BuiltinType::kRefract;
+ }
+ if (name == "reverseBits") {
+ return BuiltinType::kReverseBits;
+ }
+ if (name == "round") {
+ return BuiltinType::kRound;
+ }
+ if (name == "select") {
+ return BuiltinType::kSelect;
+ }
+ if (name == "sign") {
+ return BuiltinType::kSign;
+ }
+ if (name == "sin") {
+ return BuiltinType::kSin;
+ }
+ if (name == "sinh") {
+ return BuiltinType::kSinh;
+ }
+ if (name == "smoothstep") {
+ return BuiltinType::kSmoothstep;
+ }
+ if (name == "smoothStep") {
+ return BuiltinType::kSmoothStep;
+ }
+ if (name == "sqrt") {
+ return BuiltinType::kSqrt;
+ }
+ if (name == "step") {
+ return BuiltinType::kStep;
+ }
+ if (name == "storageBarrier") {
+ return BuiltinType::kStorageBarrier;
+ }
+ if (name == "tan") {
+ return BuiltinType::kTan;
+ }
+ if (name == "tanh") {
+ return BuiltinType::kTanh;
+ }
+ if (name == "transpose") {
+ return BuiltinType::kTranspose;
+ }
+ if (name == "trunc") {
+ return BuiltinType::kTrunc;
+ }
+ if (name == "unpack2x16float") {
+ return BuiltinType::kUnpack2x16float;
+ }
+ if (name == "unpack2x16snorm") {
+ return BuiltinType::kUnpack2x16snorm;
+ }
+ if (name == "unpack2x16unorm") {
+ return BuiltinType::kUnpack2x16unorm;
+ }
+ if (name == "unpack4x8snorm") {
+ return BuiltinType::kUnpack4x8snorm;
+ }
+ if (name == "unpack4x8unorm") {
+ return BuiltinType::kUnpack4x8unorm;
+ }
+ if (name == "workgroupBarrier") {
+ return BuiltinType::kWorkgroupBarrier;
+ }
+ if (name == "textureDimensions") {
+ return BuiltinType::kTextureDimensions;
+ }
+ if (name == "textureGather") {
+ return BuiltinType::kTextureGather;
+ }
+ if (name == "textureGatherCompare") {
+ return BuiltinType::kTextureGatherCompare;
+ }
+ if (name == "textureNumLayers") {
+ return BuiltinType::kTextureNumLayers;
+ }
+ if (name == "textureNumLevels") {
+ return BuiltinType::kTextureNumLevels;
+ }
+ if (name == "textureNumSamples") {
+ return BuiltinType::kTextureNumSamples;
+ }
+ if (name == "textureSample") {
+ return BuiltinType::kTextureSample;
+ }
+ if (name == "textureSampleBias") {
+ return BuiltinType::kTextureSampleBias;
+ }
+ if (name == "textureSampleCompare") {
+ return BuiltinType::kTextureSampleCompare;
+ }
+ if (name == "textureSampleCompareLevel") {
+ return BuiltinType::kTextureSampleCompareLevel;
+ }
+ if (name == "textureSampleGrad") {
+ return BuiltinType::kTextureSampleGrad;
+ }
+ if (name == "textureSampleLevel") {
+ return BuiltinType::kTextureSampleLevel;
+ }
+ if (name == "textureStore") {
+ return BuiltinType::kTextureStore;
+ }
+ if (name == "textureLoad") {
+ return BuiltinType::kTextureLoad;
+ }
+ if (name == "atomicLoad") {
+ return BuiltinType::kAtomicLoad;
+ }
+ if (name == "atomicStore") {
+ return BuiltinType::kAtomicStore;
+ }
+ if (name == "atomicAdd") {
+ return BuiltinType::kAtomicAdd;
+ }
+ if (name == "atomicSub") {
+ return BuiltinType::kAtomicSub;
+ }
+ if (name == "atomicMax") {
+ return BuiltinType::kAtomicMax;
+ }
+ if (name == "atomicMin") {
+ return BuiltinType::kAtomicMin;
+ }
+ if (name == "atomicAnd") {
+ return BuiltinType::kAtomicAnd;
+ }
+ if (name == "atomicOr") {
+ return BuiltinType::kAtomicOr;
+ }
+ if (name == "atomicXor") {
+ return BuiltinType::kAtomicXor;
+ }
+ if (name == "atomicExchange") {
+ return BuiltinType::kAtomicExchange;
+ }
+ if (name == "atomicCompareExchangeWeak") {
+ return BuiltinType::kAtomicCompareExchangeWeak;
+ }
+ return BuiltinType::kNone;
}
const char* str(BuiltinType i) {
- switch (i) {
- case BuiltinType::kNone:
- return "<none>";
- case BuiltinType::kAbs:
- return "abs";
- case BuiltinType::kAcos:
- return "acos";
- case BuiltinType::kAll:
- return "all";
- case BuiltinType::kAny:
- return "any";
- case BuiltinType::kArrayLength:
- return "arrayLength";
- case BuiltinType::kAsin:
- return "asin";
- case BuiltinType::kAtan:
- return "atan";
- case BuiltinType::kAtan2:
- return "atan2";
- case BuiltinType::kCeil:
- return "ceil";
- case BuiltinType::kClamp:
- return "clamp";
- case BuiltinType::kCos:
- return "cos";
- case BuiltinType::kCosh:
- return "cosh";
- case BuiltinType::kCountLeadingZeros:
- return "countLeadingZeros";
- case BuiltinType::kCountOneBits:
- return "countOneBits";
- case BuiltinType::kCountTrailingZeros:
- return "countTrailingZeros";
- case BuiltinType::kCross:
- return "cross";
- case BuiltinType::kDegrees:
- return "degrees";
- case BuiltinType::kDeterminant:
- return "determinant";
- case BuiltinType::kDistance:
- return "distance";
- case BuiltinType::kDot:
- return "dot";
- case BuiltinType::kDpdx:
- return "dpdx";
- case BuiltinType::kDpdxCoarse:
- return "dpdxCoarse";
- case BuiltinType::kDpdxFine:
- return "dpdxFine";
- case BuiltinType::kDpdy:
- return "dpdy";
- case BuiltinType::kDpdyCoarse:
- return "dpdyCoarse";
- case BuiltinType::kDpdyFine:
- return "dpdyFine";
- case BuiltinType::kExp:
- return "exp";
- case BuiltinType::kExp2:
- return "exp2";
- case BuiltinType::kExtractBits:
- return "extractBits";
- case BuiltinType::kFaceForward:
- return "faceForward";
- case BuiltinType::kFirstLeadingBit:
- return "firstLeadingBit";
- case BuiltinType::kFirstTrailingBit:
- return "firstTrailingBit";
- case BuiltinType::kFloor:
- return "floor";
- case BuiltinType::kFma:
- return "fma";
- case BuiltinType::kFract:
- return "fract";
- case BuiltinType::kFrexp:
- return "frexp";
- case BuiltinType::kFwidth:
- return "fwidth";
- case BuiltinType::kFwidthCoarse:
- return "fwidthCoarse";
- case BuiltinType::kFwidthFine:
- return "fwidthFine";
- case BuiltinType::kInsertBits:
- return "insertBits";
- case BuiltinType::kInverseSqrt:
- return "inverseSqrt";
- case BuiltinType::kLdexp:
- return "ldexp";
- case BuiltinType::kLength:
- return "length";
- case BuiltinType::kLog:
- return "log";
- case BuiltinType::kLog2:
- return "log2";
- case BuiltinType::kMax:
- return "max";
- case BuiltinType::kMin:
- return "min";
- case BuiltinType::kMix:
- return "mix";
- case BuiltinType::kModf:
- return "modf";
- case BuiltinType::kNormalize:
- return "normalize";
- case BuiltinType::kPack2x16float:
- return "pack2x16float";
- case BuiltinType::kPack2x16snorm:
- return "pack2x16snorm";
- case BuiltinType::kPack2x16unorm:
- return "pack2x16unorm";
- case BuiltinType::kPack4x8snorm:
- return "pack4x8snorm";
- case BuiltinType::kPack4x8unorm:
- return "pack4x8unorm";
- case BuiltinType::kPow:
- return "pow";
- case BuiltinType::kRadians:
- return "radians";
- case BuiltinType::kReflect:
- return "reflect";
- case BuiltinType::kRefract:
- return "refract";
- case BuiltinType::kReverseBits:
- return "reverseBits";
- case BuiltinType::kRound:
- return "round";
- case BuiltinType::kSelect:
- return "select";
- case BuiltinType::kSign:
- return "sign";
- case BuiltinType::kSin:
- return "sin";
- case BuiltinType::kSinh:
- return "sinh";
- case BuiltinType::kSmoothstep:
- return "smoothstep";
- case BuiltinType::kSmoothStep:
- return "smoothStep";
- case BuiltinType::kSqrt:
- return "sqrt";
- case BuiltinType::kStep:
- return "step";
- case BuiltinType::kStorageBarrier:
- return "storageBarrier";
- case BuiltinType::kTan:
- return "tan";
- case BuiltinType::kTanh:
- return "tanh";
- case BuiltinType::kTranspose:
- return "transpose";
- case BuiltinType::kTrunc:
- return "trunc";
- case BuiltinType::kUnpack2x16float:
- return "unpack2x16float";
- case BuiltinType::kUnpack2x16snorm:
- return "unpack2x16snorm";
- case BuiltinType::kUnpack2x16unorm:
- return "unpack2x16unorm";
- case BuiltinType::kUnpack4x8snorm:
- return "unpack4x8snorm";
- case BuiltinType::kUnpack4x8unorm:
- return "unpack4x8unorm";
- case BuiltinType::kWorkgroupBarrier:
- return "workgroupBarrier";
- case BuiltinType::kTextureDimensions:
- return "textureDimensions";
- case BuiltinType::kTextureGather:
- return "textureGather";
- case BuiltinType::kTextureGatherCompare:
- return "textureGatherCompare";
- case BuiltinType::kTextureNumLayers:
- return "textureNumLayers";
- case BuiltinType::kTextureNumLevels:
- return "textureNumLevels";
- case BuiltinType::kTextureNumSamples:
- return "textureNumSamples";
- case BuiltinType::kTextureSample:
- return "textureSample";
- case BuiltinType::kTextureSampleBias:
- return "textureSampleBias";
- case BuiltinType::kTextureSampleCompare:
- return "textureSampleCompare";
- case BuiltinType::kTextureSampleCompareLevel:
- return "textureSampleCompareLevel";
- case BuiltinType::kTextureSampleGrad:
- return "textureSampleGrad";
- case BuiltinType::kTextureSampleLevel:
- return "textureSampleLevel";
- case BuiltinType::kTextureStore:
- return "textureStore";
- case BuiltinType::kTextureLoad:
- return "textureLoad";
- case BuiltinType::kAtomicLoad:
- return "atomicLoad";
- case BuiltinType::kAtomicStore:
- return "atomicStore";
- case BuiltinType::kAtomicAdd:
- return "atomicAdd";
- case BuiltinType::kAtomicSub:
- return "atomicSub";
- case BuiltinType::kAtomicMax:
- return "atomicMax";
- case BuiltinType::kAtomicMin:
- return "atomicMin";
- case BuiltinType::kAtomicAnd:
- return "atomicAnd";
- case BuiltinType::kAtomicOr:
- return "atomicOr";
- case BuiltinType::kAtomicXor:
- return "atomicXor";
- case BuiltinType::kAtomicExchange:
- return "atomicExchange";
- case BuiltinType::kAtomicCompareExchangeWeak:
- return "atomicCompareExchangeWeak";
- }
- return "<unknown>";
+ switch (i) {
+ case BuiltinType::kNone:
+ return "<none>";
+ case BuiltinType::kAbs:
+ return "abs";
+ case BuiltinType::kAcos:
+ return "acos";
+ case BuiltinType::kAll:
+ return "all";
+ case BuiltinType::kAny:
+ return "any";
+ case BuiltinType::kArrayLength:
+ return "arrayLength";
+ case BuiltinType::kAsin:
+ return "asin";
+ case BuiltinType::kAtan:
+ return "atan";
+ case BuiltinType::kAtan2:
+ return "atan2";
+ case BuiltinType::kCeil:
+ return "ceil";
+ case BuiltinType::kClamp:
+ return "clamp";
+ case BuiltinType::kCos:
+ return "cos";
+ case BuiltinType::kCosh:
+ return "cosh";
+ case BuiltinType::kCountLeadingZeros:
+ return "countLeadingZeros";
+ case BuiltinType::kCountOneBits:
+ return "countOneBits";
+ case BuiltinType::kCountTrailingZeros:
+ return "countTrailingZeros";
+ case BuiltinType::kCross:
+ return "cross";
+ case BuiltinType::kDegrees:
+ return "degrees";
+ case BuiltinType::kDeterminant:
+ return "determinant";
+ case BuiltinType::kDistance:
+ return "distance";
+ case BuiltinType::kDot:
+ return "dot";
+ case BuiltinType::kDot4I8Packed:
+ return "dot4I8Packed";
+ case BuiltinType::kDot4U8Packed:
+ return "dot4U8Packed";
+ case BuiltinType::kDpdx:
+ return "dpdx";
+ case BuiltinType::kDpdxCoarse:
+ return "dpdxCoarse";
+ case BuiltinType::kDpdxFine:
+ return "dpdxFine";
+ case BuiltinType::kDpdy:
+ return "dpdy";
+ case BuiltinType::kDpdyCoarse:
+ return "dpdyCoarse";
+ case BuiltinType::kDpdyFine:
+ return "dpdyFine";
+ case BuiltinType::kExp:
+ return "exp";
+ case BuiltinType::kExp2:
+ return "exp2";
+ case BuiltinType::kExtractBits:
+ return "extractBits";
+ case BuiltinType::kFaceForward:
+ return "faceForward";
+ case BuiltinType::kFirstLeadingBit:
+ return "firstLeadingBit";
+ case BuiltinType::kFirstTrailingBit:
+ return "firstTrailingBit";
+ case BuiltinType::kFloor:
+ return "floor";
+ case BuiltinType::kFma:
+ return "fma";
+ case BuiltinType::kFract:
+ return "fract";
+ case BuiltinType::kFrexp:
+ return "frexp";
+ case BuiltinType::kFwidth:
+ return "fwidth";
+ case BuiltinType::kFwidthCoarse:
+ return "fwidthCoarse";
+ case BuiltinType::kFwidthFine:
+ return "fwidthFine";
+ case BuiltinType::kInsertBits:
+ return "insertBits";
+ case BuiltinType::kInverseSqrt:
+ return "inverseSqrt";
+ case BuiltinType::kLdexp:
+ return "ldexp";
+ case BuiltinType::kLength:
+ return "length";
+ case BuiltinType::kLog:
+ return "log";
+ case BuiltinType::kLog2:
+ return "log2";
+ case BuiltinType::kMax:
+ return "max";
+ case BuiltinType::kMin:
+ return "min";
+ case BuiltinType::kMix:
+ return "mix";
+ case BuiltinType::kModf:
+ return "modf";
+ case BuiltinType::kNormalize:
+ return "normalize";
+ case BuiltinType::kPack2x16float:
+ return "pack2x16float";
+ case BuiltinType::kPack2x16snorm:
+ return "pack2x16snorm";
+ case BuiltinType::kPack2x16unorm:
+ return "pack2x16unorm";
+ case BuiltinType::kPack4x8snorm:
+ return "pack4x8snorm";
+ case BuiltinType::kPack4x8unorm:
+ return "pack4x8unorm";
+ case BuiltinType::kPow:
+ return "pow";
+ case BuiltinType::kRadians:
+ return "radians";
+ case BuiltinType::kReflect:
+ return "reflect";
+ case BuiltinType::kRefract:
+ return "refract";
+ case BuiltinType::kReverseBits:
+ return "reverseBits";
+ case BuiltinType::kRound:
+ return "round";
+ case BuiltinType::kSelect:
+ return "select";
+ case BuiltinType::kSign:
+ return "sign";
+ case BuiltinType::kSin:
+ return "sin";
+ case BuiltinType::kSinh:
+ return "sinh";
+ case BuiltinType::kSmoothstep:
+ return "smoothstep";
+ case BuiltinType::kSmoothStep:
+ return "smoothStep";
+ case BuiltinType::kSqrt:
+ return "sqrt";
+ case BuiltinType::kStep:
+ return "step";
+ case BuiltinType::kStorageBarrier:
+ return "storageBarrier";
+ case BuiltinType::kTan:
+ return "tan";
+ case BuiltinType::kTanh:
+ return "tanh";
+ case BuiltinType::kTranspose:
+ return "transpose";
+ case BuiltinType::kTrunc:
+ return "trunc";
+ case BuiltinType::kUnpack2x16float:
+ return "unpack2x16float";
+ case BuiltinType::kUnpack2x16snorm:
+ return "unpack2x16snorm";
+ case BuiltinType::kUnpack2x16unorm:
+ return "unpack2x16unorm";
+ case BuiltinType::kUnpack4x8snorm:
+ return "unpack4x8snorm";
+ case BuiltinType::kUnpack4x8unorm:
+ return "unpack4x8unorm";
+ case BuiltinType::kWorkgroupBarrier:
+ return "workgroupBarrier";
+ case BuiltinType::kTextureDimensions:
+ return "textureDimensions";
+ case BuiltinType::kTextureGather:
+ return "textureGather";
+ case BuiltinType::kTextureGatherCompare:
+ return "textureGatherCompare";
+ case BuiltinType::kTextureNumLayers:
+ return "textureNumLayers";
+ case BuiltinType::kTextureNumLevels:
+ return "textureNumLevels";
+ case BuiltinType::kTextureNumSamples:
+ return "textureNumSamples";
+ case BuiltinType::kTextureSample:
+ return "textureSample";
+ case BuiltinType::kTextureSampleBias:
+ return "textureSampleBias";
+ case BuiltinType::kTextureSampleCompare:
+ return "textureSampleCompare";
+ case BuiltinType::kTextureSampleCompareLevel:
+ return "textureSampleCompareLevel";
+ case BuiltinType::kTextureSampleGrad:
+ return "textureSampleGrad";
+ case BuiltinType::kTextureSampleLevel:
+ return "textureSampleLevel";
+ case BuiltinType::kTextureStore:
+ return "textureStore";
+ case BuiltinType::kTextureLoad:
+ return "textureLoad";
+ case BuiltinType::kAtomicLoad:
+ return "atomicLoad";
+ case BuiltinType::kAtomicStore:
+ return "atomicStore";
+ case BuiltinType::kAtomicAdd:
+ return "atomicAdd";
+ case BuiltinType::kAtomicSub:
+ return "atomicSub";
+ case BuiltinType::kAtomicMax:
+ return "atomicMax";
+ case BuiltinType::kAtomicMin:
+ return "atomicMin";
+ case BuiltinType::kAtomicAnd:
+ return "atomicAnd";
+ case BuiltinType::kAtomicOr:
+ return "atomicOr";
+ case BuiltinType::kAtomicXor:
+ return "atomicXor";
+ case BuiltinType::kAtomicExchange:
+ return "atomicExchange";
+ case BuiltinType::kAtomicCompareExchangeWeak:
+ return "atomicCompareExchangeWeak";
+ }
+ return "<unknown>";
}
std::ostream& operator<<(std::ostream& out, BuiltinType i) {
- out << str(i);
- return out;
+ out << str(i);
+ return out;
}
} // namespace tint::sem
diff --git a/chromium/third_party/dawn/src/tint/sem/builtin_type.cc.tmpl b/chromium/third_party/dawn/src/tint/sem/builtin_type.cc.tmpl
index ab072d92cf9..4e830dac957 100644
--- a/chromium/third_party/dawn/src/tint/sem/builtin_type.cc.tmpl
+++ b/chromium/third_party/dawn/src/tint/sem/builtin_type.cc.tmpl
@@ -3,7 +3,7 @@
Template file for use with tools/builtin-gen to generate builtin_type.cc
See:
-* tools/cmd/builtin-gen/gen for structures used by this template
+* tools/cmd/intrinsic-gen/gen for structures used by this template
* https://golang.org/pkg/text/template/ for documentation on the template syntax
--------------------------------------------------------------------------------
*/ -}}
@@ -15,29 +15,29 @@ See:
namespace tint::sem {
BuiltinType ParseBuiltinType(const std::string& name) {
-{{- range .Sem.Functions }}
- if (name == "{{.Name}}") {
- return BuiltinType::k{{Title .Name}};
- }
+{{- range .Sem.Builtins }}
+ if (name == "{{.Name}}") {
+ return BuiltinType::k{{Title .Name}};
+ }
{{- end }}
- return BuiltinType::kNone;
+ return BuiltinType::kNone;
}
const char* str(BuiltinType i) {
- switch (i) {
- case BuiltinType::kNone:
- return "<none>";
-{{- range .Sem.Functions }}
- case BuiltinType::k{{Title .Name}}:
- return "{{.Name}}";
+ switch (i) {
+ case BuiltinType::kNone:
+ return "<none>";
+{{- range .Sem.Builtins }}
+ case BuiltinType::k{{Title .Name}}:
+ return "{{.Name}}";
{{- end }}
- }
- return "<unknown>";
+ }
+ return "<unknown>";
}
std::ostream& operator<<(std::ostream& out, BuiltinType i) {
- out << str(i);
- return out;
+ out << str(i);
+ return out;
}
} // namespace tint::sem
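[Editor's note, not part of the diff] The regenerated ParseBuiltinType()/str() pair above round-trips builtin names; unrecognized names fall back to BuiltinType::kNone, which str() renders as "<none>". A minimal usage sketch, assuming the generated src/tint/sem/builtin_type.h declares both functions (the function name below is hypothetical):

#include <cassert>
#include <string>
#include "src/tint/sem/builtin_type.h"

void BuiltinNameRoundTrip() {
    using tint::sem::BuiltinType;
    // Known names parse to their enumerator and stringify back unchanged.
    BuiltinType b = tint::sem::ParseBuiltinType("atomicAdd");
    assert(b == BuiltinType::kAtomicAdd);
    assert(std::string(tint::sem::str(b)) == "atomicAdd");
    // Anything else maps to kNone, which prints as "<none>".
    assert(tint::sem::ParseBuiltinType("no_such_builtin") == BuiltinType::kNone);
    assert(std::string(tint::sem::str(BuiltinType::kNone)) == "<none>");
}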
diff --git a/chromium/third_party/dawn/src/tint/sem/builtin_type.h b/chromium/third_party/dawn/src/tint/sem/builtin_type.h
index e2d741f9d79..96d3c30e942 100644
--- a/chromium/third_party/dawn/src/tint/sem/builtin_type.h
+++ b/chromium/third_party/dawn/src/tint/sem/builtin_type.h
@@ -13,11 +13,11 @@
// limitations under the License.
////////////////////////////////////////////////////////////////////////////////
-// File generated by tools/builtin-gen
+// File generated by tools/intrinsic-gen
// using the template:
// src/tint/sem/builtin_type.h.tmpl
-// and the builtin defintion file:
-// src/tint/builtins.def
+// and the intrinsic definition file:
+// src/tint/intrinsics.def
//
// Do not modify this file directly
////////////////////////////////////////////////////////////////////////////////
@@ -32,112 +32,114 @@ namespace tint::sem {
/// Enumerator of all builtin functions
enum class BuiltinType {
- kNone = -1,
- kAbs,
- kAcos,
- kAll,
- kAny,
- kArrayLength,
- kAsin,
- kAtan,
- kAtan2,
- kCeil,
- kClamp,
- kCos,
- kCosh,
- kCountLeadingZeros,
- kCountOneBits,
- kCountTrailingZeros,
- kCross,
- kDegrees,
- kDeterminant,
- kDistance,
- kDot,
- kDpdx,
- kDpdxCoarse,
- kDpdxFine,
- kDpdy,
- kDpdyCoarse,
- kDpdyFine,
- kExp,
- kExp2,
- kExtractBits,
- kFaceForward,
- kFirstLeadingBit,
- kFirstTrailingBit,
- kFloor,
- kFma,
- kFract,
- kFrexp,
- kFwidth,
- kFwidthCoarse,
- kFwidthFine,
- kInsertBits,
- kInverseSqrt,
- kLdexp,
- kLength,
- kLog,
- kLog2,
- kMax,
- kMin,
- kMix,
- kModf,
- kNormalize,
- kPack2x16float,
- kPack2x16snorm,
- kPack2x16unorm,
- kPack4x8snorm,
- kPack4x8unorm,
- kPow,
- kRadians,
- kReflect,
- kRefract,
- kReverseBits,
- kRound,
- kSelect,
- kSign,
- kSin,
- kSinh,
- kSmoothstep,
- kSmoothStep,
- kSqrt,
- kStep,
- kStorageBarrier,
- kTan,
- kTanh,
- kTranspose,
- kTrunc,
- kUnpack2x16float,
- kUnpack2x16snorm,
- kUnpack2x16unorm,
- kUnpack4x8snorm,
- kUnpack4x8unorm,
- kWorkgroupBarrier,
- kTextureDimensions,
- kTextureGather,
- kTextureGatherCompare,
- kTextureNumLayers,
- kTextureNumLevels,
- kTextureNumSamples,
- kTextureSample,
- kTextureSampleBias,
- kTextureSampleCompare,
- kTextureSampleCompareLevel,
- kTextureSampleGrad,
- kTextureSampleLevel,
- kTextureStore,
- kTextureLoad,
- kAtomicLoad,
- kAtomicStore,
- kAtomicAdd,
- kAtomicSub,
- kAtomicMax,
- kAtomicMin,
- kAtomicAnd,
- kAtomicOr,
- kAtomicXor,
- kAtomicExchange,
- kAtomicCompareExchangeWeak,
+ kNone = -1,
+ kAbs,
+ kAcos,
+ kAll,
+ kAny,
+ kArrayLength,
+ kAsin,
+ kAtan,
+ kAtan2,
+ kCeil,
+ kClamp,
+ kCos,
+ kCosh,
+ kCountLeadingZeros,
+ kCountOneBits,
+ kCountTrailingZeros,
+ kCross,
+ kDegrees,
+ kDeterminant,
+ kDistance,
+ kDot,
+ kDot4I8Packed,
+ kDot4U8Packed,
+ kDpdx,
+ kDpdxCoarse,
+ kDpdxFine,
+ kDpdy,
+ kDpdyCoarse,
+ kDpdyFine,
+ kExp,
+ kExp2,
+ kExtractBits,
+ kFaceForward,
+ kFirstLeadingBit,
+ kFirstTrailingBit,
+ kFloor,
+ kFma,
+ kFract,
+ kFrexp,
+ kFwidth,
+ kFwidthCoarse,
+ kFwidthFine,
+ kInsertBits,
+ kInverseSqrt,
+ kLdexp,
+ kLength,
+ kLog,
+ kLog2,
+ kMax,
+ kMin,
+ kMix,
+ kModf,
+ kNormalize,
+ kPack2x16float,
+ kPack2x16snorm,
+ kPack2x16unorm,
+ kPack4x8snorm,
+ kPack4x8unorm,
+ kPow,
+ kRadians,
+ kReflect,
+ kRefract,
+ kReverseBits,
+ kRound,
+ kSelect,
+ kSign,
+ kSin,
+ kSinh,
+ kSmoothstep,
+ kSmoothStep,
+ kSqrt,
+ kStep,
+ kStorageBarrier,
+ kTan,
+ kTanh,
+ kTranspose,
+ kTrunc,
+ kUnpack2x16float,
+ kUnpack2x16snorm,
+ kUnpack2x16unorm,
+ kUnpack4x8snorm,
+ kUnpack4x8unorm,
+ kWorkgroupBarrier,
+ kTextureDimensions,
+ kTextureGather,
+ kTextureGatherCompare,
+ kTextureNumLayers,
+ kTextureNumLevels,
+ kTextureNumSamples,
+ kTextureSample,
+ kTextureSampleBias,
+ kTextureSampleCompare,
+ kTextureSampleCompareLevel,
+ kTextureSampleGrad,
+ kTextureSampleLevel,
+ kTextureStore,
+ kTextureLoad,
+ kAtomicLoad,
+ kAtomicStore,
+ kAtomicAdd,
+ kAtomicSub,
+ kAtomicMax,
+ kAtomicMin,
+ kAtomicAnd,
+ kAtomicOr,
+ kAtomicXor,
+ kAtomicExchange,
+ kAtomicCompareExchangeWeak,
};
/// Matches the BuiltinType by name
diff --git a/chromium/third_party/dawn/src/tint/sem/builtin_type.h.tmpl b/chromium/third_party/dawn/src/tint/sem/builtin_type.h.tmpl
index 7b018df53aa..258ac87f729 100644
--- a/chromium/third_party/dawn/src/tint/sem/builtin_type.h.tmpl
+++ b/chromium/third_party/dawn/src/tint/sem/builtin_type.h.tmpl
@@ -3,7 +3,7 @@
Template file for use with tools/builtin-gen to generate builtin_type.h
See:
-* tools/cmd/builtin-gen/gen for structures used by this template
+* tools/cmd/intrinsic-gen/gen for structures used by this template
* https://golang.org/pkg/text/template/ for documentation on the template syntax
--------------------------------------------------------------------------------
*/ -}}
@@ -18,9 +18,9 @@ namespace tint::sem {
/// Enumerator of all builtin functions
enum class BuiltinType {
- kNone = -1,
-{{- range .Sem.Functions }}
- k{{Title .Name}},
+ kNone = -1,
+{{- range .Sem.Builtins }}
+ k{{Title .Name}},
{{- end }}
};
diff --git a/chromium/third_party/dawn/src/tint/sem/call.cc b/chromium/third_party/dawn/src/tint/sem/call.cc
index 415316eab33..f688cce22f6 100644
--- a/chromium/third_party/dawn/src/tint/sem/call.cc
+++ b/chromium/third_party/dawn/src/tint/sem/call.cc
@@ -27,11 +27,7 @@ Call::Call(const ast::CallExpression* declaration,
const Statement* statement,
Constant constant,
bool has_side_effects)
- : Base(declaration,
- target->ReturnType(),
- statement,
- std::move(constant),
- has_side_effects),
+ : Base(declaration, target->ReturnType(), statement, std::move(constant), has_side_effects),
target_(target),
arguments_(std::move(arguments)) {}
diff --git a/chromium/third_party/dawn/src/tint/sem/call.h b/chromium/third_party/dawn/src/tint/sem/call.h
index 786bd252dea..c96179daa43 100644
--- a/chromium/third_party/dawn/src/tint/sem/call.h
+++ b/chromium/third_party/dawn/src/tint/sem/call.h
@@ -25,40 +25,38 @@ namespace tint::sem {
/// Call is the base class for semantic nodes that hold semantic information for
/// ast::CallExpression nodes.
class Call final : public Castable<Call, Expression> {
- public:
- /// Constructor
- /// @param declaration the AST node
- /// @param target the call target
- /// @param arguments the call arguments
- /// @param statement the statement that owns this expression
- /// @param constant the constant value of this expression
- /// @param has_side_effects whether this expression may have side effects
- Call(const ast::CallExpression* declaration,
- const CallTarget* target,
- std::vector<const sem::Expression*> arguments,
- const Statement* statement,
- Constant constant,
- bool has_side_effects);
+ public:
+ /// Constructor
+ /// @param declaration the AST node
+ /// @param target the call target
+ /// @param arguments the call arguments
+ /// @param statement the statement that owns this expression
+ /// @param constant the constant value of this expression
+ /// @param has_side_effects whether this expression may have side effects
+ Call(const ast::CallExpression* declaration,
+ const CallTarget* target,
+ std::vector<const sem::Expression*> arguments,
+ const Statement* statement,
+ Constant constant,
+ bool has_side_effects);
- /// Destructor
- ~Call() override;
+ /// Destructor
+ ~Call() override;
- /// @return the target of the call
- const CallTarget* Target() const { return target_; }
+ /// @return the target of the call
+ const CallTarget* Target() const { return target_; }
- /// @return the call arguments
- const std::vector<const sem::Expression*>& Arguments() const {
- return arguments_;
- }
+ /// @return the call arguments
+ const std::vector<const sem::Expression*>& Arguments() const { return arguments_; }
- /// @returns the AST node
- const ast::CallExpression* Declaration() const {
- return static_cast<const ast::CallExpression*>(declaration_);
- }
+ /// @returns the AST node
+ const ast::CallExpression* Declaration() const {
+ return static_cast<const ast::CallExpression*>(declaration_);
+ }
- private:
- CallTarget const* const target_;
- std::vector<const sem::Expression*> arguments_;
+ private:
+ CallTarget const* const target_;
+ std::vector<const sem::Expression*> arguments_;
};
} // namespace tint::sem
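[Editor's note, not part of the diff] An illustrative sketch of the sem::Call accessors reformatted above, given an already-resolved call node; the helper function name is hypothetical:

#include "src/tint/sem/call.h"

const tint::sem::Type* ReturnTypeOf(const tint::sem::Call* call) {
    // Target() is the callee (function, builtin, type constructor or cast);
    // its ReturnType() comes from the shared CallTargetSignature.
    const tint::sem::CallTarget* target = call->Target();
    (void)call->Arguments();    // resolved sem::Expression* arguments
    (void)call->Declaration();  // the originating ast::CallExpression
    return target->ReturnType();
}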
diff --git a/chromium/third_party/dawn/src/tint/sem/call_target.cc b/chromium/third_party/dawn/src/tint/sem/call_target.cc
index 7fc3c81e542..67bde0e144c 100644
--- a/chromium/third_party/dawn/src/tint/sem/call_target.cc
+++ b/chromium/third_party/dawn/src/tint/sem/call_target.cc
@@ -21,43 +21,40 @@ TINT_INSTANTIATE_TYPEINFO(tint::sem::CallTarget);
namespace tint::sem {
-CallTarget::CallTarget(const sem::Type* return_type,
- const ParameterList& parameters)
+CallTarget::CallTarget(const sem::Type* return_type, const ParameterList& parameters)
: signature_{return_type, parameters} {
- TINT_ASSERT(Semantic, return_type);
+ TINT_ASSERT(Semantic, return_type);
}
CallTarget::CallTarget(const CallTarget&) = default;
CallTarget::~CallTarget() = default;
-CallTargetSignature::CallTargetSignature(const sem::Type* ret_ty,
- const ParameterList& params)
+CallTargetSignature::CallTargetSignature(const sem::Type* ret_ty, const ParameterList& params)
: return_type(ret_ty), parameters(params) {}
CallTargetSignature::CallTargetSignature(const CallTargetSignature&) = default;
CallTargetSignature::~CallTargetSignature() = default;
int CallTargetSignature::IndexOf(ParameterUsage usage) const {
- for (size_t i = 0; i < parameters.size(); i++) {
- if (parameters[i]->Usage() == usage) {
- return static_cast<int>(i);
+ for (size_t i = 0; i < parameters.size(); i++) {
+ if (parameters[i]->Usage() == usage) {
+ return static_cast<int>(i);
+ }
}
- }
- return -1;
+ return -1;
}
bool CallTargetSignature::operator==(const CallTargetSignature& other) const {
- if (return_type != other.return_type ||
- parameters.size() != other.parameters.size()) {
- return false;
- }
- for (size_t i = 0; i < parameters.size(); i++) {
- auto* a = parameters[i];
- auto* b = other.parameters[i];
- if (a->Type() != b->Type() || a->Usage() != b->Usage()) {
- return false;
+ if (return_type != other.return_type || parameters.size() != other.parameters.size()) {
+ return false;
}
- }
- return true;
+ for (size_t i = 0; i < parameters.size(); i++) {
+ auto* a = parameters[i];
+ auto* b = other.parameters[i];
+ if (a->Type() != b->Type() || a->Usage() != b->Usage()) {
+ return false;
+ }
+ }
+ return true;
}
} // namespace tint::sem
@@ -66,11 +63,11 @@ namespace std {
std::size_t hash<tint::sem::CallTargetSignature>::operator()(
const tint::sem::CallTargetSignature& sig) const {
- size_t hash = tint::utils::Hash(sig.parameters.size());
- for (auto* p : sig.parameters) {
- tint::utils::HashCombine(&hash, p->Type(), p->Usage());
- }
- return tint::utils::Hash(hash, sig.return_type);
+ size_t hash = tint::utils::Hash(sig.parameters.size());
+ for (auto* p : sig.parameters) {
+ tint::utils::HashCombine(&hash, p->Type(), p->Usage());
+ }
+ return tint::utils::Hash(hash, sig.return_type);
}
} // namespace std
diff --git a/chromium/third_party/dawn/src/tint/sem/call_target.h b/chromium/third_party/dawn/src/tint/sem/call_target.h
index 01650eb3de6..64716b2913b 100644
--- a/chromium/third_party/dawn/src/tint/sem/call_target.h
+++ b/chromium/third_party/dawn/src/tint/sem/call_target.h
@@ -18,7 +18,7 @@
#include <vector>
#include "src/tint/sem/node.h"
-#include "src/tint/sem/sampler_type.h"
+#include "src/tint/sem/sampler.h"
#include "src/tint/sem/variable.h"
#include "src/tint/utils/hash.h"
@@ -31,59 +31,59 @@ namespace tint::sem {
/// CallTargetSignature holds the return type and parameters for a call target
struct CallTargetSignature {
- /// Constructor
- /// @param ret_ty the call target return type
- /// @param params the call target parameters
- CallTargetSignature(const sem::Type* ret_ty, const ParameterList& params);
-
- /// Copy constructor
- CallTargetSignature(const CallTargetSignature&);
-
- /// Destructor
- ~CallTargetSignature();
-
- /// The type of the call target return value
- const sem::Type* const return_type = nullptr;
- /// The parameters of the call target
- const ParameterList parameters;
-
- /// Equality operator
- /// @param other the signature to compare this to
- /// @returns true if this signature is equal to other
- bool operator==(const CallTargetSignature& other) const;
-
- /// @param usage the parameter usage to find
- /// @returns the index of the parameter with the given usage, or -1 if no
- /// parameter with the given usage exists.
- int IndexOf(ParameterUsage usage) const;
+ /// Constructor
+ /// @param ret_ty the call target return type
+ /// @param params the call target parameters
+ CallTargetSignature(const sem::Type* ret_ty, const ParameterList& params);
+
+ /// Copy constructor
+ CallTargetSignature(const CallTargetSignature&);
+
+ /// Destructor
+ ~CallTargetSignature();
+
+ /// The type of the call target return value
+ const sem::Type* const return_type = nullptr;
+ /// The parameters of the call target
+ const ParameterList parameters;
+
+ /// Equality operator
+ /// @param other the signature to compare this to
+ /// @returns true if this signature is equal to other
+ bool operator==(const CallTargetSignature& other) const;
+
+ /// @param usage the parameter usage to find
+ /// @returns the index of the parameter with the given usage, or -1 if no
+ /// parameter with the given usage exists.
+ int IndexOf(ParameterUsage usage) const;
};
/// CallTarget is the base for callable functions, builtins, type constructors
/// and type casts.
class CallTarget : public Castable<CallTarget, Node> {
- public:
- /// Constructor
- /// @param return_type the return type of the call target
- /// @param parameters the parameters for the call target
- CallTarget(const sem::Type* return_type, const ParameterList& parameters);
+ public:
+ /// Constructor
+ /// @param return_type the return type of the call target
+ /// @param parameters the parameters for the call target
+ CallTarget(const sem::Type* return_type, const ParameterList& parameters);
- /// Copy constructor
- CallTarget(const CallTarget&);
+ /// Copy constructor
+ CallTarget(const CallTarget&);
- /// Destructor
- ~CallTarget() override;
+ /// Destructor
+ ~CallTarget() override;
- /// @return the return type of the call target
- const sem::Type* ReturnType() const { return signature_.return_type; }
+ /// @return the return type of the call target
+ const sem::Type* ReturnType() const { return signature_.return_type; }
- /// @return the parameters of the call target
- const ParameterList& Parameters() const { return signature_.parameters; }
+ /// @return the parameters of the call target
+ const ParameterList& Parameters() const { return signature_.parameters; }
- /// @return the signature of the call target
- const CallTargetSignature& Signature() const { return signature_; }
+ /// @return the signature of the call target
+ const CallTargetSignature& Signature() const { return signature_; }
- private:
- CallTargetSignature signature_;
+ private:
+ CallTargetSignature signature_;
};
} // namespace tint::sem
@@ -95,10 +95,10 @@ namespace std {
/// std::unordered_set.
template <>
class hash<tint::sem::CallTargetSignature> {
- public:
- /// @param sig the CallTargetSignature to hash
- /// @return the hash value
- std::size_t operator()(const tint::sem::CallTargetSignature& sig) const;
+ public:
+ /// @param sig the CallTargetSignature to hash
+ /// @return the hash value
+ std::size_t operator()(const tint::sem::CallTargetSignature& sig) const;
};
} // namespace std
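[Editor's note, not part of the diff] A short sketch of the CallTargetSignature API reformatted above, assuming a return type and ParameterList were built elsewhere (e.g. via a ProgramBuilder); the function name and the use of ParameterUsage::kNone are purely illustrative:

#include "src/tint/sem/call_target.h"

bool SignaturesMatch(const tint::sem::Type* ret_ty,
                     const tint::sem::ParameterList& params) {
    tint::sem::CallTargetSignature a{ret_ty, params};
    tint::sem::CallTargetSignature b{ret_ty, params};
    // IndexOf() returns -1 when no parameter carries the requested usage.
    if (a.IndexOf(tint::sem::ParameterUsage::kNone) == -1) {
        // no parameter is tagged with that usage
    }
    // operator== compares the return type plus each parameter's type and usage;
    // the std::hash specialization above allows use as a hash-container key.
    return a == b;
}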
diff --git a/chromium/third_party/dawn/src/tint/sem/constant.cc b/chromium/third_party/dawn/src/tint/sem/constant.cc
index 3b23d05ebf4..80869920ccb 100644
--- a/chromium/third_party/dawn/src/tint/sem/constant.cc
+++ b/chromium/third_party/dawn/src/tint/sem/constant.cc
@@ -14,7 +14,7 @@
#include "src/tint/sem/constant.h"
-#include <functional>
+#include <cmath>
#include <utility>
#include "src/tint/debug.h"
@@ -24,36 +24,30 @@
namespace tint::sem {
namespace {
+size_t CountElements(const Constant::Elements& elements) {
+ return std::visit([](auto&& vec) { return vec.size(); }, elements);
+}
-const Type* ElemType(const Type* ty, size_t num_elements) {
- diag::List diag;
- if (ty->is_scalar()) {
- if (num_elements != 1) {
- TINT_ICE(Semantic, diag)
- << "sem::Constant() type <-> num_element mismatch. type: '"
- << ty->TypeInfo().name << "' num_elements: " << num_elements;
- }
- return ty;
- }
- if (auto* vec = ty->As<Vector>()) {
- if (num_elements != vec->Width()) {
- TINT_ICE(Semantic, diag)
- << "sem::Constant() type <-> num_element mismatch. type: '"
- << ty->TypeInfo().name << "' num_elements: " << num_elements;
+template <typename T>
+bool IsNegativeFloat(T value) {
+ (void)value;
+ if constexpr (IsFloatingPoint<T>) {
+ return std::signbit(value);
+ } else {
+ return false;
}
- TINT_ASSERT(Semantic, vec->type()->is_scalar());
- return vec->type();
- }
- TINT_UNREACHABLE(Semantic, diag) << "Unsupported sem::Constant type";
- return nullptr;
}
} // namespace
Constant::Constant() {}
-Constant::Constant(const sem::Type* ty, Scalars els)
- : type_(ty), elem_type_(ElemType(ty, els.size())), elems_(std::move(els)) {}
+Constant::Constant(const sem::Type* ty, Elements els)
+ : type_(ty), elem_type_(CheckElemType(ty, CountElements(els))), elems_(std::move(els)) {}
+
+Constant::Constant(const sem::Type* ty, AInts vec) : Constant(ty, Elements{std::move(vec)}) {}
+
+Constant::Constant(const sem::Type* ty, AFloats vec) : Constant(ty, Elements{std::move(vec)}) {}
Constant::Constant(const Constant&) = default;
@@ -62,21 +56,58 @@ Constant::~Constant() = default;
Constant& Constant::operator=(const Constant& rhs) = default;
bool Constant::AnyZero() const {
- for (size_t i = 0; i < Elements().size(); ++i) {
- if (WithScalarAt(i, [&](auto&& s) {
- // Use std::equal_to to work around -Wfloat-equal warnings
- auto equals_to =
- std::equal_to<std::remove_reference_t<decltype(s)>>{};
-
- if (equals_to(s, 0)) {
- return true;
- }
- return false;
- })) {
- return true;
+ return WithElements([&](auto&& vec) {
+ using T = typename std::decay_t<decltype(vec)>::value_type;
+ for (auto el : vec) {
+ if (el == T(0) && !IsNegativeFloat(el.value)) {
+ return true;
+ }
+ }
+ return false;
+ });
+}
+
+bool Constant::AllZero() const {
+ return WithElements([&](auto&& vec) {
+ using T = typename std::decay_t<decltype(vec)>::value_type;
+ for (auto el : vec) {
+ if (el != T(0) || IsNegativeFloat(el.value)) {
+ return false;
+ }
+ }
+ return true;
+ });
+}
+
+bool Constant::AllEqual(size_t start, size_t end) const {
+ return WithElements([&](auto&& vec) {
+ if (!vec.empty()) {
+ auto value = vec[start];
+ bool float_sign = IsNegativeFloat(vec[start].value);
+ for (size_t i = start + 1; i < end; i++) {
+ if (vec[i] != value || float_sign != IsNegativeFloat(vec[i].value)) {
+ return false;
+ }
+ }
+ }
+ return true;
+ });
+}
+
+const Type* Constant::CheckElemType(const sem::Type* ty, size_t num_elements) {
+ diag::List diag;
+ if (ty->is_abstract_or_scalar() || ty->IsAnyOf<Vector, Matrix>()) {
+ uint32_t count = 0;
+ auto* el_ty = Type::ElementOf(ty, &count);
+ if (num_elements != count) {
+ TINT_ICE(Semantic, diag) << "sem::Constant() type <-> element mismatch. type: '"
+ << ty->TypeInfo().name << "' element: " << num_elements;
+ }
+ TINT_ASSERT(Semantic, el_ty->is_abstract_or_scalar());
+ return el_ty;
}
- }
- return false;
+ TINT_UNREACHABLE(Semantic, diag) << "Unsupported sem::Constant type: " << ty->TypeInfo().name;
+ return nullptr;
}
} // namespace tint::sem
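[Editor's note, not part of the diff] The rewritten AnyZero()/AllZero()/AllEqual() above distinguish -0.0 from +0.0 even though the two compare equal; the standalone snippet below illustrates the std::signbit distinction that the new IsNegativeFloat() helper relies on:

#include <cassert>
#include <cmath>

int main() {
    double pos_zero = 0.0;
    double neg_zero = -0.0;
    assert(pos_zero == neg_zero);     // operator== cannot tell them apart
    assert(!std::signbit(pos_zero));  // sign bit clear on +0.0
    assert(std::signbit(neg_zero));   // sign bit set on -0.0
    return 0;
}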
diff --git a/chromium/third_party/dawn/src/tint/sem/constant.h b/chromium/third_party/dawn/src/tint/sem/constant.h
index cfb6f1e7e3b..c0ba9e6b5ca 100644
--- a/chromium/third_party/dawn/src/tint/sem/constant.h
+++ b/chromium/third_party/dawn/src/tint/sem/constant.h
@@ -15,6 +15,10 @@
#ifndef SRC_TINT_SEM_CONSTANT_H_
#define SRC_TINT_SEM_CONSTANT_H_
+#include <ostream>
+// TODO(https://crbug.com/dawn/1379) Update cpplint and remove NOLINT
+#include <utility>
+#include <variant> // NOLINT(build/include_order)
#include <vector>
#include "src/tint/program_builder.h"
@@ -22,122 +26,162 @@
namespace tint::sem {
-/// A Constant is compile-time known expression value, expressed as a flattened
-/// list of scalar values. Value may be of a scalar or vector type.
+/// A Constant holds a compile-time evaluated expression value, expressed as a flattened list of
+/// element values. The expression type may be of an abstract-numeric, scalar, vector or matrix
+/// type. Constant holds the element values in either a vector of abstract-integer (AInt) or
+/// abstract-float (AFloat), depending on the element type.
class Constant {
- using i32 = ProgramBuilder::i32;
- using u32 = ProgramBuilder::u32;
- using f32 = ProgramBuilder::f32;
-
- public:
- /// Scalar holds a single constant scalar value, as a union of an i32, u32,
- /// f32 or boolean.
- union Scalar {
- /// The scalar value as a i32
- int32_t i32;
- /// The scalar value as a u32
- uint32_t u32;
- /// The scalar value as a f32
- float f32;
- /// The scalar value as a bool
- bool bool_;
-
- /// Constructs the scalar with the i32 value `v`
- /// @param v the value of the Scalar
- Scalar(ProgramBuilder::i32 v) : i32(v) {} // NOLINT
-
- /// Constructs the scalar with the u32 value `v`
- /// @param v the value of the Scalar
- Scalar(ProgramBuilder::u32 v) : u32(v) {} // NOLINT
-
- /// Constructs the scalar with the f32 value `v`
- /// @param v the value of the Scalar
- Scalar(ProgramBuilder::f32 v) : f32(v) {} // NOLINT
-
- /// Constructs the scalar with the bool value `v`
- /// @param v the value of the Scalar
- Scalar(bool v) : bool_(v) {} // NOLINT
- };
-
- /// Scalars is a list of scalar values
- using Scalars = std::vector<Scalar>;
-
- /// Constructs an invalid Constant
- Constant();
-
- /// Constructs a Constant of the given type and element values
- /// @param ty the Constant type
- /// @param els the Constant element values
- Constant(const Type* ty, Scalars els);
-
- /// Copy constructor
- Constant(const Constant&);
-
- /// Destructor
- ~Constant();
-
- /// Copy assignment
- /// @param other the Constant to copy
- /// @returns this Constant
- Constant& operator=(const Constant& other);
-
- /// @returns true if the Constant has been initialized
- bool IsValid() const { return type_ != nullptr; }
-
- /// @return true if the Constant has been initialized
- operator bool() const { return IsValid(); }
-
- /// @returns the type of the Constant
- const sem::Type* Type() const { return type_; }
-
- /// @returns the element type of the Constant
- const sem::Type* ElementType() const { return elem_type_; }
-
- /// @returns the constant's scalar elements
- const Scalars& Elements() const { return elems_; }
-
- /// @returns true if any scalar element is zero
- bool AnyZero() const;
-
- /// Calls `func(s)` with s being the current scalar value at `index`.
- /// `func` is typically a lambda of the form '[](auto&& s)'.
- /// @param index the index of the scalar value
- /// @param func a function with signature `T(S)`
- /// @return the value returned by func.
- template <typename Func>
- auto WithScalarAt(size_t index, Func&& func) const {
- auto* elem_type = ElementType();
- if (elem_type->Is<I32>()) {
- return func(elems_[index].i32);
+ public:
+ /// AInts is a vector of AInt, used to hold elements of the WGSL types:
+ /// * abstract-integer
+ /// * i32
+ /// * u32
+ /// * bool (0 or 1)
+ using AInts = std::vector<AInt>;
+
+ /// AFloats is a vector of AFloat, used to hold elements of the WGSL types:
+ /// * abstract-float
+ /// * f32
+ /// * f16
+ using AFloats = std::vector<AFloat>;
+
+ /// Elements is either a vector of AInts or AFloats
+ using Elements = std::variant<AInts, AFloats>;
+
+ /// Helper that resolves to either AInt or AFloat based on the element type T.
+ template <typename T>
+ using ElementFor = std::conditional_t<IsFloatingPoint<UnwrapNumber<T>>, AFloat, AInt>;
+
+ /// Helper that resolves to either AInts or AFloats based on the element type T.
+ template <typename T>
+ using ElementVectorFor = std::conditional_t<IsFloatingPoint<UnwrapNumber<T>>, AFloats, AInts>;
+
+ /// Constructs an invalid Constant
+ Constant();
+
+ /// Constructs a Constant of the given type and element values
+ /// @param ty the Constant type
+ /// @param els the Constant element values
+ Constant(const sem::Type* ty, Elements els);
+
+ /// Constructs a Constant of the given type and element values
+ /// @param ty the Constant type
+ /// @param vec the Constant element values
+ Constant(const sem::Type* ty, AInts vec);
+
+ /// Constructs a Constant of the given type and element values
+ /// @param ty the Constant type
+ /// @param vec the Constant element values
+ Constant(const sem::Type* ty, AFloats vec);
+
+ /// Constructs a Constant of the given type and element values
+ /// @param ty the Constant type
+ /// @param els the Constant element values
+ template <typename T>
+ Constant(const sem::Type* ty, std::initializer_list<T> els);
+
+ /// Copy constructor
+ Constant(const Constant&);
+
+ /// Destructor
+ ~Constant();
+
+ /// Copy assignment
+ /// @param other the Constant to copy
+ /// @returns this Constant
+ Constant& operator=(const Constant& other);
+
+ /// @returns true if the Constant has been initialized
+ bool IsValid() const { return type_ != nullptr; }
+
+ /// @return true if the Constant has been initialized
+ operator bool() const { return IsValid(); }
+
+ /// @returns the type of the Constant
+ const sem::Type* Type() const { return type_; }
+
+ /// @returns the number of elements
+ size_t ElementCount() const {
+ return std::visit([](auto&& v) { return v.size(); }, elems_);
}
- if (elem_type->Is<U32>()) {
- return func(elems_[index].u32);
- }
- if (elem_type->Is<F32>()) {
- return func(elems_[index].f32);
+
+ /// @returns the element type of the Constant
+ const sem::Type* ElementType() const { return elem_type_; }
+
+ /// @returns the constant's elements
+ const Elements& GetElements() const { return elems_; }
+
+ /// WithElements calls the function `f` with the vector of elements as either AFloats or AInts
+ /// @param f a function-like with the signature `R(auto&&)`.
+ /// @returns the result of calling `f`.
+ template <typename F>
+ auto WithElements(F&& f) const {
+ return std::visit(std::forward<F>(f), elems_);
}
- if (elem_type->Is<Bool>()) {
- return func(elems_[index].bool_);
+
+ /// WithElements calls the function `f` with the element vector as either AFloats or AInts
+ /// @param f a function-like with the signature `R(auto&&)`.
+ /// @returns the result of calling `f`.
+ template <typename F>
+ auto WithElements(F&& f) {
+ return std::visit(std::forward<F>(f), elems_);
}
- diag::List diags;
- TINT_UNREACHABLE(Semantic, diags)
- << "invalid scalar type " << type_->TypeInfo().name;
- return func(~0);
- }
-
- /// @param index the index of the scalar value
- /// @return the value of the scalar `static_cast` to type T.
- template <typename T>
- T ElementAs(size_t index) const {
- return WithScalarAt(index, [](auto val) { return static_cast<T>(val); });
- }
-
- private:
- const sem::Type* type_ = nullptr;
- const sem::Type* elem_type_ = nullptr;
- Scalars elems_;
+
+ /// @returns the elements as a vector of AInt
+ inline const AInts& IElements() const { return std::get<AInts>(elems_); }
+
+ /// @returns the elements as a vector of AFloat
+ inline const AFloats& FElements() const { return std::get<AFloats>(elems_); }
+
+ /// @returns true if any element is positive zero
+ bool AnyZero() const;
+
+ /// @returns true if all elements are positive zero
+ bool AllZero() const;
+
+ /// @returns true if all elements are the same value, with the same sign-bit.
+ bool AllEqual() const { return AllEqual(0, ElementCount()); }
+
+ /// @param start the first element index
+ /// @param end one past the last element index
+ /// @returns true if all elements between `[start, end)` are the same value
+ bool AllEqual(size_t start, size_t end) const;
+
+ /// @param index the index of the element
+ /// @return the element at `index`, which must be of type `T`.
+ template <typename T>
+ T Element(size_t index) const;
+
+ private:
+ /// Checks that the provided type matches the number of expected elements.
+ /// @returns the element type of `ty`.
+ const sem::Type* CheckElemType(const sem::Type* ty, size_t num_elements);
+
+ const sem::Type* type_ = nullptr;
+ const sem::Type* elem_type_ = nullptr;
+ Elements elems_;
};
+template <typename T>
+Constant::Constant(const sem::Type* ty, std::initializer_list<T> els)
+ : type_(ty), elem_type_(CheckElemType(type_, els.size())) {
+ ElementVectorFor<T> elements;
+ elements.reserve(els.size());
+ for (auto el : els) {
+ elements.emplace_back(ElementFor<T>(el));
+ }
+ elems_ = Elements{std::move(elements)};
+}
+
+template <typename T>
+T Constant::Element(size_t index) const {
+ if constexpr (std::is_same_v<ElementVectorFor<T>, AFloats>) {
+ return static_cast<T>(FElements()[index].value);
+ } else {
+ return static_cast<T>(IElements()[index].value);
+ }
+}
+
} // namespace tint::sem
#endif // SRC_TINT_SEM_CONSTANT_H_
diff --git a/chromium/third_party/dawn/src/tint/sem/constant_test.cc b/chromium/third_party/dawn/src/tint/sem/constant_test.cc
new file mode 100644
index 00000000000..ed9fef8bf07
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/sem/constant_test.cc
@@ -0,0 +1,304 @@
+// Copyright 2022 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/tint/sem/constant.h"
+
+#include <gmock/gmock.h>
+
+#include "src/tint/sem/abstract_float.h"
+#include "src/tint/sem/abstract_int.h"
+#include "src/tint/sem/test_helper.h"
+
+using namespace tint::number_suffixes; // NOLINT
+
+namespace tint::sem {
+namespace {
+
+using ConstantTest = TestHelper;
+
+TEST_F(ConstantTest, ConstructorInitializerList) {
+ {
+ auto i = AInt(AInt::kHighest);
+ Constant c(create<AbstractInt>(), {i});
+ c.WithElements([&](auto&& vec) { EXPECT_THAT(vec, testing::ElementsAre(i)); });
+ }
+ {
+ auto i = i32(i32::kHighest);
+ Constant c(create<I32>(), {i});
+ c.WithElements([&](auto&& vec) { EXPECT_THAT(vec, testing::ElementsAre(i)); });
+ }
+ {
+ auto i = u32(u32::kHighest);
+ Constant c(create<U32>(), {i});
+ c.WithElements([&](auto&& vec) { EXPECT_THAT(vec, testing::ElementsAre(i)); });
+ }
+ {
+ Constant c(create<Bool>(), {false});
+ c.WithElements([&](auto&& vec) { EXPECT_THAT(vec, testing::ElementsAre(0_a)); });
+ }
+ {
+ Constant c(create<Bool>(), {true});
+ c.WithElements([&](auto&& vec) { EXPECT_THAT(vec, testing::ElementsAre(1_a)); });
+ }
+ {
+ auto f = AFloat(AFloat::kHighest);
+ Constant c(create<AbstractFloat>(), {f});
+ c.WithElements([&](auto&& vec) { EXPECT_THAT(vec, testing::ElementsAre(f)); });
+ }
+ {
+ auto f = f32(f32::kHighest);
+ Constant c(create<F32>(), {f});
+ c.WithElements([&](auto&& vec) { EXPECT_THAT(vec, testing::ElementsAre(f)); });
+ }
+ {
+ auto f = f16(f16::kHighest);
+ Constant c(create<F16>(), {f});
+ c.WithElements([&](auto&& vec) { EXPECT_THAT(vec, testing::ElementsAre(f)); });
+ }
+}
+
+TEST_F(ConstantTest, Element_ai) {
+ Constant c(create<AbstractInt>(), {1_a});
+ EXPECT_EQ(c.Element<AInt>(0), 1_a);
+ EXPECT_EQ(c.ElementCount(), 1u);
+}
+
+TEST_F(ConstantTest, Element_i32) {
+ Constant c(create<I32>(), {1_a});
+ EXPECT_EQ(c.Element<i32>(0), 1_i);
+ EXPECT_EQ(c.ElementCount(), 1u);
+}
+
+TEST_F(ConstantTest, Element_u32) {
+ Constant c(create<U32>(), {1_a});
+ EXPECT_EQ(c.Element<u32>(0), 1_u);
+ EXPECT_EQ(c.ElementCount(), 1u);
+}
+
+TEST_F(ConstantTest, Element_bool) {
+ Constant c(create<Bool>(), {true});
+ EXPECT_EQ(c.Element<bool>(0), true);
+ EXPECT_EQ(c.ElementCount(), 1u);
+}
+
+TEST_F(ConstantTest, Element_af) {
+ Constant c(create<AbstractFloat>(), {1.0_a});
+ EXPECT_EQ(c.Element<AFloat>(0), 1.0_a);
+ EXPECT_EQ(c.ElementCount(), 1u);
+}
+
+TEST_F(ConstantTest, Element_f32) {
+ Constant c(create<F32>(), {1.0_a});
+ EXPECT_EQ(c.Element<f32>(0), 1.0_f);
+ EXPECT_EQ(c.ElementCount(), 1u);
+}
+
+TEST_F(ConstantTest, Element_f16) {
+ Constant c(create<F16>(), {1.0_a});
+ EXPECT_EQ(c.Element<f16>(0), 1.0_h);
+ EXPECT_EQ(c.ElementCount(), 1u);
+}
+
+TEST_F(ConstantTest, Element_vec3_ai) {
+ Constant c(create<Vector>(create<AbstractInt>(), 3u), {1_a, 2_a, 3_a});
+ EXPECT_EQ(c.Element<AInt>(0), 1_a);
+ EXPECT_EQ(c.Element<AInt>(1), 2_a);
+ EXPECT_EQ(c.Element<AInt>(2), 3_a);
+ EXPECT_EQ(c.ElementCount(), 3u);
+}
+
+TEST_F(ConstantTest, Element_vec3_i32) {
+ Constant c(create<Vector>(create<I32>(), 3u), {1_a, 2_a, 3_a});
+ EXPECT_EQ(c.Element<i32>(0), 1_i);
+ EXPECT_EQ(c.Element<i32>(1), 2_i);
+ EXPECT_EQ(c.Element<i32>(2), 3_i);
+ EXPECT_EQ(c.ElementCount(), 3u);
+}
+
+TEST_F(ConstantTest, Element_vec3_u32) {
+ Constant c(create<Vector>(create<U32>(), 3u), {1_a, 2_a, 3_a});
+ EXPECT_EQ(c.Element<u32>(0), 1_u);
+ EXPECT_EQ(c.Element<u32>(1), 2_u);
+ EXPECT_EQ(c.Element<u32>(2), 3_u);
+ EXPECT_EQ(c.ElementCount(), 3u);
+}
+
+TEST_F(ConstantTest, Element_vec3_bool) {
+ Constant c(create<Vector>(create<Bool>(), 2u), {true, false});
+ EXPECT_EQ(c.Element<bool>(0), true);
+ EXPECT_EQ(c.Element<bool>(1), false);
+ EXPECT_EQ(c.ElementCount(), 2u);
+}
+
+TEST_F(ConstantTest, Element_vec3_af) {
+ Constant c(create<Vector>(create<AbstractFloat>(), 3u), {1.0_a, 2.0_a, 3.0_a});
+ EXPECT_EQ(c.Element<AFloat>(0), 1.0_a);
+ EXPECT_EQ(c.Element<AFloat>(1), 2.0_a);
+ EXPECT_EQ(c.Element<AFloat>(2), 3.0_a);
+ EXPECT_EQ(c.ElementCount(), 3u);
+}
+
+TEST_F(ConstantTest, Element_vec3_f32) {
+ Constant c(create<Vector>(create<F32>(), 3u), {1.0_a, 2.0_a, 3.0_a});
+ EXPECT_EQ(c.Element<f32>(0), 1.0_f);
+ EXPECT_EQ(c.Element<f32>(1), 2.0_f);
+ EXPECT_EQ(c.Element<f32>(2), 3.0_f);
+ EXPECT_EQ(c.ElementCount(), 3u);
+}
+
+TEST_F(ConstantTest, Element_vec3_f16) {
+ Constant c(create<Vector>(create<F16>(), 3u), {1.0_a, 2.0_a, 3.0_a});
+ EXPECT_EQ(c.Element<f16>(0), 1.0_h);
+ EXPECT_EQ(c.Element<f16>(1), 2.0_h);
+ EXPECT_EQ(c.Element<f16>(2), 3.0_h);
+ EXPECT_EQ(c.ElementCount(), 3u);
+}
+
+TEST_F(ConstantTest, Element_mat2x3_af) {
+ Constant c(create<Matrix>(create<Vector>(create<AbstractFloat>(), 3u), 2u),
+ {1.0_a, 2.0_a, 3.0_a, 4.0_a, 5.0_a, 6.0_a});
+ EXPECT_EQ(c.Element<AFloat>(0), 1.0_a);
+ EXPECT_EQ(c.Element<AFloat>(1), 2.0_a);
+ EXPECT_EQ(c.Element<AFloat>(2), 3.0_a);
+ EXPECT_EQ(c.Element<AFloat>(3), 4.0_a);
+ EXPECT_EQ(c.Element<AFloat>(4), 5.0_a);
+ EXPECT_EQ(c.Element<AFloat>(5), 6.0_a);
+ EXPECT_EQ(c.ElementCount(), 6u);
+}
+
+TEST_F(ConstantTest, Element_mat2x3_f32) {
+ Constant c(create<Matrix>(create<Vector>(create<F32>(), 3u), 2u),
+ {1.0_a, 2.0_a, 3.0_a, 4.0_a, 5.0_a, 6.0_a});
+ EXPECT_EQ(c.Element<f32>(0), 1.0_f);
+ EXPECT_EQ(c.Element<f32>(1), 2.0_f);
+ EXPECT_EQ(c.Element<f32>(2), 3.0_f);
+ EXPECT_EQ(c.Element<f32>(3), 4.0_f);
+ EXPECT_EQ(c.Element<f32>(4), 5.0_f);
+ EXPECT_EQ(c.Element<f32>(5), 6.0_f);
+ EXPECT_EQ(c.ElementCount(), 6u);
+}
+
+TEST_F(ConstantTest, Element_mat2x3_f16) {
+ Constant c(create<Matrix>(create<Vector>(create<F16>(), 3u), 2u),
+ {1.0_a, 2.0_a, 3.0_a, 4.0_a, 5.0_a, 6.0_a});
+ EXPECT_EQ(c.Element<f16>(0), 1.0_h);
+ EXPECT_EQ(c.Element<f16>(1), 2.0_h);
+ EXPECT_EQ(c.Element<f16>(2), 3.0_h);
+ EXPECT_EQ(c.Element<f16>(3), 4.0_h);
+ EXPECT_EQ(c.Element<f16>(4), 5.0_h);
+ EXPECT_EQ(c.Element<f16>(5), 6.0_h);
+ EXPECT_EQ(c.ElementCount(), 6u);
+}
+
+TEST_F(ConstantTest, AnyZero) {
+ auto* vec3_ai = create<Vector>(create<AbstractInt>(), 3u);
+ EXPECT_EQ(Constant(vec3_ai, {1_a, 2_a, 3_a}).AnyZero(), false);
+ EXPECT_EQ(Constant(vec3_ai, {0_a, 2_a, 3_a}).AnyZero(), true);
+ EXPECT_EQ(Constant(vec3_ai, {1_a, 0_a, 3_a}).AnyZero(), true);
+ EXPECT_EQ(Constant(vec3_ai, {1_a, 2_a, 0_a}).AnyZero(), true);
+ EXPECT_EQ(Constant(vec3_ai, {0_a, 0_a, 0_a}).AnyZero(), true);
+
+ auto* vec3_af = create<Vector>(create<AbstractFloat>(), 3u);
+ EXPECT_EQ(Constant(vec3_af, {1._a, 2._a, 3._a}).AnyZero(), false);
+ EXPECT_EQ(Constant(vec3_af, {0._a, 2._a, 3._a}).AnyZero(), true);
+ EXPECT_EQ(Constant(vec3_af, {1._a, 0._a, 3._a}).AnyZero(), true);
+ EXPECT_EQ(Constant(vec3_af, {1._a, 2._a, 0._a}).AnyZero(), true);
+ EXPECT_EQ(Constant(vec3_af, {0._a, 0._a, 0._a}).AnyZero(), true);
+
+ EXPECT_EQ(Constant(vec3_af, {1._a, -2._a, 3._a}).AnyZero(), false);
+ EXPECT_EQ(Constant(vec3_af, {0._a, -2._a, 3._a}).AnyZero(), true);
+ EXPECT_EQ(Constant(vec3_af, {1._a, -0._a, 3._a}).AnyZero(), false);
+ EXPECT_EQ(Constant(vec3_af, {1._a, -2._a, 0._a}).AnyZero(), true);
+ EXPECT_EQ(Constant(vec3_af, {0._a, -0._a, 0._a}).AnyZero(), true);
+ EXPECT_EQ(Constant(vec3_af, {-0._a, -0._a, -0._a}).AnyZero(), false);
+}
+
+TEST_F(ConstantTest, AllZero) {
+ auto* vec3_ai = create<Vector>(create<AbstractInt>(), 3u);
+ EXPECT_EQ(Constant(vec3_ai, {1_a, 2_a, 3_a}).AllZero(), false);
+ EXPECT_EQ(Constant(vec3_ai, {0_a, 2_a, 3_a}).AllZero(), false);
+ EXPECT_EQ(Constant(vec3_ai, {1_a, 0_a, 3_a}).AllZero(), false);
+ EXPECT_EQ(Constant(vec3_ai, {1_a, 2_a, 0_a}).AllZero(), false);
+ EXPECT_EQ(Constant(vec3_ai, {0_a, 0_a, 0_a}).AllZero(), true);
+
+ auto* vec3_af = create<Vector>(create<AbstractFloat>(), 3u);
+ EXPECT_EQ(Constant(vec3_af, {1._a, 2._a, 3._a}).AllZero(), false);
+ EXPECT_EQ(Constant(vec3_af, {0._a, 2._a, 3._a}).AllZero(), false);
+ EXPECT_EQ(Constant(vec3_af, {1._a, 0._a, 3._a}).AllZero(), false);
+ EXPECT_EQ(Constant(vec3_af, {1._a, 2._a, 0._a}).AllZero(), false);
+ EXPECT_EQ(Constant(vec3_af, {0._a, 0._a, 0._a}).AllZero(), true);
+
+ EXPECT_EQ(Constant(vec3_af, {1._a, -2._a, 3._a}).AllZero(), false);
+ EXPECT_EQ(Constant(vec3_af, {0._a, -2._a, 3._a}).AllZero(), false);
+ EXPECT_EQ(Constant(vec3_af, {1._a, -0._a, 3._a}).AllZero(), false);
+ EXPECT_EQ(Constant(vec3_af, {1._a, -2._a, 0._a}).AllZero(), false);
+ EXPECT_EQ(Constant(vec3_af, {0._a, -0._a, 0._a}).AllZero(), false);
+ EXPECT_EQ(Constant(vec3_af, {-0._a, -0._a, -0._a}).AllZero(), false);
+}
+
+TEST_F(ConstantTest, AllEqual) {
+ auto* vec3_ai = create<Vector>(create<AbstractInt>(), 3u);
+ EXPECT_EQ(Constant(vec3_ai, {1_a, 2_a, 3_a}).AllEqual(), false);
+ EXPECT_EQ(Constant(vec3_ai, {1_a, 1_a, 3_a}).AllEqual(), false);
+ EXPECT_EQ(Constant(vec3_ai, {1_a, 3_a, 3_a}).AllEqual(), false);
+ EXPECT_EQ(Constant(vec3_ai, {1_a, 1_a, 1_a}).AllEqual(), true);
+ EXPECT_EQ(Constant(vec3_ai, {2_a, 2_a, 2_a}).AllEqual(), true);
+ EXPECT_EQ(Constant(vec3_ai, {3_a, 3_a, 3_a}).AllEqual(), true);
+ EXPECT_EQ(Constant(vec3_ai, {0_a, 0_a, 0_a}).AllEqual(), true);
+
+ auto* vec3_af = create<Vector>(create<AbstractFloat>(), 3u);
+ EXPECT_EQ(Constant(vec3_af, {1._a, 2._a, 3._a}).AllEqual(), false);
+ EXPECT_EQ(Constant(vec3_af, {1._a, 1._a, 3._a}).AllEqual(), false);
+ EXPECT_EQ(Constant(vec3_af, {1._a, 3._a, 3._a}).AllEqual(), false);
+ EXPECT_EQ(Constant(vec3_af, {1._a, 1._a, 1._a}).AllEqual(), true);
+ EXPECT_EQ(Constant(vec3_af, {2._a, 2._a, 2._a}).AllEqual(), true);
+ EXPECT_EQ(Constant(vec3_af, {3._a, 3._a, 3._a}).AllEqual(), true);
+ EXPECT_EQ(Constant(vec3_af, {0._a, 0._a, 0._a}).AllEqual(), true);
+ EXPECT_EQ(Constant(vec3_af, {0._a, -0._a, 0._a}).AllEqual(), false);
+}
+
+TEST_F(ConstantTest, AllEqualRange) {
+ auto* vec3_ai = create<Vector>(create<AbstractInt>(), 3u);
+ EXPECT_EQ(Constant(vec3_ai, {1_a, 2_a, 3_a}).AllEqual(1, 3), false);
+ EXPECT_EQ(Constant(vec3_ai, {1_a, 1_a, 3_a}).AllEqual(1, 3), false);
+ EXPECT_EQ(Constant(vec3_ai, {1_a, 3_a, 3_a}).AllEqual(1, 3), true);
+ EXPECT_EQ(Constant(vec3_ai, {1_a, 1_a, 1_a}).AllEqual(1, 3), true);
+ EXPECT_EQ(Constant(vec3_ai, {2_a, 2_a, 2_a}).AllEqual(1, 3), true);
+ EXPECT_EQ(Constant(vec3_ai, {2_a, 2_a, 3_a}).AllEqual(1, 3), false);
+ EXPECT_EQ(Constant(vec3_ai, {1_a, 0_a, 0_a}).AllEqual(1, 3), true);
+ EXPECT_EQ(Constant(vec3_ai, {0_a, 1_a, 0_a}).AllEqual(1, 3), false);
+ EXPECT_EQ(Constant(vec3_ai, {0_a, 0_a, 1_a}).AllEqual(1, 3), false);
+ EXPECT_EQ(Constant(vec3_ai, {0_a, 0_a, 0_a}).AllEqual(1, 3), true);
+
+ auto* vec3_af = create<Vector>(create<AbstractFloat>(), 3u);
+ EXPECT_EQ(Constant(vec3_af, {1._a, 2._a, 3._a}).AllEqual(1, 3), false);
+ EXPECT_EQ(Constant(vec3_af, {1._a, 1._a, 3._a}).AllEqual(1, 3), false);
+ EXPECT_EQ(Constant(vec3_af, {1._a, 3._a, 3._a}).AllEqual(1, 3), true);
+ EXPECT_EQ(Constant(vec3_af, {1._a, 1._a, 1._a}).AllEqual(1, 3), true);
+ EXPECT_EQ(Constant(vec3_af, {2._a, 2._a, 2._a}).AllEqual(1, 3), true);
+ EXPECT_EQ(Constant(vec3_af, {2._a, 2._a, 3._a}).AllEqual(1, 3), false);
+ EXPECT_EQ(Constant(vec3_af, {1._a, 0._a, 0._a}).AllEqual(1, 3), true);
+ EXPECT_EQ(Constant(vec3_af, {0._a, 1._a, 0._a}).AllEqual(1, 3), false);
+ EXPECT_EQ(Constant(vec3_af, {0._a, 0._a, 1._a}).AllEqual(1, 3), false);
+ EXPECT_EQ(Constant(vec3_af, {0._a, 0._a, 0._a}).AllEqual(1, 3), true);
+ EXPECT_EQ(Constant(vec3_af, {1._a, -0._a, 0._a}).AllEqual(1, 3), false);
+ EXPECT_EQ(Constant(vec3_af, {0._a, -1._a, 0._a}).AllEqual(1, 3), false);
+ EXPECT_EQ(Constant(vec3_af, {0._a, -0._a, 1._a}).AllEqual(1, 3), false);
+ EXPECT_EQ(Constant(vec3_af, {0._a, -0._a, 0._a}).AllEqual(1, 3), false);
+ EXPECT_EQ(Constant(vec3_af, {0._a, -0._a, -0._a}).AllEqual(1, 3), true);
+ EXPECT_EQ(Constant(vec3_af, {-0._a, -0._a, -0._a}).AllEqual(1, 3), true);
+}
+
+} // namespace
+} // namespace tint::sem
diff --git a/chromium/third_party/dawn/src/tint/sem/depth_multisampled_texture_type.cc b/chromium/third_party/dawn/src/tint/sem/depth_multisampled_texture.cc
index bb5b8a7747c..dd4e1080d90 100644
--- a/chromium/third_party/dawn/src/tint/sem/depth_multisampled_texture_type.cc
+++ b/chromium/third_party/dawn/src/tint/sem/depth_multisampled_texture.cc
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-#include "src/tint/sem/depth_multisampled_texture_type.h"
+#include "src/tint/sem/depth_multisampled_texture.h"
#include "src/tint/program_builder.h"
#include "src/tint/utils/hash.h"
@@ -23,37 +23,34 @@ namespace tint::sem {
namespace {
bool IsValidDepthDimension(ast::TextureDimension dim) {
- return dim == ast::TextureDimension::k2d;
+ return dim == ast::TextureDimension::k2d;
}
} // namespace
-DepthMultisampledTexture::DepthMultisampledTexture(ast::TextureDimension dim)
- : Base(dim) {
- TINT_ASSERT(Semantic, IsValidDepthDimension(dim));
+DepthMultisampledTexture::DepthMultisampledTexture(ast::TextureDimension dim) : Base(dim) {
+ TINT_ASSERT(Semantic, IsValidDepthDimension(dim));
}
-DepthMultisampledTexture::DepthMultisampledTexture(DepthMultisampledTexture&&) =
- default;
+DepthMultisampledTexture::DepthMultisampledTexture(DepthMultisampledTexture&&) = default;
DepthMultisampledTexture::~DepthMultisampledTexture() = default;
size_t DepthMultisampledTexture::Hash() const {
- return utils::Hash(TypeInfo::Of<DepthMultisampledTexture>().full_hashcode,
- dim());
+ return utils::Hash(TypeInfo::Of<DepthMultisampledTexture>().full_hashcode, dim());
}
bool DepthMultisampledTexture::Equals(const sem::Type& other) const {
- if (auto* o = other.As<DepthMultisampledTexture>()) {
- return o->dim() == dim();
- }
- return false;
+ if (auto* o = other.As<DepthMultisampledTexture>()) {
+ return o->dim() == dim();
+ }
+ return false;
}
std::string DepthMultisampledTexture::FriendlyName(const SymbolTable&) const {
- std::ostringstream out;
- out << "texture_depth_multisampled_" << dim();
- return out.str();
+ std::ostringstream out;
+ out << "texture_depth_multisampled_" << dim();
+ return out.str();
}
} // namespace tint::sem
diff --git a/chromium/third_party/dawn/src/tint/sem/depth_multisampled_texture.h b/chromium/third_party/dawn/src/tint/sem/depth_multisampled_texture.h
new file mode 100644
index 00000000000..a81954e028a
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/sem/depth_multisampled_texture.h
@@ -0,0 +1,49 @@
+// Copyright 2021 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef SRC_TINT_SEM_DEPTH_MULTISAMPLED_TEXTURE_H_
+#define SRC_TINT_SEM_DEPTH_MULTISAMPLED_TEXTURE_H_
+
+#include <string>
+
+#include "src/tint/sem/texture.h"
+
+namespace tint::sem {
+
+/// A multisampled depth texture type.
+class DepthMultisampledTexture final : public Castable<DepthMultisampledTexture, Texture> {
+ public:
+ /// Constructor
+ /// @param dim the dimensionality of the texture
+ explicit DepthMultisampledTexture(ast::TextureDimension dim);
+ /// Move constructor
+ DepthMultisampledTexture(DepthMultisampledTexture&&);
+ ~DepthMultisampledTexture() override;
+
+ /// @returns a hash of the type.
+ size_t Hash() const override;
+
+ /// @param other the other type to compare against
+    /// @returns true if this type is equal to the given type
+ bool Equals(const Type& other) const override;
+
+ /// @param symbols the program's symbol table
+ /// @returns the name for this type that closely resembles how it would be
+ /// declared in WGSL.
+ std::string FriendlyName(const SymbolTable& symbols) const override;
+};
+
+} // namespace tint::sem
+
+#endif // SRC_TINT_SEM_DEPTH_MULTISAMPLED_TEXTURE_H_
diff --git a/chromium/third_party/dawn/src/tint/sem/depth_multisampled_texture_test.cc b/chromium/third_party/dawn/src/tint/sem/depth_multisampled_texture_test.cc
new file mode 100644
index 00000000000..0ee3dfdc3d8
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/sem/depth_multisampled_texture_test.cc
@@ -0,0 +1,62 @@
+// Copyright 2021 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/tint/sem/depth_multisampled_texture.h"
+
+#include "src/tint/sem/test_helper.h"
+
+#include "src/tint/sem/external_texture.h"
+#include "src/tint/sem/sampled_texture.h"
+#include "src/tint/sem/storage_texture.h"
+
+namespace tint::sem {
+namespace {
+
+using DepthMultisampledTextureTest = TestHelper;
+
+TEST_F(DepthMultisampledTextureTest, Creation) {
+ auto* a = create<DepthMultisampledTexture>(ast::TextureDimension::k2d);
+ auto* b = create<DepthMultisampledTexture>(ast::TextureDimension::k2d);
+
+ EXPECT_EQ(a, b);
+}
+
+TEST_F(DepthMultisampledTextureTest, Hash) {
+ auto* a = create<DepthMultisampledTexture>(ast::TextureDimension::k2d);
+ auto* b = create<DepthMultisampledTexture>(ast::TextureDimension::k2d);
+
+ EXPECT_EQ(a->Hash(), b->Hash());
+}
+
+TEST_F(DepthMultisampledTextureTest, Equals) {
+ auto* a = create<DepthMultisampledTexture>(ast::TextureDimension::k2d);
+ auto* b = create<DepthMultisampledTexture>(ast::TextureDimension::k2d);
+
+ EXPECT_TRUE(a->Equals(*a));
+ EXPECT_TRUE(a->Equals(*b));
+ EXPECT_FALSE(a->Equals(Void{}));
+}
+
+TEST_F(DepthMultisampledTextureTest, Dim) {
+ DepthMultisampledTexture d(ast::TextureDimension::k2d);
+ EXPECT_EQ(d.dim(), ast::TextureDimension::k2d);
+}
+
+TEST_F(DepthMultisampledTextureTest, FriendlyName) {
+ DepthMultisampledTexture d(ast::TextureDimension::k2d);
+ EXPECT_EQ(d.FriendlyName(Symbols()), "texture_depth_multisampled_2d");
+}
+
+} // namespace
+} // namespace tint::sem
diff --git a/chromium/third_party/dawn/src/tint/sem/depth_multisampled_texture_type.h b/chromium/third_party/dawn/src/tint/sem/depth_multisampled_texture_type.h
deleted file mode 100644
index de234176b16..00000000000
--- a/chromium/third_party/dawn/src/tint/sem/depth_multisampled_texture_type.h
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2021 The Tint Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef SRC_TINT_SEM_DEPTH_MULTISAMPLED_TEXTURE_TYPE_H_
-#define SRC_TINT_SEM_DEPTH_MULTISAMPLED_TEXTURE_TYPE_H_
-
-#include <string>
-
-#include "src/tint/sem/texture_type.h"
-
-namespace tint::sem {
-
-/// A multisampled depth texture type.
-class DepthMultisampledTexture final
- : public Castable<DepthMultisampledTexture, Texture> {
- public:
- /// Constructor
- /// @param dim the dimensionality of the texture
- explicit DepthMultisampledTexture(ast::TextureDimension dim);
- /// Move constructor
- DepthMultisampledTexture(DepthMultisampledTexture&&);
- ~DepthMultisampledTexture() override;
-
- /// @returns a hash of the type.
- size_t Hash() const override;
-
- /// @param other the other type to compare against
- /// @returns true if the this type is equal to the given type
- bool Equals(const Type& other) const override;
-
- /// @param symbols the program's symbol table
- /// @returns the name for this type that closely resembles how it would be
- /// declared in WGSL.
- std::string FriendlyName(const SymbolTable& symbols) const override;
-};
-
-} // namespace tint::sem
-
-#endif // SRC_TINT_SEM_DEPTH_MULTISAMPLED_TEXTURE_TYPE_H_
diff --git a/chromium/third_party/dawn/src/tint/sem/depth_multisampled_texture_type_test.cc b/chromium/third_party/dawn/src/tint/sem/depth_multisampled_texture_type_test.cc
deleted file mode 100644
index ea929863846..00000000000
--- a/chromium/third_party/dawn/src/tint/sem/depth_multisampled_texture_type_test.cc
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright 2021 The Tint Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "src/tint/sem/depth_multisampled_texture_type.h"
-
-#include "src/tint/sem/test_helper.h"
-
-#include "src/tint/sem/external_texture_type.h"
-#include "src/tint/sem/sampled_texture_type.h"
-#include "src/tint/sem/storage_texture_type.h"
-
-namespace tint::sem {
-namespace {
-
-using DepthMultisampledTextureTest = TestHelper;
-
-TEST_F(DepthMultisampledTextureTest, Creation) {
- auto* a = create<DepthMultisampledTexture>(ast::TextureDimension::k2d);
- auto* b = create<DepthMultisampledTexture>(ast::TextureDimension::k2d);
-
- EXPECT_EQ(a, b);
-}
-
-TEST_F(DepthMultisampledTextureTest, Hash) {
- auto* a = create<DepthMultisampledTexture>(ast::TextureDimension::k2d);
- auto* b = create<DepthMultisampledTexture>(ast::TextureDimension::k2d);
-
- EXPECT_EQ(a->Hash(), b->Hash());
-}
-
-TEST_F(DepthMultisampledTextureTest, Equals) {
- auto* a = create<DepthMultisampledTexture>(ast::TextureDimension::k2d);
- auto* b = create<DepthMultisampledTexture>(ast::TextureDimension::k2d);
-
- EXPECT_TRUE(a->Equals(*a));
- EXPECT_TRUE(a->Equals(*b));
- EXPECT_FALSE(a->Equals(Void{}));
-}
-
-TEST_F(DepthMultisampledTextureTest, Dim) {
- DepthMultisampledTexture d(ast::TextureDimension::k2d);
- EXPECT_EQ(d.dim(), ast::TextureDimension::k2d);
-}
-
-TEST_F(DepthMultisampledTextureTest, FriendlyName) {
- DepthMultisampledTexture d(ast::TextureDimension::k2d);
- EXPECT_EQ(d.FriendlyName(Symbols()), "texture_depth_multisampled_2d");
-}
-
-} // namespace
-} // namespace tint::sem
diff --git a/chromium/third_party/dawn/src/tint/sem/depth_texture_type.cc b/chromium/third_party/dawn/src/tint/sem/depth_texture.cc
index 1ff00604276..664a721fccd 100644
--- a/chromium/third_party/dawn/src/tint/sem/depth_texture_type.cc
+++ b/chromium/third_party/dawn/src/tint/sem/depth_texture.cc
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-#include "src/tint/sem/depth_texture_type.h"
+#include "src/tint/sem/depth_texture.h"
#include "src/tint/program_builder.h"
#include "src/tint/utils/hash.h"
@@ -23,16 +23,14 @@ namespace tint::sem {
namespace {
bool IsValidDepthDimension(ast::TextureDimension dim) {
- return dim == ast::TextureDimension::k2d ||
- dim == ast::TextureDimension::k2dArray ||
- dim == ast::TextureDimension::kCube ||
- dim == ast::TextureDimension::kCubeArray;
+ return dim == ast::TextureDimension::k2d || dim == ast::TextureDimension::k2dArray ||
+ dim == ast::TextureDimension::kCube || dim == ast::TextureDimension::kCubeArray;
}
} // namespace
DepthTexture::DepthTexture(ast::TextureDimension dim) : Base(dim) {
- TINT_ASSERT(Semantic, IsValidDepthDimension(dim));
+ TINT_ASSERT(Semantic, IsValidDepthDimension(dim));
}
DepthTexture::DepthTexture(DepthTexture&&) = default;
@@ -40,20 +38,20 @@ DepthTexture::DepthTexture(DepthTexture&&) = default;
DepthTexture::~DepthTexture() = default;
size_t DepthTexture::Hash() const {
- return utils::Hash(TypeInfo::Of<DepthTexture>().full_hashcode, dim());
+ return utils::Hash(TypeInfo::Of<DepthTexture>().full_hashcode, dim());
}
bool DepthTexture::Equals(const sem::Type& other) const {
- if (auto* o = other.As<DepthTexture>()) {
- return o->dim() == dim();
- }
- return false;
+ if (auto* o = other.As<DepthTexture>()) {
+ return o->dim() == dim();
+ }
+ return false;
}
std::string DepthTexture::FriendlyName(const SymbolTable&) const {
- std::ostringstream out;
- out << "texture_depth_" << dim();
- return out.str();
+ std::ostringstream out;
+ out << "texture_depth_" << dim();
+ return out.str();
}
} // namespace tint::sem
diff --git a/chromium/third_party/dawn/src/tint/sem/depth_texture.h b/chromium/third_party/dawn/src/tint/sem/depth_texture.h
new file mode 100644
index 00000000000..9a2e6d0eda3
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/sem/depth_texture.h
@@ -0,0 +1,49 @@
+// Copyright 2020 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef SRC_TINT_SEM_DEPTH_TEXTURE_H_
+#define SRC_TINT_SEM_DEPTH_TEXTURE_H_
+
+#include <string>
+
+#include "src/tint/sem/texture.h"
+
+namespace tint::sem {
+
+/// A depth texture type.
+class DepthTexture final : public Castable<DepthTexture, Texture> {
+ public:
+ /// Constructor
+ /// @param dim the dimensionality of the texture
+ explicit DepthTexture(ast::TextureDimension dim);
+ /// Move constructor
+ DepthTexture(DepthTexture&&);
+ ~DepthTexture() override;
+
+ /// @returns a hash of the type.
+ size_t Hash() const override;
+
+ /// @param other the other type to compare against
+ /// @returns true if this type is equal to the given type
+ bool Equals(const Type& other) const override;
+
+ /// @param symbols the program's symbol table
+ /// @returns the name for this type that closely resembles how it would be
+ /// declared in WGSL.
+ std::string FriendlyName(const SymbolTable& symbols) const override;
+};
+
+} // namespace tint::sem
+
+#endif // SRC_TINT_SEM_DEPTH_TEXTURE_H_
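A similar hedged sketch for DepthTexture, grounded in the depth_texture.cc hunk above: the constructor asserts that the dimension is one of 2d, 2d_array, cube, or cube_array, and FriendlyName() reports the WGSL spelling. Assumes ProgramBuilder as in the tests that follow.

    #include <cassert>

    #include "src/tint/program_builder.h"
    #include "src/tint/sem/depth_texture.h"

    void DepthTextureSketch() {
        tint::ProgramBuilder b;
        // Only 2d, 2d_array, cube and cube_array pass IsValidDepthDimension();
        // other dimensions trip the TINT_ASSERT in the constructor.
        auto* cube = b.create<tint::sem::DepthTexture>(tint::ast::TextureDimension::kCube);
        assert(cube->dim() == tint::ast::TextureDimension::kCube);
        assert(cube->FriendlyName(b.Symbols()) == "texture_depth_cube");
    }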
diff --git a/chromium/third_party/dawn/src/tint/sem/depth_texture_test.cc b/chromium/third_party/dawn/src/tint/sem/depth_texture_test.cc
new file mode 100644
index 00000000000..7a869fb278f
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/sem/depth_texture_test.cc
@@ -0,0 +1,76 @@
+// Copyright 2020 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/tint/sem/depth_texture.h"
+
+#include "src/tint/sem/test_helper.h"
+
+#include "src/tint/sem/external_texture.h"
+#include "src/tint/sem/sampled_texture.h"
+#include "src/tint/sem/storage_texture.h"
+
+namespace tint::sem {
+namespace {
+
+using DepthTextureTest = TestHelper;
+
+TEST_F(DepthTextureTest, Creation) {
+ auto* a = create<DepthTexture>(ast::TextureDimension::k2d);
+ auto* b = create<DepthTexture>(ast::TextureDimension::k2d);
+ auto* c = create<DepthTexture>(ast::TextureDimension::k2dArray);
+
+ EXPECT_EQ(a, b);
+ EXPECT_NE(a, c);
+}
+
+TEST_F(DepthTextureTest, Hash) {
+ auto* a = create<DepthTexture>(ast::TextureDimension::k2d);
+ auto* b = create<DepthTexture>(ast::TextureDimension::k2d);
+ auto* c = create<DepthTexture>(ast::TextureDimension::k2dArray);
+
+ EXPECT_EQ(a->Hash(), b->Hash());
+ EXPECT_NE(a->Hash(), c->Hash());
+}
+
+TEST_F(DepthTextureTest, Equals) {
+ auto* a = create<DepthTexture>(ast::TextureDimension::k2d);
+ auto* b = create<DepthTexture>(ast::TextureDimension::k2d);
+ auto* c = create<DepthTexture>(ast::TextureDimension::k2dArray);
+
+ EXPECT_TRUE(a->Equals(*b));
+ EXPECT_FALSE(a->Equals(*c));
+ EXPECT_FALSE(a->Equals(Void{}));
+}
+
+TEST_F(DepthTextureTest, IsTexture) {
+ DepthTexture d(ast::TextureDimension::kCube);
+ Texture* ty = &d;
+ EXPECT_TRUE(ty->Is<DepthTexture>());
+ EXPECT_FALSE(ty->Is<ExternalTexture>());
+ EXPECT_FALSE(ty->Is<SampledTexture>());
+ EXPECT_FALSE(ty->Is<StorageTexture>());
+}
+
+TEST_F(DepthTextureTest, Dim) {
+ DepthTexture d(ast::TextureDimension::kCube);
+ EXPECT_EQ(d.dim(), ast::TextureDimension::kCube);
+}
+
+TEST_F(DepthTextureTest, FriendlyName) {
+ DepthTexture d(ast::TextureDimension::kCube);
+ EXPECT_EQ(d.FriendlyName(Symbols()), "texture_depth_cube");
+}
+
+} // namespace
+} // namespace tint::sem
diff --git a/chromium/third_party/dawn/src/tint/sem/depth_texture_type.h b/chromium/third_party/dawn/src/tint/sem/depth_texture_type.h
deleted file mode 100644
index 0f257857d91..00000000000
--- a/chromium/third_party/dawn/src/tint/sem/depth_texture_type.h
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2020 The Tint Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef SRC_TINT_SEM_DEPTH_TEXTURE_TYPE_H_
-#define SRC_TINT_SEM_DEPTH_TEXTURE_TYPE_H_
-
-#include <string>
-
-#include "src/tint/sem/texture_type.h"
-
-namespace tint::sem {
-
-/// A depth texture type.
-class DepthTexture final : public Castable<DepthTexture, Texture> {
- public:
- /// Constructor
- /// @param dim the dimensionality of the texture
- explicit DepthTexture(ast::TextureDimension dim);
- /// Move constructor
- DepthTexture(DepthTexture&&);
- ~DepthTexture() override;
-
- /// @returns a hash of the type.
- size_t Hash() const override;
-
- /// @param other the other type to compare against
- /// @returns true if the this type is equal to the given type
- bool Equals(const Type& other) const override;
-
- /// @param symbols the program's symbol table
- /// @returns the name for this type that closely resembles how it would be
- /// declared in WGSL.
- std::string FriendlyName(const SymbolTable& symbols) const override;
-};
-
-} // namespace tint::sem
-
-#endif // SRC_TINT_SEM_DEPTH_TEXTURE_TYPE_H_
diff --git a/chromium/third_party/dawn/src/tint/sem/depth_texture_type_test.cc b/chromium/third_party/dawn/src/tint/sem/depth_texture_type_test.cc
deleted file mode 100644
index 28bb84ad03e..00000000000
--- a/chromium/third_party/dawn/src/tint/sem/depth_texture_type_test.cc
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright 2020 The Tint Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "src/tint/sem/depth_texture_type.h"
-
-#include "src/tint/sem/test_helper.h"
-
-#include "src/tint/sem/external_texture_type.h"
-#include "src/tint/sem/sampled_texture_type.h"
-#include "src/tint/sem/storage_texture_type.h"
-
-namespace tint::sem {
-namespace {
-
-using DepthTextureTest = TestHelper;
-
-TEST_F(DepthTextureTest, Creation) {
- auto* a = create<DepthTexture>(ast::TextureDimension::k2d);
- auto* b = create<DepthTexture>(ast::TextureDimension::k2d);
- auto* c = create<DepthTexture>(ast::TextureDimension::k2dArray);
-
- EXPECT_EQ(a, b);
- EXPECT_NE(a, c);
-}
-
-TEST_F(DepthTextureTest, Hash) {
- auto* a = create<DepthTexture>(ast::TextureDimension::k2d);
- auto* b = create<DepthTexture>(ast::TextureDimension::k2d);
- auto* c = create<DepthTexture>(ast::TextureDimension::k2dArray);
-
- EXPECT_EQ(a->Hash(), b->Hash());
- EXPECT_NE(a->Hash(), c->Hash());
-}
-
-TEST_F(DepthTextureTest, Equals) {
- auto* a = create<DepthTexture>(ast::TextureDimension::k2d);
- auto* b = create<DepthTexture>(ast::TextureDimension::k2d);
- auto* c = create<DepthTexture>(ast::TextureDimension::k2dArray);
-
- EXPECT_TRUE(a->Equals(*b));
- EXPECT_FALSE(a->Equals(*c));
- EXPECT_FALSE(a->Equals(Void{}));
-}
-
-TEST_F(DepthTextureTest, IsTexture) {
- DepthTexture d(ast::TextureDimension::kCube);
- Texture* ty = &d;
- EXPECT_TRUE(ty->Is<DepthTexture>());
- EXPECT_FALSE(ty->Is<ExternalTexture>());
- EXPECT_FALSE(ty->Is<SampledTexture>());
- EXPECT_FALSE(ty->Is<StorageTexture>());
-}
-
-TEST_F(DepthTextureTest, Dim) {
- DepthTexture d(ast::TextureDimension::kCube);
- EXPECT_EQ(d.dim(), ast::TextureDimension::kCube);
-}
-
-TEST_F(DepthTextureTest, FriendlyName) {
- DepthTexture d(ast::TextureDimension::kCube);
- EXPECT_EQ(d.FriendlyName(Symbols()), "texture_depth_cube");
-}
-
-} // namespace
-} // namespace tint::sem
diff --git a/chromium/third_party/dawn/src/tint/sem/expression.cc b/chromium/third_party/dawn/src/tint/sem/expression.cc
index a5f2b1bb9d1..57ec68bf213 100644
--- a/chromium/third_party/dawn/src/tint/sem/expression.cc
+++ b/chromium/third_party/dawn/src/tint/sem/expression.cc
@@ -16,6 +16,8 @@
#include <utility>
+#include "src/tint/sem/materialize.h"
+
TINT_INSTANTIATE_TYPEINFO(tint::sem::Expression);
namespace tint::sem {
@@ -24,15 +26,24 @@ Expression::Expression(const ast::Expression* declaration,
const sem::Type* type,
const Statement* statement,
Constant constant,
- bool has_side_effects)
+ bool has_side_effects,
+ const Variable* source_var /* = nullptr */)
: declaration_(declaration),
+ source_variable_(source_var),
type_(type),
statement_(statement),
constant_(std::move(constant)),
has_side_effects_(has_side_effects) {
- TINT_ASSERT(Semantic, type_);
+ TINT_ASSERT(Semantic, type_);
}
Expression::~Expression() = default;
+const Expression* Expression::UnwrapMaterialize() const {
+ if (auto* m = As<Materialize>()) {
+ return m->Expr();
+ }
+ return this;
+}
+
} // namespace tint::sem
diff --git a/chromium/third_party/dawn/src/tint/sem/expression.h b/chromium/third_party/dawn/src/tint/sem/expression.h
index 23eae1bcb32..a7838516bb8 100644
--- a/chromium/third_party/dawn/src/tint/sem/expression.h
+++ b/chromium/third_party/dawn/src/tint/sem/expression.h
@@ -24,58 +24,73 @@
namespace tint::sem {
class Statement;
class Type;
+class Variable;
} // namespace tint::sem
namespace tint::sem {
/// Expression holds the semantic information for expression nodes.
class Expression : public Castable<Expression, Node> {
- public:
- /// Constructor
- /// @param declaration the AST node
- /// @param type the resolved type of the expression
- /// @param statement the statement that owns this expression
- /// @param constant the constant value of the expression. May be invalid
- /// @param has_side_effects true if this expression may have side-effects
- Expression(const ast::Expression* declaration,
- const sem::Type* type,
- const Statement* statement,
- Constant constant,
- bool has_side_effects);
-
- /// Destructor
- ~Expression() override;
-
- /// @returns the AST node
- const ast::Expression* Declaration() const { return declaration_; }
-
- /// @return the resolved type of the expression
- const sem::Type* Type() const { return type_; }
-
- /// @return the statement that owns this expression
- const Statement* Stmt() const { return statement_; }
-
- /// @return the constant value of this expression
- const Constant& ConstantValue() const { return constant_; }
-
- /// @return the behaviors of this statement
- const sem::Behaviors& Behaviors() const { return behaviors_; }
-
- /// @return the behaviors of this statement
- sem::Behaviors& Behaviors() { return behaviors_; }
-
- /// @return true of this expression may have side effects
- bool HasSideEffects() const { return has_side_effects_; }
-
- protected:
- /// The AST expression node for this semantic expression
- const ast::Expression* const declaration_;
-
- private:
- const sem::Type* const type_;
- const Statement* const statement_;
- const Constant constant_;
- sem::Behaviors behaviors_{sem::Behavior::kNext};
- const bool has_side_effects_;
+ public:
+ /// Constructor
+ /// @param declaration the AST node
+ /// @param type the resolved type of the expression
+ /// @param statement the statement that owns this expression
+ /// @param constant the constant value of the expression. May be invalid
+ /// @param has_side_effects true if this expression may have side-effects
+ /// @param source_var the (optional) source variable for this expression
+ Expression(const ast::Expression* declaration,
+ const sem::Type* type,
+ const Statement* statement,
+ Constant constant,
+ bool has_side_effects,
+ const Variable* source_var = nullptr);
+
+ /// Destructor
+ ~Expression() override;
+
+ /// @returns the AST node
+ const ast::Expression* Declaration() const { return declaration_; }
+
+ /// @return the resolved type of the expression
+ const sem::Type* Type() const { return type_; }
+
+ /// @return the statement that owns this expression
+ const Statement* Stmt() const { return statement_; }
+
+ /// @return the constant value of this expression
+ const Constant& ConstantValue() const { return constant_; }
+
+ /// Returns the variable or parameter that this expression derives from.
+ /// For reference and pointer expressions, this will either be the originating
+ /// variable or a function parameter. For other types of expressions, it will
+ /// either be the parameter or constant declaration, or nullptr.
+ /// @return the source variable of this expression, or nullptr
+ const Variable* SourceVariable() const { return source_variable_; }
+
+ /// @return the behaviors of this statement
+ const sem::Behaviors& Behaviors() const { return behaviors_; }
+
+ /// @return the behaviors of this statement
+ sem::Behaviors& Behaviors() { return behaviors_; }
+
+ /// @return true if this expression may have side effects
+ bool HasSideEffects() const { return has_side_effects_; }
+
+ /// @return the inner expression node if this is a Materialize, otherwise this.
+ const Expression* UnwrapMaterialize() const;
+
+ protected:
+ /// The AST expression node for this semantic expression
+ const ast::Expression* const declaration_;
+ /// The source variable for this semantic expression, or nullptr
+ const Variable* source_variable_;
+
+ private:
+ const sem::Type* const type_;
+ const Statement* const statement_;
+ const Constant constant_;
+ sem::Behaviors behaviors_{sem::Behavior::kNext};
+ const bool has_side_effects_;
};
} // namespace tint::sem
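The two additions to sem::Expression above, SourceVariable() and UnwrapMaterialize(), are typically used together when walking resolved expressions. A hedged sketch with illustrative names; the expression pointer would come from the program's semantic info (e.g. program.Sem().Get(ast_expr)).

    #include "src/tint/sem/expression.h"
    #include "src/tint/sem/variable.h"

    void Inspect(const tint::sem::Expression* expr) {
        // Strip a wrapping sem::Materialize node, if present, to reach the
        // original expression.
        const tint::sem::Expression* inner = expr->UnwrapMaterialize();
        // Non-null for reference/pointer expressions (the originating variable
        // or parameter); may be nullptr for other kinds of expression.
        if (const tint::sem::Variable* var = inner->SourceVariable()) {
            (void)var;  // e.g. var->Declaration(), var->Type()
        }
    }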
diff --git a/chromium/third_party/dawn/src/tint/sem/expression_test.cc b/chromium/third_party/dawn/src/tint/sem/expression_test.cc
new file mode 100644
index 00000000000..fc1adeb88d4
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/sem/expression_test.cc
@@ -0,0 +1,39 @@
+// Copyright 2022 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/tint/sem/expression.h"
+
+#include "src/tint/sem/test_helper.h"
+
+#include "src/tint/sem/materialize.h"
+
+using namespace tint::number_suffixes; // NOLINT
+
+namespace tint::sem {
+namespace {
+
+using ExpressionTest = TestHelper;
+
+TEST_F(ExpressionTest, UnwrapMaterialize) {
+ auto* a = create<Expression>(/* declaration */ nullptr, create<I32>(), /* statement */ nullptr,
+ Constant{},
+ /* has_side_effects */ false, /* source_var */ nullptr);
+ auto* b = create<Materialize>(a, /* statement */ nullptr, Constant{create<I32>(), {1_a}});
+
+ EXPECT_EQ(a, a->UnwrapMaterialize());
+ EXPECT_EQ(a, b->UnwrapMaterialize());
+}
+
+} // namespace
+} // namespace tint::sem
diff --git a/chromium/third_party/dawn/src/tint/sem/external_texture_type.cc b/chromium/third_party/dawn/src/tint/sem/external_texture.cc
index adfd3734d5f..d677f65937c 100644
--- a/chromium/third_party/dawn/src/tint/sem/external_texture_type.cc
+++ b/chromium/third_party/dawn/src/tint/sem/external_texture.cc
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-#include "src/tint/sem/external_texture_type.h"
+#include "src/tint/sem/external_texture.h"
#include "src/tint/program_builder.h"
@@ -27,15 +27,15 @@ ExternalTexture::ExternalTexture(ExternalTexture&&) = default;
ExternalTexture::~ExternalTexture() = default;
size_t ExternalTexture::Hash() const {
- return static_cast<size_t>(TypeInfo::Of<ExternalTexture>().full_hashcode);
+ return static_cast<size_t>(TypeInfo::Of<ExternalTexture>().full_hashcode);
}
bool ExternalTexture::Equals(const sem::Type& other) const {
- return other.Is<ExternalTexture>();
+ return other.Is<ExternalTexture>();
}
std::string ExternalTexture::FriendlyName(const SymbolTable&) const {
- return "texture_external";
+ return "texture_external";
}
} // namespace tint::sem
diff --git a/chromium/third_party/dawn/src/tint/sem/external_texture.h b/chromium/third_party/dawn/src/tint/sem/external_texture.h
new file mode 100644
index 00000000000..3cfbd415fa9
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/sem/external_texture.h
@@ -0,0 +1,49 @@
+// Copyright 2021 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef SRC_TINT_SEM_EXTERNAL_TEXTURE_H_
+#define SRC_TINT_SEM_EXTERNAL_TEXTURE_H_
+
+#include <string>
+
+#include "src/tint/sem/texture.h"
+
+namespace tint::sem {
+
+/// An external texture type
+class ExternalTexture final : public Castable<ExternalTexture, Texture> {
+ public:
+ /// Constructor
+ ExternalTexture();
+
+ /// Move constructor
+ ExternalTexture(ExternalTexture&&);
+ ~ExternalTexture() override;
+
+ /// @returns a hash of the type.
+ size_t Hash() const override;
+
+ /// @param other the other type to compare against
+ /// @returns true if this type is equal to the given type
+ bool Equals(const Type& other) const override;
+
+ /// @param symbols the program's symbol table
+ /// @returns the name for this type that closely resembles how it would be
+ /// declared in WGSL.
+ std::string FriendlyName(const SymbolTable& symbols) const override;
+};
+
+} // namespace tint::sem
+
+#endif // SRC_TINT_SEM_EXTERNAL_TEXTURE_H_
diff --git a/chromium/third_party/dawn/src/tint/sem/external_texture_test.cc b/chromium/third_party/dawn/src/tint/sem/external_texture_test.cc
new file mode 100644
index 00000000000..aecd58ac5d4
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/sem/external_texture_test.cc
@@ -0,0 +1,70 @@
+// Copyright 2021 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/tint/sem/external_texture.h"
+
+#include "src/tint/sem/depth_texture.h"
+#include "src/tint/sem/multisampled_texture.h"
+#include "src/tint/sem/sampled_texture.h"
+#include "src/tint/sem/storage_texture.h"
+#include "src/tint/sem/test_helper.h"
+
+namespace tint::sem {
+namespace {
+
+using ExternalTextureTest = TestHelper;
+
+TEST_F(ExternalTextureTest, Creation) {
+ auto* a = create<ExternalTexture>();
+ auto* b = create<ExternalTexture>();
+ EXPECT_EQ(a, b);
+}
+
+TEST_F(ExternalTextureTest, Hash) {
+ auto* a = create<ExternalTexture>();
+ auto* b = create<ExternalTexture>();
+ EXPECT_EQ(a->Hash(), b->Hash());
+}
+
+TEST_F(ExternalTextureTest, Equals) {
+ auto* a = create<ExternalTexture>();
+ auto* b = create<ExternalTexture>();
+ EXPECT_TRUE(a->Equals(*b));
+ EXPECT_FALSE(a->Equals(Void{}));
+}
+
+TEST_F(ExternalTextureTest, IsTexture) {
+ F32 f32;
+ ExternalTexture s;
+ Texture* ty = &s;
+ EXPECT_FALSE(ty->Is<DepthTexture>());
+ EXPECT_TRUE(ty->Is<ExternalTexture>());
+ EXPECT_FALSE(ty->Is<MultisampledTexture>());
+ EXPECT_FALSE(ty->Is<SampledTexture>());
+ EXPECT_FALSE(ty->Is<StorageTexture>());
+}
+
+TEST_F(ExternalTextureTest, Dim) {
+ F32 f32;
+ ExternalTexture s;
+ EXPECT_EQ(s.dim(), ast::TextureDimension::k2d);
+}
+
+TEST_F(ExternalTextureTest, FriendlyName) {
+ ExternalTexture s;
+ EXPECT_EQ(s.FriendlyName(Symbols()), "texture_external");
+}
+
+} // namespace
+} // namespace tint::sem
diff --git a/chromium/third_party/dawn/src/tint/sem/external_texture_type.h b/chromium/third_party/dawn/src/tint/sem/external_texture_type.h
deleted file mode 100644
index 4a070054e20..00000000000
--- a/chromium/third_party/dawn/src/tint/sem/external_texture_type.h
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2021 The Tint Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef SRC_TINT_SEM_EXTERNAL_TEXTURE_TYPE_H_
-#define SRC_TINT_SEM_EXTERNAL_TEXTURE_TYPE_H_
-
-#include <string>
-
-#include "src/tint/sem/texture_type.h"
-
-namespace tint::sem {
-
-/// An external texture type
-class ExternalTexture final : public Castable<ExternalTexture, Texture> {
- public:
- /// Constructor
- ExternalTexture();
-
- /// Move constructor
- ExternalTexture(ExternalTexture&&);
- ~ExternalTexture() override;
-
- /// @returns a hash of the type.
- size_t Hash() const override;
-
- /// @param other the other type to compare against
- /// @returns true if the this type is equal to the given type
- bool Equals(const Type& other) const override;
-
- /// @param symbols the program's symbol table
- /// @returns the name for this type that closely resembles how it would be
- /// declared in WGSL.
- std::string FriendlyName(const SymbolTable& symbols) const override;
-};
-
-} // namespace tint::sem
-
-#endif // SRC_TINT_SEM_EXTERNAL_TEXTURE_TYPE_H_
diff --git a/chromium/third_party/dawn/src/tint/sem/external_texture_type_test.cc b/chromium/third_party/dawn/src/tint/sem/external_texture_type_test.cc
deleted file mode 100644
index 439b3bc006c..00000000000
--- a/chromium/third_party/dawn/src/tint/sem/external_texture_type_test.cc
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright 2021 The Tint Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "src/tint/sem/external_texture_type.h"
-
-#include "src/tint/sem/depth_texture_type.h"
-#include "src/tint/sem/multisampled_texture_type.h"
-#include "src/tint/sem/sampled_texture_type.h"
-#include "src/tint/sem/storage_texture_type.h"
-#include "src/tint/sem/test_helper.h"
-
-namespace tint::sem {
-namespace {
-
-using ExternalTextureTest = TestHelper;
-
-TEST_F(ExternalTextureTest, Creation) {
- auto* a = create<ExternalTexture>();
- auto* b = create<ExternalTexture>();
- EXPECT_EQ(a, b);
-}
-
-TEST_F(ExternalTextureTest, Hash) {
- auto* a = create<ExternalTexture>();
- auto* b = create<ExternalTexture>();
- EXPECT_EQ(a->Hash(), b->Hash());
-}
-
-TEST_F(ExternalTextureTest, Equals) {
- auto* a = create<ExternalTexture>();
- auto* b = create<ExternalTexture>();
- EXPECT_TRUE(a->Equals(*b));
- EXPECT_FALSE(a->Equals(Void{}));
-}
-
-TEST_F(ExternalTextureTest, IsTexture) {
- F32 f32;
- ExternalTexture s;
- Texture* ty = &s;
- EXPECT_FALSE(ty->Is<DepthTexture>());
- EXPECT_TRUE(ty->Is<ExternalTexture>());
- EXPECT_FALSE(ty->Is<MultisampledTexture>());
- EXPECT_FALSE(ty->Is<SampledTexture>());
- EXPECT_FALSE(ty->Is<StorageTexture>());
-}
-
-TEST_F(ExternalTextureTest, Dim) {
- F32 f32;
- ExternalTexture s;
- EXPECT_EQ(s.dim(), ast::TextureDimension::k2d);
-}
-
-TEST_F(ExternalTextureTest, FriendlyName) {
- ExternalTexture s;
- EXPECT_EQ(s.FriendlyName(Symbols()), "texture_external");
-}
-
-} // namespace
-} // namespace tint::sem
diff --git a/chromium/third_party/dawn/src/tint/sem/f16.cc b/chromium/third_party/dawn/src/tint/sem/f16.cc
new file mode 100644
index 00000000000..7da65fa91d1
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/sem/f16.cc
@@ -0,0 +1,55 @@
+// Copyright 2022 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/tint/sem/f16.h"
+
+#include "src/tint/program_builder.h"
+
+TINT_INSTANTIATE_TYPEINFO(tint::sem::F16);
+
+namespace tint {
+namespace sem {
+
+F16::F16() = default;
+
+F16::F16(F16&&) = default;
+
+F16::~F16() = default;
+
+size_t F16::Hash() const {
+ return static_cast<size_t>(TypeInfo::Of<F16>().full_hashcode);
+}
+
+bool F16::Equals(const Type& other) const {
+ return other.Is<F16>();
+}
+
+std::string F16::FriendlyName(const SymbolTable&) const {
+ return "f16";
+}
+
+bool F16::IsConstructible() const {
+ return true;
+}
+
+uint32_t F16::Size() const {
+ return 2;
+}
+
+uint32_t F16::Align() const {
+ return 2;
+}
+
+} // namespace sem
+} // namespace tint
diff --git a/chromium/third_party/dawn/src/tint/sem/f16.h b/chromium/third_party/dawn/src/tint/sem/f16.h
new file mode 100644
index 00000000000..72984c13e7f
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/sem/f16.h
@@ -0,0 +1,58 @@
+// Copyright 2022 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef SRC_TINT_SEM_F16_H_
+#define SRC_TINT_SEM_F16_H_
+
+#include <string>
+
+#include "src/tint/sem/type.h"
+
+namespace tint::sem {
+
+/// A float 16 type
+class F16 : public Castable<F16, Type> {
+ public:
+ /// Constructor
+ F16();
+ /// Move constructor
+ F16(F16&&);
+ ~F16() override;
+
+ /// @returns a hash of the type.
+ size_t Hash() const override;
+
+ /// @param other the other type to compare against
+ /// @returns true if this type is equal to the given type
+ bool Equals(const Type& other) const override;
+
+ /// @param symbols the program's symbol table
+ /// @returns the name for this type that closely resembles how it would be
+ /// declared in WGSL.
+ std::string FriendlyName(const SymbolTable& symbols) const override;
+
+ /// @returns true if constructible as per
+ /// https://gpuweb.github.io/gpuweb/wgsl/#constructible-types
+ bool IsConstructible() const override;
+
+ /// @returns the size in bytes of the type.
+ uint32_t Size() const override;
+
+ /// @returns the alignment in bytes of the type.
+ uint32_t Align() const override;
+};
+
+} // namespace tint::sem
+
+#endif // SRC_TINT_SEM_F16_H_
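The new F16 type follows the same pattern as F32 but with a 2-byte size and alignment, as f16.cc above implements. A small sketch, assuming ProgramBuilder as in the tests:

    #include <cassert>

    #include "src/tint/program_builder.h"
    #include "src/tint/sem/f16.h"

    void F16Sketch() {
        tint::ProgramBuilder b;
        auto* f16 = b.create<tint::sem::F16>();
        // Half-precision float: 2-byte size and alignment, constructible.
        assert(f16->Size() == 2u && f16->Align() == 2u && f16->IsConstructible());
        assert(f16->FriendlyName(b.Symbols()) == "f16");
    }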
diff --git a/chromium/third_party/dawn/src/tint/sem/f16_test.cc b/chromium/third_party/dawn/src/tint/sem/f16_test.cc
new file mode 100644
index 00000000000..28fd0dab6be
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/sem/f16_test.cc
@@ -0,0 +1,48 @@
+// Copyright 2022 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/tint/sem/test_helper.h"
+#include "src/tint/sem/texture.h"
+
+namespace tint::sem {
+namespace {
+
+using F16Test = TestHelper;
+
+TEST_F(F16Test, Creation) {
+ auto* a = create<F16>();
+ auto* b = create<F16>();
+ EXPECT_EQ(a, b);
+}
+
+TEST_F(F16Test, Hash) {
+ auto* a = create<F16>();
+ auto* b = create<F16>();
+ EXPECT_EQ(a->Hash(), b->Hash());
+}
+
+TEST_F(F16Test, Equals) {
+ auto* a = create<F16>();
+ auto* b = create<F16>();
+ EXPECT_TRUE(a->Equals(*b));
+ EXPECT_FALSE(a->Equals(Void{}));
+}
+
+TEST_F(F16Test, FriendlyName) {
+ F16 f;
+ EXPECT_EQ(f.FriendlyName(Symbols()), "f16");
+}
+
+} // namespace
+} // namespace tint::sem
diff --git a/chromium/third_party/dawn/src/tint/sem/f32_type.cc b/chromium/third_party/dawn/src/tint/sem/f32.cc
index c5b013e3d43..83fffccbc37 100644
--- a/chromium/third_party/dawn/src/tint/sem/f32_type.cc
+++ b/chromium/third_party/dawn/src/tint/sem/f32.cc
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-#include "src/tint/sem/f32_type.h"
+#include "src/tint/sem/f32.h"
#include "src/tint/program_builder.h"
@@ -27,27 +27,27 @@ F32::F32(F32&&) = default;
F32::~F32() = default;
size_t F32::Hash() const {
- return static_cast<size_t>(TypeInfo::Of<F32>().full_hashcode);
+ return static_cast<size_t>(TypeInfo::Of<F32>().full_hashcode);
}
bool F32::Equals(const Type& other) const {
- return other.Is<F32>();
+ return other.Is<F32>();
}
std::string F32::FriendlyName(const SymbolTable&) const {
- return "f32";
+ return "f32";
}
bool F32::IsConstructible() const {
- return true;
+ return true;
}
uint32_t F32::Size() const {
- return 4;
+ return 4;
}
uint32_t F32::Align() const {
- return 4;
+ return 4;
}
} // namespace tint::sem
diff --git a/chromium/third_party/dawn/src/tint/sem/f32.h b/chromium/third_party/dawn/src/tint/sem/f32.h
new file mode 100644
index 00000000000..c7d7ad6c9c7
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/sem/f32.h
@@ -0,0 +1,58 @@
+// Copyright 2020 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef SRC_TINT_SEM_F32_H_
+#define SRC_TINT_SEM_F32_H_
+
+#include <string>
+
+#include "src/tint/sem/type.h"
+
+namespace tint::sem {
+
+/// A float 32 type
+class F32 final : public Castable<F32, Type> {
+ public:
+ /// Constructor
+ F32();
+ /// Move constructor
+ F32(F32&&);
+ ~F32() override;
+
+ /// @returns a hash of the type.
+ size_t Hash() const override;
+
+ /// @param other the other type to compare against
+ /// @returns true if this type is equal to the given type
+ bool Equals(const Type& other) const override;
+
+ /// @param symbols the program's symbol table
+ /// @returns the name for this type that closely resembles how it would be
+ /// declared in WGSL.
+ std::string FriendlyName(const SymbolTable& symbols) const override;
+
+ /// @returns true if constructible as per
+ /// https://gpuweb.github.io/gpuweb/wgsl/#constructible-types
+ bool IsConstructible() const override;
+
+ /// @returns the size in bytes of the type.
+ uint32_t Size() const override;
+
+ /// @returns the alignment in bytes of the type.
+ uint32_t Align() const override;
+};
+
+} // namespace tint::sem
+
+#endif // SRC_TINT_SEM_F32_H_
diff --git a/chromium/third_party/dawn/src/tint/sem/f32_type_test.cc b/chromium/third_party/dawn/src/tint/sem/f32_test.cc
index 7fd7b76a817..de4a3e89f6b 100644
--- a/chromium/third_party/dawn/src/tint/sem/f32_type_test.cc
+++ b/chromium/third_party/dawn/src/tint/sem/f32_test.cc
@@ -13,7 +13,7 @@
// limitations under the License.
#include "src/tint/sem/test_helper.h"
-#include "src/tint/sem/texture_type.h"
+#include "src/tint/sem/texture.h"
namespace tint::sem {
namespace {
@@ -21,27 +21,27 @@ namespace {
using F32Test = TestHelper;
TEST_F(F32Test, Creation) {
- auto* a = create<F32>();
- auto* b = create<F32>();
- EXPECT_EQ(a, b);
+ auto* a = create<F32>();
+ auto* b = create<F32>();
+ EXPECT_EQ(a, b);
}
TEST_F(F32Test, Hash) {
- auto* a = create<F32>();
- auto* b = create<F32>();
- EXPECT_EQ(a->Hash(), b->Hash());
+ auto* a = create<F32>();
+ auto* b = create<F32>();
+ EXPECT_EQ(a->Hash(), b->Hash());
}
TEST_F(F32Test, Equals) {
- auto* a = create<F32>();
- auto* b = create<F32>();
- EXPECT_TRUE(a->Equals(*b));
- EXPECT_FALSE(a->Equals(Void{}));
+ auto* a = create<F32>();
+ auto* b = create<F32>();
+ EXPECT_TRUE(a->Equals(*b));
+ EXPECT_FALSE(a->Equals(Void{}));
}
TEST_F(F32Test, FriendlyName) {
- F32 f;
- EXPECT_EQ(f.FriendlyName(Symbols()), "f32");
+ F32 f;
+ EXPECT_EQ(f.FriendlyName(Symbols()), "f32");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/sem/f32_type.h b/chromium/third_party/dawn/src/tint/sem/f32_type.h
deleted file mode 100644
index 0d0fabf6180..00000000000
--- a/chromium/third_party/dawn/src/tint/sem/f32_type.h
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2020 The Tint Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef SRC_TINT_SEM_F32_TYPE_H_
-#define SRC_TINT_SEM_F32_TYPE_H_
-
-#include <string>
-
-#include "src/tint/sem/type.h"
-
-namespace tint::sem {
-
-/// A float 32 type
-class F32 final : public Castable<F32, Type> {
- public:
- /// Constructor
- F32();
- /// Move constructor
- F32(F32&&);
- ~F32() override;
-
- /// @returns a hash of the type.
- size_t Hash() const override;
-
- /// @param other the other type to compare against
- /// @returns true if the this type is equal to the given type
- bool Equals(const Type& other) const override;
-
- /// @param symbols the program's symbol table
- /// @returns the name for this type that closely resembles how it would be
- /// declared in WGSL.
- std::string FriendlyName(const SymbolTable& symbols) const override;
-
- /// @returns true if constructible as per
- /// https://gpuweb.github.io/gpuweb/wgsl/#constructible-types
- bool IsConstructible() const override;
-
- /// @returns the size in bytes of the type.
- uint32_t Size() const override;
-
- /// @returns the alignment in bytes of the type.
- uint32_t Align() const override;
-};
-
-} // namespace tint::sem
-
-#endif // SRC_TINT_SEM_F32_TYPE_H_
diff --git a/chromium/third_party/dawn/src/tint/sem/for_loop_statement.cc b/chromium/third_party/dawn/src/tint/sem/for_loop_statement.cc
index 55996330ca7..1e6aa6dccb5 100644
--- a/chromium/third_party/dawn/src/tint/sem/for_loop_statement.cc
+++ b/chromium/third_party/dawn/src/tint/sem/for_loop_statement.cc
@@ -28,7 +28,7 @@ ForLoopStatement::ForLoopStatement(const ast::ForLoopStatement* declaration,
ForLoopStatement::~ForLoopStatement() = default;
const ast::ForLoopStatement* ForLoopStatement::Declaration() const {
- return static_cast<const ast::ForLoopStatement*>(Base::Declaration());
+ return static_cast<const ast::ForLoopStatement*>(Base::Declaration());
}
} // namespace tint::sem
diff --git a/chromium/third_party/dawn/src/tint/sem/for_loop_statement.h b/chromium/third_party/dawn/src/tint/sem/for_loop_statement.h
index cd7b7614ec1..9f7d62ad44a 100644
--- a/chromium/third_party/dawn/src/tint/sem/for_loop_statement.h
+++ b/chromium/third_party/dawn/src/tint/sem/for_loop_statement.h
@@ -28,32 +28,31 @@ class Expression;
namespace tint::sem {
/// Holds semantic information about a for-loop statement
-class ForLoopStatement final
- : public Castable<ForLoopStatement, CompoundStatement> {
- public:
- /// Constructor
- /// @param declaration the AST node for this for-loop statement
- /// @param parent the owning statement
- /// @param function the owning function
- ForLoopStatement(const ast::ForLoopStatement* declaration,
- const CompoundStatement* parent,
- const sem::Function* function);
-
- /// Destructor
- ~ForLoopStatement() override;
-
- /// @returns the AST node
- const ast::ForLoopStatement* Declaration() const;
-
- /// @returns the for-loop condition expression
- const Expression* Condition() const { return condition_; }
-
- /// Sets the for-loop condition expression
- /// @param condition the for-loop condition expression
- void SetCondition(const Expression* condition) { condition_ = condition; }
-
- private:
- const Expression* condition_ = nullptr;
+class ForLoopStatement final : public Castable<ForLoopStatement, CompoundStatement> {
+ public:
+ /// Constructor
+ /// @param declaration the AST node for this for-loop statement
+ /// @param parent the owning statement
+ /// @param function the owning function
+ ForLoopStatement(const ast::ForLoopStatement* declaration,
+ const CompoundStatement* parent,
+ const sem::Function* function);
+
+ /// Destructor
+ ~ForLoopStatement() override;
+
+ /// @returns the AST node
+ const ast::ForLoopStatement* Declaration() const;
+
+ /// @returns the for-loop condition expression
+ const Expression* Condition() const { return condition_; }
+
+ /// Sets the for-loop condition expression
+ /// @param condition the for-loop condition expression
+ void SetCondition(const Expression* condition) { condition_ = condition; }
+
+ private:
+ const Expression* condition_ = nullptr;
};
} // namespace tint::sem
diff --git a/chromium/third_party/dawn/src/tint/sem/function.cc b/chromium/third_party/dawn/src/tint/sem/function.cc
index 4ffda9a91b6..790933b94a6 100644
--- a/chromium/third_party/dawn/src/tint/sem/function.cc
+++ b/chromium/third_party/dawn/src/tint/sem/function.cc
@@ -15,11 +15,11 @@
#include "src/tint/sem/function.h"
#include "src/tint/ast/function.h"
-#include "src/tint/sem/depth_texture_type.h"
-#include "src/tint/sem/external_texture_type.h"
-#include "src/tint/sem/multisampled_texture_type.h"
-#include "src/tint/sem/sampled_texture_type.h"
-#include "src/tint/sem/storage_texture_type.h"
+#include "src/tint/sem/depth_texture.h"
+#include "src/tint/sem/external_texture.h"
+#include "src/tint/sem/multisampled_texture.h"
+#include "src/tint/sem/sampled_texture.h"
+#include "src/tint/sem/storage_texture.h"
#include "src/tint/sem/variable.h"
#include "src/tint/utils/to_const_ptr_vec.h"
@@ -32,164 +32,155 @@ Function::Function(const ast::Function* declaration,
std::vector<Parameter*> parameters)
: Base(return_type, utils::ToConstPtrVec(parameters)),
declaration_(declaration),
- workgroup_size_{WorkgroupDimension{1}, WorkgroupDimension{1},
- WorkgroupDimension{1}} {
- for (auto* parameter : parameters) {
- parameter->SetOwner(this);
- }
+ workgroup_size_{WorkgroupDimension{1}, WorkgroupDimension{1}, WorkgroupDimension{1}} {
+ for (auto* parameter : parameters) {
+ parameter->SetOwner(this);
+ }
}
Function::~Function() = default;
std::vector<std::pair<const Variable*, const ast::LocationAttribute*>>
Function::TransitivelyReferencedLocationVariables() const {
- std::vector<std::pair<const Variable*, const ast::LocationAttribute*>> ret;
-
- for (auto* var : TransitivelyReferencedGlobals()) {
- for (auto* attr : var->Declaration()->attributes) {
- if (auto* location = attr->As<ast::LocationAttribute>()) {
- ret.push_back({var, location});
- break;
- }
+ std::vector<std::pair<const Variable*, const ast::LocationAttribute*>> ret;
+
+ for (auto* var : TransitivelyReferencedGlobals()) {
+ for (auto* attr : var->Declaration()->attributes) {
+ if (auto* location = attr->As<ast::LocationAttribute>()) {
+ ret.push_back({var, location});
+ break;
+ }
+ }
}
- }
- return ret;
+ return ret;
}
-Function::VariableBindings Function::TransitivelyReferencedUniformVariables()
- const {
- VariableBindings ret;
+Function::VariableBindings Function::TransitivelyReferencedUniformVariables() const {
+ VariableBindings ret;
- for (auto* var : TransitivelyReferencedGlobals()) {
- if (var->StorageClass() != ast::StorageClass::kUniform) {
- continue;
- }
+ for (auto* var : TransitivelyReferencedGlobals()) {
+ if (var->StorageClass() != ast::StorageClass::kUniform) {
+ continue;
+ }
- if (auto binding_point = var->Declaration()->BindingPoint()) {
- ret.push_back({var, binding_point});
+ if (auto binding_point = var->Declaration()->BindingPoint()) {
+ ret.push_back({var, binding_point});
+ }
}
- }
- return ret;
+ return ret;
}
-Function::VariableBindings
-Function::TransitivelyReferencedStorageBufferVariables() const {
- VariableBindings ret;
+Function::VariableBindings Function::TransitivelyReferencedStorageBufferVariables() const {
+ VariableBindings ret;
- for (auto* var : TransitivelyReferencedGlobals()) {
- if (var->StorageClass() != ast::StorageClass::kStorage) {
- continue;
- }
+ for (auto* var : TransitivelyReferencedGlobals()) {
+ if (var->StorageClass() != ast::StorageClass::kStorage) {
+ continue;
+ }
- if (auto binding_point = var->Declaration()->BindingPoint()) {
- ret.push_back({var, binding_point});
+ if (auto binding_point = var->Declaration()->BindingPoint()) {
+ ret.push_back({var, binding_point});
+ }
}
- }
- return ret;
+ return ret;
}
std::vector<std::pair<const Variable*, const ast::BuiltinAttribute*>>
Function::TransitivelyReferencedBuiltinVariables() const {
- std::vector<std::pair<const Variable*, const ast::BuiltinAttribute*>> ret;
-
- for (auto* var : TransitivelyReferencedGlobals()) {
- for (auto* attr : var->Declaration()->attributes) {
- if (auto* builtin = attr->As<ast::BuiltinAttribute>()) {
- ret.push_back({var, builtin});
- break;
- }
+ std::vector<std::pair<const Variable*, const ast::BuiltinAttribute*>> ret;
+
+ for (auto* var : TransitivelyReferencedGlobals()) {
+ for (auto* attr : var->Declaration()->attributes) {
+ if (auto* builtin = attr->As<ast::BuiltinAttribute>()) {
+ ret.push_back({var, builtin});
+ break;
+ }
+ }
}
- }
- return ret;
+ return ret;
}
-Function::VariableBindings Function::TransitivelyReferencedSamplerVariables()
- const {
- return TransitivelyReferencedSamplerVariablesImpl(ast::SamplerKind::kSampler);
+Function::VariableBindings Function::TransitivelyReferencedSamplerVariables() const {
+ return TransitivelyReferencedSamplerVariablesImpl(ast::SamplerKind::kSampler);
}
-Function::VariableBindings
-Function::TransitivelyReferencedComparisonSamplerVariables() const {
- return TransitivelyReferencedSamplerVariablesImpl(
- ast::SamplerKind::kComparisonSampler);
+Function::VariableBindings Function::TransitivelyReferencedComparisonSamplerVariables() const {
+ return TransitivelyReferencedSamplerVariablesImpl(ast::SamplerKind::kComparisonSampler);
}
-Function::VariableBindings
-Function::TransitivelyReferencedSampledTextureVariables() const {
- return TransitivelyReferencedSampledTextureVariablesImpl(false);
+Function::VariableBindings Function::TransitivelyReferencedSampledTextureVariables() const {
+ return TransitivelyReferencedSampledTextureVariablesImpl(false);
}
-Function::VariableBindings
-Function::TransitivelyReferencedMultisampledTextureVariables() const {
- return TransitivelyReferencedSampledTextureVariablesImpl(true);
+Function::VariableBindings Function::TransitivelyReferencedMultisampledTextureVariables() const {
+ return TransitivelyReferencedSampledTextureVariablesImpl(true);
}
Function::VariableBindings Function::TransitivelyReferencedVariablesOfType(
const tint::TypeInfo* type) const {
- VariableBindings ret;
- for (auto* var : TransitivelyReferencedGlobals()) {
- auto* unwrapped_type = var->Type()->UnwrapRef();
- if (unwrapped_type->TypeInfo().Is(type)) {
- if (auto binding_point = var->Declaration()->BindingPoint()) {
- ret.push_back({var, binding_point});
- }
+ VariableBindings ret;
+ for (auto* var : TransitivelyReferencedGlobals()) {
+ auto* unwrapped_type = var->Type()->UnwrapRef();
+ if (unwrapped_type->TypeInfo().Is(type)) {
+ if (auto binding_point = var->Declaration()->BindingPoint()) {
+ ret.push_back({var, binding_point});
+ }
+ }
}
- }
- return ret;
+ return ret;
}
bool Function::HasAncestorEntryPoint(Symbol symbol) const {
- for (const auto* point : ancestor_entry_points_) {
- if (point->Declaration()->symbol == symbol) {
- return true;
+ for (const auto* point : ancestor_entry_points_) {
+ if (point->Declaration()->symbol == symbol) {
+ return true;
+ }
}
- }
- return false;
+ return false;
}
Function::VariableBindings Function::TransitivelyReferencedSamplerVariablesImpl(
ast::SamplerKind kind) const {
- VariableBindings ret;
-
- for (auto* var : TransitivelyReferencedGlobals()) {
- auto* unwrapped_type = var->Type()->UnwrapRef();
- auto* sampler = unwrapped_type->As<sem::Sampler>();
- if (sampler == nullptr || sampler->kind() != kind) {
- continue;
+ VariableBindings ret;
+
+ for (auto* var : TransitivelyReferencedGlobals()) {
+ auto* unwrapped_type = var->Type()->UnwrapRef();
+ auto* sampler = unwrapped_type->As<sem::Sampler>();
+ if (sampler == nullptr || sampler->kind() != kind) {
+ continue;
+ }
+
+ if (auto binding_point = var->Declaration()->BindingPoint()) {
+ ret.push_back({var, binding_point});
+ }
}
-
- if (auto binding_point = var->Declaration()->BindingPoint()) {
- ret.push_back({var, binding_point});
- }
- }
- return ret;
+ return ret;
}
-Function::VariableBindings
-Function::TransitivelyReferencedSampledTextureVariablesImpl(
+Function::VariableBindings Function::TransitivelyReferencedSampledTextureVariablesImpl(
bool multisampled) const {
- VariableBindings ret;
+ VariableBindings ret;
- for (auto* var : TransitivelyReferencedGlobals()) {
- auto* unwrapped_type = var->Type()->UnwrapRef();
- auto* texture = unwrapped_type->As<sem::Texture>();
- if (texture == nullptr) {
- continue;
- }
+ for (auto* var : TransitivelyReferencedGlobals()) {
+ auto* unwrapped_type = var->Type()->UnwrapRef();
+ auto* texture = unwrapped_type->As<sem::Texture>();
+ if (texture == nullptr) {
+ continue;
+ }
- auto is_multisampled = texture->Is<sem::MultisampledTexture>();
- auto is_sampled = texture->Is<sem::SampledTexture>();
+ auto is_multisampled = texture->Is<sem::MultisampledTexture>();
+ auto is_sampled = texture->Is<sem::SampledTexture>();
- if ((multisampled && !is_multisampled) || (!multisampled && !is_sampled)) {
- continue;
- }
+ if ((multisampled && !is_multisampled) || (!multisampled && !is_sampled)) {
+ continue;
+ }
- if (auto binding_point = var->Declaration()->BindingPoint()) {
- ret.push_back({var, binding_point});
+ if (auto binding_point = var->Declaration()->BindingPoint()) {
+ ret.push_back({var, binding_point});
+ }
}
- }
- return ret;
+ return ret;
}
} // namespace tint::sem
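The reflowed Function helpers above all return VariableBindings, i.e. pairs of a module-scope variable and its binding point. A hedged sketch of the intended use, with an illustrative function name; `fn` would come from the program's semantic info (e.g. program.Sem().Get(ast_func)).

    #include "src/tint/sem/function.h"
    #include "src/tint/sem/variable.h"

    // List the uniform buffers a resolved function depends on, directly or
    // through its callees.
    void ListUniforms(const tint::sem::Function* fn) {
        for (const auto& [var, binding_point] : fn->TransitivelyReferencedUniformVariables()) {
            // binding_point is an ast::VariableBindingPoint holding the @group
            // and @binding attributes of the module-scope variable.
            (void)var;
            (void)binding_point;
        }
    }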
diff --git a/chromium/third_party/dawn/src/tint/sem/function.h b/chromium/third_party/dawn/src/tint/sem/function.h
index 68136765fb0..f95920a6bf8 100644
--- a/chromium/third_party/dawn/src/tint/sem/function.h
+++ b/chromium/third_party/dawn/src/tint/sem/function.h
@@ -40,11 +40,11 @@ namespace tint::sem {
/// WorkgroupDimension describes the size of a single dimension of an entry
/// point's workgroup size.
struct WorkgroupDimension {
- /// The size of this dimension.
- uint32_t value;
- /// A pipeline-overridable constant that overrides the size, or nullptr if
- /// this dimension is not overridable.
- const ast::Variable* overridable_const = nullptr;
+ /// The size of this dimension.
+ uint32_t value;
+ /// A pipeline-overridable constant that overrides the size, or nullptr if
+ /// this dimension is not overridable.
+ const ast::Variable* overridable_const = nullptr;
};
/// WorkgroupSize is a three-dimensional array of WorkgroupDimensions.
@@ -52,234 +52,222 @@ using WorkgroupSize = std::array<WorkgroupDimension, 3>;
/// Function holds the semantic information for function nodes.
class Function final : public Castable<Function, CallTarget> {
- public:
- /// A vector of [Variable*, ast::VariableBindingPoint] pairs
- using VariableBindings =
- std::vector<std::pair<const Variable*, ast::VariableBindingPoint>>;
-
- /// Constructor
- /// @param declaration the ast::Function
- /// @param return_type the return type of the function
- /// @param parameters the parameters to the function
- Function(const ast::Function* declaration,
- Type* return_type,
- std::vector<Parameter*> parameters);
-
- /// Destructor
- ~Function() override;
-
- /// @returns the ast::Function declaration
- const ast::Function* Declaration() const { return declaration_; }
-
- /// @returns the workgroup size {x, y, z} for the function.
- const sem::WorkgroupSize& WorkgroupSize() const { return workgroup_size_; }
-
- /// Sets the workgroup size {x, y, z} for the function.
- /// @param workgroup_size the new workgroup size of the function
- void SetWorkgroupSize(sem::WorkgroupSize workgroup_size) {
- workgroup_size_ = std::move(workgroup_size);
- }
-
- /// @returns all directly referenced global variables
- const utils::UniqueVector<const GlobalVariable*>& DirectlyReferencedGlobals()
- const {
- return directly_referenced_globals_;
- }
-
- /// Records that this function directly references the given global variable.
- /// Note: Implicitly adds this global to the transtively-called globals.
- /// @param global the module-scope variable
- void AddDirectlyReferencedGlobal(const sem::GlobalVariable* global) {
- directly_referenced_globals_.add(global);
- transitively_referenced_globals_.add(global);
- }
-
- /// @returns all transitively referenced global variables
- const utils::UniqueVector<const GlobalVariable*>&
- TransitivelyReferencedGlobals() const {
- return transitively_referenced_globals_;
- }
-
- /// Records that this function transitively references the given global
- /// variable.
- /// @param global the module-scoped variable
- void AddTransitivelyReferencedGlobal(const sem::GlobalVariable* global) {
- transitively_referenced_globals_.add(global);
- }
-
- /// @returns the list of functions that this function transitively calls.
- const utils::UniqueVector<const Function*>& TransitivelyCalledFunctions()
- const {
- return transitively_called_functions_;
- }
-
- /// Records that this function transitively calls `function`.
- /// @param function the function this function transitively calls
- void AddTransitivelyCalledFunction(const Function* function) {
- transitively_called_functions_.add(function);
- }
-
- /// @returns the list of builtins that this function directly calls.
- const utils::UniqueVector<const Builtin*>& DirectlyCalledBuiltins() const {
- return directly_called_builtins_;
- }
-
- /// Records that this function transitively calls `builtin`.
- /// @param builtin the builtin this function directly calls
- void AddDirectlyCalledBuiltin(const Builtin* builtin) {
- directly_called_builtins_.add(builtin);
- }
-
- /// Adds the given texture/sampler pair to the list of unique pairs
- /// that this function uses (directly or indirectly). These can only
- /// be parameters to this function or global variables. Uniqueness is
- /// ensured by texture_sampler_pairs_ being a UniqueVector.
- /// @param texture the texture (must be non-null)
- /// @param sampler the sampler (null indicates a texture-only reference)
- void AddTextureSamplerPair(const sem::Variable* texture,
- const sem::Variable* sampler) {
- texture_sampler_pairs_.add(VariablePair(texture, sampler));
- }
-
- /// @returns the list of texture/sampler pairs that this function uses
- /// (directly or indirectly).
- const std::vector<VariablePair>& TextureSamplerPairs() const {
- return texture_sampler_pairs_;
- }
-
- /// @returns the list of direct calls to functions / builtins made by this
- /// function
- std::vector<const Call*> DirectCallStatements() const {
- return direct_calls_;
- }
-
- /// Adds a record of the direct function / builtin calls made by this
- /// function
- /// @param call the call
- void AddDirectCall(const Call* call) { direct_calls_.emplace_back(call); }
-
- /// @param target the target of a call
- /// @returns the Call to the given CallTarget, or nullptr the target was not
- /// called by this function.
- const Call* FindDirectCallTo(const CallTarget* target) const {
- for (auto* call : direct_calls_) {
- if (call->Target() == target) {
- return call;
- }
+ public:
+ /// A vector of [Variable*, ast::VariableBindingPoint] pairs
+ using VariableBindings = std::vector<std::pair<const Variable*, ast::VariableBindingPoint>>;
+
+ /// Constructor
+ /// @param declaration the ast::Function
+ /// @param return_type the return type of the function
+ /// @param parameters the parameters to the function
+ Function(const ast::Function* declaration,
+ Type* return_type,
+ std::vector<Parameter*> parameters);
+
+ /// Destructor
+ ~Function() override;
+
+ /// @returns the ast::Function declaration
+ const ast::Function* Declaration() const { return declaration_; }
+
+ /// @returns the workgroup size {x, y, z} for the function.
+ const sem::WorkgroupSize& WorkgroupSize() const { return workgroup_size_; }
+
+ /// Sets the workgroup size {x, y, z} for the function.
+ /// @param workgroup_size the new workgroup size of the function
+ void SetWorkgroupSize(sem::WorkgroupSize workgroup_size) {
+ workgroup_size_ = std::move(workgroup_size);
}
- return nullptr;
- }
-
- /// @returns the list of callsites of this function
- std::vector<const Call*> CallSites() const { return callsites_; }
-
- /// Adds a record of a callsite to this function
- /// @param call the callsite
- void AddCallSite(const Call* call) { callsites_.emplace_back(call); }
-
- /// @returns the ancestor entry points
- const std::vector<const Function*>& AncestorEntryPoints() const {
- return ancestor_entry_points_;
- }
-
- /// Adds a record that the given entry point transitively calls this function
- /// @param entry_point the entry point that transtively calls this function
- void AddAncestorEntryPoint(const sem::Function* entry_point) {
- ancestor_entry_points_.emplace_back(entry_point);
- }
-
- /// Retrieves any referenced location variables
- /// @returns the <variable, attribute> pair.
- std::vector<std::pair<const Variable*, const ast::LocationAttribute*>>
- TransitivelyReferencedLocationVariables() const;
-
- /// Retrieves any referenced builtin variables
- /// @returns the <variable, attribute> pair.
- std::vector<std::pair<const Variable*, const ast::BuiltinAttribute*>>
- TransitivelyReferencedBuiltinVariables() const;
-
- /// Retrieves any referenced uniform variables. Note, the variables must be
- /// decorated with both binding and group attributes.
- /// @returns the referenced uniforms
- VariableBindings TransitivelyReferencedUniformVariables() const;
-
- /// Retrieves any referenced storagebuffer variables. Note, the variables
- /// must be decorated with both binding and group attributes.
- /// @returns the referenced storagebuffers
- VariableBindings TransitivelyReferencedStorageBufferVariables() const;
-
- /// Retrieves any referenced regular Sampler variables. Note, the
- /// variables must be decorated with both binding and group attributes.
- /// @returns the referenced storagebuffers
- VariableBindings TransitivelyReferencedSamplerVariables() const;
-
- /// Retrieves any referenced comparison Sampler variables. Note, the
- /// variables must be decorated with both binding and group attributes.
- /// @returns the referenced storagebuffers
- VariableBindings TransitivelyReferencedComparisonSamplerVariables() const;
-
- /// Retrieves any referenced sampled textures variables. Note, the
- /// variables must be decorated with both binding and group attributes.
- /// @returns the referenced sampled textures
- VariableBindings TransitivelyReferencedSampledTextureVariables() const;
-
- /// Retrieves any referenced multisampled textures variables. Note, the
- /// variables must be decorated with both binding and group attributes.
- /// @returns the referenced sampled textures
- VariableBindings TransitivelyReferencedMultisampledTextureVariables() const;
-
- /// Retrieves any referenced variables of the given type. Note, the variables
- /// must be decorated with both binding and group attributes.
- /// @param type the type of the variables to find
- /// @returns the referenced variables
- VariableBindings TransitivelyReferencedVariablesOfType(
- const tint::TypeInfo* type) const;
-
- /// Retrieves any referenced variables of the given type. Note, the variables
- /// must be decorated with both binding and group attributes.
- /// @returns the referenced variables
- template <typename T>
- VariableBindings TransitivelyReferencedVariablesOfType() const {
- return TransitivelyReferencedVariablesOfType(&TypeInfo::Of<T>());
- }
-
- /// Checks if the given entry point is an ancestor
- /// @param sym the entry point symbol
- /// @returns true if `sym` is an ancestor entry point of this function
- bool HasAncestorEntryPoint(Symbol sym) const;
-
- /// Sets that this function has a discard statement
- void SetHasDiscard() { has_discard_ = true; }
-
- /// Returns true if this function has a discard statement
- /// @returns true if this function has a discard statement
- bool HasDiscard() const { return has_discard_; }
-
- /// @return the behaviors of this function
- const sem::Behaviors& Behaviors() const { return behaviors_; }
-
- /// @return the behaviors of this function
- sem::Behaviors& Behaviors() { return behaviors_; }
-
- private:
- VariableBindings TransitivelyReferencedSamplerVariablesImpl(
- ast::SamplerKind kind) const;
- VariableBindings TransitivelyReferencedSampledTextureVariablesImpl(
- bool multisampled) const;
-
- const ast::Function* const declaration_;
-
- sem::WorkgroupSize workgroup_size_;
- utils::UniqueVector<const GlobalVariable*> directly_referenced_globals_;
- utils::UniqueVector<const GlobalVariable*> transitively_referenced_globals_;
- utils::UniqueVector<const Function*> transitively_called_functions_;
- utils::UniqueVector<const Builtin*> directly_called_builtins_;
- utils::UniqueVector<VariablePair> texture_sampler_pairs_;
- std::vector<const Call*> direct_calls_;
- std::vector<const Call*> callsites_;
- std::vector<const Function*> ancestor_entry_points_;
- bool has_discard_ = false;
- sem::Behaviors behaviors_{sem::Behavior::kNext};
+
+ /// @returns all directly referenced global variables
+ const utils::UniqueVector<const GlobalVariable*>& DirectlyReferencedGlobals() const {
+ return directly_referenced_globals_;
+ }
+
+ /// Records that this function directly references the given global variable.
+ /// Note: Implicitly adds this global to the transitively-referenced globals.
+ /// @param global the module-scope variable
+ void AddDirectlyReferencedGlobal(const sem::GlobalVariable* global) {
+ directly_referenced_globals_.add(global);
+ transitively_referenced_globals_.add(global);
+ }
+
+ /// @returns all transitively referenced global variables
+ const utils::UniqueVector<const GlobalVariable*>& TransitivelyReferencedGlobals() const {
+ return transitively_referenced_globals_;
+ }
+
+ /// Records that this function transitively references the given global
+ /// variable.
+ /// @param global the module-scoped variable
+ void AddTransitivelyReferencedGlobal(const sem::GlobalVariable* global) {
+ transitively_referenced_globals_.add(global);
+ }
+
+ /// @returns the list of functions that this function transitively calls.
+ const utils::UniqueVector<const Function*>& TransitivelyCalledFunctions() const {
+ return transitively_called_functions_;
+ }
+
+ /// Records that this function transitively calls `function`.
+ /// @param function the function this function transitively calls
+ void AddTransitivelyCalledFunction(const Function* function) {
+ transitively_called_functions_.add(function);
+ }
+
+ /// @returns the list of builtins that this function directly calls.
+ const utils::UniqueVector<const Builtin*>& DirectlyCalledBuiltins() const {
+ return directly_called_builtins_;
+ }
+
+ /// Records that this function directly calls `builtin`.
+ /// @param builtin the builtin this function directly calls
+ void AddDirectlyCalledBuiltin(const Builtin* builtin) {
+ directly_called_builtins_.add(builtin);
+ }
+
+ /// Adds the given texture/sampler pair to the list of unique pairs
+ /// that this function uses (directly or indirectly). These can only
+ /// be parameters to this function or global variables. Uniqueness is
+ /// ensured by texture_sampler_pairs_ being a UniqueVector.
+ /// @param texture the texture (must be non-null)
+ /// @param sampler the sampler (null indicates a texture-only reference)
+ void AddTextureSamplerPair(const sem::Variable* texture, const sem::Variable* sampler) {
+ texture_sampler_pairs_.add(VariablePair(texture, sampler));
+ }
+
+ /// @returns the list of texture/sampler pairs that this function uses
+ /// (directly or indirectly).
+ const std::vector<VariablePair>& TextureSamplerPairs() const { return texture_sampler_pairs_; }
+
+ /// @returns the list of direct calls to functions / builtins made by this
+ /// function
+ std::vector<const Call*> DirectCallStatements() const { return direct_calls_; }
+
+ /// Adds a record of the direct function / builtin calls made by this
+ /// function
+ /// @param call the call
+ void AddDirectCall(const Call* call) { direct_calls_.emplace_back(call); }
+
+ /// @param target the target of a call
+ /// @returns the Call to the given CallTarget, or nullptr if the target was not
+ /// called by this function.
+ const Call* FindDirectCallTo(const CallTarget* target) const {
+ for (auto* call : direct_calls_) {
+ if (call->Target() == target) {
+ return call;
+ }
+ }
+ return nullptr;
+ }
+
+ /// @returns the list of callsites of this function
+ std::vector<const Call*> CallSites() const { return callsites_; }
+
+ /// Adds a record of a callsite to this function
+ /// @param call the callsite
+ void AddCallSite(const Call* call) { callsites_.emplace_back(call); }
+
+ /// @returns the ancestor entry points
+ const std::vector<const Function*>& AncestorEntryPoints() const {
+ return ancestor_entry_points_;
+ }
+
+ /// Adds a record that the given entry point transitively calls this function
+ /// @param entry_point the entry point that transitively calls this function
+ void AddAncestorEntryPoint(const sem::Function* entry_point) {
+ ancestor_entry_points_.emplace_back(entry_point);
+ }
+
+ /// Retrieves any referenced location variables
+ /// @returns the <variable, attribute> pairs.
+ std::vector<std::pair<const Variable*, const ast::LocationAttribute*>>
+ TransitivelyReferencedLocationVariables() const;
+
+ /// Retrieves any referenced builtin variables
+ /// @returns the <variable, attribute> pairs.
+ std::vector<std::pair<const Variable*, const ast::BuiltinAttribute*>>
+ TransitivelyReferencedBuiltinVariables() const;
+
+ /// Retrieves any referenced uniform variables. Note, the variables must be
+ /// decorated with both binding and group attributes.
+ /// @returns the referenced uniforms
+ VariableBindings TransitivelyReferencedUniformVariables() const;
+
+ /// Retrieves any referenced storage buffer variables. Note, the variables
+ /// must be decorated with both binding and group attributes.
+ /// @returns the referenced storage buffers
+ VariableBindings TransitivelyReferencedStorageBufferVariables() const;
+
+ /// Retrieves any referenced regular Sampler variables. Note, the
+ /// variables must be decorated with both binding and group attributes.
+ /// @returns the referenced samplers
+ VariableBindings TransitivelyReferencedSamplerVariables() const;
+
+ /// Retrieves any referenced comparison Sampler variables. Note, the
+ /// variables must be decorated with both binding and group attributes.
+ /// @returns the referenced comparison samplers
+ VariableBindings TransitivelyReferencedComparisonSamplerVariables() const;
+
+ /// Retrieves any referenced sampled texture variables. Note, the
+ /// variables must be decorated with both binding and group attributes.
+ /// @returns the referenced sampled textures
+ VariableBindings TransitivelyReferencedSampledTextureVariables() const;
+
+ /// Retrieves any referenced multisampled texture variables. Note, the
+ /// variables must be decorated with both binding and group attributes.
+ /// @returns the referenced multisampled textures
+ VariableBindings TransitivelyReferencedMultisampledTextureVariables() const;
+
+ /// Retrieves any referenced variables of the given type. Note, the variables
+ /// must be decorated with both binding and group attributes.
+ /// @param type the type of the variables to find
+ /// @returns the referenced variables
+ VariableBindings TransitivelyReferencedVariablesOfType(const tint::TypeInfo* type) const;
+
+ /// Retrieves any referenced variables of the given type. Note, the variables
+ /// must be decorated with both binding and group attributes.
+ /// @returns the referenced variables
+ template <typename T>
+ VariableBindings TransitivelyReferencedVariablesOfType() const {
+ return TransitivelyReferencedVariablesOfType(&TypeInfo::Of<T>());
+ }
+
+ /// Checks if the given entry point is an ancestor
+ /// @param sym the entry point symbol
+ /// @returns true if `sym` is an ancestor entry point of this function
+ bool HasAncestorEntryPoint(Symbol sym) const;
+
+ /// Sets that this function has a discard statement
+ void SetHasDiscard() { has_discard_ = true; }
+
+ /// Returns true if this function has a discard statement
+ /// @returns true if this function has a discard statement
+ bool HasDiscard() const { return has_discard_; }
+
+ /// @return the behaviors of this function
+ const sem::Behaviors& Behaviors() const { return behaviors_; }
+
+ /// @return the behaviors of this function
+ sem::Behaviors& Behaviors() { return behaviors_; }
+
+ private:
+ VariableBindings TransitivelyReferencedSamplerVariablesImpl(ast::SamplerKind kind) const;
+ VariableBindings TransitivelyReferencedSampledTextureVariablesImpl(bool multisampled) const;
+
+ const ast::Function* const declaration_;
+
+ sem::WorkgroupSize workgroup_size_;
+ utils::UniqueVector<const GlobalVariable*> directly_referenced_globals_;
+ utils::UniqueVector<const GlobalVariable*> transitively_referenced_globals_;
+ utils::UniqueVector<const Function*> transitively_called_functions_;
+ utils::UniqueVector<const Builtin*> directly_called_builtins_;
+ utils::UniqueVector<VariablePair> texture_sampler_pairs_;
+ std::vector<const Call*> direct_calls_;
+ std::vector<const Call*> callsites_;
+ std::vector<const Function*> ancestor_entry_points_;
+ bool has_discard_ = false;
+ sem::Behaviors behaviors_{sem::Behavior::kNext};
};
} // namespace tint::sem
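The reformatted sem::Function interface above is what backend writers use to enumerate resource bindings. A minimal sketch of a consumer, assuming a fully resolved sem::Function obtained through the program's semantic info; the free function, its name, and the include list are illustrative, not part of this patch:

    #include "src/tint/sem/function.h"
    #include "src/tint/sem/variable.h"

    // Sketch only: walks the binding data exposed by sem::Function.
    void DumpBindings(const tint::sem::Function* fn) {
        // VariableBindings pairs each module-scope variable with the
        // @group/@binding attributes on its declaration.
        for (const auto& binding : fn->TransitivelyReferencedUniformVariables()) {
            const tint::sem::Variable* var = binding.first;
            const auto& binding_point = binding.second;
            (void)var;
            (void)binding_point;
        }
        // Texture/sampler pairs are already de-duplicated, since
        // texture_sampler_pairs_ is a UniqueVector.
        for (const auto& pair : fn->TextureSamplerPairs()) {
            (void)pair;
        }
    }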
diff --git a/chromium/third_party/dawn/src/tint/sem/i32_type.cc b/chromium/third_party/dawn/src/tint/sem/i32.cc
index 0865c03b884..d5a1e26ed90 100644
--- a/chromium/third_party/dawn/src/tint/sem/i32_type.cc
+++ b/chromium/third_party/dawn/src/tint/sem/i32.cc
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-#include "src/tint/sem/i32_type.h"
+#include "src/tint/sem/i32.h"
#include "src/tint/program_builder.h"
@@ -27,27 +27,27 @@ I32::I32(I32&&) = default;
I32::~I32() = default;
size_t I32::Hash() const {
- return static_cast<size_t>(TypeInfo::Of<I32>().full_hashcode);
+ return static_cast<size_t>(TypeInfo::Of<I32>().full_hashcode);
}
bool I32::Equals(const Type& other) const {
- return other.Is<I32>();
+ return other.Is<I32>();
}
std::string I32::FriendlyName(const SymbolTable&) const {
- return "i32";
+ return "i32";
}
bool I32::IsConstructible() const {
- return true;
+ return true;
}
uint32_t I32::Size() const {
- return 4;
+ return 4;
}
uint32_t I32::Align() const {
- return 4;
+ return 4;
}
} // namespace tint::sem
diff --git a/chromium/third_party/dawn/src/tint/sem/i32.h b/chromium/third_party/dawn/src/tint/sem/i32.h
new file mode 100644
index 00000000000..3b564db5120
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/sem/i32.h
@@ -0,0 +1,58 @@
+// Copyright 2020 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef SRC_TINT_SEM_I32_H_
+#define SRC_TINT_SEM_I32_H_
+
+#include <string>
+
+#include "src/tint/sem/type.h"
+
+namespace tint::sem {
+
+/// A 32-bit signed integer type.
+class I32 final : public Castable<I32, Type> {
+ public:
+ /// Constructor
+ I32();
+ /// Move constructor
+ I32(I32&&);
+ ~I32() override;
+
+ /// @returns a hash of the type.
+ size_t Hash() const override;
+
+ /// @param other the other type to compare against
+ /// @returns true if this type is equal to the given type
+ bool Equals(const Type& other) const override;
+
+ /// @param symbols the program's symbol table
+ /// @returns the name for this type that closely resembles how it would be
+ /// declared in WGSL.
+ std::string FriendlyName(const SymbolTable& symbols) const override;
+
+ /// @returns true if constructible as per
+ /// https://gpuweb.github.io/gpuweb/wgsl/#constructible-types
+ bool IsConstructible() const override;
+
+ /// @returns the size in bytes of the type.
+ uint32_t Size() const override;
+
+ /// @returns the alignment in bytes of the type.
+ uint32_t Align() const override;
+};
+
+} // namespace tint::sem
+
+#endif // SRC_TINT_SEM_I32_H_
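As the renamed i32.cc above records, I32 reports a fixed 4-byte size and alignment and is constructible. A tiny gtest-style check mirroring the neighbouring *_test.cc files; the test name, the include path, and the use of a bare TEST (rather than the fixture-based TestHelper) are assumptions:

    #include "gtest/gtest.h"

    #include "src/tint/sem/i32.h"

    namespace tint::sem {
    namespace {

    TEST(I32LayoutSketch, SizeAndAlign) {
        I32 i;
        // Both values come straight from I32::Size() and I32::Align() in i32.cc.
        EXPECT_EQ(i.Size(), 4u);
        EXPECT_EQ(i.Align(), 4u);
        EXPECT_TRUE(i.IsConstructible());
    }

    }  // namespace
    }  // namespace tint::sem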
diff --git a/chromium/third_party/dawn/src/tint/sem/i32_type_test.cc b/chromium/third_party/dawn/src/tint/sem/i32_test.cc
index 679331a094e..2ccc92cbf75 100644
--- a/chromium/third_party/dawn/src/tint/sem/i32_type_test.cc
+++ b/chromium/third_party/dawn/src/tint/sem/i32_test.cc
@@ -13,7 +13,7 @@
// limitations under the License.
#include "src/tint/sem/test_helper.h"
-#include "src/tint/sem/texture_type.h"
+#include "src/tint/sem/texture.h"
namespace tint::sem {
namespace {
@@ -21,27 +21,27 @@ namespace {
using I32Test = TestHelper;
TEST_F(I32Test, Creation) {
- auto* a = create<I32>();
- auto* b = create<I32>();
- EXPECT_EQ(a, b);
+ auto* a = create<I32>();
+ auto* b = create<I32>();
+ EXPECT_EQ(a, b);
}
TEST_F(I32Test, Hash) {
- auto* a = create<I32>();
- auto* b = create<I32>();
- EXPECT_EQ(a->Hash(), b->Hash());
+ auto* a = create<I32>();
+ auto* b = create<I32>();
+ EXPECT_EQ(a->Hash(), b->Hash());
}
TEST_F(I32Test, Equals) {
- auto* a = create<I32>();
- auto* b = create<I32>();
- EXPECT_TRUE(a->Equals(*b));
- EXPECT_FALSE(a->Equals(Void{}));
+ auto* a = create<I32>();
+ auto* b = create<I32>();
+ EXPECT_TRUE(a->Equals(*b));
+ EXPECT_FALSE(a->Equals(Void{}));
}
TEST_F(I32Test, FriendlyName) {
- I32 i;
- EXPECT_EQ(i.FriendlyName(Symbols()), "i32");
+ I32 i;
+ EXPECT_EQ(i.FriendlyName(Symbols()), "i32");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/sem/i32_type.h b/chromium/third_party/dawn/src/tint/sem/i32_type.h
deleted file mode 100644
index f6747a2340e..00000000000
--- a/chromium/third_party/dawn/src/tint/sem/i32_type.h
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2020 The Tint Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef SRC_TINT_SEM_I32_TYPE_H_
-#define SRC_TINT_SEM_I32_TYPE_H_
-
-#include <string>
-
-#include "src/tint/sem/type.h"
-
-namespace tint::sem {
-
-/// A signed int 32 type.
-class I32 final : public Castable<I32, Type> {
- public:
- /// Constructor
- I32();
- /// Move constructor
- I32(I32&&);
- ~I32() override;
-
- /// @returns a hash of the type.
- size_t Hash() const override;
-
- /// @param other the other type to compare against
- /// @returns true if the this type is equal to the given type
- bool Equals(const Type& other) const override;
-
- /// @param symbols the program's symbol table
- /// @returns the name for this type that closely resembles how it would be
- /// declared in WGSL.
- std::string FriendlyName(const SymbolTable& symbols) const override;
-
- /// @returns true if constructible as per
- /// https://gpuweb.github.io/gpuweb/wgsl/#constructible-types
- bool IsConstructible() const override;
-
- /// @returns the size in bytes of the type.
- uint32_t Size() const override;
-
- /// @returns the alignment in bytes of the type.
- uint32_t Align() const override;
-};
-
-} // namespace tint::sem
-
-#endif // SRC_TINT_SEM_I32_TYPE_H_
diff --git a/chromium/third_party/dawn/src/tint/sem/if_statement.cc b/chromium/third_party/dawn/src/tint/sem/if_statement.cc
index 5102b898c2f..a79555a3259 100644
--- a/chromium/third_party/dawn/src/tint/sem/if_statement.cc
+++ b/chromium/third_party/dawn/src/tint/sem/if_statement.cc
@@ -17,7 +17,6 @@
#include "src/tint/program_builder.h"
TINT_INSTANTIATE_TYPEINFO(tint::sem::IfStatement);
-TINT_INSTANTIATE_TYPEINFO(tint::sem::ElseStatement);
namespace tint::sem {
@@ -29,18 +28,7 @@ IfStatement::IfStatement(const ast::IfStatement* declaration,
IfStatement::~IfStatement() = default;
const ast::IfStatement* IfStatement::Declaration() const {
- return static_cast<const ast::IfStatement*>(Base::Declaration());
-}
-
-ElseStatement::ElseStatement(const ast::ElseStatement* declaration,
- const IfStatement* parent,
- const sem::Function* function)
- : Base(declaration, parent, function) {}
-
-ElseStatement::~ElseStatement() = default;
-
-const ast::ElseStatement* ElseStatement::Declaration() const {
- return static_cast<const ast::ElseStatement*>(Base::Declaration());
+ return static_cast<const ast::IfStatement*>(Base::Declaration());
}
} // namespace tint::sem
diff --git a/chromium/third_party/dawn/src/tint/sem/if_statement.h b/chromium/third_party/dawn/src/tint/sem/if_statement.h
index 8e4fb276211..e9ecda08ea1 100644
--- a/chromium/third_party/dawn/src/tint/sem/if_statement.h
+++ b/chromium/third_party/dawn/src/tint/sem/if_statement.h
@@ -20,7 +20,6 @@
// Forward declarations
namespace tint::ast {
class IfStatement;
-class ElseStatement;
} // namespace tint::ast
namespace tint::sem {
class Expression;
@@ -30,63 +29,30 @@ namespace tint::sem {
/// Holds semantic information about an if statement
class IfStatement final : public Castable<IfStatement, CompoundStatement> {
- public:
- /// Constructor
- /// @param declaration the AST node for this if statement
- /// @param parent the owning statement
- /// @param function the owning function
- IfStatement(const ast::IfStatement* declaration,
- const CompoundStatement* parent,
- const sem::Function* function);
-
- /// Destructor
- ~IfStatement() override;
-
- /// @returns the AST node
- const ast::IfStatement* Declaration() const;
-
- /// @returns the if-statement condition expression
- const Expression* Condition() const { return condition_; }
-
- /// Sets the if-statement condition expression
- /// @param condition the if condition expression
- void SetCondition(const Expression* condition) { condition_ = condition; }
-
- private:
- const Expression* condition_ = nullptr;
-};
-
-/// Holds semantic information about an else statement
-class ElseStatement final : public Castable<ElseStatement, CompoundStatement> {
- public:
- /// Constructor
- /// @param declaration the AST node for this else statement
- /// @param parent the owning statement
- /// @param function the owning function
- ElseStatement(const ast::ElseStatement* declaration,
- const IfStatement* parent,
+ public:
+ /// Constructor
+ /// @param declaration the AST node for this if statement
+ /// @param parent the owning statement
+ /// @param function the owning function
+ IfStatement(const ast::IfStatement* declaration,
+ const CompoundStatement* parent,
const sem::Function* function);
- /// Destructor
- ~ElseStatement() override;
-
- /// @returns the AST node
- const ast::ElseStatement* Declaration() const;
+ /// Destructor
+ ~IfStatement() override;
- /// @returns the else-statement condition expression
- const Expression* Condition() const { return condition_; }
+ /// @returns the AST node
+ const ast::IfStatement* Declaration() const;
- /// @return the statement that encloses this statement
- const IfStatement* Parent() const {
- return static_cast<const IfStatement*>(Statement::Parent());
- }
+ /// @returns the if-statement condition expression
+ const Expression* Condition() const { return condition_; }
- /// Sets the else-statement condition expression
- /// @param condition the else condition expression
- void SetCondition(const Expression* condition) { condition_ = condition; }
+ /// Sets the if-statement condition expression
+ /// @param condition the if condition expression
+ void SetCondition(const Expression* condition) { condition_ = condition; }
- private:
- const Expression* condition_ = nullptr;
+ private:
+ const Expression* condition_ = nullptr;
};
} // namespace tint::sem
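With ElseStatement removed, the semantic if-statement now exposes only its resolved condition; else blocks are reached through the AST node returned by Declaration(). A minimal sketch of a read-only consumer (the helper name and include list are illustrative):

    #include "src/tint/sem/expression.h"
    #include "src/tint/sem/if_statement.h"

    // Sketch only: `stmt` is assumed to come from sem::Info::Get() on an
    // ast::IfStatement that the resolver has already processed.
    const tint::sem::Expression* ConditionOf(const tint::sem::IfStatement* stmt) {
        // SetCondition() is called by the resolver; consumers just read it back.
        return stmt->Condition();
    }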
diff --git a/chromium/third_party/dawn/src/tint/sem/info.h b/chromium/third_party/dawn/src/tint/sem/info.h
index 353f67fcfbe..41321cff57b 100644
--- a/chromium/third_party/dawn/src/tint/sem/info.h
+++ b/chromium/third_party/dawn/src/tint/sem/info.h
@@ -31,85 +31,90 @@ namespace tint::sem {
/// Info holds all the resolved semantic information for a Program.
class Info {
- public:
- /// Placeholder type used by Get() to provide a default value for EXPLICIT_SEM
- using InferFromAST = std::nullptr_t;
-
- /// Resolves to the return type of the Get() method given the desired sementic
- /// type and AST type.
- template <typename SEM, typename AST_OR_TYPE>
- using GetResultType =
- std::conditional_t<std::is_same<SEM, InferFromAST>::value,
- SemanticNodeTypeFor<AST_OR_TYPE>,
- SEM>;
-
- /// Constructor
- Info();
-
- /// Move constructor
- Info(Info&&);
-
- /// Destructor
- ~Info();
-
- /// Move assignment operator
- /// @param rhs the Program to move
- /// @return this Program
- Info& operator=(Info&& rhs);
-
- /// Get looks up the semantic information for the AST or type node `node`.
- /// @param node the AST or type node
- /// @returns a pointer to the semantic node if found, otherwise nullptr
- template <typename SEM = InferFromAST,
- typename AST_OR_TYPE = CastableBase,
- typename RESULT = GetResultType<SEM, AST_OR_TYPE>>
- const RESULT* Get(const AST_OR_TYPE* node) const {
- auto it = map_.find(node);
- if (it == map_.end()) {
- return nullptr;
+ public:
+ /// Placeholder type used by Get() to provide a default for the SEM template parameter
+ using InferFromAST = std::nullptr_t;
+
+ /// Resolves to the return type of the Get() method given the desired semantic
+ /// type and AST type.
+ template <typename SEM, typename AST_OR_TYPE>
+ using GetResultType = std::conditional_t<std::is_same<SEM, InferFromAST>::value,
+ SemanticNodeTypeFor<AST_OR_TYPE>,
+ SEM>;
+
+ /// Constructor
+ Info();
+
+ /// Move constructor
+ Info(Info&&);
+
+ /// Destructor
+ ~Info();
+
+ /// Move assignment operator
+ /// @param rhs the Info to move
+ /// @return this Info
+ Info& operator=(Info&& rhs);
+
+ /// Get looks up the semantic information for the AST or type node `node`.
+ /// @param node the AST or type node
+ /// @returns a pointer to the semantic node if found, otherwise nullptr
+ template <typename SEM = InferFromAST,
+ typename AST_OR_TYPE = CastableBase,
+ typename RESULT = GetResultType<SEM, AST_OR_TYPE>>
+ const RESULT* Get(const AST_OR_TYPE* node) const {
+ auto it = map_.find(node);
+ if (it == map_.end()) {
+ return nullptr;
+ }
+ return As<RESULT>(it->second);
}
- return As<RESULT>(it->second);
- }
-
- /// Add registers the semantic node `sem_node` for the AST or type node
- /// `node`.
- /// @param node the AST or type node
- /// @param sem_node the semantic node
- template <typename AST_OR_TYPE>
- void Add(const AST_OR_TYPE* node,
- const SemanticNodeTypeFor<AST_OR_TYPE>* sem_node) {
- // Check there's no semantic info already existing for the node
- TINT_ASSERT(Semantic, Get(node) == nullptr);
- map_.emplace(node, sem_node);
- }
-
- /// Wrap returns a new Info created with the contents of `inner`.
- /// The Info returned by Wrap is intended to temporarily extend the contents
- /// of an existing immutable Info.
- /// As the copied contents are owned by `inner`, `inner` must not be
- /// destructed or assigned while using the returned Info.
- /// @param inner the immutable Info to extend
- /// @return the Info that wraps `inner`
- static Info Wrap(const Info& inner) {
- Info out;
- out.map_ = inner.map_;
- out.module_ = inner.module_;
- return out;
- }
-
- /// Assigns the semantic module.
- /// @param module the module to assign.
- void SetModule(sem::Module* module) { module_ = module; }
-
- /// @returns the semantic module.
- const sem::Module* Module() const { return module_; }
-
- private:
- // TODO(crbug.com/tint/724): Once finished, this map should be:
- // std::unordered_map<const ast::Node*, const sem::Node*>
- std::unordered_map<const CastableBase*, const CastableBase*> map_;
- // The semantic module
- sem::Module* module_ = nullptr;
+
+ /// Add registers the semantic node `sem_node` for the AST or type node `node`.
+ /// @param node the AST or type node
+ /// @param sem_node the semantic node
+ template <typename AST_OR_TYPE>
+ void Add(const AST_OR_TYPE* node, const SemanticNodeTypeFor<AST_OR_TYPE>* sem_node) {
+ // Check there's no semantic info already existing for the node
+ TINT_ASSERT(Semantic, Get(node) == nullptr);
+ map_.emplace(node, sem_node);
+ }
+
+ /// Replace registers the semantic node `sem_node` for the AST or type node `node`, overwriting any existing entry.
+ /// @param node the AST or type node
+ /// @param sem_node the new semantic node
+ template <typename AST_OR_TYPE>
+ void Replace(const AST_OR_TYPE* node, const SemanticNodeTypeFor<AST_OR_TYPE>* sem_node) {
+ map_[node] = sem_node;
+ }
+
+ /// Wrap returns a new Info created with the contents of `inner`.
+ /// The Info returned by Wrap is intended to temporarily extend the contents
+ /// of an existing immutable Info.
+ /// As the copied contents are owned by `inner`, `inner` must not be
+ /// destructed or assigned while using the returned Info.
+ /// @param inner the immutable Info to extend
+ /// @return the Info that wraps `inner`
+ static Info Wrap(const Info& inner) {
+ Info out;
+ out.map_ = inner.map_;
+ out.module_ = inner.module_;
+ return out;
+ }
+
+ /// Assigns the semantic module.
+ /// @param module the module to assign.
+ void SetModule(sem::Module* module) { module_ = module; }
+
+ /// @returns the semantic module.
+ const sem::Module* Module() const { return module_; }
+
+ private:
+ // TODO(crbug.com/tint/724): Once finished, this map should be:
+ // std::unordered_map<const ast::Node*, const sem::Node*>
+ std::unordered_map<const CastableBase*, const CastableBase*> map_;
+ // The semantic module
+ sem::Module* module_ = nullptr;
};
} // namespace tint::sem
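Get(), Add() and the newly added Replace() above form the whole contract for attaching semantic nodes to AST nodes. A sketch of how a resolver-style client drives them; the helper template and its name are assumptions, and it only instantiates for AST/semantic type pairs that the existing SemanticNodeTypeFor mapping allows:

    #include "src/tint/sem/info.h"

    // Sketch only: `ast_node` and `sem_node` are assumed to have been created
    // elsewhere (e.g. by a ProgramBuilder) with compatible AST/semantic types.
    template <typename AST, typename SEM>
    void Attach(tint::sem::Info& info, const AST* ast_node, const SEM* sem_node) {
        // Add() asserts that no semantic node is registered for `ast_node` yet.
        info.Add(ast_node, sem_node);

        // By default Get() infers the semantic type from the AST type; an
        // explicit type can also be requested, e.g. info.Get<SEM>(ast_node).
        auto* got = info.Get(ast_node);
        (void)got;

        // Replace(), introduced in this patch, overwrites any existing mapping,
        // which lets later passes swap a node's semantic info in place.
        info.Replace(ast_node, sem_node);
    }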
diff --git a/chromium/third_party/dawn/src/tint/sem/loop_statement.cc b/chromium/third_party/dawn/src/tint/sem/loop_statement.cc
index a26bf9e37ed..eee735ce2b5 100644
--- a/chromium/third_party/dawn/src/tint/sem/loop_statement.cc
+++ b/chromium/third_party/dawn/src/tint/sem/loop_statement.cc
@@ -25,19 +25,18 @@ LoopStatement::LoopStatement(const ast::LoopStatement* declaration,
const CompoundStatement* parent,
const sem::Function* function)
: Base(declaration, parent, function) {
- TINT_ASSERT(Semantic, parent);
- TINT_ASSERT(Semantic, function);
+ TINT_ASSERT(Semantic, parent);
+ TINT_ASSERT(Semantic, function);
}
LoopStatement::~LoopStatement() = default;
-LoopContinuingBlockStatement::LoopContinuingBlockStatement(
- const ast::BlockStatement* declaration,
- const CompoundStatement* parent,
- const sem::Function* function)
+LoopContinuingBlockStatement::LoopContinuingBlockStatement(const ast::BlockStatement* declaration,
+ const CompoundStatement* parent,
+ const sem::Function* function)
: Base(declaration, parent, function) {
- TINT_ASSERT(Semantic, parent);
- TINT_ASSERT(Semantic, function);
+ TINT_ASSERT(Semantic, parent);
+ TINT_ASSERT(Semantic, function);
}
LoopContinuingBlockStatement::~LoopContinuingBlockStatement() = default;
diff --git a/chromium/third_party/dawn/src/tint/sem/loop_statement.h b/chromium/third_party/dawn/src/tint/sem/loop_statement.h
index ccf3be09654..502fa7ce86e 100644
--- a/chromium/third_party/dawn/src/tint/sem/loop_statement.h
+++ b/chromium/third_party/dawn/src/tint/sem/loop_statement.h
@@ -26,33 +26,33 @@ namespace tint::sem {
/// Holds semantic information about a loop statement
class LoopStatement final : public Castable<LoopStatement, CompoundStatement> {
- public:
- /// Constructor
- /// @param declaration the AST node for this loop statement
- /// @param parent the owning statement
- /// @param function the owning function
- LoopStatement(const ast::LoopStatement* declaration,
- const CompoundStatement* parent,
- const sem::Function* function);
-
- /// Destructor
- ~LoopStatement() override;
+ public:
+ /// Constructor
+ /// @param declaration the AST node for this loop statement
+ /// @param parent the owning statement
+ /// @param function the owning function
+ LoopStatement(const ast::LoopStatement* declaration,
+ const CompoundStatement* parent,
+ const sem::Function* function);
+
+ /// Destructor
+ ~LoopStatement() override;
};
/// Holds semantic information about a loop continuing block
class LoopContinuingBlockStatement final
: public Castable<LoopContinuingBlockStatement, BlockStatement> {
- public:
- /// Constructor
- /// @param declaration the AST node for this block statement
- /// @param parent the owning statement
- /// @param function the owning function
- LoopContinuingBlockStatement(const ast::BlockStatement* declaration,
- const CompoundStatement* parent,
- const sem::Function* function);
-
- /// Destructor
- ~LoopContinuingBlockStatement() override;
+ public:
+ /// Constructor
+ /// @param declaration the AST node for this block statement
+ /// @param parent the owning statement
+ /// @param function the owning function
+ LoopContinuingBlockStatement(const ast::BlockStatement* declaration,
+ const CompoundStatement* parent,
+ const sem::Function* function);
+
+ /// Destructor
+ ~LoopContinuingBlockStatement() override;
};
} // namespace tint::sem
diff --git a/chromium/third_party/dawn/src/tint/sem/materialize.cc b/chromium/third_party/dawn/src/tint/sem/materialize.cc
new file mode 100644
index 00000000000..76dd9d44055
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/sem/materialize.cc
@@ -0,0 +1,36 @@
+// Copyright 2022 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/tint/sem/materialize.h"
+
+TINT_INSTANTIATE_TYPEINFO(tint::sem::Materialize);
+
+namespace tint::sem {
+
+Materialize::Materialize(const Expression* expr, const Statement* statement, Constant constant)
+ : Base(/* declaration */ expr->Declaration(),
+ /* type */ constant.Type(),
+ /* statement */ statement,
+ /* constant */ constant,
+ /* has_side_effects */ false,
+ /* source_var */ expr->SourceVariable()),
+ expr_(expr) {
+ // Materialize nodes only wrap compile-time expressions, and so the Materialize expression must
+ // have a constant value.
+ TINT_ASSERT(Semantic, constant.IsValid());
+}
+
+Materialize::~Materialize() = default;
+
+} // namespace tint::sem
diff --git a/chromium/third_party/dawn/src/tint/sem/materialize.h b/chromium/third_party/dawn/src/tint/sem/materialize.h
new file mode 100644
index 00000000000..a7c0e3af5eb
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/sem/materialize.h
@@ -0,0 +1,48 @@
+// Copyright 2022 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef SRC_TINT_SEM_MATERIALIZE_H_
+#define SRC_TINT_SEM_MATERIALIZE_H_
+
+#include "src/tint/sem/expression.h"
+
+namespace tint::sem {
+
+/// Materialize is a semantic expression which represents the materialization of a value of an
+/// abstract numeric type to a value of a concrete type.
+/// Abstract numeric materialization is implicit in WGSL, so the Materialize semantic node shares
+/// the same AST node as the inner semantic node.
+/// Abstract numeric types may only be used by compile-time expressions, so a Materialize semantic
+/// node must have a valid Constant value.
+class Materialize final : public Castable<Materialize, Expression> {
+ public:
+ /// Constructor
+ /// @param expr the inner expression, being materialized
+ /// @param statement the statement that owns this expression
+ /// @param constant the constant value of this expression
+ Materialize(const Expression* expr, const Statement* statement, Constant constant);
+
+ /// Destructor
+ ~Materialize() override;
+
+ /// @return the inner expression that is being materialized
+ const Expression* Expr() const { return expr_; }
+
+ private:
+ Expression const* const expr_;
+};
+
+} // namespace tint::sem
+
+#endif // SRC_TINT_SEM_MATERIALIZE_H_
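Because a Materialize node shares its AST node with the expression it wraps, semantic-tree consumers can unwrap it without touching the AST. A minimal sketch (the helper name and include are illustrative):

    #include "src/tint/sem/materialize.h"

    // Sketch only: returns the pre-materialization expression if `expr` is a
    // Materialize node, otherwise returns `expr` unchanged.
    const tint::sem::Expression* UnwrapMaterialize(const tint::sem::Expression* expr) {
        if (auto* m = expr->As<tint::sem::Materialize>()) {
            // Declaration() is identical for both nodes; only the type and the
            // (always valid) constant value differ on the Materialize wrapper.
            return m->Expr();
        }
        return expr;
    }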
diff --git a/chromium/third_party/dawn/src/tint/sem/matrix_type.cc b/chromium/third_party/dawn/src/tint/sem/matrix.cc
index cfd9c1c4866..7f0383b28e0 100644
--- a/chromium/third_party/dawn/src/tint/sem/matrix_type.cc
+++ b/chromium/third_party/dawn/src/tint/sem/matrix.cc
@@ -12,10 +12,10 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-#include "src/tint/sem/matrix_type.h"
+#include "src/tint/sem/matrix.h"
#include "src/tint/program_builder.h"
-#include "src/tint/sem/vector_type.h"
+#include "src/tint/sem/vector.h"
#include "src/tint/utils/hash.h"
TINT_INSTANTIATE_TYPEINFO(tint::sem::Matrix);
@@ -27,10 +27,10 @@ Matrix::Matrix(const Vector* column_type, uint32_t columns)
column_type_(column_type),
rows_(column_type->Width()),
columns_(columns) {
- TINT_ASSERT(AST, rows_ > 1);
- TINT_ASSERT(AST, rows_ < 5);
- TINT_ASSERT(AST, columns_ > 1);
- TINT_ASSERT(AST, columns_ < 5);
+ TINT_ASSERT(AST, rows_ > 1);
+ TINT_ASSERT(AST, rows_ < 5);
+ TINT_ASSERT(AST, columns_ > 1);
+ TINT_ASSERT(AST, columns_ < 5);
}
Matrix::Matrix(Matrix&&) = default;
@@ -38,39 +38,36 @@ Matrix::Matrix(Matrix&&) = default;
Matrix::~Matrix() = default;
size_t Matrix::Hash() const {
- return utils::Hash(TypeInfo::Of<Vector>().full_hashcode, rows_, columns_,
- column_type_);
+ return utils::Hash(TypeInfo::Of<Vector>().full_hashcode, rows_, columns_, column_type_);
}
bool Matrix::Equals(const Type& other) const {
- if (auto* v = other.As<Matrix>()) {
- return v->rows_ == rows_ && v->columns_ == columns_ &&
- v->column_type_ == column_type_;
- }
- return false;
+ if (auto* v = other.As<Matrix>()) {
+ return v->rows_ == rows_ && v->columns_ == columns_ && v->column_type_ == column_type_;
+ }
+ return false;
}
std::string Matrix::FriendlyName(const SymbolTable& symbols) const {
- std::ostringstream out;
- out << "mat" << columns_ << "x" << rows_ << "<"
- << subtype_->FriendlyName(symbols) << ">";
- return out.str();
+ std::ostringstream out;
+ out << "mat" << columns_ << "x" << rows_ << "<" << subtype_->FriendlyName(symbols) << ">";
+ return out.str();
}
bool Matrix::IsConstructible() const {
- return true;
+ return true;
}
uint32_t Matrix::Size() const {
- return column_type_->Align() * columns();
+ return column_type_->Align() * columns();
}
uint32_t Matrix::Align() const {
- return column_type_->Align();
+ return column_type_->Align();
}
uint32_t Matrix::ColumnStride() const {
- return column_type_->Align();
+ return column_type_->Align();
}
} // namespace tint::sem
diff --git a/chromium/third_party/dawn/src/tint/sem/matrix.h b/chromium/third_party/dawn/src/tint/sem/matrix.h
new file mode 100644
index 00000000000..0321c4b0a7e
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/sem/matrix.h
@@ -0,0 +1,85 @@
+// Copyright 2020 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef SRC_TINT_SEM_MATRIX_H_
+#define SRC_TINT_SEM_MATRIX_H_
+
+#include <string>
+
+#include "src/tint/sem/type.h"
+
+// Forward declarations
+namespace tint::sem {
+class Vector;
+} // namespace tint::sem
+
+namespace tint::sem {
+
+/// A matrix type
+class Matrix final : public Castable<Matrix, Type> {
+ public:
+ /// Constructor
+ /// @param column_type the type of a column of the matrix
+ /// @param columns the number of columns in the matrix
+ Matrix(const Vector* column_type, uint32_t columns);
+ /// Move constructor
+ Matrix(Matrix&&);
+ ~Matrix() override;
+
+ /// @returns a hash of the type.
+ size_t Hash() const override;
+
+ /// @param other the other type to compare against
+ /// @returns true if this type is equal to the given type
+ bool Equals(const Type& other) const override;
+
+ /// @returns the type of the matrix
+ const Type* type() const { return subtype_; }
+ /// @returns the number of rows in the matrix
+ uint32_t rows() const { return rows_; }
+ /// @returns the number of columns in the matrix
+ uint32_t columns() const { return columns_; }
+
+ /// @returns the column-vector type of the matrix
+ const Vector* ColumnType() const { return column_type_; }
+
+ /// @param symbols the program's symbol table
+ /// @returns the name for this type that closely resembles how it would be
+ /// declared in WGSL.
+ std::string FriendlyName(const SymbolTable& symbols) const override;
+
+ /// @returns true if constructible as per
+ /// https://gpuweb.github.io/gpuweb/wgsl/#constructible-types
+ bool IsConstructible() const override;
+
+ /// @returns the size in bytes of the type. This may include tail padding.
+ uint32_t Size() const override;
+
+ /// @returns the alignment in bytes of the type. This may include tail
+ /// padding.
+ uint32_t Align() const override;
+
+ /// @returns the number of bytes between columns of the matrix
+ uint32_t ColumnStride() const;
+
+ private:
+ const Type* const subtype_;
+ const Vector* const column_type_;
+ const uint32_t rows_;
+ const uint32_t columns_;
+};
+
+} // namespace tint::sem
+
+#endif // SRC_TINT_SEM_MATRIX_H_
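The renamed matrix.cc above derives the layout entirely from the column vector: Size() is ColumnType()->Align() * columns() and ColumnStride() equals ColumnType()->Align(). A gtest-style sketch of that invariant, mirroring the includes of matrix_test.cc below (the fixture alias and test name are assumptions):

    #include "src/tint/sem/test_helper.h"

    namespace tint::sem {
    namespace {

    using MatrixLayoutSketch = TestHelper;

    TEST_F(MatrixLayoutSketch, SizeFollowsColumnAlign) {
        auto* m = create<Matrix>(create<Vector>(create<F32>(), 3u), 2u);
        // Both quantities come from the column vector's alignment, so the total
        // size is always the column stride times the column count.
        EXPECT_EQ(m->ColumnStride(), m->ColumnType()->Align());
        EXPECT_EQ(m->Size(), m->ColumnStride() * m->columns());
    }

    }  // namespace
    }  // namespace tint::sem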
diff --git a/chromium/third_party/dawn/src/tint/sem/matrix_test.cc b/chromium/third_party/dawn/src/tint/sem/matrix_test.cc
new file mode 100644
index 00000000000..a82a2d25df5
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/sem/matrix_test.cc
@@ -0,0 +1,75 @@
+// Copyright 2020 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/tint/sem/test_helper.h"
+#include "src/tint/sem/texture.h"
+
+namespace tint::sem {
+namespace {
+
+using MatrixTest = TestHelper;
+
+TEST_F(MatrixTest, Creation) {
+ auto* a = create<Matrix>(create<Vector>(create<I32>(), 3u), 4u);
+ auto* b = create<Matrix>(create<Vector>(create<I32>(), 3u), 4u);
+ auto* c = create<Matrix>(create<Vector>(create<F32>(), 3u), 4u);
+ auto* d = create<Matrix>(create<Vector>(create<I32>(), 2u), 4u);
+ auto* e = create<Matrix>(create<Vector>(create<I32>(), 3u), 2u);
+
+ EXPECT_EQ(a->type(), create<I32>());
+ EXPECT_EQ(a->rows(), 3u);
+ EXPECT_EQ(a->columns(), 4u);
+
+ EXPECT_EQ(a, b);
+ EXPECT_NE(a, c);
+ EXPECT_NE(a, d);
+ EXPECT_NE(a, e);
+}
+
+TEST_F(MatrixTest, Hash) {
+ auto* a = create<Matrix>(create<Vector>(create<I32>(), 3u), 4u);
+ auto* b = create<Matrix>(create<Vector>(create<I32>(), 3u), 4u);
+ auto* c = create<Matrix>(create<Vector>(create<F32>(), 3u), 4u);
+ auto* d = create<Matrix>(create<Vector>(create<I32>(), 2u), 4u);
+ auto* e = create<Matrix>(create<Vector>(create<I32>(), 3u), 2u);
+
+ EXPECT_EQ(a->Hash(), b->Hash());
+ EXPECT_NE(a->Hash(), c->Hash());
+ EXPECT_NE(a->Hash(), d->Hash());
+ EXPECT_NE(a->Hash(), e->Hash());
+}
+
+TEST_F(MatrixTest, Equals) {
+ auto* a = create<Matrix>(create<Vector>(create<I32>(), 3u), 4u);
+ auto* b = create<Matrix>(create<Vector>(create<I32>(), 3u), 4u);
+ auto* c = create<Matrix>(create<Vector>(create<F32>(), 3u), 4u);
+ auto* d = create<Matrix>(create<Vector>(create<I32>(), 2u), 4u);
+ auto* e = create<Matrix>(create<Vector>(create<I32>(), 3u), 2u);
+
+ EXPECT_TRUE(a->Equals(*b));
+ EXPECT_FALSE(a->Equals(*c));
+ EXPECT_FALSE(a->Equals(*d));
+ EXPECT_FALSE(a->Equals(*e));
+ EXPECT_FALSE(a->Equals(Void{}));
+}
+
+TEST_F(MatrixTest, FriendlyName) {
+ I32 i32;
+ Vector c{&i32, 3};
+ Matrix m{&c, 2};
+ EXPECT_EQ(m.FriendlyName(Symbols()), "mat2x3<i32>");
+}
+
+} // namespace
+} // namespace tint::sem
diff --git a/chromium/third_party/dawn/src/tint/sem/matrix_type.h b/chromium/third_party/dawn/src/tint/sem/matrix_type.h
deleted file mode 100644
index b5b96699a31..00000000000
--- a/chromium/third_party/dawn/src/tint/sem/matrix_type.h
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright 2020 The Tint Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef SRC_TINT_SEM_MATRIX_TYPE_H_
-#define SRC_TINT_SEM_MATRIX_TYPE_H_
-
-#include <string>
-
-#include "src/tint/sem/type.h"
-
-// Forward declarations
-namespace tint::sem {
-class Vector;
-} // namespace tint::sem
-
-namespace tint::sem {
-
-/// A matrix type
-class Matrix final : public Castable<Matrix, Type> {
- public:
- /// Constructor
- /// @param column_type the type of a column of the matrix
- /// @param columns the number of columns in the matrix
- Matrix(const Vector* column_type, uint32_t columns);
- /// Move constructor
- Matrix(Matrix&&);
- ~Matrix() override;
-
- /// @returns a hash of the type.
- size_t Hash() const override;
-
- /// @param other the other type to compare against
- /// @returns true if the this type is equal to the given type
- bool Equals(const Type& other) const override;
-
- /// @returns the type of the matrix
- const Type* type() const { return subtype_; }
- /// @returns the number of rows in the matrix
- uint32_t rows() const { return rows_; }
- /// @returns the number of columns in the matrix
- uint32_t columns() const { return columns_; }
-
- /// @returns the column-vector type of the matrix
- const Vector* ColumnType() const { return column_type_; }
-
- /// @param symbols the program's symbol table
- /// @returns the name for this type that closely resembles how it would be
- /// declared in WGSL.
- std::string FriendlyName(const SymbolTable& symbols) const override;
-
- /// @returns true if constructible as per
- /// https://gpuweb.github.io/gpuweb/wgsl/#constructible-types
- bool IsConstructible() const override;
-
- /// @returns the size in bytes of the type. This may include tail padding.
- uint32_t Size() const override;
-
- /// @returns the alignment in bytes of the type. This may include tail
- /// padding.
- uint32_t Align() const override;
-
- /// @returns the number of bytes between columns of the matrix
- uint32_t ColumnStride() const;
-
- private:
- const Type* const subtype_;
- const Vector* const column_type_;
- const uint32_t rows_;
- const uint32_t columns_;
-};
-
-} // namespace tint::sem
-
-#endif // SRC_TINT_SEM_MATRIX_TYPE_H_
diff --git a/chromium/third_party/dawn/src/tint/sem/matrix_type_test.cc b/chromium/third_party/dawn/src/tint/sem/matrix_type_test.cc
deleted file mode 100644
index 52ae36b6a0f..00000000000
--- a/chromium/third_party/dawn/src/tint/sem/matrix_type_test.cc
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright 2020 The Tint Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "src/tint/sem/test_helper.h"
-#include "src/tint/sem/texture_type.h"
-
-namespace tint::sem {
-namespace {
-
-using MatrixTest = TestHelper;
-
-TEST_F(MatrixTest, Creation) {
- auto* a = create<Matrix>(create<Vector>(create<I32>(), 3u), 4u);
- auto* b = create<Matrix>(create<Vector>(create<I32>(), 3u), 4u);
- auto* c = create<Matrix>(create<Vector>(create<F32>(), 3u), 4u);
- auto* d = create<Matrix>(create<Vector>(create<I32>(), 2u), 4u);
- auto* e = create<Matrix>(create<Vector>(create<I32>(), 3u), 2u);
-
- EXPECT_EQ(a->type(), create<I32>());
- EXPECT_EQ(a->rows(), 3u);
- EXPECT_EQ(a->columns(), 4u);
-
- EXPECT_EQ(a, b);
- EXPECT_NE(a, c);
- EXPECT_NE(a, d);
- EXPECT_NE(a, e);
-}
-
-TEST_F(MatrixTest, Hash) {
- auto* a = create<Matrix>(create<Vector>(create<I32>(), 3u), 4u);
- auto* b = create<Matrix>(create<Vector>(create<I32>(), 3u), 4u);
- auto* c = create<Matrix>(create<Vector>(create<F32>(), 3u), 4u);
- auto* d = create<Matrix>(create<Vector>(create<I32>(), 2u), 4u);
- auto* e = create<Matrix>(create<Vector>(create<I32>(), 3u), 2u);
-
- EXPECT_EQ(a->Hash(), b->Hash());
- EXPECT_NE(a->Hash(), c->Hash());
- EXPECT_NE(a->Hash(), d->Hash());
- EXPECT_NE(a->Hash(), e->Hash());
-}
-
-TEST_F(MatrixTest, Equals) {
- auto* a = create<Matrix>(create<Vector>(create<I32>(), 3u), 4u);
- auto* b = create<Matrix>(create<Vector>(create<I32>(), 3u), 4u);
- auto* c = create<Matrix>(create<Vector>(create<F32>(), 3u), 4u);
- auto* d = create<Matrix>(create<Vector>(create<I32>(), 2u), 4u);
- auto* e = create<Matrix>(create<Vector>(create<I32>(), 3u), 2u);
-
- EXPECT_TRUE(a->Equals(*b));
- EXPECT_FALSE(a->Equals(*c));
- EXPECT_FALSE(a->Equals(*d));
- EXPECT_FALSE(a->Equals(*e));
- EXPECT_FALSE(a->Equals(Void{}));
-}
-
-TEST_F(MatrixTest, FriendlyName) {
- I32 i32;
- Vector c{&i32, 3};
- Matrix m{&c, 2};
- EXPECT_EQ(m.FriendlyName(Symbols()), "mat2x3<i32>");
-}
-
-} // namespace
-} // namespace tint::sem
diff --git a/chromium/third_party/dawn/src/tint/sem/member_accessor_expression.cc b/chromium/third_party/dawn/src/tint/sem/member_accessor_expression.cc
index 4e04ab479ca..9dcca76ef0c 100644
--- a/chromium/third_party/dawn/src/tint/sem/member_accessor_expression.cc
+++ b/chromium/third_party/dawn/src/tint/sem/member_accessor_expression.cc
@@ -23,22 +23,22 @@ TINT_INSTANTIATE_TYPEINFO(tint::sem::Swizzle);
namespace tint::sem {
-MemberAccessorExpression::MemberAccessorExpression(
- const ast::MemberAccessorExpression* declaration,
- const sem::Type* type,
- const Statement* statement,
- bool has_side_effects)
- : Base(declaration, type, statement, Constant{}, has_side_effects) {}
+MemberAccessorExpression::MemberAccessorExpression(const ast::MemberAccessorExpression* declaration,
+ const sem::Type* type,
+ const Statement* statement,
+ bool has_side_effects,
+ const Variable* source_var /* = nullptr */)
+ : Base(declaration, type, statement, Constant{}, has_side_effects, source_var) {}
MemberAccessorExpression::~MemberAccessorExpression() = default;
-StructMemberAccess::StructMemberAccess(
- const ast::MemberAccessorExpression* declaration,
- const sem::Type* type,
- const Statement* statement,
- const StructMember* member,
- bool has_side_effects)
- : Base(declaration, type, statement, has_side_effects), member_(member) {}
+StructMemberAccess::StructMemberAccess(const ast::MemberAccessorExpression* declaration,
+ const sem::Type* type,
+ const Statement* statement,
+ const StructMember* member,
+ bool has_side_effects,
+ const Variable* source_var /* = nullptr */)
+ : Base(declaration, type, statement, has_side_effects, source_var), member_(member) {}
StructMemberAccess::~StructMemberAccess() = default;
@@ -46,8 +46,9 @@ Swizzle::Swizzle(const ast::MemberAccessorExpression* declaration,
const sem::Type* type,
const Statement* statement,
std::vector<uint32_t> indices,
- bool has_side_effects)
- : Base(declaration, type, statement, has_side_effects),
+ bool has_side_effects,
+ const Variable* source_var /* = nullptr */)
+ : Base(declaration, type, statement, has_side_effects, source_var),
indices_(std::move(indices)) {}
Swizzle::~Swizzle() = default;
diff --git a/chromium/third_party/dawn/src/tint/sem/member_accessor_expression.h b/chromium/third_party/dawn/src/tint/sem/member_accessor_expression.h
index 342acbe3232..0233541e318 100644
--- a/chromium/third_party/dawn/src/tint/sem/member_accessor_expression.h
+++ b/chromium/third_party/dawn/src/tint/sem/member_accessor_expression.h
@@ -32,75 +32,79 @@ namespace tint::sem {
/// MemberAccessorExpression holds the semantic information for a
/// ast::MemberAccessorExpression node.
-class MemberAccessorExpression
- : public Castable<MemberAccessorExpression, Expression> {
- public:
- /// Constructor
- /// @param declaration the AST node
- /// @param type the resolved type of the expression
- /// @param statement the statement that owns this expression
- /// @param has_side_effects whether this expression may have side effects
- MemberAccessorExpression(const ast::MemberAccessorExpression* declaration,
- const sem::Type* type,
- const Statement* statement,
- bool has_side_effects);
-
- /// Destructor
- ~MemberAccessorExpression() override;
+class MemberAccessorExpression : public Castable<MemberAccessorExpression, Expression> {
+ public:
+ /// Constructor
+ /// @param declaration the AST node
+ /// @param type the resolved type of the expression
+ /// @param statement the statement that owns this expression
+ /// @param has_side_effects whether this expression may have side effects
+ /// @param source_var the (optional) source variable for this expression
+ MemberAccessorExpression(const ast::MemberAccessorExpression* declaration,
+ const sem::Type* type,
+ const Statement* statement,
+ bool has_side_effects,
+ const Variable* source_var = nullptr);
+
+ /// Destructor
+ ~MemberAccessorExpression() override;
};
/// StructMemberAccess holds the semantic information for a
/// ast::MemberAccessorExpression node that represents an access to a structure
/// member.
-class StructMemberAccess final
- : public Castable<StructMemberAccess, MemberAccessorExpression> {
- public:
- /// Constructor
- /// @param declaration the AST node
- /// @param type the resolved type of the expression
- /// @param statement the statement that owns this expression
- /// @param member the structure member
- /// @param has_side_effects whether this expression may have side effects
- StructMemberAccess(const ast::MemberAccessorExpression* declaration,
- const sem::Type* type,
- const Statement* statement,
- const StructMember* member,
- bool has_side_effects);
-
- /// Destructor
- ~StructMemberAccess() override;
-
- /// @returns the structure member
- StructMember const* Member() const { return member_; }
-
- private:
- StructMember const* const member_;
+class StructMemberAccess final : public Castable<StructMemberAccess, MemberAccessorExpression> {
+ public:
+ /// Constructor
+ /// @param declaration the AST node
+ /// @param type the resolved type of the expression
+ /// @param statement the statement that owns this expression
+ /// @param member the structure member
+ /// @param has_side_effects whether this expression may have side effects
+ /// @param source_var the (optional) source variable for this expression
+ StructMemberAccess(const ast::MemberAccessorExpression* declaration,
+ const sem::Type* type,
+ const Statement* statement,
+ const StructMember* member,
+ bool has_side_effects,
+ const Variable* source_var = nullptr);
+
+ /// Destructor
+ ~StructMemberAccess() override;
+
+ /// @returns the structure member
+ StructMember const* Member() const { return member_; }
+
+ private:
+ StructMember const* const member_;
};
/// Swizzle holds the semantic information for a ast::MemberAccessorExpression
/// node that represents a vector swizzle.
class Swizzle final : public Castable<Swizzle, MemberAccessorExpression> {
- public:
- /// Constructor
- /// @param declaration the AST node
- /// @param type the resolved type of the expression
- /// @param statement the statement that owns this expression
- /// @param indices the swizzle indices
- /// @param has_side_effects whether this expression may have side effects
- Swizzle(const ast::MemberAccessorExpression* declaration,
- const sem::Type* type,
- const Statement* statement,
- std::vector<uint32_t> indices,
- bool has_side_effects);
-
- /// Destructor
- ~Swizzle() override;
-
- /// @return the swizzle indices, if this is a vector swizzle
- const std::vector<uint32_t>& Indices() const { return indices_; }
-
- private:
- std::vector<uint32_t> const indices_;
+ public:
+ /// Constructor
+ /// @param declaration the AST node
+ /// @param type the resolved type of the expression
+ /// @param statement the statement that owns this expression
+ /// @param indices the swizzle indices
+ /// @param has_side_effects whether this expression may have side effects
+ /// @param source_var the (optional) source variable for this expression
+ Swizzle(const ast::MemberAccessorExpression* declaration,
+ const sem::Type* type,
+ const Statement* statement,
+ std::vector<uint32_t> indices,
+ bool has_side_effects,
+ const Variable* source_var = nullptr);
+
+ /// Destructor
+ ~Swizzle() override;
+
+ /// @return the swizzle indices, if this is a vector swizzle
+ const std::vector<uint32_t>& Indices() const { return indices_; }
+
+ private:
+ std::vector<uint32_t> const indices_;
};
} // namespace tint::sem
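
The only functional change across MemberAccessorExpression, StructMemberAccess and Swizzle is the trailing source_var constructor parameter, which defaults to nullptr so existing call sites keep compiling while the resolver can record the root variable an accessor chain originates from. A minimal sketch of the two call shapes, written as a hypothetical resolver-side helper (the ProgramBuilder and every argument pointer are assumed to be supplied by the caller):

    #include "src/tint/program_builder.h"
    #include "src/tint/sem/member_accessor_expression.h"

    // Hypothetical helper: all arguments are assumed to come from the resolver.
    const tint::sem::StructMemberAccess* MakeAccess(
        tint::ProgramBuilder& b,
        const tint::ast::MemberAccessorExpression* decl,
        const tint::sem::Type* type,
        const tint::sem::Statement* stmt,
        const tint::sem::StructMember* member,
        const tint::sem::Variable* root_var) {
        // Old call shape still compiles: source_var defaults to nullptr.
        b.create<tint::sem::StructMemberAccess>(decl, type, stmt, member,
                                                /* has_side_effects */ false);
        // New call shape: record the variable the accessor chain starts at.
        return b.create<tint::sem::StructMemberAccess>(decl, type, stmt, member,
                                                       /* has_side_effects */ false,
                                                       /* source_var */ root_var);
    }
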
diff --git a/chromium/third_party/dawn/src/tint/sem/module.cc b/chromium/third_party/dawn/src/tint/sem/module.cc
index 83b71360a51..7c606505728 100644
--- a/chromium/third_party/dawn/src/tint/sem/module.cc
+++ b/chromium/third_party/dawn/src/tint/sem/module.cc
@@ -21,8 +21,8 @@ TINT_INSTANTIATE_TYPEINFO(tint::sem::Module);
namespace tint::sem {
-Module::Module(std::vector<const ast::Node*> dep_ordered_decls)
- : dep_ordered_decls_(std::move(dep_ordered_decls)) {}
+Module::Module(std::vector<const ast::Node*> dep_ordered_decls, ast::Extensions extensions)
+ : dep_ordered_decls_(std::move(dep_ordered_decls)), extensions_(std::move(extensions)) {}
Module::~Module() = default;
diff --git a/chromium/third_party/dawn/src/tint/sem/module.h b/chromium/third_party/dawn/src/tint/sem/module.h
index 1077b7e967d..dffe003590d 100644
--- a/chromium/third_party/dawn/src/tint/sem/module.h
+++ b/chromium/third_party/dawn/src/tint/sem/module.h
@@ -17,12 +17,12 @@
#include <vector>
+#include "src/tint/ast/extension.h"
#include "src/tint/sem/node.h"
// Forward declarations
namespace tint::ast {
class Node;
-class Module;
} // namespace tint::ast
namespace tint::sem {
@@ -30,21 +30,26 @@ namespace tint::sem {
/// Module holds the top-level semantic types, functions and global variables
/// used by a Program.
class Module final : public Castable<Module, Node> {
- public:
- /// Constructor
- /// @param dep_ordered_decls the dependency-ordered module-scope declarations
- explicit Module(std::vector<const ast::Node*> dep_ordered_decls);
-
- /// Destructor
- ~Module() override;
-
- /// @returns the dependency-ordered global declarations for the module
- const std::vector<const ast::Node*>& DependencyOrderedDeclarations() const {
- return dep_ordered_decls_;
- }
-
- private:
- const std::vector<const ast::Node*> dep_ordered_decls_;
+ public:
+ /// Constructor
+ /// @param dep_ordered_decls the dependency-ordered module-scope declarations
+ /// @param extensions the list of enabled extensions in the module
+ Module(std::vector<const ast::Node*> dep_ordered_decls, ast::Extensions extensions);
+
+ /// Destructor
+ ~Module() override;
+
+ /// @returns the dependency-ordered global declarations for the module
+ const std::vector<const ast::Node*>& DependencyOrderedDeclarations() const {
+ return dep_ordered_decls_;
+ }
+
+ /// @returns the list of enabled extensions in the module
+ const ast::Extensions& Extensions() const { return extensions_; }
+
+ private:
+ const std::vector<const ast::Node*> dep_ordered_decls_;
+ ast::Extensions extensions_;
};
} // namespace tint::sem
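
sem::Module now also carries the set of enabled extensions, passed in alongside the dependency-ordered declarations and read back through the new Extensions() accessor. A minimal sketch, assuming ast::Extensions is default-constructible and that the globals and extensions have already been collected elsewhere:

    #include <utility>
    #include <vector>

    #include "src/tint/program_builder.h"
    #include "src/tint/sem/module.h"

    const tint::sem::Module* MakeModule(tint::ProgramBuilder& b,
                                        std::vector<const tint::ast::Node*> decls,
                                        tint::ast::Extensions extensions) {
        // decls: dependency-ordered module-scope declarations (assumed collected).
        // extensions: assumed populated from the module's `enable` directives.
        auto* mod = b.create<tint::sem::Module>(std::move(decls), std::move(extensions));
        // Later stages can now query the enabled extensions.
        const tint::ast::Extensions& exts = mod->Extensions();
        (void)exts;
        return mod;
    }
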
diff --git a/chromium/third_party/dawn/src/tint/sem/multisampled_texture_type.cc b/chromium/third_party/dawn/src/tint/sem/multisampled_texture.cc
index 72ce2f510ed..922b96713d2 100644
--- a/chromium/third_party/dawn/src/tint/sem/multisampled_texture_type.cc
+++ b/chromium/third_party/dawn/src/tint/sem/multisampled_texture.cc
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-#include "src/tint/sem/multisampled_texture_type.h"
+#include "src/tint/sem/multisampled_texture.h"
#include "src/tint/program_builder.h"
#include "src/tint/utils/hash.h"
@@ -21,10 +21,9 @@ TINT_INSTANTIATE_TYPEINFO(tint::sem::MultisampledTexture);
namespace tint::sem {
-MultisampledTexture::MultisampledTexture(ast::TextureDimension dim,
- const Type* type)
+MultisampledTexture::MultisampledTexture(ast::TextureDimension dim, const Type* type)
: Base(dim), type_(type) {
- TINT_ASSERT(Semantic, type_);
+ TINT_ASSERT(Semantic, type_);
}
MultisampledTexture::MultisampledTexture(MultisampledTexture&&) = default;
@@ -32,23 +31,20 @@ MultisampledTexture::MultisampledTexture(MultisampledTexture&&) = default;
MultisampledTexture::~MultisampledTexture() = default;
size_t MultisampledTexture::Hash() const {
- return utils::Hash(TypeInfo::Of<MultisampledTexture>().full_hashcode, dim(),
- type_);
+ return utils::Hash(TypeInfo::Of<MultisampledTexture>().full_hashcode, dim(), type_);
}
bool MultisampledTexture::Equals(const sem::Type& other) const {
- if (auto* o = other.As<MultisampledTexture>()) {
- return o->dim() == dim() && o->type_ == type_;
- }
- return false;
+ if (auto* o = other.As<MultisampledTexture>()) {
+ return o->dim() == dim() && o->type_ == type_;
+ }
+ return false;
}
-std::string MultisampledTexture::FriendlyName(
- const SymbolTable& symbols) const {
- std::ostringstream out;
- out << "texture_multisampled_" << dim() << "<" << type_->FriendlyName(symbols)
- << ">";
- return out.str();
+std::string MultisampledTexture::FriendlyName(const SymbolTable& symbols) const {
+ std::ostringstream out;
+ out << "texture_multisampled_" << dim() << "<" << type_->FriendlyName(symbols) << ">";
+ return out.str();
}
} // namespace tint::sem
diff --git a/chromium/third_party/dawn/src/tint/sem/multisampled_texture.h b/chromium/third_party/dawn/src/tint/sem/multisampled_texture.h
new file mode 100644
index 00000000000..f178056d71e
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/sem/multisampled_texture.h
@@ -0,0 +1,56 @@
+// Copyright 2020 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef SRC_TINT_SEM_MULTISAMPLED_TEXTURE_H_
+#define SRC_TINT_SEM_MULTISAMPLED_TEXTURE_H_
+
+#include <string>
+
+#include "src/tint/sem/texture.h"
+
+namespace tint::sem {
+
+/// A multisampled texture type.
+class MultisampledTexture final : public Castable<MultisampledTexture, Texture> {
+ public:
+ /// Constructor
+ /// @param dim the dimensionality of the texture
+ /// @param type the data type of the multisampled texture
+ MultisampledTexture(ast::TextureDimension dim, const Type* type);
+ /// Move constructor
+ MultisampledTexture(MultisampledTexture&&);
+ ~MultisampledTexture() override;
+
+ /// @returns a hash of the type.
+ size_t Hash() const override;
+
+ /// @param other the other type to compare against
+ /// @returns true if this type is equal to the given type
+ bool Equals(const Type& other) const override;
+
+ /// @returns the subtype of the sampled texture
+ const Type* type() const { return type_; }
+
+ /// @param symbols the program's symbol table
+ /// @returns the name for this type that closely resembles how it would be
+ /// declared in WGSL.
+ std::string FriendlyName(const SymbolTable& symbols) const override;
+
+ private:
+ const Type* const type_;
+};
+
+} // namespace tint::sem
+
+#endif // SRC_TINT_SEM_MULTISAMPLED_TEXTURE_H_
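
This header is the first of several in the patch to drop the _type suffix (pointer, reference and sampled_texture follow the same pattern below); the class API is unchanged, so downstream code only needs its include path updated:

    // Before this patch:
    //   #include "src/tint/sem/multisampled_texture_type.h"
    // After:
    #include "src/tint/sem/multisampled_texture.h"
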
diff --git a/chromium/third_party/dawn/src/tint/sem/multisampled_texture_test.cc b/chromium/third_party/dawn/src/tint/sem/multisampled_texture_test.cc
new file mode 100644
index 00000000000..3243a5f0e03
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/sem/multisampled_texture_test.cc
@@ -0,0 +1,89 @@
+// Copyright 2020 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/tint/sem/multisampled_texture.h"
+
+#include "src/tint/sem/depth_texture.h"
+#include "src/tint/sem/external_texture.h"
+#include "src/tint/sem/sampled_texture.h"
+#include "src/tint/sem/storage_texture.h"
+#include "src/tint/sem/test_helper.h"
+
+namespace tint::sem {
+namespace {
+
+using MultisampledTextureTest = TestHelper;
+
+TEST_F(MultisampledTextureTest, Creation) {
+ auto* a = create<MultisampledTexture>(ast::TextureDimension::k2d, create<F32>());
+ auto* b = create<MultisampledTexture>(ast::TextureDimension::k2d, create<F32>());
+ auto* c = create<MultisampledTexture>(ast::TextureDimension::k3d, create<F32>());
+ auto* d = create<MultisampledTexture>(ast::TextureDimension::k2d, create<I32>());
+ EXPECT_EQ(a, b);
+ EXPECT_NE(a, c);
+ EXPECT_NE(a, d);
+}
+
+TEST_F(MultisampledTextureTest, Hash) {
+ auto* a = create<MultisampledTexture>(ast::TextureDimension::k2d, create<F32>());
+ auto* b = create<MultisampledTexture>(ast::TextureDimension::k2d, create<F32>());
+ auto* c = create<MultisampledTexture>(ast::TextureDimension::k3d, create<F32>());
+ auto* d = create<MultisampledTexture>(ast::TextureDimension::k2d, create<I32>());
+ EXPECT_EQ(a->Hash(), b->Hash());
+ EXPECT_NE(a->Hash(), c->Hash());
+ EXPECT_NE(a->Hash(), d->Hash());
+}
+
+TEST_F(MultisampledTextureTest, Equals) {
+ auto* a = create<MultisampledTexture>(ast::TextureDimension::k2d, create<F32>());
+ auto* b = create<MultisampledTexture>(ast::TextureDimension::k2d, create<F32>());
+ auto* c = create<MultisampledTexture>(ast::TextureDimension::k3d, create<F32>());
+ auto* d = create<MultisampledTexture>(ast::TextureDimension::k2d, create<I32>());
+ EXPECT_TRUE(a->Equals(*b));
+ EXPECT_FALSE(a->Equals(*c));
+ EXPECT_FALSE(a->Equals(*d));
+ EXPECT_FALSE(a->Equals(Void{}));
+}
+
+TEST_F(MultisampledTextureTest, IsTexture) {
+ F32 f32;
+ MultisampledTexture s(ast::TextureDimension::kCube, &f32);
+ Texture* ty = &s;
+ EXPECT_FALSE(ty->Is<DepthTexture>());
+ EXPECT_FALSE(ty->Is<ExternalTexture>());
+ EXPECT_TRUE(ty->Is<MultisampledTexture>());
+ EXPECT_FALSE(ty->Is<SampledTexture>());
+ EXPECT_FALSE(ty->Is<StorageTexture>());
+}
+
+TEST_F(MultisampledTextureTest, Dim) {
+ F32 f32;
+ MultisampledTexture s(ast::TextureDimension::k3d, &f32);
+ EXPECT_EQ(s.dim(), ast::TextureDimension::k3d);
+}
+
+TEST_F(MultisampledTextureTest, Type) {
+ F32 f32;
+ MultisampledTexture s(ast::TextureDimension::k3d, &f32);
+ EXPECT_EQ(s.type(), &f32);
+}
+
+TEST_F(MultisampledTextureTest, FriendlyName) {
+ F32 f32;
+ MultisampledTexture s(ast::TextureDimension::k3d, &f32);
+ EXPECT_EQ(s.FriendlyName(Symbols()), "texture_multisampled_3d<f32>");
+}
+
+} // namespace
+} // namespace tint::sem
diff --git a/chromium/third_party/dawn/src/tint/sem/multisampled_texture_type.h b/chromium/third_party/dawn/src/tint/sem/multisampled_texture_type.h
deleted file mode 100644
index 0e993ec74c6..00000000000
--- a/chromium/third_party/dawn/src/tint/sem/multisampled_texture_type.h
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2020 The Tint Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef SRC_TINT_SEM_MULTISAMPLED_TEXTURE_TYPE_H_
-#define SRC_TINT_SEM_MULTISAMPLED_TEXTURE_TYPE_H_
-
-#include <string>
-
-#include "src/tint/sem/texture_type.h"
-
-namespace tint::sem {
-
-/// A multisampled texture type.
-class MultisampledTexture final
- : public Castable<MultisampledTexture, Texture> {
- public:
- /// Constructor
- /// @param dim the dimensionality of the texture
- /// @param type the data type of the multisampled texture
- MultisampledTexture(ast::TextureDimension dim, const Type* type);
- /// Move constructor
- MultisampledTexture(MultisampledTexture&&);
- ~MultisampledTexture() override;
-
- /// @returns a hash of the type.
- size_t Hash() const override;
-
- /// @param other the other type to compare against
- /// @returns true if the this type is equal to the given type
- bool Equals(const Type& other) const override;
-
- /// @returns the subtype of the sampled texture
- const Type* type() const { return type_; }
-
- /// @param symbols the program's symbol table
- /// @returns the name for this type that closely resembles how it would be
- /// declared in WGSL.
- std::string FriendlyName(const SymbolTable& symbols) const override;
-
- private:
- const Type* const type_;
-};
-
-} // namespace tint::sem
-
-#endif // SRC_TINT_SEM_MULTISAMPLED_TEXTURE_TYPE_H_
diff --git a/chromium/third_party/dawn/src/tint/sem/multisampled_texture_type_test.cc b/chromium/third_party/dawn/src/tint/sem/multisampled_texture_type_test.cc
deleted file mode 100644
index eb53ccd60a8..00000000000
--- a/chromium/third_party/dawn/src/tint/sem/multisampled_texture_type_test.cc
+++ /dev/null
@@ -1,101 +0,0 @@
-// Copyright 2020 The Tint Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "src/tint/sem/multisampled_texture_type.h"
-
-#include "src/tint/sem/depth_texture_type.h"
-#include "src/tint/sem/external_texture_type.h"
-#include "src/tint/sem/sampled_texture_type.h"
-#include "src/tint/sem/storage_texture_type.h"
-#include "src/tint/sem/test_helper.h"
-
-namespace tint::sem {
-namespace {
-
-using MultisampledTextureTest = TestHelper;
-
-TEST_F(MultisampledTextureTest, Creation) {
- auto* a =
- create<MultisampledTexture>(ast::TextureDimension::k2d, create<F32>());
- auto* b =
- create<MultisampledTexture>(ast::TextureDimension::k2d, create<F32>());
- auto* c =
- create<MultisampledTexture>(ast::TextureDimension::k3d, create<F32>());
- auto* d =
- create<MultisampledTexture>(ast::TextureDimension::k2d, create<I32>());
- EXPECT_EQ(a, b);
- EXPECT_NE(a, c);
- EXPECT_NE(a, d);
-}
-
-TEST_F(MultisampledTextureTest, Hash) {
- auto* a =
- create<MultisampledTexture>(ast::TextureDimension::k2d, create<F32>());
- auto* b =
- create<MultisampledTexture>(ast::TextureDimension::k2d, create<F32>());
- auto* c =
- create<MultisampledTexture>(ast::TextureDimension::k3d, create<F32>());
- auto* d =
- create<MultisampledTexture>(ast::TextureDimension::k2d, create<I32>());
- EXPECT_EQ(a->Hash(), b->Hash());
- EXPECT_NE(a->Hash(), c->Hash());
- EXPECT_NE(a->Hash(), d->Hash());
-}
-
-TEST_F(MultisampledTextureTest, Equals) {
- auto* a =
- create<MultisampledTexture>(ast::TextureDimension::k2d, create<F32>());
- auto* b =
- create<MultisampledTexture>(ast::TextureDimension::k2d, create<F32>());
- auto* c =
- create<MultisampledTexture>(ast::TextureDimension::k3d, create<F32>());
- auto* d =
- create<MultisampledTexture>(ast::TextureDimension::k2d, create<I32>());
- EXPECT_TRUE(a->Equals(*b));
- EXPECT_FALSE(a->Equals(*c));
- EXPECT_FALSE(a->Equals(*d));
- EXPECT_FALSE(a->Equals(Void{}));
-}
-
-TEST_F(MultisampledTextureTest, IsTexture) {
- F32 f32;
- MultisampledTexture s(ast::TextureDimension::kCube, &f32);
- Texture* ty = &s;
- EXPECT_FALSE(ty->Is<DepthTexture>());
- EXPECT_FALSE(ty->Is<ExternalTexture>());
- EXPECT_TRUE(ty->Is<MultisampledTexture>());
- EXPECT_FALSE(ty->Is<SampledTexture>());
- EXPECT_FALSE(ty->Is<StorageTexture>());
-}
-
-TEST_F(MultisampledTextureTest, Dim) {
- F32 f32;
- MultisampledTexture s(ast::TextureDimension::k3d, &f32);
- EXPECT_EQ(s.dim(), ast::TextureDimension::k3d);
-}
-
-TEST_F(MultisampledTextureTest, Type) {
- F32 f32;
- MultisampledTexture s(ast::TextureDimension::k3d, &f32);
- EXPECT_EQ(s.type(), &f32);
-}
-
-TEST_F(MultisampledTextureTest, FriendlyName) {
- F32 f32;
- MultisampledTexture s(ast::TextureDimension::k3d, &f32);
- EXPECT_EQ(s.FriendlyName(Symbols()), "texture_multisampled_3d<f32>");
-}
-
-} // namespace
-} // namespace tint::sem
diff --git a/chromium/third_party/dawn/src/tint/sem/node.h b/chromium/third_party/dawn/src/tint/sem/node.h
index d03b0425724..3f2df55f06d 100644
--- a/chromium/third_party/dawn/src/tint/sem/node.h
+++ b/chromium/third_party/dawn/src/tint/sem/node.h
@@ -21,15 +21,15 @@ namespace tint::sem {
/// Node is the base class for all semantic nodes
class Node : public Castable<Node> {
- public:
- /// Constructor
- Node();
+ public:
+ /// Constructor
+ Node();
- /// Copy constructor
- Node(const Node&);
+ /// Copy constructor
+ Node(const Node&);
- /// Destructor
- ~Node() override;
+ /// Destructor
+ ~Node() override;
};
} // namespace tint::sem
diff --git a/chromium/third_party/dawn/src/tint/sem/parameter_usage.cc b/chromium/third_party/dawn/src/tint/sem/parameter_usage.cc
index 3bea2378281..010272a0a40 100644
--- a/chromium/third_party/dawn/src/tint/sem/parameter_usage.cc
+++ b/chromium/third_party/dawn/src/tint/sem/parameter_usage.cc
@@ -13,11 +13,11 @@
// limitations under the License.
////////////////////////////////////////////////////////////////////////////////
-// File generated by tools/builtin-gen
+// File generated by tools/intrinsic-gen
// using the template:
// src/tint/sem/parameter_usage.cc.tmpl
-// and the builtin defintion file:
-// src/tint/builtins.def
+// and the intrinsic definition file:
+// src/tint/intrinsics.def
//
// Do not modify this file directly
////////////////////////////////////////////////////////////////////////////////
@@ -27,37 +27,55 @@
namespace tint::sem {
const char* str(ParameterUsage usage) {
- switch (usage) {
- case ParameterUsage::kNone:
- return "none";
- case ParameterUsage::kArrayIndex:
- return "array_index";
- case ParameterUsage::kBias:
- return "bias";
- case ParameterUsage::kComponent:
- return "component";
- case ParameterUsage::kCoords:
- return "coords";
- case ParameterUsage::kDdx:
- return "ddx";
- case ParameterUsage::kDdy:
- return "ddy";
- case ParameterUsage::kDepthRef:
- return "depth_ref";
- case ParameterUsage::kLevel:
- return "level";
- case ParameterUsage::kOffset:
- return "offset";
- case ParameterUsage::kSampleIndex:
- return "sample_index";
- case ParameterUsage::kSampler:
- return "sampler";
- case ParameterUsage::kTexture:
- return "texture";
- case ParameterUsage::kValue:
- return "value";
- }
- return "<unknown>";
+ switch (usage) {
+ case ParameterUsage::kNone:
+ return "none";
+ case ParameterUsage::kArrayIndex:
+ return "array_index";
+ case ParameterUsage::kBias:
+ return "bias";
+ case ParameterUsage::kComponent:
+ return "component";
+ case ParameterUsage::kCoords:
+ return "coords";
+ case ParameterUsage::kDdx:
+ return "ddx";
+ case ParameterUsage::kDdy:
+ return "ddy";
+ case ParameterUsage::kDepthRef:
+ return "depth_ref";
+ case ParameterUsage::kLevel:
+ return "level";
+ case ParameterUsage::kOffset:
+ return "offset";
+ case ParameterUsage::kSampleIndex:
+ return "sample_index";
+ case ParameterUsage::kSampler:
+ return "sampler";
+ case ParameterUsage::kTexture:
+ return "texture";
+ case ParameterUsage::kValue:
+ return "value";
+ case ParameterUsage::kW:
+ return "w";
+ case ParameterUsage::kX:
+ return "x";
+ case ParameterUsage::kXy:
+ return "xy";
+ case ParameterUsage::kXyz:
+ return "xyz";
+ case ParameterUsage::kY:
+ return "y";
+ case ParameterUsage::kYz:
+ return "yz";
+ case ParameterUsage::kZ:
+ return "z";
+ case ParameterUsage::kZw:
+ return "zw";
+ case ParameterUsage::kZyw:
+ return "zyw";
+ }
+ return "<unknown>";
}
} // namespace tint::sem
diff --git a/chromium/third_party/dawn/src/tint/sem/parameter_usage.cc.tmpl b/chromium/third_party/dawn/src/tint/sem/parameter_usage.cc.tmpl
index 1339171d6c8..bc413e103a9 100644
--- a/chromium/third_party/dawn/src/tint/sem/parameter_usage.cc.tmpl
+++ b/chromium/third_party/dawn/src/tint/sem/parameter_usage.cc.tmpl
@@ -3,7 +3,7 @@
Template file for use with tools/builtin-gen to generate parameter_usage.cc
See:
-* tools/cmd/builtin-gen/gen for structures used by this template
+* tools/cmd/intrinsic-gen/gen for structures used by this template
* https://golang.org/pkg/text/template/ for documentation on the template syntax
--------------------------------------------------------------------------------
*/ -}}
@@ -13,15 +13,15 @@ See:
namespace tint::sem {
const char* str(ParameterUsage usage) {
- switch (usage) {
- case ParameterUsage::kNone:
- return "none";
+ switch (usage) {
+ case ParameterUsage::kNone:
+ return "none";
{{- range .Sem.UniqueParameterNames }}
- case ParameterUsage::k{{PascalCase .}}:
- return "{{.}}";
+ case ParameterUsage::k{{PascalCase .}}:
+ return "{{.}}";
{{- end }}
- }
- return "<unknown>";
+ }
+ return "<unknown>";
}
} // namespace tint::sem
diff --git a/chromium/third_party/dawn/src/tint/sem/parameter_usage.h b/chromium/third_party/dawn/src/tint/sem/parameter_usage.h
index 56f6e782e94..b17ae3e502e 100644
--- a/chromium/third_party/dawn/src/tint/sem/parameter_usage.h
+++ b/chromium/third_party/dawn/src/tint/sem/parameter_usage.h
@@ -13,11 +13,11 @@
// limitations under the License.
////////////////////////////////////////////////////////////////////////////////
-// File generated by tools/builtin-gen
+// File generated by tools/intrinsic-gen
// using the template:
// src/tint/sem/parameter_usage.h.tmpl
-// and the builtin defintion file:
-// src/tint/builtins.def
+// and the intrinsic definition file:
+// src/tint/intrinsics.def
//
// Do not modify this file directly
////////////////////////////////////////////////////////////////////////////////
@@ -30,20 +30,29 @@ namespace tint::sem {
/// ParameterUsage is extra metadata for identifying a parameter based on its
/// overload position
enum class ParameterUsage {
- kNone = -1,
- kArrayIndex,
- kBias,
- kComponent,
- kCoords,
- kDdx,
- kDdy,
- kDepthRef,
- kLevel,
- kOffset,
- kSampleIndex,
- kSampler,
- kTexture,
- kValue,
+ kNone = -1,
+ kArrayIndex,
+ kBias,
+ kComponent,
+ kCoords,
+ kDdx,
+ kDdy,
+ kDepthRef,
+ kLevel,
+ kOffset,
+ kSampleIndex,
+ kSampler,
+ kTexture,
+ kValue,
+ kW,
+ kX,
+ kXy,
+ kXyz,
+ kY,
+ kYz,
+ kZ,
+ kZw,
+ kZyw,
};
/// @returns a string representation of the given parameter usage.
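
The regenerated ParameterUsage enum picks up the packed swizzle argument names (x, y, z, w and their combinations) used by the intrinsic definitions, and str() maps each enumerator to its lower-case spelling. A small self-contained check, using only the free function declared in this header:

    #include "src/tint/sem/parameter_usage.h"

    #include <cassert>
    #include <cstring>

    void CheckSwizzleUsageNames() {
        // The new enumerators stringify exactly as the generated switch shows.
        assert(std::strcmp(tint::sem::str(tint::sem::ParameterUsage::kXy), "xy") == 0);
        assert(std::strcmp(tint::sem::str(tint::sem::ParameterUsage::kZyw), "zyw") == 0);
    }
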
diff --git a/chromium/third_party/dawn/src/tint/sem/parameter_usage.h.tmpl b/chromium/third_party/dawn/src/tint/sem/parameter_usage.h.tmpl
index 1edb674ad48..b1b57895364 100644
--- a/chromium/third_party/dawn/src/tint/sem/parameter_usage.h.tmpl
+++ b/chromium/third_party/dawn/src/tint/sem/parameter_usage.h.tmpl
@@ -3,7 +3,7 @@
Template file for use with tools/builtin-gen to generate parameter_usage.h
See:
-* tools/cmd/builtin-gen/gen for structures used by this template
+* tools/cmd/intrinsic-gen/gen for structures used by this template
* https://golang.org/pkg/text/template/ for documentation on the template syntax
--------------------------------------------------------------------------------
*/ -}}
@@ -16,9 +16,9 @@ namespace tint::sem {
/// ParameterUsage is extra metadata for identifying a parameter based on its
/// overload position
enum class ParameterUsage {
- kNone = -1,
+ kNone = -1,
{{- range .Sem.UniqueParameterNames }}
- k{{PascalCase .}},
+ k{{PascalCase .}},
{{- end }}
};
diff --git a/chromium/third_party/dawn/src/tint/sem/pointer_type.cc b/chromium/third_party/dawn/src/tint/sem/pointer.cc
index b1f137a5420..e00a4bfc49b 100644
--- a/chromium/third_party/dawn/src/tint/sem/pointer_type.cc
+++ b/chromium/third_party/dawn/src/tint/sem/pointer.cc
@@ -12,46 +12,43 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-#include "src/tint/sem/pointer_type.h"
+#include "src/tint/sem/pointer.h"
#include "src/tint/program_builder.h"
-#include "src/tint/sem/reference_type.h"
+#include "src/tint/sem/reference.h"
#include "src/tint/utils/hash.h"
TINT_INSTANTIATE_TYPEINFO(tint::sem::Pointer);
namespace tint::sem {
-Pointer::Pointer(const Type* subtype,
- ast::StorageClass storage_class,
- ast::Access access)
+Pointer::Pointer(const Type* subtype, ast::StorageClass storage_class, ast::Access access)
: subtype_(subtype), storage_class_(storage_class), access_(access) {
- TINT_ASSERT(Semantic, !subtype->Is<Reference>());
- TINT_ASSERT(Semantic, access != ast::Access::kUndefined);
+ TINT_ASSERT(Semantic, !subtype->Is<Reference>());
+ TINT_ASSERT(Semantic, access != ast::Access::kUndefined);
}
size_t Pointer::Hash() const {
- return utils::Hash(TypeInfo::Of<Pointer>().full_hashcode, storage_class_,
- subtype_, access_);
+ return utils::Hash(TypeInfo::Of<Pointer>().full_hashcode, storage_class_, subtype_, access_);
}
bool Pointer::Equals(const sem::Type& other) const {
- if (auto* o = other.As<Pointer>()) {
- return o->storage_class_ == storage_class_ && o->subtype_ == subtype_ &&
- o->access_ == access_;
- }
- return false;
+ if (auto* o = other.As<Pointer>()) {
+ return o->storage_class_ == storage_class_ && o->subtype_ == subtype_ &&
+ o->access_ == access_;
+ }
+ return false;
}
std::string Pointer::FriendlyName(const SymbolTable& symbols) const {
- std::ostringstream out;
- out << "ptr<";
- if (storage_class_ != ast::StorageClass::kNone) {
- out << storage_class_ << ", ";
- }
- out << subtype_->FriendlyName(symbols) << ", " << access_;
- out << ">";
- return out.str();
+ std::ostringstream out;
+ out << "ptr<";
+ if (storage_class_ != ast::StorageClass::kNone) {
+ out << storage_class_ << ", ";
+ }
+ out << subtype_->FriendlyName(symbols) << ", " << access_;
+ out << ">";
+ return out.str();
}
Pointer::Pointer(Pointer&&) = default;
diff --git a/chromium/third_party/dawn/src/tint/sem/pointer.h b/chromium/third_party/dawn/src/tint/sem/pointer.h
new file mode 100644
index 00000000000..0c82e776a2b
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/sem/pointer.h
@@ -0,0 +1,68 @@
+// Copyright 2020 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef SRC_TINT_SEM_POINTER_H_
+#define SRC_TINT_SEM_POINTER_H_
+
+#include <string>
+
+#include "src/tint/ast/access.h"
+#include "src/tint/ast/storage_class.h"
+#include "src/tint/sem/type.h"
+
+namespace tint::sem {
+
+/// A pointer type.
+class Pointer final : public Castable<Pointer, Type> {
+ public:
+ /// Constructor
+ /// @param subtype the pointee type
+ /// @param storage_class the storage class of the pointer
+ /// @param access the resolved access control of the reference
+ Pointer(const Type* subtype, ast::StorageClass storage_class, ast::Access access);
+
+ /// Move constructor
+ Pointer(Pointer&&);
+ ~Pointer() override;
+
+ /// @returns a hash of the type.
+ size_t Hash() const override;
+
+ /// @param other the other type to compare against
+ /// @returns true if the this type is equal to the given type
+ bool Equals(const Type& other) const override;
+
+ /// @returns the pointee type
+ const Type* StoreType() const { return subtype_; }
+
+ /// @returns the storage class of the pointer
+ ast::StorageClass StorageClass() const { return storage_class_; }
+
+ /// @returns the access control of the reference
+ ast::Access Access() const { return access_; }
+
+ /// @param symbols the program's symbol table
+ /// @returns the name for this type that closely resembles how it would be
+ /// declared in WGSL.
+ std::string FriendlyName(const SymbolTable& symbols) const override;
+
+ private:
+ Type const* const subtype_;
+ ast::StorageClass const storage_class_;
+ ast::Access const access_;
+};
+
+} // namespace tint::sem
+
+#endif // SRC_TINT_SEM_POINTER_H_
diff --git a/chromium/third_party/dawn/src/tint/sem/pointer_test.cc b/chromium/third_party/dawn/src/tint/sem/pointer_test.cc
new file mode 100644
index 00000000000..575db417da7
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/sem/pointer_test.cc
@@ -0,0 +1,78 @@
+// Copyright 2020 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/tint/sem/test_helper.h"
+#include "src/tint/sem/texture.h"
+
+namespace tint::sem {
+namespace {
+
+using PointerTest = TestHelper;
+
+TEST_F(PointerTest, Creation) {
+ auto* a = create<Pointer>(create<I32>(), ast::StorageClass::kStorage, ast::Access::kReadWrite);
+ auto* b = create<Pointer>(create<I32>(), ast::StorageClass::kStorage, ast::Access::kReadWrite);
+ auto* c = create<Pointer>(create<F32>(), ast::StorageClass::kStorage, ast::Access::kReadWrite);
+ auto* d = create<Pointer>(create<I32>(), ast::StorageClass::kPrivate, ast::Access::kReadWrite);
+ auto* e = create<Pointer>(create<I32>(), ast::StorageClass::kStorage, ast::Access::kRead);
+
+ EXPECT_TRUE(a->StoreType()->Is<sem::I32>());
+ EXPECT_EQ(a->StorageClass(), ast::StorageClass::kStorage);
+ EXPECT_EQ(a->Access(), ast::Access::kReadWrite);
+
+ EXPECT_EQ(a, b);
+ EXPECT_NE(a, c);
+ EXPECT_NE(a, d);
+ EXPECT_NE(a, e);
+}
+
+TEST_F(PointerTest, Hash) {
+ auto* a = create<Pointer>(create<I32>(), ast::StorageClass::kStorage, ast::Access::kReadWrite);
+ auto* b = create<Pointer>(create<I32>(), ast::StorageClass::kStorage, ast::Access::kReadWrite);
+ auto* c = create<Pointer>(create<F32>(), ast::StorageClass::kStorage, ast::Access::kReadWrite);
+ auto* d = create<Pointer>(create<I32>(), ast::StorageClass::kPrivate, ast::Access::kReadWrite);
+ auto* e = create<Pointer>(create<I32>(), ast::StorageClass::kStorage, ast::Access::kRead);
+
+ EXPECT_EQ(a->Hash(), b->Hash());
+ EXPECT_NE(a->Hash(), c->Hash());
+ EXPECT_NE(a->Hash(), d->Hash());
+ EXPECT_NE(a->Hash(), e->Hash());
+}
+
+TEST_F(PointerTest, Equals) {
+ auto* a = create<Pointer>(create<I32>(), ast::StorageClass::kStorage, ast::Access::kReadWrite);
+ auto* b = create<Pointer>(create<I32>(), ast::StorageClass::kStorage, ast::Access::kReadWrite);
+ auto* c = create<Pointer>(create<F32>(), ast::StorageClass::kStorage, ast::Access::kReadWrite);
+ auto* d = create<Pointer>(create<I32>(), ast::StorageClass::kPrivate, ast::Access::kReadWrite);
+ auto* e = create<Pointer>(create<I32>(), ast::StorageClass::kStorage, ast::Access::kRead);
+
+ EXPECT_TRUE(a->Equals(*b));
+ EXPECT_FALSE(a->Equals(*c));
+ EXPECT_FALSE(a->Equals(*d));
+ EXPECT_FALSE(a->Equals(*e));
+ EXPECT_FALSE(a->Equals(Void{}));
+}
+
+TEST_F(PointerTest, FriendlyName) {
+ auto* r = create<Pointer>(create<I32>(), ast::StorageClass::kNone, ast::Access::kRead);
+ EXPECT_EQ(r->FriendlyName(Symbols()), "ptr<i32, read>");
+}
+
+TEST_F(PointerTest, FriendlyNameWithStorageClass) {
+ auto* r = create<Pointer>(create<I32>(), ast::StorageClass::kWorkgroup, ast::Access::kRead);
+ EXPECT_EQ(r->FriendlyName(Symbols()), "ptr<workgroup, i32, read>");
+}
+
+} // namespace
+} // namespace tint::sem
diff --git a/chromium/third_party/dawn/src/tint/sem/pointer_type.h b/chromium/third_party/dawn/src/tint/sem/pointer_type.h
deleted file mode 100644
index c3697c14513..00000000000
--- a/chromium/third_party/dawn/src/tint/sem/pointer_type.h
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright 2020 The Tint Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef SRC_TINT_SEM_POINTER_TYPE_H_
-#define SRC_TINT_SEM_POINTER_TYPE_H_
-
-#include <string>
-
-#include "src/tint/ast/access.h"
-#include "src/tint/ast/storage_class.h"
-#include "src/tint/sem/type.h"
-
-namespace tint::sem {
-
-/// A pointer type.
-class Pointer final : public Castable<Pointer, Type> {
- public:
- /// Constructor
- /// @param subtype the pointee type
- /// @param storage_class the storage class of the pointer
- /// @param access the resolved access control of the reference
- Pointer(const Type* subtype,
- ast::StorageClass storage_class,
- ast::Access access);
-
- /// Move constructor
- Pointer(Pointer&&);
- ~Pointer() override;
-
- /// @returns a hash of the type.
- size_t Hash() const override;
-
- /// @param other the other type to compare against
- /// @returns true if the this type is equal to the given type
- bool Equals(const Type& other) const override;
-
- /// @returns the pointee type
- const Type* StoreType() const { return subtype_; }
-
- /// @returns the storage class of the pointer
- ast::StorageClass StorageClass() const { return storage_class_; }
-
- /// @returns the access control of the reference
- ast::Access Access() const { return access_; }
-
- /// @param symbols the program's symbol table
- /// @returns the name for this type that closely resembles how it would be
- /// declared in WGSL.
- std::string FriendlyName(const SymbolTable& symbols) const override;
-
- private:
- Type const* const subtype_;
- ast::StorageClass const storage_class_;
- ast::Access const access_;
-};
-
-} // namespace tint::sem
-
-#endif // SRC_TINT_SEM_POINTER_TYPE_H_
diff --git a/chromium/third_party/dawn/src/tint/sem/pointer_type_test.cc b/chromium/third_party/dawn/src/tint/sem/pointer_type_test.cc
deleted file mode 100644
index 713677d230d..00000000000
--- a/chromium/third_party/dawn/src/tint/sem/pointer_type_test.cc
+++ /dev/null
@@ -1,95 +0,0 @@
-// Copyright 2020 The Tint Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "src/tint/sem/test_helper.h"
-#include "src/tint/sem/texture_type.h"
-
-namespace tint::sem {
-namespace {
-
-using PointerTest = TestHelper;
-
-TEST_F(PointerTest, Creation) {
- auto* a = create<Pointer>(create<I32>(), ast::StorageClass::kStorage,
- ast::Access::kReadWrite);
- auto* b = create<Pointer>(create<I32>(), ast::StorageClass::kStorage,
- ast::Access::kReadWrite);
- auto* c = create<Pointer>(create<F32>(), ast::StorageClass::kStorage,
- ast::Access::kReadWrite);
- auto* d = create<Pointer>(create<I32>(), ast::StorageClass::kPrivate,
- ast::Access::kReadWrite);
- auto* e = create<Pointer>(create<I32>(), ast::StorageClass::kStorage,
- ast::Access::kRead);
-
- EXPECT_TRUE(a->StoreType()->Is<sem::I32>());
- EXPECT_EQ(a->StorageClass(), ast::StorageClass::kStorage);
- EXPECT_EQ(a->Access(), ast::Access::kReadWrite);
-
- EXPECT_EQ(a, b);
- EXPECT_NE(a, c);
- EXPECT_NE(a, d);
- EXPECT_NE(a, e);
-}
-
-TEST_F(PointerTest, Hash) {
- auto* a = create<Pointer>(create<I32>(), ast::StorageClass::kStorage,
- ast::Access::kReadWrite);
- auto* b = create<Pointer>(create<I32>(), ast::StorageClass::kStorage,
- ast::Access::kReadWrite);
- auto* c = create<Pointer>(create<F32>(), ast::StorageClass::kStorage,
- ast::Access::kReadWrite);
- auto* d = create<Pointer>(create<I32>(), ast::StorageClass::kPrivate,
- ast::Access::kReadWrite);
- auto* e = create<Pointer>(create<I32>(), ast::StorageClass::kStorage,
- ast::Access::kRead);
-
- EXPECT_EQ(a->Hash(), b->Hash());
- EXPECT_NE(a->Hash(), c->Hash());
- EXPECT_NE(a->Hash(), d->Hash());
- EXPECT_NE(a->Hash(), e->Hash());
-}
-
-TEST_F(PointerTest, Equals) {
- auto* a = create<Pointer>(create<I32>(), ast::StorageClass::kStorage,
- ast::Access::kReadWrite);
- auto* b = create<Pointer>(create<I32>(), ast::StorageClass::kStorage,
- ast::Access::kReadWrite);
- auto* c = create<Pointer>(create<F32>(), ast::StorageClass::kStorage,
- ast::Access::kReadWrite);
- auto* d = create<Pointer>(create<I32>(), ast::StorageClass::kPrivate,
- ast::Access::kReadWrite);
- auto* e = create<Pointer>(create<I32>(), ast::StorageClass::kStorage,
- ast::Access::kRead);
-
- EXPECT_TRUE(a->Equals(*b));
- EXPECT_FALSE(a->Equals(*c));
- EXPECT_FALSE(a->Equals(*d));
- EXPECT_FALSE(a->Equals(*e));
- EXPECT_FALSE(a->Equals(Void{}));
-}
-
-TEST_F(PointerTest, FriendlyName) {
- auto* r = create<Pointer>(create<I32>(), ast::StorageClass::kNone,
- ast::Access::kRead);
- EXPECT_EQ(r->FriendlyName(Symbols()), "ptr<i32, read>");
-}
-
-TEST_F(PointerTest, FriendlyNameWithStorageClass) {
- auto* r = create<Pointer>(create<I32>(), ast::StorageClass::kWorkgroup,
- ast::Access::kRead);
- EXPECT_EQ(r->FriendlyName(Symbols()), "ptr<workgroup, i32, read>");
-}
-
-} // namespace
-} // namespace tint::sem
diff --git a/chromium/third_party/dawn/src/tint/sem/reference_type.cc b/chromium/third_party/dawn/src/tint/sem/reference.cc
index 6eaceda5591..4751563eb11 100644
--- a/chromium/third_party/dawn/src/tint/sem/reference_type.cc
+++ b/chromium/third_party/dawn/src/tint/sem/reference.cc
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-#include "src/tint/sem/reference_type.h"
+#include "src/tint/sem/reference.h"
#include "src/tint/program_builder.h"
#include "src/tint/utils/hash.h"
@@ -21,36 +21,33 @@ TINT_INSTANTIATE_TYPEINFO(tint::sem::Reference);
namespace tint::sem {
-Reference::Reference(const Type* subtype,
- ast::StorageClass storage_class,
- ast::Access access)
+Reference::Reference(const Type* subtype, ast::StorageClass storage_class, ast::Access access)
: subtype_(subtype), storage_class_(storage_class), access_(access) {
- TINT_ASSERT(Semantic, !subtype->Is<Reference>());
- TINT_ASSERT(Semantic, access != ast::Access::kUndefined);
+ TINT_ASSERT(Semantic, !subtype->Is<Reference>());
+ TINT_ASSERT(Semantic, access != ast::Access::kUndefined);
}
size_t Reference::Hash() const {
- return utils::Hash(TypeInfo::Of<Reference>().full_hashcode, storage_class_,
- subtype_, access_);
+ return utils::Hash(TypeInfo::Of<Reference>().full_hashcode, storage_class_, subtype_, access_);
}
bool Reference::Equals(const sem::Type& other) const {
- if (auto* o = other.As<Reference>()) {
- return o->storage_class_ == storage_class_ && o->subtype_ == subtype_ &&
- o->access_ == access_;
- }
- return false;
+ if (auto* o = other.As<Reference>()) {
+ return o->storage_class_ == storage_class_ && o->subtype_ == subtype_ &&
+ o->access_ == access_;
+ }
+ return false;
}
std::string Reference::FriendlyName(const SymbolTable& symbols) const {
- std::ostringstream out;
- out << "ref<";
- if (storage_class_ != ast::StorageClass::kNone) {
- out << storage_class_ << ", ";
- }
- out << subtype_->FriendlyName(symbols) << ", " << access_;
- out << ">";
- return out.str();
+ std::ostringstream out;
+ out << "ref<";
+ if (storage_class_ != ast::StorageClass::kNone) {
+ out << storage_class_ << ", ";
+ }
+ out << subtype_->FriendlyName(symbols) << ", " << access_;
+ out << ">";
+ return out.str();
}
Reference::Reference(Reference&&) = default;
diff --git a/chromium/third_party/dawn/src/tint/sem/reference.h b/chromium/third_party/dawn/src/tint/sem/reference.h
new file mode 100644
index 00000000000..5db9b62c83f
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/sem/reference.h
@@ -0,0 +1,68 @@
+// Copyright 2021 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef SRC_TINT_SEM_REFERENCE_H_
+#define SRC_TINT_SEM_REFERENCE_H_
+
+#include <string>
+
+#include "src/tint/ast/access.h"
+#include "src/tint/ast/storage_class.h"
+#include "src/tint/sem/type.h"
+
+namespace tint::sem {
+
+/// A reference type.
+class Reference final : public Castable<Reference, Type> {
+ public:
+ /// Constructor
+ /// @param subtype the pointee type
+ /// @param storage_class the storage class of the reference
+ /// @param access the resolved access control of the reference
+ Reference(const Type* subtype, ast::StorageClass storage_class, ast::Access access);
+
+ /// Move constructor
+ Reference(Reference&&);
+ ~Reference() override;
+
+ /// @returns a hash of the type.
+ size_t Hash() const override;
+
+ /// @param other the other type to compare against
+ /// @returns true if the this type is equal to the given type
+ bool Equals(const Type& other) const override;
+
+ /// @returns the pointee type
+ const Type* StoreType() const { return subtype_; }
+
+ /// @returns the storage class of the reference
+ ast::StorageClass StorageClass() const { return storage_class_; }
+
+ /// @returns the resolved access control of the reference.
+ ast::Access Access() const { return access_; }
+
+ /// @param symbols the program's symbol table
+ /// @returns the name for this type that closely resembles how it would be
+ /// declared in WGSL.
+ std::string FriendlyName(const SymbolTable& symbols) const override;
+
+ private:
+ Type const* const subtype_;
+ ast::StorageClass const storage_class_;
+ ast::Access const access_;
+};
+
+} // namespace tint::sem
+
+#endif // SRC_TINT_SEM_REFERENCE_H_
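
Pointer and Reference now share an identical shape (StoreType(), StorageClass(), Access()); only the WGSL spelling produced by FriendlyName() differs. A minimal contrast in the same sem::TestHelper style as the new pointer_test.cc / reference_test.cc files in this patch (the fixture supplies create<>() and Symbols(); the test name is hypothetical), with both expected strings taken from those tests:

    #include "src/tint/sem/pointer.h"
    #include "src/tint/sem/reference.h"
    #include "src/tint/sem/test_helper.h"

    namespace tint::sem {
    namespace {

    using PtrRefContrastTest = TestHelper;

    TEST_F(PtrRefContrastTest, FriendlyNameSpelling) {
        auto* p = create<Pointer>(create<I32>(), ast::StorageClass::kWorkgroup, ast::Access::kRead);
        auto* r = create<Reference>(create<I32>(), ast::StorageClass::kWorkgroup, ast::Access::kRead);
        EXPECT_EQ(p->FriendlyName(Symbols()), "ptr<workgroup, i32, read>");
        EXPECT_EQ(r->FriendlyName(Symbols()), "ref<workgroup, i32, read>");
    }

    }  // namespace
    }  // namespace tint::sem
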
diff --git a/chromium/third_party/dawn/src/tint/sem/reference_test.cc b/chromium/third_party/dawn/src/tint/sem/reference_test.cc
new file mode 100644
index 00000000000..27b1ebd9d01
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/sem/reference_test.cc
@@ -0,0 +1,90 @@
+// Copyright 2021 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/tint/sem/reference.h"
+#include "src/tint/sem/test_helper.h"
+
+namespace tint::sem {
+namespace {
+
+using ReferenceTest = TestHelper;
+
+TEST_F(ReferenceTest, Creation) {
+ auto* a =
+ create<Reference>(create<I32>(), ast::StorageClass::kStorage, ast::Access::kReadWrite);
+ auto* b =
+ create<Reference>(create<I32>(), ast::StorageClass::kStorage, ast::Access::kReadWrite);
+ auto* c =
+ create<Reference>(create<F32>(), ast::StorageClass::kStorage, ast::Access::kReadWrite);
+ auto* d =
+ create<Reference>(create<I32>(), ast::StorageClass::kPrivate, ast::Access::kReadWrite);
+ auto* e = create<Reference>(create<I32>(), ast::StorageClass::kStorage, ast::Access::kRead);
+
+ EXPECT_TRUE(a->StoreType()->Is<sem::I32>());
+ EXPECT_EQ(a->StorageClass(), ast::StorageClass::kStorage);
+ EXPECT_EQ(a->Access(), ast::Access::kReadWrite);
+
+ EXPECT_EQ(a, b);
+ EXPECT_NE(a, c);
+ EXPECT_NE(a, d);
+ EXPECT_NE(a, e);
+}
+
+TEST_F(ReferenceTest, Hash) {
+ auto* a =
+ create<Reference>(create<I32>(), ast::StorageClass::kStorage, ast::Access::kReadWrite);
+ auto* b =
+ create<Reference>(create<I32>(), ast::StorageClass::kStorage, ast::Access::kReadWrite);
+ auto* c =
+ create<Reference>(create<F32>(), ast::StorageClass::kStorage, ast::Access::kReadWrite);
+ auto* d =
+ create<Reference>(create<I32>(), ast::StorageClass::kPrivate, ast::Access::kReadWrite);
+ auto* e = create<Reference>(create<I32>(), ast::StorageClass::kStorage, ast::Access::kRead);
+
+ EXPECT_EQ(a->Hash(), b->Hash());
+ EXPECT_NE(a->Hash(), c->Hash());
+ EXPECT_NE(a->Hash(), d->Hash());
+ EXPECT_NE(a->Hash(), e->Hash());
+}
+
+TEST_F(ReferenceTest, Equals) {
+ auto* a =
+ create<Reference>(create<I32>(), ast::StorageClass::kStorage, ast::Access::kReadWrite);
+ auto* b =
+ create<Reference>(create<I32>(), ast::StorageClass::kStorage, ast::Access::kReadWrite);
+ auto* c =
+ create<Reference>(create<F32>(), ast::StorageClass::kStorage, ast::Access::kReadWrite);
+ auto* d =
+ create<Reference>(create<I32>(), ast::StorageClass::kPrivate, ast::Access::kReadWrite);
+ auto* e = create<Reference>(create<I32>(), ast::StorageClass::kStorage, ast::Access::kRead);
+
+ EXPECT_TRUE(a->Equals(*b));
+ EXPECT_FALSE(a->Equals(*c));
+ EXPECT_FALSE(a->Equals(*d));
+ EXPECT_FALSE(a->Equals(*e));
+ EXPECT_FALSE(a->Equals(Void{}));
+}
+
+TEST_F(ReferenceTest, FriendlyName) {
+ auto* r = create<Reference>(create<I32>(), ast::StorageClass::kNone, ast::Access::kRead);
+ EXPECT_EQ(r->FriendlyName(Symbols()), "ref<i32, read>");
+}
+
+TEST_F(ReferenceTest, FriendlyNameWithStorageClass) {
+ auto* r = create<Reference>(create<I32>(), ast::StorageClass::kWorkgroup, ast::Access::kRead);
+ EXPECT_EQ(r->FriendlyName(Symbols()), "ref<workgroup, i32, read>");
+}
+
+} // namespace
+} // namespace tint::sem
diff --git a/chromium/third_party/dawn/src/tint/sem/reference_type.h b/chromium/third_party/dawn/src/tint/sem/reference_type.h
deleted file mode 100644
index d2639a48491..00000000000
--- a/chromium/third_party/dawn/src/tint/sem/reference_type.h
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright 2021 The Tint Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef SRC_TINT_SEM_REFERENCE_TYPE_H_
-#define SRC_TINT_SEM_REFERENCE_TYPE_H_
-
-#include <string>
-
-#include "src/tint/ast/access.h"
-#include "src/tint/ast/storage_class.h"
-#include "src/tint/sem/type.h"
-
-namespace tint::sem {
-
-/// A reference type.
-class Reference final : public Castable<Reference, Type> {
- public:
- /// Constructor
- /// @param subtype the pointee type
- /// @param storage_class the storage class of the reference
- /// @param access the resolved access control of the reference
- Reference(const Type* subtype,
- ast::StorageClass storage_class,
- ast::Access access);
-
- /// Move constructor
- Reference(Reference&&);
- ~Reference() override;
-
- /// @returns a hash of the type.
- size_t Hash() const override;
-
- /// @param other the other type to compare against
- /// @returns true if the this type is equal to the given type
- bool Equals(const Type& other) const override;
-
- /// @returns the pointee type
- const Type* StoreType() const { return subtype_; }
-
- /// @returns the storage class of the reference
- ast::StorageClass StorageClass() const { return storage_class_; }
-
- /// @returns the resolved access control of the reference.
- ast::Access Access() const { return access_; }
-
- /// @param symbols the program's symbol table
- /// @returns the name for this type that closely resembles how it would be
- /// declared in WGSL.
- std::string FriendlyName(const SymbolTable& symbols) const override;
-
- private:
- Type const* const subtype_;
- ast::StorageClass const storage_class_;
- ast::Access const access_;
-};
-
-} // namespace tint::sem
-
-#endif // SRC_TINT_SEM_REFERENCE_TYPE_H_
diff --git a/chromium/third_party/dawn/src/tint/sem/reference_type_test.cc b/chromium/third_party/dawn/src/tint/sem/reference_type_test.cc
deleted file mode 100644
index 9d99598c0f9..00000000000
--- a/chromium/third_party/dawn/src/tint/sem/reference_type_test.cc
+++ /dev/null
@@ -1,95 +0,0 @@
-// Copyright 2021 The Tint Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "src/tint/sem/reference_type.h"
-#include "src/tint/sem/test_helper.h"
-
-namespace tint::sem {
-namespace {
-
-using ReferenceTest = TestHelper;
-
-TEST_F(ReferenceTest, Creation) {
- auto* a = create<Reference>(create<I32>(), ast::StorageClass::kStorage,
- ast::Access::kReadWrite);
- auto* b = create<Reference>(create<I32>(), ast::StorageClass::kStorage,
- ast::Access::kReadWrite);
- auto* c = create<Reference>(create<F32>(), ast::StorageClass::kStorage,
- ast::Access::kReadWrite);
- auto* d = create<Reference>(create<I32>(), ast::StorageClass::kPrivate,
- ast::Access::kReadWrite);
- auto* e = create<Reference>(create<I32>(), ast::StorageClass::kStorage,
- ast::Access::kRead);
-
- EXPECT_TRUE(a->StoreType()->Is<sem::I32>());
- EXPECT_EQ(a->StorageClass(), ast::StorageClass::kStorage);
- EXPECT_EQ(a->Access(), ast::Access::kReadWrite);
-
- EXPECT_EQ(a, b);
- EXPECT_NE(a, c);
- EXPECT_NE(a, d);
- EXPECT_NE(a, e);
-}
-
-TEST_F(ReferenceTest, Hash) {
- auto* a = create<Reference>(create<I32>(), ast::StorageClass::kStorage,
- ast::Access::kReadWrite);
- auto* b = create<Reference>(create<I32>(), ast::StorageClass::kStorage,
- ast::Access::kReadWrite);
- auto* c = create<Reference>(create<F32>(), ast::StorageClass::kStorage,
- ast::Access::kReadWrite);
- auto* d = create<Reference>(create<I32>(), ast::StorageClass::kPrivate,
- ast::Access::kReadWrite);
- auto* e = create<Reference>(create<I32>(), ast::StorageClass::kStorage,
- ast::Access::kRead);
-
- EXPECT_EQ(a->Hash(), b->Hash());
- EXPECT_NE(a->Hash(), c->Hash());
- EXPECT_NE(a->Hash(), d->Hash());
- EXPECT_NE(a->Hash(), e->Hash());
-}
-
-TEST_F(ReferenceTest, Equals) {
- auto* a = create<Reference>(create<I32>(), ast::StorageClass::kStorage,
- ast::Access::kReadWrite);
- auto* b = create<Reference>(create<I32>(), ast::StorageClass::kStorage,
- ast::Access::kReadWrite);
- auto* c = create<Reference>(create<F32>(), ast::StorageClass::kStorage,
- ast::Access::kReadWrite);
- auto* d = create<Reference>(create<I32>(), ast::StorageClass::kPrivate,
- ast::Access::kReadWrite);
- auto* e = create<Reference>(create<I32>(), ast::StorageClass::kStorage,
- ast::Access::kRead);
-
- EXPECT_TRUE(a->Equals(*b));
- EXPECT_FALSE(a->Equals(*c));
- EXPECT_FALSE(a->Equals(*d));
- EXPECT_FALSE(a->Equals(*e));
- EXPECT_FALSE(a->Equals(Void{}));
-}
-
-TEST_F(ReferenceTest, FriendlyName) {
- auto* r = create<Reference>(create<I32>(), ast::StorageClass::kNone,
- ast::Access::kRead);
- EXPECT_EQ(r->FriendlyName(Symbols()), "ref<i32, read>");
-}
-
-TEST_F(ReferenceTest, FriendlyNameWithStorageClass) {
- auto* r = create<Reference>(create<I32>(), ast::StorageClass::kWorkgroup,
- ast::Access::kRead);
- EXPECT_EQ(r->FriendlyName(Symbols()), "ref<workgroup, i32, read>");
-}
-
-} // namespace
-} // namespace tint::sem
diff --git a/chromium/third_party/dawn/src/tint/sem/sampled_texture_type.cc b/chromium/third_party/dawn/src/tint/sem/sampled_texture.cc
index 5d9d9c56603..260901aa926 100644
--- a/chromium/third_party/dawn/src/tint/sem/sampled_texture_type.cc
+++ b/chromium/third_party/dawn/src/tint/sem/sampled_texture.cc
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-#include "src/tint/sem/sampled_texture_type.h"
+#include "src/tint/sem/sampled_texture.h"
#include "src/tint/program_builder.h"
#include "src/tint/utils/hash.h"
@@ -23,7 +23,7 @@ namespace tint::sem {
SampledTexture::SampledTexture(ast::TextureDimension dim, const Type* type)
: Base(dim), type_(type) {
- TINT_ASSERT(Semantic, type_);
+ TINT_ASSERT(Semantic, type_);
}
SampledTexture::SampledTexture(SampledTexture&&) = default;
@@ -31,21 +31,20 @@ SampledTexture::SampledTexture(SampledTexture&&) = default;
SampledTexture::~SampledTexture() = default;
size_t SampledTexture::Hash() const {
- return utils::Hash(TypeInfo::Of<SampledTexture>().full_hashcode, dim(),
- type_);
+ return utils::Hash(TypeInfo::Of<SampledTexture>().full_hashcode, dim(), type_);
}
bool SampledTexture::Equals(const sem::Type& other) const {
- if (auto* o = other.As<SampledTexture>()) {
- return o->dim() == dim() && o->type_ == type_;
- }
- return false;
+ if (auto* o = other.As<SampledTexture>()) {
+ return o->dim() == dim() && o->type_ == type_;
+ }
+ return false;
}
std::string SampledTexture::FriendlyName(const SymbolTable& symbols) const {
- std::ostringstream out;
- out << "texture_" << dim() << "<" << type_->FriendlyName(symbols) << ">";
- return out.str();
+ std::ostringstream out;
+ out << "texture_" << dim() << "<" << type_->FriendlyName(symbols) << ">";
+ return out.str();
}
} // namespace tint::sem
diff --git a/chromium/third_party/dawn/src/tint/sem/sampled_texture.h b/chromium/third_party/dawn/src/tint/sem/sampled_texture.h
new file mode 100644
index 00000000000..15e7949ad49
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/sem/sampled_texture.h
@@ -0,0 +1,56 @@
+// Copyright 2020 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef SRC_TINT_SEM_SAMPLED_TEXTURE_H_
+#define SRC_TINT_SEM_SAMPLED_TEXTURE_H_
+
+#include <string>
+
+#include "src/tint/sem/texture.h"
+
+namespace tint::sem {
+
+/// A sampled texture type.
+class SampledTexture final : public Castable<SampledTexture, Texture> {
+ public:
+ /// Constructor
+ /// @param dim the dimensionality of the texture
+ /// @param type the data type of the sampled texture
+ SampledTexture(ast::TextureDimension dim, const Type* type);
+ /// Move constructor
+ SampledTexture(SampledTexture&&);
+ ~SampledTexture() override;
+
+ /// @returns a hash of the type.
+ size_t Hash() const override;
+
+ /// @param other the other type to compare against
+ /// @returns true if this type is equal to the given type
+ bool Equals(const Type& other) const override;
+
+ /// @returns the subtype of the sampled texture
+ Type* type() const { return const_cast<Type*>(type_); }
+
+ /// @param symbols the program's symbol table
+ /// @returns the name for this type that closely resembles how it would be
+ /// declared in WGSL.
+ std::string FriendlyName(const SymbolTable& symbols) const override;
+
+ private:
+ const Type* const type_;
+};
+
+} // namespace tint::sem
+
+#endif // SRC_TINT_SEM_SAMPLED_TEXTURE_H_
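The Hash()/Equals() pair declared above is what lets Tint's type manager deduplicate structurally identical types, which is why the tests below can compare the pointers returned by create<>() directly. A short sketch under that assumption, inside the same sem::TestHelper fixture:

    // Equal dimension and sampled type: create<>() hands back the same object.
    auto* a = create<SampledTexture>(ast::TextureDimension::kCube, create<F32>());
    auto* b = create<SampledTexture>(ast::TextureDimension::kCube, create<F32>());
    auto* c = create<SampledTexture>(ast::TextureDimension::k2d, create<F32>());
    EXPECT_EQ(a, b);  // deduplicated: same pointer
    EXPECT_NE(a, c);  // different dimension: distinct type object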
diff --git a/chromium/third_party/dawn/src/tint/sem/sampled_texture_test.cc b/chromium/third_party/dawn/src/tint/sem/sampled_texture_test.cc
new file mode 100644
index 00000000000..8a3321f524f
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/sem/sampled_texture_test.cc
@@ -0,0 +1,93 @@
+// Copyright 2020 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/tint/sem/sampled_texture.h"
+
+#include "src/tint/sem/depth_texture.h"
+#include "src/tint/sem/external_texture.h"
+#include "src/tint/sem/storage_texture.h"
+#include "src/tint/sem/test_helper.h"
+
+namespace tint::sem {
+namespace {
+
+using SampledTextureTest = TestHelper;
+
+TEST_F(SampledTextureTest, Creation) {
+ auto* a = create<SampledTexture>(ast::TextureDimension::kCube, create<F32>());
+ auto* b = create<SampledTexture>(ast::TextureDimension::kCube, create<F32>());
+ auto* c = create<SampledTexture>(ast::TextureDimension::k2d, create<F32>());
+ auto* d = create<SampledTexture>(ast::TextureDimension::kCube, create<I32>());
+
+ EXPECT_TRUE(a->type()->Is<F32>());
+ EXPECT_EQ(a->dim(), ast::TextureDimension::kCube);
+
+ EXPECT_EQ(a, b);
+ EXPECT_NE(a, c);
+ EXPECT_NE(a, d);
+}
+
+TEST_F(SampledTextureTest, Hash) {
+ auto* a = create<SampledTexture>(ast::TextureDimension::kCube, create<F32>());
+ auto* b = create<SampledTexture>(ast::TextureDimension::kCube, create<F32>());
+ auto* c = create<SampledTexture>(ast::TextureDimension::k2d, create<F32>());
+ auto* d = create<SampledTexture>(ast::TextureDimension::kCube, create<I32>());
+
+ EXPECT_EQ(a->Hash(), b->Hash());
+ EXPECT_NE(a->Hash(), c->Hash());
+ EXPECT_NE(a->Hash(), d->Hash());
+}
+
+TEST_F(SampledTextureTest, Equals) {
+ auto* a = create<SampledTexture>(ast::TextureDimension::kCube, create<F32>());
+ auto* b = create<SampledTexture>(ast::TextureDimension::kCube, create<F32>());
+ auto* c = create<SampledTexture>(ast::TextureDimension::k2d, create<F32>());
+ auto* d = create<SampledTexture>(ast::TextureDimension::kCube, create<I32>());
+
+ EXPECT_TRUE(a->Equals(*b));
+ EXPECT_FALSE(a->Equals(*c));
+ EXPECT_FALSE(a->Equals(*d));
+ EXPECT_FALSE(a->Equals(Void{}));
+}
+
+TEST_F(SampledTextureTest, IsTexture) {
+ F32 f32;
+ SampledTexture s(ast::TextureDimension::kCube, &f32);
+ Texture* ty = &s;
+ EXPECT_FALSE(ty->Is<DepthTexture>());
+ EXPECT_FALSE(ty->Is<ExternalTexture>());
+ EXPECT_TRUE(ty->Is<SampledTexture>());
+ EXPECT_FALSE(ty->Is<StorageTexture>());
+}
+
+TEST_F(SampledTextureTest, Dim) {
+ F32 f32;
+ SampledTexture s(ast::TextureDimension::k3d, &f32);
+ EXPECT_EQ(s.dim(), ast::TextureDimension::k3d);
+}
+
+TEST_F(SampledTextureTest, Type) {
+ F32 f32;
+ SampledTexture s(ast::TextureDimension::k3d, &f32);
+ EXPECT_EQ(s.type(), &f32);
+}
+
+TEST_F(SampledTextureTest, FriendlyName) {
+ F32 f32;
+ SampledTexture s(ast::TextureDimension::k3d, &f32);
+ EXPECT_EQ(s.FriendlyName(Symbols()), "texture_3d<f32>");
+}
+
+} // namespace
+} // namespace tint::sem
diff --git a/chromium/third_party/dawn/src/tint/sem/sampled_texture_type.h b/chromium/third_party/dawn/src/tint/sem/sampled_texture_type.h
deleted file mode 100644
index b67737e6798..00000000000
--- a/chromium/third_party/dawn/src/tint/sem/sampled_texture_type.h
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2020 The Tint Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef SRC_TINT_SEM_SAMPLED_TEXTURE_TYPE_H_
-#define SRC_TINT_SEM_SAMPLED_TEXTURE_TYPE_H_
-
-#include <string>
-
-#include "src/tint/sem/texture_type.h"
-
-namespace tint::sem {
-
-/// A sampled texture type.
-class SampledTexture final : public Castable<SampledTexture, Texture> {
- public:
- /// Constructor
- /// @param dim the dimensionality of the texture
- /// @param type the data type of the sampled texture
- SampledTexture(ast::TextureDimension dim, const Type* type);
- /// Move constructor
- SampledTexture(SampledTexture&&);
- ~SampledTexture() override;
-
- /// @returns a hash of the type.
- size_t Hash() const override;
-
- /// @param other the other type to compare against
- /// @returns true if the this type is equal to the given type
- bool Equals(const Type& other) const override;
-
- /// @returns the subtype of the sampled texture
- Type* type() const { return const_cast<Type*>(type_); }
-
- /// @param symbols the program's symbol table
- /// @returns the name for this type that closely resembles how it would be
- /// declared in WGSL.
- std::string FriendlyName(const SymbolTable& symbols) const override;
-
- private:
- const Type* const type_;
-};
-
-} // namespace tint::sem
-
-#endif // SRC_TINT_SEM_SAMPLED_TEXTURE_TYPE_H_
diff --git a/chromium/third_party/dawn/src/tint/sem/sampled_texture_type_test.cc b/chromium/third_party/dawn/src/tint/sem/sampled_texture_type_test.cc
deleted file mode 100644
index 7ddb48206c7..00000000000
--- a/chromium/third_party/dawn/src/tint/sem/sampled_texture_type_test.cc
+++ /dev/null
@@ -1,93 +0,0 @@
-// Copyright 2020 The Tint Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "src/tint/sem/sampled_texture_type.h"
-
-#include "src/tint/sem/depth_texture_type.h"
-#include "src/tint/sem/external_texture_type.h"
-#include "src/tint/sem/storage_texture_type.h"
-#include "src/tint/sem/test_helper.h"
-
-namespace tint::sem {
-namespace {
-
-using SampledTextureTest = TestHelper;
-
-TEST_F(SampledTextureTest, Creation) {
- auto* a = create<SampledTexture>(ast::TextureDimension::kCube, create<F32>());
- auto* b = create<SampledTexture>(ast::TextureDimension::kCube, create<F32>());
- auto* c = create<SampledTexture>(ast::TextureDimension::k2d, create<F32>());
- auto* d = create<SampledTexture>(ast::TextureDimension::kCube, create<I32>());
-
- EXPECT_TRUE(a->type()->Is<F32>());
- EXPECT_EQ(a->dim(), ast::TextureDimension::kCube);
-
- EXPECT_EQ(a, b);
- EXPECT_NE(a, c);
- EXPECT_NE(a, d);
-}
-
-TEST_F(SampledTextureTest, Hash) {
- auto* a = create<SampledTexture>(ast::TextureDimension::kCube, create<F32>());
- auto* b = create<SampledTexture>(ast::TextureDimension::kCube, create<F32>());
- auto* c = create<SampledTexture>(ast::TextureDimension::k2d, create<F32>());
- auto* d = create<SampledTexture>(ast::TextureDimension::kCube, create<I32>());
-
- EXPECT_EQ(a->Hash(), b->Hash());
- EXPECT_NE(a->Hash(), c->Hash());
- EXPECT_NE(a->Hash(), d->Hash());
-}
-
-TEST_F(SampledTextureTest, Equals) {
- auto* a = create<SampledTexture>(ast::TextureDimension::kCube, create<F32>());
- auto* b = create<SampledTexture>(ast::TextureDimension::kCube, create<F32>());
- auto* c = create<SampledTexture>(ast::TextureDimension::k2d, create<F32>());
- auto* d = create<SampledTexture>(ast::TextureDimension::kCube, create<I32>());
-
- EXPECT_TRUE(a->Equals(*b));
- EXPECT_FALSE(a->Equals(*c));
- EXPECT_FALSE(a->Equals(*d));
- EXPECT_FALSE(a->Equals(Void{}));
-}
-
-TEST_F(SampledTextureTest, IsTexture) {
- F32 f32;
- SampledTexture s(ast::TextureDimension::kCube, &f32);
- Texture* ty = &s;
- EXPECT_FALSE(ty->Is<DepthTexture>());
- EXPECT_FALSE(ty->Is<ExternalTexture>());
- EXPECT_TRUE(ty->Is<SampledTexture>());
- EXPECT_FALSE(ty->Is<StorageTexture>());
-}
-
-TEST_F(SampledTextureTest, Dim) {
- F32 f32;
- SampledTexture s(ast::TextureDimension::k3d, &f32);
- EXPECT_EQ(s.dim(), ast::TextureDimension::k3d);
-}
-
-TEST_F(SampledTextureTest, Type) {
- F32 f32;
- SampledTexture s(ast::TextureDimension::k3d, &f32);
- EXPECT_EQ(s.type(), &f32);
-}
-
-TEST_F(SampledTextureTest, FriendlyName) {
- F32 f32;
- SampledTexture s(ast::TextureDimension::k3d, &f32);
- EXPECT_EQ(s.FriendlyName(Symbols()), "texture_3d<f32>");
-}
-
-} // namespace
-} // namespace tint::sem
diff --git a/chromium/third_party/dawn/src/tint/sem/sampler_type.cc b/chromium/third_party/dawn/src/tint/sem/sampler.cc
index 6558a17bf37..20993cc9dcd 100644
--- a/chromium/third_party/dawn/src/tint/sem/sampler_type.cc
+++ b/chromium/third_party/dawn/src/tint/sem/sampler.cc
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-#include "src/tint/sem/sampler_type.h"
+#include "src/tint/sem/sampler.h"
#include "src/tint/program_builder.h"
#include "src/tint/utils/hash.h"
@@ -28,18 +28,18 @@ Sampler::Sampler(Sampler&&) = default;
Sampler::~Sampler() = default;
size_t Sampler::Hash() const {
- return utils::Hash(TypeInfo::Of<Sampler>().full_hashcode, kind_);
+ return utils::Hash(TypeInfo::Of<Sampler>().full_hashcode, kind_);
}
bool Sampler::Equals(const sem::Type& other) const {
- if (auto* o = other.As<Sampler>()) {
- return o->kind_ == kind_;
- }
- return false;
+ if (auto* o = other.As<Sampler>()) {
+ return o->kind_ == kind_;
+ }
+ return false;
}
std::string Sampler::FriendlyName(const SymbolTable&) const {
- return kind_ == ast::SamplerKind::kSampler ? "sampler" : "sampler_comparison";
+ return kind_ == ast::SamplerKind::kSampler ? "sampler" : "sampler_comparison";
}
} // namespace tint::sem
diff --git a/chromium/third_party/dawn/src/tint/sem/sampler.h b/chromium/third_party/dawn/src/tint/sem/sampler.h
new file mode 100644
index 00000000000..96c184fda64
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/sem/sampler.h
@@ -0,0 +1,59 @@
+// Copyright 2020 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef SRC_TINT_SEM_SAMPLER_H_
+#define SRC_TINT_SEM_SAMPLER_H_
+
+#include <string>
+
+#include "src/tint/ast/sampler.h"
+#include "src/tint/sem/type.h"
+
+namespace tint::sem {
+
+/// A sampler type.
+class Sampler final : public Castable<Sampler, Type> {
+ public:
+ /// Constructor
+ /// @param kind the kind of sampler
+ explicit Sampler(ast::SamplerKind kind);
+ /// Move constructor
+ Sampler(Sampler&&);
+ ~Sampler() override;
+
+ /// @returns a hash of the type.
+ size_t Hash() const override;
+
+ /// @param other the other type to compare against
+ /// @returns true if this type is equal to the given type
+ bool Equals(const Type& other) const override;
+
+ /// @returns the sampler type
+ ast::SamplerKind kind() const { return kind_; }
+
+ /// @returns true if this is a comparison sampler
+ bool IsComparison() const { return kind_ == ast::SamplerKind::kComparisonSampler; }
+
+ /// @param symbols the program's symbol table
+ /// @returns the name for this type that closely resembles how it would be
+ /// declared in WGSL.
+ std::string FriendlyName(const SymbolTable& symbols) const override;
+
+ private:
+ ast::SamplerKind const kind_;
+};
+
+} // namespace tint::sem
+
+#endif // SRC_TINT_SEM_SAMPLER_H_
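A brief sketch (not part of the patch) of the two sampler kinds the type distinguishes; it mirrors the tests that follow and assumes the same sem::TestHelper fixture:

    Sampler plain{ast::SamplerKind::kSampler};
    Sampler cmp{ast::SamplerKind::kComparisonSampler};
    EXPECT_FALSE(plain.IsComparison());
    EXPECT_TRUE(cmp.IsComparison());
    // FriendlyName() maps the kind straight to the WGSL keyword; the symbol
    // table is unused, which is why the definition earlier leaves it unnamed.
    EXPECT_EQ(plain.FriendlyName(Symbols()), "sampler");
    EXPECT_EQ(cmp.FriendlyName(Symbols()), "sampler_comparison");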
diff --git a/chromium/third_party/dawn/src/tint/sem/sampler_test.cc b/chromium/third_party/dawn/src/tint/sem/sampler_test.cc
new file mode 100644
index 00000000000..caa61dcc183
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/sem/sampler_test.cc
@@ -0,0 +1,69 @@
+// Copyright 2020 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/tint/sem/sampler.h"
+#include "src/tint/sem/test_helper.h"
+#include "src/tint/sem/texture.h"
+
+namespace tint::sem {
+namespace {
+
+using SamplerTest = TestHelper;
+
+TEST_F(SamplerTest, Creation) {
+ auto* a = create<Sampler>(ast::SamplerKind::kSampler);
+ auto* b = create<Sampler>(ast::SamplerKind::kSampler);
+ auto* c = create<Sampler>(ast::SamplerKind::kComparisonSampler);
+
+ EXPECT_EQ(a->kind(), ast::SamplerKind::kSampler);
+ EXPECT_EQ(c->kind(), ast::SamplerKind::kComparisonSampler);
+
+ EXPECT_FALSE(a->IsComparison());
+ EXPECT_TRUE(c->IsComparison());
+
+ EXPECT_EQ(a, b);
+ EXPECT_NE(a, c);
+}
+
+TEST_F(SamplerTest, Hash) {
+ auto* a = create<Sampler>(ast::SamplerKind::kSampler);
+ auto* b = create<Sampler>(ast::SamplerKind::kSampler);
+ auto* c = create<Sampler>(ast::SamplerKind::kComparisonSampler);
+
+ EXPECT_EQ(a->Hash(), b->Hash());
+ EXPECT_NE(a->Hash(), c->Hash());
+}
+
+TEST_F(SamplerTest, Equals) {
+ auto* a = create<Sampler>(ast::SamplerKind::kSampler);
+ auto* b = create<Sampler>(ast::SamplerKind::kSampler);
+ auto* c = create<Sampler>(ast::SamplerKind::kComparisonSampler);
+
+ EXPECT_TRUE(a->Equals(*b));
+ EXPECT_FALSE(a->Equals(*c));
+ EXPECT_FALSE(a->Equals(Void{}));
+}
+
+TEST_F(SamplerTest, FriendlyNameSampler) {
+ Sampler s{ast::SamplerKind::kSampler};
+ EXPECT_EQ(s.FriendlyName(Symbols()), "sampler");
+}
+
+TEST_F(SamplerTest, FriendlyNameComparisonSampler) {
+ Sampler s{ast::SamplerKind::kComparisonSampler};
+ EXPECT_EQ(s.FriendlyName(Symbols()), "sampler_comparison");
+}
+
+} // namespace
+} // namespace tint::sem
diff --git a/chromium/third_party/dawn/src/tint/sem/sampler_texture_pair.h b/chromium/third_party/dawn/src/tint/sem/sampler_texture_pair.h
index f9b665f899a..71f3e3cafa0 100644
--- a/chromium/third_party/dawn/src/tint/sem/sampler_texture_pair.h
+++ b/chromium/third_party/dawn/src/tint/sem/sampler_texture_pair.h
@@ -24,25 +24,23 @@ namespace tint::sem {
/// Mapping of a sampler to a texture it samples.
struct SamplerTexturePair {
- /// group & binding values for a sampler.
- BindingPoint sampler_binding_point;
- /// group & binding values for a texture samepled by the sampler.
- BindingPoint texture_binding_point;
+ /// group & binding values for a sampler.
+ BindingPoint sampler_binding_point;
+ /// group & binding values for a texture sampled by the sampler.
+ BindingPoint texture_binding_point;
- /// Equality operator
- /// @param rhs the SamplerTexturePair to compare against
- /// @returns true if this SamplerTexturePair is equal to `rhs`
- inline bool operator==(const SamplerTexturePair& rhs) const {
- return sampler_binding_point == rhs.sampler_binding_point &&
- texture_binding_point == rhs.texture_binding_point;
- }
+ /// Equality operator
+ /// @param rhs the SamplerTexturePair to compare against
+ /// @returns true if this SamplerTexturePair is equal to `rhs`
+ inline bool operator==(const SamplerTexturePair& rhs) const {
+ return sampler_binding_point == rhs.sampler_binding_point &&
+ texture_binding_point == rhs.texture_binding_point;
+ }
- /// Inequality operator
- /// @param rhs the SamplerTexturePair to compare against
- /// @returns true if this SamplerTexturePair is not equal to `rhs`
- inline bool operator!=(const SamplerTexturePair& rhs) const {
- return !(*this == rhs);
- }
+ /// Inequality operator
+ /// @param rhs the SamplerTexturePair to compare against
+ /// @returns true if this SamplerTexturePair is not equal to `rhs`
+ inline bool operator!=(const SamplerTexturePair& rhs) const { return !(*this == rhs); }
};
} // namespace tint::sem
@@ -54,14 +52,12 @@ namespace std {
/// std::unordered_set.
template <>
class hash<tint::sem::SamplerTexturePair> {
- public:
- /// @param stp the texture pair to create a hash for
- /// @return the hash value
- inline std::size_t operator()(
- const tint::sem::SamplerTexturePair& stp) const {
- return tint::utils::Hash(stp.sampler_binding_point,
- stp.texture_binding_point);
- }
+ public:
+ /// @param stp the texture pair to create a hash for
+ /// @return the hash value
+ inline std::size_t operator()(const tint::sem::SamplerTexturePair& stp) const {
+ return tint::utils::Hash(stp.sampler_binding_point, stp.texture_binding_point);
+ }
};
} // namespace std
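The std::hash specialization above exists so a SamplerTexturePair can key hashed containers directly. A minimal sketch under the assumption that BindingPoint is the usual {group, binding} aggregate used elsewhere in Tint:

    #include <unordered_map>
    #include "src/tint/sem/sampler_texture_pair.h"

    // Track something per sampler/texture combination; the int payload here is
    // only a placeholder. No custom hasher argument is needed thanks to the
    // specialization declared above.
    std::unordered_map<tint::sem::SamplerTexturePair, int> uses;
    tint::sem::SamplerTexturePair pair{};
    pair.sampler_binding_point = {/* group */ 0, /* binding */ 1};
    pair.texture_binding_point = {/* group */ 0, /* binding */ 2};
    uses[pair]++;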
diff --git a/chromium/third_party/dawn/src/tint/sem/sampler_type.h b/chromium/third_party/dawn/src/tint/sem/sampler_type.h
deleted file mode 100644
index d1fffab7f93..00000000000
--- a/chromium/third_party/dawn/src/tint/sem/sampler_type.h
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2020 The Tint Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef SRC_TINT_SEM_SAMPLER_TYPE_H_
-#define SRC_TINT_SEM_SAMPLER_TYPE_H_
-
-#include <string>
-
-#include "src/tint/ast/sampler.h"
-#include "src/tint/sem/type.h"
-
-namespace tint::sem {
-
-/// A sampler type.
-class Sampler final : public Castable<Sampler, Type> {
- public:
- /// Constructor
- /// @param kind the kind of sampler
- explicit Sampler(ast::SamplerKind kind);
- /// Move constructor
- Sampler(Sampler&&);
- ~Sampler() override;
-
- /// @returns a hash of the type.
- size_t Hash() const override;
-
- /// @param other the other type to compare against
- /// @returns true if the this type is equal to the given type
- bool Equals(const Type& other) const override;
-
- /// @returns the sampler type
- ast::SamplerKind kind() const { return kind_; }
-
- /// @returns true if this is a comparison sampler
- bool IsComparison() const {
- return kind_ == ast::SamplerKind::kComparisonSampler;
- }
-
- /// @param symbols the program's symbol table
- /// @returns the name for this type that closely resembles how it would be
- /// declared in WGSL.
- std::string FriendlyName(const SymbolTable& symbols) const override;
-
- private:
- ast::SamplerKind const kind_;
-};
-
-} // namespace tint::sem
-
-#endif // SRC_TINT_SEM_SAMPLER_TYPE_H_
diff --git a/chromium/third_party/dawn/src/tint/sem/sampler_type_test.cc b/chromium/third_party/dawn/src/tint/sem/sampler_type_test.cc
deleted file mode 100644
index edd57d20e84..00000000000
--- a/chromium/third_party/dawn/src/tint/sem/sampler_type_test.cc
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright 2020 The Tint Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "src/tint/sem/sampler_type.h"
-#include "src/tint/sem/test_helper.h"
-#include "src/tint/sem/texture_type.h"
-
-namespace tint::sem {
-namespace {
-
-using SamplerTest = TestHelper;
-
-TEST_F(SamplerTest, Creation) {
- auto* a = create<Sampler>(ast::SamplerKind::kSampler);
- auto* b = create<Sampler>(ast::SamplerKind::kSampler);
- auto* c = create<Sampler>(ast::SamplerKind::kComparisonSampler);
-
- EXPECT_EQ(a->kind(), ast::SamplerKind::kSampler);
- EXPECT_EQ(c->kind(), ast::SamplerKind::kComparisonSampler);
-
- EXPECT_FALSE(a->IsComparison());
- EXPECT_TRUE(c->IsComparison());
-
- EXPECT_EQ(a, b);
- EXPECT_NE(a, c);
-}
-
-TEST_F(SamplerTest, Hash) {
- auto* a = create<Sampler>(ast::SamplerKind::kSampler);
- auto* b = create<Sampler>(ast::SamplerKind::kSampler);
- auto* c = create<Sampler>(ast::SamplerKind::kComparisonSampler);
-
- EXPECT_EQ(a->Hash(), b->Hash());
- EXPECT_NE(a->Hash(), c->Hash());
-}
-
-TEST_F(SamplerTest, Equals) {
- auto* a = create<Sampler>(ast::SamplerKind::kSampler);
- auto* b = create<Sampler>(ast::SamplerKind::kSampler);
- auto* c = create<Sampler>(ast::SamplerKind::kComparisonSampler);
-
- EXPECT_TRUE(a->Equals(*b));
- EXPECT_FALSE(a->Equals(*c));
- EXPECT_FALSE(a->Equals(Void{}));
-}
-
-TEST_F(SamplerTest, FriendlyNameSampler) {
- Sampler s{ast::SamplerKind::kSampler};
- EXPECT_EQ(s.FriendlyName(Symbols()), "sampler");
-}
-
-TEST_F(SamplerTest, FriendlyNameComparisonSampler) {
- Sampler s{ast::SamplerKind::kComparisonSampler};
- EXPECT_EQ(s.FriendlyName(Symbols()), "sampler_comparison");
-}
-
-} // namespace
-} // namespace tint::sem
diff --git a/chromium/third_party/dawn/src/tint/sem/sem_array_test.cc b/chromium/third_party/dawn/src/tint/sem/sem_array_test.cc
index fc78f38baff..4b61fd77401 100644
--- a/chromium/third_party/dawn/src/tint/sem/sem_array_test.cc
+++ b/chromium/third_party/dawn/src/tint/sem/sem_array_test.cc
@@ -13,7 +13,7 @@
// limitations under the License.
#include "src/tint/sem/test_helper.h"
-#include "src/tint/sem/texture_type.h"
+#include "src/tint/sem/texture.h"
namespace tint::sem {
namespace {
@@ -21,108 +21,108 @@ namespace {
using ArrayTest = TestHelper;
TEST_F(ArrayTest, CreateSizedArray) {
- auto* a = create<Array>(create<U32>(), 2u, 4u, 8u, 32u, 16u);
- auto* b = create<Array>(create<U32>(), 2u, 4u, 8u, 32u, 16u);
- auto* c = create<Array>(create<U32>(), 3u, 4u, 8u, 32u, 16u);
- auto* d = create<Array>(create<U32>(), 2u, 5u, 8u, 32u, 16u);
- auto* e = create<Array>(create<U32>(), 2u, 4u, 9u, 32u, 16u);
- auto* f = create<Array>(create<U32>(), 2u, 4u, 8u, 33u, 16u);
- auto* g = create<Array>(create<U32>(), 2u, 4u, 8u, 33u, 17u);
-
- EXPECT_EQ(a->ElemType(), create<U32>());
- EXPECT_EQ(a->Count(), 2u);
- EXPECT_EQ(a->Align(), 4u);
- EXPECT_EQ(a->Size(), 8u);
- EXPECT_EQ(a->Stride(), 32u);
- EXPECT_EQ(a->ImplicitStride(), 16u);
- EXPECT_FALSE(a->IsStrideImplicit());
- EXPECT_FALSE(a->IsRuntimeSized());
-
- EXPECT_EQ(a, b);
- EXPECT_NE(a, c);
- EXPECT_NE(a, d);
- EXPECT_NE(a, e);
- EXPECT_NE(a, f);
- EXPECT_NE(a, g);
+ auto* a = create<Array>(create<U32>(), 2u, 4u, 8u, 32u, 16u);
+ auto* b = create<Array>(create<U32>(), 2u, 4u, 8u, 32u, 16u);
+ auto* c = create<Array>(create<U32>(), 3u, 4u, 8u, 32u, 16u);
+ auto* d = create<Array>(create<U32>(), 2u, 5u, 8u, 32u, 16u);
+ auto* e = create<Array>(create<U32>(), 2u, 4u, 9u, 32u, 16u);
+ auto* f = create<Array>(create<U32>(), 2u, 4u, 8u, 33u, 16u);
+ auto* g = create<Array>(create<U32>(), 2u, 4u, 8u, 33u, 17u);
+
+ EXPECT_EQ(a->ElemType(), create<U32>());
+ EXPECT_EQ(a->Count(), 2u);
+ EXPECT_EQ(a->Align(), 4u);
+ EXPECT_EQ(a->Size(), 8u);
+ EXPECT_EQ(a->Stride(), 32u);
+ EXPECT_EQ(a->ImplicitStride(), 16u);
+ EXPECT_FALSE(a->IsStrideImplicit());
+ EXPECT_FALSE(a->IsRuntimeSized());
+
+ EXPECT_EQ(a, b);
+ EXPECT_NE(a, c);
+ EXPECT_NE(a, d);
+ EXPECT_NE(a, e);
+ EXPECT_NE(a, f);
+ EXPECT_NE(a, g);
}
TEST_F(ArrayTest, CreateRuntimeArray) {
- auto* a = create<Array>(create<U32>(), 0u, 4u, 8u, 32u, 32u);
- auto* b = create<Array>(create<U32>(), 0u, 4u, 8u, 32u, 32u);
- auto* c = create<Array>(create<U32>(), 0u, 5u, 8u, 32u, 32u);
- auto* d = create<Array>(create<U32>(), 0u, 4u, 9u, 32u, 32u);
- auto* e = create<Array>(create<U32>(), 0u, 4u, 8u, 33u, 32u);
- auto* f = create<Array>(create<U32>(), 0u, 4u, 8u, 33u, 17u);
-
- EXPECT_EQ(a->ElemType(), create<U32>());
- EXPECT_EQ(a->Count(), 0u);
- EXPECT_EQ(a->Align(), 4u);
- EXPECT_EQ(a->Size(), 8u);
- EXPECT_EQ(a->Stride(), 32u);
- EXPECT_EQ(a->ImplicitStride(), 32u);
- EXPECT_TRUE(a->IsStrideImplicit());
- EXPECT_TRUE(a->IsRuntimeSized());
-
- EXPECT_EQ(a, b);
- EXPECT_NE(a, c);
- EXPECT_NE(a, d);
- EXPECT_NE(a, e);
- EXPECT_NE(a, f);
+ auto* a = create<Array>(create<U32>(), 0u, 4u, 8u, 32u, 32u);
+ auto* b = create<Array>(create<U32>(), 0u, 4u, 8u, 32u, 32u);
+ auto* c = create<Array>(create<U32>(), 0u, 5u, 8u, 32u, 32u);
+ auto* d = create<Array>(create<U32>(), 0u, 4u, 9u, 32u, 32u);
+ auto* e = create<Array>(create<U32>(), 0u, 4u, 8u, 33u, 32u);
+ auto* f = create<Array>(create<U32>(), 0u, 4u, 8u, 33u, 17u);
+
+ EXPECT_EQ(a->ElemType(), create<U32>());
+ EXPECT_EQ(a->Count(), 0u);
+ EXPECT_EQ(a->Align(), 4u);
+ EXPECT_EQ(a->Size(), 8u);
+ EXPECT_EQ(a->Stride(), 32u);
+ EXPECT_EQ(a->ImplicitStride(), 32u);
+ EXPECT_TRUE(a->IsStrideImplicit());
+ EXPECT_TRUE(a->IsRuntimeSized());
+
+ EXPECT_EQ(a, b);
+ EXPECT_NE(a, c);
+ EXPECT_NE(a, d);
+ EXPECT_NE(a, e);
+ EXPECT_NE(a, f);
}
TEST_F(ArrayTest, Hash) {
- auto* a = create<Array>(create<U32>(), 2u, 4u, 8u, 32u, 16u);
- auto* b = create<Array>(create<U32>(), 2u, 4u, 8u, 32u, 16u);
- auto* c = create<Array>(create<U32>(), 3u, 4u, 8u, 32u, 16u);
- auto* d = create<Array>(create<U32>(), 2u, 5u, 8u, 32u, 16u);
- auto* e = create<Array>(create<U32>(), 2u, 4u, 9u, 32u, 16u);
- auto* f = create<Array>(create<U32>(), 2u, 4u, 8u, 33u, 16u);
- auto* g = create<Array>(create<U32>(), 2u, 4u, 8u, 33u, 17u);
-
- EXPECT_EQ(a->Hash(), b->Hash());
- EXPECT_NE(a->Hash(), c->Hash());
- EXPECT_NE(a->Hash(), d->Hash());
- EXPECT_NE(a->Hash(), e->Hash());
- EXPECT_NE(a->Hash(), f->Hash());
- EXPECT_NE(a->Hash(), g->Hash());
+ auto* a = create<Array>(create<U32>(), 2u, 4u, 8u, 32u, 16u);
+ auto* b = create<Array>(create<U32>(), 2u, 4u, 8u, 32u, 16u);
+ auto* c = create<Array>(create<U32>(), 3u, 4u, 8u, 32u, 16u);
+ auto* d = create<Array>(create<U32>(), 2u, 5u, 8u, 32u, 16u);
+ auto* e = create<Array>(create<U32>(), 2u, 4u, 9u, 32u, 16u);
+ auto* f = create<Array>(create<U32>(), 2u, 4u, 8u, 33u, 16u);
+ auto* g = create<Array>(create<U32>(), 2u, 4u, 8u, 33u, 17u);
+
+ EXPECT_EQ(a->Hash(), b->Hash());
+ EXPECT_NE(a->Hash(), c->Hash());
+ EXPECT_NE(a->Hash(), d->Hash());
+ EXPECT_NE(a->Hash(), e->Hash());
+ EXPECT_NE(a->Hash(), f->Hash());
+ EXPECT_NE(a->Hash(), g->Hash());
}
TEST_F(ArrayTest, Equals) {
- auto* a = create<Array>(create<U32>(), 2u, 4u, 8u, 32u, 16u);
- auto* b = create<Array>(create<U32>(), 2u, 4u, 8u, 32u, 16u);
- auto* c = create<Array>(create<U32>(), 3u, 4u, 8u, 32u, 16u);
- auto* d = create<Array>(create<U32>(), 2u, 5u, 8u, 32u, 16u);
- auto* e = create<Array>(create<U32>(), 2u, 4u, 9u, 32u, 16u);
- auto* f = create<Array>(create<U32>(), 2u, 4u, 8u, 33u, 16u);
- auto* g = create<Array>(create<U32>(), 2u, 4u, 8u, 33u, 17u);
-
- EXPECT_TRUE(a->Equals(*b));
- EXPECT_FALSE(a->Equals(*c));
- EXPECT_FALSE(a->Equals(*d));
- EXPECT_FALSE(a->Equals(*e));
- EXPECT_FALSE(a->Equals(*f));
- EXPECT_FALSE(a->Equals(*g));
- EXPECT_FALSE(a->Equals(Void{}));
+ auto* a = create<Array>(create<U32>(), 2u, 4u, 8u, 32u, 16u);
+ auto* b = create<Array>(create<U32>(), 2u, 4u, 8u, 32u, 16u);
+ auto* c = create<Array>(create<U32>(), 3u, 4u, 8u, 32u, 16u);
+ auto* d = create<Array>(create<U32>(), 2u, 5u, 8u, 32u, 16u);
+ auto* e = create<Array>(create<U32>(), 2u, 4u, 9u, 32u, 16u);
+ auto* f = create<Array>(create<U32>(), 2u, 4u, 8u, 33u, 16u);
+ auto* g = create<Array>(create<U32>(), 2u, 4u, 8u, 33u, 17u);
+
+ EXPECT_TRUE(a->Equals(*b));
+ EXPECT_FALSE(a->Equals(*c));
+ EXPECT_FALSE(a->Equals(*d));
+ EXPECT_FALSE(a->Equals(*e));
+ EXPECT_FALSE(a->Equals(*f));
+ EXPECT_FALSE(a->Equals(*g));
+ EXPECT_FALSE(a->Equals(Void{}));
}
TEST_F(ArrayTest, FriendlyNameRuntimeSized) {
- auto* arr = create<Array>(create<I32>(), 0u, 0u, 4u, 4u, 4u);
- EXPECT_EQ(arr->FriendlyName(Symbols()), "array<i32>");
+ auto* arr = create<Array>(create<I32>(), 0u, 0u, 4u, 4u, 4u);
+ EXPECT_EQ(arr->FriendlyName(Symbols()), "array<i32>");
}
TEST_F(ArrayTest, FriendlyNameStaticSized) {
- auto* arr = create<Array>(create<I32>(), 5u, 4u, 20u, 4u, 4u);
- EXPECT_EQ(arr->FriendlyName(Symbols()), "array<i32, 5>");
+ auto* arr = create<Array>(create<I32>(), 5u, 4u, 20u, 4u, 4u);
+ EXPECT_EQ(arr->FriendlyName(Symbols()), "array<i32, 5>");
}
TEST_F(ArrayTest, FriendlyNameRuntimeSizedNonImplicitStride) {
- auto* arr = create<Array>(create<I32>(), 0u, 0u, 4u, 8u, 4u);
- EXPECT_EQ(arr->FriendlyName(Symbols()), "@stride(8) array<i32>");
+ auto* arr = create<Array>(create<I32>(), 0u, 0u, 4u, 8u, 4u);
+ EXPECT_EQ(arr->FriendlyName(Symbols()), "@stride(8) array<i32>");
}
TEST_F(ArrayTest, FriendlyNameStaticSizedNonImplicitStride) {
- auto* arr = create<Array>(create<I32>(), 5u, 4u, 20u, 8u, 4u);
- EXPECT_EQ(arr->FriendlyName(Symbols()), "@stride(8) array<i32, 5>");
+ auto* arr = create<Array>(create<I32>(), 5u, 4u, 20u, 8u, 4u);
+ EXPECT_EQ(arr->FriendlyName(Symbols()), "@stride(8) array<i32, 5>");
}
} // namespace
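The sem::Array constructor used throughout the tests above takes its layout parameters positionally; the following sketch (not part of the patch) annotates them, with the meanings inferred from the accessor expectations in those tests:

    // create<Array>(element, count, align, size, stride, implicit_stride)
    auto* arr = create<Array>(create<U32>(),
                              2u,    // Count(): 0u would mean runtime-sized
                              4u,    // Align()
                              8u,    // Size()
                              32u,   // Stride(): the stride actually used
                              16u);  // ImplicitStride(): stride if unspecified
    EXPECT_FALSE(arr->IsStrideImplicit());  // Stride() differs from ImplicitStride()
    EXPECT_FALSE(arr->IsRuntimeSized());    // Count() is non-zero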
diff --git a/chromium/third_party/dawn/src/tint/sem/sem_struct_test.cc b/chromium/third_party/dawn/src/tint/sem/sem_struct_test.cc
index d788be991f1..4a6371284d9 100644
--- a/chromium/third_party/dawn/src/tint/sem/sem_struct_test.cc
+++ b/chromium/third_party/dawn/src/tint/sem/sem_struct_test.cc
@@ -14,7 +14,7 @@
#include "src/tint/sem/struct.h"
#include "src/tint/sem/test_helper.h"
-#include "src/tint/sem/texture_type.h"
+#include "src/tint/sem/texture.h"
namespace tint::sem {
namespace {
@@ -22,85 +22,72 @@ namespace {
using StructTest = TestHelper;
TEST_F(StructTest, Creation) {
- auto name = Sym("S");
- auto* impl =
- create<ast::Struct>(name, ast::StructMemberList{}, ast::AttributeList{});
- auto* ptr = impl;
- auto* s =
- create<sem::Struct>(impl, impl->name, StructMemberList{}, 4u /* align */,
- 8u /* size */, 16u /* size_no_padding */);
- EXPECT_EQ(s->Declaration(), ptr);
- EXPECT_EQ(s->Align(), 4u);
- EXPECT_EQ(s->Size(), 8u);
- EXPECT_EQ(s->SizeNoPadding(), 16u);
+ auto name = Sym("S");
+ auto* impl = create<ast::Struct>(name, ast::StructMemberList{}, ast::AttributeList{});
+ auto* ptr = impl;
+ auto* s = create<sem::Struct>(impl, impl->name, StructMemberList{}, 4u /* align */,
+ 8u /* size */, 16u /* size_no_padding */);
+ EXPECT_EQ(s->Declaration(), ptr);
+ EXPECT_EQ(s->Align(), 4u);
+ EXPECT_EQ(s->Size(), 8u);
+ EXPECT_EQ(s->SizeNoPadding(), 16u);
}
TEST_F(StructTest, Hash) {
- auto* a_impl = create<ast::Struct>(Sym("a"), ast::StructMemberList{},
- ast::AttributeList{});
- auto* a = create<sem::Struct>(a_impl, a_impl->name, StructMemberList{},
- 4u /* align */, 4u /* size */,
- 4u /* size_no_padding */);
- auto* b_impl = create<ast::Struct>(Sym("b"), ast::StructMemberList{},
- ast::AttributeList{});
- auto* b = create<sem::Struct>(b_impl, b_impl->name, StructMemberList{},
- 4u /* align */, 4u /* size */,
- 4u /* size_no_padding */);
-
- EXPECT_NE(a->Hash(), b->Hash());
+ auto* a_impl = create<ast::Struct>(Sym("a"), ast::StructMemberList{}, ast::AttributeList{});
+ auto* a = create<sem::Struct>(a_impl, a_impl->name, StructMemberList{}, 4u /* align */,
+ 4u /* size */, 4u /* size_no_padding */);
+ auto* b_impl = create<ast::Struct>(Sym("b"), ast::StructMemberList{}, ast::AttributeList{});
+ auto* b = create<sem::Struct>(b_impl, b_impl->name, StructMemberList{}, 4u /* align */,
+ 4u /* size */, 4u /* size_no_padding */);
+
+ EXPECT_NE(a->Hash(), b->Hash());
}
TEST_F(StructTest, Equals) {
- auto* a_impl = create<ast::Struct>(Sym("a"), ast::StructMemberList{},
- ast::AttributeList{});
- auto* a = create<sem::Struct>(a_impl, a_impl->name, StructMemberList{},
- 4u /* align */, 4u /* size */,
- 4u /* size_no_padding */);
- auto* b_impl = create<ast::Struct>(Sym("b"), ast::StructMemberList{},
- ast::AttributeList{});
- auto* b = create<sem::Struct>(b_impl, b_impl->name, StructMemberList{},
- 4u /* align */, 4u /* size */,
- 4u /* size_no_padding */);
-
- EXPECT_TRUE(a->Equals(*a));
- EXPECT_FALSE(a->Equals(*b));
- EXPECT_FALSE(a->Equals(Void{}));
+ auto* a_impl = create<ast::Struct>(Sym("a"), ast::StructMemberList{}, ast::AttributeList{});
+ auto* a = create<sem::Struct>(a_impl, a_impl->name, StructMemberList{}, 4u /* align */,
+ 4u /* size */, 4u /* size_no_padding */);
+ auto* b_impl = create<ast::Struct>(Sym("b"), ast::StructMemberList{}, ast::AttributeList{});
+ auto* b = create<sem::Struct>(b_impl, b_impl->name, StructMemberList{}, 4u /* align */,
+ 4u /* size */, 4u /* size_no_padding */);
+
+ EXPECT_TRUE(a->Equals(*a));
+ EXPECT_FALSE(a->Equals(*b));
+ EXPECT_FALSE(a->Equals(Void{}));
}
TEST_F(StructTest, FriendlyName) {
- auto name = Sym("my_struct");
- auto* impl =
- create<ast::Struct>(name, ast::StructMemberList{}, ast::AttributeList{});
- auto* s =
- create<sem::Struct>(impl, impl->name, StructMemberList{}, 4u /* align */,
- 4u /* size */, 4u /* size_no_padding */);
- EXPECT_EQ(s->FriendlyName(Symbols()), "my_struct");
+ auto name = Sym("my_struct");
+ auto* impl = create<ast::Struct>(name, ast::StructMemberList{}, ast::AttributeList{});
+ auto* s = create<sem::Struct>(impl, impl->name, StructMemberList{}, 4u /* align */,
+ 4u /* size */, 4u /* size_no_padding */);
+ EXPECT_EQ(s->FriendlyName(Symbols()), "my_struct");
}
TEST_F(StructTest, Layout) {
- auto* inner_st = //
- Structure("Inner", {
- Member("a", ty.i32()),
- Member("b", ty.u32()),
- Member("c", ty.f32()),
- Member("d", ty.vec3<f32>()),
- Member("e", ty.mat4x2<f32>()),
- });
-
- auto* outer_st =
- Structure("Outer", {
- Member("inner", ty.type_name("Inner")),
- Member("a", ty.i32()),
- });
-
- auto p = Build();
- ASSERT_TRUE(p.IsValid()) << p.Diagnostics().str();
-
- auto* sem_inner_st = p.Sem().Get(inner_st);
- auto* sem_outer_st = p.Sem().Get(outer_st);
-
- EXPECT_EQ(sem_inner_st->Layout(p.Symbols()),
- R"(/* align(16) size(64) */ struct Inner {
+ auto* inner_st = //
+ Structure("Inner", {
+ Member("a", ty.i32()),
+ Member("b", ty.u32()),
+ Member("c", ty.f32()),
+ Member("d", ty.vec3<f32>()),
+ Member("e", ty.mat4x2<f32>()),
+ });
+
+ auto* outer_st = Structure("Outer", {
+ Member("inner", ty.type_name("Inner")),
+ Member("a", ty.i32()),
+ });
+
+ auto p = Build();
+ ASSERT_TRUE(p.IsValid()) << p.Diagnostics().str();
+
+ auto* sem_inner_st = p.Sem().Get(inner_st);
+ auto* sem_outer_st = p.Sem().Get(outer_st);
+
+ EXPECT_EQ(sem_inner_st->Layout(p.Symbols()),
+ R"(/* align(16) size(64) */ struct Inner {
/* offset( 0) align( 4) size( 4) */ a : i32;
/* offset( 4) align( 4) size( 4) */ b : u32;
/* offset( 8) align( 4) size( 4) */ c : f32;
@@ -110,8 +97,8 @@ TEST_F(StructTest, Layout) {
/* offset(32) align( 8) size(32) */ e : mat4x2<f32>;
/* */ };)");
- EXPECT_EQ(sem_outer_st->Layout(p.Symbols()),
- R"(/* align(16) size(80) */ struct Outer {
+ EXPECT_EQ(sem_outer_st->Layout(p.Symbols()),
+ R"(/* align(16) size(80) */ struct Outer {
/* offset( 0) align(16) size(64) */ inner : Inner;
/* offset(64) align( 4) size( 4) */ a : i32;
/* offset(68) align( 1) size(12) */ // -- implicit struct size padding --;
diff --git a/chromium/third_party/dawn/src/tint/sem/statement.cc b/chromium/third_party/dawn/src/tint/sem/statement.cc
index 2046f630bc2..fb8e557f878 100644
--- a/chromium/third_party/dawn/src/tint/sem/statement.cc
+++ b/chromium/third_party/dawn/src/tint/sem/statement.cc
@@ -33,7 +33,7 @@ Statement::Statement(const ast::Statement* declaration,
Statement::~Statement() = default;
const BlockStatement* Statement::Block() const {
- return FindFirstParent<BlockStatement>();
+ return FindFirstParent<BlockStatement>();
}
CompoundStatement::CompoundStatement(const ast::Statement* declaration,
diff --git a/chromium/third_party/dawn/src/tint/sem/statement.h b/chromium/third_party/dawn/src/tint/sem/statement.h
index 76336d7b268..bdcb55c7064 100644
--- a/chromium/third_party/dawn/src/tint/sem/statement.h
+++ b/chromium/third_party/dawn/src/tint/sem/statement.h
@@ -20,7 +20,6 @@
// Forward declarations
namespace tint::ast {
-class Function;
class Statement;
} // namespace tint::ast
namespace tint::sem {
@@ -37,143 +36,141 @@ namespace detail {
/// resolves to CompoundStatement.
template <typename... TYPES>
struct FindFirstParentReturn {
- /// The pointer type returned by Statement::FindFirstParent()
- using type = CompoundStatement;
+ /// The pointer type returned by Statement::FindFirstParent()
+ using type = CompoundStatement;
};
/// A specialization of FindFirstParentReturn for a single template argument.
/// FindFirstParentReturn::type resolves to the single template argument.
template <typename T>
struct FindFirstParentReturn<T> {
- /// The pointer type returned by Statement::FindFirstParent()
- using type = T;
+ /// The pointer type returned by Statement::FindFirstParent()
+ using type = T;
};
template <typename... TYPES>
-using FindFirstParentReturnType =
- typename FindFirstParentReturn<TYPES...>::type;
+using FindFirstParentReturnType = typename FindFirstParentReturn<TYPES...>::type;
} // namespace detail
/// Statement holds the semantic information for a statement.
class Statement : public Castable<Statement, Node> {
- public:
- /// Constructor
- /// @param declaration the AST node for this statement
- /// @param parent the owning statement
- /// @param function the owning function
- Statement(const ast::Statement* declaration,
- const CompoundStatement* parent,
- const sem::Function* function);
-
- /// Destructor
- ~Statement() override;
-
- /// @return the AST node for this statement
- const ast::Statement* Declaration() const { return declaration_; }
-
- /// @return the statement that encloses this statement
- const CompoundStatement* Parent() const { return parent_; }
-
- /// @returns the closest enclosing parent that satisfies the given predicate,
- /// which may be the statement itself, or nullptr if no match is found.
- /// @param pred a predicate that the resulting block must satisfy
- template <typename Pred>
- const CompoundStatement* FindFirstParent(Pred&& pred) const;
-
- /// @returns the closest enclosing parent that is of one of the types in
- /// `TYPES`, which may be the statement itself, or nullptr if no match is
- /// found. If `TYPES` is a single template argument, the return type is a
- /// pointer to that template argument type, otherwise a CompoundStatement
- /// pointer is returned.
- template <typename... TYPES>
- const detail::FindFirstParentReturnType<TYPES...>* FindFirstParent() const;
-
- /// @return the closest enclosing block for this statement
- const BlockStatement* Block() const;
-
- /// @returns the function that owns this statement
- const sem::Function* Function() const { return function_; }
-
- /// @return the behaviors of this statement
- const sem::Behaviors& Behaviors() const { return behaviors_; }
-
- /// @return the behaviors of this statement
- sem::Behaviors& Behaviors() { return behaviors_; }
-
- /// @returns true if this statement is reachable by control flow according to
- /// the behavior analysis
- bool IsReachable() const { return is_reachable_; }
-
- /// @param is_reachable whether this statement is reachable by control flow
- /// according to the behavior analysis
- void SetIsReachable(bool is_reachable) { is_reachable_ = is_reachable; }
-
- private:
- const ast::Statement* const declaration_;
- const CompoundStatement* const parent_;
- const sem::Function* const function_;
- sem::Behaviors behaviors_{sem::Behavior::kNext};
- bool is_reachable_ = true;
+ public:
+ /// Constructor
+ /// @param declaration the AST node for this statement
+ /// @param parent the owning statement
+ /// @param function the owning function
+ Statement(const ast::Statement* declaration,
+ const CompoundStatement* parent,
+ const sem::Function* function);
+
+ /// Destructor
+ ~Statement() override;
+
+ /// @return the AST node for this statement
+ const ast::Statement* Declaration() const { return declaration_; }
+
+ /// @return the statement that encloses this statement
+ const CompoundStatement* Parent() const { return parent_; }
+
+ /// @returns the closest enclosing parent that satisfies the given predicate,
+ /// which may be the statement itself, or nullptr if no match is found.
+ /// @param pred a predicate that the resulting block must satisfy
+ template <typename Pred>
+ const CompoundStatement* FindFirstParent(Pred&& pred) const;
+
+ /// @returns the closest enclosing parent that is of one of the types in
+ /// `TYPES`, which may be the statement itself, or nullptr if no match is
+ /// found. If `TYPES` is a single template argument, the return type is a
+ /// pointer to that template argument type, otherwise a CompoundStatement
+ /// pointer is returned.
+ template <typename... TYPES>
+ const detail::FindFirstParentReturnType<TYPES...>* FindFirstParent() const;
+
+ /// @return the closest enclosing block for this statement
+ const BlockStatement* Block() const;
+
+ /// @returns the function that owns this statement
+ const sem::Function* Function() const { return function_; }
+
+ /// @return the behaviors of this statement
+ const sem::Behaviors& Behaviors() const { return behaviors_; }
+
+ /// @return the behaviors of this statement
+ sem::Behaviors& Behaviors() { return behaviors_; }
+
+ /// @returns true if this statement is reachable by control flow according to
+ /// the behavior analysis
+ bool IsReachable() const { return is_reachable_; }
+
+ /// @param is_reachable whether this statement is reachable by control flow
+ /// according to the behavior analysis
+ void SetIsReachable(bool is_reachable) { is_reachable_ = is_reachable; }
+
+ private:
+ const ast::Statement* const declaration_;
+ const CompoundStatement* const parent_;
+ const sem::Function* const function_;
+ sem::Behaviors behaviors_{sem::Behavior::kNext};
+ bool is_reachable_ = true;
};
/// CompoundStatement is the base class of statements that can hold other
/// statements.
class CompoundStatement : public Castable<Statement, Statement> {
- public:
- /// Constructor
- /// @param declaration the AST node for this statement
- /// @param statement the owning statement
- /// @param function the owning function
- CompoundStatement(const ast::Statement* declaration,
- const CompoundStatement* statement,
- const sem::Function* function);
-
- /// Destructor
- ~CompoundStatement() override;
+ public:
+ /// Constructor
+ /// @param declaration the AST node for this statement
+ /// @param statement the owning statement
+ /// @param function the owning function
+ CompoundStatement(const ast::Statement* declaration,
+ const CompoundStatement* statement,
+ const sem::Function* function);
+
+ /// Destructor
+ ~CompoundStatement() override;
};
template <typename Pred>
const CompoundStatement* Statement::FindFirstParent(Pred&& pred) const {
- if (auto* self = As<CompoundStatement>()) {
- if (pred(self)) {
- return self;
+ if (auto* self = As<CompoundStatement>()) {
+ if (pred(self)) {
+ return self;
+ }
+ }
+ const auto* curr = parent_;
+ while (curr && !pred(curr)) {
+ curr = curr->Parent();
}
- }
- const auto* curr = parent_;
- while (curr && !pred(curr)) {
- curr = curr->Parent();
- }
- return curr;
+ return curr;
}
template <typename... TYPES>
-const detail::FindFirstParentReturnType<TYPES...>* Statement::FindFirstParent()
- const {
- using ReturnType = detail::FindFirstParentReturnType<TYPES...>;
- if (sizeof...(TYPES) == 1) {
- if (auto* p = As<ReturnType>()) {
- return p;
- }
- const auto* curr = parent_;
- while (curr) {
- if (auto* p = curr->As<ReturnType>()) {
- return p;
- }
- curr = curr->Parent();
- }
- } else {
- if (IsAnyOf<TYPES...>()) {
- return As<ReturnType>();
- }
- const auto* curr = parent_;
- while (curr) {
- if (curr->IsAnyOf<TYPES...>()) {
- return curr->As<ReturnType>();
- }
- curr = curr->Parent();
+const detail::FindFirstParentReturnType<TYPES...>* Statement::FindFirstParent() const {
+ using ReturnType = detail::FindFirstParentReturnType<TYPES...>;
+ if (sizeof...(TYPES) == 1) {
+ if (auto* p = As<ReturnType>()) {
+ return p;
+ }
+ const auto* curr = parent_;
+ while (curr) {
+ if (auto* p = curr->As<ReturnType>()) {
+ return p;
+ }
+ curr = curr->Parent();
+ }
+ } else {
+ if (IsAnyOf<TYPES...>()) {
+ return As<ReturnType>();
+ }
+ const auto* curr = parent_;
+ while (curr) {
+ if (curr->IsAnyOf<TYPES...>()) {
+ return curr->As<ReturnType>();
+ }
+ curr = curr->Parent();
+ }
}
- }
- return nullptr;
+ return nullptr;
}
} // namespace tint::sem
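FindFirstParent() comes in two flavours in the header above: a predicate overload that always returns a CompoundStatement pointer, and a variadic type-list overload whose return type narrows to the argument type when exactly one type is supplied (with several types it falls back to CompoundStatement). A hedged sketch, assuming a const sem::Statement* stmt obtained from the resolved semantic info:

    // Single type argument: the return type narrows to that type. This is what
    // Block() does internally, per the statement.cc change above.
    const sem::BlockStatement* block = stmt->FindFirstParent<sem::BlockStatement>();

    // Predicate overload: walks outwards until the predicate is satisfied.
    const sem::CompoundStatement* match =
        stmt->FindFirstParent([](const sem::CompoundStatement* s) {
            return s->Declaration() != nullptr;  // any condition on the parent
        });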
diff --git a/chromium/third_party/dawn/src/tint/sem/storage_texture.cc b/chromium/third_party/dawn/src/tint/sem/storage_texture.cc
new file mode 100644
index 00000000000..d8393d77d88
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/sem/storage_texture.cc
@@ -0,0 +1,85 @@
+// Copyright 2020 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/tint/sem/storage_texture.h"
+
+#include "src/tint/program_builder.h"
+#include "src/tint/utils/hash.h"
+
+TINT_INSTANTIATE_TYPEINFO(tint::sem::StorageTexture);
+
+namespace tint::sem {
+
+StorageTexture::StorageTexture(ast::TextureDimension dim,
+ ast::TexelFormat format,
+ ast::Access access,
+ sem::Type* subtype)
+ : Base(dim), texel_format_(format), access_(access), subtype_(subtype) {}
+
+StorageTexture::StorageTexture(StorageTexture&&) = default;
+
+StorageTexture::~StorageTexture() = default;
+
+size_t StorageTexture::Hash() const {
+ return utils::Hash(TypeInfo::Of<StorageTexture>().full_hashcode, dim(), texel_format_, access_);
+}
+
+bool StorageTexture::Equals(const sem::Type& other) const {
+ if (auto* o = other.As<StorageTexture>()) {
+ return o->dim() == dim() && o->texel_format_ == texel_format_ && o->access_ == access_;
+ }
+ return false;
+}
+
+std::string StorageTexture::FriendlyName(const SymbolTable&) const {
+ std::ostringstream out;
+ out << "texture_storage_" << dim() << "<" << texel_format_ << ", " << access_ << ">";
+ return out.str();
+}
+
+sem::Type* StorageTexture::SubtypeFor(ast::TexelFormat format, sem::Manager& type_mgr) {
+ switch (format) {
+ case ast::TexelFormat::kR32Uint:
+ case ast::TexelFormat::kRgba8Uint:
+ case ast::TexelFormat::kRg32Uint:
+ case ast::TexelFormat::kRgba16Uint:
+ case ast::TexelFormat::kRgba32Uint: {
+ return type_mgr.Get<sem::U32>();
+ }
+
+ case ast::TexelFormat::kR32Sint:
+ case ast::TexelFormat::kRgba8Sint:
+ case ast::TexelFormat::kRg32Sint:
+ case ast::TexelFormat::kRgba16Sint:
+ case ast::TexelFormat::kRgba32Sint: {
+ return type_mgr.Get<sem::I32>();
+ }
+
+ case ast::TexelFormat::kRgba8Unorm:
+ case ast::TexelFormat::kRgba8Snorm:
+ case ast::TexelFormat::kR32Float:
+ case ast::TexelFormat::kRg32Float:
+ case ast::TexelFormat::kRgba16Float:
+ case ast::TexelFormat::kRgba32Float: {
+ return type_mgr.Get<sem::F32>();
+ }
+
+ case ast::TexelFormat::kNone:
+ break;
+ }
+
+ return nullptr;
+}
+
+} // namespace tint::sem
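SubtypeFor() above maps a texel format to the component type that loads and stores operate on (u32, i32 or f32), returning nullptr for kNone; the test fixture later in this diff resolves the subtype the same way before constructing the texture type. A small sketch, assuming the TestHelper's Types() accessor for the sem::Manager:

    // Resolve the storage subtype first, then build the texture type with it.
    auto* subtype = StorageTexture::SubtypeFor(ast::TexelFormat::kRgba32Float, Types());
    auto* tex = create<StorageTexture>(ast::TextureDimension::k2d,
                                       ast::TexelFormat::kRgba32Float,
                                       ast::Access::kReadWrite, subtype);
    // kRgba32Float is one of the float formats, so the subtype is sem::F32.
    EXPECT_TRUE(tex->type()->Is<F32>());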
diff --git a/chromium/third_party/dawn/src/tint/sem/storage_texture.h b/chromium/third_party/dawn/src/tint/sem/storage_texture.h
new file mode 100644
index 00000000000..68dbff95a5d
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/sem/storage_texture.h
@@ -0,0 +1,82 @@
+// Copyright 2020 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef SRC_TINT_SEM_STORAGE_TEXTURE_H_
+#define SRC_TINT_SEM_STORAGE_TEXTURE_H_
+
+#include <string>
+
+#include "src/tint/ast/access.h"
+#include "src/tint/ast/storage_texture.h"
+#include "src/tint/sem/texture.h"
+
+// Forward declarations
+namespace tint::sem {
+class Manager;
+} // namespace tint::sem
+
+namespace tint::sem {
+
+/// A storage texture type.
+class StorageTexture final : public Castable<StorageTexture, Texture> {
+ public:
+ /// Constructor
+ /// @param dim the dimensionality of the texture
+ /// @param format the texel format of the texture
+ /// @param access the access control type of the texture
+ /// @param subtype the storage subtype. Use SubtypeFor() to calculate this.
+ StorageTexture(ast::TextureDimension dim,
+ ast::TexelFormat format,
+ ast::Access access,
+ sem::Type* subtype);
+
+ /// Move constructor
+ StorageTexture(StorageTexture&&);
+ ~StorageTexture() override;
+
+ /// @returns a hash of the type.
+ size_t Hash() const override;
+
+ /// @param other the other type to compare against
+ /// @returns true if this type is equal to the given type
+ bool Equals(const Type& other) const override;
+
+ /// @returns the storage subtype
+ Type* type() const { return subtype_; }
+
+ /// @returns the texel format
+ ast::TexelFormat texel_format() const { return texel_format_; }
+
+ /// @returns the access control
+ ast::Access access() const { return access_; }
+
+ /// @param symbols the program's symbol table
+ /// @returns the name for this type that closely resembles how it would be
+ /// declared in WGSL.
+ std::string FriendlyName(const SymbolTable& symbols) const override;
+
+ /// @param format the storage texture image format
+ /// @param type_mgr the sem::Manager used to build the returned type
+ /// @returns the storage texture subtype for the given TexelFormat
+ static sem::Type* SubtypeFor(ast::TexelFormat format, sem::Manager& type_mgr);
+
+ private:
+ ast::TexelFormat const texel_format_;
+ ast::Access const access_;
+ Type* const subtype_;
+};
+
+} // namespace tint::sem
+
+#endif // SRC_TINT_SEM_STORAGE_TEXTURE_H_
diff --git a/chromium/third_party/dawn/src/tint/sem/storage_texture_test.cc b/chromium/third_party/dawn/src/tint/sem/storage_texture_test.cc
new file mode 100644
index 00000000000..8a9351ee064
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/sem/storage_texture_test.cc
@@ -0,0 +1,147 @@
+// Copyright 2020 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/tint/sem/storage_texture.h"
+
+#include "src/tint/sem/depth_texture.h"
+#include "src/tint/sem/external_texture.h"
+#include "src/tint/sem/sampled_texture.h"
+#include "src/tint/sem/test_helper.h"
+
+namespace tint::sem {
+namespace {
+
+struct StorageTextureTest : public TestHelper {
+ StorageTexture* Create(ast::TextureDimension dims, ast::TexelFormat fmt, ast::Access access) {
+ auto* subtype = StorageTexture::SubtypeFor(fmt, Types());
+ return create<StorageTexture>(dims, fmt, access, subtype);
+ }
+};
+
+TEST_F(StorageTextureTest, Creation) {
+ auto* a = Create(ast::TextureDimension::kCube, ast::TexelFormat::kRgba32Float,
+ ast::Access::kReadWrite);
+ auto* b = Create(ast::TextureDimension::kCube, ast::TexelFormat::kRgba32Float,
+ ast::Access::kReadWrite);
+ auto* c =
+ Create(ast::TextureDimension::k2d, ast::TexelFormat::kRgba32Float, ast::Access::kReadWrite);
+ auto* d =
+ Create(ast::TextureDimension::kCube, ast::TexelFormat::kR32Float, ast::Access::kReadWrite);
+ auto* e =
+ Create(ast::TextureDimension::kCube, ast::TexelFormat::kRgba32Float, ast::Access::kRead);
+
+ EXPECT_TRUE(a->type()->Is<F32>());
+ EXPECT_EQ(a->dim(), ast::TextureDimension::kCube);
+
+ EXPECT_EQ(a, b);
+ EXPECT_NE(a, c);
+ EXPECT_NE(a, d);
+ EXPECT_NE(a, e);
+}
+
+TEST_F(StorageTextureTest, Hash) {
+ auto* a = Create(ast::TextureDimension::kCube, ast::TexelFormat::kRgba32Float,
+ ast::Access::kReadWrite);
+ auto* b = Create(ast::TextureDimension::kCube, ast::TexelFormat::kRgba32Float,
+ ast::Access::kReadWrite);
+ auto* c =
+ Create(ast::TextureDimension::k2d, ast::TexelFormat::kRgba32Float, ast::Access::kReadWrite);
+ auto* d =
+ Create(ast::TextureDimension::kCube, ast::TexelFormat::kR32Float, ast::Access::kReadWrite);
+ auto* e =
+ Create(ast::TextureDimension::kCube, ast::TexelFormat::kRgba32Float, ast::Access::kRead);
+
+ EXPECT_EQ(a->Hash(), b->Hash());
+ EXPECT_NE(a->Hash(), c->Hash());
+ EXPECT_NE(a->Hash(), d->Hash());
+ EXPECT_NE(a->Hash(), e->Hash());
+}
+
+TEST_F(StorageTextureTest, Equals) {
+ auto* a = Create(ast::TextureDimension::kCube, ast::TexelFormat::kRgba32Float,
+ ast::Access::kReadWrite);
+ auto* b = Create(ast::TextureDimension::kCube, ast::TexelFormat::kRgba32Float,
+ ast::Access::kReadWrite);
+ auto* c =
+ Create(ast::TextureDimension::k2d, ast::TexelFormat::kRgba32Float, ast::Access::kReadWrite);
+ auto* d =
+ Create(ast::TextureDimension::kCube, ast::TexelFormat::kR32Float, ast::Access::kReadWrite);
+ auto* e =
+ Create(ast::TextureDimension::kCube, ast::TexelFormat::kRgba32Float, ast::Access::kRead);
+
+ EXPECT_TRUE(a->Equals(*b));
+ EXPECT_FALSE(a->Equals(*c));
+ EXPECT_FALSE(a->Equals(*d));
+ EXPECT_FALSE(a->Equals(*e));
+ EXPECT_FALSE(a->Equals(Void{}));
+}
+
+TEST_F(StorageTextureTest, Dim) {
+ auto* s = Create(ast::TextureDimension::k2dArray, ast::TexelFormat::kRgba32Float,
+ ast::Access::kReadWrite);
+ EXPECT_EQ(s->dim(), ast::TextureDimension::k2dArray);
+}
+
+TEST_F(StorageTextureTest, Format) {
+ auto* s = Create(ast::TextureDimension::k2dArray, ast::TexelFormat::kRgba32Float,
+ ast::Access::kReadWrite);
+ EXPECT_EQ(s->texel_format(), ast::TexelFormat::kRgba32Float);
+}
+
+TEST_F(StorageTextureTest, FriendlyName) {
+ auto* s = Create(ast::TextureDimension::k2dArray, ast::TexelFormat::kRgba32Float,
+ ast::Access::kReadWrite);
+ EXPECT_EQ(s->FriendlyName(Symbols()), "texture_storage_2d_array<rgba32float, read_write>");
+}
+
+TEST_F(StorageTextureTest, F32) {
+ Type* s = Create(ast::TextureDimension::k2dArray, ast::TexelFormat::kRgba32Float,
+ ast::Access::kReadWrite);
+
+ auto program = Build();
+
+ ASSERT_TRUE(program.IsValid()) << program.Diagnostics().str();
+ ASSERT_TRUE(s->Is<Texture>());
+ ASSERT_TRUE(s->Is<StorageTexture>());
+ EXPECT_TRUE(s->As<StorageTexture>()->type()->Is<F32>());
+}
+
+TEST_F(StorageTextureTest, U32) {
+ auto* subtype = sem::StorageTexture::SubtypeFor(ast::TexelFormat::kRg32Uint, Types());
+ Type* s = create<StorageTexture>(ast::TextureDimension::k2dArray, ast::TexelFormat::kRg32Uint,
+ ast::Access::kReadWrite, subtype);
+
+ auto program = Build();
+
+ ASSERT_TRUE(program.IsValid()) << program.Diagnostics().str();
+ ASSERT_TRUE(s->Is<Texture>());
+ ASSERT_TRUE(s->Is<StorageTexture>());
+ EXPECT_TRUE(s->As<StorageTexture>()->type()->Is<U32>());
+}
+
+TEST_F(StorageTextureTest, I32) {
+ auto* subtype = sem::StorageTexture::SubtypeFor(ast::TexelFormat::kRgba32Sint, Types());
+ Type* s = create<StorageTexture>(ast::TextureDimension::k2dArray, ast::TexelFormat::kRgba32Sint,
+ ast::Access::kReadWrite, subtype);
+
+ auto program = Build();
+
+ ASSERT_TRUE(program.IsValid()) << program.Diagnostics().str();
+ ASSERT_TRUE(s->Is<Texture>());
+ ASSERT_TRUE(s->Is<StorageTexture>());
+ EXPECT_TRUE(s->As<StorageTexture>()->type()->Is<I32>());
+}
+
+} // namespace
+} // namespace tint::sem
diff --git a/chromium/third_party/dawn/src/tint/sem/storage_texture_type.cc b/chromium/third_party/dawn/src/tint/sem/storage_texture_type.cc
deleted file mode 100644
index 1b017ddbf83..00000000000
--- a/chromium/third_party/dawn/src/tint/sem/storage_texture_type.cc
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright 2020 The Tint Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "src/tint/sem/storage_texture_type.h"
-
-#include "src/tint/program_builder.h"
-#include "src/tint/utils/hash.h"
-
-TINT_INSTANTIATE_TYPEINFO(tint::sem::StorageTexture);
-
-namespace tint::sem {
-
-StorageTexture::StorageTexture(ast::TextureDimension dim,
- ast::TexelFormat format,
- ast::Access access,
- sem::Type* subtype)
- : Base(dim), texel_format_(format), access_(access), subtype_(subtype) {}
-
-StorageTexture::StorageTexture(StorageTexture&&) = default;
-
-StorageTexture::~StorageTexture() = default;
-
-size_t StorageTexture::Hash() const {
- return utils::Hash(TypeInfo::Of<StorageTexture>().full_hashcode, dim(),
- texel_format_, access_);
-}
-
-bool StorageTexture::Equals(const sem::Type& other) const {
- if (auto* o = other.As<StorageTexture>()) {
- return o->dim() == dim() && o->texel_format_ == texel_format_ &&
- o->access_ == access_;
- }
- return false;
-}
-
-std::string StorageTexture::FriendlyName(const SymbolTable&) const {
- std::ostringstream out;
- out << "texture_storage_" << dim() << "<" << texel_format_ << ", " << access_
- << ">";
- return out.str();
-}
-
-sem::Type* StorageTexture::SubtypeFor(ast::TexelFormat format,
- sem::Manager& type_mgr) {
- switch (format) {
- case ast::TexelFormat::kR32Uint:
- case ast::TexelFormat::kRgba8Uint:
- case ast::TexelFormat::kRg32Uint:
- case ast::TexelFormat::kRgba16Uint:
- case ast::TexelFormat::kRgba32Uint: {
- return type_mgr.Get<sem::U32>();
- }
-
- case ast::TexelFormat::kR32Sint:
- case ast::TexelFormat::kRgba8Sint:
- case ast::TexelFormat::kRg32Sint:
- case ast::TexelFormat::kRgba16Sint:
- case ast::TexelFormat::kRgba32Sint: {
- return type_mgr.Get<sem::I32>();
- }
-
- case ast::TexelFormat::kRgba8Unorm:
- case ast::TexelFormat::kRgba8Snorm:
- case ast::TexelFormat::kR32Float:
- case ast::TexelFormat::kRg32Float:
- case ast::TexelFormat::kRgba16Float:
- case ast::TexelFormat::kRgba32Float: {
- return type_mgr.Get<sem::F32>();
- }
-
- case ast::TexelFormat::kNone:
- break;
- }
-
- return nullptr;
-}
-
-} // namespace tint::sem
diff --git a/chromium/third_party/dawn/src/tint/sem/storage_texture_type.h b/chromium/third_party/dawn/src/tint/sem/storage_texture_type.h
deleted file mode 100644
index 71569e78128..00000000000
--- a/chromium/third_party/dawn/src/tint/sem/storage_texture_type.h
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright 2020 The Tint Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef SRC_TINT_SEM_STORAGE_TEXTURE_TYPE_H_
-#define SRC_TINT_SEM_STORAGE_TEXTURE_TYPE_H_
-
-#include <string>
-
-#include "src/tint/ast/access.h"
-#include "src/tint/ast/storage_texture.h"
-#include "src/tint/sem/texture_type.h"
-
-// Forward declarations
-namespace tint::sem {
-class Manager;
-} // namespace tint::sem
-
-namespace tint::sem {
-
-/// A storage texture type.
-class StorageTexture final : public Castable<StorageTexture, Texture> {
- public:
- /// Constructor
- /// @param dim the dimensionality of the texture
- /// @param format the texel format of the texture
- /// @param access the access control type of the texture
- /// @param subtype the storage subtype. Use SubtypeFor() to calculate this.
- StorageTexture(ast::TextureDimension dim,
- ast::TexelFormat format,
- ast::Access access,
- sem::Type* subtype);
-
- /// Move constructor
- StorageTexture(StorageTexture&&);
- ~StorageTexture() override;
-
- /// @returns a hash of the type.
- size_t Hash() const override;
-
- /// @param other the other type to compare against
- /// @returns true if the this type is equal to the given type
- bool Equals(const Type& other) const override;
-
- /// @returns the storage subtype
- Type* type() const { return subtype_; }
-
- /// @returns the texel format
- ast::TexelFormat texel_format() const { return texel_format_; }
-
- /// @returns the access control
- ast::Access access() const { return access_; }
-
- /// @param symbols the program's symbol table
- /// @returns the name for this type that closely resembles how it would be
- /// declared in WGSL.
- std::string FriendlyName(const SymbolTable& symbols) const override;
-
- /// @param format the storage texture image format
- /// @param type_mgr the sem::Manager used to build the returned type
- /// @returns the storage texture subtype for the given TexelFormat
- static sem::Type* SubtypeFor(ast::TexelFormat format, sem::Manager& type_mgr);
-
- private:
- ast::TexelFormat const texel_format_;
- ast::Access const access_;
- Type* const subtype_;
-};
-
-} // namespace tint::sem
-
-#endif // SRC_TINT_SEM_STORAGE_TEXTURE_TYPE_H_
diff --git a/chromium/third_party/dawn/src/tint/sem/storage_texture_type_test.cc b/chromium/third_party/dawn/src/tint/sem/storage_texture_type_test.cc
deleted file mode 100644
index 73361ce14b3..00000000000
--- a/chromium/third_party/dawn/src/tint/sem/storage_texture_type_test.cc
+++ /dev/null
@@ -1,154 +0,0 @@
-// Copyright 2020 The Tint Authors->
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "src/tint/sem/storage_texture_type.h"
-
-#include "src/tint/sem/depth_texture_type.h"
-#include "src/tint/sem/external_texture_type.h"
-#include "src/tint/sem/sampled_texture_type.h"
-#include "src/tint/sem/test_helper.h"
-
-namespace tint::sem {
-namespace {
-
-struct StorageTextureTest : public TestHelper {
- StorageTexture* Create(ast::TextureDimension dims,
- ast::TexelFormat fmt,
- ast::Access access) {
- auto* subtype = StorageTexture::SubtypeFor(fmt, Types());
- return create<StorageTexture>(dims, fmt, access, subtype);
- }
-};
-
-TEST_F(StorageTextureTest, Creation) {
- auto* a = Create(ast::TextureDimension::kCube, ast::TexelFormat::kRgba32Float,
- ast::Access::kReadWrite);
- auto* b = Create(ast::TextureDimension::kCube, ast::TexelFormat::kRgba32Float,
- ast::Access::kReadWrite);
- auto* c = Create(ast::TextureDimension::k2d, ast::TexelFormat::kRgba32Float,
- ast::Access::kReadWrite);
- auto* d = Create(ast::TextureDimension::kCube, ast::TexelFormat::kR32Float,
- ast::Access::kReadWrite);
- auto* e = Create(ast::TextureDimension::kCube, ast::TexelFormat::kRgba32Float,
- ast::Access::kRead);
-
- EXPECT_TRUE(a->type()->Is<F32>());
- EXPECT_EQ(a->dim(), ast::TextureDimension::kCube);
-
- EXPECT_EQ(a, b);
- EXPECT_NE(a, c);
- EXPECT_NE(a, d);
- EXPECT_NE(a, e);
-}
-
-TEST_F(StorageTextureTest, Hash) {
- auto* a = Create(ast::TextureDimension::kCube, ast::TexelFormat::kRgba32Float,
- ast::Access::kReadWrite);
- auto* b = Create(ast::TextureDimension::kCube, ast::TexelFormat::kRgba32Float,
- ast::Access::kReadWrite);
- auto* c = Create(ast::TextureDimension::k2d, ast::TexelFormat::kRgba32Float,
- ast::Access::kReadWrite);
- auto* d = Create(ast::TextureDimension::kCube, ast::TexelFormat::kR32Float,
- ast::Access::kReadWrite);
- auto* e = Create(ast::TextureDimension::kCube, ast::TexelFormat::kRgba32Float,
- ast::Access::kRead);
-
- EXPECT_EQ(a->Hash(), b->Hash());
- EXPECT_NE(a->Hash(), c->Hash());
- EXPECT_NE(a->Hash(), d->Hash());
- EXPECT_NE(a->Hash(), e->Hash());
-}
-
-TEST_F(StorageTextureTest, Equals) {
- auto* a = Create(ast::TextureDimension::kCube, ast::TexelFormat::kRgba32Float,
- ast::Access::kReadWrite);
- auto* b = Create(ast::TextureDimension::kCube, ast::TexelFormat::kRgba32Float,
- ast::Access::kReadWrite);
- auto* c = Create(ast::TextureDimension::k2d, ast::TexelFormat::kRgba32Float,
- ast::Access::kReadWrite);
- auto* d = Create(ast::TextureDimension::kCube, ast::TexelFormat::kR32Float,
- ast::Access::kReadWrite);
- auto* e = Create(ast::TextureDimension::kCube, ast::TexelFormat::kRgba32Float,
- ast::Access::kRead);
-
- EXPECT_TRUE(a->Equals(*b));
- EXPECT_FALSE(a->Equals(*c));
- EXPECT_FALSE(a->Equals(*d));
- EXPECT_FALSE(a->Equals(*e));
- EXPECT_FALSE(a->Equals(Void{}));
-}
-
-TEST_F(StorageTextureTest, Dim) {
- auto* s = Create(ast::TextureDimension::k2dArray,
- ast::TexelFormat::kRgba32Float, ast::Access::kReadWrite);
- EXPECT_EQ(s->dim(), ast::TextureDimension::k2dArray);
-}
-
-TEST_F(StorageTextureTest, Format) {
- auto* s = Create(ast::TextureDimension::k2dArray,
- ast::TexelFormat::kRgba32Float, ast::Access::kReadWrite);
- EXPECT_EQ(s->texel_format(), ast::TexelFormat::kRgba32Float);
-}
-
-TEST_F(StorageTextureTest, FriendlyName) {
- auto* s = Create(ast::TextureDimension::k2dArray,
- ast::TexelFormat::kRgba32Float, ast::Access::kReadWrite);
- EXPECT_EQ(s->FriendlyName(Symbols()),
- "texture_storage_2d_array<rgba32float, read_write>");
-}
-
-TEST_F(StorageTextureTest, F32) {
- Type* s = Create(ast::TextureDimension::k2dArray,
- ast::TexelFormat::kRgba32Float, ast::Access::kReadWrite);
-
- auto program = Build();
-
- ASSERT_TRUE(program.IsValid()) << program.Diagnostics().str();
- ASSERT_TRUE(s->Is<Texture>());
- ASSERT_TRUE(s->Is<StorageTexture>());
- EXPECT_TRUE(s->As<StorageTexture>()->type()->Is<F32>());
-}
-
-TEST_F(StorageTextureTest, U32) {
- auto* subtype =
- sem::StorageTexture::SubtypeFor(ast::TexelFormat::kRg32Uint, Types());
- Type* s = create<StorageTexture>(ast::TextureDimension::k2dArray,
- ast::TexelFormat::kRg32Uint,
- ast::Access::kReadWrite, subtype);
-
- auto program = Build();
-
- ASSERT_TRUE(program.IsValid()) << program.Diagnostics().str();
- ASSERT_TRUE(s->Is<Texture>());
- ASSERT_TRUE(s->Is<StorageTexture>());
- EXPECT_TRUE(s->As<StorageTexture>()->type()->Is<U32>());
-}
-
-TEST_F(StorageTextureTest, I32) {
- auto* subtype =
- sem::StorageTexture::SubtypeFor(ast::TexelFormat::kRgba32Sint, Types());
- Type* s = create<StorageTexture>(ast::TextureDimension::k2dArray,
- ast::TexelFormat::kRgba32Sint,
- ast::Access::kReadWrite, subtype);
-
- auto program = Build();
-
- ASSERT_TRUE(program.IsValid()) << program.Diagnostics().str();
- ASSERT_TRUE(s->Is<Texture>());
- ASSERT_TRUE(s->Is<StorageTexture>());
- EXPECT_TRUE(s->As<StorageTexture>()->type()->Is<I32>());
-}
-
-} // namespace
-} // namespace tint::sem
diff --git a/chromium/third_party/dawn/src/tint/sem/struct.cc b/chromium/third_party/dawn/src/tint/sem/struct.cc
index ce3029d38e4..eb0583b03b0 100644
--- a/chromium/third_party/dawn/src/tint/sem/struct.cc
+++ b/chromium/third_party/dawn/src/tint/sem/struct.cc
@@ -40,127 +40,119 @@ Struct::Struct(const ast::Struct* declaration,
align_(align),
size_(size),
size_no_padding_(size_no_padding) {
- constructible_ = true;
- for (auto* member : members_) {
- if (!member->Type()->IsConstructible()) {
- constructible_ = false;
- break;
+ constructible_ = true;
+ for (auto* member : members_) {
+ if (!member->Type()->IsConstructible()) {
+ constructible_ = false;
+ break;
+ }
}
- }
}
Struct::~Struct() = default;
size_t Struct::Hash() const {
- return utils::Hash(TypeInfo::Of<Struct>().full_hashcode, name_);
+ return utils::Hash(TypeInfo::Of<Struct>().full_hashcode, name_);
}
bool Struct::Equals(const sem::Type& other) const {
- if (auto* o = other.As<Struct>()) {
- return o->name_ == name_;
- }
- return false;
+ if (auto* o = other.As<Struct>()) {
+ return o->name_ == name_;
+ }
+ return false;
}
const StructMember* Struct::FindMember(Symbol name) const {
- for (auto* member : members_) {
- if (member->Declaration()->symbol == name) {
- return member;
+ for (auto* member : members_) {
+ if (member->Declaration()->symbol == name) {
+ return member;
+ }
}
- }
- return nullptr;
+ return nullptr;
}
uint32_t Struct::Align() const {
- return align_;
+ return align_;
}
uint32_t Struct::Size() const {
- return size_;
+ return size_;
}
std::string Struct::FriendlyName(const SymbolTable& symbols) const {
- return symbols.NameFor(name_);
+ return symbols.NameFor(name_);
}
std::string Struct::Layout(const tint::SymbolTable& symbols) const {
- std::stringstream ss;
-
- auto member_name_of = [&](const sem::StructMember* sm) {
- return symbols.NameFor(sm->Declaration()->symbol);
- };
-
- if (Members().empty()) {
- return {};
- }
- const auto* const last_member = Members().back();
- const uint32_t last_member_struct_padding_offset =
- last_member->Offset() + last_member->Size();
-
- // Compute max widths to align output
- const auto offset_w =
- static_cast<int>(::log10(last_member_struct_padding_offset)) + 1;
- const auto size_w = static_cast<int>(::log10(Size())) + 1;
- const auto align_w = static_cast<int>(::log10(Align())) + 1;
-
- auto print_struct_begin_line = [&](size_t align, size_t size,
- std::string struct_name) {
- ss << "/* " << std::setw(offset_w) << " "
- << "align(" << std::setw(align_w) << align << ") size("
- << std::setw(size_w) << size << ") */ struct " << struct_name << " {\n";
- };
-
- auto print_struct_end_line = [&]() {
- ss << "/* "
- << std::setw(offset_w + size_w + align_w) << " "
- << "*/ };";
- };
-
- auto print_member_line = [&](size_t offset, size_t align, size_t size,
- std::string s) {
- ss << "/* offset(" << std::setw(offset_w) << offset << ") align("
- << std::setw(align_w) << align << ") size(" << std::setw(size_w) << size
- << ") */ " << s << ";\n";
- };
-
- print_struct_begin_line(Align(), Size(), UnwrapRef()->FriendlyName(symbols));
-
- for (size_t i = 0; i < Members().size(); ++i) {
- auto* const m = Members()[i];
-
- // Output field alignment padding, if any
- auto* const prev_member = (i == 0) ? nullptr : Members()[i - 1];
- if (prev_member) {
- uint32_t padding =
- m->Offset() - (prev_member->Offset() + prev_member->Size());
- if (padding > 0) {
- size_t padding_offset = m->Offset() - padding;
- print_member_line(padding_offset, 1, padding,
- "// -- implicit field alignment padding --");
- }
- }
+ std::stringstream ss;
+
+ auto member_name_of = [&](const sem::StructMember* sm) {
+ return symbols.NameFor(sm->Declaration()->symbol);
+ };
- // Output member
- std::string member_name = member_name_of(m);
- print_member_line(
- m->Offset(), m->Align(), m->Size(),
- member_name + " : " + m->Type()->UnwrapRef()->FriendlyName(symbols));
- }
+ if (Members().empty()) {
+ return {};
+ }
+ const auto* const last_member = Members().back();
+ const uint32_t last_member_struct_padding_offset = last_member->Offset() + last_member->Size();
+
+ // Compute max widths to align output
+ const auto offset_w = static_cast<int>(::log10(last_member_struct_padding_offset)) + 1;
+ const auto size_w = static_cast<int>(::log10(Size())) + 1;
+ const auto align_w = static_cast<int>(::log10(Align())) + 1;
+
+ auto print_struct_begin_line = [&](size_t align, size_t size, std::string struct_name) {
+ ss << "/* " << std::setw(offset_w) << " "
+ << "align(" << std::setw(align_w) << align << ") size(" << std::setw(size_w) << size
+ << ") */ struct " << struct_name << " {\n";
+ };
+
+ auto print_struct_end_line = [&]() {
+ ss << "/* " << std::setw(offset_w + size_w + align_w) << " "
+ << "*/ };";
+ };
+
+ auto print_member_line = [&](size_t offset, size_t align, size_t size, std::string s) {
+ ss << "/* offset(" << std::setw(offset_w) << offset << ") align(" << std::setw(align_w)
+ << align << ") size(" << std::setw(size_w) << size << ") */ " << s << ";\n";
+ };
+
+ print_struct_begin_line(Align(), Size(), UnwrapRef()->FriendlyName(symbols));
+
+ for (size_t i = 0; i < Members().size(); ++i) {
+ auto* const m = Members()[i];
+
+ // Output field alignment padding, if any
+ auto* const prev_member = (i == 0) ? nullptr : Members()[i - 1];
+ if (prev_member) {
+ uint32_t padding = m->Offset() - (prev_member->Offset() + prev_member->Size());
+ if (padding > 0) {
+ size_t padding_offset = m->Offset() - padding;
+ print_member_line(padding_offset, 1, padding,
+ "// -- implicit field alignment padding --");
+ }
+ }
+
+ // Output member
+ std::string member_name = member_name_of(m);
+ print_member_line(m->Offset(), m->Align(), m->Size(),
+ member_name + " : " + m->Type()->UnwrapRef()->FriendlyName(symbols));
+ }
- // Output struct size padding, if any
- uint32_t struct_padding = Size() - last_member_struct_padding_offset;
- if (struct_padding > 0) {
- print_member_line(last_member_struct_padding_offset, 1, struct_padding,
- "// -- implicit struct size padding --");
- }
+ // Output struct size padding, if any
+ uint32_t struct_padding = Size() - last_member_struct_padding_offset;
+ if (struct_padding > 0) {
+ print_member_line(last_member_struct_padding_offset, 1, struct_padding,
+ "// -- implicit struct size padding --");
+ }
- print_struct_end_line();
+ print_struct_end_line();
- return ss.str();
+ return ss.str();
}
bool Struct::IsConstructible() const {
- return constructible_;
+ return constructible_;
}
StructMember::StructMember(const ast::StructMember* declaration,
diff --git a/chromium/third_party/dawn/src/tint/sem/struct.h b/chromium/third_party/dawn/src/tint/sem/struct.h
index f5e8081dece..fe9169dc12c 100644
--- a/chromium/third_party/dawn/src/tint/sem/struct.h
+++ b/chromium/third_party/dawn/src/tint/sem/struct.h
@@ -43,189 +43,183 @@ using StructMemberList = std::vector<const StructMember*>;
/// Metadata to capture how a structure is used in a shader module.
enum class PipelineStageUsage {
- kVertexInput,
- kVertexOutput,
- kFragmentInput,
- kFragmentOutput,
- kComputeInput,
- kComputeOutput,
+ kVertexInput,
+ kVertexOutput,
+ kFragmentInput,
+ kFragmentOutput,
+ kComputeInput,
+ kComputeOutput,
};
/// Struct holds the semantic information for structures.
class Struct final : public Castable<Struct, Type> {
- public:
- /// Constructor
- /// @param declaration the AST structure declaration
- /// @param name the name of the structure
- /// @param members the structure members
- /// @param align the byte alignment of the structure
- /// @param size the byte size of the structure
- /// @param size_no_padding size of the members without the end of structure
- /// alignment padding
- Struct(const ast::Struct* declaration,
- Symbol name,
- StructMemberList members,
- uint32_t align,
- uint32_t size,
- uint32_t size_no_padding);
-
- /// Destructor
- ~Struct() override;
-
- /// @returns a hash of the type.
- size_t Hash() const override;
-
- /// @param other the other type to compare against
- /// @returns true if the this type is equal to the given type
- bool Equals(const Type& other) const override;
-
- /// @returns the struct
- const ast::Struct* Declaration() const { return declaration_; }
-
- /// @returns the name of the structure
- Symbol Name() const { return name_; }
-
- /// @returns the members of the structure
- const StructMemberList& Members() const { return members_; }
-
- /// @param name the member name to look for
- /// @returns the member with the given name, or nullptr if it was not found.
- const StructMember* FindMember(Symbol name) const;
-
- /// @returns the byte alignment of the structure
- /// @note this may differ from the alignment of a structure member of this
- /// structure type, if the member is annotated with the `@align(n)`
- /// attribute.
- uint32_t Align() const override;
-
- /// @returns the byte size of the structure
- /// @note this may differ from the size of a structure member of this
- /// structure type, if the member is annotated with the `@size(n)`
- /// attribute.
- uint32_t Size() const override;
-
- /// @returns the byte size of the members without the end of structure
- /// alignment padding
- uint32_t SizeNoPadding() const { return size_no_padding_; }
-
- /// Adds the StorageClass usage to the structure.
- /// @param usage the storage usage
- void AddUsage(ast::StorageClass usage) {
- storage_class_usage_.emplace(usage);
- }
-
- /// @returns the set of storage class uses of this structure
- const std::unordered_set<ast::StorageClass>& StorageClassUsage() const {
- return storage_class_usage_;
- }
-
- /// @param usage the ast::StorageClass usage type to query
- /// @returns true iff this structure has been used as the given storage class
- bool UsedAs(ast::StorageClass usage) const {
- return storage_class_usage_.count(usage) > 0;
- }
-
- /// @returns true iff this structure has been used by storage class that's
- /// host-shareable.
- bool IsHostShareable() const {
- for (auto sc : storage_class_usage_) {
- if (ast::IsHostShareable(sc)) {
- return true;
- }
+ public:
+ /// Constructor
+ /// @param declaration the AST structure declaration
+ /// @param name the name of the structure
+ /// @param members the structure members
+ /// @param align the byte alignment of the structure
+ /// @param size the byte size of the structure
+ /// @param size_no_padding size of the members without the end of structure
+ /// alignment padding
+ Struct(const ast::Struct* declaration,
+ Symbol name,
+ StructMemberList members,
+ uint32_t align,
+ uint32_t size,
+ uint32_t size_no_padding);
+
+ /// Destructor
+ ~Struct() override;
+
+ /// @returns a hash of the type.
+ size_t Hash() const override;
+
+ /// @param other the other type to compare against
+ /// @returns true if the this type is equal to the given type
+ bool Equals(const Type& other) const override;
+
+ /// @returns the struct
+ const ast::Struct* Declaration() const { return declaration_; }
+
+ /// @returns the name of the structure
+ Symbol Name() const { return name_; }
+
+ /// @returns the members of the structure
+ const StructMemberList& Members() const { return members_; }
+
+ /// @param name the member name to look for
+ /// @returns the member with the given name, or nullptr if it was not found.
+ const StructMember* FindMember(Symbol name) const;
+
+ /// @returns the byte alignment of the structure
+ /// @note this may differ from the alignment of a structure member of this
+ /// structure type, if the member is annotated with the `@align(n)`
+ /// attribute.
+ uint32_t Align() const override;
+
+ /// @returns the byte size of the structure
+ /// @note this may differ from the size of a structure member of this
+ /// structure type, if the member is annotated with the `@size(n)`
+ /// attribute.
+ uint32_t Size() const override;
+
+ /// @returns the byte size of the members without the end of structure
+ /// alignment padding
+ uint32_t SizeNoPadding() const { return size_no_padding_; }
+
+ /// Adds the StorageClass usage to the structure.
+ /// @param usage the storage usage
+ void AddUsage(ast::StorageClass usage) { storage_class_usage_.emplace(usage); }
+
+ /// @returns the set of storage class uses of this structure
+ const std::unordered_set<ast::StorageClass>& StorageClassUsage() const {
+ return storage_class_usage_;
}
- return false;
- }
-
- /// Adds the pipeline stage usage to the structure.
- /// @param usage the storage usage
- void AddUsage(PipelineStageUsage usage) {
- pipeline_stage_uses_.emplace(usage);
- }
-
- /// @returns the set of entry point uses of this structure
- const std::unordered_set<PipelineStageUsage>& PipelineStageUses() const {
- return pipeline_stage_uses_;
- }
-
- /// @param symbols the program's symbol table
- /// @returns the name for this type that closely resembles how it would be
- /// declared in WGSL.
- std::string FriendlyName(const SymbolTable& symbols) const override;
-
- /// @param symbols the program's symbol table
- /// @returns a multiline string that describes the layout of this struct,
- /// including size and alignment information.
- std::string Layout(const tint::SymbolTable& symbols) const;
-
- /// @returns true if constructible as per
- /// https://gpuweb.github.io/gpuweb/wgsl/#constructible-types
- bool IsConstructible() const override;
-
- private:
- uint64_t LargestMemberBaseAlignment(MemoryLayout mem_layout) const;
-
- ast::Struct const* const declaration_;
- const Symbol name_;
- const StructMemberList members_;
- const uint32_t align_;
- const uint32_t size_;
- const uint32_t size_no_padding_;
- std::unordered_set<ast::StorageClass> storage_class_usage_;
- std::unordered_set<PipelineStageUsage> pipeline_stage_uses_;
- bool constructible_;
+
+ /// @param usage the ast::StorageClass usage type to query
+ /// @returns true iff this structure has been used as the given storage class
+ bool UsedAs(ast::StorageClass usage) const { return storage_class_usage_.count(usage) > 0; }
+
+ /// @returns true iff this structure has been used by a storage class that is
+ /// host-shareable.
+ bool IsHostShareable() const {
+ for (auto sc : storage_class_usage_) {
+ if (ast::IsHostShareable(sc)) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ /// Adds the pipeline stage usage to the structure.
+ /// @param usage the storage usage
+ void AddUsage(PipelineStageUsage usage) { pipeline_stage_uses_.emplace(usage); }
+
+ /// @returns the set of entry point uses of this structure
+ const std::unordered_set<PipelineStageUsage>& PipelineStageUses() const {
+ return pipeline_stage_uses_;
+ }
+
+ /// @param symbols the program's symbol table
+ /// @returns the name for this type that closely resembles how it would be
+ /// declared in WGSL.
+ std::string FriendlyName(const SymbolTable& symbols) const override;
+
+ /// @param symbols the program's symbol table
+ /// @returns a multiline string that describes the layout of this struct,
+ /// including size and alignment information.
+ std::string Layout(const tint::SymbolTable& symbols) const;
+
+ /// @returns true if constructible as per
+ /// https://gpuweb.github.io/gpuweb/wgsl/#constructible-types
+ bool IsConstructible() const override;
+
+ private:
+ uint64_t LargestMemberBaseAlignment(MemoryLayout mem_layout) const;
+
+ ast::Struct const* const declaration_;
+ const Symbol name_;
+ const StructMemberList members_;
+ const uint32_t align_;
+ const uint32_t size_;
+ const uint32_t size_no_padding_;
+ std::unordered_set<ast::StorageClass> storage_class_usage_;
+ std::unordered_set<PipelineStageUsage> pipeline_stage_uses_;
+ bool constructible_;
};
/// StructMember holds the semantic information for structure members.
class StructMember : public Castable<StructMember, Node> {
- public:
- /// Constructor
- /// @param declaration the AST declaration node
- /// @param name the name of the structure
- /// @param type the type of the member
- /// @param index the index of the member in the structure
- /// @param offset the byte offset from the base of the structure
- /// @param align the byte alignment of the member
- /// @param size the byte size of the member
- StructMember(const ast::StructMember* declaration,
- Symbol name,
- sem::Type* type,
- uint32_t index,
- uint32_t offset,
- uint32_t align,
- uint32_t size);
-
- /// Destructor
- ~StructMember() override;
-
- /// @returns the AST declaration node
- const ast::StructMember* Declaration() const { return declaration_; }
-
- /// @returns the name of the structure
- Symbol Name() const { return name_; }
-
- /// @returns the type of the member
- sem::Type* Type() const { return type_; }
-
- /// @returns the member index
- uint32_t Index() const { return index_; }
-
- /// @returns byte offset from base of structure
- uint32_t Offset() const { return offset_; }
-
- /// @returns the alignment of the member in bytes
- uint32_t Align() const { return align_; }
-
- /// @returns byte size
- uint32_t Size() const { return size_; }
-
- private:
- const ast::StructMember* const declaration_;
- const Symbol name_;
- sem::Type* const type_;
- const uint32_t index_;
- const uint32_t offset_;
- const uint32_t align_;
- const uint32_t size_;
+ public:
+ /// Constructor
+ /// @param declaration the AST declaration node
+ /// @param name the name of the structure member
+ /// @param type the type of the member
+ /// @param index the index of the member in the structure
+ /// @param offset the byte offset from the base of the structure
+ /// @param align the byte alignment of the member
+ /// @param size the byte size of the member
+ StructMember(const ast::StructMember* declaration,
+ Symbol name,
+ sem::Type* type,
+ uint32_t index,
+ uint32_t offset,
+ uint32_t align,
+ uint32_t size);
+
+ /// Destructor
+ ~StructMember() override;
+
+ /// @returns the AST declaration node
+ const ast::StructMember* Declaration() const { return declaration_; }
+
+ /// @returns the name of the structure member
+ Symbol Name() const { return name_; }
+
+ /// @returns the type of the member
+ sem::Type* Type() const { return type_; }
+
+ /// @returns the member index
+ uint32_t Index() const { return index_; }
+
+ /// @returns byte offset from base of structure
+ uint32_t Offset() const { return offset_; }
+
+ /// @returns the alignment of the member in bytes
+ uint32_t Align() const { return align_; }
+
+ /// @returns byte size
+ uint32_t Size() const { return size_; }
+
+ private:
+ const ast::StructMember* const declaration_;
+ const Symbol name_;
+ sem::Type* const type_;
+ const uint32_t index_;
+ const uint32_t offset_;
+ const uint32_t align_;
+ const uint32_t size_;
};
} // namespace tint::sem
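
To make the Layout() helper declared above concrete: for a WGSL struct holding an f32 followed by a vec3<f32>, the returned string reads roughly as below. This is derived from the print_* lambdas in struct.cc rather than captured output, so the column padding is approximate:

    /*            align(16) size(32) */ struct S {
    /* offset( 0) align( 4) size( 4) */ a : f32;
    /* offset( 4) align( 1) size(12) */ // -- implicit field alignment padding --;
    /* offset(16) align(16) size(12) */ b : vec3<f32>;
    /* offset(28) align( 1) size( 4) */ // -- implicit struct size padding --;
    /*                               */ };
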
diff --git a/chromium/third_party/dawn/src/tint/sem/switch_statement.cc b/chromium/third_party/dawn/src/tint/sem/switch_statement.cc
index 9800ed61aeb..ed3942dce4b 100644
--- a/chromium/third_party/dawn/src/tint/sem/switch_statement.cc
+++ b/chromium/third_party/dawn/src/tint/sem/switch_statement.cc
@@ -25,27 +25,27 @@ SwitchStatement::SwitchStatement(const ast::SwitchStatement* declaration,
const CompoundStatement* parent,
const sem::Function* function)
: Base(declaration, parent, function) {
- TINT_ASSERT(Semantic, parent);
- TINT_ASSERT(Semantic, function);
+ TINT_ASSERT(Semantic, parent);
+ TINT_ASSERT(Semantic, function);
}
SwitchStatement::~SwitchStatement() = default;
const ast::SwitchStatement* SwitchStatement::Declaration() const {
- return static_cast<const ast::SwitchStatement*>(Base::Declaration());
+ return static_cast<const ast::SwitchStatement*>(Base::Declaration());
}
CaseStatement::CaseStatement(const ast::CaseStatement* declaration,
const CompoundStatement* parent,
const sem::Function* function)
: Base(declaration, parent, function) {
- TINT_ASSERT(Semantic, parent);
- TINT_ASSERT(Semantic, function);
+ TINT_ASSERT(Semantic, parent);
+ TINT_ASSERT(Semantic, function);
}
CaseStatement::~CaseStatement() = default;
const ast::CaseStatement* CaseStatement::Declaration() const {
- return static_cast<const ast::CaseStatement*>(Base::Declaration());
+ return static_cast<const ast::CaseStatement*>(Base::Declaration());
}
} // namespace tint::sem
diff --git a/chromium/third_party/dawn/src/tint/sem/switch_statement.h b/chromium/third_party/dawn/src/tint/sem/switch_statement.h
index b56bc8598b3..a6b5c00f927 100644
--- a/chromium/third_party/dawn/src/tint/sem/switch_statement.h
+++ b/chromium/third_party/dawn/src/tint/sem/switch_statement.h
@@ -15,6 +15,8 @@
#ifndef SRC_TINT_SEM_SWITCH_STATEMENT_H_
#define SRC_TINT_SEM_SWITCH_STATEMENT_H_
+#include <vector>
+
#include "src/tint/sem/block_statement.h"
// Forward declarations
@@ -22,53 +24,72 @@ namespace tint::ast {
class CaseStatement;
class SwitchStatement;
} // namespace tint::ast
+namespace tint::sem {
+class CaseStatement;
+class Expression;
+} // namespace tint::sem
namespace tint::sem {
/// Holds semantic information about a switch statement
-class SwitchStatement final
- : public Castable<SwitchStatement, CompoundStatement> {
- public:
- /// Constructor
- /// @param declaration the AST node for this switch statement
- /// @param parent the owning statement
- /// @param function the owning function
- SwitchStatement(const ast::SwitchStatement* declaration,
- const CompoundStatement* parent,
- const sem::Function* function);
+class SwitchStatement final : public Castable<SwitchStatement, CompoundStatement> {
+ public:
+ /// Constructor
+ /// @param declaration the AST node for this switch statement
+ /// @param parent the owning statement
+ /// @param function the owning function
+ SwitchStatement(const ast::SwitchStatement* declaration,
+ const CompoundStatement* parent,
+ const sem::Function* function);
+
+ /// Destructor
+ ~SwitchStatement() override;
- /// Destructor
- ~SwitchStatement() override;
+ /// @return the AST node for this statement
+ const ast::SwitchStatement* Declaration() const;
- /// @return the AST node for this statement
- const ast::SwitchStatement* Declaration() const;
+ /// @returns the case statements for this switch
+ std::vector<const CaseStatement*>& Cases() { return cases_; }
+
+ /// @returns the case statements for this switch
+ const std::vector<const CaseStatement*>& Cases() const { return cases_; }
+
+ private:
+ std::vector<const CaseStatement*> cases_;
};
/// Holds semantic information about a switch case statement
class CaseStatement final : public Castable<CaseStatement, CompoundStatement> {
- public:
- /// Constructor
- /// @param declaration the AST node for this case statement
- /// @param parent the owning statement
- /// @param function the owning function
- CaseStatement(const ast::CaseStatement* declaration,
- const CompoundStatement* parent,
- const sem::Function* function);
+ public:
+ /// Constructor
+ /// @param declaration the AST node for this case statement
+ /// @param parent the owning statement
+ /// @param function the owning function
+ CaseStatement(const ast::CaseStatement* declaration,
+ const CompoundStatement* parent,
+ const sem::Function* function);
+
+ /// Destructor
+ ~CaseStatement() override;
+
+ /// @return the AST node for this statement
+ const ast::CaseStatement* Declaration() const;
- /// Destructor
- ~CaseStatement() override;
+ /// @param body the case body block statement
+ void SetBlock(const BlockStatement* body) { body_ = body; }
- /// @return the AST node for this statement
- const ast::CaseStatement* Declaration() const;
+ /// @returns the case body block statement
+ const BlockStatement* Body() const { return body_; }
- /// @param body the case body block statement
- void SetBlock(const BlockStatement* body) { body_ = body; }
+ /// @returns the selectors for the case
+ std::vector<const Expression*>& Selectors() { return selectors_; }
- /// @returns the case body block statement
- const BlockStatement* Body() const { return body_; }
+ /// @returns the selectors for the case
+ const std::vector<const Expression*>& Selectors() const { return selectors_; }
- private:
- const BlockStatement* body_ = nullptr;
+ private:
+ const BlockStatement* body_ = nullptr;
+ std::vector<const Expression*> selectors_;
};
} // namespace tint::sem
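
The new Cases() and Selectors() accessors give downstream passes a resolved view of a switch without re-walking the AST. A hedged consumer-side sketch follows; the function name is illustrative, and the assumption that a default case carries an empty selector list follows the AST convention rather than anything stated in this header:

    void VisitSwitch(const tint::sem::SwitchStatement* sw) {
        for (const tint::sem::CaseStatement* c : sw->Cases()) {
            if (c->Selectors().empty()) {
                // Assumed to be the default case.
            }
            const tint::sem::BlockStatement* body = c->Body();
            (void)body;  // e.g. recurse into the case body here
        }
    }
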
diff --git a/chromium/third_party/dawn/src/tint/sem/test_helper.h b/chromium/third_party/dawn/src/tint/sem/test_helper.h
index 412e36a61c8..e1b4eb33457 100644
--- a/chromium/third_party/dawn/src/tint/sem/test_helper.h
+++ b/chromium/third_party/dawn/src/tint/sem/test_helper.h
@@ -25,17 +25,17 @@ namespace tint::sem {
/// Helper class for testing
template <typename BASE>
class TestHelperBase : public BASE, public ProgramBuilder {
- public:
- /// Builds and returns the program. Must only be called once per test
- /// @return the built program
- Program Build() {
- diag::Formatter formatter;
- [&]() {
- ASSERT_TRUE(IsValid()) << "Builder program is not valid\n"
- << formatter.format(Diagnostics());
- }();
- return Program(std::move(*this));
- }
+ public:
+ /// Builds and returns the program. Must only be called once per test
+ /// @return the built program
+ Program Build() {
+ diag::Formatter formatter;
+ [&]() {
+ ASSERT_TRUE(IsValid()) << "Builder program is not valid\n"
+ << formatter.format(Diagnostics());
+ }();
+ return Program(std::move(*this));
+ }
};
using TestHelper = TestHelperBase<testing::Test>;
@@ -44,4 +44,16 @@ using TestParamHelper = TestHelperBase<testing::TestWithParam<T>>;
} // namespace tint::sem
+/// Helper macro for testing that a semantic type was as expected
+#define EXPECT_TYPE(GOT, EXPECT) \
+ do { \
+ const sem::Type* got = GOT; \
+ const sem::Type* expect = EXPECT; \
+ if (got != expect) { \
+ ADD_FAILURE() << #GOT " != " #EXPECT "\n" \
+ << " " #GOT ": " << FriendlyName(got) << "\n" \
+ << " " #EXPECT ": " << FriendlyName(expect); \
+ } \
+ } while (false)
+
#endif // SRC_TINT_SEM_TEST_HELPER_H_
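
As a usage sketch for the new EXPECT_TYPE macro: the sem tests' create<>() de-duplicates semantic types through the type manager (the Creation test above relies on this when it checks EXPECT_EQ(a, b) on pointers), so two requests for the same type compare equal by address, and a mismatch reports both FriendlyName()s. Illustrative test, assumed to live alongside the other tests in namespace tint::sem, not part of the patch:

    TEST_F(TestHelper, ExpectTypeSketch) {
        auto* a = create<sem::I32>();
        auto* b = create<sem::I32>();
        EXPECT_TYPE(a, b);  // same de-duplicated sem::I32 instance, so this passes
    }
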
diff --git a/chromium/third_party/dawn/src/tint/sem/texture_type.cc b/chromium/third_party/dawn/src/tint/sem/texture.cc
index e14b606c84c..02fc918fe25 100644
--- a/chromium/third_party/dawn/src/tint/sem/texture_type.cc
+++ b/chromium/third_party/dawn/src/tint/sem/texture.cc
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-#include "src/tint/sem/texture_type.h"
+#include "src/tint/sem/texture.h"
TINT_INSTANTIATE_TYPEINFO(tint::sem::Texture);
diff --git a/chromium/third_party/dawn/src/tint/sem/texture_type.h b/chromium/third_party/dawn/src/tint/sem/texture.h
index 9aa333065b8..67dd026d2cf 100644
--- a/chromium/third_party/dawn/src/tint/sem/texture_type.h
+++ b/chromium/third_party/dawn/src/tint/sem/texture.h
@@ -12,8 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-#ifndef SRC_TINT_SEM_TEXTURE_TYPE_H_
-#define SRC_TINT_SEM_TEXTURE_TYPE_H_
+#ifndef SRC_TINT_SEM_TEXTURE_H_
+#define SRC_TINT_SEM_TEXTURE_H_
#include "src/tint/ast/texture.h"
#include "src/tint/sem/type.h"
@@ -22,21 +22,21 @@ namespace tint::sem {
/// A texture type.
class Texture : public Castable<Texture, Type> {
- public:
- /// Constructor
- /// @param dim the dimensionality of the texture
- explicit Texture(ast::TextureDimension dim);
- /// Move constructor
- Texture(Texture&&);
- ~Texture() override;
-
- /// @returns the texture dimension
- ast::TextureDimension dim() const { return dim_; }
-
- private:
- ast::TextureDimension const dim_;
+ public:
+ /// Constructor
+ /// @param dim the dimensionality of the texture
+ explicit Texture(ast::TextureDimension dim);
+ /// Move constructor
+ Texture(Texture&&);
+ ~Texture() override;
+
+ /// @returns the texture dimension
+ ast::TextureDimension dim() const { return dim_; }
+
+ private:
+ ast::TextureDimension const dim_;
};
} // namespace tint::sem
-#endif // SRC_TINT_SEM_TEXTURE_TYPE_H_
+#endif // SRC_TINT_SEM_TEXTURE_H_
diff --git a/chromium/third_party/dawn/src/tint/sem/texture_type_test.cc b/chromium/third_party/dawn/src/tint/sem/texture_test.cc
index 8737415d928..c948c32e251 100644
--- a/chromium/third_party/dawn/src/tint/sem/texture_type_test.cc
+++ b/chromium/third_party/dawn/src/tint/sem/texture_test.cc
@@ -12,9 +12,9 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-#include "src/tint/sem/texture_type.h"
+#include "src/tint/sem/texture.h"
-#include "src/tint/sem/sampled_texture_type.h"
+#include "src/tint/sem/sampled_texture.h"
#include "src/tint/sem/test_helper.h"
namespace tint::sem {
@@ -23,12 +23,12 @@ namespace {
using TextureTypeDimTest = TestParamHelper<ast::TextureDimension>;
TEST_P(TextureTypeDimTest, DimMustMatch) {
- // Check that the dim() query returns the right dimensionality.
- F32 f32;
- // TextureType is an abstract class, so use concrete class
- // SampledTexture in its stead.
- SampledTexture st(GetParam(), &f32);
- EXPECT_EQ(st.dim(), GetParam());
+ // Check that the dim() query returns the right dimensionality.
+ F32 f32;
+ // TextureType is an abstract class, so use concrete class
+ // SampledTexture in its stead.
+ SampledTexture st(GetParam(), &f32);
+ EXPECT_EQ(st.dim(), GetParam());
}
INSTANTIATE_TEST_SUITE_P(Dimensions,
diff --git a/chromium/third_party/dawn/src/tint/sem/type.cc b/chromium/third_party/dawn/src/tint/sem/type.cc
index f8c32e7ad13..40666e33412 100644
--- a/chromium/third_party/dawn/src/tint/sem/type.cc
+++ b/chromium/third_party/dawn/src/tint/sem/type.cc
@@ -14,16 +14,20 @@
#include "src/tint/sem/type.h"
-#include "src/tint/sem/bool_type.h"
-#include "src/tint/sem/f32_type.h"
-#include "src/tint/sem/i32_type.h"
-#include "src/tint/sem/matrix_type.h"
-#include "src/tint/sem/pointer_type.h"
-#include "src/tint/sem/reference_type.h"
-#include "src/tint/sem/sampler_type.h"
-#include "src/tint/sem/texture_type.h"
-#include "src/tint/sem/u32_type.h"
-#include "src/tint/sem/vector_type.h"
+#include "src/tint/sem/abstract_float.h"
+#include "src/tint/sem/abstract_int.h"
+#include "src/tint/sem/array.h"
+#include "src/tint/sem/bool.h"
+#include "src/tint/sem/f16.h"
+#include "src/tint/sem/f32.h"
+#include "src/tint/sem/i32.h"
+#include "src/tint/sem/matrix.h"
+#include "src/tint/sem/pointer.h"
+#include "src/tint/sem/reference.h"
+#include "src/tint/sem/sampler.h"
+#include "src/tint/sem/texture.h"
+#include "src/tint/sem/u32.h"
+#include "src/tint/sem/vector.h"
TINT_INSTANTIATE_TYPEINFO(tint::sem::Type);
@@ -36,121 +40,218 @@ Type::Type(Type&&) = default;
Type::~Type() = default;
const Type* Type::UnwrapPtr() const {
- auto* type = this;
- while (auto* ptr = type->As<sem::Pointer>()) {
- type = ptr->StoreType();
- }
- return type;
+ auto* type = this;
+ while (auto* ptr = type->As<sem::Pointer>()) {
+ type = ptr->StoreType();
+ }
+ return type;
}
const Type* Type::UnwrapRef() const {
- auto* type = this;
- if (auto* ref = type->As<sem::Reference>()) {
- type = ref->StoreType();
- }
- return type;
+ auto* type = this;
+ if (auto* ref = type->As<sem::Reference>()) {
+ type = ref->StoreType();
+ }
+ return type;
}
uint32_t Type::Size() const {
- return 0;
+ return 0;
}
uint32_t Type::Align() const {
- return 0;
+ return 0;
}
bool Type::IsConstructible() const {
- return false;
+ return false;
}
bool Type::is_scalar() const {
- return IsAnyOf<F32, U32, I32, Bool>();
+ return IsAnyOf<F16, F32, U32, I32, Bool>();
+}
+
+bool Type::is_abstract_or_scalar() const {
+ return IsAnyOf<F16, F32, U32, I32, Bool, AbstractNumeric>();
}
bool Type::is_numeric_scalar() const {
- return IsAnyOf<F32, U32, I32>();
+ return IsAnyOf<F16, F32, U32, I32>();
}
bool Type::is_float_scalar() const {
- return Is<F32>();
+ return IsAnyOf<F16, F32>();
}
bool Type::is_float_matrix() const {
- return Is([](const Matrix* m) { return m->type()->is_float_scalar(); });
+ return Is([](const Matrix* m) { return m->type()->is_float_scalar(); });
}
bool Type::is_square_float_matrix() const {
- return Is([](const Matrix* m) {
- return m->type()->is_float_scalar() && m->rows() == m->columns();
- });
+ return Is(
+ [](const Matrix* m) { return m->type()->is_float_scalar() && m->rows() == m->columns(); });
}
bool Type::is_float_vector() const {
- return Is([](const Vector* v) { return v->type()->is_float_scalar(); });
+ return Is([](const Vector* v) { return v->type()->is_float_scalar(); });
}
bool Type::is_float_scalar_or_vector() const {
- return is_float_scalar() || is_float_vector();
+ return is_float_scalar() || is_float_vector();
}
bool Type::is_float_scalar_or_vector_or_matrix() const {
- return is_float_scalar() || is_float_vector() || is_float_matrix();
+ return is_float_scalar() || is_float_vector() || is_float_matrix();
}
bool Type::is_integer_scalar() const {
- return IsAnyOf<U32, I32>();
+ return IsAnyOf<U32, I32>();
}
bool Type::is_signed_integer_scalar() const {
- return Is<I32>();
+ return Is<I32>();
}
bool Type::is_unsigned_integer_scalar() const {
- return Is<U32>();
+ return Is<U32>();
}
bool Type::is_signed_integer_vector() const {
- return Is([](const Vector* v) { return v->type()->Is<I32>(); });
+ return Is([](const Vector* v) { return v->type()->Is<I32>(); });
}
bool Type::is_unsigned_integer_vector() const {
- return Is([](const Vector* v) { return v->type()->Is<U32>(); });
+ return Is([](const Vector* v) { return v->type()->Is<U32>(); });
}
bool Type::is_unsigned_scalar_or_vector() const {
- return Is<U32>() || is_unsigned_integer_vector();
+ return Is<U32>() || is_unsigned_integer_vector();
}
bool Type::is_signed_scalar_or_vector() const {
- return Is<I32>() || is_signed_integer_vector();
+ return Is<I32>() || is_signed_integer_vector();
}
bool Type::is_integer_scalar_or_vector() const {
- return is_unsigned_scalar_or_vector() || is_signed_scalar_or_vector();
+ return is_unsigned_scalar_or_vector() || is_signed_scalar_or_vector();
}
bool Type::is_bool_vector() const {
- return Is([](const Vector* v) { return v->type()->Is<Bool>(); });
+ return Is([](const Vector* v) { return v->type()->Is<Bool>(); });
}
bool Type::is_bool_scalar_or_vector() const {
- return Is<Bool>() || is_bool_vector();
+ return Is<Bool>() || is_bool_vector();
}
bool Type::is_numeric_vector() const {
- return Is([](const Vector* v) { return v->type()->is_numeric_scalar(); });
+ return Is([](const Vector* v) { return v->type()->is_numeric_scalar(); });
}
bool Type::is_scalar_vector() const {
- return Is([](const Vector* v) { return v->type()->is_scalar(); });
+ return Is([](const Vector* v) { return v->type()->is_scalar(); });
}
bool Type::is_numeric_scalar_or_vector() const {
- return is_numeric_scalar() || is_numeric_vector();
+ return is_numeric_scalar() || is_numeric_vector();
}
bool Type::is_handle() const {
- return IsAnyOf<Sampler, Texture>();
+ return IsAnyOf<Sampler, Texture>();
+}
+
+uint32_t Type::ConversionRank(const Type* from, const Type* to) {
+ if (from->UnwrapRef() == to) {
+ return 0;
+ }
+ return Switch(
+ from,
+ [&](const AbstractFloat*) {
+ return Switch(
+ to, //
+ [&](const F32*) { return 1; }, //
+ [&](const F16*) { return 2; }, //
+ [&](Default) { return kNoConversion; });
+ },
+ [&](const AbstractInt*) {
+ return Switch(
+ to, //
+ [&](const I32*) { return 3; }, //
+ [&](const U32*) { return 4; }, //
+ [&](const AbstractFloat*) { return 5; }, //
+ [&](const F32*) { return 6; }, //
+ [&](const F16*) { return 7; }, //
+ [&](Default) { return kNoConversion; });
+ },
+ [&](const Vector* from_vec) {
+ if (auto* to_vec = to->As<Vector>()) {
+ if (from_vec->Width() == to_vec->Width()) {
+ return ConversionRank(from_vec->type(), to_vec->type());
+ }
+ }
+ return kNoConversion;
+ },
+ [&](const Matrix* from_mat) {
+ if (auto* to_mat = to->As<Matrix>()) {
+ if (from_mat->columns() == to_mat->columns() &&
+ from_mat->rows() == to_mat->rows()) {
+ return ConversionRank(from_mat->type(), to_mat->type());
+ }
+ }
+ return kNoConversion;
+ },
+ [&](Default) { return kNoConversion; });
+}
+
+const Type* Type::ElementOf(const Type* ty, uint32_t* count /* = nullptr */) {
+ if (ty->is_abstract_or_scalar()) {
+ if (count) {
+ *count = 1;
+ }
+ return ty;
+ }
+ return Switch(
+ ty, //
+ [&](const Vector* v) {
+ if (count) {
+ *count = v->Width();
+ }
+ return v->type();
+ },
+ [&](const Matrix* m) {
+ if (count) {
+ *count = m->columns() * m->rows();
+ }
+ return m->type();
+ },
+ [&](const Array* a) {
+ if (count) {
+ *count = a->Count();
+ }
+ return a->ElemType();
+ });
+}
+
+const sem::Type* Type::Common(Type const* const* types, size_t count) {
+ if (count == 0) {
+ return nullptr;
+ }
+ const auto* common = types[0];
+ for (size_t i = 1; i < count; i++) {
+ auto* ty = types[i];
+ if (ty == common) {
+ continue; // ty == common
+ }
+ if (sem::Type::ConversionRank(ty, common) != sem::Type::kNoConversion) {
+ continue; // ty can be converted to common.
+ }
+ if (sem::Type::ConversionRank(common, ty) != sem::Type::kNoConversion) {
+ common = ty; // common can be converted to ty.
+ continue;
+ }
+ return nullptr; // Conversion is not valid.
+ }
+ return common;
}
} // namespace tint::sem
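
Read together, ConversionRank() and Common() implement the implicit-conversion lattice for the new abstract numeric types: rank 0 is an exact match, lower non-zero ranks are preferred, and kNoConversion rules a candidate out. A few values worked through the code above, for orientation:

    // Illustrative evaluations (derived from the Switch() ladder above):
    //   ConversionRank(AbstractFloat, F32) == 1    // preferred concretization
    //   ConversionRank(AbstractInt,   I32) == 3
    //   ConversionRank(AbstractInt,   F32) == 6    // an abstract int may still become a float
    //   ConversionRank(F32, I32) == kNoConversion  // no implicit f32 -> i32
    //   Common({AbstractInt, F32}) -> F32          // AbstractInt converts to F32
    //   Common({I32, U32})         -> nullptr      // neither converts to the other
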
diff --git a/chromium/third_party/dawn/src/tint/sem/type.h b/chromium/third_party/dawn/src/tint/sem/type.h
index c93c4e62567..99876375f8b 100644
--- a/chromium/third_party/dawn/src/tint/sem/type.h
+++ b/chromium/third_party/dawn/src/tint/sem/type.h
@@ -33,89 +33,125 @@ enum class MemoryLayout { kUniformBuffer, kStorageBuffer };
/// Base class for a type in the system
class Type : public Castable<Type, Node> {
- public:
- /// Move constructor
- Type(Type&&);
- ~Type() override;
-
- /// @returns a hash of the type.
- virtual size_t Hash() const = 0;
-
- /// @returns true if the this type is equal to the given type
- virtual bool Equals(const Type&) const = 0;
-
- /// @param symbols the program's symbol table
- /// @returns the name for this type that closely resembles how it would be
- /// declared in WGSL.
- virtual std::string FriendlyName(const SymbolTable& symbols) const = 0;
-
- /// @returns the inner most pointee type if this is a pointer, `this`
- /// otherwise
- const Type* UnwrapPtr() const;
-
- /// @returns the inner type if this is a reference, `this` otherwise
- const Type* UnwrapRef() const;
-
- /// @returns the size in bytes of the type. This may include tail padding.
- /// @note opaque types will return a size of 0.
- virtual uint32_t Size() const;
-
- /// @returns the alignment in bytes of the type. This may include tail
- /// padding.
- /// @note opaque types will return a size of 0.
- virtual uint32_t Align() const;
-
- /// @returns true if constructible as per
- /// https://gpuweb.github.io/gpuweb/wgsl/#constructible-types
- virtual bool IsConstructible() const;
-
- /// @returns true if this type is a scalar
- bool is_scalar() const;
- /// @returns true if this type is a numeric scalar
- bool is_numeric_scalar() const;
- /// @returns true if this type is a float scalar
- bool is_float_scalar() const;
- /// @returns true if this type is a float matrix
- bool is_float_matrix() const;
- /// @returns true if this type is a square float matrix
- bool is_square_float_matrix() const;
- /// @returns true if this type is a float vector
- bool is_float_vector() const;
- /// @returns true if this type is a float scalar or vector
- bool is_float_scalar_or_vector() const;
- /// @returns true if this type is a float scalar or vector or matrix
- bool is_float_scalar_or_vector_or_matrix() const;
- /// @returns true if this type is an integer scalar
- bool is_integer_scalar() const;
- /// @returns true if this type is a signed integer scalar
- bool is_signed_integer_scalar() const;
- /// @returns true if this type is an unsigned integer scalar
- bool is_unsigned_integer_scalar() const;
- /// @returns true if this type is a signed integer vector
- bool is_signed_integer_vector() const;
- /// @returns true if this type is an unsigned vector
- bool is_unsigned_integer_vector() const;
- /// @returns true if this type is an unsigned scalar or vector
- bool is_unsigned_scalar_or_vector() const;
- /// @returns true if this type is a signed scalar or vector
- bool is_signed_scalar_or_vector() const;
- /// @returns true if this type is an integer scalar or vector
- bool is_integer_scalar_or_vector() const;
- /// @returns true if this type is a boolean vector
- bool is_bool_vector() const;
- /// @returns true if this type is boolean scalar or vector
- bool is_bool_scalar_or_vector() const;
- /// @returns true if this type is a numeric vector
- bool is_numeric_vector() const;
- /// @returns true if this type is a vector of scalar type
- bool is_scalar_vector() const;
- /// @returns true if this type is a numeric scale or vector
- bool is_numeric_scalar_or_vector() const;
- /// @returns true if this type is a handle type
- bool is_handle() const;
-
- protected:
- Type();
+ public:
+ /// Move constructor
+ Type(Type&&);
+ ~Type() override;
+
+ /// @returns a hash of the type.
+ virtual size_t Hash() const = 0;
+
+ /// @returns true if this type is equal to the given type
+ virtual bool Equals(const Type&) const = 0;
+
+ /// @param symbols the program's symbol table
+ /// @returns the name for this type that closely resembles how it would be
+ /// declared in WGSL.
+ virtual std::string FriendlyName(const SymbolTable& symbols) const = 0;
+
+ /// @returns the innermost pointee type if this is a pointer, `this`
+ /// otherwise
+ const Type* UnwrapPtr() const;
+
+ /// @returns the inner type if this is a reference, `this` otherwise
+ const Type* UnwrapRef() const;
+
+ /// @returns the size in bytes of the type. This may include tail padding.
+ /// @note opaque types will return a size of 0.
+ virtual uint32_t Size() const;
+
+ /// @returns the alignment in bytes of the type. This may include tail
+ /// padding.
+ /// @note opaque types will return an alignment of 0.
+ virtual uint32_t Align() const;
+
+ /// @returns true if constructible as per
+ /// https://gpuweb.github.io/gpuweb/wgsl/#constructible-types
+ virtual bool IsConstructible() const;
+
+ /// @returns true if this type is a scalar
+ bool is_scalar() const;
+ /// @returns true if this type is a scalar or an abstract numeric
+ bool is_abstract_or_scalar() const;
+ /// @returns true if this type is a numeric scalar
+ bool is_numeric_scalar() const;
+ /// @returns true if this type is a float scalar
+ bool is_float_scalar() const;
+ /// @returns true if this type is a float matrix
+ bool is_float_matrix() const;
+ /// @returns true if this type is a square float matrix
+ bool is_square_float_matrix() const;
+ /// @returns true if this type is a float vector
+ bool is_float_vector() const;
+ /// @returns true if this type is a float scalar or vector
+ bool is_float_scalar_or_vector() const;
+ /// @returns true if this type is a float scalar or vector or matrix
+ bool is_float_scalar_or_vector_or_matrix() const;
+ /// @returns true if this type is an integer scalar
+ bool is_integer_scalar() const;
+ /// @returns true if this type is a signed integer scalar
+ bool is_signed_integer_scalar() const;
+ /// @returns true if this type is an unsigned integer scalar
+ bool is_unsigned_integer_scalar() const;
+ /// @returns true if this type is a signed integer vector
+ bool is_signed_integer_vector() const;
+ /// @returns true if this type is an unsigned vector
+ bool is_unsigned_integer_vector() const;
+ /// @returns true if this type is an unsigned scalar or vector
+ bool is_unsigned_scalar_or_vector() const;
+ /// @returns true if this type is a signed scalar or vector
+ bool is_signed_scalar_or_vector() const;
+ /// @returns true if this type is an integer scalar or vector
+ bool is_integer_scalar_or_vector() const;
+ /// @returns true if this type is a boolean vector
+ bool is_bool_vector() const;
+ /// @returns true if this type is boolean scalar or vector
+ bool is_bool_scalar_or_vector() const;
+ /// @returns true if this type is a numeric vector
+ bool is_numeric_vector() const;
+ /// @returns true if this type is a vector of scalar type
+ bool is_scalar_vector() const;
+ /// @returns true if this type is a numeric scalar or vector
+ bool is_numeric_scalar_or_vector() const;
+ /// @returns true if this type is a handle type
+ bool is_handle() const;
+
+ /// kNoConversion is returned from ConversionRank() when the implicit conversion is not
+ /// permitted.
+ static constexpr uint32_t kNoConversion = 0xffffffffu;
+
+ /// ConversionRank returns the implicit conversion rank when attempting to convert `from` to
+ /// `to`. Lower ranks are preferred over higher ranks.
+ /// @param from the source type
+ /// @param to the destination type
+ /// @returns the rank value for converting from type `from` to type `to`, or #kNoConversion if
+ /// the implicit conversion is not allowed.
+ /// @see https://www.w3.org/TR/WGSL/#conversion-rank
+ static uint32_t ConversionRank(const Type* from, const Type* to);
+
+ /// @param ty the type to obtain the element type from
+ /// @param count if not null, then this is assigned the number of elements in the type
+ /// @returns `ty` if `ty` is an abstract numeric or scalar, the element type if `ty` is a
+ /// vector, matrix or array, otherwise nullptr.
+ static const Type* ElementOf(const Type* ty, uint32_t* count = nullptr);
+
+ /// @param types a pointer to a list of `const Type*`.
+ /// @param count the number of types in `types`.
+ /// @returns the lowest-ranking type that all types in `types` can be implicitly converted to,
+ /// or nullptr if there is no consistent common type across all types in `types`.
+ /// @see https://www.w3.org/TR/WGSL/#conversion-rank
+ static const sem::Type* Common(Type const* const* types, size_t count);
+
+ /// @param types an initializer_list of `const Type*`.
+ /// @returns the lowest-ranking type that all types in `types` can be implicitly converted to,
+ /// or nullptr if there is no consistent common type across all types in `types`.
+ /// @see https://www.w3.org/TR/WGSL/#conversion-rank
+ static const sem::Type* Common(std::initializer_list<const Type*> types) {
+ return Common(types.begin(), types.size());
+ }
+
+ protected:
+ Type();
};
} // namespace tint::sem
@@ -125,20 +161,20 @@ namespace std {
/// std::hash specialization for tint::sem::Type
template <>
struct hash<tint::sem::Type> {
- /// @param type the type to obtain a hash from
- /// @returns the hash of the semantic type
- size_t operator()(const tint::sem::Type& type) const { return type.Hash(); }
+ /// @param type the type to obtain a hash from
+ /// @returns the hash of the semantic type
+ size_t operator()(const tint::sem::Type& type) const { return type.Hash(); }
};
/// std::equal_to specialization for tint::sem::Type
template <>
struct equal_to<tint::sem::Type> {
- /// @param a the first type to compare
- /// @param b the second type to compare
- /// @returns true if the two types are equal
- bool operator()(const tint::sem::Type& a, const tint::sem::Type& b) const {
- return a.Equals(b);
- }
+ /// @param a the first type to compare
+ /// @param b the second type to compare
+ /// @returns true if the two types are equal
+ bool operator()(const tint::sem::Type& a, const tint::sem::Type& b) const {
+ return a.Equals(b);
+ }
};
} // namespace std
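The initializer_list overload of Common() declared above makes the intended call pattern concise. A minimal sketch, assuming `ai`, `af`, `f32` and `u32` are the semantic types for abstract-int, abstract-float, f32 and u32:

#include "src/tint/sem/type.h"

// Sketch only: Common() returns the lowest-ranked type that every argument can
// be implicitly converted to, or nullptr when no such type exists.
void CommonSketch(const tint::sem::Type* ai,
                  const tint::sem::Type* af,
                  const tint::sem::Type* f32,
                  const tint::sem::Type* u32) {
    using tint::sem::Type;
    const Type* a = Type::Common({ai, af, f32});  // f32: both abstract types convert to f32
    const Type* b = Type::Common({ai, u32});      // u32: abstract-int converts to u32
    const Type* c = Type::Common({af, u32});      // nullptr: abstract-float cannot become u32
    (void)a;
    (void)b;
    (void)c;
}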
diff --git a/chromium/third_party/dawn/src/tint/sem/type_constructor.cc b/chromium/third_party/dawn/src/tint/sem/type_constructor.cc
index 4213edbaf26..34f6e2a71f4 100644
--- a/chromium/third_party/dawn/src/tint/sem/type_constructor.cc
+++ b/chromium/third_party/dawn/src/tint/sem/type_constructor.cc
@@ -18,8 +18,7 @@ TINT_INSTANTIATE_TYPEINFO(tint::sem::TypeConstructor);
namespace tint::sem {
-TypeConstructor::TypeConstructor(const sem::Type* type,
- const ParameterList& parameters)
+TypeConstructor::TypeConstructor(const sem::Type* type, const ParameterList& parameters)
: Base(type, parameters) {}
TypeConstructor::~TypeConstructor() = default;
diff --git a/chromium/third_party/dawn/src/tint/sem/type_constructor.h b/chromium/third_party/dawn/src/tint/sem/type_constructor.h
index d9ff7a1d4c1..f3d4221550b 100644
--- a/chromium/third_party/dawn/src/tint/sem/type_constructor.h
+++ b/chromium/third_party/dawn/src/tint/sem/type_constructor.h
@@ -21,14 +21,14 @@ namespace tint::sem {
/// TypeConstructor is the CallTarget for a type constructor.
class TypeConstructor final : public Castable<TypeConstructor, CallTarget> {
- public:
- /// Constructor
- /// @param type the type that's being constructed
- /// @param parameters the type constructor parameters
- TypeConstructor(const sem::Type* type, const ParameterList& parameters);
+ public:
+ /// Constructor
+ /// @param type the type that's being constructed
+ /// @param parameters the type constructor parameters
+ TypeConstructor(const sem::Type* type, const ParameterList& parameters);
- /// Destructor
- ~TypeConstructor() override;
+ /// Destructor
+ ~TypeConstructor() override;
};
} // namespace tint::sem
diff --git a/chromium/third_party/dawn/src/tint/sem/type_conversion.cc b/chromium/third_party/dawn/src/tint/sem/type_conversion.cc
index 47a4a71fdad..5da2928582e 100644
--- a/chromium/third_party/dawn/src/tint/sem/type_conversion.cc
+++ b/chromium/third_party/dawn/src/tint/sem/type_conversion.cc
@@ -18,8 +18,7 @@ TINT_INSTANTIATE_TYPEINFO(tint::sem::TypeConversion);
namespace tint::sem {
-TypeConversion::TypeConversion(const sem::Type* type,
- const sem::Parameter* parameter)
+TypeConversion::TypeConversion(const sem::Type* type, const sem::Parameter* parameter)
: Base(type, ParameterList{parameter}) {}
TypeConversion::~TypeConversion() = default;
diff --git a/chromium/third_party/dawn/src/tint/sem/type_conversion.h b/chromium/third_party/dawn/src/tint/sem/type_conversion.h
index 5433641936b..e40056508e7 100644
--- a/chromium/third_party/dawn/src/tint/sem/type_conversion.h
+++ b/chromium/third_party/dawn/src/tint/sem/type_conversion.h
@@ -21,20 +21,20 @@ namespace tint::sem {
/// TypeConversion is the CallTarget for a type conversion (cast).
class TypeConversion final : public Castable<TypeConversion, CallTarget> {
- public:
- /// Constructor
- /// @param type the target type of the cast
- /// @param parameter the type cast parameter
- TypeConversion(const sem::Type* type, const sem::Parameter* parameter);
+ public:
+ /// Constructor
+ /// @param type the target type of the cast
+ /// @param parameter the type cast parameter
+ TypeConversion(const sem::Type* type, const sem::Parameter* parameter);
- /// Destructor
- ~TypeConversion() override;
+ /// Destructor
+ ~TypeConversion() override;
- /// @returns the cast source type
- const sem::Type* Source() const { return Parameters()[0]->Type(); }
+ /// @returns the cast source type
+ const sem::Type* Source() const { return Parameters()[0]->Type(); }
- /// @returns the cast target type
- const sem::Type* Target() const { return ReturnType(); }
+ /// @returns the cast target type
+ const sem::Type* Target() const { return ReturnType(); }
};
} // namespace tint::sem
diff --git a/chromium/third_party/dawn/src/tint/sem/type_manager.h b/chromium/third_party/dawn/src/tint/sem/type_manager.h
index 6d68af0d6bd..fb086896742 100644
--- a/chromium/third_party/dawn/src/tint/sem/type_manager.h
+++ b/chromium/third_party/dawn/src/tint/sem/type_manager.h
@@ -26,43 +26,43 @@ namespace tint::sem {
/// The type manager holds all the pointers to the known types.
class Manager final : public utils::UniqueAllocator<Type> {
- public:
- /// Iterator is the type returned by begin() and end()
- using Iterator = utils::BlockAllocator<Type>::ConstIterator;
+ public:
+ /// Iterator is the type returned by begin() and end()
+ using Iterator = utils::BlockAllocator<Type>::ConstIterator;
- /// Constructor
- Manager();
+ /// Constructor
+ Manager();
- /// Move constructor
- Manager(Manager&&);
+ /// Move constructor
+ Manager(Manager&&);
- /// Move assignment operator
- /// @param rhs the Manager to move
- /// @return this Manager
- Manager& operator=(Manager&& rhs);
+ /// Move assignment operator
+ /// @param rhs the Manager to move
+ /// @return this Manager
+ Manager& operator=(Manager&& rhs);
- /// Destructor
- ~Manager();
+ /// Destructor
+ ~Manager();
- /// Wrap returns a new Manager created with the types of `inner`.
- /// The Manager returned by Wrap is intended to temporarily extend the types
- /// of an existing immutable Manager.
- /// As the copied types are owned by `inner`, `inner` must not be destructed
- /// or assigned while using the returned Manager.
- /// TODO(bclayton) - Evaluate whether there are safer alternatives to this
- /// function. See crbug.com/tint/460.
- /// @param inner the immutable Manager to extend
- /// @return the Manager that wraps `inner`
- static Manager Wrap(const Manager& inner) {
- Manager out;
- out.items = inner.items;
- return out;
- }
+ /// Wrap returns a new Manager created with the types of `inner`.
+ /// The Manager returned by Wrap is intended to temporarily extend the types
+ /// of an existing immutable Manager.
+ /// As the copied types are owned by `inner`, `inner` must not be destructed
+ /// or assigned while using the returned Manager.
+ /// TODO(bclayton) - Evaluate whether there are safer alternatives to this
+ /// function. See crbug.com/tint/460.
+ /// @param inner the immutable Manager to extend
+ /// @return the Manager that wraps `inner`
+ static Manager Wrap(const Manager& inner) {
+ Manager out;
+ out.items = inner.items;
+ return out;
+ }
- /// @returns an iterator to the beginning of the types
- Iterator begin() const { return allocator.Objects().begin(); }
- /// @returns an iterator to the end of the types
- Iterator end() const { return allocator.Objects().end(); }
+ /// @returns an iterator to the beginning of the types
+ Iterator begin() const { return allocator.Objects().begin(); }
+ /// @returns an iterator to the end of the types
+ Iterator end() const { return allocator.Objects().end(); }
};
} // namespace tint::sem
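The Wrap() pattern documented above is worth a concrete illustration. A minimal sketch, under the assumption that `frozen` is an existing, fully built Manager that must stay immutable:

#include "src/tint/sem/i32.h"
#include "src/tint/sem/type_manager.h"

// Sketch only: temporarily extend an immutable Manager. The wrapped types are
// still owned by `frozen`, so `frozen` must outlive `scratch`.
void WrapSketch(const tint::sem::Manager& frozen) {
    tint::sem::Manager scratch = tint::sem::Manager::Wrap(frozen);
    // Types created here land in `scratch` only; `frozen` is unaffected,
    // mirroring the WrapDoesntAffectInner test below.
    scratch.Get<tint::sem::I32>();
}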
diff --git a/chromium/third_party/dawn/src/tint/sem/type_manager_test.cc b/chromium/third_party/dawn/src/tint/sem/type_manager_test.cc
index 6cbea9be924..c670db00ca6 100644
--- a/chromium/third_party/dawn/src/tint/sem/type_manager_test.cc
+++ b/chromium/third_party/dawn/src/tint/sem/type_manager_test.cc
@@ -15,66 +15,66 @@
#include "src/tint/sem/type_manager.h"
#include "gtest/gtest.h"
-#include "src/tint/sem/i32_type.h"
-#include "src/tint/sem/u32_type.h"
+#include "src/tint/sem/i32.h"
+#include "src/tint/sem/u32.h"
namespace tint::sem {
namespace {
template <typename T>
size_t count(const T& range_loopable) {
- size_t n = 0;
- for (auto it : range_loopable) {
- (void)it;
- n++;
- }
- return n;
+ size_t n = 0;
+ for (auto it : range_loopable) {
+ (void)it;
+ n++;
+ }
+ return n;
}
using TypeManagerTest = testing::Test;
TEST_F(TypeManagerTest, GetUnregistered) {
- Manager tm;
- auto* t = tm.Get<I32>();
- ASSERT_NE(t, nullptr);
- EXPECT_TRUE(t->Is<I32>());
+ Manager tm;
+ auto* t = tm.Get<I32>();
+ ASSERT_NE(t, nullptr);
+ EXPECT_TRUE(t->Is<I32>());
}
TEST_F(TypeManagerTest, GetSameTypeReturnsSamePtr) {
- Manager tm;
- auto* t = tm.Get<I32>();
- ASSERT_NE(t, nullptr);
- EXPECT_TRUE(t->Is<I32>());
+ Manager tm;
+ auto* t = tm.Get<I32>();
+ ASSERT_NE(t, nullptr);
+ EXPECT_TRUE(t->Is<I32>());
- auto* t2 = tm.Get<I32>();
- EXPECT_EQ(t, t2);
+ auto* t2 = tm.Get<I32>();
+ EXPECT_EQ(t, t2);
}
TEST_F(TypeManagerTest, GetDifferentTypeReturnsDifferentPtr) {
- Manager tm;
- Type* t = tm.Get<I32>();
- ASSERT_NE(t, nullptr);
- EXPECT_TRUE(t->Is<I32>());
-
- Type* t2 = tm.Get<U32>();
- ASSERT_NE(t2, nullptr);
- EXPECT_NE(t, t2);
- EXPECT_TRUE(t2->Is<U32>());
+ Manager tm;
+ Type* t = tm.Get<I32>();
+ ASSERT_NE(t, nullptr);
+ EXPECT_TRUE(t->Is<I32>());
+
+ Type* t2 = tm.Get<U32>();
+ ASSERT_NE(t2, nullptr);
+ EXPECT_NE(t, t2);
+ EXPECT_TRUE(t2->Is<U32>());
}
TEST_F(TypeManagerTest, WrapDoesntAffectInner) {
- Manager inner;
- Manager outer = Manager::Wrap(inner);
+ Manager inner;
+ Manager outer = Manager::Wrap(inner);
- inner.Get<I32>();
+ inner.Get<I32>();
- EXPECT_EQ(count(inner), 1u);
- EXPECT_EQ(count(outer), 0u);
+ EXPECT_EQ(count(inner), 1u);
+ EXPECT_EQ(count(outer), 0u);
- outer.Get<U32>();
+ outer.Get<U32>();
- EXPECT_EQ(count(inner), 1u);
- EXPECT_EQ(count(outer), 1u);
+ EXPECT_EQ(count(inner), 1u);
+ EXPECT_EQ(count(outer), 1u);
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/sem/type_mappings.h b/chromium/third_party/dawn/src/tint/sem/type_mappings.h
index 1bbc46f5f11..0e54eed7599 100644
--- a/chromium/third_party/dawn/src/tint/sem/type_mappings.h
+++ b/chromium/third_party/dawn/src/tint/sem/type_mappings.h
@@ -22,7 +22,6 @@ namespace tint::ast {
class Array;
class CallExpression;
class Expression;
-class ElseStatement;
class ForLoopStatement;
class Function;
class IfStatement;
@@ -31,6 +30,7 @@ class Node;
class Statement;
class Struct;
class StructMember;
+class SwitchStatement;
class Type;
class TypeDecl;
class Variable;
@@ -39,7 +39,6 @@ namespace tint::sem {
class Array;
class Call;
class Expression;
-class ElseStatement;
class ForLoopStatement;
class Function;
class IfStatement;
@@ -48,6 +47,7 @@ class Node;
class Statement;
class Struct;
class StructMember;
+class SwitchStatement;
class Type;
class Variable;
} // namespace tint::sem
@@ -59,30 +59,29 @@ namespace tint::sem {
/// corresponding semantic node types. The standard operator overload resolving
/// rules will be used to infer the return type based on the argument type.
struct TypeMappings {
- //! @cond Doxygen_Suppress
- Array* operator()(ast::Array*);
- Call* operator()(ast::CallExpression*);
- Expression* operator()(ast::Expression*);
- ElseStatement* operator()(ast::ElseStatement*);
- ForLoopStatement* operator()(ast::ForLoopStatement*);
- Function* operator()(ast::Function*);
- IfStatement* operator()(ast::IfStatement*);
- MemberAccessorExpression* operator()(ast::MemberAccessorExpression*);
- Node* operator()(ast::Node*);
- Statement* operator()(ast::Statement*);
- Struct* operator()(ast::Struct*);
- StructMember* operator()(ast::StructMember*);
- Type* operator()(ast::Type*);
- Type* operator()(ast::TypeDecl*);
- Variable* operator()(ast::Variable*);
- //! @endcond
+ //! @cond Doxygen_Suppress
+ Array* operator()(ast::Array*);
+ Expression* operator()(ast::Expression*);
+ ForLoopStatement* operator()(ast::ForLoopStatement*);
+ Function* operator()(ast::Function*);
+ IfStatement* operator()(ast::IfStatement*);
+ MemberAccessorExpression* operator()(ast::MemberAccessorExpression*);
+ Node* operator()(ast::Node*);
+ Statement* operator()(ast::Statement*);
+ Struct* operator()(ast::Struct*);
+ StructMember* operator()(ast::StructMember*);
+ SwitchStatement* operator()(ast::SwitchStatement*);
+ Type* operator()(ast::Type*);
+ Type* operator()(ast::TypeDecl*);
+ Variable* operator()(ast::Variable*);
+ //! @endcond
};
/// SemanticNodeTypeFor resolves to the appropriate sem::Node type for the
/// AST or type node `AST_OR_TYPE`.
template <typename AST_OR_TYPE>
-using SemanticNodeTypeFor = typename std::remove_pointer<decltype(
- TypeMappings()(std::declval<AST_OR_TYPE*>()))>::type;
+using SemanticNodeTypeFor =
+ typename std::remove_pointer<decltype(TypeMappings()(std::declval<AST_OR_TYPE*>()))>::type;
} // namespace tint::sem
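The reflowed SemanticNodeTypeFor alias above is easiest to read through a compile-time check. The static_assert below is an illustrative sketch only and simply restates what the updated TypeMappings overload set implies:

#include <type_traits>

#include "src/tint/sem/type_mappings.h"

// Sketch only: overload resolution on TypeMappings::operator() maps each AST
// node type to its semantic counterpart, e.g. the newly added SwitchStatement.
static_assert(std::is_same_v<tint::sem::SemanticNodeTypeFor<tint::ast::SwitchStatement>,
                             tint::sem::SwitchStatement>,
              "ast::SwitchStatement resolves to sem::SwitchStatement");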
diff --git a/chromium/third_party/dawn/src/tint/sem/type_test.cc b/chromium/third_party/dawn/src/tint/sem/type_test.cc
new file mode 100644
index 00000000000..c11efea9f28
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/sem/type_test.cc
@@ -0,0 +1,393 @@
+// Copyright 2022 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/tint/sem/abstract_float.h"
+#include "src/tint/sem/abstract_int.h"
+#include "src/tint/sem/reference.h"
+#include "src/tint/sem/test_helper.h"
+
+namespace tint::sem {
+namespace {
+
+using TypeTest = TestHelper;
+
+TEST_F(TypeTest, ConversionRank) {
+ auto* af = create<AbstractFloat>();
+ auto* ai = create<AbstractInt>();
+ auto* f32 = create<F32>();
+ auto* f16 = create<F16>();
+ auto* i32 = create<I32>();
+ auto* u32 = create<U32>();
+ auto* vec3_f32 = create<Vector>(f32, 3u);
+ auto* vec3_f16 = create<Vector>(f16, 3u);
+ auto* vec4_f32 = create<Vector>(f32, 4u);
+ auto* vec3_u32 = create<Vector>(u32, 3u);
+ auto* vec3_i32 = create<Vector>(i32, 3u);
+ auto* vec3_af = create<Vector>(af, 3u);
+ auto* vec3_ai = create<Vector>(ai, 3u);
+ auto* mat3x4_f32 = create<Matrix>(vec4_f32, 3u);
+ auto* mat4x3_f32 = create<Matrix>(vec3_f32, 4u);
+ auto* mat4x3_f16 = create<Matrix>(vec3_f16, 4u);
+ auto* mat4x3_af = create<Matrix>(vec3_af, 4u);
+ auto* ref_u32 = create<Reference>(u32, ast::StorageClass::kPrivate, ast::Access::kReadWrite);
+
+ EXPECT_EQ(Type::ConversionRank(i32, i32), 0u);
+ EXPECT_EQ(Type::ConversionRank(f32, f32), 0u);
+ EXPECT_EQ(Type::ConversionRank(u32, u32), 0u);
+ EXPECT_EQ(Type::ConversionRank(vec3_f32, vec3_f32), 0u);
+ EXPECT_EQ(Type::ConversionRank(vec3_f16, vec3_f16), 0u);
+ EXPECT_EQ(Type::ConversionRank(vec4_f32, vec4_f32), 0u);
+ EXPECT_EQ(Type::ConversionRank(vec3_u32, vec3_u32), 0u);
+ EXPECT_EQ(Type::ConversionRank(vec3_i32, vec3_i32), 0u);
+ EXPECT_EQ(Type::ConversionRank(vec3_af, vec3_af), 0u);
+ EXPECT_EQ(Type::ConversionRank(vec3_ai, vec3_ai), 0u);
+ EXPECT_EQ(Type::ConversionRank(mat3x4_f32, mat3x4_f32), 0u);
+ EXPECT_EQ(Type::ConversionRank(mat4x3_f32, mat4x3_f32), 0u);
+ EXPECT_EQ(Type::ConversionRank(mat4x3_f16, mat4x3_f16), 0u);
+ EXPECT_EQ(Type::ConversionRank(mat4x3_af, mat4x3_af), 0u);
+ EXPECT_EQ(Type::ConversionRank(ref_u32, u32), 0u);
+
+ EXPECT_EQ(Type::ConversionRank(af, f32), 1u);
+ EXPECT_EQ(Type::ConversionRank(vec3_af, vec3_f32), 1u);
+ EXPECT_EQ(Type::ConversionRank(mat4x3_af, mat4x3_f32), 1u);
+ EXPECT_EQ(Type::ConversionRank(af, f16), 2u);
+ EXPECT_EQ(Type::ConversionRank(vec3_af, vec3_f16), 2u);
+ EXPECT_EQ(Type::ConversionRank(mat4x3_af, mat4x3_f16), 2u);
+ EXPECT_EQ(Type::ConversionRank(ai, i32), 3u);
+ EXPECT_EQ(Type::ConversionRank(vec3_ai, vec3_i32), 3u);
+ EXPECT_EQ(Type::ConversionRank(ai, u32), 4u);
+ EXPECT_EQ(Type::ConversionRank(vec3_ai, vec3_u32), 4u);
+ EXPECT_EQ(Type::ConversionRank(ai, af), 5u);
+ EXPECT_EQ(Type::ConversionRank(ai, f32), 6u);
+ EXPECT_EQ(Type::ConversionRank(ai, f16), 7u);
+
+ EXPECT_EQ(Type::ConversionRank(i32, f32), Type::kNoConversion);
+ EXPECT_EQ(Type::ConversionRank(f32, u32), Type::kNoConversion);
+ EXPECT_EQ(Type::ConversionRank(u32, i32), Type::kNoConversion);
+ EXPECT_EQ(Type::ConversionRank(vec3_u32, vec3_f32), Type::kNoConversion);
+ EXPECT_EQ(Type::ConversionRank(vec3_f32, vec4_f32), Type::kNoConversion);
+ EXPECT_EQ(Type::ConversionRank(mat3x4_f32, mat4x3_f32), Type::kNoConversion);
+ EXPECT_EQ(Type::ConversionRank(mat4x3_f32, mat3x4_f32), Type::kNoConversion);
+ EXPECT_EQ(Type::ConversionRank(mat4x3_f32, mat4x3_af), Type::kNoConversion);
+ EXPECT_EQ(Type::ConversionRank(f32, af), Type::kNoConversion);
+ EXPECT_EQ(Type::ConversionRank(f16, af), Type::kNoConversion);
+ EXPECT_EQ(Type::ConversionRank(vec3_f16, vec3_af), Type::kNoConversion);
+ EXPECT_EQ(Type::ConversionRank(mat4x3_f16, mat4x3_af), Type::kNoConversion);
+ EXPECT_EQ(Type::ConversionRank(i32, af), Type::kNoConversion);
+ EXPECT_EQ(Type::ConversionRank(u32, af), Type::kNoConversion);
+ EXPECT_EQ(Type::ConversionRank(af, ai), Type::kNoConversion);
+ EXPECT_EQ(Type::ConversionRank(f32, ai), Type::kNoConversion);
+ EXPECT_EQ(Type::ConversionRank(f16, ai), Type::kNoConversion);
+}
+
+TEST_F(TypeTest, ElementOf) {
+ auto* f32 = create<F32>();
+ auto* f16 = create<F16>();
+ auto* i32 = create<I32>();
+ auto* u32 = create<U32>();
+ auto* vec2_f32 = create<Vector>(f32, 2u);
+ auto* vec3_f16 = create<Vector>(f16, 3u);
+ auto* vec4_f32 = create<Vector>(f32, 4u);
+ auto* vec3_u32 = create<Vector>(u32, 3u);
+ auto* vec3_i32 = create<Vector>(i32, 3u);
+ auto* mat2x4_f32 = create<Matrix>(vec4_f32, 2u);
+ auto* mat4x2_f32 = create<Matrix>(vec2_f32, 4u);
+ auto* mat4x3_f16 = create<Matrix>(vec3_f16, 4u);
+ auto* arr_i32 = create<Array>(
+ /* element */ i32,
+ /* count */ 5u,
+ /* align */ 4u,
+ /* size */ 5u * 4u,
+ /* stride */ 5u * 4u,
+ /* implicit_stride */ 5u * 4u);
+
+ // No count
+ EXPECT_TYPE(Type::ElementOf(f32), f32);
+ EXPECT_TYPE(Type::ElementOf(f16), f16);
+ EXPECT_TYPE(Type::ElementOf(i32), i32);
+ EXPECT_TYPE(Type::ElementOf(u32), u32);
+ EXPECT_TYPE(Type::ElementOf(vec2_f32), f32);
+ EXPECT_TYPE(Type::ElementOf(vec3_f16), f16);
+ EXPECT_TYPE(Type::ElementOf(vec4_f32), f32);
+ EXPECT_TYPE(Type::ElementOf(vec3_u32), u32);
+ EXPECT_TYPE(Type::ElementOf(vec3_i32), i32);
+ EXPECT_TYPE(Type::ElementOf(mat2x4_f32), f32);
+ EXPECT_TYPE(Type::ElementOf(mat4x2_f32), f32);
+ EXPECT_TYPE(Type::ElementOf(mat4x3_f16), f16);
+ EXPECT_TYPE(Type::ElementOf(arr_i32), i32);
+
+ // With count
+ uint32_t count = 0;
+ EXPECT_TYPE(Type::ElementOf(f32, &count), f32);
+ EXPECT_EQ(count, 1u);
+ count = 0;
+ EXPECT_TYPE(Type::ElementOf(f16, &count), f16);
+ EXPECT_EQ(count, 1u);
+ count = 0;
+ EXPECT_TYPE(Type::ElementOf(i32, &count), i32);
+ EXPECT_EQ(count, 1u);
+ count = 0;
+ EXPECT_TYPE(Type::ElementOf(u32, &count), u32);
+ EXPECT_EQ(count, 1u);
+ count = 0;
+ EXPECT_TYPE(Type::ElementOf(vec2_f32, &count), f32);
+ EXPECT_EQ(count, 2u);
+ count = 0;
+ EXPECT_TYPE(Type::ElementOf(vec3_f16, &count), f16);
+ EXPECT_EQ(count, 3u);
+ count = 0;
+ EXPECT_TYPE(Type::ElementOf(vec4_f32, &count), f32);
+ EXPECT_EQ(count, 4u);
+ count = 0;
+ EXPECT_TYPE(Type::ElementOf(vec3_u32, &count), u32);
+ EXPECT_EQ(count, 3u);
+ count = 0;
+ EXPECT_TYPE(Type::ElementOf(vec3_i32, &count), i32);
+ EXPECT_EQ(count, 3u);
+ count = 0;
+ EXPECT_TYPE(Type::ElementOf(mat2x4_f32, &count), f32);
+ EXPECT_EQ(count, 8u);
+ count = 0;
+ EXPECT_TYPE(Type::ElementOf(mat4x2_f32, &count), f32);
+ EXPECT_EQ(count, 8u);
+ count = 0;
+ EXPECT_TYPE(Type::ElementOf(mat4x3_f16, &count), f16);
+ EXPECT_EQ(count, 12u);
+ count = 0;
+ EXPECT_TYPE(Type::ElementOf(arr_i32, &count), i32);
+ EXPECT_EQ(count, 5u);
+}
+
+TEST_F(TypeTest, Common2) {
+ auto* ai = create<AbstractInt>();
+ auto* af = create<AbstractFloat>();
+ auto* f32 = create<F32>();
+ auto* f16 = create<F16>();
+ auto* i32 = create<I32>();
+ auto* u32 = create<U32>();
+
+ EXPECT_TYPE(Type::Common({ai, ai}), ai);
+ EXPECT_TYPE(Type::Common({af, af}), af);
+ EXPECT_TYPE(Type::Common({f32, f32}), f32);
+ EXPECT_TYPE(Type::Common({f16, f16}), f16);
+ EXPECT_TYPE(Type::Common({i32, i32}), i32);
+ EXPECT_TYPE(Type::Common({u32, u32}), u32);
+
+ EXPECT_TYPE(Type::Common({i32, u32}), nullptr);
+ EXPECT_TYPE(Type::Common({u32, f32}), nullptr);
+ EXPECT_TYPE(Type::Common({f32, f16}), nullptr);
+ EXPECT_TYPE(Type::Common({f16, i32}), nullptr);
+
+ EXPECT_TYPE(Type::Common({ai, af}), af);
+ EXPECT_TYPE(Type::Common({ai, f32}), f32);
+ EXPECT_TYPE(Type::Common({ai, f16}), f16);
+ EXPECT_TYPE(Type::Common({ai, i32}), i32);
+ EXPECT_TYPE(Type::Common({ai, u32}), u32);
+
+ EXPECT_TYPE(Type::Common({af, ai}), af);
+ EXPECT_TYPE(Type::Common({f32, ai}), f32);
+ EXPECT_TYPE(Type::Common({f16, ai}), f16);
+ EXPECT_TYPE(Type::Common({i32, ai}), i32);
+ EXPECT_TYPE(Type::Common({u32, ai}), u32);
+
+ EXPECT_TYPE(Type::Common({ai, af}), af);
+ EXPECT_TYPE(Type::Common({f32, af}), f32);
+ EXPECT_TYPE(Type::Common({f16, af}), f16);
+ EXPECT_TYPE(Type::Common({i32, af}), nullptr);
+ EXPECT_TYPE(Type::Common({u32, af}), nullptr);
+
+ EXPECT_TYPE(Type::Common({af, ai}), af);
+ EXPECT_TYPE(Type::Common({af, f32}), f32);
+ EXPECT_TYPE(Type::Common({af, f16}), f16);
+ EXPECT_TYPE(Type::Common({af, i32}), nullptr);
+ EXPECT_TYPE(Type::Common({af, u32}), nullptr);
+
+ auto* vec3_ai = create<Vector>(ai, 3u);
+ auto* vec3_af = create<Vector>(af, 3u);
+ auto* vec3_f32 = create<Vector>(f32, 3u);
+ auto* vec3_f16 = create<Vector>(f16, 3u);
+ auto* vec4_f32 = create<Vector>(f32, 4u);
+ auto* vec3_u32 = create<Vector>(u32, 3u);
+ auto* vec3_i32 = create<Vector>(i32, 3u);
+
+ EXPECT_TYPE(Type::Common({vec3_ai, vec3_ai}), vec3_ai);
+ EXPECT_TYPE(Type::Common({vec3_af, vec3_af}), vec3_af);
+ EXPECT_TYPE(Type::Common({vec3_f32, vec3_f32}), vec3_f32);
+ EXPECT_TYPE(Type::Common({vec3_f16, vec3_f16}), vec3_f16);
+ EXPECT_TYPE(Type::Common({vec4_f32, vec4_f32}), vec4_f32);
+ EXPECT_TYPE(Type::Common({vec3_u32, vec3_u32}), vec3_u32);
+ EXPECT_TYPE(Type::Common({vec3_i32, vec3_i32}), vec3_i32);
+
+ EXPECT_TYPE(Type::Common({vec3_ai, vec3_f32}), vec3_f32);
+ EXPECT_TYPE(Type::Common({vec3_ai, vec3_f16}), vec3_f16);
+ EXPECT_TYPE(Type::Common({vec3_ai, vec4_f32}), nullptr);
+ EXPECT_TYPE(Type::Common({vec3_ai, vec3_u32}), vec3_u32);
+ EXPECT_TYPE(Type::Common({vec3_ai, vec3_i32}), vec3_i32);
+
+ EXPECT_TYPE(Type::Common({vec3_f32, vec3_ai}), vec3_f32);
+ EXPECT_TYPE(Type::Common({vec3_f16, vec3_ai}), vec3_f16);
+ EXPECT_TYPE(Type::Common({vec4_f32, vec3_ai}), nullptr);
+ EXPECT_TYPE(Type::Common({vec3_u32, vec3_ai}), vec3_u32);
+ EXPECT_TYPE(Type::Common({vec3_i32, vec3_ai}), vec3_i32);
+
+ EXPECT_TYPE(Type::Common({vec3_af, vec3_f32}), vec3_f32);
+ EXPECT_TYPE(Type::Common({vec3_af, vec3_f16}), vec3_f16);
+ EXPECT_TYPE(Type::Common({vec3_af, vec4_f32}), nullptr);
+ EXPECT_TYPE(Type::Common({vec3_af, vec3_u32}), nullptr);
+ EXPECT_TYPE(Type::Common({vec3_af, vec3_i32}), nullptr);
+
+ EXPECT_TYPE(Type::Common({vec3_f32, vec3_af}), vec3_f32);
+ EXPECT_TYPE(Type::Common({vec3_f16, vec3_af}), vec3_f16);
+ EXPECT_TYPE(Type::Common({vec4_f32, vec3_af}), nullptr);
+ EXPECT_TYPE(Type::Common({vec3_u32, vec3_af}), nullptr);
+ EXPECT_TYPE(Type::Common({vec3_i32, vec3_af}), nullptr);
+
+ auto* mat4x3_af = create<Matrix>(vec3_af, 4u);
+ auto* mat3x4_f32 = create<Matrix>(vec4_f32, 3u);
+ auto* mat4x3_f32 = create<Matrix>(vec3_f32, 4u);
+ auto* mat4x3_f16 = create<Matrix>(vec3_f16, 4u);
+
+ EXPECT_TYPE(Type::Common({mat4x3_af, mat4x3_af}), mat4x3_af);
+ EXPECT_TYPE(Type::Common({mat3x4_f32, mat3x4_f32}), mat3x4_f32);
+ EXPECT_TYPE(Type::Common({mat4x3_f32, mat4x3_f32}), mat4x3_f32);
+ EXPECT_TYPE(Type::Common({mat4x3_f16, mat4x3_f16}), mat4x3_f16);
+
+ EXPECT_TYPE(Type::Common({mat4x3_af, mat3x4_f32}), nullptr);
+ EXPECT_TYPE(Type::Common({mat4x3_af, mat4x3_f32}), mat4x3_f32);
+ EXPECT_TYPE(Type::Common({mat4x3_af, mat4x3_f16}), mat4x3_f16);
+
+ EXPECT_TYPE(Type::Common({mat3x4_f32, mat4x3_af}), nullptr);
+ EXPECT_TYPE(Type::Common({mat4x3_f32, mat4x3_af}), mat4x3_f32);
+ EXPECT_TYPE(Type::Common({mat4x3_f16, mat4x3_af}), mat4x3_f16);
+}
+
+TEST_F(TypeTest, Common3) {
+ auto* ai = create<AbstractInt>();
+ auto* af = create<AbstractFloat>();
+ auto* f32 = create<F32>();
+ auto* f16 = create<F16>();
+ auto* i32 = create<I32>();
+ auto* u32 = create<U32>();
+
+ EXPECT_TYPE(Type::Common({ai, ai, ai}), ai);
+ EXPECT_TYPE(Type::Common({af, af, af}), af);
+ EXPECT_TYPE(Type::Common({f32, f32, f32}), f32);
+ EXPECT_TYPE(Type::Common({f16, f16, f16}), f16);
+ EXPECT_TYPE(Type::Common({i32, i32, i32}), i32);
+ EXPECT_TYPE(Type::Common({u32, u32, u32}), u32);
+
+ EXPECT_TYPE(Type::Common({ai, af, ai}), af);
+ EXPECT_TYPE(Type::Common({ai, f32, ai}), f32);
+ EXPECT_TYPE(Type::Common({ai, f16, ai}), f16);
+ EXPECT_TYPE(Type::Common({ai, i32, ai}), i32);
+ EXPECT_TYPE(Type::Common({ai, u32, ai}), u32);
+
+ EXPECT_TYPE(Type::Common({af, ai, af}), af);
+ EXPECT_TYPE(Type::Common({f32, ai, f32}), f32);
+ EXPECT_TYPE(Type::Common({f16, ai, f16}), f16);
+ EXPECT_TYPE(Type::Common({i32, ai, i32}), i32);
+ EXPECT_TYPE(Type::Common({u32, ai, u32}), u32);
+
+ EXPECT_TYPE(Type::Common({ai, f32, ai}), f32);
+ EXPECT_TYPE(Type::Common({ai, f16, ai}), f16);
+ EXPECT_TYPE(Type::Common({ai, i32, ai}), i32);
+ EXPECT_TYPE(Type::Common({ai, u32, ai}), u32);
+
+ EXPECT_TYPE(Type::Common({f32, ai, f32}), f32);
+ EXPECT_TYPE(Type::Common({f16, ai, f16}), f16);
+ EXPECT_TYPE(Type::Common({i32, ai, i32}), i32);
+ EXPECT_TYPE(Type::Common({u32, ai, u32}), u32);
+
+ EXPECT_TYPE(Type::Common({af, f32, af}), f32);
+ EXPECT_TYPE(Type::Common({af, f16, af}), f16);
+ EXPECT_TYPE(Type::Common({af, i32, af}), nullptr);
+ EXPECT_TYPE(Type::Common({af, u32, af}), nullptr);
+
+ EXPECT_TYPE(Type::Common({f32, af, f32}), f32);
+ EXPECT_TYPE(Type::Common({f16, af, f16}), f16);
+ EXPECT_TYPE(Type::Common({i32, af, i32}), nullptr);
+ EXPECT_TYPE(Type::Common({u32, af, u32}), nullptr);
+
+ EXPECT_TYPE(Type::Common({ai, af, f32}), f32);
+ EXPECT_TYPE(Type::Common({ai, af, f16}), f16);
+ EXPECT_TYPE(Type::Common({ai, af, i32}), nullptr);
+ EXPECT_TYPE(Type::Common({ai, af, u32}), nullptr);
+
+ auto* vec3_ai = create<Vector>(ai, 3u);
+ auto* vec3_af = create<Vector>(af, 3u);
+ auto* vec3_f32 = create<Vector>(f32, 3u);
+ auto* vec3_f16 = create<Vector>(f16, 3u);
+ auto* vec4_f32 = create<Vector>(f32, 4u);
+ auto* vec3_u32 = create<Vector>(u32, 3u);
+ auto* vec3_i32 = create<Vector>(i32, 3u);
+
+ EXPECT_TYPE(Type::Common({vec3_ai, vec3_ai, vec3_ai}), vec3_ai);
+ EXPECT_TYPE(Type::Common({vec3_af, vec3_af, vec3_af}), vec3_af);
+ EXPECT_TYPE(Type::Common({vec3_f32, vec3_f32, vec3_f32}), vec3_f32);
+ EXPECT_TYPE(Type::Common({vec3_f16, vec3_f16, vec3_f16}), vec3_f16);
+ EXPECT_TYPE(Type::Common({vec4_f32, vec4_f32, vec4_f32}), vec4_f32);
+ EXPECT_TYPE(Type::Common({vec3_u32, vec3_u32, vec3_u32}), vec3_u32);
+ EXPECT_TYPE(Type::Common({vec3_i32, vec3_i32, vec3_i32}), vec3_i32);
+
+ EXPECT_TYPE(Type::Common({vec3_f32, vec3_ai, vec3_f32}), vec3_f32);
+ EXPECT_TYPE(Type::Common({vec3_f16, vec3_ai, vec3_f16}), vec3_f16);
+ EXPECT_TYPE(Type::Common({vec4_f32, vec3_ai, vec4_f32}), nullptr);
+ EXPECT_TYPE(Type::Common({vec3_u32, vec3_ai, vec3_u32}), vec3_u32);
+ EXPECT_TYPE(Type::Common({vec3_i32, vec3_ai, vec3_i32}), vec3_i32);
+
+ EXPECT_TYPE(Type::Common({vec3_ai, vec3_f32, vec3_ai}), vec3_f32);
+ EXPECT_TYPE(Type::Common({vec3_ai, vec3_f16, vec3_ai}), vec3_f16);
+ EXPECT_TYPE(Type::Common({vec3_ai, vec4_f32, vec3_ai}), nullptr);
+ EXPECT_TYPE(Type::Common({vec3_ai, vec3_u32, vec3_ai}), vec3_u32);
+ EXPECT_TYPE(Type::Common({vec3_ai, vec3_i32, vec3_ai}), vec3_i32);
+
+ EXPECT_TYPE(Type::Common({vec3_f32, vec3_af, vec3_f32}), vec3_f32);
+ EXPECT_TYPE(Type::Common({vec3_f16, vec3_af, vec3_f16}), vec3_f16);
+ EXPECT_TYPE(Type::Common({vec4_f32, vec3_af, vec4_f32}), nullptr);
+ EXPECT_TYPE(Type::Common({vec3_u32, vec3_af, vec3_u32}), nullptr);
+ EXPECT_TYPE(Type::Common({vec3_i32, vec3_af, vec3_i32}), nullptr);
+
+ EXPECT_TYPE(Type::Common({vec3_af, vec3_f32, vec3_af}), vec3_f32);
+ EXPECT_TYPE(Type::Common({vec3_af, vec3_f16, vec3_af}), vec3_f16);
+ EXPECT_TYPE(Type::Common({vec3_af, vec4_f32, vec3_af}), nullptr);
+ EXPECT_TYPE(Type::Common({vec3_af, vec3_u32, vec3_af}), nullptr);
+ EXPECT_TYPE(Type::Common({vec3_af, vec3_i32, vec3_af}), nullptr);
+
+ EXPECT_TYPE(Type::Common({vec3_ai, vec3_af, vec3_f32}), vec3_f32);
+ EXPECT_TYPE(Type::Common({vec3_ai, vec3_af, vec3_f16}), vec3_f16);
+ EXPECT_TYPE(Type::Common({vec3_ai, vec3_af, vec4_f32}), nullptr);
+ EXPECT_TYPE(Type::Common({vec3_ai, vec3_af, vec3_u32}), nullptr);
+ EXPECT_TYPE(Type::Common({vec3_ai, vec3_af, vec3_i32}), nullptr);
+
+ auto* mat4x3_af = create<Matrix>(vec3_af, 4u);
+ auto* mat3x4_f32 = create<Matrix>(vec4_f32, 3u);
+ auto* mat4x3_f32 = create<Matrix>(vec3_f32, 4u);
+ auto* mat4x3_f16 = create<Matrix>(vec3_f16, 4u);
+
+ EXPECT_TYPE(Type::Common({mat4x3_af, mat4x3_af, mat4x3_af}), mat4x3_af);
+ EXPECT_TYPE(Type::Common({mat3x4_f32, mat3x4_f32, mat3x4_f32}), mat3x4_f32);
+ EXPECT_TYPE(Type::Common({mat4x3_f32, mat4x3_f32, mat4x3_f32}), mat4x3_f32);
+ EXPECT_TYPE(Type::Common({mat4x3_f16, mat4x3_f16, mat4x3_f16}), mat4x3_f16);
+
+ EXPECT_TYPE(Type::Common({mat3x4_f32, mat4x3_af, mat3x4_f32}), nullptr);
+ EXPECT_TYPE(Type::Common({mat4x3_f32, mat4x3_af, mat4x3_f32}), mat4x3_f32);
+ EXPECT_TYPE(Type::Common({mat4x3_f16, mat4x3_af, mat4x3_f16}), mat4x3_f16);
+
+ EXPECT_TYPE(Type::Common({mat4x3_af, mat3x4_f32, mat4x3_af}), nullptr);
+ EXPECT_TYPE(Type::Common({mat4x3_af, mat4x3_f32, mat4x3_af}), mat4x3_f32);
+ EXPECT_TYPE(Type::Common({mat4x3_af, mat4x3_f16, mat4x3_af}), mat4x3_f16);
+}
+
+} // namespace
+} // namespace tint::sem
diff --git a/chromium/third_party/dawn/src/tint/sem/u32_type.cc b/chromium/third_party/dawn/src/tint/sem/u32.cc
index f73b61756b2..dc3bd1de767 100644
--- a/chromium/third_party/dawn/src/tint/sem/u32_type.cc
+++ b/chromium/third_party/dawn/src/tint/sem/u32.cc
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-#include "src/tint/sem/u32_type.h"
+#include "src/tint/sem/u32.h"
#include "src/tint/program_builder.h"
@@ -27,27 +27,27 @@ U32::~U32() = default;
U32::U32(U32&&) = default;
size_t U32::Hash() const {
- return static_cast<size_t>(TypeInfo::Of<U32>().full_hashcode);
+ return static_cast<size_t>(TypeInfo::Of<U32>().full_hashcode);
}
bool U32::Equals(const Type& other) const {
- return other.Is<U32>();
+ return other.Is<U32>();
}
std::string U32::FriendlyName(const SymbolTable&) const {
- return "u32";
+ return "u32";
}
bool U32::IsConstructible() const {
- return true;
+ return true;
}
uint32_t U32::Size() const {
- return 4;
+ return 4;
}
uint32_t U32::Align() const {
- return 4;
+ return 4;
}
} // namespace tint::sem
diff --git a/chromium/third_party/dawn/src/tint/sem/u32.h b/chromium/third_party/dawn/src/tint/sem/u32.h
new file mode 100644
index 00000000000..5ae01fbae7a
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/sem/u32.h
@@ -0,0 +1,58 @@
+// Copyright 2020 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef SRC_TINT_SEM_U32_H_
+#define SRC_TINT_SEM_U32_H_
+
+#include <string>
+
+#include "src/tint/sem/type.h"
+
+namespace tint::sem {
+
+/// An unsigned 32-bit integer type.
+class U32 final : public Castable<U32, Type> {
+ public:
+ /// Constructor
+ U32();
+ /// Move constructor
+ U32(U32&&);
+ ~U32() override;
+
+ /// @returns a hash of the type.
+ size_t Hash() const override;
+
+ /// @param other the other type to compare against
+ /// @returns true if this type is equal to the given type
+ bool Equals(const Type& other) const override;
+
+ /// @param symbols the program's symbol table
+ /// @returns the name for this type that closely resembles how it would be
+ /// declared in WGSL.
+ std::string FriendlyName(const SymbolTable& symbols) const override;
+
+ /// @returns true if constructible as per
+ /// https://gpuweb.github.io/gpuweb/wgsl/#constructible-types
+ bool IsConstructible() const override;
+
+ /// @returns the size in bytes of the type.
+ uint32_t Size() const override;
+
+ /// @returns the alignment in bytes of the type.
+ uint32_t Align() const override;
+};
+
+} // namespace tint::sem
+
+#endif // SRC_TINT_SEM_U32_H_
diff --git a/chromium/third_party/dawn/src/tint/sem/u32_type_test.cc b/chromium/third_party/dawn/src/tint/sem/u32_test.cc
index dae4c422f36..1716aa57d69 100644
--- a/chromium/third_party/dawn/src/tint/sem/u32_type_test.cc
+++ b/chromium/third_party/dawn/src/tint/sem/u32_test.cc
@@ -13,7 +13,7 @@
// limitations under the License.
#include "src/tint/sem/test_helper.h"
-#include "src/tint/sem/texture_type.h"
+#include "src/tint/sem/texture.h"
namespace tint::sem {
namespace {
@@ -21,27 +21,27 @@ namespace {
using U32Test = TestHelper;
TEST_F(U32Test, Creation) {
- auto* a = create<U32>();
- auto* b = create<U32>();
- EXPECT_EQ(a, b);
+ auto* a = create<U32>();
+ auto* b = create<U32>();
+ EXPECT_EQ(a, b);
}
TEST_F(U32Test, Hash) {
- auto* a = create<U32>();
- auto* b = create<U32>();
- EXPECT_EQ(a->Hash(), b->Hash());
+ auto* a = create<U32>();
+ auto* b = create<U32>();
+ EXPECT_EQ(a->Hash(), b->Hash());
}
TEST_F(U32Test, Equals) {
- auto* a = create<U32>();
- auto* b = create<U32>();
- EXPECT_TRUE(a->Equals(*b));
- EXPECT_FALSE(a->Equals(Void{}));
+ auto* a = create<U32>();
+ auto* b = create<U32>();
+ EXPECT_TRUE(a->Equals(*b));
+ EXPECT_FALSE(a->Equals(Void{}));
}
TEST_F(U32Test, FriendlyName) {
- U32 u;
- EXPECT_EQ(u.FriendlyName(Symbols()), "u32");
+ U32 u;
+ EXPECT_EQ(u.FriendlyName(Symbols()), "u32");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/sem/u32_type.h b/chromium/third_party/dawn/src/tint/sem/u32_type.h
deleted file mode 100644
index d81886c96a0..00000000000
--- a/chromium/third_party/dawn/src/tint/sem/u32_type.h
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2020 The Tint Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef SRC_TINT_SEM_U32_TYPE_H_
-#define SRC_TINT_SEM_U32_TYPE_H_
-
-#include <string>
-
-#include "src/tint/sem/type.h"
-
-namespace tint::sem {
-
-/// A unsigned int 32 type.
-class U32 final : public Castable<U32, Type> {
- public:
- /// Constructor
- U32();
- /// Move constructor
- U32(U32&&);
- ~U32() override;
-
- /// @returns a hash of the type.
- size_t Hash() const override;
-
- /// @param other the other type to compare against
- /// @returns true if the this type is equal to the given type
- bool Equals(const Type& other) const override;
-
- /// @param symbols the program's symbol table
- /// @returns the name for this type that closely resembles how it would be
- /// declared in WGSL.
- std::string FriendlyName(const SymbolTable& symbols) const override;
-
- /// @returns true if constructible as per
- /// https://gpuweb.github.io/gpuweb/wgsl/#constructible-types
- bool IsConstructible() const override;
-
- /// @returns the size in bytes of the type.
- uint32_t Size() const override;
-
- /// @returns the alignment in bytes of the type.
- uint32_t Align() const override;
-};
-
-} // namespace tint::sem
-
-#endif // SRC_TINT_SEM_U32_TYPE_H_
diff --git a/chromium/third_party/dawn/src/tint/sem/variable.cc b/chromium/third_party/dawn/src/tint/sem/variable.cc
index af3deb9b5bf..0ada5aeeed9 100644
--- a/chromium/third_party/dawn/src/tint/sem/variable.cc
+++ b/chromium/third_party/dawn/src/tint/sem/variable.cc
@@ -68,9 +68,7 @@ Parameter::Parameter(const ast::Variable* declaration,
ast::StorageClass storage_class,
ast::Access access,
const ParameterUsage usage /* = ParameterUsage::kNone */)
- : Base(declaration, type, storage_class, access, Constant{}),
- index_(index),
- usage_(usage) {}
+ : Base(declaration, type, storage_class, access, Constant{}), index_(index), usage_(usage) {}
Parameter::~Parameter() = default;
@@ -82,6 +80,13 @@ VariableUser::VariableUser(const ast::IdentifierExpression* declaration,
statement,
variable->ConstantValue(),
/* has_side_effects */ false),
- variable_(variable) {}
+ variable_(variable) {
+ auto* type = variable->Type();
+ if (type->Is<sem::Pointer>() && variable->Constructor()) {
+ source_variable_ = variable->Constructor()->SourceVariable();
+ } else {
+ source_variable_ = variable;
+ }
+}
} // namespace tint::sem
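The new VariableUser constructor logic is clearer with a concrete case. The sketch below is illustrative only; the WGSL fragment and the RootOf helper are assumptions, and SourceVariable() is the accessor implied by the call in the constructor above.

#include "src/tint/sem/variable.h"

// Sketch only. For WGSL such as:
//   var<private> v : u32;
//   fn f() { let p = &v; let x = *p; }
// the VariableUser built for `p` inside `*p` now records `v` (not `p`) as its
// source variable, because the constructor expression `&v` is followed back
// through the pointer.
const tint::sem::Variable* RootOf(const tint::sem::VariableUser* user) {
    return user->SourceVariable();  // assumed accessor, as used in the constructor above
}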
diff --git a/chromium/third_party/dawn/src/tint/sem/variable.h b/chromium/third_party/dawn/src/tint/sem/variable.h
index ca97d64b19a..7026ca75062 100644
--- a/chromium/third_party/dawn/src/tint/sem/variable.h
+++ b/chromium/third_party/dawn/src/tint/sem/variable.h
@@ -40,189 +40,185 @@ namespace tint::sem {
/// Variable is the base class for local variables, global variables and
/// parameters.
class Variable : public Castable<Variable, Node> {
- public:
- /// Constructor
- /// @param declaration the AST declaration node
- /// @param type the variable type
- /// @param storage_class the variable storage class
- /// @param access the variable access control type
- /// @param constant_value the constant value for the variable. May be invalid
- Variable(const ast::Variable* declaration,
- const sem::Type* type,
- ast::StorageClass storage_class,
- ast::Access access,
- Constant constant_value);
-
- /// Destructor
- ~Variable() override;
-
- /// @returns the AST declaration node
- const ast::Variable* Declaration() const { return declaration_; }
-
- /// @returns the canonical type for the variable
- const sem::Type* Type() const { return type_; }
-
- /// @returns the storage class for the variable
- ast::StorageClass StorageClass() const { return storage_class_; }
-
- /// @returns the access control for the variable
- ast::Access Access() const { return access_; }
-
- /// @return the constant value of this expression
- const Constant& ConstantValue() const { return constant_value_; }
-
- /// @returns the variable constructor expression, or nullptr if the variable
- /// does not have one.
- const Expression* Constructor() const { return constructor_; }
-
- /// Sets the variable constructor expression.
- /// @param constructor the constructor expression to assign to this variable.
- void SetConstructor(const Expression* constructor) {
- constructor_ = constructor;
- }
-
- /// @returns the expressions that use the variable
- const std::vector<const VariableUser*>& Users() const { return users_; }
-
- /// @param user the user to add
- void AddUser(const VariableUser* user) { users_.emplace_back(user); }
-
- private:
- const ast::Variable* const declaration_;
- const sem::Type* const type_;
- const ast::StorageClass storage_class_;
- const ast::Access access_;
- const Constant constant_value_;
- const Expression* constructor_ = nullptr;
- std::vector<const VariableUser*> users_;
+ public:
+ /// Constructor
+ /// @param declaration the AST declaration node
+ /// @param type the variable type
+ /// @param storage_class the variable storage class
+ /// @param access the variable access control type
+ /// @param constant_value the constant value for the variable. May be invalid
+ Variable(const ast::Variable* declaration,
+ const sem::Type* type,
+ ast::StorageClass storage_class,
+ ast::Access access,
+ Constant constant_value);
+
+ /// Destructor
+ ~Variable() override;
+
+ /// @returns the AST declaration node
+ const ast::Variable* Declaration() const { return declaration_; }
+
+ /// @returns the canonical type for the variable
+ const sem::Type* Type() const { return type_; }
+
+ /// @returns the storage class for the variable
+ ast::StorageClass StorageClass() const { return storage_class_; }
+
+ /// @returns the access control for the variable
+ ast::Access Access() const { return access_; }
+
+ /// @return the constant value of this expression
+ const Constant& ConstantValue() const { return constant_value_; }
+
+ /// @returns the variable constructor expression, or nullptr if the variable
+ /// does not have one.
+ const Expression* Constructor() const { return constructor_; }
+
+ /// Sets the variable constructor expression.
+ /// @param constructor the constructor expression to assign to this variable.
+ void SetConstructor(const Expression* constructor) { constructor_ = constructor; }
+
+ /// @returns the expressions that use the variable
+ const std::vector<const VariableUser*>& Users() const { return users_; }
+
+ /// @param user the user to add
+ void AddUser(const VariableUser* user) { users_.emplace_back(user); }
+
+ private:
+ const ast::Variable* const declaration_;
+ const sem::Type* const type_;
+ const ast::StorageClass storage_class_;
+ const ast::Access access_;
+ const Constant constant_value_;
+ const Expression* constructor_ = nullptr;
+ std::vector<const VariableUser*> users_;
};
/// LocalVariable is a function-scope variable
class LocalVariable final : public Castable<LocalVariable, Variable> {
- public:
- /// Constructor
- /// @param declaration the AST declaration node
- /// @param type the variable type
- /// @param storage_class the variable storage class
- /// @param access the variable access control type
- /// @param statement the statement that declared this local variable
- /// @param constant_value the constant value for the variable. May be invalid
- LocalVariable(const ast::Variable* declaration,
- const sem::Type* type,
- ast::StorageClass storage_class,
- ast::Access access,
- const sem::Statement* statement,
- Constant constant_value);
-
- /// Destructor
- ~LocalVariable() override;
-
- /// @returns the statement that declares this local variable
- const sem::Statement* Statement() const { return statement_; }
-
- /// @returns the Type, Function or Variable that this local variable shadows
- const sem::Node* Shadows() const { return shadows_; }
-
- /// Sets the Type, Function or Variable that this local variable shadows
- /// @param shadows the Type, Function or Variable that this variable shadows
- void SetShadows(const sem::Node* shadows) { shadows_ = shadows; }
-
- private:
- const sem::Statement* const statement_;
- const sem::Node* shadows_ = nullptr;
+ public:
+ /// Constructor
+ /// @param declaration the AST declaration node
+ /// @param type the variable type
+ /// @param storage_class the variable storage class
+ /// @param access the variable access control type
+ /// @param statement the statement that declared this local variable
+ /// @param constant_value the constant value for the variable. May be invalid
+ LocalVariable(const ast::Variable* declaration,
+ const sem::Type* type,
+ ast::StorageClass storage_class,
+ ast::Access access,
+ const sem::Statement* statement,
+ Constant constant_value);
+
+ /// Destructor
+ ~LocalVariable() override;
+
+ /// @returns the statement that declares this local variable
+ const sem::Statement* Statement() const { return statement_; }
+
+ /// @returns the Type, Function or Variable that this local variable shadows
+ const sem::Node* Shadows() const { return shadows_; }
+
+ /// Sets the Type, Function or Variable that this local variable shadows
+ /// @param shadows the Type, Function or Variable that this variable shadows
+ void SetShadows(const sem::Node* shadows) { shadows_ = shadows; }
+
+ private:
+ const sem::Statement* const statement_;
+ const sem::Node* shadows_ = nullptr;
};
/// GlobalVariable is a module-scope variable
class GlobalVariable final : public Castable<GlobalVariable, Variable> {
- public:
- /// Constructor
- /// @param declaration the AST declaration node
- /// @param type the variable type
- /// @param storage_class the variable storage class
- /// @param access the variable access control type
- /// @param constant_value the constant value for the variable. May be invalid
- /// @param binding_point the optional resource binding point of the variable
- GlobalVariable(const ast::Variable* declaration,
- const sem::Type* type,
- ast::StorageClass storage_class,
- ast::Access access,
- Constant constant_value,
- sem::BindingPoint binding_point = {});
-
- /// Destructor
- ~GlobalVariable() override;
-
- /// @returns the resource binding point for the variable
- sem::BindingPoint BindingPoint() const { return binding_point_; }
-
- /// @param id the constant identifier to assign to this variable
- void SetConstantId(uint16_t id) {
- constant_id_ = id;
- is_overridable_ = true;
- }
-
- /// @returns the pipeline constant ID associated with the variable
- uint16_t ConstantId() const { return constant_id_; }
-
- /// @param is_overridable true if this is a pipeline overridable constant
- void SetIsOverridable(bool is_overridable = true) {
- is_overridable_ = is_overridable;
- }
-
- /// @returns true if this is pipeline overridable constant
- bool IsOverridable() const { return is_overridable_; }
-
- private:
- const sem::BindingPoint binding_point_;
-
- bool is_overridable_ = false;
- uint16_t constant_id_ = 0;
+ public:
+ /// Constructor
+ /// @param declaration the AST declaration node
+ /// @param type the variable type
+ /// @param storage_class the variable storage class
+ /// @param access the variable access control type
+ /// @param constant_value the constant value for the variable. May be invalid
+ /// @param binding_point the optional resource binding point of the variable
+ GlobalVariable(const ast::Variable* declaration,
+ const sem::Type* type,
+ ast::StorageClass storage_class,
+ ast::Access access,
+ Constant constant_value,
+ sem::BindingPoint binding_point = {});
+
+ /// Destructor
+ ~GlobalVariable() override;
+
+ /// @returns the resource binding point for the variable
+ sem::BindingPoint BindingPoint() const { return binding_point_; }
+
+ /// @param id the constant identifier to assign to this variable
+ void SetConstantId(uint16_t id) {
+ constant_id_ = id;
+ is_overridable_ = true;
+ }
+
+ /// @returns the pipeline constant ID associated with the variable
+ uint16_t ConstantId() const { return constant_id_; }
+
+ /// @param is_overridable true if this is a pipeline overridable constant
+ void SetIsOverridable(bool is_overridable = true) { is_overridable_ = is_overridable; }
+
+ /// @returns true if this is a pipeline overridable constant
+ bool IsOverridable() const { return is_overridable_; }
+
+ private:
+ const sem::BindingPoint binding_point_;
+
+ bool is_overridable_ = false;
+ uint16_t constant_id_ = 0;
};
/// Parameter is a function parameter
class Parameter final : public Castable<Parameter, Variable> {
- public:
- /// Constructor for function parameters
- /// @param declaration the AST declaration node
- /// @param index the index of the parmeter in the function
- /// @param type the variable type
- /// @param storage_class the variable storage class
- /// @param access the variable access control type
- /// @param usage the semantic usage for the parameter
- Parameter(const ast::Variable* declaration,
- uint32_t index,
- const sem::Type* type,
- ast::StorageClass storage_class,
- ast::Access access,
- const ParameterUsage usage = ParameterUsage::kNone);
-
- /// Destructor
- ~Parameter() override;
-
- /// @return the index of the parmeter in the function
- uint32_t Index() const { return index_; }
-
- /// @returns the semantic usage for the parameter
- ParameterUsage Usage() const { return usage_; }
-
- /// @returns the CallTarget owner of this parameter
- CallTarget const* Owner() const { return owner_; }
-
- /// @param owner the CallTarget owner of this parameter
- void SetOwner(CallTarget const* owner) { owner_ = owner; }
-
- /// @returns the Type, Function or Variable that this local variable shadows
- const sem::Node* Shadows() const { return shadows_; }
-
- /// Sets the Type, Function or Variable that this local variable shadows
- /// @param shadows the Type, Function or Variable that this variable shadows
- void SetShadows(const sem::Node* shadows) { shadows_ = shadows; }
-
- private:
- const uint32_t index_;
- const ParameterUsage usage_;
- CallTarget const* owner_ = nullptr;
- const sem::Node* shadows_ = nullptr;
+ public:
+ /// Constructor for function parameters
+ /// @param declaration the AST declaration node
+    /// @param index the index of the parameter in the function
+ /// @param type the variable type
+ /// @param storage_class the variable storage class
+ /// @param access the variable access control type
+ /// @param usage the semantic usage for the parameter
+ Parameter(const ast::Variable* declaration,
+ uint32_t index,
+ const sem::Type* type,
+ ast::StorageClass storage_class,
+ ast::Access access,
+ const ParameterUsage usage = ParameterUsage::kNone);
+
+ /// Destructor
+ ~Parameter() override;
+
+    /// @return the index of the parameter in the function
+ uint32_t Index() const { return index_; }
+
+ /// @returns the semantic usage for the parameter
+ ParameterUsage Usage() const { return usage_; }
+
+ /// @returns the CallTarget owner of this parameter
+ CallTarget const* Owner() const { return owner_; }
+
+ /// @param owner the CallTarget owner of this parameter
+ void SetOwner(CallTarget const* owner) { owner_ = owner; }
+
+ /// @returns the Type, Function or Variable that this local variable shadows
+ const sem::Node* Shadows() const { return shadows_; }
+
+ /// Sets the Type, Function or Variable that this local variable shadows
+ /// @param shadows the Type, Function or Variable that this variable shadows
+ void SetShadows(const sem::Node* shadows) { shadows_ = shadows; }
+
+ private:
+ const uint32_t index_;
+ const ParameterUsage usage_;
+ CallTarget const* owner_ = nullptr;
+ const sem::Node* shadows_ = nullptr;
};
/// ParameterList is a list of Parameter
@@ -231,20 +227,20 @@ using ParameterList = std::vector<const Parameter*>;
/// VariableUser holds the semantic information for an identifier expression
/// node that resolves to a variable.
class VariableUser final : public Castable<VariableUser, Expression> {
- public:
- /// Constructor
- /// @param declaration the AST identifier node
- /// @param statement the statement that owns this expression
- /// @param variable the semantic variable
- VariableUser(const ast::IdentifierExpression* declaration,
- Statement* statement,
- sem::Variable* variable);
-
- /// @returns the variable that this expression refers to
- const sem::Variable* Variable() const { return variable_; }
-
- private:
- const sem::Variable* const variable_;
+ public:
+ /// Constructor
+ /// @param declaration the AST identifier node
+ /// @param statement the statement that owns this expression
+ /// @param variable the semantic variable
+ VariableUser(const ast::IdentifierExpression* declaration,
+ Statement* statement,
+ sem::Variable* variable);
+
+ /// @returns the variable that this expression refers to
+ const sem::Variable* Variable() const { return variable_; }
+
+ private:
+ const sem::Variable* const variable_;
};
/// A pair of sem::Variables. Can be hashed.
@@ -257,12 +253,12 @@ namespace std {
/// Custom std::hash specialization for VariablePair
template <>
class hash<tint::sem::VariablePair> {
- public:
- /// @param i the variable pair to create a hash for
- /// @return the hash value
- inline std::size_t operator()(const tint::sem::VariablePair& i) const {
- return tint::utils::Hash(i.first, i.second);
- }
+ public:
+ /// @param i the variable pair to create a hash for
+ /// @return the hash value
+ inline std::size_t operator()(const tint::sem::VariablePair& i) const {
+ return tint::utils::Hash(i.first, i.second);
+ }
};
} // namespace std
diff --git a/chromium/third_party/dawn/src/tint/sem/vector_type.cc b/chromium/third_party/dawn/src/tint/sem/vector.cc
index 1df39457f8f..2df7cf0a334 100644
--- a/chromium/third_party/dawn/src/tint/sem/vector_type.cc
+++ b/chromium/third_party/dawn/src/tint/sem/vector.cc
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-#include "src/tint/sem/vector_type.h"
+#include "src/tint/sem/vector.h"
#include "src/tint/program_builder.h"
#include "src/tint/utils/hash.h"
@@ -21,10 +21,9 @@ TINT_INSTANTIATE_TYPEINFO(tint::sem::Vector);
namespace tint::sem {
-Vector::Vector(Type const* subtype, uint32_t width)
- : subtype_(subtype), width_(width) {
- TINT_ASSERT(Semantic, width_ > 1);
- TINT_ASSERT(Semantic, width_ < 5);
+Vector::Vector(Type const* subtype, uint32_t width) : subtype_(subtype), width_(width) {
+ TINT_ASSERT(Semantic, width_ > 1);
+ TINT_ASSERT(Semantic, width_ < 5);
}
Vector::Vector(Vector&&) = default;
@@ -32,56 +31,40 @@ Vector::Vector(Vector&&) = default;
Vector::~Vector() = default;
size_t Vector::Hash() const {
- return utils::Hash(TypeInfo::Of<Vector>().full_hashcode, width_, subtype_);
+ return utils::Hash(TypeInfo::Of<Vector>().full_hashcode, width_, subtype_);
}
bool Vector::Equals(const Type& other) const {
- if (auto* v = other.As<Vector>()) {
- return v->width_ == width_ && v->subtype_ == subtype_;
- }
- return false;
+ if (auto* v = other.As<Vector>()) {
+ return v->width_ == width_ && v->subtype_ == subtype_;
+ }
+ return false;
}
std::string Vector::FriendlyName(const SymbolTable& symbols) const {
- std::ostringstream out;
- out << "vec" << width_ << "<" << subtype_->FriendlyName(symbols) << ">";
- return out.str();
+ std::ostringstream out;
+ out << "vec" << width_ << "<" << subtype_->FriendlyName(symbols) << ">";
+ return out.str();
}
bool Vector::IsConstructible() const {
- return true;
+ return true;
}
uint32_t Vector::Size() const {
- return SizeOf(width_);
+ return subtype_->Size() * width_;
}
uint32_t Vector::Align() const {
- return AlignOf(width_);
-}
-
-uint32_t Vector::SizeOf(uint32_t width) {
- switch (width) {
- case 2:
- return 8;
- case 3:
- return 12;
- case 4:
- return 16;
- }
- return 0; // Unreachable
-}
-
-uint32_t Vector::AlignOf(uint32_t width) {
- switch (width) {
- case 2:
- return 8;
- case 3:
- return 16;
- case 4:
- return 16;
- }
- return 0; // Unreachable
+ switch (width_) {
+ case 2:
+ return subtype_->Size() * 2;
+ case 3:
+ return subtype_->Size() * 4;
+ case 4:
+ return subtype_->Size() * 4;
+ }
+ return 0; // Unreachable
}
} // namespace tint::sem
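
For context only (not part of the patch): a minimal standalone sketch of the element-size-based layout that the reworked Vector::Size() and Vector::Align() above now compute, assuming a 4-byte element type such as f32. The helper names below are hypothetical.

    #include <cassert>
    #include <cstdint>

    // Mirrors the new Size(): total size is element size times width.
    uint32_t VectorSize(uint32_t element_size, uint32_t width) {
        return element_size * width;
    }

    // Mirrors the new Align(): vec3 aligns like vec4, per WGSL layout rules.
    uint32_t VectorAlign(uint32_t element_size, uint32_t width) {
        switch (width) {
            case 2:
                return element_size * 2;
            case 3:
                return element_size * 4;
            case 4:
                return element_size * 4;
        }
        return 0;  // unreachable for valid widths (2..4)
    }

    int main() {
        assert(VectorSize(4, 3) == 12);   // vec3<f32> occupies 12 bytes
        assert(VectorAlign(4, 3) == 16);  // but is 16-byte aligned
        return 0;
    }

Unlike the removed SizeOf()/AlignOf() tables, which hard-coded the 4-byte-element values (8/12/16 and 8/16/16), the new code derives both values from the element type, so it no longer assumes 4-byte elements.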
diff --git a/chromium/third_party/dawn/src/tint/sem/vector.h b/chromium/third_party/dawn/src/tint/sem/vector.h
new file mode 100644
index 00000000000..8d9e7a335bc
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/sem/vector.h
@@ -0,0 +1,79 @@
+// Copyright 2020 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef SRC_TINT_SEM_VECTOR_H_
+#define SRC_TINT_SEM_VECTOR_H_
+
+#include <string>
+
+#include "src/tint/sem/type.h"
+
+namespace tint::sem {
+
+/// A vector type.
+class Vector final : public Castable<Vector, Type> {
+ public:
+ /// Constructor
+ /// @param subtype the vector element type
+ /// @param size the number of elements in the vector
+ Vector(Type const* subtype, uint32_t size);
+ /// Move constructor
+ Vector(Vector&&);
+ ~Vector() override;
+
+ /// @returns a hash of the type.
+ size_t Hash() const override;
+
+ /// @param other the other type to compare against
+  /// @returns true if this type is equal to the given type
+ bool Equals(const Type& other) const override;
+
+ /// @returns the type of the vector elements
+ const Type* type() const { return subtype_; }
+
+ /// @param symbols the program's symbol table
+ /// @returns the name for this type that closely resembles how it would be
+ /// declared in WGSL.
+ std::string FriendlyName(const SymbolTable& symbols) const override;
+
+ /// @returns true if constructible as per
+ /// https://gpuweb.github.io/gpuweb/wgsl/#constructible-types
+ bool IsConstructible() const override;
+
+ /// @returns the number of elements in the vector
+ uint32_t Width() const { return width_; }
+
+ /// @returns the size in bytes of the type. This may include tail padding.
+ uint32_t Size() const override;
+
+ /// @returns the alignment in bytes of the type. This may include tail
+ /// padding.
+ uint32_t Align() const override;
+
+ /// @param width the width of the vector
+ /// @returns the size in bytes of a vector of the given width.
+ static uint32_t SizeOf(uint32_t width);
+
+ /// @param width the width of the vector
+ /// @returns the alignment in bytes of a vector of the given width.
+ static uint32_t AlignOf(uint32_t width);
+
+ private:
+ Type const* const subtype_;
+ const uint32_t width_;
+};
+
+} // namespace tint::sem
+
+#endif // SRC_TINT_SEM_VECTOR_H_
diff --git a/chromium/third_party/dawn/src/tint/sem/vector_test.cc b/chromium/third_party/dawn/src/tint/sem/vector_test.cc
new file mode 100644
index 00000000000..adeca55cf28
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/sem/vector_test.cc
@@ -0,0 +1,67 @@
+// Copyright 2020 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/tint/sem/test_helper.h"
+#include "src/tint/sem/texture.h"
+
+namespace tint::sem {
+namespace {
+
+using VectorTest = TestHelper;
+
+TEST_F(VectorTest, Creation) {
+ auto* a = create<Vector>(create<I32>(), 2u);
+ auto* b = create<Vector>(create<I32>(), 2u);
+ auto* c = create<Vector>(create<F32>(), 2u);
+ auto* d = create<Vector>(create<F32>(), 3u);
+
+ EXPECT_EQ(a->type(), create<I32>());
+ EXPECT_EQ(a->Width(), 2u);
+
+ EXPECT_EQ(a, b);
+ EXPECT_NE(a, c);
+ EXPECT_NE(a, d);
+}
+
+TEST_F(VectorTest, Hash) {
+ auto* a = create<Vector>(create<I32>(), 2u);
+ auto* b = create<Vector>(create<I32>(), 2u);
+ auto* c = create<Vector>(create<F32>(), 2u);
+ auto* d = create<Vector>(create<F32>(), 3u);
+
+ EXPECT_EQ(a->Hash(), b->Hash());
+ EXPECT_NE(a->Hash(), c->Hash());
+ EXPECT_NE(a->Hash(), d->Hash());
+}
+
+TEST_F(VectorTest, Equals) {
+ auto* a = create<Vector>(create<I32>(), 2u);
+ auto* b = create<Vector>(create<I32>(), 2u);
+ auto* c = create<Vector>(create<F32>(), 2u);
+ auto* d = create<Vector>(create<F32>(), 3u);
+
+ EXPECT_TRUE(a->Equals(*b));
+ EXPECT_FALSE(a->Equals(*c));
+ EXPECT_FALSE(a->Equals(*d));
+ EXPECT_FALSE(a->Equals(Void{}));
+}
+
+TEST_F(VectorTest, FriendlyName) {
+ auto* f32 = create<F32>();
+ auto* v = create<Vector>(f32, 3u);
+ EXPECT_EQ(v->FriendlyName(Symbols()), "vec3<f32>");
+}
+
+} // namespace
+} // namespace tint::sem
diff --git a/chromium/third_party/dawn/src/tint/sem/vector_type.h b/chromium/third_party/dawn/src/tint/sem/vector_type.h
deleted file mode 100644
index 4542c5b9829..00000000000
--- a/chromium/third_party/dawn/src/tint/sem/vector_type.h
+++ /dev/null
@@ -1,79 +0,0 @@
-// Copyright 2020 The Tint Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef SRC_TINT_SEM_VECTOR_TYPE_H_
-#define SRC_TINT_SEM_VECTOR_TYPE_H_
-
-#include <string>
-
-#include "src/tint/sem/type.h"
-
-namespace tint::sem {
-
-/// A vector type.
-class Vector final : public Castable<Vector, Type> {
- public:
- /// Constructor
- /// @param subtype the vector element type
- /// @param size the number of elements in the vector
- Vector(Type const* subtype, uint32_t size);
- /// Move constructor
- Vector(Vector&&);
- ~Vector() override;
-
- /// @returns a hash of the type.
- size_t Hash() const override;
-
- /// @param other the other type to compare against
- /// @returns true if the this type is equal to the given type
- bool Equals(const Type& other) const override;
-
- /// @returns the type of the vector elements
- const Type* type() const { return subtype_; }
-
- /// @param symbols the program's symbol table
- /// @returns the name for this type that closely resembles how it would be
- /// declared in WGSL.
- std::string FriendlyName(const SymbolTable& symbols) const override;
-
- /// @returns true if constructible as per
- /// https://gpuweb.github.io/gpuweb/wgsl/#constructible-types
- bool IsConstructible() const override;
-
- /// @returns the number of elements in the vector
- uint32_t Width() const { return width_; }
-
- /// @returns the size in bytes of the type. This may include tail padding.
- uint32_t Size() const override;
-
- /// @returns the alignment in bytes of the type. This may include tail
- /// padding.
- uint32_t Align() const override;
-
- /// @param width the width of the vector
- /// @returns the size in bytes of a vector of the given width.
- static uint32_t SizeOf(uint32_t width);
-
- /// @param width the width of the vector
- /// @returns the alignment in bytes of a vector of the given width.
- static uint32_t AlignOf(uint32_t width);
-
- private:
- Type const* const subtype_;
- const uint32_t width_;
-};
-
-} // namespace tint::sem
-
-#endif // SRC_TINT_SEM_VECTOR_TYPE_H_
diff --git a/chromium/third_party/dawn/src/tint/sem/vector_type_test.cc b/chromium/third_party/dawn/src/tint/sem/vector_type_test.cc
deleted file mode 100644
index 4d5391807c3..00000000000
--- a/chromium/third_party/dawn/src/tint/sem/vector_type_test.cc
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright 2020 The Tint Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "src/tint/sem/test_helper.h"
-#include "src/tint/sem/texture_type.h"
-
-namespace tint::sem {
-namespace {
-
-using VectorTest = TestHelper;
-
-TEST_F(VectorTest, Creation) {
- auto* a = create<Vector>(create<I32>(), 2u);
- auto* b = create<Vector>(create<I32>(), 2u);
- auto* c = create<Vector>(create<F32>(), 2u);
- auto* d = create<Vector>(create<F32>(), 3u);
-
- EXPECT_EQ(a->type(), create<I32>());
- EXPECT_EQ(a->Width(), 2u);
-
- EXPECT_EQ(a, b);
- EXPECT_NE(a, c);
- EXPECT_NE(a, d);
-}
-
-TEST_F(VectorTest, Hash) {
- auto* a = create<Vector>(create<I32>(), 2u);
- auto* b = create<Vector>(create<I32>(), 2u);
- auto* c = create<Vector>(create<F32>(), 2u);
- auto* d = create<Vector>(create<F32>(), 3u);
-
- EXPECT_EQ(a->Hash(), b->Hash());
- EXPECT_NE(a->Hash(), c->Hash());
- EXPECT_NE(a->Hash(), d->Hash());
-}
-
-TEST_F(VectorTest, Equals) {
- auto* a = create<Vector>(create<I32>(), 2u);
- auto* b = create<Vector>(create<I32>(), 2u);
- auto* c = create<Vector>(create<F32>(), 2u);
- auto* d = create<Vector>(create<F32>(), 3u);
-
- EXPECT_TRUE(a->Equals(*b));
- EXPECT_FALSE(a->Equals(*c));
- EXPECT_FALSE(a->Equals(*d));
- EXPECT_FALSE(a->Equals(Void{}));
-}
-
-TEST_F(VectorTest, FriendlyName) {
- auto* f32 = create<F32>();
- auto* v = create<Vector>(f32, 3u);
- EXPECT_EQ(v->FriendlyName(Symbols()), "vec3<f32>");
-}
-
-} // namespace
-} // namespace tint::sem
diff --git a/chromium/third_party/dawn/src/tint/sem/void_type.cc b/chromium/third_party/dawn/src/tint/sem/void.cc
index 6dae92132b4..b20b96e502b 100644
--- a/chromium/third_party/dawn/src/tint/sem/void_type.cc
+++ b/chromium/third_party/dawn/src/tint/sem/void.cc
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-#include "src/tint/sem/void_type.h"
+#include "src/tint/sem/void.h"
#include "src/tint/program_builder.h"
@@ -27,15 +27,15 @@ Void::Void(Void&&) = default;
Void::~Void() = default;
size_t Void::Hash() const {
- return static_cast<size_t>(TypeInfo::Of<Void>().full_hashcode);
+ return static_cast<size_t>(TypeInfo::Of<Void>().full_hashcode);
}
bool Void::Equals(const Type& other) const {
- return other.Is<Void>();
+ return other.Is<Void>();
}
std::string Void::FriendlyName(const SymbolTable&) const {
- return "void";
+ return "void";
}
} // namespace tint::sem
diff --git a/chromium/third_party/dawn/src/tint/sem/void_type.h b/chromium/third_party/dawn/src/tint/sem/void.h
index 1131cb8df1e..21cc3b1fa93 100644
--- a/chromium/third_party/dawn/src/tint/sem/void_type.h
+++ b/chromium/third_party/dawn/src/tint/sem/void.h
@@ -12,8 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-#ifndef SRC_TINT_SEM_VOID_TYPE_H_
-#define SRC_TINT_SEM_VOID_TYPE_H_
+#ifndef SRC_TINT_SEM_VOID_H_
+#define SRC_TINT_SEM_VOID_H_
#include <string>
@@ -23,26 +23,26 @@ namespace tint::sem {
/// A void type
class Void final : public Castable<Void, Type> {
- public:
- /// Constructor
- Void();
- /// Move constructor
- Void(Void&&);
- ~Void() override;
-
- /// @returns a hash of the type.
- size_t Hash() const override;
-
- /// @param other the other type to compare against
- /// @returns true if the this type is equal to the given type
- bool Equals(const Type& other) const override;
-
- /// @param symbols the program's symbol table
- /// @returns the name for this type that closely resembles how it would be
- /// declared in WGSL.
- std::string FriendlyName(const SymbolTable& symbols) const override;
+ public:
+ /// Constructor
+ Void();
+ /// Move constructor
+ Void(Void&&);
+ ~Void() override;
+
+ /// @returns a hash of the type.
+ size_t Hash() const override;
+
+ /// @param other the other type to compare against
+ /// @returns true if the this type is equal to the given type
+ bool Equals(const Type& other) const override;
+
+ /// @param symbols the program's symbol table
+ /// @returns the name for this type that closely resembles how it would be
+ /// declared in WGSL.
+ std::string FriendlyName(const SymbolTable& symbols) const override;
};
} // namespace tint::sem
-#endif // SRC_TINT_SEM_VOID_TYPE_H_
+#endif // SRC_TINT_SEM_VOID_H_
diff --git a/chromium/third_party/dawn/src/tint/source.cc b/chromium/third_party/dawn/src/tint/source.cc
index 9a172e3a790..5dbed6cf371 100644
--- a/chromium/third_party/dawn/src/tint/source.cc
+++ b/chromium/third_party/dawn/src/tint/source.cc
@@ -19,41 +19,96 @@
#include <string_view>
#include <utility>
+#include "src/tint/text/unicode.h"
+
namespace tint {
namespace {
+
+bool ParseLineBreak(std::string_view str, size_t i, bool* is_line_break, size_t* line_break_size) {
+ // See https://www.w3.org/TR/WGSL/#blankspace
+
+ auto* utf8 = reinterpret_cast<const uint8_t*>(&str[i]);
+ auto [cp, n] = text::utf8::Decode(utf8, str.size() - i);
+
+ if (n == 0) {
+ return false;
+ }
+
+ static const auto kLF = text::CodePoint(0x000A); // line feed
+ static const auto kVTab = text::CodePoint(0x000B); // vertical tab
+ static const auto kFF = text::CodePoint(0x000C); // form feed
+ static const auto kNL = text::CodePoint(0x0085); // next line
+ static const auto kCR = text::CodePoint(0x000D); // carriage return
+ static const auto kLS = text::CodePoint(0x2028); // line separator
+    static const auto kPS = text::CodePoint(0x2029);    // paragraph separator
+
+ if (cp == kLF || cp == kVTab || cp == kFF || cp == kNL || cp == kPS || cp == kLS) {
+ *is_line_break = true;
+ *line_break_size = n;
+ return true;
+ }
+
+ // Handle CRLF as one line break, and CR alone as one line break
+ if (cp == kCR) {
+ *is_line_break = true;
+ *line_break_size = n;
+
+ if (auto next_i = i + n; next_i < str.size()) {
+ auto* next_utf8 = reinterpret_cast<const uint8_t*>(&str[next_i]);
+ auto [next_cp, next_n] = text::utf8::Decode(next_utf8, str.size() - next_i);
+
+ if (next_n == 0) {
+ return false;
+ }
+
+ if (next_cp == kLF) {
+ // CRLF as one break
+ *line_break_size = n + next_n;
+ }
+ }
+
+ return true;
+ }
+
+ *is_line_break = false;
+ return true;
+}
+
std::vector<std::string_view> SplitLines(std::string_view str) {
- std::vector<std::string_view> lines;
-
- size_t lineStart = 0;
- for (size_t i = 0; i < str.size(); ++i) {
- if (str[i] == '\n') {
- // Handle CRLF on Windows
- size_t curr = i;
- if (i > 0 && str[i - 1] == '\r') {
- --curr;
- }
- lines.push_back(str.substr(lineStart, curr - lineStart));
- lineStart = i + 1;
+ std::vector<std::string_view> lines;
+
+ size_t lineStart = 0;
+ for (size_t i = 0; i < str.size();) {
+ bool is_line_break{};
+ size_t line_break_size{};
+ // We don't handle decode errors from ParseLineBreak. Instead, we rely on
+ // the Lexer to do so.
+ ParseLineBreak(str, i, &is_line_break, &line_break_size);
+ if (is_line_break) {
+ lines.push_back(str.substr(lineStart, i - lineStart));
+ i += line_break_size;
+ lineStart = i;
+ } else {
+ ++i;
+ }
+ }
+ if (lineStart < str.size()) {
+ lines.push_back(str.substr(lineStart));
}
- }
- if (lineStart < str.size()) {
- lines.push_back(str.substr(lineStart));
- }
- return lines;
+ return lines;
}
-std::vector<std::string_view> CopyRelativeStringViews(
- const std::vector<std::string_view>& src_list,
- const std::string_view& src_view,
- const std::string_view& dst_view) {
- std::vector<std::string_view> out(src_list.size());
- for (size_t i = 0; i < src_list.size(); i++) {
- auto offset = static_cast<size_t>(&src_list[i].front() - &src_view.front());
- auto count = src_list[i].length();
- out[i] = dst_view.substr(offset, count);
- }
- return out;
+std::vector<std::string_view> CopyRelativeStringViews(const std::vector<std::string_view>& src_list,
+ const std::string_view& src_view,
+ const std::string_view& dst_view) {
+ std::vector<std::string_view> out(src_list.size());
+ for (size_t i = 0; i < src_list.size(); i++) {
+ auto offset = static_cast<size_t>(&src_list[i].front() - &src_view.front());
+ auto count = src_list[i].length();
+ out[i] = dst_view.substr(offset, count);
+ }
+ return out;
}
} // namespace
@@ -71,56 +126,56 @@ Source::FileContent::~FileContent() = default;
Source::File::~File() = default;
std::ostream& operator<<(std::ostream& out, const Source& source) {
- auto rng = source.range;
-
- if (source.file) {
- out << source.file->path << ":";
- }
- if (rng.begin.line) {
- out << rng.begin.line << ":";
- if (rng.begin.column) {
- out << rng.begin.column;
- }
+ auto rng = source.range;
if (source.file) {
- out << std::endl << std::endl;
-
- auto repeat = [&](char c, size_t n) {
- while (n--) {
- out << c;
+ out << source.file->path << ":";
+ }
+ if (rng.begin.line) {
+ out << rng.begin.line << ":";
+ if (rng.begin.column) {
+ out << rng.begin.column;
}
- };
-
- for (size_t line = rng.begin.line; line <= rng.end.line; line++) {
- if (line < source.file->content.lines.size() + 1) {
- auto len = source.file->content.lines[line - 1].size();
-
- out << source.file->content.lines[line - 1];
-
- out << std::endl;
-
- if (line == rng.begin.line && line == rng.end.line) {
- // Single line
- repeat(' ', rng.begin.column - 1);
- repeat('^', std::max<size_t>(rng.end.column - rng.begin.column, 1));
- } else if (line == rng.begin.line) {
- // Start of multi-line
- repeat(' ', rng.begin.column - 1);
- repeat('^', len - (rng.begin.column - 1));
- } else if (line == rng.end.line) {
- // End of multi-line
- repeat('^', rng.end.column - 1);
- } else {
- // Middle of multi-line
- repeat('^', len);
- }
-
- out << std::endl;
+
+ if (source.file) {
+ out << std::endl << std::endl;
+
+ auto repeat = [&](char c, size_t n) {
+ while (n--) {
+ out << c;
+ }
+ };
+
+ for (size_t line = rng.begin.line; line <= rng.end.line; line++) {
+ if (line < source.file->content.lines.size() + 1) {
+ auto len = source.file->content.lines[line - 1].size();
+
+ out << source.file->content.lines[line - 1];
+
+ out << std::endl;
+
+ if (line == rng.begin.line && line == rng.end.line) {
+ // Single line
+ repeat(' ', rng.begin.column - 1);
+ repeat('^', std::max<size_t>(rng.end.column - rng.begin.column, 1));
+ } else if (line == rng.begin.line) {
+ // Start of multi-line
+ repeat(' ', rng.begin.column - 1);
+ repeat('^', len - (rng.begin.column - 1));
+ } else if (line == rng.end.line) {
+ // End of multi-line
+ repeat('^', rng.end.column - 1);
+ } else {
+ // Middle of multi-line
+ repeat('^', len);
+ }
+
+ out << std::endl;
+ }
+ }
}
- }
}
- }
- return out;
+ return out;
}
} // namespace tint
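
For context only (not part of the patch): a minimal sketch of the line-splitting behaviour introduced above, restricted to the ASCII subset of WGSL blankspace for brevity. The real SplitLines()/ParseLineBreak() additionally decode UTF-8 so that NEL (U+0085), LS (U+2028) and PS (U+2029) also count as line breaks; the function name below is hypothetical.

    #include <cassert>
    #include <string_view>
    #include <vector>

    std::vector<std::string_view> SplitLinesAscii(std::string_view str) {
        std::vector<std::string_view> lines;
        size_t line_start = 0;
        for (size_t i = 0; i < str.size();) {
            char c = str[i];
            if (c == '\n' || c == '\v' || c == '\f' || c == '\r') {
                size_t brk = 1;
                // Treat CRLF as a single line break, as the patched code does.
                if (c == '\r' && i + 1 < str.size() && str[i + 1] == '\n') {
                    brk = 2;
                }
                lines.push_back(str.substr(line_start, i - line_start));
                i += brk;
                line_start = i;
            } else {
                ++i;
            }
        }
        if (line_start < str.size()) {
            lines.push_back(str.substr(line_start));
        }
        return lines;
    }

    int main() {
        auto lines = SplitLinesAscii("line one\r\nline two\rline three");
        assert(lines.size() == 3);
        assert(lines[1] == "line two");
        return 0;
    }

As in the patched code, CRLF counts as one break, while a bare CR, LF, vertical tab or form feed each end a line on their own.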
diff --git a/chromium/third_party/dawn/src/tint/source.h b/chromium/third_party/dawn/src/tint/source.h
index 931cdf1189b..734e6936d19 100644
--- a/chromium/third_party/dawn/src/tint/source.h
+++ b/chromium/third_party/dawn/src/tint/source.h
@@ -26,186 +26,180 @@ namespace tint {
/// Source describes a range of characters within a source file.
class Source {
- public:
- /// FileContent describes the content of a source file encoded using utf-8.
- class FileContent {
- public:
- /// Constructs the FileContent with the given file content.
- /// @param data the file contents
- explicit FileContent(const std::string& data);
-
- /// Copy constructor
- /// @param rhs the FileContent to copy
- FileContent(const FileContent& rhs);
-
- /// Destructor
- ~FileContent();
-
- /// The original un-split file content
- const std::string data;
- /// A string_view over #data
- const std::string_view data_view;
- /// #data split by lines
- const std::vector<std::string_view> lines;
- };
-
- /// File describes a source file, including path and content.
- class File {
- public:
- /// Constructs the File with the given file path and content.
- /// @param p the path for this file
- /// @param c the file contents
- inline File(const std::string& p, const std::string& c)
- : path(p), content(c) {}
-
- /// Copy constructor
- File(const File&) = default;
-
- /// Move constructor
- File(File&&) = default;
-
- /// Destructor
- ~File();
-
- /// file path
- const std::string path;
- /// file content
- const FileContent content;
- };
-
- /// Location holds a 1-based line and column index.
- class Location {
- public:
- /// the 1-based line number. 0 represents no line information.
- size_t line = 0;
- /// the 1-based column number in utf8-code units (bytes).
- /// 0 represents no column information.
- size_t column = 0;
-
- /// Returns true of `this` location is lexicographically less than `rhs`
- /// @param rhs location to compare against
- /// @returns true if `this` < `rhs`
- inline bool operator<(const Source::Location& rhs) {
- return std::tie(line, column) < std::tie(rhs.line, rhs.column);
- }
-
- /// Returns true of `this` location is equal to `rhs`
- /// @param rhs location to compare against
- /// @returns true if `this` == `rhs`
- inline bool operator==(const Location& rhs) const {
- return line == rhs.line && column == rhs.column;
- }
-
- /// Returns true of `this` location is not equal to `rhs`
- /// @param rhs location to compare against
- /// @returns true if `this` != `rhs`
- inline bool operator!=(const Location& rhs) const {
- return !(*this == rhs);
- }
- };
-
- /// Range holds a Location interval described by [begin, end).
- class Range {
- public:
- /// Constructs a zero initialized Range.
- inline Range() = default;
-
- /// Constructs a zero-length Range starting at `loc`
- /// @param loc the start and end location for the range
- inline constexpr explicit Range(const Location& loc)
- : begin(loc), end(loc) {}
-
- /// Constructs the Range beginning at `b` and ending at `e`
- /// @param b the range start location
- /// @param e the range end location
- inline constexpr Range(const Location& b, const Location& e)
- : begin(b), end(e) {}
-
- /// Return a column-shifted Range
+ public:
+ /// FileContent describes the content of a source file encoded using utf-8.
+ class FileContent {
+ public:
+ /// Constructs the FileContent with the given file content.
+ /// @param data the file contents
+ explicit FileContent(const std::string& data);
+
+ /// Copy constructor
+ /// @param rhs the FileContent to copy
+ FileContent(const FileContent& rhs);
+
+ /// Destructor
+ ~FileContent();
+
+ /// The original un-split file content
+ const std::string data;
+ /// A string_view over #data
+ const std::string_view data_view;
+ /// #data split by lines
+ const std::vector<std::string_view> lines;
+ };
+
+ /// File describes a source file, including path and content.
+ class File {
+ public:
+ /// Constructs the File with the given file path and content.
+ /// @param p the path for this file
+ /// @param c the file contents
+ inline File(const std::string& p, const std::string& c) : path(p), content(c) {}
+
+ /// Copy constructor
+ File(const File&) = default;
+
+ /// Move constructor
+ File(File&&) = default;
+
+ /// Destructor
+ ~File();
+
+ /// file path
+ const std::string path;
+ /// file content
+ const FileContent content;
+ };
+
+ /// Location holds a 1-based line and column index.
+ class Location {
+ public:
+ /// the 1-based line number. 0 represents no line information.
+ size_t line = 0;
+ /// the 1-based column number in utf8-code units (bytes).
+ /// 0 represents no column information.
+ size_t column = 0;
+
+        /// Returns true if `this` location is lexicographically less than `rhs`
+ /// @param rhs location to compare against
+ /// @returns true if `this` < `rhs`
+ inline bool operator<(const Source::Location& rhs) {
+ return std::tie(line, column) < std::tie(rhs.line, rhs.column);
+ }
+
+        /// Returns true if `this` location is equal to `rhs`
+ /// @param rhs location to compare against
+ /// @returns true if `this` == `rhs`
+ inline bool operator==(const Location& rhs) const {
+ return line == rhs.line && column == rhs.column;
+ }
+
+        /// Returns true if `this` location is not equal to `rhs`
+ /// @param rhs location to compare against
+ /// @returns true if `this` != `rhs`
+ inline bool operator!=(const Location& rhs) const { return !(*this == rhs); }
+ };
+
+ /// Range holds a Location interval described by [begin, end).
+ class Range {
+ public:
+ /// Constructs a zero initialized Range.
+ inline Range() = default;
+
+ /// Constructs a zero-length Range starting at `loc`
+ /// @param loc the start and end location for the range
+ inline constexpr explicit Range(const Location& loc) : begin(loc), end(loc) {}
+
+ /// Constructs the Range beginning at `b` and ending at `e`
+ /// @param b the range start location
+ /// @param e the range end location
+ inline constexpr Range(const Location& b, const Location& e) : begin(b), end(e) {}
+
+ /// Return a column-shifted Range
+ /// @param n the number of characters to shift by
+ /// @returns a Range with a #begin and #end column shifted by `n`
+ inline Range operator+(size_t n) const {
+ return Range{{begin.line, begin.column + n}, {end.line, end.column + n}};
+ }
+
+        /// Returns true if `this` range is equal to `rhs`
+        /// @param rhs range to compare against
+        /// @returns true if `this` == `rhs`
+ inline bool operator==(const Range& rhs) const {
+ return begin == rhs.begin && end == rhs.end;
+ }
+
+        /// Returns true if `this` range is not equal to `rhs`
+        /// @param rhs range to compare against
+        /// @returns true if `this` != `rhs`
+ inline bool operator!=(const Range& rhs) const { return !(*this == rhs); }
+
+ /// The location of the first character in the range.
+ Location begin;
+ /// The location of one-past the last character in the range.
+ Location end;
+ };
+
+    /// Constructs the Source with a zero initialized Range and null File.
+ inline Source() : range() {}
+
+ /// Constructs the Source with the Range `rng` and a null File
+ /// @param rng the source range
+ inline explicit Source(const Range& rng) : range(rng) {}
+
+ /// Constructs the Source with the Range `loc` and a null File
+ /// @param loc the start and end location for the source range
+ inline explicit Source(const Location& loc) : range(Range(loc)) {}
+
+ /// Constructs the Source with the Range `rng` and File `file`
+ /// @param rng the source range
+ /// @param f the source file
+ inline Source(const Range& rng, File const* f) : range(rng), file(f) {}
+
+ /// @returns a Source that points to the begin range of this Source.
+ inline Source Begin() const { return Source(Range{range.begin}, file); }
+
+ /// @returns a Source that points to the end range of this Source.
+ inline Source End() const { return Source(Range{range.end}, file); }
+
+ /// Return a column-shifted Source
/// @param n the number of characters to shift by
- /// @returns a Range with a #begin and #end column shifted by `n`
- inline Range operator+(size_t n) const {
- return Range{{begin.line, begin.column + n}, {end.line, end.column + n}};
- }
+ /// @returns a Source with the range's columns shifted by `n`
+ inline Source operator+(size_t n) const { return Source(range + n, file); }
- /// Returns true of `this` range is not equal to `rhs`
- /// @param rhs range to compare against
- /// @returns true if `this` != `rhs`
- inline bool operator==(const Range& rhs) const {
- return begin == rhs.begin && end == rhs.end;
+    /// Returns true if `this` Source is lexicographically less than `rhs`
+ /// @param rhs source to compare against
+ /// @returns true if `this` < `rhs`
+ inline bool operator<(const Source& rhs) {
+ if (file != rhs.file) {
+ return false;
+ }
+ return range.begin < rhs.range.begin;
}
- /// Returns true of `this` range is equal to `rhs`
- /// @param rhs range to compare against
- /// @returns true if `this` == `rhs`
- inline bool operator!=(const Range& rhs) const { return !(*this == rhs); }
-
- /// The location of the first character in the range.
- Location begin;
- /// The location of one-past the last character in the range.
- Location end;
- };
-
- /// Constructs the Source with an zero initialized Range and null File.
- inline Source() : range() {}
-
- /// Constructs the Source with the Range `rng` and a null File
- /// @param rng the source range
- inline explicit Source(const Range& rng) : range(rng) {}
-
- /// Constructs the Source with the Range `loc` and a null File
- /// @param loc the start and end location for the source range
- inline explicit Source(const Location& loc) : range(Range(loc)) {}
-
- /// Constructs the Source with the Range `rng` and File `file`
- /// @param rng the source range
- /// @param f the source file
- inline Source(const Range& rng, File const* f) : range(rng), file(f) {}
-
- /// @returns a Source that points to the begin range of this Source.
- inline Source Begin() const { return Source(Range{range.begin}, file); }
-
- /// @returns a Source that points to the end range of this Source.
- inline Source End() const { return Source(Range{range.end}, file); }
-
- /// Return a column-shifted Source
- /// @param n the number of characters to shift by
- /// @returns a Source with the range's columns shifted by `n`
- inline Source operator+(size_t n) const { return Source(range + n, file); }
-
- /// Returns true of `this` Source is lexicographically less than `rhs`
- /// @param rhs source to compare against
- /// @returns true if `this` < `rhs`
- inline bool operator<(const Source& rhs) {
- if (file != rhs.file) {
- return false;
+ /// Helper function that returns the range union of two source locations. The
+ /// `start` and `end` locations are assumed to refer to the same source file.
+ /// @param start the start source of the range
+ /// @param end the end source of the range
+ /// @returns the combined source
+ inline static Source Combine(const Source& start, const Source& end) {
+ return Source(Source::Range(start.range.begin, end.range.end), start.file);
}
- return range.begin < rhs.range.begin;
- }
-
- /// Helper function that returns the range union of two source locations. The
- /// `start` and `end` locations are assumed to refer to the same source file.
- /// @param start the start source of the range
- /// @param end the end source of the range
- /// @returns the combined source
- inline static Source Combine(const Source& start, const Source& end) {
- return Source(Source::Range(start.range.begin, end.range.end), start.file);
- }
-
- /// range is the span of text this source refers to in #file
- Range range;
- /// file is the optional source content this source refers to
- const File* file = nullptr;
+
+ /// range is the span of text this source refers to in #file
+ Range range;
+ /// file is the optional source content this source refers to
+ const File* file = nullptr;
};
/// Writes the Source::Location to the std::ostream.
/// @param out the std::ostream to write to
/// @param loc the location to write
/// @returns out so calls can be chained
-inline std::ostream& operator<<(std::ostream& out,
- const Source::Location& loc) {
- out << loc.line << ":" << loc.column;
- return out;
+inline std::ostream& operator<<(std::ostream& out, const Source::Location& loc) {
+ out << loc.line << ":" << loc.column;
+ return out;
}
/// Writes the Source::Range to the std::ostream.
@@ -213,8 +207,8 @@ inline std::ostream& operator<<(std::ostream& out,
/// @param range the range to write
/// @returns out so calls can be chained
inline std::ostream& operator<<(std::ostream& out, const Source::Range& range) {
- out << "[" << range.begin << ", " << range.end << "]";
- return out;
+ out << "[" << range.begin << ", " << range.end << "]";
+ return out;
}
/// Writes the Source to the std::ostream.
@@ -227,10 +221,9 @@ std::ostream& operator<<(std::ostream& out, const Source& source);
/// @param out the std::ostream to write to
/// @param content the file content to write
/// @returns out so calls can be chained
-inline std::ostream& operator<<(std::ostream& out,
- const Source::FileContent& content) {
- out << content.data;
- return out;
+inline std::ostream& operator<<(std::ostream& out, const Source::FileContent& content) {
+ out << content.data;
+ return out;
}
} // namespace tint
diff --git a/chromium/third_party/dawn/src/tint/source_test.cc b/chromium/third_party/dawn/src/tint/source_test.cc
index a3b9825095b..5cc00781650 100644
--- a/chromium/third_party/dawn/src/tint/source_test.cc
+++ b/chromium/third_party/dawn/src/tint/source_test.cc
@@ -29,38 +29,74 @@ line three)";
using SourceFileContentTest = testing::Test;
TEST_F(SourceFileContentTest, Ctor) {
- Source::FileContent fc(kSource);
- EXPECT_EQ(fc.data, kSource);
- EXPECT_EQ(fc.data_view, kSource);
- ASSERT_EQ(fc.lines.size(), 3u);
- EXPECT_EQ(fc.lines[0], "line one");
- EXPECT_EQ(fc.lines[1], "line two");
- EXPECT_EQ(fc.lines[2], "line three");
+ Source::FileContent fc(kSource);
+ EXPECT_EQ(fc.data, kSource);
+ EXPECT_EQ(fc.data_view, kSource);
+ ASSERT_EQ(fc.lines.size(), 3u);
+ EXPECT_EQ(fc.lines[0], "line one");
+ EXPECT_EQ(fc.lines[1], "line two");
+ EXPECT_EQ(fc.lines[2], "line three");
}
TEST_F(SourceFileContentTest, CopyCtor) {
- auto src = std::make_unique<Source::FileContent>(kSource);
- Source::FileContent fc{*src};
- src.reset();
- EXPECT_EQ(fc.data, kSource);
- EXPECT_EQ(fc.data_view, kSource);
- ASSERT_EQ(fc.lines.size(), 3u);
- EXPECT_EQ(fc.lines[0], "line one");
- EXPECT_EQ(fc.lines[1], "line two");
- EXPECT_EQ(fc.lines[2], "line three");
+ auto src = std::make_unique<Source::FileContent>(kSource);
+ Source::FileContent fc{*src};
+ src.reset();
+ EXPECT_EQ(fc.data, kSource);
+ EXPECT_EQ(fc.data_view, kSource);
+ ASSERT_EQ(fc.lines.size(), 3u);
+ EXPECT_EQ(fc.lines[0], "line one");
+ EXPECT_EQ(fc.lines[1], "line two");
+ EXPECT_EQ(fc.lines[2], "line three");
}
TEST_F(SourceFileContentTest, MoveCtor) {
- auto src = std::make_unique<Source::FileContent>(kSource);
- Source::FileContent fc{std::move(*src)};
- src.reset();
- EXPECT_EQ(fc.data, kSource);
- EXPECT_EQ(fc.data_view, kSource);
- ASSERT_EQ(fc.lines.size(), 3u);
- EXPECT_EQ(fc.lines[0], "line one");
- EXPECT_EQ(fc.lines[1], "line two");
- EXPECT_EQ(fc.lines[2], "line three");
+ auto src = std::make_unique<Source::FileContent>(kSource);
+ Source::FileContent fc{std::move(*src)};
+ src.reset();
+ EXPECT_EQ(fc.data, kSource);
+ EXPECT_EQ(fc.data_view, kSource);
+ ASSERT_EQ(fc.lines.size(), 3u);
+ EXPECT_EQ(fc.lines[0], "line one");
+ EXPECT_EQ(fc.lines[1], "line two");
+ EXPECT_EQ(fc.lines[2], "line three");
}
+// Line break code points
+#define kCR "\r"
+#define kLF "\n"
+#define kVTab "\x0B"
+#define kFF "\x0C"
+#define kNL "\xC2\x85"
+#define kLS "\xE2\x80\xA8"
+#define kPS "\xE2\x80\xA9"
+
+using LineBreakTest = testing::TestWithParam<const char*>;
+TEST_P(LineBreakTest, Single) {
+ std::string src = "line one";
+ src += GetParam();
+ src += "line two";
+
+ Source::FileContent fc(src);
+ EXPECT_EQ(fc.lines.size(), 2u);
+ EXPECT_EQ(fc.lines[0], "line one");
+ EXPECT_EQ(fc.lines[1], "line two");
+}
+TEST_P(LineBreakTest, Double) {
+ std::string src = "line one";
+ src += GetParam();
+ src += GetParam();
+ src += "line two";
+
+ Source::FileContent fc(src);
+ EXPECT_EQ(fc.lines.size(), 3u);
+ EXPECT_EQ(fc.lines[0], "line one");
+ EXPECT_EQ(fc.lines[1], "");
+ EXPECT_EQ(fc.lines[2], "line two");
+}
+INSTANTIATE_TEST_SUITE_P(SourceFileContentTest,
+ LineBreakTest,
+ testing::Values(kVTab, kFF, kNL, kLS, kPS, kLF, kCR, kCR kLF));
+
} // namespace
} // namespace tint
diff --git a/chromium/third_party/dawn/src/tint/symbol.cc b/chromium/third_party/dawn/src/tint/symbol.cc
index 43218a78e4d..09656975eed 100644
--- a/chromium/third_party/dawn/src/tint/symbol.cc
+++ b/chromium/third_party/dawn/src/tint/symbol.cc
@@ -20,8 +20,7 @@ namespace tint {
Symbol::Symbol() = default;
-Symbol::Symbol(uint32_t val, tint::ProgramID program_id)
- : val_(val), program_id_(program_id) {}
+Symbol::Symbol(uint32_t val, tint::ProgramID program_id) : val_(val), program_id_(program_id) {}
#if TINT_SYMBOL_STORE_DEBUG_NAME
Symbol::Symbol(uint32_t val, tint::ProgramID program_id, std::string debug_name)
@@ -39,19 +38,17 @@ Symbol& Symbol::operator=(const Symbol& o) = default;
Symbol& Symbol::operator=(Symbol&& o) = default;
bool Symbol::operator==(const Symbol& other) const {
- TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(Symbol, program_id_,
- other.program_id_);
- return val_ == other.val_;
+ TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(Symbol, program_id_, other.program_id_);
+ return val_ == other.val_;
}
bool Symbol::operator<(const Symbol& other) const {
- TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(Symbol, program_id_,
- other.program_id_);
- return val_ < other.val_;
+ TINT_ASSERT_PROGRAM_IDS_EQUAL_IF_VALID(Symbol, program_id_, other.program_id_);
+ return val_ < other.val_;
}
std::string Symbol::to_str() const {
- return "$" + std::to_string(val_);
+ return "$" + std::to_string(val_);
}
} // namespace tint
diff --git a/chromium/third_party/dawn/src/tint/symbol.h b/chromium/third_party/dawn/src/tint/symbol.h
index 801fe6236bf..1cbc6b2245b 100644
--- a/chromium/third_party/dawn/src/tint/symbol.h
+++ b/chromium/third_party/dawn/src/tint/symbol.h
@@ -30,74 +30,74 @@ namespace tint {
/// A symbol representing a string in the system
class Symbol {
- public:
- /// Constructor
- /// An invalid symbol
- Symbol();
- /// Constructor
- /// @param val the symbol value
- /// @param program_id the identifier of the program that owns this Symbol
- Symbol(uint32_t val, tint::ProgramID program_id);
+ public:
+ /// Constructor
+ /// An invalid symbol
+ Symbol();
+ /// Constructor
+ /// @param val the symbol value
+ /// @param program_id the identifier of the program that owns this Symbol
+ Symbol(uint32_t val, tint::ProgramID program_id);
#if TINT_SYMBOL_STORE_DEBUG_NAME
- /// Constructor
- /// @param val the symbol value
- /// @param program_id the identifier of the program that owns this Symbol
- /// @param debug_name name of symbols used only for debugging
- Symbol(uint32_t val, tint::ProgramID program_id, std::string debug_name);
+ /// Constructor
+ /// @param val the symbol value
+ /// @param program_id the identifier of the program that owns this Symbol
+ /// @param debug_name name of symbols used only for debugging
+ Symbol(uint32_t val, tint::ProgramID program_id, std::string debug_name);
#endif
- /// Copy constructor
- /// @param o the symbol to copy
- Symbol(const Symbol& o);
- /// Move constructor
- /// @param o the symbol to move
- Symbol(Symbol&& o);
- /// Destructor
- ~Symbol();
-
- /// Copy assignment
- /// @param o the other symbol
- /// @returns the symbol after doing the copy
- Symbol& operator=(const Symbol& o);
- /// Move assignment
- /// @param o the other symbol
- /// @returns teh symbol after doing the move
- Symbol& operator=(Symbol&& o);
-
- /// Comparison operator
- /// @param o the other symbol
- /// @returns true if the symbols are the same
- bool operator==(const Symbol& o) const;
-
- /// Less-than operator
- /// @param o the other symbol
- /// @returns true if this symbol is ordered before symbol `o`
- bool operator<(const Symbol& o) const;
-
- /// @returns true if the symbol is valid
- bool IsValid() const { return val_ != static_cast<uint32_t>(-1); }
-
- /// @returns the value for the symbol
- uint32_t value() const { return val_; }
-
- /// Convert the symbol to a string
- /// @return the string representation of the symbol
- std::string to_str() const;
-
- /// @returns the identifier of the Program that owns this symbol.
- tint::ProgramID ProgramID() const { return program_id_; }
-
- private:
- uint32_t val_ = static_cast<uint32_t>(-1);
- tint::ProgramID program_id_;
+ /// Copy constructor
+ /// @param o the symbol to copy
+ Symbol(const Symbol& o);
+ /// Move constructor
+ /// @param o the symbol to move
+ Symbol(Symbol&& o);
+ /// Destructor
+ ~Symbol();
+
+ /// Copy assignment
+ /// @param o the other symbol
+ /// @returns the symbol after doing the copy
+ Symbol& operator=(const Symbol& o);
+ /// Move assignment
+ /// @param o the other symbol
+    /// @returns the symbol after doing the move
+ Symbol& operator=(Symbol&& o);
+
+ /// Comparison operator
+ /// @param o the other symbol
+ /// @returns true if the symbols are the same
+ bool operator==(const Symbol& o) const;
+
+ /// Less-than operator
+ /// @param o the other symbol
+ /// @returns true if this symbol is ordered before symbol `o`
+ bool operator<(const Symbol& o) const;
+
+ /// @returns true if the symbol is valid
+ bool IsValid() const { return val_ != static_cast<uint32_t>(-1); }
+
+ /// @returns the value for the symbol
+ uint32_t value() const { return val_; }
+
+ /// Convert the symbol to a string
+ /// @return the string representation of the symbol
+ std::string to_str() const;
+
+ /// @returns the identifier of the Program that owns this symbol.
+ tint::ProgramID ProgramID() const { return program_id_; }
+
+ private:
+ uint32_t val_ = static_cast<uint32_t>(-1);
+ tint::ProgramID program_id_;
#if TINT_SYMBOL_STORE_DEBUG_NAME
- std::string debug_name_;
+ std::string debug_name_;
#endif
};
/// @param sym the Symbol
/// @returns the ProgramID that owns the given Symbol
inline ProgramID ProgramIDOf(Symbol sym) {
- return sym.IsValid() ? sym.ProgramID() : ProgramID();
+ return sym.IsValid() ? sym.ProgramID() : ProgramID();
}
} // namespace tint
@@ -108,12 +108,12 @@ namespace std {
/// keys for std::unordered_map and std::unordered_set.
template <>
class hash<tint::Symbol> {
- public:
- /// @param sym the symbol to return
- /// @return the Symbol internal value
- inline std::size_t operator()(const tint::Symbol& sym) const {
- return static_cast<std::size_t>(sym.value());
- }
+ public:
+ /// @param sym the symbol to return
+ /// @return the Symbol internal value
+ inline std::size_t operator()(const tint::Symbol& sym) const {
+ return static_cast<std::size_t>(sym.value());
+ }
};
} // namespace std
diff --git a/chromium/third_party/dawn/src/tint/symbol_table.cc b/chromium/third_party/dawn/src/tint/symbol_table.cc
index 6b382dc8c0b..2b20fed9a08 100644
--- a/chromium/third_party/dawn/src/tint/symbol_table.cc
+++ b/chromium/third_party/dawn/src/tint/symbol_table.cc
@@ -18,8 +18,7 @@
namespace tint {
-SymbolTable::SymbolTable(tint::ProgramID program_id)
- : program_id_(program_id) {}
+SymbolTable::SymbolTable(tint::ProgramID program_id) : program_id_(program_id) {}
SymbolTable::SymbolTable(const SymbolTable&) = default;
@@ -32,54 +31,55 @@ SymbolTable& SymbolTable::operator=(const SymbolTable& other) = default;
SymbolTable& SymbolTable::operator=(SymbolTable&&) = default;
Symbol SymbolTable::Register(const std::string& name) {
- TINT_ASSERT(Symbol, !name.empty());
+ TINT_ASSERT(Symbol, !name.empty());
- auto it = name_to_symbol_.find(name);
- if (it != name_to_symbol_.end())
- return it->second;
+ auto it = name_to_symbol_.find(name);
+ if (it != name_to_symbol_.end()) {
+ return it->second;
+ }
#if TINT_SYMBOL_STORE_DEBUG_NAME
- Symbol sym(next_symbol_, program_id_, name);
+ Symbol sym(next_symbol_, program_id_, name);
#else
- Symbol sym(next_symbol_, program_id_);
+ Symbol sym(next_symbol_, program_id_);
#endif
- ++next_symbol_;
+ ++next_symbol_;
- name_to_symbol_[name] = sym;
- symbol_to_name_[sym] = name;
+ name_to_symbol_[name] = sym;
+ symbol_to_name_[sym] = name;
- return sym;
+ return sym;
}
Symbol SymbolTable::Get(const std::string& name) const {
- auto it = name_to_symbol_.find(name);
- return it != name_to_symbol_.end() ? it->second : Symbol();
+ auto it = name_to_symbol_.find(name);
+ return it != name_to_symbol_.end() ? it->second : Symbol();
}
std::string SymbolTable::NameFor(const Symbol symbol) const {
- TINT_ASSERT_PROGRAM_IDS_EQUAL(Symbol, program_id_, symbol);
- auto it = symbol_to_name_.find(symbol);
- if (it == symbol_to_name_.end()) {
- return symbol.to_str();
- }
+ TINT_ASSERT_PROGRAM_IDS_EQUAL(Symbol, program_id_, symbol);
+ auto it = symbol_to_name_.find(symbol);
+ if (it == symbol_to_name_.end()) {
+ return symbol.to_str();
+ }
- return it->second;
+ return it->second;
}
Symbol SymbolTable::New(std::string prefix /* = "" */) {
- if (prefix.empty()) {
- prefix = "tint_symbol";
- }
- auto it = name_to_symbol_.find(prefix);
- if (it == name_to_symbol_.end()) {
- return Register(prefix);
- }
- std::string name;
- size_t i = 1;
- do {
- name = prefix + "_" + std::to_string(i++);
- } while (name_to_symbol_.count(name));
- return Register(name);
+ if (prefix.empty()) {
+ prefix = "tint_symbol";
+ }
+ auto it = name_to_symbol_.find(prefix);
+ if (it == name_to_symbol_.end()) {
+ return Register(prefix);
+ }
+ std::string name;
+ size_t i = 1;
+ do {
+ name = prefix + "_" + std::to_string(i++);
+ } while (name_to_symbol_.count(name));
+ return Register(name);
}
} // namespace tint
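
For context only (not part of the patch): the uniquing strategy used by SymbolTable::New() above, sketched against a plain std::set instead of the symbol maps. If the prefix is taken, "prefix_1", "prefix_2", ... are tried until a free name is found. The helper name below is hypothetical.

    #include <cassert>
    #include <set>
    #include <string>

    std::string NewUniqueName(std::set<std::string>& taken, std::string prefix = "") {
        if (prefix.empty()) {
            prefix = "tint_symbol";  // same default prefix as SymbolTable::New()
        }
        std::string name = prefix;
        for (size_t i = 1; taken.count(name); ++i) {
            name = prefix + "_" + std::to_string(i);
        }
        taken.insert(name);
        return name;
    }

    int main() {
        std::set<std::string> taken;
        assert(NewUniqueName(taken, "foo") == "foo");
        assert(NewUniqueName(taken, "foo") == "foo_1");
        assert(NewUniqueName(taken, "foo") == "foo_2");
        return 0;
    }
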
diff --git a/chromium/third_party/dawn/src/tint/symbol_table.h b/chromium/third_party/dawn/src/tint/symbol_table.h
index 214916e78f3..e07d4df7cc8 100644
--- a/chromium/third_party/dawn/src/tint/symbol_table.h
+++ b/chromium/third_party/dawn/src/tint/symbol_table.h
@@ -24,76 +24,76 @@ namespace tint {
/// Holds mappings from symbols to their associated string names
class SymbolTable {
- public:
- /// Constructor
- /// @param program_id the identifier of the program that owns this symbol
- /// table
- explicit SymbolTable(tint::ProgramID program_id);
- /// Copy constructor
- SymbolTable(const SymbolTable&);
- /// Move Constructor
- SymbolTable(SymbolTable&&);
- /// Destructor
- ~SymbolTable();
-
- /// Copy assignment
- /// @param other the symbol table to copy
- /// @returns the new symbol table
- SymbolTable& operator=(const SymbolTable& other);
- /// Move assignment
- /// @param other the symbol table to move
- /// @returns the symbol table
- SymbolTable& operator=(SymbolTable&& other);
-
- /// Registers a name into the symbol table, returning the Symbol.
- /// @param name the name to register
- /// @returns the symbol representing the given name
- Symbol Register(const std::string& name);
-
- /// Returns the symbol for the given `name`
- /// @param name the name to lookup
- /// @returns the symbol for the name or symbol::kInvalid if not found.
- Symbol Get(const std::string& name) const;
-
- /// Returns the name for the given symbol
- /// @param symbol the symbol to retrieve the name for
- /// @returns the symbol name or "" if not found
- std::string NameFor(const Symbol symbol) const;
-
- /// Returns a new unique symbol with the given name, possibly suffixed with a
- /// unique number.
- /// @param name the symbol name
- /// @returns a new, unnamed symbol with the given name. If the name is already
- /// taken then this will be suffixed with an underscore and a unique numerical
- /// value
- Symbol New(std::string name = "");
-
- /// Foreach calls the callback function `F` for each symbol in the table.
- /// @param callback must be a function or function-like object with the
- /// signature: `void(Symbol, const std::string&)`
- template <typename F>
- void Foreach(F&& callback) const {
- for (auto it : symbol_to_name_) {
- callback(it.first, it.second);
+ public:
+ /// Constructor
+ /// @param program_id the identifier of the program that owns this symbol
+ /// table
+ explicit SymbolTable(tint::ProgramID program_id);
+ /// Copy constructor
+ SymbolTable(const SymbolTable&);
+ /// Move Constructor
+ SymbolTable(SymbolTable&&);
+ /// Destructor
+ ~SymbolTable();
+
+ /// Copy assignment
+ /// @param other the symbol table to copy
+ /// @returns the new symbol table
+ SymbolTable& operator=(const SymbolTable& other);
+ /// Move assignment
+ /// @param other the symbol table to move
+ /// @returns the symbol table
+ SymbolTable& operator=(SymbolTable&& other);
+
+ /// Registers a name into the symbol table, returning the Symbol.
+ /// @param name the name to register
+ /// @returns the symbol representing the given name
+ Symbol Register(const std::string& name);
+
+ /// Returns the symbol for the given `name`
+ /// @param name the name to lookup
+ /// @returns the symbol for the name or symbol::kInvalid if not found.
+ Symbol Get(const std::string& name) const;
+
+ /// Returns the name for the given symbol
+ /// @param symbol the symbol to retrieve the name for
+ /// @returns the symbol name or "" if not found
+ std::string NameFor(const Symbol symbol) const;
+
+ /// Returns a new unique symbol with the given name, possibly suffixed with a
+ /// unique number.
+ /// @param name the symbol name
+    /// @returns a new, unique symbol with the given name. If the name is already
+ /// taken then this will be suffixed with an underscore and a unique numerical
+ /// value
+ Symbol New(std::string name = "");
+
+ /// Foreach calls the callback function `F` for each symbol in the table.
+ /// @param callback must be a function or function-like object with the
+ /// signature: `void(Symbol, const std::string&)`
+ template <typename F>
+ void Foreach(F&& callback) const {
+ for (auto it : symbol_to_name_) {
+ callback(it.first, it.second);
+ }
}
- }
- /// @returns the identifier of the Program that owns this symbol table.
- tint::ProgramID ProgramID() const { return program_id_; }
+ /// @returns the identifier of the Program that owns this symbol table.
+ tint::ProgramID ProgramID() const { return program_id_; }
- private:
- // The value to be associated to the next registered symbol table entry.
- uint32_t next_symbol_ = 1;
+ private:
+ // The value to be associated to the next registered symbol table entry.
+ uint32_t next_symbol_ = 1;
- std::unordered_map<Symbol, std::string> symbol_to_name_;
- std::unordered_map<std::string, Symbol> name_to_symbol_;
- tint::ProgramID program_id_;
+ std::unordered_map<Symbol, std::string> symbol_to_name_;
+ std::unordered_map<std::string, Symbol> name_to_symbol_;
+ tint::ProgramID program_id_;
};
/// @param symbol_table the SymbolTable
/// @returns the ProgramID that owns the given SymbolTable
inline ProgramID ProgramIDOf(const SymbolTable& symbol_table) {
- return symbol_table.ProgramID();
+ return symbol_table.ProgramID();
}
} // namespace tint
diff --git a/chromium/third_party/dawn/src/tint/symbol_table_test.cc b/chromium/third_party/dawn/src/tint/symbol_table_test.cc
index 0905f8b3ad9..1cf6d1aa9ce 100644
--- a/chromium/third_party/dawn/src/tint/symbol_table_test.cc
+++ b/chromium/third_party/dawn/src/tint/symbol_table_test.cc
@@ -22,41 +22,41 @@ namespace {
using SymbolTableTest = testing::Test;
TEST_F(SymbolTableTest, GeneratesSymbolForName) {
- auto program_id = ProgramID::New();
- SymbolTable s{program_id};
- EXPECT_EQ(Symbol(1, program_id), s.Register("name"));
- EXPECT_EQ(Symbol(2, program_id), s.Register("another_name"));
+ auto program_id = ProgramID::New();
+ SymbolTable s{program_id};
+ EXPECT_EQ(Symbol(1, program_id), s.Register("name"));
+ EXPECT_EQ(Symbol(2, program_id), s.Register("another_name"));
}
TEST_F(SymbolTableTest, DeduplicatesNames) {
- auto program_id = ProgramID::New();
- SymbolTable s{program_id};
- EXPECT_EQ(Symbol(1, program_id), s.Register("name"));
- EXPECT_EQ(Symbol(2, program_id), s.Register("another_name"));
- EXPECT_EQ(Symbol(1, program_id), s.Register("name"));
+ auto program_id = ProgramID::New();
+ SymbolTable s{program_id};
+ EXPECT_EQ(Symbol(1, program_id), s.Register("name"));
+ EXPECT_EQ(Symbol(2, program_id), s.Register("another_name"));
+ EXPECT_EQ(Symbol(1, program_id), s.Register("name"));
}
TEST_F(SymbolTableTest, ReturnsNameForSymbol) {
- auto program_id = ProgramID::New();
- SymbolTable s{program_id};
- auto sym = s.Register("name");
- EXPECT_EQ("name", s.NameFor(sym));
+ auto program_id = ProgramID::New();
+ SymbolTable s{program_id};
+ auto sym = s.Register("name");
+ EXPECT_EQ("name", s.NameFor(sym));
}
TEST_F(SymbolTableTest, ReturnsBlankForMissingSymbol) {
- auto program_id = ProgramID::New();
- SymbolTable s{program_id};
- EXPECT_EQ("$2", s.NameFor(Symbol(2, program_id)));
+ auto program_id = ProgramID::New();
+ SymbolTable s{program_id};
+ EXPECT_EQ("$2", s.NameFor(Symbol(2, program_id)));
}
TEST_F(SymbolTableTest, AssertsForBlankString) {
- EXPECT_FATAL_FAILURE(
- {
- auto program_id = ProgramID::New();
- SymbolTable s{program_id};
- s.Register("");
- },
- "internal compiler error");
+ EXPECT_FATAL_FAILURE(
+ {
+ auto program_id = ProgramID::New();
+ SymbolTable s{program_id};
+ s.Register("");
+ },
+ "internal compiler error");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/symbol_test.cc b/chromium/third_party/dawn/src/tint/symbol_test.cc
index 8e3f6c08b29..22135b56653 100644
--- a/chromium/third_party/dawn/src/tint/symbol_test.cc
+++ b/chromium/third_party/dawn/src/tint/symbol_test.cc
@@ -22,29 +22,29 @@ namespace {
using SymbolTest = testing::Test;
TEST_F(SymbolTest, ToStr) {
- Symbol sym(1, ProgramID::New());
- EXPECT_EQ("$1", sym.to_str());
+ Symbol sym(1, ProgramID::New());
+ EXPECT_EQ("$1", sym.to_str());
}
TEST_F(SymbolTest, CopyAssign) {
- Symbol sym1(1, ProgramID::New());
- Symbol sym2;
+ Symbol sym1(1, ProgramID::New());
+ Symbol sym2;
- EXPECT_FALSE(sym2.IsValid());
- sym2 = sym1;
- EXPECT_TRUE(sym2.IsValid());
- EXPECT_EQ(sym2, sym1);
+ EXPECT_FALSE(sym2.IsValid());
+ sym2 = sym1;
+ EXPECT_TRUE(sym2.IsValid());
+ EXPECT_EQ(sym2, sym1);
}
TEST_F(SymbolTest, Comparison) {
- auto program_id = ProgramID::New();
- Symbol sym1(1, program_id);
- Symbol sym2(2, program_id);
- Symbol sym3(1, program_id);
-
- EXPECT_TRUE(sym1 == sym3);
- EXPECT_FALSE(sym1 == sym2);
- EXPECT_FALSE(sym3 == sym2);
+ auto program_id = ProgramID::New();
+ Symbol sym1(1, program_id);
+ Symbol sym2(2, program_id);
+ Symbol sym3(1, program_id);
+
+ EXPECT_TRUE(sym1 == sym3);
+ EXPECT_FALSE(sym1 == sym2);
+ EXPECT_FALSE(sym3 == sym2);
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/test_main.cc b/chromium/third_party/dawn/src/tint/test_main.cc
index d44f9c8ea5b..ca68271587c 100644
--- a/chromium/third_party/dawn/src/tint/test_main.cc
+++ b/chromium/third_party/dawn/src/tint/test_main.cc
@@ -26,58 +26,58 @@
namespace {
void TintInternalCompilerErrorReporter(const tint::diag::List& diagnostics) {
- FAIL() << diagnostics.str();
+ FAIL() << diagnostics.str();
}
struct Flags {
- bool spirv_reader_dump_converted = false;
-
- bool parse(int argc, char** argv) {
- bool errored = false;
- for (int i = 1; i < argc && !errored; i++) {
- auto match = [&](std::string name) { return name == argv[i]; };
-
- if (match("--dump-spirv")) {
- spirv_reader_dump_converted = true;
- } else {
- std::cout << "Unknown flag '" << argv[i] << "'" << std::endl;
- return false;
- }
+ bool spirv_reader_dump_converted = false;
+
+ bool parse(int argc, char** argv) {
+ bool errored = false;
+ for (int i = 1; i < argc && !errored; i++) {
+ auto match = [&](std::string name) { return name == argv[i]; };
+
+ if (match("--dump-spirv")) {
+ spirv_reader_dump_converted = true;
+ } else {
+ std::cout << "Unknown flag '" << argv[i] << "'" << std::endl;
+ return false;
+ }
+ }
+ return true;
}
- return true;
- }
};
} // namespace
// Entry point for tint unit tests
int main(int argc, char** argv) {
- testing::InitGoogleMock(&argc, argv);
+ testing::InitGoogleMock(&argc, argv);
#if TINT_BUILD_WGSL_WRITER
- tint::Program::printer = [](const tint::Program* program) {
- auto result = tint::writer::wgsl::Generate(program, {});
- if (!result.error.empty()) {
- return "error: " + result.error;
- }
- return result.wgsl;
- };
+ tint::Program::printer = [](const tint::Program* program) {
+ auto result = tint::writer::wgsl::Generate(program, {});
+ if (!result.error.empty()) {
+ return "error: " + result.error;
+ }
+ return result.wgsl;
+ };
#endif // TINT_BUILD_WGSL_WRITER
- Flags flags;
- if (!flags.parse(argc, argv)) {
- return -1;
- }
+ Flags flags;
+ if (!flags.parse(argc, argv)) {
+ return -1;
+ }
#if TINT_BUILD_SPV_READER
- if (flags.spirv_reader_dump_converted) {
- tint::reader::spirv::test::DumpSuccessfullyConvertedSpirv();
- }
+ if (flags.spirv_reader_dump_converted) {
+ tint::reader::spirv::test::DumpSuccessfullyConvertedSpirv();
+ }
#endif // TINT_BUILD_SPV_READER
- tint::SetInternalCompilerErrorReporter(&TintInternalCompilerErrorReporter);
+ tint::SetInternalCompilerErrorReporter(&TintInternalCompilerErrorReporter);
- auto res = RUN_ALL_TESTS();
+ auto res = RUN_ALL_TESTS();
- return res;
+ return res;
}
diff --git a/chromium/third_party/dawn/src/tint/text/unicode.cc b/chromium/third_party/dawn/src/tint/text/unicode.cc
index b48f295e249..bf28c4e5bcc 100644
--- a/chromium/third_party/dawn/src/tint/text/unicode.cc
+++ b/chromium/third_party/dawn/src/tint/text/unicode.cc
@@ -20,493 +20,404 @@ namespace tint::text {
namespace {
struct CodePointRange {
- uint32_t first; // First code point in the interval
- uint32_t last; // Last code point in the interval (inclusive)
+ uint32_t first; // First code point in the interval
+ uint32_t last; // Last code point in the interval (inclusive)
};
inline bool operator<(CodePoint code_point, CodePointRange range) {
- return code_point < range.first;
+ return code_point < range.first;
}
inline bool operator<(CodePointRange range, CodePoint code_point) {
- return range.last < code_point;
+ return range.last < code_point;
}
// Interval ranges of all code points in the Unicode 14 XID_Start set
// This array needs to be in ascending order.
constexpr CodePointRange kXIDStartRanges[] = {
- {0x00041, 0x0005a}, {0x00061, 0x0007a}, {0x000aa, 0x000aa},
- {0x000b5, 0x000b5}, {0x000ba, 0x000ba}, {0x000c0, 0x000d6},
- {0x000d8, 0x000f6}, {0x000f8, 0x002c1}, {0x002c6, 0x002d1},
- {0x002e0, 0x002e4}, {0x002ec, 0x002ec}, {0x002ee, 0x002ee},
- {0x00370, 0x00374}, {0x00376, 0x00377}, {0x0037b, 0x0037d},
- {0x0037f, 0x0037f}, {0x00386, 0x00386}, {0x00388, 0x0038a},
- {0x0038c, 0x0038c}, {0x0038e, 0x003a1}, {0x003a3, 0x003f5},
- {0x003f7, 0x00481}, {0x0048a, 0x0052f}, {0x00531, 0x00556},
- {0x00559, 0x00559}, {0x00560, 0x00588}, {0x005d0, 0x005ea},
- {0x005ef, 0x005f2}, {0x00620, 0x0064a}, {0x0066e, 0x0066f},
- {0x00671, 0x006d3}, {0x006d5, 0x006d5}, {0x006e5, 0x006e6},
- {0x006ee, 0x006ef}, {0x006fa, 0x006fc}, {0x006ff, 0x006ff},
- {0x00710, 0x00710}, {0x00712, 0x0072f}, {0x0074d, 0x007a5},
- {0x007b1, 0x007b1}, {0x007ca, 0x007ea}, {0x007f4, 0x007f5},
- {0x007fa, 0x007fa}, {0x00800, 0x00815}, {0x0081a, 0x0081a},
- {0x00824, 0x00824}, {0x00828, 0x00828}, {0x00840, 0x00858},
- {0x00860, 0x0086a}, {0x00870, 0x00887}, {0x00889, 0x0088e},
- {0x008a0, 0x008c9}, {0x00904, 0x00939}, {0x0093d, 0x0093d},
- {0x00950, 0x00950}, {0x00958, 0x00961}, {0x00971, 0x00980},
- {0x00985, 0x0098c}, {0x0098f, 0x00990}, {0x00993, 0x009a8},
- {0x009aa, 0x009b0}, {0x009b2, 0x009b2}, {0x009b6, 0x009b9},
- {0x009bd, 0x009bd}, {0x009ce, 0x009ce}, {0x009dc, 0x009dd},
- {0x009df, 0x009e1}, {0x009f0, 0x009f1}, {0x009fc, 0x009fc},
- {0x00a05, 0x00a0a}, {0x00a0f, 0x00a10}, {0x00a13, 0x00a28},
- {0x00a2a, 0x00a30}, {0x00a32, 0x00a33}, {0x00a35, 0x00a36},
- {0x00a38, 0x00a39}, {0x00a59, 0x00a5c}, {0x00a5e, 0x00a5e},
- {0x00a72, 0x00a74}, {0x00a85, 0x00a8d}, {0x00a8f, 0x00a91},
- {0x00a93, 0x00aa8}, {0x00aaa, 0x00ab0}, {0x00ab2, 0x00ab3},
- {0x00ab5, 0x00ab9}, {0x00abd, 0x00abd}, {0x00ad0, 0x00ad0},
- {0x00ae0, 0x00ae1}, {0x00af9, 0x00af9}, {0x00b05, 0x00b0c},
- {0x00b0f, 0x00b10}, {0x00b13, 0x00b28}, {0x00b2a, 0x00b30},
- {0x00b32, 0x00b33}, {0x00b35, 0x00b39}, {0x00b3d, 0x00b3d},
- {0x00b5c, 0x00b5d}, {0x00b5f, 0x00b61}, {0x00b71, 0x00b71},
- {0x00b83, 0x00b83}, {0x00b85, 0x00b8a}, {0x00b8e, 0x00b90},
- {0x00b92, 0x00b95}, {0x00b99, 0x00b9a}, {0x00b9c, 0x00b9c},
- {0x00b9e, 0x00b9f}, {0x00ba3, 0x00ba4}, {0x00ba8, 0x00baa},
- {0x00bae, 0x00bb9}, {0x00bd0, 0x00bd0}, {0x00c05, 0x00c0c},
- {0x00c0e, 0x00c10}, {0x00c12, 0x00c28}, {0x00c2a, 0x00c39},
- {0x00c3d, 0x00c3d}, {0x00c58, 0x00c5a}, {0x00c5d, 0x00c5d},
- {0x00c60, 0x00c61}, {0x00c80, 0x00c80}, {0x00c85, 0x00c8c},
- {0x00c8e, 0x00c90}, {0x00c92, 0x00ca8}, {0x00caa, 0x00cb3},
- {0x00cb5, 0x00cb9}, {0x00cbd, 0x00cbd}, {0x00cdd, 0x00cde},
- {0x00ce0, 0x00ce1}, {0x00cf1, 0x00cf2}, {0x00d04, 0x00d0c},
- {0x00d0e, 0x00d10}, {0x00d12, 0x00d3a}, {0x00d3d, 0x00d3d},
- {0x00d4e, 0x00d4e}, {0x00d54, 0x00d56}, {0x00d5f, 0x00d61},
- {0x00d7a, 0x00d7f}, {0x00d85, 0x00d96}, {0x00d9a, 0x00db1},
- {0x00db3, 0x00dbb}, {0x00dbd, 0x00dbd}, {0x00dc0, 0x00dc6},
- {0x00e01, 0x00e30}, {0x00e32, 0x00e32}, {0x00e40, 0x00e46},
- {0x00e81, 0x00e82}, {0x00e84, 0x00e84}, {0x00e86, 0x00e8a},
- {0x00e8c, 0x00ea3}, {0x00ea5, 0x00ea5}, {0x00ea7, 0x00eb0},
- {0x00eb2, 0x00eb2}, {0x00ebd, 0x00ebd}, {0x00ec0, 0x00ec4},
- {0x00ec6, 0x00ec6}, {0x00edc, 0x00edf}, {0x00f00, 0x00f00},
- {0x00f40, 0x00f47}, {0x00f49, 0x00f6c}, {0x00f88, 0x00f8c},
- {0x01000, 0x0102a}, {0x0103f, 0x0103f}, {0x01050, 0x01055},
- {0x0105a, 0x0105d}, {0x01061, 0x01061}, {0x01065, 0x01066},
- {0x0106e, 0x01070}, {0x01075, 0x01081}, {0x0108e, 0x0108e},
- {0x010a0, 0x010c5}, {0x010c7, 0x010c7}, {0x010cd, 0x010cd},
- {0x010d0, 0x010fa}, {0x010fc, 0x01248}, {0x0124a, 0x0124d},
- {0x01250, 0x01256}, {0x01258, 0x01258}, {0x0125a, 0x0125d},
- {0x01260, 0x01288}, {0x0128a, 0x0128d}, {0x01290, 0x012b0},
- {0x012b2, 0x012b5}, {0x012b8, 0x012be}, {0x012c0, 0x012c0},
- {0x012c2, 0x012c5}, {0x012c8, 0x012d6}, {0x012d8, 0x01310},
- {0x01312, 0x01315}, {0x01318, 0x0135a}, {0x01380, 0x0138f},
- {0x013a0, 0x013f5}, {0x013f8, 0x013fd}, {0x01401, 0x0166c},
- {0x0166f, 0x0167f}, {0x01681, 0x0169a}, {0x016a0, 0x016ea},
- {0x016ee, 0x016f8}, {0x01700, 0x01711}, {0x0171f, 0x01731},
- {0x01740, 0x01751}, {0x01760, 0x0176c}, {0x0176e, 0x01770},
- {0x01780, 0x017b3}, {0x017d7, 0x017d7}, {0x017dc, 0x017dc},
- {0x01820, 0x01878}, {0x01880, 0x018a8}, {0x018aa, 0x018aa},
- {0x018b0, 0x018f5}, {0x01900, 0x0191e}, {0x01950, 0x0196d},
- {0x01970, 0x01974}, {0x01980, 0x019ab}, {0x019b0, 0x019c9},
- {0x01a00, 0x01a16}, {0x01a20, 0x01a54}, {0x01aa7, 0x01aa7},
- {0x01b05, 0x01b33}, {0x01b45, 0x01b4c}, {0x01b83, 0x01ba0},
- {0x01bae, 0x01baf}, {0x01bba, 0x01be5}, {0x01c00, 0x01c23},
- {0x01c4d, 0x01c4f}, {0x01c5a, 0x01c7d}, {0x01c80, 0x01c88},
- {0x01c90, 0x01cba}, {0x01cbd, 0x01cbf}, {0x01ce9, 0x01cec},
- {0x01cee, 0x01cf3}, {0x01cf5, 0x01cf6}, {0x01cfa, 0x01cfa},
- {0x01d00, 0x01dbf}, {0x01e00, 0x01f15}, {0x01f18, 0x01f1d},
- {0x01f20, 0x01f45}, {0x01f48, 0x01f4d}, {0x01f50, 0x01f57},
- {0x01f59, 0x01f59}, {0x01f5b, 0x01f5b}, {0x01f5d, 0x01f5d},
- {0x01f5f, 0x01f7d}, {0x01f80, 0x01fb4}, {0x01fb6, 0x01fbc},
- {0x01fbe, 0x01fbe}, {0x01fc2, 0x01fc4}, {0x01fc6, 0x01fcc},
- {0x01fd0, 0x01fd3}, {0x01fd6, 0x01fdb}, {0x01fe0, 0x01fec},
- {0x01ff2, 0x01ff4}, {0x01ff6, 0x01ffc}, {0x02071, 0x02071},
- {0x0207f, 0x0207f}, {0x02090, 0x0209c}, {0x02102, 0x02102},
- {0x02107, 0x02107}, {0x0210a, 0x02113}, {0x02115, 0x02115},
- {0x02118, 0x0211d}, {0x02124, 0x02124}, {0x02126, 0x02126},
- {0x02128, 0x02128}, {0x0212a, 0x02139}, {0x0213c, 0x0213f},
- {0x02145, 0x02149}, {0x0214e, 0x0214e}, {0x02160, 0x02188},
- {0x02c00, 0x02ce4}, {0x02ceb, 0x02cee}, {0x02cf2, 0x02cf3},
- {0x02d00, 0x02d25}, {0x02d27, 0x02d27}, {0x02d2d, 0x02d2d},
- {0x02d30, 0x02d67}, {0x02d6f, 0x02d6f}, {0x02d80, 0x02d96},
- {0x02da0, 0x02da6}, {0x02da8, 0x02dae}, {0x02db0, 0x02db6},
- {0x02db8, 0x02dbe}, {0x02dc0, 0x02dc6}, {0x02dc8, 0x02dce},
- {0x02dd0, 0x02dd6}, {0x02dd8, 0x02dde}, {0x03005, 0x03007},
- {0x03021, 0x03029}, {0x03031, 0x03035}, {0x03038, 0x0303c},
- {0x03041, 0x03096}, {0x0309d, 0x0309f}, {0x030a1, 0x030fa},
- {0x030fc, 0x030ff}, {0x03105, 0x0312f}, {0x03131, 0x0318e},
- {0x031a0, 0x031bf}, {0x031f0, 0x031ff}, {0x03400, 0x04dbf},
- {0x04e00, 0x0a48c}, {0x0a4d0, 0x0a4fd}, {0x0a500, 0x0a60c},
- {0x0a610, 0x0a61f}, {0x0a62a, 0x0a62b}, {0x0a640, 0x0a66e},
- {0x0a67f, 0x0a69d}, {0x0a6a0, 0x0a6ef}, {0x0a717, 0x0a71f},
- {0x0a722, 0x0a788}, {0x0a78b, 0x0a7ca}, {0x0a7d0, 0x0a7d1},
- {0x0a7d3, 0x0a7d3}, {0x0a7d5, 0x0a7d9}, {0x0a7f2, 0x0a801},
- {0x0a803, 0x0a805}, {0x0a807, 0x0a80a}, {0x0a80c, 0x0a822},
- {0x0a840, 0x0a873}, {0x0a882, 0x0a8b3}, {0x0a8f2, 0x0a8f7},
- {0x0a8fb, 0x0a8fb}, {0x0a8fd, 0x0a8fe}, {0x0a90a, 0x0a925},
- {0x0a930, 0x0a946}, {0x0a960, 0x0a97c}, {0x0a984, 0x0a9b2},
- {0x0a9cf, 0x0a9cf}, {0x0a9e0, 0x0a9e4}, {0x0a9e6, 0x0a9ef},
- {0x0a9fa, 0x0a9fe}, {0x0aa00, 0x0aa28}, {0x0aa40, 0x0aa42},
- {0x0aa44, 0x0aa4b}, {0x0aa60, 0x0aa76}, {0x0aa7a, 0x0aa7a},
- {0x0aa7e, 0x0aaaf}, {0x0aab1, 0x0aab1}, {0x0aab5, 0x0aab6},
- {0x0aab9, 0x0aabd}, {0x0aac0, 0x0aac0}, {0x0aac2, 0x0aac2},
- {0x0aadb, 0x0aadd}, {0x0aae0, 0x0aaea}, {0x0aaf2, 0x0aaf4},
- {0x0ab01, 0x0ab06}, {0x0ab09, 0x0ab0e}, {0x0ab11, 0x0ab16},
- {0x0ab20, 0x0ab26}, {0x0ab28, 0x0ab2e}, {0x0ab30, 0x0ab5a},
- {0x0ab5c, 0x0ab69}, {0x0ab70, 0x0abe2}, {0x0ac00, 0x0d7a3},
- {0x0d7b0, 0x0d7c6}, {0x0d7cb, 0x0d7fb}, {0x0f900, 0x0fa6d},
- {0x0fa70, 0x0fad9}, {0x0fb00, 0x0fb06}, {0x0fb13, 0x0fb17},
- {0x0fb1d, 0x0fb1d}, {0x0fb1f, 0x0fb28}, {0x0fb2a, 0x0fb36},
- {0x0fb38, 0x0fb3c}, {0x0fb3e, 0x0fb3e}, {0x0fb40, 0x0fb41},
- {0x0fb43, 0x0fb44}, {0x0fb46, 0x0fbb1}, {0x0fbd3, 0x0fc5d},
- {0x0fc64, 0x0fd3d}, {0x0fd50, 0x0fd8f}, {0x0fd92, 0x0fdc7},
- {0x0fdf0, 0x0fdf9}, {0x0fe71, 0x0fe71}, {0x0fe73, 0x0fe73},
- {0x0fe77, 0x0fe77}, {0x0fe79, 0x0fe79}, {0x0fe7b, 0x0fe7b},
- {0x0fe7d, 0x0fe7d}, {0x0fe7f, 0x0fefc}, {0x0ff21, 0x0ff3a},
- {0x0ff41, 0x0ff5a}, {0x0ff66, 0x0ff9d}, {0x0ffa0, 0x0ffbe},
- {0x0ffc2, 0x0ffc7}, {0x0ffca, 0x0ffcf}, {0x0ffd2, 0x0ffd7},
- {0x0ffda, 0x0ffdc}, {0x10000, 0x1000b}, {0x1000d, 0x10026},
- {0x10028, 0x1003a}, {0x1003c, 0x1003d}, {0x1003f, 0x1004d},
- {0x10050, 0x1005d}, {0x10080, 0x100fa}, {0x10140, 0x10174},
- {0x10280, 0x1029c}, {0x102a0, 0x102d0}, {0x10300, 0x1031f},
- {0x1032d, 0x1034a}, {0x10350, 0x10375}, {0x10380, 0x1039d},
- {0x103a0, 0x103c3}, {0x103c8, 0x103cf}, {0x103d1, 0x103d5},
- {0x10400, 0x1049d}, {0x104b0, 0x104d3}, {0x104d8, 0x104fb},
- {0x10500, 0x10527}, {0x10530, 0x10563}, {0x10570, 0x1057a},
- {0x1057c, 0x1058a}, {0x1058c, 0x10592}, {0x10594, 0x10595},
- {0x10597, 0x105a1}, {0x105a3, 0x105b1}, {0x105b3, 0x105b9},
- {0x105bb, 0x105bc}, {0x10600, 0x10736}, {0x10740, 0x10755},
- {0x10760, 0x10767}, {0x10780, 0x10785}, {0x10787, 0x107b0},
- {0x107b2, 0x107ba}, {0x10800, 0x10805}, {0x10808, 0x10808},
- {0x1080a, 0x10835}, {0x10837, 0x10838}, {0x1083c, 0x1083c},
- {0x1083f, 0x10855}, {0x10860, 0x10876}, {0x10880, 0x1089e},
- {0x108e0, 0x108f2}, {0x108f4, 0x108f5}, {0x10900, 0x10915},
- {0x10920, 0x10939}, {0x10980, 0x109b7}, {0x109be, 0x109bf},
- {0x10a00, 0x10a00}, {0x10a10, 0x10a13}, {0x10a15, 0x10a17},
- {0x10a19, 0x10a35}, {0x10a60, 0x10a7c}, {0x10a80, 0x10a9c},
- {0x10ac0, 0x10ac7}, {0x10ac9, 0x10ae4}, {0x10b00, 0x10b35},
- {0x10b40, 0x10b55}, {0x10b60, 0x10b72}, {0x10b80, 0x10b91},
- {0x10c00, 0x10c48}, {0x10c80, 0x10cb2}, {0x10cc0, 0x10cf2},
- {0x10d00, 0x10d23}, {0x10e80, 0x10ea9}, {0x10eb0, 0x10eb1},
- {0x10f00, 0x10f1c}, {0x10f27, 0x10f27}, {0x10f30, 0x10f45},
- {0x10f70, 0x10f81}, {0x10fb0, 0x10fc4}, {0x10fe0, 0x10ff6},
- {0x11003, 0x11037}, {0x11071, 0x11072}, {0x11075, 0x11075},
- {0x11083, 0x110af}, {0x110d0, 0x110e8}, {0x11103, 0x11126},
- {0x11144, 0x11144}, {0x11147, 0x11147}, {0x11150, 0x11172},
- {0x11176, 0x11176}, {0x11183, 0x111b2}, {0x111c1, 0x111c4},
- {0x111da, 0x111da}, {0x111dc, 0x111dc}, {0x11200, 0x11211},
- {0x11213, 0x1122b}, {0x11280, 0x11286}, {0x11288, 0x11288},
- {0x1128a, 0x1128d}, {0x1128f, 0x1129d}, {0x1129f, 0x112a8},
- {0x112b0, 0x112de}, {0x11305, 0x1130c}, {0x1130f, 0x11310},
- {0x11313, 0x11328}, {0x1132a, 0x11330}, {0x11332, 0x11333},
- {0x11335, 0x11339}, {0x1133d, 0x1133d}, {0x11350, 0x11350},
- {0x1135d, 0x11361}, {0x11400, 0x11434}, {0x11447, 0x1144a},
- {0x1145f, 0x11461}, {0x11480, 0x114af}, {0x114c4, 0x114c5},
- {0x114c7, 0x114c7}, {0x11580, 0x115ae}, {0x115d8, 0x115db},
- {0x11600, 0x1162f}, {0x11644, 0x11644}, {0x11680, 0x116aa},
- {0x116b8, 0x116b8}, {0x11700, 0x1171a}, {0x11740, 0x11746},
- {0x11800, 0x1182b}, {0x118a0, 0x118df}, {0x118ff, 0x11906},
- {0x11909, 0x11909}, {0x1190c, 0x11913}, {0x11915, 0x11916},
- {0x11918, 0x1192f}, {0x1193f, 0x1193f}, {0x11941, 0x11941},
- {0x119a0, 0x119a7}, {0x119aa, 0x119d0}, {0x119e1, 0x119e1},
- {0x119e3, 0x119e3}, {0x11a00, 0x11a00}, {0x11a0b, 0x11a32},
- {0x11a3a, 0x11a3a}, {0x11a50, 0x11a50}, {0x11a5c, 0x11a89},
- {0x11a9d, 0x11a9d}, {0x11ab0, 0x11af8}, {0x11c00, 0x11c08},
- {0x11c0a, 0x11c2e}, {0x11c40, 0x11c40}, {0x11c72, 0x11c8f},
- {0x11d00, 0x11d06}, {0x11d08, 0x11d09}, {0x11d0b, 0x11d30},
- {0x11d46, 0x11d46}, {0x11d60, 0x11d65}, {0x11d67, 0x11d68},
- {0x11d6a, 0x11d89}, {0x11d98, 0x11d98}, {0x11ee0, 0x11ef2},
- {0x11fb0, 0x11fb0}, {0x12000, 0x12399}, {0x12400, 0x1246e},
- {0x12480, 0x12543}, {0x12f90, 0x12ff0}, {0x13000, 0x1342e},
- {0x14400, 0x14646}, {0x16800, 0x16a38}, {0x16a40, 0x16a5e},
- {0x16a70, 0x16abe}, {0x16ad0, 0x16aed}, {0x16b00, 0x16b2f},
- {0x16b40, 0x16b43}, {0x16b63, 0x16b77}, {0x16b7d, 0x16b8f},
- {0x16e40, 0x16e7f}, {0x16f00, 0x16f4a}, {0x16f50, 0x16f50},
- {0x16f93, 0x16f9f}, {0x16fe0, 0x16fe1}, {0x16fe3, 0x16fe3},
- {0x17000, 0x187f7}, {0x18800, 0x18cd5}, {0x18d00, 0x18d08},
- {0x1aff0, 0x1aff3}, {0x1aff5, 0x1affb}, {0x1affd, 0x1affe},
- {0x1b000, 0x1b122}, {0x1b150, 0x1b152}, {0x1b164, 0x1b167},
- {0x1b170, 0x1b2fb}, {0x1bc00, 0x1bc6a}, {0x1bc70, 0x1bc7c},
- {0x1bc80, 0x1bc88}, {0x1bc90, 0x1bc99}, {0x1d400, 0x1d454},
- {0x1d456, 0x1d49c}, {0x1d49e, 0x1d49f}, {0x1d4a2, 0x1d4a2},
- {0x1d4a5, 0x1d4a6}, {0x1d4a9, 0x1d4ac}, {0x1d4ae, 0x1d4b9},
- {0x1d4bb, 0x1d4bb}, {0x1d4bd, 0x1d4c3}, {0x1d4c5, 0x1d505},
- {0x1d507, 0x1d50a}, {0x1d50d, 0x1d514}, {0x1d516, 0x1d51c},
- {0x1d51e, 0x1d539}, {0x1d53b, 0x1d53e}, {0x1d540, 0x1d544},
- {0x1d546, 0x1d546}, {0x1d54a, 0x1d550}, {0x1d552, 0x1d6a5},
- {0x1d6a8, 0x1d6c0}, {0x1d6c2, 0x1d6da}, {0x1d6dc, 0x1d6fa},
- {0x1d6fc, 0x1d714}, {0x1d716, 0x1d734}, {0x1d736, 0x1d74e},
- {0x1d750, 0x1d76e}, {0x1d770, 0x1d788}, {0x1d78a, 0x1d7a8},
- {0x1d7aa, 0x1d7c2}, {0x1d7c4, 0x1d7cb}, {0x1df00, 0x1df1e},
- {0x1e100, 0x1e12c}, {0x1e137, 0x1e13d}, {0x1e14e, 0x1e14e},
- {0x1e290, 0x1e2ad}, {0x1e2c0, 0x1e2eb}, {0x1e7e0, 0x1e7e6},
- {0x1e7e8, 0x1e7eb}, {0x1e7ed, 0x1e7ee}, {0x1e7f0, 0x1e7fe},
- {0x1e800, 0x1e8c4}, {0x1e900, 0x1e943}, {0x1e94b, 0x1e94b},
- {0x1ee00, 0x1ee03}, {0x1ee05, 0x1ee1f}, {0x1ee21, 0x1ee22},
- {0x1ee24, 0x1ee24}, {0x1ee27, 0x1ee27}, {0x1ee29, 0x1ee32},
- {0x1ee34, 0x1ee37}, {0x1ee39, 0x1ee39}, {0x1ee3b, 0x1ee3b},
- {0x1ee42, 0x1ee42}, {0x1ee47, 0x1ee47}, {0x1ee49, 0x1ee49},
- {0x1ee4b, 0x1ee4b}, {0x1ee4d, 0x1ee4f}, {0x1ee51, 0x1ee52},
- {0x1ee54, 0x1ee54}, {0x1ee57, 0x1ee57}, {0x1ee59, 0x1ee59},
- {0x1ee5b, 0x1ee5b}, {0x1ee5d, 0x1ee5d}, {0x1ee5f, 0x1ee5f},
- {0x1ee61, 0x1ee62}, {0x1ee64, 0x1ee64}, {0x1ee67, 0x1ee6a},
- {0x1ee6c, 0x1ee72}, {0x1ee74, 0x1ee77}, {0x1ee79, 0x1ee7c},
- {0x1ee7e, 0x1ee7e}, {0x1ee80, 0x1ee89}, {0x1ee8b, 0x1ee9b},
- {0x1eea1, 0x1eea3}, {0x1eea5, 0x1eea9}, {0x1eeab, 0x1eebb},
- {0x20000, 0x2a6df}, {0x2a700, 0x2b738}, {0x2b740, 0x2b81d},
- {0x2b820, 0x2cea1}, {0x2ceb0, 0x2ebe0}, {0x2f800, 0x2fa1d},
- {0x30000, 0x3134a},
+ {0x00041, 0x0005a}, {0x00061, 0x0007a}, {0x000aa, 0x000aa}, {0x000b5, 0x000b5},
+ {0x000ba, 0x000ba}, {0x000c0, 0x000d6}, {0x000d8, 0x000f6}, {0x000f8, 0x002c1},
+ {0x002c6, 0x002d1}, {0x002e0, 0x002e4}, {0x002ec, 0x002ec}, {0x002ee, 0x002ee},
+ {0x00370, 0x00374}, {0x00376, 0x00377}, {0x0037b, 0x0037d}, {0x0037f, 0x0037f},
+ {0x00386, 0x00386}, {0x00388, 0x0038a}, {0x0038c, 0x0038c}, {0x0038e, 0x003a1},
+ {0x003a3, 0x003f5}, {0x003f7, 0x00481}, {0x0048a, 0x0052f}, {0x00531, 0x00556},
+ {0x00559, 0x00559}, {0x00560, 0x00588}, {0x005d0, 0x005ea}, {0x005ef, 0x005f2},
+ {0x00620, 0x0064a}, {0x0066e, 0x0066f}, {0x00671, 0x006d3}, {0x006d5, 0x006d5},
+ {0x006e5, 0x006e6}, {0x006ee, 0x006ef}, {0x006fa, 0x006fc}, {0x006ff, 0x006ff},
+ {0x00710, 0x00710}, {0x00712, 0x0072f}, {0x0074d, 0x007a5}, {0x007b1, 0x007b1},
+ {0x007ca, 0x007ea}, {0x007f4, 0x007f5}, {0x007fa, 0x007fa}, {0x00800, 0x00815},
+ {0x0081a, 0x0081a}, {0x00824, 0x00824}, {0x00828, 0x00828}, {0x00840, 0x00858},
+ {0x00860, 0x0086a}, {0x00870, 0x00887}, {0x00889, 0x0088e}, {0x008a0, 0x008c9},
+ {0x00904, 0x00939}, {0x0093d, 0x0093d}, {0x00950, 0x00950}, {0x00958, 0x00961},
+ {0x00971, 0x00980}, {0x00985, 0x0098c}, {0x0098f, 0x00990}, {0x00993, 0x009a8},
+ {0x009aa, 0x009b0}, {0x009b2, 0x009b2}, {0x009b6, 0x009b9}, {0x009bd, 0x009bd},
+ {0x009ce, 0x009ce}, {0x009dc, 0x009dd}, {0x009df, 0x009e1}, {0x009f0, 0x009f1},
+ {0x009fc, 0x009fc}, {0x00a05, 0x00a0a}, {0x00a0f, 0x00a10}, {0x00a13, 0x00a28},
+ {0x00a2a, 0x00a30}, {0x00a32, 0x00a33}, {0x00a35, 0x00a36}, {0x00a38, 0x00a39},
+ {0x00a59, 0x00a5c}, {0x00a5e, 0x00a5e}, {0x00a72, 0x00a74}, {0x00a85, 0x00a8d},
+ {0x00a8f, 0x00a91}, {0x00a93, 0x00aa8}, {0x00aaa, 0x00ab0}, {0x00ab2, 0x00ab3},
+ {0x00ab5, 0x00ab9}, {0x00abd, 0x00abd}, {0x00ad0, 0x00ad0}, {0x00ae0, 0x00ae1},
+ {0x00af9, 0x00af9}, {0x00b05, 0x00b0c}, {0x00b0f, 0x00b10}, {0x00b13, 0x00b28},
+ {0x00b2a, 0x00b30}, {0x00b32, 0x00b33}, {0x00b35, 0x00b39}, {0x00b3d, 0x00b3d},
+ {0x00b5c, 0x00b5d}, {0x00b5f, 0x00b61}, {0x00b71, 0x00b71}, {0x00b83, 0x00b83},
+ {0x00b85, 0x00b8a}, {0x00b8e, 0x00b90}, {0x00b92, 0x00b95}, {0x00b99, 0x00b9a},
+ {0x00b9c, 0x00b9c}, {0x00b9e, 0x00b9f}, {0x00ba3, 0x00ba4}, {0x00ba8, 0x00baa},
+ {0x00bae, 0x00bb9}, {0x00bd0, 0x00bd0}, {0x00c05, 0x00c0c}, {0x00c0e, 0x00c10},
+ {0x00c12, 0x00c28}, {0x00c2a, 0x00c39}, {0x00c3d, 0x00c3d}, {0x00c58, 0x00c5a},
+ {0x00c5d, 0x00c5d}, {0x00c60, 0x00c61}, {0x00c80, 0x00c80}, {0x00c85, 0x00c8c},
+ {0x00c8e, 0x00c90}, {0x00c92, 0x00ca8}, {0x00caa, 0x00cb3}, {0x00cb5, 0x00cb9},
+ {0x00cbd, 0x00cbd}, {0x00cdd, 0x00cde}, {0x00ce0, 0x00ce1}, {0x00cf1, 0x00cf2},
+ {0x00d04, 0x00d0c}, {0x00d0e, 0x00d10}, {0x00d12, 0x00d3a}, {0x00d3d, 0x00d3d},
+ {0x00d4e, 0x00d4e}, {0x00d54, 0x00d56}, {0x00d5f, 0x00d61}, {0x00d7a, 0x00d7f},
+ {0x00d85, 0x00d96}, {0x00d9a, 0x00db1}, {0x00db3, 0x00dbb}, {0x00dbd, 0x00dbd},
+ {0x00dc0, 0x00dc6}, {0x00e01, 0x00e30}, {0x00e32, 0x00e32}, {0x00e40, 0x00e46},
+ {0x00e81, 0x00e82}, {0x00e84, 0x00e84}, {0x00e86, 0x00e8a}, {0x00e8c, 0x00ea3},
+ {0x00ea5, 0x00ea5}, {0x00ea7, 0x00eb0}, {0x00eb2, 0x00eb2}, {0x00ebd, 0x00ebd},
+ {0x00ec0, 0x00ec4}, {0x00ec6, 0x00ec6}, {0x00edc, 0x00edf}, {0x00f00, 0x00f00},
+ {0x00f40, 0x00f47}, {0x00f49, 0x00f6c}, {0x00f88, 0x00f8c}, {0x01000, 0x0102a},
+ {0x0103f, 0x0103f}, {0x01050, 0x01055}, {0x0105a, 0x0105d}, {0x01061, 0x01061},
+ {0x01065, 0x01066}, {0x0106e, 0x01070}, {0x01075, 0x01081}, {0x0108e, 0x0108e},
+ {0x010a0, 0x010c5}, {0x010c7, 0x010c7}, {0x010cd, 0x010cd}, {0x010d0, 0x010fa},
+ {0x010fc, 0x01248}, {0x0124a, 0x0124d}, {0x01250, 0x01256}, {0x01258, 0x01258},
+ {0x0125a, 0x0125d}, {0x01260, 0x01288}, {0x0128a, 0x0128d}, {0x01290, 0x012b0},
+ {0x012b2, 0x012b5}, {0x012b8, 0x012be}, {0x012c0, 0x012c0}, {0x012c2, 0x012c5},
+ {0x012c8, 0x012d6}, {0x012d8, 0x01310}, {0x01312, 0x01315}, {0x01318, 0x0135a},
+ {0x01380, 0x0138f}, {0x013a0, 0x013f5}, {0x013f8, 0x013fd}, {0x01401, 0x0166c},
+ {0x0166f, 0x0167f}, {0x01681, 0x0169a}, {0x016a0, 0x016ea}, {0x016ee, 0x016f8},
+ {0x01700, 0x01711}, {0x0171f, 0x01731}, {0x01740, 0x01751}, {0x01760, 0x0176c},
+ {0x0176e, 0x01770}, {0x01780, 0x017b3}, {0x017d7, 0x017d7}, {0x017dc, 0x017dc},
+ {0x01820, 0x01878}, {0x01880, 0x018a8}, {0x018aa, 0x018aa}, {0x018b0, 0x018f5},
+ {0x01900, 0x0191e}, {0x01950, 0x0196d}, {0x01970, 0x01974}, {0x01980, 0x019ab},
+ {0x019b0, 0x019c9}, {0x01a00, 0x01a16}, {0x01a20, 0x01a54}, {0x01aa7, 0x01aa7},
+ {0x01b05, 0x01b33}, {0x01b45, 0x01b4c}, {0x01b83, 0x01ba0}, {0x01bae, 0x01baf},
+ {0x01bba, 0x01be5}, {0x01c00, 0x01c23}, {0x01c4d, 0x01c4f}, {0x01c5a, 0x01c7d},
+ {0x01c80, 0x01c88}, {0x01c90, 0x01cba}, {0x01cbd, 0x01cbf}, {0x01ce9, 0x01cec},
+ {0x01cee, 0x01cf3}, {0x01cf5, 0x01cf6}, {0x01cfa, 0x01cfa}, {0x01d00, 0x01dbf},
+ {0x01e00, 0x01f15}, {0x01f18, 0x01f1d}, {0x01f20, 0x01f45}, {0x01f48, 0x01f4d},
+ {0x01f50, 0x01f57}, {0x01f59, 0x01f59}, {0x01f5b, 0x01f5b}, {0x01f5d, 0x01f5d},
+ {0x01f5f, 0x01f7d}, {0x01f80, 0x01fb4}, {0x01fb6, 0x01fbc}, {0x01fbe, 0x01fbe},
+ {0x01fc2, 0x01fc4}, {0x01fc6, 0x01fcc}, {0x01fd0, 0x01fd3}, {0x01fd6, 0x01fdb},
+ {0x01fe0, 0x01fec}, {0x01ff2, 0x01ff4}, {0x01ff6, 0x01ffc}, {0x02071, 0x02071},
+ {0x0207f, 0x0207f}, {0x02090, 0x0209c}, {0x02102, 0x02102}, {0x02107, 0x02107},
+ {0x0210a, 0x02113}, {0x02115, 0x02115}, {0x02118, 0x0211d}, {0x02124, 0x02124},
+ {0x02126, 0x02126}, {0x02128, 0x02128}, {0x0212a, 0x02139}, {0x0213c, 0x0213f},
+ {0x02145, 0x02149}, {0x0214e, 0x0214e}, {0x02160, 0x02188}, {0x02c00, 0x02ce4},
+ {0x02ceb, 0x02cee}, {0x02cf2, 0x02cf3}, {0x02d00, 0x02d25}, {0x02d27, 0x02d27},
+ {0x02d2d, 0x02d2d}, {0x02d30, 0x02d67}, {0x02d6f, 0x02d6f}, {0x02d80, 0x02d96},
+ {0x02da0, 0x02da6}, {0x02da8, 0x02dae}, {0x02db0, 0x02db6}, {0x02db8, 0x02dbe},
+ {0x02dc0, 0x02dc6}, {0x02dc8, 0x02dce}, {0x02dd0, 0x02dd6}, {0x02dd8, 0x02dde},
+ {0x03005, 0x03007}, {0x03021, 0x03029}, {0x03031, 0x03035}, {0x03038, 0x0303c},
+ {0x03041, 0x03096}, {0x0309d, 0x0309f}, {0x030a1, 0x030fa}, {0x030fc, 0x030ff},
+ {0x03105, 0x0312f}, {0x03131, 0x0318e}, {0x031a0, 0x031bf}, {0x031f0, 0x031ff},
+ {0x03400, 0x04dbf}, {0x04e00, 0x0a48c}, {0x0a4d0, 0x0a4fd}, {0x0a500, 0x0a60c},
+ {0x0a610, 0x0a61f}, {0x0a62a, 0x0a62b}, {0x0a640, 0x0a66e}, {0x0a67f, 0x0a69d},
+ {0x0a6a0, 0x0a6ef}, {0x0a717, 0x0a71f}, {0x0a722, 0x0a788}, {0x0a78b, 0x0a7ca},
+ {0x0a7d0, 0x0a7d1}, {0x0a7d3, 0x0a7d3}, {0x0a7d5, 0x0a7d9}, {0x0a7f2, 0x0a801},
+ {0x0a803, 0x0a805}, {0x0a807, 0x0a80a}, {0x0a80c, 0x0a822}, {0x0a840, 0x0a873},
+ {0x0a882, 0x0a8b3}, {0x0a8f2, 0x0a8f7}, {0x0a8fb, 0x0a8fb}, {0x0a8fd, 0x0a8fe},
+ {0x0a90a, 0x0a925}, {0x0a930, 0x0a946}, {0x0a960, 0x0a97c}, {0x0a984, 0x0a9b2},
+ {0x0a9cf, 0x0a9cf}, {0x0a9e0, 0x0a9e4}, {0x0a9e6, 0x0a9ef}, {0x0a9fa, 0x0a9fe},
+ {0x0aa00, 0x0aa28}, {0x0aa40, 0x0aa42}, {0x0aa44, 0x0aa4b}, {0x0aa60, 0x0aa76},
+ {0x0aa7a, 0x0aa7a}, {0x0aa7e, 0x0aaaf}, {0x0aab1, 0x0aab1}, {0x0aab5, 0x0aab6},
+ {0x0aab9, 0x0aabd}, {0x0aac0, 0x0aac0}, {0x0aac2, 0x0aac2}, {0x0aadb, 0x0aadd},
+ {0x0aae0, 0x0aaea}, {0x0aaf2, 0x0aaf4}, {0x0ab01, 0x0ab06}, {0x0ab09, 0x0ab0e},
+ {0x0ab11, 0x0ab16}, {0x0ab20, 0x0ab26}, {0x0ab28, 0x0ab2e}, {0x0ab30, 0x0ab5a},
+ {0x0ab5c, 0x0ab69}, {0x0ab70, 0x0abe2}, {0x0ac00, 0x0d7a3}, {0x0d7b0, 0x0d7c6},
+ {0x0d7cb, 0x0d7fb}, {0x0f900, 0x0fa6d}, {0x0fa70, 0x0fad9}, {0x0fb00, 0x0fb06},
+ {0x0fb13, 0x0fb17}, {0x0fb1d, 0x0fb1d}, {0x0fb1f, 0x0fb28}, {0x0fb2a, 0x0fb36},
+ {0x0fb38, 0x0fb3c}, {0x0fb3e, 0x0fb3e}, {0x0fb40, 0x0fb41}, {0x0fb43, 0x0fb44},
+ {0x0fb46, 0x0fbb1}, {0x0fbd3, 0x0fc5d}, {0x0fc64, 0x0fd3d}, {0x0fd50, 0x0fd8f},
+ {0x0fd92, 0x0fdc7}, {0x0fdf0, 0x0fdf9}, {0x0fe71, 0x0fe71}, {0x0fe73, 0x0fe73},
+ {0x0fe77, 0x0fe77}, {0x0fe79, 0x0fe79}, {0x0fe7b, 0x0fe7b}, {0x0fe7d, 0x0fe7d},
+ {0x0fe7f, 0x0fefc}, {0x0ff21, 0x0ff3a}, {0x0ff41, 0x0ff5a}, {0x0ff66, 0x0ff9d},
+ {0x0ffa0, 0x0ffbe}, {0x0ffc2, 0x0ffc7}, {0x0ffca, 0x0ffcf}, {0x0ffd2, 0x0ffd7},
+ {0x0ffda, 0x0ffdc}, {0x10000, 0x1000b}, {0x1000d, 0x10026}, {0x10028, 0x1003a},
+ {0x1003c, 0x1003d}, {0x1003f, 0x1004d}, {0x10050, 0x1005d}, {0x10080, 0x100fa},
+ {0x10140, 0x10174}, {0x10280, 0x1029c}, {0x102a0, 0x102d0}, {0x10300, 0x1031f},
+ {0x1032d, 0x1034a}, {0x10350, 0x10375}, {0x10380, 0x1039d}, {0x103a0, 0x103c3},
+ {0x103c8, 0x103cf}, {0x103d1, 0x103d5}, {0x10400, 0x1049d}, {0x104b0, 0x104d3},
+ {0x104d8, 0x104fb}, {0x10500, 0x10527}, {0x10530, 0x10563}, {0x10570, 0x1057a},
+ {0x1057c, 0x1058a}, {0x1058c, 0x10592}, {0x10594, 0x10595}, {0x10597, 0x105a1},
+ {0x105a3, 0x105b1}, {0x105b3, 0x105b9}, {0x105bb, 0x105bc}, {0x10600, 0x10736},
+ {0x10740, 0x10755}, {0x10760, 0x10767}, {0x10780, 0x10785}, {0x10787, 0x107b0},
+ {0x107b2, 0x107ba}, {0x10800, 0x10805}, {0x10808, 0x10808}, {0x1080a, 0x10835},
+ {0x10837, 0x10838}, {0x1083c, 0x1083c}, {0x1083f, 0x10855}, {0x10860, 0x10876},
+ {0x10880, 0x1089e}, {0x108e0, 0x108f2}, {0x108f4, 0x108f5}, {0x10900, 0x10915},
+ {0x10920, 0x10939}, {0x10980, 0x109b7}, {0x109be, 0x109bf}, {0x10a00, 0x10a00},
+ {0x10a10, 0x10a13}, {0x10a15, 0x10a17}, {0x10a19, 0x10a35}, {0x10a60, 0x10a7c},
+ {0x10a80, 0x10a9c}, {0x10ac0, 0x10ac7}, {0x10ac9, 0x10ae4}, {0x10b00, 0x10b35},
+ {0x10b40, 0x10b55}, {0x10b60, 0x10b72}, {0x10b80, 0x10b91}, {0x10c00, 0x10c48},
+ {0x10c80, 0x10cb2}, {0x10cc0, 0x10cf2}, {0x10d00, 0x10d23}, {0x10e80, 0x10ea9},
+ {0x10eb0, 0x10eb1}, {0x10f00, 0x10f1c}, {0x10f27, 0x10f27}, {0x10f30, 0x10f45},
+ {0x10f70, 0x10f81}, {0x10fb0, 0x10fc4}, {0x10fe0, 0x10ff6}, {0x11003, 0x11037},
+ {0x11071, 0x11072}, {0x11075, 0x11075}, {0x11083, 0x110af}, {0x110d0, 0x110e8},
+ {0x11103, 0x11126}, {0x11144, 0x11144}, {0x11147, 0x11147}, {0x11150, 0x11172},
+ {0x11176, 0x11176}, {0x11183, 0x111b2}, {0x111c1, 0x111c4}, {0x111da, 0x111da},
+ {0x111dc, 0x111dc}, {0x11200, 0x11211}, {0x11213, 0x1122b}, {0x11280, 0x11286},
+ {0x11288, 0x11288}, {0x1128a, 0x1128d}, {0x1128f, 0x1129d}, {0x1129f, 0x112a8},
+ {0x112b0, 0x112de}, {0x11305, 0x1130c}, {0x1130f, 0x11310}, {0x11313, 0x11328},
+ {0x1132a, 0x11330}, {0x11332, 0x11333}, {0x11335, 0x11339}, {0x1133d, 0x1133d},
+ {0x11350, 0x11350}, {0x1135d, 0x11361}, {0x11400, 0x11434}, {0x11447, 0x1144a},
+ {0x1145f, 0x11461}, {0x11480, 0x114af}, {0x114c4, 0x114c5}, {0x114c7, 0x114c7},
+ {0x11580, 0x115ae}, {0x115d8, 0x115db}, {0x11600, 0x1162f}, {0x11644, 0x11644},
+ {0x11680, 0x116aa}, {0x116b8, 0x116b8}, {0x11700, 0x1171a}, {0x11740, 0x11746},
+ {0x11800, 0x1182b}, {0x118a0, 0x118df}, {0x118ff, 0x11906}, {0x11909, 0x11909},
+ {0x1190c, 0x11913}, {0x11915, 0x11916}, {0x11918, 0x1192f}, {0x1193f, 0x1193f},
+ {0x11941, 0x11941}, {0x119a0, 0x119a7}, {0x119aa, 0x119d0}, {0x119e1, 0x119e1},
+ {0x119e3, 0x119e3}, {0x11a00, 0x11a00}, {0x11a0b, 0x11a32}, {0x11a3a, 0x11a3a},
+ {0x11a50, 0x11a50}, {0x11a5c, 0x11a89}, {0x11a9d, 0x11a9d}, {0x11ab0, 0x11af8},
+ {0x11c00, 0x11c08}, {0x11c0a, 0x11c2e}, {0x11c40, 0x11c40}, {0x11c72, 0x11c8f},
+ {0x11d00, 0x11d06}, {0x11d08, 0x11d09}, {0x11d0b, 0x11d30}, {0x11d46, 0x11d46},
+ {0x11d60, 0x11d65}, {0x11d67, 0x11d68}, {0x11d6a, 0x11d89}, {0x11d98, 0x11d98},
+ {0x11ee0, 0x11ef2}, {0x11fb0, 0x11fb0}, {0x12000, 0x12399}, {0x12400, 0x1246e},
+ {0x12480, 0x12543}, {0x12f90, 0x12ff0}, {0x13000, 0x1342e}, {0x14400, 0x14646},
+ {0x16800, 0x16a38}, {0x16a40, 0x16a5e}, {0x16a70, 0x16abe}, {0x16ad0, 0x16aed},
+ {0x16b00, 0x16b2f}, {0x16b40, 0x16b43}, {0x16b63, 0x16b77}, {0x16b7d, 0x16b8f},
+ {0x16e40, 0x16e7f}, {0x16f00, 0x16f4a}, {0x16f50, 0x16f50}, {0x16f93, 0x16f9f},
+ {0x16fe0, 0x16fe1}, {0x16fe3, 0x16fe3}, {0x17000, 0x187f7}, {0x18800, 0x18cd5},
+ {0x18d00, 0x18d08}, {0x1aff0, 0x1aff3}, {0x1aff5, 0x1affb}, {0x1affd, 0x1affe},
+ {0x1b000, 0x1b122}, {0x1b150, 0x1b152}, {0x1b164, 0x1b167}, {0x1b170, 0x1b2fb},
+ {0x1bc00, 0x1bc6a}, {0x1bc70, 0x1bc7c}, {0x1bc80, 0x1bc88}, {0x1bc90, 0x1bc99},
+ {0x1d400, 0x1d454}, {0x1d456, 0x1d49c}, {0x1d49e, 0x1d49f}, {0x1d4a2, 0x1d4a2},
+ {0x1d4a5, 0x1d4a6}, {0x1d4a9, 0x1d4ac}, {0x1d4ae, 0x1d4b9}, {0x1d4bb, 0x1d4bb},
+ {0x1d4bd, 0x1d4c3}, {0x1d4c5, 0x1d505}, {0x1d507, 0x1d50a}, {0x1d50d, 0x1d514},
+ {0x1d516, 0x1d51c}, {0x1d51e, 0x1d539}, {0x1d53b, 0x1d53e}, {0x1d540, 0x1d544},
+ {0x1d546, 0x1d546}, {0x1d54a, 0x1d550}, {0x1d552, 0x1d6a5}, {0x1d6a8, 0x1d6c0},
+ {0x1d6c2, 0x1d6da}, {0x1d6dc, 0x1d6fa}, {0x1d6fc, 0x1d714}, {0x1d716, 0x1d734},
+ {0x1d736, 0x1d74e}, {0x1d750, 0x1d76e}, {0x1d770, 0x1d788}, {0x1d78a, 0x1d7a8},
+ {0x1d7aa, 0x1d7c2}, {0x1d7c4, 0x1d7cb}, {0x1df00, 0x1df1e}, {0x1e100, 0x1e12c},
+ {0x1e137, 0x1e13d}, {0x1e14e, 0x1e14e}, {0x1e290, 0x1e2ad}, {0x1e2c0, 0x1e2eb},
+ {0x1e7e0, 0x1e7e6}, {0x1e7e8, 0x1e7eb}, {0x1e7ed, 0x1e7ee}, {0x1e7f0, 0x1e7fe},
+ {0x1e800, 0x1e8c4}, {0x1e900, 0x1e943}, {0x1e94b, 0x1e94b}, {0x1ee00, 0x1ee03},
+ {0x1ee05, 0x1ee1f}, {0x1ee21, 0x1ee22}, {0x1ee24, 0x1ee24}, {0x1ee27, 0x1ee27},
+ {0x1ee29, 0x1ee32}, {0x1ee34, 0x1ee37}, {0x1ee39, 0x1ee39}, {0x1ee3b, 0x1ee3b},
+ {0x1ee42, 0x1ee42}, {0x1ee47, 0x1ee47}, {0x1ee49, 0x1ee49}, {0x1ee4b, 0x1ee4b},
+ {0x1ee4d, 0x1ee4f}, {0x1ee51, 0x1ee52}, {0x1ee54, 0x1ee54}, {0x1ee57, 0x1ee57},
+ {0x1ee59, 0x1ee59}, {0x1ee5b, 0x1ee5b}, {0x1ee5d, 0x1ee5d}, {0x1ee5f, 0x1ee5f},
+ {0x1ee61, 0x1ee62}, {0x1ee64, 0x1ee64}, {0x1ee67, 0x1ee6a}, {0x1ee6c, 0x1ee72},
+ {0x1ee74, 0x1ee77}, {0x1ee79, 0x1ee7c}, {0x1ee7e, 0x1ee7e}, {0x1ee80, 0x1ee89},
+ {0x1ee8b, 0x1ee9b}, {0x1eea1, 0x1eea3}, {0x1eea5, 0x1eea9}, {0x1eeab, 0x1eebb},
+ {0x20000, 0x2a6df}, {0x2a700, 0x2b738}, {0x2b740, 0x2b81d}, {0x2b820, 0x2cea1},
+ {0x2ceb0, 0x2ebe0}, {0x2f800, 0x2fa1d}, {0x30000, 0x3134a},
};
// Number of ranges in kXIDStartRanges
-constexpr size_t kNumXIDStartRanges =
- sizeof(kXIDStartRanges) / sizeof(kXIDStartRanges[0]);
+constexpr size_t kNumXIDStartRanges = sizeof(kXIDStartRanges) / sizeof(kXIDStartRanges[0]);
// The additional code point interval ranges for the Unicode 14 XID_Continue
// set. This extends the values in kXIDStartRanges.
// This array needs to be in ascending order.
constexpr CodePointRange kXIDContinueRanges[] = {
- {0x00030, 0x00039}, {0x0005f, 0x0005f}, {0x000b7, 0x000b7},
- {0x00300, 0x0036f}, {0x00387, 0x00387}, {0x00483, 0x00487},
- {0x00591, 0x005bd}, {0x005bf, 0x005bf}, {0x005c1, 0x005c2},
- {0x005c4, 0x005c5}, {0x005c7, 0x005c7}, {0x00610, 0x0061a},
- {0x0064b, 0x00669}, {0x00670, 0x00670}, {0x006d6, 0x006dc},
- {0x006df, 0x006e4}, {0x006e7, 0x006e8}, {0x006ea, 0x006ed},
- {0x006f0, 0x006f9}, {0x00711, 0x00711}, {0x00730, 0x0074a},
- {0x007a6, 0x007b0}, {0x007c0, 0x007c9}, {0x007eb, 0x007f3},
- {0x007fd, 0x007fd}, {0x00816, 0x00819}, {0x0081b, 0x00823},
- {0x00825, 0x00827}, {0x00829, 0x0082d}, {0x00859, 0x0085b},
- {0x00898, 0x0089f}, {0x008ca, 0x008e1}, {0x008e3, 0x00903},
- {0x0093a, 0x0093c}, {0x0093e, 0x0094f}, {0x00951, 0x00957},
- {0x00962, 0x00963}, {0x00966, 0x0096f}, {0x00981, 0x00983},
- {0x009bc, 0x009bc}, {0x009be, 0x009c4}, {0x009c7, 0x009c8},
- {0x009cb, 0x009cd}, {0x009d7, 0x009d7}, {0x009e2, 0x009e3},
- {0x009e6, 0x009ef}, {0x009fe, 0x009fe}, {0x00a01, 0x00a03},
- {0x00a3c, 0x00a3c}, {0x00a3e, 0x00a42}, {0x00a47, 0x00a48},
- {0x00a4b, 0x00a4d}, {0x00a51, 0x00a51}, {0x00a66, 0x00a71},
- {0x00a75, 0x00a75}, {0x00a81, 0x00a83}, {0x00abc, 0x00abc},
- {0x00abe, 0x00ac5}, {0x00ac7, 0x00ac9}, {0x00acb, 0x00acd},
- {0x00ae2, 0x00ae3}, {0x00ae6, 0x00aef}, {0x00afa, 0x00aff},
- {0x00b01, 0x00b03}, {0x00b3c, 0x00b3c}, {0x00b3e, 0x00b44},
- {0x00b47, 0x00b48}, {0x00b4b, 0x00b4d}, {0x00b55, 0x00b57},
- {0x00b62, 0x00b63}, {0x00b66, 0x00b6f}, {0x00b82, 0x00b82},
- {0x00bbe, 0x00bc2}, {0x00bc6, 0x00bc8}, {0x00bca, 0x00bcd},
- {0x00bd7, 0x00bd7}, {0x00be6, 0x00bef}, {0x00c00, 0x00c04},
- {0x00c3c, 0x00c3c}, {0x00c3e, 0x00c44}, {0x00c46, 0x00c48},
- {0x00c4a, 0x00c4d}, {0x00c55, 0x00c56}, {0x00c62, 0x00c63},
- {0x00c66, 0x00c6f}, {0x00c81, 0x00c83}, {0x00cbc, 0x00cbc},
- {0x00cbe, 0x00cc4}, {0x00cc6, 0x00cc8}, {0x00cca, 0x00ccd},
- {0x00cd5, 0x00cd6}, {0x00ce2, 0x00ce3}, {0x00ce6, 0x00cef},
- {0x00d00, 0x00d03}, {0x00d3b, 0x00d3c}, {0x00d3e, 0x00d44},
- {0x00d46, 0x00d48}, {0x00d4a, 0x00d4d}, {0x00d57, 0x00d57},
- {0x00d62, 0x00d63}, {0x00d66, 0x00d6f}, {0x00d81, 0x00d83},
- {0x00dca, 0x00dca}, {0x00dcf, 0x00dd4}, {0x00dd6, 0x00dd6},
- {0x00dd8, 0x00ddf}, {0x00de6, 0x00def}, {0x00df2, 0x00df3},
- {0x00e31, 0x00e31}, {0x00e33, 0x00e3a}, {0x00e47, 0x00e4e},
- {0x00e50, 0x00e59}, {0x00eb1, 0x00eb1}, {0x00eb3, 0x00ebc},
- {0x00ec8, 0x00ecd}, {0x00ed0, 0x00ed9}, {0x00f18, 0x00f19},
- {0x00f20, 0x00f29}, {0x00f35, 0x00f35}, {0x00f37, 0x00f37},
- {0x00f39, 0x00f39}, {0x00f3e, 0x00f3f}, {0x00f71, 0x00f84},
- {0x00f86, 0x00f87}, {0x00f8d, 0x00f97}, {0x00f99, 0x00fbc},
- {0x00fc6, 0x00fc6}, {0x0102b, 0x0103e}, {0x01040, 0x01049},
- {0x01056, 0x01059}, {0x0105e, 0x01060}, {0x01062, 0x01064},
- {0x01067, 0x0106d}, {0x01071, 0x01074}, {0x01082, 0x0108d},
- {0x0108f, 0x0109d}, {0x0135d, 0x0135f}, {0x01369, 0x01371},
- {0x01712, 0x01715}, {0x01732, 0x01734}, {0x01752, 0x01753},
- {0x01772, 0x01773}, {0x017b4, 0x017d3}, {0x017dd, 0x017dd},
- {0x017e0, 0x017e9}, {0x0180b, 0x0180d}, {0x0180f, 0x01819},
- {0x018a9, 0x018a9}, {0x01920, 0x0192b}, {0x01930, 0x0193b},
- {0x01946, 0x0194f}, {0x019d0, 0x019da}, {0x01a17, 0x01a1b},
- {0x01a55, 0x01a5e}, {0x01a60, 0x01a7c}, {0x01a7f, 0x01a89},
- {0x01a90, 0x01a99}, {0x01ab0, 0x01abd}, {0x01abf, 0x01ace},
- {0x01b00, 0x01b04}, {0x01b34, 0x01b44}, {0x01b50, 0x01b59},
- {0x01b6b, 0x01b73}, {0x01b80, 0x01b82}, {0x01ba1, 0x01bad},
- {0x01bb0, 0x01bb9}, {0x01be6, 0x01bf3}, {0x01c24, 0x01c37},
- {0x01c40, 0x01c49}, {0x01c50, 0x01c59}, {0x01cd0, 0x01cd2},
- {0x01cd4, 0x01ce8}, {0x01ced, 0x01ced}, {0x01cf4, 0x01cf4},
- {0x01cf7, 0x01cf9}, {0x01dc0, 0x01dff}, {0x0203f, 0x02040},
- {0x02054, 0x02054}, {0x020d0, 0x020dc}, {0x020e1, 0x020e1},
- {0x020e5, 0x020f0}, {0x02cef, 0x02cf1}, {0x02d7f, 0x02d7f},
- {0x02de0, 0x02dff}, {0x0302a, 0x0302f}, {0x03099, 0x0309a},
- {0x0a620, 0x0a629}, {0x0a66f, 0x0a66f}, {0x0a674, 0x0a67d},
- {0x0a69e, 0x0a69f}, {0x0a6f0, 0x0a6f1}, {0x0a802, 0x0a802},
- {0x0a806, 0x0a806}, {0x0a80b, 0x0a80b}, {0x0a823, 0x0a827},
- {0x0a82c, 0x0a82c}, {0x0a880, 0x0a881}, {0x0a8b4, 0x0a8c5},
- {0x0a8d0, 0x0a8d9}, {0x0a8e0, 0x0a8f1}, {0x0a8ff, 0x0a909},
- {0x0a926, 0x0a92d}, {0x0a947, 0x0a953}, {0x0a980, 0x0a983},
- {0x0a9b3, 0x0a9c0}, {0x0a9d0, 0x0a9d9}, {0x0a9e5, 0x0a9e5},
- {0x0a9f0, 0x0a9f9}, {0x0aa29, 0x0aa36}, {0x0aa43, 0x0aa43},
- {0x0aa4c, 0x0aa4d}, {0x0aa50, 0x0aa59}, {0x0aa7b, 0x0aa7d},
- {0x0aab0, 0x0aab0}, {0x0aab2, 0x0aab4}, {0x0aab7, 0x0aab8},
- {0x0aabe, 0x0aabf}, {0x0aac1, 0x0aac1}, {0x0aaeb, 0x0aaef},
- {0x0aaf5, 0x0aaf6}, {0x0abe3, 0x0abea}, {0x0abec, 0x0abed},
- {0x0abf0, 0x0abf9}, {0x0fb1e, 0x0fb1e}, {0x0fe00, 0x0fe0f},
- {0x0fe20, 0x0fe2f}, {0x0fe33, 0x0fe34}, {0x0fe4d, 0x0fe4f},
- {0x0ff10, 0x0ff19}, {0x0ff3f, 0x0ff3f}, {0x0ff9e, 0x0ff9f},
- {0x101fd, 0x101fd}, {0x102e0, 0x102e0}, {0x10376, 0x1037a},
- {0x104a0, 0x104a9}, {0x10a01, 0x10a03}, {0x10a05, 0x10a06},
- {0x10a0c, 0x10a0f}, {0x10a38, 0x10a3a}, {0x10a3f, 0x10a3f},
- {0x10ae5, 0x10ae6}, {0x10d24, 0x10d27}, {0x10d30, 0x10d39},
- {0x10eab, 0x10eac}, {0x10f46, 0x10f50}, {0x10f82, 0x10f85},
- {0x11000, 0x11002}, {0x11038, 0x11046}, {0x11066, 0x11070},
- {0x11073, 0x11074}, {0x1107f, 0x11082}, {0x110b0, 0x110ba},
- {0x110c2, 0x110c2}, {0x110f0, 0x110f9}, {0x11100, 0x11102},
- {0x11127, 0x11134}, {0x11136, 0x1113f}, {0x11145, 0x11146},
- {0x11173, 0x11173}, {0x11180, 0x11182}, {0x111b3, 0x111c0},
- {0x111c9, 0x111cc}, {0x111ce, 0x111d9}, {0x1122c, 0x11237},
- {0x1123e, 0x1123e}, {0x112df, 0x112ea}, {0x112f0, 0x112f9},
- {0x11300, 0x11303}, {0x1133b, 0x1133c}, {0x1133e, 0x11344},
- {0x11347, 0x11348}, {0x1134b, 0x1134d}, {0x11357, 0x11357},
- {0x11362, 0x11363}, {0x11366, 0x1136c}, {0x11370, 0x11374},
- {0x11435, 0x11446}, {0x11450, 0x11459}, {0x1145e, 0x1145e},
- {0x114b0, 0x114c3}, {0x114d0, 0x114d9}, {0x115af, 0x115b5},
- {0x115b8, 0x115c0}, {0x115dc, 0x115dd}, {0x11630, 0x11640},
- {0x11650, 0x11659}, {0x116ab, 0x116b7}, {0x116c0, 0x116c9},
- {0x1171d, 0x1172b}, {0x11730, 0x11739}, {0x1182c, 0x1183a},
- {0x118e0, 0x118e9}, {0x11930, 0x11935}, {0x11937, 0x11938},
- {0x1193b, 0x1193e}, {0x11940, 0x11940}, {0x11942, 0x11943},
- {0x11950, 0x11959}, {0x119d1, 0x119d7}, {0x119da, 0x119e0},
- {0x119e4, 0x119e4}, {0x11a01, 0x11a0a}, {0x11a33, 0x11a39},
- {0x11a3b, 0x11a3e}, {0x11a47, 0x11a47}, {0x11a51, 0x11a5b},
- {0x11a8a, 0x11a99}, {0x11c2f, 0x11c36}, {0x11c38, 0x11c3f},
- {0x11c50, 0x11c59}, {0x11c92, 0x11ca7}, {0x11ca9, 0x11cb6},
- {0x11d31, 0x11d36}, {0x11d3a, 0x11d3a}, {0x11d3c, 0x11d3d},
- {0x11d3f, 0x11d45}, {0x11d47, 0x11d47}, {0x11d50, 0x11d59},
- {0x11d8a, 0x11d8e}, {0x11d90, 0x11d91}, {0x11d93, 0x11d97},
- {0x11da0, 0x11da9}, {0x11ef3, 0x11ef6}, {0x16a60, 0x16a69},
- {0x16ac0, 0x16ac9}, {0x16af0, 0x16af4}, {0x16b30, 0x16b36},
- {0x16b50, 0x16b59}, {0x16f4f, 0x16f4f}, {0x16f51, 0x16f87},
- {0x16f8f, 0x16f92}, {0x16fe4, 0x16fe4}, {0x16ff0, 0x16ff1},
- {0x1bc9d, 0x1bc9e}, {0x1cf00, 0x1cf2d}, {0x1cf30, 0x1cf46},
- {0x1d165, 0x1d169}, {0x1d16d, 0x1d172}, {0x1d17b, 0x1d182},
- {0x1d185, 0x1d18b}, {0x1d1aa, 0x1d1ad}, {0x1d242, 0x1d244},
- {0x1d7ce, 0x1d7ff}, {0x1da00, 0x1da36}, {0x1da3b, 0x1da6c},
- {0x1da75, 0x1da75}, {0x1da84, 0x1da84}, {0x1da9b, 0x1da9f},
- {0x1daa1, 0x1daaf}, {0x1e000, 0x1e006}, {0x1e008, 0x1e018},
- {0x1e01b, 0x1e021}, {0x1e023, 0x1e024}, {0x1e026, 0x1e02a},
- {0x1e130, 0x1e136}, {0x1e140, 0x1e149}, {0x1e2ae, 0x1e2ae},
- {0x1e2ec, 0x1e2f9}, {0x1e8d0, 0x1e8d6}, {0x1e944, 0x1e94a},
+ {0x00030, 0x00039}, {0x0005f, 0x0005f}, {0x000b7, 0x000b7}, {0x00300, 0x0036f},
+ {0x00387, 0x00387}, {0x00483, 0x00487}, {0x00591, 0x005bd}, {0x005bf, 0x005bf},
+ {0x005c1, 0x005c2}, {0x005c4, 0x005c5}, {0x005c7, 0x005c7}, {0x00610, 0x0061a},
+ {0x0064b, 0x00669}, {0x00670, 0x00670}, {0x006d6, 0x006dc}, {0x006df, 0x006e4},
+ {0x006e7, 0x006e8}, {0x006ea, 0x006ed}, {0x006f0, 0x006f9}, {0x00711, 0x00711},
+ {0x00730, 0x0074a}, {0x007a6, 0x007b0}, {0x007c0, 0x007c9}, {0x007eb, 0x007f3},
+ {0x007fd, 0x007fd}, {0x00816, 0x00819}, {0x0081b, 0x00823}, {0x00825, 0x00827},
+ {0x00829, 0x0082d}, {0x00859, 0x0085b}, {0x00898, 0x0089f}, {0x008ca, 0x008e1},
+ {0x008e3, 0x00903}, {0x0093a, 0x0093c}, {0x0093e, 0x0094f}, {0x00951, 0x00957},
+ {0x00962, 0x00963}, {0x00966, 0x0096f}, {0x00981, 0x00983}, {0x009bc, 0x009bc},
+ {0x009be, 0x009c4}, {0x009c7, 0x009c8}, {0x009cb, 0x009cd}, {0x009d7, 0x009d7},
+ {0x009e2, 0x009e3}, {0x009e6, 0x009ef}, {0x009fe, 0x009fe}, {0x00a01, 0x00a03},
+ {0x00a3c, 0x00a3c}, {0x00a3e, 0x00a42}, {0x00a47, 0x00a48}, {0x00a4b, 0x00a4d},
+ {0x00a51, 0x00a51}, {0x00a66, 0x00a71}, {0x00a75, 0x00a75}, {0x00a81, 0x00a83},
+ {0x00abc, 0x00abc}, {0x00abe, 0x00ac5}, {0x00ac7, 0x00ac9}, {0x00acb, 0x00acd},
+ {0x00ae2, 0x00ae3}, {0x00ae6, 0x00aef}, {0x00afa, 0x00aff}, {0x00b01, 0x00b03},
+ {0x00b3c, 0x00b3c}, {0x00b3e, 0x00b44}, {0x00b47, 0x00b48}, {0x00b4b, 0x00b4d},
+ {0x00b55, 0x00b57}, {0x00b62, 0x00b63}, {0x00b66, 0x00b6f}, {0x00b82, 0x00b82},
+ {0x00bbe, 0x00bc2}, {0x00bc6, 0x00bc8}, {0x00bca, 0x00bcd}, {0x00bd7, 0x00bd7},
+ {0x00be6, 0x00bef}, {0x00c00, 0x00c04}, {0x00c3c, 0x00c3c}, {0x00c3e, 0x00c44},
+ {0x00c46, 0x00c48}, {0x00c4a, 0x00c4d}, {0x00c55, 0x00c56}, {0x00c62, 0x00c63},
+ {0x00c66, 0x00c6f}, {0x00c81, 0x00c83}, {0x00cbc, 0x00cbc}, {0x00cbe, 0x00cc4},
+ {0x00cc6, 0x00cc8}, {0x00cca, 0x00ccd}, {0x00cd5, 0x00cd6}, {0x00ce2, 0x00ce3},
+ {0x00ce6, 0x00cef}, {0x00d00, 0x00d03}, {0x00d3b, 0x00d3c}, {0x00d3e, 0x00d44},
+ {0x00d46, 0x00d48}, {0x00d4a, 0x00d4d}, {0x00d57, 0x00d57}, {0x00d62, 0x00d63},
+ {0x00d66, 0x00d6f}, {0x00d81, 0x00d83}, {0x00dca, 0x00dca}, {0x00dcf, 0x00dd4},
+ {0x00dd6, 0x00dd6}, {0x00dd8, 0x00ddf}, {0x00de6, 0x00def}, {0x00df2, 0x00df3},
+ {0x00e31, 0x00e31}, {0x00e33, 0x00e3a}, {0x00e47, 0x00e4e}, {0x00e50, 0x00e59},
+ {0x00eb1, 0x00eb1}, {0x00eb3, 0x00ebc}, {0x00ec8, 0x00ecd}, {0x00ed0, 0x00ed9},
+ {0x00f18, 0x00f19}, {0x00f20, 0x00f29}, {0x00f35, 0x00f35}, {0x00f37, 0x00f37},
+ {0x00f39, 0x00f39}, {0x00f3e, 0x00f3f}, {0x00f71, 0x00f84}, {0x00f86, 0x00f87},
+ {0x00f8d, 0x00f97}, {0x00f99, 0x00fbc}, {0x00fc6, 0x00fc6}, {0x0102b, 0x0103e},
+ {0x01040, 0x01049}, {0x01056, 0x01059}, {0x0105e, 0x01060}, {0x01062, 0x01064},
+ {0x01067, 0x0106d}, {0x01071, 0x01074}, {0x01082, 0x0108d}, {0x0108f, 0x0109d},
+ {0x0135d, 0x0135f}, {0x01369, 0x01371}, {0x01712, 0x01715}, {0x01732, 0x01734},
+ {0x01752, 0x01753}, {0x01772, 0x01773}, {0x017b4, 0x017d3}, {0x017dd, 0x017dd},
+ {0x017e0, 0x017e9}, {0x0180b, 0x0180d}, {0x0180f, 0x01819}, {0x018a9, 0x018a9},
+ {0x01920, 0x0192b}, {0x01930, 0x0193b}, {0x01946, 0x0194f}, {0x019d0, 0x019da},
+ {0x01a17, 0x01a1b}, {0x01a55, 0x01a5e}, {0x01a60, 0x01a7c}, {0x01a7f, 0x01a89},
+ {0x01a90, 0x01a99}, {0x01ab0, 0x01abd}, {0x01abf, 0x01ace}, {0x01b00, 0x01b04},
+ {0x01b34, 0x01b44}, {0x01b50, 0x01b59}, {0x01b6b, 0x01b73}, {0x01b80, 0x01b82},
+ {0x01ba1, 0x01bad}, {0x01bb0, 0x01bb9}, {0x01be6, 0x01bf3}, {0x01c24, 0x01c37},
+ {0x01c40, 0x01c49}, {0x01c50, 0x01c59}, {0x01cd0, 0x01cd2}, {0x01cd4, 0x01ce8},
+ {0x01ced, 0x01ced}, {0x01cf4, 0x01cf4}, {0x01cf7, 0x01cf9}, {0x01dc0, 0x01dff},
+ {0x0203f, 0x02040}, {0x02054, 0x02054}, {0x020d0, 0x020dc}, {0x020e1, 0x020e1},
+ {0x020e5, 0x020f0}, {0x02cef, 0x02cf1}, {0x02d7f, 0x02d7f}, {0x02de0, 0x02dff},
+ {0x0302a, 0x0302f}, {0x03099, 0x0309a}, {0x0a620, 0x0a629}, {0x0a66f, 0x0a66f},
+ {0x0a674, 0x0a67d}, {0x0a69e, 0x0a69f}, {0x0a6f0, 0x0a6f1}, {0x0a802, 0x0a802},
+ {0x0a806, 0x0a806}, {0x0a80b, 0x0a80b}, {0x0a823, 0x0a827}, {0x0a82c, 0x0a82c},
+ {0x0a880, 0x0a881}, {0x0a8b4, 0x0a8c5}, {0x0a8d0, 0x0a8d9}, {0x0a8e0, 0x0a8f1},
+ {0x0a8ff, 0x0a909}, {0x0a926, 0x0a92d}, {0x0a947, 0x0a953}, {0x0a980, 0x0a983},
+ {0x0a9b3, 0x0a9c0}, {0x0a9d0, 0x0a9d9}, {0x0a9e5, 0x0a9e5}, {0x0a9f0, 0x0a9f9},
+ {0x0aa29, 0x0aa36}, {0x0aa43, 0x0aa43}, {0x0aa4c, 0x0aa4d}, {0x0aa50, 0x0aa59},
+ {0x0aa7b, 0x0aa7d}, {0x0aab0, 0x0aab0}, {0x0aab2, 0x0aab4}, {0x0aab7, 0x0aab8},
+ {0x0aabe, 0x0aabf}, {0x0aac1, 0x0aac1}, {0x0aaeb, 0x0aaef}, {0x0aaf5, 0x0aaf6},
+ {0x0abe3, 0x0abea}, {0x0abec, 0x0abed}, {0x0abf0, 0x0abf9}, {0x0fb1e, 0x0fb1e},
+ {0x0fe00, 0x0fe0f}, {0x0fe20, 0x0fe2f}, {0x0fe33, 0x0fe34}, {0x0fe4d, 0x0fe4f},
+ {0x0ff10, 0x0ff19}, {0x0ff3f, 0x0ff3f}, {0x0ff9e, 0x0ff9f}, {0x101fd, 0x101fd},
+ {0x102e0, 0x102e0}, {0x10376, 0x1037a}, {0x104a0, 0x104a9}, {0x10a01, 0x10a03},
+ {0x10a05, 0x10a06}, {0x10a0c, 0x10a0f}, {0x10a38, 0x10a3a}, {0x10a3f, 0x10a3f},
+ {0x10ae5, 0x10ae6}, {0x10d24, 0x10d27}, {0x10d30, 0x10d39}, {0x10eab, 0x10eac},
+ {0x10f46, 0x10f50}, {0x10f82, 0x10f85}, {0x11000, 0x11002}, {0x11038, 0x11046},
+ {0x11066, 0x11070}, {0x11073, 0x11074}, {0x1107f, 0x11082}, {0x110b0, 0x110ba},
+ {0x110c2, 0x110c2}, {0x110f0, 0x110f9}, {0x11100, 0x11102}, {0x11127, 0x11134},
+ {0x11136, 0x1113f}, {0x11145, 0x11146}, {0x11173, 0x11173}, {0x11180, 0x11182},
+ {0x111b3, 0x111c0}, {0x111c9, 0x111cc}, {0x111ce, 0x111d9}, {0x1122c, 0x11237},
+ {0x1123e, 0x1123e}, {0x112df, 0x112ea}, {0x112f0, 0x112f9}, {0x11300, 0x11303},
+ {0x1133b, 0x1133c}, {0x1133e, 0x11344}, {0x11347, 0x11348}, {0x1134b, 0x1134d},
+ {0x11357, 0x11357}, {0x11362, 0x11363}, {0x11366, 0x1136c}, {0x11370, 0x11374},
+ {0x11435, 0x11446}, {0x11450, 0x11459}, {0x1145e, 0x1145e}, {0x114b0, 0x114c3},
+ {0x114d0, 0x114d9}, {0x115af, 0x115b5}, {0x115b8, 0x115c0}, {0x115dc, 0x115dd},
+ {0x11630, 0x11640}, {0x11650, 0x11659}, {0x116ab, 0x116b7}, {0x116c0, 0x116c9},
+ {0x1171d, 0x1172b}, {0x11730, 0x11739}, {0x1182c, 0x1183a}, {0x118e0, 0x118e9},
+ {0x11930, 0x11935}, {0x11937, 0x11938}, {0x1193b, 0x1193e}, {0x11940, 0x11940},
+ {0x11942, 0x11943}, {0x11950, 0x11959}, {0x119d1, 0x119d7}, {0x119da, 0x119e0},
+ {0x119e4, 0x119e4}, {0x11a01, 0x11a0a}, {0x11a33, 0x11a39}, {0x11a3b, 0x11a3e},
+ {0x11a47, 0x11a47}, {0x11a51, 0x11a5b}, {0x11a8a, 0x11a99}, {0x11c2f, 0x11c36},
+ {0x11c38, 0x11c3f}, {0x11c50, 0x11c59}, {0x11c92, 0x11ca7}, {0x11ca9, 0x11cb6},
+ {0x11d31, 0x11d36}, {0x11d3a, 0x11d3a}, {0x11d3c, 0x11d3d}, {0x11d3f, 0x11d45},
+ {0x11d47, 0x11d47}, {0x11d50, 0x11d59}, {0x11d8a, 0x11d8e}, {0x11d90, 0x11d91},
+ {0x11d93, 0x11d97}, {0x11da0, 0x11da9}, {0x11ef3, 0x11ef6}, {0x16a60, 0x16a69},
+ {0x16ac0, 0x16ac9}, {0x16af0, 0x16af4}, {0x16b30, 0x16b36}, {0x16b50, 0x16b59},
+ {0x16f4f, 0x16f4f}, {0x16f51, 0x16f87}, {0x16f8f, 0x16f92}, {0x16fe4, 0x16fe4},
+ {0x16ff0, 0x16ff1}, {0x1bc9d, 0x1bc9e}, {0x1cf00, 0x1cf2d}, {0x1cf30, 0x1cf46},
+ {0x1d165, 0x1d169}, {0x1d16d, 0x1d172}, {0x1d17b, 0x1d182}, {0x1d185, 0x1d18b},
+ {0x1d1aa, 0x1d1ad}, {0x1d242, 0x1d244}, {0x1d7ce, 0x1d7ff}, {0x1da00, 0x1da36},
+ {0x1da3b, 0x1da6c}, {0x1da75, 0x1da75}, {0x1da84, 0x1da84}, {0x1da9b, 0x1da9f},
+ {0x1daa1, 0x1daaf}, {0x1e000, 0x1e006}, {0x1e008, 0x1e018}, {0x1e01b, 0x1e021},
+ {0x1e023, 0x1e024}, {0x1e026, 0x1e02a}, {0x1e130, 0x1e136}, {0x1e140, 0x1e149},
+ {0x1e2ae, 0x1e2ae}, {0x1e2ec, 0x1e2f9}, {0x1e8d0, 0x1e8d6}, {0x1e944, 0x1e94a},
{0x1e950, 0x1e959}, {0x1fbf0, 0x1fbf9}, {0xe0100, 0xe01ef},
};
// Number of ranges in kXIDContinueRanges
-constexpr size_t kNumXIDContinueRanges =
- sizeof(kXIDContinueRanges) / sizeof(kXIDContinueRanges[0]);
+constexpr size_t kNumXIDContinueRanges = sizeof(kXIDContinueRanges) / sizeof(kXIDContinueRanges[0]);
} // namespace
bool CodePoint::IsXIDStart() const {
- return std::binary_search(kXIDStartRanges,
- kXIDStartRanges + kNumXIDStartRanges, *this);
+ return std::binary_search(kXIDStartRanges, kXIDStartRanges + kNumXIDStartRanges, *this);
}
bool CodePoint::IsXIDContinue() const {
- return IsXIDStart() ||
- std::binary_search(kXIDContinueRanges,
- kXIDContinueRanges + kNumXIDContinueRanges, *this);
+ return IsXIDStart() || std::binary_search(kXIDContinueRanges,
+ kXIDContinueRanges + kNumXIDContinueRanges, *this);
}
std::ostream& operator<<(std::ostream& out, CodePoint code_point) {
- if (code_point < 0x7f) {
- // See https://en.cppreference.com/w/cpp/language/escape
- switch (code_point) {
- case '\a':
- return out << R"('\a')";
- case '\b':
- return out << R"('\b')";
- case '\f':
- return out << R"('\f')";
- case '\n':
- return out << R"('\n')";
- case '\r':
- return out << R"('\r')";
- case '\t':
- return out << R"('\t')";
- case '\v':
- return out << R"('\v')";
+ if (code_point < 0x7f) {
+ // See https://en.cppreference.com/w/cpp/language/escape
+ switch (code_point) {
+ case '\a':
+ return out << R"('\a')";
+ case '\b':
+ return out << R"('\b')";
+ case '\f':
+ return out << R"('\f')";
+ case '\n':
+ return out << R"('\n')";
+ case '\r':
+ return out << R"('\r')";
+ case '\t':
+ return out << R"('\t')";
+ case '\v':
+ return out << R"('\v')";
+ }
+ return out << "'" << static_cast<char>(code_point) << "'";
}
- return out << "'" << static_cast<char>(code_point) << "'";
- }
- return out << "'U+" << std::hex << code_point.value << "'";
+ return out << "'U+" << std::hex << code_point.value << "'";
}
namespace utf8 {
std::pair<CodePoint, size_t> Decode(const uint8_t* ptr, size_t len) {
- if (len < 1) {
- return {};
- }
+ if (len < 1) {
+ return {};
+ }
- // Lookup table for the first byte of a UTF-8 sequence.
- // 0 indicates an invalid length.
- // Note that bit encodings that can fit in a smaller number of bytes are
- // invalid (e.g. 0xc0). Code points that exceed the unicode maximum of
- // 0x10FFFF are also invalid (0xf5+).
- // See: https://en.wikipedia.org/wiki/UTF-8#Encoding and
- // https://datatracker.ietf.org/doc/html/rfc3629#section-3
- static constexpr uint8_t kSequenceLength[256] = {
- // 0 1 2 3 4 5 6 7 8 9 a b c d e f
- /* 0x00 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- /* 0x10 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- /* 0x20 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- /* 0x30 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- /* 0x40 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- /* 0x50 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- /* 0x60 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- /* 0x70 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- /* 0x80 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- /* 0x90 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- /* 0xa0 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- /* 0xb0 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- /* 0xc0 */ 0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
- /* 0xd0 */ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
- /* 0xe0 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
- /* 0xf0 */ 4, 4, 4, 4, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- };
+ // Lookup table for the first byte of a UTF-8 sequence.
+ // 0 indicates an invalid length.
+ // Note that bit encodings that can fit in a smaller number of bytes are
+ // invalid (e.g. 0xc0). Code points that exceed the unicode maximum of
+ // 0x10FFFF are also invalid (0xf5+).
+ // See: https://en.wikipedia.org/wiki/UTF-8#Encoding and
+ // https://datatracker.ietf.org/doc/html/rfc3629#section-3
+ static constexpr uint8_t kSequenceLength[256] = {
+ // 0 1 2 3 4 5 6 7 8 9 a b c d e f
+ /* 0x00 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ /* 0x10 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ /* 0x20 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ /* 0x30 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ /* 0x40 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ /* 0x50 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ /* 0x60 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ /* 0x70 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ /* 0x80 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* 0x90 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* 0xa0 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* 0xb0 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* 0xc0 */ 0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ /* 0xd0 */ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ /* 0xe0 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ /* 0xf0 */ 4, 4, 4, 4, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ };
- uint8_t n = kSequenceLength[ptr[0]];
- if (n > len) {
- return {};
- }
+ uint8_t n = kSequenceLength[ptr[0]];
+ if (n > len) {
+ return {};
+ }
- CodePoint c;
+ CodePoint c;
- uint8_t valid = 0x80;
- switch (n) {
- // Note: n=0 (invalid) is correctly handled without a case.
- case 1:
- c = CodePoint{ptr[0]};
- break;
- case 2:
- valid &= ptr[1];
- c = CodePoint{(static_cast<uint32_t>(ptr[0] & 0b00011111) << 6) |
- (static_cast<uint32_t>(ptr[1] & 0b00111111))};
- break;
- case 3:
- valid &= ptr[1] & ptr[2];
- c = CodePoint{(static_cast<uint32_t>(ptr[0] & 0b00001111) << 12) |
- (static_cast<uint32_t>(ptr[1] & 0b00111111) << 6) |
- (static_cast<uint32_t>(ptr[2] & 0b00111111))};
- break;
- case 4:
- valid &= ptr[1] & ptr[2] & ptr[3];
- c = CodePoint{(static_cast<uint32_t>(ptr[0] & 0b00000111) << 18) |
- (static_cast<uint32_t>(ptr[1] & 0b00111111) << 12) |
- (static_cast<uint32_t>(ptr[2] & 0b00111111) << 6) |
- (static_cast<uint32_t>(ptr[3] & 0b00111111))};
- break;
- }
- if (!valid) {
- n = 0;
- c = 0;
- }
- return {c, n};
+ uint8_t valid = 0x80;
+ switch (n) {
+ // Note: n=0 (invalid) is correctly handled without a case.
+ case 1:
+ c = CodePoint{ptr[0]};
+ break;
+ case 2:
+ valid &= ptr[1];
+ c = CodePoint{(static_cast<uint32_t>(ptr[0] & 0b00011111) << 6) |
+ (static_cast<uint32_t>(ptr[1] & 0b00111111))};
+ break;
+ case 3:
+ valid &= ptr[1] & ptr[2];
+ c = CodePoint{(static_cast<uint32_t>(ptr[0] & 0b00001111) << 12) |
+ (static_cast<uint32_t>(ptr[1] & 0b00111111) << 6) |
+ (static_cast<uint32_t>(ptr[2] & 0b00111111))};
+ break;
+ case 4:
+ valid &= ptr[1] & ptr[2] & ptr[3];
+ c = CodePoint{(static_cast<uint32_t>(ptr[0] & 0b00000111) << 18) |
+ (static_cast<uint32_t>(ptr[1] & 0b00111111) << 12) |
+ (static_cast<uint32_t>(ptr[2] & 0b00111111) << 6) |
+ (static_cast<uint32_t>(ptr[3] & 0b00111111))};
+ break;
+ }
+ if (!valid) {
+ n = 0;
+ c = 0;
+ }
+ return {c, n};
}
bool IsASCII(std::string_view str) {
- for (auto c : str) {
- if (c & 0x80) {
- return false;
+ for (auto c : str) {
+ if (c & 0x80) {
+ return false;
+ }
}
- }
- return true;
+ return true;
}
} // namespace utf8
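A short sketch of the utf8::Decode() and IsASCII() helpers reformatted above, following the signatures and the lead-byte lookup table shown in this file; the include path is an assumption.

#include <cstdint>
#include <string>

#include "src/tint/text/unicode.h"  // assumed include path

void Utf8DecodeSketch() {
    // U+00E9 ("e with acute") encodes as the two bytes 0xC3 0xA9 in UTF-8.
    const std::string text = "\xC3\xA9";
    const auto* bytes = reinterpret_cast<const uint8_t*>(text.data());

    // Decode() returns {code point, bytes consumed}; a truncated sequence, an
    // invalid lead byte, or a continuation byte without its high bit set comes
    // back with a width of 0.
    auto [code_point, width] = tint::text::utf8::Decode(bytes, text.size());
    // Here: code_point.value == 0xE9 and width == 2.

    // IsASCII() simply checks that no byte has the high bit set.
    bool ascii = tint::text::utf8::IsASCII(text);  // false for this input

    (void)code_point; (void)width; (void)ascii;
}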
diff --git a/chromium/third_party/dawn/src/tint/text/unicode.h b/chromium/third_party/dawn/src/tint/text/unicode.h
index 1d2a1b0c6fc..f0aa272149d 100644
--- a/chromium/third_party/dawn/src/tint/text/unicode.h
+++ b/chromium/third_party/dawn/src/tint/text/unicode.h
@@ -24,34 +24,34 @@ namespace tint::text {
/// CodePoint is a unicode code point.
struct CodePoint {
- /// Constructor
- inline CodePoint() = default;
-
- /// Constructor
- /// @param v the code point value
- inline explicit CodePoint(uint32_t v) : value(v) {}
-
- /// @returns the code point value
- inline operator uint32_t() const { return value; }
-
- /// Assignment operator
- /// @param v the new value for the code point
- /// @returns this CodePoint
- inline CodePoint& operator=(uint32_t v) {
- value = v;
- return *this;
- }
-
- /// @returns true if this CodePoint is in the XID_Start set.
- /// @see https://unicode.org/reports/tr31/
- bool IsXIDStart() const;
-
- /// @returns true if this CodePoint is in the XID_Continue set.
- /// @see https://unicode.org/reports/tr31/
- bool IsXIDContinue() const;
-
- /// The code point value
- uint32_t value = 0;
+ /// Constructor
+ inline CodePoint() = default;
+
+ /// Constructor
+ /// @param v the code point value
+ inline explicit CodePoint(uint32_t v) : value(v) {}
+
+ /// @returns the code point value
+ inline operator uint32_t() const { return value; }
+
+ /// Assignment operator
+ /// @param v the new value for the code point
+ /// @returns this CodePoint
+ inline CodePoint& operator=(uint32_t v) {
+ value = v;
+ return *this;
+ }
+
+ /// @returns true if this CodePoint is in the XID_Start set.
+ /// @see https://unicode.org/reports/tr31/
+ bool IsXIDStart() const;
+
+ /// @returns true if this CodePoint is in the XID_Continue set.
+ /// @see https://unicode.org/reports/tr31/
+ bool IsXIDContinue() const;
+
+ /// The code point value
+ uint32_t value = 0;
};
/// Writes the CodePoint to the std::ostream.
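IsXIDStart() and IsXIDContinue() test membership in the Unicode XID_Start and XID_Continue sets from UAX #31, which identifier rules are typically built from. A hedged sketch of such a check layered on top of CodePoint (the IsXIDIdentifier helper is illustrative only):

    #include <cstddef>
    #include <vector>

    #include "src/tint/text/unicode.h"

    // Sketch: true if the decoded code points form a valid XID identifier,
    // i.e. the first is in XID_Start and the remainder are in XID_Continue.
    bool IsXIDIdentifier(const std::vector<tint::text::CodePoint>& code_points) {
        if (code_points.empty() || !code_points[0].IsXIDStart()) {
            return false;
        }
        for (std::size_t i = 1; i < code_points.size(); i++) {
            if (!code_points[i].IsXIDContinue()) {
                return false;
            }
        }
        return true;
    }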
diff --git a/chromium/third_party/dawn/src/tint/text/unicode_test.cc b/chromium/third_party/dawn/src/tint/text/unicode_test.cc
index 38221a4e126..67bbeadd4e2 100644
--- a/chromium/third_party/dawn/src/tint/text/unicode_test.cc
+++ b/chromium/third_party/dawn/src/tint/text/unicode_test.cc
@@ -30,21 +30,21 @@ namespace tint::text {
namespace {
struct CodePointCase {
- CodePoint code_point;
- bool is_xid_start;
- bool is_xid_continue;
+ CodePoint code_point;
+ bool is_xid_start;
+ bool is_xid_continue;
};
std::ostream& operator<<(std::ostream& out, CodePointCase c) {
- return out << c.code_point;
+ return out << c.code_point;
}
class CodePointTest : public testing::TestWithParam<CodePointCase> {};
TEST_P(CodePointTest, CharacterSets) {
- auto param = GetParam();
- EXPECT_EQ(param.code_point.IsXIDStart(), param.is_xid_start);
- EXPECT_EQ(param.code_point.IsXIDContinue(), param.is_xid_continue);
+ auto param = GetParam();
+ EXPECT_EQ(param.code_point.IsXIDStart(), param.is_xid_start);
+ EXPECT_EQ(param.code_point.IsXIDContinue(), param.is_xid_continue);
}
INSTANTIATE_TEST_SUITE_P(
@@ -222,136 +222,131 @@ INSTANTIATE_TEST_SUITE_P(
namespace {
struct CodePointAndWidth {
- CodePoint code_point;
- size_t width;
+ CodePoint code_point;
+ size_t width;
};
bool operator==(const CodePointAndWidth& a, const CodePointAndWidth& b) {
- return a.code_point == b.code_point && a.width == b.width;
+ return a.code_point == b.code_point && a.width == b.width;
}
std::ostream& operator<<(std::ostream& out, CodePointAndWidth cpw) {
- return out << "code_point: " << cpw.code_point << ", width: " << cpw.width;
+ return out << "code_point: " << cpw.code_point << ", width: " << cpw.width;
}
struct DecodeUTF8Case {
- std::string string;
- std::vector<CodePointAndWidth> expected;
+ std::string string;
+ std::vector<CodePointAndWidth> expected;
};
std::ostream& operator<<(std::ostream& out, DecodeUTF8Case c) {
- return out << "'" << c.string << "'";
+ return out << "'" << c.string << "'";
}
class DecodeUTF8Test : public testing::TestWithParam<DecodeUTF8Case> {};
TEST_P(DecodeUTF8Test, Valid) {
- auto param = GetParam();
-
- const uint8_t* data = reinterpret_cast<const uint8_t*>(param.string.data());
- const size_t len = param.string.size();
-
- std::vector<CodePointAndWidth> got;
- size_t offset = 0;
- while (offset < len) {
- auto [code_point, width] = utf8::Decode(data + offset, len - offset);
- if (width == 0) {
- FAIL() << "Decode() failed at byte offset " << offset;
+ auto param = GetParam();
+
+ const uint8_t* data = reinterpret_cast<const uint8_t*>(param.string.data());
+ const size_t len = param.string.size();
+
+ std::vector<CodePointAndWidth> got;
+ size_t offset = 0;
+ while (offset < len) {
+ auto [code_point, width] = utf8::Decode(data + offset, len - offset);
+ if (width == 0) {
+ FAIL() << "Decode() failed at byte offset " << offset;
+ }
+ offset += width;
+ got.emplace_back(CodePointAndWidth{code_point, width});
}
- offset += width;
- got.emplace_back(CodePointAndWidth{code_point, width});
- }
- EXPECT_THAT(got, ::testing::ElementsAreArray(param.expected));
+ EXPECT_THAT(got, ::testing::ElementsAreArray(param.expected));
}
-INSTANTIATE_TEST_SUITE_P(
- AsciiLetters,
- DecodeUTF8Test,
- ::testing::ValuesIn({
- DecodeUTF8Case{"a", {{C('a'), 1}}},
- DecodeUTF8Case{"abc", {{C('a'), 1}, {C('b'), 1}, {C('c'), 1}}},
- DecodeUTF8Case{"def", {{C('d'), 1}, {C('e'), 1}, {C('f'), 1}}},
- DecodeUTF8Case{"gh", {{C('g'), 1}, {C('h'), 1}}},
- DecodeUTF8Case{"ij", {{C('i'), 1}, {C('j'), 1}}},
- DecodeUTF8Case{"klm", {{C('k'), 1}, {C('l'), 1}, {C('m'), 1}}},
- DecodeUTF8Case{"nop", {{C('n'), 1}, {C('o'), 1}, {C('p'), 1}}},
- DecodeUTF8Case{"qr", {{C('q'), 1}, {C('r'), 1}}},
- DecodeUTF8Case{"stu", {{C('s'), 1}, {C('t'), 1}, {C('u'), 1}}},
- DecodeUTF8Case{"vw", {{C('v'), 1}, {C('w'), 1}}},
- DecodeUTF8Case{"xyz", {{C('x'), 1}, {C('y'), 1}, {C('z'), 1}}},
- DecodeUTF8Case{"A", {{C('A'), 1}}},
- DecodeUTF8Case{"ABC", {{C('A'), 1}, {C('B'), 1}, {C('C'), 1}}},
- DecodeUTF8Case{"DEF", {{C('D'), 1}, {C('E'), 1}, {C('F'), 1}}},
- DecodeUTF8Case{"GH", {{C('G'), 1}, {C('H'), 1}}},
- DecodeUTF8Case{"IJ", {{C('I'), 1}, {C('J'), 1}}},
- DecodeUTF8Case{"KLM", {{C('K'), 1}, {C('L'), 1}, {C('M'), 1}}},
- DecodeUTF8Case{"NOP", {{C('N'), 1}, {C('O'), 1}, {C('P'), 1}}},
- DecodeUTF8Case{"QR", {{C('Q'), 1}, {C('R'), 1}}},
- DecodeUTF8Case{"STU", {{C('S'), 1}, {C('T'), 1}, {C('U'), 1}}},
- DecodeUTF8Case{"VW", {{C('V'), 1}, {C('W'), 1}}},
- DecodeUTF8Case{"XYZ", {{C('X'), 1}, {C('Y'), 1}, {C('Z'), 1}}},
- }));
-
-INSTANTIATE_TEST_SUITE_P(
- AsciiNumbers,
- DecodeUTF8Test,
- ::testing::ValuesIn({
- DecodeUTF8Case{"012", {{C('0'), 1}, {C('1'), 1}, {C('2'), 1}}},
- DecodeUTF8Case{"345", {{C('3'), 1}, {C('4'), 1}, {C('5'), 1}}},
- DecodeUTF8Case{"678", {{C('6'), 1}, {C('7'), 1}, {C('8'), 1}}},
- DecodeUTF8Case{"9", {{C('9'), 1}}},
- }));
-
-INSTANTIATE_TEST_SUITE_P(
- AsciiSymbols,
- DecodeUTF8Test,
- ::testing::ValuesIn({
- DecodeUTF8Case{"!\"#", {{C('!'), 1}, {C('"'), 1}, {C('#'), 1}}},
- DecodeUTF8Case{"$%&", {{C('$'), 1}, {C('%'), 1}, {C('&'), 1}}},
- DecodeUTF8Case{"'()", {{C('\''), 1}, {C('('), 1}, {C(')'), 1}}},
- DecodeUTF8Case{"*,-", {{C('*'), 1}, {C(','), 1}, {C('-'), 1}}},
- DecodeUTF8Case{"/`@", {{C('/'), 1}, {C('`'), 1}, {C('@'), 1}}},
- DecodeUTF8Case{"^\\[", {{C('^'), 1}, {C('\\'), 1}, {C('['), 1}}},
- DecodeUTF8Case{"]_|", {{C(']'), 1}, {C('_'), 1}, {C('|'), 1}}},
- DecodeUTF8Case{"{}", {{C('{'), 1}, {C('}'), 1}}},
- }));
-
-INSTANTIATE_TEST_SUITE_P(
- AsciiSpecial,
- DecodeUTF8Test,
- ::testing::ValuesIn({
- DecodeUTF8Case{"", {}},
- DecodeUTF8Case{" \t\n", {{C(' '), 1}, {C('\t'), 1}, {C('\n'), 1}}},
- DecodeUTF8Case{"\a\b\f", {{C('\a'), 1}, {C('\b'), 1}, {C('\f'), 1}}},
- DecodeUTF8Case{"\n\r\t", {{C('\n'), 1}, {C('\r'), 1}, {C('\t'), 1}}},
- DecodeUTF8Case{"\v", {{C('\v'), 1}}},
- }));
-
-INSTANTIATE_TEST_SUITE_P(
- Hindi,
- DecodeUTF8Test,
- ::testing::ValuesIn({DecodeUTF8Case{
- // नमस्ते दुनिया
- "\xe0\xa4\xa8\xe0\xa4\xae\xe0\xa4\xb8\xe0\xa5\x8d\xe0\xa4\xa4\xe0\xa5"
- "\x87\x20\xe0\xa4\xa6\xe0\xa5\x81\xe0\xa4\xa8\xe0\xa4\xbf\xe0\xa4\xaf"
- "\xe0\xa4\xbe",
- {
- {C(0x0928), 3}, // न
- {C(0x092e), 3}, // म
- {C(0x0938), 3}, // स
- {C(0x094d), 3}, // ्
- {C(0x0924), 3}, // त
- {C(0x0947), 3}, // े
- {C(' '), 1},
- {C(0x0926), 3}, // द
- {C(0x0941), 3}, // ु
- {C(0x0928), 3}, // न
- {C(0x093f), 3}, // ि
- {C(0x092f), 3}, // य
- {C(0x093e), 3}, // ा
- },
- }}));
+INSTANTIATE_TEST_SUITE_P(AsciiLetters,
+ DecodeUTF8Test,
+ ::testing::ValuesIn({
+ DecodeUTF8Case{"a", {{C('a'), 1}}},
+ DecodeUTF8Case{"abc", {{C('a'), 1}, {C('b'), 1}, {C('c'), 1}}},
+ DecodeUTF8Case{"def", {{C('d'), 1}, {C('e'), 1}, {C('f'), 1}}},
+ DecodeUTF8Case{"gh", {{C('g'), 1}, {C('h'), 1}}},
+ DecodeUTF8Case{"ij", {{C('i'), 1}, {C('j'), 1}}},
+ DecodeUTF8Case{"klm", {{C('k'), 1}, {C('l'), 1}, {C('m'), 1}}},
+ DecodeUTF8Case{"nop", {{C('n'), 1}, {C('o'), 1}, {C('p'), 1}}},
+ DecodeUTF8Case{"qr", {{C('q'), 1}, {C('r'), 1}}},
+ DecodeUTF8Case{"stu", {{C('s'), 1}, {C('t'), 1}, {C('u'), 1}}},
+ DecodeUTF8Case{"vw", {{C('v'), 1}, {C('w'), 1}}},
+ DecodeUTF8Case{"xyz", {{C('x'), 1}, {C('y'), 1}, {C('z'), 1}}},
+ DecodeUTF8Case{"A", {{C('A'), 1}}},
+ DecodeUTF8Case{"ABC", {{C('A'), 1}, {C('B'), 1}, {C('C'), 1}}},
+ DecodeUTF8Case{"DEF", {{C('D'), 1}, {C('E'), 1}, {C('F'), 1}}},
+ DecodeUTF8Case{"GH", {{C('G'), 1}, {C('H'), 1}}},
+ DecodeUTF8Case{"IJ", {{C('I'), 1}, {C('J'), 1}}},
+ DecodeUTF8Case{"KLM", {{C('K'), 1}, {C('L'), 1}, {C('M'), 1}}},
+ DecodeUTF8Case{"NOP", {{C('N'), 1}, {C('O'), 1}, {C('P'), 1}}},
+ DecodeUTF8Case{"QR", {{C('Q'), 1}, {C('R'), 1}}},
+ DecodeUTF8Case{"STU", {{C('S'), 1}, {C('T'), 1}, {C('U'), 1}}},
+ DecodeUTF8Case{"VW", {{C('V'), 1}, {C('W'), 1}}},
+ DecodeUTF8Case{"XYZ", {{C('X'), 1}, {C('Y'), 1}, {C('Z'), 1}}},
+ }));
+
+INSTANTIATE_TEST_SUITE_P(AsciiNumbers,
+ DecodeUTF8Test,
+ ::testing::ValuesIn({
+ DecodeUTF8Case{"012", {{C('0'), 1}, {C('1'), 1}, {C('2'), 1}}},
+ DecodeUTF8Case{"345", {{C('3'), 1}, {C('4'), 1}, {C('5'), 1}}},
+ DecodeUTF8Case{"678", {{C('6'), 1}, {C('7'), 1}, {C('8'), 1}}},
+ DecodeUTF8Case{"9", {{C('9'), 1}}},
+ }));
+
+INSTANTIATE_TEST_SUITE_P(AsciiSymbols,
+ DecodeUTF8Test,
+ ::testing::ValuesIn({
+ DecodeUTF8Case{"!\"#", {{C('!'), 1}, {C('"'), 1}, {C('#'), 1}}},
+ DecodeUTF8Case{"$%&", {{C('$'), 1}, {C('%'), 1}, {C('&'), 1}}},
+ DecodeUTF8Case{"'()", {{C('\''), 1}, {C('('), 1}, {C(')'), 1}}},
+ DecodeUTF8Case{"*,-", {{C('*'), 1}, {C(','), 1}, {C('-'), 1}}},
+ DecodeUTF8Case{"/`@", {{C('/'), 1}, {C('`'), 1}, {C('@'), 1}}},
+ DecodeUTF8Case{"^\\[", {{C('^'), 1}, {C('\\'), 1}, {C('['), 1}}},
+ DecodeUTF8Case{"]_|", {{C(']'), 1}, {C('_'), 1}, {C('|'), 1}}},
+ DecodeUTF8Case{"{}", {{C('{'), 1}, {C('}'), 1}}},
+ }));
+
+INSTANTIATE_TEST_SUITE_P(AsciiSpecial,
+ DecodeUTF8Test,
+ ::testing::ValuesIn({
+ DecodeUTF8Case{"", {}},
+ DecodeUTF8Case{" \t\n", {{C(' '), 1}, {C('\t'), 1}, {C('\n'), 1}}},
+ DecodeUTF8Case{"\a\b\f", {{C('\a'), 1}, {C('\b'), 1}, {C('\f'), 1}}},
+ DecodeUTF8Case{"\n\r\t", {{C('\n'), 1}, {C('\r'), 1}, {C('\t'), 1}}},
+ DecodeUTF8Case{"\v", {{C('\v'), 1}}},
+ }));
+
+INSTANTIATE_TEST_SUITE_P(Hindi,
+ DecodeUTF8Test,
+ ::testing::ValuesIn({DecodeUTF8Case{
+ // नमस्ते दुनिया
+ "\xe0\xa4\xa8\xe0\xa4\xae\xe0\xa4\xb8\xe0\xa5\x8d\xe0\xa4\xa4\xe0\xa5"
+ "\x87\x20\xe0\xa4\xa6\xe0\xa5\x81\xe0\xa4\xa8\xe0\xa4\xbf\xe0\xa4\xaf"
+ "\xe0\xa4\xbe",
+ {
+ {C(0x0928), 3}, // न
+ {C(0x092e), 3}, // म
+ {C(0x0938), 3}, // स
+ {C(0x094d), 3}, // ्
+ {C(0x0924), 3}, // त
+ {C(0x0947), 3}, // े
+ {C(' '), 1},
+ {C(0x0926), 3}, // द
+ {C(0x0941), 3}, // ु
+ {C(0x0928), 3}, // न
+ {C(0x093f), 3}, // ि
+ {C(0x092f), 3}, // य
+ {C(0x093e), 3}, // ा
+ },
+ }}));
INSTANTIATE_TEST_SUITE_P(Mandarin,
DecodeUTF8Test,
@@ -412,29 +407,28 @@ INSTANTIATE_TEST_SUITE_P(Emoji,
},
}}));
-INSTANTIATE_TEST_SUITE_P(
- Random,
- DecodeUTF8Test,
- ::testing::ValuesIn({DecodeUTF8Case{
- // Øⓑꚫ쁹Ǵ𐌒岾🥍ⴵ㍨又ᮗ
- "\xc3\x98\xe2\x93\x91\xea\x9a\xab\xec\x81\xb9\xc7\xb4\xf0\x90\x8c\x92"
- "\xe5\xb2\xbe\xf0\x9f\xa5\x8d\xe2\xb4\xb5\xe3\x8d\xa8\xe5\x8f\x88\xe1"
- "\xae\x97",
- {
- {C(0x000d8), 2}, // Ø
- {C(0x024d1), 3}, // ⓑ
- {C(0x0a6ab), 3}, // ꚫ
- {C(0x0c079), 3}, // 쁹
- {C(0x001f4), 2}, // Ǵ
- {C(0x10312), 4}, // 𐌒
- {C(0x05cbe), 3}, // 岾
- {C(0x1f94d), 4}, // 🥍
- {C(0x02d35), 3}, // ⴵ
- {C(0x03368), 3}, // ㍨
- {C(0x053c8), 3}, // 又
- {C(0x01b97), 3}, // ᮗ
- },
- }}));
+INSTANTIATE_TEST_SUITE_P(Random,
+ DecodeUTF8Test,
+ ::testing::ValuesIn({DecodeUTF8Case{
+ // Øⓑꚫ쁹Ǵ𐌒岾🥍ⴵ㍨又ᮗ
+ "\xc3\x98\xe2\x93\x91\xea\x9a\xab\xec\x81\xb9\xc7\xb4\xf0\x90\x8c\x92"
+ "\xe5\xb2\xbe\xf0\x9f\xa5\x8d\xe2\xb4\xb5\xe3\x8d\xa8\xe5\x8f\x88\xe1"
+ "\xae\x97",
+ {
+ {C(0x000d8), 2}, // Ø
+ {C(0x024d1), 3}, // ⓑ
+ {C(0x0a6ab), 3}, // ꚫ
+ {C(0x0c079), 3}, // 쁹
+ {C(0x001f4), 2}, // Ǵ
+ {C(0x10312), 4}, // 𐌒
+ {C(0x05cbe), 3}, // 岾
+ {C(0x1f94d), 4}, // 🥍
+ {C(0x02d35), 3}, // ⴵ
+ {C(0x03368), 3}, // ㍨
+ {C(0x053c8), 3}, // 又
+ {C(0x01b97), 3}, // ᮗ
+ },
+ }}));
} // namespace
@@ -445,52 +439,51 @@ namespace {
class DecodeUTF8InvalidTest : public testing::TestWithParam<const char*> {};
TEST_P(DecodeUTF8InvalidTest, Invalid) {
- auto* param = GetParam();
+ auto* param = GetParam();
- const uint8_t* data = reinterpret_cast<const uint8_t*>(param);
- const size_t len = std::string(param).size();
+ const uint8_t* data = reinterpret_cast<const uint8_t*>(param);
+ const size_t len = std::string(param).size();
- auto [code_point, width] = utf8::Decode(data, len);
- EXPECT_EQ(code_point, CodePoint(0));
- EXPECT_EQ(width, 0u);
+ auto [code_point, width] = utf8::Decode(data, len);
+ EXPECT_EQ(code_point, CodePoint(0));
+ EXPECT_EQ(width, 0u);
}
-INSTANTIATE_TEST_SUITE_P(
- Invalid,
- DecodeUTF8InvalidTest,
- ::testing::ValuesIn({
- "\x80\x80\x80\x80", // 10000000
- "\x81\x80\x80\x80", // 10000001
- "\x8f\x80\x80\x80", // 10001111
- "\x90\x80\x80\x80", // 10010000
- "\x91\x80\x80\x80", // 10010001
- "\x9f\x80\x80\x80", // 10011111
- "\xa0\x80\x80\x80", // 10100000
- "\xa1\x80\x80\x80", // 10100001
- "\xaf\x80\x80\x80", // 10101111
- "\xb0\x80\x80\x80", // 10110000
- "\xb1\x80\x80\x80", // 10110001
- "\xbf\x80\x80\x80", // 10111111
- "\xc0\x80\x80\x80", // 11000000
- "\xc1\x80\x80\x80", // 11000001
- "\xf5\x80\x80\x80", // 11110101
- "\xf6\x80\x80\x80", // 11110110
- "\xf7\x80\x80\x80", // 11110111
- "\xf8\x80\x80\x80", // 11111000
- "\xfe\x80\x80\x80", // 11111110
- "\xff\x80\x80\x80", // 11111111
-
- "\xd0", // 2-bytes, missing second byte
- "\xe8\x8f", // 3-bytes, missing third byte
- "\xf4\x8f\x8f", // 4-bytes, missing fourth byte
-
- "\xd0\x7f", // 2-bytes, second byte MSB unset
- "\xe8\x7f\x8f", // 3-bytes, second byte MSB unset
- "\xe8\x8f\x7f", // 3-bytes, third byte MSB unset
- "\xf4\x7f\x8f\x8f", // 4-bytes, second byte MSB unset
- "\xf4\x8f\x7f\x8f", // 4-bytes, third byte MSB unset
- "\xf4\x8f\x8f\x7f", // 4-bytes, fourth byte MSB unset
- }));
+INSTANTIATE_TEST_SUITE_P(Invalid,
+ DecodeUTF8InvalidTest,
+ ::testing::ValuesIn({
+ "\x80\x80\x80\x80", // 10000000
+ "\x81\x80\x80\x80", // 10000001
+ "\x8f\x80\x80\x80", // 10001111
+ "\x90\x80\x80\x80", // 10010000
+ "\x91\x80\x80\x80", // 10010001
+ "\x9f\x80\x80\x80", // 10011111
+ "\xa0\x80\x80\x80", // 10100000
+ "\xa1\x80\x80\x80", // 10100001
+ "\xaf\x80\x80\x80", // 10101111
+ "\xb0\x80\x80\x80", // 10110000
+ "\xb1\x80\x80\x80", // 10110001
+ "\xbf\x80\x80\x80", // 10111111
+ "\xc0\x80\x80\x80", // 11000000
+ "\xc1\x80\x80\x80", // 11000001
+ "\xf5\x80\x80\x80", // 11110101
+ "\xf6\x80\x80\x80", // 11110110
+ "\xf7\x80\x80\x80", // 11110111
+ "\xf8\x80\x80\x80", // 11111000
+ "\xfe\x80\x80\x80", // 11111110
+ "\xff\x80\x80\x80", // 11111111
+
+ "\xd0", // 2-bytes, missing second byte
+ "\xe8\x8f", // 3-bytes, missing third byte
+ "\xf4\x8f\x8f", // 4-bytes, missing fourth byte
+
+ "\xd0\x7f", // 2-bytes, second byte MSB unset
+ "\xe8\x7f\x8f", // 3-bytes, second byte MSB unset
+ "\xe8\x8f\x7f", // 3-bytes, third byte MSB unset
+ "\xf4\x7f\x8f\x8f", // 4-bytes, second byte MSB unset
+ "\xf4\x8f\x7f\x8f", // 4-bytes, third byte MSB unset
+ "\xf4\x8f\x8f\x7f", // 4-bytes, fourth byte MSB unset
+ }));
} // namespace
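The invalid cases above fall into three groups: bytes that can never begin a sequence (0x80..0xC1 and 0xF5..0xFF all map to 0 in kSequenceLength), sequences cut short, and continuation bytes with the high bit unset. Decode() reports every one of them as width 0, so a whole-string check can be built on it directly, with IsASCII() as a cheap fast path. A sketch, assuming the IsValidUTF8 wrapper name, which is not part of the patch:

    #include <cstddef>
    #include <cstdint>
    #include <string_view>

    #include "src/tint/text/unicode.h"

    // Sketch: true if `str` is entirely well-formed UTF-8.
    bool IsValidUTF8(std::string_view str) {
        if (tint::text::utf8::IsASCII(str)) {
            return true;  // pure ASCII needs no per-sequence checks
        }
        const uint8_t* data = reinterpret_cast<const uint8_t*>(str.data());
        std::size_t offset = 0;
        while (offset < str.size()) {
            auto [code_point, width] = tint::text::utf8::Decode(data + offset, str.size() - offset);
            (void)code_point;  // only the width matters for validation
            if (width == 0) {
                return false;  // truncated or malformed sequence
            }
            offset += width;
        }
        return true;
    }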
diff --git a/chromium/third_party/dawn/src/tint/traits.h b/chromium/third_party/dawn/src/tint/traits.h
index dc104cc8e99..eb81b3f0dbd 100644
--- a/chromium/third_party/dawn/src/tint/traits.h
+++ b/chromium/third_party/dawn/src/tint/traits.h
@@ -31,47 +31,44 @@ using NthTypeOf = typename std::tuple_element<N, std::tuple<Types...>>::type;
/// Signature describes the signature of a function.
template <typename RETURN, typename... PARAMETERS>
struct Signature {
- /// The return type of the function signature
- using ret = RETURN;
- /// The parameters of the function signature held in a std::tuple
- using parameters = std::tuple<PARAMETERS...>;
- /// The type of the Nth parameter of function signature
- template <std::size_t N>
- using parameter = NthTypeOf<N, PARAMETERS...>;
- /// The total number of parameters
- static constexpr std::size_t parameter_count = sizeof...(PARAMETERS);
+ /// The return type of the function signature
+ using ret = RETURN;
+ /// The parameters of the function signature held in a std::tuple
+ using parameters = std::tuple<PARAMETERS...>;
+ /// The type of the Nth parameter of function signature
+ template <std::size_t N>
+ using parameter = NthTypeOf<N, PARAMETERS...>;
+ /// The total number of parameters
+ static constexpr std::size_t parameter_count = sizeof...(PARAMETERS);
};
/// SignatureOf is a traits helper that infers the signature of the function,
/// method, static method, lambda, or function-like object `F`.
template <typename F>
struct SignatureOf {
- /// The signature of the function-like object `F`
- using type = typename SignatureOf<decltype(&F::operator())>::type;
+ /// The signature of the function-like object `F`
+ using type = typename SignatureOf<decltype(&F::operator())>::type;
};
/// SignatureOf specialization for a regular function or static method.
template <typename R, typename... ARGS>
struct SignatureOf<R (*)(ARGS...)> {
- /// The signature of the function-like object `F`
- using type = Signature<typename std::decay<R>::type,
- typename std::decay<ARGS>::type...>;
+ /// The signature of the function-like object `F`
+ using type = Signature<typename std::decay<R>::type, typename std::decay<ARGS>::type...>;
};
/// SignatureOf specialization for a non-static method.
template <typename R, typename C, typename... ARGS>
struct SignatureOf<R (C::*)(ARGS...)> {
- /// The signature of the function-like object `F`
- using type = Signature<typename std::decay<R>::type,
- typename std::decay<ARGS>::type...>;
+ /// The signature of the function-like object `F`
+ using type = Signature<typename std::decay<R>::type, typename std::decay<ARGS>::type...>;
};
/// SignatureOf specialization for a non-static, const method.
template <typename R, typename C, typename... ARGS>
struct SignatureOf<R (C::*)(ARGS...) const> {
- /// The signature of the function-like object `F`
- using type = Signature<typename std::decay<R>::type,
- typename std::decay<ARGS>::type...>;
+ /// The signature of the function-like object `F`
+ using type = Signature<typename std::decay<R>::type, typename std::decay<ARGS>::type...>;
};
/// SignatureOfT is an alias to `typename SignatureOf<F>::type`.
@@ -90,8 +87,7 @@ using ReturnType = typename SignatureOfT<F>::ret;
/// `BASE`.
template <typename T, typename BASE>
static constexpr bool IsTypeOrDerived =
- std::is_base_of<BASE, Decay<T>>::value ||
- std::is_same<BASE, Decay<T>>::value;
+ std::is_base_of<BASE, Decay<T>>::value || std::is_same<BASE, Decay<T>>::value;
/// If `CONDITION` is true then EnableIf resolves to type T, otherwise an
/// invalid type.
@@ -111,13 +107,13 @@ using EnableIfIsNotType = EnableIf<!IsTypeOrDerived<T, BASE>, T>;
/// @returns the std::index_sequence with all the indices shifted by OFFSET.
template <std::size_t OFFSET, std::size_t... INDICES>
constexpr auto Shift(std::index_sequence<INDICES...>) {
- return std::integer_sequence<std::size_t, OFFSET + INDICES...>{};
+ return std::integer_sequence<std::size_t, OFFSET + INDICES...>{};
}
/// @returns a std::integer_sequence with the integers `[OFFSET..OFFSET+COUNT)`
template <std::size_t OFFSET, std::size_t COUNT>
constexpr auto Range() {
- return Shift<OFFSET>(std::make_index_sequence<COUNT>{});
+ return Shift<OFFSET>(std::make_index_sequence<COUNT>{});
}
namespace detail {
@@ -125,11 +121,9 @@ namespace detail {
/// @returns the tuple `t` swizzled by `INDICES`
template <typename TUPLE, std::size_t... INDICES>
constexpr auto Swizzle(TUPLE&& t, std::index_sequence<INDICES...>)
- -> std::tuple<
- std::tuple_element_t<INDICES, std::remove_reference_t<TUPLE>>...> {
- return {std::forward<
- std::tuple_element_t<INDICES, std::remove_reference_t<TUPLE>>>(
- std::get<INDICES>(std::forward<TUPLE>(t)))...};
+ -> std::tuple<std::tuple_element_t<INDICES, std::remove_reference_t<TUPLE>>...> {
+ return {std::forward<std::tuple_element_t<INDICES, std::remove_reference_t<TUPLE>>>(
+ std::get<INDICES>(std::forward<TUPLE>(t)))...};
}
/// @returns a nullptr of the tuple type `TUPLE` swizzled by `INDICES`.
@@ -138,8 +132,8 @@ constexpr auto Swizzle(TUPLE&& t, std::index_sequence<INDICES...>)
/// types.
template <typename TUPLE, std::size_t... INDICES>
constexpr auto* SwizzlePtrTy(std::index_sequence<INDICES...>) {
- using Swizzled = std::tuple<std::tuple_element_t<INDICES, TUPLE>...>;
- return static_cast<Swizzled*>(nullptr);
+ using Swizzled = std::tuple<std::tuple_element_t<INDICES, TUPLE>...>;
+ return static_cast<Swizzled*>(nullptr);
}
} // namespace detail
@@ -148,14 +142,14 @@ constexpr auto* SwizzlePtrTy(std::index_sequence<INDICES...>) {
/// `[OFFSET..OFFSET+COUNT)`
template <std::size_t OFFSET, std::size_t COUNT, typename TUPLE>
constexpr auto Slice(TUPLE&& t) {
- return detail::Swizzle<TUPLE>(std::forward<TUPLE>(t), Range<OFFSET, COUNT>());
+ return detail::Swizzle<TUPLE>(std::forward<TUPLE>(t), Range<OFFSET, COUNT>());
}
/// Resolves to the slice of the tuple `t` with the tuple elements
/// `[OFFSET..OFFSET+COUNT)`
template <std::size_t OFFSET, std::size_t COUNT, typename TUPLE>
-using SliceTuple = std::remove_pointer_t<decltype(
- detail::SwizzlePtrTy<TUPLE>(Range<OFFSET, COUNT>()))>;
+using SliceTuple =
+ std::remove_pointer_t<decltype(detail::SwizzlePtrTy<TUPLE>(Range<OFFSET, COUNT>()))>;
} // namespace tint::traits
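Together these helpers give compile-time access to a callable's signature and let parameter tuples be sliced by index range. A brief usage sketch; the lambda, the TraitsExample wrapper and the asserted values are illustrative, while SignatureOfT, ReturnType, ParameterType and Slice are the helpers exercised by traits_test.cc below:

    #include <tuple>
    #include <type_traits>
    #include <utility>

    #include "src/tint/traits.h"

    void TraitsExample() {
        auto callback = [](int, float) -> bool { return true; };

        // Inspect the callable's signature at compile time.
        using Sig = tint::traits::SignatureOfT<decltype(callback)>;
        static_assert(Sig::parameter_count == 2);
        static_assert(std::is_same_v<tint::traits::ReturnType<decltype(callback)>, bool>);
        static_assert(std::is_same_v<tint::traits::ParameterType<decltype(callback), 1>, float>);

        // Slice<OFFSET, COUNT> keeps the tuple elements [OFFSET..OFFSET+COUNT).
        auto args = std::make_tuple(1, true, 2.0f);
        auto tail = tint::traits::Slice<1, 2>(std::move(args));
        static_assert(std::tuple_size_v<decltype(tail)> == 2);
        static_assert(std::is_same_v<std::tuple_element_t<1, decltype(tail)>, float>);
        (void)callback;
        (void)tail;  // the static_asserts above are the point of this sketch
    }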
diff --git a/chromium/third_party/dawn/src/tint/traits_test.cc b/chromium/third_party/dawn/src/tint/traits_test.cc
index 6af0bd478c3..c10010786dd 100644
--- a/chromium/third_party/dawn/src/tint/traits_test.cc
+++ b/chromium/third_party/dawn/src/tint/traits_test.cc
@@ -25,209 +25,201 @@ void F3(int, S, float) {}
} // namespace
TEST(ParamType, Function) {
- F1({}); // Avoid unused method warning
- F3(0, {}, 0); // Avoid unused method warning
- static_assert(std::is_same_v<ParameterType<decltype(&F1), 0>, S>);
- static_assert(std::is_same_v<ParameterType<decltype(&F3), 0>, int>);
- static_assert(std::is_same_v<ParameterType<decltype(&F3), 1>, S>);
- static_assert(std::is_same_v<ParameterType<decltype(&F3), 2>, float>);
- static_assert(std::is_same_v<ReturnType<decltype(&F1)>, void>);
- static_assert(std::is_same_v<ReturnType<decltype(&F3)>, void>);
- static_assert(SignatureOfT<decltype(&F1)>::parameter_count == 1);
- static_assert(SignatureOfT<decltype(&F3)>::parameter_count == 3);
+ F1({}); // Avoid unused method warning
+ F3(0, {}, 0); // Avoid unused method warning
+ static_assert(std::is_same_v<ParameterType<decltype(&F1), 0>, S>);
+ static_assert(std::is_same_v<ParameterType<decltype(&F3), 0>, int>);
+ static_assert(std::is_same_v<ParameterType<decltype(&F3), 1>, S>);
+ static_assert(std::is_same_v<ParameterType<decltype(&F3), 2>, float>);
+ static_assert(std::is_same_v<ReturnType<decltype(&F1)>, void>);
+ static_assert(std::is_same_v<ReturnType<decltype(&F3)>, void>);
+ static_assert(SignatureOfT<decltype(&F1)>::parameter_count == 1);
+ static_assert(SignatureOfT<decltype(&F3)>::parameter_count == 3);
}
TEST(ParamType, Method) {
- class C {
- public:
- void F1(S) {}
- void F3(int, S, float) {}
- };
- C().F1({}); // Avoid unused method warning
- C().F3(0, {}, 0); // Avoid unused method warning
- static_assert(std::is_same_v<ParameterType<decltype(&C::F1), 0>, S>);
- static_assert(std::is_same_v<ParameterType<decltype(&C::F3), 0>, int>);
- static_assert(std::is_same_v<ParameterType<decltype(&C::F3), 1>, S>);
- static_assert(std::is_same_v<ParameterType<decltype(&C::F3), 2>, float>);
- static_assert(std::is_same_v<ReturnType<decltype(&C::F1)>, void>);
- static_assert(std::is_same_v<ReturnType<decltype(&C::F3)>, void>);
- static_assert(SignatureOfT<decltype(&C::F1)>::parameter_count == 1);
- static_assert(SignatureOfT<decltype(&C::F3)>::parameter_count == 3);
+ class C {
+ public:
+ void F1(S) {}
+ void F3(int, S, float) {}
+ };
+ C().F1({}); // Avoid unused method warning
+ C().F3(0, {}, 0); // Avoid unused method warning
+ static_assert(std::is_same_v<ParameterType<decltype(&C::F1), 0>, S>);
+ static_assert(std::is_same_v<ParameterType<decltype(&C::F3), 0>, int>);
+ static_assert(std::is_same_v<ParameterType<decltype(&C::F3), 1>, S>);
+ static_assert(std::is_same_v<ParameterType<decltype(&C::F3), 2>, float>);
+ static_assert(std::is_same_v<ReturnType<decltype(&C::F1)>, void>);
+ static_assert(std::is_same_v<ReturnType<decltype(&C::F3)>, void>);
+ static_assert(SignatureOfT<decltype(&C::F1)>::parameter_count == 1);
+ static_assert(SignatureOfT<decltype(&C::F3)>::parameter_count == 3);
}
TEST(ParamType, ConstMethod) {
- class C {
- public:
- void F1(S) const {}
- void F3(int, S, float) const {}
- };
- C().F1({}); // Avoid unused method warning
- C().F3(0, {}, 0); // Avoid unused method warning
- static_assert(std::is_same_v<ParameterType<decltype(&C::F1), 0>, S>);
- static_assert(std::is_same_v<ParameterType<decltype(&C::F3), 0>, int>);
- static_assert(std::is_same_v<ParameterType<decltype(&C::F3), 1>, S>);
- static_assert(std::is_same_v<ParameterType<decltype(&C::F3), 2>, float>);
- static_assert(std::is_same_v<ReturnType<decltype(&C::F1)>, void>);
- static_assert(std::is_same_v<ReturnType<decltype(&C::F3)>, void>);
- static_assert(SignatureOfT<decltype(&C::F1)>::parameter_count == 1);
- static_assert(SignatureOfT<decltype(&C::F3)>::parameter_count == 3);
+ class C {
+ public:
+ void F1(S) const {}
+ void F3(int, S, float) const {}
+ };
+ C().F1({}); // Avoid unused method warning
+ C().F3(0, {}, 0); // Avoid unused method warning
+ static_assert(std::is_same_v<ParameterType<decltype(&C::F1), 0>, S>);
+ static_assert(std::is_same_v<ParameterType<decltype(&C::F3), 0>, int>);
+ static_assert(std::is_same_v<ParameterType<decltype(&C::F3), 1>, S>);
+ static_assert(std::is_same_v<ParameterType<decltype(&C::F3), 2>, float>);
+ static_assert(std::is_same_v<ReturnType<decltype(&C::F1)>, void>);
+ static_assert(std::is_same_v<ReturnType<decltype(&C::F3)>, void>);
+ static_assert(SignatureOfT<decltype(&C::F1)>::parameter_count == 1);
+ static_assert(SignatureOfT<decltype(&C::F3)>::parameter_count == 3);
}
TEST(ParamType, StaticMethod) {
- class C {
- public:
- static void F1(S) {}
- static void F3(int, S, float) {}
- };
- C::F1({}); // Avoid unused method warning
- C::F3(0, {}, 0); // Avoid unused method warning
- static_assert(std::is_same_v<ParameterType<decltype(&C::F1), 0>, S>);
- static_assert(std::is_same_v<ParameterType<decltype(&C::F3), 0>, int>);
- static_assert(std::is_same_v<ParameterType<decltype(&C::F3), 1>, S>);
- static_assert(std::is_same_v<ParameterType<decltype(&C::F3), 2>, float>);
- static_assert(std::is_same_v<ReturnType<decltype(&C::F1)>, void>);
- static_assert(std::is_same_v<ReturnType<decltype(&C::F3)>, void>);
- static_assert(SignatureOfT<decltype(&C::F1)>::parameter_count == 1);
- static_assert(SignatureOfT<decltype(&C::F3)>::parameter_count == 3);
+ class C {
+ public:
+ static void F1(S) {}
+ static void F3(int, S, float) {}
+ };
+ C::F1({}); // Avoid unused method warning
+ C::F3(0, {}, 0); // Avoid unused method warning
+ static_assert(std::is_same_v<ParameterType<decltype(&C::F1), 0>, S>);
+ static_assert(std::is_same_v<ParameterType<decltype(&C::F3), 0>, int>);
+ static_assert(std::is_same_v<ParameterType<decltype(&C::F3), 1>, S>);
+ static_assert(std::is_same_v<ParameterType<decltype(&C::F3), 2>, float>);
+ static_assert(std::is_same_v<ReturnType<decltype(&C::F1)>, void>);
+ static_assert(std::is_same_v<ReturnType<decltype(&C::F3)>, void>);
+ static_assert(SignatureOfT<decltype(&C::F1)>::parameter_count == 1);
+ static_assert(SignatureOfT<decltype(&C::F3)>::parameter_count == 3);
}
TEST(ParamType, FunctionLike) {
- using F1 = std::function<void(S)>;
- using F3 = std::function<void(int, S, float)>;
- static_assert(std::is_same_v<ParameterType<F1, 0>, S>);
- static_assert(std::is_same_v<ParameterType<F3, 0>, int>);
- static_assert(std::is_same_v<ParameterType<F3, 1>, S>);
- static_assert(std::is_same_v<ParameterType<F3, 2>, float>);
- static_assert(std::is_same_v<ReturnType<F1>, void>);
- static_assert(std::is_same_v<ReturnType<F3>, void>);
- static_assert(SignatureOfT<F1>::parameter_count == 1);
- static_assert(SignatureOfT<F3>::parameter_count == 3);
+ using F1 = std::function<void(S)>;
+ using F3 = std::function<void(int, S, float)>;
+ static_assert(std::is_same_v<ParameterType<F1, 0>, S>);
+ static_assert(std::is_same_v<ParameterType<F3, 0>, int>);
+ static_assert(std::is_same_v<ParameterType<F3, 1>, S>);
+ static_assert(std::is_same_v<ParameterType<F3, 2>, float>);
+ static_assert(std::is_same_v<ReturnType<F1>, void>);
+ static_assert(std::is_same_v<ReturnType<F3>, void>);
+ static_assert(SignatureOfT<F1>::parameter_count == 1);
+ static_assert(SignatureOfT<F3>::parameter_count == 3);
}
TEST(ParamType, Lambda) {
- auto l1 = [](S) {};
- auto l3 = [](int, S, float) {};
- static_assert(std::is_same_v<ParameterType<decltype(l1), 0>, S>);
- static_assert(std::is_same_v<ParameterType<decltype(l3), 0>, int>);
- static_assert(std::is_same_v<ParameterType<decltype(l3), 1>, S>);
- static_assert(std::is_same_v<ParameterType<decltype(l3), 2>, float>);
- static_assert(std::is_same_v<ReturnType<decltype(l1)>, void>);
- static_assert(std::is_same_v<ReturnType<decltype(l3)>, void>);
- static_assert(SignatureOfT<decltype(l1)>::parameter_count == 1);
- static_assert(SignatureOfT<decltype(l3)>::parameter_count == 3);
+ auto l1 = [](S) {};
+ auto l3 = [](int, S, float) {};
+ static_assert(std::is_same_v<ParameterType<decltype(l1), 0>, S>);
+ static_assert(std::is_same_v<ParameterType<decltype(l3), 0>, int>);
+ static_assert(std::is_same_v<ParameterType<decltype(l3), 1>, S>);
+ static_assert(std::is_same_v<ParameterType<decltype(l3), 2>, float>);
+ static_assert(std::is_same_v<ReturnType<decltype(l1)>, void>);
+ static_assert(std::is_same_v<ReturnType<decltype(l3)>, void>);
+ static_assert(SignatureOfT<decltype(l1)>::parameter_count == 1);
+ static_assert(SignatureOfT<decltype(l3)>::parameter_count == 3);
}
TEST(Slice, Empty) {
- auto sliced = Slice<0, 0>(std::make_tuple<>());
- static_assert(std::tuple_size_v<decltype(sliced)> == 0);
+ auto sliced = Slice<0, 0>(std::make_tuple<>());
+ static_assert(std::tuple_size_v<decltype(sliced)> == 0);
}
TEST(Slice, SingleElementSliceEmpty) {
- auto sliced = Slice<0, 0>(std::make_tuple<int>(1));
- static_assert(std::tuple_size_v<decltype(sliced)> == 0);
+ auto sliced = Slice<0, 0>(std::make_tuple<int>(1));
+ static_assert(std::tuple_size_v<decltype(sliced)> == 0);
}
TEST(Slice, SingleElementSliceFull) {
- auto sliced = Slice<0, 1>(std::make_tuple<int>(1));
- static_assert(std::tuple_size_v<decltype(sliced)> == 1);
- static_assert(std::is_same_v<std::tuple_element_t<0, decltype(sliced)>, int>,
- "");
- EXPECT_EQ(std::get<0>(sliced), 1);
+ auto sliced = Slice<0, 1>(std::make_tuple<int>(1));
+ static_assert(std::tuple_size_v<decltype(sliced)> == 1);
+ static_assert(std::is_same_v<std::tuple_element_t<0, decltype(sliced)>, int>, "");
+ EXPECT_EQ(std::get<0>(sliced), 1);
}
TEST(Slice, MixedTupleSliceEmpty) {
- auto sliced = Slice<1, 0>(std::make_tuple<int, bool, float>(1, true, 2.0f));
- static_assert(std::tuple_size_v<decltype(sliced)> == 0);
+ auto sliced = Slice<1, 0>(std::make_tuple<int, bool, float>(1, true, 2.0f));
+ static_assert(std::tuple_size_v<decltype(sliced)> == 0);
}
TEST(Slice, MixedTupleSliceFull) {
- auto sliced = Slice<0, 3>(std::make_tuple<int, bool, float>(1, true, 2.0f));
- static_assert(std::tuple_size_v<decltype(sliced)> == 3);
- static_assert(std::is_same_v<std::tuple_element_t<0, decltype(sliced)>, int>,
- "");
- static_assert(std::is_same_v<std::tuple_element_t<1, decltype(sliced)>, bool>,
- "");
- static_assert(
- std::is_same_v<std::tuple_element_t<2, decltype(sliced)>, float>);
- EXPECT_EQ(std::get<0>(sliced), 1);
- EXPECT_EQ(std::get<1>(sliced), true);
- EXPECT_EQ(std::get<2>(sliced), 2.0f);
+ auto sliced = Slice<0, 3>(std::make_tuple<int, bool, float>(1, true, 2.0f));
+ static_assert(std::tuple_size_v<decltype(sliced)> == 3);
+ static_assert(std::is_same_v<std::tuple_element_t<0, decltype(sliced)>, int>, "");
+ static_assert(std::is_same_v<std::tuple_element_t<1, decltype(sliced)>, bool>, "");
+ static_assert(std::is_same_v<std::tuple_element_t<2, decltype(sliced)>, float>);
+ EXPECT_EQ(std::get<0>(sliced), 1);
+ EXPECT_EQ(std::get<1>(sliced), true);
+ EXPECT_EQ(std::get<2>(sliced), 2.0f);
}
TEST(Slice, MixedTupleSliceLowPart) {
- auto sliced = Slice<0, 2>(std::make_tuple<int, bool, float>(1, true, 2.0f));
- static_assert(std::tuple_size_v<decltype(sliced)> == 2);
- static_assert(std::is_same_v<std::tuple_element_t<0, decltype(sliced)>, int>,
- "");
- static_assert(std::is_same_v<std::tuple_element_t<1, decltype(sliced)>, bool>,
- "");
- EXPECT_EQ(std::get<0>(sliced), 1);
- EXPECT_EQ(std::get<1>(sliced), true);
+ auto sliced = Slice<0, 2>(std::make_tuple<int, bool, float>(1, true, 2.0f));
+ static_assert(std::tuple_size_v<decltype(sliced)> == 2);
+ static_assert(std::is_same_v<std::tuple_element_t<0, decltype(sliced)>, int>, "");
+ static_assert(std::is_same_v<std::tuple_element_t<1, decltype(sliced)>, bool>, "");
+ EXPECT_EQ(std::get<0>(sliced), 1);
+ EXPECT_EQ(std::get<1>(sliced), true);
}
TEST(Slice, MixedTupleSliceHighPart) {
- auto sliced = Slice<1, 2>(std::make_tuple<int, bool, float>(1, true, 2.0f));
- static_assert(std::tuple_size_v<decltype(sliced)> == 2);
- static_assert(std::is_same_v<std::tuple_element_t<0, decltype(sliced)>, bool>,
- "");
- static_assert(
- std::is_same_v<std::tuple_element_t<1, decltype(sliced)>, float>);
- EXPECT_EQ(std::get<0>(sliced), true);
- EXPECT_EQ(std::get<1>(sliced), 2.0f);
+ auto sliced = Slice<1, 2>(std::make_tuple<int, bool, float>(1, true, 2.0f));
+ static_assert(std::tuple_size_v<decltype(sliced)> == 2);
+ static_assert(std::is_same_v<std::tuple_element_t<0, decltype(sliced)>, bool>, "");
+ static_assert(std::is_same_v<std::tuple_element_t<1, decltype(sliced)>, float>);
+ EXPECT_EQ(std::get<0>(sliced), true);
+ EXPECT_EQ(std::get<1>(sliced), 2.0f);
}
TEST(Slice, PreservesRValueRef) {
- int i;
- int& int_ref = i;
- auto tuple = std::forward_as_tuple(std::move(int_ref));
- static_assert(std::is_same_v<int&&, //
- std::tuple_element_t<0, decltype(tuple)>>);
- auto sliced = Slice<0, 1>(std::move(tuple));
- static_assert(std::is_same_v<int&&, //
- std::tuple_element_t<0, decltype(sliced)>>);
+ int i;
+ int& int_ref = i;
+ auto tuple = std::forward_as_tuple(std::move(int_ref));
+ static_assert(std::is_same_v<int&&, //
+ std::tuple_element_t<0, decltype(tuple)>>);
+ auto sliced = Slice<0, 1>(std::move(tuple));
+ static_assert(std::is_same_v<int&&, //
+ std::tuple_element_t<0, decltype(sliced)>>);
}
TEST(SliceTuple, Empty) {
- using sliced = SliceTuple<0, 0, std::tuple<>>;
- static_assert(std::tuple_size_v<sliced> == 0);
+ using sliced = SliceTuple<0, 0, std::tuple<>>;
+ static_assert(std::tuple_size_v<sliced> == 0);
}
TEST(SliceTuple, SingleElementSliceEmpty) {
- using sliced = SliceTuple<0, 0, std::tuple<int>>;
- static_assert(std::tuple_size_v<sliced> == 0);
+ using sliced = SliceTuple<0, 0, std::tuple<int>>;
+ static_assert(std::tuple_size_v<sliced> == 0);
}
TEST(SliceTuple, SingleElementSliceFull) {
- using sliced = SliceTuple<0, 1, std::tuple<int>>;
- static_assert(std::tuple_size_v<sliced> == 1);
- static_assert(std::is_same_v<std::tuple_element_t<0, sliced>, int>);
+ using sliced = SliceTuple<0, 1, std::tuple<int>>;
+ static_assert(std::tuple_size_v<sliced> == 1);
+ static_assert(std::is_same_v<std::tuple_element_t<0, sliced>, int>);
}
TEST(SliceTuple, MixedTupleSliceEmpty) {
- using sliced = SliceTuple<1, 0, std::tuple<int, bool, float>>;
- static_assert(std::tuple_size_v<sliced> == 0);
+ using sliced = SliceTuple<1, 0, std::tuple<int, bool, float>>;
+ static_assert(std::tuple_size_v<sliced> == 0);
}
TEST(SliceTuple, MixedTupleSliceFull) {
- using sliced = SliceTuple<0, 3, std::tuple<int, bool, float>>;
- static_assert(std::tuple_size_v<sliced> == 3);
- static_assert(std::is_same_v<std::tuple_element_t<0, sliced>, int>);
- static_assert(std::is_same_v<std::tuple_element_t<1, sliced>, bool>);
- static_assert(std::is_same_v<std::tuple_element_t<2, sliced>, float>);
+ using sliced = SliceTuple<0, 3, std::tuple<int, bool, float>>;
+ static_assert(std::tuple_size_v<sliced> == 3);
+ static_assert(std::is_same_v<std::tuple_element_t<0, sliced>, int>);
+ static_assert(std::is_same_v<std::tuple_element_t<1, sliced>, bool>);
+ static_assert(std::is_same_v<std::tuple_element_t<2, sliced>, float>);
}
TEST(SliceTuple, MixedTupleSliceLowPart) {
- using sliced = SliceTuple<0, 2, std::tuple<int, bool, float>>;
- static_assert(std::tuple_size_v<sliced> == 2);
- static_assert(std::is_same_v<std::tuple_element_t<0, sliced>, int>);
- static_assert(std::is_same_v<std::tuple_element_t<1, sliced>, bool>);
+ using sliced = SliceTuple<0, 2, std::tuple<int, bool, float>>;
+ static_assert(std::tuple_size_v<sliced> == 2);
+ static_assert(std::is_same_v<std::tuple_element_t<0, sliced>, int>);
+ static_assert(std::is_same_v<std::tuple_element_t<1, sliced>, bool>);
}
TEST(SliceTuple, MixedTupleSliceHighPart) {
- using sliced = SliceTuple<1, 2, std::tuple<int, bool, float>>;
- static_assert(std::tuple_size_v<sliced> == 2);
- static_assert(std::is_same_v<std::tuple_element_t<0, sliced>, bool>);
- static_assert(std::is_same_v<std::tuple_element_t<1, sliced>, float>);
+ using sliced = SliceTuple<1, 2, std::tuple<int, bool, float>>;
+ static_assert(std::tuple_size_v<sliced> == 2);
+ static_assert(std::is_same_v<std::tuple_element_t<0, sliced>, bool>);
+ static_assert(std::is_same_v<std::tuple_element_t<1, sliced>, float>);
}
} // namespace tint::traits
diff --git a/chromium/third_party/dawn/src/tint/transform/add_empty_entry_point.cc b/chromium/third_party/dawn/src/tint/transform/add_empty_entry_point.cc
index 0710d2bba63..f037649e4ad 100644
--- a/chromium/third_party/dawn/src/tint/transform/add_empty_entry_point.cc
+++ b/chromium/third_party/dawn/src/tint/transform/add_empty_entry_point.cc
@@ -20,30 +20,27 @@
TINT_INSTANTIATE_TYPEINFO(tint::transform::AddEmptyEntryPoint);
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::transform {
AddEmptyEntryPoint::AddEmptyEntryPoint() = default;
AddEmptyEntryPoint::~AddEmptyEntryPoint() = default;
-bool AddEmptyEntryPoint::ShouldRun(const Program* program,
- const DataMap&) const {
- for (auto* func : program->AST().Functions()) {
- if (func->IsEntryPoint()) {
- return false;
+bool AddEmptyEntryPoint::ShouldRun(const Program* program, const DataMap&) const {
+ for (auto* func : program->AST().Functions()) {
+ if (func->IsEntryPoint()) {
+ return false;
+ }
}
- }
- return true;
+ return true;
}
-void AddEmptyEntryPoint::Run(CloneContext& ctx,
- const DataMap&,
- DataMap&) const {
- ctx.dst->Func(ctx.dst->Symbols().New("unused_entry_point"), {},
- ctx.dst->ty.void_(), {},
- {ctx.dst->Stage(ast::PipelineStage::kCompute),
- ctx.dst->WorkgroupSize(1)});
- ctx.Clone();
+void AddEmptyEntryPoint::Run(CloneContext& ctx, const DataMap&, DataMap&) const {
+ ctx.dst->Func(ctx.dst->Symbols().New("unused_entry_point"), {}, ctx.dst->ty.void_(), {},
+ {ctx.dst->Stage(ast::PipelineStage::kCompute), ctx.dst->WorkgroupSize(1_i)});
+ ctx.Clone();
}
} // namespace tint::transform
diff --git a/chromium/third_party/dawn/src/tint/transform/add_empty_entry_point.h b/chromium/third_party/dawn/src/tint/transform/add_empty_entry_point.h
index eb9dccd5a97..553035504be 100644
--- a/chromium/third_party/dawn/src/tint/transform/add_empty_entry_point.h
+++ b/chromium/third_party/dawn/src/tint/transform/add_empty_entry_point.h
@@ -20,30 +20,26 @@
namespace tint::transform {
/// Add an empty entry point to the module, if no other entry points exist.
-class AddEmptyEntryPoint final
- : public Castable<AddEmptyEntryPoint, Transform> {
- public:
- /// Constructor
- AddEmptyEntryPoint();
- /// Destructor
- ~AddEmptyEntryPoint() override;
-
- /// @param program the program to inspect
- /// @param data optional extra transform-specific input data
- /// @returns true if this transform should be run for the given program
- bool ShouldRun(const Program* program,
- const DataMap& data = {}) const override;
-
- protected:
- /// Runs the transform using the CloneContext built for transforming a
- /// program. Run() is responsible for calling Clone() on the CloneContext.
- /// @param ctx the CloneContext primed with the input program and
- /// ProgramBuilder
- /// @param inputs optional extra transform-specific input data
- /// @param outputs optional extra transform-specific output data
- void Run(CloneContext& ctx,
- const DataMap& inputs,
- DataMap& outputs) const override;
+class AddEmptyEntryPoint final : public Castable<AddEmptyEntryPoint, Transform> {
+ public:
+ /// Constructor
+ AddEmptyEntryPoint();
+ /// Destructor
+ ~AddEmptyEntryPoint() override;
+
+ /// @param program the program to inspect
+ /// @param data optional extra transform-specific input data
+ /// @returns true if this transform should be run for the given program
+ bool ShouldRun(const Program* program, const DataMap& data = {}) const override;
+
+ protected:
+ /// Runs the transform using the CloneContext built for transforming a
+ /// program. Run() is responsible for calling Clone() on the CloneContext.
+ /// @param ctx the CloneContext primed with the input program and
+ /// ProgramBuilder
+ /// @param inputs optional extra transform-specific input data
+ /// @param outputs optional extra transform-specific output data
+ void Run(CloneContext& ctx, const DataMap& inputs, DataMap& outputs) const override;
};
} // namespace tint::transform
diff --git a/chromium/third_party/dawn/src/tint/transform/add_empty_entry_point_test.cc b/chromium/third_party/dawn/src/tint/transform/add_empty_entry_point_test.cc
index 0854251e030..44f90055526 100644
--- a/chromium/third_party/dawn/src/tint/transform/add_empty_entry_point_test.cc
+++ b/chromium/third_party/dawn/src/tint/transform/add_empty_entry_point_test.cc
@@ -24,62 +24,62 @@ namespace {
using AddEmptyEntryPointTest = TransformTest;
TEST_F(AddEmptyEntryPointTest, ShouldRunEmptyModule) {
- auto* src = R"()";
+ auto* src = R"()";
- EXPECT_TRUE(ShouldRun<AddEmptyEntryPoint>(src));
+ EXPECT_TRUE(ShouldRun<AddEmptyEntryPoint>(src));
}
TEST_F(AddEmptyEntryPointTest, ShouldRunExistingEntryPoint) {
- auto* src = R"(
-@stage(compute) @workgroup_size(1)
+ auto* src = R"(
+@compute @workgroup_size(1)
fn existing() {}
)";
- EXPECT_FALSE(ShouldRun<AddEmptyEntryPoint>(src));
+ EXPECT_FALSE(ShouldRun<AddEmptyEntryPoint>(src));
}
TEST_F(AddEmptyEntryPointTest, EmptyModule) {
- auto* src = R"()";
+ auto* src = R"()";
- auto* expect = R"(
-@stage(compute) @workgroup_size(1)
+ auto* expect = R"(
+@compute @workgroup_size(1i)
fn unused_entry_point() {
}
)";
- auto got = Run<AddEmptyEntryPoint>(src);
+ auto got = Run<AddEmptyEntryPoint>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(AddEmptyEntryPointTest, ExistingEntryPoint) {
- auto* src = R"(
-@stage(fragment)
+ auto* src = R"(
+@fragment
fn main() {
}
)";
- auto* expect = src;
+ auto* expect = src;
- auto got = Run<AddEmptyEntryPoint>(src);
+ auto got = Run<AddEmptyEntryPoint>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(AddEmptyEntryPointTest, NameClash) {
- auto* src = R"(var<private> unused_entry_point : f32;)";
+ auto* src = R"(var<private> unused_entry_point : f32;)";
- auto* expect = R"(
-@stage(compute) @workgroup_size(1)
+ auto* expect = R"(
+@compute @workgroup_size(1i)
fn unused_entry_point_1() {
}
var<private> unused_entry_point : f32;
)";
- auto got = Run<AddEmptyEntryPoint>(src);
+ auto got = Run<AddEmptyEntryPoint>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/transform/add_spirv_block_attribute.cc b/chromium/third_party/dawn/src/tint/transform/add_spirv_block_attribute.cc
index 91ab991d7b5..38e0de66d5d 100644
--- a/chromium/third_party/dawn/src/tint/transform/add_spirv_block_attribute.cc
+++ b/chromium/third_party/dawn/src/tint/transform/add_spirv_block_attribute.cc
@@ -23,8 +23,7 @@
#include "src/tint/utils/map.h"
TINT_INSTANTIATE_TYPEINFO(tint::transform::AddSpirvBlockAttribute);
-TINT_INSTANTIATE_TYPEINFO(
- tint::transform::AddSpirvBlockAttribute::SpirvBlockAttribute);
+TINT_INSTANTIATE_TYPEINFO(tint::transform::AddSpirvBlockAttribute::SpirvBlockAttribute);
namespace tint::transform {
@@ -32,89 +31,81 @@ AddSpirvBlockAttribute::AddSpirvBlockAttribute() = default;
AddSpirvBlockAttribute::~AddSpirvBlockAttribute() = default;
-void AddSpirvBlockAttribute::Run(CloneContext& ctx,
- const DataMap&,
- DataMap&) const {
- auto& sem = ctx.src->Sem();
-
- // Collect the set of structs that are nested in other types.
- std::unordered_set<const sem::Struct*> nested_structs;
- for (auto* node : ctx.src->ASTNodes().Objects()) {
- if (auto* arr = sem.Get<sem::Array>(node->As<ast::Array>())) {
- if (auto* nested_str = arr->ElemType()->As<sem::Struct>()) {
- nested_structs.insert(nested_str);
- }
- } else if (auto* str = sem.Get<sem::Struct>(node->As<ast::Struct>())) {
- for (auto* member : str->Members()) {
- if (auto* nested_str = member->Type()->As<sem::Struct>()) {
- nested_structs.insert(nested_str);
+void AddSpirvBlockAttribute::Run(CloneContext& ctx, const DataMap&, DataMap&) const {
+ auto& sem = ctx.src->Sem();
+
+ // Collect the set of structs that are nested in other types.
+ std::unordered_set<const sem::Struct*> nested_structs;
+ for (auto* node : ctx.src->ASTNodes().Objects()) {
+ if (auto* arr = sem.Get<sem::Array>(node->As<ast::Array>())) {
+ if (auto* nested_str = arr->ElemType()->As<sem::Struct>()) {
+ nested_structs.insert(nested_str);
+ }
+ } else if (auto* str = sem.Get<sem::Struct>(node->As<ast::Struct>())) {
+ for (auto* member : str->Members()) {
+ if (auto* nested_str = member->Type()->As<sem::Struct>()) {
+ nested_structs.insert(nested_str);
+ }
+ }
}
- }
- }
- }
-
- // A map from a type in the source program to a block-decorated wrapper that
- // contains it in the destination program.
- std::unordered_map<const sem::Type*, const ast::Struct*> wrapper_structs;
-
- // Process global variables that are buffers.
- for (auto* var : ctx.src->AST().GlobalVariables()) {
- auto* sem_var = sem.Get<sem::GlobalVariable>(var);
- if (var->declared_storage_class != ast::StorageClass::kStorage &&
- var->declared_storage_class != ast::StorageClass::kUniform) {
- continue;
}
- auto* ty = sem.Get(var->type);
- auto* str = ty->As<sem::Struct>();
- if (!str || nested_structs.count(str)) {
- const char* kMemberName = "inner";
-
- // This is a non-struct or a struct that is nested somewhere else, so we
- // need to wrap it first.
- auto* wrapper = utils::GetOrCreate(wrapper_structs, ty, [&]() {
- auto* block =
- ctx.dst->ASTNodes().Create<SpirvBlockAttribute>(ctx.dst->ID());
- auto wrapper_name = ctx.src->Symbols().NameFor(var->symbol) + "_block";
- auto* ret = ctx.dst->create<ast::Struct>(
- ctx.dst->Symbols().New(wrapper_name),
- ast::StructMemberList{
- ctx.dst->Member(kMemberName, CreateASTTypeFor(ctx, ty))},
- ast::AttributeList{block});
- ctx.InsertBefore(ctx.src->AST().GlobalDeclarations(), var, ret);
- return ret;
- });
- ctx.Replace(var->type, ctx.dst->ty.Of(wrapper));
-
- // Insert a member accessor to get the original type from the wrapper at
- // any usage of the original variable.
- for (auto* user : sem_var->Users()) {
- ctx.Replace(
- user->Declaration(),
- ctx.dst->MemberAccessor(ctx.Clone(var->symbol), kMemberName));
- }
- } else {
- // Add a block attribute to this struct directly.
- auto* block =
- ctx.dst->ASTNodes().Create<SpirvBlockAttribute>(ctx.dst->ID());
- ctx.InsertFront(str->Declaration()->attributes, block);
+ // A map from a type in the source program to a block-decorated wrapper that
+ // contains it in the destination program.
+ std::unordered_map<const sem::Type*, const ast::Struct*> wrapper_structs;
+
+ // Process global variables that are buffers.
+ for (auto* var : ctx.src->AST().GlobalVariables()) {
+ auto* sem_var = sem.Get<sem::GlobalVariable>(var);
+ if (var->declared_storage_class != ast::StorageClass::kStorage &&
+ var->declared_storage_class != ast::StorageClass::kUniform) {
+ continue;
+ }
+
+ auto* ty = sem.Get(var->type);
+ auto* str = ty->As<sem::Struct>();
+ if (!str || nested_structs.count(str)) {
+ const char* kMemberName = "inner";
+
+ // This is a non-struct or a struct that is nested somewhere else, so we
+ // need to wrap it first.
+ auto* wrapper = utils::GetOrCreate(wrapper_structs, ty, [&]() {
+ auto* block = ctx.dst->ASTNodes().Create<SpirvBlockAttribute>(ctx.dst->ID());
+ auto wrapper_name = ctx.src->Symbols().NameFor(var->symbol) + "_block";
+ auto* ret = ctx.dst->create<ast::Struct>(
+ ctx.dst->Symbols().New(wrapper_name),
+ ast::StructMemberList{ctx.dst->Member(kMemberName, CreateASTTypeFor(ctx, ty))},
+ ast::AttributeList{block});
+ ctx.InsertBefore(ctx.src->AST().GlobalDeclarations(), var, ret);
+ return ret;
+ });
+ ctx.Replace(var->type, ctx.dst->ty.Of(wrapper));
+
+ // Insert a member accessor to get the original type from the wrapper at
+ // any usage of the original variable.
+ for (auto* user : sem_var->Users()) {
+ ctx.Replace(user->Declaration(),
+ ctx.dst->MemberAccessor(ctx.Clone(var->symbol), kMemberName));
+ }
+ } else {
+ // Add a block attribute to this struct directly.
+ auto* block = ctx.dst->ASTNodes().Create<SpirvBlockAttribute>(ctx.dst->ID());
+ ctx.InsertFront(str->Declaration()->attributes, block);
+ }
}
- }
- ctx.Clone();
+ ctx.Clone();
}
-AddSpirvBlockAttribute::SpirvBlockAttribute::SpirvBlockAttribute(ProgramID pid)
- : Base(pid) {}
+AddSpirvBlockAttribute::SpirvBlockAttribute::SpirvBlockAttribute(ProgramID pid) : Base(pid) {}
AddSpirvBlockAttribute::SpirvBlockAttribute::~SpirvBlockAttribute() = default;
std::string AddSpirvBlockAttribute::SpirvBlockAttribute::InternalName() const {
- return "spirv_block";
+ return "spirv_block";
}
const AddSpirvBlockAttribute::SpirvBlockAttribute*
AddSpirvBlockAttribute::SpirvBlockAttribute::Clone(CloneContext* ctx) const {
- return ctx->dst->ASTNodes()
- .Create<AddSpirvBlockAttribute::SpirvBlockAttribute>(ctx->dst->ID());
+ return ctx->dst->ASTNodes().Create<AddSpirvBlockAttribute::SpirvBlockAttribute>(ctx->dst->ID());
}
} // namespace tint::transform
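Run() builds at most one wrapper struct per store type: utils::GetOrCreate() returns the cached value for a key or invokes the lambda once to create and remember it. A small hedged sketch of that memoization pattern on its own (the Describe function and its string payload are placeholders, not Tint API; only the GetOrCreate call shape is taken from the code above):

    #include <string>
    #include <unordered_map>

    #include "src/tint/utils/map.h"

    std::string Describe(int id) {
        // Cache of previously built descriptions, keyed by id.
        static std::unordered_map<int, std::string> cache;
        // The first call for a given id runs the lambda and stores its result;
        // later calls return the stored value without rebuilding it.
        return tint::utils::GetOrCreate(cache, id, [&] {
            return "item #" + std::to_string(id);  // placeholder "expensive" build step
        });
    }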
diff --git a/chromium/third_party/dawn/src/tint/transform/add_spirv_block_attribute.h b/chromium/third_party/dawn/src/tint/transform/add_spirv_block_attribute.h
index 386a34118a9..67faaa5ec1e 100644
--- a/chromium/third_party/dawn/src/tint/transform/add_spirv_block_attribute.h
+++ b/chromium/third_party/dawn/src/tint/transform/add_spirv_block_attribute.h
@@ -27,46 +27,42 @@ namespace tint::transform {
/// store type of a buffer. If that structure is nested inside another structure
/// or an array, then it is wrapped inside another structure which gets the
/// `@internal(spirv_block)` attribute instead.
-class AddSpirvBlockAttribute final
- : public Castable<AddSpirvBlockAttribute, Transform> {
- public:
- /// SpirvBlockAttribute is an InternalAttribute that is used to decorate a
- // structure that needs a SPIR-V block attribute.
- class SpirvBlockAttribute final
- : public Castable<SpirvBlockAttribute, ast::InternalAttribute> {
- public:
- /// Constructor
- /// @param program_id the identifier of the program that owns this node
- explicit SpirvBlockAttribute(ProgramID program_id);
- /// Destructor
- ~SpirvBlockAttribute() override;
+class AddSpirvBlockAttribute final : public Castable<AddSpirvBlockAttribute, Transform> {
+ public:
+ /// SpirvBlockAttribute is an InternalAttribute that is used to decorate a
+    /// structure that needs a SPIR-V block attribute.
+ class SpirvBlockAttribute final : public Castable<SpirvBlockAttribute, ast::InternalAttribute> {
+ public:
+ /// Constructor
+ /// @param program_id the identifier of the program that owns this node
+ explicit SpirvBlockAttribute(ProgramID program_id);
+ /// Destructor
+ ~SpirvBlockAttribute() override;
- /// @return a short description of the internal attribute which will be
- /// displayed as `@internal(<name>)`
- std::string InternalName() const override;
+ /// @return a short description of the internal attribute which will be
+ /// displayed as `@internal(<name>)`
+ std::string InternalName() const override;
- /// Performs a deep clone of this object using the CloneContext `ctx`.
- /// @param ctx the clone context
- /// @return the newly cloned object
- const SpirvBlockAttribute* Clone(CloneContext* ctx) const override;
- };
+ /// Performs a deep clone of this object using the CloneContext `ctx`.
+ /// @param ctx the clone context
+ /// @return the newly cloned object
+ const SpirvBlockAttribute* Clone(CloneContext* ctx) const override;
+ };
- /// Constructor
- AddSpirvBlockAttribute();
+ /// Constructor
+ AddSpirvBlockAttribute();
- /// Destructor
- ~AddSpirvBlockAttribute() override;
+ /// Destructor
+ ~AddSpirvBlockAttribute() override;
- protected:
- /// Runs the transform using the CloneContext built for transforming a
- /// program. Run() is responsible for calling Clone() on the CloneContext.
- /// @param ctx the CloneContext primed with the input program and
- /// ProgramBuilder
- /// @param inputs optional extra transform-specific input data
- /// @param outputs optional extra transform-specific output data
- void Run(CloneContext& ctx,
- const DataMap& inputs,
- DataMap& outputs) const override;
+ protected:
+ /// Runs the transform using the CloneContext built for transforming a
+ /// program. Run() is responsible for calling Clone() on the CloneContext.
+ /// @param ctx the CloneContext primed with the input program and
+ /// ProgramBuilder
+ /// @param inputs optional extra transform-specific input data
+ /// @param outputs optional extra transform-specific output data
+ void Run(CloneContext& ctx, const DataMap& inputs, DataMap& outputs) const override;
};
} // namespace tint::transform
diff --git a/chromium/third_party/dawn/src/tint/transform/add_spirv_block_attribute_test.cc b/chromium/third_party/dawn/src/tint/transform/add_spirv_block_attribute_test.cc
index b68920c25e3..455be60df30 100644
--- a/chromium/third_party/dawn/src/tint/transform/add_spirv_block_attribute_test.cc
+++ b/chromium/third_party/dawn/src/tint/transform/add_spirv_block_attribute_test.cc
@@ -25,64 +25,64 @@ namespace {
using AddSpirvBlockAttributeTest = TransformTest;
TEST_F(AddSpirvBlockAttributeTest, EmptyModule) {
- auto* src = "";
- auto* expect = "";
+ auto* src = "";
+ auto* expect = "";
- auto got = Run<AddSpirvBlockAttribute>(src);
+ auto got = Run<AddSpirvBlockAttribute>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(AddSpirvBlockAttributeTest, Noop_UsedForPrivateVar) {
- auto* src = R"(
+ auto* src = R"(
struct S {
f : f32,
}
var<private> p : S;
-@stage(fragment)
+@fragment
fn main() {
p.f = 1.0;
}
)";
- auto* expect = src;
+ auto* expect = src;
- auto got = Run<AddSpirvBlockAttribute>(src);
+ auto got = Run<AddSpirvBlockAttribute>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(AddSpirvBlockAttributeTest, Noop_UsedForShaderIO) {
- auto* src = R"(
+ auto* src = R"(
struct S {
@location(0)
f : f32,
}
-@stage(fragment)
+@fragment
fn main() -> S {
return S();
}
)";
- auto* expect = src;
+ auto* expect = src;
- auto got = Run<AddSpirvBlockAttribute>(src);
+ auto got = Run<AddSpirvBlockAttribute>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(AddSpirvBlockAttributeTest, BasicScalar) {
- auto* src = R"(
+ auto* src = R"(
@group(0) @binding(0)
var<uniform> u : f32;
-@stage(fragment)
+@fragment
fn main() {
let f = u;
}
)";
- auto* expect = R"(
+ auto* expect = R"(
@internal(spirv_block)
struct u_block {
inner : f32,
@@ -90,28 +90,28 @@ struct u_block {
@group(0) @binding(0) var<uniform> u : u_block;
-@stage(fragment)
+@fragment
fn main() {
let f = u.inner;
}
)";
- auto got = Run<AddSpirvBlockAttribute>(src);
+ auto got = Run<AddSpirvBlockAttribute>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(AddSpirvBlockAttributeTest, BasicArray) {
- auto* src = R"(
+ auto* src = R"(
@group(0) @binding(0)
var<uniform> u : array<vec4<f32>, 4u>;
-@stage(fragment)
+@fragment
fn main() {
let a = u;
}
)";
- auto* expect = R"(
+ auto* expect = R"(
@internal(spirv_block)
struct u_block {
inner : array<vec4<f32>, 4u>,
@@ -119,30 +119,30 @@ struct u_block {
@group(0) @binding(0) var<uniform> u : u_block;
-@stage(fragment)
+@fragment
fn main() {
let a = u.inner;
}
)";
- auto got = Run<AddSpirvBlockAttribute>(src);
+ auto got = Run<AddSpirvBlockAttribute>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(AddSpirvBlockAttributeTest, BasicArray_Alias) {
- auto* src = R"(
+ auto* src = R"(
type Numbers = array<vec4<f32>, 4u>;
@group(0) @binding(0)
var<uniform> u : Numbers;
-@stage(fragment)
+@fragment
fn main() {
let a = u;
}
)";
- auto* expect = R"(
+ auto* expect = R"(
type Numbers = array<vec4<f32>, 4u>;
@internal(spirv_block)
@@ -152,19 +152,19 @@ struct u_block {
@group(0) @binding(0) var<uniform> u : u_block;
-@stage(fragment)
+@fragment
fn main() {
let a = u.inner;
}
)";
- auto got = Run<AddSpirvBlockAttribute>(src);
+ auto got = Run<AddSpirvBlockAttribute>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(AddSpirvBlockAttributeTest, BasicStruct) {
- auto* src = R"(
+ auto* src = R"(
struct S {
f : f32,
};
@@ -172,12 +172,12 @@ struct S {
@group(0) @binding(0)
var<uniform> u : S;
-@stage(fragment)
+@fragment
fn main() {
let f = u.f;
}
)";
- auto* expect = R"(
+ auto* expect = R"(
@internal(spirv_block)
struct S {
f : f32,
@@ -185,19 +185,19 @@ struct S {
@group(0) @binding(0) var<uniform> u : S;
-@stage(fragment)
+@fragment
fn main() {
let f = u.f;
}
)";
- auto got = Run<AddSpirvBlockAttribute>(src);
+ auto got = Run<AddSpirvBlockAttribute>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(AddSpirvBlockAttributeTest, Nested_OuterBuffer_InnerNotBuffer) {
- auto* src = R"(
+ auto* src = R"(
struct Inner {
f : f32,
};
@@ -209,12 +209,12 @@ struct Outer {
@group(0) @binding(0)
var<uniform> u : Outer;
-@stage(fragment)
+@fragment
fn main() {
let f = u.i.f;
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct Inner {
f : f32,
}
@@ -226,19 +226,19 @@ struct Outer {
@group(0) @binding(0) var<uniform> u : Outer;
-@stage(fragment)
+@fragment
fn main() {
let f = u.i.f;
}
)";
- auto got = Run<AddSpirvBlockAttribute>(src);
+ auto got = Run<AddSpirvBlockAttribute>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(AddSpirvBlockAttributeTest, Nested_OuterBuffer_InnerBuffer) {
- auto* src = R"(
+ auto* src = R"(
struct Inner {
f : f32,
};
@@ -253,13 +253,13 @@ var<uniform> u0 : Outer;
@group(0) @binding(1)
var<uniform> u1 : Inner;
-@stage(fragment)
+@fragment
fn main() {
let f0 = u0.i.f;
let f1 = u1.f;
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct Inner {
f : f32,
}
@@ -278,20 +278,20 @@ struct u1_block {
@group(0) @binding(1) var<uniform> u1 : u1_block;
-@stage(fragment)
+@fragment
fn main() {
let f0 = u0.i.f;
let f1 = u1.inner.f;
}
)";
- auto got = Run<AddSpirvBlockAttribute>(src);
+ auto got = Run<AddSpirvBlockAttribute>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(AddSpirvBlockAttributeTest, Nested_OuterNotBuffer_InnerBuffer) {
- auto* src = R"(
+ auto* src = R"(
struct Inner {
f : f32,
};
@@ -305,13 +305,13 @@ var<private> p : Outer;
@group(0) @binding(1)
var<uniform> u : Inner;
-@stage(fragment)
+@fragment
fn main() {
let f0 = p.i.f;
let f1 = u.f;
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct Inner {
f : f32,
}
@@ -329,20 +329,20 @@ struct u_block {
@group(0) @binding(1) var<uniform> u : u_block;
-@stage(fragment)
+@fragment
fn main() {
let f0 = p.i.f;
let f1 = u.inner.f;
}
)";
- auto got = Run<AddSpirvBlockAttribute>(src);
+ auto got = Run<AddSpirvBlockAttribute>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(AddSpirvBlockAttributeTest, Nested_InnerUsedForMultipleBuffers) {
- auto* src = R"(
+ auto* src = R"(
struct Inner {
f : f32,
};
@@ -360,14 +360,14 @@ var<uniform> u1 : Inner;
@group(0) @binding(2)
var<uniform> u2 : Inner;
-@stage(fragment)
+@fragment
fn main() {
let f0 = u0.i.f;
let f1 = u1.f;
let f2 = u2.f;
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct Inner {
f : f32,
}
@@ -388,7 +388,7 @@ struct u1_block {
@group(0) @binding(2) var<uniform> u2 : u1_block;
-@stage(fragment)
+@fragment
fn main() {
let f0 = u0.i.f;
let f1 = u1.inner.f;
@@ -396,13 +396,13 @@ fn main() {
}
)";
- auto got = Run<AddSpirvBlockAttribute>(src);
+ auto got = Run<AddSpirvBlockAttribute>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(AddSpirvBlockAttributeTest, StructInArray) {
- auto* src = R"(
+ auto* src = R"(
struct S {
f : f32,
};
@@ -410,13 +410,13 @@ struct S {
@group(0) @binding(0)
var<uniform> u : S;
-@stage(fragment)
+@fragment
fn main() {
let f = u.f;
let a = array<S, 4>();
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct S {
f : f32,
}
@@ -428,20 +428,20 @@ struct u_block {
@group(0) @binding(0) var<uniform> u : u_block;
-@stage(fragment)
+@fragment
fn main() {
let f = u.inner.f;
let a = array<S, 4>();
}
)";
- auto got = Run<AddSpirvBlockAttribute>(src);
+ auto got = Run<AddSpirvBlockAttribute>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(AddSpirvBlockAttributeTest, StructInArray_MultipleBuffers) {
- auto* src = R"(
+ auto* src = R"(
struct S {
f : f32,
};
@@ -452,14 +452,14 @@ var<uniform> u0 : S;
@group(0) @binding(1)
var<uniform> u1 : S;
-@stage(fragment)
+@fragment
fn main() {
let f0 = u0.f;
let f1 = u1.f;
let a = array<S, 4>();
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct S {
f : f32,
}
@@ -473,7 +473,7 @@ struct u0_block {
@group(0) @binding(1) var<uniform> u1 : u0_block;
-@stage(fragment)
+@fragment
fn main() {
let f0 = u0.inner.f;
let f1 = u1.inner.f;
@@ -481,13 +481,13 @@ fn main() {
}
)";
- auto got = Run<AddSpirvBlockAttribute>(src);
+ auto got = Run<AddSpirvBlockAttribute>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(AddSpirvBlockAttributeTest, Aliases_Nested_OuterBuffer_InnerBuffer) {
- auto* src = R"(
+ auto* src = R"(
struct Inner {
f : f32,
};
@@ -506,13 +506,13 @@ var<uniform> u0 : MyOuter;
@group(0) @binding(1)
var<uniform> u1 : MyInner;
-@stage(fragment)
+@fragment
fn main() {
let f0 = u0.i.f;
let f1 = u1.f;
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct Inner {
f : f32,
}
@@ -535,22 +535,21 @@ struct u1_block {
@group(0) @binding(1) var<uniform> u1 : u1_block;
-@stage(fragment)
+@fragment
fn main() {
let f0 = u0.i.f;
let f1 = u1.inner.f;
}
)";
- auto got = Run<AddSpirvBlockAttribute>(src);
+ auto got = Run<AddSpirvBlockAttribute>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
-TEST_F(AddSpirvBlockAttributeTest,
- Aliases_Nested_OuterBuffer_InnerBuffer_OutOfOrder) {
- auto* src = R"(
-@stage(fragment)
+TEST_F(AddSpirvBlockAttributeTest, Aliases_Nested_OuterBuffer_InnerBuffer_OutOfOrder) {
+ auto* src = R"(
+@fragment
fn main() {
let f0 = u0.i.f;
let f1 = u1.f;
@@ -574,8 +573,8 @@ struct Inner {
f : f32,
};
)";
- auto* expect = R"(
-@stage(fragment)
+ auto* expect = R"(
+@fragment
fn main() {
let f0 = u0.i.f;
let f1 = u1.inner.f;
@@ -604,9 +603,9 @@ struct Inner {
}
)";
- auto got = Run<AddSpirvBlockAttribute>(src);
+ auto got = Run<AddSpirvBlockAttribute>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/transform/array_length_from_uniform.cc b/chromium/third_party/dawn/src/tint/transform/array_length_from_uniform.cc
index 52c68f2c848..86c45345d8b 100644
--- a/chromium/third_party/dawn/src/tint/transform/array_length_from_uniform.cc
+++ b/chromium/third_party/dawn/src/tint/transform/array_length_from_uniform.cc
@@ -43,132 +43,124 @@ ArrayLengthFromUniform::~ArrayLengthFromUniform() = default;
/// sem::GlobalVariable for the storage buffer.
template <typename F>
static void IterateArrayLengthOnStorageVar(CloneContext& ctx, F&& functor) {
- auto& sem = ctx.src->Sem();
+ auto& sem = ctx.src->Sem();
- // Find all calls to the arrayLength() builtin.
- for (auto* node : ctx.src->ASTNodes().Objects()) {
- auto* call_expr = node->As<ast::CallExpression>();
- if (!call_expr) {
- continue;
- }
+ // Find all calls to the arrayLength() builtin.
+ for (auto* node : ctx.src->ASTNodes().Objects()) {
+ auto* call_expr = node->As<ast::CallExpression>();
+ if (!call_expr) {
+ continue;
+ }
- auto* call = sem.Get(call_expr);
- auto* builtin = call->Target()->As<sem::Builtin>();
- if (!builtin || builtin->Type() != sem::BuiltinType::kArrayLength) {
- continue;
- }
+ auto* call = sem.Get(call_expr)->UnwrapMaterialize()->As<sem::Call>();
+ auto* builtin = call->Target()->As<sem::Builtin>();
+ if (!builtin || builtin->Type() != sem::BuiltinType::kArrayLength) {
+ continue;
+ }
- // Get the storage buffer that contains the runtime array.
- // Since we require SimplifyPointers, we can assume that the arrayLength()
- // call has one of two forms:
- // arrayLength(&struct_var.array_member)
- // arrayLength(&array_var)
- auto* param = call_expr->args[0]->As<ast::UnaryOpExpression>();
- if (!param || param->op != ast::UnaryOp::kAddressOf) {
- TINT_ICE(Transform, ctx.dst->Diagnostics())
- << "expected form of arrayLength argument to be &array_var or "
- "&struct_var.array_member";
- break;
- }
- auto* storage_buffer_expr = param->expr;
- if (auto* accessor = param->expr->As<ast::MemberAccessorExpression>()) {
- storage_buffer_expr = accessor->structure;
- }
- auto* storage_buffer_sem = sem.Get<sem::VariableUser>(storage_buffer_expr);
- if (!storage_buffer_sem) {
- TINT_ICE(Transform, ctx.dst->Diagnostics())
- << "expected form of arrayLength argument to be &array_var or "
- "&struct_var.array_member";
- break;
- }
+ // Get the storage buffer that contains the runtime array.
+ // Since we require SimplifyPointers, we can assume that the arrayLength()
+ // call has one of two forms:
+ // arrayLength(&struct_var.array_member)
+ // arrayLength(&array_var)
+ auto* param = call_expr->args[0]->As<ast::UnaryOpExpression>();
+ if (!param || param->op != ast::UnaryOp::kAddressOf) {
+ TINT_ICE(Transform, ctx.dst->Diagnostics())
+ << "expected form of arrayLength argument to be &array_var or "
+ "&struct_var.array_member";
+ break;
+ }
+ auto* storage_buffer_expr = param->expr;
+ if (auto* accessor = param->expr->As<ast::MemberAccessorExpression>()) {
+ storage_buffer_expr = accessor->structure;
+ }
+ auto* storage_buffer_sem = sem.Get<sem::VariableUser>(storage_buffer_expr);
+ if (!storage_buffer_sem) {
+ TINT_ICE(Transform, ctx.dst->Diagnostics())
+ << "expected form of arrayLength argument to be &array_var or "
+ "&struct_var.array_member";
+ break;
+ }
- // Get the index to use for the buffer size array.
- auto* var = tint::As<sem::GlobalVariable>(storage_buffer_sem->Variable());
- if (!var) {
- TINT_ICE(Transform, ctx.dst->Diagnostics())
- << "storage buffer is not a global variable";
- break;
+ // Get the index to use for the buffer size array.
+ auto* var = tint::As<sem::GlobalVariable>(storage_buffer_sem->Variable());
+ if (!var) {
+ TINT_ICE(Transform, ctx.dst->Diagnostics())
+ << "storage buffer is not a global variable";
+ break;
+ }
+ functor(call_expr, storage_buffer_sem, var);
}
- functor(call_expr, storage_buffer_sem, var);
- }
}
-bool ArrayLengthFromUniform::ShouldRun(const Program* program,
- const DataMap&) const {
- for (auto* fn : program->AST().Functions()) {
- if (auto* sem_fn = program->Sem().Get(fn)) {
- for (auto* builtin : sem_fn->DirectlyCalledBuiltins()) {
- if (builtin->Type() == sem::BuiltinType::kArrayLength) {
- return true;
+bool ArrayLengthFromUniform::ShouldRun(const Program* program, const DataMap&) const {
+ for (auto* fn : program->AST().Functions()) {
+ if (auto* sem_fn = program->Sem().Get(fn)) {
+ for (auto* builtin : sem_fn->DirectlyCalledBuiltins()) {
+ if (builtin->Type() == sem::BuiltinType::kArrayLength) {
+ return true;
+ }
+ }
}
- }
}
- }
- return false;
+ return false;
}
-void ArrayLengthFromUniform::Run(CloneContext& ctx,
- const DataMap& inputs,
- DataMap& outputs) const {
- auto* cfg = inputs.Get<Config>();
- if (cfg == nullptr) {
- ctx.dst->Diagnostics().add_error(
- diag::System::Transform,
- "missing transform data for " + std::string(TypeInfo().name));
- return;
- }
-
- const char* kBufferSizeMemberName = "buffer_size";
-
- // Determine the size of the buffer size array.
- uint32_t max_buffer_size_index = 0;
-
- IterateArrayLengthOnStorageVar(
- ctx, [&](const ast::CallExpression*, const sem::VariableUser*,
- const sem::GlobalVariable* var) {
+void ArrayLengthFromUniform::Run(CloneContext& ctx, const DataMap& inputs, DataMap& outputs) const {
+ auto* cfg = inputs.Get<Config>();
+ if (cfg == nullptr) {
+ ctx.dst->Diagnostics().add_error(
+ diag::System::Transform, "missing transform data for " + std::string(TypeInfo().name));
+ return;
+ }
+
+ const char* kBufferSizeMemberName = "buffer_size";
+
+ // Determine the size of the buffer size array.
+ uint32_t max_buffer_size_index = 0;
+
+ IterateArrayLengthOnStorageVar(ctx, [&](const ast::CallExpression*, const sem::VariableUser*,
+ const sem::GlobalVariable* var) {
auto binding = var->BindingPoint();
auto idx_itr = cfg->bindpoint_to_size_index.find(binding);
if (idx_itr == cfg->bindpoint_to_size_index.end()) {
- return;
+ return;
}
if (idx_itr->second > max_buffer_size_index) {
- max_buffer_size_index = idx_itr->second;
+ max_buffer_size_index = idx_itr->second;
}
- });
-
- // Get (or create, on first call) the uniform buffer that will receive the
- // size of each storage buffer in the module.
- const ast::Variable* buffer_size_ubo = nullptr;
- auto get_ubo = [&]() {
- if (!buffer_size_ubo) {
- // Emit an array<vec4<u32>, N>, where N is 1/4 number of elements.
- // We do this because UBOs require an element stride that is 16-byte
- // aligned.
- auto* buffer_size_struct = ctx.dst->Structure(
- ctx.dst->Sym(),
- {ctx.dst->Member(
- kBufferSizeMemberName,
- ctx.dst->ty.array(ctx.dst->ty.vec4(ctx.dst->ty.u32()),
- (max_buffer_size_index / 4) + 1))});
- buffer_size_ubo = ctx.dst->Global(
- ctx.dst->Sym(), ctx.dst->ty.Of(buffer_size_struct),
- ast::StorageClass::kUniform,
- ast::AttributeList{ctx.dst->GroupAndBinding(
- cfg->ubo_binding.group, cfg->ubo_binding.binding)});
- }
- return buffer_size_ubo;
- };
+ });
+
+ // Get (or create, on first call) the uniform buffer that will receive the
+ // size of each storage buffer in the module.
+ const ast::Variable* buffer_size_ubo = nullptr;
+ auto get_ubo = [&]() {
+ if (!buffer_size_ubo) {
+ // Emit an array<vec4<u32>, N>, where N is 1/4 number of elements.
+ // We do this because UBOs require an element stride that is 16-byte
+ // aligned.
+ auto* buffer_size_struct = ctx.dst->Structure(
+ ctx.dst->Sym(),
+ {ctx.dst->Member(kBufferSizeMemberName,
+ ctx.dst->ty.array(ctx.dst->ty.vec4(ctx.dst->ty.u32()),
+ u32((max_buffer_size_index / 4) + 1)))});
+ buffer_size_ubo = ctx.dst->Global(
+ ctx.dst->Sym(), ctx.dst->ty.Of(buffer_size_struct), ast::StorageClass::kUniform,
+ ast::AttributeList{
+ ctx.dst->GroupAndBinding(cfg->ubo_binding.group, cfg->ubo_binding.binding)});
+ }
+ return buffer_size_ubo;
+ };
- std::unordered_set<uint32_t> used_size_indices;
+ std::unordered_set<uint32_t> used_size_indices;
- IterateArrayLengthOnStorageVar(
- ctx, [&](const ast::CallExpression* call_expr,
- const sem::VariableUser* storage_buffer_sem,
- const sem::GlobalVariable* var) {
+ IterateArrayLengthOnStorageVar(ctx, [&](const ast::CallExpression* call_expr,
+ const sem::VariableUser* storage_buffer_sem,
+ const sem::GlobalVariable* var) {
auto binding = var->BindingPoint();
auto idx_itr = cfg->bindpoint_to_size_index.find(binding);
if (idx_itr == cfg->bindpoint_to_size_index.end()) {
- return;
+ return;
}
uint32_t size_index = idx_itr->second;
@@ -177,11 +169,9 @@ void ArrayLengthFromUniform::Run(CloneContext& ctx,
// Load the total storage buffer size from the UBO.
uint32_t array_index = size_index / 4;
auto* vec_expr = ctx.dst->IndexAccessor(
- ctx.dst->MemberAccessor(get_ubo()->symbol, kBufferSizeMemberName),
- array_index);
+ ctx.dst->MemberAccessor(get_ubo()->symbol, kBufferSizeMemberName), u32(array_index));
uint32_t vec_index = size_index % 4;
- auto* total_storage_buffer_size =
- ctx.dst->IndexAccessor(vec_expr, vec_index);
+ auto* total_storage_buffer_size = ctx.dst->IndexAccessor(vec_expr, u32(vec_index));
// Calculate actual array length
// total_storage_buffer_size - array_offset
@@ -191,39 +181,35 @@ void ArrayLengthFromUniform::Run(CloneContext& ctx,
auto* storage_buffer_type = storage_buffer_sem->Type()->UnwrapRef();
const sem::Array* array_type = nullptr;
if (auto* str = storage_buffer_type->As<sem::Struct>()) {
- // The variable is a struct, so subtract the byte offset of the array
- // member.
- auto* array_member_sem = str->Members().back();
- array_type = array_member_sem->Type()->As<sem::Array>();
- total_size = ctx.dst->Sub(total_storage_buffer_size,
- array_member_sem->Offset());
+ // The variable is a struct, so subtract the byte offset of the array
+ // member.
+ auto* array_member_sem = str->Members().back();
+ array_type = array_member_sem->Type()->As<sem::Array>();
+ total_size = ctx.dst->Sub(total_storage_buffer_size, u32(array_member_sem->Offset()));
} else if (auto* arr = storage_buffer_type->As<sem::Array>()) {
- array_type = arr;
+ array_type = arr;
} else {
- TINT_ICE(Transform, ctx.dst->Diagnostics())
- << "expected form of arrayLength argument to be &array_var or "
- "&struct_var.array_member";
- return;
+ TINT_ICE(Transform, ctx.dst->Diagnostics())
+ << "expected form of arrayLength argument to be &array_var or "
+ "&struct_var.array_member";
+ return;
}
- auto* array_length = ctx.dst->Div(total_size, array_type->Stride());
+ auto* array_length = ctx.dst->Div(total_size, u32(array_type->Stride()));
ctx.Replace(call_expr, array_length);
- });
+ });
- ctx.Clone();
+ ctx.Clone();
- outputs.Add<Result>(used_size_indices);
+ outputs.Add<Result>(used_size_indices);
}
-ArrayLengthFromUniform::Config::Config(sem::BindingPoint ubo_bp)
- : ubo_binding(ubo_bp) {}
+ArrayLengthFromUniform::Config::Config(sem::BindingPoint ubo_bp) : ubo_binding(ubo_bp) {}
ArrayLengthFromUniform::Config::Config(const Config&) = default;
-ArrayLengthFromUniform::Config& ArrayLengthFromUniform::Config::operator=(
- const Config&) = default;
+ArrayLengthFromUniform::Config& ArrayLengthFromUniform::Config::operator=(const Config&) = default;
ArrayLengthFromUniform::Config::~Config() = default;
-ArrayLengthFromUniform::Result::Result(
- std::unordered_set<uint32_t> used_size_indices_in)
+ArrayLengthFromUniform::Result::Result(std::unordered_set<uint32_t> used_size_indices_in)
: used_size_indices(std::move(used_size_indices_in)) {}
ArrayLengthFromUniform::Result::Result(const Result&) = default;
ArrayLengthFromUniform::Result::~Result() = default;
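Illustrative sketch (not part of the patch): the reformatted ArrayLengthFromUniform transform keeps its original arithmetic — each arrayLength() call becomes (total_storage_buffer_size - array_member_offset) / element_stride, with the per-buffer sizes packed into an array<vec4<u32>, N> uniform so that size index i is read from element i / 4, component i % 4. A minimal standalone C++ sketch of that arithmetic follows; the input values are hypothetical examples, the real ones come from the generated UBO, the struct member offset and the sem::Array stride.

#include <cstdint>
#include <iostream>

// Mirrors the length computation the transform emits into the shader.
uint32_t ArrayLengthFromBufferSize(uint32_t total_buffer_size_bytes,
                                   uint32_t array_member_offset_bytes,
                                   uint32_t element_stride_bytes) {
    // Subtract the byte offset of the runtime-sized array member, then
    // divide by the element stride to get the element count.
    uint32_t array_bytes = total_buffer_size_bytes - array_member_offset_bytes;
    return array_bytes / element_stride_bytes;
}

int main() {
    // e.g. a 256-byte storage buffer whose runtime-sized array of i32 starts
    // at byte offset 4 (matching the "(... - 4u) / 4u" pattern in the tests).
    std::cout << ArrayLengthFromBufferSize(256u, 4u, 4u) << "\n";  // prints 63
    // In the shader, size index i is then fetched from buffer_size[i / 4][i % 4].
    return 0;
}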
diff --git a/chromium/third_party/dawn/src/tint/transform/array_length_from_uniform.h b/chromium/third_party/dawn/src/tint/transform/array_length_from_uniform.h
index 9a3a5d52701..c34c529da01 100644
--- a/chromium/third_party/dawn/src/tint/transform/array_length_from_uniform.h
+++ b/chromium/third_party/dawn/src/tint/transform/array_length_from_uniform.h
@@ -52,71 +52,67 @@ namespace tint::transform {
///
/// @note Depends on the following transforms to have been run first:
/// * SimplifyPointers
-class ArrayLengthFromUniform final
- : public Castable<ArrayLengthFromUniform, Transform> {
- public:
- /// Constructor
- ArrayLengthFromUniform();
- /// Destructor
- ~ArrayLengthFromUniform() override;
-
- /// Configuration options for the ArrayLengthFromUniform transform.
- struct Config final : public Castable<Data, transform::Data> {
+class ArrayLengthFromUniform final : public Castable<ArrayLengthFromUniform, Transform> {
+ public:
/// Constructor
- /// @param ubo_bp the binding point to use for the generated uniform buffer.
- explicit Config(sem::BindingPoint ubo_bp);
-
- /// Copy constructor
- Config(const Config&);
-
- /// Copy assignment
- /// @return this Config
- Config& operator=(const Config&);
-
- /// Destructor
- ~Config() override;
-
- /// The binding point to use for the generated uniform buffer.
- sem::BindingPoint ubo_binding;
-
- /// The mapping from binding point to the index for the buffer size lookup.
- std::unordered_map<sem::BindingPoint, uint32_t> bindpoint_to_size_index;
- };
-
- /// Information produced about what the transform did.
- /// If there were no calls to the arrayLength() builtin, then no Result will
- /// be emitted.
- struct Result final : public Castable<Result, transform::Data> {
- /// Constructor
- /// @param used_size_indices Indices into the UBO that are statically used.
- explicit Result(std::unordered_set<uint32_t> used_size_indices);
-
- /// Copy constructor
- Result(const Result&);
-
+ ArrayLengthFromUniform();
/// Destructor
- ~Result() override;
-
- /// Indices into the UBO that are statically used.
- std::unordered_set<uint32_t> used_size_indices;
- };
-
- /// @param program the program to inspect
- /// @param data optional extra transform-specific input data
- /// @returns true if this transform should be run for the given program
- bool ShouldRun(const Program* program,
- const DataMap& data = {}) const override;
-
- protected:
- /// Runs the transform using the CloneContext built for transforming a
- /// program. Run() is responsible for calling Clone() on the CloneContext.
- /// @param ctx the CloneContext primed with the input program and
- /// ProgramBuilder
- /// @param inputs optional extra transform-specific input data
- /// @param outputs optional extra transform-specific output data
- void Run(CloneContext& ctx,
- const DataMap& inputs,
- DataMap& outputs) const override;
+ ~ArrayLengthFromUniform() override;
+
+ /// Configuration options for the ArrayLengthFromUniform transform.
+ struct Config final : public Castable<Data, transform::Data> {
+ /// Constructor
+ /// @param ubo_bp the binding point to use for the generated uniform buffer.
+ explicit Config(sem::BindingPoint ubo_bp);
+
+ /// Copy constructor
+ Config(const Config&);
+
+ /// Copy assignment
+ /// @return this Config
+ Config& operator=(const Config&);
+
+ /// Destructor
+ ~Config() override;
+
+ /// The binding point to use for the generated uniform buffer.
+ sem::BindingPoint ubo_binding;
+
+ /// The mapping from binding point to the index for the buffer size lookup.
+ std::unordered_map<sem::BindingPoint, uint32_t> bindpoint_to_size_index;
+ };
+
+ /// Information produced about what the transform did.
+ /// If there were no calls to the arrayLength() builtin, then no Result will
+ /// be emitted.
+ struct Result final : public Castable<Result, transform::Data> {
+ /// Constructor
+ /// @param used_size_indices Indices into the UBO that are statically used.
+ explicit Result(std::unordered_set<uint32_t> used_size_indices);
+
+ /// Copy constructor
+ Result(const Result&);
+
+ /// Destructor
+ ~Result() override;
+
+ /// Indices into the UBO that are statically used.
+ std::unordered_set<uint32_t> used_size_indices;
+ };
+
+ /// @param program the program to inspect
+ /// @param data optional extra transform-specific input data
+ /// @returns true if this transform should be run for the given program
+ bool ShouldRun(const Program* program, const DataMap& data = {}) const override;
+
+ protected:
+ /// Runs the transform using the CloneContext built for transforming a
+ /// program. Run() is responsible for calling Clone() on the CloneContext.
+ /// @param ctx the CloneContext primed with the input program and
+ /// ProgramBuilder
+ /// @param inputs optional extra transform-specific input data
+ /// @param outputs optional extra transform-specific output data
+ void Run(CloneContext& ctx, const DataMap& inputs, DataMap& outputs) const override;
};
} // namespace tint::transform
diff --git a/chromium/third_party/dawn/src/tint/transform/array_length_from_uniform_test.cc b/chromium/third_party/dawn/src/tint/transform/array_length_from_uniform_test.cc
index 42a334ce4ef..109904ce266 100644
--- a/chromium/third_party/dawn/src/tint/transform/array_length_from_uniform_test.cc
+++ b/chromium/third_party/dawn/src/tint/transform/array_length_from_uniform_test.cc
@@ -26,13 +26,13 @@ namespace {
using ArrayLengthFromUniformTest = TransformTest;
TEST_F(ArrayLengthFromUniformTest, ShouldRunEmptyModule) {
- auto* src = R"()";
+ auto* src = R"()";
- EXPECT_FALSE(ShouldRun<ArrayLengthFromUniform>(src));
+ EXPECT_FALSE(ShouldRun<ArrayLengthFromUniform>(src));
}
TEST_F(ArrayLengthFromUniformTest, ShouldRunNoArrayLength) {
- auto* src = R"(
+ auto* src = R"(
struct SB {
x : i32,
arr : array<i32>,
@@ -40,16 +40,16 @@ struct SB {
@group(0) @binding(0) var<storage, read> sb : SB;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
}
)";
- EXPECT_FALSE(ShouldRun<ArrayLengthFromUniform>(src));
+ EXPECT_FALSE(ShouldRun<ArrayLengthFromUniform>(src));
}
TEST_F(ArrayLengthFromUniformTest, ShouldRunWithArrayLength) {
- auto* src = R"(
+ auto* src = R"(
struct SB {
x : i32,
arr : array<i32>,
@@ -57,17 +57,17 @@ struct SB {
@group(0) @binding(0) var<storage, read> sb : SB;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
var len : u32 = arrayLength(&sb.arr);
}
)";
- EXPECT_TRUE(ShouldRun<ArrayLengthFromUniform>(src));
+ EXPECT_TRUE(ShouldRun<ArrayLengthFromUniform>(src));
}
TEST_F(ArrayLengthFromUniformTest, Error_MissingTransformData) {
- auto* src = R"(
+ auto* src = R"(
struct SB {
x : i32,
arr : array<i32>,
@@ -75,32 +75,32 @@ struct SB {
@group(0) @binding(0) var<storage, read> sb : SB;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
var len : u32 = arrayLength(&sb.arr);
}
)";
- auto* expect =
- "error: missing transform data for "
- "tint::transform::ArrayLengthFromUniform";
+ auto* expect =
+ "error: missing transform data for "
+ "tint::transform::ArrayLengthFromUniform";
- auto got = Run<Unshadow, SimplifyPointers, ArrayLengthFromUniform>(src);
+ auto got = Run<Unshadow, SimplifyPointers, ArrayLengthFromUniform>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(ArrayLengthFromUniformTest, Basic) {
- auto* src = R"(
+ auto* src = R"(
@group(0) @binding(0) var<storage, read> sb : array<i32>;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
var len : u32 = arrayLength(&sb);
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct tint_symbol {
buffer_size : array<vec4<u32>, 1u>,
}
@@ -109,27 +109,27 @@ struct tint_symbol {
@group(0) @binding(0) var<storage, read> sb : array<i32>;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
var len : u32 = (tint_symbol_1.buffer_size[0u][0u] / 4u);
}
)";
- ArrayLengthFromUniform::Config cfg({0, 30u});
- cfg.bindpoint_to_size_index.emplace(sem::BindingPoint{0, 0}, 0);
+ ArrayLengthFromUniform::Config cfg({0, 30u});
+ cfg.bindpoint_to_size_index.emplace(sem::BindingPoint{0, 0}, 0);
- DataMap data;
- data.Add<ArrayLengthFromUniform::Config>(std::move(cfg));
+ DataMap data;
+ data.Add<ArrayLengthFromUniform::Config>(std::move(cfg));
- auto got = Run<Unshadow, SimplifyPointers, ArrayLengthFromUniform>(src, data);
+ auto got = Run<Unshadow, SimplifyPointers, ArrayLengthFromUniform>(src, data);
- EXPECT_EQ(expect, str(got));
- EXPECT_EQ(std::unordered_set<uint32_t>({0}),
- got.data.Get<ArrayLengthFromUniform::Result>()->used_size_indices);
+ EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(std::unordered_set<uint32_t>({0}),
+ got.data.Get<ArrayLengthFromUniform::Result>()->used_size_indices);
}
TEST_F(ArrayLengthFromUniformTest, BasicInStruct) {
- auto* src = R"(
+ auto* src = R"(
struct SB {
x : i32,
arr : array<i32>,
@@ -137,13 +137,13 @@ struct SB {
@group(0) @binding(0) var<storage, read> sb : SB;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
var len : u32 = arrayLength(&sb.arr);
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct tint_symbol {
buffer_size : array<vec4<u32>, 1u>,
}
@@ -157,27 +157,27 @@ struct SB {
@group(0) @binding(0) var<storage, read> sb : SB;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
var len : u32 = ((tint_symbol_1.buffer_size[0u][0u] - 4u) / 4u);
}
)";
- ArrayLengthFromUniform::Config cfg({0, 30u});
- cfg.bindpoint_to_size_index.emplace(sem::BindingPoint{0, 0}, 0);
+ ArrayLengthFromUniform::Config cfg({0, 30u});
+ cfg.bindpoint_to_size_index.emplace(sem::BindingPoint{0, 0}, 0);
- DataMap data;
- data.Add<ArrayLengthFromUniform::Config>(std::move(cfg));
+ DataMap data;
+ data.Add<ArrayLengthFromUniform::Config>(std::move(cfg));
- auto got = Run<Unshadow, SimplifyPointers, ArrayLengthFromUniform>(src, data);
+ auto got = Run<Unshadow, SimplifyPointers, ArrayLengthFromUniform>(src, data);
- EXPECT_EQ(expect, str(got));
- EXPECT_EQ(std::unordered_set<uint32_t>({0}),
- got.data.Get<ArrayLengthFromUniform::Result>()->used_size_indices);
+ EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(std::unordered_set<uint32_t>({0}),
+ got.data.Get<ArrayLengthFromUniform::Result>()->used_size_indices);
}
TEST_F(ArrayLengthFromUniformTest, MultipleStorageBuffers) {
- auto* src = R"(
+ auto* src = R"(
struct SB1 {
x : i32,
arr1 : array<i32>,
@@ -197,7 +197,7 @@ struct SB4 {
@group(3) @binding(2) var<storage, read> sb4 : SB4;
@group(4) @binding(2) var<storage, read> sb5 : array<vec4<f32>>;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
var len1 : u32 = arrayLength(&(sb1.arr1));
var len2 : u32 = arrayLength(&(sb2.arr2));
@@ -208,7 +208,7 @@ fn main() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct tint_symbol {
buffer_size : array<vec4<u32>, 2u>,
}
@@ -240,7 +240,7 @@ struct SB4 {
@group(4) @binding(2) var<storage, read> sb5 : array<vec4<f32>>;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
var len1 : u32 = ((tint_symbol_1.buffer_size[0u][0u] - 4u) / 4u);
var len2 : u32 = ((tint_symbol_1.buffer_size[0u][1u] - 16u) / 16u);
@@ -251,25 +251,25 @@ fn main() {
}
)";
- ArrayLengthFromUniform::Config cfg({0, 30u});
- cfg.bindpoint_to_size_index.emplace(sem::BindingPoint{0, 2u}, 0);
- cfg.bindpoint_to_size_index.emplace(sem::BindingPoint{1u, 2u}, 1);
- cfg.bindpoint_to_size_index.emplace(sem::BindingPoint{2u, 2u}, 2);
- cfg.bindpoint_to_size_index.emplace(sem::BindingPoint{3u, 2u}, 3);
- cfg.bindpoint_to_size_index.emplace(sem::BindingPoint{4u, 2u}, 4);
+ ArrayLengthFromUniform::Config cfg({0, 30u});
+ cfg.bindpoint_to_size_index.emplace(sem::BindingPoint{0, 2u}, 0);
+ cfg.bindpoint_to_size_index.emplace(sem::BindingPoint{1u, 2u}, 1);
+ cfg.bindpoint_to_size_index.emplace(sem::BindingPoint{2u, 2u}, 2);
+ cfg.bindpoint_to_size_index.emplace(sem::BindingPoint{3u, 2u}, 3);
+ cfg.bindpoint_to_size_index.emplace(sem::BindingPoint{4u, 2u}, 4);
- DataMap data;
- data.Add<ArrayLengthFromUniform::Config>(std::move(cfg));
+ DataMap data;
+ data.Add<ArrayLengthFromUniform::Config>(std::move(cfg));
- auto got = Run<Unshadow, SimplifyPointers, ArrayLengthFromUniform>(src, data);
+ auto got = Run<Unshadow, SimplifyPointers, ArrayLengthFromUniform>(src, data);
- EXPECT_EQ(expect, str(got));
- EXPECT_EQ(std::unordered_set<uint32_t>({0, 1, 2, 3, 4}),
- got.data.Get<ArrayLengthFromUniform::Result>()->used_size_indices);
+ EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(std::unordered_set<uint32_t>({0, 1, 2, 3, 4}),
+ got.data.Get<ArrayLengthFromUniform::Result>()->used_size_indices);
}
TEST_F(ArrayLengthFromUniformTest, MultipleUnusedStorageBuffers) {
- auto* src = R"(
+ auto* src = R"(
struct SB1 {
x : i32,
arr1 : array<i32>,
@@ -289,7 +289,7 @@ struct SB4 {
@group(3) @binding(2) var<storage, read> sb4 : SB4;
@group(4) @binding(2) var<storage, read> sb5 : array<vec4<f32>>;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
var len1 : u32 = arrayLength(&(sb1.arr1));
var len3 : u32 = arrayLength(&sb3);
@@ -297,7 +297,7 @@ fn main() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct tint_symbol {
buffer_size : array<vec4<u32>, 1u>,
}
@@ -329,7 +329,7 @@ struct SB4 {
@group(4) @binding(2) var<storage, read> sb5 : array<vec4<f32>>;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
var len1 : u32 = ((tint_symbol_1.buffer_size[0u][0u] - 4u) / 4u);
var len3 : u32 = (tint_symbol_1.buffer_size[0u][2u] / 16u);
@@ -337,25 +337,25 @@ fn main() {
}
)";
- ArrayLengthFromUniform::Config cfg({0, 30u});
- cfg.bindpoint_to_size_index.emplace(sem::BindingPoint{0, 2u}, 0);
- cfg.bindpoint_to_size_index.emplace(sem::BindingPoint{1u, 2u}, 1);
- cfg.bindpoint_to_size_index.emplace(sem::BindingPoint{2u, 2u}, 2);
- cfg.bindpoint_to_size_index.emplace(sem::BindingPoint{3u, 2u}, 3);
- cfg.bindpoint_to_size_index.emplace(sem::BindingPoint{4u, 2u}, 4);
+ ArrayLengthFromUniform::Config cfg({0, 30u});
+ cfg.bindpoint_to_size_index.emplace(sem::BindingPoint{0, 2u}, 0);
+ cfg.bindpoint_to_size_index.emplace(sem::BindingPoint{1u, 2u}, 1);
+ cfg.bindpoint_to_size_index.emplace(sem::BindingPoint{2u, 2u}, 2);
+ cfg.bindpoint_to_size_index.emplace(sem::BindingPoint{3u, 2u}, 3);
+ cfg.bindpoint_to_size_index.emplace(sem::BindingPoint{4u, 2u}, 4);
- DataMap data;
- data.Add<ArrayLengthFromUniform::Config>(std::move(cfg));
+ DataMap data;
+ data.Add<ArrayLengthFromUniform::Config>(std::move(cfg));
- auto got = Run<Unshadow, SimplifyPointers, ArrayLengthFromUniform>(src, data);
+ auto got = Run<Unshadow, SimplifyPointers, ArrayLengthFromUniform>(src, data);
- EXPECT_EQ(expect, str(got));
- EXPECT_EQ(std::unordered_set<uint32_t>({0, 2}),
- got.data.Get<ArrayLengthFromUniform::Result>()->used_size_indices);
+ EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(std::unordered_set<uint32_t>({0, 2}),
+ got.data.Get<ArrayLengthFromUniform::Result>()->used_size_indices);
}
TEST_F(ArrayLengthFromUniformTest, NoArrayLengthCalls) {
- auto* src = R"(
+ auto* src = R"(
struct SB {
x : i32,
arr : array<i32>,
@@ -363,26 +363,26 @@ struct SB {
@group(0) @binding(0) var<storage, read> sb : SB;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
_ = &(sb.arr);
}
)";
- ArrayLengthFromUniform::Config cfg({0, 30u});
- cfg.bindpoint_to_size_index.emplace(sem::BindingPoint{0, 0}, 0);
+ ArrayLengthFromUniform::Config cfg({0, 30u});
+ cfg.bindpoint_to_size_index.emplace(sem::BindingPoint{0, 0}, 0);
- DataMap data;
- data.Add<ArrayLengthFromUniform::Config>(std::move(cfg));
+ DataMap data;
+ data.Add<ArrayLengthFromUniform::Config>(std::move(cfg));
- auto got = Run<Unshadow, SimplifyPointers, ArrayLengthFromUniform>(src, data);
+ auto got = Run<Unshadow, SimplifyPointers, ArrayLengthFromUniform>(src, data);
- EXPECT_EQ(src, str(got));
- EXPECT_EQ(got.data.Get<ArrayLengthFromUniform::Result>(), nullptr);
+ EXPECT_EQ(src, str(got));
+ EXPECT_EQ(got.data.Get<ArrayLengthFromUniform::Result>(), nullptr);
}
TEST_F(ArrayLengthFromUniformTest, MissingBindingPointToIndexMapping) {
- auto* src = R"(
+ auto* src = R"(
struct SB1 {
x : i32,
arr1 : array<i32>,
@@ -397,7 +397,7 @@ struct SB2 {
@group(1) @binding(2) var<storage, read> sb2 : SB2;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
var len1 : u32 = arrayLength(&(sb1.arr1));
var len2 : u32 = arrayLength(&(sb2.arr2));
@@ -405,7 +405,7 @@ fn main() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct tint_symbol {
buffer_size : array<vec4<u32>, 1u>,
}
@@ -426,7 +426,7 @@ struct SB2 {
@group(1) @binding(2) var<storage, read> sb2 : SB2;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
var len1 : u32 = ((tint_symbol_1.buffer_size[0u][0u] - 4u) / 4u);
var len2 : u32 = arrayLength(&(sb2.arr2));
@@ -434,22 +434,22 @@ fn main() {
}
)";
- ArrayLengthFromUniform::Config cfg({0, 30u});
- cfg.bindpoint_to_size_index.emplace(sem::BindingPoint{0, 2}, 0);
+ ArrayLengthFromUniform::Config cfg({0, 30u});
+ cfg.bindpoint_to_size_index.emplace(sem::BindingPoint{0, 2}, 0);
- DataMap data;
- data.Add<ArrayLengthFromUniform::Config>(std::move(cfg));
+ DataMap data;
+ data.Add<ArrayLengthFromUniform::Config>(std::move(cfg));
- auto got = Run<Unshadow, SimplifyPointers, ArrayLengthFromUniform>(src, data);
+ auto got = Run<Unshadow, SimplifyPointers, ArrayLengthFromUniform>(src, data);
- EXPECT_EQ(expect, str(got));
- EXPECT_EQ(std::unordered_set<uint32_t>({0}),
- got.data.Get<ArrayLengthFromUniform::Result>()->used_size_indices);
+ EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(std::unordered_set<uint32_t>({0}),
+ got.data.Get<ArrayLengthFromUniform::Result>()->used_size_indices);
}
TEST_F(ArrayLengthFromUniformTest, OutOfOrder) {
- auto* src = R"(
-@stage(compute) @workgroup_size(1)
+ auto* src = R"(
+@compute @workgroup_size(1)
fn main() {
var len : u32 = arrayLength(&sb.arr);
}
@@ -462,14 +462,14 @@ struct SB {
};
)";
- auto* expect = R"(
+ auto* expect = R"(
struct tint_symbol {
buffer_size : array<vec4<u32>, 1u>,
}
@group(0) @binding(30) var<uniform> tint_symbol_1 : tint_symbol;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
var len : u32 = ((tint_symbol_1.buffer_size[0u][0u] - 4u) / 4u);
}
@@ -482,17 +482,17 @@ struct SB {
}
)";
- ArrayLengthFromUniform::Config cfg({0, 30u});
- cfg.bindpoint_to_size_index.emplace(sem::BindingPoint{0, 0}, 0);
+ ArrayLengthFromUniform::Config cfg({0, 30u});
+ cfg.bindpoint_to_size_index.emplace(sem::BindingPoint{0, 0}, 0);
- DataMap data;
- data.Add<ArrayLengthFromUniform::Config>(std::move(cfg));
+ DataMap data;
+ data.Add<ArrayLengthFromUniform::Config>(std::move(cfg));
- auto got = Run<Unshadow, SimplifyPointers, ArrayLengthFromUniform>(src, data);
+ auto got = Run<Unshadow, SimplifyPointers, ArrayLengthFromUniform>(src, data);
- EXPECT_EQ(expect, str(got));
- EXPECT_EQ(std::unordered_set<uint32_t>({0}),
- got.data.Get<ArrayLengthFromUniform::Result>()->used_size_indices);
+ EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(std::unordered_set<uint32_t>({0}),
+ got.data.Get<ArrayLengthFromUniform::Result>()->used_size_indices);
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/transform/binding_remapper.cc b/chromium/third_party/dawn/src/tint/transform/binding_remapper.cc
index 3934b207d0b..e3b7afdf896 100644
--- a/chromium/third_party/dawn/src/tint/transform/binding_remapper.cc
+++ b/chromium/third_party/dawn/src/tint/transform/binding_remapper.cc
@@ -28,9 +28,7 @@ TINT_INSTANTIATE_TYPEINFO(tint::transform::BindingRemapper::Remappings);
namespace tint::transform {
-BindingRemapper::Remappings::Remappings(BindingPoints bp,
- AccessControls ac,
- bool may_collide)
+BindingRemapper::Remappings::Remappings(BindingPoints bp, AccessControls ac, bool may_collide)
: binding_points(std::move(bp)),
access_controls(std::move(ac)),
allow_collisions(may_collide) {}
@@ -42,120 +40,112 @@ BindingRemapper::BindingRemapper() = default;
BindingRemapper::~BindingRemapper() = default;
bool BindingRemapper::ShouldRun(const Program*, const DataMap& inputs) const {
- if (auto* remappings = inputs.Get<Remappings>()) {
- return !remappings->binding_points.empty() ||
- !remappings->access_controls.empty();
- }
- return false;
+ if (auto* remappings = inputs.Get<Remappings>()) {
+ return !remappings->binding_points.empty() || !remappings->access_controls.empty();
+ }
+ return false;
}
-void BindingRemapper::Run(CloneContext& ctx,
- const DataMap& inputs,
- DataMap&) const {
- auto* remappings = inputs.Get<Remappings>();
- if (!remappings) {
- ctx.dst->Diagnostics().add_error(
- diag::System::Transform,
- "missing transform data for " + std::string(TypeInfo().name));
- return;
- }
-
- // A set of post-remapped binding points that need to be decorated with a
- // DisableValidationAttribute to disable binding-point-collision validation
- std::unordered_set<sem::BindingPoint> add_collision_attr;
-
- if (remappings->allow_collisions) {
- // Scan for binding point collisions generated by this transform.
- // Populate all collisions in the `add_collision_attr` set.
- for (auto* func_ast : ctx.src->AST().Functions()) {
- if (!func_ast->IsEntryPoint()) {
- continue;
- }
- auto* func = ctx.src->Sem().Get(func_ast);
- std::unordered_map<sem::BindingPoint, int> binding_point_counts;
- for (auto* var : func->TransitivelyReferencedGlobals()) {
- if (auto binding_point = var->Declaration()->BindingPoint()) {
- BindingPoint from{binding_point.group->value,
- binding_point.binding->value};
- auto bp_it = remappings->binding_points.find(from);
- if (bp_it != remappings->binding_points.end()) {
- // Remapped
- BindingPoint to = bp_it->second;
- if (binding_point_counts[to]++) {
- add_collision_attr.emplace(to);
+void BindingRemapper::Run(CloneContext& ctx, const DataMap& inputs, DataMap&) const {
+ auto* remappings = inputs.Get<Remappings>();
+ if (!remappings) {
+ ctx.dst->Diagnostics().add_error(
+ diag::System::Transform, "missing transform data for " + std::string(TypeInfo().name));
+ return;
+ }
+
+ // A set of post-remapped binding points that need to be decorated with a
+ // DisableValidationAttribute to disable binding-point-collision validation
+ std::unordered_set<sem::BindingPoint> add_collision_attr;
+
+ if (remappings->allow_collisions) {
+ // Scan for binding point collisions generated by this transform.
+ // Populate all collisions in the `add_collision_attr` set.
+ for (auto* func_ast : ctx.src->AST().Functions()) {
+ if (!func_ast->IsEntryPoint()) {
+ continue;
}
- } else {
- // No remapping
- if (binding_point_counts[from]++) {
- add_collision_attr.emplace(from);
+ auto* func = ctx.src->Sem().Get(func_ast);
+ std::unordered_map<sem::BindingPoint, int> binding_point_counts;
+ for (auto* var : func->TransitivelyReferencedGlobals()) {
+ if (auto binding_point = var->Declaration()->BindingPoint()) {
+ BindingPoint from{binding_point.group->value, binding_point.binding->value};
+ auto bp_it = remappings->binding_points.find(from);
+ if (bp_it != remappings->binding_points.end()) {
+ // Remapped
+ BindingPoint to = bp_it->second;
+ if (binding_point_counts[to]++) {
+ add_collision_attr.emplace(to);
+ }
+ } else {
+ // No remapping
+ if (binding_point_counts[from]++) {
+ add_collision_attr.emplace(from);
+ }
+ }
+ }
}
- }
}
- }
}
- }
-
- for (auto* var : ctx.src->AST().GlobalVariables()) {
- if (auto binding_point = var->BindingPoint()) {
- // The original binding point
- BindingPoint from{binding_point.group->value,
- binding_point.binding->value};
-
- // The binding point after remapping
- BindingPoint bp = from;
-
- // Replace any group or binding attributes.
- // Note: This has to be performed *before* remapping access controls, as
- // `ctx.Clone(var->attributes)` depend on these replacements.
- auto bp_it = remappings->binding_points.find(from);
- if (bp_it != remappings->binding_points.end()) {
- BindingPoint to = bp_it->second;
- auto* new_group = ctx.dst->create<ast::GroupAttribute>(to.group);
- auto* new_binding = ctx.dst->create<ast::BindingAttribute>(to.binding);
-
- ctx.Replace(binding_point.group, new_group);
- ctx.Replace(binding_point.binding, new_binding);
- bp = to;
- }
-
- // Replace any access controls.
- auto ac_it = remappings->access_controls.find(from);
- if (ac_it != remappings->access_controls.end()) {
- ast::Access ac = ac_it->second;
- if (ac > ast::Access::kLastValid) {
- ctx.dst->Diagnostics().add_error(
- diag::System::Transform,
- "invalid access mode (" +
- std::to_string(static_cast<uint32_t>(ac)) + ")");
- return;
- }
- auto* sem = ctx.src->Sem().Get(var);
- if (sem->StorageClass() != ast::StorageClass::kStorage) {
- ctx.dst->Diagnostics().add_error(
- diag::System::Transform,
- "cannot apply access control to variable with storage class " +
- std::string(ast::ToString(sem->StorageClass())));
- return;
+
+ for (auto* var : ctx.src->AST().GlobalVariables()) {
+ if (auto binding_point = var->BindingPoint()) {
+ // The original binding point
+ BindingPoint from{binding_point.group->value, binding_point.binding->value};
+
+ // The binding point after remapping
+ BindingPoint bp = from;
+
+ // Replace any group or binding attributes.
+ // Note: This has to be performed *before* remapping access controls, as
+ // `ctx.Clone(var->attributes)` depend on these replacements.
+ auto bp_it = remappings->binding_points.find(from);
+ if (bp_it != remappings->binding_points.end()) {
+ BindingPoint to = bp_it->second;
+ auto* new_group = ctx.dst->create<ast::GroupAttribute>(to.group);
+ auto* new_binding = ctx.dst->create<ast::BindingAttribute>(to.binding);
+
+ ctx.Replace(binding_point.group, new_group);
+ ctx.Replace(binding_point.binding, new_binding);
+ bp = to;
+ }
+
+ // Replace any access controls.
+ auto ac_it = remappings->access_controls.find(from);
+ if (ac_it != remappings->access_controls.end()) {
+ ast::Access ac = ac_it->second;
+ if (ac > ast::Access::kLastValid) {
+ ctx.dst->Diagnostics().add_error(
+ diag::System::Transform,
+ "invalid access mode (" + std::to_string(static_cast<uint32_t>(ac)) + ")");
+ return;
+ }
+ auto* sem = ctx.src->Sem().Get(var);
+ if (sem->StorageClass() != ast::StorageClass::kStorage) {
+ ctx.dst->Diagnostics().add_error(
+ diag::System::Transform,
+ "cannot apply access control to variable with storage class " +
+ std::string(ast::ToString(sem->StorageClass())));
+ return;
+ }
+ auto* ty = sem->Type()->UnwrapRef();
+ const ast::Type* inner_ty = CreateASTTypeFor(ctx, ty);
+ auto* new_var = ctx.dst->create<ast::Variable>(
+ ctx.Clone(var->source), ctx.Clone(var->symbol), var->declared_storage_class, ac,
+ inner_ty, false, false, ctx.Clone(var->constructor),
+ ctx.Clone(var->attributes));
+ ctx.Replace(var, new_var);
+ }
+
+ // Add `DisableValidationAttribute`s if required
+ if (add_collision_attr.count(bp)) {
+ auto* attribute = ctx.dst->Disable(ast::DisabledValidation::kBindingPointCollision);
+ ctx.InsertBefore(var->attributes, *var->attributes.begin(), attribute);
+ }
}
- auto* ty = sem->Type()->UnwrapRef();
- const ast::Type* inner_ty = CreateASTTypeFor(ctx, ty);
- auto* new_var = ctx.dst->create<ast::Variable>(
- ctx.Clone(var->source), ctx.Clone(var->symbol),
- var->declared_storage_class, ac, inner_ty, false, false,
- ctx.Clone(var->constructor), ctx.Clone(var->attributes));
- ctx.Replace(var, new_var);
- }
-
- // Add `DisableValidationAttribute`s if required
- if (add_collision_attr.count(bp)) {
- auto* attribute =
- ctx.dst->Disable(ast::DisabledValidation::kBindingPointCollision);
- ctx.InsertBefore(var->attributes, *var->attributes.begin(), attribute);
- }
}
- }
- ctx.Clone();
+ ctx.Clone();
}
} // namespace tint::transform
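Illustrative sketch (not part of the patch): the reindented BindingRemapper::Run() logic is unchanged — each global's @group/@binding pair is looked up in the remapping table, and when collisions are allowed, any post-remap binding point hit more than once within an entry point is recorded so a validation-disabling attribute can be attached. A simplified, standalone C++ sketch of that lookup-and-count step is below; the BindingPoint struct and the example mappings are hypothetical stand-ins for Tint's sem::BindingPoint and the Remappings data.

#include <cstdint>
#include <iostream>
#include <map>
#include <set>
#include <tuple>
#include <vector>

// Plain-std stand-in for sem::BindingPoint, for illustration only.
struct BindingPoint {
    uint32_t group;
    uint32_t binding;
    bool operator<(const BindingPoint& o) const {
        return std::tie(group, binding) < std::tie(o.group, o.binding);
    }
};

int main() {
    // Old binding point -> new binding point (hypothetical example data).
    std::map<BindingPoint, BindingPoint> binding_points = {
        {{2, 1}, {1, 1}},
        {{3, 2}, {1, 1}},  // lands on the same target as {2, 1}
        {{4, 3}, {5, 4}},
    };
    // Globals referenced by one entry point, identified by their old binding.
    std::vector<BindingPoint> referenced = {{2, 1}, {3, 2}, {4, 3}};

    std::map<BindingPoint, int> counts;
    std::set<BindingPoint> collisions;
    for (const auto& from : referenced) {
        auto it = binding_points.find(from);
        BindingPoint to = (it != binding_points.end()) ? it->second : from;
        if (counts[to]++) {  // a second global mapped onto `to` => collision
            collisions.insert(to);
        }
    }
    for (const auto& bp : collisions) {
        std::cout << "collision at @group(" << bp.group
                  << ") @binding(" << bp.binding << ")\n";
    }
    return 0;
}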
diff --git a/chromium/third_party/dawn/src/tint/transform/binding_remapper.h b/chromium/third_party/dawn/src/tint/transform/binding_remapper.h
index 3e9f613470a..77fc5bce87d 100644
--- a/chromium/third_party/dawn/src/tint/transform/binding_remapper.h
+++ b/chromium/third_party/dawn/src/tint/transform/binding_remapper.h
@@ -29,60 +29,57 @@ using BindingPoint = sem::BindingPoint;
/// BindingRemapper is a transform used to remap resource binding points and
/// access controls.
class BindingRemapper final : public Castable<BindingRemapper, Transform> {
- public:
- /// BindingPoints is a map of old binding point to new binding point
- using BindingPoints = std::unordered_map<BindingPoint, BindingPoint>;
+ public:
+ /// BindingPoints is a map of old binding point to new binding point
+ using BindingPoints = std::unordered_map<BindingPoint, BindingPoint>;
- /// AccessControls is a map of old binding point to new access control
- using AccessControls = std::unordered_map<BindingPoint, ast::Access>;
+ /// AccessControls is a map of old binding point to new access control
+ using AccessControls = std::unordered_map<BindingPoint, ast::Access>;
+
+ /// Remappings is consumed by the BindingRemapper transform.
+ /// Data holds information about shader usage and constant buffer offsets.
+ struct Remappings final : public Castable<Data, transform::Data> {
+ /// Constructor
+ /// @param bp a map of new binding points
+ /// @param ac a map of new access controls
+ /// @param may_collide If true, then validation will be disabled for
+ /// binding point collisions generated by this transform
+ Remappings(BindingPoints bp, AccessControls ac, bool may_collide = true);
+
+ /// Copy constructor
+ Remappings(const Remappings&);
+
+ /// Destructor
+ ~Remappings() override;
+
+ /// A map of old binding point to new binding point
+ const BindingPoints binding_points;
+
+ /// A map of old binding point to new access controls
+ const AccessControls access_controls;
+
+ /// If true, then validation will be disabled for binding point collisions
+ /// generated by this transform
+ const bool allow_collisions;
+ };
- /// Remappings is consumed by the BindingRemapper transform.
- /// Data holds information about shader usage and constant buffer offsets.
- struct Remappings final : public Castable<Data, transform::Data> {
/// Constructor
- /// @param bp a map of new binding points
- /// @param ac a map of new access controls
- /// @param may_collide If true, then validation will be disabled for
- /// binding point collisions generated by this transform
- Remappings(BindingPoints bp, AccessControls ac, bool may_collide = true);
-
- /// Copy constructor
- Remappings(const Remappings&);
-
- /// Destructor
- ~Remappings() override;
-
- /// A map of old binding point to new binding point
- const BindingPoints binding_points;
-
- /// A map of old binding point to new access controls
- const AccessControls access_controls;
-
- /// If true, then validation will be disabled for binding point collisions
- /// generated by this transform
- const bool allow_collisions;
- };
-
- /// Constructor
- BindingRemapper();
- ~BindingRemapper() override;
-
- /// @param program the program to inspect
- /// @param data optional extra transform-specific input data
- /// @returns true if this transform should be run for the given program
- bool ShouldRun(const Program* program,
- const DataMap& data = {}) const override;
-
- protected:
- /// Runs the transform using the CloneContext built for transforming a
- /// program. Run() is responsible for calling Clone() on the CloneContext.
- /// @param ctx the CloneContext primed with the input program and
- /// ProgramBuilder
- /// @param inputs optional extra transform-specific input data
- /// @param outputs optional extra transform-specific output data
- void Run(CloneContext& ctx,
- const DataMap& inputs,
- DataMap& outputs) const override;
+ BindingRemapper();
+ ~BindingRemapper() override;
+
+ /// @param program the program to inspect
+ /// @param data optional extra transform-specific input data
+ /// @returns true if this transform should be run for the given program
+ bool ShouldRun(const Program* program, const DataMap& data = {}) const override;
+
+ protected:
+ /// Runs the transform using the CloneContext built for transforming a
+ /// program. Run() is responsible for calling Clone() on the CloneContext.
+ /// @param ctx the CloneContext primed with the input program and
+ /// ProgramBuilder
+ /// @param inputs optional extra transform-specific input data
+ /// @param outputs optional extra transform-specific output data
+ void Run(CloneContext& ctx, const DataMap& inputs, DataMap& outputs) const override;
};
} // namespace tint::transform
diff --git a/chromium/third_party/dawn/src/tint/transform/binding_remapper_test.cc b/chromium/third_party/dawn/src/tint/transform/binding_remapper_test.cc
index 70c723216bb..32748866081 100644
--- a/chromium/third_party/dawn/src/tint/transform/binding_remapper_test.cc
+++ b/chromium/third_party/dawn/src/tint/transform/binding_remapper_test.cc
@@ -24,48 +24,48 @@ namespace {
using BindingRemapperTest = TransformTest;
TEST_F(BindingRemapperTest, ShouldRunNoRemappings) {
- auto* src = R"()";
+ auto* src = R"()";
- EXPECT_FALSE(ShouldRun<BindingRemapper>(src));
+ EXPECT_FALSE(ShouldRun<BindingRemapper>(src));
}
TEST_F(BindingRemapperTest, ShouldRunEmptyRemappings) {
- auto* src = R"()";
+ auto* src = R"()";
- DataMap data;
- data.Add<BindingRemapper::Remappings>(BindingRemapper::BindingPoints{},
- BindingRemapper::AccessControls{});
+ DataMap data;
+ data.Add<BindingRemapper::Remappings>(BindingRemapper::BindingPoints{},
+ BindingRemapper::AccessControls{});
- EXPECT_FALSE(ShouldRun<BindingRemapper>(src, data));
+ EXPECT_FALSE(ShouldRun<BindingRemapper>(src, data));
}
TEST_F(BindingRemapperTest, ShouldRunBindingPointRemappings) {
- auto* src = R"()";
+ auto* src = R"()";
- DataMap data;
- data.Add<BindingRemapper::Remappings>(
- BindingRemapper::BindingPoints{
- {{2, 1}, {1, 2}},
- },
- BindingRemapper::AccessControls{});
+ DataMap data;
+ data.Add<BindingRemapper::Remappings>(
+ BindingRemapper::BindingPoints{
+ {{2, 1}, {1, 2}},
+ },
+ BindingRemapper::AccessControls{});
- EXPECT_TRUE(ShouldRun<BindingRemapper>(src, data));
+ EXPECT_TRUE(ShouldRun<BindingRemapper>(src, data));
}
TEST_F(BindingRemapperTest, ShouldRunAccessControlRemappings) {
- auto* src = R"()";
+ auto* src = R"()";
- DataMap data;
- data.Add<BindingRemapper::Remappings>(BindingRemapper::BindingPoints{},
- BindingRemapper::AccessControls{
- {{2, 1}, ast::Access::kWrite},
- });
+ DataMap data;
+ data.Add<BindingRemapper::Remappings>(BindingRemapper::BindingPoints{},
+ BindingRemapper::AccessControls{
+ {{2, 1}, ast::Access::kWrite},
+ });
- EXPECT_TRUE(ShouldRun<BindingRemapper>(src, data));
+ EXPECT_TRUE(ShouldRun<BindingRemapper>(src, data));
}
TEST_F(BindingRemapperTest, NoRemappings) {
- auto* src = R"(
+ auto* src = R"(
struct S {
a : f32,
}
@@ -74,23 +74,23 @@ struct S {
@group(3) @binding(2) var<storage, read> b : S;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn f() {
}
)";
- auto* expect = src;
+ auto* expect = src;
- DataMap data;
- data.Add<BindingRemapper::Remappings>(BindingRemapper::BindingPoints{},
- BindingRemapper::AccessControls{});
- auto got = Run<BindingRemapper>(src, data);
+ DataMap data;
+ data.Add<BindingRemapper::Remappings>(BindingRemapper::BindingPoints{},
+ BindingRemapper::AccessControls{});
+ auto got = Run<BindingRemapper>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(BindingRemapperTest, RemapBindingPoints) {
- auto* src = R"(
+ auto* src = R"(
struct S {
a : f32,
};
@@ -99,12 +99,12 @@ struct S {
@group(3) @binding(2) var<storage, read> b : S;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct S {
a : f32,
}
@@ -113,26 +113,26 @@ struct S {
@group(3) @binding(2) var<storage, read> b : S;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn f() {
}
)";
- DataMap data;
- data.Add<BindingRemapper::Remappings>(
- BindingRemapper::BindingPoints{
- {{2, 1}, {1, 2}}, // Remap
- {{4, 5}, {6, 7}}, // Not found
- // Keep @group(3) @binding(2) as is
- },
- BindingRemapper::AccessControls{});
- auto got = Run<BindingRemapper>(src, data);
+ DataMap data;
+ data.Add<BindingRemapper::Remappings>(
+ BindingRemapper::BindingPoints{
+ {{2, 1}, {1, 2}}, // Remap
+ {{4, 5}, {6, 7}}, // Not found
+ // Keep @group(3) @binding(2) as is
+ },
+ BindingRemapper::AccessControls{});
+ auto got = Run<BindingRemapper>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(BindingRemapperTest, RemapAccessControls) {
- auto* src = R"(
+ auto* src = R"(
struct S {
a : f32,
};
@@ -143,12 +143,12 @@ struct S {
@group(4) @binding(3) var<storage, read> c : S;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct S {
a : f32,
}
@@ -159,26 +159,26 @@ struct S {
@group(4) @binding(3) var<storage, read> c : S;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn f() {
}
)";
- DataMap data;
- data.Add<BindingRemapper::Remappings>(
- BindingRemapper::BindingPoints{},
- BindingRemapper::AccessControls{
- {{2, 1}, ast::Access::kWrite}, // Modify access control
- // Keep @group(3) @binding(2) as is
- {{4, 3}, ast::Access::kRead}, // Add access control
- });
- auto got = Run<BindingRemapper>(src, data);
+ DataMap data;
+ data.Add<BindingRemapper::Remappings>(
+ BindingRemapper::BindingPoints{},
+ BindingRemapper::AccessControls{
+ {{2, 1}, ast::Access::kWrite}, // Modify access control
+ // Keep @group(3) @binding(2) as is
+ {{4, 3}, ast::Access::kRead}, // Add access control
+ });
+ auto got = Run<BindingRemapper>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(BindingRemapperTest, RemapAll) {
- auto* src = R"(
+ auto* src = R"(
struct S {
a : f32,
};
@@ -187,12 +187,12 @@ struct S {
@group(3) @binding(2) var<storage, read> b : S;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct S {
a : f32,
}
@@ -201,28 +201,28 @@ struct S {
@group(6) @binding(7) var<storage, write> b : S;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn f() {
}
)";
- DataMap data;
- data.Add<BindingRemapper::Remappings>(
- BindingRemapper::BindingPoints{
- {{2, 1}, {4, 5}},
- {{3, 2}, {6, 7}},
- },
- BindingRemapper::AccessControls{
- {{2, 1}, ast::Access::kWrite},
- {{3, 2}, ast::Access::kWrite},
- });
- auto got = Run<BindingRemapper>(src, data);
+ DataMap data;
+ data.Add<BindingRemapper::Remappings>(
+ BindingRemapper::BindingPoints{
+ {{2, 1}, {4, 5}},
+ {{3, 2}, {6, 7}},
+ },
+ BindingRemapper::AccessControls{
+ {{2, 1}, ast::Access::kWrite},
+ {{3, 2}, ast::Access::kWrite},
+ });
+ auto got = Run<BindingRemapper>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(BindingRemapperTest, BindingCollisionsSameEntryPoint) {
- auto* src = R"(
+ auto* src = R"(
struct S {
i : i32,
};
@@ -235,13 +235,13 @@ struct S {
@group(5) @binding(4) var<storage, read> d : S;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn f() {
let x : i32 = (((a.i + b.i) + c.i) + d.i);
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct S {
i : i32,
}
@@ -254,27 +254,27 @@ struct S {
@internal(disable_validation__binding_point_collision) @group(5) @binding(4) var<storage, read> d : S;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn f() {
let x : i32 = (((a.i + b.i) + c.i) + d.i);
}
)";
- DataMap data;
- data.Add<BindingRemapper::Remappings>(
- BindingRemapper::BindingPoints{
- {{2, 1}, {1, 1}},
- {{3, 2}, {1, 1}},
- {{4, 3}, {5, 4}},
- },
- BindingRemapper::AccessControls{}, true);
- auto got = Run<BindingRemapper>(src, data);
+ DataMap data;
+ data.Add<BindingRemapper::Remappings>(
+ BindingRemapper::BindingPoints{
+ {{2, 1}, {1, 1}},
+ {{3, 2}, {1, 1}},
+ {{4, 3}, {5, 4}},
+ },
+ BindingRemapper::AccessControls{}, true);
+ auto got = Run<BindingRemapper>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(BindingRemapperTest, BindingCollisionsDifferentEntryPoints) {
- auto* src = R"(
+ auto* src = R"(
struct S {
i : i32,
};
@@ -287,18 +287,18 @@ struct S {
@group(5) @binding(4) var<storage, read> d : S;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn f1() {
let x : i32 = (a.i + c.i);
}
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn f2() {
let x : i32 = (b.i + d.i);
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct S {
i : i32,
}
@@ -311,32 +311,32 @@ struct S {
@group(5) @binding(4) var<storage, read> d : S;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn f1() {
let x : i32 = (a.i + c.i);
}
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn f2() {
let x : i32 = (b.i + d.i);
}
)";
- DataMap data;
- data.Add<BindingRemapper::Remappings>(
- BindingRemapper::BindingPoints{
- {{2, 1}, {1, 1}},
- {{3, 2}, {1, 1}},
- {{4, 3}, {5, 4}},
- },
- BindingRemapper::AccessControls{}, true);
- auto got = Run<BindingRemapper>(src, data);
+ DataMap data;
+ data.Add<BindingRemapper::Remappings>(
+ BindingRemapper::BindingPoints{
+ {{2, 1}, {1, 1}},
+ {{3, 2}, {1, 1}},
+ {{4, 3}, {5, 4}},
+ },
+ BindingRemapper::AccessControls{}, true);
+ auto got = Run<BindingRemapper>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(BindingRemapperTest, NoData) {
- auto* src = R"(
+ auto* src = R"(
struct S {
a : f32,
}
@@ -345,16 +345,16 @@ struct S {
@group(3) @binding(2) var<storage, read> b : S;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn f() {
}
)";
- auto* expect = src;
+ auto* expect = src;
- auto got = Run<BindingRemapper>(src);
+ auto got = Run<BindingRemapper>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
} // namespace
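For reference, the remapping behaviour these BindingRemapper tests exercise can be modelled in a few lines of standalone C++. This is only a sketch of the semantics (BindingPoint, Access, ResourceVar and Remap are invented names for illustration, not Tint types), and it ignores the allow-collisions flag used by the BindingCollisions* cases:

    // Standalone model of the remapping exercised by the tests above.
    #include <cstdint>
    #include <iostream>
    #include <map>
    #include <string>
    #include <utility>
    #include <vector>

    using BindingPoint = std::pair<uint32_t, uint32_t>;  // {group, binding}
    enum class Access { kRead, kWrite };

    struct ResourceVar {
        std::string name;
        BindingPoint bp;
        Access access;
    };

    // Remap listed binding points, leave unlisted ones untouched, and apply any
    // access-control override keyed by the *original* binding point (as in the
    // RemapAll case, where {2,1} is both moved to {4,5} and switched to write).
    void Remap(std::vector<ResourceVar>& vars,
               const std::map<BindingPoint, BindingPoint>& binding_points,
               const std::map<BindingPoint, Access>& access_controls) {
        for (auto& v : vars) {
            const BindingPoint original = v.bp;
            if (auto ac = access_controls.find(original); ac != access_controls.end()) {
                v.access = ac->second;
            }
            if (auto bp = binding_points.find(original); bp != binding_points.end()) {
                v.bp = bp->second;
            }
        }
    }

    int main() {
        std::vector<ResourceVar> vars = {{"a", {2, 1}, Access::kRead},
                                         {"b", {3, 2}, Access::kRead}};
        Remap(vars, {{{2, 1}, {4, 5}}, {{3, 2}, {6, 7}}},
              {{{2, 1}, Access::kWrite}, {{3, 2}, Access::kWrite}});
        for (const auto& v : vars) {
            std::cout << v.name << " -> @group(" << v.bp.first << ") @binding("
                      << v.bp.second << ") "
                      << (v.access == Access::kWrite ? "write" : "read") << "\n";
        }
    }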
diff --git a/chromium/third_party/dawn/src/tint/transform/builtin_polyfill.cc b/chromium/third_party/dawn/src/tint/transform/builtin_polyfill.cc
index c33d40fe5a5..9bde1cc7792 100644
--- a/chromium/third_party/dawn/src/tint/transform/builtin_polyfill.cc
+++ b/chromium/third_party/dawn/src/tint/transform/builtin_polyfill.cc
@@ -21,6 +21,8 @@
#include "src/tint/sem/call.h"
#include "src/tint/utils/map.h"
+using namespace tint::number_suffixes; // NOLINT
+
TINT_INSTANTIATE_TYPEINFO(tint::transform::BuiltinPolyfill);
TINT_INSTANTIATE_TYPEINFO(tint::transform::BuiltinPolyfill::Config);
@@ -28,569 +30,526 @@ namespace tint::transform {
/// The PIMPL state for the BuiltinPolyfill transform
struct BuiltinPolyfill::State {
- /// Constructor
- /// @param c the CloneContext
- /// @param p the builtins to polyfill
- State(CloneContext& c, Builtins p) : ctx(c), polyfill(p) {}
-
- /// The clone context
- CloneContext& ctx;
- /// The builtins to polyfill
- Builtins polyfill;
- /// The destination program builder
- ProgramBuilder& b = *ctx.dst;
- /// The source clone context
- const sem::Info& sem = ctx.src->Sem();
-
- /// Builds the polyfill function for the `countLeadingZeros` builtin
- /// @param ty the parameter and return type for the function
- /// @return the polyfill function name
- Symbol countLeadingZeros(const sem::Type* ty) {
- auto name = b.Symbols().New("tint_count_leading_zeros");
- uint32_t width = WidthOf(ty);
-
- // Returns either u32 or vecN<u32>
- auto U = [&]() -> const ast::Type* {
- if (width == 1) {
- return b.ty.u32();
- }
- return b.ty.vec<u32>(width);
- };
- auto V = [&](uint32_t value) -> const ast::Expression* {
- return ScalarOrVector(width, value);
- };
- b.Func(
- name, {b.Param("v", T(ty))}, T(ty),
- {
- // var x = U(v);
- b.Decl(b.Var("x", nullptr, b.Construct(U(), b.Expr("v")))),
- // let b16 = select(0, 16, x <= 0x0000ffff);
- b.Decl(b.Const("b16", nullptr,
- b.Call("select", V(0), V(16),
- b.LessThanEqual("x", V(0x0000ffff))))),
- // x = x << b16;
- b.Assign("x", b.Shl("x", "b16")),
- // let b8 = select(0, 8, x <= 0x00ffffff);
- b.Decl(b.Const("b8", nullptr,
- b.Call("select", V(0), V(8),
- b.LessThanEqual("x", V(0x00ffffff))))),
- // x = x << b8;
- b.Assign("x", b.Shl("x", "b8")),
- // let b4 = select(0, 4, x <= 0x0fffffff);
- b.Decl(b.Const("b4", nullptr,
- b.Call("select", V(0), V(4),
- b.LessThanEqual("x", V(0x0fffffff))))),
- // x = x << b4;
- b.Assign("x", b.Shl("x", "b4")),
- // let b2 = select(0, 2, x <= 0x3fffffff);
- b.Decl(b.Const("b2", nullptr,
- b.Call("select", V(0), V(2),
- b.LessThanEqual("x", V(0x3fffffff))))),
- // x = x << b2;
- b.Assign("x", b.Shl("x", "b2")),
- // let b1 = select(0, 1, x <= 0x7fffffff);
- b.Decl(b.Const("b1", nullptr,
- b.Call("select", V(0), V(1),
- b.LessThanEqual("x", V(0x7fffffff))))),
- // let is_zero = select(0, 1, x == 0);
- b.Decl(b.Const("is_zero", nullptr,
- b.Call("select", V(0), V(1), b.Equal("x", V(0))))),
- // return R((b16 | b8 | b4 | b2 | b1) + zero);
- b.Return(b.Construct(
- T(ty),
- b.Add(b.Or(b.Or(b.Or(b.Or("b16", "b8"), "b4"), "b2"), "b1"),
- "is_zero"))),
- });
- return name;
- }
-
- /// Builds the polyfill function for the `countTrailingZeros` builtin
- /// @param ty the parameter and return type for the function
- /// @return the polyfill function name
- Symbol countTrailingZeros(const sem::Type* ty) {
- auto name = b.Symbols().New("tint_count_trailing_zeros");
- uint32_t width = WidthOf(ty);
-
- // Returns either u32 or vecN<u32>
- auto U = [&]() -> const ast::Type* {
- if (width == 1) {
- return b.ty.u32();
- }
- return b.ty.vec<u32>(width);
- };
- auto V = [&](uint32_t value) -> const ast::Expression* {
- return ScalarOrVector(width, value);
- };
- auto B = [&](const ast::Expression* value) -> const ast::Expression* {
- if (width == 1) {
- return b.Construct<bool>(value);
- }
- return b.Construct(b.ty.vec<bool>(width), value);
- };
- b.Func(
- name, {b.Param("v", T(ty))}, T(ty),
- {
- // var x = U(v);
- b.Decl(b.Var("x", nullptr, b.Construct(U(), b.Expr("v")))),
- // let b16 = select(16, 0, bool(x & 0x0000ffff));
- b.Decl(b.Const(
- "b16", nullptr,
- b.Call("select", V(16), V(0), B(b.And("x", V(0x0000ffff)))))),
- // x = x >> b16;
- b.Assign("x", b.Shr("x", "b16")),
- // let b8 = select(8, 0, bool(x & 0x000000ff));
- b.Decl(b.Const(
- "b8", nullptr,
- b.Call("select", V(8), V(0), B(b.And("x", V(0x000000ff)))))),
- // x = x >> b8;
- b.Assign("x", b.Shr("x", "b8")),
- // let b4 = select(4, 0, bool(x & 0x0000000f));
- b.Decl(b.Const(
- "b4", nullptr,
- b.Call("select", V(4), V(0), B(b.And("x", V(0x0000000f)))))),
- // x = x >> b4;
- b.Assign("x", b.Shr("x", "b4")),
- // let b2 = select(2, 0, bool(x & 0x00000003));
- b.Decl(b.Const(
- "b2", nullptr,
- b.Call("select", V(2), V(0), B(b.And("x", V(0x00000003)))))),
- // x = x >> b2;
- b.Assign("x", b.Shr("x", "b2")),
- // let b1 = select(1, 0, bool(x & 0x00000001));
- b.Decl(b.Const(
- "b1", nullptr,
- b.Call("select", V(1), V(0), B(b.And("x", V(0x00000001)))))),
- // let is_zero = select(0, 1, x == 0);
- b.Decl(b.Const("is_zero", nullptr,
- b.Call("select", V(0), V(1), b.Equal("x", V(0))))),
- // return R((b16 | b8 | b4 | b2 | b1) + zero);
- b.Return(b.Construct(
- T(ty),
- b.Add(b.Or(b.Or(b.Or(b.Or("b16", "b8"), "b4"), "b2"), "b1"),
- "is_zero"))),
- });
- return name;
- }
-
- /// Builds the polyfill function for the `extractBits` builtin
- /// @param ty the parameter and return type for the function
- /// @return the polyfill function name
- Symbol extractBits(const sem::Type* ty) {
- auto name = b.Symbols().New("tint_extract_bits");
- uint32_t width = WidthOf(ty);
-
- constexpr uint32_t W = 32u; // 32-bit
-
- auto vecN_u32 =
- [&](const ast::Expression* value) -> const ast::Expression* {
- if (width == 1) {
- return value;
- }
- return b.Construct(b.ty.vec<u32>(width), value);
- };
-
- ast::StatementList body = {
- b.Decl(b.Const("s", nullptr, b.Call("min", "offset", W))),
- b.Decl(b.Const("e", nullptr, b.Call("min", W, b.Add("s", "count")))),
- };
-
- switch (polyfill.extract_bits) {
- case Level::kFull:
- body.emplace_back(b.Decl(b.Const("shl", nullptr, b.Sub(W, "e"))));
- body.emplace_back(b.Decl(b.Const("shr", nullptr, b.Add("shl", "s"))));
- body.emplace_back(b.Return(b.Shr(b.Shl("v", vecN_u32(b.Expr("shl"))),
- vecN_u32(b.Expr("shr")))));
- break;
- case Level::kClampParameters:
- body.emplace_back(
- b.Return(b.Call("extractBits", "v", "s", b.Sub("e", "s"))));
- break;
- default:
- TINT_ICE(Transform, b.Diagnostics())
- << "unhandled polyfill level: "
- << static_cast<int>(polyfill.extract_bits);
- return {};
+ /// Constructor
+ /// @param c the CloneContext
+ /// @param p the builtins to polyfill
+ State(CloneContext& c, Builtins p) : ctx(c), polyfill(p) {}
+
+ /// The clone context
+ CloneContext& ctx;
+ /// The builtins to polyfill
+ Builtins polyfill;
+ /// The destination program builder
+ ProgramBuilder& b = *ctx.dst;
+ /// The source clone context
+ const sem::Info& sem = ctx.src->Sem();
+
+ /// Builds the polyfill function for the `countLeadingZeros` builtin
+ /// @param ty the parameter and return type for the function
+ /// @return the polyfill function name
+ Symbol countLeadingZeros(const sem::Type* ty) {
+ auto name = b.Symbols().New("tint_count_leading_zeros");
+ uint32_t width = WidthOf(ty);
+
+ // Returns either u32 or vecN<u32>
+ auto U = [&]() -> const ast::Type* {
+ if (width == 1) {
+ return b.ty.u32();
+ }
+ return b.ty.vec<u32>(width);
+ };
+ auto V = [&](uint32_t value) -> const ast::Expression* {
+ return ScalarOrVector(width, u32(value));
+ };
+ b.Func(
+ name, {b.Param("v", T(ty))}, T(ty),
+ {
+ // var x = U(v);
+ b.Decl(b.Var("x", nullptr, b.Construct(U(), b.Expr("v")))),
+ // let b16 = select(0, 16, x <= 0x0000ffff);
+ b.Decl(b.Let("b16", nullptr,
+ b.Call("select", V(0), V(16), b.LessThanEqual("x", V(0x0000ffff))))),
+ // x = x << b16;
+ b.Assign("x", b.Shl("x", "b16")),
+ // let b8 = select(0, 8, x <= 0x00ffffff);
+ b.Decl(b.Let("b8", nullptr,
+ b.Call("select", V(0), V(8), b.LessThanEqual("x", V(0x00ffffff))))),
+ // x = x << b8;
+ b.Assign("x", b.Shl("x", "b8")),
+ // let b4 = select(0, 4, x <= 0x0fffffff);
+ b.Decl(b.Let("b4", nullptr,
+ b.Call("select", V(0), V(4), b.LessThanEqual("x", V(0x0fffffff))))),
+ // x = x << b4;
+ b.Assign("x", b.Shl("x", "b4")),
+ // let b2 = select(0, 2, x <= 0x3fffffff);
+ b.Decl(b.Let("b2", nullptr,
+ b.Call("select", V(0), V(2), b.LessThanEqual("x", V(0x3fffffff))))),
+ // x = x << b2;
+ b.Assign("x", b.Shl("x", "b2")),
+ // let b1 = select(0, 1, x <= 0x7fffffff);
+ b.Decl(b.Let("b1", nullptr,
+ b.Call("select", V(0), V(1), b.LessThanEqual("x", V(0x7fffffff))))),
+ // let is_zero = select(0, 1, x == 0);
+ b.Decl(b.Let("is_zero", nullptr, b.Call("select", V(0), V(1), b.Equal("x", V(0))))),
+ // return R((b16 | b8 | b4 | b2 | b1) + zero);
+ b.Return(b.Construct(
+ T(ty),
+ b.Add(b.Or(b.Or(b.Or(b.Or("b16", "b8"), "b4"), "b2"), "b1"), "is_zero"))),
+ });
+ return name;
}
- b.Func(name,
- {
- b.Param("v", T(ty)),
- b.Param("offset", b.ty.u32()),
- b.Param("count", b.ty.u32()),
- },
- T(ty), body);
-
- return name;
- }
-
- /// Builds the polyfill function for the `firstLeadingBit` builtin
- /// @param ty the parameter and return type for the function
- /// @return the polyfill function name
- Symbol firstLeadingBit(const sem::Type* ty) {
- auto name = b.Symbols().New("tint_first_leading_bit");
- uint32_t width = WidthOf(ty);
-
- // Returns either u32 or vecN<u32>
- auto U = [&]() -> const ast::Type* {
- if (width == 1) {
- return b.ty.u32();
- }
- return b.ty.vec<u32>(width);
- };
- auto V = [&](uint32_t value) -> const ast::Expression* {
- return ScalarOrVector(width, value);
- };
- auto B = [&](const ast::Expression* value) -> const ast::Expression* {
- if (width == 1) {
- return b.Construct<bool>(value);
- }
- return b.Construct(b.ty.vec<bool>(width), value);
- };
-
- const ast::Expression* x = nullptr;
- if (ty->is_unsigned_scalar_or_vector()) {
- x = b.Expr("v");
- } else {
- // If ty is signed, then the value is inverted if the sign is negative
- x = b.Call("select", //
- b.Construct(U(), "v"), //
- b.Construct(U(), b.Complement("v")), //
- b.LessThan("v", ScalarOrVector(width, 0)));
+ /// Builds the polyfill function for the `countTrailingZeros` builtin
+ /// @param ty the parameter and return type for the function
+ /// @return the polyfill function name
+ Symbol countTrailingZeros(const sem::Type* ty) {
+ auto name = b.Symbols().New("tint_count_trailing_zeros");
+ uint32_t width = WidthOf(ty);
+
+ // Returns either u32 or vecN<u32>
+ auto U = [&]() -> const ast::Type* {
+ if (width == 1) {
+ return b.ty.u32();
+ }
+ return b.ty.vec<u32>(width);
+ };
+ auto V = [&](uint32_t value) -> const ast::Expression* {
+ return ScalarOrVector(width, u32(value));
+ };
+ auto B = [&](const ast::Expression* value) -> const ast::Expression* {
+ if (width == 1) {
+ return b.Construct<bool>(value);
+ }
+ return b.Construct(b.ty.vec<bool>(width), value);
+ };
+ b.Func(
+ name, {b.Param("v", T(ty))}, T(ty),
+ {
+ // var x = U(v);
+ b.Decl(b.Var("x", nullptr, b.Construct(U(), b.Expr("v")))),
+ // let b16 = select(16, 0, bool(x & 0x0000ffff));
+ b.Decl(b.Let("b16", nullptr,
+ b.Call("select", V(16), V(0), B(b.And("x", V(0x0000ffff)))))),
+ // x = x >> b16;
+ b.Assign("x", b.Shr("x", "b16")),
+ // let b8 = select(8, 0, bool(x & 0x000000ff));
+ b.Decl(b.Let("b8", nullptr,
+ b.Call("select", V(8), V(0), B(b.And("x", V(0x000000ff)))))),
+ // x = x >> b8;
+ b.Assign("x", b.Shr("x", "b8")),
+ // let b4 = select(4, 0, bool(x & 0x0000000f));
+ b.Decl(b.Let("b4", nullptr,
+ b.Call("select", V(4), V(0), B(b.And("x", V(0x0000000f)))))),
+ // x = x >> b4;
+ b.Assign("x", b.Shr("x", "b4")),
+ // let b2 = select(2, 0, bool(x & 0x00000003));
+ b.Decl(b.Let("b2", nullptr,
+ b.Call("select", V(2), V(0), B(b.And("x", V(0x00000003)))))),
+ // x = x >> b2;
+ b.Assign("x", b.Shr("x", "b2")),
+ // let b1 = select(1, 0, bool(x & 0x00000001));
+ b.Decl(b.Let("b1", nullptr,
+ b.Call("select", V(1), V(0), B(b.And("x", V(0x00000001)))))),
+ // let is_zero = select(0, 1, x == 0);
+ b.Decl(b.Let("is_zero", nullptr, b.Call("select", V(0), V(1), b.Equal("x", V(0))))),
+ // return R((b16 | b8 | b4 | b2 | b1) + zero);
+ b.Return(b.Construct(
+ T(ty),
+ b.Add(b.Or(b.Or(b.Or(b.Or("b16", "b8"), "b4"), "b2"), "b1"), "is_zero"))),
+ });
+ return name;
}
- b.Func(name, {b.Param("v", T(ty))}, T(ty),
- {
- // var x = v; (unsigned)
- // var x = select(U(v), ~U(v), v < 0); (signed)
- b.Decl(b.Var("x", nullptr, x)),
- // let b16 = select(0, 16, bool(x & 0xffff0000));
- b.Decl(b.Const("b16", nullptr,
- b.Call("select", V(0), V(16),
- B(b.And("x", V(0xffff0000)))))),
- // x = x >> b16;
- b.Assign("x", b.Shr("x", "b16")),
- // let b8 = select(0, 8, bool(x & 0x0000ff00));
- b.Decl(b.Const(
- "b8", nullptr,
- b.Call("select", V(0), V(8), B(b.And("x", V(0x0000ff00)))))),
- // x = x >> b8;
- b.Assign("x", b.Shr("x", "b8")),
- // let b4 = select(0, 4, bool(x & 0x000000f0));
- b.Decl(b.Const(
- "b4", nullptr,
- b.Call("select", V(0), V(4), B(b.And("x", V(0x000000f0)))))),
- // x = x >> b4;
- b.Assign("x", b.Shr("x", "b4")),
- // let b2 = select(0, 2, bool(x & 0x0000000c));
- b.Decl(b.Const(
- "b2", nullptr,
- b.Call("select", V(0), V(2), B(b.And("x", V(0x0000000c)))))),
- // x = x >> b2;
- b.Assign("x", b.Shr("x", "b2")),
- // let b1 = select(0, 1, bool(x & 0x00000002));
- b.Decl(b.Const(
- "b1", nullptr,
- b.Call("select", V(0), V(1), B(b.And("x", V(0x00000002)))))),
- // let is_zero = select(0, 0xffffffff, x == 0);
- b.Decl(b.Const("is_zero", nullptr,
- b.Call("select", V(0), V(0xffffffff),
- b.Equal("x", V(0))))),
- // return R(b16 | b8 | b4 | b2 | b1 | zero);
- b.Return(b.Construct(
- T(ty),
- b.Or(b.Or(b.Or(b.Or(b.Or("b16", "b8"), "b4"), "b2"), "b1"),
- "is_zero"))),
- });
- return name;
- }
-
- /// Builds the polyfill function for the `firstTrailingBit` builtin
- /// @param ty the parameter and return type for the function
- /// @return the polyfill function name
- Symbol firstTrailingBit(const sem::Type* ty) {
- auto name = b.Symbols().New("tint_first_trailing_bit");
- uint32_t width = WidthOf(ty);
-
- // Returns either u32 or vecN<u32>
- auto U = [&]() -> const ast::Type* {
- if (width == 1) {
- return b.ty.u32();
- }
- return b.ty.vec<u32>(width);
- };
- auto V = [&](uint32_t value) -> const ast::Expression* {
- return ScalarOrVector(width, value);
- };
- auto B = [&](const ast::Expression* value) -> const ast::Expression* {
- if (width == 1) {
- return b.Construct<bool>(value);
- }
- return b.Construct(b.ty.vec<bool>(width), value);
- };
- b.Func(name, {b.Param("v", T(ty))}, T(ty),
- {
- // var x = U(v);
- b.Decl(b.Var("x", nullptr, b.Construct(U(), b.Expr("v")))),
- // let b16 = select(16, 0, bool(x & 0x0000ffff));
- b.Decl(b.Const("b16", nullptr,
- b.Call("select", V(16), V(0),
- B(b.And("x", V(0x0000ffff)))))),
- // x = x >> b16;
- b.Assign("x", b.Shr("x", "b16")),
- // let b8 = select(8, 0, bool(x & 0x000000ff));
- b.Decl(b.Const(
- "b8", nullptr,
- b.Call("select", V(8), V(0), B(b.And("x", V(0x000000ff)))))),
- // x = x >> b8;
- b.Assign("x", b.Shr("x", "b8")),
- // let b4 = select(4, 0, bool(x & 0x0000000f));
- b.Decl(b.Const(
- "b4", nullptr,
- b.Call("select", V(4), V(0), B(b.And("x", V(0x0000000f)))))),
- // x = x >> b4;
- b.Assign("x", b.Shr("x", "b4")),
- // let b2 = select(2, 0, bool(x & 0x00000003));
- b.Decl(b.Const(
- "b2", nullptr,
- b.Call("select", V(2), V(0), B(b.And("x", V(0x00000003)))))),
- // x = x >> b2;
- b.Assign("x", b.Shr("x", "b2")),
- // let b1 = select(1, 0, bool(x & 0x00000001));
- b.Decl(b.Const(
- "b1", nullptr,
- b.Call("select", V(1), V(0), B(b.And("x", V(0x00000001)))))),
- // let is_zero = select(0, 0xffffffff, x == 0);
- b.Decl(b.Const("is_zero", nullptr,
- b.Call("select", V(0), V(0xffffffff),
- b.Equal("x", V(0))))),
- // return R(b16 | b8 | b4 | b2 | b1 | is_zero);
- b.Return(b.Construct(
- T(ty),
- b.Or(b.Or(b.Or(b.Or(b.Or("b16", "b8"), "b4"), "b2"), "b1"),
- "is_zero"))),
- });
- return name;
- }
-
- /// Builds the polyfill function for the `insertBits` builtin
- /// @param ty the parameter and return type for the function
- /// @return the polyfill function name
- Symbol insertBits(const sem::Type* ty) {
- auto name = b.Symbols().New("tint_insert_bits");
- uint32_t width = WidthOf(ty);
-
- constexpr uint32_t W = 32u; // 32-bit
-
- auto V = [&](auto value) -> const ast::Expression* {
- const ast::Expression* expr = b.Expr(value);
- if (!ty->is_unsigned_scalar_or_vector()) {
- expr = b.Construct<i32>(expr);
- }
- if (ty->Is<sem::Vector>()) {
- expr = b.Construct(T(ty), expr);
- }
- return expr;
- };
- auto U = [&](auto value) -> const ast::Expression* {
- if (width == 1) {
- return b.Expr(value);
- }
- return b.vec(b.ty.u32(), width, value);
- };
-
- ast::StatementList body = {
- b.Decl(b.Const("s", nullptr, b.Call("min", "offset", W))),
- b.Decl(b.Const("e", nullptr, b.Call("min", W, b.Add("s", "count")))),
- };
-
- switch (polyfill.insert_bits) {
- case Level::kFull:
- // let mask = ((1 << s) - 1) ^ ((1 << e) - 1)
- body.emplace_back(b.Decl(b.Const(
- "mask", nullptr,
- b.Xor(b.Sub(b.Shl(1u, "s"), 1u), b.Sub(b.Shl(1u, "e"), 1u)))));
- // return ((n << s) & mask) | (v & ~mask)
- body.emplace_back(b.Return(b.Or(b.And(b.Shl("n", U("s")), V("mask")),
- b.And("v", V(b.Complement("mask"))))));
- break;
- case Level::kClampParameters:
- body.emplace_back(
- b.Return(b.Call("insertBits", "v", "n", "s", b.Sub("e", "s"))));
- break;
- default:
- TINT_ICE(Transform, b.Diagnostics())
- << "unhandled polyfill level: "
- << static_cast<int>(polyfill.insert_bits);
- return {};
+ /// Builds the polyfill function for the `extractBits` builtin
+ /// @param ty the parameter and return type for the function
+ /// @return the polyfill function name
+ Symbol extractBits(const sem::Type* ty) {
+ auto name = b.Symbols().New("tint_extract_bits");
+ uint32_t width = WidthOf(ty);
+
+ constexpr uint32_t W = 32u; // 32-bit
+
+ auto vecN_u32 = [&](const ast::Expression* value) -> const ast::Expression* {
+ if (width == 1) {
+ return value;
+ }
+ return b.Construct(b.ty.vec<u32>(width), value);
+ };
+
+ ast::StatementList body = {
+ b.Decl(b.Let("s", nullptr, b.Call("min", "offset", u32(W)))),
+ b.Decl(b.Let("e", nullptr, b.Call("min", u32(W), b.Add("s", "count")))),
+ };
+
+ switch (polyfill.extract_bits) {
+ case Level::kFull:
+ body.emplace_back(b.Decl(b.Let("shl", nullptr, b.Sub(u32(W), "e"))));
+ body.emplace_back(b.Decl(b.Let("shr", nullptr, b.Add("shl", "s"))));
+ body.emplace_back(
+ b.Return(b.Shr(b.Shl("v", vecN_u32(b.Expr("shl"))), vecN_u32(b.Expr("shr")))));
+ break;
+ case Level::kClampParameters:
+ body.emplace_back(b.Return(b.Call("extractBits", "v", "s", b.Sub("e", "s"))));
+ break;
+ default:
+ TINT_ICE(Transform, b.Diagnostics())
+ << "unhandled polyfill level: " << static_cast<int>(polyfill.extract_bits);
+ return {};
+ }
+
+ b.Func(name,
+ {
+ b.Param("v", T(ty)),
+ b.Param("offset", b.ty.u32()),
+ b.Param("count", b.ty.u32()),
+ },
+ T(ty), body);
+
+ return name;
}
- b.Func(name,
- {
- b.Param("v", T(ty)),
- b.Param("n", T(ty)),
- b.Param("offset", b.ty.u32()),
- b.Param("count", b.ty.u32()),
- },
- T(ty), body);
-
- return name;
- }
-
- private:
- /// Aliases
- using u32 = ProgramBuilder::u32;
- using i32 = ProgramBuilder::i32;
-
- /// @returns the AST type for the given sem type
- const ast::Type* T(const sem::Type* ty) const {
- return CreateASTTypeFor(ctx, ty);
- }
-
- /// @returns 1 if `ty` is not a vector, otherwise the vector width
- uint32_t WidthOf(const sem::Type* ty) const {
- if (auto* v = ty->As<sem::Vector>()) {
- return v->Width();
+ /// Builds the polyfill function for the `firstLeadingBit` builtin
+ /// @param ty the parameter and return type for the function
+ /// @return the polyfill function name
+ Symbol firstLeadingBit(const sem::Type* ty) {
+ auto name = b.Symbols().New("tint_first_leading_bit");
+ uint32_t width = WidthOf(ty);
+
+ // Returns either u32 or vecN<u32>
+ auto U = [&]() -> const ast::Type* {
+ if (width == 1) {
+ return b.ty.u32();
+ }
+ return b.ty.vec<u32>(width);
+ };
+ auto V = [&](uint32_t value) -> const ast::Expression* {
+ return ScalarOrVector(width, u32(value));
+ };
+ auto B = [&](const ast::Expression* value) -> const ast::Expression* {
+ if (width == 1) {
+ return b.Construct<bool>(value);
+ }
+ return b.Construct(b.ty.vec<bool>(width), value);
+ };
+
+ const ast::Expression* x = nullptr;
+ if (ty->is_unsigned_scalar_or_vector()) {
+ x = b.Expr("v");
+ } else {
+ // If ty is signed, then the value is inverted if the sign is negative
+ x = b.Call("select", //
+ b.Construct(U(), "v"), //
+ b.Construct(U(), b.Complement("v")), //
+ b.LessThan("v", ScalarOrVector(width, 0_i)));
+ }
+
+ b.Func(
+ name, {b.Param("v", T(ty))}, T(ty),
+ {
+ // var x = v; (unsigned)
+ // var x = select(U(v), ~U(v), v < 0); (signed)
+ b.Decl(b.Var("x", nullptr, x)),
+ // let b16 = select(0, 16, bool(x & 0xffff0000));
+ b.Decl(b.Let("b16", nullptr,
+ b.Call("select", V(0), V(16), B(b.And("x", V(0xffff0000)))))),
+ // x = x >> b16;
+ b.Assign("x", b.Shr("x", "b16")),
+ // let b8 = select(0, 8, bool(x & 0x0000ff00));
+ b.Decl(b.Let("b8", nullptr,
+ b.Call("select", V(0), V(8), B(b.And("x", V(0x0000ff00)))))),
+ // x = x >> b8;
+ b.Assign("x", b.Shr("x", "b8")),
+ // let b4 = select(0, 4, bool(x & 0x000000f0));
+ b.Decl(b.Let("b4", nullptr,
+ b.Call("select", V(0), V(4), B(b.And("x", V(0x000000f0)))))),
+ // x = x >> b4;
+ b.Assign("x", b.Shr("x", "b4")),
+ // let b2 = select(0, 2, bool(x & 0x0000000c));
+ b.Decl(b.Let("b2", nullptr,
+ b.Call("select", V(0), V(2), B(b.And("x", V(0x0000000c)))))),
+ // x = x >> b2;
+ b.Assign("x", b.Shr("x", "b2")),
+ // let b1 = select(0, 1, bool(x & 0x00000002));
+ b.Decl(b.Let("b1", nullptr,
+ b.Call("select", V(0), V(1), B(b.And("x", V(0x00000002)))))),
+ // let is_zero = select(0, 0xffffffff, x == 0);
+ b.Decl(b.Let("is_zero", nullptr,
+ b.Call("select", V(0), V(0xffffffff), b.Equal("x", V(0))))),
+ // return R(b16 | b8 | b4 | b2 | b1 | zero);
+ b.Return(b.Construct(
+ T(ty), b.Or(b.Or(b.Or(b.Or(b.Or("b16", "b8"), "b4"), "b2"), "b1"), "is_zero"))),
+ });
+ return name;
}
- return 1;
- }
-
- /// @returns a scalar or vector with the given width, with each element with
- /// the given value.
- template <typename T>
- const ast::Expression* ScalarOrVector(uint32_t width, T value) const {
- if (width == 1) {
- return b.Expr(value);
+
+ /// Builds the polyfill function for the `firstTrailingBit` builtin
+ /// @param ty the parameter and return type for the function
+ /// @return the polyfill function name
+ Symbol firstTrailingBit(const sem::Type* ty) {
+ auto name = b.Symbols().New("tint_first_trailing_bit");
+ uint32_t width = WidthOf(ty);
+
+ // Returns either u32 or vecN<u32>
+ auto U = [&]() -> const ast::Type* {
+ if (width == 1) {
+ return b.ty.u32();
+ }
+ return b.ty.vec<u32>(width);
+ };
+ auto V = [&](uint32_t value) -> const ast::Expression* {
+ return ScalarOrVector(width, u32(value));
+ };
+ auto B = [&](const ast::Expression* value) -> const ast::Expression* {
+ if (width == 1) {
+ return b.Construct<bool>(value);
+ }
+ return b.Construct(b.ty.vec<bool>(width), value);
+ };
+ b.Func(
+ name, {b.Param("v", T(ty))}, T(ty),
+ {
+ // var x = U(v);
+ b.Decl(b.Var("x", nullptr, b.Construct(U(), b.Expr("v")))),
+ // let b16 = select(16, 0, bool(x & 0x0000ffff));
+ b.Decl(b.Let("b16", nullptr,
+ b.Call("select", V(16), V(0), B(b.And("x", V(0x0000ffff)))))),
+ // x = x >> b16;
+ b.Assign("x", b.Shr("x", "b16")),
+ // let b8 = select(8, 0, bool(x & 0x000000ff));
+ b.Decl(b.Let("b8", nullptr,
+ b.Call("select", V(8), V(0), B(b.And("x", V(0x000000ff)))))),
+ // x = x >> b8;
+ b.Assign("x", b.Shr("x", "b8")),
+ // let b4 = select(4, 0, bool(x & 0x0000000f));
+ b.Decl(b.Let("b4", nullptr,
+ b.Call("select", V(4), V(0), B(b.And("x", V(0x0000000f)))))),
+ // x = x >> b4;
+ b.Assign("x", b.Shr("x", "b4")),
+ // let b2 = select(2, 0, bool(x & 0x00000003));
+ b.Decl(b.Let("b2", nullptr,
+ b.Call("select", V(2), V(0), B(b.And("x", V(0x00000003)))))),
+ // x = x >> b2;
+ b.Assign("x", b.Shr("x", "b2")),
+ // let b1 = select(1, 0, bool(x & 0x00000001));
+ b.Decl(b.Let("b1", nullptr,
+ b.Call("select", V(1), V(0), B(b.And("x", V(0x00000001)))))),
+ // let is_zero = select(0, 0xffffffff, x == 0);
+ b.Decl(b.Let("is_zero", nullptr,
+ b.Call("select", V(0), V(0xffffffff), b.Equal("x", V(0))))),
+ // return R(b16 | b8 | b4 | b2 | b1 | is_zero);
+ b.Return(b.Construct(
+ T(ty), b.Or(b.Or(b.Or(b.Or(b.Or("b16", "b8"), "b4"), "b2"), "b1"), "is_zero"))),
+ });
+ return name;
+ }
+
+ /// Builds the polyfill function for the `insertBits` builtin
+ /// @param ty the parameter and return type for the function
+ /// @return the polyfill function name
+ Symbol insertBits(const sem::Type* ty) {
+ auto name = b.Symbols().New("tint_insert_bits");
+ uint32_t width = WidthOf(ty);
+
+ constexpr uint32_t W = 32u; // 32-bit
+
+ auto V = [&](auto value) -> const ast::Expression* {
+ const ast::Expression* expr = b.Expr(value);
+ if (!ty->is_unsigned_scalar_or_vector()) {
+ expr = b.Construct<i32>(expr);
+ }
+ if (ty->Is<sem::Vector>()) {
+ expr = b.Construct(T(ty), expr);
+ }
+ return expr;
+ };
+ auto U = [&](auto value) -> const ast::Expression* {
+ if (width == 1) {
+ return b.Expr(value);
+ }
+ return b.vec(b.ty.u32(), width, value);
+ };
+
+ ast::StatementList body = {
+ b.Decl(b.Let("s", nullptr, b.Call("min", "offset", u32(W)))),
+ b.Decl(b.Let("e", nullptr, b.Call("min", u32(W), b.Add("s", "count")))),
+ };
+
+ switch (polyfill.insert_bits) {
+ case Level::kFull:
+ // let mask = ((1 << s) - 1) ^ ((1 << e) - 1)
+ body.emplace_back(
+ b.Decl(b.Let("mask", nullptr,
+ b.Xor(b.Sub(b.Shl(1_u, "s"), 1_u), b.Sub(b.Shl(1_u, "e"), 1_u)))));
+ // return ((n << s) & mask) | (v & ~mask)
+ body.emplace_back(b.Return(b.Or(b.And(b.Shl("n", U("s")), V("mask")),
+ b.And("v", V(b.Complement("mask"))))));
+ break;
+ case Level::kClampParameters:
+ body.emplace_back(b.Return(b.Call("insertBits", "v", "n", "s", b.Sub("e", "s"))));
+ break;
+ default:
+ TINT_ICE(Transform, b.Diagnostics())
+ << "unhandled polyfill level: " << static_cast<int>(polyfill.insert_bits);
+ return {};
+ }
+
+ b.Func(name,
+ {
+ b.Param("v", T(ty)),
+ b.Param("n", T(ty)),
+ b.Param("offset", b.ty.u32()),
+ b.Param("count", b.ty.u32()),
+ },
+ T(ty), body);
+
+ return name;
+ }
+
+ private:
+ /// @returns the AST type for the given sem type
+ const ast::Type* T(const sem::Type* ty) const { return CreateASTTypeFor(ctx, ty); }
+
+ /// @returns 1 if `ty` is not a vector, otherwise the vector width
+ uint32_t WidthOf(const sem::Type* ty) const {
+ if (auto* v = ty->As<sem::Vector>()) {
+ return v->Width();
+ }
+ return 1;
+ }
+
+ /// @returns a scalar or vector with the given width, with each element with
+ /// the given value.
+ template <typename T>
+ const ast::Expression* ScalarOrVector(uint32_t width, T value) const {
+ if (width == 1) {
+ return b.Expr(value);
+ }
+ return b.Construct(b.ty.vec<T>(width), value);
}
- return b.Construct(b.ty.vec<T>(width), value);
- }
};
BuiltinPolyfill::BuiltinPolyfill() = default;
BuiltinPolyfill::~BuiltinPolyfill() = default;
-bool BuiltinPolyfill::ShouldRun(const Program* program,
- const DataMap& data) const {
- if (auto* cfg = data.Get<Config>()) {
- auto builtins = cfg->builtins;
- auto& sem = program->Sem();
- for (auto* node : program->ASTNodes().Objects()) {
- if (auto* call = sem.Get<sem::Call>(node)) {
- if (auto* builtin = call->Target()->As<sem::Builtin>()) {
- switch (builtin->Type()) {
- case sem::BuiltinType::kCountLeadingZeros:
- if (builtins.count_leading_zeros) {
- return true;
- }
- break;
- case sem::BuiltinType::kCountTrailingZeros:
- if (builtins.count_trailing_zeros) {
- return true;
- }
- break;
- case sem::BuiltinType::kExtractBits:
- if (builtins.extract_bits != Level::kNone) {
- return true;
- }
- break;
- case sem::BuiltinType::kFirstLeadingBit:
- if (builtins.first_leading_bit) {
- return true;
- }
- break;
- case sem::BuiltinType::kFirstTrailingBit:
- if (builtins.first_trailing_bit) {
- return true;
- }
- break;
- case sem::BuiltinType::kInsertBits:
- if (builtins.insert_bits != Level::kNone) {
- return true;
- }
- break;
- default:
- break;
- }
+bool BuiltinPolyfill::ShouldRun(const Program* program, const DataMap& data) const {
+ if (auto* cfg = data.Get<Config>()) {
+ auto builtins = cfg->builtins;
+ auto& sem = program->Sem();
+ for (auto* node : program->ASTNodes().Objects()) {
+ if (auto* call = sem.Get<sem::Call>(node)) {
+ if (auto* builtin = call->Target()->As<sem::Builtin>()) {
+ switch (builtin->Type()) {
+ case sem::BuiltinType::kCountLeadingZeros:
+ if (builtins.count_leading_zeros) {
+ return true;
+ }
+ break;
+ case sem::BuiltinType::kCountTrailingZeros:
+ if (builtins.count_trailing_zeros) {
+ return true;
+ }
+ break;
+ case sem::BuiltinType::kExtractBits:
+ if (builtins.extract_bits != Level::kNone) {
+ return true;
+ }
+ break;
+ case sem::BuiltinType::kFirstLeadingBit:
+ if (builtins.first_leading_bit) {
+ return true;
+ }
+ break;
+ case sem::BuiltinType::kFirstTrailingBit:
+ if (builtins.first_trailing_bit) {
+ return true;
+ }
+ break;
+ case sem::BuiltinType::kInsertBits:
+ if (builtins.insert_bits != Level::kNone) {
+ return true;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ }
}
- }
}
- }
- return false;
+ return false;
}
-void BuiltinPolyfill::Run(CloneContext& ctx,
- const DataMap& data,
- DataMap&) const {
- auto* cfg = data.Get<Config>();
- if (!cfg) {
- ctx.Clone();
- return;
- }
+void BuiltinPolyfill::Run(CloneContext& ctx, const DataMap& data, DataMap&) const {
+ auto* cfg = data.Get<Config>();
+ if (!cfg) {
+ ctx.Clone();
+ return;
+ }
- std::unordered_map<const sem::Builtin*, Symbol> polyfills;
+ std::unordered_map<const sem::Builtin*, Symbol> polyfills;
- ctx.ReplaceAll(
- [&](const ast::CallExpression* expr) -> const ast::CallExpression* {
+ ctx.ReplaceAll([&](const ast::CallExpression* expr) -> const ast::CallExpression* {
auto builtins = cfg->builtins;
State s{ctx, builtins};
if (auto* call = s.sem.Get<sem::Call>(expr)) {
- if (auto* builtin = call->Target()->As<sem::Builtin>()) {
- Symbol polyfill;
- switch (builtin->Type()) {
- case sem::BuiltinType::kCountLeadingZeros:
- if (builtins.count_leading_zeros) {
- polyfill = utils::GetOrCreate(polyfills, builtin, [&] {
- return s.countLeadingZeros(builtin->ReturnType());
- });
- }
- break;
- case sem::BuiltinType::kCountTrailingZeros:
- if (builtins.count_trailing_zeros) {
- polyfill = utils::GetOrCreate(polyfills, builtin, [&] {
- return s.countTrailingZeros(builtin->ReturnType());
- });
- }
- break;
- case sem::BuiltinType::kExtractBits:
- if (builtins.extract_bits != Level::kNone) {
- polyfill = utils::GetOrCreate(polyfills, builtin, [&] {
- return s.extractBits(builtin->ReturnType());
- });
- }
- break;
- case sem::BuiltinType::kFirstLeadingBit:
- if (builtins.first_leading_bit) {
- polyfill = utils::GetOrCreate(polyfills, builtin, [&] {
- return s.firstLeadingBit(builtin->ReturnType());
- });
+ if (auto* builtin = call->Target()->As<sem::Builtin>()) {
+ Symbol polyfill;
+ switch (builtin->Type()) {
+ case sem::BuiltinType::kCountLeadingZeros:
+ if (builtins.count_leading_zeros) {
+ polyfill = utils::GetOrCreate(polyfills, builtin, [&] {
+ return s.countLeadingZeros(builtin->ReturnType());
+ });
+ }
+ break;
+ case sem::BuiltinType::kCountTrailingZeros:
+ if (builtins.count_trailing_zeros) {
+ polyfill = utils::GetOrCreate(polyfills, builtin, [&] {
+ return s.countTrailingZeros(builtin->ReturnType());
+ });
+ }
+ break;
+ case sem::BuiltinType::kExtractBits:
+ if (builtins.extract_bits != Level::kNone) {
+ polyfill = utils::GetOrCreate(polyfills, builtin, [&] {
+ return s.extractBits(builtin->ReturnType());
+ });
+ }
+ break;
+ case sem::BuiltinType::kFirstLeadingBit:
+ if (builtins.first_leading_bit) {
+ polyfill = utils::GetOrCreate(polyfills, builtin, [&] {
+ return s.firstLeadingBit(builtin->ReturnType());
+ });
+ }
+ break;
+ case sem::BuiltinType::kFirstTrailingBit:
+ if (builtins.first_trailing_bit) {
+ polyfill = utils::GetOrCreate(polyfills, builtin, [&] {
+ return s.firstTrailingBit(builtin->ReturnType());
+ });
+ }
+ break;
+ case sem::BuiltinType::kInsertBits:
+ if (builtins.insert_bits != Level::kNone) {
+ polyfill = utils::GetOrCreate(polyfills, builtin, [&] {
+ return s.insertBits(builtin->ReturnType());
+ });
+ }
+ break;
+ default:
+ break;
}
- break;
- case sem::BuiltinType::kFirstTrailingBit:
- if (builtins.first_trailing_bit) {
- polyfill = utils::GetOrCreate(polyfills, builtin, [&] {
- return s.firstTrailingBit(builtin->ReturnType());
- });
- }
- break;
- case sem::BuiltinType::kInsertBits:
- if (builtins.insert_bits != Level::kNone) {
- polyfill = utils::GetOrCreate(polyfills, builtin, [&] {
- return s.insertBits(builtin->ReturnType());
- });
+ if (polyfill.IsValid()) {
+ return s.b.Call(polyfill, ctx.Clone(call->Declaration()->args));
}
- break;
- default:
- break;
- }
- if (polyfill.IsValid()) {
- return s.b.Call(polyfill, ctx.Clone(call->Declaration()->args));
}
- }
}
return nullptr;
- });
+ });
- ctx.Clone();
+ ctx.Clone();
}
BuiltinPolyfill::Config::Config(const Builtins& b) : builtins(b) {}
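For reference, the select/shift cascade that tint_count_leading_zeros emits above can be checked with a small standalone C++ mirror of the same arithmetic (clz32 is an illustrative name, not part of Tint); the assertions match the countLeadingZeros(15) cases in the tests further down:

    #include <cassert>
    #include <cstdint>

    // Mirrors the generated cascade: at each step, shift the value left so the
    // highest set bit moves toward bit 31 and accumulate how far it travelled.
    // The final "+ is_zero" makes clz32(0) == 32.
    uint32_t clz32(uint32_t v) {
        uint32_t x = v;
        uint32_t b16 = (x <= 0x0000ffffu) ? 16u : 0u;  x <<= b16;
        uint32_t b8  = (x <= 0x00ffffffu) ? 8u  : 0u;  x <<= b8;
        uint32_t b4  = (x <= 0x0fffffffu) ? 4u  : 0u;  x <<= b4;
        uint32_t b2  = (x <= 0x3fffffffu) ? 2u  : 0u;  x <<= b2;
        uint32_t b1  = (x <= 0x7fffffffu) ? 1u  : 0u;
        uint32_t is_zero = (x == 0u) ? 1u : 0u;
        return (b16 | b8 | b4 | b2 | b1) + is_zero;
    }

    int main() {
        assert(clz32(0u) == 32u);
        assert(clz32(1u) == 31u);
        assert(clz32(15u) == 28u);  // matches the countLeadingZeros(15) tests below
        assert(clz32(0x80000000u) == 0u);
    }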
diff --git a/chromium/third_party/dawn/src/tint/transform/builtin_polyfill.h b/chromium/third_party/dawn/src/tint/transform/builtin_polyfill.h
index ada10158e25..8453189fb5a 100644
--- a/chromium/third_party/dawn/src/tint/transform/builtin_polyfill.h
+++ b/chromium/third_party/dawn/src/tint/transform/builtin_polyfill.h
@@ -21,73 +21,70 @@ namespace tint::transform {
/// Implements builtins for backends that do not have a native implementation.
class BuiltinPolyfill final : public Castable<BuiltinPolyfill, Transform> {
- public:
- /// Constructor
- BuiltinPolyfill();
- /// Destructor
- ~BuiltinPolyfill() override;
+ public:
+ /// Constructor
+ BuiltinPolyfill();
+ /// Destructor
+ ~BuiltinPolyfill() override;
- /// Enumerator of polyfill levels
- enum class Level {
- /// No polyfill needed, supported by the backend.
- kNone,
- /// Clamp the parameters to the inner implementation.
- kClampParameters,
- /// Polyfill the entire function
- kFull,
- };
+ /// Enumerator of polyfill levels
+ enum class Level {
+ /// No polyfill needed, supported by the backend.
+ kNone,
+ /// Clamp the parameters to the inner implementation.
+ kClampParameters,
+ /// Polyfill the entire function
+ kFull,
+ };
- /// Specifies the builtins that should be polyfilled by the transform.
- struct Builtins {
- /// Should `countLeadingZeros()` be polyfilled?
- bool count_leading_zeros = false;
- /// Should `countTrailingZeros()` be polyfilled?
- bool count_trailing_zeros = false;
- /// What level should `extractBits()` be polyfilled?
- Level extract_bits = Level::kNone;
- /// Should `firstLeadingBit()` be polyfilled?
- bool first_leading_bit = false;
- /// Should `firstTrailingBit()` be polyfilled?
- bool first_trailing_bit = false;
- /// Should `insertBits()` be polyfilled?
- Level insert_bits = Level::kNone;
- };
+ /// Specifies the builtins that should be polyfilled by the transform.
+ struct Builtins {
+ /// Should `countLeadingZeros()` be polyfilled?
+ bool count_leading_zeros = false;
+ /// Should `countTrailingZeros()` be polyfilled?
+ bool count_trailing_zeros = false;
+ /// What level should `extractBits()` be polyfilled?
+ Level extract_bits = Level::kNone;
+ /// Should `firstLeadingBit()` be polyfilled?
+ bool first_leading_bit = false;
+ /// Should `firstTrailingBit()` be polyfilled?
+ bool first_trailing_bit = false;
+ /// Should `insertBits()` be polyfilled?
+ Level insert_bits = Level::kNone;
+ };
- /// Config is consumed by the BuiltinPolyfill transform.
- /// Config specifies the builtins that should be polyfilled.
- struct Config final : public Castable<Data, transform::Data> {
- /// Constructor
- /// @param b the list of builtins to polyfill
- explicit Config(const Builtins& b);
+ /// Config is consumed by the BuiltinPolyfill transform.
+ /// Config specifies the builtins that should be polyfilled.
+ struct Config final : public Castable<Data, transform::Data> {
+ /// Constructor
+ /// @param b the list of builtins to polyfill
+ explicit Config(const Builtins& b);
- /// Copy constructor
- Config(const Config&);
+ /// Copy constructor
+ Config(const Config&);
- /// Destructor
- ~Config() override;
+ /// Destructor
+ ~Config() override;
- /// The builtins to polyfill
- const Builtins builtins;
- };
+ /// The builtins to polyfill
+ const Builtins builtins;
+ };
- /// @param program the program to inspect
- /// @param data optional extra transform-specific input data
- /// @returns true if this transform should be run for the given program
- bool ShouldRun(const Program* program,
- const DataMap& data = {}) const override;
+ /// @param program the program to inspect
+ /// @param data optional extra transform-specific input data
+ /// @returns true if this transform should be run for the given program
+ bool ShouldRun(const Program* program, const DataMap& data = {}) const override;
- protected:
- struct State;
+ protected:
+ struct State;
- /// Runs the transform using the CloneContext built for transforming a
- /// program. Run() is responsible for calling Clone() on the CloneContext.
- /// @param ctx the CloneContext primed with the input program and
- /// ProgramBuilder
- /// @param inputs optional extra transform-specific input data
- /// @param outputs optional extra transform-specific output data
- void Run(CloneContext& ctx,
- const DataMap& inputs,
- DataMap& outputs) const override;
+ /// Runs the transform using the CloneContext built for transforming a
+ /// program. Run() is responsible for calling Clone() on the CloneContext.
+ /// @param ctx the CloneContext primed with the input program and
+ /// ProgramBuilder
+ /// @param inputs optional extra transform-specific input data
+ /// @param outputs optional extra transform-specific output data
+ void Run(CloneContext& ctx, const DataMap& inputs, DataMap& outputs) const override;
};
} // namespace tint::transform
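For reference, the Level::kFull fallback for extractBits declared above reduces to two clamps and two shifts (shl = 32 - e, shr = shl + s), as the generator code and the expected WGSL in the tests show. Below is a standalone C++ sketch of the unsigned case, under the stated assumptions (extract_bits_full is an illustrative name, not Tint API):

    #include <algorithm>
    #include <cassert>
    #include <cstdint>

    // Unsigned kFull fallback: clamp the window to [0, 32], then isolate it with
    // a left shift followed by a right shift. The sketch assumes count >= 1 and
    // offset + count <= 32 so every shift amount stays below 32 (larger shifts
    // are undefined in C++, while the generated WGSL follows its own rules).
    uint32_t extract_bits_full(uint32_t v, uint32_t offset, uint32_t count) {
        const uint32_t s = std::min(offset, 32u);
        const uint32_t e = std::min(32u, s + count);
        const uint32_t shl = 32u - e;  // discard the bits above the window
        const uint32_t shr = shl + s;  // then drop the bits below it
        return (v << shl) >> shr;
    }

    int main() {
        // extractBits(1234u, 5u, 6u) from the tests: bits [5, 11) of 0b100_1101_0010.
        assert(extract_bits_full(1234u, 5u, 6u) == ((1234u >> 5) & 0x3fu));  // == 38
    }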
diff --git a/chromium/third_party/dawn/src/tint/transform/builtin_polyfill_test.cc b/chromium/third_party/dawn/src/tint/transform/builtin_polyfill_test.cc
index c5cc2c5ff7c..bc3dda8cbac 100644
--- a/chromium/third_party/dawn/src/tint/transform/builtin_polyfill_test.cc
+++ b/chromium/third_party/dawn/src/tint/transform/builtin_polyfill_test.cc
@@ -26,51 +26,51 @@ using Level = BuiltinPolyfill::Level;
using BuiltinPolyfillTest = TransformTest;
TEST_F(BuiltinPolyfillTest, ShouldRunEmptyModule) {
- auto* src = R"()";
+ auto* src = R"()";
- EXPECT_FALSE(ShouldRun<BuiltinPolyfill>(src));
+ EXPECT_FALSE(ShouldRun<BuiltinPolyfill>(src));
}
TEST_F(BuiltinPolyfillTest, EmptyModule) {
- auto* src = R"()";
+ auto* src = R"()";
- auto* expect = src;
+ auto* expect = src;
- auto got = Run<BuiltinPolyfill>(src);
+ auto got = Run<BuiltinPolyfill>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
////////////////////////////////////////////////////////////////////////////////
// countLeadingZeros
////////////////////////////////////////////////////////////////////////////////
DataMap polyfillCountLeadingZeros() {
- BuiltinPolyfill::Builtins builtins;
- builtins.count_leading_zeros = true;
- DataMap data;
- data.Add<BuiltinPolyfill::Config>(builtins);
- return data;
+ BuiltinPolyfill::Builtins builtins;
+ builtins.count_leading_zeros = true;
+ DataMap data;
+ data.Add<BuiltinPolyfill::Config>(builtins);
+ return data;
}
TEST_F(BuiltinPolyfillTest, ShouldRunCountLeadingZeros) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
countLeadingZeros(0xf);
}
)";
- EXPECT_FALSE(ShouldRun<BuiltinPolyfill>(src));
- EXPECT_TRUE(ShouldRun<BuiltinPolyfill>(src, polyfillCountLeadingZeros()));
+ EXPECT_FALSE(ShouldRun<BuiltinPolyfill>(src));
+ EXPECT_TRUE(ShouldRun<BuiltinPolyfill>(src, polyfillCountLeadingZeros()));
}
TEST_F(BuiltinPolyfillTest, CountLeadingZeros_i32) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
let r : i32 = countLeadingZeros(15);
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn tint_count_leading_zeros(v : i32) -> i32 {
var x = u32(v);
let b16 = select(0u, 16u, (x <= 65535u));
@@ -91,19 +91,19 @@ fn f() {
}
)";
- auto got = Run<BuiltinPolyfill>(src, polyfillCountLeadingZeros());
+ auto got = Run<BuiltinPolyfill>(src, polyfillCountLeadingZeros());
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(BuiltinPolyfillTest, CountLeadingZeros_u32) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
let r : u32 = countLeadingZeros(15u);
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn tint_count_leading_zeros(v : u32) -> u32 {
var x = u32(v);
let b16 = select(0u, 16u, (x <= 65535u));
@@ -124,19 +124,19 @@ fn f() {
}
)";
- auto got = Run<BuiltinPolyfill>(src, polyfillCountLeadingZeros());
+ auto got = Run<BuiltinPolyfill>(src, polyfillCountLeadingZeros());
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(BuiltinPolyfillTest, CountLeadingZeros_vec3_i32) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
let r : vec3<i32> = countLeadingZeros(vec3<i32>(15));
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn tint_count_leading_zeros(v : vec3<i32>) -> vec3<i32> {
var x = vec3<u32>(v);
let b16 = select(vec3<u32>(0u), vec3<u32>(16u), (x <= vec3<u32>(65535u)));
@@ -157,19 +157,19 @@ fn f() {
}
)";
- auto got = Run<BuiltinPolyfill>(src, polyfillCountLeadingZeros());
+ auto got = Run<BuiltinPolyfill>(src, polyfillCountLeadingZeros());
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(BuiltinPolyfillTest, CountLeadingZeros_vec3_u32) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
let r : vec3<u32> = countLeadingZeros(vec3<u32>(15u));
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn tint_count_leading_zeros(v : vec3<u32>) -> vec3<u32> {
var x = vec3<u32>(v);
let b16 = select(vec3<u32>(0u), vec3<u32>(16u), (x <= vec3<u32>(65535u)));
@@ -190,41 +190,41 @@ fn f() {
}
)";
- auto got = Run<BuiltinPolyfill>(src, polyfillCountLeadingZeros());
+ auto got = Run<BuiltinPolyfill>(src, polyfillCountLeadingZeros());
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
////////////////////////////////////////////////////////////////////////////////
// countTrailingZeros
////////////////////////////////////////////////////////////////////////////////
DataMap polyfillCountTrailingZeros() {
- BuiltinPolyfill::Builtins builtins;
- builtins.count_trailing_zeros = true;
- DataMap data;
- data.Add<BuiltinPolyfill::Config>(builtins);
- return data;
+ BuiltinPolyfill::Builtins builtins;
+ builtins.count_trailing_zeros = true;
+ DataMap data;
+ data.Add<BuiltinPolyfill::Config>(builtins);
+ return data;
}
TEST_F(BuiltinPolyfillTest, ShouldRunCountTrailingZeros) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
countTrailingZeros(0xf);
}
)";
- EXPECT_FALSE(ShouldRun<BuiltinPolyfill>(src));
- EXPECT_TRUE(ShouldRun<BuiltinPolyfill>(src, polyfillCountTrailingZeros()));
+ EXPECT_FALSE(ShouldRun<BuiltinPolyfill>(src));
+ EXPECT_TRUE(ShouldRun<BuiltinPolyfill>(src, polyfillCountTrailingZeros()));
}
TEST_F(BuiltinPolyfillTest, CountTrailingZeros_i32) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
let r : i32 = countTrailingZeros(15);
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn tint_count_trailing_zeros(v : i32) -> i32 {
var x = u32(v);
let b16 = select(16u, 0u, bool((x & 65535u)));
@@ -245,19 +245,19 @@ fn f() {
}
)";
- auto got = Run<BuiltinPolyfill>(src, polyfillCountTrailingZeros());
+ auto got = Run<BuiltinPolyfill>(src, polyfillCountTrailingZeros());
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(BuiltinPolyfillTest, CountTrailingZeros_u32) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
let r : u32 = countTrailingZeros(15u);
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn tint_count_trailing_zeros(v : u32) -> u32 {
var x = u32(v);
let b16 = select(16u, 0u, bool((x & 65535u)));
@@ -278,19 +278,19 @@ fn f() {
}
)";
- auto got = Run<BuiltinPolyfill>(src, polyfillCountTrailingZeros());
+ auto got = Run<BuiltinPolyfill>(src, polyfillCountTrailingZeros());
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(BuiltinPolyfillTest, CountTrailingZeros_vec3_i32) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
let r : vec3<i32> = countTrailingZeros(vec3<i32>(15));
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn tint_count_trailing_zeros(v : vec3<i32>) -> vec3<i32> {
var x = vec3<u32>(v);
let b16 = select(vec3<u32>(16u), vec3<u32>(0u), vec3<bool>((x & vec3<u32>(65535u))));
@@ -311,19 +311,19 @@ fn f() {
}
)";
- auto got = Run<BuiltinPolyfill>(src, polyfillCountTrailingZeros());
+ auto got = Run<BuiltinPolyfill>(src, polyfillCountTrailingZeros());
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(BuiltinPolyfillTest, CountTrailingZeros_vec3_u32) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
let r : vec3<u32> = countTrailingZeros(vec3<u32>(15u));
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn tint_count_trailing_zeros(v : vec3<u32>) -> vec3<u32> {
var x = vec3<u32>(v);
let b16 = select(vec3<u32>(16u), vec3<u32>(0u), vec3<bool>((x & vec3<u32>(65535u))));
@@ -344,46 +344,43 @@ fn f() {
}
)";
- auto got = Run<BuiltinPolyfill>(src, polyfillCountTrailingZeros());
+ auto got = Run<BuiltinPolyfill>(src, polyfillCountTrailingZeros());
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
////////////////////////////////////////////////////////////////////////////////
// extractBits
////////////////////////////////////////////////////////////////////////////////
DataMap polyfillExtractBits(Level level) {
- BuiltinPolyfill::Builtins builtins;
- builtins.extract_bits = level;
- DataMap data;
- data.Add<BuiltinPolyfill::Config>(builtins);
- return data;
+ BuiltinPolyfill::Builtins builtins;
+ builtins.extract_bits = level;
+ DataMap data;
+ data.Add<BuiltinPolyfill::Config>(builtins);
+ return data;
}
TEST_F(BuiltinPolyfillTest, ShouldRunExtractBits) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
extractBits(1234, 5u, 6u);
}
)";
- EXPECT_FALSE(ShouldRun<BuiltinPolyfill>(src));
- EXPECT_FALSE(
- ShouldRun<BuiltinPolyfill>(src, polyfillExtractBits(Level::kNone)));
- EXPECT_TRUE(ShouldRun<BuiltinPolyfill>(
- src, polyfillExtractBits(Level::kClampParameters)));
- EXPECT_TRUE(
- ShouldRun<BuiltinPolyfill>(src, polyfillExtractBits(Level::kFull)));
+ EXPECT_FALSE(ShouldRun<BuiltinPolyfill>(src));
+ EXPECT_FALSE(ShouldRun<BuiltinPolyfill>(src, polyfillExtractBits(Level::kNone)));
+ EXPECT_TRUE(ShouldRun<BuiltinPolyfill>(src, polyfillExtractBits(Level::kClampParameters)));
+ EXPECT_TRUE(ShouldRun<BuiltinPolyfill>(src, polyfillExtractBits(Level::kFull)));
}
TEST_F(BuiltinPolyfillTest, ExtractBits_Full_i32) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
let r : i32 = extractBits(1234, 5u, 6u);
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn tint_extract_bits(v : i32, offset : u32, count : u32) -> i32 {
let s = min(offset, 32u);
let e = min(32u, (s + count));
@@ -397,19 +394,19 @@ fn f() {
}
)";
- auto got = Run<BuiltinPolyfill>(src, polyfillExtractBits(Level::kFull));
+ auto got = Run<BuiltinPolyfill>(src, polyfillExtractBits(Level::kFull));
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(BuiltinPolyfillTest, ExtractBits_Full_u32) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
let r : u32 = extractBits(1234u, 5u, 6u);
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn tint_extract_bits(v : u32, offset : u32, count : u32) -> u32 {
let s = min(offset, 32u);
let e = min(32u, (s + count));
@@ -423,19 +420,19 @@ fn f() {
}
)";
- auto got = Run<BuiltinPolyfill>(src, polyfillExtractBits(Level::kFull));
+ auto got = Run<BuiltinPolyfill>(src, polyfillExtractBits(Level::kFull));
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(BuiltinPolyfillTest, ExtractBits_Full_vec3_i32) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
let r : vec3<i32> = extractBits(vec3<i32>(1234), 5u, 6u);
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn tint_extract_bits(v : vec3<i32>, offset : u32, count : u32) -> vec3<i32> {
let s = min(offset, 32u);
let e = min(32u, (s + count));
@@ -449,19 +446,19 @@ fn f() {
}
)";
- auto got = Run<BuiltinPolyfill>(src, polyfillExtractBits(Level::kFull));
+ auto got = Run<BuiltinPolyfill>(src, polyfillExtractBits(Level::kFull));
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(BuiltinPolyfillTest, ExtractBits_Full_vec3_u32) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
let r : vec3<u32> = extractBits(vec3<u32>(1234u), 5u, 6u);
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn tint_extract_bits(v : vec3<u32>, offset : u32, count : u32) -> vec3<u32> {
let s = min(offset, 32u);
let e = min(32u, (s + count));
@@ -475,19 +472,19 @@ fn f() {
}
)";
- auto got = Run<BuiltinPolyfill>(src, polyfillExtractBits(Level::kFull));
+ auto got = Run<BuiltinPolyfill>(src, polyfillExtractBits(Level::kFull));
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(BuiltinPolyfillTest, ExtractBits_Clamp_i32) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
let r : i32 = extractBits(1234, 5u, 6u);
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn tint_extract_bits(v : i32, offset : u32, count : u32) -> i32 {
let s = min(offset, 32u);
let e = min(32u, (s + count));
@@ -499,20 +496,19 @@ fn f() {
}
)";
- auto got =
- Run<BuiltinPolyfill>(src, polyfillExtractBits(Level::kClampParameters));
+ auto got = Run<BuiltinPolyfill>(src, polyfillExtractBits(Level::kClampParameters));
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(BuiltinPolyfillTest, ExtractBits_Clamp_u32) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
let r : u32 = extractBits(1234u, 5u, 6u);
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn tint_extract_bits(v : u32, offset : u32, count : u32) -> u32 {
let s = min(offset, 32u);
let e = min(32u, (s + count));
@@ -524,20 +520,19 @@ fn f() {
}
)";
- auto got =
- Run<BuiltinPolyfill>(src, polyfillExtractBits(Level::kClampParameters));
+ auto got = Run<BuiltinPolyfill>(src, polyfillExtractBits(Level::kClampParameters));
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(BuiltinPolyfillTest, ExtractBits_Clamp_vec3_i32) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
let r : vec3<i32> = extractBits(vec3<i32>(1234), 5u, 6u);
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn tint_extract_bits(v : vec3<i32>, offset : u32, count : u32) -> vec3<i32> {
let s = min(offset, 32u);
let e = min(32u, (s + count));
@@ -549,20 +544,19 @@ fn f() {
}
)";
- auto got =
- Run<BuiltinPolyfill>(src, polyfillExtractBits(Level::kClampParameters));
+ auto got = Run<BuiltinPolyfill>(src, polyfillExtractBits(Level::kClampParameters));
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(BuiltinPolyfillTest, ExtractBits_Clamp_vec3_u32) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
let r : vec3<u32> = extractBits(vec3<u32>(1234u), 5u, 6u);
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn tint_extract_bits(v : vec3<u32>, offset : u32, count : u32) -> vec3<u32> {
let s = min(offset, 32u);
let e = min(32u, (s + count));
@@ -574,44 +568,43 @@ fn f() {
}
)";
- auto got =
- Run<BuiltinPolyfill>(src, polyfillExtractBits(Level::kClampParameters));
+ auto got = Run<BuiltinPolyfill>(src, polyfillExtractBits(Level::kClampParameters));
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
////////////////////////////////////////////////////////////////////////////////
// firstLeadingBit
////////////////////////////////////////////////////////////////////////////////
DataMap polyfillFirstLeadingBit() {
- BuiltinPolyfill::Builtins builtins;
- builtins.first_leading_bit = true;
- DataMap data;
- data.Add<BuiltinPolyfill::Config>(builtins);
- return data;
+ BuiltinPolyfill::Builtins builtins;
+ builtins.first_leading_bit = true;
+ DataMap data;
+ data.Add<BuiltinPolyfill::Config>(builtins);
+ return data;
}
TEST_F(BuiltinPolyfillTest, ShouldRunFirstLeadingBit) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
firstLeadingBit(0xf);
}
)";
- EXPECT_FALSE(ShouldRun<BuiltinPolyfill>(src));
- EXPECT_TRUE(ShouldRun<BuiltinPolyfill>(src, polyfillFirstLeadingBit()));
+ EXPECT_FALSE(ShouldRun<BuiltinPolyfill>(src));
+ EXPECT_TRUE(ShouldRun<BuiltinPolyfill>(src, polyfillFirstLeadingBit()));
}
TEST_F(BuiltinPolyfillTest, FirstLeadingBit_i32) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
let r : i32 = firstLeadingBit(15);
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn tint_first_leading_bit(v : i32) -> i32 {
- var x = select(u32(v), u32(~(v)), (v < 0));
+ var x = select(u32(v), u32(~(v)), (v < 0i));
let b16 = select(0u, 16u, bool((x & 4294901760u)));
x = (x >> b16);
let b8 = select(0u, 8u, bool((x & 65280u)));
@@ -630,19 +623,19 @@ fn f() {
}
)";
- auto got = Run<BuiltinPolyfill>(src, polyfillFirstLeadingBit());
+ auto got = Run<BuiltinPolyfill>(src, polyfillFirstLeadingBit());
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(BuiltinPolyfillTest, FirstLeadingBit_u32) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
let r : u32 = firstLeadingBit(15u);
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn tint_first_leading_bit(v : u32) -> u32 {
var x = v;
let b16 = select(0u, 16u, bool((x & 4294901760u)));
@@ -663,21 +656,21 @@ fn f() {
}
)";
- auto got = Run<BuiltinPolyfill>(src, polyfillFirstLeadingBit());
+ auto got = Run<BuiltinPolyfill>(src, polyfillFirstLeadingBit());
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(BuiltinPolyfillTest, FirstLeadingBit_vec3_i32) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
let r : vec3<i32> = firstLeadingBit(vec3<i32>(15));
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn tint_first_leading_bit(v : vec3<i32>) -> vec3<i32> {
- var x = select(vec3<u32>(v), vec3<u32>(~(v)), (v < vec3<i32>(0)));
+ var x = select(vec3<u32>(v), vec3<u32>(~(v)), (v < vec3<i32>(0i)));
let b16 = select(vec3<u32>(0u), vec3<u32>(16u), vec3<bool>((x & vec3<u32>(4294901760u))));
x = (x >> b16);
let b8 = select(vec3<u32>(0u), vec3<u32>(8u), vec3<bool>((x & vec3<u32>(65280u))));
@@ -696,19 +689,19 @@ fn f() {
}
)";
- auto got = Run<BuiltinPolyfill>(src, polyfillFirstLeadingBit());
+ auto got = Run<BuiltinPolyfill>(src, polyfillFirstLeadingBit());
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(BuiltinPolyfillTest, FirstLeadingBit_vec3_u32) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
let r : vec3<u32> = firstLeadingBit(vec3<u32>(15u));
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn tint_first_leading_bit(v : vec3<u32>) -> vec3<u32> {
var x = v;
let b16 = select(vec3<u32>(0u), vec3<u32>(16u), vec3<bool>((x & vec3<u32>(4294901760u))));
@@ -729,41 +722,41 @@ fn f() {
}
)";
- auto got = Run<BuiltinPolyfill>(src, polyfillFirstLeadingBit());
+ auto got = Run<BuiltinPolyfill>(src, polyfillFirstLeadingBit());
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
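
The expected WGSL above replaces firstLeadingBit with a branchless binary search: each select() asks whether any bit is set in the upper half of the remaining window and accumulates the index 16/8/4/2/1 bits at a time. A C++ transcription of the u32 case as a sanity check; first_leading_bit_u32 is a hypothetical helper, not Tint code, and the all-ones result for zero input follows the builtin's definition.

    #include <cstdint>

    // Hypothetical transcription of the u32 polyfill: repeatedly halve the
    // window containing the most significant set bit and sum the shift
    // amounts to obtain the bit index.
    uint32_t first_leading_bit_u32(uint32_t v) {
        uint32_t x = v;
        const uint32_t b16 = (x & 0xffff0000u) ? 16u : 0u;  x >>= b16;
        const uint32_t b8  = (x & 0x0000ff00u) ? 8u  : 0u;  x >>= b8;
        const uint32_t b4  = (x & 0x000000f0u) ? 4u  : 0u;  x >>= b4;
        const uint32_t b2  = (x & 0x0000000cu) ? 2u  : 0u;  x >>= b2;
        const uint32_t b1  = (x & 0x00000002u) ? 1u  : 0u;
        return (v == 0u) ? 0xffffffffu : (b16 | b8 | b4 | b2 | b1);
    }

    // first_leading_bit_u32(15u) == 3u, matching firstLeadingBit(15u) in the tests.
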
////////////////////////////////////////////////////////////////////////////////
// firstTrailingBit
////////////////////////////////////////////////////////////////////////////////
DataMap polyfillFirstTrailingBit() {
- BuiltinPolyfill::Builtins builtins;
- builtins.first_trailing_bit = true;
- DataMap data;
- data.Add<BuiltinPolyfill::Config>(builtins);
- return data;
+ BuiltinPolyfill::Builtins builtins;
+ builtins.first_trailing_bit = true;
+ DataMap data;
+ data.Add<BuiltinPolyfill::Config>(builtins);
+ return data;
}
TEST_F(BuiltinPolyfillTest, ShouldRunFirstTrailingBit) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
firstTrailingBit(0xf);
}
)";
- EXPECT_FALSE(ShouldRun<BuiltinPolyfill>(src));
- EXPECT_TRUE(ShouldRun<BuiltinPolyfill>(src, polyfillFirstTrailingBit()));
+ EXPECT_FALSE(ShouldRun<BuiltinPolyfill>(src));
+ EXPECT_TRUE(ShouldRun<BuiltinPolyfill>(src, polyfillFirstTrailingBit()));
}
TEST_F(BuiltinPolyfillTest, FirstTrailingBit_i32) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
let r : i32 = firstTrailingBit(15);
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn tint_first_trailing_bit(v : i32) -> i32 {
var x = u32(v);
let b16 = select(16u, 0u, bool((x & 65535u)));
@@ -784,19 +777,19 @@ fn f() {
}
)";
- auto got = Run<BuiltinPolyfill>(src, polyfillFirstTrailingBit());
+ auto got = Run<BuiltinPolyfill>(src, polyfillFirstTrailingBit());
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(BuiltinPolyfillTest, FirstTrailingBit_u32) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
let r : u32 = firstTrailingBit(15u);
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn tint_first_trailing_bit(v : u32) -> u32 {
var x = u32(v);
let b16 = select(16u, 0u, bool((x & 65535u)));
@@ -817,19 +810,19 @@ fn f() {
}
)";
- auto got = Run<BuiltinPolyfill>(src, polyfillFirstTrailingBit());
+ auto got = Run<BuiltinPolyfill>(src, polyfillFirstTrailingBit());
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(BuiltinPolyfillTest, FirstTrailingBit_vec3_i32) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
let r : vec3<i32> = firstTrailingBit(vec3<i32>(15));
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn tint_first_trailing_bit(v : vec3<i32>) -> vec3<i32> {
var x = vec3<u32>(v);
let b16 = select(vec3<u32>(16u), vec3<u32>(0u), vec3<bool>((x & vec3<u32>(65535u))));
@@ -850,19 +843,19 @@ fn f() {
}
)";
- auto got = Run<BuiltinPolyfill>(src, polyfillFirstTrailingBit());
+ auto got = Run<BuiltinPolyfill>(src, polyfillFirstTrailingBit());
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(BuiltinPolyfillTest, FirstTrailingBit_vec3_u32) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
let r : vec3<u32> = firstTrailingBit(vec3<u32>(15u));
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn tint_first_trailing_bit(v : vec3<u32>) -> vec3<u32> {
var x = vec3<u32>(v);
let b16 = select(vec3<u32>(16u), vec3<u32>(0u), vec3<bool>((x & vec3<u32>(65535u))));
@@ -883,46 +876,43 @@ fn f() {
}
)";
- auto got = Run<BuiltinPolyfill>(src, polyfillFirstTrailingBit());
+ auto got = Run<BuiltinPolyfill>(src, polyfillFirstTrailingBit());
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
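
firstTrailingBit uses the mirror image of the same search: each step checks whether the low half of the remaining window is empty and, if so, skips past it. A hypothetical C++ rendering of the u32 case (first_trailing_bit_u32 is an illustrative name, not a Tint symbol):

    #include <cstdint>

    // Hypothetical transcription: skip over empty low halves of the window,
    // accumulating 16/8/4/2/1 until the lowest set bit is reached.
    uint32_t first_trailing_bit_u32(uint32_t v) {
        uint32_t x = v;
        const uint32_t b16 = (x & 0x0000ffffu) ? 0u : 16u;  x >>= b16;
        const uint32_t b8  = (x & 0x000000ffu) ? 0u : 8u;   x >>= b8;
        const uint32_t b4  = (x & 0x0000000fu) ? 0u : 4u;   x >>= b4;
        const uint32_t b2  = (x & 0x00000003u) ? 0u : 2u;   x >>= b2;
        const uint32_t b1  = (x & 0x00000001u) ? 0u : 1u;
        return (v == 0u) ? 0xffffffffu : (b16 | b8 | b4 | b2 | b1);
    }

    // first_trailing_bit_u32(8u) == 3u; zero input yields the all-ones sentinel.
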
////////////////////////////////////////////////////////////////////////////////
// insertBits
////////////////////////////////////////////////////////////////////////////////
DataMap polyfillInsertBits(Level level) {
- BuiltinPolyfill::Builtins builtins;
- builtins.insert_bits = level;
- DataMap data;
- data.Add<BuiltinPolyfill::Config>(builtins);
- return data;
+ BuiltinPolyfill::Builtins builtins;
+ builtins.insert_bits = level;
+ DataMap data;
+ data.Add<BuiltinPolyfill::Config>(builtins);
+ return data;
}
TEST_F(BuiltinPolyfillTest, ShouldRunInsertBits) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
insertBits(1234, 5678, 5u, 6u);
}
)";
- EXPECT_FALSE(ShouldRun<BuiltinPolyfill>(src));
- EXPECT_FALSE(
- ShouldRun<BuiltinPolyfill>(src, polyfillInsertBits(Level::kNone)));
- EXPECT_TRUE(ShouldRun<BuiltinPolyfill>(
- src, polyfillInsertBits(Level::kClampParameters)));
- EXPECT_TRUE(
- ShouldRun<BuiltinPolyfill>(src, polyfillInsertBits(Level::kFull)));
+ EXPECT_FALSE(ShouldRun<BuiltinPolyfill>(src));
+ EXPECT_FALSE(ShouldRun<BuiltinPolyfill>(src, polyfillInsertBits(Level::kNone)));
+ EXPECT_TRUE(ShouldRun<BuiltinPolyfill>(src, polyfillInsertBits(Level::kClampParameters)));
+ EXPECT_TRUE(ShouldRun<BuiltinPolyfill>(src, polyfillInsertBits(Level::kFull)));
}
TEST_F(BuiltinPolyfillTest, InsertBits_Full_i32) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
let r : i32 = insertBits(1234, 5678, 5u, 6u);
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn tint_insert_bits(v : i32, n : i32, offset : u32, count : u32) -> i32 {
let s = min(offset, 32u);
let e = min(32u, (s + count));
@@ -935,19 +925,19 @@ fn f() {
}
)";
- auto got = Run<BuiltinPolyfill>(src, polyfillInsertBits(Level::kFull));
+ auto got = Run<BuiltinPolyfill>(src, polyfillInsertBits(Level::kFull));
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(BuiltinPolyfillTest, InsertBits_Full_u32) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
let r : u32 = insertBits(1234u, 5678u, 5u, 6u);
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn tint_insert_bits(v : u32, n : u32, offset : u32, count : u32) -> u32 {
let s = min(offset, 32u);
let e = min(32u, (s + count));
@@ -960,19 +950,19 @@ fn f() {
}
)";
- auto got = Run<BuiltinPolyfill>(src, polyfillInsertBits(Level::kFull));
+ auto got = Run<BuiltinPolyfill>(src, polyfillInsertBits(Level::kFull));
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(BuiltinPolyfillTest, InsertBits_Full_vec3_i32) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
let r : vec3<i32> = insertBits(vec3<i32>(1234), vec3<i32>(5678), 5u, 6u);
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn tint_insert_bits(v : vec3<i32>, n : vec3<i32>, offset : u32, count : u32) -> vec3<i32> {
let s = min(offset, 32u);
let e = min(32u, (s + count));
@@ -985,19 +975,19 @@ fn f() {
}
)";
- auto got = Run<BuiltinPolyfill>(src, polyfillInsertBits(Level::kFull));
+ auto got = Run<BuiltinPolyfill>(src, polyfillInsertBits(Level::kFull));
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(BuiltinPolyfillTest, InsertBits_Full_vec3_u32) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
let r : vec3<u32> = insertBits(vec3<u32>(1234u), vec3<u32>(5678u), 5u, 6u);
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn tint_insert_bits(v : vec3<u32>, n : vec3<u32>, offset : u32, count : u32) -> vec3<u32> {
let s = min(offset, 32u);
let e = min(32u, (s + count));
@@ -1010,19 +1000,19 @@ fn f() {
}
)";
- auto got = Run<BuiltinPolyfill>(src, polyfillInsertBits(Level::kFull));
+ auto got = Run<BuiltinPolyfill>(src, polyfillInsertBits(Level::kFull));
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(BuiltinPolyfillTest, InsertBits_Clamp_i32) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
let r : i32 = insertBits(1234, 5678, 5u, 6u);
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn tint_insert_bits(v : i32, n : i32, offset : u32, count : u32) -> i32 {
let s = min(offset, 32u);
let e = min(32u, (s + count));
@@ -1034,20 +1024,19 @@ fn f() {
}
)";
- auto got =
- Run<BuiltinPolyfill>(src, polyfillInsertBits(Level::kClampParameters));
+ auto got = Run<BuiltinPolyfill>(src, polyfillInsertBits(Level::kClampParameters));
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(BuiltinPolyfillTest, InsertBits_Clamp_u32) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
let r : u32 = insertBits(1234u, 5678u, 5u, 6u);
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn tint_insert_bits(v : u32, n : u32, offset : u32, count : u32) -> u32 {
let s = min(offset, 32u);
let e = min(32u, (s + count));
@@ -1059,20 +1048,19 @@ fn f() {
}
)";
- auto got =
- Run<BuiltinPolyfill>(src, polyfillInsertBits(Level::kClampParameters));
+ auto got = Run<BuiltinPolyfill>(src, polyfillInsertBits(Level::kClampParameters));
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(BuiltinPolyfillTest, InsertBits_Clamp_vec3_i32) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
let r : vec3<i32> = insertBits(vec3<i32>(1234), vec3<i32>(5678), 5u, 6u);
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn tint_insert_bits(v : vec3<i32>, n : vec3<i32>, offset : u32, count : u32) -> vec3<i32> {
let s = min(offset, 32u);
let e = min(32u, (s + count));
@@ -1084,20 +1072,19 @@ fn f() {
}
)";
- auto got =
- Run<BuiltinPolyfill>(src, polyfillInsertBits(Level::kClampParameters));
+ auto got = Run<BuiltinPolyfill>(src, polyfillInsertBits(Level::kClampParameters));
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(BuiltinPolyfillTest, InsertBits_Clamp_vec3_u32) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
let r : vec3<u32> = insertBits(vec3<u32>(1234u), vec3<u32>(5678u), 5u, 6u);
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn tint_insert_bits(v : vec3<u32>, n : vec3<u32>, offset : u32, count : u32) -> vec3<u32> {
let s = min(offset, 32u);
let e = min(32u, (s + count));
@@ -1109,10 +1096,9 @@ fn f() {
}
)";
- auto got =
- Run<BuiltinPolyfill>(src, polyfillInsertBits(Level::kClampParameters));
+ auto got = Run<BuiltinPolyfill>(src, polyfillInsertBits(Level::kClampParameters));
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
} // namespace
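
As with extractBits, the kClampParameters flavour of insertBits exercised above only clamps offset and count; the insertion itself is the standard mask-and-merge. A scalar C++ sketch under that assumption (insert_bits_clamped is a hypothetical name, not part of the polyfill):

    #include <algorithm>
    #include <cstdint>

    // Hypothetical illustration: clamp the target bit range [s, e) to the
    // 32-bit width, then splice the low (e - s) bits of `n` into `v` at `s`.
    uint32_t insert_bits_clamped(uint32_t v, uint32_t n, uint32_t offset, uint32_t count) {
        const uint32_t w = 32u;
        const uint32_t s = std::min(offset, w);
        const uint32_t e = std::min(w, s + count);
        const uint32_t cnt = e - s;
        if (cnt == 0u) {
            return v;
        }
        const uint32_t mask = (cnt == w) ? 0xffffffffu : (((1u << cnt) - 1u) << s);
        return (v & ~mask) | ((n << s) & mask);
    }
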
diff --git a/chromium/third_party/dawn/src/tint/transform/calculate_array_length.cc b/chromium/third_party/dawn/src/tint/transform/calculate_array_length.cc
index 589d21e8dfb..bdda1cd43ba 100644
--- a/chromium/third_party/dawn/src/tint/transform/calculate_array_length.cc
+++ b/chromium/third_party/dawn/src/tint/transform/calculate_array_length.cc
@@ -31,8 +31,9 @@
#include "src/tint/utils/map.h"
TINT_INSTANTIATE_TYPEINFO(tint::transform::CalculateArrayLength);
-TINT_INSTANTIATE_TYPEINFO(
- tint::transform::CalculateArrayLength::BufferSizeIntrinsic);
+TINT_INSTANTIATE_TYPEINFO(tint::transform::CalculateArrayLength::BufferSizeIntrinsic);
+
+using namespace tint::number_suffixes; // NOLINT
namespace tint::transform {
@@ -41,201 +42,191 @@ namespace {
/// ArrayUsage describes a runtime array usage.
/// It is used as a key by the array_length_by_usage map.
struct ArrayUsage {
- ast::BlockStatement const* const block;
- sem::Variable const* const buffer;
- bool operator==(const ArrayUsage& rhs) const {
- return block == rhs.block && buffer == rhs.buffer;
- }
- struct Hasher {
- inline std::size_t operator()(const ArrayUsage& u) const {
- return utils::Hash(u.block, u.buffer);
+ ast::BlockStatement const* const block;
+ sem::Variable const* const buffer;
+ bool operator==(const ArrayUsage& rhs) const {
+ return block == rhs.block && buffer == rhs.buffer;
}
- };
+ struct Hasher {
+ inline std::size_t operator()(const ArrayUsage& u) const {
+ return utils::Hash(u.block, u.buffer);
+ }
+ };
};
} // namespace
-CalculateArrayLength::BufferSizeIntrinsic::BufferSizeIntrinsic(ProgramID pid)
- : Base(pid) {}
+CalculateArrayLength::BufferSizeIntrinsic::BufferSizeIntrinsic(ProgramID pid) : Base(pid) {}
CalculateArrayLength::BufferSizeIntrinsic::~BufferSizeIntrinsic() = default;
std::string CalculateArrayLength::BufferSizeIntrinsic::InternalName() const {
- return "intrinsic_buffer_size";
+ return "intrinsic_buffer_size";
}
-const CalculateArrayLength::BufferSizeIntrinsic*
-CalculateArrayLength::BufferSizeIntrinsic::Clone(CloneContext* ctx) const {
- return ctx->dst->ASTNodes().Create<CalculateArrayLength::BufferSizeIntrinsic>(
- ctx->dst->ID());
+const CalculateArrayLength::BufferSizeIntrinsic* CalculateArrayLength::BufferSizeIntrinsic::Clone(
+ CloneContext* ctx) const {
+ return ctx->dst->ASTNodes().Create<CalculateArrayLength::BufferSizeIntrinsic>(ctx->dst->ID());
}
CalculateArrayLength::CalculateArrayLength() = default;
CalculateArrayLength::~CalculateArrayLength() = default;
-bool CalculateArrayLength::ShouldRun(const Program* program,
- const DataMap&) const {
- for (auto* fn : program->AST().Functions()) {
- if (auto* sem_fn = program->Sem().Get(fn)) {
- for (auto* builtin : sem_fn->DirectlyCalledBuiltins()) {
- if (builtin->Type() == sem::BuiltinType::kArrayLength) {
- return true;
+bool CalculateArrayLength::ShouldRun(const Program* program, const DataMap&) const {
+ for (auto* fn : program->AST().Functions()) {
+ if (auto* sem_fn = program->Sem().Get(fn)) {
+ for (auto* builtin : sem_fn->DirectlyCalledBuiltins()) {
+ if (builtin->Type() == sem::BuiltinType::kArrayLength) {
+ return true;
+ }
+ }
}
- }
}
- }
- return false;
+ return false;
}
-void CalculateArrayLength::Run(CloneContext& ctx,
- const DataMap&,
- DataMap&) const {
- auto& sem = ctx.src->Sem();
-
- // get_buffer_size_intrinsic() emits the function decorated with
- // BufferSizeIntrinsic that is transformed by the HLSL writer into a call to
- // [RW]ByteAddressBuffer.GetDimensions().
- std::unordered_map<const sem::Type*, Symbol> buffer_size_intrinsics;
- auto get_buffer_size_intrinsic = [&](const sem::Type* buffer_type) {
- return utils::GetOrCreate(buffer_size_intrinsics, buffer_type, [&] {
- auto name = ctx.dst->Sym();
- auto* type = CreateASTTypeFor(ctx, buffer_type);
- auto* disable_validation = ctx.dst->Disable(
- ast::DisabledValidation::kIgnoreConstructibleFunctionParameter);
- ctx.dst->AST().AddFunction(ctx.dst->create<ast::Function>(
- name,
- ast::VariableList{
- // Note: The buffer parameter requires the kStorage StorageClass
- // in order for HLSL to emit this as a ByteAddressBuffer.
- ctx.dst->create<ast::Variable>(
- ctx.dst->Sym("buffer"), ast::StorageClass::kStorage,
- ast::Access::kUndefined, type, true, false, nullptr,
- ast::AttributeList{disable_validation}),
- ctx.dst->Param("result",
- ctx.dst->ty.pointer(ctx.dst->ty.u32(),
- ast::StorageClass::kFunction)),
- },
- ctx.dst->ty.void_(), nullptr,
- ast::AttributeList{
- ctx.dst->ASTNodes().Create<BufferSizeIntrinsic>(ctx.dst->ID()),
- },
- ast::AttributeList{}));
-
- return name;
- });
- };
-
- std::unordered_map<ArrayUsage, Symbol, ArrayUsage::Hasher>
- array_length_by_usage;
-
- // Find all the arrayLength() calls...
- for (auto* node : ctx.src->ASTNodes().Objects()) {
- if (auto* call_expr = node->As<ast::CallExpression>()) {
- auto* call = sem.Get(call_expr);
- if (auto* builtin = call->Target()->As<sem::Builtin>()) {
- if (builtin->Type() == sem::BuiltinType::kArrayLength) {
- // We're dealing with an arrayLength() call
-
- // A runtime-sized array can only appear as the store type of a
- // variable, or the last element of a structure (which cannot itself
- // be nested). Given that we require SimplifyPointers, we can assume
- // that the arrayLength() call has one of two forms:
- // arrayLength(&struct_var.array_member)
- // arrayLength(&array_var)
- auto* arg = call_expr->args[0];
- auto* address_of = arg->As<ast::UnaryOpExpression>();
- if (!address_of || address_of->op != ast::UnaryOp::kAddressOf) {
- TINT_ICE(Transform, ctx.dst->Diagnostics())
- << "arrayLength() expected address-of, got "
- << arg->TypeInfo().name;
- }
- auto* storage_buffer_expr = address_of->expr;
- if (auto* accessor =
- storage_buffer_expr->As<ast::MemberAccessorExpression>()) {
- storage_buffer_expr = accessor->structure;
- }
- auto* storage_buffer_sem =
- sem.Get<sem::VariableUser>(storage_buffer_expr);
- if (!storage_buffer_sem) {
- TINT_ICE(Transform, ctx.dst->Diagnostics())
- << "expected form of arrayLength argument to be &array_var or "
- "&struct_var.array_member";
- break;
- }
- auto* storage_buffer_var = storage_buffer_sem->Variable();
- auto* storage_buffer_type = storage_buffer_sem->Type()->UnwrapRef();
-
- // Generate BufferSizeIntrinsic for this storage type if we haven't
- // already
- auto buffer_size = get_buffer_size_intrinsic(storage_buffer_type);
-
- // Find the current statement block
- auto* block = call->Stmt()->Block()->Declaration();
-
- auto array_length = utils::GetOrCreate(
- array_length_by_usage, {block, storage_buffer_var}, [&] {
- // First time this array length is used for this block.
- // Let's calculate it.
-
- // Construct the variable that'll hold the result of
- // RWByteAddressBuffer.GetDimensions()
- auto* buffer_size_result = ctx.dst->Decl(
- ctx.dst->Var(ctx.dst->Sym(), ctx.dst->ty.u32(),
- ast::StorageClass::kNone, ctx.dst->Expr(0u)));
-
- // Call storage_buffer.GetDimensions(&buffer_size_result)
- auto* call_get_dims = ctx.dst->CallStmt(ctx.dst->Call(
- // BufferSizeIntrinsic(X, ARGS...) is
- // translated to:
- // X.GetDimensions(ARGS..) by the writer
- buffer_size, ctx.Clone(storage_buffer_expr),
- ctx.dst->AddressOf(
- ctx.dst->Expr(buffer_size_result->variable->symbol))));
-
- // Calculate actual array length
- // total_storage_buffer_size - array_offset
- // array_length = ----------------------------------------
- // array_stride
- auto name = ctx.dst->Sym();
- const ast::Expression* total_size =
- ctx.dst->Expr(buffer_size_result->variable);
- const sem::Array* array_type = nullptr;
- if (auto* str = storage_buffer_type->As<sem::Struct>()) {
- // The variable is a struct, so subtract the byte offset of
- // the array member.
- auto* array_member_sem = str->Members().back();
- array_type = array_member_sem->Type()->As<sem::Array>();
- total_size =
- ctx.dst->Sub(total_size, array_member_sem->Offset());
- } else if (auto* arr = storage_buffer_type->As<sem::Array>()) {
- array_type = arr;
- } else {
- TINT_ICE(Transform, ctx.dst->Diagnostics())
- << "expected form of arrayLength argument to be "
- "&array_var or &struct_var.array_member";
- return name;
+void CalculateArrayLength::Run(CloneContext& ctx, const DataMap&, DataMap&) const {
+ auto& sem = ctx.src->Sem();
+
+ // get_buffer_size_intrinsic() emits the function decorated with
+ // BufferSizeIntrinsic that is transformed by the HLSL writer into a call to
+ // [RW]ByteAddressBuffer.GetDimensions().
+ std::unordered_map<const sem::Type*, Symbol> buffer_size_intrinsics;
+ auto get_buffer_size_intrinsic = [&](const sem::Type* buffer_type) {
+ return utils::GetOrCreate(buffer_size_intrinsics, buffer_type, [&] {
+ auto name = ctx.dst->Sym();
+ auto* type = CreateASTTypeFor(ctx, buffer_type);
+ auto* disable_validation =
+ ctx.dst->Disable(ast::DisabledValidation::kIgnoreConstructibleFunctionParameter);
+ ctx.dst->AST().AddFunction(ctx.dst->create<ast::Function>(
+ name,
+ ast::VariableList{
+ // Note: The buffer parameter requires the kStorage StorageClass
+ // in order for HLSL to emit this as a ByteAddressBuffer.
+ ctx.dst->create<ast::Variable>(ctx.dst->Sym("buffer"),
+ ast::StorageClass::kStorage,
+ ast::Access::kUndefined, type, true, false,
+ nullptr, ast::AttributeList{disable_validation}),
+ ctx.dst->Param("result", ctx.dst->ty.pointer(ctx.dst->ty.u32(),
+ ast::StorageClass::kFunction)),
+ },
+ ctx.dst->ty.void_(), nullptr,
+ ast::AttributeList{
+ ctx.dst->ASTNodes().Create<BufferSizeIntrinsic>(ctx.dst->ID()),
+ },
+ ast::AttributeList{}));
+
+ return name;
+ });
+ };
+
+ std::unordered_map<ArrayUsage, Symbol, ArrayUsage::Hasher> array_length_by_usage;
+
+ // Find all the arrayLength() calls...
+ for (auto* node : ctx.src->ASTNodes().Objects()) {
+ if (auto* call_expr = node->As<ast::CallExpression>()) {
+ auto* call = sem.Get(call_expr)->UnwrapMaterialize()->As<sem::Call>();
+ if (auto* builtin = call->Target()->As<sem::Builtin>()) {
+ if (builtin->Type() == sem::BuiltinType::kArrayLength) {
+ // We're dealing with an arrayLength() call
+
+ // A runtime-sized array can only appear as the store type of a
+ // variable, or the last element of a structure (which cannot itself
+ // be nested). Given that we require SimplifyPointers, we can assume
+ // that the arrayLength() call has one of two forms:
+ // arrayLength(&struct_var.array_member)
+ // arrayLength(&array_var)
+ auto* arg = call_expr->args[0];
+ auto* address_of = arg->As<ast::UnaryOpExpression>();
+ if (!address_of || address_of->op != ast::UnaryOp::kAddressOf) {
+ TINT_ICE(Transform, ctx.dst->Diagnostics())
+ << "arrayLength() expected address-of, got " << arg->TypeInfo().name;
+ }
+ auto* storage_buffer_expr = address_of->expr;
+ if (auto* accessor = storage_buffer_expr->As<ast::MemberAccessorExpression>()) {
+ storage_buffer_expr = accessor->structure;
+ }
+ auto* storage_buffer_sem = sem.Get<sem::VariableUser>(storage_buffer_expr);
+ if (!storage_buffer_sem) {
+ TINT_ICE(Transform, ctx.dst->Diagnostics())
+ << "expected form of arrayLength argument to be &array_var or "
+ "&struct_var.array_member";
+ break;
+ }
+ auto* storage_buffer_var = storage_buffer_sem->Variable();
+ auto* storage_buffer_type = storage_buffer_sem->Type()->UnwrapRef();
+
+ // Generate BufferSizeIntrinsic for this storage type if we haven't
+ // already
+ auto buffer_size = get_buffer_size_intrinsic(storage_buffer_type);
+
+ // Find the current statement block
+ auto* block = call->Stmt()->Block()->Declaration();
+
+ auto array_length =
+ utils::GetOrCreate(array_length_by_usage, {block, storage_buffer_var}, [&] {
+ // First time this array length is used for this block.
+ // Let's calculate it.
+
+ // Construct the variable that'll hold the result of
+ // RWByteAddressBuffer.GetDimensions()
+ auto* buffer_size_result = ctx.dst->Decl(
+ ctx.dst->Var(ctx.dst->Sym(), ctx.dst->ty.u32(),
+ ast::StorageClass::kNone, ctx.dst->Expr(0_u)));
+
+ // Call storage_buffer.GetDimensions(&buffer_size_result)
+ auto* call_get_dims = ctx.dst->CallStmt(ctx.dst->Call(
+ // BufferSizeIntrinsic(X, ARGS...) is
+ // translated to:
+ // X.GetDimensions(ARGS..) by the writer
+ buffer_size, ctx.Clone(storage_buffer_expr),
+ ctx.dst->AddressOf(
+ ctx.dst->Expr(buffer_size_result->variable->symbol))));
+
+ // Calculate actual array length
+ // total_storage_buffer_size - array_offset
+ // array_length = ----------------------------------------
+ // array_stride
+ auto name = ctx.dst->Sym();
+ const ast::Expression* total_size =
+ ctx.dst->Expr(buffer_size_result->variable);
+ const sem::Array* array_type = nullptr;
+ if (auto* str = storage_buffer_type->As<sem::Struct>()) {
+ // The variable is a struct, so subtract the byte offset of
+ // the array member.
+ auto* array_member_sem = str->Members().back();
+ array_type = array_member_sem->Type()->As<sem::Array>();
+ total_size =
+ ctx.dst->Sub(total_size, u32(array_member_sem->Offset()));
+ } else if (auto* arr = storage_buffer_type->As<sem::Array>()) {
+ array_type = arr;
+ } else {
+ TINT_ICE(Transform, ctx.dst->Diagnostics())
+ << "expected form of arrayLength argument to be "
+ "&array_var or &struct_var.array_member";
+ return name;
+ }
+ uint32_t array_stride = array_type->Size();
+ auto* array_length_var = ctx.dst->Decl(
+ ctx.dst->Let(name, ctx.dst->ty.u32(),
+ ctx.dst->Div(total_size, u32(array_stride))));
+
+ // Insert the array length calculations at the top of the block
+ ctx.InsertBefore(block->statements, block->statements[0],
+ buffer_size_result);
+ ctx.InsertBefore(block->statements, block->statements[0],
+ call_get_dims);
+ ctx.InsertBefore(block->statements, block->statements[0],
+ array_length_var);
+ return name;
+ });
+
+ // Replace the call to arrayLength() with the array length variable
+ ctx.Replace(call_expr, ctx.dst->Expr(array_length));
}
- uint32_t array_stride = array_type->Size();
- auto* array_length_var = ctx.dst->Decl(
- ctx.dst->Const(name, ctx.dst->ty.u32(),
- ctx.dst->Div(total_size, array_stride)));
-
- // Insert the array length calculations at the top of the block
- ctx.InsertBefore(block->statements, block->statements[0],
- buffer_size_result);
- ctx.InsertBefore(block->statements, block->statements[0],
- call_get_dims);
- ctx.InsertBefore(block->statements, block->statements[0],
- array_length_var);
- return name;
- });
-
- // Replace the call to arrayLength() with the array length variable
- ctx.Replace(call_expr, ctx.dst->Expr(array_length));
+ }
}
- }
}
- }
- ctx.Clone();
+ ctx.Clone();
}
} // namespace tint::transform
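
The comment block inside Run() spells out the arithmetic the transform emits once the HLSL writer has turned the BufferSizeIntrinsic call into [RW]ByteAddressBuffer.GetDimensions(): the buffer's total byte size, minus the byte offset of the runtime-sized array member, divided by the array stride. A trivial C++ restatement of that formula (runtime_array_length is a hypothetical helper, not part of the transform):

    #include <cstdint>

    // array_length = (total_storage_buffer_size - array_offset) / array_stride
    uint32_t runtime_array_length(uint32_t total_buffer_size_bytes,
                                  uint32_t array_offset_bytes,
                                  uint32_t array_stride_bytes) {
        return (total_buffer_size_bytes - array_offset_bytes) / array_stride_bytes;
    }

    // e.g. a 256-byte buffer whose runtime array starts at byte offset 4 with a
    // 4-byte stride holds (256 - 4) / 4 = 63 elements.
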
diff --git a/chromium/third_party/dawn/src/tint/transform/calculate_array_length.h b/chromium/third_party/dawn/src/tint/transform/calculate_array_length.h
index 344f6f078e6..401e081aa73 100644
--- a/chromium/third_party/dawn/src/tint/transform/calculate_array_length.h
+++ b/chromium/third_party/dawn/src/tint/transform/calculate_array_length.h
@@ -32,50 +32,45 @@ namespace tint::transform {
///
/// @note Depends on the following transforms to have been run first:
/// * SimplifyPointers
-class CalculateArrayLength final
- : public Castable<CalculateArrayLength, Transform> {
- public:
- /// BufferSizeIntrinsic is an InternalAttribute that's applied to intrinsic
- /// functions used to obtain the runtime size of a storage buffer.
- class BufferSizeIntrinsic final
- : public Castable<BufferSizeIntrinsic, ast::InternalAttribute> {
- public:
- /// Constructor
- /// @param program_id the identifier of the program that owns this node
- explicit BufferSizeIntrinsic(ProgramID program_id);
- /// Destructor
- ~BufferSizeIntrinsic() override;
+class CalculateArrayLength final : public Castable<CalculateArrayLength, Transform> {
+ public:
+ /// BufferSizeIntrinsic is an InternalAttribute that's applied to intrinsic
+ /// functions used to obtain the runtime size of a storage buffer.
+ class BufferSizeIntrinsic final : public Castable<BufferSizeIntrinsic, ast::InternalAttribute> {
+ public:
+ /// Constructor
+ /// @param program_id the identifier of the program that owns this node
+ explicit BufferSizeIntrinsic(ProgramID program_id);
+ /// Destructor
+ ~BufferSizeIntrinsic() override;
- /// @return "buffer_size"
- std::string InternalName() const override;
+ /// @return "buffer_size"
+ std::string InternalName() const override;
- /// Performs a deep clone of this object using the CloneContext `ctx`.
- /// @param ctx the clone context
- /// @return the newly cloned object
- const BufferSizeIntrinsic* Clone(CloneContext* ctx) const override;
- };
+ /// Performs a deep clone of this object using the CloneContext `ctx`.
+ /// @param ctx the clone context
+ /// @return the newly cloned object
+ const BufferSizeIntrinsic* Clone(CloneContext* ctx) const override;
+ };
- /// Constructor
- CalculateArrayLength();
- /// Destructor
- ~CalculateArrayLength() override;
+ /// Constructor
+ CalculateArrayLength();
+ /// Destructor
+ ~CalculateArrayLength() override;
- /// @param program the program to inspect
- /// @param data optional extra transform-specific input data
- /// @returns true if this transform should be run for the given program
- bool ShouldRun(const Program* program,
- const DataMap& data = {}) const override;
+ /// @param program the program to inspect
+ /// @param data optional extra transform-specific input data
+ /// @returns true if this transform should be run for the given program
+ bool ShouldRun(const Program* program, const DataMap& data = {}) const override;
- protected:
- /// Runs the transform using the CloneContext built for transforming a
- /// program. Run() is responsible for calling Clone() on the CloneContext.
- /// @param ctx the CloneContext primed with the input program and
- /// ProgramBuilder
- /// @param inputs optional extra transform-specific input data
- /// @param outputs optional extra transform-specific output data
- void Run(CloneContext& ctx,
- const DataMap& inputs,
- DataMap& outputs) const override;
+ protected:
+ /// Runs the transform using the CloneContext built for transforming a
+ /// program. Run() is responsible for calling Clone() on the CloneContext.
+ /// @param ctx the CloneContext primed with the input program and
+ /// ProgramBuilder
+ /// @param inputs optional extra transform-specific input data
+ /// @param outputs optional extra transform-specific output data
+ void Run(CloneContext& ctx, const DataMap& inputs, DataMap& outputs) const override;
};
} // namespace tint::transform
diff --git a/chromium/third_party/dawn/src/tint/transform/calculate_array_length_test.cc b/chromium/third_party/dawn/src/tint/transform/calculate_array_length_test.cc
index dec1698de02..e2674b05729 100644
--- a/chromium/third_party/dawn/src/tint/transform/calculate_array_length_test.cc
+++ b/chromium/third_party/dawn/src/tint/transform/calculate_array_length_test.cc
@@ -24,13 +24,13 @@ namespace {
using CalculateArrayLengthTest = TransformTest;
TEST_F(CalculateArrayLengthTest, ShouldRunEmptyModule) {
- auto* src = R"()";
+ auto* src = R"()";
- EXPECT_FALSE(ShouldRun<CalculateArrayLength>(src));
+ EXPECT_FALSE(ShouldRun<CalculateArrayLength>(src));
}
TEST_F(CalculateArrayLengthTest, ShouldRunNoArrayLength) {
- auto* src = R"(
+ auto* src = R"(
struct SB {
x : i32,
arr : array<i32>,
@@ -38,16 +38,16 @@ struct SB {
@group(0) @binding(0) var<storage, read> sb : SB;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
}
)";
- EXPECT_FALSE(ShouldRun<CalculateArrayLength>(src));
+ EXPECT_FALSE(ShouldRun<CalculateArrayLength>(src));
}
TEST_F(CalculateArrayLengthTest, ShouldRunWithArrayLength) {
- auto* src = R"(
+ auto* src = R"(
struct SB {
x : i32,
arr : array<i32>,
@@ -55,32 +55,32 @@ struct SB {
@group(0) @binding(0) var<storage, read> sb : SB;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
var len : u32 = arrayLength(&sb.arr);
}
)";
- EXPECT_TRUE(ShouldRun<CalculateArrayLength>(src));
+ EXPECT_TRUE(ShouldRun<CalculateArrayLength>(src));
}
TEST_F(CalculateArrayLengthTest, BasicArray) {
- auto* src = R"(
+ auto* src = R"(
@group(0) @binding(0) var<storage, read> sb : array<i32>;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
var len : u32 = arrayLength(&sb);
}
)";
- auto* expect = R"(
+ auto* expect = R"(
@internal(intrinsic_buffer_size)
fn tint_symbol(@internal(disable_validation__ignore_constructible_function_parameter) buffer : array<i32>, result : ptr<function, u32>)
@group(0) @binding(0) var<storage, read> sb : array<i32>;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
var tint_symbol_1 : u32 = 0u;
tint_symbol(sb, &(tint_symbol_1));
@@ -89,13 +89,13 @@ fn main() {
}
)";
- auto got = Run<Unshadow, SimplifyPointers, CalculateArrayLength>(src);
+ auto got = Run<Unshadow, SimplifyPointers, CalculateArrayLength>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CalculateArrayLengthTest, BasicInStruct) {
- auto* src = R"(
+ auto* src = R"(
struct SB {
x : i32,
arr : array<i32>,
@@ -103,13 +103,13 @@ struct SB {
@group(0) @binding(0) var<storage, read> sb : SB;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
var len : u32 = arrayLength(&sb.arr);
}
)";
- auto* expect = R"(
+ auto* expect = R"(
@internal(intrinsic_buffer_size)
fn tint_symbol(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, result : ptr<function, u32>)
@@ -120,7 +120,7 @@ struct SB {
@group(0) @binding(0) var<storage, read> sb : SB;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
var tint_symbol_1 : u32 = 0u;
tint_symbol(sb, &(tint_symbol_1));
@@ -129,25 +129,25 @@ fn main() {
}
)";
- auto got = Run<Unshadow, SimplifyPointers, CalculateArrayLength>(src);
+ auto got = Run<Unshadow, SimplifyPointers, CalculateArrayLength>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CalculateArrayLengthTest, ArrayOfStruct) {
- auto* src = R"(
+ auto* src = R"(
struct S {
f : f32,
}
@group(0) @binding(0) var<storage, read> arr : array<S>;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
let len = arrayLength(&arr);
}
)";
- auto* expect = R"(
+ auto* expect = R"(
@internal(intrinsic_buffer_size)
fn tint_symbol(@internal(disable_validation__ignore_constructible_function_parameter) buffer : array<S>, result : ptr<function, u32>)
@@ -157,7 +157,7 @@ struct S {
@group(0) @binding(0) var<storage, read> arr : array<S>;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
var tint_symbol_1 : u32 = 0u;
tint_symbol(arr, &(tint_symbol_1));
@@ -166,25 +166,25 @@ fn main() {
}
)";
- auto got = Run<Unshadow, SimplifyPointers, CalculateArrayLength>(src);
+ auto got = Run<Unshadow, SimplifyPointers, CalculateArrayLength>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CalculateArrayLengthTest, ArrayOfArrayOfStruct) {
- auto* src = R"(
+ auto* src = R"(
struct S {
f : f32,
}
@group(0) @binding(0) var<storage, read> arr : array<array<S, 4>>;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
let len = arrayLength(&arr);
}
)";
- auto* expect = R"(
+ auto* expect = R"(
@internal(intrinsic_buffer_size)
fn tint_symbol(@internal(disable_validation__ignore_constructible_function_parameter) buffer : array<array<S, 4u>>, result : ptr<function, u32>)
@@ -194,7 +194,7 @@ struct S {
@group(0) @binding(0) var<storage, read> arr : array<array<S, 4>>;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
var tint_symbol_1 : u32 = 0u;
tint_symbol(arr, &(tint_symbol_1));
@@ -203,16 +203,16 @@ fn main() {
}
)";
- auto got = Run<Unshadow, SimplifyPointers, CalculateArrayLength>(src);
+ auto got = Run<Unshadow, SimplifyPointers, CalculateArrayLength>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CalculateArrayLengthTest, InSameBlock) {
- auto* src = R"(
+ auto* src = R"(
@group(0) @binding(0) var<storage, read> sb : array<i32>;;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
var a : u32 = arrayLength(&sb);
var b : u32 = arrayLength(&sb);
@@ -220,13 +220,13 @@ fn main() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
@internal(intrinsic_buffer_size)
fn tint_symbol(@internal(disable_validation__ignore_constructible_function_parameter) buffer : array<i32>, result : ptr<function, u32>)
@group(0) @binding(0) var<storage, read> sb : array<i32>;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
var tint_symbol_1 : u32 = 0u;
tint_symbol(sb, &(tint_symbol_1));
@@ -237,13 +237,13 @@ fn main() {
}
)";
- auto got = Run<Unshadow, SimplifyPointers, CalculateArrayLength>(src);
+ auto got = Run<Unshadow, SimplifyPointers, CalculateArrayLength>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CalculateArrayLengthTest, InSameBlock_Struct) {
- auto* src = R"(
+ auto* src = R"(
struct SB {
x : i32,
arr : array<i32>,
@@ -251,7 +251,7 @@ struct SB {
@group(0) @binding(0) var<storage, read> sb : SB;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
var a : u32 = arrayLength(&sb.arr);
var b : u32 = arrayLength(&sb.arr);
@@ -259,7 +259,7 @@ fn main() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
@internal(intrinsic_buffer_size)
fn tint_symbol(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, result : ptr<function, u32>)
@@ -270,7 +270,7 @@ struct SB {
@group(0) @binding(0) var<storage, read> sb : SB;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
var tint_symbol_1 : u32 = 0u;
tint_symbol(sb, &(tint_symbol_1));
@@ -281,13 +281,13 @@ fn main() {
}
)";
- auto got = Run<Unshadow, SimplifyPointers, CalculateArrayLength>(src);
+ auto got = Run<Unshadow, SimplifyPointers, CalculateArrayLength>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CalculateArrayLengthTest, Nested) {
- auto* src = R"(
+ auto* src = R"(
struct SB {
x : i32,
arr : array<i32>,
@@ -295,7 +295,7 @@ struct SB {
@group(0) @binding(0) var<storage, read> sb : SB;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
if (true) {
var len : u32 = arrayLength(&sb.arr);
@@ -307,7 +307,7 @@ fn main() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
@internal(intrinsic_buffer_size)
fn tint_symbol(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, result : ptr<function, u32>)
@@ -318,7 +318,7 @@ struct SB {
@group(0) @binding(0) var<storage, read> sb : SB;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
if (true) {
var tint_symbol_1 : u32 = 0u;
@@ -336,13 +336,13 @@ fn main() {
}
)";
- auto got = Run<Unshadow, SimplifyPointers, CalculateArrayLength>(src);
+ auto got = Run<Unshadow, SimplifyPointers, CalculateArrayLength>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CalculateArrayLengthTest, MultipleStorageBuffers) {
- auto* src = R"(
+ auto* src = R"(
struct SB1 {
x : i32,
arr1 : array<i32>,
@@ -359,7 +359,7 @@ struct SB2 {
@group(0) @binding(2) var<storage, read> sb3 : array<i32>;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
var len1 : u32 = arrayLength(&(sb1.arr1));
var len2 : u32 = arrayLength(&(sb2.arr2));
@@ -368,7 +368,7 @@ fn main() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
@internal(intrinsic_buffer_size)
fn tint_symbol(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB1, result : ptr<function, u32>)
@@ -394,7 +394,7 @@ struct SB2 {
@group(0) @binding(2) var<storage, read> sb3 : array<i32>;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
var tint_symbol_1 : u32 = 0u;
tint_symbol(sb1, &(tint_symbol_1));
@@ -412,13 +412,13 @@ fn main() {
}
)";
- auto got = Run<Unshadow, SimplifyPointers, CalculateArrayLength>(src);
+ auto got = Run<Unshadow, SimplifyPointers, CalculateArrayLength>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CalculateArrayLengthTest, Shadowing) {
- auto* src = R"(
+ auto* src = R"(
struct SB {
x : i32,
arr : array<i32>,
@@ -427,7 +427,7 @@ struct SB {
@group(0) @binding(0) var<storage, read> a : SB;
@group(0) @binding(1) var<storage, read> b : SB;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
let x = &a;
var a : u32 = arrayLength(&a.arr);
@@ -437,8 +437,8 @@ fn main() {
}
)";
- auto* expect =
- R"(
+ auto* expect =
+ R"(
@internal(intrinsic_buffer_size)
fn tint_symbol(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, result : ptr<function, u32>)
@@ -451,7 +451,7 @@ struct SB {
@group(0) @binding(1) var<storage, read> b : SB;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
var tint_symbol_1 : u32 = 0u;
tint_symbol(a, &(tint_symbol_1));
@@ -466,14 +466,14 @@ fn main() {
}
)";
- auto got = Run<Unshadow, SimplifyPointers, CalculateArrayLength>(src);
+ auto got = Run<Unshadow, SimplifyPointers, CalculateArrayLength>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CalculateArrayLengthTest, OutOfOrder) {
- auto* src = R"(
-@stage(compute) @workgroup_size(1)
+ auto* src = R"(
+@compute @workgroup_size(1)
fn main() {
var len1 : u32 = arrayLength(&(sb1.arr1));
var len2 : u32 = arrayLength(&(sb2.arr2));
@@ -498,7 +498,7 @@ struct SB2 {
@group(0) @binding(2) var<storage, read> sb3 : array<i32>;
)";
- auto* expect = R"(
+ auto* expect = R"(
@internal(intrinsic_buffer_size)
fn tint_symbol(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB1, result : ptr<function, u32>)
@@ -508,7 +508,7 @@ fn tint_symbol_3(@internal(disable_validation__ignore_constructible_function_par
@internal(intrinsic_buffer_size)
fn tint_symbol_6(@internal(disable_validation__ignore_constructible_function_parameter) buffer : array<i32>, result : ptr<function, u32>)
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
var tint_symbol_1 : u32 = 0u;
tint_symbol(sb1, &(tint_symbol_1));
@@ -542,9 +542,9 @@ struct SB2 {
@group(0) @binding(2) var<storage, read> sb3 : array<i32>;
)";
- auto got = Run<Unshadow, SimplifyPointers, CalculateArrayLength>(src);
+ auto got = Run<Unshadow, SimplifyPointers, CalculateArrayLength>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/transform/canonicalize_entry_point_io.cc b/chromium/third_party/dawn/src/tint/transform/canonicalize_entry_point_io.cc
index 9d72887cddb..44d577bb521 100644
--- a/chromium/third_party/dawn/src/tint/transform/canonicalize_entry_point_io.cc
+++ b/chromium/third_party/dawn/src/tint/transform/canonicalize_entry_point_io.cc
@@ -25,6 +25,8 @@
#include "src/tint/sem/function.h"
#include "src/tint/transform/unshadow.h"
+using namespace tint::number_suffixes; // NOLINT
+
TINT_INSTANTIATE_TYPEINFO(tint::transform::CanonicalizeEntryPointIO);
TINT_INSTANTIATE_TYPEINFO(tint::transform::CanonicalizeEntryPointIO::Config);
@@ -38,730 +40,702 @@ namespace {
// Comparison function used to reorder struct members such that all members with
// location attributes appear first (ordered by location slot), followed by
// those with builtin attributes.
-bool StructMemberComparator(const ast::StructMember* a,
- const ast::StructMember* b) {
- auto* a_loc = ast::GetAttribute<ast::LocationAttribute>(a->attributes);
- auto* b_loc = ast::GetAttribute<ast::LocationAttribute>(b->attributes);
- auto* a_blt = ast::GetAttribute<ast::BuiltinAttribute>(a->attributes);
- auto* b_blt = ast::GetAttribute<ast::BuiltinAttribute>(b->attributes);
- if (a_loc) {
- if (!b_loc) {
- // `a` has location attribute and `b` does not: `a` goes first.
- return true;
- }
- // Both have location attributes: smallest goes first.
- return a_loc->value < b_loc->value;
- } else {
- if (b_loc) {
- // `b` has location attribute and `a` does not: `b` goes first.
- return false;
+bool StructMemberComparator(const ast::StructMember* a, const ast::StructMember* b) {
+ auto* a_loc = ast::GetAttribute<ast::LocationAttribute>(a->attributes);
+ auto* b_loc = ast::GetAttribute<ast::LocationAttribute>(b->attributes);
+ auto* a_blt = ast::GetAttribute<ast::BuiltinAttribute>(a->attributes);
+ auto* b_blt = ast::GetAttribute<ast::BuiltinAttribute>(b->attributes);
+ if (a_loc) {
+ if (!b_loc) {
+ // `a` has location attribute and `b` does not: `a` goes first.
+ return true;
+ }
+ // Both have location attributes: smallest goes first.
+ return a_loc->value < b_loc->value;
+ } else {
+ if (b_loc) {
+ // `b` has location attribute and `a` does not: `b` goes first.
+ return false;
+ }
+ // Both are builtins: order doesn't matter, just use enum value.
+ return a_blt->builtin < b_blt->builtin;
}
- // Both are builtins: order doesn't matter, just use enum value.
- return a_blt->builtin < b_blt->builtin;
- }
}
// Returns true if `attr` is a shader IO attribute.
bool IsShaderIOAttribute(const ast::Attribute* attr) {
- return attr->IsAnyOf<ast::BuiltinAttribute, ast::InterpolateAttribute,
- ast::InvariantAttribute, ast::LocationAttribute>();
+ return attr->IsAnyOf<ast::BuiltinAttribute, ast::InterpolateAttribute, ast::InvariantAttribute,
+ ast::LocationAttribute>();
}
// Returns true if `attrs` contains a `sample_mask` builtin.
bool HasSampleMask(const ast::AttributeList& attrs) {
- auto* builtin = ast::GetAttribute<ast::BuiltinAttribute>(attrs);
- return builtin && builtin->builtin == ast::Builtin::kSampleMask;
+ auto* builtin = ast::GetAttribute<ast::BuiltinAttribute>(attrs);
+ return builtin && builtin->builtin == ast::Builtin::kSampleMask;
}
} // namespace
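
StructMemberComparator above orders the wrapper struct so that members carrying @location attributes come first, sorted by slot, with builtin-only members after them. A toy C++ model of that ordering on plain data, for illustration only; ToyMember and ToyComparator are invented names and not part of Tint.

    #include <algorithm>
    #include <optional>
    #include <vector>

    // Illustrative stand-in for an IO struct member: an optional location slot
    // and a numeric stand-in for the builtin enum value.
    struct ToyMember {
        std::optional<int> location;
        int builtin_order = 0;
    };

    bool ToyComparator(const ToyMember& a, const ToyMember& b) {
        if (a.location) {
            if (!b.location) {
                return true;  // located members precede builtin-only members
            }
            return *a.location < *b.location;  // both located: smaller slot first
        }
        if (b.location) {
            return false;
        }
        return a.builtin_order < b.builtin_order;  // both builtins: enum order
    }

    // std::sort(members.begin(), members.end(), ToyComparator);
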
/// State holds the current transform state for a single entry point.
struct CanonicalizeEntryPointIO::State {
- /// OutputValue represents a shader result that the wrapper function produces.
- struct OutputValue {
- /// The name of the output value.
- std::string name;
- /// The type of the output value.
- const ast::Type* type;
- /// The shader IO attributes.
- ast::AttributeList attributes;
- /// The value itself.
- const ast::Expression* value;
- };
-
- /// The clone context.
- CloneContext& ctx;
- /// The transform config.
- CanonicalizeEntryPointIO::Config const cfg;
- /// The entry point function (AST).
- const ast::Function* func_ast;
- /// The entry point function (SEM).
- const sem::Function* func_sem;
-
- /// The new entry point wrapper function's parameters.
- ast::VariableList wrapper_ep_parameters;
- /// The members of the wrapper function's struct parameter.
- ast::StructMemberList wrapper_struct_param_members;
- /// The name of the wrapper function's struct parameter.
- Symbol wrapper_struct_param_name;
- /// The parameters that will be passed to the original function.
- ast::ExpressionList inner_call_parameters;
- /// The members of the wrapper function's struct return type.
- ast::StructMemberList wrapper_struct_output_members;
- /// The wrapper function output values.
- std::vector<OutputValue> wrapper_output_values;
- /// The body of the wrapper function.
- ast::StatementList wrapper_body;
- /// Input names used by the entrypoint
- std::unordered_set<std::string> input_names;
-
- /// Constructor
- /// @param context the clone context
- /// @param config the transform config
- /// @param function the entry point function
- State(CloneContext& context,
- const CanonicalizeEntryPointIO::Config& config,
- const ast::Function* function)
- : ctx(context),
- cfg(config),
- func_ast(function),
- func_sem(ctx.src->Sem().Get(function)) {}
-
- /// Clones the shader IO attributes from `src`.
- /// @param src the attributes to clone
- /// @param do_interpolate whether to clone InterpolateAttribute
- /// @return the cloned attributes
- ast::AttributeList CloneShaderIOAttributes(const ast::AttributeList& src,
- bool do_interpolate) {
- ast::AttributeList new_attributes;
- for (auto* attr : src) {
- if (IsShaderIOAttribute(attr) &&
- (do_interpolate || !attr->Is<ast::InterpolateAttribute>())) {
- new_attributes.push_back(ctx.Clone(attr));
- }
- }
- return new_attributes;
- }
-
- /// Create or return a symbol for the wrapper function's struct parameter.
- /// @returns the symbol for the struct parameter
- Symbol InputStructSymbol() {
- if (!wrapper_struct_param_name.IsValid()) {
- wrapper_struct_param_name = ctx.dst->Sym();
- }
- return wrapper_struct_param_name;
- }
-
- /// Add a shader input to the entry point.
- /// @param name the name of the shader input
- /// @param type the type of the shader input
- /// @param attributes the attributes to apply to the shader input
- /// @returns an expression which evaluates to the value of the shader input
- const ast::Expression* AddInput(std::string name,
- const sem::Type* type,
- ast::AttributeList attributes) {
- auto* ast_type = CreateASTTypeFor(ctx, type);
- if (cfg.shader_style == ShaderStyle::kSpirv ||
- cfg.shader_style == ShaderStyle::kGlsl) {
- // Vulkan requires that integer user-defined fragment inputs are
- // always decorated with `Flat`.
- // TODO(crbug.com/tint/1224): Remove this once a flat interpolation
- // attribute is required for integers.
- if (type->is_integer_scalar_or_vector() &&
- ast::HasAttribute<ast::LocationAttribute>(attributes) &&
- !ast::HasAttribute<ast::InterpolateAttribute>(attributes) &&
- func_ast->PipelineStage() == ast::PipelineStage::kFragment) {
- attributes.push_back(ctx.dst->Interpolate(
- ast::InterpolationType::kFlat, ast::InterpolationSampling::kNone));
- }
-
- // Disable validation for use of the `input` storage class.
- attributes.push_back(
- ctx.dst->Disable(ast::DisabledValidation::kIgnoreStorageClass));
-
- // In GLSL, if it's a builtin, override the name with the
- // corresponding gl_ builtin name
- auto* builtin = ast::GetAttribute<ast::BuiltinAttribute>(attributes);
- if (cfg.shader_style == ShaderStyle::kGlsl && builtin) {
- name = GLSLBuiltinToString(builtin->builtin, func_ast->PipelineStage(),
- ast::StorageClass::kInput);
- }
- auto symbol = ctx.dst->Symbols().New(name);
-
- // Create the global variable and use its value for the shader input.
- const ast::Expression* value = ctx.dst->Expr(symbol);
-
- if (builtin) {
- if (cfg.shader_style == ShaderStyle::kGlsl) {
- value = FromGLSLBuiltin(builtin->builtin, value, ast_type);
- } else if (builtin->builtin == ast::Builtin::kSampleMask) {
- // Vulkan requires the type of a SampleMask builtin to be an array.
- // Declare it as array<u32, 1> and then load the first element.
- ast_type = ctx.dst->ty.array(ast_type, 1);
- value = ctx.dst->IndexAccessor(value, 0);
+ /// OutputValue represents a shader result that the wrapper function produces.
+ struct OutputValue {
+ /// The name of the output value.
+ std::string name;
+ /// The type of the output value.
+ const ast::Type* type;
+ /// The shader IO attributes.
+ ast::AttributeList attributes;
+ /// The value itself.
+ const ast::Expression* value;
+ };
+
+ /// The clone context.
+ CloneContext& ctx;
+ /// The transform config.
+ CanonicalizeEntryPointIO::Config const cfg;
+ /// The entry point function (AST).
+ const ast::Function* func_ast;
+ /// The entry point function (SEM).
+ const sem::Function* func_sem;
+
+ /// The new entry point wrapper function's parameters.
+ ast::VariableList wrapper_ep_parameters;
+ /// The members of the wrapper function's struct parameter.
+ ast::StructMemberList wrapper_struct_param_members;
+ /// The name of the wrapper function's struct parameter.
+ Symbol wrapper_struct_param_name;
+ /// The parameters that will be passed to the original function.
+ ast::ExpressionList inner_call_parameters;
+ /// The members of the wrapper function's struct return type.
+ ast::StructMemberList wrapper_struct_output_members;
+ /// The wrapper function output values.
+ std::vector<OutputValue> wrapper_output_values;
+ /// The body of the wrapper function.
+ ast::StatementList wrapper_body;
+ /// Input names used by the entrypoint
+ std::unordered_set<std::string> input_names;
+
+ /// Constructor
+ /// @param context the clone context
+ /// @param config the transform config
+ /// @param function the entry point function
+ State(CloneContext& context,
+ const CanonicalizeEntryPointIO::Config& config,
+ const ast::Function* function)
+ : ctx(context), cfg(config), func_ast(function), func_sem(ctx.src->Sem().Get(function)) {}
+
+ /// Clones the shader IO attributes from `src`.
+ /// @param src the attributes to clone
+ /// @param do_interpolate whether to clone InterpolateAttribute
+ /// @return the cloned attributes
+ ast::AttributeList CloneShaderIOAttributes(const ast::AttributeList& src, bool do_interpolate) {
+ ast::AttributeList new_attributes;
+ for (auto* attr : src) {
+ if (IsShaderIOAttribute(attr) &&
+ (do_interpolate || !attr->Is<ast::InterpolateAttribute>())) {
+ new_attributes.push_back(ctx.Clone(attr));
+ }
}
- }
- ctx.dst->Global(symbol, ast_type, ast::StorageClass::kInput,
- std::move(attributes));
- return value;
- } else if (cfg.shader_style == ShaderStyle::kMsl &&
- ast::HasAttribute<ast::BuiltinAttribute>(attributes)) {
- // If this input is a builtin and we are targeting MSL, then add it to the
- // parameter list and pass it directly to the inner function.
- Symbol symbol = input_names.emplace(name).second
- ? ctx.dst->Symbols().Register(name)
- : ctx.dst->Symbols().New(name);
- wrapper_ep_parameters.push_back(
- ctx.dst->Param(symbol, ast_type, std::move(attributes)));
- return ctx.dst->Expr(symbol);
- } else {
- // Otherwise, move it to the new structure member list.
- Symbol symbol = input_names.emplace(name).second
- ? ctx.dst->Symbols().Register(name)
- : ctx.dst->Symbols().New(name);
- wrapper_struct_param_members.push_back(
- ctx.dst->Member(symbol, ast_type, std::move(attributes)));
- return ctx.dst->MemberAccessor(InputStructSymbol(), symbol);
- }
- }
-
- /// Add a shader output to the entry point.
- /// @param name the name of the shader output
- /// @param type the type of the shader output
- /// @param attributes the attributes to apply to the shader output
- /// @param value the value of the shader output
- void AddOutput(std::string name,
- const sem::Type* type,
- ast::AttributeList attributes,
- const ast::Expression* value) {
- // Vulkan requires that integer user-defined vertex outputs are
- // always decorated with `Flat`.
- // TODO(crbug.com/tint/1224): Remove this once a flat interpolation
- // attribute is required for integers.
- if (cfg.shader_style == ShaderStyle::kSpirv &&
- type->is_integer_scalar_or_vector() &&
- ast::HasAttribute<ast::LocationAttribute>(attributes) &&
- !ast::HasAttribute<ast::InterpolateAttribute>(attributes) &&
- func_ast->PipelineStage() == ast::PipelineStage::kVertex) {
- attributes.push_back(ctx.dst->Interpolate(
- ast::InterpolationType::kFlat, ast::InterpolationSampling::kNone));
+ return new_attributes;
}
- // In GLSL, if it's a builtin, override the name with the
- // corresponding gl_ builtin name
- if (cfg.shader_style == ShaderStyle::kGlsl) {
- if (auto* b = ast::GetAttribute<ast::BuiltinAttribute>(attributes)) {
- name = GLSLBuiltinToString(b->builtin, func_ast->PipelineStage(),
- ast::StorageClass::kOutput);
- value = ToGLSLBuiltin(b->builtin, value, type);
- }
+ /// Create or return a symbol for the wrapper function's struct parameter.
+ /// @returns the symbol for the struct parameter
+ Symbol InputStructSymbol() {
+ if (!wrapper_struct_param_name.IsValid()) {
+ wrapper_struct_param_name = ctx.dst->Sym();
+ }
+ return wrapper_struct_param_name;
}
- OutputValue output;
- output.name = name;
- output.type = CreateASTTypeFor(ctx, type);
- output.attributes = std::move(attributes);
- output.value = value;
- wrapper_output_values.push_back(output);
- }
-
- /// Process a non-struct parameter.
- /// This creates a new object for the shader input, moving the shader IO
- /// attributes to it. It also adds an expression to the list of parameters
- /// that will be passed to the original function.
- /// @param param the original function parameter
- void ProcessNonStructParameter(const sem::Parameter* param) {
- // Remove the shader IO attributes from the inner function parameter, and
- // attach them to the new object instead.
- ast::AttributeList attributes;
- for (auto* attr : param->Declaration()->attributes) {
- if (IsShaderIOAttribute(attr)) {
- ctx.Remove(param->Declaration()->attributes, attr);
- attributes.push_back(ctx.Clone(attr));
- }
+ /// Add a shader input to the entry point.
+ /// @param name the name of the shader input
+ /// @param type the type of the shader input
+ /// @param attributes the attributes to apply to the shader input
+ /// @returns an expression which evaluates to the value of the shader input
+ const ast::Expression* AddInput(std::string name,
+ const sem::Type* type,
+ ast::AttributeList attributes) {
+ auto* ast_type = CreateASTTypeFor(ctx, type);
+ if (cfg.shader_style == ShaderStyle::kSpirv || cfg.shader_style == ShaderStyle::kGlsl) {
+ // Vulkan requires that integer user-defined fragment inputs are
+ // always decorated with `Flat`.
+ // TODO(crbug.com/tint/1224): Remove this once a flat interpolation
+ // attribute is required for integers.
+ if (type->is_integer_scalar_or_vector() &&
+ ast::HasAttribute<ast::LocationAttribute>(attributes) &&
+ !ast::HasAttribute<ast::InterpolateAttribute>(attributes) &&
+ func_ast->PipelineStage() == ast::PipelineStage::kFragment) {
+ attributes.push_back(ctx.dst->Interpolate(ast::InterpolationType::kFlat,
+ ast::InterpolationSampling::kNone));
+ }
+
+ // Disable validation for use of the `input` storage class.
+ attributes.push_back(ctx.dst->Disable(ast::DisabledValidation::kIgnoreStorageClass));
+
+ // In GLSL, if it's a builtin, override the name with the
+ // corresponding gl_ builtin name
+ auto* builtin = ast::GetAttribute<ast::BuiltinAttribute>(attributes);
+ if (cfg.shader_style == ShaderStyle::kGlsl && builtin) {
+ name = GLSLBuiltinToString(builtin->builtin, func_ast->PipelineStage(),
+ ast::StorageClass::kInput);
+ }
+ auto symbol = ctx.dst->Symbols().New(name);
+
+ // Create the global variable and use its value for the shader input.
+ const ast::Expression* value = ctx.dst->Expr(symbol);
+
+ if (builtin) {
+ if (cfg.shader_style == ShaderStyle::kGlsl) {
+ value = FromGLSLBuiltin(builtin->builtin, value, ast_type);
+ } else if (builtin->builtin == ast::Builtin::kSampleMask) {
+ // Vulkan requires the type of a SampleMask builtin to be an array.
+ // Declare it as array<u32, 1> and then load the first element.
+ ast_type = ctx.dst->ty.array(ast_type, 1_u);
+ value = ctx.dst->IndexAccessor(value, 0_i);
+ }
+ }
+ ctx.dst->Global(symbol, ast_type, ast::StorageClass::kInput, std::move(attributes));
+ return value;
+ } else if (cfg.shader_style == ShaderStyle::kMsl &&
+ ast::HasAttribute<ast::BuiltinAttribute>(attributes)) {
+ // If this input is a builtin and we are targeting MSL, then add it to the
+ // parameter list and pass it directly to the inner function.
+ Symbol symbol = input_names.emplace(name).second ? ctx.dst->Symbols().Register(name)
+ : ctx.dst->Symbols().New(name);
+ wrapper_ep_parameters.push_back(
+ ctx.dst->Param(symbol, ast_type, std::move(attributes)));
+ return ctx.dst->Expr(symbol);
+ } else {
+ // Otherwise, move it to the new structure member list.
+ Symbol symbol = input_names.emplace(name).second ? ctx.dst->Symbols().Register(name)
+ : ctx.dst->Symbols().New(name);
+ wrapper_struct_param_members.push_back(
+ ctx.dst->Member(symbol, ast_type, std::move(attributes)));
+ return ctx.dst->MemberAccessor(InputStructSymbol(), symbol);
+ }
}
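
A minimal sketch, not part of this patch, restating the three emission strategies that AddInput() above chooses between; the InputRoute enum and RouteForInput helper are invented names for illustration only.

#include "src/tint/transform/canonicalize_entry_point_io.h"

// Sketch: conceptual summary of AddInput()'s routing. Not real Tint code.
enum class InputRoute {
    kGlobalVariable,    // SPIR-V / GLSL: module-scope var<in>, with gl_* renaming for GLSL
    kWrapperParameter,  // MSL builtin inputs: kept as wrapper entry-point parameters
    kStructMember,      // everything else: aggregated into the wrapper input struct
};

InputRoute RouteForInput(tint::transform::CanonicalizeEntryPointIO::ShaderStyle style,
                         bool is_builtin) {
    using ShaderStyle = tint::transform::CanonicalizeEntryPointIO::ShaderStyle;
    if (style == ShaderStyle::kSpirv || style == ShaderStyle::kGlsl) {
        return InputRoute::kGlobalVariable;
    }
    if (style == ShaderStyle::kMsl && is_builtin) {
        return InputRoute::kWrapperParameter;
    }
    return InputRoute::kStructMember;
}
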
- auto name = ctx.src->Symbols().NameFor(param->Declaration()->symbol);
- auto* input_expr = AddInput(name, param->Type(), std::move(attributes));
- inner_call_parameters.push_back(input_expr);
- }
-
- /// Process a struct parameter.
- /// This creates new objects for each struct member, moving the shader IO
- /// attributes to them. It also creates the structure that will be passed to
- /// the original function.
- /// @param param the original function parameter
- void ProcessStructParameter(const sem::Parameter* param) {
- auto* str = param->Type()->As<sem::Struct>();
-
- // Recreate struct members in the outer entry point and build an initializer
- // list to pass them through to the inner function.
- ast::ExpressionList inner_struct_values;
- for (auto* member : str->Members()) {
- if (member->Type()->Is<sem::Struct>()) {
- TINT_ICE(Transform, ctx.dst->Diagnostics()) << "nested IO struct";
- continue;
- }
-
- auto* member_ast = member->Declaration();
- auto name = ctx.src->Symbols().NameFor(member_ast->symbol);
-
- // In GLSL, do not add interpolation attributes on vertex input
- bool do_interpolate = true;
- if (cfg.shader_style == ShaderStyle::kGlsl &&
- func_ast->PipelineStage() == ast::PipelineStage::kVertex) {
- do_interpolate = false;
- }
- auto attributes =
- CloneShaderIOAttributes(member_ast->attributes, do_interpolate);
- auto* input_expr = AddInput(name, member->Type(), std::move(attributes));
- inner_struct_values.push_back(input_expr);
+ /// Add a shader output to the entry point.
+ /// @param name the name of the shader output
+ /// @param type the type of the shader output
+ /// @param attributes the attributes to apply to the shader output
+ /// @param value the value of the shader output
+ void AddOutput(std::string name,
+ const sem::Type* type,
+ ast::AttributeList attributes,
+ const ast::Expression* value) {
+ // Vulkan requires that integer user-defined vertex outputs are
+ // always decorated with `Flat`.
+ // TODO(crbug.com/tint/1224): Remove this once a flat interpolation
+ // attribute is required for integers.
+ if (cfg.shader_style == ShaderStyle::kSpirv && type->is_integer_scalar_or_vector() &&
+ ast::HasAttribute<ast::LocationAttribute>(attributes) &&
+ !ast::HasAttribute<ast::InterpolateAttribute>(attributes) &&
+ func_ast->PipelineStage() == ast::PipelineStage::kVertex) {
+ attributes.push_back(ctx.dst->Interpolate(ast::InterpolationType::kFlat,
+ ast::InterpolationSampling::kNone));
+ }
+
+ // In GLSL, if it's a builtin, override the name with the
+ // corresponding gl_ builtin name
+ if (cfg.shader_style == ShaderStyle::kGlsl) {
+ if (auto* b = ast::GetAttribute<ast::BuiltinAttribute>(attributes)) {
+ name = GLSLBuiltinToString(b->builtin, func_ast->PipelineStage(),
+ ast::StorageClass::kOutput);
+ value = ToGLSLBuiltin(b->builtin, value, type);
+ }
+ }
+
+ OutputValue output;
+ output.name = name;
+ output.type = CreateASTTypeFor(ctx, type);
+ output.attributes = std::move(attributes);
+ output.value = value;
+ wrapper_output_values.push_back(output);
}
- // Construct the original structure using the new shader input objects.
- inner_call_parameters.push_back(ctx.dst->Construct(
- ctx.Clone(param->Declaration()->type), inner_struct_values));
- }
-
- /// Process the entry point return type.
- /// This generates a list of output values that are returned by the original
- /// function.
- /// @param inner_ret_type the original function return type
- /// @param original_result the result object produced by the original function
- void ProcessReturnType(const sem::Type* inner_ret_type,
- Symbol original_result) {
- bool do_interpolate = true;
- // In GLSL, do not add interpolation attributes on fragment output
- if (cfg.shader_style == ShaderStyle::kGlsl &&
- func_ast->PipelineStage() == ast::PipelineStage::kFragment) {
- do_interpolate = false;
+ /// Process a non-struct parameter.
+ /// This creates a new object for the shader input, moving the shader IO
+ /// attributes to it. It also adds an expression to the list of parameters
+ /// that will be passed to the original function.
+ /// @param param the original function parameter
+ void ProcessNonStructParameter(const sem::Parameter* param) {
+ // Remove the shader IO attributes from the inner function parameter, and
+ // attach them to the new object instead.
+ ast::AttributeList attributes;
+ for (auto* attr : param->Declaration()->attributes) {
+ if (IsShaderIOAttribute(attr)) {
+ ctx.Remove(param->Declaration()->attributes, attr);
+ attributes.push_back(ctx.Clone(attr));
+ }
+ }
+
+ auto name = ctx.src->Symbols().NameFor(param->Declaration()->symbol);
+ auto* input_expr = AddInput(name, param->Type(), std::move(attributes));
+ inner_call_parameters.push_back(input_expr);
}
- if (auto* str = inner_ret_type->As<sem::Struct>()) {
- for (auto* member : str->Members()) {
- if (member->Type()->Is<sem::Struct>()) {
- TINT_ICE(Transform, ctx.dst->Diagnostics()) << "nested IO struct";
- continue;
+
+ /// Process a struct parameter.
+ /// This creates new objects for each struct member, moving the shader IO
+ /// attributes to them. It also creates the structure that will be passed to
+ /// the original function.
+ /// @param param the original function parameter
+ void ProcessStructParameter(const sem::Parameter* param) {
+ auto* str = param->Type()->As<sem::Struct>();
+
+ // Recreate struct members in the outer entry point and build an initializer
+ // list to pass them through to the inner function.
+ ast::ExpressionList inner_struct_values;
+ for (auto* member : str->Members()) {
+ if (member->Type()->Is<sem::Struct>()) {
+ TINT_ICE(Transform, ctx.dst->Diagnostics()) << "nested IO struct";
+ continue;
+ }
+
+ auto* member_ast = member->Declaration();
+ auto name = ctx.src->Symbols().NameFor(member_ast->symbol);
+
+ // In GLSL, do not add interpolation attributes on vertex input
+ bool do_interpolate = true;
+ if (cfg.shader_style == ShaderStyle::kGlsl &&
+ func_ast->PipelineStage() == ast::PipelineStage::kVertex) {
+ do_interpolate = false;
+ }
+ auto attributes = CloneShaderIOAttributes(member_ast->attributes, do_interpolate);
+ auto* input_expr = AddInput(name, member->Type(), std::move(attributes));
+ inner_struct_values.push_back(input_expr);
}
- auto* member_ast = member->Declaration();
- auto name = ctx.src->Symbols().NameFor(member_ast->symbol);
- auto attributes =
- CloneShaderIOAttributes(member_ast->attributes, do_interpolate);
-
- // Extract the original structure member.
- AddOutput(name, member->Type(), std::move(attributes),
- ctx.dst->MemberAccessor(original_result, name));
- }
- } else if (!inner_ret_type->Is<sem::Void>()) {
- auto attributes = CloneShaderIOAttributes(
- func_ast->return_type_attributes, do_interpolate);
-
- // Propagate the non-struct return value as is.
- AddOutput("value", func_sem->ReturnType(), std::move(attributes),
- ctx.dst->Expr(original_result));
+ // Construct the original structure using the new shader input objects.
+ inner_call_parameters.push_back(
+ ctx.dst->Construct(ctx.Clone(param->Declaration()->type), inner_struct_values));
}
- }
-
- /// Add a fixed sample mask to the wrapper function output.
- /// If there is already a sample mask, bitwise-and it with the fixed mask.
- /// Otherwise, create a new output value from the fixed mask.
- void AddFixedSampleMask() {
- // Check the existing output values for a sample mask builtin.
- for (auto& outval : wrapper_output_values) {
- if (HasSampleMask(outval.attributes)) {
- // Combine the authored sample mask with the fixed mask.
- outval.value = ctx.dst->And(outval.value, cfg.fixed_sample_mask);
- return;
- }
+
+ /// Process the entry point return type.
+ /// This generates a list of output values that are returned by the original
+ /// function.
+ /// @param inner_ret_type the original function return type
+ /// @param original_result the result object produced by the original function
+ void ProcessReturnType(const sem::Type* inner_ret_type, Symbol original_result) {
+ bool do_interpolate = true;
+ // In GLSL, do not add interpolation attributes on fragment output
+ if (cfg.shader_style == ShaderStyle::kGlsl &&
+ func_ast->PipelineStage() == ast::PipelineStage::kFragment) {
+ do_interpolate = false;
+ }
+ if (auto* str = inner_ret_type->As<sem::Struct>()) {
+ for (auto* member : str->Members()) {
+ if (member->Type()->Is<sem::Struct>()) {
+ TINT_ICE(Transform, ctx.dst->Diagnostics()) << "nested IO struct";
+ continue;
+ }
+
+ auto* member_ast = member->Declaration();
+ auto name = ctx.src->Symbols().NameFor(member_ast->symbol);
+ auto attributes = CloneShaderIOAttributes(member_ast->attributes, do_interpolate);
+
+ // Extract the original structure member.
+ AddOutput(name, member->Type(), std::move(attributes),
+ ctx.dst->MemberAccessor(original_result, name));
+ }
+ } else if (!inner_ret_type->Is<sem::Void>()) {
+ auto attributes =
+ CloneShaderIOAttributes(func_ast->return_type_attributes, do_interpolate);
+
+ // Propagate the non-struct return value as is.
+ AddOutput("value", func_sem->ReturnType(), std::move(attributes),
+ ctx.dst->Expr(original_result));
+ }
}
- // No existing sample mask builtin was found, so create a new output value
- // using the fixed sample mask.
- AddOutput("fixed_sample_mask", ctx.dst->create<sem::U32>(),
- {ctx.dst->Builtin(ast::Builtin::kSampleMask)},
- ctx.dst->Expr(cfg.fixed_sample_mask));
- }
-
- /// Add a point size builtin to the wrapper function output.
- void AddVertexPointSize() {
- // Create a new output value and assign it a literal 1.0 value.
- AddOutput("vertex_point_size", ctx.dst->create<sem::F32>(),
- {ctx.dst->Builtin(ast::Builtin::kPointSize)}, ctx.dst->Expr(1.f));
- }
-
- /// Create an expression for gl_Position.[component]
- /// @param component the component of gl_Position to access
- /// @returns the new expression
- const ast::Expression* GLPosition(const char* component) {
- Symbol pos = ctx.dst->Symbols().Register("gl_Position");
- Symbol c = ctx.dst->Symbols().Register(component);
- return ctx.dst->MemberAccessor(ctx.dst->Expr(pos), ctx.dst->Expr(c));
- }
-
- /// Create the wrapper function's struct parameter and type objects.
- void CreateInputStruct() {
- // Sort the struct members to satisfy HLSL interfacing matching rules.
- std::sort(wrapper_struct_param_members.begin(),
- wrapper_struct_param_members.end(), StructMemberComparator);
-
- // Create the new struct type.
- auto struct_name = ctx.dst->Sym();
- auto* in_struct = ctx.dst->create<ast::Struct>(
- struct_name, wrapper_struct_param_members, ast::AttributeList{});
- ctx.InsertBefore(ctx.src->AST().GlobalDeclarations(), func_ast, in_struct);
-
- // Create a new function parameter using this struct type.
- auto* param =
- ctx.dst->Param(InputStructSymbol(), ctx.dst->ty.type_name(struct_name));
- wrapper_ep_parameters.push_back(param);
- }
-
- /// Create and return the wrapper function's struct result object.
- /// @returns the struct type
- ast::Struct* CreateOutputStruct() {
- ast::StatementList assignments;
-
- auto wrapper_result = ctx.dst->Symbols().New("wrapper_result");
-
- // Create the struct members and their corresponding assignment statements.
- std::unordered_set<std::string> member_names;
- for (auto& outval : wrapper_output_values) {
- // Use the original output name, unless that is already taken.
- Symbol name;
- if (member_names.count(outval.name)) {
- name = ctx.dst->Symbols().New(outval.name);
- } else {
- name = ctx.dst->Symbols().Register(outval.name);
- }
- member_names.insert(ctx.dst->Symbols().NameFor(name));
-
- wrapper_struct_output_members.push_back(
- ctx.dst->Member(name, outval.type, std::move(outval.attributes)));
- assignments.push_back(ctx.dst->Assign(
- ctx.dst->MemberAccessor(wrapper_result, name), outval.value));
+ /// Add a fixed sample mask to the wrapper function output.
+ /// If there is already a sample mask, bitwise-and it with the fixed mask.
+ /// Otherwise, create a new output value from the fixed mask.
+ void AddFixedSampleMask() {
+ // Check the existing output values for a sample mask builtin.
+ for (auto& outval : wrapper_output_values) {
+ if (HasSampleMask(outval.attributes)) {
+ // Combine the authored sample mask with the fixed mask.
+ outval.value = ctx.dst->And(outval.value, u32(cfg.fixed_sample_mask));
+ return;
+ }
+ }
+
+ // No existing sample mask builtin was found, so create a new output value
+ // using the fixed sample mask.
+ AddOutput("fixed_sample_mask", ctx.dst->create<sem::U32>(),
+ {ctx.dst->Builtin(ast::Builtin::kSampleMask)},
+ ctx.dst->Expr(u32(cfg.fixed_sample_mask)));
}
- // Sort the struct members to satisfy HLSL interfacing matching rules.
- std::sort(wrapper_struct_output_members.begin(),
- wrapper_struct_output_members.end(), StructMemberComparator);
-
- // Create the new struct type.
- auto* out_struct = ctx.dst->create<ast::Struct>(
- ctx.dst->Sym(), wrapper_struct_output_members, ast::AttributeList{});
- ctx.InsertBefore(ctx.src->AST().GlobalDeclarations(), func_ast, out_struct);
-
- // Create the output struct object, assign its members, and return it.
- auto* result_object =
- ctx.dst->Var(wrapper_result, ctx.dst->ty.type_name(out_struct->name));
- wrapper_body.push_back(ctx.dst->Decl(result_object));
- wrapper_body.insert(wrapper_body.end(), assignments.begin(),
- assignments.end());
- wrapper_body.push_back(ctx.dst->Return(wrapper_result));
-
- return out_struct;
- }
-
- /// Create and assign the wrapper function's output variables.
- void CreateGlobalOutputVariables() {
- for (auto& outval : wrapper_output_values) {
- // Disable validation for use of the `output` storage class.
- ast::AttributeList attributes = std::move(outval.attributes);
- attributes.push_back(
- ctx.dst->Disable(ast::DisabledValidation::kIgnoreStorageClass));
-
- // Create the global variable and assign it the output value.
- auto name = ctx.dst->Symbols().New(outval.name);
- auto* type = outval.type;
- const ast::Expression* lhs = ctx.dst->Expr(name);
- if (HasSampleMask(attributes)) {
- // Vulkan requires the type of a SampleMask builtin to be an array.
- // Declare it as array<u32, 1> and then store to the first element.
- type = ctx.dst->ty.array(type, 1);
- lhs = ctx.dst->IndexAccessor(lhs, 0);
- }
- ctx.dst->Global(name, type, ast::StorageClass::kOutput,
- std::move(attributes));
- wrapper_body.push_back(ctx.dst->Assign(lhs, outval.value));
+ /// Add a point size builtin to the wrapper function output.
+ void AddVertexPointSize() {
+ // Create a new output value and assign it a literal 1.0 value.
+ AddOutput("vertex_point_size", ctx.dst->create<sem::F32>(),
+ {ctx.dst->Builtin(ast::Builtin::kPointSize)}, ctx.dst->Expr(1_f));
}
- }
-
- // Recreate the original function without entry point attributes and call it.
- /// @returns the inner function call expression
- const ast::CallExpression* CallInnerFunction() {
- Symbol inner_name;
- if (cfg.shader_style == ShaderStyle::kGlsl) {
- // In GLSL, clone the original entry point name, as the wrapper will be
- // called "main".
- inner_name = ctx.Clone(func_ast->symbol);
- } else {
- // Add a suffix to the function name, as the wrapper function will take
- // the original entry point name.
- auto ep_name = ctx.src->Symbols().NameFor(func_ast->symbol);
- inner_name = ctx.dst->Symbols().New(ep_name + "_inner");
+
+ /// Create an expression for gl_Position.[component]
+ /// @param component the component of gl_Position to access
+ /// @returns the new expression
+ const ast::Expression* GLPosition(const char* component) {
+ Symbol pos = ctx.dst->Symbols().Register("gl_Position");
+ Symbol c = ctx.dst->Symbols().Register(component);
+ return ctx.dst->MemberAccessor(ctx.dst->Expr(pos), ctx.dst->Expr(c));
}
- // Clone everything, dropping the function and return type attributes.
- // The parameter attributes will have already been stripped during
- // processing.
- auto* inner_function = ctx.dst->create<ast::Function>(
- inner_name, ctx.Clone(func_ast->params),
- ctx.Clone(func_ast->return_type), ctx.Clone(func_ast->body),
- ast::AttributeList{}, ast::AttributeList{});
- ctx.Replace(func_ast, inner_function);
-
- // Call the function.
- return ctx.dst->Call(inner_function->symbol, inner_call_parameters);
- }
-
- /// Process the entry point function.
- void Process() {
- bool needs_fixed_sample_mask = false;
- bool needs_vertex_point_size = false;
- if (func_ast->PipelineStage() == ast::PipelineStage::kFragment &&
- cfg.fixed_sample_mask != 0xFFFFFFFF) {
- needs_fixed_sample_mask = true;
+ /// Create the wrapper function's struct parameter and type objects.
+ void CreateInputStruct() {
+ // Sort the struct members to satisfy HLSL interfacing matching rules.
+ std::sort(wrapper_struct_param_members.begin(), wrapper_struct_param_members.end(),
+ StructMemberComparator);
+
+ // Create the new struct type.
+ auto struct_name = ctx.dst->Sym();
+ auto* in_struct = ctx.dst->create<ast::Struct>(struct_name, wrapper_struct_param_members,
+ ast::AttributeList{});
+ ctx.InsertBefore(ctx.src->AST().GlobalDeclarations(), func_ast, in_struct);
+
+ // Create a new function parameter using this struct type.
+ auto* param = ctx.dst->Param(InputStructSymbol(), ctx.dst->ty.type_name(struct_name));
+ wrapper_ep_parameters.push_back(param);
}
- if (func_ast->PipelineStage() == ast::PipelineStage::kVertex &&
- cfg.emit_vertex_point_size) {
- needs_vertex_point_size = true;
+
+ /// Create and return the wrapper function's struct result object.
+ /// @returns the struct type
+ ast::Struct* CreateOutputStruct() {
+ ast::StatementList assignments;
+
+ auto wrapper_result = ctx.dst->Symbols().New("wrapper_result");
+
+ // Create the struct members and their corresponding assignment statements.
+ std::unordered_set<std::string> member_names;
+ for (auto& outval : wrapper_output_values) {
+ // Use the original output name, unless that is already taken.
+ Symbol name;
+ if (member_names.count(outval.name)) {
+ name = ctx.dst->Symbols().New(outval.name);
+ } else {
+ name = ctx.dst->Symbols().Register(outval.name);
+ }
+ member_names.insert(ctx.dst->Symbols().NameFor(name));
+
+ wrapper_struct_output_members.push_back(
+ ctx.dst->Member(name, outval.type, std::move(outval.attributes)));
+ assignments.push_back(
+ ctx.dst->Assign(ctx.dst->MemberAccessor(wrapper_result, name), outval.value));
+ }
+
+ // Sort the struct members to satisfy HLSL interfacing matching rules.
+ std::sort(wrapper_struct_output_members.begin(), wrapper_struct_output_members.end(),
+ StructMemberComparator);
+
+ // Create the new struct type.
+ auto* out_struct = ctx.dst->create<ast::Struct>(
+ ctx.dst->Sym(), wrapper_struct_output_members, ast::AttributeList{});
+ ctx.InsertBefore(ctx.src->AST().GlobalDeclarations(), func_ast, out_struct);
+
+ // Create the output struct object, assign its members, and return it.
+ auto* result_object = ctx.dst->Var(wrapper_result, ctx.dst->ty.type_name(out_struct->name));
+ wrapper_body.push_back(ctx.dst->Decl(result_object));
+ wrapper_body.insert(wrapper_body.end(), assignments.begin(), assignments.end());
+ wrapper_body.push_back(ctx.dst->Return(wrapper_result));
+
+ return out_struct;
}
- // Exit early if there is no shader IO to handle.
- if (func_sem->Parameters().size() == 0 &&
- func_sem->ReturnType()->Is<sem::Void>() && !needs_fixed_sample_mask &&
- !needs_vertex_point_size && cfg.shader_style != ShaderStyle::kGlsl) {
- return;
+ /// Create and assign the wrapper function's output variables.
+ void CreateGlobalOutputVariables() {
+ for (auto& outval : wrapper_output_values) {
+ // Disable validation for use of the `output` storage class.
+ ast::AttributeList attributes = std::move(outval.attributes);
+ attributes.push_back(ctx.dst->Disable(ast::DisabledValidation::kIgnoreStorageClass));
+
+ // Create the global variable and assign it the output value.
+ auto name = ctx.dst->Symbols().New(outval.name);
+ auto* type = outval.type;
+ const ast::Expression* lhs = ctx.dst->Expr(name);
+ if (HasSampleMask(attributes)) {
+ // Vulkan requires the type of a SampleMask builtin to be an array.
+ // Declare it as array<u32, 1> and then store to the first element.
+ type = ctx.dst->ty.array(type, 1_u);
+ lhs = ctx.dst->IndexAccessor(lhs, 0_i);
+ }
+ ctx.dst->Global(name, type, ast::StorageClass::kOutput, std::move(attributes));
+ wrapper_body.push_back(ctx.dst->Assign(lhs, outval.value));
+ }
}
- // Process the entry point parameters, collecting those that need to be
- // aggregated into a single structure.
- if (!func_sem->Parameters().empty()) {
- for (auto* param : func_sem->Parameters()) {
- if (param->Type()->Is<sem::Struct>()) {
- ProcessStructParameter(param);
+    /// Recreate the original function without entry point attributes and call it.
+ /// @returns the inner function call expression
+ const ast::CallExpression* CallInnerFunction() {
+ Symbol inner_name;
+ if (cfg.shader_style == ShaderStyle::kGlsl) {
+ // In GLSL, clone the original entry point name, as the wrapper will be
+ // called "main".
+ inner_name = ctx.Clone(func_ast->symbol);
} else {
- ProcessNonStructParameter(param);
+ // Add a suffix to the function name, as the wrapper function will take
+ // the original entry point name.
+ auto ep_name = ctx.src->Symbols().NameFor(func_ast->symbol);
+ inner_name = ctx.dst->Symbols().New(ep_name + "_inner");
}
- }
- // Create a structure parameter for the outer entry point if necessary.
- if (!wrapper_struct_param_members.empty()) {
- CreateInputStruct();
- }
+ // Clone everything, dropping the function and return type attributes.
+ // The parameter attributes will have already been stripped during
+ // processing.
+ auto* inner_function = ctx.dst->create<ast::Function>(
+ inner_name, ctx.Clone(func_ast->params), ctx.Clone(func_ast->return_type),
+ ctx.Clone(func_ast->body), ast::AttributeList{}, ast::AttributeList{});
+ ctx.Replace(func_ast, inner_function);
+
+ // Call the function.
+ return ctx.dst->Call(inner_function->symbol, inner_call_parameters);
}
- // Recreate the original function and call it.
- auto* call_inner = CallInnerFunction();
+ /// Process the entry point function.
+ void Process() {
+ bool needs_fixed_sample_mask = false;
+ bool needs_vertex_point_size = false;
+ if (func_ast->PipelineStage() == ast::PipelineStage::kFragment &&
+ cfg.fixed_sample_mask != 0xFFFFFFFF) {
+ needs_fixed_sample_mask = true;
+ }
+ if (func_ast->PipelineStage() == ast::PipelineStage::kVertex &&
+ cfg.emit_vertex_point_size) {
+ needs_vertex_point_size = true;
+ }
- // Process the return type, and start building the wrapper function body.
- std::function<const ast::Type*()> wrapper_ret_type = [&] {
- return ctx.dst->ty.void_();
- };
- if (func_sem->ReturnType()->Is<sem::Void>()) {
- // The function call is just a statement with no result.
- wrapper_body.push_back(ctx.dst->CallStmt(call_inner));
- } else {
- // Capture the result of calling the original function.
- auto* inner_result = ctx.dst->Const(
- ctx.dst->Symbols().New("inner_result"), nullptr, call_inner);
- wrapper_body.push_back(ctx.dst->Decl(inner_result));
-
- // Process the original return type to determine the outputs that the
- // outer function needs to produce.
- ProcessReturnType(func_sem->ReturnType(), inner_result->symbol);
- }
+ // Exit early if there is no shader IO to handle.
+ if (func_sem->Parameters().size() == 0 && func_sem->ReturnType()->Is<sem::Void>() &&
+ !needs_fixed_sample_mask && !needs_vertex_point_size &&
+ cfg.shader_style != ShaderStyle::kGlsl) {
+ return;
+ }
- // Add a fixed sample mask, if necessary.
- if (needs_fixed_sample_mask) {
- AddFixedSampleMask();
- }
+ // Process the entry point parameters, collecting those that need to be
+ // aggregated into a single structure.
+ if (!func_sem->Parameters().empty()) {
+ for (auto* param : func_sem->Parameters()) {
+ if (param->Type()->Is<sem::Struct>()) {
+ ProcessStructParameter(param);
+ } else {
+ ProcessNonStructParameter(param);
+ }
+ }
+
+ // Create a structure parameter for the outer entry point if necessary.
+ if (!wrapper_struct_param_members.empty()) {
+ CreateInputStruct();
+ }
+ }
- // Add the pointsize builtin, if necessary.
- if (needs_vertex_point_size) {
- AddVertexPointSize();
- }
+ // Recreate the original function and call it.
+ auto* call_inner = CallInnerFunction();
- // Produce the entry point outputs, if necessary.
- if (!wrapper_output_values.empty()) {
- if (cfg.shader_style == ShaderStyle::kSpirv ||
- cfg.shader_style == ShaderStyle::kGlsl) {
- CreateGlobalOutputVariables();
- } else {
- auto* output_struct = CreateOutputStruct();
- wrapper_ret_type = [&, output_struct] {
- return ctx.dst->ty.type_name(output_struct->name);
- };
- }
- }
+ // Process the return type, and start building the wrapper function body.
+ std::function<const ast::Type*()> wrapper_ret_type = [&] { return ctx.dst->ty.void_(); };
+ if (func_sem->ReturnType()->Is<sem::Void>()) {
+ // The function call is just a statement with no result.
+ wrapper_body.push_back(ctx.dst->CallStmt(call_inner));
+ } else {
+ // Capture the result of calling the original function.
+ auto* inner_result =
+ ctx.dst->Let(ctx.dst->Symbols().New("inner_result"), nullptr, call_inner);
+ wrapper_body.push_back(ctx.dst->Decl(inner_result));
+
+ // Process the original return type to determine the outputs that the
+ // outer function needs to produce.
+ ProcessReturnType(func_sem->ReturnType(), inner_result->symbol);
+ }
- if (cfg.shader_style == ShaderStyle::kGlsl &&
- func_ast->PipelineStage() == ast::PipelineStage::kVertex) {
- auto* pos_y = GLPosition("y");
- auto* negate_pos_y = ctx.dst->create<ast::UnaryOpExpression>(
- ast::UnaryOp::kNegation, GLPosition("y"));
- wrapper_body.push_back(ctx.dst->Assign(pos_y, negate_pos_y));
+ // Add a fixed sample mask, if necessary.
+ if (needs_fixed_sample_mask) {
+ AddFixedSampleMask();
+ }
- auto* two_z = ctx.dst->Mul(ctx.dst->Expr(2.0f), GLPosition("z"));
- auto* fixed_z = ctx.dst->Sub(two_z, GLPosition("w"));
- wrapper_body.push_back(ctx.dst->Assign(GLPosition("z"), fixed_z));
- }
+ // Add the pointsize builtin, if necessary.
+ if (needs_vertex_point_size) {
+ AddVertexPointSize();
+ }
- // Create the wrapper entry point function.
- // For GLSL, use "main", otherwise take the name of the original
- // entry point function.
- Symbol name;
- if (cfg.shader_style == ShaderStyle::kGlsl) {
- name = ctx.dst->Symbols().New("main");
- } else {
- name = ctx.Clone(func_ast->symbol);
- }
+ // Produce the entry point outputs, if necessary.
+ if (!wrapper_output_values.empty()) {
+ if (cfg.shader_style == ShaderStyle::kSpirv || cfg.shader_style == ShaderStyle::kGlsl) {
+ CreateGlobalOutputVariables();
+ } else {
+ auto* output_struct = CreateOutputStruct();
+ wrapper_ret_type = [&, output_struct] {
+ return ctx.dst->ty.type_name(output_struct->name);
+ };
+ }
+ }
- auto* wrapper_func = ctx.dst->create<ast::Function>(
- name, wrapper_ep_parameters, wrapper_ret_type(),
- ctx.dst->Block(wrapper_body), ctx.Clone(func_ast->attributes),
- ast::AttributeList{});
- ctx.InsertAfter(ctx.src->AST().GlobalDeclarations(), func_ast,
- wrapper_func);
- }
-
- /// Retrieve the gl_ string corresponding to a builtin.
- /// @param builtin the builtin
- /// @param stage the current pipeline stage
- /// @param storage_class the storage class (input or output)
- /// @returns the gl_ string corresponding to that builtin
- const char* GLSLBuiltinToString(ast::Builtin builtin,
- ast::PipelineStage stage,
- ast::StorageClass storage_class) {
- switch (builtin) {
- case ast::Builtin::kPosition:
- switch (stage) {
- case ast::PipelineStage::kVertex:
- return "gl_Position";
- case ast::PipelineStage::kFragment:
- return "gl_FragCoord";
- default:
- return "";
+ if (cfg.shader_style == ShaderStyle::kGlsl &&
+ func_ast->PipelineStage() == ast::PipelineStage::kVertex) {
+ auto* pos_y = GLPosition("y");
+ auto* negate_pos_y =
+ ctx.dst->create<ast::UnaryOpExpression>(ast::UnaryOp::kNegation, GLPosition("y"));
+ wrapper_body.push_back(ctx.dst->Assign(pos_y, negate_pos_y));
+
+ auto* two_z = ctx.dst->Mul(ctx.dst->Expr(2_f), GLPosition("z"));
+ auto* fixed_z = ctx.dst->Sub(two_z, GLPosition("w"));
+ wrapper_body.push_back(ctx.dst->Assign(GLPosition("z"), fixed_z));
}
- case ast::Builtin::kVertexIndex:
- return "gl_VertexID";
- case ast::Builtin::kInstanceIndex:
- return "gl_InstanceID";
- case ast::Builtin::kFrontFacing:
- return "gl_FrontFacing";
- case ast::Builtin::kFragDepth:
- return "gl_FragDepth";
- case ast::Builtin::kLocalInvocationId:
- return "gl_LocalInvocationID";
- case ast::Builtin::kLocalInvocationIndex:
- return "gl_LocalInvocationIndex";
- case ast::Builtin::kGlobalInvocationId:
- return "gl_GlobalInvocationID";
- case ast::Builtin::kNumWorkgroups:
- return "gl_NumWorkGroups";
- case ast::Builtin::kWorkgroupId:
- return "gl_WorkGroupID";
- case ast::Builtin::kSampleIndex:
- return "gl_SampleID";
- case ast::Builtin::kSampleMask:
- if (storage_class == ast::StorageClass::kInput) {
- return "gl_SampleMaskIn";
+
+ // Create the wrapper entry point function.
+ // For GLSL, use "main", otherwise take the name of the original
+ // entry point function.
+ Symbol name;
+ if (cfg.shader_style == ShaderStyle::kGlsl) {
+ name = ctx.dst->Symbols().New("main");
} else {
- return "gl_SampleMask";
+ name = ctx.Clone(func_ast->symbol);
}
- default:
- return "";
+
+ auto* wrapper_func = ctx.dst->create<ast::Function>(
+ name, wrapper_ep_parameters, wrapper_ret_type(), ctx.dst->Block(wrapper_body),
+ ctx.Clone(func_ast->attributes), ast::AttributeList{});
+ ctx.InsertAfter(ctx.src->AST().GlobalDeclarations(), func_ast, wrapper_func);
}
- }
-
- /// Convert a given GLSL builtin value to the corresponding WGSL value.
- /// @param builtin the builtin variable
- /// @param value the value to convert
- /// @param ast_type (inout) the incoming WGSL and outgoing GLSL types
- /// @returns an expression representing the GLSL builtin converted to what
- /// WGSL expects
- const ast::Expression* FromGLSLBuiltin(ast::Builtin builtin,
- const ast::Expression* value,
- const ast::Type*& ast_type) {
- switch (builtin) {
- case ast::Builtin::kVertexIndex:
- case ast::Builtin::kInstanceIndex:
- case ast::Builtin::kSampleIndex:
- // GLSL uses i32 for these, so bitcast to u32.
- value = ctx.dst->Bitcast(ast_type, value);
- ast_type = ctx.dst->ty.i32();
- break;
- case ast::Builtin::kSampleMask:
- // gl_SampleMask is an array of i32. Retrieve the first element and
- // bitcast it to u32.
- value = ctx.dst->IndexAccessor(value, 0);
- value = ctx.dst->Bitcast(ast_type, value);
- ast_type = ctx.dst->ty.array(ctx.dst->ty.i32(), 1);
- break;
- default:
- break;
+
+ /// Retrieve the gl_ string corresponding to a builtin.
+ /// @param builtin the builtin
+ /// @param stage the current pipeline stage
+ /// @param storage_class the storage class (input or output)
+ /// @returns the gl_ string corresponding to that builtin
+ const char* GLSLBuiltinToString(ast::Builtin builtin,
+ ast::PipelineStage stage,
+ ast::StorageClass storage_class) {
+ switch (builtin) {
+ case ast::Builtin::kPosition:
+ switch (stage) {
+ case ast::PipelineStage::kVertex:
+ return "gl_Position";
+ case ast::PipelineStage::kFragment:
+ return "gl_FragCoord";
+ default:
+ return "";
+ }
+ case ast::Builtin::kVertexIndex:
+ return "gl_VertexID";
+ case ast::Builtin::kInstanceIndex:
+ return "gl_InstanceID";
+ case ast::Builtin::kFrontFacing:
+ return "gl_FrontFacing";
+ case ast::Builtin::kFragDepth:
+ return "gl_FragDepth";
+ case ast::Builtin::kLocalInvocationId:
+ return "gl_LocalInvocationID";
+ case ast::Builtin::kLocalInvocationIndex:
+ return "gl_LocalInvocationIndex";
+ case ast::Builtin::kGlobalInvocationId:
+ return "gl_GlobalInvocationID";
+ case ast::Builtin::kNumWorkgroups:
+ return "gl_NumWorkGroups";
+ case ast::Builtin::kWorkgroupId:
+ return "gl_WorkGroupID";
+ case ast::Builtin::kSampleIndex:
+ return "gl_SampleID";
+ case ast::Builtin::kSampleMask:
+ if (storage_class == ast::StorageClass::kInput) {
+ return "gl_SampleMaskIn";
+ } else {
+ return "gl_SampleMask";
+ }
+ default:
+ return "";
+ }
}
- return value;
- }
-
- /// Convert a given WGSL value to the type expected when assigning to a
- /// GLSL builtin.
- /// @param builtin the builtin variable
- /// @param value the value to convert
- /// @param type (out) the type to which the value was converted
- /// @returns the converted value which can be assigned to the GLSL builtin
- const ast::Expression* ToGLSLBuiltin(ast::Builtin builtin,
- const ast::Expression* value,
- const sem::Type*& type) {
- switch (builtin) {
- case ast::Builtin::kVertexIndex:
- case ast::Builtin::kInstanceIndex:
- case ast::Builtin::kSampleIndex:
- case ast::Builtin::kSampleMask:
- type = ctx.dst->create<sem::I32>();
- value = ctx.dst->Bitcast(CreateASTTypeFor(ctx, type), value);
- break;
- default:
- break;
+
+ /// Convert a given GLSL builtin value to the corresponding WGSL value.
+ /// @param builtin the builtin variable
+ /// @param value the value to convert
+ /// @param ast_type (inout) the incoming WGSL and outgoing GLSL types
+ /// @returns an expression representing the GLSL builtin converted to what
+ /// WGSL expects
+ const ast::Expression* FromGLSLBuiltin(ast::Builtin builtin,
+ const ast::Expression* value,
+ const ast::Type*& ast_type) {
+ switch (builtin) {
+ case ast::Builtin::kVertexIndex:
+ case ast::Builtin::kInstanceIndex:
+ case ast::Builtin::kSampleIndex:
+ // GLSL uses i32 for these, so bitcast to u32.
+ value = ctx.dst->Bitcast(ast_type, value);
+ ast_type = ctx.dst->ty.i32();
+ break;
+ case ast::Builtin::kSampleMask:
+ // gl_SampleMask is an array of i32. Retrieve the first element and
+ // bitcast it to u32.
+ value = ctx.dst->IndexAccessor(value, 0_i);
+ value = ctx.dst->Bitcast(ast_type, value);
+ ast_type = ctx.dst->ty.array(ctx.dst->ty.i32(), 1_u);
+ break;
+ default:
+ break;
+ }
+ return value;
}
- return value;
- }
-};
-void CanonicalizeEntryPointIO::Run(CloneContext& ctx,
- const DataMap& inputs,
- DataMap&) const {
- auto* cfg = inputs.Get<Config>();
- if (cfg == nullptr) {
- ctx.dst->Diagnostics().add_error(
- diag::System::Transform,
- "missing transform data for " + std::string(TypeInfo().name));
- return;
- }
-
- // Remove entry point IO attributes from struct declarations.
- // New structures will be created for each entry point, as necessary.
- for (auto* ty : ctx.src->AST().TypeDecls()) {
- if (auto* struct_ty = ty->As<ast::Struct>()) {
- for (auto* member : struct_ty->members) {
- for (auto* attr : member->attributes) {
- if (IsShaderIOAttribute(attr)) {
- ctx.Remove(member->attributes, attr);
- }
+ /// Convert a given WGSL value to the type expected when assigning to a
+ /// GLSL builtin.
+ /// @param builtin the builtin variable
+ /// @param value the value to convert
+ /// @param type (out) the type to which the value was converted
+ /// @returns the converted value which can be assigned to the GLSL builtin
+ const ast::Expression* ToGLSLBuiltin(ast::Builtin builtin,
+ const ast::Expression* value,
+ const sem::Type*& type) {
+ switch (builtin) {
+ case ast::Builtin::kVertexIndex:
+ case ast::Builtin::kInstanceIndex:
+ case ast::Builtin::kSampleIndex:
+ case ast::Builtin::kSampleMask:
+ type = ctx.dst->create<sem::I32>();
+ value = ctx.dst->Bitcast(CreateASTTypeFor(ctx, type), value);
+ break;
+ default:
+ break;
}
- }
+ return value;
}
- }
+};
- for (auto* func_ast : ctx.src->AST().Functions()) {
- if (!func_ast->IsEntryPoint()) {
- continue;
+void CanonicalizeEntryPointIO::Run(CloneContext& ctx, const DataMap& inputs, DataMap&) const {
+ auto* cfg = inputs.Get<Config>();
+ if (cfg == nullptr) {
+ ctx.dst->Diagnostics().add_error(
+ diag::System::Transform, "missing transform data for " + std::string(TypeInfo().name));
+ return;
}
- State state(ctx, *cfg, func_ast);
- state.Process();
- }
+ // Remove entry point IO attributes from struct declarations.
+ // New structures will be created for each entry point, as necessary.
+ for (auto* ty : ctx.src->AST().TypeDecls()) {
+ if (auto* struct_ty = ty->As<ast::Struct>()) {
+ for (auto* member : struct_ty->members) {
+ for (auto* attr : member->attributes) {
+ if (IsShaderIOAttribute(attr)) {
+ ctx.Remove(member->attributes, attr);
+ }
+ }
+ }
+ }
+ }
+
+ for (auto* func_ast : ctx.src->AST().Functions()) {
+ if (!func_ast->IsEntryPoint()) {
+ continue;
+ }
+
+ State state(ctx, *cfg, func_ast);
+ state.Process();
+ }
- ctx.Clone();
+ ctx.Clone();
}
CanonicalizeEntryPointIO::Config::Config(ShaderStyle style,
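
Before the header changes below, a hedged sketch of how the updated Config is typically supplied to the transform; the helper name and the 0x0Fu mask are illustrative, while the DataMap pattern mirrors the tests later in this patch.

#include "src/tint/transform/canonicalize_entry_point_io.h"

// Sketch: populating the transform's input data. Values here are examples only.
void AddEntryPointIOConfig(tint::transform::DataMap& data) {
    using tint::transform::CanonicalizeEntryPointIO;
    data.Add<CanonicalizeEntryPointIO::Config>(
        CanonicalizeEntryPointIO::ShaderStyle::kMsl,
        /* fixed_sample_mask */ 0x0Fu,       // ANDed into any authored fragment sample mask
        /* emit_vertex_point_size */ true);  // emit a point-size builtin set to 1.0
}
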
diff --git a/chromium/third_party/dawn/src/tint/transform/canonicalize_entry_point_io.h b/chromium/third_party/dawn/src/tint/transform/canonicalize_entry_point_io.h
index eab4128819d..95f8b197dff 100644
--- a/chromium/third_party/dawn/src/tint/transform/canonicalize_entry_point_io.h
+++ b/chromium/third_party/dawn/src/tint/transform/canonicalize_entry_point_io.h
@@ -34,7 +34,7 @@ namespace tint::transform {
/// @location(2) loc2 : vec4<u32>;
/// };
///
-/// @stage(fragment)
+/// @fragment
/// fn frag_main(@builtin(position) coord : vec4<f32>,
/// locations : Locations) -> @location(0) f32 {
/// if (coord.w > 1.0) {
@@ -71,7 +71,7 @@ namespace tint::transform {
/// return col;
/// }
///
-/// @stage(fragment)
+/// @fragment
/// fn frag_main(in : frag_main_in) -> frag_main_out {
/// let inner_retval = frag_main_inner(in.coord, Locations(in.loc1, in.loc2));
/// var wrapper_result : frag_main_out;
@@ -82,64 +82,61 @@ namespace tint::transform {
///
/// @note Depends on the following transforms to have been run first:
/// * Unshadow
-class CanonicalizeEntryPointIO final
- : public Castable<CanonicalizeEntryPointIO, Transform> {
- public:
- /// ShaderStyle is an enumerator of different ways to emit shader IO.
- enum class ShaderStyle {
- /// Target SPIR-V (using global variables).
- kSpirv,
- /// Target GLSL (using global variables).
- kGlsl,
- /// Target MSL (using non-struct function parameters for builtins).
- kMsl,
- /// Target HLSL (using structures for all IO).
- kHlsl,
- };
+class CanonicalizeEntryPointIO final : public Castable<CanonicalizeEntryPointIO, Transform> {
+ public:
+ /// ShaderStyle is an enumerator of different ways to emit shader IO.
+ enum class ShaderStyle {
+ /// Target SPIR-V (using global variables).
+ kSpirv,
+ /// Target GLSL (using global variables).
+ kGlsl,
+ /// Target MSL (using non-struct function parameters for builtins).
+ kMsl,
+ /// Target HLSL (using structures for all IO).
+ kHlsl,
+ };
- /// Configuration options for the transform.
- struct Config final : public Castable<Config, Data> {
- /// Constructor
- /// @param style the approach to use for emitting shader IO.
- /// @param sample_mask an optional sample mask to combine with shader masks
- /// @param emit_vertex_point_size `true` to generate a pointsize builtin
- explicit Config(ShaderStyle style,
- uint32_t sample_mask = 0xFFFFFFFF,
- bool emit_vertex_point_size = false);
+ /// Configuration options for the transform.
+ struct Config final : public Castable<Config, Data> {
+ /// Constructor
+ /// @param style the approach to use for emitting shader IO.
+ /// @param sample_mask an optional sample mask to combine with shader masks
+ /// @param emit_vertex_point_size `true` to generate a pointsize builtin
+ explicit Config(ShaderStyle style,
+ uint32_t sample_mask = 0xFFFFFFFF,
+ bool emit_vertex_point_size = false);
- /// Copy constructor
- Config(const Config&);
+ /// Copy constructor
+ Config(const Config&);
- /// Destructor
- ~Config() override;
+ /// Destructor
+ ~Config() override;
- /// The approach to use for emitting shader IO.
- const ShaderStyle shader_style;
+ /// The approach to use for emitting shader IO.
+ const ShaderStyle shader_style;
- /// A fixed sample mask to combine into masks produced by fragment shaders.
- const uint32_t fixed_sample_mask;
+ /// A fixed sample mask to combine into masks produced by fragment shaders.
+ const uint32_t fixed_sample_mask;
- /// Set to `true` to generate a pointsize builtin and have it set to 1.0
- /// from all vertex shaders in the module.
- const bool emit_vertex_point_size;
- };
+ /// Set to `true` to generate a pointsize builtin and have it set to 1.0
+ /// from all vertex shaders in the module.
+ const bool emit_vertex_point_size;
+ };
- /// Constructor
- CanonicalizeEntryPointIO();
- ~CanonicalizeEntryPointIO() override;
+ /// Constructor
+ CanonicalizeEntryPointIO();
+ ~CanonicalizeEntryPointIO() override;
- protected:
- /// Runs the transform using the CloneContext built for transforming a
- /// program. Run() is responsible for calling Clone() on the CloneContext.
- /// @param ctx the CloneContext primed with the input program and
- /// ProgramBuilder
- /// @param inputs optional extra transform-specific input data
- /// @param outputs optional extra transform-specific output data
- void Run(CloneContext& ctx,
- const DataMap& inputs,
- DataMap& outputs) const override;
+ protected:
+ /// Runs the transform using the CloneContext built for transforming a
+ /// program. Run() is responsible for calling Clone() on the CloneContext.
+ /// @param ctx the CloneContext primed with the input program and
+ /// ProgramBuilder
+ /// @param inputs optional extra transform-specific input data
+ /// @param outputs optional extra transform-specific output data
+ void Run(CloneContext& ctx, const DataMap& inputs, DataMap& outputs) const override;
- struct State;
+ struct State;
};
} // namespace tint::transform
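
As a reading aid for the fixed_sample_mask option documented in the Config above, a conceptual model of the rule implemented by State::AddFixedSampleMask() in the matching .cc change; the helper below is illustrative only.

#include <cstdint>

// Sketch: the effective sample-mask rule. When the fragment shader already
// writes @builtin(sample_mask), the transform rewrites that output to
// (authored & fixed); otherwise it adds a new sample_mask output holding the
// fixed mask.
uint32_t EffectiveSampleMask(bool shader_writes_mask,
                             uint32_t authored_mask,
                             uint32_t fixed_mask) {
    return shader_writes_mask ? (authored_mask & fixed_mask) : fixed_mask;
}
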
diff --git a/chromium/third_party/dawn/src/tint/transform/canonicalize_entry_point_io_test.cc b/chromium/third_party/dawn/src/tint/transform/canonicalize_entry_point_io_test.cc
index bf4d6999109..f17c5f5e880 100644
--- a/chromium/third_party/dawn/src/tint/transform/canonicalize_entry_point_io_test.cc
+++ b/chromium/third_party/dawn/src/tint/transform/canonicalize_entry_point_io_test.cc
@@ -23,43 +23,42 @@ namespace {
using CanonicalizeEntryPointIOTest = TransformTest;
TEST_F(CanonicalizeEntryPointIOTest, Error_MissingTransformData) {
- auto* src = "";
+ auto* src = "";
- auto* expect =
- "error: missing transform data for "
- "tint::transform::CanonicalizeEntryPointIO";
+ auto* expect =
+ "error: missing transform data for "
+ "tint::transform::CanonicalizeEntryPointIO";
- auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src);
+ auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CanonicalizeEntryPointIOTest, NoShaderIO) {
- // Test that we do not introduce wrapper functions when there is no shader IO
- // to process.
- auto* src = R"(
-@stage(fragment)
+ // Test that we do not introduce wrapper functions when there is no shader IO
+ // to process.
+ auto* src = R"(
+@fragment
fn frag_main() {
}
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn comp_main() {
}
)";
- auto* expect = src;
+ auto* expect = src;
- DataMap data;
- data.Add<CanonicalizeEntryPointIO::Config>(
- CanonicalizeEntryPointIO::ShaderStyle::kMsl);
- auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
+ DataMap data;
+ data.Add<CanonicalizeEntryPointIO::Config>(CanonicalizeEntryPointIO::ShaderStyle::kMsl);
+ auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
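
A hypothetical companion to the NoShaderIO test above, not part of this patch: State::Process() skips its early-out when the style is kGlsl, so even an IO-free entry point is expected to be wrapped by a generated main().

TEST_F(CanonicalizeEntryPointIOTest, NoShaderIO_Glsl_Sketch) {
    // Sketch only: asserts a substring rather than pinning the full output.
    auto* src = R"(
@fragment
fn frag_main() {
}
)";

    DataMap data;
    data.Add<CanonicalizeEntryPointIO::Config>(CanonicalizeEntryPointIO::ShaderStyle::kGlsl);
    auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);

    EXPECT_NE(std::string::npos, str(got).find("fn main()"));
}
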
TEST_F(CanonicalizeEntryPointIOTest, Parameters_Spirv) {
- auto* src = R"(
-@stage(fragment)
+ auto* src = R"(
+@fragment
fn frag_main(@location(1) loc1 : f32,
@location(2) @interpolate(flat) loc2 : vec4<u32>,
@builtin(position) coord : vec4<f32>) {
@@ -67,7 +66,7 @@ fn frag_main(@location(1) loc1 : f32,
}
)";
- auto* expect = R"(
+ auto* expect = R"(
@location(1) @internal(disable_validation__ignore_storage_class) var<in> loc1_1 : f32;
@location(2) @interpolate(flat) @internal(disable_validation__ignore_storage_class) var<in> loc2_1 : vec4<u32>;
@@ -78,23 +77,22 @@ fn frag_main_inner(loc1 : f32, loc2 : vec4<u32>, coord : vec4<f32>) {
var col : f32 = (coord.x * loc1);
}
-@stage(fragment)
+@fragment
fn frag_main() {
frag_main_inner(loc1_1, loc2_1, coord_1);
}
)";
- DataMap data;
- data.Add<CanonicalizeEntryPointIO::Config>(
- CanonicalizeEntryPointIO::ShaderStyle::kSpirv);
- auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
+ DataMap data;
+ data.Add<CanonicalizeEntryPointIO::Config>(CanonicalizeEntryPointIO::ShaderStyle::kSpirv);
+ auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
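
A hypothetical follow-on to the Parameters_Spirv test above (not part of this patch), exercising the implicit-flat rule from AddInput(): an integer fragment input with no explicit @interpolate attribute is expected to gain @interpolate(flat) under ShaderStyle::kSpirv.

TEST_F(CanonicalizeEntryPointIOTest, ImplicitFlatIntegerInput_Spirv_Sketch) {
    // Sketch only: asserts a substring rather than pinning the full output.
    auto* src = R"(
@fragment
fn frag_main(@location(0) loc0 : u32) {
    var x : u32 = loc0;
}
)";

    DataMap data;
    data.Add<CanonicalizeEntryPointIO::Config>(CanonicalizeEntryPointIO::ShaderStyle::kSpirv);
    auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);

    EXPECT_NE(std::string::npos, str(got).find("@interpolate(flat)"));
}
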
TEST_F(CanonicalizeEntryPointIOTest, Parameters_Msl) {
- auto* src = R"(
-@stage(fragment)
+ auto* src = R"(
+@fragment
fn frag_main(@location(1) loc1 : f32,
@location(2) @interpolate(flat) loc2 : vec4<u32>,
@builtin(position) coord : vec4<f32>) {
@@ -102,7 +100,7 @@ fn frag_main(@location(1) loc1 : f32,
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct tint_symbol_1 {
@location(1)
loc1 : f32,
@@ -114,23 +112,22 @@ fn frag_main_inner(loc1 : f32, loc2 : vec4<u32>, coord : vec4<f32>) {
var col : f32 = (coord.x * loc1);
}
-@stage(fragment)
+@fragment
fn frag_main(@builtin(position) coord : vec4<f32>, tint_symbol : tint_symbol_1) {
frag_main_inner(tint_symbol.loc1, tint_symbol.loc2, coord);
}
)";
- DataMap data;
- data.Add<CanonicalizeEntryPointIO::Config>(
- CanonicalizeEntryPointIO::ShaderStyle::kMsl);
- auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
+ DataMap data;
+ data.Add<CanonicalizeEntryPointIO::Config>(CanonicalizeEntryPointIO::ShaderStyle::kMsl);
+ auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CanonicalizeEntryPointIOTest, Parameters_Hlsl) {
- auto* src = R"(
-@stage(fragment)
+ auto* src = R"(
+@fragment
fn frag_main(@location(1) loc1 : f32,
@location(2) @interpolate(flat) loc2 : vec4<u32>,
@builtin(position) coord : vec4<f32>) {
@@ -138,7 +135,7 @@ fn frag_main(@location(1) loc1 : f32,
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct tint_symbol_1 {
@location(1)
loc1 : f32,
@@ -152,31 +149,30 @@ fn frag_main_inner(loc1 : f32, loc2 : vec4<u32>, coord : vec4<f32>) {
var col : f32 = (coord.x * loc1);
}
-@stage(fragment)
+@fragment
fn frag_main(tint_symbol : tint_symbol_1) {
frag_main_inner(tint_symbol.loc1, tint_symbol.loc2, tint_symbol.coord);
}
)";
- DataMap data;
- data.Add<CanonicalizeEntryPointIO::Config>(
- CanonicalizeEntryPointIO::ShaderStyle::kHlsl);
- auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
+ DataMap data;
+ data.Add<CanonicalizeEntryPointIO::Config>(CanonicalizeEntryPointIO::ShaderStyle::kHlsl);
+ auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CanonicalizeEntryPointIOTest, Parameter_TypeAlias) {
- auto* src = R"(
+ auto* src = R"(
type myf32 = f32;
-@stage(fragment)
+@fragment
fn frag_main(@location(1) loc1 : myf32) {
var x : myf32 = loc1;
}
)";
- auto* expect = R"(
+ auto* expect = R"(
type myf32 = f32;
struct tint_symbol_1 {
@@ -188,23 +184,22 @@ fn frag_main_inner(loc1 : myf32) {
var x : myf32 = loc1;
}
-@stage(fragment)
+@fragment
fn frag_main(tint_symbol : tint_symbol_1) {
frag_main_inner(tint_symbol.loc1);
}
)";
- DataMap data;
- data.Add<CanonicalizeEntryPointIO::Config>(
- CanonicalizeEntryPointIO::ShaderStyle::kMsl);
- auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
+ DataMap data;
+ data.Add<CanonicalizeEntryPointIO::Config>(CanonicalizeEntryPointIO::ShaderStyle::kMsl);
+ auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CanonicalizeEntryPointIOTest, Parameter_TypeAlias_OutOfOrder) {
- auto* src = R"(
-@stage(fragment)
+ auto* src = R"(
+@fragment
fn frag_main(@location(1) loc1 : myf32) {
var x : myf32 = loc1;
}
@@ -212,7 +207,7 @@ fn frag_main(@location(1) loc1 : myf32) {
type myf32 = f32;
)";
- auto* expect = R"(
+ auto* expect = R"(
struct tint_symbol_1 {
@location(1)
loc1 : f32,
@@ -222,7 +217,7 @@ fn frag_main_inner(loc1 : myf32) {
var x : myf32 = loc1;
}
-@stage(fragment)
+@fragment
fn frag_main(tint_symbol : tint_symbol_1) {
frag_main_inner(tint_symbol.loc1);
}
@@ -230,16 +225,15 @@ fn frag_main(tint_symbol : tint_symbol_1) {
type myf32 = f32;
)";
- DataMap data;
- data.Add<CanonicalizeEntryPointIO::Config>(
- CanonicalizeEntryPointIO::ShaderStyle::kMsl);
- auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
+ DataMap data;
+ data.Add<CanonicalizeEntryPointIO::Config>(CanonicalizeEntryPointIO::ShaderStyle::kMsl);
+ auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CanonicalizeEntryPointIOTest, StructParameters_Spirv) {
- auto* src = R"(
+ auto* src = R"(
struct FragBuiltins {
@builtin(position) coord : vec4<f32>,
};
@@ -248,7 +242,7 @@ struct FragLocations {
@location(2) @interpolate(flat) loc2 : vec4<u32>,
};
-@stage(fragment)
+@fragment
fn frag_main(@location(0) loc0 : f32,
locations : FragLocations,
builtins : FragBuiltins) {
@@ -256,7 +250,7 @@ fn frag_main(@location(0) loc0 : f32,
}
)";
- auto* expect = R"(
+ auto* expect = R"(
@location(0) @internal(disable_validation__ignore_storage_class) var<in> loc0_1 : f32;
@location(1) @internal(disable_validation__ignore_storage_class) var<in> loc1_1 : f32;
@@ -278,23 +272,22 @@ fn frag_main_inner(loc0 : f32, locations : FragLocations, builtins : FragBuiltin
var col : f32 = ((builtins.coord.x * locations.loc1) + loc0);
}
-@stage(fragment)
+@fragment
fn frag_main() {
frag_main_inner(loc0_1, FragLocations(loc1_1, loc2_1), FragBuiltins(coord_1));
}
)";
- DataMap data;
- data.Add<CanonicalizeEntryPointIO::Config>(
- CanonicalizeEntryPointIO::ShaderStyle::kSpirv);
- auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
+ DataMap data;
+ data.Add<CanonicalizeEntryPointIO::Config>(CanonicalizeEntryPointIO::ShaderStyle::kSpirv);
+ auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CanonicalizeEntryPointIOTest, StructParameters_Spirv_OutOfOrder) {
- auto* src = R"(
-@stage(fragment)
+ auto* src = R"(
+@fragment
fn frag_main(@location(0) loc0 : f32,
locations : FragLocations,
builtins : FragBuiltins) {
@@ -310,7 +303,7 @@ struct FragLocations {
};
)";
- auto* expect = R"(
+ auto* expect = R"(
@location(0) @internal(disable_validation__ignore_storage_class) var<in> loc0_1 : f32;
@location(1) @internal(disable_validation__ignore_storage_class) var<in> loc1_1 : f32;
@@ -323,7 +316,7 @@ fn frag_main_inner(loc0 : f32, locations : FragLocations, builtins : FragBuiltin
var col : f32 = ((builtins.coord.x * locations.loc1) + loc0);
}
-@stage(fragment)
+@fragment
fn frag_main() {
frag_main_inner(loc0_1, FragLocations(loc1_1, loc2_1), FragBuiltins(coord_1));
}
@@ -338,16 +331,15 @@ struct FragLocations {
}
)";
- DataMap data;
- data.Add<CanonicalizeEntryPointIO::Config>(
- CanonicalizeEntryPointIO::ShaderStyle::kSpirv);
- auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
+ DataMap data;
+ data.Add<CanonicalizeEntryPointIO::Config>(CanonicalizeEntryPointIO::ShaderStyle::kSpirv);
+ auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CanonicalizeEntryPointIOTest, StructParameters_kMsl) {
- auto* src = R"(
+ auto* src = R"(
struct FragBuiltins {
@builtin(position) coord : vec4<f32>,
};
@@ -356,7 +348,7 @@ struct FragLocations {
@location(2) @interpolate(flat) loc2 : vec4<u32>,
};
-@stage(fragment)
+@fragment
fn frag_main(@location(0) loc0 : f32,
locations : FragLocations,
builtins : FragBuiltins) {
@@ -364,7 +356,7 @@ fn frag_main(@location(0) loc0 : f32,
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct FragBuiltins {
coord : vec4<f32>,
}
@@ -387,23 +379,22 @@ fn frag_main_inner(loc0 : f32, locations : FragLocations, builtins : FragBuiltin
var col : f32 = ((builtins.coord.x * locations.loc1) + loc0);
}
-@stage(fragment)
+@fragment
fn frag_main(@builtin(position) coord : vec4<f32>, tint_symbol : tint_symbol_1) {
frag_main_inner(tint_symbol.loc0, FragLocations(tint_symbol.loc1, tint_symbol.loc2), FragBuiltins(coord));
}
)";
- DataMap data;
- data.Add<CanonicalizeEntryPointIO::Config>(
- CanonicalizeEntryPointIO::ShaderStyle::kMsl);
- auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
+ DataMap data;
+ data.Add<CanonicalizeEntryPointIO::Config>(CanonicalizeEntryPointIO::ShaderStyle::kMsl);
+ auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CanonicalizeEntryPointIOTest, StructParameters_kMsl_OutOfOrder) {
- auto* src = R"(
-@stage(fragment)
+ auto* src = R"(
+@fragment
fn frag_main(@location(0) loc0 : f32,
locations : FragLocations,
builtins : FragBuiltins) {
@@ -419,7 +410,7 @@ struct FragLocations {
};
)";
- auto* expect = R"(
+ auto* expect = R"(
struct tint_symbol_1 {
@location(0)
loc0 : f32,
@@ -433,7 +424,7 @@ fn frag_main_inner(loc0 : f32, locations : FragLocations, builtins : FragBuiltin
var col : f32 = ((builtins.coord.x * locations.loc1) + loc0);
}
-@stage(fragment)
+@fragment
fn frag_main(@builtin(position) coord : vec4<f32>, tint_symbol : tint_symbol_1) {
frag_main_inner(tint_symbol.loc0, FragLocations(tint_symbol.loc1, tint_symbol.loc2), FragBuiltins(coord));
}
@@ -448,16 +439,15 @@ struct FragLocations {
}
)";
- DataMap data;
- data.Add<CanonicalizeEntryPointIO::Config>(
- CanonicalizeEntryPointIO::ShaderStyle::kMsl);
- auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
+ DataMap data;
+ data.Add<CanonicalizeEntryPointIO::Config>(CanonicalizeEntryPointIO::ShaderStyle::kMsl);
+ auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CanonicalizeEntryPointIOTest, StructParameters_Hlsl) {
- auto* src = R"(
+ auto* src = R"(
struct FragBuiltins {
@builtin(position) coord : vec4<f32>,
};
@@ -466,7 +456,7 @@ struct FragLocations {
@location(2) @interpolate(flat) loc2 : vec4<u32>,
};
-@stage(fragment)
+@fragment
fn frag_main(@location(0) loc0 : f32,
locations : FragLocations,
builtins : FragBuiltins) {
@@ -474,7 +464,7 @@ fn frag_main(@location(0) loc0 : f32,
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct FragBuiltins {
coord : vec4<f32>,
}
@@ -499,23 +489,22 @@ fn frag_main_inner(loc0 : f32, locations : FragLocations, builtins : FragBuiltin
var col : f32 = ((builtins.coord.x * locations.loc1) + loc0);
}
-@stage(fragment)
+@fragment
fn frag_main(tint_symbol : tint_symbol_1) {
frag_main_inner(tint_symbol.loc0, FragLocations(tint_symbol.loc1, tint_symbol.loc2), FragBuiltins(tint_symbol.coord));
}
)";
- DataMap data;
- data.Add<CanonicalizeEntryPointIO::Config>(
- CanonicalizeEntryPointIO::ShaderStyle::kHlsl);
- auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
+ DataMap data;
+ data.Add<CanonicalizeEntryPointIO::Config>(CanonicalizeEntryPointIO::ShaderStyle::kHlsl);
+ auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CanonicalizeEntryPointIOTest, StructParameters_Hlsl_OutOfOrder) {
- auto* src = R"(
-@stage(fragment)
+ auto* src = R"(
+@fragment
fn frag_main(@location(0) loc0 : f32,
locations : FragLocations,
builtins : FragBuiltins) {
@@ -531,7 +520,7 @@ struct FragLocations {
};
)";
- auto* expect = R"(
+ auto* expect = R"(
struct tint_symbol_1 {
@location(0)
loc0 : f32,
@@ -547,7 +536,7 @@ fn frag_main_inner(loc0 : f32, locations : FragLocations, builtins : FragBuiltin
var col : f32 = ((builtins.coord.x * locations.loc1) + loc0);
}
-@stage(fragment)
+@fragment
fn frag_main(tint_symbol : tint_symbol_1) {
frag_main_inner(tint_symbol.loc0, FragLocations(tint_symbol.loc1, tint_symbol.loc2), FragBuiltins(tint_symbol.coord));
}
@@ -562,53 +551,51 @@ struct FragLocations {
}
)";
- DataMap data;
- data.Add<CanonicalizeEntryPointIO::Config>(
- CanonicalizeEntryPointIO::ShaderStyle::kHlsl);
- auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
+ DataMap data;
+ data.Add<CanonicalizeEntryPointIO::Config>(CanonicalizeEntryPointIO::ShaderStyle::kHlsl);
+ auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CanonicalizeEntryPointIOTest, Return_NonStruct_Spirv) {
- auto* src = R"(
-@stage(fragment)
+ auto* src = R"(
+@fragment
fn frag_main() -> @builtin(frag_depth) f32 {
return 1.0;
}
)";
- auto* expect = R"(
+ auto* expect = R"(
@builtin(frag_depth) @internal(disable_validation__ignore_storage_class) var<out> value : f32;
fn frag_main_inner() -> f32 {
return 1.0;
}
-@stage(fragment)
+@fragment
fn frag_main() {
let inner_result = frag_main_inner();
value = inner_result;
}
)";
- DataMap data;
- data.Add<CanonicalizeEntryPointIO::Config>(
- CanonicalizeEntryPointIO::ShaderStyle::kSpirv);
- auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
+ DataMap data;
+ data.Add<CanonicalizeEntryPointIO::Config>(CanonicalizeEntryPointIO::ShaderStyle::kSpirv);
+ auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CanonicalizeEntryPointIOTest, Return_NonStruct_Msl) {
- auto* src = R"(
-@stage(fragment)
+ auto* src = R"(
+@fragment
fn frag_main() -> @builtin(frag_depth) f32 {
return 1.0;
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct tint_symbol {
@builtin(frag_depth)
value : f32,
@@ -618,7 +605,7 @@ fn frag_main_inner() -> f32 {
return 1.0;
}
-@stage(fragment)
+@fragment
fn frag_main() -> tint_symbol {
let inner_result = frag_main_inner();
var wrapper_result : tint_symbol;
@@ -627,23 +614,22 @@ fn frag_main() -> tint_symbol {
}
)";
- DataMap data;
- data.Add<CanonicalizeEntryPointIO::Config>(
- CanonicalizeEntryPointIO::ShaderStyle::kMsl);
- auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
+ DataMap data;
+ data.Add<CanonicalizeEntryPointIO::Config>(CanonicalizeEntryPointIO::ShaderStyle::kMsl);
+ auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CanonicalizeEntryPointIOTest, Return_NonStruct_Hlsl) {
- auto* src = R"(
-@stage(fragment)
+ auto* src = R"(
+@fragment
fn frag_main() -> @builtin(frag_depth) f32 {
return 1.0;
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct tint_symbol {
@builtin(frag_depth)
value : f32,
@@ -653,7 +639,7 @@ fn frag_main_inner() -> f32 {
return 1.0;
}
-@stage(fragment)
+@fragment
fn frag_main() -> tint_symbol {
let inner_result = frag_main_inner();
var wrapper_result : tint_symbol;
@@ -662,23 +648,22 @@ fn frag_main() -> tint_symbol {
}
)";
- DataMap data;
- data.Add<CanonicalizeEntryPointIO::Config>(
- CanonicalizeEntryPointIO::ShaderStyle::kHlsl);
- auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
+ DataMap data;
+ data.Add<CanonicalizeEntryPointIO::Config>(CanonicalizeEntryPointIO::ShaderStyle::kHlsl);
+ auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CanonicalizeEntryPointIOTest, Return_Struct_Spirv) {
- auto* src = R"(
+ auto* src = R"(
struct FragOutput {
@location(0) color : vec4<f32>,
@builtin(frag_depth) depth : f32,
@builtin(sample_mask) mask : u32,
};
-@stage(fragment)
+@fragment
fn frag_main() -> FragOutput {
var output : FragOutput;
output.depth = 1.0;
@@ -688,12 +673,12 @@ fn frag_main() -> FragOutput {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
@location(0) @internal(disable_validation__ignore_storage_class) var<out> color_1 : vec4<f32>;
@builtin(frag_depth) @internal(disable_validation__ignore_storage_class) var<out> depth_1 : f32;
-@builtin(sample_mask) @internal(disable_validation__ignore_storage_class) var<out> mask_1 : array<u32, 1>;
+@builtin(sample_mask) @internal(disable_validation__ignore_storage_class) var<out> mask_1 : array<u32, 1u>;
struct FragOutput {
color : vec4<f32>,
@@ -709,26 +694,25 @@ fn frag_main_inner() -> FragOutput {
return output;
}
-@stage(fragment)
+@fragment
fn frag_main() {
let inner_result = frag_main_inner();
color_1 = inner_result.color;
depth_1 = inner_result.depth;
- mask_1[0] = inner_result.mask;
+ mask_1[0i] = inner_result.mask;
}
)";
- DataMap data;
- data.Add<CanonicalizeEntryPointIO::Config>(
- CanonicalizeEntryPointIO::ShaderStyle::kSpirv);
- auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
+ DataMap data;
+ data.Add<CanonicalizeEntryPointIO::Config>(CanonicalizeEntryPointIO::ShaderStyle::kSpirv);
+ auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CanonicalizeEntryPointIOTest, Return_Struct_Spirv_OutOfOrder) {
- auto* src = R"(
-@stage(fragment)
+ auto* src = R"(
+@fragment
fn frag_main() -> FragOutput {
var output : FragOutput;
output.depth = 1.0;
@@ -744,12 +728,12 @@ struct FragOutput {
};
)";
- auto* expect = R"(
+ auto* expect = R"(
@location(0) @internal(disable_validation__ignore_storage_class) var<out> color_1 : vec4<f32>;
@builtin(frag_depth) @internal(disable_validation__ignore_storage_class) var<out> depth_1 : f32;
-@builtin(sample_mask) @internal(disable_validation__ignore_storage_class) var<out> mask_1 : array<u32, 1>;
+@builtin(sample_mask) @internal(disable_validation__ignore_storage_class) var<out> mask_1 : array<u32, 1u>;
fn frag_main_inner() -> FragOutput {
var output : FragOutput;
@@ -759,12 +743,12 @@ fn frag_main_inner() -> FragOutput {
return output;
}
-@stage(fragment)
+@fragment
fn frag_main() {
let inner_result = frag_main_inner();
color_1 = inner_result.color;
depth_1 = inner_result.depth;
- mask_1[0] = inner_result.mask;
+ mask_1[0i] = inner_result.mask;
}
struct FragOutput {
@@ -774,23 +758,22 @@ struct FragOutput {
}
)";
- DataMap data;
- data.Add<CanonicalizeEntryPointIO::Config>(
- CanonicalizeEntryPointIO::ShaderStyle::kSpirv);
- auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
+ DataMap data;
+ data.Add<CanonicalizeEntryPointIO::Config>(CanonicalizeEntryPointIO::ShaderStyle::kSpirv);
+ auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CanonicalizeEntryPointIOTest, Return_Struct_Msl) {
- auto* src = R"(
+ auto* src = R"(
struct FragOutput {
@location(0) color : vec4<f32>,
@builtin(frag_depth) depth : f32,
@builtin(sample_mask) mask : u32,
};
-@stage(fragment)
+@fragment
fn frag_main() -> FragOutput {
var output : FragOutput;
output.depth = 1.0;
@@ -800,7 +783,7 @@ fn frag_main() -> FragOutput {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct FragOutput {
color : vec4<f32>,
depth : f32,
@@ -824,7 +807,7 @@ fn frag_main_inner() -> FragOutput {
return output;
}
-@stage(fragment)
+@fragment
fn frag_main() -> tint_symbol {
let inner_result = frag_main_inner();
var wrapper_result : tint_symbol;
@@ -835,17 +818,16 @@ fn frag_main() -> tint_symbol {
}
)";
- DataMap data;
- data.Add<CanonicalizeEntryPointIO::Config>(
- CanonicalizeEntryPointIO::ShaderStyle::kMsl);
- auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
+ DataMap data;
+ data.Add<CanonicalizeEntryPointIO::Config>(CanonicalizeEntryPointIO::ShaderStyle::kMsl);
+ auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CanonicalizeEntryPointIOTest, Return_Struct_Msl_OutOfOrder) {
- auto* src = R"(
-@stage(fragment)
+ auto* src = R"(
+@fragment
fn frag_main() -> FragOutput {
var output : FragOutput;
output.depth = 1.0;
@@ -861,7 +843,7 @@ struct FragOutput {
};
)";
- auto* expect = R"(
+ auto* expect = R"(
struct tint_symbol {
@location(0)
color : vec4<f32>,
@@ -879,7 +861,7 @@ fn frag_main_inner() -> FragOutput {
return output;
}
-@stage(fragment)
+@fragment
fn frag_main() -> tint_symbol {
let inner_result = frag_main_inner();
var wrapper_result : tint_symbol;
@@ -896,23 +878,22 @@ struct FragOutput {
}
)";
- DataMap data;
- data.Add<CanonicalizeEntryPointIO::Config>(
- CanonicalizeEntryPointIO::ShaderStyle::kMsl);
- auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
+ DataMap data;
+ data.Add<CanonicalizeEntryPointIO::Config>(CanonicalizeEntryPointIO::ShaderStyle::kMsl);
+ auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CanonicalizeEntryPointIOTest, Return_Struct_Hlsl) {
- auto* src = R"(
+ auto* src = R"(
struct FragOutput {
@location(0) color : vec4<f32>,
@builtin(frag_depth) depth : f32,
@builtin(sample_mask) mask : u32,
};
-@stage(fragment)
+@fragment
fn frag_main() -> FragOutput {
var output : FragOutput;
output.depth = 1.0;
@@ -922,7 +903,7 @@ fn frag_main() -> FragOutput {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct FragOutput {
color : vec4<f32>,
depth : f32,
@@ -946,7 +927,7 @@ fn frag_main_inner() -> FragOutput {
return output;
}
-@stage(fragment)
+@fragment
fn frag_main() -> tint_symbol {
let inner_result = frag_main_inner();
var wrapper_result : tint_symbol;
@@ -957,17 +938,16 @@ fn frag_main() -> tint_symbol {
}
)";
- DataMap data;
- data.Add<CanonicalizeEntryPointIO::Config>(
- CanonicalizeEntryPointIO::ShaderStyle::kHlsl);
- auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
+ DataMap data;
+ data.Add<CanonicalizeEntryPointIO::Config>(CanonicalizeEntryPointIO::ShaderStyle::kHlsl);
+ auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CanonicalizeEntryPointIOTest, Return_Struct_Hlsl_OutOfOrder) {
- auto* src = R"(
-@stage(fragment)
+ auto* src = R"(
+@fragment
fn frag_main() -> FragOutput {
var output : FragOutput;
output.depth = 1.0;
@@ -983,7 +963,7 @@ struct FragOutput {
};
)";
- auto* expect = R"(
+ auto* expect = R"(
struct tint_symbol {
@location(0)
color : vec4<f32>,
@@ -1001,7 +981,7 @@ fn frag_main_inner() -> FragOutput {
return output;
}
-@stage(fragment)
+@fragment
fn frag_main() -> tint_symbol {
let inner_result = frag_main_inner();
var wrapper_result : tint_symbol;
@@ -1018,17 +998,15 @@ struct FragOutput {
}
)";
- DataMap data;
- data.Add<CanonicalizeEntryPointIO::Config>(
- CanonicalizeEntryPointIO::ShaderStyle::kHlsl);
- auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
+ DataMap data;
+ data.Add<CanonicalizeEntryPointIO::Config>(CanonicalizeEntryPointIO::ShaderStyle::kHlsl);
+ auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
-TEST_F(CanonicalizeEntryPointIOTest,
- StructParameters_SharedDeviceFunction_Spirv) {
- auto* src = R"(
+TEST_F(CanonicalizeEntryPointIOTest, StructParameters_SharedDeviceFunction_Spirv) {
+ auto* src = R"(
struct FragmentInput {
@location(0) value : f32,
@location(1) mul : f32,
@@ -1038,18 +1016,18 @@ fn foo(x : FragmentInput) -> f32 {
return x.value * x.mul;
}
-@stage(fragment)
+@fragment
fn frag_main1(inputs : FragmentInput) {
var x : f32 = foo(inputs);
}
-@stage(fragment)
+@fragment
fn frag_main2(inputs : FragmentInput) {
var x : f32 = foo(inputs);
}
)";
- auto* expect = R"(
+ auto* expect = R"(
@location(0) @internal(disable_validation__ignore_storage_class) var<in> value_1 : f32;
@location(1) @internal(disable_validation__ignore_storage_class) var<in> mul_1 : f32;
@@ -1071,7 +1049,7 @@ fn frag_main1_inner(inputs : FragmentInput) {
var x : f32 = foo(inputs);
}
-@stage(fragment)
+@fragment
fn frag_main1() {
frag_main1_inner(FragmentInput(value_1, mul_1));
}
@@ -1080,29 +1058,27 @@ fn frag_main2_inner(inputs : FragmentInput) {
var x : f32 = foo(inputs);
}
-@stage(fragment)
+@fragment
fn frag_main2() {
frag_main2_inner(FragmentInput(value_2, mul_2));
}
)";
- DataMap data;
- data.Add<CanonicalizeEntryPointIO::Config>(
- CanonicalizeEntryPointIO::ShaderStyle::kSpirv);
- auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
+ DataMap data;
+ data.Add<CanonicalizeEntryPointIO::Config>(CanonicalizeEntryPointIO::ShaderStyle::kSpirv);
+ auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
-TEST_F(CanonicalizeEntryPointIOTest,
- StructParameters_SharedDeviceFunction_Spirv_OutOfOrder) {
- auto* src = R"(
-@stage(fragment)
+TEST_F(CanonicalizeEntryPointIOTest, StructParameters_SharedDeviceFunction_Spirv_OutOfOrder) {
+ auto* src = R"(
+@fragment
fn frag_main1(inputs : FragmentInput) {
var x : f32 = foo(inputs);
}
-@stage(fragment)
+@fragment
fn frag_main2(inputs : FragmentInput) {
var x : f32 = foo(inputs);
}
@@ -1117,7 +1093,7 @@ struct FragmentInput {
};
)";
- auto* expect = R"(
+ auto* expect = R"(
@location(0) @internal(disable_validation__ignore_storage_class) var<in> value_1 : f32;
@location(1) @internal(disable_validation__ignore_storage_class) var<in> mul_1 : f32;
@@ -1130,7 +1106,7 @@ fn frag_main1_inner(inputs : FragmentInput) {
var x : f32 = foo(inputs);
}
-@stage(fragment)
+@fragment
fn frag_main1() {
frag_main1_inner(FragmentInput(value_1, mul_1));
}
@@ -1139,7 +1115,7 @@ fn frag_main2_inner(inputs : FragmentInput) {
var x : f32 = foo(inputs);
}
-@stage(fragment)
+@fragment
fn frag_main2() {
frag_main2_inner(FragmentInput(value_2, mul_2));
}
@@ -1154,17 +1130,15 @@ struct FragmentInput {
}
)";
- DataMap data;
- data.Add<CanonicalizeEntryPointIO::Config>(
- CanonicalizeEntryPointIO::ShaderStyle::kSpirv);
- auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
+ DataMap data;
+ data.Add<CanonicalizeEntryPointIO::Config>(CanonicalizeEntryPointIO::ShaderStyle::kSpirv);
+ auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
-TEST_F(CanonicalizeEntryPointIOTest,
- StructParameters_SharedDeviceFunction_Msl) {
- auto* src = R"(
+TEST_F(CanonicalizeEntryPointIOTest, StructParameters_SharedDeviceFunction_Msl) {
+ auto* src = R"(
struct FragmentInput {
@location(0) value : f32,
@location(1) mul : f32,
@@ -1174,18 +1148,18 @@ fn foo(x : FragmentInput) -> f32 {
return x.value * x.mul;
}
-@stage(fragment)
+@fragment
fn frag_main1(inputs : FragmentInput) {
var x : f32 = foo(inputs);
}
-@stage(fragment)
+@fragment
fn frag_main2(inputs : FragmentInput) {
var x : f32 = foo(inputs);
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct FragmentInput {
value : f32,
mul : f32,
@@ -1206,7 +1180,7 @@ fn frag_main1_inner(inputs : FragmentInput) {
var x : f32 = foo(inputs);
}
-@stage(fragment)
+@fragment
fn frag_main1(tint_symbol : tint_symbol_1) {
frag_main1_inner(FragmentInput(tint_symbol.value, tint_symbol.mul));
}
@@ -1222,29 +1196,27 @@ fn frag_main2_inner(inputs : FragmentInput) {
var x : f32 = foo(inputs);
}
-@stage(fragment)
+@fragment
fn frag_main2(tint_symbol_2 : tint_symbol_3) {
frag_main2_inner(FragmentInput(tint_symbol_2.value, tint_symbol_2.mul));
}
)";
- DataMap data;
- data.Add<CanonicalizeEntryPointIO::Config>(
- CanonicalizeEntryPointIO::ShaderStyle::kMsl);
- auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
+ DataMap data;
+ data.Add<CanonicalizeEntryPointIO::Config>(CanonicalizeEntryPointIO::ShaderStyle::kMsl);
+ auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
-TEST_F(CanonicalizeEntryPointIOTest,
- StructParameters_SharedDeviceFunction_Msl_OutOfOrder) {
- auto* src = R"(
-@stage(fragment)
+TEST_F(CanonicalizeEntryPointIOTest, StructParameters_SharedDeviceFunction_Msl_OutOfOrder) {
+ auto* src = R"(
+@fragment
fn frag_main1(inputs : FragmentInput) {
var x : f32 = foo(inputs);
}
-@stage(fragment)
+@fragment
fn frag_main2(inputs : FragmentInput) {
var x : f32 = foo(inputs);
}
@@ -1259,7 +1231,7 @@ struct FragmentInput {
};
)";
- auto* expect = R"(
+ auto* expect = R"(
struct tint_symbol_1 {
@location(0)
value : f32,
@@ -1271,7 +1243,7 @@ fn frag_main1_inner(inputs : FragmentInput) {
var x : f32 = foo(inputs);
}
-@stage(fragment)
+@fragment
fn frag_main1(tint_symbol : tint_symbol_1) {
frag_main1_inner(FragmentInput(tint_symbol.value, tint_symbol.mul));
}
@@ -1287,7 +1259,7 @@ fn frag_main2_inner(inputs : FragmentInput) {
var x : f32 = foo(inputs);
}
-@stage(fragment)
+@fragment
fn frag_main2(tint_symbol_2 : tint_symbol_3) {
frag_main2_inner(FragmentInput(tint_symbol_2.value, tint_symbol_2.mul));
}
@@ -1302,17 +1274,15 @@ struct FragmentInput {
}
)";
- DataMap data;
- data.Add<CanonicalizeEntryPointIO::Config>(
- CanonicalizeEntryPointIO::ShaderStyle::kMsl);
- auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
+ DataMap data;
+ data.Add<CanonicalizeEntryPointIO::Config>(CanonicalizeEntryPointIO::ShaderStyle::kMsl);
+ auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
-TEST_F(CanonicalizeEntryPointIOTest,
- StructParameters_SharedDeviceFunction_Hlsl) {
- auto* src = R"(
+TEST_F(CanonicalizeEntryPointIOTest, StructParameters_SharedDeviceFunction_Hlsl) {
+ auto* src = R"(
struct FragmentInput {
@location(0) value : f32,
@location(1) mul : f32,
@@ -1322,18 +1292,18 @@ fn foo(x : FragmentInput) -> f32 {
return x.value * x.mul;
}
-@stage(fragment)
+@fragment
fn frag_main1(inputs : FragmentInput) {
var x : f32 = foo(inputs);
}
-@stage(fragment)
+@fragment
fn frag_main2(inputs : FragmentInput) {
var x : f32 = foo(inputs);
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct FragmentInput {
value : f32,
mul : f32,
@@ -1354,7 +1324,7 @@ fn frag_main1_inner(inputs : FragmentInput) {
var x : f32 = foo(inputs);
}
-@stage(fragment)
+@fragment
fn frag_main1(tint_symbol : tint_symbol_1) {
frag_main1_inner(FragmentInput(tint_symbol.value, tint_symbol.mul));
}
@@ -1370,29 +1340,27 @@ fn frag_main2_inner(inputs : FragmentInput) {
var x : f32 = foo(inputs);
}
-@stage(fragment)
+@fragment
fn frag_main2(tint_symbol_2 : tint_symbol_3) {
frag_main2_inner(FragmentInput(tint_symbol_2.value, tint_symbol_2.mul));
}
)";
- DataMap data;
- data.Add<CanonicalizeEntryPointIO::Config>(
- CanonicalizeEntryPointIO::ShaderStyle::kHlsl);
- auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
+ DataMap data;
+ data.Add<CanonicalizeEntryPointIO::Config>(CanonicalizeEntryPointIO::ShaderStyle::kHlsl);
+ auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
-TEST_F(CanonicalizeEntryPointIOTest,
- StructParameters_SharedDeviceFunction_Hlsl_OutOfOrder) {
- auto* src = R"(
-@stage(fragment)
+TEST_F(CanonicalizeEntryPointIOTest, StructParameters_SharedDeviceFunction_Hlsl_OutOfOrder) {
+ auto* src = R"(
+@fragment
fn frag_main1(inputs : FragmentInput) {
var x : f32 = foo(inputs);
}
-@stage(fragment)
+@fragment
fn frag_main2(inputs : FragmentInput) {
var x : f32 = foo(inputs);
}
@@ -1407,7 +1375,7 @@ struct FragmentInput {
};
)";
- auto* expect = R"(
+ auto* expect = R"(
struct tint_symbol_1 {
@location(0)
value : f32,
@@ -1419,7 +1387,7 @@ fn frag_main1_inner(inputs : FragmentInput) {
var x : f32 = foo(inputs);
}
-@stage(fragment)
+@fragment
fn frag_main1(tint_symbol : tint_symbol_1) {
frag_main1_inner(FragmentInput(tint_symbol.value, tint_symbol.mul));
}
@@ -1435,7 +1403,7 @@ fn frag_main2_inner(inputs : FragmentInput) {
var x : f32 = foo(inputs);
}
-@stage(fragment)
+@fragment
fn frag_main2(tint_symbol_2 : tint_symbol_3) {
frag_main2_inner(FragmentInput(tint_symbol_2.value, tint_symbol_2.mul));
}
@@ -1450,16 +1418,15 @@ struct FragmentInput {
}
)";
- DataMap data;
- data.Add<CanonicalizeEntryPointIO::Config>(
- CanonicalizeEntryPointIO::ShaderStyle::kHlsl);
- auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
+ DataMap data;
+ data.Add<CanonicalizeEntryPointIO::Config>(CanonicalizeEntryPointIO::ShaderStyle::kHlsl);
+ auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CanonicalizeEntryPointIOTest, Struct_ModuleScopeVariable) {
- auto* src = R"(
+ auto* src = R"(
struct FragmentInput {
@location(0) col1 : f32,
@location(1) col2 : f32,
@@ -1475,7 +1442,7 @@ fn bar() -> f32 {
return global_inputs.col2 * 2.0;
}
-@stage(fragment)
+@fragment
fn frag_main1(inputs : FragmentInput) {
global_inputs = inputs;
var r : f32 = foo();
@@ -1483,7 +1450,7 @@ fn frag_main1(inputs : FragmentInput) {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct FragmentInput {
col1 : f32,
col2 : f32,
@@ -1512,23 +1479,22 @@ fn frag_main1_inner(inputs : FragmentInput) {
var g : f32 = bar();
}
-@stage(fragment)
+@fragment
fn frag_main1(tint_symbol : tint_symbol_1) {
frag_main1_inner(FragmentInput(tint_symbol.col1, tint_symbol.col2));
}
)";
- DataMap data;
- data.Add<CanonicalizeEntryPointIO::Config>(
- CanonicalizeEntryPointIO::ShaderStyle::kMsl);
- auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
+ DataMap data;
+ data.Add<CanonicalizeEntryPointIO::Config>(CanonicalizeEntryPointIO::ShaderStyle::kMsl);
+ auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CanonicalizeEntryPointIOTest, Struct_ModuleScopeVariable_OutOfOrder) {
- auto* src = R"(
-@stage(fragment)
+ auto* src = R"(
+@fragment
fn frag_main1(inputs : FragmentInput) {
global_inputs = inputs;
var r : f32 = foo();
@@ -1551,7 +1517,7 @@ struct FragmentInput {
};
)";
- auto* expect = R"(
+ auto* expect = R"(
struct tint_symbol_1 {
@location(0)
col1 : f32,
@@ -1565,7 +1531,7 @@ fn frag_main1_inner(inputs : FragmentInput) {
var g : f32 = bar();
}
-@stage(fragment)
+@fragment
fn frag_main1(tint_symbol : tint_symbol_1) {
frag_main1_inner(FragmentInput(tint_symbol.col1, tint_symbol.col2));
}
@@ -1586,16 +1552,15 @@ struct FragmentInput {
}
)";
- DataMap data;
- data.Add<CanonicalizeEntryPointIO::Config>(
- CanonicalizeEntryPointIO::ShaderStyle::kMsl);
- auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
+ DataMap data;
+ data.Add<CanonicalizeEntryPointIO::Config>(CanonicalizeEntryPointIO::ShaderStyle::kMsl);
+ auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CanonicalizeEntryPointIOTest, Struct_TypeAliases) {
- auto* src = R"(
+ auto* src = R"(
type myf32 = f32;
struct FragmentInput {
@@ -1616,14 +1581,14 @@ fn foo(x : MyFragmentInput) -> myf32 {
return x.col1;
}
-@stage(fragment)
+@fragment
fn frag_main(inputs : MyFragmentInput) -> MyFragmentOutput {
var x : myf32 = foo(inputs);
return MyFragmentOutput(x, inputs.col2);
}
)";
- auto* expect = R"(
+ auto* expect = R"(
type myf32 = f32;
struct FragmentInput {
@@ -1663,7 +1628,7 @@ fn frag_main_inner(inputs : MyFragmentInput) -> MyFragmentOutput {
return MyFragmentOutput(x, inputs.col2);
}
-@stage(fragment)
+@fragment
fn frag_main(tint_symbol : tint_symbol_1) -> tint_symbol_2 {
let inner_result = frag_main_inner(MyFragmentInput(tint_symbol.col1, tint_symbol.col2));
var wrapper_result : tint_symbol_2;
@@ -1673,17 +1638,16 @@ fn frag_main(tint_symbol : tint_symbol_1) -> tint_symbol_2 {
}
)";
- DataMap data;
- data.Add<CanonicalizeEntryPointIO::Config>(
- CanonicalizeEntryPointIO::ShaderStyle::kMsl);
- auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
+ DataMap data;
+ data.Add<CanonicalizeEntryPointIO::Config>(CanonicalizeEntryPointIO::ShaderStyle::kMsl);
+ auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CanonicalizeEntryPointIOTest, Struct_TypeAliases_OutOfOrder) {
- auto* src = R"(
-@stage(fragment)
+ auto* src = R"(
+@fragment
fn frag_main(inputs : MyFragmentInput) -> MyFragmentOutput {
var x : myf32 = foo(inputs);
return MyFragmentOutput(x, inputs.col2);
@@ -1710,7 +1674,7 @@ struct FragmentOutput {
type myf32 = f32;
)";
- auto* expect = R"(
+ auto* expect = R"(
struct tint_symbol_1 {
@location(0)
col1 : f32,
@@ -1730,7 +1694,7 @@ fn frag_main_inner(inputs : MyFragmentInput) -> MyFragmentOutput {
return MyFragmentOutput(x, inputs.col2);
}
-@stage(fragment)
+@fragment
fn frag_main(tint_symbol : tint_symbol_1) -> tint_symbol_2 {
let inner_result = frag_main_inner(MyFragmentInput(tint_symbol.col1, tint_symbol.col2));
var wrapper_result : tint_symbol_2;
@@ -1760,16 +1724,15 @@ struct FragmentOutput {
type myf32 = f32;
)";
- DataMap data;
- data.Add<CanonicalizeEntryPointIO::Config>(
- CanonicalizeEntryPointIO::ShaderStyle::kMsl);
- auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
+ DataMap data;
+ data.Add<CanonicalizeEntryPointIO::Config>(CanonicalizeEntryPointIO::ShaderStyle::kMsl);
+ auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CanonicalizeEntryPointIOTest, InterpolateAttributes) {
- auto* src = R"(
+ auto* src = R"(
struct VertexOut {
@builtin(position) pos : vec4<f32>,
@location(1) @interpolate(flat) loc1 : f32,
@@ -1782,19 +1745,19 @@ struct FragmentIn {
@location(2) @interpolate(linear, sample) loc2 : f32,
};
-@stage(vertex)
+@vertex
fn vert_main() -> VertexOut {
return VertexOut();
}
-@stage(fragment)
+@fragment
fn frag_main(inputs : FragmentIn,
@location(3) @interpolate(perspective, centroid) loc3 : f32) {
let x = inputs.loc1 + inputs.loc2 + loc3;
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct VertexOut {
pos : vec4<f32>,
loc1 : f32,
@@ -1822,7 +1785,7 @@ fn vert_main_inner() -> VertexOut {
return VertexOut();
}
-@stage(vertex)
+@vertex
fn vert_main() -> tint_symbol {
let inner_result = vert_main_inner();
var wrapper_result : tint_symbol;
@@ -1846,29 +1809,28 @@ fn frag_main_inner(inputs : FragmentIn, loc3 : f32) {
let x = ((inputs.loc1 + inputs.loc2) + loc3);
}
-@stage(fragment)
+@fragment
fn frag_main(tint_symbol_1 : tint_symbol_2) {
frag_main_inner(FragmentIn(tint_symbol_1.loc1, tint_symbol_1.loc2), tint_symbol_1.loc3);
}
)";
- DataMap data;
- data.Add<CanonicalizeEntryPointIO::Config>(
- CanonicalizeEntryPointIO::ShaderStyle::kHlsl);
- auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
+ DataMap data;
+ data.Add<CanonicalizeEntryPointIO::Config>(CanonicalizeEntryPointIO::ShaderStyle::kHlsl);
+ auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CanonicalizeEntryPointIOTest, InterpolateAttributes_OutOfOrder) {
- auto* src = R"(
-@stage(fragment)
+ auto* src = R"(
+@fragment
fn frag_main(inputs : FragmentIn,
@location(3) @interpolate(perspective, centroid) loc3 : f32) {
let x = inputs.loc1 + inputs.loc2 + loc3;
}
-@stage(vertex)
+@vertex
fn vert_main() -> VertexOut {
return VertexOut();
}
@@ -1886,7 +1848,7 @@ struct FragmentIn {
};
)";
- auto* expect = R"(
+ auto* expect = R"(
struct tint_symbol_1 {
@location(1) @interpolate(flat)
loc1 : f32,
@@ -1900,7 +1862,7 @@ fn frag_main_inner(inputs : FragmentIn, loc3 : f32) {
let x = ((inputs.loc1 + inputs.loc2) + loc3);
}
-@stage(fragment)
+@fragment
fn frag_main(tint_symbol : tint_symbol_1) {
frag_main_inner(FragmentIn(tint_symbol.loc1, tint_symbol.loc2), tint_symbol.loc3);
}
@@ -1920,7 +1882,7 @@ fn vert_main_inner() -> VertexOut {
return VertexOut();
}
-@stage(vertex)
+@vertex
fn vert_main() -> tint_symbol_2 {
let inner_result = vert_main_inner();
var wrapper_result : tint_symbol_2;
@@ -1944,18 +1906,17 @@ struct FragmentIn {
}
)";
- DataMap data;
- data.Add<CanonicalizeEntryPointIO::Config>(
- CanonicalizeEntryPointIO::ShaderStyle::kHlsl);
- auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
+ DataMap data;
+ data.Add<CanonicalizeEntryPointIO::Config>(CanonicalizeEntryPointIO::ShaderStyle::kHlsl);
+ auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CanonicalizeEntryPointIOTest, InterpolateAttributes_Integers_Spirv) {
- // Test that we add a Flat attribute to integers that are vertex outputs and
- // fragment inputs, but not vertex inputs or fragment outputs.
- auto* src = R"(
+ // Test that we add a Flat attribute to integers that are vertex outputs and
+ // fragment inputs, but not vertex inputs or fragment outputs.
+ auto* src = R"(
struct VertexIn {
@location(0) i : i32,
@location(1) u : u32,
@@ -1978,19 +1939,19 @@ struct FragmentInterface {
@location(3) @interpolate(flat) vu : vec4<u32>,
};
-@stage(vertex)
+@vertex
fn vert_main(in : VertexIn) -> VertexOut {
return VertexOut(in.i, in.u, in.vi, in.vu, vec4<f32>());
}
-@stage(fragment)
+@fragment
fn frag_main(inputs : FragmentInterface) -> FragmentInterface {
return inputs;
}
)";
- auto* expect =
- R"(
+ auto* expect =
+ R"(
@location(0) @internal(disable_validation__ignore_storage_class) var<in> i_1 : i32;
@location(1) @internal(disable_validation__ignore_storage_class) var<in> u_1 : u32;
@@ -2051,7 +2012,7 @@ fn vert_main_inner(in : VertexIn) -> VertexOut {
return VertexOut(in.i, in.u, in.vi, in.vu, vec4<f32>());
}
-@stage(vertex)
+@vertex
fn vert_main() {
let inner_result = vert_main_inner(VertexIn(i_1, u_1, vi_1, vu_1));
i_2 = inner_result.i;
@@ -2065,7 +2026,7 @@ fn frag_main_inner(inputs : FragmentInterface) -> FragmentInterface {
return inputs;
}
-@stage(fragment)
+@fragment
fn frag_main() {
let inner_result_1 = frag_main_inner(FragmentInterface(i_3, u_3, vi_3, vu_3));
i_4 = inner_result_1.i;
@@ -2075,25 +2036,23 @@ fn frag_main() {
}
)";
- DataMap data;
- data.Add<CanonicalizeEntryPointIO::Config>(
- CanonicalizeEntryPointIO::ShaderStyle::kSpirv);
- auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
+ DataMap data;
+ data.Add<CanonicalizeEntryPointIO::Config>(CanonicalizeEntryPointIO::ShaderStyle::kSpirv);
+ auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
-TEST_F(CanonicalizeEntryPointIOTest,
- InterpolateAttributes_Integers_Spirv_OutOfOrder) {
- // Test that we add a Flat attribute to integers that are vertex outputs and
- // fragment inputs, but not vertex inputs or fragment outputs.
- auto* src = R"(
-@stage(vertex)
+TEST_F(CanonicalizeEntryPointIOTest, InterpolateAttributes_Integers_Spirv_OutOfOrder) {
+ // Test that we add a Flat attribute to integers that are vertex outputs and
+ // fragment inputs, but not vertex inputs or fragment outputs.
+ auto* src = R"(
+@vertex
fn vert_main(in : VertexIn) -> VertexOut {
return VertexOut(in.i, in.u, in.vi, in.vu, vec4<f32>());
}
-@stage(fragment)
+@fragment
fn frag_main(inputs : FragmentInterface) -> FragmentInterface {
return inputs;
}
@@ -2121,8 +2080,8 @@ struct FragmentInterface {
};
)";
- auto* expect =
- R"(
+ auto* expect =
+ R"(
@location(0) @internal(disable_validation__ignore_storage_class) var<in> i_1 : i32;
@location(1) @internal(disable_validation__ignore_storage_class) var<in> u_1 : u32;
@@ -2161,7 +2120,7 @@ fn vert_main_inner(in : VertexIn) -> VertexOut {
return VertexOut(in.i, in.u, in.vi, in.vu, vec4<f32>());
}
-@stage(vertex)
+@vertex
fn vert_main() {
let inner_result = vert_main_inner(VertexIn(i_1, u_1, vi_1, vu_1));
i_2 = inner_result.i;
@@ -2175,7 +2134,7 @@ fn frag_main_inner(inputs : FragmentInterface) -> FragmentInterface {
return inputs;
}
-@stage(fragment)
+@fragment
fn frag_main() {
let inner_result_1 = frag_main_inner(FragmentInterface(i_3, u_3, vi_3, vu_3));
i_4 = inner_result_1.i;
@@ -2207,32 +2166,31 @@ struct FragmentInterface {
}
)";
- DataMap data;
- data.Add<CanonicalizeEntryPointIO::Config>(
- CanonicalizeEntryPointIO::ShaderStyle::kSpirv);
- auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
+ DataMap data;
+ data.Add<CanonicalizeEntryPointIO::Config>(CanonicalizeEntryPointIO::ShaderStyle::kSpirv);
+ auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CanonicalizeEntryPointIOTest, InvariantAttributes) {
- auto* src = R"(
+ auto* src = R"(
struct VertexOut {
@builtin(position) @invariant pos : vec4<f32>,
};
-@stage(vertex)
+@vertex
fn main1() -> VertexOut {
return VertexOut();
}
-@stage(vertex)
+@vertex
fn main2() -> @builtin(position) @invariant vec4<f32> {
return vec4<f32>();
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct VertexOut {
pos : vec4<f32>,
}
@@ -2246,7 +2204,7 @@ fn main1_inner() -> VertexOut {
return VertexOut();
}
-@stage(vertex)
+@vertex
fn main1() -> tint_symbol {
let inner_result = main1_inner();
var wrapper_result : tint_symbol;
@@ -2263,7 +2221,7 @@ fn main2_inner() -> vec4<f32> {
return vec4<f32>();
}
-@stage(vertex)
+@vertex
fn main2() -> tint_symbol_1 {
let inner_result_1 = main2_inner();
var wrapper_result_1 : tint_symbol_1;
@@ -2272,22 +2230,21 @@ fn main2() -> tint_symbol_1 {
}
)";
- DataMap data;
- data.Add<CanonicalizeEntryPointIO::Config>(
- CanonicalizeEntryPointIO::ShaderStyle::kHlsl);
- auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
+ DataMap data;
+ data.Add<CanonicalizeEntryPointIO::Config>(CanonicalizeEntryPointIO::ShaderStyle::kHlsl);
+ auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CanonicalizeEntryPointIOTest, InvariantAttributes_OutOfOrder) {
- auto* src = R"(
-@stage(vertex)
+ auto* src = R"(
+@vertex
fn main1() -> VertexOut {
return VertexOut();
}
-@stage(vertex)
+@vertex
fn main2() -> @builtin(position) @invariant vec4<f32> {
return vec4<f32>();
}
@@ -2297,7 +2254,7 @@ struct VertexOut {
};
)";
- auto* expect = R"(
+ auto* expect = R"(
struct tint_symbol {
@builtin(position) @invariant
pos : vec4<f32>,
@@ -2307,7 +2264,7 @@ fn main1_inner() -> VertexOut {
return VertexOut();
}
-@stage(vertex)
+@vertex
fn main1() -> tint_symbol {
let inner_result = main1_inner();
var wrapper_result : tint_symbol;
@@ -2324,7 +2281,7 @@ fn main2_inner() -> vec4<f32> {
return vec4<f32>();
}
-@stage(vertex)
+@vertex
fn main2() -> tint_symbol_1 {
let inner_result_1 = main2_inner();
var wrapper_result_1 : tint_symbol_1;
@@ -2337,16 +2294,15 @@ struct VertexOut {
}
)";
- DataMap data;
- data.Add<CanonicalizeEntryPointIO::Config>(
- CanonicalizeEntryPointIO::ShaderStyle::kHlsl);
- auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
+ DataMap data;
+ data.Add<CanonicalizeEntryPointIO::Config>(CanonicalizeEntryPointIO::ShaderStyle::kHlsl);
+ auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CanonicalizeEntryPointIOTest, Struct_LayoutAttributes) {
- auto* src = R"(
+ auto* src = R"(
struct FragmentInput {
@size(16) @location(1) value : f32,
@builtin(position) @align(32) coord : vec4<f32>,
@@ -2357,13 +2313,13 @@ struct FragmentOutput {
@size(16) @location(1) @interpolate(flat) value : f32,
};
-@stage(fragment)
+@fragment
fn frag_main(inputs : FragmentInput) -> FragmentOutput {
return FragmentOutput(inputs.coord.x * inputs.value + inputs.loc0);
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct FragmentInput {
@size(16)
value : f32,
@@ -2396,7 +2352,7 @@ fn frag_main_inner(inputs : FragmentInput) -> FragmentOutput {
return FragmentOutput(((inputs.coord.x * inputs.value) + inputs.loc0));
}
-@stage(fragment)
+@fragment
fn frag_main(tint_symbol : tint_symbol_1) -> tint_symbol_2 {
let inner_result = frag_main_inner(FragmentInput(tint_symbol.value, tint_symbol.coord, tint_symbol.loc0));
var wrapper_result : tint_symbol_2;
@@ -2405,17 +2361,16 @@ fn frag_main(tint_symbol : tint_symbol_1) -> tint_symbol_2 {
}
)";
- DataMap data;
- data.Add<CanonicalizeEntryPointIO::Config>(
- CanonicalizeEntryPointIO::ShaderStyle::kHlsl);
- auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
+ DataMap data;
+ data.Add<CanonicalizeEntryPointIO::Config>(CanonicalizeEntryPointIO::ShaderStyle::kHlsl);
+ auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CanonicalizeEntryPointIOTest, Struct_LayoutAttributes_OutOfOrder) {
- auto* src = R"(
-@stage(fragment)
+ auto* src = R"(
+@fragment
fn frag_main(inputs : FragmentInput) -> FragmentOutput {
return FragmentOutput(inputs.coord.x * inputs.value + inputs.loc0);
}
@@ -2431,7 +2386,7 @@ struct FragmentOutput {
};
)";
- auto* expect = R"(
+ auto* expect = R"(
struct tint_symbol_1 {
@location(0) @interpolate(linear, sample)
loc0 : f32,
@@ -2450,7 +2405,7 @@ fn frag_main_inner(inputs : FragmentInput) -> FragmentOutput {
return FragmentOutput(((inputs.coord.x * inputs.value) + inputs.loc0));
}
-@stage(fragment)
+@fragment
fn frag_main(tint_symbol : tint_symbol_1) -> tint_symbol_2 {
let inner_result = frag_main_inner(FragmentInput(tint_symbol.value, tint_symbol.coord, tint_symbol.loc0));
var wrapper_result : tint_symbol_2;
@@ -2473,16 +2428,15 @@ struct FragmentOutput {
}
)";
- DataMap data;
- data.Add<CanonicalizeEntryPointIO::Config>(
- CanonicalizeEntryPointIO::ShaderStyle::kHlsl);
- auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
+ DataMap data;
+ data.Add<CanonicalizeEntryPointIO::Config>(CanonicalizeEntryPointIO::ShaderStyle::kHlsl);
+ auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CanonicalizeEntryPointIOTest, SortedMembers) {
- auto* src = R"(
+ auto* src = R"(
struct VertexOutput {
@location(1) @interpolate(flat) b : u32,
@builtin(position) pos : vec4<f32>,
@@ -2497,12 +2451,12 @@ struct FragmentInputExtra {
@location(0) a : f32,
};
-@stage(vertex)
+@vertex
fn vert_main() -> VertexOutput {
return VertexOutput();
}
-@stage(fragment)
+@fragment
fn frag_main(@builtin(front_facing) ff : bool,
@location(2) @interpolate(flat) c : i32,
inputs : FragmentInputExtra,
@@ -2510,7 +2464,7 @@ fn frag_main(@builtin(front_facing) ff : bool,
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct VertexOutput {
b : u32,
pos : vec4<f32>,
@@ -2542,7 +2496,7 @@ fn vert_main_inner() -> VertexOutput {
return VertexOutput();
}
-@stage(vertex)
+@vertex
fn vert_main() -> tint_symbol {
let inner_result = vert_main_inner();
var wrapper_result : tint_symbol;
@@ -2572,28 +2526,27 @@ struct tint_symbol_2 {
fn frag_main_inner(ff : bool, c : i32, inputs : FragmentInputExtra, b : u32) {
}
-@stage(fragment)
+@fragment
fn frag_main(tint_symbol_1 : tint_symbol_2) {
frag_main_inner(tint_symbol_1.ff, tint_symbol_1.c, FragmentInputExtra(tint_symbol_1.d, tint_symbol_1.pos, tint_symbol_1.a), tint_symbol_1.b);
}
)";
- DataMap data;
- data.Add<CanonicalizeEntryPointIO::Config>(
- CanonicalizeEntryPointIO::ShaderStyle::kHlsl);
- auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
+ DataMap data;
+ data.Add<CanonicalizeEntryPointIO::Config>(CanonicalizeEntryPointIO::ShaderStyle::kHlsl);
+ auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CanonicalizeEntryPointIOTest, SortedMembers_OutOfOrder) {
- auto* src = R"(
-@stage(vertex)
+ auto* src = R"(
+@vertex
fn vert_main() -> VertexOutput {
return VertexOutput();
}
-@stage(fragment)
+@fragment
fn frag_main(@builtin(front_facing) ff : bool,
@location(2) @interpolate(flat) c : i32,
inputs : FragmentInputExtra,
@@ -2615,7 +2568,7 @@ struct FragmentInputExtra {
};
)";
- auto* expect = R"(
+ auto* expect = R"(
struct tint_symbol {
@location(0)
a : f32,
@@ -2633,7 +2586,7 @@ fn vert_main_inner() -> VertexOutput {
return VertexOutput();
}
-@stage(vertex)
+@vertex
fn vert_main() -> tint_symbol {
let inner_result = vert_main_inner();
var wrapper_result : tint_symbol;
@@ -2663,7 +2616,7 @@ struct tint_symbol_2 {
fn frag_main_inner(ff : bool, c : i32, inputs : FragmentInputExtra, b : u32) {
}
-@stage(fragment)
+@fragment
fn frag_main(tint_symbol_1 : tint_symbol_2) {
frag_main_inner(tint_symbol_1.ff, tint_symbol_1.c, FragmentInputExtra(tint_symbol_1.d, tint_symbol_1.pos, tint_symbol_1.a), tint_symbol_1.b);
}
@@ -2683,22 +2636,21 @@ struct FragmentInputExtra {
}
)";
- DataMap data;
- data.Add<CanonicalizeEntryPointIO::Config>(
- CanonicalizeEntryPointIO::ShaderStyle::kHlsl);
- auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
+ DataMap data;
+ data.Add<CanonicalizeEntryPointIO::Config>(CanonicalizeEntryPointIO::ShaderStyle::kHlsl);
+ auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CanonicalizeEntryPointIOTest, DontRenameSymbols) {
- auto* src = R"(
-@stage(fragment)
+ auto* src = R"(
+@fragment
fn tint_symbol_1(@location(0) col : f32) {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct tint_symbol_2 {
@location(0)
col : f32,
@@ -2707,28 +2659,27 @@ struct tint_symbol_2 {
fn tint_symbol_1_inner(col : f32) {
}
-@stage(fragment)
+@fragment
fn tint_symbol_1(tint_symbol : tint_symbol_2) {
tint_symbol_1_inner(tint_symbol.col);
}
)";
- DataMap data;
- data.Add<CanonicalizeEntryPointIO::Config>(
- CanonicalizeEntryPointIO::ShaderStyle::kMsl);
- auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
+ DataMap data;
+ data.Add<CanonicalizeEntryPointIO::Config>(CanonicalizeEntryPointIO::ShaderStyle::kMsl);
+ auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CanonicalizeEntryPointIOTest, FixedSampleMask_VoidNoReturn) {
- auto* src = R"(
-@stage(fragment)
+ auto* src = R"(
+@fragment
fn frag_main() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct tint_symbol {
@builtin(sample_mask)
fixed_sample_mask : u32,
@@ -2737,7 +2688,7 @@ struct tint_symbol {
fn frag_main_inner() {
}
-@stage(fragment)
+@fragment
fn frag_main() -> tint_symbol {
frag_main_inner();
var wrapper_result : tint_symbol;
@@ -2746,23 +2697,22 @@ fn frag_main() -> tint_symbol {
}
)";
- DataMap data;
- data.Add<CanonicalizeEntryPointIO::Config>(
- CanonicalizeEntryPointIO::ShaderStyle::kMsl, 0x03u);
- auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
+ DataMap data;
+ data.Add<CanonicalizeEntryPointIO::Config>(CanonicalizeEntryPointIO::ShaderStyle::kMsl, 0x03u);
+ auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CanonicalizeEntryPointIOTest, FixedSampleMask_VoidWithReturn) {
- auto* src = R"(
-@stage(fragment)
+ auto* src = R"(
+@fragment
fn frag_main() {
return;
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct tint_symbol {
@builtin(sample_mask)
fixed_sample_mask : u32,
@@ -2772,7 +2722,7 @@ fn frag_main_inner() {
return;
}
-@stage(fragment)
+@fragment
fn frag_main() -> tint_symbol {
frag_main_inner();
var wrapper_result : tint_symbol;
@@ -2781,23 +2731,22 @@ fn frag_main() -> tint_symbol {
}
)";
- DataMap data;
- data.Add<CanonicalizeEntryPointIO::Config>(
- CanonicalizeEntryPointIO::ShaderStyle::kMsl, 0x03u);
- auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
+ DataMap data;
+ data.Add<CanonicalizeEntryPointIO::Config>(CanonicalizeEntryPointIO::ShaderStyle::kMsl, 0x03u);
+ auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CanonicalizeEntryPointIOTest, FixedSampleMask_WithAuthoredMask) {
- auto* src = R"(
-@stage(fragment)
+ auto* src = R"(
+@fragment
fn frag_main() -> @builtin(sample_mask) u32 {
return 7u;
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct tint_symbol {
@builtin(sample_mask)
value : u32,
@@ -2807,7 +2756,7 @@ fn frag_main_inner() -> u32 {
return 7u;
}
-@stage(fragment)
+@fragment
fn frag_main() -> tint_symbol {
let inner_result = frag_main_inner();
var wrapper_result : tint_symbol;
@@ -2816,23 +2765,22 @@ fn frag_main() -> tint_symbol {
}
)";
- DataMap data;
- data.Add<CanonicalizeEntryPointIO::Config>(
- CanonicalizeEntryPointIO::ShaderStyle::kMsl, 0x03u);
- auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
+ DataMap data;
+ data.Add<CanonicalizeEntryPointIO::Config>(CanonicalizeEntryPointIO::ShaderStyle::kMsl, 0x03u);
+ auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CanonicalizeEntryPointIOTest, FixedSampleMask_WithoutAuthoredMask) {
- auto* src = R"(
-@stage(fragment)
+ auto* src = R"(
+@fragment
fn frag_main() -> @location(0) f32 {
return 1.0;
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct tint_symbol {
@location(0)
value : f32,
@@ -2844,7 +2792,7 @@ fn frag_main_inner() -> f32 {
return 1.0;
}
-@stage(fragment)
+@fragment
fn frag_main() -> tint_symbol {
let inner_result = frag_main_inner();
var wrapper_result : tint_symbol;
@@ -2854,29 +2802,28 @@ fn frag_main() -> tint_symbol {
}
)";
- DataMap data;
- data.Add<CanonicalizeEntryPointIO::Config>(
- CanonicalizeEntryPointIO::ShaderStyle::kMsl, 0x03u);
- auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
+ DataMap data;
+ data.Add<CanonicalizeEntryPointIO::Config>(CanonicalizeEntryPointIO::ShaderStyle::kMsl, 0x03u);
+ auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CanonicalizeEntryPointIOTest, FixedSampleMask_StructWithAuthoredMask) {
- auto* src = R"(
+ auto* src = R"(
struct Output {
@builtin(frag_depth) depth : f32,
@builtin(sample_mask) mask : u32,
@location(0) value : f32,
};
-@stage(fragment)
+@fragment
fn frag_main() -> Output {
return Output(0.5, 7u, 1.0);
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct Output {
depth : f32,
mask : u32,
@@ -2896,7 +2843,7 @@ fn frag_main_inner() -> Output {
return Output(0.5, 7u, 1.0);
}
-@stage(fragment)
+@fragment
fn frag_main() -> tint_symbol {
let inner_result = frag_main_inner();
var wrapper_result : tint_symbol;
@@ -2907,18 +2854,16 @@ fn frag_main() -> tint_symbol {
}
)";
- DataMap data;
- data.Add<CanonicalizeEntryPointIO::Config>(
- CanonicalizeEntryPointIO::ShaderStyle::kMsl, 0x03u);
- auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
+ DataMap data;
+ data.Add<CanonicalizeEntryPointIO::Config>(CanonicalizeEntryPointIO::ShaderStyle::kMsl, 0x03u);
+ auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
-TEST_F(CanonicalizeEntryPointIOTest,
- FixedSampleMask_StructWithAuthoredMask_OutOfOrder) {
- auto* src = R"(
-@stage(fragment)
+TEST_F(CanonicalizeEntryPointIOTest, FixedSampleMask_StructWithAuthoredMask_OutOfOrder) {
+ auto* src = R"(
+@fragment
fn frag_main() -> Output {
return Output(0.5, 7u, 1.0);
}
@@ -2930,7 +2875,7 @@ struct Output {
};
)";
- auto* expect = R"(
+ auto* expect = R"(
struct tint_symbol {
@location(0)
value : f32,
@@ -2944,7 +2889,7 @@ fn frag_main_inner() -> Output {
return Output(0.5, 7u, 1.0);
}
-@stage(fragment)
+@fragment
fn frag_main() -> tint_symbol {
let inner_result = frag_main_inner();
var wrapper_result : tint_symbol;
@@ -2961,29 +2906,27 @@ struct Output {
}
)";
- DataMap data;
- data.Add<CanonicalizeEntryPointIO::Config>(
- CanonicalizeEntryPointIO::ShaderStyle::kMsl, 0x03u);
- auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
+ DataMap data;
+ data.Add<CanonicalizeEntryPointIO::Config>(CanonicalizeEntryPointIO::ShaderStyle::kMsl, 0x03u);
+ auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
-TEST_F(CanonicalizeEntryPointIOTest,
- FixedSampleMask_StructWithoutAuthoredMask) {
- auto* src = R"(
+TEST_F(CanonicalizeEntryPointIOTest, FixedSampleMask_StructWithoutAuthoredMask) {
+ auto* src = R"(
struct Output {
@builtin(frag_depth) depth : f32,
@location(0) value : f32,
};
-@stage(fragment)
+@fragment
fn frag_main() -> Output {
return Output(0.5, 1.0);
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct Output {
depth : f32,
value : f32,
@@ -3002,7 +2945,7 @@ fn frag_main_inner() -> Output {
return Output(0.5, 1.0);
}
-@stage(fragment)
+@fragment
fn frag_main() -> tint_symbol {
let inner_result = frag_main_inner();
var wrapper_result : tint_symbol;
@@ -3013,18 +2956,16 @@ fn frag_main() -> tint_symbol {
}
)";
- DataMap data;
- data.Add<CanonicalizeEntryPointIO::Config>(
- CanonicalizeEntryPointIO::ShaderStyle::kMsl, 0x03u);
- auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
+ DataMap data;
+ data.Add<CanonicalizeEntryPointIO::Config>(CanonicalizeEntryPointIO::ShaderStyle::kMsl, 0x03u);
+ auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
-TEST_F(CanonicalizeEntryPointIOTest,
- FixedSampleMask_StructWithoutAuthoredMask_OutOfOrder) {
- auto* src = R"(
-@stage(fragment)
+TEST_F(CanonicalizeEntryPointIOTest, FixedSampleMask_StructWithoutAuthoredMask_OutOfOrder) {
+ auto* src = R"(
+@fragment
fn frag_main() -> Output {
return Output(0.5, 1.0);
}
@@ -3035,7 +2976,7 @@ struct Output {
};
)";
- auto* expect = R"(
+ auto* expect = R"(
struct tint_symbol {
@location(0)
value : f32,
@@ -3049,7 +2990,7 @@ fn frag_main_inner() -> Output {
return Output(0.5, 1.0);
}
-@stage(fragment)
+@fragment
fn frag_main() -> tint_symbol {
let inner_result = frag_main_inner();
var wrapper_result : tint_symbol;
@@ -3065,37 +3006,36 @@ struct Output {
}
)";
- DataMap data;
- data.Add<CanonicalizeEntryPointIO::Config>(
- CanonicalizeEntryPointIO::ShaderStyle::kMsl, 0x03u);
- auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
+ DataMap data;
+ data.Add<CanonicalizeEntryPointIO::Config>(CanonicalizeEntryPointIO::ShaderStyle::kMsl, 0x03u);
+ auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CanonicalizeEntryPointIOTest, FixedSampleMask_MultipleShaders) {
- auto* src = R"(
-@stage(fragment)
+ auto* src = R"(
+@fragment
fn frag_main1() -> @builtin(sample_mask) u32 {
return 7u;
}
-@stage(fragment)
+@fragment
fn frag_main2() -> @location(0) f32 {
return 1.0;
}
-@stage(vertex)
+@vertex
fn vert_main1() -> @builtin(position) vec4<f32> {
return vec4<f32>();
}
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn comp_main1() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct tint_symbol {
@builtin(sample_mask)
value : u32,
@@ -3105,7 +3045,7 @@ fn frag_main1_inner() -> u32 {
return 7u;
}
-@stage(fragment)
+@fragment
fn frag_main1() -> tint_symbol {
let inner_result = frag_main1_inner();
var wrapper_result : tint_symbol;
@@ -3124,7 +3064,7 @@ fn frag_main2_inner() -> f32 {
return 1.0;
}
-@stage(fragment)
+@fragment
fn frag_main2() -> tint_symbol_1 {
let inner_result_1 = frag_main2_inner();
var wrapper_result_1 : tint_symbol_1;
@@ -3142,7 +3082,7 @@ fn vert_main1_inner() -> vec4<f32> {
return vec4<f32>();
}
-@stage(vertex)
+@vertex
fn vert_main1() -> tint_symbol_2 {
let inner_result_2 = vert_main1_inner();
var wrapper_result_2 : tint_symbol_2;
@@ -3150,33 +3090,32 @@ fn vert_main1() -> tint_symbol_2 {
return wrapper_result_2;
}
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn comp_main1() {
}
)";
- DataMap data;
- data.Add<CanonicalizeEntryPointIO::Config>(
- CanonicalizeEntryPointIO::ShaderStyle::kMsl, 0x03u);
- auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
+ DataMap data;
+ data.Add<CanonicalizeEntryPointIO::Config>(CanonicalizeEntryPointIO::ShaderStyle::kMsl, 0x03u);
+ auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CanonicalizeEntryPointIOTest, FixedSampleMask_AvoidNameClash) {
- auto* src = R"(
+ auto* src = R"(
struct FragOut {
@location(0) fixed_sample_mask : vec4<f32>,
@location(1) fixed_sample_mask_1 : vec4<f32>,
};
-@stage(fragment)
+@fragment
fn frag_main() -> FragOut {
return FragOut();
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct FragOut {
fixed_sample_mask : vec4<f32>,
fixed_sample_mask_1 : vec4<f32>,
@@ -3195,7 +3134,7 @@ fn frag_main_inner() -> FragOut {
return FragOut();
}
-@stage(fragment)
+@fragment
fn frag_main() -> tint_symbol {
let inner_result = frag_main_inner();
var wrapper_result : tint_symbol;
@@ -3206,24 +3145,22 @@ fn frag_main() -> tint_symbol {
}
)";
- DataMap data;
- data.Add<CanonicalizeEntryPointIO::Config>(
- CanonicalizeEntryPointIO::ShaderStyle::kMsl, 0x03);
- auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
+ DataMap data;
+ data.Add<CanonicalizeEntryPointIO::Config>(CanonicalizeEntryPointIO::ShaderStyle::kMsl, 0x03);
+ auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
-TEST_F(CanonicalizeEntryPointIOTest,
- EmitVertexPointSize_ReturnNonStruct_Spirv) {
- auto* src = R"(
-@stage(vertex)
+TEST_F(CanonicalizeEntryPointIOTest, EmitVertexPointSize_ReturnNonStruct_Spirv) {
+ auto* src = R"(
+@vertex
fn vert_main() -> @builtin(position) vec4<f32> {
return vec4<f32>();
}
)";
- auto* expect = R"(
+ auto* expect = R"(
@builtin(position) @internal(disable_validation__ignore_storage_class) var<out> value : vec4<f32>;
@builtin(pointsize) @internal(disable_validation__ignore_storage_class) var<out> vertex_point_size : f32;
@@ -3232,31 +3169,31 @@ fn vert_main_inner() -> vec4<f32> {
return vec4<f32>();
}
-@stage(vertex)
+@vertex
fn vert_main() {
let inner_result = vert_main_inner();
value = inner_result;
- vertex_point_size = 1.0;
+ vertex_point_size = 1.0f;
}
)";
- DataMap data;
- data.Add<CanonicalizeEntryPointIO::Config>(
- CanonicalizeEntryPointIO::ShaderStyle::kSpirv, 0xFFFFFFFF, true);
- auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
+ DataMap data;
+ data.Add<CanonicalizeEntryPointIO::Config>(CanonicalizeEntryPointIO::ShaderStyle::kSpirv,
+ 0xFFFFFFFF, true);
+ auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CanonicalizeEntryPointIOTest, EmitVertexPointSize_ReturnNonStruct_Msl) {
- auto* src = R"(
-@stage(vertex)
+ auto* src = R"(
+@vertex
fn vert_main() -> @builtin(position) vec4<f32> {
return vec4<f32>();
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct tint_symbol {
@builtin(position)
value : vec4<f32>,
@@ -3268,37 +3205,37 @@ fn vert_main_inner() -> vec4<f32> {
return vec4<f32>();
}
-@stage(vertex)
+@vertex
fn vert_main() -> tint_symbol {
let inner_result = vert_main_inner();
var wrapper_result : tint_symbol;
wrapper_result.value = inner_result;
- wrapper_result.vertex_point_size = 1.0;
+ wrapper_result.vertex_point_size = 1.0f;
return wrapper_result;
}
)";
- DataMap data;
- data.Add<CanonicalizeEntryPointIO::Config>(
- CanonicalizeEntryPointIO::ShaderStyle::kMsl, 0xFFFFFFFF, true);
- auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
+ DataMap data;
+ data.Add<CanonicalizeEntryPointIO::Config>(CanonicalizeEntryPointIO::ShaderStyle::kMsl,
+ 0xFFFFFFFF, true);
+ auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CanonicalizeEntryPointIOTest, EmitVertexPointSize_ReturnStruct_Spirv) {
- auto* src = R"(
+ auto* src = R"(
struct VertOut {
@builtin(position) pos : vec4<f32>,
};
-@stage(vertex)
+@vertex
fn vert_main() -> VertOut {
return VertOut();
}
)";
- auto* expect = R"(
+ auto* expect = R"(
@builtin(position) @internal(disable_validation__ignore_storage_class) var<out> pos_1 : vec4<f32>;
@builtin(pointsize) @internal(disable_validation__ignore_storage_class) var<out> vertex_point_size : f32;
@@ -3311,26 +3248,25 @@ fn vert_main_inner() -> VertOut {
return VertOut();
}
-@stage(vertex)
+@vertex
fn vert_main() {
let inner_result = vert_main_inner();
pos_1 = inner_result.pos;
- vertex_point_size = 1.0;
+ vertex_point_size = 1.0f;
}
)";
- DataMap data;
- data.Add<CanonicalizeEntryPointIO::Config>(
- CanonicalizeEntryPointIO::ShaderStyle::kSpirv, 0xFFFFFFFF, true);
- auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
+ DataMap data;
+ data.Add<CanonicalizeEntryPointIO::Config>(CanonicalizeEntryPointIO::ShaderStyle::kSpirv,
+ 0xFFFFFFFF, true);
+ auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
-TEST_F(CanonicalizeEntryPointIOTest,
- EmitVertexPointSize_ReturnStruct_Spirv_OutOfOrder) {
- auto* src = R"(
-@stage(vertex)
+TEST_F(CanonicalizeEntryPointIOTest, EmitVertexPointSize_ReturnStruct_Spirv_OutOfOrder) {
+ auto* src = R"(
+@vertex
fn vert_main() -> VertOut {
return VertOut();
}
@@ -3340,7 +3276,7 @@ struct VertOut {
};
)";
- auto* expect = R"(
+ auto* expect = R"(
@builtin(position) @internal(disable_validation__ignore_storage_class) var<out> pos_1 : vec4<f32>;
@builtin(pointsize) @internal(disable_validation__ignore_storage_class) var<out> vertex_point_size : f32;
@@ -3349,11 +3285,11 @@ fn vert_main_inner() -> VertOut {
return VertOut();
}
-@stage(vertex)
+@vertex
fn vert_main() {
let inner_result = vert_main_inner();
pos_1 = inner_result.pos;
- vertex_point_size = 1.0;
+ vertex_point_size = 1.0f;
}
struct VertOut {
@@ -3361,27 +3297,27 @@ struct VertOut {
}
)";
- DataMap data;
- data.Add<CanonicalizeEntryPointIO::Config>(
- CanonicalizeEntryPointIO::ShaderStyle::kSpirv, 0xFFFFFFFF, true);
- auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
+ DataMap data;
+ data.Add<CanonicalizeEntryPointIO::Config>(CanonicalizeEntryPointIO::ShaderStyle::kSpirv,
+ 0xFFFFFFFF, true);
+ auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CanonicalizeEntryPointIOTest, EmitVertexPointSize_ReturnStruct_Msl) {
- auto* src = R"(
+ auto* src = R"(
struct VertOut {
@builtin(position) pos : vec4<f32>,
};
-@stage(vertex)
+@vertex
fn vert_main() -> VertOut {
return VertOut();
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct VertOut {
pos : vec4<f32>,
}
@@ -3397,28 +3333,27 @@ fn vert_main_inner() -> VertOut {
return VertOut();
}
-@stage(vertex)
+@vertex
fn vert_main() -> tint_symbol {
let inner_result = vert_main_inner();
var wrapper_result : tint_symbol;
wrapper_result.pos = inner_result.pos;
- wrapper_result.vertex_point_size = 1.0;
+ wrapper_result.vertex_point_size = 1.0f;
return wrapper_result;
}
)";
- DataMap data;
- data.Add<CanonicalizeEntryPointIO::Config>(
- CanonicalizeEntryPointIO::ShaderStyle::kMsl, 0xFFFFFFFF, true);
- auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
+ DataMap data;
+ data.Add<CanonicalizeEntryPointIO::Config>(CanonicalizeEntryPointIO::ShaderStyle::kMsl,
+ 0xFFFFFFFF, true);
+ auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
-TEST_F(CanonicalizeEntryPointIOTest,
- EmitVertexPointSize_ReturnStruct_Msl_OutOfOrder) {
- auto* src = R"(
-@stage(vertex)
+TEST_F(CanonicalizeEntryPointIOTest, EmitVertexPointSize_ReturnStruct_Msl_OutOfOrder) {
+ auto* src = R"(
+@vertex
fn vert_main() -> VertOut {
return VertOut();
}
@@ -3428,7 +3363,7 @@ struct VertOut {
};
)";
- auto* expect = R"(
+ auto* expect = R"(
struct tint_symbol {
@builtin(position)
pos : vec4<f32>,
@@ -3440,12 +3375,12 @@ fn vert_main_inner() -> VertOut {
return VertOut();
}
-@stage(vertex)
+@vertex
fn vert_main() -> tint_symbol {
let inner_result = vert_main_inner();
var wrapper_result : tint_symbol;
wrapper_result.pos = inner_result.pos;
- wrapper_result.vertex_point_size = 1.0;
+ wrapper_result.vertex_point_size = 1.0f;
return wrapper_result;
}
@@ -3454,16 +3389,16 @@ struct VertOut {
}
)";
- DataMap data;
- data.Add<CanonicalizeEntryPointIO::Config>(
- CanonicalizeEntryPointIO::ShaderStyle::kMsl, 0xFFFFFFFF, true);
- auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
+ DataMap data;
+ data.Add<CanonicalizeEntryPointIO::Config>(CanonicalizeEntryPointIO::ShaderStyle::kMsl,
+ 0xFFFFFFFF, true);
+ auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CanonicalizeEntryPointIOTest, EmitVertexPointSize_AvoidNameClash_Spirv) {
- auto* src = R"(
+ auto* src = R"(
var<private> vertex_point_size : f32;
var<private> vertex_point_size_1 : f32;
var<private> vertex_point_size_2 : f32;
@@ -3481,14 +3416,14 @@ struct VertOut {
@builtin(position) vertex_point_size_1 : vec4<f32>,
};
-@stage(vertex)
+@vertex
fn vert_main(collide : VertIn1, collide_1 : VertIn2) -> VertOut {
let x = collide.collide + collide_1.collide;
return VertOut();
}
)";
- auto* expect = R"(
+ auto* expect = R"(
@location(0) @internal(disable_validation__ignore_storage_class) var<in> collide_2 : f32;
@location(1) @internal(disable_validation__ignore_storage_class) var<in> collide_3 : f32;
@@ -3523,27 +3458,26 @@ fn vert_main_inner(collide : VertIn1, collide_1 : VertIn2) -> VertOut {
return VertOut();
}
-@stage(vertex)
+@vertex
fn vert_main() {
let inner_result = vert_main_inner(VertIn1(collide_2), VertIn2(collide_3));
vertex_point_size_3 = inner_result.vertex_point_size;
vertex_point_size_1_1 = inner_result.vertex_point_size_1;
- vertex_point_size_4 = 1.0;
+ vertex_point_size_4 = 1.0f;
}
)";
- DataMap data;
- data.Add<CanonicalizeEntryPointIO::Config>(
- CanonicalizeEntryPointIO::ShaderStyle::kSpirv, 0xFFFFFFFF, true);
- auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
+ DataMap data;
+ data.Add<CanonicalizeEntryPointIO::Config>(CanonicalizeEntryPointIO::ShaderStyle::kSpirv,
+ 0xFFFFFFFF, true);
+ auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
-TEST_F(CanonicalizeEntryPointIOTest,
- EmitVertexPointSize_AvoidNameClash_Spirv_OutOfOrder) {
- auto* src = R"(
-@stage(vertex)
+TEST_F(CanonicalizeEntryPointIOTest, EmitVertexPointSize_AvoidNameClash_Spirv_OutOfOrder) {
+ auto* src = R"(
+@vertex
fn vert_main(collide : VertIn1, collide_1 : VertIn2) -> VertOut {
let x = collide.collide + collide_1.collide;
return VertOut();
@@ -3567,7 +3501,7 @@ struct VertOut {
};
)";
- auto* expect = R"(
+ auto* expect = R"(
@location(0) @internal(disable_validation__ignore_storage_class) var<in> collide_2 : f32;
@location(1) @internal(disable_validation__ignore_storage_class) var<in> collide_3 : f32;
@@ -3583,12 +3517,12 @@ fn vert_main_inner(collide : VertIn1, collide_1 : VertIn2) -> VertOut {
return VertOut();
}
-@stage(vertex)
+@vertex
fn vert_main() {
let inner_result = vert_main_inner(VertIn1(collide_2), VertIn2(collide_3));
vertex_point_size_3 = inner_result.vertex_point_size;
vertex_point_size_1_1 = inner_result.vertex_point_size_1;
- vertex_point_size_4 = 1.0;
+ vertex_point_size_4 = 1.0f;
}
struct VertIn1 {
@@ -3611,16 +3545,16 @@ struct VertOut {
}
)";
- DataMap data;
- data.Add<CanonicalizeEntryPointIO::Config>(
- CanonicalizeEntryPointIO::ShaderStyle::kSpirv, 0xFFFFFFFF, true);
- auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
+ DataMap data;
+ data.Add<CanonicalizeEntryPointIO::Config>(CanonicalizeEntryPointIO::ShaderStyle::kSpirv,
+ 0xFFFFFFFF, true);
+ auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CanonicalizeEntryPointIOTest, EmitVertexPointSize_AvoidNameClash_Msl) {
- auto* src = R"(
+ auto* src = R"(
struct VertIn1 {
@location(0) collide : f32,
};
@@ -3634,14 +3568,14 @@ struct VertOut {
@builtin(position) vertex_point_size_1 : vec4<f32>,
};
-@stage(vertex)
+@vertex
fn vert_main(collide : VertIn1, collide_1 : VertIn2) -> VertOut {
let x = collide.collide + collide_1.collide;
return VertOut();
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct VertIn1 {
collide : f32,
}
@@ -3676,29 +3610,28 @@ fn vert_main_inner(collide : VertIn1, collide_1 : VertIn2) -> VertOut {
return VertOut();
}
-@stage(vertex)
+@vertex
fn vert_main(tint_symbol : tint_symbol_1) -> tint_symbol_2 {
let inner_result = vert_main_inner(VertIn1(tint_symbol.collide), VertIn2(tint_symbol.collide_2));
var wrapper_result : tint_symbol_2;
wrapper_result.vertex_point_size = inner_result.vertex_point_size;
wrapper_result.vertex_point_size_1 = inner_result.vertex_point_size_1;
- wrapper_result.vertex_point_size_2 = 1.0;
+ wrapper_result.vertex_point_size_2 = 1.0f;
return wrapper_result;
}
)";
- DataMap data;
- data.Add<CanonicalizeEntryPointIO::Config>(
- CanonicalizeEntryPointIO::ShaderStyle::kMsl, 0xFFFFFFFF, true);
- auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
+ DataMap data;
+ data.Add<CanonicalizeEntryPointIO::Config>(CanonicalizeEntryPointIO::ShaderStyle::kMsl,
+ 0xFFFFFFFF, true);
+ auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
-TEST_F(CanonicalizeEntryPointIOTest,
- EmitVertexPointSize_AvoidNameClash_Msl_OutOfOrder) {
- auto* src = R"(
-@stage(vertex)
+TEST_F(CanonicalizeEntryPointIOTest, EmitVertexPointSize_AvoidNameClash_Msl_OutOfOrder) {
+ auto* src = R"(
+@vertex
fn vert_main(collide : VertIn1, collide_1 : VertIn2) -> VertOut {
let x = collide.collide + collide_1.collide;
return VertOut();
@@ -3718,7 +3651,7 @@ struct VertOut {
};
)";
- auto* expect = R"(
+ auto* expect = R"(
struct tint_symbol_1 {
@location(0)
collide : f32,
@@ -3740,13 +3673,13 @@ fn vert_main_inner(collide : VertIn1, collide_1 : VertIn2) -> VertOut {
return VertOut();
}
-@stage(vertex)
+@vertex
fn vert_main(tint_symbol : tint_symbol_1) -> tint_symbol_2 {
let inner_result = vert_main_inner(VertIn1(tint_symbol.collide), VertIn2(tint_symbol.collide_2));
var wrapper_result : tint_symbol_2;
wrapper_result.vertex_point_size = inner_result.vertex_point_size;
wrapper_result.vertex_point_size_1 = inner_result.vertex_point_size_1;
- wrapper_result.vertex_point_size_2 = 1.0;
+ wrapper_result.vertex_point_size_2 = 1.0f;
return wrapper_result;
}
@@ -3764,16 +3697,16 @@ struct VertOut {
}
)";
- DataMap data;
- data.Add<CanonicalizeEntryPointIO::Config>(
- CanonicalizeEntryPointIO::ShaderStyle::kMsl, 0xFFFFFFFF, true);
- auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
+ DataMap data;
+ data.Add<CanonicalizeEntryPointIO::Config>(CanonicalizeEntryPointIO::ShaderStyle::kMsl,
+ 0xFFFFFFFF, true);
+ auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CanonicalizeEntryPointIOTest, EmitVertexPointSize_AvoidNameClash_Hlsl) {
- auto* src = R"(
+ auto* src = R"(
struct VertIn1 {
@location(0) collide : f32,
};
@@ -3787,14 +3720,14 @@ struct VertOut {
@builtin(position) vertex_point_size_1 : vec4<f32>,
};
-@stage(vertex)
+@vertex
fn vert_main(collide : VertIn1, collide_1 : VertIn2) -> VertOut {
let x = collide.collide + collide_1.collide;
return VertOut();
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct VertIn1 {
collide : f32,
}
@@ -3829,29 +3762,28 @@ fn vert_main_inner(collide : VertIn1, collide_1 : VertIn2) -> VertOut {
return VertOut();
}
-@stage(vertex)
+@vertex
fn vert_main(tint_symbol : tint_symbol_1) -> tint_symbol_2 {
let inner_result = vert_main_inner(VertIn1(tint_symbol.collide), VertIn2(tint_symbol.collide_2));
var wrapper_result : tint_symbol_2;
wrapper_result.vertex_point_size = inner_result.vertex_point_size;
wrapper_result.vertex_point_size_1 = inner_result.vertex_point_size_1;
- wrapper_result.vertex_point_size_2 = 1.0;
+ wrapper_result.vertex_point_size_2 = 1.0f;
return wrapper_result;
}
)";
- DataMap data;
- data.Add<CanonicalizeEntryPointIO::Config>(
- CanonicalizeEntryPointIO::ShaderStyle::kHlsl, 0xFFFFFFFF, true);
- auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
+ DataMap data;
+ data.Add<CanonicalizeEntryPointIO::Config>(CanonicalizeEntryPointIO::ShaderStyle::kHlsl,
+ 0xFFFFFFFF, true);
+ auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
-TEST_F(CanonicalizeEntryPointIOTest,
- EmitVertexPointSize_AvoidNameClash_Hlsl_OutOfOrder) {
- auto* src = R"(
-@stage(vertex)
+TEST_F(CanonicalizeEntryPointIOTest, EmitVertexPointSize_AvoidNameClash_Hlsl_OutOfOrder) {
+ auto* src = R"(
+@vertex
fn vert_main(collide : VertIn1, collide_1 : VertIn2) -> VertOut {
let x = collide.collide + collide_1.collide;
return VertOut();
@@ -3871,7 +3803,7 @@ struct VertOut {
};
)";
- auto* expect = R"(
+ auto* expect = R"(
struct tint_symbol_1 {
@location(0)
collide : f32,
@@ -3893,13 +3825,13 @@ fn vert_main_inner(collide : VertIn1, collide_1 : VertIn2) -> VertOut {
return VertOut();
}
-@stage(vertex)
+@vertex
fn vert_main(tint_symbol : tint_symbol_1) -> tint_symbol_2 {
let inner_result = vert_main_inner(VertIn1(tint_symbol.collide), VertIn2(tint_symbol.collide_2));
var wrapper_result : tint_symbol_2;
wrapper_result.vertex_point_size = inner_result.vertex_point_size;
wrapper_result.vertex_point_size_1 = inner_result.vertex_point_size_1;
- wrapper_result.vertex_point_size_2 = 1.0;
+ wrapper_result.vertex_point_size_2 = 1.0f;
return wrapper_result;
}
@@ -3917,17 +3849,17 @@ struct VertOut {
}
)";
- DataMap data;
- data.Add<CanonicalizeEntryPointIO::Config>(
- CanonicalizeEntryPointIO::ShaderStyle::kHlsl, 0xFFFFFFFF, true);
- auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
+ DataMap data;
+ data.Add<CanonicalizeEntryPointIO::Config>(CanonicalizeEntryPointIO::ShaderStyle::kHlsl,
+ 0xFFFFFFFF, true);
+ auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CanonicalizeEntryPointIOTest, SpirvSampleMaskBuiltins) {
- auto* src = R"(
-@stage(fragment)
+ auto* src = R"(
+@fragment
fn main(@builtin(sample_index) sample_index : u32,
@builtin(sample_mask) mask_in : u32
) -> @builtin(sample_mask) u32 {
@@ -3935,35 +3867,34 @@ fn main(@builtin(sample_index) sample_index : u32,
}
)";
- auto* expect = R"(
+ auto* expect = R"(
@builtin(sample_index) @internal(disable_validation__ignore_storage_class) var<in> sample_index_1 : u32;
-@builtin(sample_mask) @internal(disable_validation__ignore_storage_class) var<in> mask_in_1 : array<u32, 1>;
+@builtin(sample_mask) @internal(disable_validation__ignore_storage_class) var<in> mask_in_1 : array<u32, 1u>;
-@builtin(sample_mask) @internal(disable_validation__ignore_storage_class) var<out> value : array<u32, 1>;
+@builtin(sample_mask) @internal(disable_validation__ignore_storage_class) var<out> value : array<u32, 1u>;
fn main_inner(sample_index : u32, mask_in : u32) -> u32 {
return mask_in;
}
-@stage(fragment)
+@fragment
fn main() {
- let inner_result = main_inner(sample_index_1, mask_in_1[0]);
- value[0] = inner_result;
+ let inner_result = main_inner(sample_index_1, mask_in_1[0i]);
+ value[0i] = inner_result;
}
)";
- DataMap data;
- data.Add<CanonicalizeEntryPointIO::Config>(
- CanonicalizeEntryPointIO::ShaderStyle::kSpirv);
- auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
+ DataMap data;
+ data.Add<CanonicalizeEntryPointIO::Config>(CanonicalizeEntryPointIO::ShaderStyle::kSpirv);
+ auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CanonicalizeEntryPointIOTest, GLSLSampleMaskBuiltins) {
- auto* src = R"(
-@stage(fragment)
+ auto* src = R"(
+@fragment
fn fragment_main(@builtin(sample_index) sample_index : u32,
@builtin(sample_mask) mask_in : u32
) -> @builtin(sample_mask) u32 {
@@ -3971,35 +3902,34 @@ fn fragment_main(@builtin(sample_index) sample_index : u32,
}
)";
- auto* expect = R"(
+ auto* expect = R"(
@builtin(sample_index) @internal(disable_validation__ignore_storage_class) var<in> gl_SampleID : i32;
-@builtin(sample_mask) @internal(disable_validation__ignore_storage_class) var<in> gl_SampleMaskIn : array<i32, 1>;
+@builtin(sample_mask) @internal(disable_validation__ignore_storage_class) var<in> gl_SampleMaskIn : array<i32, 1u>;
-@builtin(sample_mask) @internal(disable_validation__ignore_storage_class) var<out> gl_SampleMask : array<i32, 1>;
+@builtin(sample_mask) @internal(disable_validation__ignore_storage_class) var<out> gl_SampleMask : array<i32, 1u>;
fn fragment_main(sample_index : u32, mask_in : u32) -> u32 {
return mask_in;
}
-@stage(fragment)
+@fragment
fn main() {
- let inner_result = fragment_main(bitcast<u32>(gl_SampleID), bitcast<u32>(gl_SampleMaskIn[0]));
- gl_SampleMask[0] = bitcast<i32>(inner_result);
+ let inner_result = fragment_main(bitcast<u32>(gl_SampleID), bitcast<u32>(gl_SampleMaskIn[0i]));
+ gl_SampleMask[0i] = bitcast<i32>(inner_result);
}
)";
- DataMap data;
- data.Add<CanonicalizeEntryPointIO::Config>(
- CanonicalizeEntryPointIO::ShaderStyle::kGlsl);
- auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
+ DataMap data;
+ data.Add<CanonicalizeEntryPointIO::Config>(CanonicalizeEntryPointIO::ShaderStyle::kGlsl);
+ auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CanonicalizeEntryPointIOTest, GLSLVertexInstanceIndexBuiltins) {
- auto* src = R"(
-@stage(vertex)
+ auto* src = R"(
+@vertex
fn vertex_main(@builtin(vertex_index) vertexID : u32,
@builtin(instance_index) instanceID : u32
) -> @builtin(position) vec4<f32> {
@@ -4007,7 +3937,7 @@ fn vertex_main(@builtin(vertex_index) vertexID : u32,
}
)";
- auto* expect = R"(
+ auto* expect = R"(
@builtin(vertex_index) @internal(disable_validation__ignore_storage_class) var<in> gl_VertexID : i32;
@builtin(instance_index) @internal(disable_validation__ignore_storage_class) var<in> gl_InstanceID : i32;
@@ -4018,21 +3948,20 @@ fn vertex_main(vertexID : u32, instanceID : u32) -> vec4<f32> {
return vec4<f32>((f32(vertexID) + f32(instanceID)));
}
-@stage(vertex)
+@vertex
fn main() {
let inner_result = vertex_main(bitcast<u32>(gl_VertexID), bitcast<u32>(gl_InstanceID));
gl_Position = inner_result;
gl_Position.y = -(gl_Position.y);
- gl_Position.z = ((2.0 * gl_Position.z) - gl_Position.w);
+ gl_Position.z = ((2.0f * gl_Position.z) - gl_Position.w);
}
)";
- DataMap data;
- data.Add<CanonicalizeEntryPointIO::Config>(
- CanonicalizeEntryPointIO::ShaderStyle::kGlsl);
- auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
+ DataMap data;
+ data.Add<CanonicalizeEntryPointIO::Config>(CanonicalizeEntryPointIO::ShaderStyle::kGlsl);
+ auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
} // namespace
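
For orientation, the driver pattern repeated by every test above reduces to the sketch below. The meaning of the second and third Config arguments (a fixed sample mask and a flag that makes the transform emit the vertex point-size builtin) is inferred from the test names in this file rather than from the transform's header, so treat those comments as assumptions.

    // Minimal sketch of running the transform, mirroring the tests above.
    DataMap data;
    data.Add<CanonicalizeEntryPointIO::Config>(
        CanonicalizeEntryPointIO::ShaderStyle::kMsl,  // target style: kSpirv, kMsl, kHlsl or kGlsl
        0xFFFFFFFF,                                   // assumed: fixed sample mask
        true);                                        // assumed: emit @builtin(pointsize) wiring
    auto got = Run<Unshadow, CanonicalizeEntryPointIO>(src, data);
    EXPECT_EQ(expect, str(got));
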
diff --git a/chromium/third_party/dawn/src/tint/transform/combine_samplers.cc b/chromium/third_party/dawn/src/tint/transform/combine_samplers.cc
index 4b1e892a3e7..c9d4913b99a 100644
--- a/chromium/third_party/dawn/src/tint/transform/combine_samplers.cc
+++ b/chromium/third_party/dawn/src/tint/transform/combine_samplers.cc
@@ -30,8 +30,8 @@ TINT_INSTANTIATE_TYPEINFO(tint::transform::CombineSamplers::BindingInfo);
namespace {
bool IsGlobal(const tint::sem::VariablePair& pair) {
- return pair.first->Is<tint::sem::GlobalVariable>() &&
- (!pair.second || pair.second->Is<tint::sem::GlobalVariable>());
+ return pair.first->Is<tint::sem::GlobalVariable>() &&
+ (!pair.second || pair.second->Is<tint::sem::GlobalVariable>());
}
} // namespace
@@ -46,308 +46,296 @@ CombineSamplers::BindingInfo::~BindingInfo() = default;
/// The PIMPL state for the CombineSamplers transform
struct CombineSamplers::State {
- /// The clone context
- CloneContext& ctx;
-
- /// The binding info
- const BindingInfo* binding_info;
-
- /// Map from a texture/sampler pair to the corresponding combined sampler
- /// variable
- using CombinedTextureSamplerMap =
- std::unordered_map<sem::VariablePair, const ast::Variable*>;
-
- /// Use sem::BindingPoint without scope.
- using BindingPoint = sem::BindingPoint;
-
- /// A map of all global texture/sampler variable pairs to the global
- /// combined sampler variable that will replace it.
- CombinedTextureSamplerMap global_combined_texture_samplers_;
-
- /// A map of all texture/sampler variable pairs that contain a function
- /// parameter to the combined sampler function paramter that will replace it.
- std::unordered_map<const sem::Function*, CombinedTextureSamplerMap>
- function_combined_texture_samplers_;
-
- /// Placeholder global samplers used when a function contains texture-only
- /// references (one comparison sampler, one regular). These are also used as
- /// temporary sampler parameters to the texture builtins to satisfy the WGSL
- /// resolver, but are then ignored and removed by the GLSL writer.
- const ast::Variable* placeholder_samplers_[2] = {};
-
- /// Group and binding attributes used by all combined sampler globals.
- /// Group 0 and binding 0 are used, with collisions disabled.
- /// @returns the newly-created attribute list
- ast::AttributeList Attributes() const {
- auto attributes = ctx.dst->GroupAndBinding(0, 0);
- attributes.push_back(
- ctx.dst->Disable(ast::DisabledValidation::kBindingPointCollision));
- return attributes;
- }
-
- /// Constructor
- /// @param context the clone context
- /// @param info the binding map information
- State(CloneContext& context, const BindingInfo* info)
- : ctx(context), binding_info(info) {}
-
- /// Creates a combined sampler global variables.
- /// (Note this is actually a Texture node at the AST level, but it will be
- /// written as the corresponding sampler (eg., sampler2D) on GLSL output.)
- /// @param texture_var the texture (global) variable
- /// @param sampler_var the sampler (global) variable
- /// @param name the default name to use (may be overridden by map lookup)
- /// @returns the newly-created global variable
- const ast::Variable* CreateCombinedGlobal(const sem::Variable* texture_var,
- const sem::Variable* sampler_var,
- std::string name) {
- SamplerTexturePair bp_pair;
- bp_pair.texture_binding_point =
- texture_var->As<sem::GlobalVariable>()->BindingPoint();
- bp_pair.sampler_binding_point =
- sampler_var ? sampler_var->As<sem::GlobalVariable>()->BindingPoint()
- : binding_info->placeholder_binding_point;
- auto it = binding_info->binding_map.find(bp_pair);
- if (it != binding_info->binding_map.end()) {
- name = it->second;
+ /// The clone context
+ CloneContext& ctx;
+
+ /// The binding info
+ const BindingInfo* binding_info;
+
+ /// Map from a texture/sampler pair to the corresponding combined sampler
+ /// variable
+ using CombinedTextureSamplerMap = std::unordered_map<sem::VariablePair, const ast::Variable*>;
+
+ /// Use sem::BindingPoint without scope.
+ using BindingPoint = sem::BindingPoint;
+
+ /// A map of all global texture/sampler variable pairs to the global
+ /// combined sampler variable that will replace it.
+ CombinedTextureSamplerMap global_combined_texture_samplers_;
+
+ /// A map of all texture/sampler variable pairs that contain a function
+ /// parameter to the combined sampler function parameter that will replace it.
+ std::unordered_map<const sem::Function*, CombinedTextureSamplerMap>
+ function_combined_texture_samplers_;
+
+ /// Placeholder global samplers used when a function contains texture-only
+ /// references (one comparison sampler, one regular). These are also used as
+ /// temporary sampler parameters to the texture builtins to satisfy the WGSL
+ /// resolver, but are then ignored and removed by the GLSL writer.
+ const ast::Variable* placeholder_samplers_[2] = {};
+
+ /// Group and binding attributes used by all combined sampler globals.
+ /// Group 0 and binding 0 are used, with collisions disabled.
+ /// @returns the newly-created attribute list
+ ast::AttributeList Attributes() const {
+ auto attributes = ctx.dst->GroupAndBinding(0, 0);
+ attributes.push_back(ctx.dst->Disable(ast::DisabledValidation::kBindingPointCollision));
+ return attributes;
}
- const ast::Type* type = CreateCombinedASTTypeFor(texture_var, sampler_var);
- Symbol symbol = ctx.dst->Symbols().New(name);
- return ctx.dst->Global(symbol, type, Attributes());
- }
-
- /// Creates placeholder global sampler variables.
- /// @param kind the sampler kind to create for
- /// @returns the newly-created global variable
- const ast::Variable* CreatePlaceholder(ast::SamplerKind kind) {
- const ast::Type* type = ctx.dst->ty.sampler(kind);
- const char* name = kind == ast::SamplerKind::kComparisonSampler
- ? "placeholder_comparison_sampler"
- : "placeholder_sampler";
- Symbol symbol = ctx.dst->Symbols().New(name);
- return ctx.dst->Global(symbol, type, Attributes());
- }
-
- /// Creates ast::Type for a given texture and sampler variable pair.
- /// Depth textures with no samplers are turned into the corresponding
- /// f32 texture (e.g., texture_depth_2d -> texture_2d<f32>).
- /// @param texture the texture variable of interest
- /// @param sampler the texture variable of interest
- /// @returns the newly-created type
- const ast::Type* CreateCombinedASTTypeFor(const sem::Variable* texture,
- const sem::Variable* sampler) {
- const sem::Type* texture_type = texture->Type()->UnwrapRef();
- const sem::DepthTexture* depth = texture_type->As<sem::DepthTexture>();
- if (depth && !sampler) {
- return ctx.dst->create<ast::SampledTexture>(depth->dim(),
- ctx.dst->create<ast::F32>());
- } else {
- return CreateASTTypeFor(ctx, texture_type);
- }
- }
-
- /// Performs the transformation
- void Run() {
- auto& sem = ctx.src->Sem();
-
- // Remove all texture and sampler global variables. These will be replaced
- // by combined samplers.
- for (auto* var : ctx.src->AST().GlobalVariables()) {
- auto* type = sem.Get(var->type);
- if (type && type->IsAnyOf<sem::Texture, sem::Sampler>() &&
- !type->Is<sem::StorageTexture>()) {
- ctx.Remove(ctx.src->AST().GlobalDeclarations(), var);
- } else if (auto binding_point = var->BindingPoint()) {
- if (binding_point.group->value == 0 &&
- binding_point.binding->value == 0) {
- auto* attribute =
- ctx.dst->Disable(ast::DisabledValidation::kBindingPointCollision);
- ctx.InsertFront(var->attributes, attribute);
+
+ /// Constructor
+ /// @param context the clone context
+ /// @param info the binding map information
+ State(CloneContext& context, const BindingInfo* info) : ctx(context), binding_info(info) {}
+
+ /// Creates a combined sampler global variable.
+ /// (Note this is actually a Texture node at the AST level, but it will be
+ /// written as the corresponding sampler (e.g., sampler2D) on GLSL output.)
+ /// @param texture_var the texture (global) variable
+ /// @param sampler_var the sampler (global) variable
+ /// @param name the default name to use (may be overridden by map lookup)
+ /// @returns the newly-created global variable
+ const ast::Variable* CreateCombinedGlobal(const sem::Variable* texture_var,
+ const sem::Variable* sampler_var,
+ std::string name) {
+ SamplerTexturePair bp_pair;
+ bp_pair.texture_binding_point = texture_var->As<sem::GlobalVariable>()->BindingPoint();
+ bp_pair.sampler_binding_point = sampler_var
+ ? sampler_var->As<sem::GlobalVariable>()->BindingPoint()
+ : binding_info->placeholder_binding_point;
+ auto it = binding_info->binding_map.find(bp_pair);
+ if (it != binding_info->binding_map.end()) {
+ name = it->second;
}
- }
+ const ast::Type* type = CreateCombinedASTTypeFor(texture_var, sampler_var);
+ Symbol symbol = ctx.dst->Symbols().New(name);
+ return ctx.dst->Global(symbol, type, Attributes());
}
- // Rewrite all function signatures to use combined samplers, and remove
- // separate textures & samplers. Create new combined globals where found.
- ctx.ReplaceAll([&](const ast::Function* src) -> const ast::Function* {
- if (auto* func = sem.Get(src)) {
- auto pairs = func->TextureSamplerPairs();
- if (pairs.empty()) {
- return nullptr;
- }
- ast::VariableList params;
- for (auto pair : func->TextureSamplerPairs()) {
- const sem::Variable* texture_var = pair.first;
- const sem::Variable* sampler_var = pair.second;
- std::string name =
- ctx.src->Symbols().NameFor(texture_var->Declaration()->symbol);
- if (sampler_var) {
- name += "_" + ctx.src->Symbols().NameFor(
- sampler_var->Declaration()->symbol);
- }
- if (IsGlobal(pair)) {
- // Both texture and sampler are global; add a new global variable
- // to represent the combined sampler (if not already created).
- utils::GetOrCreate(global_combined_texture_samplers_, pair, [&] {
- return CreateCombinedGlobal(texture_var, sampler_var, name);
- });
- } else {
- // Either texture or sampler (or both) is a function parameter;
- // add a new function parameter to represent the combined sampler.
- const ast::Type* type =
- CreateCombinedASTTypeFor(texture_var, sampler_var);
- const ast::Variable* var =
- ctx.dst->Param(ctx.dst->Symbols().New(name), type);
- params.push_back(var);
- function_combined_texture_samplers_[func][pair] = var;
- }
- }
- // Filter out separate textures and samplers from the original
- // function signature.
- for (auto* var : src->params) {
- if (!sem.Get(var->type)->IsAnyOf<sem::Texture, sem::Sampler>()) {
- params.push_back(ctx.Clone(var));
- }
+ /// Creates placeholder global sampler variables.
+ /// @param kind the sampler kind to create for
+ /// @returns the newly-created global variable
+ const ast::Variable* CreatePlaceholder(ast::SamplerKind kind) {
+ const ast::Type* type = ctx.dst->ty.sampler(kind);
+ const char* name = kind == ast::SamplerKind::kComparisonSampler
+ ? "placeholder_comparison_sampler"
+ : "placeholder_sampler";
+ Symbol symbol = ctx.dst->Symbols().New(name);
+ return ctx.dst->Global(symbol, type, Attributes());
+ }
+
+ /// Creates ast::Type for a given texture and sampler variable pair.
+ /// Depth textures with no samplers are turned into the corresponding
+ /// f32 texture (e.g., texture_depth_2d -> texture_2d<f32>).
+ /// @param texture the texture variable of interest
+ /// @param sampler the texture variable of interest
+ /// @param sampler the sampler variable of interest
+ const ast::Type* CreateCombinedASTTypeFor(const sem::Variable* texture,
+ const sem::Variable* sampler) {
+ const sem::Type* texture_type = texture->Type()->UnwrapRef();
+ const sem::DepthTexture* depth = texture_type->As<sem::DepthTexture>();
+ if (depth && !sampler) {
+ return ctx.dst->create<ast::SampledTexture>(depth->dim(), ctx.dst->create<ast::F32>());
+ } else {
+ return CreateASTTypeFor(ctx, texture_type);
}
- // Create a new function signature that differs only in the parameter
- // list.
- auto symbol = ctx.Clone(src->symbol);
- auto* return_type = ctx.Clone(src->return_type);
- auto* body = ctx.Clone(src->body);
- auto attributes = ctx.Clone(src->attributes);
- auto return_type_attributes = ctx.Clone(src->return_type_attributes);
- return ctx.dst->create<ast::Function>(
- symbol, params, return_type, body, std::move(attributes),
- std::move(return_type_attributes));
- }
- return nullptr;
- });
-
- // Replace all function call expressions containing texture or
- // sampler parameters to use the current function's combined samplers or
- // the combined global samplers, as appropriate.
- ctx.ReplaceAll([&](const ast::CallExpression* expr)
- -> const ast::Expression* {
- if (auto* call = sem.Get(expr)) {
- ast::ExpressionList args;
- // Replace all texture builtin calls.
- if (auto* builtin = call->Target()->As<sem::Builtin>()) {
- const auto& signature = builtin->Signature();
- int sampler_index = signature.IndexOf(sem::ParameterUsage::kSampler);
- int texture_index = signature.IndexOf(sem::ParameterUsage::kTexture);
- if (texture_index == -1) {
- return nullptr;
- }
- const sem::Expression* texture = call->Arguments()[texture_index];
- // We don't want to combine storage textures with anything, since
- // they never have associated samplers in GLSL.
- if (texture->Type()->UnwrapRef()->Is<sem::StorageTexture>()) {
- return nullptr;
- }
- const sem::Expression* sampler =
- sampler_index != -1 ? call->Arguments()[sampler_index] : nullptr;
- auto* texture_var = texture->As<sem::VariableUser>()->Variable();
- auto* sampler_var =
- sampler ? sampler->As<sem::VariableUser>()->Variable() : nullptr;
- sem::VariablePair new_pair(texture_var, sampler_var);
- for (auto* arg : expr->args) {
- auto* type = ctx.src->TypeOf(arg)->UnwrapRef();
- if (type->Is<sem::Texture>()) {
- const ast::Variable* var =
- IsGlobal(new_pair)
- ? global_combined_texture_samplers_[new_pair]
- : function_combined_texture_samplers_
- [call->Stmt()->Function()][new_pair];
- args.push_back(ctx.dst->Expr(var->symbol));
- } else if (auto* sampler_type = type->As<sem::Sampler>()) {
- ast::SamplerKind kind = sampler_type->kind();
- int index = (kind == ast::SamplerKind::kSampler) ? 0 : 1;
- const ast::Variable*& p = placeholder_samplers_[index];
- if (!p) {
- p = CreatePlaceholder(kind);
- }
- args.push_back(ctx.dst->Expr(p->symbol));
- } else {
- args.push_back(ctx.Clone(arg));
+ }
+
+ /// Performs the transformation
+ void Run() {
+ auto& sem = ctx.src->Sem();
+
+ // Remove all texture and sampler global variables. These will be replaced
+ // by combined samplers.
+ for (auto* var : ctx.src->AST().GlobalVariables()) {
+ auto* type = sem.Get(var->type);
+ if (type && type->IsAnyOf<sem::Texture, sem::Sampler>() &&
+ !type->Is<sem::StorageTexture>()) {
+ ctx.Remove(ctx.src->AST().GlobalDeclarations(), var);
+ } else if (auto binding_point = var->BindingPoint()) {
+ if (binding_point.group->value == 0 && binding_point.binding->value == 0) {
+ auto* attribute =
+ ctx.dst->Disable(ast::DisabledValidation::kBindingPointCollision);
+ ctx.InsertFront(var->attributes, attribute);
+ }
}
- }
- const ast::Expression* value =
- ctx.dst->Call(ctx.Clone(expr->target.name), args);
- if (builtin->Type() == sem::BuiltinType::kTextureLoad &&
- texture_var->Type()->UnwrapRef()->Is<sem::DepthTexture>() &&
- !call->Stmt()->Declaration()->Is<ast::CallStatement>()) {
- value = ctx.dst->MemberAccessor(value, "x");
- }
- return value;
}
- // Replace all function calls.
- if (auto* callee = call->Target()->As<sem::Function>()) {
- for (auto pair : callee->TextureSamplerPairs()) {
- // Global pairs used by the callee do not require a function
- // parameter at the call site.
- if (IsGlobal(pair)) {
- continue;
- }
- const sem::Variable* texture_var = pair.first;
- const sem::Variable* sampler_var = pair.second;
- if (auto* param = texture_var->As<sem::Parameter>()) {
- const sem::Expression* texture =
- call->Arguments()[param->Index()];
- texture_var = texture->As<sem::VariableUser>()->Variable();
- }
- if (sampler_var) {
- if (auto* param = sampler_var->As<sem::Parameter>()) {
- const sem::Expression* sampler =
- call->Arguments()[param->Index()];
- sampler_var = sampler->As<sem::VariableUser>()->Variable();
- }
+
+ // Rewrite all function signatures to use combined samplers, and remove
+ // separate textures & samplers. Create new combined globals where found.
+ ctx.ReplaceAll([&](const ast::Function* src) -> const ast::Function* {
+ if (auto* func = sem.Get(src)) {
+ auto pairs = func->TextureSamplerPairs();
+ if (pairs.empty()) {
+ return nullptr;
+ }
+ ast::VariableList params;
+ for (auto pair : func->TextureSamplerPairs()) {
+ const sem::Variable* texture_var = pair.first;
+ const sem::Variable* sampler_var = pair.second;
+ std::string name =
+ ctx.src->Symbols().NameFor(texture_var->Declaration()->symbol);
+ if (sampler_var) {
+ name +=
+ "_" + ctx.src->Symbols().NameFor(sampler_var->Declaration()->symbol);
+ }
+ if (IsGlobal(pair)) {
+ // Both texture and sampler are global; add a new global variable
+ // to represent the combined sampler (if not already created).
+ utils::GetOrCreate(global_combined_texture_samplers_, pair, [&] {
+ return CreateCombinedGlobal(texture_var, sampler_var, name);
+ });
+ } else {
+ // Either texture or sampler (or both) is a function parameter;
+ // add a new function parameter to represent the combined sampler.
+ const ast::Type* type = CreateCombinedASTTypeFor(texture_var, sampler_var);
+ const ast::Variable* var =
+ ctx.dst->Param(ctx.dst->Symbols().New(name), type);
+ params.push_back(var);
+ function_combined_texture_samplers_[func][pair] = var;
+ }
+ }
+ // Filter out separate textures and samplers from the original
+ // function signature.
+ for (auto* var : src->params) {
+ if (!sem.Get(var->type)->IsAnyOf<sem::Texture, sem::Sampler>()) {
+ params.push_back(ctx.Clone(var));
+ }
+ }
+ // Create a new function signature that differs only in the parameter
+ // list.
+ auto symbol = ctx.Clone(src->symbol);
+ auto* return_type = ctx.Clone(src->return_type);
+ auto* body = ctx.Clone(src->body);
+ auto attributes = ctx.Clone(src->attributes);
+ auto return_type_attributes = ctx.Clone(src->return_type_attributes);
+ return ctx.dst->create<ast::Function>(symbol, params, return_type, body,
+ std::move(attributes),
+ std::move(return_type_attributes));
}
- sem::VariablePair new_pair(texture_var, sampler_var);
- // If both texture and sampler are (now) global, pass that
- // global variable to the callee. Otherwise use the caller's
- // function parameter for this pair.
- const ast::Variable* var =
- IsGlobal(new_pair) ? global_combined_texture_samplers_[new_pair]
- : function_combined_texture_samplers_
- [call->Stmt()->Function()][new_pair];
- auto* arg = ctx.dst->Expr(var->symbol);
- args.push_back(arg);
- }
- // Append all of the remaining non-texture and non-sampler
- // parameters.
- for (auto* arg : expr->args) {
- if (!ctx.src->TypeOf(arg)
- ->UnwrapRef()
- ->IsAnyOf<sem::Texture, sem::Sampler>()) {
- args.push_back(ctx.Clone(arg));
+ return nullptr;
+ });
+
+ // Replace all function call expressions containing texture or
+ // sampler parameters to use the current function's combined samplers or
+ // the combined global samplers, as appropriate.
+ ctx.ReplaceAll([&](const ast::CallExpression* expr) -> const ast::Expression* {
+ if (auto* call = sem.Get(expr)->UnwrapMaterialize()->As<sem::Call>()) {
+ ast::ExpressionList args;
+ // Replace all texture builtin calls.
+ if (auto* builtin = call->Target()->As<sem::Builtin>()) {
+ const auto& signature = builtin->Signature();
+ int sampler_index = signature.IndexOf(sem::ParameterUsage::kSampler);
+ int texture_index = signature.IndexOf(sem::ParameterUsage::kTexture);
+ if (texture_index == -1) {
+ return nullptr;
+ }
+ const sem::Expression* texture = call->Arguments()[texture_index];
+ // We don't want to combine storage textures with anything, since
+ // they never have associated samplers in GLSL.
+ if (texture->Type()->UnwrapRef()->Is<sem::StorageTexture>()) {
+ return nullptr;
+ }
+ const sem::Expression* sampler =
+ sampler_index != -1 ? call->Arguments()[sampler_index] : nullptr;
+ auto* texture_var = texture->As<sem::VariableUser>()->Variable();
+ auto* sampler_var =
+ sampler ? sampler->As<sem::VariableUser>()->Variable() : nullptr;
+ sem::VariablePair new_pair(texture_var, sampler_var);
+ for (auto* arg : expr->args) {
+ auto* type = ctx.src->TypeOf(arg)->UnwrapRef();
+ if (type->Is<sem::Texture>()) {
+ const ast::Variable* var =
+ IsGlobal(new_pair)
+ ? global_combined_texture_samplers_[new_pair]
+ : function_combined_texture_samplers_[call->Stmt()->Function()]
+ [new_pair];
+ args.push_back(ctx.dst->Expr(var->symbol));
+ } else if (auto* sampler_type = type->As<sem::Sampler>()) {
+ ast::SamplerKind kind = sampler_type->kind();
+ int index = (kind == ast::SamplerKind::kSampler) ? 0 : 1;
+ const ast::Variable*& p = placeholder_samplers_[index];
+ if (!p) {
+ p = CreatePlaceholder(kind);
+ }
+ args.push_back(ctx.dst->Expr(p->symbol));
+ } else {
+ args.push_back(ctx.Clone(arg));
+ }
+ }
+ const ast::Expression* value =
+ ctx.dst->Call(ctx.Clone(expr->target.name), args);
+ if (builtin->Type() == sem::BuiltinType::kTextureLoad &&
+ texture_var->Type()->UnwrapRef()->Is<sem::DepthTexture>() &&
+ !call->Stmt()->Declaration()->Is<ast::CallStatement>()) {
+ value = ctx.dst->MemberAccessor(value, "x");
+ }
+ return value;
+ }
+ // Replace all function calls.
+ if (auto* callee = call->Target()->As<sem::Function>()) {
+ for (auto pair : callee->TextureSamplerPairs()) {
+ // Global pairs used by the callee do not require a function
+ // parameter at the call site.
+ if (IsGlobal(pair)) {
+ continue;
+ }
+ const sem::Variable* texture_var = pair.first;
+ const sem::Variable* sampler_var = pair.second;
+ if (auto* param = texture_var->As<sem::Parameter>()) {
+ const sem::Expression* texture = call->Arguments()[param->Index()];
+ texture_var = texture->As<sem::VariableUser>()->Variable();
+ }
+ if (sampler_var) {
+ if (auto* param = sampler_var->As<sem::Parameter>()) {
+ const sem::Expression* sampler = call->Arguments()[param->Index()];
+ sampler_var = sampler->As<sem::VariableUser>()->Variable();
+ }
+ }
+ sem::VariablePair new_pair(texture_var, sampler_var);
+ // If both texture and sampler are (now) global, pass that
+ // global variable to the callee. Otherwise use the caller's
+ // function parameter for this pair.
+ const ast::Variable* var =
+ IsGlobal(new_pair)
+ ? global_combined_texture_samplers_[new_pair]
+ : function_combined_texture_samplers_[call->Stmt()->Function()]
+ [new_pair];
+ auto* arg = ctx.dst->Expr(var->symbol);
+ args.push_back(arg);
+ }
+ // Append all of the remaining non-texture and non-sampler
+ // parameters.
+ for (auto* arg : expr->args) {
+ if (!ctx.src->TypeOf(arg)
+ ->UnwrapRef()
+ ->IsAnyOf<sem::Texture, sem::Sampler>()) {
+ args.push_back(ctx.Clone(arg));
+ }
+ }
+ return ctx.dst->Call(ctx.Clone(expr->target.name), args);
+ }
}
- }
- return ctx.dst->Call(ctx.Clone(expr->target.name), args);
- }
- }
- return nullptr;
- });
+ return nullptr;
+ });
- ctx.Clone();
- }
+ ctx.Clone();
+ }
};
CombineSamplers::CombineSamplers() = default;
CombineSamplers::~CombineSamplers() = default;
-void CombineSamplers::Run(CloneContext& ctx,
- const DataMap& inputs,
- DataMap&) const {
- auto* binding_info = inputs.Get<BindingInfo>();
- if (!binding_info) {
- ctx.dst->Diagnostics().add_error(
- diag::System::Transform,
- "missing transform data for " + std::string(TypeInfo().name));
- return;
- }
-
- State(ctx, binding_info).Run();
+void CombineSamplers::Run(CloneContext& ctx, const DataMap& inputs, DataMap&) const {
+ auto* binding_info = inputs.Get<BindingInfo>();
+ if (!binding_info) {
+ ctx.dst->Diagnostics().add_error(
+ diag::System::Transform, "missing transform data for " + std::string(TypeInfo().name));
+ return;
+ }
+
+ State(ctx, binding_info).Run();
}
} // namespace tint::transform
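
Note the guard in the rewritten Run() above: CombineSamplers does nothing without a BindingInfo in the input DataMap and reports the "missing transform data" error instead. The minimal valid invocation, used by most of the tests further below, passes an empty binding map and a default placeholder binding point; a sketch of that pattern:

    // Minimal sketch: no renames, default placeholder binding point.
    DataMap data;
    data.Add<CombineSamplers::BindingInfo>(CombineSamplers::BindingMap(), sem::BindingPoint());
    auto got = Run<CombineSamplers>(src, data);
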
diff --git a/chromium/third_party/dawn/src/tint/transform/combine_samplers.h b/chromium/third_party/dawn/src/tint/transform/combine_samplers.h
index d15d2ab8551..8dfc0987928 100644
--- a/chromium/third_party/dawn/src/tint/transform/combine_samplers.h
+++ b/chromium/third_party/dawn/src/tint/transform/combine_samplers.h
@@ -53,54 +53,52 @@ namespace tint::transform {
/// (dimensionality, component type, etc). The GLSL writer outputs such
/// (Tint) Textures as (GLSL) Samplers.
class CombineSamplers final : public Castable<CombineSamplers, Transform> {
- public:
- /// A pair of binding points.
- using SamplerTexturePair = sem::SamplerTexturePair;
+ public:
+ /// A pair of binding points.
+ using SamplerTexturePair = sem::SamplerTexturePair;
- /// A map from a sampler/texture pair to a named global.
- using BindingMap = std::unordered_map<SamplerTexturePair, std::string>;
+ /// A map from a sampler/texture pair to a named global.
+ using BindingMap = std::unordered_map<SamplerTexturePair, std::string>;
- /// The client-provided mapping from separate texture and sampler binding
- /// points to combined sampler binding point.
- struct BindingInfo final : public Castable<Data, transform::Data> {
- /// Constructor
- /// @param map the map of all (texture, sampler) -> (combined) pairs
- /// @param placeholder the binding point to use for placeholder samplers.
- BindingInfo(const BindingMap& map, const sem::BindingPoint& placeholder);
+ /// The client-provided mapping from separate texture and sampler binding
+ /// points to combined sampler binding point.
+ struct BindingInfo final : public Castable<Data, transform::Data> {
+ /// Constructor
+ /// @param map the map of all (texture, sampler) -> (combined) pairs
+ /// @param placeholder the binding point to use for placeholder samplers.
+ BindingInfo(const BindingMap& map, const sem::BindingPoint& placeholder);
+
+ /// Copy constructor
+ /// @param other the other BindingInfo to copy
+ BindingInfo(const BindingInfo& other);
+
+ /// Destructor
+ ~BindingInfo() override;
- /// Copy constructor
- /// @param other the other BindingInfo to copy
- BindingInfo(const BindingInfo& other);
+ /// A map of bindings from (texture, sampler) -> combined sampler.
+ BindingMap binding_map;
+
+ /// The binding point to use for placeholder samplers.
+ sem::BindingPoint placeholder_binding_point;
+ };
+
+ /// Constructor
+ CombineSamplers();
/// Destructor
- ~BindingInfo() override;
-
- /// A map of bindings from (texture, sampler) -> combined sampler.
- BindingMap binding_map;
-
- /// The binding point to use for placeholder samplers.
- sem::BindingPoint placeholder_binding_point;
- };
-
- /// Constructor
- CombineSamplers();
-
- /// Destructor
- ~CombineSamplers() override;
-
- protected:
- /// The PIMPL state for this transform
- struct State;
-
- /// Runs the transform using the CloneContext built for transforming a
- /// program. Run() is responsible for calling Clone() on the CloneContext.
- /// @param ctx the CloneContext primed with the input program and
- /// ProgramBuilder
- /// @param inputs optional extra transform-specific input data
- /// @param outputs optional extra transform-specific output data
- void Run(CloneContext& ctx,
- const DataMap& inputs,
- DataMap& outputs) const override;
+ ~CombineSamplers() override;
+
+ protected:
+ /// The PIMPL state for this transform
+ struct State;
+
+ /// Runs the transform using the CloneContext built for transforming a
+ /// program. Run() is responsible for calling Clone() on the CloneContext.
+ /// @param ctx the CloneContext primed with the input program and
+ /// ProgramBuilder
+ /// @param inputs optional extra transform-specific input data
+ /// @param outputs optional extra transform-specific output data
+ void Run(CloneContext& ctx, const DataMap& inputs, DataMap& outputs) const override;
};
} // namespace tint::transform
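
The BindingInfo declared above is also how a client renames a particular (texture, sampler) pair. The sketch below mirrors the SimplePairRename test in the next file; the binding numbers, the "fuzzy" name, and the {1024, 0} placeholder are taken from that test and are not meaningful defaults.

    // Sketch: map (texture @group(0) @binding(1), sampler @group(2) @binding(3))
    // to a combined global named "fuzzy"; values mirror SimplePairRename below.
    CombineSamplers::BindingMap map;
    sem::SamplerTexturePair pair;
    pair.texture_binding_point.group = 0;
    pair.texture_binding_point.binding = 1;
    pair.sampler_binding_point.group = 2;
    pair.sampler_binding_point.binding = 3;
    map[pair] = "fuzzy";
    sem::BindingPoint placeholder{1024, 0};  // binding point given to placeholder samplers
    DataMap data;
    data.Add<CombineSamplers::BindingInfo>(map, placeholder);
    auto got = Run<CombineSamplers>(src, data);
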
diff --git a/chromium/third_party/dawn/src/tint/transform/combine_samplers_test.cc b/chromium/third_party/dawn/src/tint/transform/combine_samplers_test.cc
index cb60103a322..cad310959ed 100644
--- a/chromium/third_party/dawn/src/tint/transform/combine_samplers_test.cc
+++ b/chromium/third_party/dawn/src/tint/transform/combine_samplers_test.cc
@@ -25,19 +25,18 @@ namespace {
using CombineSamplersTest = TransformTest;
TEST_F(CombineSamplersTest, EmptyModule) {
- auto* src = "";
- auto* expect = "";
+ auto* src = "";
+ auto* expect = "";
- DataMap data;
- data.Add<CombineSamplers::BindingInfo>(CombineSamplers::BindingMap(),
- sem::BindingPoint());
- auto got = Run<CombineSamplers>(src, data);
+ DataMap data;
+ data.Add<CombineSamplers::BindingInfo>(CombineSamplers::BindingMap(), sem::BindingPoint());
+ auto got = Run<CombineSamplers>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CombineSamplersTest, SimplePair) {
- auto* src = R"(
+ auto* src = R"(
@group(0) @binding(0) var t : texture_2d<f32>;
@group(0) @binding(1) var s : sampler;
@@ -46,7 +45,7 @@ fn main() -> vec4<f32> {
return textureSample(t, s, vec2<f32>(1.0, 2.0));
}
)";
- auto* expect = R"(
+ auto* expect = R"(
@group(0) @binding(0) @internal(disable_validation__binding_point_collision) var t_s : texture_2d<f32>;
@group(0) @binding(0) @internal(disable_validation__binding_point_collision) var placeholder_sampler : sampler;
@@ -56,16 +55,15 @@ fn main() -> vec4<f32> {
}
)";
- DataMap data;
- data.Add<CombineSamplers::BindingInfo>(CombineSamplers::BindingMap(),
- sem::BindingPoint());
- auto got = Run<CombineSamplers>(src, data);
+ DataMap data;
+ data.Add<CombineSamplers::BindingInfo>(CombineSamplers::BindingMap(), sem::BindingPoint());
+ auto got = Run<CombineSamplers>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CombineSamplersTest, SimplePair_OutOfOrder) {
- auto* src = R"(
+ auto* src = R"(
fn main() -> vec4<f32> {
return textureSample(t, s, vec2<f32>(1.0, 2.0));
}
@@ -74,7 +72,7 @@ fn main() -> vec4<f32> {
@group(0) @binding(1) var s : sampler;
)";
- auto* expect = R"(
+ auto* expect = R"(
@group(0) @binding(0) @internal(disable_validation__binding_point_collision) var t_s : texture_2d<f32>;
@group(0) @binding(0) @internal(disable_validation__binding_point_collision) var placeholder_sampler : sampler;
@@ -84,16 +82,15 @@ fn main() -> vec4<f32> {
}
)";
- DataMap data;
- data.Add<CombineSamplers::BindingInfo>(CombineSamplers::BindingMap(),
- sem::BindingPoint());
- auto got = Run<CombineSamplers>(src, data);
+ DataMap data;
+ data.Add<CombineSamplers::BindingInfo>(CombineSamplers::BindingMap(), sem::BindingPoint());
+ auto got = Run<CombineSamplers>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CombineSamplersTest, SimplePairInAFunction) {
- auto* src = R"(
+ auto* src = R"(
@group(0) @binding(0) var t : texture_2d<f32>;
@group(0) @binding(1) var s : sampler;
@@ -106,7 +103,7 @@ fn main() -> vec4<f32> {
return sample(t, s, vec2<f32>(1.0, 2.0));
}
)";
- auto* expect = R"(
+ auto* expect = R"(
@group(0) @binding(0) @internal(disable_validation__binding_point_collision) var placeholder_sampler : sampler;
fn sample(t_s : texture_2d<f32>, coords : vec2<f32>) -> vec4<f32> {
@@ -120,16 +117,15 @@ fn main() -> vec4<f32> {
}
)";
- DataMap data;
- data.Add<CombineSamplers::BindingInfo>(CombineSamplers::BindingMap(),
- sem::BindingPoint());
- auto got = Run<CombineSamplers>(src, data);
+ DataMap data;
+ data.Add<CombineSamplers::BindingInfo>(CombineSamplers::BindingMap(), sem::BindingPoint());
+ auto got = Run<CombineSamplers>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CombineSamplersTest, SimplePairInAFunction_OutOfOrder) {
- auto* src = R"(
+ auto* src = R"(
fn main() -> vec4<f32> {
return sample(t, s, vec2<f32>(1.0, 2.0));
}
@@ -142,7 +138,7 @@ fn sample(t : texture_2d<f32>, s : sampler, coords : vec2<f32>) -> vec4<f32> {
@group(0) @binding(0) var t : texture_2d<f32>;
)";
- auto* expect = R"(
+ auto* expect = R"(
@group(0) @binding(0) @internal(disable_validation__binding_point_collision) var t_s : texture_2d<f32>;
fn main() -> vec4<f32> {
@@ -156,16 +152,15 @@ fn sample(t_s_1 : texture_2d<f32>, coords : vec2<f32>) -> vec4<f32> {
}
)";
- DataMap data;
- data.Add<CombineSamplers::BindingInfo>(CombineSamplers::BindingMap(),
- sem::BindingPoint());
- auto got = Run<CombineSamplers>(src, data);
+ DataMap data;
+ data.Add<CombineSamplers::BindingInfo>(CombineSamplers::BindingMap(), sem::BindingPoint());
+ auto got = Run<CombineSamplers>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CombineSamplersTest, SimplePairRename) {
- auto* src = R"(
+ auto* src = R"(
@group(0) @binding(1) var t : texture_2d<f32>;
@group(2) @binding(3) var s : sampler;
@@ -174,7 +169,7 @@ fn main() -> vec4<f32> {
return textureSample(t, s, vec2<f32>(1.0, 2.0));
}
)";
- auto* expect = R"(
+ auto* expect = R"(
@group(0) @binding(0) @internal(disable_validation__binding_point_collision) var fuzzy : texture_2d<f32>;
@group(0) @binding(0) @internal(disable_validation__binding_point_collision) var placeholder_sampler : sampler;
@@ -184,23 +179,23 @@ fn main() -> vec4<f32> {
}
)";
- DataMap data;
- CombineSamplers::BindingMap map;
- sem::SamplerTexturePair pair;
- pair.texture_binding_point.group = 0;
- pair.texture_binding_point.binding = 1;
- pair.sampler_binding_point.group = 2;
- pair.sampler_binding_point.binding = 3;
- map[pair] = "fuzzy";
- sem::BindingPoint placeholder{1024, 0};
- data.Add<CombineSamplers::BindingInfo>(map, placeholder);
- auto got = Run<CombineSamplers>(src, data);
+ DataMap data;
+ CombineSamplers::BindingMap map;
+ sem::SamplerTexturePair pair;
+ pair.texture_binding_point.group = 0;
+ pair.texture_binding_point.binding = 1;
+ pair.sampler_binding_point.group = 2;
+ pair.sampler_binding_point.binding = 3;
+ map[pair] = "fuzzy";
+ sem::BindingPoint placeholder{1024, 0};
+ data.Add<CombineSamplers::BindingInfo>(map, placeholder);
+ auto got = Run<CombineSamplers>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CombineSamplersTest, SimplePairRenameMiss) {
- auto* src = R"(
+ auto* src = R"(
@group(0) @binding(1) var t : texture_2d<f32>;
@group(2) @binding(3) var s : sampler;
@@ -209,7 +204,7 @@ fn main() -> vec4<f32> {
return textureSample(t, s, vec2<f32>(1.0, 2.0));
}
)";
- auto* expect = R"(
+ auto* expect = R"(
@group(0) @binding(0) @internal(disable_validation__binding_point_collision) var t_s : texture_2d<f32>;
@group(0) @binding(0) @internal(disable_validation__binding_point_collision) var placeholder_sampler : sampler;
@@ -219,23 +214,23 @@ fn main() -> vec4<f32> {
}
)";
- DataMap data;
- CombineSamplers::BindingMap map;
- sem::SamplerTexturePair pair;
- pair.texture_binding_point.group = 3;
- pair.texture_binding_point.binding = 2;
- pair.sampler_binding_point.group = 1;
- pair.sampler_binding_point.binding = 0;
- map[pair] = "fuzzy";
- sem::BindingPoint placeholder{1024, 0};
- data.Add<CombineSamplers::BindingInfo>(map, placeholder);
- auto got = Run<CombineSamplers>(src, data);
+ DataMap data;
+ CombineSamplers::BindingMap map;
+ sem::SamplerTexturePair pair;
+ pair.texture_binding_point.group = 3;
+ pair.texture_binding_point.binding = 2;
+ pair.sampler_binding_point.group = 1;
+ pair.sampler_binding_point.binding = 0;
+ map[pair] = "fuzzy";
+ sem::BindingPoint placeholder{1024, 0};
+ data.Add<CombineSamplers::BindingInfo>(map, placeholder);
+ auto got = Run<CombineSamplers>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CombineSamplersTest, AliasedTypes) {
- auto* src = R"(
+ auto* src = R"(
type Tex2d = texture_2d<f32>;
@@ -251,7 +246,7 @@ fn main() -> vec4<f32> {
return sample(t, s, vec2<f32>(1.0, 2.0));
}
)";
- auto* expect = R"(
+ auto* expect = R"(
type Tex2d = texture_2d<f32>;
@group(0) @binding(0) @internal(disable_validation__binding_point_collision) var placeholder_sampler : sampler;
@@ -267,16 +262,15 @@ fn main() -> vec4<f32> {
}
)";
- DataMap data;
- data.Add<CombineSamplers::BindingInfo>(CombineSamplers::BindingMap(),
- sem::BindingPoint());
- auto got = Run<CombineSamplers>(src, data);
+ DataMap data;
+ data.Add<CombineSamplers::BindingInfo>(CombineSamplers::BindingMap(), sem::BindingPoint());
+ auto got = Run<CombineSamplers>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CombineSamplersTest, AliasedTypes_OutOfOrder) {
- auto* src = R"(
+ auto* src = R"(
fn main() -> vec4<f32> {
return sample(t, s, vec2<f32>(1.0, 2.0));
}
@@ -290,7 +284,7 @@ fn sample(t : Tex2d, s : sampler, coords : vec2<f32>) -> vec4<f32> {
type Tex2d = texture_2d<f32>;
)";
- auto* expect = R"(
+ auto* expect = R"(
@group(0) @binding(0) @internal(disable_validation__binding_point_collision) var t_s : texture_2d<f32>;
fn main() -> vec4<f32> {
@@ -306,16 +300,15 @@ fn sample(t_s_1 : texture_2d<f32>, coords : vec2<f32>) -> vec4<f32> {
type Tex2d = texture_2d<f32>;
)";
- DataMap data;
- data.Add<CombineSamplers::BindingInfo>(CombineSamplers::BindingMap(),
- sem::BindingPoint());
- auto got = Run<CombineSamplers>(src, data);
+ DataMap data;
+ data.Add<CombineSamplers::BindingInfo>(CombineSamplers::BindingMap(), sem::BindingPoint());
+ auto got = Run<CombineSamplers>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CombineSamplersTest, SimplePairInTwoFunctions) {
- auto* src = R"(
+ auto* src = R"(
@group(0) @binding(0) var t : texture_2d<f32>;
@group(0) @binding(1) var s : sampler;
@@ -332,7 +325,7 @@ fn main() -> vec4<f32> {
return f(t, s, vec2<f32>(1.0, 2.0));
}
)";
- auto* expect = R"(
+ auto* expect = R"(
@group(0) @binding(0) @internal(disable_validation__binding_point_collision) var placeholder_sampler : sampler;
fn g(t_s : texture_2d<f32>, coords : vec2<f32>) -> vec4<f32> {
@@ -350,16 +343,15 @@ fn main() -> vec4<f32> {
}
)";
- DataMap data;
- data.Add<CombineSamplers::BindingInfo>(CombineSamplers::BindingMap(),
- sem::BindingPoint());
- auto got = Run<CombineSamplers>(src, data);
+ DataMap data;
+ data.Add<CombineSamplers::BindingInfo>(CombineSamplers::BindingMap(), sem::BindingPoint());
+ auto got = Run<CombineSamplers>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CombineSamplersTest, SimplePairInTwoFunctions_OutOfOrder) {
- auto* src = R"(
+ auto* src = R"(
fn main() -> vec4<f32> {
return f(t, s, vec2<f32>(1.0, 2.0));
}
@@ -375,7 +367,7 @@ fn g(t : texture_2d<f32>, s : sampler, coords : vec2<f32>) -> vec4<f32> {
@group(0) @binding(1) var s : sampler;
@group(0) @binding(0) var t : texture_2d<f32>;
)";
- auto* expect = R"(
+ auto* expect = R"(
@group(0) @binding(0) @internal(disable_validation__binding_point_collision) var t_s : texture_2d<f32>;
fn main() -> vec4<f32> {
@@ -393,16 +385,15 @@ fn g(t_s_2 : texture_2d<f32>, coords : vec2<f32>) -> vec4<f32> {
}
)";
- DataMap data;
- data.Add<CombineSamplers::BindingInfo>(CombineSamplers::BindingMap(),
- sem::BindingPoint());
- auto got = Run<CombineSamplers>(src, data);
+ DataMap data;
+ data.Add<CombineSamplers::BindingInfo>(CombineSamplers::BindingMap(), sem::BindingPoint());
+ auto got = Run<CombineSamplers>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CombineSamplersTest, TwoFunctionsGenerateSamePair) {
- auto* src = R"(
+ auto* src = R"(
@group(1) @binding(0) var tex : texture_2d<f32>;
@group(1) @binding(1) var samp : sampler;
@@ -419,7 +410,7 @@ fn main() -> vec4<f32> {
return f() + g();
}
)";
- auto* expect = R"(
+ auto* expect = R"(
@group(0) @binding(0) @internal(disable_validation__binding_point_collision) var tex_samp : texture_2d<f32>;
@group(0) @binding(0) @internal(disable_validation__binding_point_collision) var placeholder_sampler : sampler;
@@ -437,16 +428,15 @@ fn main() -> vec4<f32> {
}
)";
- DataMap data;
- data.Add<CombineSamplers::BindingInfo>(CombineSamplers::BindingMap(),
- sem::BindingPoint());
- auto got = Run<CombineSamplers>(src, data);
+ DataMap data;
+ data.Add<CombineSamplers::BindingInfo>(CombineSamplers::BindingMap(), sem::BindingPoint());
+ auto got = Run<CombineSamplers>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CombineSamplersTest, ThreeTexturesThreeSamplers) {
- auto* src = R"(
+ auto* src = R"(
@group(0) @binding(0) var tex1 : texture_2d<f32>;
@group(0) @binding(1) var tex2 : texture_2d<f32>;
@group(0) @binding(2) var tex3 : texture_2d<f32>;
@@ -471,7 +461,7 @@ fn main() -> vec4<f32> {
+ sample(tex3, samp3);
}
)";
- auto* expect = R"(
+ auto* expect = R"(
@group(0) @binding(0) @internal(disable_validation__binding_point_collision) var placeholder_sampler : sampler;
fn sample(t_s : texture_2d<f32>) -> vec4<f32> {
@@ -501,16 +491,15 @@ fn main() -> vec4<f32> {
}
)";
- DataMap data;
- data.Add<CombineSamplers::BindingInfo>(CombineSamplers::BindingMap(),
- sem::BindingPoint());
- auto got = Run<CombineSamplers>(src, data);
+ DataMap data;
+ data.Add<CombineSamplers::BindingInfo>(CombineSamplers::BindingMap(), sem::BindingPoint());
+ auto got = Run<CombineSamplers>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CombineSamplersTest, TwoFunctionsTwoTexturesDiamond) {
- auto* src = R"(
+ auto* src = R"(
@group(0) @binding(0) var tex1 : texture_2d<f32>;
@group(0) @binding(1) var tex2 : texture_2d<f32>;
@@ -529,7 +518,7 @@ fn main() -> vec4<f32> {
return f(tex1, tex2, samp, vec2<f32>(1.0, 2.0));
}
)";
- auto* expect = R"(
+ auto* expect = R"(
@group(0) @binding(0) @internal(disable_validation__binding_point_collision) var placeholder_sampler : sampler;
fn sample(t_s : texture_2d<f32>, coords : vec2<f32>) -> vec4<f32> {
@@ -549,16 +538,15 @@ fn main() -> vec4<f32> {
}
)";
- DataMap data;
- data.Add<CombineSamplers::BindingInfo>(CombineSamplers::BindingMap(),
- sem::BindingPoint());
- auto got = Run<CombineSamplers>(src, data);
+ DataMap data;
+ data.Add<CombineSamplers::BindingInfo>(CombineSamplers::BindingMap(), sem::BindingPoint());
+ auto got = Run<CombineSamplers>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CombineSamplersTest, TwoFunctionsTwoSamplersDiamond) {
- auto* src = R"(
+ auto* src = R"(
@group(0) @binding(0) var tex : texture_2d<f32>;
@group(0) @binding(1) var samp1 : sampler;
@@ -577,7 +565,7 @@ fn main() -> vec4<f32> {
return f(tex, samp1, samp2, vec2<f32>(1.0, 2.0));
}
)";
- auto* expect = R"(
+ auto* expect = R"(
@group(0) @binding(0) @internal(disable_validation__binding_point_collision) var placeholder_sampler : sampler;
fn sample(t_s : texture_2d<f32>, coords : vec2<f32>) -> vec4<f32> {
@@ -597,16 +585,15 @@ fn main() -> vec4<f32> {
}
)";
- DataMap data;
- data.Add<CombineSamplers::BindingInfo>(CombineSamplers::BindingMap(),
- sem::BindingPoint());
- auto got = Run<CombineSamplers>(src, data);
+ DataMap data;
+ data.Add<CombineSamplers::BindingInfo>(CombineSamplers::BindingMap(), sem::BindingPoint());
+ auto got = Run<CombineSamplers>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CombineSamplersTest, GlobalTextureLocalSampler) {
- auto* src = R"(
+ auto* src = R"(
@group(0) @binding(0) var tex : texture_2d<f32>;
@group(0) @binding(1) var samp1 : sampler;
@@ -621,7 +608,7 @@ fn main() -> vec4<f32> {
return f(samp1, samp2, vec2<f32>(1.0, 2.0));
}
)";
- auto* expect = R"(
+ auto* expect = R"(
@group(0) @binding(0) @internal(disable_validation__binding_point_collision) var placeholder_sampler : sampler;
fn f(tex_s1 : texture_2d<f32>, tex_s2 : texture_2d<f32>, coords : vec2<f32>) -> vec4<f32> {
@@ -637,16 +624,15 @@ fn main() -> vec4<f32> {
}
)";
- DataMap data;
- data.Add<CombineSamplers::BindingInfo>(CombineSamplers::BindingMap(),
- sem::BindingPoint());
- auto got = Run<CombineSamplers>(src, data);
+ DataMap data;
+ data.Add<CombineSamplers::BindingInfo>(CombineSamplers::BindingMap(), sem::BindingPoint());
+ auto got = Run<CombineSamplers>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CombineSamplersTest, GlobalTextureLocalSampler_OutOfOrder) {
- auto* src = R"(
+ auto* src = R"(
fn main() -> vec4<f32> {
return f(samp1, samp2, vec2<f32>(1.0, 2.0));
}
@@ -659,7 +645,7 @@ fn f(s1 : sampler, s2 : sampler, coords : vec2<f32>) -> vec4<f32> {
@group(0) @binding(2) var samp2 : sampler;
@group(0) @binding(0) var tex : texture_2d<f32>;
)";
- auto* expect = R"(
+ auto* expect = R"(
@group(0) @binding(0) @internal(disable_validation__binding_point_collision) var tex_samp1 : texture_2d<f32>;
@group(0) @binding(0) @internal(disable_validation__binding_point_collision) var tex_samp2 : texture_2d<f32>;
@@ -675,16 +661,15 @@ fn f(tex_s1 : texture_2d<f32>, tex_s2 : texture_2d<f32>, coords : vec2<f32>) ->
}
)";
- DataMap data;
- data.Add<CombineSamplers::BindingInfo>(CombineSamplers::BindingMap(),
- sem::BindingPoint());
- auto got = Run<CombineSamplers>(src, data);
+ DataMap data;
+ data.Add<CombineSamplers::BindingInfo>(CombineSamplers::BindingMap(), sem::BindingPoint());
+ auto got = Run<CombineSamplers>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CombineSamplersTest, LocalTextureGlobalSampler) {
- auto* src = R"(
+ auto* src = R"(
@group(0) @binding(0) var tex1 : texture_2d<f32>;
@group(0) @binding(1) var tex2 : texture_2d<f32>;
@@ -699,7 +684,7 @@ fn main() -> vec4<f32> {
return f(tex1, tex2, vec2<f32>(1.0, 2.0));
}
)";
- auto* expect = R"(
+ auto* expect = R"(
@group(0) @binding(0) @internal(disable_validation__binding_point_collision) var placeholder_sampler : sampler;
fn f(t1_samp : texture_2d<f32>, t2_samp : texture_2d<f32>, coords : vec2<f32>) -> vec4<f32> {
@@ -715,16 +700,15 @@ fn main() -> vec4<f32> {
}
)";
- DataMap data;
- data.Add<CombineSamplers::BindingInfo>(CombineSamplers::BindingMap(),
- sem::BindingPoint());
- auto got = Run<CombineSamplers>(src, data);
+ DataMap data;
+ data.Add<CombineSamplers::BindingInfo>(CombineSamplers::BindingMap(), sem::BindingPoint());
+ auto got = Run<CombineSamplers>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CombineSamplersTest, LocalTextureGlobalSampler_OutOfOrder) {
- auto* src = R"(
+ auto* src = R"(
fn main() -> vec4<f32> {
return f(tex1, tex2, vec2<f32>(1.0, 2.0));
}
@@ -737,7 +721,7 @@ fn f(t1 : texture_2d<f32>, t2 : texture_2d<f32>, coords : vec2<f32>) -> vec4<f32
@group(0) @binding(0) var tex1 : texture_2d<f32>;
@group(0) @binding(1) var tex2 : texture_2d<f32>;
)";
- auto* expect = R"(
+ auto* expect = R"(
@group(0) @binding(0) @internal(disable_validation__binding_point_collision) var tex1_samp : texture_2d<f32>;
@group(0) @binding(0) @internal(disable_validation__binding_point_collision) var tex2_samp : texture_2d<f32>;
@@ -753,16 +737,15 @@ fn f(t1_samp : texture_2d<f32>, t2_samp : texture_2d<f32>, coords : vec2<f32>) -
}
)";
- DataMap data;
- data.Add<CombineSamplers::BindingInfo>(CombineSamplers::BindingMap(),
- sem::BindingPoint());
- auto got = Run<CombineSamplers>(src, data);
+ DataMap data;
+ data.Add<CombineSamplers::BindingInfo>(CombineSamplers::BindingMap(), sem::BindingPoint());
+ auto got = Run<CombineSamplers>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CombineSamplersTest, TextureLoadNoSampler) {
- auto* src = R"(
+ auto* src = R"(
@group(0) @binding(0) var tex : texture_2d<f32>;
fn f(t : texture_2d<f32>, coords : vec2<i32>) -> vec4<f32> {
@@ -773,7 +756,7 @@ fn main() -> vec4<f32> {
return f(tex, vec2<i32>(1, 2));
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn f(t_1 : texture_2d<f32>, coords : vec2<i32>) -> vec4<f32> {
return textureLoad(t_1, coords, 0);
}
@@ -785,23 +768,23 @@ fn main() -> vec4<f32> {
}
)";
- sem::BindingPoint placeholder{1024, 0};
- sem::SamplerTexturePair pair;
- pair.texture_binding_point.group = 0;
- pair.texture_binding_point.binding = 0;
- pair.sampler_binding_point.group = placeholder.group;
- pair.sampler_binding_point.binding = placeholder.binding;
- CombineSamplers::BindingMap map;
- map[pair] = "fred";
- DataMap data;
- data.Add<CombineSamplers::BindingInfo>(map, placeholder);
- auto got = Run<CombineSamplers>(src, data);
+ sem::BindingPoint placeholder{1024, 0};
+ sem::SamplerTexturePair pair;
+ pair.texture_binding_point.group = 0;
+ pair.texture_binding_point.binding = 0;
+ pair.sampler_binding_point.group = placeholder.group;
+ pair.sampler_binding_point.binding = placeholder.binding;
+ CombineSamplers::BindingMap map;
+ map[pair] = "fred";
+ DataMap data;
+ data.Add<CombineSamplers::BindingInfo>(map, placeholder);
+ auto got = Run<CombineSamplers>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CombineSamplersTest, TextureWithAndWithoutSampler) {
- auto* src = R"(
+ auto* src = R"(
@group(0) @binding(0) var tex : texture_2d<f32>;
@group(0) @binding(1) var samp : sampler;
@@ -810,7 +793,7 @@ fn main() -> vec4<f32> {
textureSample(tex, samp, vec2<f32>());
}
)";
- auto* expect = R"(
+ auto* expect = R"(
@group(0) @binding(0) @internal(disable_validation__binding_point_collision) var fred : texture_2d<f32>;
@group(0) @binding(0) @internal(disable_validation__binding_point_collision) var barney : texture_2d<f32>;
@@ -822,30 +805,30 @@ fn main() -> vec4<f32> {
}
)";
- sem::BindingPoint placeholder{1024, 0};
- sem::BindingPoint tex{0, 0};
- sem::BindingPoint samp{0, 1};
- sem::SamplerTexturePair pair, placeholder_pair;
- pair.texture_binding_point.group = tex.group;
- pair.texture_binding_point.binding = tex.binding;
- pair.sampler_binding_point.group = samp.group;
- pair.sampler_binding_point.binding = samp.binding;
- placeholder_pair.texture_binding_point.group = tex.group;
- placeholder_pair.texture_binding_point.binding = tex.binding;
- placeholder_pair.sampler_binding_point.group = placeholder.group;
- placeholder_pair.sampler_binding_point.binding = placeholder.binding;
- CombineSamplers::BindingMap map;
- map[pair] = "barney";
- map[placeholder_pair] = "fred";
- DataMap data;
- data.Add<CombineSamplers::BindingInfo>(map, placeholder);
- auto got = Run<CombineSamplers>(src, data);
+ sem::BindingPoint placeholder{1024, 0};
+ sem::BindingPoint tex{0, 0};
+ sem::BindingPoint samp{0, 1};
+ sem::SamplerTexturePair pair, placeholder_pair;
+ pair.texture_binding_point.group = tex.group;
+ pair.texture_binding_point.binding = tex.binding;
+ pair.sampler_binding_point.group = samp.group;
+ pair.sampler_binding_point.binding = samp.binding;
+ placeholder_pair.texture_binding_point.group = tex.group;
+ placeholder_pair.texture_binding_point.binding = tex.binding;
+ placeholder_pair.sampler_binding_point.group = placeholder.group;
+ placeholder_pair.sampler_binding_point.binding = placeholder.binding;
+ CombineSamplers::BindingMap map;
+ map[pair] = "barney";
+ map[placeholder_pair] = "fred";
+ DataMap data;
+ data.Add<CombineSamplers::BindingInfo>(map, placeholder);
+ auto got = Run<CombineSamplers>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CombineSamplersTest, TextureSampleCompare) {
- auto* src = R"(
+ auto* src = R"(
@group(0) @binding(0) var tex : texture_depth_2d;
@group(0) @binding(1) var samp : sampler_comparison;
@@ -854,7 +837,7 @@ fn main() -> vec4<f32> {
return vec4<f32>(textureSampleCompare(tex, samp, vec2<f32>(1.0, 2.0), 0.5));
}
)";
- auto* expect = R"(
+ auto* expect = R"(
@group(0) @binding(0) @internal(disable_validation__binding_point_collision) var tex_samp : texture_depth_2d;
@group(0) @binding(0) @internal(disable_validation__binding_point_collision) var placeholder_comparison_sampler : sampler_comparison;
@@ -864,16 +847,15 @@ fn main() -> vec4<f32> {
}
)";
- DataMap data;
- data.Add<CombineSamplers::BindingInfo>(CombineSamplers::BindingMap(),
- sem::BindingPoint());
- auto got = Run<CombineSamplers>(src, data);
+ DataMap data;
+ data.Add<CombineSamplers::BindingInfo>(CombineSamplers::BindingMap(), sem::BindingPoint());
+ auto got = Run<CombineSamplers>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CombineSamplersTest, TextureSampleCompareInAFunction) {
- auto* src = R"(
+ auto* src = R"(
@group(0) @binding(0) var tex : texture_depth_2d;
@group(0) @binding(1) var samp : sampler_comparison;
@@ -886,11 +868,11 @@ fn main() -> vec4<f32> {
return vec4<f32>(f(tex, samp, vec2<f32>(1.0, 2.0)));
}
)";
- auto* expect = R"(
+ auto* expect = R"(
@group(0) @binding(0) @internal(disable_validation__binding_point_collision) var placeholder_comparison_sampler : sampler_comparison;
fn f(t_s : texture_depth_2d, coords : vec2<f32>) -> f32 {
- return textureSampleCompare(t_s, placeholder_comparison_sampler, coords, 5.0);
+ return textureSampleCompare(t_s, placeholder_comparison_sampler, coords, 5.0f);
}
@group(0) @binding(0) @internal(disable_validation__binding_point_collision) var tex_samp : texture_depth_2d;
@@ -900,16 +882,15 @@ fn main() -> vec4<f32> {
}
)";
- DataMap data;
- data.Add<CombineSamplers::BindingInfo>(CombineSamplers::BindingMap(),
- sem::BindingPoint());
- auto got = Run<CombineSamplers>(src, data);
+ DataMap data;
+ data.Add<CombineSamplers::BindingInfo>(CombineSamplers::BindingMap(), sem::BindingPoint());
+ auto got = Run<CombineSamplers>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CombineSamplersTest, TextureSampleCompareInAFunction_OutOfOrder) {
- auto* src = R"(
+ auto* src = R"(
fn main() -> vec4<f32> {
return vec4<f32>(f(tex, samp, vec2<f32>(1.0, 2.0)));
}
@@ -921,7 +902,7 @@ fn f(t : texture_depth_2d, s : sampler_comparison, coords : vec2<f32>) -> f32 {
@group(0) @binding(0) var tex : texture_depth_2d;
@group(0) @binding(1) var samp : sampler_comparison;
)";
- auto* expect = R"(
+ auto* expect = R"(
@group(0) @binding(0) @internal(disable_validation__binding_point_collision) var tex_samp : texture_depth_2d;
fn main() -> vec4<f32> {
@@ -931,20 +912,19 @@ fn main() -> vec4<f32> {
@group(0) @binding(0) @internal(disable_validation__binding_point_collision) var placeholder_comparison_sampler : sampler_comparison;
fn f(t_s : texture_depth_2d, coords : vec2<f32>) -> f32 {
- return textureSampleCompare(t_s, placeholder_comparison_sampler, coords, 5.0);
+ return textureSampleCompare(t_s, placeholder_comparison_sampler, coords, 5.0f);
}
)";
- DataMap data;
- data.Add<CombineSamplers::BindingInfo>(CombineSamplers::BindingMap(),
- sem::BindingPoint());
- auto got = Run<CombineSamplers>(src, data);
+ DataMap data;
+ data.Add<CombineSamplers::BindingInfo>(CombineSamplers::BindingMap(), sem::BindingPoint());
+ auto got = Run<CombineSamplers>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CombineSamplersTest, BindingPointCollision) {
- auto* src = R"(
+ auto* src = R"(
@group(1) @binding(0) var tex : texture_2d<f32>;
@group(1) @binding(1) var samp : sampler;
@@ -955,7 +935,7 @@ fn main() -> vec4<f32> {
return textureSample(tex, samp, gcoords);
}
)";
- auto* expect = R"(
+ auto* expect = R"(
@internal(disable_validation__binding_point_collision) @group(0) @binding(0) var<uniform> gcoords : vec2<f32>;
@group(0) @binding(0) @internal(disable_validation__binding_point_collision) var tex_samp : texture_2d<f32>;
@@ -967,16 +947,15 @@ fn main() -> vec4<f32> {
}
)";
- DataMap data;
- data.Add<CombineSamplers::BindingInfo>(CombineSamplers::BindingMap(),
- sem::BindingPoint());
- auto got = Run<CombineSamplers>(src, data);
+ DataMap data;
+ data.Add<CombineSamplers::BindingInfo>(CombineSamplers::BindingMap(), sem::BindingPoint());
+ auto got = Run<CombineSamplers>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(CombineSamplersTest, BindingPointCollision_OutOfOrder) {
- auto* src = R"(
+ auto* src = R"(
fn main() -> vec4<f32> {
return textureSample(tex, samp, gcoords);
}
@@ -986,7 +965,7 @@ fn main() -> vec4<f32> {
@group(1) @binding(0) var tex : texture_2d<f32>;
)";
- auto* expect = R"(
+ auto* expect = R"(
@group(0) @binding(0) @internal(disable_validation__binding_point_collision) var tex_samp : texture_2d<f32>;
@group(0) @binding(0) @internal(disable_validation__binding_point_collision) var placeholder_sampler : sampler;
@@ -998,12 +977,11 @@ fn main() -> vec4<f32> {
@internal(disable_validation__binding_point_collision) @group(0) @binding(0) var<uniform> gcoords : vec2<f32>;
)";
- DataMap data;
- data.Add<CombineSamplers::BindingInfo>(CombineSamplers::BindingMap(),
- sem::BindingPoint());
- auto got = Run<CombineSamplers>(src, data);
+ DataMap data;
+ data.Add<CombineSamplers::BindingInfo>(CombineSamplers::BindingMap(), sem::BindingPoint());
+ auto got = Run<CombineSamplers>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
} // namespace
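Every test above drives CombineSamplers with the same two inputs, handed over via DataMap::Add<CombineSamplers::BindingInfo>(): a BindingMap from (texture binding point, sampler binding point) pairs to the name chosen for the combined variable, and a placeholder sampler binding point that stands in when a texture is read without a sampler (textureLoad). The following is a standalone, standard-library-only sketch of that keying scheme; BindingPoint, SamplerTexturePair and PairHash are illustrative stand-ins here, not Tint's actual declarations.

#include <cstddef>
#include <cstdint>
#include <functional>
#include <string>
#include <unordered_map>

namespace sketch {

// A (group, binding) pair, as written on the WGSL vars in the tests above.
struct BindingPoint {
    uint32_t group = 0;
    uint32_t binding = 0;
    bool operator==(const BindingPoint& o) const {
        return group == o.group && binding == o.binding;
    }
};

// One texture/sampler combination observed in the shader.
struct SamplerTexturePair {
    BindingPoint texture;
    BindingPoint sampler;
    bool operator==(const SamplerTexturePair& o) const {
        return texture == o.texture && sampler == o.sampler;
    }
};

struct PairHash {
    std::size_t operator()(const SamplerTexturePair& p) const {
        std::size_t h = 0;
        for (uint32_t v : {p.texture.group, p.texture.binding, p.sampler.group, p.sampler.binding}) {
            h = (h * 31) ^ std::hash<uint32_t>{}(v);
        }
        return h;
    }
};

// Maps each pair to the name of the combined texture/sampler variable, e.g.
// {texture = (0, 0), sampler = (1024, 0)} -> "fred" in the TextureLoadNoSampler test.
using BindingMap = std::unordered_map<SamplerTexturePair, std::string, PairHash>;

}  // namespace sketch

Pairs that are absent from the map fall back to generated names such as t_s or tex_samp, which is exactly what the tests that pass an empty CombineSamplers::BindingMap exercise.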
diff --git a/chromium/third_party/dawn/src/tint/transform/decompose_memory_access.cc b/chromium/third_party/dawn/src/tint/transform/decompose_memory_access.cc
index 699eeb4fbc9..a90a6e2711c 100644
--- a/chromium/third_party/dawn/src/tint/transform/decompose_memory_access.cc
+++ b/chromium/third_party/dawn/src/tint/transform/decompose_memory_access.cc
@@ -27,10 +27,10 @@
#include "src/tint/ast/unary_op.h"
#include "src/tint/program_builder.h"
#include "src/tint/sem/array.h"
-#include "src/tint/sem/atomic_type.h"
+#include "src/tint/sem/atomic.h"
#include "src/tint/sem/call.h"
#include "src/tint/sem/member_accessor_expression.h"
-#include "src/tint/sem/reference_type.h"
+#include "src/tint/sem/reference.h"
#include "src/tint/sem/statement.h"
#include "src/tint/sem/struct.h"
#include "src/tint/sem/variable.h"
@@ -38,6 +38,8 @@
#include "src/tint/utils/hash.h"
#include "src/tint/utils/map.h"
+using namespace tint::number_suffixes; // NOLINT
+
TINT_INSTANTIATE_TYPEINFO(tint::transform::DecomposeMemoryAccess);
TINT_INSTANTIATE_TYPEINFO(tint::transform::DecomposeMemoryAccess::Intrinsic);
@@ -48,176 +50,169 @@ namespace {
/// Offset is a simple ast::Expression builder interface, used to build byte
/// offsets for storage and uniform buffer accesses.
struct Offset : Castable<Offset> {
- /// @returns builds and returns the ast::Expression in `ctx.dst`
- virtual const ast::Expression* Build(CloneContext& ctx) const = 0;
+ /// @returns builds and returns the ast::Expression in `ctx.dst`
+ virtual const ast::Expression* Build(CloneContext& ctx) const = 0;
};
/// OffsetExpr is an implementation of Offset that clones and casts the given
/// expression to `u32`.
struct OffsetExpr : Offset {
- const ast::Expression* const expr = nullptr;
+ const ast::Expression* const expr = nullptr;
- explicit OffsetExpr(const ast::Expression* e) : expr(e) {}
+ explicit OffsetExpr(const ast::Expression* e) : expr(e) {}
- const ast::Expression* Build(CloneContext& ctx) const override {
- auto* type = ctx.src->Sem().Get(expr)->Type()->UnwrapRef();
- auto* res = ctx.Clone(expr);
- if (!type->Is<sem::U32>()) {
- res = ctx.dst->Construct<ProgramBuilder::u32>(res);
+ const ast::Expression* Build(CloneContext& ctx) const override {
+ auto* type = ctx.src->Sem().Get(expr)->Type()->UnwrapRef();
+ auto* res = ctx.Clone(expr);
+ if (!type->Is<sem::U32>()) {
+ res = ctx.dst->Construct<u32>(res);
+ }
+ return res;
}
- return res;
- }
};
/// OffsetLiteral is an implementation of Offset that constructs a u32 literal
/// value.
struct OffsetLiteral : Castable<OffsetLiteral, Offset> {
- uint32_t const literal = 0;
+ uint32_t const literal = 0;
- explicit OffsetLiteral(uint32_t lit) : literal(lit) {}
+ explicit OffsetLiteral(uint32_t lit) : literal(lit) {}
- const ast::Expression* Build(CloneContext& ctx) const override {
- return ctx.dst->Expr(literal);
- }
+ const ast::Expression* Build(CloneContext& ctx) const override {
+ return ctx.dst->Expr(u32(literal));
+ }
};
/// OffsetBinOp is an implementation of Offset that constructs a binary-op of
/// two Offsets.
struct OffsetBinOp : Offset {
- ast::BinaryOp op;
- Offset const* lhs = nullptr;
- Offset const* rhs = nullptr;
-
- const ast::Expression* Build(CloneContext& ctx) const override {
- return ctx.dst->create<ast::BinaryExpression>(op, lhs->Build(ctx),
- rhs->Build(ctx));
- }
+ ast::BinaryOp op;
+ Offset const* lhs = nullptr;
+ Offset const* rhs = nullptr;
+
+ const ast::Expression* Build(CloneContext& ctx) const override {
+ return ctx.dst->create<ast::BinaryExpression>(op, lhs->Build(ctx), rhs->Build(ctx));
+ }
};
/// LoadStoreKey is the unordered map key to a load or store intrinsic.
struct LoadStoreKey {
- ast::StorageClass const storage_class; // buffer storage class
- sem::Type const* buf_ty = nullptr; // buffer type
- sem::Type const* el_ty = nullptr; // element type
- bool operator==(const LoadStoreKey& rhs) const {
- return storage_class == rhs.storage_class && buf_ty == rhs.buf_ty &&
- el_ty == rhs.el_ty;
- }
- struct Hasher {
- inline std::size_t operator()(const LoadStoreKey& u) const {
- return utils::Hash(u.storage_class, u.buf_ty, u.el_ty);
+ ast::StorageClass const storage_class; // buffer storage class
+ sem::Type const* buf_ty = nullptr; // buffer type
+ sem::Type const* el_ty = nullptr; // element type
+ bool operator==(const LoadStoreKey& rhs) const {
+ return storage_class == rhs.storage_class && buf_ty == rhs.buf_ty && el_ty == rhs.el_ty;
}
- };
+ struct Hasher {
+ inline std::size_t operator()(const LoadStoreKey& u) const {
+ return utils::Hash(u.storage_class, u.buf_ty, u.el_ty);
+ }
+ };
};
/// AtomicKey is the unordered map key to an atomic intrinsic.
struct AtomicKey {
- sem::Type const* buf_ty = nullptr; // buffer type
- sem::Type const* el_ty = nullptr; // element type
- sem::BuiltinType const op; // atomic op
- bool operator==(const AtomicKey& rhs) const {
- return buf_ty == rhs.buf_ty && el_ty == rhs.el_ty && op == rhs.op;
- }
- struct Hasher {
- inline std::size_t operator()(const AtomicKey& u) const {
- return utils::Hash(u.buf_ty, u.el_ty, u.op);
+ sem::Type const* buf_ty = nullptr; // buffer type
+ sem::Type const* el_ty = nullptr; // element type
+ sem::BuiltinType const op; // atomic op
+ bool operator==(const AtomicKey& rhs) const {
+ return buf_ty == rhs.buf_ty && el_ty == rhs.el_ty && op == rhs.op;
}
- };
+ struct Hasher {
+ inline std::size_t operator()(const AtomicKey& u) const {
+ return utils::Hash(u.buf_ty, u.el_ty, u.op);
+ }
+ };
};
-bool IntrinsicDataTypeFor(const sem::Type* ty,
- DecomposeMemoryAccess::Intrinsic::DataType& out) {
- if (ty->Is<sem::I32>()) {
- out = DecomposeMemoryAccess::Intrinsic::DataType::kI32;
- return true;
- }
- if (ty->Is<sem::U32>()) {
- out = DecomposeMemoryAccess::Intrinsic::DataType::kU32;
- return true;
- }
- if (ty->Is<sem::F32>()) {
- out = DecomposeMemoryAccess::Intrinsic::DataType::kF32;
- return true;
- }
- if (auto* vec = ty->As<sem::Vector>()) {
- switch (vec->Width()) {
- case 2:
- if (vec->type()->Is<sem::I32>()) {
- out = DecomposeMemoryAccess::Intrinsic::DataType::kVec2I32;
- return true;
- }
- if (vec->type()->Is<sem::U32>()) {
- out = DecomposeMemoryAccess::Intrinsic::DataType::kVec2U32;
- return true;
- }
- if (vec->type()->Is<sem::F32>()) {
- out = DecomposeMemoryAccess::Intrinsic::DataType::kVec2F32;
- return true;
- }
- break;
- case 3:
- if (vec->type()->Is<sem::I32>()) {
- out = DecomposeMemoryAccess::Intrinsic::DataType::kVec3I32;
- return true;
- }
- if (vec->type()->Is<sem::U32>()) {
- out = DecomposeMemoryAccess::Intrinsic::DataType::kVec3U32;
- return true;
- }
- if (vec->type()->Is<sem::F32>()) {
- out = DecomposeMemoryAccess::Intrinsic::DataType::kVec3F32;
- return true;
- }
- break;
- case 4:
- if (vec->type()->Is<sem::I32>()) {
- out = DecomposeMemoryAccess::Intrinsic::DataType::kVec4I32;
- return true;
- }
- if (vec->type()->Is<sem::U32>()) {
- out = DecomposeMemoryAccess::Intrinsic::DataType::kVec4U32;
- return true;
- }
- if (vec->type()->Is<sem::F32>()) {
- out = DecomposeMemoryAccess::Intrinsic::DataType::kVec4F32;
- return true;
+bool IntrinsicDataTypeFor(const sem::Type* ty, DecomposeMemoryAccess::Intrinsic::DataType& out) {
+ if (ty->Is<sem::I32>()) {
+ out = DecomposeMemoryAccess::Intrinsic::DataType::kI32;
+ return true;
+ }
+ if (ty->Is<sem::U32>()) {
+ out = DecomposeMemoryAccess::Intrinsic::DataType::kU32;
+ return true;
+ }
+ if (ty->Is<sem::F32>()) {
+ out = DecomposeMemoryAccess::Intrinsic::DataType::kF32;
+ return true;
+ }
+ if (auto* vec = ty->As<sem::Vector>()) {
+ switch (vec->Width()) {
+ case 2:
+ if (vec->type()->Is<sem::I32>()) {
+ out = DecomposeMemoryAccess::Intrinsic::DataType::kVec2I32;
+ return true;
+ }
+ if (vec->type()->Is<sem::U32>()) {
+ out = DecomposeMemoryAccess::Intrinsic::DataType::kVec2U32;
+ return true;
+ }
+ if (vec->type()->Is<sem::F32>()) {
+ out = DecomposeMemoryAccess::Intrinsic::DataType::kVec2F32;
+ return true;
+ }
+ break;
+ case 3:
+ if (vec->type()->Is<sem::I32>()) {
+ out = DecomposeMemoryAccess::Intrinsic::DataType::kVec3I32;
+ return true;
+ }
+ if (vec->type()->Is<sem::U32>()) {
+ out = DecomposeMemoryAccess::Intrinsic::DataType::kVec3U32;
+ return true;
+ }
+ if (vec->type()->Is<sem::F32>()) {
+ out = DecomposeMemoryAccess::Intrinsic::DataType::kVec3F32;
+ return true;
+ }
+ break;
+ case 4:
+ if (vec->type()->Is<sem::I32>()) {
+ out = DecomposeMemoryAccess::Intrinsic::DataType::kVec4I32;
+ return true;
+ }
+ if (vec->type()->Is<sem::U32>()) {
+ out = DecomposeMemoryAccess::Intrinsic::DataType::kVec4U32;
+ return true;
+ }
+ if (vec->type()->Is<sem::F32>()) {
+ out = DecomposeMemoryAccess::Intrinsic::DataType::kVec4F32;
+ return true;
+ }
+ break;
}
- break;
+ return false;
}
- return false;
- }
- return false;
+ return false;
}
/// @returns a DecomposeMemoryAccess::Intrinsic attribute that can be applied
/// to a stub function to load the type `ty`.
-DecomposeMemoryAccess::Intrinsic* IntrinsicLoadFor(
- ProgramBuilder* builder,
- ast::StorageClass storage_class,
- const sem::Type* ty) {
- DecomposeMemoryAccess::Intrinsic::DataType type;
- if (!IntrinsicDataTypeFor(ty, type)) {
- return nullptr;
- }
- return builder->ASTNodes().Create<DecomposeMemoryAccess::Intrinsic>(
- builder->ID(), DecomposeMemoryAccess::Intrinsic::Op::kLoad, storage_class,
- type);
+DecomposeMemoryAccess::Intrinsic* IntrinsicLoadFor(ProgramBuilder* builder,
+ ast::StorageClass storage_class,
+ const sem::Type* ty) {
+ DecomposeMemoryAccess::Intrinsic::DataType type;
+ if (!IntrinsicDataTypeFor(ty, type)) {
+ return nullptr;
+ }
+ return builder->ASTNodes().Create<DecomposeMemoryAccess::Intrinsic>(
+ builder->ID(), DecomposeMemoryAccess::Intrinsic::Op::kLoad, storage_class, type);
}
/// @returns a DecomposeMemoryAccess::Intrinsic attribute that can be applied
/// to a stub function to store the type `ty`.
-DecomposeMemoryAccess::Intrinsic* IntrinsicStoreFor(
- ProgramBuilder* builder,
- ast::StorageClass storage_class,
- const sem::Type* ty) {
- DecomposeMemoryAccess::Intrinsic::DataType type;
- if (!IntrinsicDataTypeFor(ty, type)) {
- return nullptr;
- }
- return builder->ASTNodes().Create<DecomposeMemoryAccess::Intrinsic>(
- builder->ID(), DecomposeMemoryAccess::Intrinsic::Op::kStore,
- storage_class, type);
+DecomposeMemoryAccess::Intrinsic* IntrinsicStoreFor(ProgramBuilder* builder,
+ ast::StorageClass storage_class,
+ const sem::Type* ty) {
+ DecomposeMemoryAccess::Intrinsic::DataType type;
+ if (!IntrinsicDataTypeFor(ty, type)) {
+ return nullptr;
+ }
+ return builder->ASTNodes().Create<DecomposeMemoryAccess::Intrinsic>(
+ builder->ID(), DecomposeMemoryAccess::Intrinsic::Op::kStore, storage_class, type);
}
/// @returns a DecomposeMemoryAccess::Intrinsic attribute that can be applied
@@ -225,769 +220,759 @@ DecomposeMemoryAccess::Intrinsic* IntrinsicStoreFor(
DecomposeMemoryAccess::Intrinsic* IntrinsicAtomicFor(ProgramBuilder* builder,
sem::BuiltinType ity,
const sem::Type* ty) {
- auto op = DecomposeMemoryAccess::Intrinsic::Op::kAtomicLoad;
- switch (ity) {
- case sem::BuiltinType::kAtomicLoad:
- op = DecomposeMemoryAccess::Intrinsic::Op::kAtomicLoad;
- break;
- case sem::BuiltinType::kAtomicStore:
- op = DecomposeMemoryAccess::Intrinsic::Op::kAtomicStore;
- break;
- case sem::BuiltinType::kAtomicAdd:
- op = DecomposeMemoryAccess::Intrinsic::Op::kAtomicAdd;
- break;
- case sem::BuiltinType::kAtomicSub:
- op = DecomposeMemoryAccess::Intrinsic::Op::kAtomicSub;
- break;
- case sem::BuiltinType::kAtomicMax:
- op = DecomposeMemoryAccess::Intrinsic::Op::kAtomicMax;
- break;
- case sem::BuiltinType::kAtomicMin:
- op = DecomposeMemoryAccess::Intrinsic::Op::kAtomicMin;
- break;
- case sem::BuiltinType::kAtomicAnd:
- op = DecomposeMemoryAccess::Intrinsic::Op::kAtomicAnd;
- break;
- case sem::BuiltinType::kAtomicOr:
- op = DecomposeMemoryAccess::Intrinsic::Op::kAtomicOr;
- break;
- case sem::BuiltinType::kAtomicXor:
- op = DecomposeMemoryAccess::Intrinsic::Op::kAtomicXor;
- break;
- case sem::BuiltinType::kAtomicExchange:
- op = DecomposeMemoryAccess::Intrinsic::Op::kAtomicExchange;
- break;
- case sem::BuiltinType::kAtomicCompareExchangeWeak:
- op = DecomposeMemoryAccess::Intrinsic::Op::kAtomicCompareExchangeWeak;
- break;
- default:
- TINT_ICE(Transform, builder->Diagnostics())
- << "invalid IntrinsicType for DecomposeMemoryAccess::Intrinsic: "
- << ty->TypeInfo().name;
- break;
- }
-
- DecomposeMemoryAccess::Intrinsic::DataType type;
- if (!IntrinsicDataTypeFor(ty, type)) {
- return nullptr;
- }
- return builder->ASTNodes().Create<DecomposeMemoryAccess::Intrinsic>(
- builder->ID(), op, ast::StorageClass::kStorage, type);
+ auto op = DecomposeMemoryAccess::Intrinsic::Op::kAtomicLoad;
+ switch (ity) {
+ case sem::BuiltinType::kAtomicLoad:
+ op = DecomposeMemoryAccess::Intrinsic::Op::kAtomicLoad;
+ break;
+ case sem::BuiltinType::kAtomicStore:
+ op = DecomposeMemoryAccess::Intrinsic::Op::kAtomicStore;
+ break;
+ case sem::BuiltinType::kAtomicAdd:
+ op = DecomposeMemoryAccess::Intrinsic::Op::kAtomicAdd;
+ break;
+ case sem::BuiltinType::kAtomicSub:
+ op = DecomposeMemoryAccess::Intrinsic::Op::kAtomicSub;
+ break;
+ case sem::BuiltinType::kAtomicMax:
+ op = DecomposeMemoryAccess::Intrinsic::Op::kAtomicMax;
+ break;
+ case sem::BuiltinType::kAtomicMin:
+ op = DecomposeMemoryAccess::Intrinsic::Op::kAtomicMin;
+ break;
+ case sem::BuiltinType::kAtomicAnd:
+ op = DecomposeMemoryAccess::Intrinsic::Op::kAtomicAnd;
+ break;
+ case sem::BuiltinType::kAtomicOr:
+ op = DecomposeMemoryAccess::Intrinsic::Op::kAtomicOr;
+ break;
+ case sem::BuiltinType::kAtomicXor:
+ op = DecomposeMemoryAccess::Intrinsic::Op::kAtomicXor;
+ break;
+ case sem::BuiltinType::kAtomicExchange:
+ op = DecomposeMemoryAccess::Intrinsic::Op::kAtomicExchange;
+ break;
+ case sem::BuiltinType::kAtomicCompareExchangeWeak:
+ op = DecomposeMemoryAccess::Intrinsic::Op::kAtomicCompareExchangeWeak;
+ break;
+ default:
+ TINT_ICE(Transform, builder->Diagnostics())
+ << "invalid IntrinsicType for DecomposeMemoryAccess::Intrinsic: "
+ << ty->TypeInfo().name;
+ break;
+ }
+
+ DecomposeMemoryAccess::Intrinsic::DataType type;
+ if (!IntrinsicDataTypeFor(ty, type)) {
+ return nullptr;
+ }
+ return builder->ASTNodes().Create<DecomposeMemoryAccess::Intrinsic>(
+ builder->ID(), op, ast::StorageClass::kStorage, type);
}
/// BufferAccess describes a single storage or uniform buffer access
struct BufferAccess {
- sem::Expression const* var = nullptr; // Storage buffer variable
- Offset const* offset = nullptr; // The byte offset on var
- sem::Type const* type = nullptr; // The type of the access
- operator bool() const { return var; } // Returns true if valid
+ sem::Expression const* var = nullptr; // Storage buffer variable
+ Offset const* offset = nullptr; // The byte offset on var
+ sem::Type const* type = nullptr; // The type of the access
+ operator bool() const { return var; } // Returns true if valid
};
/// Store describes a single storage or uniform buffer write
struct Store {
- const ast::AssignmentStatement* assignment; // The AST assignment statement
- BufferAccess target; // The target for the write
+ const ast::AssignmentStatement* assignment; // The AST assignment statement
+ BufferAccess target; // The target for the write
};
} // namespace
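LoadStoreKey and AtomicKey above exist so that the transform emits at most one helper function per distinct (storage class, buffer type, element type[, atomic op]) combination: the State struct below keeps unordered maps keyed by them (load_funcs, store_funcs, atomic_funcs) and fills them through utils::GetOrCreate. Here is a standard-library-only sketch of that get-or-create cache; StorageClass, TypePtr and GetOrCreate are illustrative stand-ins, not Tint's utilities.

#include <cstddef>
#include <functional>
#include <string>
#include <unordered_map>

namespace sketch {

enum class StorageClass { kUniform, kStorage };
using TypePtr = const void*;  // stand-in for the sem::Type pointers used as identity keys

struct LoadStoreKey {
    StorageClass storage_class = StorageClass::kStorage;
    TypePtr buf_ty = nullptr;
    TypePtr el_ty = nullptr;
    bool operator==(const LoadStoreKey& o) const {
        return storage_class == o.storage_class && buf_ty == o.buf_ty && el_ty == o.el_ty;
    }
    struct Hasher {
        std::size_t operator()(const LoadStoreKey& k) const {
            std::size_t h = std::hash<int>{}(static_cast<int>(k.storage_class));
            h = (h * 31) ^ std::hash<TypePtr>{}(k.buf_ty);
            h = (h * 31) ^ std::hash<TypePtr>{}(k.el_ty);
            return h;
        }
    };
};

// Minimal equivalent of utils::GetOrCreate: build the value once per key and
// reuse it on every later request, so each key produces exactly one helper.
template <typename Map, typename Key, typename Create>
typename Map::mapped_type GetOrCreate(Map& map, const Key& key, Create&& create) {
    auto it = map.find(key);
    if (it != map.end()) {
        return it->second;
    }
    auto value = create();
    map.emplace(key, value);
    return value;
}

using LoadFuncs = std::unordered_map<LoadStoreKey, std::string, LoadStoreKey::Hasher>;

}  // namespace sketch

In the transform itself, the lambda passed to utils::GetOrCreate is what builds the load/store/atomic stub and returns its Symbol; later lookups with the same key simply reuse that Symbol instead of emitting a duplicate function.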
/// State holds the current transform state
struct DecomposeMemoryAccess::State {
- /// The clone context
- CloneContext& ctx;
- /// Alias to `*ctx.dst`
- ProgramBuilder& b;
- /// Map of AST expression to storage or uniform buffer access
- /// This map has entries added when encountered, and removed when outer
- /// expressions chain the access.
- /// Subset of #expression_order, as expressions are not removed from
- /// #expression_order.
- std::unordered_map<const ast::Expression*, BufferAccess> accesses;
- /// The visited order of AST expressions (superset of #accesses)
- std::vector<const ast::Expression*> expression_order;
- /// [buffer-type, element-type] -> load function name
- std::unordered_map<LoadStoreKey, Symbol, LoadStoreKey::Hasher> load_funcs;
- /// [buffer-type, element-type] -> store function name
- std::unordered_map<LoadStoreKey, Symbol, LoadStoreKey::Hasher> store_funcs;
- /// [buffer-type, element-type, atomic-op] -> load function name
- std::unordered_map<AtomicKey, Symbol, AtomicKey::Hasher> atomic_funcs;
- /// List of storage or uniform buffer writes
- std::vector<Store> stores;
- /// Allocations for offsets
- utils::BlockAllocator<Offset> offsets_;
-
- /// Constructor
- /// @param context the CloneContext
- explicit State(CloneContext& context) : ctx(context), b(*ctx.dst) {}
-
- /// @param offset the offset value to wrap in an Offset
- /// @returns an Offset for the given literal value
- const Offset* ToOffset(uint32_t offset) {
- return offsets_.Create<OffsetLiteral>(offset);
- }
-
- /// @param expr the expression to convert to an Offset
- /// @returns an Offset for the given ast::Expression
- const Offset* ToOffset(const ast::Expression* expr) {
- if (auto* u32 = expr->As<ast::UintLiteralExpression>()) {
- return offsets_.Create<OffsetLiteral>(u32->value);
- } else if (auto* i32 = expr->As<ast::SintLiteralExpression>()) {
- if (i32->value > 0) {
- return offsets_.Create<OffsetLiteral>(i32->value);
- }
- }
- return offsets_.Create<OffsetExpr>(expr);
- }
-
- /// @param offset the Offset that is returned
- /// @returns the given offset (pass-through)
- const Offset* ToOffset(const Offset* offset) { return offset; }
-
- /// @param lhs_ the left-hand side of the add expression
- /// @param rhs_ the right-hand side of the add expression
- /// @return an Offset that is a sum of lhs and rhs, performing basic constant
- /// folding if possible
- template <typename LHS, typename RHS>
- const Offset* Add(LHS&& lhs_, RHS&& rhs_) {
- auto* lhs = ToOffset(std::forward<LHS>(lhs_));
- auto* rhs = ToOffset(std::forward<RHS>(rhs_));
- auto* lhs_lit = tint::As<OffsetLiteral>(lhs);
- auto* rhs_lit = tint::As<OffsetLiteral>(rhs);
- if (lhs_lit && lhs_lit->literal == 0) {
- return rhs;
- }
- if (rhs_lit && rhs_lit->literal == 0) {
- return lhs;
- }
- if (lhs_lit && rhs_lit) {
- if (static_cast<uint64_t>(lhs_lit->literal) +
- static_cast<uint64_t>(rhs_lit->literal) <=
- 0xffffffff) {
- return offsets_.Create<OffsetLiteral>(lhs_lit->literal +
- rhs_lit->literal);
- }
- }
- auto* out = offsets_.Create<OffsetBinOp>();
- out->op = ast::BinaryOp::kAdd;
- out->lhs = lhs;
- out->rhs = rhs;
- return out;
- }
-
- /// @param lhs_ the left-hand side of the multiply expression
- /// @param rhs_ the right-hand side of the multiply expression
- /// @return an Offset that is the multiplication of lhs and rhs, performing
- /// basic constant folding if possible
- template <typename LHS, typename RHS>
- const Offset* Mul(LHS&& lhs_, RHS&& rhs_) {
- auto* lhs = ToOffset(std::forward<LHS>(lhs_));
- auto* rhs = ToOffset(std::forward<RHS>(rhs_));
- auto* lhs_lit = tint::As<OffsetLiteral>(lhs);
- auto* rhs_lit = tint::As<OffsetLiteral>(rhs);
- if (lhs_lit && lhs_lit->literal == 0) {
- return offsets_.Create<OffsetLiteral>(0);
+ /// The clone context
+ CloneContext& ctx;
+ /// Alias to `*ctx.dst`
+ ProgramBuilder& b;
+ /// Map of AST expression to storage or uniform buffer access
+ /// This map has entries added when encountered, and removed when outer
+ /// expressions chain the access.
+ /// Subset of #expression_order, as expressions are not removed from
+ /// #expression_order.
+ std::unordered_map<const ast::Expression*, BufferAccess> accesses;
+ /// The visited order of AST expressions (superset of #accesses)
+ std::vector<const ast::Expression*> expression_order;
+ /// [buffer-type, element-type] -> load function name
+ std::unordered_map<LoadStoreKey, Symbol, LoadStoreKey::Hasher> load_funcs;
+ /// [buffer-type, element-type] -> store function name
+ std::unordered_map<LoadStoreKey, Symbol, LoadStoreKey::Hasher> store_funcs;
+ /// [buffer-type, element-type, atomic-op] -> load function name
+ std::unordered_map<AtomicKey, Symbol, AtomicKey::Hasher> atomic_funcs;
+ /// List of storage or uniform buffer writes
+ std::vector<Store> stores;
+ /// Allocations for offsets
+ utils::BlockAllocator<Offset> offsets_;
+
+ /// Constructor
+ /// @param context the CloneContext
+ explicit State(CloneContext& context) : ctx(context), b(*ctx.dst) {}
+
+ /// @param offset the offset value to wrap in an Offset
+ /// @returns an Offset for the given literal value
+ const Offset* ToOffset(uint32_t offset) { return offsets_.Create<OffsetLiteral>(offset); }
+
+ /// @param expr the expression to convert to an Offset
+ /// @returns an Offset for the given ast::Expression
+ const Offset* ToOffset(const ast::Expression* expr) {
+ if (auto* lit = expr->As<ast::IntLiteralExpression>()) {
+ if (lit->value > 0) {
+ return offsets_.Create<OffsetLiteral>(static_cast<uint32_t>(lit->value));
+ }
+ }
+ return offsets_.Create<OffsetExpr>(expr);
}
- if (rhs_lit && rhs_lit->literal == 0) {
- return offsets_.Create<OffsetLiteral>(0);
+
+ /// @param offset the Offset that is returned
+ /// @returns the given offset (pass-through)
+ const Offset* ToOffset(const Offset* offset) { return offset; }
+
+ /// @param lhs_ the left-hand side of the add expression
+ /// @param rhs_ the right-hand side of the add expression
+ /// @return an Offset that is a sum of lhs and rhs, performing basic constant
+ /// folding if possible
+ template <typename LHS, typename RHS>
+ const Offset* Add(LHS&& lhs_, RHS&& rhs_) {
+ auto* lhs = ToOffset(std::forward<LHS>(lhs_));
+ auto* rhs = ToOffset(std::forward<RHS>(rhs_));
+ auto* lhs_lit = tint::As<OffsetLiteral>(lhs);
+ auto* rhs_lit = tint::As<OffsetLiteral>(rhs);
+ if (lhs_lit && lhs_lit->literal == 0) {
+ return rhs;
+ }
+ if (rhs_lit && rhs_lit->literal == 0) {
+ return lhs;
+ }
+ if (lhs_lit && rhs_lit) {
+ if (static_cast<uint64_t>(lhs_lit->literal) + static_cast<uint64_t>(rhs_lit->literal) <=
+ 0xffffffff) {
+ return offsets_.Create<OffsetLiteral>(lhs_lit->literal + rhs_lit->literal);
+ }
+ }
+ auto* out = offsets_.Create<OffsetBinOp>();
+ out->op = ast::BinaryOp::kAdd;
+ out->lhs = lhs;
+ out->rhs = rhs;
+ return out;
}
- if (lhs_lit && lhs_lit->literal == 1) {
- return rhs;
+
+ /// @param lhs_ the left-hand side of the multiply expression
+ /// @param rhs_ the right-hand side of the multiply expression
+ /// @return an Offset that is the multiplication of lhs and rhs, performing
+ /// basic constant folding if possible
+ template <typename LHS, typename RHS>
+ const Offset* Mul(LHS&& lhs_, RHS&& rhs_) {
+ auto* lhs = ToOffset(std::forward<LHS>(lhs_));
+ auto* rhs = ToOffset(std::forward<RHS>(rhs_));
+ auto* lhs_lit = tint::As<OffsetLiteral>(lhs);
+ auto* rhs_lit = tint::As<OffsetLiteral>(rhs);
+ if (lhs_lit && lhs_lit->literal == 0) {
+ return offsets_.Create<OffsetLiteral>(0);
+ }
+ if (rhs_lit && rhs_lit->literal == 0) {
+ return offsets_.Create<OffsetLiteral>(0);
+ }
+ if (lhs_lit && lhs_lit->literal == 1) {
+ return rhs;
+ }
+ if (rhs_lit && rhs_lit->literal == 1) {
+ return lhs;
+ }
+ if (lhs_lit && rhs_lit) {
+ return offsets_.Create<OffsetLiteral>(lhs_lit->literal * rhs_lit->literal);
+ }
+ auto* out = offsets_.Create<OffsetBinOp>();
+ out->op = ast::BinaryOp::kMultiply;
+ out->lhs = lhs;
+ out->rhs = rhs;
+ return out;
}
- if (rhs_lit && rhs_lit->literal == 1) {
- return lhs;
+
+ /// AddAccess() adds the `expr -> access` map item to #accesses, and `expr`
+ /// to #expression_order.
+ /// @param expr the expression that performs the access
+ /// @param access the access
+ void AddAccess(const ast::Expression* expr, const BufferAccess& access) {
+ TINT_ASSERT(Transform, access.type);
+ accesses.emplace(expr, access);
+ expression_order.emplace_back(expr);
}
- if (lhs_lit && rhs_lit) {
- return offsets_.Create<OffsetLiteral>(lhs_lit->literal *
- rhs_lit->literal);
+
+ /// TakeAccess() removes the `node` item from #accesses (if it exists),
+ /// returning the BufferAccess. If #accesses does not hold an item for
+ /// `node`, an invalid BufferAccess is returned.
+ /// @param node the expression that performed an access
+ /// @return the BufferAccess for the given expression
+ BufferAccess TakeAccess(const ast::Expression* node) {
+ auto lhs_it = accesses.find(node);
+ if (lhs_it == accesses.end()) {
+ return {};
+ }
+ auto access = lhs_it->second;
+ accesses.erase(node);
+ return access;
}
- auto* out = offsets_.Create<OffsetBinOp>();
- out->op = ast::BinaryOp::kMultiply;
- out->lhs = lhs;
- out->rhs = rhs;
- return out;
- }
-
- /// AddAccess() adds the `expr -> access` map item to #accesses, and `expr`
- /// to #expression_order.
- /// @param expr the expression that performs the access
- /// @param access the access
- void AddAccess(const ast::Expression* expr, const BufferAccess& access) {
- TINT_ASSERT(Transform, access.type);
- accesses.emplace(expr, access);
- expression_order.emplace_back(expr);
- }
-
- /// TakeAccess() removes the `node` item from #accesses (if it exists),
- /// returning the BufferAccess. If #accesses does not hold an item for
- /// `node`, an invalid BufferAccess is returned.
- /// @param node the expression that performed an access
- /// @return the BufferAccess for the given expression
- BufferAccess TakeAccess(const ast::Expression* node) {
- auto lhs_it = accesses.find(node);
- if (lhs_it == accesses.end()) {
- return {};
+
+ /// LoadFunc() returns a symbol to an intrinsic function that loads an element
+ /// of type `el_ty` from a storage or uniform buffer of type `buf_ty`.
+ /// The emitted function has the signature:
+ /// `fn load(buf : buf_ty, offset : u32) -> el_ty`
+ /// @param buf_ty the storage or uniform buffer type
+ /// @param el_ty the storage or uniform buffer element type
+ /// @param var_user the variable user
+ /// @return the name of the function that performs the load
+ Symbol LoadFunc(const sem::Type* buf_ty,
+ const sem::Type* el_ty,
+ const sem::VariableUser* var_user) {
+ auto storage_class = var_user->Variable()->StorageClass();
+ return utils::GetOrCreate(load_funcs, LoadStoreKey{storage_class, buf_ty, el_ty}, [&] {
+ auto* buf_ast_ty = CreateASTTypeFor(ctx, buf_ty);
+ auto* disable_validation =
+ b.Disable(ast::DisabledValidation::kIgnoreConstructibleFunctionParameter);
+
+ ast::VariableList params = {
+ // Note: The buffer parameter requires the StorageClass in
+ // order for HLSL to emit this as a ByteAddressBuffer or cbuffer
+ // array.
+ b.create<ast::Variable>(b.Sym("buffer"), storage_class,
+ var_user->Variable()->Access(), buf_ast_ty, true, false,
+ nullptr, ast::AttributeList{disable_validation}),
+ b.Param("offset", b.ty.u32()),
+ };
+
+ auto name = b.Sym();
+
+ if (auto* intrinsic = IntrinsicLoadFor(ctx.dst, storage_class, el_ty)) {
+ auto* el_ast_ty = CreateASTTypeFor(ctx, el_ty);
+ auto* func = b.create<ast::Function>(
+ name, params, el_ast_ty, nullptr,
+ ast::AttributeList{
+ intrinsic,
+ b.Disable(ast::DisabledValidation::kFunctionHasNoBody),
+ },
+ ast::AttributeList{});
+ b.AST().AddFunction(func);
+ } else if (auto* arr_ty = el_ty->As<sem::Array>()) {
+ // fn load_func(buf : buf_ty, offset : u32) -> array<T, N> {
+ // var arr : array<T, N>;
+ // for (var i = 0u; i < array_count; i = i + 1) {
+ // arr[i] = el_load_func(buf, offset + i * array_stride)
+ // }
+ // return arr;
+ // }
+ auto load = LoadFunc(buf_ty, arr_ty->ElemType()->UnwrapRef(), var_user);
+ auto* arr = b.Var(b.Symbols().New("arr"), CreateASTTypeFor(ctx, arr_ty));
+ auto* i = b.Var(b.Symbols().New("i"), nullptr, b.Expr(0_u));
+ auto* for_init = b.Decl(i);
+ auto* for_cond = b.create<ast::BinaryExpression>(
+ ast::BinaryOp::kLessThan, b.Expr(i), b.Expr(u32(arr_ty->Count())));
+ auto* for_cont = b.Assign(i, b.Add(i, 1_u));
+ auto* arr_el = b.IndexAccessor(arr, i);
+ auto* el_offset = b.Add(b.Expr("offset"), b.Mul(i, u32(arr_ty->Stride())));
+ auto* el_val = b.Call(load, "buffer", el_offset);
+ auto* for_loop =
+ b.For(for_init, for_cond, for_cont, b.Block(b.Assign(arr_el, el_val)));
+
+ b.Func(name, params, CreateASTTypeFor(ctx, arr_ty),
+ {
+ b.Decl(arr),
+ for_loop,
+ b.Return(arr),
+ });
+ } else {
+ ast::ExpressionList values;
+ if (auto* mat_ty = el_ty->As<sem::Matrix>()) {
+ auto* vec_ty = mat_ty->ColumnType();
+ Symbol load = LoadFunc(buf_ty, vec_ty, var_user);
+ for (uint32_t i = 0; i < mat_ty->columns(); i++) {
+ auto* offset = b.Add("offset", u32(i * mat_ty->ColumnStride()));
+ values.emplace_back(b.Call(load, "buffer", offset));
+ }
+ } else if (auto* str = el_ty->As<sem::Struct>()) {
+ for (auto* member : str->Members()) {
+ auto* offset = b.Add("offset", u32(member->Offset()));
+ Symbol load = LoadFunc(buf_ty, member->Type()->UnwrapRef(), var_user);
+ values.emplace_back(b.Call(load, "buffer", offset));
+ }
+ }
+ b.Func(name, params, CreateASTTypeFor(ctx, el_ty),
+ {
+ b.Return(b.Construct(CreateASTTypeFor(ctx, el_ty), values)),
+ });
+ }
+ return name;
+ });
}
- auto access = lhs_it->second;
- accesses.erase(node);
- return access;
- }
-
- /// LoadFunc() returns a symbol to an intrinsic function that loads an element
- /// of type `el_ty` from a storage or uniform buffer of type `buf_ty`.
- /// The emitted function has the signature:
- /// `fn load(buf : buf_ty, offset : u32) -> el_ty`
- /// @param buf_ty the storage or uniform buffer type
- /// @param el_ty the storage or uniform buffer element type
- /// @param var_user the variable user
- /// @return the name of the function that performs the load
- Symbol LoadFunc(const sem::Type* buf_ty,
- const sem::Type* el_ty,
- const sem::VariableUser* var_user) {
- auto storage_class = var_user->Variable()->StorageClass();
- return utils::GetOrCreate(
- load_funcs, LoadStoreKey{storage_class, buf_ty, el_ty}, [&] {
- auto* buf_ast_ty = CreateASTTypeFor(ctx, buf_ty);
- auto* disable_validation = b.Disable(
- ast::DisabledValidation::kIgnoreConstructibleFunctionParameter);
-
- ast::VariableList params = {
- // Note: The buffer parameter requires the StorageClass in
- // order for HLSL to emit this as a ByteAddressBuffer or cbuffer
- // array.
- b.create<ast::Variable>(b.Sym("buffer"), storage_class,
- var_user->Variable()->Access(),
- buf_ast_ty, true, false, nullptr,
- ast::AttributeList{disable_validation}),
- b.Param("offset", b.ty.u32()),
- };
-
- auto name = b.Sym();
-
- if (auto* intrinsic =
- IntrinsicLoadFor(ctx.dst, storage_class, el_ty)) {
+
+ /// StoreFunc() returns a symbol to an intrinsic function that stores an
+ /// element of type `el_ty` to a storage buffer of type `buf_ty`.
+ /// The function has the signature:
+ /// `fn store(buf : buf_ty, offset : u32, value : el_ty)`
+ /// @param buf_ty the storage buffer type
+ /// @param el_ty the storage buffer element type
+ /// @param var_user the variable user
+ /// @return the name of the function that performs the store
+ Symbol StoreFunc(const sem::Type* buf_ty,
+ const sem::Type* el_ty,
+ const sem::VariableUser* var_user) {
+ auto storage_class = var_user->Variable()->StorageClass();
+ return utils::GetOrCreate(store_funcs, LoadStoreKey{storage_class, buf_ty, el_ty}, [&] {
+ auto* buf_ast_ty = CreateASTTypeFor(ctx, buf_ty);
auto* el_ast_ty = CreateASTTypeFor(ctx, el_ty);
- auto* func = b.create<ast::Function>(
- name, params, el_ast_ty, nullptr,
- ast::AttributeList{
- intrinsic,
- b.Disable(ast::DisabledValidation::kFunctionHasNoBody),
- },
- ast::AttributeList{});
- b.AST().AddFunction(func);
- } else if (auto* arr_ty = el_ty->As<sem::Array>()) {
- // fn load_func(buf : buf_ty, offset : u32) -> array<T, N> {
- // var arr : array<T, N>;
- // for (var i = 0u; i < array_count; i = i + 1) {
- // arr[i] = el_load_func(buf, offset + i * array_stride)
- // }
- // return arr;
- // }
- auto load =
- LoadFunc(buf_ty, arr_ty->ElemType()->UnwrapRef(), var_user);
- auto* arr =
- b.Var(b.Symbols().New("arr"), CreateASTTypeFor(ctx, arr_ty));
- auto* i = b.Var(b.Symbols().New("i"), nullptr, b.Expr(0u));
- auto* for_init = b.Decl(i);
- auto* for_cond = b.create<ast::BinaryExpression>(
- ast::BinaryOp::kLessThan, b.Expr(i), b.Expr(arr_ty->Count()));
- auto* for_cont = b.Assign(i, b.Add(i, 1u));
- auto* arr_el = b.IndexAccessor(arr, i);
- auto* el_offset =
- b.Add(b.Expr("offset"), b.Mul(i, arr_ty->Stride()));
- auto* el_val = b.Call(load, "buffer", el_offset);
- auto* for_loop = b.For(for_init, for_cond, for_cont,
- b.Block(b.Assign(arr_el, el_val)));
-
- b.Func(name, params, CreateASTTypeFor(ctx, arr_ty),
- {
- b.Decl(arr),
- for_loop,
- b.Return(arr),
- });
- } else {
- ast::ExpressionList values;
- if (auto* mat_ty = el_ty->As<sem::Matrix>()) {
- auto* vec_ty = mat_ty->ColumnType();
- Symbol load = LoadFunc(buf_ty, vec_ty, var_user);
- for (uint32_t i = 0; i < mat_ty->columns(); i++) {
- auto* offset = b.Add("offset", i * mat_ty->ColumnStride());
- values.emplace_back(b.Call(load, "buffer", offset));
- }
- } else if (auto* str = el_ty->As<sem::Struct>()) {
- for (auto* member : str->Members()) {
- auto* offset = b.Add("offset", member->Offset());
- Symbol load =
- LoadFunc(buf_ty, member->Type()->UnwrapRef(), var_user);
- values.emplace_back(b.Call(load, "buffer", offset));
- }
+ auto* disable_validation =
+ b.Disable(ast::DisabledValidation::kIgnoreConstructibleFunctionParameter);
+ ast::VariableList params{
+ // Note: The buffer parameter requires the StorageClass in
+ // order for HLSL to emit this as a ByteAddressBuffer.
+
+ b.create<ast::Variable>(b.Sym("buffer"), storage_class,
+ var_user->Variable()->Access(), buf_ast_ty, true, false,
+ nullptr, ast::AttributeList{disable_validation}),
+ b.Param("offset", b.ty.u32()),
+ b.Param("value", el_ast_ty),
+ };
+
+ auto name = b.Sym();
+
+ if (auto* intrinsic = IntrinsicStoreFor(ctx.dst, storage_class, el_ty)) {
+ auto* func = b.create<ast::Function>(
+ name, params, b.ty.void_(), nullptr,
+ ast::AttributeList{
+ intrinsic,
+ b.Disable(ast::DisabledValidation::kFunctionHasNoBody),
+ },
+ ast::AttributeList{});
+ b.AST().AddFunction(func);
+ } else {
+ ast::StatementList body;
+ if (auto* arr_ty = el_ty->As<sem::Array>()) {
+                    // fn store_func(buf : buf_ty, offset : u32, value : el_ty) {
+                    //   // No dynamic indexing on constant arrays, so copy to a var first.
+                    //   var array = value;
+                    //   for (var i = 0u; i < array_count; i = i + 1) {
+                    //     el_store_func(buf, offset + i * array_stride,
+                    //                   array[i])
+                    //   }
+                    // }
+ auto* array = b.Var(b.Symbols().New("array"), nullptr, b.Expr("value"));
+ auto store = StoreFunc(buf_ty, arr_ty->ElemType()->UnwrapRef(), var_user);
+ auto* i = b.Var(b.Symbols().New("i"), nullptr, b.Expr(0_u));
+ auto* for_init = b.Decl(i);
+ auto* for_cond = b.create<ast::BinaryExpression>(
+ ast::BinaryOp::kLessThan, b.Expr(i), b.Expr(u32(arr_ty->Count())));
+ auto* for_cont = b.Assign(i, b.Add(i, 1_u));
+ auto* arr_el = b.IndexAccessor(array, i);
+ auto* el_offset = b.Add(b.Expr("offset"), b.Mul(i, u32(arr_ty->Stride())));
+ auto* store_stmt = b.CallStmt(b.Call(store, "buffer", el_offset, arr_el));
+ auto* for_loop = b.For(for_init, for_cond, for_cont, b.Block(store_stmt));
+
+ body = {b.Decl(array), for_loop};
+ } else if (auto* mat_ty = el_ty->As<sem::Matrix>()) {
+ auto* vec_ty = mat_ty->ColumnType();
+ Symbol store = StoreFunc(buf_ty, vec_ty, var_user);
+ for (uint32_t i = 0; i < mat_ty->columns(); i++) {
+ auto* offset = b.Add("offset", u32(i * mat_ty->ColumnStride()));
+ auto* access = b.IndexAccessor("value", u32(i));
+ auto* call = b.Call(store, "buffer", offset, access);
+ body.emplace_back(b.CallStmt(call));
+ }
+ } else if (auto* str = el_ty->As<sem::Struct>()) {
+ for (auto* member : str->Members()) {
+ auto* offset = b.Add("offset", u32(member->Offset()));
+ auto* access =
+ b.MemberAccessor("value", ctx.Clone(member->Declaration()->symbol));
+ Symbol store = StoreFunc(buf_ty, member->Type()->UnwrapRef(), var_user);
+ auto* call = b.Call(store, "buffer", offset, access);
+ body.emplace_back(b.CallStmt(call));
+ }
+ }
+ b.Func(name, params, b.ty.void_(), body);
}
- b.Func(
- name, params, CreateASTTypeFor(ctx, el_ty),
- {
- b.Return(b.Construct(CreateASTTypeFor(ctx, el_ty), values)),
- });
- }
- return name;
+
+ return name;
});
- }
-
- /// StoreFunc() returns a symbol to an intrinsic function that stores an
- /// element of type `el_ty` to a storage buffer of type `buf_ty`.
- /// The function has the signature:
- /// `fn store(buf : buf_ty, offset : u32, value : el_ty)`
- /// @param buf_ty the storage buffer type
- /// @param el_ty the storage buffer element type
- /// @param var_user the variable user
- /// @return the name of the function that performs the store
- Symbol StoreFunc(const sem::Type* buf_ty,
- const sem::Type* el_ty,
- const sem::VariableUser* var_user) {
- auto storage_class = var_user->Variable()->StorageClass();
- return utils::GetOrCreate(
- store_funcs, LoadStoreKey{storage_class, buf_ty, el_ty}, [&] {
- auto* buf_ast_ty = CreateASTTypeFor(ctx, buf_ty);
- auto* el_ast_ty = CreateASTTypeFor(ctx, el_ty);
- auto* disable_validation = b.Disable(
- ast::DisabledValidation::kIgnoreConstructibleFunctionParameter);
- ast::VariableList params{
- // Note: The buffer parameter requires the StorageClass in
- // order for HLSL to emit this as a ByteAddressBuffer.
-
- b.create<ast::Variable>(b.Sym("buffer"), storage_class,
- var_user->Variable()->Access(),
- buf_ast_ty, true, false, nullptr,
- ast::AttributeList{disable_validation}),
- b.Param("offset", b.ty.u32()),
- b.Param("value", el_ast_ty),
- };
-
- auto name = b.Sym();
-
- if (auto* intrinsic =
- IntrinsicStoreFor(ctx.dst, storage_class, el_ty)) {
+ }
+
+ /// AtomicFunc() returns a symbol to an intrinsic function that performs an
+ /// atomic operation from a storage buffer of type `buf_ty`. The function has
+ /// the signature:
+ // `fn atomic_op(buf : buf_ty, offset : u32, ...) -> T`
+ /// @param buf_ty the storage buffer type
+ /// @param el_ty the storage buffer element type
+ /// @param intrinsic the atomic intrinsic
+ /// @param var_user the variable user
+    /// @return the name of the function that performs the atomic operation
+ Symbol AtomicFunc(const sem::Type* buf_ty,
+ const sem::Type* el_ty,
+ const sem::Builtin* intrinsic,
+ const sem::VariableUser* var_user) {
+ auto op = intrinsic->Type();
+ return utils::GetOrCreate(atomic_funcs, AtomicKey{buf_ty, el_ty, op}, [&] {
+ auto* buf_ast_ty = CreateASTTypeFor(ctx, buf_ty);
+ auto* disable_validation =
+ b.Disable(ast::DisabledValidation::kIgnoreConstructibleFunctionParameter);
+ // The first parameter to all WGSL atomics is the expression to the
+ // atomic. This is replaced with two parameters: the buffer and offset.
+
+ ast::VariableList params = {
+ // Note: The buffer parameter requires the kStorage StorageClass in
+ // order for HLSL to emit this as a ByteAddressBuffer.
+ b.create<ast::Variable>(b.Sym("buffer"), ast::StorageClass::kStorage,
+ var_user->Variable()->Access(), buf_ast_ty, true, false,
+ nullptr, ast::AttributeList{disable_validation}),
+ b.Param("offset", b.ty.u32()),
+ };
+
+ // Other parameters are copied as-is:
+ for (size_t i = 1; i < intrinsic->Parameters().size(); i++) {
+ auto* param = intrinsic->Parameters()[i];
+ auto* ty = CreateASTTypeFor(ctx, param->Type());
+ params.emplace_back(b.Param("param_" + std::to_string(i), ty));
+ }
+
+ auto* atomic = IntrinsicAtomicFor(ctx.dst, op, el_ty);
+ if (atomic == nullptr) {
+ TINT_ICE(Transform, b.Diagnostics())
+ << "IntrinsicAtomicFor() returned nullptr for op " << op << " and type "
+ << el_ty->TypeInfo().name;
+ }
+
+ const ast::Type* ret_ty = nullptr;
+
+ // For intrinsics that return a struct, there is no AST node for it, so create one now.
+ if (intrinsic->Type() == sem::BuiltinType::kAtomicCompareExchangeWeak) {
+ auto* str = intrinsic->ReturnType()->As<sem::Struct>();
+ TINT_ASSERT(Transform, str && str->Declaration() == nullptr);
+
+ ast::StructMemberList ast_members;
+ ast_members.reserve(str->Members().size());
+ for (auto& m : str->Members()) {
+ ast_members.push_back(
+ b.Member(ctx.Clone(m->Name()), CreateASTTypeFor(ctx, m->Type())));
+ }
+
+ auto name = b.Symbols().New("atomic_compare_exchange_weak_ret_type");
+ auto* new_str = b.Structure(name, std::move(ast_members));
+ ret_ty = b.ty.Of(new_str);
+ } else {
+ ret_ty = CreateASTTypeFor(ctx, intrinsic->ReturnType());
+ }
+
auto* func = b.create<ast::Function>(
- name, params, b.ty.void_(), nullptr,
+ b.Symbols().New(std::string{"tint_"} + intrinsic->str()), params, ret_ty, nullptr,
ast::AttributeList{
- intrinsic,
+ atomic,
b.Disable(ast::DisabledValidation::kFunctionHasNoBody),
},
ast::AttributeList{});
- b.AST().AddFunction(func);
- } else {
- ast::StatementList body;
- if (auto* arr_ty = el_ty->As<sem::Array>()) {
- // fn store_func(buf : buf_ty, offset : u32, value : el_ty) {
- // var array = value; // No dynamic indexing on constant arrays
- // for (var i = 0u; i < array_count; i = i + 1) {
- // arr[i] = el_store_func(buf, offset + i * array_stride,
- // value[i])
- // }
- // return arr;
- // }
- auto* array =
- b.Var(b.Symbols().New("array"), nullptr, b.Expr("value"));
- auto store =
- StoreFunc(buf_ty, arr_ty->ElemType()->UnwrapRef(), var_user);
- auto* i = b.Var(b.Symbols().New("i"), nullptr, b.Expr(0u));
- auto* for_init = b.Decl(i);
- auto* for_cond = b.create<ast::BinaryExpression>(
- ast::BinaryOp::kLessThan, b.Expr(i), b.Expr(arr_ty->Count()));
- auto* for_cont = b.Assign(i, b.Add(i, 1u));
- auto* arr_el = b.IndexAccessor(array, i);
- auto* el_offset =
- b.Add(b.Expr("offset"), b.Mul(i, arr_ty->Stride()));
- auto* store_stmt =
- b.CallStmt(b.Call(store, "buffer", el_offset, arr_el));
- auto* for_loop =
- b.For(for_init, for_cond, for_cont, b.Block(store_stmt));
-
- body = {b.Decl(array), for_loop};
- } else if (auto* mat_ty = el_ty->As<sem::Matrix>()) {
- auto* vec_ty = mat_ty->ColumnType();
- Symbol store = StoreFunc(buf_ty, vec_ty, var_user);
- for (uint32_t i = 0; i < mat_ty->columns(); i++) {
- auto* offset = b.Add("offset", i * mat_ty->ColumnStride());
- auto* access = b.IndexAccessor("value", i);
- auto* call = b.Call(store, "buffer", offset, access);
- body.emplace_back(b.CallStmt(call));
- }
- } else if (auto* str = el_ty->As<sem::Struct>()) {
- for (auto* member : str->Members()) {
- auto* offset = b.Add("offset", member->Offset());
- auto* access = b.MemberAccessor(
- "value", ctx.Clone(member->Declaration()->symbol));
- Symbol store =
- StoreFunc(buf_ty, member->Type()->UnwrapRef(), var_user);
- auto* call = b.Call(store, "buffer", offset, access);
- body.emplace_back(b.CallStmt(call));
- }
- }
- b.Func(name, params, b.ty.void_(), body);
- }
- return name;
+ b.AST().AddFunction(func);
+ return func->symbol;
});
- }
-
- /// AtomicFunc() returns a symbol to an intrinsic function that performs an
- /// atomic operation from a storage buffer of type `buf_ty`. The function has
- /// the signature:
- // `fn atomic_op(buf : buf_ty, offset : u32, ...) -> T`
- /// @param buf_ty the storage buffer type
- /// @param el_ty the storage buffer element type
- /// @param intrinsic the atomic intrinsic
- /// @param var_user the variable user
- /// @return the name of the function that performs the load
- Symbol AtomicFunc(const sem::Type* buf_ty,
- const sem::Type* el_ty,
- const sem::Builtin* intrinsic,
- const sem::VariableUser* var_user) {
- auto op = intrinsic->Type();
- return utils::GetOrCreate(atomic_funcs, AtomicKey{buf_ty, el_ty, op}, [&] {
- auto* buf_ast_ty = CreateASTTypeFor(ctx, buf_ty);
- auto* disable_validation = b.Disable(
- ast::DisabledValidation::kIgnoreConstructibleFunctionParameter);
- // The first parameter to all WGSL atomics is the expression to the
- // atomic. This is replaced with two parameters: the buffer and offset.
-
- ast::VariableList params = {
- // Note: The buffer parameter requires the kStorage StorageClass in
- // order for HLSL to emit this as a ByteAddressBuffer.
- b.create<ast::Variable>(b.Sym("buffer"), ast::StorageClass::kStorage,
- var_user->Variable()->Access(), buf_ast_ty,
- true, false, nullptr,
- ast::AttributeList{disable_validation}),
- b.Param("offset", b.ty.u32()),
- };
-
- // Other parameters are copied as-is:
- for (size_t i = 1; i < intrinsic->Parameters().size(); i++) {
- auto* param = intrinsic->Parameters()[i];
- auto* ty = CreateASTTypeFor(ctx, param->Type());
- params.emplace_back(b.Param("param_" + std::to_string(i), ty));
- }
-
- auto* atomic = IntrinsicAtomicFor(ctx.dst, op, el_ty);
- if (atomic == nullptr) {
- TINT_ICE(Transform, b.Diagnostics())
- << "IntrinsicAtomicFor() returned nullptr for op " << op
- << " and type " << el_ty->TypeInfo().name;
- }
-
- auto* ret_ty = CreateASTTypeFor(ctx, intrinsic->ReturnType());
- auto* func = b.create<ast::Function>(
- b.Sym(), params, ret_ty, nullptr,
- ast::AttributeList{
- atomic,
- b.Disable(ast::DisabledValidation::kFunctionHasNoBody),
- },
- ast::AttributeList{});
-
- b.AST().AddFunction(func);
- return func->symbol;
- });
- }
+ }
};
-DecomposeMemoryAccess::Intrinsic::Intrinsic(ProgramID pid,
- Op o,
- ast::StorageClass sc,
- DataType ty)
+DecomposeMemoryAccess::Intrinsic::Intrinsic(ProgramID pid, Op o, ast::StorageClass sc, DataType ty)
: Base(pid), op(o), storage_class(sc), type(ty) {}
DecomposeMemoryAccess::Intrinsic::~Intrinsic() = default;
std::string DecomposeMemoryAccess::Intrinsic::InternalName() const {
- std::stringstream ss;
- switch (op) {
- case Op::kLoad:
- ss << "intrinsic_load_";
- break;
- case Op::kStore:
- ss << "intrinsic_store_";
- break;
- case Op::kAtomicLoad:
- ss << "intrinsic_atomic_load_";
- break;
- case Op::kAtomicStore:
- ss << "intrinsic_atomic_store_";
- break;
- case Op::kAtomicAdd:
- ss << "intrinsic_atomic_add_";
- break;
- case Op::kAtomicSub:
- ss << "intrinsic_atomic_sub_";
- break;
- case Op::kAtomicMax:
- ss << "intrinsic_atomic_max_";
- break;
- case Op::kAtomicMin:
- ss << "intrinsic_atomic_min_";
- break;
- case Op::kAtomicAnd:
- ss << "intrinsic_atomic_and_";
- break;
- case Op::kAtomicOr:
- ss << "intrinsic_atomic_or_";
- break;
- case Op::kAtomicXor:
- ss << "intrinsic_atomic_xor_";
- break;
- case Op::kAtomicExchange:
- ss << "intrinsic_atomic_exchange_";
- break;
- case Op::kAtomicCompareExchangeWeak:
- ss << "intrinsic_atomic_compare_exchange_weak_";
- break;
- }
- ss << storage_class << "_";
- switch (type) {
- case DataType::kU32:
- ss << "u32";
- break;
- case DataType::kF32:
- ss << "f32";
- break;
- case DataType::kI32:
- ss << "i32";
- break;
- case DataType::kVec2U32:
- ss << "vec2_u32";
- break;
- case DataType::kVec2F32:
- ss << "vec2_f32";
- break;
- case DataType::kVec2I32:
- ss << "vec2_i32";
- break;
- case DataType::kVec3U32:
- ss << "vec3_u32";
- break;
- case DataType::kVec3F32:
- ss << "vec3_f32";
- break;
- case DataType::kVec3I32:
- ss << "vec3_i32";
- break;
- case DataType::kVec4U32:
- ss << "vec4_u32";
- break;
- case DataType::kVec4F32:
- ss << "vec4_f32";
- break;
- case DataType::kVec4I32:
- ss << "vec4_i32";
- break;
- }
- return ss.str();
+ std::stringstream ss;
+ switch (op) {
+ case Op::kLoad:
+ ss << "intrinsic_load_";
+ break;
+ case Op::kStore:
+ ss << "intrinsic_store_";
+ break;
+ case Op::kAtomicLoad:
+ ss << "intrinsic_atomic_load_";
+ break;
+ case Op::kAtomicStore:
+ ss << "intrinsic_atomic_store_";
+ break;
+ case Op::kAtomicAdd:
+ ss << "intrinsic_atomic_add_";
+ break;
+ case Op::kAtomicSub:
+ ss << "intrinsic_atomic_sub_";
+ break;
+ case Op::kAtomicMax:
+ ss << "intrinsic_atomic_max_";
+ break;
+ case Op::kAtomicMin:
+ ss << "intrinsic_atomic_min_";
+ break;
+ case Op::kAtomicAnd:
+ ss << "intrinsic_atomic_and_";
+ break;
+ case Op::kAtomicOr:
+ ss << "intrinsic_atomic_or_";
+ break;
+ case Op::kAtomicXor:
+ ss << "intrinsic_atomic_xor_";
+ break;
+ case Op::kAtomicExchange:
+ ss << "intrinsic_atomic_exchange_";
+ break;
+ case Op::kAtomicCompareExchangeWeak:
+ ss << "intrinsic_atomic_compare_exchange_weak_";
+ break;
+ }
+ ss << storage_class << "_";
+ switch (type) {
+ case DataType::kU32:
+ ss << "u32";
+ break;
+ case DataType::kF32:
+ ss << "f32";
+ break;
+ case DataType::kI32:
+ ss << "i32";
+ break;
+ case DataType::kVec2U32:
+ ss << "vec2_u32";
+ break;
+ case DataType::kVec2F32:
+ ss << "vec2_f32";
+ break;
+ case DataType::kVec2I32:
+ ss << "vec2_i32";
+ break;
+ case DataType::kVec3U32:
+ ss << "vec3_u32";
+ break;
+ case DataType::kVec3F32:
+ ss << "vec3_f32";
+ break;
+ case DataType::kVec3I32:
+ ss << "vec3_i32";
+ break;
+ case DataType::kVec4U32:
+ ss << "vec4_u32";
+ break;
+ case DataType::kVec4F32:
+ ss << "vec4_f32";
+ break;
+ case DataType::kVec4I32:
+ ss << "vec4_i32";
+ break;
+ }
+ return ss.str();
}
const DecomposeMemoryAccess::Intrinsic* DecomposeMemoryAccess::Intrinsic::Clone(
CloneContext* ctx) const {
- return ctx->dst->ASTNodes().Create<DecomposeMemoryAccess::Intrinsic>(
- ctx->dst->ID(), op, storage_class, type);
+ return ctx->dst->ASTNodes().Create<DecomposeMemoryAccess::Intrinsic>(ctx->dst->ID(), op,
+ storage_class, type);
+}
+
+bool DecomposeMemoryAccess::Intrinsic::IsAtomic() const {
+ return op != Op::kLoad && op != Op::kStore;
}
DecomposeMemoryAccess::DecomposeMemoryAccess() = default;
DecomposeMemoryAccess::~DecomposeMemoryAccess() = default;
-bool DecomposeMemoryAccess::ShouldRun(const Program* program,
- const DataMap&) const {
- for (auto* decl : program->AST().GlobalDeclarations()) {
- if (auto* var = program->Sem().Get<sem::Variable>(decl)) {
- if (var->StorageClass() == ast::StorageClass::kStorage ||
- var->StorageClass() == ast::StorageClass::kUniform) {
- return true;
- }
+bool DecomposeMemoryAccess::ShouldRun(const Program* program, const DataMap&) const {
+ for (auto* decl : program->AST().GlobalDeclarations()) {
+ if (auto* var = program->Sem().Get<sem::Variable>(decl)) {
+ if (var->StorageClass() == ast::StorageClass::kStorage ||
+ var->StorageClass() == ast::StorageClass::kUniform) {
+ return true;
+ }
+ }
}
- }
- return false;
+ return false;
}
-void DecomposeMemoryAccess::Run(CloneContext& ctx,
- const DataMap&,
- DataMap&) const {
- auto& sem = ctx.src->Sem();
-
- State state(ctx);
-
- // Scan the AST nodes for storage and uniform buffer accesses. Complex
- // expression chains (e.g. `storage_buffer.foo.bar[20].x`) are handled by
- // maintaining an offset chain via the `state.TakeAccess()`,
- // `state.AddAccess()` methods.
- //
- // Inner-most expression nodes are guaranteed to be visited first because AST
- // nodes are fully immutable and require their children to be constructed
- // first so their pointer can be passed to the parent's constructor.
- for (auto* node : ctx.src->ASTNodes().Objects()) {
- if (auto* ident = node->As<ast::IdentifierExpression>()) {
- // X
- if (auto* var = sem.Get<sem::VariableUser>(ident)) {
- if (var->Variable()->StorageClass() == ast::StorageClass::kStorage ||
- var->Variable()->StorageClass() == ast::StorageClass::kUniform) {
- // Variable to a storage or uniform buffer
- state.AddAccess(ident, {
- var,
- state.ToOffset(0u),
- var->Type()->UnwrapRef(),
- });
+void DecomposeMemoryAccess::Run(CloneContext& ctx, const DataMap&, DataMap&) const {
+ auto& sem = ctx.src->Sem();
+
+ State state(ctx);
+
+ // Scan the AST nodes for storage and uniform buffer accesses. Complex
+ // expression chains (e.g. `storage_buffer.foo.bar[20].x`) are handled by
+    // maintaining an offset chain via the `state.TakeAccess()` and
+    // `state.AddAccess()` methods.
+ //
+ // Inner-most expression nodes are guaranteed to be visited first because AST
+ // nodes are fully immutable and require their children to be constructed
+ // first so their pointer can be passed to the parent's constructor.
+ for (auto* node : ctx.src->ASTNodes().Objects()) {
+ if (auto* ident = node->As<ast::IdentifierExpression>()) {
+ // X
+ if (auto* var = sem.Get<sem::VariableUser>(ident)) {
+ if (var->Variable()->StorageClass() == ast::StorageClass::kStorage ||
+ var->Variable()->StorageClass() == ast::StorageClass::kUniform) {
+ // Variable to a storage or uniform buffer
+ state.AddAccess(ident, {
+ var,
+ state.ToOffset(0u),
+ var->Type()->UnwrapRef(),
+ });
+ }
+ }
+ continue;
}
- }
- continue;
- }
- if (auto* accessor = node->As<ast::MemberAccessorExpression>()) {
- // X.Y
- auto* accessor_sem = sem.Get(accessor);
- if (auto* swizzle = accessor_sem->As<sem::Swizzle>()) {
- if (swizzle->Indices().size() == 1) {
- if (auto access = state.TakeAccess(accessor->structure)) {
- auto* vec_ty = access.type->As<sem::Vector>();
- auto* offset =
- state.Mul(vec_ty->type()->Size(), swizzle->Indices()[0]);
- state.AddAccess(accessor, {
- access.var,
- state.Add(access.offset, offset),
- vec_ty->type()->UnwrapRef(),
- });
- }
+ if (auto* accessor = node->As<ast::MemberAccessorExpression>()) {
+ // X.Y
+ auto* accessor_sem = sem.Get(accessor);
+ if (auto* swizzle = accessor_sem->As<sem::Swizzle>()) {
+ if (swizzle->Indices().size() == 1) {
+ if (auto access = state.TakeAccess(accessor->structure)) {
+ auto* vec_ty = access.type->As<sem::Vector>();
+ auto* offset = state.Mul(vec_ty->type()->Size(), swizzle->Indices()[0]);
+ state.AddAccess(accessor, {
+ access.var,
+ state.Add(access.offset, offset),
+ vec_ty->type()->UnwrapRef(),
+ });
+ }
+ }
+ } else {
+ if (auto access = state.TakeAccess(accessor->structure)) {
+ auto* str_ty = access.type->As<sem::Struct>();
+ auto* member = str_ty->FindMember(accessor->member->symbol);
+ auto offset = member->Offset();
+ state.AddAccess(accessor, {
+ access.var,
+ state.Add(access.offset, offset),
+ member->Type()->UnwrapRef(),
+ });
+ }
+ }
+ continue;
}
- } else {
- if (auto access = state.TakeAccess(accessor->structure)) {
- auto* str_ty = access.type->As<sem::Struct>();
- auto* member = str_ty->FindMember(accessor->member->symbol);
- auto offset = member->Offset();
- state.AddAccess(accessor, {
- access.var,
- state.Add(access.offset, offset),
- member->Type()->UnwrapRef(),
- });
+
+ if (auto* accessor = node->As<ast::IndexAccessorExpression>()) {
+ if (auto access = state.TakeAccess(accessor->object)) {
+ // X[Y]
+ if (auto* arr = access.type->As<sem::Array>()) {
+ auto* offset = state.Mul(arr->Stride(), accessor->index);
+ state.AddAccess(accessor, {
+ access.var,
+ state.Add(access.offset, offset),
+ arr->ElemType()->UnwrapRef(),
+ });
+ continue;
+ }
+ if (auto* vec_ty = access.type->As<sem::Vector>()) {
+ auto* offset = state.Mul(vec_ty->type()->Size(), accessor->index);
+ state.AddAccess(accessor, {
+ access.var,
+ state.Add(access.offset, offset),
+ vec_ty->type()->UnwrapRef(),
+ });
+ continue;
+ }
+ if (auto* mat_ty = access.type->As<sem::Matrix>()) {
+ auto* offset = state.Mul(mat_ty->ColumnStride(), accessor->index);
+ state.AddAccess(accessor, {
+ access.var,
+ state.Add(access.offset, offset),
+ mat_ty->ColumnType(),
+ });
+ continue;
+ }
+ }
}
- }
- continue;
- }
- if (auto* accessor = node->As<ast::IndexAccessorExpression>()) {
- if (auto access = state.TakeAccess(accessor->object)) {
- // X[Y]
- if (auto* arr = access.type->As<sem::Array>()) {
- auto* offset = state.Mul(arr->Stride(), accessor->index);
- state.AddAccess(accessor, {
- access.var,
- state.Add(access.offset, offset),
- arr->ElemType()->UnwrapRef(),
- });
- continue;
+ if (auto* op = node->As<ast::UnaryOpExpression>()) {
+ if (op->op == ast::UnaryOp::kAddressOf) {
+ // &X
+ if (auto access = state.TakeAccess(op->expr)) {
+ // HLSL does not support pointers, so just take the access from the
+ // reference and place it on the pointer.
+ state.AddAccess(op, access);
+ continue;
+ }
+ }
}
- if (auto* vec_ty = access.type->As<sem::Vector>()) {
- auto* offset = state.Mul(vec_ty->type()->Size(), accessor->index);
- state.AddAccess(accessor, {
- access.var,
- state.Add(access.offset, offset),
- vec_ty->type()->UnwrapRef(),
- });
- continue;
+
+ if (auto* assign = node->As<ast::AssignmentStatement>()) {
+ // X = Y
+ // Move the LHS access to a store.
+ if (auto lhs = state.TakeAccess(assign->lhs)) {
+ state.stores.emplace_back(Store{assign, lhs});
+ }
}
- if (auto* mat_ty = access.type->As<sem::Matrix>()) {
- auto* offset = state.Mul(mat_ty->ColumnStride(), accessor->index);
- state.AddAccess(accessor, {
- access.var,
- state.Add(access.offset, offset),
- mat_ty->ColumnType(),
- });
- continue;
+
+ if (auto* call_expr = node->As<ast::CallExpression>()) {
+ auto* call = sem.Get(call_expr)->UnwrapMaterialize()->As<sem::Call>();
+ if (auto* builtin = call->Target()->As<sem::Builtin>()) {
+ if (builtin->Type() == sem::BuiltinType::kArrayLength) {
+ // arrayLength(X)
+                    // Don't convert X into a load; this builtin actually
+                    // requires the real pointer.
+ state.TakeAccess(call_expr->args[0]);
+ continue;
+ }
+ if (builtin->IsAtomic()) {
+ if (auto access = state.TakeAccess(call_expr->args[0])) {
+ // atomic___(X)
+ ctx.Replace(call_expr, [=, &ctx, &state] {
+ auto* buf = access.var->Declaration();
+ auto* offset = access.offset->Build(ctx);
+ auto* buf_ty = access.var->Type()->UnwrapRef();
+ auto* el_ty = access.type->UnwrapRef()->As<sem::Atomic>()->Type();
+ Symbol func = state.AtomicFunc(buf_ty, el_ty, builtin,
+ access.var->As<sem::VariableUser>());
+
+ ast::ExpressionList args{ctx.Clone(buf), offset};
+ for (size_t i = 1; i < call_expr->args.size(); i++) {
+ auto* arg = call_expr->args[i];
+ args.emplace_back(ctx.Clone(arg));
+ }
+ return ctx.dst->Call(func, args);
+ });
+ }
+ }
+ }
}
- }
}
- if (auto* op = node->As<ast::UnaryOpExpression>()) {
- if (op->op == ast::UnaryOp::kAddressOf) {
- // &X
- if (auto access = state.TakeAccess(op->expr)) {
- // HLSL does not support pointers, so just take the access from the
- // reference and place it on the pointer.
- state.AddAccess(op, access);
- continue;
+    // All remaining accesses are loads; transform these into calls to the
+    // corresponding load function.
+ for (auto* expr : state.expression_order) {
+ auto access_it = state.accesses.find(expr);
+ if (access_it == state.accesses.end()) {
+ continue;
}
- }
+ BufferAccess access = access_it->second;
+ ctx.Replace(expr, [=, &ctx, &state] {
+ auto* buf = access.var->Declaration();
+ auto* offset = access.offset->Build(ctx);
+ auto* buf_ty = access.var->Type()->UnwrapRef();
+ auto* el_ty = access.type->UnwrapRef();
+ Symbol func = state.LoadFunc(buf_ty, el_ty, access.var->As<sem::VariableUser>());
+ return ctx.dst->Call(func, ctx.CloneWithoutTransform(buf), offset);
+ });
}
- if (auto* assign = node->As<ast::AssignmentStatement>()) {
- // X = Y
- // Move the LHS access to a store.
- if (auto lhs = state.TakeAccess(assign->lhs)) {
- state.stores.emplace_back(Store{assign, lhs});
- }
+ // And replace all storage and uniform buffer assignments with stores
+ for (auto store : state.stores) {
+ ctx.Replace(store.assignment, [=, &ctx, &state] {
+ auto* buf = store.target.var->Declaration();
+ auto* offset = store.target.offset->Build(ctx);
+ auto* buf_ty = store.target.var->Type()->UnwrapRef();
+ auto* el_ty = store.target.type->UnwrapRef();
+ auto* value = store.assignment->rhs;
+ Symbol func = state.StoreFunc(buf_ty, el_ty, store.target.var->As<sem::VariableUser>());
+ auto* call =
+ ctx.dst->Call(func, ctx.CloneWithoutTransform(buf), offset, ctx.Clone(value));
+ return ctx.dst->CallStmt(call);
+ });
}
- if (auto* call_expr = node->As<ast::CallExpression>()) {
- auto* call = sem.Get(call_expr);
- if (auto* builtin = call->Target()->As<sem::Builtin>()) {
- if (builtin->Type() == sem::BuiltinType::kArrayLength) {
- // arrayLength(X)
- // Don't convert X into a load, this builtin actually requires the
- // real pointer.
- state.TakeAccess(call_expr->args[0]);
- continue;
- }
- if (builtin->IsAtomic()) {
- if (auto access = state.TakeAccess(call_expr->args[0])) {
- // atomic___(X)
- ctx.Replace(call_expr, [=, &ctx, &state] {
- auto* buf = access.var->Declaration();
- auto* offset = access.offset->Build(ctx);
- auto* buf_ty = access.var->Type()->UnwrapRef();
- auto* el_ty = access.type->UnwrapRef()->As<sem::Atomic>()->Type();
- Symbol func = state.AtomicFunc(
- buf_ty, el_ty, builtin, access.var->As<sem::VariableUser>());
-
- ast::ExpressionList args{ctx.Clone(buf), offset};
- for (size_t i = 1; i < call_expr->args.size(); i++) {
- auto* arg = call_expr->args[i];
- args.emplace_back(ctx.Clone(arg));
- }
- return ctx.dst->Call(func, args);
- });
- }
- }
- }
- }
- }
-
- // All remaining accesses are loads, transform these into calls to the
- // corresponding load function
- for (auto* expr : state.expression_order) {
- auto access_it = state.accesses.find(expr);
- if (access_it == state.accesses.end()) {
- continue;
- }
- BufferAccess access = access_it->second;
- ctx.Replace(expr, [=, &ctx, &state] {
- auto* buf = access.var->Declaration();
- auto* offset = access.offset->Build(ctx);
- auto* buf_ty = access.var->Type()->UnwrapRef();
- auto* el_ty = access.type->UnwrapRef();
- Symbol func =
- state.LoadFunc(buf_ty, el_ty, access.var->As<sem::VariableUser>());
- return ctx.dst->Call(func, ctx.CloneWithoutTransform(buf), offset);
- });
- }
-
- // And replace all storage and uniform buffer assignments with stores
- for (auto store : state.stores) {
- ctx.Replace(store.assignment, [=, &ctx, &state] {
- auto* buf = store.target.var->Declaration();
- auto* offset = store.target.offset->Build(ctx);
- auto* buf_ty = store.target.var->Type()->UnwrapRef();
- auto* el_ty = store.target.type->UnwrapRef();
- auto* value = store.assignment->rhs;
- Symbol func = state.StoreFunc(buf_ty, el_ty,
- store.target.var->As<sem::VariableUser>());
- auto* call = ctx.dst->Call(func, ctx.CloneWithoutTransform(buf), offset,
- ctx.Clone(value));
- return ctx.dst->CallStmt(call);
- });
- }
-
- ctx.Clone();
+ ctx.Clone();
}
} // namespace tint::transform
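
For orientation before the header and test diffs that follow, here is a condensed before/after sketch of the access decomposition itself, lifted from the ComplexStaticAccessChain expectation further down; the tint_symbol name and the 712u byte offset are specific to that test's struct layout, not fixed outputs of the transform.

// Before the transform: a chained storage-buffer access.
var x : f32 = sb.b[4].b[1].b.z;

// After the transform: the member/index chain is folded into a single byte
// offset and the access becomes a call to a generated body-less load stub.
@internal(intrinsic_load_storage_f32) @internal(disable_validation__function_has_no_body)
fn tint_symbol(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32) -> f32

var x : f32 = tint_symbol(sb, 712u);
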
diff --git a/chromium/third_party/dawn/src/tint/transform/decompose_memory_access.h b/chromium/third_party/dawn/src/tint/transform/decompose_memory_access.h
index 9aa0eb5e7bc..76cb23e2ff3 100644
--- a/chromium/third_party/dawn/src/tint/transform/decompose_memory_access.h
+++ b/chromium/third_party/dawn/src/tint/transform/decompose_memory_access.h
@@ -30,99 +30,98 @@ namespace tint::transform {
/// DecomposeMemoryAccess is a transform used to replace storage and uniform
/// buffer accesses with a combination of load, store or atomic functions on
/// primitive types.
-class DecomposeMemoryAccess final
- : public Castable<DecomposeMemoryAccess, Transform> {
- public:
- /// Intrinsic is an InternalAttribute that's used to decorate a stub function
- /// so that the HLSL transforms this into calls to
- /// `[RW]ByteAddressBuffer.Load[N]()` or `[RW]ByteAddressBuffer.Store[N]()`,
- /// with a possible cast.
- class Intrinsic final : public Castable<Intrinsic, ast::InternalAttribute> {
- public:
- /// Intrinsic op
- enum class Op {
- kLoad,
- kStore,
- kAtomicLoad,
- kAtomicStore,
- kAtomicAdd,
- kAtomicSub,
- kAtomicMax,
- kAtomicMin,
- kAtomicAnd,
- kAtomicOr,
- kAtomicXor,
- kAtomicExchange,
- kAtomicCompareExchangeWeak,
- };
-
- /// Intrinsic data type
- enum class DataType {
- kU32,
- kF32,
- kI32,
- kVec2U32,
- kVec2F32,
- kVec2I32,
- kVec3U32,
- kVec3F32,
- kVec3I32,
- kVec4U32,
- kVec4F32,
- kVec4I32,
+class DecomposeMemoryAccess final : public Castable<DecomposeMemoryAccess, Transform> {
+ public:
+    /// Intrinsic is an InternalAttribute used to decorate a stub function so
+    /// that the HLSL backend transforms it into calls to
+    /// `[RW]ByteAddressBuffer.Load[N]()` or `[RW]ByteAddressBuffer.Store[N]()`,
+    /// with a possible cast.
+ class Intrinsic final : public Castable<Intrinsic, ast::InternalAttribute> {
+ public:
+ /// Intrinsic op
+ enum class Op {
+ kLoad,
+ kStore,
+ kAtomicLoad,
+ kAtomicStore,
+ kAtomicAdd,
+ kAtomicSub,
+ kAtomicMax,
+ kAtomicMin,
+ kAtomicAnd,
+ kAtomicOr,
+ kAtomicXor,
+ kAtomicExchange,
+ kAtomicCompareExchangeWeak,
+ };
+
+ /// Intrinsic data type
+ enum class DataType {
+ kU32,
+ kF32,
+ kI32,
+ kVec2U32,
+ kVec2F32,
+ kVec2I32,
+ kVec3U32,
+ kVec3F32,
+ kVec3I32,
+ kVec4U32,
+ kVec4F32,
+ kVec4I32,
+ };
+
+ /// Constructor
+ /// @param program_id the identifier of the program that owns this node
+ /// @param o the op of the intrinsic
+ /// @param sc the storage class of the buffer
+ /// @param ty the data type of the intrinsic
+ Intrinsic(ProgramID program_id, Op o, ast::StorageClass sc, DataType ty);
+ /// Destructor
+ ~Intrinsic() override;
+
+ /// @return a short description of the internal attribute which will be
+ /// displayed as `@internal(<name>)`
+ std::string InternalName() const override;
+
+ /// Performs a deep clone of this object using the CloneContext `ctx`.
+ /// @param ctx the clone context
+ /// @return the newly cloned object
+ const Intrinsic* Clone(CloneContext* ctx) const override;
+
+ /// @return true if op is atomic
+ bool IsAtomic() const;
+
+ /// The op of the intrinsic
+ const Op op;
+
+ /// The storage class of the buffer this intrinsic operates on
+ ast::StorageClass const storage_class;
+
+ /// The type of the intrinsic
+ const DataType type;
};
/// Constructor
- /// @param program_id the identifier of the program that owns this node
- /// @param o the op of the intrinsic
- /// @param sc the storage class of the buffer
- /// @param ty the data type of the intrinsic
- Intrinsic(ProgramID program_id, Op o, ast::StorageClass sc, DataType ty);
+ DecomposeMemoryAccess();
/// Destructor
- ~Intrinsic() override;
-
- /// @return a short description of the internal attribute which will be
- /// displayed as `@internal(<name>)`
- std::string InternalName() const override;
-
- /// Performs a deep clone of this object using the CloneContext `ctx`.
- /// @param ctx the clone context
- /// @return the newly cloned object
- const Intrinsic* Clone(CloneContext* ctx) const override;
-
- /// The op of the intrinsic
- const Op op;
-
- /// The storage class of the buffer this intrinsic operates on
- ast::StorageClass const storage_class;
-
- /// The type of the intrinsic
- const DataType type;
- };
-
- /// Constructor
- DecomposeMemoryAccess();
- /// Destructor
- ~DecomposeMemoryAccess() override;
-
- /// @param program the program to inspect
- /// @param data optional extra transform-specific input data
- /// @returns true if this transform should be run for the given program
- bool ShouldRun(const Program* program,
- const DataMap& data = {}) const override;
-
- protected:
- /// Runs the transform using the CloneContext built for transforming a
- /// program. Run() is responsible for calling Clone() on the CloneContext.
- /// @param ctx the CloneContext primed with the input program and
- /// ProgramBuilder
- /// @param inputs optional extra transform-specific input data
- /// @param outputs optional extra transform-specific output data
- void Run(CloneContext& ctx,
- const DataMap& inputs,
- DataMap& outputs) const override;
-
- struct State;
+ ~DecomposeMemoryAccess() override;
+
+ /// @param program the program to inspect
+ /// @param data optional extra transform-specific input data
+ /// @returns true if this transform should be run for the given program
+ bool ShouldRun(const Program* program, const DataMap& data = {}) const override;
+
+ protected:
+ /// Runs the transform using the CloneContext built for transforming a
+ /// program. Run() is responsible for calling Clone() on the CloneContext.
+ /// @param ctx the CloneContext primed with the input program and
+ /// ProgramBuilder
+ /// @param inputs optional extra transform-specific input data
+ /// @param outputs optional extra transform-specific output data
+ void Run(CloneContext& ctx, const DataMap& inputs, DataMap& outputs) const override;
+
+ struct State;
};
} // namespace tint::transform
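
The Intrinsic attribute documented above surfaces in the generated WGSL as `@internal(...)` markers on body-less stubs, which the HLSL writer later lowers to `[RW]ByteAddressBuffer` calls. A condensed sketch, taken from the StorageBufferAtomics expectations below, shows the per-builtin naming introduced by this patch and the struct now synthesized for atomicCompareExchangeWeak in place of the old vec2 return type.

@internal(intrinsic_atomic_add_storage_i32) @internal(disable_validation__function_has_no_body)
fn tint_atomicAdd(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : i32) -> i32

struct atomic_compare_exchange_weak_ret_type {
  old_value : i32,
  exchanged : bool,
}

@internal(intrinsic_atomic_compare_exchange_weak_storage_i32) @internal(disable_validation__function_has_no_body)
fn tint_atomicCompareExchangeWeak(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : i32, param_2 : i32) -> atomic_compare_exchange_weak_ret_type
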
diff --git a/chromium/third_party/dawn/src/tint/transform/decompose_memory_access_test.cc b/chromium/third_party/dawn/src/tint/transform/decompose_memory_access_test.cc
index dadb4226dad..4b96bcbb424 100644
--- a/chromium/third_party/dawn/src/tint/transform/decompose_memory_access_test.cc
+++ b/chromium/third_party/dawn/src/tint/transform/decompose_memory_access_test.cc
@@ -22,35 +22,35 @@ namespace {
using DecomposeMemoryAccessTest = TransformTest;
TEST_F(DecomposeMemoryAccessTest, ShouldRunEmptyModule) {
- auto* src = R"()";
+ auto* src = R"()";
- EXPECT_FALSE(ShouldRun<DecomposeMemoryAccess>(src));
+ EXPECT_FALSE(ShouldRun<DecomposeMemoryAccess>(src));
}
TEST_F(DecomposeMemoryAccessTest, ShouldRunStorageBuffer) {
- auto* src = R"(
+ auto* src = R"(
struct Buffer {
i : i32,
};
@group(0) @binding(0) var<storage, read_write> sb : Buffer;
)";
- EXPECT_TRUE(ShouldRun<DecomposeMemoryAccess>(src));
+ EXPECT_TRUE(ShouldRun<DecomposeMemoryAccess>(src));
}
TEST_F(DecomposeMemoryAccessTest, ShouldRunUniformBuffer) {
- auto* src = R"(
+ auto* src = R"(
struct Buffer {
i : i32,
};
@group(0) @binding(0) var<uniform> ub : Buffer;
)";
- EXPECT_TRUE(ShouldRun<DecomposeMemoryAccess>(src));
+ EXPECT_TRUE(ShouldRun<DecomposeMemoryAccess>(src));
}
TEST_F(DecomposeMemoryAccessTest, SB_BasicLoad) {
- auto* src = R"(
+ auto* src = R"(
struct SB {
a : i32,
b : u32,
@@ -78,7 +78,7 @@ struct SB {
@group(0) @binding(0) var<storage, read_write> sb : SB;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
var a : i32 = sb.a;
var b : u32 = sb.b;
@@ -105,7 +105,7 @@ fn main() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct SB {
a : i32,
b : u32,
@@ -213,7 +213,7 @@ fn tint_symbol_21(@internal(disable_validation__ignore_constructible_function_pa
return arr;
}
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
var a : i32 = tint_symbol(sb, 0u);
var b : u32 = tint_symbol_1(sb, 4u);
@@ -240,14 +240,14 @@ fn main() {
}
)";
- auto got = Run<DecomposeMemoryAccess>(src);
+ auto got = Run<DecomposeMemoryAccess>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(DecomposeMemoryAccessTest, SB_BasicLoad_OutOfOrder) {
- auto* src = R"(
-@stage(compute) @workgroup_size(1)
+ auto* src = R"(
+@compute @workgroup_size(1)
fn main() {
var a : i32 = sb.a;
var b : u32 = sb.b;
@@ -301,7 +301,7 @@ struct SB {
};
)";
- auto* expect = R"(
+ auto* expect = R"(
@internal(intrinsic_load_storage_i32) @internal(disable_validation__function_has_no_body)
fn tint_symbol(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32) -> i32
@@ -382,7 +382,7 @@ fn tint_symbol_21(@internal(disable_validation__ignore_constructible_function_pa
return arr;
}
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
var a : i32 = tint_symbol(sb, 0u);
var b : u32 = tint_symbol_1(sb, 4u);
@@ -436,13 +436,13 @@ struct SB {
}
)";
- auto got = Run<DecomposeMemoryAccess>(src);
+ auto got = Run<DecomposeMemoryAccess>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(DecomposeMemoryAccessTest, UB_BasicLoad) {
- auto* src = R"(
+ auto* src = R"(
struct UB {
a : i32,
b : u32,
@@ -470,7 +470,7 @@ struct UB {
@group(0) @binding(0) var<uniform> ub : UB;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
var a : i32 = ub.a;
var b : u32 = ub.b;
@@ -497,7 +497,7 @@ fn main() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct UB {
a : i32,
b : u32,
@@ -605,7 +605,7 @@ fn tint_symbol_21(@internal(disable_validation__ignore_constructible_function_pa
return arr;
}
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
var a : i32 = tint_symbol(ub, 0u);
var b : u32 = tint_symbol_1(ub, 4u);
@@ -632,14 +632,14 @@ fn main() {
}
)";
- auto got = Run<DecomposeMemoryAccess>(src);
+ auto got = Run<DecomposeMemoryAccess>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(DecomposeMemoryAccessTest, UB_BasicLoad_OutOfOrder) {
- auto* src = R"(
-@stage(compute) @workgroup_size(1)
+ auto* src = R"(
+@compute @workgroup_size(1)
fn main() {
var a : i32 = ub.a;
var b : u32 = ub.b;
@@ -693,7 +693,7 @@ struct UB {
};
)";
- auto* expect = R"(
+ auto* expect = R"(
@internal(intrinsic_load_uniform_i32) @internal(disable_validation__function_has_no_body)
fn tint_symbol(@internal(disable_validation__ignore_constructible_function_parameter) buffer : UB, offset : u32) -> i32
@@ -774,7 +774,7 @@ fn tint_symbol_21(@internal(disable_validation__ignore_constructible_function_pa
return arr;
}
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
var a : i32 = tint_symbol(ub, 0u);
var b : u32 = tint_symbol_1(ub, 4u);
@@ -828,13 +828,13 @@ struct UB {
}
)";
- auto got = Run<DecomposeMemoryAccess>(src);
+ auto got = Run<DecomposeMemoryAccess>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(DecomposeMemoryAccessTest, SB_BasicStore) {
- auto* src = R"(
+ auto* src = R"(
struct SB {
a : i32,
b : u32,
@@ -862,7 +862,7 @@ struct SB {
@group(0) @binding(0) var<storage, read_write> sb : SB;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
sb.a = i32();
sb.b = u32();
@@ -889,7 +889,7 @@ fn main() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct SB {
a : i32,
b : u32,
@@ -1014,7 +1014,7 @@ fn tint_symbol_21(@internal(disable_validation__ignore_constructible_function_pa
}
}
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
tint_symbol(sb, 0u, i32());
tint_symbol_1(sb, 4u, u32());
@@ -1041,14 +1041,14 @@ fn main() {
}
)";
- auto got = Run<DecomposeMemoryAccess>(src);
+ auto got = Run<DecomposeMemoryAccess>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(DecomposeMemoryAccessTest, SB_BasicStore_OutOfOrder) {
- auto* src = R"(
-@stage(compute) @workgroup_size(1)
+ auto* src = R"(
+@compute @workgroup_size(1)
fn main() {
sb.a = i32();
sb.b = u32();
@@ -1102,7 +1102,7 @@ struct SB {
};
)";
- auto* expect = R"(
+ auto* expect = R"(
@internal(intrinsic_store_storage_i32) @internal(disable_validation__function_has_no_body)
fn tint_symbol(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, value : i32)
@@ -1200,7 +1200,7 @@ fn tint_symbol_21(@internal(disable_validation__ignore_constructible_function_pa
}
}
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
tint_symbol(sb, 0u, i32());
tint_symbol_1(sb, 4u, u32());
@@ -1254,13 +1254,13 @@ struct SB {
}
)";
- auto got = Run<DecomposeMemoryAccess>(src);
+ auto got = Run<DecomposeMemoryAccess>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(DecomposeMemoryAccessTest, LoadStructure) {
- auto* src = R"(
+ auto* src = R"(
struct SB {
a : i32,
b : u32,
@@ -1288,13 +1288,13 @@ struct SB {
@group(0) @binding(0) var<storage, read_write> sb : SB;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
var x : SB = sb;
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct SB {
a : i32,
b : u32,
@@ -1406,20 +1406,20 @@ fn tint_symbol(@internal(disable_validation__ignore_constructible_function_param
return SB(tint_symbol_1(buffer, (offset + 0u)), tint_symbol_2(buffer, (offset + 4u)), tint_symbol_3(buffer, (offset + 8u)), tint_symbol_4(buffer, (offset + 16u)), tint_symbol_5(buffer, (offset + 24u)), tint_symbol_6(buffer, (offset + 32u)), tint_symbol_7(buffer, (offset + 48u)), tint_symbol_8(buffer, (offset + 64u)), tint_symbol_9(buffer, (offset + 80u)), tint_symbol_10(buffer, (offset + 96u)), tint_symbol_11(buffer, (offset + 112u)), tint_symbol_12(buffer, (offset + 128u)), tint_symbol_13(buffer, (offset + 144u)), tint_symbol_14(buffer, (offset + 160u)), tint_symbol_15(buffer, (offset + 192u)), tint_symbol_16(buffer, (offset + 224u)), tint_symbol_17(buffer, (offset + 256u)), tint_symbol_18(buffer, (offset + 304u)), tint_symbol_19(buffer, (offset + 352u)), tint_symbol_20(buffer, (offset + 384u)), tint_symbol_21(buffer, (offset + 448u)), tint_symbol_22(buffer, (offset + 512u)));
}
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
var x : SB = tint_symbol(sb, 0u);
}
)";
- auto got = Run<DecomposeMemoryAccess>(src);
+ auto got = Run<DecomposeMemoryAccess>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(DecomposeMemoryAccessTest, LoadStructure_OutOfOrder) {
- auto* src = R"(
-@stage(compute) @workgroup_size(1)
+ auto* src = R"(
+@compute @workgroup_size(1)
fn main() {
var x : SB = sb;
}
@@ -1452,7 +1452,7 @@ struct SB {
};
)";
- auto* expect = R"(
+ auto* expect = R"(
@internal(intrinsic_load_storage_i32) @internal(disable_validation__function_has_no_body)
fn tint_symbol_1(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32) -> i32
@@ -1537,7 +1537,7 @@ fn tint_symbol(@internal(disable_validation__ignore_constructible_function_param
return SB(tint_symbol_1(buffer, (offset + 0u)), tint_symbol_2(buffer, (offset + 4u)), tint_symbol_3(buffer, (offset + 8u)), tint_symbol_4(buffer, (offset + 16u)), tint_symbol_5(buffer, (offset + 24u)), tint_symbol_6(buffer, (offset + 32u)), tint_symbol_7(buffer, (offset + 48u)), tint_symbol_8(buffer, (offset + 64u)), tint_symbol_9(buffer, (offset + 80u)), tint_symbol_10(buffer, (offset + 96u)), tint_symbol_11(buffer, (offset + 112u)), tint_symbol_12(buffer, (offset + 128u)), tint_symbol_13(buffer, (offset + 144u)), tint_symbol_14(buffer, (offset + 160u)), tint_symbol_15(buffer, (offset + 192u)), tint_symbol_16(buffer, (offset + 224u)), tint_symbol_17(buffer, (offset + 256u)), tint_symbol_18(buffer, (offset + 304u)), tint_symbol_19(buffer, (offset + 352u)), tint_symbol_20(buffer, (offset + 384u)), tint_symbol_21(buffer, (offset + 448u)), tint_symbol_22(buffer, (offset + 512u)));
}
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
var x : SB = tint_symbol(sb, 0u);
}
@@ -1570,13 +1570,13 @@ struct SB {
}
)";
- auto got = Run<DecomposeMemoryAccess>(src);
+ auto got = Run<DecomposeMemoryAccess>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(DecomposeMemoryAccessTest, StoreStructure) {
- auto* src = R"(
+ auto* src = R"(
struct SB {
a : i32,
b : u32,
@@ -1604,13 +1604,13 @@ struct SB {
@group(0) @binding(0) var<storage, read_write> sb : SB;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
sb = SB();
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct SB {
a : i32,
b : u32,
@@ -1760,20 +1760,20 @@ fn tint_symbol(@internal(disable_validation__ignore_constructible_function_param
tint_symbol_22(buffer, (offset + 512u), value.v);
}
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
tint_symbol(sb, 0u, SB());
}
)";
- auto got = Run<DecomposeMemoryAccess>(src);
+ auto got = Run<DecomposeMemoryAccess>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(DecomposeMemoryAccessTest, StoreStructure_OutOfOrder) {
- auto* src = R"(
-@stage(compute) @workgroup_size(1)
+ auto* src = R"(
+@compute @workgroup_size(1)
fn main() {
sb = SB();
}
@@ -1806,7 +1806,7 @@ struct SB {
};
)";
- auto* expect = R"(
+ auto* expect = R"(
@internal(intrinsic_store_storage_i32) @internal(disable_validation__function_has_no_body)
fn tint_symbol_1(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, value : i32)
@@ -1929,7 +1929,7 @@ fn tint_symbol(@internal(disable_validation__ignore_constructible_function_param
tint_symbol_22(buffer, (offset + 512u), value.v);
}
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
tint_symbol(sb, 0u, SB());
}
@@ -1962,13 +1962,13 @@ struct SB {
}
)";
- auto got = Run<DecomposeMemoryAccess>(src);
+ auto got = Run<DecomposeMemoryAccess>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(DecomposeMemoryAccessTest, ComplexStaticAccessChain) {
- auto* src = R"(
+ auto* src = R"(
// sizeof(S1) == 32
// alignof(S1) == 16
struct S1 {
@@ -1993,20 +1993,20 @@ struct SB {
@group(0) @binding(0) var<storage, read_write> sb : SB;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
var x : f32 = sb.b[4].b[1].b.z;
}
)";
- // sb.b[4].b[1].b.z
- // ^ ^ ^ ^ ^ ^
- // | | | | | |
- // 128 | |688 | 712
- // | | |
- // 640 656 704
+ // sb.b[4].b[1].b.z
+ // ^ ^ ^ ^ ^ ^
+ // | | | | | |
+ // 128 | |688 | 712
+ // | | |
+ // 640 656 704
- auto* expect = R"(
+ auto* expect = R"(
struct S1 {
a : i32,
b : vec3<f32>,
@@ -2030,20 +2030,20 @@ struct SB {
@internal(intrinsic_load_storage_f32) @internal(disable_validation__function_has_no_body)
fn tint_symbol(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32) -> f32
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
var x : f32 = tint_symbol(sb, 712u);
}
)";
- auto got = Run<DecomposeMemoryAccess>(src);
+ auto got = Run<DecomposeMemoryAccess>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(DecomposeMemoryAccessTest, ComplexStaticAccessChain_OutOfOrder) {
- auto* src = R"(
-@stage(compute) @workgroup_size(1)
+ auto* src = R"(
+@compute @workgroup_size(1)
fn main() {
var x : f32 = sb.b[4].b[1].b.z;
}
@@ -2069,18 +2069,18 @@ struct S1 {
};
)";
- // sb.b[4].b[1].b.z
- // ^ ^ ^ ^ ^ ^
- // | | | | | |
- // 128 | |688 | 712
- // | | |
- // 640 656 704
+ // sb.b[4].b[1].b.z
+ // ^ ^ ^ ^ ^ ^
+ // | | | | | |
+ // 128 | |688 | 712
+ // | | |
+ // 640 656 704
- auto* expect = R"(
+ auto* expect = R"(
@internal(intrinsic_load_storage_f32) @internal(disable_validation__function_has_no_body)
fn tint_symbol(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32) -> f32
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
var x : f32 = tint_symbol(sb, 712u);
}
@@ -2106,13 +2106,13 @@ struct S1 {
}
)";
- auto got = Run<DecomposeMemoryAccess>(src);
+ auto got = Run<DecomposeMemoryAccess>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(DecomposeMemoryAccessTest, ComplexDynamicAccessChain) {
- auto* src = R"(
+ auto* src = R"(
struct S1 {
a : i32,
b : vec3<f32>,
@@ -2133,7 +2133,7 @@ struct SB {
@group(0) @binding(0) var<storage, read_write> sb : SB;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
var i : i32 = 4;
var j : u32 = 1u;
@@ -2142,7 +2142,7 @@ fn main() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct S1 {
a : i32,
b : vec3<f32>,
@@ -2166,7 +2166,7 @@ struct SB {
@internal(intrinsic_load_storage_f32) @internal(disable_validation__function_has_no_body)
fn tint_symbol(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32) -> f32
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
var i : i32 = 4;
var j : u32 = 1u;
@@ -2175,14 +2175,14 @@ fn main() {
}
)";
- auto got = Run<DecomposeMemoryAccess>(src);
+ auto got = Run<DecomposeMemoryAccess>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(DecomposeMemoryAccessTest, ComplexDynamicAccessChain_OutOfOrder) {
- auto* src = R"(
-@stage(compute) @workgroup_size(1)
+ auto* src = R"(
+@compute @workgroup_size(1)
fn main() {
var i : i32 = 4;
var j : u32 = 1u;
@@ -2211,11 +2211,11 @@ struct S1 {
};
)";
- auto* expect = R"(
+ auto* expect = R"(
@internal(intrinsic_load_storage_f32) @internal(disable_validation__function_has_no_body)
fn tint_symbol(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32) -> f32
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
var i : i32 = 4;
var j : u32 = 1u;
@@ -2244,13 +2244,13 @@ struct S1 {
}
)";
- auto got = Run<DecomposeMemoryAccess>(src);
+ auto got = Run<DecomposeMemoryAccess>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(DecomposeMemoryAccessTest, ComplexDynamicAccessChainWithAliases) {
- auto* src = R"(
+ auto* src = R"(
struct S1 {
a : i32,
b : vec3<f32>,
@@ -2279,7 +2279,7 @@ struct SB {
@group(0) @binding(0) var<storage, read_write> sb : SB;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
var i : i32 = 4;
var j : u32 = 1u;
@@ -2288,7 +2288,7 @@ fn main() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct S1 {
a : i32,
b : vec3<f32>,
@@ -2320,7 +2320,7 @@ struct SB {
@internal(intrinsic_load_storage_f32) @internal(disable_validation__function_has_no_body)
fn tint_symbol(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32) -> f32
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
var i : i32 = 4;
var j : u32 = 1u;
@@ -2329,15 +2329,14 @@ fn main() {
}
)";
- auto got = Run<DecomposeMemoryAccess>(src);
+ auto got = Run<DecomposeMemoryAccess>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
-TEST_F(DecomposeMemoryAccessTest,
- ComplexDynamicAccessChainWithAliases_OutOfOrder) {
- auto* src = R"(
-@stage(compute) @workgroup_size(1)
+TEST_F(DecomposeMemoryAccessTest, ComplexDynamicAccessChainWithAliases_OutOfOrder) {
+ auto* src = R"(
+@compute @workgroup_size(1)
fn main() {
var i : i32 = 4;
var j : u32 = 1u;
@@ -2374,11 +2373,11 @@ struct S1 {
};
)";
- auto* expect = R"(
+ auto* expect = R"(
@internal(intrinsic_load_storage_f32) @internal(disable_validation__function_has_no_body)
fn tint_symbol(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32) -> f32
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
var i : i32 = 4;
var j : u32 = 1u;
@@ -2415,13 +2414,13 @@ struct S1 {
}
)";
- auto got = Run<DecomposeMemoryAccess>(src);
+ auto got = Run<DecomposeMemoryAccess>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(DecomposeMemoryAccessTest, StorageBufferAtomics) {
- auto* src = R"(
+ auto* src = R"(
struct SB {
padding : vec4<f32>,
a : atomic<i32>,
@@ -2430,7 +2429,7 @@ struct SB {
@group(0) @binding(0) var<storage, read_write> sb : SB;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
atomicStore(&sb.a, 123);
atomicLoad(&sb.a);
@@ -2458,7 +2457,7 @@ fn main() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct SB {
padding : vec4<f32>,
a : atomic<i32>,
@@ -2468,106 +2467,116 @@ struct SB {
@group(0) @binding(0) var<storage, read_write> sb : SB;
@internal(intrinsic_atomic_store_storage_i32) @internal(disable_validation__function_has_no_body)
-fn tint_symbol(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : i32)
+fn tint_atomicStore(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : i32)
@internal(intrinsic_atomic_load_storage_i32) @internal(disable_validation__function_has_no_body)
-fn tint_symbol_1(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32) -> i32
+fn tint_atomicLoad(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32) -> i32
@internal(intrinsic_atomic_add_storage_i32) @internal(disable_validation__function_has_no_body)
-fn tint_symbol_2(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : i32) -> i32
+fn tint_atomicAdd(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : i32) -> i32
@internal(intrinsic_atomic_sub_storage_i32) @internal(disable_validation__function_has_no_body)
-fn tint_symbol_3(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : i32) -> i32
+fn tint_atomicSub(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : i32) -> i32
@internal(intrinsic_atomic_max_storage_i32) @internal(disable_validation__function_has_no_body)
-fn tint_symbol_4(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : i32) -> i32
+fn tint_atomicMax(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : i32) -> i32
@internal(intrinsic_atomic_min_storage_i32) @internal(disable_validation__function_has_no_body)
-fn tint_symbol_5(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : i32) -> i32
+fn tint_atomicMin(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : i32) -> i32
@internal(intrinsic_atomic_and_storage_i32) @internal(disable_validation__function_has_no_body)
-fn tint_symbol_6(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : i32) -> i32
+fn tint_atomicAnd(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : i32) -> i32
@internal(intrinsic_atomic_or_storage_i32) @internal(disable_validation__function_has_no_body)
-fn tint_symbol_7(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : i32) -> i32
+fn tint_atomicOr(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : i32) -> i32
@internal(intrinsic_atomic_xor_storage_i32) @internal(disable_validation__function_has_no_body)
-fn tint_symbol_8(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : i32) -> i32
+fn tint_atomicXor(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : i32) -> i32
@internal(intrinsic_atomic_exchange_storage_i32) @internal(disable_validation__function_has_no_body)
-fn tint_symbol_9(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : i32) -> i32
+fn tint_atomicExchange(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : i32) -> i32
+
+struct atomic_compare_exchange_weak_ret_type {
+ old_value : i32,
+ exchanged : bool,
+}
@internal(intrinsic_atomic_compare_exchange_weak_storage_i32) @internal(disable_validation__function_has_no_body)
-fn tint_symbol_10(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : i32, param_2 : i32) -> vec2<i32>
+fn tint_atomicCompareExchangeWeak(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : i32, param_2 : i32) -> atomic_compare_exchange_weak_ret_type
@internal(intrinsic_atomic_store_storage_u32) @internal(disable_validation__function_has_no_body)
-fn tint_symbol_11(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : u32)
+fn tint_atomicStore_1(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : u32)
@internal(intrinsic_atomic_load_storage_u32) @internal(disable_validation__function_has_no_body)
-fn tint_symbol_12(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32) -> u32
+fn tint_atomicLoad_1(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32) -> u32
@internal(intrinsic_atomic_add_storage_u32) @internal(disable_validation__function_has_no_body)
-fn tint_symbol_13(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : u32) -> u32
+fn tint_atomicAdd_1(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : u32) -> u32
@internal(intrinsic_atomic_sub_storage_u32) @internal(disable_validation__function_has_no_body)
-fn tint_symbol_14(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : u32) -> u32
+fn tint_atomicSub_1(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : u32) -> u32
@internal(intrinsic_atomic_max_storage_u32) @internal(disable_validation__function_has_no_body)
-fn tint_symbol_15(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : u32) -> u32
+fn tint_atomicMax_1(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : u32) -> u32
@internal(intrinsic_atomic_min_storage_u32) @internal(disable_validation__function_has_no_body)
-fn tint_symbol_16(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : u32) -> u32
+fn tint_atomicMin_1(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : u32) -> u32
@internal(intrinsic_atomic_and_storage_u32) @internal(disable_validation__function_has_no_body)
-fn tint_symbol_17(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : u32) -> u32
+fn tint_atomicAnd_1(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : u32) -> u32
@internal(intrinsic_atomic_or_storage_u32) @internal(disable_validation__function_has_no_body)
-fn tint_symbol_18(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : u32) -> u32
+fn tint_atomicOr_1(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : u32) -> u32
@internal(intrinsic_atomic_xor_storage_u32) @internal(disable_validation__function_has_no_body)
-fn tint_symbol_19(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : u32) -> u32
+fn tint_atomicXor_1(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : u32) -> u32
@internal(intrinsic_atomic_exchange_storage_u32) @internal(disable_validation__function_has_no_body)
-fn tint_symbol_20(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : u32) -> u32
+fn tint_atomicExchange_1(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : u32) -> u32
+
+struct atomic_compare_exchange_weak_ret_type_1 {
+ old_value : u32,
+ exchanged : bool,
+}
@internal(intrinsic_atomic_compare_exchange_weak_storage_u32) @internal(disable_validation__function_has_no_body)
-fn tint_symbol_21(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : u32, param_2 : u32) -> vec2<u32>
+fn tint_atomicCompareExchangeWeak_1(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : u32, param_2 : u32) -> atomic_compare_exchange_weak_ret_type_1
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
- tint_symbol(sb, 16u, 123);
- tint_symbol_1(sb, 16u);
- tint_symbol_2(sb, 16u, 123);
- tint_symbol_3(sb, 16u, 123);
- tint_symbol_4(sb, 16u, 123);
- tint_symbol_5(sb, 16u, 123);
- tint_symbol_6(sb, 16u, 123);
- tint_symbol_7(sb, 16u, 123);
- tint_symbol_8(sb, 16u, 123);
- tint_symbol_9(sb, 16u, 123);
- tint_symbol_10(sb, 16u, 123, 345);
- tint_symbol_11(sb, 20u, 123u);
- tint_symbol_12(sb, 20u);
- tint_symbol_13(sb, 20u, 123u);
- tint_symbol_14(sb, 20u, 123u);
- tint_symbol_15(sb, 20u, 123u);
- tint_symbol_16(sb, 20u, 123u);
- tint_symbol_17(sb, 20u, 123u);
- tint_symbol_18(sb, 20u, 123u);
- tint_symbol_19(sb, 20u, 123u);
- tint_symbol_20(sb, 20u, 123u);
- tint_symbol_21(sb, 20u, 123u, 345u);
+ tint_atomicStore(sb, 16u, 123);
+ tint_atomicLoad(sb, 16u);
+ tint_atomicAdd(sb, 16u, 123);
+ tint_atomicSub(sb, 16u, 123);
+ tint_atomicMax(sb, 16u, 123);
+ tint_atomicMin(sb, 16u, 123);
+ tint_atomicAnd(sb, 16u, 123);
+ tint_atomicOr(sb, 16u, 123);
+ tint_atomicXor(sb, 16u, 123);
+ tint_atomicExchange(sb, 16u, 123);
+ tint_atomicCompareExchangeWeak(sb, 16u, 123, 345);
+ tint_atomicStore_1(sb, 20u, 123u);
+ tint_atomicLoad_1(sb, 20u);
+ tint_atomicAdd_1(sb, 20u, 123u);
+ tint_atomicSub_1(sb, 20u, 123u);
+ tint_atomicMax_1(sb, 20u, 123u);
+ tint_atomicMin_1(sb, 20u, 123u);
+ tint_atomicAnd_1(sb, 20u, 123u);
+ tint_atomicOr_1(sb, 20u, 123u);
+ tint_atomicXor_1(sb, 20u, 123u);
+ tint_atomicExchange_1(sb, 20u, 123u);
+ tint_atomicCompareExchangeWeak_1(sb, 20u, 123u, 345u);
}
)";
- auto got = Run<DecomposeMemoryAccess>(src);
+ auto got = Run<DecomposeMemoryAccess>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(DecomposeMemoryAccessTest, StorageBufferAtomics_OutOfOrder) {
- auto* src = R"(
-@stage(compute) @workgroup_size(1)
+ auto* src = R"(
+@compute @workgroup_size(1)
fn main() {
atomicStore(&sb.a, 123);
atomicLoad(&sb.a);
@@ -2603,97 +2612,107 @@ struct SB {
};
)";
- auto* expect = R"(
+ auto* expect = R"(
@internal(intrinsic_atomic_store_storage_i32) @internal(disable_validation__function_has_no_body)
-fn tint_symbol(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : i32)
+fn tint_atomicStore(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : i32)
@internal(intrinsic_atomic_load_storage_i32) @internal(disable_validation__function_has_no_body)
-fn tint_symbol_1(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32) -> i32
+fn tint_atomicLoad(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32) -> i32
@internal(intrinsic_atomic_add_storage_i32) @internal(disable_validation__function_has_no_body)
-fn tint_symbol_2(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : i32) -> i32
+fn tint_atomicAdd(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : i32) -> i32
@internal(intrinsic_atomic_sub_storage_i32) @internal(disable_validation__function_has_no_body)
-fn tint_symbol_3(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : i32) -> i32
+fn tint_atomicSub(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : i32) -> i32
@internal(intrinsic_atomic_max_storage_i32) @internal(disable_validation__function_has_no_body)
-fn tint_symbol_4(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : i32) -> i32
+fn tint_atomicMax(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : i32) -> i32
@internal(intrinsic_atomic_min_storage_i32) @internal(disable_validation__function_has_no_body)
-fn tint_symbol_5(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : i32) -> i32
+fn tint_atomicMin(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : i32) -> i32
@internal(intrinsic_atomic_and_storage_i32) @internal(disable_validation__function_has_no_body)
-fn tint_symbol_6(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : i32) -> i32
+fn tint_atomicAnd(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : i32) -> i32
@internal(intrinsic_atomic_or_storage_i32) @internal(disable_validation__function_has_no_body)
-fn tint_symbol_7(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : i32) -> i32
+fn tint_atomicOr(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : i32) -> i32
@internal(intrinsic_atomic_xor_storage_i32) @internal(disable_validation__function_has_no_body)
-fn tint_symbol_8(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : i32) -> i32
+fn tint_atomicXor(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : i32) -> i32
@internal(intrinsic_atomic_exchange_storage_i32) @internal(disable_validation__function_has_no_body)
-fn tint_symbol_9(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : i32) -> i32
+fn tint_atomicExchange(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : i32) -> i32
+
+struct atomic_compare_exchange_weak_ret_type {
+ old_value : i32,
+ exchanged : bool,
+}
@internal(intrinsic_atomic_compare_exchange_weak_storage_i32) @internal(disable_validation__function_has_no_body)
-fn tint_symbol_10(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : i32, param_2 : i32) -> vec2<i32>
+fn tint_atomicCompareExchangeWeak(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : i32, param_2 : i32) -> atomic_compare_exchange_weak_ret_type
@internal(intrinsic_atomic_store_storage_u32) @internal(disable_validation__function_has_no_body)
-fn tint_symbol_11(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : u32)
+fn tint_atomicStore_1(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : u32)
@internal(intrinsic_atomic_load_storage_u32) @internal(disable_validation__function_has_no_body)
-fn tint_symbol_12(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32) -> u32
+fn tint_atomicLoad_1(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32) -> u32
@internal(intrinsic_atomic_add_storage_u32) @internal(disable_validation__function_has_no_body)
-fn tint_symbol_13(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : u32) -> u32
+fn tint_atomicAdd_1(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : u32) -> u32
@internal(intrinsic_atomic_sub_storage_u32) @internal(disable_validation__function_has_no_body)
-fn tint_symbol_14(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : u32) -> u32
+fn tint_atomicSub_1(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : u32) -> u32
@internal(intrinsic_atomic_max_storage_u32) @internal(disable_validation__function_has_no_body)
-fn tint_symbol_15(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : u32) -> u32
+fn tint_atomicMax_1(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : u32) -> u32
@internal(intrinsic_atomic_min_storage_u32) @internal(disable_validation__function_has_no_body)
-fn tint_symbol_16(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : u32) -> u32
+fn tint_atomicMin_1(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : u32) -> u32
@internal(intrinsic_atomic_and_storage_u32) @internal(disable_validation__function_has_no_body)
-fn tint_symbol_17(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : u32) -> u32
+fn tint_atomicAnd_1(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : u32) -> u32
@internal(intrinsic_atomic_or_storage_u32) @internal(disable_validation__function_has_no_body)
-fn tint_symbol_18(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : u32) -> u32
+fn tint_atomicOr_1(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : u32) -> u32
@internal(intrinsic_atomic_xor_storage_u32) @internal(disable_validation__function_has_no_body)
-fn tint_symbol_19(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : u32) -> u32
+fn tint_atomicXor_1(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : u32) -> u32
@internal(intrinsic_atomic_exchange_storage_u32) @internal(disable_validation__function_has_no_body)
-fn tint_symbol_20(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : u32) -> u32
+fn tint_atomicExchange_1(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : u32) -> u32
+
+struct atomic_compare_exchange_weak_ret_type_1 {
+ old_value : u32,
+ exchanged : bool,
+}
@internal(intrinsic_atomic_compare_exchange_weak_storage_u32) @internal(disable_validation__function_has_no_body)
-fn tint_symbol_21(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : u32, param_2 : u32) -> vec2<u32>
+fn tint_atomicCompareExchangeWeak_1(@internal(disable_validation__ignore_constructible_function_parameter) buffer : SB, offset : u32, param_1 : u32, param_2 : u32) -> atomic_compare_exchange_weak_ret_type_1
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
- tint_symbol(sb, 16u, 123);
- tint_symbol_1(sb, 16u);
- tint_symbol_2(sb, 16u, 123);
- tint_symbol_3(sb, 16u, 123);
- tint_symbol_4(sb, 16u, 123);
- tint_symbol_5(sb, 16u, 123);
- tint_symbol_6(sb, 16u, 123);
- tint_symbol_7(sb, 16u, 123);
- tint_symbol_8(sb, 16u, 123);
- tint_symbol_9(sb, 16u, 123);
- tint_symbol_10(sb, 16u, 123, 345);
- tint_symbol_11(sb, 20u, 123u);
- tint_symbol_12(sb, 20u);
- tint_symbol_13(sb, 20u, 123u);
- tint_symbol_14(sb, 20u, 123u);
- tint_symbol_15(sb, 20u, 123u);
- tint_symbol_16(sb, 20u, 123u);
- tint_symbol_17(sb, 20u, 123u);
- tint_symbol_18(sb, 20u, 123u);
- tint_symbol_19(sb, 20u, 123u);
- tint_symbol_20(sb, 20u, 123u);
- tint_symbol_21(sb, 20u, 123u, 345u);
+ tint_atomicStore(sb, 16u, 123);
+ tint_atomicLoad(sb, 16u);
+ tint_atomicAdd(sb, 16u, 123);
+ tint_atomicSub(sb, 16u, 123);
+ tint_atomicMax(sb, 16u, 123);
+ tint_atomicMin(sb, 16u, 123);
+ tint_atomicAnd(sb, 16u, 123);
+ tint_atomicOr(sb, 16u, 123);
+ tint_atomicXor(sb, 16u, 123);
+ tint_atomicExchange(sb, 16u, 123);
+ tint_atomicCompareExchangeWeak(sb, 16u, 123, 345);
+ tint_atomicStore_1(sb, 20u, 123u);
+ tint_atomicLoad_1(sb, 20u);
+ tint_atomicAdd_1(sb, 20u, 123u);
+ tint_atomicSub_1(sb, 20u, 123u);
+ tint_atomicMax_1(sb, 20u, 123u);
+ tint_atomicMin_1(sb, 20u, 123u);
+ tint_atomicAnd_1(sb, 20u, 123u);
+ tint_atomicOr_1(sb, 20u, 123u);
+ tint_atomicXor_1(sb, 20u, 123u);
+ tint_atomicExchange_1(sb, 20u, 123u);
+ tint_atomicCompareExchangeWeak_1(sb, 20u, 123u, 345u);
}
@group(0) @binding(0) var<storage, read_write> sb : SB;
@@ -2705,13 +2724,13 @@ struct SB {
}
)";
- auto got = Run<DecomposeMemoryAccess>(src);
+ auto got = Run<DecomposeMemoryAccess>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(DecomposeMemoryAccessTest, WorkgroupBufferAtomics) {
- auto* src = R"(
+ auto* src = R"(
struct S {
padding : vec4<f32>,
a : atomic<i32>,
@@ -2720,7 +2739,7 @@ struct S {
var<workgroup> w : S;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
atomicStore(&(w.a), 123);
atomicLoad(&(w.a));
@@ -2747,16 +2766,16 @@ fn main() {
}
)";
- auto* expect = src;
+ auto* expect = src;
- auto got = Run<DecomposeMemoryAccess>(src);
+ auto got = Run<DecomposeMemoryAccess>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(DecomposeMemoryAccessTest, WorkgroupBufferAtomics_OutOfOrder) {
- auto* src = R"(
-@stage(compute) @workgroup_size(1)
+ auto* src = R"(
+@compute @workgroup_size(1)
fn main() {
atomicStore(&(w.a), 123);
atomicLoad(&(w.a));
@@ -2791,11 +2810,11 @@ struct S {
}
)";
- auto* expect = src;
+ auto* expect = src;
- auto got = Run<DecomposeMemoryAccess>(src);
+ auto got = Run<DecomposeMemoryAccess>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/transform/decompose_strided_array.cc b/chromium/third_party/dawn/src/tint/transform/decompose_strided_array.cc
index 74b6903b36f..ba6252b0251 100644
--- a/chromium/third_party/dawn/src/tint/transform/decompose_strided_array.cc
+++ b/chromium/third_party/dawn/src/tint/transform/decompose_strided_array.cc
@@ -40,121 +40,115 @@ DecomposeStridedArray::DecomposeStridedArray() = default;
DecomposeStridedArray::~DecomposeStridedArray() = default;
-bool DecomposeStridedArray::ShouldRun(const Program* program,
- const DataMap&) const {
- for (auto* node : program->ASTNodes().Objects()) {
- if (auto* ast = node->As<ast::Array>()) {
- if (ast::GetAttribute<ast::StrideAttribute>(ast->attributes)) {
- return true;
- }
+bool DecomposeStridedArray::ShouldRun(const Program* program, const DataMap&) const {
+ for (auto* node : program->ASTNodes().Objects()) {
+ if (auto* ast = node->As<ast::Array>()) {
+ if (ast::GetAttribute<ast::StrideAttribute>(ast->attributes)) {
+ return true;
+ }
+ }
}
- }
- return false;
+ return false;
}
-void DecomposeStridedArray::Run(CloneContext& ctx,
- const DataMap&,
- DataMap&) const {
- const auto& sem = ctx.src->Sem();
-
- static constexpr const char* kMemberName = "el";
-
- // Maps an array type in the source program to the name of the struct wrapper
- // type in the target program.
- std::unordered_map<const sem::Array*, Symbol> decomposed;
-
-  // Find and replace all arrays with a @stride attribute with an array that has
- // the @stride removed. If the source array stride does not match the natural
- // stride for the array element type, then replace the array element type with
- // a structure, holding a single field with a @size attribute equal to the
- // array stride.
- ctx.ReplaceAll([&](const ast::Array* ast) -> const ast::Array* {
- if (auto* arr = sem.Get(ast)) {
- if (!arr->IsStrideImplicit()) {
- auto el_ty = utils::GetOrCreate(decomposed, arr, [&] {
- auto name = ctx.dst->Symbols().New("strided_arr");
- auto* member_ty = ctx.Clone(ast->type);
- auto* member = ctx.dst->Member(kMemberName, member_ty,
- {ctx.dst->MemberSize(arr->Stride())});
- ctx.dst->Structure(name, {member});
- return name;
- });
- auto* count = ctx.Clone(ast->count);
- return ctx.dst->ty.array(ctx.dst->ty.type_name(el_ty), count);
- }
- if (ast::GetAttribute<ast::StrideAttribute>(ast->attributes)) {
- // Strip the @stride attribute
- auto* ty = ctx.Clone(ast->type);
- auto* count = ctx.Clone(ast->count);
- return ctx.dst->ty.array(ty, count);
- }
- }
- return nullptr;
- });
-
-  // Find all array index-accessor expressions for arrays that have had their
- // element changed to a single field structure. These expressions are adjusted
- // to insert an additional member accessor for the single structure field.
- // Example: `arr[i]` -> `arr[i].el`
- ctx.ReplaceAll(
- [&](const ast::IndexAccessorExpression* idx) -> const ast::Expression* {
- if (auto* ty = ctx.src->TypeOf(idx->object)) {
- if (auto* arr = ty->UnwrapRef()->As<sem::Array>()) {
+void DecomposeStridedArray::Run(CloneContext& ctx, const DataMap&, DataMap&) const {
+ const auto& sem = ctx.src->Sem();
+
+ static constexpr const char* kMemberName = "el";
+
+ // Maps an array type in the source program to the name of the struct wrapper
+ // type in the target program.
+ std::unordered_map<const sem::Array*, Symbol> decomposed;
+
+    // Find and replace all arrays with a @stride attribute with an array that has
+ // the @stride removed. If the source array stride does not match the natural
+ // stride for the array element type, then replace the array element type with
+ // a structure, holding a single field with a @size attribute equal to the
+ // array stride.
+ ctx.ReplaceAll([&](const ast::Array* ast) -> const ast::Array* {
+ if (auto* arr = sem.Get(ast)) {
if (!arr->IsStrideImplicit()) {
- auto* expr = ctx.CloneWithoutTransform(idx);
- return ctx.dst->MemberAccessor(expr, kMemberName);
+ auto el_ty = utils::GetOrCreate(decomposed, arr, [&] {
+ auto name = ctx.dst->Symbols().New("strided_arr");
+ auto* member_ty = ctx.Clone(ast->type);
+ auto* member = ctx.dst->Member(kMemberName, member_ty,
+ {ctx.dst->MemberSize(arr->Stride())});
+ ctx.dst->Structure(name, {member});
+ return name;
+ });
+ auto* count = ctx.Clone(ast->count);
+ return ctx.dst->ty.array(ctx.dst->ty.type_name(el_ty), count);
+ }
+ if (ast::GetAttribute<ast::StrideAttribute>(ast->attributes)) {
+ // Strip the @stride attribute
+ auto* ty = ctx.Clone(ast->type);
+ auto* count = ctx.Clone(ast->count);
+ return ctx.dst->ty.array(ty, count);
}
- }
}
return nullptr;
- });
-
- // Find all array type constructor expressions for array types that have had
- // their element changed to a single field structure. These constructors are
- // adjusted to wrap each of the arguments with an additional constructor for
- // the new element structure type.
- // Example:
- // `@stride(32) array<i32, 3>(1, 2, 3)`
- // ->
- // `array<strided_arr, 3>(strided_arr(1), strided_arr(2), strided_arr(3))`
- ctx.ReplaceAll(
- [&](const ast::CallExpression* expr) -> const ast::Expression* {
- if (!expr->args.empty()) {
- if (auto* call = sem.Get(expr)) {
- if (auto* ctor = call->Target()->As<sem::TypeConstructor>()) {
- if (auto* arr = ctor->ReturnType()->As<sem::Array>()) {
- // Begin by cloning the array constructor type or name
- // If this is an unaliased array, this may add a new entry to
- // decomposed.
- // If this is an aliased array, decomposed should already be
- // populated with any strided aliases.
- ast::CallExpression::Target target;
- if (expr->target.type) {
- target.type = ctx.Clone(expr->target.type);
- } else {
- target.name = ctx.Clone(expr->target.name);
- }
+ });
- ast::ExpressionList args;
- if (auto it = decomposed.find(arr); it != decomposed.end()) {
- args.reserve(expr->args.size());
- for (auto* arg : expr->args) {
- args.emplace_back(
- ctx.dst->Call(it->second, ctx.Clone(arg)));
- }
- } else {
- args = ctx.Clone(expr->args);
+    // Find all array index-accessor expressions for arrays that have had their
+ // element changed to a single field structure. These expressions are adjusted
+ // to insert an additional member accessor for the single structure field.
+ // Example: `arr[i]` -> `arr[i].el`
+ ctx.ReplaceAll([&](const ast::IndexAccessorExpression* idx) -> const ast::Expression* {
+ if (auto* ty = ctx.src->TypeOf(idx->object)) {
+ if (auto* arr = ty->UnwrapRef()->As<sem::Array>()) {
+ if (!arr->IsStrideImplicit()) {
+ auto* expr = ctx.CloneWithoutTransform(idx);
+ return ctx.dst->MemberAccessor(expr, kMemberName);
+ }
+ }
+ }
+ return nullptr;
+ });
+
+ // Find all array type constructor expressions for array types that have had
+ // their element changed to a single field structure. These constructors are
+ // adjusted to wrap each of the arguments with an additional constructor for
+ // the new element structure type.
+ // Example:
+ // `@stride(32) array<i32, 3>(1, 2, 3)`
+ // ->
+ // `array<strided_arr, 3>(strided_arr(1), strided_arr(2), strided_arr(3))`
+ ctx.ReplaceAll([&](const ast::CallExpression* expr) -> const ast::Expression* {
+ if (!expr->args.empty()) {
+ if (auto* call = sem.Get(expr)->UnwrapMaterialize()->As<sem::Call>()) {
+ if (auto* ctor = call->Target()->As<sem::TypeConstructor>()) {
+ if (auto* arr = ctor->ReturnType()->As<sem::Array>()) {
+ // Begin by cloning the array constructor type or name
+ // If this is an unaliased array, this may add a new entry to
+ // decomposed.
+ // If this is an aliased array, decomposed should already be
+ // populated with any strided aliases.
+ ast::CallExpression::Target target;
+ if (expr->target.type) {
+ target.type = ctx.Clone(expr->target.type);
+ } else {
+ target.name = ctx.Clone(expr->target.name);
+ }
+
+ ast::ExpressionList args;
+ if (auto it = decomposed.find(arr); it != decomposed.end()) {
+ args.reserve(expr->args.size());
+ for (auto* arg : expr->args) {
+ args.emplace_back(ctx.dst->Call(it->second, ctx.Clone(arg)));
+ }
+ } else {
+ args = ctx.Clone(expr->args);
+ }
+
+ return target.type ? ctx.dst->Construct(target.type, args)
+ : ctx.dst->Call(target.name, args);
+ }
}
-
- return target.type ? ctx.dst->Construct(target.type, args)
- : ctx.dst->Call(target.name, args);
- }
}
- }
}
return nullptr;
- });
- ctx.Clone();
+ });
+ ctx.Clone();
}
} // namespace tint::transform
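Taken together, the three ReplaceAll passes above form a single rewrite, which the companion tests in decompose_strided_array_test.cc (the next file in this patch) spell out in full. A condensed, hypothetical sketch using only builder calls that appear in those tests (1_i needs the test file's `using namespace tint::number_suffixes`):

// Input, built programmatically because the tests construct @stride arrays
// through ProgramBuilder rather than parsing them from WGSL source:
//   var<private> arr : @stride(32) array<f32, 4u>;
//   @compute @workgroup_size(1i)
//   fn f() { let x : f32 = arr[1i]; }
ProgramBuilder b;
b.Global("arr", b.ty.array<f32, 4u>(32), ast::StorageClass::kPrivate);
b.Func("f", {}, b.ty.void_(),
       {
           b.Decl(b.Let("x", b.ty.f32(), b.IndexAccessor("arr", 1_i))),
       },
       {
           b.Stage(ast::PipelineStage::kCompute),
           b.WorkgroupSize(1_i),
       });
EXPECT_TRUE(ShouldRun<DecomposeStridedArray>(Program(std::move(b))));

// After Run<Unshadow, SimplifyPointers, DecomposeStridedArray> the module
// reads, roughly:
//   struct strided_arr { @size(32) el : f32, }
//   var<private> arr : array<strided_arr, 4u>;
//   fn f() { let x : f32 = arr[1i].el; }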
diff --git a/chromium/third_party/dawn/src/tint/transform/decompose_strided_array.h b/chromium/third_party/dawn/src/tint/transform/decompose_strided_array.h
index 505f5cbecd5..5dbaaa5f5d2 100644
--- a/chromium/third_party/dawn/src/tint/transform/decompose_strided_array.h
+++ b/chromium/third_party/dawn/src/tint/transform/decompose_strided_array.h
@@ -27,31 +27,27 @@ namespace tint::transform {
///
/// @note Depends on the following transforms to have been run first:
/// * SimplifyPointers
-class DecomposeStridedArray final
- : public Castable<DecomposeStridedArray, Transform> {
- public:
- /// Constructor
- DecomposeStridedArray();
-
- /// Destructor
- ~DecomposeStridedArray() override;
-
- /// @param program the program to inspect
- /// @param data optional extra transform-specific input data
- /// @returns true if this transform should be run for the given program
- bool ShouldRun(const Program* program,
- const DataMap& data = {}) const override;
-
- protected:
- /// Runs the transform using the CloneContext built for transforming a
- /// program. Run() is responsible for calling Clone() on the CloneContext.
- /// @param ctx the CloneContext primed with the input program and
- /// ProgramBuilder
- /// @param inputs optional extra transform-specific input data
- /// @param outputs optional extra transform-specific output data
- void Run(CloneContext& ctx,
- const DataMap& inputs,
- DataMap& outputs) const override;
+class DecomposeStridedArray final : public Castable<DecomposeStridedArray, Transform> {
+ public:
+ /// Constructor
+ DecomposeStridedArray();
+
+ /// Destructor
+ ~DecomposeStridedArray() override;
+
+ /// @param program the program to inspect
+ /// @param data optional extra transform-specific input data
+ /// @returns true if this transform should be run for the given program
+ bool ShouldRun(const Program* program, const DataMap& data = {}) const override;
+
+ protected:
+ /// Runs the transform using the CloneContext built for transforming a
+ /// program. Run() is responsible for calling Clone() on the CloneContext.
+ /// @param ctx the CloneContext primed with the input program and
+ /// ProgramBuilder
+ /// @param inputs optional extra transform-specific input data
+ /// @param outputs optional extra transform-specific output data
+ void Run(CloneContext& ctx, const DataMap& inputs, DataMap& outputs) const override;
};
} // namespace tint::transform
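The dependency note above (SimplifyPointers must run first) shows up in how the tests schedule this transform. A short orientation fragment, not from the patch, where b and expect stand for a ProgramBuilder and expectation string prepared as in the tests (for example ReadWriteViaPointerLets):

// SimplifyPointers first collapses pointer lets such as `let p = &(s.a);`, so
// DecomposeStridedArray only has to rewrite direct index and member accesses.
auto got = Run<Unshadow, SimplifyPointers, DecomposeStridedArray>(Program(std::move(b)));
EXPECT_EQ(expect, str(got));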
diff --git a/chromium/third_party/dawn/src/tint/transform/decompose_strided_array_test.cc b/chromium/third_party/dawn/src/tint/transform/decompose_strided_array_test.cc
index 98ef2c42a99..ffc66956e10 100644
--- a/chromium/third_party/dawn/src/tint/transform/decompose_strided_array_test.cc
+++ b/chromium/third_party/dawn/src/tint/transform/decompose_strided_array_test.cc
@@ -23,549 +23,517 @@
#include "src/tint/transform/test_helper.h"
#include "src/tint/transform/unshadow.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::transform {
namespace {
using DecomposeStridedArrayTest = TransformTest;
-using f32 = ProgramBuilder::f32;
TEST_F(DecomposeStridedArrayTest, ShouldRunEmptyModule) {
- ProgramBuilder b;
- EXPECT_FALSE(ShouldRun<DecomposeStridedArray>(Program(std::move(b))));
+ ProgramBuilder b;
+ EXPECT_FALSE(ShouldRun<DecomposeStridedArray>(Program(std::move(b))));
}
TEST_F(DecomposeStridedArrayTest, ShouldRunNonStridedArray) {
- // var<private> arr : array<f32, 4>
+ // var<private> arr : array<f32, 4u>
- ProgramBuilder b;
- b.Global("arr", b.ty.array<f32, 4>(), ast::StorageClass::kPrivate);
- EXPECT_FALSE(ShouldRun<DecomposeStridedArray>(Program(std::move(b))));
+ ProgramBuilder b;
+ b.Global("arr", b.ty.array<f32, 4u>(), ast::StorageClass::kPrivate);
+ EXPECT_FALSE(ShouldRun<DecomposeStridedArray>(Program(std::move(b))));
}
TEST_F(DecomposeStridedArrayTest, ShouldRunDefaultStridedArray) {
- // var<private> arr : @stride(4) array<f32, 4>
+ // var<private> arr : @stride(4) array<f32, 4u>
- ProgramBuilder b;
- b.Global("arr", b.ty.array<f32, 4>(4), ast::StorageClass::kPrivate);
- EXPECT_TRUE(ShouldRun<DecomposeStridedArray>(Program(std::move(b))));
+ ProgramBuilder b;
+ b.Global("arr", b.ty.array<f32, 4u>(4), ast::StorageClass::kPrivate);
+ EXPECT_TRUE(ShouldRun<DecomposeStridedArray>(Program(std::move(b))));
}
TEST_F(DecomposeStridedArrayTest, ShouldRunExplicitStridedArray) {
- // var<private> arr : @stride(16) array<f32, 4>
+ // var<private> arr : @stride(16) array<f32, 4u>
- ProgramBuilder b;
- b.Global("arr", b.ty.array<f32, 4>(16), ast::StorageClass::kPrivate);
- EXPECT_TRUE(ShouldRun<DecomposeStridedArray>(Program(std::move(b))));
+ ProgramBuilder b;
+ b.Global("arr", b.ty.array<f32, 4u>(16), ast::StorageClass::kPrivate);
+ EXPECT_TRUE(ShouldRun<DecomposeStridedArray>(Program(std::move(b))));
}
TEST_F(DecomposeStridedArrayTest, Empty) {
- auto* src = R"()";
- auto* expect = src;
+ auto* src = R"()";
+ auto* expect = src;
- auto got = Run<DecomposeStridedArray>(src);
+ auto got = Run<DecomposeStridedArray>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(DecomposeStridedArrayTest, PrivateDefaultStridedArray) {
- // var<private> arr : @stride(4) array<f32, 4>
- //
- // @stage(compute) @workgroup_size(1)
- // fn f() {
- // let a : @stride(4) array<f32, 4> = a;
- // let b : f32 = arr[1];
- // }
-
- ProgramBuilder b;
- b.Global("arr", b.ty.array<f32, 4>(4), ast::StorageClass::kPrivate);
- b.Func("f", {}, b.ty.void_(),
- {
- b.Decl(b.Const("a", b.ty.array<f32, 4>(4), b.Expr("arr"))),
- b.Decl(b.Const("b", b.ty.f32(), b.IndexAccessor("arr", 1))),
- },
- {
- b.Stage(ast::PipelineStage::kCompute),
- b.WorkgroupSize(1),
- });
-
- auto* expect = R"(
-var<private> arr : array<f32, 4>;
-
-@stage(compute) @workgroup_size(1)
+ // var<private> arr : @stride(4) array<f32, 4u>
+ //
+ // @compute @workgroup_size(1)
+ // fn f() {
+ // let a : @stride(4) array<f32, 4u> = a;
+ // let b : f32 = arr[1];
+ // }
+
+ ProgramBuilder b;
+ b.Global("arr", b.ty.array<f32, 4u>(4), ast::StorageClass::kPrivate);
+ b.Func("f", {}, b.ty.void_(),
+ {
+ b.Decl(b.Let("a", b.ty.array<f32, 4u>(4), b.Expr("arr"))),
+ b.Decl(b.Let("b", b.ty.f32(), b.IndexAccessor("arr", 1_i))),
+ },
+ {
+ b.Stage(ast::PipelineStage::kCompute),
+ b.WorkgroupSize(1_i),
+ });
+
+ auto* expect = R"(
+var<private> arr : array<f32, 4u>;
+
+@compute @workgroup_size(1i)
fn f() {
- let a : array<f32, 4> = arr;
- let b : f32 = arr[1];
+ let a : array<f32, 4u> = arr;
+ let b : f32 = arr[1i];
}
)";
- auto got = Run<Unshadow, SimplifyPointers, DecomposeStridedArray>(
- Program(std::move(b)));
+ auto got = Run<Unshadow, SimplifyPointers, DecomposeStridedArray>(Program(std::move(b)));
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
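The rewritten tests above also adopt Tint's typed number literals and ProgramBuilder::Let: constants are built with the _i/_u/_f suffixes pulled in by `using namespace tint::number_suffixes`, which is why the expected WGSL now prints explicit suffixes such as arr[1i], 5.0f and @workgroup_size(1i). A small reference sketch, not part of the patch, limited to calls that already appear in this file (b is a ProgramBuilder):

// Builder call (as used in the surrounding tests)               ->  WGSL it denotes
b.ty.array<f32, 4u>(4);                                          //  @stride(4) array<f32, 4u>
b.Decl(b.Let("x", b.ty.f32(), b.IndexAccessor("arr", 1_i)));     //  let x : f32 = arr[1i];
b.Assign(b.IndexAccessor(b.MemberAccessor("s", "a"), 1_i), 5_f); //  s.a[1i] = 5.0f;
b.WorkgroupSize(1_i);                                            //  @workgroup_size(1i)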
TEST_F(DecomposeStridedArrayTest, PrivateStridedArray) {
- // var<private> arr : @stride(32) array<f32, 4>
- //
- // @stage(compute) @workgroup_size(1)
- // fn f() {
- // let a : @stride(32) array<f32, 4> = a;
- // let b : f32 = arr[1];
- // }
-
- ProgramBuilder b;
- b.Global("arr", b.ty.array<f32, 4>(32), ast::StorageClass::kPrivate);
- b.Func("f", {}, b.ty.void_(),
- {
- b.Decl(b.Const("a", b.ty.array<f32, 4>(32), b.Expr("arr"))),
- b.Decl(b.Const("b", b.ty.f32(), b.IndexAccessor("arr", 1))),
- },
- {
- b.Stage(ast::PipelineStage::kCompute),
- b.WorkgroupSize(1),
- });
-
- auto* expect = R"(
+ // var<private> arr : @stride(32) array<f32, 4u>
+ //
+ // @compute @workgroup_size(1)
+ // fn f() {
+ // let a : @stride(32) array<f32, 4u> = a;
+ // let b : f32 = arr[1];
+ // }
+
+ ProgramBuilder b;
+ b.Global("arr", b.ty.array<f32, 4u>(32), ast::StorageClass::kPrivate);
+ b.Func("f", {}, b.ty.void_(),
+ {
+ b.Decl(b.Let("a", b.ty.array<f32, 4u>(32), b.Expr("arr"))),
+ b.Decl(b.Let("b", b.ty.f32(), b.IndexAccessor("arr", 1_i))),
+ },
+ {
+ b.Stage(ast::PipelineStage::kCompute),
+ b.WorkgroupSize(1_i),
+ });
+
+ auto* expect = R"(
struct strided_arr {
@size(32)
el : f32,
}
-var<private> arr : array<strided_arr, 4>;
+var<private> arr : array<strided_arr, 4u>;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1i)
fn f() {
- let a : array<strided_arr, 4> = arr;
- let b : f32 = arr[1].el;
+ let a : array<strided_arr, 4u> = arr;
+ let b : f32 = arr[1i].el;
}
)";
- auto got = Run<Unshadow, SimplifyPointers, DecomposeStridedArray>(
- Program(std::move(b)));
+ auto got = Run<Unshadow, SimplifyPointers, DecomposeStridedArray>(Program(std::move(b)));
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(DecomposeStridedArrayTest, ReadUniformStridedArray) {
- // struct S {
- // a : @stride(32) array<f32, 4>,
- // };
- // @group(0) @binding(0) var<uniform> s : S;
- //
- // @stage(compute) @workgroup_size(1)
- // fn f() {
- // let a : @stride(32) array<f32, 4> = s.a;
- // let b : f32 = s.a[1];
- // }
- ProgramBuilder b;
- auto* S = b.Structure("S", {b.Member("a", b.ty.array<f32, 4>(32))});
- b.Global("s", b.ty.Of(S), ast::StorageClass::kUniform,
- b.GroupAndBinding(0, 0));
- b.Func("f", {}, b.ty.void_(),
- {
- b.Decl(b.Const("a", b.ty.array<f32, 4>(32),
- b.MemberAccessor("s", "a"))),
- b.Decl(b.Const("b", b.ty.f32(),
- b.IndexAccessor(b.MemberAccessor("s", "a"), 1))),
- },
- {
- b.Stage(ast::PipelineStage::kCompute),
- b.WorkgroupSize(1),
- });
-
- auto* expect = R"(
+ // struct S {
+ // a : @stride(32) array<f32, 4u>,
+ // };
+ // @group(0) @binding(0) var<uniform> s : S;
+ //
+ // @compute @workgroup_size(1)
+ // fn f() {
+ // let a : @stride(32) array<f32, 4u> = s.a;
+ // let b : f32 = s.a[1];
+ // }
+ ProgramBuilder b;
+ auto* S = b.Structure("S", {b.Member("a", b.ty.array<f32, 4u>(32))});
+ b.Global("s", b.ty.Of(S), ast::StorageClass::kUniform, b.GroupAndBinding(0, 0));
+ b.Func("f", {}, b.ty.void_(),
+ {
+ b.Decl(b.Let("a", b.ty.array<f32, 4u>(32), b.MemberAccessor("s", "a"))),
+ b.Decl(b.Let("b", b.ty.f32(), b.IndexAccessor(b.MemberAccessor("s", "a"), 1_i))),
+ },
+ {
+ b.Stage(ast::PipelineStage::kCompute),
+ b.WorkgroupSize(1_i),
+ });
+
+ auto* expect = R"(
struct strided_arr {
@size(32)
el : f32,
}
struct S {
- a : array<strided_arr, 4>,
+ a : array<strided_arr, 4u>,
}
@group(0) @binding(0) var<uniform> s : S;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1i)
fn f() {
- let a : array<strided_arr, 4> = s.a;
- let b : f32 = s.a[1].el;
+ let a : array<strided_arr, 4u> = s.a;
+ let b : f32 = s.a[1i].el;
}
)";
- auto got = Run<Unshadow, SimplifyPointers, DecomposeStridedArray>(
- Program(std::move(b)));
+ auto got = Run<Unshadow, SimplifyPointers, DecomposeStridedArray>(Program(std::move(b)));
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(DecomposeStridedArrayTest, ReadUniformDefaultStridedArray) {
- // struct S {
- // a : @stride(16) array<vec4<f32>, 4>,
- // };
- // @group(0) @binding(0) var<uniform> s : S;
- //
- // @stage(compute) @workgroup_size(1)
- // fn f() {
- // let a : @stride(16) array<vec4<f32>, 4> = s.a;
- // let b : f32 = s.a[1][2];
- // }
- ProgramBuilder b;
- auto* S =
- b.Structure("S", {b.Member("a", b.ty.array(b.ty.vec4<f32>(), 4, 16))});
- b.Global("s", b.ty.Of(S), ast::StorageClass::kUniform,
- b.GroupAndBinding(0, 0));
- b.Func("f", {}, b.ty.void_(),
- {
- b.Decl(b.Const("a", b.ty.array(b.ty.vec4<f32>(), 4, 16),
- b.MemberAccessor("s", "a"))),
- b.Decl(b.Const(
- "b", b.ty.f32(),
- b.IndexAccessor(b.IndexAccessor(b.MemberAccessor("s", "a"), 1),
- 2))),
- },
- {
- b.Stage(ast::PipelineStage::kCompute),
- b.WorkgroupSize(1),
- });
-
- auto* expect =
- R"(
+ // struct S {
+ // a : @stride(16) array<vec4<f32>, 4u>,
+ // };
+ // @group(0) @binding(0) var<uniform> s : S;
+ //
+ // @compute @workgroup_size(1)
+ // fn f() {
+ // let a : @stride(16) array<vec4<f32>, 4u> = s.a;
+ // let b : f32 = s.a[1][2];
+ // }
+ ProgramBuilder b;
+ auto* S = b.Structure("S", {b.Member("a", b.ty.array(b.ty.vec4<f32>(), 4_u, 16))});
+ b.Global("s", b.ty.Of(S), ast::StorageClass::kUniform, b.GroupAndBinding(0, 0));
+ b.Func(
+ "f", {}, b.ty.void_(),
+ {
+ b.Decl(b.Let("a", b.ty.array(b.ty.vec4<f32>(), 4_u, 16), b.MemberAccessor("s", "a"))),
+ b.Decl(b.Let("b", b.ty.f32(),
+ b.IndexAccessor(b.IndexAccessor(b.MemberAccessor("s", "a"), 1_i), 2_i))),
+ },
+ {
+ b.Stage(ast::PipelineStage::kCompute),
+ b.WorkgroupSize(1_i),
+ });
+
+ auto* expect =
+ R"(
struct S {
- a : array<vec4<f32>, 4>,
+ a : array<vec4<f32>, 4u>,
}
@group(0) @binding(0) var<uniform> s : S;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1i)
fn f() {
- let a : array<vec4<f32>, 4> = s.a;
- let b : f32 = s.a[1][2];
+ let a : array<vec4<f32>, 4u> = s.a;
+ let b : f32 = s.a[1i][2i];
}
)";
- auto got = Run<Unshadow, SimplifyPointers, DecomposeStridedArray>(
- Program(std::move(b)));
+ auto got = Run<Unshadow, SimplifyPointers, DecomposeStridedArray>(Program(std::move(b)));
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(DecomposeStridedArrayTest, ReadStorageStridedArray) {
- // struct S {
- // a : @stride(32) array<f32, 4>,
- // };
- // @group(0) @binding(0) var<storage> s : S;
- //
- // @stage(compute) @workgroup_size(1)
- // fn f() {
- // let a : @stride(32) array<f32, 4> = s.a;
- // let b : f32 = s.a[1];
- // }
- ProgramBuilder b;
- auto* S = b.Structure("S", {b.Member("a", b.ty.array<f32, 4>(32))});
- b.Global("s", b.ty.Of(S), ast::StorageClass::kStorage,
- b.GroupAndBinding(0, 0));
- b.Func("f", {}, b.ty.void_(),
- {
- b.Decl(b.Const("a", b.ty.array<f32, 4>(32),
- b.MemberAccessor("s", "a"))),
- b.Decl(b.Const("b", b.ty.f32(),
- b.IndexAccessor(b.MemberAccessor("s", "a"), 1))),
- },
- {
- b.Stage(ast::PipelineStage::kCompute),
- b.WorkgroupSize(1),
- });
-
- auto* expect = R"(
+ // struct S {
+ // a : @stride(32) array<f32, 4u>,
+ // };
+ // @group(0) @binding(0) var<storage> s : S;
+ //
+ // @compute @workgroup_size(1)
+ // fn f() {
+ // let a : @stride(32) array<f32, 4u> = s.a;
+ // let b : f32 = s.a[1];
+ // }
+ ProgramBuilder b;
+ auto* S = b.Structure("S", {b.Member("a", b.ty.array<f32, 4u>(32))});
+ b.Global("s", b.ty.Of(S), ast::StorageClass::kStorage, b.GroupAndBinding(0, 0));
+ b.Func("f", {}, b.ty.void_(),
+ {
+ b.Decl(b.Let("a", b.ty.array<f32, 4u>(32), b.MemberAccessor("s", "a"))),
+ b.Decl(b.Let("b", b.ty.f32(), b.IndexAccessor(b.MemberAccessor("s", "a"), 1_i))),
+ },
+ {
+ b.Stage(ast::PipelineStage::kCompute),
+ b.WorkgroupSize(1_i),
+ });
+
+ auto* expect = R"(
struct strided_arr {
@size(32)
el : f32,
}
struct S {
- a : array<strided_arr, 4>,
+ a : array<strided_arr, 4u>,
}
@group(0) @binding(0) var<storage> s : S;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1i)
fn f() {
- let a : array<strided_arr, 4> = s.a;
- let b : f32 = s.a[1].el;
+ let a : array<strided_arr, 4u> = s.a;
+ let b : f32 = s.a[1i].el;
}
)";
- auto got = Run<Unshadow, SimplifyPointers, DecomposeStridedArray>(
- Program(std::move(b)));
+ auto got = Run<Unshadow, SimplifyPointers, DecomposeStridedArray>(Program(std::move(b)));
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(DecomposeStridedArrayTest, ReadStorageDefaultStridedArray) {
- // struct S {
- // a : @stride(4) array<f32, 4>,
- // };
- // @group(0) @binding(0) var<storage> s : S;
- //
- // @stage(compute) @workgroup_size(1)
- // fn f() {
- // let a : @stride(4) array<f32, 4> = s.a;
- // let b : f32 = s.a[1];
- // }
- ProgramBuilder b;
- auto* S = b.Structure("S", {b.Member("a", b.ty.array<f32, 4>(4))});
- b.Global("s", b.ty.Of(S), ast::StorageClass::kStorage,
- b.GroupAndBinding(0, 0));
- b.Func("f", {}, b.ty.void_(),
- {
- b.Decl(b.Const("a", b.ty.array<f32, 4>(4),
- b.MemberAccessor("s", "a"))),
- b.Decl(b.Const("b", b.ty.f32(),
- b.IndexAccessor(b.MemberAccessor("s", "a"), 1))),
- },
- {
- b.Stage(ast::PipelineStage::kCompute),
- b.WorkgroupSize(1),
- });
-
- auto* expect = R"(
+ // struct S {
+ // a : @stride(4) array<f32, 4u>,
+ // };
+ // @group(0) @binding(0) var<storage> s : S;
+ //
+ // @compute @workgroup_size(1)
+ // fn f() {
+ // let a : @stride(4) array<f32, 4u> = s.a;
+ // let b : f32 = s.a[1];
+ // }
+ ProgramBuilder b;
+ auto* S = b.Structure("S", {b.Member("a", b.ty.array<f32, 4u>(4))});
+ b.Global("s", b.ty.Of(S), ast::StorageClass::kStorage, b.GroupAndBinding(0, 0));
+ b.Func("f", {}, b.ty.void_(),
+ {
+ b.Decl(b.Let("a", b.ty.array<f32, 4u>(4), b.MemberAccessor("s", "a"))),
+ b.Decl(b.Let("b", b.ty.f32(), b.IndexAccessor(b.MemberAccessor("s", "a"), 1_i))),
+ },
+ {
+ b.Stage(ast::PipelineStage::kCompute),
+ b.WorkgroupSize(1_i),
+ });
+
+ auto* expect = R"(
struct S {
- a : array<f32, 4>,
+ a : array<f32, 4u>,
}
@group(0) @binding(0) var<storage> s : S;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1i)
fn f() {
- let a : array<f32, 4> = s.a;
- let b : f32 = s.a[1];
+ let a : array<f32, 4u> = s.a;
+ let b : f32 = s.a[1i];
}
)";
- auto got = Run<Unshadow, SimplifyPointers, DecomposeStridedArray>(
- Program(std::move(b)));
+ auto got = Run<Unshadow, SimplifyPointers, DecomposeStridedArray>(Program(std::move(b)));
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(DecomposeStridedArrayTest, WriteStorageStridedArray) {
- // struct S {
- // a : @stride(32) array<f32, 4>,
- // };
- // @group(0) @binding(0) var<storage, read_write> s : S;
- //
- // @stage(compute) @workgroup_size(1)
- // fn f() {
- // s.a = @stride(32) array<f32, 4>();
- // s.a = @stride(32) array<f32, 4>(1.0, 2.0, 3.0, 4.0);
- // s.a[1] = 5.0;
- // }
- ProgramBuilder b;
- auto* S = b.Structure("S", {b.Member("a", b.ty.array<f32, 4>(32))});
- b.Global("s", b.ty.Of(S), ast::StorageClass::kStorage,
- ast::Access::kReadWrite, b.GroupAndBinding(0, 0));
- b.Func(
- "f", {}, b.ty.void_(),
- {
- b.Assign(b.MemberAccessor("s", "a"),
- b.Construct(b.ty.array<f32, 4>(32))),
- b.Assign(b.MemberAccessor("s", "a"),
- b.Construct(b.ty.array<f32, 4>(32), 1.0f, 2.0f, 3.0f, 4.0f)),
- b.Assign(b.IndexAccessor(b.MemberAccessor("s", "a"), 1), 5.0f),
- },
- {
- b.Stage(ast::PipelineStage::kCompute),
- b.WorkgroupSize(1),
- });
-
- auto* expect =
- R"(
+ // struct S {
+ // a : @stride(32) array<f32, 4u>,
+ // };
+ // @group(0) @binding(0) var<storage, read_write> s : S;
+ //
+ // @compute @workgroup_size(1)
+ // fn f() {
+ // s.a = @stride(32) array<f32, 4u>();
+ // s.a = @stride(32) array<f32, 4u>(1.0, 2.0, 3.0, 4.0);
+ // s.a[1i] = 5.0;
+ // }
+ ProgramBuilder b;
+ auto* S = b.Structure("S", {b.Member("a", b.ty.array<f32, 4u>(32))});
+ b.Global("s", b.ty.Of(S), ast::StorageClass::kStorage, ast::Access::kReadWrite,
+ b.GroupAndBinding(0, 0));
+ b.Func("f", {}, b.ty.void_(),
+ {
+ b.Assign(b.MemberAccessor("s", "a"), b.Construct(b.ty.array<f32, 4u>(32))),
+ b.Assign(b.MemberAccessor("s", "a"),
+ b.Construct(b.ty.array<f32, 4u>(32), 1_f, 2_f, 3_f, 4_f)),
+ b.Assign(b.IndexAccessor(b.MemberAccessor("s", "a"), 1_i), 5_f),
+ },
+ {
+ b.Stage(ast::PipelineStage::kCompute),
+ b.WorkgroupSize(1_i),
+ });
+
+ auto* expect =
+ R"(
struct strided_arr {
@size(32)
el : f32,
}
struct S {
- a : array<strided_arr, 4>,
+ a : array<strided_arr, 4u>,
}
@group(0) @binding(0) var<storage, read_write> s : S;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1i)
fn f() {
- s.a = array<strided_arr, 4>();
- s.a = array<strided_arr, 4>(strided_arr(1.0), strided_arr(2.0), strided_arr(3.0), strided_arr(4.0));
- s.a[1].el = 5.0;
+ s.a = array<strided_arr, 4u>();
+ s.a = array<strided_arr, 4u>(strided_arr(1.0f), strided_arr(2.0f), strided_arr(3.0f), strided_arr(4.0f));
+ s.a[1i].el = 5.0f;
}
)";
- auto got = Run<Unshadow, SimplifyPointers, DecomposeStridedArray>(
- Program(std::move(b)));
+ auto got = Run<Unshadow, SimplifyPointers, DecomposeStridedArray>(Program(std::move(b)));
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(DecomposeStridedArrayTest, WriteStorageDefaultStridedArray) {
- // struct S {
- // a : @stride(4) array<f32, 4>,
- // };
- // @group(0) @binding(0) var<storage, read_write> s : S;
- //
- // @stage(compute) @workgroup_size(1)
- // fn f() {
- // s.a = @stride(4) array<f32, 4>();
- // s.a = @stride(4) array<f32, 4>(1.0, 2.0, 3.0, 4.0);
- // s.a[1] = 5.0;
- // }
- ProgramBuilder b;
- auto* S = b.Structure("S", {b.Member("a", b.ty.array<f32, 4>(4))});
- b.Global("s", b.ty.Of(S), ast::StorageClass::kStorage,
- ast::Access::kReadWrite, b.GroupAndBinding(0, 0));
- b.Func(
- "f", {}, b.ty.void_(),
- {
- b.Assign(b.MemberAccessor("s", "a"),
- b.Construct(b.ty.array<f32, 4>(4))),
- b.Assign(b.MemberAccessor("s", "a"),
- b.Construct(b.ty.array<f32, 4>(4), 1.0f, 2.0f, 3.0f, 4.0f)),
- b.Assign(b.IndexAccessor(b.MemberAccessor("s", "a"), 1), 5.0f),
- },
- {
- b.Stage(ast::PipelineStage::kCompute),
- b.WorkgroupSize(1),
- });
-
- auto* expect =
- R"(
+ // struct S {
+ // a : @stride(4) array<f32, 4u>,
+ // };
+ // @group(0) @binding(0) var<storage, read_write> s : S;
+ //
+ // @compute @workgroup_size(1)
+ // fn f() {
+ // s.a = @stride(4) array<f32, 4u>();
+ // s.a = @stride(4) array<f32, 4u>(1.0, 2.0, 3.0, 4.0);
+ // s.a[1] = 5.0;
+ // }
+ ProgramBuilder b;
+ auto* S = b.Structure("S", {b.Member("a", b.ty.array<f32, 4u>(4))});
+ b.Global("s", b.ty.Of(S), ast::StorageClass::kStorage, ast::Access::kReadWrite,
+ b.GroupAndBinding(0, 0));
+ b.Func("f", {}, b.ty.void_(),
+ {
+ b.Assign(b.MemberAccessor("s", "a"), b.Construct(b.ty.array<f32, 4u>(4))),
+ b.Assign(b.MemberAccessor("s", "a"),
+ b.Construct(b.ty.array<f32, 4u>(4), 1_f, 2_f, 3_f, 4_f)),
+ b.Assign(b.IndexAccessor(b.MemberAccessor("s", "a"), 1_i), 5_f),
+ },
+ {
+ b.Stage(ast::PipelineStage::kCompute),
+ b.WorkgroupSize(1_i),
+ });
+
+ auto* expect =
+ R"(
struct S {
- a : array<f32, 4>,
+ a : array<f32, 4u>,
}
@group(0) @binding(0) var<storage, read_write> s : S;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1i)
fn f() {
- s.a = array<f32, 4>();
- s.a = array<f32, 4>(1.0, 2.0, 3.0, 4.0);
- s.a[1] = 5.0;
+ s.a = array<f32, 4u>();
+ s.a = array<f32, 4u>(1.0f, 2.0f, 3.0f, 4.0f);
+ s.a[1i] = 5.0f;
}
)";
- auto got = Run<Unshadow, SimplifyPointers, DecomposeStridedArray>(
- Program(std::move(b)));
+ auto got = Run<Unshadow, SimplifyPointers, DecomposeStridedArray>(Program(std::move(b)));
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(DecomposeStridedArrayTest, ReadWriteViaPointerLets) {
- // struct S {
- // a : @stride(32) array<f32, 4>,
- // };
- // @group(0) @binding(0) var<storage, read_write> s : S;
- //
- // @stage(compute) @workgroup_size(1)
- // fn f() {
- // let a = &s.a;
- // let b = &*&*(a);
- // let c = *b;
- // let d = (*b)[1];
- // (*b) = @stride(32) array<f32, 4>(1.0, 2.0, 3.0, 4.0);
- // (*b)[1] = 5.0;
- // }
- ProgramBuilder b;
- auto* S = b.Structure("S", {b.Member("a", b.ty.array<f32, 4>(32))});
- b.Global("s", b.ty.Of(S), ast::StorageClass::kStorage,
- ast::Access::kReadWrite, b.GroupAndBinding(0, 0));
- b.Func("f", {}, b.ty.void_(),
- {
- b.Decl(b.Const("a", nullptr,
- b.AddressOf(b.MemberAccessor("s", "a")))),
- b.Decl(b.Const("b", nullptr,
- b.AddressOf(b.Deref(b.AddressOf(b.Deref("a")))))),
- b.Decl(b.Const("c", nullptr, b.Deref("b"))),
- b.Decl(b.Const("d", nullptr, b.IndexAccessor(b.Deref("b"), 1))),
- b.Assign(b.Deref("b"), b.Construct(b.ty.array<f32, 4>(32), 1.0f,
- 2.0f, 3.0f, 4.0f)),
- b.Assign(b.IndexAccessor(b.Deref("b"), 1), 5.0f),
- },
- {
- b.Stage(ast::PipelineStage::kCompute),
- b.WorkgroupSize(1),
- });
-
- auto* expect =
- R"(
+ // struct S {
+ // a : @stride(32) array<f32, 4u>,
+ // };
+ // @group(0) @binding(0) var<storage, read_write> s : S;
+ //
+ // @compute @workgroup_size(1)
+ // fn f() {
+ // let a = &s.a;
+ // let b = &*&*(a);
+ // let c = *b;
+ // let d = (*b)[1];
+ // (*b) = @stride(32) array<f32, 4u>(1.0, 2.0, 3.0, 4.0);
+ // (*b)[1] = 5.0;
+ // }
+ ProgramBuilder b;
+ auto* S = b.Structure("S", {b.Member("a", b.ty.array<f32, 4u>(32))});
+ b.Global("s", b.ty.Of(S), ast::StorageClass::kStorage, ast::Access::kReadWrite,
+ b.GroupAndBinding(0, 0));
+ b.Func("f", {}, b.ty.void_(),
+ {
+ b.Decl(b.Let("a", nullptr, b.AddressOf(b.MemberAccessor("s", "a")))),
+ b.Decl(b.Let("b", nullptr, b.AddressOf(b.Deref(b.AddressOf(b.Deref("a")))))),
+ b.Decl(b.Let("c", nullptr, b.Deref("b"))),
+ b.Decl(b.Let("d", nullptr, b.IndexAccessor(b.Deref("b"), 1_i))),
+ b.Assign(b.Deref("b"), b.Construct(b.ty.array<f32, 4u>(32), 1_f, 2_f, 3_f, 4_f)),
+ b.Assign(b.IndexAccessor(b.Deref("b"), 1_i), 5_f),
+ },
+ {
+ b.Stage(ast::PipelineStage::kCompute),
+ b.WorkgroupSize(1_i),
+ });
+
+ auto* expect =
+ R"(
struct strided_arr {
@size(32)
el : f32,
}
struct S {
- a : array<strided_arr, 4>,
+ a : array<strided_arr, 4u>,
}
@group(0) @binding(0) var<storage, read_write> s : S;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1i)
fn f() {
let c = s.a;
- let d = s.a[1].el;
- s.a = array<strided_arr, 4>(strided_arr(1.0), strided_arr(2.0), strided_arr(3.0), strided_arr(4.0));
- s.a[1].el = 5.0;
+ let d = s.a[1i].el;
+ s.a = array<strided_arr, 4u>(strided_arr(1.0f), strided_arr(2.0f), strided_arr(3.0f), strided_arr(4.0f));
+ s.a[1i].el = 5.0f;
}
)";
- auto got = Run<Unshadow, SimplifyPointers, DecomposeStridedArray>(
- Program(std::move(b)));
+ auto got = Run<Unshadow, SimplifyPointers, DecomposeStridedArray>(Program(std::move(b)));
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(DecomposeStridedArrayTest, PrivateAliasedStridedArray) {
- // type ARR = @stride(32) array<f32, 4>;
- // struct S {
- // a : ARR,
- // };
- // @group(0) @binding(0) var<storage, read_write> s : S;
- //
- // @stage(compute) @workgroup_size(1)
- // fn f() {
- // let a : ARR = s.a;
- // let b : f32 = s.a[1];
- // s.a = ARR();
- // s.a = ARR(1.0, 2.0, 3.0, 4.0);
- // s.a[1] = 5.0;
- // }
- ProgramBuilder b;
- b.Alias("ARR", b.ty.array<f32, 4>(32));
- auto* S = b.Structure("S", {b.Member("a", b.ty.type_name("ARR"))});
- b.Global("s", b.ty.Of(S), ast::StorageClass::kStorage,
- ast::Access::kReadWrite, b.GroupAndBinding(0, 0));
- b.Func(
- "f", {}, b.ty.void_(),
- {
- b.Decl(
- b.Const("a", b.ty.type_name("ARR"), b.MemberAccessor("s", "a"))),
- b.Decl(b.Const("b", b.ty.f32(),
- b.IndexAccessor(b.MemberAccessor("s", "a"), 1))),
- b.Assign(b.MemberAccessor("s", "a"),
- b.Construct(b.ty.type_name("ARR"))),
- b.Assign(b.MemberAccessor("s", "a"),
- b.Construct(b.ty.type_name("ARR"), 1.0f, 2.0f, 3.0f, 4.0f)),
- b.Assign(b.IndexAccessor(b.MemberAccessor("s", "a"), 1), 5.0f),
- },
- {
- b.Stage(ast::PipelineStage::kCompute),
- b.WorkgroupSize(1),
- });
-
- auto* expect = R"(
+ // type ARR = @stride(32) array<f32, 4u>;
+ // struct S {
+ // a : ARR,
+ // };
+ // @group(0) @binding(0) var<storage, read_write> s : S;
+ //
+ // @compute @workgroup_size(1)
+ // fn f() {
+ // let a : ARR = s.a;
+ // let b : f32 = s.a[1];
+ // s.a = ARR();
+ // s.a = ARR(1.0, 2.0, 3.0, 4.0);
+ // s.a[1] = 5.0;
+ // }
+ ProgramBuilder b;
+ b.Alias("ARR", b.ty.array<f32, 4u>(32));
+ auto* S = b.Structure("S", {b.Member("a", b.ty.type_name("ARR"))});
+ b.Global("s", b.ty.Of(S), ast::StorageClass::kStorage, ast::Access::kReadWrite,
+ b.GroupAndBinding(0, 0));
+ b.Func("f", {}, b.ty.void_(),
+ {
+ b.Decl(b.Let("a", b.ty.type_name("ARR"), b.MemberAccessor("s", "a"))),
+ b.Decl(b.Let("b", b.ty.f32(), b.IndexAccessor(b.MemberAccessor("s", "a"), 1_i))),
+ b.Assign(b.MemberAccessor("s", "a"), b.Construct(b.ty.type_name("ARR"))),
+ b.Assign(b.MemberAccessor("s", "a"),
+ b.Construct(b.ty.type_name("ARR"), 1_f, 2_f, 3_f, 4_f)),
+ b.Assign(b.IndexAccessor(b.MemberAccessor("s", "a"), 1_i), 5_f),
+ },
+ {
+ b.Stage(ast::PipelineStage::kCompute),
+ b.WorkgroupSize(1_i),
+ });
+
+ auto* expect = R"(
struct strided_arr {
@size(32)
el : f32,
}
-type ARR = array<strided_arr, 4>;
+type ARR = array<strided_arr, 4u>;
struct S {
a : ARR,
@@ -573,102 +541,99 @@ struct S {
@group(0) @binding(0) var<storage, read_write> s : S;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1i)
fn f() {
let a : ARR = s.a;
- let b : f32 = s.a[1].el;
+ let b : f32 = s.a[1i].el;
s.a = ARR();
- s.a = ARR(strided_arr(1.0), strided_arr(2.0), strided_arr(3.0), strided_arr(4.0));
- s.a[1].el = 5.0;
+ s.a = ARR(strided_arr(1.0f), strided_arr(2.0f), strided_arr(3.0f), strided_arr(4.0f));
+ s.a[1i].el = 5.0f;
}
)";
- auto got = Run<Unshadow, SimplifyPointers, DecomposeStridedArray>(
- Program(std::move(b)));
+ auto got = Run<Unshadow, SimplifyPointers, DecomposeStridedArray>(Program(std::move(b)));
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(DecomposeStridedArrayTest, PrivateNestedStridedArray) {
- // type ARR_A = @stride(8) array<f32, 2>;
- // type ARR_B = @stride(128) array<@stride(16) array<ARR_A, 3>, 4>;
- // struct S {
- // a : ARR_B,
- // };
- // @group(0) @binding(0) var<storage, read_write> s : S;
- //
- // @stage(compute) @workgroup_size(1)
- // fn f() {
- // let a : ARR_B = s.a;
- // let b : array<@stride(8) array<f32, 2>, 3> = s.a[3];
- // let c = s.a[3][2];
- // let d = s.a[3][2][1];
- // s.a = ARR_B();
- // s.a[3][2][1] = 5.0;
- // }
-
- ProgramBuilder b;
- b.Alias("ARR_A", b.ty.array<f32, 2>(8));
- b.Alias("ARR_B",
- b.ty.array( //
- b.ty.array(b.ty.type_name("ARR_A"), 3, 16), //
- 4, 128));
- auto* S = b.Structure("S", {b.Member("a", b.ty.type_name("ARR_B"))});
- b.Global("s", b.ty.Of(S), ast::StorageClass::kStorage,
- ast::Access::kReadWrite, b.GroupAndBinding(0, 0));
- b.Func("f", {}, b.ty.void_(),
- {
- b.Decl(b.Const("a", b.ty.type_name("ARR_B"),
- b.MemberAccessor("s", "a"))),
- b.Decl(b.Const("b", b.ty.array(b.ty.type_name("ARR_A"), 3, 16),
+ // type ARR_A = @stride(8) array<f32, 2u>;
+ // type ARR_B = @stride(128) array<@stride(16) array<ARR_A, 3u>, 4u>;
+ // struct S {
+ // a : ARR_B,
+ // };
+ // @group(0) @binding(0) var<storage, read_write> s : S;
+ //
+ // @compute @workgroup_size(1)
+ // fn f() {
+ // let a : ARR_B = s.a;
+ // let b : array<@stride(8) array<f32, 2u>, 3u> = s.a[3];
+ // let c = s.a[3][2];
+ // let d = s.a[3][2][1];
+ // s.a = ARR_B();
+ // s.a[3][2][1] = 5.0;
+ // }
+
+ ProgramBuilder b;
+ b.Alias("ARR_A", b.ty.array<f32, 2>(8));
+ b.Alias("ARR_B",
+ b.ty.array( //
+ b.ty.array(b.ty.type_name("ARR_A"), 3_u, 16), //
+ 4_u, 128));
+ auto* S = b.Structure("S", {b.Member("a", b.ty.type_name("ARR_B"))});
+ b.Global("s", b.ty.Of(S), ast::StorageClass::kStorage, ast::Access::kReadWrite,
+ b.GroupAndBinding(0, 0));
+ b.Func("f", {}, b.ty.void_(),
+ {
+ b.Decl(b.Let("a", b.ty.type_name("ARR_B"), b.MemberAccessor("s", "a"))),
+ b.Decl(b.Let("b", b.ty.array(b.ty.type_name("ARR_A"), 3_u, 16),
b.IndexAccessor( //
b.MemberAccessor("s", "a"), //
- 3))),
- b.Decl(b.Const("c", b.ty.type_name("ARR_A"),
+ 3_i))),
+ b.Decl(b.Let("c", b.ty.type_name("ARR_A"),
b.IndexAccessor( //
b.IndexAccessor( //
b.MemberAccessor("s", "a"), //
- 3),
- 2))),
- b.Decl(b.Const("d", b.ty.f32(),
+ 3_i),
+ 2_i))),
+ b.Decl(b.Let("d", b.ty.f32(),
b.IndexAccessor( //
b.IndexAccessor( //
b.IndexAccessor( //
b.MemberAccessor("s", "a"), //
- 3),
- 2),
- 1))),
- b.Assign(b.MemberAccessor("s", "a"),
- b.Construct(b.ty.type_name("ARR_B"))),
- b.Assign(b.IndexAccessor( //
- b.IndexAccessor( //
- b.IndexAccessor( //
- b.MemberAccessor("s", "a"), //
- 3),
- 2),
- 1),
- 5.0f),
- },
- {
- b.Stage(ast::PipelineStage::kCompute),
- b.WorkgroupSize(1),
- });
-
- auto* expect =
- R"(
+ 3_i),
+ 2_i),
+ 1_i))),
+ b.Assign(b.MemberAccessor("s", "a"), b.Construct(b.ty.type_name("ARR_B"))),
+ b.Assign(b.IndexAccessor( //
+ b.IndexAccessor( //
+ b.IndexAccessor( //
+ b.MemberAccessor("s", "a"), //
+ 3_i),
+ 2_i),
+ 1_i),
+ 5_f),
+ },
+ {
+ b.Stage(ast::PipelineStage::kCompute),
+ b.WorkgroupSize(1_i),
+ });
+
+ auto* expect =
+ R"(
struct strided_arr {
@size(8)
el : f32,
}
-type ARR_A = array<strided_arr, 2>;
+type ARR_A = array<strided_arr, 2u>;
struct strided_arr_1 {
@size(128)
- el : array<ARR_A, 3>,
+ el : array<ARR_A, 3u>,
}
-type ARR_B = array<strided_arr_1, 4>;
+type ARR_B = array<strided_arr_1, 4u>;
struct S {
a : ARR_B,
@@ -676,21 +641,20 @@ struct S {
@group(0) @binding(0) var<storage, read_write> s : S;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1i)
fn f() {
let a : ARR_B = s.a;
- let b : array<ARR_A, 3> = s.a[3].el;
- let c : ARR_A = s.a[3].el[2];
- let d : f32 = s.a[3].el[2][1].el;
+ let b : array<ARR_A, 3u> = s.a[3i].el;
+ let c : ARR_A = s.a[3i].el[2i];
+ let d : f32 = s.a[3i].el[2i][1i].el;
s.a = ARR_B();
- s.a[3].el[2][1].el = 5.0;
+ s.a[3i].el[2i][1i].el = 5.0f;
}
)";
- auto got = Run<Unshadow, SimplifyPointers, DecomposeStridedArray>(
- Program(std::move(b)));
+ auto got = Run<Unshadow, SimplifyPointers, DecomposeStridedArray>(Program(std::move(b)));
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
} // namespace
} // namespace tint::transform
diff --git a/chromium/third_party/dawn/src/tint/transform/decompose_strided_matrix.cc b/chromium/third_party/dawn/src/tint/transform/decompose_strided_matrix.cc
index fd7194d2f15..4f9d6c8db9e 100644
--- a/chromium/third_party/dawn/src/tint/transform/decompose_strided_matrix.cc
+++ b/chromium/third_party/dawn/src/tint/transform/decompose_strided_matrix.cc
@@ -32,28 +32,25 @@ namespace {
/// MatrixInfo describes a matrix member with a custom stride
struct MatrixInfo {
- /// The stride in bytes between columns of the matrix
- uint32_t stride = 0;
- /// The type of the matrix
- const sem::Matrix* matrix = nullptr;
-
-  /// @returns a new ast::Array that holds one column vector per column of the
-  /// matrix.
- const ast::Array* array(ProgramBuilder* b) const {
- return b->ty.array(b->ty.vec<ProgramBuilder::f32>(matrix->rows()),
- matrix->columns(), stride);
- }
-
- /// Equality operator
- bool operator==(const MatrixInfo& info) const {
- return stride == info.stride && matrix == info.matrix;
- }
- /// Hash function
- struct Hasher {
- size_t operator()(const MatrixInfo& t) const {
- return utils::Hash(t.stride, t.matrix);
+ /// The stride in bytes between columns of the matrix
+ uint32_t stride = 0;
+ /// The type of the matrix
+ const sem::Matrix* matrix = nullptr;
+
+    /// @returns a new ast::Array that holds one column vector per column of the
+    /// matrix.
+ const ast::Array* array(ProgramBuilder* b) const {
+ return b->ty.array(b->ty.vec<f32>(matrix->rows()), u32(matrix->columns()), stride);
}
- };
+
+ /// Equality operator
+ bool operator==(const MatrixInfo& info) const {
+ return stride == info.stride && matrix == info.matrix;
+ }
+ /// Hash function
+ struct Hasher {
+ size_t operator()(const MatrixInfo& t) const { return utils::Hash(t.stride, t.matrix); }
+ };
};
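A hypothetical usage sketch for MatrixInfo, not from the patch, mirroring how Run() consumes it further down. Here matrix is assumed to be a const sem::Matrix* describing a mat2x3<f32> member declared with @stride(32), and b is the destination ProgramBuilder*:

// The replacement holds one column vector per matrix column, so for the
// assumed member it is @stride(32) array<vec3<f32>, 2u>.
MatrixInfo info{32u, matrix};
const ast::Array* replacement_type = info.array(b);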
/// Return type of the callback function of GatherCustomStrideMatrixMembers
@@ -71,33 +68,33 @@ enum GatherResult { kContinue, kStop };
/// scanning will continue.
template <typename F>
void GatherCustomStrideMatrixMembers(const Program* program, F&& callback) {
- for (auto* node : program->ASTNodes().Objects()) {
- if (auto* str = node->As<ast::Struct>()) {
- auto* str_ty = program->Sem().Get(str);
- if (!str_ty->UsedAs(ast::StorageClass::kUniform) &&
- !str_ty->UsedAs(ast::StorageClass::kStorage)) {
- continue;
- }
- for (auto* member : str_ty->Members()) {
- auto* matrix = member->Type()->As<sem::Matrix>();
- if (!matrix) {
- continue;
- }
- auto* attr = ast::GetAttribute<ast::StrideAttribute>(
- member->Declaration()->attributes);
- if (!attr) {
- continue;
- }
- uint32_t stride = attr->stride;
- if (matrix->ColumnStride() == stride) {
- continue;
- }
- if (callback(member, matrix, stride) == GatherResult::kStop) {
- return;
+ for (auto* node : program->ASTNodes().Objects()) {
+ if (auto* str = node->As<ast::Struct>()) {
+ auto* str_ty = program->Sem().Get(str);
+ if (!str_ty->UsedAs(ast::StorageClass::kUniform) &&
+ !str_ty->UsedAs(ast::StorageClass::kStorage)) {
+ continue;
+ }
+ for (auto* member : str_ty->Members()) {
+ auto* matrix = member->Type()->As<sem::Matrix>();
+ if (!matrix) {
+ continue;
+ }
+ auto* attr =
+ ast::GetAttribute<ast::StrideAttribute>(member->Declaration()->attributes);
+ if (!attr) {
+ continue;
+ }
+ uint32_t stride = attr->stride;
+ if (matrix->ColumnStride() == stride) {
+ continue;
+ }
+ if (callback(member, matrix, stride) == GatherResult::kStop) {
+ return;
+ }
+ }
}
- }
}
- }
}
} // namespace
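A small sketch of the callback contract, not from the patch: returning GatherResult::kStop ends the scan after the first match (as ShouldRun() does below), while kContinue keeps visiting members. Here program is assumed to be a valid const Program*:

uint32_t num_strided_matrix_members = 0;
GatherCustomStrideMatrixMembers(program,
                                [&](const sem::StructMember*, sem::Matrix*, uint32_t) {
                                    num_strided_matrix_members++;
                                    return GatherResult::kContinue;
                                });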
@@ -106,144 +103,133 @@ DecomposeStridedMatrix::DecomposeStridedMatrix() = default;
DecomposeStridedMatrix::~DecomposeStridedMatrix() = default;
-bool DecomposeStridedMatrix::ShouldRun(const Program* program,
- const DataMap&) const {
- bool should_run = false;
- GatherCustomStrideMatrixMembers(
- program, [&](const sem::StructMember*, sem::Matrix*, uint32_t) {
+bool DecomposeStridedMatrix::ShouldRun(const Program* program, const DataMap&) const {
+ bool should_run = false;
+ GatherCustomStrideMatrixMembers(program, [&](const sem::StructMember*, sem::Matrix*, uint32_t) {
should_run = true;
return GatherResult::kStop;
- });
- return should_run;
+ });
+ return should_run;
}
-void DecomposeStridedMatrix::Run(CloneContext& ctx,
- const DataMap&,
- DataMap&) const {
- // Scan the program for all storage and uniform structure matrix members with
- // a custom stride attribute. Replace these matrices with an equivalent array,
- // and populate the `decomposed` map with the members that have been replaced.
- std::unordered_map<const ast::StructMember*, MatrixInfo> decomposed;
- GatherCustomStrideMatrixMembers(
- ctx.src, [&](const sem::StructMember* member, sem::Matrix* matrix,
- uint32_t stride) {
- // We've got ourselves a struct member of a matrix type with a custom
- // stride. Replace this with an array of column vectors.
- MatrixInfo info{stride, matrix};
- auto* replacement = ctx.dst->Member(
- member->Offset(), ctx.Clone(member->Name()), info.array(ctx.dst));
- ctx.Replace(member->Declaration(), replacement);
- decomposed.emplace(member->Declaration(), info);
- return GatherResult::kContinue;
- });
-
- // For all expressions where a single matrix column vector was indexed, we can
- // preserve these without calling conversion functions.
- // Example:
- // ssbo.mat[2] -> ssbo.mat[2]
- ctx.ReplaceAll([&](const ast::IndexAccessorExpression* expr)
- -> const ast::IndexAccessorExpression* {
- if (auto* access =
- ctx.src->Sem().Get<sem::StructMemberAccess>(expr->object)) {
- auto it = decomposed.find(access->Member()->Declaration());
- if (it != decomposed.end()) {
- auto* obj = ctx.CloneWithoutTransform(expr->object);
- auto* idx = ctx.Clone(expr->index);
- return ctx.dst->IndexAccessor(obj, idx);
- }
- }
- return nullptr;
- });
-
- // For all struct member accesses to the matrix on the LHS of an assignment,
- // we need to convert the matrix to the array before assigning to the
- // structure.
- // Example:
- // ssbo.mat = mat_to_arr(m)
- std::unordered_map<MatrixInfo, Symbol, MatrixInfo::Hasher> mat_to_arr;
- ctx.ReplaceAll([&](const ast::AssignmentStatement* stmt)
- -> const ast::Statement* {
- if (auto* access = ctx.src->Sem().Get<sem::StructMemberAccess>(stmt->lhs)) {
- auto it = decomposed.find(access->Member()->Declaration());
- if (it == decomposed.end()) {
- return nullptr;
- }
- MatrixInfo info = it->second;
- auto fn = utils::GetOrCreate(mat_to_arr, info, [&] {
- auto name = ctx.dst->Symbols().New(
- "mat" + std::to_string(info.matrix->columns()) + "x" +
- std::to_string(info.matrix->rows()) + "_stride_" +
- std::to_string(info.stride) + "_to_arr");
-
- auto matrix = [&] { return CreateASTTypeFor(ctx, info.matrix); };
- auto array = [&] { return info.array(ctx.dst); };
-
- auto mat = ctx.dst->Sym("m");
- ast::ExpressionList columns(info.matrix->columns());
- for (uint32_t i = 0; i < static_cast<uint32_t>(columns.size()); i++) {
- columns[i] = ctx.dst->IndexAccessor(mat, i);
+void DecomposeStridedMatrix::Run(CloneContext& ctx, const DataMap&, DataMap&) const {
+ // Scan the program for all storage and uniform structure matrix members with
+ // a custom stride attribute. Replace these matrices with an equivalent array,
+ // and populate the `decomposed` map with the members that have been replaced.
+ std::unordered_map<const ast::StructMember*, MatrixInfo> decomposed;
+ GatherCustomStrideMatrixMembers(
+ ctx.src, [&](const sem::StructMember* member, sem::Matrix* matrix, uint32_t stride) {
+ // We've got ourselves a struct member of a matrix type with a custom
+ // stride. Replace this with an array of column vectors.
+ MatrixInfo info{stride, matrix};
+ auto* replacement =
+ ctx.dst->Member(member->Offset(), ctx.Clone(member->Name()), info.array(ctx.dst));
+ ctx.Replace(member->Declaration(), replacement);
+ decomposed.emplace(member->Declaration(), info);
+ return GatherResult::kContinue;
+ });
+
+ // For all expressions where a single matrix column vector was indexed, we can
+ // preserve these without calling conversion functions.
+ // Example:
+ // ssbo.mat[2] -> ssbo.mat[2]
+ ctx.ReplaceAll(
+ [&](const ast::IndexAccessorExpression* expr) -> const ast::IndexAccessorExpression* {
+ if (auto* access = ctx.src->Sem().Get<sem::StructMemberAccess>(expr->object)) {
+ auto it = decomposed.find(access->Member()->Declaration());
+ if (it != decomposed.end()) {
+ auto* obj = ctx.CloneWithoutTransform(expr->object);
+ auto* idx = ctx.Clone(expr->index);
+ return ctx.dst->IndexAccessor(obj, idx);
+ }
+ }
+ return nullptr;
+ });
+
+ // For all struct member accesses to the matrix on the LHS of an assignment,
+ // we need to convert the matrix to the array before assigning to the
+ // structure.
+ // Example:
+ // ssbo.mat = mat_to_arr(m)
+ std::unordered_map<MatrixInfo, Symbol, MatrixInfo::Hasher> mat_to_arr;
+ ctx.ReplaceAll([&](const ast::AssignmentStatement* stmt) -> const ast::Statement* {
+ if (auto* access = ctx.src->Sem().Get<sem::StructMemberAccess>(stmt->lhs)) {
+ auto it = decomposed.find(access->Member()->Declaration());
+ if (it == decomposed.end()) {
+ return nullptr;
+ }
+ MatrixInfo info = it->second;
+ auto fn = utils::GetOrCreate(mat_to_arr, info, [&] {
+ auto name =
+ ctx.dst->Symbols().New("mat" + std::to_string(info.matrix->columns()) + "x" +
+ std::to_string(info.matrix->rows()) + "_stride_" +
+ std::to_string(info.stride) + "_to_arr");
+
+ auto matrix = [&] { return CreateASTTypeFor(ctx, info.matrix); };
+ auto array = [&] { return info.array(ctx.dst); };
+
+ auto mat = ctx.dst->Sym("m");
+ ast::ExpressionList columns(info.matrix->columns());
+ for (uint32_t i = 0; i < static_cast<uint32_t>(columns.size()); i++) {
+ columns[i] = ctx.dst->IndexAccessor(mat, u32(i));
+ }
+ ctx.dst->Func(name,
+ {
+ ctx.dst->Param(mat, matrix()),
+ },
+ array(),
+ {
+ ctx.dst->Return(ctx.dst->Construct(array(), columns)),
+ });
+ return name;
+ });
+ auto* lhs = ctx.CloneWithoutTransform(stmt->lhs);
+ auto* rhs = ctx.dst->Call(fn, ctx.Clone(stmt->rhs));
+ return ctx.dst->Assign(lhs, rhs);
}
- ctx.dst->Func(name,
- {
- ctx.dst->Param(mat, matrix()),
- },
- array(),
- {
- ctx.dst->Return(ctx.dst->Construct(array(), columns)),
- });
- return name;
- });
- auto* lhs = ctx.CloneWithoutTransform(stmt->lhs);
- auto* rhs = ctx.dst->Call(fn, ctx.Clone(stmt->rhs));
- return ctx.dst->Assign(lhs, rhs);
- }
- return nullptr;
- });
-
- // For all other struct member accesses, we need to convert the array to the
- // matrix type. Example:
- // m = arr_to_mat(ssbo.mat)
- std::unordered_map<MatrixInfo, Symbol, MatrixInfo::Hasher> arr_to_mat;
- ctx.ReplaceAll(
- [&](const ast::MemberAccessorExpression* expr) -> const ast::Expression* {
+ return nullptr;
+ });
+
+ // For all other struct member accesses, we need to convert the array to the
+ // matrix type. Example:
+ // m = arr_to_mat(ssbo.mat)
+ std::unordered_map<MatrixInfo, Symbol, MatrixInfo::Hasher> arr_to_mat;
+ ctx.ReplaceAll([&](const ast::MemberAccessorExpression* expr) -> const ast::Expression* {
if (auto* access = ctx.src->Sem().Get<sem::StructMemberAccess>(expr)) {
- auto it = decomposed.find(access->Member()->Declaration());
- if (it == decomposed.end()) {
- return nullptr;
- }
- MatrixInfo info = it->second;
- auto fn = utils::GetOrCreate(arr_to_mat, info, [&] {
- auto name = ctx.dst->Symbols().New(
- "arr_to_mat" + std::to_string(info.matrix->columns()) + "x" +
- std::to_string(info.matrix->rows()) + "_stride_" +
- std::to_string(info.stride));
-
- auto matrix = [&] { return CreateASTTypeFor(ctx, info.matrix); };
- auto array = [&] { return info.array(ctx.dst); };
-
- auto arr = ctx.dst->Sym("arr");
- ast::ExpressionList columns(info.matrix->columns());
- for (uint32_t i = 0; i < static_cast<uint32_t>(columns.size());
- i++) {
- columns[i] = ctx.dst->IndexAccessor(arr, i);
+ auto it = decomposed.find(access->Member()->Declaration());
+ if (it == decomposed.end()) {
+ return nullptr;
}
- ctx.dst->Func(
- name,
- {
- ctx.dst->Param(arr, array()),
- },
- matrix(),
- {
- ctx.dst->Return(ctx.dst->Construct(matrix(), columns)),
- });
- return name;
- });
- return ctx.dst->Call(fn, ctx.CloneWithoutTransform(expr));
+ MatrixInfo info = it->second;
+ auto fn = utils::GetOrCreate(arr_to_mat, info, [&] {
+ auto name = ctx.dst->Symbols().New(
+ "arr_to_mat" + std::to_string(info.matrix->columns()) + "x" +
+ std::to_string(info.matrix->rows()) + "_stride_" + std::to_string(info.stride));
+
+ auto matrix = [&] { return CreateASTTypeFor(ctx, info.matrix); };
+ auto array = [&] { return info.array(ctx.dst); };
+
+ auto arr = ctx.dst->Sym("arr");
+ ast::ExpressionList columns(info.matrix->columns());
+ for (uint32_t i = 0; i < static_cast<uint32_t>(columns.size()); i++) {
+ columns[i] = ctx.dst->IndexAccessor(arr, u32(i));
+ }
+ ctx.dst->Func(name,
+ {
+ ctx.dst->Param(arr, array()),
+ },
+ matrix(),
+ {
+ ctx.dst->Return(ctx.dst->Construct(matrix(), columns)),
+ });
+ return name;
+ });
+ return ctx.dst->Call(fn, ctx.CloneWithoutTransform(expr));
}
return nullptr;
- });
+ });
- ctx.Clone();
+ ctx.Clone();
}
} // namespace tint::transform
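
For reference, a minimal sketch of how this pass is typically driven, assuming the transform::Manager, DataMap and Output types used by the test helper in this directory at this revision (treat the exact signatures as assumptions); the ordering mirrors the documented dependency on SimplifyPointers, which in turn is run after Unshadow in the tests below:

// Sketch only (not part of this patch): drives the same Unshadow ->
// SimplifyPointers -> DecomposeStridedMatrix chain that the tests below use.
#include <utility>

#include "src/tint/transform/decompose_strided_matrix.h"
#include "src/tint/transform/manager.h"
#include "src/tint/transform/simplify_pointers.h"
#include "src/tint/transform/unshadow.h"

tint::Program DecomposeStridedMatrices(const tint::Program& in) {
    tint::transform::Manager manager;
    manager.Add<tint::transform::Unshadow>();              // run before SimplifyPointers
    manager.Add<tint::transform::SimplifyPointers>();      // documented prerequisite
    manager.Add<tint::transform::DecomposeStridedMatrix>();
    tint::transform::DataMap data;
    tint::transform::Output output = manager.Run(&in, data);
    return std::move(output.program);
}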
diff --git a/chromium/third_party/dawn/src/tint/transform/decompose_strided_matrix.h b/chromium/third_party/dawn/src/tint/transform/decompose_strided_matrix.h
index bcde5aa5b7a..40e9c3e237e 100644
--- a/chromium/third_party/dawn/src/tint/transform/decompose_strided_matrix.h
+++ b/chromium/third_party/dawn/src/tint/transform/decompose_strided_matrix.h
@@ -27,31 +27,27 @@ namespace tint::transform {
///
/// @note Depends on the following transforms to have been run first:
/// * SimplifyPointers
-class DecomposeStridedMatrix final
- : public Castable<DecomposeStridedMatrix, Transform> {
- public:
- /// Constructor
- DecomposeStridedMatrix();
-
- /// Destructor
- ~DecomposeStridedMatrix() override;
-
- /// @param program the program to inspect
- /// @param data optional extra transform-specific input data
- /// @returns true if this transform should be run for the given program
- bool ShouldRun(const Program* program,
- const DataMap& data = {}) const override;
-
- protected:
- /// Runs the transform using the CloneContext built for transforming a
- /// program. Run() is responsible for calling Clone() on the CloneContext.
- /// @param ctx the CloneContext primed with the input program and
- /// ProgramBuilder
- /// @param inputs optional extra transform-specific input data
- /// @param outputs optional extra transform-specific output data
- void Run(CloneContext& ctx,
- const DataMap& inputs,
- DataMap& outputs) const override;
+class DecomposeStridedMatrix final : public Castable<DecomposeStridedMatrix, Transform> {
+ public:
+ /// Constructor
+ DecomposeStridedMatrix();
+
+ /// Destructor
+ ~DecomposeStridedMatrix() override;
+
+ /// @param program the program to inspect
+ /// @param data optional extra transform-specific input data
+ /// @returns true if this transform should be run for the given program
+ bool ShouldRun(const Program* program, const DataMap& data = {}) const override;
+
+ protected:
+ /// Runs the transform using the CloneContext built for transforming a
+ /// program. Run() is responsible for calling Clone() on the CloneContext.
+ /// @param ctx the CloneContext primed with the input program and
+ /// ProgramBuilder
+ /// @param inputs optional extra transform-specific input data
+ /// @param outputs optional extra transform-specific output data
+ void Run(CloneContext& ctx, const DataMap& inputs, DataMap& outputs) const override;
};
} // namespace tint::transform
diff --git a/chromium/third_party/dawn/src/tint/transform/decompose_strided_matrix_test.cc b/chromium/third_party/dawn/src/tint/transform/decompose_strided_matrix_test.cc
index 73389991d66..06169d763d0 100644
--- a/chromium/third_party/dawn/src/tint/transform/decompose_strided_matrix_test.cc
+++ b/chromium/third_party/dawn/src/tint/transform/decompose_strided_matrix_test.cc
@@ -24,72 +24,69 @@
#include "src/tint/transform/test_helper.h"
#include "src/tint/transform/unshadow.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::transform {
namespace {
using DecomposeStridedMatrixTest = TransformTest;
-using f32 = ProgramBuilder::f32;
TEST_F(DecomposeStridedMatrixTest, ShouldRunEmptyModule) {
- auto* src = R"()";
+ auto* src = R"()";
- EXPECT_FALSE(ShouldRun<DecomposeStridedMatrix>(src));
+ EXPECT_FALSE(ShouldRun<DecomposeStridedMatrix>(src));
}
 TEST_F(DecomposeStridedMatrixTest, ShouldRunNonStridedMatrix) {
- auto* src = R"(
+ auto* src = R"(
var<private> m : mat3x2<f32>;
)";
- EXPECT_FALSE(ShouldRun<DecomposeStridedMatrix>(src));
+ EXPECT_FALSE(ShouldRun<DecomposeStridedMatrix>(src));
}
TEST_F(DecomposeStridedMatrixTest, Empty) {
- auto* src = R"()";
- auto* expect = src;
+ auto* src = R"()";
+ auto* expect = src;
- auto got = Run<DecomposeStridedMatrix>(src);
+ auto got = Run<DecomposeStridedMatrix>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(DecomposeStridedMatrixTest, ReadUniformMatrix) {
- // struct S {
- // @offset(16) @stride(32)
- // @internal(ignore_stride_attribute)
- // m : mat2x2<f32>,
- // };
- // @group(0) @binding(0) var<uniform> s : S;
- //
- // @stage(compute) @workgroup_size(1)
- // fn f() {
- // let x : mat2x2<f32> = s.m;
- // }
- ProgramBuilder b;
- auto* S = b.Structure(
- "S",
- {
- b.Member(
- "m", b.ty.mat2x2<f32>(),
- {
- b.create<ast::StructMemberOffsetAttribute>(16),
- b.create<ast::StrideAttribute>(32),
- b.Disable(ast::DisabledValidation::kIgnoreStrideAttribute),
- }),
- });
- b.Global("s", b.ty.Of(S), ast::StorageClass::kUniform,
- b.GroupAndBinding(0, 0));
- b.Func(
- "f", {}, b.ty.void_(),
- {
- b.Decl(b.Const("x", b.ty.mat2x2<f32>(), b.MemberAccessor("s", "m"))),
- },
- {
- b.Stage(ast::PipelineStage::kCompute),
- b.WorkgroupSize(1),
- });
-
- auto* expect = R"(
+ // struct S {
+ // @offset(16) @stride(32)
+ // @internal(ignore_stride_attribute)
+ // m : mat2x2<f32>,
+ // };
+ // @group(0) @binding(0) var<uniform> s : S;
+ //
+ // @compute @workgroup_size(1)
+ // fn f() {
+ // let x : mat2x2<f32> = s.m;
+ // }
+ ProgramBuilder b;
+ auto* S = b.Structure(
+ "S", {
+ b.Member("m", b.ty.mat2x2<f32>(),
+ {
+ b.create<ast::StructMemberOffsetAttribute>(16),
+ b.create<ast::StrideAttribute>(32),
+ b.Disable(ast::DisabledValidation::kIgnoreStrideAttribute),
+ }),
+ });
+ b.Global("s", b.ty.Of(S), ast::StorageClass::kUniform, b.GroupAndBinding(0, 0));
+ b.Func("f", {}, b.ty.void_(),
+ {
+ b.Decl(b.Let("x", b.ty.mat2x2<f32>(), b.MemberAccessor("s", "m"))),
+ },
+ {
+ b.Stage(ast::PipelineStage::kCompute),
+ b.WorkgroupSize(1_i),
+ });
+
+ auto* expect = R"(
struct S {
@size(16)
padding : u32,
@@ -102,55 +99,51 @@ fn arr_to_mat2x2_stride_32(arr : @stride(32) array<vec2<f32>, 2u>) -> mat2x2<f32
return mat2x2<f32>(arr[0u], arr[1u]);
}
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1i)
fn f() {
let x : mat2x2<f32> = arr_to_mat2x2_stride_32(s.m);
}
)";
- auto got = Run<Unshadow, SimplifyPointers, DecomposeStridedMatrix>(
- Program(std::move(b)));
+ auto got = Run<Unshadow, SimplifyPointers, DecomposeStridedMatrix>(Program(std::move(b)));
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(DecomposeStridedMatrixTest, ReadUniformColumn) {
- // struct S {
- // @offset(16) @stride(32)
- // @internal(ignore_stride_attribute)
- // m : mat2x2<f32>,
- // };
- // @group(0) @binding(0) var<uniform> s : S;
- //
- // @stage(compute) @workgroup_size(1)
- // fn f() {
- // let x : vec2<f32> = s.m[1];
- // }
- ProgramBuilder b;
- auto* S = b.Structure(
- "S",
- {
- b.Member(
- "m", b.ty.mat2x2<f32>(),
- {
- b.create<ast::StructMemberOffsetAttribute>(16),
- b.create<ast::StrideAttribute>(32),
- b.Disable(ast::DisabledValidation::kIgnoreStrideAttribute),
- }),
- });
- b.Global("s", b.ty.Of(S), ast::StorageClass::kUniform,
- b.GroupAndBinding(0, 0));
- b.Func("f", {}, b.ty.void_(),
- {
- b.Decl(b.Const("x", b.ty.vec2<f32>(),
- b.IndexAccessor(b.MemberAccessor("s", "m"), 1))),
- },
- {
- b.Stage(ast::PipelineStage::kCompute),
- b.WorkgroupSize(1),
- });
-
- auto* expect = R"(
+ // struct S {
+ // @offset(16) @stride(32)
+ // @internal(ignore_stride_attribute)
+ // m : mat2x2<f32>,
+ // };
+ // @group(0) @binding(0) var<uniform> s : S;
+ //
+ // @compute @workgroup_size(1)
+ // fn f() {
+ // let x : vec2<f32> = s.m[1];
+ // }
+ ProgramBuilder b;
+ auto* S = b.Structure(
+ "S", {
+ b.Member("m", b.ty.mat2x2<f32>(),
+ {
+ b.create<ast::StructMemberOffsetAttribute>(16),
+ b.create<ast::StrideAttribute>(32),
+ b.Disable(ast::DisabledValidation::kIgnoreStrideAttribute),
+ }),
+ });
+ b.Global("s", b.ty.Of(S), ast::StorageClass::kUniform, b.GroupAndBinding(0, 0));
+ b.Func(
+ "f", {}, b.ty.void_(),
+ {
+ b.Decl(b.Let("x", b.ty.vec2<f32>(), b.IndexAccessor(b.MemberAccessor("s", "m"), 1_i))),
+ },
+ {
+ b.Stage(ast::PipelineStage::kCompute),
+ b.WorkgroupSize(1_i),
+ });
+
+ auto* expect = R"(
struct S {
@size(16)
padding : u32,
@@ -159,55 +152,50 @@ struct S {
@group(0) @binding(0) var<uniform> s : S;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1i)
fn f() {
- let x : vec2<f32> = s.m[1];
+ let x : vec2<f32> = s.m[1i];
}
)";
- auto got = Run<Unshadow, SimplifyPointers, DecomposeStridedMatrix>(
- Program(std::move(b)));
+ auto got = Run<Unshadow, SimplifyPointers, DecomposeStridedMatrix>(Program(std::move(b)));
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(DecomposeStridedMatrixTest, ReadUniformMatrix_DefaultStride) {
- // struct S {
- // @offset(16) @stride(8)
- // @internal(ignore_stride_attribute)
- // m : mat2x2<f32>,
- // };
- // @group(0) @binding(0) var<uniform> s : S;
- //
- // @stage(compute) @workgroup_size(1)
- // fn f() {
- // let x : mat2x2<f32> = s.m;
- // }
- ProgramBuilder b;
- auto* S = b.Structure(
- "S",
- {
- b.Member(
- "m", b.ty.mat2x2<f32>(),
- {
- b.create<ast::StructMemberOffsetAttribute>(16),
- b.create<ast::StrideAttribute>(8),
- b.Disable(ast::DisabledValidation::kIgnoreStrideAttribute),
- }),
- });
- b.Global("s", b.ty.Of(S), ast::StorageClass::kUniform,
- b.GroupAndBinding(0, 0));
- b.Func(
- "f", {}, b.ty.void_(),
- {
- b.Decl(b.Const("x", b.ty.mat2x2<f32>(), b.MemberAccessor("s", "m"))),
- },
- {
- b.Stage(ast::PipelineStage::kCompute),
- b.WorkgroupSize(1),
- });
-
- auto* expect = R"(
+ // struct S {
+ // @offset(16) @stride(8)
+ // @internal(ignore_stride_attribute)
+ // m : mat2x2<f32>,
+ // };
+ // @group(0) @binding(0) var<uniform> s : S;
+ //
+ // @compute @workgroup_size(1)
+ // fn f() {
+ // let x : mat2x2<f32> = s.m;
+ // }
+ ProgramBuilder b;
+ auto* S = b.Structure(
+ "S", {
+ b.Member("m", b.ty.mat2x2<f32>(),
+ {
+ b.create<ast::StructMemberOffsetAttribute>(16),
+ b.create<ast::StrideAttribute>(8),
+ b.Disable(ast::DisabledValidation::kIgnoreStrideAttribute),
+ }),
+ });
+ b.Global("s", b.ty.Of(S), ast::StorageClass::kUniform, b.GroupAndBinding(0, 0));
+ b.Func("f", {}, b.ty.void_(),
+ {
+ b.Decl(b.Let("x", b.ty.mat2x2<f32>(), b.MemberAccessor("s", "m"))),
+ },
+ {
+ b.Stage(ast::PipelineStage::kCompute),
+ b.WorkgroupSize(1_i),
+ });
+
+ auto* expect = R"(
struct S {
@size(16)
padding : u32,
@@ -217,55 +205,51 @@ struct S {
@group(0) @binding(0) var<uniform> s : S;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1i)
fn f() {
let x : mat2x2<f32> = s.m;
}
)";
- auto got = Run<Unshadow, SimplifyPointers, DecomposeStridedMatrix>(
- Program(std::move(b)));
+ auto got = Run<Unshadow, SimplifyPointers, DecomposeStridedMatrix>(Program(std::move(b)));
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(DecomposeStridedMatrixTest, ReadStorageMatrix) {
- // struct S {
- // @offset(8) @stride(32)
- // @internal(ignore_stride_attribute)
- // m : mat2x2<f32>,
- // };
- // @group(0) @binding(0) var<storage, read_write> s : S;
- //
- // @stage(compute) @workgroup_size(1)
- // fn f() {
- // let x : mat2x2<f32> = s.m;
- // }
- ProgramBuilder b;
- auto* S = b.Structure(
- "S",
- {
- b.Member(
- "m", b.ty.mat2x2<f32>(),
- {
- b.create<ast::StructMemberOffsetAttribute>(8),
- b.create<ast::StrideAttribute>(32),
- b.Disable(ast::DisabledValidation::kIgnoreStrideAttribute),
- }),
- });
- b.Global("s", b.ty.Of(S), ast::StorageClass::kStorage,
- ast::Access::kReadWrite, b.GroupAndBinding(0, 0));
- b.Func(
- "f", {}, b.ty.void_(),
- {
- b.Decl(b.Const("x", b.ty.mat2x2<f32>(), b.MemberAccessor("s", "m"))),
- },
- {
- b.Stage(ast::PipelineStage::kCompute),
- b.WorkgroupSize(1),
- });
-
- auto* expect = R"(
+ // struct S {
+ // @offset(8) @stride(32)
+ // @internal(ignore_stride_attribute)
+ // m : mat2x2<f32>,
+ // };
+ // @group(0) @binding(0) var<storage, read_write> s : S;
+ //
+ // @compute @workgroup_size(1)
+ // fn f() {
+ // let x : mat2x2<f32> = s.m;
+ // }
+ ProgramBuilder b;
+ auto* S = b.Structure(
+ "S", {
+ b.Member("m", b.ty.mat2x2<f32>(),
+ {
+ b.create<ast::StructMemberOffsetAttribute>(8),
+ b.create<ast::StrideAttribute>(32),
+ b.Disable(ast::DisabledValidation::kIgnoreStrideAttribute),
+ }),
+ });
+ b.Global("s", b.ty.Of(S), ast::StorageClass::kStorage, ast::Access::kReadWrite,
+ b.GroupAndBinding(0, 0));
+ b.Func("f", {}, b.ty.void_(),
+ {
+ b.Decl(b.Let("x", b.ty.mat2x2<f32>(), b.MemberAccessor("s", "m"))),
+ },
+ {
+ b.Stage(ast::PipelineStage::kCompute),
+ b.WorkgroupSize(1_i),
+ });
+
+ auto* expect = R"(
struct S {
@size(8)
padding : u32,
@@ -278,55 +262,52 @@ fn arr_to_mat2x2_stride_32(arr : @stride(32) array<vec2<f32>, 2u>) -> mat2x2<f32
return mat2x2<f32>(arr[0u], arr[1u]);
}
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1i)
fn f() {
let x : mat2x2<f32> = arr_to_mat2x2_stride_32(s.m);
}
)";
- auto got = Run<Unshadow, SimplifyPointers, DecomposeStridedMatrix>(
- Program(std::move(b)));
+ auto got = Run<Unshadow, SimplifyPointers, DecomposeStridedMatrix>(Program(std::move(b)));
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(DecomposeStridedMatrixTest, ReadStorageColumn) {
- // struct S {
- // @offset(16) @stride(32)
- // @internal(ignore_stride_attribute)
- // m : mat2x2<f32>,
- // };
- // @group(0) @binding(0) var<storage, read_write> s : S;
- //
- // @stage(compute) @workgroup_size(1)
- // fn f() {
- // let x : vec2<f32> = s.m[1];
- // }
- ProgramBuilder b;
- auto* S = b.Structure(
- "S",
- {
- b.Member(
- "m", b.ty.mat2x2<f32>(),
- {
- b.create<ast::StructMemberOffsetAttribute>(16),
- b.create<ast::StrideAttribute>(32),
- b.Disable(ast::DisabledValidation::kIgnoreStrideAttribute),
- }),
- });
- b.Global("s", b.ty.Of(S), ast::StorageClass::kStorage,
- ast::Access::kReadWrite, b.GroupAndBinding(0, 0));
- b.Func("f", {}, b.ty.void_(),
- {
- b.Decl(b.Const("x", b.ty.vec2<f32>(),
- b.IndexAccessor(b.MemberAccessor("s", "m"), 1))),
- },
- {
- b.Stage(ast::PipelineStage::kCompute),
- b.WorkgroupSize(1),
- });
-
- auto* expect = R"(
+ // struct S {
+ // @offset(16) @stride(32)
+ // @internal(ignore_stride_attribute)
+ // m : mat2x2<f32>,
+ // };
+ // @group(0) @binding(0) var<storage, read_write> s : S;
+ //
+ // @compute @workgroup_size(1)
+ // fn f() {
+ // let x : vec2<f32> = s.m[1];
+ // }
+ ProgramBuilder b;
+ auto* S = b.Structure(
+ "S", {
+ b.Member("m", b.ty.mat2x2<f32>(),
+ {
+ b.create<ast::StructMemberOffsetAttribute>(16),
+ b.create<ast::StrideAttribute>(32),
+ b.Disable(ast::DisabledValidation::kIgnoreStrideAttribute),
+ }),
+ });
+ b.Global("s", b.ty.Of(S), ast::StorageClass::kStorage, ast::Access::kReadWrite,
+ b.GroupAndBinding(0, 0));
+ b.Func(
+ "f", {}, b.ty.void_(),
+ {
+ b.Decl(b.Let("x", b.ty.vec2<f32>(), b.IndexAccessor(b.MemberAccessor("s", "m"), 1_i))),
+ },
+ {
+ b.Stage(ast::PipelineStage::kCompute),
+ b.WorkgroupSize(1_i),
+ });
+
+ auto* expect = R"(
struct S {
@size(16)
padding : u32,
@@ -335,56 +316,52 @@ struct S {
@group(0) @binding(0) var<storage, read_write> s : S;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1i)
fn f() {
- let x : vec2<f32> = s.m[1];
+ let x : vec2<f32> = s.m[1i];
}
)";
- auto got = Run<Unshadow, SimplifyPointers, DecomposeStridedMatrix>(
- Program(std::move(b)));
+ auto got = Run<Unshadow, SimplifyPointers, DecomposeStridedMatrix>(Program(std::move(b)));
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(DecomposeStridedMatrixTest, WriteStorageMatrix) {
- // struct S {
- // @offset(8) @stride(32)
- // @internal(ignore_stride_attribute)
- // m : mat2x2<f32>,
- // };
- // @group(0) @binding(0) var<storage, read_write> s : S;
- //
- // @stage(compute) @workgroup_size(1)
- // fn f() {
- // s.m = mat2x2<f32>(vec2<f32>(1.0, 2.0), vec2<f32>(3.0, 4.0));
- // }
- ProgramBuilder b;
- auto* S = b.Structure(
- "S",
- {
- b.Member(
- "m", b.ty.mat2x2<f32>(),
- {
- b.create<ast::StructMemberOffsetAttribute>(8),
- b.create<ast::StrideAttribute>(32),
- b.Disable(ast::DisabledValidation::kIgnoreStrideAttribute),
- }),
- });
- b.Global("s", b.ty.Of(S), ast::StorageClass::kStorage,
- ast::Access::kReadWrite, b.GroupAndBinding(0, 0));
- b.Func("f", {}, b.ty.void_(),
- {
- b.Assign(b.MemberAccessor("s", "m"),
- b.mat2x2<f32>(b.vec2<f32>(1.0f, 2.0f),
- b.vec2<f32>(3.0f, 4.0f))),
- },
- {
- b.Stage(ast::PipelineStage::kCompute),
- b.WorkgroupSize(1),
- });
-
- auto* expect = R"(
+ // struct S {
+ // @offset(8) @stride(32)
+ // @internal(ignore_stride_attribute)
+ // m : mat2x2<f32>,
+ // };
+ // @group(0) @binding(0) var<storage, read_write> s : S;
+ //
+ // @compute @workgroup_size(1)
+ // fn f() {
+ // s.m = mat2x2<f32>(vec2<f32>(1.0, 2.0), vec2<f32>(3.0, 4.0));
+ // }
+ ProgramBuilder b;
+ auto* S = b.Structure(
+ "S", {
+ b.Member("m", b.ty.mat2x2<f32>(),
+ {
+ b.create<ast::StructMemberOffsetAttribute>(8),
+ b.create<ast::StrideAttribute>(32),
+ b.Disable(ast::DisabledValidation::kIgnoreStrideAttribute),
+ }),
+ });
+ b.Global("s", b.ty.Of(S), ast::StorageClass::kStorage, ast::Access::kReadWrite,
+ b.GroupAndBinding(0, 0));
+ b.Func("f", {}, b.ty.void_(),
+ {
+ b.Assign(b.MemberAccessor("s", "m"),
+ b.mat2x2<f32>(b.vec2<f32>(1_f, 2_f), b.vec2<f32>(3_f, 4_f))),
+ },
+ {
+ b.Stage(ast::PipelineStage::kCompute),
+ b.WorkgroupSize(1_i),
+ });
+
+ auto* expect = R"(
struct S {
@size(8)
padding : u32,
@@ -397,55 +374,51 @@ fn mat2x2_stride_32_to_arr(m : mat2x2<f32>) -> @stride(32) array<vec2<f32>, 2u>
return @stride(32) array<vec2<f32>, 2u>(m[0u], m[1u]);
}
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1i)
fn f() {
- s.m = mat2x2_stride_32_to_arr(mat2x2<f32>(vec2<f32>(1.0, 2.0), vec2<f32>(3.0, 4.0)));
+ s.m = mat2x2_stride_32_to_arr(mat2x2<f32>(vec2<f32>(1.0f, 2.0f), vec2<f32>(3.0f, 4.0f)));
}
)";
- auto got = Run<Unshadow, SimplifyPointers, DecomposeStridedMatrix>(
- Program(std::move(b)));
+ auto got = Run<Unshadow, SimplifyPointers, DecomposeStridedMatrix>(Program(std::move(b)));
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(DecomposeStridedMatrixTest, WriteStorageColumn) {
- // struct S {
- // @offset(8) @stride(32)
- // @internal(ignore_stride_attribute)
- // m : mat2x2<f32>,
- // };
- // @group(0) @binding(0) var<storage, read_write> s : S;
- //
- // @stage(compute) @workgroup_size(1)
- // fn f() {
- // s.m[1] = vec2<f32>(1.0, 2.0);
- // }
- ProgramBuilder b;
- auto* S = b.Structure(
- "S",
- {
- b.Member(
- "m", b.ty.mat2x2<f32>(),
- {
- b.create<ast::StructMemberOffsetAttribute>(8),
- b.create<ast::StrideAttribute>(32),
- b.Disable(ast::DisabledValidation::kIgnoreStrideAttribute),
- }),
- });
- b.Global("s", b.ty.Of(S), ast::StorageClass::kStorage,
- ast::Access::kReadWrite, b.GroupAndBinding(0, 0));
- b.Func("f", {}, b.ty.void_(),
- {
- b.Assign(b.IndexAccessor(b.MemberAccessor("s", "m"), 1),
- b.vec2<f32>(1.0f, 2.0f)),
- },
- {
- b.Stage(ast::PipelineStage::kCompute),
- b.WorkgroupSize(1),
- });
-
- auto* expect = R"(
+ // struct S {
+ // @offset(8) @stride(32)
+ // @internal(ignore_stride_attribute)
+ // m : mat2x2<f32>,
+ // };
+ // @group(0) @binding(0) var<storage, read_write> s : S;
+ //
+ // @compute @workgroup_size(1)
+ // fn f() {
+ // s.m[1] = vec2<f32>(1.0, 2.0);
+ // }
+ ProgramBuilder b;
+ auto* S = b.Structure(
+ "S", {
+ b.Member("m", b.ty.mat2x2<f32>(),
+ {
+ b.create<ast::StructMemberOffsetAttribute>(8),
+ b.create<ast::StrideAttribute>(32),
+ b.Disable(ast::DisabledValidation::kIgnoreStrideAttribute),
+ }),
+ });
+ b.Global("s", b.ty.Of(S), ast::StorageClass::kStorage, ast::Access::kReadWrite,
+ b.GroupAndBinding(0, 0));
+ b.Func("f", {}, b.ty.void_(),
+ {
+ b.Assign(b.IndexAccessor(b.MemberAccessor("s", "m"), 1_i), b.vec2<f32>(1_f, 2_f)),
+ },
+ {
+ b.Stage(ast::PipelineStage::kCompute),
+ b.WorkgroupSize(1_i),
+ });
+
+ auto* expect = R"(
struct S {
@size(8)
padding : u32,
@@ -454,70 +427,63 @@ struct S {
@group(0) @binding(0) var<storage, read_write> s : S;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1i)
fn f() {
- s.m[1] = vec2<f32>(1.0, 2.0);
+ s.m[1i] = vec2<f32>(1.0f, 2.0f);
}
)";
- auto got = Run<Unshadow, SimplifyPointers, DecomposeStridedMatrix>(
- Program(std::move(b)));
+ auto got = Run<Unshadow, SimplifyPointers, DecomposeStridedMatrix>(Program(std::move(b)));
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(DecomposeStridedMatrixTest, ReadWriteViaPointerLets) {
- // struct S {
- // @offset(8) @stride(32)
- // @internal(ignore_stride_attribute)
- // m : mat2x2<f32>,
- // };
- // @group(0) @binding(0) var<storage, read_write> s : S;
- //
- // @stage(compute) @workgroup_size(1)
- // fn f() {
- // let a = &s.m;
- // let b = &*&*(a);
- // let x = *b;
- // let y = (*b)[1];
- // let z = x[1];
- // (*b) = mat2x2<f32>(vec2<f32>(1.0, 2.0), vec2<f32>(3.0, 4.0));
- // (*b)[1] = vec2<f32>(5.0, 6.0);
- // }
- ProgramBuilder b;
- auto* S = b.Structure(
- "S",
- {
- b.Member(
- "m", b.ty.mat2x2<f32>(),
- {
- b.create<ast::StructMemberOffsetAttribute>(8),
- b.create<ast::StrideAttribute>(32),
- b.Disable(ast::DisabledValidation::kIgnoreStrideAttribute),
- }),
- });
- b.Global("s", b.ty.Of(S), ast::StorageClass::kStorage,
- ast::Access::kReadWrite, b.GroupAndBinding(0, 0));
- b.Func(
- "f", {}, b.ty.void_(),
- {
- b.Decl(
- b.Const("a", nullptr, b.AddressOf(b.MemberAccessor("s", "m")))),
- b.Decl(b.Const("b", nullptr,
- b.AddressOf(b.Deref(b.AddressOf(b.Deref("a")))))),
- b.Decl(b.Const("x", nullptr, b.Deref("b"))),
- b.Decl(b.Const("y", nullptr, b.IndexAccessor(b.Deref("b"), 1))),
- b.Decl(b.Const("z", nullptr, b.IndexAccessor("x", 1))),
- b.Assign(b.Deref("b"), b.mat2x2<f32>(b.vec2<f32>(1.0f, 2.0f),
- b.vec2<f32>(3.0f, 4.0f))),
- b.Assign(b.IndexAccessor(b.Deref("b"), 1), b.vec2<f32>(5.0f, 6.0f)),
- },
- {
- b.Stage(ast::PipelineStage::kCompute),
- b.WorkgroupSize(1),
- });
-
- auto* expect = R"(
+ // struct S {
+ // @offset(8) @stride(32)
+ // @internal(ignore_stride_attribute)
+ // m : mat2x2<f32>,
+ // };
+ // @group(0) @binding(0) var<storage, read_write> s : S;
+ //
+ // @compute @workgroup_size(1)
+ // fn f() {
+ // let a = &s.m;
+ // let b = &*&*(a);
+ // let x = *b;
+ // let y = (*b)[1];
+ // let z = x[1];
+ // (*b) = mat2x2<f32>(vec2<f32>(1.0, 2.0), vec2<f32>(3.0, 4.0));
+ // (*b)[1] = vec2<f32>(5.0, 6.0);
+ // }
+ ProgramBuilder b;
+ auto* S = b.Structure(
+ "S", {
+ b.Member("m", b.ty.mat2x2<f32>(),
+ {
+ b.create<ast::StructMemberOffsetAttribute>(8),
+ b.create<ast::StrideAttribute>(32),
+ b.Disable(ast::DisabledValidation::kIgnoreStrideAttribute),
+ }),
+ });
+ b.Global("s", b.ty.Of(S), ast::StorageClass::kStorage, ast::Access::kReadWrite,
+ b.GroupAndBinding(0, 0));
+ b.Func("f", {}, b.ty.void_(),
+ {
+ b.Decl(b.Let("a", nullptr, b.AddressOf(b.MemberAccessor("s", "m")))),
+ b.Decl(b.Let("b", nullptr, b.AddressOf(b.Deref(b.AddressOf(b.Deref("a")))))),
+ b.Decl(b.Let("x", nullptr, b.Deref("b"))),
+ b.Decl(b.Let("y", nullptr, b.IndexAccessor(b.Deref("b"), 1_i))),
+ b.Decl(b.Let("z", nullptr, b.IndexAccessor("x", 1_i))),
+ b.Assign(b.Deref("b"), b.mat2x2<f32>(b.vec2<f32>(1_f, 2_f), b.vec2<f32>(3_f, 4_f))),
+ b.Assign(b.IndexAccessor(b.Deref("b"), 1_i), b.vec2<f32>(5_f, 6_f)),
+ },
+ {
+ b.Stage(ast::PipelineStage::kCompute),
+ b.WorkgroupSize(1_i),
+ });
+
+ auto* expect = R"(
struct S {
@size(8)
padding : u32,
@@ -534,58 +500,54 @@ fn mat2x2_stride_32_to_arr(m : mat2x2<f32>) -> @stride(32) array<vec2<f32>, 2u>
return @stride(32) array<vec2<f32>, 2u>(m[0u], m[1u]);
}
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1i)
fn f() {
let x = arr_to_mat2x2_stride_32(s.m);
- let y = s.m[1];
- let z = x[1];
- s.m = mat2x2_stride_32_to_arr(mat2x2<f32>(vec2<f32>(1.0, 2.0), vec2<f32>(3.0, 4.0)));
- s.m[1] = vec2<f32>(5.0, 6.0);
+ let y = s.m[1i];
+ let z = x[1i];
+ s.m = mat2x2_stride_32_to_arr(mat2x2<f32>(vec2<f32>(1.0f, 2.0f), vec2<f32>(3.0f, 4.0f)));
+ s.m[1i] = vec2<f32>(5.0f, 6.0f);
}
)";
- auto got = Run<Unshadow, SimplifyPointers, DecomposeStridedMatrix>(
- Program(std::move(b)));
+ auto got = Run<Unshadow, SimplifyPointers, DecomposeStridedMatrix>(Program(std::move(b)));
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(DecomposeStridedMatrixTest, ReadPrivateMatrix) {
- // struct S {
- // @offset(8) @stride(32)
- // @internal(ignore_stride_attribute)
- // m : mat2x2<f32>,
- // };
- // var<private> s : S;
- //
- // @stage(compute) @workgroup_size(1)
- // fn f() {
- // let x : mat2x2<f32> = s.m;
- // }
- ProgramBuilder b;
- auto* S = b.Structure(
- "S",
- {
- b.Member(
- "m", b.ty.mat2x2<f32>(),
- {
- b.create<ast::StructMemberOffsetAttribute>(8),
- b.create<ast::StrideAttribute>(32),
- b.Disable(ast::DisabledValidation::kIgnoreStrideAttribute),
- }),
- });
- b.Global("s", b.ty.Of(S), ast::StorageClass::kPrivate);
- b.Func(
- "f", {}, b.ty.void_(),
- {
- b.Decl(b.Const("x", b.ty.mat2x2<f32>(), b.MemberAccessor("s", "m"))),
- },
- {
- b.Stage(ast::PipelineStage::kCompute),
- b.WorkgroupSize(1),
- });
-
- auto* expect = R"(
+ // struct S {
+ // @offset(8) @stride(32)
+ // @internal(ignore_stride_attribute)
+ // m : mat2x2<f32>,
+ // };
+ // var<private> s : S;
+ //
+ // @compute @workgroup_size(1)
+ // fn f() {
+ // let x : mat2x2<f32> = s.m;
+ // }
+ ProgramBuilder b;
+ auto* S = b.Structure(
+ "S", {
+ b.Member("m", b.ty.mat2x2<f32>(),
+ {
+ b.create<ast::StructMemberOffsetAttribute>(8),
+ b.create<ast::StrideAttribute>(32),
+ b.Disable(ast::DisabledValidation::kIgnoreStrideAttribute),
+ }),
+ });
+ b.Global("s", b.ty.Of(S), ast::StorageClass::kPrivate);
+ b.Func("f", {}, b.ty.void_(),
+ {
+ b.Decl(b.Let("x", b.ty.mat2x2<f32>(), b.MemberAccessor("s", "m"))),
+ },
+ {
+ b.Stage(ast::PipelineStage::kCompute),
+ b.WorkgroupSize(1_i),
+ });
+
+ auto* expect = R"(
struct S {
@size(8)
padding : u32,
@@ -595,55 +557,51 @@ struct S {
var<private> s : S;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1i)
fn f() {
let x : mat2x2<f32> = s.m;
}
)";
- auto got = Run<Unshadow, SimplifyPointers, DecomposeStridedMatrix>(
- Program(std::move(b)));
+ auto got = Run<Unshadow, SimplifyPointers, DecomposeStridedMatrix>(Program(std::move(b)));
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(DecomposeStridedMatrixTest, WritePrivateMatrix) {
- // struct S {
- // @offset(8) @stride(32)
- // @internal(ignore_stride_attribute)
- // m : mat2x2<f32>,
- // };
- // var<private> s : S;
- //
- // @stage(compute) @workgroup_size(1)
- // fn f() {
- // s.m = mat2x2<f32>(vec2<f32>(1.0, 2.0), vec2<f32>(3.0, 4.0));
- // }
- ProgramBuilder b;
- auto* S = b.Structure(
- "S",
- {
- b.Member(
- "m", b.ty.mat2x2<f32>(),
- {
- b.create<ast::StructMemberOffsetAttribute>(8),
- b.create<ast::StrideAttribute>(32),
- b.Disable(ast::DisabledValidation::kIgnoreStrideAttribute),
- }),
- });
- b.Global("s", b.ty.Of(S), ast::StorageClass::kPrivate);
- b.Func("f", {}, b.ty.void_(),
- {
- b.Assign(b.MemberAccessor("s", "m"),
- b.mat2x2<f32>(b.vec2<f32>(1.0f, 2.0f),
- b.vec2<f32>(3.0f, 4.0f))),
- },
- {
- b.Stage(ast::PipelineStage::kCompute),
- b.WorkgroupSize(1),
- });
-
- auto* expect = R"(
+ // struct S {
+ // @offset(8) @stride(32)
+ // @internal(ignore_stride_attribute)
+ // m : mat2x2<f32>,
+ // };
+ // var<private> s : S;
+ //
+ // @compute @workgroup_size(1)
+ // fn f() {
+ // s.m = mat2x2<f32>(vec2<f32>(1.0, 2.0), vec2<f32>(3.0, 4.0));
+ // }
+ ProgramBuilder b;
+ auto* S = b.Structure(
+ "S", {
+ b.Member("m", b.ty.mat2x2<f32>(),
+ {
+ b.create<ast::StructMemberOffsetAttribute>(8),
+ b.create<ast::StrideAttribute>(32),
+ b.Disable(ast::DisabledValidation::kIgnoreStrideAttribute),
+ }),
+ });
+ b.Global("s", b.ty.Of(S), ast::StorageClass::kPrivate);
+ b.Func("f", {}, b.ty.void_(),
+ {
+ b.Assign(b.MemberAccessor("s", "m"),
+ b.mat2x2<f32>(b.vec2<f32>(1_f, 2_f), b.vec2<f32>(3_f, 4_f))),
+ },
+ {
+ b.Stage(ast::PipelineStage::kCompute),
+ b.WorkgroupSize(1_i),
+ });
+
+ auto* expect = R"(
struct S {
@size(8)
padding : u32,
@@ -653,16 +611,15 @@ struct S {
var<private> s : S;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1i)
fn f() {
- s.m = mat2x2<f32>(vec2<f32>(1.0, 2.0), vec2<f32>(3.0, 4.0));
+ s.m = mat2x2<f32>(vec2<f32>(1.0f, 2.0f), vec2<f32>(3.0f, 4.0f));
}
)";
- auto got = Run<Unshadow, SimplifyPointers, DecomposeStridedMatrix>(
- Program(std::move(b)));
+ auto got = Run<Unshadow, SimplifyPointers, DecomposeStridedMatrix>(Program(std::move(b)));
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/transform/disable_uniformity_analysis.cc b/chromium/third_party/dawn/src/tint/transform/disable_uniformity_analysis.cc
new file mode 100644
index 00000000000..7a3002377df
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/transform/disable_uniformity_analysis.cc
@@ -0,0 +1,40 @@
+// Copyright 2022 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/tint/transform/disable_uniformity_analysis.h"
+
+#include <utility>
+
+#include "src/tint/program_builder.h"
+#include "src/tint/sem/module.h"
+
+TINT_INSTANTIATE_TYPEINFO(tint::transform::DisableUniformityAnalysis);
+
+namespace tint::transform {
+
+DisableUniformityAnalysis::DisableUniformityAnalysis() = default;
+
+DisableUniformityAnalysis::~DisableUniformityAnalysis() = default;
+
+bool DisableUniformityAnalysis::ShouldRun(const Program* program, const DataMap&) const {
+ return !program->Sem().Module()->Extensions().contains(
+ ast::Extension::kChromiumDisableUniformityAnalysis);
+}
+
+void DisableUniformityAnalysis::Run(CloneContext& ctx, const DataMap&, DataMap&) const {
+ ctx.dst->Enable(ast::Extension::kChromiumDisableUniformityAnalysis);
+ ctx.Clone();
+}
+
+} // namespace tint::transform
diff --git a/chromium/third_party/dawn/src/tint/transform/disable_uniformity_analysis.h b/chromium/third_party/dawn/src/tint/transform/disable_uniformity_analysis.h
new file mode 100644
index 00000000000..3c9fb53743d
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/transform/disable_uniformity_analysis.h
@@ -0,0 +1,47 @@
+// Copyright 2022 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef SRC_TINT_TRANSFORM_DISABLE_UNIFORMITY_ANALYSIS_H_
+#define SRC_TINT_TRANSFORM_DISABLE_UNIFORMITY_ANALYSIS_H_
+
+#include "src/tint/transform/transform.h"
+
+namespace tint::transform {
+
+/// Disable uniformity analysis for the program.
+class DisableUniformityAnalysis final : public Castable<DisableUniformityAnalysis, Transform> {
+ public:
+ /// Constructor
+ DisableUniformityAnalysis();
+ /// Destructor
+ ~DisableUniformityAnalysis() override;
+
+ /// @param program the program to inspect
+ /// @param data optional extra transform-specific input data
+ /// @returns true if this transform should be run for the given program
+ bool ShouldRun(const Program* program, const DataMap& data = {}) const override;
+
+ protected:
+ /// Runs the transform using the CloneContext built for transforming a
+ /// program. Run() is responsible for calling Clone() on the CloneContext.
+ /// @param ctx the CloneContext primed with the input program and
+ /// ProgramBuilder
+ /// @param inputs optional extra transform-specific input data
+ /// @param outputs optional extra transform-specific output data
+ void Run(CloneContext& ctx, const DataMap& inputs, DataMap& outputs) const override;
+};
+
+} // namespace tint::transform
+
+#endif // SRC_TINT_TRANSFORM_DISABLE_UNIFORMITY_ANALYSIS_H_
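
Usage follows the same pattern as the other passes: append the transform to a transform::Manager pipeline. A sketch under the same Manager/Output assumptions as the example after decompose_strided_matrix.cc above; as the implementation shows, Run() prepends the enable directive and clones the rest unchanged, while ShouldRun() reports false when the extension is already present:

// Sketch only: same API assumptions as the earlier example in this patch.
#include <utility>

#include "src/tint/transform/disable_uniformity_analysis.h"
#include "src/tint/transform/manager.h"

tint::Program DisableUniformity(const tint::Program& in) {
    tint::transform::Manager manager;
    manager.Add<tint::transform::DisableUniformityAnalysis>();
    tint::transform::DataMap data;
    tint::transform::Output output = manager.Run(&in, data);
    return std::move(output.program);
}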
diff --git a/chromium/third_party/dawn/src/tint/transform/disable_uniformity_analysis_test.cc b/chromium/third_party/dawn/src/tint/transform/disable_uniformity_analysis_test.cc
new file mode 100644
index 00000000000..3bcac0e58da
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/transform/disable_uniformity_analysis_test.cc
@@ -0,0 +1,73 @@
+// Copyright 2022 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/tint/transform/disable_uniformity_analysis.h"
+
+#include <string>
+#include <utility>
+
+#include "src/tint/transform/test_helper.h"
+
+namespace tint::transform {
+namespace {
+
+using DisableUniformityAnalysisTest = TransformTest;
+
+TEST_F(DisableUniformityAnalysisTest, ShouldRunEmptyModule) {
+ auto* src = R"()";
+
+ EXPECT_TRUE(ShouldRun<DisableUniformityAnalysis>(src));
+}
+
+TEST_F(DisableUniformityAnalysisTest, ShouldRunExtensionAlreadyPresent) {
+ auto* src = R"(
+enable chromium_disable_uniformity_analysis;
+)";
+
+ EXPECT_FALSE(ShouldRun<DisableUniformityAnalysis>(src));
+}
+
+TEST_F(DisableUniformityAnalysisTest, EmptyModule) {
+ auto* src = R"()";
+
+ auto* expect = R"(
+enable chromium_disable_uniformity_analysis;
+)";
+
+ auto got = Run<DisableUniformityAnalysis>(src);
+
+ EXPECT_EQ(expect, str(got));
+}
+
+TEST_F(DisableUniformityAnalysisTest, NonEmptyModule) {
+ auto* src = R"(
+@group(0) @binding(0) var<storage, read> global : i32;
+
+@compute @workgroup_size(64)
+fn main() {
+ if ((global == 42)) {
+ workgroupBarrier();
+ }
+}
+)";
+
+ auto expect = "\nenable chromium_disable_uniformity_analysis;\n" + std::string(src);
+
+ auto got = Run<DisableUniformityAnalysis>(src);
+
+ EXPECT_EQ(expect, str(got));
+}
+
+} // namespace
+} // namespace tint::transform
diff --git a/chromium/third_party/dawn/src/tint/transform/expand_compound_assignment.cc b/chromium/third_party/dawn/src/tint/transform/expand_compound_assignment.cc
index 0473fae1f7e..2f775ca1198 100644
--- a/chromium/third_party/dawn/src/tint/transform/expand_compound_assignment.cc
+++ b/chromium/third_party/dawn/src/tint/transform/expand_compound_assignment.cc
@@ -27,168 +27,160 @@
TINT_INSTANTIATE_TYPEINFO(tint::transform::ExpandCompoundAssignment);
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::transform {
ExpandCompoundAssignment::ExpandCompoundAssignment() = default;
ExpandCompoundAssignment::~ExpandCompoundAssignment() = default;
-bool ExpandCompoundAssignment::ShouldRun(const Program* program,
- const DataMap&) const {
- for (auto* node : program->ASTNodes().Objects()) {
- if (node->IsAnyOf<ast::CompoundAssignmentStatement,
- ast::IncrementDecrementStatement>()) {
- return true;
+bool ExpandCompoundAssignment::ShouldRun(const Program* program, const DataMap&) const {
+ for (auto* node : program->ASTNodes().Objects()) {
+ if (node->IsAnyOf<ast::CompoundAssignmentStatement, ast::IncrementDecrementStatement>()) {
+ return true;
+ }
}
- }
- return false;
+ return false;
}
/// Internal class used to collect statement expansions during the transform.
class State {
- private:
- /// The clone context.
- CloneContext& ctx;
-
- /// The program builder.
- ProgramBuilder& b;
-
- /// The HoistToDeclBefore helper instance.
- HoistToDeclBefore hoist_to_decl_before;
-
- public:
- /// Constructor
- /// @param context the clone context
- explicit State(CloneContext& context)
- : ctx(context), b(*ctx.dst), hoist_to_decl_before(ctx) {}
-
- /// Replace `stmt` with a regular assignment statement of the form:
- /// lhs = lhs op rhs
- /// The LHS expression will only be evaluated once, and any side effects will
- /// be hoisted to `let` declarations above the assignment statement.
- /// @param stmt the statement to replace
- /// @param lhs the lhs expression from the source statement
- /// @param rhs the rhs expression in the destination module
- /// @param op the binary operator
- void Expand(const ast::Statement* stmt,
- const ast::Expression* lhs,
- const ast::Expression* rhs,
- ast::BinaryOp op) {
- // Helper function to create the new LHS expression. This will be called
- // twice when building the non-compound assignment statement, so must
- // not produce expressions that cause side effects.
- std::function<const ast::Expression*()> new_lhs;
-
- // Helper function to create a variable that is a pointer to `expr`.
- auto hoist_pointer_to = [&](const ast::Expression* expr) {
- auto name = b.Sym();
- auto* ptr = b.AddressOf(ctx.Clone(expr));
- auto* decl = b.Decl(b.Const(name, nullptr, ptr));
- hoist_to_decl_before.InsertBefore(ctx.src->Sem().Get(stmt), decl);
- return name;
- };
-
- // Helper function to hoist `expr` to a let declaration.
- auto hoist_expr_to_let = [&](const ast::Expression* expr) {
- auto name = b.Sym();
- auto* decl = b.Decl(b.Const(name, nullptr, ctx.Clone(expr)));
- hoist_to_decl_before.InsertBefore(ctx.src->Sem().Get(stmt), decl);
- return name;
- };
-
- // Helper function that returns `true` if the type of `expr` is a vector.
- auto is_vec = [&](const ast::Expression* expr) {
- return ctx.src->Sem().Get(expr)->Type()->UnwrapRef()->Is<sem::Vector>();
- };
-
- // Hoist the LHS expression subtree into local constants to produce a new
- // LHS that we can evaluate twice.
- // We need to special case compound assignments to vector components since
- // we cannot take the address of a vector component.
- auto* index_accessor = lhs->As<ast::IndexAccessorExpression>();
- auto* member_accessor = lhs->As<ast::MemberAccessorExpression>();
- if (lhs->Is<ast::IdentifierExpression>() ||
- (member_accessor &&
- member_accessor->structure->Is<ast::IdentifierExpression>())) {
- // This is the simple case with no side effects, so we can just use the
- // original LHS expression directly.
- // Before:
- // foo.bar += rhs;
- // After:
- // foo.bar = foo.bar + rhs;
- new_lhs = [&]() { return ctx.Clone(lhs); };
- } else if (index_accessor && is_vec(index_accessor->object)) {
- // This is the case for vector component via an array accessor. We need
- // to capture a pointer to the vector and also the index value.
- // Before:
- // v[idx()] += rhs;
- // After:
- // let vec_ptr = &v;
- // let index = idx();
- // (*vec_ptr)[index] = (*vec_ptr)[index] + rhs;
- auto lhs_ptr = hoist_pointer_to(index_accessor->object);
- auto index = hoist_expr_to_let(index_accessor->index);
- new_lhs = [&, lhs_ptr, index]() {
- return b.IndexAccessor(b.Deref(lhs_ptr), index);
- };
- } else if (member_accessor && is_vec(member_accessor->structure)) {
- // This is the case for vector component via a member accessor. We just
- // need to capture a pointer to the vector.
- // Before:
- // a[idx()].y += rhs;
- // After:
- // let vec_ptr = &a[idx()];
- // (*vec_ptr).y = (*vec_ptr).y + rhs;
- auto lhs_ptr = hoist_pointer_to(member_accessor->structure);
- new_lhs = [&, lhs_ptr]() {
- return b.MemberAccessor(b.Deref(lhs_ptr),
- ctx.Clone(member_accessor->member));
- };
- } else {
- // For all other statements that may have side-effecting expressions, we
- // just need to capture a pointer to the whole LHS.
- // Before:
- // a[idx()] += rhs;
- // After:
- // let lhs_ptr = &a[idx()];
- // (*lhs_ptr) = (*lhs_ptr) + rhs;
- auto lhs_ptr = hoist_pointer_to(lhs);
- new_lhs = [&, lhs_ptr]() { return b.Deref(lhs_ptr); };
+ private:
+ /// The clone context.
+ CloneContext& ctx;
+
+ /// The program builder.
+ ProgramBuilder& b;
+
+ /// The HoistToDeclBefore helper instance.
+ HoistToDeclBefore hoist_to_decl_before;
+
+ public:
+ /// Constructor
+ /// @param context the clone context
+ explicit State(CloneContext& context) : ctx(context), b(*ctx.dst), hoist_to_decl_before(ctx) {}
+
+ /// Replace `stmt` with a regular assignment statement of the form:
+ /// lhs = lhs op rhs
+ /// The LHS expression will only be evaluated once, and any side effects will
+ /// be hoisted to `let` declarations above the assignment statement.
+ /// @param stmt the statement to replace
+ /// @param lhs the lhs expression from the source statement
+ /// @param rhs the rhs expression in the destination module
+ /// @param op the binary operator
+ void Expand(const ast::Statement* stmt,
+ const ast::Expression* lhs,
+ const ast::Expression* rhs,
+ ast::BinaryOp op) {
+ // Helper function to create the new LHS expression. This will be called
+ // twice when building the non-compound assignment statement, so must
+ // not produce expressions that cause side effects.
+ std::function<const ast::Expression*()> new_lhs;
+
+ // Helper function to create a variable that is a pointer to `expr`.
+ auto hoist_pointer_to = [&](const ast::Expression* expr) {
+ auto name = b.Sym();
+ auto* ptr = b.AddressOf(ctx.Clone(expr));
+ auto* decl = b.Decl(b.Let(name, nullptr, ptr));
+ hoist_to_decl_before.InsertBefore(ctx.src->Sem().Get(stmt), decl);
+ return name;
+ };
+
+ // Helper function to hoist `expr` to a let declaration.
+ auto hoist_expr_to_let = [&](const ast::Expression* expr) {
+ auto name = b.Sym();
+ auto* decl = b.Decl(b.Let(name, nullptr, ctx.Clone(expr)));
+ hoist_to_decl_before.InsertBefore(ctx.src->Sem().Get(stmt), decl);
+ return name;
+ };
+
+ // Helper function that returns `true` if the type of `expr` is a vector.
+ auto is_vec = [&](const ast::Expression* expr) {
+ return ctx.src->Sem().Get(expr)->Type()->UnwrapRef()->Is<sem::Vector>();
+ };
+
+ // Hoist the LHS expression subtree into local constants to produce a new
+ // LHS that we can evaluate twice.
+ // We need to special case compound assignments to vector components since
+ // we cannot take the address of a vector component.
+ auto* index_accessor = lhs->As<ast::IndexAccessorExpression>();
+ auto* member_accessor = lhs->As<ast::MemberAccessorExpression>();
+ if (lhs->Is<ast::IdentifierExpression>() ||
+ (member_accessor && member_accessor->structure->Is<ast::IdentifierExpression>())) {
+ // This is the simple case with no side effects, so we can just use the
+ // original LHS expression directly.
+ // Before:
+ // foo.bar += rhs;
+ // After:
+ // foo.bar = foo.bar + rhs;
+ new_lhs = [&]() { return ctx.Clone(lhs); };
+ } else if (index_accessor && is_vec(index_accessor->object)) {
+ // This is the case for vector component via an array accessor. We need
+ // to capture a pointer to the vector and also the index value.
+ // Before:
+ // v[idx()] += rhs;
+ // After:
+ // let vec_ptr = &v;
+ // let index = idx();
+ // (*vec_ptr)[index] = (*vec_ptr)[index] + rhs;
+ auto lhs_ptr = hoist_pointer_to(index_accessor->object);
+ auto index = hoist_expr_to_let(index_accessor->index);
+ new_lhs = [&, lhs_ptr, index]() { return b.IndexAccessor(b.Deref(lhs_ptr), index); };
+ } else if (member_accessor && is_vec(member_accessor->structure)) {
+ // This is the case for vector component via a member accessor. We just
+ // need to capture a pointer to the vector.
+ // Before:
+ // a[idx()].y += rhs;
+ // After:
+ // let vec_ptr = &a[idx()];
+ // (*vec_ptr).y = (*vec_ptr).y + rhs;
+ auto lhs_ptr = hoist_pointer_to(member_accessor->structure);
+ new_lhs = [&, lhs_ptr]() {
+ return b.MemberAccessor(b.Deref(lhs_ptr), ctx.Clone(member_accessor->member));
+ };
+ } else {
+ // For all other statements that may have side-effecting expressions, we
+ // just need to capture a pointer to the whole LHS.
+ // Before:
+ // a[idx()] += rhs;
+ // After:
+ // let lhs_ptr = &a[idx()];
+ // (*lhs_ptr) = (*lhs_ptr) + rhs;
+ auto lhs_ptr = hoist_pointer_to(lhs);
+ new_lhs = [&, lhs_ptr]() { return b.Deref(lhs_ptr); };
+ }
+
+ // Replace the statement with a regular assignment statement.
+ auto* value = b.create<ast::BinaryExpression>(op, new_lhs(), rhs);
+ ctx.Replace(stmt, b.Assign(new_lhs(), value));
}
- // Replace the statement with a regular assignment statement.
- auto* value = b.create<ast::BinaryExpression>(op, new_lhs(), rhs);
- ctx.Replace(stmt, b.Assign(new_lhs(), value));
- }
-
- /// Finalize the transformation and clone the module.
- void Finalize() {
- hoist_to_decl_before.Apply();
- ctx.Clone();
- }
+ /// Finalize the transformation and clone the module.
+ void Finalize() {
+ hoist_to_decl_before.Apply();
+ ctx.Clone();
+ }
};
-void ExpandCompoundAssignment::Run(CloneContext& ctx,
- const DataMap&,
- DataMap&) const {
- State state(ctx);
- for (auto* node : ctx.src->ASTNodes().Objects()) {
- if (auto* assign = node->As<ast::CompoundAssignmentStatement>()) {
- state.Expand(assign, assign->lhs, ctx.Clone(assign->rhs), assign->op);
- } else if (auto* inc_dec = node->As<ast::IncrementDecrementStatement>()) {
- // For increment/decrement statements, `i++` becomes `i = i + 1`.
- // TODO(jrprice): Simplify this when we have untyped literals.
- auto* sem_lhs = ctx.src->Sem().Get(inc_dec->lhs);
- const ast::IntLiteralExpression* one =
- sem_lhs->Type()->UnwrapRef()->is_signed_integer_scalar()
- ? ctx.dst->Expr(1)->As<ast::IntLiteralExpression>()
- : ctx.dst->Expr(1u)->As<ast::IntLiteralExpression>();
- auto op =
- inc_dec->increment ? ast::BinaryOp::kAdd : ast::BinaryOp::kSubtract;
- state.Expand(inc_dec, inc_dec->lhs, one, op);
+void ExpandCompoundAssignment::Run(CloneContext& ctx, const DataMap&, DataMap&) const {
+ State state(ctx);
+ for (auto* node : ctx.src->ASTNodes().Objects()) {
+ if (auto* assign = node->As<ast::CompoundAssignmentStatement>()) {
+ state.Expand(assign, assign->lhs, ctx.Clone(assign->rhs), assign->op);
+ } else if (auto* inc_dec = node->As<ast::IncrementDecrementStatement>()) {
+ // For increment/decrement statements, `i++` becomes `i = i + 1`.
+ // TODO(jrprice): Simplify this when we have untyped literals.
+ auto* sem_lhs = ctx.src->Sem().Get(inc_dec->lhs);
+ const ast::IntLiteralExpression* one =
+ sem_lhs->Type()->UnwrapRef()->is_signed_integer_scalar()
+ ? ctx.dst->Expr(1_i)->As<ast::IntLiteralExpression>()
+ : ctx.dst->Expr(1_u)->As<ast::IntLiteralExpression>();
+ auto op = inc_dec->increment ? ast::BinaryOp::kAdd : ast::BinaryOp::kSubtract;
+ state.Expand(inc_dec, inc_dec->lhs, one, op);
+ }
}
- }
- state.Finalize();
+ state.Finalize();
}
} // namespace tint::transform
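
To make the rewrite concrete: given `var v : i32; v += 1;` the pass produces `v = (v + 1);`, and an increment `i++` becomes `i = (i + 1)` with the literal typed to match the counter's signedness (the `1_i`/`1_u` builder literals introduced above), with any side-effecting parts of the left-hand side hoisted into `let` declarations first. A minimal driver sketch, under the same Manager/Output assumptions as the earlier examples:

// Sketch only: runs ExpandCompoundAssignment in isolation.
#include <utility>

#include "src/tint/transform/expand_compound_assignment.h"
#include "src/tint/transform/manager.h"

tint::Program ExpandCompoundOps(const tint::Program& in) {
    tint::transform::Manager manager;
    manager.Add<tint::transform::ExpandCompoundAssignment>();
    tint::transform::DataMap data;
    tint::transform::Output output = manager.Run(&in, data);
    return std::move(output.program);
}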
diff --git a/chromium/third_party/dawn/src/tint/transform/expand_compound_assignment.h b/chromium/third_party/dawn/src/tint/transform/expand_compound_assignment.h
index b461bed82ea..d38d297d144 100644
--- a/chromium/third_party/dawn/src/tint/transform/expand_compound_assignment.h
+++ b/chromium/third_party/dawn/src/tint/transform/expand_compound_assignment.h
@@ -38,30 +38,26 @@ namespace tint::transform {
///
/// This transform also handles increment and decrement statements in the same
/// manner, by replacing `i++` with `i = i + 1`.
-class ExpandCompoundAssignment
- : public Castable<ExpandCompoundAssignment, Transform> {
- public:
- /// Constructor
- ExpandCompoundAssignment();
- /// Destructor
- ~ExpandCompoundAssignment() override;
+class ExpandCompoundAssignment : public Castable<ExpandCompoundAssignment, Transform> {
+ public:
+ /// Constructor
+ ExpandCompoundAssignment();
+ /// Destructor
+ ~ExpandCompoundAssignment() override;
- /// @param program the program to inspect
- /// @param data optional extra transform-specific input data
- /// @returns true if this transform should be run for the given program
- bool ShouldRun(const Program* program,
- const DataMap& data = {}) const override;
+ /// @param program the program to inspect
+ /// @param data optional extra transform-specific input data
+ /// @returns true if this transform should be run for the given program
+ bool ShouldRun(const Program* program, const DataMap& data = {}) const override;
- protected:
- /// Runs the transform using the CloneContext built for transforming a
- /// program. Run() is responsible for calling Clone() on the CloneContext.
- /// @param ctx the CloneContext primed with the input program and
- /// ProgramBuilder
- /// @param inputs optional extra transform-specific input data
- /// @param outputs optional extra transform-specific output data
- void Run(CloneContext& ctx,
- const DataMap& inputs,
- DataMap& outputs) const override;
+ protected:
+ /// Runs the transform using the CloneContext built for transforming a
+ /// program. Run() is responsible for calling Clone() on the CloneContext.
+ /// @param ctx the CloneContext primed with the input program and
+ /// ProgramBuilder
+ /// @param inputs optional extra transform-specific input data
+ /// @param outputs optional extra transform-specific output data
+ void Run(CloneContext& ctx, const DataMap& inputs, DataMap& outputs) const override;
};
} // namespace tint::transform
diff --git a/chromium/third_party/dawn/src/tint/transform/expand_compound_assignment_test.cc b/chromium/third_party/dawn/src/tint/transform/expand_compound_assignment_test.cc
index d3fa5100031..613a13535ac 100644
--- a/chromium/third_party/dawn/src/tint/transform/expand_compound_assignment_test.cc
+++ b/chromium/third_party/dawn/src/tint/transform/expand_compound_assignment_test.cc
@@ -24,55 +24,55 @@ namespace {
using ExpandCompoundAssignmentTest = TransformTest;
TEST_F(ExpandCompoundAssignmentTest, ShouldRunEmptyModule) {
- auto* src = R"()";
+ auto* src = R"()";
- EXPECT_FALSE(ShouldRun<ExpandCompoundAssignment>(src));
+ EXPECT_FALSE(ShouldRun<ExpandCompoundAssignment>(src));
}
TEST_F(ExpandCompoundAssignmentTest, ShouldRunHasCompoundAssignment) {
- auto* src = R"(
+ auto* src = R"(
fn foo() {
var v : i32;
v += 1;
}
)";
- EXPECT_TRUE(ShouldRun<ExpandCompoundAssignment>(src));
+ EXPECT_TRUE(ShouldRun<ExpandCompoundAssignment>(src));
}
TEST_F(ExpandCompoundAssignmentTest, ShouldRunHasIncrementDecrement) {
- auto* src = R"(
+ auto* src = R"(
fn foo() {
var v : i32;
v++;
}
)";
- EXPECT_TRUE(ShouldRun<ExpandCompoundAssignment>(src));
+ EXPECT_TRUE(ShouldRun<ExpandCompoundAssignment>(src));
}
TEST_F(ExpandCompoundAssignmentTest, Basic) {
- auto* src = R"(
+ auto* src = R"(
fn main() {
var v : i32;
v += 1;
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn main() {
var v : i32;
v = (v + 1);
}
)";
- auto got = Run<ExpandCompoundAssignment>(src);
+ auto got = Run<ExpandCompoundAssignment>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(ExpandCompoundAssignmentTest, LhsPointer) {
- auto* src = R"(
+ auto* src = R"(
fn main() {
var v : i32;
let p = &v;
@@ -80,7 +80,7 @@ fn main() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn main() {
var v : i32;
let p = &(v);
@@ -89,13 +89,13 @@ fn main() {
}
)";
- auto got = Run<ExpandCompoundAssignment>(src);
+ auto got = Run<ExpandCompoundAssignment>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(ExpandCompoundAssignmentTest, LhsStructMember) {
- auto* src = R"(
+ auto* src = R"(
struct S {
m : f32,
}
@@ -106,7 +106,7 @@ fn main() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct S {
m : f32,
}
@@ -117,13 +117,13 @@ fn main() {
}
)";
- auto got = Run<ExpandCompoundAssignment>(src);
+ auto got = Run<ExpandCompoundAssignment>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(ExpandCompoundAssignmentTest, LhsArrayElement) {
- auto* src = R"(
+ auto* src = R"(
var<private> a : array<i32, 4>;
fn idx() -> i32 {
@@ -136,7 +136,7 @@ fn main() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
var<private> a : array<i32, 4>;
fn idx() -> i32 {
@@ -150,13 +150,13 @@ fn main() {
}
)";
- auto got = Run<ExpandCompoundAssignment>(src);
+ auto got = Run<ExpandCompoundAssignment>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(ExpandCompoundAssignmentTest, LhsVectorComponent_ArrayAccessor) {
- auto* src = R"(
+ auto* src = R"(
var<private> v : vec4<i32>;
fn idx() -> i32 {
@@ -169,7 +169,7 @@ fn main() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
var<private> v : vec4<i32>;
fn idx() -> i32 {
@@ -184,33 +184,33 @@ fn main() {
}
)";
- auto got = Run<ExpandCompoundAssignment>(src);
+ auto got = Run<ExpandCompoundAssignment>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(ExpandCompoundAssignmentTest, LhsVectorComponent_MemberAccessor) {
- auto* src = R"(
+ auto* src = R"(
fn main() {
var v : vec4<i32>;
v.y += 1;
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn main() {
var v : vec4<i32>;
v.y = (v.y + 1);
}
)";
- auto got = Run<ExpandCompoundAssignment>(src);
+ auto got = Run<ExpandCompoundAssignment>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(ExpandCompoundAssignmentTest, LhsMatrixColumn) {
- auto* src = R"(
+ auto* src = R"(
var<private> m : mat4x4<f32>;
fn idx() -> i32 {
@@ -223,7 +223,7 @@ fn main() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
var<private> m : mat4x4<f32>;
fn idx() -> i32 {
@@ -237,13 +237,13 @@ fn main() {
}
)";
- auto got = Run<ExpandCompoundAssignment>(src);
+ auto got = Run<ExpandCompoundAssignment>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(ExpandCompoundAssignmentTest, LhsMatrixElement) {
- auto* src = R"(
+ auto* src = R"(
var<private> m : mat4x4<f32>;
fn idx1() -> i32 {
@@ -261,7 +261,7 @@ fn main() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
var<private> m : mat4x4<f32>;
fn idx1() -> i32 {
@@ -281,13 +281,13 @@ fn main() {
}
)";
- auto got = Run<ExpandCompoundAssignment>(src);
+ auto got = Run<ExpandCompoundAssignment>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(ExpandCompoundAssignmentTest, LhsMultipleSideEffects) {
- auto* src = R"(
+ auto* src = R"(
struct S {
a : array<vec4<f32>, 3>,
}
@@ -316,7 +316,7 @@ fn main() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct S {
a : array<vec4<f32>, 3>,
}
@@ -347,13 +347,13 @@ fn main() {
}
)";
- auto got = Run<ExpandCompoundAssignment>(src);
+ auto got = Run<ExpandCompoundAssignment>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(ExpandCompoundAssignmentTest, ForLoopInit) {
- auto* src = R"(
+ auto* src = R"(
var<private> a : array<vec4<i32>, 4>;
var<private> p : i32;
@@ -375,7 +375,7 @@ fn main() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
var<private> a : array<vec4<i32>, 4>;
var<private> p : i32;
@@ -399,13 +399,13 @@ fn main() {
}
)";
- auto got = Run<ExpandCompoundAssignment>(src);
+ auto got = Run<ExpandCompoundAssignment>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(ExpandCompoundAssignmentTest, ForLoopCont) {
- auto* src = R"(
+ auto* src = R"(
var<private> a : array<vec4<i32>, 4>;
var<private> p : i32;
@@ -427,7 +427,7 @@ fn main() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
var<private> a : array<vec4<i32>, 4>;
var<private> p : i32;
@@ -457,93 +457,93 @@ fn main() {
}
)";
- auto got = Run<ExpandCompoundAssignment>(src);
+ auto got = Run<ExpandCompoundAssignment>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(ExpandCompoundAssignmentTest, Increment_I32) {
- auto* src = R"(
+ auto* src = R"(
fn main() {
var v : i32;
v++;
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn main() {
var v : i32;
- v = (v + 1);
+ v = (v + 1i);
}
)";
- auto got = Run<ExpandCompoundAssignment>(src);
+ auto got = Run<ExpandCompoundAssignment>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(ExpandCompoundAssignmentTest, Increment_U32) {
- auto* src = R"(
+ auto* src = R"(
fn main() {
var v : u32;
v++;
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn main() {
var v : u32;
v = (v + 1u);
}
)";
- auto got = Run<ExpandCompoundAssignment>(src);
+ auto got = Run<ExpandCompoundAssignment>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(ExpandCompoundAssignmentTest, Decrement_I32) {
- auto* src = R"(
+ auto* src = R"(
fn main() {
var v : i32;
v--;
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn main() {
var v : i32;
- v = (v - 1);
+ v = (v - 1i);
}
)";
- auto got = Run<ExpandCompoundAssignment>(src);
+ auto got = Run<ExpandCompoundAssignment>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(ExpandCompoundAssignmentTest, Decrement_U32) {
- auto* src = R"(
+ auto* src = R"(
fn main() {
var v : u32;
v--;
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn main() {
var v : u32;
v = (v - 1u);
}
)";
- auto got = Run<ExpandCompoundAssignment>(src);
+ auto got = Run<ExpandCompoundAssignment>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(ExpandCompoundAssignmentTest, Increment_LhsPointer) {
- auto* src = R"(
+ auto* src = R"(
fn main() {
var v : i32;
let p = &v;
@@ -551,22 +551,22 @@ fn main() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn main() {
var v : i32;
let p = &(v);
let tint_symbol = &(*(p));
- *(tint_symbol) = (*(tint_symbol) + 1);
+ *(tint_symbol) = (*(tint_symbol) + 1i);
}
)";
- auto got = Run<ExpandCompoundAssignment>(src);
+ auto got = Run<ExpandCompoundAssignment>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(ExpandCompoundAssignmentTest, Increment_LhsStructMember) {
- auto* src = R"(
+ auto* src = R"(
struct S {
m : i32,
}
@@ -577,24 +577,24 @@ fn main() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct S {
m : i32,
}
fn main() {
var s : S;
- s.m = (s.m + 1);
+ s.m = (s.m + 1i);
}
)";
- auto got = Run<ExpandCompoundAssignment>(src);
+ auto got = Run<ExpandCompoundAssignment>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(ExpandCompoundAssignmentTest, Increment_LhsArrayElement) {
- auto* src = R"(
+ auto* src = R"(
var<private> a : array<i32, 4>;
fn idx() -> i32 {
@@ -607,7 +607,7 @@ fn main() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
var<private> a : array<i32, 4>;
fn idx() -> i32 {
@@ -617,18 +617,17 @@ fn idx() -> i32 {
fn main() {
let tint_symbol = &(a[idx()]);
- *(tint_symbol) = (*(tint_symbol) + 1);
+ *(tint_symbol) = (*(tint_symbol) + 1i);
}
)";
- auto got = Run<ExpandCompoundAssignment>(src);
+ auto got = Run<ExpandCompoundAssignment>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
-TEST_F(ExpandCompoundAssignmentTest,
- Increment_LhsVectorComponent_ArrayAccessor) {
- auto* src = R"(
+TEST_F(ExpandCompoundAssignmentTest, Increment_LhsVectorComponent_ArrayAccessor) {
+ auto* src = R"(
var<private> v : vec4<i32>;
fn idx() -> i32 {
@@ -641,7 +640,7 @@ fn main() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
var<private> v : vec4<i32>;
fn idx() -> i32 {
@@ -652,38 +651,37 @@ fn idx() -> i32 {
fn main() {
let tint_symbol = &(v);
let tint_symbol_1 = idx();
- (*(tint_symbol))[tint_symbol_1] = ((*(tint_symbol))[tint_symbol_1] + 1);
+ (*(tint_symbol))[tint_symbol_1] = ((*(tint_symbol))[tint_symbol_1] + 1i);
}
)";
- auto got = Run<ExpandCompoundAssignment>(src);
+ auto got = Run<ExpandCompoundAssignment>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
-TEST_F(ExpandCompoundAssignmentTest,
- Increment_LhsVectorComponent_MemberAccessor) {
- auto* src = R"(
+TEST_F(ExpandCompoundAssignmentTest, Increment_LhsVectorComponent_MemberAccessor) {
+ auto* src = R"(
fn main() {
var v : vec4<i32>;
v.y++;
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn main() {
var v : vec4<i32>;
- v.y = (v.y + 1);
+ v.y = (v.y + 1i);
}
)";
- auto got = Run<ExpandCompoundAssignment>(src);
+ auto got = Run<ExpandCompoundAssignment>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(ExpandCompoundAssignmentTest, Increment_ForLoopCont) {
- auto* src = R"(
+ auto* src = R"(
var<private> a : array<vec4<i32>, 4>;
var<private> p : i32;
@@ -705,7 +703,7 @@ fn main() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
var<private> a : array<vec4<i32>, 4>;
var<private> p : i32;
@@ -729,15 +727,15 @@ fn main() {
continuing {
let tint_symbol = &(a[idx1()]);
let tint_symbol_1 = idx2();
- (*(tint_symbol))[tint_symbol_1] = ((*(tint_symbol))[tint_symbol_1] + 1);
+ (*(tint_symbol))[tint_symbol_1] = ((*(tint_symbol))[tint_symbol_1] + 1i);
}
}
}
)";
- auto got = Run<ExpandCompoundAssignment>(src);
+ auto got = Run<ExpandCompoundAssignment>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/transform/first_index_offset.cc b/chromium/third_party/dawn/src/tint/transform/first_index_offset.cc
index 46a2dcd2454..9d89b83acdc 100644
--- a/chromium/third_party/dawn/src/tint/transform/first_index_offset.cc
+++ b/chromium/third_party/dawn/src/tint/transform/first_index_offset.cc
@@ -38,18 +38,11 @@ constexpr char kFirstInstanceName[] = "first_instance_index";
} // namespace
FirstIndexOffset::BindingPoint::BindingPoint() = default;
-FirstIndexOffset::BindingPoint::BindingPoint(uint32_t b, uint32_t g)
- : binding(b), group(g) {}
+FirstIndexOffset::BindingPoint::BindingPoint(uint32_t b, uint32_t g) : binding(b), group(g) {}
FirstIndexOffset::BindingPoint::~BindingPoint() = default;
-FirstIndexOffset::Data::Data(bool has_vtx_index,
- bool has_inst_index,
- uint32_t first_vtx_offset,
- uint32_t first_inst_offset)
- : has_vertex_index(has_vtx_index),
- has_instance_index(has_inst_index),
- first_vertex_offset(first_vtx_offset),
- first_instance_offset(first_inst_offset) {}
+FirstIndexOffset::Data::Data(bool has_vtx_or_inst_index)
+ : has_vertex_or_instance_index(has_vtx_or_inst_index) {}
FirstIndexOffset::Data::Data(const Data&) = default;
FirstIndexOffset::Data::~Data() = default;
@@ -57,130 +50,109 @@ FirstIndexOffset::FirstIndexOffset() = default;
FirstIndexOffset::~FirstIndexOffset() = default;
bool FirstIndexOffset::ShouldRun(const Program* program, const DataMap&) const {
- for (auto* fn : program->AST().Functions()) {
- if (fn->PipelineStage() == ast::PipelineStage::kVertex) {
- return true;
+ for (auto* fn : program->AST().Functions()) {
+ if (fn->PipelineStage() == ast::PipelineStage::kVertex) {
+ return true;
+ }
}
- }
- return false;
+ return false;
}
-void FirstIndexOffset::Run(CloneContext& ctx,
- const DataMap& inputs,
- DataMap& outputs) const {
- // Get the uniform buffer binding point
- uint32_t ub_binding = binding_;
- uint32_t ub_group = group_;
- if (auto* binding_point = inputs.Get<BindingPoint>()) {
- ub_binding = binding_point->binding;
- ub_group = binding_point->group;
- }
-
- // Map of builtin usages
- std::unordered_map<const sem::Variable*, const char*> builtin_vars;
- std::unordered_map<const sem::StructMember*, const char*> builtin_members;
-
- bool has_vertex_index = false;
- bool has_instance_index = false;
-
- // Traverse the AST scanning for builtin accesses via variables (includes
- // parameters) or structure member accesses.
- for (auto* node : ctx.src->ASTNodes().Objects()) {
- if (auto* var = node->As<ast::Variable>()) {
- for (auto* attr : var->attributes) {
- if (auto* builtin_attr = attr->As<ast::BuiltinAttribute>()) {
- ast::Builtin builtin = builtin_attr->builtin;
- if (builtin == ast::Builtin::kVertexIndex) {
- auto* sem_var = ctx.src->Sem().Get(var);
- builtin_vars.emplace(sem_var, kFirstVertexName);
- has_vertex_index = true;
- }
- if (builtin == ast::Builtin::kInstanceIndex) {
- auto* sem_var = ctx.src->Sem().Get(var);
- builtin_vars.emplace(sem_var, kFirstInstanceName);
- has_instance_index = true;
- }
- }
- }
+void FirstIndexOffset::Run(CloneContext& ctx, const DataMap& inputs, DataMap& outputs) const {
+ // Get the uniform buffer binding point
+ uint32_t ub_binding = binding_;
+ uint32_t ub_group = group_;
+ if (auto* binding_point = inputs.Get<BindingPoint>()) {
+ ub_binding = binding_point->binding;
+ ub_group = binding_point->group;
}
- if (auto* member = node->As<ast::StructMember>()) {
- for (auto* attr : member->attributes) {
- if (auto* builtin_attr = attr->As<ast::BuiltinAttribute>()) {
- ast::Builtin builtin = builtin_attr->builtin;
- if (builtin == ast::Builtin::kVertexIndex) {
- auto* sem_mem = ctx.src->Sem().Get(member);
- builtin_members.emplace(sem_mem, kFirstVertexName);
- has_vertex_index = true;
- }
- if (builtin == ast::Builtin::kInstanceIndex) {
- auto* sem_mem = ctx.src->Sem().Get(member);
- builtin_members.emplace(sem_mem, kFirstInstanceName);
- has_instance_index = true;
- }
+
+ // Map of builtin usages
+ std::unordered_map<const sem::Variable*, const char*> builtin_vars;
+ std::unordered_map<const sem::StructMember*, const char*> builtin_members;
+
+ bool has_vertex_or_instance_index = false;
+
+ // Traverse the AST scanning for builtin accesses via variables (includes
+ // parameters) or structure member accesses.
+ for (auto* node : ctx.src->ASTNodes().Objects()) {
+ if (auto* var = node->As<ast::Variable>()) {
+ for (auto* attr : var->attributes) {
+ if (auto* builtin_attr = attr->As<ast::BuiltinAttribute>()) {
+ ast::Builtin builtin = builtin_attr->builtin;
+ if (builtin == ast::Builtin::kVertexIndex) {
+ auto* sem_var = ctx.src->Sem().Get(var);
+ builtin_vars.emplace(sem_var, kFirstVertexName);
+ has_vertex_or_instance_index = true;
+ }
+ if (builtin == ast::Builtin::kInstanceIndex) {
+ auto* sem_var = ctx.src->Sem().Get(var);
+ builtin_vars.emplace(sem_var, kFirstInstanceName);
+ has_vertex_or_instance_index = true;
+ }
+ }
+ }
}
- }
- }
- }
-
- // Byte offsets on the uniform buffer
- uint32_t vertex_index_offset = 0;
- uint32_t instance_index_offset = 0;
-
- if (has_vertex_index || has_instance_index) {
- // Add uniform buffer members and calculate byte offsets
- uint32_t offset = 0;
- ast::StructMemberList members;
- if (has_vertex_index) {
- members.push_back(ctx.dst->Member(kFirstVertexName, ctx.dst->ty.u32()));
- vertex_index_offset = offset;
- offset += 4;
- }
- if (has_instance_index) {
- members.push_back(ctx.dst->Member(kFirstInstanceName, ctx.dst->ty.u32()));
- instance_index_offset = offset;
- offset += 4;
- }
- auto* struct_ = ctx.dst->Structure(ctx.dst->Sym(), std::move(members));
-
- // Create a global to hold the uniform buffer
- Symbol buffer_name = ctx.dst->Sym();
- ctx.dst->Global(buffer_name, ctx.dst->ty.Of(struct_),
- ast::StorageClass::kUniform, nullptr,
- ast::AttributeList{
- ctx.dst->create<ast::BindingAttribute>(ub_binding),
- ctx.dst->create<ast::GroupAttribute>(ub_group),
- });
-
- // Fix up all references to the builtins with the offsets
- ctx.ReplaceAll(
- [=, &ctx](const ast::Expression* expr) -> const ast::Expression* {
- if (auto* sem = ctx.src->Sem().Get(expr)) {
- if (auto* user = sem->As<sem::VariableUser>()) {
- auto it = builtin_vars.find(user->Variable());
- if (it != builtin_vars.end()) {
- return ctx.dst->Add(
- ctx.CloneWithoutTransform(expr),
- ctx.dst->MemberAccessor(buffer_name, it->second));
- }
+ if (auto* member = node->As<ast::StructMember>()) {
+ for (auto* attr : member->attributes) {
+ if (auto* builtin_attr = attr->As<ast::BuiltinAttribute>()) {
+ ast::Builtin builtin = builtin_attr->builtin;
+ if (builtin == ast::Builtin::kVertexIndex) {
+ auto* sem_mem = ctx.src->Sem().Get(member);
+ builtin_members.emplace(sem_mem, kFirstVertexName);
+ has_vertex_or_instance_index = true;
+ }
+ if (builtin == ast::Builtin::kInstanceIndex) {
+ auto* sem_mem = ctx.src->Sem().Get(member);
+ builtin_members.emplace(sem_mem, kFirstInstanceName);
+ has_vertex_or_instance_index = true;
+ }
+ }
}
- if (auto* access = sem->As<sem::StructMemberAccess>()) {
- auto it = builtin_members.find(access->Member());
- if (it != builtin_members.end()) {
- return ctx.dst->Add(
- ctx.CloneWithoutTransform(expr),
- ctx.dst->MemberAccessor(buffer_name, it->second));
- }
+ }
+ }
+
+ if (has_vertex_or_instance_index) {
+        // Add the uniform buffer members for the first vertex and first instance indices
+ ast::StructMemberList members;
+ members.push_back(ctx.dst->Member(kFirstVertexName, ctx.dst->ty.u32()));
+ members.push_back(ctx.dst->Member(kFirstInstanceName, ctx.dst->ty.u32()));
+ auto* struct_ = ctx.dst->Structure(ctx.dst->Sym(), std::move(members));
+
+ // Create a global to hold the uniform buffer
+ Symbol buffer_name = ctx.dst->Sym();
+ ctx.dst->Global(buffer_name, ctx.dst->ty.Of(struct_), ast::StorageClass::kUniform, nullptr,
+ ast::AttributeList{
+ ctx.dst->create<ast::BindingAttribute>(ub_binding),
+ ctx.dst->create<ast::GroupAttribute>(ub_group),
+ });
+
+ // Fix up all references to the builtins with the offsets
+ ctx.ReplaceAll([=, &ctx](const ast::Expression* expr) -> const ast::Expression* {
+ if (auto* sem = ctx.src->Sem().Get(expr)) {
+ if (auto* user = sem->As<sem::VariableUser>()) {
+ auto it = builtin_vars.find(user->Variable());
+ if (it != builtin_vars.end()) {
+ return ctx.dst->Add(ctx.CloneWithoutTransform(expr),
+ ctx.dst->MemberAccessor(buffer_name, it->second));
+ }
+ }
+ if (auto* access = sem->As<sem::StructMemberAccess>()) {
+ auto it = builtin_members.find(access->Member());
+ if (it != builtin_members.end()) {
+ return ctx.dst->Add(ctx.CloneWithoutTransform(expr),
+ ctx.dst->MemberAccessor(buffer_name, it->second));
+ }
+ }
}
- }
- // Not interested in this experssion. Just clone.
- return nullptr;
+            // Not interested in this expression. Just clone.
+ return nullptr;
});
- }
+ }
- ctx.Clone();
+ ctx.Clone();
- outputs.Add<Data>(has_vertex_index, has_instance_index, vertex_index_offset,
- instance_index_offset);
+ outputs.Add<Data>(has_vertex_or_instance_index);
}
} // namespace tint::transform
diff --git a/chromium/third_party/dawn/src/tint/transform/first_index_offset.h b/chromium/third_party/dawn/src/tint/transform/first_index_offset.h
index 43d003d4edb..04758cde8de 100644
--- a/chromium/third_party/dawn/src/tint/transform/first_index_offset.h
+++ b/chromium/third_party/dawn/src/tint/transform/first_index_offset.h
@@ -58,82 +58,68 @@ namespace tint::transform {
/// ```
///
class FirstIndexOffset final : public Castable<FirstIndexOffset, Transform> {
- public:
- /// BindingPoint is consumed by the FirstIndexOffset transform.
- /// BindingPoint specifies the binding point of the first index uniform
- /// buffer.
- struct BindingPoint final : public Castable<BindingPoint, transform::Data> {
- /// Constructor
- BindingPoint();
+ public:
+ /// BindingPoint is consumed by the FirstIndexOffset transform.
+ /// BindingPoint specifies the binding point of the first index uniform
+ /// buffer.
+ struct BindingPoint final : public Castable<BindingPoint, transform::Data> {
+ /// Constructor
+ BindingPoint();
+
+ /// Constructor
+ /// @param b the binding index
+ /// @param g the binding group
+ BindingPoint(uint32_t b, uint32_t g);
+
+ /// Destructor
+ ~BindingPoint() override;
+
+ /// `@binding()` for the first vertex / first instance uniform buffer
+ uint32_t binding = 0;
+ /// `@group()` for the first vertex / first instance uniform buffer
+ uint32_t group = 0;
+ };
+
+ /// Data is outputted by the FirstIndexOffset transform.
+    /// Data holds information about shader usage of vertex_index and instance_index.
+ struct Data final : public Castable<Data, transform::Data> {
+ /// Constructor
+ /// @param has_vtx_or_inst_index True if the shader uses vertex_index or
+ /// instance_index
+ explicit Data(bool has_vtx_or_inst_index);
+
+ /// Copy constructor
+ Data(const Data&);
+
+ /// Destructor
+ ~Data() override;
+
+        /// True if the shader uses vertex_index or instance_index
+ const bool has_vertex_or_instance_index;
+ };
/// Constructor
- /// @param b the binding index
- /// @param g the binding group
- BindingPoint(uint32_t b, uint32_t g);
-
- /// Destructor
- ~BindingPoint() override;
-
- /// `@binding()` for the first vertex / first instance uniform buffer
- uint32_t binding = 0;
- /// `@group()` for the first vertex / first instance uniform buffer
- uint32_t group = 0;
- };
-
- /// Data is outputted by the FirstIndexOffset transform.
- /// Data holds information about shader usage and constant buffer offsets.
- struct Data final : public Castable<Data, transform::Data> {
- /// Constructor
- /// @param has_vtx_index True if the shader uses vertex_index
- /// @param has_inst_index True if the shader uses instance_index
- /// @param first_vtx_offset Offset of first vertex into constant buffer
- /// @param first_inst_offset Offset of first instance into constant buffer
- Data(bool has_vtx_index,
- bool has_inst_index,
- uint32_t first_vtx_offset,
- uint32_t first_inst_offset);
-
- /// Copy constructor
- Data(const Data&);
-
+ FirstIndexOffset();
/// Destructor
- ~Data() override;
-
- /// True if the shader uses vertex_index
- const bool has_vertex_index;
- /// True if the shader uses instance_index
- const bool has_instance_index;
- /// Offset of first vertex into constant buffer
- const uint32_t first_vertex_offset;
- /// Offset of first instance into constant buffer
- const uint32_t first_instance_offset;
- };
-
- /// Constructor
- FirstIndexOffset();
- /// Destructor
- ~FirstIndexOffset() override;
-
- /// @param program the program to inspect
- /// @param data optional extra transform-specific input data
- /// @returns true if this transform should be run for the given program
- bool ShouldRun(const Program* program,
- const DataMap& data = {}) const override;
-
- protected:
- /// Runs the transform using the CloneContext built for transforming a
- /// program. Run() is responsible for calling Clone() on the CloneContext.
- /// @param ctx the CloneContext primed with the input program and
- /// ProgramBuilder
- /// @param inputs optional extra transform-specific input data
- /// @param outputs optional extra transform-specific output data
- void Run(CloneContext& ctx,
- const DataMap& inputs,
- DataMap& outputs) const override;
-
- private:
- uint32_t binding_ = 0;
- uint32_t group_ = 0;
+ ~FirstIndexOffset() override;
+
+ /// @param program the program to inspect
+ /// @param data optional extra transform-specific input data
+ /// @returns true if this transform should be run for the given program
+ bool ShouldRun(const Program* program, const DataMap& data = {}) const override;
+
+ protected:
+ /// Runs the transform using the CloneContext built for transforming a
+ /// program. Run() is responsible for calling Clone() on the CloneContext.
+ /// @param ctx the CloneContext primed with the input program and
+ /// ProgramBuilder
+ /// @param inputs optional extra transform-specific input data
+ /// @param outputs optional extra transform-specific output data
+ void Run(CloneContext& ctx, const DataMap& inputs, DataMap& outputs) const override;
+
+ private:
+ uint32_t binding_ = 0;
+ uint32_t group_ = 0;
};
} // namespace tint::transform
diff --git a/chromium/third_party/dawn/src/tint/transform/first_index_offset_test.cc b/chromium/third_party/dawn/src/tint/transform/first_index_offset_test.cc
index ee914390ce7..c159261aa84 100644
--- a/chromium/third_party/dawn/src/tint/transform/first_index_offset_test.cc
+++ b/chromium/third_party/dawn/src/tint/transform/first_index_offset_test.cc
@@ -26,88 +26,86 @@ namespace {
using FirstIndexOffsetTest = TransformTest;
TEST_F(FirstIndexOffsetTest, ShouldRunEmptyModule) {
- auto* src = R"()";
+ auto* src = R"()";
- EXPECT_FALSE(ShouldRun<FirstIndexOffset>(src));
+ EXPECT_FALSE(ShouldRun<FirstIndexOffset>(src));
}
TEST_F(FirstIndexOffsetTest, ShouldRunFragmentStage) {
- auto* src = R"(
-@stage(fragment)
+ auto* src = R"(
+@fragment
fn entry() {
return;
}
)";
- EXPECT_FALSE(ShouldRun<FirstIndexOffset>(src));
+ EXPECT_FALSE(ShouldRun<FirstIndexOffset>(src));
}
TEST_F(FirstIndexOffsetTest, ShouldRunVertexStage) {
- auto* src = R"(
-@stage(vertex)
+ auto* src = R"(
+@vertex
fn entry() -> @builtin(position) vec4<f32> {
return vec4<f32>();
}
)";
- EXPECT_TRUE(ShouldRun<FirstIndexOffset>(src));
+ EXPECT_TRUE(ShouldRun<FirstIndexOffset>(src));
}
TEST_F(FirstIndexOffsetTest, EmptyModule) {
- auto* src = "";
- auto* expect = "";
+ auto* src = "";
+ auto* expect = "";
- DataMap config;
- config.Add<FirstIndexOffset::BindingPoint>(0, 0);
- auto got = Run<FirstIndexOffset>(src, std::move(config));
+ DataMap config;
+ config.Add<FirstIndexOffset::BindingPoint>(0, 0);
+ auto got = Run<FirstIndexOffset>(src, std::move(config));
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
- auto* data = got.data.Get<FirstIndexOffset::Data>();
+ auto* data = got.data.Get<FirstIndexOffset::Data>();
- EXPECT_EQ(data, nullptr);
+ EXPECT_EQ(data, nullptr);
}
TEST_F(FirstIndexOffsetTest, BasicVertexShader) {
- auto* src = R"(
-@stage(vertex)
+ auto* src = R"(
+@vertex
fn entry() -> @builtin(position) vec4<f32> {
return vec4<f32>();
}
)";
- auto* expect = src;
+ auto* expect = src;
- DataMap config;
- config.Add<FirstIndexOffset::BindingPoint>(0, 0);
- auto got = Run<FirstIndexOffset>(src, std::move(config));
+ DataMap config;
+ config.Add<FirstIndexOffset::BindingPoint>(0, 0);
+ auto got = Run<FirstIndexOffset>(src, std::move(config));
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
- auto* data = got.data.Get<FirstIndexOffset::Data>();
+ auto* data = got.data.Get<FirstIndexOffset::Data>();
- ASSERT_NE(data, nullptr);
- EXPECT_EQ(data->has_vertex_index, false);
- EXPECT_EQ(data->has_instance_index, false);
- EXPECT_EQ(data->first_vertex_offset, 0u);
- EXPECT_EQ(data->first_instance_offset, 0u);
+ ASSERT_NE(data, nullptr);
+ EXPECT_EQ(data->has_vertex_or_instance_index, false);
}
TEST_F(FirstIndexOffsetTest, BasicModuleVertexIndex) {
- auto* src = R"(
+ auto* src = R"(
fn test(vert_idx : u32) -> u32 {
return vert_idx;
}
-@stage(vertex)
+@vertex
fn entry(@builtin(vertex_index) vert_idx : u32) -> @builtin(position) vec4<f32> {
test(vert_idx);
return vec4<f32>();
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct tint_symbol {
first_vertex_index : u32,
+ first_instance_index : u32,
}
@binding(1) @group(2) var<uniform> tint_symbol_1 : tint_symbol;
@@ -116,31 +114,28 @@ fn test(vert_idx : u32) -> u32 {
return vert_idx;
}
-@stage(vertex)
+@vertex
fn entry(@builtin(vertex_index) vert_idx : u32) -> @builtin(position) vec4<f32> {
test((vert_idx + tint_symbol_1.first_vertex_index));
return vec4<f32>();
}
)";
- DataMap config;
- config.Add<FirstIndexOffset::BindingPoint>(1, 2);
- auto got = Run<FirstIndexOffset>(src, std::move(config));
+ DataMap config;
+ config.Add<FirstIndexOffset::BindingPoint>(1, 2);
+ auto got = Run<FirstIndexOffset>(src, std::move(config));
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
- auto* data = got.data.Get<FirstIndexOffset::Data>();
+ auto* data = got.data.Get<FirstIndexOffset::Data>();
- ASSERT_NE(data, nullptr);
- EXPECT_EQ(data->has_vertex_index, true);
- EXPECT_EQ(data->has_instance_index, false);
- EXPECT_EQ(data->first_vertex_offset, 0u);
- EXPECT_EQ(data->first_instance_offset, 0u);
+ ASSERT_NE(data, nullptr);
+ EXPECT_EQ(data->has_vertex_or_instance_index, true);
}
TEST_F(FirstIndexOffsetTest, BasicModuleVertexIndex_OutOfOrder) {
- auto* src = R"(
-@stage(vertex)
+ auto* src = R"(
+@vertex
fn entry(@builtin(vertex_index) vert_idx : u32) -> @builtin(position) vec4<f32> {
test(vert_idx);
return vec4<f32>();
@@ -151,14 +146,15 @@ fn test(vert_idx : u32) -> u32 {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct tint_symbol {
first_vertex_index : u32,
+ first_instance_index : u32,
}
@binding(1) @group(2) var<uniform> tint_symbol_1 : tint_symbol;
-@stage(vertex)
+@vertex
fn entry(@builtin(vertex_index) vert_idx : u32) -> @builtin(position) vec4<f32> {
test((vert_idx + tint_symbol_1.first_vertex_index));
return vec4<f32>();
@@ -169,36 +165,34 @@ fn test(vert_idx : u32) -> u32 {
}
)";
- DataMap config;
- config.Add<FirstIndexOffset::BindingPoint>(1, 2);
- auto got = Run<FirstIndexOffset>(src, std::move(config));
+ DataMap config;
+ config.Add<FirstIndexOffset::BindingPoint>(1, 2);
+ auto got = Run<FirstIndexOffset>(src, std::move(config));
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
- auto* data = got.data.Get<FirstIndexOffset::Data>();
+ auto* data = got.data.Get<FirstIndexOffset::Data>();
- ASSERT_NE(data, nullptr);
- EXPECT_EQ(data->has_vertex_index, true);
- EXPECT_EQ(data->has_instance_index, false);
- EXPECT_EQ(data->first_vertex_offset, 0u);
- EXPECT_EQ(data->first_instance_offset, 0u);
+ ASSERT_NE(data, nullptr);
+ EXPECT_EQ(data->has_vertex_or_instance_index, true);
}
TEST_F(FirstIndexOffsetTest, BasicModuleInstanceIndex) {
- auto* src = R"(
+ auto* src = R"(
fn test(inst_idx : u32) -> u32 {
return inst_idx;
}
-@stage(vertex)
+@vertex
fn entry(@builtin(instance_index) inst_idx : u32) -> @builtin(position) vec4<f32> {
test(inst_idx);
return vec4<f32>();
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct tint_symbol {
+ first_vertex_index : u32,
first_instance_index : u32,
}
@@ -208,31 +202,28 @@ fn test(inst_idx : u32) -> u32 {
return inst_idx;
}
-@stage(vertex)
+@vertex
fn entry(@builtin(instance_index) inst_idx : u32) -> @builtin(position) vec4<f32> {
test((inst_idx + tint_symbol_1.first_instance_index));
return vec4<f32>();
}
)";
- DataMap config;
- config.Add<FirstIndexOffset::BindingPoint>(1, 7);
- auto got = Run<FirstIndexOffset>(src, std::move(config));
+ DataMap config;
+ config.Add<FirstIndexOffset::BindingPoint>(1, 7);
+ auto got = Run<FirstIndexOffset>(src, std::move(config));
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
- auto* data = got.data.Get<FirstIndexOffset::Data>();
+ auto* data = got.data.Get<FirstIndexOffset::Data>();
- ASSERT_NE(data, nullptr);
- EXPECT_EQ(data->has_vertex_index, false);
- EXPECT_EQ(data->has_instance_index, true);
- EXPECT_EQ(data->first_vertex_offset, 0u);
- EXPECT_EQ(data->first_instance_offset, 0u);
+ ASSERT_NE(data, nullptr);
+ EXPECT_EQ(data->has_vertex_or_instance_index, true);
}
TEST_F(FirstIndexOffsetTest, BasicModuleInstanceIndex_OutOfOrder) {
- auto* src = R"(
-@stage(vertex)
+ auto* src = R"(
+@vertex
fn entry(@builtin(instance_index) inst_idx : u32) -> @builtin(position) vec4<f32> {
test(inst_idx);
return vec4<f32>();
@@ -243,14 +234,15 @@ fn test(inst_idx : u32) -> u32 {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct tint_symbol {
+ first_vertex_index : u32,
first_instance_index : u32,
}
@binding(1) @group(7) var<uniform> tint_symbol_1 : tint_symbol;
-@stage(vertex)
+@vertex
fn entry(@builtin(instance_index) inst_idx : u32) -> @builtin(position) vec4<f32> {
test((inst_idx + tint_symbol_1.first_instance_index));
return vec4<f32>();
@@ -261,23 +253,20 @@ fn test(inst_idx : u32) -> u32 {
}
)";
- DataMap config;
- config.Add<FirstIndexOffset::BindingPoint>(1, 7);
- auto got = Run<FirstIndexOffset>(src, std::move(config));
+ DataMap config;
+ config.Add<FirstIndexOffset::BindingPoint>(1, 7);
+ auto got = Run<FirstIndexOffset>(src, std::move(config));
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
- auto* data = got.data.Get<FirstIndexOffset::Data>();
+ auto* data = got.data.Get<FirstIndexOffset::Data>();
- ASSERT_NE(data, nullptr);
- EXPECT_EQ(data->has_vertex_index, false);
- EXPECT_EQ(data->has_instance_index, true);
- EXPECT_EQ(data->first_vertex_offset, 0u);
- EXPECT_EQ(data->first_instance_offset, 0u);
+ ASSERT_NE(data, nullptr);
+ EXPECT_EQ(data->has_vertex_or_instance_index, true);
}
TEST_F(FirstIndexOffsetTest, BasicModuleBothIndex) {
- auto* src = R"(
+ auto* src = R"(
fn test(instance_idx : u32, vert_idx : u32) -> u32 {
return instance_idx + vert_idx;
}
@@ -287,14 +276,14 @@ struct Inputs {
@builtin(vertex_index) vert_idx : u32,
};
-@stage(vertex)
+@vertex
fn entry(inputs : Inputs) -> @builtin(position) vec4<f32> {
test(inputs.instance_idx, inputs.vert_idx);
return vec4<f32>();
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct tint_symbol {
first_vertex_index : u32,
first_instance_index : u32,
@@ -313,31 +302,28 @@ struct Inputs {
vert_idx : u32,
}
-@stage(vertex)
+@vertex
fn entry(inputs : Inputs) -> @builtin(position) vec4<f32> {
test((inputs.instance_idx + tint_symbol_1.first_instance_index), (inputs.vert_idx + tint_symbol_1.first_vertex_index));
return vec4<f32>();
}
)";
- DataMap config;
- config.Add<FirstIndexOffset::BindingPoint>(1, 2);
- auto got = Run<FirstIndexOffset>(src, std::move(config));
+ DataMap config;
+ config.Add<FirstIndexOffset::BindingPoint>(1, 2);
+ auto got = Run<FirstIndexOffset>(src, std::move(config));
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
- auto* data = got.data.Get<FirstIndexOffset::Data>();
+ auto* data = got.data.Get<FirstIndexOffset::Data>();
- ASSERT_NE(data, nullptr);
- EXPECT_EQ(data->has_vertex_index, true);
- EXPECT_EQ(data->has_instance_index, true);
- EXPECT_EQ(data->first_vertex_offset, 0u);
- EXPECT_EQ(data->first_instance_offset, 4u);
+ ASSERT_NE(data, nullptr);
+ EXPECT_EQ(data->has_vertex_or_instance_index, true);
}
TEST_F(FirstIndexOffsetTest, BasicModuleBothIndex_OutOfOrder) {
- auto* src = R"(
-@stage(vertex)
+ auto* src = R"(
+@vertex
fn entry(inputs : Inputs) -> @builtin(position) vec4<f32> {
test(inputs.instance_idx, inputs.vert_idx);
return vec4<f32>();
@@ -353,7 +339,7 @@ fn test(instance_idx : u32, vert_idx : u32) -> u32 {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct tint_symbol {
first_vertex_index : u32,
first_instance_index : u32,
@@ -361,7 +347,7 @@ struct tint_symbol {
@binding(1) @group(2) var<uniform> tint_symbol_1 : tint_symbol;
-@stage(vertex)
+@vertex
fn entry(inputs : Inputs) -> @builtin(position) vec4<f32> {
test((inputs.instance_idx + tint_symbol_1.first_instance_index), (inputs.vert_idx + tint_symbol_1.first_vertex_index));
return vec4<f32>();
@@ -379,23 +365,20 @@ fn test(instance_idx : u32, vert_idx : u32) -> u32 {
}
)";
- DataMap config;
- config.Add<FirstIndexOffset::BindingPoint>(1, 2);
- auto got = Run<FirstIndexOffset>(src, std::move(config));
+ DataMap config;
+ config.Add<FirstIndexOffset::BindingPoint>(1, 2);
+ auto got = Run<FirstIndexOffset>(src, std::move(config));
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
- auto* data = got.data.Get<FirstIndexOffset::Data>();
+ auto* data = got.data.Get<FirstIndexOffset::Data>();
- ASSERT_NE(data, nullptr);
- EXPECT_EQ(data->has_vertex_index, true);
- EXPECT_EQ(data->has_instance_index, true);
- EXPECT_EQ(data->first_vertex_offset, 0u);
- EXPECT_EQ(data->first_instance_offset, 4u);
+ ASSERT_NE(data, nullptr);
+ EXPECT_EQ(data->has_vertex_or_instance_index, true);
}
TEST_F(FirstIndexOffsetTest, NestedCalls) {
- auto* src = R"(
+ auto* src = R"(
fn func1(vert_idx : u32) -> u32 {
return vert_idx;
}
@@ -404,16 +387,17 @@ fn func2(vert_idx : u32) -> u32 {
return func1(vert_idx);
}
-@stage(vertex)
+@vertex
fn entry(@builtin(vertex_index) vert_idx : u32) -> @builtin(position) vec4<f32> {
func2(vert_idx);
return vec4<f32>();
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct tint_symbol {
first_vertex_index : u32,
+ first_instance_index : u32,
}
@binding(1) @group(2) var<uniform> tint_symbol_1 : tint_symbol;
@@ -426,31 +410,28 @@ fn func2(vert_idx : u32) -> u32 {
return func1(vert_idx);
}
-@stage(vertex)
+@vertex
fn entry(@builtin(vertex_index) vert_idx : u32) -> @builtin(position) vec4<f32> {
func2((vert_idx + tint_symbol_1.first_vertex_index));
return vec4<f32>();
}
)";
- DataMap config;
- config.Add<FirstIndexOffset::BindingPoint>(1, 2);
- auto got = Run<FirstIndexOffset>(src, std::move(config));
+ DataMap config;
+ config.Add<FirstIndexOffset::BindingPoint>(1, 2);
+ auto got = Run<FirstIndexOffset>(src, std::move(config));
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
- auto* data = got.data.Get<FirstIndexOffset::Data>();
+ auto* data = got.data.Get<FirstIndexOffset::Data>();
- ASSERT_NE(data, nullptr);
- EXPECT_EQ(data->has_vertex_index, true);
- EXPECT_EQ(data->has_instance_index, false);
- EXPECT_EQ(data->first_vertex_offset, 0u);
- EXPECT_EQ(data->first_instance_offset, 0u);
+ ASSERT_NE(data, nullptr);
+ EXPECT_EQ(data->has_vertex_or_instance_index, true);
}
TEST_F(FirstIndexOffsetTest, NestedCalls_OutOfOrder) {
- auto* src = R"(
-@stage(vertex)
+ auto* src = R"(
+@vertex
fn entry(@builtin(vertex_index) vert_idx : u32) -> @builtin(position) vec4<f32> {
func2(vert_idx);
return vec4<f32>();
@@ -465,14 +446,15 @@ fn func1(vert_idx : u32) -> u32 {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct tint_symbol {
first_vertex_index : u32,
+ first_instance_index : u32,
}
@binding(1) @group(2) var<uniform> tint_symbol_1 : tint_symbol;
-@stage(vertex)
+@vertex
fn entry(@builtin(vertex_index) vert_idx : u32) -> @builtin(position) vec4<f32> {
func2((vert_idx + tint_symbol_1.first_vertex_index));
return vec4<f32>();
@@ -487,47 +469,44 @@ fn func1(vert_idx : u32) -> u32 {
}
)";
- DataMap config;
- config.Add<FirstIndexOffset::BindingPoint>(1, 2);
- auto got = Run<FirstIndexOffset>(src, std::move(config));
+ DataMap config;
+ config.Add<FirstIndexOffset::BindingPoint>(1, 2);
+ auto got = Run<FirstIndexOffset>(src, std::move(config));
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
- auto* data = got.data.Get<FirstIndexOffset::Data>();
+ auto* data = got.data.Get<FirstIndexOffset::Data>();
- ASSERT_NE(data, nullptr);
- EXPECT_EQ(data->has_vertex_index, true);
- EXPECT_EQ(data->has_instance_index, false);
- EXPECT_EQ(data->first_vertex_offset, 0u);
- EXPECT_EQ(data->first_instance_offset, 0u);
+ ASSERT_NE(data, nullptr);
+ EXPECT_EQ(data->has_vertex_or_instance_index, true);
}
TEST_F(FirstIndexOffsetTest, MultipleEntryPoints) {
- auto* src = R"(
+ auto* src = R"(
fn func(i : u32) -> u32 {
return i;
}
-@stage(vertex)
+@vertex
fn entry_a(@builtin(vertex_index) vert_idx : u32) -> @builtin(position) vec4<f32> {
func(vert_idx);
return vec4<f32>();
}
-@stage(vertex)
+@vertex
fn entry_b(@builtin(vertex_index) vert_idx : u32, @builtin(instance_index) inst_idx : u32) -> @builtin(position) vec4<f32> {
func(vert_idx + inst_idx);
return vec4<f32>();
}
-@stage(vertex)
+@vertex
fn entry_c(@builtin(instance_index) inst_idx : u32) -> @builtin(position) vec4<f32> {
func(inst_idx);
return vec4<f32>();
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct tint_symbol {
first_vertex_index : u32,
first_instance_index : u32,
@@ -539,55 +518,52 @@ fn func(i : u32) -> u32 {
return i;
}
-@stage(vertex)
+@vertex
fn entry_a(@builtin(vertex_index) vert_idx : u32) -> @builtin(position) vec4<f32> {
func((vert_idx + tint_symbol_1.first_vertex_index));
return vec4<f32>();
}
-@stage(vertex)
+@vertex
fn entry_b(@builtin(vertex_index) vert_idx : u32, @builtin(instance_index) inst_idx : u32) -> @builtin(position) vec4<f32> {
func(((vert_idx + tint_symbol_1.first_vertex_index) + (inst_idx + tint_symbol_1.first_instance_index)));
return vec4<f32>();
}
-@stage(vertex)
+@vertex
fn entry_c(@builtin(instance_index) inst_idx : u32) -> @builtin(position) vec4<f32> {
func((inst_idx + tint_symbol_1.first_instance_index));
return vec4<f32>();
}
)";
- DataMap config;
- config.Add<FirstIndexOffset::BindingPoint>(1, 2);
- auto got = Run<FirstIndexOffset>(src, std::move(config));
+ DataMap config;
+ config.Add<FirstIndexOffset::BindingPoint>(1, 2);
+ auto got = Run<FirstIndexOffset>(src, std::move(config));
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
- auto* data = got.data.Get<FirstIndexOffset::Data>();
+ auto* data = got.data.Get<FirstIndexOffset::Data>();
- ASSERT_NE(data, nullptr);
- EXPECT_EQ(data->has_vertex_index, true);
- EXPECT_EQ(data->has_instance_index, true);
- EXPECT_EQ(data->first_vertex_offset, 0u);
- EXPECT_EQ(data->first_instance_offset, 4u);
+ ASSERT_NE(data, nullptr);
+ EXPECT_EQ(data->has_vertex_or_instance_index, true);
}
TEST_F(FirstIndexOffsetTest, MultipleEntryPoints_OutOfOrder) {
- auto* src = R"(
-@stage(vertex)
+ auto* src = R"(
+@vertex
fn entry_a(@builtin(vertex_index) vert_idx : u32) -> @builtin(position) vec4<f32> {
func(vert_idx);
return vec4<f32>();
}
-@stage(vertex)
+@vertex
fn entry_b(@builtin(vertex_index) vert_idx : u32, @builtin(instance_index) inst_idx : u32) -> @builtin(position) vec4<f32> {
func(vert_idx + inst_idx);
return vec4<f32>();
}
-@stage(vertex)
+@vertex
fn entry_c(@builtin(instance_index) inst_idx : u32) -> @builtin(position) vec4<f32> {
func(inst_idx);
return vec4<f32>();
@@ -598,7 +574,7 @@ fn func(i : u32) -> u32 {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct tint_symbol {
first_vertex_index : u32,
first_instance_index : u32,
@@ -606,19 +582,19 @@ struct tint_symbol {
@binding(1) @group(2) var<uniform> tint_symbol_1 : tint_symbol;
-@stage(vertex)
+@vertex
fn entry_a(@builtin(vertex_index) vert_idx : u32) -> @builtin(position) vec4<f32> {
func((vert_idx + tint_symbol_1.first_vertex_index));
return vec4<f32>();
}
-@stage(vertex)
+@vertex
fn entry_b(@builtin(vertex_index) vert_idx : u32, @builtin(instance_index) inst_idx : u32) -> @builtin(position) vec4<f32> {
func(((vert_idx + tint_symbol_1.first_vertex_index) + (inst_idx + tint_symbol_1.first_instance_index)));
return vec4<f32>();
}
-@stage(vertex)
+@vertex
fn entry_c(@builtin(instance_index) inst_idx : u32) -> @builtin(position) vec4<f32> {
func((inst_idx + tint_symbol_1.first_instance_index));
return vec4<f32>();
@@ -629,19 +605,16 @@ fn func(i : u32) -> u32 {
}
)";
- DataMap config;
- config.Add<FirstIndexOffset::BindingPoint>(1, 2);
- auto got = Run<FirstIndexOffset>(src, std::move(config));
+ DataMap config;
+ config.Add<FirstIndexOffset::BindingPoint>(1, 2);
+ auto got = Run<FirstIndexOffset>(src, std::move(config));
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
- auto* data = got.data.Get<FirstIndexOffset::Data>();
+ auto* data = got.data.Get<FirstIndexOffset::Data>();
- ASSERT_NE(data, nullptr);
- EXPECT_EQ(data->has_vertex_index, true);
- EXPECT_EQ(data->has_instance_index, true);
- EXPECT_EQ(data->first_vertex_offset, 0u);
- EXPECT_EQ(data->first_instance_offset, 4u);
+ ASSERT_NE(data, nullptr);
+ EXPECT_EQ(data->has_vertex_or_instance_index, true);
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/transform/fold_constants.cc b/chromium/third_party/dawn/src/tint/transform/fold_constants.cc
deleted file mode 100644
index b814c5cdc80..00000000000
--- a/chromium/third_party/dawn/src/tint/transform/fold_constants.cc
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright 2021 The Tint Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "src/tint/transform/fold_constants.h"
-
-#include <unordered_map>
-#include <utility>
-#include <vector>
-
-#include "src/tint/program_builder.h"
-#include "src/tint/sem/call.h"
-#include "src/tint/sem/expression.h"
-#include "src/tint/sem/type_constructor.h"
-#include "src/tint/sem/type_conversion.h"
-
-TINT_INSTANTIATE_TYPEINFO(tint::transform::FoldConstants);
-
-namespace tint::transform {
-
-FoldConstants::FoldConstants() = default;
-
-FoldConstants::~FoldConstants() = default;
-
-void FoldConstants::Run(CloneContext& ctx, const DataMap&, DataMap&) const {
- ctx.ReplaceAll([&](const ast::Expression* expr) -> const ast::Expression* {
- auto* call = ctx.src->Sem().Get<sem::Call>(expr);
- if (!call) {
- return nullptr;
- }
-
- auto value = call->ConstantValue();
- if (!value.IsValid()) {
- return nullptr;
- }
-
- auto* ty = call->Type();
-
- if (!call->Target()->IsAnyOf<sem::TypeConversion, sem::TypeConstructor>()) {
- return nullptr;
- }
-
- // If original ctor expression had no init values, don't replace the
- // expression
- if (call->Arguments().empty()) {
- return nullptr;
- }
-
- if (auto* vec = ty->As<sem::Vector>()) {
- uint32_t vec_size = static_cast<uint32_t>(vec->Width());
-
- // We'd like to construct the new vector with the same number of
- // constructor args that the original node had, but after folding
- // constants, cases like the following are problematic:
- //
- // vec3<f32> = vec3<f32>(vec2<f32>, 1.0) // vec_size=3, ctor_size=2
- //
- // In this case, creating a vec3 with 2 args is invalid, so we should
- // create it with 3. So what we do is construct with vec_size args,
- // except if the original vector was single-value initialized, in
- // which case, we only construct with one arg again.
- uint32_t ctor_size = (call->Arguments().size() == 1) ? 1 : vec_size;
-
- ast::ExpressionList ctors;
- for (uint32_t i = 0; i < ctor_size; ++i) {
- value.WithScalarAt(
- i, [&](auto&& s) { ctors.emplace_back(ctx.dst->Expr(s)); });
- }
-
- auto* el_ty = CreateASTTypeFor(ctx, vec->type());
- return ctx.dst->vec(el_ty, vec_size, ctors);
- }
-
- if (ty->is_scalar()) {
- return value.WithScalarAt(0,
- [&](auto&& s) -> const ast::LiteralExpression* {
- return ctx.dst->Expr(s);
- });
- }
-
- return nullptr;
- });
-
- ctx.Clone();
-}
-
-} // namespace tint::transform
diff --git a/chromium/third_party/dawn/src/tint/transform/fold_constants.h b/chromium/third_party/dawn/src/tint/transform/fold_constants.h
deleted file mode 100644
index fbe7abd7b12..00000000000
--- a/chromium/third_party/dawn/src/tint/transform/fold_constants.h
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2021 The Tint Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef SRC_TINT_TRANSFORM_FOLD_CONSTANTS_H_
-#define SRC_TINT_TRANSFORM_FOLD_CONSTANTS_H_
-
-#include "src/tint/transform/transform.h"
-
-namespace tint::transform {
-
-/// FoldConstants transforms the AST by folding constant expressions
-class FoldConstants final : public Castable<FoldConstants, Transform> {
- public:
- /// Constructor
- FoldConstants();
-
- /// Destructor
- ~FoldConstants() override;
-
- protected:
- /// Runs the transform using the CloneContext built for transforming a
- /// program. Run() is responsible for calling Clone() on the CloneContext.
- /// @param ctx the CloneContext primed with the input program and
- /// ProgramBuilder
- /// @param inputs optional extra transform-specific input data
- /// @param outputs optional extra transform-specific output data
- void Run(CloneContext& ctx,
- const DataMap& inputs,
- DataMap& outputs) const override;
-};
-
-} // namespace tint::transform
-
-#endif // SRC_TINT_TRANSFORM_FOLD_CONSTANTS_H_
diff --git a/chromium/third_party/dawn/src/tint/transform/fold_constants_test.cc b/chromium/third_party/dawn/src/tint/transform/fold_constants_test.cc
deleted file mode 100644
index d8121bc256d..00000000000
--- a/chromium/third_party/dawn/src/tint/transform/fold_constants_test.cc
+++ /dev/null
@@ -1,425 +0,0 @@
-// Copyright 2021 The Tint Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "src/tint/transform/fold_constants.h"
-
-#include <memory>
-#include <utility>
-#include <vector>
-
-#include "src/tint/transform/test_helper.h"
-
-namespace tint::transform {
-namespace {
-
-using FoldConstantsTest = TransformTest;
-
-TEST_F(FoldConstantsTest, Module_Scalar_NoConversion) {
- auto* src = R"(
-var<private> a : i32 = i32(123);
-var<private> b : u32 = u32(123u);
-var<private> c : f32 = f32(123.0);
-var<private> d : bool = bool(true);
-
-fn f() {
-}
-)";
-
- auto* expect = R"(
-var<private> a : i32 = 123;
-
-var<private> b : u32 = 123u;
-
-var<private> c : f32 = 123.0;
-
-var<private> d : bool = true;
-
-fn f() {
-}
-)";
-
- auto got = Run<FoldConstants>(src);
-
- EXPECT_EQ(expect, str(got));
-}
-
-TEST_F(FoldConstantsTest, Module_Scalar_Conversion) {
- auto* src = R"(
-var<private> a : i32 = i32(123.0);
-var<private> b : u32 = u32(123);
-var<private> c : f32 = f32(123u);
-var<private> d : bool = bool(123);
-
-fn f() {
-}
-)";
-
- auto* expect = R"(
-var<private> a : i32 = 123;
-
-var<private> b : u32 = 123u;
-
-var<private> c : f32 = 123.0;
-
-var<private> d : bool = true;
-
-fn f() {
-}
-)";
-
- auto got = Run<FoldConstants>(src);
-
- EXPECT_EQ(expect, str(got));
-}
-
-TEST_F(FoldConstantsTest, Module_Scalar_MultipleConversions) {
- auto* src = R"(
-var<private> a : i32 = i32(u32(f32(u32(i32(123.0)))));
-var<private> b : u32 = u32(i32(f32(i32(u32(123)))));
-var<private> c : f32 = f32(u32(i32(u32(f32(123u)))));
-var<private> d : bool = bool(i32(f32(i32(u32(123)))));
-
-fn f() {
-}
-)";
-
- auto* expect = R"(
-var<private> a : i32 = 123;
-
-var<private> b : u32 = 123u;
-
-var<private> c : f32 = 123.0;
-
-var<private> d : bool = true;
-
-fn f() {
-}
-)";
-
- auto got = Run<FoldConstants>(src);
-
- EXPECT_EQ(expect, str(got));
-}
-
-TEST_F(FoldConstantsTest, Module_Vector_NoConversion) {
- auto* src = R"(
-var<private> a : vec3<i32> = vec3<i32>(123);
-var<private> b : vec3<u32> = vec3<u32>(123u);
-var<private> c : vec3<f32> = vec3<f32>(123.0);
-var<private> d : vec3<bool> = vec3<bool>(true);
-
-fn f() {
-}
-)";
-
- auto* expect = R"(
-var<private> a : vec3<i32> = vec3<i32>(123);
-
-var<private> b : vec3<u32> = vec3<u32>(123u);
-
-var<private> c : vec3<f32> = vec3<f32>(123.0);
-
-var<private> d : vec3<bool> = vec3<bool>(true);
-
-fn f() {
-}
-)";
-
- auto got = Run<FoldConstants>(src);
-
- EXPECT_EQ(expect, str(got));
-}
-
-TEST_F(FoldConstantsTest, Module_Vector_Conversion) {
- auto* src = R"(
-var<private> a : vec3<i32> = vec3<i32>(vec3<f32>(123.0));
-var<private> b : vec3<u32> = vec3<u32>(vec3<i32>(123));
-var<private> c : vec3<f32> = vec3<f32>(vec3<u32>(123u));
-var<private> d : vec3<bool> = vec3<bool>(vec3<i32>(123));
-
-fn f() {
-}
-)";
-
- auto* expect = R"(
-var<private> a : vec3<i32> = vec3<i32>(123);
-
-var<private> b : vec3<u32> = vec3<u32>(123u);
-
-var<private> c : vec3<f32> = vec3<f32>(123.0);
-
-var<private> d : vec3<bool> = vec3<bool>(true);
-
-fn f() {
-}
-)";
-
- auto got = Run<FoldConstants>(src);
-
- EXPECT_EQ(expect, str(got));
-}
-
-TEST_F(FoldConstantsTest, Module_Vector_MultipleConversions) {
- auto* src = R"(
-var<private> a : vec3<i32> = vec3<i32>(vec3<u32>(vec3<f32>(vec3<u32>(u32(123.0)))));
-var<private> b : vec3<u32> = vec3<u32>(vec3<i32>(vec3<f32>(vec3<i32>(i32(123)))));
-var<private> c : vec3<f32> = vec3<f32>(vec3<u32>(vec3<i32>(vec3<u32>(u32(123u)))));
-var<private> d : vec3<bool> = vec3<bool>(vec3<i32>(vec3<f32>(vec3<i32>(i32(123)))));
-
-fn f() {
-}
-)";
-
- auto* expect = R"(
-var<private> a : vec3<i32> = vec3<i32>(123);
-
-var<private> b : vec3<u32> = vec3<u32>(123u);
-
-var<private> c : vec3<f32> = vec3<f32>(123.0);
-
-var<private> d : vec3<bool> = vec3<bool>(true);
-
-fn f() {
-}
-)";
-
- auto got = Run<FoldConstants>(src);
-
- EXPECT_EQ(expect, str(got));
-}
-
-TEST_F(FoldConstantsTest, Module_Vector_MixedSizeConversions) {
- auto* src = R"(
-var<private> a : vec4<i32> = vec4<i32>(vec3<i32>(vec3<u32>(1u, 2u, 3u)), 4);
-var<private> b : vec4<i32> = vec4<i32>(vec2<i32>(vec2<u32>(1u, 2u)), vec2<i32>(4, 5));
-var<private> c : vec4<i32> = vec4<i32>(1, vec2<i32>(vec2<f32>(2.0, 3.0)), 4);
-var<private> d : vec4<i32> = vec4<i32>(1, 2, vec2<i32>(vec2<f32>(3.0, 4.0)));
-var<private> e : vec4<bool> = vec4<bool>(false, bool(f32(1.0)), vec2<bool>(vec2<i32>(0, i32(4u))));
-
-fn f() {
-}
-)";
-
- auto* expect = R"(
-var<private> a : vec4<i32> = vec4<i32>(1, 2, 3, 4);
-
-var<private> b : vec4<i32> = vec4<i32>(1, 2, 4, 5);
-
-var<private> c : vec4<i32> = vec4<i32>(1, 2, 3, 4);
-
-var<private> d : vec4<i32> = vec4<i32>(1, 2, 3, 4);
-
-var<private> e : vec4<bool> = vec4<bool>(false, true, false, true);
-
-fn f() {
-}
-)";
-
- auto got = Run<FoldConstants>(src);
-
- EXPECT_EQ(expect, str(got));
-}
-
-TEST_F(FoldConstantsTest, Function_Scalar_NoConversion) {
- auto* src = R"(
-fn f() {
- var a : i32 = i32(123);
- var b : u32 = u32(123u);
- var c : f32 = f32(123.0);
- var d : bool = bool(true);
-}
-)";
-
- auto* expect = R"(
-fn f() {
- var a : i32 = 123;
- var b : u32 = 123u;
- var c : f32 = 123.0;
- var d : bool = true;
-}
-)";
-
- auto got = Run<FoldConstants>(src);
-
- EXPECT_EQ(expect, str(got));
-}
-
-TEST_F(FoldConstantsTest, Function_Scalar_Conversion) {
- auto* src = R"(
-fn f() {
- var a : i32 = i32(123.0);
- var b : u32 = u32(123);
- var c : f32 = f32(123u);
- var d : bool = bool(123);
-}
-)";
-
- auto* expect = R"(
-fn f() {
- var a : i32 = 123;
- var b : u32 = 123u;
- var c : f32 = 123.0;
- var d : bool = true;
-}
-)";
-
- auto got = Run<FoldConstants>(src);
-
- EXPECT_EQ(expect, str(got));
-}
-
-TEST_F(FoldConstantsTest, Function_Scalar_MultipleConversions) {
- auto* src = R"(
-fn f() {
- var a : i32 = i32(u32(f32(u32(i32(123.0)))));
- var b : u32 = u32(i32(f32(i32(u32(123)))));
- var c : f32 = f32(u32(i32(u32(f32(123u)))));
- var d : bool = bool(i32(f32(i32(u32(123)))));
-}
-)";
-
- auto* expect = R"(
-fn f() {
- var a : i32 = 123;
- var b : u32 = 123u;
- var c : f32 = 123.0;
- var d : bool = true;
-}
-)";
-
- auto got = Run<FoldConstants>(src);
-
- EXPECT_EQ(expect, str(got));
-}
-
-TEST_F(FoldConstantsTest, Function_Vector_NoConversion) {
- auto* src = R"(
-fn f() {
- var a : vec3<i32> = vec3<i32>(123);
- var b : vec3<u32> = vec3<u32>(123u);
- var c : vec3<f32> = vec3<f32>(123.0);
- var d : vec3<bool> = vec3<bool>(true);
-}
-)";
-
- auto* expect = R"(
-fn f() {
- var a : vec3<i32> = vec3<i32>(123);
- var b : vec3<u32> = vec3<u32>(123u);
- var c : vec3<f32> = vec3<f32>(123.0);
- var d : vec3<bool> = vec3<bool>(true);
-}
-)";
-
- auto got = Run<FoldConstants>(src);
-
- EXPECT_EQ(expect, str(got));
-}
-
-TEST_F(FoldConstantsTest, Function_Vector_Conversion) {
- auto* src = R"(
-fn f() {
- var a : vec3<i32> = vec3<i32>(vec3<f32>(123.0));
- var b : vec3<u32> = vec3<u32>(vec3<i32>(123));
- var c : vec3<f32> = vec3<f32>(vec3<u32>(123u));
- var d : vec3<bool> = vec3<bool>(vec3<i32>(123));
-}
-)";
-
- auto* expect = R"(
-fn f() {
- var a : vec3<i32> = vec3<i32>(123);
- var b : vec3<u32> = vec3<u32>(123u);
- var c : vec3<f32> = vec3<f32>(123.0);
- var d : vec3<bool> = vec3<bool>(true);
-}
-)";
-
- auto got = Run<FoldConstants>(src);
-
- EXPECT_EQ(expect, str(got));
-}
-
-TEST_F(FoldConstantsTest, Function_Vector_MultipleConversions) {
- auto* src = R"(
-fn f() {
- var a : vec3<i32> = vec3<i32>(vec3<u32>(vec3<f32>(vec3<u32>(u32(123.0)))));
- var b : vec3<u32> = vec3<u32>(vec3<i32>(vec3<f32>(vec3<i32>(i32(123)))));
- var c : vec3<f32> = vec3<f32>(vec3<u32>(vec3<i32>(vec3<u32>(u32(123u)))));
- var d : vec3<bool> = vec3<bool>(vec3<i32>(vec3<f32>(vec3<i32>(i32(123)))));
-}
-)";
-
- auto* expect = R"(
-fn f() {
- var a : vec3<i32> = vec3<i32>(123);
- var b : vec3<u32> = vec3<u32>(123u);
- var c : vec3<f32> = vec3<f32>(123.0);
- var d : vec3<bool> = vec3<bool>(true);
-}
-)";
-
- auto got = Run<FoldConstants>(src);
-
- EXPECT_EQ(expect, str(got));
-}
-
-TEST_F(FoldConstantsTest, Function_Vector_MixedSizeConversions) {
- auto* src = R"(
-fn f() {
- var a : vec4<i32> = vec4<i32>(vec3<i32>(vec3<u32>(1u, 2u, 3u)), 4);
- var b : vec4<i32> = vec4<i32>(vec2<i32>(vec2<u32>(1u, 2u)), vec2<i32>(4, 5));
- var c : vec4<i32> = vec4<i32>(1, vec2<i32>(vec2<f32>(2.0, 3.0)), 4);
- var d : vec4<i32> = vec4<i32>(1, 2, vec2<i32>(vec2<f32>(3.0, 4.0)));
- var e : vec4<bool> = vec4<bool>(false, bool(f32(1.0)), vec2<bool>(vec2<i32>(0, i32(4u))));
-}
-)";
-
- auto* expect = R"(
-fn f() {
- var a : vec4<i32> = vec4<i32>(1, 2, 3, 4);
- var b : vec4<i32> = vec4<i32>(1, 2, 4, 5);
- var c : vec4<i32> = vec4<i32>(1, 2, 3, 4);
- var d : vec4<i32> = vec4<i32>(1, 2, 3, 4);
- var e : vec4<bool> = vec4<bool>(false, true, false, true);
-}
-)";
-
- auto got = Run<FoldConstants>(src);
-
- EXPECT_EQ(expect, str(got));
-}
-
-TEST_F(FoldConstantsTest, Function_Vector_ConstantWithNonConstant) {
- auto* src = R"(
-fn f() {
- var a : f32 = f32();
- var b : vec2<f32> = vec2<f32>(f32(i32(1)), a);
-}
-)";
-
- auto* expect = R"(
-fn f() {
- var a : f32 = f32();
- var b : vec2<f32> = vec2<f32>(1.0, a);
-}
-)";
-
- auto got = Run<FoldConstants>(src);
-
- EXPECT_EQ(expect, str(got));
-}
-
-} // namespace
-} // namespace tint::transform
diff --git a/chromium/third_party/dawn/src/tint/transform/fold_trivial_single_use_lets.cc b/chromium/third_party/dawn/src/tint/transform/fold_trivial_single_use_lets.cc
index a0f02a8f004..5bcdaa49000 100644
--- a/chromium/third_party/dawn/src/tint/transform/fold_trivial_single_use_lets.cc
+++ b/chromium/third_party/dawn/src/tint/transform/fold_trivial_single_use_lets.cc
@@ -27,19 +27,19 @@ namespace tint::transform {
namespace {
const ast::VariableDeclStatement* AsTrivialLetDecl(const ast::Statement* stmt) {
- auto* var_decl = stmt->As<ast::VariableDeclStatement>();
- if (!var_decl) {
- return nullptr;
- }
- auto* var = var_decl->variable;
- if (!var->is_const) {
- return nullptr;
- }
- auto* ctor = var->constructor;
- if (!IsAnyOf<ast::IdentifierExpression, ast::LiteralExpression>(ctor)) {
- return nullptr;
- }
- return var_decl;
+ auto* var_decl = stmt->As<ast::VariableDeclStatement>();
+ if (!var_decl) {
+ return nullptr;
+ }
+ auto* var = var_decl->variable;
+ if (!var->is_const) {
+ return nullptr;
+ }
+ auto* ctor = var->constructor;
+ if (!IsAnyOf<ast::IdentifierExpression, ast::LiteralExpression>(ctor)) {
+ return nullptr;
+ }
+ return var_decl;
}
} // namespace
@@ -48,43 +48,41 @@ FoldTrivialSingleUseLets::FoldTrivialSingleUseLets() = default;
FoldTrivialSingleUseLets::~FoldTrivialSingleUseLets() = default;
-void FoldTrivialSingleUseLets::Run(CloneContext& ctx,
- const DataMap&,
- DataMap&) const {
- for (auto* node : ctx.src->ASTNodes().Objects()) {
- if (auto* block = node->As<ast::BlockStatement>()) {
- auto& stmts = block->statements;
- for (size_t stmt_idx = 0; stmt_idx < stmts.size(); stmt_idx++) {
- auto* stmt = stmts[stmt_idx];
- if (auto* let_decl = AsTrivialLetDecl(stmt)) {
- auto* let = let_decl->variable;
- auto* sem_let = ctx.src->Sem().Get(let);
- auto& users = sem_let->Users();
- if (users.size() != 1) {
- continue; // Does not have a single user.
- }
+void FoldTrivialSingleUseLets::Run(CloneContext& ctx, const DataMap&, DataMap&) const {
+ for (auto* node : ctx.src->ASTNodes().Objects()) {
+ if (auto* block = node->As<ast::BlockStatement>()) {
+ auto& stmts = block->statements;
+ for (size_t stmt_idx = 0; stmt_idx < stmts.size(); stmt_idx++) {
+ auto* stmt = stmts[stmt_idx];
+ if (auto* let_decl = AsTrivialLetDecl(stmt)) {
+ auto* let = let_decl->variable;
+ auto* sem_let = ctx.src->Sem().Get(let);
+ auto& users = sem_let->Users();
+ if (users.size() != 1) {
+ continue; // Does not have a single user.
+ }
- auto* user = users[0];
- auto* user_stmt = user->Stmt()->Declaration();
+ auto* user = users[0];
+ auto* user_stmt = user->Stmt()->Declaration();
- for (size_t i = stmt_idx; i < stmts.size(); i++) {
- if (user_stmt == stmts[i]) {
- auto* user_expr = user->Declaration();
- ctx.Remove(stmts, let_decl);
- ctx.Replace(user_expr, ctx.Clone(let->constructor));
- }
- if (!AsTrivialLetDecl(stmts[i])) {
- // Stop if we hit a statement that isn't the single use of the
- // let, and isn't a let itself.
- break;
+ for (size_t i = stmt_idx; i < stmts.size(); i++) {
+ if (user_stmt == stmts[i]) {
+ auto* user_expr = user->Declaration();
+ ctx.Remove(stmts, let_decl);
+ ctx.Replace(user_expr, ctx.Clone(let->constructor));
+ }
+ if (!AsTrivialLetDecl(stmts[i])) {
+ // Stop if we hit a statement that isn't the single use of the
+ // let, and isn't a let itself.
+ break;
+ }
+ }
+ }
}
- }
}
- }
}
- }
- ctx.Clone();
+ ctx.Clone();
}
} // namespace tint::transform
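In WGSL terms, the folding implemented above is roughly the following (the function name, the literals and the phony assignment `_` are illustrative, not taken from this patch). A let qualifies only when its initializer is an identifier or a literal, it has exactly one user, and only other trivial lets sit between the declaration and that single use:

// Before FoldTrivialSingleUseLets:
fn f() {
  let x = 1;      // trivial: literal initializer, used exactly once below
  _ = (x + 2);
}

// After: the single use is replaced by the initializer and the
// declaration is removed.
fn f() {
  _ = (1 + 2);
}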
diff --git a/chromium/third_party/dawn/src/tint/transform/fold_trivial_single_use_lets.h b/chromium/third_party/dawn/src/tint/transform/fold_trivial_single_use_lets.h
index d343b764829..4036f029780 100644
--- a/chromium/third_party/dawn/src/tint/transform/fold_trivial_single_use_lets.h
+++ b/chromium/third_party/dawn/src/tint/transform/fold_trivial_single_use_lets.h
@@ -33,25 +33,22 @@ namespace tint::transform {
/// single usage.
/// These rules prevent any hoisting of the let that may affect execution
/// behaviour.
-class FoldTrivialSingleUseLets final
- : public Castable<FoldTrivialSingleUseLets, Transform> {
- public:
- /// Constructor
- FoldTrivialSingleUseLets();
-
- /// Destructor
- ~FoldTrivialSingleUseLets() override;
-
- protected:
- /// Runs the transform using the CloneContext built for transforming a
- /// program. Run() is responsible for calling Clone() on the CloneContext.
- /// @param ctx the CloneContext primed with the input program and
- /// ProgramBuilder
- /// @param inputs optional extra transform-specific input data
- /// @param outputs optional extra transform-specific output data
- void Run(CloneContext& ctx,
- const DataMap& inputs,
- DataMap& outputs) const override;
+class FoldTrivialSingleUseLets final : public Castable<FoldTrivialSingleUseLets, Transform> {
+ public:
+ /// Constructor
+ FoldTrivialSingleUseLets();
+
+ /// Destructor
+ ~FoldTrivialSingleUseLets() override;
+
+ protected:
+ /// Runs the transform using the CloneContext built for transforming a
+ /// program. Run() is responsible for calling Clone() on the CloneContext.
+ /// @param ctx the CloneContext primed with the input program and
+ /// ProgramBuilder
+ /// @param inputs optional extra transform-specific input data
+ /// @param outputs optional extra transform-specific output data
+ void Run(CloneContext& ctx, const DataMap& inputs, DataMap& outputs) const override;
};
} // namespace tint::transform
diff --git a/chromium/third_party/dawn/src/tint/transform/fold_trivial_single_use_lets_test.cc b/chromium/third_party/dawn/src/tint/transform/fold_trivial_single_use_lets_test.cc
index e08c19117fc..00159e9e93f 100644
--- a/chromium/third_party/dawn/src/tint/transform/fold_trivial_single_use_lets_test.cc
+++ b/chromium/third_party/dawn/src/tint/transform/fold_trivial_single_use_lets_test.cc
@@ -22,35 +22,35 @@ namespace {
using FoldTrivialSingleUseLetsTest = TransformTest;
TEST_F(FoldTrivialSingleUseLetsTest, EmptyModule) {
- auto* src = "";
- auto* expect = "";
+ auto* src = "";
+ auto* expect = "";
- auto got = Run<FoldTrivialSingleUseLets>(src);
+ auto got = Run<FoldTrivialSingleUseLets>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(FoldTrivialSingleUseLetsTest, Single) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
let x = 1;
_ = x;
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn f() {
_ = 1;
}
)";
- auto got = Run<FoldTrivialSingleUseLets>(src);
+ auto got = Run<FoldTrivialSingleUseLets>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(FoldTrivialSingleUseLetsTest, Multiple) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
let x = 1;
let y = 2;
@@ -59,19 +59,19 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn f() {
_ = ((1 + 2) + 3);
}
)";
- auto got = Run<FoldTrivialSingleUseLets>(src);
+ auto got = Run<FoldTrivialSingleUseLets>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(FoldTrivialSingleUseLetsTest, Chained) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
let x = 1;
let y = x;
@@ -80,19 +80,19 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn f() {
_ = 1;
}
)";
- auto got = Run<FoldTrivialSingleUseLets>(src);
+ auto got = Run<FoldTrivialSingleUseLets>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(FoldTrivialSingleUseLetsTest, NoFold_NonTrivialLet) {
- auto* src = R"(
+ auto* src = R"(
fn function_with_posssible_side_effect() -> i32 {
return 1;
}
@@ -104,15 +104,15 @@ fn f() {
}
)";
- auto* expect = src;
+ auto* expect = src;
- auto got = Run<FoldTrivialSingleUseLets>(src);
+ auto got = Run<FoldTrivialSingleUseLets>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(FoldTrivialSingleUseLetsTest, NoFold_NonTrivialLet_OutOfOrder) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
let x = 1;
let y = function_with_posssible_side_effect();
@@ -124,15 +124,15 @@ fn function_with_posssible_side_effect() -> i32 {
}
)";
- auto* expect = src;
+ auto* expect = src;
- auto got = Run<FoldTrivialSingleUseLets>(src);
+ auto got = Run<FoldTrivialSingleUseLets>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(FoldTrivialSingleUseLetsTest, NoFold_UseInSubBlock) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
let x = 1;
{
@@ -141,30 +141,30 @@ fn f() {
}
)";
- auto* expect = src;
+ auto* expect = src;
- auto got = Run<FoldTrivialSingleUseLets>(src);
+ auto got = Run<FoldTrivialSingleUseLets>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(FoldTrivialSingleUseLetsTest, NoFold_MultipleUses) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
let x = 1;
_ = (x + x);
}
)";
- auto* expect = src;
+ auto* expect = src;
- auto got = Run<FoldTrivialSingleUseLets>(src);
+ auto got = Run<FoldTrivialSingleUseLets>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(FoldTrivialSingleUseLetsTest, NoFold_Shadowing) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
var y = 1;
let x = y;
@@ -175,11 +175,11 @@ fn f() {
}
)";
- auto* expect = src;
+ auto* expect = src;
- auto got = Run<FoldTrivialSingleUseLets>(src);
+ auto got = Run<FoldTrivialSingleUseLets>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/transform/for_loop_to_loop.cc b/chromium/third_party/dawn/src/tint/transform/for_loop_to_loop.cc
index 14d5edbfec9..8fff0a8c1be 100644
--- a/chromium/third_party/dawn/src/tint/transform/for_loop_to_loop.cc
+++ b/chromium/third_party/dawn/src/tint/transform/for_loop_to_loop.cc
@@ -25,50 +25,48 @@ ForLoopToLoop::ForLoopToLoop() = default;
ForLoopToLoop::~ForLoopToLoop() = default;
bool ForLoopToLoop::ShouldRun(const Program* program, const DataMap&) const {
- for (auto* node : program->ASTNodes().Objects()) {
- if (node->Is<ast::ForLoopStatement>()) {
- return true;
+ for (auto* node : program->ASTNodes().Objects()) {
+ if (node->Is<ast::ForLoopStatement>()) {
+ return true;
+ }
}
- }
- return false;
+ return false;
}
void ForLoopToLoop::Run(CloneContext& ctx, const DataMap&, DataMap&) const {
- ctx.ReplaceAll(
- [&](const ast::ForLoopStatement* for_loop) -> const ast::Statement* {
+ ctx.ReplaceAll([&](const ast::ForLoopStatement* for_loop) -> const ast::Statement* {
ast::StatementList stmts;
if (auto* cond = for_loop->condition) {
- // !condition
- auto* not_cond = ctx.dst->create<ast::UnaryOpExpression>(
- ast::UnaryOp::kNot, ctx.Clone(cond));
+ // !condition
+ auto* not_cond =
+ ctx.dst->create<ast::UnaryOpExpression>(ast::UnaryOp::kNot, ctx.Clone(cond));
- // { break; }
- auto* break_body =
- ctx.dst->Block(ctx.dst->create<ast::BreakStatement>());
+ // { break; }
+ auto* break_body = ctx.dst->Block(ctx.dst->create<ast::BreakStatement>());
- // if (!condition) { break; }
- stmts.emplace_back(ctx.dst->If(not_cond, break_body));
+ // if (!condition) { break; }
+ stmts.emplace_back(ctx.dst->If(not_cond, break_body));
}
for (auto* stmt : for_loop->body->statements) {
- stmts.emplace_back(ctx.Clone(stmt));
+ stmts.emplace_back(ctx.Clone(stmt));
}
const ast::BlockStatement* continuing = nullptr;
if (auto* cont = for_loop->continuing) {
- continuing = ctx.dst->Block(ctx.Clone(cont));
+ continuing = ctx.dst->Block(ctx.Clone(cont));
}
auto* body = ctx.dst->Block(stmts);
auto* loop = ctx.dst->create<ast::LoopStatement>(body, continuing);
if (auto* init = for_loop->initializer) {
- return ctx.dst->Block(ctx.Clone(init), loop);
+ return ctx.dst->Block(ctx.Clone(init), loop);
}
return loop;
- });
+ });
- ctx.Clone();
+ ctx.Clone();
}
} // namespace tint::transform
diff --git a/chromium/third_party/dawn/src/tint/transform/for_loop_to_loop.h b/chromium/third_party/dawn/src/tint/transform/for_loop_to_loop.h
index 54286d4c288..5ab690a567b 100644
--- a/chromium/third_party/dawn/src/tint/transform/for_loop_to_loop.h
+++ b/chromium/third_party/dawn/src/tint/transform/for_loop_to_loop.h
@@ -22,29 +22,26 @@ namespace tint::transform {
/// ForLoopToLoop is a Transform that converts a for-loop statement into a loop
/// statement. This is required by the SPIR-V writer.
class ForLoopToLoop final : public Castable<ForLoopToLoop, Transform> {
- public:
- /// Constructor
- ForLoopToLoop();
-
- /// Destructor
- ~ForLoopToLoop() override;
-
- /// @param program the program to inspect
- /// @param data optional extra transform-specific input data
- /// @returns true if this transform should be run for the given program
- bool ShouldRun(const Program* program,
- const DataMap& data = {}) const override;
-
- protected:
- /// Runs the transform using the CloneContext built for transforming a
- /// program. Run() is responsible for calling Clone() on the CloneContext.
- /// @param ctx the CloneContext primed with the input program and
- /// ProgramBuilder
- /// @param inputs optional extra transform-specific input data
- /// @param outputs optional extra transform-specific output data
- void Run(CloneContext& ctx,
- const DataMap& inputs,
- DataMap& outputs) const override;
+ public:
+ /// Constructor
+ ForLoopToLoop();
+
+ /// Destructor
+ ~ForLoopToLoop() override;
+
+ /// @param program the program to inspect
+ /// @param data optional extra transform-specific input data
+ /// @returns true if this transform should be run for the given program
+ bool ShouldRun(const Program* program, const DataMap& data = {}) const override;
+
+ protected:
+ /// Runs the transform using the CloneContext built for transforming a
+ /// program. Run() is responsible for calling Clone() on the CloneContext.
+ /// @param ctx the CloneContext primed with the input program and
+ /// ProgramBuilder
+ /// @param inputs optional extra transform-specific input data
+ /// @param outputs optional extra transform-specific output data
+ void Run(CloneContext& ctx, const DataMap& inputs, DataMap& outputs) const override;
};
} // namespace tint::transform
diff --git a/chromium/third_party/dawn/src/tint/transform/for_loop_to_loop_test.cc b/chromium/third_party/dawn/src/tint/transform/for_loop_to_loop_test.cc
index 84ffa984612..172e1fc15db 100644
--- a/chromium/third_party/dawn/src/tint/transform/for_loop_to_loop_test.cc
+++ b/chromium/third_party/dawn/src/tint/transform/for_loop_to_loop_test.cc
@@ -22,13 +22,13 @@ namespace {
using ForLoopToLoopTest = TransformTest;
TEST_F(ForLoopToLoopTest, ShouldRunEmptyModule) {
- auto* src = R"()";
+ auto* src = R"()";
- EXPECT_FALSE(ShouldRun<ForLoopToLoop>(src));
+ EXPECT_FALSE(ShouldRun<ForLoopToLoop>(src));
}
TEST_F(ForLoopToLoopTest, ShouldRunHasForLoop) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
for (;;) {
break;
@@ -36,21 +36,21 @@ fn f() {
}
)";
- EXPECT_TRUE(ShouldRun<ForLoopToLoop>(src));
+ EXPECT_TRUE(ShouldRun<ForLoopToLoop>(src));
}
TEST_F(ForLoopToLoopTest, EmptyModule) {
- auto* src = "";
- auto* expect = src;
+ auto* src = "";
+ auto* expect = src;
- auto got = Run<ForLoopToLoop>(src);
+ auto got = Run<ForLoopToLoop>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
// Test an empty for loop.
TEST_F(ForLoopToLoopTest, Empty) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
for (;;) {
break;
@@ -58,7 +58,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn f() {
loop {
break;
@@ -66,14 +66,14 @@ fn f() {
}
)";
- auto got = Run<ForLoopToLoop>(src);
+ auto got = Run<ForLoopToLoop>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
// Test a for loop with non-empty body.
TEST_F(ForLoopToLoopTest, Body) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
for (;;) {
discard;
@@ -81,7 +81,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn f() {
loop {
discard;
@@ -89,14 +89,14 @@ fn f() {
}
)";
- auto got = Run<ForLoopToLoop>(src);
+ auto got = Run<ForLoopToLoop>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
// Test a for loop declaring a variable in the initializer statement.
TEST_F(ForLoopToLoopTest, InitializerStatementDecl) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
for (var i: i32;;) {
break;
@@ -104,7 +104,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn f() {
{
var i : i32;
@@ -115,15 +115,15 @@ fn f() {
}
)";
- auto got = Run<ForLoopToLoop>(src);
+ auto got = Run<ForLoopToLoop>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
// Test a for loop declaring and initializing a variable in the initializer
// statement.
TEST_F(ForLoopToLoopTest, InitializerStatementDeclEqual) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
for (var i: i32 = 0;;) {
break;
@@ -131,7 +131,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn f() {
{
var i : i32 = 0;
@@ -142,14 +142,14 @@ fn f() {
}
)";
- auto got = Run<ForLoopToLoop>(src);
+ auto got = Run<ForLoopToLoop>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
// Test a for loop declaring a const variable in the initializer statement.
TEST_F(ForLoopToLoopTest, InitializerStatementConstDecl) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
for (let i: i32 = 0;;) {
break;
@@ -157,7 +157,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn f() {
{
let i : i32 = 0;
@@ -168,14 +168,14 @@ fn f() {
}
)";
- auto got = Run<ForLoopToLoop>(src);
+ auto got = Run<ForLoopToLoop>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
// Test a for loop assigning a variable in the initializer statement.
TEST_F(ForLoopToLoopTest, InitializerStatementAssignment) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
var i: i32;
for (i = 0;;) {
@@ -184,7 +184,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn f() {
var i : i32;
{
@@ -196,14 +196,14 @@ fn f() {
}
)";
- auto got = Run<ForLoopToLoop>(src);
+ auto got = Run<ForLoopToLoop>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
// Test a for loop calling a function in the initializer statement.
TEST_F(ForLoopToLoopTest, InitializerStatementFuncCall) {
- auto* src = R"(
+ auto* src = R"(
fn a(x : i32, y : i32) {
}
@@ -216,7 +216,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(x : i32, y : i32) {
}
@@ -232,21 +232,21 @@ fn f() {
}
)";
- auto got = Run<ForLoopToLoop>(src);
+ auto got = Run<ForLoopToLoop>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
// Test a for loop with a break condition
TEST_F(ForLoopToLoopTest, BreakCondition) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
for (; 0 == 1;) {
}
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn f() {
loop {
if (!((0 == 1))) {
@@ -256,14 +256,14 @@ fn f() {
}
)";
- auto got = Run<ForLoopToLoop>(src);
+ auto got = Run<ForLoopToLoop>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
// Test a for loop assigning a variable in the continuing statement.
TEST_F(ForLoopToLoopTest, ContinuingAssignment) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
var x: i32;
for (;;x = 2) {
@@ -272,7 +272,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn f() {
var x : i32;
loop {
@@ -285,14 +285,14 @@ fn f() {
}
)";
- auto got = Run<ForLoopToLoop>(src);
+ auto got = Run<ForLoopToLoop>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
// Test a for loop calling a function in the continuing statement.
TEST_F(ForLoopToLoopTest, ContinuingFuncCall) {
- auto* src = R"(
+ auto* src = R"(
fn a(x : i32, y : i32) {
}
@@ -305,7 +305,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(x : i32, y : i32) {
}
@@ -322,14 +322,14 @@ fn f() {
}
)";
- auto got = Run<ForLoopToLoop>(src);
+ auto got = Run<ForLoopToLoop>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
// Test a for loop with all statements non-empty.
TEST_F(ForLoopToLoopTest, All) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
var a : i32;
for(var i : i32 = 0; i < 4; i = i + 1) {
@@ -341,7 +341,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn f() {
var a : i32;
{
@@ -363,9 +363,9 @@ fn f() {
}
)";
- auto got = Run<ForLoopToLoop>(src);
+ auto got = Run<ForLoopToLoop>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/transform/localize_struct_array_assignment.cc b/chromium/third_party/dawn/src/tint/transform/localize_struct_array_assignment.cc
index e888c017adc..d6cdded9a06 100644
--- a/chromium/third_party/dawn/src/tint/transform/localize_struct_array_assignment.cc
+++ b/chromium/third_party/dawn/src/tint/transform/localize_struct_array_assignment.cc
@@ -22,7 +22,7 @@
#include "src/tint/program_builder.h"
#include "src/tint/sem/expression.h"
#include "src/tint/sem/member_accessor_expression.h"
-#include "src/tint/sem/reference_type.h"
+#include "src/tint/sem/reference.h"
#include "src/tint/sem/statement.h"
#include "src/tint/sem/variable.h"
#include "src/tint/transform/simplify_pointers.h"
@@ -34,189 +34,166 @@ namespace tint::transform {
/// Private implementation of LocalizeStructArrayAssignment transform
class LocalizeStructArrayAssignment::State {
- private:
- CloneContext& ctx;
- ProgramBuilder& b;
-
- /// Returns true if `expr` contains an index accessor expression to a
- /// structure member of array type.
- bool ContainsStructArrayIndex(const ast::Expression* expr) {
- bool result = false;
- ast::TraverseExpressions(
- expr, b.Diagnostics(), [&](const ast::IndexAccessorExpression* ia) {
- // Indexing using a runtime value?
- auto* idx_sem = ctx.src->Sem().Get(ia->index);
- if (!idx_sem->ConstantValue().IsValid()) {
- // Indexing a member access expr?
- if (auto* ma = ia->object->As<ast::MemberAccessorExpression>()) {
- // That accesses an array?
- if (ctx.src->TypeOf(ma)->UnwrapRef()->Is<sem::Array>()) {
- result = true;
- return ast::TraverseAction::Stop;
- }
- }
- }
- return ast::TraverseAction::Descend;
- });
-
- return result;
- }
-
- // Returns the type and storage class of the originating variable of the lhs
- // of the assignment statement.
- // See https://www.w3.org/TR/WGSL/#originating-variable-section
- std::pair<const sem::Type*, ast::StorageClass>
- GetOriginatingTypeAndStorageClass(
- const ast::AssignmentStatement* assign_stmt) {
- // Get first IdentifierExpr from lhs of assignment, which should resolve to
- // the pointer or reference of the originating variable of the assignment.
- // TraverseExpressions traverses left to right, and this code depends on the
- // fact that for an assignment statement, the variable will be the left-most
- // expression.
- // TODO(crbug.com/tint/1341): do this in the Resolver, setting the
- // originating variable on sem::Expression.
- const ast::IdentifierExpression* ident = nullptr;
- ast::TraverseExpressions(assign_stmt->lhs, b.Diagnostics(),
- [&](const ast::IdentifierExpression* id) {
- ident = id;
- return ast::TraverseAction::Stop;
- });
- auto* sem_var_user = ctx.src->Sem().Get<sem::VariableUser>(ident);
- if (!sem_var_user) {
- TINT_ICE(Transform, b.Diagnostics())
- << "Expected to find variable of lhs of assignment statement";
- return {};
+ private:
+ CloneContext& ctx;
+ ProgramBuilder& b;
+
+ /// Returns true if `expr` contains an index accessor expression to a
+ /// structure member of array type.
+ bool ContainsStructArrayIndex(const ast::Expression* expr) {
+ bool result = false;
+ ast::TraverseExpressions(
+ expr, b.Diagnostics(), [&](const ast::IndexAccessorExpression* ia) {
+ // Indexing using a runtime value?
+ auto* idx_sem = ctx.src->Sem().Get(ia->index);
+ if (!idx_sem->ConstantValue().IsValid()) {
+ // Indexing a member access expr?
+ if (auto* ma = ia->object->As<ast::MemberAccessorExpression>()) {
+ // That accesses an array?
+ if (ctx.src->TypeOf(ma)->UnwrapRef()->Is<sem::Array>()) {
+ result = true;
+ return ast::TraverseAction::Stop;
+ }
+ }
+ }
+ return ast::TraverseAction::Descend;
+ });
+
+ return result;
}
- auto* var = sem_var_user->Variable();
- if (auto* ptr = var->Type()->As<sem::Pointer>()) {
- return {ptr->StoreType(), ptr->StorageClass()};
+ // Returns the type and storage class of the originating variable of the lhs
+ // of the assignment statement.
+ // See https://www.w3.org/TR/WGSL/#originating-variable-section
+ std::pair<const sem::Type*, ast::StorageClass> GetOriginatingTypeAndStorageClass(
+ const ast::AssignmentStatement* assign_stmt) {
+ auto* source_var = ctx.src->Sem().Get(assign_stmt->lhs)->SourceVariable();
+ if (!source_var) {
+ TINT_ICE(Transform, b.Diagnostics())
+ << "Unable to determine originating variable for lhs of assignment "
+ "statement";
+ return {};
+ }
+
+ auto* type = source_var->Type();
+ if (auto* ref = type->As<sem::Reference>()) {
+ return {ref->StoreType(), ref->StorageClass()};
+ } else if (auto* ptr = type->As<sem::Pointer>()) {
+ return {ptr->StoreType(), ptr->StorageClass()};
+ }
+
+ TINT_ICE(Transform, b.Diagnostics())
+ << "Expecting to find variable of type pointer or reference on lhs "
+ "of assignment statement";
+ return {};
}
- auto* ref = var->Type()->As<sem::Reference>();
- if (!ref) {
- TINT_ICE(Transform, b.Diagnostics())
- << "Expecting to find variable of type pointer or reference on lhs "
- "of assignment statement";
- return {};
- }
+ public:
+ /// Constructor
+ /// @param ctx_in the CloneContext primed with the input program and
+ /// ProgramBuilder
+ explicit State(CloneContext& ctx_in) : ctx(ctx_in), b(*ctx_in.dst) {}
+
+ /// Runs the transform
+ void Run() {
+ struct Shared {
+ bool process_nested_nodes = false;
+ ast::StatementList insert_before_stmts;
+ ast::StatementList insert_after_stmts;
+ } s;
+
+ ctx.ReplaceAll([&](const ast::AssignmentStatement* assign_stmt) -> const ast::Statement* {
+ // Process if it's an assignment statement to a dynamically indexed array
+ // within a struct on a function or private storage variable. This
+ // specific use-case is what FXC fails to compile with:
+ // error X3500: array reference cannot be used as an l-value; not natively
+ // addressable
+ if (!ContainsStructArrayIndex(assign_stmt->lhs)) {
+ return nullptr;
+ }
+ auto og = GetOriginatingTypeAndStorageClass(assign_stmt);
+ if (!(og.first->Is<sem::Struct>() && (og.second == ast::StorageClass::kFunction ||
+ og.second == ast::StorageClass::kPrivate))) {
+ return nullptr;
+ }
+
+ // Reset shared state for this assignment statement
+ s = Shared{};
- return {ref->StoreType(), ref->StorageClass()};
- }
-
- public:
- /// Constructor
- /// @param ctx_in the CloneContext primed with the input program and
- /// ProgramBuilder
- explicit State(CloneContext& ctx_in) : ctx(ctx_in), b(*ctx_in.dst) {}
-
- /// Runs the transform
- void Run() {
- struct Shared {
- bool process_nested_nodes = false;
- ast::StatementList insert_before_stmts;
- ast::StatementList insert_after_stmts;
- } s;
-
- ctx.ReplaceAll([&](const ast::AssignmentStatement* assign_stmt)
- -> const ast::Statement* {
- // Process if it's an assignment statement to a dynamically indexed array
- // within a struct on a function or private storage variable. This
- // specific use-case is what FXC fails to compile with:
- // error X3500: array reference cannot be used as an l-value; not natively
- // addressable
- if (!ContainsStructArrayIndex(assign_stmt->lhs)) {
- return nullptr;
- }
- auto og = GetOriginatingTypeAndStorageClass(assign_stmt);
- if (!(og.first->Is<sem::Struct>() &&
- (og.second == ast::StorageClass::kFunction ||
- og.second == ast::StorageClass::kPrivate))) {
- return nullptr;
- }
-
- // Reset shared state for this assignment statement
- s = Shared{};
-
- const ast::Expression* new_lhs = nullptr;
- {
- TINT_SCOPED_ASSIGNMENT(s.process_nested_nodes, true);
- new_lhs = ctx.Clone(assign_stmt->lhs);
- }
-
- auto* new_assign_stmt = b.Assign(new_lhs, ctx.Clone(assign_stmt->rhs));
-
- // Combine insert_before_stmts + new_assign_stmt + insert_after_stmts into
- // a block and return it
- ast::StatementList stmts = std::move(s.insert_before_stmts);
- stmts.reserve(1 + s.insert_after_stmts.size());
- stmts.emplace_back(new_assign_stmt);
- stmts.insert(stmts.end(), s.insert_after_stmts.begin(),
- s.insert_after_stmts.end());
-
- return b.Block(std::move(stmts));
- });
-
- ctx.ReplaceAll([&](const ast::IndexAccessorExpression* index_access)
- -> const ast::Expression* {
- if (!s.process_nested_nodes) {
- return nullptr;
- }
-
- // Indexing a member access expr?
- auto* mem_access =
- index_access->object->As<ast::MemberAccessorExpression>();
- if (!mem_access) {
- return nullptr;
- }
-
- // Process any nested IndexAccessorExpressions
- mem_access = ctx.Clone(mem_access);
-
- // Store the address of the member access into a let as we need to read
- // the value twice e.g. let tint_symbol = &(s.a1);
- auto mem_access_ptr = b.Sym();
- s.insert_before_stmts.push_back(
- b.Decl(b.Const(mem_access_ptr, nullptr, b.AddressOf(mem_access))));
-
- // Disable further transforms when cloning
- TINT_SCOPED_ASSIGNMENT(s.process_nested_nodes, false);
-
- // Copy entire array out of struct into local temp var
- // e.g. var tint_symbol_1 = *(tint_symbol);
- auto tmp_var = b.Sym();
- s.insert_before_stmts.push_back(
- b.Decl(b.Var(tmp_var, nullptr, b.Deref(mem_access_ptr))));
-
- // Replace input index_access with a clone of itself, but with its
- // .object replaced by the new temp var. This is returned from this
- // function to modify the original assignment statement. e.g.
- // tint_symbol_1[uniforms.i]
- auto* new_index_access =
- b.IndexAccessor(tmp_var, ctx.Clone(index_access->index));
-
- // Assign temp var back to array
- // e.g. *(tint_symbol) = tint_symbol_1;
- auto* assign_rhs_to_temp = b.Assign(b.Deref(mem_access_ptr), tmp_var);
- s.insert_after_stmts.insert(s.insert_after_stmts.begin(),
- assign_rhs_to_temp); // push_front
-
- return new_index_access;
- });
-
- ctx.Clone();
- }
+ const ast::Expression* new_lhs = nullptr;
+ {
+ TINT_SCOPED_ASSIGNMENT(s.process_nested_nodes, true);
+ new_lhs = ctx.Clone(assign_stmt->lhs);
+ }
+
+ auto* new_assign_stmt = b.Assign(new_lhs, ctx.Clone(assign_stmt->rhs));
+
+ // Combine insert_before_stmts + new_assign_stmt + insert_after_stmts into
+ // a block and return it
+ ast::StatementList stmts = std::move(s.insert_before_stmts);
+ stmts.reserve(1 + s.insert_after_stmts.size());
+ stmts.emplace_back(new_assign_stmt);
+ stmts.insert(stmts.end(), s.insert_after_stmts.begin(), s.insert_after_stmts.end());
+
+ return b.Block(std::move(stmts));
+ });
+
+ ctx.ReplaceAll(
+ [&](const ast::IndexAccessorExpression* index_access) -> const ast::Expression* {
+ if (!s.process_nested_nodes) {
+ return nullptr;
+ }
+
+ // Indexing a member access expr?
+ auto* mem_access = index_access->object->As<ast::MemberAccessorExpression>();
+ if (!mem_access) {
+ return nullptr;
+ }
+
+ // Process any nested IndexAccessorExpressions
+ mem_access = ctx.Clone(mem_access);
+
+ // Store the address of the member access into a let as we need to read
+ // the value twice e.g. let tint_symbol = &(s.a1);
+ auto mem_access_ptr = b.Sym();
+ s.insert_before_stmts.push_back(
+ b.Decl(b.Let(mem_access_ptr, nullptr, b.AddressOf(mem_access))));
+
+ // Disable further transforms when cloning
+ TINT_SCOPED_ASSIGNMENT(s.process_nested_nodes, false);
+
+ // Copy entire array out of struct into local temp var
+ // e.g. var tint_symbol_1 = *(tint_symbol);
+ auto tmp_var = b.Sym();
+ s.insert_before_stmts.push_back(
+ b.Decl(b.Var(tmp_var, nullptr, b.Deref(mem_access_ptr))));
+
+ // Replace input index_access with a clone of itself, but with its
+ // .object replaced by the new temp var. This is returned from this
+ // function to modify the original assignment statement. e.g.
+ // tint_symbol_1[uniforms.i]
+ auto* new_index_access = b.IndexAccessor(tmp_var, ctx.Clone(index_access->index));
+
+ // Assign temp var back to array
+ // e.g. *(tint_symbol) = tint_symbol_1;
+ auto* assign_rhs_to_temp = b.Assign(b.Deref(mem_access_ptr), tmp_var);
+ s.insert_after_stmts.insert(s.insert_after_stmts.begin(),
+ assign_rhs_to_temp); // push_front
+
+ return new_index_access;
+ });
+
+ ctx.Clone();
+ }
};
LocalizeStructArrayAssignment::LocalizeStructArrayAssignment() = default;
LocalizeStructArrayAssignment::~LocalizeStructArrayAssignment() = default;
-void LocalizeStructArrayAssignment::Run(CloneContext& ctx,
- const DataMap&,
- DataMap&) const {
- State state(ctx);
- state.Run();
+void LocalizeStructArrayAssignment::Run(CloneContext& ctx, const DataMap&, DataMap&) const {
+ State state(ctx);
+ state.Run();
}
} // namespace tint::transform
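In WGSL terms, the localization performed above is roughly the following (the names `s`, `a1`, `uniforms.i`, `v` and the `tint_symbol` temporaries are illustrative; the real names are generated symbols). The assignment is rewritten into a block so FXC never sees a dynamically indexed struct-member array as an l-value:

// Before LocalizeStructArrayAssignment: FXC rejects this with
// error X3500: array reference cannot be used as an l-value.
s.a1[uniforms.i] = v;

// After: take the member's address, copy the array into a local,
// index the local, then write the whole array back.
{
  let tint_symbol = &(s.a1);
  var tint_symbol_1 = *(tint_symbol);
  tint_symbol_1[uniforms.i] = v;
  *(tint_symbol) = tint_symbol_1;
}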
diff --git a/chromium/third_party/dawn/src/tint/transform/localize_struct_array_assignment.h b/chromium/third_party/dawn/src/tint/transform/localize_struct_array_assignment.h
index 2c45203e1b3..129c8491bb0 100644
--- a/chromium/third_party/dawn/src/tint/transform/localize_struct_array_assignment.h
+++ b/chromium/third_party/dawn/src/tint/transform/localize_struct_array_assignment.h
@@ -27,28 +27,25 @@ namespace tint::transform {
///
/// @note Depends on the following transforms to have been run first:
/// * SimplifyPointers
-class LocalizeStructArrayAssignment
- : public Castable<LocalizeStructArrayAssignment, Transform> {
- public:
- /// Constructor
- LocalizeStructArrayAssignment();
-
- /// Destructor
- ~LocalizeStructArrayAssignment() override;
-
- protected:
- /// Runs the transform using the CloneContext built for transforming a
- /// program. Run() is responsible for calling Clone() on the CloneContext.
- /// @param ctx the CloneContext primed with the input program and
- /// ProgramBuilder
- /// @param inputs optional extra transform-specific input data
- /// @param outputs optional extra transform-specific output data
- void Run(CloneContext& ctx,
- const DataMap& inputs,
- DataMap& outputs) const override;
-
- private:
- class State;
+class LocalizeStructArrayAssignment : public Castable<LocalizeStructArrayAssignment, Transform> {
+ public:
+ /// Constructor
+ LocalizeStructArrayAssignment();
+
+ /// Destructor
+ ~LocalizeStructArrayAssignment() override;
+
+ protected:
+ /// Runs the transform using the CloneContext built for transforming a
+ /// program. Run() is responsible for calling Clone() on the CloneContext.
+ /// @param ctx the CloneContext primed with the input program and
+ /// ProgramBuilder
+ /// @param inputs optional extra transform-specific input data
+ /// @param outputs optional extra transform-specific output data
+ void Run(CloneContext& ctx, const DataMap& inputs, DataMap& outputs) const override;
+
+ private:
+ class State;
};
} // namespace tint::transform
diff --git a/chromium/third_party/dawn/src/tint/transform/localize_struct_array_assignment_test.cc b/chromium/third_party/dawn/src/tint/transform/localize_struct_array_assignment_test.cc
index d2027851303..e85a6000295 100644
--- a/chromium/third_party/dawn/src/tint/transform/localize_struct_array_assignment_test.cc
+++ b/chromium/third_party/dawn/src/tint/transform/localize_struct_array_assignment_test.cc
@@ -24,15 +24,14 @@ namespace {
using LocalizeStructArrayAssignmentTest = TransformTest;
TEST_F(LocalizeStructArrayAssignmentTest, EmptyModule) {
- auto* src = R"()";
- auto* expect = src;
- auto got =
- Run<Unshadow, SimplifyPointers, LocalizeStructArrayAssignment>(src);
- EXPECT_EQ(expect, str(got));
+ auto* src = R"()";
+ auto* expect = src;
+ auto got = Run<Unshadow, SimplifyPointers, LocalizeStructArrayAssignment>(src);
+ EXPECT_EQ(expect, str(got));
}
TEST_F(LocalizeStructArrayAssignmentTest, StructArray) {
- auto* src = R"(
+ auto* src = R"(
struct Uniforms {
i : u32,
};
@@ -47,7 +46,7 @@ struct OuterS {
@group(1) @binding(4) var<uniform> uniforms : Uniforms;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
var v : InnerS;
var s1 : OuterS;
@@ -55,7 +54,7 @@ fn main() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct Uniforms {
i : u32,
}
@@ -70,7 +69,7 @@ struct OuterS {
@group(1) @binding(4) var<uniform> uniforms : Uniforms;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
var v : InnerS;
var s1 : OuterS;
@@ -83,14 +82,13 @@ fn main() {
}
)";
- auto got =
- Run<Unshadow, SimplifyPointers, LocalizeStructArrayAssignment>(src);
- EXPECT_EQ(expect, str(got));
+ auto got = Run<Unshadow, SimplifyPointers, LocalizeStructArrayAssignment>(src);
+ EXPECT_EQ(expect, str(got));
}
TEST_F(LocalizeStructArrayAssignmentTest, StructArray_OutOfOrder) {
- auto* src = R"(
-@stage(compute) @workgroup_size(1)
+ auto* src = R"(
+@compute @workgroup_size(1)
fn main() {
var v : InnerS;
var s1 : OuterS;
@@ -112,8 +110,8 @@ struct Uniforms {
};
)";
- auto* expect = R"(
-@stage(compute) @workgroup_size(1)
+ auto* expect = R"(
+@compute @workgroup_size(1)
fn main() {
var v : InnerS;
var s1 : OuterS;
@@ -140,13 +138,12 @@ struct Uniforms {
}
)";
- auto got =
- Run<Unshadow, SimplifyPointers, LocalizeStructArrayAssignment>(src);
- EXPECT_EQ(expect, str(got));
+ auto got = Run<Unshadow, SimplifyPointers, LocalizeStructArrayAssignment>(src);
+ EXPECT_EQ(expect, str(got));
}
TEST_F(LocalizeStructArrayAssignmentTest, StructStructArray) {
- auto* src = R"(
+ auto* src = R"(
struct Uniforms {
i : u32,
};
@@ -165,7 +162,7 @@ struct OuterS {
@group(1) @binding(4) var<uniform> uniforms : Uniforms;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
var v : InnerS;
var s1 : OuterS;
@@ -173,7 +170,7 @@ fn main() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct Uniforms {
i : u32,
}
@@ -192,7 +189,7 @@ struct OuterS {
@group(1) @binding(4) var<uniform> uniforms : Uniforms;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
var v : InnerS;
var s1 : OuterS;
@@ -205,14 +202,13 @@ fn main() {
}
)";
- auto got =
- Run<Unshadow, SimplifyPointers, LocalizeStructArrayAssignment>(src);
- EXPECT_EQ(expect, str(got));
+ auto got = Run<Unshadow, SimplifyPointers, LocalizeStructArrayAssignment>(src);
+ EXPECT_EQ(expect, str(got));
}
TEST_F(LocalizeStructArrayAssignmentTest, StructStructArray_OutOfOrder) {
- auto* src = R"(
-@stage(compute) @workgroup_size(1)
+ auto* src = R"(
+@compute @workgroup_size(1)
fn main() {
var v : InnerS;
var s1 : OuterS;
@@ -238,8 +234,8 @@ struct Uniforms {
};
)";
- auto* expect = R"(
-@stage(compute) @workgroup_size(1)
+ auto* expect = R"(
+@compute @workgroup_size(1)
fn main() {
var v : InnerS;
var s1 : OuterS;
@@ -270,13 +266,12 @@ struct Uniforms {
}
)";
- auto got =
- Run<Unshadow, SimplifyPointers, LocalizeStructArrayAssignment>(src);
- EXPECT_EQ(expect, str(got));
+ auto got = Run<Unshadow, SimplifyPointers, LocalizeStructArrayAssignment>(src);
+ EXPECT_EQ(expect, str(got));
}
TEST_F(LocalizeStructArrayAssignmentTest, StructArrayArray) {
- auto* src = R"(
+ auto* src = R"(
struct Uniforms {
i : u32,
j : u32,
@@ -292,7 +287,7 @@ struct OuterS {
@group(1) @binding(4) var<uniform> uniforms : Uniforms;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
var v : InnerS;
var s1 : OuterS;
@@ -300,7 +295,7 @@ fn main() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct Uniforms {
i : u32,
j : u32,
@@ -316,7 +311,7 @@ struct OuterS {
@group(1) @binding(4) var<uniform> uniforms : Uniforms;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
var v : InnerS;
var s1 : OuterS;
@@ -329,13 +324,12 @@ fn main() {
}
)";
- auto got =
- Run<Unshadow, SimplifyPointers, LocalizeStructArrayAssignment>(src);
- EXPECT_EQ(expect, str(got));
+ auto got = Run<Unshadow, SimplifyPointers, LocalizeStructArrayAssignment>(src);
+ EXPECT_EQ(expect, str(got));
}
TEST_F(LocalizeStructArrayAssignmentTest, StructArrayStruct) {
- auto* src = R"(
+ auto* src = R"(
struct Uniforms {
i : u32,
};
@@ -354,7 +348,7 @@ struct OuterS {
@group(1) @binding(4) var<uniform> uniforms : Uniforms;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
var v : InnerS;
var s1 : OuterS;
@@ -362,7 +356,7 @@ fn main() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct Uniforms {
i : u32,
}
@@ -381,7 +375,7 @@ struct OuterS {
@group(1) @binding(4) var<uniform> uniforms : Uniforms;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
var v : InnerS;
var s1 : OuterS;
@@ -394,13 +388,12 @@ fn main() {
}
)";
- auto got =
- Run<Unshadow, SimplifyPointers, LocalizeStructArrayAssignment>(src);
- EXPECT_EQ(expect, str(got));
+ auto got = Run<Unshadow, SimplifyPointers, LocalizeStructArrayAssignment>(src);
+ EXPECT_EQ(expect, str(got));
}
TEST_F(LocalizeStructArrayAssignmentTest, StructArrayStructArray) {
- auto* src = R"(
+ auto* src = R"(
struct Uniforms {
i : u32,
j : u32,
@@ -420,7 +413,7 @@ struct OuterS {
@group(1) @binding(4) var<uniform> uniforms : Uniforms;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
var v : InnerS;
var s : OuterS;
@@ -428,7 +421,7 @@ fn main() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct Uniforms {
i : u32,
j : u32,
@@ -448,7 +441,7 @@ struct OuterS {
@group(1) @binding(4) var<uniform> uniforms : Uniforms;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
var v : InnerS;
var s : OuterS;
@@ -464,13 +457,12 @@ fn main() {
}
)";
- auto got =
- Run<Unshadow, SimplifyPointers, LocalizeStructArrayAssignment>(src);
- EXPECT_EQ(expect, str(got));
+ auto got = Run<Unshadow, SimplifyPointers, LocalizeStructArrayAssignment>(src);
+ EXPECT_EQ(expect, str(got));
}
TEST_F(LocalizeStructArrayAssignmentTest, IndexingWithSideEffectFunc) {
- auto* src = R"(
+ auto* src = R"(
struct Uniforms {
i : u32,
j : u32,
@@ -496,7 +488,7 @@ fn getNextIndex() -> u32 {
@group(1) @binding(4) var<uniform> uniforms : Uniforms;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
var v : InnerS;
var s : OuterS;
@@ -504,7 +496,7 @@ fn main() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct Uniforms {
i : u32,
j : u32,
@@ -531,7 +523,7 @@ fn getNextIndex() -> u32 {
@group(1) @binding(4) var<uniform> uniforms : Uniforms;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
var v : InnerS;
var s : OuterS;
@@ -547,15 +539,13 @@ fn main() {
}
)";
- auto got =
- Run<Unshadow, SimplifyPointers, LocalizeStructArrayAssignment>(src);
- EXPECT_EQ(expect, str(got));
+ auto got = Run<Unshadow, SimplifyPointers, LocalizeStructArrayAssignment>(src);
+ EXPECT_EQ(expect, str(got));
}
-TEST_F(LocalizeStructArrayAssignmentTest,
- IndexingWithSideEffectFunc_OutOfOrder) {
- auto* src = R"(
-@stage(compute) @workgroup_size(1)
+TEST_F(LocalizeStructArrayAssignmentTest, IndexingWithSideEffectFunc_OutOfOrder) {
+ auto* src = R"(
+@compute @workgroup_size(1)
fn main() {
var v : InnerS;
var s : OuterS;
@@ -588,8 +578,8 @@ struct InnerS {
};
)";
- auto* expect = R"(
-@stage(compute) @workgroup_size(1)
+ auto* expect = R"(
+@compute @workgroup_size(1)
fn main() {
var v : InnerS;
var s : OuterS;
@@ -631,13 +621,12 @@ struct InnerS {
}
)";
- auto got =
- Run<Unshadow, SimplifyPointers, LocalizeStructArrayAssignment>(src);
- EXPECT_EQ(expect, str(got));
+ auto got = Run<Unshadow, SimplifyPointers, LocalizeStructArrayAssignment>(src);
+ EXPECT_EQ(expect, str(got));
}
TEST_F(LocalizeStructArrayAssignmentTest, ViaPointerArg) {
- auto* src = R"(
+ auto* src = R"(
struct Uniforms {
i : u32,
};
@@ -654,14 +643,14 @@ fn f(p : ptr<function, OuterS>) {
(*p).a1[uniforms.i] = v;
}
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
var s1 : OuterS;
f(&s1);
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct Uniforms {
i : u32,
}
@@ -686,21 +675,20 @@ fn f(p : ptr<function, OuterS>) {
}
}
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
var s1 : OuterS;
f(&(s1));
}
)";
- auto got =
- Run<Unshadow, SimplifyPointers, LocalizeStructArrayAssignment>(src);
- EXPECT_EQ(expect, str(got));
+ auto got = Run<Unshadow, SimplifyPointers, LocalizeStructArrayAssignment>(src);
+ EXPECT_EQ(expect, str(got));
}
TEST_F(LocalizeStructArrayAssignmentTest, ViaPointerArg_OutOfOrder) {
- auto* src = R"(
-@stage(compute) @workgroup_size(1)
+ auto* src = R"(
+@compute @workgroup_size(1)
fn main() {
var s1 : OuterS;
f(&s1);
@@ -725,8 +713,8 @@ struct Uniforms {
};
)";
- auto* expect = R"(
-@stage(compute) @workgroup_size(1)
+ auto* expect = R"(
+@compute @workgroup_size(1)
fn main() {
var s1 : OuterS;
f(&(s1));
@@ -757,13 +745,12 @@ struct Uniforms {
}
)";
- auto got =
- Run<Unshadow, SimplifyPointers, LocalizeStructArrayAssignment>(src);
- EXPECT_EQ(expect, str(got));
+ auto got = Run<Unshadow, SimplifyPointers, LocalizeStructArrayAssignment>(src);
+ EXPECT_EQ(expect, str(got));
}
TEST_F(LocalizeStructArrayAssignmentTest, ViaPointerVar) {
- auto* src = R"(
+ auto* src = R"(
struct Uniforms {
i : u32,
};
@@ -782,7 +769,7 @@ fn f(p : ptr<function, InnerS>, v : InnerS) {
*(p) = v;
}
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
var v : InnerS;
var s1 : OuterS;
@@ -791,7 +778,7 @@ fn main() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct Uniforms {
i : u32,
}
@@ -810,7 +797,7 @@ fn f(p : ptr<function, InnerS>, v : InnerS) {
*(p) = v;
}
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
var v : InnerS;
var s1 : OuterS;
@@ -824,13 +811,12 @@ fn main() {
}
)";
- auto got =
- Run<Unshadow, SimplifyPointers, LocalizeStructArrayAssignment>(src);
- EXPECT_EQ(expect, str(got));
+ auto got = Run<Unshadow, SimplifyPointers, LocalizeStructArrayAssignment>(src);
+ EXPECT_EQ(expect, str(got));
}
TEST_F(LocalizeStructArrayAssignmentTest, VectorAssignment) {
- auto* src = R"(
+ auto* src = R"(
struct Uniforms {
i : u32,
}
@@ -845,7 +831,7 @@ fn f(i : u32) -> u32 {
return (i + 1u);
}
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
var s1 : OuterS;
var v : vec3<f32>;
@@ -854,13 +840,12 @@ fn main() {
}
)";
- // Transform does nothing here as we're not actually assigning to the array in
- // the struct.
- auto* expect = src;
+ // Transform does nothing here as we're not actually assigning to the array in
+ // the struct.
+ auto* expect = src;
- auto got =
- Run<Unshadow, SimplifyPointers, LocalizeStructArrayAssignment>(src);
- EXPECT_EQ(expect, str(got));
+ auto got = Run<Unshadow, SimplifyPointers, LocalizeStructArrayAssignment>(src);
+ EXPECT_EQ(expect, str(got));
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/transform/loop_to_for_loop.cc b/chromium/third_party/dawn/src/tint/transform/loop_to_for_loop.cc
index d01ac48e26a..3e0a4b5412e 100644
--- a/chromium/third_party/dawn/src/tint/transform/loop_to_for_loop.cc
+++ b/chromium/third_party/dawn/src/tint/transform/loop_to_for_loop.cc
@@ -29,24 +29,22 @@ namespace tint::transform {
namespace {
bool IsBlockWithSingleBreak(const ast::BlockStatement* block) {
- if (block->statements.size() != 1) {
- return false;
- }
- return block->statements[0]->Is<ast::BreakStatement>();
+ if (block->statements.size() != 1) {
+ return false;
+ }
+ return block->statements[0]->Is<ast::BreakStatement>();
}
-bool IsVarUsedByStmt(const sem::Info& sem,
- const ast::Variable* var,
- const ast::Statement* stmt) {
- auto* var_sem = sem.Get(var);
- for (auto* user : var_sem->Users()) {
- if (auto* s = user->Stmt()) {
- if (s->Declaration() == stmt) {
- return true;
- }
+bool IsVarUsedByStmt(const sem::Info& sem, const ast::Variable* var, const ast::Statement* stmt) {
+ auto* var_sem = sem.Get(var);
+ for (auto* user : var_sem->Users()) {
+ if (auto* s = user->Stmt()) {
+ if (s->Declaration() == stmt) {
+ return true;
+ }
+ }
}
- }
- return false;
+ return false;
}
} // namespace
@@ -56,88 +54,83 @@ LoopToForLoop::LoopToForLoop() = default;
LoopToForLoop::~LoopToForLoop() = default;
bool LoopToForLoop::ShouldRun(const Program* program, const DataMap&) const {
- for (auto* node : program->ASTNodes().Objects()) {
- if (node->Is<ast::LoopStatement>()) {
- return true;
+ for (auto* node : program->ASTNodes().Objects()) {
+ if (node->Is<ast::LoopStatement>()) {
+ return true;
+ }
}
- }
- return false;
+ return false;
}
void LoopToForLoop::Run(CloneContext& ctx, const DataMap&, DataMap&) const {
- ctx.ReplaceAll([&](const ast::LoopStatement* loop) -> const ast::Statement* {
- // For loop condition is taken from the first statement in the loop.
- // This requires an if-statement with either:
- // * A true block with no else statements, and the true block contains a
- // single 'break' statement.
- // * An empty true block with a single, no-condition else statement
- // containing a single 'break' statement.
- // Examples:
- // loop { if (condition) { break; } ... }
- // loop { if (condition) {} else { break; } ... }
- auto& stmts = loop->body->statements;
- if (stmts.empty()) {
- return nullptr;
- }
- auto* if_stmt = stmts[0]->As<ast::IfStatement>();
- if (!if_stmt) {
- return nullptr;
- }
-
- bool negate_condition = false;
- if (IsBlockWithSingleBreak(if_stmt->body) &&
- if_stmt->else_statements.empty()) {
- negate_condition = true;
- } else if (if_stmt->body->Empty() && if_stmt->else_statements.size() == 1 &&
- if_stmt->else_statements[0]->condition == nullptr &&
- IsBlockWithSingleBreak(if_stmt->else_statements[0]->body)) {
- negate_condition = false;
- } else {
- return nullptr;
- }
-
- // The continuing block must be empty or contain a single, assignment or
- // function call statement.
- const ast::Statement* continuing = nullptr;
- if (auto* loop_cont = loop->continuing) {
- if (loop_cont->statements.size() != 1) {
- return nullptr;
- }
-
- continuing = loop_cont->statements[0];
- if (!continuing
- ->IsAnyOf<ast::AssignmentStatement, ast::CallStatement>()) {
- return nullptr;
- }
-
- // And the continuing statement must not use any of the variables declared
- // in the loop body.
- for (auto* stmt : loop->body->statements) {
- if (auto* var_decl = stmt->As<ast::VariableDeclStatement>()) {
- if (IsVarUsedByStmt(ctx.src->Sem(), var_decl->variable, continuing)) {
+ ctx.ReplaceAll([&](const ast::LoopStatement* loop) -> const ast::Statement* {
+ // For loop condition is taken from the first statement in the loop.
+ // This requires an if-statement with either:
+ // * A true block with no else statements, and the true block contains a
+ // single 'break' statement.
+ // * An empty true block with a single, no-condition else statement
+ // containing a single 'break' statement.
+ // Examples:
+ // loop { if (condition) { break; } ... }
+ // loop { if (condition) {} else { break; } ... }
+ auto& stmts = loop->body->statements;
+ if (stmts.empty()) {
+ return nullptr;
+ }
+ auto* if_stmt = stmts[0]->As<ast::IfStatement>();
+ if (!if_stmt) {
+ return nullptr;
+ }
+ auto* else_stmt = tint::As<ast::BlockStatement>(if_stmt->else_statement);
+
+ bool negate_condition = false;
+ if (IsBlockWithSingleBreak(if_stmt->body) && if_stmt->else_statement == nullptr) {
+ negate_condition = true;
+ } else if (if_stmt->body->Empty() && else_stmt && IsBlockWithSingleBreak(else_stmt)) {
+ negate_condition = false;
+ } else {
return nullptr;
- }
}
- }
- continuing = ctx.Clone(continuing);
- }
+ // The continuing block must be empty or contain a single, assignment or
+ // function call statement.
+ const ast::Statement* continuing = nullptr;
+ if (auto* loop_cont = loop->continuing) {
+ if (loop_cont->statements.size() != 1) {
+ return nullptr;
+ }
+
+ continuing = loop_cont->statements[0];
+ if (!continuing->IsAnyOf<ast::AssignmentStatement, ast::CallStatement>()) {
+ return nullptr;
+ }
+
+ // And the continuing statement must not use any of the variables declared
+ // in the loop body.
+ for (auto* stmt : loop->body->statements) {
+ if (auto* var_decl = stmt->As<ast::VariableDeclStatement>()) {
+ if (IsVarUsedByStmt(ctx.src->Sem(), var_decl->variable, continuing)) {
+ return nullptr;
+ }
+ }
+ }
+
+ continuing = ctx.Clone(continuing);
+ }
- auto* condition = ctx.Clone(if_stmt->condition);
- if (negate_condition) {
- condition = ctx.dst->create<ast::UnaryOpExpression>(ast::UnaryOp::kNot,
- condition);
- }
+ auto* condition = ctx.Clone(if_stmt->condition);
+ if (negate_condition) {
+ condition = ctx.dst->create<ast::UnaryOpExpression>(ast::UnaryOp::kNot, condition);
+ }
- ast::Statement* initializer = nullptr;
+ ast::Statement* initializer = nullptr;
- ctx.Remove(loop->body->statements, if_stmt);
- auto* body = ctx.Clone(loop->body);
- return ctx.dst->create<ast::ForLoopStatement>(initializer, condition,
- continuing, body);
- });
+ ctx.Remove(loop->body->statements, if_stmt);
+ auto* body = ctx.Clone(loop->body);
+ return ctx.dst->create<ast::ForLoopStatement>(initializer, condition, continuing, body);
+ });
- ctx.Clone();
+ ctx.Clone();
}
} // namespace tint::transform
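In WGSL terms, the pattern recognised above and the for-loop it is rewritten to look roughly like this (the variable `i`, the bound and the loop body are illustrative):

// Before LoopToForLoop: the first statement is an if-break and the
// continuing block holds a single assignment.
loop {
  if (i > 4) {
    break;
  }
  a = (a + i);

  continuing {
    i = (i + 1);
  }
}

// After: the negated break condition becomes the for-loop condition and
// the continuing assignment becomes the increment; the initializer is
// left empty.
for(; !(i > 4); i = (i + 1)) {
  a = (a + i);
}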
diff --git a/chromium/third_party/dawn/src/tint/transform/loop_to_for_loop.h b/chromium/third_party/dawn/src/tint/transform/loop_to_for_loop.h
index b6482aebed9..0623d79b85f 100644
--- a/chromium/third_party/dawn/src/tint/transform/loop_to_for_loop.h
+++ b/chromium/third_party/dawn/src/tint/transform/loop_to_for_loop.h
@@ -22,29 +22,26 @@ namespace tint::transform {
/// LoopToForLoop is a Transform that attempts to convert WGSL `loop {}`
/// statements into a for-loop statement.
class LoopToForLoop : public Castable<LoopToForLoop, Transform> {
- public:
- /// Constructor
- LoopToForLoop();
-
- /// Destructor
- ~LoopToForLoop() override;
-
- /// @param program the program to inspect
- /// @param data optional extra transform-specific input data
- /// @returns true if this transform should be run for the given program
- bool ShouldRun(const Program* program,
- const DataMap& data = {}) const override;
-
- protected:
- /// Runs the transform using the CloneContext built for transforming a
- /// program. Run() is responsible for calling Clone() on the CloneContext.
- /// @param ctx the CloneContext primed with the input program and
- /// ProgramBuilder
- /// @param inputs optional extra transform-specific input data
- /// @param outputs optional extra transform-specific output data
- void Run(CloneContext& ctx,
- const DataMap& inputs,
- DataMap& outputs) const override;
+ public:
+ /// Constructor
+ LoopToForLoop();
+
+ /// Destructor
+ ~LoopToForLoop() override;
+
+ /// @param program the program to inspect
+ /// @param data optional extra transform-specific input data
+ /// @returns true if this transform should be run for the given program
+ bool ShouldRun(const Program* program, const DataMap& data = {}) const override;
+
+ protected:
+ /// Runs the transform using the CloneContext built for transforming a
+ /// program. Run() is responsible for calling Clone() on the CloneContext.
+ /// @param ctx the CloneContext primed with the input program and
+ /// ProgramBuilder
+ /// @param inputs optional extra transform-specific input data
+ /// @param outputs optional extra transform-specific output data
+ void Run(CloneContext& ctx, const DataMap& inputs, DataMap& outputs) const override;
};
} // namespace tint::transform
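A minimal sketch of using the interface declared above, assuming `program` points to a valid tint::Program built elsewhere. It drives LoopToForLoop through the public Run(const Program*, const DataMap&) entry point inherited from Transform (the same overload the transform Manager calls); the function name ConvertLoops is chosen here for illustration only.

#include "src/tint/transform/loop_to_for_loop.h"

// Sketch only: convert `loop {}` statements into for-loops where possible.
tint::transform::Output ConvertLoops(const tint::Program* program) {
    tint::transform::LoopToForLoop transform;
    if (!transform.ShouldRun(program)) {
        // Nothing to rewrite: hand back a clone of the input program.
        tint::transform::Output out;
        out.program = program->Clone();
        return out;
    }
    // Builds the CloneContext internally and invokes the protected Run() above.
    return transform.Run(program);
}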
diff --git a/chromium/third_party/dawn/src/tint/transform/loop_to_for_loop_test.cc b/chromium/third_party/dawn/src/tint/transform/loop_to_for_loop_test.cc
index c34b0e84b0c..e3d7eccbef1 100644
--- a/chromium/third_party/dawn/src/tint/transform/loop_to_for_loop_test.cc
+++ b/chromium/third_party/dawn/src/tint/transform/loop_to_for_loop_test.cc
@@ -22,13 +22,13 @@ namespace {
using LoopToForLoopTest = TransformTest;
TEST_F(LoopToForLoopTest, ShouldRunEmptyModule) {
- auto* src = R"()";
+ auto* src = R"()";
- EXPECT_FALSE(ShouldRun<LoopToForLoop>(src));
+ EXPECT_FALSE(ShouldRun<LoopToForLoop>(src));
}
TEST_F(LoopToForLoopTest, ShouldRunHasForLoop) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
loop {
break;
@@ -36,20 +36,20 @@ fn f() {
}
)";
- EXPECT_TRUE(ShouldRun<LoopToForLoop>(src));
+ EXPECT_TRUE(ShouldRun<LoopToForLoop>(src));
}
TEST_F(LoopToForLoopTest, EmptyModule) {
- auto* src = "";
- auto* expect = "";
+ auto* src = "";
+ auto* expect = "";
- auto got = Run<LoopToForLoop>(src);
+ auto got = Run<LoopToForLoop>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(LoopToForLoopTest, IfBreak) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
var i : i32;
i = 0;
@@ -67,7 +67,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn f() {
var i : i32;
i = 0;
@@ -77,13 +77,13 @@ fn f() {
}
)";
- auto got = Run<LoopToForLoop>(src);
+ auto got = Run<LoopToForLoop>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(LoopToForLoopTest, IfElseBreak) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
var i : i32;
i = 0;
@@ -102,7 +102,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn f() {
var i : i32;
i = 0;
@@ -112,13 +112,13 @@ fn f() {
}
)";
- auto got = Run<LoopToForLoop>(src);
+ auto got = Run<LoopToForLoop>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(LoopToForLoopTest, Nested) {
- auto* src = R"(
+ auto* src = R"(
let N = 16u;
fn f() {
@@ -150,7 +150,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
let N = 16u;
fn f() {
@@ -167,13 +167,13 @@ fn f() {
}
)";
- auto got = Run<LoopToForLoop>(src);
+ auto got = Run<LoopToForLoop>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(LoopToForLoopTest, NoTransform_IfMultipleStmts) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
var i : i32;
i = 0;
@@ -191,15 +191,15 @@ fn f() {
}
)";
- auto* expect = src;
+ auto* expect = src;
- auto got = Run<LoopToForLoop>(src);
+ auto got = Run<LoopToForLoop>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(LoopToForLoopTest, NoTransform_IfElseMultipleStmts) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
var i : i32;
i = 0;
@@ -218,15 +218,15 @@ fn f() {
}
)";
- auto* expect = src;
+ auto* expect = src;
- auto got = Run<LoopToForLoop>(src);
+ auto got = Run<LoopToForLoop>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(LoopToForLoopTest, NoTransform_ContinuingIsCompound) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
var i : i32;
i = 0;
@@ -244,15 +244,15 @@ fn f() {
}
)";
- auto* expect = src;
+ auto* expect = src;
- auto got = Run<LoopToForLoop>(src);
+ auto got = Run<LoopToForLoop>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(LoopToForLoopTest, NoTransform_ContinuingMultipleStmts) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
var i : i32;
i = 0;
@@ -270,15 +270,15 @@ fn f() {
}
)";
- auto* expect = src;
+ auto* expect = src;
- auto got = Run<LoopToForLoop>(src);
+ auto got = Run<LoopToForLoop>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(LoopToForLoopTest, NoTransform_ContinuingUsesVarDeclInLoopBody) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
var i : i32;
i = 0;
@@ -295,11 +295,63 @@ fn f() {
}
)";
- auto* expect = src;
+ auto* expect = src;
- auto got = Run<LoopToForLoop>(src);
+ auto got = Run<LoopToForLoop>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
+}
+
+TEST_F(LoopToForLoopTest, NoTransform_IfBreakWithElse) {
+ auto* src = R"(
+fn f() {
+ var i : i32;
+ i = 0;
+ loop {
+ if ((i > 15)) {
+ break;
+ } else {
+ }
+ _ = 123;
+
+ continuing {
+ i = (i + 1);
+ }
+ }
+}
+)";
+
+ auto* expect = src;
+
+ auto got = Run<LoopToForLoop>(src);
+
+ EXPECT_EQ(expect, str(got));
+}
+
+TEST_F(LoopToForLoopTest, NoTransform_IfBreakWithElseIf) {
+ auto* src = R"(
+fn f() {
+ var i : i32;
+ i = 0;
+ loop {
+ if ((i > 15)) {
+ break;
+ } else if (true) {
+ }
+ _ = 123;
+
+ continuing {
+ i = (i + 1);
+ }
+ }
+}
+)";
+
+ auto* expect = src;
+
+ auto got = Run<LoopToForLoop>(src);
+
+ EXPECT_EQ(expect, str(got));
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/transform/manager.cc b/chromium/third_party/dawn/src/tint/transform/manager.cc
index a52f17531ac..e5f7682ead8 100644
--- a/chromium/third_party/dawn/src/tint/transform/manager.cc
+++ b/chromium/third_party/dawn/src/tint/transform/manager.cc
@@ -32,53 +32,47 @@ Manager::Manager() = default;
Manager::~Manager() = default;
Output Manager::Run(const Program* program, const DataMap& data) const {
- const Program* in = program;
+ const Program* in = program;
#if TINT_PRINT_PROGRAM_FOR_EACH_TRANSFORM
- auto print_program = [&](const char* msg, const Transform* transform) {
- auto wgsl = Program::printer(in);
- std::cout << "---------------------------------------------------------"
- << std::endl;
- std::cout << "-- " << msg << " " << transform->TypeInfo().name << ":"
- << std::endl;
- std::cout << "---------------------------------------------------------"
- << std::endl;
- std::cout << wgsl << std::endl;
- std::cout << "---------------------------------------------------------"
- << std::endl
- << std::endl;
- };
+ auto print_program = [&](const char* msg, const Transform* transform) {
+ auto wgsl = Program::printer(in);
+ std::cout << "---------------------------------------------------------" << std::endl;
+ std::cout << "-- " << msg << " " << transform->TypeInfo().name << ":" << std::endl;
+ std::cout << "---------------------------------------------------------" << std::endl;
+ std::cout << wgsl << std::endl;
+ std::cout << "---------------------------------------------------------" << std::endl
+ << std::endl;
+ };
#endif
- Output out;
- for (const auto& transform : transforms_) {
- if (!transform->ShouldRun(in, data)) {
- TINT_IF_PRINT_PROGRAM(std::cout << "Skipping "
- << transform->TypeInfo().name);
- continue;
- }
- TINT_IF_PRINT_PROGRAM(print_program("Input to", transform.get()));
+ Output out;
+ for (const auto& transform : transforms_) {
+ if (!transform->ShouldRun(in, data)) {
+ TINT_IF_PRINT_PROGRAM(std::cout << "Skipping " << transform->TypeInfo().name << std::endl);
+ continue;
+ }
+ TINT_IF_PRINT_PROGRAM(print_program("Input to", transform.get()));
- auto res = transform->Run(in, data);
- out.program = std::move(res.program);
- out.data.Add(std::move(res.data));
- in = &out.program;
- if (!in->IsValid()) {
- TINT_IF_PRINT_PROGRAM(
- print_program("Invalid output of", transform.get()));
- return out;
- }
+ auto res = transform->Run(in, data);
+ out.program = std::move(res.program);
+ out.data.Add(std::move(res.data));
+ in = &out.program;
+ if (!in->IsValid()) {
+ TINT_IF_PRINT_PROGRAM(print_program("Invalid output of", transform.get()));
+ return out;
+ }
- if (transform == transforms_.back()) {
- TINT_IF_PRINT_PROGRAM(print_program("Output of", transform.get()));
+ if (transform == transforms_.back()) {
+ TINT_IF_PRINT_PROGRAM(print_program("Output of", transform.get()));
+ }
}
- }
- if (program == in) {
- out.program = program->Clone();
- }
+ if (program == in) {
+ out.program = program->Clone();
+ }
- return out;
+ return out;
}
} // namespace tint::transform
diff --git a/chromium/third_party/dawn/src/tint/transform/manager.h b/chromium/third_party/dawn/src/tint/transform/manager.h
index fb614d3920b..9f5c6bcf218 100644
--- a/chromium/third_party/dawn/src/tint/transform/manager.h
+++ b/chromium/third_party/dawn/src/tint/transform/manager.h
@@ -28,33 +28,33 @@ namespace tint::transform {
/// If any inner transform fails the manager will return immediately and
/// the error can be retrieved with the Output's diagnostics.
class Manager : public Castable<Manager, Transform> {
- public:
- /// Constructor
- Manager();
- ~Manager() override;
-
- /// Add pass to the manager
- /// @param transform the transform to append
- void append(std::unique_ptr<Transform> transform) {
- transforms_.push_back(std::move(transform));
- }
-
- /// Add pass to the manager of type `T`, constructed with the provided
- /// arguments.
- /// @param args the arguments to forward to the `T` constructor
- template <typename T, typename... ARGS>
- void Add(ARGS&&... args) {
- transforms_.emplace_back(std::make_unique<T>(std::forward<ARGS>(args)...));
- }
-
- /// Runs the transforms on `program`, returning the transformation result.
- /// @param program the source program to transform
- /// @param data optional extra transform-specific input data
- /// @returns the transformed program and diagnostics
- Output Run(const Program* program, const DataMap& data = {}) const override;
-
- private:
- std::vector<std::unique_ptr<Transform>> transforms_;
+ public:
+ /// Constructor
+ Manager();
+ ~Manager() override;
+
+ /// Add pass to the manager
+ /// @param transform the transform to append
+ void append(std::unique_ptr<Transform> transform) {
+ transforms_.push_back(std::move(transform));
+ }
+
+ /// Add pass to the manager of type `T`, constructed with the provided
+ /// arguments.
+ /// @param args the arguments to forward to the `T` constructor
+ template <typename T, typename... ARGS>
+ void Add(ARGS&&... args) {
+ transforms_.emplace_back(std::make_unique<T>(std::forward<ARGS>(args)...));
+ }
+
+ /// Runs the transforms on `program`, returning the transformation result.
+ /// @param program the source program to transform
+ /// @param data optional extra transform-specific input data
+ /// @returns the transformed program and diagnostics
+ Output Run(const Program* program, const DataMap& data = {}) const override;
+
+ private:
+ std::vector<std::unique_ptr<Transform>> transforms_;
};
} // namespace tint::transform
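As a usage note for the Manager API above, here is a minimal sketch, assuming `program` is a valid tint::Program produced elsewhere; RunPipeline is a name invented for this example and chains the two transforms touched by this patch.

#include "src/tint/transform/loop_to_for_loop.h"
#include "src/tint/transform/manager.h"
#include "src/tint/transform/module_scope_var_to_entry_point_param.h"

// Sketch only: run a small transform pipeline and inspect the result.
tint::transform::Output RunPipeline(const tint::Program* program) {
    tint::transform::Manager manager;
    // Transforms run in the order they were added; the Manager skips any
    // transform whose ShouldRun() returns false for the current program.
    manager.Add<tint::transform::LoopToForLoop>();
    manager.Add<tint::transform::ModuleScopeVarToEntryPointParam>();

    auto output = manager.Run(program);
    if (!output.program.IsValid()) {
        // The returned program carries the diagnostics of the failing transform.
    }
    return output;
}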
diff --git a/chromium/third_party/dawn/src/tint/transform/module_scope_var_to_entry_point_param.cc b/chromium/third_party/dawn/src/tint/transform/module_scope_var_to_entry_point_param.cc
index b510d7d1dfb..22bcd5c1bc1 100644
--- a/chromium/third_party/dawn/src/tint/transform/module_scope_var_to_entry_point_param.cc
+++ b/chromium/third_party/dawn/src/tint/transform/module_scope_var_to_entry_point_param.cc
@@ -32,366 +32,350 @@ namespace tint::transform {
namespace {
// Returns `true` if `type` is or contains a matrix type.
bool ContainsMatrix(const sem::Type* type) {
- type = type->UnwrapRef();
- if (type->Is<sem::Matrix>()) {
- return true;
- } else if (auto* ary = type->As<sem::Array>()) {
- return ContainsMatrix(ary->ElemType());
- } else if (auto* str = type->As<sem::Struct>()) {
- for (auto* member : str->Members()) {
- if (ContainsMatrix(member->Type())) {
+ type = type->UnwrapRef();
+ if (type->Is<sem::Matrix>()) {
return true;
- }
+ } else if (auto* ary = type->As<sem::Array>()) {
+ return ContainsMatrix(ary->ElemType());
+ } else if (auto* str = type->As<sem::Struct>()) {
+ for (auto* member : str->Members()) {
+ if (ContainsMatrix(member->Type())) {
+ return true;
+ }
+ }
}
- }
- return false;
+ return false;
}
} // namespace
/// State holds the current transform state.
struct ModuleScopeVarToEntryPointParam::State {
- /// The clone context.
- CloneContext& ctx;
-
- /// Constructor
- /// @param context the clone context
- explicit State(CloneContext& context) : ctx(context) {}
-
- /// Clone any struct types that are contained in `ty` (including `ty` itself),
- /// and add it to the global declarations now, so that they precede new global
- /// declarations that need to reference them.
- /// @param ty the type to clone
- void CloneStructTypes(const sem::Type* ty) {
- if (auto* str = ty->As<sem::Struct>()) {
- if (!cloned_structs_.emplace(str).second) {
- // The struct has already been cloned.
- return;
- }
-
- // Recurse into members.
- for (auto* member : str->Members()) {
- CloneStructTypes(member->Type());
- }
-
- // Clone the struct and add it to the global declaration list.
- // Remove the old declaration.
- auto* ast_str = str->Declaration();
- ctx.dst->AST().AddTypeDecl(ctx.Clone(ast_str));
- ctx.Remove(ctx.src->AST().GlobalDeclarations(), ast_str);
- } else if (auto* arr = ty->As<sem::Array>()) {
- CloneStructTypes(arr->ElemType());
- }
- }
-
- /// Process the module.
- void Process() {
- // Predetermine the list of function calls that need to be replaced.
- using CallList = std::vector<const ast::CallExpression*>;
- std::unordered_map<const ast::Function*, CallList> calls_to_replace;
-
- std::vector<const ast::Function*> functions_to_process;
-
- // Build a list of functions that transitively reference any module-scope
- // variables.
- for (auto* func_ast : ctx.src->AST().Functions()) {
- auto* func_sem = ctx.src->Sem().Get(func_ast);
-
- bool needs_processing = false;
- for (auto* var : func_sem->TransitivelyReferencedGlobals()) {
- if (var->StorageClass() != ast::StorageClass::kNone) {
- needs_processing = true;
- break;
- }
- }
- if (needs_processing) {
- functions_to_process.push_back(func_ast);
-
- // Find all of the calls to this function that will need to be replaced.
- for (auto* call : func_sem->CallSites()) {
- calls_to_replace[call->Stmt()->Function()->Declaration()].push_back(
- call->Declaration());
- }
- }
- }
+ /// The clone context.
+ CloneContext& ctx;
+
+ /// Constructor
+ /// @param context the clone context
+ explicit State(CloneContext& context) : ctx(context) {}
+
+ /// Clone any struct types that are contained in `ty` (including `ty` itself),
+ /// and add them to the global declarations now, so that they precede new global
+ /// declarations that need to reference them.
+ /// @param ty the type to clone
+ void CloneStructTypes(const sem::Type* ty) {
+ if (auto* str = ty->As<sem::Struct>()) {
+ if (!cloned_structs_.emplace(str).second) {
+ // The struct has already been cloned.
+ return;
+ }
- // Build a list of `&ident` expressions. We'll use this later to avoid
- // generating expressions of the form `&*ident`, which break WGSL validation
- // rules when this expression is passed to a function.
- // TODO(jrprice): We should add support for bidirectional SEM tree traversal
- // so that we can do this on the fly instead.
- std::unordered_map<const ast::IdentifierExpression*,
- const ast::UnaryOpExpression*>
- ident_to_address_of;
- for (auto* node : ctx.src->ASTNodes().Objects()) {
- auto* address_of = node->As<ast::UnaryOpExpression>();
- if (!address_of || address_of->op != ast::UnaryOp::kAddressOf) {
- continue;
- }
- if (auto* ident = address_of->expr->As<ast::IdentifierExpression>()) {
- ident_to_address_of[ident] = address_of;
- }
- }
+ // Recurse into members.
+ for (auto* member : str->Members()) {
+ CloneStructTypes(member->Type());
+ }
- for (auto* func_ast : functions_to_process) {
- auto* func_sem = ctx.src->Sem().Get(func_ast);
- bool is_entry_point = func_ast->IsEntryPoint();
-
- // Map module-scope variables onto their replacement.
- struct NewVar {
- Symbol symbol;
- bool is_pointer;
- bool is_wrapped;
- };
- const char* kWrappedArrayMemberName = "arr";
- std::unordered_map<const sem::Variable*, NewVar> var_to_newvar;
-
- // We aggregate all workgroup variables into a struct to avoid hitting
- // MSL's limit for threadgroup memory arguments.
- Symbol workgroup_parameter_symbol;
- ast::StructMemberList workgroup_parameter_members;
- auto workgroup_param = [&]() {
- if (!workgroup_parameter_symbol.IsValid()) {
- workgroup_parameter_symbol = ctx.dst->Sym();
- }
- return workgroup_parameter_symbol;
- };
-
- for (auto* var : func_sem->TransitivelyReferencedGlobals()) {
- auto sc = var->StorageClass();
- auto* ty = var->Type()->UnwrapRef();
- if (sc == ast::StorageClass::kNone) {
- continue;
- }
- if (sc != ast::StorageClass::kPrivate &&
- sc != ast::StorageClass::kStorage &&
- sc != ast::StorageClass::kUniform &&
- sc != ast::StorageClass::kUniformConstant &&
- sc != ast::StorageClass::kWorkgroup) {
- TINT_ICE(Transform, ctx.dst->Diagnostics())
- << "unhandled module-scope storage class (" << sc << ")";
+ // Clone the struct and add it to the global declaration list.
+ // Remove the old declaration.
+ auto* ast_str = str->Declaration();
+ ctx.dst->AST().AddTypeDecl(ctx.Clone(ast_str));
+ ctx.Remove(ctx.src->AST().GlobalDeclarations(), ast_str);
+ } else if (auto* arr = ty->As<sem::Array>()) {
+ CloneStructTypes(arr->ElemType());
}
+ }
- // This is the symbol for the variable that replaces the module-scope
- // var.
- auto new_var_symbol = ctx.dst->Sym();
-
- // Helper to create an AST node for the store type of the variable.
- auto store_type = [&]() { return CreateASTTypeFor(ctx, ty); };
-
- // Track whether the new variable is a pointer or not.
- bool is_pointer = false;
-
- // Track whether the new variable was wrapped in a struct or not.
- bool is_wrapped = false;
-
- if (is_entry_point) {
- if (var->Type()->UnwrapRef()->is_handle()) {
- // For a texture or sampler variable, redeclare it as an entry point
- // parameter. Disable entry point parameter validation.
- auto* disable_validation =
- ctx.dst->Disable(ast::DisabledValidation::kEntryPointParameter);
- auto attrs = ctx.Clone(var->Declaration()->attributes);
- attrs.push_back(disable_validation);
- auto* param = ctx.dst->Param(new_var_symbol, store_type(), attrs);
- ctx.InsertFront(func_ast->params, param);
- } else if (sc == ast::StorageClass::kStorage ||
- sc == ast::StorageClass::kUniform) {
- // Variables into the Storage and Uniform storage classes are
- // redeclared as entry point parameters with a pointer type.
- auto attributes = ctx.Clone(var->Declaration()->attributes);
- attributes.push_back(ctx.dst->Disable(
- ast::DisabledValidation::kEntryPointParameter));
- attributes.push_back(
- ctx.dst->Disable(ast::DisabledValidation::kIgnoreStorageClass));
-
- auto* param_type = store_type();
- if (auto* arr = ty->As<sem::Array>();
- arr && arr->IsRuntimeSized()) {
- // Wrap runtime-sized arrays in structures, so that we can declare
- // pointers to them. Ideally we'd just emit the array itself as a
- // pointer, but this is not representable in Tint's AST.
- CloneStructTypes(ty);
- auto* wrapper = ctx.dst->Structure(
- ctx.dst->Sym(),
- {ctx.dst->Member(kWrappedArrayMemberName, param_type)});
- param_type = ctx.dst->ty.Of(wrapper);
- is_wrapped = true;
+ /// Process the module.
+ void Process() {
+ // Predetermine the list of function calls that need to be replaced.
+ using CallList = std::vector<const ast::CallExpression*>;
+ std::unordered_map<const ast::Function*, CallList> calls_to_replace;
+
+ std::vector<const ast::Function*> functions_to_process;
+
+ // Build a list of functions that transitively reference any module-scope
+ // variables.
+ for (auto* func_ast : ctx.src->AST().Functions()) {
+ auto* func_sem = ctx.src->Sem().Get(func_ast);
+
+ bool needs_processing = false;
+ for (auto* var : func_sem->TransitivelyReferencedGlobals()) {
+ if (var->StorageClass() != ast::StorageClass::kNone) {
+ needs_processing = true;
+ break;
+ }
+ }
+ if (needs_processing) {
+ functions_to_process.push_back(func_ast);
+
+ // Find all of the calls to this function that will need to be replaced.
+ for (auto* call : func_sem->CallSites()) {
+ calls_to_replace[call->Stmt()->Function()->Declaration()].push_back(
+ call->Declaration());
+ }
}
-
- param_type = ctx.dst->ty.pointer(
- param_type, sc, var->Declaration()->declared_access);
- auto* param =
- ctx.dst->Param(new_var_symbol, param_type, attributes);
- ctx.InsertFront(func_ast->params, param);
- is_pointer = true;
- } else if (sc == ast::StorageClass::kWorkgroup &&
- ContainsMatrix(var->Type())) {
- // Due to a bug in the MSL compiler, we use a threadgroup memory
- // argument for any workgroup allocation that contains a matrix.
- // See crbug.com/tint/938.
- // TODO(jrprice): Do this for all other workgroup variables too.
-
- // Create a member in the workgroup parameter struct.
- auto member = ctx.Clone(var->Declaration()->symbol);
- workgroup_parameter_members.push_back(
- ctx.dst->Member(member, store_type()));
- CloneStructTypes(var->Type()->UnwrapRef());
-
- // Create a function-scope variable that is a pointer to the member.
- auto* member_ptr = ctx.dst->AddressOf(ctx.dst->MemberAccessor(
- ctx.dst->Deref(workgroup_param()), member));
- auto* local_var =
- ctx.dst->Const(new_var_symbol,
- ctx.dst->ty.pointer(
- store_type(), ast::StorageClass::kWorkgroup),
- member_ptr);
- ctx.InsertFront(func_ast->body->statements,
- ctx.dst->Decl(local_var));
- is_pointer = true;
- } else {
- // Variables in the Private and Workgroup storage classes are
- // redeclared at function scope. Disable storage class validation on
- // this variable.
- auto* disable_validation =
- ctx.dst->Disable(ast::DisabledValidation::kIgnoreStorageClass);
- auto* constructor = ctx.Clone(var->Declaration()->constructor);
- auto* local_var =
- ctx.dst->Var(new_var_symbol, store_type(), sc, constructor,
- ast::AttributeList{disable_validation});
- ctx.InsertFront(func_ast->body->statements,
- ctx.dst->Decl(local_var));
- }
- } else {
- // For a regular function, redeclare the variable as a parameter.
- // Use a pointer for non-handle types.
- auto* param_type = store_type();
- ast::AttributeList attributes;
- if (!var->Type()->UnwrapRef()->is_handle()) {
- param_type = ctx.dst->ty.pointer(
- param_type, sc, var->Declaration()->declared_access);
- is_pointer = true;
-
- // Disable validation of the parameter's storage class and of
- // arguments passed it.
- attributes.push_back(
- ctx.dst->Disable(ast::DisabledValidation::kIgnoreStorageClass));
- attributes.push_back(ctx.dst->Disable(
- ast::DisabledValidation::kIgnoreInvalidPointerArgument));
- }
- ctx.InsertBack(
- func_ast->params,
- ctx.dst->Param(new_var_symbol, param_type, attributes));
}
- // Replace all uses of the module-scope variable.
- // For non-entry points, dereference non-handle pointer parameters.
- for (auto* user : var->Users()) {
- if (user->Stmt()->Function()->Declaration() == func_ast) {
- const ast::Expression* expr = ctx.dst->Expr(new_var_symbol);
- if (is_pointer) {
- // If this identifier is used by an address-of operator, just
- // remove the address-of instead of adding a deref, since we
- // already have a pointer.
- auto* ident =
- user->Declaration()->As<ast::IdentifierExpression>();
- if (ident_to_address_of.count(ident)) {
- ctx.Replace(ident_to_address_of[ident], expr);
+ // Build a list of `&ident` expressions. We'll use this later to avoid
+ // generating expressions of the form `&*ident`, which break WGSL validation
+ // rules when this expression is passed to a function.
+ // TODO(jrprice): We should add support for bidirectional SEM tree traversal
+ // so that we can do this on the fly instead.
+ std::unordered_map<const ast::IdentifierExpression*, const ast::UnaryOpExpression*>
+ ident_to_address_of;
+ for (auto* node : ctx.src->ASTNodes().Objects()) {
+ auto* address_of = node->As<ast::UnaryOpExpression>();
+ if (!address_of || address_of->op != ast::UnaryOp::kAddressOf) {
continue;
- }
-
- expr = ctx.dst->Deref(expr);
}
- if (is_wrapped) {
- // Get the member from the wrapper structure.
- expr = ctx.dst->MemberAccessor(expr, kWrappedArrayMemberName);
+ if (auto* ident = address_of->expr->As<ast::IdentifierExpression>()) {
+ ident_to_address_of[ident] = address_of;
}
- ctx.Replace(user->Declaration(), expr);
- }
}
- var_to_newvar[var] = {new_var_symbol, is_pointer, is_wrapped};
- }
-
- if (!workgroup_parameter_members.empty()) {
- // Create the workgroup memory parameter.
- // The parameter is a struct that contains members for each workgroup
- // variable.
- auto* str = ctx.dst->Structure(ctx.dst->Sym(),
- std::move(workgroup_parameter_members));
- auto* param_type = ctx.dst->ty.pointer(ctx.dst->ty.Of(str),
- ast::StorageClass::kWorkgroup);
- auto* disable_validation =
- ctx.dst->Disable(ast::DisabledValidation::kEntryPointParameter);
- auto* param =
- ctx.dst->Param(workgroup_param(), param_type, {disable_validation});
- ctx.InsertFront(func_ast->params, param);
- }
-
- // Pass the variables as pointers to any functions that need them.
- for (auto* call : calls_to_replace[func_ast]) {
- auto* target =
- ctx.src->AST().Functions().Find(call->target.name->symbol);
- auto* target_sem = ctx.src->Sem().Get(target);
-
- // Add new arguments for any variables that are needed by the callee.
- // For entry points, pass non-handle types as pointers.
- for (auto* target_var : target_sem->TransitivelyReferencedGlobals()) {
- auto sc = target_var->StorageClass();
- if (sc == ast::StorageClass::kNone) {
- continue;
- }
-
- auto new_var = var_to_newvar[target_var];
- bool is_handle = target_var->Type()->UnwrapRef()->is_handle();
- const ast::Expression* arg = ctx.dst->Expr(new_var.symbol);
- if (new_var.is_wrapped) {
- // The variable is wrapped in a struct, so we need to pass a pointer
- // to the struct member instead.
- arg = ctx.dst->AddressOf(ctx.dst->MemberAccessor(
- ctx.dst->Deref(arg), kWrappedArrayMemberName));
- } else if (is_entry_point && !is_handle && !new_var.is_pointer) {
- // We need to pass a pointer and we don't already have one, so take
- // the address of the new variable.
- arg = ctx.dst->AddressOf(arg);
- }
- ctx.InsertBack(call->args, arg);
+ for (auto* func_ast : functions_to_process) {
+ auto* func_sem = ctx.src->Sem().Get(func_ast);
+ bool is_entry_point = func_ast->IsEntryPoint();
+
+ // Map module-scope variables onto their replacement.
+ struct NewVar {
+ Symbol symbol;
+ bool is_pointer;
+ bool is_wrapped;
+ };
+ const char* kWrappedArrayMemberName = "arr";
+ std::unordered_map<const sem::Variable*, NewVar> var_to_newvar;
+
+ // We aggregate all workgroup variables into a struct to avoid hitting
+ // MSL's limit for threadgroup memory arguments.
+ Symbol workgroup_parameter_symbol;
+ ast::StructMemberList workgroup_parameter_members;
+ auto workgroup_param = [&]() {
+ if (!workgroup_parameter_symbol.IsValid()) {
+ workgroup_parameter_symbol = ctx.dst->Sym();
+ }
+ return workgroup_parameter_symbol;
+ };
+
+ for (auto* var : func_sem->TransitivelyReferencedGlobals()) {
+ auto sc = var->StorageClass();
+ auto* ty = var->Type()->UnwrapRef();
+ if (sc == ast::StorageClass::kNone) {
+ continue;
+ }
+ if (sc != ast::StorageClass::kPrivate && sc != ast::StorageClass::kStorage &&
+ sc != ast::StorageClass::kUniform && sc != ast::StorageClass::kHandle &&
+ sc != ast::StorageClass::kWorkgroup) {
+ TINT_ICE(Transform, ctx.dst->Diagnostics())
+ << "unhandled module-scope storage class (" << sc << ")";
+ }
+
+ // This is the symbol for the variable that replaces the module-scope
+ // var.
+ auto new_var_symbol = ctx.dst->Sym();
+
+ // Helper to create an AST node for the store type of the variable.
+ auto store_type = [&]() { return CreateASTTypeFor(ctx, ty); };
+
+ // Track whether the new variable is a pointer or not.
+ bool is_pointer = false;
+
+ // Track whether the new variable was wrapped in a struct or not.
+ bool is_wrapped = false;
+
+ if (is_entry_point) {
+ if (var->Type()->UnwrapRef()->is_handle()) {
+ // For a texture or sampler variable, redeclare it as an entry point
+ // parameter. Disable entry point parameter validation.
+ auto* disable_validation =
+ ctx.dst->Disable(ast::DisabledValidation::kEntryPointParameter);
+ auto attrs = ctx.Clone(var->Declaration()->attributes);
+ attrs.push_back(disable_validation);
+ auto* param = ctx.dst->Param(new_var_symbol, store_type(), attrs);
+ ctx.InsertFront(func_ast->params, param);
+ } else if (sc == ast::StorageClass::kStorage ||
+ sc == ast::StorageClass::kUniform) {
+ // Variables in the Storage and Uniform storage classes are
+ // redeclared as entry point parameters with a pointer type.
+ auto attributes = ctx.Clone(var->Declaration()->attributes);
+ attributes.push_back(
+ ctx.dst->Disable(ast::DisabledValidation::kEntryPointParameter));
+ attributes.push_back(
+ ctx.dst->Disable(ast::DisabledValidation::kIgnoreStorageClass));
+
+ auto* param_type = store_type();
+ if (auto* arr = ty->As<sem::Array>(); arr && arr->IsRuntimeSized()) {
+ // Wrap runtime-sized arrays in structures, so that we can declare
+ // pointers to them. Ideally we'd just emit the array itself as a
+ // pointer, but this is not representable in Tint's AST.
+ CloneStructTypes(ty);
+ auto* wrapper = ctx.dst->Structure(
+ ctx.dst->Sym(),
+ {ctx.dst->Member(kWrappedArrayMemberName, param_type)});
+ param_type = ctx.dst->ty.Of(wrapper);
+ is_wrapped = true;
+ }
+
+ param_type = ctx.dst->ty.pointer(param_type, sc,
+ var->Declaration()->declared_access);
+ auto* param = ctx.dst->Param(new_var_symbol, param_type, attributes);
+ ctx.InsertFront(func_ast->params, param);
+ is_pointer = true;
+ } else if (sc == ast::StorageClass::kWorkgroup && ContainsMatrix(var->Type())) {
+ // Due to a bug in the MSL compiler, we use a threadgroup memory
+ // argument for any workgroup allocation that contains a matrix.
+ // See crbug.com/tint/938.
+ // TODO(jrprice): Do this for all other workgroup variables too.
+
+ // Create a member in the workgroup parameter struct.
+ auto member = ctx.Clone(var->Declaration()->symbol);
+ workgroup_parameter_members.push_back(
+ ctx.dst->Member(member, store_type()));
+ CloneStructTypes(var->Type()->UnwrapRef());
+
+ // Create a function-scope variable that is a pointer to the member.
+ auto* member_ptr = ctx.dst->AddressOf(
+ ctx.dst->MemberAccessor(ctx.dst->Deref(workgroup_param()), member));
+ auto* local_var = ctx.dst->Let(
+ new_var_symbol,
+ ctx.dst->ty.pointer(store_type(), ast::StorageClass::kWorkgroup),
+ member_ptr);
+ ctx.InsertFront(func_ast->body->statements, ctx.dst->Decl(local_var));
+ is_pointer = true;
+ } else {
+ // Variables in the Private and Workgroup storage classes are
+ // redeclared at function scope. Disable storage class validation on
+ // this variable.
+ auto* disable_validation =
+ ctx.dst->Disable(ast::DisabledValidation::kIgnoreStorageClass);
+ auto* constructor = ctx.Clone(var->Declaration()->constructor);
+ auto* local_var =
+ ctx.dst->Var(new_var_symbol, store_type(), sc, constructor,
+ ast::AttributeList{disable_validation});
+ ctx.InsertFront(func_ast->body->statements, ctx.dst->Decl(local_var));
+ }
+ } else {
+ // For a regular function, redeclare the variable as a parameter.
+ // Use a pointer for non-handle types.
+ auto* param_type = store_type();
+ ast::AttributeList attributes;
+ if (!var->Type()->UnwrapRef()->is_handle()) {
+ param_type = ctx.dst->ty.pointer(param_type, sc,
+ var->Declaration()->declared_access);
+ is_pointer = true;
+
+ // Disable validation of the parameter's storage class and of
+ // arguments passed to it.
+ attributes.push_back(
+ ctx.dst->Disable(ast::DisabledValidation::kIgnoreStorageClass));
+ attributes.push_back(ctx.dst->Disable(
+ ast::DisabledValidation::kIgnoreInvalidPointerArgument));
+ }
+ ctx.InsertBack(func_ast->params,
+ ctx.dst->Param(new_var_symbol, param_type, attributes));
+ }
+
+ // Replace all uses of the module-scope variable.
+ // For non-entry points, dereference non-handle pointer parameters.
+ for (auto* user : var->Users()) {
+ if (user->Stmt()->Function()->Declaration() == func_ast) {
+ const ast::Expression* expr = ctx.dst->Expr(new_var_symbol);
+ if (is_pointer) {
+ // If this identifier is used by an address-of operator, just
+ // remove the address-of instead of adding a deref, since we
+ // already have a pointer.
+ auto* ident = user->Declaration()->As<ast::IdentifierExpression>();
+ if (ident_to_address_of.count(ident)) {
+ ctx.Replace(ident_to_address_of[ident], expr);
+ continue;
+ }
+
+ expr = ctx.dst->Deref(expr);
+ }
+ if (is_wrapped) {
+ // Get the member from the wrapper structure.
+ expr = ctx.dst->MemberAccessor(expr, kWrappedArrayMemberName);
+ }
+ ctx.Replace(user->Declaration(), expr);
+ }
+ }
+
+ var_to_newvar[var] = {new_var_symbol, is_pointer, is_wrapped};
+ }
+
+ if (!workgroup_parameter_members.empty()) {
+ // Create the workgroup memory parameter.
+ // The parameter is a struct that contains members for each workgroup
+ // variable.
+ auto* str =
+ ctx.dst->Structure(ctx.dst->Sym(), std::move(workgroup_parameter_members));
+ auto* param_type =
+ ctx.dst->ty.pointer(ctx.dst->ty.Of(str), ast::StorageClass::kWorkgroup);
+ auto* disable_validation =
+ ctx.dst->Disable(ast::DisabledValidation::kEntryPointParameter);
+ auto* param = ctx.dst->Param(workgroup_param(), param_type, {disable_validation});
+ ctx.InsertFront(func_ast->params, param);
+ }
+
+ // Pass the variables as pointers to any functions that need them.
+ for (auto* call : calls_to_replace[func_ast]) {
+ auto* target = ctx.src->AST().Functions().Find(call->target.name->symbol);
+ auto* target_sem = ctx.src->Sem().Get(target);
+
+ // Add new arguments for any variables that are needed by the callee.
+ // For entry points, pass non-handle types as pointers.
+ for (auto* target_var : target_sem->TransitivelyReferencedGlobals()) {
+ auto sc = target_var->StorageClass();
+ if (sc == ast::StorageClass::kNone) {
+ continue;
+ }
+
+ auto new_var = var_to_newvar[target_var];
+ bool is_handle = target_var->Type()->UnwrapRef()->is_handle();
+ const ast::Expression* arg = ctx.dst->Expr(new_var.symbol);
+ if (new_var.is_wrapped) {
+ // The variable is wrapped in a struct, so we need to pass a pointer
+ // to the struct member instead.
+ arg = ctx.dst->AddressOf(
+ ctx.dst->MemberAccessor(ctx.dst->Deref(arg), kWrappedArrayMemberName));
+ } else if (is_entry_point && !is_handle && !new_var.is_pointer) {
+ // We need to pass a pointer and we don't already have one, so take
+ // the address of the new variable.
+ arg = ctx.dst->AddressOf(arg);
+ }
+ ctx.InsertBack(call->args, arg);
+ }
+ }
}
- }
- }
- // Now remove all module-scope variables with these storage classes.
- for (auto* var_ast : ctx.src->AST().GlobalVariables()) {
- auto* var_sem = ctx.src->Sem().Get(var_ast);
- if (var_sem->StorageClass() != ast::StorageClass::kNone) {
- ctx.Remove(ctx.src->AST().GlobalDeclarations(), var_ast);
- }
+ // Now remove all module-scope variables with these storage classes.
+ for (auto* var_ast : ctx.src->AST().GlobalVariables()) {
+ auto* var_sem = ctx.src->Sem().Get(var_ast);
+ if (var_sem->StorageClass() != ast::StorageClass::kNone) {
+ ctx.Remove(ctx.src->AST().GlobalDeclarations(), var_ast);
+ }
+ }
}
- }
- private:
- std::unordered_set<const sem::Struct*> cloned_structs_;
+ private:
+ std::unordered_set<const sem::Struct*> cloned_structs_;
};
ModuleScopeVarToEntryPointParam::ModuleScopeVarToEntryPointParam() = default;
ModuleScopeVarToEntryPointParam::~ModuleScopeVarToEntryPointParam() = default;
-bool ModuleScopeVarToEntryPointParam::ShouldRun(const Program* program,
- const DataMap&) const {
- for (auto* decl : program->AST().GlobalDeclarations()) {
- if (decl->Is<ast::Variable>()) {
- return true;
+bool ModuleScopeVarToEntryPointParam::ShouldRun(const Program* program, const DataMap&) const {
+ for (auto* decl : program->AST().GlobalDeclarations()) {
+ if (decl->Is<ast::Variable>()) {
+ return true;
+ }
}
- }
- return false;
+ return false;
}
-void ModuleScopeVarToEntryPointParam::Run(CloneContext& ctx,
- const DataMap&,
- DataMap&) const {
- State state{ctx};
- state.Process();
- ctx.Clone();
+void ModuleScopeVarToEntryPointParam::Run(CloneContext& ctx, const DataMap&, DataMap&) const {
+ State state{ctx};
+ state.Process();
+ ctx.Clone();
}
} // namespace tint::transform
diff --git a/chromium/third_party/dawn/src/tint/transform/module_scope_var_to_entry_point_param.h b/chromium/third_party/dawn/src/tint/transform/module_scope_var_to_entry_point_param.h
index 82970577982..e3a50f40135 100644
--- a/chromium/third_party/dawn/src/tint/transform/module_scope_var_to_entry_point_param.h
+++ b/chromium/third_party/dawn/src/tint/transform/module_scope_var_to_entry_point_param.h
@@ -43,7 +43,7 @@ namespace tint::transform {
/// p = p + f;
/// }
///
-/// @stage(compute) @workgroup_size(1)
+/// @compute @workgroup_size(1)
/// fn main() {
/// foo();
/// }
@@ -55,7 +55,7 @@ namespace tint::transform {
/// *p = *p + (*sptr).f;
/// }
///
-/// @stage(compute) @workgroup_size(1)
+/// @compute @workgroup_size(1)
/// fn main(sptr : ptr<storage, S, read>) {
/// var<private> p : f32 = 2.0;
/// foo(&p, sptr);
@@ -63,30 +63,27 @@ namespace tint::transform {
/// ```
class ModuleScopeVarToEntryPointParam
: public Castable<ModuleScopeVarToEntryPointParam, Transform> {
- public:
- /// Constructor
- ModuleScopeVarToEntryPointParam();
- /// Destructor
- ~ModuleScopeVarToEntryPointParam() override;
+ public:
+ /// Constructor
+ ModuleScopeVarToEntryPointParam();
+ /// Destructor
+ ~ModuleScopeVarToEntryPointParam() override;
- /// @param program the program to inspect
- /// @param data optional extra transform-specific input data
- /// @returns true if this transform should be run for the given program
- bool ShouldRun(const Program* program,
- const DataMap& data = {}) const override;
+ /// @param program the program to inspect
+ /// @param data optional extra transform-specific input data
+ /// @returns true if this transform should be run for the given program
+ bool ShouldRun(const Program* program, const DataMap& data = {}) const override;
- protected:
- /// Runs the transform using the CloneContext built for transforming a
- /// program. Run() is responsible for calling Clone() on the CloneContext.
- /// @param ctx the CloneContext primed with the input program and
- /// ProgramBuilder
- /// @param inputs optional extra transform-specific input data
- /// @param outputs optional extra transform-specific output data
- void Run(CloneContext& ctx,
- const DataMap& inputs,
- DataMap& outputs) const override;
+ protected:
+ /// Runs the transform using the CloneContext built for transforming a
+ /// program. Run() is responsible for calling Clone() on the CloneContext.
+ /// @param ctx the CloneContext primed with the input program and
+ /// ProgramBuilder
+ /// @param inputs optional extra transform-specific input data
+ /// @param outputs optional extra transform-specific output data
+ void Run(CloneContext& ctx, const DataMap& inputs, DataMap& outputs) const override;
- struct State;
+ struct State;
};
} // namespace tint::transform
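A minimal sketch of the pre-check declared above, assuming `program` is a valid tint::Program; NeedsEntryPointParamRewrite is an illustrative name, and the check simply forwards to ShouldRun(), which reports true once the module declares any module-scope variable.

#include "src/tint/transform/module_scope_var_to_entry_point_param.h"

// Sketch only: decide whether the transform would change the program.
bool NeedsEntryPointParamRewrite(const tint::Program* program) {
    tint::transform::ModuleScopeVarToEntryPointParam transform;
    return transform.ShouldRun(program);
}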
diff --git a/chromium/third_party/dawn/src/tint/transform/module_scope_var_to_entry_point_param_test.cc b/chromium/third_party/dawn/src/tint/transform/module_scope_var_to_entry_point_param_test.cc
index 3089355b5f3..9e81d318bdc 100644
--- a/chromium/third_party/dawn/src/tint/transform/module_scope_var_to_entry_point_param_test.cc
+++ b/chromium/third_party/dawn/src/tint/transform/module_scope_var_to_entry_point_param_test.cc
@@ -24,32 +24,32 @@ namespace {
using ModuleScopeVarToEntryPointParamTest = TransformTest;
TEST_F(ModuleScopeVarToEntryPointParamTest, ShouldRunEmptyModule) {
- auto* src = R"()";
+ auto* src = R"()";
- EXPECT_FALSE(ShouldRun<ModuleScopeVarToEntryPointParam>(src));
+ EXPECT_FALSE(ShouldRun<ModuleScopeVarToEntryPointParam>(src));
}
TEST_F(ModuleScopeVarToEntryPointParamTest, ShouldRunHasGlobal) {
- auto* src = R"(
+ auto* src = R"(
var<private> v : i32;
)";
- EXPECT_TRUE(ShouldRun<ModuleScopeVarToEntryPointParam>(src));
+ EXPECT_TRUE(ShouldRun<ModuleScopeVarToEntryPointParam>(src));
}
TEST_F(ModuleScopeVarToEntryPointParamTest, Basic) {
- auto* src = R"(
+ auto* src = R"(
var<private> p : f32;
var<workgroup> w : f32;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
w = p;
}
)";
- auto* expect = R"(
-@stage(compute) @workgroup_size(1)
+ auto* expect = R"(
+@compute @workgroup_size(1)
fn main() {
@internal(disable_validation__ignore_storage_class) var<workgroup> tint_symbol : f32;
@internal(disable_validation__ignore_storage_class) var<private> tint_symbol_1 : f32;
@@ -57,14 +57,14 @@ fn main() {
}
)";
- auto got = Run<ModuleScopeVarToEntryPointParam>(src);
+ auto got = Run<ModuleScopeVarToEntryPointParam>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(ModuleScopeVarToEntryPointParamTest, Basic_OutOfOrder) {
- auto* src = R"(
-@stage(compute) @workgroup_size(1)
+ auto* src = R"(
+@compute @workgroup_size(1)
fn main() {
w = p;
}
@@ -73,8 +73,8 @@ var<workgroup> w : f32;
var<private> p : f32;
)";
- auto* expect = R"(
-@stage(compute) @workgroup_size(1)
+ auto* expect = R"(
+@compute @workgroup_size(1)
fn main() {
@internal(disable_validation__ignore_storage_class) var<workgroup> tint_symbol : f32;
@internal(disable_validation__ignore_storage_class) var<private> tint_symbol_1 : f32;
@@ -82,13 +82,13 @@ fn main() {
}
)";
- auto got = Run<ModuleScopeVarToEntryPointParam>(src);
+ auto got = Run<ModuleScopeVarToEntryPointParam>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(ModuleScopeVarToEntryPointParamTest, FunctionCalls) {
- auto* src = R"(
+ auto* src = R"(
var<private> p : f32;
var<workgroup> w : f32;
@@ -106,13 +106,13 @@ fn foo(a : f32) {
no_uses();
}
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
foo(1.0);
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn no_uses() {
}
@@ -127,7 +127,7 @@ fn foo(a : f32, @internal(disable_validation__ignore_storage_class) @internal(di
no_uses();
}
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
@internal(disable_validation__ignore_storage_class) var<private> tint_symbol_4 : f32;
@internal(disable_validation__ignore_storage_class) var<workgroup> tint_symbol_5 : f32;
@@ -135,14 +135,14 @@ fn main() {
}
)";
- auto got = Run<ModuleScopeVarToEntryPointParam>(src);
+ auto got = Run<ModuleScopeVarToEntryPointParam>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(ModuleScopeVarToEntryPointParamTest, FunctionCalls_OutOfOrder) {
- auto* src = R"(
-@stage(compute) @workgroup_size(1)
+ auto* src = R"(
+@compute @workgroup_size(1)
fn main() {
foo(1.0);
}
@@ -165,8 +165,8 @@ var<private> p : f32;
var<workgroup> w : f32;
)";
- auto* expect = R"(
-@stage(compute) @workgroup_size(1)
+ auto* expect = R"(
+@compute @workgroup_size(1)
fn main() {
@internal(disable_validation__ignore_storage_class) var<private> tint_symbol : f32;
@internal(disable_validation__ignore_storage_class) var<workgroup> tint_symbol_1 : f32;
@@ -188,24 +188,24 @@ fn bar(a : f32, b : f32, @internal(disable_validation__ignore_storage_class) @in
}
)";
- auto got = Run<ModuleScopeVarToEntryPointParam>(src);
+ auto got = Run<ModuleScopeVarToEntryPointParam>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(ModuleScopeVarToEntryPointParamTest, Constructors) {
- auto* src = R"(
+ auto* src = R"(
var<private> a : f32 = 1.0;
var<private> b : f32 = f32();
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
let x : f32 = a + b;
}
)";
- auto* expect = R"(
-@stage(compute) @workgroup_size(1)
+ auto* expect = R"(
+@compute @workgroup_size(1)
fn main() {
@internal(disable_validation__ignore_storage_class) var<private> tint_symbol : f32 = 1.0;
@internal(disable_validation__ignore_storage_class) var<private> tint_symbol_1 : f32 = f32();
@@ -213,14 +213,14 @@ fn main() {
}
)";
- auto got = Run<ModuleScopeVarToEntryPointParam>(src);
+ auto got = Run<ModuleScopeVarToEntryPointParam>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(ModuleScopeVarToEntryPointParamTest, Constructors_OutOfOrder) {
- auto* src = R"(
-@stage(compute) @workgroup_size(1)
+ auto* src = R"(
+@compute @workgroup_size(1)
fn main() {
let x : f32 = a + b;
}
@@ -229,8 +229,8 @@ var<private> b : f32 = f32();
var<private> a : f32 = 1.0;
)";
- auto* expect = R"(
-@stage(compute) @workgroup_size(1)
+ auto* expect = R"(
+@compute @workgroup_size(1)
fn main() {
@internal(disable_validation__ignore_storage_class) var<private> tint_symbol : f32 = 1.0;
@internal(disable_validation__ignore_storage_class) var<private> tint_symbol_1 : f32 = f32();
@@ -238,17 +238,17 @@ fn main() {
}
)";
- auto got = Run<ModuleScopeVarToEntryPointParam>(src);
+ auto got = Run<ModuleScopeVarToEntryPointParam>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(ModuleScopeVarToEntryPointParamTest, Pointers) {
- auto* src = R"(
+ auto* src = R"(
var<private> p : f32;
var<workgroup> w : f32;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
let p_ptr : ptr<private, f32> = &p;
let w_ptr : ptr<workgroup, f32> = &w;
@@ -257,8 +257,8 @@ fn main() {
}
)";
- auto* expect = R"(
-@stage(compute) @workgroup_size(1)
+ auto* expect = R"(
+@compute @workgroup_size(1)
fn main() {
@internal(disable_validation__ignore_storage_class) var<private> tint_symbol : f32;
@internal(disable_validation__ignore_storage_class) var<workgroup> tint_symbol_1 : f32;
@@ -269,14 +269,14 @@ fn main() {
}
)";
- auto got = Run<ModuleScopeVarToEntryPointParam>(src);
+ auto got = Run<ModuleScopeVarToEntryPointParam>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(ModuleScopeVarToEntryPointParamTest, Pointers_OutOfOrder) {
- auto* src = R"(
-@stage(compute) @workgroup_size(1)
+ auto* src = R"(
+@compute @workgroup_size(1)
fn main() {
let p_ptr : ptr<private, f32> = &p;
let w_ptr : ptr<workgroup, f32> = &w;
@@ -288,8 +288,8 @@ var<workgroup> w : f32;
var<private> p : f32;
)";
- auto* expect = R"(
-@stage(compute) @workgroup_size(1)
+ auto* expect = R"(
+@compute @workgroup_size(1)
fn main() {
@internal(disable_validation__ignore_storage_class) var<private> tint_symbol : f32;
@internal(disable_validation__ignore_storage_class) var<workgroup> tint_symbol_1 : f32;
@@ -300,13 +300,13 @@ fn main() {
}
)";
- auto got = Run<ModuleScopeVarToEntryPointParam>(src);
+ auto got = Run<ModuleScopeVarToEntryPointParam>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(ModuleScopeVarToEntryPointParamTest, FoldAddressOfDeref) {
- auto* src = R"(
+ auto* src = R"(
var<private> v : f32;
fn bar(p : ptr<private, f32>) {
@@ -317,13 +317,13 @@ fn foo() {
bar(&v);
}
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
foo();
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn bar(p : ptr<private, f32>) {
*(p) = 0.0;
}
@@ -332,21 +332,21 @@ fn foo(@internal(disable_validation__ignore_storage_class) @internal(disable_val
bar(tint_symbol);
}
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
@internal(disable_validation__ignore_storage_class) var<private> tint_symbol_1 : f32;
foo(&(tint_symbol_1));
}
)";
- auto got = Run<ModuleScopeVarToEntryPointParam>(src);
+ auto got = Run<ModuleScopeVarToEntryPointParam>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(ModuleScopeVarToEntryPointParamTest, FoldAddressOfDeref_OutOfOrder) {
- auto* src = R"(
-@stage(compute) @workgroup_size(1)
+ auto* src = R"(
+@compute @workgroup_size(1)
fn main() {
foo();
}
@@ -362,8 +362,8 @@ fn bar(p : ptr<private, f32>) {
var<private> v : f32;
)";
- auto* expect = R"(
-@stage(compute) @workgroup_size(1)
+ auto* expect = R"(
+@compute @workgroup_size(1)
fn main() {
@internal(disable_validation__ignore_storage_class) var<private> tint_symbol : f32;
foo(&(tint_symbol));
@@ -378,13 +378,13 @@ fn bar(p : ptr<private, f32>) {
}
)";
- auto got = Run<ModuleScopeVarToEntryPointParam>(src);
+ auto got = Run<ModuleScopeVarToEntryPointParam>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(ModuleScopeVarToEntryPointParamTest, Buffers_Basic) {
- auto* src = R"(
+ auto* src = R"(
struct S {
a : f32,
};
@@ -394,33 +394,33 @@ var<uniform> u : S;
@group(0) @binding(1)
var<storage> s : S;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
_ = u;
_ = s;
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct S {
a : f32,
}
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main(@group(0) @binding(0) @internal(disable_validation__entry_point_parameter) @internal(disable_validation__ignore_storage_class) tint_symbol : ptr<uniform, S>, @group(0) @binding(1) @internal(disable_validation__entry_point_parameter) @internal(disable_validation__ignore_storage_class) tint_symbol_1 : ptr<storage, S>) {
_ = *(tint_symbol);
_ = *(tint_symbol_1);
}
)";
- auto got = Run<ModuleScopeVarToEntryPointParam>(src);
+ auto got = Run<ModuleScopeVarToEntryPointParam>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(ModuleScopeVarToEntryPointParamTest, Buffers_Basic_OutOfOrder) {
- auto* src = R"(
-@stage(compute) @workgroup_size(1)
+ auto* src = R"(
+@compute @workgroup_size(1)
fn main() {
_ = u;
_ = s;
@@ -435,8 +435,8 @@ struct S {
)";
- auto* expect = R"(
-@stage(compute) @workgroup_size(1)
+ auto* expect = R"(
+@compute @workgroup_size(1)
fn main(@group(0) @binding(0) @internal(disable_validation__entry_point_parameter) @internal(disable_validation__ignore_storage_class) tint_symbol : ptr<uniform, S>, @group(0) @binding(1) @internal(disable_validation__entry_point_parameter) @internal(disable_validation__ignore_storage_class) tint_symbol_1 : ptr<storage, S>) {
_ = *(tint_symbol);
_ = *(tint_symbol_1);
@@ -447,41 +447,41 @@ struct S {
}
)";
- auto got = Run<ModuleScopeVarToEntryPointParam>(src);
+ auto got = Run<ModuleScopeVarToEntryPointParam>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(ModuleScopeVarToEntryPointParamTest, Buffer_RuntimeArray) {
- auto* src = R"(
+ auto* src = R"(
@group(0) @binding(0)
var<storage> buffer : array<f32>;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
_ = buffer[0];
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct tint_symbol_1 {
arr : array<f32>,
}
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main(@group(0) @binding(0) @internal(disable_validation__entry_point_parameter) @internal(disable_validation__ignore_storage_class) tint_symbol : ptr<storage, tint_symbol_1>) {
_ = (*(tint_symbol)).arr[0];
}
)";
- auto got = Run<ModuleScopeVarToEntryPointParam>(src);
+ auto got = Run<ModuleScopeVarToEntryPointParam>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(ModuleScopeVarToEntryPointParamTest, Buffer_RuntimeArray_OutOfOrder) {
- auto* src = R"(
-@stage(compute) @workgroup_size(1)
+ auto* src = R"(
+@compute @workgroup_size(1)
fn main() {
_ = buffer[0];
}
@@ -490,24 +490,24 @@ fn main() {
var<storage> buffer : array<f32>;
)";
- auto* expect = R"(
+ auto* expect = R"(
struct tint_symbol_1 {
arr : array<f32>,
}
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main(@group(0) @binding(0) @internal(disable_validation__entry_point_parameter) @internal(disable_validation__ignore_storage_class) tint_symbol : ptr<storage, tint_symbol_1>) {
_ = (*(tint_symbol)).arr[0];
}
)";
- auto got = Run<ModuleScopeVarToEntryPointParam>(src);
+ auto got = Run<ModuleScopeVarToEntryPointParam>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(ModuleScopeVarToEntryPointParamTest, Buffer_RuntimeArrayInsideFunction) {
- auto* src = R"(
+ auto* src = R"(
@group(0) @binding(0)
var<storage> buffer : array<f32>;
@@ -515,13 +515,13 @@ fn foo() {
_ = buffer[0];
}
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
foo();
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct tint_symbol_2 {
arr : array<f32>,
}
@@ -530,21 +530,20 @@ fn foo(@internal(disable_validation__ignore_storage_class) @internal(disable_val
_ = (*(tint_symbol))[0];
}
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main(@group(0) @binding(0) @internal(disable_validation__entry_point_parameter) @internal(disable_validation__ignore_storage_class) tint_symbol_1 : ptr<storage, tint_symbol_2>) {
foo(&((*(tint_symbol_1)).arr));
}
)";
- auto got = Run<ModuleScopeVarToEntryPointParam>(src);
+ auto got = Run<ModuleScopeVarToEntryPointParam>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
-TEST_F(ModuleScopeVarToEntryPointParamTest,
- Buffer_RuntimeArrayInsideFunction_OutOfOrder) {
- auto* src = R"(
-@stage(compute) @workgroup_size(1)
+TEST_F(ModuleScopeVarToEntryPointParamTest, Buffer_RuntimeArrayInsideFunction_OutOfOrder) {
+ auto* src = R"(
+@compute @workgroup_size(1)
fn main() {
foo();
}
@@ -556,12 +555,12 @@ fn foo() {
@group(0) @binding(0) var<storage> buffer : array<f32>;
)";
- auto* expect = R"(
+ auto* expect = R"(
struct tint_symbol_1 {
arr : array<f32>,
}
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main(@group(0) @binding(0) @internal(disable_validation__entry_point_parameter) @internal(disable_validation__ignore_storage_class) tint_symbol : ptr<storage, tint_symbol_1>) {
foo(&((*(tint_symbol)).arr));
}
@@ -571,46 +570,45 @@ fn foo(@internal(disable_validation__ignore_storage_class) @internal(disable_val
}
)";
- auto got = Run<ModuleScopeVarToEntryPointParam>(src);
+ auto got = Run<ModuleScopeVarToEntryPointParam>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(ModuleScopeVarToEntryPointParamTest, Buffer_RuntimeArray_Alias) {
- auto* src = R"(
+ auto* src = R"(
type myarray = array<f32>;
@group(0) @binding(0)
var<storage> buffer : myarray;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
_ = buffer[0];
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct tint_symbol_1 {
arr : array<f32>,
}
type myarray = array<f32>;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main(@group(0) @binding(0) @internal(disable_validation__entry_point_parameter) @internal(disable_validation__ignore_storage_class) tint_symbol : ptr<storage, tint_symbol_1>) {
_ = (*(tint_symbol)).arr[0];
}
)";
- auto got = Run<ModuleScopeVarToEntryPointParam>(src);
+ auto got = Run<ModuleScopeVarToEntryPointParam>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
-TEST_F(ModuleScopeVarToEntryPointParamTest,
- Buffer_RuntimeArray_Alias_OutOfOrder) {
- auto* src = R"(
-@stage(compute) @workgroup_size(1)
+TEST_F(ModuleScopeVarToEntryPointParamTest, Buffer_RuntimeArray_Alias_OutOfOrder) {
+ auto* src = R"(
+@compute @workgroup_size(1)
fn main() {
_ = buffer[0];
}
@@ -620,12 +618,12 @@ fn main() {
type myarray = array<f32>;
)";
- auto* expect = R"(
+ auto* expect = R"(
struct tint_symbol_1 {
arr : array<f32>,
}
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main(@group(0) @binding(0) @internal(disable_validation__entry_point_parameter) @internal(disable_validation__ignore_storage_class) tint_symbol : ptr<storage, tint_symbol_1>) {
_ = (*(tint_symbol)).arr[0];
}
@@ -633,13 +631,13 @@ fn main(@group(0) @binding(0) @internal(disable_validation__entry_point_paramete
type myarray = array<f32>;
)";
- auto got = Run<ModuleScopeVarToEntryPointParam>(src);
+ auto got = Run<ModuleScopeVarToEntryPointParam>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(ModuleScopeVarToEntryPointParamTest, Buffer_ArrayOfStruct) {
- auto* src = R"(
+ auto* src = R"(
struct S {
f : f32,
};
@@ -647,13 +645,13 @@ struct S {
@group(0) @binding(0)
var<storage> buffer : array<S>;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
_ = buffer[0];
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct S {
f : f32,
}
@@ -662,20 +660,20 @@ struct tint_symbol_1 {
arr : array<S>,
}
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main(@group(0) @binding(0) @internal(disable_validation__entry_point_parameter) @internal(disable_validation__ignore_storage_class) tint_symbol : ptr<storage, tint_symbol_1>) {
_ = (*(tint_symbol)).arr[0];
}
)";
- auto got = Run<ModuleScopeVarToEntryPointParam>(src);
+ auto got = Run<ModuleScopeVarToEntryPointParam>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(ModuleScopeVarToEntryPointParamTest, Buffer_ArrayOfStruct_OutOfOrder) {
- auto* src = R"(
-@stage(compute) @workgroup_size(1)
+ auto* src = R"(
+@compute @workgroup_size(1)
fn main() {
_ = buffer[0];
}
@@ -687,7 +685,7 @@ struct S {
};
)";
- auto* expect = R"(
+ auto* expect = R"(
struct S {
f : f32,
}
@@ -696,19 +694,19 @@ struct tint_symbol_1 {
arr : array<S>,
}
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main(@group(0) @binding(0) @internal(disable_validation__entry_point_parameter) @internal(disable_validation__ignore_storage_class) tint_symbol : ptr<storage, tint_symbol_1>) {
_ = (*(tint_symbol)).arr[0];
}
)";
- auto got = Run<ModuleScopeVarToEntryPointParam>(src);
+ auto got = Run<ModuleScopeVarToEntryPointParam>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(ModuleScopeVarToEntryPointParamTest, Buffers_FunctionCalls) {
- auto* src = R"(
+ auto* src = R"(
struct S {
a : f32,
};
@@ -733,13 +731,13 @@ fn foo(a : f32) {
no_uses();
}
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
foo(1.0);
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct S {
a : f32,
}
@@ -759,20 +757,20 @@ fn foo(a : f32, @internal(disable_validation__ignore_storage_class) @internal(di
no_uses();
}
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main(@group(0) @binding(0) @internal(disable_validation__entry_point_parameter) @internal(disable_validation__ignore_storage_class) tint_symbol_4 : ptr<uniform, S>, @group(0) @binding(1) @internal(disable_validation__entry_point_parameter) @internal(disable_validation__ignore_storage_class) tint_symbol_5 : ptr<storage, S>) {
foo(1.0, tint_symbol_4, tint_symbol_5);
}
)";
- auto got = Run<ModuleScopeVarToEntryPointParam>(src);
+ auto got = Run<ModuleScopeVarToEntryPointParam>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(ModuleScopeVarToEntryPointParamTest, Buffers_FunctionCalls_OutOfOrder) {
- auto* src = R"(
-@stage(compute) @workgroup_size(1)
+ auto* src = R"(
+@compute @workgroup_size(1)
fn main() {
foo(1.0);
}
@@ -802,8 +800,8 @@ var<uniform> u : S;
var<storage> s : S;
)";
- auto* expect = R"(
-@stage(compute) @workgroup_size(1)
+ auto* expect = R"(
+@compute @workgroup_size(1)
fn main(@group(0) @binding(0) @internal(disable_validation__entry_point_parameter) @internal(disable_validation__ignore_storage_class) tint_symbol : ptr<uniform, S>, @group(0) @binding(1) @internal(disable_validation__entry_point_parameter) @internal(disable_validation__ignore_storage_class) tint_symbol_1 : ptr<storage, S>) {
foo(1.0, tint_symbol, tint_symbol_1);
}
@@ -828,38 +826,38 @@ struct S {
}
)";
- auto got = Run<ModuleScopeVarToEntryPointParam>(src);
+ auto got = Run<ModuleScopeVarToEntryPointParam>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(ModuleScopeVarToEntryPointParamTest, HandleTypes_Basic) {
- auto* src = R"(
+ auto* src = R"(
@group(0) @binding(0) var t : texture_2d<f32>;
@group(0) @binding(1) var s : sampler;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
_ = t;
_ = s;
}
)";
- auto* expect = R"(
-@stage(compute) @workgroup_size(1)
+ auto* expect = R"(
+@compute @workgroup_size(1)
fn main(@group(0) @binding(0) @internal(disable_validation__entry_point_parameter) tint_symbol : texture_2d<f32>, @group(0) @binding(1) @internal(disable_validation__entry_point_parameter) tint_symbol_1 : sampler) {
_ = tint_symbol;
_ = tint_symbol_1;
}
)";
- auto got = Run<ModuleScopeVarToEntryPointParam>(src);
+ auto got = Run<ModuleScopeVarToEntryPointParam>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(ModuleScopeVarToEntryPointParamTest, HandleTypes_FunctionCalls) {
- auto* src = R"(
+ auto* src = R"(
@group(0) @binding(0) var t : texture_2d<f32>;
@group(0) @binding(1) var s : sampler;
@@ -878,13 +876,13 @@ fn foo(a : f32) {
no_uses();
}
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
foo(1.0);
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn no_uses() {
}
@@ -900,21 +898,20 @@ fn foo(a : f32, tint_symbol_2 : texture_2d<f32>, tint_symbol_3 : sampler) {
no_uses();
}
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main(@group(0) @binding(0) @internal(disable_validation__entry_point_parameter) tint_symbol_4 : texture_2d<f32>, @group(0) @binding(1) @internal(disable_validation__entry_point_parameter) tint_symbol_5 : sampler) {
foo(1.0, tint_symbol_4, tint_symbol_5);
}
)";
- auto got = Run<ModuleScopeVarToEntryPointParam>(src);
+ auto got = Run<ModuleScopeVarToEntryPointParam>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
-TEST_F(ModuleScopeVarToEntryPointParamTest,
- HandleTypes_FunctionCalls_OutOfOrder) {
- auto* src = R"(
-@stage(compute) @workgroup_size(1)
+TEST_F(ModuleScopeVarToEntryPointParamTest, HandleTypes_FunctionCalls_OutOfOrder) {
+ auto* src = R"(
+@compute @workgroup_size(1)
fn main() {
foo(1.0);
}
@@ -938,8 +935,8 @@ fn bar(a : f32, b : f32) {
@group(0) @binding(1) var s : sampler;
)";
- auto* expect = R"(
-@stage(compute) @workgroup_size(1)
+ auto* expect = R"(
+@compute @workgroup_size(1)
fn main(@group(0) @binding(0) @internal(disable_validation__entry_point_parameter) tint_symbol : texture_2d<f32>, @group(0) @binding(1) @internal(disable_validation__entry_point_parameter) tint_symbol_1 : sampler) {
foo(1.0, tint_symbol, tint_symbol_1);
}
@@ -960,40 +957,40 @@ fn bar(a : f32, b : f32, tint_symbol_4 : texture_2d<f32>, tint_symbol_5 : sample
}
)";
- auto got = Run<ModuleScopeVarToEntryPointParam>(src);
+ auto got = Run<ModuleScopeVarToEntryPointParam>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(ModuleScopeVarToEntryPointParamTest, Matrix) {
- auto* src = R"(
+ auto* src = R"(
var<workgroup> m : mat2x2<f32>;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
let x = m;
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct tint_symbol_2 {
m : mat2x2<f32>,
}
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main(@internal(disable_validation__entry_point_parameter) tint_symbol_1 : ptr<workgroup, tint_symbol_2>) {
let tint_symbol : ptr<workgroup, mat2x2<f32>> = &((*(tint_symbol_1)).m);
let x = *(tint_symbol);
}
)";
- auto got = Run<ModuleScopeVarToEntryPointParam>(src);
+ auto got = Run<ModuleScopeVarToEntryPointParam>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(ModuleScopeVarToEntryPointParamTest, NestedMatrix) {
- auto* src = R"(
+ auto* src = R"(
struct S1 {
m : mat2x2<f32>,
};
@@ -1002,13 +999,13 @@ struct S2 {
};
var<workgroup> m : array<S2, 4>;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
let x = m;
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct S1 {
m : mat2x2<f32>,
}
@@ -1021,22 +1018,22 @@ struct tint_symbol_2 {
m : array<S2, 4u>,
}
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main(@internal(disable_validation__entry_point_parameter) tint_symbol_1 : ptr<workgroup, tint_symbol_2>) {
let tint_symbol : ptr<workgroup, array<S2, 4u>> = &((*(tint_symbol_1)).m);
let x = *(tint_symbol);
}
)";
- auto got = Run<ModuleScopeVarToEntryPointParam>(src);
+ auto got = Run<ModuleScopeVarToEntryPointParam>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
// Test that we do not duplicate a struct type used by multiple workgroup
// variables that are promoted to threadgroup memory arguments.
TEST_F(ModuleScopeVarToEntryPointParamTest, DuplicateThreadgroupArgumentTypes) {
- auto* src = R"(
+ auto* src = R"(
struct S {
m : mat2x2<f32>,
};
@@ -1045,14 +1042,14 @@ var<workgroup> a : S;
var<workgroup> b : S;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
let x = a;
let y = b;
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct S {
m : mat2x2<f32>,
}
@@ -1062,7 +1059,7 @@ struct tint_symbol_3 {
b : S,
}
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main(@internal(disable_validation__entry_point_parameter) tint_symbol_1 : ptr<workgroup, tint_symbol_3>) {
let tint_symbol : ptr<workgroup, S> = &((*(tint_symbol_1)).a);
let tint_symbol_2 : ptr<workgroup, S> = &((*(tint_symbol_1)).b);
@@ -1071,17 +1068,16 @@ fn main(@internal(disable_validation__entry_point_parameter) tint_symbol_1 : ptr
}
)";
- auto got = Run<ModuleScopeVarToEntryPointParam>(src);
+ auto got = Run<ModuleScopeVarToEntryPointParam>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
// Test that we do not duplicate a struct type used by multiple workgroup
// variables that are promoted to threadgroup memory arguments.
-TEST_F(ModuleScopeVarToEntryPointParamTest,
- DuplicateThreadgroupArgumentTypes_OutOfOrder) {
- auto* src = R"(
-@stage(compute) @workgroup_size(1)
+TEST_F(ModuleScopeVarToEntryPointParamTest, DuplicateThreadgroupArgumentTypes_OutOfOrder) {
+ auto* src = R"(
+@compute @workgroup_size(1)
fn main() {
let x = a;
let y = b;
@@ -1095,7 +1091,7 @@ struct S {
};
)";
- auto* expect = R"(
+ auto* expect = R"(
struct S {
m : mat2x2<f32>,
}
@@ -1105,7 +1101,7 @@ struct tint_symbol_3 {
b : S,
}
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main(@internal(disable_validation__entry_point_parameter) tint_symbol_1 : ptr<workgroup, tint_symbol_3>) {
let tint_symbol : ptr<workgroup, S> = &((*(tint_symbol_1)).a);
let tint_symbol_2 : ptr<workgroup, S> = &((*(tint_symbol_1)).b);
@@ -1114,13 +1110,13 @@ fn main(@internal(disable_validation__entry_point_parameter) tint_symbol_1 : ptr
}
)";
- auto got = Run<ModuleScopeVarToEntryPointParam>(src);
+ auto got = Run<ModuleScopeVarToEntryPointParam>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(ModuleScopeVarToEntryPointParamTest, UnusedVariables) {
- auto* src = R"(
+ auto* src = R"(
struct S {
a : f32,
};
@@ -1136,32 +1132,32 @@ var<storage> sb : S;
@group(0) @binding(2) var t : texture_2d<f32>;
@group(0) @binding(3) var s : sampler;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct S {
a : f32,
}
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
}
)";
- auto got = Run<ModuleScopeVarToEntryPointParam>(src);
+ auto got = Run<ModuleScopeVarToEntryPointParam>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(ModuleScopeVarToEntryPointParamTest, EmtpyModule) {
- auto* src = "";
+ auto* src = "";
- auto got = Run<ModuleScopeVarToEntryPointParam>(src);
+ auto got = Run<ModuleScopeVarToEntryPointParam>(src);
- EXPECT_EQ(src, str(got));
+ EXPECT_EQ(src, str(got));
}
} // namespace
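
The tests above exercise the ModuleScopeVarToEntryPointParam transform, which hoists module-scope resource and workgroup variables into entry-point parameters for backends such as MSL; the updated expectations also reflect WGSL's rename of @stage(compute) to @compute (and, later in this diff, @stage(fragment) to @fragment). For orientation only, here is the shape of that rewrite restated as two illustrative WGSL snippets held in C++ raw strings. This sketch is editorial and is not part of the patch; the generated names simply mirror the expectations above.

// Editorial sketch, not part of the patch: before/after WGSL for the
// runtime-array case, restated from the test expectations above. The
// constant names kBefore/kAfter are illustrative only.
constexpr const char* kBefore = R"(
@group(0) @binding(0) var<storage> buffer : array<f32>;

@compute @workgroup_size(1)
fn main() {
  _ = buffer[0];
}
)";

constexpr const char* kAfter = R"(
struct tint_symbol_1 {
  arr : array<f32>,
}

@compute @workgroup_size(1)
fn main(@group(0) @binding(0) @internal(disable_validation__entry_point_parameter) @internal(disable_validation__ignore_storage_class) tint_symbol : ptr<storage, tint_symbol_1>) {
  _ = (*(tint_symbol)).arr[0];
}
)";

Handle types such as textures and samplers follow the same idea but are passed directly as parameters rather than through a generated struct, as the HandleTypes_* tests above show.
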
diff --git a/chromium/third_party/dawn/src/tint/transform/multiplanar_external_texture.cc b/chromium/third_party/dawn/src/tint/transform/multiplanar_external_texture.cc
index 00466ac1390..2a9e20e9a08 100644
--- a/chromium/third_party/dawn/src/tint/transform/multiplanar_external_texture.cc
+++ b/chromium/third_party/dawn/src/tint/transform/multiplanar_external_texture.cc
@@ -24,8 +24,9 @@
#include "src/tint/sem/variable.h"
TINT_INSTANTIATE_TYPEINFO(tint::transform::MultiplanarExternalTexture);
-TINT_INSTANTIATE_TYPEINFO(
- tint::transform::MultiplanarExternalTexture::NewBindingPoints);
+TINT_INSTANTIATE_TYPEINFO(tint::transform::MultiplanarExternalTexture::NewBindingPoints);
+
+using namespace tint::number_suffixes; // NOLINT
namespace tint::transform {
namespace {
@@ -33,394 +34,428 @@ namespace {
/// This struct stores symbols for new bindings created as a result of
/// transforming a texture_external instance.
struct NewBindingSymbols {
- Symbol params;
- Symbol plane_0;
- Symbol plane_1;
+ Symbol params;
+ Symbol plane_0;
+ Symbol plane_1;
};
} // namespace
/// State holds the current transform state
struct MultiplanarExternalTexture::State {
- /// The clone context.
- CloneContext& ctx;
-
- /// ProgramBuilder for the context
- ProgramBuilder& b;
-
- /// Destination binding locations for the expanded texture_external provided
- /// as input into the transform.
- const NewBindingPoints* new_binding_points;
-
- /// Symbol for the ExternalTextureParams struct
- Symbol params_struct_sym;
-
- /// Symbol for the textureLoadExternal function
- Symbol texture_load_external_sym;
-
- /// Symbol for the textureSampleExternal function
- Symbol texture_sample_external_sym;
-
- /// Storage for new bindings that have been created corresponding to an
- /// original texture_external binding.
- std::unordered_map<const sem::Variable*, NewBindingSymbols>
- new_binding_symbols;
-
- /// Constructor
- /// @param context the clone
- /// @param newBindingPoints the input destination binding locations for the
- /// expanded texture_external
- State(CloneContext& context, const NewBindingPoints* newBindingPoints)
- : ctx(context), b(*context.dst), new_binding_points(newBindingPoints) {}
-
- /// Processes the module
- void Process() {
- auto& sem = ctx.src->Sem();
-
- // For each texture_external binding, we replace it with a texture_2d<f32>
- // binding and create two additional bindings (one texture_2d<f32> to
- // represent the secondary plane and one uniform buffer for the
- // ExternalTextureParams struct).
- for (auto* var : ctx.src->AST().GlobalVariables()) {
- auto* sem_var = sem.Get(var);
- if (!sem_var->Type()->UnwrapRef()->Is<sem::ExternalTexture>()) {
- continue;
- }
-
- // If the attributes are empty, then this must be a texture_external
- // passed as a function parameter. These variables are transformed
- // elsewhere.
- if (var->attributes.empty()) {
- continue;
- }
-
- // If we find a texture_external binding, we know we must emit the
- // ExternalTextureParams struct.
- if (!params_struct_sym.IsValid()) {
- createExtTexParamsStruct();
- }
-
- // The binding points for the newly introduced bindings must have been
- // provided to this transform. We fetch the new binding points by
- // providing the original texture_external binding points into the
- // passed map.
- BindingPoint bp = {var->BindingPoint().group->value,
- var->BindingPoint().binding->value};
-
- BindingsMap::const_iterator it =
- new_binding_points->bindings_map.find(bp);
- if (it == new_binding_points->bindings_map.end()) {
- b.Diagnostics().add_error(
- diag::System::Transform,
- "missing new binding points for texture_external at binding {" +
- std::to_string(bp.group) + "," + std::to_string(bp.binding) +
- "}");
- continue;
- }
-
- BindingPoints bps = it->second;
-
- // Symbols for the newly created bindings must be saved so they can be
- // passed as parameters later. These are placed in a map and keyed by
- // the source symbol associated with the texture_external binding that
- // corresponds with the new destination bindings.
- // NewBindingSymbols new_binding_syms;
- auto& syms = new_binding_symbols[sem_var];
- syms.plane_0 = ctx.Clone(var->symbol);
- syms.plane_1 = b.Symbols().New("ext_tex_plane_1");
- b.Global(syms.plane_1,
- b.ty.sampled_texture(ast::TextureDimension::k2d, b.ty.f32()),
- b.GroupAndBinding(bps.plane_1.group, bps.plane_1.binding));
- syms.params = b.Symbols().New("ext_tex_params");
- b.Global(syms.params, b.ty.type_name("ExternalTextureParams"),
- ast::StorageClass::kUniform,
- b.GroupAndBinding(bps.params.group, bps.params.binding));
-
- // Replace the original texture_external binding with a texture_2d<f32>
- // binding.
- ast::AttributeList cloned_attributes = ctx.Clone(var->attributes);
- const ast::Expression* cloned_constructor = ctx.Clone(var->constructor);
-
- auto* replacement =
- b.Var(syms.plane_0,
- b.ty.sampled_texture(ast::TextureDimension::k2d, b.ty.f32()),
- cloned_constructor, cloned_attributes);
- ctx.Replace(var, replacement);
- }
+ /// The clone context.
+ CloneContext& ctx;
+
+ /// ProgramBuilder for the context
+ ProgramBuilder& b;
+
+ /// Destination binding locations for the expanded texture_external provided
+ /// as input into the transform.
+ const NewBindingPoints* new_binding_points;
+
+ /// Symbol for the GammaTransferParams
+ Symbol gamma_transfer_struct_sym;
+
+ /// Symbol for the ExternalTextureParams struct
+ Symbol params_struct_sym;
+
+ /// Symbol for the textureLoadExternal function
+ Symbol texture_load_external_sym;
+
+ /// Symbol for the textureSampleExternal function
+ Symbol texture_sample_external_sym;
+
+ /// Symbol for the gammaCorrection function
+ Symbol gamma_correction_sym;
+
+ /// Storage for new bindings that have been created corresponding to an
+ /// original texture_external binding.
+ std::unordered_map<const sem::Variable*, NewBindingSymbols> new_binding_symbols;
+
+ /// Constructor
+ /// @param context the clone
+ /// @param newBindingPoints the input destination binding locations for the
+ /// expanded texture_external
+ State(CloneContext& context, const NewBindingPoints* newBindingPoints)
+ : ctx(context), b(*context.dst), new_binding_points(newBindingPoints) {}
+
+ /// Processes the module
+ void Process() {
+ auto& sem = ctx.src->Sem();
+
+ // For each texture_external binding, we replace it with a texture_2d<f32>
+ // binding and create two additional bindings (one texture_2d<f32> to
+ // represent the secondary plane and one uniform buffer for the
+ // ExternalTextureParams struct).
+ for (auto* var : ctx.src->AST().GlobalVariables()) {
+ auto* sem_var = sem.Get(var);
+ if (!sem_var->Type()->UnwrapRef()->Is<sem::ExternalTexture>()) {
+ continue;
+ }
+
+ // If the attributes are empty, then this must be a texture_external
+ // passed as a function parameter. These variables are transformed
+ // elsewhere.
+ if (var->attributes.empty()) {
+ continue;
+ }
- // We must update all the texture_external parameters for user declared
- // functions.
- for (auto* fn : ctx.src->AST().Functions()) {
- for (const ast::Variable* param : fn->params) {
- if (auto* sem_var = sem.Get(param)) {
- if (!sem_var->Type()->UnwrapRef()->Is<sem::ExternalTexture>()) {
- continue;
- }
- // If we find a texture_external, we must ensure the
- // ExternalTextureParams struct exists.
- if (!params_struct_sym.IsValid()) {
- createExtTexParamsStruct();
- }
- // When a texture_external is found, we insert all components
- // the texture_external into the parameter list. We must also place
- // the new symbols into the transform state so they can be used when
- // transforming function calls.
- auto& syms = new_binding_symbols[sem_var];
- syms.plane_0 = ctx.Clone(param->symbol);
- syms.plane_1 = b.Symbols().New("ext_tex_plane_1");
- syms.params = b.Symbols().New("ext_tex_params");
- auto tex2d_f32 = [&] {
- return b.ty.sampled_texture(ast::TextureDimension::k2d, b.ty.f32());
- };
- ctx.Replace(param, b.Param(syms.plane_0, tex2d_f32()));
- ctx.InsertAfter(fn->params, param,
- b.Param(syms.plane_1, tex2d_f32()));
- ctx.InsertAfter(
- fn->params, param,
- b.Param(syms.params, b.ty.type_name(params_struct_sym)));
+ // If we find a texture_external binding, we know we must emit the
+ // ExternalTextureParams struct.
+ if (!params_struct_sym.IsValid()) {
+ createExtTexParamsStructs();
+ }
+
+ // The binding points for the newly introduced bindings must have been
+ // provided to this transform. We fetch the new binding points by
+ // providing the original texture_external binding points into the
+ // passed map.
+ BindingPoint bp = {var->BindingPoint().group->value,
+ var->BindingPoint().binding->value};
+
+ BindingsMap::const_iterator it = new_binding_points->bindings_map.find(bp);
+ if (it == new_binding_points->bindings_map.end()) {
+ b.Diagnostics().add_error(
+ diag::System::Transform,
+ "missing new binding points for texture_external at binding {" +
+ std::to_string(bp.group) + "," + std::to_string(bp.binding) + "}");
+ continue;
+ }
+
+ BindingPoints bps = it->second;
+
+ // Symbols for the newly created bindings must be saved so they can be
+ // passed as parameters later. These are placed in a map and keyed by
+ // the source symbol associated with the texture_external binding that
+ // corresponds with the new destination bindings.
+ // NewBindingSymbols new_binding_syms;
+ auto& syms = new_binding_symbols[sem_var];
+ syms.plane_0 = ctx.Clone(var->symbol);
+ syms.plane_1 = b.Symbols().New("ext_tex_plane_1");
+ b.Global(syms.plane_1, b.ty.sampled_texture(ast::TextureDimension::k2d, b.ty.f32()),
+ b.GroupAndBinding(bps.plane_1.group, bps.plane_1.binding));
+ syms.params = b.Symbols().New("ext_tex_params");
+ b.Global(syms.params, b.ty.type_name("ExternalTextureParams"),
+ ast::StorageClass::kUniform,
+ b.GroupAndBinding(bps.params.group, bps.params.binding));
+
+ // Replace the original texture_external binding with a texture_2d<f32>
+ // binding.
+ ast::AttributeList cloned_attributes = ctx.Clone(var->attributes);
+ const ast::Expression* cloned_constructor = ctx.Clone(var->constructor);
+
+ auto* replacement =
+ b.Var(syms.plane_0, b.ty.sampled_texture(ast::TextureDimension::k2d, b.ty.f32()),
+ cloned_constructor, cloned_attributes);
+ ctx.Replace(var, replacement);
}
- }
- }
- // Transform the original textureLoad and textureSampleLevel calls into
- // textureLoadExternal and textureSampleExternal calls.
- ctx.ReplaceAll(
- [&](const ast::CallExpression* expr) -> const ast::CallExpression* {
- auto* builtin = sem.Get(expr)->Target()->As<sem::Builtin>();
-
- if (builtin && !builtin->Parameters().empty() &&
- builtin->Parameters()[0]->Type()->Is<sem::ExternalTexture>() &&
- builtin->Type() != sem::BuiltinType::kTextureDimensions) {
- if (auto* var_user = sem.Get<sem::VariableUser>(expr->args[0])) {
- auto it = new_binding_symbols.find(var_user->Variable());
- if (it == new_binding_symbols.end()) {
- // If valid new binding locations were not provided earlier, we
- // would have been unable to create these symbols. An error
- // message was emitted earlier, so just return early to avoid
- // internal compiler errors and retain a clean error message.
- return nullptr;
- }
- auto& syms = it->second;
-
- if (builtin->Type() == sem::BuiltinType::kTextureLoad) {
- return createTexLdExt(expr, syms);
- }
-
- if (builtin->Type() == sem::BuiltinType::kTextureSampleLevel) {
- return createTexSmpExt(expr, syms);
- }
+ // We must update all the texture_external parameters for user declared
+ // functions.
+ for (auto* fn : ctx.src->AST().Functions()) {
+ for (const ast::Variable* param : fn->params) {
+ if (auto* sem_var = sem.Get(param)) {
+ if (!sem_var->Type()->UnwrapRef()->Is<sem::ExternalTexture>()) {
+ continue;
+ }
+ // If we find a texture_external, we must ensure the
+ // ExternalTextureParams struct exists.
+ if (!params_struct_sym.IsValid()) {
+ createExtTexParamsStructs();
+ }
+                    // When a texture_external is found, we insert all components of
+                    // the texture_external into the parameter list. We must also place
+ // the new symbols into the transform state so they can be used when
+ // transforming function calls.
+ auto& syms = new_binding_symbols[sem_var];
+ syms.plane_0 = ctx.Clone(param->symbol);
+ syms.plane_1 = b.Symbols().New("ext_tex_plane_1");
+ syms.params = b.Symbols().New("ext_tex_params");
+ auto tex2d_f32 = [&] {
+ return b.ty.sampled_texture(ast::TextureDimension::k2d, b.ty.f32());
+ };
+ ctx.Replace(param, b.Param(syms.plane_0, tex2d_f32()));
+ ctx.InsertAfter(fn->params, param, b.Param(syms.plane_1, tex2d_f32()));
+ ctx.InsertAfter(fn->params, param,
+ b.Param(syms.params, b.ty.type_name(params_struct_sym)));
+ }
}
+ }
- } else if (sem.Get(expr)->Target()->Is<sem::Function>()) {
- // The call expression may be to a user-defined function that
- // contains a texture_external parameter. These need to be expanded
- // out to multiple plane textures and the texture parameters
- // structure.
- for (auto* arg : expr->args) {
- if (auto* var_user = sem.Get<sem::VariableUser>(arg)) {
- // Check if a parameter is a texture_external by trying to find
- // it in the transform state.
- auto it = new_binding_symbols.find(var_user->Variable());
- if (it != new_binding_symbols.end()) {
- auto& syms = it->second;
- // When we find a texture_external, we must unpack it into its
- // components.
- ctx.Replace(arg, b.Expr(syms.plane_0));
- ctx.InsertAfter(expr->args, arg, b.Expr(syms.plane_1));
- ctx.InsertAfter(expr->args, arg, b.Expr(syms.params));
+ // Transform the original textureLoad and textureSampleLevel calls into
+ // textureLoadExternal and textureSampleExternal calls.
+ ctx.ReplaceAll([&](const ast::CallExpression* expr) -> const ast::CallExpression* {
+ auto* call = sem.Get(expr)->UnwrapMaterialize()->As<sem::Call>();
+ auto* builtin = call->Target()->As<sem::Builtin>();
+
+ if (builtin && !builtin->Parameters().empty() &&
+ builtin->Parameters()[0]->Type()->Is<sem::ExternalTexture>() &&
+ builtin->Type() != sem::BuiltinType::kTextureDimensions) {
+ if (auto* var_user = sem.Get<sem::VariableUser>(expr->args[0])) {
+ auto it = new_binding_symbols.find(var_user->Variable());
+ if (it == new_binding_symbols.end()) {
+ // If valid new binding locations were not provided earlier, we
+ // would have been unable to create these symbols. An error
+ // message was emitted earlier, so just return early to avoid
+ // internal compiler errors and retain a clean error message.
+ return nullptr;
+ }
+ auto& syms = it->second;
+
+ if (builtin->Type() == sem::BuiltinType::kTextureLoad) {
+ return createTexLdExt(expr, syms);
+ }
+
+ if (builtin->Type() == sem::BuiltinType::kTextureSampleLevel) {
+ return createTexSmpExt(expr, syms);
+ }
+ }
+
+ } else if (call->Target()->Is<sem::Function>()) {
+ // The call expression may be to a user-defined function that
+ // contains a texture_external parameter. These need to be expanded
+ // out to multiple plane textures and the texture parameters
+ // structure.
+ for (auto* arg : expr->args) {
+ if (auto* var_user = sem.Get<sem::VariableUser>(arg)) {
+ // Check if a parameter is a texture_external by trying to find
+ // it in the transform state.
+ auto it = new_binding_symbols.find(var_user->Variable());
+ if (it != new_binding_symbols.end()) {
+ auto& syms = it->second;
+ // When we find a texture_external, we must unpack it into its
+ // components.
+ ctx.Replace(arg, b.Expr(syms.plane_0));
+ ctx.InsertAfter(expr->args, arg, b.Expr(syms.plane_1));
+ ctx.InsertAfter(expr->args, arg, b.Expr(syms.params));
+ }
+ }
}
- }
}
- }
- return nullptr;
+ return nullptr;
});
- }
-
- /// Creates the ExternalTextureParams struct.
- void createExtTexParamsStruct() {
- ast::StructMemberList member_list = {
- b.Member("numPlanes", b.ty.u32()), b.Member("vr", b.ty.f32()),
- b.Member("ug", b.ty.f32()), b.Member("vg", b.ty.f32()),
- b.Member("ub", b.ty.f32())};
-
- params_struct_sym = b.Symbols().New("ExternalTextureParams");
-
- b.Structure(params_struct_sym, member_list);
- }
-
- /// Constructs a StatementList containing all the statements making up the
- /// bodies of the textureSampleExternal and textureLoadExternal functions.
- /// @param call_type determines which function body to generate
- /// @returns a statement list that makes of the body of the chosen function
- ast::StatementList createTexFnExtStatementList(sem::BuiltinType call_type) {
- using f32 = ProgramBuilder::f32;
- const ast::CallExpression* single_plane_call = nullptr;
- const ast::CallExpression* plane_0_call = nullptr;
- const ast::CallExpression* plane_1_call = nullptr;
- if (call_type == sem::BuiltinType::kTextureSampleLevel) {
- // textureSampleLevel(plane0, smp, coord.xy, 0.0);
- single_plane_call =
- b.Call("textureSampleLevel", "plane0", "smp", "coord", 0.0f);
- // textureSampleLevel(plane0, smp, coord.xy, 0.0);
- plane_0_call =
- b.Call("textureSampleLevel", "plane0", "smp", "coord", 0.0f);
- // textureSampleLevel(plane1, smp, coord.xy, 0.0);
- plane_1_call =
- b.Call("textureSampleLevel", "plane1", "smp", "coord", 0.0f);
- } else if (call_type == sem::BuiltinType::kTextureLoad) {
- // textureLoad(plane0, coords.xy, 0);
- single_plane_call = b.Call("textureLoad", "plane0", "coord", 0);
- // textureLoad(plane0, coords.xy, 0);
- plane_0_call = b.Call("textureLoad", "plane0", "coord", 0);
- // textureLoad(plane1, coords.xy, 0);
- plane_1_call = b.Call("textureLoad", "plane1", "coord", 0);
- } else {
- TINT_ICE(Transform, b.Diagnostics())
- << "unhandled builtin: " << call_type;
}
- return {
- // if (params.numPlanes == 1u) {
- // return singlePlaneCall
- // }
- b.If(b.create<ast::BinaryExpression>(
- ast::BinaryOp::kEqual, b.MemberAccessor("params", "numPlanes"),
- b.Expr(1u)),
- b.Block(b.Return(single_plane_call))),
- // let y = plane0Call.r - 0.0625;
- b.Decl(b.Const("y", nullptr,
- b.Sub(b.MemberAccessor(plane_0_call, "r"), 0.0625f))),
- // let uv = plane1Call.rg - 0.5;
- b.Decl(b.Const("uv", nullptr,
- b.Sub(b.MemberAccessor(plane_1_call, "rg"), 0.5f))),
- // let u = uv.x;
- b.Decl(b.Const("u", nullptr, b.MemberAccessor("uv", "x"))),
- // let v = uv.y;
- b.Decl(b.Const("v", nullptr, b.MemberAccessor("uv", "y"))),
- // let r = 1.164 * y + params.vr * v;
- b.Decl(b.Const("r", nullptr,
- b.Add(b.Mul(1.164f, "y"),
- b.Mul(b.MemberAccessor("params", "vr"), "v")))),
- // let g = 1.164 * y - params.ug * u - params.vg * v;
- b.Decl(
- b.Const("g", nullptr,
- b.Sub(b.Sub(b.Mul(1.164f, "y"),
- b.Mul(b.MemberAccessor("params", "ug"), "u")),
- b.Mul(b.MemberAccessor("params", "vg"), "v")))),
- // let b = 1.164 * y + params.ub * u;
- b.Decl(b.Const("b", nullptr,
- b.Add(b.Mul(1.164f, "y"),
- b.Mul(b.MemberAccessor("params", "ub"), "u")))),
- // return vec4<f32>(r, g, b, 1.0);
- b.Return(b.vec4<f32>("r", "g", "b", 1.0f)),
- };
- }
-
- /// Creates the textureSampleExternal function if needed and returns a call
- /// expression to it.
- /// @param expr the call expression being transformed
- /// @param syms the expanded symbols to be used in the new call
- /// @returns a call expression to textureSampleExternal
- const ast::CallExpression* createTexSmpExt(const ast::CallExpression* expr,
- NewBindingSymbols syms) {
- ast::ExpressionList params;
- const ast::Expression* plane_0_binding_param = ctx.Clone(expr->args[0]);
-
- if (expr->args.size() != 3) {
- TINT_ICE(Transform, b.Diagnostics())
- << "expected textureSampleLevel call with a "
- "texture_external to have 3 parameters, found "
- << expr->args.size() << " parameters";
- }
+ /// Creates the parameter structs associated with the transform.
+ void createExtTexParamsStructs() {
+ // Create GammaTransferParams struct.
+ ast::StructMemberList gamma_transfer_member_list = {
+ b.Member("G", b.ty.f32()), b.Member("A", b.ty.f32()), b.Member("B", b.ty.f32()),
+ b.Member("C", b.ty.f32()), b.Member("D", b.ty.f32()), b.Member("E", b.ty.f32()),
+ b.Member("F", b.ty.f32()), b.Member("padding", b.ty.u32())};
- if (!texture_sample_external_sym.IsValid()) {
- texture_sample_external_sym = b.Symbols().New("textureSampleExternal");
+ gamma_transfer_struct_sym = b.Symbols().New("GammaTransferParams");
- // Emit the textureSampleExternal function.
- ast::VariableList varList = {
- b.Param("plane0",
- b.ty.sampled_texture(ast::TextureDimension::k2d, b.ty.f32())),
- b.Param("plane1",
- b.ty.sampled_texture(ast::TextureDimension::k2d, b.ty.f32())),
- b.Param("smp", b.ty.sampler(ast::SamplerKind::kSampler)),
- b.Param("coord", b.ty.vec2(b.ty.f32())),
- b.Param("params", b.ty.type_name(params_struct_sym))};
+ b.Structure(gamma_transfer_struct_sym, gamma_transfer_member_list);
- ast::StatementList statementList =
- createTexFnExtStatementList(sem::BuiltinType::kTextureSampleLevel);
+ // Create ExternalTextureParams struct.
+ ast::StructMemberList ext_tex_params_member_list = {
+ b.Member("numPlanes", b.ty.u32()),
+ b.Member("yuvToRgbConversionMatrix", b.ty.mat3x4(b.ty.f32())),
+ b.Member("gammaDecodeParams", b.ty.type_name("GammaTransferParams")),
+ b.Member("gammaEncodeParams", b.ty.type_name("GammaTransferParams")),
+ b.Member("gamutConversionMatrix", b.ty.mat3x3(b.ty.f32()))};
- b.Func(texture_sample_external_sym, varList, b.ty.vec4(b.ty.f32()),
- statementList, {});
+ params_struct_sym = b.Symbols().New("ExternalTextureParams");
+
+ b.Structure(params_struct_sym, ext_tex_params_member_list);
}
- const ast::IdentifierExpression* exp = b.Expr(texture_sample_external_sym);
- params = {plane_0_binding_param, b.Expr(syms.plane_1),
- ctx.Clone(expr->args[1]), ctx.Clone(expr->args[2]),
- b.Expr(syms.params)};
- return b.Call(exp, params);
- }
-
- /// Creates the textureLoadExternal function if needed and returns a call
- /// expression to it.
- /// @param expr the call expression being transformed
- /// @param syms the expanded symbols to be used in the new call
- /// @returns a call expression to textureLoadExternal
- const ast::CallExpression* createTexLdExt(const ast::CallExpression* expr,
- NewBindingSymbols syms) {
- ast::ExpressionList params;
- const ast::Expression* plane_0_binding_param = ctx.Clone(expr->args[0]);
-
- if (expr->args.size() != 2) {
- TINT_ICE(Transform, b.Diagnostics())
- << "expected textureLoad call with a texture_external "
- "to have 2 parameters, found "
- << expr->args.size() << " parameters";
+ /// Creates the gammaCorrection function if needed and returns a call
+ /// expression to it.
+ void createGammaCorrectionFn() {
+ ast::VariableList varList = {b.Param("v", b.ty.vec3<f32>()),
+ b.Param("params", b.ty.type_name(gamma_transfer_struct_sym))};
+
+ ast::StatementList statementList = {
+ // let cond = abs(v) < vec3(params.D);
+ b.Decl(b.Let(
+ "cond", nullptr,
+ b.LessThan(b.Call("abs", "v"), b.vec3<f32>(b.MemberAccessor("params", "D"))))),
+ // let t = sign(v) * ((params.C * abs(v)) + params.F);
+ b.Decl(b.Let("t", nullptr,
+ b.Mul(b.Call("sign", "v"),
+ b.Add(b.Mul(b.MemberAccessor("params", "C"), b.Call("abs", "v")),
+ b.MemberAccessor("params", "F"))))),
+            // let f = (sign(v) * (pow(((params.A * abs(v)) + params.B),
+            // vec3(params.G)) + params.E));
+ b.Decl(b.Let(
+ "f", nullptr,
+ b.Mul(b.Call("sign", "v"),
+ b.Add(b.Call("pow",
+ b.Add(b.Mul(b.MemberAccessor("params", "A"), b.Call("abs", "v")),
+ b.MemberAccessor("params", "B")),
+ b.vec3<f32>(b.MemberAccessor("params", "G"))),
+ b.MemberAccessor("params", "E"))))),
+ // return select(f, t, cond);
+ b.Return(b.Call("select", "f", "t", "cond"))};
+
+ gamma_correction_sym = b.Symbols().New("gammaCorrection");
+
+ b.Func(gamma_correction_sym, varList, b.ty.vec3<f32>(), statementList, {});
}
- if (!texture_load_external_sym.IsValid()) {
- texture_load_external_sym = b.Symbols().New("textureLoadExternal");
+ /// Constructs a StatementList containing all the statements making up the
+ /// bodies of the textureSampleExternal and textureLoadExternal functions.
+ /// @param call_type determines which function body to generate
+    /// @returns a statement list that makes up the body of the chosen function
+ ast::StatementList createTexFnExtStatementList(sem::BuiltinType call_type) {
+ const ast::CallExpression* single_plane_call = nullptr;
+ const ast::CallExpression* plane_0_call = nullptr;
+ const ast::CallExpression* plane_1_call = nullptr;
+ if (call_type == sem::BuiltinType::kTextureSampleLevel) {
+ // textureSampleLevel(plane0, smp, coord.xy, 0.0);
+ single_plane_call = b.Call("textureSampleLevel", "plane0", "smp", "coord", 0_f);
+ // textureSampleLevel(plane0, smp, coord.xy, 0.0);
+ plane_0_call = b.Call("textureSampleLevel", "plane0", "smp", "coord", 0_f);
+ // textureSampleLevel(plane1, smp, coord.xy, 0.0);
+ plane_1_call = b.Call("textureSampleLevel", "plane1", "smp", "coord", 0_f);
+ } else if (call_type == sem::BuiltinType::kTextureLoad) {
+ // textureLoad(plane0, coords.xy, 0);
+ single_plane_call = b.Call("textureLoad", "plane0", "coord", 0_i);
+ // textureLoad(plane0, coords.xy, 0);
+ plane_0_call = b.Call("textureLoad", "plane0", "coord", 0_i);
+ // textureLoad(plane1, coords.xy, 0);
+ plane_1_call = b.Call("textureLoad", "plane1", "coord", 0_i);
+ } else {
+ TINT_ICE(Transform, b.Diagnostics()) << "unhandled builtin: " << call_type;
+ }
- // Emit the textureLoadExternal function.
- ast::VariableList var_list = {
- b.Param("plane0",
- b.ty.sampled_texture(ast::TextureDimension::k2d, b.ty.f32())),
- b.Param("plane1",
- b.ty.sampled_texture(ast::TextureDimension::k2d, b.ty.f32())),
- b.Param("coord", b.ty.vec2(b.ty.i32())),
- b.Param("params", b.ty.type_name(params_struct_sym))};
+ return {
+ // var color: vec3<f32>;
+ b.Decl(b.Var("color", b.ty.vec3(b.ty.f32()))),
+ // if ((params.numPlanes == 1u))
+ b.If(b.create<ast::BinaryExpression>(
+ ast::BinaryOp::kEqual, b.MemberAccessor("params", "numPlanes"), b.Expr(1_u)),
+ b.Block(
+ // color = textureLoad(plane0, coord, 0).rgb;
+ b.Assign("color", b.MemberAccessor(single_plane_call, "rgb"))),
+ b.Else(b.Block(
+ // color = vec4<f32>(plane_0_call.r, plane_1_call.rg, 1.0) *
+ // params.yuvToRgbConversionMatrix;
+ b.Assign("color",
+ b.Mul(b.vec4<f32>(b.MemberAccessor(plane_0_call, "r"),
+ b.MemberAccessor(plane_1_call, "rg"), 1_f),
+ b.MemberAccessor("params", "yuvToRgbConversionMatrix")))))),
+            // color = gammaCorrection(color, gammaDecodeParams);
+ b.Assign("color", b.Call("gammaCorrection", "color",
+ b.MemberAccessor("params", "gammaDecodeParams"))),
+ // color = (params.gamutConversionMatrix * color);
+ b.Assign("color", b.Mul(b.MemberAccessor("params", "gamutConversionMatrix"), "color")),
+            // color = gammaCorrection(color, gammaEncodeParams);
+ b.Assign("color", b.Call("gammaCorrection", "color",
+ b.MemberAccessor("params", "gammaEncodeParams"))),
+ // return vec4<f32>(color, 1.f);
+ b.Return(b.vec4<f32>("color", 1_f))};
+ }
- ast::StatementList statement_list =
- createTexFnExtStatementList(sem::BuiltinType::kTextureLoad);
+ /// Creates the textureSampleExternal function if needed and returns a call
+ /// expression to it.
+ /// @param expr the call expression being transformed
+ /// @param syms the expanded symbols to be used in the new call
+ /// @returns a call expression to textureSampleExternal
+ const ast::CallExpression* createTexSmpExt(const ast::CallExpression* expr,
+ NewBindingSymbols syms) {
+ ast::ExpressionList params;
+ const ast::Expression* plane_0_binding_param = ctx.Clone(expr->args[0]);
+
+ if (expr->args.size() != 3) {
+ TINT_ICE(Transform, b.Diagnostics()) << "expected textureSampleLevel call with a "
+ "texture_external to have 3 parameters, found "
+ << expr->args.size() << " parameters";
+ }
- b.Func(texture_load_external_sym, var_list, b.ty.vec4(b.ty.f32()),
- statement_list, {});
+ // TextureSampleExternal calls the gammaCorrection function, so ensure it
+ // exists.
+ if (!gamma_correction_sym.IsValid()) {
+ createGammaCorrectionFn();
+ }
+
+ if (!texture_sample_external_sym.IsValid()) {
+ texture_sample_external_sym = b.Symbols().New("textureSampleExternal");
+
+ // Emit the textureSampleExternal function.
+ ast::VariableList varList = {
+ b.Param("plane0", b.ty.sampled_texture(ast::TextureDimension::k2d, b.ty.f32())),
+ b.Param("plane1", b.ty.sampled_texture(ast::TextureDimension::k2d, b.ty.f32())),
+ b.Param("smp", b.ty.sampler(ast::SamplerKind::kSampler)),
+ b.Param("coord", b.ty.vec2(b.ty.f32())),
+ b.Param("params", b.ty.type_name(params_struct_sym))};
+
+ ast::StatementList statementList =
+ createTexFnExtStatementList(sem::BuiltinType::kTextureSampleLevel);
+
+ b.Func(texture_sample_external_sym, varList, b.ty.vec4(b.ty.f32()), statementList, {});
+ }
+
+ const ast::IdentifierExpression* exp = b.Expr(texture_sample_external_sym);
+ params = {plane_0_binding_param, b.Expr(syms.plane_1), ctx.Clone(expr->args[1]),
+ ctx.Clone(expr->args[2]), b.Expr(syms.params)};
+ return b.Call(exp, params);
}
- const ast::IdentifierExpression* exp = b.Expr(texture_load_external_sym);
- params = {plane_0_binding_param, b.Expr(syms.plane_1),
- ctx.Clone(expr->args[1]), b.Expr(syms.params)};
- return b.Call(exp, params);
- }
+ /// Creates the textureLoadExternal function if needed and returns a call
+ /// expression to it.
+ /// @param expr the call expression being transformed
+ /// @param syms the expanded symbols to be used in the new call
+ /// @returns a call expression to textureLoadExternal
+ const ast::CallExpression* createTexLdExt(const ast::CallExpression* expr,
+ NewBindingSymbols syms) {
+ ast::ExpressionList params;
+ const ast::Expression* plane_0_binding_param = ctx.Clone(expr->args[0]);
+
+ if (expr->args.size() != 2) {
+ TINT_ICE(Transform, b.Diagnostics())
+ << "expected textureLoad call with a texture_external "
+ "to have 2 parameters, found "
+ << expr->args.size() << " parameters";
+ }
+
+ // TextureLoadExternal calls the gammaCorrection function, so ensure it
+ // exists.
+ if (!gamma_correction_sym.IsValid()) {
+ createGammaCorrectionFn();
+ }
+
+ if (!texture_load_external_sym.IsValid()) {
+ texture_load_external_sym = b.Symbols().New("textureLoadExternal");
+
+ // Emit the textureLoadExternal function.
+ ast::VariableList var_list = {
+ b.Param("plane0", b.ty.sampled_texture(ast::TextureDimension::k2d, b.ty.f32())),
+ b.Param("plane1", b.ty.sampled_texture(ast::TextureDimension::k2d, b.ty.f32())),
+ b.Param("coord", b.ty.vec2(b.ty.i32())),
+ b.Param("params", b.ty.type_name(params_struct_sym))};
+
+ ast::StatementList statement_list =
+ createTexFnExtStatementList(sem::BuiltinType::kTextureLoad);
+
+ b.Func(texture_load_external_sym, var_list, b.ty.vec4(b.ty.f32()), statement_list, {});
+ }
+
+ const ast::IdentifierExpression* exp = b.Expr(texture_load_external_sym);
+ params = {plane_0_binding_param, b.Expr(syms.plane_1), ctx.Clone(expr->args[1]),
+ b.Expr(syms.params)};
+ return b.Call(exp, params);
+ }
};
-MultiplanarExternalTexture::NewBindingPoints::NewBindingPoints(
- BindingsMap inputBindingsMap)
+MultiplanarExternalTexture::NewBindingPoints::NewBindingPoints(BindingsMap inputBindingsMap)
: bindings_map(std::move(inputBindingsMap)) {}
MultiplanarExternalTexture::NewBindingPoints::~NewBindingPoints() = default;
MultiplanarExternalTexture::MultiplanarExternalTexture() = default;
MultiplanarExternalTexture::~MultiplanarExternalTexture() = default;
-bool MultiplanarExternalTexture::ShouldRun(const Program* program,
- const DataMap&) const {
- for (auto* node : program->ASTNodes().Objects()) {
- if (auto* ty = node->As<ast::Type>()) {
- if (program->Sem().Get<sem::ExternalTexture>(ty)) {
- return true;
- }
+bool MultiplanarExternalTexture::ShouldRun(const Program* program, const DataMap&) const {
+ for (auto* node : program->ASTNodes().Objects()) {
+ if (auto* ty = node->As<ast::Type>()) {
+ if (program->Sem().Get<sem::ExternalTexture>(ty)) {
+ return true;
+ }
+ }
}
- }
- return false;
+ return false;
}
// Within this transform, an instance of a texture_external binding is unpacked
@@ -430,23 +465,21 @@ bool MultiplanarExternalTexture::ShouldRun(const Program* program,
// texture_external parameter will be transformed into a newly generated version
// of the function, which can perform the desired operation on a single RGBA
// plane or on separate Y and UV planes.
-void MultiplanarExternalTexture::Run(CloneContext& ctx,
- const DataMap& inputs,
- DataMap&) const {
- auto* new_binding_points = inputs.Get<NewBindingPoints>();
+void MultiplanarExternalTexture::Run(CloneContext& ctx, const DataMap& inputs, DataMap&) const {
+ auto* new_binding_points = inputs.Get<NewBindingPoints>();
- if (!new_binding_points) {
- ctx.dst->Diagnostics().add_error(
- diag::System::Transform,
- "missing new binding point data for " + std::string(TypeInfo().name));
- return;
- }
+ if (!new_binding_points) {
+ ctx.dst->Diagnostics().add_error(
+ diag::System::Transform,
+ "missing new binding point data for " + std::string(TypeInfo().name));
+ return;
+ }
- State state(ctx, new_binding_points);
+ State state(ctx, new_binding_points);
- state.Process();
+ state.Process();
- ctx.Clone();
+ ctx.Clone();
}
} // namespace tint::transform
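
The rewritten transform above now emits a gammaCorrection helper plus GammaTransferParams and ExternalTextureParams structs, and routes sampled or loaded colors through a decode step, the gamut-conversion matrix, and an encode step. As a reading aid, the following scalar C++ sketch restates the piecewise transfer function that the generated WGSL encodes. It is editorial, not part of the patch; the struct mirrors GammaTransferParams minus its layout padding, and sign(0) handling is simplified relative to WGSL's sign().

// Editorial sketch, not part of the patch: per-channel gamma transfer
// function matching the generated gammaCorrection() WGSL helper.
#include <cmath>

struct GammaTransferParams {
    float G;
    float A;
    float B;
    float C;
    float D;
    float E;
    float F;
    // The u32 padding member of the generated struct exists only for
    // uniform-buffer layout and is omitted here.
};

float GammaCorrect(float v, const GammaTransferParams& p) {
    const float s = (v < 0.0f) ? -1.0f : 1.0f;  // sign(v), with sign(0) treated as +1
    const float a = std::abs(v);
    if (a < p.D) {
        // Linear segment: sign(v) * ((C * abs(v)) + F)
        return s * ((p.C * a) + p.F);
    }
    // Power segment: sign(v) * (pow((A * abs(v)) + B, G) + E)
    return s * (std::pow((p.A * a) + p.B, p.G) + p.E);
}

In the generated code the same function is applied per channel to a vec3, first with gammaDecodeParams and then, after the gamutConversionMatrix multiply, with gammaEncodeParams.
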
diff --git a/chromium/third_party/dawn/src/tint/transform/multiplanar_external_texture.h b/chromium/third_party/dawn/src/tint/transform/multiplanar_external_texture.h
index ab2298ced09..88cbc981826 100644
--- a/chromium/third_party/dawn/src/tint/transform/multiplanar_external_texture.h
+++ b/chromium/third_party/dawn/src/tint/transform/multiplanar_external_texture.h
@@ -31,12 +31,12 @@ using BindingPoint = sem::BindingPoint;
/// This struct identifies the binding groups and locations for new bindings to
/// use when transforming a texture_external instance.
struct BindingPoints {
- /// The desired binding location of the texture_2d representing plane #1 when
- /// a texture_external binding is expanded.
- BindingPoint plane_1;
- /// The desired binding location of the ExternalTextureParams uniform when a
- /// texture_external binding is expanded.
- BindingPoint params;
+ /// The desired binding location of the texture_2d representing plane #1 when
+ /// a texture_external binding is expanded.
+ BindingPoint plane_1;
+ /// The desired binding location of the ExternalTextureParams uniform when a
+ /// texture_external binding is expanded.
+ BindingPoint params;
};
/// Within the MultiplanarExternalTexture transform, each instance of a
@@ -47,52 +47,48 @@ struct BindingPoints {
/// transformed into a newly generated version of the function, which can
/// perform the desired operation on a single RGBA plane or on separate Y and UV
/// planes.
-class MultiplanarExternalTexture
- : public Castable<MultiplanarExternalTexture, Transform> {
- public:
- /// BindingsMap is a map where the key is the binding location of a
- /// texture_external and the value is a struct containing the desired
- /// locations for new bindings expanded from the texture_external instance.
- using BindingsMap = std::unordered_map<BindingPoint, BindingPoints>;
-
- /// NewBindingPoints is consumed by the MultiplanarExternalTexture transform.
- /// Data holds information about location of each texture_external binding and
- /// which binding slots it should expand into.
- struct NewBindingPoints : public Castable<Data, transform::Data> {
- /// Constructor
- /// @param bm a map to the new binding slots to use.
- explicit NewBindingPoints(BindingsMap bm);
+class MultiplanarExternalTexture : public Castable<MultiplanarExternalTexture, Transform> {
+ public:
+ /// BindingsMap is a map where the key is the binding location of a
+ /// texture_external and the value is a struct containing the desired
+ /// locations for new bindings expanded from the texture_external instance.
+ using BindingsMap = std::unordered_map<BindingPoint, BindingPoints>;
+
+ /// NewBindingPoints is consumed by the MultiplanarExternalTexture transform.
+ /// Data holds information about location of each texture_external binding and
+ /// which binding slots it should expand into.
+ struct NewBindingPoints : public Castable<Data, transform::Data> {
+ /// Constructor
+ /// @param bm a map to the new binding slots to use.
+ explicit NewBindingPoints(BindingsMap bm);
+
+ /// Destructor
+ ~NewBindingPoints() override;
+
+ /// A map of new binding points to use.
+ const BindingsMap bindings_map;
+ };
+ /// Constructor
+ MultiplanarExternalTexture();
/// Destructor
- ~NewBindingPoints() override;
-
- /// A map of new binding points to use.
- const BindingsMap bindings_map;
- };
-
- /// Constructor
- MultiplanarExternalTexture();
- /// Destructor
- ~MultiplanarExternalTexture() override;
-
- /// @param program the program to inspect
- /// @param data optional extra transform-specific input data
- /// @returns true if this transform should be run for the given program
- bool ShouldRun(const Program* program,
- const DataMap& data = {}) const override;
-
- protected:
- struct State;
-
- /// Runs the transform using the CloneContext built for transforming a
- /// program. Run() is responsible for calling Clone() on the CloneContext.
- /// @param ctx the CloneContext primed with the input program and
- /// ProgramBuilder
- /// @param inputs optional extra transform-specific input data
- /// @param outputs optional extra transform-specific output data
- void Run(CloneContext& ctx,
- const DataMap& inputs,
- DataMap& outputs) const override;
+ ~MultiplanarExternalTexture() override;
+
+ /// @param program the program to inspect
+ /// @param data optional extra transform-specific input data
+ /// @returns true if this transform should be run for the given program
+ bool ShouldRun(const Program* program, const DataMap& data = {}) const override;
+
+ protected:
+ struct State;
+
+ /// Runs the transform using the CloneContext built for transforming a
+ /// program. Run() is responsible for calling Clone() on the CloneContext.
+ /// @param ctx the CloneContext primed with the input program and
+ /// ProgramBuilder
+ /// @param inputs optional extra transform-specific input data
+ /// @param outputs optional extra transform-specific output data
+ void Run(CloneContext& ctx, const DataMap& inputs, DataMap& outputs) const override;
};
} // namespace tint::transform
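
For orientation, callers provide the expanded binding locations documented above through a DataMap entry of type NewBindingPoints, keyed by the original texture_external binding point; the tests later in this diff do exactly this. A minimal editorial sketch follows, assuming a texture_external at group 0, binding 1 whose plane 1 should land at {0, 2} and whose ExternalTextureParams uniform should land at {0, 3}; the helper function name is hypothetical.

// Editorial sketch, not part of the patch. The Add<> call mirrors the
// pattern used throughout multiplanar_external_texture_test.cc; the helper
// name and binding numbers are illustrative.
#include "src/tint/transform/multiplanar_external_texture.h"

void AddExternalTextureBindings(tint::transform::DataMap& data) {
    using MET = tint::transform::MultiplanarExternalTexture;
    // Key: the original texture_external binding {group 0, binding 1}.
    // Value: destinations for the generated plane-1 texture {0, 2} and the
    // ExternalTextureParams uniform buffer {0, 3}.
    data.Add<MET::NewBindingPoints>(MET::BindingsMap{{{0, 1}, {{0, 2}, {0, 3}}}});
}
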
diff --git a/chromium/third_party/dawn/src/tint/transform/multiplanar_external_texture_test.cc b/chromium/third_party/dawn/src/tint/transform/multiplanar_external_texture_test.cc
index b0a79d61800..63d12f1c21d 100644
--- a/chromium/third_party/dawn/src/tint/transform/multiplanar_external_texture_test.cc
+++ b/chromium/third_party/dawn/src/tint/transform/multiplanar_external_texture_test.cc
@@ -21,83 +21,82 @@ namespace {
using MultiplanarExternalTextureTest = TransformTest;
TEST_F(MultiplanarExternalTextureTest, ShouldRunEmptyModule) {
- auto* src = R"()";
+ auto* src = R"()";
- EXPECT_FALSE(ShouldRun<MultiplanarExternalTexture>(src));
+ EXPECT_FALSE(ShouldRun<MultiplanarExternalTexture>(src));
}
TEST_F(MultiplanarExternalTextureTest, ShouldRunHasExternalTextureAlias) {
- auto* src = R"(
+ auto* src = R"(
type ET = texture_external;
)";
- EXPECT_TRUE(ShouldRun<MultiplanarExternalTexture>(src));
+ EXPECT_TRUE(ShouldRun<MultiplanarExternalTexture>(src));
}
TEST_F(MultiplanarExternalTextureTest, ShouldRunHasExternalTextureGlobal) {
- auto* src = R"(
+ auto* src = R"(
@group(0) @binding(0) var ext_tex : texture_external;
)";
- EXPECT_TRUE(ShouldRun<MultiplanarExternalTexture>(src));
+ EXPECT_TRUE(ShouldRun<MultiplanarExternalTexture>(src));
}
TEST_F(MultiplanarExternalTextureTest, ShouldRunHasExternalTextureParam) {
- auto* src = R"(
+ auto* src = R"(
fn f(ext_tex : texture_external) {}
)";
- EXPECT_TRUE(ShouldRun<MultiplanarExternalTexture>(src));
+ EXPECT_TRUE(ShouldRun<MultiplanarExternalTexture>(src));
}
// Running the transform without passing in data for the new bindings should
// result in an error.
TEST_F(MultiplanarExternalTextureTest, ErrorNoPassedData) {
- auto* src = R"(
+ auto* src = R"(
@group(0) @binding(0) var s : sampler;
@group(0) @binding(1) var ext_tex : texture_external;
-@stage(fragment)
+@fragment
fn main(@builtin(position) coord : vec4<f32>) -> @location(0) vec4<f32> {
return textureSampleLevel(ext_tex, s, coord.xy);
}
)";
- auto* expect =
- R"(error: missing new binding point data for tint::transform::MultiplanarExternalTexture)";
+ auto* expect =
+ R"(error: missing new binding point data for tint::transform::MultiplanarExternalTexture)";
- auto got = Run<MultiplanarExternalTexture>(src);
- EXPECT_EQ(expect, str(got));
+ auto got = Run<MultiplanarExternalTexture>(src);
+ EXPECT_EQ(expect, str(got));
}
// Running the transform with incorrect binding data should result in an error.
TEST_F(MultiplanarExternalTextureTest, ErrorIncorrectBindingPont) {
- auto* src = R"(
+ auto* src = R"(
@group(0) @binding(0) var s : sampler;
@group(0) @binding(1) var ext_tex : texture_external;
-@stage(fragment)
+@fragment
fn main(@builtin(position) coord : vec4<f32>) -> @location(0) vec4<f32> {
return textureSampleLevel(ext_tex, s, coord.xy);
}
)";
- auto* expect =
- R"(error: missing new binding points for texture_external at binding {0,1})";
+ auto* expect = R"(error: missing new binding points for texture_external at binding {0,1})";
- DataMap data;
- // This bindings map specifies 0,0 as the location of the texture_external,
- // which is incorrect.
- data.Add<MultiplanarExternalTexture::NewBindingPoints>(
- MultiplanarExternalTexture::BindingsMap{{{0, 0}, {{0, 1}, {0, 2}}}});
- auto got = Run<MultiplanarExternalTexture>(src, data);
- EXPECT_EQ(expect, str(got));
+ DataMap data;
+ // This bindings map specifies 0,0 as the location of the texture_external,
+ // which is incorrect.
+ data.Add<MultiplanarExternalTexture::NewBindingPoints>(
+ MultiplanarExternalTexture::BindingsMap{{{0, 0}, {{0, 1}, {0, 2}}}});
+ auto got = Run<MultiplanarExternalTexture>(src, data);
+ EXPECT_EQ(expect, str(got));
}
// Tests that the transform works with a textureDimensions call.
TEST_F(MultiplanarExternalTextureTest, Dimensions) {
- auto* src = R"(
+ auto* src = R"(
@group(0) @binding(0) var ext_tex : texture_external;
-@stage(fragment)
+@fragment
fn main(@builtin(position) coord : vec4<f32>) -> @location(0) vec4<f32> {
var dim : vec2<i32>;
dim = textureDimensions(ext_tex);
@@ -105,13 +104,24 @@ fn main(@builtin(position) coord : vec4<f32>) -> @location(0) vec4<f32> {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
+struct GammaTransferParams {
+ G : f32,
+ A : f32,
+ B : f32,
+ C : f32,
+ D : f32,
+ E : f32,
+ F : f32,
+ padding : u32,
+}
+
struct ExternalTextureParams {
numPlanes : u32,
- vr : f32,
- ug : f32,
- vg : f32,
- ub : f32,
+ yuvToRgbConversionMatrix : mat3x4<f32>,
+ gammaDecodeParams : GammaTransferParams,
+ gammaEncodeParams : GammaTransferParams,
+ gamutConversionMatrix : mat3x3<f32>,
}
@group(0) @binding(1) var ext_tex_plane_1 : texture_2d<f32>;
@@ -120,7 +130,7 @@ struct ExternalTextureParams {
@group(0) @binding(0) var ext_tex : texture_2d<f32>;
-@stage(fragment)
+@fragment
fn main(@builtin(position) coord : vec4<f32>) -> @location(0) vec4<f32> {
var dim : vec2<i32>;
dim = textureDimensions(ext_tex);
@@ -128,17 +138,17 @@ fn main(@builtin(position) coord : vec4<f32>) -> @location(0) vec4<f32> {
}
)";
- DataMap data;
- data.Add<MultiplanarExternalTexture::NewBindingPoints>(
- MultiplanarExternalTexture::BindingsMap{{{0, 0}, {{0, 1}, {0, 2}}}});
- auto got = Run<MultiplanarExternalTexture>(src, data);
- EXPECT_EQ(expect, str(got));
+ DataMap data;
+ data.Add<MultiplanarExternalTexture::NewBindingPoints>(
+ MultiplanarExternalTexture::BindingsMap{{{0, 0}, {{0, 1}, {0, 2}}}});
+ auto got = Run<MultiplanarExternalTexture>(src, data);
+ EXPECT_EQ(expect, str(got));
}
// Tests that the transform works with a textureDimensions call.
TEST_F(MultiplanarExternalTextureTest, Dimensions_OutOfOrder) {
- auto* src = R"(
-@stage(fragment)
+ auto* src = R"(
+@fragment
fn main(@builtin(position) coord : vec4<f32>) -> @location(0) vec4<f32> {
var dim : vec2<i32>;
dim = textureDimensions(ext_tex);
@@ -148,20 +158,31 @@ fn main(@builtin(position) coord : vec4<f32>) -> @location(0) vec4<f32> {
@group(0) @binding(0) var ext_tex : texture_external;
)";
- auto* expect = R"(
+ auto* expect = R"(
+struct GammaTransferParams {
+ G : f32,
+ A : f32,
+ B : f32,
+ C : f32,
+ D : f32,
+ E : f32,
+ F : f32,
+ padding : u32,
+}
+
struct ExternalTextureParams {
numPlanes : u32,
- vr : f32,
- ug : f32,
- vg : f32,
- ub : f32,
+ yuvToRgbConversionMatrix : mat3x4<f32>,
+ gammaDecodeParams : GammaTransferParams,
+ gammaEncodeParams : GammaTransferParams,
+ gamutConversionMatrix : mat3x3<f32>,
}
@group(0) @binding(1) var ext_tex_plane_1 : texture_2d<f32>;
@group(0) @binding(2) var<uniform> ext_tex_params : ExternalTextureParams;
-@stage(fragment)
+@fragment
fn main(@builtin(position) coord : vec4<f32>) -> @location(0) vec4<f32> {
var dim : vec2<i32>;
dim = textureDimensions(ext_tex);
@@ -171,32 +192,43 @@ fn main(@builtin(position) coord : vec4<f32>) -> @location(0) vec4<f32> {
@group(0) @binding(0) var ext_tex : texture_2d<f32>;
)";
- DataMap data;
- data.Add<MultiplanarExternalTexture::NewBindingPoints>(
- MultiplanarExternalTexture::BindingsMap{{{0, 0}, {{0, 1}, {0, 2}}}});
- auto got = Run<MultiplanarExternalTexture>(src, data);
- EXPECT_EQ(expect, str(got));
+ DataMap data;
+ data.Add<MultiplanarExternalTexture::NewBindingPoints>(
+ MultiplanarExternalTexture::BindingsMap{{{0, 0}, {{0, 1}, {0, 2}}}});
+ auto got = Run<MultiplanarExternalTexture>(src, data);
+ EXPECT_EQ(expect, str(got));
}
// Test that the transform works with a textureSampleLevel call.
TEST_F(MultiplanarExternalTextureTest, BasicTextureSampleLevel) {
- auto* src = R"(
+ auto* src = R"(
@group(0) @binding(0) var s : sampler;
@group(0) @binding(1) var ext_tex : texture_external;
-@stage(fragment)
+@fragment
fn main(@builtin(position) coord : vec4<f32>) -> @location(0) vec4<f32> {
return textureSampleLevel(ext_tex, s, coord.xy);
}
)";
- auto* expect = R"(
+ auto* expect = R"(
+struct GammaTransferParams {
+ G : f32,
+ A : f32,
+ B : f32,
+ C : f32,
+ D : f32,
+ E : f32,
+ F : f32,
+ padding : u32,
+}
+
struct ExternalTextureParams {
numPlanes : u32,
- vr : f32,
- ug : f32,
- vg : f32,
- ub : f32,
+ yuvToRgbConversionMatrix : mat3x4<f32>,
+ gammaDecodeParams : GammaTransferParams,
+ gammaEncodeParams : GammaTransferParams,
+ gamutConversionMatrix : mat3x3<f32>,
}
@group(0) @binding(2) var ext_tex_plane_1 : texture_2d<f32>;
@@ -207,37 +239,43 @@ struct ExternalTextureParams {
@group(0) @binding(1) var ext_tex : texture_2d<f32>;
+fn gammaCorrection(v : vec3<f32>, params : GammaTransferParams) -> vec3<f32> {
+ let cond = (abs(v) < vec3<f32>(params.D));
+ let t = (sign(v) * ((params.C * abs(v)) + params.F));
+ let f = (sign(v) * (pow(((params.A * abs(v)) + params.B), vec3<f32>(params.G)) + params.E));
+ return select(f, t, cond);
+}
+
fn textureSampleExternal(plane0 : texture_2d<f32>, plane1 : texture_2d<f32>, smp : sampler, coord : vec2<f32>, params : ExternalTextureParams) -> vec4<f32> {
+ var color : vec3<f32>;
if ((params.numPlanes == 1u)) {
- return textureSampleLevel(plane0, smp, coord, 0.0);
+ color = textureSampleLevel(plane0, smp, coord, 0.0f).rgb;
+ } else {
+ color = (vec4<f32>(textureSampleLevel(plane0, smp, coord, 0.0f).r, textureSampleLevel(plane1, smp, coord, 0.0f).rg, 1.0f) * params.yuvToRgbConversionMatrix);
}
- let y = (textureSampleLevel(plane0, smp, coord, 0.0).r - 0.0625);
- let uv = (textureSampleLevel(plane1, smp, coord, 0.0).rg - 0.5);
- let u = uv.x;
- let v = uv.y;
- let r = ((1.164000034 * y) + (params.vr * v));
- let g = (((1.164000034 * y) - (params.ug * u)) - (params.vg * v));
- let b = ((1.164000034 * y) + (params.ub * u));
- return vec4<f32>(r, g, b, 1.0);
+ color = gammaCorrection(color, params.gammaDecodeParams);
+ color = (params.gamutConversionMatrix * color);
+ color = gammaCorrection(color, params.gammaEncodeParams);
+ return vec4<f32>(color, 1.0f);
}
-@stage(fragment)
+@fragment
fn main(@builtin(position) coord : vec4<f32>) -> @location(0) vec4<f32> {
return textureSampleExternal(ext_tex, ext_tex_plane_1, s, coord.xy, ext_tex_params);
}
)";
- DataMap data;
- data.Add<MultiplanarExternalTexture::NewBindingPoints>(
- MultiplanarExternalTexture::BindingsMap{{{0, 1}, {{0, 2}, {0, 3}}}});
- auto got = Run<MultiplanarExternalTexture>(src, data);
- EXPECT_EQ(expect, str(got));
+ DataMap data;
+ data.Add<MultiplanarExternalTexture::NewBindingPoints>(
+ MultiplanarExternalTexture::BindingsMap{{{0, 1}, {{0, 2}, {0, 3}}}});
+ auto got = Run<MultiplanarExternalTexture>(src, data);
+ EXPECT_EQ(expect, str(got));
}
// Tests that the transform works with a textureSampleLevel call.
TEST_F(MultiplanarExternalTextureTest, BasicTextureSampleLevel_OutOfOrder) {
- auto* src = R"(
-@stage(fragment)
+ auto* src = R"(
+@fragment
fn main(@builtin(position) coord : vec4<f32>) -> @location(0) vec4<f32> {
return textureSampleLevel(ext_tex, s, coord.xy);
}
@@ -246,34 +284,51 @@ fn main(@builtin(position) coord : vec4<f32>) -> @location(0) vec4<f32> {
@group(0) @binding(0) var s : sampler;
)";
- auto* expect = R"(
+ auto* expect = R"(
+struct GammaTransferParams {
+ G : f32,
+ A : f32,
+ B : f32,
+ C : f32,
+ D : f32,
+ E : f32,
+ F : f32,
+ padding : u32,
+}
+
struct ExternalTextureParams {
numPlanes : u32,
- vr : f32,
- ug : f32,
- vg : f32,
- ub : f32,
+ yuvToRgbConversionMatrix : mat3x4<f32>,
+ gammaDecodeParams : GammaTransferParams,
+ gammaEncodeParams : GammaTransferParams,
+ gamutConversionMatrix : mat3x3<f32>,
}
@group(0) @binding(2) var ext_tex_plane_1 : texture_2d<f32>;
@group(0) @binding(3) var<uniform> ext_tex_params : ExternalTextureParams;
+fn gammaCorrection(v : vec3<f32>, params : GammaTransferParams) -> vec3<f32> {
+ let cond = (abs(v) < vec3<f32>(params.D));
+ let t = (sign(v) * ((params.C * abs(v)) + params.F));
+ let f = (sign(v) * (pow(((params.A * abs(v)) + params.B), vec3<f32>(params.G)) + params.E));
+ return select(f, t, cond);
+}
+
fn textureSampleExternal(plane0 : texture_2d<f32>, plane1 : texture_2d<f32>, smp : sampler, coord : vec2<f32>, params : ExternalTextureParams) -> vec4<f32> {
+ var color : vec3<f32>;
if ((params.numPlanes == 1u)) {
- return textureSampleLevel(plane0, smp, coord, 0.0);
+ color = textureSampleLevel(plane0, smp, coord, 0.0f).rgb;
+ } else {
+ color = (vec4<f32>(textureSampleLevel(plane0, smp, coord, 0.0f).r, textureSampleLevel(plane1, smp, coord, 0.0f).rg, 1.0f) * params.yuvToRgbConversionMatrix);
}
- let y = (textureSampleLevel(plane0, smp, coord, 0.0).r - 0.0625);
- let uv = (textureSampleLevel(plane1, smp, coord, 0.0).rg - 0.5);
- let u = uv.x;
- let v = uv.y;
- let r = ((1.164000034 * y) + (params.vr * v));
- let g = (((1.164000034 * y) - (params.ug * u)) - (params.vg * v));
- let b = ((1.164000034 * y) + (params.ub * u));
- return vec4<f32>(r, g, b, 1.0);
+ color = gammaCorrection(color, params.gammaDecodeParams);
+ color = (params.gamutConversionMatrix * color);
+ color = gammaCorrection(color, params.gammaEncodeParams);
+ return vec4<f32>(color, 1.0f);
}
-@stage(fragment)
+@fragment
fn main(@builtin(position) coord : vec4<f32>) -> @location(0) vec4<f32> {
return textureSampleExternal(ext_tex, ext_tex_plane_1, s, coord.xy, ext_tex_params);
}
@@ -283,31 +338,42 @@ fn main(@builtin(position) coord : vec4<f32>) -> @location(0) vec4<f32> {
@group(0) @binding(0) var s : sampler;
)";
- DataMap data;
- data.Add<MultiplanarExternalTexture::NewBindingPoints>(
- MultiplanarExternalTexture::BindingsMap{{{0, 1}, {{0, 2}, {0, 3}}}});
- auto got = Run<MultiplanarExternalTexture>(src, data);
- EXPECT_EQ(expect, str(got));
+ DataMap data;
+ data.Add<MultiplanarExternalTexture::NewBindingPoints>(
+ MultiplanarExternalTexture::BindingsMap{{{0, 1}, {{0, 2}, {0, 3}}}});
+ auto got = Run<MultiplanarExternalTexture>(src, data);
+ EXPECT_EQ(expect, str(got));
}
// Tests that the transform works with a textureLoad call.
TEST_F(MultiplanarExternalTextureTest, BasicTextureLoad) {
- auto* src = R"(
+ auto* src = R"(
@group(0) @binding(0) var ext_tex : texture_external;
-@stage(fragment)
+@fragment
fn main(@builtin(position) coord : vec4<f32>) -> @location(0) vec4<f32> {
return textureLoad(ext_tex, vec2<i32>(1, 1));
}
)";
- auto* expect = R"(
+ auto* expect = R"(
+struct GammaTransferParams {
+ G : f32,
+ A : f32,
+ B : f32,
+ C : f32,
+ D : f32,
+ E : f32,
+ F : f32,
+ padding : u32,
+}
+
struct ExternalTextureParams {
numPlanes : u32,
- vr : f32,
- ug : f32,
- vg : f32,
- ub : f32,
+ yuvToRgbConversionMatrix : mat3x4<f32>,
+ gammaDecodeParams : GammaTransferParams,
+ gammaEncodeParams : GammaTransferParams,
+ gamutConversionMatrix : mat3x3<f32>,
}
@group(0) @binding(1) var ext_tex_plane_1 : texture_2d<f32>;
@@ -316,37 +382,43 @@ struct ExternalTextureParams {
@group(0) @binding(0) var ext_tex : texture_2d<f32>;
+fn gammaCorrection(v : vec3<f32>, params : GammaTransferParams) -> vec3<f32> {
+ let cond = (abs(v) < vec3<f32>(params.D));
+ let t = (sign(v) * ((params.C * abs(v)) + params.F));
+ let f = (sign(v) * (pow(((params.A * abs(v)) + params.B), vec3<f32>(params.G)) + params.E));
+ return select(f, t, cond);
+}
+
fn textureLoadExternal(plane0 : texture_2d<f32>, plane1 : texture_2d<f32>, coord : vec2<i32>, params : ExternalTextureParams) -> vec4<f32> {
+ var color : vec3<f32>;
if ((params.numPlanes == 1u)) {
- return textureLoad(plane0, coord, 0);
+ color = textureLoad(plane0, coord, 0i).rgb;
+ } else {
+ color = (vec4<f32>(textureLoad(plane0, coord, 0i).r, textureLoad(plane1, coord, 0i).rg, 1.0f) * params.yuvToRgbConversionMatrix);
}
- let y = (textureLoad(plane0, coord, 0).r - 0.0625);
- let uv = (textureLoad(plane1, coord, 0).rg - 0.5);
- let u = uv.x;
- let v = uv.y;
- let r = ((1.164000034 * y) + (params.vr * v));
- let g = (((1.164000034 * y) - (params.ug * u)) - (params.vg * v));
- let b = ((1.164000034 * y) + (params.ub * u));
- return vec4<f32>(r, g, b, 1.0);
+ color = gammaCorrection(color, params.gammaDecodeParams);
+ color = (params.gamutConversionMatrix * color);
+ color = gammaCorrection(color, params.gammaEncodeParams);
+ return vec4<f32>(color, 1.0f);
}
-@stage(fragment)
+@fragment
fn main(@builtin(position) coord : vec4<f32>) -> @location(0) vec4<f32> {
return textureLoadExternal(ext_tex, ext_tex_plane_1, vec2<i32>(1, 1), ext_tex_params);
}
)";
- DataMap data;
- data.Add<MultiplanarExternalTexture::NewBindingPoints>(
- MultiplanarExternalTexture::BindingsMap{{{0, 0}, {{0, 1}, {0, 2}}}});
- auto got = Run<MultiplanarExternalTexture>(src, data);
- EXPECT_EQ(expect, str(got));
+ DataMap data;
+ data.Add<MultiplanarExternalTexture::NewBindingPoints>(
+ MultiplanarExternalTexture::BindingsMap{{{0, 0}, {{0, 1}, {0, 2}}}});
+ auto got = Run<MultiplanarExternalTexture>(src, data);
+ EXPECT_EQ(expect, str(got));
}
// Tests that the transform works with a textureLoad call.
TEST_F(MultiplanarExternalTextureTest, BasicTextureLoad_OutOfOrder) {
- auto* src = R"(
-@stage(fragment)
+ auto* src = R"(
+@fragment
fn main(@builtin(position) coord : vec4<f32>) -> @location(0) vec4<f32> {
return textureLoad(ext_tex, vec2<i32>(1, 1));
}
@@ -354,34 +426,51 @@ fn main(@builtin(position) coord : vec4<f32>) -> @location(0) vec4<f32> {
@group(0) @binding(0) var ext_tex : texture_external;
)";
- auto* expect = R"(
+ auto* expect = R"(
+struct GammaTransferParams {
+ G : f32,
+ A : f32,
+ B : f32,
+ C : f32,
+ D : f32,
+ E : f32,
+ F : f32,
+ padding : u32,
+}
+
struct ExternalTextureParams {
numPlanes : u32,
- vr : f32,
- ug : f32,
- vg : f32,
- ub : f32,
+ yuvToRgbConversionMatrix : mat3x4<f32>,
+ gammaDecodeParams : GammaTransferParams,
+ gammaEncodeParams : GammaTransferParams,
+ gamutConversionMatrix : mat3x3<f32>,
}
@group(0) @binding(1) var ext_tex_plane_1 : texture_2d<f32>;
@group(0) @binding(2) var<uniform> ext_tex_params : ExternalTextureParams;
+fn gammaCorrection(v : vec3<f32>, params : GammaTransferParams) -> vec3<f32> {
+ let cond = (abs(v) < vec3<f32>(params.D));
+ let t = (sign(v) * ((params.C * abs(v)) + params.F));
+ let f = (sign(v) * (pow(((params.A * abs(v)) + params.B), vec3<f32>(params.G)) + params.E));
+ return select(f, t, cond);
+}
+
fn textureLoadExternal(plane0 : texture_2d<f32>, plane1 : texture_2d<f32>, coord : vec2<i32>, params : ExternalTextureParams) -> vec4<f32> {
+ var color : vec3<f32>;
if ((params.numPlanes == 1u)) {
- return textureLoad(plane0, coord, 0);
+ color = textureLoad(plane0, coord, 0i).rgb;
+ } else {
+ color = (vec4<f32>(textureLoad(plane0, coord, 0i).r, textureLoad(plane1, coord, 0i).rg, 1.0f) * params.yuvToRgbConversionMatrix);
}
- let y = (textureLoad(plane0, coord, 0).r - 0.0625);
- let uv = (textureLoad(plane1, coord, 0).rg - 0.5);
- let u = uv.x;
- let v = uv.y;
- let r = ((1.164000034 * y) + (params.vr * v));
- let g = (((1.164000034 * y) - (params.ug * u)) - (params.vg * v));
- let b = ((1.164000034 * y) + (params.ub * u));
- return vec4<f32>(r, g, b, 1.0);
+ color = gammaCorrection(color, params.gammaDecodeParams);
+ color = (params.gamutConversionMatrix * color);
+ color = gammaCorrection(color, params.gammaEncodeParams);
+ return vec4<f32>(color, 1.0f);
}
-@stage(fragment)
+@fragment
fn main(@builtin(position) coord : vec4<f32>) -> @location(0) vec4<f32> {
return textureLoadExternal(ext_tex, ext_tex_plane_1, vec2<i32>(1, 1), ext_tex_params);
}
@@ -389,33 +478,44 @@ fn main(@builtin(position) coord : vec4<f32>) -> @location(0) vec4<f32> {
@group(0) @binding(0) var ext_tex : texture_2d<f32>;
)";
- DataMap data;
- data.Add<MultiplanarExternalTexture::NewBindingPoints>(
- MultiplanarExternalTexture::BindingsMap{{{0, 0}, {{0, 1}, {0, 2}}}});
- auto got = Run<MultiplanarExternalTexture>(src, data);
- EXPECT_EQ(expect, str(got));
+ DataMap data;
+ data.Add<MultiplanarExternalTexture::NewBindingPoints>(
+ MultiplanarExternalTexture::BindingsMap{{{0, 0}, {{0, 1}, {0, 2}}}});
+ auto got = Run<MultiplanarExternalTexture>(src, data);
+ EXPECT_EQ(expect, str(got));
}
// Tests that the transform works with both a textureSampleLevel and textureLoad
// call.
TEST_F(MultiplanarExternalTextureTest, TextureSampleAndTextureLoad) {
- auto* src = R"(
+ auto* src = R"(
@group(0) @binding(0) var s : sampler;
@group(0) @binding(1) var ext_tex : texture_external;
-@stage(fragment)
+@fragment
fn main(@builtin(position) coord : vec4<f32>) -> @location(0) vec4<f32> {
return textureSampleLevel(ext_tex, s, coord.xy) + textureLoad(ext_tex, vec2<i32>(1, 1));
}
)";
- auto* expect = R"(
+ auto* expect = R"(
+struct GammaTransferParams {
+ G : f32,
+ A : f32,
+ B : f32,
+ C : f32,
+ D : f32,
+ E : f32,
+ F : f32,
+ padding : u32,
+}
+
struct ExternalTextureParams {
numPlanes : u32,
- vr : f32,
- ug : f32,
- vg : f32,
- ub : f32,
+ yuvToRgbConversionMatrix : mat3x4<f32>,
+ gammaDecodeParams : GammaTransferParams,
+ gammaEncodeParams : GammaTransferParams,
+ gamutConversionMatrix : mat3x3<f32>,
}
@group(0) @binding(2) var ext_tex_plane_1 : texture_2d<f32>;
@@ -426,52 +526,57 @@ struct ExternalTextureParams {
@group(0) @binding(1) var ext_tex : texture_2d<f32>;
+fn gammaCorrection(v : vec3<f32>, params : GammaTransferParams) -> vec3<f32> {
+ let cond = (abs(v) < vec3<f32>(params.D));
+ let t = (sign(v) * ((params.C * abs(v)) + params.F));
+ let f = (sign(v) * (pow(((params.A * abs(v)) + params.B), vec3<f32>(params.G)) + params.E));
+ return select(f, t, cond);
+}
+
fn textureSampleExternal(plane0 : texture_2d<f32>, plane1 : texture_2d<f32>, smp : sampler, coord : vec2<f32>, params : ExternalTextureParams) -> vec4<f32> {
+ var color : vec3<f32>;
if ((params.numPlanes == 1u)) {
- return textureSampleLevel(plane0, smp, coord, 0.0);
+ color = textureSampleLevel(plane0, smp, coord, 0.0f).rgb;
+ } else {
+ color = (vec4<f32>(textureSampleLevel(plane0, smp, coord, 0.0f).r, textureSampleLevel(plane1, smp, coord, 0.0f).rg, 1.0f) * params.yuvToRgbConversionMatrix);
}
- let y = (textureSampleLevel(plane0, smp, coord, 0.0).r - 0.0625);
- let uv = (textureSampleLevel(plane1, smp, coord, 0.0).rg - 0.5);
- let u = uv.x;
- let v = uv.y;
- let r = ((1.164000034 * y) + (params.vr * v));
- let g = (((1.164000034 * y) - (params.ug * u)) - (params.vg * v));
- let b = ((1.164000034 * y) + (params.ub * u));
- return vec4<f32>(r, g, b, 1.0);
+ color = gammaCorrection(color, params.gammaDecodeParams);
+ color = (params.gamutConversionMatrix * color);
+ color = gammaCorrection(color, params.gammaEncodeParams);
+ return vec4<f32>(color, 1.0f);
}
fn textureLoadExternal(plane0 : texture_2d<f32>, plane1 : texture_2d<f32>, coord : vec2<i32>, params : ExternalTextureParams) -> vec4<f32> {
+ var color : vec3<f32>;
if ((params.numPlanes == 1u)) {
- return textureLoad(plane0, coord, 0);
+ color = textureLoad(plane0, coord, 0i).rgb;
+ } else {
+ color = (vec4<f32>(textureLoad(plane0, coord, 0i).r, textureLoad(plane1, coord, 0i).rg, 1.0f) * params.yuvToRgbConversionMatrix);
}
- let y = (textureLoad(plane0, coord, 0).r - 0.0625);
- let uv = (textureLoad(plane1, coord, 0).rg - 0.5);
- let u = uv.x;
- let v = uv.y;
- let r = ((1.164000034 * y) + (params.vr * v));
- let g = (((1.164000034 * y) - (params.ug * u)) - (params.vg * v));
- let b = ((1.164000034 * y) + (params.ub * u));
- return vec4<f32>(r, g, b, 1.0);
+ color = gammaCorrection(color, params.gammaDecodeParams);
+ color = (params.gamutConversionMatrix * color);
+ color = gammaCorrection(color, params.gammaEncodeParams);
+ return vec4<f32>(color, 1.0f);
}
-@stage(fragment)
+@fragment
fn main(@builtin(position) coord : vec4<f32>) -> @location(0) vec4<f32> {
return (textureSampleExternal(ext_tex, ext_tex_plane_1, s, coord.xy, ext_tex_params) + textureLoadExternal(ext_tex, ext_tex_plane_1, vec2<i32>(1, 1), ext_tex_params));
}
)";
- DataMap data;
- data.Add<MultiplanarExternalTexture::NewBindingPoints>(
- MultiplanarExternalTexture::BindingsMap{{{0, 1}, {{0, 2}, {0, 3}}}});
- auto got = Run<MultiplanarExternalTexture>(src, data);
- EXPECT_EQ(expect, str(got));
+ DataMap data;
+ data.Add<MultiplanarExternalTexture::NewBindingPoints>(
+ MultiplanarExternalTexture::BindingsMap{{{0, 1}, {{0, 2}, {0, 3}}}});
+ auto got = Run<MultiplanarExternalTexture>(src, data);
+ EXPECT_EQ(expect, str(got));
}
// Tests that the transform works with both a textureSampleLevel and textureLoad
// call.
TEST_F(MultiplanarExternalTextureTest, TextureSampleAndTextureLoad_OutOfOrder) {
- auto* src = R"(
-@stage(fragment)
+ auto* src = R"(
+@fragment
fn main(@builtin(position) coord : vec4<f32>) -> @location(0) vec4<f32> {
return textureSampleLevel(ext_tex, s, coord.xy) + textureLoad(ext_tex, vec2<i32>(1, 1));
}
@@ -480,48 +585,64 @@ fn main(@builtin(position) coord : vec4<f32>) -> @location(0) vec4<f32> {
@group(0) @binding(1) var ext_tex : texture_external;
)";
- auto* expect = R"(
+ auto* expect = R"(
+struct GammaTransferParams {
+ G : f32,
+ A : f32,
+ B : f32,
+ C : f32,
+ D : f32,
+ E : f32,
+ F : f32,
+ padding : u32,
+}
+
struct ExternalTextureParams {
numPlanes : u32,
- vr : f32,
- ug : f32,
- vg : f32,
- ub : f32,
+ yuvToRgbConversionMatrix : mat3x4<f32>,
+ gammaDecodeParams : GammaTransferParams,
+ gammaEncodeParams : GammaTransferParams,
+ gamutConversionMatrix : mat3x3<f32>,
}
@group(0) @binding(2) var ext_tex_plane_1 : texture_2d<f32>;
@group(0) @binding(3) var<uniform> ext_tex_params : ExternalTextureParams;
+fn gammaCorrection(v : vec3<f32>, params : GammaTransferParams) -> vec3<f32> {
+ let cond = (abs(v) < vec3<f32>(params.D));
+ let t = (sign(v) * ((params.C * abs(v)) + params.F));
+ let f = (sign(v) * (pow(((params.A * abs(v)) + params.B), vec3<f32>(params.G)) + params.E));
+ return select(f, t, cond);
+}
+
fn textureSampleExternal(plane0 : texture_2d<f32>, plane1 : texture_2d<f32>, smp : sampler, coord : vec2<f32>, params : ExternalTextureParams) -> vec4<f32> {
+ var color : vec3<f32>;
if ((params.numPlanes == 1u)) {
- return textureSampleLevel(plane0, smp, coord, 0.0);
+ color = textureSampleLevel(plane0, smp, coord, 0.0f).rgb;
+ } else {
+ color = (vec4<f32>(textureSampleLevel(plane0, smp, coord, 0.0f).r, textureSampleLevel(plane1, smp, coord, 0.0f).rg, 1.0f) * params.yuvToRgbConversionMatrix);
}
- let y = (textureSampleLevel(plane0, smp, coord, 0.0).r - 0.0625);
- let uv = (textureSampleLevel(plane1, smp, coord, 0.0).rg - 0.5);
- let u = uv.x;
- let v = uv.y;
- let r = ((1.164000034 * y) + (params.vr * v));
- let g = (((1.164000034 * y) - (params.ug * u)) - (params.vg * v));
- let b = ((1.164000034 * y) + (params.ub * u));
- return vec4<f32>(r, g, b, 1.0);
+ color = gammaCorrection(color, params.gammaDecodeParams);
+ color = (params.gamutConversionMatrix * color);
+ color = gammaCorrection(color, params.gammaEncodeParams);
+ return vec4<f32>(color, 1.0f);
}
fn textureLoadExternal(plane0 : texture_2d<f32>, plane1 : texture_2d<f32>, coord : vec2<i32>, params : ExternalTextureParams) -> vec4<f32> {
+ var color : vec3<f32>;
if ((params.numPlanes == 1u)) {
- return textureLoad(plane0, coord, 0);
+ color = textureLoad(plane0, coord, 0i).rgb;
+ } else {
+ color = (vec4<f32>(textureLoad(plane0, coord, 0i).r, textureLoad(plane1, coord, 0i).rg, 1.0f) * params.yuvToRgbConversionMatrix);
}
- let y = (textureLoad(plane0, coord, 0).r - 0.0625);
- let uv = (textureLoad(plane1, coord, 0).rg - 0.5);
- let u = uv.x;
- let v = uv.y;
- let r = ((1.164000034 * y) + (params.vr * v));
- let g = (((1.164000034 * y) - (params.ug * u)) - (params.vg * v));
- let b = ((1.164000034 * y) + (params.ub * u));
- return vec4<f32>(r, g, b, 1.0);
+ color = gammaCorrection(color, params.gammaDecodeParams);
+ color = (params.gamutConversionMatrix * color);
+ color = gammaCorrection(color, params.gammaEncodeParams);
+ return vec4<f32>(color, 1.0f);
}
-@stage(fragment)
+@fragment
fn main(@builtin(position) coord : vec4<f32>) -> @location(0) vec4<f32> {
return (textureSampleExternal(ext_tex, ext_tex_plane_1, s, coord.xy, ext_tex_params) + textureLoadExternal(ext_tex, ext_tex_plane_1, vec2<i32>(1, 1), ext_tex_params));
}
@@ -531,35 +652,46 @@ fn main(@builtin(position) coord : vec4<f32>) -> @location(0) vec4<f32> {
@group(0) @binding(1) var ext_tex : texture_2d<f32>;
)";
- DataMap data;
- data.Add<MultiplanarExternalTexture::NewBindingPoints>(
- MultiplanarExternalTexture::BindingsMap{{{0, 1}, {{0, 2}, {0, 3}}}});
- auto got = Run<MultiplanarExternalTexture>(src, data);
- EXPECT_EQ(expect, str(got));
+ DataMap data;
+ data.Add<MultiplanarExternalTexture::NewBindingPoints>(
+ MultiplanarExternalTexture::BindingsMap{{{0, 1}, {{0, 2}, {0, 3}}}});
+ auto got = Run<MultiplanarExternalTexture>(src, data);
+ EXPECT_EQ(expect, str(got));
}
// Tests that the transform works with many instances of texture_external.
TEST_F(MultiplanarExternalTextureTest, ManyTextureSampleLevel) {
- auto* src = R"(
+ auto* src = R"(
@group(0) @binding(0) var s : sampler;
@group(0) @binding(1) var ext_tex : texture_external;
@group(0) @binding(2) var ext_tex_1 : texture_external;
@group(0) @binding(3) var ext_tex_2 : texture_external;
@group(1) @binding(0) var ext_tex_3 : texture_external;
-@stage(fragment)
+@fragment
fn main(@builtin(position) coord : vec4<f32>) -> @location(0) vec4<f32> {
return textureSampleLevel(ext_tex, s, coord.xy) + textureSampleLevel(ext_tex_1, s, coord.xy) + textureSampleLevel(ext_tex_2, s, coord.xy) + textureSampleLevel(ext_tex_3, s, coord.xy);
}
)";
- auto* expect = R"(
+ auto* expect = R"(
+struct GammaTransferParams {
+ G : f32,
+ A : f32,
+ B : f32,
+ C : f32,
+ D : f32,
+ E : f32,
+ F : f32,
+ padding : u32,
+}
+
struct ExternalTextureParams {
numPlanes : u32,
- vr : f32,
- ug : f32,
- vg : f32,
- ub : f32,
+ yuvToRgbConversionMatrix : mat3x4<f32>,
+ gammaDecodeParams : GammaTransferParams,
+ gammaEncodeParams : GammaTransferParams,
+ gamutConversionMatrix : mat3x3<f32>,
}
@group(0) @binding(4) var ext_tex_plane_1 : texture_2d<f32>;
@@ -588,42 +720,47 @@ struct ExternalTextureParams {
@group(1) @binding(0) var ext_tex_3 : texture_2d<f32>;
+fn gammaCorrection(v : vec3<f32>, params : GammaTransferParams) -> vec3<f32> {
+ let cond = (abs(v) < vec3<f32>(params.D));
+ let t = (sign(v) * ((params.C * abs(v)) + params.F));
+ let f = (sign(v) * (pow(((params.A * abs(v)) + params.B), vec3<f32>(params.G)) + params.E));
+ return select(f, t, cond);
+}
+
fn textureSampleExternal(plane0 : texture_2d<f32>, plane1 : texture_2d<f32>, smp : sampler, coord : vec2<f32>, params : ExternalTextureParams) -> vec4<f32> {
+ var color : vec3<f32>;
if ((params.numPlanes == 1u)) {
- return textureSampleLevel(plane0, smp, coord, 0.0);
+ color = textureSampleLevel(plane0, smp, coord, 0.0f).rgb;
+ } else {
+ color = (vec4<f32>(textureSampleLevel(plane0, smp, coord, 0.0f).r, textureSampleLevel(plane1, smp, coord, 0.0f).rg, 1.0f) * params.yuvToRgbConversionMatrix);
}
- let y = (textureSampleLevel(plane0, smp, coord, 0.0).r - 0.0625);
- let uv = (textureSampleLevel(plane1, smp, coord, 0.0).rg - 0.5);
- let u = uv.x;
- let v = uv.y;
- let r = ((1.164000034 * y) + (params.vr * v));
- let g = (((1.164000034 * y) - (params.ug * u)) - (params.vg * v));
- let b = ((1.164000034 * y) + (params.ub * u));
- return vec4<f32>(r, g, b, 1.0);
+ color = gammaCorrection(color, params.gammaDecodeParams);
+ color = (params.gamutConversionMatrix * color);
+ color = gammaCorrection(color, params.gammaEncodeParams);
+ return vec4<f32>(color, 1.0f);
}
-@stage(fragment)
+@fragment
fn main(@builtin(position) coord : vec4<f32>) -> @location(0) vec4<f32> {
return (((textureSampleExternal(ext_tex, ext_tex_plane_1, s, coord.xy, ext_tex_params) + textureSampleExternal(ext_tex_1, ext_tex_plane_1_1, s, coord.xy, ext_tex_params_1)) + textureSampleExternal(ext_tex_2, ext_tex_plane_1_2, s, coord.xy, ext_tex_params_2)) + textureSampleExternal(ext_tex_3, ext_tex_plane_1_3, s, coord.xy, ext_tex_params_3));
}
)";
- DataMap data;
- data.Add<MultiplanarExternalTexture::NewBindingPoints>(
- MultiplanarExternalTexture::BindingsMap{
- {{0, 1}, {{0, 4}, {0, 5}}},
- {{0, 2}, {{0, 6}, {0, 7}}},
- {{0, 3}, {{0, 8}, {0, 9}}},
- {{1, 0}, {{1, 1}, {1, 2}}},
- });
- auto got = Run<MultiplanarExternalTexture>(src, data);
- EXPECT_EQ(expect, str(got));
+ DataMap data;
+ data.Add<MultiplanarExternalTexture::NewBindingPoints>(MultiplanarExternalTexture::BindingsMap{
+ {{0, 1}, {{0, 4}, {0, 5}}},
+ {{0, 2}, {{0, 6}, {0, 7}}},
+ {{0, 3}, {{0, 8}, {0, 9}}},
+ {{1, 0}, {{1, 1}, {1, 2}}},
+ });
+ auto got = Run<MultiplanarExternalTexture>(src, data);
+ EXPECT_EQ(expect, str(got));
}
// Tests that the texture_external passed as a function parameter produces the
// correct output.
TEST_F(MultiplanarExternalTextureTest, ExternalTexturePassedAsParam) {
- auto* src = R"(
+ auto* src = R"(
fn f(t : texture_external, s : sampler) {
textureSampleLevel(t, s, vec2<f32>(1.0, 2.0));
}
@@ -631,37 +768,54 @@ fn f(t : texture_external, s : sampler) {
@group(0) @binding(0) var ext_tex : texture_external;
@group(0) @binding(1) var smp : sampler;
-@stage(fragment)
+@fragment
fn main() {
f(ext_tex, smp);
}
)";
- auto* expect = R"(
+ auto* expect = R"(
+struct GammaTransferParams {
+ G : f32,
+ A : f32,
+ B : f32,
+ C : f32,
+ D : f32,
+ E : f32,
+ F : f32,
+ padding : u32,
+}
+
struct ExternalTextureParams {
numPlanes : u32,
- vr : f32,
- ug : f32,
- vg : f32,
- ub : f32,
+ yuvToRgbConversionMatrix : mat3x4<f32>,
+ gammaDecodeParams : GammaTransferParams,
+ gammaEncodeParams : GammaTransferParams,
+ gamutConversionMatrix : mat3x3<f32>,
}
@group(0) @binding(2) var ext_tex_plane_1 : texture_2d<f32>;
@group(0) @binding(3) var<uniform> ext_tex_params : ExternalTextureParams;
+fn gammaCorrection(v : vec3<f32>, params : GammaTransferParams) -> vec3<f32> {
+ let cond = (abs(v) < vec3<f32>(params.D));
+ let t = (sign(v) * ((params.C * abs(v)) + params.F));
+ let f = (sign(v) * (pow(((params.A * abs(v)) + params.B), vec3<f32>(params.G)) + params.E));
+ return select(f, t, cond);
+}
+
fn textureSampleExternal(plane0 : texture_2d<f32>, plane1 : texture_2d<f32>, smp : sampler, coord : vec2<f32>, params : ExternalTextureParams) -> vec4<f32> {
+ var color : vec3<f32>;
if ((params.numPlanes == 1u)) {
- return textureSampleLevel(plane0, smp, coord, 0.0);
+ color = textureSampleLevel(plane0, smp, coord, 0.0f).rgb;
+ } else {
+ color = (vec4<f32>(textureSampleLevel(plane0, smp, coord, 0.0f).r, textureSampleLevel(plane1, smp, coord, 0.0f).rg, 1.0f) * params.yuvToRgbConversionMatrix);
}
- let y = (textureSampleLevel(plane0, smp, coord, 0.0).r - 0.0625);
- let uv = (textureSampleLevel(plane1, smp, coord, 0.0).rg - 0.5);
- let u = uv.x;
- let v = uv.y;
- let r = ((1.164000034 * y) + (params.vr * v));
- let g = (((1.164000034 * y) - (params.ug * u)) - (params.vg * v));
- let b = ((1.164000034 * y) + (params.ub * u));
- return vec4<f32>(r, g, b, 1.0);
+ color = gammaCorrection(color, params.gammaDecodeParams);
+ color = (params.gamutConversionMatrix * color);
+ color = gammaCorrection(color, params.gammaEncodeParams);
+ return vec4<f32>(color, 1.0f);
}
fn f(t : texture_2d<f32>, ext_tex_plane_1_1 : texture_2d<f32>, ext_tex_params_1 : ExternalTextureParams, s : sampler) {
@@ -672,26 +826,24 @@ fn f(t : texture_2d<f32>, ext_tex_plane_1_1 : texture_2d<f32>, ext_tex_params_1
@group(0) @binding(1) var smp : sampler;
-@stage(fragment)
+@fragment
fn main() {
f(ext_tex, ext_tex_plane_1, ext_tex_params, smp);
}
)";
- DataMap data;
- data.Add<MultiplanarExternalTexture::NewBindingPoints>(
- MultiplanarExternalTexture::BindingsMap{
- {{0, 0}, {{0, 2}, {0, 3}}},
- });
- auto got = Run<MultiplanarExternalTexture>(src, data);
- EXPECT_EQ(expect, str(got));
+ DataMap data;
+ data.Add<MultiplanarExternalTexture::NewBindingPoints>(MultiplanarExternalTexture::BindingsMap{
+ {{0, 0}, {{0, 2}, {0, 3}}},
+ });
+ auto got = Run<MultiplanarExternalTexture>(src, data);
+ EXPECT_EQ(expect, str(got));
}
// Tests that the texture_external passed as a function parameter produces the
// correct output.
-TEST_F(MultiplanarExternalTextureTest,
- ExternalTexturePassedAsParam_OutOfOrder) {
- auto* src = R"(
-@stage(fragment)
+TEST_F(MultiplanarExternalTextureTest, ExternalTexturePassedAsParam_OutOfOrder) {
+ auto* src = R"(
+@fragment
fn main() {
f(ext_tex, smp);
}
@@ -704,36 +856,53 @@ fn f(t : texture_external, s : sampler) {
@group(0) @binding(1) var smp : sampler;
)";
- auto* expect = R"(
+ auto* expect = R"(
+struct GammaTransferParams {
+ G : f32,
+ A : f32,
+ B : f32,
+ C : f32,
+ D : f32,
+ E : f32,
+ F : f32,
+ padding : u32,
+}
+
struct ExternalTextureParams {
numPlanes : u32,
- vr : f32,
- ug : f32,
- vg : f32,
- ub : f32,
+ yuvToRgbConversionMatrix : mat3x4<f32>,
+ gammaDecodeParams : GammaTransferParams,
+ gammaEncodeParams : GammaTransferParams,
+ gamutConversionMatrix : mat3x3<f32>,
}
@group(0) @binding(2) var ext_tex_plane_1 : texture_2d<f32>;
@group(0) @binding(3) var<uniform> ext_tex_params : ExternalTextureParams;
-@stage(fragment)
+@fragment
fn main() {
f(ext_tex, ext_tex_plane_1, ext_tex_params, smp);
}
+fn gammaCorrection(v : vec3<f32>, params : GammaTransferParams) -> vec3<f32> {
+ let cond = (abs(v) < vec3<f32>(params.D));
+ let t = (sign(v) * ((params.C * abs(v)) + params.F));
+ let f = (sign(v) * (pow(((params.A * abs(v)) + params.B), vec3<f32>(params.G)) + params.E));
+ return select(f, t, cond);
+}
+
fn textureSampleExternal(plane0 : texture_2d<f32>, plane1 : texture_2d<f32>, smp : sampler, coord : vec2<f32>, params : ExternalTextureParams) -> vec4<f32> {
+ var color : vec3<f32>;
if ((params.numPlanes == 1u)) {
- return textureSampleLevel(plane0, smp, coord, 0.0);
+ color = textureSampleLevel(plane0, smp, coord, 0.0f).rgb;
+ } else {
+ color = (vec4<f32>(textureSampleLevel(plane0, smp, coord, 0.0f).r, textureSampleLevel(plane1, smp, coord, 0.0f).rg, 1.0f) * params.yuvToRgbConversionMatrix);
}
- let y = (textureSampleLevel(plane0, smp, coord, 0.0).r - 0.0625);
- let uv = (textureSampleLevel(plane1, smp, coord, 0.0).rg - 0.5);
- let u = uv.x;
- let v = uv.y;
- let r = ((1.164000034 * y) + (params.vr * v));
- let g = (((1.164000034 * y) - (params.ug * u)) - (params.vg * v));
- let b = ((1.164000034 * y) + (params.ub * u));
- return vec4<f32>(r, g, b, 1.0);
+ color = gammaCorrection(color, params.gammaDecodeParams);
+ color = (params.gamutConversionMatrix * color);
+ color = gammaCorrection(color, params.gammaEncodeParams);
+ return vec4<f32>(color, 1.0f);
}
fn f(t : texture_2d<f32>, ext_tex_plane_1_1 : texture_2d<f32>, ext_tex_params_1 : ExternalTextureParams, s : sampler) {
@@ -744,19 +913,18 @@ fn f(t : texture_2d<f32>, ext_tex_plane_1_1 : texture_2d<f32>, ext_tex_params_1
@group(0) @binding(1) var smp : sampler;
)";
- DataMap data;
- data.Add<MultiplanarExternalTexture::NewBindingPoints>(
- MultiplanarExternalTexture::BindingsMap{
- {{0, 0}, {{0, 2}, {0, 3}}},
- });
- auto got = Run<MultiplanarExternalTexture>(src, data);
- EXPECT_EQ(expect, str(got));
+ DataMap data;
+ data.Add<MultiplanarExternalTexture::NewBindingPoints>(MultiplanarExternalTexture::BindingsMap{
+ {{0, 0}, {{0, 2}, {0, 3}}},
+ });
+ auto got = Run<MultiplanarExternalTexture>(src, data);
+ EXPECT_EQ(expect, str(got));
}
// Tests that the texture_external passed as a parameter not in the first
// position produces the correct output.
TEST_F(MultiplanarExternalTextureTest, ExternalTexturePassedAsSecondParam) {
- auto* src = R"(
+ auto* src = R"(
fn f(s : sampler, t : texture_external) {
textureSampleLevel(t, s, vec2<f32>(1.0, 2.0));
}
@@ -764,37 +932,54 @@ fn f(s : sampler, t : texture_external) {
@group(0) @binding(0) var ext_tex : texture_external;
@group(0) @binding(1) var smp : sampler;
-@stage(fragment)
+@fragment
fn main() {
f(smp, ext_tex);
}
)";
- auto* expect = R"(
+ auto* expect = R"(
+struct GammaTransferParams {
+ G : f32,
+ A : f32,
+ B : f32,
+ C : f32,
+ D : f32,
+ E : f32,
+ F : f32,
+ padding : u32,
+}
+
struct ExternalTextureParams {
numPlanes : u32,
- vr : f32,
- ug : f32,
- vg : f32,
- ub : f32,
+ yuvToRgbConversionMatrix : mat3x4<f32>,
+ gammaDecodeParams : GammaTransferParams,
+ gammaEncodeParams : GammaTransferParams,
+ gamutConversionMatrix : mat3x3<f32>,
}
@group(0) @binding(2) var ext_tex_plane_1 : texture_2d<f32>;
@group(0) @binding(3) var<uniform> ext_tex_params : ExternalTextureParams;
+fn gammaCorrection(v : vec3<f32>, params : GammaTransferParams) -> vec3<f32> {
+ let cond = (abs(v) < vec3<f32>(params.D));
+ let t = (sign(v) * ((params.C * abs(v)) + params.F));
+ let f = (sign(v) * (pow(((params.A * abs(v)) + params.B), vec3<f32>(params.G)) + params.E));
+ return select(f, t, cond);
+}
+
fn textureSampleExternal(plane0 : texture_2d<f32>, plane1 : texture_2d<f32>, smp : sampler, coord : vec2<f32>, params : ExternalTextureParams) -> vec4<f32> {
+ var color : vec3<f32>;
if ((params.numPlanes == 1u)) {
- return textureSampleLevel(plane0, smp, coord, 0.0);
+ color = textureSampleLevel(plane0, smp, coord, 0.0f).rgb;
+ } else {
+ color = (vec4<f32>(textureSampleLevel(plane0, smp, coord, 0.0f).r, textureSampleLevel(plane1, smp, coord, 0.0f).rg, 1.0f) * params.yuvToRgbConversionMatrix);
}
- let y = (textureSampleLevel(plane0, smp, coord, 0.0).r - 0.0625);
- let uv = (textureSampleLevel(plane1, smp, coord, 0.0).rg - 0.5);
- let u = uv.x;
- let v = uv.y;
- let r = ((1.164000034 * y) + (params.vr * v));
- let g = (((1.164000034 * y) - (params.ug * u)) - (params.vg * v));
- let b = ((1.164000034 * y) + (params.ub * u));
- return vec4<f32>(r, g, b, 1.0);
+ color = gammaCorrection(color, params.gammaDecodeParams);
+ color = (params.gamutConversionMatrix * color);
+ color = gammaCorrection(color, params.gammaEncodeParams);
+ return vec4<f32>(color, 1.0f);
}
fn f(s : sampler, t : texture_2d<f32>, ext_tex_plane_1_1 : texture_2d<f32>, ext_tex_params_1 : ExternalTextureParams) {
@@ -805,24 +990,23 @@ fn f(s : sampler, t : texture_2d<f32>, ext_tex_plane_1_1 : texture_2d<f32>, ext_
@group(0) @binding(1) var smp : sampler;
-@stage(fragment)
+@fragment
fn main() {
f(smp, ext_tex, ext_tex_plane_1, ext_tex_params);
}
)";
- DataMap data;
- data.Add<MultiplanarExternalTexture::NewBindingPoints>(
- MultiplanarExternalTexture::BindingsMap{
- {{0, 0}, {{0, 2}, {0, 3}}},
- });
- auto got = Run<MultiplanarExternalTexture>(src, data);
- EXPECT_EQ(expect, str(got));
+ DataMap data;
+ data.Add<MultiplanarExternalTexture::NewBindingPoints>(MultiplanarExternalTexture::BindingsMap{
+ {{0, 0}, {{0, 2}, {0, 3}}},
+ });
+ auto got = Run<MultiplanarExternalTexture>(src, data);
+ EXPECT_EQ(expect, str(got));
}
// Tests that multiple texture_external params passed to a function produce the
// correct output.
TEST_F(MultiplanarExternalTextureTest, ExternalTexturePassedAsParamMultiple) {
- auto* src = R"(
+ auto* src = R"(
fn f(t : texture_external, s : sampler, t2 : texture_external) {
textureSampleLevel(t, s, vec2<f32>(1.0, 2.0));
textureSampleLevel(t2, s, vec2<f32>(1.0, 2.0));
@@ -832,19 +1016,30 @@ fn f(t : texture_external, s : sampler, t2 : texture_external) {
@group(0) @binding(1) var smp : sampler;
@group(0) @binding(2) var ext_tex2 : texture_external;
-@stage(fragment)
+@fragment
fn main() {
f(ext_tex, smp, ext_tex2);
}
)";
- auto* expect = R"(
+ auto* expect = R"(
+struct GammaTransferParams {
+ G : f32,
+ A : f32,
+ B : f32,
+ C : f32,
+ D : f32,
+ E : f32,
+ F : f32,
+ padding : u32,
+}
+
struct ExternalTextureParams {
numPlanes : u32,
- vr : f32,
- ug : f32,
- vg : f32,
- ub : f32,
+ yuvToRgbConversionMatrix : mat3x4<f32>,
+ gammaDecodeParams : GammaTransferParams,
+ gammaEncodeParams : GammaTransferParams,
+ gamutConversionMatrix : mat3x3<f32>,
}
@group(0) @binding(3) var ext_tex_plane_1 : texture_2d<f32>;
@@ -855,18 +1050,24 @@ struct ExternalTextureParams {
@group(0) @binding(6) var<uniform> ext_tex_params_1 : ExternalTextureParams;
+fn gammaCorrection(v : vec3<f32>, params : GammaTransferParams) -> vec3<f32> {
+ let cond = (abs(v) < vec3<f32>(params.D));
+ let t = (sign(v) * ((params.C * abs(v)) + params.F));
+ let f = (sign(v) * (pow(((params.A * abs(v)) + params.B), vec3<f32>(params.G)) + params.E));
+ return select(f, t, cond);
+}
+
fn textureSampleExternal(plane0 : texture_2d<f32>, plane1 : texture_2d<f32>, smp : sampler, coord : vec2<f32>, params : ExternalTextureParams) -> vec4<f32> {
+ var color : vec3<f32>;
if ((params.numPlanes == 1u)) {
- return textureSampleLevel(plane0, smp, coord, 0.0);
+ color = textureSampleLevel(plane0, smp, coord, 0.0f).rgb;
+ } else {
+ color = (vec4<f32>(textureSampleLevel(plane0, smp, coord, 0.0f).r, textureSampleLevel(plane1, smp, coord, 0.0f).rg, 1.0f) * params.yuvToRgbConversionMatrix);
}
- let y = (textureSampleLevel(plane0, smp, coord, 0.0).r - 0.0625);
- let uv = (textureSampleLevel(plane1, smp, coord, 0.0).rg - 0.5);
- let u = uv.x;
- let v = uv.y;
- let r = ((1.164000034 * y) + (params.vr * v));
- let g = (((1.164000034 * y) - (params.ug * u)) - (params.vg * v));
- let b = ((1.164000034 * y) + (params.ub * u));
- return vec4<f32>(r, g, b, 1.0);
+ color = gammaCorrection(color, params.gammaDecodeParams);
+ color = (params.gamutConversionMatrix * color);
+ color = gammaCorrection(color, params.gammaEncodeParams);
+ return vec4<f32>(color, 1.0f);
}
fn f(t : texture_2d<f32>, ext_tex_plane_1_2 : texture_2d<f32>, ext_tex_params_2 : ExternalTextureParams, s : sampler, t2 : texture_2d<f32>, ext_tex_plane_1_3 : texture_2d<f32>, ext_tex_params_3 : ExternalTextureParams) {
@@ -880,27 +1081,25 @@ fn f(t : texture_2d<f32>, ext_tex_plane_1_2 : texture_2d<f32>, ext_tex_params_2
@group(0) @binding(2) var ext_tex2 : texture_2d<f32>;
-@stage(fragment)
+@fragment
fn main() {
f(ext_tex, ext_tex_plane_1, ext_tex_params, smp, ext_tex2, ext_tex_plane_1_1, ext_tex_params_1);
}
)";
- DataMap data;
- data.Add<MultiplanarExternalTexture::NewBindingPoints>(
- MultiplanarExternalTexture::BindingsMap{
- {{0, 0}, {{0, 3}, {0, 4}}},
- {{0, 2}, {{0, 5}, {0, 6}}},
- });
- auto got = Run<MultiplanarExternalTexture>(src, data);
- EXPECT_EQ(expect, str(got));
+ DataMap data;
+ data.Add<MultiplanarExternalTexture::NewBindingPoints>(MultiplanarExternalTexture::BindingsMap{
+ {{0, 0}, {{0, 3}, {0, 4}}},
+ {{0, 2}, {{0, 5}, {0, 6}}},
+ });
+ auto got = Run<MultiplanarExternalTexture>(src, data);
+ EXPECT_EQ(expect, str(got));
}
// Tests that multiple texture_external params passed to a function produce the
// correct output.
-TEST_F(MultiplanarExternalTextureTest,
- ExternalTexturePassedAsParamMultiple_OutOfOrder) {
- auto* src = R"(
-@stage(fragment)
+TEST_F(MultiplanarExternalTextureTest, ExternalTexturePassedAsParamMultiple_OutOfOrder) {
+ auto* src = R"(
+@fragment
fn main() {
f(ext_tex, smp, ext_tex2);
}
@@ -916,13 +1115,24 @@ fn f(t : texture_external, s : sampler, t2 : texture_external) {
)";
- auto* expect = R"(
+ auto* expect = R"(
+struct GammaTransferParams {
+ G : f32,
+ A : f32,
+ B : f32,
+ C : f32,
+ D : f32,
+ E : f32,
+ F : f32,
+ padding : u32,
+}
+
struct ExternalTextureParams {
numPlanes : u32,
- vr : f32,
- ug : f32,
- vg : f32,
- ub : f32,
+ yuvToRgbConversionMatrix : mat3x4<f32>,
+ gammaDecodeParams : GammaTransferParams,
+ gammaEncodeParams : GammaTransferParams,
+ gamutConversionMatrix : mat3x3<f32>,
}
@group(0) @binding(3) var ext_tex_plane_1 : texture_2d<f32>;
@@ -933,23 +1143,29 @@ struct ExternalTextureParams {
@group(0) @binding(6) var<uniform> ext_tex_params_1 : ExternalTextureParams;
-@stage(fragment)
+@fragment
fn main() {
f(ext_tex, ext_tex_plane_1, ext_tex_params, smp, ext_tex2, ext_tex_plane_1_1, ext_tex_params_1);
}
+fn gammaCorrection(v : vec3<f32>, params : GammaTransferParams) -> vec3<f32> {
+ let cond = (abs(v) < vec3<f32>(params.D));
+ let t = (sign(v) * ((params.C * abs(v)) + params.F));
+ let f = (sign(v) * (pow(((params.A * abs(v)) + params.B), vec3<f32>(params.G)) + params.E));
+ return select(f, t, cond);
+}
+
fn textureSampleExternal(plane0 : texture_2d<f32>, plane1 : texture_2d<f32>, smp : sampler, coord : vec2<f32>, params : ExternalTextureParams) -> vec4<f32> {
+ var color : vec3<f32>;
if ((params.numPlanes == 1u)) {
- return textureSampleLevel(plane0, smp, coord, 0.0);
+ color = textureSampleLevel(plane0, smp, coord, 0.0f).rgb;
+ } else {
+ color = (vec4<f32>(textureSampleLevel(plane0, smp, coord, 0.0f).r, textureSampleLevel(plane1, smp, coord, 0.0f).rg, 1.0f) * params.yuvToRgbConversionMatrix);
}
- let y = (textureSampleLevel(plane0, smp, coord, 0.0).r - 0.0625);
- let uv = (textureSampleLevel(plane1, smp, coord, 0.0).rg - 0.5);
- let u = uv.x;
- let v = uv.y;
- let r = ((1.164000034 * y) + (params.vr * v));
- let g = (((1.164000034 * y) - (params.ug * u)) - (params.vg * v));
- let b = ((1.164000034 * y) + (params.ub * u));
- return vec4<f32>(r, g, b, 1.0);
+ color = gammaCorrection(color, params.gammaDecodeParams);
+ color = (params.gamutConversionMatrix * color);
+ color = gammaCorrection(color, params.gammaEncodeParams);
+ return vec4<f32>(color, 1.0f);
}
fn f(t : texture_2d<f32>, ext_tex_plane_1_2 : texture_2d<f32>, ext_tex_params_2 : ExternalTextureParams, s : sampler, t2 : texture_2d<f32>, ext_tex_plane_1_3 : texture_2d<f32>, ext_tex_params_3 : ExternalTextureParams) {
@@ -963,20 +1179,19 @@ fn f(t : texture_2d<f32>, ext_tex_plane_1_2 : texture_2d<f32>, ext_tex_params_2
@group(0) @binding(2) var ext_tex2 : texture_2d<f32>;
)";
- DataMap data;
- data.Add<MultiplanarExternalTexture::NewBindingPoints>(
- MultiplanarExternalTexture::BindingsMap{
- {{0, 0}, {{0, 3}, {0, 4}}},
- {{0, 2}, {{0, 5}, {0, 6}}},
- });
- auto got = Run<MultiplanarExternalTexture>(src, data);
- EXPECT_EQ(expect, str(got));
+ DataMap data;
+ data.Add<MultiplanarExternalTexture::NewBindingPoints>(MultiplanarExternalTexture::BindingsMap{
+ {{0, 0}, {{0, 3}, {0, 4}}},
+ {{0, 2}, {{0, 5}, {0, 6}}},
+ });
+ auto got = Run<MultiplanarExternalTexture>(src, data);
+ EXPECT_EQ(expect, str(got));
}
// Tests that the texture_external passed as a parameter to multiple
// functions produces the correct output.
TEST_F(MultiplanarExternalTextureTest, ExternalTexturePassedAsParamNested) {
- auto* src = R"(
+ auto* src = R"(
fn nested(t : texture_external, s : sampler) {
textureSampleLevel(t, s, vec2<f32>(1.0, 2.0));
}
@@ -988,37 +1203,54 @@ fn f(t : texture_external, s : sampler) {
@group(0) @binding(0) var ext_tex : texture_external;
@group(0) @binding(1) var smp : sampler;
-@stage(fragment)
+@fragment
fn main() {
f(ext_tex, smp);
}
)";
- auto* expect = R"(
+ auto* expect = R"(
+struct GammaTransferParams {
+ G : f32,
+ A : f32,
+ B : f32,
+ C : f32,
+ D : f32,
+ E : f32,
+ F : f32,
+ padding : u32,
+}
+
struct ExternalTextureParams {
numPlanes : u32,
- vr : f32,
- ug : f32,
- vg : f32,
- ub : f32,
+ yuvToRgbConversionMatrix : mat3x4<f32>,
+ gammaDecodeParams : GammaTransferParams,
+ gammaEncodeParams : GammaTransferParams,
+ gamutConversionMatrix : mat3x3<f32>,
}
@group(0) @binding(2) var ext_tex_plane_1 : texture_2d<f32>;
@group(0) @binding(3) var<uniform> ext_tex_params : ExternalTextureParams;
+fn gammaCorrection(v : vec3<f32>, params : GammaTransferParams) -> vec3<f32> {
+ let cond = (abs(v) < vec3<f32>(params.D));
+ let t = (sign(v) * ((params.C * abs(v)) + params.F));
+ let f = (sign(v) * (pow(((params.A * abs(v)) + params.B), vec3<f32>(params.G)) + params.E));
+ return select(f, t, cond);
+}
+
fn textureSampleExternal(plane0 : texture_2d<f32>, plane1 : texture_2d<f32>, smp : sampler, coord : vec2<f32>, params : ExternalTextureParams) -> vec4<f32> {
+ var color : vec3<f32>;
if ((params.numPlanes == 1u)) {
- return textureSampleLevel(plane0, smp, coord, 0.0);
+ color = textureSampleLevel(plane0, smp, coord, 0.0f).rgb;
+ } else {
+ color = (vec4<f32>(textureSampleLevel(plane0, smp, coord, 0.0f).r, textureSampleLevel(plane1, smp, coord, 0.0f).rg, 1.0f) * params.yuvToRgbConversionMatrix);
}
- let y = (textureSampleLevel(plane0, smp, coord, 0.0).r - 0.0625);
- let uv = (textureSampleLevel(plane1, smp, coord, 0.0).rg - 0.5);
- let u = uv.x;
- let v = uv.y;
- let r = ((1.164000034 * y) + (params.vr * v));
- let g = (((1.164000034 * y) - (params.ug * u)) - (params.vg * v));
- let b = ((1.164000034 * y) + (params.ub * u));
- return vec4<f32>(r, g, b, 1.0);
+ color = gammaCorrection(color, params.gammaDecodeParams);
+ color = (params.gamutConversionMatrix * color);
+ color = gammaCorrection(color, params.gammaEncodeParams);
+ return vec4<f32>(color, 1.0f);
}
fn nested(t : texture_2d<f32>, ext_tex_plane_1_1 : texture_2d<f32>, ext_tex_params_1 : ExternalTextureParams, s : sampler) {
@@ -1033,25 +1265,23 @@ fn f(t : texture_2d<f32>, ext_tex_plane_1_2 : texture_2d<f32>, ext_tex_params_2
@group(0) @binding(1) var smp : sampler;
-@stage(fragment)
+@fragment
fn main() {
f(ext_tex, ext_tex_plane_1, ext_tex_params, smp);
}
)";
- DataMap data;
- data.Add<MultiplanarExternalTexture::NewBindingPoints>(
- MultiplanarExternalTexture::BindingsMap{
- {{0, 0}, {{0, 2}, {0, 3}}},
- });
- auto got = Run<MultiplanarExternalTexture>(src, data);
- EXPECT_EQ(expect, str(got));
+ DataMap data;
+ data.Add<MultiplanarExternalTexture::NewBindingPoints>(MultiplanarExternalTexture::BindingsMap{
+ {{0, 0}, {{0, 2}, {0, 3}}},
+ });
+ auto got = Run<MultiplanarExternalTexture>(src, data);
+ EXPECT_EQ(expect, str(got));
}
// Tests that the texture_external passed as a parameter to multiple
// functions produces the correct output.
-TEST_F(MultiplanarExternalTextureTest,
- ExternalTexturePassedAsParamNested_OutOfOrder) {
- auto* src = R"(
+TEST_F(MultiplanarExternalTextureTest, ExternalTexturePassedAsParamNested_OutOfOrder) {
+ auto* src = R"(
fn nested(t : texture_external, s : sampler) {
textureSampleLevel(t, s, vec2<f32>(1.0, 2.0));
}
@@ -1063,37 +1293,54 @@ fn f(t : texture_external, s : sampler) {
@group(0) @binding(0) var ext_tex : texture_external;
@group(0) @binding(1) var smp : sampler;
-@stage(fragment)
+@fragment
fn main() {
f(ext_tex, smp);
}
)";
- auto* expect = R"(
+ auto* expect = R"(
+struct GammaTransferParams {
+ G : f32,
+ A : f32,
+ B : f32,
+ C : f32,
+ D : f32,
+ E : f32,
+ F : f32,
+ padding : u32,
+}
+
struct ExternalTextureParams {
numPlanes : u32,
- vr : f32,
- ug : f32,
- vg : f32,
- ub : f32,
+ yuvToRgbConversionMatrix : mat3x4<f32>,
+ gammaDecodeParams : GammaTransferParams,
+ gammaEncodeParams : GammaTransferParams,
+ gamutConversionMatrix : mat3x3<f32>,
}
@group(0) @binding(2) var ext_tex_plane_1 : texture_2d<f32>;
@group(0) @binding(3) var<uniform> ext_tex_params : ExternalTextureParams;
+fn gammaCorrection(v : vec3<f32>, params : GammaTransferParams) -> vec3<f32> {
+ let cond = (abs(v) < vec3<f32>(params.D));
+ let t = (sign(v) * ((params.C * abs(v)) + params.F));
+ let f = (sign(v) * (pow(((params.A * abs(v)) + params.B), vec3<f32>(params.G)) + params.E));
+ return select(f, t, cond);
+}
+
fn textureSampleExternal(plane0 : texture_2d<f32>, plane1 : texture_2d<f32>, smp : sampler, coord : vec2<f32>, params : ExternalTextureParams) -> vec4<f32> {
+ var color : vec3<f32>;
if ((params.numPlanes == 1u)) {
- return textureSampleLevel(plane0, smp, coord, 0.0);
+ color = textureSampleLevel(plane0, smp, coord, 0.0f).rgb;
+ } else {
+ color = (vec4<f32>(textureSampleLevel(plane0, smp, coord, 0.0f).r, textureSampleLevel(plane1, smp, coord, 0.0f).rg, 1.0f) * params.yuvToRgbConversionMatrix);
}
- let y = (textureSampleLevel(plane0, smp, coord, 0.0).r - 0.0625);
- let uv = (textureSampleLevel(plane1, smp, coord, 0.0).rg - 0.5);
- let u = uv.x;
- let v = uv.y;
- let r = ((1.164000034 * y) + (params.vr * v));
- let g = (((1.164000034 * y) - (params.ug * u)) - (params.vg * v));
- let b = ((1.164000034 * y) + (params.ub * u));
- return vec4<f32>(r, g, b, 1.0);
+ color = gammaCorrection(color, params.gammaDecodeParams);
+ color = (params.gamutConversionMatrix * color);
+ color = gammaCorrection(color, params.gammaEncodeParams);
+ return vec4<f32>(color, 1.0f);
}
fn nested(t : texture_2d<f32>, ext_tex_plane_1_1 : texture_2d<f32>, ext_tex_params_1 : ExternalTextureParams, s : sampler) {
@@ -1108,37 +1355,46 @@ fn f(t : texture_2d<f32>, ext_tex_plane_1_2 : texture_2d<f32>, ext_tex_params_2
@group(0) @binding(1) var smp : sampler;
-@stage(fragment)
+@fragment
fn main() {
f(ext_tex, ext_tex_plane_1, ext_tex_params, smp);
}
)";
- DataMap data;
- data.Add<MultiplanarExternalTexture::NewBindingPoints>(
- MultiplanarExternalTexture::BindingsMap{
- {{0, 0}, {{0, 2}, {0, 3}}},
- });
- auto got = Run<MultiplanarExternalTexture>(src, data);
- EXPECT_EQ(expect, str(got));
+ DataMap data;
+ data.Add<MultiplanarExternalTexture::NewBindingPoints>(MultiplanarExternalTexture::BindingsMap{
+ {{0, 0}, {{0, 2}, {0, 3}}},
+ });
+ auto got = Run<MultiplanarExternalTexture>(src, data);
+ EXPECT_EQ(expect, str(got));
}
// Tests that the transform works with a function using an external texture,
// even if there's no external texture declared at module scope.
-TEST_F(MultiplanarExternalTextureTest,
- ExternalTexturePassedAsParamWithoutGlobalDecl) {
- auto* src = R"(
+TEST_F(MultiplanarExternalTextureTest, ExternalTexturePassedAsParamWithoutGlobalDecl) {
+ auto* src = R"(
fn f(ext_tex : texture_external) -> vec2<i32> {
return textureDimensions(ext_tex);
}
)";
- auto* expect = R"(
+ auto* expect = R"(
+struct GammaTransferParams {
+ G : f32,
+ A : f32,
+ B : f32,
+ C : f32,
+ D : f32,
+ E : f32,
+ F : f32,
+ padding : u32,
+}
+
struct ExternalTextureParams {
numPlanes : u32,
- vr : f32,
- ug : f32,
- vg : f32,
- ub : f32,
+ yuvToRgbConversionMatrix : mat3x4<f32>,
+ gammaDecodeParams : GammaTransferParams,
+ gammaEncodeParams : GammaTransferParams,
+ gamutConversionMatrix : mat3x3<f32>,
}
fn f(ext_tex : texture_2d<f32>, ext_tex_plane_1 : texture_2d<f32>, ext_tex_params : ExternalTextureParams) -> vec2<i32> {
@@ -1146,16 +1402,16 @@ fn f(ext_tex : texture_2d<f32>, ext_tex_plane_1 : texture_2d<f32>, ext_tex_param
}
)";
- DataMap data;
- data.Add<MultiplanarExternalTexture::NewBindingPoints>(
- MultiplanarExternalTexture::BindingsMap{{{0, 0}, {{0, 1}, {0, 2}}}});
- auto got = Run<MultiplanarExternalTexture>(src, data);
- EXPECT_EQ(expect, str(got));
+ DataMap data;
+ data.Add<MultiplanarExternalTexture::NewBindingPoints>(
+ MultiplanarExternalTexture::BindingsMap{{{0, 0}, {{0, 1}, {0, 2}}}});
+ auto got = Run<MultiplanarExternalTexture>(src, data);
+ EXPECT_EQ(expect, str(got));
}
// Tests that the transform handles aliases to external textures
TEST_F(MultiplanarExternalTextureTest, ExternalTextureAlias) {
- auto* src = R"(
+ auto* src = R"(
type ET = texture_external;
fn f(t : ET, s : sampler) {
@@ -1165,19 +1421,30 @@ fn f(t : ET, s : sampler) {
@group(0) @binding(0) var ext_tex : ET;
@group(0) @binding(1) var smp : sampler;
-@stage(fragment)
+@fragment
fn main() {
f(ext_tex, smp);
}
)";
- auto* expect = R"(
+ auto* expect = R"(
+struct GammaTransferParams {
+ G : f32,
+ A : f32,
+ B : f32,
+ C : f32,
+ D : f32,
+ E : f32,
+ F : f32,
+ padding : u32,
+}
+
struct ExternalTextureParams {
numPlanes : u32,
- vr : f32,
- ug : f32,
- vg : f32,
- ub : f32,
+ yuvToRgbConversionMatrix : mat3x4<f32>,
+ gammaDecodeParams : GammaTransferParams,
+ gammaEncodeParams : GammaTransferParams,
+ gamutConversionMatrix : mat3x3<f32>,
}
@group(0) @binding(2) var ext_tex_plane_1 : texture_2d<f32>;
@@ -1186,18 +1453,24 @@ struct ExternalTextureParams {
type ET = texture_external;
+fn gammaCorrection(v : vec3<f32>, params : GammaTransferParams) -> vec3<f32> {
+ let cond = (abs(v) < vec3<f32>(params.D));
+ let t = (sign(v) * ((params.C * abs(v)) + params.F));
+ let f = (sign(v) * (pow(((params.A * abs(v)) + params.B), vec3<f32>(params.G)) + params.E));
+ return select(f, t, cond);
+}
+
fn textureSampleExternal(plane0 : texture_2d<f32>, plane1 : texture_2d<f32>, smp : sampler, coord : vec2<f32>, params : ExternalTextureParams) -> vec4<f32> {
+ var color : vec3<f32>;
if ((params.numPlanes == 1u)) {
- return textureSampleLevel(plane0, smp, coord, 0.0);
+ color = textureSampleLevel(plane0, smp, coord, 0.0f).rgb;
+ } else {
+ color = (vec4<f32>(textureSampleLevel(plane0, smp, coord, 0.0f).r, textureSampleLevel(plane1, smp, coord, 0.0f).rg, 1.0f) * params.yuvToRgbConversionMatrix);
}
- let y = (textureSampleLevel(plane0, smp, coord, 0.0).r - 0.0625);
- let uv = (textureSampleLevel(plane1, smp, coord, 0.0).rg - 0.5);
- let u = uv.x;
- let v = uv.y;
- let r = ((1.164000034 * y) + (params.vr * v));
- let g = (((1.164000034 * y) - (params.ug * u)) - (params.vg * v));
- let b = ((1.164000034 * y) + (params.ub * u));
- return vec4<f32>(r, g, b, 1.0);
+ color = gammaCorrection(color, params.gammaDecodeParams);
+ color = (params.gamutConversionMatrix * color);
+ color = gammaCorrection(color, params.gammaEncodeParams);
+ return vec4<f32>(color, 1.0f);
}
fn f(t : texture_2d<f32>, ext_tex_plane_1_1 : texture_2d<f32>, ext_tex_params_1 : ExternalTextureParams, s : sampler) {
@@ -1208,24 +1481,23 @@ fn f(t : texture_2d<f32>, ext_tex_plane_1_1 : texture_2d<f32>, ext_tex_params_1
@group(0) @binding(1) var smp : sampler;
-@stage(fragment)
+@fragment
fn main() {
f(ext_tex, ext_tex_plane_1, ext_tex_params, smp);
}
)";
- DataMap data;
- data.Add<MultiplanarExternalTexture::NewBindingPoints>(
- MultiplanarExternalTexture::BindingsMap{
- {{0, 0}, {{0, 2}, {0, 3}}},
- });
- auto got = Run<MultiplanarExternalTexture>(src, data);
- EXPECT_EQ(expect, str(got));
+ DataMap data;
+ data.Add<MultiplanarExternalTexture::NewBindingPoints>(MultiplanarExternalTexture::BindingsMap{
+ {{0, 0}, {{0, 2}, {0, 3}}},
+ });
+ auto got = Run<MultiplanarExternalTexture>(src, data);
+ EXPECT_EQ(expect, str(got));
}
// Tests that the transform handles aliases to external textures
TEST_F(MultiplanarExternalTextureTest, ExternalTextureAlias_OutOfOrder) {
- auto* src = R"(
-@stage(fragment)
+ auto* src = R"(
+@fragment
fn main() {
f(ext_tex, smp);
}
@@ -1240,36 +1512,53 @@ fn f(t : ET, s : sampler) {
type ET = texture_external;
)";
- auto* expect = R"(
+ auto* expect = R"(
+struct GammaTransferParams {
+ G : f32,
+ A : f32,
+ B : f32,
+ C : f32,
+ D : f32,
+ E : f32,
+ F : f32,
+ padding : u32,
+}
+
struct ExternalTextureParams {
numPlanes : u32,
- vr : f32,
- ug : f32,
- vg : f32,
- ub : f32,
+ yuvToRgbConversionMatrix : mat3x4<f32>,
+ gammaDecodeParams : GammaTransferParams,
+ gammaEncodeParams : GammaTransferParams,
+ gamutConversionMatrix : mat3x3<f32>,
}
@group(0) @binding(2) var ext_tex_plane_1 : texture_2d<f32>;
@group(0) @binding(3) var<uniform> ext_tex_params : ExternalTextureParams;
-@stage(fragment)
+@fragment
fn main() {
f(ext_tex, ext_tex_plane_1, ext_tex_params, smp);
}
+fn gammaCorrection(v : vec3<f32>, params : GammaTransferParams) -> vec3<f32> {
+ let cond = (abs(v) < vec3<f32>(params.D));
+ let t = (sign(v) * ((params.C * abs(v)) + params.F));
+ let f = (sign(v) * (pow(((params.A * abs(v)) + params.B), vec3<f32>(params.G)) + params.E));
+ return select(f, t, cond);
+}
+
fn textureSampleExternal(plane0 : texture_2d<f32>, plane1 : texture_2d<f32>, smp : sampler, coord : vec2<f32>, params : ExternalTextureParams) -> vec4<f32> {
+ var color : vec3<f32>;
if ((params.numPlanes == 1u)) {
- return textureSampleLevel(plane0, smp, coord, 0.0);
+ color = textureSampleLevel(plane0, smp, coord, 0.0f).rgb;
+ } else {
+ color = (vec4<f32>(textureSampleLevel(plane0, smp, coord, 0.0f).r, textureSampleLevel(plane1, smp, coord, 0.0f).rg, 1.0f) * params.yuvToRgbConversionMatrix);
}
- let y = (textureSampleLevel(plane0, smp, coord, 0.0).r - 0.0625);
- let uv = (textureSampleLevel(plane1, smp, coord, 0.0).rg - 0.5);
- let u = uv.x;
- let v = uv.y;
- let r = ((1.164000034 * y) + (params.vr * v));
- let g = (((1.164000034 * y) - (params.ug * u)) - (params.vg * v));
- let b = ((1.164000034 * y) + (params.ub * u));
- return vec4<f32>(r, g, b, 1.0);
+ color = gammaCorrection(color, params.gammaDecodeParams);
+ color = (params.gamutConversionMatrix * color);
+ color = gammaCorrection(color, params.gammaEncodeParams);
+ return vec4<f32>(color, 1.0f);
}
fn f(t : texture_2d<f32>, ext_tex_plane_1_1 : texture_2d<f32>, ext_tex_params_1 : ExternalTextureParams, s : sampler) {
@@ -1282,13 +1571,12 @@ fn f(t : texture_2d<f32>, ext_tex_plane_1_1 : texture_2d<f32>, ext_tex_params_1
type ET = texture_external;
)";
- DataMap data;
- data.Add<MultiplanarExternalTexture::NewBindingPoints>(
- MultiplanarExternalTexture::BindingsMap{
- {{0, 0}, {{0, 2}, {0, 3}}},
- });
- auto got = Run<MultiplanarExternalTexture>(src, data);
- EXPECT_EQ(expect, str(got));
+ DataMap data;
+ data.Add<MultiplanarExternalTexture::NewBindingPoints>(MultiplanarExternalTexture::BindingsMap{
+ {{0, 0}, {{0, 2}, {0, 3}}},
+ });
+ auto got = Run<MultiplanarExternalTexture>(src, data);
+ EXPECT_EQ(expect, str(got));
}
} // namespace
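Every test in this file configures the transform the same way; the condensed sketch below (illustrative only, reusing the DataMap/Run helper pattern shown in the tests above and assuming nothing beyond it) spells out what the nested BindingsMap initializer means.

    // Hypothetical, condensed restatement of the pattern used by the tests above.
    // The outer key is the original texture_external binding point; the mapped
    // pair supplies the binding points for the added plane-1 texture and the
    // ExternalTextureParams uniform (names taken from the expected WGSL output).
    DataMap data;
    data.Add<MultiplanarExternalTexture::NewBindingPoints>(
        MultiplanarExternalTexture::BindingsMap{
            // texture_external at {group 0, binding 0} ->
            //   plane-1 texture at {group 0, binding 1},
            //   params uniform at {group 0, binding 2}
            {{0, 0}, {{0, 1}, {0, 2}}},
        });
    auto got = Run<MultiplanarExternalTexture>(src, data);  // src: WGSL that uses texture_external
    EXPECT_EQ(expect, str(got));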
diff --git a/chromium/third_party/dawn/src/tint/transform/num_workgroups_from_uniform.cc b/chromium/third_party/dawn/src/tint/transform/num_workgroups_from_uniform.cc
index 72f7fb82eb8..0bb1518544d 100644
--- a/chromium/third_party/dawn/src/tint/transform/num_workgroups_from_uniform.cc
+++ b/chromium/third_party/dawn/src/tint/transform/num_workgroups_from_uniform.cc
@@ -32,135 +32,147 @@ namespace {
/// Accessor describes the identifiers used in a member accessor that is being
/// used to retrieve the num_workgroups builtin from a parameter.
struct Accessor {
- Symbol param;
- Symbol member;
-
- /// Equality operator
- bool operator==(const Accessor& other) const {
- return param == other.param && member == other.member;
- }
- /// Hash function
- struct Hasher {
- size_t operator()(const Accessor& a) const {
- return utils::Hash(a.param, a.member);
+ Symbol param;
+ Symbol member;
+
+ /// Equality operator
+ bool operator==(const Accessor& other) const {
+ return param == other.param && member == other.member;
}
- };
+ /// Hash function
+ struct Hasher {
+ size_t operator()(const Accessor& a) const { return utils::Hash(a.param, a.member); }
+ };
};
} // namespace
NumWorkgroupsFromUniform::NumWorkgroupsFromUniform() = default;
NumWorkgroupsFromUniform::~NumWorkgroupsFromUniform() = default;
-bool NumWorkgroupsFromUniform::ShouldRun(const Program* program,
- const DataMap&) const {
- for (auto* node : program->ASTNodes().Objects()) {
- if (auto* attr = node->As<ast::BuiltinAttribute>()) {
- if (attr->builtin == ast::Builtin::kNumWorkgroups) {
- return true;
- }
+bool NumWorkgroupsFromUniform::ShouldRun(const Program* program, const DataMap&) const {
+ for (auto* node : program->ASTNodes().Objects()) {
+ if (auto* attr = node->As<ast::BuiltinAttribute>()) {
+ if (attr->builtin == ast::Builtin::kNumWorkgroups) {
+ return true;
+ }
+ }
}
- }
- return false;
+ return false;
}
-void NumWorkgroupsFromUniform::Run(CloneContext& ctx,
- const DataMap& inputs,
- DataMap&) const {
- auto* cfg = inputs.Get<Config>();
- if (cfg == nullptr) {
- ctx.dst->Diagnostics().add_error(
- diag::System::Transform,
- "missing transform data for " + std::string(TypeInfo().name));
- return;
- }
-
- const char* kNumWorkgroupsMemberName = "num_workgroups";
-
- // Find all entry point parameters that declare the num_workgroups builtin.
- std::unordered_set<Accessor, Accessor::Hasher> to_replace;
- for (auto* func : ctx.src->AST().Functions()) {
- // num_workgroups is only valid for compute stages.
- if (func->PipelineStage() != ast::PipelineStage::kCompute) {
- continue;
+void NumWorkgroupsFromUniform::Run(CloneContext& ctx, const DataMap& inputs, DataMap&) const {
+ auto* cfg = inputs.Get<Config>();
+ if (cfg == nullptr) {
+ ctx.dst->Diagnostics().add_error(
+ diag::System::Transform, "missing transform data for " + std::string(TypeInfo().name));
+ return;
}
- for (auto* param : ctx.src->Sem().Get(func)->Parameters()) {
- // Because the CanonicalizeEntryPointIO transform has been run, builtins
- // will only appear as struct members.
- auto* str = param->Type()->As<sem::Struct>();
- if (!str) {
- continue;
- }
-
- for (auto* member : str->Members()) {
- auto* builtin = ast::GetAttribute<ast::BuiltinAttribute>(
- member->Declaration()->attributes);
- if (!builtin || builtin->builtin != ast::Builtin::kNumWorkgroups) {
- continue;
+ const char* kNumWorkgroupsMemberName = "num_workgroups";
+
+ // Find all entry point parameters that declare the num_workgroups builtin.
+ std::unordered_set<Accessor, Accessor::Hasher> to_replace;
+ for (auto* func : ctx.src->AST().Functions()) {
+ // num_workgroups is only valid for compute stages.
+ if (func->PipelineStage() != ast::PipelineStage::kCompute) {
+ continue;
}
- // Capture the symbols that would be used to access this member, which
- // we will replace later. We currently have no way to get from the
- // parameter directly to the member accessor expressions that use it.
- to_replace.insert(
- {param->Declaration()->symbol, member->Declaration()->symbol});
-
- // Remove the struct member.
- // The CanonicalizeEntryPointIO transform will have generated this
- // struct uniquely for this particular entry point, so we know that
- // there will be no other uses of this struct in the module and that we
- // can safely modify it here.
- ctx.Remove(str->Declaration()->members, member->Declaration());
-
- // If this is the only member, remove the struct and parameter too.
- if (str->Members().size() == 1) {
- ctx.Remove(func->params, param->Declaration());
- ctx.Remove(ctx.src->AST().GlobalDeclarations(), str->Declaration());
+ for (auto* param : ctx.src->Sem().Get(func)->Parameters()) {
+ // Because the CanonicalizeEntryPointIO transform has been run, builtins
+ // will only appear as struct members.
+ auto* str = param->Type()->As<sem::Struct>();
+ if (!str) {
+ continue;
+ }
+
+ for (auto* member : str->Members()) {
+ auto* builtin =
+ ast::GetAttribute<ast::BuiltinAttribute>(member->Declaration()->attributes);
+ if (!builtin || builtin->builtin != ast::Builtin::kNumWorkgroups) {
+ continue;
+ }
+
+ // Capture the symbols that would be used to access this member, which
+ // we will replace later. We currently have no way to get from the
+ // parameter directly to the member accessor expressions that use it.
+ to_replace.insert({param->Declaration()->symbol, member->Declaration()->symbol});
+
+ // Remove the struct member.
+ // The CanonicalizeEntryPointIO transform will have generated this
+ // struct uniquely for this particular entry point, so we know that
+ // there will be no other uses of this struct in the module and that we
+ // can safely modify it here.
+ ctx.Remove(str->Declaration()->members, member->Declaration());
+
+ // If this is the only member, remove the struct and parameter too.
+ if (str->Members().size() == 1) {
+ ctx.Remove(func->params, param->Declaration());
+ ctx.Remove(ctx.src->AST().GlobalDeclarations(), str->Declaration());
+ }
+ }
}
- }
- }
- }
-
- // Get (or create, on first call) the uniform buffer that will receive the
- // number of workgroups.
- const ast::Variable* num_workgroups_ubo = nullptr;
- auto get_ubo = [&]() {
- if (!num_workgroups_ubo) {
- auto* num_workgroups_struct = ctx.dst->Structure(
- ctx.dst->Sym(),
- {ctx.dst->Member(kNumWorkgroupsMemberName,
- ctx.dst->ty.vec3(ctx.dst->ty.u32()))});
- num_workgroups_ubo = ctx.dst->Global(
- ctx.dst->Sym(), ctx.dst->ty.Of(num_workgroups_struct),
- ast::StorageClass::kUniform,
- ast::AttributeList{ctx.dst->GroupAndBinding(
- cfg->ubo_binding.group, cfg->ubo_binding.binding)});
- }
- return num_workgroups_ubo;
- };
-
- // Now replace all the places where the builtins are accessed with the value
- // loaded from the uniform buffer.
- for (auto* node : ctx.src->ASTNodes().Objects()) {
- auto* accessor = node->As<ast::MemberAccessorExpression>();
- if (!accessor) {
- continue;
- }
- auto* ident = accessor->structure->As<ast::IdentifierExpression>();
- if (!ident) {
- continue;
}
- if (to_replace.count({ident->symbol, accessor->member->symbol})) {
- ctx.Replace(accessor, ctx.dst->MemberAccessor(get_ubo()->symbol,
- kNumWorkgroupsMemberName));
+ // Get (or create, on first call) the uniform buffer that will receive the
+ // number of workgroups.
+ const ast::Variable* num_workgroups_ubo = nullptr;
+ auto get_ubo = [&]() {
+ if (!num_workgroups_ubo) {
+ auto* num_workgroups_struct = ctx.dst->Structure(
+ ctx.dst->Sym(),
+ {ctx.dst->Member(kNumWorkgroupsMemberName, ctx.dst->ty.vec3(ctx.dst->ty.u32()))});
+
+ uint32_t group, binding;
+ if (cfg->ubo_binding.has_value()) {
+ // If cfg->ubo_binding holds a value, use the specified binding point.
+ group = cfg->ubo_binding->group;
+ binding = cfg->ubo_binding->binding;
+ } else {
+                // If cfg->ubo_binding holds no value, use binding 0 of the largest used group
+                // plus 1, or group 0 if no resource is bound.
+ group = 0;
+
+ for (auto* var : ctx.src->AST().GlobalVariables()) {
+ if (auto binding_point = var->BindingPoint()) {
+ if (binding_point.group->value >= group) {
+ group = binding_point.group->value + 1;
+ }
+ }
+ }
+
+ binding = 0;
+ }
+
+ num_workgroups_ubo = ctx.dst->Global(
+ ctx.dst->Sym(), ctx.dst->ty.Of(num_workgroups_struct), ast::StorageClass::kUniform,
+ ast::AttributeList{ctx.dst->GroupAndBinding(group, binding)});
+ }
+ return num_workgroups_ubo;
+ };
+
+ // Now replace all the places where the builtins are accessed with the value
+ // loaded from the uniform buffer.
+ for (auto* node : ctx.src->ASTNodes().Objects()) {
+ auto* accessor = node->As<ast::MemberAccessorExpression>();
+ if (!accessor) {
+ continue;
+ }
+ auto* ident = accessor->structure->As<ast::IdentifierExpression>();
+ if (!ident) {
+ continue;
+ }
+
+ if (to_replace.count({ident->symbol, accessor->member->symbol})) {
+ ctx.Replace(accessor,
+ ctx.dst->MemberAccessor(get_ubo()->symbol, kNumWorkgroupsMemberName));
+ }
}
- }
- ctx.Clone();
+ ctx.Clone();
}
-NumWorkgroupsFromUniform::Config::Config(sem::BindingPoint ubo_bp)
+NumWorkgroupsFromUniform::Config::Config(std::optional<sem::BindingPoint> ubo_bp)
: ubo_binding(ubo_bp) {}
NumWorkgroupsFromUniform::Config::Config(const Config&) = default;
NumWorkgroupsFromUniform::Config::~Config() = default;
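
For reference, the fallback added to Run() above can be read in isolation as follows. This is a hedged, standalone restatement rather than Tint code: BindingPoint and ChooseNumWorkgroupsBinding are stand-ins for sem::BindingPoint and the walk over global variables, while the selection rule (the configured point if present, otherwise binding 0 of the largest used group plus one, or group 0 when nothing is bound) matches the new code and the UnspecifiedBindingPoint tests further down.

#include <cstdint>
#include <iostream>
#include <optional>
#include <vector>

// Stand-in for tint::sem::BindingPoint; only group and binding matter here.
struct BindingPoint {
    uint32_t group;
    uint32_t binding;
};

// Mirrors the fallback added to NumWorkgroupsFromUniform::Run(): honour an
// explicitly configured binding point, otherwise take binding 0 of the
// largest group already in use plus one (group 0 if nothing is bound).
BindingPoint ChooseNumWorkgroupsBinding(const std::optional<BindingPoint>& configured,
                                        const std::vector<BindingPoint>& existing) {
    if (configured.has_value()) {
        return *configured;
    }
    uint32_t group = 0;
    for (const BindingPoint& bp : existing) {
        if (bp.group >= group) {
            group = bp.group + 1;
        }
    }
    return BindingPoint{group, 0u};
}

int main() {
    // Groups 0, 1, 3 and 4 are in use, as in the
    // UnspecifiedBindingPoint_MultipleResourceBound test added further down;
    // the expected result is @group(5) @binding(0).
    std::vector<BindingPoint> bound = {{0u, 0u}, {1u, 0u}, {1u, 1u}, {3u, 0u},
                                       {4u, 0u}, {0u, 1u}, {1u, 3u}, {3u, 2u}};
    BindingPoint bp = ChooseNumWorkgroupsBinding(std::nullopt, bound);
    std::cout << bp.group << " " << bp.binding << "\n";  // prints "5 0"
    return 0;
}
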
diff --git a/chromium/third_party/dawn/src/tint/transform/num_workgroups_from_uniform.h b/chromium/third_party/dawn/src/tint/transform/num_workgroups_from_uniform.h
index e4cf20ed97c..9f0b6c1a7e5 100644
--- a/chromium/third_party/dawn/src/tint/transform/num_workgroups_from_uniform.h
+++ b/chromium/third_party/dawn/src/tint/transform/num_workgroups_from_uniform.h
@@ -15,6 +15,8 @@
#ifndef SRC_TINT_TRANSFORM_NUM_WORKGROUPS_FROM_UNIFORM_H_
#define SRC_TINT_TRANSFORM_NUM_WORKGROUPS_FROM_UNIFORM_H_
+#include <optional> // NOLINT(build/include_order)
+
#include "src/tint/sem/binding_point.h"
#include "src/tint/transform/transform.h"
@@ -42,46 +44,47 @@ namespace tint::transform {
///
/// @note Depends on the following transforms to have been run first:
/// * CanonicalizeEntryPointIO
-class NumWorkgroupsFromUniform
- : public Castable<NumWorkgroupsFromUniform, Transform> {
- public:
- /// Constructor
- NumWorkgroupsFromUniform();
- /// Destructor
- ~NumWorkgroupsFromUniform() override;
-
- /// Configuration options for the NumWorkgroupsFromUniform transform.
- struct Config : public Castable<Data, transform::Data> {
+class NumWorkgroupsFromUniform : public Castable<NumWorkgroupsFromUniform, Transform> {
+ public:
/// Constructor
- /// @param ubo_bp the binding point to use for the generated uniform buffer.
- explicit Config(sem::BindingPoint ubo_bp);
+ NumWorkgroupsFromUniform();
+ /// Destructor
+ ~NumWorkgroupsFromUniform() override;
- /// Copy constructor
- Config(const Config&);
+ /// Configuration options for the NumWorkgroupsFromUniform transform.
+ struct Config : public Castable<Data, transform::Data> {
+ /// Constructor
+ /// @param ubo_bp the binding point to use for the generated uniform buffer. If ubo_bp
+ /// contains no value, a free binding point will be used to ensure the generated program is
+ /// valid. Specifically, binding 0 of the largest used group plus 1 is used if at least one
+ /// resource is bound, otherwise group 0 binding 0 is used.
+ explicit Config(std::optional<sem::BindingPoint> ubo_bp);
- /// Destructor
- ~Config() override;
+ /// Copy constructor
+ Config(const Config&);
+
+ /// Destructor
+ ~Config() override;
- /// The binding point to use for the generated uniform buffer.
- sem::BindingPoint ubo_binding;
- };
+ /// The binding point to use for the generated uniform buffer. If ubo_bp contains no value,
+ /// a free binding point will be used. Specifically, binding 0 of the largest used group
+ /// plus 1 is used if at least one resource is bound, otherwise group 0 binding 0 is used.
+ std::optional<sem::BindingPoint> ubo_binding;
+ };
- /// @param program the program to inspect
- /// @param data optional extra transform-specific input data
- /// @returns true if this transform should be run for the given program
- bool ShouldRun(const Program* program,
- const DataMap& data = {}) const override;
+ /// @param program the program to inspect
+ /// @param data optional extra transform-specific input data
+ /// @returns true if this transform should be run for the given program
+ bool ShouldRun(const Program* program, const DataMap& data = {}) const override;
- protected:
- /// Runs the transform using the CloneContext built for transforming a
- /// program. Run() is responsible for calling Clone() on the CloneContext.
- /// @param ctx the CloneContext primed with the input program and
- /// ProgramBuilder
- /// @param inputs optional extra transform-specific input data
- /// @param outputs optional extra transform-specific output data
- void Run(CloneContext& ctx,
- const DataMap& inputs,
- DataMap& outputs) const override;
+ protected:
+ /// Runs the transform using the CloneContext built for transforming a
+ /// program. Run() is responsible for calling Clone() on the CloneContext.
+ /// @param ctx the CloneContext primed with the input program and
+ /// ProgramBuilder
+ /// @param inputs optional extra transform-specific input data
+ /// @param outputs optional extra transform-specific output data
+ void Run(CloneContext& ctx, const DataMap& inputs, DataMap& outputs) const override;
};
} // namespace tint::transform
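
The Config change above (sem::BindingPoint becomes std::optional<sem::BindingPoint>) is exercised both ways by the tests that follow: an explicit sem::BindingPoint{0, 30u} and std::nullopt. The minimal sketch below mirrors just that API shape with local stand-in types so it compiles on its own; it is illustrative only and does not use the real Tint headers.

#include <cstdint>
#include <iostream>
#include <optional>

// Local stand-ins for sem::BindingPoint and NumWorkgroupsFromUniform::Config,
// only to show the two ways the transform can now be configured.
struct BindingPoint {
    uint32_t group;
    uint32_t binding;
};

struct Config {
    explicit Config(std::optional<BindingPoint> bp) : ubo_binding(bp) {}
    // If this holds no value, the transform picks a free binding point itself.
    std::optional<BindingPoint> ubo_binding;
};

int main() {
    Config explicit_cfg(BindingPoint{0u, 30u});  // pin the UBO to @group(0) @binding(30)
    Config auto_cfg(std::nullopt);               // let the transform choose (see the new tests below)
    std::cout << explicit_cfg.ubo_binding.has_value() << " "
              << auto_cfg.ubo_binding.has_value() << "\n";  // prints "1 0"
    return 0;
}
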
diff --git a/chromium/third_party/dawn/src/tint/transform/num_workgroups_from_uniform_test.cc b/chromium/third_party/dawn/src/tint/transform/num_workgroups_from_uniform_test.cc
index 734d11be840..8562c01d2a1 100644
--- a/chromium/third_party/dawn/src/tint/transform/num_workgroups_from_uniform_test.cc
+++ b/chromium/third_party/dawn/src/tint/transform/num_workgroups_from_uniform_test.cc
@@ -26,44 +26,42 @@ namespace {
using NumWorkgroupsFromUniformTest = TransformTest;
TEST_F(NumWorkgroupsFromUniformTest, ShouldRunEmptyModule) {
- auto* src = R"()";
+ auto* src = R"()";
- EXPECT_FALSE(ShouldRun<NumWorkgroupsFromUniform>(src));
+ EXPECT_FALSE(ShouldRun<NumWorkgroupsFromUniform>(src));
}
TEST_F(NumWorkgroupsFromUniformTest, ShouldRunHasNumWorkgroups) {
- auto* src = R"(
-@stage(compute) @workgroup_size(1)
+ auto* src = R"(
+@compute @workgroup_size(1)
fn main(@builtin(num_workgroups) num_wgs : vec3<u32>) {
}
)";
- EXPECT_TRUE(ShouldRun<NumWorkgroupsFromUniform>(src));
+ EXPECT_TRUE(ShouldRun<NumWorkgroupsFromUniform>(src));
}
TEST_F(NumWorkgroupsFromUniformTest, Error_MissingTransformData) {
- auto* src = R"(
-@stage(compute) @workgroup_size(1)
+ auto* src = R"(
+@compute @workgroup_size(1)
fn main(@builtin(num_workgroups) num_wgs : vec3<u32>) {
}
)";
- auto* expect =
- "error: missing transform data for "
- "tint::transform::NumWorkgroupsFromUniform";
+ auto* expect =
+ "error: missing transform data for "
+ "tint::transform::NumWorkgroupsFromUniform";
- DataMap data;
- data.Add<CanonicalizeEntryPointIO::Config>(
- CanonicalizeEntryPointIO::ShaderStyle::kHlsl);
- auto got = Run<Unshadow, CanonicalizeEntryPointIO, NumWorkgroupsFromUniform>(
- src, data);
+ DataMap data;
+ data.Add<CanonicalizeEntryPointIO::Config>(CanonicalizeEntryPointIO::ShaderStyle::kHlsl);
+ auto got = Run<Unshadow, CanonicalizeEntryPointIO, NumWorkgroupsFromUniform>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(NumWorkgroupsFromUniformTest, Basic) {
- auto* src = R"(
-@stage(compute) @workgroup_size(1)
+ auto* src = R"(
+@compute @workgroup_size(1)
fn main(@builtin(num_workgroups) num_wgs : vec3<u32>) {
let groups_x = num_wgs.x;
let groups_y = num_wgs.y;
@@ -71,7 +69,7 @@ fn main(@builtin(num_workgroups) num_wgs : vec3<u32>) {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct tint_symbol_2 {
num_workgroups : vec3<u32>,
}
@@ -84,28 +82,26 @@ fn main_inner(num_wgs : vec3<u32>) {
let groups_z = num_wgs.z;
}
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
main_inner(tint_symbol_3.num_workgroups);
}
)";
- DataMap data;
- data.Add<CanonicalizeEntryPointIO::Config>(
- CanonicalizeEntryPointIO::ShaderStyle::kHlsl);
- data.Add<NumWorkgroupsFromUniform::Config>(sem::BindingPoint{0, 30u});
- auto got = Run<Unshadow, CanonicalizeEntryPointIO, NumWorkgroupsFromUniform>(
- src, data);
- EXPECT_EQ(expect, str(got));
+ DataMap data;
+ data.Add<CanonicalizeEntryPointIO::Config>(CanonicalizeEntryPointIO::ShaderStyle::kHlsl);
+ data.Add<NumWorkgroupsFromUniform::Config>(sem::BindingPoint{0, 30u});
+ auto got = Run<Unshadow, CanonicalizeEntryPointIO, NumWorkgroupsFromUniform>(src, data);
+ EXPECT_EQ(expect, str(got));
}
TEST_F(NumWorkgroupsFromUniformTest, StructOnlyMember) {
- auto* src = R"(
+ auto* src = R"(
struct Builtins {
@builtin(num_workgroups) num_wgs : vec3<u32>,
};
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main(in : Builtins) {
let groups_x = in.num_wgs.x;
let groups_y = in.num_wgs.y;
@@ -113,7 +109,7 @@ fn main(in : Builtins) {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct tint_symbol_2 {
num_workgroups : vec3<u32>,
}
@@ -130,24 +126,22 @@ fn main_inner(in : Builtins) {
let groups_z = in.num_wgs.z;
}
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
main_inner(Builtins(tint_symbol_3.num_workgroups));
}
)";
- DataMap data;
- data.Add<CanonicalizeEntryPointIO::Config>(
- CanonicalizeEntryPointIO::ShaderStyle::kHlsl);
- data.Add<NumWorkgroupsFromUniform::Config>(sem::BindingPoint{0, 30u});
- auto got = Run<Unshadow, CanonicalizeEntryPointIO, NumWorkgroupsFromUniform>(
- src, data);
- EXPECT_EQ(expect, str(got));
+ DataMap data;
+ data.Add<CanonicalizeEntryPointIO::Config>(CanonicalizeEntryPointIO::ShaderStyle::kHlsl);
+ data.Add<NumWorkgroupsFromUniform::Config>(sem::BindingPoint{0, 30u});
+ auto got = Run<Unshadow, CanonicalizeEntryPointIO, NumWorkgroupsFromUniform>(src, data);
+ EXPECT_EQ(expect, str(got));
}
TEST_F(NumWorkgroupsFromUniformTest, StructOnlyMember_OutOfOrder) {
- auto* src = R"(
-@stage(compute) @workgroup_size(1)
+ auto* src = R"(
+@compute @workgroup_size(1)
fn main(in : Builtins) {
let groups_x = in.num_wgs.x;
let groups_y = in.num_wgs.y;
@@ -159,7 +153,7 @@ struct Builtins {
};
)";
- auto* expect = R"(
+ auto* expect = R"(
struct tint_symbol_2 {
num_workgroups : vec3<u32>,
}
@@ -172,7 +166,7 @@ fn main_inner(in : Builtins) {
let groups_z = in.num_wgs.z;
}
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
main_inner(Builtins(tint_symbol_3.num_workgroups));
}
@@ -182,24 +176,22 @@ struct Builtins {
}
)";
- DataMap data;
- data.Add<CanonicalizeEntryPointIO::Config>(
- CanonicalizeEntryPointIO::ShaderStyle::kHlsl);
- data.Add<NumWorkgroupsFromUniform::Config>(sem::BindingPoint{0, 30u});
- auto got = Run<Unshadow, CanonicalizeEntryPointIO, NumWorkgroupsFromUniform>(
- src, data);
- EXPECT_EQ(expect, str(got));
+ DataMap data;
+ data.Add<CanonicalizeEntryPointIO::Config>(CanonicalizeEntryPointIO::ShaderStyle::kHlsl);
+ data.Add<NumWorkgroupsFromUniform::Config>(sem::BindingPoint{0, 30u});
+ auto got = Run<Unshadow, CanonicalizeEntryPointIO, NumWorkgroupsFromUniform>(src, data);
+ EXPECT_EQ(expect, str(got));
}
TEST_F(NumWorkgroupsFromUniformTest, StructMultipleMembers) {
- auto* src = R"(
+ auto* src = R"(
struct Builtins {
@builtin(global_invocation_id) gid : vec3<u32>,
@builtin(num_workgroups) num_wgs : vec3<u32>,
@builtin(workgroup_id) wgid : vec3<u32>,
};
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main(in : Builtins) {
let groups_x = in.num_wgs.x;
let groups_y = in.num_wgs.y;
@@ -207,7 +199,7 @@ fn main(in : Builtins) {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct tint_symbol_2 {
num_workgroups : vec3<u32>,
}
@@ -233,24 +225,22 @@ fn main_inner(in : Builtins) {
let groups_z = in.num_wgs.z;
}
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main(tint_symbol : tint_symbol_1) {
main_inner(Builtins(tint_symbol.gid, tint_symbol_3.num_workgroups, tint_symbol.wgid));
}
)";
- DataMap data;
- data.Add<CanonicalizeEntryPointIO::Config>(
- CanonicalizeEntryPointIO::ShaderStyle::kHlsl);
- data.Add<NumWorkgroupsFromUniform::Config>(sem::BindingPoint{0, 30u});
- auto got = Run<Unshadow, CanonicalizeEntryPointIO, NumWorkgroupsFromUniform>(
- src, data);
- EXPECT_EQ(expect, str(got));
+ DataMap data;
+ data.Add<CanonicalizeEntryPointIO::Config>(CanonicalizeEntryPointIO::ShaderStyle::kHlsl);
+ data.Add<NumWorkgroupsFromUniform::Config>(sem::BindingPoint{0, 30u});
+ auto got = Run<Unshadow, CanonicalizeEntryPointIO, NumWorkgroupsFromUniform>(src, data);
+ EXPECT_EQ(expect, str(got));
}
TEST_F(NumWorkgroupsFromUniformTest, StructMultipleMembers_OutOfOrder) {
- auto* src = R"(
-@stage(compute) @workgroup_size(1)
+ auto* src = R"(
+@compute @workgroup_size(1)
fn main(in : Builtins) {
let groups_x = in.num_wgs.x;
let groups_y = in.num_wgs.y;
@@ -265,7 +255,7 @@ struct Builtins {
)";
- auto* expect = R"(
+ auto* expect = R"(
struct tint_symbol_2 {
num_workgroups : vec3<u32>,
}
@@ -285,7 +275,7 @@ fn main_inner(in : Builtins) {
let groups_z = in.num_wgs.z;
}
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main(tint_symbol : tint_symbol_1) {
main_inner(Builtins(tint_symbol.gid, tint_symbol_3.num_workgroups, tint_symbol.wgid));
}
@@ -297,17 +287,15 @@ struct Builtins {
}
)";
- DataMap data;
- data.Add<CanonicalizeEntryPointIO::Config>(
- CanonicalizeEntryPointIO::ShaderStyle::kHlsl);
- data.Add<NumWorkgroupsFromUniform::Config>(sem::BindingPoint{0, 30u});
- auto got = Run<Unshadow, CanonicalizeEntryPointIO, NumWorkgroupsFromUniform>(
- src, data);
- EXPECT_EQ(expect, str(got));
+ DataMap data;
+ data.Add<CanonicalizeEntryPointIO::Config>(CanonicalizeEntryPointIO::ShaderStyle::kHlsl);
+ data.Add<NumWorkgroupsFromUniform::Config>(sem::BindingPoint{0, 30u});
+ auto got = Run<Unshadow, CanonicalizeEntryPointIO, NumWorkgroupsFromUniform>(src, data);
+ EXPECT_EQ(expect, str(got));
}
TEST_F(NumWorkgroupsFromUniformTest, MultipleEntryPoints) {
- auto* src = R"(
+ auto* src = R"(
struct Builtins1 {
@builtin(num_workgroups) num_wgs : vec3<u32>,
};
@@ -318,21 +306,21 @@ struct Builtins2 {
@builtin(workgroup_id) wgid : vec3<u32>,
};
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main1(in : Builtins1) {
let groups_x = in.num_wgs.x;
let groups_y = in.num_wgs.y;
let groups_z = in.num_wgs.z;
}
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main2(in : Builtins2) {
let groups_x = in.num_wgs.x;
let groups_y = in.num_wgs.y;
let groups_z = in.num_wgs.z;
}
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main3(@builtin(num_workgroups) num_wgs : vec3<u32>) {
let groups_x = num_wgs.x;
let groups_y = num_wgs.y;
@@ -340,7 +328,7 @@ fn main3(@builtin(num_workgroups) num_wgs : vec3<u32>) {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct tint_symbol_6 {
num_workgroups : vec3<u32>,
}
@@ -363,7 +351,7 @@ fn main1_inner(in : Builtins1) {
let groups_z = in.num_wgs.z;
}
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main1() {
main1_inner(Builtins1(tint_symbol_7.num_workgroups));
}
@@ -381,7 +369,7 @@ fn main2_inner(in : Builtins2) {
let groups_z = in.num_wgs.z;
}
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main2(tint_symbol_2 : tint_symbol_3) {
main2_inner(Builtins2(tint_symbol_2.gid, tint_symbol_7.num_workgroups, tint_symbol_2.wgid));
}
@@ -392,34 +380,32 @@ fn main3_inner(num_wgs : vec3<u32>) {
let groups_z = num_wgs.z;
}
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main3() {
main3_inner(tint_symbol_7.num_workgroups);
}
)";
- DataMap data;
- data.Add<CanonicalizeEntryPointIO::Config>(
- CanonicalizeEntryPointIO::ShaderStyle::kHlsl);
- data.Add<NumWorkgroupsFromUniform::Config>(sem::BindingPoint{0, 30u});
- auto got = Run<Unshadow, CanonicalizeEntryPointIO, NumWorkgroupsFromUniform>(
- src, data);
- EXPECT_EQ(expect, str(got));
+ DataMap data;
+ data.Add<CanonicalizeEntryPointIO::Config>(CanonicalizeEntryPointIO::ShaderStyle::kHlsl);
+ data.Add<NumWorkgroupsFromUniform::Config>(sem::BindingPoint{0, 30u});
+ auto got = Run<Unshadow, CanonicalizeEntryPointIO, NumWorkgroupsFromUniform>(src, data);
+ EXPECT_EQ(expect, str(got));
}
TEST_F(NumWorkgroupsFromUniformTest, NoUsages) {
- auto* src = R"(
+ auto* src = R"(
struct Builtins {
@builtin(global_invocation_id) gid : vec3<u32>,
@builtin(workgroup_id) wgid : vec3<u32>,
};
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main(in : Builtins) {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct Builtins {
gid : vec3<u32>,
wgid : vec3<u32>,
@@ -435,19 +421,273 @@ struct tint_symbol_1 {
fn main_inner(in : Builtins) {
}
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main(tint_symbol : tint_symbol_1) {
main_inner(Builtins(tint_symbol.gid, tint_symbol.wgid));
}
)";
- DataMap data;
- data.Add<CanonicalizeEntryPointIO::Config>(
- CanonicalizeEntryPointIO::ShaderStyle::kHlsl);
- data.Add<NumWorkgroupsFromUniform::Config>(sem::BindingPoint{0, 30u});
- auto got = Run<Unshadow, CanonicalizeEntryPointIO, NumWorkgroupsFromUniform>(
- src, data);
- EXPECT_EQ(expect, str(got));
+ DataMap data;
+ data.Add<CanonicalizeEntryPointIO::Config>(CanonicalizeEntryPointIO::ShaderStyle::kHlsl);
+ data.Add<NumWorkgroupsFromUniform::Config>(sem::BindingPoint{0, 30u});
+ auto got = Run<Unshadow, CanonicalizeEntryPointIO, NumWorkgroupsFromUniform>(src, data);
+ EXPECT_EQ(expect, str(got));
+}
+
+// Test that group 0 binding 0 is used if no resource is bound in the program and the binding point
+// is not specified in NumWorkgroupsFromUniform::Config.
+TEST_F(NumWorkgroupsFromUniformTest, UnspecifiedBindingPoint_NoResourceBound) {
+ auto* src = R"(
+struct Builtins1 {
+ @builtin(num_workgroups) num_wgs : vec3<u32>,
+};
+
+struct Builtins2 {
+ @builtin(global_invocation_id) gid : vec3<u32>,
+ @builtin(num_workgroups) num_wgs : vec3<u32>,
+ @builtin(workgroup_id) wgid : vec3<u32>,
+};
+
+@compute @workgroup_size(1)
+fn main1(in : Builtins1) {
+ let groups_x = in.num_wgs.x;
+ let groups_y = in.num_wgs.y;
+ let groups_z = in.num_wgs.z;
+}
+
+@compute @workgroup_size(1)
+fn main2(in : Builtins2) {
+ let groups_x = in.num_wgs.x;
+ let groups_y = in.num_wgs.y;
+ let groups_z = in.num_wgs.z;
+}
+
+@compute @workgroup_size(1)
+fn main3(@builtin(num_workgroups) num_wgs : vec3<u32>) {
+ let groups_x = num_wgs.x;
+ let groups_y = num_wgs.y;
+ let groups_z = num_wgs.z;
+}
+)";
+
+ auto* expect = R"(
+struct tint_symbol_6 {
+ num_workgroups : vec3<u32>,
+}
+
+@group(0) @binding(0) var<uniform> tint_symbol_7 : tint_symbol_6;
+
+struct Builtins1 {
+ num_wgs : vec3<u32>,
+}
+
+struct Builtins2 {
+ gid : vec3<u32>,
+ num_wgs : vec3<u32>,
+ wgid : vec3<u32>,
+}
+
+fn main1_inner(in : Builtins1) {
+ let groups_x = in.num_wgs.x;
+ let groups_y = in.num_wgs.y;
+ let groups_z = in.num_wgs.z;
+}
+
+@compute @workgroup_size(1)
+fn main1() {
+ main1_inner(Builtins1(tint_symbol_7.num_workgroups));
+}
+
+struct tint_symbol_3 {
+ @builtin(global_invocation_id)
+ gid : vec3<u32>,
+ @builtin(workgroup_id)
+ wgid : vec3<u32>,
+}
+
+fn main2_inner(in : Builtins2) {
+ let groups_x = in.num_wgs.x;
+ let groups_y = in.num_wgs.y;
+ let groups_z = in.num_wgs.z;
+}
+
+@compute @workgroup_size(1)
+fn main2(tint_symbol_2 : tint_symbol_3) {
+ main2_inner(Builtins2(tint_symbol_2.gid, tint_symbol_7.num_workgroups, tint_symbol_2.wgid));
+}
+
+fn main3_inner(num_wgs : vec3<u32>) {
+ let groups_x = num_wgs.x;
+ let groups_y = num_wgs.y;
+ let groups_z = num_wgs.z;
+}
+
+@compute @workgroup_size(1)
+fn main3() {
+ main3_inner(tint_symbol_7.num_workgroups);
+}
+)";
+
+ DataMap data;
+ data.Add<CanonicalizeEntryPointIO::Config>(CanonicalizeEntryPointIO::ShaderStyle::kHlsl);
+ // Make binding point unspecified.
+ data.Add<NumWorkgroupsFromUniform::Config>(std::nullopt);
+ auto got = Run<Unshadow, CanonicalizeEntryPointIO, NumWorkgroupsFromUniform>(src, data);
+ EXPECT_EQ(expect, str(got));
+}
+
+// Test that binding 0 of the largest used group plus 1 is used if at least one resource is bound in
+// the program and the binding point is not specified in NumWorkgroupsFromUniform::Config.
+TEST_F(NumWorkgroupsFromUniformTest, UnspecifiedBindingPoint_MultipleResourceBound) {
+ auto* src = R"(
+struct Builtins1 {
+ @builtin(num_workgroups) num_wgs : vec3<u32>,
+};
+
+struct Builtins2 {
+ @builtin(global_invocation_id) gid : vec3<u32>,
+ @builtin(num_workgroups) num_wgs : vec3<u32>,
+ @builtin(workgroup_id) wgid : vec3<u32>,
+};
+
+struct S0 {
+ @size(4)
+ m0 : u32,
+ m1 : array<u32>,
+};
+
+struct S1 {
+ @size(4)
+ m0 : u32,
+ m1 : array<u32, 6>,
+};
+
+@group(0) @binding(0) var g2 : texture_2d<f32>;
+@group(1) @binding(0) var g3 : texture_depth_2d;
+@group(1) @binding(1) var g4 : texture_storage_2d<rg32float, write>;
+@group(3) @binding(0) var g5 : texture_depth_cube_array;
+@group(4) @binding(0) var g6 : texture_external;
+
+@group(0) @binding(1) var<storage, write> g8 : S0;
+@group(1) @binding(3) var<storage, read> g9 : S0;
+@group(3) @binding(2) var<storage, read_write> g10 : S0;
+
+@compute @workgroup_size(1)
+fn main1(in : Builtins1) {
+ let groups_x = in.num_wgs.x;
+ let groups_y = in.num_wgs.y;
+ let groups_z = in.num_wgs.z;
+ g8.m0 = 1u;
+}
+
+@compute @workgroup_size(1)
+fn main2(in : Builtins2) {
+ let groups_x = in.num_wgs.x;
+ let groups_y = in.num_wgs.y;
+ let groups_z = in.num_wgs.z;
+}
+
+@compute @workgroup_size(1)
+fn main3(@builtin(num_workgroups) num_wgs : vec3<u32>) {
+ let groups_x = num_wgs.x;
+ let groups_y = num_wgs.y;
+ let groups_z = num_wgs.z;
+}
+)";
+
+ auto* expect = R"(
+struct tint_symbol_6 {
+ num_workgroups : vec3<u32>,
+}
+
+@group(5) @binding(0) var<uniform> tint_symbol_7 : tint_symbol_6;
+
+struct Builtins1 {
+ num_wgs : vec3<u32>,
+}
+
+struct Builtins2 {
+ gid : vec3<u32>,
+ num_wgs : vec3<u32>,
+ wgid : vec3<u32>,
+}
+
+struct S0 {
+ @size(4)
+ m0 : u32,
+ m1 : array<u32>,
+}
+
+struct S1 {
+ @size(4)
+ m0 : u32,
+ m1 : array<u32, 6>,
+}
+
+@group(0) @binding(0) var g2 : texture_2d<f32>;
+
+@group(1) @binding(0) var g3 : texture_depth_2d;
+
+@group(1) @binding(1) var g4 : texture_storage_2d<rg32float, write>;
+
+@group(3) @binding(0) var g5 : texture_depth_cube_array;
+
+@group(4) @binding(0) var g6 : texture_external;
+
+@group(0) @binding(1) var<storage, write> g8 : S0;
+
+@group(1) @binding(3) var<storage, read> g9 : S0;
+
+@group(3) @binding(2) var<storage, read_write> g10 : S0;
+
+fn main1_inner(in : Builtins1) {
+ let groups_x = in.num_wgs.x;
+ let groups_y = in.num_wgs.y;
+ let groups_z = in.num_wgs.z;
+ g8.m0 = 1u;
+}
+
+@compute @workgroup_size(1)
+fn main1() {
+ main1_inner(Builtins1(tint_symbol_7.num_workgroups));
+}
+
+struct tint_symbol_3 {
+ @builtin(global_invocation_id)
+ gid : vec3<u32>,
+ @builtin(workgroup_id)
+ wgid : vec3<u32>,
+}
+
+fn main2_inner(in : Builtins2) {
+ let groups_x = in.num_wgs.x;
+ let groups_y = in.num_wgs.y;
+ let groups_z = in.num_wgs.z;
+}
+
+@compute @workgroup_size(1)
+fn main2(tint_symbol_2 : tint_symbol_3) {
+ main2_inner(Builtins2(tint_symbol_2.gid, tint_symbol_7.num_workgroups, tint_symbol_2.wgid));
+}
+
+fn main3_inner(num_wgs : vec3<u32>) {
+ let groups_x = num_wgs.x;
+ let groups_y = num_wgs.y;
+ let groups_z = num_wgs.z;
+}
+
+@compute @workgroup_size(1)
+fn main3() {
+ main3_inner(tint_symbol_7.num_workgroups);
+}
+)";
+
+ DataMap data;
+ data.Add<CanonicalizeEntryPointIO::Config>(CanonicalizeEntryPointIO::ShaderStyle::kHlsl);
+ // Make binding point unspecified.
+ data.Add<NumWorkgroupsFromUniform::Config>(std::nullopt);
+ auto got = Run<Unshadow, CanonicalizeEntryPointIO, NumWorkgroupsFromUniform>(src, data);
+ EXPECT_EQ(expect, str(got));
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/transform/promote_initializers_to_const_var.cc b/chromium/third_party/dawn/src/tint/transform/promote_initializers_to_const_var.cc
index a60dd6b6eec..6e0ba55ccc0 100644
--- a/chromium/third_party/dawn/src/tint/transform/promote_initializers_to_const_var.cc
+++ b/chromium/third_party/dawn/src/tint/transform/promote_initializers_to_const_var.cc
@@ -27,57 +27,55 @@ PromoteInitializersToConstVar::PromoteInitializersToConstVar() = default;
PromoteInitializersToConstVar::~PromoteInitializersToConstVar() = default;
-void PromoteInitializersToConstVar::Run(CloneContext& ctx,
- const DataMap&,
- DataMap&) const {
- HoistToDeclBefore hoist_to_decl_before(ctx);
+void PromoteInitializersToConstVar::Run(CloneContext& ctx, const DataMap&, DataMap&) const {
+ HoistToDeclBefore hoist_to_decl_before(ctx);
- // Hoists array and structure initializers to a constant variable, declared
- // just before the statement of usage.
- auto type_ctor_to_let = [&](const ast::CallExpression* expr) {
- auto* ctor = ctx.src->Sem().Get(expr);
- if (!ctor->Target()->Is<sem::TypeConstructor>()) {
- return true;
- }
- auto* sem_stmt = ctor->Stmt();
- if (!sem_stmt) {
- // Expression is outside of a statement. This usually means the
- // expression is part of a global (module-scope) constant declaration.
- // These must be constexpr, and so cannot contain the type of
- // expressions that must be sanitized.
- return true;
- }
+ // Hoists array and structure initializers to a constant variable, declared
+ // just before the statement of usage.
+ auto type_ctor_to_let = [&](const ast::CallExpression* expr) {
+ auto* ctor = ctx.src->Sem().Get(expr)->UnwrapMaterialize()->As<sem::Call>();
+ if (!ctor->Target()->Is<sem::TypeConstructor>()) {
+ return true;
+ }
+ auto* sem_stmt = ctor->Stmt();
+ if (!sem_stmt) {
+ // Expression is outside of a statement. This usually means the
+ // expression is part of a global (module-scope) constant declaration.
+ // These must be constexpr, and so cannot contain the type of
+ // expressions that must be sanitized.
+ return true;
+ }
- auto* stmt = sem_stmt->Declaration();
+ auto* stmt = sem_stmt->Declaration();
- if (auto* src_var_decl = stmt->As<ast::VariableDeclStatement>()) {
- if (src_var_decl->variable->constructor == expr) {
- // This statement is just a variable declaration with the
- // initializer as the constructor value. This is what we're
- // attempting to transform to, and so ignore.
- return true;
- }
- }
+ if (auto* src_var_decl = stmt->As<ast::VariableDeclStatement>()) {
+ if (src_var_decl->variable->constructor == expr) {
+ // This statement is just a variable declaration with the
+ // initializer as the constructor value. This is what we're
+ // attempting to transform to, and so ignore.
+ return true;
+ }
+ }
- auto* src_ty = ctor->Type();
- if (!src_ty->IsAnyOf<sem::Array, sem::Struct>()) {
- // We only care about array and struct initializers
- return true;
- }
+ auto* src_ty = ctor->Type();
+ if (!src_ty->IsAnyOf<sem::Array, sem::Struct>()) {
+ // We only care about array and struct initializers
+ return true;
+ }
- return hoist_to_decl_before.Add(ctor, expr, true);
- };
+ return hoist_to_decl_before.Add(ctor, expr, true);
+ };
- for (auto* node : ctx.src->ASTNodes().Objects()) {
- if (auto* call_expr = node->As<ast::CallExpression>()) {
- if (!type_ctor_to_let(call_expr)) {
- return;
- }
+ for (auto* node : ctx.src->ASTNodes().Objects()) {
+ if (auto* call_expr = node->As<ast::CallExpression>()) {
+ if (!type_ctor_to_let(call_expr)) {
+ return;
+ }
+ }
}
- }
- hoist_to_decl_before.Apply();
- ctx.Clone();
+ hoist_to_decl_before.Apply();
+ ctx.Clone();
}
} // namespace tint::transform
diff --git a/chromium/third_party/dawn/src/tint/transform/promote_initializers_to_const_var.h b/chromium/third_party/dawn/src/tint/transform/promote_initializers_to_const_var.h
index 586e27d5696..67a32c48b82 100644
--- a/chromium/third_party/dawn/src/tint/transform/promote_initializers_to_const_var.h
+++ b/chromium/third_party/dawn/src/tint/transform/promote_initializers_to_const_var.h
@@ -22,25 +22,22 @@ namespace tint::transform {
/// A transform that hoists the array and structure initializers to a constant
/// variable, declared just before the statement of usage.
/// @see crbug.com/tint/406
-class PromoteInitializersToConstVar
- : public Castable<PromoteInitializersToConstVar, Transform> {
- public:
- /// Constructor
- PromoteInitializersToConstVar();
-
- /// Destructor
- ~PromoteInitializersToConstVar() override;
-
- protected:
- /// Runs the transform using the CloneContext built for transforming a
- /// program. Run() is responsible for calling Clone() on the CloneContext.
- /// @param ctx the CloneContext primed with the input program and
- /// ProgramBuilder
- /// @param inputs optional extra transform-specific input data
- /// @param outputs optional extra transform-specific output data
- void Run(CloneContext& ctx,
- const DataMap& inputs,
- DataMap& outputs) const override;
+class PromoteInitializersToConstVar : public Castable<PromoteInitializersToConstVar, Transform> {
+ public:
+ /// Constructor
+ PromoteInitializersToConstVar();
+
+ /// Destructor
+ ~PromoteInitializersToConstVar() override;
+
+ protected:
+ /// Runs the transform using the CloneContext built for transforming a
+ /// program. Run() is responsible for calling Clone() on the CloneContext.
+ /// @param ctx the CloneContext primed with the input program and
+ /// ProgramBuilder
+ /// @param inputs optional extra transform-specific input data
+ /// @param outputs optional extra transform-specific output data
+ void Run(CloneContext& ctx, const DataMap& inputs, DataMap& outputs) const override;
};
} // namespace tint::transform
diff --git a/chromium/third_party/dawn/src/tint/transform/promote_initializers_to_const_var_test.cc b/chromium/third_party/dawn/src/tint/transform/promote_initializers_to_const_var_test.cc
index 87b9edc8bf3..f322478ce0c 100644
--- a/chromium/third_party/dawn/src/tint/transform/promote_initializers_to_const_var_test.cc
+++ b/chromium/third_party/dawn/src/tint/transform/promote_initializers_to_const_var_test.cc
@@ -22,16 +22,16 @@ namespace {
using PromoteInitializersToConstVarTest = TransformTest;
TEST_F(PromoteInitializersToConstVarTest, EmptyModule) {
- auto* src = "";
- auto* expect = "";
+ auto* src = "";
+ auto* expect = "";
- auto got = Run<PromoteInitializersToConstVar>(src);
+ auto got = Run<PromoteInitializersToConstVar>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteInitializersToConstVarTest, BasicArray) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
var f0 = 1.0;
var f1 = 2.0;
@@ -41,7 +41,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn f() {
var f0 = 1.0;
var f1 = 2.0;
@@ -52,14 +52,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteInitializersToConstVar>(src);
+ DataMap data;
+ auto got = Run<PromoteInitializersToConstVar>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteInitializersToConstVarTest, BasicStruct) {
- auto* src = R"(
+ auto* src = R"(
struct S {
a : i32,
b : f32,
@@ -71,7 +71,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct S {
a : i32,
b : f32,
@@ -84,14 +84,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteInitializersToConstVar>(src);
+ DataMap data;
+ auto got = Run<PromoteInitializersToConstVar>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteInitializersToConstVarTest, BasicStruct_OutOfOrder) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
var x = S(1, 2.0, vec3<f32>()).b;
}
@@ -103,7 +103,7 @@ struct S {
};
)";
- auto* expect = R"(
+ auto* expect = R"(
fn f() {
let tint_symbol = S(1, 2.0, vec3<f32>());
var x = tint_symbol.b;
@@ -116,14 +116,14 @@ struct S {
}
)";
- DataMap data;
- auto got = Run<PromoteInitializersToConstVar>(src);
+ DataMap data;
+ auto got = Run<PromoteInitializersToConstVar>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteInitializersToConstVarTest, ArrayInForLoopInit) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
var insert_after = 1;
for(var i = array<f32, 4u>(0.0, 1.0, 2.0, 3.0)[2]; ; ) {
@@ -132,7 +132,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn f() {
var insert_after = 1;
let tint_symbol = array<f32, 4u>(0.0, 1.0, 2.0, 3.0);
@@ -142,14 +142,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteInitializersToConstVar>(src);
+ DataMap data;
+ auto got = Run<PromoteInitializersToConstVar>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteInitializersToConstVarTest, StructInForLoopInit) {
- auto* src = R"(
+ auto* src = R"(
struct S {
a : i32,
b : f32,
@@ -164,7 +164,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct S {
a : i32,
b : f32,
@@ -180,14 +180,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteInitializersToConstVar>(src);
+ DataMap data;
+ auto got = Run<PromoteInitializersToConstVar>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteInitializersToConstVarTest, StructInForLoopInit_OutOfOrder) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
var insert_after = 1;
for(var x = S(1, 2.0, vec3<f32>()).b; ; ) {
@@ -202,7 +202,7 @@ struct S {
};
)";
- auto* expect = R"(
+ auto* expect = R"(
fn f() {
var insert_after = 1;
let tint_symbol = S(1, 2.0, vec3<f32>());
@@ -218,14 +218,14 @@ struct S {
}
)";
- DataMap data;
- auto got = Run<PromoteInitializersToConstVar>(src);
+ DataMap data;
+ auto got = Run<PromoteInitializersToConstVar>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteInitializersToConstVarTest, ArrayInForLoopCond) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
var f = 1.0;
for(; f == array<f32, 1u>(f)[0]; f = f + 1.0) {
@@ -234,7 +234,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn f() {
var f = 1.0;
loop {
@@ -253,14 +253,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteInitializersToConstVar>(src);
+ DataMap data;
+ auto got = Run<PromoteInitializersToConstVar>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteInitializersToConstVarTest, ArrayInForLoopCont) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
var f = 0.0;
for(; f < 10.0; f = f + array<f32, 1u>(1.0)[0]) {
@@ -269,7 +269,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn f() {
var f = 0.0;
loop {
@@ -288,14 +288,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteInitializersToConstVar>(src);
+ DataMap data;
+ auto got = Run<PromoteInitializersToConstVar>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteInitializersToConstVarTest, ArrayInForLoopInitCondCont) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
for(var f = array<f32, 1u>(0.0)[0];
f < array<f32, 1u>(1.0)[0];
@@ -305,7 +305,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn f() {
let tint_symbol = array<f32, 1u>(0.0);
{
@@ -328,14 +328,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteInitializersToConstVar>(src);
+ DataMap data;
+ auto got = Run<PromoteInitializersToConstVar>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteInitializersToConstVarTest, ArrayInElseIf) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
var f = 1.0;
if (true) {
@@ -346,7 +346,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn f() {
var f = 1.0;
if (true) {
@@ -360,14 +360,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteInitializersToConstVar>(src);
+ DataMap data;
+ auto got = Run<PromoteInitializersToConstVar>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteInitializersToConstVarTest, ArrayInElseIfChain) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
var f = 1.0;
if (true) {
@@ -386,7 +386,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn f() {
var f = 1.0;
if (true) {
@@ -411,20 +411,20 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteInitializersToConstVar>(src);
+ DataMap data;
+ auto got = Run<PromoteInitializersToConstVar>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteInitializersToConstVarTest, ArrayInArrayArray) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
var i = array<array<f32, 2u>, 2u>(array<f32, 2u>(1.0, 2.0), array<f32, 2u>(3.0, 4.0))[0][1];
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn f() {
let tint_symbol = array<f32, 2u>(1.0, 2.0);
let tint_symbol_1 = array<f32, 2u>(3.0, 4.0);
@@ -433,14 +433,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteInitializersToConstVar>(src);
+ DataMap data;
+ auto got = Run<PromoteInitializersToConstVar>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteInitializersToConstVarTest, StructNested) {
- auto* src = R"(
+ auto* src = R"(
struct S1 {
a : i32,
};
@@ -460,7 +460,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct S1 {
a : i32,
}
@@ -483,14 +483,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteInitializersToConstVar>(src);
+ DataMap data;
+ auto got = Run<PromoteInitializersToConstVar>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteInitializersToConstVarTest, Mixed) {
- auto* src = R"(
+ auto* src = R"(
struct S1 {
a : i32,
};
@@ -504,7 +504,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct S1 {
a : i32,
}
@@ -523,14 +523,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteInitializersToConstVar>(src);
+ DataMap data;
+ auto got = Run<PromoteInitializersToConstVar>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteInitializersToConstVarTest, Mixed_OutOfOrder) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
var x = S2(array<S1, 3u>(S1(1), S1(2), S1(3))).a[1].a;
}
@@ -544,7 +544,7 @@ struct S1 {
};
)";
- auto* expect = R"(
+ auto* expect = R"(
fn f() {
let tint_symbol = S1(1);
let tint_symbol_1 = S1(2);
@@ -563,14 +563,14 @@ struct S1 {
}
)";
- DataMap data;
- auto got = Run<PromoteInitializersToConstVar>(src);
+ DataMap data;
+ auto got = Run<PromoteInitializersToConstVar>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteInitializersToConstVarTest, NoChangeOnVarDecl) {
- auto* src = R"(
+ auto* src = R"(
struct S {
a : i32,
b : f32,
@@ -587,16 +587,16 @@ let module_arr : array<f32, 4u> = array<f32, 4u>(0.0, 1.0, 2.0, 3.0);
let module_str : S = S(1, 2.0, 3);
)";
- auto* expect = src;
+ auto* expect = src;
- DataMap data;
- auto got = Run<PromoteInitializersToConstVar>(src);
+ DataMap data;
+ auto got = Run<PromoteInitializersToConstVar>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteInitializersToConstVarTest, NoChangeOnVarDecl_OutOfOrder) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
var local_arr = array<f32, 4u>(0.0, 1.0, 2.0, 3.0);
var local_str = S(1, 2.0, 3);
@@ -613,12 +613,12 @@ struct S {
let module_arr : array<f32, 4u> = array<f32, 4u>(0.0, 1.0, 2.0, 3.0);
)";
- auto* expect = src;
+ auto* expect = src;
- DataMap data;
- auto got = Run<PromoteInitializersToConstVar>(src);
+ DataMap data;
+ auto got = Run<PromoteInitializersToConstVar>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/transform/promote_side_effects_to_decl.cc b/chromium/third_party/dawn/src/tint/transform/promote_side_effects_to_decl.cc
index 9fd19db3b40..6f1cc4c7974 100644
--- a/chromium/third_party/dawn/src/tint/transform/promote_side_effects_to_decl.cc
+++ b/chromium/third_party/dawn/src/tint/transform/promote_side_effects_to_decl.cc
@@ -39,62 +39,58 @@ namespace {
// Base state class for common members
class StateBase {
- protected:
- CloneContext& ctx;
- ProgramBuilder& b;
- const sem::Info& sem;
+ protected:
+ CloneContext& ctx;
+ ProgramBuilder& b;
+ const sem::Info& sem;
- explicit StateBase(CloneContext& ctx_in)
- : ctx(ctx_in), b(*ctx_in.dst), sem(ctx_in.src->Sem()) {}
+ explicit StateBase(CloneContext& ctx_in)
+ : ctx(ctx_in), b(*ctx_in.dst), sem(ctx_in.src->Sem()) {}
};
// This first transform converts side-effecting for-loops to loops and else-ifs
// to else {if}s so that the next transform, DecomposeSideEffects, can insert
// hoisted expressions above their current location.
-struct SimplifySideEffectStatements
- : Castable<PromoteSideEffectsToDecl, Transform> {
- class State;
- void Run(CloneContext& ctx, const DataMap& inputs, DataMap&) const override;
+struct SimplifySideEffectStatements : Castable<PromoteSideEffectsToDecl, Transform> {
+ class State;
+ void Run(CloneContext& ctx, const DataMap& inputs, DataMap&) const override;
};
class SimplifySideEffectStatements::State : public StateBase {
- HoistToDeclBefore hoist_to_decl_before;
-
- public:
- explicit State(CloneContext& ctx_in)
- : StateBase(ctx_in), hoist_to_decl_before(ctx_in) {}
-
- void Run() {
- for (auto* node : ctx.src->ASTNodes().Objects()) {
- if (auto* expr = node->As<ast::Expression>()) {
- auto* sem_expr = sem.Get(expr);
- if (!sem_expr || !sem_expr->HasSideEffects()) {
- continue;
+ HoistToDeclBefore hoist_to_decl_before;
+
+ public:
+ explicit State(CloneContext& ctx_in) : StateBase(ctx_in), hoist_to_decl_before(ctx_in) {}
+
+ void Run() {
+ for (auto* node : ctx.src->ASTNodes().Objects()) {
+ if (auto* expr = node->As<ast::Expression>()) {
+ auto* sem_expr = sem.Get(expr);
+ if (!sem_expr || !sem_expr->HasSideEffects()) {
+ continue;
+ }
+
+ hoist_to_decl_before.Prepare(sem_expr);
+ }
}
- hoist_to_decl_before.Prepare(sem_expr);
- }
+ hoist_to_decl_before.Apply();
+ ctx.Clone();
}
-
- hoist_to_decl_before.Apply();
- ctx.Clone();
- }
};
-void SimplifySideEffectStatements::Run(CloneContext& ctx,
- const DataMap&,
- DataMap&) const {
- State state(ctx);
- state.Run();
+void SimplifySideEffectStatements::Run(CloneContext& ctx, const DataMap&, DataMap&) const {
+ State state(ctx);
+ state.Run();
}
// Decomposes side-effecting expressions to ensure order of evaluation. This
// handles both breaking down logical binary expressions for short-circuit
// evaluation, as well as hoisting expressions to ensure order of evaluation.
struct DecomposeSideEffects : Castable<PromoteSideEffectsToDecl, Transform> {
- class CollectHoistsState;
- class DecomposeState;
- void Run(CloneContext& ctx, const DataMap& inputs, DataMap&) const override;
+ class CollectHoistsState;
+ class DecomposeState;
+ void Run(CloneContext& ctx, const DataMap& inputs, DataMap&) const override;
};
// CollectHoistsState traverses the AST top-down, identifying which expressions
@@ -103,604 +99,567 @@ struct DecomposeSideEffects : Castable<PromoteSideEffectsToDecl, Transform> {
// expressions.
using ToHoistSet = std::unordered_set<const ast::Expression*>;
class DecomposeSideEffects::CollectHoistsState : public StateBase {
- // Expressions to hoist because they either cause or receive side-effects.
- ToHoistSet to_hoist;
-
- // Used to mark expressions as not or no longer having side-effects.
- std::unordered_set<const ast::Expression*> no_side_effects;
-
- // Returns true if `expr` has side-effects. Unlike invoking
- // sem::Expression::HasSideEffects(), this function takes into account whether
- // `expr` has been hoisted, returning false in that case. Furthermore, it
- // returns the correct result on parent expression nodes by traversing the
- // expression tree, memoizing the results to ensure O(1) amortized lookup.
- bool HasSideEffects(const ast::Expression* expr) {
- if (no_side_effects.count(expr)) {
- return false;
+ // Expressions to hoist because they either cause or receive side-effects.
+ ToHoistSet to_hoist;
+
+ // Used to mark expressions as not or no longer having side-effects.
+ std::unordered_set<const ast::Expression*> no_side_effects;
+
+ // Returns true if `expr` has side-effects. Unlike invoking
+ // sem::Expression::HasSideEffects(), this function takes into account whether
+ // `expr` has been hoisted, returning false in that case. Furthermore, it
+ // returns the correct result on parent expression nodes by traversing the
+ // expression tree, memoizing the results to ensure O(1) amortized lookup.
+ bool HasSideEffects(const ast::Expression* expr) {
+ if (no_side_effects.count(expr)) {
+ return false;
+ }
+
+ return Switch(
+ expr,
+ [&](const ast::CallExpression* e) -> bool { return sem.Get(e)->HasSideEffects(); },
+ [&](const ast::BinaryExpression* e) {
+ if (HasSideEffects(e->lhs) || HasSideEffects(e->rhs)) {
+ return true;
+ }
+ no_side_effects.insert(e);
+ return false;
+ },
+ [&](const ast::IndexAccessorExpression* e) {
+ if (HasSideEffects(e->object) || HasSideEffects(e->index)) {
+ return true;
+ }
+ no_side_effects.insert(e);
+ return false;
+ },
+ [&](const ast::MemberAccessorExpression* e) {
+ if (HasSideEffects(e->structure) || HasSideEffects(e->member)) {
+ return true;
+ }
+ no_side_effects.insert(e);
+ return false;
+ },
+ [&](const ast::BitcastExpression* e) { //
+ if (HasSideEffects(e->expr)) {
+ return true;
+ }
+ no_side_effects.insert(e);
+ return false;
+ },
+
+ [&](const ast::UnaryOpExpression* e) { //
+ if (HasSideEffects(e->expr)) {
+ return true;
+ }
+ no_side_effects.insert(e);
+ return false;
+ },
+ [&](const ast::IdentifierExpression* e) {
+ no_side_effects.insert(e);
+ return false;
+ },
+ [&](const ast::LiteralExpression* e) {
+ no_side_effects.insert(e);
+ return false;
+ },
+ [&](const ast::PhonyExpression* e) {
+ no_side_effects.insert(e);
+ return false;
+ },
+ [&](Default) {
+ TINT_ICE(Transform, b.Diagnostics()) << "Unhandled expression type";
+ return false;
+ });
}
- return Switch(
- expr,
- [&](const ast::CallExpression* e) -> bool {
- return sem.Get(e)->HasSideEffects();
- },
- [&](const ast::BinaryExpression* e) {
- if (HasSideEffects(e->lhs) || HasSideEffects(e->rhs)) {
- return true;
- }
- no_side_effects.insert(e);
- return false;
- },
- [&](const ast::IndexAccessorExpression* e) {
- if (HasSideEffects(e->object) || HasSideEffects(e->index)) {
- return true;
- }
- no_side_effects.insert(e);
- return false;
- },
- [&](const ast::MemberAccessorExpression* e) {
- if (HasSideEffects(e->structure) || HasSideEffects(e->member)) {
- return true;
- }
- no_side_effects.insert(e);
- return false;
- },
- [&](const ast::BitcastExpression* e) { //
- if (HasSideEffects(e->expr)) {
- return true;
- }
- no_side_effects.insert(e);
- return false;
- },
-
- [&](const ast::UnaryOpExpression* e) { //
- if (HasSideEffects(e->expr)) {
- return true;
- }
- no_side_effects.insert(e);
- return false;
- },
- [&](const ast::IdentifierExpression* e) {
- no_side_effects.insert(e);
- return false;
- },
- [&](const ast::LiteralExpression* e) {
- no_side_effects.insert(e);
- return false;
- },
- [&](const ast::PhonyExpression* e) {
- no_side_effects.insert(e);
- return false;
- },
- [&](Default) {
- TINT_ICE(Transform, b.Diagnostics()) << "Unhandled expression type";
- return false;
- });
- }
-
- // Adds `e` to `to_hoist` for hoisting to a let later on.
- void Hoist(const ast::Expression* e) {
- no_side_effects.insert(e);
- to_hoist.emplace(e);
- }
-
- // Hoists any expressions in `maybe_hoist` and clears it
- void Flush(ast::ExpressionList& maybe_hoist) {
- for (auto* m : maybe_hoist) {
- Hoist(m);
+ // Adds `e` to `to_hoist` for hoisting to a let later on.
+ void Hoist(const ast::Expression* e) {
+ no_side_effects.insert(e);
+ to_hoist.emplace(e);
}
- maybe_hoist.clear();
- }
-
- // Recursive function that processes expressions for side-effects. It
- // traverses the expression tree child before parent, left-to-right. Each call
- // returns whether the input expression should maybe be hoisted, allowing the
- // parent node to decide whether to hoist or not. Generally:
- // * When 'true' is returned, the expression is added to the maybe_hoist list.
- // * When a side-effecting expression is met, we flush the expressions in the
- // maybe_hoist list, as they are potentially receivers of the side-effects.
- // * For index and member accessor expressions, special care is taken to not
- // over-hoist the lhs expressions, as these may be be chained to refer to a
-  //   over-hoist the lhs expressions, as these may be chained to refer to a
- bool ProcessExpression(const ast::Expression* expr,
- ast::ExpressionList& maybe_hoist) {
- auto process = [&](const ast::Expression* e) -> bool {
- return ProcessExpression(e, maybe_hoist);
- };
-
- auto default_process = [&](const ast::Expression* e) {
- auto maybe = process(e);
- if (maybe) {
- maybe_hoist.emplace_back(e);
- }
- if (HasSideEffects(e)) {
- Flush(maybe_hoist);
- }
- return false;
- };
-
- auto binary_process = [&](auto* lhs, auto* rhs) {
- // If neither side causes side-effects, but at least one receives them,
- // let parent node hoist. This avoids over-hoisting side-effect receivers
- // of compound binary expressions (e.g. for "((a && b) && c) && f()", we
- // don't want to hoist each of "a", "b", and "c" separately, but want to
- // hoist "((a && b) && c)".
- if (!HasSideEffects(lhs) && !HasSideEffects(rhs)) {
- auto lhs_maybe = process(lhs);
- auto rhs_maybe = process(rhs);
- if (lhs_maybe || rhs_maybe) {
- return true;
+
+ // Hoists any expressions in `maybe_hoist` and clears it
+ void Flush(ast::ExpressionList& maybe_hoist) {
+ for (auto* m : maybe_hoist) {
+ Hoist(m);
}
- return false;
- }
-
- default_process(lhs);
- default_process(rhs);
- return false;
- };
-
- auto accessor_process = [&](auto* lhs, auto* rhs) {
- auto maybe = process(lhs);
- // If lhs is a variable, let parent node hoist otherwise flush it right
- // away. This is to avoid over-hoisting the lhs of accessor chains (e.g.
- // for "v[a][b][c] + g()" we want to hoist all of "v[a][b][c]", not "t1 =
- // v[a]", then "t2 = t1[b]" then "t3 = t2[c]").
- if (maybe && HasSideEffects(lhs)) {
- maybe_hoist.emplace_back(lhs);
- Flush(maybe_hoist);
- maybe = false;
- }
- default_process(rhs);
- return maybe;
- };
-
- return Switch(
- expr,
- [&](const ast::CallExpression* e) -> bool {
- // We eagerly flush any variables in maybe_hoist for the current
- // call expression. Then we scope maybe_hoist to the processing of
- // the call args. This ensures that given: g(c, a(0), d) we hoist
- // 'c' because of 'a(0)', but not 'd' because there's no need, since
- // the call to g() will be hoisted if necessary.
- if (HasSideEffects(e)) {
- Flush(maybe_hoist);
- }
-
- TINT_SCOPED_ASSIGNMENT(maybe_hoist, {});
- for (auto* a : e->args) {
- default_process(a);
- }
-
- // Always hoist this call, even if it has no side-effects to ensure
- // left-to-right order of evaluation.
- // E.g. for "no_side_effects() + side_effects()", we want to hoist
- // no_side_effects() first.
- return true;
- },
- [&](const ast::IdentifierExpression* e) {
- if (auto* sem_e = sem.Get(e)) {
- if (auto* var_user = sem_e->As<sem::VariableUser>()) {
- // Don't hoist constants.
- if (var_user->ConstantValue().IsValid()) {
- return false;
- }
- // Don't hoist read-only variables as they cannot receive
- // side-effects.
- if (var_user->Variable()->Access() == ast::Access::kRead) {
- return false;
- }
- return true;
+ maybe_hoist.clear();
+ }
+
+ // Recursive function that processes expressions for side-effects. It
+ // traverses the expression tree child before parent, left-to-right. Each call
+ // returns whether the input expression should maybe be hoisted, allowing the
+ // parent node to decide whether to hoist or not. Generally:
+ // * When 'true' is returned, the expression is added to the maybe_hoist list.
+ // * When a side-effecting expression is met, we flush the expressions in the
+ // maybe_hoist list, as they are potentially receivers of the side-effects.
+ // * For index and member accessor expressions, special care is taken to not
+    //   over-hoist the lhs expressions, as these may be chained to refer to a
+ // single memory location.
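+    //
+    // Illustrative sketch (the generated let names below are placeholders): for
+    // "var r = b + a(0);", where a() has side-effects and "b" is a read-write
+    // var, both "b" and the call end up hoisted so evaluation stays
+    // left-to-right:
+    //   let tint_symbol = b;
+    //   let tint_symbol_1 = a(0);
+    //   var r = (tint_symbol + tint_symbol_1);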
+ bool ProcessExpression(const ast::Expression* expr, ast::ExpressionList& maybe_hoist) {
+ auto process = [&](const ast::Expression* e) -> bool {
+ return ProcessExpression(e, maybe_hoist);
+ };
+
+ auto default_process = [&](const ast::Expression* e) {
+ auto maybe = process(e);
+ if (maybe) {
+ maybe_hoist.emplace_back(e);
+ }
+ if (HasSideEffects(e)) {
+ Flush(maybe_hoist);
}
- }
- return false;
- },
- [&](const ast::BinaryExpression* e) {
- if (e->IsLogical() && HasSideEffects(e)) {
- // Don't hoist children of logical binary expressions with
- // side-effects. These will be handled by DecomposeState.
- process(e->lhs);
- process(e->rhs);
- return false;
- }
- return binary_process(e->lhs, e->rhs);
- },
- [&](const ast::BitcastExpression* e) { //
- return process(e->expr);
- },
- [&](const ast::UnaryOpExpression* e) { //
- auto r = process(e->expr);
- // Don't hoist address-of expressions.
- // E.g. for "g(&b, a(0))", we hoist "a(0)" only.
- if (e->op == ast::UnaryOp::kAddressOf) {
return false;
- }
- return r;
- },
- [&](const ast::IndexAccessorExpression* e) {
- return accessor_process(e->object, e->index);
- },
- [&](const ast::MemberAccessorExpression* e) {
- return accessor_process(e->structure, e->member);
- },
- [&](const ast::LiteralExpression*) {
- // Leaf
- return false;
- },
- [&](const ast::PhonyExpression*) {
- // Leaf
- return false;
- },
- [&](Default) {
- TINT_ICE(Transform, b.Diagnostics()) << "Unhandled expression type";
- return false;
- });
- }
+ };
+
+ auto binary_process = [&](auto* lhs, auto* rhs) {
+ // If neither side causes side-effects, but at least one receives them,
+ // let parent node hoist. This avoids over-hoisting side-effect receivers
+ // of compound binary expressions (e.g. for "((a && b) && c) && f()", we
+ // don't want to hoist each of "a", "b", and "c" separately, but want to
+            // hoist "((a && b) && c)").
+ if (!HasSideEffects(lhs) && !HasSideEffects(rhs)) {
+ auto lhs_maybe = process(lhs);
+ auto rhs_maybe = process(rhs);
+ if (lhs_maybe || rhs_maybe) {
+ return true;
+ }
+ return false;
+ }
- // Starts the recursive processing of a statement's expression(s) to hoist
- // side-effects to lets.
- void ProcessStatement(const ast::Expression* expr) {
- if (!expr) {
- return;
+ default_process(lhs);
+ default_process(rhs);
+ return false;
+ };
+
+ auto accessor_process = [&](auto* lhs, auto* rhs) {
+ auto maybe = process(lhs);
+            // If lhs is a variable, let parent node hoist, otherwise flush it right
+ // away. This is to avoid over-hoisting the lhs of accessor chains (e.g.
+ // for "v[a][b][c] + g()" we want to hoist all of "v[a][b][c]", not "t1 =
+ // v[a]", then "t2 = t1[b]" then "t3 = t2[c]").
+ if (maybe && HasSideEffects(lhs)) {
+ maybe_hoist.emplace_back(lhs);
+ Flush(maybe_hoist);
+ maybe = false;
+ }
+ default_process(rhs);
+ return maybe;
+ };
+
+ return Switch(
+ expr,
+ [&](const ast::CallExpression* e) -> bool {
+ // We eagerly flush any variables in maybe_hoist for the current
+ // call expression. Then we scope maybe_hoist to the processing of
+ // the call args. This ensures that given: g(c, a(0), d) we hoist
+ // 'c' because of 'a(0)', but not 'd' because there's no need, since
+ // the call to g() will be hoisted if necessary.
+ if (HasSideEffects(e)) {
+ Flush(maybe_hoist);
+ }
+
+ TINT_SCOPED_ASSIGNMENT(maybe_hoist, {});
+ for (auto* a : e->args) {
+ default_process(a);
+ }
+
+                // Always hoist this call, even if it has no side-effects, to ensure
+ // left-to-right order of evaluation.
+ // E.g. for "no_side_effects() + side_effects()", we want to hoist
+ // no_side_effects() first.
+ return true;
+ },
+ [&](const ast::IdentifierExpression* e) {
+ if (auto* sem_e = sem.Get(e)) {
+ if (auto* var_user = sem_e->As<sem::VariableUser>()) {
+ // Don't hoist constants.
+ if (var_user->ConstantValue().IsValid()) {
+ return false;
+ }
+ // Don't hoist read-only variables as they cannot receive
+ // side-effects.
+ if (var_user->Variable()->Access() == ast::Access::kRead) {
+ return false;
+ }
+ return true;
+ }
+ }
+ return false;
+ },
+ [&](const ast::BinaryExpression* e) {
+ if (e->IsLogical() && HasSideEffects(e)) {
+ // Don't hoist children of logical binary expressions with
+ // side-effects. These will be handled by DecomposeState.
+ process(e->lhs);
+ process(e->rhs);
+ return false;
+ }
+ return binary_process(e->lhs, e->rhs);
+ },
+ [&](const ast::BitcastExpression* e) { //
+ return process(e->expr);
+ },
+ [&](const ast::UnaryOpExpression* e) { //
+ auto r = process(e->expr);
+ // Don't hoist address-of expressions.
+ // E.g. for "g(&b, a(0))", we hoist "a(0)" only.
+ if (e->op == ast::UnaryOp::kAddressOf) {
+ return false;
+ }
+ return r;
+ },
+ [&](const ast::IndexAccessorExpression* e) {
+ return accessor_process(e->object, e->index);
+ },
+ [&](const ast::MemberAccessorExpression* e) {
+ return accessor_process(e->structure, e->member);
+ },
+ [&](const ast::LiteralExpression*) {
+ // Leaf
+ return false;
+ },
+ [&](const ast::PhonyExpression*) {
+ // Leaf
+ return false;
+ },
+ [&](Default) {
+ TINT_ICE(Transform, b.Diagnostics()) << "Unhandled expression type";
+ return false;
+ });
}
- ast::ExpressionList maybe_hoist;
- ProcessExpression(expr, maybe_hoist);
- }
-
- // Special case for processing assignment statement expressions, as we must
- // evaluate the rhs before the lhs, and possibly hoist the rhs expression.
- void ProcessAssignment(const ast::Expression* lhs,
- const ast::Expression* rhs) {
- // Evaluate rhs before lhs
- ast::ExpressionList maybe_hoist;
- if (ProcessExpression(rhs, maybe_hoist)) {
- maybe_hoist.emplace_back(rhs);
- }
+ // Starts the recursive processing of a statement's expression(s) to hoist
+ // side-effects to lets.
+ void ProcessStatement(const ast::Expression* expr) {
+ if (!expr) {
+ return;
+ }
- // If the rhs has side-effects, it may affect the lhs, so hoist it right
- // away. e.g. "b[c] = a(0);"
- if (HasSideEffects(rhs)) {
- // Technically, we can always hoist rhs, but don't bother doing so when
- // the lhs is just a variable or phony.
- if (!lhs->IsAnyOf<ast::IdentifierExpression, ast::PhonyExpression>()) {
- Flush(maybe_hoist);
- }
+ ast::ExpressionList maybe_hoist;
+ ProcessExpression(expr, maybe_hoist);
}
- // If maybe_hoist still has values, it means they are potential side-effect
- // receivers. We pass this in while processing the lhs, in which case they
- // may get hoisted if the lhs has side-effects. E.g. "b[a(0)] = c;".
- ProcessExpression(lhs, maybe_hoist);
- }
-
- public:
- explicit CollectHoistsState(CloneContext& ctx_in) : StateBase(ctx_in) {}
-
- ToHoistSet Run() {
- // Traverse all statements, recursively processing their expression tree(s)
- // to hoist side-effects to lets.
- for (auto* node : ctx.src->ASTNodes().Objects()) {
- auto* stmt = node->As<ast::Statement>();
- if (!stmt) {
- continue;
- }
-
- Switch(
- stmt,
- [&](const ast::AssignmentStatement* s) {
- ProcessAssignment(s->lhs, s->rhs);
- },
- [&](const ast::CallStatement* s) { //
- ProcessStatement(s->expr);
- },
- [&](const ast::ElseStatement* s) { //
- ProcessStatement(s->condition);
- },
- [&](const ast::ForLoopStatement* s) {
- ProcessStatement(s->condition);
- },
- [&](const ast::IfStatement* s) { //
- ProcessStatement(s->condition);
- },
- [&](const ast::ReturnStatement* s) { //
- ProcessStatement(s->value);
- },
- [&](const ast::SwitchStatement* s) {
- ProcessStatement(s->condition);
- },
- [&](const ast::VariableDeclStatement* s) {
- ProcessStatement(s->variable->constructor);
- });
+ // Special case for processing assignment statement expressions, as we must
+ // evaluate the rhs before the lhs, and possibly hoist the rhs expression.
+ void ProcessAssignment(const ast::Expression* lhs, const ast::Expression* rhs) {
+ // Evaluate rhs before lhs
+ ast::ExpressionList maybe_hoist;
+ if (ProcessExpression(rhs, maybe_hoist)) {
+ maybe_hoist.emplace_back(rhs);
+ }
+
+ // If the rhs has side-effects, it may affect the lhs, so hoist it right
+ // away. e.g. "b[c] = a(0);"
+ if (HasSideEffects(rhs)) {
+ // Technically, we can always hoist rhs, but don't bother doing so when
+ // the lhs is just a variable or phony.
+ if (!lhs->IsAnyOf<ast::IdentifierExpression, ast::PhonyExpression>()) {
+ Flush(maybe_hoist);
+ }
+ }
+
+ // If maybe_hoist still has values, it means they are potential side-effect
+ // receivers. We pass this in while processing the lhs, in which case they
+ // may get hoisted if the lhs has side-effects. E.g. "b[a(0)] = c;".
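+        //
+        // Illustrative sketch (placeholder let names): "b[a(0)] = c;" becomes
+        //   let tint_symbol = c;
+        //   let tint_symbol_1 = a(0);
+        //   b[tint_symbol_1] = tint_symbol;
+        // so that "c" is read before the call to a() can modify it.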
+ ProcessExpression(lhs, maybe_hoist);
}
- return std::move(to_hoist);
- }
+ public:
+ explicit CollectHoistsState(CloneContext& ctx_in) : StateBase(ctx_in) {}
+
+ ToHoistSet Run() {
+ // Traverse all statements, recursively processing their expression tree(s)
+ // to hoist side-effects to lets.
+ for (auto* node : ctx.src->ASTNodes().Objects()) {
+ auto* stmt = node->As<ast::Statement>();
+ if (!stmt) {
+ continue;
+ }
+
+ Switch(
+ stmt, [&](const ast::AssignmentStatement* s) { ProcessAssignment(s->lhs, s->rhs); },
+ [&](const ast::CallStatement* s) { //
+ ProcessStatement(s->expr);
+ },
+ [&](const ast::ForLoopStatement* s) { ProcessStatement(s->condition); },
+ [&](const ast::IfStatement* s) { //
+ ProcessStatement(s->condition);
+ },
+ [&](const ast::ReturnStatement* s) { //
+ ProcessStatement(s->value);
+ },
+ [&](const ast::SwitchStatement* s) { ProcessStatement(s->condition); },
+ [&](const ast::VariableDeclStatement* s) {
+ ProcessStatement(s->variable->constructor);
+ });
+ }
+
+ return std::move(to_hoist);
+ }
};
// DecomposeState performs the actual transforming of the AST to ensure order of
// evaluation, using the set of expressions to hoist collected by
// CollectHoistsState.
class DecomposeSideEffects::DecomposeState : public StateBase {
- ToHoistSet to_hoist;
-
- // Returns true if `binary_expr` should be decomposed for short-circuit eval.
- bool IsLogicalWithSideEffects(const ast::BinaryExpression* binary_expr) {
- return binary_expr->IsLogical() &&
- (sem.Get(binary_expr->lhs)->HasSideEffects() ||
- sem.Get(binary_expr->rhs)->HasSideEffects());
- }
-
- // Recursive function used to decompose an expression for short-circuit eval.
- const ast::Expression* Decompose(const ast::Expression* expr,
- ast::StatementList* curr_stmts) {
- // Helper to avoid passing in same args.
- auto decompose = [&](auto& e) { return Decompose(e, curr_stmts); };
-
- // Clones `expr`, possibly hoisting it to a let.
- auto clone_maybe_hoisted =
- [&](const ast::Expression* e) -> const ast::Expression* {
- if (to_hoist.count(e)) {
- auto name = b.Symbols().New();
- auto* v = b.Const(name, nullptr, ctx.Clone(e));
- auto* decl = b.Decl(v);
- curr_stmts->push_back(decl);
- return b.Expr(name);
- }
- return ctx.Clone(e);
- };
-
- return Switch(
- expr,
- [&](const ast::BinaryExpression* bin_expr) -> const ast::Expression* {
- if (!IsLogicalWithSideEffects(bin_expr)) {
- // No short-circuit, emit usual binary expr
- ctx.Replace(bin_expr->lhs, decompose(bin_expr->lhs));
- ctx.Replace(bin_expr->rhs, decompose(bin_expr->rhs));
- return clone_maybe_hoisted(bin_expr);
- }
-
- // Decompose into ifs to implement short-circuiting
- // For example, 'let r = a && b' becomes:
- //
- // var temp = a;
- // if (temp) {
- // temp = b;
- // }
- // let r = temp;
- //
- // and similarly, 'let r = a || b' becomes:
- //
- // var temp = a;
- // if (!temp) {
- // temp = b;
- // }
- // let r = temp;
- //
- // Further, compound logical binary expressions are also handled
- // recursively, for example, 'let r = (a && (b && c))' becomes:
- //
- // var temp = a;
- // if (temp) {
- // var temp2 = b;
- // if (temp2) {
- // temp2 = c;
- // }
- // temp = temp2;
- // }
- // let r = temp;
-
- auto name = b.Sym();
- curr_stmts->push_back(
- b.Decl(b.Var(name, nullptr, decompose(bin_expr->lhs))));
-
- const ast::Expression* if_cond = nullptr;
- if (bin_expr->IsLogicalOr()) {
- if_cond = b.Not(name);
- } else {
- if_cond = b.Expr(name);
- }
-
- const ast::BlockStatement* if_body = nullptr;
- {
- ast::StatementList stmts;
- TINT_SCOPED_ASSIGNMENT(curr_stmts, &stmts);
- auto* new_rhs = decompose(bin_expr->rhs);
- curr_stmts->push_back(b.Assign(name, new_rhs));
- if_body = b.Block(std::move(*curr_stmts));
- }
-
- curr_stmts->push_back(b.If(if_cond, if_body));
-
- return b.Expr(name);
- },
- [&](const ast::IndexAccessorExpression* idx) {
- ctx.Replace(idx->object, decompose(idx->object));
- ctx.Replace(idx->index, decompose(idx->index));
- return clone_maybe_hoisted(idx);
- },
- [&](const ast::BitcastExpression* bitcast) {
- ctx.Replace(bitcast->expr, decompose(bitcast->expr));
- return clone_maybe_hoisted(bitcast);
- },
- [&](const ast::CallExpression* call) {
- if (call->target.name) {
- ctx.Replace(call->target.name, decompose(call->target.name));
- }
- for (auto* a : call->args) {
- ctx.Replace(a, decompose(a));
- }
- return clone_maybe_hoisted(call);
- },
- [&](const ast::MemberAccessorExpression* member) {
- ctx.Replace(member->structure, decompose(member->structure));
- ctx.Replace(member->member, decompose(member->member));
- return clone_maybe_hoisted(member);
- },
- [&](const ast::UnaryOpExpression* unary) {
- ctx.Replace(unary->expr, decompose(unary->expr));
- return clone_maybe_hoisted(unary);
- },
- [&](const ast::LiteralExpression* lit) {
- return clone_maybe_hoisted(lit); // Leaf expression, just clone as is
- },
- [&](const ast::IdentifierExpression* id) {
- return clone_maybe_hoisted(id); // Leaf expression, just clone as is
- },
- [&](const ast::PhonyExpression* phony) {
- return clone_maybe_hoisted(
- phony); // Leaf expression, just clone as is
- },
- [&](Default) {
- TINT_ICE(AST, b.Diagnostics())
- << "unhandled expression type: " << expr->TypeInfo().name;
- return nullptr;
- });
- }
-
- // Inserts statements in `stmts` before `stmt`
- void InsertBefore(const ast::StatementList& stmts,
- const ast::Statement* stmt) {
- if (!stmts.empty()) {
- auto ip = utils::GetInsertionPoint(ctx, stmt);
- for (auto* s : stmts) {
- ctx.InsertBefore(ip.first->Declaration()->statements, ip.second, s);
- }
+ ToHoistSet to_hoist;
+
+ // Returns true if `binary_expr` should be decomposed for short-circuit eval.
+ bool IsLogicalWithSideEffects(const ast::BinaryExpression* binary_expr) {
+ return binary_expr->IsLogical() && (sem.Get(binary_expr->lhs)->HasSideEffects() ||
+ sem.Get(binary_expr->rhs)->HasSideEffects());
}
- }
-
- // Decomposes expressions of `stmt`, returning a replacement statement or
- // nullptr if not replacing it.
- const ast::Statement* DecomposeStatement(const ast::Statement* stmt) {
- return Switch(
- stmt,
- [&](const ast::AssignmentStatement* s) -> const ast::Statement* {
- if (!sem.Get(s->lhs)->HasSideEffects() &&
- !sem.Get(s->rhs)->HasSideEffects()) {
- return nullptr;
- }
- // rhs before lhs
- ast::StatementList stmts;
- ctx.Replace(s->rhs, Decompose(s->rhs, &stmts));
- ctx.Replace(s->lhs, Decompose(s->lhs, &stmts));
- InsertBefore(stmts, s);
- return ctx.CloneWithoutTransform(s);
- },
- [&](const ast::CallStatement* s) -> const ast::Statement* {
- if (!sem.Get(s->expr)->HasSideEffects()) {
- return nullptr;
- }
- ast::StatementList stmts;
- ctx.Replace(s->expr, Decompose(s->expr, &stmts));
- InsertBefore(stmts, s);
- return ctx.CloneWithoutTransform(s);
- },
- [&](const ast::ElseStatement* s) -> const ast::Statement* {
- if (!s->condition || !sem.Get(s->condition)->HasSideEffects()) {
- return nullptr;
- }
- // NOTE: We shouldn't reach here as else-if with side-effect
- // conditions are simplified to else { if } by
- // SimplifySideEffectStatements.
- ast::StatementList stmts;
- ctx.Replace(s->condition, Decompose(s->condition, &stmts));
- InsertBefore(stmts, s);
- return ctx.CloneWithoutTransform(s);
- },
- [&](const ast::ForLoopStatement* s) -> const ast::Statement* {
- if (!s->condition || !sem.Get(s->condition)->HasSideEffects()) {
- return nullptr;
- }
- ast::StatementList stmts;
- ctx.Replace(s->condition, Decompose(s->condition, &stmts));
- InsertBefore(stmts, s);
- return ctx.CloneWithoutTransform(s);
- },
- [&](const ast::IfStatement* s) -> const ast::Statement* {
- if (!sem.Get(s->condition)->HasSideEffects()) {
- return nullptr;
- }
- ast::StatementList stmts;
- ctx.Replace(s->condition, Decompose(s->condition, &stmts));
- InsertBefore(stmts, s);
- return ctx.CloneWithoutTransform(s);
- },
- [&](const ast::ReturnStatement* s) -> const ast::Statement* {
- if (!s->value || !sem.Get(s->value)->HasSideEffects()) {
- return nullptr;
- }
- ast::StatementList stmts;
- ctx.Replace(s->value, Decompose(s->value, &stmts));
- InsertBefore(stmts, s);
- return ctx.CloneWithoutTransform(s);
- },
- [&](const ast::SwitchStatement* s) -> const ast::Statement* {
- if (!sem.Get(s->condition)) {
- return nullptr;
- }
- ast::StatementList stmts;
- ctx.Replace(s->condition, Decompose(s->condition, &stmts));
- InsertBefore(stmts, s);
- return ctx.CloneWithoutTransform(s);
- },
- [&](const ast::VariableDeclStatement* s) -> const ast::Statement* {
- auto* var = s->variable;
- if (!var->constructor ||
- !sem.Get(var->constructor)->HasSideEffects()) {
- return nullptr;
- }
- ast::StatementList stmts;
- ctx.Replace(var->constructor, Decompose(var->constructor, &stmts));
- InsertBefore(stmts, s);
- return b.Decl(ctx.CloneWithoutTransform(var));
- },
- [](Default) -> const ast::Statement* {
- // Other statement types don't have expressions
- return nullptr;
- });
- }
-
- public:
- explicit DecomposeState(CloneContext& ctx_in, ToHoistSet to_hoist_in)
- : StateBase(ctx_in), to_hoist(std::move(to_hoist_in)) {}
-
- void Run() {
- // We replace all BlockStatements as this allows us to iterate over the
- // block statements and ctx.InsertBefore hoisted declarations on them.
- ctx.ReplaceAll(
- [&](const ast::BlockStatement* block) -> const ast::Statement* {
- for (auto* stmt : block->statements) {
- if (auto* new_stmt = DecomposeStatement(stmt)) {
- ctx.Replace(stmt, new_stmt);
+
+ // Recursive function used to decompose an expression for short-circuit eval.
+ const ast::Expression* Decompose(const ast::Expression* expr, ast::StatementList* curr_stmts) {
+ // Helper to avoid passing in same args.
+ auto decompose = [&](auto& e) { return Decompose(e, curr_stmts); };
+
+        // Clones the given expression, possibly hoisting it to a let.
+ auto clone_maybe_hoisted = [&](const ast::Expression* e) -> const ast::Expression* {
+ if (to_hoist.count(e)) {
+ auto name = b.Symbols().New();
+ auto* v = b.Let(name, nullptr, ctx.Clone(e));
+ auto* decl = b.Decl(v);
+ curr_stmts->push_back(decl);
+ return b.Expr(name);
}
+ return ctx.Clone(e);
+ };
+
+ return Switch(
+ expr,
+ [&](const ast::BinaryExpression* bin_expr) -> const ast::Expression* {
+ if (!IsLogicalWithSideEffects(bin_expr)) {
+ // No short-circuit, emit usual binary expr
+ ctx.Replace(bin_expr->lhs, decompose(bin_expr->lhs));
+ ctx.Replace(bin_expr->rhs, decompose(bin_expr->rhs));
+ return clone_maybe_hoisted(bin_expr);
+ }
+
+ // Decompose into ifs to implement short-circuiting
+ // For example, 'let r = a && b' becomes:
+ //
+ // var temp = a;
+ // if (temp) {
+ // temp = b;
+ // }
+ // let r = temp;
+ //
+ // and similarly, 'let r = a || b' becomes:
+ //
+ // var temp = a;
+ // if (!temp) {
+ // temp = b;
+ // }
+ // let r = temp;
+ //
+ // Further, compound logical binary expressions are also handled
+ // recursively, for example, 'let r = (a && (b && c))' becomes:
+ //
+ // var temp = a;
+ // if (temp) {
+ // var temp2 = b;
+ // if (temp2) {
+ // temp2 = c;
+ // }
+ // temp = temp2;
+ // }
+ // let r = temp;
+
+ auto name = b.Sym();
+ curr_stmts->push_back(b.Decl(b.Var(name, nullptr, decompose(bin_expr->lhs))));
+
+ const ast::Expression* if_cond = nullptr;
+ if (bin_expr->IsLogicalOr()) {
+ if_cond = b.Not(name);
+ } else {
+ if_cond = b.Expr(name);
+ }
+
+ const ast::BlockStatement* if_body = nullptr;
+ {
+ ast::StatementList stmts;
+ TINT_SCOPED_ASSIGNMENT(curr_stmts, &stmts);
+ auto* new_rhs = decompose(bin_expr->rhs);
+ curr_stmts->push_back(b.Assign(name, new_rhs));
+ if_body = b.Block(std::move(*curr_stmts));
+ }
+
+ curr_stmts->push_back(b.If(if_cond, if_body));
+
+ return b.Expr(name);
+ },
+ [&](const ast::IndexAccessorExpression* idx) {
+ ctx.Replace(idx->object, decompose(idx->object));
+ ctx.Replace(idx->index, decompose(idx->index));
+ return clone_maybe_hoisted(idx);
+ },
+ [&](const ast::BitcastExpression* bitcast) {
+ ctx.Replace(bitcast->expr, decompose(bitcast->expr));
+ return clone_maybe_hoisted(bitcast);
+ },
+ [&](const ast::CallExpression* call) {
+ if (call->target.name) {
+ ctx.Replace(call->target.name, decompose(call->target.name));
+ }
+ for (auto* a : call->args) {
+ ctx.Replace(a, decompose(a));
+ }
+ return clone_maybe_hoisted(call);
+ },
+ [&](const ast::MemberAccessorExpression* member) {
+ ctx.Replace(member->structure, decompose(member->structure));
+ ctx.Replace(member->member, decompose(member->member));
+ return clone_maybe_hoisted(member);
+ },
+ [&](const ast::UnaryOpExpression* unary) {
+ ctx.Replace(unary->expr, decompose(unary->expr));
+ return clone_maybe_hoisted(unary);
+ },
+ [&](const ast::LiteralExpression* lit) {
+ return clone_maybe_hoisted(lit); // Leaf expression, just clone as is
+ },
+ [&](const ast::IdentifierExpression* id) {
+ return clone_maybe_hoisted(id); // Leaf expression, just clone as is
+ },
+ [&](const ast::PhonyExpression* phony) {
+ return clone_maybe_hoisted(phony); // Leaf expression, just clone as is
+ },
+ [&](Default) {
+ TINT_ICE(AST, b.Diagnostics())
+ << "unhandled expression type: " << expr->TypeInfo().name;
+ return nullptr;
+ });
+ }
- // Handle for loops, as they are the only other AST node that
- // contains statements outside of BlockStatements.
- if (auto* fl = stmt->As<ast::ForLoopStatement>()) {
- if (auto* new_stmt = DecomposeStatement(fl->initializer)) {
- ctx.Replace(fl->initializer, new_stmt);
- }
- if (auto* new_stmt = DecomposeStatement(fl->continuing)) {
- ctx.Replace(fl->continuing, new_stmt);
- }
+ // Inserts statements in `stmts` before `stmt`
+ void InsertBefore(const ast::StatementList& stmts, const ast::Statement* stmt) {
+ if (!stmts.empty()) {
+ auto ip = utils::GetInsertionPoint(ctx, stmt);
+ for (auto* s : stmts) {
+ ctx.InsertBefore(ip.first->Declaration()->statements, ip.second, s);
}
- }
- return nullptr;
+ }
+ }
+
+ // Decomposes expressions of `stmt`, returning a replacement statement or
+ // nullptr if not replacing it.
+ const ast::Statement* DecomposeStatement(const ast::Statement* stmt) {
+ return Switch(
+ stmt,
+ [&](const ast::AssignmentStatement* s) -> const ast::Statement* {
+ if (!sem.Get(s->lhs)->HasSideEffects() && !sem.Get(s->rhs)->HasSideEffects()) {
+ return nullptr;
+ }
+ // rhs before lhs
+ ast::StatementList stmts;
+ ctx.Replace(s->rhs, Decompose(s->rhs, &stmts));
+ ctx.Replace(s->lhs, Decompose(s->lhs, &stmts));
+ InsertBefore(stmts, s);
+ return ctx.CloneWithoutTransform(s);
+ },
+ [&](const ast::CallStatement* s) -> const ast::Statement* {
+ if (!sem.Get(s->expr)->HasSideEffects()) {
+ return nullptr;
+ }
+ ast::StatementList stmts;
+ ctx.Replace(s->expr, Decompose(s->expr, &stmts));
+ InsertBefore(stmts, s);
+ return ctx.CloneWithoutTransform(s);
+ },
+ [&](const ast::ForLoopStatement* s) -> const ast::Statement* {
+ if (!s->condition || !sem.Get(s->condition)->HasSideEffects()) {
+ return nullptr;
+ }
+ ast::StatementList stmts;
+ ctx.Replace(s->condition, Decompose(s->condition, &stmts));
+ InsertBefore(stmts, s);
+ return ctx.CloneWithoutTransform(s);
+ },
+ [&](const ast::IfStatement* s) -> const ast::Statement* {
+ if (!sem.Get(s->condition)->HasSideEffects()) {
+ return nullptr;
+ }
+ ast::StatementList stmts;
+ ctx.Replace(s->condition, Decompose(s->condition, &stmts));
+ InsertBefore(stmts, s);
+ return ctx.CloneWithoutTransform(s);
+ },
+ [&](const ast::ReturnStatement* s) -> const ast::Statement* {
+ if (!s->value || !sem.Get(s->value)->HasSideEffects()) {
+ return nullptr;
+ }
+ ast::StatementList stmts;
+ ctx.Replace(s->value, Decompose(s->value, &stmts));
+ InsertBefore(stmts, s);
+ return ctx.CloneWithoutTransform(s);
+ },
+ [&](const ast::SwitchStatement* s) -> const ast::Statement* {
+ if (!sem.Get(s->condition)) {
+ return nullptr;
+ }
+ ast::StatementList stmts;
+ ctx.Replace(s->condition, Decompose(s->condition, &stmts));
+ InsertBefore(stmts, s);
+ return ctx.CloneWithoutTransform(s);
+ },
+ [&](const ast::VariableDeclStatement* s) -> const ast::Statement* {
+ auto* var = s->variable;
+ if (!var->constructor || !sem.Get(var->constructor)->HasSideEffects()) {
+ return nullptr;
+ }
+ ast::StatementList stmts;
+ ctx.Replace(var->constructor, Decompose(var->constructor, &stmts));
+ InsertBefore(stmts, s);
+ return b.Decl(ctx.CloneWithoutTransform(var));
+ },
+ [](Default) -> const ast::Statement* {
+ // Other statement types don't have expressions
+ return nullptr;
+ });
+ }
+
+ public:
+ explicit DecomposeState(CloneContext& ctx_in, ToHoistSet to_hoist_in)
+ : StateBase(ctx_in), to_hoist(std::move(to_hoist_in)) {}
+
+ void Run() {
+        // We replace all BlockStatements, as this allows us to iterate over their
+        // statements and ctx.InsertBefore hoisted declarations on them.
+ ctx.ReplaceAll([&](const ast::BlockStatement* block) -> const ast::Statement* {
+ for (auto* stmt : block->statements) {
+ if (auto* new_stmt = DecomposeStatement(stmt)) {
+ ctx.Replace(stmt, new_stmt);
+ }
+
+                // Handle for loops, as they are the only other AST nodes that
+                // contain statements outside of BlockStatements.
+ if (auto* fl = stmt->As<ast::ForLoopStatement>()) {
+ if (auto* new_stmt = DecomposeStatement(fl->initializer)) {
+ ctx.Replace(fl->initializer, new_stmt);
+ }
+ if (auto* new_stmt = DecomposeStatement(fl->continuing)) {
+ ctx.Replace(fl->continuing, new_stmt);
+ }
+ }
+ }
+ return nullptr;
});
- ctx.Clone();
- }
+ ctx.Clone();
+ }
};
-void DecomposeSideEffects::Run(CloneContext& ctx,
- const DataMap&,
- DataMap&) const {
- // First collect side-effecting expressions to hoist
- CollectHoistsState collect_hoists_state{ctx};
- auto to_hoist = collect_hoists_state.Run();
+void DecomposeSideEffects::Run(CloneContext& ctx, const DataMap&, DataMap&) const {
+ // First collect side-effecting expressions to hoist
+ CollectHoistsState collect_hoists_state{ctx};
+ auto to_hoist = collect_hoists_state.Run();
- // Now decompose these expressions
- DecomposeState decompose_state{ctx, std::move(to_hoist)};
- decompose_state.Run();
+ // Now decompose these expressions
+ DecomposeState decompose_state{ctx, std::move(to_hoist)};
+ decompose_state.Run();
}
} // namespace
@@ -708,14 +667,13 @@ void DecomposeSideEffects::Run(CloneContext& ctx,
PromoteSideEffectsToDecl::PromoteSideEffectsToDecl() = default;
PromoteSideEffectsToDecl::~PromoteSideEffectsToDecl() = default;
-Output PromoteSideEffectsToDecl::Run(const Program* program,
- const DataMap& data) const {
- transform::Manager manager;
- manager.Add<SimplifySideEffectStatements>();
- manager.Add<DecomposeSideEffects>();
+Output PromoteSideEffectsToDecl::Run(const Program* program, const DataMap& data) const {
+ transform::Manager manager;
+ manager.Add<SimplifySideEffectStatements>();
+ manager.Add<DecomposeSideEffects>();
- auto output = manager.Run(program, data);
- return output;
+ auto output = manager.Run(program, data);
+ return output;
}
} // namespace tint::transform
diff --git a/chromium/third_party/dawn/src/tint/transform/promote_side_effects_to_decl.h b/chromium/third_party/dawn/src/tint/transform/promote_side_effects_to_decl.h
index cdc9241b33d..1e629b344f2 100644
--- a/chromium/third_party/dawn/src/tint/transform/promote_side_effects_to_decl.h
+++ b/chromium/third_party/dawn/src/tint/transform/promote_side_effects_to_decl.h
@@ -23,21 +23,20 @@ namespace tint::transform {
/// declarations before the statement of usage with the goal of ensuring
/// left-to-right order of evaluation, while respecting short-circuit
/// evaluation.
-class PromoteSideEffectsToDecl
- : public Castable<PromoteSideEffectsToDecl, Transform> {
- public:
- /// Constructor
- PromoteSideEffectsToDecl();
-
- /// Destructor
- ~PromoteSideEffectsToDecl() override;
-
- protected:
- /// Runs the transform on `program`, returning the transformation result.
- /// @param program the source program to transform
- /// @param data optional extra transform-specific data
- /// @returns the transformation result
- Output Run(const Program* program, const DataMap& data = {}) const override;
+class PromoteSideEffectsToDecl : public Castable<PromoteSideEffectsToDecl, Transform> {
+ public:
+ /// Constructor
+ PromoteSideEffectsToDecl();
+
+ /// Destructor
+ ~PromoteSideEffectsToDecl() override;
+
+ protected:
+ /// Runs the transform on `program`, returning the transformation result.
+ /// @param program the source program to transform
+ /// @param data optional extra transform-specific data
+ /// @returns the transformation result
+ Output Run(const Program* program, const DataMap& data = {}) const override;
};
} // namespace tint::transform
diff --git a/chromium/third_party/dawn/src/tint/transform/promote_side_effects_to_decl_test.cc b/chromium/third_party/dawn/src/tint/transform/promote_side_effects_to_decl_test.cc
index 299d7066c84..9d9115fe19e 100644
--- a/chromium/third_party/dawn/src/tint/transform/promote_side_effects_to_decl_test.cc
+++ b/chromium/third_party/dawn/src/tint/transform/promote_side_effects_to_decl_test.cc
@@ -22,17 +22,17 @@ namespace {
using PromoteSideEffectsToDeclTest = TransformTest;
TEST_F(PromoteSideEffectsToDeclTest, EmptyModule) {
- auto* src = "";
- auto* expect = "";
+ auto* src = "";
+ auto* expect = "";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, Unary_Arith_SE) {
- auto* src = R"(
+ auto* src = R"(
fn a(i : i32) -> i32 {
return 1;
}
@@ -42,16 +42,16 @@ fn f() {
}
)";
- auto* expect = src;
+ auto* expect = src;
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, Binary_Arith_BothSE) {
- auto* src = R"(
+ auto* src = R"(
fn a() -> i32 {
return 1;
}
@@ -65,7 +65,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a() -> i32 {
return 1;
}
@@ -81,14 +81,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, Binary_Arith_LeftSE) {
- auto* src = R"(
+ auto* src = R"(
fn a() -> i32 {
return 1;
}
@@ -99,7 +99,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a() -> i32 {
return 1;
}
@@ -111,14 +111,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, Binary_Arith_RightSE) {
- auto* src = R"(
+ auto* src = R"(
fn a() -> i32 {
return 1;
}
@@ -129,7 +129,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a() -> i32 {
return 1;
}
@@ -142,14 +142,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, Binary_Arith_LeftmostSE) {
- auto* src = R"(
+ auto* src = R"(
fn a() -> i32 {
return 1;
}
@@ -162,7 +162,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a() -> i32 {
return 1;
}
@@ -176,14 +176,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, Binary_Arith_RightmostSE) {
- auto* src = R"(
+ auto* src = R"(
fn a() -> i32 {
return 1;
}
@@ -196,7 +196,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a() -> i32 {
return 1;
}
@@ -211,14 +211,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, Binary_Arith_MiddleSE) {
- auto* src = R"(
+ auto* src = R"(
fn a() -> i32 {
return 1;
}
@@ -232,7 +232,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a() -> i32 {
return 1;
}
@@ -248,14 +248,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, Binary_Arith_ThreeSE) {
- auto* src = R"(
+ auto* src = R"(
fn a(v : i32) -> i32 {
return v;
}
@@ -265,7 +265,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(v : i32) -> i32 {
return v;
}
@@ -278,14 +278,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, Binary_Arith_Constants_NoRecvSE) {
- auto* src = R"(
+ auto* src = R"(
fn a(i : i32) -> i32 {
return 1;
}
@@ -295,7 +295,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> i32 {
return 1;
}
@@ -306,14 +306,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, Binary_Arith_Constants_RecvSE) {
- auto* src = R"(
+ auto* src = R"(
fn a(i : i32) -> i32 {
return 1;
}
@@ -324,7 +324,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> i32 {
return 1;
}
@@ -338,14 +338,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, Binary_Arith_Constants_ConstAndSEAndVar) {
- auto* src = R"(
+ auto* src = R"(
fn a(i : i32) -> i32 {
return 1;
}
@@ -357,7 +357,7 @@ fn main() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> i32 {
return 1;
}
@@ -370,14 +370,14 @@ fn main() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, Binary_Arith_Constants_VarAndSEAndConst) {
- auto* src = R"(
+ auto* src = R"(
fn a(i : i32) -> i32 {
return 1;
}
@@ -388,7 +388,7 @@ fn main() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> i32 {
return 1;
}
@@ -401,15 +401,14 @@ fn main() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
-TEST_F(PromoteSideEffectsToDeclTest,
- Binary_Arith_Constants_SEAndVarAndConstAndVar) {
- auto* src = R"(
+TEST_F(PromoteSideEffectsToDeclTest, Binary_Arith_Constants_SEAndVarAndConstAndVar) {
+ auto* src = R"(
fn a(i : i32) -> i32 {
return 1;
}
@@ -421,7 +420,7 @@ fn main() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> i32 {
return 1;
}
@@ -434,14 +433,14 @@ fn main() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, Binary_Arith_Builtins_WithSE) {
- auto* src = R"(
+ auto* src = R"(
struct SB {
a : atomic<i32>,
}
@@ -454,7 +453,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct SB {
a : atomic<i32>,
}
@@ -468,14 +467,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, Binary_Arith_Builtins_NoSEAndVar) {
- auto* src = R"(
+ auto* src = R"(
struct SB {
a : atomic<i32>,
}
@@ -488,16 +487,16 @@ fn f() {
}
)";
- auto* expect = src;
+ auto* expect = src;
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, Binary_Arith_Builtins_NoSEAndSE) {
- auto* src = R"(
+ auto* src = R"(
struct SB {
a : atomic<i32>,
}
@@ -514,7 +513,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct SB {
a : atomic<i32>,
}
@@ -533,14 +532,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, Binary_Arith_Vector_RightSE) {
- auto* src = R"(
+ auto* src = R"(
fn a() -> i32 {
return 1;
}
@@ -552,7 +551,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a() -> i32 {
return 1;
}
@@ -567,14 +566,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, Binary_Arith_InCall) {
- auto* src = R"(
+ auto* src = R"(
fn a(i : i32) -> i32 {
return i;
}
@@ -589,7 +588,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> i32 {
return i;
}
@@ -609,14 +608,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, Binary_Arith_InTypeCtor) {
- auto* src = R"(
+ auto* src = R"(
fn a(i : i32) -> i32 {
return 1;
@@ -628,7 +627,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> i32 {
return 1;
}
@@ -646,14 +645,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, Binary_Arith_InTypeConversion) {
- auto* src = R"(
+ auto* src = R"(
fn a(i : i32) -> i32 {
return 1;
@@ -665,7 +664,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> i32 {
return 1;
}
@@ -680,14 +679,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, Binary_Arith_InIntrinsic) {
- auto* src = R"(
+ auto* src = R"(
fn a(i : i32) -> i32 {
return i;
@@ -699,7 +698,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> i32 {
return i;
}
@@ -717,14 +716,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, Binary_Arith_InMemberAccessor) {
- auto* src = R"(
+ auto* src = R"(
struct S {
v : i32,
@@ -740,7 +739,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct S {
v : i32,
}
@@ -758,14 +757,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, Binary_Arith_InUnary) {
- auto* src = R"(
+ auto* src = R"(
fn a(i : i32) -> i32 {
return i;
}
@@ -776,7 +775,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> i32 {
return i;
}
@@ -790,14 +789,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, Binary_Arith_InBitcast) {
- auto* src = R"(
+ auto* src = R"(
fn a(i : i32) -> i32 {
return i;
}
@@ -808,7 +807,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> i32 {
return i;
}
@@ -821,14 +820,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, Binary_Arith_InForLoopInit) {
- auto* src = R"(
+ auto* src = R"(
fn a(i : i32) -> i32 {
return i;
}
@@ -842,7 +841,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> i32 {
return i;
}
@@ -857,14 +856,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, Binary_Arith_InForLoopCond) {
- auto* src = R"(
+ auto* src = R"(
fn a(i : i32) -> i32 {
return i;
}
@@ -877,7 +876,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> i32 {
return i;
}
@@ -896,14 +895,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, Binary_Arith_InForLoopCont) {
- auto* src = R"(
+ auto* src = R"(
fn a(i : i32) -> i32 {
return i;
}
@@ -918,7 +917,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> i32 {
return i;
}
@@ -940,14 +939,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, Binary_Arith_InForLoopInitCondCont) {
- auto* src = R"(
+ auto* src = R"(
fn a(i : i32) -> i32 {
return i;
}
@@ -963,7 +962,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> i32 {
return i;
}
@@ -994,14 +993,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, Binary_Arith_InElseIf) {
- auto* src = R"(
+ auto* src = R"(
fn a(i : i32) -> i32 {
return i;
}
@@ -1016,7 +1015,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> i32 {
return i;
}
@@ -1034,14 +1033,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, Binary_Arith_InElseIfChain) {
- auto* src = R"(
+ auto* src = R"(
fn a(i : i32) -> i32 {
return i;
}
@@ -1064,7 +1063,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> i32 {
return i;
}
@@ -1094,14 +1093,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, Binary_Arith_InReturn) {
- auto* src = R"(
+ auto* src = R"(
fn a(i : i32) -> i32 {
return i;
}
@@ -1112,7 +1111,7 @@ fn f() -> i32 {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> i32 {
return i;
}
@@ -1125,14 +1124,14 @@ fn f() -> i32 {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, Binary_Arith_InSwitch) {
- auto* src = R"(
+ auto* src = R"(
fn a(i : i32) -> i32 {
return i;
}
@@ -1146,7 +1145,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> i32 {
return i;
}
@@ -1162,14 +1161,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, Binary_Logical_LeftSE) {
- auto* src = R"(
+ auto* src = R"(
fn a(i : i32) -> bool {
return true;
}
@@ -1180,7 +1179,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> bool {
return true;
}
@@ -1195,14 +1194,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, Binary_Logical_RightSE) {
- auto* src = R"(
+ auto* src = R"(
fn a(i : i32) -> bool {
return true;
}
@@ -1213,7 +1212,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> bool {
return true;
}
@@ -1228,14 +1227,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, Binary_Logical_BothSE) {
- auto* src = R"(
+ auto* src = R"(
fn a(i : i32) -> bool {
return true;
}
@@ -1245,7 +1244,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> bool {
return true;
}
@@ -1259,14 +1258,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, Binary_Logical_LeftmostSE) {
- auto* src = R"(
+ auto* src = R"(
fn a(i : i32) -> bool {
return true;
}
@@ -1279,7 +1278,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> bool {
return true;
}
@@ -1304,14 +1303,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, Binary_Logical_RightmostSE) {
- auto* src = R"(
+ auto* src = R"(
fn a(i : i32) -> bool {
return true;
}
@@ -1324,7 +1323,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> bool {
return true;
}
@@ -1341,14 +1340,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, Binary_Logical_MiddleSE) {
- auto* src = R"(
+ auto* src = R"(
fn a(i : i32) -> bool {
return true;
}
@@ -1361,7 +1360,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> bool {
return true;
}
@@ -1386,14 +1385,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, Binary_Logical_Constants_NoRecvSE) {
- auto* src = R"(
+ auto* src = R"(
fn a(i : i32) -> bool {
return true;
}
@@ -1403,7 +1402,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> bool {
return true;
}
@@ -1425,14 +1424,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, Binary_Logical_Constants_RecvSE) {
- auto* src = R"(
+ auto* src = R"(
fn a(i : i32) -> bool {
return true;
}
@@ -1443,7 +1442,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> bool {
return true;
}
@@ -1466,15 +1465,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
-TEST_F(PromoteSideEffectsToDeclTest,
- Binary_Logical_Constants_ConstAndSEAndVar) {
- auto* src = R"(
+TEST_F(PromoteSideEffectsToDeclTest, Binary_Logical_Constants_ConstAndSEAndVar) {
+ auto* src = R"(
fn a(i : i32) -> bool {
return true;
}
@@ -1486,7 +1484,7 @@ fn main() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> bool {
return true;
}
@@ -1506,15 +1504,14 @@ fn main() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
-TEST_F(PromoteSideEffectsToDeclTest,
- Binary_Logical_Constants_VarAndSEAndConst) {
- auto* src = R"(
+TEST_F(PromoteSideEffectsToDeclTest, Binary_Logical_Constants_VarAndSEAndConst) {
+ auto* src = R"(
fn a(i : i32) -> bool {
return true;
}
@@ -1525,7 +1522,7 @@ fn main() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> bool {
return true;
}
@@ -1544,15 +1541,14 @@ fn main() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
-TEST_F(PromoteSideEffectsToDeclTest,
- Binary_Logical_Constants_SEAndVarAndConstAndVar) {
- auto* src = R"(
+TEST_F(PromoteSideEffectsToDeclTest, Binary_Logical_Constants_SEAndVarAndConstAndVar) {
+ auto* src = R"(
fn a(i : i32) -> bool {
return true;
}
@@ -1564,7 +1560,7 @@ fn main() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> bool {
return true;
}
@@ -1588,14 +1584,14 @@ fn main() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, Binary_Logical_MixedSE) {
- auto* src = R"(
+ auto* src = R"(
fn a(i : i32) -> bool {
return true;
}
@@ -1608,7 +1604,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> bool {
return true;
}
@@ -1645,14 +1641,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, Binary_Logical_NestedAnds) {
- auto* src = R"(
+ auto* src = R"(
fn a(i : i32) -> bool {
return true;
}
@@ -1662,7 +1658,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> bool {
return true;
}
@@ -1692,14 +1688,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, Binary_Logical_NestedOrs) {
- auto* src = R"(
+ auto* src = R"(
fn a(i : i32) -> bool {
return true;
}
@@ -1709,7 +1705,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> bool {
return true;
}
@@ -1739,14 +1735,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, Binary_Logical_MultipleStatements) {
- auto* src = R"(
+ auto* src = R"(
fn a(i : i32) -> bool {
return true;
}
@@ -1758,7 +1754,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> bool {
return true;
}
@@ -1778,14 +1774,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, Binary_Logical_InCall) {
- auto* src = R"(
+ auto* src = R"(
fn a(i : i32) -> bool {
return true;
}
@@ -1800,7 +1796,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> bool {
return true;
}
@@ -1819,14 +1815,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, Binary_Logical_InTypeCtor) {
- auto* src = R"(
+ auto* src = R"(
fn a(i : i32) -> bool {
return true;
@@ -1838,7 +1834,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> bool {
return true;
}
@@ -1866,14 +1862,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, Binary_Logical_InTypeConversion) {
- auto* src = R"(
+ auto* src = R"(
fn a(i : i32) -> i32 {
return 1;
@@ -1885,7 +1881,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> i32 {
return 1;
}
@@ -1906,16 +1902,16 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
// Make sure we process logical binary expressions of non-logical binary
// expressions.
TEST_F(PromoteSideEffectsToDeclTest, Binary_Logical_OfNonLogical) {
- auto* src = R"(
+ auto* src = R"(
fn a(i : i32) -> i32 {
return 1;
@@ -1927,7 +1923,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> i32 {
return 1;
}
@@ -1944,14 +1940,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, Binary_Logical_InIntrinsic) {
- auto* src = R"(
+ auto* src = R"(
fn a(i : i32) -> bool {
return true;
@@ -1963,7 +1959,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> bool {
return true;
}
@@ -1991,14 +1987,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, Binary_Logical_InMemberAccessor) {
- auto* src = R"(
+ auto* src = R"(
struct S {
v : bool,
@@ -2014,7 +2010,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct S {
v : bool,
}
@@ -2039,14 +2035,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, Binary_Logical_InUnary) {
- auto* src = R"(
+ auto* src = R"(
fn a(i : i32) -> bool {
return true;
@@ -2058,7 +2054,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> bool {
return true;
}
@@ -2073,14 +2069,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, Binary_Logical_InBitcast) {
- auto* src = R"(
+ auto* src = R"(
fn a(i : i32) -> bool {
return true;
}
@@ -2091,7 +2087,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> bool {
return true;
}
@@ -2106,14 +2102,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, Binary_Logical_InForLoopInit) {
- auto* src = R"(
+ auto* src = R"(
fn a(i : i32) -> bool {
return true;
}
@@ -2127,7 +2123,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> bool {
return true;
}
@@ -2145,14 +2141,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, Binary_Logical_InForLoopCond) {
- auto* src = R"(
+ auto* src = R"(
fn a(i : i32) -> bool {
return true;
}
@@ -2165,7 +2161,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> bool {
return true;
}
@@ -2187,14 +2183,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, Binary_Logical_InForLoopCont) {
- auto* src = R"(
+ auto* src = R"(
fn a(i : i32) -> bool {
return true;
}
@@ -2209,7 +2205,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> bool {
return true;
}
@@ -2234,14 +2230,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, Binary_Logical_InForLoopInitCondCont) {
- auto* src = R"(
+ auto* src = R"(
fn a(i : i32) -> bool {
return true;
}
@@ -2257,7 +2253,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> bool {
return true;
}
@@ -2297,14 +2293,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, Binary_Logical_InElseIf) {
- auto* src = R"(
+ auto* src = R"(
fn a(i : i32) -> bool {
return true;
}
@@ -2319,7 +2315,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> bool {
return true;
}
@@ -2340,14 +2336,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, Binary_Logical_InElseIfChain) {
- auto* src = R"(
+ auto* src = R"(
fn a(i : i32) -> bool {
return true;
}
@@ -2370,7 +2366,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> bool {
return true;
}
@@ -2405,14 +2401,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, Call_NoSE) {
- auto* src = R"(
+ auto* src = R"(
fn a(i : i32) -> i32 {
return 1;
}
@@ -2428,16 +2424,16 @@ fn f() {
}
)";
- auto* expect = src;
+ auto* expect = src;
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, Call_OneSE) {
- auto* src = R"(
+ auto* src = R"(
fn a(i : i32) -> i32 {
return 1;
}
@@ -2452,7 +2448,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> i32 {
return 1;
}
@@ -2468,14 +2464,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, Call_AllSE) {
- auto* src = R"(
+ auto* src = R"(
fn a(i : i32) -> i32 {
return 1;
}
@@ -2489,7 +2485,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> i32 {
return 1;
}
@@ -2506,14 +2502,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, Call_MiddleNotSE) {
- auto* src = R"(
+ auto* src = R"(
fn a(i : i32) -> i32 {
return 1;
}
@@ -2529,7 +2525,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> i32 {
return 1;
}
@@ -2547,14 +2543,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, Call_InBinary) {
- auto* src = R"(
+ auto* src = R"(
fn a(i : i32) -> i32 {
return 1;
}
@@ -2571,7 +2567,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> i32 {
return 1;
}
@@ -2593,14 +2589,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, IndexAccessor_2D_LeftSE) {
- auto* src = R"(
+ auto* src = R"(
fn a(i : i32) -> i32 {
return 1;
}
@@ -2612,7 +2608,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> i32 {
return 1;
}
@@ -2625,14 +2621,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, IndexAccessor_2D_RightSE) {
- auto* src = R"(
+ auto* src = R"(
fn a(i : i32) -> i32 {
return 1;
}
@@ -2646,7 +2642,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> i32 {
return 1;
}
@@ -2660,14 +2656,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, IndexAccessor_2D_BothSE) {
- auto* src = R"(
+ auto* src = R"(
fn a(i : i32) -> i32 {
return 1;
}
@@ -2678,7 +2674,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> i32 {
return 1;
}
@@ -2691,14 +2687,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, Assignment_ToPhony) {
- auto* src = R"(
+ auto* src = R"(
fn a(i : i32) -> i32 {
return 1;
}
@@ -2708,7 +2704,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> i32 {
return 1;
}
@@ -2718,14 +2714,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, Assignment_ToArray1D) {
- auto* src = R"(
+ auto* src = R"(
fn a(i : i32) -> i32 {
return 1;
}
@@ -2736,7 +2732,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> i32 {
return 1;
}
@@ -2749,14 +2745,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, Assignment_ToArray2D) {
- auto* src = R"(
+ auto* src = R"(
fn a(i : i32) -> i32 {
return 1;
}
@@ -2767,7 +2763,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> i32 {
return 1;
}
@@ -2781,14 +2777,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, Assignment_ToArray3D) {
- auto* src = R"(
+ auto* src = R"(
fn a(i : i32) -> i32 {
return 1;
}
@@ -2799,7 +2795,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> i32 {
return 1;
}
@@ -2814,14 +2810,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, Assignment_ToArray_FromArray) {
- auto* src = R"(
+ auto* src = R"(
fn a(i : i32) -> i32 {
return 1;
}
@@ -2834,7 +2830,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> i32 {
return 1;
}
@@ -2851,14 +2847,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, Assignment_ToVec_BothSE) {
- auto* src = R"(
+ auto* src = R"(
fn a(i : i32) -> i32 {
return 1;
}
@@ -2869,7 +2865,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> i32 {
return 1;
}
@@ -2882,14 +2878,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, Assignment_ToVec_LeftSE) {
- auto* src = R"(
+ auto* src = R"(
fn a(i : i32) -> i32 {
return 1;
}
@@ -2901,7 +2897,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> i32 {
return 1;
}
@@ -2915,14 +2911,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, Assignment_ToVec_RightSE) {
- auto* src = R"(
+ auto* src = R"(
fn a(i : i32) -> i32 {
return 1;
}
@@ -2934,7 +2930,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> i32 {
return 1;
}
@@ -2947,14 +2943,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, TypeConstructor_Struct) {
- auto* src = R"(
+ auto* src = R"(
fn a(i : i32) -> i32 {
return 1;
}
@@ -2970,7 +2966,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> i32 {
return 1;
}
@@ -2989,14 +2985,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, TypeConstructor_Array1D) {
- auto* src = R"(
+ auto* src = R"(
fn a(i : i32) -> i32 {
return 1;
}
@@ -3006,7 +3002,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> i32 {
return 1;
}
@@ -3019,14 +3015,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, TypeConstructor_Array2D) {
- auto* src = R"(
+ auto* src = R"(
fn a(i : i32) -> i32 {
return 1;
}
@@ -3036,7 +3032,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> i32 {
return 1;
}
@@ -3052,14 +3048,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, MemberAccessor_Vec) {
- auto* src = R"(
+ auto* src = R"(
fn a(i : i32) -> vec3<i32> {
return vec3<i32>();
}
@@ -3069,7 +3065,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> vec3<i32> {
return vec3<i32>();
}
@@ -3081,14 +3077,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, MemberAccessor_Struct) {
- auto* src = R"(
+ auto* src = R"(
struct S {
x : i32,
y : i32,
@@ -3103,7 +3099,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct S {
x : i32,
y : i32,
@@ -3120,14 +3116,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, MemberAccessor_Struct_Mixed) {
- auto* src = R"(
+ auto* src = R"(
struct S {
x : i32,
y : i32,
@@ -3152,7 +3148,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct S {
x : i32,
y : i32,
@@ -3184,14 +3180,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, IndexAccessor_Plus_SE) {
- auto* src = R"(
+ auto* src = R"(
fn a(i : i32) -> i32 {
return i;
}
@@ -3202,7 +3198,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> i32 {
return i;
}
@@ -3215,14 +3211,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, IndexAccessor_Of_SE) {
- auto* src = R"(
+ auto* src = R"(
fn a(i : i32) -> i32 {
return i;
}
@@ -3233,7 +3229,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> i32 {
return i;
}
@@ -3245,14 +3241,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, IndexAccessor2_Of_LeftSE) {
- auto* src = R"(
+ auto* src = R"(
fn a(i : i32) -> i32 {
return i;
}
@@ -3263,7 +3259,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> i32 {
return i;
}
@@ -3275,14 +3271,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, IndexAccessor2_Of_RightSE) {
- auto* src = R"(
+ auto* src = R"(
fn a(i : i32) -> i32 {
return i;
}
@@ -3293,7 +3289,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> i32 {
return i;
}
@@ -3305,14 +3301,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, IndexAccessor2_Of_SEAndVar) {
- auto* src = R"(
+ auto* src = R"(
fn a(i : i32) -> i32 {
return i;
}
@@ -3324,7 +3320,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> i32 {
return i;
}
@@ -3337,14 +3333,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, IndexAccessor2_Of_VarAndSE) {
- auto* src = R"(
+ auto* src = R"(
fn a(i : i32) -> i32 {
return i;
}
@@ -3356,7 +3352,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> i32 {
return i;
}
@@ -3370,14 +3366,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, IndexAccessorOfVar_Plus_SE) {
- auto* src = R"(
+ auto* src = R"(
fn a(i : i32) -> i32 {
return i;
}
@@ -3389,7 +3385,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> i32 {
return i;
}
@@ -3404,14 +3400,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, IndexAccessor_Plus_IndexAccessorOfSE) {
- auto* src = R"(
+ auto* src = R"(
fn a(i : i32) -> i32 {
return i;
}
@@ -3422,7 +3418,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> i32 {
return i;
}
@@ -3435,15 +3431,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
-TEST_F(PromoteSideEffectsToDeclTest,
- AssignTo_IndexAccessorOfIndexAccessorOfSE) {
- auto* src = R"(
+TEST_F(PromoteSideEffectsToDeclTest, AssignTo_IndexAccessorOfIndexAccessorOfSE) {
+ auto* src = R"(
fn a(i : i32) -> i32 {
return i;
}
@@ -3455,7 +3450,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> i32 {
return i;
}
@@ -3468,15 +3463,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
-TEST_F(PromoteSideEffectsToDeclTest,
- AssignTo_IndexAccessorOfIndexAccessorOfLiteralPlusSE) {
- auto* src = R"(
+TEST_F(PromoteSideEffectsToDeclTest, AssignTo_IndexAccessorOfIndexAccessorOfLiteralPlusSE) {
+ auto* src = R"(
fn a(i : i32) -> i32 {
return i;
}
@@ -3488,7 +3482,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> i32 {
return i;
}
@@ -3502,15 +3496,15 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest,
AssignTo_IndexAccessorOfIndexAccessorOfLiteralPlusIndexAccessorOfSE) {
- auto* src = R"(
+ auto* src = R"(
fn a(i : i32) -> i32 {
return i;
}
@@ -3522,7 +3516,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> i32 {
return i;
}
@@ -3536,14 +3530,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, IndexAccessorOfLhsSERhsSE) {
- auto* src = R"(
+ auto* src = R"(
fn a(i : i32) -> i32 {
return i;
}
@@ -3557,7 +3551,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> i32 {
return i;
}
@@ -3573,14 +3567,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, BinaryIndexAccessorOfLhsSERhsSE) {
- auto* src = R"(
+ auto* src = R"(
fn a(i : i32) -> i32 {
return i;
}
@@ -3594,7 +3588,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> i32 {
return i;
}
@@ -3610,16 +3604,16 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, BinaryMemberAccessorPlusSE) {
- // bclayton@'s example:
- // https://dawn-review.googlesource.com/c/tint/+/78620/6..8/src/transform/promote_side_effects_to_decl.cc#b490
- auto* src = R"(
+ // bclayton@'s example:
+ // https://dawn-review.googlesource.com/c/tint/+/78620/6..8/src/transform/promote_side_effects_to_decl.cc#b490
+ auto* src = R"(
fn modify_vec(p : ptr<function, vec4<i32>>) -> i32 {
(*p).x = 42;
return 0;
@@ -3632,7 +3626,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn modify_vec(p : ptr<function, vec4<i32>>) -> i32 {
(*(p)).x = 42;
return 0;
@@ -3646,15 +3640,15 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, Call_ReadOnlyArgAndSE) {
- // Make sure that read-only args don't get hoisted (tex and samp)
- auto* src = R"(
+ // Make sure that read-only args don't get hoisted (tex and samp)
+ auto* src = R"(
@group(1) @binding(1) var tex: texture_2d_array<u32>;
@group(1) @binding(2) var samp: sampler;
@@ -3667,7 +3661,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
@group(1) @binding(1) var tex : texture_2d_array<u32>;
@group(1) @binding(2) var samp : sampler;
@@ -3682,15 +3676,15 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, Call_PtrArgAndSE) {
- // Make sure that read-only args don't get hoisted (tex and samp)
- auto* src = R"(
+ // Make sure that read-only args don't get hoisted (tex and samp)
+ auto* src = R"(
var<private> b : i32 = 0;
@@ -3710,7 +3704,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
var<private> b : i32 = 0;
fn a(i : i32) -> i32 {
@@ -3728,14 +3722,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, TypeCtor_VarPlusI32CtorPlusVar) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
var b = 0;
var c = 0;
@@ -3744,16 +3738,16 @@ fn f() {
}
)";
- auto* expect = src;
+ auto* expect = src;
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, Binary_Mixed_ArithPlusLogical) {
- auto* src = R"(
+ auto* src = R"(
fn a(i : i32) -> i32 {
return 0;
}
@@ -3771,7 +3765,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> i32 {
return 0;
}
@@ -3795,14 +3789,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, Binary_Mixed_LogicalPlusArith) {
- auto* src = R"(
+ auto* src = R"(
fn a(i : i32) -> i32 {
return 0;
}
@@ -3820,7 +3814,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> i32 {
return 0;
}
@@ -3844,14 +3838,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, Binary_Mixed_ArithAndLogicalArgs) {
- auto* src = R"(
+ auto* src = R"(
fn a(i : i32) -> i32 {
return 0;
}
@@ -3869,7 +3863,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> i32 {
return 0;
}
@@ -3892,14 +3886,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, Binary_Mixed_LogicalAndArithArgs) {
- auto* src = R"(
+ auto* src = R"(
fn a(i : i32) -> i32 {
return 0;
}
@@ -3917,7 +3911,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> i32 {
return 0;
}
@@ -3940,14 +3934,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(PromoteSideEffectsToDeclTest, Binary_Mixed_Complex) {
- auto* src = R"(
+ auto* src = R"(
fn a(i : i32) -> i32 {
return 0;
}
@@ -3969,7 +3963,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(i : i32) -> i32 {
return 0;
}
@@ -4001,10 +3995,10 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
} // namespace
} // namespace tint::transform
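
The block of tests above all drive the PromoteSideEffectsToDecl transform. Conceptually, it hoists side-effecting sub-expressions (typically function calls) out of a larger expression into their own declarations ahead of the statement, so later stages can rely on a well-defined evaluation order. A rough WGSL sketch of the idea, assuming a toy side-effecting function a(); the hoisted name and the exact output shape are illustrative rather than the transform's literal output:

Input:

fn a(i : i32) -> i32 {
  return i;
}

fn g(x : i32, y : i32) {
}

fn f() {
  var v = 1;
  g(a(0), v);
}

Conceptual output:

fn a(i : i32) -> i32 {
  return i;
}

fn g(x : i32, y : i32) {
}

fn f() {
  var v = 1;
  let tint_symbol = a(0);
  g(tint_symbol, v);
}

For the logical operators the transform goes further: with an expression like a(0) && a(1), the right-hand call is moved into an if on the left-hand result so that WGSL's short-circuit semantics survive the hoisting, which is what the Binary_Logical_* tests above exercise.
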
diff --git a/chromium/third_party/dawn/src/tint/transform/remove_continue_in_switch.cc b/chromium/third_party/dawn/src/tint/transform/remove_continue_in_switch.cc
index 9ee05c04d9a..5c2413e1d47 100644
--- a/chromium/third_party/dawn/src/tint/transform/remove_continue_in_switch.cc
+++ b/chromium/third_party/dawn/src/tint/transform/remove_continue_in_switch.cc
@@ -34,95 +34,89 @@ namespace tint::transform {
namespace {
class State {
- private:
- CloneContext& ctx;
- ProgramBuilder& b;
- const sem::Info& sem;
-
- // Map of switch statement to 'tint_continue' variable.
- std::unordered_map<const ast::SwitchStatement*, Symbol>
- switch_to_cont_var_name;
-
- // If `cont` is within a switch statement within a loop, returns a pointer to
- // that switch statement.
- static const ast::SwitchStatement* GetParentSwitchInLoop(
- const sem::Info& sem,
- const ast::ContinueStatement* cont) {
- // Find whether first parent is a switch or a loop
- auto* sem_stmt = sem.Get(cont);
- auto* sem_parent =
- sem_stmt->FindFirstParent<sem::SwitchStatement, sem::LoopBlockStatement,
- sem::ForLoopStatement>();
- if (!sem_parent) {
- return nullptr;
+ private:
+ CloneContext& ctx;
+ ProgramBuilder& b;
+ const sem::Info& sem;
+
+ // Map of switch statement to 'tint_continue' variable.
+ std::unordered_map<const ast::SwitchStatement*, Symbol> switch_to_cont_var_name;
+
+ // If `cont` is within a switch statement within a loop, returns a pointer to
+ // that switch statement.
+ static const ast::SwitchStatement* GetParentSwitchInLoop(const sem::Info& sem,
+ const ast::ContinueStatement* cont) {
+ // Find whether first parent is a switch or a loop
+ auto* sem_stmt = sem.Get(cont);
+ auto* sem_parent = sem_stmt->FindFirstParent<sem::SwitchStatement, sem::LoopBlockStatement,
+ sem::ForLoopStatement>();
+ if (!sem_parent) {
+ return nullptr;
+ }
+ return sem_parent->Declaration()->As<ast::SwitchStatement>();
}
- return sem_parent->Declaration()->As<ast::SwitchStatement>();
- }
-
- public:
- /// Constructor
- /// @param ctx_in the context
- explicit State(CloneContext& ctx_in)
- : ctx(ctx_in), b(*ctx_in.dst), sem(ctx_in.src->Sem()) {}
-
- /// Returns true if this transform should be run for the given program
- static bool ShouldRun(const Program* program) {
- for (auto* node : program->ASTNodes().Objects()) {
- auto* stmt = node->As<ast::ContinueStatement>();
- if (!stmt) {
- continue;
- }
- if (GetParentSwitchInLoop(program->Sem(), stmt)) {
- return true;
- }
- }
- return false;
- }
-
- /// Runs the transform
- void Run() {
- for (auto* node : ctx.src->ASTNodes().Objects()) {
- auto* cont = node->As<ast::ContinueStatement>();
- if (!cont) {
- continue;
- }
-
- // If first parent is not a switch within a loop, skip
- auto* switch_stmt = GetParentSwitchInLoop(sem, cont);
- if (!switch_stmt) {
- continue;
- }
-
- auto cont_var_name =
- tint::utils::GetOrCreate(switch_to_cont_var_name, switch_stmt, [&]() {
- // Create and insert 'var tint_continue : bool = false;' before the
- // switch.
- auto var_name = b.Symbols().New("tint_continue");
- auto* decl = b.Decl(b.Var(var_name, b.ty.bool_(), b.Expr(false)));
- auto ip = utils::GetInsertionPoint(ctx, switch_stmt);
- ctx.InsertBefore(ip.first->Declaration()->statements, ip.second,
- decl);
-
- // Create and insert 'if (tint_continue) { continue; }' after
- // switch.
- auto* if_stmt = b.If(b.Expr(var_name), b.Block(b.Continue()));
- ctx.InsertAfter(ip.first->Declaration()->statements, ip.second,
- if_stmt);
-
- // Return the new var name
- return var_name;
- });
-
- // Replace 'continue;' with '{ tint_continue = true; break; }'
- auto* new_stmt = b.Block( //
- b.Assign(b.Expr(cont_var_name), true), //
- b.Break());
-
- ctx.Replace(cont, new_stmt);
+
+ public:
+ /// Constructor
+ /// @param ctx_in the context
+ explicit State(CloneContext& ctx_in) : ctx(ctx_in), b(*ctx_in.dst), sem(ctx_in.src->Sem()) {}
+
+ /// Returns true if this transform should be run for the given program
+ static bool ShouldRun(const Program* program) {
+ for (auto* node : program->ASTNodes().Objects()) {
+ auto* stmt = node->As<ast::ContinueStatement>();
+ if (!stmt) {
+ continue;
+ }
+ if (GetParentSwitchInLoop(program->Sem(), stmt)) {
+ return true;
+ }
+ }
+ return false;
}
- ctx.Clone();
- }
+ /// Runs the transform
+ void Run() {
+ for (auto* node : ctx.src->ASTNodes().Objects()) {
+ auto* cont = node->As<ast::ContinueStatement>();
+ if (!cont) {
+ continue;
+ }
+
+ // If first parent is not a switch within a loop, skip
+ auto* switch_stmt = GetParentSwitchInLoop(sem, cont);
+ if (!switch_stmt) {
+ continue;
+ }
+
+ auto cont_var_name =
+ tint::utils::GetOrCreate(switch_to_cont_var_name, switch_stmt, [&]() {
+ // Create and insert 'var tint_continue : bool = false;' before the
+ // switch.
+ auto var_name = b.Symbols().New("tint_continue");
+ auto* decl = b.Decl(b.Var(var_name, b.ty.bool_(), b.Expr(false)));
+ auto ip = utils::GetInsertionPoint(ctx, switch_stmt);
+ ctx.InsertBefore(ip.first->Declaration()->statements, ip.second, decl);
+
+ // Create and insert 'if (tint_continue) { continue; }' after
+ // switch.
+ auto* if_stmt = b.If(b.Expr(var_name), b.Block(b.Continue()));
+ ctx.InsertAfter(ip.first->Declaration()->statements, ip.second, if_stmt);
+
+ // Return the new var name
+ return var_name;
+ });
+
+ // Replace 'continue;' with '{ tint_continue = true; break; }'
+ auto* new_stmt = b.Block( //
+ b.Assign(b.Expr(cont_var_name), true), //
+ b.Break());
+
+ ctx.Replace(cont, new_stmt);
+ }
+
+ ctx.Clone();
+ }
};
} // namespace
@@ -130,16 +124,13 @@ class State {
RemoveContinueInSwitch::RemoveContinueInSwitch() = default;
RemoveContinueInSwitch::~RemoveContinueInSwitch() = default;
-bool RemoveContinueInSwitch::ShouldRun(const Program* program,
- const DataMap& /*data*/) const {
- return State::ShouldRun(program);
+bool RemoveContinueInSwitch::ShouldRun(const Program* program, const DataMap& /*data*/) const {
+ return State::ShouldRun(program);
}
-void RemoveContinueInSwitch::Run(CloneContext& ctx,
- const DataMap&,
- DataMap&) const {
- State state(ctx);
- state.Run();
+void RemoveContinueInSwitch::Run(CloneContext& ctx, const DataMap&, DataMap&) const {
+ State state(ctx);
+ state.Run();
}
} // namespace tint::transform
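
The comments in Run() above spell out the whole rewrite: declare 'var tint_continue : bool = false;' immediately before a switch (inside a loop) that contains a 'continue', replace each such 'continue;' with '{ tint_continue = true; break; }', and insert 'if (tint_continue) { continue; }' right after the switch. A rough WGSL sketch of the effect, assuming the loop/switch shape used by the tests (the variable name comes from b.Symbols().New("tint_continue"); the exact formatting is illustrative):

Input:

fn f() {
  var i = 0;
  loop {
    switch(i) {
      case 0: {
        continue;
      }
      default: {
      }
    }
    break;
  }
}

Conceptual output:

fn f() {
  var i = 0;
  loop {
    var tint_continue : bool = false;
    switch(i) {
      case 0: {
        tint_continue = true;
        break;
      }
      default: {
      }
    }
    if (tint_continue) {
      continue;
    }
    break;
  }
}
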
diff --git a/chromium/third_party/dawn/src/tint/transform/remove_continue_in_switch.h b/chromium/third_party/dawn/src/tint/transform/remove_continue_in_switch.h
index f8756605b62..e7062255008 100644
--- a/chromium/third_party/dawn/src/tint/transform/remove_continue_in_switch.h
+++ b/chromium/third_party/dawn/src/tint/transform/remove_continue_in_switch.h
@@ -23,31 +23,27 @@ namespace tint::transform {
/// bool variable, and checking if the variable is set after the switch to
/// continue. It is necessary to work around FXC "error X3708: continue cannot
/// be used in a switch". See crbug.com/tint/1080.
-class RemoveContinueInSwitch
- : public Castable<RemoveContinueInSwitch, Transform> {
- public:
- /// Constructor
- RemoveContinueInSwitch();
-
- /// Destructor
- ~RemoveContinueInSwitch() override;
-
- protected:
- /// @param program the program to inspect
- /// @param data optional extra transform-specific input data
- /// @returns true if this transform should be run for the given program
- bool ShouldRun(const Program* program,
- const DataMap& data = {}) const override;
-
- /// Runs the transform using the CloneContext built for transforming a
- /// program. Run() is responsible for calling Clone() on the CloneContext.
- /// @param ctx the CloneContext primed with the input program and
- /// ProgramBuilder
- /// @param inputs optional extra transform-specific input data
- /// @param outputs optional extra transform-specific output data
- void Run(CloneContext& ctx,
- const DataMap& inputs,
- DataMap& outputs) const override;
+class RemoveContinueInSwitch : public Castable<RemoveContinueInSwitch, Transform> {
+ public:
+ /// Constructor
+ RemoveContinueInSwitch();
+
+ /// Destructor
+ ~RemoveContinueInSwitch() override;
+
+ protected:
+ /// @param program the program to inspect
+ /// @param data optional extra transform-specific input data
+ /// @returns true if this transform should be run for the given program
+ bool ShouldRun(const Program* program, const DataMap& data = {}) const override;
+
+ /// Runs the transform using the CloneContext built for transforming a
+ /// program. Run() is responsible for calling Clone() on the CloneContext.
+ /// @param ctx the CloneContext primed with the input program and
+ /// ProgramBuilder
+ /// @param inputs optional extra transform-specific input data
+ /// @param outputs optional extra transform-specific output data
+ void Run(CloneContext& ctx, const DataMap& inputs, DataMap& outputs) const override;
};
} // namespace tint::transform
diff --git a/chromium/third_party/dawn/src/tint/transform/remove_continue_in_switch_test.cc b/chromium/third_party/dawn/src/tint/transform/remove_continue_in_switch_test.cc
index 70f167debb4..a1e7b6e1e1d 100644
--- a/chromium/third_party/dawn/src/tint/transform/remove_continue_in_switch_test.cc
+++ b/chromium/third_party/dawn/src/tint/transform/remove_continue_in_switch_test.cc
@@ -21,7 +21,7 @@ namespace {
using RemoveContinueInSwitchTest = TransformTest;
TEST_F(RemoveContinueInSwitchTest, ShouldRun_True) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
var i = 0;
loop {
@@ -39,17 +39,17 @@ fn f() {
}
)";
- EXPECT_TRUE(ShouldRun<RemoveContinueInSwitch>(src));
+ EXPECT_TRUE(ShouldRun<RemoveContinueInSwitch>(src));
}
TEST_F(RemoveContinueInSwitchTest, ShouldRunEmptyModule_False) {
- auto* src = "";
+ auto* src = "";
- EXPECT_FALSE(ShouldRun<RemoveContinueInSwitch>(src));
+ EXPECT_FALSE(ShouldRun<RemoveContinueInSwitch>(src));
}
TEST_F(RemoveContinueInSwitchTest, ShouldRunContinueNotInSwitch_False) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
var i = 0;
loop {
@@ -70,11 +70,11 @@ fn f() {
}
)";
- EXPECT_FALSE(ShouldRun<RemoveContinueInSwitch>(src));
+ EXPECT_FALSE(ShouldRun<RemoveContinueInSwitch>(src));
}
TEST_F(RemoveContinueInSwitchTest, ShouldRunContinueInLoopInSwitch_False) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
var i = 0;
switch(i) {
@@ -94,21 +94,21 @@ fn f() {
}
)";
- EXPECT_FALSE(ShouldRun<RemoveContinueInSwitch>(src));
+ EXPECT_FALSE(ShouldRun<RemoveContinueInSwitch>(src));
}
TEST_F(RemoveContinueInSwitchTest, EmptyModule) {
- auto* src = "";
- auto* expect = src;
+ auto* src = "";
+ auto* expect = src;
- DataMap data;
- auto got = Run<RemoveContinueInSwitch>(src, data);
+ DataMap data;
+ auto got = Run<RemoveContinueInSwitch>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(RemoveContinueInSwitchTest, SingleContinue) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
var i = 0;
loop {
@@ -132,7 +132,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn f() {
var i = 0;
loop {
@@ -163,14 +163,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<RemoveContinueInSwitch>(src, data);
+ DataMap data;
+ auto got = Run<RemoveContinueInSwitch>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(RemoveContinueInSwitchTest, MultipleContinues) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
var i = 0;
loop {
@@ -202,7 +202,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn f() {
var i = 0;
loop {
@@ -247,14 +247,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<RemoveContinueInSwitch>(src, data);
+ DataMap data;
+ auto got = Run<RemoveContinueInSwitch>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(RemoveContinueInSwitchTest, MultipleSwitch) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
var i = 0;
loop {
@@ -287,7 +287,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn f() {
var i = 0;
loop {
@@ -332,14 +332,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<RemoveContinueInSwitch>(src, data);
+ DataMap data;
+ auto got = Run<RemoveContinueInSwitch>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(RemoveContinueInSwitchTest, NestedLoopSwitch) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
var i = 0;
loop {
@@ -374,7 +374,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn f() {
var i = 0;
loop {
@@ -423,14 +423,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<RemoveContinueInSwitch>(src, data);
+ DataMap data;
+ auto got = Run<RemoveContinueInSwitch>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(RemoveContinueInSwitchTest, ExtraScopes) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
var i = 0;
var a = true;
@@ -462,7 +462,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn f() {
var i = 0;
var a = true;
@@ -501,14 +501,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<RemoveContinueInSwitch>(src, data);
+ DataMap data;
+ auto got = Run<RemoveContinueInSwitch>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(RemoveContinueInSwitchTest, ForLoop) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
for (var i = 0; i < 4; i = i + 1) {
let marker1 = 0;
@@ -527,7 +527,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn f() {
for(var i = 0; (i < 4); i = (i + 1)) {
let marker1 = 0;
@@ -553,10 +553,10 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<RemoveContinueInSwitch>(src, data);
+ DataMap data;
+ auto got = Run<RemoveContinueInSwitch>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/transform/remove_phonies.cc b/chromium/third_party/dawn/src/tint/transform/remove_phonies.cc
index 9904e348de4..7ca11944874 100644
--- a/chromium/third_party/dawn/src/tint/transform/remove_phonies.cc
+++ b/chromium/third_party/dawn/src/tint/transform/remove_phonies.cc
@@ -34,31 +34,31 @@ namespace tint::transform {
namespace {
struct SinkSignature {
- std::vector<const sem::Type*> types;
+ std::vector<const sem::Type*> types;
- bool operator==(const SinkSignature& other) const {
- if (types.size() != other.types.size()) {
- return false;
- }
- for (size_t i = 0; i < types.size(); i++) {
- if (types[i] != other.types[i]) {
- return false;
- }
- }
- return true;
- }
-
- struct Hasher {
- /// @param sig the CallTargetSignature to hash
- /// @return the hash value
- std::size_t operator()(const SinkSignature& sig) const {
- size_t hash = tint::utils::Hash(sig.types.size());
- for (auto* ty : sig.types) {
- tint::utils::HashCombine(&hash, ty);
- }
- return hash;
+ bool operator==(const SinkSignature& other) const {
+ if (types.size() != other.types.size()) {
+ return false;
+ }
+ for (size_t i = 0; i < types.size(); i++) {
+ if (types[i] != other.types[i]) {
+ return false;
+ }
+ }
+ return true;
}
- };
+
+ struct Hasher {
+ /// @param sig the CallTargetSignature to hash
+ /// @return the hash value
+ std::size_t operator()(const SinkSignature& sig) const {
+ size_t hash = tint::utils::Hash(sig.types.size());
+ for (auto* ty : sig.types) {
+ tint::utils::HashCombine(&hash, ty);
+ }
+ return hash;
+ }
+ };
};
} // namespace
@@ -68,87 +68,90 @@ RemovePhonies::RemovePhonies() = default;
RemovePhonies::~RemovePhonies() = default;
bool RemovePhonies::ShouldRun(const Program* program, const DataMap&) const {
- for (auto* node : program->ASTNodes().Objects()) {
- if (node->Is<ast::PhonyExpression>()) {
- return true;
+ for (auto* node : program->ASTNodes().Objects()) {
+ if (node->Is<ast::PhonyExpression>()) {
+ return true;
+ }
}
- }
- return false;
+ return false;
}
void RemovePhonies::Run(CloneContext& ctx, const DataMap&, DataMap&) const {
- auto& sem = ctx.src->Sem();
-
- std::unordered_map<SinkSignature, Symbol, SinkSignature::Hasher> sinks;
-
- for (auto* node : ctx.src->ASTNodes().Objects()) {
- if (auto* stmt = node->As<ast::AssignmentStatement>()) {
- if (stmt->lhs->Is<ast::PhonyExpression>()) {
- std::vector<const ast::Expression*> side_effects;
- if (!ast::TraverseExpressions(
- stmt->rhs, ctx.dst->Diagnostics(),
- [&](const ast::CallExpression* call) {
- // ast::CallExpression may map to a function or builtin call
- // (both may have side-effects), or a type constructor or
- // type conversion (both do not have side effects).
- if (sem.Get(call)
- ->Target()
- ->IsAnyOf<sem::Function, sem::Builtin>()) {
- side_effects.push_back(call);
- return ast::TraverseAction::Skip;
- }
- return ast::TraverseAction::Descend;
- })) {
- return;
- }
-
- if (side_effects.empty()) {
- // Phony assignment with no side effects.
- // Just remove it.
- RemoveStatement(ctx, stmt);
- continue;
- }
-
- if (side_effects.size() == 1) {
- if (auto* call = side_effects[0]->As<ast::CallExpression>()) {
- // Phony assignment with single call side effect.
- // Replace phony assignment with call.
- ctx.Replace(
- stmt, [&, call] { return ctx.dst->CallStmt(ctx.Clone(call)); });
- continue;
- }
- }
-
- // Phony assignment with multiple side effects.
- // Generate a call to a placeholder function with the side
- // effects as arguments.
- ctx.Replace(stmt, [&, side_effects] {
- SinkSignature sig;
- for (auto* arg : side_effects) {
- sig.types.push_back(sem.Get(arg)->Type()->UnwrapRef());
- }
- auto sink = utils::GetOrCreate(sinks, sig, [&] {
- auto name = ctx.dst->Symbols().New("phony_sink");
- ast::VariableList params;
- for (auto* ty : sig.types) {
- auto* ast_ty = CreateASTTypeFor(ctx, ty);
- params.push_back(
- ctx.dst->Param("p" + std::to_string(params.size()), ast_ty));
+ auto& sem = ctx.src->Sem();
+
+ std::unordered_map<SinkSignature, Symbol, SinkSignature::Hasher> sinks;
+
+ for (auto* node : ctx.src->ASTNodes().Objects()) {
+ if (auto* stmt = node->As<ast::AssignmentStatement>()) {
+ if (stmt->lhs->Is<ast::PhonyExpression>()) {
+ std::vector<const ast::Expression*> side_effects;
+ if (!ast::TraverseExpressions(
+ stmt->rhs, ctx.dst->Diagnostics(), [&](const ast::CallExpression* expr) {
+ // ast::CallExpression may map to a function or builtin call
+ // (both may have side-effects), or a type constructor or
+ // type conversion (both do not have side effects).
+ auto* call = sem.Get<sem::Call>(expr);
+ if (!call) {
+ // Semantic node must be a Materialize, in which case the expression
+ // was creation-time (compile time), so could not have side effects.
+ // Just skip.
+ return ast::TraverseAction::Skip;
+ }
+ if (call->Target()->IsAnyOf<sem::Function, sem::Builtin>()) {
+ side_effects.push_back(expr);
+ return ast::TraverseAction::Skip;
+ }
+ return ast::TraverseAction::Descend;
+ })) {
+ return;
+ }
+
+ if (side_effects.empty()) {
+ // Phony assignment with no side effects.
+ // Just remove it.
+ RemoveStatement(ctx, stmt);
+ continue;
+ }
+
+ if (side_effects.size() == 1) {
+ if (auto* call = side_effects[0]->As<ast::CallExpression>()) {
+ // Phony assignment with single call side effect.
+ // Replace phony assignment with call.
+ ctx.Replace(stmt, [&, call] { return ctx.dst->CallStmt(ctx.Clone(call)); });
+ continue;
+ }
+ }
+
+ // Phony assignment with multiple side effects.
+ // Generate a call to a placeholder function with the side
+ // effects as arguments.
+ ctx.Replace(stmt, [&, side_effects] {
+ SinkSignature sig;
+ for (auto* arg : side_effects) {
+ sig.types.push_back(sem.Get(arg)->Type()->UnwrapRef());
+ }
+ auto sink = utils::GetOrCreate(sinks, sig, [&] {
+ auto name = ctx.dst->Symbols().New("phony_sink");
+ ast::VariableList params;
+ for (auto* ty : sig.types) {
+ auto* ast_ty = CreateASTTypeFor(ctx, ty);
+ params.push_back(
+ ctx.dst->Param("p" + std::to_string(params.size()), ast_ty));
+ }
+ ctx.dst->Func(name, params, ctx.dst->ty.void_(), {});
+ return name;
+ });
+ ast::ExpressionList args;
+ for (auto* arg : side_effects) {
+ args.push_back(ctx.Clone(arg));
+ }
+ return ctx.dst->CallStmt(ctx.dst->Call(sink, args));
+ });
}
- ctx.dst->Func(name, params, ctx.dst->ty.void_(), {});
- return name;
- });
- ast::ExpressionList args;
- for (auto* arg : side_effects) {
- args.push_back(ctx.Clone(arg));
- }
- return ctx.dst->CallStmt(ctx.dst->Call(sink, args));
- });
- }
+ }
}
- }
- ctx.Clone();
+ ctx.Clone();
}
} // namespace tint::transform
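
As the comments in Run() above describe, a phony assignment is handled one of three ways: if the RHS contains no side-effecting calls (including the newly handled case where the semantic node is a Materialize, i.e. a creation-time constant), the statement is simply removed; a single side-effecting call is replaced with a plain call statement; multiple side-effecting calls are forwarded to a generated 'phony_sink' helper so every call is still evaluated exactly once. A rough WGSL sketch (helper placement and parameter names are illustrative; the tests in the next file show the exact output):

Input:

fn g() -> i32 {
  return 1;
}

fn f() {
  _ = (1 + 2);
  _ = g();
  _ = (g() + g());
}

Conceptual output:

fn phony_sink(p0 : i32, p1 : i32) {
}

fn g() -> i32 {
  return 1;
}

fn f() {
  g();
  phony_sink(g(), g());
}

Sink functions are deduplicated per argument-type signature via the SinkSignature hash defined above, so repeated phony assignments with the same operand types reuse one helper.
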
diff --git a/chromium/third_party/dawn/src/tint/transform/remove_phonies.h b/chromium/third_party/dawn/src/tint/transform/remove_phonies.h
index 6e355f18a4f..20128a0e4ec 100644
--- a/chromium/third_party/dawn/src/tint/transform/remove_phonies.h
+++ b/chromium/third_party/dawn/src/tint/transform/remove_phonies.h
@@ -26,29 +26,26 @@ namespace tint::transform {
/// while preserving function call expressions in the RHS of the assignment that
/// may have side-effects.
class RemovePhonies : public Castable<RemovePhonies, Transform> {
- public:
- /// Constructor
- RemovePhonies();
-
- /// Destructor
- ~RemovePhonies() override;
-
- /// @param program the program to inspect
- /// @param data optional extra transform-specific input data
- /// @returns true if this transform should be run for the given program
- bool ShouldRun(const Program* program,
- const DataMap& data = {}) const override;
-
- protected:
- /// Runs the transform using the CloneContext built for transforming a
- /// program. Run() is responsible for calling Clone() on the CloneContext.
- /// @param ctx the CloneContext primed with the input program and
- /// ProgramBuilder
- /// @param inputs optional extra transform-specific input data
- /// @param outputs optional extra transform-specific output data
- void Run(CloneContext& ctx,
- const DataMap& inputs,
- DataMap& outputs) const override;
+ public:
+ /// Constructor
+ RemovePhonies();
+
+ /// Destructor
+ ~RemovePhonies() override;
+
+ /// @param program the program to inspect
+ /// @param data optional extra transform-specific input data
+ /// @returns true if this transform should be run for the given program
+ bool ShouldRun(const Program* program, const DataMap& data = {}) const override;
+
+ protected:
+ /// Runs the transform using the CloneContext built for transforming a
+ /// program. Run() is responsible for calling Clone() on the CloneContext.
+ /// @param ctx the CloneContext primed with the input program and
+ /// ProgramBuilder
+ /// @param inputs optional extra transform-specific input data
+ /// @param outputs optional extra transform-specific output data
+ void Run(CloneContext& ctx, const DataMap& inputs, DataMap& outputs) const override;
};
} // namespace tint::transform
diff --git a/chromium/third_party/dawn/src/tint/transform/remove_phonies_test.cc b/chromium/third_party/dawn/src/tint/transform/remove_phonies_test.cc
index e6faa3e2dc9..220f1db4649 100644
--- a/chromium/third_party/dawn/src/tint/transform/remove_phonies_test.cc
+++ b/chromium/third_party/dawn/src/tint/transform/remove_phonies_test.cc
@@ -26,32 +26,32 @@ namespace {
using RemovePhoniesTest = TransformTest;
TEST_F(RemovePhoniesTest, ShouldRunEmptyModule) {
- auto* src = R"()";
+ auto* src = R"()";
- EXPECT_FALSE(ShouldRun<RemovePhonies>(src));
+ EXPECT_FALSE(ShouldRun<RemovePhonies>(src));
}
TEST_F(RemovePhoniesTest, ShouldRunHasPhony) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
_ = 1;
}
)";
- EXPECT_TRUE(ShouldRun<RemovePhonies>(src));
+ EXPECT_TRUE(ShouldRun<RemovePhonies>(src));
}
TEST_F(RemovePhoniesTest, EmptyModule) {
- auto* src = "";
- auto* expect = "";
+ auto* src = "";
+ auto* expect = "";
- auto got = Run<RemovePhonies>(src);
+ auto got = Run<RemovePhonies>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(RemovePhoniesTest, NoSideEffects) {
- auto* src = R"(
+ auto* src = R"(
@group(0) @binding(0) var t : texture_2d<f32>;
fn f() {
@@ -68,7 +68,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
@group(0) @binding(0) var t : texture_2d<f32>;
fn f() {
@@ -76,13 +76,13 @@ fn f() {
}
)";
- auto got = Run<RemovePhonies>(src);
+ auto got = Run<RemovePhonies>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(RemovePhoniesTest, SingleSideEffects) {
- auto* src = R"(
+ auto* src = R"(
fn neg(a : i32) -> i32 {
return -(a);
}
@@ -103,7 +103,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn neg(a : i32) -> i32 {
return -(a);
}
@@ -124,13 +124,13 @@ fn f() {
}
)";
- auto got = Run<RemovePhonies>(src);
+ auto got = Run<RemovePhonies>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(RemovePhoniesTest, SingleSideEffects_OutOfOrder) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
_ = neg(1);
_ = add(2, 3);
@@ -151,7 +151,7 @@ fn neg(a : i32) -> i32 {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn f() {
neg(1);
add(2, 3);
@@ -172,13 +172,13 @@ fn neg(a : i32) -> i32 {
}
)";
- auto got = Run<RemovePhonies>(src);
+ auto got = Run<RemovePhonies>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(RemovePhoniesTest, MultipleSideEffects) {
- auto* src = R"(
+ auto* src = R"(
fn neg(a : i32) -> i32 {
return -(a);
}
@@ -199,7 +199,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn neg(a : i32) -> i32 {
return -(a);
}
@@ -229,13 +229,13 @@ fn f() {
}
)";
- auto got = Run<RemovePhonies>(src);
+ auto got = Run<RemovePhonies>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(RemovePhoniesTest, MultipleSideEffects_OutOfOrder) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
_ = (1 + add(2 + add(3, 4), 5)) * add(6, 7) * neg(8);
_ = add(9, neg(10)) + neg(11);
@@ -256,7 +256,7 @@ fn xor(a : u32, b : u32) -> u32 {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn phony_sink(p0 : i32, p1 : i32, p2 : i32) {
}
@@ -286,13 +286,13 @@ fn xor(a : u32, b : u32) -> u32 {
}
)";
- auto got = Run<RemovePhonies>(src);
+ auto got = Run<RemovePhonies>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(RemovePhoniesTest, ForLoop) {
- auto* src = R"(
+ auto* src = R"(
struct S {
arr : array<i32>,
};
@@ -321,7 +321,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct S {
arr : array<i32>,
}
@@ -353,13 +353,13 @@ fn f() {
}
)";
- auto got = Run<RemovePhonies>(src);
+ auto got = Run<RemovePhonies>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(RemovePhoniesTest, ForLoop_OutOfOrder) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
for (_ = &s.arr; ;_ = &s.arr) {
break;
@@ -388,7 +388,7 @@ struct S {
@group(0) @binding(0) var<storage, read_write> s : S;
)";
- auto* expect = R"(
+ auto* expect = R"(
fn phony_sink(p0 : i32, p1 : i32) {
}
@@ -420,9 +420,9 @@ struct S {
@group(0) @binding(0) var<storage, read_write> s : S;
)";
- auto got = Run<RemovePhonies>(src);
+ auto got = Run<RemovePhonies>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/transform/remove_unreachable_statements.cc b/chromium/third_party/dawn/src/tint/transform/remove_unreachable_statements.cc
index 3e13ad7aaa1..964d767c35a 100644
--- a/chromium/third_party/dawn/src/tint/transform/remove_unreachable_statements.cc
+++ b/chromium/third_party/dawn/src/tint/transform/remove_unreachable_statements.cc
@@ -36,30 +36,27 @@ RemoveUnreachableStatements::RemoveUnreachableStatements() = default;
RemoveUnreachableStatements::~RemoveUnreachableStatements() = default;
-bool RemoveUnreachableStatements::ShouldRun(const Program* program,
- const DataMap&) const {
- for (auto* node : program->ASTNodes().Objects()) {
- if (auto* stmt = program->Sem().Get<sem::Statement>(node)) {
- if (!stmt->IsReachable()) {
- return true;
- }
+bool RemoveUnreachableStatements::ShouldRun(const Program* program, const DataMap&) const {
+ for (auto* node : program->ASTNodes().Objects()) {
+ if (auto* stmt = program->Sem().Get<sem::Statement>(node)) {
+ if (!stmt->IsReachable()) {
+ return true;
+ }
+ }
}
- }
- return false;
+ return false;
}
-void RemoveUnreachableStatements::Run(CloneContext& ctx,
- const DataMap&,
- DataMap&) const {
- for (auto* node : ctx.src->ASTNodes().Objects()) {
- if (auto* stmt = ctx.src->Sem().Get<sem::Statement>(node)) {
- if (!stmt->IsReachable()) {
- RemoveStatement(ctx, stmt->Declaration());
- }
+void RemoveUnreachableStatements::Run(CloneContext& ctx, const DataMap&, DataMap&) const {
+ for (auto* node : ctx.src->ASTNodes().Objects()) {
+ if (auto* stmt = ctx.src->Sem().Get<sem::Statement>(node)) {
+ if (!stmt->IsReachable()) {
+ RemoveStatement(ctx, stmt->Declaration());
+ }
+ }
}
- }
- ctx.Clone();
+ ctx.Clone();
}
} // namespace tint::transform
diff --git a/chromium/third_party/dawn/src/tint/transform/remove_unreachable_statements.h b/chromium/third_party/dawn/src/tint/transform/remove_unreachable_statements.h
index a474efb49db..c75da3d45f6 100644
--- a/chromium/third_party/dawn/src/tint/transform/remove_unreachable_statements.h
+++ b/chromium/third_party/dawn/src/tint/transform/remove_unreachable_statements.h
@@ -24,31 +24,27 @@ namespace tint::transform {
/// RemoveUnreachableStatements is a Transform that removes all statements
/// marked as unreachable.
-class RemoveUnreachableStatements
- : public Castable<RemoveUnreachableStatements, Transform> {
- public:
- /// Constructor
- RemoveUnreachableStatements();
-
- /// Destructor
- ~RemoveUnreachableStatements() override;
-
- /// @param program the program to inspect
- /// @param data optional extra transform-specific input data
- /// @returns true if this transform should be run for the given program
- bool ShouldRun(const Program* program,
- const DataMap& data = {}) const override;
-
- protected:
- /// Runs the transform using the CloneContext built for transforming a
- /// program. Run() is responsible for calling Clone() on the CloneContext.
- /// @param ctx the CloneContext primed with the input program and
- /// ProgramBuilder
- /// @param inputs optional extra transform-specific input data
- /// @param outputs optional extra transform-specific output data
- void Run(CloneContext& ctx,
- const DataMap& inputs,
- DataMap& outputs) const override;
+class RemoveUnreachableStatements : public Castable<RemoveUnreachableStatements, Transform> {
+ public:
+ /// Constructor
+ RemoveUnreachableStatements();
+
+ /// Destructor
+ ~RemoveUnreachableStatements() override;
+
+ /// @param program the program to inspect
+ /// @param data optional extra transform-specific input data
+ /// @returns true if this transform should be run for the given program
+ bool ShouldRun(const Program* program, const DataMap& data = {}) const override;
+
+ protected:
+ /// Runs the transform using the CloneContext built for transforming a
+ /// program. Run() is responsible for calling Clone() on the CloneContext.
+ /// @param ctx the CloneContext primed with the input program and
+ /// ProgramBuilder
+ /// @param inputs optional extra transform-specific input data
+ /// @param outputs optional extra transform-specific output data
+ void Run(CloneContext& ctx, const DataMap& inputs, DataMap& outputs) const override;
};
} // namespace tint::transform
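The declarations above are the transform's entire public surface: ShouldRun() reports whether the program's semantic info contains any unreachable statement, and the protected Run() clones the program while dropping those statements. A minimal sketch of driving it standalone follows, under the assumption that the Output-returning Run() inherited from the Transform base class builds the CloneContext and dispatches to the override shown here; the include path and helper name are illustrative, not part of this patch.

#include "src/tint/transform/remove_unreachable_statements.h"

// Clones `program`, dropping every statement the resolver marked unreachable.
// ShouldRun() can be checked first to skip the clone when nothing would change.
tint::transform::Output StripUnreachable(const tint::Program& program) {
    tint::transform::RemoveUnreachableStatements strip;
    tint::transform::DataMap no_inputs;              // this transform takes no extra data
    const tint::transform::Transform& base = strip;  // Output-returning Run() lives on the base
    return base.Run(&program, no_inputs);
}

The returned Output carries the rewritten tint::Program alongside any transform-specific output data.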
diff --git a/chromium/third_party/dawn/src/tint/transform/remove_unreachable_statements_test.cc b/chromium/third_party/dawn/src/tint/transform/remove_unreachable_statements_test.cc
index 43c19508adb..4b0a2655ddb 100644
--- a/chromium/third_party/dawn/src/tint/transform/remove_unreachable_statements_test.cc
+++ b/chromium/third_party/dawn/src/tint/transform/remove_unreachable_statements_test.cc
@@ -22,13 +22,13 @@ namespace {
using RemoveUnreachableStatementsTest = TransformTest;
TEST_F(RemoveUnreachableStatementsTest, ShouldRunEmptyModule) {
- auto* src = R"()";
+ auto* src = R"()";
- EXPECT_FALSE(ShouldRun<RemoveUnreachableStatements>(src));
+ EXPECT_FALSE(ShouldRun<RemoveUnreachableStatements>(src));
}
TEST_F(RemoveUnreachableStatementsTest, ShouldRunHasNoUnreachable) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
if (true) {
var x = 1;
@@ -36,11 +36,11 @@ fn f() {
}
)";
- EXPECT_FALSE(ShouldRun<RemoveUnreachableStatements>(src));
+ EXPECT_FALSE(ShouldRun<RemoveUnreachableStatements>(src));
}
TEST_F(RemoveUnreachableStatementsTest, ShouldRunHasUnreachable) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
return;
if (true) {
@@ -49,20 +49,20 @@ fn f() {
}
)";
- EXPECT_TRUE(ShouldRun<RemoveUnreachableStatements>(src));
+ EXPECT_TRUE(ShouldRun<RemoveUnreachableStatements>(src));
}
TEST_F(RemoveUnreachableStatementsTest, EmptyModule) {
- auto* src = "";
- auto* expect = "";
+ auto* src = "";
+ auto* expect = "";
- auto got = Run<RemoveUnreachableStatements>(src);
+ auto got = Run<RemoveUnreachableStatements>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(RemoveUnreachableStatementsTest, Return) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
return;
var remove_me = 1;
@@ -72,19 +72,19 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn f() {
return;
}
)";
- auto got = Run<RemoveUnreachableStatements>(src);
+ auto got = Run<RemoveUnreachableStatements>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(RemoveUnreachableStatementsTest, NestedReturn) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
{
{
@@ -98,7 +98,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn f() {
{
{
@@ -108,13 +108,13 @@ fn f() {
}
)";
- auto got = Run<RemoveUnreachableStatements>(src);
+ auto got = Run<RemoveUnreachableStatements>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(RemoveUnreachableStatementsTest, Discard) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
discard;
var remove_me = 1;
@@ -124,19 +124,19 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn f() {
discard;
}
)";
- auto got = Run<RemoveUnreachableStatements>(src);
+ auto got = Run<RemoveUnreachableStatements>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(RemoveUnreachableStatementsTest, NestedDiscard) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
{
{
@@ -150,7 +150,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn f() {
{
{
@@ -160,13 +160,13 @@ fn f() {
}
)";
- auto got = Run<RemoveUnreachableStatements>(src);
+ auto got = Run<RemoveUnreachableStatements>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(RemoveUnreachableStatementsTest, CallToFuncWithDiscard) {
- auto* src = R"(
+ auto* src = R"(
fn DISCARD() {
discard;
}
@@ -180,7 +180,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn DISCARD() {
discard;
}
@@ -190,13 +190,13 @@ fn f() {
}
)";
- auto got = Run<RemoveUnreachableStatements>(src);
+ auto got = Run<RemoveUnreachableStatements>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(RemoveUnreachableStatementsTest, CallToFuncWithIfDiscard) {
- auto* src = R"(
+ auto* src = R"(
fn DISCARD() {
if (true) {
discard;
@@ -212,15 +212,15 @@ fn f() {
}
)";
- auto* expect = src;
+ auto* expect = src;
- auto got = Run<RemoveUnreachableStatements>(src);
+ auto got = Run<RemoveUnreachableStatements>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(RemoveUnreachableStatementsTest, IfDiscardElseDiscard) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
if (true) {
discard;
@@ -234,7 +234,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn f() {
if (true) {
discard;
@@ -244,13 +244,13 @@ fn f() {
}
)";
- auto got = Run<RemoveUnreachableStatements>(src);
+ auto got = Run<RemoveUnreachableStatements>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(RemoveUnreachableStatementsTest, IfDiscardElseReturn) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
if (true) {
discard;
@@ -264,7 +264,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn f() {
if (true) {
discard;
@@ -274,13 +274,13 @@ fn f() {
}
)";
- auto got = Run<RemoveUnreachableStatements>(src);
+ auto got = Run<RemoveUnreachableStatements>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(RemoveUnreachableStatementsTest, IfDiscard) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
if (true) {
discard;
@@ -292,15 +292,15 @@ fn f() {
}
)";
- auto* expect = src;
+ auto* expect = src;
- auto got = Run<RemoveUnreachableStatements>(src);
+ auto got = Run<RemoveUnreachableStatements>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(RemoveUnreachableStatementsTest, IfReturn) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
if (true) {
return;
@@ -312,15 +312,15 @@ fn f() {
}
)";
- auto* expect = src;
+ auto* expect = src;
- auto got = Run<RemoveUnreachableStatements>(src);
+ auto got = Run<RemoveUnreachableStatements>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(RemoveUnreachableStatementsTest, IfElseDiscard) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
if (true) {
} else {
@@ -333,15 +333,15 @@ fn f() {
}
)";
- auto* expect = src;
+ auto* expect = src;
- auto got = Run<RemoveUnreachableStatements>(src);
+ auto got = Run<RemoveUnreachableStatements>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(RemoveUnreachableStatementsTest, IfElseReturn) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
if (true) {
} else {
@@ -354,15 +354,15 @@ fn f() {
}
)";
- auto* expect = src;
+ auto* expect = src;
- auto got = Run<RemoveUnreachableStatements>(src);
+ auto got = Run<RemoveUnreachableStatements>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(RemoveUnreachableStatementsTest, LoopWithDiscard) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
loop {
var a = 1;
@@ -379,7 +379,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn f() {
loop {
var a = 1;
@@ -392,13 +392,13 @@ fn f() {
}
)";
- auto got = Run<RemoveUnreachableStatements>(src);
+ auto got = Run<RemoveUnreachableStatements>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(RemoveUnreachableStatementsTest, LoopWithConditionalBreak) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
loop {
var a = 1;
@@ -417,15 +417,15 @@ fn f() {
}
)";
- auto* expect = src;
+ auto* expect = src;
- auto got = Run<RemoveUnreachableStatements>(src);
+ auto got = Run<RemoveUnreachableStatements>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(RemoveUnreachableStatementsTest, LoopWithConditionalBreakInContinuing) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
loop {
@@ -442,15 +442,15 @@ fn f() {
}
)";
- auto* expect = src;
+ auto* expect = src;
- auto got = Run<RemoveUnreachableStatements>(src);
+ auto got = Run<RemoveUnreachableStatements>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(RemoveUnreachableStatementsTest, SwitchDefaultDiscard) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
switch(1) {
default: {
@@ -464,7 +464,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn f() {
switch(1) {
default: {
@@ -474,13 +474,13 @@ fn f() {
}
)";
- auto got = Run<RemoveUnreachableStatements>(src);
+ auto got = Run<RemoveUnreachableStatements>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(RemoveUnreachableStatementsTest, SwitchCaseReturnDefaultDiscard) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
switch(1) {
case 0: {
@@ -497,7 +497,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn f() {
switch(1) {
case 0: {
@@ -510,13 +510,13 @@ fn f() {
}
)";
- auto got = Run<RemoveUnreachableStatements>(src);
+ auto got = Run<RemoveUnreachableStatements>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(RemoveUnreachableStatementsTest, SwitchCaseBreakDefaultDiscard) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
switch(1) {
case 0: {
@@ -533,15 +533,15 @@ fn f() {
}
)";
- auto* expect = src;
+ auto* expect = src;
- auto got = Run<RemoveUnreachableStatements>(src);
+ auto got = Run<RemoveUnreachableStatements>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(RemoveUnreachableStatementsTest, SwitchCaseReturnDefaultBreak) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
switch(1) {
case 0: {
@@ -558,11 +558,11 @@ fn f() {
}
)";
- auto* expect = src;
+ auto* expect = src;
- auto got = Run<RemoveUnreachableStatements>(src);
+ auto got = Run<RemoveUnreachableStatements>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/transform/renamer.cc b/chromium/third_party/dawn/src/tint/transform/renamer.cc
index 962791182b2..562a52fc7fe 100644
--- a/chromium/third_party/dawn/src/tint/transform/renamer.cc
+++ b/chromium/third_party/dawn/src/tint/transform/renamer.cc
@@ -1253,114 +1253,107 @@ Renamer::Renamer() = default;
Renamer::~Renamer() = default;
Output Renamer::Run(const Program* in, const DataMap& inputs) const {
- ProgramBuilder out;
- // Disable auto-cloning of symbols, since we want to rename them.
- CloneContext ctx(&out, in, false);
+ ProgramBuilder out;
+ // Disable auto-cloning of symbols, since we want to rename them.
+ CloneContext ctx(&out, in, false);
- // Swizzles, builtin calls and builtin structure members need to keep their
- // symbols preserved.
- std::unordered_set<const ast::IdentifierExpression*> preserve;
- for (auto* node : in->ASTNodes().Objects()) {
- if (auto* member = node->As<ast::MemberAccessorExpression>()) {
- auto* sem = in->Sem().Get(member);
- if (!sem) {
- TINT_ICE(Transform, out.Diagnostics())
- << "MemberAccessorExpression has no semantic info";
- continue;
- }
- if (sem->Is<sem::Swizzle>()) {
- preserve.emplace(member->member);
- } else if (auto* str_expr = in->Sem().Get(member->structure)) {
- if (auto* ty = str_expr->Type()->UnwrapRef()->As<sem::Struct>()) {
- if (ty->Declaration() == nullptr) { // Builtin structure
- preserve.emplace(member->member);
- }
+ // Swizzles, builtin calls and builtin structure members need to keep their
+ // symbols preserved.
+ std::unordered_set<const ast::IdentifierExpression*> preserve;
+ for (auto* node : in->ASTNodes().Objects()) {
+ if (auto* member = node->As<ast::MemberAccessorExpression>()) {
+ auto* sem = in->Sem().Get(member);
+ if (!sem) {
+ TINT_ICE(Transform, out.Diagnostics())
+ << "MemberAccessorExpression has no semantic info";
+ continue;
+ }
+ if (sem->Is<sem::Swizzle>()) {
+ preserve.emplace(member->member);
+ } else if (auto* str_expr = in->Sem().Get(member->structure)) {
+ if (auto* ty = str_expr->Type()->UnwrapRef()->As<sem::Struct>()) {
+ if (ty->Declaration() == nullptr) { // Builtin structure
+ preserve.emplace(member->member);
+ }
+ }
+ }
+ } else if (auto* call = node->As<ast::CallExpression>()) {
+ auto* sem = in->Sem().Get(call)->UnwrapMaterialize()->As<sem::Call>();
+ if (!sem) {
+ TINT_ICE(Transform, out.Diagnostics()) << "CallExpression has no semantic info";
+ continue;
+ }
+ if (sem->Target()->Is<sem::Builtin>()) {
+ preserve.emplace(call->target.name);
+ }
}
- }
- } else if (auto* call = node->As<ast::CallExpression>()) {
- auto* sem = in->Sem().Get(call);
- if (!sem) {
- TINT_ICE(Transform, out.Diagnostics())
- << "CallExpression has no semantic info";
- continue;
- }
- if (sem->Target()->Is<sem::Builtin>()) {
- preserve.emplace(call->target.name);
- }
}
- }
- Data::Remappings remappings;
+ Data::Remappings remappings;
- Target target = Target::kAll;
- bool preserve_unicode = false;
+ Target target = Target::kAll;
+ bool preserve_unicode = false;
- if (auto* cfg = inputs.Get<Config>()) {
- target = cfg->target;
- preserve_unicode = cfg->preserve_unicode;
- }
-
- ctx.ReplaceAll([&](Symbol sym_in) {
- auto name_in = ctx.src->Symbols().NameFor(sym_in);
- if (preserve_unicode || text::utf8::IsASCII(name_in)) {
- switch (target) {
- case Target::kAll:
- // Always rename.
- break;
- case Target::kGlslKeywords:
- if (!std::binary_search(
- kReservedKeywordsGLSL,
- kReservedKeywordsGLSL +
- sizeof(kReservedKeywordsGLSL) / sizeof(const char*),
- name_in) &&
- name_in.compare(0, 3, "gl_")) {
- // No match, just reuse the original name.
- return ctx.dst->Symbols().New(name_in);
- }
- break;
- case Target::kHlslKeywords:
- if (!std::binary_search(
- kReservedKeywordsHLSL,
- kReservedKeywordsHLSL +
- sizeof(kReservedKeywordsHLSL) / sizeof(const char*),
- name_in)) {
- // No match, just reuse the original name.
- return ctx.dst->Symbols().New(name_in);
- }
- break;
- case Target::kMslKeywords:
- if (!std::binary_search(
- kReservedKeywordsMSL,
- kReservedKeywordsMSL +
- sizeof(kReservedKeywordsMSL) / sizeof(const char*),
- name_in)) {
- // No match, just reuse the original name.
- return ctx.dst->Symbols().New(name_in);
- }
- break;
- }
+ if (auto* cfg = inputs.Get<Config>()) {
+ target = cfg->target;
+ preserve_unicode = cfg->preserve_unicode;
}
- auto sym_out = ctx.dst->Sym();
- remappings.emplace(name_in, ctx.dst->Symbols().NameFor(sym_out));
- return sym_out;
- });
+ ctx.ReplaceAll([&](Symbol sym_in) {
+ auto name_in = ctx.src->Symbols().NameFor(sym_in);
+ if (preserve_unicode || text::utf8::IsASCII(name_in)) {
+ switch (target) {
+ case Target::kAll:
+ // Always rename.
+ break;
+ case Target::kGlslKeywords:
+ if (!std::binary_search(kReservedKeywordsGLSL,
+ kReservedKeywordsGLSL +
+ sizeof(kReservedKeywordsGLSL) / sizeof(const char*),
+ name_in) &&
+ name_in.compare(0, 3, "gl_")) {
+ // No match, just reuse the original name.
+ return ctx.dst->Symbols().New(name_in);
+ }
+ break;
+ case Target::kHlslKeywords:
+ if (!std::binary_search(kReservedKeywordsHLSL,
+ kReservedKeywordsHLSL +
+ sizeof(kReservedKeywordsHLSL) / sizeof(const char*),
+ name_in)) {
+ // No match, just reuse the original name.
+ return ctx.dst->Symbols().New(name_in);
+ }
+ break;
+ case Target::kMslKeywords:
+ if (!std::binary_search(kReservedKeywordsMSL,
+ kReservedKeywordsMSL +
+ sizeof(kReservedKeywordsMSL) / sizeof(const char*),
+ name_in)) {
+ // No match, just reuse the original name.
+ return ctx.dst->Symbols().New(name_in);
+ }
+ break;
+ }
+ }
- ctx.ReplaceAll([&](const ast::IdentifierExpression* ident)
- -> const ast::IdentifierExpression* {
- if (preserve.count(ident)) {
- auto sym_in = ident->symbol;
- auto str = in->Symbols().NameFor(sym_in);
- auto sym_out = out.Symbols().Register(str);
- return ctx.dst->create<ast::IdentifierExpression>(
- ctx.Clone(ident->source), sym_out);
- }
- return nullptr; // Clone ident. Uses the symbol remapping above.
- });
- ctx.Clone();
+ auto sym_out = ctx.dst->Sym();
+ remappings.emplace(name_in, ctx.dst->Symbols().NameFor(sym_out));
+ return sym_out;
+ });
+
+ ctx.ReplaceAll([&](const ast::IdentifierExpression* ident) -> const ast::IdentifierExpression* {
+ if (preserve.count(ident)) {
+ auto sym_in = ident->symbol;
+ auto str = in->Symbols().NameFor(sym_in);
+ auto sym_out = out.Symbols().Register(str);
+ return ctx.dst->create<ast::IdentifierExpression>(ctx.Clone(ident->source), sym_out);
+ }
+ return nullptr; // Clone ident. Uses the symbol remapping above.
+ });
+ ctx.Clone();
- return Output(Program(std::move(out)),
- std::make_unique<Data>(std::move(remappings)));
+ return Output(Program(std::move(out)), std::make_unique<Data>(std::move(remappings)));
}
} // namespace tint::transform
diff --git a/chromium/third_party/dawn/src/tint/transform/renamer.h b/chromium/third_party/dawn/src/tint/transform/renamer.h
index ad37b0c1edf..354acdade18 100644
--- a/chromium/third_party/dawn/src/tint/transform/renamer.h
+++ b/chromium/third_party/dawn/src/tint/transform/renamer.h
@@ -24,72 +24,72 @@ namespace tint::transform {
/// Renamer is a Transform that renames all the symbols in a program.
class Renamer : public Castable<Renamer, Transform> {
- public:
- /// Data is outputted by the Renamer transform.
- /// Data holds information about shader usage and constant buffer offsets.
- struct Data : public Castable<Data, transform::Data> {
- /// Remappings is a map of old symbol name to new symbol name
- using Remappings = std::unordered_map<std::string, std::string>;
-
- /// Constructor
- /// @param remappings the symbol remappings
- explicit Data(Remappings&& remappings);
-
- /// Copy constructor
- Data(const Data&);
-
- /// Destructor
- ~Data() override;
-
- /// A map of old symbol name to new symbol name
- const Remappings remappings;
- };
-
- /// Target is an enumerator of rename targets that can be used
- enum class Target {
- /// Rename every symbol.
- kAll,
- /// Only rename symbols that are reserved keywords in GLSL.
- kGlslKeywords,
- /// Only rename symbols that are reserved keywords in HLSL.
- kHlslKeywords,
- /// Only rename symbols that are reserved keywords in MSL.
- kMslKeywords,
- };
-
- /// Optional configuration options for the transform.
- /// If omitted, then the renamer will use Target::kAll.
- struct Config : public Castable<Config, transform::Data> {
- /// Constructor
- /// @param tgt the targets to rename
- /// @param keep_unicode if false, symbols with non-ascii code-points are
- /// renamed
- explicit Config(Target tgt, bool keep_unicode = false);
-
- /// Copy constructor
- Config(const Config&);
+ public:
+ /// Data is outputted by the Renamer transform.
+ /// Data holds information about shader usage and constant buffer offsets.
+ struct Data : public Castable<Data, transform::Data> {
+ /// Remappings is a map of old symbol name to new symbol name
+ using Remappings = std::unordered_map<std::string, std::string>;
+
+ /// Constructor
+ /// @param remappings the symbol remappings
+ explicit Data(Remappings&& remappings);
+
+ /// Copy constructor
+ Data(const Data&);
+
+ /// Destructor
+ ~Data() override;
+
+ /// A map of old symbol name to new symbol name
+ const Remappings remappings;
+ };
+
+ /// Target is an enumerator of rename targets that can be used
+ enum class Target {
+ /// Rename every symbol.
+ kAll,
+ /// Only rename symbols that are reserved keywords in GLSL.
+ kGlslKeywords,
+ /// Only rename symbols that are reserved keywords in HLSL.
+ kHlslKeywords,
+ /// Only rename symbols that are reserved keywords in MSL.
+ kMslKeywords,
+ };
+
+ /// Optional configuration options for the transform.
+ /// If omitted, then the renamer will use Target::kAll.
+ struct Config : public Castable<Config, transform::Data> {
+ /// Constructor
+ /// @param tgt the targets to rename
+ /// @param keep_unicode if false, symbols with non-ascii code-points are
+ /// renamed
+ explicit Config(Target tgt, bool keep_unicode = false);
+
+ /// Copy constructor
+ Config(const Config&);
+
+ /// Destructor
+ ~Config() override;
+
+ /// The targets to rename
+ Target const target = Target::kAll;
+
+ /// If false, symbols with non-ascii code-points are renamed.
+ bool preserve_unicode = false;
+ };
+
+ /// Constructor using the configuration provided in the input Data
+ Renamer();
/// Destructor
- ~Config() override;
-
- /// The targets to rename
- Target const target = Target::kAll;
-
- /// If false, symbols with non-ascii code-points are renamed.
- bool preserve_unicode = false;
- };
-
- /// Constructor using a the configuration provided in the input Data
- Renamer();
-
- /// Destructor
- ~Renamer() override;
+ ~Renamer() override;
- /// Runs the transform on `program`, returning the transformation result.
- /// @param program the source program to transform
- /// @param data optional extra transform-specific input data
- /// @returns the transformation result
- Output Run(const Program* program, const DataMap& data = {}) const override;
+ /// Runs the transform on `program`, returning the transformation result.
+ /// @param program the source program to transform
+ /// @param data optional extra transform-specific input data
+ /// @returns the transformation result
+ Output Run(const Program* program, const DataMap& data = {}) const override;
};
} // namespace tint::transform
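The test file that follows exercises this interface through a fixture; called directly, the keyword-renaming configuration looks roughly like the sketch below. The Config constructor, DataMap::Add<>() and the Renamer::Data lookup mirror the declarations and tests in this patch; the surrounding function name is illustrative only.

#include "src/tint/transform/renamer.h"

// Renames only HLSL reserved keywords and returns the old-to-new name map.
tint::transform::Renamer::Data::Remappings RenameForHlsl(const tint::Program& program) {
    tint::transform::DataMap inputs;
    inputs.Add<tint::transform::Renamer::Config>(tint::transform::Renamer::Target::kHlslKeywords,
                                                 /* preserve_unicode */ false);
    tint::transform::Renamer renamer;
    auto output = renamer.Run(&program, inputs);  // output.program holds the renamed program
    auto* data = output.data.Get<tint::transform::Renamer::Data>();
    return data ? data->remappings : tint::transform::Renamer::Data::Remappings{};
}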
diff --git a/chromium/third_party/dawn/src/tint/transform/renamer_test.cc b/chromium/third_party/dawn/src/tint/transform/renamer_test.cc
index e3f9458ba76..516b164af7b 100644
--- a/chromium/third_party/dawn/src/tint/transform/renamer_test.cc
+++ b/chromium/third_party/dawn/src/tint/transform/renamer_test.cc
@@ -32,25 +32,25 @@ using ::testing::ContainerEq;
using RenamerTest = TransformTest;
TEST_F(RenamerTest, EmptyModule) {
- auto* src = "";
- auto* expect = "";
+ auto* src = "";
+ auto* expect = "";
- auto got = Run<Renamer>(src);
+ auto got = Run<Renamer>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
- auto* data = got.data.Get<Renamer::Data>();
+ auto* data = got.data.Get<Renamer::Data>();
- ASSERT_EQ(data->remappings.size(), 0u);
+ ASSERT_EQ(data->remappings.size(), 0u);
}
TEST_F(RenamerTest, BasicModuleVertexIndex) {
- auto* src = R"(
+ auto* src = R"(
fn test(vert_idx : u32) -> u32 {
return vert_idx;
}
-@stage(vertex)
+@vertex
fn entry(@builtin(vertex_index) vert_idx : u32
) -> @builtin(position) vec4<f32> {
_ = test(vert_idx);
@@ -58,36 +58,36 @@ fn entry(@builtin(vertex_index) vert_idx : u32
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn tint_symbol(tint_symbol_1 : u32) -> u32 {
return tint_symbol_1;
}
-@stage(vertex)
+@vertex
fn tint_symbol_2(@builtin(vertex_index) tint_symbol_1 : u32) -> @builtin(position) vec4<f32> {
_ = tint_symbol(tint_symbol_1);
return vec4<f32>();
}
)";
- auto got = Run<Renamer>(src);
+ auto got = Run<Renamer>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
- auto* data = got.data.Get<Renamer::Data>();
+ auto* data = got.data.Get<Renamer::Data>();
- ASSERT_NE(data, nullptr);
- Renamer::Data::Remappings expected_remappings = {
- {"vert_idx", "tint_symbol_1"},
- {"test", "tint_symbol"},
- {"entry", "tint_symbol_2"},
- };
- EXPECT_THAT(data->remappings, ContainerEq(expected_remappings));
+ ASSERT_NE(data, nullptr);
+ Renamer::Data::Remappings expected_remappings = {
+ {"vert_idx", "tint_symbol_1"},
+ {"test", "tint_symbol"},
+ {"entry", "tint_symbol_2"},
+ };
+ EXPECT_THAT(data->remappings, ContainerEq(expected_remappings));
}
TEST_F(RenamerTest, PreserveSwizzles) {
- auto* src = R"(
-@stage(vertex)
+ auto* src = R"(
+@vertex
fn entry() -> @builtin(position) vec4<f32> {
var v : vec4<f32>;
var rgba : f32;
@@ -96,8 +96,8 @@ fn entry() -> @builtin(position) vec4<f32> {
}
)";
- auto* expect = R"(
-@stage(vertex)
+ auto* expect = R"(
+@vertex
fn tint_symbol() -> @builtin(position) vec4<f32> {
var tint_symbol_1 : vec4<f32>;
var tint_symbol_2 : f32;
@@ -106,56 +106,56 @@ fn tint_symbol() -> @builtin(position) vec4<f32> {
}
)";
- auto got = Run<Renamer>(src);
+ auto got = Run<Renamer>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
- auto* data = got.data.Get<Renamer::Data>();
+ auto* data = got.data.Get<Renamer::Data>();
- ASSERT_NE(data, nullptr);
- Renamer::Data::Remappings expected_remappings = {
- {"entry", "tint_symbol"},
- {"v", "tint_symbol_1"},
- {"rgba", "tint_symbol_2"},
- {"xyzw", "tint_symbol_3"},
- };
- EXPECT_THAT(data->remappings, ContainerEq(expected_remappings));
+ ASSERT_NE(data, nullptr);
+ Renamer::Data::Remappings expected_remappings = {
+ {"entry", "tint_symbol"},
+ {"v", "tint_symbol_1"},
+ {"rgba", "tint_symbol_2"},
+ {"xyzw", "tint_symbol_3"},
+ };
+ EXPECT_THAT(data->remappings, ContainerEq(expected_remappings));
}
TEST_F(RenamerTest, PreserveBuiltins) {
- auto* src = R"(
-@stage(vertex)
+ auto* src = R"(
+@vertex
fn entry() -> @builtin(position) vec4<f32> {
var blah : vec4<f32>;
return abs(blah);
}
)";
- auto* expect = R"(
-@stage(vertex)
+ auto* expect = R"(
+@vertex
fn tint_symbol() -> @builtin(position) vec4<f32> {
var tint_symbol_1 : vec4<f32>;
return abs(tint_symbol_1);
}
)";
- auto got = Run<Renamer>(src);
+ auto got = Run<Renamer>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
- auto* data = got.data.Get<Renamer::Data>();
+ auto* data = got.data.Get<Renamer::Data>();
- ASSERT_NE(data, nullptr);
- Renamer::Data::Remappings expected_remappings = {
- {"entry", "tint_symbol"},
- {"blah", "tint_symbol_1"},
- };
- EXPECT_THAT(data->remappings, ContainerEq(expected_remappings));
+ ASSERT_NE(data, nullptr);
+ Renamer::Data::Remappings expected_remappings = {
+ {"entry", "tint_symbol"},
+ {"blah", "tint_symbol_1"},
+ };
+ EXPECT_THAT(data->remappings, ContainerEq(expected_remappings));
}
TEST_F(RenamerTest, PreserveBuiltinTypes) {
- auto* src = R"(
-@stage(compute) @workgroup_size(1)
+ auto* src = R"(
+@compute @workgroup_size(1)
fn entry() {
var a = modf(1.0).whole;
var b = modf(1.0).fract;
@@ -164,8 +164,8 @@ fn entry() {
}
)";
- auto* expect = R"(
-@stage(compute) @workgroup_size(1)
+ auto* expect = R"(
+@compute @workgroup_size(1)
fn tint_symbol() {
var tint_symbol_1 = modf(1.0).whole;
var tint_symbol_2 = modf(1.0).fract;
@@ -174,42 +174,42 @@ fn tint_symbol() {
}
)";
- auto got = Run<Renamer>(src);
+ auto got = Run<Renamer>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
- auto* data = got.data.Get<Renamer::Data>();
+ auto* data = got.data.Get<Renamer::Data>();
- ASSERT_NE(data, nullptr);
- Renamer::Data::Remappings expected_remappings = {
- {"entry", "tint_symbol"}, {"a", "tint_symbol_1"}, {"b", "tint_symbol_2"},
- {"c", "tint_symbol_3"}, {"d", "tint_symbol_4"},
- };
- EXPECT_THAT(data->remappings, ContainerEq(expected_remappings));
+ ASSERT_NE(data, nullptr);
+ Renamer::Data::Remappings expected_remappings = {
+ {"entry", "tint_symbol"}, {"a", "tint_symbol_1"}, {"b", "tint_symbol_2"},
+ {"c", "tint_symbol_3"}, {"d", "tint_symbol_4"},
+ };
+ EXPECT_THAT(data->remappings, ContainerEq(expected_remappings));
}
TEST_F(RenamerTest, PreserveUnicode) {
- auto src = R"(
-@stage(fragment)
+ auto src = R"(
+@fragment
fn frag_main() {
var )" + std::string(kUnicodeIdentifier) +
- R"( : i32;
+ R"( : i32;
}
)";
- auto expect = src;
+ auto expect = src;
- DataMap inputs;
- inputs.Add<Renamer::Config>(Renamer::Target::kMslKeywords,
- /* preserve_unicode */ true);
- auto got = Run<Renamer>(src, inputs);
+ DataMap inputs;
+ inputs.Add<Renamer::Config>(Renamer::Target::kMslKeywords,
+ /* preserve_unicode */ true);
+ auto got = Run<Renamer>(src, inputs);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(RenamerTest, AttemptSymbolCollision) {
- auto* src = R"(
-@stage(vertex)
+ auto* src = R"(
+@vertex
fn entry() -> @builtin(position) vec4<f32> {
var tint_symbol : vec4<f32>;
var tint_symbol_2 : vec4<f32>;
@@ -218,8 +218,8 @@ fn entry() -> @builtin(position) vec4<f32> {
}
)";
- auto* expect = R"(
-@stage(vertex)
+ auto* expect = R"(
+@vertex
fn tint_symbol() -> @builtin(position) vec4<f32> {
var tint_symbol_1 : vec4<f32>;
var tint_symbol_2 : vec4<f32>;
@@ -228,20 +228,20 @@ fn tint_symbol() -> @builtin(position) vec4<f32> {
}
)";
- auto got = Run<Renamer>(src);
+ auto got = Run<Renamer>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
- auto* data = got.data.Get<Renamer::Data>();
+ auto* data = got.data.Get<Renamer::Data>();
- ASSERT_NE(data, nullptr);
- Renamer::Data::Remappings expected_remappings = {
- {"entry", "tint_symbol"},
- {"tint_symbol", "tint_symbol_1"},
- {"tint_symbol_2", "tint_symbol_2"},
- {"tint_symbol_4", "tint_symbol_3"},
- };
- EXPECT_THAT(data->remappings, ContainerEq(expected_remappings));
+ ASSERT_NE(data, nullptr);
+ Renamer::Data::Remappings expected_remappings = {
+ {"entry", "tint_symbol"},
+ {"tint_symbol", "tint_symbol_1"},
+ {"tint_symbol_2", "tint_symbol_2"},
+ {"tint_symbol_4", "tint_symbol_3"},
+ };
+ EXPECT_THAT(data->remappings, ContainerEq(expected_remappings));
}
using RenamerTestGlsl = TransformTestWithParam<std::string>;
@@ -249,81 +249,81 @@ using RenamerTestHlsl = TransformTestWithParam<std::string>;
using RenamerTestMsl = TransformTestWithParam<std::string>;
TEST_P(RenamerTestGlsl, Keywords) {
- auto keyword = GetParam();
+ auto keyword = GetParam();
- auto src = R"(
-@stage(fragment)
+ auto src = R"(
+@fragment
fn frag_main() {
var )" + keyword +
- R"( : i32;
+ R"( : i32;
}
)";
- auto* expect = R"(
-@stage(fragment)
+ auto* expect = R"(
+@fragment
fn frag_main() {
var tint_symbol : i32;
}
)";
- DataMap inputs;
- inputs.Add<Renamer::Config>(Renamer::Target::kGlslKeywords,
- /* preserve_unicode */ false);
- auto got = Run<Renamer>(src, inputs);
+ DataMap inputs;
+ inputs.Add<Renamer::Config>(Renamer::Target::kGlslKeywords,
+ /* preserve_unicode */ false);
+ auto got = Run<Renamer>(src, inputs);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_P(RenamerTestHlsl, Keywords) {
- auto keyword = GetParam();
+ auto keyword = GetParam();
- auto src = R"(
-@stage(fragment)
+ auto src = R"(
+@fragment
fn frag_main() {
var )" + keyword +
- R"( : i32;
+ R"( : i32;
}
)";
- auto* expect = R"(
-@stage(fragment)
+ auto* expect = R"(
+@fragment
fn frag_main() {
var tint_symbol : i32;
}
)";
- DataMap inputs;
- inputs.Add<Renamer::Config>(Renamer::Target::kHlslKeywords,
- /* preserve_unicode */ false);
- auto got = Run<Renamer>(src, inputs);
+ DataMap inputs;
+ inputs.Add<Renamer::Config>(Renamer::Target::kHlslKeywords,
+ /* preserve_unicode */ false);
+ auto got = Run<Renamer>(src, inputs);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_P(RenamerTestMsl, Keywords) {
- auto keyword = GetParam();
+ auto keyword = GetParam();
- auto src = R"(
-@stage(fragment)
+ auto src = R"(
+@fragment
fn frag_main() {
var )" + keyword +
- R"( : i32;
+ R"( : i32;
}
)";
- auto* expect = R"(
-@stage(fragment)
+ auto* expect = R"(
+@fragment
fn frag_main() {
var tint_symbol : i32;
}
)";
- DataMap inputs;
- inputs.Add<Renamer::Config>(Renamer::Target::kMslKeywords,
- /* preserve_unicode */ false);
- auto got = Run<Renamer>(src, inputs);
+ DataMap inputs;
+ inputs.Add<Renamer::Config>(Renamer::Target::kMslKeywords,
+ /* preserve_unicode */ false);
+ auto got = Run<Renamer>(src, inputs);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
INSTANTIATE_TEST_SUITE_P(RenamerTestGlsl,
diff --git a/chromium/third_party/dawn/src/tint/transform/robustness.cc b/chromium/third_party/dawn/src/tint/transform/robustness.cc
index 4a67d495f7f..aab1e0cc00b 100644
--- a/chromium/third_party/dawn/src/tint/transform/robustness.cc
+++ b/chromium/third_party/dawn/src/tint/transform/robustness.cc
@@ -22,265 +22,258 @@
#include "src/tint/sem/block_statement.h"
#include "src/tint/sem/call.h"
#include "src/tint/sem/expression.h"
-#include "src/tint/sem/reference_type.h"
+#include "src/tint/sem/reference.h"
#include "src/tint/sem/statement.h"
TINT_INSTANTIATE_TYPEINFO(tint::transform::Robustness);
TINT_INSTANTIATE_TYPEINFO(tint::transform::Robustness::Config);
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::transform {
/// State holds the current transform state
struct Robustness::State {
- /// The clone context
- CloneContext& ctx;
-
- /// Set of storage classes to not apply the transform to
- std::unordered_set<ast::StorageClass> omitted_classes;
-
- /// Applies the transformation state to `ctx`.
- void Transform() {
- ctx.ReplaceAll([&](const ast::IndexAccessorExpression* expr) {
- return Transform(expr);
- });
- ctx.ReplaceAll(
- [&](const ast::CallExpression* expr) { return Transform(expr); });
- }
-
- /// Apply bounds clamping to array, vector and matrix indexing
- /// @param expr the array, vector or matrix index expression
- /// @return the clamped replacement expression, or nullptr if `expr` should be
- /// cloned without changes.
- const ast::IndexAccessorExpression* Transform(
- const ast::IndexAccessorExpression* expr) {
- auto* ret_type = ctx.src->Sem().Get(expr->object)->Type();
-
- auto* ref = ret_type->As<sem::Reference>();
- if (ref && omitted_classes.count(ref->StorageClass()) != 0) {
- return nullptr;
- }
+ /// The clone context
+ CloneContext& ctx;
- auto* ret_unwrapped = ret_type->UnwrapRef();
-
- ProgramBuilder& b = *ctx.dst;
- using u32 = ProgramBuilder::u32;
-
- struct Value {
- const ast::Expression* expr = nullptr; // If null, then is a constant
- union {
- uint32_t u32 = 0; // use if is_signed == false
- int32_t i32; // use if is_signed == true
- };
- bool is_signed = false;
- };
-
- Value size; // size of the array, vector or matrix
- size.is_signed = false; // size is always unsigned
- if (auto* vec = ret_unwrapped->As<sem::Vector>()) {
- size.u32 = vec->Width();
-
- } else if (auto* arr = ret_unwrapped->As<sem::Array>()) {
- size.u32 = arr->Count();
- } else if (auto* mat = ret_unwrapped->As<sem::Matrix>()) {
- // The row accessor would have been an embedded index accessor and already
- // handled, so we just need to do columns here.
- size.u32 = mat->columns();
- } else {
- return nullptr;
- }
+ /// Set of storage classes to not apply the transform to
+ std::unordered_set<ast::StorageClass> omitted_classes;
- if (size.u32 == 0) {
- if (!ret_unwrapped->Is<sem::Array>()) {
- b.Diagnostics().add_error(diag::System::Transform,
- "invalid 0 sized non-array", expr->source);
- return nullptr;
- }
- // Runtime sized array
- auto* arr = ctx.Clone(expr->object);
- size.expr = b.Call("arrayLength", b.AddressOf(arr));
+ /// Applies the transformation state to `ctx`.
+ void Transform() {
+ ctx.ReplaceAll([&](const ast::IndexAccessorExpression* expr) { return Transform(expr); });
+ ctx.ReplaceAll([&](const ast::CallExpression* expr) { return Transform(expr); });
}
- // Calculate the maximum possible index value (size-1u)
- // Size must be positive (non-zero), so we can safely subtract 1 here
- // without underflow.
- Value limit;
- limit.is_signed = false; // Like size, limit is always unsigned.
- if (size.expr) {
- // Dynamic size
- limit.expr = b.Sub(size.expr, 1u);
- } else {
- // Constant size
- limit.u32 = size.u32 - 1u;
- }
+ /// Apply bounds clamping to array, vector and matrix indexing
+ /// @param expr the array, vector or matrix index expression
+ /// @return the clamped replacement expression, or nullptr if `expr` should be
+ /// cloned without changes.
+ const ast::IndexAccessorExpression* Transform(const ast::IndexAccessorExpression* expr) {
+ auto* ret_type = ctx.src->Sem().Get(expr->object)->Type();
- Value idx; // index value
+ auto* ref = ret_type->As<sem::Reference>();
+ if (ref && omitted_classes.count(ref->StorageClass()) != 0) {
+ return nullptr;
+ }
- auto* idx_sem = ctx.src->Sem().Get(expr->index);
- auto* idx_ty = idx_sem->Type()->UnwrapRef();
- if (!idx_ty->IsAnyOf<sem::I32, sem::U32>()) {
- TINT_ICE(Transform, b.Diagnostics()) << "index must be u32 or i32, got "
- << idx_sem->Type()->TypeInfo().name;
- return nullptr;
- }
+ auto* ret_unwrapped = ret_type->UnwrapRef();
+
+ ProgramBuilder& b = *ctx.dst;
+
+ struct Value {
+ const ast::Expression* expr = nullptr; // If null, then is a constant
+ union {
+ uint32_t u32 = 0; // use if is_signed == false
+ int32_t i32; // use if is_signed == true
+ };
+ bool is_signed = false;
+ };
+
+ Value size; // size of the array, vector or matrix
+ size.is_signed = false; // size is always unsigned
+ if (auto* vec = ret_unwrapped->As<sem::Vector>()) {
+ size.u32 = vec->Width();
+
+ } else if (auto* arr = ret_unwrapped->As<sem::Array>()) {
+ size.u32 = arr->Count();
+ } else if (auto* mat = ret_unwrapped->As<sem::Matrix>()) {
+ // The row accessor would have been an embedded index accessor and already
+ // been handled, so we just need to do columns here.
+ size.u32 = mat->columns();
+ } else {
+ return nullptr;
+ }
- if (auto idx_constant = idx_sem->ConstantValue()) {
- // Constant value index
- if (idx_constant.Type()->Is<sem::I32>()) {
- idx.i32 = idx_constant.Elements()[0].i32;
- idx.is_signed = true;
- } else if (idx_constant.Type()->Is<sem::U32>()) {
- idx.u32 = idx_constant.Elements()[0].u32;
- idx.is_signed = false;
- } else {
- TINT_ICE(Transform, b.Diagnostics())
- << "unsupported constant value for accessor "
- << idx_constant.Type()->TypeInfo().name;
- return nullptr;
- }
- } else {
- // Dynamic value index
- idx.expr = ctx.Clone(expr->index);
- idx.is_signed = idx_ty->Is<sem::I32>();
- }
+ if (size.u32 == 0) {
+ if (!ret_unwrapped->Is<sem::Array>()) {
+ b.Diagnostics().add_error(diag::System::Transform, "invalid 0 sized non-array",
+ expr->source);
+ return nullptr;
+ }
+ // Runtime sized array
+ auto* arr = ctx.Clone(expr->object);
+ size.expr = b.Call("arrayLength", b.AddressOf(arr));
+ }
- // Clamp the index so that it cannot exceed limit.
- if (idx.expr || limit.expr) {
- // One of, or both of idx and limit are non-constant.
-
- // If the index is signed, cast it to a u32 (with clamping if constant).
- if (idx.is_signed) {
- if (idx.expr) {
- // We don't use a max(idx, 0) here, as that incurs a runtime
- // performance cost, and if the unsigned value will be clamped by
- // limit, resulting in a value between [0..limit)
- idx.expr = b.Construct<u32>(idx.expr);
- idx.is_signed = false;
+ // Calculate the maximum possible index value (size-1u)
+ // Size must be positive (non-zero), so we can safely subtract 1 here
+ // without underflow.
+ Value limit;
+ limit.is_signed = false; // Like size, limit is always unsigned.
+ if (size.expr) {
+ // Dynamic size
+ limit.expr = b.Sub(size.expr, 1_u);
} else {
- idx.u32 = static_cast<uint32_t>(std::max(idx.i32, 0));
- idx.is_signed = false;
+ // Constant size
+ limit.u32 = size.u32 - 1u;
}
- }
-
- // Convert idx and limit to expressions, so we can emit `min(idx, limit)`.
- if (!idx.expr) {
- idx.expr = b.Expr(idx.u32);
- }
- if (!limit.expr) {
- limit.expr = b.Expr(limit.u32);
- }
-
- // Perform the clamp with `min(idx, limit)`
- idx.expr = b.Call("min", idx.expr, limit.expr);
- } else {
- // Both idx and max are constant.
- if (idx.is_signed) {
- // The index is signed. Calculate limit as signed.
- int32_t signed_limit = static_cast<int32_t>(
- std::min<uint32_t>(limit.u32, std::numeric_limits<int32_t>::max()));
- idx.i32 = std::max(idx.i32, 0);
- idx.i32 = std::min(idx.i32, signed_limit);
- } else {
- // The index is unsigned.
- idx.u32 = std::min(idx.u32, limit.u32);
- }
- }
- // Convert idx to an expression, so we can emit the new accessor.
- if (!idx.expr) {
- idx.expr = idx.is_signed
- ? static_cast<const ast::Expression*>(b.Expr(idx.i32))
- : static_cast<const ast::Expression*>(b.Expr(idx.u32));
- }
+ Value idx; // index value
- // Clone arguments outside of create() call to have deterministic ordering
- auto src = ctx.Clone(expr->source);
- auto* obj = ctx.Clone(expr->object);
- return b.IndexAccessor(src, obj, idx.expr);
- }
-
- /// @param type builtin type
- /// @returns true if the given builtin is a texture function that requires
- /// argument clamping,
- bool TextureBuiltinNeedsClamping(sem::BuiltinType type) {
- return type == sem::BuiltinType::kTextureLoad ||
- type == sem::BuiltinType::kTextureStore;
- }
-
- /// Apply bounds clamping to the coordinates, array index and level arguments
- /// of the `textureLoad()` and `textureStore()` builtins.
- /// @param expr the builtin call expression
- /// @return the clamped replacement call expression, or nullptr if `expr`
- /// should be cloned without changes.
- const ast::CallExpression* Transform(const ast::CallExpression* expr) {
- auto* call = ctx.src->Sem().Get(expr);
- auto* call_target = call->Target();
- auto* builtin = call_target->As<sem::Builtin>();
- if (!builtin || !TextureBuiltinNeedsClamping(builtin->Type())) {
- return nullptr; // No transform, just clone.
- }
+ auto* idx_sem = ctx.src->Sem().Get(expr->index);
+ auto* idx_ty = idx_sem->Type()->UnwrapRef();
+ if (!idx_ty->IsAnyOf<sem::I32, sem::U32>()) {
+ TINT_ICE(Transform, b.Diagnostics())
+ << "index must be u32 or i32, got " << idx_sem->Type()->TypeInfo().name;
+ return nullptr;
+ }
- ProgramBuilder& b = *ctx.dst;
-
- // Indices of the mandatory texture and coords parameters, and the optional
- // array and level parameters.
- auto& signature = builtin->Signature();
- auto texture_idx = signature.IndexOf(sem::ParameterUsage::kTexture);
- auto coords_idx = signature.IndexOf(sem::ParameterUsage::kCoords);
- auto array_idx = signature.IndexOf(sem::ParameterUsage::kArrayIndex);
- auto level_idx = signature.IndexOf(sem::ParameterUsage::kLevel);
-
- auto* texture_arg = expr->args[texture_idx];
- auto* coords_arg = expr->args[coords_idx];
- auto* coords_ty = builtin->Parameters()[coords_idx]->Type();
-
- // If the level is provided, then we need to clamp this. As the level is
- // used by textureDimensions() and the texture[Load|Store]() calls, we need
- // to clamp both usages.
- // TODO(bclayton): We probably want to place this into a let so that the
- // calculation can be reused. This is fiddly to get right.
- std::function<const ast::Expression*()> level_arg;
- if (level_idx >= 0) {
- level_arg = [&] {
- auto* arg = expr->args[level_idx];
- auto* num_levels = b.Call("textureNumLevels", ctx.Clone(texture_arg));
- auto* zero = b.Expr(0);
- auto* max = ctx.dst->Sub(num_levels, 1);
- auto* clamped = b.Call("clamp", ctx.Clone(arg), zero, max);
- return clamped;
- };
- }
+ if (auto idx_constant = idx_sem->ConstantValue()) {
+ // Constant value index
+ if (idx_constant.Type()->Is<sem::I32>()) {
+ idx.i32 = static_cast<int32_t>(idx_constant.Element<AInt>(0).value);
+ idx.is_signed = true;
+ } else if (idx_constant.Type()->Is<sem::U32>()) {
+ idx.u32 = static_cast<uint32_t>(idx_constant.Element<AInt>(0).value);
+ idx.is_signed = false;
+ } else {
+ TINT_ICE(Transform, b.Diagnostics()) << "unsupported constant value for accessor "
+ << idx_constant.Type()->TypeInfo().name;
+ return nullptr;
+ }
+ } else {
+ // Dynamic value index
+ idx.expr = ctx.Clone(expr->index);
+ idx.is_signed = idx_ty->Is<sem::I32>();
+ }
- // Clamp the coordinates argument
- {
- auto* texture_dims =
- level_arg
- ? b.Call("textureDimensions", ctx.Clone(texture_arg), level_arg())
- : b.Call("textureDimensions", ctx.Clone(texture_arg));
- auto* zero = b.Construct(CreateASTTypeFor(ctx, coords_ty));
- auto* max = ctx.dst->Sub(
- texture_dims, b.Construct(CreateASTTypeFor(ctx, coords_ty), 1));
- auto* clamped_coords = b.Call("clamp", ctx.Clone(coords_arg), zero, max);
- ctx.Replace(coords_arg, clamped_coords);
- }
+ // Clamp the index so that it cannot exceed limit.
+ if (idx.expr || limit.expr) {
+ // One of, or both of idx and limit are non-constant.
+
+ // If the index is signed, cast it to a u32 (with clamping if constant).
+ if (idx.is_signed) {
+ if (idx.expr) {
+ // We don't use a max(idx, 0) here, as that incurs a runtime
+ // performance cost, and the unsigned value will be clamped by
+ // limit anyway, resulting in a value in [0..limit)
+ idx.expr = b.Construct<u32>(idx.expr);
+ idx.is_signed = false;
+ } else {
+ idx.u32 = static_cast<uint32_t>(std::max(idx.i32, 0));
+ idx.is_signed = false;
+ }
+ }
+
+ // Convert idx and limit to expressions, so we can emit `min(idx, limit)`.
+ if (!idx.expr) {
+ idx.expr = b.Expr(u32(idx.u32));
+ }
+ if (!limit.expr) {
+ limit.expr = b.Expr(u32(limit.u32));
+ }
+
+ // Perform the clamp with `min(idx, limit)`
+ idx.expr = b.Call("min", idx.expr, limit.expr);
+ } else {
+ // Both idx and max are constant.
+ if (idx.is_signed) {
+ // The index is signed. Calculate limit as signed.
+ int32_t signed_limit = static_cast<int32_t>(
+ std::min<uint32_t>(limit.u32, std::numeric_limits<int32_t>::max()));
+ idx.i32 = std::max(idx.i32, 0);
+ idx.i32 = std::min(idx.i32, signed_limit);
+ } else {
+ // The index is unsigned.
+ idx.u32 = std::min(idx.u32, limit.u32);
+ }
+ }
- // Clamp the array_index argument, if provided
- if (array_idx >= 0) {
- auto* arg = expr->args[array_idx];
- auto* num_layers = b.Call("textureNumLayers", ctx.Clone(texture_arg));
- auto* zero = b.Expr(0);
- auto* max = ctx.dst->Sub(num_layers, 1);
- auto* clamped = b.Call("clamp", ctx.Clone(arg), zero, max);
- ctx.Replace(arg, clamped);
+ // Convert idx to an expression, so we can emit the new accessor.
+ if (!idx.expr) {
+ idx.expr = idx.is_signed ? static_cast<const ast::Expression*>(b.Expr(i32(idx.i32)))
+ : static_cast<const ast::Expression*>(b.Expr(u32(idx.u32)));
+ }
+
+ // Clone arguments outside of create() call to have deterministic ordering
+ auto src = ctx.Clone(expr->source);
+ auto* obj = ctx.Clone(expr->object);
+ return b.IndexAccessor(src, obj, idx.expr);
}
- // Clamp the level argument, if provided
- if (level_idx >= 0) {
- auto* arg = expr->args[level_idx];
- ctx.Replace(arg, level_arg ? level_arg() : ctx.dst->Expr(0));
+ /// @param type builtin type
+ /// @returns true if the given builtin is a texture function that requires
+ /// argument clamping.
+ bool TextureBuiltinNeedsClamping(sem::BuiltinType type) {
+ return type == sem::BuiltinType::kTextureLoad || type == sem::BuiltinType::kTextureStore;
}
- return nullptr; // Clone, which will use the argument replacements above.
- }
+ /// Apply bounds clamping to the coordinates, array index and level arguments
+ /// of the `textureLoad()` and `textureStore()` builtins.
+ /// @param expr the builtin call expression
+ /// @return the clamped replacement call expression, or nullptr if `expr`
+ /// should be cloned without changes.
+ const ast::CallExpression* Transform(const ast::CallExpression* expr) {
+ auto* call = ctx.src->Sem().Get(expr)->UnwrapMaterialize()->As<sem::Call>();
+ auto* call_target = call->Target();
+ auto* builtin = call_target->As<sem::Builtin>();
+ if (!builtin || !TextureBuiltinNeedsClamping(builtin->Type())) {
+ return nullptr; // No transform, just clone.
+ }
+
+ ProgramBuilder& b = *ctx.dst;
+
+ // Indices of the mandatory texture and coords parameters, and the optional
+ // array and level parameters.
+ auto& signature = builtin->Signature();
+ auto texture_idx = signature.IndexOf(sem::ParameterUsage::kTexture);
+ auto coords_idx = signature.IndexOf(sem::ParameterUsage::kCoords);
+ auto array_idx = signature.IndexOf(sem::ParameterUsage::kArrayIndex);
+ auto level_idx = signature.IndexOf(sem::ParameterUsage::kLevel);
+
+ auto* texture_arg = expr->args[texture_idx];
+ auto* coords_arg = expr->args[coords_idx];
+ auto* coords_ty = builtin->Parameters()[coords_idx]->Type();
+
+ // If the level is provided, then we need to clamp this. As the level is
+ // used by textureDimensions() and the texture[Load|Store]() calls, we need
+ // to clamp both usages.
+ // TODO(bclayton): We probably want to place this into a let so that the
+ // calculation can be reused. This is fiddly to get right.
+ std::function<const ast::Expression*()> level_arg;
+ if (level_idx >= 0) {
+ level_arg = [&] {
+ auto* arg = expr->args[level_idx];
+ auto* num_levels = b.Call("textureNumLevels", ctx.Clone(texture_arg));
+ auto* zero = b.Expr(0_i);
+ auto* max = ctx.dst->Sub(num_levels, 1_i);
+ auto* clamped = b.Call("clamp", ctx.Clone(arg), zero, max);
+ return clamped;
+ };
+ }
+
+ // Clamp the coordinates argument
+ {
+ auto* texture_dims =
+ level_arg ? b.Call("textureDimensions", ctx.Clone(texture_arg), level_arg())
+ : b.Call("textureDimensions", ctx.Clone(texture_arg));
+ auto* zero = b.Construct(CreateASTTypeFor(ctx, coords_ty));
+ auto* max =
+ ctx.dst->Sub(texture_dims, b.Construct(CreateASTTypeFor(ctx, coords_ty), 1_i));
+ auto* clamped_coords = b.Call("clamp", ctx.Clone(coords_arg), zero, max);
+ ctx.Replace(coords_arg, clamped_coords);
+ }
+
+ // Clamp the array_index argument, if provided
+ if (array_idx >= 0) {
+ auto* arg = expr->args[array_idx];
+ auto* num_layers = b.Call("textureNumLayers", ctx.Clone(texture_arg));
+ auto* zero = b.Expr(0_i);
+ auto* max = ctx.dst->Sub(num_layers, 1_i);
+ auto* clamped = b.Call("clamp", ctx.Clone(arg), zero, max);
+ ctx.Replace(arg, clamped);
+ }
+
+ // Clamp the level argument, if provided
+ if (level_idx >= 0) {
+ auto* arg = expr->args[level_idx];
+ ctx.Replace(arg, level_arg ? level_arg() : ctx.dst->Expr(0_i));
+ }
+
+ return nullptr; // Clone, which will use the argument replacements above.
+ }
};
Robustness::Config::Config() = default;
@@ -292,27 +285,27 @@ Robustness::Robustness() = default;
Robustness::~Robustness() = default;
void Robustness::Run(CloneContext& ctx, const DataMap& inputs, DataMap&) const {
- Config cfg;
- if (auto* cfg_data = inputs.Get<Config>()) {
- cfg = *cfg_data;
- }
-
- std::unordered_set<ast::StorageClass> omitted_classes;
- for (auto sc : cfg.omitted_classes) {
- switch (sc) {
- case StorageClass::kUniform:
- omitted_classes.insert(ast::StorageClass::kUniform);
- break;
- case StorageClass::kStorage:
- omitted_classes.insert(ast::StorageClass::kStorage);
- break;
+ Config cfg;
+ if (auto* cfg_data = inputs.Get<Config>()) {
+ cfg = *cfg_data;
+ }
+
+ std::unordered_set<ast::StorageClass> omitted_classes;
+ for (auto sc : cfg.omitted_classes) {
+ switch (sc) {
+ case StorageClass::kUniform:
+ omitted_classes.insert(ast::StorageClass::kUniform);
+ break;
+ case StorageClass::kStorage:
+ omitted_classes.insert(ast::StorageClass::kStorage);
+ break;
+ }
}
- }
- State state{ctx, std::move(omitted_classes)};
+ State state{ctx, std::move(omitted_classes)};
- state.Transform();
- ctx.Clone();
+ state.Transform();
+ ctx.Clone();
}
} // namespace tint::transform
diff --git a/chromium/third_party/dawn/src/tint/transform/robustness.h b/chromium/third_party/dawn/src/tint/transform/robustness.h
index 79ddf0891c1..138b48cec3d 100644
--- a/chromium/third_party/dawn/src/tint/transform/robustness.h
+++ b/chromium/third_party/dawn/src/tint/transform/robustness.h
@@ -32,51 +32,49 @@ namespace tint::transform {
/// to zero and any access past the end of the array will clamp to
/// (array length - 1).
class Robustness : public Castable<Robustness, Transform> {
- public:
- /// Storage class to be skipped in the transform
- enum class StorageClass {
- kUniform,
- kStorage,
- };
-
- /// Configuration options for the transform
- struct Config : public Castable<Config, Data> {
- /// Constructor
- Config();
+ public:
+ /// Storage class to be skipped in the transform
+ enum class StorageClass {
+ kUniform,
+ kStorage,
+ };
+
+ /// Configuration options for the transform
+ struct Config : public Castable<Config, Data> {
+ /// Constructor
+ Config();
+
+ /// Copy constructor
+ Config(const Config&);
- /// Copy constructor
- Config(const Config&);
+ /// Destructor
+ ~Config() override;
+ /// Assignment operator
+ /// @returns this Config
+ Config& operator=(const Config&);
+
+ /// Storage classes to omit from applying the transform to.
+ /// This allows for optimizing on hardware that provides safe accesses.
+ std::unordered_set<StorageClass> omitted_classes;
+ };
+
+ /// Constructor
+ Robustness();
/// Destructor
- ~Config() override;
-
- /// Assignment operator
- /// @returns this Config
- Config& operator=(const Config&);
-
- /// Storage classes to omit from apply the transform to.
- /// This allows for optimizing on hardware that provide safe accesses.
- std::unordered_set<StorageClass> omitted_classes;
- };
-
- /// Constructor
- Robustness();
- /// Destructor
- ~Robustness() override;
-
- protected:
- /// Runs the transform using the CloneContext built for transforming a
- /// program. Run() is responsible for calling Clone() on the CloneContext.
- /// @param ctx the CloneContext primed with the input program and
- /// ProgramBuilder
- /// @param inputs optional extra transform-specific input data
- /// @param outputs optional extra transform-specific output data
- void Run(CloneContext& ctx,
- const DataMap& inputs,
- DataMap& outputs) const override;
-
- private:
- struct State;
+ ~Robustness() override;
+
+ protected:
+ /// Runs the transform using the CloneContext built for transforming a
+ /// program. Run() is responsible for calling Clone() on the CloneContext.
+ /// @param ctx the CloneContext primed with the input program and
+ /// ProgramBuilder
+ /// @param inputs optional extra transform-specific input data
+ /// @param outputs optional extra transform-specific output data
+ void Run(CloneContext& ctx, const DataMap& inputs, DataMap& outputs) const override;
+
+ private:
+ struct State;
};
} // namespace tint::transform
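Robustness reads its configuration from the input DataMap, as the Run() override above shows. A short sketch of supplying that configuration follows; the function name is illustrative, while the Config, StorageClass and DataMap::Add<>() usage follow the declarations in this header.

#include "src/tint/transform/robustness.h"

// Adds a Robustness::Config to `inputs` that skips index clamping for storage
// buffers, e.g. when the backend already guarantees bounds-safe accesses.
void AddRobustnessConfig(tint::transform::DataMap& inputs) {
    tint::transform::Robustness::Config cfg;
    cfg.omitted_classes.insert(tint::transform::Robustness::StorageClass::kStorage);
    inputs.Add<tint::transform::Robustness::Config>(cfg);  // Config is copyable
}

The populated DataMap is then passed, together with the program, to Robustness (directly or via a transform::Manager), which leaves accesses in the omitted storage classes unclamped.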
diff --git a/chromium/third_party/dawn/src/tint/transform/robustness_test.cc b/chromium/third_party/dawn/src/tint/transform/robustness_test.cc
index db113b22810..fb0e5a40a6f 100644
--- a/chromium/third_party/dawn/src/tint/transform/robustness_test.cc
+++ b/chromium/third_party/dawn/src/tint/transform/robustness_test.cc
@@ -22,7 +22,7 @@ namespace {
using RobustnessTest = TransformTest;
TEST_F(RobustnessTest, Array_Idx_Clamp) {
- auto* src = R"(
+ auto* src = R"(
var<private> a : array<f32, 3>;
let c : u32 = 1u;
@@ -32,7 +32,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
var<private> a : array<f32, 3>;
let c : u32 = 1u;
@@ -42,13 +42,13 @@ fn f() {
}
)";
- auto got = Run<Robustness>(src);
+ auto got = Run<Robustness>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(RobustnessTest, Array_Idx_Clamp_OutOfOrder) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
let b : f32 = a[c];
}
@@ -58,7 +58,7 @@ let c : u32 = 1u;
var<private> a : array<f32, 3>;
)";
- auto* expect = R"(
+ auto* expect = R"(
fn f() {
let b : f32 = a[1u];
}
@@ -68,13 +68,13 @@ let c : u32 = 1u;
var<private> a : array<f32, 3>;
)";
- auto got = Run<Robustness>(src);
+ auto got = Run<Robustness>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(RobustnessTest, Array_Idx_Nested_Scalar) {
- auto* src = R"(
+ auto* src = R"(
var<private> a : array<f32, 3>;
var<private> b : array<i32, 5>;
@@ -86,7 +86,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
var<private> a : array<f32, 3>;
var<private> b : array<i32, 5>;
@@ -98,13 +98,13 @@ fn f() {
}
)";
- auto got = Run<Robustness>(src);
+ auto got = Run<Robustness>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(RobustnessTest, Array_Idx_Nested_Scalar_OutOfOrder) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
var c : f32 = a[ b[i] ];
}
@@ -116,7 +116,7 @@ var<private> b : array<i32, 5>;
var<private> a : array<f32, 3>;
)";
- auto* expect = R"(
+ auto* expect = R"(
fn f() {
var c : f32 = a[min(u32(b[min(i, 4u)]), 2u)];
}
@@ -128,57 +128,57 @@ var<private> b : array<i32, 5>;
var<private> a : array<f32, 3>;
)";
- auto got = Run<Robustness>(src);
+ auto got = Run<Robustness>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(RobustnessTest, Array_Idx_Scalar) {
- auto* src = R"(
+ auto* src = R"(
var<private> a : array<f32, 3>;
fn f() {
- var b : f32 = a[1];
+ var b : f32 = a[1i];
}
)";
- auto* expect = R"(
+ auto* expect = R"(
var<private> a : array<f32, 3>;
fn f() {
- var b : f32 = a[1];
+ var b : f32 = a[1i];
}
)";
- auto got = Run<Robustness>(src);
+ auto got = Run<Robustness>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(RobustnessTest, Array_Idx_Scalar_OutOfOrder) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
- var b : f32 = a[1];
+ var b : f32 = a[1i];
}
var<private> a : array<f32, 3>;
)";
- auto* expect = R"(
+ auto* expect = R"(
fn f() {
- var b : f32 = a[1];
+ var b : f32 = a[1i];
}
var<private> a : array<f32, 3>;
)";
- auto got = Run<Robustness>(src);
+ auto got = Run<Robustness>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(RobustnessTest, Array_Idx_Expr) {
- auto* src = R"(
+ auto* src = R"(
var<private> a : array<f32, 3>;
var<private> c : i32;
@@ -188,7 +188,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
var<private> a : array<f32, 3>;
var<private> c : i32;
@@ -198,13 +198,13 @@ fn f() {
}
)";
- auto got = Run<Robustness>(src);
+ auto got = Run<Robustness>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(RobustnessTest, Array_Idx_Expr_OutOfOrder) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
var b : f32 = a[c + 2 - 3];
}
@@ -214,7 +214,7 @@ var<private> c : i32;
var<private> a : array<f32, 3>;
)";
- auto* expect = R"(
+ auto* expect = R"(
fn f() {
var b : f32 = a[min(u32(((c + 2) - 3)), 2u)];
}
@@ -224,13 +224,13 @@ var<private> c : i32;
var<private> a : array<f32, 3>;
)";
- auto got = Run<Robustness>(src);
+ auto got = Run<Robustness>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(RobustnessTest, Array_Idx_Negative) {
- auto* src = R"(
+ auto* src = R"(
var<private> a : array<f32, 3>;
fn f() {
@@ -238,21 +238,21 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
var<private> a : array<f32, 3>;
fn f() {
- var b : f32 = a[0];
+ var b : f32 = a[0i];
}
)";
- auto got = Run<Robustness>(src);
+ auto got = Run<Robustness>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(RobustnessTest, Array_Idx_Negative_OutOfOrder) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
var b : f32 = a[-1];
}
@@ -260,21 +260,21 @@ fn f() {
var<private> a : array<f32, 3>;
)";
- auto* expect = R"(
+ auto* expect = R"(
fn f() {
- var b : f32 = a[0];
+ var b : f32 = a[0i];
}
var<private> a : array<f32, 3>;
)";
- auto got = Run<Robustness>(src);
+ auto got = Run<Robustness>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(RobustnessTest, Array_Idx_OutOfBounds) {
- auto* src = R"(
+ auto* src = R"(
var<private> a : array<f32, 3>;
fn f() {
@@ -282,21 +282,21 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
var<private> a : array<f32, 3>;
fn f() {
- var b : f32 = a[2];
+ var b : f32 = a[2i];
}
)";
- auto got = Run<Robustness>(src);
+ auto got = Run<Robustness>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(RobustnessTest, Array_Idx_OutOfBounds_OutOfOrder) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
var b : f32 = a[3];
}
@@ -304,23 +304,23 @@ fn f() {
var<private> a : array<f32, 3>;
)";
- auto* expect = R"(
+ auto* expect = R"(
fn f() {
- var b : f32 = a[2];
+ var b : f32 = a[2i];
}
var<private> a : array<f32, 3>;
)";
- auto got = Run<Robustness>(src);
+ auto got = Run<Robustness>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
// TODO(crbug.com/tint/1177) - Validation currently forbids arrays larger than
// 0xffffffff. If WGSL supports 64-bit indexing, re-enable this test.
TEST_F(RobustnessTest, DISABLED_LargeArrays_Idx) {
- auto* src = R"(
+ auto* src = R"(
struct S {
a : array<f32, 0x7fffffff>,
b : array<f32>,
@@ -358,7 +358,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct S {
a : array<f32, 2147483647>,
b : array<f32>,
@@ -392,57 +392,57 @@ fn f() {
}
)";
- auto got = Run<Robustness>(src);
+ auto got = Run<Robustness>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(RobustnessTest, Vector_Idx_Scalar) {
- auto* src = R"(
+ auto* src = R"(
var<private> a : vec3<f32>;
fn f() {
- var b : f32 = a[1];
+ var b : f32 = a[1i];
}
)";
- auto* expect = R"(
+ auto* expect = R"(
var<private> a : vec3<f32>;
fn f() {
- var b : f32 = a[1];
+ var b : f32 = a[1i];
}
)";
- auto got = Run<Robustness>(src);
+ auto got = Run<Robustness>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(RobustnessTest, Vector_Idx_Scalar_OutOfOrder) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
- var b : f32 = a[1];
+ var b : f32 = a[1i];
}
var<private> a : vec3<f32>;
)";
- auto* expect = R"(
+ auto* expect = R"(
fn f() {
- var b : f32 = a[1];
+ var b : f32 = a[1i];
}
var<private> a : vec3<f32>;
)";
- auto got = Run<Robustness>(src);
+ auto got = Run<Robustness>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(RobustnessTest, Vector_Idx_Expr) {
- auto* src = R"(
+ auto* src = R"(
var<private> a : vec3<f32>;
var<private> c : i32;
@@ -452,7 +452,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
var<private> a : vec3<f32>;
var<private> c : i32;
@@ -462,13 +462,13 @@ fn f() {
}
)";
- auto got = Run<Robustness>(src);
+ auto got = Run<Robustness>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(RobustnessTest, Vector_Idx_Expr_OutOfOrder) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
var b : f32 = a[c + 2 - 3];
}
@@ -478,7 +478,7 @@ var<private> c : i32;
var<private> a : vec3<f32>;
)";
- auto* expect = R"(
+ auto* expect = R"(
fn f() {
var b : f32 = a[min(u32(((c + 2) - 3)), 2u)];
}
@@ -488,13 +488,13 @@ var<private> c : i32;
var<private> a : vec3<f32>;
)";
- auto got = Run<Robustness>(src);
+ auto got = Run<Robustness>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(RobustnessTest, Vector_Swizzle_Idx_Scalar) {
- auto* src = R"(
+ auto* src = R"(
var<private> a : vec3<f32>;
fn f() {
@@ -502,21 +502,21 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
var<private> a : vec3<f32>;
fn f() {
- var b : f32 = a.xy[1];
+ var b : f32 = a.xy[1i];
}
)";
- auto got = Run<Robustness>(src);
+ auto got = Run<Robustness>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(RobustnessTest, Vector_Swizzle_Idx_Scalar_OutOfOrder) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
var b : f32 = a.xy[2];
}
@@ -524,21 +524,21 @@ fn f() {
var<private> a : vec3<f32>;
)";
- auto* expect = R"(
+ auto* expect = R"(
fn f() {
- var b : f32 = a.xy[1];
+ var b : f32 = a.xy[1i];
}
var<private> a : vec3<f32>;
)";
- auto got = Run<Robustness>(src);
+ auto got = Run<Robustness>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(RobustnessTest, Vector_Swizzle_Idx_Var) {
- auto* src = R"(
+ auto* src = R"(
var<private> a : vec3<f32>;
var<private> c : i32;
@@ -548,7 +548,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
var<private> a : vec3<f32>;
var<private> c : i32;
@@ -558,13 +558,13 @@ fn f() {
}
)";
- auto got = Run<Robustness>(src);
+ auto got = Run<Robustness>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(RobustnessTest, Vector_Swizzle_Idx_Var_OutOfOrder) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
var b : f32 = a.xy[c];
}
@@ -574,7 +574,7 @@ var<private> c : i32;
var<private> a : vec3<f32>;
)";
- auto* expect = R"(
+ auto* expect = R"(
fn f() {
var b : f32 = a.xy[min(u32(c), 1u)];
}
@@ -584,13 +584,13 @@ var<private> c : i32;
var<private> a : vec3<f32>;
)";
- auto got = Run<Robustness>(src);
+ auto got = Run<Robustness>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(RobustnessTest, Vector_Swizzle_Idx_Expr) {
- auto* src = R"(
+ auto* src = R"(
var<private> a : vec3<f32>;
var<private> c : i32;
@@ -600,7 +600,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
var<private> a : vec3<f32>;
var<private> c : i32;
@@ -610,13 +610,13 @@ fn f() {
}
)";
- auto got = Run<Robustness>(src);
+ auto got = Run<Robustness>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(RobustnessTest, Vector_Swizzle_Idx_Expr_OutOfOrder) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
var b : f32 = a.xy[c + 2 - 3];
}
@@ -626,7 +626,7 @@ var<private> c : i32;
var<private> a : vec3<f32>;
)";
- auto* expect = R"(
+ auto* expect = R"(
fn f() {
var b : f32 = a.xy[min(u32(((c + 2) - 3)), 1u)];
}
@@ -636,13 +636,13 @@ var<private> c : i32;
var<private> a : vec3<f32>;
)";
- auto got = Run<Robustness>(src);
+ auto got = Run<Robustness>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(RobustnessTest, Vector_Idx_Negative) {
- auto* src = R"(
+ auto* src = R"(
var<private> a : vec3<f32>;
fn f() {
@@ -650,21 +650,21 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
var<private> a : vec3<f32>;
fn f() {
- var b : f32 = a[0];
+ var b : f32 = a[0i];
}
)";
- auto got = Run<Robustness>(src);
+ auto got = Run<Robustness>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(RobustnessTest, Vector_Idx_Negative_OutOfOrder) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
var b : f32 = a[-1];
}
@@ -672,21 +672,21 @@ fn f() {
var<private> a : vec3<f32>;
)";
- auto* expect = R"(
+ auto* expect = R"(
fn f() {
- var b : f32 = a[0];
+ var b : f32 = a[0i];
}
var<private> a : vec3<f32>;
)";
- auto got = Run<Robustness>(src);
+ auto got = Run<Robustness>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(RobustnessTest, Vector_Idx_OutOfBounds) {
- auto* src = R"(
+ auto* src = R"(
var<private> a : vec3<f32>;
fn f() {
@@ -694,21 +694,21 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
var<private> a : vec3<f32>;
fn f() {
- var b : f32 = a[2];
+ var b : f32 = a[2i];
}
)";
- auto got = Run<Robustness>(src);
+ auto got = Run<Robustness>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(RobustnessTest, Vector_Idx_OutOfBounds_OutOfOrder) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
var b : f32 = a[3];
}
@@ -716,65 +716,65 @@ fn f() {
var<private> a : vec3<f32>;
)";
- auto* expect = R"(
+ auto* expect = R"(
fn f() {
- var b : f32 = a[2];
+ var b : f32 = a[2i];
}
var<private> a : vec3<f32>;
)";
- auto got = Run<Robustness>(src);
+ auto got = Run<Robustness>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(RobustnessTest, Matrix_Idx_Scalar) {
- auto* src = R"(
+ auto* src = R"(
var<private> a : mat3x2<f32>;
fn f() {
- var b : f32 = a[2][1];
+ var b : f32 = a[2i][1i];
}
)";
- auto* expect = R"(
+ auto* expect = R"(
var<private> a : mat3x2<f32>;
fn f() {
- var b : f32 = a[2][1];
+ var b : f32 = a[2i][1i];
}
)";
- auto got = Run<Robustness>(src);
+ auto got = Run<Robustness>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(RobustnessTest, Matrix_Idx_Scalar_OutOfOrder) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
- var b : f32 = a[2][1];
+ var b : f32 = a[2i][1i];
}
var<private> a : mat3x2<f32>;
)";
- auto* expect = R"(
+ auto* expect = R"(
fn f() {
- var b : f32 = a[2][1];
+ var b : f32 = a[2i][1i];
}
var<private> a : mat3x2<f32>;
)";
- auto got = Run<Robustness>(src);
+ auto got = Run<Robustness>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(RobustnessTest, Matrix_Idx_Expr_Column) {
- auto* src = R"(
+ auto* src = R"(
var<private> a : mat3x2<f32>;
var<private> c : i32;
@@ -784,23 +784,23 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
var<private> a : mat3x2<f32>;
var<private> c : i32;
fn f() {
- var b : f32 = a[min(u32(((c + 2) - 3)), 2u)][1];
+ var b : f32 = a[min(u32(((c + 2) - 3)), 2u)][1i];
}
)";
- auto got = Run<Robustness>(src);
+ auto got = Run<Robustness>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(RobustnessTest, Matrix_Idx_Expr_Column_OutOfOrder) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
var b : f32 = a[c + 2 - 3][1];
}
@@ -810,9 +810,9 @@ var<private> c : i32;
var<private> a : mat3x2<f32>;
)";
- auto* expect = R"(
+ auto* expect = R"(
fn f() {
- var b : f32 = a[min(u32(((c + 2) - 3)), 2u)][1];
+ var b : f32 = a[min(u32(((c + 2) - 3)), 2u)][1i];
}
var<private> c : i32;
@@ -820,13 +820,13 @@ var<private> c : i32;
var<private> a : mat3x2<f32>;
)";
- auto got = Run<Robustness>(src);
+ auto got = Run<Robustness>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(RobustnessTest, Matrix_Idx_Expr_Row) {
- auto* src = R"(
+ auto* src = R"(
var<private> a : mat3x2<f32>;
var<private> c : i32;
@@ -836,23 +836,23 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
var<private> a : mat3x2<f32>;
var<private> c : i32;
fn f() {
- var b : f32 = a[1][min(u32(((c + 2) - 3)), 1u)];
+ var b : f32 = a[1i][min(u32(((c + 2) - 3)), 1u)];
}
)";
- auto got = Run<Robustness>(src);
+ auto got = Run<Robustness>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(RobustnessTest, Matrix_Idx_Expr_Row_OutOfOrder) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
var b : f32 = a[1][c + 2 - 3];
}
@@ -862,9 +862,9 @@ var<private> c : i32;
var<private> a : mat3x2<f32>;
)";
- auto* expect = R"(
+ auto* expect = R"(
fn f() {
- var b : f32 = a[1][min(u32(((c + 2) - 3)), 1u)];
+ var b : f32 = a[1i][min(u32(((c + 2) - 3)), 1u)];
}
var<private> c : i32;
@@ -872,13 +872,13 @@ var<private> c : i32;
var<private> a : mat3x2<f32>;
)";
- auto got = Run<Robustness>(src);
+ auto got = Run<Robustness>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(RobustnessTest, Matrix_Idx_Negative_Column) {
- auto* src = R"(
+ auto* src = R"(
var<private> a : mat3x2<f32>;
fn f() {
@@ -886,21 +886,21 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
var<private> a : mat3x2<f32>;
fn f() {
- var b : f32 = a[0][1];
+ var b : f32 = a[0i][1i];
}
)";
- auto got = Run<Robustness>(src);
+ auto got = Run<Robustness>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(RobustnessTest, Matrix_Idx_Negative_Column_OutOfOrder) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
var b : f32 = a[-1][1];
}
@@ -908,21 +908,21 @@ fn f() {
var<private> a : mat3x2<f32>;
)";
- auto* expect = R"(
+ auto* expect = R"(
fn f() {
- var b : f32 = a[0][1];
+ var b : f32 = a[0i][1i];
}
var<private> a : mat3x2<f32>;
)";
- auto got = Run<Robustness>(src);
+ auto got = Run<Robustness>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(RobustnessTest, Matrix_Idx_Negative_Row) {
- auto* src = R"(
+ auto* src = R"(
var<private> a : mat3x2<f32>;
fn f() {
@@ -930,21 +930,21 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
var<private> a : mat3x2<f32>;
fn f() {
- var b : f32 = a[2][0];
+ var b : f32 = a[2i][0i];
}
)";
- auto got = Run<Robustness>(src);
+ auto got = Run<Robustness>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(RobustnessTest, Matrix_Idx_Negative_Row_OutOfOrder) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
var b : f32 = a[2][-1];
}
@@ -952,21 +952,21 @@ fn f() {
var<private> a : mat3x2<f32>;
)";
- auto* expect = R"(
+ auto* expect = R"(
fn f() {
- var b : f32 = a[2][0];
+ var b : f32 = a[2i][0i];
}
var<private> a : mat3x2<f32>;
)";
- auto got = Run<Robustness>(src);
+ auto got = Run<Robustness>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(RobustnessTest, Matrix_Idx_OutOfBounds_Column) {
- auto* src = R"(
+ auto* src = R"(
var<private> a : mat3x2<f32>;
fn f() {
@@ -974,21 +974,21 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
var<private> a : mat3x2<f32>;
fn f() {
- var b : f32 = a[2][1];
+ var b : f32 = a[2i][1i];
}
)";
- auto got = Run<Robustness>(src);
+ auto got = Run<Robustness>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(RobustnessTest, Matrix_Idx_OutOfBounds_Column_OutOfOrder) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
var b : f32 = a[5][1];
}
@@ -996,21 +996,21 @@ fn f() {
var<private> a : mat3x2<f32>;
)";
- auto* expect = R"(
+ auto* expect = R"(
fn f() {
- var b : f32 = a[2][1];
+ var b : f32 = a[2i][1i];
}
var<private> a : mat3x2<f32>;
)";
- auto got = Run<Robustness>(src);
+ auto got = Run<Robustness>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(RobustnessTest, Matrix_Idx_OutOfBounds_Row) {
- auto* src = R"(
+ auto* src = R"(
var<private> a : mat3x2<f32>;
fn f() {
@@ -1018,21 +1018,21 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
var<private> a : mat3x2<f32>;
fn f() {
- var b : f32 = a[2][1];
+ var b : f32 = a[2i][1i];
}
)";
- auto got = Run<Robustness>(src);
+ auto got = Run<Robustness>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(RobustnessTest, Matrix_Idx_OutOfBounds_Row_OutOfOrder) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
var b : f32 = a[2][5];
}
@@ -1040,57 +1040,57 @@ fn f() {
var<private> a : mat3x2<f32>;
)";
- auto* expect = R"(
+ auto* expect = R"(
fn f() {
- var b : f32 = a[2][1];
+ var b : f32 = a[2i][1i];
}
var<private> a : mat3x2<f32>;
)";
- auto got = Run<Robustness>(src);
+ auto got = Run<Robustness>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
// TODO(dsinclair): Implement when constant_id exists
TEST_F(RobustnessTest, DISABLED_Vector_Constant_Id_Clamps) {
- // @id(1300) override idx : i32;
- // var a : vec3<f32>
- // var b : f32 = a[idx]
- //
- // ->var b : f32 = a[min(u32(idx), 2)]
+ // @id(1300) override idx : i32;
+ // var a : vec3<f32>
+ // var b : f32 = a[idx]
+ //
+ // ->var b : f32 = a[min(u32(idx), 2)]
}
// TODO(dsinclair): Implement when constant_id exists
TEST_F(RobustnessTest, DISABLED_Array_Constant_Id_Clamps) {
- // @id(1300) override idx : i32;
- // var a : array<f32, 4>
- // var b : f32 = a[idx]
- //
- // -> var b : f32 = a[min(u32(idx), 3)]
+ // @id(1300) override idx : i32;
+ // var a : array<f32, 4>
+ // var b : f32 = a[idx]
+ //
+ // -> var b : f32 = a[min(u32(idx), 3)]
}
// TODO(dsinclair): Implement when constant_id exists
TEST_F(RobustnessTest, DISABLED_Matrix_Column_Constant_Id_Clamps) {
- // @id(1300) override idx : i32;
- // var a : mat3x2<f32>
- // var b : f32 = a[idx][1]
- //
- // -> var b : f32 = a[min(u32(idx), 2)][1]
+ // @id(1300) override idx : i32;
+ // var a : mat3x2<f32>
+ // var b : f32 = a[idx][1]
+ //
+ // -> var b : f32 = a[min(u32(idx), 2)][1]
}
// TODO(dsinclair): Implement when constant_id exists
TEST_F(RobustnessTest, DISABLED_Matrix_Row_Constant_Id_Clamps) {
- // @id(1300) override idx : i32;
- // var a : mat3x2<f32>
- // var b : f32 = a[1][idx]
- //
- // -> var b : f32 = a[1][min(u32(idx), 0, 1)]
+ // @id(1300) override idx : i32;
+ // var a : mat3x2<f32>
+ // var b : f32 = a[1][idx]
+ //
+ // -> var b : f32 = a[1][min(u32(idx), 0, 1)]
}
TEST_F(RobustnessTest, RuntimeArray_Clamps) {
- auto* src = R"(
+ auto* src = R"(
struct S {
a : f32,
b : array<f32>,
@@ -1102,7 +1102,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct S {
a : f32,
b : array<f32>,
@@ -1115,13 +1115,13 @@ fn f() {
}
)";
- auto got = Run<Robustness>(src);
+ auto got = Run<Robustness>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(RobustnessTest, RuntimeArray_Clamps_OutOfOrder) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
var d : f32 = s.b[25];
}
@@ -1134,7 +1134,7 @@ struct S {
};
)";
- auto* expect = R"(
+ auto* expect = R"(
fn f() {
var d : f32 = s.b[min(25u, (arrayLength(&(s.b)) - 1u))];
}
@@ -1147,14 +1147,14 @@ struct S {
}
)";
- auto got = Run<Robustness>(src);
+ auto got = Run<Robustness>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
// Clamp textureLoad() coord, array_index and level values
TEST_F(RobustnessTest, TextureLoad_Clamp) {
- auto* src = R"(
+ auto* src = R"(
@group(0) @binding(0) var tex_1d : texture_1d<f32>;
@group(0) @binding(0) var tex_2d : texture_2d<f32>;
@group(0) @binding(0) var tex_2d_arr : texture_2d_array<f32>;
@@ -1180,8 +1180,8 @@ fn f() {
}
)";
- auto* expect =
- R"(
+ auto* expect =
+ R"(
@group(0) @binding(0) var tex_1d : texture_1d<f32>;
@group(0) @binding(0) var tex_2d : texture_2d<f32>;
@@ -1202,25 +1202,25 @@ fn f() {
var array_idx : i32;
var level_idx : i32;
var sample_idx : i32;
- textureLoad(tex_1d, clamp(1, i32(), (textureDimensions(tex_1d, clamp(level_idx, 0, (textureNumLevels(tex_1d) - 1))) - i32(1))), clamp(level_idx, 0, (textureNumLevels(tex_1d) - 1)));
- textureLoad(tex_2d, clamp(vec2<i32>(1, 2), vec2<i32>(), (textureDimensions(tex_2d, clamp(level_idx, 0, (textureNumLevels(tex_2d) - 1))) - vec2<i32>(1))), clamp(level_idx, 0, (textureNumLevels(tex_2d) - 1)));
- textureLoad(tex_2d_arr, clamp(vec2<i32>(1, 2), vec2<i32>(), (textureDimensions(tex_2d_arr, clamp(level_idx, 0, (textureNumLevels(tex_2d_arr) - 1))) - vec2<i32>(1))), clamp(array_idx, 0, (textureNumLayers(tex_2d_arr) - 1)), clamp(level_idx, 0, (textureNumLevels(tex_2d_arr) - 1)));
- textureLoad(tex_3d, clamp(vec3<i32>(1, 2, 3), vec3<i32>(), (textureDimensions(tex_3d, clamp(level_idx, 0, (textureNumLevels(tex_3d) - 1))) - vec3<i32>(1))), clamp(level_idx, 0, (textureNumLevels(tex_3d) - 1)));
- textureLoad(tex_ms_2d, clamp(vec2<i32>(1, 2), vec2<i32>(), (textureDimensions(tex_ms_2d) - vec2<i32>(1))), sample_idx);
- textureLoad(tex_depth_2d, clamp(vec2<i32>(1, 2), vec2<i32>(), (textureDimensions(tex_depth_2d, clamp(level_idx, 0, (textureNumLevels(tex_depth_2d) - 1))) - vec2<i32>(1))), clamp(level_idx, 0, (textureNumLevels(tex_depth_2d) - 1)));
- textureLoad(tex_depth_2d_arr, clamp(vec2<i32>(1, 2), vec2<i32>(), (textureDimensions(tex_depth_2d_arr, clamp(level_idx, 0, (textureNumLevels(tex_depth_2d_arr) - 1))) - vec2<i32>(1))), clamp(array_idx, 0, (textureNumLayers(tex_depth_2d_arr) - 1)), clamp(level_idx, 0, (textureNumLevels(tex_depth_2d_arr) - 1)));
- textureLoad(tex_external, clamp(vec2<i32>(1, 2), vec2<i32>(), (textureDimensions(tex_external) - vec2<i32>(1))));
+ textureLoad(tex_1d, clamp(1, i32(), (textureDimensions(tex_1d, clamp(level_idx, 0i, (textureNumLevels(tex_1d) - 1i))) - i32(1i))), clamp(level_idx, 0i, (textureNumLevels(tex_1d) - 1i)));
+ textureLoad(tex_2d, clamp(vec2<i32>(1, 2), vec2<i32>(), (textureDimensions(tex_2d, clamp(level_idx, 0i, (textureNumLevels(tex_2d) - 1i))) - vec2<i32>(1i))), clamp(level_idx, 0i, (textureNumLevels(tex_2d) - 1i)));
+ textureLoad(tex_2d_arr, clamp(vec2<i32>(1, 2), vec2<i32>(), (textureDimensions(tex_2d_arr, clamp(level_idx, 0i, (textureNumLevels(tex_2d_arr) - 1i))) - vec2<i32>(1i))), clamp(array_idx, 0i, (textureNumLayers(tex_2d_arr) - 1i)), clamp(level_idx, 0i, (textureNumLevels(tex_2d_arr) - 1i)));
+ textureLoad(tex_3d, clamp(vec3<i32>(1, 2, 3), vec3<i32>(), (textureDimensions(tex_3d, clamp(level_idx, 0i, (textureNumLevels(tex_3d) - 1i))) - vec3<i32>(1i))), clamp(level_idx, 0i, (textureNumLevels(tex_3d) - 1i)));
+ textureLoad(tex_ms_2d, clamp(vec2<i32>(1, 2), vec2<i32>(), (textureDimensions(tex_ms_2d) - vec2<i32>(1i))), sample_idx);
+ textureLoad(tex_depth_2d, clamp(vec2<i32>(1, 2), vec2<i32>(), (textureDimensions(tex_depth_2d, clamp(level_idx, 0i, (textureNumLevels(tex_depth_2d) - 1i))) - vec2<i32>(1i))), clamp(level_idx, 0i, (textureNumLevels(tex_depth_2d) - 1i)));
+ textureLoad(tex_depth_2d_arr, clamp(vec2<i32>(1, 2), vec2<i32>(), (textureDimensions(tex_depth_2d_arr, clamp(level_idx, 0i, (textureNumLevels(tex_depth_2d_arr) - 1i))) - vec2<i32>(1i))), clamp(array_idx, 0i, (textureNumLayers(tex_depth_2d_arr) - 1i)), clamp(level_idx, 0i, (textureNumLevels(tex_depth_2d_arr) - 1i)));
+ textureLoad(tex_external, clamp(vec2<i32>(1, 2), vec2<i32>(), (textureDimensions(tex_external) - vec2<i32>(1i))));
}
)";
- auto got = Run<Robustness>(src);
+ auto got = Run<Robustness>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
// Clamp textureLoad() coord, array_index and level values
TEST_F(RobustnessTest, TextureLoad_Clamp_OutOfOrder) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
var array_idx : i32;
var level_idx : i32;
@@ -1246,20 +1246,20 @@ fn f() {
@group(0) @binding(0) var tex_external : texture_external;
)";
- auto* expect =
- R"(
+ auto* expect =
+ R"(
fn f() {
var array_idx : i32;
var level_idx : i32;
var sample_idx : i32;
- textureLoad(tex_1d, clamp(1, i32(), (textureDimensions(tex_1d, clamp(level_idx, 0, (textureNumLevels(tex_1d) - 1))) - i32(1))), clamp(level_idx, 0, (textureNumLevels(tex_1d) - 1)));
- textureLoad(tex_2d, clamp(vec2<i32>(1, 2), vec2<i32>(), (textureDimensions(tex_2d, clamp(level_idx, 0, (textureNumLevels(tex_2d) - 1))) - vec2<i32>(1))), clamp(level_idx, 0, (textureNumLevels(tex_2d) - 1)));
- textureLoad(tex_2d_arr, clamp(vec2<i32>(1, 2), vec2<i32>(), (textureDimensions(tex_2d_arr, clamp(level_idx, 0, (textureNumLevels(tex_2d_arr) - 1))) - vec2<i32>(1))), clamp(array_idx, 0, (textureNumLayers(tex_2d_arr) - 1)), clamp(level_idx, 0, (textureNumLevels(tex_2d_arr) - 1)));
- textureLoad(tex_3d, clamp(vec3<i32>(1, 2, 3), vec3<i32>(), (textureDimensions(tex_3d, clamp(level_idx, 0, (textureNumLevels(tex_3d) - 1))) - vec3<i32>(1))), clamp(level_idx, 0, (textureNumLevels(tex_3d) - 1)));
- textureLoad(tex_ms_2d, clamp(vec2<i32>(1, 2), vec2<i32>(), (textureDimensions(tex_ms_2d) - vec2<i32>(1))), sample_idx);
- textureLoad(tex_depth_2d, clamp(vec2<i32>(1, 2), vec2<i32>(), (textureDimensions(tex_depth_2d, clamp(level_idx, 0, (textureNumLevels(tex_depth_2d) - 1))) - vec2<i32>(1))), clamp(level_idx, 0, (textureNumLevels(tex_depth_2d) - 1)));
- textureLoad(tex_depth_2d_arr, clamp(vec2<i32>(1, 2), vec2<i32>(), (textureDimensions(tex_depth_2d_arr, clamp(level_idx, 0, (textureNumLevels(tex_depth_2d_arr) - 1))) - vec2<i32>(1))), clamp(array_idx, 0, (textureNumLayers(tex_depth_2d_arr) - 1)), clamp(level_idx, 0, (textureNumLevels(tex_depth_2d_arr) - 1)));
- textureLoad(tex_external, clamp(vec2<i32>(1, 2), vec2<i32>(), (textureDimensions(tex_external) - vec2<i32>(1))));
+ textureLoad(tex_1d, clamp(1, i32(), (textureDimensions(tex_1d, clamp(level_idx, 0i, (textureNumLevels(tex_1d) - 1i))) - i32(1i))), clamp(level_idx, 0i, (textureNumLevels(tex_1d) - 1i)));
+ textureLoad(tex_2d, clamp(vec2<i32>(1, 2), vec2<i32>(), (textureDimensions(tex_2d, clamp(level_idx, 0i, (textureNumLevels(tex_2d) - 1i))) - vec2<i32>(1i))), clamp(level_idx, 0i, (textureNumLevels(tex_2d) - 1i)));
+ textureLoad(tex_2d_arr, clamp(vec2<i32>(1, 2), vec2<i32>(), (textureDimensions(tex_2d_arr, clamp(level_idx, 0i, (textureNumLevels(tex_2d_arr) - 1i))) - vec2<i32>(1i))), clamp(array_idx, 0i, (textureNumLayers(tex_2d_arr) - 1i)), clamp(level_idx, 0i, (textureNumLevels(tex_2d_arr) - 1i)));
+ textureLoad(tex_3d, clamp(vec3<i32>(1, 2, 3), vec3<i32>(), (textureDimensions(tex_3d, clamp(level_idx, 0i, (textureNumLevels(tex_3d) - 1i))) - vec3<i32>(1i))), clamp(level_idx, 0i, (textureNumLevels(tex_3d) - 1i)));
+ textureLoad(tex_ms_2d, clamp(vec2<i32>(1, 2), vec2<i32>(), (textureDimensions(tex_ms_2d) - vec2<i32>(1i))), sample_idx);
+ textureLoad(tex_depth_2d, clamp(vec2<i32>(1, 2), vec2<i32>(), (textureDimensions(tex_depth_2d, clamp(level_idx, 0i, (textureNumLevels(tex_depth_2d) - 1i))) - vec2<i32>(1i))), clamp(level_idx, 0i, (textureNumLevels(tex_depth_2d) - 1i)));
+ textureLoad(tex_depth_2d_arr, clamp(vec2<i32>(1, 2), vec2<i32>(), (textureDimensions(tex_depth_2d_arr, clamp(level_idx, 0i, (textureNumLevels(tex_depth_2d_arr) - 1i))) - vec2<i32>(1i))), clamp(array_idx, 0i, (textureNumLayers(tex_depth_2d_arr) - 1i)), clamp(level_idx, 0i, (textureNumLevels(tex_depth_2d_arr) - 1i)));
+ textureLoad(tex_external, clamp(vec2<i32>(1, 2), vec2<i32>(), (textureDimensions(tex_external) - vec2<i32>(1i))));
}
@group(0) @binding(0) var tex_1d : texture_1d<f32>;
@@ -1279,14 +1279,14 @@ fn f() {
@group(0) @binding(0) var tex_external : texture_external;
)";
- auto got = Run<Robustness>(src);
+ auto got = Run<Robustness>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
// Clamp textureStore() coord, array_index and level values
TEST_F(RobustnessTest, TextureStore_Clamp) {
- auto* src = R"(
+ auto* src = R"(
@group(0) @binding(0) var tex1d : texture_storage_1d<rgba8sint, write>;
@group(0) @binding(1) var tex2d : texture_storage_2d<rgba8sint, write>;
@@ -1303,7 +1303,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
@group(0) @binding(0) var tex1d : texture_storage_1d<rgba8sint, write>;
@group(0) @binding(1) var tex2d : texture_storage_2d<rgba8sint, write>;
@@ -1313,21 +1313,21 @@ fn f() {
@group(0) @binding(3) var tex3d : texture_storage_3d<rgba8sint, write>;
fn f() {
- textureStore(tex1d, clamp(10, i32(), (textureDimensions(tex1d) - i32(1))), vec4<i32>());
- textureStore(tex2d, clamp(vec2<i32>(10, 20), vec2<i32>(), (textureDimensions(tex2d) - vec2<i32>(1))), vec4<i32>());
- textureStore(tex2d_arr, clamp(vec2<i32>(10, 20), vec2<i32>(), (textureDimensions(tex2d_arr) - vec2<i32>(1))), clamp(50, 0, (textureNumLayers(tex2d_arr) - 1)), vec4<i32>());
- textureStore(tex3d, clamp(vec3<i32>(10, 20, 30), vec3<i32>(), (textureDimensions(tex3d) - vec3<i32>(1))), vec4<i32>());
+ textureStore(tex1d, clamp(10, i32(), (textureDimensions(tex1d) - i32(1i))), vec4<i32>());
+ textureStore(tex2d, clamp(vec2<i32>(10, 20), vec2<i32>(), (textureDimensions(tex2d) - vec2<i32>(1i))), vec4<i32>());
+ textureStore(tex2d_arr, clamp(vec2<i32>(10, 20), vec2<i32>(), (textureDimensions(tex2d_arr) - vec2<i32>(1i))), clamp(50, 0i, (textureNumLayers(tex2d_arr) - 1i)), vec4<i32>());
+ textureStore(tex3d, clamp(vec3<i32>(10, 20, 30), vec3<i32>(), (textureDimensions(tex3d) - vec3<i32>(1i))), vec4<i32>());
}
)";
- auto got = Run<Robustness>(src);
+ auto got = Run<Robustness>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
// Clamp textureStore() coord, array_index and level values
TEST_F(RobustnessTest, TextureStore_Clamp_OutOfOrder) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
textureStore(tex1d, 10, vec4<i32>());
textureStore(tex2d, vec2<i32>(10, 20), vec4<i32>());
@@ -1345,12 +1345,12 @@ fn f() {
)";
- auto* expect = R"(
+ auto* expect = R"(
fn f() {
- textureStore(tex1d, clamp(10, i32(), (textureDimensions(tex1d) - i32(1))), vec4<i32>());
- textureStore(tex2d, clamp(vec2<i32>(10, 20), vec2<i32>(), (textureDimensions(tex2d) - vec2<i32>(1))), vec4<i32>());
- textureStore(tex2d_arr, clamp(vec2<i32>(10, 20), vec2<i32>(), (textureDimensions(tex2d_arr) - vec2<i32>(1))), clamp(50, 0, (textureNumLayers(tex2d_arr) - 1)), vec4<i32>());
- textureStore(tex3d, clamp(vec3<i32>(10, 20, 30), vec3<i32>(), (textureDimensions(tex3d) - vec3<i32>(1))), vec4<i32>());
+ textureStore(tex1d, clamp(10, i32(), (textureDimensions(tex1d) - i32(1i))), vec4<i32>());
+ textureStore(tex2d, clamp(vec2<i32>(10, 20), vec2<i32>(), (textureDimensions(tex2d) - vec2<i32>(1i))), vec4<i32>());
+ textureStore(tex2d_arr, clamp(vec2<i32>(10, 20), vec2<i32>(), (textureDimensions(tex2d_arr) - vec2<i32>(1i))), clamp(50, 0i, (textureNumLayers(tex2d_arr) - 1i)), vec4<i32>());
+ textureStore(tex3d, clamp(vec3<i32>(10, 20, 30), vec3<i32>(), (textureDimensions(tex3d) - vec3<i32>(1i))), vec4<i32>());
}
@group(0) @binding(0) var tex1d : texture_storage_1d<rgba8sint, write>;
@@ -1362,29 +1362,29 @@ fn f() {
@group(0) @binding(3) var tex3d : texture_storage_3d<rgba8sint, write>;
)";
- auto got = Run<Robustness>(src);
+ auto got = Run<Robustness>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
// TODO(dsinclair): Test for scoped variables when shadowing is implemented
TEST_F(RobustnessTest, DISABLED_Shadowed_Variable) {
- // var a : array<f32, 3>;
- // var i : u32;
- // {
- // var a : array<f32, 5>;
- // var b : f32 = a[i];
- // }
- // var c : f32 = a[i];
- //
- // -> var b : f32 = a[min(u32(i), 4)];
- // var c : f32 = a[min(u32(i), 2)];
- FAIL();
+ // var a : array<f32, 3>;
+ // var i : u32;
+ // {
+ // var a : array<f32, 5>;
+ // var b : f32 = a[i];
+ // }
+ // var c : f32 = a[i];
+ //
+ // -> var b : f32 = a[min(u32(i), 4)];
+ // var c : f32 = a[min(u32(i), 2)];
+ FAIL();
}
// Check that existing uses of min() and arrayLength() do not get renamed.
TEST_F(RobustnessTest, DontRenameSymbols) {
- auto* src = R"(
+ auto* src = R"(
struct S {
a : f32,
b : array<f32>,
@@ -1401,7 +1401,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct S {
a : f32,
b : array<f32>,
@@ -1418,9 +1418,9 @@ fn f() {
}
)";
- auto got = Run<Robustness>(src);
+ auto got = Run<Robustness>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
const char* kOmitSourceShader = R"(
@@ -1481,7 +1481,7 @@ fn f() {
)";
TEST_F(RobustnessTest, OmitNone) {
- auto* expect = R"(
+ auto* expect = R"(
struct S {
a : array<f32, 4>,
b : array<f32>,
@@ -1498,21 +1498,21 @@ struct U {
@group(1) @binding(0) var<uniform> u : U;
fn f() {
- var i32_sa1 : f32 = s.a[3];
- var i32_sa2 : f32 = s.a[1];
- var i32_sa3 : f32 = s.a[0];
- var i32_sa4 : f32 = s.a[0];
- var i32_sa5 : f32 = s.a[0];
+ var i32_sa1 : f32 = s.a[3i];
+ var i32_sa2 : f32 = s.a[1i];
+ var i32_sa3 : f32 = s.a[0i];
+ var i32_sa4 : f32 = s.a[0i];
+ var i32_sa5 : f32 = s.a[0i];
var i32_sb1 : f32 = s.b[min(4u, (arrayLength(&(s.b)) - 1u))];
var i32_sb2 : f32 = s.b[min(1u, (arrayLength(&(s.b)) - 1u))];
var i32_sb3 : f32 = s.b[min(0u, (arrayLength(&(s.b)) - 1u))];
var i32_sb4 : f32 = s.b[min(0u, (arrayLength(&(s.b)) - 1u))];
var i32_sb5 : f32 = s.b[min(0u, (arrayLength(&(s.b)) - 1u))];
- var i32_ua1 : f32 = u.a[3].x;
- var i32_ua2 : f32 = u.a[1].x;
- var i32_ua3 : f32 = u.a[0].x;
- var i32_ua4 : f32 = u.a[0].x;
- var i32_ua5 : f32 = u.a[0].x;
+ var i32_ua1 : f32 = u.a[3i].x;
+ var i32_ua2 : f32 = u.a[1i].x;
+ var i32_ua3 : f32 = u.a[0i].x;
+ var i32_ua4 : f32 = u.a[0i].x;
+ var i32_ua5 : f32 = u.a[0i].x;
var u32_sa1 : f32 = s.a[0u];
var u32_sa2 : f32 = s.a[1u];
var u32_sa3 : f32 = s.a[3u];
@@ -1534,17 +1534,17 @@ fn f() {
}
)";
- Robustness::Config cfg;
- DataMap data;
- data.Add<Robustness::Config>(cfg);
+ Robustness::Config cfg;
+ DataMap data;
+ data.Add<Robustness::Config>(cfg);
- auto got = Run<Robustness>(kOmitSourceShader, data);
+ auto got = Run<Robustness>(kOmitSourceShader, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(RobustnessTest, OmitStorage) {
- auto* expect = R"(
+ auto* expect = R"(
struct S {
a : array<f32, 4>,
b : array<f32>,
@@ -1571,11 +1571,11 @@ fn f() {
var i32_sb3 : f32 = s.b[0];
var i32_sb4 : f32 = s.b[-1];
var i32_sb5 : f32 = s.b[-4];
- var i32_ua1 : f32 = u.a[3].x;
- var i32_ua2 : f32 = u.a[1].x;
- var i32_ua3 : f32 = u.a[0].x;
- var i32_ua4 : f32 = u.a[0].x;
- var i32_ua5 : f32 = u.a[0].x;
+ var i32_ua1 : f32 = u.a[3i].x;
+ var i32_ua2 : f32 = u.a[1i].x;
+ var i32_ua3 : f32 = u.a[0i].x;
+ var i32_ua4 : f32 = u.a[0i].x;
+ var i32_ua5 : f32 = u.a[0i].x;
var u32_sa1 : f32 = s.a[0u];
var u32_sa2 : f32 = s.a[1u];
var u32_sa3 : f32 = s.a[3u];
@@ -1597,19 +1597,19 @@ fn f() {
}
)";
- Robustness::Config cfg;
- cfg.omitted_classes.insert(Robustness::StorageClass::kStorage);
+ Robustness::Config cfg;
+ cfg.omitted_classes.insert(Robustness::StorageClass::kStorage);
- DataMap data;
- data.Add<Robustness::Config>(cfg);
+ DataMap data;
+ data.Add<Robustness::Config>(cfg);
- auto got = Run<Robustness>(kOmitSourceShader, data);
+ auto got = Run<Robustness>(kOmitSourceShader, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(RobustnessTest, OmitUniform) {
- auto* expect = R"(
+ auto* expect = R"(
struct S {
a : array<f32, 4>,
b : array<f32>,
@@ -1626,11 +1626,11 @@ struct U {
@group(1) @binding(0) var<uniform> u : U;
fn f() {
- var i32_sa1 : f32 = s.a[3];
- var i32_sa2 : f32 = s.a[1];
- var i32_sa3 : f32 = s.a[0];
- var i32_sa4 : f32 = s.a[0];
- var i32_sa5 : f32 = s.a[0];
+ var i32_sa1 : f32 = s.a[3i];
+ var i32_sa2 : f32 = s.a[1i];
+ var i32_sa3 : f32 = s.a[0i];
+ var i32_sa4 : f32 = s.a[0i];
+ var i32_sa5 : f32 = s.a[0i];
var i32_sb1 : f32 = s.b[min(4u, (arrayLength(&(s.b)) - 1u))];
var i32_sb2 : f32 = s.b[min(1u, (arrayLength(&(s.b)) - 1u))];
var i32_sb3 : f32 = s.b[min(0u, (arrayLength(&(s.b)) - 1u))];
@@ -1662,19 +1662,19 @@ fn f() {
}
)";
- Robustness::Config cfg;
- cfg.omitted_classes.insert(Robustness::StorageClass::kUniform);
+ Robustness::Config cfg;
+ cfg.omitted_classes.insert(Robustness::StorageClass::kUniform);
- DataMap data;
- data.Add<Robustness::Config>(cfg);
+ DataMap data;
+ data.Add<Robustness::Config>(cfg);
- auto got = Run<Robustness>(kOmitSourceShader, data);
+ auto got = Run<Robustness>(kOmitSourceShader, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(RobustnessTest, OmitBoth) {
- auto* expect = R"(
+ auto* expect = R"(
struct S {
a : array<f32, 4>,
b : array<f32>,
@@ -1727,16 +1727,16 @@ fn f() {
}
)";
- Robustness::Config cfg;
- cfg.omitted_classes.insert(Robustness::StorageClass::kStorage);
- cfg.omitted_classes.insert(Robustness::StorageClass::kUniform);
+ Robustness::Config cfg;
+ cfg.omitted_classes.insert(Robustness::StorageClass::kStorage);
+ cfg.omitted_classes.insert(Robustness::StorageClass::kUniform);
- DataMap data;
- data.Add<Robustness::Config>(cfg);
+ DataMap data;
+ data.Add<Robustness::Config>(cfg);
- auto got = Run<Robustness>(kOmitSourceShader, data);
+ auto got = Run<Robustness>(kOmitSourceShader, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
} // namespace
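For orientation, the robustness_test.cc expectations above all assert one clamping pattern: constant indices are clamped at transform time and now emitted as i32-suffixed literals (a[-1] becomes a[0i], a[3] becomes a[2i] for a 3-element array), dynamic indices are wrapped in min(u32(...), last-valid-index), runtime-sized arrays clamp against (arrayLength(&...) - 1u), and textureLoad()/textureStore() arguments clamp against textureDimensions(), textureNumLayers() and textureNumLevels(). A minimal WGSL sketch restating that pattern; the module and names are illustrative rather than copied from any single test:

var<private> a : array<f32, 3>;
var<private> c : i32;

fn f() {
  var x : f32 = a[-1];  // constant, out-of-range index
  var y : f32 = a[c];   // dynamic index
}

After the Robustness transform, per the expectations asserted above, this becomes roughly:

var<private> a : array<f32, 3>;
var<private> c : i32;

fn f() {
  var x : f32 = a[0i];                // clamped constant, emitted as an i32-suffixed literal
  var y : f32 = a[min(u32(c), 2u)];   // dynamic index clamped to the last valid element
}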
diff --git a/chromium/third_party/dawn/src/tint/transform/simplify_pointers.cc b/chromium/third_party/dawn/src/tint/transform/simplify_pointers.cc
index 4cc9391efbe..b8f82bc49a5 100644
--- a/chromium/third_party/dawn/src/tint/transform/simplify_pointers.cc
+++ b/chromium/third_party/dawn/src/tint/transform/simplify_pointers.cc
@@ -35,195 +35,193 @@ namespace {
/// PointerOp describes either possible indirection or address-of action on an
/// expression.
struct PointerOp {
- /// Positive: Number of times the `expr` was dereferenced (*expr)
- /// Negative: Number of times the `expr` was 'addressed-of' (&expr)
- /// Zero: no pointer op on `expr`
- int indirections = 0;
- /// The expression being operated on
- const ast::Expression* expr = nullptr;
+ /// Positive: Number of times the `expr` was dereferenced (*expr)
+ /// Negative: Number of times the `expr` was 'addressed-of' (&expr)
+ /// Zero: no pointer op on `expr`
+ int indirections = 0;
+ /// The expression being operated on
+ const ast::Expression* expr = nullptr;
};
} // namespace
/// The PIMPL state for the SimplifyPointers transform
struct SimplifyPointers::State {
- /// The clone context
- CloneContext& ctx;
-
- /// Constructor
- /// @param context the clone context
- explicit State(CloneContext& context) : ctx(context) {}
-
- /// Traverses the expression `expr` looking for non-literal array indexing
- /// expressions that would affect the computed address of a pointer
- /// expression. The function-like argument `cb` is called for each found.
- /// @param expr the expression to traverse
- /// @param cb a function-like object with the signature
- /// `void(const ast::Expression*)`, which is called for each array index
- /// expression
- template <typename F>
- static void CollectSavedArrayIndices(const ast::Expression* expr, F&& cb) {
- if (auto* a = expr->As<ast::IndexAccessorExpression>()) {
- CollectSavedArrayIndices(a->object, cb);
- if (!a->index->Is<ast::LiteralExpression>()) {
- cb(a->index);
- }
- return;
- }
-
- if (auto* m = expr->As<ast::MemberAccessorExpression>()) {
- CollectSavedArrayIndices(m->structure, cb);
- return;
- }
-
- if (auto* u = expr->As<ast::UnaryOpExpression>()) {
- CollectSavedArrayIndices(u->expr, cb);
- return;
- }
+ /// The clone context
+ CloneContext& ctx;
+
+ /// Constructor
+ /// @param context the clone context
+ explicit State(CloneContext& context) : ctx(context) {}
+
+ /// Traverses the expression `expr` looking for non-literal array indexing
+ /// expressions that would affect the computed address of a pointer
+ /// expression. The function-like argument `cb` is called for each found.
+ /// @param expr the expression to traverse
+ /// @param cb a function-like object with the signature
+ /// `void(const ast::Expression*)`, which is called for each array index
+ /// expression
+ template <typename F>
+ static void CollectSavedArrayIndices(const ast::Expression* expr, F&& cb) {
+ if (auto* a = expr->As<ast::IndexAccessorExpression>()) {
+ CollectSavedArrayIndices(a->object, cb);
+ if (!a->index->Is<ast::LiteralExpression>()) {
+ cb(a->index);
+ }
+ return;
+ }
- // Note: Other ast::Expression types can be safely ignored as they cannot be
- // used to generate a reference or pointer.
- // See https://gpuweb.github.io/gpuweb/wgsl/#forming-references-and-pointers
- }
-
- /// Reduce walks the expression chain, collapsing all address-of and
- /// indirection ops into a PointerOp.
- /// @param in the expression to walk
- /// @returns the reduced PointerOp
- PointerOp Reduce(const ast::Expression* in) const {
- PointerOp op{0, in};
- while (true) {
- if (auto* unary = op.expr->As<ast::UnaryOpExpression>()) {
- switch (unary->op) {
- case ast::UnaryOp::kIndirection:
- op.indirections++;
- op.expr = unary->expr;
- continue;
- case ast::UnaryOp::kAddressOf:
- op.indirections--;
- op.expr = unary->expr;
- continue;
- default:
- break;
+ if (auto* m = expr->As<ast::MemberAccessorExpression>()) {
+ CollectSavedArrayIndices(m->structure, cb);
+ return;
}
- }
- if (auto* user = ctx.src->Sem().Get<sem::VariableUser>(op.expr)) {
- auto* var = user->Variable();
- if (var->Is<sem::LocalVariable>() && //
- var->Declaration()->is_const && //
- var->Type()->Is<sem::Pointer>()) {
- op.expr = var->Declaration()->constructor;
- continue;
+
+ if (auto* u = expr->As<ast::UnaryOpExpression>()) {
+ CollectSavedArrayIndices(u->expr, cb);
+ return;
}
- }
- return op;
+
+ // Note: Other ast::Expression types can be safely ignored as they cannot be
+ // used to generate a reference or pointer.
+ // See https://gpuweb.github.io/gpuweb/wgsl/#forming-references-and-pointers
}
- }
-
- /// Performs the transformation
- void Run() {
- // A map of saved expressions to their saved variable name
- std::unordered_map<const ast::Expression*, Symbol> saved_vars;
-
- // Register the ast::Expression transform handler.
- // This performs two different transformations:
- // * Identifiers that resolve to the pointer-typed `let` declarations are
- // replaced with the recursively inlined initializer expression for the
- // `let` declaration.
- // * Sub-expressions inside the pointer-typed `let` initializer expression
- // that have been hoisted to a saved variable are replaced with the saved
- // variable identifier.
- ctx.ReplaceAll([&](const ast::Expression* expr) -> const ast::Expression* {
- // Look to see if we need to swap this Expression with a saved variable.
- auto it = saved_vars.find(expr);
- if (it != saved_vars.end()) {
- return ctx.dst->Expr(it->second);
- }
-
- // Reduce the expression, folding away chains of address-of / indirections
- auto op = Reduce(expr);
-
- // Clone the reduced root expression
- expr = ctx.CloneWithoutTransform(op.expr);
-
- // And reapply the minimum number of address-of / indirections
- for (int i = 0; i < op.indirections; i++) {
- expr = ctx.dst->Deref(expr);
- }
- for (int i = 0; i > op.indirections; i--) {
- expr = ctx.dst->AddressOf(expr);
- }
- return expr;
- });
-
- // Find all the pointer-typed `let` declarations.
- // Note that these must be function-scoped, as module-scoped `let`s are not
- // permitted.
- for (auto* node : ctx.src->ASTNodes().Objects()) {
- if (auto* let = node->As<ast::VariableDeclStatement>()) {
- if (!let->variable->is_const) {
- continue; // Not a `let` declaration. Ignore.
- }
- auto* var = ctx.src->Sem().Get(let->variable);
- if (!var->Type()->Is<sem::Pointer>()) {
- continue; // Not a pointer type. Ignore.
+ /// Reduce walks the expression chain, collapsing all address-of and
+ /// indirection ops into a PointerOp.
+ /// @param in the expression to walk
+ /// @returns the reduced PointerOp
+ PointerOp Reduce(const ast::Expression* in) const {
+ PointerOp op{0, in};
+ while (true) {
+ if (auto* unary = op.expr->As<ast::UnaryOpExpression>()) {
+ switch (unary->op) {
+ case ast::UnaryOp::kIndirection:
+ op.indirections++;
+ op.expr = unary->expr;
+ continue;
+ case ast::UnaryOp::kAddressOf:
+ op.indirections--;
+ op.expr = unary->expr;
+ continue;
+ default:
+ break;
+ }
+ }
+ if (auto* user = ctx.src->Sem().Get<sem::VariableUser>(op.expr)) {
+ auto* var = user->Variable();
+ if (var->Is<sem::LocalVariable>() && //
+ var->Declaration()->is_const && //
+ var->Type()->Is<sem::Pointer>()) {
+ op.expr = var->Declaration()->constructor;
+ continue;
+ }
+ }
+ return op;
}
+ }
- // We're dealing with a pointer-typed `let` declaration.
-
- // Scan the initializer expression for array index expressions that need
-      // to be hoisted to temporary "saved" variables.
- std::vector<const ast::VariableDeclStatement*> saved;
- CollectSavedArrayIndices(
- var->Declaration()->constructor,
- [&](const ast::Expression* idx_expr) {
- // We have a sub-expression that needs to be saved.
- // Create a new variable
- auto saved_name = ctx.dst->Symbols().New(
- ctx.src->Symbols().NameFor(var->Declaration()->symbol) +
- "_save");
- auto* decl = ctx.dst->Decl(
- ctx.dst->Const(saved_name, nullptr, ctx.Clone(idx_expr)));
- saved.emplace_back(decl);
- // Record the substitution of `idx_expr` to the saved variable
- // with the symbol `saved_name`. This will be used by the
- // ReplaceAll() handler above.
- saved_vars.emplace(idx_expr, saved_name);
- });
-
- // Find the place to insert the saved declarations.
-      // Special care needs to be taken for lets declared as the initializer
- // part of for-loops. In this case the block will hold the for-loop
- // statement, not the let.
- if (!saved.empty()) {
- auto* stmt = ctx.src->Sem().Get(let);
- auto* block = stmt->Block();
- // Find the statement owned by the block (either the let decl or a
- // for-loop)
- while (block != stmt->Parent()) {
- stmt = stmt->Parent();
- }
- // Declare the stored variables just before stmt. Order here is
- // important as order-of-operations needs to be preserved.
- // CollectSavedArrayIndices() visits the LHS of an index accessor
- // before the index expression.
- for (auto* decl : saved) {
- // Note that repeated calls to InsertBefore() with the same `before`
-          // argument will result in nodes being inserted in the order the
- // calls are made (last call is inserted last).
- ctx.InsertBefore(block->Declaration()->statements,
- stmt->Declaration(), decl);
- }
+ /// Performs the transformation
+ void Run() {
+ // A map of saved expressions to their saved variable name
+ std::unordered_map<const ast::Expression*, Symbol> saved_vars;
+
+ // Register the ast::Expression transform handler.
+ // This performs two different transformations:
+ // * Identifiers that resolve to the pointer-typed `let` declarations are
+ // replaced with the recursively inlined initializer expression for the
+ // `let` declaration.
+ // * Sub-expressions inside the pointer-typed `let` initializer expression
+ // that have been hoisted to a saved variable are replaced with the saved
+ // variable identifier.
+ ctx.ReplaceAll([&](const ast::Expression* expr) -> const ast::Expression* {
+ // Look to see if we need to swap this Expression with a saved variable.
+ auto it = saved_vars.find(expr);
+ if (it != saved_vars.end()) {
+ return ctx.dst->Expr(it->second);
+ }
+
+ // Reduce the expression, folding away chains of address-of / indirections
+ auto op = Reduce(expr);
+
+ // Clone the reduced root expression
+ expr = ctx.CloneWithoutTransform(op.expr);
+
+ // And reapply the minimum number of address-of / indirections
+ for (int i = 0; i < op.indirections; i++) {
+ expr = ctx.dst->Deref(expr);
+ }
+ for (int i = 0; i > op.indirections; i--) {
+ expr = ctx.dst->AddressOf(expr);
+ }
+ return expr;
+ });
+
+ // Find all the pointer-typed `let` declarations.
+ // Note that these must be function-scoped, as module-scoped `let`s are not
+ // permitted.
+ for (auto* node : ctx.src->ASTNodes().Objects()) {
+ if (auto* let = node->As<ast::VariableDeclStatement>()) {
+ if (!let->variable->is_const) {
+ continue; // Not a `let` declaration. Ignore.
+ }
+
+ auto* var = ctx.src->Sem().Get(let->variable);
+ if (!var->Type()->Is<sem::Pointer>()) {
+ continue; // Not a pointer type. Ignore.
+ }
+
+ // We're dealing with a pointer-typed `let` declaration.
+
+ // Scan the initializer expression for array index expressions that need
+                // to be hoisted to temporary "saved" variables.
+ std::vector<const ast::VariableDeclStatement*> saved;
+ CollectSavedArrayIndices(
+ var->Declaration()->constructor, [&](const ast::Expression* idx_expr) {
+ // We have a sub-expression that needs to be saved.
+ // Create a new variable
+ auto saved_name = ctx.dst->Symbols().New(
+ ctx.src->Symbols().NameFor(var->Declaration()->symbol) + "_save");
+ auto* decl =
+ ctx.dst->Decl(ctx.dst->Let(saved_name, nullptr, ctx.Clone(idx_expr)));
+ saved.emplace_back(decl);
+ // Record the substitution of `idx_expr` to the saved variable
+ // with the symbol `saved_name`. This will be used by the
+ // ReplaceAll() handler above.
+ saved_vars.emplace(idx_expr, saved_name);
+ });
+
+ // Find the place to insert the saved declarations.
+                // Special care needs to be taken for lets declared as the initializer
+ // part of for-loops. In this case the block will hold the for-loop
+ // statement, not the let.
+ if (!saved.empty()) {
+ auto* stmt = ctx.src->Sem().Get(let);
+ auto* block = stmt->Block();
+ // Find the statement owned by the block (either the let decl or a
+ // for-loop)
+ while (block != stmt->Parent()) {
+ stmt = stmt->Parent();
+ }
+ // Declare the stored variables just before stmt. Order here is
+ // important as order-of-operations needs to be preserved.
+ // CollectSavedArrayIndices() visits the LHS of an index accessor
+ // before the index expression.
+ for (auto* decl : saved) {
+ // Note that repeated calls to InsertBefore() with the same `before`
+                    // argument will result in nodes being inserted in the order the
+ // calls are made (last call is inserted last).
+ ctx.InsertBefore(block->Declaration()->statements, stmt->Declaration(),
+ decl);
+ }
+ }
+
+ // As the original `let` declaration will be fully inlined, there's no
+ // need for the original declaration to exist. Remove it.
+ RemoveStatement(ctx, let);
+ }
}
-
- // As the original `let` declaration will be fully inlined, there's no
- // need for the original declaration to exist. Remove it.
- RemoveStatement(ctx, let);
- }
+ ctx.Clone();
}
- ctx.Clone();
- }
};
SimplifyPointers::SimplifyPointers() = default;
@@ -231,7 +229,7 @@ SimplifyPointers::SimplifyPointers() = default;
SimplifyPointers::~SimplifyPointers() = default;
void SimplifyPointers::Run(CloneContext& ctx, const DataMap&, DataMap&) const {
- State(ctx).Run();
+ State(ctx).Run();
}
} // namespace tint::transform
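Taken together, the comments in SimplifyPointers::State::Run() above describe a two-step rewrite: every use of a function-scope, pointer-typed `let` is replaced by its recursively inlined initializer with the address-of/indirection chain folded away by Reduce(), and any non-literal array index inside that initializer is first hoisted into a `let` named <original>_save (now built with ctx.dst->Let() rather than Const(), per this hunk) so the index expression is evaluated exactly once. A small WGSL sketch of that behaviour, assuming an illustrative helper idx() and pointer let p; the _save suffix comes from the naming in Run():

fn idx() -> i32 {
  return 1;
}

fn f() {
  var arr : array<i32, 4>;
  let p : ptr<function, i32> = &arr[idx()];
  *(p) = 42;
}

would be rewritten to roughly:

fn idx() -> i32 {
  return 1;
}

fn f() {
  var arr : array<i32, 4>;
  let p_save = idx();  // hoisted non-literal index, inserted where the pointer let used to be
  arr[p_save] = 42;    // the use of *(p) is inlined and the &/* chain folded away
}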
diff --git a/chromium/third_party/dawn/src/tint/transform/simplify_pointers.h b/chromium/third_party/dawn/src/tint/transform/simplify_pointers.h
index 3bd49503ea1..267b7b2d323 100644
--- a/chromium/third_party/dawn/src/tint/transform/simplify_pointers.h
+++ b/chromium/third_party/dawn/src/tint/transform/simplify_pointers.h
@@ -32,25 +32,23 @@ namespace tint::transform {
/// @note Depends on the following transforms to have been run first:
/// * Unshadow
class SimplifyPointers : public Castable<SimplifyPointers, Transform> {
- public:
- /// Constructor
- SimplifyPointers();
-
- /// Destructor
- ~SimplifyPointers() override;
-
- protected:
- struct State;
-
- /// Runs the transform using the CloneContext built for transforming a
- /// program. Run() is responsible for calling Clone() on the CloneContext.
- /// @param ctx the CloneContext primed with the input program and
- /// ProgramBuilder
- /// @param inputs optional extra transform-specific input data
- /// @param outputs optional extra transform-specific output data
- void Run(CloneContext& ctx,
- const DataMap& inputs,
- DataMap& outputs) const override;
+ public:
+ /// Constructor
+ SimplifyPointers();
+
+ /// Destructor
+ ~SimplifyPointers() override;
+
+ protected:
+ struct State;
+
+ /// Runs the transform using the CloneContext built for transforming a
+ /// program. Run() is responsible for calling Clone() on the CloneContext.
+ /// @param ctx the CloneContext primed with the input program and
+ /// ProgramBuilder
+ /// @param inputs optional extra transform-specific input data
+ /// @param outputs optional extra transform-specific output data
+ void Run(CloneContext& ctx, const DataMap& inputs, DataMap& outputs) const override;
};
} // namespace tint::transform
diff --git a/chromium/third_party/dawn/src/tint/transform/simplify_pointers_test.cc b/chromium/third_party/dawn/src/tint/transform/simplify_pointers_test.cc
index 6266b6f9520..9848ff31bbf 100644
--- a/chromium/third_party/dawn/src/tint/transform/simplify_pointers_test.cc
+++ b/chromium/third_party/dawn/src/tint/transform/simplify_pointers_test.cc
@@ -23,16 +23,16 @@ namespace {
using SimplifyPointersTest = TransformTest;
TEST_F(SimplifyPointersTest, EmptyModule) {
- auto* src = "";
- auto* expect = "";
+ auto* src = "";
+ auto* expect = "";
- auto got = Run<Unshadow, SimplifyPointers>(src);
+ auto got = Run<Unshadow, SimplifyPointers>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(SimplifyPointersTest, FoldPointer) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
var v : i32;
let p : ptr<function, i32> = &v;
@@ -40,20 +40,20 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn f() {
var v : i32;
let x : i32 = v;
}
)";
- auto got = Run<Unshadow, SimplifyPointers>(src);
+ auto got = Run<Unshadow, SimplifyPointers>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(SimplifyPointersTest, AddressOfDeref) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
var v : i32;
let p : ptr<function, i32> = &(v);
@@ -66,7 +66,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn f() {
var v : i32;
var a = v;
@@ -75,13 +75,13 @@ fn f() {
}
)";
- auto got = Run<Unshadow, SimplifyPointers>(src);
+ auto got = Run<Unshadow, SimplifyPointers>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(SimplifyPointersTest, DerefAddressOf) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
var v : i32;
let x : i32 = *(&(v));
@@ -90,7 +90,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn f() {
var v : i32;
let x : i32 = v;
@@ -99,13 +99,13 @@ fn f() {
}
)";
- auto got = Run<Unshadow, SimplifyPointers>(src);
+ auto got = Run<Unshadow, SimplifyPointers>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(SimplifyPointersTest, ComplexChain) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
var a : array<mat4x4<f32>, 4>;
let ap : ptr<function, array<mat4x4<f32>, 4>> = &a;
@@ -115,20 +115,20 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn f() {
var a : array<mat4x4<f32>, 4>;
let v : vec4<f32> = a[3][2];
}
)";
- auto got = Run<Unshadow, SimplifyPointers>(src);
+ auto got = Run<Unshadow, SimplifyPointers>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(SimplifyPointersTest, SavedVars) {
- auto* src = R"(
+ auto* src = R"(
struct S {
i : i32,
};
@@ -152,7 +152,7 @@ fn matrix() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct S {
i : i32,
}
@@ -176,13 +176,13 @@ fn matrix() {
}
)";
- auto got = Run<Unshadow, SimplifyPointers>(src);
+ auto got = Run<Unshadow, SimplifyPointers>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(SimplifyPointersTest, DontSaveLiterals) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
var arr : array<i32, 2>;
let p1 : ptr<function, i32> = &arr[1];
@@ -190,20 +190,20 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn f() {
var arr : array<i32, 2>;
arr[1] = 4;
}
)";
- auto got = Run<Unshadow, SimplifyPointers>(src);
+ auto got = Run<Unshadow, SimplifyPointers>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(SimplifyPointersTest, SavedVarsChain) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
var arr : array<array<i32, 2>, 2>;
let i : i32 = 0;
@@ -214,7 +214,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn f() {
var arr : array<array<i32, 2>, 2>;
let i : i32 = 0;
@@ -225,18 +225,18 @@ fn f() {
}
)";
- auto got = Run<Unshadow, SimplifyPointers>(src);
+ auto got = Run<Unshadow, SimplifyPointers>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(SimplifyPointersTest, ForLoopInit) {
- auto* src = R"(
+ auto* src = R"(
fn foo() -> i32 {
return 1;
}
-@stage(fragment)
+@fragment
fn main() {
var arr = array<f32, 4>();
for (let a = &arr[foo()]; ;) {
@@ -246,12 +246,12 @@ fn main() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn foo() -> i32 {
return 1;
}
-@stage(fragment)
+@fragment
fn main() {
var arr = array<f32, 4>();
let a_save = foo();
@@ -262,13 +262,13 @@ fn main() {
}
)";
- auto got = Run<Unshadow, SimplifyPointers>(src);
+ auto got = Run<Unshadow, SimplifyPointers>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(SimplifyPointersTest, MultiSavedVarsInSinglePtrLetExpr) {
- auto* src = R"(
+ auto* src = R"(
fn x() -> i32 {
return 1;
}
@@ -297,7 +297,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn x() -> i32 {
return 1;
}
@@ -328,16 +328,16 @@ fn f() {
}
)";
- auto got = Run<Unshadow, SimplifyPointers>(src);
+ auto got = Run<Unshadow, SimplifyPointers>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(SimplifyPointersTest, ShadowPointer) {
- auto* src = R"(
+ auto* src = R"(
var<private> a : array<i32, 2>;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
let x = &a;
var a : i32 = (*x)[0];
@@ -347,10 +347,10 @@ fn main() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
var<private> a : array<i32, 2>;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn main() {
var a_1 : i32 = a[0];
{
@@ -359,9 +359,9 @@ fn main() {
}
)";
- auto got = Run<Unshadow, SimplifyPointers>(src);
+ auto got = Run<Unshadow, SimplifyPointers>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/transform/single_entry_point.cc b/chromium/third_party/dawn/src/tint/transform/single_entry_point.cc
index fe92483b177..82324c77453 100644
--- a/chromium/third_party/dawn/src/tint/transform/single_entry_point.cc
+++ b/chromium/third_party/dawn/src/tint/transform/single_entry_point.cc
@@ -30,86 +30,82 @@ SingleEntryPoint::SingleEntryPoint() = default;
SingleEntryPoint::~SingleEntryPoint() = default;
-void SingleEntryPoint::Run(CloneContext& ctx,
- const DataMap& inputs,
- DataMap&) const {
- auto* cfg = inputs.Get<Config>();
- if (cfg == nullptr) {
- ctx.dst->Diagnostics().add_error(
- diag::System::Transform,
- "missing transform data for " + std::string(TypeInfo().name));
-
- return;
- }
-
- // Find the target entry point.
- const ast::Function* entry_point = nullptr;
- for (auto* f : ctx.src->AST().Functions()) {
- if (!f->IsEntryPoint()) {
- continue;
+void SingleEntryPoint::Run(CloneContext& ctx, const DataMap& inputs, DataMap&) const {
+ auto* cfg = inputs.Get<Config>();
+ if (cfg == nullptr) {
+ ctx.dst->Diagnostics().add_error(
+ diag::System::Transform, "missing transform data for " + std::string(TypeInfo().name));
+
+ return;
+ }
+
+ // Find the target entry point.
+ const ast::Function* entry_point = nullptr;
+ for (auto* f : ctx.src->AST().Functions()) {
+ if (!f->IsEntryPoint()) {
+ continue;
+ }
+ if (ctx.src->Symbols().NameFor(f->symbol) == cfg->entry_point_name) {
+ entry_point = f;
+ break;
+ }
}
- if (ctx.src->Symbols().NameFor(f->symbol) == cfg->entry_point_name) {
- entry_point = f;
- break;
+ if (entry_point == nullptr) {
+ ctx.dst->Diagnostics().add_error(diag::System::Transform,
+ "entry point '" + cfg->entry_point_name + "' not found");
+ return;
}
- }
- if (entry_point == nullptr) {
- ctx.dst->Diagnostics().add_error(
- diag::System::Transform,
- "entry point '" + cfg->entry_point_name + "' not found");
- return;
- }
-
- auto& sem = ctx.src->Sem();
-
- // Build set of referenced module-scope variables for faster lookups later.
- std::unordered_set<const ast::Variable*> referenced_vars;
- for (auto* var : sem.Get(entry_point)->TransitivelyReferencedGlobals()) {
- referenced_vars.emplace(var->Declaration());
- }
-
- // Clone any module-scope variables, types, and functions that are statically
- // referenced by the target entry point.
- for (auto* decl : ctx.src->AST().GlobalDeclarations()) {
- if (auto* ty = decl->As<ast::TypeDecl>()) {
- // TODO(jrprice): Strip unused types.
- ctx.dst->AST().AddTypeDecl(ctx.Clone(ty));
- } else if (auto* var = decl->As<ast::Variable>()) {
- if (referenced_vars.count(var)) {
- if (var->is_overridable) {
- // It is an overridable constant
- if (!ast::HasAttribute<ast::IdAttribute>(var->attributes)) {
- // If the constant doesn't already have an @id() attribute, add one
- // so that its allocated ID so that it won't be affected by other
- // stripped away constants
- auto* global = sem.Get(var)->As<sem::GlobalVariable>();
- const auto* id = ctx.dst->Id(global->ConstantId());
- ctx.InsertFront(var->attributes, id);
- }
+
+ auto& sem = ctx.src->Sem();
+
+ // Build set of referenced module-scope variables for faster lookups later.
+ std::unordered_set<const ast::Variable*> referenced_vars;
+ for (auto* var : sem.Get(entry_point)->TransitivelyReferencedGlobals()) {
+ referenced_vars.emplace(var->Declaration());
+ }
+
+ // Clone any module-scope variables, types, and functions that are statically
+ // referenced by the target entry point.
+ for (auto* decl : ctx.src->AST().GlobalDeclarations()) {
+ if (auto* ty = decl->As<ast::TypeDecl>()) {
+ // TODO(jrprice): Strip unused types.
+ ctx.dst->AST().AddTypeDecl(ctx.Clone(ty));
+ } else if (auto* var = decl->As<ast::Variable>()) {
+ if (referenced_vars.count(var)) {
+ if (var->is_overridable) {
+ // It is an overridable constant
+ if (!ast::HasAttribute<ast::IdAttribute>(var->attributes)) {
+                    // If the constant doesn't already have an @id() attribute, add one
+                    // so that it keeps its allocated ID and is not affected by other
+                    // constants being stripped away.
+ auto* global = sem.Get(var)->As<sem::GlobalVariable>();
+ const auto* id = ctx.dst->Id(global->ConstantId());
+ ctx.InsertFront(var->attributes, id);
+ }
+ }
+ ctx.dst->AST().AddGlobalVariable(ctx.Clone(var));
+ }
+ } else if (auto* func = decl->As<ast::Function>()) {
+ if (sem.Get(func)->HasAncestorEntryPoint(entry_point->symbol)) {
+ ctx.dst->AST().AddFunction(ctx.Clone(func));
+ }
+ } else if (auto* ext = decl->As<ast::Enable>()) {
+ ctx.dst->AST().AddEnable(ctx.Clone(ext));
+ } else {
+ TINT_UNREACHABLE(Transform, ctx.dst->Diagnostics())
+ << "unhandled global declaration: " << decl->TypeInfo().name;
+ return;
}
- ctx.dst->AST().AddGlobalVariable(ctx.Clone(var));
- }
- } else if (auto* func = decl->As<ast::Function>()) {
- if (sem.Get(func)->HasAncestorEntryPoint(entry_point->symbol)) {
- ctx.dst->AST().AddFunction(ctx.Clone(func));
- }
- } else {
- TINT_UNREACHABLE(Transform, ctx.dst->Diagnostics())
- << "unhandled global declaration: " << decl->TypeInfo().name;
- return;
}
- }
- // Clone the entry point.
- ctx.dst->AST().AddFunction(ctx.Clone(entry_point));
+ // Clone the entry point.
+ ctx.dst->AST().AddFunction(ctx.Clone(entry_point));
}
-SingleEntryPoint::Config::Config(std::string entry_point)
- : entry_point_name(entry_point) {}
+SingleEntryPoint::Config::Config(std::string entry_point) : entry_point_name(entry_point) {}
SingleEntryPoint::Config::Config(const Config&) = default;
SingleEntryPoint::Config::~Config() = default;
-SingleEntryPoint::Config& SingleEntryPoint::Config::operator=(const Config&) =
- default;
+SingleEntryPoint::Config& SingleEntryPoint::Config::operator=(const Config&) = default;
} // namespace tint::transform
diff --git a/chromium/third_party/dawn/src/tint/transform/single_entry_point.h b/chromium/third_party/dawn/src/tint/transform/single_entry_point.h
index b5aed68f4ce..0a922a78d03 100644
--- a/chromium/third_party/dawn/src/tint/transform/single_entry_point.h
+++ b/chromium/third_party/dawn/src/tint/transform/single_entry_point.h
@@ -26,43 +26,41 @@ namespace tint::transform {
/// All module-scope variables, types, and functions that are not used by the
/// target entry point will also be removed.
class SingleEntryPoint : public Castable<SingleEntryPoint, Transform> {
- public:
- /// Configuration options for the transform
- struct Config : public Castable<Config, Data> {
- /// Constructor
- /// @param entry_point the name of the entry point to keep
- explicit Config(std::string entry_point = "");
+ public:
+ /// Configuration options for the transform
+ struct Config : public Castable<Config, Data> {
+ /// Constructor
+ /// @param entry_point the name of the entry point to keep
+ explicit Config(std::string entry_point = "");
- /// Copy constructor
- Config(const Config&);
+ /// Copy constructor
+ Config(const Config&);
- /// Destructor
- ~Config() override;
+ /// Destructor
+ ~Config() override;
- /// Assignment operator
- /// @returns this Config
- Config& operator=(const Config&);
+ /// Assignment operator
+ /// @returns this Config
+ Config& operator=(const Config&);
- /// The name of the entry point to keep.
- std::string entry_point_name;
- };
+ /// The name of the entry point to keep.
+ std::string entry_point_name;
+ };
- /// Constructor
- SingleEntryPoint();
+ /// Constructor
+ SingleEntryPoint();
- /// Destructor
- ~SingleEntryPoint() override;
+ /// Destructor
+ ~SingleEntryPoint() override;
- protected:
- /// Runs the transform using the CloneContext built for transforming a
- /// program. Run() is responsible for calling Clone() on the CloneContext.
- /// @param ctx the CloneContext primed with the input program and
- /// ProgramBuilder
- /// @param inputs optional extra transform-specific input data
- /// @param outputs optional extra transform-specific output data
- void Run(CloneContext& ctx,
- const DataMap& inputs,
- DataMap& outputs) const override;
+ protected:
+ /// Runs the transform using the CloneContext built for transforming a
+ /// program. Run() is responsible for calling Clone() on the CloneContext.
+ /// @param ctx the CloneContext primed with the input program and
+ /// ProgramBuilder
+ /// @param inputs optional extra transform-specific input data
+ /// @param outputs optional extra transform-specific output data
+ void Run(CloneContext& ctx, const DataMap& inputs, DataMap& outputs) const override;
};
} // namespace tint::transform
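
For orientation, the reformatted SingleEntryPoint header above leaves the public surface unchanged: a caller still names the entry point to keep by packing a SingleEntryPoint::Config into a DataMap and invoking the inherited Transform::Run(). A minimal usage sketch, not taken from the patch itself and assuming only the headers shown in this diff:

// Sketch: strip a program down to one entry point, mirroring the tests below.
#include <string>

#include "src/tint/transform/single_entry_point.h"

tint::transform::Output KeepOnly(const tint::Program& program, const std::string& entry) {
    tint::transform::DataMap data;
    data.Add<tint::transform::SingleEntryPoint::Config>(entry);  // name of the entry point to keep
    tint::transform::SingleEntryPoint transform;
    return transform.Run(&program, data);  // other entry points and unused globals are dropped
}
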
diff --git a/chromium/third_party/dawn/src/tint/transform/single_entry_point_test.cc b/chromium/third_party/dawn/src/tint/transform/single_entry_point_test.cc
index 750f5c3ba5d..8445f61f013 100644
--- a/chromium/third_party/dawn/src/tint/transform/single_entry_point_test.cc
+++ b/chromium/third_party/dawn/src/tint/transform/single_entry_point_test.cc
@@ -24,119 +24,118 @@ namespace {
using SingleEntryPointTest = TransformTest;
TEST_F(SingleEntryPointTest, Error_MissingTransformData) {
- auto* src = "";
+ auto* src = "";
- auto* expect =
- "error: missing transform data for tint::transform::SingleEntryPoint";
+ auto* expect = "error: missing transform data for tint::transform::SingleEntryPoint";
- auto got = Run<SingleEntryPoint>(src);
+ auto got = Run<SingleEntryPoint>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(SingleEntryPointTest, Error_NoEntryPoints) {
- auto* src = "";
+ auto* src = "";
- auto* expect = "error: entry point 'main' not found";
+ auto* expect = "error: entry point 'main' not found";
- DataMap data;
- data.Add<SingleEntryPoint::Config>("main");
- auto got = Run<SingleEntryPoint>(src, data);
+ DataMap data;
+ data.Add<SingleEntryPoint::Config>("main");
+ auto got = Run<SingleEntryPoint>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(SingleEntryPointTest, Error_InvalidEntryPoint) {
- auto* src = R"(
-@stage(vertex)
+ auto* src = R"(
+@vertex
fn main() -> @builtin(position) vec4<f32> {
return vec4<f32>();
}
)";
- auto* expect = "error: entry point '_' not found";
+ auto* expect = "error: entry point '_' not found";
- SingleEntryPoint::Config cfg("_");
+ SingleEntryPoint::Config cfg("_");
- DataMap data;
- data.Add<SingleEntryPoint::Config>(cfg);
- auto got = Run<SingleEntryPoint>(src, data);
+ DataMap data;
+ data.Add<SingleEntryPoint::Config>(cfg);
+ auto got = Run<SingleEntryPoint>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(SingleEntryPointTest, Error_NotAnEntryPoint) {
- auto* src = R"(
+ auto* src = R"(
fn foo() {}
-@stage(fragment)
+@fragment
fn main() {}
)";
- auto* expect = "error: entry point 'foo' not found";
+ auto* expect = "error: entry point 'foo' not found";
- SingleEntryPoint::Config cfg("foo");
+ SingleEntryPoint::Config cfg("foo");
- DataMap data;
- data.Add<SingleEntryPoint::Config>(cfg);
- auto got = Run<SingleEntryPoint>(src, data);
+ DataMap data;
+ data.Add<SingleEntryPoint::Config>(cfg);
+ auto got = Run<SingleEntryPoint>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(SingleEntryPointTest, SingleEntryPoint) {
- auto* src = R"(
-@stage(compute) @workgroup_size(1)
+ auto* src = R"(
+@compute @workgroup_size(1)
fn main() {
}
)";
- SingleEntryPoint::Config cfg("main");
+ SingleEntryPoint::Config cfg("main");
- DataMap data;
- data.Add<SingleEntryPoint::Config>(cfg);
- auto got = Run<SingleEntryPoint>(src, data);
+ DataMap data;
+ data.Add<SingleEntryPoint::Config>(cfg);
+ auto got = Run<SingleEntryPoint>(src, data);
- EXPECT_EQ(src, str(got));
+ EXPECT_EQ(src, str(got));
}
TEST_F(SingleEntryPointTest, MultipleEntryPoints) {
- auto* src = R"(
-@stage(vertex)
+ auto* src = R"(
+@vertex
fn vert_main() -> @builtin(position) vec4<f32> {
return vec4<f32>();
}
-@stage(fragment)
+@fragment
fn frag_main() {
}
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn comp_main1() {
}
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn comp_main2() {
}
)";
- auto* expect = R"(
-@stage(compute) @workgroup_size(1)
+ auto* expect = R"(
+@compute @workgroup_size(1)
fn comp_main1() {
}
)";
- SingleEntryPoint::Config cfg("comp_main1");
+ SingleEntryPoint::Config cfg("comp_main1");
- DataMap data;
- data.Add<SingleEntryPoint::Config>(cfg);
- auto got = Run<SingleEntryPoint>(src, data);
+ DataMap data;
+ data.Add<SingleEntryPoint::Config>(cfg);
+ auto got = Run<SingleEntryPoint>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(SingleEntryPointTest, GlobalVariables) {
- auto* src = R"(
+ auto* src = R"(
var<private> a : f32;
var<private> b : f32;
@@ -145,48 +144,48 @@ var<private> c : f32;
var<private> d : f32;
-@stage(vertex)
+@vertex
fn vert_main() -> @builtin(position) vec4<f32> {
a = 0.0;
return vec4<f32>();
}
-@stage(fragment)
+@fragment
fn frag_main() {
b = 0.0;
}
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn comp_main1() {
c = 0.0;
}
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn comp_main2() {
d = 0.0;
}
)";
- auto* expect = R"(
+ auto* expect = R"(
var<private> c : f32;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn comp_main1() {
c = 0.0;
}
)";
- SingleEntryPoint::Config cfg("comp_main1");
+ SingleEntryPoint::Config cfg("comp_main1");
- DataMap data;
- data.Add<SingleEntryPoint::Config>(cfg);
- auto got = Run<SingleEntryPoint>(src, data);
+ DataMap data;
+ data.Add<SingleEntryPoint::Config>(cfg);
+ auto got = Run<SingleEntryPoint>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(SingleEntryPointTest, GlobalConstants) {
- auto* src = R"(
+ auto* src = R"(
let a : f32 = 1.0;
let b : f32 = 1.0;
@@ -195,182 +194,182 @@ let c : f32 = 1.0;
let d : f32 = 1.0;
-@stage(vertex)
+@vertex
fn vert_main() -> @builtin(position) vec4<f32> {
let local_a : f32 = a;
return vec4<f32>();
}
-@stage(fragment)
+@fragment
fn frag_main() {
let local_b : f32 = b;
}
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn comp_main1() {
let local_c : f32 = c;
}
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn comp_main2() {
let local_d : f32 = d;
}
)";
- auto* expect = R"(
+ auto* expect = R"(
let c : f32 = 1.0;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn comp_main1() {
let local_c : f32 = c;
}
)";
- SingleEntryPoint::Config cfg("comp_main1");
+ SingleEntryPoint::Config cfg("comp_main1");
- DataMap data;
- data.Add<SingleEntryPoint::Config>(cfg);
- auto got = Run<SingleEntryPoint>(src, data);
+ DataMap data;
+ data.Add<SingleEntryPoint::Config>(cfg);
+ auto got = Run<SingleEntryPoint>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(SingleEntryPointTest, WorkgroupSizeLetPreserved) {
- auto* src = R"(
+ auto* src = R"(
let size : i32 = 1;
-@stage(compute) @workgroup_size(size)
+@compute @workgroup_size(size)
fn main() {
}
)";
- auto* expect = src;
+ auto* expect = src;
- SingleEntryPoint::Config cfg("main");
+ SingleEntryPoint::Config cfg("main");
- DataMap data;
- data.Add<SingleEntryPoint::Config>(cfg);
- auto got = Run<SingleEntryPoint>(src, data);
+ DataMap data;
+ data.Add<SingleEntryPoint::Config>(cfg);
+ auto got = Run<SingleEntryPoint>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(SingleEntryPointTest, OverridableConstants) {
- auto* src = R"(
+ auto* src = R"(
@id(1001) override c1 : u32 = 1u;
override c2 : u32 = 1u;
@id(0) override c3 : u32 = 1u;
@id(9999) override c4 : u32 = 1u;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn comp_main1() {
let local_d = c1;
}
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn comp_main2() {
let local_d = c2;
}
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn comp_main3() {
let local_d = c3;
}
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn comp_main4() {
let local_d = c4;
}
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn comp_main5() {
let local_d = 1u;
}
)";
- {
- SingleEntryPoint::Config cfg("comp_main1");
- auto* expect = R"(
+ {
+ SingleEntryPoint::Config cfg("comp_main1");
+ auto* expect = R"(
@id(1001) override c1 : u32 = 1u;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn comp_main1() {
let local_d = c1;
}
)";
- DataMap data;
- data.Add<SingleEntryPoint::Config>(cfg);
- auto got = Run<SingleEntryPoint>(src, data);
- EXPECT_EQ(expect, str(got));
- }
-
- {
- SingleEntryPoint::Config cfg("comp_main2");
- // The decorator is replaced with the one with explicit id
- // And should not be affected by other constants stripped away
- auto* expect = R"(
+ DataMap data;
+ data.Add<SingleEntryPoint::Config>(cfg);
+ auto got = Run<SingleEntryPoint>(src, data);
+ EXPECT_EQ(expect, str(got));
+ }
+
+ {
+ SingleEntryPoint::Config cfg("comp_main2");
+        // The attribute is replaced with one carrying an explicit id,
+        // and it should not be affected by other constants that are stripped away.
+ auto* expect = R"(
@id(1) override c2 : u32 = 1u;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn comp_main2() {
let local_d = c2;
}
)";
- DataMap data;
- data.Add<SingleEntryPoint::Config>(cfg);
- auto got = Run<SingleEntryPoint>(src, data);
- EXPECT_EQ(expect, str(got));
- }
-
- {
- SingleEntryPoint::Config cfg("comp_main3");
- auto* expect = R"(
+ DataMap data;
+ data.Add<SingleEntryPoint::Config>(cfg);
+ auto got = Run<SingleEntryPoint>(src, data);
+ EXPECT_EQ(expect, str(got));
+ }
+
+ {
+ SingleEntryPoint::Config cfg("comp_main3");
+ auto* expect = R"(
@id(0) override c3 : u32 = 1u;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn comp_main3() {
let local_d = c3;
}
)";
- DataMap data;
- data.Add<SingleEntryPoint::Config>(cfg);
- auto got = Run<SingleEntryPoint>(src, data);
- EXPECT_EQ(expect, str(got));
- }
-
- {
- SingleEntryPoint::Config cfg("comp_main4");
- auto* expect = R"(
+ DataMap data;
+ data.Add<SingleEntryPoint::Config>(cfg);
+ auto got = Run<SingleEntryPoint>(src, data);
+ EXPECT_EQ(expect, str(got));
+ }
+
+ {
+ SingleEntryPoint::Config cfg("comp_main4");
+ auto* expect = R"(
@id(9999) override c4 : u32 = 1u;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn comp_main4() {
let local_d = c4;
}
)";
- DataMap data;
- data.Add<SingleEntryPoint::Config>(cfg);
- auto got = Run<SingleEntryPoint>(src, data);
- EXPECT_EQ(expect, str(got));
- }
-
- {
- SingleEntryPoint::Config cfg("comp_main5");
- auto* expect = R"(
-@stage(compute) @workgroup_size(1)
+ DataMap data;
+ data.Add<SingleEntryPoint::Config>(cfg);
+ auto got = Run<SingleEntryPoint>(src, data);
+ EXPECT_EQ(expect, str(got));
+ }
+
+ {
+ SingleEntryPoint::Config cfg("comp_main5");
+ auto* expect = R"(
+@compute @workgroup_size(1)
fn comp_main5() {
let local_d = 1u;
}
)";
- DataMap data;
- data.Add<SingleEntryPoint::Config>(cfg);
- auto got = Run<SingleEntryPoint>(src, data);
- EXPECT_EQ(expect, str(got));
- }
+ DataMap data;
+ data.Add<SingleEntryPoint::Config>(cfg);
+ auto got = Run<SingleEntryPoint>(src, data);
+ EXPECT_EQ(expect, str(got));
+ }
}
TEST_F(SingleEntryPointTest, CalledFunctions) {
- auto* src = R"(
+ auto* src = R"(
fn inner1() {
}
@@ -390,18 +389,18 @@ fn outer2() {
inner_shared();
}
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn comp_main1() {
outer1();
}
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn comp_main2() {
outer2();
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn inner1() {
}
@@ -413,23 +412,23 @@ fn outer1() {
inner_shared();
}
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn comp_main1() {
outer1();
}
)";
- SingleEntryPoint::Config cfg("comp_main1");
+ SingleEntryPoint::Config cfg("comp_main1");
- DataMap data;
- data.Add<SingleEntryPoint::Config>(cfg);
- auto got = Run<SingleEntryPoint>(src, data);
+ DataMap data;
+ data.Add<SingleEntryPoint::Config>(cfg);
+ auto got = Run<SingleEntryPoint>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(SingleEntryPointTest, GlobalsReferencedByCalledFunctions) {
- auto* src = R"(
+ auto* src = R"(
var<private> inner1_var : f32;
var<private> inner2_var : f32;
@@ -464,18 +463,18 @@ fn outer2() {
outer2_var = 0.0;
}
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn comp_main1() {
outer1();
}
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn comp_main2() {
outer2();
}
)";
- auto* expect = R"(
+ auto* expect = R"(
var<private> inner1_var : f32;
var<private> inner_shared_var : f32;
@@ -496,19 +495,19 @@ fn outer1() {
outer1_var = 0.0;
}
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn comp_main1() {
outer1();
}
)";
- SingleEntryPoint::Config cfg("comp_main1");
+ SingleEntryPoint::Config cfg("comp_main1");
- DataMap data;
- data.Add<SingleEntryPoint::Config>(cfg);
- auto got = Run<SingleEntryPoint>(src, data);
+ DataMap data;
+ data.Add<SingleEntryPoint::Config>(cfg);
+ auto got = Run<SingleEntryPoint>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/transform/test_helper.h b/chromium/third_party/dawn/src/tint/transform/test_helper.h
index 7d7015c096c..42218a72e62 100644
--- a/chromium/third_party/dawn/src/tint/transform/test_helper.h
+++ b/chromium/third_party/dawn/src/tint/transform/test_helper.h
@@ -31,115 +31,112 @@ namespace tint::transform {
/// @returns the output program as a WGSL string, or an error string if the
/// program is not valid.
inline std::string str(const Program& program) {
- diag::Formatter::Style style;
- style.print_newline_at_end = false;
-
- if (!program.IsValid()) {
- return diag::Formatter(style).format(program.Diagnostics());
- }
-
- writer::wgsl::Options options;
- auto result = writer::wgsl::Generate(&program, options);
- if (!result.success) {
- return "WGSL writer failed:\n" + result.error;
- }
-
- auto res = result.wgsl;
- if (res.empty()) {
- return res;
- }
- // The WGSL sometimes has two trailing newlines. Strip them
- while (res.back() == '\n') {
- res.pop_back();
- }
- if (res.empty()) {
- return res;
- }
- return "\n" + res + "\n";
+ diag::Formatter::Style style;
+ style.print_newline_at_end = false;
+
+ if (!program.IsValid()) {
+ return diag::Formatter(style).format(program.Diagnostics());
+ }
+
+ writer::wgsl::Options options;
+ auto result = writer::wgsl::Generate(&program, options);
+ if (!result.success) {
+ return "WGSL writer failed:\n" + result.error;
+ }
+
+ auto res = result.wgsl;
+ if (res.empty()) {
+ return res;
+ }
+ // The WGSL sometimes has two trailing newlines. Strip them
+ while (res.back() == '\n') {
+ res.pop_back();
+ }
+ if (res.empty()) {
+ return res;
+ }
+ return "\n" + res + "\n";
}
/// Helper class for testing transforms
template <typename BASE>
class TransformTestBase : public BASE {
- public:
- /// Transforms and returns the WGSL source `in`, transformed using
- /// `transform`.
- /// @param transform the transform to apply
- /// @param in the input WGSL source
- /// @param data the optional DataMap to pass to Transform::Run()
- /// @return the transformed output
- Output Run(std::string in,
- std::unique_ptr<transform::Transform> transform,
- const DataMap& data = {}) {
- std::vector<std::unique_ptr<transform::Transform>> transforms;
- transforms.emplace_back(std::move(transform));
- return Run(std::move(in), std::move(transforms), data);
- }
-
- /// Transforms and returns the WGSL source `in`, transformed using
- /// a transform of type `TRANSFORM`.
- /// @param in the input WGSL source
- /// @param data the optional DataMap to pass to Transform::Run()
- /// @return the transformed output
- template <typename... TRANSFORMS>
- Output Run(std::string in, const DataMap& data = {}) {
- auto file = std::make_unique<Source::File>("test", in);
- auto program = reader::wgsl::Parse(file.get());
-
- // Keep this pointer alive after Transform() returns
- files_.emplace_back(std::move(file));
-
- return Run<TRANSFORMS...>(std::move(program), data);
- }
-
- /// Transforms and returns program `program`, transformed using a transform of
- /// type `TRANSFORM`.
- /// @param program the input Program
- /// @param data the optional DataMap to pass to Transform::Run()
- /// @return the transformed output
- template <typename... TRANSFORMS>
- Output Run(Program&& program, const DataMap& data = {}) {
- if (!program.IsValid()) {
- return Output(std::move(program));
+ public:
+ /// Transforms and returns the WGSL source `in`, transformed using
+ /// `transform`.
+ /// @param transform the transform to apply
+ /// @param in the input WGSL source
+ /// @param data the optional DataMap to pass to Transform::Run()
+ /// @return the transformed output
+ Output Run(std::string in,
+ std::unique_ptr<transform::Transform> transform,
+ const DataMap& data = {}) {
+ std::vector<std::unique_ptr<transform::Transform>> transforms;
+ transforms.emplace_back(std::move(transform));
+ return Run(std::move(in), std::move(transforms), data);
}
- Manager manager;
- for (auto* transform_ptr :
- std::initializer_list<Transform*>{new TRANSFORMS()...}) {
- manager.append(std::unique_ptr<Transform>(transform_ptr));
+ /// Transforms and returns the WGSL source `in`, transformed using
+ /// a transform of type `TRANSFORM`.
+ /// @param in the input WGSL source
+ /// @param data the optional DataMap to pass to Transform::Run()
+ /// @return the transformed output
+ template <typename... TRANSFORMS>
+ Output Run(std::string in, const DataMap& data = {}) {
+ auto file = std::make_unique<Source::File>("test", in);
+ auto program = reader::wgsl::Parse(file.get());
+
+ // Keep this pointer alive after Transform() returns
+ files_.emplace_back(std::move(file));
+
+ return Run<TRANSFORMS...>(std::move(program), data);
+ }
+
+ /// Transforms and returns program `program`, transformed using a transform of
+ /// type `TRANSFORM`.
+ /// @param program the input Program
+ /// @param data the optional DataMap to pass to Transform::Run()
+ /// @return the transformed output
+ template <typename... TRANSFORMS>
+ Output Run(Program&& program, const DataMap& data = {}) {
+ if (!program.IsValid()) {
+ return Output(std::move(program));
+ }
+
+ Manager manager;
+ for (auto* transform_ptr : std::initializer_list<Transform*>{new TRANSFORMS()...}) {
+ manager.append(std::unique_ptr<Transform>(transform_ptr));
+ }
+ return manager.Run(&program, data);
+ }
+
+ /// @param program the input program
+ /// @param data the optional DataMap to pass to Transform::Run()
+ /// @return true if the transform should be run for the given input.
+ template <typename TRANSFORM>
+ bool ShouldRun(Program&& program, const DataMap& data = {}) {
+ EXPECT_TRUE(program.IsValid()) << program.Diagnostics().str();
+ const Transform& t = TRANSFORM();
+ return t.ShouldRun(&program, data);
+ }
+
+ /// @param in the input WGSL source
+ /// @param data the optional DataMap to pass to Transform::Run()
+ /// @return true if the transform should be run for the given input.
+ template <typename TRANSFORM>
+ bool ShouldRun(std::string in, const DataMap& data = {}) {
+ auto file = std::make_unique<Source::File>("test", in);
+ auto program = reader::wgsl::Parse(file.get());
+ return ShouldRun<TRANSFORM>(std::move(program), data);
}
- return manager.Run(&program, data);
- }
-
- /// @param program the input program
- /// @param data the optional DataMap to pass to Transform::Run()
- /// @return true if the transform should be run for the given input.
- template <typename TRANSFORM>
- bool ShouldRun(Program&& program, const DataMap& data = {}) {
- EXPECT_TRUE(program.IsValid()) << program.Diagnostics().str();
- const Transform& t = TRANSFORM();
- return t.ShouldRun(&program, data);
- }
-
- /// @param in the input WGSL source
- /// @param data the optional DataMap to pass to Transform::Run()
- /// @return true if the transform should be run for the given input.
- template <typename TRANSFORM>
- bool ShouldRun(std::string in, const DataMap& data = {}) {
- auto file = std::make_unique<Source::File>("test", in);
- auto program = reader::wgsl::Parse(file.get());
- return ShouldRun<TRANSFORM>(std::move(program), data);
- }
-
- /// @param output the output of the transform
- /// @returns the output program as a WGSL string, or an error string if the
- /// program is not valid.
- std::string str(const Output& output) {
- return transform::str(output.program);
- }
-
- private:
- std::vector<std::unique_ptr<Source::File>> files_;
+
+ /// @param output the output of the transform
+ /// @returns the output program as a WGSL string, or an error string if the
+ /// program is not valid.
+ std::string str(const Output& output) { return transform::str(output.program); }
+
+ private:
+ std::vector<std::unique_ptr<Source::File>> files_;
};
using TransformTest = TransformTestBase<testing::Test>;
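
The helper above is what the *_test.cc files in this patch build on: Run<TRANSFORMS...>() parses the WGSL, chains the listed transforms through a Manager, and str() renders the result back to WGSL for comparison. A sketch of the resulting test shape (ExampleTest and the test name are placeholders; the real files alias TransformTest per transform, e.g. SingleEntryPointTest):

// Sketch: the typical shape of a transform test built on TransformTestBase.
// (Placed in namespace tint::transform, next to the other *_test.cc files.)
using ExampleTest = TransformTest;

TEST_F(ExampleTest, AlreadyMinimalModuleIsUnchanged) {
    auto* src = R"(
@compute @workgroup_size(1)
fn main() {
}
)";

    DataMap data;
    data.Add<SingleEntryPoint::Config>("main");
    auto got = Run<SingleEntryPoint>(src, data);

    EXPECT_EQ(src, str(got));  // only one entry point, so the output equals the input
}
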
diff --git a/chromium/third_party/dawn/src/tint/transform/transform.cc b/chromium/third_party/dawn/src/tint/transform/transform.cc
index 018bc95ca33..f3ab1736739 100644
--- a/chromium/third_party/dawn/src/tint/transform/transform.cc
+++ b/chromium/third_party/dawn/src/tint/transform/transform.cc
@@ -18,12 +18,12 @@
#include <string>
#include "src/tint/program_builder.h"
-#include "src/tint/sem/atomic_type.h"
+#include "src/tint/sem/atomic.h"
#include "src/tint/sem/block_statement.h"
-#include "src/tint/sem/depth_multisampled_texture_type.h"
+#include "src/tint/sem/depth_multisampled_texture.h"
#include "src/tint/sem/for_loop_statement.h"
-#include "src/tint/sem/reference_type.h"
-#include "src/tint/sem/sampler_type.h"
+#include "src/tint/sem/reference.h"
+#include "src/tint/sem/sampler.h"
TINT_INSTANTIATE_TYPEINFO(tint::transform::Transform);
TINT_INSTANTIATE_TYPEINFO(tint::transform::Data);
@@ -45,114 +45,112 @@ Output::Output(Program&& p) : program(std::move(p)) {}
Transform::Transform() = default;
Transform::~Transform() = default;
-Output Transform::Run(const Program* program,
- const DataMap& data /* = {} */) const {
- ProgramBuilder builder;
- CloneContext ctx(&builder, program);
- Output output;
- Run(ctx, data, output.data);
- output.program = Program(std::move(builder));
- return output;
+Output Transform::Run(const Program* program, const DataMap& data /* = {} */) const {
+ ProgramBuilder builder;
+ CloneContext ctx(&builder, program);
+ Output output;
+ Run(ctx, data, output.data);
+ output.program = Program(std::move(builder));
+ return output;
}
void Transform::Run(CloneContext& ctx, const DataMap&, DataMap&) const {
- TINT_UNIMPLEMENTED(Transform, ctx.dst->Diagnostics())
- << "Transform::Run() unimplemented for " << TypeInfo().name;
+ TINT_UNIMPLEMENTED(Transform, ctx.dst->Diagnostics())
+ << "Transform::Run() unimplemented for " << TypeInfo().name;
}
bool Transform::ShouldRun(const Program*, const DataMap&) const {
- return true;
+ return true;
}
void Transform::RemoveStatement(CloneContext& ctx, const ast::Statement* stmt) {
- auto* sem = ctx.src->Sem().Get(stmt);
- if (auto* block = tint::As<sem::BlockStatement>(sem->Parent())) {
- ctx.Remove(block->Declaration()->statements, stmt);
- return;
- }
- if (tint::Is<sem::ForLoopStatement>(sem->Parent())) {
- ctx.Replace(stmt, static_cast<ast::Expression*>(nullptr));
- return;
- }
- TINT_ICE(Transform, ctx.dst->Diagnostics())
- << "unable to remove statement from parent of type "
- << sem->TypeInfo().name;
+ auto* sem = ctx.src->Sem().Get(stmt);
+ if (auto* block = tint::As<sem::BlockStatement>(sem->Parent())) {
+ ctx.Remove(block->Declaration()->statements, stmt);
+ return;
+ }
+ if (tint::Is<sem::ForLoopStatement>(sem->Parent())) {
+ ctx.Replace(stmt, static_cast<ast::Expression*>(nullptr));
+ return;
+ }
+ TINT_ICE(Transform, ctx.dst->Diagnostics())
+ << "unable to remove statement from parent of type " << sem->TypeInfo().name;
}
-const ast::Type* Transform::CreateASTTypeFor(CloneContext& ctx,
- const sem::Type* ty) {
- if (ty->Is<sem::Void>()) {
- return ctx.dst->create<ast::Void>();
- }
- if (ty->Is<sem::I32>()) {
- return ctx.dst->create<ast::I32>();
- }
- if (ty->Is<sem::U32>()) {
- return ctx.dst->create<ast::U32>();
- }
- if (ty->Is<sem::F32>()) {
- return ctx.dst->create<ast::F32>();
- }
- if (ty->Is<sem::Bool>()) {
- return ctx.dst->create<ast::Bool>();
- }
- if (auto* m = ty->As<sem::Matrix>()) {
- auto* el = CreateASTTypeFor(ctx, m->type());
- return ctx.dst->create<ast::Matrix>(el, m->rows(), m->columns());
- }
- if (auto* v = ty->As<sem::Vector>()) {
- auto* el = CreateASTTypeFor(ctx, v->type());
- return ctx.dst->create<ast::Vector>(el, v->Width());
- }
- if (auto* a = ty->As<sem::Array>()) {
- auto* el = CreateASTTypeFor(ctx, a->ElemType());
- ast::AttributeList attrs;
- if (!a->IsStrideImplicit()) {
- attrs.emplace_back(ctx.dst->create<ast::StrideAttribute>(a->Stride()));
- }
- if (a->IsRuntimeSized()) {
- return ctx.dst->ty.array(el, nullptr, std::move(attrs));
- } else {
- return ctx.dst->ty.array(el, a->Count(), std::move(attrs));
- }
- }
- if (auto* s = ty->As<sem::Struct>()) {
- return ctx.dst->create<ast::TypeName>(ctx.Clone(s->Declaration()->name));
- }
- if (auto* s = ty->As<sem::Reference>()) {
- return CreateASTTypeFor(ctx, s->StoreType());
- }
- if (auto* a = ty->As<sem::Atomic>()) {
- return ctx.dst->create<ast::Atomic>(CreateASTTypeFor(ctx, a->Type()));
- }
- if (auto* t = ty->As<sem::DepthTexture>()) {
- return ctx.dst->create<ast::DepthTexture>(t->dim());
- }
- if (auto* t = ty->As<sem::DepthMultisampledTexture>()) {
- return ctx.dst->create<ast::DepthMultisampledTexture>(t->dim());
- }
- if (ty->Is<sem::ExternalTexture>()) {
- return ctx.dst->create<ast::ExternalTexture>();
- }
- if (auto* t = ty->As<sem::MultisampledTexture>()) {
- return ctx.dst->create<ast::MultisampledTexture>(
- t->dim(), CreateASTTypeFor(ctx, t->type()));
- }
- if (auto* t = ty->As<sem::SampledTexture>()) {
- return ctx.dst->create<ast::SampledTexture>(
- t->dim(), CreateASTTypeFor(ctx, t->type()));
- }
- if (auto* t = ty->As<sem::StorageTexture>()) {
- return ctx.dst->create<ast::StorageTexture>(
- t->dim(), t->texel_format(), CreateASTTypeFor(ctx, t->type()),
- t->access());
- }
- if (auto* s = ty->As<sem::Sampler>()) {
- return ctx.dst->create<ast::Sampler>(s->kind());
- }
- TINT_UNREACHABLE(Transform, ctx.dst->Diagnostics())
- << "Unhandled type: " << ty->TypeInfo().name;
- return nullptr;
+const ast::Type* Transform::CreateASTTypeFor(CloneContext& ctx, const sem::Type* ty) {
+ if (ty->Is<sem::Void>()) {
+ return ctx.dst->create<ast::Void>();
+ }
+ if (ty->Is<sem::I32>()) {
+ return ctx.dst->create<ast::I32>();
+ }
+ if (ty->Is<sem::U32>()) {
+ return ctx.dst->create<ast::U32>();
+ }
+ if (ty->Is<sem::F16>()) {
+ return ctx.dst->create<ast::F16>();
+ }
+ if (ty->Is<sem::F32>()) {
+ return ctx.dst->create<ast::F32>();
+ }
+ if (ty->Is<sem::Bool>()) {
+ return ctx.dst->create<ast::Bool>();
+ }
+ if (auto* m = ty->As<sem::Matrix>()) {
+ auto* el = CreateASTTypeFor(ctx, m->type());
+ return ctx.dst->create<ast::Matrix>(el, m->rows(), m->columns());
+ }
+ if (auto* v = ty->As<sem::Vector>()) {
+ auto* el = CreateASTTypeFor(ctx, v->type());
+ return ctx.dst->create<ast::Vector>(el, v->Width());
+ }
+ if (auto* a = ty->As<sem::Array>()) {
+ auto* el = CreateASTTypeFor(ctx, a->ElemType());
+ ast::AttributeList attrs;
+ if (!a->IsStrideImplicit()) {
+ attrs.emplace_back(ctx.dst->create<ast::StrideAttribute>(a->Stride()));
+ }
+ if (a->IsRuntimeSized()) {
+ return ctx.dst->ty.array(el, nullptr, std::move(attrs));
+ } else {
+ return ctx.dst->ty.array(el, u32(a->Count()), std::move(attrs));
+ }
+ }
+ if (auto* s = ty->As<sem::Struct>()) {
+ return ctx.dst->create<ast::TypeName>(ctx.Clone(s->Declaration()->name));
+ }
+ if (auto* s = ty->As<sem::Reference>()) {
+ return CreateASTTypeFor(ctx, s->StoreType());
+ }
+ if (auto* a = ty->As<sem::Atomic>()) {
+ return ctx.dst->create<ast::Atomic>(CreateASTTypeFor(ctx, a->Type()));
+ }
+ if (auto* t = ty->As<sem::DepthTexture>()) {
+ return ctx.dst->create<ast::DepthTexture>(t->dim());
+ }
+ if (auto* t = ty->As<sem::DepthMultisampledTexture>()) {
+ return ctx.dst->create<ast::DepthMultisampledTexture>(t->dim());
+ }
+ if (ty->Is<sem::ExternalTexture>()) {
+ return ctx.dst->create<ast::ExternalTexture>();
+ }
+ if (auto* t = ty->As<sem::MultisampledTexture>()) {
+ return ctx.dst->create<ast::MultisampledTexture>(t->dim(),
+ CreateASTTypeFor(ctx, t->type()));
+ }
+ if (auto* t = ty->As<sem::SampledTexture>()) {
+ return ctx.dst->create<ast::SampledTexture>(t->dim(), CreateASTTypeFor(ctx, t->type()));
+ }
+ if (auto* t = ty->As<sem::StorageTexture>()) {
+ return ctx.dst->create<ast::StorageTexture>(t->dim(), t->texel_format(),
+ CreateASTTypeFor(ctx, t->type()), t->access());
+ }
+ if (auto* s = ty->As<sem::Sampler>()) {
+ return ctx.dst->create<ast::Sampler>(s->kind());
+ }
+ TINT_UNREACHABLE(Transform, ctx.dst->Diagnostics())
+ << "Unhandled type: " << ty->TypeInfo().name;
+ return nullptr;
}
} // namespace tint::transform
diff --git a/chromium/third_party/dawn/src/tint/transform/transform.h b/chromium/third_party/dawn/src/tint/transform/transform.h
index 0a3b4f0526c..de5761736f7 100644
--- a/chromium/third_party/dawn/src/tint/transform/transform.h
+++ b/chromium/third_party/dawn/src/tint/transform/transform.h
@@ -27,176 +27,171 @@ namespace tint::transform {
/// Data is the base class for transforms that accept extra input or emit extra
/// output information along with a Program.
class Data : public Castable<Data> {
- public:
- /// Constructor
- Data();
+ public:
+ /// Constructor
+ Data();
- /// Copy constructor
- Data(const Data&);
+ /// Copy constructor
+ Data(const Data&);
- /// Destructor
- ~Data() override;
+ /// Destructor
+ ~Data() override;
- /// Assignment operator
- /// @returns this Data
- Data& operator=(const Data&);
+ /// Assignment operator
+ /// @returns this Data
+ Data& operator=(const Data&);
};
/// DataMap is a map of Data unique pointers keyed by the Data's ClassID.
class DataMap {
- public:
- /// Constructor
- DataMap();
-
- /// Move constructor
- DataMap(DataMap&&);
-
- /// Constructor
- /// @param data_unique_ptrs a variadic list of additional data unique_ptrs
- /// produced by the transform
- template <typename... DATA>
- explicit DataMap(DATA... data_unique_ptrs) {
- PutAll(std::forward<DATA>(data_unique_ptrs)...);
- }
-
- /// Destructor
- ~DataMap();
-
- /// Move assignment operator
- /// @param rhs the DataMap to move into this DataMap
- /// @return this DataMap
- DataMap& operator=(DataMap&& rhs);
-
- /// Adds the data into DataMap keyed by the ClassID of type T.
- /// @param data the data to add to the DataMap
- template <typename T>
- void Put(std::unique_ptr<T>&& data) {
- static_assert(std::is_base_of<Data, T>::value,
- "T does not derive from Data");
- map_[&TypeInfo::Of<T>()] = std::move(data);
- }
-
- /// Creates the data of type `T` with the provided arguments and adds it into
- /// DataMap keyed by the ClassID of type T.
- /// @param args the arguments forwarded to the constructor for type T
- template <typename T, typename... ARGS>
- void Add(ARGS&&... args) {
- Put(std::make_unique<T>(std::forward<ARGS>(args)...));
- }
-
- /// @returns a pointer to the Data placed into the DataMap with a call to
- /// Put()
- template <typename T>
- T const* Get() const {
- return const_cast<DataMap*>(this)->Get<T>();
- }
-
- /// @returns a pointer to the Data placed into the DataMap with a call to
- /// Put()
- template <typename T>
- T* Get() {
- auto it = map_.find(&TypeInfo::Of<T>());
- if (it == map_.end()) {
- return nullptr;
+ public:
+ /// Constructor
+ DataMap();
+
+ /// Move constructor
+ DataMap(DataMap&&);
+
+ /// Constructor
+ /// @param data_unique_ptrs a variadic list of additional data unique_ptrs
+ /// produced by the transform
+ template <typename... DATA>
+ explicit DataMap(DATA... data_unique_ptrs) {
+ PutAll(std::forward<DATA>(data_unique_ptrs)...);
}
- return static_cast<T*>(it->second.get());
- }
-
- /// Add moves all the data from other into this DataMap
- /// @param other the DataMap to move into this DataMap
- void Add(DataMap&& other) {
- for (auto& it : other.map_) {
- map_.emplace(it.first, std::move(it.second));
+
+ /// Destructor
+ ~DataMap();
+
+ /// Move assignment operator
+ /// @param rhs the DataMap to move into this DataMap
+ /// @return this DataMap
+ DataMap& operator=(DataMap&& rhs);
+
+ /// Adds the data into DataMap keyed by the ClassID of type T.
+ /// @param data the data to add to the DataMap
+ template <typename T>
+ void Put(std::unique_ptr<T>&& data) {
+ static_assert(std::is_base_of<Data, T>::value, "T does not derive from Data");
+ map_[&TypeInfo::Of<T>()] = std::move(data);
+ }
+
+ /// Creates the data of type `T` with the provided arguments and adds it into
+ /// DataMap keyed by the ClassID of type T.
+ /// @param args the arguments forwarded to the constructor for type T
+ template <typename T, typename... ARGS>
+ void Add(ARGS&&... args) {
+ Put(std::make_unique<T>(std::forward<ARGS>(args)...));
+ }
+
+ /// @returns a pointer to the Data placed into the DataMap with a call to
+ /// Put()
+ template <typename T>
+ T const* Get() const {
+ return const_cast<DataMap*>(this)->Get<T>();
+ }
+
+ /// @returns a pointer to the Data placed into the DataMap with a call to
+ /// Put()
+ template <typename T>
+ T* Get() {
+ auto it = map_.find(&TypeInfo::Of<T>());
+ if (it == map_.end()) {
+ return nullptr;
+ }
+ return static_cast<T*>(it->second.get());
+ }
+
+ /// Add moves all the data from other into this DataMap
+ /// @param other the DataMap to move into this DataMap
+ void Add(DataMap&& other) {
+ for (auto& it : other.map_) {
+ map_.emplace(it.first, std::move(it.second));
+ }
+ other.map_.clear();
+ }
+
+ private:
+ template <typename T0>
+ void PutAll(T0&& first) {
+ Put(std::forward<T0>(first));
+ }
+
+ template <typename T0, typename... Tn>
+ void PutAll(T0&& first, Tn&&... remainder) {
+ Put(std::forward<T0>(first));
+ PutAll(std::forward<Tn>(remainder)...);
}
- other.map_.clear();
- }
-
- private:
- template <typename T0>
- void PutAll(T0&& first) {
- Put(std::forward<T0>(first));
- }
-
- template <typename T0, typename... Tn>
- void PutAll(T0&& first, Tn&&... remainder) {
- Put(std::forward<T0>(first));
- PutAll(std::forward<Tn>(remainder)...);
- }
-
- std::unordered_map<const TypeInfo*, std::unique_ptr<Data>> map_;
+
+ std::unordered_map<const TypeInfo*, std::unique_ptr<Data>> map_;
};
/// The return type of Run()
class Output {
- public:
- /// Constructor
- Output();
-
- /// Constructor
- /// @param program the program to move into this Output
- explicit Output(Program&& program);
-
- /// Constructor
- /// @param program_ the program to move into this Output
- /// @param data_ a variadic list of additional data unique_ptrs produced by
- /// the transform
- template <typename... DATA>
- Output(Program&& program_, DATA... data_)
- : program(std::move(program_)), data(std::forward<DATA>(data_)...) {}
-
- /// The transformed program. May be empty on error.
- Program program;
-
- /// Extra output generated by the transforms.
- DataMap data;
+ public:
+ /// Constructor
+ Output();
+
+ /// Constructor
+ /// @param program the program to move into this Output
+ explicit Output(Program&& program);
+
+ /// Constructor
+ /// @param program_ the program to move into this Output
+ /// @param data_ a variadic list of additional data unique_ptrs produced by
+ /// the transform
+ template <typename... DATA>
+ Output(Program&& program_, DATA... data_)
+ : program(std::move(program_)), data(std::forward<DATA>(data_)...) {}
+
+ /// The transformed program. May be empty on error.
+ Program program;
+
+ /// Extra output generated by the transforms.
+ DataMap data;
};
/// Interface for Program transforms
class Transform : public Castable<Transform> {
- public:
- /// Constructor
- Transform();
- /// Destructor
- ~Transform() override;
-
- /// Runs the transform on `program`, returning the transformation result.
- /// @param program the source program to transform
- /// @param data optional extra transform-specific input data
- /// @returns the transformation result
- virtual Output Run(const Program* program, const DataMap& data = {}) const;
-
- /// @param program the program to inspect
- /// @param data optional extra transform-specific input data
- /// @returns true if this transform should be run for the given program
- virtual bool ShouldRun(const Program* program,
- const DataMap& data = {}) const;
-
- protected:
- /// Runs the transform using the CloneContext built for transforming a
- /// program. Run() is responsible for calling Clone() on the CloneContext.
- /// @param ctx the CloneContext primed with the input program and
- /// ProgramBuilder
- /// @param inputs optional extra transform-specific input data
- /// @param outputs optional extra transform-specific output data
- virtual void Run(CloneContext& ctx,
- const DataMap& inputs,
- DataMap& outputs) const;
-
- /// Removes the statement `stmt` from the transformed program.
- /// RemoveStatement handles edge cases, like statements in the initializer and
- /// continuing of for-loops.
- /// @param ctx the clone context
- /// @param stmt the statement to remove when the program is cloned
- static void RemoveStatement(CloneContext& ctx, const ast::Statement* stmt);
-
- /// CreateASTTypeFor constructs new ast::Type nodes that reconstructs the
- /// semantic type `ty`.
- /// @param ctx the clone context
- /// @param ty the semantic type to reconstruct
- /// @returns a ast::Type that when resolved, will produce the semantic type
- /// `ty`.
- static const ast::Type* CreateASTTypeFor(CloneContext& ctx,
- const sem::Type* ty);
+ public:
+ /// Constructor
+ Transform();
+ /// Destructor
+ ~Transform() override;
+
+ /// Runs the transform on `program`, returning the transformation result.
+ /// @param program the source program to transform
+ /// @param data optional extra transform-specific input data
+ /// @returns the transformation result
+ virtual Output Run(const Program* program, const DataMap& data = {}) const;
+
+ /// @param program the program to inspect
+ /// @param data optional extra transform-specific input data
+ /// @returns true if this transform should be run for the given program
+ virtual bool ShouldRun(const Program* program, const DataMap& data = {}) const;
+
+ protected:
+ /// Runs the transform using the CloneContext built for transforming a
+ /// program. Run() is responsible for calling Clone() on the CloneContext.
+ /// @param ctx the CloneContext primed with the input program and
+ /// ProgramBuilder
+ /// @param inputs optional extra transform-specific input data
+ /// @param outputs optional extra transform-specific output data
+ virtual void Run(CloneContext& ctx, const DataMap& inputs, DataMap& outputs) const;
+
+ /// Removes the statement `stmt` from the transformed program.
+ /// RemoveStatement handles edge cases, like statements in the initializer and
+ /// continuing of for-loops.
+ /// @param ctx the clone context
+ /// @param stmt the statement to remove when the program is cloned
+ static void RemoveStatement(CloneContext& ctx, const ast::Statement* stmt);
+
+    /// CreateASTTypeFor constructs new ast::Type nodes that reconstruct the
+    /// semantic type `ty`.
+    /// @param ctx the clone context
+    /// @param ty the semantic type to reconstruct
+    /// @returns an ast::Type that, when resolved, will produce the semantic
+    /// type `ty`.
+ static const ast::Type* CreateASTTypeFor(CloneContext& ctx, const sem::Type* ty);
};
} // namespace tint::transform
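
In practice the Transform interface above is rarely driven one transform at a time; as test_helper.h earlier in this patch shows, a transform::Manager chains several transforms and threads the DataMap of inputs and outputs through them. A sketch of that pattern (the manager.h and simplify_pointers.h include paths and the incoming `program` are assumptions, not shown in this diff):

// Sketch: chaining transforms through transform::Manager, as the test helper does.
#include <memory>

#include "src/tint/transform/manager.h"            // assumed path, not part of this diff
#include "src/tint/transform/simplify_pointers.h"  // assumed path, not part of this diff
#include "src/tint/transform/unshadow.h"

tint::transform::Output CleanUpPointers(const tint::Program& program) {
    tint::transform::Manager manager;
    manager.append(std::make_unique<tint::transform::Unshadow>());
    manager.append(std::make_unique<tint::transform::SimplifyPointers>());

    tint::transform::DataMap inputs;       // per-transform inputs, keyed by their Data subclass
    return manager.Run(&program, inputs);  // Output carries the rewritten Program plus any extra data
}
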
diff --git a/chromium/third_party/dawn/src/tint/transform/transform_test.cc b/chromium/third_party/dawn/src/tint/transform/transform_test.cc
index c100c092836..3e342c5ec1b 100644
--- a/chromium/third_party/dawn/src/tint/transform/transform_test.cc
+++ b/chromium/third_party/dawn/src/tint/transform/transform_test.cc
@@ -23,98 +23,82 @@ namespace {
// Inherit from Transform so we have access to protected methods
struct CreateASTTypeForTest : public testing::Test, public Transform {
- Output Run(const Program*, const DataMap&) const override { return {}; }
+ Output Run(const Program*, const DataMap&) const override { return {}; }
- const ast::Type* create(
- std::function<sem::Type*(ProgramBuilder&)> create_sem_type) {
- ProgramBuilder sem_type_builder;
- auto* sem_type = create_sem_type(sem_type_builder);
- Program program(std::move(sem_type_builder));
- CloneContext ctx(&ast_type_builder, &program, false);
- return CreateASTTypeFor(ctx, sem_type);
- }
+ const ast::Type* create(std::function<sem::Type*(ProgramBuilder&)> create_sem_type) {
+ ProgramBuilder sem_type_builder;
+ auto* sem_type = create_sem_type(sem_type_builder);
+ Program program(std::move(sem_type_builder));
+ CloneContext ctx(&ast_type_builder, &program, false);
+ return CreateASTTypeFor(ctx, sem_type);
+ }
- ProgramBuilder ast_type_builder;
+ ProgramBuilder ast_type_builder;
};
TEST_F(CreateASTTypeForTest, Basic) {
- EXPECT_TRUE(create([](ProgramBuilder& b) {
- return b.create<sem::I32>();
- })->Is<ast::I32>());
- EXPECT_TRUE(create([](ProgramBuilder& b) {
- return b.create<sem::U32>();
- })->Is<ast::U32>());
- EXPECT_TRUE(create([](ProgramBuilder& b) {
- return b.create<sem::F32>();
- })->Is<ast::F32>());
- EXPECT_TRUE(create([](ProgramBuilder& b) {
- return b.create<sem::Bool>();
- })->Is<ast::Bool>());
- EXPECT_TRUE(create([](ProgramBuilder& b) {
- return b.create<sem::Void>();
- })->Is<ast::Void>());
+ EXPECT_TRUE(create([](ProgramBuilder& b) { return b.create<sem::I32>(); })->Is<ast::I32>());
+ EXPECT_TRUE(create([](ProgramBuilder& b) { return b.create<sem::U32>(); })->Is<ast::U32>());
+ EXPECT_TRUE(create([](ProgramBuilder& b) { return b.create<sem::F32>(); })->Is<ast::F32>());
+ EXPECT_TRUE(create([](ProgramBuilder& b) { return b.create<sem::Bool>(); })->Is<ast::Bool>());
+ EXPECT_TRUE(create([](ProgramBuilder& b) { return b.create<sem::Void>(); })->Is<ast::Void>());
}
TEST_F(CreateASTTypeForTest, Matrix) {
- auto* mat = create([](ProgramBuilder& b) {
- auto* column_type = b.create<sem::Vector>(b.create<sem::F32>(), 2u);
- return b.create<sem::Matrix>(column_type, 3u);
- });
- ASSERT_TRUE(mat->Is<ast::Matrix>());
- ASSERT_TRUE(mat->As<ast::Matrix>()->type->Is<ast::F32>());
- ASSERT_EQ(mat->As<ast::Matrix>()->columns, 3u);
- ASSERT_EQ(mat->As<ast::Matrix>()->rows, 2u);
+ auto* mat = create([](ProgramBuilder& b) {
+ auto* column_type = b.create<sem::Vector>(b.create<sem::F32>(), 2u);
+ return b.create<sem::Matrix>(column_type, 3u);
+ });
+ ASSERT_TRUE(mat->Is<ast::Matrix>());
+ ASSERT_TRUE(mat->As<ast::Matrix>()->type->Is<ast::F32>());
+ ASSERT_EQ(mat->As<ast::Matrix>()->columns, 3u);
+ ASSERT_EQ(mat->As<ast::Matrix>()->rows, 2u);
}
TEST_F(CreateASTTypeForTest, Vector) {
- auto* vec = create([](ProgramBuilder& b) {
- return b.create<sem::Vector>(b.create<sem::F32>(), 2u);
- });
- ASSERT_TRUE(vec->Is<ast::Vector>());
- ASSERT_TRUE(vec->As<ast::Vector>()->type->Is<ast::F32>());
- ASSERT_EQ(vec->As<ast::Vector>()->width, 2u);
+ auto* vec =
+ create([](ProgramBuilder& b) { return b.create<sem::Vector>(b.create<sem::F32>(), 2u); });
+ ASSERT_TRUE(vec->Is<ast::Vector>());
+ ASSERT_TRUE(vec->As<ast::Vector>()->type->Is<ast::F32>());
+ ASSERT_EQ(vec->As<ast::Vector>()->width, 2u);
}
TEST_F(CreateASTTypeForTest, ArrayImplicitStride) {
- auto* arr = create([](ProgramBuilder& b) {
- return b.create<sem::Array>(b.create<sem::F32>(), 2u, 4u, 4u, 32u, 32u);
- });
- ASSERT_TRUE(arr->Is<ast::Array>());
- ASSERT_TRUE(arr->As<ast::Array>()->type->Is<ast::F32>());
- ASSERT_EQ(arr->As<ast::Array>()->attributes.size(), 0u);
+ auto* arr = create([](ProgramBuilder& b) {
+ return b.create<sem::Array>(b.create<sem::F32>(), 2u, 4u, 4u, 32u, 32u);
+ });
+ ASSERT_TRUE(arr->Is<ast::Array>());
+ ASSERT_TRUE(arr->As<ast::Array>()->type->Is<ast::F32>());
+ ASSERT_EQ(arr->As<ast::Array>()->attributes.size(), 0u);
- auto* size = arr->As<ast::Array>()->count->As<ast::IntLiteralExpression>();
- ASSERT_NE(size, nullptr);
- EXPECT_EQ(size->ValueAsI32(), 2);
+ auto* size = arr->As<ast::Array>()->count->As<ast::IntLiteralExpression>();
+ ASSERT_NE(size, nullptr);
+ EXPECT_EQ(size->value, 2);
}
TEST_F(CreateASTTypeForTest, ArrayNonImplicitStride) {
- auto* arr = create([](ProgramBuilder& b) {
- return b.create<sem::Array>(b.create<sem::F32>(), 2u, 4u, 4u, 64u, 32u);
- });
- ASSERT_TRUE(arr->Is<ast::Array>());
- ASSERT_TRUE(arr->As<ast::Array>()->type->Is<ast::F32>());
- ASSERT_EQ(arr->As<ast::Array>()->attributes.size(), 1u);
- ASSERT_TRUE(arr->As<ast::Array>()->attributes[0]->Is<ast::StrideAttribute>());
- ASSERT_EQ(
- arr->As<ast::Array>()->attributes[0]->As<ast::StrideAttribute>()->stride,
- 64u);
+ auto* arr = create([](ProgramBuilder& b) {
+ return b.create<sem::Array>(b.create<sem::F32>(), 2u, 4u, 4u, 64u, 32u);
+ });
+ ASSERT_TRUE(arr->Is<ast::Array>());
+ ASSERT_TRUE(arr->As<ast::Array>()->type->Is<ast::F32>());
+ ASSERT_EQ(arr->As<ast::Array>()->attributes.size(), 1u);
+ ASSERT_TRUE(arr->As<ast::Array>()->attributes[0]->Is<ast::StrideAttribute>());
+ ASSERT_EQ(arr->As<ast::Array>()->attributes[0]->As<ast::StrideAttribute>()->stride, 64u);
- auto* size = arr->As<ast::Array>()->count->As<ast::IntLiteralExpression>();
- ASSERT_NE(size, nullptr);
- EXPECT_EQ(size->ValueAsI32(), 2);
+ auto* size = arr->As<ast::Array>()->count->As<ast::IntLiteralExpression>();
+ ASSERT_NE(size, nullptr);
+ EXPECT_EQ(size->value, 2);
}
TEST_F(CreateASTTypeForTest, Struct) {
- auto* str = create([](ProgramBuilder& b) {
- auto* decl = b.Structure("S", {});
- return b.create<sem::Struct>(decl, decl->name, sem::StructMemberList{},
- 4u /* align */, 4u /* size */,
- 4u /* size_no_padding */);
- });
- ASSERT_TRUE(str->Is<ast::TypeName>());
- EXPECT_EQ(ast_type_builder.Symbols().NameFor(str->As<ast::TypeName>()->name),
- "S");
+ auto* str = create([](ProgramBuilder& b) {
+ auto* decl = b.Structure("S", {});
+ return b.create<sem::Struct>(decl, decl->name, sem::StructMemberList{}, 4u /* align */,
+ 4u /* size */, 4u /* size_no_padding */);
+ });
+ ASSERT_TRUE(str->Is<ast::TypeName>());
+ EXPECT_EQ(ast_type_builder.Symbols().NameFor(str->As<ast::TypeName>()->name), "S");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/transform/unshadow.cc b/chromium/third_party/dawn/src/tint/transform/unshadow.cc
index 9c28675147c..dcf90daa8b8 100644
--- a/chromium/third_party/dawn/src/tint/transform/unshadow.cc
+++ b/chromium/third_party/dawn/src/tint/transform/unshadow.cc
@@ -30,60 +30,60 @@ namespace tint::transform {
/// The PIMPL state for the Unshadow transform
struct Unshadow::State {
- /// The clone context
- CloneContext& ctx;
-
- /// Constructor
- /// @param context the clone context
- explicit State(CloneContext& context) : ctx(context) {}
-
- /// Performs the transformation
- void Run() {
- auto& sem = ctx.src->Sem();
-
- // Maps a variable to its new name.
- std::unordered_map<const sem::Variable*, Symbol> renamed_to;
-
- auto rename = [&](const sem::Variable* var) -> const ast::Variable* {
- auto* decl = var->Declaration();
- auto name = ctx.src->Symbols().NameFor(decl->symbol);
- auto symbol = ctx.dst->Symbols().New(name);
- renamed_to.emplace(var, symbol);
-
- auto source = ctx.Clone(decl->source);
- auto* type = ctx.Clone(decl->type);
- auto* constructor = ctx.Clone(decl->constructor);
- auto attributes = ctx.Clone(decl->attributes);
- return ctx.dst->create<ast::Variable>(
- source, symbol, decl->declared_storage_class, decl->declared_access,
- type, decl->is_const, decl->is_overridable, constructor, attributes);
- };
-
- ctx.ReplaceAll([&](const ast::Variable* var) -> const ast::Variable* {
- if (auto* local = sem.Get<sem::LocalVariable>(var)) {
- if (local->Shadows()) {
- return rename(local);
- }
- }
- if (auto* param = sem.Get<sem::Parameter>(var)) {
- if (param->Shadows()) {
- return rename(param);
- }
- }
- return nullptr;
- });
- ctx.ReplaceAll([&](const ast::IdentifierExpression* ident)
- -> const tint::ast::IdentifierExpression* {
- if (auto* user = sem.Get<sem::VariableUser>(ident)) {
- auto it = renamed_to.find(user->Variable());
- if (it != renamed_to.end()) {
- return ctx.dst->Expr(it->second);
- }
- }
- return nullptr;
- });
- ctx.Clone();
- }
+ /// The clone context
+ CloneContext& ctx;
+
+ /// Constructor
+ /// @param context the clone context
+ explicit State(CloneContext& context) : ctx(context) {}
+
+ /// Performs the transformation
+ void Run() {
+ auto& sem = ctx.src->Sem();
+
+ // Maps a variable to its new name.
+ std::unordered_map<const sem::Variable*, Symbol> renamed_to;
+
+ auto rename = [&](const sem::Variable* var) -> const ast::Variable* {
+ auto* decl = var->Declaration();
+ auto name = ctx.src->Symbols().NameFor(decl->symbol);
+ auto symbol = ctx.dst->Symbols().New(name);
+ renamed_to.emplace(var, symbol);
+
+ auto source = ctx.Clone(decl->source);
+ auto* type = ctx.Clone(decl->type);
+ auto* constructor = ctx.Clone(decl->constructor);
+ auto attributes = ctx.Clone(decl->attributes);
+ return ctx.dst->create<ast::Variable>(source, symbol, decl->declared_storage_class,
+ decl->declared_access, type, decl->is_const,
+ decl->is_overridable, constructor, attributes);
+ };
+
+ ctx.ReplaceAll([&](const ast::Variable* var) -> const ast::Variable* {
+ if (auto* local = sem.Get<sem::LocalVariable>(var)) {
+ if (local->Shadows()) {
+ return rename(local);
+ }
+ }
+ if (auto* param = sem.Get<sem::Parameter>(var)) {
+ if (param->Shadows()) {
+ return rename(param);
+ }
+ }
+ return nullptr;
+ });
+ ctx.ReplaceAll(
+ [&](const ast::IdentifierExpression* ident) -> const tint::ast::IdentifierExpression* {
+ if (auto* user = sem.Get<sem::VariableUser>(ident)) {
+ auto it = renamed_to.find(user->Variable());
+ if (it != renamed_to.end()) {
+ return ctx.dst->Expr(it->second);
+ }
+ }
+ return nullptr;
+ });
+ ctx.Clone();
+ }
};
Unshadow::Unshadow() = default;
@@ -91,7 +91,7 @@ Unshadow::Unshadow() = default;
Unshadow::~Unshadow() = default;
void Unshadow::Run(CloneContext& ctx, const DataMap&, DataMap&) const {
- State(ctx).Run();
+ State(ctx).Run();
}
} // namespace tint::transform
diff --git a/chromium/third_party/dawn/src/tint/transform/unshadow.h b/chromium/third_party/dawn/src/tint/transform/unshadow.h
index bfea67763c8..ce5e9758aa2 100644
--- a/chromium/third_party/dawn/src/tint/transform/unshadow.h
+++ b/chromium/third_party/dawn/src/tint/transform/unshadow.h
@@ -22,25 +22,23 @@ namespace tint::transform {
/// Unshadow is a Transform that renames any variables that shadow another
/// variable.
class Unshadow : public Castable<Unshadow, Transform> {
- public:
- /// Constructor
- Unshadow();
-
- /// Destructor
- ~Unshadow() override;
-
- protected:
- struct State;
-
- /// Runs the transform using the CloneContext built for transforming a
- /// program. Run() is responsible for calling Clone() on the CloneContext.
- /// @param ctx the CloneContext primed with the input program and
- /// ProgramBuilder
- /// @param inputs optional extra transform-specific input data
- /// @param outputs optional extra transform-specific output data
- void Run(CloneContext& ctx,
- const DataMap& inputs,
- DataMap& outputs) const override;
+ public:
+ /// Constructor
+ Unshadow();
+
+ /// Destructor
+ ~Unshadow() override;
+
+ protected:
+ struct State;
+
+ /// Runs the transform using the CloneContext built for transforming a
+ /// program. Run() is responsible for calling Clone() on the CloneContext.
+ /// @param ctx the CloneContext primed with the input program and
+ /// ProgramBuilder
+ /// @param inputs optional extra transform-specific input data
+ /// @param outputs optional extra transform-specific output data
+ void Run(CloneContext& ctx, const DataMap& inputs, DataMap& outputs) const override;
};
} // namespace tint::transform
diff --git a/chromium/third_party/dawn/src/tint/transform/unshadow_test.cc b/chromium/third_party/dawn/src/tint/transform/unshadow_test.cc
index ccb9fba8a42..30e1db55e5b 100644
--- a/chromium/third_party/dawn/src/tint/transform/unshadow_test.cc
+++ b/chromium/third_party/dawn/src/tint/transform/unshadow_test.cc
@@ -22,16 +22,16 @@ namespace {
using UnshadowTest = TransformTest;
TEST_F(UnshadowTest, EmptyModule) {
- auto* src = "";
- auto* expect = "";
+ auto* src = "";
+ auto* expect = "";
- auto got = Run<Unshadow>(src);
+ auto got = Run<Unshadow>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(UnshadowTest, Noop) {
- auto* src = R"(
+ auto* src = R"(
var<private> a : i32;
let b : i32 = 1;
@@ -46,15 +46,15 @@ fn F(c : i32) {
}
)";
- auto* expect = src;
+ auto* expect = src;
- auto got = Run<Unshadow>(src);
+ auto got = Run<Unshadow>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(UnshadowTest, LocalShadowsAlias) {
- auto* src = R"(
+ auto* src = R"(
type a = i32;
fn X() {
@@ -66,7 +66,7 @@ fn Y() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
type a = i32;
fn X() {
@@ -78,13 +78,13 @@ fn Y() {
}
)";
- auto got = Run<Unshadow>(src);
+ auto got = Run<Unshadow>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(UnshadowTest, LocalShadowsAlias_OutOfOrder) {
- auto* src = R"(
+ auto* src = R"(
fn X() {
var a = false;
}
@@ -96,7 +96,7 @@ fn Y() {
type a = i32;
)";
- auto* expect = R"(
+ auto* expect = R"(
fn X() {
var a_1 = false;
}
@@ -108,13 +108,13 @@ fn Y() {
type a = i32;
)";
- auto got = Run<Unshadow>(src);
+ auto got = Run<Unshadow>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(UnshadowTest, LocalShadowsStruct) {
- auto* src = R"(
+ auto* src = R"(
struct a {
m : i32,
};
@@ -128,7 +128,7 @@ fn Y() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct a {
m : i32,
}
@@ -142,13 +142,13 @@ fn Y() {
}
)";
- auto got = Run<Unshadow>(src);
+ auto got = Run<Unshadow>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(UnshadowTest, LocalShadowsStruct_OutOfOrder) {
- auto* src = R"(
+ auto* src = R"(
fn X() {
var a = true;
}
@@ -163,7 +163,7 @@ struct a {
)";
- auto* expect = R"(
+ auto* expect = R"(
fn X() {
var a_1 = true;
}
@@ -177,13 +177,13 @@ struct a {
}
)";
- auto got = Run<Unshadow>(src);
+ auto got = Run<Unshadow>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(UnshadowTest, LocalShadowsFunction) {
- auto* src = R"(
+ auto* src = R"(
fn a() {
var a = true;
var b = false;
@@ -195,7 +195,7 @@ fn b() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a() {
var a_1 = true;
var b_1 = false;
@@ -207,13 +207,13 @@ fn b() {
}
)";
- auto got = Run<Unshadow>(src);
+ auto got = Run<Unshadow>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(UnshadowTest, LocalShadowsFunction_OutOfOrder) {
- auto* src = R"(
+ auto* src = R"(
fn b() {
let a = true;
let b = false;
@@ -226,7 +226,7 @@ fn a() {
)";
- auto* expect = R"(
+ auto* expect = R"(
fn b() {
let a_1 = true;
let b_1 = false;
@@ -238,13 +238,13 @@ fn a() {
}
)";
- auto got = Run<Unshadow>(src);
+ auto got = Run<Unshadow>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(UnshadowTest, LocalShadowsGlobalVar) {
- auto* src = R"(
+ auto* src = R"(
var<private> a : i32;
fn X() {
@@ -256,7 +256,7 @@ fn Y() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
var<private> a : i32;
fn X() {
@@ -268,13 +268,13 @@ fn Y() {
}
)";
- auto got = Run<Unshadow>(src);
+ auto got = Run<Unshadow>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(UnshadowTest, LocalShadowsGlobalVar_OutOfOrder) {
- auto* src = R"(
+ auto* src = R"(
fn X() {
var a = (a == 123);
}
@@ -286,7 +286,7 @@ fn Y() {
var<private> a : i32;
)";
- auto* expect = R"(
+ auto* expect = R"(
fn X() {
var a_1 = (a == 123);
}
@@ -298,13 +298,13 @@ fn Y() {
var<private> a : i32;
)";
- auto got = Run<Unshadow>(src);
+ auto got = Run<Unshadow>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(UnshadowTest, LocalShadowsGlobalLet) {
- auto* src = R"(
+ auto* src = R"(
let a : i32 = 1;
fn X() {
@@ -316,7 +316,7 @@ fn Y() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
let a : i32 = 1;
fn X() {
@@ -328,13 +328,13 @@ fn Y() {
}
)";
- auto got = Run<Unshadow>(src);
+ auto got = Run<Unshadow>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(UnshadowTest, LocalShadowsGlobalLet_OutOfOrder) {
- auto* src = R"(
+ auto* src = R"(
fn X() {
var a = (a == 123);
}
@@ -346,7 +346,7 @@ fn Y() {
let a : i32 = 1;
)";
- auto* expect = R"(
+ auto* expect = R"(
fn X() {
var a_1 = (a == 123);
}
@@ -358,13 +358,13 @@ fn Y() {
let a : i32 = 1;
)";
- auto got = Run<Unshadow>(src);
+ auto got = Run<Unshadow>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(UnshadowTest, LocalShadowsLocalVar) {
- auto* src = R"(
+ auto* src = R"(
fn X() {
var a : i32;
{
@@ -376,7 +376,7 @@ fn X() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn X() {
var a : i32;
{
@@ -388,13 +388,13 @@ fn X() {
}
)";
- auto got = Run<Unshadow>(src);
+ auto got = Run<Unshadow>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(UnshadowTest, LocalShadowsLocalLet) {
- auto* src = R"(
+ auto* src = R"(
fn X() {
let a = 1;
{
@@ -406,7 +406,7 @@ fn X() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn X() {
let a = 1;
{
@@ -418,13 +418,13 @@ fn X() {
}
)";
- auto got = Run<Unshadow>(src);
+ auto got = Run<Unshadow>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(UnshadowTest, LocalShadowsParam) {
- auto* src = R"(
+ auto* src = R"(
fn F(a : i32) {
{
var a = (a == 123);
@@ -435,7 +435,7 @@ fn F(a : i32) {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn F(a : i32) {
{
var a_1 = (a == 123);
@@ -446,13 +446,13 @@ fn F(a : i32) {
}
)";
- auto got = Run<Unshadow>(src);
+ auto got = Run<Unshadow>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(UnshadowTest, ParamShadowsFunction) {
- auto* src = R"(
+ auto* src = R"(
fn a(a : i32) {
{
var a = (a == 123);
@@ -463,7 +463,7 @@ fn a(a : i32) {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn a(a_1 : i32) {
{
var a_2 = (a_1 == 123);
@@ -474,73 +474,73 @@ fn a(a_1 : i32) {
}
)";
- auto got = Run<Unshadow>(src);
+ auto got = Run<Unshadow>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(UnshadowTest, ParamShadowsGlobalVar) {
- auto* src = R"(
+ auto* src = R"(
var<private> a : i32;
fn F(a : bool) {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
var<private> a : i32;
fn F(a_1 : bool) {
}
)";
- auto got = Run<Unshadow>(src);
+ auto got = Run<Unshadow>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(UnshadowTest, ParamShadowsGlobalLet) {
- auto* src = R"(
+ auto* src = R"(
let a : i32 = 1;
fn F(a : bool) {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
let a : i32 = 1;
fn F(a_1 : bool) {
}
)";
- auto got = Run<Unshadow>(src);
+ auto got = Run<Unshadow>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(UnshadowTest, ParamShadowsGlobalLet_OutOfOrder) {
- auto* src = R"(
+ auto* src = R"(
fn F(a : bool) {
}
let a : i32 = 1;
)";
- auto* expect = R"(
+ auto* expect = R"(
fn F(a_1 : bool) {
}
let a : i32 = 1;
)";
- auto got = Run<Unshadow>(src);
+ auto got = Run<Unshadow>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(UnshadowTest, ParamShadowsAlias) {
- auto* src = R"(
+ auto* src = R"(
type a = i32;
fn F(a : a) {
@@ -553,7 +553,7 @@ fn F(a : a) {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
type a = i32;
fn F(a_1 : a) {
@@ -566,13 +566,13 @@ fn F(a_1 : a) {
}
)";
- auto got = Run<Unshadow>(src);
+ auto got = Run<Unshadow>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(UnshadowTest, ParamShadowsAlias_OutOfOrder) {
- auto* src = R"(
+ auto* src = R"(
fn F(a : a) {
{
var a = (a == 123);
@@ -585,7 +585,7 @@ fn F(a : a) {
type a = i32;
)";
- auto* expect = R"(
+ auto* expect = R"(
fn F(a_1 : a) {
{
var a_2 = (a_1 == 123);
@@ -598,9 +598,9 @@ fn F(a_1 : a) {
type a = i32;
)";
- auto got = Run<Unshadow>(src);
+ auto got = Run<Unshadow>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/transform/unwind_discard_functions.cc b/chromium/third_party/dawn/src/tint/transform/unwind_discard_functions.cc
index b15f3a093cd..e1ba74c2e9f 100644
--- a/chromium/third_party/dawn/src/tint/transform/unwind_discard_functions.cc
+++ b/chromium/third_party/dawn/src/tint/transform/unwind_discard_functions.cc
@@ -36,335 +36,299 @@ namespace tint::transform {
namespace {
class State {
- private:
- CloneContext& ctx;
- ProgramBuilder& b;
- const sem::Info& sem;
- Symbol module_discard_var_name; // Use ModuleDiscardVarName() to read
- Symbol module_discard_func_name; // Use ModuleDiscardFuncName() to read
-
- // If `block`'s parent is of type TO, returns pointer to it.
- template <typename TO>
- const TO* ParentAs(const ast::BlockStatement* block) {
- if (auto* sem_block = sem.Get(block)) {
- return As<TO>(sem_block->Parent());
+ private:
+ CloneContext& ctx;
+ ProgramBuilder& b;
+ const sem::Info& sem;
+ Symbol module_discard_var_name; // Use ModuleDiscardVarName() to read
+ Symbol module_discard_func_name; // Use ModuleDiscardFuncName() to read
+
+ // Returns true if `sem_expr` contains a call expression that may
+ // (transitively) execute a discard statement.
+ bool MayDiscard(const sem::Expression* sem_expr) {
+ return sem_expr && sem_expr->Behaviors().Contains(sem::Behavior::kDiscard);
}
- return nullptr;
- }
-
- // Returns true if `sem_expr` contains a call expression that may
- // (transitively) execute a discard statement.
- bool MayDiscard(const sem::Expression* sem_expr) {
- return sem_expr && sem_expr->Behaviors().Contains(sem::Behavior::kDiscard);
- }
-
- // Lazily creates and returns the name of the module bool variable for whether
- // to discard: "tint_discard".
- Symbol ModuleDiscardVarName() {
- if (!module_discard_var_name.IsValid()) {
- module_discard_var_name = b.Symbols().New("tint_discard");
- ctx.dst->Global(module_discard_var_name, b.ty.bool_(), b.Expr(false),
- ast::StorageClass::kPrivate);
+
+ // Lazily creates and returns the name of the module bool variable for whether
+ // to discard: "tint_discard".
+ Symbol ModuleDiscardVarName() {
+ if (!module_discard_var_name.IsValid()) {
+ module_discard_var_name = b.Symbols().New("tint_discard");
+ ctx.dst->Global(module_discard_var_name, b.ty.bool_(), b.Expr(false),
+ ast::StorageClass::kPrivate);
+ }
+ return module_discard_var_name;
}
- return module_discard_var_name;
- }
-
- // Lazily creates and returns the name of the function that contains a single
- // discard statement: "tint_discard_func".
- // We do this to avoid having multiple discard statements in a single program,
- // which causes problems in certain backends (see crbug.com/1118).
- Symbol ModuleDiscardFuncName() {
- if (!module_discard_func_name.IsValid()) {
- module_discard_func_name = b.Symbols().New("tint_discard_func");
- b.Func(module_discard_func_name, {}, b.ty.void_(), {b.Discard()});
+
+ // Lazily creates and returns the name of the function that contains a single
+ // discard statement: "tint_discard_func".
+ // We do this to avoid having multiple discard statements in a single program,
+ // which causes problems in certain backends (see crbug.com/1118).
+ Symbol ModuleDiscardFuncName() {
+ if (!module_discard_func_name.IsValid()) {
+ module_discard_func_name = b.Symbols().New("tint_discard_func");
+ b.Func(module_discard_func_name, {}, b.ty.void_(), {b.Discard()});
+ }
+ return module_discard_func_name;
}
- return module_discard_func_name;
- }
-
- // Creates "return <default return value>;" based on the return type of
- // `stmt`'s owning function.
- const ast::ReturnStatement* Return(const ast::Statement* stmt) {
- const ast::Expression* ret_val = nullptr;
- auto* ret_type = sem.Get(stmt)->Function()->Declaration()->return_type;
- if (!ret_type->Is<ast::Void>()) {
- ret_val = b.Construct(ctx.Clone(ret_type));
+
+ // Creates "return <default return value>;" based on the return type of
+ // `stmt`'s owning function.
+ const ast::ReturnStatement* Return(const ast::Statement* stmt) {
+ const ast::Expression* ret_val = nullptr;
+ auto* ret_type = sem.Get(stmt)->Function()->Declaration()->return_type;
+ if (!ret_type->Is<ast::Void>()) {
+ ret_val = b.Construct(ctx.Clone(ret_type));
+ }
+ return b.Return(ret_val);
}
- return b.Return(ret_val);
- }
-
- // Returns true if the function `stmt` is in is an entry point
- bool IsInEntryPointFunc(const ast::Statement* stmt) {
- return sem.Get(stmt)->Function()->Declaration()->IsEntryPoint();
- }
-
- // Creates "tint_discard_func();"
- const ast::CallStatement* CallDiscardFunc() {
- auto func_name = ModuleDiscardFuncName();
- return b.CallStmt(b.Call(func_name));
- }
-
- // Creates and returns a new if-statement of the form:
- //
- // if (tint_discard) {
- // return <default value>;
- // }
- //
- // or if `stmt` is in a entry point function:
- //
- // if (tint_discard) {
- // tint_discard_func();
- // return <default value>;
- // }
- //
- const ast::IfStatement* IfDiscardReturn(const ast::Statement* stmt) {
- ast::StatementList stmts;
-
- // For entry point functions, also emit the discard statement
- if (IsInEntryPointFunc(stmt)) {
- stmts.emplace_back(CallDiscardFunc());
+
+    // Returns true if the function containing `stmt` is an entry point
+ bool IsInEntryPointFunc(const ast::Statement* stmt) {
+ return sem.Get(stmt)->Function()->Declaration()->IsEntryPoint();
}
- stmts.emplace_back(Return(stmt));
-
- auto var_name = ModuleDiscardVarName();
- return b.If(var_name, b.Block(stmts));
- }
-
- // Hoists `sem_expr` to a let followed by an `IfDiscardReturn` before `stmt`.
- // For example, if `stmt` is:
- //
- // return f();
- //
- // This function will transform this to:
- //
- // let t1 = f();
- // if (tint_discard) {
- // return;
- // }
- // return t1;
- //
- const ast::Statement* HoistAndInsertBefore(const ast::Statement* stmt,
- const sem::Expression* sem_expr) {
- auto* expr = sem_expr->Declaration();
-
- auto ip = utils::GetInsertionPoint(ctx, stmt);
- auto var_name = b.Sym();
- auto* decl = b.Decl(b.Var(var_name, nullptr, ctx.Clone(expr)));
- ctx.InsertBefore(ip.first->Declaration()->statements, ip.second, decl);
-
- ctx.InsertBefore(ip.first->Declaration()->statements, ip.second,
- IfDiscardReturn(stmt));
-
- auto* var_expr = b.Expr(var_name);
-
- // Special handling for CallStatement as we can only replace its expression
- // with a CallExpression.
- if (stmt->Is<ast::CallStatement>()) {
- // We could replace the call statement with no statement, but we can't do
- // that with transforms (yet), so just return a phony assignment.
- return b.Assign(b.Phony(), var_expr);
+ // Creates "tint_discard_func();"
+ const ast::CallStatement* CallDiscardFunc() {
+ auto func_name = ModuleDiscardFuncName();
+ return b.CallStmt(b.Call(func_name));
}
- ctx.Replace(expr, var_expr);
- return ctx.CloneWithoutTransform(stmt);
- }
+ // Creates and returns a new if-statement of the form:
+ //
+ // if (tint_discard) {
+ // return <default value>;
+ // }
+ //
+    // or if `stmt` is in an entry point function:
+ //
+ // if (tint_discard) {
+ // tint_discard_func();
+ // return <default value>;
+ // }
+ //
+ const ast::IfStatement* IfDiscardReturn(const ast::Statement* stmt) {
+ ast::StatementList stmts;
+
+ // For entry point functions, also emit the discard statement
+ if (IsInEntryPointFunc(stmt)) {
+ stmts.emplace_back(CallDiscardFunc());
+ }
+
+ stmts.emplace_back(Return(stmt));
+
+ auto var_name = ModuleDiscardVarName();
+ return b.If(var_name, b.Block(stmts));
+ }
- // Returns true if `stmt` is a for-loop initializer statement.
- bool IsForLoopInitStatement(const ast::Statement* stmt) {
- if (auto* sem_stmt = sem.Get(stmt)) {
- if (auto* sem_fl = As<sem::ForLoopStatement>(sem_stmt->Parent())) {
- return sem_fl->Declaration()->initializer == stmt;
- }
+ // Hoists `sem_expr` to a let followed by an `IfDiscardReturn` before `stmt`.
+ // For example, if `stmt` is:
+ //
+ // return f();
+ //
+ // This function will transform this to:
+ //
+ // let t1 = f();
+ // if (tint_discard) {
+ // return;
+ // }
+ // return t1;
+ //
+ const ast::Statement* HoistAndInsertBefore(const ast::Statement* stmt,
+ const sem::Expression* sem_expr) {
+ auto* expr = sem_expr->Declaration();
+
+ auto ip = utils::GetInsertionPoint(ctx, stmt);
+ auto var_name = b.Sym();
+ auto* decl = b.Decl(b.Var(var_name, nullptr, ctx.Clone(expr)));
+ ctx.InsertBefore(ip.first->Declaration()->statements, ip.second, decl);
+
+ ctx.InsertBefore(ip.first->Declaration()->statements, ip.second, IfDiscardReturn(stmt));
+
+ auto* var_expr = b.Expr(var_name);
+
+ // Special handling for CallStatement as we can only replace its expression
+ // with a CallExpression.
+ if (stmt->Is<ast::CallStatement>()) {
+ // We could replace the call statement with no statement, but we can't do
+ // that with transforms (yet), so just return a phony assignment.
+ return b.Assign(b.Phony(), var_expr);
+ }
+
+ ctx.Replace(expr, var_expr);
+ return ctx.CloneWithoutTransform(stmt);
}
- return false;
- }
-
- // Inserts an `IfDiscardReturn` after `stmt` if possible (i.e. `stmt` is not
- // in a for-loop init), otherwise falls back to HoistAndInsertBefore, hoisting
- // `sem_expr` to a let followed by an `IfDiscardReturn` before `stmt`.
- //
- // For example, if `stmt` is:
- //
- // let r = f();
- //
- // This function will transform this to:
- //
- // let r = f();
- // if (tint_discard) {
- // return;
- // }
- const ast::Statement* TryInsertAfter(const ast::Statement* stmt,
- const sem::Expression* sem_expr) {
- // If `stmt` is the init of a for-loop, hoist and insert before instead.
- if (IsForLoopInitStatement(stmt)) {
- return HoistAndInsertBefore(stmt, sem_expr);
+
+ // Returns true if `stmt` is a for-loop initializer statement.
+ bool IsForLoopInitStatement(const ast::Statement* stmt) {
+ if (auto* sem_stmt = sem.Get(stmt)) {
+ if (auto* sem_fl = As<sem::ForLoopStatement>(sem_stmt->Parent())) {
+ return sem_fl->Declaration()->initializer == stmt;
+ }
+ }
+ return false;
}
- auto ip = utils::GetInsertionPoint(ctx, stmt);
- ctx.InsertAfter(ip.first->Declaration()->statements, ip.second,
- IfDiscardReturn(stmt));
- return nullptr; // Don't replace current statement
- }
-
- // Replaces the input discard statement with either setting the module level
- // discard bool ("tint_discard = true"), or calling the discard function
- // ("tint_discard_func()"), followed by a default return statement.
- //
- // Replaces "discard;" with:
- //
- // tint_discard = true;
- // return;
- //
- // Or if `stmt` is a entry point function, replaces with:
- //
- // tint_discard_func();
- // return;
- //
- const ast::Statement* ReplaceDiscardStatement(
- const ast::DiscardStatement* stmt) {
- const ast::Statement* to_insert = nullptr;
- if (IsInEntryPointFunc(stmt)) {
- to_insert = CallDiscardFunc();
- } else {
- auto var_name = ModuleDiscardVarName();
- to_insert = b.Assign(var_name, true);
+ // Inserts an `IfDiscardReturn` after `stmt` if possible (i.e. `stmt` is not
+ // in a for-loop init), otherwise falls back to HoistAndInsertBefore, hoisting
+ // `sem_expr` to a let followed by an `IfDiscardReturn` before `stmt`.
+ //
+ // For example, if `stmt` is:
+ //
+ // let r = f();
+ //
+ // This function will transform this to:
+ //
+ // let r = f();
+ // if (tint_discard) {
+ // return;
+ // }
+ const ast::Statement* TryInsertAfter(const ast::Statement* stmt,
+ const sem::Expression* sem_expr) {
+ // If `stmt` is the init of a for-loop, hoist and insert before instead.
+ if (IsForLoopInitStatement(stmt)) {
+ return HoistAndInsertBefore(stmt, sem_expr);
+ }
+
+ auto ip = utils::GetInsertionPoint(ctx, stmt);
+ ctx.InsertAfter(ip.first->Declaration()->statements, ip.second, IfDiscardReturn(stmt));
+ return nullptr; // Don't replace current statement
}
- auto ip = utils::GetInsertionPoint(ctx, stmt);
- ctx.InsertBefore(ip.first->Declaration()->statements, ip.second, to_insert);
- return Return(stmt);
- }
-
- // Handle statement
- const ast::Statement* Statement(const ast::Statement* stmt) {
- return Switch(
- stmt,
- [&](const ast::DiscardStatement* s) -> const ast::Statement* {
- return ReplaceDiscardStatement(s);
- },
- [&](const ast::AssignmentStatement* s) -> const ast::Statement* {
- auto* sem_lhs = sem.Get(s->lhs);
- auto* sem_rhs = sem.Get(s->rhs);
- if (MayDiscard(sem_lhs)) {
- if (MayDiscard(sem_rhs)) {
- TINT_ICE(Transform, b.Diagnostics())
- << "Unexpected: both sides of assignment statement may "
- "discard. Make sure transform::PromoteSideEffectsToDecl "
- "was run first.";
- }
- return TryInsertAfter(s, sem_lhs);
- } else if (MayDiscard(sem_rhs)) {
- return TryInsertAfter(s, sem_rhs);
- }
- return nullptr;
- },
- [&](const ast::CallStatement* s) -> const ast::Statement* {
- auto* sem_expr = sem.Get(s->expr);
- if (!MayDiscard(sem_expr)) {
- return nullptr;
- }
- return TryInsertAfter(s, sem_expr);
- },
- [&](const ast::ElseStatement* s) -> const ast::Statement* {
- if (MayDiscard(sem.Get(s->condition))) {
- TINT_ICE(Transform, b.Diagnostics())
- << "Unexpected ElseIf condition that may discard. Make sure "
- "transform::PromoteSideEffectsToDecl was run first.";
- }
- return nullptr;
- },
- [&](const ast::ForLoopStatement* s) -> const ast::Statement* {
- if (MayDiscard(sem.Get(s->condition))) {
- TINT_ICE(Transform, b.Diagnostics())
- << "Unexpected ForLoopStatement condition that may discard. "
- "Make sure transform::PromoteSideEffectsToDecl was run "
- "first.";
- }
- return nullptr;
- },
- [&](const ast::IfStatement* s) -> const ast::Statement* {
- auto* sem_expr = sem.Get(s->condition);
- if (!MayDiscard(sem_expr)) {
- return nullptr;
- }
- return HoistAndInsertBefore(s, sem_expr);
- },
- [&](const ast::ReturnStatement* s) -> const ast::Statement* {
- auto* sem_expr = sem.Get(s->value);
- if (!MayDiscard(sem_expr)) {
- return nullptr;
- }
- return HoistAndInsertBefore(s, sem_expr);
- },
- [&](const ast::SwitchStatement* s) -> const ast::Statement* {
- auto* sem_expr = sem.Get(s->condition);
- if (!MayDiscard(sem_expr)) {
- return nullptr;
- }
- return HoistAndInsertBefore(s, sem_expr);
- },
- [&](const ast::VariableDeclStatement* s) -> const ast::Statement* {
- auto* var = s->variable;
- if (!var->constructor) {
- return nullptr;
- }
- auto* sem_expr = sem.Get(var->constructor);
- if (!MayDiscard(sem_expr)) {
- return nullptr;
- }
- return TryInsertAfter(s, sem_expr);
- });
- }
-
- public:
- /// Constructor
- /// @param ctx_in the context
- explicit State(CloneContext& ctx_in)
- : ctx(ctx_in), b(*ctx_in.dst), sem(ctx_in.src->Sem()) {}
-
- /// Runs the transform
- void Run() {
- ctx.ReplaceAll(
- [&](const ast::BlockStatement* block) -> const ast::Statement* {
- // If this block is for an else-if statement, process the else-if now
- // before processing its block statements.
- // NOTE: we can't replace else statements at this point - this would
- // need to be done when replacing the parent if-statement. However, in
- // this transform, we don't ever expect to need to do this as else-ifs
- // are converted to else { if } by PromoteSideEffectsToDecl, so this
- // is only for validation.
- if (auto* sem_else = ParentAs<sem::ElseStatement>(block)) {
- if (auto* new_stmt = Statement(sem_else->Declaration())) {
- TINT_ASSERT(Transform, new_stmt == nullptr);
- return nullptr;
- }
- }
+ // Replaces the input discard statement with either setting the module level
+ // discard bool ("tint_discard = true"), or calling the discard function
+ // ("tint_discard_func()"), followed by a default return statement.
+ //
+ // Replaces "discard;" with:
+ //
+ // tint_discard = true;
+ // return;
+ //
+    // Or if `stmt` is in an entry point function, replaces with:
+ //
+ // tint_discard_func();
+ // return;
+ //
+ const ast::Statement* ReplaceDiscardStatement(const ast::DiscardStatement* stmt) {
+ const ast::Statement* to_insert = nullptr;
+ if (IsInEntryPointFunc(stmt)) {
+ to_insert = CallDiscardFunc();
+ } else {
+ auto var_name = ModuleDiscardVarName();
+ to_insert = b.Assign(var_name, true);
+ }
+
+ auto ip = utils::GetInsertionPoint(ctx, stmt);
+ ctx.InsertBefore(ip.first->Declaration()->statements, ip.second, to_insert);
+ return Return(stmt);
+ }
- // Iterate block statements and replace them as needed.
- for (auto* stmt : block->statements) {
- if (auto* new_stmt = Statement(stmt)) {
- ctx.Replace(stmt, new_stmt);
- }
+ // Handle statement
+ const ast::Statement* Statement(const ast::Statement* stmt) {
+ return Switch(
+ stmt,
+ [&](const ast::DiscardStatement* s) -> const ast::Statement* {
+ return ReplaceDiscardStatement(s);
+ },
+ [&](const ast::AssignmentStatement* s) -> const ast::Statement* {
+ auto* sem_lhs = sem.Get(s->lhs);
+ auto* sem_rhs = sem.Get(s->rhs);
+ if (MayDiscard(sem_lhs)) {
+ if (MayDiscard(sem_rhs)) {
+ TINT_ICE(Transform, b.Diagnostics())
+ << "Unexpected: both sides of assignment statement may "
+ "discard. Make sure transform::PromoteSideEffectsToDecl "
+ "was run first.";
+ }
+ return TryInsertAfter(s, sem_lhs);
+ } else if (MayDiscard(sem_rhs)) {
+ return TryInsertAfter(s, sem_rhs);
+ }
+ return nullptr;
+ },
+ [&](const ast::CallStatement* s) -> const ast::Statement* {
+ auto* sem_expr = sem.Get(s->expr);
+ if (!MayDiscard(sem_expr)) {
+ return nullptr;
+ }
+ return TryInsertAfter(s, sem_expr);
+ },
+ [&](const ast::ForLoopStatement* s) -> const ast::Statement* {
+ if (MayDiscard(sem.Get(s->condition))) {
+ TINT_ICE(Transform, b.Diagnostics())
+ << "Unexpected ForLoopStatement condition that may discard. "
+ "Make sure transform::PromoteSideEffectsToDecl was run "
+ "first.";
+ }
+ return nullptr;
+ },
+ [&](const ast::IfStatement* s) -> const ast::Statement* {
+ auto* sem_expr = sem.Get(s->condition);
+ if (!MayDiscard(sem_expr)) {
+ return nullptr;
+ }
+ return HoistAndInsertBefore(s, sem_expr);
+ },
+ [&](const ast::ReturnStatement* s) -> const ast::Statement* {
+ auto* sem_expr = sem.Get(s->value);
+ if (!MayDiscard(sem_expr)) {
+ return nullptr;
+ }
+ return HoistAndInsertBefore(s, sem_expr);
+ },
+ [&](const ast::SwitchStatement* s) -> const ast::Statement* {
+ auto* sem_expr = sem.Get(s->condition);
+ if (!MayDiscard(sem_expr)) {
+ return nullptr;
+ }
+ return HoistAndInsertBefore(s, sem_expr);
+ },
+ [&](const ast::VariableDeclStatement* s) -> const ast::Statement* {
+ auto* var = s->variable;
+ if (!var->constructor) {
+ return nullptr;
+ }
+ auto* sem_expr = sem.Get(var->constructor);
+ if (!MayDiscard(sem_expr)) {
+ return nullptr;
+ }
+ return TryInsertAfter(s, sem_expr);
+ });
+ }
- // Handle for loops, as they are the only other AST node that
- // contains statements outside of BlockStatements.
- if (auto* fl = stmt->As<ast::ForLoopStatement>()) {
- if (auto* new_stmt = Statement(fl->initializer)) {
- ctx.Replace(fl->initializer, new_stmt);
- }
- if (auto* new_stmt = Statement(fl->continuing)) {
- // NOTE: Should never reach here as we cannot discard in a
- // continuing block.
- ctx.Replace(fl->continuing, new_stmt);
- }
+ public:
+ /// Constructor
+ /// @param ctx_in the context
+ explicit State(CloneContext& ctx_in) : ctx(ctx_in), b(*ctx_in.dst), sem(ctx_in.src->Sem()) {}
+
+ /// Runs the transform
+ void Run() {
+ ctx.ReplaceAll([&](const ast::BlockStatement* block) -> const ast::Statement* {
+ // Iterate block statements and replace them as needed.
+ for (auto* stmt : block->statements) {
+ if (auto* new_stmt = Statement(stmt)) {
+ ctx.Replace(stmt, new_stmt);
+ }
+
+ // Handle for loops, as they are the only other AST node that
+ // contains statements outside of BlockStatements.
+ if (auto* fl = stmt->As<ast::ForLoopStatement>()) {
+ if (auto* new_stmt = Statement(fl->initializer)) {
+ ctx.Replace(fl->initializer, new_stmt);
+ }
+ if (auto* new_stmt = Statement(fl->continuing)) {
+ // NOTE: Should never reach here as we cannot discard in a
+ // continuing block.
+ ctx.Replace(fl->continuing, new_stmt);
+ }
+ }
}
- }
- return nullptr;
+ return nullptr;
});
- ctx.Clone();
- }
+ ctx.Clone();
+ }
};
} // namespace
@@ -372,22 +336,19 @@ class State {
UnwindDiscardFunctions::UnwindDiscardFunctions() = default;
UnwindDiscardFunctions::~UnwindDiscardFunctions() = default;
-void UnwindDiscardFunctions::Run(CloneContext& ctx,
- const DataMap&,
- DataMap&) const {
- State state(ctx);
- state.Run();
+void UnwindDiscardFunctions::Run(CloneContext& ctx, const DataMap&, DataMap&) const {
+ State state(ctx);
+ state.Run();
}
-bool UnwindDiscardFunctions::ShouldRun(const Program* program,
- const DataMap& /*data*/) const {
- auto& sem = program->Sem();
- for (auto* f : program->AST().Functions()) {
- if (sem.Get(f)->Behaviors().Contains(sem::Behavior::kDiscard)) {
- return true;
+bool UnwindDiscardFunctions::ShouldRun(const Program* program, const DataMap& /*data*/) const {
+ auto& sem = program->Sem();
+ for (auto* f : program->AST().Functions()) {
+ if (sem.Get(f)->Behaviors().Contains(sem::Behavior::kDiscard)) {
+ return true;
+ }
}
- }
- return false;
+ return false;
}
} // namespace tint::transform
diff --git a/chromium/third_party/dawn/src/tint/transform/unwind_discard_functions.h b/chromium/third_party/dawn/src/tint/transform/unwind_discard_functions.h
index 42bbecb31d4..3b1d838ceb1 100644
--- a/chromium/third_party/dawn/src/tint/transform/unwind_discard_functions.h
+++ b/chromium/third_party/dawn/src/tint/transform/unwind_discard_functions.h
@@ -36,31 +36,27 @@ namespace tint::transform {
///
/// @note Depends on the following transforms to have been run first:
/// * PromoteSideEffectsToDecl
-class UnwindDiscardFunctions
- : public Castable<UnwindDiscardFunctions, Transform> {
- public:
- /// Constructor
- UnwindDiscardFunctions();
+class UnwindDiscardFunctions : public Castable<UnwindDiscardFunctions, Transform> {
+ public:
+ /// Constructor
+ UnwindDiscardFunctions();
- /// Destructor
- ~UnwindDiscardFunctions() override;
+ /// Destructor
+ ~UnwindDiscardFunctions() override;
- protected:
- /// Runs the transform using the CloneContext built for transforming a
- /// program. Run() is responsible for calling Clone() on the CloneContext.
- /// @param ctx the CloneContext primed with the input program and
- /// ProgramBuilder
- /// @param inputs optional extra transform-specific input data
- /// @param outputs optional extra transform-specific output data
- void Run(CloneContext& ctx,
- const DataMap& inputs,
- DataMap& outputs) const override;
+ protected:
+ /// Runs the transform using the CloneContext built for transforming a
+ /// program. Run() is responsible for calling Clone() on the CloneContext.
+ /// @param ctx the CloneContext primed with the input program and
+ /// ProgramBuilder
+ /// @param inputs optional extra transform-specific input data
+ /// @param outputs optional extra transform-specific output data
+ void Run(CloneContext& ctx, const DataMap& inputs, DataMap& outputs) const override;
- /// @param program the program to inspect
- /// @param data optional extra transform-specific input data
- /// @returns true if this transform should be run for the given program
- bool ShouldRun(const Program* program,
- const DataMap& data = {}) const override;
+ /// @param program the program to inspect
+ /// @param data optional extra transform-specific input data
+ /// @returns true if this transform should be run for the given program
+ bool ShouldRun(const Program* program, const DataMap& data = {}) const override;
};
} // namespace tint::transform
diff --git a/chromium/third_party/dawn/src/tint/transform/unwind_discard_functions_test.cc b/chromium/third_party/dawn/src/tint/transform/unwind_discard_functions_test.cc
index 0b8c0fcae0d..481df9dfdbd 100644
--- a/chromium/third_party/dawn/src/tint/transform/unwind_discard_functions_test.cc
+++ b/chromium/third_party/dawn/src/tint/transform/unwind_discard_functions_test.cc
@@ -22,31 +22,31 @@ namespace {
using UnwindDiscardFunctionsTest = TransformTest;
TEST_F(UnwindDiscardFunctionsTest, EmptyModule) {
- auto* src = "";
- auto* expect = src;
+ auto* src = "";
+ auto* expect = src;
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl, UnwindDiscardFunctions>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl, UnwindDiscardFunctions>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(UnwindDiscardFunctionsTest, ShouldRun_NoDiscardFunc) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
}
)";
- EXPECT_FALSE(ShouldRun<UnwindDiscardFunctions>(src));
+ EXPECT_FALSE(ShouldRun<UnwindDiscardFunctions>(src));
}
TEST_F(UnwindDiscardFunctionsTest, SingleDiscardFunc_NoCall) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
discard;
}
)";
- auto* expect = R"(
+ auto* expect = R"(
var<private> tint_discard : bool = false;
fn f() {
@@ -55,14 +55,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl, UnwindDiscardFunctions>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl, UnwindDiscardFunctions>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(UnwindDiscardFunctionsTest, MultipleDiscardFuncs_NoCall) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
discard;
let marker1 = 0;
@@ -73,7 +73,7 @@ fn g() {
let marker1 = 0;
}
)";
- auto* expect = R"(
+ auto* expect = R"(
var<private> tint_discard : bool = false;
fn f() {
@@ -89,27 +89,27 @@ fn g() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl, UnwindDiscardFunctions>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl, UnwindDiscardFunctions>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(UnwindDiscardFunctionsTest, Call_VoidReturn) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
discard;
let marker1 = 0;
}
-@stage(fragment)
+@fragment
fn main(@builtin(position) coord_in: vec4<f32>) -> @location(0) vec4<f32> {
f();
let marker1 = 0;
return vec4<f32>();
}
)";
- auto* expect = R"(
+ auto* expect = R"(
var<private> tint_discard : bool = false;
fn f() {
@@ -122,7 +122,7 @@ fn tint_discard_func() {
discard;
}
-@stage(fragment)
+@fragment
fn main(@builtin(position) coord_in : vec4<f32>) -> @location(0) vec4<f32> {
f();
if (tint_discard) {
@@ -134,14 +134,14 @@ fn main(@builtin(position) coord_in : vec4<f32>) -> @location(0) vec4<f32> {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl, UnwindDiscardFunctions>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl, UnwindDiscardFunctions>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(UnwindDiscardFunctionsTest, Call_NonVoidReturn) {
- auto* src = R"(
+ auto* src = R"(
struct S {
x : i32,
y : i32,
@@ -156,7 +156,7 @@ fn f() -> S {
return s;
}
-@stage(fragment)
+@fragment
fn main(@builtin(position) coord_in: vec4<f32>) -> @location(0) vec4<f32> {
let marker1 = 0;
f();
@@ -164,7 +164,7 @@ fn main(@builtin(position) coord_in: vec4<f32>) -> @location(0) vec4<f32> {
return vec4<f32>();
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct S {
x : i32,
y : i32,
@@ -186,7 +186,7 @@ fn tint_discard_func() {
discard;
}
-@stage(fragment)
+@fragment
fn main(@builtin(position) coord_in : vec4<f32>) -> @location(0) vec4<f32> {
let marker1 = 0;
f();
@@ -199,14 +199,14 @@ fn main(@builtin(position) coord_in : vec4<f32>) -> @location(0) vec4<f32> {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl, UnwindDiscardFunctions>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl, UnwindDiscardFunctions>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(UnwindDiscardFunctionsTest, Call_Nested) {
- auto* src = R"(
+ auto* src = R"(
fn f() -> i32 {
let marker1 = 0;
if (true) {
@@ -230,7 +230,7 @@ fn h() -> i32{
return 0;
}
-@stage(fragment)
+@fragment
fn main(@builtin(position) coord_in: vec4<f32>) -> @location(0) vec4<f32> {
let marker1 = 0;
h();
@@ -238,7 +238,7 @@ fn main(@builtin(position) coord_in: vec4<f32>) -> @location(0) vec4<f32> {
return vec4<f32>();
}
)";
- auto* expect = R"(
+ auto* expect = R"(
var<private> tint_discard : bool = false;
fn f() -> i32 {
@@ -275,7 +275,7 @@ fn tint_discard_func() {
discard;
}
-@stage(fragment)
+@fragment
fn main(@builtin(position) coord_in : vec4<f32>) -> @location(0) vec4<f32> {
let marker1 = 0;
h();
@@ -288,14 +288,14 @@ fn main(@builtin(position) coord_in : vec4<f32>) -> @location(0) vec4<f32> {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl, UnwindDiscardFunctions>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl, UnwindDiscardFunctions>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(UnwindDiscardFunctionsTest, Call_Multiple) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
discard;
let marker1 = 0;
@@ -311,7 +311,7 @@ fn h() {
let marker1 = 0;
}
-@stage(fragment)
+@fragment
fn main(@builtin(position) coord_in: vec4<f32>) -> @location(0) vec4<f32> {
let marker1 = 0;
f();
@@ -323,7 +323,7 @@ fn main(@builtin(position) coord_in: vec4<f32>) -> @location(0) vec4<f32> {
return vec4<f32>();
}
)";
- auto* expect = R"(
+ auto* expect = R"(
var<private> tint_discard : bool = false;
fn f() {
@@ -348,7 +348,7 @@ fn tint_discard_func() {
discard;
}
-@stage(fragment)
+@fragment
fn main(@builtin(position) coord_in : vec4<f32>) -> @location(0) vec4<f32> {
let marker1 = 0;
f();
@@ -373,15 +373,15 @@ fn main(@builtin(position) coord_in : vec4<f32>) -> @location(0) vec4<f32> {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl, UnwindDiscardFunctions>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl, UnwindDiscardFunctions>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(UnwindDiscardFunctionsTest, Call_DiscardFuncDeclaredBelow) {
- auto* src = R"(
-@stage(fragment)
+ auto* src = R"(
+@fragment
fn main(@builtin(position) coord_in: vec4<f32>) -> @location(0) vec4<f32> {
f();
let marker1 = 0;
@@ -393,14 +393,14 @@ fn f() {
let marker1 = 0;
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn tint_discard_func() {
discard;
}
var<private> tint_discard : bool = false;
-@stage(fragment)
+@fragment
fn main(@builtin(position) coord_in : vec4<f32>) -> @location(0) vec4<f32> {
f();
if (tint_discard) {
@@ -418,14 +418,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl, UnwindDiscardFunctions>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl, UnwindDiscardFunctions>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(UnwindDiscardFunctionsTest, If) {
- auto* src = R"(
+ auto* src = R"(
fn f() -> i32 {
if (true) {
discard;
@@ -433,7 +433,7 @@ fn f() -> i32 {
return 42;
}
-@stage(fragment)
+@fragment
fn main(@builtin(position) coord_in: vec4<f32>) -> @location(0) vec4<f32> {
if (f() == 42) {
let marker1 = 0;
@@ -441,7 +441,7 @@ fn main(@builtin(position) coord_in: vec4<f32>) -> @location(0) vec4<f32> {
return vec4<f32>();
}
)";
- auto* expect = R"(
+ auto* expect = R"(
var<private> tint_discard : bool = false;
fn f() -> i32 {
@@ -456,7 +456,7 @@ fn tint_discard_func() {
discard;
}
-@stage(fragment)
+@fragment
fn main(@builtin(position) coord_in : vec4<f32>) -> @location(0) vec4<f32> {
let tint_symbol = f();
if (tint_discard) {
@@ -470,14 +470,14 @@ fn main(@builtin(position) coord_in : vec4<f32>) -> @location(0) vec4<f32> {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl, UnwindDiscardFunctions>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl, UnwindDiscardFunctions>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(UnwindDiscardFunctionsTest, ElseIf) {
- auto* src = R"(
+ auto* src = R"(
fn f() -> i32 {
if (true) {
discard;
@@ -485,7 +485,7 @@ fn f() -> i32 {
return 42;
}
-@stage(fragment)
+@fragment
fn main(@builtin(position) coord_in: vec4<f32>) -> @location(0) vec4<f32> {
if (true) {
let marker1 = 0;
@@ -497,7 +497,7 @@ fn main(@builtin(position) coord_in: vec4<f32>) -> @location(0) vec4<f32> {
return vec4<f32>();
}
)";
- auto* expect = R"(
+ auto* expect = R"(
var<private> tint_discard : bool = false;
fn f() -> i32 {
@@ -512,7 +512,7 @@ fn tint_discard_func() {
discard;
}
-@stage(fragment)
+@fragment
fn main(@builtin(position) coord_in : vec4<f32>) -> @location(0) vec4<f32> {
if (true) {
let marker1 = 0;
@@ -532,14 +532,14 @@ fn main(@builtin(position) coord_in : vec4<f32>) -> @location(0) vec4<f32> {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl, UnwindDiscardFunctions>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl, UnwindDiscardFunctions>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(UnwindDiscardFunctionsTest, ForLoop_Init_Assignment) {
- auto* src = R"(
+ auto* src = R"(
fn f() -> i32 {
if (true) {
discard;
@@ -547,7 +547,7 @@ fn f() -> i32 {
return 42;
}
-@stage(fragment)
+@fragment
fn main(@builtin(position) coord_in: vec4<f32>) -> @location(0) vec4<f32> {
let marker1 = 0;
var a = 0;
@@ -558,7 +558,7 @@ fn main(@builtin(position) coord_in: vec4<f32>) -> @location(0) vec4<f32> {
return vec4<f32>();
}
)";
- auto* expect = R"(
+ auto* expect = R"(
var<private> tint_discard : bool = false;
fn f() -> i32 {
@@ -573,7 +573,7 @@ fn tint_discard_func() {
discard;
}
-@stage(fragment)
+@fragment
fn main(@builtin(position) coord_in : vec4<f32>) -> @location(0) vec4<f32> {
let marker1 = 0;
var a = 0;
@@ -590,14 +590,14 @@ fn main(@builtin(position) coord_in : vec4<f32>) -> @location(0) vec4<f32> {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl, UnwindDiscardFunctions>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl, UnwindDiscardFunctions>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(UnwindDiscardFunctionsTest, ForLoop_Init_Call) {
- auto* src = R"(
+ auto* src = R"(
fn f() -> i32 {
if (true) {
discard;
@@ -605,7 +605,7 @@ fn f() -> i32 {
return 42;
}
-@stage(fragment)
+@fragment
fn main(@builtin(position) coord_in: vec4<f32>) -> @location(0) vec4<f32> {
let marker1 = 0;
for (f(); ; ) {
@@ -615,7 +615,7 @@ fn main(@builtin(position) coord_in: vec4<f32>) -> @location(0) vec4<f32> {
return vec4<f32>();
}
)";
- auto* expect = R"(
+ auto* expect = R"(
var<private> tint_discard : bool = false;
fn f() -> i32 {
@@ -630,7 +630,7 @@ fn tint_discard_func() {
discard;
}
-@stage(fragment)
+@fragment
fn main(@builtin(position) coord_in : vec4<f32>) -> @location(0) vec4<f32> {
let marker1 = 0;
var tint_symbol = f();
@@ -646,14 +646,14 @@ fn main(@builtin(position) coord_in : vec4<f32>) -> @location(0) vec4<f32> {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl, UnwindDiscardFunctions>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl, UnwindDiscardFunctions>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(UnwindDiscardFunctionsTest, ForLoop_Init_VariableDecl) {
- auto* src = R"(
+ auto* src = R"(
fn f() -> i32 {
if (true) {
discard;
@@ -661,7 +661,7 @@ fn f() -> i32 {
return 42;
}
-@stage(fragment)
+@fragment
fn main(@builtin(position) coord_in: vec4<f32>) -> @location(0) vec4<f32> {
let marker1 = 0;
for (let i = f(); ; ) {
@@ -671,7 +671,7 @@ fn main(@builtin(position) coord_in: vec4<f32>) -> @location(0) vec4<f32> {
return vec4<f32>();
}
)";
- auto* expect = R"(
+ auto* expect = R"(
var<private> tint_discard : bool = false;
fn f() -> i32 {
@@ -686,7 +686,7 @@ fn tint_discard_func() {
discard;
}
-@stage(fragment)
+@fragment
fn main(@builtin(position) coord_in : vec4<f32>) -> @location(0) vec4<f32> {
let marker1 = 0;
var tint_symbol = f();
@@ -702,14 +702,14 @@ fn main(@builtin(position) coord_in : vec4<f32>) -> @location(0) vec4<f32> {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl, UnwindDiscardFunctions>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl, UnwindDiscardFunctions>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(UnwindDiscardFunctionsTest, ForLoop_Cond) {
- auto* src = R"(
+ auto* src = R"(
fn f() -> i32 {
if (true) {
discard;
@@ -717,7 +717,7 @@ fn f() -> i32 {
return 42;
}
-@stage(fragment)
+@fragment
fn main(@builtin(position) coord_in: vec4<f32>) -> @location(0) vec4<f32> {
let marker1 = 0;
for (; f() == 42; ) {
@@ -727,7 +727,7 @@ fn main(@builtin(position) coord_in: vec4<f32>) -> @location(0) vec4<f32> {
return vec4<f32>();
}
)";
- auto* expect = R"(
+ auto* expect = R"(
var<private> tint_discard : bool = false;
fn f() -> i32 {
@@ -742,7 +742,7 @@ fn tint_discard_func() {
discard;
}
-@stage(fragment)
+@fragment
fn main(@builtin(position) coord_in : vec4<f32>) -> @location(0) vec4<f32> {
let marker1 = 0;
loop {
@@ -763,14 +763,14 @@ fn main(@builtin(position) coord_in : vec4<f32>) -> @location(0) vec4<f32> {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl, UnwindDiscardFunctions>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl, UnwindDiscardFunctions>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(UnwindDiscardFunctionsTest, ForLoop_Cont) {
- auto* src = R"(
+ auto* src = R"(
fn f() -> i32 {
if (true) {
discard;
@@ -778,7 +778,7 @@ fn f() -> i32 {
return 42;
}
-@stage(fragment)
+@fragment
fn main(@builtin(position) coord_in: vec4<f32>) -> @location(0) vec4<f32> {
let marker1 = 0;
for (; ; f()) {
@@ -788,20 +788,20 @@ fn main(@builtin(position) coord_in: vec4<f32>) -> @location(0) vec4<f32> {
return vec4<f32>();
}
)";
- auto* expect =
- R"(test:12:12 error: cannot call a function that may discard inside a continuing block
+ auto* expect =
+ R"(test:12:12 error: cannot call a function that may discard inside a continuing block
for (; ; f()) {
^
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl, UnwindDiscardFunctions>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl, UnwindDiscardFunctions>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(UnwindDiscardFunctionsTest, Switch) {
- auto* src = R"(
+ auto* src = R"(
fn f() -> i32 {
if (true) {
discard;
@@ -809,7 +809,7 @@ fn f() -> i32 {
return 42;
}
-@stage(fragment)
+@fragment
fn main(@builtin(position) coord_in: vec4<f32>) -> @location(0) vec4<f32> {
switch (f()) {
case 0: {
@@ -828,7 +828,7 @@ fn main(@builtin(position) coord_in: vec4<f32>) -> @location(0) vec4<f32> {
return vec4<f32>();
}
)";
- auto* expect = R"(
+ auto* expect = R"(
var<private> tint_discard : bool = false;
fn f() -> i32 {
@@ -843,7 +843,7 @@ fn tint_discard_func() {
discard;
}
-@stage(fragment)
+@fragment
fn main(@builtin(position) coord_in : vec4<f32>) -> @location(0) vec4<f32> {
var tint_symbol = f();
if (tint_discard) {
@@ -868,14 +868,14 @@ fn main(@builtin(position) coord_in : vec4<f32>) -> @location(0) vec4<f32> {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl, UnwindDiscardFunctions>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl, UnwindDiscardFunctions>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(UnwindDiscardFunctionsTest, Return) {
- auto* src = R"(
+ auto* src = R"(
struct S {
x : i32,
y : i32,
@@ -893,14 +893,14 @@ fn g() -> S {
return f();
}
-@stage(fragment)
+@fragment
fn main(@builtin(position) coord_in: vec4<f32>) -> @location(0) vec4<f32> {
let marker1 = 0;
g();
return vec4<f32>();
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct S {
x : i32,
y : i32,
@@ -929,7 +929,7 @@ fn tint_discard_func() {
discard;
}
-@stage(fragment)
+@fragment
fn main(@builtin(position) coord_in : vec4<f32>) -> @location(0) vec4<f32> {
let marker1 = 0;
g();
@@ -941,14 +941,14 @@ fn main(@builtin(position) coord_in : vec4<f32>) -> @location(0) vec4<f32> {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl, UnwindDiscardFunctions>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl, UnwindDiscardFunctions>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(UnwindDiscardFunctionsTest, VariableDecl) {
- auto* src = R"(
+ auto* src = R"(
fn f() -> i32 {
if (true) {
discard;
@@ -956,14 +956,14 @@ fn f() -> i32 {
return 42;
}
-@stage(fragment)
+@fragment
fn main(@builtin(position) coord_in: vec4<f32>) -> @location(0) vec4<f32> {
var a = f();
let marker1 = 0;
return vec4<f32>();
}
)";
- auto* expect = R"(
+ auto* expect = R"(
var<private> tint_discard : bool = false;
fn f() -> i32 {
@@ -978,7 +978,7 @@ fn tint_discard_func() {
discard;
}
-@stage(fragment)
+@fragment
fn main(@builtin(position) coord_in : vec4<f32>) -> @location(0) vec4<f32> {
var a = f();
if (tint_discard) {
@@ -990,14 +990,14 @@ fn main(@builtin(position) coord_in : vec4<f32>) -> @location(0) vec4<f32> {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl, UnwindDiscardFunctions>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl, UnwindDiscardFunctions>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(UnwindDiscardFunctionsTest, Assignment_RightDiscard) {
- auto* src = R"(
+ auto* src = R"(
fn f() -> i32 {
if (true) {
discard;
@@ -1005,7 +1005,7 @@ fn f() -> i32 {
return 42;
}
-@stage(fragment)
+@fragment
fn main(@builtin(position) coord_in: vec4<f32>) -> @location(0) vec4<f32> {
var a : i32;
a = f();
@@ -1013,7 +1013,7 @@ fn main(@builtin(position) coord_in: vec4<f32>) -> @location(0) vec4<f32> {
return vec4<f32>();
}
)";
- auto* expect = R"(
+ auto* expect = R"(
var<private> tint_discard : bool = false;
fn f() -> i32 {
@@ -1028,7 +1028,7 @@ fn tint_discard_func() {
discard;
}
-@stage(fragment)
+@fragment
fn main(@builtin(position) coord_in : vec4<f32>) -> @location(0) vec4<f32> {
var a : i32;
a = f();
@@ -1041,14 +1041,14 @@ fn main(@builtin(position) coord_in : vec4<f32>) -> @location(0) vec4<f32> {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl, UnwindDiscardFunctions>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl, UnwindDiscardFunctions>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(UnwindDiscardFunctionsTest, Assignment_LeftDiscard) {
- auto* src = R"(
+ auto* src = R"(
fn f() -> i32 {
if (true) {
discard;
@@ -1056,7 +1056,7 @@ fn f() -> i32 {
return 0;
}
-@stage(fragment)
+@fragment
fn main(@builtin(position) coord_in: vec4<f32>) -> @location(0) vec4<f32> {
var b = array<i32, 10>();
b[f()] = 10;
@@ -1064,7 +1064,7 @@ fn main(@builtin(position) coord_in: vec4<f32>) -> @location(0) vec4<f32> {
return vec4<f32>();
}
)";
- auto* expect = R"(
+ auto* expect = R"(
var<private> tint_discard : bool = false;
fn f() -> i32 {
@@ -1079,7 +1079,7 @@ fn tint_discard_func() {
discard;
}
-@stage(fragment)
+@fragment
fn main(@builtin(position) coord_in : vec4<f32>) -> @location(0) vec4<f32> {
var b = array<i32, 10>();
let tint_symbol = f();
@@ -1093,14 +1093,14 @@ fn main(@builtin(position) coord_in : vec4<f32>) -> @location(0) vec4<f32> {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl, UnwindDiscardFunctions>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl, UnwindDiscardFunctions>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(UnwindDiscardFunctionsTest, Assignment_BothDiscard) {
- auto* src = R"(
+ auto* src = R"(
fn f() -> i32 {
if (true) {
discard;
@@ -1115,7 +1115,7 @@ fn g() -> i32 {
return 0;
}
-@stage(fragment)
+@fragment
fn main(@builtin(position) coord_in: vec4<f32>) -> @location(0) vec4<f32> {
var b = array<i32, 10>();
b[f()] = g();
@@ -1123,7 +1123,7 @@ fn main(@builtin(position) coord_in: vec4<f32>) -> @location(0) vec4<f32> {
return vec4<f32>();
}
)";
- auto* expect = R"(
+ auto* expect = R"(
var<private> tint_discard : bool = false;
fn f() -> i32 {
@@ -1146,7 +1146,7 @@ fn tint_discard_func() {
discard;
}
-@stage(fragment)
+@fragment
fn main(@builtin(position) coord_in : vec4<f32>) -> @location(0) vec4<f32> {
var b = array<i32, 10>();
let tint_symbol = g();
@@ -1165,14 +1165,14 @@ fn main(@builtin(position) coord_in : vec4<f32>) -> @location(0) vec4<f32> {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl, UnwindDiscardFunctions>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl, UnwindDiscardFunctions>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(UnwindDiscardFunctionsTest, Binary_Arith_MultipleDiscardFuncs) {
- auto* src = R"(
+ auto* src = R"(
fn f() -> i32 {
if (true) {
discard;
@@ -1194,7 +1194,7 @@ fn h() -> i32{
return 0;
}
-@stage(fragment)
+@fragment
fn main(@builtin(position) coord_in: vec4<f32>) -> @location(0) vec4<f32> {
if ((f() + g() + h()) == 0) {
let marker1 = 0;
@@ -1202,7 +1202,7 @@ fn main(@builtin(position) coord_in: vec4<f32>) -> @location(0) vec4<f32> {
return vec4<f32>();
}
)";
- auto* expect = R"(
+ auto* expect = R"(
var<private> tint_discard : bool = false;
fn f() -> i32 {
@@ -1233,7 +1233,7 @@ fn tint_discard_func() {
discard;
}
-@stage(fragment)
+@fragment
fn main(@builtin(position) coord_in : vec4<f32>) -> @location(0) vec4<f32> {
let tint_symbol = f();
if (tint_discard) {
@@ -1257,14 +1257,14 @@ fn main(@builtin(position) coord_in : vec4<f32>) -> @location(0) vec4<f32> {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl, UnwindDiscardFunctions>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl, UnwindDiscardFunctions>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(UnwindDiscardFunctionsTest, Binary_Logical_MultipleDiscardFuncs) {
- auto* src = R"(
+ auto* src = R"(
fn f() -> i32 {
if (true) {
discard;
@@ -1286,7 +1286,7 @@ fn h() -> i32{
return 0;
}
-@stage(fragment)
+@fragment
fn main(@builtin(position) coord_in: vec4<f32>) -> @location(0) vec4<f32> {
if (f() == 1 && g() == 2 && h() == 3) {
let marker1 = 0;
@@ -1294,7 +1294,7 @@ fn main(@builtin(position) coord_in: vec4<f32>) -> @location(0) vec4<f32> {
return vec4<f32>();
}
)";
- auto* expect = R"(
+ auto* expect = R"(
var<private> tint_discard : bool = false;
fn f() -> i32 {
@@ -1325,7 +1325,7 @@ fn tint_discard_func() {
discard;
}
-@stage(fragment)
+@fragment
fn main(@builtin(position) coord_in : vec4<f32>) -> @location(0) vec4<f32> {
let tint_symbol_2 = f();
if (tint_discard) {
@@ -1357,14 +1357,14 @@ fn main(@builtin(position) coord_in : vec4<f32>) -> @location(0) vec4<f32> {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl, UnwindDiscardFunctions>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl, UnwindDiscardFunctions>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(UnwindDiscardFunctionsTest, EnsureNoSymbolCollision) {
- auto* src = R"(
+ auto* src = R"(
var<private> tint_discard_func : i32;
var<private> tint_discard : i32;
@@ -1373,14 +1373,14 @@ fn f() {
let marker1 = 0;
}
-@stage(fragment)
+@fragment
fn main(@builtin(position) coord_in: vec4<f32>) -> @location(0) vec4<f32> {
f();
let marker1 = 0;
return vec4<f32>();
}
)";
- auto* expect = R"(
+ auto* expect = R"(
var<private> tint_discard_func : i32;
var<private> tint_discard : i32;
@@ -1397,7 +1397,7 @@ fn tint_discard_func_1() {
discard;
}
-@stage(fragment)
+@fragment
fn main(@builtin(position) coord_in : vec4<f32>) -> @location(0) vec4<f32> {
f();
if (tint_discard_1) {
@@ -1409,10 +1409,10 @@ fn main(@builtin(position) coord_in : vec4<f32>) -> @location(0) vec4<f32> {
}
)";
- DataMap data;
- auto got = Run<PromoteSideEffectsToDecl, UnwindDiscardFunctions>(src, data);
+ DataMap data;
+ auto got = Run<PromoteSideEffectsToDecl, UnwindDiscardFunctions>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
} // namespace
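Every test in this suite follows the same shape: a WGSL `src` raw string whose fragment entry point calls helper functions that may `discard`, an `expect` raw string with the unwound output (calls hoisted to `let`s, `tint_discard` checked after each call, the `discard` itself moved into `tint_discard_func`), and a `Run<PromoteSideEffectsToDecl, UnwindDiscardFunctions>(src, data)` call compared with `EXPECT_EQ`. A minimal sketch of that pattern is shown here; it assumes the same `TransformTest` fixture and `Run`/`str` helpers used above, the transform header paths are assumed to follow the file naming in this patch, and the WGSL body is invented for illustration rather than taken from the suite.

#include "src/tint/transform/promote_side_effects_to_decl.h"  // path assumed
#include "src/tint/transform/test_helper.h"
#include "src/tint/transform/unwind_discard_functions.h"       // path assumed

namespace tint::transform {
namespace {

using UnwindDiscardFunctionsSketchTest = TransformTest;

// Minimal sketch of the test pattern above: invented WGSL input, run both
// transforms, inspect the printed result.
TEST_F(UnwindDiscardFunctionsSketchTest, CallInLetInitializer) {
    auto* src = R"(
fn f() -> i32 {
  if (true) {
    discard;
  }
  return 0;
}

@fragment
fn main() -> @location(0) vec4<f32> {
  let x = f();
  return vec4<f32>(f32(x));
}
)";

    DataMap data;
    auto got = Run<PromoteSideEffectsToDecl, UnwindDiscardFunctions>(src, data);

    // The real tests compare str(got) against a full `expect` string; the
    // unwound output is omitted here because its generated symbols are
    // transform-defined.
    EXPECT_NE(str(got), "");
}

}  // namespace
}  // namespace tint::transform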
diff --git a/chromium/third_party/dawn/src/tint/transform/utils/get_insertion_point.cc b/chromium/third_party/dawn/src/tint/transform/utils/get_insertion_point.cc
index 0f00e0c7cbc..d10d134d34d 100644
--- a/chromium/third_party/dawn/src/tint/transform/utils/get_insertion_point.cc
+++ b/chromium/third_party/dawn/src/tint/transform/utils/get_insertion_point.cc
@@ -19,40 +19,39 @@
namespace tint::transform::utils {
-InsertionPoint GetInsertionPoint(CloneContext& ctx,
- const ast::Statement* stmt) {
- auto& sem = ctx.src->Sem();
- auto& diag = ctx.dst->Diagnostics();
- using RetType = std::pair<const sem::BlockStatement*, const ast::Statement*>;
+InsertionPoint GetInsertionPoint(CloneContext& ctx, const ast::Statement* stmt) {
+ auto& sem = ctx.src->Sem();
+ auto& diag = ctx.dst->Diagnostics();
+ using RetType = std::pair<const sem::BlockStatement*, const ast::Statement*>;
- if (auto* sem_stmt = sem.Get(stmt)) {
- auto* parent = sem_stmt->Parent();
- return Switch(
- parent,
- [&](const sem::BlockStatement* block) -> RetType {
- // Common case, can insert in the current block above/below the input
- // statement.
- return {block, stmt};
- },
- [&](const sem::ForLoopStatement* fl) -> RetType {
- // `stmt` is either the for loop initializer or the continuing
- // statement of a for-loop.
- if (fl->Declaration()->initializer == stmt) {
- // For loop init, can insert above the for loop itself.
- return {fl->Block(), fl->Declaration()};
- }
+ if (auto* sem_stmt = sem.Get(stmt)) {
+ auto* parent = sem_stmt->Parent();
+ return Switch(
+ parent,
+ [&](const sem::BlockStatement* block) -> RetType {
+ // Common case, can insert in the current block above/below the input
+ // statement.
+ return {block, stmt};
+ },
+ [&](const sem::ForLoopStatement* fl) -> RetType {
+ // `stmt` is either the for loop initializer or the continuing
+ // statement of a for-loop.
+ if (fl->Declaration()->initializer == stmt) {
+ // For loop init, can insert above the for loop itself.
+ return {fl->Block(), fl->Declaration()};
+ }
- // Cannot insert before or after continuing statement of a for-loop
- return {};
- },
- [&](Default) -> RetType {
- TINT_ICE(Transform, diag) << "expected parent of statement to be "
- "either a block or for loop";
- return {};
- });
- }
+ // Cannot insert before or after continuing statement of a for-loop
+ return {};
+ },
+ [&](Default) -> RetType {
+ TINT_ICE(Transform, diag) << "expected parent of statement to be "
+ "either a block or for loop";
+ return {};
+ });
+ }
- return {};
+ return {};
}
} // namespace tint::transform::utils
diff --git a/chromium/third_party/dawn/src/tint/transform/utils/get_insertion_point.h b/chromium/third_party/dawn/src/tint/transform/utils/get_insertion_point.h
index 85abcea8706..14e867c9a37 100644
--- a/chromium/third_party/dawn/src/tint/transform/utils/get_insertion_point.h
+++ b/chromium/third_party/dawn/src/tint/transform/utils/get_insertion_point.h
@@ -24,8 +24,7 @@ namespace tint::transform::utils {
/// InsertionPoint is a pair of the block (`first`) within which, and the
/// statement (`second`) before or after which to insert.
-using InsertionPoint =
- std::pair<const sem::BlockStatement*, const ast::Statement*>;
+using InsertionPoint = std::pair<const sem::BlockStatement*, const ast::Statement*>;
/// For the input statement, returns the block and statement within that
/// block to insert before/after. If `stmt` is a for-loop continue statement,
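Together, the reformatted GetInsertionPoint() above and this header describe a small lookup: given a statement, return the semantic block plus the statement to insert next to, or an empty pair when the position (a for-loop continuing statement) has no direct insertion point. A hypothetical caller, sketched only from the CloneContext::InsertBefore usage visible in hoist_to_decl_before.cc further down, could consume the pair as follows; InsertDeclBefore itself is not part of this patch.

#include "src/tint/transform/utils/get_insertion_point.h"

namespace tint::transform {

// Hypothetical helper: insert `decl` immediately before `stmt` in the cloned
// program when GetInsertionPoint() finds a usable block/statement pair.
bool InsertDeclBefore(CloneContext& ctx,
                      const ast::Statement* stmt,
                      const ast::Statement* decl) {
    auto ip = utils::GetInsertionPoint(ctx, stmt);
    if (!ip.first) {
        // A for-loop continuing statement yields an empty pair; callers such
        // as HoistToDeclBefore decompose the for-loop into a loop instead.
        return false;
    }
    // ip.first is the semantic block, ip.second the AST statement to insert
    // before, matching the ctx.InsertBefore() calls in HoistToDeclBefore.
    ctx.InsertBefore(ip.first->Declaration()->statements, ip.second, decl);
    return true;
}

}  // namespace tint::transform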
diff --git a/chromium/third_party/dawn/src/tint/transform/utils/get_insertion_point_test.cc b/chromium/third_party/dawn/src/tint/transform/utils/get_insertion_point_test.cc
index 48e358ece89..071cfea40a0 100644
--- a/chromium/third_party/dawn/src/tint/transform/utils/get_insertion_point_test.cc
+++ b/chromium/third_party/dawn/src/tint/transform/utils/get_insertion_point_test.cc
@@ -20,74 +20,76 @@
#include "src/tint/transform/test_helper.h"
#include "src/tint/transform/utils/get_insertion_point.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::transform {
namespace {
using GetInsertionPointTest = ::testing::Test;
TEST_F(GetInsertionPointTest, Block) {
- // fn f() {
- // var a = 1;
- // }
- ProgramBuilder b;
- auto* expr = b.Expr(1);
- auto* var = b.Decl(b.Var("a", nullptr, expr));
- auto* block = b.Block(var);
- b.Func("f", {}, b.ty.void_(), {block});
+ // fn f() {
+ // var a = 1i;
+ // }
+ ProgramBuilder b;
+ auto* expr = b.Expr(1_i);
+ auto* var = b.Decl(b.Var("a", nullptr, expr));
+ auto* block = b.Block(var);
+ b.Func("f", {}, b.ty.void_(), {block});
- Program original(std::move(b));
- ProgramBuilder cloned_b;
- CloneContext ctx(&cloned_b, &original);
+ Program original(std::move(b));
+ ProgramBuilder cloned_b;
+ CloneContext ctx(&cloned_b, &original);
- // Can insert in block containing the variable, above or below the input
- // statement.
- auto ip = utils::GetInsertionPoint(ctx, var);
- ASSERT_EQ(ip.first->Declaration(), block);
- ASSERT_EQ(ip.second, var);
+ // Can insert in block containing the variable, above or below the input
+ // statement.
+ auto ip = utils::GetInsertionPoint(ctx, var);
+ ASSERT_EQ(ip.first->Declaration(), block);
+ ASSERT_EQ(ip.second, var);
}
TEST_F(GetInsertionPointTest, ForLoopInit) {
- // fn f() {
- // for(var a = 1; true; ) {
- // }
- // }
- ProgramBuilder b;
- auto* expr = b.Expr(1);
- auto* var = b.Decl(b.Var("a", nullptr, expr));
- auto* fl = b.For(var, b.Expr(true), {}, b.Block());
- auto* func_block = b.Block(fl);
- b.Func("f", {}, b.ty.void_(), {func_block});
+ // fn f() {
+ // for(var a = 1i; true; ) {
+ // }
+ // }
+ ProgramBuilder b;
+ auto* expr = b.Expr(1_i);
+ auto* var = b.Decl(b.Var("a", nullptr, expr));
+ auto* fl = b.For(var, b.Expr(true), {}, b.Block());
+ auto* func_block = b.Block(fl);
+ b.Func("f", {}, b.ty.void_(), {func_block});
- Program original(std::move(b));
- ProgramBuilder cloned_b;
- CloneContext ctx(&cloned_b, &original);
+ Program original(std::move(b));
+ ProgramBuilder cloned_b;
+ CloneContext ctx(&cloned_b, &original);
- // Can insert in block containing for-loop above the for-loop itself.
- auto ip = utils::GetInsertionPoint(ctx, var);
- ASSERT_EQ(ip.first->Declaration(), func_block);
- ASSERT_EQ(ip.second, fl);
+ // Can insert in block containing for-loop above the for-loop itself.
+ auto ip = utils::GetInsertionPoint(ctx, var);
+ ASSERT_EQ(ip.first->Declaration(), func_block);
+ ASSERT_EQ(ip.second, fl);
}
TEST_F(GetInsertionPointTest, ForLoopCont_Invalid) {
- // fn f() {
- // for(; true; var a = 1) {
- // }
- // }
- ProgramBuilder b;
- auto* expr = b.Expr(1);
- auto* var = b.Decl(b.Var("a", nullptr, expr));
- auto* s = b.For({}, b.Expr(true), var, b.Block());
- b.Func("f", {}, b.ty.void_(), {s});
+ // fn f() {
+ // for(; true; var a = 1i) {
+ // }
+ // }
+ ProgramBuilder b;
+ auto* expr = b.Expr(1_i);
+ auto* var = b.Decl(b.Var("a", nullptr, expr));
+ auto* s = b.For({}, b.Expr(true), var, b.Block());
+ b.Func("f", {}, b.ty.void_(), {s});
- Program original(std::move(b));
- ProgramBuilder cloned_b;
- CloneContext ctx(&cloned_b, &original);
+ Program original(std::move(b));
+ ProgramBuilder cloned_b;
+ CloneContext ctx(&cloned_b, &original);
- // Can't insert before/after for loop continue statement (would ned to be
- // converted to loop).
- auto ip = utils::GetInsertionPoint(ctx, var);
- ASSERT_EQ(ip.first, nullptr);
- ASSERT_EQ(ip.second, nullptr);
+    // Can't insert before/after a for-loop continue statement (would need to
+    // be converted to a loop).
+ auto ip = utils::GetInsertionPoint(ctx, var);
+ ASSERT_EQ(ip.first, nullptr);
+ ASSERT_EQ(ip.second, nullptr);
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/transform/utils/hoist_to_decl_before.cc b/chromium/third_party/dawn/src/tint/transform/utils/hoist_to_decl_before.cc
index 05c56bb1779..450a2e8a922 100644
--- a/chromium/third_party/dawn/src/tint/transform/utils/hoist_to_decl_before.cc
+++ b/chromium/third_party/dawn/src/tint/transform/utils/hoist_to_decl_before.cc
@@ -20,7 +20,7 @@
#include "src/tint/sem/block_statement.h"
#include "src/tint/sem/for_loop_statement.h"
#include "src/tint/sem/if_statement.h"
-#include "src/tint/sem/reference_type.h"
+#include "src/tint/sem/reference.h"
#include "src/tint/sem/variable.h"
#include "src/tint/utils/reverse.h"
@@ -28,305 +28,241 @@ namespace tint::transform {
/// Private implementation of HoistToDeclBefore transform
class HoistToDeclBefore::State {
- CloneContext& ctx;
- ProgramBuilder& b;
-
- /// Holds information about a for-loop that needs to be decomposed into a
- /// loop, so that declaration statements can be inserted before the
- /// condition expression or continuing statement.
- struct LoopInfo {
- ast::StatementList cond_decls;
- ast::StatementList cont_decls;
- };
-
- /// Holds information about 'if's with 'else-if' statements that need to be
- /// decomposed into 'if {else}' so that declaration statements can be
- /// inserted before the condition expression.
- struct IfInfo {
+ CloneContext& ctx;
+ ProgramBuilder& b;
+
+ /// Holds information about a for-loop that needs to be decomposed into a
+ /// loop, so that declaration statements can be inserted before the
+ /// condition expression or continuing statement.
+ struct LoopInfo {
+ ast::StatementList cond_decls;
+ ast::StatementList cont_decls;
+ };
+
/// Info for each else-if that needs decomposing
struct ElseIfInfo {
- /// Decls to insert before condition
- ast::StatementList cond_decls;
+ /// Decls to insert before condition
+ ast::StatementList cond_decls;
};
- /// 'else if's that need to be decomposed to 'else { if }'
- std::unordered_map<const sem::ElseStatement*, ElseIfInfo> else_ifs;
- };
+ /// For-loops that need to be decomposed to loops.
+ std::unordered_map<const sem::ForLoopStatement*, LoopInfo> loops;
- /// For-loops that need to be decomposed to loops.
- std::unordered_map<const sem::ForLoopStatement*, LoopInfo> loops;
+ /// 'else if' statements that need to be decomposed to 'else {if}'
+ std::unordered_map<const ast::IfStatement*, ElseIfInfo> else_ifs;
- /// If statements with 'else if's that need to be decomposed to 'else {if}'
- std::unordered_map<const sem::IfStatement*, IfInfo> ifs;
-
- // Converts any for-loops marked for conversion to loops, inserting
- // registered declaration statements before the condition or continuing
- // statement.
- void ForLoopsToLoops() {
- if (loops.empty()) {
- return;
- }
+ // Converts any for-loops marked for conversion to loops, inserting
+ // registered declaration statements before the condition or continuing
+ // statement.
+ void ForLoopsToLoops() {
+ if (loops.empty()) {
+ return;
+ }
- // At least one for-loop needs to be transformed into a loop.
- ctx.ReplaceAll(
- [&](const ast::ForLoopStatement* stmt) -> const ast::Statement* {
- auto& sem = ctx.src->Sem();
-
- if (auto* fl = sem.Get(stmt)) {
- if (auto it = loops.find(fl); it != loops.end()) {
- auto& info = it->second;
- auto* for_loop = fl->Declaration();
- // For-loop needs to be decomposed to a loop.
- // Build the loop body's statements.
- // Start with any let declarations for the conditional
- // expression.
- auto body_stmts = info.cond_decls;
- // If the for-loop has a condition, emit this next as:
- // if (!cond) { break; }
- if (auto* cond = for_loop->condition) {
- // !condition
- auto* not_cond = b.create<ast::UnaryOpExpression>(
- ast::UnaryOp::kNot, ctx.Clone(cond));
- // { break; }
- auto* break_body = b.Block(b.create<ast::BreakStatement>());
- // if (!condition) { break; }
- body_stmts.emplace_back(b.If(not_cond, break_body));
- }
- // Next emit the for-loop body
- body_stmts.emplace_back(ctx.Clone(for_loop->body));
-
- // Finally create the continuing block if there was one.
- const ast::BlockStatement* continuing = nullptr;
- if (auto* cont = for_loop->continuing) {
- // Continuing block starts with any let declarations used by
- // the continuing.
- auto cont_stmts = info.cont_decls;
- cont_stmts.emplace_back(ctx.Clone(cont));
- continuing = b.Block(cont_stmts);
- }
-
- auto* body = b.Block(body_stmts);
- auto* loop = b.Loop(body, continuing);
- if (auto* init = for_loop->initializer) {
- return b.Block(ctx.Clone(init), loop);
- }
- return loop;
+ // At least one for-loop needs to be transformed into a loop.
+ ctx.ReplaceAll([&](const ast::ForLoopStatement* stmt) -> const ast::Statement* {
+ auto& sem = ctx.src->Sem();
+
+ if (auto* fl = sem.Get(stmt)) {
+ if (auto it = loops.find(fl); it != loops.end()) {
+ auto& info = it->second;
+ auto* for_loop = fl->Declaration();
+ // For-loop needs to be decomposed to a loop.
+ // Build the loop body's statements.
+ // Start with any let declarations for the conditional
+ // expression.
+ auto body_stmts = info.cond_decls;
+ // If the for-loop has a condition, emit this next as:
+ // if (!cond) { break; }
+ if (auto* cond = for_loop->condition) {
+ // !condition
+ auto* not_cond =
+ b.create<ast::UnaryOpExpression>(ast::UnaryOp::kNot, ctx.Clone(cond));
+ // { break; }
+ auto* break_body = b.Block(b.create<ast::BreakStatement>());
+ // if (!condition) { break; }
+ body_stmts.emplace_back(b.If(not_cond, break_body));
+ }
+ // Next emit the for-loop body
+ body_stmts.emplace_back(ctx.Clone(for_loop->body));
+
+ // Finally create the continuing block if there was one.
+ const ast::BlockStatement* continuing = nullptr;
+ if (auto* cont = for_loop->continuing) {
+ // Continuing block starts with any let declarations used by
+ // the continuing.
+ auto cont_stmts = info.cont_decls;
+ cont_stmts.emplace_back(ctx.Clone(cont));
+ continuing = b.Block(cont_stmts);
+ }
+
+ auto* body = b.Block(body_stmts);
+ auto* loop = b.Loop(body, continuing);
+ if (auto* init = for_loop->initializer) {
+ return b.Block(ctx.Clone(init), loop);
+ }
+ return loop;
+ }
}
- }
- return nullptr;
+ return nullptr;
});
- }
-
- void ElseIfsToElseWithNestedIfs() {
- if (ifs.empty()) {
- return;
}
- ctx.ReplaceAll([&](const ast::IfStatement* if_stmt) //
- -> const ast::IfStatement* {
- auto& sem = ctx.src->Sem();
- auto* sem_if = sem.Get(if_stmt);
- if (!sem_if) {
- return nullptr;
- }
-
- auto it = ifs.find(sem_if);
- if (it == ifs.end()) {
- return nullptr;
- }
- auto& if_info = it->second;
-
- // This if statement has "else if"s that need to be converted to "else
- // { if }"s
-
- ast::ElseStatementList next_else_stmts;
- next_else_stmts.reserve(if_stmt->else_statements.size());
-
- for (auto* else_stmt : utils::Reverse(if_stmt->else_statements)) {
- if (else_stmt->condition == nullptr) {
- // The last 'else', keep as is
- next_else_stmts.insert(next_else_stmts.begin(), ctx.Clone(else_stmt));
-
- } else {
- auto* sem_else_if = sem.Get(else_stmt);
-
- auto it2 = if_info.else_ifs.find(sem_else_if);
- if (it2 == if_info.else_ifs.end()) {
- // 'else if' we don't need to modify (no decls to insert), so
- // keep as is
- next_else_stmts.insert(next_else_stmts.begin(),
- ctx.Clone(else_stmt));
-
- } else {
- // 'else if' we need to replace with 'else <decls> { if }'
- auto& else_if_info = it2->second;
-
- // Build the else body's statements, starting with let decls for
- // the conditional expression
+ void ElseIfsToElseWithNestedIfs() {
+ // Decompose 'else-if' statements into 'else { if }' blocks.
+ ctx.ReplaceAll([&](const ast::IfStatement* else_if) -> const ast::Statement* {
+ if (!else_ifs.count(else_if)) {
+ return nullptr;
+ }
+ auto& else_if_info = else_ifs[else_if];
+
+ // Build the else block's body statements, starting with let decls for
+ // the conditional expression.
auto& body_stmts = else_if_info.cond_decls;
- // Build nested if
- auto* cond = ctx.Clone(else_stmt->condition);
- auto* body = ctx.Clone(else_stmt->body);
- body_stmts.emplace_back(b.If(cond, body, next_else_stmts));
+ // Move the 'else-if' into the new `else` block as a plain 'if'.
+ auto* cond = ctx.Clone(else_if->condition);
+ auto* body = ctx.Clone(else_if->body);
+ auto* new_if = b.If(cond, body, b.Else(ctx.Clone(else_if->else_statement)));
+ body_stmts.emplace_back(new_if);
- // Build else
- auto* else_with_nested_if = b.Else(b.Block(body_stmts));
+ // Replace the 'else-if' with the new 'else' block.
+ return b.Block(body_stmts);
+ });
+ }
- // This will be used in parent if (either another nested if, or
- // top-level if)
- next_else_stmts = {else_with_nested_if};
- }
+ public:
+ /// Constructor
+ /// @param ctx_in the clone context
+ explicit State(CloneContext& ctx_in) : ctx(ctx_in), b(*ctx_in.dst) {}
+
+ /// Hoists `expr` to a `let` or `var` with optional `decl_name`, inserting it
+ /// before `before_expr`.
+ /// @param before_expr expression to insert `expr` before
+ /// @param expr expression to hoist
+ /// @param as_const hoist to `let` if true, otherwise to `var`
+ /// @param decl_name optional name to use for the variable/constant name
+ /// @return true on success
+ bool Add(const sem::Expression* before_expr,
+ const ast::Expression* expr,
+ bool as_const,
+ const char* decl_name) {
+ auto name = b.Symbols().New(decl_name);
+
+ // Construct the let/var that holds the hoisted expr
+ auto* v = as_const ? b.Let(name, nullptr, ctx.Clone(expr))
+ : b.Var(name, nullptr, ctx.Clone(expr));
+ auto* decl = b.Decl(v);
+
+ if (!InsertBefore(before_expr->Stmt(), decl)) {
+ return false;
}
- }
-
- // Build a new top-level if with new else statements
- if (next_else_stmts.empty()) {
- TINT_ICE(Transform, b.Diagnostics())
- << "Expected else statements to insert into new if";
- }
- auto* cond = ctx.Clone(if_stmt->condition);
- auto* body = ctx.Clone(if_stmt->body);
- auto* new_if = b.If(cond, body, next_else_stmts);
- return new_if;
- });
- }
-
- public:
- /// Constructor
- /// @param ctx_in the clone context
- explicit State(CloneContext& ctx_in) : ctx(ctx_in), b(*ctx_in.dst) {}
-
- /// Hoists `expr` to a `let` or `var` with optional `decl_name`, inserting it
- /// before `before_expr`.
- /// @param before_expr expression to insert `expr` before
- /// @param expr expression to hoist
- /// @param as_const hoist to `let` if true, otherwise to `var`
- /// @param decl_name optional name to use for the variable/constant name
- /// @return true on success
- bool Add(const sem::Expression* before_expr,
- const ast::Expression* expr,
- bool as_const,
- const char* decl_name) {
- auto name = b.Symbols().New(decl_name);
-
- // Construct the let/var that holds the hoisted expr
- auto* v = as_const ? b.Const(name, nullptr, ctx.Clone(expr))
- : b.Var(name, nullptr, ctx.Clone(expr));
- auto* decl = b.Decl(v);
-
- if (!InsertBefore(before_expr->Stmt(), decl)) {
- return false;
- }
- // Replace the initializer expression with a reference to the let
- ctx.Replace(expr, b.Expr(name));
- return true;
- }
-
- /// Inserts `stmt` before `before_stmt`, possibly marking a for-loop to be
- /// converted to a loop, or an else-if to an else { if }. If `decl` is
- /// nullptr, for-loop and else-if conversions are marked, but no hoisting
- /// takes place.
- /// @param before_stmt statement to insert `stmt` before
- /// @param stmt statement to insert
- /// @return true on success
- bool InsertBefore(const sem::Statement* before_stmt,
- const ast::Statement* stmt) {
- auto* ip = before_stmt->Declaration();
-
- if (auto* else_if = before_stmt->As<sem::ElseStatement>()) {
- // Insertion point is an 'else if' condition.
- // Need to convert 'else if' to 'else { if }'.
- auto& if_info = ifs[else_if->Parent()->As<sem::IfStatement>()];
-
- // Index the map to convert this else if, even if `stmt` is nullptr.
- auto& decls = if_info.else_ifs[else_if].cond_decls;
- if (stmt) {
- decls.emplace_back(stmt);
- }
- return true;
+ // Replace the initializer expression with a reference to the let
+ ctx.Replace(expr, b.Expr(name));
+ return true;
}
- if (auto* fl = before_stmt->As<sem::ForLoopStatement>()) {
- // Insertion point is a for-loop condition.
- // For-loop needs to be decomposed to a loop.
+ /// Inserts `stmt` before `before_stmt`, possibly marking a for-loop to be
+ /// converted to a loop, or an else-if to an else { if }. If `decl` is
+ /// nullptr, for-loop and else-if conversions are marked, but no hoisting
+ /// takes place.
+ /// @param before_stmt statement to insert `stmt` before
+ /// @param stmt statement to insert
+ /// @return true on success
+ bool InsertBefore(const sem::Statement* before_stmt, const ast::Statement* stmt) {
+ auto* ip = before_stmt->Declaration();
+
+ auto* else_if = before_stmt->As<sem::IfStatement>();
+ if (else_if && else_if->Parent()->Is<sem::IfStatement>()) {
+ // Insertion point is an 'else if' condition.
+ // Need to convert 'else if' to 'else { if }'.
+ auto& else_if_info = else_ifs[else_if->Declaration()];
+
+ // Index the map to convert this else if, even if `stmt` is nullptr.
+ auto& decls = else_if_info.cond_decls;
+ if (stmt) {
+ decls.emplace_back(stmt);
+ }
+ return true;
+ }
- // Index the map to convert this for-loop, even if `stmt` is nullptr.
- auto& decls = loops[fl].cond_decls;
- if (stmt) {
- decls.emplace_back(stmt);
- }
- return true;
- }
+ if (auto* fl = before_stmt->As<sem::ForLoopStatement>()) {
+ // Insertion point is a for-loop condition.
+ // For-loop needs to be decomposed to a loop.
- auto* parent = before_stmt->Parent(); // The statement's parent
- if (auto* block = parent->As<sem::BlockStatement>()) {
- // Insert point sits in a block. Simple case.
- // Insert the stmt before the parent statement.
- if (stmt) {
- ctx.InsertBefore(block->Declaration()->statements, ip, stmt);
- }
- return true;
- }
+ // Index the map to convert this for-loop, even if `stmt` is nullptr.
+ auto& decls = loops[fl].cond_decls;
+ if (stmt) {
+ decls.emplace_back(stmt);
+ }
+ return true;
+ }
- if (auto* fl = parent->As<sem::ForLoopStatement>()) {
- // Insertion point is a for-loop initializer or continuing statement.
- // These require special care.
- if (fl->Declaration()->initializer == ip) {
- // Insertion point is a for-loop initializer.
- // Insert the new statement above the for-loop.
- if (stmt) {
- ctx.InsertBefore(fl->Block()->Declaration()->statements,
- fl->Declaration(), stmt);
+ auto* parent = before_stmt->Parent(); // The statement's parent
+ if (auto* block = parent->As<sem::BlockStatement>()) {
+ // Insert point sits in a block. Simple case.
+ // Insert the stmt before the parent statement.
+ if (stmt) {
+ ctx.InsertBefore(block->Declaration()->statements, ip, stmt);
+ }
+ return true;
}
- return true;
- }
- if (fl->Declaration()->continuing == ip) {
- // Insertion point is a for-loop continuing statement.
- // For-loop needs to be decomposed to a loop.
+ if (auto* fl = parent->As<sem::ForLoopStatement>()) {
+ // Insertion point is a for-loop initializer or continuing statement.
+ // These require special care.
+ if (fl->Declaration()->initializer == ip) {
+ // Insertion point is a for-loop initializer.
+ // Insert the new statement above the for-loop.
+ if (stmt) {
+ ctx.InsertBefore(fl->Block()->Declaration()->statements, fl->Declaration(),
+ stmt);
+ }
+ return true;
+ }
+
+ if (fl->Declaration()->continuing == ip) {
+ // Insertion point is a for-loop continuing statement.
+ // For-loop needs to be decomposed to a loop.
- // Index the map to convert this for-loop, even if `stmt` is nullptr.
- auto& decls = loops[fl].cont_decls;
- if (stmt) {
- decls.emplace_back(stmt);
+ // Index the map to convert this for-loop, even if `stmt` is nullptr.
+ auto& decls = loops[fl].cont_decls;
+ if (stmt) {
+ decls.emplace_back(stmt);
+ }
+ return true;
+ }
+
+ TINT_ICE(Transform, b.Diagnostics()) << "unhandled use of expression in for-loop";
+ return false;
}
- return true;
- }
- TINT_ICE(Transform, b.Diagnostics())
- << "unhandled use of expression in for-loop";
- return false;
+ TINT_ICE(Transform, b.Diagnostics())
+ << "unhandled expression parent statement type: " << parent->TypeInfo().name;
+ return false;
+ }
+
+ /// Use to signal that we plan on hoisting a decl before `before_expr`. This
+ /// will convert 'for-loop's to 'loop's and 'else-if's to 'else {if}'s if
+ /// needed.
+ /// @param before_expr expression we would hoist a decl before
+ /// @return true on success
+ bool Prepare(const sem::Expression* before_expr) {
+ return InsertBefore(before_expr->Stmt(), nullptr);
}
- TINT_ICE(Transform, b.Diagnostics())
- << "unhandled expression parent statement type: "
- << parent->TypeInfo().name;
- return false;
- }
-
- /// Use to signal that we plan on hoisting a decl before `before_expr`. This
- /// will convert 'for-loop's to 'loop's and 'else-if's to 'else {if}'s if
- /// needed.
- /// @param before_expr expression we would hoist a decl before
- /// @return true on success
- bool Prepare(const sem::Expression* before_expr) {
- return InsertBefore(before_expr->Stmt(), nullptr);
- }
-
- /// Applies any scheduled insertions from previous calls to Add() to
- /// CloneContext. Call this once before ctx.Clone().
- /// @return true on success
- bool Apply() {
- ForLoopsToLoops();
- ElseIfsToElseWithNestedIfs();
- return true;
- }
+ /// Applies any scheduled insertions from previous calls to Add() to
+ /// CloneContext. Call this once before ctx.Clone().
+ /// @return true on success
+ bool Apply() {
+ ForLoopsToLoops();
+ ElseIfsToElseWithNestedIfs();
+ return true;
+ }
};
-HoistToDeclBefore::HoistToDeclBefore(CloneContext& ctx)
- : state_(std::make_unique<State>(ctx)) {}
+HoistToDeclBefore::HoistToDeclBefore(CloneContext& ctx) : state_(std::make_unique<State>(ctx)) {}
HoistToDeclBefore::~HoistToDeclBefore() {}
@@ -334,20 +270,20 @@ bool HoistToDeclBefore::Add(const sem::Expression* before_expr,
const ast::Expression* expr,
bool as_const,
const char* decl_name) {
- return state_->Add(before_expr, expr, as_const, decl_name);
+ return state_->Add(before_expr, expr, as_const, decl_name);
}
bool HoistToDeclBefore::InsertBefore(const sem::Statement* before_stmt,
const ast::Statement* stmt) {
- return state_->InsertBefore(before_stmt, stmt);
+ return state_->InsertBefore(before_stmt, stmt);
}
bool HoistToDeclBefore::Prepare(const sem::Expression* before_expr) {
- return state_->Prepare(before_expr);
+ return state_->Prepare(before_expr);
}
bool HoistToDeclBefore::Apply() {
- return state_->Apply();
+ return state_->Apply();
}
} // namespace tint::transform
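The ForLoopsToLoops() comments above spell out the decomposition order: hoisted condition declarations first, then an `if (!cond) { break; }`, then the cloned for-loop body, with any hoisted continuing declarations prepended to the continuing block. Schematically, for a loop whose condition needed a hoisted `let`, the result has the shape below; generated symbol names and exact formatting are produced by the transform and the WGSL writer, so this is only the shape, not literal output.

// Schematic shape of the decomposed loop; see the ForLoopCond/ForLoopCont
// expectations in hoist_to_decl_before_test.cc for real output.
constexpr const char* kDecomposedForLoopShape = R"(
loop {
  // hoisted condition declarations, e.g. let tint_symbol = <cond-expr>;
  if (!(<cond>)) {
    break;
  }
  // cloned for-loop body

  continuing {
    // hoisted continuing declarations, then the original continuing statement
  }
}
)";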
diff --git a/chromium/third_party/dawn/src/tint/transform/utils/hoist_to_decl_before.h b/chromium/third_party/dawn/src/tint/transform/utils/hoist_to_decl_before.h
index 2d94f528e5e..d0b96e029f0 100644
--- a/chromium/third_party/dawn/src/tint/transform/utils/hoist_to_decl_before.h
+++ b/chromium/third_party/dawn/src/tint/transform/utils/hoist_to_decl_before.h
@@ -26,49 +26,48 @@ namespace tint::transform {
/// expressions, possibly converting 'for-loop's to 'loop's and 'else-if's to
// 'else {if}'s.
class HoistToDeclBefore {
- public:
- /// Constructor
- /// @param ctx the clone context
- explicit HoistToDeclBefore(CloneContext& ctx);
+ public:
+ /// Constructor
+ /// @param ctx the clone context
+ explicit HoistToDeclBefore(CloneContext& ctx);
- /// Destructor
- ~HoistToDeclBefore();
+ /// Destructor
+ ~HoistToDeclBefore();
- /// Hoists `expr` to a `let` or `var` with optional `decl_name`, inserting it
- /// before `before_expr`.
- /// @param before_expr expression to insert `expr` before
- /// @param expr expression to hoist
- /// @param as_const hoist to `let` if true, otherwise to `var`
- /// @param decl_name optional name to use for the variable/constant name
- /// @return true on success
- bool Add(const sem::Expression* before_expr,
- const ast::Expression* expr,
- bool as_const,
- const char* decl_name = "");
+ /// Hoists `expr` to a `let` or `var` with optional `decl_name`, inserting it
+ /// before `before_expr`.
+ /// @param before_expr expression to insert `expr` before
+ /// @param expr expression to hoist
+ /// @param as_const hoist to `let` if true, otherwise to `var`
+ /// @param decl_name optional name to use for the variable/constant name
+ /// @return true on success
+ bool Add(const sem::Expression* before_expr,
+ const ast::Expression* expr,
+ bool as_const,
+ const char* decl_name = "");
- /// Inserts `stmt` before `before_stmt`, possibly converting 'for-loop's to
- /// 'loop's if necessary.
- /// @param before_stmt statement to insert `stmt` before
- /// @param stmt statement to insert
- /// @return true on success
- bool InsertBefore(const sem::Statement* before_stmt,
- const ast::Statement* stmt);
+ /// Inserts `stmt` before `before_stmt`, possibly converting 'for-loop's to
+ /// 'loop's if necessary.
+ /// @param before_stmt statement to insert `stmt` before
+ /// @param stmt statement to insert
+ /// @return true on success
+ bool InsertBefore(const sem::Statement* before_stmt, const ast::Statement* stmt);
- /// Use to signal that we plan on hoisting a decl before `before_expr`. This
- /// will convert 'for-loop's to 'loop's and 'else-if's to 'else {if}'s if
- /// needed.
- /// @param before_expr expression we would hoist a decl before
- /// @return true on success
- bool Prepare(const sem::Expression* before_expr);
+ /// Use to signal that we plan on hoisting a decl before `before_expr`. This
+ /// will convert 'for-loop's to 'loop's and 'else-if's to 'else {if}'s if
+ /// needed.
+ /// @param before_expr expression we would hoist a decl before
+ /// @return true on success
+ bool Prepare(const sem::Expression* before_expr);
- /// Applies any scheduled insertions from previous calls to Add() to
- /// CloneContext. Call this once before ctx.Clone().
- /// @return true on success
- bool Apply();
+ /// Applies any scheduled insertions from previous calls to Add() to
+ /// CloneContext. Call this once before ctx.Clone().
+ /// @return true on success
+ bool Apply();
- private:
- class State;
- std::unique_ptr<State> state_;
+ private:
+ class State;
+ std::unique_ptr<State> state_;
};
} // namespace tint::transform
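The interface above is used in two ways in this patch: transforms such as VarForDynamicIndex (below) call Add() for each expression to hoist and then Apply() before cloning, while the tests also exercise InsertBefore() and Prepare() directly. A condensed, hypothetical Run()-style body using only calls that appear in this patch is sketched here; the function and the "hoisted" name hint are illustrative, and unlike VarForDynamicIndex it skips the constant-index check for brevity.

#include "src/tint/transform/utils/hoist_to_decl_before.h"

namespace tint::transform {

// Hypothetical transform body: hoist the object of every index accessor to a
// `let`, then apply the scheduled insertions and clone the program.
void HoistAllIndexedObjects(CloneContext& ctx) {
    HoistToDeclBefore hoist_to_decl_before(ctx);

    for (auto* node : ctx.src->ASTNodes().Objects()) {
        if (auto* access = node->As<ast::IndexAccessorExpression>()) {
            auto* sem_object = ctx.src->Sem().Get(access->object);
            // as_const = true hoists to a `let`; the name hint is optional.
            if (!hoist_to_decl_before.Add(sem_object, access->object, true, "hoisted")) {
                return;
            }
        }
    }

    hoist_to_decl_before.Apply();  // materialize the queued declarations
    ctx.Clone();                   // then clone the rewritten program into ctx.dst
}

}  // namespace tint::transform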
diff --git a/chromium/third_party/dawn/src/tint/transform/utils/hoist_to_decl_before_test.cc b/chromium/third_party/dawn/src/tint/transform/utils/hoist_to_decl_before_test.cc
index 589eb9a5e65..1e4cb8ef369 100644
--- a/chromium/third_party/dawn/src/tint/transform/utils/hoist_to_decl_before_test.cc
+++ b/chromium/third_party/dawn/src/tint/transform/utils/hoist_to_decl_before_test.cc
@@ -21,101 +21,102 @@
#include "src/tint/transform/test_helper.h"
#include "src/tint/transform/utils/hoist_to_decl_before.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::transform {
namespace {
using HoistToDeclBeforeTest = ::testing::Test;
TEST_F(HoistToDeclBeforeTest, VarInit) {
- // fn f() {
- // var a = 1;
- // }
- ProgramBuilder b;
- auto* expr = b.Expr(1);
- auto* var = b.Decl(b.Var("a", nullptr, expr));
- b.Func("f", {}, b.ty.void_(), {var});
-
- Program original(std::move(b));
- ProgramBuilder cloned_b;
- CloneContext ctx(&cloned_b, &original);
-
- HoistToDeclBefore hoistToDeclBefore(ctx);
- auto* sem_expr = ctx.src->Sem().Get(expr);
- hoistToDeclBefore.Add(sem_expr, expr, true);
- hoistToDeclBefore.Apply();
-
- ctx.Clone();
- Program cloned(std::move(cloned_b));
-
- auto* expect = R"(
+ // fn f() {
+ // var a = 1;
+ // }
+ ProgramBuilder b;
+ auto* expr = b.Expr(1_i);
+ auto* var = b.Decl(b.Var("a", nullptr, expr));
+ b.Func("f", {}, b.ty.void_(), {var});
+
+ Program original(std::move(b));
+ ProgramBuilder cloned_b;
+ CloneContext ctx(&cloned_b, &original);
+
+ HoistToDeclBefore hoistToDeclBefore(ctx);
+ auto* sem_expr = ctx.src->Sem().Get(expr);
+ hoistToDeclBefore.Add(sem_expr, expr, true);
+ hoistToDeclBefore.Apply();
+
+ ctx.Clone();
+ Program cloned(std::move(cloned_b));
+
+ auto* expect = R"(
fn f() {
- let tint_symbol = 1;
+ let tint_symbol = 1i;
var a = tint_symbol;
}
)";
- EXPECT_EQ(expect, str(cloned));
+ EXPECT_EQ(expect, str(cloned));
}
TEST_F(HoistToDeclBeforeTest, ForLoopInit) {
- // fn f() {
- // for(var a = 1; true; ) {
- // }
- // }
- ProgramBuilder b;
- auto* expr = b.Expr(1);
- auto* s =
- b.For(b.Decl(b.Var("a", nullptr, expr)), b.Expr(true), {}, b.Block());
- b.Func("f", {}, b.ty.void_(), {s});
-
- Program original(std::move(b));
- ProgramBuilder cloned_b;
- CloneContext ctx(&cloned_b, &original);
-
- HoistToDeclBefore hoistToDeclBefore(ctx);
- auto* sem_expr = ctx.src->Sem().Get(expr);
- hoistToDeclBefore.Add(sem_expr, expr, true);
- hoistToDeclBefore.Apply();
-
- ctx.Clone();
- Program cloned(std::move(cloned_b));
-
- auto* expect = R"(
+ // fn f() {
+ // for(var a = 1i; true; ) {
+ // }
+ // }
+ ProgramBuilder b;
+ auto* expr = b.Expr(1_i);
+ auto* s = b.For(b.Decl(b.Var("a", nullptr, expr)), b.Expr(true), {}, b.Block());
+ b.Func("f", {}, b.ty.void_(), {s});
+
+ Program original(std::move(b));
+ ProgramBuilder cloned_b;
+ CloneContext ctx(&cloned_b, &original);
+
+ HoistToDeclBefore hoistToDeclBefore(ctx);
+ auto* sem_expr = ctx.src->Sem().Get(expr);
+ hoistToDeclBefore.Add(sem_expr, expr, true);
+ hoistToDeclBefore.Apply();
+
+ ctx.Clone();
+ Program cloned(std::move(cloned_b));
+
+ auto* expect = R"(
fn f() {
- let tint_symbol = 1;
+ let tint_symbol = 1i;
for(var a = tint_symbol; true; ) {
}
}
)";
- EXPECT_EQ(expect, str(cloned));
+ EXPECT_EQ(expect, str(cloned));
}
TEST_F(HoistToDeclBeforeTest, ForLoopCond) {
- // fn f() {
- // var a : bool;
- // for(; a; ) {
- // }
- // }
- ProgramBuilder b;
- auto* var = b.Decl(b.Var("a", b.ty.bool_()));
- auto* expr = b.Expr("a");
- auto* s = b.For({}, expr, {}, b.Block());
- b.Func("f", {}, b.ty.void_(), {var, s});
-
- Program original(std::move(b));
- ProgramBuilder cloned_b;
- CloneContext ctx(&cloned_b, &original);
-
- HoistToDeclBefore hoistToDeclBefore(ctx);
- auto* sem_expr = ctx.src->Sem().Get(expr);
- hoistToDeclBefore.Add(sem_expr, expr, true);
- hoistToDeclBefore.Apply();
-
- ctx.Clone();
- Program cloned(std::move(cloned_b));
-
- auto* expect = R"(
+ // fn f() {
+ // var a : bool;
+ // for(; a; ) {
+ // }
+ // }
+ ProgramBuilder b;
+ auto* var = b.Decl(b.Var("a", b.ty.bool_()));
+ auto* expr = b.Expr("a");
+ auto* s = b.For({}, expr, {}, b.Block());
+ b.Func("f", {}, b.ty.void_(), {var, s});
+
+ Program original(std::move(b));
+ ProgramBuilder cloned_b;
+ CloneContext ctx(&cloned_b, &original);
+
+ HoistToDeclBefore hoistToDeclBefore(ctx);
+ auto* sem_expr = ctx.src->Sem().Get(expr);
+ hoistToDeclBefore.Add(sem_expr, expr, true);
+ hoistToDeclBefore.Apply();
+
+ ctx.Clone();
+ Program cloned(std::move(cloned_b));
+
+ auto* expect = R"(
fn f() {
var a : bool;
loop {
@@ -129,33 +130,32 @@ fn f() {
}
)";
- EXPECT_EQ(expect, str(cloned));
+ EXPECT_EQ(expect, str(cloned));
}
TEST_F(HoistToDeclBeforeTest, ForLoopCont) {
- // fn f() {
- // for(; true; var a = 1) {
- // }
- // }
- ProgramBuilder b;
- auto* expr = b.Expr(1);
- auto* s =
- b.For({}, b.Expr(true), b.Decl(b.Var("a", nullptr, expr)), b.Block());
- b.Func("f", {}, b.ty.void_(), {s});
-
- Program original(std::move(b));
- ProgramBuilder cloned_b;
- CloneContext ctx(&cloned_b, &original);
-
- HoistToDeclBefore hoistToDeclBefore(ctx);
- auto* sem_expr = ctx.src->Sem().Get(expr);
- hoistToDeclBefore.Add(sem_expr, expr, true);
- hoistToDeclBefore.Apply();
-
- ctx.Clone();
- Program cloned(std::move(cloned_b));
-
- auto* expect = R"(
+ // fn f() {
+ // for(; true; var a = 1i) {
+ // }
+ // }
+ ProgramBuilder b;
+ auto* expr = b.Expr(1_i);
+ auto* s = b.For({}, b.Expr(true), b.Decl(b.Var("a", nullptr, expr)), b.Block());
+ b.Func("f", {}, b.ty.void_(), {s});
+
+ Program original(std::move(b));
+ ProgramBuilder cloned_b;
+ CloneContext ctx(&cloned_b, &original);
+
+ HoistToDeclBefore hoistToDeclBefore(ctx);
+ auto* sem_expr = ctx.src->Sem().Get(expr);
+ hoistToDeclBefore.Add(sem_expr, expr, true);
+ hoistToDeclBefore.Apply();
+
+ ctx.Clone();
+ Program cloned(std::move(cloned_b));
+
+ auto* expect = R"(
fn f() {
loop {
if (!(true)) {
@@ -165,45 +165,45 @@ fn f() {
}
continuing {
- let tint_symbol = 1;
+ let tint_symbol = 1i;
var a = tint_symbol;
}
}
}
)";
- EXPECT_EQ(expect, str(cloned));
+ EXPECT_EQ(expect, str(cloned));
}
TEST_F(HoistToDeclBeforeTest, ElseIf) {
- // fn f() {
- // var a : bool;
- // if (true) {
- // } else if (a) {
- // } else {
- // }
- // }
- ProgramBuilder b;
- auto* var = b.Decl(b.Var("a", b.ty.bool_()));
- auto* expr = b.Expr("a");
- auto* s = b.If(b.Expr(true), b.Block(), //
- b.Else(expr, b.Block()), //
- b.Else(b.Block()));
- b.Func("f", {}, b.ty.void_(), {var, s});
-
- Program original(std::move(b));
- ProgramBuilder cloned_b;
- CloneContext ctx(&cloned_b, &original);
-
- HoistToDeclBefore hoistToDeclBefore(ctx);
- auto* sem_expr = ctx.src->Sem().Get(expr);
- hoistToDeclBefore.Add(sem_expr, expr, true);
- hoistToDeclBefore.Apply();
-
- ctx.Clone();
- Program cloned(std::move(cloned_b));
-
- auto* expect = R"(
+ // fn f() {
+ // var a : bool;
+ // if (true) {
+ // } else if (a) {
+ // } else {
+ // }
+ // }
+ ProgramBuilder b;
+ auto* var = b.Decl(b.Var("a", b.ty.bool_()));
+ auto* expr = b.Expr("a");
+ auto* s = b.If(b.Expr(true), b.Block(), //
+ b.Else(b.If(expr, b.Block(), //
+ b.Else(b.Block()))));
+ b.Func("f", {}, b.ty.void_(), {var, s});
+
+ Program original(std::move(b));
+ ProgramBuilder cloned_b;
+ CloneContext ctx(&cloned_b, &original);
+
+ HoistToDeclBefore hoistToDeclBefore(ctx);
+ auto* sem_expr = ctx.src->Sem().Get(expr);
+ hoistToDeclBefore.Add(sem_expr, expr, true);
+ hoistToDeclBefore.Apply();
+
+ ctx.Clone();
+ Program cloned(std::move(cloned_b));
+
+ auto* expect = R"(
fn f() {
var a : bool;
if (true) {
@@ -216,104 +216,103 @@ fn f() {
}
)";
- EXPECT_EQ(expect, str(cloned));
+ EXPECT_EQ(expect, str(cloned));
}
TEST_F(HoistToDeclBeforeTest, Array1D) {
- // fn f() {
- // var a : array<i32, 10>;
- // var b = a[0];
- // }
- ProgramBuilder b;
- auto* var1 = b.Decl(b.Var("a", b.ty.array<ProgramBuilder::i32, 10>()));
- auto* expr = b.IndexAccessor("a", 0);
- auto* var2 = b.Decl(b.Var("b", nullptr, expr));
- b.Func("f", {}, b.ty.void_(), {var1, var2});
-
- Program original(std::move(b));
- ProgramBuilder cloned_b;
- CloneContext ctx(&cloned_b, &original);
-
- HoistToDeclBefore hoistToDeclBefore(ctx);
- auto* sem_expr = ctx.src->Sem().Get(expr);
- hoistToDeclBefore.Add(sem_expr, expr, true);
- hoistToDeclBefore.Apply();
-
- ctx.Clone();
- Program cloned(std::move(cloned_b));
-
- auto* expect = R"(
+ // fn f() {
+ // var a : array<i32, 10>;
+ // var b = a[0];
+ // }
+ ProgramBuilder b;
+ auto* var1 = b.Decl(b.Var("a", b.ty.array<i32, 10>()));
+ auto* expr = b.IndexAccessor("a", 0_i);
+ auto* var2 = b.Decl(b.Var("b", nullptr, expr));
+ b.Func("f", {}, b.ty.void_(), {var1, var2});
+
+ Program original(std::move(b));
+ ProgramBuilder cloned_b;
+ CloneContext ctx(&cloned_b, &original);
+
+ HoistToDeclBefore hoistToDeclBefore(ctx);
+ auto* sem_expr = ctx.src->Sem().Get(expr);
+ hoistToDeclBefore.Add(sem_expr, expr, true);
+ hoistToDeclBefore.Apply();
+
+ ctx.Clone();
+ Program cloned(std::move(cloned_b));
+
+ auto* expect = R"(
fn f() {
- var a : array<i32, 10>;
- let tint_symbol = a[0];
+ var a : array<i32, 10u>;
+ let tint_symbol = a[0i];
var b = tint_symbol;
}
)";
- EXPECT_EQ(expect, str(cloned));
+ EXPECT_EQ(expect, str(cloned));
}
TEST_F(HoistToDeclBeforeTest, Array2D) {
- // fn f() {
- // var a : array<array<i32, 10>, 10>;
- // var b = a[0][0];
- // }
- ProgramBuilder b;
-
- auto* var1 =
- b.Decl(b.Var("a", b.ty.array(b.ty.array<ProgramBuilder::i32, 10>(), 10)));
- auto* expr = b.IndexAccessor(b.IndexAccessor("a", 0), 0);
- auto* var2 = b.Decl(b.Var("b", nullptr, expr));
- b.Func("f", {}, b.ty.void_(), {var1, var2});
-
- Program original(std::move(b));
- ProgramBuilder cloned_b;
- CloneContext ctx(&cloned_b, &original);
-
- HoistToDeclBefore hoistToDeclBefore(ctx);
- auto* sem_expr = ctx.src->Sem().Get(expr);
- hoistToDeclBefore.Add(sem_expr, expr, true);
- hoistToDeclBefore.Apply();
-
- ctx.Clone();
- Program cloned(std::move(cloned_b));
-
- auto* expect = R"(
+ // fn f() {
+ // var a : array<array<i32, 10>, 10>;
+ // var b = a[0][0];
+ // }
+ ProgramBuilder b;
+
+ auto* var1 = b.Decl(b.Var("a", b.ty.array(b.ty.array<i32, 10>(), 10_i)));
+ auto* expr = b.IndexAccessor(b.IndexAccessor("a", 0_i), 0_i);
+ auto* var2 = b.Decl(b.Var("b", nullptr, expr));
+ b.Func("f", {}, b.ty.void_(), {var1, var2});
+
+ Program original(std::move(b));
+ ProgramBuilder cloned_b;
+ CloneContext ctx(&cloned_b, &original);
+
+ HoistToDeclBefore hoistToDeclBefore(ctx);
+ auto* sem_expr = ctx.src->Sem().Get(expr);
+ hoistToDeclBefore.Add(sem_expr, expr, true);
+ hoistToDeclBefore.Apply();
+
+ ctx.Clone();
+ Program cloned(std::move(cloned_b));
+
+ auto* expect = R"(
fn f() {
- var a : array<array<i32, 10>, 10>;
- let tint_symbol = a[0][0];
+ var a : array<array<i32, 10u>, 10i>;
+ let tint_symbol = a[0i][0i];
var b = tint_symbol;
}
)";
- EXPECT_EQ(expect, str(cloned));
+ EXPECT_EQ(expect, str(cloned));
}
TEST_F(HoistToDeclBeforeTest, Prepare_ForLoopCond) {
- // fn f() {
- // var a : bool;
- // for(; a; ) {
- // }
- // }
- ProgramBuilder b;
- auto* var = b.Decl(b.Var("a", b.ty.bool_()));
- auto* expr = b.Expr("a");
- auto* s = b.For({}, expr, {}, b.Block());
- b.Func("f", {}, b.ty.void_(), {var, s});
-
- Program original(std::move(b));
- ProgramBuilder cloned_b;
- CloneContext ctx(&cloned_b, &original);
-
- HoistToDeclBefore hoistToDeclBefore(ctx);
- auto* sem_expr = ctx.src->Sem().Get(expr);
- hoistToDeclBefore.Prepare(sem_expr);
- hoistToDeclBefore.Apply();
-
- ctx.Clone();
- Program cloned(std::move(cloned_b));
-
- auto* expect = R"(
+ // fn f() {
+ // var a : bool;
+ // for(; a; ) {
+ // }
+ // }
+ ProgramBuilder b;
+ auto* var = b.Decl(b.Var("a", b.ty.bool_()));
+ auto* expr = b.Expr("a");
+ auto* s = b.For({}, expr, {}, b.Block());
+ b.Func("f", {}, b.ty.void_(), {var, s});
+
+ Program original(std::move(b));
+ ProgramBuilder cloned_b;
+ CloneContext ctx(&cloned_b, &original);
+
+ HoistToDeclBefore hoistToDeclBefore(ctx);
+ auto* sem_expr = ctx.src->Sem().Get(expr);
+ hoistToDeclBefore.Prepare(sem_expr);
+ hoistToDeclBefore.Apply();
+
+ ctx.Clone();
+ Program cloned(std::move(cloned_b));
+
+ auto* expect = R"(
fn f() {
var a : bool;
loop {
@@ -326,33 +325,32 @@ fn f() {
}
)";
- EXPECT_EQ(expect, str(cloned));
+ EXPECT_EQ(expect, str(cloned));
}
TEST_F(HoistToDeclBeforeTest, Prepare_ForLoopCont) {
- // fn f() {
- // for(; true; var a = 1) {
- // }
- // }
- ProgramBuilder b;
- auto* expr = b.Expr(1);
- auto* s =
- b.For({}, b.Expr(true), b.Decl(b.Var("a", nullptr, expr)), b.Block());
- b.Func("f", {}, b.ty.void_(), {s});
-
- Program original(std::move(b));
- ProgramBuilder cloned_b;
- CloneContext ctx(&cloned_b, &original);
-
- HoistToDeclBefore hoistToDeclBefore(ctx);
- auto* sem_expr = ctx.src->Sem().Get(expr);
- hoistToDeclBefore.Prepare(sem_expr);
- hoistToDeclBefore.Apply();
-
- ctx.Clone();
- Program cloned(std::move(cloned_b));
-
- auto* expect = R"(
+ // fn f() {
+ // for(; true; var a = 1i) {
+ // }
+ // }
+ ProgramBuilder b;
+ auto* expr = b.Expr(1_i);
+ auto* s = b.For({}, b.Expr(true), b.Decl(b.Var("a", nullptr, expr)), b.Block());
+ b.Func("f", {}, b.ty.void_(), {s});
+
+ Program original(std::move(b));
+ ProgramBuilder cloned_b;
+ CloneContext ctx(&cloned_b, &original);
+
+ HoistToDeclBefore hoistToDeclBefore(ctx);
+ auto* sem_expr = ctx.src->Sem().Get(expr);
+ hoistToDeclBefore.Prepare(sem_expr);
+ hoistToDeclBefore.Apply();
+
+ ctx.Clone();
+ Program cloned(std::move(cloned_b));
+
+ auto* expect = R"(
fn f() {
loop {
if (!(true)) {
@@ -362,44 +360,44 @@ fn f() {
}
continuing {
- var a = 1;
+ var a = 1i;
}
}
}
)";
- EXPECT_EQ(expect, str(cloned));
+ EXPECT_EQ(expect, str(cloned));
}
TEST_F(HoistToDeclBeforeTest, Prepare_ElseIf) {
- // fn f() {
- // var a : bool;
- // if (true) {
- // } else if (a) {
- // } else {
- // }
- // }
- ProgramBuilder b;
- auto* var = b.Decl(b.Var("a", b.ty.bool_()));
- auto* expr = b.Expr("a");
- auto* s = b.If(b.Expr(true), b.Block(), //
- b.Else(expr, b.Block()), //
- b.Else(b.Block()));
- b.Func("f", {}, b.ty.void_(), {var, s});
-
- Program original(std::move(b));
- ProgramBuilder cloned_b;
- CloneContext ctx(&cloned_b, &original);
-
- HoistToDeclBefore hoistToDeclBefore(ctx);
- auto* sem_expr = ctx.src->Sem().Get(expr);
- hoistToDeclBefore.Prepare(sem_expr);
- hoistToDeclBefore.Apply();
-
- ctx.Clone();
- Program cloned(std::move(cloned_b));
-
- auto* expect = R"(
+ // fn f() {
+ // var a : bool;
+ // if (true) {
+ // } else if (a) {
+ // } else {
+ // }
+ // }
+ ProgramBuilder b;
+ auto* var = b.Decl(b.Var("a", b.ty.bool_()));
+ auto* expr = b.Expr("a");
+ auto* s = b.If(b.Expr(true), b.Block(), //
+ b.Else(b.If(expr, b.Block(), //
+ b.Else(b.Block()))));
+ b.Func("f", {}, b.ty.void_(), {var, s});
+
+ Program original(std::move(b));
+ ProgramBuilder cloned_b;
+ CloneContext ctx(&cloned_b, &original);
+
+ HoistToDeclBefore hoistToDeclBefore(ctx);
+ auto* sem_expr = ctx.src->Sem().Get(expr);
+ hoistToDeclBefore.Prepare(sem_expr);
+ hoistToDeclBefore.Apply();
+
+ ctx.Clone();
+ Program cloned(std::move(cloned_b));
+
+ auto* expect = R"(
fn f() {
var a : bool;
if (true) {
@@ -411,120 +409,120 @@ fn f() {
}
)";
- EXPECT_EQ(expect, str(cloned));
+ EXPECT_EQ(expect, str(cloned));
}
TEST_F(HoistToDeclBeforeTest, InsertBefore_Block) {
- // fn foo() {
- // }
- // fn f() {
- // var a = 1;
- // }
- ProgramBuilder b;
- b.Func("foo", {}, b.ty.void_(), {});
- auto* var = b.Decl(b.Var("a", nullptr, b.Expr(1)));
- b.Func("f", {}, b.ty.void_(), {var});
-
- Program original(std::move(b));
- ProgramBuilder cloned_b;
- CloneContext ctx(&cloned_b, &original);
-
- HoistToDeclBefore hoistToDeclBefore(ctx);
- auto* before_stmt = ctx.src->Sem().Get(var);
- auto* new_stmt = ctx.dst->CallStmt(ctx.dst->Call("foo"));
- hoistToDeclBefore.InsertBefore(before_stmt, new_stmt);
- hoistToDeclBefore.Apply();
-
- ctx.Clone();
- Program cloned(std::move(cloned_b));
-
- auto* expect = R"(
+ // fn foo() {
+ // }
+ // fn f() {
+ // var a = 1i;
+ // }
+ ProgramBuilder b;
+ b.Func("foo", {}, b.ty.void_(), {});
+ auto* var = b.Decl(b.Var("a", nullptr, b.Expr(1_i)));
+ b.Func("f", {}, b.ty.void_(), {var});
+
+ Program original(std::move(b));
+ ProgramBuilder cloned_b;
+ CloneContext ctx(&cloned_b, &original);
+
+ HoistToDeclBefore hoistToDeclBefore(ctx);
+ auto* before_stmt = ctx.src->Sem().Get(var);
+ auto* new_stmt = ctx.dst->CallStmt(ctx.dst->Call("foo"));
+ hoistToDeclBefore.InsertBefore(before_stmt, new_stmt);
+ hoistToDeclBefore.Apply();
+
+ ctx.Clone();
+ Program cloned(std::move(cloned_b));
+
+ auto* expect = R"(
fn foo() {
}
fn f() {
foo();
- var a = 1;
+ var a = 1i;
}
)";
- EXPECT_EQ(expect, str(cloned));
+ EXPECT_EQ(expect, str(cloned));
}
TEST_F(HoistToDeclBeforeTest, InsertBefore_ForLoopInit) {
- // fn foo() {
- // }
- // fn f() {
- // for(var a = 1; true;) {
- // }
- // }
- ProgramBuilder b;
- b.Func("foo", {}, b.ty.void_(), {});
- auto* var = b.Decl(b.Var("a", nullptr, b.Expr(1)));
- auto* s = b.For(var, b.Expr(true), {}, b.Block());
- b.Func("f", {}, b.ty.void_(), {s});
-
- Program original(std::move(b));
- ProgramBuilder cloned_b;
- CloneContext ctx(&cloned_b, &original);
-
- HoistToDeclBefore hoistToDeclBefore(ctx);
- auto* before_stmt = ctx.src->Sem().Get(var);
- auto* new_stmt = ctx.dst->CallStmt(ctx.dst->Call("foo"));
- hoistToDeclBefore.InsertBefore(before_stmt, new_stmt);
- hoistToDeclBefore.Apply();
-
- ctx.Clone();
- Program cloned(std::move(cloned_b));
-
- auto* expect = R"(
+ // fn foo() {
+ // }
+ // fn f() {
+ // for(var a = 1i; true;) {
+ // }
+ // }
+ ProgramBuilder b;
+ b.Func("foo", {}, b.ty.void_(), {});
+ auto* var = b.Decl(b.Var("a", nullptr, b.Expr(1_i)));
+ auto* s = b.For(var, b.Expr(true), {}, b.Block());
+ b.Func("f", {}, b.ty.void_(), {s});
+
+ Program original(std::move(b));
+ ProgramBuilder cloned_b;
+ CloneContext ctx(&cloned_b, &original);
+
+ HoistToDeclBefore hoistToDeclBefore(ctx);
+ auto* before_stmt = ctx.src->Sem().Get(var);
+ auto* new_stmt = ctx.dst->CallStmt(ctx.dst->Call("foo"));
+ hoistToDeclBefore.InsertBefore(before_stmt, new_stmt);
+ hoistToDeclBefore.Apply();
+
+ ctx.Clone();
+ Program cloned(std::move(cloned_b));
+
+ auto* expect = R"(
fn foo() {
}
fn f() {
foo();
- for(var a = 1; true; ) {
+ for(var a = 1i; true; ) {
}
}
)";
- EXPECT_EQ(expect, str(cloned));
+ EXPECT_EQ(expect, str(cloned));
}
TEST_F(HoistToDeclBeforeTest, InsertBefore_ForLoopCont) {
- // fn foo() {
- // }
- // fn f() {
- // var a = 1;
- // for(; true; a+=1) {
- // }
- // }
- ProgramBuilder b;
- b.Func("foo", {}, b.ty.void_(), {});
- auto* var = b.Decl(b.Var("a", nullptr, b.Expr(1)));
- auto* cont = b.CompoundAssign("a", b.Expr(1), ast::BinaryOp::kAdd);
- auto* s = b.For({}, b.Expr(true), cont, b.Block());
- b.Func("f", {}, b.ty.void_(), {var, s});
-
- Program original(std::move(b));
- ProgramBuilder cloned_b;
- CloneContext ctx(&cloned_b, &original);
-
- HoistToDeclBefore hoistToDeclBefore(ctx);
- auto* before_stmt = ctx.src->Sem().Get(cont->As<ast::Statement>());
- auto* new_stmt = ctx.dst->CallStmt(ctx.dst->Call("foo"));
- hoistToDeclBefore.InsertBefore(before_stmt, new_stmt);
- hoistToDeclBefore.Apply();
-
- ctx.Clone();
- Program cloned(std::move(cloned_b));
-
- auto* expect = R"(
+ // fn foo() {
+ // }
+ // fn f() {
+ // var a = 1i;
+ // for(; true; a+=1i) {
+ // }
+ // }
+ ProgramBuilder b;
+ b.Func("foo", {}, b.ty.void_(), {});
+ auto* var = b.Decl(b.Var("a", nullptr, b.Expr(1_i)));
+ auto* cont = b.CompoundAssign("a", b.Expr(1_i), ast::BinaryOp::kAdd);
+ auto* s = b.For({}, b.Expr(true), cont, b.Block());
+ b.Func("f", {}, b.ty.void_(), {var, s});
+
+ Program original(std::move(b));
+ ProgramBuilder cloned_b;
+ CloneContext ctx(&cloned_b, &original);
+
+ HoistToDeclBefore hoistToDeclBefore(ctx);
+ auto* before_stmt = ctx.src->Sem().Get(cont->As<ast::Statement>());
+ auto* new_stmt = ctx.dst->CallStmt(ctx.dst->Call("foo"));
+ hoistToDeclBefore.InsertBefore(before_stmt, new_stmt);
+ hoistToDeclBefore.Apply();
+
+ ctx.Clone();
+ Program cloned(std::move(cloned_b));
+
+ auto* expect = R"(
fn foo() {
}
fn f() {
- var a = 1;
+ var a = 1i;
loop {
if (!(true)) {
break;
@@ -534,48 +532,47 @@ fn f() {
continuing {
foo();
- a += 1;
+ a += 1i;
}
}
}
)";
- EXPECT_EQ(expect, str(cloned));
+ EXPECT_EQ(expect, str(cloned));
}
TEST_F(HoistToDeclBeforeTest, InsertBefore_ElseIf) {
- // fn foo() {
- // }
- // fn f() {
- // var a : bool;
- // if (true) {
- // } else if (a) {
- // } else {
- // }
- // }
- ProgramBuilder b;
- b.Func("foo", {}, b.ty.void_(), {});
- auto* var = b.Decl(b.Var("a", b.ty.bool_()));
- auto* elseif = b.Else(b.Expr("a"), b.Block());
- auto* s = b.If(b.Expr(true), b.Block(), //
- elseif, //
- b.Else(b.Block()));
- b.Func("f", {}, b.ty.void_(), {var, s});
-
- Program original(std::move(b));
- ProgramBuilder cloned_b;
- CloneContext ctx(&cloned_b, &original);
-
- HoistToDeclBefore hoistToDeclBefore(ctx);
- auto* before_stmt = ctx.src->Sem().Get(elseif);
- auto* new_stmt = ctx.dst->CallStmt(ctx.dst->Call("foo"));
- hoistToDeclBefore.InsertBefore(before_stmt, new_stmt);
- hoistToDeclBefore.Apply();
-
- ctx.Clone();
- Program cloned(std::move(cloned_b));
-
- auto* expect = R"(
+ // fn foo() {
+ // }
+ // fn f() {
+ // var a : bool;
+ // if (true) {
+ // } else if (a) {
+ // } else {
+ // }
+ // }
+ ProgramBuilder b;
+ b.Func("foo", {}, b.ty.void_(), {});
+ auto* var = b.Decl(b.Var("a", b.ty.bool_()));
+ auto* elseif = b.If(b.Expr("a"), b.Block(), b.Else(b.Block()));
+ auto* s = b.If(b.Expr(true), b.Block(), //
+ b.Else(elseif));
+ b.Func("f", {}, b.ty.void_(), {var, s});
+
+ Program original(std::move(b));
+ ProgramBuilder cloned_b;
+ CloneContext ctx(&cloned_b, &original);
+
+ HoistToDeclBefore hoistToDeclBefore(ctx);
+ auto* before_stmt = ctx.src->Sem().Get(elseif);
+ auto* new_stmt = ctx.dst->CallStmt(ctx.dst->Call("foo"));
+ hoistToDeclBefore.InsertBefore(before_stmt, new_stmt);
+ hoistToDeclBefore.Apply();
+
+ ctx.Clone();
+ Program cloned(std::move(cloned_b));
+
+ auto* expect = R"(
fn foo() {
}
@@ -591,7 +588,7 @@ fn f() {
}
)";
- EXPECT_EQ(expect, str(cloned));
+ EXPECT_EQ(expect, str(cloned));
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/transform/var_for_dynamic_index.cc b/chromium/third_party/dawn/src/tint/transform/var_for_dynamic_index.cc
index ccd1215d29f..aaebdc76061 100644
--- a/chromium/third_party/dawn/src/tint/transform/var_for_dynamic_index.cc
+++ b/chromium/third_party/dawn/src/tint/transform/var_for_dynamic_index.cc
@@ -22,47 +22,43 @@ VarForDynamicIndex::VarForDynamicIndex() = default;
VarForDynamicIndex::~VarForDynamicIndex() = default;
-void VarForDynamicIndex::Run(CloneContext& ctx,
- const DataMap&,
- DataMap&) const {
- HoistToDeclBefore hoist_to_decl_before(ctx);
+void VarForDynamicIndex::Run(CloneContext& ctx, const DataMap&, DataMap&) const {
+ HoistToDeclBefore hoist_to_decl_before(ctx);
- // Extracts array and matrix values that are dynamically indexed to a
- // temporary `var` local that is then indexed.
- auto dynamic_index_to_var =
- [&](const ast::IndexAccessorExpression* access_expr) {
+ // Extracts array and matrix values that are dynamically indexed to a
+ // temporary `var` local that is then indexed.
+ auto dynamic_index_to_var = [&](const ast::IndexAccessorExpression* access_expr) {
auto* index_expr = access_expr->index;
auto* object_expr = access_expr->object;
auto& sem = ctx.src->Sem();
if (sem.Get(index_expr)->ConstantValue()) {
- // Index expression resolves to a compile time value.
- // As this isn't a dynamic index, we can ignore this.
- return true;
+ // Index expression resolves to a compile time value.
+ // As this isn't a dynamic index, we can ignore this.
+ return true;
}
auto* indexed = sem.Get(object_expr);
if (!indexed->Type()->IsAnyOf<sem::Array, sem::Matrix>()) {
- // We only care about array and matrices.
- return true;
+            // We only care about arrays and matrices.
+ return true;
}
// TODO(bclayton): group multiple accesses in the same object.
// e.g. arr[i] + arr[i+1] // Don't create two vars for this
- return hoist_to_decl_before.Add(indexed, object_expr, false,
- "var_for_index");
- };
+ return hoist_to_decl_before.Add(indexed, object_expr, false, "var_for_index");
+ };
- for (auto* node : ctx.src->ASTNodes().Objects()) {
- if (auto* access_expr = node->As<ast::IndexAccessorExpression>()) {
- if (!dynamic_index_to_var(access_expr)) {
- return;
- }
+ for (auto* node : ctx.src->ASTNodes().Objects()) {
+ if (auto* access_expr = node->As<ast::IndexAccessorExpression>()) {
+ if (!dynamic_index_to_var(access_expr)) {
+ return;
+ }
+ }
}
- }
- hoist_to_decl_before.Apply();
- ctx.Clone();
+ hoist_to_decl_before.Apply();
+ ctx.Clone();
}
} // namespace tint::transform
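As the comments above say, the transform only touches array and matrix values whose index is not a compile-time constant: the indexed object is hoisted to a `var` (using the "var_for_index" name hint) so that later indexing works on a reference. Roughly, the rewrite looks like the pair below, with the caveat that the exact generated name and declaration formatting come from the symbol table and the WGSL writer; the authoritative expectations are in the tests that follow.

// Rough before/after illustration; not copied from the test expectations.
constexpr const char* kDynamicIndexBefore = R"(
fn f() {
  var i : i32;
  let p = array<i32, 4>(1, 2, 3, 4);
  let x = p[i];
}
)";

constexpr const char* kDynamicIndexAfterRoughly = R"(
fn f() {
  var i : i32;
  let p = array<i32, 4>(1, 2, 3, 4);
  var var_for_index = p;
  let x = var_for_index[i];
}
)";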
diff --git a/chromium/third_party/dawn/src/tint/transform/var_for_dynamic_index.h b/chromium/third_party/dawn/src/tint/transform/var_for_dynamic_index.h
index e7e2815cadf..39ef2f25444 100644
--- a/chromium/third_party/dawn/src/tint/transform/var_for_dynamic_index.h
+++ b/chromium/third_party/dawn/src/tint/transform/var_for_dynamic_index.h
@@ -24,23 +24,21 @@ namespace tint::transform {
/// transform is used by the SPIR-V writer as there is no SPIR-V instruction
/// that can dynamically index a non-pointer composite.
class VarForDynamicIndex : public Transform {
- public:
- /// Constructor
- VarForDynamicIndex();
-
- /// Destructor
- ~VarForDynamicIndex() override;
-
- protected:
- /// Runs the transform using the CloneContext built for transforming a
- /// program. Run() is responsible for calling Clone() on the CloneContext.
- /// @param ctx the CloneContext primed with the input program and
- /// ProgramBuilder
- /// @param inputs optional extra transform-specific input data
- /// @param outputs optional extra transform-specific output data
- void Run(CloneContext& ctx,
- const DataMap& inputs,
- DataMap& outputs) const override;
+ public:
+ /// Constructor
+ VarForDynamicIndex();
+
+ /// Destructor
+ ~VarForDynamicIndex() override;
+
+ protected:
+ /// Runs the transform using the CloneContext built for transforming a
+ /// program. Run() is responsible for calling Clone() on the CloneContext.
+ /// @param ctx the CloneContext primed with the input program and
+ /// ProgramBuilder
+ /// @param inputs optional extra transform-specific input data
+ /// @param outputs optional extra transform-specific output data
+ void Run(CloneContext& ctx, const DataMap& inputs, DataMap& outputs) const override;
};
} // namespace tint::transform
diff --git a/chromium/third_party/dawn/src/tint/transform/var_for_dynamic_index_test.cc b/chromium/third_party/dawn/src/tint/transform/var_for_dynamic_index_test.cc
index a222ad737bd..ca767c9d057 100644
--- a/chromium/third_party/dawn/src/tint/transform/var_for_dynamic_index_test.cc
+++ b/chromium/third_party/dawn/src/tint/transform/var_for_dynamic_index_test.cc
@@ -23,16 +23,16 @@ namespace {
using VarForDynamicIndexTest = TransformTest;
TEST_F(VarForDynamicIndexTest, EmptyModule) {
- auto* src = "";
- auto* expect = "";
+ auto* src = "";
+ auto* expect = "";
- auto got = Run<ForLoopToLoop, VarForDynamicIndex>(src);
+ auto got = Run<ForLoopToLoop, VarForDynamicIndex>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(VarForDynamicIndexTest, ArrayIndexDynamic) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
var i : i32;
let p = array<i32, 4>(1, 2, 3, 4);
@@ -40,7 +40,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn f() {
var i : i32;
let p = array<i32, 4>(1, 2, 3, 4);
@@ -49,14 +49,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<VarForDynamicIndex>(src, data);
+ DataMap data;
+ auto got = Run<VarForDynamicIndex>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(VarForDynamicIndexTest, MatrixIndexDynamic) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
var i : i32;
let p = mat2x2(1.0, 2.0, 3.0, 4.0);
@@ -64,7 +64,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn f() {
var i : i32;
let p = mat2x2(1.0, 2.0, 3.0, 4.0);
@@ -73,14 +73,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<VarForDynamicIndex>(src, data);
+ DataMap data;
+ auto got = Run<VarForDynamicIndex>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(VarForDynamicIndexTest, ArrayIndexDynamicChain) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
var i : i32;
var j : i32;
@@ -89,12 +89,12 @@ fn f() {
}
)";
- // TODO(bclayton): Optimize this case:
- // This output is not as efficient as it could be.
- // We only actually need to hoist the inner-most array to a `var`
- // (`var_for_index`), as later indexing operations will be working with
- // references, not values.
- auto* expect = R"(
+ // TODO(bclayton): Optimize this case:
+ // This output is not as efficient as it could be.
+ // We only actually need to hoist the inner-most array to a `var`
+ // (`var_for_index`), as later indexing operations will be working with
+ // references, not values.
+ auto* expect = R"(
fn f() {
var i : i32;
var j : i32;
@@ -105,14 +105,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<VarForDynamicIndex>(src, data);
+ DataMap data;
+ auto got = Run<VarForDynamicIndex>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(VarForDynamicIndexTest, ArrayIndexInForLoopInit) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
var i : i32;
let p = array<array<i32, 2>, 2>(array<i32, 2>(1, 2), array<i32, 2>(3, 4));
@@ -122,7 +122,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn f() {
var i : i32;
let p = array<array<i32, 2>, 2>(array<i32, 2>(1, 2), array<i32, 2>(3, 4));
@@ -133,14 +133,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<VarForDynamicIndex>(src, data);
+ DataMap data;
+ auto got = Run<VarForDynamicIndex>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(VarForDynamicIndexTest, MatrixIndexInForLoopInit) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
var i : i32;
let p = mat2x2(1.0, 2.0, 3.0, 4.0);
@@ -150,7 +150,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn f() {
var i : i32;
let p = mat2x2(1.0, 2.0, 3.0, 4.0);
@@ -161,14 +161,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<VarForDynamicIndex>(src, data);
+ DataMap data;
+ auto got = Run<VarForDynamicIndex>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(VarForDynamicIndexTest, ArrayIndexInForLoopCond) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
var i : i32;
let p = array<i32, 2>(1, 2);
@@ -178,7 +178,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn f() {
var i : i32;
let p = array<i32, 2>(1, 2);
@@ -194,14 +194,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<VarForDynamicIndex>(src, data);
+ DataMap data;
+ auto got = Run<VarForDynamicIndex>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(VarForDynamicIndexTest, MatrixIndexInForLoopCond) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
var i : i32;
let p = mat2x2(1.0, 2.0, 3.0, 4.0);
@@ -211,7 +211,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn f() {
var i : i32;
let p = mat2x2(1.0, 2.0, 3.0, 4.0);
@@ -227,14 +227,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<VarForDynamicIndex>(src, data);
+ DataMap data;
+ auto got = Run<VarForDynamicIndex>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(VarForDynamicIndexTest, MatrixIndexInForLoopCondWithNestedIndex) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
var i : i32;
let p = mat2x2(1.0, 2.0, 3.0, 4.0);
@@ -247,7 +247,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn f() {
var i : i32;
let p = mat2x2(1.0, 2.0, 3.0, 4.0);
@@ -267,14 +267,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<VarForDynamicIndex>(src, data);
+ DataMap data;
+ auto got = Run<VarForDynamicIndex>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(VarForDynamicIndexTest, ArrayIndexInElseIf) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
var i : i32;
let p = array<i32, 2>(1, 2);
@@ -286,7 +286,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn f() {
var i : i32;
let p = array<i32, 2>(1, 2);
@@ -301,14 +301,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<VarForDynamicIndex>(src, data);
+ DataMap data;
+ auto got = Run<VarForDynamicIndex>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(VarForDynamicIndexTest, ArrayIndexInElseIfChain) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
var i : i32;
let p = array<i32, 2>(1, 2);
@@ -328,7 +328,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn f() {
var i : i32;
let p = array<i32, 2>(1, 2);
@@ -354,14 +354,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<VarForDynamicIndex>(src, data);
+ DataMap data;
+ auto got = Run<VarForDynamicIndex>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(VarForDynamicIndexTest, MatrixIndexInElseIf) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
var i : i32;
let p = mat2x2(1.0, 2.0, 3.0, 4.0);
@@ -373,7 +373,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn f() {
var i : i32;
let p = mat2x2(1.0, 2.0, 3.0, 4.0);
@@ -388,14 +388,14 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<VarForDynamicIndex>(src, data);
+ DataMap data;
+ auto got = Run<VarForDynamicIndex>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(VarForDynamicIndexTest, MatrixIndexInElseIfChain) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
var i : i32;
let p = mat2x2(1.0, 2.0, 3.0, 4.0);
@@ -415,7 +415,7 @@ fn f() {
}
)";
- auto* expect = R"(
+ auto* expect = R"(
fn f() {
var i : i32;
let p = mat2x2(1.0, 2.0, 3.0, 4.0);
@@ -441,46 +441,46 @@ fn f() {
}
)";
- DataMap data;
- auto got = Run<VarForDynamicIndex>(src, data);
+ DataMap data;
+ auto got = Run<VarForDynamicIndex>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(VarForDynamicIndexTest, ArrayIndexLiteral) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
let p = array<i32, 4>(1, 2, 3, 4);
let x = p[1];
}
)";
- auto* expect = src;
+ auto* expect = src;
- DataMap data;
- auto got = Run<VarForDynamicIndex>(src, data);
+ DataMap data;
+ auto got = Run<VarForDynamicIndex>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(VarForDynamicIndexTest, MatrixIndexLiteral) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
let p = mat2x2(1.0, 2.0, 3.0, 4.0);
let x = p[1];
}
)";
- auto* expect = src;
+ auto* expect = src;
- DataMap data;
- auto got = Run<VarForDynamicIndex>(src, data);
+ DataMap data;
+ auto got = Run<VarForDynamicIndex>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(VarForDynamicIndexTest, ArrayIndexConstantLet) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
let p = array<i32, 4>(1, 2, 3, 4);
let c = 1;
@@ -488,16 +488,16 @@ fn f() {
}
)";
- auto* expect = src;
+ auto* expect = src;
- DataMap data;
- auto got = Run<VarForDynamicIndex>(src, data);
+ DataMap data;
+ auto got = Run<VarForDynamicIndex>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(VarForDynamicIndexTest, MatrixIndexConstantLet) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
let p = mat2x2(1.0, 2.0, 3.0, 4.0);
let c = 1;
@@ -505,16 +505,16 @@ fn f() {
}
)";
- auto* expect = src;
+ auto* expect = src;
- DataMap data;
- auto got = Run<VarForDynamicIndex>(src, data);
+ DataMap data;
+ auto got = Run<VarForDynamicIndex>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(VarForDynamicIndexTest, ArrayIndexLiteralChain) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
let a = array<i32, 2>(1, 2);
let b = array<i32, 2>(3, 4);
@@ -523,28 +523,28 @@ fn f() {
}
)";
- auto* expect = src;
+ auto* expect = src;
- DataMap data;
- auto got = Run<VarForDynamicIndex>(src, data);
+ DataMap data;
+ auto got = Run<VarForDynamicIndex>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(VarForDynamicIndexTest, MatrixIndexLiteralChain) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
let p = mat2x2(1.0, 2.0, 3.0, 4.0);
let x = p[0][1];
}
)";
- auto* expect = src;
+ auto* expect = src;
- DataMap data;
- auto got = Run<VarForDynamicIndex>(src, data);
+ DataMap data;
+ auto got = Run<VarForDynamicIndex>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/transform/vectorize_scalar_matrix_constructors.cc b/chromium/third_party/dawn/src/tint/transform/vectorize_scalar_matrix_constructors.cc
index 8af027284d3..9cd9757bbbe 100644
--- a/chromium/third_party/dawn/src/tint/transform/vectorize_scalar_matrix_constructors.cc
+++ b/chromium/third_party/dawn/src/tint/transform/vectorize_scalar_matrix_constructors.cc
@@ -14,82 +14,113 @@
#include "src/tint/transform/vectorize_scalar_matrix_constructors.h"
+#include <unordered_map>
#include <utility>
#include "src/tint/program_builder.h"
#include "src/tint/sem/call.h"
#include "src/tint/sem/expression.h"
#include "src/tint/sem/type_constructor.h"
+#include "src/tint/utils/map.h"
TINT_INSTANTIATE_TYPEINFO(tint::transform::VectorizeScalarMatrixConstructors);
namespace tint::transform {
-VectorizeScalarMatrixConstructors::VectorizeScalarMatrixConstructors() =
- default;
+VectorizeScalarMatrixConstructors::VectorizeScalarMatrixConstructors() = default;
-VectorizeScalarMatrixConstructors::~VectorizeScalarMatrixConstructors() =
- default;
+VectorizeScalarMatrixConstructors::~VectorizeScalarMatrixConstructors() = default;
-bool VectorizeScalarMatrixConstructors::ShouldRun(const Program* program,
- const DataMap&) const {
- for (auto* node : program->ASTNodes().Objects()) {
- if (auto* call = program->Sem().Get<sem::Call>(node)) {
- if (call->Target()->Is<sem::TypeConstructor>() &&
- call->Type()->Is<sem::Matrix>()) {
- auto& args = call->Arguments();
- if (args.size() > 0 && args[0]->Type()->is_scalar()) {
- return true;
+bool VectorizeScalarMatrixConstructors::ShouldRun(const Program* program, const DataMap&) const {
+ for (auto* node : program->ASTNodes().Objects()) {
+ if (auto* call = program->Sem().Get<sem::Call>(node)) {
+ if (call->Target()->Is<sem::TypeConstructor>() && call->Type()->Is<sem::Matrix>()) {
+ auto& args = call->Arguments();
+ if (args.size() > 0 && args[0]->Type()->is_scalar()) {
+ return true;
+ }
+ }
}
- }
}
- }
- return false;
+ return false;
}
-void VectorizeScalarMatrixConstructors::Run(CloneContext& ctx,
- const DataMap&,
- DataMap&) const {
- ctx.ReplaceAll(
- [&](const ast::CallExpression* expr) -> const ast::CallExpression* {
- auto* call = ctx.src->Sem().Get(expr);
+void VectorizeScalarMatrixConstructors::Run(CloneContext& ctx, const DataMap&, DataMap&) const {
+ std::unordered_map<const sem::Matrix*, Symbol> scalar_ctors;
+
+ ctx.ReplaceAll([&](const ast::CallExpression* expr) -> const ast::CallExpression* {
+ auto* call = ctx.src->Sem().Get(expr)->UnwrapMaterialize()->As<sem::Call>();
auto* ty_ctor = call->Target()->As<sem::TypeConstructor>();
if (!ty_ctor) {
- return nullptr;
+ return nullptr;
}
// Check if this is a matrix constructor with scalar arguments.
auto* mat_type = call->Type()->As<sem::Matrix>();
if (!mat_type) {
- return nullptr;
+ return nullptr;
}
auto& args = call->Arguments();
if (args.size() == 0) {
- return nullptr;
+ return nullptr;
}
if (!args[0]->Type()->is_scalar()) {
- return nullptr;
+ return nullptr;
}
- // Build a list of vector expressions for each column.
- ast::ExpressionList columns;
- for (uint32_t c = 0; c < mat_type->columns(); c++) {
- // Build a list of scalar expressions for each value in the column.
- ast::ExpressionList row_values;
- for (uint32_t r = 0; r < mat_type->rows(); r++) {
- row_values.push_back(
- ctx.Clone(args[c * mat_type->rows() + r]->Declaration()));
- }
-
- // Construct the column vector.
- auto* col = ctx.dst->vec(CreateASTTypeFor(ctx, mat_type->type()),
- mat_type->rows(), row_values);
- columns.push_back(col);
+ // Constructs a matrix using vector columns, with the elements constructed using the
+ // 'element(uint32_t c, uint32_t r)' callback.
+ auto build_mat = [&](auto&& element) {
+ ast::ExpressionList columns(mat_type->columns());
+ for (uint32_t c = 0; c < mat_type->columns(); c++) {
+ ast::ExpressionList row_values(mat_type->rows());
+ for (uint32_t r = 0; r < mat_type->rows(); r++) {
+ row_values[r] = element(c, r);
+ }
+
+ // Construct the column vector.
+ columns[c] = ctx.dst->vec(CreateASTTypeFor(ctx, mat_type->type()), mat_type->rows(),
+ row_values);
+ }
+ return ctx.dst->Construct(CreateASTTypeFor(ctx, mat_type), columns);
+ };
+
+ if (args.size() == 1) {
+ // Generate a helper function for constructing the matrix.
+ // This is done to ensure that the single argument value is only evaluated once, and
+ // with the correct expression evaluation order.
+ auto fn = utils::GetOrCreate(scalar_ctors, mat_type, [&] {
+ auto name =
+ ctx.dst->Symbols().New("build_mat" + std::to_string(mat_type->columns()) + "x" +
+ std::to_string(mat_type->rows()));
+ ctx.dst->Func(name,
+ {
+ // Single scalar parameter
+ ctx.dst->Param("value", CreateASTTypeFor(ctx, mat_type->type())),
+ },
+ CreateASTTypeFor(ctx, mat_type),
+ {
+ ctx.dst->Return(build_mat([&](uint32_t, uint32_t) { //
+ return ctx.dst->Expr("value");
+ })),
+ });
+ return name;
+ });
+ return ctx.dst->Call(fn, ctx.Clone(args[0]->Declaration()));
}
- return ctx.dst->Construct(CreateASTTypeFor(ctx, mat_type), columns);
- });
- ctx.Clone();
+ if (args.size() == mat_type->columns() * mat_type->rows()) {
+ return build_mat([&](uint32_t c, uint32_t r) {
+ return ctx.Clone(args[c * mat_type->rows() + r]->Declaration());
+ });
+ }
+
+ TINT_ICE(Transform, ctx.dst->Diagnostics())
+ << "matrix constructor has unexpected number of arguments";
+ return nullptr;
+ });
+
+ ctx.Clone();
}
} // namespace tint::transform
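A concrete sketch of the new single-scalar path, instantiating the SingleScalars test template further below for mat2x2<f32> (the helper name follows the "build_mat" prefix generated above; the formatting here is a sketch, not the writer's verbatim output):

    // Before: a matrix constructed from a single scalar value.
    @fragment
    fn main() {
      let m = mat2x2<f32>(42.0);
    }

    // After: a generated helper builds the matrix column by column, so the
    // scalar argument is evaluated exactly once and in the expected order.
    fn build_mat2x2(value : f32) -> mat2x2<f32> {
      return mat2x2<f32>(vec2<f32>(value, value), vec2<f32>(value, value));
    }

    @fragment
    fn main() {
      let m = build_mat2x2(42.0);
    }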
diff --git a/chromium/third_party/dawn/src/tint/transform/vectorize_scalar_matrix_constructors.h b/chromium/third_party/dawn/src/tint/transform/vectorize_scalar_matrix_constructors.h
index f1abb4fcd96..83c4ce1aa64 100644
--- a/chromium/third_party/dawn/src/tint/transform/vectorize_scalar_matrix_constructors.h
+++ b/chromium/third_party/dawn/src/tint/transform/vectorize_scalar_matrix_constructors.h
@@ -22,29 +22,26 @@ namespace tint::transform {
/// A transform that converts scalar matrix constructors to the vector form.
class VectorizeScalarMatrixConstructors
: public Castable<VectorizeScalarMatrixConstructors, Transform> {
- public:
- /// Constructor
- VectorizeScalarMatrixConstructors();
-
- /// Destructor
- ~VectorizeScalarMatrixConstructors() override;
-
- /// @param program the program to inspect
- /// @param data optional extra transform-specific input data
- /// @returns true if this transform should be run for the given program
- bool ShouldRun(const Program* program,
- const DataMap& data = {}) const override;
-
- protected:
- /// Runs the transform using the CloneContext built for transforming a
- /// program. Run() is responsible for calling Clone() on the CloneContext.
- /// @param ctx the CloneContext primed with the input program and
- /// ProgramBuilder
- /// @param inputs optional extra transform-specific input data
- /// @param outputs optional extra transform-specific output data
- void Run(CloneContext& ctx,
- const DataMap& inputs,
- DataMap& outputs) const override;
+ public:
+ /// Constructor
+ VectorizeScalarMatrixConstructors();
+
+ /// Destructor
+ ~VectorizeScalarMatrixConstructors() override;
+
+ /// @param program the program to inspect
+ /// @param data optional extra transform-specific input data
+ /// @returns true if this transform should be run for the given program
+ bool ShouldRun(const Program* program, const DataMap& data = {}) const override;
+
+ protected:
+ /// Runs the transform using the CloneContext built for transforming a
+ /// program. Run() is responsible for calling Clone() on the CloneContext.
+ /// @param ctx the CloneContext primed with the input program and
+ /// ProgramBuilder
+ /// @param inputs optional extra transform-specific input data
+ /// @param outputs optional extra transform-specific output data
+ void Run(CloneContext& ctx, const DataMap& inputs, DataMap& outputs) const override;
};
} // namespace tint::transform
diff --git a/chromium/third_party/dawn/src/tint/transform/vectorize_scalar_matrix_constructors_test.cc b/chromium/third_party/dawn/src/tint/transform/vectorize_scalar_matrix_constructors_test.cc
index edd68e254d2..1ee9d337b7f 100644
--- a/chromium/third_party/dawn/src/tint/transform/vectorize_scalar_matrix_constructors_test.cc
+++ b/chromium/third_party/dawn/src/tint/transform/vectorize_scalar_matrix_constructors_test.cc
@@ -23,87 +23,134 @@
namespace tint::transform {
namespace {
-using VectorizeScalarMatrixConstructorsTest =
- TransformTestWithParam<std::pair<uint32_t, uint32_t>>;
+using VectorizeScalarMatrixConstructorsTest = TransformTestWithParam<std::pair<uint32_t, uint32_t>>;
TEST_F(VectorizeScalarMatrixConstructorsTest, ShouldRunEmptyModule) {
- auto* src = R"()";
+ auto* src = R"()";
- EXPECT_FALSE(ShouldRun<VectorizeScalarMatrixConstructors>(src));
+ EXPECT_FALSE(ShouldRun<VectorizeScalarMatrixConstructors>(src));
}
-TEST_P(VectorizeScalarMatrixConstructorsTest, Basic) {
- uint32_t cols = GetParam().first;
- uint32_t rows = GetParam().second;
- std::string mat_type =
- "mat" + std::to_string(cols) + "x" + std::to_string(rows) + "<f32>";
- std::string vec_type = "vec" + std::to_string(rows) + "<f32>";
- std::string scalar_values;
- std::string vector_values;
- for (uint32_t c = 0; c < cols; c++) {
- if (c > 0) {
- vector_values += ", ";
- scalar_values += ", ";
+TEST_P(VectorizeScalarMatrixConstructorsTest, SingleScalars) {
+ uint32_t cols = GetParam().first;
+ uint32_t rows = GetParam().second;
+ std::string matrix_no_type = "mat" + std::to_string(cols) + "x" + std::to_string(rows);
+ std::string matrix = matrix_no_type + "<f32>";
+ std::string vector = "vec" + std::to_string(rows) + "<f32>";
+ std::string values;
+ for (uint32_t c = 0; c < cols; c++) {
+ if (c > 0) {
+ values += ", ";
+ }
+ values += vector + "(";
+ for (uint32_t r = 0; r < rows; r++) {
+ if (r > 0) {
+ values += ", ";
+ }
+ values += "value";
+ }
+ values += ")";
}
- vector_values += vec_type + "(";
- for (uint32_t r = 0; r < rows; r++) {
- if (r > 0) {
- scalar_values += ", ";
- vector_values += ", ";
- }
- auto value = std::to_string(c * rows + r) + ".0";
- scalar_values += value;
- vector_values += value;
+
+ std::string src = R"(
+@fragment
+fn main() {
+ let m = ${matrix}(42.0);
+}
+)";
+
+ std::string expect = R"(
+fn build_${matrix_no_type}(value : f32) -> ${matrix} {
+ return ${matrix}(${values});
+}
+
+@fragment
+fn main() {
+ let m = build_${matrix_no_type}(42.0);
+}
+)";
+ src = utils::ReplaceAll(src, "${matrix}", matrix);
+ expect = utils::ReplaceAll(expect, "${matrix}", matrix);
+ expect = utils::ReplaceAll(expect, "${matrix_no_type}", matrix_no_type);
+ expect = utils::ReplaceAll(expect, "${values}", values);
+
+ EXPECT_TRUE(ShouldRun<VectorizeScalarMatrixConstructors>(src));
+
+ auto got = Run<VectorizeScalarMatrixConstructors>(src);
+
+ EXPECT_EQ(expect, str(got));
+}
+
+TEST_P(VectorizeScalarMatrixConstructorsTest, MultipleScalars) {
+ uint32_t cols = GetParam().first;
+ uint32_t rows = GetParam().second;
+ std::string mat_type = "mat" + std::to_string(cols) + "x" + std::to_string(rows) + "<f32>";
+ std::string vec_type = "vec" + std::to_string(rows) + "<f32>";
+ std::string scalar_values;
+ std::string vector_values;
+ for (uint32_t c = 0; c < cols; c++) {
+ if (c > 0) {
+ vector_values += ", ";
+ scalar_values += ", ";
+ }
+ vector_values += vec_type + "(";
+ for (uint32_t r = 0; r < rows; r++) {
+ if (r > 0) {
+ scalar_values += ", ";
+ vector_values += ", ";
+ }
+ auto value = std::to_string(c * rows + r) + ".0";
+ scalar_values += value;
+ vector_values += value;
+ }
+ vector_values += ")";
}
- vector_values += ")";
- }
- std::string tmpl = R"(
-@stage(fragment)
+ std::string tmpl = R"(
+@fragment
fn main() {
let m = ${matrix}(${values});
}
)";
- tmpl = utils::ReplaceAll(tmpl, "${matrix}", mat_type);
- auto src = utils::ReplaceAll(tmpl, "${values}", scalar_values);
- auto expect = utils::ReplaceAll(tmpl, "${values}", vector_values);
+ tmpl = utils::ReplaceAll(tmpl, "${matrix}", mat_type);
+ auto src = utils::ReplaceAll(tmpl, "${values}", scalar_values);
+ auto expect = utils::ReplaceAll(tmpl, "${values}", vector_values);
- EXPECT_TRUE(ShouldRun<VectorizeScalarMatrixConstructors>(src));
+ EXPECT_TRUE(ShouldRun<VectorizeScalarMatrixConstructors>(src));
- auto got = Run<VectorizeScalarMatrixConstructors>(src);
+ auto got = Run<VectorizeScalarMatrixConstructors>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_P(VectorizeScalarMatrixConstructorsTest, NonScalarConstructors) {
- uint32_t cols = GetParam().first;
- uint32_t rows = GetParam().second;
- std::string mat_type =
- "mat" + std::to_string(cols) + "x" + std::to_string(rows) + "<f32>";
- std::string vec_type = "vec" + std::to_string(rows) + "<f32>";
- std::string columns;
- for (uint32_t c = 0; c < cols; c++) {
- if (c > 0) {
- columns += ", ";
+ uint32_t cols = GetParam().first;
+ uint32_t rows = GetParam().second;
+ std::string mat_type = "mat" + std::to_string(cols) + "x" + std::to_string(rows) + "<f32>";
+ std::string vec_type = "vec" + std::to_string(rows) + "<f32>";
+ std::string columns;
+ for (uint32_t c = 0; c < cols; c++) {
+ if (c > 0) {
+ columns += ", ";
+ }
+ columns += vec_type + "()";
}
- columns += vec_type + "()";
- }
- std::string tmpl = R"(
-@stage(fragment)
+ std::string tmpl = R"(
+@fragment
fn main() {
let m = ${matrix}(${columns});
}
)";
- tmpl = utils::ReplaceAll(tmpl, "${matrix}", mat_type);
- auto src = utils::ReplaceAll(tmpl, "${columns}", columns);
- auto expect = src;
+ tmpl = utils::ReplaceAll(tmpl, "${matrix}", mat_type);
+ auto src = utils::ReplaceAll(tmpl, "${columns}", columns);
+ auto expect = src;
- EXPECT_FALSE(ShouldRun<VectorizeScalarMatrixConstructors>(src));
+ EXPECT_FALSE(ShouldRun<VectorizeScalarMatrixConstructors>(src));
- auto got = Run<VectorizeScalarMatrixConstructors>(src);
+ auto got = Run<VectorizeScalarMatrixConstructors>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
INSTANTIATE_TEST_SUITE_P(VectorizeScalarMatrixConstructorsTest,
diff --git a/chromium/third_party/dawn/src/tint/transform/vertex_pulling.cc b/chromium/third_party/dawn/src/tint/transform/vertex_pulling.cc
index 420076784fc..e297c904493 100644
--- a/chromium/third_party/dawn/src/tint/transform/vertex_pulling.cc
+++ b/chromium/third_party/dawn/src/tint/transform/vertex_pulling.cc
@@ -28,6 +28,8 @@
TINT_INSTANTIATE_TYPEINFO(tint::transform::VertexPulling);
TINT_INSTANTIATE_TYPEINFO(tint::transform::VertexPulling::Config);
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::transform {
namespace {
@@ -35,10 +37,10 @@ namespace {
/// The base type of a component.
/// The format type is either this type or a vector of this type.
enum class BaseType {
- kInvalid,
- kU32,
- kI32,
- kF32,
+ kInvalid,
+ kU32,
+ kI32,
+ kF32,
};
/// Writes the BaseType to the std::ostream.
@@ -46,17 +48,17 @@ enum class BaseType {
/// @param format the BaseType to write
/// @returns out so calls can be chained
std::ostream& operator<<(std::ostream& out, BaseType format) {
- switch (format) {
- case BaseType::kInvalid:
- return out << "invalid";
- case BaseType::kU32:
- return out << "u32";
- case BaseType::kI32:
- return out << "i32";
- case BaseType::kF32:
- return out << "f32";
- }
- return out << "<unknown>";
+ switch (format) {
+ case BaseType::kInvalid:
+ return out << "invalid";
+ case BaseType::kU32:
+ return out << "u32";
+ case BaseType::kI32:
+ return out << "i32";
+ case BaseType::kF32:
+ return out << "f32";
+ }
+ return out << "<unknown>";
}
/// Writes the VertexFormat to the std::ostream.
@@ -64,837 +66,800 @@ std::ostream& operator<<(std::ostream& out, BaseType format) {
/// @param format the VertexFormat to write
/// @returns out so calls can be chained
std::ostream& operator<<(std::ostream& out, VertexFormat format) {
- switch (format) {
- case VertexFormat::kUint8x2:
- return out << "uint8x2";
- case VertexFormat::kUint8x4:
- return out << "uint8x4";
- case VertexFormat::kSint8x2:
- return out << "sint8x2";
- case VertexFormat::kSint8x4:
- return out << "sint8x4";
- case VertexFormat::kUnorm8x2:
- return out << "unorm8x2";
- case VertexFormat::kUnorm8x4:
- return out << "unorm8x4";
- case VertexFormat::kSnorm8x2:
- return out << "snorm8x2";
- case VertexFormat::kSnorm8x4:
- return out << "snorm8x4";
- case VertexFormat::kUint16x2:
- return out << "uint16x2";
- case VertexFormat::kUint16x4:
- return out << "uint16x4";
- case VertexFormat::kSint16x2:
- return out << "sint16x2";
- case VertexFormat::kSint16x4:
- return out << "sint16x4";
- case VertexFormat::kUnorm16x2:
- return out << "unorm16x2";
- case VertexFormat::kUnorm16x4:
- return out << "unorm16x4";
- case VertexFormat::kSnorm16x2:
- return out << "snorm16x2";
- case VertexFormat::kSnorm16x4:
- return out << "snorm16x4";
- case VertexFormat::kFloat16x2:
- return out << "float16x2";
- case VertexFormat::kFloat16x4:
- return out << "float16x4";
- case VertexFormat::kFloat32:
- return out << "float32";
- case VertexFormat::kFloat32x2:
- return out << "float32x2";
- case VertexFormat::kFloat32x3:
- return out << "float32x3";
- case VertexFormat::kFloat32x4:
- return out << "float32x4";
- case VertexFormat::kUint32:
- return out << "uint32";
- case VertexFormat::kUint32x2:
- return out << "uint32x2";
- case VertexFormat::kUint32x3:
- return out << "uint32x3";
- case VertexFormat::kUint32x4:
- return out << "uint32x4";
- case VertexFormat::kSint32:
- return out << "sint32";
- case VertexFormat::kSint32x2:
- return out << "sint32x2";
- case VertexFormat::kSint32x3:
- return out << "sint32x3";
- case VertexFormat::kSint32x4:
- return out << "sint32x4";
- }
- return out << "<unknown>";
+ switch (format) {
+ case VertexFormat::kUint8x2:
+ return out << "uint8x2";
+ case VertexFormat::kUint8x4:
+ return out << "uint8x4";
+ case VertexFormat::kSint8x2:
+ return out << "sint8x2";
+ case VertexFormat::kSint8x4:
+ return out << "sint8x4";
+ case VertexFormat::kUnorm8x2:
+ return out << "unorm8x2";
+ case VertexFormat::kUnorm8x4:
+ return out << "unorm8x4";
+ case VertexFormat::kSnorm8x2:
+ return out << "snorm8x2";
+ case VertexFormat::kSnorm8x4:
+ return out << "snorm8x4";
+ case VertexFormat::kUint16x2:
+ return out << "uint16x2";
+ case VertexFormat::kUint16x4:
+ return out << "uint16x4";
+ case VertexFormat::kSint16x2:
+ return out << "sint16x2";
+ case VertexFormat::kSint16x4:
+ return out << "sint16x4";
+ case VertexFormat::kUnorm16x2:
+ return out << "unorm16x2";
+ case VertexFormat::kUnorm16x4:
+ return out << "unorm16x4";
+ case VertexFormat::kSnorm16x2:
+ return out << "snorm16x2";
+ case VertexFormat::kSnorm16x4:
+ return out << "snorm16x4";
+ case VertexFormat::kFloat16x2:
+ return out << "float16x2";
+ case VertexFormat::kFloat16x4:
+ return out << "float16x4";
+ case VertexFormat::kFloat32:
+ return out << "float32";
+ case VertexFormat::kFloat32x2:
+ return out << "float32x2";
+ case VertexFormat::kFloat32x3:
+ return out << "float32x3";
+ case VertexFormat::kFloat32x4:
+ return out << "float32x4";
+ case VertexFormat::kUint32:
+ return out << "uint32";
+ case VertexFormat::kUint32x2:
+ return out << "uint32x2";
+ case VertexFormat::kUint32x3:
+ return out << "uint32x3";
+ case VertexFormat::kUint32x4:
+ return out << "uint32x4";
+ case VertexFormat::kSint32:
+ return out << "sint32";
+ case VertexFormat::kSint32x2:
+ return out << "sint32x2";
+ case VertexFormat::kSint32x3:
+ return out << "sint32x3";
+ case VertexFormat::kSint32x4:
+ return out << "sint32x4";
+ }
+ return out << "<unknown>";
}
/// A vertex attribute data format.
struct DataType {
- BaseType base_type;
- uint32_t width; // 1 for scalar, 2+ for a vector
+ BaseType base_type;
+ uint32_t width; // 1 for scalar, 2+ for a vector
};
DataType DataTypeOf(const sem::Type* ty) {
- if (ty->Is<sem::I32>()) {
- return {BaseType::kI32, 1};
- }
- if (ty->Is<sem::U32>()) {
- return {BaseType::kU32, 1};
- }
- if (ty->Is<sem::F32>()) {
- return {BaseType::kF32, 1};
- }
- if (auto* vec = ty->As<sem::Vector>()) {
- return {DataTypeOf(vec->type()).base_type, vec->Width()};
- }
- return {BaseType::kInvalid, 0};
+ if (ty->Is<sem::I32>()) {
+ return {BaseType::kI32, 1};
+ }
+ if (ty->Is<sem::U32>()) {
+ return {BaseType::kU32, 1};
+ }
+ if (ty->Is<sem::F32>()) {
+ return {BaseType::kF32, 1};
+ }
+ if (auto* vec = ty->As<sem::Vector>()) {
+ return {DataTypeOf(vec->type()).base_type, vec->Width()};
+ }
+ return {BaseType::kInvalid, 0};
}
DataType DataTypeOf(VertexFormat format) {
- switch (format) {
- case VertexFormat::kUint32:
- return {BaseType::kU32, 1};
- case VertexFormat::kUint8x2:
- case VertexFormat::kUint16x2:
- case VertexFormat::kUint32x2:
- return {BaseType::kU32, 2};
- case VertexFormat::kUint32x3:
- return {BaseType::kU32, 3};
- case VertexFormat::kUint8x4:
- case VertexFormat::kUint16x4:
- case VertexFormat::kUint32x4:
- return {BaseType::kU32, 4};
- case VertexFormat::kSint32:
- return {BaseType::kI32, 1};
- case VertexFormat::kSint8x2:
- case VertexFormat::kSint16x2:
- case VertexFormat::kSint32x2:
- return {BaseType::kI32, 2};
- case VertexFormat::kSint32x3:
- return {BaseType::kI32, 3};
- case VertexFormat::kSint8x4:
- case VertexFormat::kSint16x4:
- case VertexFormat::kSint32x4:
- return {BaseType::kI32, 4};
- case VertexFormat::kFloat32:
- return {BaseType::kF32, 1};
- case VertexFormat::kUnorm8x2:
- case VertexFormat::kSnorm8x2:
- case VertexFormat::kUnorm16x2:
- case VertexFormat::kSnorm16x2:
- case VertexFormat::kFloat16x2:
- case VertexFormat::kFloat32x2:
- return {BaseType::kF32, 2};
- case VertexFormat::kFloat32x3:
- return {BaseType::kF32, 3};
- case VertexFormat::kUnorm8x4:
- case VertexFormat::kSnorm8x4:
- case VertexFormat::kUnorm16x4:
- case VertexFormat::kSnorm16x4:
- case VertexFormat::kFloat16x4:
- case VertexFormat::kFloat32x4:
- return {BaseType::kF32, 4};
- }
- return {BaseType::kInvalid, 0};
+ switch (format) {
+ case VertexFormat::kUint32:
+ return {BaseType::kU32, 1};
+ case VertexFormat::kUint8x2:
+ case VertexFormat::kUint16x2:
+ case VertexFormat::kUint32x2:
+ return {BaseType::kU32, 2};
+ case VertexFormat::kUint32x3:
+ return {BaseType::kU32, 3};
+ case VertexFormat::kUint8x4:
+ case VertexFormat::kUint16x4:
+ case VertexFormat::kUint32x4:
+ return {BaseType::kU32, 4};
+ case VertexFormat::kSint32:
+ return {BaseType::kI32, 1};
+ case VertexFormat::kSint8x2:
+ case VertexFormat::kSint16x2:
+ case VertexFormat::kSint32x2:
+ return {BaseType::kI32, 2};
+ case VertexFormat::kSint32x3:
+ return {BaseType::kI32, 3};
+ case VertexFormat::kSint8x4:
+ case VertexFormat::kSint16x4:
+ case VertexFormat::kSint32x4:
+ return {BaseType::kI32, 4};
+ case VertexFormat::kFloat32:
+ return {BaseType::kF32, 1};
+ case VertexFormat::kUnorm8x2:
+ case VertexFormat::kSnorm8x2:
+ case VertexFormat::kUnorm16x2:
+ case VertexFormat::kSnorm16x2:
+ case VertexFormat::kFloat16x2:
+ case VertexFormat::kFloat32x2:
+ return {BaseType::kF32, 2};
+ case VertexFormat::kFloat32x3:
+ return {BaseType::kF32, 3};
+ case VertexFormat::kUnorm8x4:
+ case VertexFormat::kSnorm8x4:
+ case VertexFormat::kUnorm16x4:
+ case VertexFormat::kSnorm16x4:
+ case VertexFormat::kFloat16x4:
+ case VertexFormat::kFloat32x4:
+ return {BaseType::kF32, 4};
+ }
+ return {BaseType::kInvalid, 0};
}
struct State {
- State(CloneContext& context, const VertexPulling::Config& c)
- : ctx(context), cfg(c) {}
- State(const State&) = default;
- ~State() = default;
-
- /// LocationReplacement describes an ast::Variable replacement for a
- /// location input.
- struct LocationReplacement {
- /// The variable to replace in the source Program
- ast::Variable* from;
- /// The replacement to use in the target ProgramBuilder
- ast::Variable* to;
- };
-
- struct LocationInfo {
- std::function<const ast::Expression*()> expr;
- const sem::Type* type;
- };
-
- CloneContext& ctx;
- VertexPulling::Config const cfg;
- std::unordered_map<uint32_t, LocationInfo> location_info;
- std::function<const ast::Expression*()> vertex_index_expr = nullptr;
- std::function<const ast::Expression*()> instance_index_expr = nullptr;
- Symbol pulling_position_name;
- Symbol struct_buffer_name;
- std::unordered_map<uint32_t, Symbol> vertex_buffer_names;
- ast::VariableList new_function_parameters;
-
- /// Generate the vertex buffer binding name
- /// @param index index to append to buffer name
- Symbol GetVertexBufferName(uint32_t index) {
- return utils::GetOrCreate(vertex_buffer_names, index, [&] {
- static const char kVertexBufferNamePrefix[] =
- "tint_pulling_vertex_buffer_";
- return ctx.dst->Symbols().New(kVertexBufferNamePrefix +
- std::to_string(index));
- });
- }
-
- /// Lazily generates the structure buffer symbol
- Symbol GetStructBufferName() {
- if (!struct_buffer_name.IsValid()) {
- static const char kStructBufferName[] = "tint_vertex_data";
- struct_buffer_name = ctx.dst->Symbols().New(kStructBufferName);
- }
- return struct_buffer_name;
- }
-
- /// Adds storage buffer decorated variables for the vertex buffers
- void AddVertexStorageBuffers() {
- // Creating the struct type
- static const char kStructName[] = "TintVertexData";
- auto* struct_type = ctx.dst->Structure(
- ctx.dst->Symbols().New(kStructName),
- {
- ctx.dst->Member(GetStructBufferName(),
- ctx.dst->ty.array<ProgramBuilder::u32>()),
+ State(CloneContext& context, const VertexPulling::Config& c) : ctx(context), cfg(c) {}
+ State(const State&) = default;
+ ~State() = default;
+
+ /// LocationReplacement describes an ast::Variable replacement for a
+ /// location input.
+ struct LocationReplacement {
+ /// The variable to replace in the source Program
+ ast::Variable* from;
+ /// The replacement to use in the target ProgramBuilder
+ ast::Variable* to;
+ };
+
+ struct LocationInfo {
+ std::function<const ast::Expression*()> expr;
+ const sem::Type* type;
+ };
+
+ CloneContext& ctx;
+ VertexPulling::Config const cfg;
+ std::unordered_map<uint32_t, LocationInfo> location_info;
+ std::function<const ast::Expression*()> vertex_index_expr = nullptr;
+ std::function<const ast::Expression*()> instance_index_expr = nullptr;
+ Symbol pulling_position_name;
+ Symbol struct_buffer_name;
+ std::unordered_map<uint32_t, Symbol> vertex_buffer_names;
+ ast::VariableList new_function_parameters;
+
+ /// Generate the vertex buffer binding name
+ /// @param index index to append to buffer name
+ Symbol GetVertexBufferName(uint32_t index) {
+ return utils::GetOrCreate(vertex_buffer_names, index, [&] {
+ static const char kVertexBufferNamePrefix[] = "tint_pulling_vertex_buffer_";
+ return ctx.dst->Symbols().New(kVertexBufferNamePrefix + std::to_string(index));
});
- for (uint32_t i = 0; i < cfg.vertex_state.size(); ++i) {
- // The decorated variable with struct type
- ctx.dst->Global(
- GetVertexBufferName(i), ctx.dst->ty.Of(struct_type),
- ast::StorageClass::kStorage, ast::Access::kRead,
- ast::AttributeList{
- ctx.dst->create<ast::BindingAttribute>(i),
- ctx.dst->create<ast::GroupAttribute>(cfg.pulling_group),
- });
}
- }
-
- /// Creates and returns the assignment to the variables from the buffers
- ast::BlockStatement* CreateVertexPullingPreamble() {
- // Assign by looking at the vertex descriptor to find attributes with
- // matching location.
-
- ast::StatementList stmts;
-
- for (uint32_t buffer_idx = 0; buffer_idx < cfg.vertex_state.size();
- ++buffer_idx) {
- const VertexBufferLayoutDescriptor& buffer_layout =
- cfg.vertex_state[buffer_idx];
-
- if ((buffer_layout.array_stride & 3) != 0) {
- ctx.dst->Diagnostics().add_error(
- diag::System::Transform,
- "WebGPU requires that vertex stride must be a multiple of 4 bytes, "
- "but VertexPulling array stride for buffer " +
- std::to_string(buffer_idx) + " was " +
- std::to_string(buffer_layout.array_stride) + " bytes");
- return nullptr;
- }
-
- auto* index_expr = buffer_layout.step_mode == VertexStepMode::kVertex
- ? vertex_index_expr()
- : instance_index_expr();
-
- // buffer_array_base is the base array offset for all the vertex
- // attributes. These are units of uint (4 bytes).
- auto buffer_array_base = ctx.dst->Symbols().New(
- "buffer_array_base_" + std::to_string(buffer_idx));
-
- auto* attribute_offset = index_expr;
- if (buffer_layout.array_stride != 4) {
- attribute_offset =
- ctx.dst->Mul(index_expr, buffer_layout.array_stride / 4u);
- }
-
- // let pulling_offset_n = <attribute_offset>
- stmts.emplace_back(ctx.dst->Decl(
- ctx.dst->Const(buffer_array_base, nullptr, attribute_offset)));
-
- for (const VertexAttributeDescriptor& attribute_desc :
- buffer_layout.attributes) {
- auto it = location_info.find(attribute_desc.shader_location);
- if (it == location_info.end()) {
- continue;
- }
- auto& var = it->second;
-
- // Data type of the target WGSL variable
- auto var_dt = DataTypeOf(var.type);
- // Data type of the vertex stream attribute
- auto fmt_dt = DataTypeOf(attribute_desc.format);
-
- // Base types must match between the vertex stream and the WGSL variable
- if (var_dt.base_type != fmt_dt.base_type) {
- std::stringstream err;
- err << "VertexAttributeDescriptor for location "
- << std::to_string(attribute_desc.shader_location)
- << " has format " << attribute_desc.format
- << " but shader expects "
- << var.type->FriendlyName(ctx.src->Symbols());
- ctx.dst->Diagnostics().add_error(diag::System::Transform, err.str());
- return nullptr;
- }
- // Load the attribute value
- auto* fetch = Fetch(buffer_array_base, attribute_desc.offset,
- buffer_idx, attribute_desc.format);
-
- // The attribute value may not be of the desired vector width. If it is
- // not, we'll need to either reduce the width with a swizzle, or append
- // 0's and / or a 1.
- auto* value = fetch;
- if (var_dt.width < fmt_dt.width) {
- // WGSL variable vector width is smaller than the loaded vector width
- switch (var_dt.width) {
- case 1:
- value = ctx.dst->MemberAccessor(fetch, "x");
- break;
- case 2:
- value = ctx.dst->MemberAccessor(fetch, "xy");
- break;
- case 3:
- value = ctx.dst->MemberAccessor(fetch, "xyz");
- break;
- default:
- TINT_UNREACHABLE(Transform, ctx.dst->Diagnostics())
- << var_dt.width;
- return nullptr;
- }
- } else if (var_dt.width > fmt_dt.width) {
- // WGSL variable vector width is wider than the loaded vector width
- const ast::Type* ty = nullptr;
- ast::ExpressionList values{fetch};
- switch (var_dt.base_type) {
- case BaseType::kI32:
- ty = ctx.dst->ty.i32();
- for (uint32_t i = fmt_dt.width; i < var_dt.width; i++) {
- values.emplace_back(ctx.dst->Expr((i == 3) ? 1 : 0));
- }
- break;
- case BaseType::kU32:
- ty = ctx.dst->ty.u32();
- for (uint32_t i = fmt_dt.width; i < var_dt.width; i++) {
- values.emplace_back(ctx.dst->Expr((i == 3) ? 1u : 0u));
- }
- break;
- case BaseType::kF32:
- ty = ctx.dst->ty.f32();
- for (uint32_t i = fmt_dt.width; i < var_dt.width; i++) {
- values.emplace_back(ctx.dst->Expr((i == 3) ? 1.f : 0.f));
- }
- break;
- default:
- TINT_UNREACHABLE(Transform, ctx.dst->Diagnostics())
- << var_dt.base_type;
- return nullptr;
- }
- value = ctx.dst->Construct(ctx.dst->ty.vec(ty, var_dt.width), values);
+ /// Lazily generates the structure buffer symbol
+ Symbol GetStructBufferName() {
+ if (!struct_buffer_name.IsValid()) {
+ static const char kStructBufferName[] = "tint_vertex_data";
+ struct_buffer_name = ctx.dst->Symbols().New(kStructBufferName);
}
-
- // Assign the value to the WGSL variable
- stmts.emplace_back(ctx.dst->Assign(var.expr(), value));
- }
+ return struct_buffer_name;
}
- if (stmts.empty()) {
- return nullptr;
+ /// Adds storage buffer decorated variables for the vertex buffers
+ void AddVertexStorageBuffers() {
+ // Creating the struct type
+ static const char kStructName[] = "TintVertexData";
+ auto* struct_type =
+ ctx.dst->Structure(ctx.dst->Symbols().New(kStructName),
+ {
+ ctx.dst->Member(GetStructBufferName(), ctx.dst->ty.array<u32>()),
+ });
+ for (uint32_t i = 0; i < cfg.vertex_state.size(); ++i) {
+ // The decorated variable with struct type
+ ctx.dst->Global(GetVertexBufferName(i), ctx.dst->ty.Of(struct_type),
+ ast::StorageClass::kStorage, ast::Access::kRead,
+ ast::AttributeList{
+ ctx.dst->create<ast::BindingAttribute>(i),
+ ctx.dst->create<ast::GroupAttribute>(cfg.pulling_group),
+ });
+ }
}
- return ctx.dst->create<ast::BlockStatement>(stmts);
- }
-
-  /// Generates an expression reading a specific format from a buffer.
- /// @param array_base the symbol of the variable holding the base array offset
- /// of the vertex array (each index is 4-bytes).
- /// @param offset the byte offset of the data from `buffer_base`
- /// @param buffer the index of the vertex buffer
- /// @param format the format to read
- const ast::Expression* Fetch(Symbol array_base,
- uint32_t offset,
- uint32_t buffer,
- VertexFormat format) {
- using u32 = ProgramBuilder::u32;
- using i32 = ProgramBuilder::i32;
- using f32 = ProgramBuilder::f32;
-
- // Returns a u32 loaded from buffer_base + offset.
- auto load_u32 = [&] {
- return LoadPrimitive(array_base, offset, buffer, VertexFormat::kUint32);
- };
-
- // Returns a i32 loaded from buffer_base + offset.
- auto load_i32 = [&] { return ctx.dst->Bitcast<i32>(load_u32()); };
-
- // Returns a u32 loaded from buffer_base + offset + 4.
- auto load_next_u32 = [&] {
- return LoadPrimitive(array_base, offset + 4, buffer,
- VertexFormat::kUint32);
- };
-
- // Returns a i32 loaded from buffer_base + offset + 4.
- auto load_next_i32 = [&] { return ctx.dst->Bitcast<i32>(load_next_u32()); };
-
- // Returns a u16 loaded from offset, packed in the high 16 bits of a u32.
- // The low 16 bits are 0.
- // `min_alignment` must be a power of two.
- // `offset` must be `min_alignment` bytes aligned.
- auto load_u16_h = [&] {
- auto low_u32_offset = offset & ~3u;
- auto* low_u32 = LoadPrimitive(array_base, low_u32_offset, buffer,
- VertexFormat::kUint32);
- switch (offset & 3) {
- case 0:
- return ctx.dst->Shl(low_u32, 16u);
- case 1:
- return ctx.dst->And(ctx.dst->Shl(low_u32, 8u), 0xffff0000u);
- case 2:
- return ctx.dst->And(low_u32, 0xffff0000u);
- default: { // 3:
- auto* high_u32 = LoadPrimitive(array_base, low_u32_offset + 4, buffer,
- VertexFormat::kUint32);
- auto* shr = ctx.dst->Shr(low_u32, 8u);
- auto* shl = ctx.dst->Shl(high_u32, 24u);
- return ctx.dst->And(ctx.dst->Or(shl, shr), 0xffff0000u);
+ /// Creates and returns the assignment to the variables from the buffers
+ ast::BlockStatement* CreateVertexPullingPreamble() {
+ // Assign by looking at the vertex descriptor to find attributes with
+ // matching location.
+
+ ast::StatementList stmts;
+
+ for (uint32_t buffer_idx = 0; buffer_idx < cfg.vertex_state.size(); ++buffer_idx) {
+ const VertexBufferLayoutDescriptor& buffer_layout = cfg.vertex_state[buffer_idx];
+
+ if ((buffer_layout.array_stride & 3) != 0) {
+ ctx.dst->Diagnostics().add_error(
+ diag::System::Transform,
+ "WebGPU requires that vertex stride must be a multiple of 4 bytes, "
+ "but VertexPulling array stride for buffer " +
+ std::to_string(buffer_idx) + " was " +
+ std::to_string(buffer_layout.array_stride) + " bytes");
+ return nullptr;
+ }
+
+ auto* index_expr = buffer_layout.step_mode == VertexStepMode::kVertex
+ ? vertex_index_expr()
+ : instance_index_expr();
+
+ // buffer_array_base is the base array offset for all the vertex
+ // attributes. These are units of uint (4 bytes).
+ auto buffer_array_base =
+ ctx.dst->Symbols().New("buffer_array_base_" + std::to_string(buffer_idx));
+
+ auto* attribute_offset = index_expr;
+ if (buffer_layout.array_stride != 4) {
+ attribute_offset = ctx.dst->Mul(index_expr, u32(buffer_layout.array_stride / 4u));
+ }
+
+ // let pulling_offset_n = <attribute_offset>
+ stmts.emplace_back(
+ ctx.dst->Decl(ctx.dst->Let(buffer_array_base, nullptr, attribute_offset)));
+
+ for (const VertexAttributeDescriptor& attribute_desc : buffer_layout.attributes) {
+ auto it = location_info.find(attribute_desc.shader_location);
+ if (it == location_info.end()) {
+ continue;
+ }
+ auto& var = it->second;
+
+ // Data type of the target WGSL variable
+ auto var_dt = DataTypeOf(var.type);
+ // Data type of the vertex stream attribute
+ auto fmt_dt = DataTypeOf(attribute_desc.format);
+
+ // Base types must match between the vertex stream and the WGSL variable
+ if (var_dt.base_type != fmt_dt.base_type) {
+ std::stringstream err;
+ err << "VertexAttributeDescriptor for location "
+ << std::to_string(attribute_desc.shader_location) << " has format "
+ << attribute_desc.format << " but shader expects "
+ << var.type->FriendlyName(ctx.src->Symbols());
+ ctx.dst->Diagnostics().add_error(diag::System::Transform, err.str());
+ return nullptr;
+ }
+
+ // Load the attribute value
+ auto* fetch = Fetch(buffer_array_base, attribute_desc.offset, buffer_idx,
+ attribute_desc.format);
+
+ // The attribute value may not be of the desired vector width. If it is
+ // not, we'll need to either reduce the width with a swizzle, or append
+ // 0's and / or a 1.
+ auto* value = fetch;
+ if (var_dt.width < fmt_dt.width) {
+ // WGSL variable vector width is smaller than the loaded vector width
+ switch (var_dt.width) {
+ case 1:
+ value = ctx.dst->MemberAccessor(fetch, "x");
+ break;
+ case 2:
+ value = ctx.dst->MemberAccessor(fetch, "xy");
+ break;
+ case 3:
+ value = ctx.dst->MemberAccessor(fetch, "xyz");
+ break;
+ default:
+ TINT_UNREACHABLE(Transform, ctx.dst->Diagnostics()) << var_dt.width;
+ return nullptr;
+ }
+ } else if (var_dt.width > fmt_dt.width) {
+ // WGSL variable vector width is wider than the loaded vector width
+ const ast::Type* ty = nullptr;
+ ast::ExpressionList values{fetch};
+ switch (var_dt.base_type) {
+ case BaseType::kI32:
+ ty = ctx.dst->ty.i32();
+ for (uint32_t i = fmt_dt.width; i < var_dt.width; i++) {
+ values.emplace_back(ctx.dst->Expr((i == 3) ? 1_i : 0_i));
+ }
+ break;
+ case BaseType::kU32:
+ ty = ctx.dst->ty.u32();
+ for (uint32_t i = fmt_dt.width; i < var_dt.width; i++) {
+ values.emplace_back(ctx.dst->Expr((i == 3) ? 1_u : 0_u));
+ }
+ break;
+ case BaseType::kF32:
+ ty = ctx.dst->ty.f32();
+ for (uint32_t i = fmt_dt.width; i < var_dt.width; i++) {
+ values.emplace_back(ctx.dst->Expr((i == 3) ? 1_f : 0_f));
+ }
+ break;
+ default:
+ TINT_UNREACHABLE(Transform, ctx.dst->Diagnostics()) << var_dt.base_type;
+ return nullptr;
+ }
+ value = ctx.dst->Construct(ctx.dst->ty.vec(ty, var_dt.width), values);
+ }
+
+ // Assign the value to the WGSL variable
+ stmts.emplace_back(ctx.dst->Assign(var.expr(), value));
+ }
}
- }
- };
- // Returns a u16 loaded from offset, packed in the low 16 bits of a u32.
- // The high 16 bits are 0.
- auto load_u16_l = [&] {
- auto low_u32_offset = offset & ~3u;
- auto* low_u32 = LoadPrimitive(array_base, low_u32_offset, buffer,
- VertexFormat::kUint32);
- switch (offset & 3) {
- case 0:
- return ctx.dst->And(low_u32, 0xffffu);
- case 1:
- return ctx.dst->And(ctx.dst->Shr(low_u32, 8u), 0xffffu);
- case 2:
- return ctx.dst->Shr(low_u32, 16u);
- default: { // 3:
- auto* high_u32 = LoadPrimitive(array_base, low_u32_offset + 4, buffer,
- VertexFormat::kUint32);
- auto* shr = ctx.dst->Shr(low_u32, 24u);
- auto* shl = ctx.dst->Shl(high_u32, 8u);
- return ctx.dst->And(ctx.dst->Or(shl, shr), 0xffffu);
+ if (stmts.empty()) {
+ return nullptr;
}
- }
- };
-
- // Returns a i16 loaded from offset, packed in the high 16 bits of a u32.
- // The low 16 bits are 0.
- auto load_i16_h = [&] { return ctx.dst->Bitcast<i32>(load_u16_h()); };
-
- // Assumptions are made that alignment must be at least as large as the size
- // of a single component.
- switch (format) {
- // Basic primitives
- case VertexFormat::kUint32:
- case VertexFormat::kSint32:
- case VertexFormat::kFloat32:
- return LoadPrimitive(array_base, offset, buffer, format);
-
- // Vectors of basic primitives
- case VertexFormat::kUint32x2:
- return LoadVec(array_base, offset, buffer, 4, ctx.dst->ty.u32(),
- VertexFormat::kUint32, 2);
- case VertexFormat::kUint32x3:
- return LoadVec(array_base, offset, buffer, 4, ctx.dst->ty.u32(),
- VertexFormat::kUint32, 3);
- case VertexFormat::kUint32x4:
- return LoadVec(array_base, offset, buffer, 4, ctx.dst->ty.u32(),
- VertexFormat::kUint32, 4);
- case VertexFormat::kSint32x2:
- return LoadVec(array_base, offset, buffer, 4, ctx.dst->ty.i32(),
- VertexFormat::kSint32, 2);
- case VertexFormat::kSint32x3:
- return LoadVec(array_base, offset, buffer, 4, ctx.dst->ty.i32(),
- VertexFormat::kSint32, 3);
- case VertexFormat::kSint32x4:
- return LoadVec(array_base, offset, buffer, 4, ctx.dst->ty.i32(),
- VertexFormat::kSint32, 4);
- case VertexFormat::kFloat32x2:
- return LoadVec(array_base, offset, buffer, 4, ctx.dst->ty.f32(),
- VertexFormat::kFloat32, 2);
- case VertexFormat::kFloat32x3:
- return LoadVec(array_base, offset, buffer, 4, ctx.dst->ty.f32(),
- VertexFormat::kFloat32, 3);
- case VertexFormat::kFloat32x4:
- return LoadVec(array_base, offset, buffer, 4, ctx.dst->ty.f32(),
- VertexFormat::kFloat32, 4);
-
- case VertexFormat::kUint8x2: {
- // yyxx0000, yyxx0000
- auto* u16s = ctx.dst->vec2<u32>(load_u16_h());
- // xx000000, yyxx0000
- auto* shl = ctx.dst->Shl(u16s, ctx.dst->vec2<u32>(8u, 0u));
- // 000000xx, 000000yy
- return ctx.dst->Shr(shl, ctx.dst->vec2<u32>(24u));
- }
- case VertexFormat::kUint8x4: {
- // wwzzyyxx, wwzzyyxx, wwzzyyxx, wwzzyyxx
- auto* u32s = ctx.dst->vec4<u32>(load_u32());
- // xx000000, yyxx0000, zzyyxx00, wwzzyyxx
- auto* shl = ctx.dst->Shl(u32s, ctx.dst->vec4<u32>(24u, 16u, 8u, 0u));
- // 000000xx, 000000yy, 000000zz, 000000ww
- return ctx.dst->Shr(shl, ctx.dst->vec4<u32>(24u));
- }
- case VertexFormat::kUint16x2: {
- // yyyyxxxx, yyyyxxxx
- auto* u32s = ctx.dst->vec2<u32>(load_u32());
- // xxxx0000, yyyyxxxx
- auto* shl = ctx.dst->Shl(u32s, ctx.dst->vec2<u32>(16u, 0u));
- // 0000xxxx, 0000yyyy
- return ctx.dst->Shr(shl, ctx.dst->vec2<u32>(16u));
- }
- case VertexFormat::kUint16x4: {
- // yyyyxxxx, wwwwzzzz
- auto* u32s = ctx.dst->vec2<u32>(load_u32(), load_next_u32());
- // yyyyxxxx, yyyyxxxx, wwwwzzzz, wwwwzzzz
- auto* xxyy = ctx.dst->MemberAccessor(u32s, "xxyy");
- // xxxx0000, yyyyxxxx, zzzz0000, wwwwzzzz
- auto* shl = ctx.dst->Shl(xxyy, ctx.dst->vec4<u32>(16u, 0u, 16u, 0u));
- // 0000xxxx, 0000yyyy, 0000zzzz, 0000wwww
- return ctx.dst->Shr(shl, ctx.dst->vec4<u32>(16u));
- }
- case VertexFormat::kSint8x2: {
- // yyxx0000, yyxx0000
- auto* i16s = ctx.dst->vec2<i32>(load_i16_h());
- // xx000000, yyxx0000
- auto* shl = ctx.dst->Shl(i16s, ctx.dst->vec2<u32>(8u, 0u));
- // ssssssxx, ssssssyy
- return ctx.dst->Shr(shl, ctx.dst->vec2<u32>(24u));
- }
- case VertexFormat::kSint8x4: {
- // wwzzyyxx, wwzzyyxx, wwzzyyxx, wwzzyyxx
- auto* i32s = ctx.dst->vec4<i32>(load_i32());
- // xx000000, yyxx0000, zzyyxx00, wwzzyyxx
- auto* shl = ctx.dst->Shl(i32s, ctx.dst->vec4<u32>(24u, 16u, 8u, 0u));
- // ssssssxx, ssssssyy, sssssszz, ssssssww
- return ctx.dst->Shr(shl, ctx.dst->vec4<u32>(24u));
- }
- case VertexFormat::kSint16x2: {
- // yyyyxxxx, yyyyxxxx
- auto* i32s = ctx.dst->vec2<i32>(load_i32());
- // xxxx0000, yyyyxxxx
- auto* shl = ctx.dst->Shl(i32s, ctx.dst->vec2<u32>(16u, 0u));
- // ssssxxxx, ssssyyyy
- return ctx.dst->Shr(shl, ctx.dst->vec2<u32>(16u));
- }
- case VertexFormat::kSint16x4: {
- // yyyyxxxx, wwwwzzzz
- auto* i32s = ctx.dst->vec2<i32>(load_i32(), load_next_i32());
- // yyyyxxxx, yyyyxxxx, wwwwzzzz, wwwwzzzz
- auto* xxyy = ctx.dst->MemberAccessor(i32s, "xxyy");
- // xxxx0000, yyyyxxxx, zzzz0000, wwwwzzzz
- auto* shl = ctx.dst->Shl(xxyy, ctx.dst->vec4<u32>(16u, 0u, 16u, 0u));
- // ssssxxxx, ssssyyyy, sssszzzz, sssswwww
- return ctx.dst->Shr(shl, ctx.dst->vec4<u32>(16u));
- }
- case VertexFormat::kUnorm8x2:
- return ctx.dst->MemberAccessor(
- ctx.dst->Call("unpack4x8unorm", load_u16_l()), "xy");
- case VertexFormat::kSnorm8x2:
- return ctx.dst->MemberAccessor(
- ctx.dst->Call("unpack4x8snorm", load_u16_l()), "xy");
- case VertexFormat::kUnorm8x4:
- return ctx.dst->Call("unpack4x8unorm", load_u32());
- case VertexFormat::kSnorm8x4:
- return ctx.dst->Call("unpack4x8snorm", load_u32());
- case VertexFormat::kUnorm16x2:
- return ctx.dst->Call("unpack2x16unorm", load_u32());
- case VertexFormat::kSnorm16x2:
- return ctx.dst->Call("unpack2x16snorm", load_u32());
- case VertexFormat::kFloat16x2:
- return ctx.dst->Call("unpack2x16float", load_u32());
- case VertexFormat::kUnorm16x4:
- return ctx.dst->vec4<f32>(
- ctx.dst->Call("unpack2x16unorm", load_u32()),
- ctx.dst->Call("unpack2x16unorm", load_next_u32()));
- case VertexFormat::kSnorm16x4:
- return ctx.dst->vec4<f32>(
- ctx.dst->Call("unpack2x16snorm", load_u32()),
- ctx.dst->Call("unpack2x16snorm", load_next_u32()));
- case VertexFormat::kFloat16x4:
- return ctx.dst->vec4<f32>(
- ctx.dst->Call("unpack2x16float", load_u32()),
- ctx.dst->Call("unpack2x16float", load_next_u32()));
- }
- TINT_UNREACHABLE(Transform, ctx.dst->Diagnostics())
- << "format " << static_cast<int>(format);
- return nullptr;
- }
-
- /// Generates an expression reading an aligned basic type (u32, i32, f32) from
- /// a vertex buffer.
- /// @param array_base the symbol of the variable holding the base array offset
- /// of the vertex array (each index is 4-bytes).
- /// @param offset the byte offset of the data from `buffer_base`
- /// @param buffer the index of the vertex buffer
- /// @param format VertexFormat::kUint32, VertexFormat::kSint32 or
- /// VertexFormat::kFloat32
- const ast::Expression* LoadPrimitive(Symbol array_base,
- uint32_t offset,
- uint32_t buffer,
- VertexFormat format) {
- const ast::Expression* u32 = nullptr;
- if ((offset & 3) == 0) {
- // Aligned load.
-
- const ast ::Expression* index = nullptr;
- if (offset > 0) {
- index = ctx.dst->Add(array_base, offset / 4);
- } else {
- index = ctx.dst->Expr(array_base);
- }
- u32 = ctx.dst->IndexAccessor(
- ctx.dst->MemberAccessor(GetVertexBufferName(buffer),
- GetStructBufferName()),
- index);
-
- } else {
- // Unaligned load
- uint32_t offset_aligned = offset & ~3u;
- auto* low = LoadPrimitive(array_base, offset_aligned, buffer,
- VertexFormat::kUint32);
- auto* high = LoadPrimitive(array_base, offset_aligned + 4u, buffer,
- VertexFormat::kUint32);
-
- uint32_t shift = 8u * (offset & 3u);
-
- auto* low_shr = ctx.dst->Shr(low, shift);
- auto* high_shl = ctx.dst->Shl(high, 32u - shift);
- u32 = ctx.dst->Or(low_shr, high_shl);
+ return ctx.dst->create<ast::BlockStatement>(stmts);
}
- switch (format) {
- case VertexFormat::kUint32:
- return u32;
- case VertexFormat::kSint32:
- return ctx.dst->Bitcast(ctx.dst->ty.i32(), u32);
- case VertexFormat::kFloat32:
- return ctx.dst->Bitcast(ctx.dst->ty.f32(), u32);
- default:
- break;
- }
- TINT_UNREACHABLE(Transform, ctx.dst->Diagnostics())
- << "invalid format for LoadPrimitive" << static_cast<int>(format);
- return nullptr;
- }
-
- /// Generates an expression reading a vec2/3/4 from a vertex buffer.
- /// @param array_base the symbol of the variable holding the base array offset
- /// of the vertex array (each index is 4-bytes).
- /// @param offset the byte offset of the data from `buffer_base`
- /// @param buffer the index of the vertex buffer
- /// @param element_stride stride between elements, in bytes
- /// @param base_type underlying AST type
- /// @param base_format underlying vertex format
- /// @param count how many elements the vector has
- const ast::Expression* LoadVec(Symbol array_base,
+    /// Generates an expression reading a value of the given format from a vertex buffer.
+ /// @param array_base the symbol of the variable holding the base array offset
+    /// of the vertex array (each index is 4 bytes).
+ /// @param offset the byte offset of the data from `buffer_base`
+ /// @param buffer the index of the vertex buffer
+ /// @param format the format to read
+ const ast::Expression* Fetch(Symbol array_base,
uint32_t offset,
uint32_t buffer,
- uint32_t element_stride,
- const ast::Type* base_type,
- VertexFormat base_format,
- uint32_t count) {
- ast::ExpressionList expr_list;
- for (uint32_t i = 0; i < count; ++i) {
- // Offset read position by element_stride for each component
- uint32_t primitive_offset = offset + element_stride * i;
- expr_list.push_back(
- LoadPrimitive(array_base, primitive_offset, buffer, base_format));
- }
+ VertexFormat format) {
+ // Returns a u32 loaded from buffer_base + offset.
+ auto load_u32 = [&] {
+ return LoadPrimitive(array_base, offset, buffer, VertexFormat::kUint32);
+ };
- return ctx.dst->Construct(ctx.dst->create<ast::Vector>(base_type, count),
- std::move(expr_list));
- }
-
- /// Process a non-struct entry point parameter.
- /// Generate function-scope variables for location parameters, and record
- /// vertex_index and instance_index builtins if present.
- /// @param func the entry point function
- /// @param param the parameter to process
- void ProcessNonStructParameter(const ast::Function* func,
- const ast::Variable* param) {
- if (auto* location =
- ast::GetAttribute<ast::LocationAttribute>(param->attributes)) {
- // Create a function-scope variable to replace the parameter.
- auto func_var_sym = ctx.Clone(param->symbol);
- auto* func_var_type = ctx.Clone(param->type);
- auto* func_var = ctx.dst->Var(func_var_sym, func_var_type);
- ctx.InsertFront(func->body->statements, ctx.dst->Decl(func_var));
- // Capture mapping from location to the new variable.
- LocationInfo info;
- info.expr = [this, func_var]() { return ctx.dst->Expr(func_var); };
- info.type = ctx.src->Sem().Get(param)->Type();
- location_info[location->value] = info;
- } else if (auto* builtin = ast::GetAttribute<ast::BuiltinAttribute>(
- param->attributes)) {
- // Check for existing vertex_index and instance_index builtins.
- if (builtin->builtin == ast::Builtin::kVertexIndex) {
- vertex_index_expr = [this, param]() {
- return ctx.dst->Expr(ctx.Clone(param->symbol));
+        // Returns an i32 loaded from buffer_base + offset.
+ auto load_i32 = [&] { return ctx.dst->Bitcast<i32>(load_u32()); };
+
+ // Returns a u32 loaded from buffer_base + offset + 4.
+ auto load_next_u32 = [&] {
+ return LoadPrimitive(array_base, offset + 4, buffer, VertexFormat::kUint32);
};
- } else if (builtin->builtin == ast::Builtin::kInstanceIndex) {
- instance_index_expr = [this, param]() {
- return ctx.dst->Expr(ctx.Clone(param->symbol));
+
+        // Returns an i32 loaded from buffer_base + offset + 4.
+ auto load_next_i32 = [&] { return ctx.dst->Bitcast<i32>(load_next_u32()); };
+
+ // Returns a u16 loaded from offset, packed in the high 16 bits of a u32.
+ // The low 16 bits are 0.
+        // Handles any byte alignment of `offset`.
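+        // For example, when (offset & 3) == 1 the two target bytes sit in byte lanes
+        // 1 and 2 of the aligned word; shifting left by 8 moves them into bits 16..31
+        // and the mask clears the stray low byte.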
+ auto load_u16_h = [&] {
+ auto low_u32_offset = offset & ~3u;
+ auto* low_u32 =
+ LoadPrimitive(array_base, low_u32_offset, buffer, VertexFormat::kUint32);
+ switch (offset & 3) {
+ case 0:
+ return ctx.dst->Shl(low_u32, 16_u);
+ case 1:
+ return ctx.dst->And(ctx.dst->Shl(low_u32, 8_u), 0xffff0000_u);
+ case 2:
+ return ctx.dst->And(low_u32, 0xffff0000_u);
+ default: { // 3:
+ auto* high_u32 = LoadPrimitive(array_base, low_u32_offset + 4, buffer,
+ VertexFormat::kUint32);
+ auto* shr = ctx.dst->Shr(low_u32, 8_u);
+ auto* shl = ctx.dst->Shl(high_u32, 24_u);
+ return ctx.dst->And(ctx.dst->Or(shl, shr), 0xffff0000_u);
+ }
+ }
};
- }
- new_function_parameters.push_back(ctx.Clone(param));
- } else {
- TINT_ICE(Transform, ctx.dst->Diagnostics())
- << "Invalid entry point parameter";
- }
- }
-
- /// Process a struct entry point parameter.
- /// If the struct has members with location attributes, push the parameter to
- /// a function-scope variable and create a new struct parameter without those
- /// attributes. Record expressions for members that are vertex_index and
- /// instance_index builtins.
- /// @param func the entry point function
- /// @param param the parameter to process
- /// @param struct_ty the structure type
- void ProcessStructParameter(const ast::Function* func,
- const ast::Variable* param,
- const ast::Struct* struct_ty) {
- auto param_sym = ctx.Clone(param->symbol);
-
- // Process the struct members.
- bool has_locations = false;
- ast::StructMemberList members_to_clone;
- for (auto* member : struct_ty->members) {
- auto member_sym = ctx.Clone(member->symbol);
- std::function<const ast::Expression*()> member_expr = [this, param_sym,
- member_sym]() {
- return ctx.dst->MemberAccessor(param_sym, member_sym);
- };
-
- if (auto* location =
- ast::GetAttribute<ast::LocationAttribute>(member->attributes)) {
- // Capture mapping from location to struct member.
- LocationInfo info;
- info.expr = member_expr;
- info.type = ctx.src->Sem().Get(member)->Type();
- location_info[location->value] = info;
- has_locations = true;
- } else if (auto* builtin = ast::GetAttribute<ast::BuiltinAttribute>(
- member->attributes)) {
- // Check for existing vertex_index and instance_index builtins.
- if (builtin->builtin == ast::Builtin::kVertexIndex) {
- vertex_index_expr = member_expr;
- } else if (builtin->builtin == ast::Builtin::kInstanceIndex) {
- instance_index_expr = member_expr;
+
+ // Returns a u16 loaded from offset, packed in the low 16 bits of a u32.
+ // The high 16 bits are 0.
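+        // When (offset & 3) == 3 the u16 straddles two aligned words, so the top byte
+        // of the first word and the bottom byte of the second are OR'd back together.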
+ auto load_u16_l = [&] {
+ auto low_u32_offset = offset & ~3u;
+ auto* low_u32 =
+ LoadPrimitive(array_base, low_u32_offset, buffer, VertexFormat::kUint32);
+ switch (offset & 3) {
+ case 0:
+ return ctx.dst->And(low_u32, 0xffff_u);
+ case 1:
+ return ctx.dst->And(ctx.dst->Shr(low_u32, 8_u), 0xffff_u);
+ case 2:
+ return ctx.dst->Shr(low_u32, 16_u);
+ default: { // 3:
+ auto* high_u32 = LoadPrimitive(array_base, low_u32_offset + 4, buffer,
+ VertexFormat::kUint32);
+ auto* shr = ctx.dst->Shr(low_u32, 24_u);
+ auto* shl = ctx.dst->Shl(high_u32, 8_u);
+ return ctx.dst->And(ctx.dst->Or(shl, shr), 0xffff_u);
+ }
+ }
+ };
+
+        // Returns an i16 loaded from offset, packed in the high 16 bits of an i32.
+        // The low 16 bits are 0.
+ auto load_i16_h = [&] { return ctx.dst->Bitcast<i32>(load_u16_h()); };
+
+        // We assume the attribute's alignment is at least as large as the size of a
+        // single component.
+ switch (format) {
+ // Basic primitives
+ case VertexFormat::kUint32:
+ case VertexFormat::kSint32:
+ case VertexFormat::kFloat32:
+ return LoadPrimitive(array_base, offset, buffer, format);
+
+ // Vectors of basic primitives
+ case VertexFormat::kUint32x2:
+ return LoadVec(array_base, offset, buffer, 4, ctx.dst->ty.u32(),
+ VertexFormat::kUint32, 2);
+ case VertexFormat::kUint32x3:
+ return LoadVec(array_base, offset, buffer, 4, ctx.dst->ty.u32(),
+ VertexFormat::kUint32, 3);
+ case VertexFormat::kUint32x4:
+ return LoadVec(array_base, offset, buffer, 4, ctx.dst->ty.u32(),
+ VertexFormat::kUint32, 4);
+ case VertexFormat::kSint32x2:
+ return LoadVec(array_base, offset, buffer, 4, ctx.dst->ty.i32(),
+ VertexFormat::kSint32, 2);
+ case VertexFormat::kSint32x3:
+ return LoadVec(array_base, offset, buffer, 4, ctx.dst->ty.i32(),
+ VertexFormat::kSint32, 3);
+ case VertexFormat::kSint32x4:
+ return LoadVec(array_base, offset, buffer, 4, ctx.dst->ty.i32(),
+ VertexFormat::kSint32, 4);
+ case VertexFormat::kFloat32x2:
+ return LoadVec(array_base, offset, buffer, 4, ctx.dst->ty.f32(),
+ VertexFormat::kFloat32, 2);
+ case VertexFormat::kFloat32x3:
+ return LoadVec(array_base, offset, buffer, 4, ctx.dst->ty.f32(),
+ VertexFormat::kFloat32, 3);
+ case VertexFormat::kFloat32x4:
+ return LoadVec(array_base, offset, buffer, 4, ctx.dst->ty.f32(),
+ VertexFormat::kFloat32, 4);
+
+ case VertexFormat::kUint8x2: {
+ // yyxx0000, yyxx0000
+ auto* u16s = ctx.dst->vec2<u32>(load_u16_h());
+ // xx000000, yyxx0000
+ auto* shl = ctx.dst->Shl(u16s, ctx.dst->vec2<u32>(8_u, 0_u));
+ // 000000xx, 000000yy
+ return ctx.dst->Shr(shl, ctx.dst->vec2<u32>(24_u));
+ }
+ case VertexFormat::kUint8x4: {
+ // wwzzyyxx, wwzzyyxx, wwzzyyxx, wwzzyyxx
+ auto* u32s = ctx.dst->vec4<u32>(load_u32());
+ // xx000000, yyxx0000, zzyyxx00, wwzzyyxx
+ auto* shl = ctx.dst->Shl(u32s, ctx.dst->vec4<u32>(24_u, 16_u, 8_u, 0_u));
+ // 000000xx, 000000yy, 000000zz, 000000ww
+ return ctx.dst->Shr(shl, ctx.dst->vec4<u32>(24_u));
+ }
+ case VertexFormat::kUint16x2: {
+ // yyyyxxxx, yyyyxxxx
+ auto* u32s = ctx.dst->vec2<u32>(load_u32());
+ // xxxx0000, yyyyxxxx
+ auto* shl = ctx.dst->Shl(u32s, ctx.dst->vec2<u32>(16_u, 0_u));
+ // 0000xxxx, 0000yyyy
+ return ctx.dst->Shr(shl, ctx.dst->vec2<u32>(16_u));
+ }
+ case VertexFormat::kUint16x4: {
+ // yyyyxxxx, wwwwzzzz
+ auto* u32s = ctx.dst->vec2<u32>(load_u32(), load_next_u32());
+ // yyyyxxxx, yyyyxxxx, wwwwzzzz, wwwwzzzz
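+                // The .xxyy swizzle duplicates each packed word so a single vector
+                // shift pair below can extract all four 16-bit components.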
+ auto* xxyy = ctx.dst->MemberAccessor(u32s, "xxyy");
+ // xxxx0000, yyyyxxxx, zzzz0000, wwwwzzzz
+ auto* shl = ctx.dst->Shl(xxyy, ctx.dst->vec4<u32>(16_u, 0_u, 16_u, 0_u));
+ // 0000xxxx, 0000yyyy, 0000zzzz, 0000wwww
+ return ctx.dst->Shr(shl, ctx.dst->vec4<u32>(16_u));
+ }
+ case VertexFormat::kSint8x2: {
+ // yyxx0000, yyxx0000
+ auto* i16s = ctx.dst->vec2<i32>(load_i16_h());
+ // xx000000, yyxx0000
+ auto* shl = ctx.dst->Shl(i16s, ctx.dst->vec2<u32>(8_u, 0_u));
+ // ssssssxx, ssssssyy
+ return ctx.dst->Shr(shl, ctx.dst->vec2<u32>(24_u));
+ }
+ case VertexFormat::kSint8x4: {
+ // wwzzyyxx, wwzzyyxx, wwzzyyxx, wwzzyyxx
+ auto* i32s = ctx.dst->vec4<i32>(load_i32());
+ // xx000000, yyxx0000, zzyyxx00, wwzzyyxx
+ auto* shl = ctx.dst->Shl(i32s, ctx.dst->vec4<u32>(24_u, 16_u, 8_u, 0_u));
+ // ssssssxx, ssssssyy, sssssszz, ssssssww
+ return ctx.dst->Shr(shl, ctx.dst->vec4<u32>(24_u));
+ }
+ case VertexFormat::kSint16x2: {
+ // yyyyxxxx, yyyyxxxx
+ auto* i32s = ctx.dst->vec2<i32>(load_i32());
+ // xxxx0000, yyyyxxxx
+ auto* shl = ctx.dst->Shl(i32s, ctx.dst->vec2<u32>(16_u, 0_u));
+ // ssssxxxx, ssssyyyy
+ return ctx.dst->Shr(shl, ctx.dst->vec2<u32>(16_u));
+ }
+ case VertexFormat::kSint16x4: {
+ // yyyyxxxx, wwwwzzzz
+ auto* i32s = ctx.dst->vec2<i32>(load_i32(), load_next_i32());
+ // yyyyxxxx, yyyyxxxx, wwwwzzzz, wwwwzzzz
+ auto* xxyy = ctx.dst->MemberAccessor(i32s, "xxyy");
+ // xxxx0000, yyyyxxxx, zzzz0000, wwwwzzzz
+ auto* shl = ctx.dst->Shl(xxyy, ctx.dst->vec4<u32>(16_u, 0_u, 16_u, 0_u));
+ // ssssxxxx, ssssyyyy, sssszzzz, sssswwww
+ return ctx.dst->Shr(shl, ctx.dst->vec4<u32>(16_u));
+ }
+ case VertexFormat::kUnorm8x2:
+ return ctx.dst->MemberAccessor(ctx.dst->Call("unpack4x8unorm", load_u16_l()), "xy");
+ case VertexFormat::kSnorm8x2:
+ return ctx.dst->MemberAccessor(ctx.dst->Call("unpack4x8snorm", load_u16_l()), "xy");
+ case VertexFormat::kUnorm8x4:
+ return ctx.dst->Call("unpack4x8unorm", load_u32());
+ case VertexFormat::kSnorm8x4:
+ return ctx.dst->Call("unpack4x8snorm", load_u32());
+ case VertexFormat::kUnorm16x2:
+ return ctx.dst->Call("unpack2x16unorm", load_u32());
+ case VertexFormat::kSnorm16x2:
+ return ctx.dst->Call("unpack2x16snorm", load_u32());
+ case VertexFormat::kFloat16x2:
+ return ctx.dst->Call("unpack2x16float", load_u32());
+ case VertexFormat::kUnorm16x4:
+ return ctx.dst->vec4<f32>(ctx.dst->Call("unpack2x16unorm", load_u32()),
+ ctx.dst->Call("unpack2x16unorm", load_next_u32()));
+ case VertexFormat::kSnorm16x4:
+ return ctx.dst->vec4<f32>(ctx.dst->Call("unpack2x16snorm", load_u32()),
+ ctx.dst->Call("unpack2x16snorm", load_next_u32()));
+ case VertexFormat::kFloat16x4:
+ return ctx.dst->vec4<f32>(ctx.dst->Call("unpack2x16float", load_u32()),
+ ctx.dst->Call("unpack2x16float", load_next_u32()));
}
- members_to_clone.push_back(member);
- } else {
- TINT_ICE(Transform, ctx.dst->Diagnostics())
- << "Invalid entry point parameter";
- }
- }
- if (!has_locations) {
- // Nothing to do.
- new_function_parameters.push_back(ctx.Clone(param));
- return;
+ TINT_UNREACHABLE(Transform, ctx.dst->Diagnostics())
+ << "format " << static_cast<int>(format);
+ return nullptr;
}
- // Create a function-scope variable to replace the parameter.
- auto* func_var = ctx.dst->Var(param_sym, ctx.Clone(param->type));
- ctx.InsertFront(func->body->statements, ctx.dst->Decl(func_var));
-
- if (!members_to_clone.empty()) {
- // Create a new struct without the location attributes.
- ast::StructMemberList new_members;
- for (auto* member : members_to_clone) {
- auto member_sym = ctx.Clone(member->symbol);
- auto* member_type = ctx.Clone(member->type);
- auto member_attrs = ctx.Clone(member->attributes);
- new_members.push_back(
- ctx.dst->Member(member_sym, member_type, std::move(member_attrs)));
- }
- auto* new_struct = ctx.dst->Structure(ctx.dst->Sym(), new_members);
-
- // Create a new function parameter with this struct.
- auto* new_param =
- ctx.dst->Param(ctx.dst->Sym(), ctx.dst->ty.Of(new_struct));
- new_function_parameters.push_back(new_param);
-
- // Copy values from the new parameter to the function-scope variable.
- for (auto* member : members_to_clone) {
- auto member_name = ctx.Clone(member->symbol);
- ctx.InsertFront(
- func->body->statements,
- ctx.dst->Assign(ctx.dst->MemberAccessor(func_var, member_name),
- ctx.dst->MemberAccessor(new_param, member_name)));
- }
- }
- }
+ /// Generates an expression reading an aligned basic type (u32, i32, f32) from
+ /// a vertex buffer.
+ /// @param array_base the symbol of the variable holding the base array offset
+    /// of the vertex array (each index is 4 bytes).
+ /// @param offset the byte offset of the data from `buffer_base`
+ /// @param buffer the index of the vertex buffer
+ /// @param format VertexFormat::kUint32, VertexFormat::kSint32 or
+ /// VertexFormat::kFloat32
+ const ast::Expression* LoadPrimitive(Symbol array_base,
+ uint32_t offset,
+ uint32_t buffer,
+ VertexFormat format) {
+ const ast::Expression* u = nullptr;
+ if ((offset & 3) == 0) {
+ // Aligned load.
+
+            const ast::Expression* index = nullptr;
+ if (offset > 0) {
+ index = ctx.dst->Add(array_base, u32(offset / 4));
+ } else {
+ index = ctx.dst->Expr(array_base);
+ }
+ u = ctx.dst->IndexAccessor(
+ ctx.dst->MemberAccessor(GetVertexBufferName(buffer), GetStructBufferName()), index);
+
+ } else {
+ // Unaligned load
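+            // Read the two aligned words that straddle `offset` and stitch the value
+            // together: the top bytes of the low word become the low bytes of the
+            // result and the bottom bytes of the high word fill in the rest.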
+ uint32_t offset_aligned = offset & ~3u;
+ auto* low = LoadPrimitive(array_base, offset_aligned, buffer, VertexFormat::kUint32);
+ auto* high =
+ LoadPrimitive(array_base, offset_aligned + 4u, buffer, VertexFormat::kUint32);
+
+ uint32_t shift = 8u * (offset & 3u);
+
+ auto* low_shr = ctx.dst->Shr(low, u32(shift));
+ auto* high_shl = ctx.dst->Shl(high, u32(32u - shift));
+ u = ctx.dst->Or(low_shr, high_shl);
+ }
- /// Process an entry point function.
- /// @param func the entry point function
- void Process(const ast::Function* func) {
- if (func->body->Empty()) {
- return;
+ switch (format) {
+ case VertexFormat::kUint32:
+ return u;
+ case VertexFormat::kSint32:
+ return ctx.dst->Bitcast(ctx.dst->ty.i32(), u);
+ case VertexFormat::kFloat32:
+ return ctx.dst->Bitcast(ctx.dst->ty.f32(), u);
+ default:
+ break;
+ }
+ TINT_UNREACHABLE(Transform, ctx.dst->Diagnostics())
+ << "invalid format for LoadPrimitive" << static_cast<int>(format);
+ return nullptr;
}
- // Process entry point parameters.
- for (auto* param : func->params) {
- auto* sem = ctx.src->Sem().Get(param);
- if (auto* str = sem->Type()->As<sem::Struct>()) {
- ProcessStructParameter(func, param, str->Declaration());
- } else {
- ProcessNonStructParameter(func, param);
- }
+ /// Generates an expression reading a vec2/3/4 from a vertex buffer.
+ /// @param array_base the symbol of the variable holding the base array offset
+    /// of the vertex array (each index is 4 bytes).
+ /// @param offset the byte offset of the data from `buffer_base`
+ /// @param buffer the index of the vertex buffer
+ /// @param element_stride stride between elements, in bytes
+ /// @param base_type underlying AST type
+ /// @param base_format underlying vertex format
+ /// @param count how many elements the vector has
+ const ast::Expression* LoadVec(Symbol array_base,
+ uint32_t offset,
+ uint32_t buffer,
+ uint32_t element_stride,
+ const ast::Type* base_type,
+ VertexFormat base_format,
+ uint32_t count) {
+ ast::ExpressionList expr_list;
+ for (uint32_t i = 0; i < count; ++i) {
+ // Offset read position by element_stride for each component
+ uint32_t primitive_offset = offset + element_stride * i;
+ expr_list.push_back(LoadPrimitive(array_base, primitive_offset, buffer, base_format));
+ }
+
+ return ctx.dst->Construct(ctx.dst->create<ast::Vector>(base_type, count),
+ std::move(expr_list));
}
- // Insert new parameters for vertex_index and instance_index if needed.
- if (!vertex_index_expr) {
- for (const VertexBufferLayoutDescriptor& layout : cfg.vertex_state) {
- if (layout.step_mode == VertexStepMode::kVertex) {
- auto name = ctx.dst->Symbols().New("tint_pulling_vertex_index");
- new_function_parameters.push_back(
- ctx.dst->Param(name, ctx.dst->ty.u32(),
- {ctx.dst->Builtin(ast::Builtin::kVertexIndex)}));
- vertex_index_expr = [this, name]() { return ctx.dst->Expr(name); };
- break;
+ /// Process a non-struct entry point parameter.
+ /// Generate function-scope variables for location parameters, and record
+ /// vertex_index and instance_index builtins if present.
+ /// @param func the entry point function
+ /// @param param the parameter to process
+ void ProcessNonStructParameter(const ast::Function* func, const ast::Variable* param) {
+ if (auto* location = ast::GetAttribute<ast::LocationAttribute>(param->attributes)) {
+ // Create a function-scope variable to replace the parameter.
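+            // The location parameter itself is dropped from the new signature; the
+            // vertex pulling preamble assigns the fetched value into this variable.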
+ auto func_var_sym = ctx.Clone(param->symbol);
+ auto* func_var_type = ctx.Clone(param->type);
+ auto* func_var = ctx.dst->Var(func_var_sym, func_var_type);
+ ctx.InsertFront(func->body->statements, ctx.dst->Decl(func_var));
+ // Capture mapping from location to the new variable.
+ LocationInfo info;
+ info.expr = [this, func_var]() { return ctx.dst->Expr(func_var); };
+ info.type = ctx.src->Sem().Get(param)->Type();
+ location_info[location->value] = info;
+ } else if (auto* builtin = ast::GetAttribute<ast::BuiltinAttribute>(param->attributes)) {
+ // Check for existing vertex_index and instance_index builtins.
+ if (builtin->builtin == ast::Builtin::kVertexIndex) {
+ vertex_index_expr = [this, param]() {
+ return ctx.dst->Expr(ctx.Clone(param->symbol));
+ };
+ } else if (builtin->builtin == ast::Builtin::kInstanceIndex) {
+ instance_index_expr = [this, param]() {
+ return ctx.dst->Expr(ctx.Clone(param->symbol));
+ };
+ }
+ new_function_parameters.push_back(ctx.Clone(param));
+ } else {
+ TINT_ICE(Transform, ctx.dst->Diagnostics()) << "Invalid entry point parameter";
}
- }
}
- if (!instance_index_expr) {
- for (const VertexBufferLayoutDescriptor& layout : cfg.vertex_state) {
- if (layout.step_mode == VertexStepMode::kInstance) {
- auto name = ctx.dst->Symbols().New("tint_pulling_instance_index");
- new_function_parameters.push_back(
- ctx.dst->Param(name, ctx.dst->ty.u32(),
- {ctx.dst->Builtin(ast::Builtin::kInstanceIndex)}));
- instance_index_expr = [this, name]() { return ctx.dst->Expr(name); };
- break;
+
+ /// Process a struct entry point parameter.
+ /// If the struct has members with location attributes, push the parameter to
+ /// a function-scope variable and create a new struct parameter without those
+ /// attributes. Record expressions for members that are vertex_index and
+ /// instance_index builtins.
+ /// @param func the entry point function
+ /// @param param the parameter to process
+ /// @param struct_ty the structure type
+ void ProcessStructParameter(const ast::Function* func,
+ const ast::Variable* param,
+ const ast::Struct* struct_ty) {
+ auto param_sym = ctx.Clone(param->symbol);
+
+ // Process the struct members.
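+        // Builtin members are kept (cloned into a replacement struct below), while
+        // location members are dropped and filled from the pulled vertex data.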
+ bool has_locations = false;
+ ast::StructMemberList members_to_clone;
+ for (auto* member : struct_ty->members) {
+ auto member_sym = ctx.Clone(member->symbol);
+ std::function<const ast::Expression*()> member_expr = [this, param_sym, member_sym]() {
+ return ctx.dst->MemberAccessor(param_sym, member_sym);
+ };
+
+ if (auto* location = ast::GetAttribute<ast::LocationAttribute>(member->attributes)) {
+ // Capture mapping from location to struct member.
+ LocationInfo info;
+ info.expr = member_expr;
+ info.type = ctx.src->Sem().Get(member)->Type();
+ location_info[location->value] = info;
+ has_locations = true;
+ } else if (auto* builtin =
+ ast::GetAttribute<ast::BuiltinAttribute>(member->attributes)) {
+ // Check for existing vertex_index and instance_index builtins.
+ if (builtin->builtin == ast::Builtin::kVertexIndex) {
+ vertex_index_expr = member_expr;
+ } else if (builtin->builtin == ast::Builtin::kInstanceIndex) {
+ instance_index_expr = member_expr;
+ }
+ members_to_clone.push_back(member);
+ } else {
+ TINT_ICE(Transform, ctx.dst->Diagnostics()) << "Invalid entry point parameter";
+ }
+ }
+
+ if (!has_locations) {
+ // Nothing to do.
+ new_function_parameters.push_back(ctx.Clone(param));
+ return;
}
- }
- }
- // Generate vertex pulling preamble.
- if (auto* block = CreateVertexPullingPreamble()) {
- ctx.InsertFront(func->body->statements, block);
+ // Create a function-scope variable to replace the parameter.
+ auto* func_var = ctx.dst->Var(param_sym, ctx.Clone(param->type));
+ ctx.InsertFront(func->body->statements, ctx.dst->Decl(func_var));
+
+ if (!members_to_clone.empty()) {
+ // Create a new struct without the location attributes.
+ ast::StructMemberList new_members;
+ for (auto* member : members_to_clone) {
+ auto member_sym = ctx.Clone(member->symbol);
+ auto* member_type = ctx.Clone(member->type);
+ auto member_attrs = ctx.Clone(member->attributes);
+ new_members.push_back(
+ ctx.dst->Member(member_sym, member_type, std::move(member_attrs)));
+ }
+ auto* new_struct = ctx.dst->Structure(ctx.dst->Sym(), new_members);
+
+ // Create a new function parameter with this struct.
+ auto* new_param = ctx.dst->Param(ctx.dst->Sym(), ctx.dst->ty.Of(new_struct));
+ new_function_parameters.push_back(new_param);
+
+ // Copy values from the new parameter to the function-scope variable.
+ for (auto* member : members_to_clone) {
+ auto member_name = ctx.Clone(member->symbol);
+ ctx.InsertFront(func->body->statements,
+ ctx.dst->Assign(ctx.dst->MemberAccessor(func_var, member_name),
+ ctx.dst->MemberAccessor(new_param, member_name)));
+ }
+ }
}
- // Rewrite the function header with the new parameters.
- auto func_sym = ctx.Clone(func->symbol);
- auto* ret_type = ctx.Clone(func->return_type);
- auto* body = ctx.Clone(func->body);
- auto attrs = ctx.Clone(func->attributes);
- auto ret_attrs = ctx.Clone(func->return_type_attributes);
- auto* new_func = ctx.dst->create<ast::Function>(
- func->source, func_sym, new_function_parameters, ret_type, body,
- std::move(attrs), std::move(ret_attrs));
- ctx.Replace(func, new_func);
- }
+ /// Process an entry point function.
+ /// @param func the entry point function
+ void Process(const ast::Function* func) {
+ if (func->body->Empty()) {
+ return;
+ }
+
+ // Process entry point parameters.
+ for (auto* param : func->params) {
+ auto* sem = ctx.src->Sem().Get(param);
+ if (auto* str = sem->Type()->As<sem::Struct>()) {
+ ProcessStructParameter(func, param, str->Declaration());
+ } else {
+ ProcessNonStructParameter(func, param);
+ }
+ }
+
+ // Insert new parameters for vertex_index and instance_index if needed.
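+        // The pulling preamble indexes the vertex storage buffers with these builtins,
+        // so parameters are added here when the shader does not already declare them.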
+ if (!vertex_index_expr) {
+ for (const VertexBufferLayoutDescriptor& layout : cfg.vertex_state) {
+ if (layout.step_mode == VertexStepMode::kVertex) {
+ auto name = ctx.dst->Symbols().New("tint_pulling_vertex_index");
+ new_function_parameters.push_back(ctx.dst->Param(
+ name, ctx.dst->ty.u32(), {ctx.dst->Builtin(ast::Builtin::kVertexIndex)}));
+ vertex_index_expr = [this, name]() { return ctx.dst->Expr(name); };
+ break;
+ }
+ }
+ }
+ if (!instance_index_expr) {
+ for (const VertexBufferLayoutDescriptor& layout : cfg.vertex_state) {
+ if (layout.step_mode == VertexStepMode::kInstance) {
+ auto name = ctx.dst->Symbols().New("tint_pulling_instance_index");
+ new_function_parameters.push_back(ctx.dst->Param(
+ name, ctx.dst->ty.u32(), {ctx.dst->Builtin(ast::Builtin::kInstanceIndex)}));
+ instance_index_expr = [this, name]() { return ctx.dst->Expr(name); };
+ break;
+ }
+ }
+ }
+
+ // Generate vertex pulling preamble.
+ if (auto* block = CreateVertexPullingPreamble()) {
+ ctx.InsertFront(func->body->statements, block);
+ }
+
+ // Rewrite the function header with the new parameters.
+ auto func_sym = ctx.Clone(func->symbol);
+ auto* ret_type = ctx.Clone(func->return_type);
+ auto* body = ctx.Clone(func->body);
+ auto attrs = ctx.Clone(func->attributes);
+ auto ret_attrs = ctx.Clone(func->return_type_attributes);
+ auto* new_func =
+ ctx.dst->create<ast::Function>(func->source, func_sym, new_function_parameters,
+ ret_type, body, std::move(attrs), std::move(ret_attrs));
+ ctx.Replace(func, new_func);
+ }
};
} // namespace
@@ -902,42 +867,38 @@ struct State {
VertexPulling::VertexPulling() = default;
VertexPulling::~VertexPulling() = default;
-void VertexPulling::Run(CloneContext& ctx,
- const DataMap& inputs,
- DataMap&) const {
- auto cfg = cfg_;
- if (auto* cfg_data = inputs.Get<Config>()) {
- cfg = *cfg_data;
- }
-
- // Find entry point
- auto* func = ctx.src->AST().Functions().Find(
- ctx.src->Symbols().Get(cfg.entry_point_name),
- ast::PipelineStage::kVertex);
- if (func == nullptr) {
- ctx.dst->Diagnostics().add_error(diag::System::Transform,
- "Vertex stage entry point not found");
- return;
- }
-
- // TODO(idanr): Need to check shader locations in descriptor cover all
- // attributes
-
- // TODO(idanr): Make sure we covered all error cases, to guarantee the
- // following stages will pass
-
- State state{ctx, cfg};
- state.AddVertexStorageBuffers();
- state.Process(func);
-
- ctx.Clone();
+void VertexPulling::Run(CloneContext& ctx, const DataMap& inputs, DataMap&) const {
+ auto cfg = cfg_;
+ if (auto* cfg_data = inputs.Get<Config>()) {
+ cfg = *cfg_data;
+ }
+
+ // Find entry point
+ auto* func = ctx.src->AST().Functions().Find(ctx.src->Symbols().Get(cfg.entry_point_name),
+ ast::PipelineStage::kVertex);
+ if (func == nullptr) {
+ ctx.dst->Diagnostics().add_error(diag::System::Transform,
+ "Vertex stage entry point not found");
+ return;
+ }
+
+    // TODO(idanr): Check that the shader locations in the descriptor cover all
+    // attributes
+
+ // TODO(idanr): Make sure we covered all error cases, to guarantee the
+ // following stages will pass
+
+ State state{ctx, cfg};
+ state.AddVertexStorageBuffers();
+ state.Process(func);
+
+ ctx.Clone();
}
VertexPulling::Config::Config() = default;
VertexPulling::Config::Config(const Config&) = default;
VertexPulling::Config::~Config() = default;
-VertexPulling::Config& VertexPulling::Config::operator=(const Config&) =
- default;
+VertexPulling::Config& VertexPulling::Config::operator=(const Config&) = default;
VertexBufferLayoutDescriptor::VertexBufferLayoutDescriptor() = default;
diff --git a/chromium/third_party/dawn/src/tint/transform/vertex_pulling.h b/chromium/third_party/dawn/src/tint/transform/vertex_pulling.h
index ec0769b9b54..78756005deb 100644
--- a/chromium/third_party/dawn/src/tint/transform/vertex_pulling.h
+++ b/chromium/third_party/dawn/src/tint/transform/vertex_pulling.h
@@ -26,38 +26,38 @@ namespace tint::transform {
/// Describes the format of data in a vertex buffer
enum class VertexFormat {
- kUint8x2, // uint8x2
- kUint8x4, // uint8x4
- kSint8x2, // sint8x2
- kSint8x4, // sint8x4
- kUnorm8x2, // unorm8x2
- kUnorm8x4, // unorm8x4
- kSnorm8x2, // snorm8x2
- kSnorm8x4, // snorm8x4
- kUint16x2, // uint16x2
- kUint16x4, // uint16x4
- kSint16x2, // sint16x2
- kSint16x4, // sint16x4
- kUnorm16x2, // unorm16x2
- kUnorm16x4, // unorm16x4
- kSnorm16x2, // snorm16x2
- kSnorm16x4, // snorm16x4
- kFloat16x2, // float16x2
- kFloat16x4, // float16x4
- kFloat32, // float32
- kFloat32x2, // float32x2
- kFloat32x3, // float32x3
- kFloat32x4, // float32x4
- kUint32, // uint32
- kUint32x2, // uint32x2
- kUint32x3, // uint32x3
- kUint32x4, // uint32x4
- kSint32, // sint32
- kSint32x2, // sint32x2
- kSint32x3, // sint32x3
- kSint32x4, // sint32x4
-
- kLastEntry = kSint32x4,
+ kUint8x2, // uint8x2
+ kUint8x4, // uint8x4
+ kSint8x2, // sint8x2
+ kSint8x4, // sint8x4
+ kUnorm8x2, // unorm8x2
+ kUnorm8x4, // unorm8x4
+ kSnorm8x2, // snorm8x2
+ kSnorm8x4, // snorm8x4
+ kUint16x2, // uint16x2
+ kUint16x4, // uint16x4
+ kSint16x2, // sint16x2
+ kSint16x4, // sint16x4
+ kUnorm16x2, // unorm16x2
+ kUnorm16x4, // unorm16x4
+ kSnorm16x2, // snorm16x2
+ kSnorm16x4, // snorm16x4
+ kFloat16x2, // float16x2
+ kFloat16x4, // float16x4
+ kFloat32, // float32
+ kFloat32x2, // float32x2
+ kFloat32x3, // float32x3
+ kFloat32x4, // float32x4
+ kUint32, // uint32
+ kUint32x2, // uint32x2
+ kUint32x3, // uint32x3
+ kUint32x4, // uint32x4
+ kSint32, // sint32
+ kSint32x2, // sint32x2
+ kSint32x3, // sint32x3
+ kSint32x4, // sint32x4
+
+ kLastEntry = kSint32x4,
};
/// Describes whether a vertex attribute increments with vertex index or instance
@@ -66,44 +66,42 @@ enum class VertexStepMode { kVertex, kInstance, kLastEntry = kInstance };
/// Describes a vertex attribute within a buffer
struct VertexAttributeDescriptor {
- /// The format of the attribute
- VertexFormat format;
- /// The byte offset of the attribute in the buffer
- uint32_t offset;
- /// The shader location used for the attribute
- uint32_t shader_location;
+ /// The format of the attribute
+ VertexFormat format;
+ /// The byte offset of the attribute in the buffer
+ uint32_t offset;
+ /// The shader location used for the attribute
+ uint32_t shader_location;
};
/// Describes a buffer containing multiple vertex attributes
struct VertexBufferLayoutDescriptor {
- /// Constructor
- VertexBufferLayoutDescriptor();
- /// Constructor
- /// @param in_array_stride the array stride of the in buffer
- /// @param in_step_mode the step mode of the in buffer
- /// @param in_attributes the in attributes
- VertexBufferLayoutDescriptor(
- uint32_t in_array_stride,
- VertexStepMode in_step_mode,
- std::vector<VertexAttributeDescriptor> in_attributes);
- /// Copy constructor
- /// @param other the struct to copy
- VertexBufferLayoutDescriptor(const VertexBufferLayoutDescriptor& other);
-
- /// Assignment operator
- /// @param other the struct to copy
- /// @returns this struct
- VertexBufferLayoutDescriptor& operator=(
- const VertexBufferLayoutDescriptor& other);
-
- ~VertexBufferLayoutDescriptor();
-
- /// The array stride used in the in buffer
- uint32_t array_stride = 0u;
- /// The input step mode used
- VertexStepMode step_mode = VertexStepMode::kVertex;
- /// The vertex attributes
- std::vector<VertexAttributeDescriptor> attributes;
+ /// Constructor
+ VertexBufferLayoutDescriptor();
+ /// Constructor
+ /// @param in_array_stride the array stride of the in buffer
+ /// @param in_step_mode the step mode of the in buffer
+ /// @param in_attributes the in attributes
+ VertexBufferLayoutDescriptor(uint32_t in_array_stride,
+ VertexStepMode in_step_mode,
+ std::vector<VertexAttributeDescriptor> in_attributes);
+ /// Copy constructor
+ /// @param other the struct to copy
+ VertexBufferLayoutDescriptor(const VertexBufferLayoutDescriptor& other);
+
+ /// Assignment operator
+ /// @param other the struct to copy
+ /// @returns this struct
+ VertexBufferLayoutDescriptor& operator=(const VertexBufferLayoutDescriptor& other);
+
+ ~VertexBufferLayoutDescriptor();
+
+ /// The array stride used in the in buffer
+ uint32_t array_stride = 0u;
+ /// The input step mode used
+ VertexStepMode step_mode = VertexStepMode::kVertex;
+ /// The vertex attributes
+ std::vector<VertexAttributeDescriptor> attributes;
};
/// Describes vertex state, which consists of many buffers containing vertex
@@ -131,52 +129,50 @@ using VertexStateDescriptor = std::vector<VertexBufferLayoutDescriptor>;
/// these smaller types into the base types such as `f32` and `u32` for the
/// shader to use.
class VertexPulling : public Castable<VertexPulling, Transform> {
- public:
- /// Configuration options for the transform
- struct Config : public Castable<Config, Data> {
- /// Constructor
- Config();
+ public:
+ /// Configuration options for the transform
+ struct Config : public Castable<Config, Data> {
+ /// Constructor
+ Config();
- /// Copy constructor
- Config(const Config&);
+ /// Copy constructor
+ Config(const Config&);
- /// Destructor
- ~Config() override;
+ /// Destructor
+ ~Config() override;
- /// Assignment operator
- /// @returns this Config
- Config& operator=(const Config&);
-
- /// The entry point to add assignments into
- std::string entry_point_name;
-
- /// The vertex state descriptor, containing info about attributes
- VertexStateDescriptor vertex_state;
-
- /// The "group" we will put all our vertex buffers into (as storage buffers)
- /// Default to 4 as it is past the limits of user-accessible groups
- uint32_t pulling_group = 4u;
- };
-
- /// Constructor
- VertexPulling();
-
- /// Destructor
- ~VertexPulling() override;
-
- protected:
- /// Runs the transform using the CloneContext built for transforming a
- /// program. Run() is responsible for calling Clone() on the CloneContext.
- /// @param ctx the CloneContext primed with the input program and
- /// ProgramBuilder
- /// @param inputs optional extra transform-specific input data
- /// @param outputs optional extra transform-specific output data
- void Run(CloneContext& ctx,
- const DataMap& inputs,
- DataMap& outputs) const override;
-
- private:
- Config cfg_;
+ /// Assignment operator
+ /// @returns this Config
+ Config& operator=(const Config&);
+
+ /// The entry point to add assignments into
+ std::string entry_point_name;
+
+ /// The vertex state descriptor, containing info about attributes
+ VertexStateDescriptor vertex_state;
+
+ /// The "group" we will put all our vertex buffers into (as storage buffers)
+        /// Defaults to 4 as it is past the limits of user-accessible groups
+ uint32_t pulling_group = 4u;
+ };
+
+ /// Constructor
+ VertexPulling();
+
+ /// Destructor
+ ~VertexPulling() override;
+
+ protected:
+ /// Runs the transform using the CloneContext built for transforming a
+ /// program. Run() is responsible for calling Clone() on the CloneContext.
+ /// @param ctx the CloneContext primed with the input program and
+ /// ProgramBuilder
+ /// @param inputs optional extra transform-specific input data
+ /// @param outputs optional extra transform-specific output data
+ void Run(CloneContext& ctx, const DataMap& inputs, DataMap& outputs) const override;
+
+ private:
+ Config cfg_;
};
} // namespace tint::transform
diff --git a/chromium/third_party/dawn/src/tint/transform/vertex_pulling_test.cc b/chromium/third_party/dawn/src/tint/transform/vertex_pulling_test.cc
index 3c19aa6d48e..5fb8b1cd405 100644
--- a/chromium/third_party/dawn/src/tint/transform/vertex_pulling_test.cc
+++ b/chromium/third_party/dawn/src/tint/transform/vertex_pulling_test.cc
@@ -24,124 +24,123 @@ namespace {
using VertexPullingTest = TransformTest;
TEST_F(VertexPullingTest, Error_NoEntryPoint) {
- auto* src = "";
+ auto* src = "";
- auto* expect = "error: Vertex stage entry point not found";
+ auto* expect = "error: Vertex stage entry point not found";
- DataMap data;
- data.Add<VertexPulling::Config>();
- auto got = Run<VertexPulling>(src, data);
+ DataMap data;
+ data.Add<VertexPulling::Config>();
+ auto got = Run<VertexPulling>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(VertexPullingTest, Error_InvalidEntryPoint) {
- auto* src = R"(
-@stage(vertex)
+ auto* src = R"(
+@vertex
fn main() -> @builtin(position) vec4<f32> {
return vec4<f32>();
}
)";
- auto* expect = "error: Vertex stage entry point not found";
+ auto* expect = "error: Vertex stage entry point not found";
- VertexPulling::Config cfg;
- cfg.entry_point_name = "_";
+ VertexPulling::Config cfg;
+ cfg.entry_point_name = "_";
- DataMap data;
- data.Add<VertexPulling::Config>(cfg);
- auto got = Run<VertexPulling>(src, data);
+ DataMap data;
+ data.Add<VertexPulling::Config>(cfg);
+ auto got = Run<VertexPulling>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(VertexPullingTest, Error_EntryPointWrongStage) {
- auto* src = R"(
-@stage(fragment)
+ auto* src = R"(
+@fragment
fn main() {}
)";
- auto* expect = "error: Vertex stage entry point not found";
+ auto* expect = "error: Vertex stage entry point not found";
- VertexPulling::Config cfg;
- cfg.entry_point_name = "main";
+ VertexPulling::Config cfg;
+ cfg.entry_point_name = "main";
- DataMap data;
- data.Add<VertexPulling::Config>(cfg);
- auto got = Run<VertexPulling>(src, data);
+ DataMap data;
+ data.Add<VertexPulling::Config>(cfg);
+ auto got = Run<VertexPulling>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(VertexPullingTest, Error_BadStride) {
- auto* src = R"(
-@stage(vertex)
+ auto* src = R"(
+@vertex
fn main(@location(0) var_a : f32) -> @builtin(position) vec4<f32> {
return vec4<f32>(var_a, 0.0, 0.0, 1.0);
}
)";
- auto* expect =
- "error: WebGPU requires that vertex stride must be a multiple of 4 "
- "bytes, but VertexPulling array stride for buffer 0 was 15 bytes";
+ auto* expect =
+ "error: WebGPU requires that vertex stride must be a multiple of 4 "
+ "bytes, but VertexPulling array stride for buffer 0 was 15 bytes";
- VertexPulling::Config cfg;
- cfg.vertex_state = {
- {{15, VertexStepMode::kVertex, {{VertexFormat::kFloat32, 0, 0}}}}};
- cfg.entry_point_name = "main";
+ VertexPulling::Config cfg;
+ cfg.vertex_state = {{{15, VertexStepMode::kVertex, {{VertexFormat::kFloat32, 0, 0}}}}};
+ cfg.entry_point_name = "main";
- DataMap data;
- data.Add<VertexPulling::Config>(cfg);
- auto got = Run<VertexPulling>(src, data);
+ DataMap data;
+ data.Add<VertexPulling::Config>(cfg);
+ auto got = Run<VertexPulling>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(VertexPullingTest, BasicModule) {
- auto* src = R"(
-@stage(vertex)
+ auto* src = R"(
+@vertex
fn main() -> @builtin(position) vec4<f32> {
return vec4<f32>();
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct TintVertexData {
tint_vertex_data : array<u32>,
}
-@stage(vertex)
+@vertex
fn main() -> @builtin(position) vec4<f32> {
return vec4<f32>();
}
)";
- VertexPulling::Config cfg;
- cfg.entry_point_name = "main";
+ VertexPulling::Config cfg;
+ cfg.entry_point_name = "main";
- DataMap data;
- data.Add<VertexPulling::Config>(cfg);
- auto got = Run<VertexPulling>(src, data);
+ DataMap data;
+ data.Add<VertexPulling::Config>(cfg);
+ auto got = Run<VertexPulling>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(VertexPullingTest, OneAttribute) {
- auto* src = R"(
-@stage(vertex)
+ auto* src = R"(
+@vertex
fn main(@location(0) var_a : f32) -> @builtin(position) vec4<f32> {
return vec4<f32>(var_a, 0.0, 0.0, 1.0);
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct TintVertexData {
tint_vertex_data : array<u32>,
}
@binding(0) @group(4) var<storage, read> tint_pulling_vertex_buffer_0 : TintVertexData;
-@stage(vertex)
+@vertex
fn main(@builtin(vertex_index) tint_pulling_vertex_index : u32) -> @builtin(position) vec4<f32> {
var var_a : f32;
{
@@ -152,34 +151,33 @@ fn main(@builtin(vertex_index) tint_pulling_vertex_index : u32) -> @builtin(posi
}
)";
- VertexPulling::Config cfg;
- cfg.vertex_state = {
- {{4, VertexStepMode::kVertex, {{VertexFormat::kFloat32, 0, 0}}}}};
- cfg.entry_point_name = "main";
+ VertexPulling::Config cfg;
+ cfg.vertex_state = {{{4, VertexStepMode::kVertex, {{VertexFormat::kFloat32, 0, 0}}}}};
+ cfg.entry_point_name = "main";
- DataMap data;
- data.Add<VertexPulling::Config>(cfg);
- auto got = Run<VertexPulling>(src, data);
+ DataMap data;
+ data.Add<VertexPulling::Config>(cfg);
+ auto got = Run<VertexPulling>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(VertexPullingTest, OneInstancedAttribute) {
- auto* src = R"(
-@stage(vertex)
+ auto* src = R"(
+@vertex
fn main(@location(0) var_a : f32) -> @builtin(position) vec4<f32> {
return vec4<f32>(var_a, 0.0, 0.0, 1.0);
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct TintVertexData {
tint_vertex_data : array<u32>,
}
@binding(0) @group(4) var<storage, read> tint_pulling_vertex_buffer_0 : TintVertexData;
-@stage(vertex)
+@vertex
fn main(@builtin(instance_index) tint_pulling_instance_index : u32) -> @builtin(position) vec4<f32> {
var var_a : f32;
{
@@ -190,34 +188,33 @@ fn main(@builtin(instance_index) tint_pulling_instance_index : u32) -> @builtin(
}
)";
- VertexPulling::Config cfg;
- cfg.vertex_state = {
- {{4, VertexStepMode::kInstance, {{VertexFormat::kFloat32, 0, 0}}}}};
- cfg.entry_point_name = "main";
+ VertexPulling::Config cfg;
+ cfg.vertex_state = {{{4, VertexStepMode::kInstance, {{VertexFormat::kFloat32, 0, 0}}}}};
+ cfg.entry_point_name = "main";
- DataMap data;
- data.Add<VertexPulling::Config>(cfg);
- auto got = Run<VertexPulling>(src, data);
+ DataMap data;
+ data.Add<VertexPulling::Config>(cfg);
+ auto got = Run<VertexPulling>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(VertexPullingTest, OneAttributeDifferentOutputSet) {
- auto* src = R"(
-@stage(vertex)
+ auto* src = R"(
+@vertex
fn main(@location(0) var_a : f32) -> @builtin(position) vec4<f32> {
return vec4<f32>(var_a, 0.0, 0.0, 1.0);
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct TintVertexData {
tint_vertex_data : array<u32>,
}
@binding(0) @group(5) var<storage, read> tint_pulling_vertex_buffer_0 : TintVertexData;
-@stage(vertex)
+@vertex
fn main(@builtin(vertex_index) tint_pulling_vertex_index : u32) -> @builtin(position) vec4<f32> {
var var_a : f32;
{
@@ -228,32 +225,31 @@ fn main(@builtin(vertex_index) tint_pulling_vertex_index : u32) -> @builtin(posi
}
)";
- VertexPulling::Config cfg;
- cfg.vertex_state = {
- {{4, VertexStepMode::kVertex, {{VertexFormat::kFloat32, 0, 0}}}}};
- cfg.pulling_group = 5;
- cfg.entry_point_name = "main";
+ VertexPulling::Config cfg;
+ cfg.vertex_state = {{{4, VertexStepMode::kVertex, {{VertexFormat::kFloat32, 0, 0}}}}};
+ cfg.pulling_group = 5;
+ cfg.entry_point_name = "main";
- DataMap data;
- data.Add<VertexPulling::Config>(cfg);
- auto got = Run<VertexPulling>(src, data);
+ DataMap data;
+ data.Add<VertexPulling::Config>(cfg);
+ auto got = Run<VertexPulling>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(VertexPullingTest, OneAttribute_Struct) {
- auto* src = R"(
+ auto* src = R"(
struct Inputs {
@location(0) var_a : f32,
};
-@stage(vertex)
+@vertex
fn main(inputs : Inputs) -> @builtin(position) vec4<f32> {
return vec4<f32>(inputs.var_a, 0.0, 0.0, 1.0);
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct TintVertexData {
tint_vertex_data : array<u32>,
}
@@ -265,7 +261,7 @@ struct Inputs {
var_a : f32,
}
-@stage(vertex)
+@vertex
fn main(@builtin(vertex_index) tint_pulling_vertex_index : u32) -> @builtin(position) vec4<f32> {
var inputs : Inputs;
{
@@ -276,22 +272,21 @@ fn main(@builtin(vertex_index) tint_pulling_vertex_index : u32) -> @builtin(posi
}
)";
- VertexPulling::Config cfg;
- cfg.vertex_state = {
- {{4, VertexStepMode::kVertex, {{VertexFormat::kFloat32, 0, 0}}}}};
- cfg.entry_point_name = "main";
+ VertexPulling::Config cfg;
+ cfg.vertex_state = {{{4, VertexStepMode::kVertex, {{VertexFormat::kFloat32, 0, 0}}}}};
+ cfg.entry_point_name = "main";
- DataMap data;
- data.Add<VertexPulling::Config>(cfg);
- auto got = Run<VertexPulling>(src, data);
+ DataMap data;
+ data.Add<VertexPulling::Config>(cfg);
+ auto got = Run<VertexPulling>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
// We expect the transform to use existing builtin variables if it finds them
TEST_F(VertexPullingTest, ExistingVertexIndexAndInstanceIndex) {
- auto* src = R"(
-@stage(vertex)
+ auto* src = R"(
+@vertex
fn main(@location(0) var_a : f32,
@location(1) var_b : f32,
@builtin(vertex_index) custom_vertex_index : u32,
@@ -301,7 +296,7 @@ fn main(@location(0) var_a : f32,
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct TintVertexData {
tint_vertex_data : array<u32>,
}
@@ -310,7 +305,7 @@ struct TintVertexData {
@binding(1) @group(4) var<storage, read> tint_pulling_vertex_buffer_1 : TintVertexData;
-@stage(vertex)
+@vertex
fn main(@builtin(vertex_index) custom_vertex_index : u32, @builtin(instance_index) custom_instance_index : u32) -> @builtin(position) vec4<f32> {
var var_a : f32;
var var_b : f32;
@@ -324,30 +319,30 @@ fn main(@builtin(vertex_index) custom_vertex_index : u32, @builtin(instance_inde
}
)";
- VertexPulling::Config cfg;
- cfg.vertex_state = {{
- {
- 4,
- VertexStepMode::kVertex,
- {{VertexFormat::kFloat32, 0, 0}},
- },
- {
- 4,
- VertexStepMode::kInstance,
- {{VertexFormat::kFloat32, 0, 1}},
- },
- }};
- cfg.entry_point_name = "main";
+ VertexPulling::Config cfg;
+ cfg.vertex_state = {{
+ {
+ 4,
+ VertexStepMode::kVertex,
+ {{VertexFormat::kFloat32, 0, 0}},
+ },
+ {
+ 4,
+ VertexStepMode::kInstance,
+ {{VertexFormat::kFloat32, 0, 1}},
+ },
+ }};
+ cfg.entry_point_name = "main";
- DataMap data;
- data.Add<VertexPulling::Config>(cfg);
- auto got = Run<VertexPulling>(src, data);
+ DataMap data;
+ data.Add<VertexPulling::Config>(cfg);
+ auto got = Run<VertexPulling>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(VertexPullingTest, ExistingVertexIndexAndInstanceIndex_Struct) {
- auto* src = R"(
+ auto* src = R"(
struct Inputs {
@location(0) var_a : f32,
@location(1) var_b : f32,
@@ -355,13 +350,13 @@ struct Inputs {
@builtin(instance_index) custom_instance_index : u32,
};
-@stage(vertex)
+@vertex
fn main(inputs : Inputs) -> @builtin(position) vec4<f32> {
return vec4<f32>(inputs.var_a, inputs.var_b, 0.0, 1.0);
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct TintVertexData {
tint_vertex_data : array<u32>,
}
@@ -388,7 +383,7 @@ struct Inputs {
custom_instance_index : u32,
}
-@stage(vertex)
+@vertex
fn main(tint_symbol_1 : tint_symbol) -> @builtin(position) vec4<f32> {
var inputs : Inputs;
inputs.custom_vertex_index = tint_symbol_1.custom_vertex_index;
@@ -403,32 +398,31 @@ fn main(tint_symbol_1 : tint_symbol) -> @builtin(position) vec4<f32> {
}
)";
- VertexPulling::Config cfg;
- cfg.vertex_state = {{
- {
- 4,
- VertexStepMode::kVertex,
- {{VertexFormat::kFloat32, 0, 0}},
- },
- {
- 4,
- VertexStepMode::kInstance,
- {{VertexFormat::kFloat32, 0, 1}},
- },
- }};
- cfg.entry_point_name = "main";
-
- DataMap data;
- data.Add<VertexPulling::Config>(cfg);
- auto got = Run<VertexPulling>(src, data);
-
- EXPECT_EQ(expect, str(got));
-}
-
-TEST_F(VertexPullingTest,
- ExistingVertexIndexAndInstanceIndex_Struct_OutOfOrder) {
- auto* src = R"(
-@stage(vertex)
+ VertexPulling::Config cfg;
+ cfg.vertex_state = {{
+ {
+ 4,
+ VertexStepMode::kVertex,
+ {{VertexFormat::kFloat32, 0, 0}},
+ },
+ {
+ 4,
+ VertexStepMode::kInstance,
+ {{VertexFormat::kFloat32, 0, 1}},
+ },
+ }};
+ cfg.entry_point_name = "main";
+
+ DataMap data;
+ data.Add<VertexPulling::Config>(cfg);
+ auto got = Run<VertexPulling>(src, data);
+
+ EXPECT_EQ(expect, str(got));
+}
+
+TEST_F(VertexPullingTest, ExistingVertexIndexAndInstanceIndex_Struct_OutOfOrder) {
+ auto* src = R"(
+@vertex
fn main(inputs : Inputs) -> @builtin(position) vec4<f32> {
return vec4<f32>(inputs.var_a, inputs.var_b, 0.0, 1.0);
}
@@ -441,7 +435,7 @@ struct Inputs {
};
)";
- auto* expect = R"(
+ auto* expect = R"(
struct TintVertexData {
tint_vertex_data : array<u32>,
}
@@ -457,7 +451,7 @@ struct tint_symbol {
custom_instance_index : u32,
}
-@stage(vertex)
+@vertex
fn main(tint_symbol_1 : tint_symbol) -> @builtin(position) vec4<f32> {
var inputs : Inputs;
inputs.custom_vertex_index = tint_symbol_1.custom_vertex_index;
@@ -483,30 +477,30 @@ struct Inputs {
}
)";
- VertexPulling::Config cfg;
- cfg.vertex_state = {{
- {
- 4,
- VertexStepMode::kVertex,
- {{VertexFormat::kFloat32, 0, 0}},
- },
- {
- 4,
- VertexStepMode::kInstance,
- {{VertexFormat::kFloat32, 0, 1}},
- },
- }};
- cfg.entry_point_name = "main";
+ VertexPulling::Config cfg;
+ cfg.vertex_state = {{
+ {
+ 4,
+ VertexStepMode::kVertex,
+ {{VertexFormat::kFloat32, 0, 0}},
+ },
+ {
+ 4,
+ VertexStepMode::kInstance,
+ {{VertexFormat::kFloat32, 0, 1}},
+ },
+ }};
+ cfg.entry_point_name = "main";
- DataMap data;
- data.Add<VertexPulling::Config>(cfg);
- auto got = Run<VertexPulling>(src, data);
+ DataMap data;
+ data.Add<VertexPulling::Config>(cfg);
+ auto got = Run<VertexPulling>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(VertexPullingTest, ExistingVertexIndexAndInstanceIndex_SeparateStruct) {
- auto* src = R"(
+ auto* src = R"(
struct Inputs {
@location(0) var_a : f32,
@location(1) var_b : f32,
@@ -517,13 +511,13 @@ struct Indices {
@builtin(instance_index) custom_instance_index : u32,
};
-@stage(vertex)
+@vertex
fn main(inputs : Inputs, indices : Indices) -> @builtin(position) vec4<f32> {
return vec4<f32>(inputs.var_a, inputs.var_b, 0.0, 1.0);
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct TintVertexData {
tint_vertex_data : array<u32>,
}
@@ -546,7 +540,7 @@ struct Indices {
custom_instance_index : u32,
}
-@stage(vertex)
+@vertex
fn main(indices : Indices) -> @builtin(position) vec4<f32> {
var inputs : Inputs;
{
@@ -559,32 +553,31 @@ fn main(indices : Indices) -> @builtin(position) vec4<f32> {
}
)";
- VertexPulling::Config cfg;
- cfg.vertex_state = {{
- {
- 4,
- VertexStepMode::kVertex,
- {{VertexFormat::kFloat32, 0, 0}},
- },
- {
- 4,
- VertexStepMode::kInstance,
- {{VertexFormat::kFloat32, 0, 1}},
- },
- }};
- cfg.entry_point_name = "main";
-
- DataMap data;
- data.Add<VertexPulling::Config>(cfg);
- auto got = Run<VertexPulling>(src, data);
-
- EXPECT_EQ(expect, str(got));
-}
-
-TEST_F(VertexPullingTest,
- ExistingVertexIndexAndInstanceIndex_SeparateStruct_OutOfOrder) {
- auto* src = R"(
-@stage(vertex)
+ VertexPulling::Config cfg;
+ cfg.vertex_state = {{
+ {
+ 4,
+ VertexStepMode::kVertex,
+ {{VertexFormat::kFloat32, 0, 0}},
+ },
+ {
+ 4,
+ VertexStepMode::kInstance,
+ {{VertexFormat::kFloat32, 0, 1}},
+ },
+ }};
+ cfg.entry_point_name = "main";
+
+ DataMap data;
+ data.Add<VertexPulling::Config>(cfg);
+ auto got = Run<VertexPulling>(src, data);
+
+ EXPECT_EQ(expect, str(got));
+}
+
+TEST_F(VertexPullingTest, ExistingVertexIndexAndInstanceIndex_SeparateStruct_OutOfOrder) {
+ auto* src = R"(
+@vertex
fn main(inputs : Inputs, indices : Indices) -> @builtin(position) vec4<f32> {
return vec4<f32>(inputs.var_a, inputs.var_b, 0.0, 1.0);
}
@@ -600,7 +593,7 @@ struct Indices {
};
)";
- auto* expect = R"(
+ auto* expect = R"(
struct TintVertexData {
tint_vertex_data : array<u32>,
}
@@ -609,7 +602,7 @@ struct TintVertexData {
@binding(1) @group(4) var<storage, read> tint_pulling_vertex_buffer_1 : TintVertexData;
-@stage(vertex)
+@vertex
fn main(indices : Indices) -> @builtin(position) vec4<f32> {
var inputs : Inputs;
{
@@ -636,45 +629,45 @@ struct Indices {
}
)";
- VertexPulling::Config cfg;
- cfg.vertex_state = {{
- {
- 4,
- VertexStepMode::kVertex,
- {{VertexFormat::kFloat32, 0, 0}},
- },
- {
- 4,
- VertexStepMode::kInstance,
- {{VertexFormat::kFloat32, 0, 1}},
- },
- }};
- cfg.entry_point_name = "main";
+ VertexPulling::Config cfg;
+ cfg.vertex_state = {{
+ {
+ 4,
+ VertexStepMode::kVertex,
+ {{VertexFormat::kFloat32, 0, 0}},
+ },
+ {
+ 4,
+ VertexStepMode::kInstance,
+ {{VertexFormat::kFloat32, 0, 1}},
+ },
+ }};
+ cfg.entry_point_name = "main";
- DataMap data;
- data.Add<VertexPulling::Config>(cfg);
- auto got = Run<VertexPulling>(src, data);
+ DataMap data;
+ data.Add<VertexPulling::Config>(cfg);
+ auto got = Run<VertexPulling>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(VertexPullingTest, TwoAttributesSameBuffer) {
- auto* src = R"(
-@stage(vertex)
+ auto* src = R"(
+@vertex
fn main(@location(0) var_a : f32,
@location(1) var_b : vec4<f32>) -> @builtin(position) vec4<f32> {
return vec4<f32>();
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct TintVertexData {
tint_vertex_data : array<u32>,
}
@binding(0) @group(4) var<storage, read> tint_pulling_vertex_buffer_0 : TintVertexData;
-@stage(vertex)
+@vertex
fn main(@builtin(vertex_index) tint_pulling_vertex_index : u32) -> @builtin(position) vec4<f32> {
var var_a : f32;
var var_b : vec4<f32>;
@@ -687,23 +680,22 @@ fn main(@builtin(vertex_index) tint_pulling_vertex_index : u32) -> @builtin(posi
}
)";
- VertexPulling::Config cfg;
- cfg.vertex_state = {
- {{16,
- VertexStepMode::kVertex,
- {{VertexFormat::kFloat32, 0, 0}, {VertexFormat::kFloat32x4, 0, 1}}}}};
- cfg.entry_point_name = "main";
+ VertexPulling::Config cfg;
+ cfg.vertex_state = {{{16,
+ VertexStepMode::kVertex,
+ {{VertexFormat::kFloat32, 0, 0}, {VertexFormat::kFloat32x4, 0, 1}}}}};
+ cfg.entry_point_name = "main";
- DataMap data;
- data.Add<VertexPulling::Config>(cfg);
- auto got = Run<VertexPulling>(src, data);
+ DataMap data;
+ data.Add<VertexPulling::Config>(cfg);
+ auto got = Run<VertexPulling>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(VertexPullingTest, FloatVectorAttributes) {
- auto* src = R"(
-@stage(vertex)
+ auto* src = R"(
+@vertex
fn main(@location(0) var_a : vec2<f32>,
@location(1) var_b : vec3<f32>,
@location(2) var_c : vec4<f32>
@@ -712,7 +704,7 @@ fn main(@location(0) var_a : vec2<f32>,
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct TintVertexData {
tint_vertex_data : array<u32>,
}
@@ -723,7 +715,7 @@ struct TintVertexData {
@binding(2) @group(4) var<storage, read> tint_pulling_vertex_buffer_2 : TintVertexData;
-@stage(vertex)
+@vertex
fn main(@builtin(vertex_index) tint_pulling_vertex_index : u32) -> @builtin(position) vec4<f32> {
var var_a : vec2<f32>;
var var_b : vec3<f32>;
@@ -740,24 +732,24 @@ fn main(@builtin(vertex_index) tint_pulling_vertex_index : u32) -> @builtin(posi
}
)";
- VertexPulling::Config cfg;
- cfg.vertex_state = {{
- {8, VertexStepMode::kVertex, {{VertexFormat::kFloat32x2, 0, 0}}},
- {12, VertexStepMode::kVertex, {{VertexFormat::kFloat32x3, 0, 1}}},
- {16, VertexStepMode::kVertex, {{VertexFormat::kFloat32x4, 0, 2}}},
- }};
- cfg.entry_point_name = "main";
+ VertexPulling::Config cfg;
+ cfg.vertex_state = {{
+ {8, VertexStepMode::kVertex, {{VertexFormat::kFloat32x2, 0, 0}}},
+ {12, VertexStepMode::kVertex, {{VertexFormat::kFloat32x3, 0, 1}}},
+ {16, VertexStepMode::kVertex, {{VertexFormat::kFloat32x4, 0, 2}}},
+ }};
+ cfg.entry_point_name = "main";
- DataMap data;
- data.Add<VertexPulling::Config>(cfg);
- auto got = Run<VertexPulling>(src, data);
+ DataMap data;
+ data.Add<VertexPulling::Config>(cfg);
+ auto got = Run<VertexPulling>(src, data);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(VertexPullingTest, AttemptSymbolCollision) {
- auto* src = R"(
-@stage(vertex)
+ auto* src = R"(
+@vertex
fn main(@location(0) var_a : f32,
@location(1) var_b : vec4<f32>) -> @builtin(position) vec4<f32> {
var tint_pulling_vertex_index : i32;
@@ -768,14 +760,14 @@ fn main(@location(0) var_a : f32,
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct TintVertexData {
tint_vertex_data_1 : array<u32>,
}
@binding(0) @group(4) var<storage, read> tint_pulling_vertex_buffer_0_1 : TintVertexData;
-@stage(vertex)
+@vertex
fn main(@builtin(vertex_index) tint_pulling_vertex_index_1 : u32) -> @builtin(position) vec4<f32> {
var var_a : f32;
var var_b : vec4<f32>;
@@ -792,23 +784,22 @@ fn main(@builtin(vertex_index) tint_pulling_vertex_index_1 : u32) -> @builtin(po
}
)";
- VertexPulling::Config cfg;
- cfg.vertex_state = {
- {{16,
- VertexStepMode::kVertex,
- {{VertexFormat::kFloat32, 0, 0}, {VertexFormat::kFloat32x4, 0, 1}}}}};
- cfg.entry_point_name = "main";
+ VertexPulling::Config cfg;
+ cfg.vertex_state = {{{16,
+ VertexStepMode::kVertex,
+ {{VertexFormat::kFloat32, 0, 0}, {VertexFormat::kFloat32x4, 0, 1}}}}};
+ cfg.entry_point_name = "main";
- DataMap data;
- data.Add<VertexPulling::Config>(cfg);
- auto got = Run<VertexPulling>(src, std::move(data));
+ DataMap data;
+ data.Add<VertexPulling::Config>(cfg);
+ auto got = Run<VertexPulling>(src, std::move(data));
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(VertexPullingTest, FormatsAligned) {
- auto* src = R"(
-@stage(vertex)
+ auto* src = R"(
+@vertex
fn main(
@location(0) uint8x2 : vec2<u32>,
@location(1) uint8x4 : vec4<u32>,
@@ -845,14 +836,14 @@ fn main(
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct TintVertexData {
tint_vertex_data : array<u32>,
}
@binding(0) @group(4) var<storage, read> tint_pulling_vertex_buffer_0 : TintVertexData;
-@stage(vertex)
+@vertex
fn main(@builtin(vertex_index) tint_pulling_vertex_index : u32) -> @builtin(position) vec4<f32> {
var uint8x2 : vec2<u32>;
var uint8x4 : vec4<u32>;
@@ -921,53 +912,39 @@ fn main(@builtin(vertex_index) tint_pulling_vertex_index : u32) -> @builtin(posi
}
)";
- VertexPulling::Config cfg;
- cfg.vertex_state = {{{256,
- VertexStepMode::kVertex,
- {
- {VertexFormat::kUint8x2, 64, 0},
- {VertexFormat::kUint8x4, 64, 1},
- {VertexFormat::kSint8x2, 64, 2},
- {VertexFormat::kSint8x4, 64, 3},
- {VertexFormat::kUnorm8x2, 64, 4},
- {VertexFormat::kUnorm8x4, 64, 5},
- {VertexFormat::kSnorm8x2, 64, 6},
- {VertexFormat::kSnorm8x4, 64, 7},
- {VertexFormat::kUint16x2, 64, 8},
- {VertexFormat::kUint16x4, 64, 9},
- {VertexFormat::kSint16x2, 64, 10},
- {VertexFormat::kSint16x4, 64, 11},
- {VertexFormat::kUnorm16x2, 64, 12},
- {VertexFormat::kUnorm16x4, 64, 13},
- {VertexFormat::kSnorm16x2, 64, 14},
- {VertexFormat::kSnorm16x4, 64, 15},
- {VertexFormat::kFloat16x2, 64, 16},
- {VertexFormat::kFloat16x4, 64, 17},
- {VertexFormat::kFloat32, 64, 18},
- {VertexFormat::kFloat32x2, 64, 19},
- {VertexFormat::kFloat32x3, 64, 20},
- {VertexFormat::kFloat32x4, 64, 21},
- {VertexFormat::kUint32, 64, 22},
- {VertexFormat::kUint32x2, 64, 23},
- {VertexFormat::kUint32x3, 64, 24},
- {VertexFormat::kUint32x4, 64, 25},
- {VertexFormat::kSint32, 64, 26},
- {VertexFormat::kSint32x2, 64, 27},
- {VertexFormat::kSint32x3, 64, 28},
- {VertexFormat::kSint32x4, 64, 29},
- }}}};
- cfg.entry_point_name = "main";
-
- DataMap data;
- data.Add<VertexPulling::Config>(cfg);
- auto got = Run<VertexPulling>(src, data);
-
- EXPECT_EQ(expect, str(got));
+ VertexPulling::Config cfg;
+ cfg.vertex_state = {
+ {{256,
+ VertexStepMode::kVertex,
+ {
+ {VertexFormat::kUint8x2, 64, 0}, {VertexFormat::kUint8x4, 64, 1},
+ {VertexFormat::kSint8x2, 64, 2}, {VertexFormat::kSint8x4, 64, 3},
+ {VertexFormat::kUnorm8x2, 64, 4}, {VertexFormat::kUnorm8x4, 64, 5},
+ {VertexFormat::kSnorm8x2, 64, 6}, {VertexFormat::kSnorm8x4, 64, 7},
+ {VertexFormat::kUint16x2, 64, 8}, {VertexFormat::kUint16x4, 64, 9},
+ {VertexFormat::kSint16x2, 64, 10}, {VertexFormat::kSint16x4, 64, 11},
+ {VertexFormat::kUnorm16x2, 64, 12}, {VertexFormat::kUnorm16x4, 64, 13},
+ {VertexFormat::kSnorm16x2, 64, 14}, {VertexFormat::kSnorm16x4, 64, 15},
+ {VertexFormat::kFloat16x2, 64, 16}, {VertexFormat::kFloat16x4, 64, 17},
+ {VertexFormat::kFloat32, 64, 18}, {VertexFormat::kFloat32x2, 64, 19},
+ {VertexFormat::kFloat32x3, 64, 20}, {VertexFormat::kFloat32x4, 64, 21},
+ {VertexFormat::kUint32, 64, 22}, {VertexFormat::kUint32x2, 64, 23},
+ {VertexFormat::kUint32x3, 64, 24}, {VertexFormat::kUint32x4, 64, 25},
+ {VertexFormat::kSint32, 64, 26}, {VertexFormat::kSint32x2, 64, 27},
+ {VertexFormat::kSint32x3, 64, 28}, {VertexFormat::kSint32x4, 64, 29},
+ }}}};
+ cfg.entry_point_name = "main";
+
+ DataMap data;
+ data.Add<VertexPulling::Config>(cfg);
+ auto got = Run<VertexPulling>(src, data);
+
+ EXPECT_EQ(expect, str(got));
}
TEST_F(VertexPullingTest, FormatsStrideUnaligned) {
- auto* src = R"(
-@stage(vertex)
+ auto* src = R"(
+@vertex
fn main(
@location(0) uint8x2 : vec2<u32>,
@location(1) uint8x4 : vec4<u32>,
@@ -1004,15 +981,15 @@ fn main(
}
)";
- auto* expect =
- R"(
+ auto* expect =
+ R"(
struct TintVertexData {
tint_vertex_data : array<u32>,
}
@binding(0) @group(4) var<storage, read> tint_pulling_vertex_buffer_0 : TintVertexData;
-@stage(vertex)
+@vertex
fn main(@builtin(vertex_index) tint_pulling_vertex_index : u32) -> @builtin(position) vec4<f32> {
var uint8x2 : vec2<u32>;
var uint8x4 : vec4<u32>;
@@ -1081,53 +1058,39 @@ fn main(@builtin(vertex_index) tint_pulling_vertex_index : u32) -> @builtin(posi
}
)";
- VertexPulling::Config cfg;
- cfg.vertex_state = {{{256,
- VertexStepMode::kVertex,
- {
- {VertexFormat::kUint8x2, 63, 0},
- {VertexFormat::kUint8x4, 63, 1},
- {VertexFormat::kSint8x2, 63, 2},
- {VertexFormat::kSint8x4, 63, 3},
- {VertexFormat::kUnorm8x2, 63, 4},
- {VertexFormat::kUnorm8x4, 63, 5},
- {VertexFormat::kSnorm8x2, 63, 6},
- {VertexFormat::kSnorm8x4, 63, 7},
- {VertexFormat::kUint16x2, 63, 8},
- {VertexFormat::kUint16x4, 63, 9},
- {VertexFormat::kSint16x2, 63, 10},
- {VertexFormat::kSint16x4, 63, 11},
- {VertexFormat::kUnorm16x2, 63, 12},
- {VertexFormat::kUnorm16x4, 63, 13},
- {VertexFormat::kSnorm16x2, 63, 14},
- {VertexFormat::kSnorm16x4, 63, 15},
- {VertexFormat::kFloat16x2, 63, 16},
- {VertexFormat::kFloat16x4, 63, 17},
- {VertexFormat::kFloat32, 63, 18},
- {VertexFormat::kFloat32x2, 63, 19},
- {VertexFormat::kFloat32x3, 63, 20},
- {VertexFormat::kFloat32x4, 63, 21},
- {VertexFormat::kUint32, 63, 22},
- {VertexFormat::kUint32x2, 63, 23},
- {VertexFormat::kUint32x3, 63, 24},
- {VertexFormat::kUint32x4, 63, 25},
- {VertexFormat::kSint32, 63, 26},
- {VertexFormat::kSint32x2, 63, 27},
- {VertexFormat::kSint32x3, 63, 28},
- {VertexFormat::kSint32x4, 63, 29},
- }}}};
- cfg.entry_point_name = "main";
-
- DataMap data;
- data.Add<VertexPulling::Config>(cfg);
- auto got = Run<VertexPulling>(src, data);
-
- EXPECT_EQ(expect, str(got));
+ VertexPulling::Config cfg;
+ cfg.vertex_state = {
+ {{256,
+ VertexStepMode::kVertex,
+ {
+ {VertexFormat::kUint8x2, 63, 0}, {VertexFormat::kUint8x4, 63, 1},
+ {VertexFormat::kSint8x2, 63, 2}, {VertexFormat::kSint8x4, 63, 3},
+ {VertexFormat::kUnorm8x2, 63, 4}, {VertexFormat::kUnorm8x4, 63, 5},
+ {VertexFormat::kSnorm8x2, 63, 6}, {VertexFormat::kSnorm8x4, 63, 7},
+ {VertexFormat::kUint16x2, 63, 8}, {VertexFormat::kUint16x4, 63, 9},
+ {VertexFormat::kSint16x2, 63, 10}, {VertexFormat::kSint16x4, 63, 11},
+ {VertexFormat::kUnorm16x2, 63, 12}, {VertexFormat::kUnorm16x4, 63, 13},
+ {VertexFormat::kSnorm16x2, 63, 14}, {VertexFormat::kSnorm16x4, 63, 15},
+ {VertexFormat::kFloat16x2, 63, 16}, {VertexFormat::kFloat16x4, 63, 17},
+ {VertexFormat::kFloat32, 63, 18}, {VertexFormat::kFloat32x2, 63, 19},
+ {VertexFormat::kFloat32x3, 63, 20}, {VertexFormat::kFloat32x4, 63, 21},
+ {VertexFormat::kUint32, 63, 22}, {VertexFormat::kUint32x2, 63, 23},
+ {VertexFormat::kUint32x3, 63, 24}, {VertexFormat::kUint32x4, 63, 25},
+ {VertexFormat::kSint32, 63, 26}, {VertexFormat::kSint32x2, 63, 27},
+ {VertexFormat::kSint32x3, 63, 28}, {VertexFormat::kSint32x4, 63, 29},
+ }}}};
+ cfg.entry_point_name = "main";
+
+ DataMap data;
+ data.Add<VertexPulling::Config>(cfg);
+ auto got = Run<VertexPulling>(src, data);
+
+ EXPECT_EQ(expect, str(got));
}
TEST_F(VertexPullingTest, FormatsWithVectorsResized) {
- auto* src = R"(
-@stage(vertex)
+ auto* src = R"(
+@vertex
fn main(
@location(0) uint8x2 : vec3<u32>,
@location(1) uint8x4 : vec2<u32>,
@@ -1164,14 +1127,14 @@ fn main(
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct TintVertexData {
tint_vertex_data : array<u32>,
}
@binding(0) @group(4) var<storage, read> tint_pulling_vertex_buffer_0 : TintVertexData;
-@stage(vertex)
+@vertex
fn main(@builtin(vertex_index) tint_pulling_vertex_index : u32) -> @builtin(position) vec4<f32> {
var uint8x2 : vec3<u32>;
var uint8x4 : vec2<u32>;
@@ -1209,30 +1172,30 @@ fn main(@builtin(vertex_index) tint_pulling_vertex_index : u32) -> @builtin(posi
uint8x4 = (((vec4<u32>(tint_pulling_vertex_buffer_0.tint_vertex_data[(buffer_array_base_0 + 16u)]) << vec4<u32>(24u, 16u, 8u, 0u)) >> vec4<u32>(24u))).xy;
sint8x2 = (((vec2<i32>(bitcast<i32>((tint_pulling_vertex_buffer_0.tint_vertex_data[(buffer_array_base_0 + 16u)] << 16u))) << vec2<u32>(8u, 0u)) >> vec2<u32>(24u))).x;
sint8x4 = (((vec4<i32>(bitcast<i32>(tint_pulling_vertex_buffer_0.tint_vertex_data[(buffer_array_base_0 + 16u)])) << vec4<u32>(24u, 16u, 8u, 0u)) >> vec4<u32>(24u))).xy;
- unorm8x2 = vec4<f32>(unpack4x8unorm((tint_pulling_vertex_buffer_0.tint_vertex_data[(buffer_array_base_0 + 16u)] & 65535u)).xy, 0.0, 1.0);
+ unorm8x2 = vec4<f32>(unpack4x8unorm((tint_pulling_vertex_buffer_0.tint_vertex_data[(buffer_array_base_0 + 16u)] & 65535u)).xy, 0.0f, 1.0f);
unorm8x4 = unpack4x8unorm(tint_pulling_vertex_buffer_0.tint_vertex_data[(buffer_array_base_0 + 16u)]).x;
- snorm8x2 = vec3<f32>(unpack4x8snorm((tint_pulling_vertex_buffer_0.tint_vertex_data[(buffer_array_base_0 + 16u)] & 65535u)).xy, 0.0);
+ snorm8x2 = vec3<f32>(unpack4x8snorm((tint_pulling_vertex_buffer_0.tint_vertex_data[(buffer_array_base_0 + 16u)] & 65535u)).xy, 0.0f);
snorm8x4 = unpack4x8snorm(tint_pulling_vertex_buffer_0.tint_vertex_data[(buffer_array_base_0 + 16u)]).x;
uint16x2 = vec3<u32>(((vec2<u32>(tint_pulling_vertex_buffer_0.tint_vertex_data[(buffer_array_base_0 + 16u)]) << vec2<u32>(16u, 0u)) >> vec2<u32>(16u)), 0u);
uint16x4 = (((vec2<u32>(tint_pulling_vertex_buffer_0.tint_vertex_data[(buffer_array_base_0 + 16u)], tint_pulling_vertex_buffer_0.tint_vertex_data[(buffer_array_base_0 + 17u)]).xxyy << vec4<u32>(16u, 0u, 16u, 0u)) >> vec4<u32>(16u))).xy;
- sint16x2 = vec4<i32>(((vec2<i32>(bitcast<i32>(tint_pulling_vertex_buffer_0.tint_vertex_data[(buffer_array_base_0 + 16u)])) << vec2<u32>(16u, 0u)) >> vec2<u32>(16u)), 0, 1);
+ sint16x2 = vec4<i32>(((vec2<i32>(bitcast<i32>(tint_pulling_vertex_buffer_0.tint_vertex_data[(buffer_array_base_0 + 16u)])) << vec2<u32>(16u, 0u)) >> vec2<u32>(16u)), 0i, 1i);
sint16x4 = (((vec2<i32>(bitcast<i32>(tint_pulling_vertex_buffer_0.tint_vertex_data[(buffer_array_base_0 + 16u)]), bitcast<i32>(tint_pulling_vertex_buffer_0.tint_vertex_data[(buffer_array_base_0 + 17u)])).xxyy << vec4<u32>(16u, 0u, 16u, 0u)) >> vec4<u32>(16u))).x;
- unorm16x2 = vec3<f32>(unpack2x16unorm(tint_pulling_vertex_buffer_0.tint_vertex_data[(buffer_array_base_0 + 16u)]), 0.0);
+ unorm16x2 = vec3<f32>(unpack2x16unorm(tint_pulling_vertex_buffer_0.tint_vertex_data[(buffer_array_base_0 + 16u)]), 0.0f);
unorm16x4 = vec4<f32>(unpack2x16unorm(tint_pulling_vertex_buffer_0.tint_vertex_data[(buffer_array_base_0 + 16u)]), unpack2x16unorm(tint_pulling_vertex_buffer_0.tint_vertex_data[(buffer_array_base_0 + 17u)])).x;
- snorm16x2 = vec4<f32>(unpack2x16snorm(tint_pulling_vertex_buffer_0.tint_vertex_data[(buffer_array_base_0 + 16u)]), 0.0, 1.0);
+ snorm16x2 = vec4<f32>(unpack2x16snorm(tint_pulling_vertex_buffer_0.tint_vertex_data[(buffer_array_base_0 + 16u)]), 0.0f, 1.0f);
snorm16x4 = vec4<f32>(unpack2x16snorm(tint_pulling_vertex_buffer_0.tint_vertex_data[(buffer_array_base_0 + 16u)]), unpack2x16snorm(tint_pulling_vertex_buffer_0.tint_vertex_data[(buffer_array_base_0 + 17u)])).xyz;
- float16x2 = vec4<f32>(unpack2x16float(tint_pulling_vertex_buffer_0.tint_vertex_data[(buffer_array_base_0 + 16u)]), 0.0, 1.0);
+ float16x2 = vec4<f32>(unpack2x16float(tint_pulling_vertex_buffer_0.tint_vertex_data[(buffer_array_base_0 + 16u)]), 0.0f, 1.0f);
float16x4 = vec4<f32>(unpack2x16float(tint_pulling_vertex_buffer_0.tint_vertex_data[(buffer_array_base_0 + 16u)]), unpack2x16float(tint_pulling_vertex_buffer_0.tint_vertex_data[(buffer_array_base_0 + 17u)])).x;
- float32 = vec4<f32>(bitcast<f32>(tint_pulling_vertex_buffer_0.tint_vertex_data[(buffer_array_base_0 + 16u)]), 0.0, 0.0, 1.0);
- float32x2 = vec4<f32>(vec2<f32>(bitcast<f32>(tint_pulling_vertex_buffer_0.tint_vertex_data[(buffer_array_base_0 + 16u)]), bitcast<f32>(tint_pulling_vertex_buffer_0.tint_vertex_data[(buffer_array_base_0 + 17u)])), 0.0, 1.0);
+ float32 = vec4<f32>(bitcast<f32>(tint_pulling_vertex_buffer_0.tint_vertex_data[(buffer_array_base_0 + 16u)]), 0.0f, 0.0f, 1.0f);
+ float32x2 = vec4<f32>(vec2<f32>(bitcast<f32>(tint_pulling_vertex_buffer_0.tint_vertex_data[(buffer_array_base_0 + 16u)]), bitcast<f32>(tint_pulling_vertex_buffer_0.tint_vertex_data[(buffer_array_base_0 + 17u)])), 0.0f, 1.0f);
float32x3 = vec3<f32>(bitcast<f32>(tint_pulling_vertex_buffer_0.tint_vertex_data[(buffer_array_base_0 + 16u)]), bitcast<f32>(tint_pulling_vertex_buffer_0.tint_vertex_data[(buffer_array_base_0 + 17u)]), bitcast<f32>(tint_pulling_vertex_buffer_0.tint_vertex_data[(buffer_array_base_0 + 18u)])).xy;
float32x4 = vec4<f32>(bitcast<f32>(tint_pulling_vertex_buffer_0.tint_vertex_data[(buffer_array_base_0 + 16u)]), bitcast<f32>(tint_pulling_vertex_buffer_0.tint_vertex_data[(buffer_array_base_0 + 17u)]), bitcast<f32>(tint_pulling_vertex_buffer_0.tint_vertex_data[(buffer_array_base_0 + 18u)]), bitcast<f32>(tint_pulling_vertex_buffer_0.tint_vertex_data[(buffer_array_base_0 + 19u)])).xyz;
uint32 = vec3<u32>(tint_pulling_vertex_buffer_0.tint_vertex_data[(buffer_array_base_0 + 16u)], 0u, 0u);
uint32x2 = vec4<u32>(vec2<u32>(tint_pulling_vertex_buffer_0.tint_vertex_data[(buffer_array_base_0 + 16u)], tint_pulling_vertex_buffer_0.tint_vertex_data[(buffer_array_base_0 + 17u)]), 0u, 1u);
uint32x3 = vec4<u32>(vec3<u32>(tint_pulling_vertex_buffer_0.tint_vertex_data[(buffer_array_base_0 + 16u)], tint_pulling_vertex_buffer_0.tint_vertex_data[(buffer_array_base_0 + 17u)], tint_pulling_vertex_buffer_0.tint_vertex_data[(buffer_array_base_0 + 18u)]), 1u);
uint32x4 = vec4<u32>(tint_pulling_vertex_buffer_0.tint_vertex_data[(buffer_array_base_0 + 16u)], tint_pulling_vertex_buffer_0.tint_vertex_data[(buffer_array_base_0 + 17u)], tint_pulling_vertex_buffer_0.tint_vertex_data[(buffer_array_base_0 + 18u)], tint_pulling_vertex_buffer_0.tint_vertex_data[(buffer_array_base_0 + 19u)]).xy;
- sint32 = vec4<i32>(bitcast<i32>(tint_pulling_vertex_buffer_0.tint_vertex_data[(buffer_array_base_0 + 16u)]), 0, 0, 1);
- sint32x2 = vec3<i32>(vec2<i32>(bitcast<i32>(tint_pulling_vertex_buffer_0.tint_vertex_data[(buffer_array_base_0 + 16u)]), bitcast<i32>(tint_pulling_vertex_buffer_0.tint_vertex_data[(buffer_array_base_0 + 17u)])), 0);
+ sint32 = vec4<i32>(bitcast<i32>(tint_pulling_vertex_buffer_0.tint_vertex_data[(buffer_array_base_0 + 16u)]), 0i, 0i, 1i);
+ sint32x2 = vec3<i32>(vec2<i32>(bitcast<i32>(tint_pulling_vertex_buffer_0.tint_vertex_data[(buffer_array_base_0 + 16u)]), bitcast<i32>(tint_pulling_vertex_buffer_0.tint_vertex_data[(buffer_array_base_0 + 17u)])), 0i);
sint32x3 = vec3<i32>(bitcast<i32>(tint_pulling_vertex_buffer_0.tint_vertex_data[(buffer_array_base_0 + 16u)]), bitcast<i32>(tint_pulling_vertex_buffer_0.tint_vertex_data[(buffer_array_base_0 + 17u)]), bitcast<i32>(tint_pulling_vertex_buffer_0.tint_vertex_data[(buffer_array_base_0 + 18u)])).x;
sint32x4 = vec4<i32>(bitcast<i32>(tint_pulling_vertex_buffer_0.tint_vertex_data[(buffer_array_base_0 + 16u)]), bitcast<i32>(tint_pulling_vertex_buffer_0.tint_vertex_data[(buffer_array_base_0 + 17u)]), bitcast<i32>(tint_pulling_vertex_buffer_0.tint_vertex_data[(buffer_array_base_0 + 18u)]), bitcast<i32>(tint_pulling_vertex_buffer_0.tint_vertex_data[(buffer_array_base_0 + 19u)])).xy;
}
@@ -1240,48 +1203,34 @@ fn main(@builtin(vertex_index) tint_pulling_vertex_index : u32) -> @builtin(posi
}
)";
- VertexPulling::Config cfg;
- cfg.vertex_state = {{{256,
- VertexStepMode::kVertex,
- {
- {VertexFormat::kUint8x2, 64, 0},
- {VertexFormat::kUint8x4, 64, 1},
- {VertexFormat::kSint8x2, 64, 2},
- {VertexFormat::kSint8x4, 64, 3},
- {VertexFormat::kUnorm8x2, 64, 4},
- {VertexFormat::kUnorm8x4, 64, 5},
- {VertexFormat::kSnorm8x2, 64, 6},
- {VertexFormat::kSnorm8x4, 64, 7},
- {VertexFormat::kUint16x2, 64, 8},
- {VertexFormat::kUint16x4, 64, 9},
- {VertexFormat::kSint16x2, 64, 10},
- {VertexFormat::kSint16x4, 64, 11},
- {VertexFormat::kUnorm16x2, 64, 12},
- {VertexFormat::kUnorm16x4, 64, 13},
- {VertexFormat::kSnorm16x2, 64, 14},
- {VertexFormat::kSnorm16x4, 64, 15},
- {VertexFormat::kFloat16x2, 64, 16},
- {VertexFormat::kFloat16x4, 64, 17},
- {VertexFormat::kFloat32, 64, 18},
- {VertexFormat::kFloat32x2, 64, 19},
- {VertexFormat::kFloat32x3, 64, 20},
- {VertexFormat::kFloat32x4, 64, 21},
- {VertexFormat::kUint32, 64, 22},
- {VertexFormat::kUint32x2, 64, 23},
- {VertexFormat::kUint32x3, 64, 24},
- {VertexFormat::kUint32x4, 64, 25},
- {VertexFormat::kSint32, 64, 26},
- {VertexFormat::kSint32x2, 64, 27},
- {VertexFormat::kSint32x3, 64, 28},
- {VertexFormat::kSint32x4, 64, 29},
- }}}};
- cfg.entry_point_name = "main";
-
- DataMap data;
- data.Add<VertexPulling::Config>(cfg);
- auto got = Run<VertexPulling>(src, data);
-
- EXPECT_EQ(expect, str(got));
+ VertexPulling::Config cfg;
+ cfg.vertex_state = {
+ {{256,
+ VertexStepMode::kVertex,
+ {
+ {VertexFormat::kUint8x2, 64, 0}, {VertexFormat::kUint8x4, 64, 1},
+ {VertexFormat::kSint8x2, 64, 2}, {VertexFormat::kSint8x4, 64, 3},
+ {VertexFormat::kUnorm8x2, 64, 4}, {VertexFormat::kUnorm8x4, 64, 5},
+ {VertexFormat::kSnorm8x2, 64, 6}, {VertexFormat::kSnorm8x4, 64, 7},
+ {VertexFormat::kUint16x2, 64, 8}, {VertexFormat::kUint16x4, 64, 9},
+ {VertexFormat::kSint16x2, 64, 10}, {VertexFormat::kSint16x4, 64, 11},
+ {VertexFormat::kUnorm16x2, 64, 12}, {VertexFormat::kUnorm16x4, 64, 13},
+ {VertexFormat::kSnorm16x2, 64, 14}, {VertexFormat::kSnorm16x4, 64, 15},
+ {VertexFormat::kFloat16x2, 64, 16}, {VertexFormat::kFloat16x4, 64, 17},
+ {VertexFormat::kFloat32, 64, 18}, {VertexFormat::kFloat32x2, 64, 19},
+ {VertexFormat::kFloat32x3, 64, 20}, {VertexFormat::kFloat32x4, 64, 21},
+ {VertexFormat::kUint32, 64, 22}, {VertexFormat::kUint32x2, 64, 23},
+ {VertexFormat::kUint32x3, 64, 24}, {VertexFormat::kUint32x4, 64, 25},
+ {VertexFormat::kSint32, 64, 26}, {VertexFormat::kSint32x2, 64, 27},
+ {VertexFormat::kSint32x3, 64, 28}, {VertexFormat::kSint32x4, 64, 29},
+ }}}};
+ cfg.entry_point_name = "main";
+
+ DataMap data;
+ data.Add<VertexPulling::Config>(cfg);
+ auto got = Run<VertexPulling>(src, data);
+
+ EXPECT_EQ(expect, str(got));
}
} // namespace
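The word arithmetic in the expected WGSL of these tests follows directly from the configured vertex state. A minimal sketch of that arithmetic, written as comments (the stride/offset values 256 and 64 are the ones configured above; `words_per_vertex` is an illustrative name, the other identifiers appear in the expected output):

// words_per_vertex      = 256u / 4u                          // = 64u: array stride in u32 words
// buffer_array_base_0   = tint_pulling_vertex_index * 64u    // first u32 word of this vertex's data
// attribute word offset = 64u / 4u                           // = 16u: byte offset 64 in u32 words
// Hence every attribute in the FormatsWithVectorsResized expectation reads from
// tint_vertex_data[(buffer_array_base_0 + 16u)], with narrower-than-32-bit formats
// additionally shifted, masked, or unpacked (unpack4x8unorm, unpack2x16float, ...)
// as shown in the expected output above.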
diff --git a/chromium/third_party/dawn/src/tint/transform/wrap_arrays_in_structs.cc b/chromium/third_party/dawn/src/tint/transform/wrap_arrays_in_structs.cc
index 7cf3fcb7700..eb133d784ef 100644
--- a/chromium/third_party/dawn/src/tint/transform/wrap_arrays_in_structs.cc
+++ b/chromium/third_party/dawn/src/tint/transform/wrap_arrays_in_structs.cc
@@ -29,141 +29,130 @@ TINT_INSTANTIATE_TYPEINFO(tint::transform::WrapArraysInStructs);
namespace tint::transform {
WrapArraysInStructs::WrappedArrayInfo::WrappedArrayInfo() = default;
-WrapArraysInStructs::WrappedArrayInfo::WrappedArrayInfo(
- const WrappedArrayInfo&) = default;
+WrapArraysInStructs::WrappedArrayInfo::WrappedArrayInfo(const WrappedArrayInfo&) = default;
WrapArraysInStructs::WrappedArrayInfo::~WrappedArrayInfo() = default;
WrapArraysInStructs::WrapArraysInStructs() = default;
WrapArraysInStructs::~WrapArraysInStructs() = default;
-bool WrapArraysInStructs::ShouldRun(const Program* program,
- const DataMap&) const {
- for (auto* node : program->ASTNodes().Objects()) {
- if (program->Sem().Get<sem::Array>(node->As<ast::Type>())) {
- return true;
+bool WrapArraysInStructs::ShouldRun(const Program* program, const DataMap&) const {
+ for (auto* node : program->ASTNodes().Objects()) {
+ if (program->Sem().Get<sem::Array>(node->As<ast::Type>())) {
+ return true;
+ }
}
- }
- return false;
+ return false;
}
-void WrapArraysInStructs::Run(CloneContext& ctx,
- const DataMap&,
- DataMap&) const {
- auto& sem = ctx.src->Sem();
-
- std::unordered_map<const sem::Array*, WrappedArrayInfo> wrapped_arrays;
- auto wrapper = [&](const sem::Array* array) {
- return WrapArray(ctx, wrapped_arrays, array);
- };
- auto wrapper_typename = [&](const sem::Array* arr) -> ast::TypeName* {
- auto info = wrapper(arr);
- return info ? ctx.dst->create<ast::TypeName>(info.wrapper_name) : nullptr;
- };
-
- // Replace all array types with their corresponding wrapper
- ctx.ReplaceAll([&](const ast::Type* ast_type) -> const ast::Type* {
- auto* type = ctx.src->TypeOf(ast_type);
- if (auto* array = type->UnwrapRef()->As<sem::Array>()) {
- return wrapper_typename(array);
- }
- return nullptr;
- });
-
- // Fix up index accessors so `a[1]` becomes `a.arr[1]`
- ctx.ReplaceAll([&](const ast::IndexAccessorExpression* accessor)
- -> const ast::IndexAccessorExpression* {
- if (auto* array = ::tint::As<sem::Array>(
- sem.Get(accessor->object)->Type()->UnwrapRef())) {
- if (wrapper(array)) {
- // Array is wrapped in a structure. Emit a member accessor to get
- // to the actual array.
- auto* arr = ctx.Clone(accessor->object);
- auto* idx = ctx.Clone(accessor->index);
- auto* unwrapped = ctx.dst->MemberAccessor(arr, "arr");
- return ctx.dst->IndexAccessor(accessor->source, unwrapped, idx);
- }
- }
- return nullptr;
- });
-
- // Fix up array constructors so `A(1,2)` becomes `tint_array_wrapper(A(1,2))`
- ctx.ReplaceAll(
- [&](const ast::CallExpression* expr) -> const ast::Expression* {
- if (auto* call = sem.Get(expr)) {
- if (auto* ctor = call->Target()->As<sem::TypeConstructor>()) {
- if (auto* array = ctor->ReturnType()->As<sem::Array>()) {
- if (auto w = wrapper(array)) {
- // Wrap the array type constructor with another constructor for
- // the wrapper
- auto* wrapped_array_ty = ctx.dst->ty.type_name(w.wrapper_name);
- auto* array_ty = w.array_type(ctx);
- auto args = utils::Transform(
- call->Arguments(), [&](const tint::sem::Expression* s) {
- return ctx.Clone(s->Declaration());
- });
- auto* arr_ctor = ctx.dst->Construct(array_ty, args);
- return ctx.dst->Construct(wrapped_array_ty, arr_ctor);
- }
+void WrapArraysInStructs::Run(CloneContext& ctx, const DataMap&, DataMap&) const {
+ auto& sem = ctx.src->Sem();
+
+ std::unordered_map<const sem::Array*, WrappedArrayInfo> wrapped_arrays;
+ auto wrapper = [&](const sem::Array* array) { return WrapArray(ctx, wrapped_arrays, array); };
+ auto wrapper_typename = [&](const sem::Array* arr) -> ast::TypeName* {
+ auto info = wrapper(arr);
+ return info ? ctx.dst->create<ast::TypeName>(info.wrapper_name) : nullptr;
+ };
+
+ // Replace all array types with their corresponding wrapper
+ ctx.ReplaceAll([&](const ast::Type* ast_type) -> const ast::Type* {
+ auto* type = ctx.src->TypeOf(ast_type);
+ if (auto* array = type->UnwrapRef()->As<sem::Array>()) {
+ return wrapper_typename(array);
+ }
+ return nullptr;
+ });
+
+ // Fix up index accessors so `a[1]` becomes `a.arr[1]`
+ ctx.ReplaceAll(
+ [&](const ast::IndexAccessorExpression* accessor) -> const ast::IndexAccessorExpression* {
+ if (auto* array =
+ ::tint::As<sem::Array>(sem.Get(accessor->object)->Type()->UnwrapRef())) {
+ if (wrapper(array)) {
+ // Array is wrapped in a structure. Emit a member accessor to get
+ // to the actual array.
+ auto* arr = ctx.Clone(accessor->object);
+ auto* idx = ctx.Clone(accessor->index);
+ auto* unwrapped = ctx.dst->MemberAccessor(arr, "arr");
+ return ctx.dst->IndexAccessor(accessor->source, unwrapped, idx);
+ }
+ }
+ return nullptr;
+ });
+
+ // Fix up array constructors so `A(1,2)` becomes `tint_array_wrapper(A(1,2))`
+ ctx.ReplaceAll([&](const ast::CallExpression* expr) -> const ast::Expression* {
+ if (auto* call = sem.Get(expr)->UnwrapMaterialize()->As<sem::Call>()) {
+ if (auto* ctor = call->Target()->As<sem::TypeConstructor>()) {
+ if (auto* array = ctor->ReturnType()->As<sem::Array>()) {
+ if (auto w = wrapper(array)) {
+ // Wrap the array type constructor with another constructor for
+ // the wrapper
+ auto* wrapped_array_ty = ctx.dst->ty.type_name(w.wrapper_name);
+ auto* array_ty = w.array_type(ctx);
+ auto args = utils::Transform(call->Arguments(),
+ [&](const tint::sem::Expression* s) {
+ return ctx.Clone(s->Declaration());
+ });
+ auto* arr_ctor = ctx.dst->Construct(array_ty, args);
+ return ctx.dst->Construct(wrapped_array_ty, arr_ctor);
+ }
+ }
}
- }
}
return nullptr;
- });
+ });
- ctx.Clone();
+ ctx.Clone();
}
WrapArraysInStructs::WrappedArrayInfo WrapArraysInStructs::WrapArray(
CloneContext& ctx,
std::unordered_map<const sem::Array*, WrappedArrayInfo>& wrapped_arrays,
const sem::Array* array) const {
- if (array->IsRuntimeSized()) {
- return {}; // We don't want to wrap runtime sized arrays
- }
-
- return utils::GetOrCreate(wrapped_arrays, array, [&] {
- WrappedArrayInfo info;
-
- // Generate a unique name for the array wrapper
- info.wrapper_name = ctx.dst->Symbols().New("tint_array_wrapper");
-
- // Examine the element type. Is it also an array?
- std::function<const ast::Type*(CloneContext&)> el_type;
- if (auto* el_array = array->ElemType()->As<sem::Array>()) {
- // Array of array - call WrapArray() on the element type
- if (auto el = WrapArray(ctx, wrapped_arrays, el_array)) {
- el_type = [=](CloneContext& c) {
- return c.dst->create<ast::TypeName>(el.wrapper_name);
- };
- }
+ if (array->IsRuntimeSized()) {
+ return {}; // We don't want to wrap runtime sized arrays
}
- // If the element wasn't an array, just create the typical AST type for it
- if (!el_type) {
- el_type = [=](CloneContext& c) {
- return CreateASTTypeFor(c, array->ElemType());
- };
- }
+ return utils::GetOrCreate(wrapped_arrays, array, [&] {
+ WrappedArrayInfo info;
- // Construct the single structure field type
- info.array_type = [=](CloneContext& c) {
- ast::AttributeList attrs;
- if (!array->IsStrideImplicit()) {
- attrs.emplace_back(
- c.dst->create<ast::StrideAttribute>(array->Stride()));
- }
- return c.dst->ty.array(el_type(c), array->Count(), std::move(attrs));
- };
+ // Generate a unique name for the array wrapper
+ info.wrapper_name = ctx.dst->Symbols().New("tint_array_wrapper");
+
+ // Examine the element type. Is it also an array?
+ std::function<const ast::Type*(CloneContext&)> el_type;
+ if (auto* el_array = array->ElemType()->As<sem::Array>()) {
+ // Array of array - call WrapArray() on the element type
+ if (auto el = WrapArray(ctx, wrapped_arrays, el_array)) {
+ el_type = [=](CloneContext& c) {
+ return c.dst->create<ast::TypeName>(el.wrapper_name);
+ };
+ }
+ }
+
+ // If the element wasn't an array, just create the typical AST type for it
+ if (!el_type) {
+ el_type = [=](CloneContext& c) { return CreateASTTypeFor(c, array->ElemType()); };
+ }
+
+ // Construct the single structure field type
+ info.array_type = [=](CloneContext& c) {
+ ast::AttributeList attrs;
+ if (!array->IsStrideImplicit()) {
+ attrs.emplace_back(c.dst->create<ast::StrideAttribute>(array->Stride()));
+ }
+ return c.dst->ty.array(el_type(c), u32(array->Count()), std::move(attrs));
+ };
- // Structure() will create and append the ast::Struct to the
- // global declarations of `ctx.dst`. As we haven't finished building the
- // current module-scope statement or function, this will be placed
- // immediately before the usage.
- ctx.dst->Structure(info.wrapper_name,
- {ctx.dst->Member("arr", info.array_type(ctx))});
- return info;
- });
+ // Structure() will create and append the ast::Struct to the
+ // global declarations of `ctx.dst`. As we haven't finished building the
+ // current module-scope statement or function, this will be placed
+ // immediately before the usage.
+ ctx.dst->Structure(info.wrapper_name, {ctx.dst->Member("arr", info.array_type(ctx))});
+ return info;
+ });
}
} // namespace tint::transform
diff --git a/chromium/third_party/dawn/src/tint/transform/wrap_arrays_in_structs.h b/chromium/third_party/dawn/src/tint/transform/wrap_arrays_in_structs.h
index a256ff8dca8..4653c6beeab 100644
--- a/chromium/third_party/dawn/src/tint/transform/wrap_arrays_in_structs.h
+++ b/chromium/third_party/dawn/src/tint/transform/wrap_arrays_in_structs.h
@@ -34,56 +34,53 @@ namespace tint::transform {
/// This transform helps with backends that cannot directly return arrays or use
/// them as parameters.
class WrapArraysInStructs : public Castable<WrapArraysInStructs, Transform> {
- public:
- /// Constructor
- WrapArraysInStructs();
+ public:
+ /// Constructor
+ WrapArraysInStructs();
- /// Destructor
- ~WrapArraysInStructs() override;
+ /// Destructor
+ ~WrapArraysInStructs() override;
- /// @param program the program to inspect
- /// @param data optional extra transform-specific input data
- /// @returns true if this transform should be run for the given program
- bool ShouldRun(const Program* program,
- const DataMap& data = {}) const override;
+ /// @param program the program to inspect
+ /// @param data optional extra transform-specific input data
+ /// @returns true if this transform should be run for the given program
+ bool ShouldRun(const Program* program, const DataMap& data = {}) const override;
- protected:
- /// Runs the transform using the CloneContext built for transforming a
- /// program. Run() is responsible for calling Clone() on the CloneContext.
- /// @param ctx the CloneContext primed with the input program and
- /// ProgramBuilder
- /// @param inputs optional extra transform-specific input data
- /// @param outputs optional extra transform-specific output data
- void Run(CloneContext& ctx,
- const DataMap& inputs,
- DataMap& outputs) const override;
+ protected:
+ /// Runs the transform using the CloneContext built for transforming a
+ /// program. Run() is responsible for calling Clone() on the CloneContext.
+ /// @param ctx the CloneContext primed with the input program and
+ /// ProgramBuilder
+ /// @param inputs optional extra transform-specific input data
+ /// @param outputs optional extra transform-specific output data
+ void Run(CloneContext& ctx, const DataMap& inputs, DataMap& outputs) const override;
- private:
- struct WrappedArrayInfo {
- WrappedArrayInfo();
- WrappedArrayInfo(const WrappedArrayInfo&);
- ~WrappedArrayInfo();
+ private:
+ struct WrappedArrayInfo {
+ WrappedArrayInfo();
+ WrappedArrayInfo(const WrappedArrayInfo&);
+ ~WrappedArrayInfo();
- Symbol wrapper_name;
- std::function<const ast::Type*(CloneContext&)> array_type;
+ Symbol wrapper_name;
+ std::function<const ast::Type*(CloneContext&)> array_type;
- operator bool() { return wrapper_name.IsValid(); }
- };
+ operator bool() { return wrapper_name.IsValid(); }
+ };
- /// WrapArray wraps the fixed-size array type in a new structure (if it hasn't
- /// already been wrapped). WrapArray will recursively wrap arrays-of-arrays.
- /// The new structure will be added to module-scope type declarations of
- /// `ctx.dst`.
- /// @param ctx the CloneContext
- /// @param wrapped_arrays a map of src array type to the wrapped structure
- /// name
- /// @param array the array type
- /// @return the name of the structure that wraps the array, or an invalid
- /// Symbol if this array should not be wrapped
- WrappedArrayInfo WrapArray(
- CloneContext& ctx,
- std::unordered_map<const sem::Array*, WrappedArrayInfo>& wrapped_arrays,
- const sem::Array* array) const;
+ /// WrapArray wraps the fixed-size array type in a new structure (if it hasn't
+ /// already been wrapped). WrapArray will recursively wrap arrays-of-arrays.
+ /// The new structure will be added to module-scope type declarations of
+ /// `ctx.dst`.
+ /// @param ctx the CloneContext
+ /// @param wrapped_arrays a map of src array type to the wrapped structure
+ /// name
+ /// @param array the array type
+ /// @return the name of the structure that wraps the array, or an invalid
+ /// Symbol if this array should not be wrapped
+ WrappedArrayInfo WrapArray(
+ CloneContext& ctx,
+ std::unordered_map<const sem::Array*, WrappedArrayInfo>& wrapped_arrays,
+ const sem::Array* array) const;
};
} // namespace tint::transform
diff --git a/chromium/third_party/dawn/src/tint/transform/wrap_arrays_in_structs_test.cc b/chromium/third_party/dawn/src/tint/transform/wrap_arrays_in_structs_test.cc
index 7ba884cea38..7a7a6b337d6 100644
--- a/chromium/third_party/dawn/src/tint/transform/wrap_arrays_in_structs_test.cc
+++ b/chromium/third_party/dawn/src/tint/transform/wrap_arrays_in_structs_test.cc
@@ -25,33 +25,33 @@ namespace {
using WrapArraysInStructsTest = TransformTest;
TEST_F(WrapArraysInStructsTest, ShouldRunEmptyModule) {
- auto* src = R"()";
+ auto* src = R"()";
- EXPECT_FALSE(ShouldRun<WrapArraysInStructs>(src));
+ EXPECT_FALSE(ShouldRun<WrapArraysInStructs>(src));
}
TEST_F(WrapArraysInStructsTest, ShouldRunHasArray) {
- auto* src = R"(
+ auto* src = R"(
var<private> arr : array<i32, 4>;
)";
- EXPECT_TRUE(ShouldRun<WrapArraysInStructs>(src));
+ EXPECT_TRUE(ShouldRun<WrapArraysInStructs>(src));
}
TEST_F(WrapArraysInStructsTest, EmptyModule) {
- auto* src = R"()";
- auto* expect = src;
+ auto* src = R"()";
+ auto* expect = src;
- auto got = Run<WrapArraysInStructs>(src);
+ auto got = Run<WrapArraysInStructs>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(WrapArraysInStructsTest, ArrayAsGlobal) {
- auto* src = R"(
+ auto* src = R"(
var<private> arr : array<i32, 4>;
)";
- auto* expect = R"(
+ auto* expect = R"(
struct tint_array_wrapper {
arr : array<i32, 4u>,
}
@@ -59,19 +59,19 @@ struct tint_array_wrapper {
var<private> arr : tint_array_wrapper;
)";
- auto got = Run<WrapArraysInStructs>(src);
+ auto got = Run<WrapArraysInStructs>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(WrapArraysInStructsTest, ArrayAsFunctionVar) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
var arr : array<i32, 4>;
let x = arr[3];
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct tint_array_wrapper {
arr : array<i32, 4u>,
}
@@ -82,18 +82,18 @@ fn f() {
}
)";
- auto got = Run<WrapArraysInStructs>(src);
+ auto got = Run<WrapArraysInStructs>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(WrapArraysInStructsTest, ArrayAsParam) {
- auto* src = R"(
+ auto* src = R"(
fn f(a : array<i32, 4>) -> i32 {
return a[2];
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct tint_array_wrapper {
arr : array<i32, 4u>,
}
@@ -103,18 +103,18 @@ fn f(a : tint_array_wrapper) -> i32 {
}
)";
- auto got = Run<WrapArraysInStructs>(src);
+ auto got = Run<WrapArraysInStructs>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(WrapArraysInStructsTest, ArrayAsReturn) {
- auto* src = R"(
+ auto* src = R"(
fn f() -> array<i32, 4> {
return array<i32, 4>(1, 2, 3, 4);
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct tint_array_wrapper {
arr : array<i32, 4u>,
}
@@ -124,13 +124,13 @@ fn f() -> tint_array_wrapper {
}
)";
- auto got = Run<WrapArraysInStructs>(src);
+ auto got = Run<WrapArraysInStructs>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(WrapArraysInStructsTest, ArrayAlias) {
- auto* src = R"(
+ auto* src = R"(
type Inner = array<i32, 2>;
type Array = array<Inner, 2>;
@@ -143,7 +143,7 @@ fn f() {
let x = arr[3];
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct tint_array_wrapper {
arr : array<i32, 2u>,
}
@@ -166,13 +166,13 @@ fn f() {
}
)";
- auto got = Run<WrapArraysInStructs>(src);
+ auto got = Run<WrapArraysInStructs>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(WrapArraysInStructsTest, ArrayAlias_OutOfOrder) {
- auto* src = R"(
+ auto* src = R"(
fn f() {
var arr : Array;
arr = Array();
@@ -185,7 +185,7 @@ fn f() {
type Array = array<Inner, 2>;
type Inner = array<i32, 2>;
)";
- auto* expect = R"(
+ auto* expect = R"(
struct tint_array_wrapper_1 {
arr : array<i32, 2u>,
}
@@ -208,20 +208,20 @@ type Array = tint_array_wrapper;
type Inner = tint_array_wrapper_1;
)";
- auto got = Run<WrapArraysInStructs>(src);
+ auto got = Run<WrapArraysInStructs>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(WrapArraysInStructsTest, ArraysInStruct) {
- auto* src = R"(
+ auto* src = R"(
struct S {
a : array<i32, 4>,
b : array<i32, 8>,
c : array<i32, 4>,
};
)";
- auto* expect = R"(
+ auto* expect = R"(
struct tint_array_wrapper {
arr : array<i32, 4u>,
}
@@ -237,20 +237,20 @@ struct S {
}
)";
- auto got = Run<WrapArraysInStructs>(src);
+ auto got = Run<WrapArraysInStructs>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(WrapArraysInStructsTest, ArraysOfArraysInStruct) {
- auto* src = R"(
+ auto* src = R"(
struct S {
a : array<i32, 4>,
b : array<array<i32, 4>, 4>,
c : array<array<array<i32, 4>, 4>, 4>,
};
)";
- auto* expect = R"(
+ auto* expect = R"(
struct tint_array_wrapper {
arr : array<i32, 4u>,
}
@@ -270,13 +270,13 @@ struct S {
}
)";
- auto got = Run<WrapArraysInStructs>(src);
+ auto got = Run<WrapArraysInStructs>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(WrapArraysInStructsTest, AccessArraysOfArraysInStruct) {
- auto* src = R"(
+ auto* src = R"(
struct S {
a : array<i32, 4>,
b : array<array<i32, 4>, 4>,
@@ -287,7 +287,7 @@ fn f(s : S) -> i32 {
return s.a[2] + s.b[1][2] + s.c[3][1][2];
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct tint_array_wrapper {
arr : array<i32, 4u>,
}
@@ -311,13 +311,13 @@ fn f(s : S) -> i32 {
}
)";
- auto got = Run<WrapArraysInStructs>(src);
+ auto got = Run<WrapArraysInStructs>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(WrapArraysInStructsTest, DeclarationOrder) {
- auto* src = R"(
+ auto* src = R"(
type T0 = i32;
type T1 = array<i32, 1>;
@@ -333,7 +333,7 @@ fn f2() {
var v : array<i32, 3>;
}
)";
- auto* expect = R"(
+ auto* expect = R"(
type T0 = i32;
struct tint_array_wrapper {
@@ -362,13 +362,13 @@ fn f2() {
}
)";
- auto got = Run<WrapArraysInStructs>(src);
+ auto got = Run<WrapArraysInStructs>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(WrapArraysInStructsTest, DeclarationOrder_OutOfOrder) {
- auto* src = R"(
+ auto* src = R"(
fn f2() {
var v : array<i32, 3>;
}
@@ -384,7 +384,7 @@ type T1 = array<i32, 1>;
type T0 = i32;
)";
- auto* expect = R"(
+ auto* expect = R"(
struct tint_array_wrapper {
arr : array<i32, 3u>,
}
@@ -413,9 +413,9 @@ type T1 = tint_array_wrapper_2;
type T0 = i32;
)";
- auto got = Run<WrapArraysInStructs>(src);
+ auto got = Run<WrapArraysInStructs>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
} // namespace
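The ZeroInitWorkgroupMemory transform diffed next derives every workgroup-array index from a single `local_invocation_index`, using the `(index % modulo) / division` scheme documented in its ArrayIndex struct. A small worked example as comments (the variable `w` and the element counts are illustrative; the index formula and the three wrapping strategies come from the transform's comments and Run() logic in the diff below, and the emitted WGSL may simplify the index expressions):

// Example workgroup variable:
//   var<workgroup> w : array<array<i32, 3>, 4>;    // 4 * 3 = 12 values to zero
// Logical indices, per `array_index = (local_invocation_index % modulo) / division`:
//   outer = (local_invocation_index % 12u) / 3u;   // modulo = 3 * 4, division = 3; range 0..3
//   inner = (local_invocation_index % 3u) / 1u;    // modulo = 1 * 3, division = 1; range 0..2
//   w[outer][inner] = i32();                       // one zeroing statement, num_iterations = 12
// The statement is emitted in a plain block when the constant workgroup size is
// exactly 12, inside `if (local_invocation_index < 12u)` when it is larger, and
// inside a for-loop striding by the workgroup size when it is smaller or not a
// compile-time constant; a single workgroupBarrier() call is appended afterwards.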
diff --git a/chromium/third_party/dawn/src/tint/transform/zero_init_workgroup_memory.cc b/chromium/third_party/dawn/src/tint/transform/zero_init_workgroup_memory.cc
index 8441e175601..6e843109922 100644
--- a/chromium/third_party/dawn/src/tint/transform/zero_init_workgroup_memory.cc
+++ b/chromium/third_party/dawn/src/tint/transform/zero_init_workgroup_memory.cc
@@ -22,7 +22,7 @@
#include "src/tint/ast/workgroup_attribute.h"
#include "src/tint/program_builder.h"
-#include "src/tint/sem/atomic_type.h"
+#include "src/tint/sem/atomic.h"
#include "src/tint/sem/function.h"
#include "src/tint/sem/variable.h"
#include "src/tint/utils/map.h"
@@ -34,426 +34,405 @@ namespace tint::transform {
/// PIMPL state for the ZeroInitWorkgroupMemory transform
struct ZeroInitWorkgroupMemory::State {
- /// The clone context
- CloneContext& ctx;
-
- /// An alias to *ctx.dst
- ProgramBuilder& b = *ctx.dst;
-
- /// The constant size of the workgroup. If 0, then #workgroup_size_expr should
- /// be used instead.
- uint32_t workgroup_size_const = 0;
- /// The size of the workgroup as an expression generator. Use if
- /// #workgroup_size_const is 0.
- std::function<const ast::Expression*()> workgroup_size_expr;
-
- /// ArrayIndex represents a function on the local invocation index, of
- /// the form: `array_index = (local_invocation_index % modulo) / division`
- struct ArrayIndex {
- /// The RHS of the modulus part of the expression
- uint32_t modulo = 1;
- /// The RHS of the division part of the expression
- uint32_t division = 1;
-
- /// Equality operator
- /// @param i the ArrayIndex to compare to this ArrayIndex
- /// @returns true if `i` and this ArrayIndex are equal
- bool operator==(const ArrayIndex& i) const {
- return modulo == i.modulo && division == i.division;
- }
+ /// The clone context
+ CloneContext& ctx;
+
+ /// An alias to *ctx.dst
+ ProgramBuilder& b = *ctx.dst;
+
+ /// The constant size of the workgroup. If 0, then #workgroup_size_expr should
+ /// be used instead.
+ uint32_t workgroup_size_const = 0;
+ /// The size of the workgroup as an expression generator. Use if
+ /// #workgroup_size_const is 0.
+ std::function<const ast::Expression*()> workgroup_size_expr;
+
+ /// ArrayIndex represents a function on the local invocation index, of
+ /// the form: `array_index = (local_invocation_index % modulo) / division`
+ struct ArrayIndex {
+ /// The RHS of the modulus part of the expression
+ uint32_t modulo = 1;
+ /// The RHS of the division part of the expression
+ uint32_t division = 1;
+
+ /// Equality operator
+ /// @param i the ArrayIndex to compare to this ArrayIndex
+ /// @returns true if `i` and this ArrayIndex are equal
+ bool operator==(const ArrayIndex& i) const {
+ return modulo == i.modulo && division == i.division;
+ }
- /// Hash function for the ArrayIndex type
- struct Hasher {
- /// @param i the ArrayIndex to calculate a hash for
- /// @returns the hash value for the ArrayIndex `i`
- size_t operator()(const ArrayIndex& i) const {
- return utils::Hash(i.modulo, i.division);
- }
+ /// Hash function for the ArrayIndex type
+ struct Hasher {
+ /// @param i the ArrayIndex to calculate a hash for
+ /// @returns the hash value for the ArrayIndex `i`
+ size_t operator()(const ArrayIndex& i) const {
+ return utils::Hash(i.modulo, i.division);
+ }
+ };
};
- };
-
- /// A list of unique ArrayIndex
- using ArrayIndices = utils::UniqueVector<ArrayIndex, ArrayIndex::Hasher>;
-
- /// Expression holds information about an expression that is being built for a
- /// statement will zero workgroup values.
- struct Expression {
- /// The AST expression node
- const ast::Expression* expr = nullptr;
- /// The number of iterations required to zero the value
- uint32_t num_iterations = 0;
- /// All array indices used by this expression
- ArrayIndices array_indices;
- };
-
- /// Statement holds information about a statement that will zero workgroup
- /// values.
- struct Statement {
- /// The AST statement node
- const ast::Statement* stmt;
- /// The number of iterations required to zero the value
- uint32_t num_iterations;
- /// All array indices used by this statement
- ArrayIndices array_indices;
- };
-
- /// All statements that zero workgroup memory
- std::vector<Statement> statements;
-
- /// A map of ArrayIndex to the name reserved for the `let` declaration of that
- /// index.
- std::unordered_map<ArrayIndex, Symbol, ArrayIndex::Hasher> array_index_names;
-
- /// Constructor
- /// @param c the CloneContext used for the transform
- explicit State(CloneContext& c) : ctx(c) {}
-
- /// Run inserts the workgroup memory zero-initialization logic at the top of
- /// the given function
- /// @param fn a compute shader entry point function
- void Run(const ast::Function* fn) {
- auto& sem = ctx.src->Sem();
-
- CalculateWorkgroupSize(
- ast::GetAttribute<ast::WorkgroupAttribute>(fn->attributes));
-
- // Generate a list of statements to zero initialize each of the
- // workgroup storage variables used by `fn`. This will populate #statements.
- auto* func = sem.Get(fn);
- for (auto* var : func->TransitivelyReferencedGlobals()) {
- if (var->StorageClass() == ast::StorageClass::kWorkgroup) {
- BuildZeroingStatements(
- var->Type()->UnwrapRef(), [&](uint32_t num_values) {
- auto var_name = ctx.Clone(var->Declaration()->symbol);
- return Expression{b.Expr(var_name), num_values, ArrayIndices{}};
- });
- }
- }
- if (statements.empty()) {
- return; // No workgroup variables to initialize.
- }
+ /// A list of unique ArrayIndex
+ using ArrayIndices = utils::UniqueVector<ArrayIndex, ArrayIndex::Hasher>;
+
+ /// Expression holds information about an expression that is being built for a
+ /// statement will zero workgroup values.
+ struct Expression {
+ /// The AST expression node
+ const ast::Expression* expr = nullptr;
+ /// The number of iterations required to zero the value
+ uint32_t num_iterations = 0;
+ /// All array indices used by this expression
+ ArrayIndices array_indices;
+ };
- // Scan the entry point for an existing local_invocation_index builtin
- // parameter
- std::function<const ast::Expression*()> local_index;
- for (auto* param : fn->params) {
- if (auto* builtin =
- ast::GetAttribute<ast::BuiltinAttribute>(param->attributes)) {
- if (builtin->builtin == ast::Builtin::kLocalInvocationIndex) {
- local_index = [=] { return b.Expr(ctx.Clone(param->symbol)); };
- break;
- }
- }
-
- if (auto* str = sem.Get(param)->Type()->As<sem::Struct>()) {
- for (auto* member : str->Members()) {
- if (auto* builtin = ast::GetAttribute<ast::BuiltinAttribute>(
- member->Declaration()->attributes)) {
- if (builtin->builtin == ast::Builtin::kLocalInvocationIndex) {
- local_index = [=] {
- auto* param_expr = b.Expr(ctx.Clone(param->symbol));
- auto member_name = ctx.Clone(member->Declaration()->symbol);
- return b.MemberAccessor(param_expr, member_name);
- };
- break;
+ /// Statement holds information about a statement that will zero workgroup
+ /// values.
+ struct Statement {
+ /// The AST statement node
+ const ast::Statement* stmt;
+ /// The number of iterations required to zero the value
+ uint32_t num_iterations;
+ /// All array indices used by this statement
+ ArrayIndices array_indices;
+ };
+
+ /// All statements that zero workgroup memory
+ std::vector<Statement> statements;
+
+ /// A map of ArrayIndex to the name reserved for the `let` declaration of that
+ /// index.
+ std::unordered_map<ArrayIndex, Symbol, ArrayIndex::Hasher> array_index_names;
+
+ /// Constructor
+ /// @param c the CloneContext used for the transform
+ explicit State(CloneContext& c) : ctx(c) {}
+
+ /// Run inserts the workgroup memory zero-initialization logic at the top of
+ /// the given function
+ /// @param fn a compute shader entry point function
+ void Run(const ast::Function* fn) {
+ auto& sem = ctx.src->Sem();
+
+ CalculateWorkgroupSize(ast::GetAttribute<ast::WorkgroupAttribute>(fn->attributes));
+
+ // Generate a list of statements to zero initialize each of the
+ // workgroup storage variables used by `fn`. This will populate #statements.
+ auto* func = sem.Get(fn);
+ for (auto* var : func->TransitivelyReferencedGlobals()) {
+ if (var->StorageClass() == ast::StorageClass::kWorkgroup) {
+ BuildZeroingStatements(var->Type()->UnwrapRef(), [&](uint32_t num_values) {
+ auto var_name = ctx.Clone(var->Declaration()->symbol);
+ return Expression{b.Expr(var_name), num_values, ArrayIndices{}};
+ });
}
- }
}
- }
- }
- if (!local_index) {
- // No existing local index parameter. Append one to the entry point.
- auto* param =
- b.Param(b.Symbols().New("local_invocation_index"), b.ty.u32(),
- {b.Builtin(ast::Builtin::kLocalInvocationIndex)});
- ctx.InsertBack(fn->params, param);
- local_index = [=] { return b.Expr(param->symbol); };
- }
- // Take the zeroing statements and bin them by the number of iterations
- // required to zero the workgroup data. We then emit these in blocks,
- // possibly wrapped in if-statements or for-loops.
- std::unordered_map<uint32_t, std::vector<Statement>>
- stmts_by_num_iterations;
- std::vector<uint32_t> num_sorted_iterations;
- for (auto& s : statements) {
- auto& stmts = stmts_by_num_iterations[s.num_iterations];
- if (stmts.empty()) {
- num_sorted_iterations.emplace_back(s.num_iterations);
- }
- stmts.emplace_back(s);
- }
- std::sort(num_sorted_iterations.begin(), num_sorted_iterations.end());
+ if (statements.empty()) {
+ return; // No workgroup variables to initialize.
+ }
- // Loop over the statements, grouped by num_iterations.
- for (auto num_iterations : num_sorted_iterations) {
- auto& stmts = stmts_by_num_iterations[num_iterations];
+ // Scan the entry point for an existing local_invocation_index builtin
+ // parameter
+ std::function<const ast::Expression*()> local_index;
+ for (auto* param : fn->params) {
+ if (auto* builtin = ast::GetAttribute<ast::BuiltinAttribute>(param->attributes)) {
+ if (builtin->builtin == ast::Builtin::kLocalInvocationIndex) {
+ local_index = [=] { return b.Expr(ctx.Clone(param->symbol)); };
+ break;
+ }
+ }
- // Gather all the array indices used by all the statements in the block.
- ArrayIndices array_indices;
- for (auto& s : stmts) {
- for (auto& idx : s.array_indices) {
- array_indices.add(idx);
+ if (auto* str = sem.Get(param)->Type()->As<sem::Struct>()) {
+ for (auto* member : str->Members()) {
+ if (auto* builtin = ast::GetAttribute<ast::BuiltinAttribute>(
+ member->Declaration()->attributes)) {
+ if (builtin->builtin == ast::Builtin::kLocalInvocationIndex) {
+ local_index = [=] {
+ auto* param_expr = b.Expr(ctx.Clone(param->symbol));
+ auto member_name = ctx.Clone(member->Declaration()->symbol);
+ return b.MemberAccessor(param_expr, member_name);
+ };
+ break;
+ }
+ }
+ }
+ }
}
- }
-
- // Determine the block type used to emit these statements.
-
- if (workgroup_size_const == 0 || num_iterations > workgroup_size_const) {
- // Either the workgroup size is dynamic, or smaller than num_iterations.
- // In either case, we need to generate a for loop to ensure we
- // initialize all the array elements.
- //
- // for (var idx : u32 = local_index;
- // idx < num_iterations;
- // idx += workgroup_size) {
- // ...
- // }
- auto idx = b.Symbols().New("idx");
- auto* init = b.Decl(b.Var(idx, b.ty.u32(), local_index()));
- auto* cond = b.create<ast::BinaryExpression>(
- ast::BinaryOp::kLessThan, b.Expr(idx), b.Expr(num_iterations));
- auto* cont = b.Assign(
- idx, b.Add(idx, workgroup_size_const ? b.Expr(workgroup_size_const)
- : workgroup_size_expr()));
-
- auto block = DeclareArrayIndices(num_iterations, array_indices,
- [&] { return b.Expr(idx); });
- for (auto& s : stmts) {
- block.emplace_back(s.stmt);
+ if (!local_index) {
+ // No existing local index parameter. Append one to the entry point.
+ auto* param = b.Param(b.Symbols().New("local_invocation_index"), b.ty.u32(),
+ {b.Builtin(ast::Builtin::kLocalInvocationIndex)});
+ ctx.InsertBack(fn->params, param);
+ local_index = [=] { return b.Expr(param->symbol); };
}
- auto* for_loop = b.For(init, cond, cont, b.Block(block));
- ctx.InsertFront(fn->body->statements, for_loop);
- } else if (num_iterations < workgroup_size_const) {
- // Workgroup size is a known constant, but is greater than
- // num_iterations. Emit an if statement:
- //
- // if (local_index < num_iterations) {
- // ...
- // }
- auto* cond = b.create<ast::BinaryExpression>(
- ast::BinaryOp::kLessThan, local_index(), b.Expr(num_iterations));
- auto block = DeclareArrayIndices(num_iterations, array_indices,
- [&] { return b.Expr(local_index()); });
- for (auto& s : stmts) {
- block.emplace_back(s.stmt);
+
+ // Take the zeroing statements and bin them by the number of iterations
+ // required to zero the workgroup data. We then emit these in blocks,
+ // possibly wrapped in if-statements or for-loops.
+ std::unordered_map<uint32_t, std::vector<Statement>> stmts_by_num_iterations;
+ std::vector<uint32_t> num_sorted_iterations;
+ for (auto& s : statements) {
+ auto& stmts = stmts_by_num_iterations[s.num_iterations];
+ if (stmts.empty()) {
+ num_sorted_iterations.emplace_back(s.num_iterations);
+ }
+ stmts.emplace_back(s);
}
- auto* if_stmt = b.If(cond, b.Block(block));
- ctx.InsertFront(fn->body->statements, if_stmt);
- } else {
- // Workgroup size exactly equals num_iterations.
- // No need for any conditionals. Just emit a basic block:
- //
- // {
- // ...
- // }
- auto block = DeclareArrayIndices(num_iterations, array_indices,
- [&] { return b.Expr(local_index()); });
- for (auto& s : stmts) {
- block.emplace_back(s.stmt);
+ std::sort(num_sorted_iterations.begin(), num_sorted_iterations.end());
+
+ // Loop over the statements, grouped by num_iterations.
+ for (auto num_iterations : num_sorted_iterations) {
+ auto& stmts = stmts_by_num_iterations[num_iterations];
+
+ // Gather all the array indices used by all the statements in the block.
+ ArrayIndices array_indices;
+ for (auto& s : stmts) {
+ for (auto& idx : s.array_indices) {
+ array_indices.add(idx);
+ }
+ }
+
+ // Determine the block type used to emit these statements.
+
+ if (workgroup_size_const == 0 || num_iterations > workgroup_size_const) {
+ // Either the workgroup size is dynamic, or smaller than num_iterations.
+ // In either case, we need to generate a for loop to ensure we
+ // initialize all the array elements.
+ //
+ // for (var idx : u32 = local_index;
+ // idx < num_iterations;
+ // idx += workgroup_size) {
+ // ...
+ // }
+ auto idx = b.Symbols().New("idx");
+ auto* init = b.Decl(b.Var(idx, b.ty.u32(), local_index()));
+ auto* cond = b.create<ast::BinaryExpression>(ast::BinaryOp::kLessThan, b.Expr(idx),
+ b.Expr(u32(num_iterations)));
+ auto* cont = b.Assign(
+ idx, b.Add(idx, workgroup_size_const ? b.Expr(u32(workgroup_size_const))
+ : workgroup_size_expr()));
+
+ auto block =
+ DeclareArrayIndices(num_iterations, array_indices, [&] { return b.Expr(idx); });
+ for (auto& s : stmts) {
+ block.emplace_back(s.stmt);
+ }
+ auto* for_loop = b.For(init, cond, cont, b.Block(block));
+ ctx.InsertFront(fn->body->statements, for_loop);
+ } else if (num_iterations < workgroup_size_const) {
+ // Workgroup size is a known constant, but is greater than
+ // num_iterations. Emit an if statement:
+ //
+ // if (local_index < num_iterations) {
+ // ...
+ // }
+ auto* cond = b.create<ast::BinaryExpression>(
+ ast::BinaryOp::kLessThan, local_index(), b.Expr(u32(num_iterations)));
+ auto block = DeclareArrayIndices(num_iterations, array_indices,
+ [&] { return b.Expr(local_index()); });
+ for (auto& s : stmts) {
+ block.emplace_back(s.stmt);
+ }
+ auto* if_stmt = b.If(cond, b.Block(block));
+ ctx.InsertFront(fn->body->statements, if_stmt);
+ } else {
+ // Workgroup size exactly equals num_iterations.
+ // No need for any conditionals. Just emit a basic block:
+ //
+ // {
+ // ...
+ // }
+ auto block = DeclareArrayIndices(num_iterations, array_indices,
+ [&] { return b.Expr(local_index()); });
+ for (auto& s : stmts) {
+ block.emplace_back(s.stmt);
+ }
+ ctx.InsertFront(fn->body->statements, b.Block(block));
+ }
}
- ctx.InsertFront(fn->body->statements, b.Block(block));
- }
- }
- // Append a single workgroup barrier after the zero initialization.
- ctx.InsertFront(fn->body->statements,
- b.CallStmt(b.Call("workgroupBarrier")));
- }
-
- /// BuildZeroingExpr is a function that builds a sub-expression used to zero
- /// workgroup values. `num_values` is the number of elements that the
- /// expression will be used to zero. Returns the expression.
- using BuildZeroingExpr = std::function<Expression(uint32_t num_values)>;
-
- /// BuildZeroingStatements() generates the statements required to zero
- /// initialize the workgroup storage expression of type `ty`.
- /// @param ty the expression type
- /// @param get_expr a function that builds the AST nodes for the expression.
- void BuildZeroingStatements(const sem::Type* ty,
- const BuildZeroingExpr& get_expr) {
- if (CanTriviallyZero(ty)) {
- auto var = get_expr(1u);
- auto* zero_init = b.Construct(CreateASTTypeFor(ctx, ty));
- statements.emplace_back(Statement{b.Assign(var.expr, zero_init),
- var.num_iterations, var.array_indices});
- return;
+ // Append a single workgroup barrier after the zero initialization.
+ ctx.InsertFront(fn->body->statements, b.CallStmt(b.Call("workgroupBarrier")));
}
- if (auto* atomic = ty->As<sem::Atomic>()) {
- auto* zero_init = b.Construct(CreateASTTypeFor(ctx, atomic->Type()));
- auto expr = get_expr(1u);
- auto* store = b.Call("atomicStore", b.AddressOf(expr.expr), zero_init);
- statements.emplace_back(Statement{b.CallStmt(store), expr.num_iterations,
- expr.array_indices});
- return;
- }
+ /// BuildZeroingExpr is a function that builds a sub-expression used to zero
+ /// workgroup values. `num_values` is the number of elements that the
+ /// expression will be used to zero. Returns the expression.
+ using BuildZeroingExpr = std::function<Expression(uint32_t num_values)>;
+
+ /// BuildZeroingStatements() generates the statements required to zero
+ /// initialize the workgroup storage expression of type `ty`.
+ /// @param ty the expression type
+ /// @param get_expr a function that builds the AST nodes for the expression.
+ void BuildZeroingStatements(const sem::Type* ty, const BuildZeroingExpr& get_expr) {
+ if (CanTriviallyZero(ty)) {
+ auto var = get_expr(1u);
+ auto* zero_init = b.Construct(CreateASTTypeFor(ctx, ty));
+ statements.emplace_back(
+ Statement{b.Assign(var.expr, zero_init), var.num_iterations, var.array_indices});
+ return;
+ }
- if (auto* str = ty->As<sem::Struct>()) {
- for (auto* member : str->Members()) {
- auto name = ctx.Clone(member->Declaration()->symbol);
- BuildZeroingStatements(member->Type(), [&](uint32_t num_values) {
- auto s = get_expr(num_values);
- return Expression{b.MemberAccessor(s.expr, name), s.num_iterations,
- s.array_indices};
- });
- }
- return;
- }
+ if (auto* atomic = ty->As<sem::Atomic>()) {
+ auto* zero_init = b.Construct(CreateASTTypeFor(ctx, atomic->Type()));
+ auto expr = get_expr(1u);
+ auto* store = b.Call("atomicStore", b.AddressOf(expr.expr), zero_init);
+ statements.emplace_back(
+ Statement{b.CallStmt(store), expr.num_iterations, expr.array_indices});
+ return;
+ }
+
+ if (auto* str = ty->As<sem::Struct>()) {
+ for (auto* member : str->Members()) {
+ auto name = ctx.Clone(member->Declaration()->symbol);
+ BuildZeroingStatements(member->Type(), [&](uint32_t num_values) {
+ auto s = get_expr(num_values);
+ return Expression{b.MemberAccessor(s.expr, name), s.num_iterations,
+ s.array_indices};
+ });
+ }
+ return;
+ }
+
+ if (auto* arr = ty->As<sem::Array>()) {
+ BuildZeroingStatements(arr->ElemType(), [&](uint32_t num_values) {
+ // num_values is the number of values to zero for the element type.
+ // The number of iterations required to zero the array and its elements
+ // is:
+ // `num_values * arr->Count()`
+ // The index for this array is:
+ // `(idx % modulo) / division`
+ auto modulo = num_values * arr->Count();
+ auto division = num_values;
+ auto a = get_expr(modulo);
+ auto array_indices = a.array_indices;
+ array_indices.add(ArrayIndex{modulo, division});
+ auto index = utils::GetOrCreate(array_index_names, ArrayIndex{modulo, division},
+ [&] { return b.Symbols().New("i"); });
+ return Expression{b.IndexAccessor(a.expr, index), a.num_iterations, array_indices};
+ });
+ return;
+ }
- if (auto* arr = ty->As<sem::Array>()) {
- BuildZeroingStatements(arr->ElemType(), [&](uint32_t num_values) {
- // num_values is the number of values to zero for the element type.
- // The number of iterations required to zero the array and its elements
- // is:
- // `num_values * arr->Count()`
- // The index for this array is:
- // `(idx % modulo) / division`
- auto modulo = num_values * arr->Count();
- auto division = num_values;
- auto a = get_expr(modulo);
- auto array_indices = a.array_indices;
- array_indices.add(ArrayIndex{modulo, division});
- auto index =
- utils::GetOrCreate(array_index_names, ArrayIndex{modulo, division},
- [&] { return b.Symbols().New("i"); });
- return Expression{b.IndexAccessor(a.expr, index), a.num_iterations,
- array_indices};
- });
- return;
+ TINT_UNREACHABLE(Transform, b.Diagnostics())
+ << "could not zero workgroup type: " << ty->FriendlyName(ctx.src->Symbols());
}
- TINT_UNREACHABLE(Transform, b.Diagnostics())
- << "could not zero workgroup type: "
- << ty->FriendlyName(ctx.src->Symbols());
- }
-
- /// DeclareArrayIndices returns a list of statements that contain the `let`
- /// declarations for all of the ArrayIndices.
- /// @param num_iterations the number of iterations for the block
- /// @param array_indices the list of array indices to generate `let`
- /// declarations for
- /// @param iteration a function that returns the index of the current
- /// iteration.
- /// @returns the list of `let` statements that declare the array indices
- ast::StatementList DeclareArrayIndices(
- uint32_t num_iterations,
- const ArrayIndices& array_indices,
- const std::function<const ast::Expression*()>& iteration) {
- ast::StatementList stmts;
- std::map<Symbol, ArrayIndex> indices_by_name;
- for (auto index : array_indices) {
- auto name = array_index_names.at(index);
- auto* mod =
- (num_iterations > index.modulo)
- ? b.create<ast::BinaryExpression>(
- ast::BinaryOp::kModulo, iteration(), b.Expr(index.modulo))
- : iteration();
- auto* div = (index.division != 1u) ? b.Div(mod, index.division) : mod;
- auto* decl = b.Decl(b.Const(name, b.ty.u32(), div));
- stmts.emplace_back(decl);
+ /// DeclareArrayIndices returns a list of statements that contain the `let`
+ /// declarations for all of the ArrayIndices.
+ /// @param num_iterations the number of iterations for the block
+ /// @param array_indices the list of array indices to generate `let`
+ /// declarations for
+ /// @param iteration a function that returns the index of the current
+ /// iteration.
+ /// @returns the list of `let` statements that declare the array indices
+ ast::StatementList DeclareArrayIndices(
+ uint32_t num_iterations,
+ const ArrayIndices& array_indices,
+ const std::function<const ast::Expression*()>& iteration) {
+ ast::StatementList stmts;
+ std::map<Symbol, ArrayIndex> indices_by_name;
+ for (auto index : array_indices) {
+ auto name = array_index_names.at(index);
+ auto* mod = (num_iterations > index.modulo)
+ ? b.create<ast::BinaryExpression>(ast::BinaryOp::kModulo, iteration(),
+ b.Expr(u32(index.modulo)))
+ : iteration();
+ auto* div = (index.division != 1u) ? b.Div(mod, u32(index.division)) : mod;
+ auto* decl = b.Decl(b.Let(name, b.ty.u32(), div));
+ stmts.emplace_back(decl);
+ }
+ return stmts;
}
- return stmts;
- }
-
- /// CalculateWorkgroupSize initializes the members #workgroup_size_const and
- /// #workgroup_size_expr with the linear workgroup size.
- /// @param attr the workgroup attribute applied to the entry point function
- void CalculateWorkgroupSize(const ast::WorkgroupAttribute* attr) {
- bool is_signed = false;
- workgroup_size_const = 1u;
- workgroup_size_expr = nullptr;
- for (auto* expr : attr->Values()) {
- if (!expr) {
- continue;
- }
- auto* sem = ctx.src->Sem().Get(expr);
- if (auto c = sem->ConstantValue()) {
- if (c.ElementType()->Is<sem::I32>()) {
- workgroup_size_const *= static_cast<uint32_t>(c.Elements()[0].i32);
- continue;
- } else if (c.ElementType()->Is<sem::U32>()) {
- workgroup_size_const *= c.Elements()[0].u32;
- continue;
+
+ /// CalculateWorkgroupSize initializes the members #workgroup_size_const and
+ /// #workgroup_size_expr with the linear workgroup size.
+ /// @param attr the workgroup attribute applied to the entry point function
+ void CalculateWorkgroupSize(const ast::WorkgroupAttribute* attr) {
+ bool is_signed = false;
+ workgroup_size_const = 1u;
+ workgroup_size_expr = nullptr;
+ for (auto* expr : attr->Values()) {
+ if (!expr) {
+ continue;
+ }
+ auto* sem = ctx.src->Sem().Get(expr);
+ if (auto c = sem->ConstantValue()) {
+ workgroup_size_const *= c.Element<AInt>(0).value;
+ continue;
+ }
+ // Constant value could not be found. Build expression instead.
+ workgroup_size_expr = [this, expr, size = workgroup_size_expr] {
+ auto* e = ctx.Clone(expr);
+ if (ctx.src->TypeOf(expr)->UnwrapRef()->Is<sem::I32>()) {
+ e = b.Construct<u32>(e);
+ }
+ return size ? b.Mul(size(), e) : e;
+ };
}
- }
- // Constant value could not be found. Build expression instead.
- workgroup_size_expr = [this, expr, size = workgroup_size_expr] {
- auto* e = ctx.Clone(expr);
- if (ctx.src->TypeOf(expr)->UnwrapRef()->Is<sem::I32>()) {
- e = b.Construct<ProgramBuilder::u32>(e);
+ if (workgroup_size_expr) {
+ if (workgroup_size_const != 1) {
+ // Fold workgroup_size_const into workgroup_size_expr
+ workgroup_size_expr = [this, is_signed, const_size = workgroup_size_const,
+ expr_size = workgroup_size_expr] {
+ return is_signed ? b.Mul(expr_size(), i32(const_size))
+ : b.Mul(expr_size(), u32(const_size));
+ };
+ }
+ // Indicate that workgroup_size_expr should be used instead of the
+ // constant.
+ workgroup_size_const = 0;
}
- return size ? b.Mul(size(), e) : e;
- };
}
- if (workgroup_size_expr) {
- if (workgroup_size_const != 1) {
- // Fold workgroup_size_const in to workgroup_size_expr
- workgroup_size_expr = [this, is_signed,
- const_size = workgroup_size_const,
- expr_size = workgroup_size_expr] {
- return is_signed
- ? b.Mul(expr_size(), static_cast<int32_t>(const_size))
- : b.Mul(expr_size(), const_size);
- };
- }
- // Indicate that workgroup_size_expr should be used instead of the
- // constant.
- workgroup_size_const = 0;
- }
- }
-
- /// @returns true if a variable with store type `ty` can be efficiently zeroed
- /// by assignment of a type constructor without operands. If
- /// CanTriviallyZero() returns false, then the type needs to be
- /// initialized by decomposing the initialization into multiple
- /// sub-initializations.
- /// @param ty the type to inspect
- bool CanTriviallyZero(const sem::Type* ty) {
- if (ty->Is<sem::Atomic>()) {
- return false;
- }
- if (auto* str = ty->As<sem::Struct>()) {
- for (auto* member : str->Members()) {
- if (!CanTriviallyZero(member->Type())) {
- return false;
+
+ /// @returns true if a variable with store type `ty` can be efficiently zeroed
+ /// by assignment of a type constructor without operands. If
+ /// CanTriviallyZero() returns false, then the type needs to be
+ /// initialized by decomposing the initialization into multiple
+ /// sub-initializations.
+ /// @param ty the type to inspect
+ bool CanTriviallyZero(const sem::Type* ty) {
+ if (ty->Is<sem::Atomic>()) {
+ return false;
}
- }
- }
- if (ty->Is<sem::Array>()) {
- return false;
+ if (auto* str = ty->As<sem::Struct>()) {
+ for (auto* member : str->Members()) {
+ if (!CanTriviallyZero(member->Type())) {
+ return false;
+ }
+ }
+ }
+ if (ty->Is<sem::Array>()) {
+ return false;
+ }
+ // True for all other storable types
+ return true;
}
- // True for all other storable types
- return true;
- }
};
ZeroInitWorkgroupMemory::ZeroInitWorkgroupMemory() = default;
ZeroInitWorkgroupMemory::~ZeroInitWorkgroupMemory() = default;
-bool ZeroInitWorkgroupMemory::ShouldRun(const Program* program,
- const DataMap&) const {
- for (auto* decl : program->AST().GlobalDeclarations()) {
- if (auto* var = decl->As<ast::Variable>()) {
- if (var->declared_storage_class == ast::StorageClass::kWorkgroup) {
- return true;
- }
+bool ZeroInitWorkgroupMemory::ShouldRun(const Program* program, const DataMap&) const {
+ for (auto* decl : program->AST().GlobalDeclarations()) {
+ if (auto* var = decl->As<ast::Variable>()) {
+ if (var->declared_storage_class == ast::StorageClass::kWorkgroup) {
+ return true;
+ }
+ }
}
- }
- return false;
+ return false;
}
-void ZeroInitWorkgroupMemory::Run(CloneContext& ctx,
- const DataMap&,
- DataMap&) const {
- for (auto* fn : ctx.src->AST().Functions()) {
- if (fn->PipelineStage() == ast::PipelineStage::kCompute) {
- State{ctx}.Run(fn);
+void ZeroInitWorkgroupMemory::Run(CloneContext& ctx, const DataMap&, DataMap&) const {
+ for (auto* fn : ctx.src->AST().Functions()) {
+ if (fn->PipelineStage() == ast::PipelineStage::kCompute) {
+ State{ctx}.Run(fn);
+ }
}
- }
- ctx.Clone();
+ ctx.Clone();
}
} // namespace tint::transform
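The array zeroing above derives every array dimension's index from a single flat iteration counter using `(idx % modulo) / division`, where each nesting level contributes `modulo = num_values * Count()` and `division = num_values`. A minimal standalone sketch of that index arithmetic, assuming a hypothetical `array<array<T, 3>, 4>` zeroed over 12 iterations (illustrative only, not code taken from the transform):

#include <cstdint>
#include <cstdio>

int main() {
    constexpr uint32_t kInnerCount = 3;  // hypothetical inner array count
    constexpr uint32_t kOuterCount = 4;  // hypothetical outer array count
    constexpr uint32_t kNumIterations = kInnerCount * kOuterCount;
    for (uint32_t idx = 0; idx < kNumIterations; idx++) {
        // Outer dimension: modulo = 3 * 4 = 12, division = 3.
        uint32_t outer = (idx % (kInnerCount * kOuterCount)) / kInnerCount;
        // Inner dimension: modulo = 3, division = 1.
        uint32_t inner = (idx % kInnerCount) / 1u;
        std::printf("idx=%2u zeroes element [%u][%u]\n", idx, outer, inner);
    }
    return 0;
}

Each of the 12 iterations maps to a distinct [outer][inner] pair, which is what lets the zeroing be spread evenly across workgroup invocations.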
diff --git a/chromium/third_party/dawn/src/tint/transform/zero_init_workgroup_memory.h b/chromium/third_party/dawn/src/tint/transform/zero_init_workgroup_memory.h
index 33ae52ce7bf..c75772553d7 100644
--- a/chromium/third_party/dawn/src/tint/transform/zero_init_workgroup_memory.h
+++ b/chromium/third_party/dawn/src/tint/transform/zero_init_workgroup_memory.h
@@ -22,34 +22,30 @@ namespace tint::transform {
/// ZeroInitWorkgroupMemory is a transform that injects code at the top of entry
/// points to zero-initialize workgroup memory used by that entry point (and all
/// transitive functions called by that entry point)
-class ZeroInitWorkgroupMemory
- : public Castable<ZeroInitWorkgroupMemory, Transform> {
- public:
- /// Constructor
- ZeroInitWorkgroupMemory();
-
- /// Destructor
- ~ZeroInitWorkgroupMemory() override;
-
- /// @param program the program to inspect
- /// @param data optional extra transform-specific input data
- /// @returns true if this transform should be run for the given program
- bool ShouldRun(const Program* program,
- const DataMap& data = {}) const override;
-
- protected:
- /// Runs the transform using the CloneContext built for transforming a
- /// program. Run() is responsible for calling Clone() on the CloneContext.
- /// @param ctx the CloneContext primed with the input program and
- /// ProgramBuilder
- /// @param inputs optional extra transform-specific input data
- /// @param outputs optional extra transform-specific output data
- void Run(CloneContext& ctx,
- const DataMap& inputs,
- DataMap& outputs) const override;
-
- private:
- struct State;
+class ZeroInitWorkgroupMemory : public Castable<ZeroInitWorkgroupMemory, Transform> {
+ public:
+ /// Constructor
+ ZeroInitWorkgroupMemory();
+
+ /// Destructor
+ ~ZeroInitWorkgroupMemory() override;
+
+ /// @param program the program to inspect
+ /// @param data optional extra transform-specific input data
+ /// @returns true if this transform should be run for the given program
+ bool ShouldRun(const Program* program, const DataMap& data = {}) const override;
+
+ protected:
+ /// Runs the transform using the CloneContext built for transforming a
+ /// program. Run() is responsible for calling Clone() on the CloneContext.
+ /// @param ctx the CloneContext primed with the input program and
+ /// ProgramBuilder
+ /// @param inputs optional extra transform-specific input data
+ /// @param outputs optional extra transform-specific output data
+ void Run(CloneContext& ctx, const DataMap& inputs, DataMap& outputs) const override;
+
+ private:
+ struct State;
};
} // namespace tint::transform
diff --git a/chromium/third_party/dawn/src/tint/transform/zero_init_workgroup_memory_test.cc b/chromium/third_party/dawn/src/tint/transform/zero_init_workgroup_memory_test.cc
index 32c73db1e60..93f3933f41e 100644
--- a/chromium/third_party/dawn/src/tint/transform/zero_init_workgroup_memory_test.cc
+++ b/chromium/third_party/dawn/src/tint/transform/zero_init_workgroup_memory_test.cc
@@ -24,53 +24,53 @@ namespace {
using ZeroInitWorkgroupMemoryTest = TransformTest;
TEST_F(ZeroInitWorkgroupMemoryTest, ShouldRunEmptyModule) {
- auto* src = R"()";
+ auto* src = R"()";
- EXPECT_FALSE(ShouldRun<ZeroInitWorkgroupMemory>(src));
+ EXPECT_FALSE(ShouldRun<ZeroInitWorkgroupMemory>(src));
}
TEST_F(ZeroInitWorkgroupMemoryTest, ShouldRunHasNoWorkgroupVars) {
- auto* src = R"(
+ auto* src = R"(
var<private> v : i32;
)";
- EXPECT_FALSE(ShouldRun<ZeroInitWorkgroupMemory>(src));
+ EXPECT_FALSE(ShouldRun<ZeroInitWorkgroupMemory>(src));
}
TEST_F(ZeroInitWorkgroupMemoryTest, ShouldRunHasWorkgroupVars) {
- auto* src = R"(
+ auto* src = R"(
var<workgroup> a : i32;
)";
- EXPECT_TRUE(ShouldRun<ZeroInitWorkgroupMemory>(src));
+ EXPECT_TRUE(ShouldRun<ZeroInitWorkgroupMemory>(src));
}
TEST_F(ZeroInitWorkgroupMemoryTest, EmptyModule) {
- auto* src = "";
- auto* expect = src;
+ auto* src = "";
+ auto* expect = src;
- auto got = Run<ZeroInitWorkgroupMemory>(src);
+ auto got = Run<ZeroInitWorkgroupMemory>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(ZeroInitWorkgroupMemoryTest, NoWorkgroupVars) {
- auto* src = R"(
+ auto* src = R"(
var<private> v : i32;
fn f() {
v = 1;
}
)";
- auto* expect = src;
+ auto* expect = src;
- auto got = Run<ZeroInitWorkgroupMemory>(src);
+ auto got = Run<ZeroInitWorkgroupMemory>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(ZeroInitWorkgroupMemoryTest, UnreferencedWorkgroupVars) {
- auto* src = R"(
+ auto* src = R"(
var<workgroup> a : i32;
var<workgroup> b : i32;
@@ -81,20 +81,20 @@ fn unreferenced() {
b = c;
}
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn f() {
}
)";
- auto* expect = src;
+ auto* expect = src;
- auto got = Run<ZeroInitWorkgroupMemory>(src);
+ auto got = Run<ZeroInitWorkgroupMemory>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(ZeroInitWorkgroupMemoryTest, UnreferencedWorkgroupVars_OutOfOrder) {
- auto* src = R"(
-@stage(compute) @workgroup_size(1)
+ auto* src = R"(
+@compute @workgroup_size(1)
fn f() {
}
@@ -108,26 +108,26 @@ var<workgroup> b : i32;
var<workgroup> c : i32;
)";
- auto* expect = src;
+ auto* expect = src;
- auto got = Run<ZeroInitWorkgroupMemory>(src);
+ auto got = Run<ZeroInitWorkgroupMemory>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(ZeroInitWorkgroupMemoryTest, SingleWorkgroupVar_ExistingLocalIndex) {
- auto* src = R"(
+ auto* src = R"(
var<workgroup> v : i32;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn f(@builtin(local_invocation_index) local_idx : u32) {
_ = v; // Initialization should be inserted above this statement
}
)";
- auto* expect = R"(
+ auto* expect = R"(
var<workgroup> v : i32;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn f(@builtin(local_invocation_index) local_idx : u32) {
{
v = i32();
@@ -137,23 +137,22 @@ fn f(@builtin(local_invocation_index) local_idx : u32) {
}
)";
- auto got = Run<ZeroInitWorkgroupMemory>(src);
+ auto got = Run<ZeroInitWorkgroupMemory>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
-TEST_F(ZeroInitWorkgroupMemoryTest,
- SingleWorkgroupVar_ExistingLocalIndex_OutOfOrder) {
- auto* src = R"(
-@stage(compute) @workgroup_size(1)
+TEST_F(ZeroInitWorkgroupMemoryTest, SingleWorkgroupVar_ExistingLocalIndex_OutOfOrder) {
+ auto* src = R"(
+@compute @workgroup_size(1)
fn f(@builtin(local_invocation_index) local_idx : u32) {
_ = v; // Initialization should be inserted above this statement
}
var<workgroup> v : i32;
)";
- auto* expect = R"(
-@stage(compute) @workgroup_size(1)
+ auto* expect = R"(
+@compute @workgroup_size(1)
fn f(@builtin(local_invocation_index) local_idx : u32) {
{
v = i32();
@@ -165,26 +164,25 @@ fn f(@builtin(local_invocation_index) local_idx : u32) {
var<workgroup> v : i32;
)";
- auto got = Run<ZeroInitWorkgroupMemory>(src);
+ auto got = Run<ZeroInitWorkgroupMemory>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
-TEST_F(ZeroInitWorkgroupMemoryTest,
- SingleWorkgroupVar_ExistingLocalIndexInStruct) {
- auto* src = R"(
+TEST_F(ZeroInitWorkgroupMemoryTest, SingleWorkgroupVar_ExistingLocalIndexInStruct) {
+ auto* src = R"(
var<workgroup> v : i32;
struct Params {
@builtin(local_invocation_index) local_idx : u32,
};
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn f(params : Params) {
_ = v; // Initialization should be inserted above this statement
}
)";
- auto* expect = R"(
+ auto* expect = R"(
var<workgroup> v : i32;
struct Params {
@@ -192,7 +190,7 @@ struct Params {
local_idx : u32,
}
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn f(params : Params) {
{
v = i32();
@@ -202,15 +200,14 @@ fn f(params : Params) {
}
)";
- auto got = Run<ZeroInitWorkgroupMemory>(src);
+ auto got = Run<ZeroInitWorkgroupMemory>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
-TEST_F(ZeroInitWorkgroupMemoryTest,
- SingleWorkgroupVar_ExistingLocalIndexInStruct_OutOfOrder) {
- auto* src = R"(
-@stage(compute) @workgroup_size(1)
+TEST_F(ZeroInitWorkgroupMemoryTest, SingleWorkgroupVar_ExistingLocalIndexInStruct_OutOfOrder) {
+ auto* src = R"(
+@compute @workgroup_size(1)
fn f(params : Params) {
_ = v; // Initialization should be inserted above this statement
}
@@ -221,8 +218,8 @@ struct Params {
var<workgroup> v : i32;
)";
- auto* expect = R"(
-@stage(compute) @workgroup_size(1)
+ auto* expect = R"(
+@compute @workgroup_size(1)
fn f(params : Params) {
{
v = i32();
@@ -239,24 +236,24 @@ struct Params {
var<workgroup> v : i32;
)";
- auto got = Run<ZeroInitWorkgroupMemory>(src);
+ auto got = Run<ZeroInitWorkgroupMemory>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(ZeroInitWorkgroupMemoryTest, SingleWorkgroupVar_InjectedLocalIndex) {
- auto* src = R"(
+ auto* src = R"(
var<workgroup> v : i32;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn f() {
_ = v; // Initialization should be inserted above this statement
}
)";
- auto* expect = R"(
+ auto* expect = R"(
var<workgroup> v : i32;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn f(@builtin(local_invocation_index) local_invocation_index : u32) {
{
v = i32();
@@ -266,23 +263,22 @@ fn f(@builtin(local_invocation_index) local_invocation_index : u32) {
}
)";
- auto got = Run<ZeroInitWorkgroupMemory>(src);
+ auto got = Run<ZeroInitWorkgroupMemory>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
-TEST_F(ZeroInitWorkgroupMemoryTest,
- SingleWorkgroupVar_InjectedLocalIndex_OutOfOrder) {
- auto* src = R"(
-@stage(compute) @workgroup_size(1)
+TEST_F(ZeroInitWorkgroupMemoryTest, SingleWorkgroupVar_InjectedLocalIndex_OutOfOrder) {
+ auto* src = R"(
+@compute @workgroup_size(1)
fn f() {
_ = v; // Initialization should be inserted above this statement
}
var<workgroup> v : i32;
)";
- auto* expect = R"(
-@stage(compute) @workgroup_size(1)
+ auto* expect = R"(
+@compute @workgroup_size(1)
fn f(@builtin(local_invocation_index) local_invocation_index : u32) {
{
v = i32();
@@ -294,14 +290,13 @@ fn f(@builtin(local_invocation_index) local_invocation_index : u32) {
var<workgroup> v : i32;
)";
- auto got = Run<ZeroInitWorkgroupMemory>(src);
+ auto got = Run<ZeroInitWorkgroupMemory>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
-TEST_F(ZeroInitWorkgroupMemoryTest,
- MultipleWorkgroupVar_ExistingLocalIndex_Size1) {
- auto* src = R"(
+TEST_F(ZeroInitWorkgroupMemoryTest, MultipleWorkgroupVar_ExistingLocalIndex_Size1) {
+ auto* src = R"(
struct S {
x : i32,
y : array<i32, 8>,
@@ -313,14 +308,14 @@ var<workgroup> b : S;
var<workgroup> c : array<S, 32>;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn f(@builtin(local_invocation_index) local_idx : u32) {
_ = a; // Initialization should be inserted above this statement
_ = b;
_ = c;
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct S {
x : i32,
y : array<i32, 8>,
@@ -332,7 +327,7 @@ var<workgroup> b : S;
var<workgroup> c : array<S, 32>;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn f(@builtin(local_invocation_index) local_idx : u32) {
{
a = i32();
@@ -358,15 +353,14 @@ fn f(@builtin(local_invocation_index) local_idx : u32) {
}
)";
- auto got = Run<ZeroInitWorkgroupMemory>(src);
+ auto got = Run<ZeroInitWorkgroupMemory>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
-TEST_F(ZeroInitWorkgroupMemoryTest,
- MultipleWorkgroupVar_ExistingLocalIndex_Size1_OutOfOrder) {
- auto* src = R"(
-@stage(compute) @workgroup_size(1)
+TEST_F(ZeroInitWorkgroupMemoryTest, MultipleWorkgroupVar_ExistingLocalIndex_Size1_OutOfOrder) {
+ auto* src = R"(
+@compute @workgroup_size(1)
fn f(@builtin(local_invocation_index) local_idx : u32) {
_ = a; // Initialization should be inserted above this statement
_ = b;
@@ -384,8 +378,8 @@ struct S {
y : array<i32, 8>,
};
)";
- auto* expect = R"(
-@stage(compute) @workgroup_size(1)
+ auto* expect = R"(
+@compute @workgroup_size(1)
fn f(@builtin(local_invocation_index) local_idx : u32) {
{
a = i32();
@@ -422,14 +416,13 @@ struct S {
}
)";
- auto got = Run<ZeroInitWorkgroupMemory>(src);
+ auto got = Run<ZeroInitWorkgroupMemory>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
-TEST_F(ZeroInitWorkgroupMemoryTest,
- MultipleWorkgroupVar_ExistingLocalIndex_Size_2_3) {
- auto* src = R"(
+TEST_F(ZeroInitWorkgroupMemoryTest, MultipleWorkgroupVar_ExistingLocalIndex_Size_2_3) {
+ auto* src = R"(
struct S {
x : i32,
y : array<i32, 8>,
@@ -441,14 +434,14 @@ var<workgroup> b : S;
var<workgroup> c : array<S, 32>;
-@stage(compute) @workgroup_size(2, 3)
+@compute @workgroup_size(2, 3)
fn f(@builtin(local_invocation_index) local_idx : u32) {
_ = a; // Initialization should be inserted above this statement
_ = b;
_ = c;
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct S {
x : i32,
y : array<i32, 8>,
@@ -460,7 +453,7 @@ var<workgroup> b : S;
var<workgroup> c : array<S, 32>;
-@stage(compute) @workgroup_size(2, 3)
+@compute @workgroup_size(2, 3)
fn f(@builtin(local_invocation_index) local_idx : u32) {
if ((local_idx < 1u)) {
a = i32();
@@ -486,14 +479,13 @@ fn f(@builtin(local_invocation_index) local_idx : u32) {
}
)";
- auto got = Run<ZeroInitWorkgroupMemory>(src);
+ auto got = Run<ZeroInitWorkgroupMemory>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
-TEST_F(ZeroInitWorkgroupMemoryTest,
- MultipleWorkgroupVar_ExistingLocalIndex_Size_2_3_X) {
- auto* src = R"(
+TEST_F(ZeroInitWorkgroupMemoryTest, MultipleWorkgroupVar_ExistingLocalIndex_Size_2_3_X) {
+ auto* src = R"(
struct S {
x : i32,
y : array<i32, 8>,
@@ -507,15 +499,15 @@ var<workgroup> c : array<S, 32>;
@id(1) override X : i32;
-@stage(compute) @workgroup_size(2, 3, X)
+@compute @workgroup_size(2, 3, X)
fn f(@builtin(local_invocation_index) local_idx : u32) {
_ = a; // Initialization should be inserted above this statement
_ = b;
_ = c;
}
)";
- auto* expect =
- R"(
+ auto* expect =
+ R"(
struct S {
x : i32,
y : array<i32, 8>,
@@ -529,7 +521,7 @@ var<workgroup> c : array<S, 32>;
@id(1) override X : i32;
-@stage(compute) @workgroup_size(2, 3, X)
+@compute @workgroup_size(2, 3, X)
fn f(@builtin(local_invocation_index) local_idx : u32) {
for(var idx : u32 = local_idx; (idx < 1u); idx = (idx + (u32(X) * 6u))) {
a = i32();
@@ -555,14 +547,13 @@ fn f(@builtin(local_invocation_index) local_idx : u32) {
}
)";
- auto got = Run<ZeroInitWorkgroupMemory>(src);
+ auto got = Run<ZeroInitWorkgroupMemory>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
-TEST_F(ZeroInitWorkgroupMemoryTest,
- MultipleWorkgroupVar_ExistingLocalIndex_Size_5u_X_10u) {
- auto* src = R"(
+TEST_F(ZeroInitWorkgroupMemoryTest, MultipleWorkgroupVar_ExistingLocalIndex_Size_5u_X_10u) {
+ auto* src = R"(
struct S {
x : array<array<i32, 8>, 10>,
y : array<i32, 8>,
@@ -577,15 +568,15 @@ var<workgroup> c : array<S, 32>;
@id(1) override X : u32;
-@stage(compute) @workgroup_size(5u, X, 10u)
+@compute @workgroup_size(5u, X, 10u)
fn f(@builtin(local_invocation_index) local_idx : u32) {
_ = a; // Initialization should be inserted above this statement
_ = b;
_ = c;
}
)";
- auto* expect =
- R"(
+ auto* expect =
+ R"(
struct S {
x : array<array<i32, 8>, 10>,
y : array<i32, 8>,
@@ -600,7 +591,7 @@ var<workgroup> c : array<S, 32>;
@id(1) override X : u32;
-@stage(compute) @workgroup_size(5u, X, 10u)
+@compute @workgroup_size(5u, X, 10u)
fn f(@builtin(local_invocation_index) local_idx : u32) {
for(var idx : u32 = local_idx; (idx < 1u); idx = (idx + (X * 50u))) {
a = i32();
@@ -645,13 +636,13 @@ fn f(@builtin(local_invocation_index) local_idx : u32) {
}
)";
- auto got = Run<ZeroInitWorkgroupMemory>(src);
+ auto got = Run<ZeroInitWorkgroupMemory>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(ZeroInitWorkgroupMemoryTest, MultipleWorkgroupVar_InjectedLocalIndex) {
- auto* src = R"(
+ auto* src = R"(
struct S {
x : i32,
y : array<i32, 8>,
@@ -663,14 +654,14 @@ var<workgroup> b : S;
var<workgroup> c : array<S, 32>;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn f(@builtin(local_invocation_id) local_invocation_id : vec3<u32>) {
_ = a; // Initialization should be inserted above this statement
_ = b;
_ = c;
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct S {
x : i32,
y : array<i32, 8>,
@@ -682,7 +673,7 @@ var<workgroup> b : S;
var<workgroup> c : array<S, 32>;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn f(@builtin(local_invocation_id) local_invocation_id : vec3<u32>, @builtin(local_invocation_index) local_invocation_index : u32) {
{
a = i32();
@@ -708,15 +699,14 @@ fn f(@builtin(local_invocation_id) local_invocation_id : vec3<u32>, @builtin(loc
}
)";
- auto got = Run<ZeroInitWorkgroupMemory>(src);
+ auto got = Run<ZeroInitWorkgroupMemory>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
-TEST_F(ZeroInitWorkgroupMemoryTest,
- MultipleWorkgroupVar_InjectedLocalIndex_OutOfOrder) {
- auto* src = R"(
-@stage(compute) @workgroup_size(1)
+TEST_F(ZeroInitWorkgroupMemoryTest, MultipleWorkgroupVar_InjectedLocalIndex_OutOfOrder) {
+ auto* src = R"(
+@compute @workgroup_size(1)
fn f(@builtin(local_invocation_id) local_invocation_id : vec3<u32>) {
_ = a; // Initialization should be inserted above this statement
_ = b;
@@ -734,8 +724,8 @@ struct S {
y : array<i32, 8>,
};
)";
- auto* expect = R"(
-@stage(compute) @workgroup_size(1)
+ auto* expect = R"(
+@compute @workgroup_size(1)
fn f(@builtin(local_invocation_id) local_invocation_id : vec3<u32>, @builtin(local_invocation_index) local_invocation_index : u32) {
{
a = i32();
@@ -772,13 +762,13 @@ struct S {
}
)";
- auto got = Run<ZeroInitWorkgroupMemory>(src);
+ auto got = Run<ZeroInitWorkgroupMemory>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(ZeroInitWorkgroupMemoryTest, MultipleWorkgroupVar_MultipleEntryPoints) {
- auto* src = R"(
+ auto* src = R"(
struct S {
x : i32,
y : array<i32, 8>,
@@ -790,24 +780,24 @@ var<workgroup> b : S;
var<workgroup> c : array<S, 32>;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn f1() {
_ = a; // Initialization should be inserted above this statement
_ = c;
}
-@stage(compute) @workgroup_size(1, 2, 3)
+@compute @workgroup_size(1, 2, 3)
fn f2(@builtin(local_invocation_id) local_invocation_id : vec3<u32>) {
_ = b; // Initialization should be inserted above this statement
}
-@stage(compute) @workgroup_size(4, 5, 6)
+@compute @workgroup_size(4, 5, 6)
fn f3() {
_ = c; // Initialization should be inserted above this statement
_ = a;
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct S {
x : i32,
y : array<i32, 8>,
@@ -819,7 +809,7 @@ var<workgroup> b : S;
var<workgroup> c : array<S, 32>;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn f1(@builtin(local_invocation_index) local_invocation_index : u32) {
{
a = i32();
@@ -838,7 +828,7 @@ fn f1(@builtin(local_invocation_index) local_invocation_index : u32) {
_ = c;
}
-@stage(compute) @workgroup_size(1, 2, 3)
+@compute @workgroup_size(1, 2, 3)
fn f2(@builtin(local_invocation_id) local_invocation_id : vec3<u32>, @builtin(local_invocation_index) local_invocation_index_1 : u32) {
if ((local_invocation_index_1 < 1u)) {
b.x = i32();
@@ -851,7 +841,7 @@ fn f2(@builtin(local_invocation_id) local_invocation_id : vec3<u32>, @builtin(lo
_ = b;
}
-@stage(compute) @workgroup_size(4, 5, 6)
+@compute @workgroup_size(4, 5, 6)
fn f3(@builtin(local_invocation_index) local_invocation_index_2 : u32) {
if ((local_invocation_index_2 < 1u)) {
a = i32();
@@ -871,26 +861,25 @@ fn f3(@builtin(local_invocation_index) local_invocation_index_2 : u32) {
}
)";
- auto got = Run<ZeroInitWorkgroupMemory>(src);
+ auto got = Run<ZeroInitWorkgroupMemory>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
-TEST_F(ZeroInitWorkgroupMemoryTest,
- MultipleWorkgroupVar_MultipleEntryPoints_OutOfOrder) {
- auto* src = R"(
-@stage(compute) @workgroup_size(1)
+TEST_F(ZeroInitWorkgroupMemoryTest, MultipleWorkgroupVar_MultipleEntryPoints_OutOfOrder) {
+ auto* src = R"(
+@compute @workgroup_size(1)
fn f1() {
_ = a; // Initialization should be inserted above this statement
_ = c;
}
-@stage(compute) @workgroup_size(1, 2, 3)
+@compute @workgroup_size(1, 2, 3)
fn f2(@builtin(local_invocation_id) local_invocation_id : vec3<u32>) {
_ = b; // Initialization should be inserted above this statement
}
-@stage(compute) @workgroup_size(4, 5, 6)
+@compute @workgroup_size(4, 5, 6)
fn f3() {
_ = c; // Initialization should be inserted above this statement
_ = a;
@@ -907,8 +896,8 @@ struct S {
y : array<i32, 8>,
};
)";
- auto* expect = R"(
-@stage(compute) @workgroup_size(1)
+ auto* expect = R"(
+@compute @workgroup_size(1)
fn f1(@builtin(local_invocation_index) local_invocation_index : u32) {
{
a = i32();
@@ -927,7 +916,7 @@ fn f1(@builtin(local_invocation_index) local_invocation_index : u32) {
_ = c;
}
-@stage(compute) @workgroup_size(1, 2, 3)
+@compute @workgroup_size(1, 2, 3)
fn f2(@builtin(local_invocation_id) local_invocation_id : vec3<u32>, @builtin(local_invocation_index) local_invocation_index_1 : u32) {
if ((local_invocation_index_1 < 1u)) {
b.x = i32();
@@ -940,7 +929,7 @@ fn f2(@builtin(local_invocation_id) local_invocation_id : vec3<u32>, @builtin(lo
_ = b;
}
-@stage(compute) @workgroup_size(4, 5, 6)
+@compute @workgroup_size(4, 5, 6)
fn f3(@builtin(local_invocation_index) local_invocation_index_2 : u32) {
if ((local_invocation_index_2 < 1u)) {
a = i32();
@@ -971,13 +960,13 @@ struct S {
}
)";
- auto got = Run<ZeroInitWorkgroupMemory>(src);
+ auto got = Run<ZeroInitWorkgroupMemory>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(ZeroInitWorkgroupMemoryTest, TransitiveUsage) {
- auto* src = R"(
+ auto* src = R"(
var<workgroup> v : i32;
fn use_v() {
@@ -988,12 +977,12 @@ fn call_use_v() {
use_v();
}
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn f(@builtin(local_invocation_index) local_idx : u32) {
call_use_v(); // Initialization should be inserted above this statement
}
)";
- auto* expect = R"(
+ auto* expect = R"(
var<workgroup> v : i32;
fn use_v() {
@@ -1004,7 +993,7 @@ fn call_use_v() {
use_v();
}
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn f(@builtin(local_invocation_index) local_idx : u32) {
{
v = i32();
@@ -1014,14 +1003,14 @@ fn f(@builtin(local_invocation_index) local_idx : u32) {
}
)";
- auto got = Run<ZeroInitWorkgroupMemory>(src);
+ auto got = Run<ZeroInitWorkgroupMemory>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(ZeroInitWorkgroupMemoryTest, TransitiveUsage_OutOfOrder) {
- auto* src = R"(
-@stage(compute) @workgroup_size(1)
+ auto* src = R"(
+@compute @workgroup_size(1)
fn f(@builtin(local_invocation_index) local_idx : u32) {
call_use_v(); // Initialization should be inserted above this statement
}
@@ -1036,8 +1025,8 @@ fn use_v() {
var<workgroup> v : i32;
)";
- auto* expect = R"(
-@stage(compute) @workgroup_size(1)
+ auto* expect = R"(
+@compute @workgroup_size(1)
fn f(@builtin(local_invocation_index) local_idx : u32) {
{
v = i32();
@@ -1057,28 +1046,28 @@ fn use_v() {
var<workgroup> v : i32;
)";
- auto got = Run<ZeroInitWorkgroupMemory>(src);
+ auto got = Run<ZeroInitWorkgroupMemory>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(ZeroInitWorkgroupMemoryTest, WorkgroupAtomics) {
- auto* src = R"(
+ auto* src = R"(
var<workgroup> i : atomic<i32>;
var<workgroup> u : atomic<u32>;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn f() {
atomicLoad(&(i)); // Initialization should be inserted above this statement
atomicLoad(&(u));
}
)";
- auto* expect = R"(
+ auto* expect = R"(
var<workgroup> i : atomic<i32>;
var<workgroup> u : atomic<u32>;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn f(@builtin(local_invocation_index) local_invocation_index : u32) {
{
atomicStore(&(i), i32());
@@ -1090,14 +1079,14 @@ fn f(@builtin(local_invocation_index) local_invocation_index : u32) {
}
)";
- auto got = Run<ZeroInitWorkgroupMemory>(src);
+ auto got = Run<ZeroInitWorkgroupMemory>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(ZeroInitWorkgroupMemoryTest, WorkgroupAtomics_OutOfOrder) {
- auto* src = R"(
-@stage(compute) @workgroup_size(1)
+ auto* src = R"(
+@compute @workgroup_size(1)
fn f() {
atomicLoad(&(i)); // Initialization should be inserted above this statement
atomicLoad(&(u));
@@ -1106,8 +1095,8 @@ fn f() {
var<workgroup> i : atomic<i32>;
var<workgroup> u : atomic<u32>;
)";
- auto* expect = R"(
-@stage(compute) @workgroup_size(1)
+ auto* expect = R"(
+@compute @workgroup_size(1)
fn f(@builtin(local_invocation_index) local_invocation_index : u32) {
{
atomicStore(&(i), i32());
@@ -1123,13 +1112,13 @@ var<workgroup> i : atomic<i32>;
var<workgroup> u : atomic<u32>;
)";
- auto got = Run<ZeroInitWorkgroupMemory>(src);
+ auto got = Run<ZeroInitWorkgroupMemory>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(ZeroInitWorkgroupMemoryTest, WorkgroupStructOfAtomics) {
- auto* src = R"(
+ auto* src = R"(
struct S {
a : i32,
i : atomic<i32>,
@@ -1140,12 +1129,12 @@ struct S {
var<workgroup> w : S;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn f() {
_ = w.a; // Initialization should be inserted above this statement
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct S {
a : i32,
i : atomic<i32>,
@@ -1156,7 +1145,7 @@ struct S {
var<workgroup> w : S;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn f(@builtin(local_invocation_index) local_invocation_index : u32) {
{
w.a = i32();
@@ -1170,14 +1159,14 @@ fn f(@builtin(local_invocation_index) local_invocation_index : u32) {
}
)";
- auto got = Run<ZeroInitWorkgroupMemory>(src);
+ auto got = Run<ZeroInitWorkgroupMemory>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(ZeroInitWorkgroupMemoryTest, WorkgroupStructOfAtomics_OutOfOrder) {
- auto* src = R"(
-@stage(compute) @workgroup_size(1)
+ auto* src = R"(
+@compute @workgroup_size(1)
fn f() {
_ = w.a; // Initialization should be inserted above this statement
}
@@ -1192,8 +1181,8 @@ struct S {
c : u32,
};
)";
- auto* expect = R"(
-@stage(compute) @workgroup_size(1)
+ auto* expect = R"(
+@compute @workgroup_size(1)
fn f(@builtin(local_invocation_index) local_invocation_index : u32) {
{
w.a = i32();
@@ -1217,24 +1206,24 @@ struct S {
}
)";
- auto got = Run<ZeroInitWorkgroupMemory>(src);
+ auto got = Run<ZeroInitWorkgroupMemory>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(ZeroInitWorkgroupMemoryTest, WorkgroupArrayOfAtomics) {
- auto* src = R"(
+ auto* src = R"(
var<workgroup> w : array<atomic<u32>, 4>;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn f() {
atomicLoad(&w[0]); // Initialization should be inserted above this statement
}
)";
- auto* expect = R"(
+ auto* expect = R"(
var<workgroup> w : array<atomic<u32>, 4>;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn f(@builtin(local_invocation_index) local_invocation_index : u32) {
for(var idx : u32 = local_invocation_index; (idx < 4u); idx = (idx + 1u)) {
let i : u32 = idx;
@@ -1245,22 +1234,22 @@ fn f(@builtin(local_invocation_index) local_invocation_index : u32) {
}
)";
- auto got = Run<ZeroInitWorkgroupMemory>(src);
+ auto got = Run<ZeroInitWorkgroupMemory>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(ZeroInitWorkgroupMemoryTest, WorkgroupArrayOfAtomics_OutOfOrder) {
- auto* src = R"(
-@stage(compute) @workgroup_size(1)
+ auto* src = R"(
+@compute @workgroup_size(1)
fn f() {
atomicLoad(&w[0]); // Initialization should be inserted above this statement
}
var<workgroup> w : array<atomic<u32>, 4>;
)";
- auto* expect = R"(
-@stage(compute) @workgroup_size(1)
+ auto* expect = R"(
+@compute @workgroup_size(1)
fn f(@builtin(local_invocation_index) local_invocation_index : u32) {
for(var idx : u32 = local_invocation_index; (idx < 4u); idx = (idx + 1u)) {
let i : u32 = idx;
@@ -1273,13 +1262,13 @@ fn f(@builtin(local_invocation_index) local_invocation_index : u32) {
var<workgroup> w : array<atomic<u32>, 4>;
)";
- auto got = Run<ZeroInitWorkgroupMemory>(src);
+ auto got = Run<ZeroInitWorkgroupMemory>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
TEST_F(ZeroInitWorkgroupMemoryTest, WorkgroupArrayOfStructOfAtomics) {
- auto* src = R"(
+ auto* src = R"(
struct S {
a : i32,
i : atomic<i32>,
@@ -1290,12 +1279,12 @@ struct S {
var<workgroup> w : array<S, 4>;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn f() {
_ = w[0].a; // Initialization should be inserted above this statement
}
)";
- auto* expect = R"(
+ auto* expect = R"(
struct S {
a : i32,
i : atomic<i32>,
@@ -1306,7 +1295,7 @@ struct S {
var<workgroup> w : array<S, 4>;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1)
fn f(@builtin(local_invocation_index) local_invocation_index : u32) {
for(var idx : u32 = local_invocation_index; (idx < 4u); idx = (idx + 1u)) {
let i_1 : u32 = idx;
@@ -1321,15 +1310,14 @@ fn f(@builtin(local_invocation_index) local_invocation_index : u32) {
}
)";
- auto got = Run<ZeroInitWorkgroupMemory>(src);
+ auto got = Run<ZeroInitWorkgroupMemory>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
-TEST_F(ZeroInitWorkgroupMemoryTest,
- WorkgroupArrayOfStructOfAtomics_OutOfOrder) {
- auto* src = R"(
-@stage(compute) @workgroup_size(1)
+TEST_F(ZeroInitWorkgroupMemoryTest, WorkgroupArrayOfStructOfAtomics_OutOfOrder) {
+ auto* src = R"(
+@compute @workgroup_size(1)
fn f() {
_ = w[0].a; // Initialization should be inserted above this statement
}
@@ -1344,8 +1332,8 @@ struct S {
c : u32,
};
)";
- auto* expect = R"(
-@stage(compute) @workgroup_size(1)
+ auto* expect = R"(
+@compute @workgroup_size(1)
fn f(@builtin(local_invocation_index) local_invocation_index : u32) {
for(var idx : u32 = local_invocation_index; (idx < 4u); idx = (idx + 1u)) {
let i_1 : u32 = idx;
@@ -1370,9 +1358,9 @@ struct S {
}
)";
- auto got = Run<ZeroInitWorkgroupMemory>(src);
+ auto got = Run<ZeroInitWorkgroupMemory>(src);
- EXPECT_EQ(expect, str(got));
+ EXPECT_EQ(expect, str(got));
}
} // namespace
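The loop strides in the expected outputs above (for example `(u32(X) * 6u)` and `(X * 50u)`) come from the folded linear workgroup size: CalculateWorkgroupSize() multiplies the constant @workgroup_size dimensions into a single u32 and, when an override dimension is present, leaves an `override * constant` multiplication instead. A rough standalone sketch of that folding; the LinearSize struct and Fold() helper here are hypothetical, and the i32-to-u32 cast applied by the real transform is omitted:

#include <cstdint>
#include <cstdio>
#include <optional>
#include <string>
#include <vector>

struct LinearSize {
    uint32_t constant = 1;  // product of the known dimensions
    std::string expr;       // non-empty if any dimension is an override
};

LinearSize Fold(const std::vector<std::optional<uint32_t>>& dims,
                const std::string& override_name) {
    LinearSize out;
    bool has_override = false;
    for (auto& d : dims) {
        if (d) {
            out.constant *= *d;
        } else {
            has_override = true;
        }
    }
    if (has_override) {
        // e.g. @workgroup_size(2, 3, X) folds to "(X * 6u)",
        //      @workgroup_size(5u, X, 10u) folds to "(X * 50u)".
        out.expr = "(" + override_name + " * " + std::to_string(out.constant) + "u)";
        out.constant = 0;  // signals that the expression must be used
    }
    return out;
}

int main() {
    auto s = Fold({2, 3, std::nullopt}, "X");
    std::printf("constant=%u expr=%s\n", s.constant, s.expr.c_str());  // constant=0 expr=(X * 6u)
    return 0;
}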
diff --git a/chromium/third_party/dawn/src/tint/utils/bitcast.h b/chromium/third_party/dawn/src/tint/utils/bitcast.h
new file mode 100644
index 00000000000..4450336fbd3
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/utils/bitcast.h
@@ -0,0 +1,39 @@
+// Copyright 2022 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef SRC_TINT_UTILS_BITCAST_H_
+#define SRC_TINT_UTILS_BITCAST_H_
+
+#include <cstring>
+
+namespace tint::utils {
+
+/// Bitcast performs a cast of `from` to the `TO` type using a memcpy.
+/// This unsafe cast avoids triggering Clang's Control Flow Integrity checks.
+/// See: crbug.com/dawn/1406
+/// See: https://clang.llvm.org/docs/ControlFlowIntegrity.html#bad-cast-checking
+/// @param from the value to cast
+/// @tparam TO the type to cast to
+/// @returns the cast value
+template <typename TO, typename FROM>
+inline TO Bitcast(FROM&& from) {
+ static_assert(sizeof(FROM) == sizeof(TO));
+ TO to;
+ memcpy(&to, &from, sizeof(TO));
+ return to;
+}
+
+} // namespace tint::utils
+
+#endif // SRC_TINT_UTILS_BITCAST_H_
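A small usage sketch of the Bitcast() helper defined above; only Bitcast() itself comes from the header, while the float-to-bits round trip is an illustrative assumption:

#include <cstdint>
#include <cstdio>

#include "src/tint/utils/bitcast.h"

int main() {
    float f = 1.0f;
    // sizeof(float) == sizeof(uint32_t), so the static_assert inside Bitcast() holds.
    uint32_t bits = tint::utils::Bitcast<uint32_t>(f);
    std::printf("bits of 1.0f = 0x%08x\n", bits);  // 0x3f800000 on IEEE-754 platforms
    float back = tint::utils::Bitcast<float>(bits);
    std::printf("round trip = %f\n", back);  // 1.000000
    return 0;
}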
diff --git a/chromium/third_party/dawn/src/tint/utils/bitcast_test.cc b/chromium/third_party/dawn/src/tint/utils/bitcast_test.cc
new file mode 100644
index 00000000000..236489954bf
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/utils/bitcast_test.cc
@@ -0,0 +1,37 @@
+// Copyright 2022 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/tint/utils/bitcast.h"
+
+#include <stdint.h>
+
+#include "gtest/gtest.h"
+
+namespace tint::utils {
+namespace {
+
+TEST(Bitcast, Integer) {
+ uint32_t a = 123;
+ int32_t b = Bitcast<int32_t>(a);
+ EXPECT_EQ(a, static_cast<uint32_t>(b));
+}
+
+TEST(Bitcast, Pointer) {
+ uint32_t a = 123;
+ void* b = Bitcast<void*>(&a);
+ EXPECT_EQ(&a, static_cast<uint32_t*>(b));
+}
+
+} // namespace
+} // namespace tint::utils
diff --git a/chromium/third_party/dawn/src/tint/utils/block_allocator.h b/chromium/third_party/dawn/src/tint/utils/block_allocator.h
index 4823fae621d..f4139409854 100644
--- a/chromium/third_party/dawn/src/tint/utils/block_allocator.h
+++ b/chromium/third_party/dawn/src/tint/utils/block_allocator.h
@@ -16,8 +16,10 @@
#define SRC_TINT_UTILS_BLOCK_ALLOCATOR_H_
#include <array>
+#include <cstring>
#include <utility>
+#include "src/tint/utils/bitcast.h"
#include "src/tint/utils/math.h"
namespace tint::utils {
@@ -28,265 +30,259 @@ namespace tint::utils {
/// objects are automatically destructed and freed.
///
/// Objects held by the BlockAllocator can be iterated over using a View.
-template <typename T,
- size_t BLOCK_SIZE = 64 * 1024,
- size_t BLOCK_ALIGNMENT = 16>
+template <typename T, size_t BLOCK_SIZE = 64 * 1024, size_t BLOCK_ALIGNMENT = 16>
class BlockAllocator {
- /// Pointers is a chunk of T* pointers, forming a linked list.
- /// The list of Pointers are used to maintain the list of allocated objects.
- /// Pointers are allocated out of the block memory.
- struct Pointers {
- static constexpr size_t kMax = 32;
- std::array<T*, kMax> ptrs;
- Pointers* next;
- };
-
- /// Block is linked list of memory blocks.
- /// Blocks are allocated out of heap memory.
- ///
- /// Note: We're not using std::aligned_storage here as this warns / errors
- /// on MSVC.
- struct alignas(BLOCK_ALIGNMENT) Block {
- uint8_t data[BLOCK_SIZE];
- Block* next;
- };
-
- // Forward declaration
- template <bool IS_CONST>
- class TView;
-
- /// An iterator for the objects owned by the BlockAllocator.
- template <bool IS_CONST>
- class TIterator {
- using PointerTy = std::conditional_t<IS_CONST, const T*, T*>;
-
- public:
- /// Equality operator
- /// @param other the iterator to compare this iterator to
- /// @returns true if this iterator is equal to other
- bool operator==(const TIterator& other) const {
- return ptrs == other.ptrs && idx == other.idx;
- }
+ /// Pointers is a chunk of T* pointers, forming a linked list.
+ /// The list of Pointers is used to maintain the list of allocated objects.
+ /// Pointers are allocated out of the block memory.
+ struct Pointers {
+ static constexpr size_t kMax = 32;
+ std::array<T*, kMax> ptrs;
+ Pointers* next;
+ };
+
+ /// Block is a linked list of memory blocks.
+ /// Blocks are allocated out of heap memory.
+ ///
+ /// Note: We're not using std::aligned_storage here as this warns / errors
+ /// on MSVC.
+ struct alignas(BLOCK_ALIGNMENT) Block {
+ uint8_t data[BLOCK_SIZE];
+ Block* next;
+ };
+
+ // Forward declaration
+ template <bool IS_CONST>
+ class TView;
+
+ /// An iterator for the objects owned by the BlockAllocator.
+ template <bool IS_CONST>
+ class TIterator {
+ using PointerTy = std::conditional_t<IS_CONST, const T*, T*>;
+
+ public:
+ /// Equality operator
+ /// @param other the iterator to compare this iterator to
+ /// @returns true if this iterator is equal to other
+ bool operator==(const TIterator& other) const {
+ return ptrs == other.ptrs && idx == other.idx;
+ }
- /// Inequality operator
- /// @param other the iterator to compare this iterator to
- /// @returns true if this iterator is not equal to other
- bool operator!=(const TIterator& other) const { return !(*this == other); }
-
- /// Advances the iterator
- /// @returns this iterator
- TIterator& operator++() {
- if (ptrs != nullptr) {
- ++idx;
- if (idx == Pointers::kMax) {
- idx = 0;
- ptrs = ptrs->next;
+ /// Inequality operator
+ /// @param other the iterator to compare this iterator to
+ /// @returns true if this iterator is not equal to other
+ bool operator!=(const TIterator& other) const { return !(*this == other); }
+
+ /// Advances the iterator
+ /// @returns this iterator
+ TIterator& operator++() {
+ if (ptrs != nullptr) {
+ ++idx;
+ if (idx == Pointers::kMax) {
+ idx = 0;
+ ptrs = ptrs->next;
+ }
+ }
+ return *this;
}
- }
- return *this;
- }
- /// @returns the pointer to the object at the current iterator position
- PointerTy operator*() const { return ptrs ? ptrs->ptrs[idx] : nullptr; }
-
- private:
- friend TView<IS_CONST>; // Keep internal iterator impl private.
- explicit TIterator(const Pointers* p, size_t i) : ptrs(p), idx(i) {}
-
- const Pointers* ptrs;
- size_t idx;
- };
-
- /// View provides begin() and end() methods for looping over the objects
- /// owned by the BlockAllocator.
- template <bool IS_CONST>
- class TView {
- public:
- /// @returns an iterator to the beginning of the view
- TIterator<IS_CONST> begin() const {
- return TIterator<IS_CONST>{allocator_->pointers_.root, 0};
- }
+ /// @returns the pointer to the object at the current iterator position
+ PointerTy operator*() const { return ptrs ? ptrs->ptrs[idx] : nullptr; }
+
+ private:
+ friend TView<IS_CONST>; // Keep internal iterator impl private.
+ explicit TIterator(const Pointers* p, size_t i) : ptrs(p), idx(i) {}
+
+ const Pointers* ptrs;
+ size_t idx;
+ };
+
+ /// View provides begin() and end() methods for looping over the objects
+ /// owned by the BlockAllocator.
+ template <bool IS_CONST>
+ class TView {
+ public:
+ /// @returns an iterator to the beginning of the view
+ TIterator<IS_CONST> begin() const {
+ return TIterator<IS_CONST>{allocator_->pointers_.root, 0};
+ }
+
+ /// @returns an iterator to the end of the view
+ TIterator<IS_CONST> end() const {
+ return allocator_->pointers_.current_index >= Pointers::kMax
+ ? TIterator<IS_CONST>(nullptr, 0)
+ : TIterator<IS_CONST>(allocator_->pointers_.current,
+ allocator_->pointers_.current_index);
+ }
+
+ private:
+ friend BlockAllocator; // For BlockAllocator::operator View()
+ explicit TView(BlockAllocator const* allocator) : allocator_(allocator) {}
+ BlockAllocator const* const allocator_;
+ };
+
+ public:
+ /// An iterator type over the objects of the BlockAllocator
+ using Iterator = TIterator<false>;
+
+ /// An immutable iterator type over the objects of the BlockAllocator
+ using ConstIterator = TIterator<true>;
+
+ /// View provides begin() and end() methods for looping over the objects
+ /// owned by the BlockAllocator.
+ using View = TView<false>;
+
+ /// ConstView provides begin() and end() methods for looping over the objects
+ /// owned by the BlockAllocator.
+ using ConstView = TView<true>;
- /// @returns an iterator to the end of the view
- TIterator<IS_CONST> end() const {
- return allocator_->pointers_.current_index >= Pointers::kMax
- ? TIterator<IS_CONST>(nullptr, 0)
- : TIterator<IS_CONST>(allocator_->pointers_.current,
- allocator_->pointers_.current_index);
+ /// Constructor
+ BlockAllocator() = default;
+
+ /// Move constructor
+ /// @param rhs the BlockAllocator to move
+ BlockAllocator(BlockAllocator&& rhs) {
+ std::swap(block_, rhs.block_);
+ std::swap(pointers_, rhs.pointers_);
}
- private:
- friend BlockAllocator; // For BlockAllocator::operator View()
- explicit TView(BlockAllocator const* allocator) : allocator_(allocator) {}
- BlockAllocator const* const allocator_;
- };
-
- public:
- /// An iterator type over the objects of the BlockAllocator
- using Iterator = TIterator<false>;
-
- /// An immutable iterator type over the objects of the BlockAllocator
- using ConstIterator = TIterator<true>;
-
- /// View provides begin() and end() methods for looping over the objects
- /// owned by the BlockAllocator.
- using View = TView<false>;
-
- /// ConstView provides begin() and end() methods for looping over the objects
- /// owned by the BlockAllocator.
- using ConstView = TView<true>;
-
- /// Constructor
- BlockAllocator() = default;
-
- /// Move constructor
- /// @param rhs the BlockAllocator to move
- BlockAllocator(BlockAllocator&& rhs) {
- std::swap(block_, rhs.block_);
- std::swap(pointers_, rhs.pointers_);
- }
-
- /// Move assignment operator
- /// @param rhs the BlockAllocator to move
- /// @return this BlockAllocator
- BlockAllocator& operator=(BlockAllocator&& rhs) {
- if (this != &rhs) {
- Reset();
- std::swap(block_, rhs.block_);
- std::swap(pointers_, rhs.pointers_);
+ /// Move assignment operator
+ /// @param rhs the BlockAllocator to move
+ /// @return this BlockAllocator
+ BlockAllocator& operator=(BlockAllocator&& rhs) {
+ if (this != &rhs) {
+ Reset();
+ std::swap(block_, rhs.block_);
+ std::swap(pointers_, rhs.pointers_);
+ }
+ return *this;
}
- return *this;
- }
-
- /// Destructor
- ~BlockAllocator() { Reset(); }
-
- /// @return a View of all objects owned by this BlockAllocator
- View Objects() { return View(this); }
-
- /// @return a ConstView of all objects owned by this BlockAllocator
- ConstView Objects() const { return ConstView(this); }
-
- /// Creates a new `TYPE` owned by the BlockAllocator.
- /// When the BlockAllocator is destructed the object will be destructed and
- /// freed.
- /// @param args the arguments to pass to the type constructor
- /// @returns the pointer to the constructed object
- template <typename TYPE = T, typename... ARGS>
- TYPE* Create(ARGS&&... args) {
- static_assert(
- std::is_same<T, TYPE>::value || std::is_base_of<T, TYPE>::value,
- "TYPE does not derive from T");
- static_assert(
- std::is_same<T, TYPE>::value || std::has_virtual_destructor<T>::value,
- "TYPE requires a virtual destructor when calling Create() for a type "
- "that is not T");
-
- auto* ptr = Allocate<TYPE>();
- new (ptr) TYPE(std::forward<ARGS>(args)...);
- AddObjectPointer(ptr);
-
- return ptr;
- }
-
- /// Frees all allocations from the allocator.
- void Reset() {
- for (auto ptr : Objects()) {
- ptr->~T();
+
+ /// Destructor
+ ~BlockAllocator() { Reset(); }
+
+ /// @return a View of all objects owned by this BlockAllocator
+ View Objects() { return View(this); }
+
+ /// @return a ConstView of all objects owned by this BlockAllocator
+ ConstView Objects() const { return ConstView(this); }
+
+ /// Creates a new `TYPE` owned by the BlockAllocator.
+ /// When the BlockAllocator is destructed the object will be destructed and
+ /// freed.
+ /// @param args the arguments to pass to the type constructor
+ /// @returns the pointer to the constructed object
+ template <typename TYPE = T, typename... ARGS>
+ TYPE* Create(ARGS&&... args) {
+ static_assert(std::is_same<T, TYPE>::value || std::is_base_of<T, TYPE>::value,
+ "TYPE does not derive from T");
+ static_assert(std::is_same<T, TYPE>::value || std::has_virtual_destructor<T>::value,
+ "TYPE requires a virtual destructor when calling Create() for a type "
+ "that is not T");
+
+ auto* ptr = Allocate<TYPE>();
+ new (ptr) TYPE(std::forward<ARGS>(args)...);
+ AddObjectPointer(ptr);
+
+ return ptr;
}
- auto* block = block_.root;
- while (block != nullptr) {
- auto* next = block->next;
- delete block;
- block = next;
+
+ /// Frees all allocations from the allocator.
+ void Reset() {
+ for (auto ptr : Objects()) {
+ ptr->~T();
+ }
+ auto* block = block_.root;
+ while (block != nullptr) {
+ auto* next = block->next;
+ delete block;
+ block = next;
+ }
+ block_ = {};
+ pointers_ = {};
}
- block_ = {};
- pointers_ = {};
- }
-
- private:
- BlockAllocator(const BlockAllocator&) = delete;
- BlockAllocator& operator=(const BlockAllocator&) = delete;
-
- /// Allocates an instance of TYPE from the current block, or from a newly
- /// allocated block if the current block is full.
- template <typename TYPE>
- TYPE* Allocate() {
- static_assert(sizeof(TYPE) <= BLOCK_SIZE,
- "Cannot construct TYPE with size greater than BLOCK_SIZE");
- static_assert(alignof(TYPE) <= BLOCK_ALIGNMENT,
- "alignof(TYPE) is greater than ALIGNMENT");
-
- block_.current_offset =
- utils::RoundUp(alignof(TYPE), block_.current_offset);
- if (block_.current_offset + sizeof(TYPE) > BLOCK_SIZE) {
- // Allocate a new block from the heap
- auto* prev_block = block_.current;
- block_.current = new Block;
- if (!block_.current) {
- return nullptr; // out of memory
- }
- block_.current->next = nullptr;
- block_.current_offset = 0;
- if (prev_block) {
- prev_block->next = block_.current;
- } else {
- block_.root = block_.current;
- }
+
+ private:
+ BlockAllocator(const BlockAllocator&) = delete;
+ BlockAllocator& operator=(const BlockAllocator&) = delete;
+
+ /// Allocates an instance of TYPE from the current block, or from a newly
+ /// allocated block if the current block is full.
+ template <typename TYPE>
+ TYPE* Allocate() {
+ static_assert(sizeof(TYPE) <= BLOCK_SIZE,
+ "Cannot construct TYPE with size greater than BLOCK_SIZE");
+ static_assert(alignof(TYPE) <= BLOCK_ALIGNMENT, "alignof(TYPE) is greater than ALIGNMENT");
+
+ block_.current_offset = utils::RoundUp(alignof(TYPE), block_.current_offset);
+ if (block_.current_offset + sizeof(TYPE) > BLOCK_SIZE) {
+ // Allocate a new block from the heap
+ auto* prev_block = block_.current;
+ block_.current = new Block;
+ if (!block_.current) {
+ return nullptr; // out of memory
+ }
+ block_.current->next = nullptr;
+ block_.current_offset = 0;
+ if (prev_block) {
+ prev_block->next = block_.current;
+ } else {
+ block_.root = block_.current;
+ }
+ }
+
+ auto* base = &block_.current->data[0];
+ auto* ptr = utils::Bitcast<TYPE*>(base + block_.current_offset);
+ block_.current_offset += sizeof(TYPE);
+ return ptr;
}
- auto* base = &block_.current->data[0];
- auto* ptr = reinterpret_cast<TYPE*>(base + block_.current_offset);
- block_.current_offset += sizeof(TYPE);
- return ptr;
- }
-
- /// Adds `ptr` to the linked list of objects owned by this BlockAllocator.
- /// Once added, `ptr` will be tracked for destruction when the BlockAllocator
- /// is destructed.
- void AddObjectPointer(T* ptr) {
- if (pointers_.current_index >= Pointers::kMax) {
- auto* prev_pointers = pointers_.current;
- pointers_.current = Allocate<Pointers>();
- if (!pointers_.current) {
- return; // out of memory
- }
- pointers_.current->next = nullptr;
- pointers_.current_index = 0;
-
- if (prev_pointers) {
- prev_pointers->next = pointers_.current;
- } else {
- pointers_.root = pointers_.current;
- }
+ /// Adds `ptr` to the linked list of objects owned by this BlockAllocator.
+ /// Once added, `ptr` will be tracked for destruction when the BlockAllocator
+ /// is destructed.
+ void AddObjectPointer(T* ptr) {
+ if (pointers_.current_index >= Pointers::kMax) {
+ auto* prev_pointers = pointers_.current;
+ pointers_.current = Allocate<Pointers>();
+ if (!pointers_.current) {
+ return; // out of memory
+ }
+ pointers_.current->next = nullptr;
+ pointers_.current_index = 0;
+
+ if (prev_pointers) {
+ prev_pointers->next = pointers_.current;
+ } else {
+ pointers_.root = pointers_.current;
+ }
+ }
+
+ pointers_.current->ptrs[pointers_.current_index++] = ptr;
}
- pointers_.current->ptrs[pointers_.current_index++] = ptr;
- }
-
- struct {
- /// The root block of the block linked list
- Block* root = nullptr;
- /// The current (end) block of the blocked linked list.
- /// New allocations come from this block
- Block* current = nullptr;
- /// The byte offset in #current for the next allocation.
- /// Initialized with BLOCK_SIZE so that the first allocation triggers a
- /// block allocation.
- size_t current_offset = BLOCK_SIZE;
- } block_;
-
- struct {
- /// The root Pointers structure of the pointers linked list
- Pointers* root = nullptr;
- /// The current (end) Pointers structure of the pointers linked list.
- /// AddObjectPointer() adds to this structure.
- Pointers* current = nullptr;
- /// The array index in #current for the next append.
- /// Initialized with Pointers::kMax so that the first append triggers a
- /// allocation of the Pointers structure.
- size_t current_index = Pointers::kMax;
- } pointers_;
+ struct {
+ /// The root block of the block linked list
+ Block* root = nullptr;
+        /// The current (end) block of the block linked list.
+ /// New allocations come from this block
+ Block* current = nullptr;
+ /// The byte offset in #current for the next allocation.
+ /// Initialized with BLOCK_SIZE so that the first allocation triggers a
+ /// block allocation.
+ size_t current_offset = BLOCK_SIZE;
+ } block_;
+
+ struct {
+ /// The root Pointers structure of the pointers linked list
+ Pointers* root = nullptr;
+ /// The current (end) Pointers structure of the pointers linked list.
+ /// AddObjectPointer() adds to this structure.
+ Pointers* current = nullptr;
+ /// The array index in #current for the next append.
+        /// Initialized with Pointers::kMax so that the first append triggers an
+ /// allocation of the Pointers structure.
+ size_t current_index = Pointers::kMax;
+ } pointers_;
};
} // namespace tint::utils
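
For reference, a minimal usage sketch of the BlockAllocator API reformatted above (Create(), Objects(), Reset()); the Node type and the values are illustrative, not part of the diff:

    #include "src/tint/utils/block_allocator.h"

    struct Node {
        explicit Node(int v) : value(v) {}
        int value;
    };

    void Example() {
        tint::utils::BlockAllocator<Node> allocator;
        // Objects are constructed in place inside fixed-size heap blocks and
        // are owned by the allocator.
        Node* a = allocator.Create(1);
        Node* b = allocator.Create(2);
        (void)a;
        (void)b;
        // Iteration visits objects in creation order.
        for (Node* n : allocator.Objects()) {
            (void)n;
        }
        // Destructs all objects and frees all blocks; the allocator can be
        // reused afterwards. The destructor performs the same cleanup.
        allocator.Reset();
    }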
diff --git a/chromium/third_party/dawn/src/tint/utils/block_allocator_test.cc b/chromium/third_party/dawn/src/tint/utils/block_allocator_test.cc
index ace0e9f76dd..600019c4e9b 100644
--- a/chromium/third_party/dawn/src/tint/utils/block_allocator_test.cc
+++ b/chromium/third_party/dawn/src/tint/utils/block_allocator_test.cc
@@ -20,127 +20,125 @@ namespace tint::utils {
namespace {
struct LifetimeCounter {
- explicit LifetimeCounter(size_t* count) : count_(count) { (*count)++; }
- ~LifetimeCounter() { (*count_)--; }
+ explicit LifetimeCounter(size_t* count) : count_(count) { (*count)++; }
+ ~LifetimeCounter() { (*count_)--; }
- size_t* const count_;
+ size_t* const count_;
};
using BlockAllocatorTest = testing::Test;
TEST_F(BlockAllocatorTest, Empty) {
- using Allocator = BlockAllocator<int>;
+ using Allocator = BlockAllocator<int>;
- Allocator allocator;
+ Allocator allocator;
- for (int* i : allocator.Objects()) {
- (void)i;
- if ((true)) { // Workaround for "error: loop will run at most once"
- FAIL() << "BlockAllocator should be empty";
+ for (int* i : allocator.Objects()) {
+ (void)i;
+ if ((true)) { // Workaround for "error: loop will run at most once"
+ FAIL() << "BlockAllocator should be empty";
+ }
}
- }
- for (const int* i : static_cast<const Allocator&>(allocator).Objects()) {
- (void)i;
- if ((true)) { // Workaround for "error: loop will run at most once"
- FAIL() << "BlockAllocator should be empty";
+ for (const int* i : static_cast<const Allocator&>(allocator).Objects()) {
+ (void)i;
+ if ((true)) { // Workaround for "error: loop will run at most once"
+ FAIL() << "BlockAllocator should be empty";
+ }
}
- }
}
TEST_F(BlockAllocatorTest, ObjectLifetime) {
- using Allocator = BlockAllocator<LifetimeCounter>;
+ using Allocator = BlockAllocator<LifetimeCounter>;
- size_t count = 0;
- {
- Allocator allocator;
+ size_t count = 0;
+ {
+ Allocator allocator;
+ EXPECT_EQ(count, 0u);
+ allocator.Create(&count);
+ EXPECT_EQ(count, 1u);
+ allocator.Create(&count);
+ EXPECT_EQ(count, 2u);
+ allocator.Create(&count);
+ EXPECT_EQ(count, 3u);
+ }
EXPECT_EQ(count, 0u);
- allocator.Create(&count);
- EXPECT_EQ(count, 1u);
- allocator.Create(&count);
- EXPECT_EQ(count, 2u);
- allocator.Create(&count);
- EXPECT_EQ(count, 3u);
- }
- EXPECT_EQ(count, 0u);
}
TEST_F(BlockAllocatorTest, MoveConstruct) {
- using Allocator = BlockAllocator<LifetimeCounter>;
-
- for (size_t n :
- {0, 1, 10, 16, 20, 32, 50, 64, 100, 256, 300, 512, 500, 512}) {
- size_t count = 0;
- {
- Allocator allocator_a;
- for (size_t i = 0; i < n; i++) {
- allocator_a.Create(&count);
- }
- EXPECT_EQ(count, n);
-
- Allocator allocator_b{std::move(allocator_a)};
- EXPECT_EQ(count, n);
+ using Allocator = BlockAllocator<LifetimeCounter>;
+
+ for (size_t n : {0, 1, 10, 16, 20, 32, 50, 64, 100, 256, 300, 512, 500, 512}) {
+ size_t count = 0;
+ {
+ Allocator allocator_a;
+ for (size_t i = 0; i < n; i++) {
+ allocator_a.Create(&count);
+ }
+ EXPECT_EQ(count, n);
+
+ Allocator allocator_b{std::move(allocator_a)};
+ EXPECT_EQ(count, n);
+ }
+
+ EXPECT_EQ(count, 0u);
}
-
- EXPECT_EQ(count, 0u);
- }
}
TEST_F(BlockAllocatorTest, MoveAssign) {
- using Allocator = BlockAllocator<LifetimeCounter>;
-
- for (size_t n :
- {0, 1, 10, 16, 20, 32, 50, 64, 100, 256, 300, 512, 500, 512}) {
- size_t count_a = 0;
- size_t count_b = 0;
-
- {
- Allocator allocator_a;
- for (size_t i = 0; i < n; i++) {
- allocator_a.Create(&count_a);
- }
- EXPECT_EQ(count_a, n);
-
- Allocator allocator_b;
- for (size_t i = 0; i < n; i++) {
- allocator_b.Create(&count_b);
- }
- EXPECT_EQ(count_b, n);
-
- allocator_b = std::move(allocator_a);
- EXPECT_EQ(count_a, n);
- EXPECT_EQ(count_b, 0u);
+ using Allocator = BlockAllocator<LifetimeCounter>;
+
+ for (size_t n : {0, 1, 10, 16, 20, 32, 50, 64, 100, 256, 300, 512, 500, 512}) {
+ size_t count_a = 0;
+ size_t count_b = 0;
+
+ {
+ Allocator allocator_a;
+ for (size_t i = 0; i < n; i++) {
+ allocator_a.Create(&count_a);
+ }
+ EXPECT_EQ(count_a, n);
+
+ Allocator allocator_b;
+ for (size_t i = 0; i < n; i++) {
+ allocator_b.Create(&count_b);
+ }
+ EXPECT_EQ(count_b, n);
+
+ allocator_b = std::move(allocator_a);
+ EXPECT_EQ(count_a, n);
+ EXPECT_EQ(count_b, 0u);
+ }
+
+ EXPECT_EQ(count_a, 0u);
+ EXPECT_EQ(count_b, 0u);
}
-
- EXPECT_EQ(count_a, 0u);
- EXPECT_EQ(count_b, 0u);
- }
}
TEST_F(BlockAllocatorTest, ObjectOrder) {
- using Allocator = BlockAllocator<int>;
-
- Allocator allocator;
- constexpr int N = 10000;
- for (int i = 0; i < N; i++) {
- allocator.Create(i);
- }
-
- {
- int i = 0;
- for (int* p : allocator.Objects()) {
- EXPECT_EQ(*p, i);
- i++;
+ using Allocator = BlockAllocator<int>;
+
+ Allocator allocator;
+ constexpr int N = 10000;
+ for (int i = 0; i < N; i++) {
+ allocator.Create(i);
}
- EXPECT_EQ(i, N);
- }
- {
- int i = 0;
- for (const int* p : static_cast<const Allocator&>(allocator).Objects()) {
- EXPECT_EQ(*p, i);
- i++;
+
+ {
+ int i = 0;
+ for (int* p : allocator.Objects()) {
+ EXPECT_EQ(*p, i);
+ i++;
+ }
+ EXPECT_EQ(i, N);
+ }
+ {
+ int i = 0;
+ for (const int* p : static_cast<const Allocator&>(allocator).Objects()) {
+ EXPECT_EQ(*p, i);
+ i++;
+ }
+ EXPECT_EQ(i, N);
}
- EXPECT_EQ(i, N);
- }
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/utils/compiler_macros.h b/chromium/third_party/dawn/src/tint/utils/compiler_macros.h
new file mode 100644
index 00000000000..34965c672af
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/utils/compiler_macros.h
@@ -0,0 +1,82 @@
+// Copyright 2022 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/tint/utils/concat.h"
+
+#ifndef SRC_TINT_UTILS_COMPILER_MACROS_H_
+#define SRC_TINT_UTILS_COMPILER_MACROS_H_
+
+#define TINT_REQUIRE_SEMICOLON static_assert(true)
+
+#if defined(_MSC_VER)
+////////////////////////////////////////////////////////////////////////////////
+// MSVC
+////////////////////////////////////////////////////////////////////////////////
+#define TINT_DISABLE_WARNING_CONSTANT_OVERFLOW __pragma(warning(disable : 4756))
+#define TINT_DISABLE_WARNING_MAYBE_UNINITIALIZED /* currently no-op */
+#define TINT_DISABLE_WARNING_UNREACHABLE_CODE __pragma(warning(disable : 4702))
+
+// clang-format off
+#define TINT_BEGIN_DISABLE_WARNING(name) \
+ __pragma(warning(push)) \
+ TINT_CONCAT(TINT_DISABLE_WARNING_, name) \
+ TINT_REQUIRE_SEMICOLON
+#define TINT_END_DISABLE_WARNING(name) \
+ __pragma(warning(pop)) \
+ TINT_REQUIRE_SEMICOLON
+// clang-format on
+#elif defined(__clang__)
+////////////////////////////////////////////////////////////////////////////////
+// Clang
+////////////////////////////////////////////////////////////////////////////////
+#define TINT_DISABLE_WARNING_CONSTANT_OVERFLOW /* currently no-op */
+#define TINT_DISABLE_WARNING_MAYBE_UNINITIALIZED /* currently no-op */
+#define TINT_DISABLE_WARNING_UNREACHABLE_CODE /* currently no-op */
+
+// clang-format off
+#define TINT_BEGIN_DISABLE_WARNING(name) \
+ _Pragma("clang diagnostic push") \
+ TINT_CONCAT(TINT_DISABLE_WARNING_, name) \
+ TINT_REQUIRE_SEMICOLON
+#define TINT_END_DISABLE_WARNING(name) \
+ _Pragma("clang diagnostic pop") \
+ TINT_REQUIRE_SEMICOLON
+// clang-format on
+#elif defined(__GNUC__)
+////////////////////////////////////////////////////////////////////////////////
+// GCC
+////////////////////////////////////////////////////////////////////////////////
+#define TINT_DISABLE_WARNING_CONSTANT_OVERFLOW /* currently no-op */
+#define TINT_DISABLE_WARNING_MAYBE_UNINITIALIZED \
+ _Pragma("GCC diagnostic ignored \"-Wmaybe-uninitialized\"")
+#define TINT_DISABLE_WARNING_UNREACHABLE_CODE /* currently no-op */
+
+// clang-format off
+#define TINT_BEGIN_DISABLE_WARNING(name) \
+ _Pragma("GCC diagnostic push") \
+ TINT_CONCAT(TINT_DISABLE_WARNING_, name) \
+ TINT_REQUIRE_SEMICOLON
+#define TINT_END_DISABLE_WARNING(name) \
+ _Pragma("GCC diagnostic pop") \
+ TINT_REQUIRE_SEMICOLON
+// clang-format on
+#else
+////////////////////////////////////////////////////////////////////////////////
+// Other
+////////////////////////////////////////////////////////////////////////////////
+#define TINT_BEGIN_DISABLE_WARNING(name) TINT_REQUIRE_SEMICOLON
+#define TINT_END_DISABLE_WARNING(name) TINT_REQUIRE_SEMICOLON
+#endif
+
+#endif // SRC_TINT_UTILS_COMPILER_MACROS_H_
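
The warning-suppression macros above are used in matched pairs around code that intentionally trips a diagnostic; a short sketch (the function and the constant are illustrative):

    #include "src/tint/utils/compiler_macros.h"

    float HugeConstant() {
        float result;
        TINT_BEGIN_DISABLE_WARNING(CONSTANT_OVERFLOW);
        // MSVC would otherwise report C4756 (overflow in constant arithmetic);
        // on clang and GCC this particular suppression is currently a no-op.
        result = 1e38f * 1e38f;
        TINT_END_DISABLE_WARNING(CONSTANT_OVERFLOW);
        return result;
    }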
diff --git a/chromium/third_party/dawn/src/tint/utils/crc32.h b/chromium/third_party/dawn/src/tint/utils/crc32.h
index efe2f0e7a94..51236124de5 100644
--- a/chromium/third_party/dawn/src/tint/utils/crc32.h
+++ b/chromium/third_party/dawn/src/tint/utils/crc32.h
@@ -24,57 +24,50 @@ namespace tint::utils {
/// at compile time.
/// @see https://en.wikipedia.org/wiki/Cyclic_redundancy_check#CRC-32_algorithm
constexpr uint32_t CRC32(const char* s) {
- constexpr uint32_t kLUT[] = {
- 0, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419, 0x706af48f,
- 0xe963a535, 0x9e6495a3, 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988,
- 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91, 0x1db71064, 0x6ab020f2,
- 0xf3b97148, 0x84be41de, 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7,
- 0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9,
- 0xfa0f3d63, 0x8d080df5, 0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172,
- 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b, 0x35b5a8fa, 0x42b2986c,
- 0xdbbbc9d6, 0xacbcf940, 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59,
- 0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423,
- 0xcfba9599, 0xb8bda50f, 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
- 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, 0x76dc4190, 0x01db7106,
- 0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433,
- 0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818, 0x7f6a0dbb, 0x086d3d2d,
- 0x91646c97, 0xe6635c01, 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e,
- 0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457, 0x65b0d9c6, 0x12b7e950,
- 0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65,
- 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2, 0x4adfa541, 0x3dd895d7,
- 0xa4d1c46d, 0xd3d6f4fb, 0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0,
- 0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9, 0x5005713c, 0x270241aa,
- 0xbe0b1010, 0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
- 0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17, 0x2eb40d81,
- 0xb7bd5c3b, 0xc0ba6cad, 0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a,
- 0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683, 0xe3630b12, 0x94643b84,
- 0x0d6d6a3e, 0x7a6a5aa8, 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1,
- 0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb,
- 0x196c3671, 0x6e6b06e7, 0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc,
- 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5, 0xd6d6a3e8, 0xa1d1937e,
- 0x38d8c2c4, 0x4fdff252, 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b,
- 0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55,
- 0x316e8eef, 0x4669be79, 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
- 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, 0xc5ba3bbe, 0xb2bd0b28,
- 0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d,
- 0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a, 0x9c0906a9, 0xeb0e363f,
- 0x72076785, 0x05005713, 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38,
- 0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, 0x86d3d2d4, 0xf1d4e242,
- 0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777,
- 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c, 0x8f659eff, 0xf862ae69,
- 0x616bffd3, 0x166ccf45, 0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2,
- 0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db, 0xaed16a4a, 0xd9d65adc,
- 0x40df0b66, 0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
- 0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605, 0xcdd70693,
- 0x54de5729, 0x23d967bf, 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94,
- 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d};
+ constexpr uint32_t kLUT[] = {
+ 0, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419, 0x706af48f, 0xe963a535,
+ 0x9e6495a3, 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988, 0x09b64c2b, 0x7eb17cbd,
+ 0xe7b82d07, 0x90bf1d91, 0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de, 0x1adad47d,
+ 0x6ddde4eb, 0xf4d4b551, 0x83d385c7, 0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec,
+ 0x14015c4f, 0x63066cd9, 0xfa0f3d63, 0x8d080df5, 0x3b6e20c8, 0x4c69105e, 0xd56041e4,
+ 0xa2677172, 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b, 0x35b5a8fa, 0x42b2986c,
+ 0xdbbbc9d6, 0xacbcf940, 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59, 0x26d930ac,
+ 0x51de003a, 0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423, 0xcfba9599, 0xb8bda50f,
+ 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924, 0x2f6f7c87, 0x58684c11, 0xc1611dab,
+ 0xb6662d3d, 0x76dc4190, 0x01db7106, 0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f,
+ 0x9fbfe4a5, 0xe8b8d433, 0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818, 0x7f6a0dbb,
+ 0x086d3d2d, 0x91646c97, 0xe6635c01, 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e,
+ 0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457, 0x65b0d9c6, 0x12b7e950, 0x8bbeb8ea,
+ 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65, 0x4db26158, 0x3ab551ce,
+ 0xa3bc0074, 0xd4bb30e2, 0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb, 0x4369e96a,
+ 0x346ed9fc, 0xad678846, 0xda60b8d0, 0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9,
+ 0x5005713c, 0x270241aa, 0xbe0b1010, 0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409,
+ 0xce61e49f, 0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17, 0x2eb40d81,
+ 0xb7bd5c3b, 0xc0ba6cad, 0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a, 0xead54739,
+ 0x9dd277af, 0x04db2615, 0x73dc1683, 0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8,
+ 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1, 0xf00f9344, 0x8708a3d2, 0x1e01f268,
+ 0x6906c2fe, 0xf762575d, 0x806567cb, 0x196c3671, 0x6e6b06e7, 0xfed41b76, 0x89d32be0,
+ 0x10da7a5a, 0x67dd4acc, 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5, 0xd6d6a3e8,
+ 0xa1d1937e, 0x38d8c2c4, 0x4fdff252, 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b,
+ 0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55, 0x316e8eef,
+ 0x4669be79, 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236, 0xcc0c7795, 0xbb0b4703,
+ 0x220216b9, 0x5505262f, 0xc5ba3bbe, 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7,
+ 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d, 0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a,
+ 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713, 0x95bf4a82, 0xe2b87a14, 0x7bb12bae,
+ 0x0cb61b38, 0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, 0x86d3d2d4, 0xf1d4e242,
+ 0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777, 0x88085ae6,
+ 0xff0f6a70, 0x66063bca, 0x11010b5c, 0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45,
+ 0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2, 0xa7672661, 0xd06016f7, 0x4969474d,
+ 0x3e6e77db, 0xaed16a4a, 0xd9d65adc, 0x40df0b66, 0x37d83bf0, 0xa9bcae53, 0xdebb9ec5,
+ 0x47b2cf7f, 0x30b5ffe9, 0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605,
+ 0xcdd70693, 0x54de5729, 0x23d967bf, 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94,
+ 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d};
- uint32_t crc = 0xffffffff;
- for (auto* p = s; *p != '\0'; ++p) {
- crc =
- (crc >> 8) ^ kLUT[static_cast<uint8_t>(crc) ^ static_cast<uint8_t>(*p)];
- }
- return crc ^ 0xffffffff;
+ uint32_t crc = 0xffffffff;
+ for (auto* p = s; *p != '\0'; ++p) {
+ crc = (crc >> 8) ^ kLUT[static_cast<uint8_t>(crc) ^ static_cast<uint8_t>(*p)];
+ }
+ return crc ^ 0xffffffff;
}
} // namespace tint::utils
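
Because CRC32() is constexpr, string identifiers can be folded to integer constants at compile time; a sketch using the value that crc32_test.cc checks below:

    #include <cstdint>

    #include "src/tint/utils/crc32.h"

    constexpr uint32_t kHelloCrc = tint::utils::CRC32("hello world");
    static_assert(kHelloCrc == 0x0d4a1185u, "same value as in crc32_test.cc");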
diff --git a/chromium/third_party/dawn/src/tint/utils/crc32_test.cc b/chromium/third_party/dawn/src/tint/utils/crc32_test.cc
index 3e80e787e33..28dd16f9589 100644
--- a/chromium/third_party/dawn/src/tint/utils/crc32_test.cc
+++ b/chromium/third_party/dawn/src/tint/utils/crc32_test.cc
@@ -20,15 +20,15 @@ namespace tint::utils {
namespace {
TEST(CRC32Test, Compiletime) {
- static_assert(CRC32("") == 0x00000000u);
- static_assert(CRC32("hello world") == 0x0d4a1185u);
- static_assert(CRC32("123456789") == 0xcbf43926u);
+ static_assert(CRC32("") == 0x00000000u);
+ static_assert(CRC32("hello world") == 0x0d4a1185u);
+ static_assert(CRC32("123456789") == 0xcbf43926u);
}
TEST(CRC32Test, Runtime) {
- EXPECT_EQ(CRC32(""), 0x00000000u);
- EXPECT_EQ(CRC32("hello world"), 0x0d4a1185u);
- EXPECT_EQ(CRC32("123456789"), 0xcbf43926u);
+ EXPECT_EQ(CRC32(""), 0x00000000u);
+ EXPECT_EQ(CRC32("hello world"), 0x0d4a1185u);
+ EXPECT_EQ(CRC32("123456789"), 0xcbf43926u);
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/utils/debugger.cc b/chromium/third_party/dawn/src/tint/utils/debugger.cc
index aded5b6d6e3..3e05bdc3737 100644
--- a/chromium/third_party/dawn/src/tint/utils/debugger.cc
+++ b/chromium/third_party/dawn/src/tint/utils/debugger.cc
@@ -27,32 +27,31 @@
#ifdef _MSC_VER
#define TINT_DEBUGGER_BREAK_DEFINED
void tint::debugger::Break() {
- if (::IsDebuggerPresent()) {
- ::DebugBreak();
- }
+ if (::IsDebuggerPresent()) {
+ ::DebugBreak();
+ }
}
#elif defined(__linux__)
#define TINT_DEBUGGER_BREAK_DEFINED
void tint::debugger::Break() {
- // A process is being traced (debugged) if "/proc/self/status" contains a
- // line with "TracerPid: <non-zero-digit>...".
- bool is_traced = false;
- std::ifstream fin("/proc/self/status");
- std::string line;
- while (!is_traced && std::getline(fin, line)) {
- const char kPrefix[] = "TracerPid:\t";
- static constexpr int kPrefixLen = sizeof(kPrefix) - 1;
- if (line.length() > kPrefixLen &&
- line.compare(0, kPrefixLen, kPrefix) == 0) {
- is_traced = line[kPrefixLen] != '0';
+ // A process is being traced (debugged) if "/proc/self/status" contains a
+ // line with "TracerPid: <non-zero-digit>...".
+ bool is_traced = false;
+ std::ifstream fin("/proc/self/status");
+ std::string line;
+ while (!is_traced && std::getline(fin, line)) {
+ const char kPrefix[] = "TracerPid:\t";
+ static constexpr int kPrefixLen = sizeof(kPrefix) - 1;
+ if (line.length() > kPrefixLen && line.compare(0, kPrefixLen, kPrefix) == 0) {
+ is_traced = line[kPrefixLen] != '0';
+ }
}
- }
- if (is_traced) {
- raise(SIGTRAP);
- }
+ if (is_traced) {
+ raise(SIGTRAP);
+ }
}
#endif // platform
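
tint::debugger::Break() only stops execution when a debugger is attached (IsDebuggerPresent() on MSVC, a non-zero TracerPid on Linux) and is a no-op otherwise; a usage sketch, assuming the declaration lives in src/tint/utils/debugger.h:

    #include "src/tint/utils/debugger.h"

    void CheckInvariant(bool ok) {
        if (!ok) {
            // Breaks into an attached debugger; otherwise falls through to
            // normal error handling.
            tint::debugger::Break();
            // ... report the error ...
        }
    }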
diff --git a/chromium/third_party/dawn/src/tint/utils/defer.h b/chromium/third_party/dawn/src/tint/utils/defer.h
index ce586f94c7c..f073fb9dd44 100644
--- a/chromium/third_party/dawn/src/tint/utils/defer.h
+++ b/chromium/third_party/dawn/src/tint/utils/defer.h
@@ -24,38 +24,37 @@ namespace tint::utils {
/// Defer executes a function or function like object when it is destructed.
template <typename F>
class Defer {
- public:
- /// Constructor
- /// @param f the function to call when the Defer is destructed
- explicit Defer(F&& f) : f_(std::move(f)) {}
+ public:
+ /// Constructor
+ /// @param f the function to call when the Defer is destructed
+ explicit Defer(F&& f) : f_(std::move(f)) {}
- /// Move constructor
- Defer(Defer&&) = default;
+ /// Move constructor
+ Defer(Defer&&) = default;
- /// Destructor
- /// Calls the deferred function
- ~Defer() { f_(); }
+ /// Destructor
+ /// Calls the deferred function
+ ~Defer() { f_(); }
- private:
- Defer(const Defer&) = delete;
- Defer& operator=(const Defer&) = delete;
+ private:
+ Defer(const Defer&) = delete;
+ Defer& operator=(const Defer&) = delete;
- F f_;
+ F f_;
};
/// Constructor
/// @param f the function to call when the Defer is destructed
template <typename F>
inline Defer<F> MakeDefer(F&& f) {
- return Defer<F>(std::forward<F>(f));
+ return Defer<F>(std::forward<F>(f));
}
} // namespace tint::utils
/// TINT_DEFER(S) executes the statement(s) `S` when exiting the current lexical
/// scope.
-#define TINT_DEFER(S) \
- auto TINT_CONCAT(tint_defer_, __COUNTER__) = \
- ::tint::utils::MakeDefer([&] { S; })
+#define TINT_DEFER(S) \
+ auto TINT_CONCAT(tint_defer_, __COUNTER__) = ::tint::utils::MakeDefer([&] { S; })
#endif // SRC_TINT_UTILS_DEFER_H_
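
A minimal sketch of TINT_DEFER for scope-exit cleanup; the file-handling code is illustrative:

    #include <cstdio>

    #include "src/tint/utils/defer.h"

    void WriteFile(const char* path) {
        FILE* f = std::fopen(path, "wb");
        if (!f) {
            return;
        }
        // fclose(f) runs when the enclosing scope exits, on every path.
        TINT_DEFER(std::fclose(f));
        // ... write to f ...
    }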
diff --git a/chromium/third_party/dawn/src/tint/utils/defer_test.cc b/chromium/third_party/dawn/src/tint/utils/defer_test.cc
index 27fd9b5ef34..8a0bddb8194 100644
--- a/chromium/third_party/dawn/src/tint/utils/defer_test.cc
+++ b/chromium/third_party/dawn/src/tint/utils/defer_test.cc
@@ -20,22 +20,22 @@ namespace tint::utils {
namespace {
TEST(DeferTest, Basic) {
- bool deferCalled = false;
- { TINT_DEFER(deferCalled = true); }
- ASSERT_TRUE(deferCalled);
+ bool deferCalled = false;
+ { TINT_DEFER(deferCalled = true); }
+ ASSERT_TRUE(deferCalled);
}
TEST(DeferTest, DeferOrder) {
- int counter = 0;
- int a = 0, b = 0, c = 0;
- {
- TINT_DEFER(a = ++counter);
- TINT_DEFER(b = ++counter);
- TINT_DEFER(c = ++counter);
- }
- ASSERT_EQ(a, 3);
- ASSERT_EQ(b, 2);
- ASSERT_EQ(c, 1);
+ int counter = 0;
+ int a = 0, b = 0, c = 0;
+ {
+ TINT_DEFER(a = ++counter);
+ TINT_DEFER(b = ++counter);
+ TINT_DEFER(c = ++counter);
+ }
+ ASSERT_EQ(a, 3);
+ ASSERT_EQ(b, 2);
+ ASSERT_EQ(c, 1);
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/utils/enum_set.h b/chromium/third_party/dawn/src/tint/utils/enum_set.h
index c5af0f9f074..19d1a822234 100644
--- a/chromium/third_party/dawn/src/tint/utils/enum_set.h
+++ b/chromium/third_party/dawn/src/tint/utils/enum_set.h
@@ -28,194 +28,192 @@ namespace tint::utils {
/// enum values in the range [0 .. 63].
template <typename ENUM>
struct EnumSet {
- public:
- /// Enum is the enum type this EnumSet wraps
- using Enum = ENUM;
-
- /// Constructor. Initializes the EnumSet with zero.
- constexpr EnumSet() = default;
-
- /// Copy constructor.
- /// @param s the set to copy
- constexpr EnumSet(const EnumSet& s) = default;
-
- /// Constructor. Initializes the EnumSet with the given values.
- /// @param values the enumerator values to construct the set with
- template <typename... VALUES>
- explicit constexpr EnumSet(VALUES... values) : set(Union(values...)) {}
-
- /// Copy assignment operator.
- /// @param set the set to assign to this set
- /// @return this set so calls can be chained
- inline EnumSet& operator=(const EnumSet& set) = default;
-
- /// Copy assignment operator.
- /// @param e the enum value
- /// @return this set so calls can be chained
- inline EnumSet& operator=(Enum e) { return *this = EnumSet{e}; }
-
- /// Adds all the given values to this set
- /// @param values the values to add
- /// @return this set so calls can be chained
- template <typename... VALUES>
- inline EnumSet& Add(VALUES... values) {
- return Add(EnumSet(std::forward<VALUES>(values)...));
- }
-
- /// Removes all the given values from this set
- /// @param values the values to remove
- /// @return this set so calls can be chained
- template <typename... VALUES>
- inline EnumSet& Remove(VALUES... values) {
- return Remove(EnumSet(std::forward<VALUES>(values)...));
- }
-
- /// Adds all of s to this set
- /// @param s the enum value
- /// @return this set so calls can be chained
- inline EnumSet& Add(EnumSet s) { return (*this = *this + s); }
-
- /// Removes all of s from this set
- /// @param s the enum value
- /// @return this set so calls can be chained
- inline EnumSet& Remove(EnumSet s) { return (*this = *this - s); }
-
- /// @param e the enum value
- /// @returns a copy of this set with e added
- inline EnumSet operator+(Enum e) const {
- EnumSet out;
- out.set = set | Bit(e);
- return out;
- }
-
- /// @param e the enum value
- /// @returns a copy of this set with e removed
- inline EnumSet operator-(Enum e) const {
- EnumSet out;
- out.set = set & ~Bit(e);
- return out;
- }
-
- /// @param s the other set
- /// @returns the union of this set with s (this ∪ rhs)
- inline EnumSet operator+(EnumSet s) const {
- EnumSet out;
- out.set = set | s.set;
- return out;
- }
-
- /// @param s the other set
- /// @returns the set of entries found in this but not in s (this \ s)
- inline EnumSet operator-(EnumSet s) const {
- EnumSet out;
- out.set = set & ~s.set;
- return out;
- }
-
- /// @param s the other set
- /// @returns the intersection of this set with s (this ∩ rhs)
- inline EnumSet operator&(EnumSet s) const {
- EnumSet out;
- out.set = set & s.set;
- return out;
- }
-
- /// @param e the enum value
- /// @return true if the set contains `e`
- inline bool Contains(Enum e) const { return (set & Bit(e)) != 0; }
-
- /// @return true if the set is empty
- inline bool Empty() const { return set == 0; }
-
- /// Equality operator
- /// @param rhs the other EnumSet to compare this to
- /// @return true if this EnumSet is equal to rhs
- inline bool operator==(EnumSet rhs) const { return set == rhs.set; }
-
- /// Inequality operator
- /// @param rhs the other EnumSet to compare this to
- /// @return true if this EnumSet is not equal to rhs
- inline bool operator!=(EnumSet rhs) const { return set != rhs.set; }
-
- /// Equality operator
- /// @param rhs the enum to compare this to
- /// @return true if this EnumSet only contains `rhs`
- inline bool operator==(Enum rhs) const { return set == Bit(rhs); }
-
- /// Inequality operator
- /// @param rhs the enum to compare this to
- /// @return false if this EnumSet only contains `rhs`
- inline bool operator!=(Enum rhs) const { return set != Bit(rhs); }
-
- /// @return the underlying value for the EnumSet
- inline uint64_t Value() const { return set; }
-
- /// Iterator provides read-only, unidirectional iterator over the enums of an
- /// EnumSet.
- class Iterator {
- static constexpr int8_t kEnd = 63;
-
- Iterator(uint64_t s, int8_t b) : set(s), pos(b) {}
-
- /// Make the constructor accessible to the EnumSet.
- friend struct EnumSet;
-
- public:
- /// @return the Enum value at this point in the iterator
- Enum operator*() const { return static_cast<Enum>(pos); }
-
- /// Increments the iterator
- /// @returns this iterator
- Iterator& operator++() {
- while (pos < kEnd) {
- pos++;
- if (set & (static_cast<uint64_t>(1) << static_cast<uint64_t>(pos))) {
- break;
- }
- }
- return *this;
+ public:
+ /// Enum is the enum type this EnumSet wraps
+ using Enum = ENUM;
+
+ /// Constructor. Initializes the EnumSet with zero.
+ constexpr EnumSet() = default;
+
+ /// Copy constructor.
+ /// @param s the set to copy
+ constexpr EnumSet(const EnumSet& s) = default;
+
+ /// Constructor. Initializes the EnumSet with the given values.
+ /// @param values the enumerator values to construct the set with
+ template <typename... VALUES>
+ explicit constexpr EnumSet(VALUES... values) : set(Union(values...)) {}
+
+ /// Copy assignment operator.
+ /// @param set the set to assign to this set
+ /// @return this set so calls can be chained
+ inline EnumSet& operator=(const EnumSet& set) = default;
+
+    /// Assignment operator from a single enum value.
+ /// @param e the enum value
+ /// @return this set so calls can be chained
+ inline EnumSet& operator=(Enum e) { return *this = EnumSet{e}; }
+
+ /// Adds all the given values to this set
+ /// @param values the values to add
+ /// @return this set so calls can be chained
+ template <typename... VALUES>
+ inline EnumSet& Add(VALUES... values) {
+ return Add(EnumSet(std::forward<VALUES>(values)...));
}
- /// Equality operator
- /// @param rhs the Iterator to compare this to
- /// @return true if the two iterators are equal
- bool operator==(const Iterator& rhs) const {
- return set == rhs.set && pos == rhs.pos;
+ /// Removes all the given values from this set
+ /// @param values the values to remove
+ /// @return this set so calls can be chained
+ template <typename... VALUES>
+ inline EnumSet& Remove(VALUES... values) {
+ return Remove(EnumSet(std::forward<VALUES>(values)...));
+ }
+
+ /// Adds all of s to this set
+ /// @param s the enum value
+ /// @return this set so calls can be chained
+ inline EnumSet& Add(EnumSet s) { return (*this = *this + s); }
+
+ /// Removes all of s from this set
+ /// @param s the enum value
+ /// @return this set so calls can be chained
+ inline EnumSet& Remove(EnumSet s) { return (*this = *this - s); }
+
+ /// @param e the enum value
+ /// @returns a copy of this set with e added
+ inline EnumSet operator+(Enum e) const {
+ EnumSet out;
+ out.set = set | Bit(e);
+ return out;
+ }
+
+ /// @param e the enum value
+ /// @returns a copy of this set with e removed
+ inline EnumSet operator-(Enum e) const {
+ EnumSet out;
+ out.set = set & ~Bit(e);
+ return out;
+ }
+
+ /// @param s the other set
+ /// @returns the union of this set with s (this ∪ rhs)
+ inline EnumSet operator+(EnumSet s) const {
+ EnumSet out;
+ out.set = set | s.set;
+ return out;
}
+ /// @param s the other set
+ /// @returns the set of entries found in this but not in s (this \ s)
+ inline EnumSet operator-(EnumSet s) const {
+ EnumSet out;
+ out.set = set & ~s.set;
+ return out;
+ }
+
+ /// @param s the other set
+ /// @returns the intersection of this set with s (this ∩ rhs)
+ inline EnumSet operator&(EnumSet s) const {
+ EnumSet out;
+ out.set = set & s.set;
+ return out;
+ }
+
+ /// @param e the enum value
+ /// @return true if the set contains `e`
+ inline bool Contains(Enum e) const { return (set & Bit(e)) != 0; }
+
+ /// @return true if the set is empty
+ inline bool Empty() const { return set == 0; }
+
+ /// Equality operator
+ /// @param rhs the other EnumSet to compare this to
+ /// @return true if this EnumSet is equal to rhs
+ inline bool operator==(EnumSet rhs) const { return set == rhs.set; }
+
/// Inequality operator
- /// @param rhs the Iterator to compare this to
- /// @return true if the two iterators are different
- bool operator!=(const Iterator& rhs) const { return !(*this == rhs); }
-
- private:
- const uint64_t set;
- int8_t pos;
- };
-
- /// @returns an read-only iterator to the beginning of the set
- Iterator begin() {
- auto it = Iterator{set, -1};
- ++it; // Move to first set bit
- return it;
- }
-
- /// @returns an iterator to the beginning of the set
- Iterator end() { return Iterator{set, Iterator::kEnd}; }
-
- private:
- static constexpr uint64_t Bit(Enum value) {
- return static_cast<uint64_t>(1) << static_cast<uint64_t>(value);
- }
-
- static constexpr uint64_t Union() { return 0; }
-
- template <typename FIRST, typename... VALUES>
- static constexpr uint64_t Union(FIRST first, VALUES... values) {
- return Bit(first) | Union(values...);
- }
-
- uint64_t set = 0;
+ /// @param rhs the other EnumSet to compare this to
+ /// @return true if this EnumSet is not equal to rhs
+ inline bool operator!=(EnumSet rhs) const { return set != rhs.set; }
+
+ /// Equality operator
+ /// @param rhs the enum to compare this to
+ /// @return true if this EnumSet only contains `rhs`
+ inline bool operator==(Enum rhs) const { return set == Bit(rhs); }
+
+ /// Inequality operator
+ /// @param rhs the enum to compare this to
+ /// @return false if this EnumSet only contains `rhs`
+ inline bool operator!=(Enum rhs) const { return set != Bit(rhs); }
+
+ /// @return the underlying value for the EnumSet
+ inline uint64_t Value() const { return set; }
+
+    /// Iterator provides a read-only, unidirectional iterator over the enums of an
+ /// EnumSet.
+ class Iterator {
+ static constexpr int8_t kEnd = 63;
+
+ Iterator(uint64_t s, int8_t b) : set(s), pos(b) {}
+
+ /// Make the constructor accessible to the EnumSet.
+ friend struct EnumSet;
+
+ public:
+ /// @return the Enum value at this point in the iterator
+ Enum operator*() const { return static_cast<Enum>(pos); }
+
+ /// Increments the iterator
+ /// @returns this iterator
+ Iterator& operator++() {
+ while (pos < kEnd) {
+ pos++;
+ if (set & (static_cast<uint64_t>(1) << static_cast<uint64_t>(pos))) {
+ break;
+ }
+ }
+ return *this;
+ }
+
+ /// Equality operator
+ /// @param rhs the Iterator to compare this to
+ /// @return true if the two iterators are equal
+ bool operator==(const Iterator& rhs) const { return set == rhs.set && pos == rhs.pos; }
+
+ /// Inequality operator
+ /// @param rhs the Iterator to compare this to
+ /// @return true if the two iterators are different
+ bool operator!=(const Iterator& rhs) const { return !(*this == rhs); }
+
+ private:
+ const uint64_t set;
+ int8_t pos;
+ };
+
+    /// @returns a read-only iterator to the beginning of the set
+ Iterator begin() {
+ auto it = Iterator{set, -1};
+ ++it; // Move to first set bit
+ return it;
+ }
+
+ /// @returns an iterator to the beginning of the set
+    /// @returns an iterator to the end of the set
+
+ private:
+ static constexpr uint64_t Bit(Enum value) {
+ return static_cast<uint64_t>(1) << static_cast<uint64_t>(value);
+ }
+
+ static constexpr uint64_t Union() { return 0; }
+
+ template <typename FIRST, typename... VALUES>
+ static constexpr uint64_t Union(FIRST first, VALUES... values) {
+ return Bit(first) | Union(values...);
+ }
+
+ uint64_t set = 0;
};
/// Writes the EnumSet to the std::ostream.
@@ -224,16 +222,16 @@ struct EnumSet {
/// @returns out so calls can be chained
template <typename ENUM>
inline std::ostream& operator<<(std::ostream& out, EnumSet<ENUM> set) {
- out << "{";
- bool first = true;
- for (auto e : set) {
- if (!first) {
- out << ", ";
+ out << "{";
+ bool first = true;
+ for (auto e : set) {
+ if (!first) {
+ out << ", ";
+ }
+ first = false;
+ out << e;
}
- first = false;
- out << e;
- }
- return out << "}";
+ return out << "}";
}
} // namespace tint::utils
@@ -243,12 +241,12 @@ namespace std {
/// Custom std::hash specialization for tint::utils::EnumSet<T>
template <typename T>
class hash<tint::utils::EnumSet<T>> {
- public:
- /// @param e the EnumSet to create a hash for
- /// @return the hash value
- inline std::size_t operator()(const tint::utils::EnumSet<T>& e) const {
- return std::hash<uint64_t>()(e.Value());
- }
+ public:
+ /// @param e the EnumSet to create a hash for
+ /// @return the hash value
+ inline std::size_t operator()(const tint::utils::EnumSet<T>& e) const {
+ return std::hash<uint64_t>()(e.Value());
+ }
};
} // namespace std
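
A usage sketch of EnumSet; the Capability enum is illustrative (any enum with values in [0 .. 63] works):

    #include "src/tint/utils/enum_set.h"

    enum class Capability { kA = 0, kB = 3, kC = 7 };

    void Example() {
        tint::utils::EnumSet<Capability> caps(Capability::kA, Capability::kC);
        caps.Add(Capability::kB);
        caps.Remove(Capability::kA);
        if (caps.Contains(Capability::kC)) {
            // Iteration yields the members in ascending enum-value order.
            for (Capability c : caps) {
                (void)c;
            }
        }
    }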
diff --git a/chromium/third_party/dawn/src/tint/utils/enum_set_test.cc b/chromium/third_party/dawn/src/tint/utils/enum_set_test.cc
index f0e4e67beaf..e4696500dcf 100644
--- a/chromium/third_party/dawn/src/tint/utils/enum_set_test.cc
+++ b/chromium/third_party/dawn/src/tint/utils/enum_set_test.cc
@@ -27,215 +27,215 @@ using ::testing::ElementsAre;
enum class E { A = 0, B = 3, C = 7 };
std::ostream& operator<<(std::ostream& out, E e) {
- switch (e) {
- case E::A:
- return out << "A";
- case E::B:
- return out << "B";
- case E::C:
- return out << "C";
- }
- return out << "E(" << static_cast<uint32_t>(e) << ")";
+ switch (e) {
+ case E::A:
+ return out << "A";
+ case E::B:
+ return out << "B";
+ case E::C:
+ return out << "C";
+ }
+ return out << "E(" << static_cast<uint32_t>(e) << ")";
}
TEST(EnumSetTest, ConstructEmpty) {
- EnumSet<E> set;
- EXPECT_FALSE(set.Contains(E::A));
- EXPECT_FALSE(set.Contains(E::B));
- EXPECT_FALSE(set.Contains(E::C));
- EXPECT_TRUE(set.Empty());
+ EnumSet<E> set;
+ EXPECT_FALSE(set.Contains(E::A));
+ EXPECT_FALSE(set.Contains(E::B));
+ EXPECT_FALSE(set.Contains(E::C));
+ EXPECT_TRUE(set.Empty());
}
TEST(EnumSetTest, ConstructWithSingle) {
- EnumSet<E> set(E::B);
- EXPECT_FALSE(set.Contains(E::A));
- EXPECT_TRUE(set.Contains(E::B));
- EXPECT_FALSE(set.Contains(E::C));
- EXPECT_FALSE(set.Empty());
+ EnumSet<E> set(E::B);
+ EXPECT_FALSE(set.Contains(E::A));
+ EXPECT_TRUE(set.Contains(E::B));
+ EXPECT_FALSE(set.Contains(E::C));
+ EXPECT_FALSE(set.Empty());
}
TEST(EnumSetTest, ConstructWithMultiple) {
- EnumSet<E> set(E::A, E::C);
- EXPECT_TRUE(set.Contains(E::A));
- EXPECT_FALSE(set.Contains(E::B));
- EXPECT_TRUE(set.Contains(E::C));
- EXPECT_FALSE(set.Empty());
+ EnumSet<E> set(E::A, E::C);
+ EXPECT_TRUE(set.Contains(E::A));
+ EXPECT_FALSE(set.Contains(E::B));
+ EXPECT_TRUE(set.Contains(E::C));
+ EXPECT_FALSE(set.Empty());
}
TEST(EnumSetTest, AssignSet) {
- EnumSet<E> set;
- set = EnumSet<E>(E::A, E::C);
- EXPECT_TRUE(set.Contains(E::A));
- EXPECT_FALSE(set.Contains(E::B));
- EXPECT_TRUE(set.Contains(E::C));
+ EnumSet<E> set;
+ set = EnumSet<E>(E::A, E::C);
+ EXPECT_TRUE(set.Contains(E::A));
+ EXPECT_FALSE(set.Contains(E::B));
+ EXPECT_TRUE(set.Contains(E::C));
}
TEST(EnumSetTest, AssignEnum) {
- EnumSet<E> set(E::A);
- set = E::B;
- EXPECT_FALSE(set.Contains(E::A));
- EXPECT_TRUE(set.Contains(E::B));
- EXPECT_FALSE(set.Contains(E::C));
+ EnumSet<E> set(E::A);
+ set = E::B;
+ EXPECT_FALSE(set.Contains(E::A));
+ EXPECT_TRUE(set.Contains(E::B));
+ EXPECT_FALSE(set.Contains(E::C));
}
TEST(EnumSetTest, AddEnum) {
- EnumSet<E> set;
- set.Add(E::B);
- EXPECT_FALSE(set.Contains(E::A));
- EXPECT_TRUE(set.Contains(E::B));
- EXPECT_FALSE(set.Contains(E::C));
+ EnumSet<E> set;
+ set.Add(E::B);
+ EXPECT_FALSE(set.Contains(E::A));
+ EXPECT_TRUE(set.Contains(E::B));
+ EXPECT_FALSE(set.Contains(E::C));
}
TEST(EnumSetTest, RemoveEnum) {
- EnumSet<E> set(E::A, E::B);
- set.Remove(E::B);
- EXPECT_TRUE(set.Contains(E::A));
- EXPECT_FALSE(set.Contains(E::B));
- EXPECT_FALSE(set.Contains(E::C));
+ EnumSet<E> set(E::A, E::B);
+ set.Remove(E::B);
+ EXPECT_TRUE(set.Contains(E::A));
+ EXPECT_FALSE(set.Contains(E::B));
+ EXPECT_FALSE(set.Contains(E::C));
}
TEST(EnumSetTest, AddEnums) {
- EnumSet<E> set;
- set.Add(E::B, E::C);
- EXPECT_FALSE(set.Contains(E::A));
- EXPECT_TRUE(set.Contains(E::B));
- EXPECT_TRUE(set.Contains(E::C));
+ EnumSet<E> set;
+ set.Add(E::B, E::C);
+ EXPECT_FALSE(set.Contains(E::A));
+ EXPECT_TRUE(set.Contains(E::B));
+ EXPECT_TRUE(set.Contains(E::C));
}
TEST(EnumSetTest, RemoveEnums) {
- EnumSet<E> set(E::A, E::B);
- set.Remove(E::C, E::B);
- EXPECT_TRUE(set.Contains(E::A));
- EXPECT_FALSE(set.Contains(E::B));
- EXPECT_FALSE(set.Contains(E::C));
+ EnumSet<E> set(E::A, E::B);
+ set.Remove(E::C, E::B);
+ EXPECT_TRUE(set.Contains(E::A));
+ EXPECT_FALSE(set.Contains(E::B));
+ EXPECT_FALSE(set.Contains(E::C));
}
TEST(EnumSetTest, AddEnumSet) {
- EnumSet<E> set;
- set.Add(EnumSet<E>{E::B, E::C});
- EXPECT_FALSE(set.Contains(E::A));
- EXPECT_TRUE(set.Contains(E::B));
- EXPECT_TRUE(set.Contains(E::C));
+ EnumSet<E> set;
+ set.Add(EnumSet<E>{E::B, E::C});
+ EXPECT_FALSE(set.Contains(E::A));
+ EXPECT_TRUE(set.Contains(E::B));
+ EXPECT_TRUE(set.Contains(E::C));
}
TEST(EnumSetTest, RemoveEnumSet) {
- EnumSet<E> set(E::A, E::B);
- set.Remove(EnumSet<E>{E::B, E::C});
- EXPECT_TRUE(set.Contains(E::A));
- EXPECT_FALSE(set.Contains(E::B));
- EXPECT_FALSE(set.Contains(E::C));
+ EnumSet<E> set(E::A, E::B);
+ set.Remove(EnumSet<E>{E::B, E::C});
+ EXPECT_TRUE(set.Contains(E::A));
+ EXPECT_FALSE(set.Contains(E::B));
+ EXPECT_FALSE(set.Contains(E::C));
}
TEST(EnumSetTest, OperatorPlusEnum) {
- EnumSet<E> set = EnumSet<E>{E::B} + E::C;
- EXPECT_FALSE(set.Contains(E::A));
- EXPECT_TRUE(set.Contains(E::B));
- EXPECT_TRUE(set.Contains(E::C));
+ EnumSet<E> set = EnumSet<E>{E::B} + E::C;
+ EXPECT_FALSE(set.Contains(E::A));
+ EXPECT_TRUE(set.Contains(E::B));
+ EXPECT_TRUE(set.Contains(E::C));
}
TEST(EnumSetTest, OperatorMinusEnum) {
- EnumSet<E> set = EnumSet<E>{E::A, E::B} - E::B;
- EXPECT_TRUE(set.Contains(E::A));
- EXPECT_FALSE(set.Contains(E::B));
- EXPECT_FALSE(set.Contains(E::C));
+ EnumSet<E> set = EnumSet<E>{E::A, E::B} - E::B;
+ EXPECT_TRUE(set.Contains(E::A));
+ EXPECT_FALSE(set.Contains(E::B));
+ EXPECT_FALSE(set.Contains(E::C));
}
TEST(EnumSetTest, OperatorPlusSet) {
- EnumSet<E> set = EnumSet<E>{E::B} + EnumSet<E>{E::B, E::C};
- EXPECT_FALSE(set.Contains(E::A));
- EXPECT_TRUE(set.Contains(E::B));
- EXPECT_TRUE(set.Contains(E::C));
+ EnumSet<E> set = EnumSet<E>{E::B} + EnumSet<E>{E::B, E::C};
+ EXPECT_FALSE(set.Contains(E::A));
+ EXPECT_TRUE(set.Contains(E::B));
+ EXPECT_TRUE(set.Contains(E::C));
}
TEST(EnumSetTest, OperatorMinusSet) {
- EnumSet<E> set = EnumSet<E>{E::A, E::B} - EnumSet<E>{E::B, E::C};
- EXPECT_TRUE(set.Contains(E::A));
- EXPECT_FALSE(set.Contains(E::B));
- EXPECT_FALSE(set.Contains(E::C));
+ EnumSet<E> set = EnumSet<E>{E::A, E::B} - EnumSet<E>{E::B, E::C};
+ EXPECT_TRUE(set.Contains(E::A));
+ EXPECT_FALSE(set.Contains(E::B));
+ EXPECT_FALSE(set.Contains(E::C));
}
TEST(EnumSetTest, OperatorAnd) {
- EnumSet<E> set = EnumSet<E>{E::A, E::B} & EnumSet<E>{E::B, E::C};
- EXPECT_FALSE(set.Contains(E::A));
- EXPECT_TRUE(set.Contains(E::B));
- EXPECT_FALSE(set.Contains(E::C));
+ EnumSet<E> set = EnumSet<E>{E::A, E::B} & EnumSet<E>{E::B, E::C};
+ EXPECT_FALSE(set.Contains(E::A));
+ EXPECT_TRUE(set.Contains(E::B));
+ EXPECT_FALSE(set.Contains(E::C));
}
TEST(EnumSetTest, EqualitySet) {
- EXPECT_TRUE(EnumSet<E>(E::A, E::B) == EnumSet<E>(E::A, E::B));
- EXPECT_FALSE(EnumSet<E>(E::A, E::B) == EnumSet<E>(E::A, E::C));
+ EXPECT_TRUE(EnumSet<E>(E::A, E::B) == EnumSet<E>(E::A, E::B));
+ EXPECT_FALSE(EnumSet<E>(E::A, E::B) == EnumSet<E>(E::A, E::C));
}
TEST(EnumSetTest, InequalitySet) {
- EXPECT_FALSE(EnumSet<E>(E::A, E::B) != EnumSet<E>(E::A, E::B));
- EXPECT_TRUE(EnumSet<E>(E::A, E::B) != EnumSet<E>(E::A, E::C));
+ EXPECT_FALSE(EnumSet<E>(E::A, E::B) != EnumSet<E>(E::A, E::B));
+ EXPECT_TRUE(EnumSet<E>(E::A, E::B) != EnumSet<E>(E::A, E::C));
}
TEST(EnumSetTest, EqualityEnum) {
- EXPECT_TRUE(EnumSet<E>(E::A) == E::A);
- EXPECT_FALSE(EnumSet<E>(E::B) == E::A);
- EXPECT_FALSE(EnumSet<E>(E::B) == E::C);
- EXPECT_FALSE(EnumSet<E>(E::A, E::B) == E::A);
- EXPECT_FALSE(EnumSet<E>(E::A, E::B) == E::B);
- EXPECT_FALSE(EnumSet<E>(E::A, E::B) == E::C);
+ EXPECT_TRUE(EnumSet<E>(E::A) == E::A);
+ EXPECT_FALSE(EnumSet<E>(E::B) == E::A);
+ EXPECT_FALSE(EnumSet<E>(E::B) == E::C);
+ EXPECT_FALSE(EnumSet<E>(E::A, E::B) == E::A);
+ EXPECT_FALSE(EnumSet<E>(E::A, E::B) == E::B);
+ EXPECT_FALSE(EnumSet<E>(E::A, E::B) == E::C);
}
TEST(EnumSetTest, InequalityEnum) {
- EXPECT_FALSE(EnumSet<E>(E::A) != E::A);
- EXPECT_TRUE(EnumSet<E>(E::B) != E::A);
- EXPECT_TRUE(EnumSet<E>(E::B) != E::C);
- EXPECT_TRUE(EnumSet<E>(E::A, E::B) != E::A);
- EXPECT_TRUE(EnumSet<E>(E::A, E::B) != E::B);
- EXPECT_TRUE(EnumSet<E>(E::A, E::B) != E::C);
+ EXPECT_FALSE(EnumSet<E>(E::A) != E::A);
+ EXPECT_TRUE(EnumSet<E>(E::B) != E::A);
+ EXPECT_TRUE(EnumSet<E>(E::B) != E::C);
+ EXPECT_TRUE(EnumSet<E>(E::A, E::B) != E::A);
+ EXPECT_TRUE(EnumSet<E>(E::A, E::B) != E::B);
+ EXPECT_TRUE(EnumSet<E>(E::A, E::B) != E::C);
}
TEST(EnumSetTest, Hash) {
- auto hash = [&](EnumSet<E> s) { return std::hash<EnumSet<E>>()(s); };
- EXPECT_EQ(hash(EnumSet<E>(E::A, E::B)), hash(EnumSet<E>(E::A, E::B)));
- EXPECT_NE(hash(EnumSet<E>(E::A, E::B)), hash(EnumSet<E>(E::A, E::C)));
+ auto hash = [&](EnumSet<E> s) { return std::hash<EnumSet<E>>()(s); };
+ EXPECT_EQ(hash(EnumSet<E>(E::A, E::B)), hash(EnumSet<E>(E::A, E::B)));
+ EXPECT_NE(hash(EnumSet<E>(E::A, E::B)), hash(EnumSet<E>(E::A, E::C)));
}
TEST(EnumSetTest, Value) {
- EXPECT_EQ(EnumSet<E>().Value(), 0u);
- EXPECT_EQ(EnumSet<E>(E::A).Value(), 1u);
- EXPECT_EQ(EnumSet<E>(E::B).Value(), 8u);
- EXPECT_EQ(EnumSet<E>(E::C).Value(), 128u);
- EXPECT_EQ(EnumSet<E>(E::A, E::C).Value(), 129u);
+ EXPECT_EQ(EnumSet<E>().Value(), 0u);
+ EXPECT_EQ(EnumSet<E>(E::A).Value(), 1u);
+ EXPECT_EQ(EnumSet<E>(E::B).Value(), 8u);
+ EXPECT_EQ(EnumSet<E>(E::C).Value(), 128u);
+ EXPECT_EQ(EnumSet<E>(E::A, E::C).Value(), 129u);
}
TEST(EnumSetTest, Iterator) {
- auto set = EnumSet<E>(E::C, E::A);
+ auto set = EnumSet<E>(E::C, E::A);
- auto it = set.begin();
- EXPECT_EQ(*it, E::A);
- EXPECT_NE(it, set.end());
- ++it;
- EXPECT_EQ(*it, E::C);
- EXPECT_NE(it, set.end());
- ++it;
- EXPECT_EQ(it, set.end());
+ auto it = set.begin();
+ EXPECT_EQ(*it, E::A);
+ EXPECT_NE(it, set.end());
+ ++it;
+ EXPECT_EQ(*it, E::C);
+ EXPECT_NE(it, set.end());
+ ++it;
+ EXPECT_EQ(it, set.end());
}
TEST(EnumSetTest, IteratorEmpty) {
- auto set = EnumSet<E>();
- EXPECT_EQ(set.begin(), set.end());
+ auto set = EnumSet<E>();
+ EXPECT_EQ(set.begin(), set.end());
}
TEST(EnumSetTest, Loop) {
- auto set = EnumSet<E>(E::C, E::A);
+ auto set = EnumSet<E>(E::C, E::A);
- std::vector<E> seen;
- for (auto e : set) {
- seen.emplace_back(e);
- }
+ std::vector<E> seen;
+ for (auto e : set) {
+ seen.emplace_back(e);
+ }
- EXPECT_THAT(seen, ElementsAre(E::A, E::C));
+ EXPECT_THAT(seen, ElementsAre(E::A, E::C));
}
TEST(EnumSetTest, Ostream) {
- std::stringstream ss;
- ss << EnumSet<E>(E::A, E::C);
- EXPECT_EQ(ss.str(), "{A, C}");
+ std::stringstream ss;
+ ss << EnumSet<E>(E::A, E::C);
+ EXPECT_EQ(ss.str(), "{A, C}");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/utils/hash.h b/chromium/third_party/dawn/src/tint/utils/hash.h
index e043e81fe87..3deb14bcc43 100644
--- a/chromium/third_party/dawn/src/tint/utils/hash.h
+++ b/chromium/third_party/dawn/src/tint/utils/hash.h
@@ -18,6 +18,8 @@
#include <stdint.h>
#include <cstdio>
#include <functional>
+#include <tuple>
+#include <utility>
#include <vector>
namespace tint::utils {
@@ -31,51 +33,103 @@ struct HashCombineOffset {};
/// Specialization of HashCombineOffset for size_t == 4.
template <>
struct HashCombineOffset<4> {
- /// @returns the seed bias value for HashCombine()
- static constexpr inline uint32_t value() { return 0x7f4a7c16; }
+ /// @returns the seed bias value for HashCombine()
+ static constexpr inline uint32_t value() { return 0x7f4a7c16; }
};
/// Specialization of HashCombineOffset for size_t == 8.
template <>
struct HashCombineOffset<8> {
- /// @returns the seed bias value for HashCombine()
- static constexpr inline uint64_t value() { return 0x9e3779b97f4a7c16; }
+ /// @returns the seed bias value for HashCombine()
+ static constexpr inline uint64_t value() { return 0x9e3779b97f4a7c16; }
};
} // namespace detail
+// Forward declaration
+template <typename... ARGS>
+size_t Hash(const ARGS&... args);
+
/// HashCombine "hashes" together an existing hash and hashable values.
template <typename T>
void HashCombine(size_t* hash, const T& value) {
- constexpr size_t offset = detail::HashCombineOffset<sizeof(size_t)>::value();
- *hash ^= std::hash<T>()(value) + offset + (*hash << 6) + (*hash >> 2);
+ constexpr size_t offset = detail::HashCombineOffset<sizeof(size_t)>::value();
+ *hash ^= std::hash<T>()(value) + offset + (*hash << 6) + (*hash >> 2);
}
/// HashCombine "hashes" together an existing hash and hashable values.
template <typename T>
void HashCombine(size_t* hash, const std::vector<T>& vector) {
- HashCombine(hash, vector.size());
- for (auto& el : vector) {
- HashCombine(hash, el);
- }
+ HashCombine(hash, vector.size());
+ for (auto& el : vector) {
+ HashCombine(hash, el);
+ }
+}
+
+/// HashCombine "hashes" together an existing hash and hashable values.
+template <typename... TYPES>
+void HashCombine(size_t* hash, const std::tuple<TYPES...>& tuple) {
+ HashCombine(hash, sizeof...(TYPES));
+ HashCombine(hash, std::apply(Hash<TYPES...>, tuple));
}
/// HashCombine "hashes" together an existing hash and hashable values.
template <typename T, typename... ARGS>
void HashCombine(size_t* hash, const T& value, const ARGS&... args) {
- HashCombine(hash, value);
- HashCombine(hash, args...);
+ HashCombine(hash, value);
+ HashCombine(hash, args...);
}
/// @returns a hash of the combined arguments. The returned hash is dependent on
/// the order of the arguments.
template <typename... ARGS>
size_t Hash(const ARGS&... args) {
- size_t hash = 102931; // seed with an arbitrary prime
- HashCombine(&hash, args...);
- return hash;
+ size_t hash = 102931; // seed with an arbitrary prime
+ HashCombine(&hash, args...);
+ return hash;
}
+/// Wrapper for a hashable type enabling the wrapped value to be used as a key
+/// for an unordered_map or unordered_set.
+template <typename T>
+struct UnorderedKeyWrapper {
+ /// The wrapped value
+ const T value;
+ /// The hash of value
+ const size_t hash;
+
+ /// Constructor
+ /// @param v the value to wrap
+ explicit UnorderedKeyWrapper(const T& v) : value(v), hash(Hash(v)) {}
+
+ /// Move constructor
+ /// @param v the value to wrap
+ explicit UnorderedKeyWrapper(T&& v) : value(std::move(v)), hash(Hash(value)) {}
+
+ /// @returns true if this wrapper comes before other
+ /// @param other the RHS of the operator
+ bool operator<(const UnorderedKeyWrapper& other) const { return hash < other.hash; }
+
+ /// @returns true if this wrapped value is equal to the other wrapped value
+ /// @param other the RHS of the operator
+ bool operator==(const UnorderedKeyWrapper& other) const { return value == other.value; }
+};
+
} // namespace tint::utils
+namespace std {
+
+/// Custom std::hash specialization for tint::utils::UnorderedKeyWrapper
+template <typename T>
+class hash<tint::utils::UnorderedKeyWrapper<T>> {
+ public:
+ /// @param w the UnorderedKeyWrapper
+ /// @return the hash value
+ inline std::size_t operator()(const tint::utils::UnorderedKeyWrapper<T>& w) const {
+ return w.hash;
+ }
+};
+
+} // namespace std
+
#endif // SRC_TINT_UTILS_HASH_H_
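
A sketch of Hash() and the new UnorderedKeyWrapper, mirroring what hash_test.cc exercises below; the values are illustrative:

    #include <string>
    #include <unordered_map>
    #include <vector>

    #include "src/tint/utils/hash.h"

    void Example() {
        // Hash() combines any number of hashable arguments; order matters.
        const size_t h = tint::utils::Hash(42, std::string("tint"), std::vector<int>{1, 2, 3});
        (void)h;

        // UnorderedKeyWrapper lets a type without its own std::hash
        // specialization (here std::vector<int>) be used as a map key.
        using Key = tint::utils::UnorderedKeyWrapper<std::vector<int>>;
        std::unordered_map<Key, int> map;
        map.emplace(Key{{1, 2}}, -1);
    }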
diff --git a/chromium/third_party/dawn/src/tint/utils/hash_test.cc b/chromium/third_party/dawn/src/tint/utils/hash_test.cc
index ec88a8ca230..cb74df95c71 100644
--- a/chromium/third_party/dawn/src/tint/utils/hash_test.cc
+++ b/chromium/third_party/dawn/src/tint/utils/hash_test.cc
@@ -15,6 +15,8 @@
#include "src/tint/utils/hash.h"
#include <string>
+#include <tuple>
+#include <unordered_map>
#include "gtest/gtest.h"
@@ -22,25 +24,54 @@ namespace tint::utils {
namespace {
TEST(HashTests, Basic) {
- EXPECT_EQ(Hash(123), Hash(123));
- EXPECT_NE(Hash(123), Hash(321));
- EXPECT_EQ(Hash(123, 456), Hash(123, 456));
- EXPECT_NE(Hash(123, 456), Hash(456, 123));
- EXPECT_NE(Hash(123, 456), Hash(123));
- EXPECT_EQ(Hash(123, 456, false), Hash(123, 456, false));
- EXPECT_NE(Hash(123, 456, false), Hash(123, 456));
- EXPECT_EQ(Hash(std::string("hello")), Hash(std::string("hello")));
- EXPECT_NE(Hash(std::string("hello")), Hash(std::string("world")));
+ EXPECT_EQ(Hash(123), Hash(123));
+ EXPECT_NE(Hash(123), Hash(321));
+ EXPECT_EQ(Hash(123, 456), Hash(123, 456));
+ EXPECT_NE(Hash(123, 456), Hash(456, 123));
+ EXPECT_NE(Hash(123, 456), Hash(123));
+ EXPECT_EQ(Hash(123, 456, false), Hash(123, 456, false));
+ EXPECT_NE(Hash(123, 456, false), Hash(123, 456));
+ EXPECT_EQ(Hash(std::string("hello")), Hash(std::string("hello")));
+ EXPECT_NE(Hash(std::string("hello")), Hash(std::string("world")));
}
TEST(HashTests, Vector) {
- EXPECT_EQ(Hash(std::vector<int>({})), Hash(std::vector<int>({})));
- EXPECT_EQ(Hash(std::vector<int>({1, 2, 3})),
- Hash(std::vector<int>({1, 2, 3})));
- EXPECT_NE(Hash(std::vector<int>({1, 2, 3})),
- Hash(std::vector<int>({1, 2, 4})));
- EXPECT_NE(Hash(std::vector<int>({1, 2, 3})),
- Hash(std::vector<int>({1, 2, 3, 4})));
+ EXPECT_EQ(Hash(std::vector<int>({})), Hash(std::vector<int>({})));
+ EXPECT_EQ(Hash(std::vector<int>({1, 2, 3})), Hash(std::vector<int>({1, 2, 3})));
+ EXPECT_NE(Hash(std::vector<int>({1, 2, 3})), Hash(std::vector<int>({1, 2, 4})));
+ EXPECT_NE(Hash(std::vector<int>({1, 2, 3})), Hash(std::vector<int>({1, 2, 3, 4})));
+}
+
+TEST(HashTests, Tuple) {
+ EXPECT_EQ(Hash(std::make_tuple(1)), Hash(std::make_tuple(1)));
+ EXPECT_EQ(Hash(std::make_tuple(1, 2, 3)), Hash(std::make_tuple(1, 2, 3)));
+ EXPECT_NE(Hash(std::make_tuple(1, 2, 3)), Hash(std::make_tuple(1, 2, 4)));
+ EXPECT_NE(Hash(std::make_tuple(1, 2, 3)), Hash(std::make_tuple(1, 2, 3, 4)));
+}
+
+TEST(HashTests, UnorderedKeyWrapper) {
+ using W = UnorderedKeyWrapper<std::vector<int>>;
+
+ std::unordered_map<W, int> m;
+
+ m.emplace(W{{1, 2}}, -1);
+ EXPECT_EQ(m.size(), 1u);
+ EXPECT_EQ(m[W({1, 2})], -1);
+
+ m.emplace(W{{3, 2}}, 1);
+ EXPECT_EQ(m.size(), 2u);
+ EXPECT_EQ(m[W({3, 2})], 1);
+ EXPECT_EQ(m[W({1, 2})], -1);
+
+ m.emplace(W{{100}}, 100);
+ EXPECT_EQ(m.size(), 3u);
+ EXPECT_EQ(m[W({100})], 100);
+ EXPECT_EQ(m[W({3, 2})], 1);
+ EXPECT_EQ(m[W({1, 2})], -1);
+
+ // Reversed vector element order
+ EXPECT_EQ(m[W({2, 3})], 0);
+ EXPECT_EQ(m[W({2, 1})], 0);
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/utils/io/command.h b/chromium/third_party/dawn/src/tint/utils/io/command.h
index 63ddab83a31..8aae691e0e0 100644
--- a/chromium/third_party/dawn/src/tint/utils/io/command.h
+++ b/chromium/third_party/dawn/src/tint/utils/io/command.h
@@ -24,57 +24,57 @@ namespace tint::utils {
/// arguments and an optional stdin string, and then collecting and returning
/// the process's stdout and stderr output as strings.
class Command {
- public:
- /// Output holds the output of the process
- struct Output {
- /// stdout from the process
- std::string out;
- /// stderr from the process
- std::string err;
- /// process error code
- int error_code = 0;
- };
+ public:
+ /// Output holds the output of the process
+ struct Output {
+ /// stdout from the process
+ std::string out;
+ /// stderr from the process
+ std::string err;
+ /// process error code
+ int error_code = 0;
+ };
- /// Constructor
- /// @param path path to the executable
- explicit Command(const std::string& path);
+ /// Constructor
+ /// @param path path to the executable
+ explicit Command(const std::string& path);
- /// Looks for an executable with the given name in the current working
- /// directory, and if not found there, in each of the directories in the
- /// `PATH` environment variable.
- /// @param executable the executable name
- /// @returns a Command which will return true for Found() if the executable
- /// was found.
- static Command LookPath(const std::string& executable);
+ /// Looks for an executable with the given name in the current working
+ /// directory, and if not found there, in each of the directories in the
+ /// `PATH` environment variable.
+ /// @param executable the executable name
+ /// @returns a Command which will return true for Found() if the executable
+ /// was found.
+ static Command LookPath(const std::string& executable);
- /// @return true if the executable exists at the path provided to the
- /// constructor
- bool Found() const;
+ /// @return true if the executable exists at the path provided to the
+ /// constructor
+ bool Found() const;
- /// @returns the path of the command
- const std::string& Path() const { return path_; }
+ /// @returns the path of the command
+ const std::string& Path() const { return path_; }
- /// Invokes the command with the given argument strings, blocking until the
- /// process has returned.
- /// @param args the string arguments to pass to the process
- /// @returns the process output
- template <typename... ARGS>
- Output operator()(ARGS... args) const {
- return Exec({std::forward<ARGS>(args)...});
- }
+ /// Invokes the command with the given argument strings, blocking until the
+ /// process has returned.
+ /// @param args the string arguments to pass to the process
+ /// @returns the process output
+ template <typename... ARGS>
+ Output operator()(ARGS... args) const {
+ return Exec({std::forward<ARGS>(args)...});
+ }
- /// Exec invokes the command with the given argument strings, blocking until
- /// the process has returned.
- /// @param args the string arguments to pass to the process
- /// @returns the process output
- Output Exec(std::initializer_list<std::string> args) const;
+ /// Exec invokes the command with the given argument strings, blocking until
+ /// the process has returned.
+ /// @param args the string arguments to pass to the process
+ /// @returns the process output
+ Output Exec(std::initializer_list<std::string> args) const;
- /// @param input the input data to pipe to the process's stdin
- void SetInput(const std::string& input) { input_ = input; }
+ /// @param input the input data to pipe to the process's stdin
+ void SetInput(const std::string& input) { input_ = input; }
- private:
- std::string const path_;
- std::string input_;
+ private:
+ std::string const path_;
+ std::string input_;
};
} // namespace tint::utils
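For reference, a minimal sketch of how the Command API declared above is typically driven. This is not taken from the diff: the executable name "echo", the arguments, and the assumption that a POSIX-like "echo" is on PATH are illustrative.

    #include <iostream>

    #include "src/tint/utils/io/command.h"

    int main() {
        // Look up an executable on PATH (illustrative: "echo").
        auto cmd = tint::utils::Command::LookPath("echo");
        if (!cmd.Found()) {
            std::cerr << "echo not found on PATH\n";
            return 1;
        }

        // Optionally pipe data to the child's stdin (echo ignores it).
        cmd.SetInput("unused");

        // Invoke with variadic string arguments; blocks until the process exits.
        auto res = cmd("hello", "world");
        std::cout << "exit code: " << res.error_code << "\n"
                  << "stdout: " << res.out
                  << "stderr: " << res.err;
        return res.error_code;
    }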
diff --git a/chromium/third_party/dawn/src/tint/utils/io/command_other.cc b/chromium/third_party/dawn/src/tint/utils/io/command_other.cc
index 5ae73daaa97..cc1997b2ca7 100644
--- a/chromium/third_party/dawn/src/tint/utils/io/command_other.cc
+++ b/chromium/third_party/dawn/src/tint/utils/io/command_other.cc
@@ -19,17 +19,17 @@ namespace tint::utils {
Command::Command(const std::string&) {}
Command Command::LookPath(const std::string&) {
- return Command("");
+ return Command("");
}
bool Command::Found() const {
- return false;
+ return false;
}
Command::Output Command::Exec(std::initializer_list<std::string>) const {
- Output out;
- out.err = "Command not supported by this target";
- return out;
+ Output out;
+ out.err = "Command not supported by this target";
+ return out;
}
} // namespace tint::utils
diff --git a/chromium/third_party/dawn/src/tint/utils/io/command_posix.cc b/chromium/third_party/dawn/src/tint/utils/io/command_posix.cc
index 3696921d2eb..23ee5115fdd 100644
--- a/chromium/third_party/dawn/src/tint/utils/io/command_posix.cc
+++ b/chromium/third_party/dawn/src/tint/utils/io/command_posix.cc
@@ -27,102 +27,102 @@ namespace {
/// File is a simple wrapper around a POSIX file descriptor
class File {
- constexpr static const int kClosed = -1;
-
- public:
- /// Constructor
- File() : handle_(kClosed) {}
-
- /// Constructor
- explicit File(int handle) : handle_(handle) {}
-
- /// Destructor
- ~File() { Close(); }
-
- /// Move assignment operator
- File& operator=(File&& rhs) {
- Close();
- handle_ = rhs.handle_;
- rhs.handle_ = kClosed;
- return *this;
- }
-
- /// Closes the file (if it wasn't already closed)
- void Close() {
- if (handle_ != kClosed) {
- close(handle_);
+ constexpr static const int kClosed = -1;
+
+ public:
+ /// Constructor
+ File() : handle_(kClosed) {}
+
+ /// Constructor
+ explicit File(int handle) : handle_(handle) {}
+
+ /// Destructor
+ ~File() { Close(); }
+
+ /// Move assignment operator
+ File& operator=(File&& rhs) {
+ Close();
+ handle_ = rhs.handle_;
+ rhs.handle_ = kClosed;
+ return *this;
}
- handle_ = kClosed;
- }
- /// @returns the file handle
- operator int() { return handle_; }
+ /// Closes the file (if it wasn't already closed)
+ void Close() {
+ if (handle_ != kClosed) {
+ close(handle_);
+ }
+ handle_ = kClosed;
+ }
- /// @returns true if the file is not closed
- operator bool() { return handle_ != kClosed; }
+ /// @returns the file handle
+ operator int() { return handle_; }
- private:
- File(const File&) = delete;
- File& operator=(const File&) = delete;
+ /// @returns true if the file is not closed
+ operator bool() { return handle_ != kClosed; }
- int handle_ = kClosed;
+ private:
+ File(const File&) = delete;
+ File& operator=(const File&) = delete;
+
+ int handle_ = kClosed;
};
/// Pipe is a simple wrapper around a POSIX pipe() function
class Pipe {
- public:
- /// Constructs the pipe
- Pipe() {
- int pipes[2] = {};
- if (pipe(pipes) == 0) {
- read = File(pipes[0]);
- write = File(pipes[1]);
+ public:
+ /// Constructs the pipe
+ Pipe() {
+ int pipes[2] = {};
+ if (pipe(pipes) == 0) {
+ read = File(pipes[0]);
+ write = File(pipes[1]);
+ }
}
- }
- /// Closes both the read and write files (if they're not already closed)
- void Close() {
- read.Close();
- write.Close();
- }
+ /// Closes both the read and write files (if they're not already closed)
+ void Close() {
+ read.Close();
+ write.Close();
+ }
- /// @returns true if the pipe has an open read or write file
- operator bool() { return read || write; }
+ /// @returns true if the pipe has an open read or write file
+ operator bool() { return read || write; }
- /// The reader end of the pipe
- File read;
+ /// The reader end of the pipe
+ File read;
- /// The writer end of the pipe
- File write;
+ /// The writer end of the pipe
+ File write;
};
bool ExecutableExists(const std::string& path) {
- struct stat s {};
- if (stat(path.c_str(), &s) != 0) {
- return false;
- }
- return s.st_mode & S_IXUSR;
+ struct stat s {};
+ if (stat(path.c_str(), &s) != 0) {
+ return false;
+ }
+ return s.st_mode & S_IXUSR;
}
std::string FindExecutable(const std::string& name) {
- if (ExecutableExists(name)) {
- return name;
- }
- if (name.find("/") == std::string::npos) {
- auto* path_env = getenv("PATH");
- if (!path_env) {
- return "";
+ if (ExecutableExists(name)) {
+ return name;
}
- std::istringstream path{path_env};
- std::string dir;
- while (getline(path, dir, ':')) {
- auto test = dir + "/" + name;
- if (ExecutableExists(test)) {
- return test;
- }
+ if (name.find("/") == std::string::npos) {
+ auto* path_env = getenv("PATH");
+ if (!path_env) {
+ return "";
+ }
+ std::istringstream path{path_env};
+ std::string dir;
+ while (getline(path, dir, ':')) {
+ auto test = dir + "/" + name;
+ if (ExecutableExists(test)) {
+ return test;
+ }
+ }
}
- }
- return "";
+ return "";
}
} // namespace
@@ -130,134 +130,133 @@ std::string FindExecutable(const std::string& name) {
Command::Command(const std::string& path) : path_(path) {}
Command Command::LookPath(const std::string& executable) {
- return Command(FindExecutable(executable));
+ return Command(FindExecutable(executable));
}
bool Command::Found() const {
- return ExecutableExists(path_);
+ return ExecutableExists(path_);
}
-Command::Output Command::Exec(
- std::initializer_list<std::string> arguments) const {
- if (!Found()) {
- Output out;
- out.err = "Executable not found";
- return out;
- }
-
- // Pipes used for piping std[in,out,err] to / from the target process.
- Pipe stdin_pipe;
- Pipe stdout_pipe;
- Pipe stderr_pipe;
-
- if (!stdin_pipe || !stdout_pipe || !stderr_pipe) {
- Output output;
- output.err = "Command::Exec(): Failed to create pipes";
- return output;
- }
-
- // execv() and friends replace the current process image with the target
- // process image. To keep process that called this function going, we need to
- // fork() this process into a child and parent process.
- //
- // The child process is responsible for hooking up the pipes to
- // std[in,out,err]_pipes to STD[IN,OUT,ERR]_FILENO and then calling execv() to
- // run the target command.
- //
- // The parent process is responsible for feeding any input to the stdin_pipe
- // and collectting output from the std[out,err]_pipes.
-
- int child_id = fork();
- if (child_id < 0) {
- Output output;
- output.err = "Command::Exec(): fork() failed";
- return output;
- }
-
- if (child_id > 0) {
- // fork() - parent
-
- // Close the stdout and stderr writer pipes.
- // This is required for getting poll() POLLHUP events.
- stdout_pipe.write.Close();
- stderr_pipe.write.Close();
-
- // Write the input to the child process
- if (!input_.empty()) {
- ssize_t n = write(stdin_pipe.write, input_.data(), input_.size());
- if (n != static_cast<ssize_t>(input_.size())) {
+Command::Output Command::Exec(std::initializer_list<std::string> arguments) const {
+ if (!Found()) {
+ Output out;
+ out.err = "Executable not found";
+ return out;
+ }
+
+ // Pipes used for piping std[in,out,err] to / from the target process.
+ Pipe stdin_pipe;
+ Pipe stdout_pipe;
+ Pipe stderr_pipe;
+
+ if (!stdin_pipe || !stdout_pipe || !stderr_pipe) {
Output output;
- output.err = "Command::Exec(): write() for stdin failed";
+ output.err = "Command::Exec(): Failed to create pipes";
return output;
- }
}
- stdin_pipe.write.Close();
-
- // Accumulate the stdout and stderr output from the child process
- pollfd poll_fds[2];
- poll_fds[0].fd = stdout_pipe.read;
- poll_fds[0].events = POLLIN;
- poll_fds[1].fd = stderr_pipe.read;
- poll_fds[1].events = POLLIN;
-
- Output output;
- bool stdout_open = true;
- bool stderr_open = true;
- while (stdout_open || stderr_open) {
- if (poll(poll_fds, 2, -1) < 0) {
- break;
- }
- char buf[256];
- if (poll_fds[0].revents & POLLIN) {
- auto n = read(stdout_pipe.read, buf, sizeof(buf));
- if (n > 0) {
- output.out += std::string(buf, buf + n);
- }
- }
- if (poll_fds[0].revents & POLLHUP) {
- stdout_open = false;
- }
- if (poll_fds[1].revents & POLLIN) {
- auto n = read(stderr_pipe.read, buf, sizeof(buf));
- if (n > 0) {
- output.err += std::string(buf, buf + n);
- }
- }
- if (poll_fds[1].revents & POLLHUP) {
- stderr_open = false;
- }
+
+ // execv() and friends replace the current process image with the target
+    // process image. To keep the process that called this function going, we need to
+ // fork() this process into a child and parent process.
+ //
+ // The child process is responsible for hooking up the pipes to
+ // std[in,out,err]_pipes to STD[IN,OUT,ERR]_FILENO and then calling execv() to
+ // run the target command.
+ //
+ // The parent process is responsible for feeding any input to the stdin_pipe
+    // and collecting output from the std[out,err]_pipes.
+
+ int child_id = fork();
+ if (child_id < 0) {
+ Output output;
+ output.err = "Command::Exec(): fork() failed";
+ return output;
}
- // Get the resulting error code
- waitpid(child_id, &output.error_code, 0);
+ if (child_id > 0) {
+ // fork() - parent
+
+ // Close the stdout and stderr writer pipes.
+ // This is required for getting poll() POLLHUP events.
+ stdout_pipe.write.Close();
+ stderr_pipe.write.Close();
+
+ // Write the input to the child process
+ if (!input_.empty()) {
+ ssize_t n = write(stdin_pipe.write, input_.data(), input_.size());
+ if (n != static_cast<ssize_t>(input_.size())) {
+ Output output;
+ output.err = "Command::Exec(): write() for stdin failed";
+ return output;
+ }
+ }
+ stdin_pipe.write.Close();
- return output;
- } else {
- // fork() - child
+ // Accumulate the stdout and stderr output from the child process
+ pollfd poll_fds[2];
+ poll_fds[0].fd = stdout_pipe.read;
+ poll_fds[0].events = POLLIN;
+ poll_fds[1].fd = stderr_pipe.read;
+ poll_fds[1].events = POLLIN;
- // Redirect the stdin, stdout, stderr pipes for the execv process
- if ((dup2(stdin_pipe.read, STDIN_FILENO) == -1) ||
- (dup2(stdout_pipe.write, STDOUT_FILENO) == -1) ||
- (dup2(stderr_pipe.write, STDERR_FILENO) == -1)) {
- fprintf(stderr, "Command::Exec(): Failed to redirect pipes");
- exit(errno);
- }
+ Output output;
+ bool stdout_open = true;
+ bool stderr_open = true;
+ while (stdout_open || stderr_open) {
+ if (poll(poll_fds, 2, -1) < 0) {
+ break;
+ }
+ char buf[256];
+ if (poll_fds[0].revents & POLLIN) {
+ auto n = read(stdout_pipe.read, buf, sizeof(buf));
+ if (n > 0) {
+ output.out += std::string(buf, buf + n);
+ }
+ }
+ if (poll_fds[0].revents & POLLHUP) {
+ stdout_open = false;
+ }
+ if (poll_fds[1].revents & POLLIN) {
+ auto n = read(stderr_pipe.read, buf, sizeof(buf));
+ if (n > 0) {
+ output.err += std::string(buf, buf + n);
+ }
+ }
+ if (poll_fds[1].revents & POLLHUP) {
+ stderr_open = false;
+ }
+ }
- // Close the pipes, once redirected above, we're now done with them.
- stdin_pipe.Close();
- stdout_pipe.Close();
- stderr_pipe.Close();
+ // Get the resulting error code
+ waitpid(child_id, &output.error_code, 0);
- // Run target executable
- std::vector<const char*> args;
- args.emplace_back(path_.c_str());
- for (auto& arg : arguments) {
- args.emplace_back(arg.c_str());
+ return output;
+ } else {
+ // fork() - child
+
+ // Redirect the stdin, stdout, stderr pipes for the execv process
+ if ((dup2(stdin_pipe.read, STDIN_FILENO) == -1) ||
+ (dup2(stdout_pipe.write, STDOUT_FILENO) == -1) ||
+ (dup2(stderr_pipe.write, STDERR_FILENO) == -1)) {
+ fprintf(stderr, "Command::Exec(): Failed to redirect pipes");
+ exit(errno);
+ }
+
+ // Close the pipes, once redirected above, we're now done with them.
+ stdin_pipe.Close();
+ stdout_pipe.Close();
+ stderr_pipe.Close();
+
+ // Run target executable
+ std::vector<const char*> args;
+ args.emplace_back(path_.c_str());
+ for (auto& arg : arguments) {
+ args.emplace_back(arg.c_str());
+ }
+ args.emplace_back(nullptr);
+ auto res = execv(path_.c_str(), const_cast<char* const*>(args.data()));
+ exit(res);
}
- args.emplace_back(nullptr);
- auto res = execv(path_.c_str(), const_cast<char* const*>(args.data()));
- exit(res);
- }
}
} // namespace tint::utils
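The comments in Command::Exec() above describe the classic fork/pipe/execv pattern. As a stripped-down sketch of that pattern for readers who have not seen it, the snippet below captures only stdout and omits the stdin/stderr plumbing and the poll() loop of the real implementation; the helper name RunAndCapture is illustrative.

    #include <sys/types.h>
    #include <sys/wait.h>
    #include <unistd.h>

    #include <string>

    // Runs `path` with no arguments and returns its stdout (minimal sketch,
    // POSIX only, with only bare-minimum error handling).
    std::string RunAndCapture(const char* path) {
        int fds[2];
        if (pipe(fds) != 0) {
            return "";
        }
        pid_t child = fork();
        if (child == 0) {
            // Child: route stdout into the pipe, then replace the process image.
            dup2(fds[1], STDOUT_FILENO);
            close(fds[0]);
            close(fds[1]);
            char* const argv[] = {const_cast<char*>(path), nullptr};
            execv(path, argv);
            _exit(127);  // Only reached if execv() failed.
        }
        // Parent: close the write end so read() sees EOF once the child exits.
        close(fds[1]);
        std::string out;
        char buf[256];
        ssize_t n;
        while ((n = read(fds[0], buf, sizeof(buf))) > 0) {
            out.append(buf, static_cast<size_t>(n));
        }
        close(fds[0]);
        int status = 0;
        waitpid(child, &status, 0);
        return out;
    }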
diff --git a/chromium/third_party/dawn/src/tint/utils/io/command_test.cc b/chromium/third_party/dawn/src/tint/utils/io/command_test.cc
index 1b8bb836806..ed7e29a57d0 100644
--- a/chromium/third_party/dawn/src/tint/utils/io/command_test.cc
+++ b/chromium/third_party/dawn/src/tint/utils/io/command_test.cc
@@ -22,66 +22,66 @@ namespace {
#ifdef _WIN32
TEST(CommandTest, Echo) {
- auto cmd = Command::LookPath("cmd");
- if (!cmd.Found()) {
- GTEST_SKIP() << "cmd not found on PATH";
- }
-
- auto res = cmd("/C", "echo", "hello world");
- EXPECT_EQ(res.error_code, 0);
- EXPECT_EQ(res.out, "hello world\r\n");
- EXPECT_EQ(res.err, "");
+ auto cmd = Command::LookPath("cmd");
+ if (!cmd.Found()) {
+ GTEST_SKIP() << "cmd not found on PATH";
+ }
+
+ auto res = cmd("/C", "echo", "hello world");
+ EXPECT_EQ(res.error_code, 0);
+ EXPECT_EQ(res.out, "hello world\r\n");
+ EXPECT_EQ(res.err, "");
}
#else
TEST(CommandTest, Echo) {
- auto cmd = Command::LookPath("echo");
- if (!cmd.Found()) {
- GTEST_SKIP() << "echo not found on PATH";
- }
-
- auto res = cmd("hello world");
- EXPECT_EQ(res.error_code, 0);
- EXPECT_EQ(res.out, "hello world\n");
- EXPECT_EQ(res.err, "");
+ auto cmd = Command::LookPath("echo");
+ if (!cmd.Found()) {
+ GTEST_SKIP() << "echo not found on PATH";
+ }
+
+ auto res = cmd("hello world");
+ EXPECT_EQ(res.error_code, 0);
+ EXPECT_EQ(res.out, "hello world\n");
+ EXPECT_EQ(res.err, "");
}
TEST(CommandTest, Cat) {
- auto cmd = Command::LookPath("cat");
- if (!cmd.Found()) {
- GTEST_SKIP() << "cat not found on PATH";
- }
-
- cmd.SetInput("hello world");
- auto res = cmd();
- EXPECT_EQ(res.error_code, 0);
- EXPECT_EQ(res.out, "hello world");
- EXPECT_EQ(res.err, "");
+ auto cmd = Command::LookPath("cat");
+ if (!cmd.Found()) {
+ GTEST_SKIP() << "cat not found on PATH";
+ }
+
+ cmd.SetInput("hello world");
+ auto res = cmd();
+ EXPECT_EQ(res.error_code, 0);
+ EXPECT_EQ(res.out, "hello world");
+ EXPECT_EQ(res.err, "");
}
TEST(CommandTest, True) {
- auto cmd = Command::LookPath("true");
- if (!cmd.Found()) {
- GTEST_SKIP() << "true not found on PATH";
- }
-
- auto res = cmd();
- EXPECT_EQ(res.error_code, 0);
- EXPECT_EQ(res.out, "");
- EXPECT_EQ(res.err, "");
+ auto cmd = Command::LookPath("true");
+ if (!cmd.Found()) {
+ GTEST_SKIP() << "true not found on PATH";
+ }
+
+ auto res = cmd();
+ EXPECT_EQ(res.error_code, 0);
+ EXPECT_EQ(res.out, "");
+ EXPECT_EQ(res.err, "");
}
TEST(CommandTest, False) {
- auto cmd = Command::LookPath("false");
- if (!cmd.Found()) {
- GTEST_SKIP() << "false not found on PATH";
- }
-
- auto res = cmd();
- EXPECT_NE(res.error_code, 0);
- EXPECT_EQ(res.out, "");
- EXPECT_EQ(res.err, "");
+ auto cmd = Command::LookPath("false");
+ if (!cmd.Found()) {
+ GTEST_SKIP() << "false not found on PATH";
+ }
+
+ auto res = cmd();
+ EXPECT_NE(res.error_code, 0);
+ EXPECT_EQ(res.out, "");
+ EXPECT_EQ(res.err, "");
}
#endif
diff --git a/chromium/third_party/dawn/src/tint/utils/io/command_windows.cc b/chromium/third_party/dawn/src/tint/utils/io/command_windows.cc
index d59f849c7c9..f953a857760 100644
--- a/chromium/third_party/dawn/src/tint/utils/io/command_windows.cc
+++ b/chromium/third_party/dawn/src/tint/utils/io/command_windows.cc
@@ -25,112 +25,110 @@ namespace {
/// Handle is a simple wrapper around the Win32 HANDLE
class Handle {
- public:
- /// Constructor
- Handle() : handle_(nullptr) {}
-
- /// Constructor
- explicit Handle(HANDLE handle) : handle_(handle) {}
-
- /// Destructor
- ~Handle() { Close(); }
-
- /// Move assignment operator
- Handle& operator=(Handle&& rhs) {
- Close();
- handle_ = rhs.handle_;
- rhs.handle_ = nullptr;
- return *this;
- }
-
- /// Closes the handle (if it wasn't already closed)
- void Close() {
- if (handle_) {
- CloseHandle(handle_);
+ public:
+ /// Constructor
+ Handle() : handle_(nullptr) {}
+
+ /// Constructor
+ explicit Handle(HANDLE handle) : handle_(handle) {}
+
+ /// Destructor
+ ~Handle() { Close(); }
+
+ /// Move assignment operator
+ Handle& operator=(Handle&& rhs) {
+ Close();
+ handle_ = rhs.handle_;
+ rhs.handle_ = nullptr;
+ return *this;
}
- handle_ = nullptr;
- }
- /// @returns the handle
- operator HANDLE() { return handle_; }
+ /// Closes the handle (if it wasn't already closed)
+ void Close() {
+ if (handle_) {
+ CloseHandle(handle_);
+ }
+ handle_ = nullptr;
+ }
+
+ /// @returns the handle
+ operator HANDLE() { return handle_; }
- /// @returns true if the handle is not invalid
- operator bool() { return handle_ != nullptr; }
+ /// @returns true if the handle is not invalid
+ operator bool() { return handle_ != nullptr; }
- private:
- Handle(const Handle&) = delete;
- Handle& operator=(const Handle&) = delete;
+ private:
+ Handle(const Handle&) = delete;
+ Handle& operator=(const Handle&) = delete;
- HANDLE handle_ = nullptr;
+ HANDLE handle_ = nullptr;
};
/// Pipe is a simple wrapper around a Win32 CreatePipe() function
class Pipe {
- public:
- /// Constructs the pipe
- explicit Pipe(bool for_read) {
- SECURITY_ATTRIBUTES sa;
- sa.nLength = sizeof(SECURITY_ATTRIBUTES);
- sa.bInheritHandle = TRUE;
- sa.lpSecurityDescriptor = nullptr;
-
- HANDLE hread;
- HANDLE hwrite;
- if (CreatePipe(&hread, &hwrite, &sa, 0)) {
- read = Handle(hread);
- write = Handle(hwrite);
- // Ensure the read handle to the pipe is not inherited
- if (!SetHandleInformation(for_read ? read : write, HANDLE_FLAG_INHERIT,
- 0)) {
- read.Close();
- write.Close();
- }
+ public:
+ /// Constructs the pipe
+ explicit Pipe(bool for_read) {
+ SECURITY_ATTRIBUTES sa;
+ sa.nLength = sizeof(SECURITY_ATTRIBUTES);
+ sa.bInheritHandle = TRUE;
+ sa.lpSecurityDescriptor = nullptr;
+
+ HANDLE hread;
+ HANDLE hwrite;
+ if (CreatePipe(&hread, &hwrite, &sa, 0)) {
+ read = Handle(hread);
+ write = Handle(hwrite);
+ // Ensure the read handle to the pipe is not inherited
+ if (!SetHandleInformation(for_read ? read : write, HANDLE_FLAG_INHERIT, 0)) {
+ read.Close();
+ write.Close();
+ }
+ }
}
- }
- /// @returns true if the pipe has an open read or write file
- operator bool() { return read || write; }
+ /// @returns true if the pipe has an open read or write file
+ operator bool() { return read || write; }
- /// The reader end of the pipe
- Handle read;
+ /// The reader end of the pipe
+ Handle read;
- /// The writer end of the pipe
- Handle write;
+ /// The writer end of the pipe
+ Handle write;
};
bool ExecutableExists(const std::string& path) {
- DWORD type = 0;
- return GetBinaryTypeA(path.c_str(), &type);
+ DWORD type = 0;
+ return GetBinaryTypeA(path.c_str(), &type);
}
std::string FindExecutable(const std::string& name) {
- if (ExecutableExists(name)) {
- return name;
- }
- if (ExecutableExists(name + ".exe")) {
- return name + ".exe";
- }
- if (name.find("/") == std::string::npos &&
- name.find("\\") == std::string::npos) {
- char* path_env = nullptr;
- size_t path_env_len = 0;
- if (_dupenv_s(&path_env, &path_env_len, "PATH")) {
- return "";
+ if (ExecutableExists(name)) {
+ return name;
}
- std::istringstream path{path_env};
- free(path_env);
- std::string dir;
- while (getline(path, dir, ';')) {
- auto test = dir + "\\" + name;
- if (ExecutableExists(test)) {
- return test;
- }
- if (ExecutableExists(test + ".exe")) {
- return test + ".exe";
- }
+ if (ExecutableExists(name + ".exe")) {
+ return name + ".exe";
}
- }
- return "";
+ if (name.find("/") == std::string::npos && name.find("\\") == std::string::npos) {
+ char* path_env = nullptr;
+ size_t path_env_len = 0;
+ if (_dupenv_s(&path_env, &path_env_len, "PATH")) {
+ return "";
+ }
+ std::istringstream path{path_env};
+ free(path_env);
+ std::string dir;
+ while (getline(path, dir, ';')) {
+ auto test = dir + "\\" + name;
+ if (ExecutableExists(test)) {
+ return test;
+ }
+ if (ExecutableExists(test + ".exe")) {
+ return test + ".exe";
+ }
+ }
+ }
+ return "";
}
} // namespace
@@ -138,110 +136,106 @@ std::string FindExecutable(const std::string& name) {
Command::Command(const std::string& path) : path_(path) {}
Command Command::LookPath(const std::string& executable) {
- return Command(FindExecutable(executable));
+ return Command(FindExecutable(executable));
}
bool Command::Found() const {
- return ExecutableExists(path_);
+ return ExecutableExists(path_);
}
-Command::Output Command::Exec(
- std::initializer_list<std::string> arguments) const {
- Pipe stdout_pipe(true);
- Pipe stderr_pipe(true);
- Pipe stdin_pipe(false);
- if (!stdin_pipe || !stdout_pipe || !stderr_pipe) {
- Output output;
- output.err = "Command::Exec(): Failed to create pipes";
- return output;
- }
-
- if (!input_.empty()) {
- if (!WriteFile(stdin_pipe.write, input_.data(), input_.size(), nullptr,
- nullptr)) {
- Output output;
- output.err = "Command::Exec() Failed to write stdin";
- return output;
+Command::Output Command::Exec(std::initializer_list<std::string> arguments) const {
+ Pipe stdout_pipe(true);
+ Pipe stderr_pipe(true);
+ Pipe stdin_pipe(false);
+ if (!stdin_pipe || !stdout_pipe || !stderr_pipe) {
+ Output output;
+ output.err = "Command::Exec(): Failed to create pipes";
+ return output;
+ }
+
+ if (!input_.empty()) {
+ if (!WriteFile(stdin_pipe.write, input_.data(), input_.size(), nullptr, nullptr)) {
+ Output output;
+ output.err = "Command::Exec() Failed to write stdin";
+ return output;
+ }
+ }
+ stdin_pipe.write.Close();
+
+ STARTUPINFOA si{};
+ si.cb = sizeof(si);
+ si.dwFlags |= STARTF_USESTDHANDLES;
+ si.hStdOutput = stdout_pipe.write;
+ si.hStdError = stderr_pipe.write;
+ si.hStdInput = stdin_pipe.read;
+
+ std::stringstream args;
+ args << path_;
+ for (auto& arg : arguments) {
+ args << " " << arg;
}
- }
- stdin_pipe.write.Close();
-
- STARTUPINFOA si{};
- si.cb = sizeof(si);
- si.dwFlags |= STARTF_USESTDHANDLES;
- si.hStdOutput = stdout_pipe.write;
- si.hStdError = stderr_pipe.write;
- si.hStdInput = stdin_pipe.read;
-
- std::stringstream args;
- args << path_;
- for (auto& arg : arguments) {
- args << " " << arg;
- }
-
- PROCESS_INFORMATION pi{};
- if (!CreateProcessA(nullptr, // No module name (use command line)
- const_cast<LPSTR>(args.str().c_str()), // Command line
- nullptr, // Process handle not inheritable
- nullptr, // Thread handle not inheritable
- TRUE, // Handles are inherited
- 0, // No creation flags
- nullptr, // Use parent's environment block
- nullptr, // Use parent's starting directory
- &si, // Pointer to STARTUPINFO structure
- &pi)) { // Pointer to PROCESS_INFORMATION structure
- Output out;
- out.err = "Command::Exec() CreateProcess() failed";
- return out;
- }
-
- stdin_pipe.read.Close();
- stdout_pipe.write.Close();
- stderr_pipe.write.Close();
-
- struct StreamReadThreadArgs {
- HANDLE stream;
- std::string output;
- };
-
- auto stream_read_thread = [](LPVOID user) -> DWORD {
- auto* thread_args = reinterpret_cast<StreamReadThreadArgs*>(user);
- DWORD n = 0;
- char buf[256];
- while (ReadFile(thread_args->stream, buf, sizeof(buf), &n, NULL)) {
- auto s = std::string(buf, buf + n);
- thread_args->output += std::string(buf, buf + n);
+
+ PROCESS_INFORMATION pi{};
+ if (!CreateProcessA(nullptr, // No module name (use command line)
+ const_cast<LPSTR>(args.str().c_str()), // Command line
+ nullptr, // Process handle not inheritable
+ nullptr, // Thread handle not inheritable
+ TRUE, // Handles are inherited
+ 0, // No creation flags
+ nullptr, // Use parent's environment block
+ nullptr, // Use parent's starting directory
+ &si, // Pointer to STARTUPINFO structure
+ &pi)) { // Pointer to PROCESS_INFORMATION structure
+ Output out;
+ out.err = "Command::Exec() CreateProcess() failed";
+ return out;
+ }
+
+ stdin_pipe.read.Close();
+ stdout_pipe.write.Close();
+ stderr_pipe.write.Close();
+
+ struct StreamReadThreadArgs {
+ HANDLE stream;
+ std::string output;
+ };
+
+ auto stream_read_thread = [](LPVOID user) -> DWORD {
+ auto* thread_args = reinterpret_cast<StreamReadThreadArgs*>(user);
+ DWORD n = 0;
+ char buf[256];
+ while (ReadFile(thread_args->stream, buf, sizeof(buf), &n, NULL)) {
+ auto s = std::string(buf, buf + n);
+ thread_args->output += std::string(buf, buf + n);
+ }
+ return 0;
+ };
+
+ StreamReadThreadArgs stdout_read_args{stdout_pipe.read, {}};
+ auto* stdout_read_thread =
+ ::CreateThread(nullptr, 0, stream_read_thread, &stdout_read_args, 0, nullptr);
+
+ StreamReadThreadArgs stderr_read_args{stderr_pipe.read, {}};
+ auto* stderr_read_thread =
+ ::CreateThread(nullptr, 0, stream_read_thread, &stderr_read_args, 0, nullptr);
+
+ HANDLE handles[] = {pi.hProcess, stdout_read_thread, stderr_read_thread};
+ constexpr DWORD num_handles = sizeof(handles) / sizeof(handles[0]);
+
+ Output output;
+
+ auto res = WaitForMultipleObjects(num_handles, handles, /* wait_all = */ TRUE, INFINITE);
+ if (res >= WAIT_OBJECT_0 && res < WAIT_OBJECT_0 + num_handles) {
+ output.out = stdout_read_args.output;
+ output.err = stderr_read_args.output;
+ DWORD exit_code = 0;
+ GetExitCodeProcess(pi.hProcess, &exit_code);
+ output.error_code = static_cast<int>(exit_code);
+ } else {
+ output.err = "Command::Exec() WaitForMultipleObjects() returned " + std::to_string(res);
}
- return 0;
- };
-
- StreamReadThreadArgs stdout_read_args{stdout_pipe.read, {}};
- auto* stdout_read_thread = ::CreateThread(nullptr, 0, stream_read_thread,
- &stdout_read_args, 0, nullptr);
-
- StreamReadThreadArgs stderr_read_args{stderr_pipe.read, {}};
- auto* stderr_read_thread = ::CreateThread(nullptr, 0, stream_read_thread,
- &stderr_read_args, 0, nullptr);
-
- HANDLE handles[] = {pi.hProcess, stdout_read_thread, stderr_read_thread};
- constexpr DWORD num_handles = sizeof(handles) / sizeof(handles[0]);
-
- Output output;
-
- auto res = WaitForMultipleObjects(num_handles, handles, /* wait_all = */ TRUE,
- INFINITE);
- if (res >= WAIT_OBJECT_0 && res < WAIT_OBJECT_0 + num_handles) {
- output.out = stdout_read_args.output;
- output.err = stderr_read_args.output;
- DWORD exit_code = 0;
- GetExitCodeProcess(pi.hProcess, &exit_code);
- output.error_code = static_cast<int>(exit_code);
- } else {
- output.err = "Command::Exec() WaitForMultipleObjects() returned " +
- std::to_string(res);
- }
-
- return output;
+
+ return output;
}
} // namespace tint::utils
diff --git a/chromium/third_party/dawn/src/tint/utils/io/tmpfile.h b/chromium/third_party/dawn/src/tint/utils/io/tmpfile.h
index 6f81a891e91..24e72086c2c 100644
--- a/chromium/third_party/dawn/src/tint/utils/io/tmpfile.h
+++ b/chromium/third_party/dawn/src/tint/utils/io/tmpfile.h
@@ -23,50 +23,50 @@ namespace tint::utils {
/// TmpFile constructs a temporary file that can be written to, and is
/// automatically deleted on destruction.
class TmpFile {
- public:
- /// Constructor.
- /// Creates a new temporary file which can be written to.
- /// The temporary file will be automatically deleted on destruction.
- /// @param extension optional file extension to use with the file. The file
- /// have no extension by default.
- explicit TmpFile(std::string extension = "");
+ public:
+ /// Constructor.
+ /// Creates a new temporary file which can be written to.
+ /// The temporary file will be automatically deleted on destruction.
+ /// @param extension optional file extension to use with the file. The file
+    /// has no extension by default.
+ explicit TmpFile(std::string extension = "");
- /// Destructor.
- /// Deletes the temporary file.
- ~TmpFile();
+ /// Destructor.
+ /// Deletes the temporary file.
+ ~TmpFile();
- /// @return true if the temporary file was successfully created.
- operator bool() { return !path_.empty(); }
+ /// @return true if the temporary file was successfully created.
+ operator bool() { return !path_.empty(); }
- /// @return the path to the temporary file
- std::string Path() const { return path_; }
+ /// @return the path to the temporary file
+ std::string Path() const { return path_; }
- /// Opens the temporary file and appends |size| bytes from |data| to the end
- /// of the temporary file. The temporary file is closed again before
- /// returning, allowing other processes to open the file on operating systems
- /// that require exclusive ownership of opened files.
- /// @param data the data to write to the end of the file
- /// @param size the number of bytes to write from data
- /// @returns true on success, otherwise false
- bool Append(const void* data, size_t size) const;
+ /// Opens the temporary file and appends |size| bytes from |data| to the end
+ /// of the temporary file. The temporary file is closed again before
+ /// returning, allowing other processes to open the file on operating systems
+ /// that require exclusive ownership of opened files.
+ /// @param data the data to write to the end of the file
+ /// @param size the number of bytes to write from data
+ /// @returns true on success, otherwise false
+ bool Append(const void* data, size_t size) const;
- /// Appends the argument to the end of the file.
- /// @param data the data to write to the end of the file
- /// @return a reference to this TmpFile
- template <typename T>
- inline TmpFile& operator<<(T&& data) {
- std::stringstream ss;
- ss << data;
- std::string str = ss.str();
- Append(str.data(), str.size());
- return *this;
- }
+ /// Appends the argument to the end of the file.
+ /// @param data the data to write to the end of the file
+ /// @return a reference to this TmpFile
+ template <typename T>
+ inline TmpFile& operator<<(T&& data) {
+ std::stringstream ss;
+ ss << data;
+ std::string str = ss.str();
+ Append(str.data(), str.size());
+ return *this;
+ }
- private:
- TmpFile(const TmpFile&) = delete;
- TmpFile& operator=(const TmpFile&) = delete;
+ private:
+ TmpFile(const TmpFile&) = delete;
+ TmpFile& operator=(const TmpFile&) = delete;
- std::string path_;
+ std::string path_;
};
} // namespace tint::utils
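A short usage sketch of the TmpFile class declared above; the ".wgsl" extension and the written content are illustrative.

    #include <iostream>

    #include "src/tint/utils/io/tmpfile.h"

    int main() {
        tint::utils::TmpFile tmp(".wgsl");  // the extension is optional
        if (!tmp) {
            std::cerr << "failed to create a temporary file\n";
            return 1;
        }
        // operator<< appends via Append(); the file is closed again after each
        // write, so other processes may open it in the meantime.
        tmp << "fn main() {}\n";
        std::cout << "wrote to " << tmp.Path() << "\n";
        // The file is deleted automatically when `tmp` goes out of scope.
        return 0;
    }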
diff --git a/chromium/third_party/dawn/src/tint/utils/io/tmpfile_other.cc b/chromium/third_party/dawn/src/tint/utils/io/tmpfile_other.cc
index 7ddbb58155a..14c8660dc94 100644
--- a/chromium/third_party/dawn/src/tint/utils/io/tmpfile_other.cc
+++ b/chromium/third_party/dawn/src/tint/utils/io/tmpfile_other.cc
@@ -21,7 +21,7 @@ TmpFile::TmpFile(std::string) {}
TmpFile::~TmpFile() = default;
bool TmpFile::Append(const void*, size_t) const {
- return false;
+ return false;
}
} // namespace tint::utils
diff --git a/chromium/third_party/dawn/src/tint/utils/io/tmpfile_posix.cc b/chromium/third_party/dawn/src/tint/utils/io/tmpfile_posix.cc
index 00c20fe8da4..ba84afd31c7 100644
--- a/chromium/third_party/dawn/src/tint/utils/io/tmpfile_posix.cc
+++ b/chromium/third_party/dawn/src/tint/utils/io/tmpfile_posix.cc
@@ -24,45 +24,43 @@ namespace tint::utils {
namespace {
std::string TmpFilePath(std::string ext) {
- char const* dir = getenv("TMPDIR");
- if (dir == nullptr) {
- dir = "/tmp";
- }
+ char const* dir = getenv("TMPDIR");
+ if (dir == nullptr) {
+ dir = "/tmp";
+ }
- // mkstemps requires an `int` for the file extension name but STL represents
- // size_t. Pre-C++20 there the behavior for unsigned-to-signed conversion
- // (when the source value exceeds the representable range) is implementation
- // defined. While such a large file extension is unlikely in practice, we
- // enforce this here at runtime.
- TINT_ASSERT(Utils, ext.length() <=
- static_cast<size_t>(std::numeric_limits<int>::max()));
- std::string name = std::string(dir) + "/tint_XXXXXX" + ext;
- int file = mkstemps(&name[0], static_cast<int>(ext.length()));
- if (file != -1) {
- close(file);
- return name;
- }
- return "";
+    // mkstemps requires an `int` for the file extension length, but the STL
+    // reports string lengths as size_t. Pre-C++20, the behavior of an
+    // unsigned-to-signed conversion (when the source value exceeds the
+    // representable range) is implementation-defined. While such a large file
+    // extension is unlikely in practice, we enforce this here at runtime.
+ TINT_ASSERT(Utils, ext.length() <= static_cast<size_t>(std::numeric_limits<int>::max()));
+ std::string name = std::string(dir) + "/tint_XXXXXX" + ext;
+ int file = mkstemps(&name[0], static_cast<int>(ext.length()));
+ if (file != -1) {
+ close(file);
+ return name;
+ }
+ return "";
}
} // namespace
-TmpFile::TmpFile(std::string extension)
- : path_(TmpFilePath(std::move(extension))) {}
+TmpFile::TmpFile(std::string extension) : path_(TmpFilePath(std::move(extension))) {}
TmpFile::~TmpFile() {
- if (!path_.empty()) {
- remove(path_.c_str());
- }
+ if (!path_.empty()) {
+ remove(path_.c_str());
+ }
}
bool TmpFile::Append(const void* data, size_t size) const {
- if (auto* file = fopen(path_.c_str(), "ab")) {
- fwrite(data, size, 1, file);
- fclose(file);
- return true;
- }
- return false;
+ if (auto* file = fopen(path_.c_str(), "ab")) {
+ fwrite(data, size, 1, file);
+ fclose(file);
+ return true;
+ }
+ return false;
}
} // namespace tint::utils
diff --git a/chromium/third_party/dawn/src/tint/utils/io/tmpfile_test.cc b/chromium/third_party/dawn/src/tint/utils/io/tmpfile_test.cc
index d312922f28e..4fe103dd773 100644
--- a/chromium/third_party/dawn/src/tint/utils/io/tmpfile_test.cc
+++ b/chromium/third_party/dawn/src/tint/utils/io/tmpfile_test.cc
@@ -22,66 +22,66 @@ namespace tint::utils {
namespace {
TEST(TmpFileTest, WriteReadAppendDelete) {
- std::string path;
- {
- TmpFile tmp;
- if (!tmp) {
- GTEST_SKIP() << "Unable to create a temporary file";
- }
+ std::string path;
+ {
+ TmpFile tmp;
+ if (!tmp) {
+ GTEST_SKIP() << "Unable to create a temporary file";
+ }
- path = tmp.Path();
+ path = tmp.Path();
- // Write a string to the temporary file
- tmp << "hello world\n";
+ // Write a string to the temporary file
+ tmp << "hello world\n";
- // Check the content of the file
- {
- std::ifstream file(path);
- ASSERT_TRUE(file);
- std::string line;
- EXPECT_TRUE(std::getline(file, line));
- EXPECT_EQ(line, "hello world");
- EXPECT_FALSE(std::getline(file, line));
- }
+ // Check the content of the file
+ {
+ std::ifstream file(path);
+ ASSERT_TRUE(file);
+ std::string line;
+ EXPECT_TRUE(std::getline(file, line));
+ EXPECT_EQ(line, "hello world");
+ EXPECT_FALSE(std::getline(file, line));
+ }
- // Write some more content to the file
- tmp << 42;
+ // Write some more content to the file
+ tmp << 42;
- // Check the content of the file again
- {
- std::ifstream file(path);
- ASSERT_TRUE(file);
- std::string line;
- EXPECT_TRUE(std::getline(file, line));
- EXPECT_EQ(line, "hello world");
- EXPECT_TRUE(std::getline(file, line));
- EXPECT_EQ(line, "42");
- EXPECT_FALSE(std::getline(file, line));
+ // Check the content of the file again
+ {
+ std::ifstream file(path);
+ ASSERT_TRUE(file);
+ std::string line;
+ EXPECT_TRUE(std::getline(file, line));
+ EXPECT_EQ(line, "hello world");
+ EXPECT_TRUE(std::getline(file, line));
+ EXPECT_EQ(line, "42");
+ EXPECT_FALSE(std::getline(file, line));
+ }
}
- }
- // Check the file has been deleted when it fell out of scope
- std::ifstream file(path);
- ASSERT_FALSE(file);
+ // Check the file has been deleted when it fell out of scope
+ std::ifstream file(path);
+ ASSERT_FALSE(file);
}
TEST(TmpFileTest, FileExtension) {
- const std::string kExt = ".foo";
- std::string path;
- {
- TmpFile tmp(kExt);
- if (!tmp) {
- GTEST_SKIP() << "Unable create a temporary file";
+ const std::string kExt = ".foo";
+ std::string path;
+ {
+ TmpFile tmp(kExt);
+ if (!tmp) {
+            GTEST_SKIP() << "Unable to create a temporary file";
+ }
+ path = tmp.Path();
}
- path = tmp.Path();
- }
- ASSERT_GT(path.length(), kExt.length());
- EXPECT_EQ(kExt, path.substr(path.length() - kExt.length()));
+ ASSERT_GT(path.length(), kExt.length());
+ EXPECT_EQ(kExt, path.substr(path.length() - kExt.length()));
- // Check the file has been deleted when it fell out of scope
- std::ifstream file(path);
- ASSERT_FALSE(file);
+ // Check the file has been deleted when it fell out of scope
+ std::ifstream file(path);
+ ASSERT_FALSE(file);
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/utils/io/tmpfile_windows.cc b/chromium/third_party/dawn/src/tint/utils/io/tmpfile_windows.cc
index 40dffc826da..3c8e5f7dd7a 100644
--- a/chromium/third_party/dawn/src/tint/utils/io/tmpfile_windows.cc
+++ b/chromium/third_party/dawn/src/tint/utils/io/tmpfile_windows.cc
@@ -22,20 +22,20 @@ namespace tint::utils {
namespace {
std::string TmpFilePath(const std::string& ext) {
- char name[L_tmpnam];
- // As we're adding an extension, to ensure the file is really unique, try
- // creating it, failing if it already exists.
- while (tmpnam_s(name, L_tmpnam - 1) == 0) {
- std::string name_with_ext = std::string(name) + ext;
- FILE* f = nullptr;
- // The "x" arg forces the function to fail if the file already exists.
- fopen_s(&f, name_with_ext.c_str(), "wbx");
- if (f) {
- fclose(f);
- return name_with_ext;
+ char name[L_tmpnam];
+ // As we're adding an extension, to ensure the file is really unique, try
+ // creating it, failing if it already exists.
+ while (tmpnam_s(name, L_tmpnam - 1) == 0) {
+ std::string name_with_ext = std::string(name) + ext;
+ FILE* f = nullptr;
+ // The "x" arg forces the function to fail if the file already exists.
+ fopen_s(&f, name_with_ext.c_str(), "wbx");
+ if (f) {
+ fclose(f);
+ return name_with_ext;
+ }
}
- }
- return {};
+ return {};
}
} // namespace
@@ -43,19 +43,19 @@ std::string TmpFilePath(const std::string& ext) {
TmpFile::TmpFile(std::string ext) : path_(TmpFilePath(ext)) {}
TmpFile::~TmpFile() {
- if (!path_.empty()) {
- remove(path_.c_str());
- }
+ if (!path_.empty()) {
+ remove(path_.c_str());
+ }
}
bool TmpFile::Append(const void* data, size_t size) const {
- FILE* file = nullptr;
- if (fopen_s(&file, path_.c_str(), "ab") != 0) {
- return false;
- }
- fwrite(data, size, 1, file);
- fclose(file);
- return true;
+ FILE* file = nullptr;
+ if (fopen_s(&file, path_.c_str(), "ab") != 0) {
+ return false;
+ }
+ fwrite(data, size, 1, file);
+ fclose(file);
+ return true;
}
} // namespace tint::utils
diff --git a/chromium/third_party/dawn/src/tint/utils/map.h b/chromium/third_party/dawn/src/tint/utils/map.h
index 12c93d4971a..0a137305d41 100644
--- a/chromium/third_party/dawn/src/tint/utils/map.h
+++ b/chromium/third_party/dawn/src/tint/utils/map.h
@@ -28,11 +28,9 @@ namespace tint::utils {
/// @return the map item value, or `if_missing` if the map does not contain the
/// given key
template <typename K, typename V, typename H, typename C, typename KV = K>
-V Lookup(const std::unordered_map<K, V, H, C>& map,
- const KV& key,
- const V& if_missing = {}) {
- auto it = map.find(key);
- return it != map.end() ? it->second : if_missing;
+V Lookup(const std::unordered_map<K, V, H, C>& map, const KV& key, const V& if_missing = {}) {
+ auto it = map.find(key);
+ return it != map.end() ? it->second : if_missing;
}
/// GetOrCreate is a utility function for lazily adding to an unordered map.
@@ -43,16 +41,14 @@ V Lookup(const std::unordered_map<K, V, H, C>& map,
/// @param create a callable function-like object with the signature `V()`
/// @return the value of the item with the given key, or the newly created item
template <typename K, typename V, typename H, typename C, typename CREATE>
-V GetOrCreate(std::unordered_map<K, V, H, C>& map,
- const K& key,
- CREATE&& create) {
- auto it = map.find(key);
- if (it != map.end()) {
- return it->second;
- }
- V value = create();
- map.emplace(key, value);
- return value;
+V GetOrCreate(std::unordered_map<K, V, H, C>& map, const K& key, CREATE&& create) {
+ auto it = map.find(key);
+ if (it != map.end()) {
+ return it->second;
+ }
+ V value = create();
+ map.emplace(key, value);
+ return value;
}
} // namespace tint::utils
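A small usage sketch of the two helpers above; the map contents and keys are illustrative.

    #include <iostream>
    #include <string>
    #include <unordered_map>

    #include "src/tint/utils/map.h"

    int main() {
        std::unordered_map<std::string, int> sizes{{"i32", 4}, {"f32", 4}};

        // Lookup() never inserts; it returns `if_missing` when the key is absent.
        std::cout << tint::utils::Lookup(sizes, std::string("i32"), -1) << "\n";   // 4
        std::cout << tint::utils::Lookup(sizes, std::string("bool"), -1) << "\n";  // -1

        // GetOrCreate() inserts lazily: the callable runs only on a cache miss.
        int calls = 0;
        auto size_of = [&](const std::string& key) {
            return tint::utils::GetOrCreate(sizes, key, [&] {
                calls++;
                return 1;
            });
        };
        std::cout << size_of("bool") << " " << size_of("bool") << " calls=" << calls << "\n";
        return 0;
    }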
diff --git a/chromium/third_party/dawn/src/tint/utils/map_test.cc b/chromium/third_party/dawn/src/tint/utils/map_test.cc
index ae35aeb36ee..f0d939256bd 100644
--- a/chromium/third_party/dawn/src/tint/utils/map_test.cc
+++ b/chromium/third_party/dawn/src/tint/utils/map_test.cc
@@ -22,34 +22,34 @@ namespace tint::utils {
namespace {
TEST(Lookup, Test) {
- std::unordered_map<int, int> map;
- map.emplace(10, 1);
- EXPECT_EQ(Lookup(map, 10, 0), 1); // exists, with if_missing
- EXPECT_EQ(Lookup(map, 10), 1); // exists, without if_missing
- EXPECT_EQ(Lookup(map, 20, 50), 50); // missing, with if_missing
- EXPECT_EQ(Lookup(map, 20), 0); // missing, without if_missing
+ std::unordered_map<int, int> map;
+ map.emplace(10, 1);
+ EXPECT_EQ(Lookup(map, 10, 0), 1); // exists, with if_missing
+ EXPECT_EQ(Lookup(map, 10), 1); // exists, without if_missing
+ EXPECT_EQ(Lookup(map, 20, 50), 50); // missing, with if_missing
+ EXPECT_EQ(Lookup(map, 20), 0); // missing, without if_missing
}
TEST(GetOrCreateTest, NewKey) {
- std::unordered_map<int, int> map;
- EXPECT_EQ(GetOrCreate(map, 1, [&] { return 2; }), 2);
- EXPECT_EQ(map.size(), 1u);
- EXPECT_EQ(map[1], 2);
+ std::unordered_map<int, int> map;
+ EXPECT_EQ(GetOrCreate(map, 1, [&] { return 2; }), 2);
+ EXPECT_EQ(map.size(), 1u);
+ EXPECT_EQ(map[1], 2);
}
TEST(GetOrCreateTest, ExistingKey) {
- std::unordered_map<int, int> map;
- map[1] = 2;
- bool called = false;
- EXPECT_EQ(GetOrCreate(map, 1,
- [&] {
- called = true;
- return -2;
- }),
- 2);
- EXPECT_EQ(called, false);
- EXPECT_EQ(map.size(), 1u);
- EXPECT_EQ(map[1], 2);
+ std::unordered_map<int, int> map;
+ map[1] = 2;
+ bool called = false;
+ EXPECT_EQ(GetOrCreate(map, 1,
+ [&] {
+ called = true;
+ return -2;
+ }),
+ 2);
+ EXPECT_EQ(called, false);
+ EXPECT_EQ(map.size(), 1u);
+ EXPECT_EQ(map[1], 2);
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/utils/math.h b/chromium/third_party/dawn/src/tint/utils/math.h
index c6d7d461f04..3d8874ab20d 100644
--- a/chromium/third_party/dawn/src/tint/utils/math.h
+++ b/chromium/third_party/dawn/src/tint/utils/math.h
@@ -27,7 +27,7 @@ namespace tint::utils {
/// @note `alignment` must be positive. An alignment of zero will cause a DBZ.
template <typename T>
inline T RoundUp(T alignment, T value) {
- return ((value + alignment - 1) / alignment) * alignment;
+ return ((value + alignment - 1) / alignment) * alignment;
}
/// @param value the value to check whether it is a power-of-two
@@ -35,19 +35,19 @@ inline T RoundUp(T alignment, T value) {
/// @note `value` must be positive if `T` is signed
template <typename T>
inline bool IsPowerOfTwo(T value) {
- return (value & (value - 1)) == 0;
+ return (value & (value - 1)) == 0;
}
/// @param value the input value
/// @returns the largest power of two that `value` is a multiple of
template <typename T>
inline std::enable_if_t<std::is_unsigned<T>::value, T> MaxAlignOf(T value) {
- T pot = 1;
- while (value && ((value & 1u) == 0)) {
- pot <<= 1;
- value >>= 1;
- }
- return pot;
+ T pot = 1;
+ while (value && ((value & 1u) == 0)) {
+ pot <<= 1;
+ value >>= 1;
+ }
+ return pot;
}
} // namespace tint::utils
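A brief usage sketch of the helpers above. Note that RoundUp() takes the alignment first and the value to round second; the values below are illustrative.

    #include <cstdint>
    #include <iostream>

    #include "src/tint/utils/math.h"

    int main() {
        uint32_t offset = 13u;
        // Alignment first, value second: rounds 13 up to the next multiple of 16.
        std::cout << tint::utils::RoundUp(16u, offset) << "\n";  // 16

        std::cout << std::boolalpha << tint::utils::IsPowerOfTwo(64) << "\n";  // true

        // Largest power of two that 24 is a multiple of.
        std::cout << tint::utils::MaxAlignOf(24u) << "\n";  // 8
        return 0;
    }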
diff --git a/chromium/third_party/dawn/src/tint/utils/math_test.cc b/chromium/third_party/dawn/src/tint/utils/math_test.cc
index d6be3f67879..515c71823ec 100644
--- a/chromium/third_party/dawn/src/tint/utils/math_test.cc
+++ b/chromium/third_party/dawn/src/tint/utils/math_test.cc
@@ -20,61 +20,61 @@ namespace tint::utils {
namespace {
TEST(MathTests, RoundUp) {
- EXPECT_EQ(RoundUp(1, 0), 0);
- EXPECT_EQ(RoundUp(1, 1), 1);
- EXPECT_EQ(RoundUp(1, 2), 2);
+ EXPECT_EQ(RoundUp(1, 0), 0);
+ EXPECT_EQ(RoundUp(1, 1), 1);
+ EXPECT_EQ(RoundUp(1, 2), 2);
- EXPECT_EQ(RoundUp(1, 1), 1);
- EXPECT_EQ(RoundUp(2, 1), 2);
- EXPECT_EQ(RoundUp(3, 1), 3);
- EXPECT_EQ(RoundUp(4, 1), 4);
+ EXPECT_EQ(RoundUp(1, 1), 1);
+ EXPECT_EQ(RoundUp(2, 1), 2);
+ EXPECT_EQ(RoundUp(3, 1), 3);
+ EXPECT_EQ(RoundUp(4, 1), 4);
- EXPECT_EQ(RoundUp(1, 2), 2);
- EXPECT_EQ(RoundUp(2, 2), 2);
- EXPECT_EQ(RoundUp(3, 2), 3);
- EXPECT_EQ(RoundUp(4, 2), 4);
+ EXPECT_EQ(RoundUp(1, 2), 2);
+ EXPECT_EQ(RoundUp(2, 2), 2);
+ EXPECT_EQ(RoundUp(3, 2), 3);
+ EXPECT_EQ(RoundUp(4, 2), 4);
- EXPECT_EQ(RoundUp(1, 3), 3);
- EXPECT_EQ(RoundUp(2, 3), 4);
- EXPECT_EQ(RoundUp(3, 3), 3);
- EXPECT_EQ(RoundUp(4, 3), 4);
+ EXPECT_EQ(RoundUp(1, 3), 3);
+ EXPECT_EQ(RoundUp(2, 3), 4);
+ EXPECT_EQ(RoundUp(3, 3), 3);
+ EXPECT_EQ(RoundUp(4, 3), 4);
- EXPECT_EQ(RoundUp(1, 4), 4);
- EXPECT_EQ(RoundUp(2, 4), 4);
- EXPECT_EQ(RoundUp(3, 4), 6);
- EXPECT_EQ(RoundUp(4, 4), 4);
+ EXPECT_EQ(RoundUp(1, 4), 4);
+ EXPECT_EQ(RoundUp(2, 4), 4);
+ EXPECT_EQ(RoundUp(3, 4), 6);
+ EXPECT_EQ(RoundUp(4, 4), 4);
}
TEST(MathTests, IsPowerOfTwo) {
- EXPECT_EQ(IsPowerOfTwo(1), true);
- EXPECT_EQ(IsPowerOfTwo(2), true);
- EXPECT_EQ(IsPowerOfTwo(3), false);
- EXPECT_EQ(IsPowerOfTwo(4), true);
- EXPECT_EQ(IsPowerOfTwo(5), false);
- EXPECT_EQ(IsPowerOfTwo(6), false);
- EXPECT_EQ(IsPowerOfTwo(7), false);
- EXPECT_EQ(IsPowerOfTwo(8), true);
- EXPECT_EQ(IsPowerOfTwo(9), false);
+ EXPECT_EQ(IsPowerOfTwo(1), true);
+ EXPECT_EQ(IsPowerOfTwo(2), true);
+ EXPECT_EQ(IsPowerOfTwo(3), false);
+ EXPECT_EQ(IsPowerOfTwo(4), true);
+ EXPECT_EQ(IsPowerOfTwo(5), false);
+ EXPECT_EQ(IsPowerOfTwo(6), false);
+ EXPECT_EQ(IsPowerOfTwo(7), false);
+ EXPECT_EQ(IsPowerOfTwo(8), true);
+ EXPECT_EQ(IsPowerOfTwo(9), false);
}
TEST(MathTests, MaxAlignOf) {
- EXPECT_EQ(MaxAlignOf(0u), 1u);
- EXPECT_EQ(MaxAlignOf(1u), 1u);
- EXPECT_EQ(MaxAlignOf(2u), 2u);
- EXPECT_EQ(MaxAlignOf(3u), 1u);
- EXPECT_EQ(MaxAlignOf(4u), 4u);
- EXPECT_EQ(MaxAlignOf(5u), 1u);
- EXPECT_EQ(MaxAlignOf(6u), 2u);
- EXPECT_EQ(MaxAlignOf(7u), 1u);
- EXPECT_EQ(MaxAlignOf(8u), 8u);
- EXPECT_EQ(MaxAlignOf(9u), 1u);
- EXPECT_EQ(MaxAlignOf(10u), 2u);
- EXPECT_EQ(MaxAlignOf(11u), 1u);
- EXPECT_EQ(MaxAlignOf(12u), 4u);
- EXPECT_EQ(MaxAlignOf(13u), 1u);
- EXPECT_EQ(MaxAlignOf(14u), 2u);
- EXPECT_EQ(MaxAlignOf(15u), 1u);
- EXPECT_EQ(MaxAlignOf(16u), 16u);
+ EXPECT_EQ(MaxAlignOf(0u), 1u);
+ EXPECT_EQ(MaxAlignOf(1u), 1u);
+ EXPECT_EQ(MaxAlignOf(2u), 2u);
+ EXPECT_EQ(MaxAlignOf(3u), 1u);
+ EXPECT_EQ(MaxAlignOf(4u), 4u);
+ EXPECT_EQ(MaxAlignOf(5u), 1u);
+ EXPECT_EQ(MaxAlignOf(6u), 2u);
+ EXPECT_EQ(MaxAlignOf(7u), 1u);
+ EXPECT_EQ(MaxAlignOf(8u), 8u);
+ EXPECT_EQ(MaxAlignOf(9u), 1u);
+ EXPECT_EQ(MaxAlignOf(10u), 2u);
+ EXPECT_EQ(MaxAlignOf(11u), 1u);
+ EXPECT_EQ(MaxAlignOf(12u), 4u);
+ EXPECT_EQ(MaxAlignOf(13u), 1u);
+ EXPECT_EQ(MaxAlignOf(14u), 2u);
+ EXPECT_EQ(MaxAlignOf(15u), 1u);
+ EXPECT_EQ(MaxAlignOf(16u), 16u);
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/utils/result.h b/chromium/third_party/dawn/src/tint/utils/result.h
new file mode 100644
index 00000000000..b2a69d56fc4
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/utils/result.h
@@ -0,0 +1,103 @@
+// Copyright 2022 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef SRC_TINT_UTILS_RESULT_H_
+#define SRC_TINT_UTILS_RESULT_H_
+
+#include <ostream>
+// TODO(https://crbug.com/dawn/1379) Update cpplint and remove NOLINT
+#include <variant> // NOLINT(build/include_order)
+
+namespace tint::utils {
+
+/// Empty structure used as the default FAILURE_TYPE for a Result.
+struct FailureType {};
+
+static constexpr const FailureType Failure;
+
+/// Result is a helper for functions that need to return a value, or a failure value.
+/// Result can be constructed with either a 'success' or 'failure' value.
+/// @tparam SUCCESS_TYPE the 'success' value type.
+/// @tparam FAILURE_TYPE the 'failure' value type. Defaults to FailureType which provides no
+/// information about the failure, except that something failed. Must not be the same type
+/// as SUCCESS_TYPE.
+template <typename SUCCESS_TYPE, typename FAILURE_TYPE = FailureType>
+struct Result {
+ static_assert(!std::is_same_v<SUCCESS_TYPE, FAILURE_TYPE>,
+ "Result must not have the same type for SUCCESS_TYPE and FAILURE_TYPE");
+
+ /// Constructor
+ /// @param success the success result
+ Result(const SUCCESS_TYPE& success) // NOLINT(runtime/explicit):
+ : value{success} {}
+
+ /// Constructor
+ /// @param failure the failure result
+ Result(const FAILURE_TYPE& failure) // NOLINT(runtime/explicit):
+ : value{failure} {}
+
+ /// @returns true if the result was a success
+ operator bool() const { return std::holds_alternative<SUCCESS_TYPE>(value); }
+
+ /// @returns true if the result was a failure
+ bool operator!() const { return std::holds_alternative<FAILURE_TYPE>(value); }
+
+ /// @returns the success value
+    /// @warning attempting to call this when the Result holds a failure will result in UB.
+ const SUCCESS_TYPE* operator->() const { return &std::get<SUCCESS_TYPE>(value); }
+
+ /// @returns the success value
+    /// @warning attempting to call this when the Result holds a failure value will result in UB.
+ const SUCCESS_TYPE& Get() const { return std::get<SUCCESS_TYPE>(value); }
+
+ /// @returns the failure value
+ /// @warning attempting to call this when the Result holds a success value will result in UB.
+ const FAILURE_TYPE& Failure() const { return std::get<FAILURE_TYPE>(value); }
+
+ /// Equality operator
+ /// @param val the value to compare this Result to
+ /// @returns true if this result holds a success value equal to `value`
+ bool operator==(SUCCESS_TYPE val) const {
+ if (auto* v = std::get_if<SUCCESS_TYPE>(&value)) {
+ return *v == val;
+ }
+ return false;
+ }
+
+ /// Equality operator
+ /// @param val the value to compare this Result to
+ /// @returns true if this result holds a failure value equal to `value`
+ bool operator==(FAILURE_TYPE val) const {
+ if (auto* v = std::get_if<FAILURE_TYPE>(&value)) {
+ return *v == val;
+ }
+ return false;
+ }
+
+    /// The result. Either a success or failure value.
+ std::variant<SUCCESS_TYPE, FAILURE_TYPE> value;
+};
+
+/// Writes the result to the ostream.
+/// @param out the std::ostream to write to
+/// @param res the result
+/// @return the std::ostream so calls can be chained
+template <typename SUCCESS, typename FAILURE>
+inline std::ostream& operator<<(std::ostream& out, Result<SUCCESS, FAILURE> res) {
+ return res ? (out << "success: " << res.Get()) : (out << "failure: " << res.Failure());
+}
+
+} // namespace tint::utils
+
+#endif // SRC_TINT_UTILS_RESULT_H_
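The unit tests added below exercise Result directly; as a complement, here is a sketch of the intended call-site pattern, a function that returns either a value or an error message. The ParseDigit function and its messages are illustrative.

    #include <iostream>
    #include <string>

    #include "src/tint/utils/result.h"

    // Succeeds with the digit's value, or fails with a message.
    tint::utils::Result<int, std::string> ParseDigit(char c) {
        if (c < '0' || c > '9') {
            return std::string("not a digit: ") + c;
        }
        return c - '0';
    }

    int main() {
        if (auto r = ParseDigit('7')) {
            std::cout << "parsed " << r.Get() << "\n";
        }
        if (auto r = ParseDigit('x'); !r) {
            std::cout << r.Failure() << "\n";
        }
        return 0;
    }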
diff --git a/chromium/third_party/dawn/src/tint/utils/result_test.cc b/chromium/third_party/dawn/src/tint/utils/result_test.cc
new file mode 100644
index 00000000000..ce125f452bb
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/utils/result_test.cc
@@ -0,0 +1,55 @@
+// Copyright 2022 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/tint/utils/result.h"
+
+#include <string>
+
+#include "gmock/gmock.h"
+
+namespace tint::utils {
+namespace {
+
+TEST(ResultTest, SuccessInt) {
+ auto r = Result<int>(123);
+ EXPECT_TRUE(r);
+ EXPECT_FALSE(!r);
+ EXPECT_EQ(r.Get(), 123);
+}
+
+TEST(ResultTest, SuccessStruct) {
+ struct S {
+ int value;
+ };
+ auto r = Result<S>({123});
+ EXPECT_TRUE(r);
+ EXPECT_FALSE(!r);
+ EXPECT_EQ(r->value, 123);
+}
+
+TEST(ResultTest, Failure) {
+ auto r = Result<int>(Failure);
+ EXPECT_FALSE(r);
+ EXPECT_TRUE(!r);
+}
+
+TEST(ResultTest, CustomFailure) {
+ auto r = Result<int, std::string>("oh noes!");
+ EXPECT_FALSE(r);
+ EXPECT_TRUE(!r);
+ EXPECT_EQ(r.Failure(), "oh noes!");
+}
+
+} // namespace
+} // namespace tint::utils
diff --git a/chromium/third_party/dawn/src/tint/utils/reverse.h b/chromium/third_party/dawn/src/tint/utils/reverse.h
index fb4f237cd06..f28eedde9fb 100644
--- a/chromium/third_party/dawn/src/tint/utils/reverse.h
+++ b/chromium/third_party/dawn/src/tint/utils/reverse.h
@@ -26,18 +26,18 @@ namespace detail {
/// See https://en.cppreference.com/w/cpp/language/range-for
template <typename T>
struct ReverseIterable {
- /// The wrapped iterable object.
- T& iterable;
+ /// The wrapped iterable object.
+ T& iterable;
};
template <typename T>
auto begin(ReverseIterable<T> r_it) {
- return std::rbegin(r_it.iterable);
+ return std::rbegin(r_it.iterable);
}
template <typename T>
auto end(ReverseIterable<T> r_it) {
- return std::rend(r_it.iterable);
+ return std::rend(r_it.iterable);
}
} // namespace detail
@@ -54,7 +54,7 @@ auto end(ReverseIterable<T> r_it) {
/// ```
template <typename T>
detail::ReverseIterable<T> Reverse(T&& iterable) {
- return {iterable};
+ return {iterable};
}
} // namespace tint::utils
diff --git a/chromium/third_party/dawn/src/tint/utils/reverse_test.cc b/chromium/third_party/dawn/src/tint/utils/reverse_test.cc
index b23c799508d..9bef6dee674 100644
--- a/chromium/third_party/dawn/src/tint/utils/reverse_test.cc
+++ b/chromium/third_party/dawn/src/tint/utils/reverse_test.cc
@@ -22,12 +22,12 @@ namespace tint::utils {
namespace {
TEST(ReverseTest, Vector) {
- std::vector<int> vec{1, 3, 5, 7, 9};
- std::vector<int> rev;
- for (auto v : Reverse(vec)) {
- rev.emplace_back(v);
- }
- ASSERT_THAT(rev, testing::ElementsAre(9, 7, 5, 3, 1));
+ std::vector<int> vec{1, 3, 5, 7, 9};
+ std::vector<int> rev;
+ for (auto v : Reverse(vec)) {
+ rev.emplace_back(v);
+ }
+ ASSERT_THAT(rev, testing::ElementsAre(9, 7, 5, 3, 1));
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/utils/scoped_assignment.h b/chromium/third_party/dawn/src/tint/utils/scoped_assignment.h
index fdd787f1e5c..614945424e1 100644
--- a/chromium/third_party/dawn/src/tint/utils/scoped_assignment.h
+++ b/chromium/third_party/dawn/src/tint/utils/scoped_assignment.h
@@ -27,36 +27,36 @@ namespace tint::utils {
/// original value is restored.
template <typename T>
class ScopedAssignment {
- public:
- /// Constructor
- /// @param var the variable to temporarily assign a new value to
- /// @param val the value to assign to `ref` for the lifetime of this
- /// ScopedAssignment.
- ScopedAssignment(T& var, T val) : ref_(var) {
- old_value_ = var;
- var = val;
- }
-
- /// Destructor
- /// Restores the original value of the variable.
- ~ScopedAssignment() { ref_ = old_value_; }
-
- private:
- ScopedAssignment(const ScopedAssignment&) = delete;
- ScopedAssignment& operator=(const ScopedAssignment&) = delete;
-
- T& ref_;
- T old_value_;
+ public:
+ /// Constructor
+ /// @param var the variable to temporarily assign a new value to
+ /// @param val the value to assign to `ref` for the lifetime of this
+ /// ScopedAssignment.
+ ScopedAssignment(T& var, T val) : ref_(var) {
+ old_value_ = var;
+ var = val;
+ }
+
+ /// Destructor
+ /// Restores the original value of the variable.
+ ~ScopedAssignment() { ref_ = old_value_; }
+
+ private:
+ ScopedAssignment(const ScopedAssignment&) = delete;
+ ScopedAssignment& operator=(const ScopedAssignment&) = delete;
+
+ T& ref_;
+ T old_value_;
};
} // namespace tint::utils
/// TINT_SCOPED_ASSIGNMENT(var, val) assigns `val` to `var`, and automatically
/// restores the original value of `var` when exiting the current lexical scope.
-#define TINT_SCOPED_ASSIGNMENT(var, val) \
- ::tint::utils::ScopedAssignment<std::remove_reference_t<decltype(var)>> \
- TINT_CONCAT(tint_scoped_assignment_, __COUNTER__) { \
- var, val \
- }
+#define TINT_SCOPED_ASSIGNMENT(var, val) \
+ ::tint::utils::ScopedAssignment<std::remove_reference_t<decltype(var)>> TINT_CONCAT( \
+ tint_scoped_assignment_, __COUNTER__) { \
+ var, val \
+ }
#endif // SRC_TINT_UTILS_SCOPED_ASSIGNMENT_H_
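
For context, a minimal usage sketch of the reflowed TINT_SCOPED_ASSIGNMENT macro above (the function and variable names are illustrative, not from this patch):

    #include "src/tint/utils/scoped_assignment.h"

    int depth = 0;  // illustrative counter tracked during a traversal

    void Visit() {
        // Temporarily bumps 'depth' for this scope; the previous value is
        // restored when the ScopedAssignment created by the macro is destroyed.
        TINT_SCOPED_ASSIGNMENT(depth, depth + 1);
        // ... recurse ...
    }  // 'depth' reverts here
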
diff --git a/chromium/third_party/dawn/src/tint/utils/scoped_assignment_test.cc b/chromium/third_party/dawn/src/tint/utils/scoped_assignment_test.cc
index 3055afe5d7b..3c0c548ca19 100644
--- a/chromium/third_party/dawn/src/tint/utils/scoped_assignment_test.cc
+++ b/chromium/third_party/dawn/src/tint/utils/scoped_assignment_test.cc
@@ -20,25 +20,25 @@ namespace tint::utils {
namespace {
TEST(ScopedAssignmentTest, Scopes) {
- int i = 0;
- EXPECT_EQ(i, 0);
- {
+ int i = 0;
EXPECT_EQ(i, 0);
- TINT_SCOPED_ASSIGNMENT(i, 1);
- EXPECT_EQ(i, 1);
{
- EXPECT_EQ(i, 1);
- TINT_SCOPED_ASSIGNMENT(i, 2);
- EXPECT_EQ(i, 2);
+ EXPECT_EQ(i, 0);
+ TINT_SCOPED_ASSIGNMENT(i, 1);
+ EXPECT_EQ(i, 1);
+ {
+ EXPECT_EQ(i, 1);
+ TINT_SCOPED_ASSIGNMENT(i, 2);
+ EXPECT_EQ(i, 2);
+ }
+ {
+ EXPECT_EQ(i, 1);
+ TINT_SCOPED_ASSIGNMENT(i, 3);
+ EXPECT_EQ(i, 3);
+ }
+ EXPECT_EQ(i, 1);
}
- {
- EXPECT_EQ(i, 1);
- TINT_SCOPED_ASSIGNMENT(i, 3);
- EXPECT_EQ(i, 3);
- }
- EXPECT_EQ(i, 1);
- }
- EXPECT_EQ(i, 0);
+ EXPECT_EQ(i, 0);
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/utils/string.h b/chromium/third_party/dawn/src/tint/utils/string.h
index 011e326c0cf..a11e44e4210 100644
--- a/chromium/third_party/dawn/src/tint/utils/string.h
+++ b/chromium/third_party/dawn/src/tint/utils/string.h
@@ -26,12 +26,12 @@ namespace tint::utils {
inline std::string ReplaceAll(std::string str,
const std::string& substr,
const std::string& replacement) {
- size_t pos = 0;
- while ((pos = str.find(substr, pos)) != std::string::npos) {
- str.replace(pos, substr.length(), replacement);
- pos += replacement.length();
- }
- return str;
+ size_t pos = 0;
+ while ((pos = str.find(substr, pos)) != std::string::npos) {
+ str.replace(pos, substr.length(), replacement);
+ pos += replacement.length();
+ }
+ return str;
}
} // namespace tint::utils
diff --git a/chromium/third_party/dawn/src/tint/utils/string_test.cc b/chromium/third_party/dawn/src/tint/utils/string_test.cc
index f394ed73ee2..0d3e14fc401 100644
--- a/chromium/third_party/dawn/src/tint/utils/string_test.cc
+++ b/chromium/third_party/dawn/src/tint/utils/string_test.cc
@@ -20,16 +20,16 @@ namespace tint::utils {
namespace {
TEST(StringTest, ReplaceAll) {
- ASSERT_EQ("xybbcc", ReplaceAll("aabbcc", "aa", "xy"));
- ASSERT_EQ("aaxycc", ReplaceAll("aabbcc", "bb", "xy"));
- ASSERT_EQ("aabbxy", ReplaceAll("aabbcc", "cc", "xy"));
- ASSERT_EQ("xyxybbcc", ReplaceAll("aabbcc", "a", "xy"));
- ASSERT_EQ("aaxyxycc", ReplaceAll("aabbcc", "b", "xy"));
- ASSERT_EQ("aabbxyxy", ReplaceAll("aabbcc", "c", "xy"));
- // Replacement string includes the searched-for string.
- // This proves that the algorithm needs to advance 'pos'
- // past the replacement.
- ASSERT_EQ("aabxybbxybcc", ReplaceAll("aabbcc", "b", "bxyb"));
+ ASSERT_EQ("xybbcc", ReplaceAll("aabbcc", "aa", "xy"));
+ ASSERT_EQ("aaxycc", ReplaceAll("aabbcc", "bb", "xy"));
+ ASSERT_EQ("aabbxy", ReplaceAll("aabbcc", "cc", "xy"));
+ ASSERT_EQ("xyxybbcc", ReplaceAll("aabbcc", "a", "xy"));
+ ASSERT_EQ("aaxyxycc", ReplaceAll("aabbcc", "b", "xy"));
+ ASSERT_EQ("aabbxyxy", ReplaceAll("aabbcc", "c", "xy"));
+ // Replacement string includes the searched-for string.
+ // This proves that the algorithm needs to advance 'pos'
+ // past the replacement.
+ ASSERT_EQ("aabxybbxybcc", ReplaceAll("aabbcc", "b", "bxyb"));
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/utils/to_const_ptr_vec.h b/chromium/third_party/dawn/src/tint/utils/to_const_ptr_vec.h
index a46b3badefa..02cc984aae8 100644
--- a/chromium/third_party/dawn/src/tint/utils/to_const_ptr_vec.h
+++ b/chromium/third_party/dawn/src/tint/utils/to_const_ptr_vec.h
@@ -24,12 +24,12 @@ namespace tint::utils {
/// @returns a vector of `const T*` with the content of `in`.
template <typename T>
std::vector<const T*> ToConstPtrVec(const std::vector<T*>& in) {
- std::vector<const T*> out;
- out.reserve(in.size());
- for (auto* ptr : in) {
- out.emplace_back(ptr);
- }
- return out;
+ std::vector<const T*> out;
+ out.reserve(in.size());
+ for (auto* ptr : in) {
+ out.emplace_back(ptr);
+ }
+ return out;
}
} // namespace tint::utils
diff --git a/chromium/third_party/dawn/src/tint/utils/transform.h b/chromium/third_party/dawn/src/tint/utils/transform.h
index 29a9740893d..ff925303ca9 100644
--- a/chromium/third_party/dawn/src/tint/utils/transform.h
+++ b/chromium/third_party/dawn/src/tint/utils/transform.h
@@ -27,32 +27,63 @@ namespace tint::utils {
/// Transform performs an element-wise transformation of a vector.
/// @param in the input vector.
/// @param transform the transformation function with signature: `OUT(IN)`
-/// @returns a new vector with each element of the source vector transformed by
-/// `transform`.
+/// @returns a new vector with each element of the source vector transformed by `transform`.
template <typename IN, typename TRANSFORMER>
auto Transform(const std::vector<IN>& in, TRANSFORMER&& transform)
-> std::vector<decltype(transform(in[0]))> {
- std::vector<decltype(transform(in[0]))> result(in.size());
- for (size_t i = 0; i < result.size(); ++i) {
- result[i] = transform(in[i]);
- }
- return result;
+ std::vector<decltype(transform(in[0]))> result(in.size());
+ for (size_t i = 0; i < result.size(); ++i) {
+ result[i] = transform(in[i]);
+ }
+ return result;
}
/// Transform performs an element-wise transformation of a vector.
/// @param in the input vector.
-/// @param transform the transformation function with signature:
-/// `OUT(IN, size_t)`
-/// @returns a new vector with each element of the source vector transformed by
-/// `transform`.
+/// @param transform the transformation function with signature: `OUT(IN, size_t)`
+/// @returns a new vector with each element of the source vector transformed by `transform`.
template <typename IN, typename TRANSFORMER>
auto Transform(const std::vector<IN>& in, TRANSFORMER&& transform)
-> std::vector<decltype(transform(in[0], 1u))> {
- std::vector<decltype(transform(in[0], 1u))> result(in.size());
- for (size_t i = 0; i < result.size(); ++i) {
- result[i] = transform(in[i], i);
- }
- return result;
+ std::vector<decltype(transform(in[0], 1u))> result(in.size());
+ for (size_t i = 0; i < result.size(); ++i) {
+ result[i] = transform(in[i], i);
+ }
+ return result;
+}
+
+/// TransformN performs an element-wise transformation of a vector, transforming and returning at
+/// most `n` elements.
+/// @param in the input vector.
+/// @param n the maximum number of elements to transform.
+/// @param transform the transformation function with signature: `OUT(IN)`
+/// @returns a new vector with at most n elements of the source vector transformed by `transform`.
+template <typename IN, typename TRANSFORMER>
+auto TransformN(const std::vector<IN>& in, size_t n, TRANSFORMER&& transform)
+ -> std::vector<decltype(transform(in[0]))> {
+ const auto count = std::min(n, in.size());
+ std::vector<decltype(transform(in[0]))> result(count);
+ for (size_t i = 0; i < count; ++i) {
+ result[i] = transform(in[i]);
+ }
+ return result;
+}
+
+/// TransformN performs an element-wise transformation of a vector, transforming and returning at
+/// most `n` elements.
+/// @param in the input vector.
+/// @param n the maximum number of elements to transform.
+/// @param transform the transformation function with signature: `OUT(IN, size_t)`
+/// @returns a new vector with at most n elements of the source vector transformed by `transform`.
+template <typename IN, typename TRANSFORMER>
+auto TransformN(const std::vector<IN>& in, size_t n, TRANSFORMER&& transform)
+ -> std::vector<decltype(transform(in[0], 1u))> {
+ const auto count = std::min(n, in.size());
+ std::vector<decltype(transform(in[0], 1u))> result(count);
+ for (size_t i = 0; i < count; ++i) {
+ result[i] = transform(in[i], i);
+ }
+ return result;
}
} // namespace tint::utils
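
The TransformN overloads added above transform at most n elements; a minimal usage sketch (illustrative only, mirroring the tests that follow):

    #include <string>
    #include <vector>

    #include "src/tint/utils/transform.h"

    void Example() {
        const std::vector<int> in{1, 2, 3, 4};
        // Only the first two elements are transformed; the callback is never
        // invoked for the rest, so the result is {"1", "2"}.
        auto strs = tint::utils::TransformN(in, 2u, [](int i) { return std::to_string(i); });
        (void)strs;
    }
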
diff --git a/chromium/third_party/dawn/src/tint/utils/transform_test.cc b/chromium/third_party/dawn/src/tint/utils/transform_test.cc
index e6688242b05..89c07568c9e 100644
--- a/chromium/third_party/dawn/src/tint/utils/transform_test.cc
+++ b/chromium/third_party/dawn/src/tint/utils/transform_test.cc
@@ -19,73 +19,202 @@
#include "gmock/gmock.h"
-#define CHECK_ELEMENT_TYPE(vector, expected) \
- static_assert(std::is_same<decltype(vector)::value_type, expected>::value, \
- "unexpected result vector element type")
+#define CHECK_ELEMENT_TYPE(vector, expected) \
+ static_assert(std::is_same<decltype(vector)::value_type, expected>::value, \
+ "unexpected result vector element type")
namespace tint::utils {
namespace {
TEST(TransformTest, Empty) {
- const std::vector<int> empty{};
- {
- auto transformed = Transform(empty, [](int) -> int {
- [] { FAIL() << "Transform should not be called for empty vector"; }();
- return 0;
- });
- CHECK_ELEMENT_TYPE(transformed, int);
- EXPECT_EQ(transformed.size(), 0u);
- }
- {
- auto transformed = Transform(empty, [](int, size_t) -> int {
- [] { FAIL() << "Transform should not be called for empty vector"; }();
- return 0;
- });
- CHECK_ELEMENT_TYPE(transformed, int);
- EXPECT_EQ(transformed.size(), 0u);
- }
+ const std::vector<int> empty{};
+ {
+ auto transformed = Transform(empty, [](int) -> int {
+ [] { FAIL() << "Callback should not be called for empty vector"; }();
+ return 0;
+ });
+ CHECK_ELEMENT_TYPE(transformed, int);
+ EXPECT_EQ(transformed.size(), 0u);
+ }
+ {
+ auto transformed = Transform(empty, [](int, size_t) -> int {
+ [] { FAIL() << "Callback should not be called for empty vector"; }();
+ return 0;
+ });
+ CHECK_ELEMENT_TYPE(transformed, int);
+ EXPECT_EQ(transformed.size(), 0u);
+ }
}
TEST(TransformTest, Identity) {
- const std::vector<int> input{1, 2, 3, 4};
- {
+ const std::vector<int> input{1, 2, 3, 4};
auto transformed = Transform(input, [](int i) { return i; });
CHECK_ELEMENT_TYPE(transformed, int);
EXPECT_THAT(transformed, testing::ElementsAre(1, 2, 3, 4));
- }
- {
+}
+
+TEST(TransformTest, IdentityWithIndex) {
+ const std::vector<int> input{1, 2, 3, 4};
auto transformed = Transform(input, [](int i, size_t) { return i; });
CHECK_ELEMENT_TYPE(transformed, int);
EXPECT_THAT(transformed, testing::ElementsAre(1, 2, 3, 4));
- }
}
TEST(TransformTest, Index) {
- const std::vector<int> input{10, 20, 30, 40};
- {
- auto transformed = Transform(input, [](int, size_t idx) { return idx; });
- CHECK_ELEMENT_TYPE(transformed, size_t);
- EXPECT_THAT(transformed, testing::ElementsAre(0u, 1u, 2u, 3u));
- }
+ const std::vector<int> input{10, 20, 30, 40};
+ {
+ auto transformed = Transform(input, [](int, size_t idx) { return idx; });
+ CHECK_ELEMENT_TYPE(transformed, size_t);
+ EXPECT_THAT(transformed, testing::ElementsAre(0u, 1u, 2u, 3u));
+ }
}
TEST(TransformTest, TransformSameType) {
- const std::vector<int> input{1, 2, 3, 4};
- {
- auto transformed = Transform(input, [](int i) { return i * 10; });
- CHECK_ELEMENT_TYPE(transformed, int);
- EXPECT_THAT(transformed, testing::ElementsAre(10, 20, 30, 40));
- }
+ const std::vector<int> input{1, 2, 3, 4};
+ {
+ auto transformed = Transform(input, [](int i) { return i * 10; });
+ CHECK_ELEMENT_TYPE(transformed, int);
+ EXPECT_THAT(transformed, testing::ElementsAre(10, 20, 30, 40));
+ }
}
TEST(TransformTest, TransformDifferentType) {
- const std::vector<int> input{1, 2, 3, 4};
- {
- auto transformed =
- Transform(input, [](int i) { return std::to_string(i); });
- CHECK_ELEMENT_TYPE(transformed, std::string);
- EXPECT_THAT(transformed, testing::ElementsAre("1", "2", "3", "4"));
- }
+ const std::vector<int> input{1, 2, 3, 4};
+ {
+ auto transformed = Transform(input, [](int i) { return std::to_string(i); });
+ CHECK_ELEMENT_TYPE(transformed, std::string);
+ EXPECT_THAT(transformed, testing::ElementsAre("1", "2", "3", "4"));
+ }
+}
+
+TEST(TransformNTest, Empty) {
+ const std::vector<int> empty{};
+ {
+ auto transformed = TransformN(empty, 4u, [](int) -> int {
+ [] { FAIL() << "Callback should not be called for empty vector"; }();
+ return 0;
+ });
+ CHECK_ELEMENT_TYPE(transformed, int);
+ EXPECT_EQ(transformed.size(), 0u);
+ }
+ {
+ auto transformed = TransformN(empty, 4u, [](int, size_t) -> int {
+ [] { FAIL() << "Callback should not be called for empty vector"; }();
+ return 0;
+ });
+ CHECK_ELEMENT_TYPE(transformed, int);
+ EXPECT_EQ(transformed.size(), 0u);
+ }
+}
+
+TEST(TransformNTest, Identity) {
+ const std::vector<int> input{1, 2, 3, 4};
+ {
+ auto transformed = TransformN(input, 0u, [](int) {
+ [] { FAIL() << "Callback should not call the transform when n == 0"; }();
+ return 0;
+ });
+ CHECK_ELEMENT_TYPE(transformed, int);
+ EXPECT_TRUE(transformed.empty());
+ }
+ {
+ auto transformed = TransformN(input, 2u, [](int i) { return i; });
+ CHECK_ELEMENT_TYPE(transformed, int);
+ EXPECT_THAT(transformed, testing::ElementsAre(1, 2));
+ }
+ {
+ auto transformed = TransformN(input, 6u, [](int i) { return i; });
+ CHECK_ELEMENT_TYPE(transformed, int);
+ EXPECT_THAT(transformed, testing::ElementsAre(1, 2, 3, 4));
+ }
+}
+
+TEST(TransformNTest, IdentityWithIndex) {
+ const std::vector<int> input{1, 2, 3, 4};
+ {
+ auto transformed = TransformN(input, 0u, [](int, size_t) {
+ [] { FAIL() << "Callback should not call the transform when n == 0"; }();
+ return 0;
+ });
+ CHECK_ELEMENT_TYPE(transformed, int);
+ EXPECT_TRUE(transformed.empty());
+ }
+ {
+ auto transformed = TransformN(input, 3u, [](int i, size_t) { return i; });
+ CHECK_ELEMENT_TYPE(transformed, int);
+ EXPECT_THAT(transformed, testing::ElementsAre(1, 2, 3));
+ }
+ {
+ auto transformed = TransformN(input, 9u, [](int i, size_t) { return i; });
+ CHECK_ELEMENT_TYPE(transformed, int);
+ EXPECT_THAT(transformed, testing::ElementsAre(1, 2, 3, 4));
+ }
+}
+
+TEST(TransformNTest, Index) {
+ const std::vector<int> input{10, 20, 30, 40};
+ {
+ auto transformed = TransformN(input, 0u, [](int, size_t) {
+ [] { FAIL() << "Callback should not call the transform when n == 0"; }();
+ return static_cast<size_t>(0);
+ });
+ CHECK_ELEMENT_TYPE(transformed, size_t);
+ EXPECT_TRUE(transformed.empty());
+ }
+ {
+ auto transformed = TransformN(input, 2u, [](int, size_t idx) { return idx; });
+ CHECK_ELEMENT_TYPE(transformed, size_t);
+ EXPECT_THAT(transformed, testing::ElementsAre(0u, 1u));
+ }
+ {
+ auto transformed = TransformN(input, 9u, [](int, size_t idx) { return idx; });
+ CHECK_ELEMENT_TYPE(transformed, size_t);
+ EXPECT_THAT(transformed, testing::ElementsAre(0u, 1u, 2u, 3u));
+ }
+}
+
+TEST(TransformNTest, TransformSameType) {
+ const std::vector<int> input{1, 2, 3, 4};
+ {
+ auto transformed = TransformN(input, 0u, [](int, size_t) {
+ [] { FAIL() << "Callback should not call the transform when n == 0"; }();
+ return 0;
+ });
+ CHECK_ELEMENT_TYPE(transformed, int);
+ EXPECT_TRUE(transformed.empty());
+ }
+ {
+ auto transformed = TransformN(input, 2u, [](int i) { return i * 10; });
+ CHECK_ELEMENT_TYPE(transformed, int);
+ EXPECT_THAT(transformed, testing::ElementsAre(10, 20));
+ }
+ {
+ auto transformed = TransformN(input, 9u, [](int i) { return i * 10; });
+ CHECK_ELEMENT_TYPE(transformed, int);
+ EXPECT_THAT(transformed, testing::ElementsAre(10, 20, 30, 40));
+ }
+}
+
+TEST(TransformNTest, TransformDifferentType) {
+ const std::vector<int> input{1, 2, 3, 4};
+ {
+ auto transformed = TransformN(input, 0u, [](int) {
+ [] { FAIL() << "Callback should not call the transform when n == 0"; }();
+ return std::string();
+ });
+ CHECK_ELEMENT_TYPE(transformed, std::string);
+ EXPECT_TRUE(transformed.empty());
+ }
+ {
+ auto transformed = TransformN(input, 2u, [](int i) { return std::to_string(i); });
+ CHECK_ELEMENT_TYPE(transformed, std::string);
+ EXPECT_THAT(transformed, testing::ElementsAre("1", "2"));
+ }
+ {
+ auto transformed = TransformN(input, 9u, [](int i) { return std::to_string(i); });
+ CHECK_ELEMENT_TYPE(transformed, std::string);
+ EXPECT_THAT(transformed, testing::ElementsAre("1", "2", "3", "4"));
+ }
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/utils/unique_allocator.h b/chromium/third_party/dawn/src/tint/utils/unique_allocator.h
index 69242fba8f9..628bc792ce7 100644
--- a/chromium/third_party/dawn/src/tint/utils/unique_allocator.h
+++ b/chromium/third_party/dawn/src/tint/utils/unique_allocator.h
@@ -25,58 +25,56 @@ namespace tint::utils {
/// UniqueAllocator is used to allocate unique instances of the template type
/// `T`.
-template <typename T,
- typename HASH = std::hash<T>,
- typename EQUAL = std::equal_to<T>>
+template <typename T, typename HASH = std::hash<T>, typename EQUAL = std::equal_to<T>>
class UniqueAllocator {
- public:
- /// @param args the arguments used to construct the object.
- /// @return a pointer to an instance of `T` with the provided arguments.
- /// If an existing instance of `T` has been constructed, then the same
- /// pointer is returned.
- template <typename TYPE = T, typename... ARGS>
- TYPE* Get(ARGS&&... args) {
- // Create a temporary T instance on the stack so that we can hash it, and
- // use it for equality lookup for the std::unordered_set. If the item is not
- // found in the set, then we create the persisted instance with the
- // allocator.
- TYPE key{args...};
- auto hash = HASH{}(key);
- auto it = items.find(Entry{hash, &key});
- if (it != items.end()) {
- return static_cast<TYPE*>(it->ptr);
+ public:
+ /// @param args the arguments used to construct the object.
+ /// @return a pointer to an instance of `T` with the provided arguments.
+ /// If an existing instance of `T` has been constructed, then the same
+ /// pointer is returned.
+ template <typename TYPE = T, typename... ARGS>
+ TYPE* Get(ARGS&&... args) {
+ // Create a temporary T instance on the stack so that we can hash it, and
+ // use it for equality lookup for the std::unordered_set. If the item is not
+ // found in the set, then we create the persisted instance with the
+ // allocator.
+ TYPE key{args...};
+ auto hash = HASH{}(key);
+ auto it = items.find(Entry{hash, &key});
+ if (it != items.end()) {
+ return static_cast<TYPE*>(it->ptr);
+ }
+ auto* ptr = allocator.template Create<TYPE>(std::forward<ARGS>(args)...);
+ items.emplace_hint(it, Entry{hash, ptr});
+ return ptr;
}
- auto* ptr = allocator.template Create<TYPE>(std::forward<ARGS>(args)...);
- items.emplace_hint(it, Entry{hash, ptr});
- return ptr;
- }
- protected:
- /// Entry is used as the entry to the unordered_set
- struct Entry {
- /// The pre-calculated hash of the entry
- size_t hash;
- /// Tge pointer to the unique object
- T* ptr;
- };
- /// Comparator is the hashing and equality function used by the unordered_set
- struct Comparator {
- /// Hashing function
- /// @param e the entry
- /// @returns the hash of the entry
- size_t operator()(Entry e) const { return e.hash; }
+ protected:
+ /// Entry is used as the entry to the unordered_set
+ struct Entry {
+ /// The pre-calculated hash of the entry
+ size_t hash;
+        /// The pointer to the unique object
+ T* ptr;
+ };
+ /// Comparator is the hashing and equality function used by the unordered_set
+ struct Comparator {
+ /// Hashing function
+ /// @param e the entry
+ /// @returns the hash of the entry
+ size_t operator()(Entry e) const { return e.hash; }
- /// Equality function
- /// @param a the first entry to compare
- /// @param b the second entry to compare
- /// @returns true if the two entries are equal
- bool operator()(Entry a, Entry b) const { return EQUAL{}(*a.ptr, *b.ptr); }
- };
+ /// Equality function
+ /// @param a the first entry to compare
+ /// @param b the second entry to compare
+ /// @returns true if the two entries are equal
+ bool operator()(Entry a, Entry b) const { return EQUAL{}(*a.ptr, *b.ptr); }
+ };
- /// The block allocator used to allocate the unique objects
- BlockAllocator<T> allocator;
- /// The unordered_set of unique item entries
- std::unordered_set<Entry, Comparator, Comparator> items;
+ /// The block allocator used to allocate the unique objects
+ BlockAllocator<T> allocator;
+ /// The unordered_set of unique item entries
+ std::unordered_set<Entry, Comparator, Comparator> items;
};
} // namespace tint::utils
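
As the restyled Get() above shows, UniqueAllocator hashes a stack-local key and only allocates through the BlockAllocator on a miss; a minimal interning sketch (illustrative, see the tests that follow):

    #include <string>

    #include "src/tint/utils/unique_allocator.h"

    void Example() {
        tint::utils::UniqueAllocator<std::string> strings;
        auto* a = strings.Get("tint");  // miss: allocates a new std::string
        auto* b = strings.Get("tint");  // hit: returns the same pointer as 'a'
        auto* c = strings.Get("dawn");  // different value: new allocation
        // a == b, a != c; pointers remain valid for the allocator's lifetime.
        (void)a; (void)b; (void)c;
    }
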
diff --git a/chromium/third_party/dawn/src/tint/utils/unique_allocator_test.cc b/chromium/third_party/dawn/src/tint/utils/unique_allocator_test.cc
index d8d62df00a9..b6187347a07 100644
--- a/chromium/third_party/dawn/src/tint/utils/unique_allocator_test.cc
+++ b/chromium/third_party/dawn/src/tint/utils/unique_allocator_test.cc
@@ -22,30 +22,30 @@ namespace tint::utils {
namespace {
TEST(UniqueAllocator, Int) {
- UniqueAllocator<int> a;
- EXPECT_NE(a.Get(0), a.Get(1));
- EXPECT_NE(a.Get(1), a.Get(2));
- EXPECT_EQ(a.Get(0), a.Get(0));
- EXPECT_EQ(a.Get(1), a.Get(1));
- EXPECT_EQ(a.Get(2), a.Get(2));
+ UniqueAllocator<int> a;
+ EXPECT_NE(a.Get(0), a.Get(1));
+ EXPECT_NE(a.Get(1), a.Get(2));
+ EXPECT_EQ(a.Get(0), a.Get(0));
+ EXPECT_EQ(a.Get(1), a.Get(1));
+ EXPECT_EQ(a.Get(2), a.Get(2));
}
TEST(UniqueAllocator, Float) {
- UniqueAllocator<float> a;
- EXPECT_NE(a.Get(0.1f), a.Get(1.1f));
- EXPECT_NE(a.Get(1.1f), a.Get(2.1f));
- EXPECT_EQ(a.Get(0.1f), a.Get(0.1f));
- EXPECT_EQ(a.Get(1.1f), a.Get(1.1f));
- EXPECT_EQ(a.Get(2.1f), a.Get(2.1f));
+ UniqueAllocator<float> a;
+ EXPECT_NE(a.Get(0.1f), a.Get(1.1f));
+ EXPECT_NE(a.Get(1.1f), a.Get(2.1f));
+ EXPECT_EQ(a.Get(0.1f), a.Get(0.1f));
+ EXPECT_EQ(a.Get(1.1f), a.Get(1.1f));
+ EXPECT_EQ(a.Get(2.1f), a.Get(2.1f));
}
TEST(UniqueAllocator, String) {
- UniqueAllocator<std::string> a;
- EXPECT_NE(a.Get("x"), a.Get("y"));
- EXPECT_NE(a.Get("z"), a.Get("w"));
- EXPECT_EQ(a.Get("x"), a.Get("x"));
- EXPECT_EQ(a.Get("y"), a.Get("y"));
- EXPECT_EQ(a.Get("z"), a.Get("z"));
+ UniqueAllocator<std::string> a;
+ EXPECT_NE(a.Get("x"), a.Get("y"));
+ EXPECT_NE(a.Get("z"), a.Get("w"));
+ EXPECT_EQ(a.Get("x"), a.Get("x"));
+ EXPECT_EQ(a.Get("y"), a.Get("y"));
+ EXPECT_EQ(a.Get("z"), a.Get("z"));
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/utils/unique_vector.h b/chromium/third_party/dawn/src/tint/utils/unique_vector.h
index 32d593ef0b1..0f3f18d8a15 100644
--- a/chromium/third_party/dawn/src/tint/utils/unique_vector.h
+++ b/chromium/third_party/dawn/src/tint/utils/unique_vector.h
@@ -25,85 +25,86 @@ namespace tint::utils {
/// UniqueVector is an ordered container that only contains unique items.
/// Attempting to add a duplicate is a no-op.
-template <typename T,
- typename HASH = std::hash<T>,
- typename EQUAL = std::equal_to<T>>
+template <typename T, typename HASH = std::hash<T>, typename EQUAL = std::equal_to<T>>
struct UniqueVector {
- /// The iterator returned by begin() and end()
- using ConstIterator = typename std::vector<T>::const_iterator;
- /// The iterator returned by rbegin() and rend()
- using ConstReverseIterator = typename std::vector<T>::const_reverse_iterator;
-
- /// Constructor
- UniqueVector() = default;
-
- /// Constructor
- /// @param v the vector to construct this UniqueVector with. Duplicate
- /// elements will be removed.
- explicit UniqueVector(std::vector<T>&& v) {
- for (auto& el : v) {
- add(el);
+ /// The iterator returned by begin() and end()
+ using ConstIterator = typename std::vector<T>::const_iterator;
+ /// The iterator returned by rbegin() and rend()
+ using ConstReverseIterator = typename std::vector<T>::const_reverse_iterator;
+
+ /// Constructor
+ UniqueVector() = default;
+
+ /// Constructor
+ /// @param v the vector to construct this UniqueVector with. Duplicate
+ /// elements will be removed.
+ explicit UniqueVector(std::vector<T>&& v) {
+ for (auto& el : v) {
+ add(el);
+ }
}
- }
-
- /// add appends the item to the end of the vector, if the vector does not
- /// already contain the given item.
- /// @param item the item to append to the end of the vector
- /// @returns true if the item was added, otherwise false.
- bool add(const T& item) {
- if (set.count(item) == 0) {
- vector.emplace_back(item);
- set.emplace(item);
- return true;
+
+ /// add appends the item to the end of the vector, if the vector does not
+ /// already contain the given item.
+ /// @param item the item to append to the end of the vector
+ /// @returns true if the item was added, otherwise false.
+ bool add(const T& item) {
+ if (set.count(item) == 0) {
+ vector.emplace_back(item);
+ set.emplace(item);
+ return true;
+ }
+ return false;
}
- return false;
- }
- /// @returns true if the vector contains `item`
- /// @param item the item
- bool contains(const T& item) const { return set.count(item); }
+ /// @returns true if the vector contains `item`
+ /// @param item the item
+ bool contains(const T& item) const { return set.count(item); }
+
+ /// @param i the index of the element to retrieve
+ /// @returns the element at the index `i`
+ T& operator[](size_t i) { return vector[i]; }
- /// @param i the index of the element to retrieve
- /// @returns the element at the index `i`
- T& operator[](size_t i) { return vector[i]; }
+ /// @param i the index of the element to retrieve
+ /// @returns the element at the index `i`
+ const T& operator[](size_t i) const { return vector[i]; }
- /// @param i the index of the element to retrieve
- /// @returns the element at the index `i`
- const T& operator[](size_t i) const { return vector[i]; }
+ /// @returns true if the vector is empty
+ bool empty() const { return vector.empty(); }
- /// @returns true if the vector is empty
- bool empty() const { return vector.empty(); }
+ /// @returns the number of items in the vector
+ size_t size() const { return vector.size(); }
- /// @returns the number of items in the vector
- size_t size() const { return vector.size(); }
+ /// @returns the pointer to the first element in the vector, or nullptr if the vector is empty.
+ const T* data() const { return vector.empty() ? nullptr : vector.data(); }
- /// @returns an iterator to the beginning of the vector
- ConstIterator begin() const { return vector.begin(); }
+ /// @returns an iterator to the beginning of the vector
+ ConstIterator begin() const { return vector.begin(); }
- /// @returns an iterator to the end of the vector
- ConstIterator end() const { return vector.end(); }
+ /// @returns an iterator to the end of the vector
+ ConstIterator end() const { return vector.end(); }
- /// @returns an iterator to the beginning of the reversed vector
- ConstReverseIterator rbegin() const { return vector.rbegin(); }
+ /// @returns an iterator to the beginning of the reversed vector
+ ConstReverseIterator rbegin() const { return vector.rbegin(); }
- /// @returns an iterator to the end of the reversed vector
- ConstReverseIterator rend() const { return vector.rend(); }
+ /// @returns an iterator to the end of the reversed vector
+ ConstReverseIterator rend() const { return vector.rend(); }
- /// @returns a const reference to the internal vector
- operator const std::vector<T>&() const { return vector; }
+ /// @returns a const reference to the internal vector
+ operator const std::vector<T>&() const { return vector; }
- /// Removes the last element from the vector
- /// @returns the popped element
- T pop_back() {
- auto el = std::move(vector.back());
- set.erase(el);
- vector.pop_back();
- return el;
- }
+ /// Removes the last element from the vector
+ /// @returns the popped element
+ T pop_back() {
+ auto el = std::move(vector.back());
+ set.erase(el);
+ vector.pop_back();
+ return el;
+ }
- private:
- std::vector<T> vector;
- std::unordered_set<T, HASH, EQUAL> set;
+ private:
+ std::vector<T> vector;
+ std::unordered_set<T, HASH, EQUAL> set;
};
} // namespace tint::utils
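
The new data() accessor above deliberately returns nullptr for an empty container; a minimal sketch of the intended (pointer, count) call pattern (the consumer function is hypothetical):

    #include <cstddef>

    #include "src/tint/utils/unique_vector.h"

    // Hypothetical C-style consumer taking a (pointer, count) pair.
    void Consume(const int* ptr, size_t n);

    void Example() {
        tint::utils::UniqueVector<int> ids;
        ids.add(1);
        ids.add(2);
        ids.add(2);                       // duplicate: ignored
        Consume(ids.data(), ids.size());  // data() is nullptr only when size() == 0
    }
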
diff --git a/chromium/third_party/dawn/src/tint/utils/unique_vector_test.cc b/chromium/third_party/dawn/src/tint/utils/unique_vector_test.cc
index c2c47a40679..035ebf8de72 100644
--- a/chromium/third_party/dawn/src/tint/utils/unique_vector_test.cc
+++ b/chromium/third_party/dawn/src/tint/utils/unique_vector_test.cc
@@ -21,122 +21,131 @@ namespace tint::utils {
namespace {
TEST(UniqueVectorTest, Empty) {
- UniqueVector<int> unique_vec;
- EXPECT_EQ(unique_vec.size(), 0u);
- EXPECT_EQ(unique_vec.empty(), true);
- EXPECT_EQ(unique_vec.begin(), unique_vec.end());
+ UniqueVector<int> unique_vec;
+ EXPECT_EQ(unique_vec.size(), 0u);
+ EXPECT_EQ(unique_vec.empty(), true);
+ EXPECT_EQ(unique_vec.begin(), unique_vec.end());
}
TEST(UniqueVectorTest, MoveConstructor) {
- UniqueVector<int> unique_vec(std::vector<int>{0, 3, 2, 1, 2});
- EXPECT_EQ(unique_vec.size(), 4u);
- EXPECT_EQ(unique_vec.empty(), false);
- EXPECT_EQ(unique_vec[0], 0);
- EXPECT_EQ(unique_vec[1], 3);
- EXPECT_EQ(unique_vec[2], 2);
- EXPECT_EQ(unique_vec[3], 1);
+ UniqueVector<int> unique_vec(std::vector<int>{0, 3, 2, 1, 2});
+ EXPECT_EQ(unique_vec.size(), 4u);
+ EXPECT_EQ(unique_vec.empty(), false);
+ EXPECT_EQ(unique_vec[0], 0);
+ EXPECT_EQ(unique_vec[1], 3);
+ EXPECT_EQ(unique_vec[2], 2);
+ EXPECT_EQ(unique_vec[3], 1);
}
TEST(UniqueVectorTest, AddUnique) {
- UniqueVector<int> unique_vec;
- unique_vec.add(0);
- unique_vec.add(1);
- unique_vec.add(2);
- EXPECT_EQ(unique_vec.size(), 3u);
- EXPECT_EQ(unique_vec.empty(), false);
- int i = 0;
- for (auto n : unique_vec) {
- EXPECT_EQ(n, i);
- i++;
- }
- for (auto n : Reverse(unique_vec)) {
- i--;
- EXPECT_EQ(n, i);
- }
- EXPECT_EQ(unique_vec[0], 0);
- EXPECT_EQ(unique_vec[1], 1);
- EXPECT_EQ(unique_vec[2], 2);
+ UniqueVector<int> unique_vec;
+ unique_vec.add(0);
+ unique_vec.add(1);
+ unique_vec.add(2);
+ EXPECT_EQ(unique_vec.size(), 3u);
+ EXPECT_EQ(unique_vec.empty(), false);
+ int i = 0;
+ for (auto n : unique_vec) {
+ EXPECT_EQ(n, i);
+ i++;
+ }
+ for (auto n : Reverse(unique_vec)) {
+ i--;
+ EXPECT_EQ(n, i);
+ }
+ EXPECT_EQ(unique_vec[0], 0);
+ EXPECT_EQ(unique_vec[1], 1);
+ EXPECT_EQ(unique_vec[2], 2);
}
TEST(UniqueVectorTest, AddDuplicates) {
- UniqueVector<int> unique_vec;
- unique_vec.add(0);
- unique_vec.add(0);
- unique_vec.add(0);
- unique_vec.add(1);
- unique_vec.add(1);
- unique_vec.add(2);
- EXPECT_EQ(unique_vec.size(), 3u);
- EXPECT_EQ(unique_vec.empty(), false);
- int i = 0;
- for (auto n : unique_vec) {
- EXPECT_EQ(n, i);
- i++;
- }
- for (auto n : Reverse(unique_vec)) {
- i--;
- EXPECT_EQ(n, i);
- }
- EXPECT_EQ(unique_vec[0], 0);
- EXPECT_EQ(unique_vec[1], 1);
- EXPECT_EQ(unique_vec[2], 2);
+ UniqueVector<int> unique_vec;
+ unique_vec.add(0);
+ unique_vec.add(0);
+ unique_vec.add(0);
+ unique_vec.add(1);
+ unique_vec.add(1);
+ unique_vec.add(2);
+ EXPECT_EQ(unique_vec.size(), 3u);
+ EXPECT_EQ(unique_vec.empty(), false);
+ int i = 0;
+ for (auto n : unique_vec) {
+ EXPECT_EQ(n, i);
+ i++;
+ }
+ for (auto n : Reverse(unique_vec)) {
+ i--;
+ EXPECT_EQ(n, i);
+ }
+ EXPECT_EQ(unique_vec[0], 0);
+ EXPECT_EQ(unique_vec[1], 1);
+ EXPECT_EQ(unique_vec[2], 2);
}
TEST(UniqueVectorTest, AsVector) {
- UniqueVector<int> unique_vec;
- unique_vec.add(0);
- unique_vec.add(0);
- unique_vec.add(0);
- unique_vec.add(1);
- unique_vec.add(1);
- unique_vec.add(2);
-
- const std::vector<int>& vec = unique_vec;
- EXPECT_EQ(vec.size(), 3u);
- EXPECT_EQ(unique_vec.empty(), false);
- int i = 0;
- for (auto n : vec) {
- EXPECT_EQ(n, i);
- i++;
- }
- for (auto n : Reverse(unique_vec)) {
- i--;
- EXPECT_EQ(n, i);
- }
+ UniqueVector<int> unique_vec;
+ unique_vec.add(0);
+ unique_vec.add(0);
+ unique_vec.add(0);
+ unique_vec.add(1);
+ unique_vec.add(1);
+ unique_vec.add(2);
+
+ const std::vector<int>& vec = unique_vec;
+ EXPECT_EQ(vec.size(), 3u);
+ EXPECT_EQ(unique_vec.empty(), false);
+ int i = 0;
+ for (auto n : vec) {
+ EXPECT_EQ(n, i);
+ i++;
+ }
+ for (auto n : Reverse(unique_vec)) {
+ i--;
+ EXPECT_EQ(n, i);
+ }
}
TEST(UniqueVectorTest, PopBack) {
- UniqueVector<int> unique_vec;
- unique_vec.add(0);
- unique_vec.add(2);
- unique_vec.add(1);
-
- EXPECT_EQ(unique_vec.pop_back(), 1);
- EXPECT_EQ(unique_vec.size(), 2u);
- EXPECT_EQ(unique_vec.empty(), false);
- EXPECT_EQ(unique_vec[0], 0);
- EXPECT_EQ(unique_vec[1], 2);
-
- EXPECT_EQ(unique_vec.pop_back(), 2);
- EXPECT_EQ(unique_vec.size(), 1u);
- EXPECT_EQ(unique_vec.empty(), false);
- EXPECT_EQ(unique_vec[0], 0);
-
- unique_vec.add(1);
-
- EXPECT_EQ(unique_vec.size(), 2u);
- EXPECT_EQ(unique_vec.empty(), false);
- EXPECT_EQ(unique_vec[0], 0);
- EXPECT_EQ(unique_vec[1], 1);
-
- EXPECT_EQ(unique_vec.pop_back(), 1);
- EXPECT_EQ(unique_vec.size(), 1u);
- EXPECT_EQ(unique_vec.empty(), false);
- EXPECT_EQ(unique_vec[0], 0);
-
- EXPECT_EQ(unique_vec.pop_back(), 0);
- EXPECT_EQ(unique_vec.size(), 0u);
- EXPECT_EQ(unique_vec.empty(), true);
+ UniqueVector<int> unique_vec;
+ unique_vec.add(0);
+ unique_vec.add(2);
+ unique_vec.add(1);
+
+ EXPECT_EQ(unique_vec.pop_back(), 1);
+ EXPECT_EQ(unique_vec.size(), 2u);
+ EXPECT_EQ(unique_vec.empty(), false);
+ EXPECT_EQ(unique_vec[0], 0);
+ EXPECT_EQ(unique_vec[1], 2);
+
+ EXPECT_EQ(unique_vec.pop_back(), 2);
+ EXPECT_EQ(unique_vec.size(), 1u);
+ EXPECT_EQ(unique_vec.empty(), false);
+ EXPECT_EQ(unique_vec[0], 0);
+
+ unique_vec.add(1);
+
+ EXPECT_EQ(unique_vec.size(), 2u);
+ EXPECT_EQ(unique_vec.empty(), false);
+ EXPECT_EQ(unique_vec[0], 0);
+ EXPECT_EQ(unique_vec[1], 1);
+
+ EXPECT_EQ(unique_vec.pop_back(), 1);
+ EXPECT_EQ(unique_vec.size(), 1u);
+ EXPECT_EQ(unique_vec.empty(), false);
+ EXPECT_EQ(unique_vec[0], 0);
+
+ EXPECT_EQ(unique_vec.pop_back(), 0);
+ EXPECT_EQ(unique_vec.size(), 0u);
+ EXPECT_EQ(unique_vec.empty(), true);
+}
+
+TEST(UniqueVectorTest, Data) {
+ UniqueVector<int> unique_vec;
+ EXPECT_EQ(unique_vec.data(), nullptr);
+
+ unique_vec.add(42);
+ EXPECT_EQ(unique_vec.data(), &unique_vec[0]);
+ EXPECT_EQ(*unique_vec.data(), 42);
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/val/hlsl.cc b/chromium/third_party/dawn/src/tint/val/hlsl.cc
index a817a5dda7b..49c38501a9e 100644
--- a/chromium/third_party/dawn/src/tint/val/hlsl.cc
+++ b/chromium/third_party/dawn/src/tint/val/hlsl.cc
@@ -30,146 +30,167 @@ namespace tint::val {
Result HlslUsingDXC(const std::string& dxc_path,
const std::string& source,
- const EntryPointList& entry_points) {
- Result result;
+ const EntryPointList& entry_points,
+ const std::vector<std::string>& overrides) {
+ Result result;
- auto dxc = utils::Command(dxc_path);
- if (!dxc.Found()) {
- result.output = "DXC not found at '" + std::string(dxc_path) + "'";
- result.failed = true;
- return result;
- }
-
- utils::TmpFile file;
- file << source;
-
- for (auto ep : entry_points) {
- const char* profile = "";
-
- switch (ep.second) {
- case ast::PipelineStage::kNone:
- result.output = "Invalid PipelineStage";
+ auto dxc = utils::Command(dxc_path);
+ if (!dxc.Found()) {
+ result.output = "DXC not found at '" + std::string(dxc_path) + "'";
result.failed = true;
return result;
- case ast::PipelineStage::kVertex:
- profile = "-T vs_6_0";
- break;
- case ast::PipelineStage::kFragment:
- profile = "-T ps_6_0";
- break;
- case ast::PipelineStage::kCompute:
- profile = "-T cs_6_0";
- break;
}
- // Match Dawn's compile flags
- // See dawn\src\dawn_native\d3d12\RenderPipelineD3D12.cpp
- // and dawn_native\d3d12\ShaderModuleD3D12.cpp (GetDXCArguments)
- const char* compileFlags =
- "/Zpr " // D3DCOMPILE_PACK_MATRIX_ROW_MAJOR
- "/Gis"; // D3DCOMPILE_IEEE_STRICTNESS
-
- auto res = dxc(profile, "-E " + ep.first, compileFlags, file.Path());
- if (!res.out.empty()) {
- if (!result.output.empty()) {
- result.output += "\n";
- }
- result.output += res.out;
+ utils::TmpFile file;
+ file << source;
+
+ for (auto ep : entry_points) {
+ const char* profile = "";
+
+ switch (ep.second) {
+ case ast::PipelineStage::kNone:
+ result.output = "Invalid PipelineStage";
+ result.failed = true;
+ return result;
+ case ast::PipelineStage::kVertex:
+ profile = "-T vs_6_0";
+ break;
+ case ast::PipelineStage::kFragment:
+ profile = "-T ps_6_0";
+ break;
+ case ast::PipelineStage::kCompute:
+ profile = "-T cs_6_0";
+ break;
+ }
+
+ // Match Dawn's compile flags
+ // See dawn\src\dawn_native\d3d12\RenderPipelineD3D12.cpp
+ // and dawn_native\d3d12\ShaderModuleD3D12.cpp (GetDXCArguments)
+ const char* compileFlags =
+ "/Zpr " // D3DCOMPILE_PACK_MATRIX_ROW_MAJOR
+ "/Gis"; // D3DCOMPILE_IEEE_STRICTNESS
+
+ std::string defs;
+ defs.reserve(overrides.size() * 20);
+ for (auto& o : overrides) {
+ defs += "/D" + o + " ";
+ }
+
+ auto res = dxc(profile, "-E " + ep.first, compileFlags, file.Path(), defs);
+ if (!res.out.empty()) {
+ if (!result.output.empty()) {
+ result.output += "\n";
+ }
+ result.output += res.out;
+ }
+ if (!res.err.empty()) {
+ if (!result.output.empty()) {
+ result.output += "\n";
+ }
+ result.output += res.err;
+ }
+ result.failed = (res.error_code != 0);
}
- if (!res.err.empty()) {
- if (!result.output.empty()) {
- result.output += "\n";
- }
- result.output += res.err;
+
+ if (entry_points.empty()) {
+ result.output = "No entrypoint found";
+ result.failed = true;
+ return result;
}
- result.failed = (res.error_code != 0);
- }
- if (entry_points.empty()) {
- result.output = "No entrypoint found";
- result.failed = true;
return result;
- }
-
- return result;
}
#ifdef _WIN32
Result HlslUsingFXC(const std::string& source,
- const EntryPointList& entry_points) {
- Result result;
-
- // This library leaks if an error happens in this function, but it is ok
- // because it is loaded at most once, and the executables using HlslUsingFXC
- // are short-lived.
- HMODULE fxcLib = LoadLibraryA("d3dcompiler_47.dll");
- if (fxcLib == nullptr) {
- result.output = "Couldn't load FXC";
- result.failed = true;
- return result;
- }
-
- pD3DCompile d3dCompile = reinterpret_cast<pD3DCompile>(
- reinterpret_cast<void*>(GetProcAddress(fxcLib, "D3DCompile")));
- if (d3dCompile == nullptr) {
- result.output = "Couldn't load D3DCompile from FXC";
- result.failed = true;
- return result;
- }
+ const EntryPointList& entry_points,
+ const std::vector<std::string>& overrides) {
+ Result result;
+
+ // This library leaks if an error happens in this function, but it is ok
+ // because it is loaded at most once, and the executables using HlslUsingFXC
+ // are short-lived.
+ HMODULE fxcLib = LoadLibraryA("d3dcompiler_47.dll");
+ if (fxcLib == nullptr) {
+ result.output = "Couldn't load FXC";
+ result.failed = true;
+ return result;
+ }
- for (auto ep : entry_points) {
- const char* profile = "";
- switch (ep.second) {
- case ast::PipelineStage::kNone:
- result.output = "Invalid PipelineStage";
+ pD3DCompile d3dCompile = reinterpret_cast<pD3DCompile>(
+ reinterpret_cast<void*>(GetProcAddress(fxcLib, "D3DCompile")));
+ if (d3dCompile == nullptr) {
+ result.output = "Couldn't load D3DCompile from FXC";
result.failed = true;
return result;
- case ast::PipelineStage::kVertex:
- profile = "vs_5_1";
- break;
- case ast::PipelineStage::kFragment:
- profile = "ps_5_1";
- break;
- case ast::PipelineStage::kCompute:
- profile = "cs_5_1";
- break;
}
- // Match Dawn's compile flags
- // See dawn\src\dawn_native\d3d12\RenderPipelineD3D12.cpp
- UINT compileFlags = D3DCOMPILE_OPTIMIZATION_LEVEL0 |
- D3DCOMPILE_PACK_MATRIX_ROW_MAJOR |
- D3DCOMPILE_IEEE_STRICTNESS;
-
- ComPtr<ID3DBlob> compiledShader;
- ComPtr<ID3DBlob> errors;
- HRESULT cr = d3dCompile(source.c_str(), // pSrcData
- source.length(), // SrcDataSize
- nullptr, // pSourceName
- nullptr, // pDefines
- nullptr, // pInclude
- ep.first.c_str(), // pEntrypoint
- profile, // pTarget
- compileFlags, // Flags1
- 0, // Flags2
- &compiledShader, // ppCode
- &errors); // ppErrorMsgs
- if (FAILED(cr)) {
- result.output = static_cast<char*>(errors->GetBufferPointer());
- result.failed = true;
- return result;
+ for (auto ep : entry_points) {
+ const char* profile = "";
+ switch (ep.second) {
+ case ast::PipelineStage::kNone:
+ result.output = "Invalid PipelineStage";
+ result.failed = true;
+ return result;
+ case ast::PipelineStage::kVertex:
+ profile = "vs_5_1";
+ break;
+ case ast::PipelineStage::kFragment:
+ profile = "ps_5_1";
+ break;
+ case ast::PipelineStage::kCompute:
+ profile = "cs_5_1";
+ break;
+ }
+
+ // Match Dawn's compile flags
+ // See dawn\src\dawn_native\d3d12\RenderPipelineD3D12.cpp
+ UINT compileFlags = D3DCOMPILE_OPTIMIZATION_LEVEL0 | D3DCOMPILE_PACK_MATRIX_ROW_MAJOR |
+ D3DCOMPILE_IEEE_STRICTNESS;
+
+ auto overrides_copy = overrides; // Copy so that we can replace '=' with '\0'
+ std::vector<D3D_SHADER_MACRO> macros;
+ macros.reserve(overrides_copy.size() * 2);
+ for (auto& o : overrides_copy) {
+ if (auto sep = o.find_first_of('='); sep != std::string::npos) {
+ // Replace '=' with '\0' so we can point directly into the allocated string buffer
+ o[sep] = '\0';
+ macros.push_back(D3D_SHADER_MACRO{&o[0], &o[sep + 1]});
+ } else {
+ macros.emplace_back(D3D_SHADER_MACRO{o.c_str(), NULL});
+ }
+ }
+ macros.emplace_back(D3D_SHADER_MACRO{NULL, NULL});
+
+ ComPtr<ID3DBlob> compiledShader;
+ ComPtr<ID3DBlob> errors;
+ HRESULT cr = d3dCompile(source.c_str(), // pSrcData
+ source.length(), // SrcDataSize
+ nullptr, // pSourceName
+ macros.data(), // pDefines
+ nullptr, // pInclude
+ ep.first.c_str(), // pEntrypoint
+ profile, // pTarget
+ compileFlags, // Flags1
+ 0, // Flags2
+ &compiledShader, // ppCode
+ &errors); // ppErrorMsgs
+ if (FAILED(cr)) {
+ result.output = static_cast<char*>(errors->GetBufferPointer());
+ result.failed = true;
+ return result;
+ }
}
- }
- FreeLibrary(fxcLib);
+ FreeLibrary(fxcLib);
- if (entry_points.empty()) {
- result.output = "No entrypoint found";
- result.failed = true;
- return result;
- }
+ if (entry_points.empty()) {
+ result.output = "No entrypoint found";
+ result.failed = true;
+ return result;
+ }
- return result;
+ return result;
}
#endif // _WIN32
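
For context, the overrides parameter threaded through HlslUsingDXC/HlslUsingFXC above carries strings of the form NAME or NAME=VALUE; a rough caller-side sketch (the macro name, dxc path and entry point are hypothetical):

    #include <string>
    #include <vector>

    #include "src/tint/val/val.h"

    void Validate(const std::string& hlsl_source) {
        tint::val::EntryPointList eps = {{"main", tint::ast::PipelineStage::kCompute}};
        // Forwarded as "/DTINT_OVERRIDE_0=2" by the DXC path, and split at '=' into a
        // D3D_SHADER_MACRO {"TINT_OVERRIDE_0", "2"} by the FXC path.
        std::vector<std::string> overrides = {"TINT_OVERRIDE_0=2"};
        auto res = tint::val::HlslUsingDXC("dxc", hlsl_source, eps, overrides);
        if (res.failed) {
            // res.output contains the compiler diagnostics.
        }
    }
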
diff --git a/chromium/third_party/dawn/src/tint/val/msl.cc b/chromium/third_party/dawn/src/tint/val/msl.cc
index 68e3c29b02d..13bae1dc6f1 100644
--- a/chromium/third_party/dawn/src/tint/val/msl.cc
+++ b/chromium/third_party/dawn/src/tint/val/msl.cc
@@ -22,46 +22,46 @@
namespace tint::val {
Result Msl(const std::string& xcrun_path, const std::string& source) {
- Result result;
+ Result result;
- auto xcrun = utils::Command(xcrun_path);
- if (!xcrun.Found()) {
- result.output = "xcrun not found at '" + std::string(xcrun_path) + "'";
- result.failed = true;
- return result;
- }
+ auto xcrun = utils::Command(xcrun_path);
+ if (!xcrun.Found()) {
+ result.output = "xcrun not found at '" + std::string(xcrun_path) + "'";
+ result.failed = true;
+ return result;
+ }
- utils::TmpFile file(".metal");
- file << source;
+ utils::TmpFile file(".metal");
+ file << source;
#ifdef _WIN32
- // On Windows, we should actually be running metal.exe from the Metal
- // Developer Tools for Windows
- auto res = xcrun("-x", "metal", //
- "-o", "NUL", //
- "-std=osx-metal1.2", //
- "-c", file.Path());
+ // On Windows, we should actually be running metal.exe from the Metal
+ // Developer Tools for Windows
+ auto res = xcrun("-x", "metal", //
+ "-o", "NUL", //
+ "-std=osx-metal1.2", //
+ "-c", file.Path());
#else
- auto res = xcrun("-sdk", "macosx", "metal", //
- "-o", "/dev/null", //
- "-std=osx-metal1.2", //
- "-c", file.Path());
+ auto res = xcrun("-sdk", "macosx", "metal", //
+ "-o", "/dev/null", //
+ "-std=osx-metal1.2", //
+ "-c", file.Path());
#endif
- if (!res.out.empty()) {
- if (!result.output.empty()) {
- result.output += "\n";
+ if (!res.out.empty()) {
+ if (!result.output.empty()) {
+ result.output += "\n";
+ }
+ result.output += res.out;
}
- result.output += res.out;
- }
- if (!res.err.empty()) {
- if (!result.output.empty()) {
- result.output += "\n";
+ if (!res.err.empty()) {
+ if (!result.output.empty()) {
+ result.output += "\n";
+ }
+ result.output += res.err;
}
- result.output += res.err;
- }
- result.failed = (res.error_code != 0);
+ result.failed = (res.error_code != 0);
- return result;
+ return result;
}
} // namespace tint::val
diff --git a/chromium/third_party/dawn/src/tint/val/msl_metal.mm b/chromium/third_party/dawn/src/tint/val/msl_metal.mm
index ad0271fd845..4e60959ba85 100644
--- a/chromium/third_party/dawn/src/tint/val/msl_metal.mm
+++ b/chromium/third_party/dawn/src/tint/val/msl_metal.mm
@@ -25,33 +25,32 @@
namespace tint::val {
Result MslUsingMetalAPI(const std::string& src) {
- tint::val::Result result;
+ tint::val::Result result;
- NSError* error = nil;
+ NSError* error = nil;
- id<MTLDevice> device = MTLCreateSystemDefaultDevice();
- if (!device) {
- result.output = "MTLCreateSystemDefaultDevice returned null";
- result.failed = true;
- return result;
- }
+ id<MTLDevice> device = MTLCreateSystemDefaultDevice();
+ if (!device) {
+ result.output = "MTLCreateSystemDefaultDevice returned null";
+ result.failed = true;
+ return result;
+ }
- NSString* source = [NSString stringWithCString:src.c_str()
- encoding:NSUTF8StringEncoding];
+ NSString* source = [NSString stringWithCString:src.c_str() encoding:NSUTF8StringEncoding];
- MTLCompileOptions* compileOptions = [MTLCompileOptions new];
- compileOptions.languageVersion = MTLLanguageVersion1_2;
+ MTLCompileOptions* compileOptions = [MTLCompileOptions new];
+ compileOptions.languageVersion = MTLLanguageVersion1_2;
- id<MTLLibrary> library = [device newLibraryWithSource:source
- options:compileOptions
- error:&error];
- if (!library) {
- NSString* output = [error localizedDescription];
- result.output = [output UTF8String];
- result.failed = true;
- }
+ id<MTLLibrary> library = [device newLibraryWithSource:source
+ options:compileOptions
+ error:&error];
+ if (!library) {
+ NSString* output = [error localizedDescription];
+ result.output = [output UTF8String];
+ result.failed = true;
+ }
- return result;
+ return result;
}
} // namespace tint::val
diff --git a/chromium/third_party/dawn/src/tint/val/val.h b/chromium/third_party/dawn/src/tint/val/val.h
index a936181411d..c869efb7aa8 100644
--- a/chromium/third_party/dawn/src/tint/val/val.h
+++ b/chromium/third_party/dawn/src/tint/val/val.h
@@ -32,10 +32,10 @@ using EntryPointList = std::vector<std::pair<std::string, ast::PipelineStage>>;
/// The return structure of Validate()
struct Result {
- /// True if validation passed
- bool failed = false;
- /// Output of DXC.
- std::string output;
+    /// True if validation failed
+ bool failed = false;
+    /// Output of the validation tool.
+ std::string output;
};
/// Hlsl attempts to compile the shader with DXC, verifying that the shader
@@ -43,19 +43,23 @@ struct Result {
/// @param dxc_path path to DXC
/// @param source the generated HLSL source
/// @param entry_points the list of entry points to validate
+/// @param overrides optional list of pipeline overrides
/// @return the result of the compile
Result HlslUsingDXC(const std::string& dxc_path,
const std::string& source,
- const EntryPointList& entry_points);
+ const EntryPointList& entry_points,
+ const std::vector<std::string>& overrides);
#ifdef _WIN32
/// Hlsl attempts to compile the shader with FXC, verifying that the shader
/// compiles successfully.
/// @param source the generated HLSL source
/// @param entry_points the list of entry points to validate
+/// @param overrides optional list of pipeline overrides
/// @return the result of the compile
Result HlslUsingFXC(const std::string& source,
- const EntryPointList& entry_points);
+ const EntryPointList& entry_points,
+ const std::vector<std::string>& overrides);
#endif // _WIN32
/// Msl attempts to compile the shader with the Metal Shader Compiler,
diff --git a/chromium/third_party/dawn/src/tint/writer/append_vector.cc b/chromium/third_party/dawn/src/tint/writer/append_vector.cc
index 5c02917d47b..bc89d1f0d87 100644
--- a/chromium/third_party/dawn/src/tint/writer/append_vector.cc
+++ b/chromium/third_party/dawn/src/tint/writer/append_vector.cc
@@ -23,46 +23,46 @@
#include "src/tint/sem/type_conversion.h"
#include "src/tint/utils/transform.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer {
namespace {
struct VectorConstructorInfo {
- const sem::Call* call = nullptr;
- const sem::TypeConstructor* ctor = nullptr;
- operator bool() const { return call != nullptr; }
+ const sem::Call* call = nullptr;
+ const sem::TypeConstructor* ctor = nullptr;
+ operator bool() const { return call != nullptr; }
};
VectorConstructorInfo AsVectorConstructor(const sem::Expression* expr) {
- if (auto* call = expr->As<sem::Call>()) {
- if (auto* ctor = call->Target()->As<sem::TypeConstructor>()) {
- if (ctor->ReturnType()->Is<sem::Vector>()) {
- return {call, ctor};
- }
+ if (auto* call = expr->As<sem::Call>()) {
+ if (auto* ctor = call->Target()->As<sem::TypeConstructor>()) {
+ if (ctor->ReturnType()->Is<sem::Vector>()) {
+ return {call, ctor};
+ }
+ }
}
- }
- return {};
+ return {};
}
-const sem::Expression* Zero(ProgramBuilder& b,
- const sem::Type* ty,
- const sem::Statement* stmt) {
- const ast::Expression* expr = nullptr;
- if (ty->Is<sem::I32>()) {
- expr = b.Expr(0);
- } else if (ty->Is<sem::U32>()) {
- expr = b.Expr(0u);
- } else if (ty->Is<sem::F32>()) {
- expr = b.Expr(0.0f);
- } else if (ty->Is<sem::Bool>()) {
- expr = b.Expr(false);
- } else {
- TINT_UNREACHABLE(Writer, b.Diagnostics())
- << "unsupported vector element type: " << ty->TypeInfo().name;
- return nullptr;
- }
- auto* sem = b.create<sem::Expression>(expr, ty, stmt, sem::Constant{},
- /* has_side_effects */ false);
- b.Sem().Add(expr, sem);
- return sem;
+const sem::Expression* Zero(ProgramBuilder& b, const sem::Type* ty, const sem::Statement* stmt) {
+ const ast::Expression* expr = nullptr;
+ if (ty->Is<sem::I32>()) {
+ expr = b.Expr(0_i);
+ } else if (ty->Is<sem::U32>()) {
+ expr = b.Expr(0_u);
+ } else if (ty->Is<sem::F32>()) {
+ expr = b.Expr(0_f);
+ } else if (ty->Is<sem::Bool>()) {
+ expr = b.Expr(false);
+ } else {
+ TINT_UNREACHABLE(Writer, b.Diagnostics())
+ << "unsupported vector element type: " << ty->TypeInfo().name;
+ return nullptr;
+ }
+ auto* sem = b.create<sem::Expression>(expr, ty, stmt, sem::Constant{},
+ /* has_side_effects */ false);
+ b.Sem().Add(expr, sem);
+ return sem;
}
} // namespace
@@ -70,104 +70,98 @@ const sem::Expression* Zero(ProgramBuilder& b,
const sem::Call* AppendVector(ProgramBuilder* b,
const ast::Expression* vector_ast,
const ast::Expression* scalar_ast) {
- uint32_t packed_size;
- const sem::Type* packed_el_sem_ty;
- auto* vector_sem = b->Sem().Get(vector_ast);
- auto* scalar_sem = b->Sem().Get(scalar_ast);
- auto* vector_ty = vector_sem->Type()->UnwrapRef();
- if (auto* vec = vector_ty->As<sem::Vector>()) {
- packed_size = vec->Width() + 1;
- packed_el_sem_ty = vec->type();
- } else {
- packed_size = 2;
- packed_el_sem_ty = vector_ty;
- }
-
- const ast::Type* packed_el_ast_ty = nullptr;
- if (packed_el_sem_ty->Is<sem::I32>()) {
- packed_el_ast_ty = b->create<ast::I32>();
- } else if (packed_el_sem_ty->Is<sem::U32>()) {
- packed_el_ast_ty = b->create<ast::U32>();
- } else if (packed_el_sem_ty->Is<sem::F32>()) {
- packed_el_ast_ty = b->create<ast::F32>();
- } else if (packed_el_sem_ty->Is<sem::Bool>()) {
- packed_el_ast_ty = b->create<ast::Bool>();
- } else {
- TINT_UNREACHABLE(Writer, b->Diagnostics())
- << "unsupported vector element type: "
- << packed_el_sem_ty->TypeInfo().name;
- }
-
- auto* statement = vector_sem->Stmt();
-
- auto* packed_ast_ty = b->create<ast::Vector>(packed_el_ast_ty, packed_size);
- auto* packed_sem_ty = b->create<sem::Vector>(packed_el_sem_ty, packed_size);
-
- // If the coordinates are already passed in a vector constructor, with only
- // scalar components supplied, extract the elements into the new vector
- // instead of nesting a vector-in-vector.
- // If the coordinates are a zero-constructor of the vector, then expand that
- // to scalar zeros.
- // The other cases for a nested vector constructor are when it is used
- // to convert a vector of a different type, e.g. vec2<i32>(vec2<u32>()).
- // In that case, preserve the original argument, or you'll get a type error.
-
- std::vector<const sem::Expression*> packed;
- if (auto vc = AsVectorConstructor(vector_sem)) {
- const auto num_supplied = vc.call->Arguments().size();
- if (num_supplied == 0) {
- // Zero-value vector constructor. Populate with zeros
- for (uint32_t i = 0; i < packed_size - 1; i++) {
- auto* zero = Zero(*b, packed_el_sem_ty, statement);
- packed.emplace_back(zero);
- }
- } else if (num_supplied + 1 == packed_size) {
- // All vector components were supplied as scalars. Pass them through.
- packed = vc.call->Arguments();
+ uint32_t packed_size;
+ const sem::Type* packed_el_sem_ty;
+ auto* vector_sem = b->Sem().Get(vector_ast);
+ auto* scalar_sem = b->Sem().Get(scalar_ast);
+ auto* vector_ty = vector_sem->Type()->UnwrapRef();
+ if (auto* vec = vector_ty->As<sem::Vector>()) {
+ packed_size = vec->Width() + 1;
+ packed_el_sem_ty = vec->type();
+ } else {
+ packed_size = 2;
+ packed_el_sem_ty = vector_ty;
+ }
+
+ const ast::Type* packed_el_ast_ty = nullptr;
+ if (packed_el_sem_ty->Is<sem::I32>()) {
+ packed_el_ast_ty = b->create<ast::I32>();
+ } else if (packed_el_sem_ty->Is<sem::U32>()) {
+ packed_el_ast_ty = b->create<ast::U32>();
+ } else if (packed_el_sem_ty->Is<sem::F32>()) {
+ packed_el_ast_ty = b->create<ast::F32>();
+ } else if (packed_el_sem_ty->Is<sem::Bool>()) {
+ packed_el_ast_ty = b->create<ast::Bool>();
+ } else {
+ TINT_UNREACHABLE(Writer, b->Diagnostics())
+ << "unsupported vector element type: " << packed_el_sem_ty->TypeInfo().name;
+ }
+
+ auto* statement = vector_sem->Stmt();
+
+ auto* packed_ast_ty = b->create<ast::Vector>(packed_el_ast_ty, packed_size);
+ auto* packed_sem_ty = b->create<sem::Vector>(packed_el_sem_ty, packed_size);
+
+ // If the coordinates are already passed in a vector constructor, with only
+ // scalar components supplied, extract the elements into the new vector
+ // instead of nesting a vector-in-vector.
+ // If the coordinates are a zero-constructor of the vector, then expand that
+ // to scalar zeros.
+ // The other cases for a nested vector constructor are when it is used
+ // to convert a vector of a different type, e.g. vec2<i32>(vec2<u32>()).
+ // In that case, preserve the original argument, or you'll get a type error.
+
+ std::vector<const sem::Expression*> packed;
+ if (auto vc = AsVectorConstructor(vector_sem)) {
+ const auto num_supplied = vc.call->Arguments().size();
+ if (num_supplied == 0) {
+ // Zero-value vector constructor. Populate with zeros
+ for (uint32_t i = 0; i < packed_size - 1; i++) {
+ auto* zero = Zero(*b, packed_el_sem_ty, statement);
+ packed.emplace_back(zero);
+ }
+ } else if (num_supplied + 1 == packed_size) {
+ // All vector components were supplied as scalars. Pass them through.
+ packed = vc.call->Arguments();
+ }
+ }
+ if (packed.empty()) {
+ // The special cases didn't occur. Use the vector argument as-is.
+ packed.emplace_back(vector_sem);
}
- }
- if (packed.empty()) {
- // The special cases didn't occur. Use the vector argument as-is.
- packed.emplace_back(vector_sem);
- }
-
- if (packed_el_sem_ty != scalar_sem->Type()->UnwrapRef()) {
- // Cast scalar to the vector element type
- auto* scalar_cast_ast = b->Construct(packed_el_ast_ty, scalar_ast);
- auto* scalar_cast_target = b->create<sem::TypeConversion>(
- packed_el_sem_ty,
- b->create<sem::Parameter>(nullptr, 0, scalar_sem->Type()->UnwrapRef(),
- ast::StorageClass::kNone,
- ast::Access::kUndefined));
- auto* scalar_cast_sem = b->create<sem::Call>(
- scalar_cast_ast, scalar_cast_target,
- std::vector<const sem::Expression*>{scalar_sem}, statement,
- sem::Constant{}, /* has_side_effects */ false);
- b->Sem().Add(scalar_cast_ast, scalar_cast_sem);
- packed.emplace_back(scalar_cast_sem);
- } else {
- packed.emplace_back(scalar_sem);
- }
-
- auto* constructor_ast = b->Construct(
- packed_ast_ty, utils::Transform(packed, [&](const sem::Expression* expr) {
- return expr->Declaration();
- }));
- auto* constructor_target = b->create<sem::TypeConstructor>(
- packed_sem_ty, utils::Transform(packed,
- [&](const tint::sem::Expression* arg,
- size_t i) -> const sem::Parameter* {
- return b->create<sem::Parameter>(
- nullptr, static_cast<uint32_t>(i),
- arg->Type()->UnwrapRef(),
- ast::StorageClass::kNone,
- ast::Access::kUndefined);
- }));
- auto* constructor_sem = b->create<sem::Call>(
- constructor_ast, constructor_target, packed, statement, sem::Constant{},
- /* has_side_effects */ false);
- b->Sem().Add(constructor_ast, constructor_sem);
- return constructor_sem;
+
+ if (packed_el_sem_ty != scalar_sem->Type()->UnwrapRef()) {
+ // Cast scalar to the vector element type
+ auto* scalar_cast_ast = b->Construct(packed_el_ast_ty, scalar_ast);
+ auto* scalar_cast_target = b->create<sem::TypeConversion>(
+ packed_el_sem_ty,
+ b->create<sem::Parameter>(nullptr, 0, scalar_sem->Type()->UnwrapRef(),
+ ast::StorageClass::kNone, ast::Access::kUndefined));
+ auto* scalar_cast_sem = b->create<sem::Call>(
+ scalar_cast_ast, scalar_cast_target, std::vector<const sem::Expression*>{scalar_sem},
+ statement, sem::Constant{}, /* has_side_effects */ false);
+ b->Sem().Add(scalar_cast_ast, scalar_cast_sem);
+ packed.emplace_back(scalar_cast_sem);
+ } else {
+ packed.emplace_back(scalar_sem);
+ }
+
+ auto* constructor_ast = b->Construct(
+ packed_ast_ty,
+ utils::Transform(packed, [&](const sem::Expression* expr) { return expr->Declaration(); }));
+ auto* constructor_target = b->create<sem::TypeConstructor>(
+ packed_sem_ty,
+ utils::Transform(
+ packed, [&](const tint::sem::Expression* arg, size_t i) -> const sem::Parameter* {
+ return b->create<sem::Parameter>(nullptr, static_cast<uint32_t>(i),
+ arg->Type()->UnwrapRef(), ast::StorageClass::kNone,
+ ast::Access::kUndefined);
+ }));
+ auto* constructor_sem = b->create<sem::Call>(constructor_ast, constructor_target, packed,
+ statement, sem::Constant{},
+ /* has_side_effects */ false);
+ b->Sem().Add(constructor_ast, constructor_sem);
+ return constructor_sem;
}
} // namespace tint::writer
diff --git a/chromium/third_party/dawn/src/tint/writer/append_vector_test.cc b/chromium/third_party/dawn/src/tint/writer/append_vector_test.cc
index e5b4292da31..8169039210f 100644
--- a/chromium/third_party/dawn/src/tint/writer/append_vector_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/append_vector_test.cc
@@ -19,6 +19,8 @@
#include "gtest/gtest.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer {
namespace {
@@ -26,460 +28,459 @@ class AppendVectorTest : public ::testing::Test, public ProgramBuilder {};
// AppendVector(vec2<i32>(1, 2), 3) -> vec3<i32>(1, 2, 3)
TEST_F(AppendVectorTest, Vec2i32_i32) {
- auto* scalar_1 = Expr(1);
- auto* scalar_2 = Expr(2);
- auto* scalar_3 = Expr(3);
- auto* vec_12 = vec2<i32>(scalar_1, scalar_2);
- WrapInFunction(vec_12, scalar_3);
-
- resolver::Resolver resolver(this);
- ASSERT_TRUE(resolver.Resolve()) << resolver.error();
-
- auto* append = AppendVector(this, vec_12, scalar_3);
-
- auto* vec_123 = As<ast::CallExpression>(append->Declaration());
- ASSERT_NE(vec_123, nullptr);
- ASSERT_EQ(vec_123->args.size(), 3u);
- EXPECT_EQ(vec_123->args[0], scalar_1);
- EXPECT_EQ(vec_123->args[1], scalar_2);
- EXPECT_EQ(vec_123->args[2], scalar_3);
-
- auto* call = Sem().Get(vec_123);
- ASSERT_NE(call, nullptr);
- ASSERT_EQ(call->Arguments().size(), 3u);
- EXPECT_EQ(call->Arguments()[0], Sem().Get(scalar_1));
- EXPECT_EQ(call->Arguments()[1], Sem().Get(scalar_2));
- EXPECT_EQ(call->Arguments()[2], Sem().Get(scalar_3));
-
- auto* ctor = call->Target()->As<sem::TypeConstructor>();
- ASSERT_NE(ctor, nullptr);
- ASSERT_TRUE(ctor->ReturnType()->Is<sem::Vector>());
- EXPECT_EQ(ctor->ReturnType()->As<sem::Vector>()->Width(), 3u);
- EXPECT_TRUE(ctor->ReturnType()->As<sem::Vector>()->type()->Is<sem::I32>());
- EXPECT_EQ(ctor->ReturnType(), call->Type());
-
- ASSERT_EQ(ctor->Parameters().size(), 3u);
- EXPECT_TRUE(ctor->Parameters()[0]->Type()->Is<sem::I32>());
- EXPECT_TRUE(ctor->Parameters()[1]->Type()->Is<sem::I32>());
- EXPECT_TRUE(ctor->Parameters()[2]->Type()->Is<sem::I32>());
+ auto* scalar_1 = Expr(1_i);
+ auto* scalar_2 = Expr(2_i);
+ auto* scalar_3 = Expr(3_i);
+ auto* vec_12 = vec2<i32>(scalar_1, scalar_2);
+ WrapInFunction(vec_12, scalar_3);
+
+ resolver::Resolver resolver(this);
+ ASSERT_TRUE(resolver.Resolve()) << resolver.error();
+
+ auto* append = AppendVector(this, vec_12, scalar_3);
+
+ auto* vec_123 = As<ast::CallExpression>(append->Declaration());
+ ASSERT_NE(vec_123, nullptr);
+ ASSERT_EQ(vec_123->args.size(), 3u);
+ EXPECT_EQ(vec_123->args[0], scalar_1);
+ EXPECT_EQ(vec_123->args[1], scalar_2);
+ EXPECT_EQ(vec_123->args[2], scalar_3);
+
+ auto* call = Sem().Get<sem::Call>(vec_123);
+ ASSERT_NE(call, nullptr);
+ ASSERT_EQ(call->Arguments().size(), 3u);
+ EXPECT_EQ(call->Arguments()[0], Sem().Get(scalar_1));
+ EXPECT_EQ(call->Arguments()[1], Sem().Get(scalar_2));
+ EXPECT_EQ(call->Arguments()[2], Sem().Get(scalar_3));
+
+ auto* ctor = call->Target()->As<sem::TypeConstructor>();
+ ASSERT_NE(ctor, nullptr);
+ ASSERT_TRUE(ctor->ReturnType()->Is<sem::Vector>());
+ EXPECT_EQ(ctor->ReturnType()->As<sem::Vector>()->Width(), 3u);
+ EXPECT_TRUE(ctor->ReturnType()->As<sem::Vector>()->type()->Is<sem::I32>());
+ EXPECT_EQ(ctor->ReturnType(), call->Type());
+
+ ASSERT_EQ(ctor->Parameters().size(), 3u);
+ EXPECT_TRUE(ctor->Parameters()[0]->Type()->Is<sem::I32>());
+ EXPECT_TRUE(ctor->Parameters()[1]->Type()->Is<sem::I32>());
+ EXPECT_TRUE(ctor->Parameters()[2]->Type()->Is<sem::I32>());
}
// AppendVector(vec2<i32>(1, 2), 3u) -> vec3<i32>(1, 2, i32(3u))
TEST_F(AppendVectorTest, Vec2i32_u32) {
- auto* scalar_1 = Expr(1);
- auto* scalar_2 = Expr(2);
- auto* scalar_3 = Expr(3u);
- auto* vec_12 = vec2<i32>(scalar_1, scalar_2);
- WrapInFunction(vec_12, scalar_3);
-
- resolver::Resolver resolver(this);
- ASSERT_TRUE(resolver.Resolve()) << resolver.error();
-
- auto* append = AppendVector(this, vec_12, scalar_3);
-
- auto* vec_123 = As<ast::CallExpression>(append->Declaration());
- ASSERT_NE(vec_123, nullptr);
- ASSERT_EQ(vec_123->args.size(), 3u);
- EXPECT_EQ(vec_123->args[0], scalar_1);
- EXPECT_EQ(vec_123->args[1], scalar_2);
- auto* u32_to_i32 = vec_123->args[2]->As<ast::CallExpression>();
- ASSERT_NE(u32_to_i32, nullptr);
- EXPECT_TRUE(u32_to_i32->target.type->Is<ast::I32>());
- ASSERT_EQ(u32_to_i32->args.size(), 1u);
- EXPECT_EQ(u32_to_i32->args[0], scalar_3);
-
- auto* call = Sem().Get(vec_123);
- ASSERT_NE(call, nullptr);
- ASSERT_EQ(call->Arguments().size(), 3u);
- EXPECT_EQ(call->Arguments()[0], Sem().Get(scalar_1));
- EXPECT_EQ(call->Arguments()[1], Sem().Get(scalar_2));
- EXPECT_EQ(call->Arguments()[2], Sem().Get(u32_to_i32));
-
- auto* ctor = call->Target()->As<sem::TypeConstructor>();
- ASSERT_NE(ctor, nullptr);
- ASSERT_TRUE(ctor->ReturnType()->Is<sem::Vector>());
- EXPECT_EQ(ctor->ReturnType()->As<sem::Vector>()->Width(), 3u);
- EXPECT_TRUE(ctor->ReturnType()->As<sem::Vector>()->type()->Is<sem::I32>());
- EXPECT_EQ(ctor->ReturnType(), call->Type());
-
- ASSERT_EQ(ctor->Parameters().size(), 3u);
- EXPECT_TRUE(ctor->Parameters()[0]->Type()->Is<sem::I32>());
- EXPECT_TRUE(ctor->Parameters()[1]->Type()->Is<sem::I32>());
- EXPECT_TRUE(ctor->Parameters()[2]->Type()->Is<sem::I32>());
+ auto* scalar_1 = Expr(1_i);
+ auto* scalar_2 = Expr(2_i);
+ auto* scalar_3 = Expr(3_u);
+ auto* vec_12 = vec2<i32>(scalar_1, scalar_2);
+ WrapInFunction(vec_12, scalar_3);
+
+ resolver::Resolver resolver(this);
+ ASSERT_TRUE(resolver.Resolve()) << resolver.error();
+
+ auto* append = AppendVector(this, vec_12, scalar_3);
+
+ auto* vec_123 = As<ast::CallExpression>(append->Declaration());
+ ASSERT_NE(vec_123, nullptr);
+ ASSERT_EQ(vec_123->args.size(), 3u);
+ EXPECT_EQ(vec_123->args[0], scalar_1);
+ EXPECT_EQ(vec_123->args[1], scalar_2);
+ auto* u32_to_i32 = vec_123->args[2]->As<ast::CallExpression>();
+ ASSERT_NE(u32_to_i32, nullptr);
+ EXPECT_TRUE(u32_to_i32->target.type->Is<ast::I32>());
+ ASSERT_EQ(u32_to_i32->args.size(), 1u);
+ EXPECT_EQ(u32_to_i32->args[0], scalar_3);
+
+ auto* call = Sem().Get<sem::Call>(vec_123);
+ ASSERT_NE(call, nullptr);
+ ASSERT_EQ(call->Arguments().size(), 3u);
+ EXPECT_EQ(call->Arguments()[0], Sem().Get(scalar_1));
+ EXPECT_EQ(call->Arguments()[1], Sem().Get(scalar_2));
+ EXPECT_EQ(call->Arguments()[2], Sem().Get(u32_to_i32));
+
+ auto* ctor = call->Target()->As<sem::TypeConstructor>();
+ ASSERT_NE(ctor, nullptr);
+ ASSERT_TRUE(ctor->ReturnType()->Is<sem::Vector>());
+ EXPECT_EQ(ctor->ReturnType()->As<sem::Vector>()->Width(), 3u);
+ EXPECT_TRUE(ctor->ReturnType()->As<sem::Vector>()->type()->Is<sem::I32>());
+ EXPECT_EQ(ctor->ReturnType(), call->Type());
+
+ ASSERT_EQ(ctor->Parameters().size(), 3u);
+ EXPECT_TRUE(ctor->Parameters()[0]->Type()->Is<sem::I32>());
+ EXPECT_TRUE(ctor->Parameters()[1]->Type()->Is<sem::I32>());
+ EXPECT_TRUE(ctor->Parameters()[2]->Type()->Is<sem::I32>());
}
// AppendVector(vec2<i32>(vec2<u32>(1u, 2u)), 3u) ->
// vec3<i32>(vec2<i32>(vec2<u32>(1u, 2u)), i32(3u))
TEST_F(AppendVectorTest, Vec2i32FromVec2u32_u32) {
- auto* scalar_1 = Expr(1u);
- auto* scalar_2 = Expr(2u);
- auto* scalar_3 = Expr(3u);
- auto* uvec_12 = vec2<u32>(scalar_1, scalar_2);
- auto* vec_12 = vec2<i32>(uvec_12);
- WrapInFunction(vec_12, scalar_3);
-
- resolver::Resolver resolver(this);
- ASSERT_TRUE(resolver.Resolve()) << resolver.error();
-
- auto* append = AppendVector(this, vec_12, scalar_3);
-
- auto* vec_123 = As<ast::CallExpression>(append->Declaration());
- ASSERT_NE(vec_123, nullptr);
- ASSERT_EQ(vec_123->args.size(), 2u);
- auto* v2u32_to_v2i32 = vec_123->args[0]->As<ast::CallExpression>();
- ASSERT_NE(v2u32_to_v2i32, nullptr);
- ASSERT_TRUE(v2u32_to_v2i32->target.type->Is<ast::Vector>());
- EXPECT_EQ(v2u32_to_v2i32->target.type->As<ast::Vector>()->width, 2u);
- EXPECT_TRUE(
- v2u32_to_v2i32->target.type->As<ast::Vector>()->type->Is<ast::I32>());
- EXPECT_EQ(v2u32_to_v2i32->args.size(), 1u);
- EXPECT_EQ(v2u32_to_v2i32->args[0], uvec_12);
-
- auto* u32_to_i32 = vec_123->args[1]->As<ast::CallExpression>();
- ASSERT_NE(u32_to_i32, nullptr);
- EXPECT_TRUE(u32_to_i32->target.type->Is<ast::I32>());
- ASSERT_EQ(u32_to_i32->args.size(), 1u);
- EXPECT_EQ(u32_to_i32->args[0], scalar_3);
-
- auto* call = Sem().Get(vec_123);
- ASSERT_NE(call, nullptr);
- ASSERT_EQ(call->Arguments().size(), 2u);
- EXPECT_EQ(call->Arguments()[0], Sem().Get(vec_12));
- EXPECT_EQ(call->Arguments()[1], Sem().Get(u32_to_i32));
-
- auto* ctor = call->Target()->As<sem::TypeConstructor>();
- ASSERT_NE(ctor, nullptr);
- ASSERT_TRUE(ctor->ReturnType()->Is<sem::Vector>());
- EXPECT_EQ(ctor->ReturnType()->As<sem::Vector>()->Width(), 3u);
- EXPECT_TRUE(ctor->ReturnType()->As<sem::Vector>()->type()->Is<sem::I32>());
- EXPECT_EQ(ctor->ReturnType(), call->Type());
-
- ASSERT_EQ(ctor->Parameters().size(), 2u);
- EXPECT_TRUE(ctor->Parameters()[0]->Type()->Is<sem::Vector>());
- EXPECT_TRUE(ctor->Parameters()[1]->Type()->Is<sem::I32>());
+ auto* scalar_1 = Expr(1_u);
+ auto* scalar_2 = Expr(2_u);
+ auto* scalar_3 = Expr(3_u);
+ auto* uvec_12 = vec2<u32>(scalar_1, scalar_2);
+ auto* vec_12 = vec2<i32>(uvec_12);
+ WrapInFunction(vec_12, scalar_3);
+
+ resolver::Resolver resolver(this);
+ ASSERT_TRUE(resolver.Resolve()) << resolver.error();
+
+ auto* append = AppendVector(this, vec_12, scalar_3);
+
+ auto* vec_123 = As<ast::CallExpression>(append->Declaration());
+ ASSERT_NE(vec_123, nullptr);
+ ASSERT_EQ(vec_123->args.size(), 2u);
+ auto* v2u32_to_v2i32 = vec_123->args[0]->As<ast::CallExpression>();
+ ASSERT_NE(v2u32_to_v2i32, nullptr);
+ ASSERT_TRUE(v2u32_to_v2i32->target.type->Is<ast::Vector>());
+ EXPECT_EQ(v2u32_to_v2i32->target.type->As<ast::Vector>()->width, 2u);
+ EXPECT_TRUE(v2u32_to_v2i32->target.type->As<ast::Vector>()->type->Is<ast::I32>());
+ EXPECT_EQ(v2u32_to_v2i32->args.size(), 1u);
+ EXPECT_EQ(v2u32_to_v2i32->args[0], uvec_12);
+
+ auto* u32_to_i32 = vec_123->args[1]->As<ast::CallExpression>();
+ ASSERT_NE(u32_to_i32, nullptr);
+ EXPECT_TRUE(u32_to_i32->target.type->Is<ast::I32>());
+ ASSERT_EQ(u32_to_i32->args.size(), 1u);
+ EXPECT_EQ(u32_to_i32->args[0], scalar_3);
+
+ auto* call = Sem().Get<sem::Call>(vec_123);
+ ASSERT_NE(call, nullptr);
+ ASSERT_EQ(call->Arguments().size(), 2u);
+ EXPECT_EQ(call->Arguments()[0], Sem().Get(vec_12));
+ EXPECT_EQ(call->Arguments()[1], Sem().Get(u32_to_i32));
+
+ auto* ctor = call->Target()->As<sem::TypeConstructor>();
+ ASSERT_NE(ctor, nullptr);
+ ASSERT_TRUE(ctor->ReturnType()->Is<sem::Vector>());
+ EXPECT_EQ(ctor->ReturnType()->As<sem::Vector>()->Width(), 3u);
+ EXPECT_TRUE(ctor->ReturnType()->As<sem::Vector>()->type()->Is<sem::I32>());
+ EXPECT_EQ(ctor->ReturnType(), call->Type());
+
+ ASSERT_EQ(ctor->Parameters().size(), 2u);
+ EXPECT_TRUE(ctor->Parameters()[0]->Type()->Is<sem::Vector>());
+ EXPECT_TRUE(ctor->Parameters()[1]->Type()->Is<sem::I32>());
}
// AppendVector(vec2<i32>(1, 2), 3.0f) -> vec3<i32>(1, 2, i32(3.0f))
TEST_F(AppendVectorTest, Vec2i32_f32) {
- auto* scalar_1 = Expr(1);
- auto* scalar_2 = Expr(2);
- auto* scalar_3 = Expr(3.0f);
- auto* vec_12 = vec2<i32>(scalar_1, scalar_2);
- WrapInFunction(vec_12, scalar_3);
-
- resolver::Resolver resolver(this);
- ASSERT_TRUE(resolver.Resolve()) << resolver.error();
-
- auto* append = AppendVector(this, vec_12, scalar_3);
-
- auto* vec_123 = As<ast::CallExpression>(append->Declaration());
- ASSERT_NE(vec_123, nullptr);
- ASSERT_EQ(vec_123->args.size(), 3u);
- EXPECT_EQ(vec_123->args[0], scalar_1);
- EXPECT_EQ(vec_123->args[1], scalar_2);
- auto* f32_to_i32 = vec_123->args[2]->As<ast::CallExpression>();
- ASSERT_NE(f32_to_i32, nullptr);
- EXPECT_TRUE(f32_to_i32->target.type->Is<ast::I32>());
- ASSERT_EQ(f32_to_i32->args.size(), 1u);
- EXPECT_EQ(f32_to_i32->args[0], scalar_3);
-
- auto* call = Sem().Get(vec_123);
- ASSERT_NE(call, nullptr);
- ASSERT_EQ(call->Arguments().size(), 3u);
- EXPECT_EQ(call->Arguments()[0], Sem().Get(scalar_1));
- EXPECT_EQ(call->Arguments()[1], Sem().Get(scalar_2));
- EXPECT_EQ(call->Arguments()[2], Sem().Get(f32_to_i32));
-
- auto* ctor = call->Target()->As<sem::TypeConstructor>();
- ASSERT_NE(ctor, nullptr);
- ASSERT_TRUE(ctor->ReturnType()->Is<sem::Vector>());
- EXPECT_EQ(ctor->ReturnType()->As<sem::Vector>()->Width(), 3u);
- EXPECT_TRUE(ctor->ReturnType()->As<sem::Vector>()->type()->Is<sem::I32>());
- EXPECT_EQ(ctor->ReturnType(), call->Type());
-
- ASSERT_EQ(ctor->Parameters().size(), 3u);
- EXPECT_TRUE(ctor->Parameters()[0]->Type()->Is<sem::I32>());
- EXPECT_TRUE(ctor->Parameters()[1]->Type()->Is<sem::I32>());
- EXPECT_TRUE(ctor->Parameters()[2]->Type()->Is<sem::I32>());
+ auto* scalar_1 = Expr(1_i);
+ auto* scalar_2 = Expr(2_i);
+ auto* scalar_3 = Expr(3_f);
+ auto* vec_12 = vec2<i32>(scalar_1, scalar_2);
+ WrapInFunction(vec_12, scalar_3);
+
+ resolver::Resolver resolver(this);
+ ASSERT_TRUE(resolver.Resolve()) << resolver.error();
+
+ auto* append = AppendVector(this, vec_12, scalar_3);
+
+ auto* vec_123 = As<ast::CallExpression>(append->Declaration());
+ ASSERT_NE(vec_123, nullptr);
+ ASSERT_EQ(vec_123->args.size(), 3u);
+ EXPECT_EQ(vec_123->args[0], scalar_1);
+ EXPECT_EQ(vec_123->args[1], scalar_2);
+ auto* f32_to_i32 = vec_123->args[2]->As<ast::CallExpression>();
+ ASSERT_NE(f32_to_i32, nullptr);
+ EXPECT_TRUE(f32_to_i32->target.type->Is<ast::I32>());
+ ASSERT_EQ(f32_to_i32->args.size(), 1u);
+ EXPECT_EQ(f32_to_i32->args[0], scalar_3);
+
+ auto* call = Sem().Get<sem::Call>(vec_123);
+ ASSERT_NE(call, nullptr);
+ ASSERT_EQ(call->Arguments().size(), 3u);
+ EXPECT_EQ(call->Arguments()[0], Sem().Get(scalar_1));
+ EXPECT_EQ(call->Arguments()[1], Sem().Get(scalar_2));
+ EXPECT_EQ(call->Arguments()[2], Sem().Get(f32_to_i32));
+
+ auto* ctor = call->Target()->As<sem::TypeConstructor>();
+ ASSERT_NE(ctor, nullptr);
+ ASSERT_TRUE(ctor->ReturnType()->Is<sem::Vector>());
+ EXPECT_EQ(ctor->ReturnType()->As<sem::Vector>()->Width(), 3u);
+ EXPECT_TRUE(ctor->ReturnType()->As<sem::Vector>()->type()->Is<sem::I32>());
+ EXPECT_EQ(ctor->ReturnType(), call->Type());
+
+ ASSERT_EQ(ctor->Parameters().size(), 3u);
+ EXPECT_TRUE(ctor->Parameters()[0]->Type()->Is<sem::I32>());
+ EXPECT_TRUE(ctor->Parameters()[1]->Type()->Is<sem::I32>());
+ EXPECT_TRUE(ctor->Parameters()[2]->Type()->Is<sem::I32>());
}
// AppendVector(vec3<i32>(1, 2, 3), 4) -> vec4<i32>(1, 2, 3, 4)
TEST_F(AppendVectorTest, Vec3i32_i32) {
- auto* scalar_1 = Expr(1);
- auto* scalar_2 = Expr(2);
- auto* scalar_3 = Expr(3);
- auto* scalar_4 = Expr(4);
- auto* vec_123 = vec3<i32>(scalar_1, scalar_2, scalar_3);
- WrapInFunction(vec_123, scalar_4);
-
- resolver::Resolver resolver(this);
- ASSERT_TRUE(resolver.Resolve()) << resolver.error();
-
- auto* append = AppendVector(this, vec_123, scalar_4);
-
- auto* vec_1234 = As<ast::CallExpression>(append->Declaration());
- ASSERT_NE(vec_1234, nullptr);
- ASSERT_EQ(vec_1234->args.size(), 4u);
- EXPECT_EQ(vec_1234->args[0], scalar_1);
- EXPECT_EQ(vec_1234->args[1], scalar_2);
- EXPECT_EQ(vec_1234->args[2], scalar_3);
- EXPECT_EQ(vec_1234->args[3], scalar_4);
-
- auto* call = Sem().Get(vec_1234);
- ASSERT_NE(call, nullptr);
- ASSERT_EQ(call->Arguments().size(), 4u);
- EXPECT_EQ(call->Arguments()[0], Sem().Get(scalar_1));
- EXPECT_EQ(call->Arguments()[1], Sem().Get(scalar_2));
- EXPECT_EQ(call->Arguments()[2], Sem().Get(scalar_3));
- EXPECT_EQ(call->Arguments()[3], Sem().Get(scalar_4));
-
- auto* ctor = call->Target()->As<sem::TypeConstructor>();
- ASSERT_NE(ctor, nullptr);
- ASSERT_TRUE(ctor->ReturnType()->Is<sem::Vector>());
- EXPECT_EQ(ctor->ReturnType()->As<sem::Vector>()->Width(), 4u);
- EXPECT_TRUE(ctor->ReturnType()->As<sem::Vector>()->type()->Is<sem::I32>());
- EXPECT_EQ(ctor->ReturnType(), call->Type());
-
- ASSERT_EQ(ctor->Parameters().size(), 4u);
- EXPECT_TRUE(ctor->Parameters()[0]->Type()->Is<sem::I32>());
- EXPECT_TRUE(ctor->Parameters()[1]->Type()->Is<sem::I32>());
- EXPECT_TRUE(ctor->Parameters()[2]->Type()->Is<sem::I32>());
- EXPECT_TRUE(ctor->Parameters()[3]->Type()->Is<sem::I32>());
+ auto* scalar_1 = Expr(1_i);
+ auto* scalar_2 = Expr(2_i);
+ auto* scalar_3 = Expr(3_i);
+ auto* scalar_4 = Expr(4_i);
+ auto* vec_123 = vec3<i32>(scalar_1, scalar_2, scalar_3);
+ WrapInFunction(vec_123, scalar_4);
+
+ resolver::Resolver resolver(this);
+ ASSERT_TRUE(resolver.Resolve()) << resolver.error();
+
+ auto* append = AppendVector(this, vec_123, scalar_4);
+
+ auto* vec_1234 = As<ast::CallExpression>(append->Declaration());
+ ASSERT_NE(vec_1234, nullptr);
+ ASSERT_EQ(vec_1234->args.size(), 4u);
+ EXPECT_EQ(vec_1234->args[0], scalar_1);
+ EXPECT_EQ(vec_1234->args[1], scalar_2);
+ EXPECT_EQ(vec_1234->args[2], scalar_3);
+ EXPECT_EQ(vec_1234->args[3], scalar_4);
+
+ auto* call = Sem().Get<sem::Call>(vec_1234);
+ ASSERT_NE(call, nullptr);
+ ASSERT_EQ(call->Arguments().size(), 4u);
+ EXPECT_EQ(call->Arguments()[0], Sem().Get(scalar_1));
+ EXPECT_EQ(call->Arguments()[1], Sem().Get(scalar_2));
+ EXPECT_EQ(call->Arguments()[2], Sem().Get(scalar_3));
+ EXPECT_EQ(call->Arguments()[3], Sem().Get(scalar_4));
+
+ auto* ctor = call->Target()->As<sem::TypeConstructor>();
+ ASSERT_NE(ctor, nullptr);
+ ASSERT_TRUE(ctor->ReturnType()->Is<sem::Vector>());
+ EXPECT_EQ(ctor->ReturnType()->As<sem::Vector>()->Width(), 4u);
+ EXPECT_TRUE(ctor->ReturnType()->As<sem::Vector>()->type()->Is<sem::I32>());
+ EXPECT_EQ(ctor->ReturnType(), call->Type());
+
+ ASSERT_EQ(ctor->Parameters().size(), 4u);
+ EXPECT_TRUE(ctor->Parameters()[0]->Type()->Is<sem::I32>());
+ EXPECT_TRUE(ctor->Parameters()[1]->Type()->Is<sem::I32>());
+ EXPECT_TRUE(ctor->Parameters()[2]->Type()->Is<sem::I32>());
+ EXPECT_TRUE(ctor->Parameters()[3]->Type()->Is<sem::I32>());
}
// AppendVector(vec_12, 3) -> vec3<i32>(vec_12, 3)
TEST_F(AppendVectorTest, Vec2i32Var_i32) {
- Global("vec_12", ty.vec2<i32>(), ast::StorageClass::kPrivate);
- auto* vec_12 = Expr("vec_12");
- auto* scalar_3 = Expr(3);
- WrapInFunction(vec_12, scalar_3);
-
- resolver::Resolver resolver(this);
- ASSERT_TRUE(resolver.Resolve()) << resolver.error();
-
- auto* append = AppendVector(this, vec_12, scalar_3);
-
- auto* vec_123 = As<ast::CallExpression>(append->Declaration());
- ASSERT_NE(vec_123, nullptr);
- ASSERT_EQ(vec_123->args.size(), 2u);
- EXPECT_EQ(vec_123->args[0], vec_12);
- EXPECT_EQ(vec_123->args[1], scalar_3);
-
- auto* call = Sem().Get(vec_123);
- ASSERT_NE(call, nullptr);
- ASSERT_EQ(call->Arguments().size(), 2u);
- EXPECT_EQ(call->Arguments()[0], Sem().Get(vec_12));
- EXPECT_EQ(call->Arguments()[1], Sem().Get(scalar_3));
-
- auto* ctor = call->Target()->As<sem::TypeConstructor>();
- ASSERT_NE(ctor, nullptr);
- ASSERT_TRUE(ctor->ReturnType()->Is<sem::Vector>());
- EXPECT_EQ(ctor->ReturnType()->As<sem::Vector>()->Width(), 3u);
- EXPECT_TRUE(ctor->ReturnType()->As<sem::Vector>()->type()->Is<sem::I32>());
- EXPECT_EQ(ctor->ReturnType(), call->Type());
-
- ASSERT_EQ(ctor->Parameters().size(), 2u);
- EXPECT_TRUE(ctor->Parameters()[0]->Type()->Is<sem::Vector>());
- EXPECT_TRUE(ctor->Parameters()[1]->Type()->Is<sem::I32>());
+ Global("vec_12", ty.vec2<i32>(), ast::StorageClass::kPrivate);
+ auto* vec_12 = Expr("vec_12");
+ auto* scalar_3 = Expr(3_i);
+ WrapInFunction(vec_12, scalar_3);
+
+ resolver::Resolver resolver(this);
+ ASSERT_TRUE(resolver.Resolve()) << resolver.error();
+
+ auto* append = AppendVector(this, vec_12, scalar_3);
+
+ auto* vec_123 = As<ast::CallExpression>(append->Declaration());
+ ASSERT_NE(vec_123, nullptr);
+ ASSERT_EQ(vec_123->args.size(), 2u);
+ EXPECT_EQ(vec_123->args[0], vec_12);
+ EXPECT_EQ(vec_123->args[1], scalar_3);
+
+ auto* call = Sem().Get<sem::Call>(vec_123);
+ ASSERT_NE(call, nullptr);
+ ASSERT_EQ(call->Arguments().size(), 2u);
+ EXPECT_EQ(call->Arguments()[0], Sem().Get(vec_12));
+ EXPECT_EQ(call->Arguments()[1], Sem().Get(scalar_3));
+
+ auto* ctor = call->Target()->As<sem::TypeConstructor>();
+ ASSERT_NE(ctor, nullptr);
+ ASSERT_TRUE(ctor->ReturnType()->Is<sem::Vector>());
+ EXPECT_EQ(ctor->ReturnType()->As<sem::Vector>()->Width(), 3u);
+ EXPECT_TRUE(ctor->ReturnType()->As<sem::Vector>()->type()->Is<sem::I32>());
+ EXPECT_EQ(ctor->ReturnType(), call->Type());
+
+ ASSERT_EQ(ctor->Parameters().size(), 2u);
+ EXPECT_TRUE(ctor->Parameters()[0]->Type()->Is<sem::Vector>());
+ EXPECT_TRUE(ctor->Parameters()[1]->Type()->Is<sem::I32>());
}
// AppendVector(vec2<i32>(1, 2), scalar_3) -> vec3<i32>(1, 2, scalar_3)
TEST_F(AppendVectorTest, Vec2i32_i32Var) {
- Global("scalar_3", ty.i32(), ast::StorageClass::kPrivate);
- auto* scalar_1 = Expr(1);
- auto* scalar_2 = Expr(2);
- auto* scalar_3 = Expr("scalar_3");
- auto* vec_12 = vec2<i32>(scalar_1, scalar_2);
- WrapInFunction(vec_12, scalar_3);
-
- resolver::Resolver resolver(this);
- ASSERT_TRUE(resolver.Resolve()) << resolver.error();
-
- auto* append = AppendVector(this, vec_12, scalar_3);
-
- auto* vec_123 = As<ast::CallExpression>(append->Declaration());
- ASSERT_NE(vec_123, nullptr);
- ASSERT_EQ(vec_123->args.size(), 3u);
- EXPECT_EQ(vec_123->args[0], scalar_1);
- EXPECT_EQ(vec_123->args[1], scalar_2);
- EXPECT_EQ(vec_123->args[2], scalar_3);
-
- auto* call = Sem().Get(vec_123);
- ASSERT_NE(call, nullptr);
- ASSERT_EQ(call->Arguments().size(), 3u);
- EXPECT_EQ(call->Arguments()[0], Sem().Get(scalar_1));
- EXPECT_EQ(call->Arguments()[1], Sem().Get(scalar_2));
- EXPECT_EQ(call->Arguments()[2], Sem().Get(scalar_3));
-
- auto* ctor = call->Target()->As<sem::TypeConstructor>();
- ASSERT_NE(ctor, nullptr);
- ASSERT_TRUE(ctor->ReturnType()->Is<sem::Vector>());
- EXPECT_EQ(ctor->ReturnType()->As<sem::Vector>()->Width(), 3u);
- EXPECT_TRUE(ctor->ReturnType()->As<sem::Vector>()->type()->Is<sem::I32>());
- EXPECT_EQ(ctor->ReturnType(), call->Type());
-
- ASSERT_EQ(ctor->Parameters().size(), 3u);
- EXPECT_TRUE(ctor->Parameters()[0]->Type()->Is<sem::I32>());
- EXPECT_TRUE(ctor->Parameters()[1]->Type()->Is<sem::I32>());
- EXPECT_TRUE(ctor->Parameters()[2]->Type()->Is<sem::I32>());
+ Global("scalar_3", ty.i32(), ast::StorageClass::kPrivate);
+ auto* scalar_1 = Expr(1_i);
+ auto* scalar_2 = Expr(2_i);
+ auto* scalar_3 = Expr("scalar_3");
+ auto* vec_12 = vec2<i32>(scalar_1, scalar_2);
+ WrapInFunction(vec_12, scalar_3);
+
+ resolver::Resolver resolver(this);
+ ASSERT_TRUE(resolver.Resolve()) << resolver.error();
+
+ auto* append = AppendVector(this, vec_12, scalar_3);
+
+ auto* vec_123 = As<ast::CallExpression>(append->Declaration());
+ ASSERT_NE(vec_123, nullptr);
+ ASSERT_EQ(vec_123->args.size(), 3u);
+ EXPECT_EQ(vec_123->args[0], scalar_1);
+ EXPECT_EQ(vec_123->args[1], scalar_2);
+ EXPECT_EQ(vec_123->args[2], scalar_3);
+
+ auto* call = Sem().Get<sem::Call>(vec_123);
+ ASSERT_NE(call, nullptr);
+ ASSERT_EQ(call->Arguments().size(), 3u);
+ EXPECT_EQ(call->Arguments()[0], Sem().Get(scalar_1));
+ EXPECT_EQ(call->Arguments()[1], Sem().Get(scalar_2));
+ EXPECT_EQ(call->Arguments()[2], Sem().Get(scalar_3));
+
+ auto* ctor = call->Target()->As<sem::TypeConstructor>();
+ ASSERT_NE(ctor, nullptr);
+ ASSERT_TRUE(ctor->ReturnType()->Is<sem::Vector>());
+ EXPECT_EQ(ctor->ReturnType()->As<sem::Vector>()->Width(), 3u);
+ EXPECT_TRUE(ctor->ReturnType()->As<sem::Vector>()->type()->Is<sem::I32>());
+ EXPECT_EQ(ctor->ReturnType(), call->Type());
+
+ ASSERT_EQ(ctor->Parameters().size(), 3u);
+ EXPECT_TRUE(ctor->Parameters()[0]->Type()->Is<sem::I32>());
+ EXPECT_TRUE(ctor->Parameters()[1]->Type()->Is<sem::I32>());
+ EXPECT_TRUE(ctor->Parameters()[2]->Type()->Is<sem::I32>());
}
// AppendVector(vec_12, scalar_3) -> vec3<i32>(vec_12, scalar_3)
TEST_F(AppendVectorTest, Vec2i32Var_i32Var) {
- Global("vec_12", ty.vec2<i32>(), ast::StorageClass::kPrivate);
- Global("scalar_3", ty.i32(), ast::StorageClass::kPrivate);
- auto* vec_12 = Expr("vec_12");
- auto* scalar_3 = Expr("scalar_3");
- WrapInFunction(vec_12, scalar_3);
-
- resolver::Resolver resolver(this);
- ASSERT_TRUE(resolver.Resolve()) << resolver.error();
-
- auto* append = AppendVector(this, vec_12, scalar_3);
-
- auto* vec_123 = As<ast::CallExpression>(append->Declaration());
- ASSERT_NE(vec_123, nullptr);
- ASSERT_EQ(vec_123->args.size(), 2u);
- EXPECT_EQ(vec_123->args[0], vec_12);
- EXPECT_EQ(vec_123->args[1], scalar_3);
-
- auto* call = Sem().Get(vec_123);
- ASSERT_NE(call, nullptr);
- ASSERT_EQ(call->Arguments().size(), 2u);
- EXPECT_EQ(call->Arguments()[0], Sem().Get(vec_12));
- EXPECT_EQ(call->Arguments()[1], Sem().Get(scalar_3));
-
- auto* ctor = call->Target()->As<sem::TypeConstructor>();
- ASSERT_NE(ctor, nullptr);
- ASSERT_TRUE(ctor->ReturnType()->Is<sem::Vector>());
- EXPECT_EQ(ctor->ReturnType()->As<sem::Vector>()->Width(), 3u);
- EXPECT_TRUE(ctor->ReturnType()->As<sem::Vector>()->type()->Is<sem::I32>());
- EXPECT_EQ(ctor->ReturnType(), call->Type());
-
- ASSERT_EQ(ctor->Parameters().size(), 2u);
- EXPECT_TRUE(ctor->Parameters()[0]->Type()->Is<sem::Vector>());
- EXPECT_TRUE(ctor->Parameters()[1]->Type()->Is<sem::I32>());
+ Global("vec_12", ty.vec2<i32>(), ast::StorageClass::kPrivate);
+ Global("scalar_3", ty.i32(), ast::StorageClass::kPrivate);
+ auto* vec_12 = Expr("vec_12");
+ auto* scalar_3 = Expr("scalar_3");
+ WrapInFunction(vec_12, scalar_3);
+
+ resolver::Resolver resolver(this);
+ ASSERT_TRUE(resolver.Resolve()) << resolver.error();
+
+ auto* append = AppendVector(this, vec_12, scalar_3);
+
+ auto* vec_123 = As<ast::CallExpression>(append->Declaration());
+ ASSERT_NE(vec_123, nullptr);
+ ASSERT_EQ(vec_123->args.size(), 2u);
+ EXPECT_EQ(vec_123->args[0], vec_12);
+ EXPECT_EQ(vec_123->args[1], scalar_3);
+
+ auto* call = Sem().Get<sem::Call>(vec_123);
+ ASSERT_NE(call, nullptr);
+ ASSERT_EQ(call->Arguments().size(), 2u);
+ EXPECT_EQ(call->Arguments()[0], Sem().Get(vec_12));
+ EXPECT_EQ(call->Arguments()[1], Sem().Get(scalar_3));
+
+ auto* ctor = call->Target()->As<sem::TypeConstructor>();
+ ASSERT_NE(ctor, nullptr);
+ ASSERT_TRUE(ctor->ReturnType()->Is<sem::Vector>());
+ EXPECT_EQ(ctor->ReturnType()->As<sem::Vector>()->Width(), 3u);
+ EXPECT_TRUE(ctor->ReturnType()->As<sem::Vector>()->type()->Is<sem::I32>());
+ EXPECT_EQ(ctor->ReturnType(), call->Type());
+
+ ASSERT_EQ(ctor->Parameters().size(), 2u);
+ EXPECT_TRUE(ctor->Parameters()[0]->Type()->Is<sem::Vector>());
+ EXPECT_TRUE(ctor->Parameters()[1]->Type()->Is<sem::I32>());
}
// AppendVector(vec_12, scalar_3) -> vec3<i32>(vec_12, i32(scalar_3))
TEST_F(AppendVectorTest, Vec2i32Var_f32Var) {
- Global("vec_12", ty.vec2<i32>(), ast::StorageClass::kPrivate);
- Global("scalar_3", ty.f32(), ast::StorageClass::kPrivate);
- auto* vec_12 = Expr("vec_12");
- auto* scalar_3 = Expr("scalar_3");
- WrapInFunction(vec_12, scalar_3);
-
- resolver::Resolver resolver(this);
- ASSERT_TRUE(resolver.Resolve()) << resolver.error();
-
- auto* append = AppendVector(this, vec_12, scalar_3);
-
- auto* vec_123 = As<ast::CallExpression>(append->Declaration());
- ASSERT_NE(vec_123, nullptr);
- ASSERT_EQ(vec_123->args.size(), 2u);
- EXPECT_EQ(vec_123->args[0], vec_12);
- auto* f32_to_i32 = vec_123->args[1]->As<ast::CallExpression>();
- ASSERT_NE(f32_to_i32, nullptr);
- EXPECT_TRUE(f32_to_i32->target.type->Is<ast::I32>());
- ASSERT_EQ(f32_to_i32->args.size(), 1u);
- EXPECT_EQ(f32_to_i32->args[0], scalar_3);
-
- auto* call = Sem().Get(vec_123);
- ASSERT_NE(call, nullptr);
- ASSERT_EQ(call->Arguments().size(), 2u);
- EXPECT_EQ(call->Arguments()[0], Sem().Get(vec_12));
- EXPECT_EQ(call->Arguments()[1], Sem().Get(f32_to_i32));
-
- auto* ctor = call->Target()->As<sem::TypeConstructor>();
- ASSERT_NE(ctor, nullptr);
- ASSERT_TRUE(ctor->ReturnType()->Is<sem::Vector>());
- EXPECT_EQ(ctor->ReturnType()->As<sem::Vector>()->Width(), 3u);
- EXPECT_TRUE(ctor->ReturnType()->As<sem::Vector>()->type()->Is<sem::I32>());
- EXPECT_EQ(ctor->ReturnType(), call->Type());
-
- ASSERT_EQ(ctor->Parameters().size(), 2u);
- EXPECT_TRUE(ctor->Parameters()[0]->Type()->Is<sem::Vector>());
- EXPECT_TRUE(ctor->Parameters()[1]->Type()->Is<sem::I32>());
+ Global("vec_12", ty.vec2<i32>(), ast::StorageClass::kPrivate);
+ Global("scalar_3", ty.f32(), ast::StorageClass::kPrivate);
+ auto* vec_12 = Expr("vec_12");
+ auto* scalar_3 = Expr("scalar_3");
+ WrapInFunction(vec_12, scalar_3);
+
+ resolver::Resolver resolver(this);
+ ASSERT_TRUE(resolver.Resolve()) << resolver.error();
+
+ auto* append = AppendVector(this, vec_12, scalar_3);
+
+ auto* vec_123 = As<ast::CallExpression>(append->Declaration());
+ ASSERT_NE(vec_123, nullptr);
+ ASSERT_EQ(vec_123->args.size(), 2u);
+ EXPECT_EQ(vec_123->args[0], vec_12);
+ auto* f32_to_i32 = vec_123->args[1]->As<ast::CallExpression>();
+ ASSERT_NE(f32_to_i32, nullptr);
+ EXPECT_TRUE(f32_to_i32->target.type->Is<ast::I32>());
+ ASSERT_EQ(f32_to_i32->args.size(), 1u);
+ EXPECT_EQ(f32_to_i32->args[0], scalar_3);
+
+ auto* call = Sem().Get<sem::Call>(vec_123);
+ ASSERT_NE(call, nullptr);
+ ASSERT_EQ(call->Arguments().size(), 2u);
+ EXPECT_EQ(call->Arguments()[0], Sem().Get(vec_12));
+ EXPECT_EQ(call->Arguments()[1], Sem().Get(f32_to_i32));
+
+ auto* ctor = call->Target()->As<sem::TypeConstructor>();
+ ASSERT_NE(ctor, nullptr);
+ ASSERT_TRUE(ctor->ReturnType()->Is<sem::Vector>());
+ EXPECT_EQ(ctor->ReturnType()->As<sem::Vector>()->Width(), 3u);
+ EXPECT_TRUE(ctor->ReturnType()->As<sem::Vector>()->type()->Is<sem::I32>());
+ EXPECT_EQ(ctor->ReturnType(), call->Type());
+
+ ASSERT_EQ(ctor->Parameters().size(), 2u);
+ EXPECT_TRUE(ctor->Parameters()[0]->Type()->Is<sem::Vector>());
+ EXPECT_TRUE(ctor->Parameters()[1]->Type()->Is<sem::I32>());
}
// AppendVector(vec_12, scalar_3) -> vec3<bool>(vec_12, scalar_3)
TEST_F(AppendVectorTest, Vec2boolVar_boolVar) {
- Global("vec_12", ty.vec2<bool>(), ast::StorageClass::kPrivate);
- Global("scalar_3", ty.bool_(), ast::StorageClass::kPrivate);
- auto* vec_12 = Expr("vec_12");
- auto* scalar_3 = Expr("scalar_3");
- WrapInFunction(vec_12, scalar_3);
-
- resolver::Resolver resolver(this);
- ASSERT_TRUE(resolver.Resolve()) << resolver.error();
-
- auto* append = AppendVector(this, vec_12, scalar_3);
-
- auto* vec_123 = As<ast::CallExpression>(append->Declaration());
- ASSERT_NE(vec_123, nullptr);
- ASSERT_EQ(vec_123->args.size(), 2u);
- EXPECT_EQ(vec_123->args[0], vec_12);
- EXPECT_EQ(vec_123->args[1], scalar_3);
-
- auto* call = Sem().Get(vec_123);
- ASSERT_NE(call, nullptr);
- ASSERT_EQ(call->Arguments().size(), 2u);
- EXPECT_EQ(call->Arguments()[0], Sem().Get(vec_12));
- EXPECT_EQ(call->Arguments()[1], Sem().Get(scalar_3));
-
- auto* ctor = call->Target()->As<sem::TypeConstructor>();
- ASSERT_NE(ctor, nullptr);
- ASSERT_TRUE(ctor->ReturnType()->Is<sem::Vector>());
- EXPECT_EQ(ctor->ReturnType()->As<sem::Vector>()->Width(), 3u);
- EXPECT_TRUE(ctor->ReturnType()->As<sem::Vector>()->type()->Is<sem::Bool>());
- EXPECT_EQ(ctor->ReturnType(), call->Type());
-
- ASSERT_EQ(ctor->Parameters().size(), 2u);
- EXPECT_TRUE(ctor->Parameters()[0]->Type()->Is<sem::Vector>());
- EXPECT_TRUE(ctor->Parameters()[1]->Type()->Is<sem::Bool>());
+ Global("vec_12", ty.vec2<bool>(), ast::StorageClass::kPrivate);
+ Global("scalar_3", ty.bool_(), ast::StorageClass::kPrivate);
+ auto* vec_12 = Expr("vec_12");
+ auto* scalar_3 = Expr("scalar_3");
+ WrapInFunction(vec_12, scalar_3);
+
+ resolver::Resolver resolver(this);
+ ASSERT_TRUE(resolver.Resolve()) << resolver.error();
+
+ auto* append = AppendVector(this, vec_12, scalar_3);
+
+ auto* vec_123 = As<ast::CallExpression>(append->Declaration());
+ ASSERT_NE(vec_123, nullptr);
+ ASSERT_EQ(vec_123->args.size(), 2u);
+ EXPECT_EQ(vec_123->args[0], vec_12);
+ EXPECT_EQ(vec_123->args[1], scalar_3);
+
+ auto* call = Sem().Get<sem::Call>(vec_123);
+ ASSERT_NE(call, nullptr);
+ ASSERT_EQ(call->Arguments().size(), 2u);
+ EXPECT_EQ(call->Arguments()[0], Sem().Get(vec_12));
+ EXPECT_EQ(call->Arguments()[1], Sem().Get(scalar_3));
+
+ auto* ctor = call->Target()->As<sem::TypeConstructor>();
+ ASSERT_NE(ctor, nullptr);
+ ASSERT_TRUE(ctor->ReturnType()->Is<sem::Vector>());
+ EXPECT_EQ(ctor->ReturnType()->As<sem::Vector>()->Width(), 3u);
+ EXPECT_TRUE(ctor->ReturnType()->As<sem::Vector>()->type()->Is<sem::Bool>());
+ EXPECT_EQ(ctor->ReturnType(), call->Type());
+
+ ASSERT_EQ(ctor->Parameters().size(), 2u);
+ EXPECT_TRUE(ctor->Parameters()[0]->Type()->Is<sem::Vector>());
+ EXPECT_TRUE(ctor->Parameters()[1]->Type()->Is<sem::Bool>());
}
// AppendVector(vec3<i32>(), 4) -> vec4<i32>(0, 0, 0, 4)
TEST_F(AppendVectorTest, ZeroVec3i32_i32) {
- auto* scalar = Expr(4);
- auto* vec000 = vec3<i32>();
- WrapInFunction(vec000, scalar);
-
- resolver::Resolver resolver(this);
- ASSERT_TRUE(resolver.Resolve()) << resolver.error();
-
- auto* append = AppendVector(this, vec000, scalar);
-
- auto* vec_0004 = As<ast::CallExpression>(append->Declaration());
- ASSERT_NE(vec_0004, nullptr);
- ASSERT_EQ(vec_0004->args.size(), 4u);
- for (size_t i = 0; i < 3; i++) {
- auto* literal = As<ast::SintLiteralExpression>(vec_0004->args[i]);
- ASSERT_NE(literal, nullptr);
- EXPECT_EQ(literal->value, 0);
- }
- EXPECT_EQ(vec_0004->args[3], scalar);
-
- auto* call = Sem().Get(vec_0004);
- ASSERT_NE(call, nullptr);
- ASSERT_EQ(call->Arguments().size(), 4u);
- EXPECT_EQ(call->Arguments()[0], Sem().Get(vec_0004->args[0]));
- EXPECT_EQ(call->Arguments()[1], Sem().Get(vec_0004->args[1]));
- EXPECT_EQ(call->Arguments()[2], Sem().Get(vec_0004->args[2]));
- EXPECT_EQ(call->Arguments()[3], Sem().Get(scalar));
-
- auto* ctor = call->Target()->As<sem::TypeConstructor>();
- ASSERT_NE(ctor, nullptr);
- ASSERT_TRUE(ctor->ReturnType()->Is<sem::Vector>());
- EXPECT_EQ(ctor->ReturnType()->As<sem::Vector>()->Width(), 4u);
- EXPECT_TRUE(ctor->ReturnType()->As<sem::Vector>()->type()->Is<sem::I32>());
- EXPECT_EQ(ctor->ReturnType(), call->Type());
-
- ASSERT_EQ(ctor->Parameters().size(), 4u);
- EXPECT_TRUE(ctor->Parameters()[0]->Type()->Is<sem::I32>());
- EXPECT_TRUE(ctor->Parameters()[1]->Type()->Is<sem::I32>());
- EXPECT_TRUE(ctor->Parameters()[2]->Type()->Is<sem::I32>());
- EXPECT_TRUE(ctor->Parameters()[3]->Type()->Is<sem::I32>());
+ auto* scalar = Expr(4_i);
+ auto* vec000 = vec3<i32>();
+ WrapInFunction(vec000, scalar);
+
+ resolver::Resolver resolver(this);
+ ASSERT_TRUE(resolver.Resolve()) << resolver.error();
+
+ auto* append = AppendVector(this, vec000, scalar);
+
+ auto* vec_0004 = As<ast::CallExpression>(append->Declaration());
+ ASSERT_NE(vec_0004, nullptr);
+ ASSERT_EQ(vec_0004->args.size(), 4u);
+ for (size_t i = 0; i < 3; i++) {
+ auto* literal = As<ast::IntLiteralExpression>(vec_0004->args[i]);
+ ASSERT_NE(literal, nullptr);
+ EXPECT_EQ(literal->value, 0);
+ }
+ EXPECT_EQ(vec_0004->args[3], scalar);
+
+ auto* call = Sem().Get<sem::Call>(vec_0004);
+ ASSERT_NE(call, nullptr);
+ ASSERT_EQ(call->Arguments().size(), 4u);
+ EXPECT_EQ(call->Arguments()[0], Sem().Get(vec_0004->args[0]));
+ EXPECT_EQ(call->Arguments()[1], Sem().Get(vec_0004->args[1]));
+ EXPECT_EQ(call->Arguments()[2], Sem().Get(vec_0004->args[2]));
+ EXPECT_EQ(call->Arguments()[3], Sem().Get(scalar));
+
+ auto* ctor = call->Target()->As<sem::TypeConstructor>();
+ ASSERT_NE(ctor, nullptr);
+ ASSERT_TRUE(ctor->ReturnType()->Is<sem::Vector>());
+ EXPECT_EQ(ctor->ReturnType()->As<sem::Vector>()->Width(), 4u);
+ EXPECT_TRUE(ctor->ReturnType()->As<sem::Vector>()->type()->Is<sem::I32>());
+ EXPECT_EQ(ctor->ReturnType(), call->Type());
+
+ ASSERT_EQ(ctor->Parameters().size(), 4u);
+ EXPECT_TRUE(ctor->Parameters()[0]->Type()->Is<sem::I32>());
+ EXPECT_TRUE(ctor->Parameters()[1]->Type()->Is<sem::I32>());
+ EXPECT_TRUE(ctor->Parameters()[2]->Type()->Is<sem::I32>());
+ EXPECT_TRUE(ctor->Parameters()[3]->Type()->Is<sem::I32>());
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/writer/array_length_from_uniform_options.cc b/chromium/third_party/dawn/src/tint/writer/array_length_from_uniform_options.cc
index 2fdd43642b3..7fd6e63b752 100644
--- a/chromium/third_party/dawn/src/tint/writer/array_length_from_uniform_options.cc
+++ b/chromium/third_party/dawn/src/tint/writer/array_length_from_uniform_options.cc
@@ -18,11 +18,11 @@ namespace tint::writer {
ArrayLengthFromUniformOptions::ArrayLengthFromUniformOptions() = default;
ArrayLengthFromUniformOptions::~ArrayLengthFromUniformOptions() = default;
-ArrayLengthFromUniformOptions::ArrayLengthFromUniformOptions(
- const ArrayLengthFromUniformOptions&) = default;
+ArrayLengthFromUniformOptions::ArrayLengthFromUniformOptions(const ArrayLengthFromUniformOptions&) =
+ default;
ArrayLengthFromUniformOptions& ArrayLengthFromUniformOptions::operator=(
const ArrayLengthFromUniformOptions&) = default;
-ArrayLengthFromUniformOptions::ArrayLengthFromUniformOptions(
- ArrayLengthFromUniformOptions&&) = default;
+ArrayLengthFromUniformOptions::ArrayLengthFromUniformOptions(ArrayLengthFromUniformOptions&&) =
+ default;
} // namespace tint::writer
diff --git a/chromium/third_party/dawn/src/tint/writer/array_length_from_uniform_options.h b/chromium/third_party/dawn/src/tint/writer/array_length_from_uniform_options.h
index e672dde5808..cfa2fbd8735 100644
--- a/chromium/third_party/dawn/src/tint/writer/array_length_from_uniform_options.h
+++ b/chromium/third_party/dawn/src/tint/writer/array_length_from_uniform_options.h
@@ -24,28 +24,27 @@ namespace tint::writer {
/// Options used to specify a mapping of binding points to indices into a UBO
/// from which to load buffer sizes.
struct ArrayLengthFromUniformOptions {
- /// Constructor
- ArrayLengthFromUniformOptions();
- /// Destructor
- ~ArrayLengthFromUniformOptions();
- /// Copy constructor
- ArrayLengthFromUniformOptions(const ArrayLengthFromUniformOptions&);
- /// Copy assignment
- /// @returns this ArrayLengthFromUniformOptions
- ArrayLengthFromUniformOptions& operator=(
- const ArrayLengthFromUniformOptions&);
- /// Move constructor
- ArrayLengthFromUniformOptions(ArrayLengthFromUniformOptions&&);
-
- /// The binding point to use to generate a uniform buffer from which to read
- /// buffer sizes.
- sem::BindingPoint ubo_binding;
- /// The mapping from storage buffer binding points to the index into the
- /// uniform buffer where the length of the buffer is stored.
- std::unordered_map<sem::BindingPoint, uint32_t> bindpoint_to_size_index;
-
- // NOTE: Update src/tint/fuzzers/data_builder.h when adding or changing any
- // struct members.
+ /// Constructor
+ ArrayLengthFromUniformOptions();
+ /// Destructor
+ ~ArrayLengthFromUniformOptions();
+ /// Copy constructor
+ ArrayLengthFromUniformOptions(const ArrayLengthFromUniformOptions&);
+ /// Copy assignment
+ /// @returns this ArrayLengthFromUniformOptions
+ ArrayLengthFromUniformOptions& operator=(const ArrayLengthFromUniformOptions&);
+ /// Move constructor
+ ArrayLengthFromUniformOptions(ArrayLengthFromUniformOptions&&);
+
+ /// The binding point to use to generate a uniform buffer from which to read
+ /// buffer sizes.
+ sem::BindingPoint ubo_binding;
+ /// The mapping from storage buffer binding points to the index into the
+ /// uniform buffer where the length of the buffer is stored.
+ std::unordered_map<sem::BindingPoint, uint32_t> bindpoint_to_size_index;
+
+ // NOTE: Update src/tint/fuzzers/data_builder.h when adding or changing any
+ // struct members.
};
} // namespace tint::writer
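A minimal sketch of how a caller might populate these options (the binding and index numbers below are illustrative, not taken from this change): the lengths of the storage buffers bound at (0, 1) and (0, 2) are read from elements 0 and 1 of a uniform buffer bound at (0, 30).

    tint::writer::ArrayLengthFromUniformOptions opts;
    opts.ubo_binding = {0, 30};                // uniform buffer holding the sizes
    opts.bindpoint_to_size_index[{0, 1}] = 0;  // length of buffer @group(0) @binding(1)
    opts.bindpoint_to_size_index[{0, 2}] = 1;  // length of buffer @group(0) @binding(2)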
diff --git a/chromium/third_party/dawn/src/tint/writer/flatten_bindings.cc b/chromium/third_party/dawn/src/tint/writer/flatten_bindings.cc
new file mode 100644
index 00000000000..1efc02a87f1
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/writer/flatten_bindings.cc
@@ -0,0 +1,78 @@
+// Copyright 2022 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/tint/writer/flatten_bindings.h"
+
+#include <utility>
+
+#include "src/tint/inspector/inspector.h"
+#include "src/tint/transform/binding_remapper.h"
+#include "src/tint/transform/manager.h"
+
+namespace tint::writer {
+std::optional<Program> FlattenBindings(const Program* program) {
+ // TODO(crbug.com/tint/1101): Make this more robust for multiple entry points.
+ using BindingPoint = tint::transform::BindingPoint;
+ tint::transform::BindingRemapper::BindingPoints binding_points;
+ uint32_t next_buffer_idx = 0;
+ uint32_t next_sampler_idx = 0;
+ uint32_t next_texture_idx = 0;
+
+ tint::inspector::Inspector inspector(program);
+ auto entry_points = inspector.GetEntryPoints();
+ for (auto& entry_point : entry_points) {
+ auto bindings = inspector.GetResourceBindings(entry_point.name);
+ for (auto& binding : bindings) {
+ BindingPoint src = {binding.bind_group, binding.binding};
+ if (binding_points.count(src)) {
+ continue;
+ }
+ switch (binding.resource_type) {
+ case tint::inspector::ResourceBinding::ResourceType::kUniformBuffer:
+ case tint::inspector::ResourceBinding::ResourceType::kStorageBuffer:
+ case tint::inspector::ResourceBinding::ResourceType::kReadOnlyStorageBuffer:
+ binding_points.emplace(src, BindingPoint{0, next_buffer_idx++});
+ break;
+ case tint::inspector::ResourceBinding::ResourceType::kSampler:
+ case tint::inspector::ResourceBinding::ResourceType::kComparisonSampler:
+ binding_points.emplace(src, BindingPoint{0, next_sampler_idx++});
+ break;
+ case tint::inspector::ResourceBinding::ResourceType::kSampledTexture:
+ case tint::inspector::ResourceBinding::ResourceType::kMultisampledTexture:
+ case tint::inspector::ResourceBinding::ResourceType::kWriteOnlyStorageTexture:
+ case tint::inspector::ResourceBinding::ResourceType::kDepthTexture:
+ case tint::inspector::ResourceBinding::ResourceType::kDepthMultisampledTexture:
+ case tint::inspector::ResourceBinding::ResourceType::kExternalTexture:
+ binding_points.emplace(src, BindingPoint{0, next_texture_idx++});
+ break;
+ }
+ }
+ }
+
+ // Run the binding remapper transform.
+ tint::transform::Output transform_output;
+ if (!binding_points.empty()) {
+ tint::transform::Manager manager;
+ tint::transform::DataMap inputs;
+ inputs.Add<tint::transform::BindingRemapper::Remappings>(
+ std::move(binding_points), tint::transform::BindingRemapper::AccessControls{},
+ /* mayCollide */ true);
+ manager.Add<tint::transform::BindingRemapper>();
+ transform_output = manager.Run(program, inputs);
+ return std::move(transform_output.program);
+ }
+
+ return {};
+}
+} // namespace tint::writer
diff --git a/chromium/third_party/dawn/src/tint/writer/flatten_bindings.h b/chromium/third_party/dawn/src/tint/writer/flatten_bindings.h
new file mode 100644
index 00000000000..02505715fe0
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/writer/flatten_bindings.h
@@ -0,0 +1,31 @@
+// Copyright 2022 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef SRC_TINT_WRITER_FLATTEN_BINDINGS_H_
+#define SRC_TINT_WRITER_FLATTEN_BINDINGS_H_
+
+#include <optional>
+#include "src/tint/program.h"
+
+namespace tint::writer {
+
+/// If needed, remaps resource numbers of `program` to a flat namespace: all in
+/// group 0 with unique binding numbers.
+/// @param program A valid program
+/// @return A new program with bindings remapped if needed
+std::optional<Program> FlattenBindings(const Program* program);
+
+} // namespace tint::writer
+
+#endif // SRC_TINT_WRITER_FLATTEN_BINDINGS_H_
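A minimal usage sketch (assuming a valid tint::Program named program; the variable names are illustrative, not part of this change):

    if (auto flattened = tint::writer::FlattenBindings(&program)) {
        // Every resource now lives in group 0 with a unique binding number.
        program = std::move(*flattened);
    }
    // An empty optional means no remapping was performed, so the original
    // program can be used as-is.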
diff --git a/chromium/third_party/dawn/src/tint/writer/flatten_bindings_test.cc b/chromium/third_party/dawn/src/tint/writer/flatten_bindings_test.cc
new file mode 100644
index 00000000000..1c516c92e5f
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/writer/flatten_bindings_test.cc
@@ -0,0 +1,142 @@
+// Copyright 2022 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/tint/writer/flatten_bindings.h"
+
+#include <utility>
+
+#include "gtest/gtest.h"
+#include "src/tint/program_builder.h"
+#include "src/tint/resolver/resolver.h"
+#include "src/tint/sem/variable.h"
+
+namespace tint::writer {
+namespace {
+
+class FlattenBindingsTest : public ::testing::Test {};
+
+TEST_F(FlattenBindingsTest, NoBindings) {
+ ProgramBuilder b;
+ b.WrapInFunction();
+
+ resolver::Resolver resolver(&b);
+
+ Program program(std::move(b));
+ ASSERT_TRUE(program.IsValid()) << program.Diagnostics().str();
+
+ auto flattened = tint::writer::FlattenBindings(&program);
+ EXPECT_FALSE(flattened);
+}
+
+TEST_F(FlattenBindingsTest, AlreadyFlat) {
+ ProgramBuilder b;
+ b.Global("a", b.ty.i32(), ast::StorageClass::kUniform, b.GroupAndBinding(0, 0));
+ b.Global("b", b.ty.i32(), ast::StorageClass::kUniform, b.GroupAndBinding(0, 1));
+ b.Global("c", b.ty.i32(), ast::StorageClass::kUniform, b.GroupAndBinding(0, 2));
+ b.WrapInFunction();
+
+ resolver::Resolver resolver(&b);
+
+ Program program(std::move(b));
+ ASSERT_TRUE(program.IsValid()) << program.Diagnostics().str();
+
+ auto flattened = tint::writer::FlattenBindings(&program);
+ EXPECT_FALSE(flattened);
+}
+
+TEST_F(FlattenBindingsTest, NotFlat_SingleNamespace) {
+ ProgramBuilder b;
+ b.Global("a", b.ty.i32(), ast::StorageClass::kUniform, b.GroupAndBinding(0, 0));
+ b.Global("b", b.ty.i32(), ast::StorageClass::kUniform, b.GroupAndBinding(1, 1));
+ b.Global("c", b.ty.i32(), ast::StorageClass::kUniform, b.GroupAndBinding(2, 2));
+ b.WrapInFunction(b.Expr("a"), b.Expr("b"), b.Expr("c"));
+
+ resolver::Resolver resolver(&b);
+
+ Program program(std::move(b));
+ ASSERT_TRUE(program.IsValid()) << program.Diagnostics().str();
+
+ auto flattened = tint::writer::FlattenBindings(&program);
+ EXPECT_TRUE(flattened);
+
+ auto& vars = flattened->AST().GlobalVariables();
+ EXPECT_EQ(vars[0]->BindingPoint().group->value, 0u);
+ EXPECT_EQ(vars[0]->BindingPoint().binding->value, 0u);
+ EXPECT_EQ(vars[1]->BindingPoint().group->value, 0u);
+ EXPECT_EQ(vars[1]->BindingPoint().binding->value, 1u);
+ EXPECT_EQ(vars[2]->BindingPoint().group->value, 0u);
+ EXPECT_EQ(vars[2]->BindingPoint().binding->value, 2u);
+}
+
+TEST_F(FlattenBindingsTest, NotFlat_MultipleNamespaces) {
+ ProgramBuilder b;
+
+ const size_t num_buffers = 3;
+ b.Global("buffer1", b.ty.i32(), ast::StorageClass::kUniform, b.GroupAndBinding(0, 0));
+ b.Global("buffer2", b.ty.i32(), ast::StorageClass::kStorage, b.GroupAndBinding(1, 1));
+ b.Global("buffer3", b.ty.i32(), ast::StorageClass::kStorage, ast::Access::kRead,
+ b.GroupAndBinding(2, 2));
+
+ const size_t num_samplers = 2;
+ b.Global("sampler1", b.ty.sampler(ast::SamplerKind::kSampler), b.GroupAndBinding(3, 3));
+ b.Global("sampler2", b.ty.sampler(ast::SamplerKind::kComparisonSampler),
+ b.GroupAndBinding(4, 4));
+
+ const size_t num_textures = 6;
+ b.Global("texture1", b.ty.sampled_texture(ast::TextureDimension::k2d, b.ty.f32()),
+ b.GroupAndBinding(5, 5));
+ b.Global("texture2", b.ty.multisampled_texture(ast::TextureDimension::k2d, b.ty.f32()),
+ b.GroupAndBinding(6, 6));
+ b.Global("texture3",
+ b.ty.storage_texture(ast::TextureDimension::k2d, ast::TexelFormat::kR32Float,
+ ast::Access::kWrite),
+ b.GroupAndBinding(7, 7));
+ b.Global("texture4", b.ty.depth_texture(ast::TextureDimension::k2d), b.GroupAndBinding(8, 8));
+ b.Global("texture5", b.ty.depth_multisampled_texture(ast::TextureDimension::k2d),
+ b.GroupAndBinding(9, 9));
+ b.Global("texture6", b.ty.external_texture(), b.GroupAndBinding(10, 10));
+
+ b.WrapInFunction(b.Assign(b.Phony(), "buffer1"), b.Assign(b.Phony(), "buffer2"),
+ b.Assign(b.Phony(), "buffer3"), b.Assign(b.Phony(), "sampler1"),
+ b.Assign(b.Phony(), "sampler2"), b.Assign(b.Phony(), "texture1"),
+ b.Assign(b.Phony(), "texture2"), b.Assign(b.Phony(), "texture3"),
+ b.Assign(b.Phony(), "texture4"), b.Assign(b.Phony(), "texture5"),
+ b.Assign(b.Phony(), "texture6"));
+
+ resolver::Resolver resolver(&b);
+
+ Program program(std::move(b));
+ ASSERT_TRUE(program.IsValid()) << program.Diagnostics().str();
+
+ auto flattened = tint::writer::FlattenBindings(&program);
+ EXPECT_TRUE(flattened);
+
+ auto& vars = flattened->AST().GlobalVariables();
+
+ for (size_t i = 0; i < num_buffers; ++i) {
+ EXPECT_EQ(vars[i]->BindingPoint().group->value, 0u);
+ EXPECT_EQ(vars[i]->BindingPoint().binding->value, i);
+ }
+ for (size_t i = 0; i < num_samplers; ++i) {
+ EXPECT_EQ(vars[i + num_buffers]->BindingPoint().group->value, 0u);
+ EXPECT_EQ(vars[i + num_buffers]->BindingPoint().binding->value, i);
+ }
+ for (size_t i = 0; i < num_textures; ++i) {
+ EXPECT_EQ(vars[i + num_buffers + num_samplers]->BindingPoint().group->value, 0u);
+ EXPECT_EQ(vars[i + num_buffers + num_samplers]->BindingPoint().binding->value, i);
+ }
+}
+
+} // namespace
+} // namespace tint::writer
diff --git a/chromium/third_party/dawn/src/tint/writer/float_to_string.cc b/chromium/third_party/dawn/src/tint/writer/float_to_string.cc
index 64885fe9fb2..e0c3260d546 100644
--- a/chromium/third_party/dawn/src/tint/writer/float_to_string.cc
+++ b/chromium/third_party/dawn/src/tint/writer/float_to_string.cc
@@ -26,131 +26,130 @@
namespace tint::writer {
std::string FloatToString(float f) {
- // Try printing the float in fixed point, with a smallish limit on the
- // precision
- std::stringstream fixed;
- fixed.flags(fixed.flags() | std::ios_base::showpoint | std::ios_base::fixed);
- fixed.precision(9);
- fixed << f;
-
- // If this string can be parsed without loss of information, use it
- auto float_equal_no_warning = std::equal_to<float>();
- if (float_equal_no_warning(std::stof(fixed.str()), f)) {
- auto str = fixed.str();
- while (str.length() >= 2 && str[str.size() - 1] == '0' &&
- str[str.size() - 2] != '.') {
- str.pop_back();
- }
+ // Try printing the float in fixed point, with a smallish limit on the
+ // precision
+ std::stringstream fixed;
+ fixed.flags(fixed.flags() | std::ios_base::showpoint | std::ios_base::fixed);
+ fixed.precision(9);
+ fixed << f;
+
+ // If this string can be parsed without loss of information, use it
+ auto float_equal_no_warning = std::equal_to<float>();
+ if (float_equal_no_warning(std::stof(fixed.str()), f)) {
+ auto str = fixed.str();
+ while (str.length() >= 2 && str[str.size() - 1] == '0' && str[str.size() - 2] != '.') {
+ str.pop_back();
+ }
- return str;
- }
+ return str;
+ }
- // Resort to scientific, with the minimum precision needed to preserve the
- // whole float
- std::stringstream sci;
- sci.precision(std::numeric_limits<float>::max_digits10);
- sci << f;
- return sci.str();
+ // Resort to scientific, with the minimum precision needed to preserve the
+ // whole float
+ std::stringstream sci;
+ sci.precision(std::numeric_limits<float>::max_digits10);
+ sci << f;
+ return sci.str();
}
std::string FloatToBitPreservingString(float f) {
- // For the NaN case, avoid handling the number as a floating point value.
- // Some machines will modify the top bit in the mantissa of a NaN.
-
- std::stringstream ss;
-
- uint32_t float_bits = 0u;
- std::memcpy(&float_bits, &f, sizeof(float_bits));
-
- // Handle the sign.
- const uint32_t kSignMask = 1u << 31;
- if (float_bits & kSignMask) {
- // If `f` is -0.0 print -0.0.
- ss << '-';
- // Strip sign bit.
- float_bits = float_bits & (~kSignMask);
- }
-
- switch (std::fpclassify(f)) {
- case FP_ZERO:
- case FP_NORMAL:
- std::memcpy(&f, &float_bits, sizeof(float_bits));
- ss << FloatToString(f);
- break;
-
- default: {
- // Infinity, NaN, and Subnormal
- // TODO(dneto): It's unclear how Infinity and NaN should be handled.
- // See https://github.com/gpuweb/gpuweb/issues/1769
-
- // std::hexfloat prints 'nan' and 'inf' instead of an
- // explicit representation like we want. Split it out
- // manually.
- const int kExponentBias = 127;
- const int kExponentMask = 0x7f800000;
- const int kMantissaMask = 0x007fffff;
- const int kMantissaBits = 23;
-
- int mantissaNibbles = (kMantissaBits + 3) / 4;
-
- const int biased_exponent =
- static_cast<int>((float_bits & kExponentMask) >> kMantissaBits);
- int exponent = biased_exponent - kExponentBias;
- uint32_t mantissa = float_bits & kMantissaMask;
-
- ss << "0x";
-
- if (exponent == 128) {
- if (mantissa == 0) {
- // Infinity case.
- ss << "1p+128";
- } else {
- // NaN case.
- // Emit the mantissa bits as if they are left-justified after the
- // binary point. This is what SPIRV-Tools hex float emitter does,
- // and it's a justifiable choice independent of the bit width
- // of the mantissa.
- mantissa <<= (4 - (kMantissaBits % 4));
- // Remove trailing zeroes, for tidyness.
- while (0 == (0xf & mantissa)) {
- mantissa >>= 4;
- mantissaNibbles--;
- }
- ss << "1." << std::hex << std::setfill('0')
- << std::setw(mantissaNibbles) << mantissa << "p+128";
- }
- } else {
- // Subnormal, and not zero.
- TINT_ASSERT(Writer, mantissa != 0);
- const int kTopBit = (1 << kMantissaBits);
-
- // Shift left until we get 1.x
- while (0 == (kTopBit & mantissa)) {
- mantissa <<= 1;
- exponent--;
- }
- // Emit the leading 1, and remove it from the mantissa.
- ss << "1";
- mantissa = mantissa ^ kTopBit;
- mantissa <<= 1;
- exponent++;
-
- // Emit the fractional part.
- if (mantissa) {
- // Remove trailing zeroes, for tidyness
- while (0 == (0xf & mantissa)) {
- mantissa >>= 4;
- mantissaNibbles--;
- }
- ss << "." << std::hex << std::setfill('0')
- << std::setw(mantissaNibbles) << mantissa;
+ // For the NaN case, avoid handling the number as a floating point value.
+ // Some machines will modify the top bit in the mantissa of a NaN.
+
+ std::stringstream ss;
+
+ uint32_t float_bits = 0u;
+ std::memcpy(&float_bits, &f, sizeof(float_bits));
+
+ // Handle the sign.
+ const uint32_t kSignMask = 1u << 31;
+ if (float_bits & kSignMask) {
+ // If `f` is -0.0 print -0.0.
+ ss << '-';
+ // Strip sign bit.
+ float_bits = float_bits & (~kSignMask);
+ }
+
+ switch (std::fpclassify(f)) {
+ case FP_ZERO:
+ case FP_NORMAL:
+ std::memcpy(&f, &float_bits, sizeof(float_bits));
+ ss << FloatToString(f);
+ break;
+
+ default: {
+ // Infinity, NaN, and Subnormal
+ // TODO(dneto): It's unclear how Infinity and NaN should be handled.
+ // See https://github.com/gpuweb/gpuweb/issues/1769
+
+ // std::hexfloat prints 'nan' and 'inf' instead of an
+ // explicit representation like we want. Split it out
+ // manually.
+ const int kExponentBias = 127;
+ const int kExponentMask = 0x7f800000;
+ const int kMantissaMask = 0x007fffff;
+ const int kMantissaBits = 23;
+
+ int mantissaNibbles = (kMantissaBits + 3) / 4;
+
+ const int biased_exponent =
+ static_cast<int>((float_bits & kExponentMask) >> kMantissaBits);
+ int exponent = biased_exponent - kExponentBias;
+ uint32_t mantissa = float_bits & kMantissaMask;
+
+ ss << "0x";
+
+ if (exponent == 128) {
+ if (mantissa == 0) {
+ // Infinity case.
+ ss << "1p+128";
+ } else {
+ // NaN case.
+ // Emit the mantissa bits as if they are left-justified after the
+ // binary point. This is what SPIRV-Tools hex float emitter does,
+ // and it's a justifiable choice independent of the bit width
+ // of the mantissa.
+ mantissa <<= (4 - (kMantissaBits % 4));
+                    // Remove trailing zeroes, for tidiness.
+ while (0 == (0xf & mantissa)) {
+ mantissa >>= 4;
+ mantissaNibbles--;
+ }
+ ss << "1." << std::hex << std::setfill('0') << std::setw(mantissaNibbles)
+ << mantissa << "p+128";
+ }
+ } else {
+ // Subnormal, and not zero.
+ TINT_ASSERT(Writer, mantissa != 0);
+ const int kTopBit = (1 << kMantissaBits);
+
+ // Shift left until we get 1.x
+ while (0 == (kTopBit & mantissa)) {
+ mantissa <<= 1;
+ exponent--;
+ }
+ // Emit the leading 1, and remove it from the mantissa.
+ ss << "1";
+ mantissa = mantissa ^ kTopBit;
+ mantissa <<= 1;
+ exponent++;
+
+ // Emit the fractional part.
+ if (mantissa) {
+                    // Remove trailing zeroes, for tidiness
+ while (0 == (0xf & mantissa)) {
+ mantissa >>= 4;
+ mantissaNibbles--;
+ }
+ ss << "." << std::hex << std::setfill('0') << std::setw(mantissaNibbles)
+ << mantissa;
+ }
+ // Emit the exponent
+ ss << "p" << std::showpos << std::dec << exponent;
+ }
}
- // Emit the exponent
- ss << "p" << std::showpos << std::dec << exponent;
- }
}
- }
- return ss.str();
+ return ss.str();
}
} // namespace tint::writer
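As a concrete trace of the subnormal branch above (an illustration of the code in this patch, not part of it), take the largest denormal, whose bit pattern is 0x007fffff:

    mantissa = 0x7fffff, exponent = 0 - 127 = -127
    one left shift sets the top bit   -> mantissa = 0xfffffe, exponent = -128
    emit "1", clear the top bit       -> mantissa = 0x7ffffe
    left-justify with one more shift  -> mantissa = 0xfffffc, exponent = -127
    no trailing zero nibbles, so the fraction is ".fffffc" and the exponent "p-127"
    final string: "0x1.fffffcp-127"

This matches the LargestDenormal expectation in the test file below.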
diff --git a/chromium/third_party/dawn/src/tint/writer/float_to_string_test.cc b/chromium/third_party/dawn/src/tint/writer/float_to_string_test.cc
index 50caa594458..2596be79982 100644
--- a/chromium/third_party/dawn/src/tint/writer/float_to_string_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/float_to_string_test.cc
@@ -28,70 +28,69 @@ namespace {
// - 'exponent_bits' is placed in the exponent space.
// So, the exponent bias must already be included.
float MakeFloat(int sign, int biased_exponent, int mantissa) {
- const uint32_t sign_bit = sign ? 0x80000000u : 0u;
- // The binary32 exponent is 8 bits, just below the sign.
- const uint32_t exponent_bits = (biased_exponent & 0xffu) << 23;
- // The mantissa is the bottom 23 bits.
- const uint32_t mantissa_bits = (mantissa & 0x7fffffu);
+ const uint32_t sign_bit = sign ? 0x80000000u : 0u;
+ // The binary32 exponent is 8 bits, just below the sign.
+ const uint32_t exponent_bits = (biased_exponent & 0xffu) << 23;
+ // The mantissa is the bottom 23 bits.
+ const uint32_t mantissa_bits = (mantissa & 0x7fffffu);
- uint32_t bits = sign_bit | exponent_bits | mantissa_bits;
- float result = 0.0f;
- static_assert(sizeof(result) == sizeof(bits),
- "expected float and uint32_t to be the same size");
- std::memcpy(&result, &bits, sizeof(bits));
- return result;
+ uint32_t bits = sign_bit | exponent_bits | mantissa_bits;
+ float result = 0.0f;
+ static_assert(sizeof(result) == sizeof(bits),
+ "expected float and uint32_t to be the same size");
+ std::memcpy(&result, &bits, sizeof(bits));
+ return result;
}
TEST(FloatToStringTest, Zero) {
- EXPECT_EQ(FloatToString(0.0f), "0.0");
+ EXPECT_EQ(FloatToString(0.0f), "0.0");
}
TEST(FloatToStringTest, One) {
- EXPECT_EQ(FloatToString(1.0f), "1.0");
+ EXPECT_EQ(FloatToString(1.0f), "1.0");
}
TEST(FloatToStringTest, MinusOne) {
- EXPECT_EQ(FloatToString(-1.0f), "-1.0");
+ EXPECT_EQ(FloatToString(-1.0f), "-1.0");
}
TEST(FloatToStringTest, Billion) {
- EXPECT_EQ(FloatToString(1e9f), "1000000000.0");
+ EXPECT_EQ(FloatToString(1e9f), "1000000000.0");
}
TEST(FloatToStringTest, Small) {
- EXPECT_NE(FloatToString(std::numeric_limits<float>::epsilon()), "0.0");
+ EXPECT_NE(FloatToString(std::numeric_limits<float>::epsilon()), "0.0");
}
TEST(FloatToStringTest, Highest) {
- const auto highest = std::numeric_limits<float>::max();
- const auto expected_highest = 340282346638528859811704183484516925440.0f;
- if (highest < expected_highest || highest > expected_highest) {
- GTEST_SKIP() << "std::numeric_limits<float>::max() is not as expected for "
- "this target";
- }
- EXPECT_EQ(FloatToString(std::numeric_limits<float>::max()),
- "340282346638528859811704183484516925440.0");
+ const auto highest = std::numeric_limits<float>::max();
+ const auto expected_highest = 340282346638528859811704183484516925440.0f;
+ if (highest < expected_highest || highest > expected_highest) {
+ GTEST_SKIP() << "std::numeric_limits<float>::max() is not as expected for "
+ "this target";
+ }
+ EXPECT_EQ(FloatToString(std::numeric_limits<float>::max()),
+ "340282346638528859811704183484516925440.0");
}
TEST(FloatToStringTest, Lowest) {
- // Some compilers complain if you test floating point numbers for equality.
- // So say it via two inequalities.
- const auto lowest = std::numeric_limits<float>::lowest();
- const auto expected_lowest = -340282346638528859811704183484516925440.0f;
- if (lowest < expected_lowest || lowest > expected_lowest) {
- GTEST_SKIP()
- << "std::numeric_limits<float>::lowest() is not as expected for "
- "this target";
- }
- EXPECT_EQ(FloatToString(std::numeric_limits<float>::lowest()),
- "-340282346638528859811704183484516925440.0");
+ // Some compilers complain if you test floating point numbers for equality.
+ // So say it via two inequalities.
+ const auto lowest = std::numeric_limits<float>::lowest();
+ const auto expected_lowest = -340282346638528859811704183484516925440.0f;
+ if (lowest < expected_lowest || lowest > expected_lowest) {
+ GTEST_SKIP() << "std::numeric_limits<float>::lowest() is not as expected for "
+ "this target";
+ }
+ EXPECT_EQ(FloatToString(std::numeric_limits<float>::lowest()),
+ "-340282346638528859811704183484516925440.0");
}
TEST(FloatToStringTest, Precision) {
- EXPECT_EQ(FloatToString(1e-8f), "0.00000001");
- EXPECT_EQ(FloatToString(1e-9f), "0.000000001");
- EXPECT_EQ(FloatToString(1e-10f), "1.00000001e-10");
- EXPECT_EQ(FloatToString(1e-20f), "9.99999968e-21");
+ EXPECT_EQ(FloatToString(1e-8f), "0.00000001");
+ EXPECT_EQ(FloatToString(1e-9f), "0.000000001");
+ EXPECT_EQ(FloatToString(1e-10f), "1.00000001e-10");
+ EXPECT_EQ(FloatToString(1e-20f), "9.99999968e-21");
}
// FloatToBitPreservingString
@@ -99,99 +98,92 @@ TEST(FloatToStringTest, Precision) {
// First replicate the tests for FloatToString
TEST(FloatToBitPreservingStringTest, Zero) {
- EXPECT_EQ(FloatToBitPreservingString(0.0f), "0.0");
+ EXPECT_EQ(FloatToBitPreservingString(0.0f), "0.0");
}
TEST(FloatToBitPreservingStringTest, One) {
- EXPECT_EQ(FloatToBitPreservingString(1.0f), "1.0");
+ EXPECT_EQ(FloatToBitPreservingString(1.0f), "1.0");
}
TEST(FloatToBitPreservingStringTest, MinusOne) {
- EXPECT_EQ(FloatToBitPreservingString(-1.0f), "-1.0");
+ EXPECT_EQ(FloatToBitPreservingString(-1.0f), "-1.0");
}
TEST(FloatToBitPreservingStringTest, Billion) {
- EXPECT_EQ(FloatToBitPreservingString(1e9f), "1000000000.0");
+ EXPECT_EQ(FloatToBitPreservingString(1e9f), "1000000000.0");
}
TEST(FloatToBitPreservingStringTest, Small) {
- EXPECT_NE(FloatToBitPreservingString(std::numeric_limits<float>::epsilon()),
- "0.0");
+ EXPECT_NE(FloatToBitPreservingString(std::numeric_limits<float>::epsilon()), "0.0");
}
TEST(FloatToBitPreservingStringTest, Highest) {
- const auto highest = std::numeric_limits<float>::max();
- const auto expected_highest = 340282346638528859811704183484516925440.0f;
- if (highest < expected_highest || highest > expected_highest) {
- GTEST_SKIP() << "std::numeric_limits<float>::max() is not as expected for "
- "this target";
- }
- EXPECT_EQ(FloatToBitPreservingString(std::numeric_limits<float>::max()),
- "340282346638528859811704183484516925440.0");
+ const auto highest = std::numeric_limits<float>::max();
+ const auto expected_highest = 340282346638528859811704183484516925440.0f;
+ if (highest < expected_highest || highest > expected_highest) {
+ GTEST_SKIP() << "std::numeric_limits<float>::max() is not as expected for "
+ "this target";
+ }
+ EXPECT_EQ(FloatToBitPreservingString(std::numeric_limits<float>::max()),
+ "340282346638528859811704183484516925440.0");
}
TEST(FloatToBitPreservingStringTest, Lowest) {
- // Some compilers complain if you test floating point numbers for equality.
- // So say it via two inequalities.
- const auto lowest = std::numeric_limits<float>::lowest();
- const auto expected_lowest = -340282346638528859811704183484516925440.0f;
- if (lowest < expected_lowest || lowest > expected_lowest) {
- GTEST_SKIP()
- << "std::numeric_limits<float>::lowest() is not as expected for "
- "this target";
- }
- EXPECT_EQ(FloatToBitPreservingString(std::numeric_limits<float>::lowest()),
- "-340282346638528859811704183484516925440.0");
+ // Some compilers complain if you test floating point numbers for equality.
+ // So say it via two inequalities.
+ const auto lowest = std::numeric_limits<float>::lowest();
+ const auto expected_lowest = -340282346638528859811704183484516925440.0f;
+ if (lowest < expected_lowest || lowest > expected_lowest) {
+ GTEST_SKIP() << "std::numeric_limits<float>::lowest() is not as expected for "
+ "this target";
+ }
+ EXPECT_EQ(FloatToBitPreservingString(std::numeric_limits<float>::lowest()),
+ "-340282346638528859811704183484516925440.0");
}
// Special cases for bit-preserving output.
TEST(FloatToBitPreservingStringTest, NegativeZero) {
- EXPECT_EQ(FloatToBitPreservingString(std::copysign(0.0f, -5.0f)), "-0.0");
+ EXPECT_EQ(FloatToBitPreservingString(std::copysign(0.0f, -5.0f)), "-0.0");
}
TEST(FloatToBitPreservingStringTest, ZeroAsBits) {
- EXPECT_EQ(FloatToBitPreservingString(MakeFloat(0, 0, 0)), "0.0");
- EXPECT_EQ(FloatToBitPreservingString(MakeFloat(1, 0, 0)), "-0.0");
+ EXPECT_EQ(FloatToBitPreservingString(MakeFloat(0, 0, 0)), "0.0");
+ EXPECT_EQ(FloatToBitPreservingString(MakeFloat(1, 0, 0)), "-0.0");
}
TEST(FloatToBitPreservingStringTest, OneBits) {
- EXPECT_EQ(FloatToBitPreservingString(MakeFloat(0, 127, 0)), "1.0");
- EXPECT_EQ(FloatToBitPreservingString(MakeFloat(1, 127, 0)), "-1.0");
+ EXPECT_EQ(FloatToBitPreservingString(MakeFloat(0, 127, 0)), "1.0");
+ EXPECT_EQ(FloatToBitPreservingString(MakeFloat(1, 127, 0)), "-1.0");
}
TEST(FloatToBitPreservingStringTest, SmallestDenormal) {
- EXPECT_EQ(FloatToBitPreservingString(MakeFloat(0, 0, 1)), "0x1p-149");
- EXPECT_EQ(FloatToBitPreservingString(MakeFloat(1, 0, 1)), "-0x1p-149");
+ EXPECT_EQ(FloatToBitPreservingString(MakeFloat(0, 0, 1)), "0x1p-149");
+ EXPECT_EQ(FloatToBitPreservingString(MakeFloat(1, 0, 1)), "-0x1p-149");
}
TEST(FloatToBitPreservingStringTest, BiggerDenormal) {
- EXPECT_EQ(FloatToBitPreservingString(MakeFloat(0, 0, 2)), "0x1p-148");
- EXPECT_EQ(FloatToBitPreservingString(MakeFloat(1, 0, 2)), "-0x1p-148");
+ EXPECT_EQ(FloatToBitPreservingString(MakeFloat(0, 0, 2)), "0x1p-148");
+ EXPECT_EQ(FloatToBitPreservingString(MakeFloat(1, 0, 2)), "-0x1p-148");
}
TEST(FloatToBitPreservingStringTest, LargestDenormal) {
- EXPECT_EQ(FloatToBitPreservingString(MakeFloat(0, 0, 0x7fffff)),
- "0x1.fffffcp-127");
+ EXPECT_EQ(FloatToBitPreservingString(MakeFloat(0, 0, 0x7fffff)), "0x1.fffffcp-127");
}
TEST(FloatToBitPreservingStringTest, Subnormal_cafebe) {
- EXPECT_EQ(FloatToBitPreservingString(MakeFloat(0, 0, 0xcafebe)),
- "0x1.2bfaf8p-127");
- EXPECT_EQ(FloatToBitPreservingString(MakeFloat(1, 0, 0xcafebe)),
- "-0x1.2bfaf8p-127");
+ EXPECT_EQ(FloatToBitPreservingString(MakeFloat(0, 0, 0xcafebe)), "0x1.2bfaf8p-127");
+ EXPECT_EQ(FloatToBitPreservingString(MakeFloat(1, 0, 0xcafebe)), "-0x1.2bfaf8p-127");
}
TEST(FloatToBitPreservingStringTest, Subnormal_aaaaa) {
- EXPECT_EQ(FloatToBitPreservingString(MakeFloat(0, 0, 0xaaaaa)),
- "0x1.55554p-130");
- EXPECT_EQ(FloatToBitPreservingString(MakeFloat(1, 0, 0xaaaaa)),
- "-0x1.55554p-130");
+ EXPECT_EQ(FloatToBitPreservingString(MakeFloat(0, 0, 0xaaaaa)), "0x1.55554p-130");
+ EXPECT_EQ(FloatToBitPreservingString(MakeFloat(1, 0, 0xaaaaa)), "-0x1.55554p-130");
}
TEST(FloatToBitPreservingStringTest, Infinity) {
- EXPECT_EQ(FloatToBitPreservingString(MakeFloat(0, 255, 0)), "0x1p+128");
- EXPECT_EQ(FloatToBitPreservingString(MakeFloat(1, 255, 0)), "-0x1p+128");
+ EXPECT_EQ(FloatToBitPreservingString(MakeFloat(0, 255, 0)), "0x1p+128");
+ EXPECT_EQ(FloatToBitPreservingString(MakeFloat(1, 255, 0)), "-0x1p+128");
}
// TODO(dneto): It's unclear how Infinity and NaN should be handled.
@@ -200,24 +192,18 @@ TEST(FloatToBitPreservingStringTest, Infinity) {
// Disable NaN tests for now.
TEST(FloatToBitPreservingStringTest, DISABLED_NaN_MsbOnly) {
- EXPECT_EQ(FloatToBitPreservingString(MakeFloat(0, 255, 0x400000)),
- "0x1.8p+128");
- EXPECT_EQ(FloatToBitPreservingString(MakeFloat(1, 255, 0x400000)),
- "-0x1.8p+128");
+ EXPECT_EQ(FloatToBitPreservingString(MakeFloat(0, 255, 0x400000)), "0x1.8p+128");
+ EXPECT_EQ(FloatToBitPreservingString(MakeFloat(1, 255, 0x400000)), "-0x1.8p+128");
}
TEST(FloatToBitPreservingStringTest, DISABLED_NaN_LsbOnly) {
- EXPECT_EQ(FloatToBitPreservingString(MakeFloat(0, 255, 0x1)),
- "0x1.000002p+128");
- EXPECT_EQ(FloatToBitPreservingString(MakeFloat(1, 255, 0x1)),
- "-0x1.000002p+128");
+ EXPECT_EQ(FloatToBitPreservingString(MakeFloat(0, 255, 0x1)), "0x1.000002p+128");
+ EXPECT_EQ(FloatToBitPreservingString(MakeFloat(1, 255, 0x1)), "-0x1.000002p+128");
}
TEST(FloatToBitPreservingStringTest, DISABLED_NaN_NonMsb) {
- EXPECT_EQ(FloatToBitPreservingString(MakeFloat(0, 255, 0x20101f)),
- "0x1.40203ep+128");
- EXPECT_EQ(FloatToBitPreservingString(MakeFloat(1, 255, 0x20101f)),
- "-0x1.40203ep+128");
+ EXPECT_EQ(FloatToBitPreservingString(MakeFloat(0, 255, 0x20101f)), "0x1.40203ep+128");
+ EXPECT_EQ(FloatToBitPreservingString(MakeFloat(1, 255, 0x20101f)), "-0x1.40203ep+128");
}
} // namespace
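For anyone checking the expected strings above by hand, a host C library's hex-float formatting produces the same digits for the finite cases. This is a quick, non-Tint sanity check using C++17 hex-float literals; "%a" output can differ in digit count or case across implementations, and infinities print as "inf" rather than the writer's "0x1p+128":

    #include <cstdio>

    int main() {
        std::printf("%a\n", 0x1p-149f);        // smallest subnormal: 0x1p-149
        std::printf("%a\n", 0x1.fffffcp-127f); // largest subnormal:  0x1.fffffcp-127
        std::printf("%a\n", 0x1.2bfaf8p-127f); // Subnormal_cafebe:   0x1.2bfaf8p-127
        return 0;
    }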
diff --git a/chromium/third_party/dawn/src/tint/writer/generate_external_texture_bindings.cc b/chromium/third_party/dawn/src/tint/writer/generate_external_texture_bindings.cc
index 44bd262d74c..16c33e30835 100644
--- a/chromium/third_party/dawn/src/tint/writer/generate_external_texture_bindings.cc
+++ b/chromium/third_party/dawn/src/tint/writer/generate_external_texture_bindings.cc
@@ -21,39 +21,39 @@
#include "src/tint/ast/external_texture.h"
#include "src/tint/ast/module.h"
#include "src/tint/program.h"
-#include "src/tint/sem/external_texture_type.h"
+#include "src/tint/sem/external_texture.h"
#include "src/tint/sem/variable.h"
namespace tint::writer {
-transform::MultiplanarExternalTexture::BindingsMap
-GenerateExternalTextureBindings(const Program* program) {
- // TODO(tint:1491): Use Inspector once we can get binding info for all
- // variables, not just those referenced by entry points.
-
- // Collect next valid binding number per group
- std::unordered_map<uint32_t, uint32_t> group_to_next_binding_number;
- std::vector<sem::BindingPoint> ext_tex_bps;
- for (auto* var : program->AST().GlobalVariables()) {
- if (auto* sem_var = program->Sem().Get(var)->As<sem::GlobalVariable>()) {
- auto bp = sem_var->BindingPoint();
- auto& n = group_to_next_binding_number[bp.group];
- n = std::max(n, bp.binding + 1);
-
- if (sem_var->Type()->UnwrapRef()->Is<sem::ExternalTexture>()) {
- ext_tex_bps.emplace_back(bp);
- }
+transform::MultiplanarExternalTexture::BindingsMap GenerateExternalTextureBindings(
+ const Program* program) {
+ // TODO(tint:1491): Use Inspector once we can get binding info for all
+ // variables, not just those referenced by entry points.
+
+ // Collect next valid binding number per group
+ std::unordered_map<uint32_t, uint32_t> group_to_next_binding_number;
+ std::vector<sem::BindingPoint> ext_tex_bps;
+ for (auto* var : program->AST().GlobalVariables()) {
+ if (auto* sem_var = program->Sem().Get(var)->As<sem::GlobalVariable>()) {
+ auto bp = sem_var->BindingPoint();
+ auto& n = group_to_next_binding_number[bp.group];
+ n = std::max(n, bp.binding + 1);
+
+ if (sem_var->Type()->UnwrapRef()->Is<sem::ExternalTexture>()) {
+ ext_tex_bps.emplace_back(bp);
+ }
+ }
}
- }
-
- transform::MultiplanarExternalTexture::BindingsMap new_bindings_map;
- for (auto bp : ext_tex_bps) {
- uint32_t g = bp.group;
- uint32_t& next_num = group_to_next_binding_number[g];
- auto new_bps = transform::BindingPoints{{g, next_num++}, {g, next_num++}};
- new_bindings_map[bp] = new_bps;
- }
- return new_bindings_map;
+
+ transform::MultiplanarExternalTexture::BindingsMap new_bindings_map;
+ for (auto bp : ext_tex_bps) {
+ uint32_t g = bp.group;
+ uint32_t& next_num = group_to_next_binding_number[g];
+ auto new_bps = transform::BindingPoints{{g, next_num++}, {g, next_num++}};
+ new_bindings_map[bp] = new_bps;
+ }
+ return new_bindings_map;
}
} // namespace tint::writer
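A sketch of how callers consume the returned map; it mirrors the Sanitize() change in the GLSL writer later in this patch, so the names are the ones used there:

    // `program` is a valid const tint::Program*; the generated binding points
    // are fed to the MultiplanarExternalTexture transform via its DataMap.
    auto new_bindings_map = tint::writer::GenerateExternalTextureBindings(program);

    tint::transform::Manager manager;
    tint::transform::DataMap data;
    data.Add<tint::transform::MultiplanarExternalTexture::NewBindingPoints>(new_bindings_map);
    manager.Add<tint::transform::MultiplanarExternalTexture>();

    auto output = manager.Run(program, data);

Each external texture keeps its original binding and gains two fresh bindings (plane_1 and params), allocated after the highest binding number already used in its group.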
diff --git a/chromium/third_party/dawn/src/tint/writer/generate_external_texture_bindings.h b/chromium/third_party/dawn/src/tint/writer/generate_external_texture_bindings.h
index 6f321d55571..8d0aad9e12e 100644
--- a/chromium/third_party/dawn/src/tint/writer/generate_external_texture_bindings.h
+++ b/chromium/third_party/dawn/src/tint/writer/generate_external_texture_bindings.h
@@ -19,8 +19,8 @@
namespace tint::writer {
-transform::MultiplanarExternalTexture::BindingsMap
-GenerateExternalTextureBindings(const Program* program);
+transform::MultiplanarExternalTexture::BindingsMap GenerateExternalTextureBindings(
+ const Program* program);
} // namespace tint::writer
diff --git a/chromium/third_party/dawn/src/tint/writer/generate_external_texture_bindings_test.cc b/chromium/third_party/dawn/src/tint/writer/generate_external_texture_bindings_test.cc
index 31630070439..d0918c322d3 100644
--- a/chromium/third_party/dawn/src/tint/writer/generate_external_texture_bindings_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/generate_external_texture_bindings_test.cc
@@ -26,105 +26,105 @@ constexpr auto kUniform = ast::StorageClass::kUniform;
class GenerateExternalTextureBindingsTest : public ::testing::Test {};
TEST_F(GenerateExternalTextureBindingsTest, None) {
- ProgramBuilder b;
- b.WrapInFunction();
+ ProgramBuilder b;
+ b.WrapInFunction();
- tint::Program program(std::move(b));
- ASSERT_TRUE(program.IsValid());
- auto bindings = GenerateExternalTextureBindings(&program);
- ASSERT_TRUE(bindings.empty());
+ tint::Program program(std::move(b));
+ ASSERT_TRUE(program.IsValid());
+ auto bindings = GenerateExternalTextureBindings(&program);
+ ASSERT_TRUE(bindings.empty());
}
TEST_F(GenerateExternalTextureBindingsTest, One) {
- ProgramBuilder b;
- b.Global("v0", b.ty.external_texture(), b.GroupAndBinding(0, 0));
- b.WrapInFunction();
-
- tint::Program program(std::move(b));
- ASSERT_TRUE(program.IsValid());
- auto bindings = GenerateExternalTextureBindings(&program);
- ASSERT_EQ(bindings.size(), 1u);
-
- auto to = bindings[transform::BindingPoint{0, 0}];
- ASSERT_EQ(to.plane_1.group, 0u);
- ASSERT_EQ(to.params.group, 0u);
- ASSERT_EQ(to.plane_1.binding, 1u);
- ASSERT_EQ(to.params.binding, 2u);
+ ProgramBuilder b;
+ b.Global("v0", b.ty.external_texture(), b.GroupAndBinding(0, 0));
+ b.WrapInFunction();
+
+ tint::Program program(std::move(b));
+ ASSERT_TRUE(program.IsValid());
+ auto bindings = GenerateExternalTextureBindings(&program);
+ ASSERT_EQ(bindings.size(), 1u);
+
+ auto to = bindings[transform::BindingPoint{0, 0}];
+ ASSERT_EQ(to.plane_1.group, 0u);
+ ASSERT_EQ(to.params.group, 0u);
+ ASSERT_EQ(to.plane_1.binding, 1u);
+ ASSERT_EQ(to.params.binding, 2u);
}
TEST_F(GenerateExternalTextureBindingsTest, Two_SameGroup) {
- ProgramBuilder b;
- b.Global("v0", b.ty.external_texture(), b.GroupAndBinding(0, 0));
- b.Global("v1", b.ty.external_texture(), b.GroupAndBinding(0, 1));
- b.WrapInFunction();
-
- tint::Program program(std::move(b));
- ASSERT_TRUE(program.IsValid());
- auto bindings = GenerateExternalTextureBindings(&program);
- ASSERT_EQ(bindings.size(), 2u);
-
- auto to0 = bindings[transform::BindingPoint{0, 0}];
- ASSERT_EQ(to0.plane_1.group, 0u);
- ASSERT_EQ(to0.params.group, 0u);
- ASSERT_EQ(to0.plane_1.binding, 2u);
- ASSERT_EQ(to0.params.binding, 3u);
-
- auto to1 = bindings[transform::BindingPoint{0, 1}];
- ASSERT_EQ(to1.plane_1.group, 0u);
- ASSERT_EQ(to1.params.group, 0u);
- ASSERT_EQ(to1.plane_1.binding, 4u);
- ASSERT_EQ(to1.params.binding, 5u);
+ ProgramBuilder b;
+ b.Global("v0", b.ty.external_texture(), b.GroupAndBinding(0, 0));
+ b.Global("v1", b.ty.external_texture(), b.GroupAndBinding(0, 1));
+ b.WrapInFunction();
+
+ tint::Program program(std::move(b));
+ ASSERT_TRUE(program.IsValid());
+ auto bindings = GenerateExternalTextureBindings(&program);
+ ASSERT_EQ(bindings.size(), 2u);
+
+ auto to0 = bindings[transform::BindingPoint{0, 0}];
+ ASSERT_EQ(to0.plane_1.group, 0u);
+ ASSERT_EQ(to0.params.group, 0u);
+ ASSERT_EQ(to0.plane_1.binding, 2u);
+ ASSERT_EQ(to0.params.binding, 3u);
+
+ auto to1 = bindings[transform::BindingPoint{0, 1}];
+ ASSERT_EQ(to1.plane_1.group, 0u);
+ ASSERT_EQ(to1.params.group, 0u);
+ ASSERT_EQ(to1.plane_1.binding, 4u);
+ ASSERT_EQ(to1.params.binding, 5u);
}
TEST_F(GenerateExternalTextureBindingsTest, Two_DifferentGroup) {
- ProgramBuilder b;
- b.Global("v0", b.ty.external_texture(), b.GroupAndBinding(0, 0));
- b.Global("v1", b.ty.external_texture(), b.GroupAndBinding(1, 0));
- b.WrapInFunction();
-
- tint::Program program(std::move(b));
- ASSERT_TRUE(program.IsValid());
- auto bindings = GenerateExternalTextureBindings(&program);
- ASSERT_EQ(bindings.size(), 2u);
-
- auto to0 = bindings[transform::BindingPoint{0, 0}];
- ASSERT_EQ(to0.plane_1.group, 0u);
- ASSERT_EQ(to0.params.group, 0u);
- ASSERT_EQ(to0.plane_1.binding, 1u);
- ASSERT_EQ(to0.params.binding, 2u);
-
- auto to1 = bindings[transform::BindingPoint{1, 0}];
- ASSERT_EQ(to1.plane_1.group, 1u);
- ASSERT_EQ(to1.params.group, 1u);
- ASSERT_EQ(to1.plane_1.binding, 1u);
- ASSERT_EQ(to1.params.binding, 2u);
+ ProgramBuilder b;
+ b.Global("v0", b.ty.external_texture(), b.GroupAndBinding(0, 0));
+ b.Global("v1", b.ty.external_texture(), b.GroupAndBinding(1, 0));
+ b.WrapInFunction();
+
+ tint::Program program(std::move(b));
+ ASSERT_TRUE(program.IsValid());
+ auto bindings = GenerateExternalTextureBindings(&program);
+ ASSERT_EQ(bindings.size(), 2u);
+
+ auto to0 = bindings[transform::BindingPoint{0, 0}];
+ ASSERT_EQ(to0.plane_1.group, 0u);
+ ASSERT_EQ(to0.params.group, 0u);
+ ASSERT_EQ(to0.plane_1.binding, 1u);
+ ASSERT_EQ(to0.params.binding, 2u);
+
+ auto to1 = bindings[transform::BindingPoint{1, 0}];
+ ASSERT_EQ(to1.plane_1.group, 1u);
+ ASSERT_EQ(to1.params.group, 1u);
+ ASSERT_EQ(to1.plane_1.binding, 1u);
+ ASSERT_EQ(to1.params.binding, 2u);
}
TEST_F(GenerateExternalTextureBindingsTest, Two_WithOtherBindingsInSameGroup) {
- ProgramBuilder b;
- b.Global("v0", b.ty.i32(), b.GroupAndBinding(0, 0), kUniform);
- b.Global("v1", b.ty.external_texture(), b.GroupAndBinding(0, 1));
- b.Global("v2", b.ty.i32(), b.GroupAndBinding(0, 2), kUniform);
- b.Global("v3", b.ty.external_texture(), b.GroupAndBinding(0, 3));
- b.Global("v4", b.ty.i32(), b.GroupAndBinding(0, 4), kUniform);
- b.WrapInFunction();
-
- tint::Program program(std::move(b));
- ASSERT_TRUE(program.IsValid()) << program.Diagnostics().str();
- auto bindings = GenerateExternalTextureBindings(&program);
- ASSERT_EQ(bindings.size(), 2u);
-
- auto to0 = bindings[transform::BindingPoint{0, 1}];
- ASSERT_EQ(to0.plane_1.group, 0u);
- ASSERT_EQ(to0.params.group, 0u);
- ASSERT_EQ(to0.plane_1.binding, 5u);
- ASSERT_EQ(to0.params.binding, 6u);
-
- auto to1 = bindings[transform::BindingPoint{0, 3}];
- ASSERT_EQ(to1.plane_1.group, 0u);
- ASSERT_EQ(to1.params.group, 0u);
- ASSERT_EQ(to1.plane_1.binding, 7u);
- ASSERT_EQ(to1.params.binding, 8u);
+ ProgramBuilder b;
+ b.Global("v0", b.ty.i32(), b.GroupAndBinding(0, 0), kUniform);
+ b.Global("v1", b.ty.external_texture(), b.GroupAndBinding(0, 1));
+ b.Global("v2", b.ty.i32(), b.GroupAndBinding(0, 2), kUniform);
+ b.Global("v3", b.ty.external_texture(), b.GroupAndBinding(0, 3));
+ b.Global("v4", b.ty.i32(), b.GroupAndBinding(0, 4), kUniform);
+ b.WrapInFunction();
+
+ tint::Program program(std::move(b));
+ ASSERT_TRUE(program.IsValid()) << program.Diagnostics().str();
+ auto bindings = GenerateExternalTextureBindings(&program);
+ ASSERT_EQ(bindings.size(), 2u);
+
+ auto to0 = bindings[transform::BindingPoint{0, 1}];
+ ASSERT_EQ(to0.plane_1.group, 0u);
+ ASSERT_EQ(to0.params.group, 0u);
+ ASSERT_EQ(to0.plane_1.binding, 5u);
+ ASSERT_EQ(to0.params.binding, 6u);
+
+ auto to1 = bindings[transform::BindingPoint{0, 3}];
+ ASSERT_EQ(to1.plane_1.group, 0u);
+ ASSERT_EQ(to1.params.group, 0u);
+ ASSERT_EQ(to1.plane_1.binding, 7u);
+ ASSERT_EQ(to1.params.binding, 8u);
}
} // namespace
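To make the allocation in the last test concrete: group 0 already uses bindings 0 through 4, so the next free binding number starts at 5. The external texture at (0, 1) therefore receives plane_1 = 5 and params = 6, and the one at (0, 3) receives 7 and 8, exactly as the assertions above expect.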
diff --git a/chromium/third_party/dawn/src/tint/writer/glsl/generator.cc b/chromium/third_party/dawn/src/tint/writer/glsl/generator.cc
index 83ae8c4bc61..4b1e5166554 100644
--- a/chromium/third_party/dawn/src/tint/writer/glsl/generator.cc
+++ b/chromium/third_party/dawn/src/tint/writer/glsl/generator.cc
@@ -28,35 +28,36 @@ Result::Result() = default;
Result::~Result() = default;
Result::Result(const Result&) = default;
-Result Generate(const Program* program,
- const Options& options,
- const std::string& entry_point) {
- Result result;
-
- // Sanitize the program.
- auto sanitized_result = Sanitize(program, options, entry_point);
- if (!sanitized_result.program.IsValid()) {
- result.success = false;
- result.error = sanitized_result.program.Diagnostics().str();
- return result;
- }
-
- // Generate the GLSL code.
- auto impl = std::make_unique<GeneratorImpl>(&sanitized_result.program,
- options.version);
- result.success = impl->Generate();
- result.error = impl->error();
- result.glsl = impl->result();
-
- // Collect the list of entry points in the sanitized program.
- for (auto* func : sanitized_result.program.AST().Functions()) {
- if (func->IsEntryPoint()) {
- auto name = sanitized_result.program.Symbols().NameFor(func->symbol);
- result.entry_points.push_back({name, func->PipelineStage()});
+Result Generate(const Program* program, const Options& options, const std::string& entry_point) {
+ Result result;
+ if (!program->IsValid()) {
+ result.error = "input program is not valid";
+ return result;
+ }
+
+ // Sanitize the program.
+ auto sanitized_result = Sanitize(program, options, entry_point);
+ if (!sanitized_result.program.IsValid()) {
+ result.success = false;
+ result.error = sanitized_result.program.Diagnostics().str();
+ return result;
}
- }
- return result;
+ // Generate the GLSL code.
+ auto impl = std::make_unique<GeneratorImpl>(&sanitized_result.program, options.version);
+ result.success = impl->Generate();
+ result.error = impl->error();
+ result.glsl = impl->result();
+
+ // Collect the list of entry points in the sanitized program.
+ for (auto* func : sanitized_result.program.AST().Functions()) {
+ if (func->IsEntryPoint()) {
+ auto name = sanitized_result.program.Symbols().NameFor(func->symbol);
+ result.entry_points.push_back({name, func->PipelineStage()});
+ }
+ }
+
+ return result;
}
} // namespace tint::writer::glsl
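A minimal usage sketch for the function above (it assumes a tint::Program obtained elsewhere, e.g. from the WGSL reader; error handling is shown because of the new early-out for invalid programs):

    #include <iostream>
    #include "src/tint/writer/glsl/generator.h"

    void EmitGlsl(const tint::Program* program) {
        tint::writer::glsl::Options options;  // default options; see generator.h below
        auto result = tint::writer::glsl::Generate(program, options, /* entry_point */ "");
        if (!result.success) {
            std::cerr << "GLSL generation failed: " << result.error << "\n";
            return;
        }
        std::cout << result.glsl;
    }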
diff --git a/chromium/third_party/dawn/src/tint/writer/glsl/generator.h b/chromium/third_party/dawn/src/tint/writer/glsl/generator.h
index 11206cf1a1a..d48b0bc7d9c 100644
--- a/chromium/third_party/dawn/src/tint/writer/glsl/generator.h
+++ b/chromium/third_party/dawn/src/tint/writer/glsl/generator.h
@@ -32,9 +32,6 @@
namespace tint {
class Program;
} // namespace tint
-namespace tint::writer::glsl {
-class GeneratorImpl;
-} // namespace tint::writer::glsl
namespace tint::writer::glsl {
@@ -42,66 +39,66 @@ using BindingMap = std::unordered_map<sem::SamplerTexturePair, std::string>;
/// Configuration options used for generating GLSL.
struct Options {
- /// Constructor
- Options();
+ /// Constructor
+ Options();
- /// Destructor
- ~Options();
+ /// Destructor
+ ~Options();
- /// Copy constructor
- Options(const Options&);
+ /// Copy constructor
+ Options(const Options&);
- /// A map of SamplerTexturePair to combined sampler names for the
- /// CombineSamplers transform
- BindingMap binding_map;
+ /// A map of SamplerTexturePair to combined sampler names for the
+ /// CombineSamplers transform
+ BindingMap binding_map;
- /// The binding point to use for placeholder samplers.
- sem::BindingPoint placeholder_binding_point;
+ /// The binding point to use for placeholder samplers.
+ sem::BindingPoint placeholder_binding_point;
- /// A map of old binding point to new binding point for the BindingRemapper
- /// transform
- std::unordered_map<sem::BindingPoint, sem::BindingPoint> binding_points;
+ /// A map of old binding point to new binding point for the BindingRemapper
+ /// transform
+ std::unordered_map<sem::BindingPoint, sem::BindingPoint> binding_points;
- /// A map of old binding point to new access control for the BindingRemapper
- /// transform
- std::unordered_map<sem::BindingPoint, ast::Access> access_controls;
+ /// A map of old binding point to new access control for the BindingRemapper
+ /// transform
+ std::unordered_map<sem::BindingPoint, ast::Access> access_controls;
- /// If true, then validation will be disabled for binding point collisions
- /// generated by the BindingRemapper transform
- bool allow_collisions = false;
+ /// If true, then validation will be disabled for binding point collisions
+ /// generated by the BindingRemapper transform
+ bool allow_collisions = false;
- /// Set to `true` to disable workgroup memory zero initialization
- bool disable_workgroup_init = false;
+ /// Set to `true` to disable workgroup memory zero initialization
+ bool disable_workgroup_init = false;
- /// Set to 'true' to generates binding mappings for external textures
- bool generate_external_texture_bindings = false;
+    /// Set to 'true' to generate binding mappings for external textures
+ bool generate_external_texture_bindings = false;
- /// The GLSL version to emit
- Version version;
+ /// The GLSL version to emit
+ Version version;
};
/// The result produced when generating GLSL.
struct Result {
- /// Constructor
- Result();
+ /// Constructor
+ Result();
- /// Destructor
- ~Result();
+ /// Destructor
+ ~Result();
- /// Copy constructor
- Result(const Result&);
+ /// Copy constructor
+ Result(const Result&);
- /// True if generation was successful.
- bool success = false;
+ /// True if generation was successful.
+ bool success = false;
- /// The errors generated during code generation, if any.
- std::string error;
+ /// The errors generated during code generation, if any.
+ std::string error;
- /// The generated GLSL.
- std::string glsl = "";
+ /// The generated GLSL.
+ std::string glsl = "";
- /// The list of entry points in the generated GLSL.
- std::vector<std::pair<std::string, ast::PipelineStage>> entry_points;
+ /// The list of entry points in the generated GLSL.
+ std::vector<std::pair<std::string, ast::PipelineStage>> entry_points;
};
/// Generate GLSL for a program, according to a set of configuration options.
@@ -111,9 +108,7 @@ struct Result {
/// @param options the configuration options to use when generating GLSL
/// @param entry_point the entry point to generate GLSL for
/// @returns the resulting GLSL and supplementary information
-Result Generate(const Program* program,
- const Options& options,
- const std::string& entry_point);
+Result Generate(const Program* program, const Options& options, const std::string& entry_point);
} // namespace tint::writer::glsl
diff --git a/chromium/third_party/dawn/src/tint/writer/glsl/generator_bench.cc b/chromium/third_party/dawn/src/tint/writer/glsl/generator_bench.cc
index ad87073c805..dddb3035070 100644
--- a/chromium/third_party/dawn/src/tint/writer/glsl/generator_bench.cc
+++ b/chromium/third_party/dawn/src/tint/writer/glsl/generator_bench.cc
@@ -21,27 +21,27 @@ namespace tint::writer::glsl {
namespace {
void GenerateGLSL(benchmark::State& state, std::string input_name) {
- auto res = bench::LoadProgram(input_name);
- if (auto err = std::get_if<bench::Error>(&res)) {
- state.SkipWithError(err->msg.c_str());
- return;
- }
- auto& program = std::get<bench::ProgramAndFile>(res).program;
- std::vector<std::string> entry_points;
- for (auto& fn : program.AST().Functions()) {
- if (fn->IsEntryPoint()) {
- entry_points.emplace_back(program.Symbols().NameFor(fn->symbol));
+ auto res = bench::LoadProgram(input_name);
+ if (auto err = std::get_if<bench::Error>(&res)) {
+ state.SkipWithError(err->msg.c_str());
+ return;
+ }
+ auto& program = std::get<bench::ProgramAndFile>(res).program;
+ std::vector<std::string> entry_points;
+ for (auto& fn : program.AST().Functions()) {
+ if (fn->IsEntryPoint()) {
+ entry_points.emplace_back(program.Symbols().NameFor(fn->symbol));
+ }
}
- }
- for (auto _ : state) {
- for (auto& ep : entry_points) {
- auto res = Generate(&program, {}, ep);
- if (!res.error.empty()) {
- state.SkipWithError(res.error.c_str());
- }
+ for (auto _ : state) {
+ for (auto& ep : entry_points) {
+ auto res = Generate(&program, {}, ep);
+ if (!res.error.empty()) {
+ state.SkipWithError(res.error.c_str());
+ }
+ }
}
- }
}
TINT_BENCHMARK_WGSL_PROGRAMS(GenerateGLSL);
diff --git a/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl.cc b/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl.cc
index ef7467db5cf..e07139bc8e2 100644
--- a/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl.cc
+++ b/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl.cc
@@ -29,18 +29,19 @@
#include "src/tint/ast/variable_decl_statement.h"
#include "src/tint/debug.h"
#include "src/tint/sem/array.h"
-#include "src/tint/sem/atomic_type.h"
+#include "src/tint/sem/atomic.h"
#include "src/tint/sem/block_statement.h"
#include "src/tint/sem/call.h"
-#include "src/tint/sem/depth_multisampled_texture_type.h"
-#include "src/tint/sem/depth_texture_type.h"
+#include "src/tint/sem/constant.h"
+#include "src/tint/sem/depth_multisampled_texture.h"
+#include "src/tint/sem/depth_texture.h"
#include "src/tint/sem/function.h"
#include "src/tint/sem/member_accessor_expression.h"
#include "src/tint/sem/module.h"
-#include "src/tint/sem/multisampled_texture_type.h"
-#include "src/tint/sem/sampled_texture_type.h"
+#include "src/tint/sem/multisampled_texture.h"
+#include "src/tint/sem/sampled_texture.h"
#include "src/tint/sem/statement.h"
-#include "src/tint/sem/storage_texture_type.h"
+#include "src/tint/sem/storage_texture.h"
#include "src/tint/sem/struct.h"
#include "src/tint/sem/type_constructor.h"
#include "src/tint/sem/type_conversion.h"
@@ -52,6 +53,7 @@
#include "src/tint/transform/canonicalize_entry_point_io.h"
#include "src/tint/transform/combine_samplers.h"
#include "src/tint/transform/decompose_memory_access.h"
+#include "src/tint/transform/disable_uniformity_analysis.h"
#include "src/tint/transform/expand_compound_assignment.h"
#include "src/tint/transform/fold_trivial_single_use_lets.h"
#include "src/tint/transform/loop_to_for_loop.h"
@@ -72,25 +74,25 @@
#include "src/tint/writer/float_to_string.h"
#include "src/tint/writer/generate_external_texture_bindings.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace {
bool IsRelational(tint::ast::BinaryOp op) {
- return op == tint::ast::BinaryOp::kEqual ||
- op == tint::ast::BinaryOp::kNotEqual ||
- op == tint::ast::BinaryOp::kLessThan ||
- op == tint::ast::BinaryOp::kGreaterThan ||
- op == tint::ast::BinaryOp::kLessThanEqual ||
- op == tint::ast::BinaryOp::kGreaterThanEqual;
+ return op == tint::ast::BinaryOp::kEqual || op == tint::ast::BinaryOp::kNotEqual ||
+ op == tint::ast::BinaryOp::kLessThan || op == tint::ast::BinaryOp::kGreaterThan ||
+ op == tint::ast::BinaryOp::kLessThanEqual ||
+ op == tint::ast::BinaryOp::kGreaterThanEqual;
}
bool RequiresOESSampleVariables(tint::ast::Builtin builtin) {
- switch (builtin) {
- case tint::ast::Builtin::kSampleIndex:
- case tint::ast::Builtin::kSampleMask:
- return true;
- default:
- return false;
- }
+ switch (builtin) {
+ case tint::ast::Builtin::kSampleIndex:
+ case tint::ast::Builtin::kSampleMask:
+ return true;
+ default:
+ return false;
+ }
}
} // namespace
@@ -102,47 +104,59 @@ const char kTempNamePrefix[] = "tint_tmp";
const char kSpecConstantPrefix[] = "WGSL_SPEC_CONSTANT_";
bool last_is_break_or_fallthrough(const ast::BlockStatement* stmts) {
- return IsAnyOf<ast::BreakStatement, ast::FallthroughStatement>(stmts->Last());
+ return IsAnyOf<ast::BreakStatement, ast::FallthroughStatement>(stmts->Last());
}
const char* convert_texel_format_to_glsl(const ast::TexelFormat format) {
- switch (format) {
- case ast::TexelFormat::kR32Uint:
- return "r32ui";
- case ast::TexelFormat::kR32Sint:
- return "r32i";
- case ast::TexelFormat::kR32Float:
- return "r32f";
- case ast::TexelFormat::kRgba8Unorm:
- return "rgba8";
- case ast::TexelFormat::kRgba8Snorm:
- return "rgba8_snorm";
- case ast::TexelFormat::kRgba8Uint:
- return "rgba8ui";
- case ast::TexelFormat::kRgba8Sint:
- return "rgba8i";
- case ast::TexelFormat::kRg32Uint:
- return "rg32ui";
- case ast::TexelFormat::kRg32Sint:
- return "rg32i";
- case ast::TexelFormat::kRg32Float:
- return "rg32f";
- case ast::TexelFormat::kRgba16Uint:
- return "rgba16ui";
- case ast::TexelFormat::kRgba16Sint:
- return "rgba16i";
- case ast::TexelFormat::kRgba16Float:
- return "rgba16f";
- case ast::TexelFormat::kRgba32Uint:
- return "rgba32ui";
- case ast::TexelFormat::kRgba32Sint:
- return "rgba32i";
- case ast::TexelFormat::kRgba32Float:
- return "rgba32f";
- case ast::TexelFormat::kNone:
- return "unknown";
- }
- return "unknown";
+ switch (format) {
+ case ast::TexelFormat::kR32Uint:
+ return "r32ui";
+ case ast::TexelFormat::kR32Sint:
+ return "r32i";
+ case ast::TexelFormat::kR32Float:
+ return "r32f";
+ case ast::TexelFormat::kRgba8Unorm:
+ return "rgba8";
+ case ast::TexelFormat::kRgba8Snorm:
+ return "rgba8_snorm";
+ case ast::TexelFormat::kRgba8Uint:
+ return "rgba8ui";
+ case ast::TexelFormat::kRgba8Sint:
+ return "rgba8i";
+ case ast::TexelFormat::kRg32Uint:
+ return "rg32ui";
+ case ast::TexelFormat::kRg32Sint:
+ return "rg32i";
+ case ast::TexelFormat::kRg32Float:
+ return "rg32f";
+ case ast::TexelFormat::kRgba16Uint:
+ return "rgba16ui";
+ case ast::TexelFormat::kRgba16Sint:
+ return "rgba16i";
+ case ast::TexelFormat::kRgba16Float:
+ return "rgba16f";
+ case ast::TexelFormat::kRgba32Uint:
+ return "rgba32ui";
+ case ast::TexelFormat::kRgba32Sint:
+ return "rgba32i";
+ case ast::TexelFormat::kRgba32Float:
+ return "rgba32f";
+ case ast::TexelFormat::kNone:
+ return "unknown";
+ }
+ return "unknown";
+}
+
+void PrintF32(std::ostream& out, float value) {
+ // Note: Currently inf and nan should not be constructable, but this is implemented for the day
+ // we support them.
+ if (std::isinf(value)) {
+ out << (value >= 0 ? "uintBitsToFloat(0x7f800000u)" : "uintBitsToFloat(0xff800000u)");
+ } else if (std::isnan(value)) {
+ out << "uintBitsToFloat(0x7fc00000u)";
+ } else {
+ out << FloatToString(value) << "f";
+ }
}
} // namespace
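For reference, the new helper above produces output like the following (illustrative calls only; as its comment notes, inf and NaN cannot currently be written in the input language, so in practice only finite values reach it):

    PrintF32(out, std::numeric_limits<float>::infinity());   // uintBitsToFloat(0x7f800000u)
    PrintF32(out, -std::numeric_limits<float>::infinity());  // uintBitsToFloat(0xff800000u)
    PrintF32(out, std::nanf(""));                            // uintBitsToFloat(0x7fc00000u)
    PrintF32(out, 1.5f);                                     // 1.5f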
@@ -154,77 +168,75 @@ SanitizedResult::SanitizedResult(SanitizedResult&&) = default;
SanitizedResult Sanitize(const Program* in,
const Options& options,
const std::string& entry_point) {
- transform::Manager manager;
- transform::DataMap data;
-
- { // Builtin polyfills
- transform::BuiltinPolyfill::Builtins polyfills;
- polyfills.count_leading_zeros = true;
- polyfills.count_trailing_zeros = true;
- polyfills.extract_bits =
- transform::BuiltinPolyfill::Level::kClampParameters;
- polyfills.first_leading_bit = true;
- polyfills.first_trailing_bit = true;
- polyfills.insert_bits = transform::BuiltinPolyfill::Level::kClampParameters;
- data.Add<transform::BuiltinPolyfill::Config>(polyfills);
- manager.Add<transform::BuiltinPolyfill>();
- }
-
- if (!entry_point.empty()) {
- manager.Add<transform::SingleEntryPoint>();
- data.Add<transform::SingleEntryPoint::Config>(entry_point);
- }
- manager.Add<transform::Renamer>();
- data.Add<transform::Renamer::Config>(
- transform::Renamer::Target::kGlslKeywords,
- /* preserve_unicode */ false);
- manager.Add<transform::Unshadow>();
-
- // Attempt to convert `loop`s into for-loops. This is to try and massage the
- // output into something that will not cause FXC to choke or misbehave.
- manager.Add<transform::FoldTrivialSingleUseLets>();
- manager.Add<transform::LoopToForLoop>();
-
- if (!options.disable_workgroup_init) {
- // ZeroInitWorkgroupMemory must come before CanonicalizeEntryPointIO as
- // ZeroInitWorkgroupMemory may inject new builtin parameters.
- manager.Add<transform::ZeroInitWorkgroupMemory>();
- }
- manager.Add<transform::CanonicalizeEntryPointIO>();
- manager.Add<transform::ExpandCompoundAssignment>();
- manager.Add<transform::PromoteSideEffectsToDecl>();
- manager.Add<transform::UnwindDiscardFunctions>();
- manager.Add<transform::SimplifyPointers>();
-
- manager.Add<transform::RemovePhonies>();
-
- if (options.generate_external_texture_bindings) {
- auto new_bindings_map = writer::GenerateExternalTextureBindings(in);
- data.Add<transform::MultiplanarExternalTexture::NewBindingPoints>(
- new_bindings_map);
- }
- manager.Add<transform::MultiplanarExternalTexture>();
-
- data.Add<transform::CombineSamplers::BindingInfo>(
- options.binding_map, options.placeholder_binding_point);
- manager.Add<transform::CombineSamplers>();
-
- data.Add<transform::BindingRemapper::Remappings>(options.binding_points,
- options.access_controls,
- options.allow_collisions);
- manager.Add<transform::BindingRemapper>();
-
- manager.Add<transform::PromoteInitializersToConstVar>();
- manager.Add<transform::AddEmptyEntryPoint>();
- manager.Add<transform::AddSpirvBlockAttribute>();
- data.Add<transform::CanonicalizeEntryPointIO::Config>(
- transform::CanonicalizeEntryPointIO::ShaderStyle::kGlsl);
-
- auto out = manager.Run(in, data);
-
- SanitizedResult result;
- result.program = std::move(out.program);
- return result;
+ transform::Manager manager;
+ transform::DataMap data;
+
+ manager.Add<transform::DisableUniformityAnalysis>();
+
+ { // Builtin polyfills
+ transform::BuiltinPolyfill::Builtins polyfills;
+ polyfills.count_leading_zeros = true;
+ polyfills.count_trailing_zeros = true;
+ polyfills.extract_bits = transform::BuiltinPolyfill::Level::kClampParameters;
+ polyfills.first_leading_bit = true;
+ polyfills.first_trailing_bit = true;
+ polyfills.insert_bits = transform::BuiltinPolyfill::Level::kClampParameters;
+ data.Add<transform::BuiltinPolyfill::Config>(polyfills);
+ manager.Add<transform::BuiltinPolyfill>();
+ }
+
+ if (!entry_point.empty()) {
+ manager.Add<transform::SingleEntryPoint>();
+ data.Add<transform::SingleEntryPoint::Config>(entry_point);
+ }
+ manager.Add<transform::Renamer>();
+ data.Add<transform::Renamer::Config>(transform::Renamer::Target::kGlslKeywords,
+ /* preserve_unicode */ false);
+ manager.Add<transform::Unshadow>();
+
+ // Attempt to convert `loop`s into for-loops. This is to try and massage the
+ // output into something that will not cause FXC to choke or misbehave.
+ manager.Add<transform::FoldTrivialSingleUseLets>();
+ manager.Add<transform::LoopToForLoop>();
+
+ if (!options.disable_workgroup_init) {
+ // ZeroInitWorkgroupMemory must come before CanonicalizeEntryPointIO as
+ // ZeroInitWorkgroupMemory may inject new builtin parameters.
+ manager.Add<transform::ZeroInitWorkgroupMemory>();
+ }
+ manager.Add<transform::CanonicalizeEntryPointIO>();
+ manager.Add<transform::ExpandCompoundAssignment>();
+ manager.Add<transform::PromoteSideEffectsToDecl>();
+ manager.Add<transform::UnwindDiscardFunctions>();
+ manager.Add<transform::SimplifyPointers>();
+
+ manager.Add<transform::RemovePhonies>();
+
+ if (options.generate_external_texture_bindings) {
+ auto new_bindings_map = writer::GenerateExternalTextureBindings(in);
+ data.Add<transform::MultiplanarExternalTexture::NewBindingPoints>(new_bindings_map);
+ }
+ manager.Add<transform::MultiplanarExternalTexture>();
+
+ data.Add<transform::CombineSamplers::BindingInfo>(options.binding_map,
+ options.placeholder_binding_point);
+ manager.Add<transform::CombineSamplers>();
+
+ data.Add<transform::BindingRemapper::Remappings>(
+ options.binding_points, options.access_controls, options.allow_collisions);
+ manager.Add<transform::BindingRemapper>();
+
+ manager.Add<transform::PromoteInitializersToConstVar>();
+ manager.Add<transform::AddEmptyEntryPoint>();
+ manager.Add<transform::AddSpirvBlockAttribute>();
+ data.Add<transform::CanonicalizeEntryPointIO::Config>(
+ transform::CanonicalizeEntryPointIO::ShaderStyle::kGlsl);
+
+ auto out = manager.Run(in, data);
+
+ SanitizedResult result;
+ result.program = std::move(out.program);
+ return result;
}
GeneratorImpl::GeneratorImpl(const Program* program, const Version& version)
@@ -233,2339 +245,2390 @@ GeneratorImpl::GeneratorImpl(const Program* program, const Version& version)
GeneratorImpl::~GeneratorImpl() = default;
bool GeneratorImpl::Generate() {
- {
- auto out = line();
- out << "#version " << version_.major_version << version_.minor_version
- << "0";
- if (version_.IsES()) {
- out << " es";
+ {
+ auto out = line();
+ out << "#version " << version_.major_version << version_.minor_version << "0";
+ if (version_.IsES()) {
+ out << " es";
+ }
}
- }
- auto helpers_insertion_point = current_buffer_->lines.size();
+ auto helpers_insertion_point = current_buffer_->lines.size();
- line();
+ line();
- auto* mod = builder_.Sem().Module();
- for (auto* decl : mod->DependencyOrderedDeclarations()) {
- if (decl->Is<ast::Alias>()) {
- continue; // Ignore aliases.
- }
-
- if (auto* global = decl->As<ast::Variable>()) {
- if (!EmitGlobalVariable(global)) {
- return false;
- }
- } else if (auto* str = decl->As<ast::Struct>()) {
- // Skip emission if the struct contains a runtime-sized array, since its
- // only use will be as the store-type of a buffer and we emit those
- // elsewhere.
- // TODO(crbug.com/tint/1339): We could also avoid emitting any other
- // struct that is only used as a buffer store type.
- const sem::Struct* sem_str = builder_.Sem().Get(str);
- const auto& members = sem_str->Members();
- TINT_ASSERT(Writer, members.size() > 0);
- auto* last_member = members[members.size() - 1];
- auto* arr = last_member->Type()->As<sem::Array>();
- if (!arr || !arr->IsRuntimeSized()) {
- if (!EmitStructType(current_buffer_, sem_str)) {
- return false;
- }
- }
- } else if (auto* func = decl->As<ast::Function>()) {
- if (func->IsEntryPoint()) {
- if (!EmitEntryPointFunction(func)) {
- return false;
+ auto* mod = builder_.Sem().Module();
+ for (auto* decl : mod->DependencyOrderedDeclarations()) {
+ if (decl->Is<ast::Alias>()) {
+ continue; // Ignore aliases.
}
- } else {
- if (!EmitFunction(func)) {
- return false;
+
+ if (auto* global = decl->As<ast::Variable>()) {
+ if (!EmitGlobalVariable(global)) {
+ return false;
+ }
+ } else if (auto* str = decl->As<ast::Struct>()) {
+ // Skip emission if the struct contains a runtime-sized array, since its
+ // only use will be as the store-type of a buffer and we emit those
+ // elsewhere.
+ // TODO(crbug.com/tint/1339): We could also avoid emitting any other
+ // struct that is only used as a buffer store type.
+ const sem::Struct* sem_str = builder_.Sem().Get(str);
+ const auto& members = sem_str->Members();
+ TINT_ASSERT(Writer, members.size() > 0);
+ auto* last_member = members[members.size() - 1];
+ auto* arr = last_member->Type()->As<sem::Array>();
+ if (!arr || !arr->IsRuntimeSized()) {
+ if (!EmitStructType(current_buffer_, sem_str)) {
+ return false;
+ }
+ }
+ } else if (auto* func = decl->As<ast::Function>()) {
+ if (func->IsEntryPoint()) {
+ if (!EmitEntryPointFunction(func)) {
+ return false;
+ }
+ } else {
+ if (!EmitFunction(func)) {
+ return false;
+ }
+ }
+ } else if (auto* ext = decl->As<ast::Enable>()) {
+ // Record the required extension for generating extension directive later
+ if (!RecordExtension(ext)) {
+ return false;
+ }
+ } else {
+ TINT_ICE(Writer, diagnostics_)
+ << "unhandled module-scope declaration: " << decl->TypeInfo().name;
+ return false;
}
- }
- } else {
- TINT_ICE(Writer, diagnostics_)
- << "unhandled module-scope declaration: " << decl->TypeInfo().name;
- return false;
}
- }
- TextBuffer extensions;
+ TextBuffer extensions;
- if (version_.IsES() && requires_oes_sample_variables_) {
- extensions.Append("#extension GL_OES_sample_variables : require");
- }
+ if (version_.IsES() && requires_oes_sample_variables_) {
+ extensions.Append("#extension GL_OES_sample_variables : require");
+ }
- auto indent = current_buffer_->current_indent;
+ auto indent = current_buffer_->current_indent;
- if (!extensions.lines.empty()) {
- current_buffer_->Insert(extensions, helpers_insertion_point, indent);
- helpers_insertion_point += extensions.lines.size();
- }
+ if (!extensions.lines.empty()) {
+ current_buffer_->Insert(extensions, helpers_insertion_point, indent);
+ helpers_insertion_point += extensions.lines.size();
+ }
- if (version_.IsES() && requires_default_precision_qualifier_) {
- current_buffer_->Insert("precision mediump float;",
- helpers_insertion_point++, indent);
- }
+ if (version_.IsES() && requires_default_precision_qualifier_) {
+ current_buffer_->Insert("precision mediump float;", helpers_insertion_point++, indent);
+ }
- if (!helpers_.lines.empty()) {
- current_buffer_->Insert("", helpers_insertion_point++, indent);
- current_buffer_->Insert(helpers_, helpers_insertion_point, indent);
- helpers_insertion_point += helpers_.lines.size();
- }
+ if (!helpers_.lines.empty()) {
+ current_buffer_->Insert("", helpers_insertion_point++, indent);
+ current_buffer_->Insert(helpers_, helpers_insertion_point, indent);
+ helpers_insertion_point += helpers_.lines.size();
+ }
- return true;
+ return true;
}
-bool GeneratorImpl::EmitIndexAccessor(
- std::ostream& out,
- const ast::IndexAccessorExpression* expr) {
- if (!EmitExpression(out, expr->object)) {
- return false;
- }
- out << "[";
+bool GeneratorImpl::RecordExtension(const ast::Enable*) {
+ /*
+    Deal with the extension node here, recording it within the generator for
+    later emission.
+ For example:
+ ```
+ if (ext->kind == ast::Enable::ExtensionKind::kF16) {
+ require_fp16_ = true;
+ }
+ ```
+ */
- if (!EmitExpression(out, expr->index)) {
- return false;
- }
- out << "]";
+ return true;
+}
- return true;
+bool GeneratorImpl::EmitIndexAccessor(std::ostream& out, const ast::IndexAccessorExpression* expr) {
+ if (!EmitExpression(out, expr->object)) {
+ return false;
+ }
+ out << "[";
+
+ if (!EmitExpression(out, expr->index)) {
+ return false;
+ }
+ out << "]";
+
+ return true;
}
-bool GeneratorImpl::EmitBitcast(std::ostream& out,
- const ast::BitcastExpression* expr) {
- auto* src_type = TypeOf(expr->expr)->UnwrapRef();
- auto* dst_type = TypeOf(expr)->UnwrapRef();
+bool GeneratorImpl::EmitBitcast(std::ostream& out, const ast::BitcastExpression* expr) {
+ auto* src_type = TypeOf(expr->expr)->UnwrapRef();
+ auto* dst_type = TypeOf(expr)->UnwrapRef();
- if (!dst_type->is_integer_scalar_or_vector() &&
- !dst_type->is_float_scalar_or_vector()) {
- diagnostics_.add_error(diag::System::Writer,
- "Unable to do bitcast to type " +
- dst_type->FriendlyName(builder_.Symbols()));
- return false;
- }
-
- if (src_type == dst_type) {
- return EmitExpression(out, expr->expr);
- }
-
- if (src_type->is_float_scalar_or_vector() &&
- dst_type->is_signed_scalar_or_vector()) {
- out << "floatBitsToInt";
- } else if (src_type->is_float_scalar_or_vector() &&
- dst_type->is_unsigned_scalar_or_vector()) {
- out << "floatBitsToUint";
- } else if (src_type->is_signed_scalar_or_vector() &&
- dst_type->is_float_scalar_or_vector()) {
- out << "intBitsToFloat";
- } else if (src_type->is_unsigned_scalar_or_vector() &&
- dst_type->is_float_scalar_or_vector()) {
- out << "uintBitsToFloat";
- } else {
- if (!EmitType(out, dst_type, ast::StorageClass::kNone,
- ast::Access::kReadWrite, "")) {
- return false;
- }
- }
- out << "(";
- if (!EmitExpression(out, expr->expr)) {
- return false;
- }
- out << ")";
- return true;
+ if (!dst_type->is_integer_scalar_or_vector() && !dst_type->is_float_scalar_or_vector()) {
+ diagnostics_.add_error(
+ diag::System::Writer,
+ "Unable to do bitcast to type " + dst_type->FriendlyName(builder_.Symbols()));
+ return false;
+ }
+
+ if (src_type == dst_type) {
+ return EmitExpression(out, expr->expr);
+ }
+
+ if (src_type->is_float_scalar_or_vector() && dst_type->is_signed_scalar_or_vector()) {
+ out << "floatBitsToInt";
+ } else if (src_type->is_float_scalar_or_vector() && dst_type->is_unsigned_scalar_or_vector()) {
+ out << "floatBitsToUint";
+ } else if (src_type->is_signed_scalar_or_vector() && dst_type->is_float_scalar_or_vector()) {
+ out << "intBitsToFloat";
+ } else if (src_type->is_unsigned_scalar_or_vector() && dst_type->is_float_scalar_or_vector()) {
+ out << "uintBitsToFloat";
+ } else {
+ if (!EmitType(out, dst_type, ast::StorageClass::kNone, ast::Access::kReadWrite, "")) {
+ return false;
+ }
+ }
+ out << "(";
+ if (!EmitExpression(out, expr->expr)) {
+ return false;
+ }
+ out << ")";
+ return true;
}
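In terms of generated output, the branches above map WGSL bitcasts to GLSL roughly as follows (a summary of the code above, with f an f32, i an i32 and u a u32 value):

    bitcast<i32>(f)  ->  floatBitsToInt(f)
    bitcast<u32>(f)  ->  floatBitsToUint(f)
    bitcast<f32>(i)  ->  intBitsToFloat(i)
    bitcast<f32>(u)  ->  uintBitsToFloat(u)
    bitcast<i32>(i)  ->  i              (same type: the expression is emitted unchanged)
    bitcast<u32>(i)  ->  uint(i)        (falls through to EmitType, assuming it prints the GLSL type name)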
bool GeneratorImpl::EmitAssign(const ast::AssignmentStatement* stmt) {
- auto out = line();
- if (!EmitExpression(out, stmt->lhs)) {
- return false;
- }
- out << " = ";
- if (!EmitExpression(out, stmt->rhs)) {
- return false;
- }
- out << ";";
- return true;
-}
-
-bool GeneratorImpl::EmitVectorRelational(std::ostream& out,
- const ast::BinaryExpression* expr) {
- switch (expr->op) {
- case ast::BinaryOp::kEqual:
- out << "equal";
- break;
- case ast::BinaryOp::kNotEqual:
- out << "notEqual";
- break;
- case ast::BinaryOp::kLessThan:
- out << "lessThan";
- break;
- case ast::BinaryOp::kGreaterThan:
- out << "greaterThan";
- break;
- case ast::BinaryOp::kLessThanEqual:
- out << "lessThanEqual";
- break;
- case ast::BinaryOp::kGreaterThanEqual:
- out << "greaterThanEqual";
- break;
- default:
- break;
- }
- out << "(";
- if (!EmitExpression(out, expr->lhs)) {
- return false;
- }
- out << ", ";
- if (!EmitExpression(out, expr->rhs)) {
- return false;
- }
- out << ")";
- return true;
+ auto out = line();
+ if (!EmitExpression(out, stmt->lhs)) {
+ return false;
+ }
+ out << " = ";
+ if (!EmitExpression(out, stmt->rhs)) {
+ return false;
+ }
+ out << ";";
+ return true;
}
-bool GeneratorImpl::EmitBitwiseBoolOp(std::ostream& out,
- const ast::BinaryExpression* expr) {
- auto* bool_type = TypeOf(expr->lhs)->UnwrapRef();
- auto* uint_type = BoolTypeToUint(bool_type);
-
- // Cast result to bool scalar or vector type.
- if (!EmitType(out, bool_type, ast::StorageClass::kNone,
- ast::Access::kReadWrite, "")) {
- return false;
- }
- ScopedParen outerCastParen(out);
- // Cast LHS to uint scalar or vector type.
- if (!EmitType(out, uint_type, ast::StorageClass::kNone,
- ast::Access::kReadWrite, "")) {
- return false;
- }
- {
- ScopedParen innerCastParen(out);
- // Emit LHS.
+bool GeneratorImpl::EmitVectorRelational(std::ostream& out, const ast::BinaryExpression* expr) {
+ switch (expr->op) {
+ case ast::BinaryOp::kEqual:
+ out << "equal";
+ break;
+ case ast::BinaryOp::kNotEqual:
+ out << "notEqual";
+ break;
+ case ast::BinaryOp::kLessThan:
+ out << "lessThan";
+ break;
+ case ast::BinaryOp::kGreaterThan:
+ out << "greaterThan";
+ break;
+ case ast::BinaryOp::kLessThanEqual:
+ out << "lessThanEqual";
+ break;
+ case ast::BinaryOp::kGreaterThanEqual:
+ out << "greaterThanEqual";
+ break;
+ default:
+ break;
+ }
+ out << "(";
if (!EmitExpression(out, expr->lhs)) {
- return false;
- }
- }
- // Emit operator.
- if (expr->op == ast::BinaryOp::kAnd) {
- out << " & ";
- } else if (expr->op == ast::BinaryOp::kOr) {
- out << " | ";
- } else {
- TINT_ICE(Writer, diagnostics_)
- << "unexpected binary op: " << FriendlyName(expr->op);
- return false;
- }
- // Cast RHS to uint scalar or vector type.
- if (!EmitType(out, uint_type, ast::StorageClass::kNone,
- ast::Access::kReadWrite, "")) {
- return false;
- }
- {
- ScopedParen innerCastParen(out);
- // Emit RHS.
+ return false;
+ }
+ out << ", ";
if (!EmitExpression(out, expr->rhs)) {
- return false;
+ return false;
}
- }
- return true;
+ out << ")";
+ return true;
}
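As emitted, the switch above turns component-wise comparisons on vectors into the corresponding GLSL builtins, for example (with a and b of type vec3<f32>):

    a == b  ->  equal(a, b)
    a <  b  ->  lessThan(a, b)
    a >= b  ->  greaterThanEqual(a, b)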
-bool GeneratorImpl::EmitFloatModulo(std::ostream& out,
- const ast::BinaryExpression* expr) {
- std::string fn;
- auto* ret_ty = TypeOf(expr)->UnwrapRef();
- fn = utils::GetOrCreate(float_modulo_funcs_, ret_ty, [&]() -> std::string {
- TextBuffer b;
- TINT_DEFER(helpers_.Append(b));
+bool GeneratorImpl::EmitBitwiseBoolOp(std::ostream& out, const ast::BinaryExpression* expr) {
+ auto* bool_type = TypeOf(expr->lhs)->UnwrapRef();
+ auto* uint_type = BoolTypeToUint(bool_type);
- auto fn_name = UniqueIdentifier("tint_float_modulo");
- std::vector<std::string> parameter_names;
+ // Cast result to bool scalar or vector type.
+ if (!EmitType(out, bool_type, ast::StorageClass::kNone, ast::Access::kReadWrite, "")) {
+ return false;
+ }
+ ScopedParen outerCastParen(out);
+ // Cast LHS to uint scalar or vector type.
+ if (!EmitType(out, uint_type, ast::StorageClass::kNone, ast::Access::kReadWrite, "")) {
+ return false;
+ }
{
- auto decl = line(&b);
- if (!EmitTypeAndName(decl, ret_ty, ast::StorageClass::kNone,
- ast::Access::kUndefined, fn_name)) {
- return "";
- }
- {
- ScopedParen sp(decl);
- const auto* ty = TypeOf(expr->lhs)->UnwrapRef();
- if (!EmitTypeAndName(decl, ty, ast::StorageClass::kNone,
- ast::Access::kUndefined, "lhs")) {
- return "";
- }
- decl << ", ";
- ty = TypeOf(expr->rhs)->UnwrapRef();
- if (!EmitTypeAndName(decl, ty, ast::StorageClass::kNone,
- ast::Access::kUndefined, "rhs")) {
- return "";
+ ScopedParen innerCastParen(out);
+ // Emit LHS.
+ if (!EmitExpression(out, expr->lhs)) {
+ return false;
}
- }
- decl << " {";
+ }
+ // Emit operator.
+ if (expr->op == ast::BinaryOp::kAnd) {
+ out << " & ";
+ } else if (expr->op == ast::BinaryOp::kOr) {
+ out << " | ";
+ } else {
+ TINT_ICE(Writer, diagnostics_) << "unexpected binary op: " << FriendlyName(expr->op);
+ return false;
+ }
+ // Cast RHS to uint scalar or vector type.
+ if (!EmitType(out, uint_type, ast::StorageClass::kNone, ast::Access::kReadWrite, "")) {
+ return false;
}
{
- ScopedIndent si(&b);
- line(&b) << "return (lhs - rhs * trunc(lhs / rhs));";
+ ScopedParen innerCastParen(out);
+ // Emit RHS.
+ if (!EmitExpression(out, expr->rhs)) {
+ return false;
+ }
}
- line(&b) << "}";
- line(&b);
- return fn_name;
- });
+ return true;
+}
- if (fn.empty()) {
- return false;
- }
+bool GeneratorImpl::EmitFloatModulo(std::ostream& out, const ast::BinaryExpression* expr) {
+ std::string fn;
+ auto* ret_ty = TypeOf(expr)->UnwrapRef();
+ fn = utils::GetOrCreate(float_modulo_funcs_, ret_ty, [&]() -> std::string {
+ TextBuffer b;
+ TINT_DEFER(helpers_.Append(b));
- // Call the helper
- out << fn;
- {
- ScopedParen sp(out);
- if (!EmitExpression(out, expr->lhs)) {
- return false;
+ auto fn_name = UniqueIdentifier("tint_float_modulo");
+ std::vector<std::string> parameter_names;
+ {
+ auto decl = line(&b);
+ if (!EmitTypeAndName(decl, ret_ty, ast::StorageClass::kNone, ast::Access::kUndefined,
+ fn_name)) {
+ return "";
+ }
+ {
+ ScopedParen sp(decl);
+ const auto* ty = TypeOf(expr->lhs)->UnwrapRef();
+ if (!EmitTypeAndName(decl, ty, ast::StorageClass::kNone, ast::Access::kUndefined,
+ "lhs")) {
+ return "";
+ }
+ decl << ", ";
+ ty = TypeOf(expr->rhs)->UnwrapRef();
+ if (!EmitTypeAndName(decl, ty, ast::StorageClass::kNone, ast::Access::kUndefined,
+ "rhs")) {
+ return "";
+ }
+ }
+ decl << " {";
+ }
+ {
+ ScopedIndent si(&b);
+ line(&b) << "return (lhs - rhs * trunc(lhs / rhs));";
+ }
+ line(&b) << "}";
+ line(&b);
+ return fn_name;
+ });
+
+ if (fn.empty()) {
+ return false;
}
- out << ", ";
- if (!EmitExpression(out, expr->rhs)) {
- return false;
+
+ // Call the helper
+ out << fn;
+ {
+ ScopedParen sp(out);
+ if (!EmitExpression(out, expr->lhs)) {
+ return false;
+ }
+ out << ", ";
+ if (!EmitExpression(out, expr->rhs)) {
+ return false;
+ }
}
- }
- return true;
+ return true;
}

-bool GeneratorImpl::EmitBinary(std::ostream& out,
- const ast::BinaryExpression* expr) {
- if (IsRelational(expr->op) && !TypeOf(expr->lhs)->UnwrapRef()->is_scalar()) {
- return EmitVectorRelational(out, expr);
- }
- if (expr->op == ast::BinaryOp::kLogicalAnd ||
- expr->op == ast::BinaryOp::kLogicalOr) {
- auto name = UniqueIdentifier(kTempNamePrefix);
+bool GeneratorImpl::EmitBinary(std::ostream& out, const ast::BinaryExpression* expr) {
+ if (IsRelational(expr->op) && !TypeOf(expr->lhs)->UnwrapRef()->is_scalar()) {
+ return EmitVectorRelational(out, expr);
+ }
+ if (expr->op == ast::BinaryOp::kLogicalAnd || expr->op == ast::BinaryOp::kLogicalOr) {
+ auto name = UniqueIdentifier(kTempNamePrefix);
- {
- auto pre = line();
- pre << "bool " << name << " = ";
- if (!EmitExpression(pre, expr->lhs)) {
- return false;
- }
- pre << ";";
+ {
+ auto pre = line();
+ pre << "bool " << name << " = ";
+ if (!EmitExpression(pre, expr->lhs)) {
+ return false;
+ }
+ pre << ";";
+ }
+
+ if (expr->op == ast::BinaryOp::kLogicalOr) {
+ line() << "if (!" << name << ") {";
+ } else {
+ line() << "if (" << name << ") {";
+ }
+
+ {
+ ScopedIndent si(this);
+ auto pre = line();
+ pre << name << " = ";
+ if (!EmitExpression(pre, expr->rhs)) {
+ return false;
+ }
+ pre << ";";
+ }
+
+ line() << "}";
+
+ out << "(" << name << ")";
+ return true;
+ }
+ if ((expr->op == ast::BinaryOp::kAnd || expr->op == ast::BinaryOp::kOr) &&
+ TypeOf(expr->lhs)->UnwrapRef()->is_bool_scalar_or_vector()) {
+ return EmitBitwiseBoolOp(out, expr);
}
- if (expr->op == ast::BinaryOp::kLogicalOr) {
- line() << "if (!" << name << ") {";
- } else {
- line() << "if (" << name << ") {";
+ if (expr->op == ast::BinaryOp::kModulo &&
+ (TypeOf(expr->lhs)->UnwrapRef()->is_float_scalar_or_vector() ||
+ TypeOf(expr->rhs)->UnwrapRef()->is_float_scalar_or_vector())) {
+ return EmitFloatModulo(out, expr);
}
- {
- ScopedIndent si(this);
- auto pre = line();
- pre << name << " = ";
- if (!EmitExpression(pre, expr->rhs)) {
+ out << "(";
+ if (!EmitExpression(out, expr->lhs)) {
return false;
- }
- pre << ";";
}
+ out << " ";
+
+ switch (expr->op) {
+ case ast::BinaryOp::kAnd:
+ out << "&";
+ break;
+ case ast::BinaryOp::kOr:
+ out << "|";
+ break;
+ case ast::BinaryOp::kXor:
+ out << "^";
+ break;
+ case ast::BinaryOp::kLogicalAnd:
+ case ast::BinaryOp::kLogicalOr: {
+ // These are both handled above.
+ TINT_UNREACHABLE(Writer, diagnostics_);
+ return false;
+ }
+ case ast::BinaryOp::kEqual:
+ out << "==";
+ break;
+ case ast::BinaryOp::kNotEqual:
+ out << "!=";
+ break;
+ case ast::BinaryOp::kLessThan:
+ out << "<";
+ break;
+ case ast::BinaryOp::kGreaterThan:
+ out << ">";
+ break;
+ case ast::BinaryOp::kLessThanEqual:
+ out << "<=";
+ break;
+ case ast::BinaryOp::kGreaterThanEqual:
+ out << ">=";
+ break;
+ case ast::BinaryOp::kShiftLeft:
+ out << "<<";
+ break;
+ case ast::BinaryOp::kShiftRight:
+ // TODO(dsinclair): MSL is based on C++14, and >> in C++14 has
+ // implementation-defined behaviour for negative LHS. We may have to
+ // generate extra code to implement WGSL-specified behaviour for negative
+ // LHS.
+ out << R"(>>)";
+ break;
+
+ case ast::BinaryOp::kAdd:
+ out << "+";
+ break;
+ case ast::BinaryOp::kSubtract:
+ out << "-";
+ break;
+ case ast::BinaryOp::kMultiply:
+ out << "*";
+ break;
+ case ast::BinaryOp::kDivide:
+ out << "/";
+ break;
+ case ast::BinaryOp::kModulo:
+ out << "%";
+ break;
+ case ast::BinaryOp::kNone:
+ diagnostics_.add_error(diag::System::Writer, "missing binary operation type");
+ return false;
+ }
+ out << " ";
- line() << "}";
+ if (!EmitExpression(out, expr->rhs)) {
+ return false;
+ }
- out << "(" << name << ")";
+ out << ")";
return true;
- }
- if ((expr->op == ast::BinaryOp::kAnd || expr->op == ast::BinaryOp::kOr) &&
- TypeOf(expr->lhs)->UnwrapRef()->is_bool_scalar_or_vector()) {
- return EmitBitwiseBoolOp(out, expr);
- }
-
- if (expr->op == ast::BinaryOp::kModulo &&
- (TypeOf(expr->lhs)->UnwrapRef()->is_float_scalar_or_vector() ||
- TypeOf(expr->rhs)->UnwrapRef()->is_float_scalar_or_vector())) {
- return EmitFloatModulo(out, expr);
- }
-
- out << "(";
- if (!EmitExpression(out, expr->lhs)) {
- return false;
- }
- out << " ";
-
- switch (expr->op) {
- case ast::BinaryOp::kAnd:
- out << "&";
- break;
- case ast::BinaryOp::kOr:
- out << "|";
- break;
- case ast::BinaryOp::kXor:
- out << "^";
- break;
- case ast::BinaryOp::kLogicalAnd:
- case ast::BinaryOp::kLogicalOr: {
- // These are both handled above.
- TINT_UNREACHABLE(Writer, diagnostics_);
- return false;
- }
- case ast::BinaryOp::kEqual:
- out << "==";
- break;
- case ast::BinaryOp::kNotEqual:
- out << "!=";
- break;
- case ast::BinaryOp::kLessThan:
- out << "<";
- break;
- case ast::BinaryOp::kGreaterThan:
- out << ">";
- break;
- case ast::BinaryOp::kLessThanEqual:
- out << "<=";
- break;
- case ast::BinaryOp::kGreaterThanEqual:
- out << ">=";
- break;
- case ast::BinaryOp::kShiftLeft:
- out << "<<";
- break;
- case ast::BinaryOp::kShiftRight:
- // TODO(dsinclair): MSL is based on C++14, and >> in C++14 has
- // implementation-defined behaviour for negative LHS. We may have to
- // generate extra code to implement WGSL-specified behaviour for negative
- // LHS.
- out << R"(>>)";
- break;
-
- case ast::BinaryOp::kAdd:
- out << "+";
- break;
- case ast::BinaryOp::kSubtract:
- out << "-";
- break;
- case ast::BinaryOp::kMultiply:
- out << "*";
- break;
- case ast::BinaryOp::kDivide:
- out << "/";
- break;
- case ast::BinaryOp::kModulo:
- out << "%";
- break;
- case ast::BinaryOp::kNone:
- diagnostics_.add_error(diag::System::Writer,
- "missing binary operation type");
- return false;
- }
- out << " ";
-
- if (!EmitExpression(out, expr->rhs)) {
- return false;
- }
-
- out << ")";
- return true;
}

bool GeneratorImpl::EmitStatements(const ast::StatementList& stmts) {
- for (auto* s : stmts) {
- if (!EmitStatement(s)) {
- return false;
+ for (auto* s : stmts) {
+ if (!EmitStatement(s)) {
+ return false;
+ }
}
- }
- return true;
+ return true;
}

bool GeneratorImpl::EmitStatementsWithIndent(const ast::StatementList& stmts) {
- ScopedIndent si(this);
- return EmitStatements(stmts);
+ ScopedIndent si(this);
+ return EmitStatements(stmts);
}

bool GeneratorImpl::EmitBlock(const ast::BlockStatement* stmt) {
- line() << "{";
- if (!EmitStatementsWithIndent(stmt->statements)) {
- return false;
- }
- line() << "}";
- return true;
+ line() << "{";
+ if (!EmitStatementsWithIndent(stmt->statements)) {
+ return false;
+ }
+ line() << "}";
+ return true;
}

bool GeneratorImpl::EmitBreak(const ast::BreakStatement*) {
- line() << "break;";
- return true;
-}
-
-bool GeneratorImpl::EmitCall(std::ostream& out,
- const ast::CallExpression* expr) {
- auto* call = builder_.Sem().Get(expr);
- auto* target = call->Target();
-
- if (target->Is<sem::Function>()) {
- return EmitFunctionCall(out, call);
- }
- if (auto* builtin = target->As<sem::Builtin>()) {
- return EmitBuiltinCall(out, call, builtin);
- }
- if (auto* cast = target->As<sem::TypeConversion>()) {
- return EmitTypeConversion(out, call, cast);
- }
- if (auto* ctor = target->As<sem::TypeConstructor>()) {
- return EmitTypeConstructor(out, call, ctor);
- }
- TINT_ICE(Writer, diagnostics_)
- << "unhandled call target: " << target->TypeInfo().name;
- return false;
+ line() << "break;";
+ return true;
+}
+
+bool GeneratorImpl::EmitCall(std::ostream& out, const ast::CallExpression* expr) {
+ auto* call = builder_.Sem().Get<sem::Call>(expr);
+ auto* target = call->Target();
+
+ if (target->Is<sem::Function>()) {
+ return EmitFunctionCall(out, call);
+ }
+ if (auto* builtin = target->As<sem::Builtin>()) {
+ return EmitBuiltinCall(out, call, builtin);
+ }
+ if (auto* cast = target->As<sem::TypeConversion>()) {
+ return EmitTypeConversion(out, call, cast);
+ }
+ if (auto* ctor = target->As<sem::TypeConstructor>()) {
+ return EmitTypeConstructor(out, call, ctor);
+ }
+ TINT_ICE(Writer, diagnostics_) << "unhandled call target: " << target->TypeInfo().name;
+ return false;
}

bool GeneratorImpl::EmitFunctionCall(std::ostream& out, const sem::Call* call) {
- const auto& args = call->Arguments();
- auto* decl = call->Declaration();
- auto* ident = decl->target.name;
+ const auto& args = call->Arguments();
+ auto* decl = call->Declaration();
+ auto* ident = decl->target.name;
- auto name = builder_.Symbols().NameFor(ident->symbol);
- auto caller_sym = ident->symbol;
+ auto name = builder_.Symbols().NameFor(ident->symbol);
+ auto caller_sym = ident->symbol;
- out << name << "(";
+ out << name << "(";
- bool first = true;
- for (auto* arg : args) {
- if (!first) {
- out << ", ";
- }
- first = false;
+ bool first = true;
+ for (auto* arg : args) {
+ if (!first) {
+ out << ", ";
+ }
+ first = false;
- if (!EmitExpression(out, arg->Declaration())) {
- return false;
+ if (!EmitExpression(out, arg->Declaration())) {
+ return false;
+ }
}
- }
- out << ")";
- return true;
+ out << ")";
+ return true;
}

bool GeneratorImpl::EmitBuiltinCall(std::ostream& out,
const sem::Call* call,
const sem::Builtin* builtin) {
- auto* expr = call->Declaration();
- if (builtin->IsTexture()) {
- return EmitTextureCall(out, call, builtin);
- }
- if (builtin->Type() == sem::BuiltinType::kCountOneBits) {
- return EmitCountOneBitsCall(out, expr);
- }
- if (builtin->Type() == sem::BuiltinType::kSelect) {
- return EmitSelectCall(out, expr);
- }
- if (builtin->Type() == sem::BuiltinType::kDot) {
- return EmitDotCall(out, expr, builtin);
- }
- if (builtin->Type() == sem::BuiltinType::kModf) {
- return EmitModfCall(out, expr, builtin);
- }
- if (builtin->Type() == sem::BuiltinType::kFrexp) {
- return EmitFrexpCall(out, expr, builtin);
- }
- if (builtin->Type() == sem::BuiltinType::kDegrees) {
- return EmitDegreesCall(out, expr, builtin);
- }
- if (builtin->Type() == sem::BuiltinType::kRadians) {
- return EmitRadiansCall(out, expr, builtin);
- }
- if (builtin->Type() == sem::BuiltinType::kArrayLength) {
- return EmitArrayLength(out, expr);
- }
- if (builtin->Type() == sem::BuiltinType::kExtractBits) {
- return EmitExtractBits(out, expr);
- }
- if (builtin->Type() == sem::BuiltinType::kInsertBits) {
- return EmitInsertBits(out, expr);
- }
- if (builtin->Type() == sem::BuiltinType::kFma && version_.IsES()) {
- return EmitEmulatedFMA(out, expr);
- }
- if (builtin->Type() == sem::BuiltinType::kAbs &&
- TypeOf(expr->args[0])->UnwrapRef()->is_unsigned_scalar_or_vector()) {
- // GLSL does not support abs() on unsigned arguments. However, it's a no-op.
- return EmitExpression(out, expr->args[0]);
- }
- if ((builtin->Type() == sem::BuiltinType::kAny ||
- builtin->Type() == sem::BuiltinType::kAll) &&
- TypeOf(expr->args[0])->UnwrapRef()->is_scalar()) {
- // GLSL does not support any() or all() on scalar arguments. It's a no-op.
- return EmitExpression(out, expr->args[0]);
- }
- if (builtin->IsBarrier()) {
- return EmitBarrierCall(out, builtin);
- }
- if (builtin->IsAtomic()) {
- return EmitWorkgroupAtomicCall(out, expr, builtin);
- }
- auto name = generate_builtin_name(builtin);
- if (name.empty()) {
- return false;
- }
+ auto* expr = call->Declaration();
+ if (builtin->IsTexture()) {
+ return EmitTextureCall(out, call, builtin);
+ }
+ if (builtin->Type() == sem::BuiltinType::kCountOneBits) {
+ return EmitCountOneBitsCall(out, expr);
+ }
+ if (builtin->Type() == sem::BuiltinType::kSelect) {
+ return EmitSelectCall(out, expr);
+ }
+ if (builtin->Type() == sem::BuiltinType::kDot) {
+ return EmitDotCall(out, expr, builtin);
+ }
+ if (builtin->Type() == sem::BuiltinType::kModf) {
+ return EmitModfCall(out, expr, builtin);
+ }
+ if (builtin->Type() == sem::BuiltinType::kFrexp) {
+ return EmitFrexpCall(out, expr, builtin);
+ }
+ if (builtin->Type() == sem::BuiltinType::kDegrees) {
+ return EmitDegreesCall(out, expr, builtin);
+ }
+ if (builtin->Type() == sem::BuiltinType::kRadians) {
+ return EmitRadiansCall(out, expr, builtin);
+ }
+ if (builtin->Type() == sem::BuiltinType::kArrayLength) {
+ return EmitArrayLength(out, expr);
+ }
+ if (builtin->Type() == sem::BuiltinType::kExtractBits) {
+ return EmitExtractBits(out, expr);
+ }
+ if (builtin->Type() == sem::BuiltinType::kInsertBits) {
+ return EmitInsertBits(out, expr);
+ }
+ if (builtin->Type() == sem::BuiltinType::kFma && version_.IsES()) {
+ return EmitEmulatedFMA(out, expr);
+ }
+ if (builtin->Type() == sem::BuiltinType::kAbs &&
+ TypeOf(expr->args[0])->UnwrapRef()->is_unsigned_scalar_or_vector()) {
+ // GLSL does not support abs() on unsigned arguments. However, it's a no-op.
+ return EmitExpression(out, expr->args[0]);
+ }
+ if ((builtin->Type() == sem::BuiltinType::kAny || builtin->Type() == sem::BuiltinType::kAll) &&
+ TypeOf(expr->args[0])->UnwrapRef()->is_scalar()) {
+ // GLSL does not support any() or all() on scalar arguments. It's a no-op.
+ return EmitExpression(out, expr->args[0]);
+ }
+ if (builtin->IsBarrier()) {
+ return EmitBarrierCall(out, builtin);
+ }
+ if (builtin->IsAtomic()) {
+ return EmitWorkgroupAtomicCall(out, expr, builtin);
+ }
+ auto name = generate_builtin_name(builtin);
+ if (name.empty()) {
+ return false;
+ }
- out << name << "(";
+ out << name << "(";
- bool first = true;
- for (auto* arg : call->Arguments()) {
- if (!first) {
- out << ", ";
- }
- first = false;
+ bool first = true;
+ for (auto* arg : call->Arguments()) {
+ if (!first) {
+ out << ", ";
+ }
+ first = false;
- if (!EmitExpression(out, arg->Declaration())) {
- return false;
+ if (!EmitExpression(out, arg->Declaration())) {
+ return false;
+ }
}
- }
- out << ")";
- return true;
+ out << ")";
+ return true;
}

bool GeneratorImpl::EmitTypeConversion(std::ostream& out,
const sem::Call* call,
const sem::TypeConversion* conv) {
- if (!EmitType(out, conv->Target(), ast::StorageClass::kNone,
- ast::Access::kReadWrite, "")) {
- return false;
- }
- out << "(";
+ if (!EmitType(out, conv->Target(), ast::StorageClass::kNone, ast::Access::kReadWrite, "")) {
+ return false;
+ }
+ out << "(";
- if (!EmitExpression(out, call->Arguments()[0]->Declaration())) {
- return false;
- }
+ if (!EmitExpression(out, call->Arguments()[0]->Declaration())) {
+ return false;
+ }
- out << ")";
- return true;
+ out << ")";
+ return true;
}

bool GeneratorImpl::EmitTypeConstructor(std::ostream& out,
const sem::Call* call,
const sem::TypeConstructor* ctor) {
- auto* type = ctor->ReturnType();
-
- // If the type constructor is empty then we need to construct with the zero
- // value for all components.
- if (call->Arguments().empty()) {
- return EmitZeroValue(out, type);
- }
-
- auto it = structure_builders_.find(As<sem::Struct>(type));
- if (it != structure_builders_.end()) {
- out << it->second << "(";
- } else {
- if (!EmitType(out, type, ast::StorageClass::kNone, ast::Access::kReadWrite,
- "")) {
- return false;
+ auto* type = ctor->ReturnType();
+
+ // If the type constructor is empty then we need to construct with the zero
+ // value for all components.
+ if (call->Arguments().empty()) {
+ return EmitZeroValue(out, type);
}
- out << "(";
- }
- bool first = true;
- for (auto* arg : call->Arguments()) {
- if (!first) {
- out << ", ";
+ auto it = structure_builders_.find(As<sem::Struct>(type));
+ if (it != structure_builders_.end()) {
+ out << it->second << "(";
+ } else {
+ if (!EmitType(out, type, ast::StorageClass::kNone, ast::Access::kReadWrite, "")) {
+ return false;
+ }
+ out << "(";
}
- first = false;
- if (!EmitExpression(out, arg->Declaration())) {
- return false;
+ bool first = true;
+ for (auto* arg : call->Arguments()) {
+ if (!first) {
+ out << ", ";
+ }
+ first = false;
+
+ if (!EmitExpression(out, arg->Declaration())) {
+ return false;
+ }
}
- }
- out << ")";
- return true;
+ out << ")";
+ return true;
}

bool GeneratorImpl::EmitWorkgroupAtomicCall(std::ostream& out,
const ast::CallExpression* expr,
const sem::Builtin* builtin) {
- auto call = [&](const char* name) {
- out << name;
- {
- ScopedParen sp(out);
- for (size_t i = 0; i < expr->args.size(); i++) {
- auto* arg = expr->args[i];
- if (i > 0) {
- out << ", ";
- }
- if (!EmitExpression(out, arg)) {
- return false;
- }
- }
- }
- return true;
- };
-
- switch (builtin->Type()) {
- case sem::BuiltinType::kAtomicLoad: {
- // GLSL does not have an atomicLoad, so we emulate it with
- // atomicOr using 0 as the OR value
- out << "atomicOr";
- {
- ScopedParen sp(out);
- if (!EmitExpression(out, expr->args[0])) {
- return false;
- }
- out << ", 0";
- if (builtin->ReturnType()->Is<sem::U32>()) {
- out << "u";
+ auto call = [&](const char* name) {
+ out << name;
+ {
+ ScopedParen sp(out);
+ for (size_t i = 0; i < expr->args.size(); i++) {
+ auto* arg = expr->args[i];
+ if (i > 0) {
+ out << ", ";
+ }
+ if (!EmitExpression(out, arg)) {
+ return false;
+ }
+ }
}
- }
- return true;
- }
- case sem::BuiltinType::kAtomicCompareExchangeWeak: {
- return CallBuiltinHelper(
- out, expr, builtin,
- [&](TextBuffer* b, const std::vector<std::string>& params) {
+ return true;
+ };
+
+ switch (builtin->Type()) {
+ case sem::BuiltinType::kAtomicLoad: {
+ // GLSL does not have an atomicLoad, so we emulate it with
+ // atomicOr using 0 as the OR value
+ out << "atomicOr";
{
- auto pre = line(b);
- if (!EmitTypeAndName(pre, builtin->ReturnType(),
- ast::StorageClass::kNone,
- ast::Access::kUndefined, "result")) {
+ ScopedParen sp(out);
+ if (!EmitExpression(out, expr->args[0])) {
+ return false;
+ }
+ out << ", 0";
+ if (builtin->ReturnType()->Is<sem::U32>()) {
+ out << "u";
+ }
+ }
+ return true;
+ }
+ case sem::BuiltinType::kAtomicCompareExchangeWeak: {
+ // Emit the builtin return type unique to this overload. This does not
+ // exist in the AST, so it will not be generated in Generate().
+ if (!EmitStructTypeOnce(&helpers_, builtin->ReturnType()->As<sem::Struct>())) {
return false;
- }
- pre << ";";
+ }
+
+ auto* dest = expr->args[0];
+ auto* compare_value = expr->args[1];
+ auto* value = expr->args[2];
+
+ std::string result = UniqueIdentifier("atomic_compare_result");
+
+ {
+ auto pre = line();
+ if (!EmitTypeAndName(pre, builtin->ReturnType(), ast::StorageClass::kNone,
+ ast::Access::kUndefined, result)) {
+ return false;
+ }
+ pre << ";";
}
{
- auto pre = line(b);
- pre << "result.x = atomicCompSwap";
- {
- ScopedParen sp(pre);
- pre << params[0];
- pre << ", " << params[1];
- pre << ", " << params[2];
- }
- pre << ";";
+ auto pre = line();
+ pre << result << ".old_value = atomicCompSwap";
+ {
+ ScopedParen sp(pre);
+ if (!EmitExpression(pre, dest)) {
+ return false;
+ }
+ pre << ", ";
+ if (!EmitExpression(pre, compare_value)) {
+ return false;
+ }
+ pre << ", ";
+ if (!EmitExpression(pre, value)) {
+ return false;
+ }
+ }
+ pre << ";";
}
{
- auto pre = line(b);
- pre << "result.y = result.x == " << params[2] << " ? ";
- if (TypeOf(expr->args[2])->Is<sem::U32>()) {
- pre << "1u : 0u;";
- } else {
- pre << "1 : 0;";
- }
- }
- line(b) << "return result;";
+ auto pre = line();
+ pre << result << ".exchanged = " << result << ".old_value == ";
+ if (!EmitExpression(pre, compare_value)) {
+ return false;
+ }
+ pre << ";";
+ }
+
+ out << result;
return true;
- });
- }
+ }
- case sem::BuiltinType::kAtomicAdd:
- case sem::BuiltinType::kAtomicSub:
- return call("atomicAdd");
+ case sem::BuiltinType::kAtomicAdd:
+ case sem::BuiltinType::kAtomicSub:
+ return call("atomicAdd");
- case sem::BuiltinType::kAtomicMax:
- return call("atomicMax");
+ case sem::BuiltinType::kAtomicMax:
+ return call("atomicMax");
- case sem::BuiltinType::kAtomicMin:
- return call("atomicMin");
+ case sem::BuiltinType::kAtomicMin:
+ return call("atomicMin");
- case sem::BuiltinType::kAtomicAnd:
- return call("atomicAnd");
+ case sem::BuiltinType::kAtomicAnd:
+ return call("atomicAnd");
- case sem::BuiltinType::kAtomicOr:
- return call("atomicOr");
+ case sem::BuiltinType::kAtomicOr:
+ return call("atomicOr");
- case sem::BuiltinType::kAtomicXor:
- return call("atomicXor");
+ case sem::BuiltinType::kAtomicXor:
+ return call("atomicXor");
- case sem::BuiltinType::kAtomicExchange:
- case sem::BuiltinType::kAtomicStore:
- // GLSL does not have an atomicStore, so we emulate it with
- // atomicExchange.
- return call("atomicExchange");
+ case sem::BuiltinType::kAtomicExchange:
+ case sem::BuiltinType::kAtomicStore:
+ // GLSL does not have an atomicStore, so we emulate it with
+ // atomicExchange.
+ return call("atomicExchange");
- default:
- break;
- }
+ default:
+ break;
+ }
- TINT_UNREACHABLE(Writer, diagnostics_)
- << "unsupported atomic builtin: " << builtin->Type();
- return false;
+ TINT_UNREACHABLE(Writer, diagnostics_) << "unsupported atomic builtin: " << builtin->Type();
+ return false;
}

-bool GeneratorImpl::EmitArrayLength(std::ostream& out,
- const ast::CallExpression* expr) {
- out << "uint(";
- if (!EmitExpression(out, expr->args[0])) {
- return false;
- }
- out << ".length())";
- return true;
+bool GeneratorImpl::EmitArrayLength(std::ostream& out, const ast::CallExpression* expr) {
+ out << "uint(";
+ if (!EmitExpression(out, expr->args[0])) {
+ return false;
+ }
+ out << ".length())";
+ return true;
}

-bool GeneratorImpl::EmitExtractBits(std::ostream& out,
- const ast::CallExpression* expr) {
- out << "bitfieldExtract(";
- if (!EmitExpression(out, expr->args[0])) {
- return false;
- }
- out << ", int(";
- if (!EmitExpression(out, expr->args[1])) {
- return false;
- }
- out << "), int(";
- if (!EmitExpression(out, expr->args[2])) {
- return false;
- }
- out << "))";
- return true;
+bool GeneratorImpl::EmitExtractBits(std::ostream& out, const ast::CallExpression* expr) {
+ out << "bitfieldExtract(";
+ if (!EmitExpression(out, expr->args[0])) {
+ return false;
+ }
+ out << ", int(";
+ if (!EmitExpression(out, expr->args[1])) {
+ return false;
+ }
+ out << "), int(";
+ if (!EmitExpression(out, expr->args[2])) {
+ return false;
+ }
+ out << "))";
+ return true;
}

-bool GeneratorImpl::EmitInsertBits(std::ostream& out,
- const ast::CallExpression* expr) {
- out << "bitfieldInsert(";
- if (!EmitExpression(out, expr->args[0])) {
- return false;
- }
- out << ", ";
- if (!EmitExpression(out, expr->args[1])) {
- return false;
- }
- out << ", int(";
- if (!EmitExpression(out, expr->args[2])) {
- return false;
- }
- out << "), int(";
- if (!EmitExpression(out, expr->args[3])) {
- return false;
- }
- out << "))";
- return true;
+bool GeneratorImpl::EmitInsertBits(std::ostream& out, const ast::CallExpression* expr) {
+ out << "bitfieldInsert(";
+ if (!EmitExpression(out, expr->args[0])) {
+ return false;
+ }
+ out << ", ";
+ if (!EmitExpression(out, expr->args[1])) {
+ return false;
+ }
+ out << ", int(";
+ if (!EmitExpression(out, expr->args[2])) {
+ return false;
+ }
+ out << "), int(";
+ if (!EmitExpression(out, expr->args[3])) {
+ return false;
+ }
+ out << "))";
+ return true;
}

-bool GeneratorImpl::EmitEmulatedFMA(std::ostream& out,
- const ast::CallExpression* expr) {
- out << "((";
- if (!EmitExpression(out, expr->args[0])) {
- return false;
- }
- out << ") * (";
- if (!EmitExpression(out, expr->args[1])) {
- return false;
- }
- out << ") + (";
- if (!EmitExpression(out, expr->args[2])) {
- return false;
- }
- out << "))";
- return true;
+bool GeneratorImpl::EmitEmulatedFMA(std::ostream& out, const ast::CallExpression* expr) {
+ out << "((";
+ if (!EmitExpression(out, expr->args[0])) {
+ return false;
+ }
+ out << ") * (";
+ if (!EmitExpression(out, expr->args[1])) {
+ return false;
+ }
+ out << ") + (";
+ if (!EmitExpression(out, expr->args[2])) {
+ return false;
+ }
+ out << "))";
+ return true;
}

-bool GeneratorImpl::EmitCountOneBitsCall(std::ostream& out,
- const ast::CallExpression* expr) {
- // GLSL's bitCount returns an integer type, so cast it to the appropriate
- // unsigned type.
- if (!EmitType(out, TypeOf(expr)->UnwrapRef(), ast::StorageClass::kNone,
- ast::Access::kReadWrite, "")) {
- return false;
- }
- out << "(bitCount(";
+bool GeneratorImpl::EmitCountOneBitsCall(std::ostream& out, const ast::CallExpression* expr) {
+ // GLSL's bitCount returns an integer type, so cast it to the appropriate
+ // unsigned type.
+ if (!EmitType(out, TypeOf(expr)->UnwrapRef(), ast::StorageClass::kNone, ast::Access::kReadWrite,
+ "")) {
+ return false;
+ }
+ out << "(bitCount(";
- if (!EmitExpression(out, expr->args[0])) {
- return false;
- }
- out << "))";
- return true;
-}
-
-bool GeneratorImpl::EmitSelectCall(std::ostream& out,
- const ast::CallExpression* expr) {
- auto* expr_false = expr->args[0];
- auto* expr_true = expr->args[1];
- auto* expr_cond = expr->args[2];
- // GLSL does not support ternary expressions with a bool vector conditional,
- // but it does support mix() with same.
- if (TypeOf(expr_cond)->UnwrapRef()->is_bool_vector()) {
- out << "mix(";
- if (!EmitExpression(out, expr_false)) {
- return false;
+ if (!EmitExpression(out, expr->args[0])) {
+ return false;
}
- out << ", ";
- if (!EmitExpression(out, expr_true)) {
- return false;
+ out << "))";
+ return true;
+}
+
+bool GeneratorImpl::EmitSelectCall(std::ostream& out, const ast::CallExpression* expr) {
+ auto* expr_false = expr->args[0];
+ auto* expr_true = expr->args[1];
+ auto* expr_cond = expr->args[2];
+ // GLSL does not support ternary expressions with a bool vector conditional,
+ // but it does support mix() with same.
+ if (TypeOf(expr_cond)->UnwrapRef()->is_bool_vector()) {
+ out << "mix(";
+ if (!EmitExpression(out, expr_false)) {
+ return false;
+ }
+ out << ", ";
+ if (!EmitExpression(out, expr_true)) {
+ return false;
+ }
+ out << ", ";
+ if (!EmitExpression(out, expr_cond)) {
+ return false;
+ }
+ out << ")";
+ return true;
}
- out << ", ";
+ ScopedParen paren(out);
if (!EmitExpression(out, expr_cond)) {
- return false;
+ return false;
}
- out << ")";
- return true;
- }
- ScopedParen paren(out);
- if (!EmitExpression(out, expr_cond)) {
- return false;
- }
- out << " ? ";
+ out << " ? ";
- if (!EmitExpression(out, expr_true)) {
- return false;
- }
+ if (!EmitExpression(out, expr_true)) {
+ return false;
+ }
- out << " : ";
+ out << " : ";
- if (!EmitExpression(out, expr_false)) {
- return false;
- }
+ if (!EmitExpression(out, expr_false)) {
+ return false;
+ }
- return true;
+ return true;
}

bool GeneratorImpl::EmitDotCall(std::ostream& out,
const ast::CallExpression* expr,
const sem::Builtin* builtin) {
- auto* vec_ty = builtin->Parameters()[0]->Type()->As<sem::Vector>();
- std::string fn = "dot";
- if (vec_ty->type()->is_integer_scalar()) {
- // GLSL does not have a builtin for dot() with integer vector types.
- // Generate the helper function if it hasn't been created already
- fn = utils::GetOrCreate(int_dot_funcs_, vec_ty, [&]() -> std::string {
- TextBuffer b;
- TINT_DEFER(helpers_.Append(b));
-
- auto fn_name = UniqueIdentifier("tint_int_dot");
-
- std::string v;
- {
- std::stringstream s;
- if (!EmitType(s, vec_ty->type(), ast::StorageClass::kNone,
- ast::Access::kRead, "")) {
- return "";
- }
- v = s.str();
- }
- { // (u)int tint_int_dot([i|u]vecN a, [i|u]vecN b) {
- auto l = line(&b);
- if (!EmitType(l, vec_ty->type(), ast::StorageClass::kNone,
- ast::Access::kRead, "")) {
- return "";
- }
- l << " " << fn_name << "(";
- if (!EmitType(l, vec_ty, ast::StorageClass::kNone, ast::Access::kRead,
- "")) {
- return "";
- }
- l << " a, ";
- if (!EmitType(l, vec_ty, ast::StorageClass::kNone, ast::Access::kRead,
- "")) {
- return "";
+ auto* vec_ty = builtin->Parameters()[0]->Type()->As<sem::Vector>();
+ std::string fn = "dot";
+ if (vec_ty->type()->is_integer_scalar()) {
+ // GLSL does not have a builtin for dot() with integer vector types.
+ // Generate the helper function if it hasn't been created already
+ fn = utils::GetOrCreate(int_dot_funcs_, vec_ty, [&]() -> std::string {
+ TextBuffer b;
+ TINT_DEFER(helpers_.Append(b));
+
+ auto fn_name = UniqueIdentifier("tint_int_dot");
+
+ std::string v;
+ {
+ std::stringstream s;
+ if (!EmitType(s, vec_ty->type(), ast::StorageClass::kNone, ast::Access::kRead,
+ "")) {
+ return "";
+ }
+ v = s.str();
+ }
+ { // (u)int tint_int_dot([i|u]vecN a, [i|u]vecN b) {
+ auto l = line(&b);
+ if (!EmitType(l, vec_ty->type(), ast::StorageClass::kNone, ast::Access::kRead,
+ "")) {
+ return "";
+ }
+ l << " " << fn_name << "(";
+ if (!EmitType(l, vec_ty, ast::StorageClass::kNone, ast::Access::kRead, "")) {
+ return "";
+ }
+ l << " a, ";
+ if (!EmitType(l, vec_ty, ast::StorageClass::kNone, ast::Access::kRead, "")) {
+ return "";
+ }
+ l << " b) {";
+ }
+ {
+ auto l = line(&b);
+ l << " return ";
+ for (uint32_t i = 0; i < vec_ty->Width(); i++) {
+ if (i > 0) {
+ l << " + ";
+ }
+ l << "a[" << i << "]*b[" << i << "]";
+ }
+ l << ";";
+ }
+ line(&b) << "}";
+ return fn_name;
+ });
+ if (fn.empty()) {
+ return false;
}
- l << " b) {";
- }
- {
- auto l = line(&b);
- l << " return ";
- for (uint32_t i = 0; i < vec_ty->Width(); i++) {
- if (i > 0) {
- l << " + ";
- }
- l << "a[" << i << "]*b[" << i << "]";
- }
- l << ";";
- }
- line(&b) << "}";
- return fn_name;
- });
- if (fn.empty()) {
- return false;
}
- }
- out << fn << "(";
- if (!EmitExpression(out, expr->args[0])) {
- return false;
- }
- out << ", ";
- if (!EmitExpression(out, expr->args[1])) {
- return false;
- }
- out << ")";
- return true;
+ out << fn << "(";
+ if (!EmitExpression(out, expr->args[0])) {
+ return false;
+ }
+ out << ", ";
+ if (!EmitExpression(out, expr->args[1])) {
+ return false;
+ }
+ out << ")";
+ return true;
}

bool GeneratorImpl::EmitModfCall(std::ostream& out,
const ast::CallExpression* expr,
const sem::Builtin* builtin) {
- if (expr->args.size() == 1) {
- return CallBuiltinHelper(
- out, expr, builtin,
- [&](TextBuffer* b, const std::vector<std::string>& params) {
- // Emit the builtin return type unique to this overload. This does not
- // exist in the AST, so it will not be generated in Generate().
- if (!EmitStructType(&helpers_,
- builtin->ReturnType()->As<sem::Struct>())) {
- return false;
- }
-
- {
- auto l = line(b);
- if (!EmitType(l, builtin->ReturnType(), ast::StorageClass::kNone,
- ast::Access::kUndefined, "")) {
- return false;
- }
- l << " result;";
- }
- line(b) << "result.fract = modf(" << params[0] << ", result.whole);";
- line(b) << "return result;";
- return true;
- });
- }
-
- // DEPRECATED
- out << "modf";
- ScopedParen sp(out);
- if (!EmitExpression(out, expr->args[0])) {
- return false;
- }
- out << ", ";
- if (!EmitExpression(out, expr->args[1])) {
- return false;
- }
- return true;
+ if (expr->args.size() == 1) {
+ return CallBuiltinHelper(
+ out, expr, builtin, [&](TextBuffer* b, const std::vector<std::string>& params) {
+ // Emit the builtin return type unique to this overload. This does not
+ // exist in the AST, so it will not be generated in Generate().
+ if (!EmitStructType(&helpers_, builtin->ReturnType()->As<sem::Struct>())) {
+ return false;
+ }
+
+ {
+ auto l = line(b);
+ if (!EmitType(l, builtin->ReturnType(), ast::StorageClass::kNone,
+ ast::Access::kUndefined, "")) {
+ return false;
+ }
+ l << " result;";
+ }
+ line(b) << "result.fract = modf(" << params[0] << ", result.whole);";
+ line(b) << "return result;";
+ return true;
+ });
+ }
+
+ // DEPRECATED
+ out << "modf";
+ ScopedParen sp(out);
+ if (!EmitExpression(out, expr->args[0])) {
+ return false;
+ }
+ out << ", ";
+ if (!EmitExpression(out, expr->args[1])) {
+ return false;
+ }
+ return true;
}

bool GeneratorImpl::EmitFrexpCall(std::ostream& out,
const ast::CallExpression* expr,
const sem::Builtin* builtin) {
- if (expr->args.size() == 1) {
+ if (expr->args.size() == 1) {
+ return CallBuiltinHelper(
+ out, expr, builtin, [&](TextBuffer* b, const std::vector<std::string>& params) {
+ // Emit the builtin return type unique to this overload. This does not
+ // exist in the AST, so it will not be generated in Generate().
+ if (!EmitStructType(&helpers_, builtin->ReturnType()->As<sem::Struct>())) {
+ return false;
+ }
+
+ {
+ auto l = line(b);
+ if (!EmitType(l, builtin->ReturnType(), ast::StorageClass::kNone,
+ ast::Access::kUndefined, "")) {
+ return false;
+ }
+ l << " result;";
+ }
+ line(b) << "result.sig = frexp(" << params[0] << ", result.exp);";
+ line(b) << "return result;";
+ return true;
+ });
+ }
+ // DEPRECATED
+ // Exponent is an integer in WGSL, but HLSL wants a float.
+ // We need to make the call with a temporary float, and then cast.
return CallBuiltinHelper(
- out, expr, builtin,
- [&](TextBuffer* b, const std::vector<std::string>& params) {
- // Emit the builtin return type unique to this overload. This does not
- // exist in the AST, so it will not be generated in Generate().
- if (!EmitStructType(&helpers_,
- builtin->ReturnType()->As<sem::Struct>())) {
- return false;
- }
-
- {
- auto l = line(b);
- if (!EmitType(l, builtin->ReturnType(), ast::StorageClass::kNone,
- ast::Access::kUndefined, "")) {
- return false;
- }
- l << " result;";
- }
- line(b) << "result.sig = frexp(" << params[0] << ", result.exp);";
- line(b) << "return result;";
- return true;
+ out, expr, builtin, [&](TextBuffer* b, const std::vector<std::string>& params) {
+ auto* significand_ty = builtin->Parameters()[0]->Type();
+ auto significand = params[0];
+ auto* exponent_ty = builtin->Parameters()[1]->Type();
+ auto exponent = params[1];
+
+ std::string width;
+ if (auto* vec = significand_ty->As<sem::Vector>()) {
+ width = std::to_string(vec->Width());
+ }
+
+ // Exponent is an integer, which HLSL does not have an overload for.
+ // We need to cast from a float.
+ line(b) << "float" << width << " float_exp;";
+ line(b) << "float" << width << " significand = frexp(" << significand
+ << ", float_exp);";
+ {
+ auto l = line(b);
+ l << exponent << " = ";
+ if (!EmitType(l, exponent_ty->UnwrapPtr(), ast::StorageClass::kNone,
+ ast::Access::kUndefined, "")) {
+ return false;
+ }
+ l << "(float_exp);";
+ }
+ line(b) << "return significand;";
+ return true;
});
- }
- // DEPRECATED
- // Exponent is an integer in WGSL, but HLSL wants a float.
- // We need to make the call with a temporary float, and then cast.
- return CallBuiltinHelper(
- out, expr, builtin,
- [&](TextBuffer* b, const std::vector<std::string>& params) {
- auto* significand_ty = builtin->Parameters()[0]->Type();
- auto significand = params[0];
- auto* exponent_ty = builtin->Parameters()[1]->Type();
- auto exponent = params[1];
-
- std::string width;
- if (auto* vec = significand_ty->As<sem::Vector>()) {
- width = std::to_string(vec->Width());
- }
-
- // Exponent is an integer, which HLSL does not have an overload for.
- // We need to cast from a float.
- line(b) << "float" << width << " float_exp;";
- line(b) << "float" << width << " significand = frexp(" << significand
- << ", float_exp);";
- {
- auto l = line(b);
- l << exponent << " = ";
- if (!EmitType(l, exponent_ty->UnwrapPtr(), ast::StorageClass::kNone,
- ast::Access::kUndefined, "")) {
- return false;
- }
- l << "(float_exp);";
- }
- line(b) << "return significand;";
- return true;
- });
}

bool GeneratorImpl::EmitDegreesCall(std::ostream& out,
const ast::CallExpression* expr,
const sem::Builtin* builtin) {
- return CallBuiltinHelper(
- out, expr, builtin,
- [&](TextBuffer* b, const std::vector<std::string>& params) {
- line(b) << "return " << params[0] << " * " << std::setprecision(20)
- << sem::kRadToDeg << ";";
- return true;
- });
+ return CallBuiltinHelper(out, expr, builtin,
+ [&](TextBuffer* b, const std::vector<std::string>& params) {
+ line(b) << "return " << params[0] << " * " << std::setprecision(20)
+ << sem::kRadToDeg << ";";
+ return true;
+ });
}

bool GeneratorImpl::EmitRadiansCall(std::ostream& out,
const ast::CallExpression* expr,
const sem::Builtin* builtin) {
- return CallBuiltinHelper(
- out, expr, builtin,
- [&](TextBuffer* b, const std::vector<std::string>& params) {
- line(b) << "return " << params[0] << " * " << std::setprecision(20)
- << sem::kDegToRad << ";";
- return true;
- });
-}
-
-bool GeneratorImpl::EmitBarrierCall(std::ostream& out,
- const sem::Builtin* builtin) {
- // TODO(crbug.com/tint/661): Combine sequential barriers to a single
- // instruction.
- if (builtin->Type() == sem::BuiltinType::kWorkgroupBarrier) {
- out << "barrier()";
- } else if (builtin->Type() == sem::BuiltinType::kStorageBarrier) {
- out << "{ barrier(); memoryBarrierBuffer(); }";
- } else {
- TINT_UNREACHABLE(Writer, diagnostics_)
- << "unexpected barrier builtin type " << sem::str(builtin->Type());
- return false;
- }
- return true;
+ return CallBuiltinHelper(out, expr, builtin,
+ [&](TextBuffer* b, const std::vector<std::string>& params) {
+ line(b) << "return " << params[0] << " * " << std::setprecision(20)
+ << sem::kDegToRad << ";";
+ return true;
+ });
+}
+
+bool GeneratorImpl::EmitBarrierCall(std::ostream& out, const sem::Builtin* builtin) {
+ // TODO(crbug.com/tint/661): Combine sequential barriers to a single
+ // instruction.
+ if (builtin->Type() == sem::BuiltinType::kWorkgroupBarrier) {
+ out << "barrier()";
+ } else if (builtin->Type() == sem::BuiltinType::kStorageBarrier) {
+ out << "{ barrier(); memoryBarrierBuffer(); }";
+ } else {
+ TINT_UNREACHABLE(Writer, diagnostics_)
+ << "unexpected barrier builtin type " << sem::str(builtin->Type());
+ return false;
+ }
+ return true;
}

-const ast::Expression* GeneratorImpl::CreateF32Zero(
- const sem::Statement* stmt) {
- auto* zero = builder_.Expr(0.0f);
- auto* f32 = builder_.create<sem::F32>();
- auto* sem_zero = builder_.create<sem::Expression>(
- zero, f32, stmt, sem::Constant{}, /* has_side_effects */ false);
- builder_.Sem().Add(zero, sem_zero);
- return zero;
+const ast::Expression* GeneratorImpl::CreateF32Zero(const sem::Statement* stmt) {
+ auto* zero = builder_.Expr(0_f);
+ auto* f32 = builder_.create<sem::F32>();
+ auto* sem_zero = builder_.create<sem::Expression>(zero, f32, stmt, sem::Constant{},
+ /* has_side_effects */ false);
+ builder_.Sem().Add(zero, sem_zero);
+ return zero;
}

bool GeneratorImpl::EmitTextureCall(std::ostream& out,
const sem::Call* call,
const sem::Builtin* builtin) {
- using Usage = sem::ParameterUsage;
+ using Usage = sem::ParameterUsage;
- auto& signature = builtin->Signature();
- auto* expr = call->Declaration();
- auto arguments = expr->args;
+ auto& signature = builtin->Signature();
+ auto* expr = call->Declaration();
+ auto arguments = expr->args;
- // Returns the argument with the given usage
- auto arg = [&](Usage usage) {
- int idx = signature.IndexOf(usage);
- return (idx >= 0) ? arguments[idx] : nullptr;
- };
+ // Returns the argument with the given usage
+ auto arg = [&](Usage usage) {
+ int idx = signature.IndexOf(usage);
+ return (idx >= 0) ? arguments[idx] : nullptr;
+ };
- auto* texture = arg(Usage::kTexture);
- if (!texture) {
- TINT_ICE(Writer, diagnostics_) << "missing texture argument";
- return false;
- }
+ auto* texture = arg(Usage::kTexture);
+ if (!texture) {
+ TINT_ICE(Writer, diagnostics_) << "missing texture argument";
+ return false;
+ }
- auto* texture_type = TypeOf(texture)->UnwrapRef()->As<sem::Texture>();
+ auto* texture_type = TypeOf(texture)->UnwrapRef()->As<sem::Texture>();
- switch (builtin->Type()) {
- case sem::BuiltinType::kTextureDimensions: {
- if (texture_type->Is<sem::StorageTexture>()) {
- out << "imageSize(";
- } else {
- out << "textureSize(";
- }
- if (!EmitExpression(out, texture)) {
- return false;
- }
+ switch (builtin->Type()) {
+ case sem::BuiltinType::kTextureDimensions: {
+ if (texture_type->Is<sem::StorageTexture>()) {
+ out << "imageSize(";
+ } else {
+ out << "textureSize(";
+ }
+ if (!EmitExpression(out, texture)) {
+ return false;
+ }
- // The LOD parameter is mandatory on textureSize() for non-multisampled
- // textures.
- if (!texture_type->Is<sem::StorageTexture>() &&
- !texture_type->Is<sem::MultisampledTexture>() &&
- !texture_type->Is<sem::DepthMultisampledTexture>()) {
- out << ", ";
- if (auto* level_arg = arg(Usage::kLevel)) {
- if (!EmitExpression(out, level_arg)) {
- return false;
- }
- } else {
- out << "0";
+ // The LOD parameter is mandatory on textureSize() for non-multisampled
+ // textures.
+ if (!texture_type->Is<sem::StorageTexture>() &&
+ !texture_type->Is<sem::MultisampledTexture>() &&
+ !texture_type->Is<sem::DepthMultisampledTexture>()) {
+ out << ", ";
+ if (auto* level_arg = arg(Usage::kLevel)) {
+ if (!EmitExpression(out, level_arg)) {
+ return false;
+ }
+ } else {
+ out << "0";
+ }
+ }
+ out << ")";
+ // textureSize() on array samplers returns the array size in the
+ // final component, so strip it out.
+ if (texture_type->dim() == ast::TextureDimension::k2dArray ||
+ texture_type->dim() == ast::TextureDimension::kCubeArray) {
+ out << ".xy";
+ }
+ return true;
}
- }
- out << ")";
- // textureSize() on array samplers returns the array size in the
- // final component, so strip it out.
- if (texture_type->dim() == ast::TextureDimension::k2dArray ||
- texture_type->dim() == ast::TextureDimension::kCubeArray) {
- out << ".xy";
- }
- return true;
+ case sem::BuiltinType::kTextureNumLayers: {
+ if (texture_type->Is<sem::StorageTexture>()) {
+ out << "imageSize(";
+ } else {
+ out << "textureSize(";
+ }
+ // textureSize() on sampler2dArray returns the array size in the
+ // final component, so return it
+ if (!EmitExpression(out, texture)) {
+ return false;
+ }
+ // The LOD parameter is mandatory on textureSize() for non-multisampled
+ // textures.
+ if (!texture_type->Is<sem::StorageTexture>() &&
+ !texture_type->Is<sem::MultisampledTexture>() &&
+ !texture_type->Is<sem::DepthMultisampledTexture>()) {
+ out << ", ";
+ if (auto* level_arg = arg(Usage::kLevel)) {
+ if (!EmitExpression(out, level_arg)) {
+ return false;
+ }
+ } else {
+ out << "0";
+ }
+ }
+ out << ").z";
+ return true;
+ }
+ case sem::BuiltinType::kTextureNumLevels: {
+ out << "textureQueryLevels(";
+ if (!EmitExpression(out, texture)) {
+ return false;
+ }
+ out << ")";
+ return true;
+ }
+ case sem::BuiltinType::kTextureNumSamples: {
+ out << "textureSamples(";
+ if (!EmitExpression(out, texture)) {
+ return false;
+ }
+ out << ")";
+ return true;
+ }
+ default:
+ break;
}
- case sem::BuiltinType::kTextureNumLayers: {
- if (texture_type->Is<sem::StorageTexture>()) {
- out << "imageSize(";
- } else {
- out << "textureSize(";
- }
- // textureSize() on sampler2dArray returns the array size in the
- // final component, so return it
- if (!EmitExpression(out, texture)) {
- return false;
- }
- // The LOD parameter is mandatory on textureSize() for non-multisampled
- // textures.
- if (!texture_type->Is<sem::StorageTexture>() &&
- !texture_type->Is<sem::MultisampledTexture>() &&
- !texture_type->Is<sem::DepthMultisampledTexture>()) {
- out << ", ";
- if (auto* level_arg = arg(Usage::kLevel)) {
- if (!EmitExpression(out, level_arg)) {
+
+ uint32_t glsl_ret_width = 4u;
+ bool append_depth_ref_to_coords = true;
+ bool is_depth = texture_type->Is<sem::DepthTexture>();
+
+ switch (builtin->Type()) {
+ case sem::BuiltinType::kTextureSample:
+ case sem::BuiltinType::kTextureSampleBias:
+ out << "texture";
+ if (is_depth) {
+ glsl_ret_width = 1u;
+ }
+ break;
+ case sem::BuiltinType::kTextureSampleLevel:
+ out << "textureLod";
+ if (is_depth) {
+ glsl_ret_width = 1u;
+ }
+ break;
+ case sem::BuiltinType::kTextureGather:
+ case sem::BuiltinType::kTextureGatherCompare:
+ out << "textureGather";
+ append_depth_ref_to_coords = false;
+ break;
+ case sem::BuiltinType::kTextureSampleGrad:
+ out << "textureGrad";
+ break;
+ case sem::BuiltinType::kTextureSampleCompare:
+ case sem::BuiltinType::kTextureSampleCompareLevel:
+ out << "texture";
+ glsl_ret_width = 1;
+ break;
+ case sem::BuiltinType::kTextureLoad:
+ out << "texelFetch";
+ break;
+ case sem::BuiltinType::kTextureStore:
+ out << "imageStore";
+ break;
+ default:
+ diagnostics_.add_error(diag::System::Writer,
+ "Internal compiler error: Unhandled texture builtin '" +
+ std::string(builtin->str()) + "'");
return false;
- }
- } else {
- out << "0";
- }
- }
- out << ").z";
- return true;
}
- case sem::BuiltinType::kTextureNumLevels: {
- out << "textureQueryLevels(";
- if (!EmitExpression(out, texture)) {
+
+ if (builtin->Signature().IndexOf(sem::ParameterUsage::kOffset) >= 0) {
+ out << "Offset";
+ }
+
+ out << "(";
+
+ if (!EmitExpression(out, texture)) {
return false;
- }
- out << ")";
- return true;
}
- case sem::BuiltinType::kTextureNumSamples: {
- out << "textureSamples(";
- if (!EmitExpression(out, texture)) {
+
+ out << ", ";
+
+ auto* param_coords = arg(Usage::kCoords);
+ if (!param_coords) {
+ TINT_ICE(Writer, diagnostics_) << "missing coords argument";
return false;
- }
- out << ")";
- return true;
- }
- default:
- break;
- }
-
- uint32_t glsl_ret_width = 4u;
- bool append_depth_ref_to_coords = true;
- bool is_depth = texture_type->Is<sem::DepthTexture>();
-
- switch (builtin->Type()) {
- case sem::BuiltinType::kTextureSample:
- case sem::BuiltinType::kTextureSampleBias:
- out << "texture";
- if (is_depth) {
- glsl_ret_width = 1u;
- }
- break;
- case sem::BuiltinType::kTextureSampleLevel:
- out << "textureLod";
- if (is_depth) {
- glsl_ret_width = 1u;
- }
- break;
- case sem::BuiltinType::kTextureGather:
- case sem::BuiltinType::kTextureGatherCompare:
- out << "textureGather";
- append_depth_ref_to_coords = false;
- break;
- case sem::BuiltinType::kTextureSampleGrad:
- out << "textureGrad";
- break;
- case sem::BuiltinType::kTextureSampleCompare:
- case sem::BuiltinType::kTextureSampleCompareLevel:
- out << "texture";
- glsl_ret_width = 1;
- break;
- case sem::BuiltinType::kTextureLoad:
- out << "texelFetch";
- break;
- case sem::BuiltinType::kTextureStore:
- out << "imageStore";
- break;
- default:
- diagnostics_.add_error(
- diag::System::Writer,
- "Internal compiler error: Unhandled texture builtin '" +
- std::string(builtin->str()) + "'");
- return false;
- }
-
- if (builtin->Signature().IndexOf(sem::ParameterUsage::kOffset) >= 0) {
- out << "Offset";
- }
-
- out << "(";
-
- if (!EmitExpression(out, texture))
- return false;
+ }
- out << ", ";
+ if (auto* array_index = arg(Usage::kArrayIndex)) {
+ // Array index needs to be appended to the coordinates.
+ param_coords = AppendVector(&builder_, param_coords, array_index)->Declaration();
+ }
- auto* param_coords = arg(Usage::kCoords);
- if (!param_coords) {
- TINT_ICE(Writer, diagnostics_) << "missing coords argument";
- return false;
- }
-
- if (auto* array_index = arg(Usage::kArrayIndex)) {
- // Array index needs to be appended to the coordinates.
- param_coords =
- AppendVector(&builder_, param_coords, array_index)->Declaration();
- }
-
- // GLSL requires Dref to be appended to the coordinates, *unless* it's
- // samplerCubeArrayShadow, in which case it will be handled as a separate
- // parameter.
- if (texture_type->dim() == ast::TextureDimension::kCubeArray) {
- append_depth_ref_to_coords = false;
- }
-
- if (is_depth && append_depth_ref_to_coords) {
- auto* depth_ref = arg(Usage::kDepthRef);
- if (!depth_ref) {
- // Sampling a depth texture in GLSL always requires a depth reference, so
- // append zero here.
- depth_ref = CreateF32Zero(builder_.Sem().Get(param_coords)->Stmt());
- }
- param_coords =
- AppendVector(&builder_, param_coords, depth_ref)->Declaration();
- }
-
- if (!EmitExpression(out, param_coords)) {
- return false;
- }
-
- for (auto usage : {Usage::kLevel, Usage::kDdx, Usage::kDdy,
- Usage::kSampleIndex, Usage::kValue}) {
- if (auto* e = arg(usage)) {
- out << ", ";
- if (usage == Usage::kLevel && is_depth) {
- // WGSL's textureSampleLevel() "level" param is i32 for depth textures,
- // whereas GLSL's textureLod() "lod" param is always float, so cast it.
- out << "float(";
- if (!EmitExpression(out, e)) {
- return false;
+ // GLSL requires Dref to be appended to the coordinates, *unless* it's
+ // samplerCubeArrayShadow, in which case it will be handled as a separate
+ // parameter.
+ if (texture_type->dim() == ast::TextureDimension::kCubeArray) {
+ append_depth_ref_to_coords = false;
+ }
+
+ if (is_depth && append_depth_ref_to_coords) {
+ auto* depth_ref = arg(Usage::kDepthRef);
+ if (!depth_ref) {
+ // Sampling a depth texture in GLSL always requires a depth reference, so
+ // append zero here.
+ depth_ref = CreateF32Zero(builder_.Sem().Get(param_coords)->Stmt());
}
- out << ")";
- } else if (!EmitExpression(out, e)) {
+ param_coords = AppendVector(&builder_, param_coords, depth_ref)->Declaration();
+ }
+
+ if (!EmitExpression(out, param_coords)) {
return false;
- }
}
- }
- // GLSL's textureGather always requires a refZ parameter.
- if (is_depth && builtin->Type() == sem::BuiltinType::kTextureGather) {
- out << ", 0.0";
- }
+ for (auto usage :
+ {Usage::kLevel, Usage::kDdx, Usage::kDdy, Usage::kSampleIndex, Usage::kValue}) {
+ if (auto* e = arg(usage)) {
+ out << ", ";
+ if (usage == Usage::kLevel && is_depth) {
+ // WGSL's textureSampleLevel() "level" param is i32 for depth textures,
+ // whereas GLSL's textureLod() "lod" param is always float, so cast it.
+ out << "float(";
+ if (!EmitExpression(out, e)) {
+ return false;
+ }
+ out << ")";
+ } else if (!EmitExpression(out, e)) {
+ return false;
+ }
+ }
+ }
- // [1] samplerCubeArrayShadow requires a separate depthRef parameter
- if (is_depth && !append_depth_ref_to_coords) {
- if (auto* e = arg(Usage::kDepthRef)) {
- out << ", ";
- if (!EmitExpression(out, e)) {
- return false;
- }
- } else if (builtin->Type() == sem::BuiltinType::kTextureSample) {
- out << ", 0.0f";
+ // GLSL's textureGather always requires a refZ parameter.
+ if (is_depth && builtin->Type() == sem::BuiltinType::kTextureGather) {
+ out << ", 0.0";
}
- }
- for (auto usage : {Usage::kOffset, Usage::kComponent, Usage::kBias}) {
- if (auto* e = arg(usage)) {
- out << ", ";
- if (!EmitExpression(out, e)) {
- return false;
- }
+ // [1] samplerCubeArrayShadow requires a separate depthRef parameter
+ if (is_depth && !append_depth_ref_to_coords) {
+ if (auto* e = arg(Usage::kDepthRef)) {
+ out << ", ";
+ if (!EmitExpression(out, e)) {
+ return false;
+ }
+ } else if (builtin->Type() == sem::BuiltinType::kTextureSample) {
+ out << ", 0.0f";
+ }
}
- }
- out << ")";
+ for (auto usage : {Usage::kOffset, Usage::kComponent, Usage::kBias}) {
+ if (auto* e = arg(usage)) {
+ out << ", ";
+ if (!EmitExpression(out, e)) {
+ return false;
+ }
+ }
+ }
- if (builtin->ReturnType()->Is<sem::Void>()) {
- return true;
- }
- // If the builtin return type does not match the number of elements of the
- // GLSL builtin, we need to swizzle the expression to generate the correct
- // number of components.
- uint32_t wgsl_ret_width = 1;
- if (auto* vec = builtin->ReturnType()->As<sem::Vector>()) {
- wgsl_ret_width = vec->Width();
- }
- if (wgsl_ret_width < glsl_ret_width) {
- out << ".";
- for (uint32_t i = 0; i < wgsl_ret_width; i++) {
- out << "xyz"[i];
- }
- }
- if (wgsl_ret_width > glsl_ret_width) {
- TINT_ICE(Writer, diagnostics_)
- << "WGSL return width (" << wgsl_ret_width
- << ") is wider than GLSL return width (" << glsl_ret_width << ") for "
- << builtin->Type();
- return false;
- }
+ out << ")";
+
+ if (builtin->ReturnType()->Is<sem::Void>()) {
+ return true;
+ }
+ // If the builtin return type does not match the number of elements of the
+ // GLSL builtin, we need to swizzle the expression to generate the correct
+ // number of components.
+ uint32_t wgsl_ret_width = 1;
+ if (auto* vec = builtin->ReturnType()->As<sem::Vector>()) {
+ wgsl_ret_width = vec->Width();
+ }
+ if (wgsl_ret_width < glsl_ret_width) {
+ out << ".";
+ for (uint32_t i = 0; i < wgsl_ret_width; i++) {
+ out << "xyz"[i];
+ }
+ }
+ if (wgsl_ret_width > glsl_ret_width) {
+ TINT_ICE(Writer, diagnostics_)
+ << "WGSL return width (" << wgsl_ret_width << ") is wider than GLSL return width ("
+ << glsl_ret_width << ") for " << builtin->Type();
+ return false;
+ }
- return true;
+ return true;
}

std::string GeneratorImpl::generate_builtin_name(const sem::Builtin* builtin) {
- switch (builtin->Type()) {
- case sem::BuiltinType::kAbs:
- case sem::BuiltinType::kAcos:
- case sem::BuiltinType::kAll:
- case sem::BuiltinType::kAny:
- case sem::BuiltinType::kAsin:
- case sem::BuiltinType::kAtan:
- case sem::BuiltinType::kCeil:
- case sem::BuiltinType::kClamp:
- case sem::BuiltinType::kCos:
- case sem::BuiltinType::kCosh:
- case sem::BuiltinType::kCross:
- case sem::BuiltinType::kDeterminant:
- case sem::BuiltinType::kDistance:
- case sem::BuiltinType::kDot:
- case sem::BuiltinType::kExp:
- case sem::BuiltinType::kExp2:
- case sem::BuiltinType::kFloor:
- case sem::BuiltinType::kFrexp:
- case sem::BuiltinType::kLdexp:
- case sem::BuiltinType::kLength:
- case sem::BuiltinType::kLog:
- case sem::BuiltinType::kLog2:
- case sem::BuiltinType::kMax:
- case sem::BuiltinType::kMin:
- case sem::BuiltinType::kModf:
- case sem::BuiltinType::kNormalize:
- case sem::BuiltinType::kPow:
- case sem::BuiltinType::kReflect:
- case sem::BuiltinType::kRefract:
- case sem::BuiltinType::kRound:
- case sem::BuiltinType::kSign:
- case sem::BuiltinType::kSin:
- case sem::BuiltinType::kSinh:
- case sem::BuiltinType::kSqrt:
- case sem::BuiltinType::kStep:
- case sem::BuiltinType::kTan:
- case sem::BuiltinType::kTanh:
- case sem::BuiltinType::kTranspose:
- case sem::BuiltinType::kTrunc:
- return builtin->str();
- case sem::BuiltinType::kAtan2:
- return "atan";
- case sem::BuiltinType::kCountOneBits:
- return "bitCount";
- case sem::BuiltinType::kDpdx:
- return "dFdx";
- case sem::BuiltinType::kDpdxCoarse:
- if (version_.IsES()) {
- return "dFdx";
- }
- return "dFdxCoarse";
- case sem::BuiltinType::kDpdxFine:
- if (version_.IsES()) {
- return "dFdx";
- }
- return "dFdxFine";
- case sem::BuiltinType::kDpdy:
- return "dFdy";
- case sem::BuiltinType::kDpdyCoarse:
- if (version_.IsES()) {
- return "dFdy";
- }
- return "dFdyCoarse";
- case sem::BuiltinType::kDpdyFine:
- if (version_.IsES()) {
- return "dFdy";
- }
- return "dFdyFine";
- case sem::BuiltinType::kFaceForward:
- return "faceforward";
- case sem::BuiltinType::kFract:
- return "fract";
- case sem::BuiltinType::kFma:
- return "fma";
- case sem::BuiltinType::kFwidth:
- case sem::BuiltinType::kFwidthCoarse:
- case sem::BuiltinType::kFwidthFine:
- return "fwidth";
- case sem::BuiltinType::kInverseSqrt:
- return "inversesqrt";
- case sem::BuiltinType::kMix:
- return "mix";
- case sem::BuiltinType::kPack2x16float:
- return "packHalf2x16";
- case sem::BuiltinType::kPack2x16snorm:
- return "packSnorm2x16";
- case sem::BuiltinType::kPack2x16unorm:
- return "packUnorm2x16";
- case sem::BuiltinType::kPack4x8snorm:
- return "packSnorm4x8";
- case sem::BuiltinType::kPack4x8unorm:
- return "packUnorm4x8";
- case sem::BuiltinType::kReverseBits:
- return "bitfieldReverse";
- case sem::BuiltinType::kSmoothstep:
- case sem::BuiltinType::kSmoothStep:
- return "smoothstep";
- case sem::BuiltinType::kUnpack2x16float:
- return "unpackHalf2x16";
- case sem::BuiltinType::kUnpack2x16snorm:
- return "unpackSnorm2x16";
- case sem::BuiltinType::kUnpack2x16unorm:
- return "unpackUnorm2x16";
- case sem::BuiltinType::kUnpack4x8snorm:
- return "unpackSnorm4x8";
- case sem::BuiltinType::kUnpack4x8unorm:
- return "unpackUnorm4x8";
- default:
- diagnostics_.add_error(
- diag::System::Writer,
- "Unknown builtin method: " + std::string(builtin->str()));
- }
-
- return "";
+ switch (builtin->Type()) {
+ case sem::BuiltinType::kAbs:
+ case sem::BuiltinType::kAcos:
+ case sem::BuiltinType::kAll:
+ case sem::BuiltinType::kAny:
+ case sem::BuiltinType::kAsin:
+ case sem::BuiltinType::kAtan:
+ case sem::BuiltinType::kCeil:
+ case sem::BuiltinType::kClamp:
+ case sem::BuiltinType::kCos:
+ case sem::BuiltinType::kCosh:
+ case sem::BuiltinType::kCross:
+ case sem::BuiltinType::kDeterminant:
+ case sem::BuiltinType::kDistance:
+ case sem::BuiltinType::kDot:
+ case sem::BuiltinType::kExp:
+ case sem::BuiltinType::kExp2:
+ case sem::BuiltinType::kFloor:
+ case sem::BuiltinType::kFrexp:
+ case sem::BuiltinType::kLdexp:
+ case sem::BuiltinType::kLength:
+ case sem::BuiltinType::kLog:
+ case sem::BuiltinType::kLog2:
+ case sem::BuiltinType::kMax:
+ case sem::BuiltinType::kMin:
+ case sem::BuiltinType::kModf:
+ case sem::BuiltinType::kNormalize:
+ case sem::BuiltinType::kPow:
+ case sem::BuiltinType::kReflect:
+ case sem::BuiltinType::kRefract:
+ case sem::BuiltinType::kRound:
+ case sem::BuiltinType::kSign:
+ case sem::BuiltinType::kSin:
+ case sem::BuiltinType::kSinh:
+ case sem::BuiltinType::kSqrt:
+ case sem::BuiltinType::kStep:
+ case sem::BuiltinType::kTan:
+ case sem::BuiltinType::kTanh:
+ case sem::BuiltinType::kTranspose:
+ case sem::BuiltinType::kTrunc:
+ return builtin->str();
+ case sem::BuiltinType::kAtan2:
+ return "atan";
+ case sem::BuiltinType::kCountOneBits:
+ return "bitCount";
+ case sem::BuiltinType::kDpdx:
+ return "dFdx";
+ case sem::BuiltinType::kDpdxCoarse:
+ if (version_.IsES()) {
+ return "dFdx";
+ }
+ return "dFdxCoarse";
+ case sem::BuiltinType::kDpdxFine:
+ if (version_.IsES()) {
+ return "dFdx";
+ }
+ return "dFdxFine";
+ case sem::BuiltinType::kDpdy:
+ return "dFdy";
+ case sem::BuiltinType::kDpdyCoarse:
+ if (version_.IsES()) {
+ return "dFdy";
+ }
+ return "dFdyCoarse";
+ case sem::BuiltinType::kDpdyFine:
+ if (version_.IsES()) {
+ return "dFdy";
+ }
+ return "dFdyFine";
+ case sem::BuiltinType::kFaceForward:
+ return "faceforward";
+ case sem::BuiltinType::kFract:
+ return "fract";
+ case sem::BuiltinType::kFma:
+ return "fma";
+ case sem::BuiltinType::kFwidth:
+ case sem::BuiltinType::kFwidthCoarse:
+ case sem::BuiltinType::kFwidthFine:
+ return "fwidth";
+ case sem::BuiltinType::kInverseSqrt:
+ return "inversesqrt";
+ case sem::BuiltinType::kMix:
+ return "mix";
+ case sem::BuiltinType::kPack2x16float:
+ return "packHalf2x16";
+ case sem::BuiltinType::kPack2x16snorm:
+ return "packSnorm2x16";
+ case sem::BuiltinType::kPack2x16unorm:
+ return "packUnorm2x16";
+ case sem::BuiltinType::kPack4x8snorm:
+ return "packSnorm4x8";
+ case sem::BuiltinType::kPack4x8unorm:
+ return "packUnorm4x8";
+ case sem::BuiltinType::kReverseBits:
+ return "bitfieldReverse";
+ case sem::BuiltinType::kSmoothstep:
+ case sem::BuiltinType::kSmoothStep:
+ return "smoothstep";
+ case sem::BuiltinType::kUnpack2x16float:
+ return "unpackHalf2x16";
+ case sem::BuiltinType::kUnpack2x16snorm:
+ return "unpackSnorm2x16";
+ case sem::BuiltinType::kUnpack2x16unorm:
+ return "unpackUnorm2x16";
+ case sem::BuiltinType::kUnpack4x8snorm:
+ return "unpackSnorm4x8";
+ case sem::BuiltinType::kUnpack4x8unorm:
+ return "unpackUnorm4x8";
+ default:
+ diagnostics_.add_error(diag::System::Writer,
+ "Unknown builtin method: " + std::string(builtin->str()));
+ }
+
+ return "";
}
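// Data-driven illustration of the renames handled by the switch above, using a
// small subset of the mappings it contains; the real generator also special-cases
// the ES vs. desktop derivative builtins. This is a sketch, not Tint code.
#include <iostream>
#include <string>
#include <unordered_map>

std::string GlslBuiltinName(const std::string& wgsl_name) {
    static const std::unordered_map<std::string, std::string> renamed = {
        {"atan2", "atan"},
        {"countOneBits", "bitCount"},
        {"dpdx", "dFdx"},
        {"inverseSqrt", "inversesqrt"},
        {"pack2x16float", "packHalf2x16"},
        {"reverseBits", "bitfieldReverse"},
    };
    auto it = renamed.find(wgsl_name);
    return it != renamed.end() ? it->second : wgsl_name;  // most names pass through unchanged
}

int main() {
    std::cout << GlslBuiltinName("atan2") << "\n";         // atan
    std::cout << GlslBuiltinName("countOneBits") << "\n";  // bitCount
    std::cout << GlslBuiltinName("max") << "\n";           // max
}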
bool GeneratorImpl::EmitCase(const ast::CaseStatement* stmt) {
- if (stmt->IsDefault()) {
- line() << "default: {";
- } else {
- for (auto* selector : stmt->selectors) {
- auto out = line();
- out << "case ";
- if (!EmitLiteral(out, selector)) {
- return false;
- }
- out << ":";
- if (selector == stmt->selectors.back()) {
- out << " {";
- }
+ if (stmt->IsDefault()) {
+ line() << "default: {";
+ } else {
+ for (auto* selector : stmt->selectors) {
+ auto out = line();
+ out << "case ";
+ if (!EmitLiteral(out, selector)) {
+ return false;
+ }
+ out << ":";
+ if (selector == stmt->selectors.back()) {
+ out << " {";
+ }
+ }
}
- }
- {
- ScopedIndent si(this);
- if (!EmitStatements(stmt->body->statements)) {
- return false;
- }
- if (!last_is_break_or_fallthrough(stmt->body)) {
- line() << "break;";
+ {
+ ScopedIndent si(this);
+ if (!EmitStatements(stmt->body->statements)) {
+ return false;
+ }
+ if (!last_is_break_or_fallthrough(stmt->body)) {
+ line() << "break;";
+ }
}
- }
- line() << "}";
+ line() << "}";
- return true;
+ return true;
}
bool GeneratorImpl::EmitContinue(const ast::ContinueStatement*) {
- if (!emit_continuing_()) {
- return false;
- }
- line() << "continue;";
- return true;
+ if (!emit_continuing_()) {
+ return false;
+ }
+ line() << "continue;";
+ return true;
}
bool GeneratorImpl::EmitDiscard(const ast::DiscardStatement*) {
- // TODO(dsinclair): Verify this is correct when the discard semantics are
- // defined for WGSL (https://github.com/gpuweb/gpuweb/issues/361)
- line() << "discard;";
- return true;
-}
-
-bool GeneratorImpl::EmitExpression(std::ostream& out,
- const ast::Expression* expr) {
- if (auto* a = expr->As<ast::IndexAccessorExpression>()) {
- return EmitIndexAccessor(out, a);
- }
- if (auto* b = expr->As<ast::BinaryExpression>()) {
- return EmitBinary(out, b);
- }
- if (auto* b = expr->As<ast::BitcastExpression>()) {
- return EmitBitcast(out, b);
- }
- if (auto* c = expr->As<ast::CallExpression>()) {
- return EmitCall(out, c);
- }
- if (auto* i = expr->As<ast::IdentifierExpression>()) {
- return EmitIdentifier(out, i);
- }
- if (auto* l = expr->As<ast::LiteralExpression>()) {
- return EmitLiteral(out, l);
- }
- if (auto* m = expr->As<ast::MemberAccessorExpression>()) {
- return EmitMemberAccessor(out, m);
- }
- if (auto* u = expr->As<ast::UnaryOpExpression>()) {
- return EmitUnaryOp(out, u);
- }
-
- diagnostics_.add_error(
- diag::System::Writer,
- "unknown expression type: " + std::string(expr->TypeInfo().name));
- return false;
-}
-
-bool GeneratorImpl::EmitIdentifier(std::ostream& out,
- const ast::IdentifierExpression* expr) {
- out << builder_.Symbols().NameFor(expr->symbol);
- return true;
+ // TODO(dsinclair): Verify this is correct when the discard semantics are
+ // defined for WGSL (https://github.com/gpuweb/gpuweb/issues/361)
+ line() << "discard;";
+ return true;
}
-bool GeneratorImpl::EmitIf(const ast::IfStatement* stmt) {
- {
- auto out = line();
- out << "if (";
- if (!EmitExpression(out, stmt->condition)) {
- return false;
+bool GeneratorImpl::EmitExpression(std::ostream& out, const ast::Expression* expr) {
+ if (auto* sem = builder_.Sem().Get(expr)) {
+ if (auto constant = sem->ConstantValue()) {
+ return EmitConstant(out, constant);
+ }
}
- out << ") {";
- }
-
- if (!EmitStatementsWithIndent(stmt->body->statements)) {
- return false;
- }
+ return Switch(
+ expr,
+ [&](const ast::IndexAccessorExpression* a) { //
+ return EmitIndexAccessor(out, a);
+ },
+ [&](const ast::BinaryExpression* b) { //
+ return EmitBinary(out, b);
+ },
+ [&](const ast::BitcastExpression* b) { //
+ return EmitBitcast(out, b);
+ },
+ [&](const ast::CallExpression* c) { //
+ return EmitCall(out, c);
+ },
+ [&](const ast::IdentifierExpression* i) { //
+ return EmitIdentifier(out, i);
+ },
+ [&](const ast::LiteralExpression* l) { //
+ return EmitLiteral(out, l);
+ },
+ [&](const ast::MemberAccessorExpression* m) { //
+ return EmitMemberAccessor(out, m);
+ },
+ [&](const ast::UnaryOpExpression* u) { //
+ return EmitUnaryOp(out, u);
+ },
+ [&](Default) { //
+ diagnostics_.add_error(diag::System::Writer, "unknown expression type: " +
+ std::string(expr->TypeInfo().name));
+ return false;
+ });
+}
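// The Switch(...) call above dispatches on the expression's dynamic type. A
// minimal self-contained sketch of that idea, using dynamic_cast and a recursive
// variadic helper; this is not Tint's actual Switch implementation:
#include <iostream>
#include <type_traits>

struct Expr { virtual ~Expr() = default; };
struct Literal : Expr { int value = 0; };
struct Identifier : Expr { const char* name = ""; };

// Deduce the (pointer) parameter type of a unary lambda.
template <typename F>
struct Param : Param<decltype(&F::operator())> {};
template <typename C, typename R, typename Arg>
struct Param<R (C::*)(Arg) const> { using type = std::remove_pointer_t<Arg>; };

inline bool Dispatch(const Expr*) { return false; }  // nothing matched

template <typename F, typename... Rest>
bool Dispatch(const Expr* e, F f, Rest... rest) {
    using T = typename Param<F>::type;
    if (auto* t = dynamic_cast<T*>(e)) { f(t); return true; }
    return Dispatch(e, rest...);
}

int main() {
    Literal lit; lit.value = 42;
    Dispatch(&lit,
             [](const Literal* l) { std::cout << "literal " << l->value << "\n"; },
             [](const Identifier* i) { std::cout << "identifier " << i->name << "\n"; });
}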
- for (auto* e : stmt->else_statements) {
- if (e->condition) {
- line() << "} else {";
- increment_indent();
+bool GeneratorImpl::EmitIdentifier(std::ostream& out, const ast::IdentifierExpression* expr) {
+ out << builder_.Symbols().NameFor(expr->symbol);
+ return true;
+}
- {
+bool GeneratorImpl::EmitIf(const ast::IfStatement* stmt) {
+ {
auto out = line();
out << "if (";
- if (!EmitExpression(out, e->condition)) {
- return false;
+ if (!EmitExpression(out, stmt->condition)) {
+ return false;
}
out << ") {";
- }
- } else {
- line() << "} else {";
}
- if (!EmitStatementsWithIndent(e->body->statements)) {
- return false;
+ if (!EmitStatementsWithIndent(stmt->body->statements)) {
+ return false;
}
- }
-
- line() << "}";
- for (auto* e : stmt->else_statements) {
- if (e->condition) {
- decrement_indent();
- line() << "}";
+ if (stmt->else_statement) {
+ line() << "} else {";
+ if (auto* block = stmt->else_statement->As<ast::BlockStatement>()) {
+ if (!EmitStatementsWithIndent(block->statements)) {
+ return false;
+ }
+ } else {
+ if (!EmitStatementsWithIndent({stmt->else_statement})) {
+ return false;
+ }
+ }
}
- }
- return true;
+ line() << "}";
+
+ return true;
}
bool GeneratorImpl::EmitFunction(const ast::Function* func) {
- auto* sem = builder_.Sem().Get(func);
+ auto* sem = builder_.Sem().Get(func);
- if (ast::HasAttribute<ast::InternalAttribute>(func->attributes)) {
- // An internal function. Do not emit.
- return true;
- }
-
- {
- auto out = line();
- auto name = builder_.Symbols().NameFor(func->symbol);
- if (!EmitType(out, sem->ReturnType(), ast::StorageClass::kNone,
- ast::Access::kReadWrite, "")) {
- return false;
+ if (ast::HasAttribute<ast::InternalAttribute>(func->attributes)) {
+ // An internal function. Do not emit.
+ return true;
}
- out << " " << name << "(";
+ {
+ auto out = line();
+ auto name = builder_.Symbols().NameFor(func->symbol);
+ if (!EmitType(out, sem->ReturnType(), ast::StorageClass::kNone, ast::Access::kReadWrite,
+ "")) {
+ return false;
+ }
- bool first = true;
+ out << " " << name << "(";
- for (auto* v : sem->Parameters()) {
- if (!first) {
- out << ", ";
- }
- first = false;
+ bool first = true;
- auto const* type = v->Type();
+ for (auto* v : sem->Parameters()) {
+ if (!first) {
+ out << ", ";
+ }
+ first = false;
- if (auto* ptr = type->As<sem::Pointer>()) {
- // Transform pointer parameters in to `inout` parameters.
- // The WGSL spec is highly restrictive in what can be passed in pointer
- // parameters, which allows for this transformation. See:
- // https://gpuweb.github.io/gpuweb/wgsl/#function-restriction
- out << "inout ";
- type = ptr->StoreType();
- }
+ auto const* type = v->Type();
- // Note: WGSL only allows for StorageClass::kNone on parameters, however
- // the sanitizer transforms generates load / store functions for storage
- // or uniform buffers. These functions have a buffer parameter with
- // StorageClass::kStorage or StorageClass::kUniform. This is required to
- // correctly translate the parameter to a [RW]ByteAddressBuffer for
- // storage buffers and a uint4[N] for uniform buffers.
- if (!EmitTypeAndName(
- out, type, v->StorageClass(), v->Access(),
- builder_.Symbols().NameFor(v->Declaration()->symbol))) {
- return false;
- }
+ if (auto* ptr = type->As<sem::Pointer>()) {
+            // Transform pointer parameters into `inout` parameters.
+ // The WGSL spec is highly restrictive in what can be passed in pointer
+ // parameters, which allows for this transformation. See:
+ // https://gpuweb.github.io/gpuweb/wgsl/#function-restriction
+ out << "inout ";
+ type = ptr->StoreType();
+ }
+
+ // Note: WGSL only allows for StorageClass::kNone on parameters, however
+        // the sanitizer transforms generate load / store functions for storage
+ // or uniform buffers. These functions have a buffer parameter with
+ // StorageClass::kStorage or StorageClass::kUniform. This is required to
+ // correctly translate the parameter to a [RW]ByteAddressBuffer for
+ // storage buffers and a uint4[N] for uniform buffers.
+ if (!EmitTypeAndName(out, type, v->StorageClass(), v->Access(),
+ builder_.Symbols().NameFor(v->Declaration()->symbol))) {
+ return false;
+ }
+ }
+ out << ") {";
}
- out << ") {";
- }
- if (!EmitStatementsWithIndent(func->body->statements)) {
- return false;
- }
+ if (!EmitStatementsWithIndent(func->body->statements)) {
+ return false;
+ }
- line() << "}";
- line();
+ line() << "}";
+ line();
- return true;
+ return true;
}
bool GeneratorImpl::EmitGlobalVariable(const ast::Variable* global) {
- if (global->is_const) {
- return EmitProgramConstVariable(global);
- }
-
- auto* sem = builder_.Sem().Get(global);
- switch (sem->StorageClass()) {
- case ast::StorageClass::kUniform:
- return EmitUniformVariable(sem);
- case ast::StorageClass::kStorage:
- return EmitStorageVariable(sem);
- case ast::StorageClass::kUniformConstant:
- return EmitHandleVariable(sem);
- case ast::StorageClass::kPrivate:
- return EmitPrivateVariable(sem);
- case ast::StorageClass::kWorkgroup:
- return EmitWorkgroupVariable(sem);
- case ast::StorageClass::kInput:
- case ast::StorageClass::kOutput:
- return EmitIOVariable(sem);
- default:
- break;
- }
-
- TINT_ICE(Writer, diagnostics_)
- << "unhandled storage class " << sem->StorageClass();
- return false;
+ if (global->is_const) {
+ return EmitProgramConstVariable(global);
+ }
+
+ auto* sem = builder_.Sem().Get(global);
+ switch (sem->StorageClass()) {
+ case ast::StorageClass::kUniform:
+ return EmitUniformVariable(sem);
+ case ast::StorageClass::kStorage:
+ return EmitStorageVariable(sem);
+ case ast::StorageClass::kHandle:
+ return EmitHandleVariable(sem);
+ case ast::StorageClass::kPrivate:
+ return EmitPrivateVariable(sem);
+ case ast::StorageClass::kWorkgroup:
+ return EmitWorkgroupVariable(sem);
+ case ast::StorageClass::kInput:
+ case ast::StorageClass::kOutput:
+ return EmitIOVariable(sem);
+ default:
+ break;
+ }
+
+ TINT_ICE(Writer, diagnostics_) << "unhandled storage class " << sem->StorageClass();
+ return false;
}
bool GeneratorImpl::EmitUniformVariable(const sem::Variable* var) {
- auto* decl = var->Declaration();
- auto* type = var->Type()->UnwrapRef();
- auto* str = type->As<sem::Struct>();
- if (!str) {
- TINT_ICE(Writer, builder_.Diagnostics())
- << "storage variable must be of struct type";
- return false;
- }
- ast::VariableBindingPoint bp = decl->BindingPoint();
- {
- auto out = line();
- out << "layout(binding = " << bp.binding->value;
- if (version_.IsDesktop()) {
- out << ", std140";
+ auto* decl = var->Declaration();
+ auto* type = var->Type()->UnwrapRef();
+ auto* str = type->As<sem::Struct>();
+ if (!str) {
+ TINT_ICE(Writer, builder_.Diagnostics()) << "storage variable must be of struct type";
+ return false;
+ }
+ ast::VariableBindingPoint bp = decl->BindingPoint();
+ {
+ auto out = line();
+ out << "layout(binding = " << bp.binding->value;
+ if (version_.IsDesktop()) {
+ out << ", std140";
+ }
+ out << ") uniform " << UniqueIdentifier(StructName(str)) << " {";
}
- out << ") uniform " << UniqueIdentifier(StructName(str)) << " {";
- }
- EmitStructMembers(current_buffer_, str, /* emit_offsets */ true);
- auto name = builder_.Symbols().NameFor(decl->symbol);
- line() << "} " << name << ";";
- line();
+ EmitStructMembers(current_buffer_, str, /* emit_offsets */ true);
+ auto name = builder_.Symbols().NameFor(decl->symbol);
+ line() << "} " << name << ";";
+ line();
- return true;
+ return true;
}
bool GeneratorImpl::EmitStorageVariable(const sem::Variable* var) {
- auto* decl = var->Declaration();
- auto* type = var->Type()->UnwrapRef();
- auto* str = type->As<sem::Struct>();
- if (!str) {
- TINT_ICE(Writer, builder_.Diagnostics())
- << "storage variable must be of struct type";
- return false;
- }
- ast::VariableBindingPoint bp = decl->BindingPoint();
- line() << "layout(binding = " << bp.binding->value << ", std430) buffer "
- << UniqueIdentifier(StructName(str)) << " {";
- EmitStructMembers(current_buffer_, str, /* emit_offsets */ true);
- auto name = builder_.Symbols().NameFor(decl->symbol);
- line() << "} " << name << ";";
- return true;
+ auto* decl = var->Declaration();
+ auto* type = var->Type()->UnwrapRef();
+ auto* str = type->As<sem::Struct>();
+ if (!str) {
+ TINT_ICE(Writer, builder_.Diagnostics()) << "storage variable must be of struct type";
+ return false;
+ }
+ ast::VariableBindingPoint bp = decl->BindingPoint();
+ line() << "layout(binding = " << bp.binding->value << ", std430) buffer "
+ << UniqueIdentifier(StructName(str)) << " {";
+ EmitStructMembers(current_buffer_, str, /* emit_offsets */ true);
+ auto name = builder_.Symbols().NameFor(decl->symbol);
+ line() << "} " << name << ";";
+ return true;
}
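// Both functions above print an interface-block header of the same shape:
// uniform blocks add std140 on desktop GLSL, storage buffers always use std430.
// A sketch of just that string composition, with an illustrative signature:
#include <cstdint>
#include <iostream>
#include <sstream>
#include <string>

std::string BlockHeader(uint32_t binding, bool is_storage, bool desktop,
                        const std::string& block_name) {
    std::ostringstream out;
    out << "layout(binding = " << binding;
    if (is_storage) {
        out << ", std430) buffer ";
    } else {
        if (desktop) out << ", std140";
        out << ") uniform ";
    }
    out << block_name << " {";
    return out.str();
}

int main() {
    std::cout << BlockHeader(0, false, true, "Params") << "\n";   // layout(binding = 0, std140) uniform Params {
    std::cout << BlockHeader(1, true, false, "Outputs") << "\n";  // layout(binding = 1, std430) buffer Outputs {
}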
bool GeneratorImpl::EmitHandleVariable(const sem::Variable* var) {
- auto* decl = var->Declaration();
- auto out = line();
+ auto* decl = var->Declaration();
+ auto out = line();
- auto name = builder_.Symbols().NameFor(decl->symbol);
- auto* type = var->Type()->UnwrapRef();
- if (type->Is<sem::Sampler>()) {
- // GLSL ignores Sampler variables.
- return true;
- }
- if (auto* storage = type->As<sem::StorageTexture>()) {
- out << "layout(" << convert_texel_format_to_glsl(storage->texel_format())
- << ") ";
- }
- if (!EmitTypeAndName(out, type, var->StorageClass(), var->Access(), name)) {
- return false;
- }
+ auto name = builder_.Symbols().NameFor(decl->symbol);
+ auto* type = var->Type()->UnwrapRef();
+ if (type->Is<sem::Sampler>()) {
+ // GLSL ignores Sampler variables.
+ return true;
+ }
+ if (auto* storage = type->As<sem::StorageTexture>()) {
+ out << "layout(" << convert_texel_format_to_glsl(storage->texel_format()) << ") ";
+ }
+ if (!EmitTypeAndName(out, type, var->StorageClass(), var->Access(), name)) {
+ return false;
+ }
- out << ";";
- return true;
+ out << ";";
+ return true;
}
bool GeneratorImpl::EmitPrivateVariable(const sem::Variable* var) {
- auto* decl = var->Declaration();
- auto out = line();
-
- auto name = builder_.Symbols().NameFor(decl->symbol);
- auto* type = var->Type()->UnwrapRef();
- if (!EmitTypeAndName(out, type, var->StorageClass(), var->Access(), name)) {
- return false;
- }
+ auto* decl = var->Declaration();
+ auto out = line();
- out << " = ";
- if (auto* constructor = decl->constructor) {
- if (!EmitExpression(out, constructor)) {
- return false;
+ auto name = builder_.Symbols().NameFor(decl->symbol);
+ auto* type = var->Type()->UnwrapRef();
+ if (!EmitTypeAndName(out, type, var->StorageClass(), var->Access(), name)) {
+ return false;
}
- } else {
- if (!EmitZeroValue(out, var->Type()->UnwrapRef())) {
- return false;
+
+ out << " = ";
+ if (auto* constructor = decl->constructor) {
+ if (!EmitExpression(out, constructor)) {
+ return false;
+ }
+ } else {
+ if (!EmitZeroValue(out, var->Type()->UnwrapRef())) {
+ return false;
+ }
}
- }
- out << ";";
- return true;
+ out << ";";
+ return true;
}
bool GeneratorImpl::EmitWorkgroupVariable(const sem::Variable* var) {
- auto* decl = var->Declaration();
- auto out = line();
+ auto* decl = var->Declaration();
+ auto out = line();
- out << "shared ";
+ out << "shared ";
- auto name = builder_.Symbols().NameFor(decl->symbol);
- auto* type = var->Type()->UnwrapRef();
- if (!EmitTypeAndName(out, type, var->StorageClass(), var->Access(), name)) {
- return false;
- }
+ auto name = builder_.Symbols().NameFor(decl->symbol);
+ auto* type = var->Type()->UnwrapRef();
+ if (!EmitTypeAndName(out, type, var->StorageClass(), var->Access(), name)) {
+ return false;
+ }
- if (auto* constructor = decl->constructor) {
- out << " = ";
- if (!EmitExpression(out, constructor)) {
- return false;
+ if (auto* constructor = decl->constructor) {
+ out << " = ";
+ if (!EmitExpression(out, constructor)) {
+ return false;
+ }
}
- }
- out << ";";
- return true;
+ out << ";";
+ return true;
}
bool GeneratorImpl::EmitIOVariable(const sem::Variable* var) {
- auto* decl = var->Declaration();
+ auto* decl = var->Declaration();
- if (auto* b = ast::GetAttribute<ast::BuiltinAttribute>(decl->attributes)) {
- // Use of gl_SampleID requires the GL_OES_sample_variables extension
- if (RequiresOESSampleVariables(b->builtin)) {
- requires_oes_sample_variables_ = true;
+ if (auto* b = ast::GetAttribute<ast::BuiltinAttribute>(decl->attributes)) {
+ // Use of gl_SampleID requires the GL_OES_sample_variables extension
+ if (RequiresOESSampleVariables(b->builtin)) {
+ requires_oes_sample_variables_ = true;
+ }
+ // Do not emit builtin (gl_) variables.
+ return true;
}
- // Do not emit builtin (gl_) variables.
- return true;
- }
- auto out = line();
- EmitAttributes(out, decl->attributes);
- EmitInterpolationQualifiers(out, decl->attributes);
+ auto out = line();
+ EmitAttributes(out, decl->attributes);
+ EmitInterpolationQualifiers(out, decl->attributes);
- auto name = builder_.Symbols().NameFor(decl->symbol);
- auto* type = var->Type()->UnwrapRef();
- if (!EmitTypeAndName(out, type, var->StorageClass(), var->Access(), name)) {
- return false;
- }
+ auto name = builder_.Symbols().NameFor(decl->symbol);
+ auto* type = var->Type()->UnwrapRef();
+ if (!EmitTypeAndName(out, type, var->StorageClass(), var->Access(), name)) {
+ return false;
+ }
- if (auto* constructor = decl->constructor) {
- out << " = ";
- if (!EmitExpression(out, constructor)) {
- return false;
- }
- }
-
- out << ";";
- return true;
-}
-
-void GeneratorImpl::EmitInterpolationQualifiers(
- std::ostream& out,
- const ast::AttributeList& attributes) {
- for (auto* attr : attributes) {
- if (auto* interpolate = attr->As<ast::InterpolateAttribute>()) {
- switch (interpolate->type) {
- case ast::InterpolationType::kPerspective:
- case ast::InterpolationType::kLinear:
- break;
- case ast::InterpolationType::kFlat:
- out << "flat ";
- break;
- }
- switch (interpolate->sampling) {
- case ast::InterpolationSampling::kCentroid:
- out << "centroid ";
- break;
- case ast::InterpolationSampling::kSample:
- case ast::InterpolationSampling::kCenter:
- case ast::InterpolationSampling::kNone:
- break;
- }
+ if (auto* constructor = decl->constructor) {
+ out << " = ";
+ if (!EmitExpression(out, constructor)) {
+ return false;
+ }
}
- }
-}
-bool GeneratorImpl::EmitAttributes(std::ostream& out,
- const ast::AttributeList& attributes) {
- if (attributes.empty()) {
+ out << ";";
return true;
- }
- bool first = true;
- for (auto* attr : attributes) {
- if (auto* location = attr->As<ast::LocationAttribute>()) {
- out << (first ? "layout(" : ", ");
- out << "location = " << std::to_string(location->value);
- first = false;
+}
+
+void GeneratorImpl::EmitInterpolationQualifiers(std::ostream& out,
+ const ast::AttributeList& attributes) {
+ for (auto* attr : attributes) {
+ if (auto* interpolate = attr->As<ast::InterpolateAttribute>()) {
+ switch (interpolate->type) {
+ case ast::InterpolationType::kPerspective:
+ case ast::InterpolationType::kLinear:
+ break;
+ case ast::InterpolationType::kFlat:
+ out << "flat ";
+ break;
+ }
+ switch (interpolate->sampling) {
+ case ast::InterpolationSampling::kCentroid:
+ out << "centroid ";
+ break;
+ case ast::InterpolationSampling::kSample:
+ case ast::InterpolationSampling::kCenter:
+ case ast::InterpolationSampling::kNone:
+ break;
+ }
+ }
}
- }
- if (!first) {
- out << ") ";
- }
- return true;
}
-bool GeneratorImpl::EmitEntryPointFunction(const ast::Function* func) {
- auto* func_sem = builder_.Sem().Get(func);
+bool GeneratorImpl::EmitAttributes(std::ostream& out, const ast::AttributeList& attributes) {
+ if (attributes.empty()) {
+ return true;
+ }
+ bool first = true;
+ for (auto* attr : attributes) {
+ if (auto* location = attr->As<ast::LocationAttribute>()) {
+ out << (first ? "layout(" : ", ");
+ out << "location = " << std::to_string(location->value);
+ first = false;
+ }
+ }
+ if (!first) {
+ out << ") ";
+ }
+ return true;
+}
- if (func->PipelineStage() == ast::PipelineStage::kFragment) {
- requires_default_precision_qualifier_ = true;
- }
+bool GeneratorImpl::EmitEntryPointFunction(const ast::Function* func) {
+ auto* func_sem = builder_.Sem().Get(func);
- if (func->PipelineStage() == ast::PipelineStage::kCompute) {
- auto out = line();
- // Emit the layout(local_size) attributes.
- auto wgsize = func_sem->WorkgroupSize();
- out << "layout(";
- for (int i = 0; i < 3; i++) {
- if (i > 0) {
- out << ", ";
- }
- out << "local_size_" << (i == 0 ? "x" : i == 1 ? "y" : "z") << " = ";
-
- if (wgsize[i].overridable_const) {
- auto* global = builder_.Sem().Get<sem::GlobalVariable>(
- wgsize[i].overridable_const);
- if (!global->IsOverridable()) {
- TINT_ICE(Writer, builder_.Diagnostics())
- << "expected a pipeline-overridable constant";
- }
- out << kSpecConstantPrefix << global->ConstantId();
- } else {
- out << std::to_string(wgsize[i].value);
- }
+ if (func->PipelineStage() == ast::PipelineStage::kFragment) {
+ requires_default_precision_qualifier_ = true;
}
- out << ") in;";
- }
- // Emit original entry point signature
- {
- auto out = line();
- out << func->return_type->FriendlyName(builder_.Symbols()) << " "
- << builder_.Symbols().NameFor(func->symbol) << "(";
+ if (func->PipelineStage() == ast::PipelineStage::kCompute) {
+ auto out = line();
+ // Emit the layout(local_size) attributes.
+ auto wgsize = func_sem->WorkgroupSize();
+ out << "layout(";
+ for (int i = 0; i < 3; i++) {
+ if (i > 0) {
+ out << ", ";
+ }
+ out << "local_size_" << (i == 0 ? "x" : i == 1 ? "y" : "z") << " = ";
+
+ if (wgsize[i].overridable_const) {
+ auto* global = builder_.Sem().Get<sem::GlobalVariable>(wgsize[i].overridable_const);
+ if (!global->IsOverridable()) {
+ TINT_ICE(Writer, builder_.Diagnostics())
+ << "expected a pipeline-overridable constant";
+ }
+ out << kSpecConstantPrefix << global->ConstantId();
+ } else {
+ out << std::to_string(wgsize[i].value);
+ }
+ }
+ out << ") in;";
+ }
- bool first = true;
+ // Emit original entry point signature
+ {
+ auto out = line();
+ out << func->return_type->FriendlyName(builder_.Symbols()) << " "
+ << builder_.Symbols().NameFor(func->symbol) << "(";
+
+ bool first = true;
+
+ // Emit entry point parameters.
+ for (auto* var : func->params) {
+ auto* sem = builder_.Sem().Get(var);
+ auto* type = sem->Type();
+ if (!type->Is<sem::Struct>()) {
+ // ICE likely indicates that the CanonicalizeEntryPointIO transform was
+ // not run, or a builtin parameter was added after it was run.
+ TINT_ICE(Writer, diagnostics_) << "Unsupported non-struct entry point parameter";
+ }
- // Emit entry point parameters.
- for (auto* var : func->params) {
- auto* sem = builder_.Sem().Get(var);
- auto* type = sem->Type();
- if (!type->Is<sem::Struct>()) {
- // ICE likely indicates that the CanonicalizeEntryPointIO transform was
- // not run, or a builtin parameter was added after it was run.
- TINT_ICE(Writer, diagnostics_)
- << "Unsupported non-struct entry point parameter";
- }
+ if (!first) {
+ out << ", ";
+ }
+ first = false;
- if (!first) {
- out << ", ";
- }
- first = false;
+ if (!EmitTypeAndName(out, type, sem->StorageClass(), sem->Access(),
+ builder_.Symbols().NameFor(var->symbol))) {
+ return false;
+ }
+ }
- if (!EmitTypeAndName(out, type, sem->StorageClass(), sem->Access(),
- builder_.Symbols().NameFor(var->symbol))) {
- return false;
- }
+ out << ") {";
}
- out << ") {";
- }
+ // Emit original entry point function body
+ {
+ ScopedIndent si(this);
+ if (func->PipelineStage() == ast::PipelineStage::kVertex) {
+ line() << "gl_PointSize = 1.0;";
+ }
- // Emit original entry point function body
- {
- ScopedIndent si(this);
+ if (!EmitStatements(func->body->statements)) {
+ return false;
+ }
- if (!EmitStatements(func->body->statements)) {
- return false;
+ if (!Is<ast::ReturnStatement>(func->body->Last())) {
+ ast::ReturnStatement ret(ProgramID(), Source{});
+ if (!EmitStatement(&ret)) {
+ return false;
+ }
+ }
}
- if (!Is<ast::ReturnStatement>(func->body->Last())) {
- ast::ReturnStatement ret(ProgramID(), Source{});
- if (!EmitStatement(&ret)) {
- return false;
- }
- }
- }
+ line() << "}";
+
+ return true;
+}
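// For compute entry points the code above writes a layout(local_size_*) line.
// A sketch of that composition for a fixed (non-overridable) workgroup size;
// the overridable-constant path that prints kSpecConstantPrefix is omitted:
#include <cstdint>
#include <iostream>
#include <sstream>
#include <string>

std::string WorkgroupLayout(uint32_t x, uint32_t y, uint32_t z) {
    const uint32_t size[3] = {x, y, z};
    const char* axis[3] = {"x", "y", "z"};
    std::ostringstream out;
    out << "layout(";
    for (int i = 0; i < 3; i++) {
        if (i > 0) out << ", ";
        out << "local_size_" << axis[i] << " = " << size[i];
    }
    out << ") in;";
    return out.str();
}

int main() {
    // Prints: layout(local_size_x = 8, local_size_y = 8, local_size_z = 1) in;
    std::cout << WorkgroupLayout(8, 8, 1) << "\n";
}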
+
+bool GeneratorImpl::EmitConstant(std::ostream& out, const sem::Constant& constant) {
+ auto emit_bool = [&](size_t element_idx) {
+ out << (constant.Element<AInt>(element_idx) ? "true" : "false");
+ return true;
+ };
+ auto emit_f32 = [&](size_t element_idx) {
+ PrintF32(out, static_cast<float>(constant.Element<AFloat>(element_idx)));
+ return true;
+ };
+ auto emit_i32 = [&](size_t element_idx) {
+ out << constant.Element<AInt>(element_idx).value;
+ return true;
+ };
+ auto emit_u32 = [&](size_t element_idx) {
+ out << constant.Element<AInt>(element_idx).value << "u";
+ return true;
+ };
+ auto emit_vector = [&](const sem::Vector* vec_ty, size_t start, size_t end) {
+ if (!EmitType(out, vec_ty, ast::StorageClass::kNone, ast::Access::kUndefined, "")) {
+ return false;
+ }
+
+ ScopedParen sp(out);
+
+ auto emit_els = [&](auto emit_el) {
+ if (constant.AllEqual(start, end)) {
+ return emit_el(start);
+ }
+ for (size_t i = start; i < end; i++) {
+ if (i > start) {
+ out << ", ";
+ }
+ if (!emit_el(i)) {
+ return false;
+ }
+ }
+ return true;
+ };
+
+ return Switch(
+ vec_ty->type(), //
+ [&](const sem::Bool*) { return emit_els(emit_bool); }, //
+ [&](const sem::F32*) { return emit_els(emit_f32); }, //
+ [&](const sem::I32*) { return emit_els(emit_i32); }, //
+ [&](const sem::U32*) { return emit_els(emit_u32); }, //
+ [&](Default) {
+ diagnostics_.add_error(diag::System::Writer,
+ "unhandled constant vector element type: " +
+ builder_.FriendlyName(vec_ty->type()));
+ return false;
+ });
+ };
+ auto emit_matrix = [&](const sem::Matrix* m) {
+ if (!EmitType(out, constant.Type(), ast::StorageClass::kNone, ast::Access::kUndefined,
+ "")) {
+ return false;
+ }
- line() << "}";
+ ScopedParen sp(out);
- return true;
+ for (size_t column_idx = 0; column_idx < m->columns(); column_idx++) {
+ if (column_idx > 0) {
+ out << ", ";
+ }
+ size_t start = m->rows() * column_idx;
+ size_t end = m->rows() * (column_idx + 1);
+ if (!emit_vector(m->ColumnType(), start, end)) {
+ return false;
+ }
+ }
+ return true;
+ };
+ return Switch(
+ constant.Type(), //
+ [&](const sem::Bool*) { return emit_bool(0); }, //
+ [&](const sem::F32*) { return emit_f32(0); }, //
+ [&](const sem::I32*) { return emit_i32(0); }, //
+ [&](const sem::U32*) { return emit_u32(0); }, //
+ [&](const sem::Vector* v) { return emit_vector(v, 0, constant.ElementCount()); }, //
+ [&](const sem::Matrix* m) { return emit_matrix(m); }, //
+ [&](Default) {
+ diagnostics_.add_error(
+ diag::System::Writer,
+ "unhandled constant type: " + builder_.FriendlyName(constant.Type()));
+ return false;
+ });
}
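// emit_vector above splats a single value when constant.AllEqual(...) holds.
// A sketch of the same optimization over a plain std::vector<float>; names and
// the "1f" formatting are illustrative, not what PrintF32 actually emits:
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

std::string EmitVecConstant(const std::vector<float>& v) {  // assumes v is non-empty
    std::ostringstream out;
    bool all_equal = true;
    for (float f : v) all_equal &= (f == v[0]);
    out << "vec" << v.size() << "(";
    if (all_equal) {
        out << v[0] << "f";  // single-argument constructor splats the value
    } else {
        for (size_t i = 0; i < v.size(); i++) {
            if (i > 0) out << ", ";
            out << v[i] << "f";
        }
    }
    out << ")";
    return out.str();
}

int main() {
    std::cout << EmitVecConstant({1.0f, 1.0f, 1.0f}) << "\n";  // vec3(1f)
    std::cout << EmitVecConstant({1.0f, 2.0f, 3.0f}) << "\n";  // vec3(1f, 2f, 3f)
}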
-bool GeneratorImpl::EmitLiteral(std::ostream& out,
- const ast::LiteralExpression* lit) {
- if (auto* l = lit->As<ast::BoolLiteralExpression>()) {
- out << (l->value ? "true" : "false");
- } else if (auto* fl = lit->As<ast::FloatLiteralExpression>()) {
- if (std::isinf(fl->value)) {
- out << (fl->value >= 0 ? "uintBitsToFloat(0x7f800000u)"
- : "uintBitsToFloat(0xff800000u)");
- } else if (std::isnan(fl->value)) {
- out << "uintBitsToFloat(0x7fc00000u)";
- } else {
- out << FloatToString(fl->value) << "f";
- }
- } else if (auto* sl = lit->As<ast::SintLiteralExpression>()) {
- out << sl->value;
- } else if (auto* ul = lit->As<ast::UintLiteralExpression>()) {
- out << ul->value << "u";
- } else {
- diagnostics_.add_error(diag::System::Writer, "unknown literal type");
- return false;
- }
- return true;
+bool GeneratorImpl::EmitLiteral(std::ostream& out, const ast::LiteralExpression* lit) {
+ return Switch(
+ lit,
+ [&](const ast::BoolLiteralExpression* l) {
+ out << (l->value ? "true" : "false");
+ return true;
+ },
+ [&](const ast::FloatLiteralExpression* l) {
+ PrintF32(out, static_cast<float>(l->value));
+ return true;
+ },
+ [&](const ast::IntLiteralExpression* l) {
+ out << l->value;
+ if (l->suffix == ast::IntLiteralExpression::Suffix::kU) {
+ out << "u";
+ }
+ return true;
+ },
+ [&](Default) {
+ diagnostics_.add_error(diag::System::Writer, "unknown literal type");
+ return false;
+ });
}
bool GeneratorImpl::EmitZeroValue(std::ostream& out, const sem::Type* type) {
- if (type->Is<sem::Bool>()) {
- out << "false";
- } else if (type->Is<sem::F32>()) {
- out << "0.0f";
- } else if (type->Is<sem::I32>()) {
- out << "0";
- } else if (type->Is<sem::U32>()) {
- out << "0u";
- } else if (auto* vec = type->As<sem::Vector>()) {
- if (!EmitType(out, type, ast::StorageClass::kNone, ast::Access::kReadWrite,
- "")) {
- return false;
- }
- ScopedParen sp(out);
- for (uint32_t i = 0; i < vec->Width(); i++) {
- if (i != 0) {
- out << ", ";
- }
- if (!EmitZeroValue(out, vec->type())) {
- return false;
- }
- }
- } else if (auto* mat = type->As<sem::Matrix>()) {
- if (!EmitType(out, type, ast::StorageClass::kNone, ast::Access::kReadWrite,
- "")) {
- return false;
- }
- ScopedParen sp(out);
- for (uint32_t i = 0; i < (mat->rows() * mat->columns()); i++) {
- if (i != 0) {
- out << ", ";
- }
- if (!EmitZeroValue(out, mat->type())) {
+ if (type->Is<sem::Bool>()) {
+ out << "false";
+ } else if (type->Is<sem::F32>()) {
+ out << "0.0f";
+ } else if (type->Is<sem::I32>()) {
+ out << "0";
+ } else if (type->Is<sem::U32>()) {
+ out << "0u";
+ } else if (auto* vec = type->As<sem::Vector>()) {
+ if (!EmitType(out, type, ast::StorageClass::kNone, ast::Access::kReadWrite, "")) {
+ return false;
+ }
+ ScopedParen sp(out);
+ for (uint32_t i = 0; i < vec->Width(); i++) {
+ if (i != 0) {
+ out << ", ";
+ }
+ if (!EmitZeroValue(out, vec->type())) {
+ return false;
+ }
+ }
+ } else if (auto* mat = type->As<sem::Matrix>()) {
+ if (!EmitType(out, type, ast::StorageClass::kNone, ast::Access::kReadWrite, "")) {
+ return false;
+ }
+ ScopedParen sp(out);
+ for (uint32_t i = 0; i < (mat->rows() * mat->columns()); i++) {
+ if (i != 0) {
+ out << ", ";
+ }
+ if (!EmitZeroValue(out, mat->type())) {
+ return false;
+ }
+ }
+ } else if (auto* str = type->As<sem::Struct>()) {
+ if (!EmitType(out, type, ast::StorageClass::kNone, ast::Access::kUndefined, "")) {
+ return false;
+ }
+ bool first = true;
+ out << "(";
+ for (auto* member : str->Members()) {
+ if (!first) {
+ out << ", ";
+ } else {
+ first = false;
+ }
+ EmitZeroValue(out, member->Type());
+ }
+ out << ")";
+ } else if (auto* array = type->As<sem::Array>()) {
+ if (!EmitType(out, type, ast::StorageClass::kNone, ast::Access::kUndefined, "")) {
+ return false;
+ }
+ out << "(";
+ for (uint32_t i = 0; i < array->Count(); i++) {
+ if (i != 0) {
+ out << ", ";
+ }
+ EmitZeroValue(out, array->ElemType());
+ }
+ out << ")";
+ } else {
+ diagnostics_.add_error(diag::System::Writer, "Invalid type for zero emission: " +
+ type->FriendlyName(builder_.Symbols()));
return false;
- }
- }
- } else if (auto* str = type->As<sem::Struct>()) {
- if (!EmitType(out, type, ast::StorageClass::kNone, ast::Access::kUndefined,
- "")) {
- return false;
- }
- bool first = true;
- out << "(";
- for (auto* member : str->Members()) {
- if (!first) {
- out << ", ";
- } else {
- first = false;
- }
- EmitZeroValue(out, member->Type());
}
- out << ")";
- } else if (auto* array = type->As<sem::Array>()) {
- if (!EmitType(out, type, ast::StorageClass::kNone, ast::Access::kUndefined,
- "")) {
- return false;
- }
- out << "(";
- for (uint32_t i = 0; i < array->Count(); i++) {
- if (i != 0) {
- out << ", ";
- }
- EmitZeroValue(out, array->ElemType());
- }
- out << ")";
- } else {
- diagnostics_.add_error(diag::System::Writer,
- "Invalid type for zero emission: " +
- type->FriendlyName(builder_.Symbols()));
- return false;
- }
- return true;
+ return true;
}
bool GeneratorImpl::EmitLoop(const ast::LoopStatement* stmt) {
- auto emit_continuing = [this, stmt]() {
- if (stmt->continuing && !stmt->continuing->Empty()) {
- if (!EmitBlock(stmt->continuing)) {
- return false;
- }
- }
- return true;
- };
+ auto emit_continuing = [this, stmt]() {
+ if (stmt->continuing && !stmt->continuing->Empty()) {
+ if (!EmitBlock(stmt->continuing)) {
+ return false;
+ }
+ }
+ return true;
+ };
- TINT_SCOPED_ASSIGNMENT(emit_continuing_, emit_continuing);
- line() << "while (true) {";
- {
- ScopedIndent si(this);
- if (!EmitStatements(stmt->body->statements)) {
- return false;
- }
- if (!emit_continuing_()) {
- return false;
+ TINT_SCOPED_ASSIGNMENT(emit_continuing_, emit_continuing);
+ line() << "while (true) {";
+ {
+ ScopedIndent si(this);
+ if (!EmitStatements(stmt->body->statements)) {
+ return false;
+ }
+ if (!emit_continuing_()) {
+ return false;
+ }
}
- }
- line() << "}";
+ line() << "}";
- return true;
+ return true;
}
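// TINT_SCOPED_ASSIGNMENT above temporarily redirects emit_continuing_ for the
// duration of the loop body and restores it on scope exit. A generic sketch of
// that RAII idiom (illustrative; not Tint's macro):
#include <functional>
#include <iostream>
#include <utility>

template <typename T>
class ScopedAssignment {
  public:
    ScopedAssignment(T& target, T value) : target_(target), old_(std::move(target)) {
        target_ = std::move(value);
    }
    ~ScopedAssignment() { target_ = std::move(old_); }

  private:
    T& target_;
    T old_;
};

int main() {
    std::function<void()> emit_continuing = [] { std::cout << "outer continuing\n"; };
    {
        ScopedAssignment<std::function<void()>> scope(
            emit_continuing, [] { std::cout << "loop continuing\n"; });
        emit_continuing();  // prints "loop continuing"
    }
    emit_continuing();  // restored: prints "outer continuing"
}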
bool GeneratorImpl::EmitForLoop(const ast::ForLoopStatement* stmt) {
- // Nest a for loop with a new block. In HLSL the initializer scope is not
- // nested by the for-loop, so we may get variable redefinitions.
- line() << "{";
- increment_indent();
- TINT_DEFER({
- decrement_indent();
- line() << "}";
- });
-
- TextBuffer init_buf;
- if (auto* init = stmt->initializer) {
- TINT_SCOPED_ASSIGNMENT(current_buffer_, &init_buf);
- if (!EmitStatement(init)) {
- return false;
- }
- }
-
- TextBuffer cond_pre;
- std::stringstream cond_buf;
- if (auto* cond = stmt->condition) {
- TINT_SCOPED_ASSIGNMENT(current_buffer_, &cond_pre);
- if (!EmitExpression(cond_buf, cond)) {
- return false;
- }
- }
-
- TextBuffer cont_buf;
- if (auto* cont = stmt->continuing) {
- TINT_SCOPED_ASSIGNMENT(current_buffer_, &cont_buf);
- if (!EmitStatement(cont)) {
- return false;
- }
- }
-
- // If the for-loop has a multi-statement conditional and / or continuing, then
- // we cannot emit this as a regular for-loop in HLSL. Instead we need to
- // generate a `while(true)` loop.
- bool emit_as_loop = cond_pre.lines.size() > 0 || cont_buf.lines.size() > 1;
-
- // If the for-loop has multi-statement initializer, or is going to be emitted
- // as a `while(true)` loop, then declare the initializer statement(s) before
- // the loop.
- if (init_buf.lines.size() > 1 || (stmt->initializer && emit_as_loop)) {
- current_buffer_->Append(init_buf);
- init_buf.lines.clear(); // Don't emit the initializer again in the 'for'
- }
-
- if (emit_as_loop) {
- auto emit_continuing = [&]() {
- current_buffer_->Append(cont_buf);
- return true;
- };
-
- TINT_SCOPED_ASSIGNMENT(emit_continuing_, emit_continuing);
- line() << "while (true) {";
+    // Nest the for loop inside a new block so that declarations in the
+    // initializer get their own scope and we avoid variable redefinitions.
+ line() << "{";
increment_indent();
TINT_DEFER({
- decrement_indent();
- line() << "}";
+ decrement_indent();
+ line() << "}";
});
- if (stmt->condition) {
- current_buffer_->Append(cond_pre);
- line() << "if (!(" << cond_buf.str() << ")) { break; }";
+ TextBuffer init_buf;
+ if (auto* init = stmt->initializer) {
+ TINT_SCOPED_ASSIGNMENT(current_buffer_, &init_buf);
+ if (!EmitStatement(init)) {
+ return false;
+ }
}
- if (!EmitStatements(stmt->body->statements)) {
- return false;
+ TextBuffer cond_pre;
+ std::stringstream cond_buf;
+ if (auto* cond = stmt->condition) {
+ TINT_SCOPED_ASSIGNMENT(current_buffer_, &cond_pre);
+ if (!EmitExpression(cond_buf, cond)) {
+ return false;
+ }
}
- if (!emit_continuing_()) {
- return false;
+ TextBuffer cont_buf;
+ if (auto* cont = stmt->continuing) {
+ TINT_SCOPED_ASSIGNMENT(current_buffer_, &cont_buf);
+ if (!EmitStatement(cont)) {
+ return false;
+ }
}
- } else {
- // For-loop can be generated.
- {
- auto out = line();
- out << "for";
- {
- ScopedParen sp(out);
- if (!init_buf.lines.empty()) {
- out << init_buf.lines[0].content << " ";
- } else {
- out << "; ";
+ // If the for-loop has a multi-statement conditional and / or continuing, then
+    // we cannot emit this as a regular for-loop in GLSL. Instead we need to
+ // generate a `while(true)` loop.
+ bool emit_as_loop = cond_pre.lines.size() > 0 || cont_buf.lines.size() > 1;
+
+    // If the for-loop has a multi-statement initializer, or is going to be emitted
+ // as a `while(true)` loop, then declare the initializer statement(s) before
+ // the loop.
+ if (init_buf.lines.size() > 1 || (stmt->initializer && emit_as_loop)) {
+ current_buffer_->Append(init_buf);
+ init_buf.lines.clear(); // Don't emit the initializer again in the 'for'
+ }
+
+ if (emit_as_loop) {
+ auto emit_continuing = [&]() {
+ current_buffer_->Append(cont_buf);
+ return true;
+ };
+
+ TINT_SCOPED_ASSIGNMENT(emit_continuing_, emit_continuing);
+ line() << "while (true) {";
+ increment_indent();
+ TINT_DEFER({
+ decrement_indent();
+ line() << "}";
+ });
+
+ if (stmt->condition) {
+ current_buffer_->Append(cond_pre);
+ line() << "if (!(" << cond_buf.str() << ")) { break; }";
}
- out << cond_buf.str() << "; ";
+ if (!EmitStatements(stmt->body->statements)) {
+ return false;
+ }
- if (!cont_buf.lines.empty()) {
- out << TrimSuffix(cont_buf.lines[0].content, ";");
+ if (!emit_continuing_()) {
+ return false;
}
- }
- out << " {";
- }
- {
- auto emit_continuing = [] { return true; };
- TINT_SCOPED_ASSIGNMENT(emit_continuing_, emit_continuing);
- if (!EmitStatementsWithIndent(stmt->body->statements)) {
- return false;
- }
+ } else {
+ // For-loop can be generated.
+ {
+ auto out = line();
+ out << "for";
+ {
+ ScopedParen sp(out);
+
+ if (!init_buf.lines.empty()) {
+ out << init_buf.lines[0].content << " ";
+ } else {
+ out << "; ";
+ }
+
+ out << cond_buf.str() << "; ";
+
+ if (!cont_buf.lines.empty()) {
+ out << TrimSuffix(cont_buf.lines[0].content, ";");
+ }
+ }
+ out << " {";
+ }
+ {
+ auto emit_continuing = [] { return true; };
+ TINT_SCOPED_ASSIGNMENT(emit_continuing_, emit_continuing);
+ if (!EmitStatementsWithIndent(stmt->body->statements)) {
+ return false;
+ }
+ }
+ line() << "}";
}
- line() << "}";
- }
- return true;
+ return true;
}
-bool GeneratorImpl::EmitMemberAccessor(
- std::ostream& out,
- const ast::MemberAccessorExpression* expr) {
- if (!EmitExpression(out, expr->structure)) {
- return false;
- }
- out << ".";
+bool GeneratorImpl::EmitMemberAccessor(std::ostream& out,
+ const ast::MemberAccessorExpression* expr) {
+ if (!EmitExpression(out, expr->structure)) {
+ return false;
+ }
+ out << ".";
- // Swizzles output the name directly
- if (builder_.Sem().Get(expr)->Is<sem::Swizzle>()) {
- out << builder_.Symbols().NameFor(expr->member->symbol);
- } else if (!EmitExpression(out, expr->member)) {
- return false;
- }
+ // Swizzles output the name directly
+ if (builder_.Sem().Get(expr)->Is<sem::Swizzle>()) {
+ out << builder_.Symbols().NameFor(expr->member->symbol);
+ } else if (!EmitExpression(out, expr->member)) {
+ return false;
+ }
- return true;
+ return true;
}
bool GeneratorImpl::EmitReturn(const ast::ReturnStatement* stmt) {
- if (stmt->value) {
- auto out = line();
- out << "return ";
- if (!EmitExpression(out, stmt->value)) {
- return false;
+ if (stmt->value) {
+ auto out = line();
+ out << "return ";
+ if (!EmitExpression(out, stmt->value)) {
+ return false;
+ }
+ out << ";";
+ } else {
+ line() << "return;";
}
- out << ";";
- } else {
- line() << "return;";
- }
- return true;
+ return true;
}
bool GeneratorImpl::EmitStatement(const ast::Statement* stmt) {
- if (auto* a = stmt->As<ast::AssignmentStatement>()) {
- return EmitAssign(a);
- }
- if (auto* b = stmt->As<ast::BlockStatement>()) {
- return EmitBlock(b);
- }
- if (auto* b = stmt->As<ast::BreakStatement>()) {
- return EmitBreak(b);
- }
- if (auto* c = stmt->As<ast::CallStatement>()) {
- auto out = line();
- if (!EmitCall(out, c->expr)) {
- return false;
+ if (auto* a = stmt->As<ast::AssignmentStatement>()) {
+ return EmitAssign(a);
}
- out << ";";
- return true;
- }
- if (auto* c = stmt->As<ast::ContinueStatement>()) {
- return EmitContinue(c);
- }
- if (auto* d = stmt->As<ast::DiscardStatement>()) {
- return EmitDiscard(d);
- }
- if (stmt->As<ast::FallthroughStatement>()) {
- line() << "/* fallthrough */";
- return true;
- }
- if (auto* i = stmt->As<ast::IfStatement>()) {
- return EmitIf(i);
- }
- if (auto* l = stmt->As<ast::LoopStatement>()) {
- return EmitLoop(l);
- }
- if (auto* l = stmt->As<ast::ForLoopStatement>()) {
- return EmitForLoop(l);
- }
- if (auto* r = stmt->As<ast::ReturnStatement>()) {
- return EmitReturn(r);
- }
- if (auto* s = stmt->As<ast::SwitchStatement>()) {
- return EmitSwitch(s);
- }
- if (auto* v = stmt->As<ast::VariableDeclStatement>()) {
- return EmitVariable(v->variable);
- }
-
- diagnostics_.add_error(
- diag::System::Writer,
- "unknown statement type: " + std::string(stmt->TypeInfo().name));
- return false;
+ if (auto* b = stmt->As<ast::BlockStatement>()) {
+ return EmitBlock(b);
+ }
+ if (auto* b = stmt->As<ast::BreakStatement>()) {
+ return EmitBreak(b);
+ }
+ if (auto* c = stmt->As<ast::CallStatement>()) {
+ auto out = line();
+ if (!EmitCall(out, c->expr)) {
+ return false;
+ }
+ out << ";";
+ return true;
+ }
+ if (auto* c = stmt->As<ast::ContinueStatement>()) {
+ return EmitContinue(c);
+ }
+ if (auto* d = stmt->As<ast::DiscardStatement>()) {
+ return EmitDiscard(d);
+ }
+ if (stmt->As<ast::FallthroughStatement>()) {
+ line() << "/* fallthrough */";
+ return true;
+ }
+ if (auto* i = stmt->As<ast::IfStatement>()) {
+ return EmitIf(i);
+ }
+ if (auto* l = stmt->As<ast::LoopStatement>()) {
+ return EmitLoop(l);
+ }
+ if (auto* l = stmt->As<ast::ForLoopStatement>()) {
+ return EmitForLoop(l);
+ }
+ if (auto* r = stmt->As<ast::ReturnStatement>()) {
+ return EmitReturn(r);
+ }
+ if (auto* s = stmt->As<ast::SwitchStatement>()) {
+ return EmitSwitch(s);
+ }
+ if (auto* v = stmt->As<ast::VariableDeclStatement>()) {
+ return EmitVariable(v->variable);
+ }
+
+ diagnostics_.add_error(diag::System::Writer,
+ "unknown statement type: " + std::string(stmt->TypeInfo().name));
+ return false;
}
bool GeneratorImpl::EmitSwitch(const ast::SwitchStatement* stmt) {
- { // switch(expr) {
- auto out = line();
- out << "switch(";
- if (!EmitExpression(out, stmt->condition)) {
- return false;
+ { // switch(expr) {
+ auto out = line();
+ out << "switch(";
+ if (!EmitExpression(out, stmt->condition)) {
+ return false;
+ }
+ out << ") {";
}
- out << ") {";
- }
- {
- ScopedIndent si(this);
- for (auto* s : stmt->body) {
- if (!EmitCase(s)) {
- return false;
- }
+ {
+ ScopedIndent si(this);
+ for (auto* s : stmt->body) {
+ if (!EmitCase(s)) {
+ return false;
+ }
+ }
}
- }
- line() << "}";
+ line() << "}";
- return true;
+ return true;
}
bool GeneratorImpl::EmitType(std::ostream& out,
@@ -2574,161 +2637,164 @@ bool GeneratorImpl::EmitType(std::ostream& out,
ast::Access access,
const std::string& name,
bool* name_printed /* = nullptr */) {
- if (name_printed) {
- *name_printed = false;
- }
- switch (storage_class) {
- case ast::StorageClass::kInput: {
- out << "in ";
- break;
- }
- case ast::StorageClass::kOutput: {
- out << "out ";
- break;
- }
- case ast::StorageClass::kUniform:
- case ast::StorageClass::kUniformConstant: {
- out << "uniform ";
- break;
- }
- default:
- break;
- }
-
- if (auto* ary = type->As<sem::Array>()) {
- const sem::Type* base_type = ary;
- std::vector<uint32_t> sizes;
- while (auto* arr = base_type->As<sem::Array>()) {
- sizes.push_back(arr->Count());
- base_type = arr->ElemType();
- }
- if (!EmitType(out, base_type, storage_class, access, "")) {
- return false;
- }
- if (!name.empty()) {
- out << " " << name;
- if (name_printed) {
- *name_printed = true;
- }
+ if (name_printed) {
+ *name_printed = false;
}
- for (uint32_t size : sizes) {
- if (size > 0) {
- out << "[" << size << "]";
- } else {
- out << "[]";
- }
+ switch (storage_class) {
+ case ast::StorageClass::kInput: {
+ out << "in ";
+ break;
+ }
+ case ast::StorageClass::kOutput: {
+ out << "out ";
+ break;
+ }
+ case ast::StorageClass::kUniform:
+ case ast::StorageClass::kHandle: {
+ out << "uniform ";
+ break;
+ }
+ default:
+ break;
}
- } else if (type->Is<sem::Bool>()) {
- out << "bool";
- } else if (type->Is<sem::F32>()) {
- out << "float";
- } else if (type->Is<sem::I32>()) {
- out << "int";
- } else if (auto* mat = type->As<sem::Matrix>()) {
- TINT_ASSERT(Writer, mat->type()->Is<sem::F32>());
- out << "mat" << mat->columns();
- if (mat->rows() != mat->columns()) {
- out << "x" << mat->rows();
- }
- } else if (type->Is<sem::Pointer>()) {
- TINT_ICE(Writer, diagnostics_)
- << "Attempting to emit pointer type. These should have been removed "
- "with the InlinePointerLets transform";
- return false;
- } else if (type->Is<sem::Sampler>()) {
- return false;
- } else if (auto* str = type->As<sem::Struct>()) {
- out << StructName(str);
- } else if (auto* tex = type->As<sem::Texture>()) {
- if (tex->Is<sem::ExternalTexture>()) {
- TINT_ICE(Writer, diagnostics_)
- << "Multiplanar external texture transform was not run.";
- return false;
- }
-
- auto* storage = tex->As<sem::StorageTexture>();
- auto* ms = tex->As<sem::MultisampledTexture>();
- auto* depth_ms = tex->As<sem::DepthMultisampledTexture>();
- auto* sampled = tex->As<sem::SampledTexture>();
-
- out << "highp ";
-
- if (storage && storage->access() != ast::Access::kRead) {
- out << "writeonly ";
- }
- auto* subtype = sampled
- ? sampled->type()
- : storage ? storage->type() : ms ? ms->type() : nullptr;
- if (!subtype || subtype->Is<sem::F32>()) {
- } else if (subtype->Is<sem::I32>()) {
- out << "i";
- } else if (subtype->Is<sem::U32>()) {
- out << "u";
- } else {
- TINT_ICE(Writer, diagnostics_) << "Unsupported texture type";
- return false;
- }
-
- out << (storage ? "image" : "sampler");
-
- switch (tex->dim()) {
- case ast::TextureDimension::k1d:
- out << "1D";
- break;
- case ast::TextureDimension::k2d:
- out << ((ms || depth_ms) ? "2DMS" : "2D");
- break;
- case ast::TextureDimension::k2dArray:
- out << ((ms || depth_ms) ? "2DMSArray" : "2DArray");
- break;
- case ast::TextureDimension::k3d:
- out << "3D";
- break;
- case ast::TextureDimension::kCube:
- out << "Cube";
- break;
- case ast::TextureDimension::kCubeArray:
- out << "CubeArray";
- break;
- default:
- TINT_UNREACHABLE(Writer, diagnostics_)
- << "unexpected TextureDimension " << tex->dim();
- return false;
- }
- if (tex->Is<sem::DepthTexture>()) {
- out << "Shadow";
- }
- } else if (type->Is<sem::U32>()) {
- out << "uint";
- } else if (auto* vec = type->As<sem::Vector>()) {
- auto width = vec->Width();
- if (vec->type()->Is<sem::F32>() && width >= 1 && width <= 4) {
- out << "vec" << width;
- } else if (vec->type()->Is<sem::I32>() && width >= 1 && width <= 4) {
- out << "ivec" << width;
- } else if (vec->type()->Is<sem::U32>() && width >= 1 && width <= 4) {
- out << "uvec" << width;
- } else if (vec->type()->Is<sem::Bool>() && width >= 1 && width <= 4) {
- out << "bvec" << width;
+
+ if (auto* ary = type->As<sem::Array>()) {
+ const sem::Type* base_type = ary;
+ std::vector<uint32_t> sizes;
+ while (auto* arr = base_type->As<sem::Array>()) {
+ sizes.push_back(arr->Count());
+ base_type = arr->ElemType();
+ }
+ if (!EmitType(out, base_type, storage_class, access, "")) {
+ return false;
+ }
+ if (!name.empty()) {
+ out << " " << name;
+ if (name_printed) {
+ *name_printed = true;
+ }
+ }
+ for (uint32_t size : sizes) {
+ if (size > 0) {
+ out << "[" << size << "]";
+ } else {
+ out << "[]";
+ }
+ }
+ } else if (type->Is<sem::Bool>()) {
+ out << "bool";
+ } else if (type->Is<sem::F32>()) {
+ out << "float";
+ } else if (type->Is<sem::F16>()) {
+ diagnostics_.add_error(diag::System::Writer, "Type f16 is not completely implemented yet.");
+ return false;
+ } else if (type->Is<sem::I32>()) {
+ out << "int";
+ } else if (auto* mat = type->As<sem::Matrix>()) {
+ TINT_ASSERT(Writer, mat->type()->Is<sem::F32>());
+ out << "mat" << mat->columns();
+ if (mat->rows() != mat->columns()) {
+ out << "x" << mat->rows();
+ }
+ } else if (type->Is<sem::Pointer>()) {
+ TINT_ICE(Writer, diagnostics_)
+ << "Attempting to emit pointer type. These should have been removed "
+ "with the InlinePointerLets transform";
+ return false;
+ } else if (type->Is<sem::Sampler>()) {
+ return false;
+ } else if (auto* str = type->As<sem::Struct>()) {
+ out << StructName(str);
+ } else if (auto* tex = type->As<sem::Texture>()) {
+ if (tex->Is<sem::ExternalTexture>()) {
+ TINT_ICE(Writer, diagnostics_) << "Multiplanar external texture transform was not run.";
+ return false;
+ }
+
+ auto* storage = tex->As<sem::StorageTexture>();
+ auto* ms = tex->As<sem::MultisampledTexture>();
+ auto* depth_ms = tex->As<sem::DepthMultisampledTexture>();
+ auto* sampled = tex->As<sem::SampledTexture>();
+
+ out << "highp ";
+
+ if (storage && storage->access() != ast::Access::kRead) {
+ out << "writeonly ";
+ }
+ auto* subtype = sampled ? sampled->type()
+ : storage ? storage->type()
+ : ms ? ms->type()
+ : nullptr;
+ if (!subtype || subtype->Is<sem::F32>()) {
+ } else if (subtype->Is<sem::I32>()) {
+ out << "i";
+ } else if (subtype->Is<sem::U32>()) {
+ out << "u";
+ } else {
+ TINT_ICE(Writer, diagnostics_) << "Unsupported texture type";
+ return false;
+ }
+
+ out << (storage ? "image" : "sampler");
+
+ switch (tex->dim()) {
+ case ast::TextureDimension::k1d:
+ out << "1D";
+ break;
+ case ast::TextureDimension::k2d:
+ out << ((ms || depth_ms) ? "2DMS" : "2D");
+ break;
+ case ast::TextureDimension::k2dArray:
+ out << ((ms || depth_ms) ? "2DMSArray" : "2DArray");
+ break;
+ case ast::TextureDimension::k3d:
+ out << "3D";
+ break;
+ case ast::TextureDimension::kCube:
+ out << "Cube";
+ break;
+ case ast::TextureDimension::kCubeArray:
+ out << "CubeArray";
+ break;
+ default:
+ TINT_UNREACHABLE(Writer, diagnostics_)
+ << "unexpected TextureDimension " << tex->dim();
+ return false;
+ }
+ if (tex->Is<sem::DepthTexture>()) {
+ out << "Shadow";
+ }
+ } else if (type->Is<sem::U32>()) {
+ out << "uint";
+ } else if (auto* vec = type->As<sem::Vector>()) {
+ auto width = vec->Width();
+ if (vec->type()->Is<sem::F32>() && width >= 1 && width <= 4) {
+ out << "vec" << width;
+ } else if (vec->type()->Is<sem::I32>() && width >= 1 && width <= 4) {
+ out << "ivec" << width;
+ } else if (vec->type()->Is<sem::U32>() && width >= 1 && width <= 4) {
+ out << "uvec" << width;
+ } else if (vec->type()->Is<sem::Bool>() && width >= 1 && width <= 4) {
+ out << "bvec" << width;
+ } else {
+ out << "vector<";
+ if (!EmitType(out, vec->type(), storage_class, access, "")) {
+ return false;
+ }
+ out << ", " << width << ">";
+ }
+ } else if (auto* atomic = type->As<sem::Atomic>()) {
+ if (!EmitType(out, atomic->Type(), storage_class, access, name)) {
+ return false;
+ }
+ } else if (type->Is<sem::Void>()) {
+ out << "void";
} else {
- out << "vector<";
- if (!EmitType(out, vec->type(), storage_class, access, "")) {
+ diagnostics_.add_error(diag::System::Writer, "unknown type in EmitType");
return false;
- }
- out << ", " << width << ">";
- }
- } else if (auto* atomic = type->As<sem::Atomic>()) {
- if (!EmitType(out, atomic->Type(), storage_class, access, name)) {
- return false;
}
- } else if (type->Is<sem::Void>()) {
- out << "void";
- } else {
- diagnostics_.add_error(diag::System::Writer, "unknown type in EmitType");
- return false;
- }
- return true;
+ return true;
}
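// The texture branch above assembles GLSL type names from small pieces: an
// optional i/u prefix, "image" vs. "sampler", a dimension suffix, and "Shadow"
// for depth textures. A simplified sketch (it does not cover every case, and
// the real code folds "MS" and the "highp" precision qualifier into its own logic):
#include <iostream>
#include <string>

std::string GlslTextureName(char prefix, bool storage, const std::string& dim,
                            bool multisampled, bool depth) {
    std::string name;
    if (prefix == 'i' || prefix == 'u') name += prefix;
    name += storage ? "image" : "sampler";
    name += dim;                     // "1D", "2D", "3D", "Cube", ...
    if (multisampled) name += "MS";
    if (depth) name += "Shadow";
    return name;
}

int main() {
    std::cout << GlslTextureName('u', false, "2D", false, false) << "\n";   // usampler2D
    std::cout << GlslTextureName('\0', false, "2D", true, false) << "\n";   // sampler2DMS
    std::cout << GlslTextureName('\0', false, "2D", false, true) << "\n";   // sampler2DShadow
}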
bool GeneratorImpl::EmitTypeAndName(std::ostream& out,
@@ -2736,171 +2802,173 @@ bool GeneratorImpl::EmitTypeAndName(std::ostream& out,
ast::StorageClass storage_class,
ast::Access access,
const std::string& name) {
- bool printed_name = false;
- if (!EmitType(out, type, storage_class, access, name, &printed_name)) {
- return false;
- }
- if (!name.empty() && !printed_name) {
- out << " " << name;
- }
- return true;
+ bool printed_name = false;
+ if (!EmitType(out, type, storage_class, access, name, &printed_name)) {
+ return false;
+ }
+ if (!name.empty() && !printed_name) {
+ out << " " << name;
+ }
+ return true;
}
bool GeneratorImpl::EmitStructType(TextBuffer* b, const sem::Struct* str) {
- auto storage_class_uses = str->StorageClassUsage();
- line(b) << "struct " << StructName(str) << " {";
- EmitStructMembers(b, str, false);
- line(b) << "};";
- line(b);
+ auto storage_class_uses = str->StorageClassUsage();
+ line(b) << "struct " << StructName(str) << " {";
+ EmitStructMembers(b, str, false);
+ line(b) << "};";
+ line(b);
+
+ return true;
+}
- return true;
+bool GeneratorImpl::EmitStructTypeOnce(TextBuffer* buffer, const sem::Struct* str) {
+ auto it = emitted_structs_.emplace(str);
+ if (!it.second) {
+ return true;
+ }
+ return EmitStructType(buffer, str);
}
-bool GeneratorImpl::EmitStructMembers(TextBuffer* b,
- const sem::Struct* str,
- bool emit_offsets) {
- ScopedIndent si(b);
- for (auto* mem : str->Members()) {
- auto name = builder_.Symbols().NameFor(mem->Name());
+bool GeneratorImpl::EmitStructMembers(TextBuffer* b, const sem::Struct* str, bool emit_offsets) {
+ ScopedIndent si(b);
+ for (auto* mem : str->Members()) {
+ auto name = builder_.Symbols().NameFor(mem->Name());
- auto* ty = mem->Type();
+ auto* ty = mem->Type();
- auto out = line(b);
+ auto out = line(b);
+
+ // Note: offsets are unsupported on GLSL ES.
+ if (emit_offsets && version_.IsDesktop() && mem->Offset() != 0) {
+ out << "layout(offset=" << mem->Offset() << ") ";
+ }
+ if (!EmitTypeAndName(out, ty, ast::StorageClass::kNone, ast::Access::kReadWrite, name)) {
+ return false;
+ }
+ out << ";";
+ }
+ return true;
+}
- // Note: offsets are unsupported on GLSL ES.
- if (emit_offsets && version_.IsDesktop() && mem->Offset() != 0) {
- out << "layout(offset=" << mem->Offset() << ") ";
+bool GeneratorImpl::EmitUnaryOp(std::ostream& out, const ast::UnaryOpExpression* expr) {
+ switch (expr->op) {
+ case ast::UnaryOp::kIndirection:
+ case ast::UnaryOp::kAddressOf:
+ return EmitExpression(out, expr->expr);
+ case ast::UnaryOp::kComplement:
+ out << "~";
+ break;
+ case ast::UnaryOp::kNot:
+ if (TypeOf(expr)->UnwrapRef()->is_scalar()) {
+ out << "!";
+ } else {
+ out << "not";
+ }
+ break;
+ case ast::UnaryOp::kNegation:
+ out << "-";
+ break;
}
- if (!EmitTypeAndName(out, ty, ast::StorageClass::kNone,
- ast::Access::kReadWrite, name)) {
- return false;
+ out << "(";
+
+ if (!EmitExpression(out, expr->expr)) {
+ return false;
}
- out << ";";
- }
- return true;
-}
-
-bool GeneratorImpl::EmitUnaryOp(std::ostream& out,
- const ast::UnaryOpExpression* expr) {
- switch (expr->op) {
- case ast::UnaryOp::kIndirection:
- case ast::UnaryOp::kAddressOf:
- return EmitExpression(out, expr->expr);
- case ast::UnaryOp::kComplement:
- out << "~";
- break;
- case ast::UnaryOp::kNot:
- if (TypeOf(expr)->UnwrapRef()->is_scalar()) {
- out << "!";
- } else {
- out << "not";
- }
- break;
- case ast::UnaryOp::kNegation:
- out << "-";
- break;
- }
- out << "(";
-
- if (!EmitExpression(out, expr->expr)) {
- return false;
- }
- out << ")";
+ out << ")";
- return true;
+ return true;
}
bool GeneratorImpl::EmitVariable(const ast::Variable* var) {
- auto* sem = builder_.Sem().Get(var);
- auto* type = sem->Type()->UnwrapRef();
+ auto* sem = builder_.Sem().Get(var);
+ auto* type = sem->Type()->UnwrapRef();
- // TODO(dsinclair): Handle variable attributes
- if (!var->attributes.empty()) {
- diagnostics_.add_error(diag::System::Writer,
- "Variable attributes are not handled yet");
- return false;
- }
+ // TODO(dsinclair): Handle variable attributes
+ if (!var->attributes.empty()) {
+ diagnostics_.add_error(diag::System::Writer, "Variable attributes are not handled yet");
+ return false;
+ }
- auto out = line();
- // TODO(senorblanco): handle const
- if (!EmitTypeAndName(out, type, sem->StorageClass(), sem->Access(),
- builder_.Symbols().NameFor(var->symbol))) {
- return false;
- }
+ auto out = line();
+ // TODO(senorblanco): handle const
+ if (!EmitTypeAndName(out, type, sem->StorageClass(), sem->Access(),
+ builder_.Symbols().NameFor(var->symbol))) {
+ return false;
+ }
- out << " = ";
+ out << " = ";
- if (var->constructor) {
- if (!EmitExpression(out, var->constructor)) {
- return false;
- }
- } else {
- if (!EmitZeroValue(out, type)) {
- return false;
+ if (var->constructor) {
+ if (!EmitExpression(out, var->constructor)) {
+ return false;
+ }
+ } else {
+ if (!EmitZeroValue(out, type)) {
+ return false;
+ }
}
- }
- out << ";";
+ out << ";";
- return true;
+ return true;
}
bool GeneratorImpl::EmitProgramConstVariable(const ast::Variable* var) {
- for (auto* d : var->attributes) {
- if (!d->Is<ast::IdAttribute>()) {
- diagnostics_.add_error(diag::System::Writer,
- "Decorated const values not valid");
- return false;
- }
- }
- if (!var->is_const) {
- diagnostics_.add_error(diag::System::Writer, "Expected a const value");
- return false;
- }
+ for (auto* d : var->attributes) {
+ if (!d->Is<ast::IdAttribute>()) {
+ diagnostics_.add_error(diag::System::Writer, "Decorated const values not valid");
+ return false;
+ }
+ }
+ if (!var->is_const) {
+ diagnostics_.add_error(diag::System::Writer, "Expected a const value");
+ return false;
+ }
- auto* sem = builder_.Sem().Get(var);
- auto* type = sem->Type();
+ auto* sem = builder_.Sem().Get(var);
+ auto* type = sem->Type();
- auto* global = sem->As<sem::GlobalVariable>();
- if (global && global->IsOverridable()) {
- auto const_id = global->ConstantId();
+ auto* global = sem->As<sem::GlobalVariable>();
+ if (global && global->IsOverridable()) {
+ auto const_id = global->ConstantId();
- line() << "#ifndef " << kSpecConstantPrefix << const_id;
+ line() << "#ifndef " << kSpecConstantPrefix << const_id;
- if (var->constructor != nullptr) {
- auto out = line();
- out << "#define " << kSpecConstantPrefix << const_id << " ";
- if (!EmitExpression(out, var->constructor)) {
- return false;
- }
+ if (var->constructor != nullptr) {
+ auto out = line();
+ out << "#define " << kSpecConstantPrefix << const_id << " ";
+ if (!EmitExpression(out, var->constructor)) {
+ return false;
+ }
+ } else {
+ line() << "#error spec constant required for constant id " << const_id;
+ }
+ line() << "#endif";
+ {
+ auto out = line();
+ out << "const ";
+ if (!EmitTypeAndName(out, type, sem->StorageClass(), sem->Access(),
+ builder_.Symbols().NameFor(var->symbol))) {
+ return false;
+ }
+ out << " = " << kSpecConstantPrefix << const_id << ";";
+ }
} else {
- line() << "#error spec constant required for constant id " << const_id;
- }
- line() << "#endif";
- {
- auto out = line();
- out << "const ";
- if (!EmitTypeAndName(out, type, sem->StorageClass(), sem->Access(),
- builder_.Symbols().NameFor(var->symbol))) {
- return false;
- }
- out << " = " << kSpecConstantPrefix << const_id << ";";
- }
- } else {
- auto out = line();
- out << "const ";
- if (!EmitTypeAndName(out, type, sem->StorageClass(), sem->Access(),
- builder_.Symbols().NameFor(var->symbol))) {
- return false;
- }
- out << " = ";
- if (!EmitExpression(out, var->constructor)) {
- return false;
+ auto out = line();
+ out << "const ";
+ if (!EmitTypeAndName(out, type, sem->StorageClass(), sem->Access(),
+ builder_.Symbols().NameFor(var->symbol))) {
+ return false;
+ }
+ out << " = ";
+ if (!EmitExpression(out, var->constructor)) {
+ return false;
+ }
+ out << ";";
}
- out << ";";
- }
- return true;
+ return true;
}
template <typename F>
@@ -2908,84 +2976,82 @@ bool GeneratorImpl::CallBuiltinHelper(std::ostream& out,
const ast::CallExpression* call,
const sem::Builtin* builtin,
F&& build) {
- // Generate the helper function if it hasn't been created already
- auto fn = utils::GetOrCreate(builtins_, builtin, [&]() -> std::string {
- TextBuffer b;
- TINT_DEFER(helpers_.Append(b));
-
- auto fn_name =
- UniqueIdentifier(std::string("tint_") + sem::str(builtin->Type()));
- std::vector<std::string> parameter_names;
- {
- auto decl = line(&b);
- if (!EmitTypeAndName(decl, builtin->ReturnType(),
- ast::StorageClass::kNone, ast::Access::kUndefined,
- fn_name)) {
- return "";
- }
- {
- ScopedParen sp(decl);
- for (auto* param : builtin->Parameters()) {
- if (!parameter_names.empty()) {
- decl << ", ";
- }
- auto param_name = "param_" + std::to_string(parameter_names.size());
- const auto* ty = param->Type();
- if (auto* ptr = ty->As<sem::Pointer>()) {
- decl << "inout ";
- ty = ptr->StoreType();
- }
- if (!EmitTypeAndName(decl, ty, ast::StorageClass::kNone,
- ast::Access::kUndefined, param_name)) {
- return "";
- }
- parameter_names.emplace_back(std::move(param_name));
- }
- }
- decl << " {";
- }
- {
- ScopedIndent si(&b);
- if (!build(&b, parameter_names)) {
- return "";
- }
- }
- line(&b) << "}";
- line(&b);
- return fn_name;
- });
+ // Generate the helper function if it hasn't been created already
+ auto fn = utils::GetOrCreate(builtins_, builtin, [&]() -> std::string {
+ TextBuffer b;
+ TINT_DEFER(helpers_.Append(b));
- if (fn.empty()) {
- return false;
- }
+ auto fn_name = UniqueIdentifier(std::string("tint_") + sem::str(builtin->Type()));
+ std::vector<std::string> parameter_names;
+ {
+ auto decl = line(&b);
+ if (!EmitTypeAndName(decl, builtin->ReturnType(), ast::StorageClass::kNone,
+ ast::Access::kUndefined, fn_name)) {
+ return "";
+ }
+ {
+ ScopedParen sp(decl);
+ for (auto* param : builtin->Parameters()) {
+ if (!parameter_names.empty()) {
+ decl << ", ";
+ }
+ auto param_name = "param_" + std::to_string(parameter_names.size());
+ const auto* ty = param->Type();
+ if (auto* ptr = ty->As<sem::Pointer>()) {
+ decl << "inout ";
+ ty = ptr->StoreType();
+ }
+ if (!EmitTypeAndName(decl, ty, ast::StorageClass::kNone,
+ ast::Access::kUndefined, param_name)) {
+ return "";
+ }
+ parameter_names.emplace_back(std::move(param_name));
+ }
+ }
+ decl << " {";
+ }
+ {
+ ScopedIndent si(&b);
+ if (!build(&b, parameter_names)) {
+ return "";
+ }
+ }
+ line(&b) << "}";
+ line(&b);
+ return fn_name;
+ });
- // Call the helper
- out << fn;
- {
- ScopedParen sp(out);
- bool first = true;
- for (auto* arg : call->args) {
- if (!first) {
- out << ", ";
- }
- first = false;
- if (!EmitExpression(out, arg)) {
+ if (fn.empty()) {
return false;
- }
}
- }
- return true;
+
+ // Call the helper
+ out << fn;
+ {
+ ScopedParen sp(out);
+ bool first = true;
+ for (auto* arg : call->args) {
+ if (!first) {
+ out << ", ";
+ }
+ first = false;
+ if (!EmitExpression(out, arg)) {
+ return false;
+ }
+ }
+ }
+ return true;
}
sem::Type* GeneratorImpl::BoolTypeToUint(const sem::Type* type) {
- auto* u32 = builder_.create<sem::U32>();
- if (type->Is<sem::Bool>()) {
- return u32;
- } else if (auto* vec = type->As<sem::Vector>()) {
- return builder_.create<sem::Vector>(u32, vec->Width());
- } else {
- return nullptr;
- }
+ auto* u32 = builder_.create<sem::U32>();
+ if (type->Is<sem::Bool>()) {
+ return u32;
+ } else if (auto* vec = type->As<sem::Vector>()) {
+ return builder_.create<sem::Vector>(u32, vec->Width());
+ } else {
+ return nullptr;
+ }
}
} // namespace tint::writer::glsl
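
The EmitStructTypeOnce helper added above guards struct emission with a set insert: the first call for a given sem::Struct writes the declaration, later calls return true without emitting anything. The standalone sketch below shows that emit-once pattern in isolation; Emitter and its members are illustrative stand-ins, not part of the Tint API.

#include <iostream>
#include <string>
#include <unordered_set>

// Stand-in for the generator: remembers which struct names were already
// emitted so repeated requests become no-ops.
struct Emitter {
    std::unordered_set<std::string> emitted_structs;

    bool EmitStructTypeOnce(const std::string& name) {
        auto it = emitted_structs.emplace(name);
        if (!it.second) {
            return true;  // already emitted earlier; nothing more to do
        }
        std::cout << "struct " << name << " { /* members */ };\n";
        return true;
    }
};

int main() {
    Emitter e;
    e.EmitStructTypeOnce("Uniforms");
    e.EmitStructTypeOnce("Uniforms");  // second call prints nothing
    return 0;
}
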
diff --git a/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl.h b/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl.h
index 9104182acf4..819c79b973e 100644
--- a/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl.h
+++ b/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl.h
@@ -42,6 +42,7 @@
// Forward declarations
namespace tint::sem {
class Call;
+class Constant;
class Builtin;
class TypeConstructor;
class TypeConversion;
@@ -51,15 +52,15 @@ namespace tint::writer::glsl {
/// The result of sanitizing a program for generation.
struct SanitizedResult {
- /// Constructor
- SanitizedResult();
- /// Destructor
- ~SanitizedResult();
- /// Move constructor
- SanitizedResult(SanitizedResult&&);
+ /// Constructor
+ SanitizedResult();
+ /// Destructor
+ ~SanitizedResult();
+ /// Move constructor
+ SanitizedResult(SanitizedResult&&);
- /// The sanitized program.
- Program program;
+ /// The sanitized program.
+ Program program;
};
/// Sanitize a program in preparation for generating GLSL.
@@ -73,451 +74,445 @@ SanitizedResult Sanitize(const Program* program,
/// Implementation class for GLSL generator
class GeneratorImpl : public TextGenerator {
- public:
- /// Constructor
- /// @param program the program to generate
- /// @param version the GLSL version to use
- GeneratorImpl(const Program* program, const Version& version);
- ~GeneratorImpl();
+ public:
+ /// Constructor
+ /// @param program the program to generate
+ /// @param version the GLSL version to use
+ GeneratorImpl(const Program* program, const Version& version);
+ ~GeneratorImpl();
- /// @returns true on successful generation; false otherwise
- bool Generate();
+ /// @returns true on successful generation; false otherwise
+ bool Generate();
- /// Handles an index accessor expression
- /// @param out the output of the expression stream
- /// @param expr the expression to emit
- /// @returns true if the index accessor was emitted
- bool EmitIndexAccessor(std::ostream& out,
- const ast::IndexAccessorExpression* expr);
- /// Handles an assignment statement
- /// @param stmt the statement to emit
- /// @returns true if the statement was emitted successfully
- bool EmitAssign(const ast::AssignmentStatement* stmt);
- /// Handles emission of bitwise operators (&|) on bool scalars and vectors
- /// @param out the output of the expression stream
- /// @param expr the binary expression
- /// @returns true if the expression was emitted, false otherwise
- bool EmitBitwiseBoolOp(std::ostream& out, const ast::BinaryExpression* expr);
- /// Handles generating a binary expression
- /// @param out the output of the expression stream
- /// @param expr the binary expression
- /// @returns true if the expression was emitted, false otherwise
- bool EmitFloatModulo(std::ostream& out, const ast::BinaryExpression* expr);
- /// Handles generating the modulo operator on float vector operands
- /// @param out the output of the expression stream
- /// @param expr the binary expression
- /// @returns true if the expression was emitted, false otherwise
- bool EmitBinary(std::ostream& out, const ast::BinaryExpression* expr);
- /// Handles generating a bitcast expression
- /// @param out the output of the expression stream
- /// @param expr the expression
- /// @returns true if the binary expression was emitted
- bool EmitVectorRelational(std::ostream& out,
- const ast::BinaryExpression* expr);
- /// Handles generating a vector relational expression
- /// @param out the output of the expression stream
- /// @param expr the expression
- /// @returns true if the vector relational expression was emitted
- bool EmitBitcast(std::ostream& out, const ast::BitcastExpression* expr);
- /// Emits a list of statements
- /// @param stmts the statement list
- /// @returns true if the statements were emitted successfully
- bool EmitStatements(const ast::StatementList& stmts);
- /// Emits a list of statements with an indentation
- /// @param stmts the statement list
- /// @returns true if the statements were emitted successfully
- bool EmitStatementsWithIndent(const ast::StatementList& stmts);
- /// Handles a block statement
- /// @param stmt the statement to emit
- /// @returns true if the statement was emitted successfully
- bool EmitBlock(const ast::BlockStatement* stmt);
- /// Handles a break statement
- /// @param stmt the statement to emit
- /// @returns true if the statement was emitted successfully
- bool EmitBreak(const ast::BreakStatement* stmt);
- /// Handles generating a call expression
- /// @param out the output of the expression stream
- /// @param expr the call expression
- /// @returns true if the call expression is emitted
- bool EmitCall(std::ostream& out, const ast::CallExpression* expr);
- /// Handles generating a function call expression
- /// @param out the output of the expression stream
- /// @param call the call expression
- /// @returns true if the expression is emitted
- bool EmitFunctionCall(std::ostream& out, const sem::Call* call);
- /// Handles generating a builtin call expression
- /// @param out the output of the expression stream
- /// @param call the call expression
- /// @param builtin the builtin being called
- /// @returns true if the expression is emitted
- bool EmitBuiltinCall(std::ostream& out,
- const sem::Call* call,
- const sem::Builtin* builtin);
- /// Handles generating a type conversion expression
- /// @param out the output of the expression stream
- /// @param call the call expression
- /// @param conv the type conversion
- /// @returns true if the expression is emitted
- bool EmitTypeConversion(std::ostream& out,
- const sem::Call* call,
- const sem::TypeConversion* conv);
- /// Handles generating a type constructor expression
- /// @param out the output of the expression stream
- /// @param call the call expression
- /// @param ctor the type constructor
- /// @returns true if the expression is emitted
- bool EmitTypeConstructor(std::ostream& out,
- const sem::Call* call,
- const sem::TypeConstructor* ctor);
- /// Handles generating a barrier builtin call
- /// @param out the output of the expression stream
- /// @param builtin the semantic information for the barrier builtin
- /// @returns true if the call expression is emitted
- bool EmitBarrierCall(std::ostream& out, const sem::Builtin* builtin);
- /// Handles generating an atomic intrinsic call for a storage buffer variable
- /// @param out the output of the expression stream
- /// @param expr the call expression
- /// @param intrinsic the atomic intrinsic
- /// @returns true if the call expression is emitted
- bool EmitStorageAtomicCall(
- std::ostream& out,
- const ast::CallExpression* expr,
- const transform::DecomposeMemoryAccess::Intrinsic* intrinsic);
- /// Handles generating an atomic builtin call for a workgroup variable
- /// @param out the output of the expression stream
- /// @param expr the call expression
- /// @param builtin the semantic information for the atomic builtin
- /// @returns true if the call expression is emitted
- bool EmitWorkgroupAtomicCall(std::ostream& out,
- const ast::CallExpression* expr,
- const sem::Builtin* builtin);
- /// Handles generating an array.length() call
- /// @param out the output of the expression stream
- /// @param expr the call expression
- /// @returns true if the array length expression is emitted
- bool EmitArrayLength(std::ostream& out, const ast::CallExpression* expr);
- /// Handles generating a call to `bitfieldExtract`
- /// @param out the output of the expression stream
- /// @param expr the call expression
- /// @returns true if the expression is emitted
- bool EmitExtractBits(std::ostream& out, const ast::CallExpression* expr);
- /// Handles generating a call to `bitfieldInsert`
- /// @param out the output of the expression stream
- /// @param expr the call expression
- /// @returns true if the expression is emitted
- bool EmitInsertBits(std::ostream& out, const ast::CallExpression* expr);
- /// Emulates 'fma' on GLSL ES, where it is unsupported.
- /// @param out the output of the expression stream
- /// @param expr the fma() expression
- /// @returns true if the expression is emitted
- bool EmitEmulatedFMA(std::ostream& out, const ast::CallExpression* expr);
- /// Create a float literal zero AST node, and associated semantic nodes.
- /// @param stmt the statement which will own the semantic expression node
- /// @returns an AST expression representing 0.0f
- const ast::Expression* CreateF32Zero(const sem::Statement* stmt);
+ /// Record an extension directive within the generator
+ /// @param ext the extension to record
+ /// @returns true if the extension directive was recorded successfully
+ bool RecordExtension(const ast::Enable* ext);
+ /// Handles an index accessor expression
+ /// @param out the output of the expression stream
+ /// @param expr the expression to emit
+ /// @returns true if the index accessor was emitted
+ bool EmitIndexAccessor(std::ostream& out, const ast::IndexAccessorExpression* expr);
+ /// Handles an assignment statement
+ /// @param stmt the statement to emit
+ /// @returns true if the statement was emitted successfully
+ bool EmitAssign(const ast::AssignmentStatement* stmt);
+ /// Handles emission of bitwise operators (&|) on bool scalars and vectors
+ /// @param out the output of the expression stream
+ /// @param expr the binary expression
+ /// @returns true if the expression was emitted, false otherwise
+ bool EmitBitwiseBoolOp(std::ostream& out, const ast::BinaryExpression* expr);
+    /// Handles generating the modulo operator on float vector operands
+ /// @param out the output of the expression stream
+ /// @param expr the binary expression
+ /// @returns true if the expression was emitted, false otherwise
+ bool EmitFloatModulo(std::ostream& out, const ast::BinaryExpression* expr);
+    /// Handles generating a binary expression
+ /// @param out the output of the expression stream
+ /// @param expr the binary expression
+ /// @returns true if the expression was emitted, false otherwise
+ bool EmitBinary(std::ostream& out, const ast::BinaryExpression* expr);
+    /// Handles generating a vector relational expression
+ /// @param out the output of the expression stream
+ /// @param expr the expression
+    /// @returns true if the vector relational expression was emitted
+ bool EmitVectorRelational(std::ostream& out, const ast::BinaryExpression* expr);
+    /// Handles generating a bitcast expression
+ /// @param out the output of the expression stream
+ /// @param expr the expression
+    /// @returns true if the bitcast expression was emitted
+ bool EmitBitcast(std::ostream& out, const ast::BitcastExpression* expr);
+ /// Emits a list of statements
+ /// @param stmts the statement list
+ /// @returns true if the statements were emitted successfully
+ bool EmitStatements(const ast::StatementList& stmts);
+ /// Emits a list of statements with an indentation
+ /// @param stmts the statement list
+ /// @returns true if the statements were emitted successfully
+ bool EmitStatementsWithIndent(const ast::StatementList& stmts);
+ /// Handles a block statement
+ /// @param stmt the statement to emit
+ /// @returns true if the statement was emitted successfully
+ bool EmitBlock(const ast::BlockStatement* stmt);
+ /// Handles a break statement
+ /// @param stmt the statement to emit
+ /// @returns true if the statement was emitted successfully
+ bool EmitBreak(const ast::BreakStatement* stmt);
+ /// Handles generating a call expression
+ /// @param out the output of the expression stream
+ /// @param expr the call expression
+ /// @returns true if the call expression is emitted
+ bool EmitCall(std::ostream& out, const ast::CallExpression* expr);
+ /// Handles generating a function call expression
+ /// @param out the output of the expression stream
+ /// @param call the call expression
+ /// @returns true if the expression is emitted
+ bool EmitFunctionCall(std::ostream& out, const sem::Call* call);
+ /// Handles generating a builtin call expression
+ /// @param out the output of the expression stream
+ /// @param call the call expression
+ /// @param builtin the builtin being called
+ /// @returns true if the expression is emitted
+ bool EmitBuiltinCall(std::ostream& out, const sem::Call* call, const sem::Builtin* builtin);
+ /// Handles generating a type conversion expression
+ /// @param out the output of the expression stream
+ /// @param call the call expression
+ /// @param conv the type conversion
+ /// @returns true if the expression is emitted
+ bool EmitTypeConversion(std::ostream& out,
+ const sem::Call* call,
+ const sem::TypeConversion* conv);
+ /// Handles generating a type constructor expression
+ /// @param out the output of the expression stream
+ /// @param call the call expression
+ /// @param ctor the type constructor
+ /// @returns true if the expression is emitted
+ bool EmitTypeConstructor(std::ostream& out,
+ const sem::Call* call,
+ const sem::TypeConstructor* ctor);
+ /// Handles generating a barrier builtin call
+ /// @param out the output of the expression stream
+ /// @param builtin the semantic information for the barrier builtin
+ /// @returns true if the call expression is emitted
+ bool EmitBarrierCall(std::ostream& out, const sem::Builtin* builtin);
+ /// Handles generating an atomic builtin call for a workgroup variable
+ /// @param out the output of the expression stream
+ /// @param expr the call expression
+ /// @param builtin the semantic information for the atomic builtin
+ /// @returns true if the call expression is emitted
+ bool EmitWorkgroupAtomicCall(std::ostream& out,
+ const ast::CallExpression* expr,
+ const sem::Builtin* builtin);
+ /// Handles generating an array.length() call
+ /// @param out the output of the expression stream
+ /// @param expr the call expression
+ /// @returns true if the array length expression is emitted
+ bool EmitArrayLength(std::ostream& out, const ast::CallExpression* expr);
+ /// Handles generating a call to `bitfieldExtract`
+ /// @param out the output of the expression stream
+ /// @param expr the call expression
+ /// @returns true if the expression is emitted
+ bool EmitExtractBits(std::ostream& out, const ast::CallExpression* expr);
+ /// Handles generating a call to `bitfieldInsert`
+ /// @param out the output of the expression stream
+ /// @param expr the call expression
+ /// @returns true if the expression is emitted
+ bool EmitInsertBits(std::ostream& out, const ast::CallExpression* expr);
+ /// Emulates 'fma' on GLSL ES, where it is unsupported.
+ /// @param out the output of the expression stream
+ /// @param expr the fma() expression
+ /// @returns true if the expression is emitted
+ bool EmitEmulatedFMA(std::ostream& out, const ast::CallExpression* expr);
+ /// Create a float literal zero AST node, and associated semantic nodes.
+ /// @param stmt the statement which will own the semantic expression node
+ /// @returns an AST expression representing 0.0f
+ const ast::Expression* CreateF32Zero(const sem::Statement* stmt);
- /// Handles generating a call to a texture function (`textureSample`,
- /// `textureSampleGrad`, etc)
- /// @param out the output of the expression stream
- /// @param call the call expression
- /// @param builtin the semantic information for the texture builtin
- /// @returns true if the call expression is emitted
- bool EmitTextureCall(std::ostream& out,
- const sem::Call* call,
- const sem::Builtin* builtin);
- /// Handles generating a call to the `select()` builtin
- /// @param out the output of the expression stream
- /// @param expr the call expression
- /// @returns true if the call expression is emitted
- bool EmitCountOneBitsCall(std::ostream& out, const ast::CallExpression* expr);
- /// Handles generating a call to the `countOneBits()` builtin
- /// @param out the output of the expression stream
- /// @param expr the call expression
- /// @returns true if the call expression is emitted
- bool EmitSelectCall(std::ostream& out, const ast::CallExpression* expr);
- /// Handles generating a call to the `dot()` builtin
- /// @param out the output of the expression stream
- /// @param expr the call expression
- /// @param builtin the semantic information for the builtin
- /// @returns true if the call expression is emitted
- bool EmitDotCall(std::ostream& out,
- const ast::CallExpression* expr,
- const sem::Builtin* builtin);
- /// Handles generating a call to the `modf()` builtin
- /// @param out the output of the expression stream
- /// @param expr the call expression
- /// @param builtin the semantic information for the builtin
- /// @returns true if the call expression is emitted
- bool EmitModfCall(std::ostream& out,
- const ast::CallExpression* expr,
- const sem::Builtin* builtin);
- /// Handles generating a call to the `frexp()` builtin
- /// @param out the output of the expression stream
- /// @param expr the call expression
- /// @param builtin the semantic information for the builtin
- /// @returns true if the call expression is emitted
- bool EmitFrexpCall(std::ostream& out,
+ /// Handles generating a call to a texture function (`textureSample`,
+ /// `textureSampleGrad`, etc)
+ /// @param out the output of the expression stream
+ /// @param call the call expression
+ /// @param builtin the semantic information for the texture builtin
+ /// @returns true if the call expression is emitted
+ bool EmitTextureCall(std::ostream& out, const sem::Call* call, const sem::Builtin* builtin);
+    /// Handles generating a call to the `countOneBits()` builtin
+ /// @param out the output of the expression stream
+ /// @param expr the call expression
+ /// @returns true if the call expression is emitted
+ bool EmitCountOneBitsCall(std::ostream& out, const ast::CallExpression* expr);
+    /// Handles generating a call to the `select()` builtin
+ /// @param out the output of the expression stream
+ /// @param expr the call expression
+ /// @returns true if the call expression is emitted
+ bool EmitSelectCall(std::ostream& out, const ast::CallExpression* expr);
+ /// Handles generating a call to the `dot()` builtin
+ /// @param out the output of the expression stream
+ /// @param expr the call expression
+ /// @param builtin the semantic information for the builtin
+ /// @returns true if the call expression is emitted
+ bool EmitDotCall(std::ostream& out,
const ast::CallExpression* expr,
const sem::Builtin* builtin);
- /// Handles generating a call to the `degrees()` builtin
- /// @param out the output of the expression stream
- /// @param expr the call expression
- /// @param builtin the semantic information for the builtin
- /// @returns true if the call expression is emitted
- bool EmitDegreesCall(std::ostream& out,
+ /// Handles generating a call to the `modf()` builtin
+ /// @param out the output of the expression stream
+ /// @param expr the call expression
+ /// @param builtin the semantic information for the builtin
+ /// @returns true if the call expression is emitted
+ bool EmitModfCall(std::ostream& out,
+ const ast::CallExpression* expr,
+ const sem::Builtin* builtin);
+ /// Handles generating a call to the `frexp()` builtin
+ /// @param out the output of the expression stream
+ /// @param expr the call expression
+ /// @param builtin the semantic information for the builtin
+ /// @returns true if the call expression is emitted
+ bool EmitFrexpCall(std::ostream& out,
const ast::CallExpression* expr,
const sem::Builtin* builtin);
- /// Handles generating a call to the `radians()` builtin
- /// @param out the output of the expression stream
- /// @param expr the call expression
- /// @param builtin the semantic information for the builtin
- /// @returns true if the call expression is emitted
- bool EmitRadiansCall(std::ostream& out,
- const ast::CallExpression* expr,
- const sem::Builtin* builtin);
- /// Handles a case statement
- /// @param stmt the statement
- /// @returns true if the statement was emitted successfully
- bool EmitCase(const ast::CaseStatement* stmt);
- /// Handles generating a discard statement
- /// @param stmt the discard statement
- /// @returns true if the statement was successfully emitted
- bool EmitDiscard(const ast::DiscardStatement* stmt);
- /// Handles a continue statement
- /// @param stmt the statement to emit
- /// @returns true if the statement was emitted successfully
- bool EmitContinue(const ast::ContinueStatement* stmt);
- /// Handles generate an Expression
- /// @param out the output of the expression stream
- /// @param expr the expression
- /// @returns true if the expression was emitted
- bool EmitExpression(std::ostream& out, const ast::Expression* expr);
- /// Handles generating a function
- /// @param func the function to generate
- /// @returns true if the function was emitted
- bool EmitFunction(const ast::Function* func);
+ /// Handles generating a call to the `degrees()` builtin
+ /// @param out the output of the expression stream
+ /// @param expr the call expression
+ /// @param builtin the semantic information for the builtin
+ /// @returns true if the call expression is emitted
+ bool EmitDegreesCall(std::ostream& out,
+ const ast::CallExpression* expr,
+ const sem::Builtin* builtin);
+ /// Handles generating a call to the `radians()` builtin
+ /// @param out the output of the expression stream
+ /// @param expr the call expression
+ /// @param builtin the semantic information for the builtin
+ /// @returns true if the call expression is emitted
+ bool EmitRadiansCall(std::ostream& out,
+ const ast::CallExpression* expr,
+ const sem::Builtin* builtin);
+ /// Handles a case statement
+ /// @param stmt the statement
+ /// @returns true if the statement was emitted successfully
+ bool EmitCase(const ast::CaseStatement* stmt);
+ /// Handles generating a discard statement
+ /// @param stmt the discard statement
+ /// @returns true if the statement was successfully emitted
+ bool EmitDiscard(const ast::DiscardStatement* stmt);
+ /// Handles a continue statement
+ /// @param stmt the statement to emit
+ /// @returns true if the statement was emitted successfully
+ bool EmitContinue(const ast::ContinueStatement* stmt);
+    /// Handles generating an expression
+ /// @param out the output of the expression stream
+ /// @param expr the expression
+ /// @returns true if the expression was emitted
+ bool EmitExpression(std::ostream& out, const ast::Expression* expr);
+ /// Handles generating a function
+ /// @param func the function to generate
+ /// @returns true if the function was emitted
+ bool EmitFunction(const ast::Function* func);
- /// Handles emitting a global variable
- /// @param global the global variable
- /// @returns true on success
- bool EmitGlobalVariable(const ast::Variable* global);
+ /// Handles emitting a global variable
+ /// @param global the global variable
+ /// @returns true on success
+ bool EmitGlobalVariable(const ast::Variable* global);
- /// Handles emitting a global variable with the uniform storage class
- /// @param var the global variable
- /// @returns true on success
- bool EmitUniformVariable(const sem::Variable* var);
+ /// Handles emitting a global variable with the uniform storage class
+ /// @param var the global variable
+ /// @returns true on success
+ bool EmitUniformVariable(const sem::Variable* var);
- /// Handles emitting a global variable with the storage storage class
- /// @param var the global variable
- /// @returns true on success
- bool EmitStorageVariable(const sem::Variable* var);
+ /// Handles emitting a global variable with the storage storage class
+ /// @param var the global variable
+ /// @returns true on success
+ bool EmitStorageVariable(const sem::Variable* var);
- /// Handles emitting a global variable with the handle storage class
- /// @param var the global variable
- /// @returns true on success
- bool EmitHandleVariable(const sem::Variable* var);
+ /// Handles emitting a global variable with the handle storage class
+ /// @param var the global variable
+ /// @returns true on success
+ bool EmitHandleVariable(const sem::Variable* var);
- /// Handles emitting a global variable with the private storage class
- /// @param var the global variable
- /// @returns true on success
- bool EmitPrivateVariable(const sem::Variable* var);
+ /// Handles emitting a global variable with the private storage class
+ /// @param var the global variable
+ /// @returns true on success
+ bool EmitPrivateVariable(const sem::Variable* var);
- /// Handles emitting a global variable with the workgroup storage class
- /// @param var the global variable
- /// @returns true on success
- bool EmitWorkgroupVariable(const sem::Variable* var);
+ /// Handles emitting a global variable with the workgroup storage class
+ /// @param var the global variable
+ /// @returns true on success
+ bool EmitWorkgroupVariable(const sem::Variable* var);
- /// Handles emitting a global variable with the input or output storage class
- /// @param var the global variable
- /// @returns true on success
- bool EmitIOVariable(const sem::Variable* var);
+ /// Handles emitting a global variable with the input or output storage class
+ /// @param var the global variable
+ /// @returns true on success
+ bool EmitIOVariable(const sem::Variable* var);
- /// Handles emitting interpolation qualifiers
- /// @param out the output of the expression stream
- /// @param attrs the attributes
- void EmitInterpolationQualifiers(std::ostream& out,
- const ast::AttributeList& attrs);
- /// Handles emitting attributes
- /// @param out the output of the expression stream
- /// @param attrs the attributes
- /// @returns true if the attributes were emitted
- bool EmitAttributes(std::ostream& out, const ast::AttributeList& attrs);
- /// Handles emitting the entry point function
- /// @param func the entry point
- /// @returns true if the entry point function was emitted
- bool EmitEntryPointFunction(const ast::Function* func);
- /// Handles an if statement
- /// @param stmt the statement to emit
- /// @returns true if the statement was successfully emitted
- bool EmitIf(const ast::IfStatement* stmt);
- /// Handles a literal
- /// @param out the output stream
- /// @param lit the literal to emit
- /// @returns true if the literal was successfully emitted
- bool EmitLiteral(std::ostream& out, const ast::LiteralExpression* lit);
- /// Handles a loop statement
- /// @param stmt the statement to emit
- /// @returns true if the statement was emitted
- bool EmitLoop(const ast::LoopStatement* stmt);
- /// Handles a for loop statement
- /// @param stmt the statement to emit
- /// @returns true if the statement was emitted
- bool EmitForLoop(const ast::ForLoopStatement* stmt);
- /// Handles generating an identifier expression
- /// @param out the output of the expression stream
- /// @param expr the identifier expression
- /// @returns true if the identifier was emitted
- bool EmitIdentifier(std::ostream& out, const ast::IdentifierExpression* expr);
- /// Handles a member accessor expression
- /// @param out the output of the expression stream
- /// @param expr the member accessor expression
- /// @returns true if the member accessor was emitted
- bool EmitMemberAccessor(std::ostream& out,
- const ast::MemberAccessorExpression* expr);
- /// Handles return statements
- /// @param stmt the statement to emit
- /// @returns true if the statement was successfully emitted
- bool EmitReturn(const ast::ReturnStatement* stmt);
- /// Handles statement
- /// @param stmt the statement to emit
- /// @returns true if the statement was emitted
- bool EmitStatement(const ast::Statement* stmt);
- /// Handles generating a switch statement
- /// @param stmt the statement to emit
- /// @returns true if the statement was emitted
- bool EmitSwitch(const ast::SwitchStatement* stmt);
- /// Handles generating type
- /// @param out the output stream
- /// @param type the type to generate
- /// @param storage_class the storage class of the variable
- /// @param access the access control type of the variable
- /// @param name the name of the variable, used for array emission.
- /// @param name_printed (optional) if not nullptr and an array was printed
- /// then the boolean is set to true.
- /// @returns true if the type is emitted
- bool EmitType(std::ostream& out,
- const sem::Type* type,
- ast::StorageClass storage_class,
- ast::Access access,
- const std::string& name,
- bool* name_printed = nullptr);
- /// Handles generating type and name
- /// @param out the output stream
- /// @param type the type to generate
- /// @param storage_class the storage class of the variable
- /// @param access the access control type of the variable
- /// @param name the name to emit
- /// @returns true if the type is emitted
- bool EmitTypeAndName(std::ostream& out,
- const sem::Type* type,
- ast::StorageClass storage_class,
- ast::Access access,
- const std::string& name);
- /// Handles generating a structure declaration
- /// @param buffer the text buffer that the type declaration will be written to
- /// @param ty the struct to generate
- /// @returns true if the struct is emitted
- bool EmitStructType(TextBuffer* buffer, const sem::Struct* ty);
- /// Handles generating the members of a structure
- /// @param buffer the text buffer that the struct members will be written to
- /// @param ty the struct to generate
- /// @param emit_offsets whether offsets should be emitted as offset=
- /// @returns true if the struct members are emitted
- bool EmitStructMembers(TextBuffer* buffer,
- const sem::Struct* ty,
- bool emit_offsets);
- /// Handles a unary op expression
- /// @param out the output of the expression stream
- /// @param expr the expression to emit
- /// @returns true if the expression was emitted
- bool EmitUnaryOp(std::ostream& out, const ast::UnaryOpExpression* expr);
- /// Emits the zero value for the given type
- /// @param out the output stream
- /// @param type the type to emit the value for
- /// @returns true if the zero value was successfully emitted.
- bool EmitZeroValue(std::ostream& out, const sem::Type* type);
- /// Handles generating a variable
- /// @param var the variable to generate
- /// @returns true if the variable was emitted
- bool EmitVariable(const ast::Variable* var);
- /// Handles generating a program scope constant variable
- /// @param var the variable to emit
- /// @returns true if the variable was emitted
- bool EmitProgramConstVariable(const ast::Variable* var);
- /// Handles generating a builtin method name
- /// @param builtin the semantic info for the builtin
- /// @returns the name or "" if not valid
- std::string generate_builtin_name(const sem::Builtin* builtin);
- /// Converts a builtin to a gl_ string
- /// @param builtin the builtin to convert
- /// @param stage pipeline stage in which this builtin is used
- /// @returns the string name of the builtin or blank on error
- const char* builtin_to_string(ast::Builtin builtin, ast::PipelineStage stage);
- /// Converts a builtin to a sem::Type appropriate for GLSL.
- /// @param builtin the builtin to convert
- /// @returns the appropriate semantic type or null on error.
- sem::Type* builtin_type(ast::Builtin builtin);
+ /// Handles emitting interpolation qualifiers
+ /// @param out the output of the expression stream
+ /// @param attrs the attributes
+ void EmitInterpolationQualifiers(std::ostream& out, const ast::AttributeList& attrs);
+ /// Handles emitting attributes
+ /// @param out the output of the expression stream
+ /// @param attrs the attributes
+ /// @returns true if the attributes were emitted
+ bool EmitAttributes(std::ostream& out, const ast::AttributeList& attrs);
+ /// Handles emitting the entry point function
+ /// @param func the entry point
+ /// @returns true if the entry point function was emitted
+ bool EmitEntryPointFunction(const ast::Function* func);
+ /// Handles an if statement
+ /// @param stmt the statement to emit
+ /// @returns true if the statement was successfully emitted
+ bool EmitIf(const ast::IfStatement* stmt);
+ /// Handles a constant value
+ /// @param out the output stream
+ /// @param constant the constant value to emit
+ /// @returns true if the constant value was successfully emitted
+ bool EmitConstant(std::ostream& out, const sem::Constant& constant);
+ /// Handles a literal
+ /// @param out the output stream
+ /// @param lit the literal to emit
+ /// @returns true if the literal was successfully emitted
+ bool EmitLiteral(std::ostream& out, const ast::LiteralExpression* lit);
+ /// Handles a loop statement
+ /// @param stmt the statement to emit
+ /// @returns true if the statement was emitted
+ bool EmitLoop(const ast::LoopStatement* stmt);
+ /// Handles a for loop statement
+ /// @param stmt the statement to emit
+ /// @returns true if the statement was emitted
+ bool EmitForLoop(const ast::ForLoopStatement* stmt);
+ /// Handles generating an identifier expression
+ /// @param out the output of the expression stream
+ /// @param expr the identifier expression
+ /// @returns true if the identifier was emitted
+ bool EmitIdentifier(std::ostream& out, const ast::IdentifierExpression* expr);
+ /// Handles a member accessor expression
+ /// @param out the output of the expression stream
+ /// @param expr the member accessor expression
+ /// @returns true if the member accessor was emitted
+ bool EmitMemberAccessor(std::ostream& out, const ast::MemberAccessorExpression* expr);
+ /// Handles return statements
+ /// @param stmt the statement to emit
+ /// @returns true if the statement was successfully emitted
+ bool EmitReturn(const ast::ReturnStatement* stmt);
+    /// Handles a statement
+ /// @param stmt the statement to emit
+ /// @returns true if the statement was emitted
+ bool EmitStatement(const ast::Statement* stmt);
+ /// Handles generating a switch statement
+ /// @param stmt the statement to emit
+ /// @returns true if the statement was emitted
+ bool EmitSwitch(const ast::SwitchStatement* stmt);
+ /// Handles generating type
+ /// @param out the output stream
+ /// @param type the type to generate
+ /// @param storage_class the storage class of the variable
+ /// @param access the access control type of the variable
+ /// @param name the name of the variable, used for array emission.
+ /// @param name_printed (optional) if not nullptr and an array was printed
+ /// then the boolean is set to true.
+ /// @returns true if the type is emitted
+ bool EmitType(std::ostream& out,
+ const sem::Type* type,
+ ast::StorageClass storage_class,
+ ast::Access access,
+ const std::string& name,
+ bool* name_printed = nullptr);
+ /// Handles generating type and name
+ /// @param out the output stream
+ /// @param type the type to generate
+ /// @param storage_class the storage class of the variable
+ /// @param access the access control type of the variable
+ /// @param name the name to emit
+ /// @returns true if the type is emitted
+ bool EmitTypeAndName(std::ostream& out,
+ const sem::Type* type,
+ ast::StorageClass storage_class,
+ ast::Access access,
+ const std::string& name);
+ /// Handles generating a structure declaration
+ /// @param buffer the text buffer that the type declaration will be written to
+ /// @param ty the struct to generate
+ /// @returns true if the struct is emitted
+ bool EmitStructType(TextBuffer* buffer, const sem::Struct* ty);
+    /// Handles generating a structure declaration only the first time it is called for a given
+    /// struct; subsequent calls are no-ops that return true.
+ /// @param buffer the text buffer that the type declaration will be written to
+ /// @param ty the struct to generate
+ /// @returns true if the struct is emitted
+ bool EmitStructTypeOnce(TextBuffer* buffer, const sem::Struct* ty);
+ /// Handles generating the members of a structure
+ /// @param buffer the text buffer that the struct members will be written to
+ /// @param ty the struct to generate
+ /// @param emit_offsets whether offsets should be emitted as offset=
+ /// @returns true if the struct members are emitted
+ bool EmitStructMembers(TextBuffer* buffer, const sem::Struct* ty, bool emit_offsets);
+ /// Handles a unary op expression
+ /// @param out the output of the expression stream
+ /// @param expr the expression to emit
+ /// @returns true if the expression was emitted
+ bool EmitUnaryOp(std::ostream& out, const ast::UnaryOpExpression* expr);
+ /// Emits the zero value for the given type
+ /// @param out the output stream
+ /// @param type the type to emit the value for
+ /// @returns true if the zero value was successfully emitted.
+ bool EmitZeroValue(std::ostream& out, const sem::Type* type);
+ /// Handles generating a variable
+ /// @param var the variable to generate
+ /// @returns true if the variable was emitted
+ bool EmitVariable(const ast::Variable* var);
+ /// Handles generating a program scope constant variable
+ /// @param var the variable to emit
+ /// @returns true if the variable was emitted
+ bool EmitProgramConstVariable(const ast::Variable* var);
+ /// Handles generating a builtin method name
+ /// @param builtin the semantic info for the builtin
+ /// @returns the name or "" if not valid
+ std::string generate_builtin_name(const sem::Builtin* builtin);
+ /// Converts a builtin to a gl_ string
+ /// @param builtin the builtin to convert
+ /// @param stage pipeline stage in which this builtin is used
+ /// @returns the string name of the builtin or blank on error
+ const char* builtin_to_string(ast::Builtin builtin, ast::PipelineStage stage);
+ /// Converts a builtin to a sem::Type appropriate for GLSL.
+ /// @param builtin the builtin to convert
+ /// @returns the appropriate semantic type or null on error.
+ sem::Type* builtin_type(ast::Builtin builtin);
- private:
- enum class VarType { kIn, kOut };
+ private:
+ enum class VarType { kIn, kOut };
- struct EntryPointData {
- std::string struct_name;
- std::string var_name;
- };
+ struct EntryPointData {
+ std::string struct_name;
+ std::string var_name;
+ };
- struct DMAIntrinsic {
- transform::DecomposeMemoryAccess::Intrinsic::Op op;
- transform::DecomposeMemoryAccess::Intrinsic::DataType type;
- bool operator==(const DMAIntrinsic& rhs) const {
- return op == rhs.op && type == rhs.type;
- }
- /// Hasher is a std::hash function for DMAIntrinsic
- struct Hasher {
- /// @param i the DMAIntrinsic to hash
- /// @returns the hash of `i`
- inline std::size_t operator()(const DMAIntrinsic& i) const {
- return utils::Hash(i.op, i.type);
- }
+ struct DMAIntrinsic {
+ transform::DecomposeMemoryAccess::Intrinsic::Op op;
+ transform::DecomposeMemoryAccess::Intrinsic::DataType type;
+ bool operator==(const DMAIntrinsic& rhs) const { return op == rhs.op && type == rhs.type; }
+ /// Hasher is a std::hash function for DMAIntrinsic
+ struct Hasher {
+ /// @param i the DMAIntrinsic to hash
+ /// @returns the hash of `i`
+ inline std::size_t operator()(const DMAIntrinsic& i) const {
+ return utils::Hash(i.op, i.type);
+ }
+ };
};
- };
- /// CallBuiltinHelper will call the builtin helper function, creating it
- /// if it hasn't been built already. If the builtin needs to be built then
- /// CallBuiltinHelper will generate the function signature and will call
- /// `build` to emit the body of the function.
- /// @param out the output of the expression stream
- /// @param call the call expression
- /// @param builtin the semantic information for the builtin
- /// @param build a function with the signature:
- /// `bool(TextBuffer* buffer, const std::vector<std::string>& params)`
- /// Where:
- /// `buffer` is the body of the generated function
- /// `params` is the name of all the generated function parameters
- /// @returns true if the call expression is emitted
- template <typename F>
- bool CallBuiltinHelper(std::ostream& out,
- const ast::CallExpression* call,
- const sem::Builtin* builtin,
- F&& build);
+ /// CallBuiltinHelper will call the builtin helper function, creating it
+ /// if it hasn't been built already. If the builtin needs to be built then
+ /// CallBuiltinHelper will generate the function signature and will call
+ /// `build` to emit the body of the function.
+ /// @param out the output of the expression stream
+ /// @param call the call expression
+ /// @param builtin the semantic information for the builtin
+ /// @param build a function with the signature:
+ /// `bool(TextBuffer* buffer, const std::vector<std::string>& params)`
+ /// Where:
+ /// `buffer` is the body of the generated function
+ /// `params` is the name of all the generated function parameters
+ /// @returns true if the call expression is emitted
+ template <typename F>
+ bool CallBuiltinHelper(std::ostream& out,
+ const ast::CallExpression* call,
+ const sem::Builtin* builtin,
+ F&& build);
- /// Create a uint type corresponding to the given bool or bool vector type.
- /// @param type the bool or bool vector type to convert
- /// @returns the corresponding uint type
- sem::Type* BoolTypeToUint(const sem::Type* type);
+ /// Create a uint type corresponding to the given bool or bool vector type.
+ /// @param type the bool or bool vector type to convert
+ /// @returns the corresponding uint type
+ sem::Type* BoolTypeToUint(const sem::Type* type);
- TextBuffer helpers_; // Helper functions emitted at the top of the output
- std::function<bool()> emit_continuing_;
- std::unordered_map<DMAIntrinsic, std::string, DMAIntrinsic::Hasher>
- dma_intrinsics_;
- std::unordered_map<const sem::Builtin*, std::string> builtins_;
- std::unordered_map<const sem::Struct*, std::string> structure_builders_;
- std::unordered_map<const sem::Vector*, std::string> dynamic_vector_write_;
- std::unordered_map<const sem::Vector*, std::string> int_dot_funcs_;
- std::unordered_map<const sem::Type*, std::string> float_modulo_funcs_;
- bool requires_oes_sample_variables_ = false;
- bool requires_default_precision_qualifier_ = false;
- Version version_;
+ TextBuffer helpers_; // Helper functions emitted at the top of the output
+ std::function<bool()> emit_continuing_;
+ std::unordered_map<DMAIntrinsic, std::string, DMAIntrinsic::Hasher> dma_intrinsics_;
+ std::unordered_map<const sem::Builtin*, std::string> builtins_;
+ std::unordered_map<const sem::Struct*, std::string> structure_builders_;
+ std::unordered_map<const sem::Vector*, std::string> dynamic_vector_write_;
+ std::unordered_map<const sem::Vector*, std::string> int_dot_funcs_;
+ std::unordered_map<const sem::Type*, std::string> float_modulo_funcs_;
+ std::unordered_set<const sem::Struct*> emitted_structs_;
+ bool requires_oes_sample_variables_ = false;
+ bool requires_default_precision_qualifier_ = false;
+ Version version_;
};
} // namespace tint::writer::glsl
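
CallBuiltinHelper, declared above, lazily generates one helper function per builtin and caches its name in builtins_, so repeated calls to the same builtin reuse a single emitted helper. A rough sketch of that memoization shape follows; GetOrCreate here is a local stand-in with the same intent as the tint::utils helper, and all names are hypothetical.

#include <iostream>
#include <string>
#include <unordered_map>

// Local stand-in for a get-or-create lookup: returns the cached value for
// `key`, or invokes `create` once, stores the result, and returns it.
template <typename Map, typename Key, typename Create>
typename Map::mapped_type GetOrCreate(Map& map, const Key& key, Create&& create) {
    auto it = map.find(key);
    if (it != map.end()) {
        return it->second;
    }
    auto value = create();
    map.emplace(key, value);
    return value;
}

int main() {
    std::unordered_map<std::string, std::string> builtins;  // builtin -> helper name
    int next_id = 0;
    auto helper_for = [&](const std::string& builtin) {
        return GetOrCreate(builtins, builtin, [&] {
            std::string fn = "tint_" + builtin + "_" + std::to_string(next_id++);
            std::cout << "// emit body of " << fn << "\n";
            return fn;
        });
    };
    std::cout << helper_for("frexp") << "\n";  // generates and caches the helper
    std::cout << helper_for("frexp") << "\n";  // reuses the cached name
    return 0;
}
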
diff --git a/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_array_accessor_test.cc b/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_array_accessor_test.cc
index 7858f9bd9eb..d28e5607d8c 100644
--- a/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_array_accessor_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_array_accessor_test.cc
@@ -14,21 +14,23 @@
#include "src/tint/writer/glsl/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::glsl {
namespace {
using GlslGeneratorImplTest_Expression = TestHelper;
TEST_F(GlslGeneratorImplTest_Expression, IndexAccessor) {
- Global("ary", ty.array<i32, 10>(), ast::StorageClass::kPrivate);
- auto* expr = IndexAccessor("ary", 5);
- WrapInFunction(expr);
+ Global("ary", ty.array<i32, 10>(), ast::StorageClass::kPrivate);
+ auto* expr = IndexAccessor("ary", 5_i);
+ WrapInFunction(expr);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
- EXPECT_EQ(out.str(), "ary[5]");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
+ EXPECT_EQ(out.str(), "ary[5]");
}
} // namespace
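
The updated test above writes the index as 5_i rather than a bare 5, pulling the typed literal suffix in via `using namespace tint::number_suffixes`, so the argument carries an explicit i32 type. The snippet below sketches the general user-defined-literal mechanism with a hypothetical Int32 wrapper; it is not the actual Tint implementation.

#include <cstdint>
#include <iostream>

// Hypothetical wrapper for a 32-bit signed literal value.
struct Int32 {
    int32_t value;
};

// User-defined literal: 5_i becomes Int32{5}.
constexpr Int32 operator""_i(unsigned long long v) {
    return Int32{static_cast<int32_t>(v)};
}

int main() {
    constexpr Int32 index = 5_i;  // typed literal instead of a bare int
    std::cout << index.value << "\n";
    return 0;
}
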
diff --git a/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_assign_test.cc b/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_assign_test.cc
index c0da89ee96b..fbd9f6262c8 100644
--- a/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_assign_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_assign_test.cc
@@ -20,17 +20,17 @@ namespace {
using GlslGeneratorImplTest_Assign = TestHelper;
TEST_F(GlslGeneratorImplTest_Assign, Emit_Assign) {
- Global("lhs", ty.i32(), ast::StorageClass::kPrivate);
- Global("rhs", ty.i32(), ast::StorageClass::kPrivate);
- auto* assign = Assign("lhs", "rhs");
- WrapInFunction(assign);
+ Global("lhs", ty.i32(), ast::StorageClass::kPrivate);
+ Global("rhs", ty.i32(), ast::StorageClass::kPrivate);
+ auto* assign = Assign("lhs", "rhs");
+ WrapInFunction(assign);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(assign)) << gen.error();
- EXPECT_EQ(gen.result(), " lhs = rhs;\n");
+ ASSERT_TRUE(gen.EmitStatement(assign)) << gen.error();
+ EXPECT_EQ(gen.result(), " lhs = rhs;\n");
}
} // namespace
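
The binary-operator tests in the next file use googletest's value-parameterized pattern: a single TEST_P body runs once for each BinaryData value supplied by INSTANTIATE_TEST_SUITE_P. A stripped-down example of the same shape follows; OpCase and the expected strings are invented for illustration and have no connection to the Tint test helpers.

#include <string>

#include "gtest/gtest.h"

// One test case per operator: the expected output string plus the operator char.
struct OpCase {
    const char* expected;
    char op;
};

class OpFormatTest : public ::testing::TestWithParam<OpCase> {};

// Runs once per OpCase supplied below.
TEST_P(OpFormatTest, Emit) {
    auto param = GetParam();
    std::string got = std::string("(a ") + param.op + " b)";
    EXPECT_EQ(got, param.expected);
}

INSTANTIATE_TEST_SUITE_P(Ops,
                         OpFormatTest,
                         ::testing::Values(OpCase{"(a + b)", '+'},
                                           OpCase{"(a * b)", '*'}));

int main(int argc, char** argv) {
    ::testing::InitGoogleTest(&argc, argv);
    return RUN_ALL_TESTS();
}
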
diff --git a/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_binary_test.cc b/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_binary_test.cc
index 744eef48af9..0d019a98f81 100644
--- a/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_binary_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_binary_test.cc
@@ -16,243 +16,229 @@
#include "src/tint/ast/variable_decl_statement.h"
#include "src/tint/writer/glsl/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::glsl {
namespace {
using GlslGeneratorImplTest_Binary = TestHelper;
struct BinaryData {
- const char* result;
- ast::BinaryOp op;
+ const char* result;
+ ast::BinaryOp op;
};
inline std::ostream& operator<<(std::ostream& out, BinaryData data) {
- out << data.op;
- return out;
+ out << data.op;
+ return out;
}
using GlslBinaryTest = TestParamHelper<BinaryData>;
TEST_P(GlslBinaryTest, Emit_f32) {
- auto params = GetParam();
-
- // Skip ops that are illegal for this type
- if (params.op == ast::BinaryOp::kAnd || params.op == ast::BinaryOp::kOr ||
- params.op == ast::BinaryOp::kXor ||
- params.op == ast::BinaryOp::kShiftLeft ||
- params.op == ast::BinaryOp::kShiftRight ||
- params.op == ast::BinaryOp::kModulo) {
- return;
- }
+ auto params = GetParam();
- Global("left", ty.f32(), ast::StorageClass::kPrivate);
- Global("right", ty.f32(), ast::StorageClass::kPrivate);
+ // Skip ops that are illegal for this type
+ if (params.op == ast::BinaryOp::kAnd || params.op == ast::BinaryOp::kOr ||
+ params.op == ast::BinaryOp::kXor || params.op == ast::BinaryOp::kShiftLeft ||
+ params.op == ast::BinaryOp::kShiftRight || params.op == ast::BinaryOp::kModulo) {
+ return;
+ }
- auto* left = Expr("left");
- auto* right = Expr("right");
+ Global("left", ty.f32(), ast::StorageClass::kPrivate);
+ Global("right", ty.f32(), ast::StorageClass::kPrivate);
- auto* expr = create<ast::BinaryExpression>(params.op, left, right);
+ auto* left = Expr("left");
+ auto* right = Expr("right");
- WrapInFunction(expr);
+ auto* expr = create<ast::BinaryExpression>(params.op, left, right);
- GeneratorImpl& gen = Build();
+ WrapInFunction(expr);
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
- EXPECT_EQ(out.str(), params.result);
+ GeneratorImpl& gen = Build();
+
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
+ EXPECT_EQ(out.str(), params.result);
}
TEST_P(GlslBinaryTest, Emit_u32) {
- auto params = GetParam();
+ auto params = GetParam();
- Global("left", ty.u32(), ast::StorageClass::kPrivate);
- Global("right", ty.u32(), ast::StorageClass::kPrivate);
+ Global("left", ty.u32(), ast::StorageClass::kPrivate);
+ Global("right", ty.u32(), ast::StorageClass::kPrivate);
- auto* left = Expr("left");
- auto* right = Expr("right");
+ auto* left = Expr("left");
+ auto* right = Expr("right");
- auto* expr = create<ast::BinaryExpression>(params.op, left, right);
+ auto* expr = create<ast::BinaryExpression>(params.op, left, right);
- WrapInFunction(expr);
+ WrapInFunction(expr);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
- EXPECT_EQ(out.str(), params.result);
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
+ EXPECT_EQ(out.str(), params.result);
}
TEST_P(GlslBinaryTest, Emit_i32) {
- auto params = GetParam();
+ auto params = GetParam();
- // Skip ops that are illegal for this type
- if (params.op == ast::BinaryOp::kShiftLeft ||
- params.op == ast::BinaryOp::kShiftRight) {
- return;
- }
+ // Skip ops that are illegal for this type
+ if (params.op == ast::BinaryOp::kShiftLeft || params.op == ast::BinaryOp::kShiftRight) {
+ return;
+ }
- Global("left", ty.i32(), ast::StorageClass::kPrivate);
- Global("right", ty.i32(), ast::StorageClass::kPrivate);
+ Global("left", ty.i32(), ast::StorageClass::kPrivate);
+ Global("right", ty.i32(), ast::StorageClass::kPrivate);
- auto* left = Expr("left");
- auto* right = Expr("right");
+ auto* left = Expr("left");
+ auto* right = Expr("right");
- auto* expr = create<ast::BinaryExpression>(params.op, left, right);
+ auto* expr = create<ast::BinaryExpression>(params.op, left, right);
- WrapInFunction(expr);
+ WrapInFunction(expr);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
- EXPECT_EQ(out.str(), params.result);
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
+ EXPECT_EQ(out.str(), params.result);
}
INSTANTIATE_TEST_SUITE_P(
GlslGeneratorImplTest,
GlslBinaryTest,
- testing::Values(
- BinaryData{"(left & right)", ast::BinaryOp::kAnd},
- BinaryData{"(left | right)", ast::BinaryOp::kOr},
- BinaryData{"(left ^ right)", ast::BinaryOp::kXor},
- BinaryData{"(left == right)", ast::BinaryOp::kEqual},
- BinaryData{"(left != right)", ast::BinaryOp::kNotEqual},
- BinaryData{"(left < right)", ast::BinaryOp::kLessThan},
- BinaryData{"(left > right)", ast::BinaryOp::kGreaterThan},
- BinaryData{"(left <= right)", ast::BinaryOp::kLessThanEqual},
- BinaryData{"(left >= right)", ast::BinaryOp::kGreaterThanEqual},
- BinaryData{"(left << right)", ast::BinaryOp::kShiftLeft},
- BinaryData{"(left >> right)", ast::BinaryOp::kShiftRight},
- BinaryData{"(left + right)", ast::BinaryOp::kAdd},
- BinaryData{"(left - right)", ast::BinaryOp::kSubtract},
- BinaryData{"(left * right)", ast::BinaryOp::kMultiply},
- BinaryData{"(left / right)", ast::BinaryOp::kDivide},
- BinaryData{"(left % right)", ast::BinaryOp::kModulo}));
+ testing::Values(BinaryData{"(left & right)", ast::BinaryOp::kAnd},
+ BinaryData{"(left | right)", ast::BinaryOp::kOr},
+ BinaryData{"(left ^ right)", ast::BinaryOp::kXor},
+ BinaryData{"(left == right)", ast::BinaryOp::kEqual},
+ BinaryData{"(left != right)", ast::BinaryOp::kNotEqual},
+ BinaryData{"(left < right)", ast::BinaryOp::kLessThan},
+ BinaryData{"(left > right)", ast::BinaryOp::kGreaterThan},
+ BinaryData{"(left <= right)", ast::BinaryOp::kLessThanEqual},
+ BinaryData{"(left >= right)", ast::BinaryOp::kGreaterThanEqual},
+ BinaryData{"(left << right)", ast::BinaryOp::kShiftLeft},
+ BinaryData{"(left >> right)", ast::BinaryOp::kShiftRight},
+ BinaryData{"(left + right)", ast::BinaryOp::kAdd},
+ BinaryData{"(left - right)", ast::BinaryOp::kSubtract},
+ BinaryData{"(left * right)", ast::BinaryOp::kMultiply},
+ BinaryData{"(left / right)", ast::BinaryOp::kDivide},
+ BinaryData{"(left % right)", ast::BinaryOp::kModulo}));
TEST_F(GlslGeneratorImplTest_Binary, Multiply_VectorScalar) {
- auto* lhs = vec3<f32>(1.f, 1.f, 1.f);
- auto* rhs = Expr(1.f);
+ auto* lhs = vec3<f32>(1_f, 1_f, 1_f);
+ auto* rhs = Expr(1_f);
- auto* expr =
- create<ast::BinaryExpression>(ast::BinaryOp::kMultiply, lhs, rhs);
+ auto* expr = create<ast::BinaryExpression>(ast::BinaryOp::kMultiply, lhs, rhs);
- WrapInFunction(expr);
+ WrapInFunction(expr);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- EXPECT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
- EXPECT_EQ(out.str(),
- "(vec3(1.0f, 1.0f, 1.0f) * "
- "1.0f)");
+ std::stringstream out;
+ EXPECT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
+ EXPECT_EQ(out.str(), "(vec3(1.0f) * 1.0f)");
}
TEST_F(GlslGeneratorImplTest_Binary, Multiply_ScalarVector) {
- auto* lhs = Expr(1.f);
- auto* rhs = vec3<f32>(1.f, 1.f, 1.f);
+ auto* lhs = Expr(1_f);
+ auto* rhs = vec3<f32>(1_f, 1_f, 1_f);
- auto* expr =
- create<ast::BinaryExpression>(ast::BinaryOp::kMultiply, lhs, rhs);
+ auto* expr = create<ast::BinaryExpression>(ast::BinaryOp::kMultiply, lhs, rhs);
- WrapInFunction(expr);
+ WrapInFunction(expr);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- EXPECT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
- EXPECT_EQ(out.str(),
- "(1.0f * vec3(1.0f, 1.0f, "
- "1.0f))");
+ std::stringstream out;
+ EXPECT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
+ EXPECT_EQ(out.str(), "(1.0f * vec3(1.0f))");
}
TEST_F(GlslGeneratorImplTest_Binary, Multiply_MatrixScalar) {
- Global("mat", ty.mat3x3<f32>(), ast::StorageClass::kPrivate);
- auto* lhs = Expr("mat");
- auto* rhs = Expr(1.f);
+ Global("mat", ty.mat3x3<f32>(), ast::StorageClass::kPrivate);
+ auto* lhs = Expr("mat");
+ auto* rhs = Expr(1_f);
- auto* expr =
- create<ast::BinaryExpression>(ast::BinaryOp::kMultiply, lhs, rhs);
- WrapInFunction(expr);
+ auto* expr = create<ast::BinaryExpression>(ast::BinaryOp::kMultiply, lhs, rhs);
+ WrapInFunction(expr);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- EXPECT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
- EXPECT_EQ(out.str(), "(mat * 1.0f)");
+ std::stringstream out;
+ EXPECT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
+ EXPECT_EQ(out.str(), "(mat * 1.0f)");
}
TEST_F(GlslGeneratorImplTest_Binary, Multiply_ScalarMatrix) {
- Global("mat", ty.mat3x3<f32>(), ast::StorageClass::kPrivate);
- auto* lhs = Expr(1.f);
- auto* rhs = Expr("mat");
+ Global("mat", ty.mat3x3<f32>(), ast::StorageClass::kPrivate);
+ auto* lhs = Expr(1_f);
+ auto* rhs = Expr("mat");
- auto* expr =
- create<ast::BinaryExpression>(ast::BinaryOp::kMultiply, lhs, rhs);
- WrapInFunction(expr);
+ auto* expr = create<ast::BinaryExpression>(ast::BinaryOp::kMultiply, lhs, rhs);
+ WrapInFunction(expr);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- EXPECT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
- EXPECT_EQ(out.str(), "(1.0f * mat)");
+ std::stringstream out;
+ EXPECT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
+ EXPECT_EQ(out.str(), "(1.0f * mat)");
}
TEST_F(GlslGeneratorImplTest_Binary, Multiply_MatrixVector) {
- Global("mat", ty.mat3x3<f32>(), ast::StorageClass::kPrivate);
- auto* lhs = Expr("mat");
- auto* rhs = vec3<f32>(1.f, 1.f, 1.f);
+ Global("mat", ty.mat3x3<f32>(), ast::StorageClass::kPrivate);
+ auto* lhs = Expr("mat");
+ auto* rhs = vec3<f32>(1_f, 1_f, 1_f);
- auto* expr =
- create<ast::BinaryExpression>(ast::BinaryOp::kMultiply, lhs, rhs);
- WrapInFunction(expr);
+ auto* expr = create<ast::BinaryExpression>(ast::BinaryOp::kMultiply, lhs, rhs);
+ WrapInFunction(expr);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- EXPECT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
- EXPECT_EQ(out.str(), "(mat * vec3(1.0f, 1.0f, 1.0f))");
+ std::stringstream out;
+ EXPECT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
+ EXPECT_EQ(out.str(), "(mat * vec3(1.0f))");
}
TEST_F(GlslGeneratorImplTest_Binary, Multiply_VectorMatrix) {
- Global("mat", ty.mat3x3<f32>(), ast::StorageClass::kPrivate);
- auto* lhs = vec3<f32>(1.f, 1.f, 1.f);
- auto* rhs = Expr("mat");
+ Global("mat", ty.mat3x3<f32>(), ast::StorageClass::kPrivate);
+ auto* lhs = vec3<f32>(1_f, 1_f, 1_f);
+ auto* rhs = Expr("mat");
- auto* expr =
- create<ast::BinaryExpression>(ast::BinaryOp::kMultiply, lhs, rhs);
- WrapInFunction(expr);
+ auto* expr = create<ast::BinaryExpression>(ast::BinaryOp::kMultiply, lhs, rhs);
+ WrapInFunction(expr);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- EXPECT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
- EXPECT_EQ(out.str(), "(vec3(1.0f, 1.0f, 1.0f) * mat)");
+ std::stringstream out;
+ EXPECT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
+ EXPECT_EQ(out.str(), "(vec3(1.0f) * mat)");
}
TEST_F(GlslGeneratorImplTest_Binary, Multiply_MatrixMatrix) {
- Global("lhs", ty.mat3x3<f32>(), ast::StorageClass::kPrivate);
- Global("rhs", ty.mat3x3<f32>(), ast::StorageClass::kPrivate);
+ Global("lhs", ty.mat3x3<f32>(), ast::StorageClass::kPrivate);
+ Global("rhs", ty.mat3x3<f32>(), ast::StorageClass::kPrivate);
- auto* expr = create<ast::BinaryExpression>(ast::BinaryOp::kMultiply,
- Expr("lhs"), Expr("rhs"));
- WrapInFunction(expr);
+ auto* expr = create<ast::BinaryExpression>(ast::BinaryOp::kMultiply, Expr("lhs"), Expr("rhs"));
+ WrapInFunction(expr);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- EXPECT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
- EXPECT_EQ(out.str(), "(lhs * rhs)");
+ std::stringstream out;
+ EXPECT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
+ EXPECT_EQ(out.str(), "(lhs * rhs)");
}
TEST_F(GlslGeneratorImplTest_Binary, Logical_And) {
- Global("a", ty.bool_(), ast::StorageClass::kPrivate);
- Global("b", ty.bool_(), ast::StorageClass::kPrivate);
+ Global("a", ty.bool_(), ast::StorageClass::kPrivate);
+ Global("b", ty.bool_(), ast::StorageClass::kPrivate);
- auto* expr = create<ast::BinaryExpression>(ast::BinaryOp::kLogicalAnd,
- Expr("a"), Expr("b"));
- WrapInFunction(expr);
+ auto* expr = create<ast::BinaryExpression>(ast::BinaryOp::kLogicalAnd, Expr("a"), Expr("b"));
+ WrapInFunction(expr);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
- EXPECT_EQ(out.str(), "(tint_tmp)");
- EXPECT_EQ(gen.result(), R"(bool tint_tmp = a;
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
+ EXPECT_EQ(out.str(), "(tint_tmp)");
+ EXPECT_EQ(gen.result(), R"(bool tint_tmp = a;
if (tint_tmp) {
tint_tmp = b;
}
@@ -260,56 +246,52 @@ if (tint_tmp) {
}
TEST_F(GlslGeneratorImplTest_Binary, ModF32) {
- Global("a", ty.f32(), ast::StorageClass::kPrivate);
- Global("b", ty.f32(), ast::StorageClass::kPrivate);
+ Global("a", ty.f32(), ast::StorageClass::kPrivate);
+ Global("b", ty.f32(), ast::StorageClass::kPrivate);
- auto* expr = create<ast::BinaryExpression>(ast::BinaryOp::kModulo, Expr("a"),
- Expr("b"));
- WrapInFunction(expr);
+ auto* expr = create<ast::BinaryExpression>(ast::BinaryOp::kModulo, Expr("a"), Expr("b"));
+ WrapInFunction(expr);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
- EXPECT_EQ(out.str(), "tint_float_modulo(a, b)");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
+ EXPECT_EQ(out.str(), "tint_float_modulo(a, b)");
}
TEST_F(GlslGeneratorImplTest_Binary, ModVec3F32) {
- Global("a", ty.vec3<f32>(), ast::StorageClass::kPrivate);
- Global("b", ty.vec3<f32>(), ast::StorageClass::kPrivate);
+ Global("a", ty.vec3<f32>(), ast::StorageClass::kPrivate);
+ Global("b", ty.vec3<f32>(), ast::StorageClass::kPrivate);
- auto* expr = create<ast::BinaryExpression>(ast::BinaryOp::kModulo, Expr("a"),
- Expr("b"));
- WrapInFunction(expr);
+ auto* expr = create<ast::BinaryExpression>(ast::BinaryOp::kModulo, Expr("a"), Expr("b"));
+ WrapInFunction(expr);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
- EXPECT_EQ(out.str(), "tint_float_modulo(a, b)");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
+ EXPECT_EQ(out.str(), "tint_float_modulo(a, b)");
}
TEST_F(GlslGeneratorImplTest_Binary, Logical_Multi) {
- // (a && b) || (c || d)
- Global("a", ty.bool_(), ast::StorageClass::kPrivate);
- Global("b", ty.bool_(), ast::StorageClass::kPrivate);
- Global("c", ty.bool_(), ast::StorageClass::kPrivate);
- Global("d", ty.bool_(), ast::StorageClass::kPrivate);
-
- auto* expr = create<ast::BinaryExpression>(
- ast::BinaryOp::kLogicalOr,
- create<ast::BinaryExpression>(ast::BinaryOp::kLogicalAnd, Expr("a"),
- Expr("b")),
- create<ast::BinaryExpression>(ast::BinaryOp::kLogicalOr, Expr("c"),
- Expr("d")));
- WrapInFunction(expr);
-
- GeneratorImpl& gen = Build();
-
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
- EXPECT_EQ(out.str(), "(tint_tmp)");
- EXPECT_EQ(gen.result(), R"(bool tint_tmp_1 = a;
+ // (a && b) || (c || d)
+ Global("a", ty.bool_(), ast::StorageClass::kPrivate);
+ Global("b", ty.bool_(), ast::StorageClass::kPrivate);
+ Global("c", ty.bool_(), ast::StorageClass::kPrivate);
+ Global("d", ty.bool_(), ast::StorageClass::kPrivate);
+
+ auto* expr = create<ast::BinaryExpression>(
+ ast::BinaryOp::kLogicalOr,
+ create<ast::BinaryExpression>(ast::BinaryOp::kLogicalAnd, Expr("a"), Expr("b")),
+ create<ast::BinaryExpression>(ast::BinaryOp::kLogicalOr, Expr("c"), Expr("d")));
+ WrapInFunction(expr);
+
+ GeneratorImpl& gen = Build();
+
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
+ EXPECT_EQ(out.str(), "(tint_tmp)");
+ EXPECT_EQ(gen.result(), R"(bool tint_tmp_1 = a;
if (tint_tmp_1) {
tint_tmp_1 = b;
}
@@ -325,19 +307,18 @@ if (!tint_tmp) {
}
TEST_F(GlslGeneratorImplTest_Binary, Logical_Or) {
- Global("a", ty.bool_(), ast::StorageClass::kPrivate);
- Global("b", ty.bool_(), ast::StorageClass::kPrivate);
+ Global("a", ty.bool_(), ast::StorageClass::kPrivate);
+ Global("b", ty.bool_(), ast::StorageClass::kPrivate);
- auto* expr = create<ast::BinaryExpression>(ast::BinaryOp::kLogicalOr,
- Expr("a"), Expr("b"));
- WrapInFunction(expr);
+ auto* expr = create<ast::BinaryExpression>(ast::BinaryOp::kLogicalOr, Expr("a"), Expr("b"));
+ WrapInFunction(expr);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
- EXPECT_EQ(out.str(), "(tint_tmp)");
- EXPECT_EQ(gen.result(), R"(bool tint_tmp = a;
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
+ EXPECT_EQ(out.str(), "(tint_tmp)");
+ EXPECT_EQ(gen.result(), R"(bool tint_tmp = a;
if (!tint_tmp) {
tint_tmp = b;
}
@@ -345,31 +326,29 @@ if (!tint_tmp) {
}
TEST_F(GlslGeneratorImplTest_Binary, If_WithLogical) {
- // if (a && b) {
- // return 1;
- // } else if (b || c) {
- // return 2;
- // } else {
- // return 3;
- // }
-
- Global("a", ty.bool_(), ast::StorageClass::kPrivate);
- Global("b", ty.bool_(), ast::StorageClass::kPrivate);
- Global("c", ty.bool_(), ast::StorageClass::kPrivate);
-
- auto* expr = If(create<ast::BinaryExpression>(ast::BinaryOp::kLogicalAnd,
- Expr("a"), Expr("b")),
- Block(Return(1)),
- Else(create<ast::BinaryExpression>(ast::BinaryOp::kLogicalOr,
- Expr("b"), Expr("c")),
- Block(Return(2))),
- Else(Block(Return(3))));
- Func("func", {}, ty.i32(), {WrapInStatement(expr)});
-
- GeneratorImpl& gen = Build();
-
- ASSERT_TRUE(gen.EmitStatement(expr)) << gen.error();
- EXPECT_EQ(gen.result(), R"(bool tint_tmp = a;
+ // if (a && b) {
+ // return 1i;
+ // } else if (b || c) {
+ // return 2i;
+ // } else {
+ // return 3i;
+ // }
+
+ Global("a", ty.bool_(), ast::StorageClass::kPrivate);
+ Global("b", ty.bool_(), ast::StorageClass::kPrivate);
+ Global("c", ty.bool_(), ast::StorageClass::kPrivate);
+
+ auto* expr =
+ If(create<ast::BinaryExpression>(ast::BinaryOp::kLogicalAnd, Expr("a"), Expr("b")),
+ Block(Return(1_i)),
+ Else(If(create<ast::BinaryExpression>(ast::BinaryOp::kLogicalOr, Expr("b"), Expr("c")),
+ Block(Return(2_i)), Else(Block(Return(3_i))))));
+ Func("func", {}, ty.i32(), {WrapInStatement(expr)});
+
+ GeneratorImpl& gen = Build();
+
+ ASSERT_TRUE(gen.EmitStatement(expr)) << gen.error();
+ EXPECT_EQ(gen.result(), R"(bool tint_tmp = a;
if (tint_tmp) {
tint_tmp = b;
}
@@ -390,23 +369,22 @@ if ((tint_tmp)) {
}
TEST_F(GlslGeneratorImplTest_Binary, Return_WithLogical) {
- // return (a && b) || c;
+ // return (a && b) || c;
- Global("a", ty.bool_(), ast::StorageClass::kPrivate);
- Global("b", ty.bool_(), ast::StorageClass::kPrivate);
- Global("c", ty.bool_(), ast::StorageClass::kPrivate);
+ Global("a", ty.bool_(), ast::StorageClass::kPrivate);
+ Global("b", ty.bool_(), ast::StorageClass::kPrivate);
+ Global("c", ty.bool_(), ast::StorageClass::kPrivate);
- auto* expr = Return(create<ast::BinaryExpression>(
- ast::BinaryOp::kLogicalOr,
- create<ast::BinaryExpression>(ast::BinaryOp::kLogicalAnd, Expr("a"),
- Expr("b")),
- Expr("c")));
- Func("func", {}, ty.bool_(), {WrapInStatement(expr)});
+ auto* expr = Return(create<ast::BinaryExpression>(
+ ast::BinaryOp::kLogicalOr,
+ create<ast::BinaryExpression>(ast::BinaryOp::kLogicalAnd, Expr("a"), Expr("b")),
+ Expr("c")));
+ Func("func", {}, ty.bool_(), {WrapInStatement(expr)});
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.EmitStatement(expr)) << gen.error();
- EXPECT_EQ(gen.result(), R"(bool tint_tmp_1 = a;
+ ASSERT_TRUE(gen.EmitStatement(expr)) << gen.error();
+ EXPECT_EQ(gen.result(), R"(bool tint_tmp_1 = a;
if (tint_tmp_1) {
tint_tmp_1 = b;
}
@@ -419,25 +397,25 @@ return (tint_tmp);
}
TEST_F(GlslGeneratorImplTest_Binary, Assign_WithLogical) {
- // a = (b || c) && d;
+ // a = (b || c) && d;
- Global("a", ty.bool_(), ast::StorageClass::kPrivate);
- Global("b", ty.bool_(), ast::StorageClass::kPrivate);
- Global("c", ty.bool_(), ast::StorageClass::kPrivate);
- Global("d", ty.bool_(), ast::StorageClass::kPrivate);
+ Global("a", ty.bool_(), ast::StorageClass::kPrivate);
+ Global("b", ty.bool_(), ast::StorageClass::kPrivate);
+ Global("c", ty.bool_(), ast::StorageClass::kPrivate);
+ Global("d", ty.bool_(), ast::StorageClass::kPrivate);
- auto* expr = Assign(
- Expr("a"), create<ast::BinaryExpression>(
- ast::BinaryOp::kLogicalAnd,
- create<ast::BinaryExpression>(ast::BinaryOp::kLogicalOr,
- Expr("b"), Expr("c")),
- Expr("d")));
- WrapInFunction(expr);
+ auto* expr =
+ Assign(Expr("a"),
+ create<ast::BinaryExpression>(
+ ast::BinaryOp::kLogicalAnd,
+ create<ast::BinaryExpression>(ast::BinaryOp::kLogicalOr, Expr("b"), Expr("c")),
+ Expr("d")));
+ WrapInFunction(expr);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.EmitStatement(expr)) << gen.error();
- EXPECT_EQ(gen.result(), R"(bool tint_tmp_1 = b;
+ ASSERT_TRUE(gen.EmitStatement(expr)) << gen.error();
+ EXPECT_EQ(gen.result(), R"(bool tint_tmp_1 = b;
if (!tint_tmp_1) {
tint_tmp_1 = c;
}
@@ -450,26 +428,26 @@ a = (tint_tmp);
}
TEST_F(GlslGeneratorImplTest_Binary, Decl_WithLogical) {
- // var a : bool = (b && c) || d;
+ // var a : bool = (b && c) || d;
- Global("b", ty.bool_(), ast::StorageClass::kPrivate);
- Global("c", ty.bool_(), ast::StorageClass::kPrivate);
- Global("d", ty.bool_(), ast::StorageClass::kPrivate);
+ Global("b", ty.bool_(), ast::StorageClass::kPrivate);
+ Global("c", ty.bool_(), ast::StorageClass::kPrivate);
+ Global("d", ty.bool_(), ast::StorageClass::kPrivate);
- auto* var = Var("a", ty.bool_(), ast::StorageClass::kNone,
- create<ast::BinaryExpression>(
- ast::BinaryOp::kLogicalOr,
- create<ast::BinaryExpression>(ast::BinaryOp::kLogicalAnd,
- Expr("b"), Expr("c")),
- Expr("d")));
+ auto* var =
+ Var("a", ty.bool_(), ast::StorageClass::kNone,
+ create<ast::BinaryExpression>(
+ ast::BinaryOp::kLogicalOr,
+ create<ast::BinaryExpression>(ast::BinaryOp::kLogicalAnd, Expr("b"), Expr("c")),
+ Expr("d")));
- auto* decl = Decl(var);
- WrapInFunction(decl);
+ auto* decl = Decl(var);
+ WrapInFunction(decl);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.EmitStatement(decl)) << gen.error();
- EXPECT_EQ(gen.result(), R"(bool tint_tmp_1 = b;
+ ASSERT_TRUE(gen.EmitStatement(decl)) << gen.error();
+ EXPECT_EQ(gen.result(), R"(bool tint_tmp_1 = b;
if (tint_tmp_1) {
tint_tmp_1 = c;
}
@@ -482,39 +460,37 @@ bool a = (tint_tmp);
}
TEST_F(GlslGeneratorImplTest_Binary, Call_WithLogical) {
- // foo(a && b, c || d, (a || c) && (b || d))
-
- Func("foo",
- {
- Param(Sym(), ty.bool_()),
- Param(Sym(), ty.bool_()),
- Param(Sym(), ty.bool_()),
- },
- ty.void_(), ast::StatementList{}, ast::AttributeList{});
- Global("a", ty.bool_(), ast::StorageClass::kPrivate);
- Global("b", ty.bool_(), ast::StorageClass::kPrivate);
- Global("c", ty.bool_(), ast::StorageClass::kPrivate);
- Global("d", ty.bool_(), ast::StorageClass::kPrivate);
-
- ast::ExpressionList params;
- params.push_back(create<ast::BinaryExpression>(ast::BinaryOp::kLogicalAnd,
- Expr("a"), Expr("b")));
- params.push_back(create<ast::BinaryExpression>(ast::BinaryOp::kLogicalOr,
- Expr("c"), Expr("d")));
- params.push_back(create<ast::BinaryExpression>(
- ast::BinaryOp::kLogicalAnd,
- create<ast::BinaryExpression>(ast::BinaryOp::kLogicalOr, Expr("a"),
- Expr("c")),
- create<ast::BinaryExpression>(ast::BinaryOp::kLogicalOr, Expr("b"),
- Expr("d"))));
-
- auto* expr = CallStmt(Call("foo", params));
- WrapInFunction(expr);
-
- GeneratorImpl& gen = Build();
-
- ASSERT_TRUE(gen.EmitStatement(expr)) << gen.error();
- EXPECT_EQ(gen.result(), R"(bool tint_tmp = a;
+ // foo(a && b, c || d, (a || c) && (b || d))
+
+ Func("foo",
+ {
+ Param(Sym(), ty.bool_()),
+ Param(Sym(), ty.bool_()),
+ Param(Sym(), ty.bool_()),
+ },
+ ty.void_(), ast::StatementList{}, ast::AttributeList{});
+ Global("a", ty.bool_(), ast::StorageClass::kPrivate);
+ Global("b", ty.bool_(), ast::StorageClass::kPrivate);
+ Global("c", ty.bool_(), ast::StorageClass::kPrivate);
+ Global("d", ty.bool_(), ast::StorageClass::kPrivate);
+
+ ast::ExpressionList params;
+ params.push_back(
+ create<ast::BinaryExpression>(ast::BinaryOp::kLogicalAnd, Expr("a"), Expr("b")));
+ params.push_back(
+ create<ast::BinaryExpression>(ast::BinaryOp::kLogicalOr, Expr("c"), Expr("d")));
+ params.push_back(create<ast::BinaryExpression>(
+ ast::BinaryOp::kLogicalAnd,
+ create<ast::BinaryExpression>(ast::BinaryOp::kLogicalOr, Expr("a"), Expr("c")),
+ create<ast::BinaryExpression>(ast::BinaryOp::kLogicalOr, Expr("b"), Expr("d"))));
+
+ auto* expr = CallStmt(Call("foo", params));
+ WrapInFunction(expr);
+
+ GeneratorImpl& gen = Build();
+
+ ASSERT_TRUE(gen.EmitStatement(expr)) << gen.error();
+ EXPECT_EQ(gen.result(), R"(bool tint_tmp = a;
if (tint_tmp) {
tint_tmp = b;
}
diff --git a/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_bitcast_test.cc b/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_bitcast_test.cc
index c0fcfb4a340..a56c5dd1a71 100644
--- a/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_bitcast_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_bitcast_test.cc
@@ -14,42 +14,44 @@
#include "src/tint/writer/glsl/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::glsl {
namespace {
using GlslGeneratorImplTest_Bitcast = TestHelper;
TEST_F(GlslGeneratorImplTest_Bitcast, EmitExpression_Bitcast_Float) {
- auto* bitcast = create<ast::BitcastExpression>(ty.f32(), Expr(1));
- WrapInFunction(bitcast);
+ auto* bitcast = create<ast::BitcastExpression>(ty.f32(), Expr(1_i));
+ WrapInFunction(bitcast);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, bitcast)) << gen.error();
- EXPECT_EQ(out.str(), "intBitsToFloat(1)");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, bitcast)) << gen.error();
+ EXPECT_EQ(out.str(), "intBitsToFloat(1)");
}
TEST_F(GlslGeneratorImplTest_Bitcast, EmitExpression_Bitcast_Int) {
- auto* bitcast = create<ast::BitcastExpression>(ty.i32(), Expr(1u));
- WrapInFunction(bitcast);
+ auto* bitcast = create<ast::BitcastExpression>(ty.i32(), Expr(1_u));
+ WrapInFunction(bitcast);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, bitcast)) << gen.error();
- EXPECT_EQ(out.str(), "int(1u)");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, bitcast)) << gen.error();
+ EXPECT_EQ(out.str(), "int(1u)");
}
TEST_F(GlslGeneratorImplTest_Bitcast, EmitExpression_Bitcast_Uint) {
- auto* bitcast = create<ast::BitcastExpression>(ty.u32(), Expr(1));
- WrapInFunction(bitcast);
+ auto* bitcast = create<ast::BitcastExpression>(ty.u32(), Expr(1_i));
+ WrapInFunction(bitcast);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, bitcast)) << gen.error();
- EXPECT_EQ(out.str(), "uint(1)");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, bitcast)) << gen.error();
+ EXPECT_EQ(out.str(), "uint(1)");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_block_test.cc b/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_block_test.cc
index cdfc9259f49..014c1c71e53 100644
--- a/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_block_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_block_test.cc
@@ -20,15 +20,15 @@ namespace {
using GlslGeneratorImplTest_Block = TestHelper;
TEST_F(GlslGeneratorImplTest_Block, Emit_Block) {
- auto* b = Block(create<ast::DiscardStatement>());
- WrapInFunction(b);
+ auto* b = Block(create<ast::DiscardStatement>());
+ WrapInFunction(b);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(b)) << gen.error();
- EXPECT_EQ(gen.result(), R"( {
+ ASSERT_TRUE(gen.EmitStatement(b)) << gen.error();
+ EXPECT_EQ(gen.result(), R"( {
discard;
}
)");
diff --git a/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_break_test.cc b/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_break_test.cc
index 7e716a23a04..fa0e5142f3d 100644
--- a/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_break_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_break_test.cc
@@ -20,15 +20,15 @@ namespace {
using GlslGeneratorImplTest_Break = TestHelper;
TEST_F(GlslGeneratorImplTest_Break, Emit_Break) {
- auto* b = create<ast::BreakStatement>();
- WrapInFunction(Loop(Block(b)));
+ auto* b = create<ast::BreakStatement>();
+ WrapInFunction(Loop(Block(b)));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(b)) << gen.error();
- EXPECT_EQ(gen.result(), " break;\n");
+ ASSERT_TRUE(gen.EmitStatement(b)) << gen.error();
+ EXPECT_EQ(gen.result(), " break;\n");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_builtin_test.cc b/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_builtin_test.cc
index 3b313263f25..6f2c555909e 100644
--- a/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_builtin_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_builtin_test.cc
@@ -18,276 +18,275 @@
#include "src/tint/sem/call.h"
#include "src/tint/writer/glsl/test_helper.h"
+using ::testing::HasSubstr;
+
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::glsl {
namespace {
using BuiltinType = sem::BuiltinType;
-using ::testing::HasSubstr;
-
using GlslGeneratorImplTest_Builtin = TestHelper;
enum class ParamType {
- kF32,
- kU32,
- kBool,
+ kF32,
+ kU32,
+ kBool,
};
struct BuiltinData {
- BuiltinType builtin;
- ParamType type;
- const char* glsl_name;
+ BuiltinType builtin;
+ ParamType type;
+ const char* glsl_name;
};
inline std::ostream& operator<<(std::ostream& out, BuiltinData data) {
- out << data.glsl_name;
- switch (data.type) {
- case ParamType::kF32:
- out << "f32";
- break;
- case ParamType::kU32:
- out << "u32";
- break;
- case ParamType::kBool:
- out << "bool";
- break;
- }
- out << ">";
- return out;
+ out << data.glsl_name;
+ switch (data.type) {
+ case ParamType::kF32:
+ out << "f32";
+ break;
+ case ParamType::kU32:
+ out << "u32";
+ break;
+ case ParamType::kBool:
+ out << "bool";
+ break;
+ }
+ out << ">";
+ return out;
}
const ast::CallExpression* GenerateCall(BuiltinType builtin,
ParamType type,
ProgramBuilder* builder) {
- std::string name;
- std::ostringstream str(name);
- str << builtin;
- switch (builtin) {
- case BuiltinType::kAcos:
- case BuiltinType::kAsin:
- case BuiltinType::kAtan:
- case BuiltinType::kCeil:
- case BuiltinType::kCos:
- case BuiltinType::kCosh:
- case BuiltinType::kDpdx:
- case BuiltinType::kDpdxCoarse:
- case BuiltinType::kDpdxFine:
- case BuiltinType::kDpdy:
- case BuiltinType::kDpdyCoarse:
- case BuiltinType::kDpdyFine:
- case BuiltinType::kExp:
- case BuiltinType::kExp2:
- case BuiltinType::kFloor:
- case BuiltinType::kFract:
- case BuiltinType::kFwidth:
- case BuiltinType::kFwidthCoarse:
- case BuiltinType::kFwidthFine:
- case BuiltinType::kInverseSqrt:
- case BuiltinType::kLength:
- case BuiltinType::kLog:
- case BuiltinType::kLog2:
- case BuiltinType::kNormalize:
- case BuiltinType::kRound:
- case BuiltinType::kSin:
- case BuiltinType::kSinh:
- case BuiltinType::kSqrt:
- case BuiltinType::kTan:
- case BuiltinType::kTanh:
- case BuiltinType::kTrunc:
- case BuiltinType::kSign:
- return builder->Call(str.str(), "f2");
- case BuiltinType::kLdexp:
- return builder->Call(str.str(), "f2", "i2");
- case BuiltinType::kAtan2:
- case BuiltinType::kDot:
- case BuiltinType::kDistance:
- case BuiltinType::kPow:
- case BuiltinType::kReflect:
- case BuiltinType::kStep:
- return builder->Call(str.str(), "f2", "f2");
- case BuiltinType::kCross:
- return builder->Call(str.str(), "f3", "f3");
- case BuiltinType::kFma:
- case BuiltinType::kMix:
- case BuiltinType::kFaceForward:
- case BuiltinType::kSmoothstep:
- case BuiltinType::kSmoothStep:
- return builder->Call(str.str(), "f2", "f2", "f2");
- case BuiltinType::kAll:
- case BuiltinType::kAny:
- return builder->Call(str.str(), "b2");
- case BuiltinType::kAbs:
- if (type == ParamType::kF32) {
- return builder->Call(str.str(), "f2");
- } else {
- return builder->Call(str.str(), "u2");
- }
- case BuiltinType::kCountOneBits:
- case BuiltinType::kReverseBits:
- return builder->Call(str.str(), "u2");
- case BuiltinType::kMax:
- case BuiltinType::kMin:
- if (type == ParamType::kF32) {
- return builder->Call(str.str(), "f2", "f2");
- } else {
- return builder->Call(str.str(), "u2", "u2");
- }
- case BuiltinType::kClamp:
- if (type == ParamType::kF32) {
- return builder->Call(str.str(), "f2", "f2", "f2");
- } else {
- return builder->Call(str.str(), "u2", "u2", "u2");
- }
- case BuiltinType::kSelect:
- return builder->Call(str.str(), "f2", "f2", "b2");
- case BuiltinType::kDeterminant:
- return builder->Call(str.str(), "m2x2");
- case BuiltinType::kTranspose:
- return builder->Call(str.str(), "m3x2");
- default:
- break;
- }
- return nullptr;
+ std::string name;
+ std::ostringstream str(name);
+ str << builtin;
+ switch (builtin) {
+ case BuiltinType::kAcos:
+ case BuiltinType::kAsin:
+ case BuiltinType::kAtan:
+ case BuiltinType::kCeil:
+ case BuiltinType::kCos:
+ case BuiltinType::kCosh:
+ case BuiltinType::kDpdx:
+ case BuiltinType::kDpdxCoarse:
+ case BuiltinType::kDpdxFine:
+ case BuiltinType::kDpdy:
+ case BuiltinType::kDpdyCoarse:
+ case BuiltinType::kDpdyFine:
+ case BuiltinType::kExp:
+ case BuiltinType::kExp2:
+ case BuiltinType::kFloor:
+ case BuiltinType::kFract:
+ case BuiltinType::kFwidth:
+ case BuiltinType::kFwidthCoarse:
+ case BuiltinType::kFwidthFine:
+ case BuiltinType::kInverseSqrt:
+ case BuiltinType::kLength:
+ case BuiltinType::kLog:
+ case BuiltinType::kLog2:
+ case BuiltinType::kNormalize:
+ case BuiltinType::kRound:
+ case BuiltinType::kSin:
+ case BuiltinType::kSinh:
+ case BuiltinType::kSqrt:
+ case BuiltinType::kTan:
+ case BuiltinType::kTanh:
+ case BuiltinType::kTrunc:
+ case BuiltinType::kSign:
+ return builder->Call(str.str(), "f2");
+ case BuiltinType::kLdexp:
+ return builder->Call(str.str(), "f2", "i2");
+ case BuiltinType::kAtan2:
+ case BuiltinType::kDot:
+ case BuiltinType::kDistance:
+ case BuiltinType::kPow:
+ case BuiltinType::kReflect:
+ case BuiltinType::kStep:
+ return builder->Call(str.str(), "f2", "f2");
+ case BuiltinType::kCross:
+ return builder->Call(str.str(), "f3", "f3");
+ case BuiltinType::kFma:
+ case BuiltinType::kMix:
+ case BuiltinType::kFaceForward:
+ case BuiltinType::kSmoothstep:
+ case BuiltinType::kSmoothStep:
+ return builder->Call(str.str(), "f2", "f2", "f2");
+ case BuiltinType::kAll:
+ case BuiltinType::kAny:
+ return builder->Call(str.str(), "b2");
+ case BuiltinType::kAbs:
+ if (type == ParamType::kF32) {
+ return builder->Call(str.str(), "f2");
+ } else {
+ return builder->Call(str.str(), "u2");
+ }
+ case BuiltinType::kCountOneBits:
+ case BuiltinType::kReverseBits:
+ return builder->Call(str.str(), "u2");
+ case BuiltinType::kMax:
+ case BuiltinType::kMin:
+ if (type == ParamType::kF32) {
+ return builder->Call(str.str(), "f2", "f2");
+ } else {
+ return builder->Call(str.str(), "u2", "u2");
+ }
+ case BuiltinType::kClamp:
+ if (type == ParamType::kF32) {
+ return builder->Call(str.str(), "f2", "f2", "f2");
+ } else {
+ return builder->Call(str.str(), "u2", "u2", "u2");
+ }
+ case BuiltinType::kSelect:
+ return builder->Call(str.str(), "f2", "f2", "b2");
+ case BuiltinType::kDeterminant:
+ return builder->Call(str.str(), "m2x2");
+ case BuiltinType::kTranspose:
+ return builder->Call(str.str(), "m3x2");
+ default:
+ break;
+ }
+ return nullptr;
}
using GlslBuiltinTest = TestParamHelper<BuiltinData>;
TEST_P(GlslBuiltinTest, Emit) {
- auto param = GetParam();
+ auto param = GetParam();
- Global("f2", ty.vec2<f32>(), ast::StorageClass::kPrivate);
- Global("f3", ty.vec3<f32>(), ast::StorageClass::kPrivate);
- Global("u2", ty.vec2<u32>(), ast::StorageClass::kPrivate);
- Global("i2", ty.vec2<i32>(), ast::StorageClass::kPrivate);
- Global("b2", ty.vec2<bool>(), ast::StorageClass::kPrivate);
- Global("m2x2", ty.mat2x2<f32>(), ast::StorageClass::kPrivate);
- Global("m3x2", ty.mat3x2<f32>(), ast::StorageClass::kPrivate);
+ Global("f2", ty.vec2<f32>(), ast::StorageClass::kPrivate);
+ Global("f3", ty.vec3<f32>(), ast::StorageClass::kPrivate);
+ Global("u2", ty.vec2<u32>(), ast::StorageClass::kPrivate);
+ Global("i2", ty.vec2<i32>(), ast::StorageClass::kPrivate);
+ Global("b2", ty.vec2<bool>(), ast::StorageClass::kPrivate);
+ Global("m2x2", ty.mat2x2<f32>(), ast::StorageClass::kPrivate);
+ Global("m3x2", ty.mat3x2<f32>(), ast::StorageClass::kPrivate);
- auto* call = GenerateCall(param.builtin, param.type, this);
- ASSERT_NE(nullptr, call) << "Unhandled builtin";
- Func("func", {}, ty.void_(), {CallStmt(call)},
- {create<ast::StageAttribute>(ast::PipelineStage::kFragment)});
+ auto* call = GenerateCall(param.builtin, param.type, this);
+ ASSERT_NE(nullptr, call) << "Unhandled builtin";
+ Func("func", {}, ty.void_(), {CallStmt(call)},
+ {create<ast::StageAttribute>(ast::PipelineStage::kFragment)});
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- auto* sem = program->Sem().Get(call);
- ASSERT_NE(sem, nullptr);
- auto* target = sem->Target();
- ASSERT_NE(target, nullptr);
- auto* builtin = target->As<sem::Builtin>();
- ASSERT_NE(builtin, nullptr);
+ auto* sem = program->Sem().Get<sem::Call>(call);
+ ASSERT_NE(sem, nullptr);
+ auto* target = sem->Target();
+ ASSERT_NE(target, nullptr);
+ auto* builtin = target->As<sem::Builtin>();
+ ASSERT_NE(builtin, nullptr);
- EXPECT_EQ(gen.generate_builtin_name(builtin), param.glsl_name);
+ EXPECT_EQ(gen.generate_builtin_name(builtin), param.glsl_name);
}
INSTANTIATE_TEST_SUITE_P(
GlslGeneratorImplTest_Builtin,
GlslBuiltinTest,
- testing::Values(
- BuiltinData{BuiltinType::kAbs, ParamType::kF32, "abs"},
- BuiltinData{BuiltinType::kAbs, ParamType::kU32, "abs"},
- BuiltinData{BuiltinType::kAcos, ParamType::kF32, "acos"},
- BuiltinData{BuiltinType::kAll, ParamType::kBool, "all"},
- BuiltinData{BuiltinType::kAny, ParamType::kBool, "any"},
- BuiltinData{BuiltinType::kAsin, ParamType::kF32, "asin"},
- BuiltinData{BuiltinType::kAtan, ParamType::kF32, "atan"},
- BuiltinData{BuiltinType::kAtan2, ParamType::kF32, "atan"},
- BuiltinData{BuiltinType::kCeil, ParamType::kF32, "ceil"},
- BuiltinData{BuiltinType::kClamp, ParamType::kF32, "clamp"},
- BuiltinData{BuiltinType::kClamp, ParamType::kU32, "clamp"},
- BuiltinData{BuiltinType::kCos, ParamType::kF32, "cos"},
- BuiltinData{BuiltinType::kCosh, ParamType::kF32, "cosh"},
- BuiltinData{BuiltinType::kCountOneBits, ParamType::kU32, "bitCount"},
- BuiltinData{BuiltinType::kCross, ParamType::kF32, "cross"},
- BuiltinData{BuiltinType::kDeterminant, ParamType::kF32, "determinant"},
- BuiltinData{BuiltinType::kDistance, ParamType::kF32, "distance"},
- BuiltinData{BuiltinType::kDot, ParamType::kF32, "dot"},
- BuiltinData{BuiltinType::kDpdx, ParamType::kF32, "dFdx"},
- BuiltinData{BuiltinType::kDpdxCoarse, ParamType::kF32, "dFdx"},
- BuiltinData{BuiltinType::kDpdxFine, ParamType::kF32, "dFdx"},
- BuiltinData{BuiltinType::kDpdy, ParamType::kF32, "dFdy"},
- BuiltinData{BuiltinType::kDpdyCoarse, ParamType::kF32, "dFdy"},
- BuiltinData{BuiltinType::kDpdyFine, ParamType::kF32, "dFdy"},
- BuiltinData{BuiltinType::kExp, ParamType::kF32, "exp"},
- BuiltinData{BuiltinType::kExp2, ParamType::kF32, "exp2"},
- BuiltinData{BuiltinType::kFaceForward, ParamType::kF32, "faceforward"},
- BuiltinData{BuiltinType::kFloor, ParamType::kF32, "floor"},
- BuiltinData{BuiltinType::kFma, ParamType::kF32, "fma"},
- BuiltinData{BuiltinType::kFract, ParamType::kF32, "fract"},
- BuiltinData{BuiltinType::kFwidth, ParamType::kF32, "fwidth"},
- BuiltinData{BuiltinType::kFwidthCoarse, ParamType::kF32, "fwidth"},
- BuiltinData{BuiltinType::kFwidthFine, ParamType::kF32, "fwidth"},
- BuiltinData{BuiltinType::kInverseSqrt, ParamType::kF32, "inversesqrt"},
- BuiltinData{BuiltinType::kLdexp, ParamType::kF32, "ldexp"},
- BuiltinData{BuiltinType::kLength, ParamType::kF32, "length"},
- BuiltinData{BuiltinType::kLog, ParamType::kF32, "log"},
- BuiltinData{BuiltinType::kLog2, ParamType::kF32, "log2"},
- BuiltinData{BuiltinType::kMax, ParamType::kF32, "max"},
- BuiltinData{BuiltinType::kMax, ParamType::kU32, "max"},
- BuiltinData{BuiltinType::kMin, ParamType::kF32, "min"},
- BuiltinData{BuiltinType::kMin, ParamType::kU32, "min"},
- BuiltinData{BuiltinType::kMix, ParamType::kF32, "mix"},
- BuiltinData{BuiltinType::kNormalize, ParamType::kF32, "normalize"},
- BuiltinData{BuiltinType::kPow, ParamType::kF32, "pow"},
- BuiltinData{BuiltinType::kReflect, ParamType::kF32, "reflect"},
- BuiltinData{BuiltinType::kReverseBits, ParamType::kU32,
- "bitfieldReverse"},
- BuiltinData{BuiltinType::kRound, ParamType::kU32, "round"},
- BuiltinData{BuiltinType::kSign, ParamType::kF32, "sign"},
- BuiltinData{BuiltinType::kSin, ParamType::kF32, "sin"},
- BuiltinData{BuiltinType::kSinh, ParamType::kF32, "sinh"},
- BuiltinData{BuiltinType::kSmoothstep, ParamType::kF32, "smoothstep"},
- BuiltinData{BuiltinType::kSmoothStep, ParamType::kF32, "smoothstep"},
- BuiltinData{BuiltinType::kSqrt, ParamType::kF32, "sqrt"},
- BuiltinData{BuiltinType::kStep, ParamType::kF32, "step"},
- BuiltinData{BuiltinType::kTan, ParamType::kF32, "tan"},
- BuiltinData{BuiltinType::kTanh, ParamType::kF32, "tanh"},
- BuiltinData{BuiltinType::kTranspose, ParamType::kF32, "transpose"},
- BuiltinData{BuiltinType::kTrunc, ParamType::kF32, "trunc"}));
+ testing::Values(BuiltinData{BuiltinType::kAbs, ParamType::kF32, "abs"},
+ BuiltinData{BuiltinType::kAbs, ParamType::kU32, "abs"},
+ BuiltinData{BuiltinType::kAcos, ParamType::kF32, "acos"},
+ BuiltinData{BuiltinType::kAll, ParamType::kBool, "all"},
+ BuiltinData{BuiltinType::kAny, ParamType::kBool, "any"},
+ BuiltinData{BuiltinType::kAsin, ParamType::kF32, "asin"},
+ BuiltinData{BuiltinType::kAtan, ParamType::kF32, "atan"},
+ BuiltinData{BuiltinType::kAtan2, ParamType::kF32, "atan"},
+ BuiltinData{BuiltinType::kCeil, ParamType::kF32, "ceil"},
+ BuiltinData{BuiltinType::kClamp, ParamType::kF32, "clamp"},
+ BuiltinData{BuiltinType::kClamp, ParamType::kU32, "clamp"},
+ BuiltinData{BuiltinType::kCos, ParamType::kF32, "cos"},
+ BuiltinData{BuiltinType::kCosh, ParamType::kF32, "cosh"},
+ BuiltinData{BuiltinType::kCountOneBits, ParamType::kU32, "bitCount"},
+ BuiltinData{BuiltinType::kCross, ParamType::kF32, "cross"},
+ BuiltinData{BuiltinType::kDeterminant, ParamType::kF32, "determinant"},
+ BuiltinData{BuiltinType::kDistance, ParamType::kF32, "distance"},
+ BuiltinData{BuiltinType::kDot, ParamType::kF32, "dot"},
+ BuiltinData{BuiltinType::kDpdx, ParamType::kF32, "dFdx"},
+ BuiltinData{BuiltinType::kDpdxCoarse, ParamType::kF32, "dFdx"},
+ BuiltinData{BuiltinType::kDpdxFine, ParamType::kF32, "dFdx"},
+ BuiltinData{BuiltinType::kDpdy, ParamType::kF32, "dFdy"},
+ BuiltinData{BuiltinType::kDpdyCoarse, ParamType::kF32, "dFdy"},
+ BuiltinData{BuiltinType::kDpdyFine, ParamType::kF32, "dFdy"},
+ BuiltinData{BuiltinType::kExp, ParamType::kF32, "exp"},
+ BuiltinData{BuiltinType::kExp2, ParamType::kF32, "exp2"},
+ BuiltinData{BuiltinType::kFaceForward, ParamType::kF32, "faceforward"},
+ BuiltinData{BuiltinType::kFloor, ParamType::kF32, "floor"},
+ BuiltinData{BuiltinType::kFma, ParamType::kF32, "fma"},
+ BuiltinData{BuiltinType::kFract, ParamType::kF32, "fract"},
+ BuiltinData{BuiltinType::kFwidth, ParamType::kF32, "fwidth"},
+ BuiltinData{BuiltinType::kFwidthCoarse, ParamType::kF32, "fwidth"},
+ BuiltinData{BuiltinType::kFwidthFine, ParamType::kF32, "fwidth"},
+ BuiltinData{BuiltinType::kInverseSqrt, ParamType::kF32, "inversesqrt"},
+ BuiltinData{BuiltinType::kLdexp, ParamType::kF32, "ldexp"},
+ BuiltinData{BuiltinType::kLength, ParamType::kF32, "length"},
+ BuiltinData{BuiltinType::kLog, ParamType::kF32, "log"},
+ BuiltinData{BuiltinType::kLog2, ParamType::kF32, "log2"},
+ BuiltinData{BuiltinType::kMax, ParamType::kF32, "max"},
+ BuiltinData{BuiltinType::kMax, ParamType::kU32, "max"},
+ BuiltinData{BuiltinType::kMin, ParamType::kF32, "min"},
+ BuiltinData{BuiltinType::kMin, ParamType::kU32, "min"},
+ BuiltinData{BuiltinType::kMix, ParamType::kF32, "mix"},
+ BuiltinData{BuiltinType::kNormalize, ParamType::kF32, "normalize"},
+ BuiltinData{BuiltinType::kPow, ParamType::kF32, "pow"},
+ BuiltinData{BuiltinType::kReflect, ParamType::kF32, "reflect"},
+ BuiltinData{BuiltinType::kReverseBits, ParamType::kU32, "bitfieldReverse"},
+ BuiltinData{BuiltinType::kRound, ParamType::kU32, "round"},
+ BuiltinData{BuiltinType::kSign, ParamType::kF32, "sign"},
+ BuiltinData{BuiltinType::kSin, ParamType::kF32, "sin"},
+ BuiltinData{BuiltinType::kSinh, ParamType::kF32, "sinh"},
+ BuiltinData{BuiltinType::kSmoothstep, ParamType::kF32, "smoothstep"},
+ BuiltinData{BuiltinType::kSmoothStep, ParamType::kF32, "smoothstep"},
+ BuiltinData{BuiltinType::kSqrt, ParamType::kF32, "sqrt"},
+ BuiltinData{BuiltinType::kStep, ParamType::kF32, "step"},
+ BuiltinData{BuiltinType::kTan, ParamType::kF32, "tan"},
+ BuiltinData{BuiltinType::kTanh, ParamType::kF32, "tanh"},
+ BuiltinData{BuiltinType::kTranspose, ParamType::kF32, "transpose"},
+ BuiltinData{BuiltinType::kTrunc, ParamType::kF32, "trunc"}));
TEST_F(GlslGeneratorImplTest_Builtin, Builtin_Call) {
- auto* call = Call("dot", "param1", "param2");
+ auto* call = Call("dot", "param1", "param2");
- Global("param1", ty.vec3<f32>(), ast::StorageClass::kPrivate);
- Global("param2", ty.vec3<f32>(), ast::StorageClass::kPrivate);
+ Global("param1", ty.vec3<f32>(), ast::StorageClass::kPrivate);
+ Global("param2", ty.vec3<f32>(), ast::StorageClass::kPrivate);
- WrapInFunction(CallStmt(call));
+ WrapInFunction(CallStmt(call));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, call)) << gen.error();
- EXPECT_EQ(out.str(), "dot(param1, param2)");
+ gen.increment_indent();
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, call)) << gen.error();
+ EXPECT_EQ(out.str(), "dot(param1, param2)");
}
TEST_F(GlslGeneratorImplTest_Builtin, Select_Scalar) {
- auto* call = Call("select", 1.0f, 2.0f, true);
- WrapInFunction(CallStmt(call));
- GeneratorImpl& gen = Build();
+ auto* call = Call("select", 1_f, 2_f, true);
+ WrapInFunction(CallStmt(call));
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, call)) << gen.error();
- EXPECT_EQ(out.str(), "(true ? 2.0f : 1.0f)");
+ gen.increment_indent();
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, call)) << gen.error();
+ EXPECT_EQ(out.str(), "(true ? 2.0f : 1.0f)");
}
TEST_F(GlslGeneratorImplTest_Builtin, Select_Vector) {
- auto* call =
- Call("select", vec2<i32>(1, 2), vec2<i32>(3, 4), vec2<bool>(true, false));
- WrapInFunction(CallStmt(call));
- GeneratorImpl& gen = Build();
+ auto* call = Call("select", vec2<i32>(1_i, 2_i), vec2<i32>(3_i, 4_i), vec2<bool>(true, false));
+ WrapInFunction(CallStmt(call));
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, call)) << gen.error();
- EXPECT_EQ(out.str(), "mix(ivec2(1, 2), ivec2(3, 4), bvec2(true, false))");
+ gen.increment_indent();
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, call)) << gen.error();
+ EXPECT_EQ(out.str(), "mix(ivec2(1, 2), ivec2(3, 4), bvec2(true, false))");
}
TEST_F(GlslGeneratorImplTest_Builtin, Modf_Scalar) {
- auto* call = Call("modf", 1.0f);
- WrapInFunction(CallStmt(call));
+ auto* call = Call("modf", 1_f);
+ WrapInFunction(CallStmt(call));
- GeneratorImpl& gen = SanitizeAndBuild();
+ GeneratorImpl& gen = SanitizeAndBuild();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(#version 310 es
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(#version 310 es
struct modf_result {
float fract;
@@ -314,13 +313,13 @@ void main() {
}
TEST_F(GlslGeneratorImplTest_Builtin, Modf_Vector) {
- auto* call = Call("modf", vec3<f32>());
- WrapInFunction(CallStmt(call));
+ auto* call = Call("modf", vec3<f32>());
+ WrapInFunction(CallStmt(call));
- GeneratorImpl& gen = SanitizeAndBuild();
+ GeneratorImpl& gen = SanitizeAndBuild();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(#version 310 es
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(#version 310 es
struct modf_result_vec3 {
vec3 fract;
@@ -335,7 +334,7 @@ modf_result_vec3 tint_modf(vec3 param_0) {
void test_function() {
- tint_modf(vec3(0.0f, 0.0f, 0.0f));
+ tint_modf(vec3(0.0f));
}
layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;
@@ -347,13 +346,13 @@ void main() {
}
TEST_F(GlslGeneratorImplTest_Builtin, Frexp_Scalar_i32) {
- auto* call = Call("frexp", 1.0f);
- WrapInFunction(CallStmt(call));
+ auto* call = Call("frexp", 1_f);
+ WrapInFunction(CallStmt(call));
- GeneratorImpl& gen = SanitizeAndBuild();
+ GeneratorImpl& gen = SanitizeAndBuild();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr(R"(
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr(R"(
float sig;
int exp;
};
@@ -374,13 +373,13 @@ layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;
}
TEST_F(GlslGeneratorImplTest_Builtin, Frexp_Vector_i32) {
- auto* call = Call("frexp", vec3<f32>());
- WrapInFunction(CallStmt(call));
+ auto* call = Call("frexp", vec3<f32>());
+ WrapInFunction(CallStmt(call));
- GeneratorImpl& gen = SanitizeAndBuild();
+ GeneratorImpl& gen = SanitizeAndBuild();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr(R"(
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr(R"(
struct frexp_result_vec3 {
vec3 sig;
@@ -395,7 +394,7 @@ frexp_result_vec3 tint_frexp(vec3 param_0) {
void test_function() {
- tint_frexp(vec3(0.0f, 0.0f, 0.0f));
+ tint_frexp(vec3(0.0f));
}
layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;
@@ -406,14 +405,14 @@ void main() {
}
TEST_F(GlslGeneratorImplTest_Builtin, Degrees_Scalar) {
- auto* val = Var("val", ty.f32());
- auto* call = Call("degrees", val);
- WrapInFunction(val, call);
+ auto* val = Var("val", ty.f32());
+ auto* call = Call("degrees", val);
+ WrapInFunction(val, call);
- GeneratorImpl& gen = SanitizeAndBuild();
+ GeneratorImpl& gen = SanitizeAndBuild();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(#version 310 es
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(#version 310 es
float tint_degrees(float param_0) {
return param_0 * 57.295779513082322865;
@@ -434,14 +433,14 @@ void main() {
}
TEST_F(GlslGeneratorImplTest_Builtin, Degrees_Vector) {
- auto* val = Var("val", ty.vec3<f32>());
- auto* call = Call("degrees", val);
- WrapInFunction(val, call);
+ auto* val = Var("val", ty.vec3<f32>());
+ auto* call = Call("degrees", val);
+ WrapInFunction(val, call);
- GeneratorImpl& gen = SanitizeAndBuild();
+ GeneratorImpl& gen = SanitizeAndBuild();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(#version 310 es
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(#version 310 es
vec3 tint_degrees(vec3 param_0) {
return param_0 * 57.295779513082322865;
@@ -462,14 +461,14 @@ void main() {
}
TEST_F(GlslGeneratorImplTest_Builtin, Radians_Scalar) {
- auto* val = Var("val", ty.f32());
- auto* call = Call("radians", val);
- WrapInFunction(val, call);
+ auto* val = Var("val", ty.f32());
+ auto* call = Call("radians", val);
+ WrapInFunction(val, call);
- GeneratorImpl& gen = SanitizeAndBuild();
+ GeneratorImpl& gen = SanitizeAndBuild();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(#version 310 es
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(#version 310 es
float tint_radians(float param_0) {
return param_0 * 0.017453292519943295474;
@@ -490,14 +489,14 @@ void main() {
}
TEST_F(GlslGeneratorImplTest_Builtin, Radians_Vector) {
- auto* val = Var("val", ty.vec3<f32>());
- auto* call = Call("radians", val);
- WrapInFunction(val, call);
+ auto* val = Var("val", ty.vec3<f32>());
+ auto* call = Call("radians", val);
+ WrapInFunction(val, call);
- GeneratorImpl& gen = SanitizeAndBuild();
+ GeneratorImpl& gen = SanitizeAndBuild();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(#version 310 es
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(#version 310 es
vec3 tint_radians(vec3 param_0) {
return param_0 * 0.017453292519943295474;
@@ -518,16 +517,16 @@ void main() {
}
TEST_F(GlslGeneratorImplTest_Builtin, ExtractBits) {
- auto* v = Var("v", ty.vec3<u32>());
- auto* offset = Var("offset", ty.u32());
- auto* count = Var("count", ty.u32());
- auto* call = Call("extractBits", v, offset, count);
- WrapInFunction(v, offset, count, call);
+ auto* v = Var("v", ty.vec3<u32>());
+ auto* offset = Var("offset", ty.u32());
+ auto* count = Var("count", ty.u32());
+ auto* call = Call("extractBits", v, offset, count);
+ WrapInFunction(v, offset, count, call);
- GeneratorImpl& gen = SanitizeAndBuild();
+ GeneratorImpl& gen = SanitizeAndBuild();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(#version 310 es
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(#version 310 es
uvec3 tint_extract_bits(uvec3 v, uint offset, uint count) {
uint s = min(offset, 32u);
@@ -551,17 +550,17 @@ void main() {
}
TEST_F(GlslGeneratorImplTest_Builtin, InsertBits) {
- auto* v = Var("v", ty.vec3<u32>());
- auto* n = Var("n", ty.vec3<u32>());
- auto* offset = Var("offset", ty.u32());
- auto* count = Var("count", ty.u32());
- auto* call = Call("insertBits", v, n, offset, count);
- WrapInFunction(v, n, offset, count, call);
+ auto* v = Var("v", ty.vec3<u32>());
+ auto* n = Var("n", ty.vec3<u32>());
+ auto* offset = Var("offset", ty.u32());
+ auto* count = Var("count", ty.u32());
+ auto* call = Call("insertBits", v, n, offset, count);
+ WrapInFunction(v, n, offset, count, call);
- GeneratorImpl& gen = SanitizeAndBuild();
+ GeneratorImpl& gen = SanitizeAndBuild();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(#version 310 es
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(#version 310 es
uvec3 tint_insert_bits(uvec3 v, uvec3 n, uint offset, uint count) {
uint s = min(offset, 32u);
@@ -586,13 +585,13 @@ void main() {
}
TEST_F(GlslGeneratorImplTest_Builtin, Pack4x8Snorm) {
- auto* call = Call("pack4x8snorm", "p1");
- Global("p1", ty.vec4<f32>(), ast::StorageClass::kPrivate);
- WrapInFunction(CallStmt(call));
- GeneratorImpl& gen = Build();
+ auto* call = Call("pack4x8snorm", "p1");
+ Global("p1", ty.vec4<f32>(), ast::StorageClass::kPrivate);
+ WrapInFunction(CallStmt(call));
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(#version 310 es
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(#version 310 es
vec4 p1 = vec4(0.0f, 0.0f, 0.0f, 0.0f);
layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;
@@ -604,13 +603,13 @@ void test_function() {
}
TEST_F(GlslGeneratorImplTest_Builtin, Pack4x8Unorm) {
- auto* call = Call("pack4x8unorm", "p1");
- Global("p1", ty.vec4<f32>(), ast::StorageClass::kPrivate);
- WrapInFunction(CallStmt(call));
- GeneratorImpl& gen = Build();
+ auto* call = Call("pack4x8unorm", "p1");
+ Global("p1", ty.vec4<f32>(), ast::StorageClass::kPrivate);
+ WrapInFunction(CallStmt(call));
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(#version 310 es
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(#version 310 es
vec4 p1 = vec4(0.0f, 0.0f, 0.0f, 0.0f);
layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;
@@ -622,13 +621,13 @@ void test_function() {
}
TEST_F(GlslGeneratorImplTest_Builtin, Pack2x16Snorm) {
- auto* call = Call("pack2x16snorm", "p1");
- Global("p1", ty.vec2<f32>(), ast::StorageClass::kPrivate);
- WrapInFunction(CallStmt(call));
- GeneratorImpl& gen = Build();
+ auto* call = Call("pack2x16snorm", "p1");
+ Global("p1", ty.vec2<f32>(), ast::StorageClass::kPrivate);
+ WrapInFunction(CallStmt(call));
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(#version 310 es
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(#version 310 es
vec2 p1 = vec2(0.0f, 0.0f);
layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;
@@ -640,13 +639,13 @@ void test_function() {
}
TEST_F(GlslGeneratorImplTest_Builtin, Pack2x16Unorm) {
- auto* call = Call("pack2x16unorm", "p1");
- Global("p1", ty.vec2<f32>(), ast::StorageClass::kPrivate);
- WrapInFunction(CallStmt(call));
- GeneratorImpl& gen = Build();
+ auto* call = Call("pack2x16unorm", "p1");
+ Global("p1", ty.vec2<f32>(), ast::StorageClass::kPrivate);
+ WrapInFunction(CallStmt(call));
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(#version 310 es
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(#version 310 es
vec2 p1 = vec2(0.0f, 0.0f);
layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;
@@ -658,13 +657,13 @@ void test_function() {
}
TEST_F(GlslGeneratorImplTest_Builtin, Pack2x16Float) {
- auto* call = Call("pack2x16float", "p1");
- Global("p1", ty.vec2<f32>(), ast::StorageClass::kPrivate);
- WrapInFunction(CallStmt(call));
- GeneratorImpl& gen = Build();
+ auto* call = Call("pack2x16float", "p1");
+ Global("p1", ty.vec2<f32>(), ast::StorageClass::kPrivate);
+ WrapInFunction(CallStmt(call));
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(#version 310 es
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(#version 310 es
vec2 p1 = vec2(0.0f, 0.0f);
layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;
@@ -676,13 +675,13 @@ void test_function() {
}
TEST_F(GlslGeneratorImplTest_Builtin, Unpack4x8Snorm) {
- auto* call = Call("unpack4x8snorm", "p1");
- Global("p1", ty.u32(), ast::StorageClass::kPrivate);
- WrapInFunction(CallStmt(call));
- GeneratorImpl& gen = Build();
+ auto* call = Call("unpack4x8snorm", "p1");
+ Global("p1", ty.u32(), ast::StorageClass::kPrivate);
+ WrapInFunction(CallStmt(call));
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(#version 310 es
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(#version 310 es
uint p1 = 0u;
layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;
@@ -694,13 +693,13 @@ void test_function() {
}
TEST_F(GlslGeneratorImplTest_Builtin, Unpack4x8Unorm) {
- auto* call = Call("unpack4x8unorm", "p1");
- Global("p1", ty.u32(), ast::StorageClass::kPrivate);
- WrapInFunction(CallStmt(call));
- GeneratorImpl& gen = Build();
+ auto* call = Call("unpack4x8unorm", "p1");
+ Global("p1", ty.u32(), ast::StorageClass::kPrivate);
+ WrapInFunction(CallStmt(call));
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(#version 310 es
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(#version 310 es
uint p1 = 0u;
layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;
@@ -712,13 +711,13 @@ void test_function() {
}
TEST_F(GlslGeneratorImplTest_Builtin, Unpack2x16Snorm) {
- auto* call = Call("unpack2x16snorm", "p1");
- Global("p1", ty.u32(), ast::StorageClass::kPrivate);
- WrapInFunction(CallStmt(call));
- GeneratorImpl& gen = Build();
+ auto* call = Call("unpack2x16snorm", "p1");
+ Global("p1", ty.u32(), ast::StorageClass::kPrivate);
+ WrapInFunction(CallStmt(call));
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(#version 310 es
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(#version 310 es
uint p1 = 0u;
layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;
@@ -730,13 +729,13 @@ void test_function() {
}
TEST_F(GlslGeneratorImplTest_Builtin, Unpack2x16Unorm) {
- auto* call = Call("unpack2x16unorm", "p1");
- Global("p1", ty.u32(), ast::StorageClass::kPrivate);
- WrapInFunction(CallStmt(call));
- GeneratorImpl& gen = Build();
+ auto* call = Call("unpack2x16unorm", "p1");
+ Global("p1", ty.u32(), ast::StorageClass::kPrivate);
+ WrapInFunction(CallStmt(call));
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(#version 310 es
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(#version 310 es
uint p1 = 0u;
layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;
@@ -748,13 +747,13 @@ void test_function() {
}
TEST_F(GlslGeneratorImplTest_Builtin, Unpack2x16Float) {
- auto* call = Call("unpack2x16float", "p1");
- Global("p1", ty.u32(), ast::StorageClass::kPrivate);
- WrapInFunction(CallStmt(call));
- GeneratorImpl& gen = Build();
+ auto* call = Call("unpack2x16float", "p1");
+ Global("p1", ty.u32(), ast::StorageClass::kPrivate);
+ WrapInFunction(CallStmt(call));
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(#version 310 es
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(#version 310 es
uint p1 = 0u;
layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;
@@ -766,16 +765,16 @@ void test_function() {
}
TEST_F(GlslGeneratorImplTest_Builtin, StorageBarrier) {
- Func("main", {}, ty.void_(), {CallStmt(Call("storageBarrier"))},
- {
- Stage(ast::PipelineStage::kCompute),
- WorkgroupSize(1),
- });
+ Func("main", {}, ty.void_(), {CallStmt(Call("storageBarrier"))},
+ {
+ Stage(ast::PipelineStage::kCompute),
+ WorkgroupSize(1_i),
+ });
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(#version 310 es
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(#version 310 es
layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;
void main() {
@@ -786,16 +785,16 @@ void main() {
}
TEST_F(GlslGeneratorImplTest_Builtin, WorkgroupBarrier) {
- Func("main", {}, ty.void_(), {CallStmt(Call("workgroupBarrier"))},
- {
- Stage(ast::PipelineStage::kCompute),
- WorkgroupSize(1),
- });
+ Func("main", {}, ty.void_(), {CallStmt(Call("workgroupBarrier"))},
+ {
+ Stage(ast::PipelineStage::kCompute),
+ WorkgroupSize(1_i),
+ });
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(#version 310 es
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(#version 310 es
layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;
void main() {
@@ -806,13 +805,13 @@ void main() {
}
TEST_F(GlslGeneratorImplTest_Builtin, DotI32) {
- Global("v", ty.vec3<i32>(), ast::StorageClass::kPrivate);
- WrapInFunction(CallStmt(Call("dot", "v", "v")));
+ Global("v", ty.vec3<i32>(), ast::StorageClass::kPrivate);
+ WrapInFunction(CallStmt(Call("dot", "v", "v")));
- GeneratorImpl& gen = SanitizeAndBuild();
+ GeneratorImpl& gen = SanitizeAndBuild();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(#version 310 es
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(#version 310 es
int tint_int_dot(ivec3 a, ivec3 b) {
return a[0]*b[0] + a[1]*b[1] + a[2]*b[2];
@@ -832,30 +831,30 @@ void main() {
}
TEST_F(GlslGeneratorImplTest_Builtin, FMA) {
- auto* call = Call("fma", "a", "b", "c");
+ auto* call = Call("fma", "a", "b", "c");
- Global("a", ty.vec3<f32>(), ast::StorageClass::kPrivate);
- Global("b", ty.vec3<f32>(), ast::StorageClass::kPrivate);
- Global("c", ty.vec3<f32>(), ast::StorageClass::kPrivate);
+ Global("a", ty.vec3<f32>(), ast::StorageClass::kPrivate);
+ Global("b", ty.vec3<f32>(), ast::StorageClass::kPrivate);
+ Global("c", ty.vec3<f32>(), ast::StorageClass::kPrivate);
- WrapInFunction(CallStmt(call));
+ WrapInFunction(CallStmt(call));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, call)) << gen.error();
- EXPECT_EQ(out.str(), "((a) * (b) + (c))");
+ gen.increment_indent();
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, call)) << gen.error();
+ EXPECT_EQ(out.str(), "((a) * (b) + (c))");
}
TEST_F(GlslGeneratorImplTest_Builtin, DotU32) {
- Global("v", ty.vec3<u32>(), ast::StorageClass::kPrivate);
- WrapInFunction(CallStmt(Call("dot", "v", "v")));
+ Global("v", ty.vec3<u32>(), ast::StorageClass::kPrivate);
+ WrapInFunction(CallStmt(Call("dot", "v", "v")));
- GeneratorImpl& gen = SanitizeAndBuild();
+ GeneratorImpl& gen = SanitizeAndBuild();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(#version 310 es
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(#version 310 es
uint tint_int_dot(uvec3 a, uvec3 b) {
return a[0]*b[0] + a[1]*b[1] + a[2]*b[2];
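
A quick note on the DotI32/DotU32 expectations above: GLSL ES 3.10 defines dot() only for floating-point vectors, so the generated shader declares an integer helper (tint_int_dot) and the tests assert its presence. The helper below is copied from the expected output in these hunks into a standalone C++ string constant purely for reference; the constant name is illustrative and not part of the Tint sources.

// Integer dot-product polyfill emitted by the GLSL writer for vec3<i32>/vec3<u32>
// operands; body copied verbatim from the expected test output above.
constexpr const char* kTintIntDotHelper = R"(int tint_int_dot(ivec3 a, ivec3 b) {
  return a[0]*b[0] + a[1]*b[1] + a[2]*b[2];
})";
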
diff --git a/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_builtin_texture_test.cc b/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_builtin_texture_test.cc
index a94eb67f765..42db16b852b 100644
--- a/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_builtin_texture_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_builtin_texture_test.cc
@@ -24,277 +24,275 @@ namespace {
using ::testing::HasSubstr;
struct ExpectedResult {
- ExpectedResult(const char* o) : out(o) {} // NOLINT
+ ExpectedResult(const char* o) : out(o) {} // NOLINT
- std::string pre;
- std::string out;
+ std::string pre;
+ std::string out;
};
-ExpectedResult expected_texture_overload(
- ast::builtin::test::ValidTextureOverload overload) {
- using ValidTextureOverload = ast::builtin::test::ValidTextureOverload;
- switch (overload) {
- case ValidTextureOverload::kDimensions1d:
- case ValidTextureOverload::kDimensions2d:
- case ValidTextureOverload::kDimensionsDepth2d:
- case ValidTextureOverload::kDimensionsDepthMultisampled2d:
- case ValidTextureOverload::kDimensionsMultisampled2d:
- case ValidTextureOverload::kDimensions2dArray:
- case ValidTextureOverload::kDimensionsDepth2dArray:
- case ValidTextureOverload::kDimensions3d:
- case ValidTextureOverload::kDimensionsCube:
- case ValidTextureOverload::kDimensionsDepthCube:
- case ValidTextureOverload::kDimensionsCubeArray:
- case ValidTextureOverload::kDimensionsDepthCubeArray:
- case ValidTextureOverload::kDimensions2dLevel:
- case ValidTextureOverload::kDimensionsDepth2dLevel:
- case ValidTextureOverload::kDimensions2dArrayLevel:
- case ValidTextureOverload::kDimensionsDepth2dArrayLevel:
- case ValidTextureOverload::kDimensions3dLevel:
- case ValidTextureOverload::kDimensionsCubeLevel:
- case ValidTextureOverload::kDimensionsDepthCubeLevel:
- case ValidTextureOverload::kDimensionsCubeArrayLevel:
- case ValidTextureOverload::kDimensionsDepthCubeArrayLevel:
- return {"textureSize"};
- case ValidTextureOverload::kDimensionsStorageWO1d:
- case ValidTextureOverload::kDimensionsStorageWO2d:
- case ValidTextureOverload::kDimensionsStorageWO2dArray:
- case ValidTextureOverload::kDimensionsStorageWO3d:
- return {"imageSize"};
- case ValidTextureOverload::kGather2dF32:
- return R"(textureGather(tint_symbol_sampler, vec2(1.0f, 2.0f), 0))";
- case ValidTextureOverload::kGather2dOffsetF32:
- return R"(textureGatherOffset(tint_symbol_sampler, vec2(1.0f, 2.0f), ivec2(3, 4), 0))";
- case ValidTextureOverload::kGather2dArrayF32:
- return R"(textureGather(tint_symbol_sampler, vec3(1.0f, 2.0f, float(3)), 0))";
- case ValidTextureOverload::kGather2dArrayOffsetF32:
- return R"(textureGatherOffset(tint_symbol_sampler, vec3(1.0f, 2.0f, float(3)), ivec2(4, 5), 0))";
- case ValidTextureOverload::kGatherCubeF32:
- return R"(textureGather(tint_symbol_sampler, vec3(1.0f, 2.0f, 3.0f), 0))";
- case ValidTextureOverload::kGatherCubeArrayF32:
- return R"(textureGather(tint_symbol_sampler, vec4(1.0f, 2.0f, 3.0f, float(4)), 0))";
- case ValidTextureOverload::kGatherDepth2dF32:
- return R"(textureGather(tint_symbol_sampler, vec2(1.0f, 2.0f), 0.0))";
- case ValidTextureOverload::kGatherDepth2dOffsetF32:
- return R"(textureGatherOffset(tint_symbol_sampler, vec2(1.0f, 2.0f), 0.0, ivec2(3, 4))";
- case ValidTextureOverload::kGatherDepth2dArrayF32:
- return R"(textureGather(tint_symbol_sampler, vec3(1.0f, 2.0f, float(3)), 0.0))";
- case ValidTextureOverload::kGatherDepth2dArrayOffsetF32:
- return R"(textureGatherOffset(tint_symbol_sampler, vec3(1.0f, 2.0f, float(3)), 0.0, ivec2(4, 5)))";
- case ValidTextureOverload::kGatherDepthCubeF32:
- return R"(textureGather(tint_symbol_sampler, vec3(1.0f, 2.0f, 3.0f), 0.0))";
- case ValidTextureOverload::kGatherDepthCubeArrayF32:
- return R"(textureGather(tint_symbol_sampler, vec4(1.0f, 2.0f, 3.0f, float(4)), 0.0))";
- case ValidTextureOverload::kGatherCompareDepth2dF32:
- return R"(textureGather(tint_symbol_sampler, vec2(1.0f, 2.0f), 3.0f))";
- case ValidTextureOverload::kGatherCompareDepth2dOffsetF32:
- return R"(textureGatherOffset(tint_symbol_sampler, vec2(1.0f, 2.0f), 3.0f, ivec2(4, 5)))";
- case ValidTextureOverload::kGatherCompareDepth2dArrayF32:
- return R"(textureGather(tint_symbol_sampler, vec3(1.0f, 2.0f, float(3)), 4.0f))";
- case ValidTextureOverload::kGatherCompareDepth2dArrayOffsetF32:
- return R"(textureGatherOffset(tint_symbol_sampler, vec3(1.0f, 2.0f, float(3)), 4.0f, ivec2(5, 6)))";
- case ValidTextureOverload::kGatherCompareDepthCubeF32:
- return R"(textureGather(tint_symbol_sampler, vec3(1.0f, 2.0f, 3.0f), 4.0f))";
- case ValidTextureOverload::kGatherCompareDepthCubeArrayF32:
- return R"(textureGather(tint_symbol_sampler, vec4(1.0f, 2.0f, 3.0f, float(4)), 5.0f))";
- case ValidTextureOverload::kNumLayers2dArray:
- case ValidTextureOverload::kNumLayersDepth2dArray:
- case ValidTextureOverload::kNumLayersCubeArray:
- case ValidTextureOverload::kNumLayersDepthCubeArray:
- return {"textureSize"};
- case ValidTextureOverload::kNumLayersStorageWO2dArray:
- return {"imageSize"};
- case ValidTextureOverload::kNumLevels2d:
- case ValidTextureOverload::kNumLevelsCube:
- case ValidTextureOverload::kNumLevelsDepth2d:
- case ValidTextureOverload::kNumLevelsDepthCube:
- case ValidTextureOverload::kNumLevels2dArray:
- case ValidTextureOverload::kNumLevels3d:
- case ValidTextureOverload::kNumLevelsCubeArray:
- case ValidTextureOverload::kNumLevelsDepth2dArray:
- case ValidTextureOverload::kNumLevelsDepthCubeArray:
- return {"textureQueryLevels"};
- case ValidTextureOverload::kNumSamplesDepthMultisampled2d:
- case ValidTextureOverload::kNumSamplesMultisampled2d:
- return {"textureSamples"};
- case ValidTextureOverload::kSample1dF32:
- return R"(texture(tint_symbol_sampler, 1.0f);)";
- case ValidTextureOverload::kSample2dF32:
- return R"(texture(tint_symbol_sampler, vec2(1.0f, 2.0f));)";
- case ValidTextureOverload::kSample2dOffsetF32:
- return R"(textureOffset(tint_symbol_sampler, vec2(1.0f, 2.0f), ivec2(3, 4));)";
- case ValidTextureOverload::kSample2dArrayF32:
- return R"(texture(tint_symbol_sampler, vec3(1.0f, 2.0f, float(3)));)";
- case ValidTextureOverload::kSample2dArrayOffsetF32:
- return R"(textureOffset(tint_symbol_sampler, vec3(1.0f, 2.0f, float(3)), ivec2(4, 5));)";
- case ValidTextureOverload::kSample3dF32:
- return R"(texture(tint_symbol_sampler, vec3(1.0f, 2.0f, 3.0f));)";
- case ValidTextureOverload::kSample3dOffsetF32:
- return R"(textureOffset(tint_symbol_sampler, vec3(1.0f, 2.0f, 3.0f), ivec3(4, 5, 6));)";
- case ValidTextureOverload::kSampleCubeF32:
- return R"(texture(tint_symbol_sampler, vec3(1.0f, 2.0f, 3.0f));)";
- case ValidTextureOverload::kSampleCubeArrayF32:
- return R"(texture(tint_symbol_sampler, vec4(1.0f, 2.0f, 3.0f, float(4)));)";
- case ValidTextureOverload::kSampleDepth2dF32:
- return R"(texture(tint_symbol_sampler, vec3(1.0f, 2.0f, 0.0f));)";
- case ValidTextureOverload::kSampleDepth2dOffsetF32:
- return R"(textureOffset(tint_symbol_sampler, vec3(1.0f, 2.0f, 0.0f), ivec2(3, 4));)";
- case ValidTextureOverload::kSampleDepth2dArrayF32:
- return R"(texture(tint_symbol_sampler, vec4(1.0f, 2.0f, float(3), 0.0f));)";
- case ValidTextureOverload::kSampleDepth2dArrayOffsetF32:
- return R"(textureOffset(tint_symbol_sampler, vec4(1.0f, 2.0f, float(3), 0.0f), ivec2(4, 5));)";
- case ValidTextureOverload::kSampleDepthCubeF32:
- return R"(texture(tint_symbol_sampler, vec4(1.0f, 2.0f, 3.0f, 0.0f));)";
- case ValidTextureOverload::kSampleDepthCubeArrayF32:
- return R"(texture(tint_symbol_sampler, vec4(1.0f, 2.0f, 3.0f, float(4)), 0.0f);)";
- case ValidTextureOverload::kSampleBias2dF32:
- return R"(texture(tint_symbol_sampler, vec2(1.0f, 2.0f), 3.0f);)";
- case ValidTextureOverload::kSampleBias2dOffsetF32:
- return R"(textureOffset(tint_symbol_sampler, vec2(1.0f, 2.0f), ivec2(4, 5), 3.0f);)";
- case ValidTextureOverload::kSampleBias2dArrayF32:
- return R"(texture(tint_symbol_sampler, vec3(1.0f, 2.0f, float(4)), 3.0f);)";
- case ValidTextureOverload::kSampleBias2dArrayOffsetF32:
- return R"(textureOffset(tint_symbol_sampler, vec3(1.0f, 2.0f, float(3)), ivec2(5, 6), 4.0f);)";
- case ValidTextureOverload::kSampleBias3dF32:
- return R"(texture(tint_symbol_sampler, vec3(1.0f, 2.0f, 3.0f), 4.0f);)";
- case ValidTextureOverload::kSampleBias3dOffsetF32:
- return R"(textureOffset(tint_symbol_sampler, vec3(1.0f, 2.0f, 3.0f), ivec3(5, 6, 7), 4.0f);)";
- case ValidTextureOverload::kSampleBiasCubeF32:
- return R"(texture(tint_symbol_sampler, vec3(1.0f, 2.0f, 3.0f), 4.0f);)";
- case ValidTextureOverload::kSampleBiasCubeArrayF32:
- return R"(texture(tint_symbol_sampler, vec4(1.0f, 2.0f, 3.0f, float(3)), 4.0f);)";
- case ValidTextureOverload::kSampleLevel2dF32:
- return R"(textureLod(tint_symbol_sampler, vec2(1.0f, 2.0f), 3.0f);)";
- case ValidTextureOverload::kSampleLevel2dOffsetF32:
- return R"(textureLodOffset(tint_symbol_sampler, vec2(1.0f, 2.0f), 3.0f, ivec2(4, 5));)";
- case ValidTextureOverload::kSampleLevel2dArrayF32:
- return R"(textureLod(tint_symbol_sampler, vec3(1.0f, 2.0f, float(3)), 4.0f);)";
- case ValidTextureOverload::kSampleLevel2dArrayOffsetF32:
- return R"(textureLodOffset(tint_symbol_sampler, vec3(1.0f, 2.0f, float(3)), 4.0f, ivec2(5, 6));)";
- case ValidTextureOverload::kSampleLevel3dF32:
- return R"(textureLod(tint_symbol_sampler, vec3(1.0f, 2.0f, 3.0f), 4.0f);)";
- case ValidTextureOverload::kSampleLevel3dOffsetF32:
- return R"(textureLodOffset(tint_symbol_sampler, vec3(1.0f, 2.0f, 3.0f), 4.0f, ivec3(5, 6, 7));)";
- case ValidTextureOverload::kSampleLevelCubeF32:
- return R"(textureLod(tint_symbol_sampler, vec3(1.0f, 2.0f, 3.0f), 4.0f);)";
- case ValidTextureOverload::kSampleLevelCubeArrayF32:
- return R"(textureLod(tint_symbol_sampler, vec4(1.0f, 2.0f, 3.0f, float(4)), 5.0f);)";
- case ValidTextureOverload::kSampleLevelDepth2dF32:
- return R"(textureLod(tint_symbol_sampler, vec3(1.0f, 2.0f, 0.0f), float(3));)";
- case ValidTextureOverload::kSampleLevelDepth2dOffsetF32:
- return R"(textureLodOffset(tint_symbol_sampler, vec3(1.0f, 2.0f, 0.0f), float(3), ivec2(4, 5));)";
- case ValidTextureOverload::kSampleLevelDepth2dArrayF32:
- return R"(textureLod(tint_symbol_sampler, vec4(1.0f, 2.0f, float(3), 0.0f), float(4));)";
- case ValidTextureOverload::kSampleLevelDepth2dArrayOffsetF32:
- return R"(textureLodOffset(tint_symbol_sampler, vec4(1.0f, 2.0f, float(3), 0.0f), float(4), ivec2(5, 6));)";
- case ValidTextureOverload::kSampleLevelDepthCubeF32:
- return R"(textureLod(tint_symbol_sampler, vec4(1.0f, 2.0f, 3.0f, 0.0f), float(4)))";
- case ValidTextureOverload::kSampleLevelDepthCubeArrayF32:
- return R"(textureLod(tint_symbol_sampler, vec4(1.0f, 2.0f, 3.0f, float(4)), float(5));)";
- case ValidTextureOverload::kSampleGrad2dF32:
- return R"(textureGrad(tint_symbol_sampler, vec2(1.0f, 2.0f), vec2(3.0f, 4.0f), vec2(5.0f, 6.0f));)";
- case ValidTextureOverload::kSampleGrad2dOffsetF32:
- return R"(textureGradOffset(tint_symbol_sampler, vec2(1.0f, 2.0f), vec2(3.0f, 4.0f), vec2(5.0f, 6.0f), ivec2(7, 7));)";
- case ValidTextureOverload::kSampleGrad2dArrayF32:
- return R"(textureGrad(tint_symbol_sampler, vec3(1.0f, 2.0f, float(3)), vec2(4.0f, 5.0f), vec2(6.0f, 7.0f));)";
- case ValidTextureOverload::kSampleGrad2dArrayOffsetF32:
- return R"(textureGradOffset(tint_symbol_sampler, vec3(1.0f, 2.0f, float(3)), vec2(4.0f, 5.0f), vec2(6.0f, 7.0f), ivec2(6, 7));)";
- case ValidTextureOverload::kSampleGrad3dF32:
- return R"(textureGrad(tint_symbol_sampler, vec3(1.0f, 2.0f, 3.0f), vec3(4.0f, 5.0f, 6.0f), vec3(7.0f, 8.0f, 9.0f));)";
- case ValidTextureOverload::kSampleGrad3dOffsetF32:
- return R"(textureGradOffset(tint_symbol_sampler, vec3(1.0f, 2.0f, 3.0f), vec3(4.0f, 5.0f, 6.0f), vec3(7.0f, 8.0f, 9.0f), ivec3(0, 1, 2));)";
- case ValidTextureOverload::kSampleGradCubeF32:
- return R"(textureGrad(tint_symbol_sampler, vec3(1.0f, 2.0f, 3.0f), vec3(4.0f, 5.0f, 6.0f), vec3(7.0f, 8.0f, 9.0f));)";
- case ValidTextureOverload::kSampleGradCubeArrayF32:
- return R"(textureGrad(tint_symbol_sampler, vec4(1.0f, 2.0f, 3.0f, float(4)), vec3(5.0f, 6.0f, 7.0f), vec3(8.0f, 9.0f, 10.0f));)";
- case ValidTextureOverload::kSampleCompareDepth2dF32:
- return R"(texture(tint_symbol_sampler, vec3(1.0f, 2.0f, 3.0f));)";
- case ValidTextureOverload::kSampleCompareDepth2dOffsetF32:
- return R"(textureOffset(tint_symbol_sampler, vec3(1.0f, 2.0f, 3.0f), ivec2(4, 5));)";
- case ValidTextureOverload::kSampleCompareDepth2dArrayF32:
- return R"(texture(tint_symbol_sampler, vec4(1.0f, 2.0f, float(4), 3.0f));)";
- case ValidTextureOverload::kSampleCompareDepth2dArrayOffsetF32:
- return R"(textureOffset(tint_symbol_sampler, vec4(1.0f, 2.0f, float(4), 3.0f), ivec2(5, 6));)";
- case ValidTextureOverload::kSampleCompareDepthCubeF32:
- return R"(texture(tint_symbol_sampler, vec4(1.0f, 2.0f, 3.0f, 4.0f));)";
- case ValidTextureOverload::kSampleCompareDepthCubeArrayF32:
- return R"(texture(tint_symbol_sampler, vec4(1.0f, 2.0f, 3.0f, float(4)), 5.0f);)";
- case ValidTextureOverload::kSampleCompareLevelDepth2dF32:
- return R"(yyytexture(tint_symbol_sampler, vec2(1.0f, 2.0f), 3.0f);)";
- case ValidTextureOverload::kSampleCompareLevelDepth2dOffsetF32:
- return R"(yyytextureOffset(tint_symbol_sampler, vec2(1.0f, 2.0f), 3.0f, ivec2(4, 5));)";
- case ValidTextureOverload::kSampleCompareLevelDepth2dArrayF32:
- return R"(texture(tint_symbol_sampler, vec4(1.0f, 2.0f, float(4)), 3.0f);)";
- case ValidTextureOverload::kSampleCompareLevelDepth2dArrayOffsetF32:
- return R"(textureOffset(tint_symbol_sampler, vec3(1.0f, 2.0f, float(4)), 3.0f, ivec2(5, 6));)";
- case ValidTextureOverload::kSampleCompareLevelDepthCubeF32:
- return R"(texture(tint_symbol_sampler, vec3(1.0f, 2.0f, 3.0f), 4.0f);)";
- case ValidTextureOverload::kSampleCompareLevelDepthCubeArrayF32:
- return R"(texture(tint_symbol_sampler, vec4(1.0f, 2.0f, 3.0f, float(4)), 5.0f);)";
- case ValidTextureOverload::kLoad1dLevelF32:
- case ValidTextureOverload::kLoad1dLevelU32:
- case ValidTextureOverload::kLoad1dLevelI32:
- return R"(texelFetch(tint_symbol_2, 1, 3);)";
- case ValidTextureOverload::kLoad2dLevelF32:
- case ValidTextureOverload::kLoad2dLevelU32:
- case ValidTextureOverload::kLoad2dLevelI32:
- return R"(texelFetch(tint_symbol_2, ivec2(1, 2), 3);)";
- case ValidTextureOverload::kLoad2dArrayLevelF32:
- case ValidTextureOverload::kLoad2dArrayLevelU32:
- case ValidTextureOverload::kLoad2dArrayLevelI32:
- case ValidTextureOverload::kLoad3dLevelF32:
- case ValidTextureOverload::kLoad3dLevelU32:
- case ValidTextureOverload::kLoad3dLevelI32:
- return R"(texelFetch(tint_symbol_2, ivec3(1, 2, 3), 4);)";
- case ValidTextureOverload::kLoadDepthMultisampled2dF32:
- case ValidTextureOverload::kLoadMultisampled2dF32:
- case ValidTextureOverload::kLoadMultisampled2dU32:
- case ValidTextureOverload::kLoadMultisampled2dI32:
- return R"(texelFetch(tint_symbol_2, ivec2(1, 2), 3);)";
- case ValidTextureOverload::kLoadDepth2dLevelF32:
- return R"(texelFetch(tint_symbol_2, ivec2(1, 2), 3);)";
- case ValidTextureOverload::kLoadDepth2dArrayLevelF32:
- return R"(texelFetch(tint_symbol_2, ivec3(1, 2, 3), 4);)";
- case ValidTextureOverload::kStoreWO1dRgba32float:
- return R"(imageStore(tint_symbol, 1, vec4(2.0f, 3.0f, 4.0f, 5.0f));)";
- case ValidTextureOverload::kStoreWO2dRgba32float:
- return R"(imageStore(tint_symbol, ivec2(1, 2), vec4(3.0f, 4.0f, 5.0f, 6.0f));)";
- case ValidTextureOverload::kStoreWO2dArrayRgba32float:
- return R"(imageStore(tint_symbol, ivec3(1, 2, 3), vec4(4.0f, 5.0f, 6.0f, 7.0f));)";
- case ValidTextureOverload::kStoreWO3dRgba32float:
- return R"(imageStore(tint_symbol, ivec3(1, 2, 3), vec4(4.0f, 5.0f, 6.0f, 7.0f));)";
- }
- return "<unmatched texture overload>";
+ExpectedResult expected_texture_overload(ast::builtin::test::ValidTextureOverload overload) {
+ using ValidTextureOverload = ast::builtin::test::ValidTextureOverload;
+ switch (overload) {
+ case ValidTextureOverload::kDimensions1d:
+ case ValidTextureOverload::kDimensions2d:
+ case ValidTextureOverload::kDimensionsDepth2d:
+ case ValidTextureOverload::kDimensionsDepthMultisampled2d:
+ case ValidTextureOverload::kDimensionsMultisampled2d:
+ case ValidTextureOverload::kDimensions2dArray:
+ case ValidTextureOverload::kDimensionsDepth2dArray:
+ case ValidTextureOverload::kDimensions3d:
+ case ValidTextureOverload::kDimensionsCube:
+ case ValidTextureOverload::kDimensionsDepthCube:
+ case ValidTextureOverload::kDimensionsCubeArray:
+ case ValidTextureOverload::kDimensionsDepthCubeArray:
+ case ValidTextureOverload::kDimensions2dLevel:
+ case ValidTextureOverload::kDimensionsDepth2dLevel:
+ case ValidTextureOverload::kDimensions2dArrayLevel:
+ case ValidTextureOverload::kDimensionsDepth2dArrayLevel:
+ case ValidTextureOverload::kDimensions3dLevel:
+ case ValidTextureOverload::kDimensionsCubeLevel:
+ case ValidTextureOverload::kDimensionsDepthCubeLevel:
+ case ValidTextureOverload::kDimensionsCubeArrayLevel:
+ case ValidTextureOverload::kDimensionsDepthCubeArrayLevel:
+ return {"textureSize"};
+ case ValidTextureOverload::kDimensionsStorageWO1d:
+ case ValidTextureOverload::kDimensionsStorageWO2d:
+ case ValidTextureOverload::kDimensionsStorageWO2dArray:
+ case ValidTextureOverload::kDimensionsStorageWO3d:
+ return {"imageSize"};
+ case ValidTextureOverload::kGather2dF32:
+ return R"(textureGather(tint_symbol_sampler, vec2(1.0f, 2.0f), 0))";
+ case ValidTextureOverload::kGather2dOffsetF32:
+ return R"(textureGatherOffset(tint_symbol_sampler, vec2(1.0f, 2.0f), ivec2(3, 4), 0))";
+ case ValidTextureOverload::kGather2dArrayF32:
+ return R"(textureGather(tint_symbol_sampler, vec3(1.0f, 2.0f, float(3)), 0))";
+ case ValidTextureOverload::kGather2dArrayOffsetF32:
+ return R"(textureGatherOffset(tint_symbol_sampler, vec3(1.0f, 2.0f, float(3)), ivec2(4, 5), 0))";
+ case ValidTextureOverload::kGatherCubeF32:
+ return R"(textureGather(tint_symbol_sampler, vec3(1.0f, 2.0f, 3.0f), 0))";
+ case ValidTextureOverload::kGatherCubeArrayF32:
+ return R"(textureGather(tint_symbol_sampler, vec4(1.0f, 2.0f, 3.0f, float(4)), 0))";
+ case ValidTextureOverload::kGatherDepth2dF32:
+ return R"(textureGather(tint_symbol_sampler, vec2(1.0f, 2.0f), 0.0))";
+ case ValidTextureOverload::kGatherDepth2dOffsetF32:
+ return R"(textureGatherOffset(tint_symbol_sampler, vec2(1.0f, 2.0f), 0.0, ivec2(3, 4))";
+ case ValidTextureOverload::kGatherDepth2dArrayF32:
+ return R"(textureGather(tint_symbol_sampler, vec3(1.0f, 2.0f, float(3)), 0.0))";
+ case ValidTextureOverload::kGatherDepth2dArrayOffsetF32:
+ return R"(textureGatherOffset(tint_symbol_sampler, vec3(1.0f, 2.0f, float(3)), 0.0, ivec2(4, 5)))";
+ case ValidTextureOverload::kGatherDepthCubeF32:
+ return R"(textureGather(tint_symbol_sampler, vec3(1.0f, 2.0f, 3.0f), 0.0))";
+ case ValidTextureOverload::kGatherDepthCubeArrayF32:
+ return R"(textureGather(tint_symbol_sampler, vec4(1.0f, 2.0f, 3.0f, float(4)), 0.0))";
+ case ValidTextureOverload::kGatherCompareDepth2dF32:
+ return R"(textureGather(tint_symbol_sampler, vec2(1.0f, 2.0f), 3.0f))";
+ case ValidTextureOverload::kGatherCompareDepth2dOffsetF32:
+ return R"(textureGatherOffset(tint_symbol_sampler, vec2(1.0f, 2.0f), 3.0f, ivec2(4, 5)))";
+ case ValidTextureOverload::kGatherCompareDepth2dArrayF32:
+ return R"(textureGather(tint_symbol_sampler, vec3(1.0f, 2.0f, float(3)), 4.0f))";
+ case ValidTextureOverload::kGatherCompareDepth2dArrayOffsetF32:
+ return R"(textureGatherOffset(tint_symbol_sampler, vec3(1.0f, 2.0f, float(3)), 4.0f, ivec2(5, 6)))";
+ case ValidTextureOverload::kGatherCompareDepthCubeF32:
+ return R"(textureGather(tint_symbol_sampler, vec3(1.0f, 2.0f, 3.0f), 4.0f))";
+ case ValidTextureOverload::kGatherCompareDepthCubeArrayF32:
+ return R"(textureGather(tint_symbol_sampler, vec4(1.0f, 2.0f, 3.0f, float(4)), 5.0f))";
+ case ValidTextureOverload::kNumLayers2dArray:
+ case ValidTextureOverload::kNumLayersDepth2dArray:
+ case ValidTextureOverload::kNumLayersCubeArray:
+ case ValidTextureOverload::kNumLayersDepthCubeArray:
+ return {"textureSize"};
+ case ValidTextureOverload::kNumLayersStorageWO2dArray:
+ return {"imageSize"};
+ case ValidTextureOverload::kNumLevels2d:
+ case ValidTextureOverload::kNumLevelsCube:
+ case ValidTextureOverload::kNumLevelsDepth2d:
+ case ValidTextureOverload::kNumLevelsDepthCube:
+ case ValidTextureOverload::kNumLevels2dArray:
+ case ValidTextureOverload::kNumLevels3d:
+ case ValidTextureOverload::kNumLevelsCubeArray:
+ case ValidTextureOverload::kNumLevelsDepth2dArray:
+ case ValidTextureOverload::kNumLevelsDepthCubeArray:
+ return {"textureQueryLevels"};
+ case ValidTextureOverload::kNumSamplesDepthMultisampled2d:
+ case ValidTextureOverload::kNumSamplesMultisampled2d:
+ return {"textureSamples"};
+ case ValidTextureOverload::kSample1dF32:
+ return R"(texture(tint_symbol_sampler, 1.0f);)";
+ case ValidTextureOverload::kSample2dF32:
+ return R"(texture(tint_symbol_sampler, vec2(1.0f, 2.0f));)";
+ case ValidTextureOverload::kSample2dOffsetF32:
+ return R"(textureOffset(tint_symbol_sampler, vec2(1.0f, 2.0f), ivec2(3, 4));)";
+ case ValidTextureOverload::kSample2dArrayF32:
+ return R"(texture(tint_symbol_sampler, vec3(1.0f, 2.0f, float(3)));)";
+ case ValidTextureOverload::kSample2dArrayOffsetF32:
+ return R"(textureOffset(tint_symbol_sampler, vec3(1.0f, 2.0f, float(3)), ivec2(4, 5));)";
+ case ValidTextureOverload::kSample3dF32:
+ return R"(texture(tint_symbol_sampler, vec3(1.0f, 2.0f, 3.0f));)";
+ case ValidTextureOverload::kSample3dOffsetF32:
+ return R"(textureOffset(tint_symbol_sampler, vec3(1.0f, 2.0f, 3.0f), ivec3(4, 5, 6));)";
+ case ValidTextureOverload::kSampleCubeF32:
+ return R"(texture(tint_symbol_sampler, vec3(1.0f, 2.0f, 3.0f));)";
+ case ValidTextureOverload::kSampleCubeArrayF32:
+ return R"(texture(tint_symbol_sampler, vec4(1.0f, 2.0f, 3.0f, float(4)));)";
+ case ValidTextureOverload::kSampleDepth2dF32:
+ return R"(texture(tint_symbol_sampler, vec3(1.0f, 2.0f, 0.0f));)";
+ case ValidTextureOverload::kSampleDepth2dOffsetF32:
+ return R"(textureOffset(tint_symbol_sampler, vec3(1.0f, 2.0f, 0.0f), ivec2(3, 4));)";
+ case ValidTextureOverload::kSampleDepth2dArrayF32:
+ return R"(texture(tint_symbol_sampler, vec4(1.0f, 2.0f, float(3), 0.0f));)";
+ case ValidTextureOverload::kSampleDepth2dArrayOffsetF32:
+ return R"(textureOffset(tint_symbol_sampler, vec4(1.0f, 2.0f, float(3), 0.0f), ivec2(4, 5));)";
+ case ValidTextureOverload::kSampleDepthCubeF32:
+ return R"(texture(tint_symbol_sampler, vec4(1.0f, 2.0f, 3.0f, 0.0f));)";
+ case ValidTextureOverload::kSampleDepthCubeArrayF32:
+ return R"(texture(tint_symbol_sampler, vec4(1.0f, 2.0f, 3.0f, float(4)), 0.0f);)";
+ case ValidTextureOverload::kSampleBias2dF32:
+ return R"(texture(tint_symbol_sampler, vec2(1.0f, 2.0f), 3.0f);)";
+ case ValidTextureOverload::kSampleBias2dOffsetF32:
+ return R"(textureOffset(tint_symbol_sampler, vec2(1.0f, 2.0f), ivec2(4, 5), 3.0f);)";
+ case ValidTextureOverload::kSampleBias2dArrayF32:
+ return R"(texture(tint_symbol_sampler, vec3(1.0f, 2.0f, float(4)), 3.0f);)";
+ case ValidTextureOverload::kSampleBias2dArrayOffsetF32:
+ return R"(textureOffset(tint_symbol_sampler, vec3(1.0f, 2.0f, float(3)), ivec2(5, 6), 4.0f);)";
+ case ValidTextureOverload::kSampleBias3dF32:
+ return R"(texture(tint_symbol_sampler, vec3(1.0f, 2.0f, 3.0f), 4.0f);)";
+ case ValidTextureOverload::kSampleBias3dOffsetF32:
+ return R"(textureOffset(tint_symbol_sampler, vec3(1.0f, 2.0f, 3.0f), ivec3(5, 6, 7), 4.0f);)";
+ case ValidTextureOverload::kSampleBiasCubeF32:
+ return R"(texture(tint_symbol_sampler, vec3(1.0f, 2.0f, 3.0f), 4.0f);)";
+ case ValidTextureOverload::kSampleBiasCubeArrayF32:
+ return R"(texture(tint_symbol_sampler, vec4(1.0f, 2.0f, 3.0f, float(3)), 4.0f);)";
+ case ValidTextureOverload::kSampleLevel2dF32:
+ return R"(textureLod(tint_symbol_sampler, vec2(1.0f, 2.0f), 3.0f);)";
+ case ValidTextureOverload::kSampleLevel2dOffsetF32:
+ return R"(textureLodOffset(tint_symbol_sampler, vec2(1.0f, 2.0f), 3.0f, ivec2(4, 5));)";
+ case ValidTextureOverload::kSampleLevel2dArrayF32:
+ return R"(textureLod(tint_symbol_sampler, vec3(1.0f, 2.0f, float(3)), 4.0f);)";
+ case ValidTextureOverload::kSampleLevel2dArrayOffsetF32:
+ return R"(textureLodOffset(tint_symbol_sampler, vec3(1.0f, 2.0f, float(3)), 4.0f, ivec2(5, 6));)";
+ case ValidTextureOverload::kSampleLevel3dF32:
+ return R"(textureLod(tint_symbol_sampler, vec3(1.0f, 2.0f, 3.0f), 4.0f);)";
+ case ValidTextureOverload::kSampleLevel3dOffsetF32:
+ return R"(textureLodOffset(tint_symbol_sampler, vec3(1.0f, 2.0f, 3.0f), 4.0f, ivec3(5, 6, 7));)";
+ case ValidTextureOverload::kSampleLevelCubeF32:
+ return R"(textureLod(tint_symbol_sampler, vec3(1.0f, 2.0f, 3.0f), 4.0f);)";
+ case ValidTextureOverload::kSampleLevelCubeArrayF32:
+ return R"(textureLod(tint_symbol_sampler, vec4(1.0f, 2.0f, 3.0f, float(4)), 5.0f);)";
+ case ValidTextureOverload::kSampleLevelDepth2dF32:
+ return R"(textureLod(tint_symbol_sampler, vec3(1.0f, 2.0f, 0.0f), float(3));)";
+ case ValidTextureOverload::kSampleLevelDepth2dOffsetF32:
+ return R"(textureLodOffset(tint_symbol_sampler, vec3(1.0f, 2.0f, 0.0f), float(3), ivec2(4, 5));)";
+ case ValidTextureOverload::kSampleLevelDepth2dArrayF32:
+ return R"(textureLod(tint_symbol_sampler, vec4(1.0f, 2.0f, float(3), 0.0f), float(4));)";
+ case ValidTextureOverload::kSampleLevelDepth2dArrayOffsetF32:
+ return R"(textureLodOffset(tint_symbol_sampler, vec4(1.0f, 2.0f, float(3), 0.0f), float(4), ivec2(5, 6));)";
+ case ValidTextureOverload::kSampleLevelDepthCubeF32:
+ return R"(textureLod(tint_symbol_sampler, vec4(1.0f, 2.0f, 3.0f, 0.0f), float(4)))";
+ case ValidTextureOverload::kSampleLevelDepthCubeArrayF32:
+ return R"(textureLod(tint_symbol_sampler, vec4(1.0f, 2.0f, 3.0f, float(4)), float(5));)";
+ case ValidTextureOverload::kSampleGrad2dF32:
+ return R"(textureGrad(tint_symbol_sampler, vec2(1.0f, 2.0f), vec2(3.0f, 4.0f), vec2(5.0f, 6.0f));)";
+ case ValidTextureOverload::kSampleGrad2dOffsetF32:
+ return R"(textureGradOffset(tint_symbol_sampler, vec2(1.0f, 2.0f), vec2(3.0f, 4.0f), vec2(5.0f, 6.0f), ivec2(7));)";
+ case ValidTextureOverload::kSampleGrad2dArrayF32:
+ return R"(textureGrad(tint_symbol_sampler, vec3(1.0f, 2.0f, float(3)), vec2(4.0f, 5.0f), vec2(6.0f, 7.0f));)";
+ case ValidTextureOverload::kSampleGrad2dArrayOffsetF32:
+ return R"(textureGradOffset(tint_symbol_sampler, vec3(1.0f, 2.0f, float(3)), vec2(4.0f, 5.0f), vec2(6.0f, 7.0f), ivec2(6, 7));)";
+ case ValidTextureOverload::kSampleGrad3dF32:
+ return R"(textureGrad(tint_symbol_sampler, vec3(1.0f, 2.0f, 3.0f), vec3(4.0f, 5.0f, 6.0f), vec3(7.0f, 8.0f, 9.0f));)";
+ case ValidTextureOverload::kSampleGrad3dOffsetF32:
+ return R"(textureGradOffset(tint_symbol_sampler, vec3(1.0f, 2.0f, 3.0f), vec3(4.0f, 5.0f, 6.0f), vec3(7.0f, 8.0f, 9.0f), ivec3(0, 1, 2));)";
+ case ValidTextureOverload::kSampleGradCubeF32:
+ return R"(textureGrad(tint_symbol_sampler, vec3(1.0f, 2.0f, 3.0f), vec3(4.0f, 5.0f, 6.0f), vec3(7.0f, 8.0f, 9.0f));)";
+ case ValidTextureOverload::kSampleGradCubeArrayF32:
+ return R"(textureGrad(tint_symbol_sampler, vec4(1.0f, 2.0f, 3.0f, float(4)), vec3(5.0f, 6.0f, 7.0f), vec3(8.0f, 9.0f, 10.0f));)";
+ case ValidTextureOverload::kSampleCompareDepth2dF32:
+ return R"(texture(tint_symbol_sampler, vec3(1.0f, 2.0f, 3.0f));)";
+ case ValidTextureOverload::kSampleCompareDepth2dOffsetF32:
+ return R"(textureOffset(tint_symbol_sampler, vec3(1.0f, 2.0f, 3.0f), ivec2(4, 5));)";
+ case ValidTextureOverload::kSampleCompareDepth2dArrayF32:
+ return R"(texture(tint_symbol_sampler, vec4(1.0f, 2.0f, float(4), 3.0f));)";
+ case ValidTextureOverload::kSampleCompareDepth2dArrayOffsetF32:
+ return R"(textureOffset(tint_symbol_sampler, vec4(1.0f, 2.0f, float(4), 3.0f), ivec2(5, 6));)";
+ case ValidTextureOverload::kSampleCompareDepthCubeF32:
+ return R"(texture(tint_symbol_sampler, vec4(1.0f, 2.0f, 3.0f, 4.0f));)";
+ case ValidTextureOverload::kSampleCompareDepthCubeArrayF32:
+ return R"(texture(tint_symbol_sampler, vec4(1.0f, 2.0f, 3.0f, float(4)), 5.0f);)";
+ case ValidTextureOverload::kSampleCompareLevelDepth2dF32:
+ return R"(yyytexture(tint_symbol_sampler, vec2(1.0f, 2.0f), 3.0f);)";
+ case ValidTextureOverload::kSampleCompareLevelDepth2dOffsetF32:
+ return R"(yyytextureOffset(tint_symbol_sampler, vec2(1.0f, 2.0f), 3.0f, ivec2(4, 5));)";
+ case ValidTextureOverload::kSampleCompareLevelDepth2dArrayF32:
+ return R"(texture(tint_symbol_sampler, vec4(1.0f, 2.0f, float(4)), 3.0f);)";
+ case ValidTextureOverload::kSampleCompareLevelDepth2dArrayOffsetF32:
+ return R"(textureOffset(tint_symbol_sampler, vec3(1.0f, 2.0f, float(4)), 3.0f, ivec2(5, 6));)";
+ case ValidTextureOverload::kSampleCompareLevelDepthCubeF32:
+ return R"(texture(tint_symbol_sampler, vec3(1.0f, 2.0f, 3.0f), 4.0f);)";
+ case ValidTextureOverload::kSampleCompareLevelDepthCubeArrayF32:
+ return R"(texture(tint_symbol_sampler, vec4(1.0f, 2.0f, 3.0f, float(4)), 5.0f);)";
+ case ValidTextureOverload::kLoad1dLevelF32:
+ case ValidTextureOverload::kLoad1dLevelU32:
+ case ValidTextureOverload::kLoad1dLevelI32:
+ return R"(texelFetch(tint_symbol_2, 1, 3);)";
+ case ValidTextureOverload::kLoad2dLevelF32:
+ case ValidTextureOverload::kLoad2dLevelU32:
+ case ValidTextureOverload::kLoad2dLevelI32:
+ return R"(texelFetch(tint_symbol_2, ivec2(1, 2), 3);)";
+ case ValidTextureOverload::kLoad2dArrayLevelF32:
+ case ValidTextureOverload::kLoad2dArrayLevelU32:
+ case ValidTextureOverload::kLoad2dArrayLevelI32:
+ case ValidTextureOverload::kLoad3dLevelF32:
+ case ValidTextureOverload::kLoad3dLevelU32:
+ case ValidTextureOverload::kLoad3dLevelI32:
+ return R"(texelFetch(tint_symbol_2, ivec3(1, 2, 3), 4);)";
+ case ValidTextureOverload::kLoadDepthMultisampled2dF32:
+ case ValidTextureOverload::kLoadMultisampled2dF32:
+ case ValidTextureOverload::kLoadMultisampled2dU32:
+ case ValidTextureOverload::kLoadMultisampled2dI32:
+ return R"(texelFetch(tint_symbol_2, ivec2(1, 2), 3);)";
+ case ValidTextureOverload::kLoadDepth2dLevelF32:
+ return R"(texelFetch(tint_symbol_2, ivec2(1, 2), 3);)";
+ case ValidTextureOverload::kLoadDepth2dArrayLevelF32:
+ return R"(texelFetch(tint_symbol_2, ivec3(1, 2, 3), 4);)";
+ case ValidTextureOverload::kStoreWO1dRgba32float:
+ return R"(imageStore(tint_symbol, 1, vec4(2.0f, 3.0f, 4.0f, 5.0f));)";
+ case ValidTextureOverload::kStoreWO2dRgba32float:
+ return R"(imageStore(tint_symbol, ivec2(1, 2), vec4(3.0f, 4.0f, 5.0f, 6.0f));)";
+ case ValidTextureOverload::kStoreWO2dArrayRgba32float:
+ return R"(imageStore(tint_symbol, ivec3(1, 2, 3), vec4(4.0f, 5.0f, 6.0f, 7.0f));)";
+ case ValidTextureOverload::kStoreWO3dRgba32float:
+ return R"(imageStore(tint_symbol, ivec3(1, 2, 3), vec4(4.0f, 5.0f, 6.0f, 7.0f));)";
+ }
+ return "<unmatched texture overload>";
} // NOLINT - Ignore the length of this function
class GlslGeneratorBuiltinTextureTest
: public TestParamHelper<ast::builtin::test::TextureOverloadCase> {};
TEST_P(GlslGeneratorBuiltinTextureTest, Call) {
- auto param = GetParam();
+ auto param = GetParam();
- param.BuildTextureVariable(this);
- param.BuildSamplerVariable(this);
+ param.BuildTextureVariable(this);
+ param.BuildSamplerVariable(this);
- auto* call = Call(param.function, param.args(this));
- auto* stmt = CallStmt(call);
+ auto* call = Call(param.function, param.args(this));
+ auto* stmt = CallStmt(call);
- Func("main", {}, ty.void_(), {stmt}, {Stage(ast::PipelineStage::kFragment)});
+ Func("main", {}, ty.void_(), {stmt}, {Stage(ast::PipelineStage::kFragment)});
- GeneratorImpl& gen = SanitizeAndBuild();
+ GeneratorImpl& gen = SanitizeAndBuild();
- ASSERT_TRUE(gen.Generate()) << gen.error();
+ ASSERT_TRUE(gen.Generate()) << gen.error();
- auto expected = expected_texture_overload(param.overload);
+ auto expected = expected_texture_overload(param.overload);
- EXPECT_THAT(gen.result(), HasSubstr(expected.pre));
- EXPECT_THAT(gen.result(), HasSubstr(expected.out));
+ EXPECT_THAT(gen.result(), HasSubstr(expected.pre));
+ EXPECT_THAT(gen.result(), HasSubstr(expected.out));
}
-INSTANTIATE_TEST_SUITE_P(
- GlslGeneratorBuiltinTextureTest,
- GlslGeneratorBuiltinTextureTest,
- testing::ValuesIn(ast::builtin::test::TextureOverloadCase::ValidCases()));
+INSTANTIATE_TEST_SUITE_P(GlslGeneratorBuiltinTextureTest,
+ GlslGeneratorBuiltinTextureTest,
+ testing::ValuesIn(ast::builtin::test::TextureOverloadCase::ValidCases()));
} // namespace
} // namespace tint::writer::glsl
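
For orientation: the texture-builtin file above is one value-parameterized test. expected_texture_overload() maps each ValidTextureOverload to the GLSL call it should produce, TEST_P builds the program for the current overload, and INSTANTIATE_TEST_SUITE_P runs the body over TextureOverloadCase::ValidCases(). Below is a stripped-down, self-contained sketch of that gtest pattern with hypothetical stand-in types in place of the Tint helpers; it shows only the parameterization plumbing, not the real generator.

#include <string>
#include <vector>

#include "gmock/gmock.h"
#include "gtest/gtest.h"

// Hypothetical stand-in for ast::builtin::test::TextureOverloadCase: just the
// builtin name and the substring expected in the generated GLSL.
struct OverloadCase {
    std::string function;
    std::string expected;
};

static std::vector<OverloadCase> ValidCases() {
    return {
        {"textureDimensions", "textureSize"},
        {"textureSample", "texture("},
    };
}

class TextureOverloadSketch : public ::testing::TestWithParam<OverloadCase> {};

TEST_P(TextureOverloadSketch, Call) {
    const OverloadCase& param = GetParam();
    // The real test builds a Tint program for param and runs the GLSL
    // GeneratorImpl; a fake result stands in here to keep the sketch runnable.
    std::string generated = "#version 310 es\n" + param.expected + "(...);\n";
    EXPECT_THAT(generated, ::testing::HasSubstr(param.expected));
}

INSTANTIATE_TEST_SUITE_P(TextureOverloadSketch,
                         TextureOverloadSketch,
                         ::testing::ValuesIn(ValidCases()));
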
diff --git a/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_call_test.cc b/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_call_test.cc
index fc74bb9c0a5..c8a1de26e9c 100644
--- a/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_call_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_call_test.cc
@@ -15,62 +15,64 @@
#include "src/tint/ast/call_statement.h"
#include "src/tint/writer/glsl/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::glsl {
namespace {
using GlslGeneratorImplTest_Call = TestHelper;
TEST_F(GlslGeneratorImplTest_Call, EmitExpression_Call_WithoutParams) {
- Func("my_func", {}, ty.f32(), {Return(1.23f)});
+ Func("my_func", {}, ty.f32(), {Return(1.23_f)});
- auto* call = Call("my_func");
- WrapInFunction(call);
+ auto* call = Call("my_func");
+ WrapInFunction(call);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, call)) << gen.error();
- EXPECT_EQ(out.str(), "my_func()");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, call)) << gen.error();
+ EXPECT_EQ(out.str(), "my_func()");
}
TEST_F(GlslGeneratorImplTest_Call, EmitExpression_Call_WithParams) {
- Func("my_func",
- {
- Param(Sym(), ty.f32()),
- Param(Sym(), ty.f32()),
- },
- ty.f32(), {Return(1.23f)});
- Global("param1", ty.f32(), ast::StorageClass::kPrivate);
- Global("param2", ty.f32(), ast::StorageClass::kPrivate);
-
- auto* call = Call("my_func", "param1", "param2");
- WrapInFunction(call);
-
- GeneratorImpl& gen = Build();
-
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, call)) << gen.error();
- EXPECT_EQ(out.str(), "my_func(param1, param2)");
+ Func("my_func",
+ {
+ Param(Sym(), ty.f32()),
+ Param(Sym(), ty.f32()),
+ },
+ ty.f32(), {Return(1.23_f)});
+ Global("param1", ty.f32(), ast::StorageClass::kPrivate);
+ Global("param2", ty.f32(), ast::StorageClass::kPrivate);
+
+ auto* call = Call("my_func", "param1", "param2");
+ WrapInFunction(call);
+
+ GeneratorImpl& gen = Build();
+
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, call)) << gen.error();
+ EXPECT_EQ(out.str(), "my_func(param1, param2)");
}
TEST_F(GlslGeneratorImplTest_Call, EmitStatement_Call) {
- Func("my_func",
- {
- Param(Sym(), ty.f32()),
- Param(Sym(), ty.f32()),
- },
- ty.void_(), ast::StatementList{}, ast::AttributeList{});
- Global("param1", ty.f32(), ast::StorageClass::kPrivate);
- Global("param2", ty.f32(), ast::StorageClass::kPrivate);
-
- auto* call = CallStmt(Call("my_func", "param1", "param2"));
- WrapInFunction(call);
-
- GeneratorImpl& gen = Build();
-
- gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(call)) << gen.error();
- EXPECT_EQ(gen.result(), " my_func(param1, param2);\n");
+ Func("my_func",
+ {
+ Param(Sym(), ty.f32()),
+ Param(Sym(), ty.f32()),
+ },
+ ty.void_(), ast::StatementList{}, ast::AttributeList{});
+ Global("param1", ty.f32(), ast::StorageClass::kPrivate);
+ Global("param2", ty.f32(), ast::StorageClass::kPrivate);
+
+ auto* call = CallStmt(Call("my_func", "param1", "param2"));
+ WrapInFunction(call);
+
+ GeneratorImpl& gen = Build();
+
+ gen.increment_indent();
+ ASSERT_TRUE(gen.EmitStatement(call)) << gen.error();
+ EXPECT_EQ(gen.result(), " my_func(param1, param2);\n");
}
} // namespace
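
Two recurring changes run through the call tests above and the rest of these Tint test hunks: the sources move from 2-space to 4-space indentation with a longer line limit, and untyped literals are replaced with Tint's typed number suffixes (Return(1.23f) becomes Return(1.23_f), enabled by the file-level "using namespace tint::number_suffixes; // NOLINT"). As a reading aid, here is one post-change test pulled out of the diff noise into its final form; it reuses only helpers that appear in the hunks above, and the test name is illustrative.

#include <sstream>

#include "src/tint/writer/glsl/test_helper.h"

using namespace tint::number_suffixes;  // NOLINT

namespace tint::writer::glsl {
namespace {

using GlslGeneratorImplTest_Call = TestHelper;

TEST_F(GlslGeneratorImplTest_Call, EmitExpression_Call_Sketch) {
    // f32 return value written with the typed _f suffix instead of 1.23f.
    Func("my_func", {}, ty.f32(), {Return(1.23_f)});

    auto* call = Call("my_func");
    WrapInFunction(call);

    GeneratorImpl& gen = Build();

    std::stringstream out;
    ASSERT_TRUE(gen.EmitExpression(out, call)) << gen.error();
    EXPECT_EQ(out.str(), "my_func()");
}

}  // namespace
}  // namespace tint::writer::glsl
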
diff --git a/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_case_test.cc b/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_case_test.cc
index ecf39371b31..02d16654da9 100644
--- a/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_case_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_case_test.cc
@@ -15,70 +15,70 @@
#include "src/tint/ast/fallthrough_statement.h"
#include "src/tint/writer/glsl/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::glsl {
namespace {
using GlslGeneratorImplTest_Case = TestHelper;
TEST_F(GlslGeneratorImplTest_Case, Emit_Case) {
- auto* s = Switch(1, Case(Expr(5), Block(create<ast::BreakStatement>())),
- DefaultCase());
- WrapInFunction(s);
+ auto* s = Switch(1_i, Case(Expr(5_i), Block(create<ast::BreakStatement>())), DefaultCase());
+ WrapInFunction(s);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitCase(s->body[0])) << gen.error();
- EXPECT_EQ(gen.result(), R"( case 5: {
+ ASSERT_TRUE(gen.EmitCase(s->body[0])) << gen.error();
+ EXPECT_EQ(gen.result(), R"( case 5: {
break;
}
)");
}
TEST_F(GlslGeneratorImplTest_Case, Emit_Case_BreaksByDefault) {
- auto* s = Switch(1, Case(Expr(5), Block()), DefaultCase());
- WrapInFunction(s);
+ auto* s = Switch(1_i, Case(Expr(5_i), Block()), DefaultCase());
+ WrapInFunction(s);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitCase(s->body[0])) << gen.error();
- EXPECT_EQ(gen.result(), R"( case 5: {
+ ASSERT_TRUE(gen.EmitCase(s->body[0])) << gen.error();
+ EXPECT_EQ(gen.result(), R"( case 5: {
break;
}
)");
}
TEST_F(GlslGeneratorImplTest_Case, Emit_Case_WithFallthrough) {
- auto* s = Switch(1, Case(Expr(5), Block(create<ast::FallthroughStatement>())),
- DefaultCase());
- WrapInFunction(s);
+ auto* s =
+ Switch(1_i, Case(Expr(5_i), Block(create<ast::FallthroughStatement>())), DefaultCase());
+ WrapInFunction(s);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitCase(s->body[0])) << gen.error();
- EXPECT_EQ(gen.result(), R"( case 5: {
+ ASSERT_TRUE(gen.EmitCase(s->body[0])) << gen.error();
+ EXPECT_EQ(gen.result(), R"( case 5: {
/* fallthrough */
}
)");
}
TEST_F(GlslGeneratorImplTest_Case, Emit_Case_MultipleSelectors) {
- auto* s =
- Switch(1, Case({Expr(5), Expr(6)}, Block(create<ast::BreakStatement>())),
- DefaultCase());
- WrapInFunction(s);
+ auto* s = Switch(1_i, Case({Expr(5_i), Expr(6_i)}, Block(create<ast::BreakStatement>())),
+ DefaultCase());
+ WrapInFunction(s);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitCase(s->body[0])) << gen.error();
- EXPECT_EQ(gen.result(), R"( case 5:
+ ASSERT_TRUE(gen.EmitCase(s->body[0])) << gen.error();
+ EXPECT_EQ(gen.result(), R"( case 5:
case 6: {
break;
}
@@ -86,15 +86,15 @@ TEST_F(GlslGeneratorImplTest_Case, Emit_Case_MultipleSelectors) {
}
TEST_F(GlslGeneratorImplTest_Case, Emit_Case_Default) {
- auto* s = Switch(1, DefaultCase(Block(create<ast::BreakStatement>())));
- WrapInFunction(s);
+ auto* s = Switch(1_i, DefaultCase(Block(create<ast::BreakStatement>())));
+ WrapInFunction(s);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitCase(s->body[0])) << gen.error();
- EXPECT_EQ(gen.result(), R"( default: {
+ ASSERT_TRUE(gen.EmitCase(s->body[0])) << gen.error();
+ EXPECT_EQ(gen.result(), R"( default: {
break;
}
)");
diff --git a/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_cast_test.cc b/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_cast_test.cc
index 8228af68557..c4f9c059867 100644
--- a/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_cast_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_cast_test.cc
@@ -14,31 +14,33 @@
#include "src/tint/writer/glsl/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::glsl {
namespace {
using GlslGeneratorImplTest_Cast = TestHelper;
TEST_F(GlslGeneratorImplTest_Cast, EmitExpression_Cast_Scalar) {
- auto* cast = Construct<f32>(1);
- WrapInFunction(cast);
+ auto* cast = Construct<f32>(1_i);
+ WrapInFunction(cast);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, cast)) << gen.error();
- EXPECT_EQ(out.str(), "float(1)");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, cast)) << gen.error();
+ EXPECT_EQ(out.str(), "1.0f");
}
TEST_F(GlslGeneratorImplTest_Cast, EmitExpression_Cast_Vector) {
- auto* cast = vec3<f32>(vec3<i32>(1, 2, 3));
- WrapInFunction(cast);
+ auto* cast = vec3<f32>(vec3<i32>(1_i, 2_i, 3_i));
+ WrapInFunction(cast);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, cast)) << gen.error();
- EXPECT_EQ(out.str(), "vec3(ivec3(1, 2, 3))");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, cast)) << gen.error();
+ EXPECT_EQ(out.str(), "vec3(1.0f, 2.0f, 3.0f)");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_constructor_test.cc b/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_constructor_test.cc
index 51f29434d7a..e70ecaf6617 100644
--- a/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_constructor_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_constructor_test.cc
@@ -15,6 +15,8 @@
#include "gmock/gmock.h"
#include "src/tint/writer/glsl/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::glsl {
namespace {
@@ -23,213 +25,202 @@ using ::testing::HasSubstr;
using GlslGeneratorImplTest_Constructor = TestHelper;
TEST_F(GlslGeneratorImplTest_Constructor, EmitConstructor_Bool) {
- WrapInFunction(Expr(false));
+ WrapInFunction(Expr(false));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr("false"));
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr("false"));
}
TEST_F(GlslGeneratorImplTest_Constructor, EmitConstructor_Int) {
- WrapInFunction(Expr(-12345));
+ WrapInFunction(Expr(-12345_i));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr("-12345"));
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr("-12345"));
}
TEST_F(GlslGeneratorImplTest_Constructor, EmitConstructor_UInt) {
- WrapInFunction(Expr(56779u));
+ WrapInFunction(Expr(56779_u));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr("56779u"));
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr("56779u"));
}
TEST_F(GlslGeneratorImplTest_Constructor, EmitConstructor_Float) {
- // Use a number close to 1<<30 but whose decimal representation ends in 0.
- WrapInFunction(Expr(static_cast<float>((1 << 30) - 4)));
+ // Use a number close to 1<<30 but whose decimal representation ends in 0.
+ WrapInFunction(Expr(f32((1 << 30) - 4)));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr("1073741824.0f"));
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr("1073741824.0f"));
}
TEST_F(GlslGeneratorImplTest_Constructor, EmitConstructor_Type_Float) {
- WrapInFunction(Construct<f32>(-1.2e-5f));
+ WrapInFunction(Construct<f32>(-1.2e-5_f));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr("float(-0.000012f)"));
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr("-0.000012f"));
}
TEST_F(GlslGeneratorImplTest_Constructor, EmitConstructor_Type_Bool) {
- WrapInFunction(Construct<bool>(true));
+ WrapInFunction(Construct<bool>(true));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr("bool(true)"));
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr("true"));
}
TEST_F(GlslGeneratorImplTest_Constructor, EmitConstructor_Type_Int) {
- WrapInFunction(Construct<i32>(-12345));
+ WrapInFunction(Construct<i32>(-12345_i));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr("int(-12345)"));
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr("-12345"));
}
TEST_F(GlslGeneratorImplTest_Constructor, EmitConstructor_Type_Uint) {
- WrapInFunction(Construct<u32>(12345u));
+ WrapInFunction(Construct<u32>(12345_u));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr("uint(12345u)"));
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr("12345u"));
}
TEST_F(GlslGeneratorImplTest_Constructor, EmitConstructor_Type_Vec) {
- WrapInFunction(vec3<f32>(1.f, 2.f, 3.f));
+ WrapInFunction(vec3<f32>(1_f, 2_f, 3_f));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr("vec3(1.0f, 2.0f, 3.0f)"));
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr("vec3(1.0f, 2.0f, 3.0f)"));
}
TEST_F(GlslGeneratorImplTest_Constructor, EmitConstructor_Type_Vec_Empty) {
- WrapInFunction(vec3<f32>());
+ WrapInFunction(vec3<f32>());
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr("vec3(0.0f, 0.0f, 0.0f)"));
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr("vec3(0.0f)"));
}
-TEST_F(GlslGeneratorImplTest_Constructor,
- EmitConstructor_Type_Vec_SingleScalar_Float) {
- WrapInFunction(vec3<f32>(2.0f));
+TEST_F(GlslGeneratorImplTest_Constructor, EmitConstructor_Type_Vec_SingleScalar_Float) {
+ WrapInFunction(vec3<f32>(2_f));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr("vec3(2.0f)"));
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr("vec3(2.0f)"));
}
-TEST_F(GlslGeneratorImplTest_Constructor,
- EmitConstructor_Type_Vec_SingleScalar_Bool) {
- WrapInFunction(vec3<bool>(true));
+TEST_F(GlslGeneratorImplTest_Constructor, EmitConstructor_Type_Vec_SingleScalar_Bool) {
+ WrapInFunction(vec3<bool>(true));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr("bvec3(true)"));
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr("bvec3(true)"));
}
-TEST_F(GlslGeneratorImplTest_Constructor,
- EmitConstructor_Type_Vec_SingleScalar_Int) {
- WrapInFunction(vec3<i32>(2));
+TEST_F(GlslGeneratorImplTest_Constructor, EmitConstructor_Type_Vec_SingleScalar_Int) {
+ WrapInFunction(vec3<i32>(2_i));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr("ivec3(2)"));
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr("ivec3(2)"));
}
-TEST_F(GlslGeneratorImplTest_Constructor,
- EmitConstructor_Type_Vec_SingleScalar_UInt) {
- WrapInFunction(vec3<u32>(2u));
+TEST_F(GlslGeneratorImplTest_Constructor, EmitConstructor_Type_Vec_SingleScalar_UInt) {
+ WrapInFunction(vec3<u32>(2_u));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr("uvec3(2u)"));
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr("uvec3(2u)"));
}
TEST_F(GlslGeneratorImplTest_Constructor, EmitConstructor_Type_Mat) {
- WrapInFunction(
- mat2x3<f32>(vec3<f32>(1.f, 2.f, 3.f), vec3<f32>(3.f, 4.f, 5.f)));
+ WrapInFunction(mat2x3<f32>(vec3<f32>(1_f, 2_f, 3_f), vec3<f32>(3_f, 4_f, 5_f)));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
+ ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(
- gen.result(),
- HasSubstr("mat2x3(vec3(1.0f, 2.0f, 3.0f), vec3(3.0f, 4.0f, 5.0f))"));
+ EXPECT_THAT(gen.result(), HasSubstr("mat2x3(vec3(1.0f, 2.0f, 3.0f), vec3(3.0f, 4.0f, 5.0f))"));
}
TEST_F(GlslGeneratorImplTest_Constructor, EmitConstructor_Type_Mat_Empty) {
- WrapInFunction(mat2x3<f32>());
+ WrapInFunction(mat2x3<f32>());
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
+ ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(),
- HasSubstr("mat2x3(0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f)"));
+ EXPECT_THAT(gen.result(), HasSubstr("mat2x3(vec3(0.0f), vec3(0.0f)"));
}
TEST_F(GlslGeneratorImplTest_Constructor, EmitConstructor_Type_Array) {
- WrapInFunction(Construct(ty.array(ty.vec3<f32>(), 3),
- vec3<f32>(1.f, 2.f, 3.f), vec3<f32>(4.f, 5.f, 6.f),
- vec3<f32>(7.f, 8.f, 9.f)));
+ WrapInFunction(Construct(ty.array(ty.vec3<f32>(), 3_u), vec3<f32>(1_f, 2_f, 3_f),
+ vec3<f32>(4_f, 5_f, 6_f), vec3<f32>(7_f, 8_f, 9_f)));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr("vec3[3](vec3(1.0f, 2.0f, 3.0f), "
- "vec3(4.0f, 5.0f, 6.0f), "
- "vec3(7.0f, 8.0f, 9.0f))"));
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr("vec3[3](vec3(1.0f, 2.0f, 3.0f), "
+ "vec3(4.0f, 5.0f, 6.0f), "
+ "vec3(7.0f, 8.0f, 9.0f))"));
}
TEST_F(GlslGeneratorImplTest_Constructor, EmitConstructor_Type_Array_Empty) {
- WrapInFunction(Construct(ty.array(ty.vec3<f32>(), 3)));
+ WrapInFunction(Construct(ty.array(ty.vec3<f32>(), 3_u)));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(
- gen.result(),
- HasSubstr("vec3[3](vec3(0.0f, 0.0f, 0.0f), vec3(0.0f, 0.0f, 0.0f),"
- " vec3(0.0f, 0.0f, 0.0f))"));
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr("vec3[3](vec3(0.0f, 0.0f, 0.0f), vec3(0.0f, 0.0f, 0.0f),"
+ " vec3(0.0f, 0.0f, 0.0f))"));
}
TEST_F(GlslGeneratorImplTest_Constructor, EmitConstructor_Type_Struct) {
- auto* str = Structure("S", {
- Member("a", ty.i32()),
- Member("b", ty.f32()),
- Member("c", ty.vec3<i32>()),
- });
+ auto* str = Structure("S", {
+ Member("a", ty.i32()),
+ Member("b", ty.f32()),
+ Member("c", ty.vec3<i32>()),
+ });
- WrapInFunction(Construct(ty.Of(str), 1, 2.0f, vec3<i32>(3, 4, 5)));
+ WrapInFunction(Construct(ty.Of(str), 1_i, 2_f, vec3<i32>(3_i, 4_i, 5_i)));
- GeneratorImpl& gen = SanitizeAndBuild();
+ GeneratorImpl& gen = SanitizeAndBuild();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr("S(1, 2.0f, ivec3(3, 4, 5))"));
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr("S(1, 2.0f, ivec3(3, 4, 5))"));
}
TEST_F(GlslGeneratorImplTest_Constructor, EmitConstructor_Type_Struct_Empty) {
- auto* str = Structure("S", {
- Member("a", ty.i32()),
- Member("b", ty.f32()),
- Member("c", ty.vec3<i32>()),
- });
+ auto* str = Structure("S", {
+ Member("a", ty.i32()),
+ Member("b", ty.f32()),
+ Member("c", ty.vec3<i32>()),
+ });
- WrapInFunction(Construct(ty.Of(str)));
+ WrapInFunction(Construct(ty.Of(str)));
- GeneratorImpl& gen = SanitizeAndBuild();
+ GeneratorImpl& gen = SanitizeAndBuild();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr("S(0"));
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr("S(0"));
}
} // namespace
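[Note on the constructor-test hunks above: besides the clang-format reflow to 100 columns, the literals migrate to Tint's typed number suffixes (2 -> 2_i, 2u -> 2_u, 1.f -> 1_f), the array size is written 3_u, and the empty-matrix expectation changes from a flat scalar list to per-column vec3 splats. Reassembled from the '+' lines, one of the updated tests reads as follows; this is a readability sketch only, and the suffix operators come from the file's using namespace tint::number_suffixes declaration.]

    TEST_F(GlslGeneratorImplTest_Constructor, EmitConstructor_Type_Vec_SingleScalar_Int) {
        // 2_i is a typed i32 literal provided by tint::number_suffixes.
        WrapInFunction(vec3<i32>(2_i));

        GeneratorImpl& gen = Build();

        ASSERT_TRUE(gen.Generate()) << gen.error();
        EXPECT_THAT(gen.result(), HasSubstr("ivec3(2)"));
    }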
diff --git a/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_continue_test.cc b/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_continue_test.cc
index 961aec5c71e..331ce795dec 100644
--- a/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_continue_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_continue_test.cc
@@ -20,16 +20,15 @@ namespace {
using GlslGeneratorImplTest_Continue = TestHelper;
TEST_F(GlslGeneratorImplTest_Continue, Emit_Continue) {
- auto* loop = Loop(Block(If(false, Block(Break())), //
- Continue()));
- WrapInFunction(loop);
+ auto* loop = Loop(Block(If(false, Block(Break())), Continue()));
+ WrapInFunction(loop);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(loop)) << gen.error();
- EXPECT_EQ(gen.result(), R"( while (true) {
+ ASSERT_TRUE(gen.EmitStatement(loop)) << gen.error();
+ EXPECT_EQ(gen.result(), R"( while (true) {
if (false) {
break;
}
diff --git a/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_discard_test.cc b/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_discard_test.cc
index 7f3f1839051..87c85cbeefa 100644
--- a/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_discard_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_discard_test.cc
@@ -20,15 +20,15 @@ namespace {
using GlslGeneratorImplTest_Discard = TestHelper;
TEST_F(GlslGeneratorImplTest_Discard, Emit_Discard) {
- auto* stmt = create<ast::DiscardStatement>();
- WrapInFunction(stmt);
+ auto* stmt = create<ast::DiscardStatement>();
+ WrapInFunction(stmt);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(stmt)) << gen.error();
- EXPECT_EQ(gen.result(), " discard;\n");
+ ASSERT_TRUE(gen.EmitStatement(stmt)) << gen.error();
+ EXPECT_EQ(gen.result(), " discard;\n");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_function_test.cc b/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_function_test.cc
index c9aa3b9ef00..c5bccf263b6 100644
--- a/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_function_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_function_test.cc
@@ -20,23 +20,25 @@
using ::testing::HasSubstr;
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::glsl {
namespace {
using GlslGeneratorImplTest_Function = TestHelper;
TEST_F(GlslGeneratorImplTest_Function, Emit_Function) {
- Func("my_func", ast::VariableList{}, ty.void_(),
- {
- Return(),
- });
+ Func("my_func", ast::VariableList{}, ty.void_(),
+ {
+ Return(),
+ });
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"( #version 310 es
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"( #version 310 es
void my_func() {
return;
@@ -46,34 +48,33 @@ TEST_F(GlslGeneratorImplTest_Function, Emit_Function) {
}
TEST_F(GlslGeneratorImplTest_Function, Emit_Function_Name_Collision) {
- Func("centroid", ast::VariableList{}, ty.void_(),
- {
- Return(),
- });
+ Func("centroid", ast::VariableList{}, ty.void_(),
+ {
+ Return(),
+ });
- GeneratorImpl& gen = SanitizeAndBuild();
+ GeneratorImpl& gen = SanitizeAndBuild();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr(R"( void tint_symbol() {
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr(R"( void tint_symbol() {
return;
})"));
}
TEST_F(GlslGeneratorImplTest_Function, Emit_Function_WithParams) {
- Func("my_func", ast::VariableList{Param("a", ty.f32()), Param("b", ty.i32())},
- ty.void_(),
- {
- Return(),
- });
+ Func("my_func", ast::VariableList{Param("a", ty.f32()), Param("b", ty.i32())}, ty.void_(),
+ {
+ Return(),
+ });
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"( #version 310 es
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"( #version 310 es
void my_func(float a, int b) {
return;
@@ -82,17 +83,16 @@ TEST_F(GlslGeneratorImplTest_Function, Emit_Function_WithParams) {
)");
}
-TEST_F(GlslGeneratorImplTest_Function,
- Emit_Attribute_EntryPoint_NoReturn_Void) {
- Func("func", ast::VariableList{}, ty.void_(), {/* no explicit return */},
- {
- Stage(ast::PipelineStage::kFragment),
- });
+TEST_F(GlslGeneratorImplTest_Function, Emit_Attribute_EntryPoint_NoReturn_Void) {
+ Func("func", ast::VariableList{}, ty.void_(), {/* no explicit return */},
+ {
+ Stage(ast::PipelineStage::kFragment),
+ });
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(#version 310 es
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(#version 310 es
precision mediump float;
void func() {
@@ -102,34 +102,33 @@ void func() {
}
TEST_F(GlslGeneratorImplTest_Function, PtrParameter) {
- // fn f(foo : ptr<function, f32>) -> f32 {
- // return *foo;
- // }
- Func("f", {Param("foo", ty.pointer<f32>(ast::StorageClass::kFunction))},
- ty.f32(), {Return(Deref("foo"))});
+ // fn f(foo : ptr<function, f32>) -> f32 {
+ // return *foo;
+ // }
+ Func("f", {Param("foo", ty.pointer<f32>(ast::StorageClass::kFunction))}, ty.f32(),
+ {Return(Deref("foo"))});
- GeneratorImpl& gen = SanitizeAndBuild();
+ GeneratorImpl& gen = SanitizeAndBuild();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr(R"(float f(inout float foo) {
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr(R"(float f(inout float foo) {
return foo;
}
)"));
}
-TEST_F(GlslGeneratorImplTest_Function,
- Emit_Attribute_EntryPoint_WithInOutVars) {
- // fn frag_main(@location(0) foo : f32) -> @location(1) f32 {
- // return foo;
- // }
- auto* foo_in = Param("foo", ty.f32(), {Location(0)});
- Func("frag_main", ast::VariableList{foo_in}, ty.f32(), {Return("foo")},
- {Stage(ast::PipelineStage::kFragment)}, {Location(1)});
+TEST_F(GlslGeneratorImplTest_Function, Emit_Attribute_EntryPoint_WithInOutVars) {
+ // fn frag_main(@location(0) foo : f32) -> @location(1) f32 {
+ // return foo;
+ // }
+ auto* foo_in = Param("foo", ty.f32(), {Location(0)});
+ Func("frag_main", ast::VariableList{foo_in}, ty.f32(), {Return("foo")},
+ {Stage(ast::PipelineStage::kFragment)}, {Location(1)});
- GeneratorImpl& gen = SanitizeAndBuild();
+ GeneratorImpl& gen = SanitizeAndBuild();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(#version 310 es
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(#version 310 es
precision mediump float;
layout(location = 0) in float foo_1;
@@ -146,22 +145,18 @@ void main() {
)");
}
-TEST_F(GlslGeneratorImplTest_Function,
- Emit_Attribute_EntryPoint_WithInOut_Builtins) {
- // fn frag_main(@position(0) coord : vec4<f32>) -> @frag_depth f32 {
- // return coord.x;
- // }
- auto* coord_in =
- Param("coord", ty.vec4<f32>(), {Builtin(ast::Builtin::kPosition)});
- Func("frag_main", ast::VariableList{coord_in}, ty.f32(),
- {Return(MemberAccessor("coord", "x"))},
- {Stage(ast::PipelineStage::kFragment)},
- {Builtin(ast::Builtin::kFragDepth)});
+TEST_F(GlslGeneratorImplTest_Function, Emit_Attribute_EntryPoint_WithInOut_Builtins) {
+ // fn frag_main(@position(0) coord : vec4<f32>) -> @frag_depth f32 {
+ // return coord.x;
+ // }
+ auto* coord_in = Param("coord", ty.vec4<f32>(), {Builtin(ast::Builtin::kPosition)});
+ Func("frag_main", ast::VariableList{coord_in}, ty.f32(), {Return(MemberAccessor("coord", "x"))},
+ {Stage(ast::PipelineStage::kFragment)}, {Builtin(ast::Builtin::kFragDepth)});
- GeneratorImpl& gen = SanitizeAndBuild();
+ GeneratorImpl& gen = SanitizeAndBuild();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(#version 310 es
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(#version 310 es
precision mediump float;
float frag_main(vec4 coord) {
@@ -176,46 +171,44 @@ void main() {
)");
}
-TEST_F(GlslGeneratorImplTest_Function,
- Emit_Attribute_EntryPoint_SharedStruct_DifferentStages) {
- // struct Interface {
- // @builtin(position) pos : vec4<f32>;
- // @location(1) col1 : f32;
- // @location(2) col2 : f32;
- // };
- // fn vert_main() -> Interface {
- // return Interface(vec4<f32>(), 0.4, 0.6);
- // }
- // fn frag_main(inputs : Interface) {
- // const r = inputs.col1;
- // const g = inputs.col2;
- // const p = inputs.pos;
- // }
- auto* interface_struct = Structure(
- "Interface",
- {
- Member("pos", ty.vec4<f32>(), {Builtin(ast::Builtin::kPosition)}),
- Member("col1", ty.f32(), {Location(1)}),
- Member("col2", ty.f32(), {Location(2)}),
- });
-
- Func("vert_main", {}, ty.Of(interface_struct),
- {Return(Construct(ty.Of(interface_struct), Construct(ty.vec4<f32>()),
- Expr(0.5f), Expr(0.25f)))},
- {Stage(ast::PipelineStage::kVertex)});
+TEST_F(GlslGeneratorImplTest_Function, Emit_Attribute_EntryPoint_SharedStruct_DifferentStages) {
+ // struct Interface {
+ // @builtin(position) pos : vec4<f32>;
+ // @location(1) col1 : f32;
+ // @location(2) col2 : f32;
+ // };
+ // fn vert_main() -> Interface {
+ // return Interface(vec4<f32>(), 0.4, 0.6);
+ // }
+ // fn frag_main(inputs : Interface) {
+ // const r = inputs.col1;
+ // const g = inputs.col2;
+ // const p = inputs.pos;
+ // }
+ auto* interface_struct = Structure(
+ "Interface", {
+ Member("pos", ty.vec4<f32>(), {Builtin(ast::Builtin::kPosition)}),
+ Member("col1", ty.f32(), {Location(1)}),
+ Member("col2", ty.f32(), {Location(2)}),
+ });
- Func("frag_main", {Param("inputs", ty.Of(interface_struct))}, ty.void_(),
- {
- Decl(Const("r", ty.f32(), MemberAccessor("inputs", "col1"))),
- Decl(Const("g", ty.f32(), MemberAccessor("inputs", "col2"))),
- Decl(Const("p", ty.vec4<f32>(), MemberAccessor("inputs", "pos"))),
- },
- {Stage(ast::PipelineStage::kFragment)});
+ Func("vert_main", {}, ty.Of(interface_struct),
+ {Return(Construct(ty.Of(interface_struct), Construct(ty.vec4<f32>()), Expr(0.5_f),
+ Expr(0.25_f)))},
+ {Stage(ast::PipelineStage::kVertex)});
- GeneratorImpl& gen = SanitizeAndBuild();
+ Func("frag_main", {Param("inputs", ty.Of(interface_struct))}, ty.void_(),
+ {
+ Decl(Let("r", ty.f32(), MemberAccessor("inputs", "col1"))),
+ Decl(Let("g", ty.f32(), MemberAccessor("inputs", "col2"))),
+ Decl(Let("p", ty.vec4<f32>(), MemberAccessor("inputs", "pos"))),
+ },
+ {Stage(ast::PipelineStage::kFragment)});
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(#version 310 es
+ GeneratorImpl& gen = SanitizeAndBuild();
+
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(#version 310 es
precision mediump float;
layout(location = 1) out float col1_1;
@@ -229,11 +222,12 @@ struct Interface {
};
Interface vert_main() {
- Interface tint_symbol = Interface(vec4(0.0f, 0.0f, 0.0f, 0.0f), 0.5f, 0.25f);
+ Interface tint_symbol = Interface(vec4(0.0f), 0.5f, 0.25f);
return tint_symbol;
}
void main() {
+ gl_PointSize = 1.0;
Interface inner_result = vert_main();
gl_Position = inner_result.pos;
col1_1 = inner_result.col1;
@@ -277,17 +271,17 @@ TEST_F(GlslGeneratorImplTest_Function,
Func("foo", {Param("x", ty.f32())}, ty.Of(vertex_output_struct),
{Return(Construct(ty.Of(vertex_output_struct),
- Construct(ty.vec4<f32>(), "x", "x", "x", Expr(1.f))))},
+ Construct(ty.vec4<f32>(), "x", "x", "x", Expr(1_f))))},
{});
Func("vert_main1", {}, ty.Of(vertex_output_struct),
{Return(Construct(ty.Of(vertex_output_struct),
- Expr(Call("foo", Expr(0.5f)))))},
+ Expr(Call("foo", Expr(0.5_f)))))},
{Stage(ast::PipelineStage::kVertex)});
Func("vert_main2", {}, ty.Of(vertex_output_struct),
{Return(Construct(ty.Of(vertex_output_struct),
- Expr(Call("foo", Expr(0.25f)))))},
+ Expr(Call("foo", Expr(0.25_f)))))},
{Stage(ast::PipelineStage::kVertex)});
GeneratorImpl& gen = SanitizeAndBuild();
@@ -326,38 +320,37 @@ tint_symbol_2 vert_main2() {
#endif
TEST_F(GlslGeneratorImplTest_Function, Emit_Attribute_EntryPoint_With_Uniform) {
- auto* ubo_ty = Structure("UBO", {Member("coord", ty.vec4<f32>())});
- auto* ubo = Global("ubo", ty.Of(ubo_ty), ast::StorageClass::kUniform,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(1),
- });
+ auto* ubo_ty = Structure("UBO", {Member("coord", ty.vec4<f32>())});
+ auto* ubo = Global("ubo", ty.Of(ubo_ty), ast::StorageClass::kUniform,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(1),
+ });
+
+ Func("sub_func",
+ {
+ Param("param", ty.f32()),
+ },
+ ty.f32(),
+ {
+ Return(MemberAccessor(MemberAccessor(ubo, "coord"), "x")),
+ });
- Func("sub_func",
- {
- Param("param", ty.f32()),
- },
- ty.f32(),
- {
- Return(MemberAccessor(MemberAccessor(ubo, "coord"), "x")),
- });
-
- auto* var =
- Var("v", ty.f32(), ast::StorageClass::kNone, Call("sub_func", 1.0f));
-
- Func("frag_main", {}, ty.void_(),
- {
- Decl(var),
- Return(),
- },
- {
- Stage(ast::PipelineStage::kFragment),
- });
-
- GeneratorImpl& gen = Build();
+ auto* var = Var("v", ty.f32(), ast::StorageClass::kNone, Call("sub_func", 1_f));
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(#version 310 es
+ Func("frag_main", {}, ty.void_(),
+ {
+ Decl(var),
+ Return(),
+ },
+ {
+ Stage(ast::PipelineStage::kFragment),
+ });
+
+ GeneratorImpl& gen = Build();
+
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(#version 310 es
precision mediump float;
struct UBO {
@@ -379,32 +372,31 @@ void frag_main() {
)");
}
-TEST_F(GlslGeneratorImplTest_Function,
- Emit_Attribute_EntryPoint_With_UniformStruct) {
- auto* s = Structure("Uniforms", {Member("coord", ty.vec4<f32>())});
+TEST_F(GlslGeneratorImplTest_Function, Emit_Attribute_EntryPoint_With_UniformStruct) {
+ auto* s = Structure("Uniforms", {Member("coord", ty.vec4<f32>())});
- Global("uniforms", ty.Of(s), ast::StorageClass::kUniform,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(1),
- });
+ Global("uniforms", ty.Of(s), ast::StorageClass::kUniform,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(1),
+ });
- auto* var = Var("v", ty.f32(), ast::StorageClass::kNone,
- MemberAccessor(MemberAccessor("uniforms", "coord"), "x"));
+ auto* var = Var("v", ty.f32(), ast::StorageClass::kNone,
+ MemberAccessor(MemberAccessor("uniforms", "coord"), "x"));
- Func("frag_main", ast::VariableList{}, ty.void_(),
- {
- Decl(var),
- Return(),
- },
- {
- Stage(ast::PipelineStage::kFragment),
- });
+ Func("frag_main", ast::VariableList{}, ty.void_(),
+ {
+ Decl(var),
+ Return(),
+ },
+ {
+ Stage(ast::PipelineStage::kFragment),
+ });
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(#version 310 es
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(#version 310 es
precision mediump float;
struct Uniforms {
@@ -422,36 +414,33 @@ void frag_main() {
)");
}
-TEST_F(GlslGeneratorImplTest_Function,
- Emit_Attribute_EntryPoint_With_RW_StorageBuffer_Read) {
- auto* s = Structure("Data", {
- Member("a", ty.i32()),
- Member("b", ty.f32()),
- });
-
- Global("coord", ty.Of(s), ast::StorageClass::kStorage,
- ast::Access::kReadWrite,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(1),
- });
+TEST_F(GlslGeneratorImplTest_Function, Emit_Attribute_EntryPoint_With_RW_StorageBuffer_Read) {
+ auto* s = Structure("Data", {
+ Member("a", ty.i32()),
+ Member("b", ty.f32()),
+ });
- auto* var = Var("v", ty.f32(), ast::StorageClass::kNone,
- MemberAccessor("coord", "b"));
+ Global("coord", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kReadWrite,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(1),
+ });
- Func("frag_main", ast::VariableList{}, ty.void_(),
- {
- Decl(var),
- Return(),
- },
- {
- Stage(ast::PipelineStage::kFragment),
- });
+ auto* var = Var("v", ty.f32(), ast::StorageClass::kNone, MemberAccessor("coord", "b"));
- GeneratorImpl& gen = SanitizeAndBuild();
+ Func("frag_main", ast::VariableList{}, ty.void_(),
+ {
+ Decl(var),
+ Return(),
+ },
+ {
+ Stage(ast::PipelineStage::kFragment),
+ });
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(#version 310 es
+ GeneratorImpl& gen = SanitizeAndBuild();
+
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(#version 310 es
precision mediump float;
struct Data {
@@ -475,36 +464,34 @@ void main() {
)");
}
-TEST_F(GlslGeneratorImplTest_Function,
- Emit_Attribute_EntryPoint_With_RO_StorageBuffer_Read) {
- auto* s = Structure("Data", {
- Member("a", ty.i32()),
- Member("b", ty.f32()),
- });
-
- Global("coord", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kRead,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(1),
- });
+TEST_F(GlslGeneratorImplTest_Function, Emit_Attribute_EntryPoint_With_RO_StorageBuffer_Read) {
+ auto* s = Structure("Data", {
+ Member("a", ty.i32()),
+ Member("b", ty.f32()),
+ });
- auto* var = Var("v", ty.f32(), ast::StorageClass::kNone,
- MemberAccessor("coord", "b"));
+ Global("coord", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kRead,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(1),
+ });
- Func("frag_main", ast::VariableList{}, ty.void_(),
- {
- Decl(var),
- Return(),
- },
- {
- Stage(ast::PipelineStage::kFragment),
- });
+ auto* var = Var("v", ty.f32(), ast::StorageClass::kNone, MemberAccessor("coord", "b"));
- GeneratorImpl& gen = SanitizeAndBuild();
+ Func("frag_main", ast::VariableList{}, ty.void_(),
+ {
+ Decl(var),
+ Return(),
+ },
+ {
+ Stage(ast::PipelineStage::kFragment),
+ });
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(),
- R"(#version 310 es
+ GeneratorImpl& gen = SanitizeAndBuild();
+
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(),
+ R"(#version 310 es
precision mediump float;
struct Data {
@@ -528,32 +515,31 @@ void main() {
)");
}
-TEST_F(GlslGeneratorImplTest_Function,
- Emit_Attribute_EntryPoint_With_WO_StorageBuffer_Store) {
- auto* s = Structure("Data", {
- Member("a", ty.i32()),
- Member("b", ty.f32()),
- });
-
- Global("coord", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kWrite,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(1),
- });
+TEST_F(GlslGeneratorImplTest_Function, Emit_Attribute_EntryPoint_With_WO_StorageBuffer_Store) {
+ auto* s = Structure("Data", {
+ Member("a", ty.i32()),
+ Member("b", ty.f32()),
+ });
- Func("frag_main", ast::VariableList{}, ty.void_(),
- {
- Assign(MemberAccessor("coord", "b"), Expr(2.0f)),
- Return(),
- },
- {
- Stage(ast::PipelineStage::kFragment),
- });
+ Global("coord", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kWrite,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(1),
+ });
- GeneratorImpl& gen = SanitizeAndBuild();
+ Func("frag_main", ast::VariableList{}, ty.void_(),
+ {
+ Assign(MemberAccessor("coord", "b"), Expr(2_f)),
+ Return(),
+ },
+ {
+ Stage(ast::PipelineStage::kFragment),
+ });
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(#version 310 es
+ GeneratorImpl& gen = SanitizeAndBuild();
+
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(#version 310 es
precision mediump float;
struct Data {
@@ -577,33 +563,31 @@ void main() {
)");
}
-TEST_F(GlslGeneratorImplTest_Function,
- Emit_Attribute_EntryPoint_With_StorageBuffer_Store) {
- auto* s = Structure("Data", {
- Member("a", ty.i32()),
- Member("b", ty.f32()),
- });
-
- Global("coord", ty.Of(s), ast::StorageClass::kStorage,
- ast::Access::kReadWrite,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(1),
- });
+TEST_F(GlslGeneratorImplTest_Function, Emit_Attribute_EntryPoint_With_StorageBuffer_Store) {
+ auto* s = Structure("Data", {
+ Member("a", ty.i32()),
+ Member("b", ty.f32()),
+ });
- Func("frag_main", ast::VariableList{}, ty.void_(),
- {
- Assign(MemberAccessor("coord", "b"), Expr(2.0f)),
- Return(),
- },
- {
- Stage(ast::PipelineStage::kFragment),
- });
+ Global("coord", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kReadWrite,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(1),
+ });
- GeneratorImpl& gen = SanitizeAndBuild();
+ Func("frag_main", ast::VariableList{}, ty.void_(),
+ {
+ Assign(MemberAccessor("coord", "b"), Expr(2_f)),
+ Return(),
+ },
+ {
+ Stage(ast::PipelineStage::kFragment),
+ });
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(#version 310 es
+ GeneratorImpl& gen = SanitizeAndBuild();
+
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(#version 310 es
precision mediump float;
struct Data {
@@ -627,36 +611,34 @@ void main() {
)");
}
-TEST_F(GlslGeneratorImplTest_Function,
- Emit_Attribute_Called_By_EntryPoint_With_Uniform) {
- auto* s = Structure("S", {Member("x", ty.f32())});
- Global("coord", ty.Of(s), ast::StorageClass::kUniform,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(1),
- });
+TEST_F(GlslGeneratorImplTest_Function, Emit_Attribute_Called_By_EntryPoint_With_Uniform) {
+ auto* s = Structure("S", {Member("x", ty.f32())});
+ Global("coord", ty.Of(s), ast::StorageClass::kUniform,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(1),
+ });
- Func("sub_func", ast::VariableList{Param("param", ty.f32())}, ty.f32(),
- {
- Return(MemberAccessor("coord", "x")),
- });
+ Func("sub_func", ast::VariableList{Param("param", ty.f32())}, ty.f32(),
+ {
+ Return(MemberAccessor("coord", "x")),
+ });
- auto* var =
- Var("v", ty.f32(), ast::StorageClass::kNone, Call("sub_func", 1.0f));
+ auto* var = Var("v", ty.f32(), ast::StorageClass::kNone, Call("sub_func", 1_f));
- Func("frag_main", ast::VariableList{}, ty.void_(),
- {
- Decl(var),
- Return(),
- },
- {
- Stage(ast::PipelineStage::kFragment),
- });
+ Func("frag_main", ast::VariableList{}, ty.void_(),
+ {
+ Decl(var),
+ Return(),
+ },
+ {
+ Stage(ast::PipelineStage::kFragment),
+ });
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(#version 310 es
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(#version 310 es
precision mediump float;
struct S {
@@ -678,38 +660,35 @@ void frag_main() {
)");
}
-TEST_F(GlslGeneratorImplTest_Function,
- Emit_Attribute_Called_By_EntryPoint_With_StorageBuffer) {
- auto* s = Structure("S", {Member("x", ty.f32())});
- Global("coord", ty.Of(s), ast::StorageClass::kStorage,
- ast::Access::kReadWrite,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(1),
+TEST_F(GlslGeneratorImplTest_Function, Emit_Attribute_Called_By_EntryPoint_With_StorageBuffer) {
+ auto* s = Structure("S", {Member("x", ty.f32())});
+ Global("coord", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kReadWrite,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(1),
+ });
+
+ Func("sub_func", ast::VariableList{Param("param", ty.f32())}, ty.f32(),
+ {
+ Return(MemberAccessor("coord", "x")),
});
- Func("sub_func", ast::VariableList{Param("param", ty.f32())}, ty.f32(),
- {
- Return(MemberAccessor("coord", "x")),
- });
+ auto* var = Var("v", ty.f32(), ast::StorageClass::kNone, Call("sub_func", 1_f));
- auto* var =
- Var("v", ty.f32(), ast::StorageClass::kNone, Call("sub_func", 1.0f));
+ Func("frag_main", ast::VariableList{}, ty.void_(),
+ {
+ Decl(var),
+ Return(),
+ },
+ {
+ Stage(ast::PipelineStage::kFragment),
+ });
- Func("frag_main", ast::VariableList{}, ty.void_(),
- {
- Decl(var),
- Return(),
- },
- {
- Stage(ast::PipelineStage::kFragment),
- });
+ GeneratorImpl& gen = SanitizeAndBuild();
- GeneratorImpl& gen = SanitizeAndBuild();
-
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(),
- R"(#version 310 es
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(),
+ R"(#version 310 es
precision mediump float;
struct S {
@@ -735,17 +714,16 @@ void main() {
)");
}
-TEST_F(GlslGeneratorImplTest_Function,
- Emit_Attribute_EntryPoint_WithNameCollision) {
- Func("centroid", ast::VariableList{}, ty.void_(), {},
- {
- Stage(ast::PipelineStage::kFragment),
- });
+TEST_F(GlslGeneratorImplTest_Function, Emit_Attribute_EntryPoint_WithNameCollision) {
+ Func("centroid", ast::VariableList{}, ty.void_(), {},
+ {
+ Stage(ast::PipelineStage::kFragment),
+ });
- GeneratorImpl& gen = SanitizeAndBuild();
+ GeneratorImpl& gen = SanitizeAndBuild();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(#version 310 es
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(#version 310 es
precision mediump float;
void tint_symbol() {
@@ -759,16 +737,16 @@ void main() {
}
TEST_F(GlslGeneratorImplTest_Function, Emit_Attribute_EntryPoint_Compute) {
- Func("main", ast::VariableList{}, ty.void_(),
- {
- Return(),
- },
- {Stage(ast::PipelineStage::kCompute), WorkgroupSize(1)});
+ Func("main", ast::VariableList{}, ty.void_(),
+ {
+ Return(),
+ },
+ {Stage(ast::PipelineStage::kCompute), WorkgroupSize(1_i)});
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(#version 310 es
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(#version 310 es
layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;
void main() {
@@ -777,18 +755,17 @@ void main() {
)");
}
-TEST_F(GlslGeneratorImplTest_Function,
- Emit_Attribute_EntryPoint_Compute_WithWorkgroup_Literal) {
- Func("main", ast::VariableList{}, ty.void_(), {},
- {
- Stage(ast::PipelineStage::kCompute),
- WorkgroupSize(2, 4, 6),
- });
+TEST_F(GlslGeneratorImplTest_Function, Emit_Attribute_EntryPoint_Compute_WithWorkgroup_Literal) {
+ Func("main", ast::VariableList{}, ty.void_(), {},
+ {
+ Stage(ast::PipelineStage::kCompute),
+ WorkgroupSize(2_i, 4_i, 6_i),
+ });
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(#version 310 es
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(#version 310 es
layout(local_size_x = 2, local_size_y = 4, local_size_z = 6) in;
void main() {
@@ -797,25 +774,24 @@ void main() {
)");
}
-TEST_F(GlslGeneratorImplTest_Function,
- Emit_Attribute_EntryPoint_Compute_WithWorkgroup_Const) {
- GlobalConst("width", ty.i32(), Construct(ty.i32(), 2));
- GlobalConst("height", ty.i32(), Construct(ty.i32(), 3));
- GlobalConst("depth", ty.i32(), Construct(ty.i32(), 4));
- Func("main", ast::VariableList{}, ty.void_(), {},
- {
- Stage(ast::PipelineStage::kCompute),
- WorkgroupSize("width", "height", "depth"),
- });
-
- GeneratorImpl& gen = Build();
+TEST_F(GlslGeneratorImplTest_Function, Emit_Attribute_EntryPoint_Compute_WithWorkgroup_Const) {
+ GlobalConst("width", ty.i32(), Construct(ty.i32(), 2_i));
+ GlobalConst("height", ty.i32(), Construct(ty.i32(), 3_i));
+ GlobalConst("depth", ty.i32(), Construct(ty.i32(), 4_i));
+ Func("main", ast::VariableList{}, ty.void_(), {},
+ {
+ Stage(ast::PipelineStage::kCompute),
+ WorkgroupSize("width", "height", "depth"),
+ });
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(#version 310 es
+ GeneratorImpl& gen = Build();
+
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(#version 310 es
-const int width = int(2);
-const int height = int(3);
-const int depth = int(4);
+const int width = 2;
+const int height = 3;
+const int depth = 4;
layout(local_size_x = 2, local_size_y = 3, local_size_z = 4) in;
void main() {
return;
@@ -825,30 +801,30 @@ void main() {
TEST_F(GlslGeneratorImplTest_Function,
Emit_Attribute_EntryPoint_Compute_WithWorkgroup_OverridableConst) {
- Override("width", ty.i32(), Construct(ty.i32(), 2), {Id(7u)});
- Override("height", ty.i32(), Construct(ty.i32(), 3), {Id(8u)});
- Override("depth", ty.i32(), Construct(ty.i32(), 4), {Id(9u)});
- Func("main", ast::VariableList{}, ty.void_(), {},
- {
- Stage(ast::PipelineStage::kCompute),
- WorkgroupSize("width", "height", "depth"),
- });
+ Override("width", ty.i32(), Construct(ty.i32(), 2_i), {Id(7u)});
+ Override("height", ty.i32(), Construct(ty.i32(), 3_i), {Id(8u)});
+ Override("depth", ty.i32(), Construct(ty.i32(), 4_i), {Id(9u)});
+ Func("main", ast::VariableList{}, ty.void_(), {},
+ {
+ Stage(ast::PipelineStage::kCompute),
+ WorkgroupSize("width", "height", "depth"),
+ });
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(#version 310 es
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(#version 310 es
#ifndef WGSL_SPEC_CONSTANT_7
-#define WGSL_SPEC_CONSTANT_7 int(2)
+#define WGSL_SPEC_CONSTANT_7 2
#endif
const int width = WGSL_SPEC_CONSTANT_7;
#ifndef WGSL_SPEC_CONSTANT_8
-#define WGSL_SPEC_CONSTANT_8 int(3)
+#define WGSL_SPEC_CONSTANT_8 3
#endif
const int height = WGSL_SPEC_CONSTANT_8;
#ifndef WGSL_SPEC_CONSTANT_9
-#define WGSL_SPEC_CONSTANT_9 int(4)
+#define WGSL_SPEC_CONSTANT_9 4
#endif
const int depth = WGSL_SPEC_CONSTANT_9;
layout(local_size_x = WGSL_SPEC_CONSTANT_7, local_size_y = WGSL_SPEC_CONSTANT_8, local_size_z = WGSL_SPEC_CONSTANT_9) in;
@@ -859,15 +835,15 @@ void main() {
}
TEST_F(GlslGeneratorImplTest_Function, Emit_Function_WithArrayParams) {
- Func("my_func", ast::VariableList{Param("a", ty.array<f32, 5>())}, ty.void_(),
- {
- Return(),
- });
+ Func("my_func", ast::VariableList{Param("a", ty.array<f32, 5>())}, ty.void_(),
+ {
+ Return(),
+ });
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(#version 310 es
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(#version 310 es
void my_func(float a[5]) {
return;
@@ -877,15 +853,15 @@ void my_func(float a[5]) {
}
TEST_F(GlslGeneratorImplTest_Function, Emit_Function_WithArrayReturn) {
- Func("my_func", {}, ty.array<f32, 5>(),
- {
- Return(Construct(ty.array<f32, 5>())),
- });
+ Func("my_func", {}, ty.array<f32, 5>(),
+ {
+ Return(Construct(ty.array<f32, 5>())),
+ });
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(#version 310 es
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(#version 310 es
float[5] my_func() {
return float[5](0.0f, 0.0f, 0.0f, 0.0f, 0.0f);
@@ -895,61 +871,58 @@ float[5] my_func() {
}
// https://crbug.com/tint/297
-TEST_F(GlslGeneratorImplTest_Function,
- Emit_Multiple_EntryPoint_With_Same_ModuleVar) {
- // struct Data {
- // d : f32;
- // };
- // @binding(0) @group(0) var<storage> data : Data;
- //
- // @stage(compute) @workgroup_size(1)
- // fn a() {
- // var v = data.d;
- // return;
- // }
- //
- // @stage(compute) @workgroup_size(1)
- // fn b() {
- // var v = data.d;
- // return;
- // }
-
- auto* s = Structure("Data", {Member("d", ty.f32())});
-
- Global("data", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kReadWrite,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(0),
- });
-
- {
- auto* var = Var("v", ty.f32(), ast::StorageClass::kNone,
- MemberAccessor("data", "d"));
-
- Func("a", ast::VariableList{}, ty.void_(),
- {
- Decl(var),
- Return(),
- },
- {Stage(ast::PipelineStage::kCompute), WorkgroupSize(1)});
- }
-
- {
- auto* var = Var("v", ty.f32(), ast::StorageClass::kNone,
- MemberAccessor("data", "d"));
-
- Func("b", ast::VariableList{}, ty.void_(),
- {
- Decl(var),
- Return(),
- },
- {Stage(ast::PipelineStage::kCompute), WorkgroupSize(1)});
- }
-
- GeneratorImpl& gen = SanitizeAndBuild();
-
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(#version 310 es
+TEST_F(GlslGeneratorImplTest_Function, Emit_Multiple_EntryPoint_With_Same_ModuleVar) {
+ // struct Data {
+ // d : f32;
+ // };
+ // @binding(0) @group(0) var<storage> data : Data;
+ //
+ // @compute @workgroup_size(1)
+ // fn a() {
+ // var v = data.d;
+ // return;
+ // }
+ //
+ // @compute @workgroup_size(1)
+ // fn b() {
+ // var v = data.d;
+ // return;
+ // }
+
+ auto* s = Structure("Data", {Member("d", ty.f32())});
+
+ Global("data", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kReadWrite,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(0),
+ });
+
+ {
+ auto* var = Var("v", ty.f32(), ast::StorageClass::kNone, MemberAccessor("data", "d"));
+
+ Func("a", ast::VariableList{}, ty.void_(),
+ {
+ Decl(var),
+ Return(),
+ },
+ {Stage(ast::PipelineStage::kCompute), WorkgroupSize(1_i)});
+ }
+
+ {
+ auto* var = Var("v", ty.f32(), ast::StorageClass::kNone, MemberAccessor("data", "d"));
+
+ Func("b", ast::VariableList{}, ty.void_(),
+ {
+ Decl(var),
+ Return(),
+ },
+ {Stage(ast::PipelineStage::kCompute), WorkgroupSize(1_i)});
+ }
+
+ GeneratorImpl& gen = SanitizeAndBuild();
+
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(#version 310 es
struct Data {
float d;
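[Note on the function-test hunks above: beyond the reflow, the builder calls switch to suffixed literals (1.0f -> 1_f, WorkgroupSize(1) -> WorkgroupSize(1_i)), the Const(...) locals of the shared-interface fragment stage become Let(...), the WGSL comments track the @stage(compute) -> @compute attribute rename, and the expected vertex main() now sets gl_PointSize = 1.0 and uses vec4(0.0f) splat constructors. Reassembled from the '+' lines, the updated fragment entry point of the shared-struct test is built as shown below; sketch only, names taken directly from the hunks.]

    Func("frag_main", {Param("inputs", ty.Of(interface_struct))}, ty.void_(),
         {
             // Let(...) replaces the previous Const(...) builder for these locals.
             Decl(Let("r", ty.f32(), MemberAccessor("inputs", "col1"))),
             Decl(Let("g", ty.f32(), MemberAccessor("inputs", "col2"))),
             Decl(Let("p", ty.vec4<f32>(), MemberAccessor("inputs", "pos"))),
         },
         {Stage(ast::PipelineStage::kFragment)});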
diff --git a/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_identifier_test.cc b/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_identifier_test.cc
index 18369cbc725..396c261f20b 100644
--- a/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_identifier_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_identifier_test.cc
@@ -20,16 +20,16 @@ namespace {
using GlslGeneratorImplTest_Identifier = TestHelper;
TEST_F(GlslGeneratorImplTest_Identifier, EmitIdentifierExpression) {
- Global("foo", ty.i32(), ast::StorageClass::kPrivate);
+ Global("foo", ty.i32(), ast::StorageClass::kPrivate);
- auto* i = Expr("foo");
- WrapInFunction(i);
+ auto* i = Expr("foo");
+ WrapInFunction(i);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, i)) << gen.error();
- EXPECT_EQ(out.str(), "foo");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, i)) << gen.error();
+ EXPECT_EQ(out.str(), "foo");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_if_test.cc b/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_if_test.cc
index 69759433b82..4b0b7bbe7a9 100644
--- a/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_if_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_if_test.cc
@@ -20,43 +20,41 @@ namespace {
using GlslGeneratorImplTest_If = TestHelper;
TEST_F(GlslGeneratorImplTest_If, Emit_If) {
- Global("cond", ty.bool_(), ast::StorageClass::kPrivate);
+ Global("cond", ty.bool_(), ast::StorageClass::kPrivate);
- auto* cond = Expr("cond");
- auto* body = Block(Return());
- auto* i = If(cond, body);
- WrapInFunction(i);
+ auto* cond = Expr("cond");
+ auto* body = Block(Return());
+ auto* i = If(cond, body);
+ WrapInFunction(i);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(i)) << gen.error();
- EXPECT_EQ(gen.result(), R"( if (cond) {
+ gen.increment_indent();
+ ASSERT_TRUE(gen.EmitStatement(i)) << gen.error();
+ EXPECT_EQ(gen.result(), R"( if (cond) {
return;
}
)");
}
TEST_F(GlslGeneratorImplTest_If, Emit_IfWithElseIf) {
- Global("cond", ty.bool_(), ast::StorageClass::kPrivate);
- Global("else_cond", ty.bool_(), ast::StorageClass::kPrivate);
+ Global("cond", ty.bool_(), ast::StorageClass::kPrivate);
+ Global("else_cond", ty.bool_(), ast::StorageClass::kPrivate);
- auto* else_cond = Expr("else_cond");
- auto* else_body = Block(Return());
+ auto* else_cond = Expr("else_cond");
+ auto* else_body = Block(Return());
- auto* cond = Expr("cond");
- auto* body = Block(Return());
- auto* i = If(
- cond, body,
- ast::ElseStatementList{create<ast::ElseStatement>(else_cond, else_body)});
- WrapInFunction(i);
+ auto* cond = Expr("cond");
+ auto* body = Block(Return());
+ auto* i = If(cond, body, Else(If(else_cond, else_body)));
+ WrapInFunction(i);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(i)) << gen.error();
- EXPECT_EQ(gen.result(), R"( if (cond) {
+ ASSERT_TRUE(gen.EmitStatement(i)) << gen.error();
+ EXPECT_EQ(gen.result(), R"( if (cond) {
return;
} else {
if (else_cond) {
@@ -67,23 +65,21 @@ TEST_F(GlslGeneratorImplTest_If, Emit_IfWithElseIf) {
}
TEST_F(GlslGeneratorImplTest_If, Emit_IfWithElse) {
- Global("cond", ty.bool_(), ast::StorageClass::kPrivate);
+ Global("cond", ty.bool_(), ast::StorageClass::kPrivate);
- auto* else_body = Block(Return());
+ auto* else_body = Block(Return());
- auto* cond = Expr("cond");
- auto* body = Block(Return());
- auto* i = If(
- cond, body,
- ast::ElseStatementList{create<ast::ElseStatement>(nullptr, else_body)});
- WrapInFunction(i);
+ auto* cond = Expr("cond");
+ auto* body = Block(Return());
+ auto* i = If(cond, body, Else(else_body));
+ WrapInFunction(i);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(i)) << gen.error();
- EXPECT_EQ(gen.result(), R"( if (cond) {
+ ASSERT_TRUE(gen.EmitStatement(i)) << gen.error();
+ EXPECT_EQ(gen.result(), R"( if (cond) {
return;
} else {
return;
@@ -92,30 +88,26 @@ TEST_F(GlslGeneratorImplTest_If, Emit_IfWithElse) {
}
TEST_F(GlslGeneratorImplTest_If, Emit_IfWithMultiple) {
- Global("cond", ty.bool_(), ast::StorageClass::kPrivate);
- Global("else_cond", ty.bool_(), ast::StorageClass::kPrivate);
+ Global("cond", ty.bool_(), ast::StorageClass::kPrivate);
+ Global("else_cond", ty.bool_(), ast::StorageClass::kPrivate);
- auto* else_cond = Expr("else_cond");
+ auto* else_cond = Expr("else_cond");
- auto* else_body = Block(Return());
+ auto* else_body = Block(Return());
- auto* else_body_2 = Block(Return());
+ auto* else_body_2 = Block(Return());
- auto* cond = Expr("cond");
- auto* body = Block(Return());
- auto* i = If(cond, body,
- ast::ElseStatementList{
- create<ast::ElseStatement>(else_cond, else_body),
- create<ast::ElseStatement>(nullptr, else_body_2),
- });
- WrapInFunction(i);
+ auto* cond = Expr("cond");
+ auto* body = Block(Return());
+ auto* i = If(cond, body, Else(If(else_cond, else_body, Else(else_body_2))));
+ WrapInFunction(i);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(i)) << gen.error();
- EXPECT_EQ(gen.result(), R"( if (cond) {
+ ASSERT_TRUE(gen.EmitStatement(i)) << gen.error();
+ EXPECT_EQ(gen.result(), R"( if (cond) {
return;
} else {
if (else_cond) {
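[Note on the if-test hunks above: the manual ast::ElseStatementList construction is replaced by the ProgramBuilder Else()/If() helpers, so else-if chains are now expressed by nesting. Reassembled from the '+' lines of the Emit_IfWithMultiple test; sketch only.]

    auto* cond = Expr("cond");
    auto* body = Block(Return());
    // if / else-if / else, built by nesting If() inside Else():
    auto* i = If(cond, body, Else(If(else_cond, else_body, Else(else_body_2))));
    WrapInFunction(i);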
diff --git a/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_import_test.cc b/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_import_test.cc
index b2cdf55176f..2843f98b2ef 100644
--- a/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_import_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_import_test.cc
@@ -14,33 +14,35 @@
#include "src/tint/writer/glsl/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::glsl {
namespace {
using GlslGeneratorImplTest_Import = TestHelper;
struct GlslImportData {
- const char* name;
- const char* glsl_name;
+ const char* name;
+ const char* glsl_name;
};
inline std::ostream& operator<<(std::ostream& out, GlslImportData data) {
- out << data.name;
- return out;
+ out << data.name;
+ return out;
}
using GlslImportData_SingleParamTest = TestParamHelper<GlslImportData>;
TEST_P(GlslImportData_SingleParamTest, FloatScalar) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* ident = Expr(param.name);
- auto* expr = Call(ident, 1.f);
- WrapInFunction(expr);
+ auto* ident = Expr(param.name);
+ auto* expr = Call(ident, 1_f);
+ WrapInFunction(expr);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitCall(out, expr)) << gen.error();
- EXPECT_EQ(out.str(), std::string(param.glsl_name) + "(1.0f)");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitCall(out, expr)) << gen.error();
+ EXPECT_EQ(out.str(), std::string(param.glsl_name) + "(1.0f)");
}
INSTANTIATE_TEST_SUITE_P(GlslGeneratorImplTest_Import,
GlslImportData_SingleParamTest,
@@ -55,8 +57,7 @@ INSTANTIATE_TEST_SUITE_P(GlslGeneratorImplTest_Import,
GlslImportData{"exp2", "exp2"},
GlslImportData{"floor", "floor"},
GlslImportData{"fract", "fract"},
- GlslImportData{"inverseSqrt",
- "inversesqrt"},
+ GlslImportData{"inverseSqrt", "inversesqrt"},
GlslImportData{"length", "length"},
GlslImportData{"log", "log"},
GlslImportData{"log2", "log2"},
@@ -71,16 +72,16 @@ INSTANTIATE_TEST_SUITE_P(GlslGeneratorImplTest_Import,
using GlslImportData_SingleIntParamTest = TestParamHelper<GlslImportData>;
TEST_P(GlslImportData_SingleIntParamTest, IntScalar) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* expr = Call(param.name, Expr(1));
- WrapInFunction(expr);
+ auto* expr = Call(param.name, Expr(1_i));
+ WrapInFunction(expr);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitCall(out, expr)) << gen.error();
- EXPECT_EQ(out.str(), std::string(param.glsl_name) + "(1)");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitCall(out, expr)) << gen.error();
+ EXPECT_EQ(out.str(), std::string(param.glsl_name) + "(1)");
}
INSTANTIATE_TEST_SUITE_P(GlslGeneratorImplTest_Import,
GlslImportData_SingleIntParamTest,
@@ -88,59 +89,57 @@ INSTANTIATE_TEST_SUITE_P(GlslGeneratorImplTest_Import,
using GlslImportData_SingleVectorParamTest = TestParamHelper<GlslImportData>;
TEST_P(GlslImportData_SingleVectorParamTest, FloatVector) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* ident = Expr(param.name);
- auto* expr = Call(ident, vec3<f32>(1.f, 2.f, 3.f));
- WrapInFunction(expr);
+ auto* ident = Expr(param.name);
+ auto* expr = Call(ident, vec3<f32>(1_f, 2_f, 3_f));
+ WrapInFunction(expr);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitCall(out, expr)) << gen.error();
- EXPECT_EQ(out.str(),
- std::string(param.glsl_name) + "(vec3(1.0f, 2.0f, 3.0f))");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitCall(out, expr)) << gen.error();
+ EXPECT_EQ(out.str(), std::string(param.glsl_name) + "(vec3(1.0f, 2.0f, 3.0f))");
}
-INSTANTIATE_TEST_SUITE_P(
- GlslGeneratorImplTest_Import,
- GlslImportData_SingleVectorParamTest,
- testing::Values(GlslImportData{"abs", "abs"},
- GlslImportData{"acos", "acos"},
- GlslImportData{"asin", "asin"},
- GlslImportData{"atan", "atan"},
- GlslImportData{"cos", "cos"},
- GlslImportData{"cosh", "cosh"},
- GlslImportData{"ceil", "ceil"},
- GlslImportData{"exp", "exp"},
- GlslImportData{"exp2", "exp2"},
- GlslImportData{"floor", "floor"},
- GlslImportData{"fract", "fract"},
- GlslImportData{"inverseSqrt", "inversesqrt"},
- GlslImportData{"length", "length"},
- GlslImportData{"log", "log"},
- GlslImportData{"log2", "log2"},
- GlslImportData{"normalize", "normalize"},
- GlslImportData{"round", "round"},
- GlslImportData{"sign", "sign"},
- GlslImportData{"sin", "sin"},
- GlslImportData{"sinh", "sinh"},
- GlslImportData{"sqrt", "sqrt"},
- GlslImportData{"tan", "tan"},
- GlslImportData{"tanh", "tanh"},
- GlslImportData{"trunc", "trunc"}));
+INSTANTIATE_TEST_SUITE_P(GlslGeneratorImplTest_Import,
+ GlslImportData_SingleVectorParamTest,
+ testing::Values(GlslImportData{"abs", "abs"},
+ GlslImportData{"acos", "acos"},
+ GlslImportData{"asin", "asin"},
+ GlslImportData{"atan", "atan"},
+ GlslImportData{"cos", "cos"},
+ GlslImportData{"cosh", "cosh"},
+ GlslImportData{"ceil", "ceil"},
+ GlslImportData{"exp", "exp"},
+ GlslImportData{"exp2", "exp2"},
+ GlslImportData{"floor", "floor"},
+ GlslImportData{"fract", "fract"},
+ GlslImportData{"inverseSqrt", "inversesqrt"},
+ GlslImportData{"length", "length"},
+ GlslImportData{"log", "log"},
+ GlslImportData{"log2", "log2"},
+ GlslImportData{"normalize", "normalize"},
+ GlslImportData{"round", "round"},
+ GlslImportData{"sign", "sign"},
+ GlslImportData{"sin", "sin"},
+ GlslImportData{"sinh", "sinh"},
+ GlslImportData{"sqrt", "sqrt"},
+ GlslImportData{"tan", "tan"},
+ GlslImportData{"tanh", "tanh"},
+ GlslImportData{"trunc", "trunc"}));
using GlslImportData_DualParam_ScalarTest = TestParamHelper<GlslImportData>;
TEST_P(GlslImportData_DualParam_ScalarTest, Float) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* expr = Call(param.name, 1.f, 2.f);
- WrapInFunction(expr);
+ auto* expr = Call(param.name, 1_f, 2_f);
+ WrapInFunction(expr);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitCall(out, expr)) << gen.error();
- EXPECT_EQ(out.str(), std::string(param.glsl_name) + "(1.0f, 2.0f)");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitCall(out, expr)) << gen.error();
+ EXPECT_EQ(out.str(), std::string(param.glsl_name) + "(1.0f, 2.0f)");
}
INSTANTIATE_TEST_SUITE_P(GlslGeneratorImplTest_Import,
GlslImportData_DualParam_ScalarTest,
@@ -153,18 +152,17 @@ INSTANTIATE_TEST_SUITE_P(GlslGeneratorImplTest_Import,
using GlslImportData_DualParam_VectorTest = TestParamHelper<GlslImportData>;
TEST_P(GlslImportData_DualParam_VectorTest, Float) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* expr =
- Call(param.name, vec3<f32>(1.f, 2.f, 3.f), vec3<f32>(4.f, 5.f, 6.f));
- WrapInFunction(expr);
+ auto* expr = Call(param.name, vec3<f32>(1_f, 2_f, 3_f), vec3<f32>(4_f, 5_f, 6_f));
+ WrapInFunction(expr);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitCall(out, expr)) << gen.error();
- EXPECT_EQ(out.str(), std::string(param.glsl_name) +
- "(vec3(1.0f, 2.0f, 3.0f), vec3(4.0f, 5.0f, 6.0f))");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitCall(out, expr)) << gen.error();
+ EXPECT_EQ(out.str(),
+ std::string(param.glsl_name) + "(vec3(1.0f, 2.0f, 3.0f), vec3(4.0f, 5.0f, 6.0f))");
}
INSTANTIATE_TEST_SUITE_P(GlslGeneratorImplTest_Import,
GlslImportData_DualParam_VectorTest,
@@ -179,16 +177,16 @@ INSTANTIATE_TEST_SUITE_P(GlslGeneratorImplTest_Import,
using GlslImportData_DualParam_Int_Test = TestParamHelper<GlslImportData>;
TEST_P(GlslImportData_DualParam_Int_Test, IntScalar) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* expr = Call(param.name, 1, 2);
- WrapInFunction(expr);
+ auto* expr = Call(param.name, 1_i, 2_i);
+ WrapInFunction(expr);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitCall(out, expr)) << gen.error();
- EXPECT_EQ(out.str(), std::string(param.glsl_name) + "(1, 2)");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitCall(out, expr)) << gen.error();
+ EXPECT_EQ(out.str(), std::string(param.glsl_name) + "(1, 2)");
}
INSTANTIATE_TEST_SUITE_P(GlslGeneratorImplTest_Import,
GlslImportData_DualParam_Int_Test,
@@ -197,80 +195,77 @@ INSTANTIATE_TEST_SUITE_P(GlslGeneratorImplTest_Import,
using GlslImportData_TripleParam_ScalarTest = TestParamHelper<GlslImportData>;
TEST_P(GlslImportData_TripleParam_ScalarTest, Float) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* expr = Call(param.name, 1.f, 2.f, 3.f);
- WrapInFunction(expr);
+ auto* expr = Call(param.name, 1_f, 2_f, 3_f);
+ WrapInFunction(expr);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitCall(out, expr)) << gen.error();
- EXPECT_EQ(out.str(), std::string(param.glsl_name) + "(1.0f, 2.0f, 3.0f)");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitCall(out, expr)) << gen.error();
+ EXPECT_EQ(out.str(), std::string(param.glsl_name) + "(1.0f, 2.0f, 3.0f)");
}
INSTANTIATE_TEST_SUITE_P(GlslGeneratorImplTest_Import,
GlslImportData_TripleParam_ScalarTest,
testing::Values(GlslImportData{"mix", "mix"},
GlslImportData{"clamp", "clamp"},
- GlslImportData{"smoothStep",
- "smoothstep"}));
+ GlslImportData{"smoothstep", "smoothstep"}));
using GlslImportData_TripleParam_VectorTest = TestParamHelper<GlslImportData>;
TEST_P(GlslImportData_TripleParam_VectorTest, Float) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* expr = Call(param.name, vec3<f32>(1.f, 2.f, 3.f),
- vec3<f32>(4.f, 5.f, 6.f), vec3<f32>(7.f, 8.f, 9.f));
- WrapInFunction(expr);
+ auto* expr = Call(param.name, vec3<f32>(1_f, 2_f, 3_f), vec3<f32>(4_f, 5_f, 6_f),
+ vec3<f32>(7_f, 8_f, 9_f));
+ WrapInFunction(expr);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitCall(out, expr)) << gen.error();
- EXPECT_EQ(
- out.str(),
- std::string(param.glsl_name) +
- R"((vec3(1.0f, 2.0f, 3.0f), vec3(4.0f, 5.0f, 6.0f), vec3(7.0f, 8.0f, 9.0f)))");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitCall(out, expr)) << gen.error();
+ EXPECT_EQ(out.str(),
+ std::string(param.glsl_name) +
+ R"((vec3(1.0f, 2.0f, 3.0f), vec3(4.0f, 5.0f, 6.0f), vec3(7.0f, 8.0f, 9.0f)))");
}
-INSTANTIATE_TEST_SUITE_P(
- GlslGeneratorImplTest_Import,
- GlslImportData_TripleParam_VectorTest,
- testing::Values(GlslImportData{"faceForward", "faceforward"},
- GlslImportData{"clamp", "clamp"},
- GlslImportData{"smoothStep", "smoothstep"}));
+INSTANTIATE_TEST_SUITE_P(GlslGeneratorImplTest_Import,
+ GlslImportData_TripleParam_VectorTest,
+ testing::Values(GlslImportData{"faceForward", "faceforward"},
+ GlslImportData{"clamp", "clamp"},
+ GlslImportData{"smoothstep", "smoothstep"}));
TEST_F(GlslGeneratorImplTest_Import, DISABLED_GlslImportData_FMix) {
- FAIL();
+ FAIL();
}
using GlslImportData_TripleParam_Int_Test = TestParamHelper<GlslImportData>;
TEST_P(GlslImportData_TripleParam_Int_Test, IntScalar) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* expr = Call(param.name, 1, 2, 3);
- WrapInFunction(expr);
+ auto* expr = Call(param.name, 1_i, 2_i, 3_i);
+ WrapInFunction(expr);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitCall(out, expr)) << gen.error();
- EXPECT_EQ(out.str(), std::string(param.glsl_name) + "(1, 2, 3)");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitCall(out, expr)) << gen.error();
+ EXPECT_EQ(out.str(), std::string(param.glsl_name) + "(1, 2, 3)");
}
INSTANTIATE_TEST_SUITE_P(GlslGeneratorImplTest_Import,
GlslImportData_TripleParam_Int_Test,
testing::Values(GlslImportData{"clamp", "clamp"}));
TEST_F(GlslGeneratorImplTest_Import, GlslImportData_Determinant) {
- Global("var", ty.mat3x3<f32>(), ast::StorageClass::kPrivate);
+ Global("var", ty.mat3x3<f32>(), ast::StorageClass::kPrivate);
- auto* expr = Call("determinant", "var");
- WrapInFunction(expr);
+ auto* expr = Call("determinant", "var");
+ WrapInFunction(expr);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitCall(out, expr)) << gen.error();
- EXPECT_EQ(out.str(), std::string("determinant(var)"));
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitCall(out, expr)) << gen.error();
+ EXPECT_EQ(out.str(), std::string("determinant(var)"));
}
} // namespace
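[Note on the import-test hunks above: alongside the reflow, the call arguments move to suffixed literals (1.f -> 1_f, 1 -> 1_i) and the parameterized data renames the WGSL builtin smoothStep to smoothstep to match the current spelling. Reassembled from the '+' lines, the dual-parameter scalar test now reads as follows; sketch only.]

    TEST_P(GlslImportData_DualParam_ScalarTest, Float) {
        auto param = GetParam();

        // 1_f / 2_f are typed f32 literals from tint::number_suffixes.
        auto* expr = Call(param.name, 1_f, 2_f);
        WrapInFunction(expr);

        GeneratorImpl& gen = Build();

        std::stringstream out;
        ASSERT_TRUE(gen.EmitCall(out, expr)) << gen.error();
        EXPECT_EQ(out.str(), std::string(param.glsl_name) + "(1.0f, 2.0f)");
    }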
diff --git a/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_loop_test.cc b/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_loop_test.cc
index a8fa0392325..5187dafdb98 100644
--- a/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_loop_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_loop_test.cc
@@ -15,44 +15,46 @@
#include "src/tint/ast/variable_decl_statement.h"
#include "src/tint/writer/glsl/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::glsl {
namespace {
using GlslGeneratorImplTest_Loop = TestHelper;
TEST_F(GlslGeneratorImplTest_Loop, Emit_Loop) {
- auto* body = Block(create<ast::DiscardStatement>());
- auto* continuing = Block();
- auto* l = Loop(body, continuing);
+ auto* body = Block(create<ast::DiscardStatement>());
+ auto* continuing = Block();
+ auto* l = Loop(body, continuing);
- WrapInFunction(l);
+ WrapInFunction(l);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(l)) << gen.error();
- EXPECT_EQ(gen.result(), R"( while (true) {
+ ASSERT_TRUE(gen.EmitStatement(l)) << gen.error();
+ EXPECT_EQ(gen.result(), R"( while (true) {
discard;
}
)");
}
TEST_F(GlslGeneratorImplTest_Loop, Emit_LoopWithContinuing) {
- Func("a_statement", {}, ty.void_(), {});
+ Func("a_statement", {}, ty.void_(), {});
- auto* body = Block(create<ast::DiscardStatement>());
- auto* continuing = Block(CallStmt(Call("a_statement")));
- auto* l = Loop(body, continuing);
+ auto* body = Block(create<ast::DiscardStatement>());
+ auto* continuing = Block(CallStmt(Call("a_statement")));
+ auto* l = Loop(body, continuing);
- WrapInFunction(l);
+ WrapInFunction(l);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(l)) << gen.error();
- EXPECT_EQ(gen.result(), R"( while (true) {
+ ASSERT_TRUE(gen.EmitStatement(l)) << gen.error();
+ EXPECT_EQ(gen.result(), R"( while (true) {
discard;
{
a_statement();
@@ -62,31 +64,31 @@ TEST_F(GlslGeneratorImplTest_Loop, Emit_LoopWithContinuing) {
}
TEST_F(GlslGeneratorImplTest_Loop, Emit_LoopNestedWithContinuing) {
- Func("a_statement", {}, ty.void_(), {});
+ Func("a_statement", {}, ty.void_(), {});
- Global("lhs", ty.f32(), ast::StorageClass::kPrivate);
- Global("rhs", ty.f32(), ast::StorageClass::kPrivate);
+ Global("lhs", ty.f32(), ast::StorageClass::kPrivate);
+ Global("rhs", ty.f32(), ast::StorageClass::kPrivate);
- auto* body = Block(create<ast::DiscardStatement>());
- auto* continuing = Block(CallStmt(Call("a_statement")));
- auto* inner = Loop(body, continuing);
+ auto* body = Block(create<ast::DiscardStatement>());
+ auto* continuing = Block(CallStmt(Call("a_statement")));
+ auto* inner = Loop(body, continuing);
- body = Block(inner);
+ body = Block(inner);
- auto* lhs = Expr("lhs");
- auto* rhs = Expr("rhs");
+ auto* lhs = Expr("lhs");
+ auto* rhs = Expr("rhs");
- continuing = Block(Assign(lhs, rhs));
+ continuing = Block(Assign(lhs, rhs));
- auto* outer = Loop(body, continuing);
- WrapInFunction(outer);
+ auto* outer = Loop(body, continuing);
+ WrapInFunction(outer);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(outer)) << gen.error();
- EXPECT_EQ(gen.result(), R"( while (true) {
+ ASSERT_TRUE(gen.EmitStatement(outer)) << gen.error();
+ EXPECT_EQ(gen.result(), R"( while (true) {
while (true) {
discard;
{
@@ -101,30 +103,30 @@ TEST_F(GlslGeneratorImplTest_Loop, Emit_LoopNestedWithContinuing) {
}
TEST_F(GlslGeneratorImplTest_Loop, Emit_LoopWithVarUsedInContinuing) {
- // loop {
- // var lhs : f32 = 2.4;
- // var other : f32;
- // break;
- // continuing {
- // lhs = rhs
- // }
- // }
-
- Global("rhs", ty.f32(), ast::StorageClass::kPrivate);
-
- auto* body = Block(Decl(Var("lhs", ty.f32(), Expr(2.4f))), //
- Decl(Var("other", ty.f32())), //
- Break());
- auto* continuing = Block(Assign("lhs", "rhs"));
- auto* outer = Loop(body, continuing);
- WrapInFunction(outer);
-
- GeneratorImpl& gen = Build();
-
- gen.increment_indent();
-
- ASSERT_TRUE(gen.EmitStatement(outer)) << gen.error();
- EXPECT_EQ(gen.result(), R"( while (true) {
+ // loop {
+ // var lhs : f32 = 2.4;
+ // var other : f32;
+ // break;
+ // continuing {
+ // lhs = rhs
+ // }
+ // }
+
+ Global("rhs", ty.f32(), ast::StorageClass::kPrivate);
+
+ auto* body = Block(Decl(Var("lhs", ty.f32(), Expr(2.4_f))), //
+ Decl(Var("other", ty.f32())), //
+ Break());
+ auto* continuing = Block(Assign("lhs", "rhs"));
+ auto* outer = Loop(body, continuing);
+ WrapInFunction(outer);
+
+ GeneratorImpl& gen = Build();
+
+ gen.increment_indent();
+
+ ASSERT_TRUE(gen.EmitStatement(outer)) << gen.error();
+ EXPECT_EQ(gen.result(), R"( while (true) {
float lhs = 2.400000095f;
float other = 0.0f;
break;
@@ -136,20 +138,20 @@ TEST_F(GlslGeneratorImplTest_Loop, Emit_LoopWithVarUsedInContinuing) {
}
TEST_F(GlslGeneratorImplTest_Loop, Emit_ForLoop) {
- // for(; ; ) {
- // return;
- // }
+ // for(; ; ) {
+ // return;
+ // }
- auto* f = For(nullptr, nullptr, nullptr, //
- Block(Return()));
- WrapInFunction(f);
+ auto* f = For(nullptr, nullptr, nullptr, //
+ Block(Return()));
+ WrapInFunction(f);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(f)) << gen.error();
- EXPECT_EQ(gen.result(), R"( {
+ ASSERT_TRUE(gen.EmitStatement(f)) << gen.error();
+ EXPECT_EQ(gen.result(), R"( {
for(; ; ) {
return;
}
@@ -158,20 +160,20 @@ TEST_F(GlslGeneratorImplTest_Loop, Emit_ForLoop) {
}
TEST_F(GlslGeneratorImplTest_Loop, Emit_ForLoopWithSimpleInit) {
- // for(var i : i32; ; ) {
- // return;
- // }
+ // for(var i : i32; ; ) {
+ // return;
+ // }
- auto* f = For(Decl(Var("i", ty.i32())), nullptr, nullptr, //
- Block(Return()));
- WrapInFunction(f);
+ auto* f = For(Decl(Var("i", ty.i32())), nullptr, nullptr, //
+ Block(Return()));
+ WrapInFunction(f);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(f)) << gen.error();
- EXPECT_EQ(gen.result(), R"( {
+ ASSERT_TRUE(gen.EmitStatement(f)) << gen.error();
+ EXPECT_EQ(gen.result(), R"( {
for(int i = 0; ; ) {
return;
}
@@ -180,22 +182,21 @@ TEST_F(GlslGeneratorImplTest_Loop, Emit_ForLoopWithSimpleInit) {
}
TEST_F(GlslGeneratorImplTest_Loop, Emit_ForLoopWithMultiStmtInit) {
- // for(var b = true && false; ; ) {
- // return;
- // }
+ // for(var b = true && false; ; ) {
+ // return;
+ // }
- auto* multi_stmt = create<ast::BinaryExpression>(ast::BinaryOp::kLogicalAnd,
- Expr(true), Expr(false));
- auto* f = For(Decl(Var("b", nullptr, multi_stmt)), nullptr, nullptr,
- Block(Return()));
- WrapInFunction(f);
+ auto* multi_stmt =
+ create<ast::BinaryExpression>(ast::BinaryOp::kLogicalAnd, Expr(true), Expr(false));
+ auto* f = For(Decl(Var("b", nullptr, multi_stmt)), nullptr, nullptr, Block(Return()));
+ WrapInFunction(f);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(f)) << gen.error();
- EXPECT_EQ(gen.result(), R"( {
+ ASSERT_TRUE(gen.EmitStatement(f)) << gen.error();
+ EXPECT_EQ(gen.result(), R"( {
bool tint_tmp = true;
if (tint_tmp) {
tint_tmp = false;
@@ -209,21 +210,21 @@ TEST_F(GlslGeneratorImplTest_Loop, Emit_ForLoopWithMultiStmtInit) {
}
TEST_F(GlslGeneratorImplTest_Loop, Emit_ForLoopWithSimpleCond) {
- // for(; true; ) {
- // return;
- // }
+ // for(; true; ) {
+ // return;
+ // }
- Func("a_statement", {}, ty.void_(), {});
+ Func("a_statement", {}, ty.void_(), {});
- auto* f = For(nullptr, true, nullptr, Block(CallStmt(Call("a_statement"))));
- WrapInFunction(f);
+ auto* f = For(nullptr, true, nullptr, Block(CallStmt(Call("a_statement"))));
+ WrapInFunction(f);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(f)) << gen.error();
- EXPECT_EQ(gen.result(), R"( {
+ ASSERT_TRUE(gen.EmitStatement(f)) << gen.error();
+ EXPECT_EQ(gen.result(), R"( {
for(; true; ) {
a_statement();
}
@@ -232,24 +233,23 @@ TEST_F(GlslGeneratorImplTest_Loop, Emit_ForLoopWithSimpleCond) {
}
TEST_F(GlslGeneratorImplTest_Loop, Emit_ForLoopWithMultiStmtCond) {
- // for(; true && false; ) {
- // return;
- // }
+ // for(; true && false; ) {
+ // return;
+ // }
- Func("a_statement", {}, ty.void_(), {});
+ Func("a_statement", {}, ty.void_(), {});
- auto* multi_stmt = create<ast::BinaryExpression>(ast::BinaryOp::kLogicalAnd,
- Expr(true), Expr(false));
- auto* f =
- For(nullptr, multi_stmt, nullptr, Block(CallStmt(Call("a_statement"))));
- WrapInFunction(f);
+ auto* multi_stmt =
+ create<ast::BinaryExpression>(ast::BinaryOp::kLogicalAnd, Expr(true), Expr(false));
+ auto* f = For(nullptr, multi_stmt, nullptr, Block(CallStmt(Call("a_statement"))));
+ WrapInFunction(f);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(f)) << gen.error();
- EXPECT_EQ(gen.result(), R"( {
+ ASSERT_TRUE(gen.EmitStatement(f)) << gen.error();
+ EXPECT_EQ(gen.result(), R"( {
while (true) {
bool tint_tmp = true;
if (tint_tmp) {
@@ -263,21 +263,21 @@ TEST_F(GlslGeneratorImplTest_Loop, Emit_ForLoopWithMultiStmtCond) {
}
TEST_F(GlslGeneratorImplTest_Loop, Emit_ForLoopWithSimpleCont) {
- // for(; ; i = i + 1) {
- // return;
- // }
+ // for(; ; i = i + 1i) {
+ // return;
+ // }
- auto* v = Decl(Var("i", ty.i32()));
- auto* f = For(nullptr, nullptr, Assign("i", Add("i", 1)), //
- Block(Return()));
- WrapInFunction(v, f);
+ auto* v = Decl(Var("i", ty.i32()));
+ auto* f = For(nullptr, nullptr, Assign("i", Add("i", 1_i)), //
+ Block(Return()));
+ WrapInFunction(v, f);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(f)) << gen.error();
- EXPECT_EQ(gen.result(), R"( {
+ ASSERT_TRUE(gen.EmitStatement(f)) << gen.error();
+ EXPECT_EQ(gen.result(), R"( {
for(; ; i = (i + 1)) {
return;
}
@@ -286,23 +286,23 @@ TEST_F(GlslGeneratorImplTest_Loop, Emit_ForLoopWithSimpleCont) {
}
TEST_F(GlslGeneratorImplTest_Loop, Emit_ForLoopWithMultiStmtCont) {
- // for(; ; i = true && false) {
- // return;
- // }
+ // for(; ; i = true && false) {
+ // return;
+ // }
- auto* multi_stmt = create<ast::BinaryExpression>(ast::BinaryOp::kLogicalAnd,
- Expr(true), Expr(false));
- auto* v = Decl(Var("i", ty.bool_()));
- auto* f = For(nullptr, nullptr, Assign("i", multi_stmt), //
- Block(Return()));
- WrapInFunction(v, f);
+ auto* multi_stmt =
+ create<ast::BinaryExpression>(ast::BinaryOp::kLogicalAnd, Expr(true), Expr(false));
+ auto* v = Decl(Var("i", ty.bool_()));
+ auto* f = For(nullptr, nullptr, Assign("i", multi_stmt), //
+ Block(Return()));
+ WrapInFunction(v, f);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(f)) << gen.error();
- EXPECT_EQ(gen.result(), R"( {
+ ASSERT_TRUE(gen.EmitStatement(f)) << gen.error();
+ EXPECT_EQ(gen.result(), R"( {
while (true) {
return;
bool tint_tmp = true;
@@ -316,20 +316,19 @@ TEST_F(GlslGeneratorImplTest_Loop, Emit_ForLoopWithMultiStmtCont) {
}
TEST_F(GlslGeneratorImplTest_Loop, Emit_ForLoopWithSimpleInitCondCont) {
- // for(var i : i32; true; i = i + 1) {
- // return;
- // }
+    // for(var i : i32; true; i = i + 1i) {
+ // return;
+ // }
- auto* f = For(Decl(Var("i", ty.i32())), true, Assign("i", Add("i", 1)),
- Block(Return()));
- WrapInFunction(f);
+ auto* f = For(Decl(Var("i", ty.i32())), true, Assign("i", Add("i", 1_i)), Block(Return()));
+ WrapInFunction(f);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(f)) << gen.error();
- EXPECT_EQ(gen.result(), R"( {
+ ASSERT_TRUE(gen.EmitStatement(f)) << gen.error();
+ EXPECT_EQ(gen.result(), R"( {
for(int i = 0; true; i = (i + 1)) {
return;
}
@@ -338,28 +337,28 @@ TEST_F(GlslGeneratorImplTest_Loop, Emit_ForLoopWithSimpleInitCondCont) {
}
TEST_F(GlslGeneratorImplTest_Loop, Emit_ForLoopWithMultiStmtInitCondCont) {
- // for(var i = true && false; true && false; i = true && false) {
- // return;
- // }
+ // for(var i = true && false; true && false; i = true && false) {
+ // return;
+ // }
- auto* multi_stmt_a = create<ast::BinaryExpression>(ast::BinaryOp::kLogicalAnd,
- Expr(true), Expr(false));
- auto* multi_stmt_b = create<ast::BinaryExpression>(ast::BinaryOp::kLogicalAnd,
- Expr(true), Expr(false));
- auto* multi_stmt_c = create<ast::BinaryExpression>(ast::BinaryOp::kLogicalAnd,
- Expr(true), Expr(false));
+ auto* multi_stmt_a =
+ create<ast::BinaryExpression>(ast::BinaryOp::kLogicalAnd, Expr(true), Expr(false));
+ auto* multi_stmt_b =
+ create<ast::BinaryExpression>(ast::BinaryOp::kLogicalAnd, Expr(true), Expr(false));
+ auto* multi_stmt_c =
+ create<ast::BinaryExpression>(ast::BinaryOp::kLogicalAnd, Expr(true), Expr(false));
- auto* f = For(Decl(Var("i", nullptr, multi_stmt_a)), multi_stmt_b,
- Assign("i", multi_stmt_c), //
- Block(Return()));
- WrapInFunction(f);
+ auto* f =
+ For(Decl(Var("i", nullptr, multi_stmt_a)), multi_stmt_b, Assign("i", multi_stmt_c), //
+ Block(Return()));
+ WrapInFunction(f);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(f)) << gen.error();
- EXPECT_EQ(gen.result(), R"( {
+ ASSERT_TRUE(gen.EmitStatement(f)) << gen.error();
+ EXPECT_EQ(gen.result(), R"( {
bool tint_tmp = true;
if (tint_tmp) {
tint_tmp = false;
diff --git a/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_member_accessor_test.cc b/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_member_accessor_test.cc
index f46779864fc..4f7fdb1ee5d 100644
--- a/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_member_accessor_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_member_accessor_test.cc
@@ -16,119 +16,114 @@
#include "src/tint/ast/stage_attribute.h"
#include "src/tint/writer/glsl/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::glsl {
namespace {
using ::testing::HasSubstr;
-using create_type_func_ptr =
- const ast::Type* (*)(const ProgramBuilder::TypesBuilder& ty);
+using create_type_func_ptr = const ast::Type* (*)(const ProgramBuilder::TypesBuilder& ty);
inline const ast::Type* ty_i32(const ProgramBuilder::TypesBuilder& ty) {
- return ty.i32();
+ return ty.i32();
}
inline const ast::Type* ty_u32(const ProgramBuilder::TypesBuilder& ty) {
- return ty.u32();
+ return ty.u32();
}
inline const ast::Type* ty_f32(const ProgramBuilder::TypesBuilder& ty) {
- return ty.f32();
+ return ty.f32();
}
template <typename T>
inline const ast::Type* ty_vec2(const ProgramBuilder::TypesBuilder& ty) {
- return ty.vec2<T>();
+ return ty.vec2<T>();
}
template <typename T>
inline const ast::Type* ty_vec3(const ProgramBuilder::TypesBuilder& ty) {
- return ty.vec3<T>();
+ return ty.vec3<T>();
}
template <typename T>
inline const ast::Type* ty_vec4(const ProgramBuilder::TypesBuilder& ty) {
- return ty.vec4<T>();
+ return ty.vec4<T>();
}
template <typename T>
inline const ast::Type* ty_mat2x2(const ProgramBuilder::TypesBuilder& ty) {
- return ty.mat2x2<T>();
+ return ty.mat2x2<T>();
}
template <typename T>
inline const ast::Type* ty_mat2x3(const ProgramBuilder::TypesBuilder& ty) {
- return ty.mat2x3<T>();
+ return ty.mat2x3<T>();
}
template <typename T>
inline const ast::Type* ty_mat2x4(const ProgramBuilder::TypesBuilder& ty) {
- return ty.mat2x4<T>();
+ return ty.mat2x4<T>();
}
template <typename T>
inline const ast::Type* ty_mat3x2(const ProgramBuilder::TypesBuilder& ty) {
- return ty.mat3x2<T>();
+ return ty.mat3x2<T>();
}
template <typename T>
inline const ast::Type* ty_mat3x3(const ProgramBuilder::TypesBuilder& ty) {
- return ty.mat3x3<T>();
+ return ty.mat3x3<T>();
}
template <typename T>
inline const ast::Type* ty_mat3x4(const ProgramBuilder::TypesBuilder& ty) {
- return ty.mat3x4<T>();
+ return ty.mat3x4<T>();
}
template <typename T>
inline const ast::Type* ty_mat4x2(const ProgramBuilder::TypesBuilder& ty) {
- return ty.mat4x2<T>();
+ return ty.mat4x2<T>();
}
template <typename T>
inline const ast::Type* ty_mat4x3(const ProgramBuilder::TypesBuilder& ty) {
- return ty.mat4x3<T>();
+ return ty.mat4x3<T>();
}
template <typename T>
inline const ast::Type* ty_mat4x4(const ProgramBuilder::TypesBuilder& ty) {
- return ty.mat4x4<T>();
+ return ty.mat4x4<T>();
}
-using i32 = ProgramBuilder::i32;
-using u32 = ProgramBuilder::u32;
-using f32 = ProgramBuilder::f32;
-
template <typename BASE>
class GlslGeneratorImplTest_MemberAccessorBase : public BASE {
- public:
- void SetupStorageBuffer(ast::StructMemberList members) {
- ProgramBuilder& b = *this;
-
- auto* s = b.Structure("Data", members);
-
- b.Global("data", b.ty.Of(s), ast::StorageClass::kStorage,
- ast::Access::kReadWrite,
- ast::AttributeList{
- b.create<ast::BindingAttribute>(0),
- b.create<ast::GroupAttribute>(1),
- });
- }
-
- void SetupFunction(ast::StatementList statements) {
- ProgramBuilder& b = *this;
- b.Func("main", ast::VariableList{}, b.ty.void_(), statements,
- ast::AttributeList{
- b.Stage(ast::PipelineStage::kFragment),
- });
- }
+ public:
+ void SetupStorageBuffer(ast::StructMemberList members) {
+ ProgramBuilder& b = *this;
+
+ auto* s = b.Structure("Data", members);
+
+ b.Global("data", b.ty.Of(s), ast::StorageClass::kStorage, ast::Access::kReadWrite,
+ ast::AttributeList{
+ b.create<ast::BindingAttribute>(0),
+ b.create<ast::GroupAttribute>(1),
+ });
+ }
+
+ void SetupFunction(ast::StatementList statements) {
+ ProgramBuilder& b = *this;
+ b.Func("main", ast::VariableList{}, b.ty.void_(), statements,
+ ast::AttributeList{
+ b.Stage(ast::PipelineStage::kFragment),
+ });
+ }
};
-using GlslGeneratorImplTest_MemberAccessor =
- GlslGeneratorImplTest_MemberAccessorBase<TestHelper>;
+using GlslGeneratorImplTest_MemberAccessor = GlslGeneratorImplTest_MemberAccessorBase<TestHelper>;
template <typename T>
using GlslGeneratorImplTest_MemberAccessorWithParam =
GlslGeneratorImplTest_MemberAccessorBase<TestParamHelper<T>>;
TEST_F(GlslGeneratorImplTest_MemberAccessor, EmitExpression_MemberAccessor) {
- auto* s = Structure("Data", {Member("mem", ty.f32())});
- Global("str", ty.Of(s), ast::StorageClass::kPrivate);
+ auto* s = Structure("Data", {Member("mem", ty.f32())});
+ Global("str", ty.Of(s), ast::StorageClass::kPrivate);
- auto* expr = MemberAccessor("str", "mem");
- WrapInFunction(Var("expr", ty.f32(), ast::StorageClass::kNone, expr));
+ auto* expr = MemberAccessor("str", "mem");
+ WrapInFunction(Var("expr", ty.f32(), ast::StorageClass::kNone, expr));
- GeneratorImpl& gen = SanitizeAndBuild();
+ GeneratorImpl& gen = SanitizeAndBuild();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(#version 310 es
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(#version 310 es
struct Data {
float mem;
@@ -148,42 +143,41 @@ void main() {
}
struct TypeCase {
- create_type_func_ptr member_type;
- std::string expected;
+ create_type_func_ptr member_type;
+ std::string expected;
};
inline std::ostream& operator<<(std::ostream& out, TypeCase c) {
- ProgramBuilder b;
- auto* ty = c.member_type(b.ty);
- out << ty->FriendlyName(b.Symbols());
- return out;
+ ProgramBuilder b;
+ auto* ty = c.member_type(b.ty);
+ out << ty->FriendlyName(b.Symbols());
+ return out;
}
using GlslGeneratorImplTest_MemberAccessor_StorageBufferLoad =
GlslGeneratorImplTest_MemberAccessorWithParam<TypeCase>;
TEST_P(GlslGeneratorImplTest_MemberAccessor_StorageBufferLoad, Test) {
- // struct Data {
- // a : i32;
- // b : <type>;
- // };
- // var<storage> data : Data;
- // data.b;
+ // struct Data {
+ // a : i32;
+ // b : <type>;
+ // };
+ // var<storage> data : Data;
+ // data.b;
- auto p = GetParam();
+ auto p = GetParam();
- SetupStorageBuffer({
- Member("a", ty.i32()),
- Member("b", p.member_type(ty)),
- });
+ SetupStorageBuffer({
+ Member("a", ty.i32()),
+ Member("b", p.member_type(ty)),
+ });
- SetupFunction({
- Decl(Var("x", nullptr, ast::StorageClass::kNone,
- MemberAccessor("data", "b"))),
- });
+ SetupFunction({
+ Decl(Var("x", nullptr, ast::StorageClass::kNone, MemberAccessor("data", "b"))),
+ });
- GeneratorImpl& gen = SanitizeAndBuild();
+ GeneratorImpl& gen = SanitizeAndBuild();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr(p.expected));
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr(p.expected));
}
INSTANTIATE_TEST_SUITE_P(GlslGeneratorImplTest_MemberAccessor,
@@ -213,80 +207,78 @@ INSTANTIATE_TEST_SUITE_P(GlslGeneratorImplTest_MemberAccessor,
using GlslGeneratorImplTest_MemberAccessor_StorageBufferStore =
GlslGeneratorImplTest_MemberAccessorWithParam<TypeCase>;
TEST_P(GlslGeneratorImplTest_MemberAccessor_StorageBufferStore, Test) {
- // struct Data {
- // a : i32;
- // b : <type>;
- // };
- // var<storage> data : Data;
- // data.b = <type>();
-
- auto p = GetParam();
-
- SetupStorageBuffer({
- Member("a", ty.i32()),
- Member("b", p.member_type(ty)),
- });
-
- SetupFunction({
- Decl(Var("value", p.member_type(ty), ast::StorageClass::kNone,
- Construct(p.member_type(ty)))),
- Assign(MemberAccessor("data", "b"), Expr("value")),
- });
-
- GeneratorImpl& gen = SanitizeAndBuild();
-
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr(p.expected));
-}
-
-INSTANTIATE_TEST_SUITE_P(
- GlslGeneratorImplTest_MemberAccessor,
- GlslGeneratorImplTest_MemberAccessor_StorageBufferStore,
- testing::Values(TypeCase{ty_u32, "data.b = value"},
- TypeCase{ty_f32, "data.b = value"},
- TypeCase{ty_i32, "data.b = value"},
- TypeCase{ty_vec2<u32>, "data.b = value"},
- TypeCase{ty_vec2<f32>, "data.b = value"},
- TypeCase{ty_vec2<i32>, "data.b = value"},
- TypeCase{ty_vec3<u32>, "data.b = value"},
- TypeCase{ty_vec3<f32>, "data.b = value"},
- TypeCase{ty_vec3<i32>, "data.b = value"},
- TypeCase{ty_vec4<u32>, "data.b = value"},
- TypeCase{ty_vec4<f32>, "data.b = value"},
- TypeCase{ty_vec4<i32>, "data.b = value"},
- TypeCase{ty_mat2x2<f32>, "data.b = value"},
- TypeCase{ty_mat2x3<f32>, "data.b = value"},
- TypeCase{ty_mat2x4<f32>, "data.b = value"},
- TypeCase{ty_mat3x2<f32>, "data.b = value"},
- TypeCase{ty_mat3x3<f32>, "data.b = value"},
- TypeCase{ty_mat3x4<f32>, "data.b = value"},
- TypeCase{ty_mat4x2<f32>, "data.b = value"},
- TypeCase{ty_mat4x3<f32>, "data.b = value"},
- TypeCase{ty_mat4x4<f32>, "data.b = value"}));
+ // struct Data {
+ // a : i32;
+ // b : <type>;
+ // };
+ // var<storage> data : Data;
+ // data.b = <type>();
+
+ auto p = GetParam();
+
+ SetupStorageBuffer({
+ Member("a", ty.i32()),
+ Member("b", p.member_type(ty)),
+ });
+
+ SetupFunction({
+ Decl(Var("value", p.member_type(ty), ast::StorageClass::kNone,
+ Construct(p.member_type(ty)))),
+ Assign(MemberAccessor("data", "b"), Expr("value")),
+ });
+
+ GeneratorImpl& gen = SanitizeAndBuild();
+
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr(p.expected));
+}
+
+INSTANTIATE_TEST_SUITE_P(GlslGeneratorImplTest_MemberAccessor,
+ GlslGeneratorImplTest_MemberAccessor_StorageBufferStore,
+ testing::Values(TypeCase{ty_u32, "data.b = value"},
+ TypeCase{ty_f32, "data.b = value"},
+ TypeCase{ty_i32, "data.b = value"},
+ TypeCase{ty_vec2<u32>, "data.b = value"},
+ TypeCase{ty_vec2<f32>, "data.b = value"},
+ TypeCase{ty_vec2<i32>, "data.b = value"},
+ TypeCase{ty_vec3<u32>, "data.b = value"},
+ TypeCase{ty_vec3<f32>, "data.b = value"},
+ TypeCase{ty_vec3<i32>, "data.b = value"},
+ TypeCase{ty_vec4<u32>, "data.b = value"},
+ TypeCase{ty_vec4<f32>, "data.b = value"},
+ TypeCase{ty_vec4<i32>, "data.b = value"},
+ TypeCase{ty_mat2x2<f32>, "data.b = value"},
+ TypeCase{ty_mat2x3<f32>, "data.b = value"},
+ TypeCase{ty_mat2x4<f32>, "data.b = value"},
+ TypeCase{ty_mat3x2<f32>, "data.b = value"},
+ TypeCase{ty_mat3x3<f32>, "data.b = value"},
+ TypeCase{ty_mat3x4<f32>, "data.b = value"},
+ TypeCase{ty_mat4x2<f32>, "data.b = value"},
+ TypeCase{ty_mat4x3<f32>, "data.b = value"},
+ TypeCase{ty_mat4x4<f32>, "data.b = value"}));
TEST_F(GlslGeneratorImplTest_MemberAccessor, StorageBuffer_Store_Matrix_Empty) {
- // struct Data {
- // z : f32;
- // a : mat2x3<f32>;
- // };
- // var<storage> data : Data;
- // data.a = mat2x3<f32>();
-
- SetupStorageBuffer({
- Member("a", ty.i32()),
- Member("b", ty.mat2x3<f32>()),
- });
-
- SetupFunction({
- Assign(MemberAccessor("data", "b"),
- Construct(ty.mat2x3<f32>(), ast::ExpressionList{})),
- });
-
- GeneratorImpl& gen = SanitizeAndBuild();
-
- ASSERT_TRUE(gen.Generate()) << gen.error();
- auto* expected =
- R"(#version 310 es
+ // struct Data {
+ // z : f32;
+ // a : mat2x3<f32>;
+ // };
+ // var<storage> data : Data;
+ // data.a = mat2x3<f32>();
+
+ SetupStorageBuffer({
+ Member("a", ty.i32()),
+ Member("b", ty.mat2x3<f32>()),
+ });
+
+ SetupFunction({
+ Assign(MemberAccessor("data", "b"), Construct(ty.mat2x3<f32>(), ast::ExpressionList{})),
+ });
+
+ GeneratorImpl& gen = SanitizeAndBuild();
+
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ auto* expected =
+ R"(#version 310 es
precision mediump float;
struct Data {
@@ -299,7 +291,7 @@ layout(binding = 0, std430) buffer Data_1 {
mat2x3 b;
} data;
void tint_symbol() {
- data.b = mat2x3(0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f);
+ data.b = mat2x3(vec3(0.0f), vec3(0.0f));
}
void main() {
@@ -307,34 +299,32 @@ void main() {
return;
}
)";
- EXPECT_EQ(gen.result(), expected);
+ EXPECT_EQ(gen.result(), expected);
}
-TEST_F(GlslGeneratorImplTest_MemberAccessor,
- StorageBuffer_Load_Matrix_Single_Element) {
- // struct Data {
- // z : f32;
- // a : mat4x3<f32>;
- // };
- // var<storage> data : Data;
- // data.a[2][1];
-
- SetupStorageBuffer({
- Member("z", ty.f32()),
- Member("a", ty.mat4x3<f32>()),
- });
-
- SetupFunction({
- Decl(
- Var("x", nullptr, ast::StorageClass::kNone,
- IndexAccessor(IndexAccessor(MemberAccessor("data", "a"), 2), 1))),
- });
-
- GeneratorImpl& gen = SanitizeAndBuild();
-
- ASSERT_TRUE(gen.Generate()) << gen.error();
- auto* expected =
- R"(#version 310 es
+TEST_F(GlslGeneratorImplTest_MemberAccessor, StorageBuffer_Load_Matrix_Single_Element) {
+ // struct Data {
+ // z : f32;
+ // a : mat4x3<f32>;
+ // };
+ // var<storage> data : Data;
+ // data.a[2i][1i];
+
+ SetupStorageBuffer({
+ Member("z", ty.f32()),
+ Member("a", ty.mat4x3<f32>()),
+ });
+
+ SetupFunction({
+ Decl(Var("x", nullptr, ast::StorageClass::kNone,
+ IndexAccessor(IndexAccessor(MemberAccessor("data", "a"), 2_i), 1_i))),
+ });
+
+ GeneratorImpl& gen = SanitizeAndBuild();
+
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ auto* expected =
+ R"(#version 310 es
precision mediump float;
struct Data {
@@ -355,32 +345,32 @@ void main() {
return;
}
)";
- EXPECT_EQ(gen.result(), expected);
+ EXPECT_EQ(gen.result(), expected);
}
TEST_F(GlslGeneratorImplTest_MemberAccessor,
EmitExpression_IndexAccessor_StorageBuffer_Load_Int_FromArray) {
- // struct Data {
- // a : array<i32, 5>;
- // };
- // var<storage> data : Data;
- // data.a[2];
-
- SetupStorageBuffer({
- Member("z", ty.f32()),
- Member("a", ty.array<i32, 5>(4)),
- });
-
- SetupFunction({
- Decl(Var("x", nullptr, ast::StorageClass::kNone,
- IndexAccessor(MemberAccessor("data", "a"), 2))),
- });
-
- GeneratorImpl& gen = SanitizeAndBuild();
-
- ASSERT_TRUE(gen.Generate()) << gen.error();
- auto* expected =
- R"(#version 310 es
+ // struct Data {
+ // a : array<i32, 5>;
+ // };
+ // var<storage> data : Data;
+ // data.a[2];
+
+ SetupStorageBuffer({
+ Member("z", ty.f32()),
+ Member("a", ty.array<i32, 5>(4)),
+ });
+
+ SetupFunction({
+ Decl(Var("x", nullptr, ast::StorageClass::kNone,
+ IndexAccessor(MemberAccessor("data", "a"), 2_i))),
+ });
+
+ GeneratorImpl& gen = SanitizeAndBuild();
+
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ auto* expected =
+ R"(#version 310 es
precision mediump float;
struct Data {
@@ -401,33 +391,32 @@ void main() {
return;
}
)";
- EXPECT_EQ(gen.result(), expected);
+ EXPECT_EQ(gen.result(), expected);
}
TEST_F(GlslGeneratorImplTest_MemberAccessor,
EmitExpression_IndexAccessor_StorageBuffer_Load_Int_FromArray_ExprIdx) {
- // struct Data {
- // a : array<i32, 5>;
- // };
- // var<storage> data : Data;
- // data.a[(2 + 4) - 3];
-
- SetupStorageBuffer({
- Member("z", ty.f32()),
- Member("a", ty.array<i32, 5>(4)),
- });
-
- SetupFunction({
- Decl(Var("x", nullptr, ast::StorageClass::kNone,
- IndexAccessor(MemberAccessor("data", "a"),
- Sub(Add(2, Expr(4)), Expr(3))))),
- });
-
- GeneratorImpl& gen = SanitizeAndBuild();
-
- ASSERT_TRUE(gen.Generate()) << gen.error();
- auto* expected =
- R"(#version 310 es
+ // struct Data {
+ // a : array<i32, 5u>;
+ // };
+ // var<storage> data : Data;
+ // data.a[(2i + 4i) - 3i];
+
+ SetupStorageBuffer({
+ Member("z", ty.f32()),
+ Member("a", ty.array<i32, 5>(4)),
+ });
+
+ SetupFunction({
+ Decl(Var("x", nullptr, ast::StorageClass::kNone,
+ IndexAccessor(MemberAccessor("data", "a"), Sub(Add(2_i, 4_i), 3_i)))),
+ });
+
+ GeneratorImpl& gen = SanitizeAndBuild();
+
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ auto* expected =
+ R"(#version 310 es
precision mediump float;
struct Data {
@@ -448,30 +437,30 @@ void main() {
return;
}
)";
- EXPECT_EQ(gen.result(), expected);
+ EXPECT_EQ(gen.result(), expected);
}
TEST_F(GlslGeneratorImplTest_MemberAccessor, StorageBuffer_Store_ToArray) {
- // struct Data {
- // a : array<i32, 5>;
- // };
- // var<storage> data : Data;
- // data.a[2] = 2;
-
- SetupStorageBuffer({
- Member("z", ty.f32()),
- Member("a", ty.array<i32, 5>(4)),
- });
-
- SetupFunction({
- Assign(IndexAccessor(MemberAccessor("data", "a"), 2), 2),
- });
-
- GeneratorImpl& gen = SanitizeAndBuild();
-
- ASSERT_TRUE(gen.Generate()) << gen.error();
- auto* expected =
- R"(#version 310 es
+ // struct Data {
+ // a : array<i32, 5u>;
+ // };
+ // var<storage> data : Data;
+ // data.a[2i] = 2i;
+
+ SetupStorageBuffer({
+ Member("z", ty.f32()),
+ Member("a", ty.array<i32, 5>(4)),
+ });
+
+ SetupFunction({
+ Assign(IndexAccessor(MemberAccessor("data", "a"), 2_i), 2_i),
+ });
+
+ GeneratorImpl& gen = SanitizeAndBuild();
+
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ auto* expected =
+ R"(#version 310 es
precision mediump float;
struct Data {
@@ -492,41 +481,40 @@ void main() {
return;
}
)";
- EXPECT_EQ(gen.result(), expected);
+ EXPECT_EQ(gen.result(), expected);
}
TEST_F(GlslGeneratorImplTest_MemberAccessor, StorageBuffer_Load_MultiLevel) {
- // struct Inner {
- // a : vec3<i32>;
- // b : vec3<f32>;
- // };
- // struct Data {
- // var c : array<Inner, 4>;
- // };
- //
- // var<storage> data : Pre;
- // data.c[2].b
-
- auto* inner = Structure("Inner", {
- Member("a", ty.vec3<f32>()),
- Member("b", ty.vec3<f32>()),
- });
-
- SetupStorageBuffer({
- Member("c", ty.array(ty.Of(inner), 4, 32)),
- });
-
- SetupFunction({
- Decl(Var(
- "x", nullptr, ast::StorageClass::kNone,
- MemberAccessor(IndexAccessor(MemberAccessor("data", "c"), 2), "b"))),
- });
-
- GeneratorImpl& gen = SanitizeAndBuild();
-
- ASSERT_TRUE(gen.Generate()) << gen.error();
- auto* expected =
- R"(#version 310 es
+ // struct Inner {
+ // a : vec3<i32>;
+ // b : vec3<f32>;
+ // };
+ // struct Data {
+ // var c : array<Inner, 4u>;
+ // };
+ //
+ // var<storage> data : Pre;
+ // data.c[2i].b
+
+ auto* inner = Structure("Inner", {
+ Member("a", ty.vec3<f32>()),
+ Member("b", ty.vec3<f32>()),
+ });
+
+ SetupStorageBuffer({
+ Member("c", ty.array(ty.Of(inner), 4_u, 32)),
+ });
+
+ SetupFunction({
+ Decl(Var("x", nullptr, ast::StorageClass::kNone,
+ MemberAccessor(IndexAccessor(MemberAccessor("data", "c"), 2_i), "b"))),
+ });
+
+ GeneratorImpl& gen = SanitizeAndBuild();
+
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ auto* expected =
+ R"(#version 310 es
precision mediump float;
struct Inner {
@@ -550,44 +538,41 @@ void main() {
return;
}
)";
- EXPECT_EQ(gen.result(), expected);
-}
-
-TEST_F(GlslGeneratorImplTest_MemberAccessor,
- StorageBuffer_Load_MultiLevel_Swizzle) {
- // struct Inner {
- // a : vec3<i32>;
- // b : vec3<f32>;
- // };
- // struct Data {
- // var c : array<Inner, 4>;
- // };
- //
- // var<storage> data : Pre;
- // data.c[2].b.xy
-
- auto* inner = Structure("Inner", {
- Member("a", ty.vec3<f32>()),
- Member("b", ty.vec3<f32>()),
- });
-
- SetupStorageBuffer({
- Member("c", ty.array(ty.Of(inner), 4, 32)),
- });
-
- SetupFunction({
- Decl(Var("x", nullptr, ast::StorageClass::kNone,
- MemberAccessor(
- MemberAccessor(IndexAccessor(MemberAccessor("data", "c"), 2),
- "b"),
- "xy"))),
- });
-
- GeneratorImpl& gen = SanitizeAndBuild();
-
- ASSERT_TRUE(gen.Generate()) << gen.error();
- auto* expected =
- R"(#version 310 es
+ EXPECT_EQ(gen.result(), expected);
+}
+
+TEST_F(GlslGeneratorImplTest_MemberAccessor, StorageBuffer_Load_MultiLevel_Swizzle) {
+ // struct Inner {
+ // a : vec3<i32>;
+ // b : vec3<f32>;
+ // };
+ // struct Data {
+ // var c : array<Inner, 4u>;
+ // };
+ //
+ // var<storage> data : Pre;
+ // data.c[2i].b.xy
+
+ auto* inner = Structure("Inner", {
+ Member("a", ty.vec3<f32>()),
+ Member("b", ty.vec3<f32>()),
+ });
+
+ SetupStorageBuffer({
+ Member("c", ty.array(ty.Of(inner), 4_u, 32)),
+ });
+
+ SetupFunction({
+ Decl(Var("x", nullptr, ast::StorageClass::kNone,
+ MemberAccessor(
+ MemberAccessor(IndexAccessor(MemberAccessor("data", "c"), 2_i), "b"), "xy"))),
+ });
+
+ GeneratorImpl& gen = SanitizeAndBuild();
+
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ auto* expected =
+ R"(#version 310 es
precision mediump float;
struct Inner {
@@ -611,44 +596,42 @@ void main() {
return;
}
)";
- EXPECT_EQ(gen.result(), expected);
+ EXPECT_EQ(gen.result(), expected);
}
TEST_F(GlslGeneratorImplTest_MemberAccessor,
StorageBuffer_Load_MultiLevel_Swizzle_SingleLetter) { // NOLINT
- // struct Inner {
- // a : vec3<i32>;
- // b : vec3<f32>;
- // };
- // struct Data {
- // var c : array<Inner, 4>;
- // };
- //
- // var<storage> data : Pre;
- // data.c[2].b.g
-
- auto* inner = Structure("Inner", {
- Member("a", ty.vec3<f32>()),
- Member("b", ty.vec3<f32>()),
- });
-
- SetupStorageBuffer({
- Member("c", ty.array(ty.Of(inner), 4, 32)),
- });
-
- SetupFunction({
- Decl(Var("x", nullptr, ast::StorageClass::kNone,
- MemberAccessor(
- MemberAccessor(IndexAccessor(MemberAccessor("data", "c"), 2),
- "b"),
- "g"))),
- });
-
- GeneratorImpl& gen = SanitizeAndBuild();
-
- ASSERT_TRUE(gen.Generate()) << gen.error();
- auto* expected =
- R"(#version 310 es
+ // struct Inner {
+ // a : vec3<i32>;
+ // b : vec3<f32>;
+ // };
+ // struct Data {
+ // var c : array<Inner, 4u>;
+ // };
+ //
+ // var<storage> data : Pre;
+ // data.c[2i].b.g
+
+ auto* inner = Structure("Inner", {
+ Member("a", ty.vec3<f32>()),
+ Member("b", ty.vec3<f32>()),
+ });
+
+ SetupStorageBuffer({
+ Member("c", ty.array(ty.Of(inner), 4_u, 32)),
+ });
+
+ SetupFunction({
+ Decl(Var("x", nullptr, ast::StorageClass::kNone,
+ MemberAccessor(
+ MemberAccessor(IndexAccessor(MemberAccessor("data", "c"), 2_i), "b"), "g"))),
+ });
+
+ GeneratorImpl& gen = SanitizeAndBuild();
+
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ auto* expected =
+ R"(#version 310 es
precision mediump float;
struct Inner {
@@ -672,44 +655,41 @@ void main() {
return;
}
)";
- EXPECT_EQ(gen.result(), expected);
-}
-
-TEST_F(GlslGeneratorImplTest_MemberAccessor,
- StorageBuffer_Load_MultiLevel_Index) {
- // struct Inner {
- // a : vec3<i32>;
- // b : vec3<f32>;
- // };
- // struct Data {
- // var c : array<Inner, 4>;
- // };
- //
- // var<storage> data : Pre;
- // data.c[2].b[1]
-
- auto* inner = Structure("Inner", {
- Member("a", ty.vec3<f32>()),
- Member("b", ty.vec3<f32>()),
- });
-
- SetupStorageBuffer({
- Member("c", ty.array(ty.Of(inner), 4, 32)),
- });
-
- SetupFunction({
- Decl(Var(
- "x", nullptr, ast::StorageClass::kNone,
- IndexAccessor(MemberAccessor(
- IndexAccessor(MemberAccessor("data", "c"), 2), "b"),
- 1))),
- });
-
- GeneratorImpl& gen = SanitizeAndBuild();
-
- ASSERT_TRUE(gen.Generate()) << gen.error();
- auto* expected =
- R"(#version 310 es
+ EXPECT_EQ(gen.result(), expected);
+}
+
+TEST_F(GlslGeneratorImplTest_MemberAccessor, StorageBuffer_Load_MultiLevel_Index) {
+ // struct Inner {
+ // a : vec3<i32>;
+ // b : vec3<f32>;
+ // };
+ // struct Data {
+ // var c : array<Inner, 4u>;
+ // };
+ //
+ // var<storage> data : Pre;
+ // data.c[2i].b[1i]
+
+ auto* inner = Structure("Inner", {
+ Member("a", ty.vec3<f32>()),
+ Member("b", ty.vec3<f32>()),
+ });
+
+ SetupStorageBuffer({
+ Member("c", ty.array(ty.Of(inner), 4_u, 32)),
+ });
+
+ SetupFunction({
+ Decl(Var("x", nullptr, ast::StorageClass::kNone,
+ IndexAccessor(MemberAccessor(IndexAccessor(MemberAccessor("data", "c"), 2_i), "b"),
+ 1_i))),
+ });
+
+ GeneratorImpl& gen = SanitizeAndBuild();
+
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ auto* expected =
+ R"(#version 310 es
precision mediump float;
struct Inner {
@@ -733,40 +713,40 @@ void main() {
return;
}
)";
- EXPECT_EQ(gen.result(), expected);
+ EXPECT_EQ(gen.result(), expected);
}
TEST_F(GlslGeneratorImplTest_MemberAccessor, StorageBuffer_Store_MultiLevel) {
- // struct Inner {
- // a : vec3<i32>;
- // b : vec3<f32>;
- // };
- // struct Data {
- // var c : array<Inner, 4>;
- // };
- //
- // var<storage> data : Pre;
- // data.c[2].b = vec3<f32>(1.f, 2.f, 3.f);
-
- auto* inner = Structure("Inner", {
- Member("a", ty.vec3<f32>()),
- Member("b", ty.vec3<f32>()),
- });
-
- SetupStorageBuffer({
- Member("c", ty.array(ty.Of(inner), 4, 32)),
- });
-
- SetupFunction({
- Assign(MemberAccessor(IndexAccessor(MemberAccessor("data", "c"), 2), "b"),
- vec3<f32>(1.f, 2.f, 3.f)),
- });
-
- GeneratorImpl& gen = SanitizeAndBuild();
-
- ASSERT_TRUE(gen.Generate()) << gen.error();
- auto* expected =
- R"(#version 310 es
+ // struct Inner {
+ // a : vec3<i32>;
+ // b : vec3<f32>;
+ // };
+ // struct Data {
+ // var c : array<Inner, 4u>;
+ // };
+ //
+ // var<storage> data : Pre;
+ // data.c[2i].b = vec3<f32>(1.f, 2.f, 3.f);
+
+ auto* inner = Structure("Inner", {
+ Member("a", ty.vec3<f32>()),
+ Member("b", ty.vec3<f32>()),
+ });
+
+ SetupStorageBuffer({
+ Member("c", ty.array(ty.Of(inner), 4_u, 32)),
+ });
+
+ SetupFunction({
+ Assign(MemberAccessor(IndexAccessor(MemberAccessor("data", "c"), 2_i), "b"),
+ vec3<f32>(1_f, 2_f, 3_f)),
+ });
+
+ GeneratorImpl& gen = SanitizeAndBuild();
+
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ auto* expected =
+ R"(#version 310 es
precision mediump float;
struct Inner {
@@ -790,44 +770,41 @@ void main() {
return;
}
)";
- EXPECT_EQ(gen.result(), expected);
-}
-
-TEST_F(GlslGeneratorImplTest_MemberAccessor,
- StorageBuffer_Store_Swizzle_SingleLetter) {
- // struct Inner {
- // a : vec3<i32>;
- // b : vec3<f32>;
- // };
- // struct Data {
- // var c : array<Inner, 4>;
- // };
- //
- // var<storage> data : Pre;
- // data.c[2].b.y = 1.f;
-
- auto* inner = Structure("Inner", {
- Member("a", ty.vec3<i32>()),
- Member("b", ty.vec3<f32>()),
- });
-
- SetupStorageBuffer({
- Member("c", ty.array(ty.Of(inner), 4, 32)),
- });
-
- SetupFunction({
- Assign(MemberAccessor(
- MemberAccessor(IndexAccessor(MemberAccessor("data", "c"), 2),
- "b"),
- "y"),
- Expr(1.f)),
- });
-
- GeneratorImpl& gen = SanitizeAndBuild();
-
- ASSERT_TRUE(gen.Generate()) << gen.error();
- auto* expected =
- R"(#version 310 es
+ EXPECT_EQ(gen.result(), expected);
+}
+
+TEST_F(GlslGeneratorImplTest_MemberAccessor, StorageBuffer_Store_Swizzle_SingleLetter) {
+ // struct Inner {
+ // a : vec3<i32>;
+ // b : vec3<f32>;
+ // };
+ // struct Data {
+ // var c : array<Inner, 4u>;
+ // };
+ //
+ // var<storage> data : Pre;
+ // data.c[2i].b.y = 1.f;
+
+ auto* inner = Structure("Inner", {
+ Member("a", ty.vec3<i32>()),
+ Member("b", ty.vec3<f32>()),
+ });
+
+ SetupStorageBuffer({
+ Member("c", ty.array(ty.Of(inner), 4_u, 32)),
+ });
+
+ SetupFunction({
+ Assign(MemberAccessor(MemberAccessor(IndexAccessor(MemberAccessor("data", "c"), 2_i), "b"),
+ "y"),
+ Expr(1_f)),
+ });
+
+ GeneratorImpl& gen = SanitizeAndBuild();
+
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ auto* expected =
+ R"(#version 310 es
precision mediump float;
struct Inner {
@@ -851,29 +828,29 @@ void main() {
return;
}
)";
- EXPECT_EQ(gen.result(), expected);
+ EXPECT_EQ(gen.result(), expected);
}
TEST_F(GlslGeneratorImplTest_MemberAccessor, Swizzle_xyz) {
- auto* var = Var("my_vec", ty.vec4<f32>(), ast::StorageClass::kNone,
- vec4<f32>(1.f, 2.f, 3.f, 4.f));
- auto* expr = MemberAccessor("my_vec", "xyz");
- WrapInFunction(var, expr);
+ auto* var =
+ Var("my_vec", ty.vec4<f32>(), ast::StorageClass::kNone, vec4<f32>(1_f, 2_f, 3_f, 4_f));
+ auto* expr = MemberAccessor("my_vec", "xyz");
+ WrapInFunction(var, expr);
- GeneratorImpl& gen = SanitizeAndBuild();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr("my_vec.xyz"));
+ GeneratorImpl& gen = SanitizeAndBuild();
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr("my_vec.xyz"));
}
TEST_F(GlslGeneratorImplTest_MemberAccessor, Swizzle_gbr) {
- auto* var = Var("my_vec", ty.vec4<f32>(), ast::StorageClass::kNone,
- vec4<f32>(1.f, 2.f, 3.f, 4.f));
- auto* expr = MemberAccessor("my_vec", "gbr");
- WrapInFunction(var, expr);
-
- GeneratorImpl& gen = SanitizeAndBuild();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr("my_vec.gbr"));
+ auto* var =
+ Var("my_vec", ty.vec4<f32>(), ast::StorageClass::kNone, vec4<f32>(1_f, 2_f, 3_f, 4_f));
+ auto* expr = MemberAccessor("my_vec", "gbr");
+ WrapInFunction(var, expr);
+
+ GeneratorImpl& gen = SanitizeAndBuild();
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr("my_vec.gbr"));
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_module_constant_test.cc b/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_module_constant_test.cc
index bf9c74c7143..223122afb83 100644
--- a/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_module_constant_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_module_constant_test.cc
@@ -15,31 +15,33 @@
#include "src/tint/ast/id_attribute.h"
#include "src/tint/writer/glsl/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::glsl {
namespace {
using GlslGeneratorImplTest_ModuleConstant = TestHelper;
TEST_F(GlslGeneratorImplTest_ModuleConstant, Emit_ModuleConstant) {
- auto* var = Const("pos", ty.array<f32, 3>(), array<f32, 3>(1.f, 2.f, 3.f));
- WrapInFunction(Decl(var));
+ auto* var = Let("pos", ty.array<f32, 3>(), array<f32, 3>(1_f, 2_f, 3_f));
+ WrapInFunction(Decl(var));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.EmitProgramConstVariable(var)) << gen.error();
- EXPECT_EQ(gen.result(), "const float pos[3] = float[3](1.0f, 2.0f, 3.0f);\n");
+ ASSERT_TRUE(gen.EmitProgramConstVariable(var)) << gen.error();
+ EXPECT_EQ(gen.result(), "const float pos[3] = float[3](1.0f, 2.0f, 3.0f);\n");
}
TEST_F(GlslGeneratorImplTest_ModuleConstant, Emit_SpecConstant) {
- auto* var = Override("pos", ty.f32(), Expr(3.0f),
- ast::AttributeList{
- Id(23),
- });
+ auto* var = Override("pos", ty.f32(), Expr(3_f),
+ ast::AttributeList{
+ Id(23),
+ });
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.EmitProgramConstVariable(var)) << gen.error();
- EXPECT_EQ(gen.result(), R"(#ifndef WGSL_SPEC_CONSTANT_23
+ ASSERT_TRUE(gen.EmitProgramConstVariable(var)) << gen.error();
+ EXPECT_EQ(gen.result(), R"(#ifndef WGSL_SPEC_CONSTANT_23
#define WGSL_SPEC_CONSTANT_23 3.0f
#endif
const float pos = WGSL_SPEC_CONSTANT_23;
@@ -47,15 +49,15 @@ const float pos = WGSL_SPEC_CONSTANT_23;
}
TEST_F(GlslGeneratorImplTest_ModuleConstant, Emit_SpecConstant_NoConstructor) {
- auto* var = Override("pos", ty.f32(), nullptr,
- ast::AttributeList{
- Id(23),
- });
+ auto* var = Override("pos", ty.f32(), nullptr,
+ ast::AttributeList{
+ Id(23),
+ });
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.EmitProgramConstVariable(var)) << gen.error();
- EXPECT_EQ(gen.result(), R"(#ifndef WGSL_SPEC_CONSTANT_23
+ ASSERT_TRUE(gen.EmitProgramConstVariable(var)) << gen.error();
+ EXPECT_EQ(gen.result(), R"(#ifndef WGSL_SPEC_CONSTANT_23
#error spec constant required for constant id 23
#endif
const float pos = WGSL_SPEC_CONSTANT_23;
@@ -63,17 +65,17 @@ const float pos = WGSL_SPEC_CONSTANT_23;
}
TEST_F(GlslGeneratorImplTest_ModuleConstant, Emit_SpecConstant_NoId) {
- auto* a = Override("a", ty.f32(), Expr(3.0f),
- ast::AttributeList{
- Id(0),
- });
- auto* b = Override("b", ty.f32(), Expr(2.0f));
+ auto* a = Override("a", ty.f32(), Expr(3_f),
+ ast::AttributeList{
+ Id(0),
+ });
+ auto* b = Override("b", ty.f32(), Expr(2_f));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.EmitProgramConstVariable(a)) << gen.error();
- ASSERT_TRUE(gen.EmitProgramConstVariable(b)) << gen.error();
- EXPECT_EQ(gen.result(), R"(#ifndef WGSL_SPEC_CONSTANT_0
+ ASSERT_TRUE(gen.EmitProgramConstVariable(a)) << gen.error();
+ ASSERT_TRUE(gen.EmitProgramConstVariable(b)) << gen.error();
+ EXPECT_EQ(gen.result(), R"(#ifndef WGSL_SPEC_CONSTANT_0
#define WGSL_SPEC_CONSTANT_0 3.0f
#endif
const float a = WGSL_SPEC_CONSTANT_0;
diff --git a/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_return_test.cc b/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_return_test.cc
index 760188a9f83..59df38514ba 100644
--- a/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_return_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_return_test.cc
@@ -14,33 +14,35 @@
#include "src/tint/writer/glsl/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::glsl {
namespace {
using GlslGeneratorImplTest_Return = TestHelper;
TEST_F(GlslGeneratorImplTest_Return, Emit_Return) {
- auto* r = Return();
- WrapInFunction(r);
+ auto* r = Return();
+ WrapInFunction(r);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(r)) << gen.error();
- EXPECT_EQ(gen.result(), " return;\n");
+ ASSERT_TRUE(gen.EmitStatement(r)) << gen.error();
+ EXPECT_EQ(gen.result(), " return;\n");
}
TEST_F(GlslGeneratorImplTest_Return, Emit_ReturnWithValue) {
- auto* r = Return(123);
- Func("f", {}, ty.i32(), {r});
+ auto* r = Return(123_i);
+ Func("f", {}, ty.i32(), {r});
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(r)) << gen.error();
- EXPECT_EQ(gen.result(), " return 123;\n");
+ ASSERT_TRUE(gen.EmitStatement(r)) << gen.error();
+ EXPECT_EQ(gen.result(), " return 123;\n");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_sanitizer_test.cc b/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_sanitizer_test.cc
index 812806f8f4b..6575c56c158 100644
--- a/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_sanitizer_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_sanitizer_test.cc
@@ -17,34 +17,36 @@
#include "src/tint/ast/variable_decl_statement.h"
#include "src/tint/writer/glsl/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::glsl {
namespace {
using GlslSanitizerTest = TestHelper;
TEST_F(GlslSanitizerTest, Call_ArrayLength) {
- auto* s = Structure("my_struct", {Member(0, "a", ty.array<f32>(4))});
- Global("b", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kRead,
+ auto* s = Structure("my_struct", {Member(0, "a", ty.array<f32>(4))});
+ Global("b", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kRead,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(1),
+ create<ast::GroupAttribute>(2),
+ });
+
+ Func("a_func", ast::VariableList{}, ty.void_(),
+ ast::StatementList{
+ Decl(Var("len", ty.u32(), ast::StorageClass::kNone,
+ Call("arrayLength", AddressOf(MemberAccessor("b", "a"))))),
+ },
ast::AttributeList{
- create<ast::BindingAttribute>(1),
- create<ast::GroupAttribute>(2),
+ Stage(ast::PipelineStage::kFragment),
});
- Func("a_func", ast::VariableList{}, ty.void_(),
- ast::StatementList{
- Decl(Var("len", ty.u32(), ast::StorageClass::kNone,
- Call("arrayLength", AddressOf(MemberAccessor("b", "a"))))),
- },
- ast::AttributeList{
- Stage(ast::PipelineStage::kFragment),
- });
-
- GeneratorImpl& gen = SanitizeAndBuild();
+ GeneratorImpl& gen = SanitizeAndBuild();
- ASSERT_TRUE(gen.Generate()) << gen.error();
+ ASSERT_TRUE(gen.Generate()) << gen.error();
- auto got = gen.result();
- auto* expect = R"(#version 310 es
+ auto got = gen.result();
+ auto* expect = R"(#version 310 es
precision mediump float;
layout(binding = 1, std430) buffer my_struct_1 {
@@ -59,35 +61,35 @@ void main() {
return;
}
)";
- EXPECT_EQ(expect, got);
+ EXPECT_EQ(expect, got);
}
TEST_F(GlslSanitizerTest, Call_ArrayLength_OtherMembersInStruct) {
- auto* s = Structure("my_struct", {
- Member(0, "z", ty.f32()),
- Member(4, "a", ty.array<f32>(4)),
- });
- Global("b", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kRead,
+ auto* s = Structure("my_struct", {
+ Member(0, "z", ty.f32()),
+ Member(4, "a", ty.array<f32>(4)),
+ });
+ Global("b", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kRead,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(1),
+ create<ast::GroupAttribute>(2),
+ });
+
+ Func("a_func", ast::VariableList{}, ty.void_(),
+ ast::StatementList{
+ Decl(Var("len", ty.u32(), ast::StorageClass::kNone,
+ Call("arrayLength", AddressOf(MemberAccessor("b", "a"))))),
+ },
ast::AttributeList{
- create<ast::BindingAttribute>(1),
- create<ast::GroupAttribute>(2),
+ Stage(ast::PipelineStage::kFragment),
});
- Func("a_func", ast::VariableList{}, ty.void_(),
- ast::StatementList{
- Decl(Var("len", ty.u32(), ast::StorageClass::kNone,
- Call("arrayLength", AddressOf(MemberAccessor("b", "a"))))),
- },
- ast::AttributeList{
- Stage(ast::PipelineStage::kFragment),
- });
-
- GeneratorImpl& gen = SanitizeAndBuild();
+ GeneratorImpl& gen = SanitizeAndBuild();
- ASSERT_TRUE(gen.Generate()) << gen.error();
+ ASSERT_TRUE(gen.Generate()) << gen.error();
- auto got = gen.result();
- auto* expect = R"(#version 310 es
+ auto got = gen.result();
+ auto* expect = R"(#version 310 es
precision mediump float;
layout(binding = 1, std430) buffer my_struct_1 {
@@ -104,37 +106,36 @@ void main() {
}
)";
- EXPECT_EQ(expect, got);
+ EXPECT_EQ(expect, got);
}
TEST_F(GlslSanitizerTest, Call_ArrayLength_ViaLets) {
- auto* s = Structure("my_struct", {Member(0, "a", ty.array<f32>(4))});
- Global("b", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kRead,
+ auto* s = Structure("my_struct", {Member(0, "a", ty.array<f32>(4))});
+ Global("b", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kRead,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(1),
+ create<ast::GroupAttribute>(2),
+ });
+
+ auto* p = Let("p", nullptr, AddressOf("b"));
+ auto* p2 = Let("p2", nullptr, AddressOf(MemberAccessor(Deref(p), "a")));
+
+ Func("a_func", ast::VariableList{}, ty.void_(),
+ ast::StatementList{
+ Decl(p),
+ Decl(p2),
+ Decl(Var("len", ty.u32(), ast::StorageClass::kNone, Call("arrayLength", p2))),
+ },
ast::AttributeList{
- create<ast::BindingAttribute>(1),
- create<ast::GroupAttribute>(2),
+ Stage(ast::PipelineStage::kFragment),
});
- auto* p = Const("p", nullptr, AddressOf("b"));
- auto* p2 = Const("p2", nullptr, AddressOf(MemberAccessor(Deref(p), "a")));
-
- Func("a_func", ast::VariableList{}, ty.void_(),
- ast::StatementList{
- Decl(p),
- Decl(p2),
- Decl(Var("len", ty.u32(), ast::StorageClass::kNone,
- Call("arrayLength", p2))),
- },
- ast::AttributeList{
- Stage(ast::PipelineStage::kFragment),
- });
-
- GeneratorImpl& gen = SanitizeAndBuild();
+ GeneratorImpl& gen = SanitizeAndBuild();
- ASSERT_TRUE(gen.Generate()) << gen.error();
+ ASSERT_TRUE(gen.Generate()) << gen.error();
- auto got = gen.result();
- auto* expect = R"(#version 310 es
+ auto got = gen.result();
+ auto* expect = R"(#version 310 es
precision mediump float;
layout(binding = 1, std430) buffer my_struct_1 {
@@ -150,28 +151,28 @@ void main() {
}
)";
- EXPECT_EQ(expect, got);
+ EXPECT_EQ(expect, got);
}
TEST_F(GlslSanitizerTest, PromoteArrayInitializerToConstVar) {
- auto* array_init = array<i32, 4>(1, 2, 3, 4);
- auto* array_index = IndexAccessor(array_init, 3);
- auto* pos = Var("pos", ty.i32(), ast::StorageClass::kNone, array_index);
-
- Func("main", ast::VariableList{}, ty.void_(),
- {
- Decl(pos),
- },
- {
- Stage(ast::PipelineStage::kFragment),
- });
+ auto* array_init = array<i32, 4>(1_i, 2_i, 3_i, 4_i);
+ auto* array_index = IndexAccessor(array_init, 3_i);
+ auto* pos = Var("pos", ty.i32(), ast::StorageClass::kNone, array_index);
+
+ Func("main", ast::VariableList{}, ty.void_(),
+ {
+ Decl(pos),
+ },
+ {
+ Stage(ast::PipelineStage::kFragment),
+ });
- GeneratorImpl& gen = SanitizeAndBuild();
+ GeneratorImpl& gen = SanitizeAndBuild();
- ASSERT_TRUE(gen.Generate()) << gen.error();
+ ASSERT_TRUE(gen.Generate()) << gen.error();
- auto got = gen.result();
- auto* expect = R"(#version 310 es
+ auto got = gen.result();
+ auto* expect = R"(#version 310 es
precision mediump float;
void tint_symbol() {
@@ -184,34 +185,33 @@ void main() {
return;
}
)";
- EXPECT_EQ(expect, got);
+ EXPECT_EQ(expect, got);
}
TEST_F(GlslSanitizerTest, PromoteStructInitializerToConstVar) {
- auto* str = Structure("S", {
- Member("a", ty.i32()),
- Member("b", ty.vec3<f32>()),
- Member("c", ty.i32()),
- });
- auto* struct_init = Construct(ty.Of(str), 1, vec3<f32>(2.f, 3.f, 4.f), 4);
- auto* struct_access = MemberAccessor(struct_init, "b");
- auto* pos =
- Var("pos", ty.vec3<f32>(), ast::StorageClass::kNone, struct_access);
-
- Func("main", ast::VariableList{}, ty.void_(),
- {
- Decl(pos),
- },
- {
- Stage(ast::PipelineStage::kFragment),
- });
-
- GeneratorImpl& gen = SanitizeAndBuild();
-
- ASSERT_TRUE(gen.Generate()) << gen.error();
-
- auto got = gen.result();
- auto* expect = R"(#version 310 es
+ auto* str = Structure("S", {
+ Member("a", ty.i32()),
+ Member("b", ty.vec3<f32>()),
+ Member("c", ty.i32()),
+ });
+ auto* struct_init = Construct(ty.Of(str), 1_i, vec3<f32>(2_f, 3_f, 4_f), 4_i);
+ auto* struct_access = MemberAccessor(struct_init, "b");
+ auto* pos = Var("pos", ty.vec3<f32>(), ast::StorageClass::kNone, struct_access);
+
+ Func("main", ast::VariableList{}, ty.void_(),
+ {
+ Decl(pos),
+ },
+ {
+ Stage(ast::PipelineStage::kFragment),
+ });
+
+ GeneratorImpl& gen = SanitizeAndBuild();
+
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+
+ auto got = gen.result();
+ auto* expect = R"(#version 310 es
precision mediump float;
struct S {
@@ -230,34 +230,33 @@ void main() {
return;
}
)";
- EXPECT_EQ(expect, got);
+ EXPECT_EQ(expect, got);
}
TEST_F(GlslSanitizerTest, InlinePtrLetsBasic) {
- // var v : i32;
- // let p : ptr<function, i32> = &v;
- // let x : i32 = *p;
- auto* v = Var("v", ty.i32());
- auto* p =
- Const("p", ty.pointer<i32>(ast::StorageClass::kFunction), AddressOf(v));
- auto* x = Var("x", ty.i32(), ast::StorageClass::kNone, Deref(p));
-
- Func("main", ast::VariableList{}, ty.void_(),
- {
- Decl(v),
- Decl(p),
- Decl(x),
- },
- {
- Stage(ast::PipelineStage::kFragment),
- });
-
- GeneratorImpl& gen = SanitizeAndBuild();
-
- ASSERT_TRUE(gen.Generate()) << gen.error();
-
- auto got = gen.result();
- auto* expect = R"(#version 310 es
+ // var v : i32;
+ // let p : ptr<function, i32> = &v;
+ // let x : i32 = *p;
+ auto* v = Var("v", ty.i32());
+ auto* p = Let("p", ty.pointer<i32>(ast::StorageClass::kFunction), AddressOf(v));
+ auto* x = Var("x", ty.i32(), ast::StorageClass::kNone, Deref(p));
+
+ Func("main", ast::VariableList{}, ty.void_(),
+ {
+ Decl(v),
+ Decl(p),
+ Decl(x),
+ },
+ {
+ Stage(ast::PipelineStage::kFragment),
+ });
+
+ GeneratorImpl& gen = SanitizeAndBuild();
+
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+
+ auto got = gen.result();
+ auto* expect = R"(#version 310 es
precision mediump float;
void tint_symbol() {
@@ -270,46 +269,42 @@ void main() {
return;
}
)";
- EXPECT_EQ(expect, got);
+ EXPECT_EQ(expect, got);
}
TEST_F(GlslSanitizerTest, InlinePtrLetsComplexChain) {
- // var a : array<mat4x4<f32>, 4>;
- // let ap : ptr<function, array<mat4x4<f32>, 4>> = &a;
- // let mp : ptr<function, mat4x4<f32>> = &(*ap)[3];
- // let vp : ptr<function, vec4<f32>> = &(*mp)[2];
- // let v : vec4<f32> = *vp;
- auto* a = Var("a", ty.array(ty.mat4x4<f32>(), 4));
- auto* ap = Const(
- "ap",
- ty.pointer(ty.array(ty.mat4x4<f32>(), 4), ast::StorageClass::kFunction),
- AddressOf(a));
- auto* mp =
- Const("mp", ty.pointer(ty.mat4x4<f32>(), ast::StorageClass::kFunction),
- AddressOf(IndexAccessor(Deref(ap), 3)));
- auto* vp =
- Const("vp", ty.pointer(ty.vec4<f32>(), ast::StorageClass::kFunction),
- AddressOf(IndexAccessor(Deref(mp), 2)));
- auto* v = Var("v", ty.vec4<f32>(), ast::StorageClass::kNone, Deref(vp));
-
- Func("main", ast::VariableList{}, ty.void_(),
- {
- Decl(a),
- Decl(ap),
- Decl(mp),
- Decl(vp),
- Decl(v),
- },
- {
- Stage(ast::PipelineStage::kFragment),
- });
-
- GeneratorImpl& gen = SanitizeAndBuild();
-
- ASSERT_TRUE(gen.Generate()) << gen.error();
-
- auto got = gen.result();
- auto* expect = R"(#version 310 es
+ // var a : array<mat4x4<f32>, 4u>;
+ // let ap : ptr<function, array<mat4x4<f32>, 4u>> = &a;
+ // let mp : ptr<function, mat4x4<f32>> = &(*ap)[3i];
+ // let vp : ptr<function, vec4<f32>> = &(*mp)[2i];
+ // let v : vec4<f32> = *vp;
+ auto* a = Var("a", ty.array(ty.mat4x4<f32>(), 4_u));
+ auto* ap = Let("ap", ty.pointer(ty.array(ty.mat4x4<f32>(), 4_u), ast::StorageClass::kFunction),
+ AddressOf(a));
+ auto* mp = Let("mp", ty.pointer(ty.mat4x4<f32>(), ast::StorageClass::kFunction),
+ AddressOf(IndexAccessor(Deref(ap), 3_i)));
+ auto* vp = Let("vp", ty.pointer(ty.vec4<f32>(), ast::StorageClass::kFunction),
+ AddressOf(IndexAccessor(Deref(mp), 2_i)));
+ auto* v = Var("v", ty.vec4<f32>(), ast::StorageClass::kNone, Deref(vp));
+
+ Func("main", ast::VariableList{}, ty.void_(),
+ {
+ Decl(a),
+ Decl(ap),
+ Decl(mp),
+ Decl(vp),
+ Decl(v),
+ },
+ {
+ Stage(ast::PipelineStage::kFragment),
+ });
+
+ GeneratorImpl& gen = SanitizeAndBuild();
+
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+
+ auto got = gen.result();
+ auto* expect = R"(#version 310 es
precision mediump float;
void tint_symbol() {
@@ -322,7 +317,7 @@ void main() {
return;
}
)";
- EXPECT_EQ(expect, got);
+ EXPECT_EQ(expect, got);
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_storage_buffer_test.cc b/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_storage_buffer_test.cc
index 00bca6c523c..a12fbbcd9d6 100644
--- a/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_storage_buffer_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_storage_buffer_test.cc
@@ -23,35 +23,34 @@ namespace {
using GlslGeneratorImplTest_StorageBuffer = TestHelper;
void TestAlign(ProgramBuilder* ctx) {
- // struct Nephews {
- // @align(256) huey : f32;
- // @align(256) dewey : f32;
- // @align(256) louie : f32;
- // };
- // @group(0) @binding(0) var<storage, read_write> nephews : Nephews;
- auto* nephews = ctx->Structure(
- "Nephews",
- {
- ctx->Member("huey", ctx->ty.f32(), {ctx->MemberAlign(256)}),
- ctx->Member("dewey", ctx->ty.f32(), {ctx->MemberAlign(256)}),
- ctx->Member("louie", ctx->ty.f32(), {ctx->MemberAlign(256)}),
- });
- ctx->Global("nephews", ctx->ty.Of(nephews), ast::StorageClass::kStorage,
- ast::AttributeList{
- ctx->create<ast::BindingAttribute>(0),
- ctx->create<ast::GroupAttribute>(0),
- });
+ // struct Nephews {
+ // @align(256) huey : f32;
+ // @align(256) dewey : f32;
+ // @align(256) louie : f32;
+ // };
+ // @group(0) @binding(0) var<storage, read_write> nephews : Nephews;
+ auto* nephews =
+ ctx->Structure("Nephews", {
+ ctx->Member("huey", ctx->ty.f32(), {ctx->MemberAlign(256)}),
+ ctx->Member("dewey", ctx->ty.f32(), {ctx->MemberAlign(256)}),
+ ctx->Member("louie", ctx->ty.f32(), {ctx->MemberAlign(256)}),
+ });
+ ctx->Global("nephews", ctx->ty.Of(nephews), ast::StorageClass::kStorage,
+ ast::AttributeList{
+ ctx->create<ast::BindingAttribute>(0),
+ ctx->create<ast::GroupAttribute>(0),
+ });
}
TEST_F(GlslGeneratorImplTest_StorageBuffer, Align) {
- TestAlign(this);
+ TestAlign(this);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- // TODO(crbug.com/tint/1421) offsets do not currently work on GLSL ES.
- // They will likely require manual padding.
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(#version 310 es
+ // TODO(crbug.com/tint/1421) offsets do not currently work on GLSL ES.
+ // They will likely require manual padding.
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(#version 310 es
struct Nephews {
float huey;
@@ -68,12 +67,12 @@ layout(binding = 0, std430) buffer Nephews_1 {
}
TEST_F(GlslGeneratorImplTest_StorageBuffer, Align_Desktop) {
- TestAlign(this);
+ TestAlign(this);
- GeneratorImpl& gen = Build(Version(Version::Standard::kDesktop, 4, 4));
+ GeneratorImpl& gen = Build(Version(Version::Standard::kDesktop, 4, 4));
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(#version 440
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(#version 440
struct Nephews {
float huey;
diff --git a/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_switch_test.cc b/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_switch_test.cc
index e49c08ce065..1cc42fb639f 100644
--- a/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_switch_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_switch_test.cc
@@ -14,38 +14,40 @@
#include "src/tint/writer/glsl/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::glsl {
namespace {
using GlslGeneratorImplTest_Switch = TestHelper;
TEST_F(GlslGeneratorImplTest_Switch, Emit_Switch) {
- Global("cond", ty.i32(), ast::StorageClass::kPrivate);
+ Global("cond", ty.i32(), ast::StorageClass::kPrivate);
- auto* def_body = Block(create<ast::BreakStatement>());
- auto* def = create<ast::CaseStatement>(ast::CaseSelectorList{}, def_body);
+ auto* def_body = Block(create<ast::BreakStatement>());
+ auto* def = create<ast::CaseStatement>(ast::CaseSelectorList{}, def_body);
- ast::CaseSelectorList case_val;
- case_val.push_back(Expr(5));
+ ast::CaseSelectorList case_val;
+ case_val.push_back(Expr(5_i));
- auto* case_body = Block(create<ast::BreakStatement>());
+ auto* case_body = Block(create<ast::BreakStatement>());
- auto* case_stmt = create<ast::CaseStatement>(case_val, case_body);
+ auto* case_stmt = create<ast::CaseStatement>(case_val, case_body);
- ast::CaseStatementList body;
- body.push_back(case_stmt);
- body.push_back(def);
+ ast::CaseStatementList body;
+ body.push_back(case_stmt);
+ body.push_back(def);
- auto* cond = Expr("cond");
- auto* s = create<ast::SwitchStatement>(cond, body);
- WrapInFunction(s);
+ auto* cond = Expr("cond");
+ auto* s = create<ast::SwitchStatement>(cond, body);
+ WrapInFunction(s);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(s)) << gen.error();
- EXPECT_EQ(gen.result(), R"( switch(cond) {
+ ASSERT_TRUE(gen.EmitStatement(s)) << gen.error();
+ EXPECT_EQ(gen.result(), R"( switch(cond) {
case 5: {
break;
}
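
A pattern that recurs throughout these test hunks is the move from plain C++ literals to the typed literal suffixes brought in by "using namespace tint::number_suffixes;". A minimal sketch of the idiom, built only from builder calls that already appear elsewhere in this diff (any suffix spelling beyond _i, _u and _f is not shown here and is an assumption):

    using namespace tint::number_suffixes;  // NOLINT

    // The suffix pins the WGSL scalar type of the AST literal instead of
    // relying on C++ overload deduction:
    case_val.push_back(Expr(5_i));                         // i32 case selector (was Expr(5))
    auto* a = Var("a", ty.array(ty.mat4x4<f32>(), 4_u));   // u32 element count (was 4)
    Assign("wg", 1.2_f);                                   // f32 literal (was 1.2f)
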
diff --git a/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_test.cc b/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_test.cc
index cecfa92677b..af2e205d317 100644
--- a/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_test.cc
@@ -19,14 +19,22 @@ namespace {
using GlslGeneratorImplTest = TestHelper;
+TEST_F(GlslGeneratorImplTest, InvalidProgram) {
+ Diagnostics().add_error(diag::System::Writer, "make the program invalid");
+ ASSERT_FALSE(IsValid());
+ auto program = std::make_unique<Program>(std::move(*this));
+ ASSERT_FALSE(program->IsValid());
+ auto result = Generate(program.get(), Options{}, "");
+ EXPECT_EQ(result.error, "input program is not valid");
+}
+
TEST_F(GlslGeneratorImplTest, Generate) {
- Func("my_func", ast::VariableList{}, ty.void_(), ast::StatementList{},
- ast::AttributeList{});
+ Func("my_func", ast::VariableList{}, ty.void_(), ast::StatementList{}, ast::AttributeList{});
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(#version 310 es
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(#version 310 es
void my_func() {
}
@@ -35,13 +43,12 @@ void my_func() {
}
TEST_F(GlslGeneratorImplTest, GenerateDesktop) {
- Func("my_func", ast::VariableList{}, ty.void_(), ast::StatementList{},
- ast::AttributeList{});
+ Func("my_func", ast::VariableList{}, ty.void_(), ast::StatementList{}, ast::AttributeList{});
- GeneratorImpl& gen = Build(Version(Version::Standard::kDesktop, 4, 4));
+ GeneratorImpl& gen = Build(Version(Version::Standard::kDesktop, 4, 4));
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(#version 440
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(#version 440
void my_func() {
}
@@ -50,18 +57,16 @@ void my_func() {
}
TEST_F(GlslGeneratorImplTest, GenerateSampleIndexES) {
- Global(
- "gl_SampleID", ty.i32(),
- ast::AttributeList{Builtin(ast::Builtin::kSampleIndex),
- Disable(ast::DisabledValidation::kIgnoreStorageClass)},
- ast::StorageClass::kInput);
- Func("my_func", {}, ty.i32(),
- ast::StatementList{Return(Expr("gl_SampleID"))});
-
- GeneratorImpl& gen = Build(Version(Version::Standard::kES, 3, 1));
-
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(#version 310 es
+ Global("gl_SampleID", ty.i32(),
+ ast::AttributeList{Builtin(ast::Builtin::kSampleIndex),
+ Disable(ast::DisabledValidation::kIgnoreStorageClass)},
+ ast::StorageClass::kInput);
+ Func("my_func", {}, ty.i32(), ast::StatementList{Return(Expr("gl_SampleID"))});
+
+ GeneratorImpl& gen = Build(Version(Version::Standard::kES, 3, 1));
+
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(#version 310 es
#extension GL_OES_sample_variables : require
int my_func() {
@@ -72,18 +77,16 @@ int my_func() {
}
TEST_F(GlslGeneratorImplTest, GenerateSampleIndexDesktop) {
- Global(
- "gl_SampleID", ty.i32(),
- ast::AttributeList{Builtin(ast::Builtin::kSampleIndex),
- Disable(ast::DisabledValidation::kIgnoreStorageClass)},
- ast::StorageClass::kInput);
- Func("my_func", {}, ty.i32(),
- ast::StatementList{Return(Expr("gl_SampleID"))});
-
- GeneratorImpl& gen = Build(Version(Version::Standard::kDesktop, 4, 4));
-
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(#version 440
+ Global("gl_SampleID", ty.i32(),
+ ast::AttributeList{Builtin(ast::Builtin::kSampleIndex),
+ Disable(ast::DisabledValidation::kIgnoreStorageClass)},
+ ast::StorageClass::kInput);
+ Func("my_func", {}, ty.i32(), ast::StatementList{Return(Expr("gl_SampleID"))});
+
+ GeneratorImpl& gen = Build(Version(Version::Standard::kDesktop, 4, 4));
+
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(#version 440
int my_func() {
return gl_SampleID;
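
The new InvalidProgram test above expects Generate() to bail out on an already-invalid program with the exact message "input program is not valid". A sketch of the guard it exercises, mirroring the hlsl::Generate change that appears later in this diff (the GLSL overload that the test calls also takes an entry-point name, omitted here):

    Result Generate(const Program* program, const Options& options) {
        Result result;
        if (!program->IsValid()) {
            // Fail fast: skip sanitization and code generation entirely.
            result.error = "input program is not valid";
            return result;
        }
        // ... sanitize the program, run GeneratorImpl, collect entry points ...
        return result;
    }
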
diff --git a/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_type_test.cc b/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_type_test.cc
index c6dbf539f84..b7daee263a6 100644
--- a/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_type_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_type_test.cc
@@ -15,135 +15,133 @@
#include "gmock/gmock.h"
#include "src/tint/ast/call_statement.h"
#include "src/tint/ast/stage_attribute.h"
-#include "src/tint/sem/depth_texture_type.h"
-#include "src/tint/sem/multisampled_texture_type.h"
-#include "src/tint/sem/sampled_texture_type.h"
-#include "src/tint/sem/sampler_type.h"
-#include "src/tint/sem/storage_texture_type.h"
+#include "src/tint/sem/depth_texture.h"
+#include "src/tint/sem/multisampled_texture.h"
+#include "src/tint/sem/sampled_texture.h"
+#include "src/tint/sem/sampler.h"
+#include "src/tint/sem/storage_texture.h"
#include "src/tint/writer/glsl/test_helper.h"
+using ::testing::HasSubstr;
+
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::glsl {
namespace {
-using ::testing::HasSubstr;
-
using GlslGeneratorImplTest_Type = TestHelper;
TEST_F(GlslGeneratorImplTest_Type, EmitType_Array) {
- auto* arr = ty.array<bool, 4>();
- Global("G", arr, ast::StorageClass::kPrivate);
+ auto* arr = ty.array<bool, 4>();
+ Global("G", arr, ast::StorageClass::kPrivate);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitType(out, program->TypeOf(arr), ast::StorageClass::kNone,
- ast::Access::kReadWrite, "ary"))
- << gen.error();
- EXPECT_EQ(out.str(), "bool ary[4]");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitType(out, program->TypeOf(arr), ast::StorageClass::kNone,
+ ast::Access::kReadWrite, "ary"))
+ << gen.error();
+ EXPECT_EQ(out.str(), "bool ary[4]");
}
TEST_F(GlslGeneratorImplTest_Type, EmitType_ArrayOfArray) {
- auto* arr = ty.array(ty.array<bool, 4>(), 5);
- Global("G", arr, ast::StorageClass::kPrivate);
+ auto* arr = ty.array(ty.array<bool, 4>(), 5_u);
+ Global("G", arr, ast::StorageClass::kPrivate);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitType(out, program->TypeOf(arr), ast::StorageClass::kNone,
- ast::Access::kReadWrite, "ary"))
- << gen.error();
- EXPECT_EQ(out.str(), "bool ary[5][4]");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitType(out, program->TypeOf(arr), ast::StorageClass::kNone,
+ ast::Access::kReadWrite, "ary"))
+ << gen.error();
+ EXPECT_EQ(out.str(), "bool ary[5][4]");
}
TEST_F(GlslGeneratorImplTest_Type, EmitType_ArrayOfArrayOfArray) {
- auto* arr = ty.array(ty.array(ty.array<bool, 4>(), 5), 6);
- Global("G", arr, ast::StorageClass::kPrivate);
+ auto* arr = ty.array(ty.array(ty.array<bool, 4>(), 5_u), 6_u);
+ Global("G", arr, ast::StorageClass::kPrivate);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitType(out, program->TypeOf(arr), ast::StorageClass::kNone,
- ast::Access::kReadWrite, "ary"))
- << gen.error();
- EXPECT_EQ(out.str(), "bool ary[6][5][4]");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitType(out, program->TypeOf(arr), ast::StorageClass::kNone,
+ ast::Access::kReadWrite, "ary"))
+ << gen.error();
+ EXPECT_EQ(out.str(), "bool ary[6][5][4]");
}
TEST_F(GlslGeneratorImplTest_Type, EmitType_Array_WithoutName) {
- auto* arr = ty.array<bool, 4>();
- Global("G", arr, ast::StorageClass::kPrivate);
+ auto* arr = ty.array<bool, 4>();
+ Global("G", arr, ast::StorageClass::kPrivate);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitType(out, program->TypeOf(arr), ast::StorageClass::kNone,
- ast::Access::kReadWrite, ""))
- << gen.error();
- EXPECT_EQ(out.str(), "bool[4]");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitType(out, program->TypeOf(arr), ast::StorageClass::kNone,
+ ast::Access::kReadWrite, ""))
+ << gen.error();
+ EXPECT_EQ(out.str(), "bool[4]");
}
TEST_F(GlslGeneratorImplTest_Type, EmitType_Bool) {
- auto* bool_ = create<sem::Bool>();
+ auto* bool_ = create<sem::Bool>();
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitType(out, bool_, ast::StorageClass::kNone,
- ast::Access::kReadWrite, ""))
- << gen.error();
- EXPECT_EQ(out.str(), "bool");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitType(out, bool_, ast::StorageClass::kNone, ast::Access::kReadWrite, ""))
+ << gen.error();
+ EXPECT_EQ(out.str(), "bool");
}
TEST_F(GlslGeneratorImplTest_Type, EmitType_F32) {
- auto* f32 = create<sem::F32>();
+ auto* f32 = create<sem::F32>();
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitType(out, f32, ast::StorageClass::kNone,
- ast::Access::kReadWrite, ""))
- << gen.error();
- EXPECT_EQ(out.str(), "float");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitType(out, f32, ast::StorageClass::kNone, ast::Access::kReadWrite, ""))
+ << gen.error();
+ EXPECT_EQ(out.str(), "float");
}
TEST_F(GlslGeneratorImplTest_Type, EmitType_I32) {
- auto* i32 = create<sem::I32>();
+ auto* i32 = create<sem::I32>();
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitType(out, i32, ast::StorageClass::kNone,
- ast::Access::kReadWrite, ""))
- << gen.error();
- EXPECT_EQ(out.str(), "int");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitType(out, i32, ast::StorageClass::kNone, ast::Access::kReadWrite, ""))
+ << gen.error();
+ EXPECT_EQ(out.str(), "int");
}
TEST_F(GlslGeneratorImplTest_Type, EmitType_Matrix) {
- auto* f32 = create<sem::F32>();
- auto* vec3 = create<sem::Vector>(f32, 3u);
- auto* mat2x3 = create<sem::Matrix>(vec3, 2u);
+ auto* f32 = create<sem::F32>();
+ auto* vec3 = create<sem::Vector>(f32, 3u);
+ auto* mat2x3 = create<sem::Matrix>(vec3, 2u);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitType(out, mat2x3, ast::StorageClass::kNone,
- ast::Access::kReadWrite, ""))
- << gen.error();
- EXPECT_EQ(out.str(), "mat2x3");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitType(out, mat2x3, ast::StorageClass::kNone, ast::Access::kReadWrite, ""))
+ << gen.error();
+ EXPECT_EQ(out.str(), "mat2x3");
}
TEST_F(GlslGeneratorImplTest_Type, EmitType_StructDecl) {
- auto* s = Structure("S", {
- Member("a", ty.i32()),
- Member("b", ty.f32()),
- });
- Global("g", ty.Of(s), ast::StorageClass::kPrivate);
-
- GeneratorImpl& gen = Build();
-
- TextGenerator::TextBuffer buf;
- auto* sem_s = program->TypeOf(s)->As<sem::Struct>();
- ASSERT_TRUE(gen.EmitStructType(&buf, sem_s)) << gen.error();
- EXPECT_EQ(buf.String(), R"(struct S {
+ auto* s = Structure("S", {
+ Member("a", ty.i32()),
+ Member("b", ty.f32()),
+ });
+ Global("g", ty.Of(s), ast::StorageClass::kPrivate);
+
+ GeneratorImpl& gen = Build();
+
+ TextGenerator::TextBuffer buf;
+ auto* sem_s = program->TypeOf(s)->As<sem::Struct>();
+ ASSERT_TRUE(gen.EmitStructType(&buf, sem_s)) << gen.error();
+ EXPECT_EQ(buf.String(), R"(struct S {
int a;
float b;
};
@@ -152,33 +150,32 @@ TEST_F(GlslGeneratorImplTest_Type, EmitType_StructDecl) {
}
TEST_F(GlslGeneratorImplTest_Type, EmitType_Struct) {
- auto* s = Structure("S", {
- Member("a", ty.i32()),
- Member("b", ty.f32()),
- });
- Global("g", ty.Of(s), ast::StorageClass::kPrivate);
-
- GeneratorImpl& gen = Build();
-
- auto* sem_s = program->TypeOf(s)->As<sem::Struct>();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitType(out, sem_s, ast::StorageClass::kNone,
- ast::Access::kReadWrite, ""))
- << gen.error();
- EXPECT_EQ(out.str(), "S");
+ auto* s = Structure("S", {
+ Member("a", ty.i32()),
+ Member("b", ty.f32()),
+ });
+ Global("g", ty.Of(s), ast::StorageClass::kPrivate);
+
+ GeneratorImpl& gen = Build();
+
+ auto* sem_s = program->TypeOf(s)->As<sem::Struct>();
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitType(out, sem_s, ast::StorageClass::kNone, ast::Access::kReadWrite, ""))
+ << gen.error();
+ EXPECT_EQ(out.str(), "S");
}
TEST_F(GlslGeneratorImplTest_Type, EmitType_Struct_NameCollision) {
- auto* s = Structure("S", {
- Member("double", ty.i32()),
- Member("float", ty.f32()),
- });
- Global("g", ty.Of(s), ast::StorageClass::kPrivate);
+ auto* s = Structure("S", {
+ Member("double", ty.i32()),
+ Member("float", ty.f32()),
+ });
+ Global("g", ty.Of(s), ast::StorageClass::kPrivate);
- GeneratorImpl& gen = SanitizeAndBuild();
+ GeneratorImpl& gen = SanitizeAndBuild();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr(R"(struct S {
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr(R"(struct S {
int tint_symbol;
float tint_symbol_1;
};
@@ -186,18 +183,18 @@ TEST_F(GlslGeneratorImplTest_Type, EmitType_Struct_NameCollision) {
}
TEST_F(GlslGeneratorImplTest_Type, EmitType_Struct_WithOffsetAttributes) {
- auto* s = Structure("S", {
- Member("a", ty.i32(), {MemberOffset(0)}),
- Member("b", ty.f32(), {MemberOffset(8)}),
- });
- Global("g", ty.Of(s), ast::StorageClass::kPrivate);
-
- GeneratorImpl& gen = Build();
-
- TextGenerator::TextBuffer buf;
- auto* sem_s = program->TypeOf(s)->As<sem::Struct>();
- ASSERT_TRUE(gen.EmitStructType(&buf, sem_s)) << gen.error();
- EXPECT_EQ(buf.String(), R"(struct S {
+ auto* s = Structure("S", {
+ Member("a", ty.i32(), {MemberOffset(0)}),
+ Member("b", ty.f32(), {MemberOffset(8)}),
+ });
+ Global("g", ty.Of(s), ast::StorageClass::kPrivate);
+
+ GeneratorImpl& gen = Build();
+
+ TextGenerator::TextBuffer buf;
+ auto* sem_s = program->TypeOf(s)->As<sem::Struct>();
+ ASSERT_TRUE(gen.EmitStructType(&buf, sem_s)) << gen.error();
+ EXPECT_EQ(buf.String(), R"(struct S {
int a;
float b;
};
@@ -206,165 +203,156 @@ TEST_F(GlslGeneratorImplTest_Type, EmitType_Struct_WithOffsetAttributes) {
}
TEST_F(GlslGeneratorImplTest_Type, EmitType_U32) {
- auto* u32 = create<sem::U32>();
+ auto* u32 = create<sem::U32>();
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitType(out, u32, ast::StorageClass::kNone,
- ast::Access::kReadWrite, ""))
- << gen.error();
- EXPECT_EQ(out.str(), "uint");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitType(out, u32, ast::StorageClass::kNone, ast::Access::kReadWrite, ""))
+ << gen.error();
+ EXPECT_EQ(out.str(), "uint");
}
TEST_F(GlslGeneratorImplTest_Type, EmitType_Vector) {
- auto* f32 = create<sem::F32>();
- auto* vec3 = create<sem::Vector>(f32, 3u);
+ auto* f32 = create<sem::F32>();
+ auto* vec3 = create<sem::Vector>(f32, 3u);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitType(out, vec3, ast::StorageClass::kNone,
- ast::Access::kReadWrite, ""))
- << gen.error();
- EXPECT_EQ(out.str(), "vec3");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitType(out, vec3, ast::StorageClass::kNone, ast::Access::kReadWrite, ""))
+ << gen.error();
+ EXPECT_EQ(out.str(), "vec3");
}
TEST_F(GlslGeneratorImplTest_Type, EmitType_Void) {
- auto* void_ = create<sem::Void>();
+ auto* void_ = create<sem::Void>();
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitType(out, void_, ast::StorageClass::kNone,
- ast::Access::kReadWrite, ""))
- << gen.error();
- EXPECT_EQ(out.str(), "void");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitType(out, void_, ast::StorageClass::kNone, ast::Access::kReadWrite, ""))
+ << gen.error();
+ EXPECT_EQ(out.str(), "void");
}
TEST_F(GlslGeneratorImplTest_Type, EmitSampler) {
- auto* sampler = create<sem::Sampler>(ast::SamplerKind::kSampler);
+ auto* sampler = create<sem::Sampler>(ast::SamplerKind::kSampler);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_FALSE(gen.EmitType(out, sampler, ast::StorageClass::kNone,
- ast::Access::kReadWrite, ""))
- << gen.error();
+ std::stringstream out;
+ ASSERT_FALSE(gen.EmitType(out, sampler, ast::StorageClass::kNone, ast::Access::kReadWrite, ""))
+ << gen.error();
}
TEST_F(GlslGeneratorImplTest_Type, EmitSamplerComparison) {
- auto* sampler = create<sem::Sampler>(ast::SamplerKind::kComparisonSampler);
+ auto* sampler = create<sem::Sampler>(ast::SamplerKind::kComparisonSampler);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_FALSE(gen.EmitType(out, sampler, ast::StorageClass::kNone,
- ast::Access::kReadWrite, ""))
- << gen.error();
+ std::stringstream out;
+ ASSERT_FALSE(gen.EmitType(out, sampler, ast::StorageClass::kNone, ast::Access::kReadWrite, ""))
+ << gen.error();
}
struct GlslDepthTextureData {
- ast::TextureDimension dim;
- std::string result;
+ ast::TextureDimension dim;
+ std::string result;
};
inline std::ostream& operator<<(std::ostream& out, GlslDepthTextureData data) {
- out << data.dim;
- return out;
+ out << data.dim;
+ return out;
}
using GlslDepthTexturesTest = TestParamHelper<GlslDepthTextureData>;
TEST_P(GlslDepthTexturesTest, Emit) {
- auto params = GetParam();
+ auto params = GetParam();
- auto* t = ty.depth_texture(params.dim);
+ auto* t = ty.depth_texture(params.dim);
- Global("tex", t,
- ast::AttributeList{
- create<ast::BindingAttribute>(1),
- create<ast::GroupAttribute>(2),
- });
+ Global("tex", t,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(1),
+ create<ast::GroupAttribute>(2),
+ });
- Func("main", {}, ty.void_(), {CallStmt(Call("textureDimensions", "tex"))},
- {Stage(ast::PipelineStage::kFragment)});
+ Func("main", {}, ty.void_(), {CallStmt(Call("textureDimensions", "tex"))},
+ {Stage(ast::PipelineStage::kFragment)});
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr(params.result));
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr(params.result));
}
INSTANTIATE_TEST_SUITE_P(
GlslGeneratorImplTest_Type,
GlslDepthTexturesTest,
- testing::Values(GlslDepthTextureData{ast::TextureDimension::k2d,
- "sampler2DShadow tex;"},
- GlslDepthTextureData{ast::TextureDimension::k2dArray,
- "sampler2DArrayShadow tex;"},
- GlslDepthTextureData{ast::TextureDimension::kCube,
- "samplerCubeShadow tex;"},
- GlslDepthTextureData{ast::TextureDimension::kCubeArray,
- "samplerCubeArrayShadow tex;"}));
+ testing::Values(
+ GlslDepthTextureData{ast::TextureDimension::k2d, "sampler2DShadow tex;"},
+ GlslDepthTextureData{ast::TextureDimension::k2dArray, "sampler2DArrayShadow tex;"},
+ GlslDepthTextureData{ast::TextureDimension::kCube, "samplerCubeShadow tex;"},
+ GlslDepthTextureData{ast::TextureDimension::kCubeArray, "samplerCubeArrayShadow tex;"}));
using GlslDepthMultisampledTexturesTest = TestHelper;
TEST_F(GlslDepthMultisampledTexturesTest, Emit) {
- auto* t = ty.depth_multisampled_texture(ast::TextureDimension::k2d);
+ auto* t = ty.depth_multisampled_texture(ast::TextureDimension::k2d);
- Global("tex", t,
- ast::AttributeList{
- create<ast::BindingAttribute>(1),
- create<ast::GroupAttribute>(2),
- });
+ Global("tex", t,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(1),
+ create<ast::GroupAttribute>(2),
+ });
- Func("main", {}, ty.void_(), {CallStmt(Call("textureDimensions", "tex"))},
- {Stage(ast::PipelineStage::kFragment)});
+ Func("main", {}, ty.void_(), {CallStmt(Call("textureDimensions", "tex"))},
+ {Stage(ast::PipelineStage::kFragment)});
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr("sampler2DMS tex;"));
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr("sampler2DMS tex;"));
}
enum class TextureDataType { F32, U32, I32 };
struct GlslSampledTextureData {
- ast::TextureDimension dim;
- TextureDataType datatype;
- std::string result;
+ ast::TextureDimension dim;
+ TextureDataType datatype;
+ std::string result;
};
-inline std::ostream& operator<<(std::ostream& out,
- GlslSampledTextureData data) {
- out << data.dim;
- return out;
+inline std::ostream& operator<<(std::ostream& out, GlslSampledTextureData data) {
+ out << data.dim;
+ return out;
}
using GlslSampledTexturesTest = TestParamHelper<GlslSampledTextureData>;
TEST_P(GlslSampledTexturesTest, Emit) {
- auto params = GetParam();
-
- const ast::Type* datatype = nullptr;
- switch (params.datatype) {
- case TextureDataType::F32:
- datatype = ty.f32();
- break;
- case TextureDataType::U32:
- datatype = ty.u32();
- break;
- case TextureDataType::I32:
- datatype = ty.i32();
- break;
- }
- auto* t = ty.sampled_texture(params.dim, datatype);
-
- Global("tex", t,
- ast::AttributeList{
- create<ast::BindingAttribute>(1),
- create<ast::GroupAttribute>(2),
- });
-
- Func("main", {}, ty.void_(), {CallStmt(Call("textureDimensions", "tex"))},
- {Stage(ast::PipelineStage::kFragment)});
-
- GeneratorImpl& gen = Build();
-
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr(params.result));
+ auto params = GetParam();
+
+ const ast::Type* datatype = nullptr;
+ switch (params.datatype) {
+ case TextureDataType::F32:
+ datatype = ty.f32();
+ break;
+ case TextureDataType::U32:
+ datatype = ty.u32();
+ break;
+ case TextureDataType::I32:
+ datatype = ty.i32();
+ break;
+ }
+ auto* t = ty.sampled_texture(params.dim, datatype);
+
+ Global("tex", t,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(1),
+ create<ast::GroupAttribute>(2),
+ });
+
+ Func("main", {}, ty.void_(), {CallStmt(Call("textureDimensions", "tex"))},
+ {Stage(ast::PipelineStage::kFragment)});
+
+ GeneratorImpl& gen = Build();
+
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr(params.result));
}
INSTANTIATE_TEST_SUITE_P(GlslGeneratorImplTest_Type,
GlslSampledTexturesTest,
@@ -461,78 +449,74 @@ INSTANTIATE_TEST_SUITE_P(GlslGeneratorImplTest_Type,
}));
TEST_F(GlslGeneratorImplTest_Type, EmitMultisampledTexture) {
- auto* f32 = create<sem::F32>();
- auto* s = create<sem::MultisampledTexture>(ast::TextureDimension::k2d, f32);
+ auto* f32 = create<sem::F32>();
+ auto* s = create<sem::MultisampledTexture>(ast::TextureDimension::k2d, f32);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitType(out, s, ast::StorageClass::kNone,
- ast::Access::kReadWrite, ""))
- << gen.error();
- EXPECT_EQ(out.str(), "highp sampler2DMS");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitType(out, s, ast::StorageClass::kNone, ast::Access::kReadWrite, ""))
+ << gen.error();
+ EXPECT_EQ(out.str(), "highp sampler2DMS");
}
struct GlslStorageTextureData {
- ast::TextureDimension dim;
- ast::TexelFormat imgfmt;
- std::string result;
+ ast::TextureDimension dim;
+ ast::TexelFormat imgfmt;
+ std::string result;
};
-inline std::ostream& operator<<(std::ostream& out,
- GlslStorageTextureData data) {
- return out << data.dim;
+inline std::ostream& operator<<(std::ostream& out, GlslStorageTextureData data) {
+ return out << data.dim;
}
using GlslStorageTexturesTest = TestParamHelper<GlslStorageTextureData>;
TEST_P(GlslStorageTexturesTest, Emit) {
- auto params = GetParam();
+ auto params = GetParam();
- auto* t = ty.storage_texture(params.dim, params.imgfmt, ast::Access::kWrite);
+ auto* t = ty.storage_texture(params.dim, params.imgfmt, ast::Access::kWrite);
- Global("tex", t,
- ast::AttributeList{
- create<ast::BindingAttribute>(1),
- create<ast::GroupAttribute>(2),
- });
+ Global("tex", t,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(1),
+ create<ast::GroupAttribute>(2),
+ });
- Func("main", {}, ty.void_(), {CallStmt(Call("textureDimensions", "tex"))},
- {Stage(ast::PipelineStage::kFragment)});
+ Func("main", {}, ty.void_(), {CallStmt(Call("textureDimensions", "tex"))},
+ {Stage(ast::PipelineStage::kFragment)});
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr(params.result));
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr(params.result));
}
INSTANTIATE_TEST_SUITE_P(
GlslGeneratorImplTest_Type,
GlslStorageTexturesTest,
- testing::Values(
- GlslStorageTextureData{ast::TextureDimension::k1d,
- ast::TexelFormat::kRgba8Unorm, "image1D tex;"},
- GlslStorageTextureData{ast::TextureDimension::k2d,
- ast::TexelFormat::kRgba16Float, "image2D tex;"},
- GlslStorageTextureData{ast::TextureDimension::k2dArray,
- ast::TexelFormat::kR32Float,
- "image2DArray tex;"},
- GlslStorageTextureData{ast::TextureDimension::k3d,
- ast::TexelFormat::kRg32Float, "image3D tex;"},
- GlslStorageTextureData{ast::TextureDimension::k1d,
- ast::TexelFormat::kRgba32Float, "image1D tex;"},
- GlslStorageTextureData{ast::TextureDimension::k2d,
- ast::TexelFormat::kRgba16Uint, "image2D tex;"},
- GlslStorageTextureData{ast::TextureDimension::k2dArray,
- ast::TexelFormat::kR32Uint, "image2DArray tex;"},
- GlslStorageTextureData{ast::TextureDimension::k3d,
- ast::TexelFormat::kRg32Uint, "image3D tex;"},
- GlslStorageTextureData{ast::TextureDimension::k1d,
- ast::TexelFormat::kRgba32Uint, "image1D tex;"},
- GlslStorageTextureData{ast::TextureDimension::k2d,
- ast::TexelFormat::kRgba16Sint, "image2D tex;"},
- GlslStorageTextureData{ast::TextureDimension::k2dArray,
- ast::TexelFormat::kR32Sint, "image2DArray tex;"},
- GlslStorageTextureData{ast::TextureDimension::k3d,
- ast::TexelFormat::kRg32Sint, "image3D tex;"},
- GlslStorageTextureData{ast::TextureDimension::k1d,
- ast::TexelFormat::kRgba32Sint, "image1D tex;"}));
+ testing::Values(GlslStorageTextureData{ast::TextureDimension::k1d,
+ ast::TexelFormat::kRgba8Unorm, "image1D tex;"},
+ GlslStorageTextureData{ast::TextureDimension::k2d,
+ ast::TexelFormat::kRgba16Float, "image2D tex;"},
+ GlslStorageTextureData{ast::TextureDimension::k2dArray,
+ ast::TexelFormat::kR32Float, "image2DArray tex;"},
+ GlslStorageTextureData{ast::TextureDimension::k3d, ast::TexelFormat::kRg32Float,
+ "image3D tex;"},
+ GlslStorageTextureData{ast::TextureDimension::k1d,
+ ast::TexelFormat::kRgba32Float, "image1D tex;"},
+ GlslStorageTextureData{ast::TextureDimension::k2d,
+ ast::TexelFormat::kRgba16Uint, "image2D tex;"},
+ GlslStorageTextureData{ast::TextureDimension::k2dArray,
+ ast::TexelFormat::kR32Uint, "image2DArray tex;"},
+ GlslStorageTextureData{ast::TextureDimension::k3d, ast::TexelFormat::kRg32Uint,
+ "image3D tex;"},
+ GlslStorageTextureData{ast::TextureDimension::k1d,
+ ast::TexelFormat::kRgba32Uint, "image1D tex;"},
+ GlslStorageTextureData{ast::TextureDimension::k2d,
+ ast::TexelFormat::kRgba16Sint, "image2D tex;"},
+ GlslStorageTextureData{ast::TextureDimension::k2dArray,
+ ast::TexelFormat::kR32Sint, "image2DArray tex;"},
+ GlslStorageTextureData{ast::TextureDimension::k3d, ast::TexelFormat::kRg32Sint,
+ "image3D tex;"},
+ GlslStorageTextureData{ast::TextureDimension::k1d,
+ ast::TexelFormat::kRgba32Sint, "image1D tex;"}));
} // namespace
} // namespace tint::writer::glsl
diff --git a/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_unary_op_test.cc b/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_unary_op_test.cc
index 2743ad18d18..0e318fcf8c3 100644
--- a/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_unary_op_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_unary_op_test.cc
@@ -20,70 +20,65 @@ namespace {
using GlslUnaryOpTest = TestHelper;
TEST_F(GlslUnaryOpTest, AddressOf) {
- Global("expr", ty.f32(), ast::StorageClass::kPrivate);
- auto* op =
- create<ast::UnaryOpExpression>(ast::UnaryOp::kAddressOf, Expr("expr"));
- WrapInFunction(op);
+ Global("expr", ty.f32(), ast::StorageClass::kPrivate);
+ auto* op = create<ast::UnaryOpExpression>(ast::UnaryOp::kAddressOf, Expr("expr"));
+ WrapInFunction(op);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, op)) << gen.error();
- EXPECT_EQ(out.str(), "expr");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, op)) << gen.error();
+ EXPECT_EQ(out.str(), "expr");
}
TEST_F(GlslUnaryOpTest, Complement) {
- Global("expr", ty.u32(), ast::StorageClass::kPrivate);
- auto* op =
- create<ast::UnaryOpExpression>(ast::UnaryOp::kComplement, Expr("expr"));
- WrapInFunction(op);
+ Global("expr", ty.u32(), ast::StorageClass::kPrivate);
+ auto* op = create<ast::UnaryOpExpression>(ast::UnaryOp::kComplement, Expr("expr"));
+ WrapInFunction(op);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, op)) << gen.error();
- EXPECT_EQ(out.str(), "~(expr)");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, op)) << gen.error();
+ EXPECT_EQ(out.str(), "~(expr)");
}
TEST_F(GlslUnaryOpTest, Indirection) {
- Global("G", ty.f32(), ast::StorageClass::kPrivate);
- auto* p = Const(
- "expr", nullptr,
- create<ast::UnaryOpExpression>(ast::UnaryOp::kAddressOf, Expr("G")));
- auto* op =
- create<ast::UnaryOpExpression>(ast::UnaryOp::kIndirection, Expr("expr"));
- WrapInFunction(p, op);
-
- GeneratorImpl& gen = Build();
-
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, op)) << gen.error();
- EXPECT_EQ(out.str(), "expr");
+ Global("G", ty.f32(), ast::StorageClass::kPrivate);
+ auto* p =
+ Let("expr", nullptr, create<ast::UnaryOpExpression>(ast::UnaryOp::kAddressOf, Expr("G")));
+ auto* op = create<ast::UnaryOpExpression>(ast::UnaryOp::kIndirection, Expr("expr"));
+ WrapInFunction(p, op);
+
+ GeneratorImpl& gen = Build();
+
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, op)) << gen.error();
+ EXPECT_EQ(out.str(), "expr");
}
TEST_F(GlslUnaryOpTest, Not) {
- Global("expr", ty.bool_(), ast::StorageClass::kPrivate);
- auto* op = create<ast::UnaryOpExpression>(ast::UnaryOp::kNot, Expr("expr"));
- WrapInFunction(op);
+ Global("expr", ty.bool_(), ast::StorageClass::kPrivate);
+ auto* op = create<ast::UnaryOpExpression>(ast::UnaryOp::kNot, Expr("expr"));
+ WrapInFunction(op);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, op)) << gen.error();
- EXPECT_EQ(out.str(), "!(expr)");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, op)) << gen.error();
+ EXPECT_EQ(out.str(), "!(expr)");
}
TEST_F(GlslUnaryOpTest, Negation) {
- Global("expr", ty.i32(), ast::StorageClass::kPrivate);
- auto* op =
- create<ast::UnaryOpExpression>(ast::UnaryOp::kNegation, Expr("expr"));
- WrapInFunction(op);
+ Global("expr", ty.i32(), ast::StorageClass::kPrivate);
+ auto* op = create<ast::UnaryOpExpression>(ast::UnaryOp::kNegation, Expr("expr"));
+ WrapInFunction(op);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, op)) << gen.error();
- EXPECT_EQ(out.str(), "-(expr)");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, op)) << gen.error();
+ EXPECT_EQ(out.str(), "-(expr)");
}
} // namespace
} // namespace tint::writer::glsl
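
Another change repeated across these writer tests: builder calls that previously spelled WGSL 'let' declarations as Const(...) now use Let(...), while Var(...) still produces 'var' declarations. A two-line sketch drawn from the hunks above; the WGSL in the comments is the assumed equivalent:

    auto* p = Let("expr", nullptr, AddressOf(Expr("G")));                     // let expr = &G;   (was Const)
    auto* v = Var("v", ty.vec4<f32>(), ast::StorageClass::kNone, Deref(vp));  // var v : vec4<f32> = *vp;
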
diff --git a/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_uniform_buffer_test.cc b/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_uniform_buffer_test.cc
index b7a5f888148..8709b3dadc6 100644
--- a/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_uniform_buffer_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_uniform_buffer_test.cc
@@ -23,14 +23,13 @@ namespace {
using GlslGeneratorImplTest_UniformBuffer = TestHelper;
TEST_F(GlslGeneratorImplTest_UniformBuffer, Simple) {
- auto* simple = Structure("Simple", {Member("member", ty.f32())});
- Global("simple", ty.Of(simple), ast::StorageClass::kUniform,
- GroupAndBinding(0, 0));
+ auto* simple = Structure("Simple", {Member("member", ty.f32())});
+ Global("simple", ty.Of(simple), ast::StorageClass::kUniform, GroupAndBinding(0, 0));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(#version 310 es
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(#version 310 es
struct Simple {
float member;
@@ -44,14 +43,13 @@ layout(binding = 0) uniform Simple_1 {
}
TEST_F(GlslGeneratorImplTest_UniformBuffer, Simple_Desktop) {
- auto* simple = Structure("Simple", {Member("member", ty.f32())});
- Global("simple", ty.Of(simple), ast::StorageClass::kUniform,
- GroupAndBinding(0, 0));
+ auto* simple = Structure("Simple", {Member("member", ty.f32())});
+ Global("simple", ty.Of(simple), ast::StorageClass::kUniform, GroupAndBinding(0, 0));
- GeneratorImpl& gen = Build(Version(Version::Standard::kDesktop, 4, 4));
+ GeneratorImpl& gen = Build(Version(Version::Standard::kDesktop, 4, 4));
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(#version 440
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(#version 440
struct Simple {
float member;
diff --git a/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_variable_decl_statement_test.cc b/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_variable_decl_statement_test.cc
index 29d5a410952..5d95bc66cd1 100644
--- a/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_variable_decl_statement_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_variable_decl_statement_test.cc
@@ -24,100 +24,95 @@ using ::testing::HasSubstr;
using GlslGeneratorImplTest_VariableDecl = TestHelper;
TEST_F(GlslGeneratorImplTest_VariableDecl, Emit_VariableDeclStatement) {
- auto* var = Var("a", ty.f32());
- auto* stmt = Decl(var);
- WrapInFunction(stmt);
+ auto* var = Var("a", ty.f32());
+ auto* stmt = Decl(var);
+ WrapInFunction(stmt);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(stmt)) << gen.error();
- EXPECT_EQ(gen.result(), " float a = 0.0f;\n");
+ ASSERT_TRUE(gen.EmitStatement(stmt)) << gen.error();
+ EXPECT_EQ(gen.result(), " float a = 0.0f;\n");
}
TEST_F(GlslGeneratorImplTest_VariableDecl, Emit_VariableDeclStatement_Const) {
- auto* var = Const("a", ty.f32(), Construct(ty.f32()));
- auto* stmt = Decl(var);
- WrapInFunction(stmt);
+ auto* var = Let("a", ty.f32(), Construct(ty.f32()));
+ auto* stmt = Decl(var);
+ WrapInFunction(stmt);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(stmt)) << gen.error();
- EXPECT_EQ(gen.result(), " float a = 0.0f;\n");
+ ASSERT_TRUE(gen.EmitStatement(stmt)) << gen.error();
+ EXPECT_EQ(gen.result(), " float a = 0.0f;\n");
}
TEST_F(GlslGeneratorImplTest_VariableDecl, Emit_VariableDeclStatement_Array) {
- auto* var = Var("a", ty.array<f32, 5>());
+ auto* var = Var("a", ty.array<f32, 5>());
- WrapInFunction(var, Expr("a"));
+ WrapInFunction(var, Expr("a"));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(
- gen.result(),
- HasSubstr(" float a[5] = float[5](0.0f, 0.0f, 0.0f, 0.0f, 0.0f);\n"));
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(),
+ HasSubstr(" float a[5] = float[5](0.0f, 0.0f, 0.0f, 0.0f, 0.0f);\n"));
}
TEST_F(GlslGeneratorImplTest_VariableDecl, Emit_VariableDeclStatement_Private) {
- Global("a", ty.f32(), ast::StorageClass::kPrivate);
+ Global("a", ty.f32(), ast::StorageClass::kPrivate);
- WrapInFunction(Expr("a"));
+ WrapInFunction(Expr("a"));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr(" float a = 0.0f;\n"));
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr(" float a = 0.0f;\n"));
}
-TEST_F(GlslGeneratorImplTest_VariableDecl,
- Emit_VariableDeclStatement_Initializer_Private) {
- Global("initializer", ty.f32(), ast::StorageClass::kPrivate);
- Global("a", ty.f32(), ast::StorageClass::kPrivate, Expr("initializer"));
+TEST_F(GlslGeneratorImplTest_VariableDecl, Emit_VariableDeclStatement_Initializer_Private) {
+ Global("initializer", ty.f32(), ast::StorageClass::kPrivate);
+ Global("a", ty.f32(), ast::StorageClass::kPrivate, Expr("initializer"));
- WrapInFunction(Expr("a"));
+ WrapInFunction(Expr("a"));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr(R"(float a = initializer;
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr(R"(float a = initializer;
)"));
}
-TEST_F(GlslGeneratorImplTest_VariableDecl,
- Emit_VariableDeclStatement_Initializer_ZeroVec) {
- auto* var = Var("a", ty.vec3<f32>(), ast::StorageClass::kNone, vec3<f32>());
+TEST_F(GlslGeneratorImplTest_VariableDecl, Emit_VariableDeclStatement_Initializer_ZeroVec) {
+ auto* var = Var("a", ty.vec3<f32>(), ast::StorageClass::kNone, vec3<f32>());
- auto* stmt = Decl(var);
- WrapInFunction(stmt);
+ auto* stmt = Decl(var);
+ WrapInFunction(stmt);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.EmitStatement(stmt)) << gen.error();
- EXPECT_EQ(gen.result(), R"(vec3 a = vec3(0.0f, 0.0f, 0.0f);
+ ASSERT_TRUE(gen.EmitStatement(stmt)) << gen.error();
+ EXPECT_EQ(gen.result(), R"(vec3 a = vec3(0.0f);
)");
}
-TEST_F(GlslGeneratorImplTest_VariableDecl,
- Emit_VariableDeclStatement_Initializer_ZeroMat) {
- auto* var =
- Var("a", ty.mat2x3<f32>(), ast::StorageClass::kNone, mat2x3<f32>());
+TEST_F(GlslGeneratorImplTest_VariableDecl, Emit_VariableDeclStatement_Initializer_ZeroMat) {
+ auto* var = Var("a", ty.mat2x3<f32>(), ast::StorageClass::kNone, mat2x3<f32>());
- auto* stmt = Decl(var);
- WrapInFunction(stmt);
+ auto* stmt = Decl(var);
+ WrapInFunction(stmt);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.EmitStatement(stmt)) << gen.error();
- EXPECT_EQ(gen.result(),
- R"(mat2x3 a = mat2x3(0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f);
+ ASSERT_TRUE(gen.EmitStatement(stmt)) << gen.error();
+ EXPECT_EQ(gen.result(),
+ R"(mat2x3 a = mat2x3(vec3(0.0f), vec3(0.0f));
)");
}
diff --git a/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_workgroup_var_test.cc b/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_workgroup_var_test.cc
index 9c5a51b519c..faa675dcded 100644
--- a/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_workgroup_var_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/glsl/generator_impl_workgroup_var_test.cc
@@ -17,40 +17,43 @@
#include "src/tint/ast/stage_attribute.h"
#include "src/tint/writer/glsl/test_helper.h"
+using ::testing::HasSubstr;
+
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::glsl {
namespace {
-using ::testing::HasSubstr;
using GlslGeneratorImplTest_WorkgroupVar = TestHelper;
TEST_F(GlslGeneratorImplTest_WorkgroupVar, Basic) {
- Global("wg", ty.f32(), ast::StorageClass::kWorkgroup);
+ Global("wg", ty.f32(), ast::StorageClass::kWorkgroup);
- Func("main", {}, ty.void_(), {Assign("wg", 1.2f)},
- {
- Stage(ast::PipelineStage::kCompute),
- WorkgroupSize(1),
- });
- GeneratorImpl& gen = Build();
+ Func("main", {}, ty.void_(), {Assign("wg", 1.2_f)},
+ {
+ Stage(ast::PipelineStage::kCompute),
+ WorkgroupSize(1_i),
+ });
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr("shared float wg;\n"));
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr("shared float wg;\n"));
}
TEST_F(GlslGeneratorImplTest_WorkgroupVar, Aliased) {
- auto* alias = Alias("F32", ty.f32());
+ auto* alias = Alias("F32", ty.f32());
- Global("wg", ty.Of(alias), ast::StorageClass::kWorkgroup);
+ Global("wg", ty.Of(alias), ast::StorageClass::kWorkgroup);
- Func("main", {}, ty.void_(), {Assign("wg", 1.2f)},
- {
- Stage(ast::PipelineStage::kCompute),
- WorkgroupSize(1),
- });
- GeneratorImpl& gen = Build();
+ Func("main", {}, ty.void_(), {Assign("wg", 1.2_f)},
+ {
+ Stage(ast::PipelineStage::kCompute),
+ WorkgroupSize(1_i),
+ });
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr("shared float wg;\n"));
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr("shared float wg;\n"));
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/writer/glsl/test_helper.h b/chromium/third_party/dawn/src/tint/writer/glsl/test_helper.h
index 7266d0a499f..2f806ce4f39 100644
--- a/chromium/third_party/dawn/src/tint/writer/glsl/test_helper.h
+++ b/chromium/third_party/dawn/src/tint/writer/glsl/test_helper.h
@@ -28,72 +28,66 @@ namespace tint::writer::glsl {
/// Helper class for testing
template <typename BODY>
class TestHelperBase : public BODY, public ProgramBuilder {
- public:
- TestHelperBase() = default;
- ~TestHelperBase() override = default;
+ public:
+ TestHelperBase() = default;
+ ~TestHelperBase() override = default;
- /// Builds the program and returns a GeneratorImpl from the program.
- /// @note The generator is only built once. Multiple calls to Build() will
- /// return the same GeneratorImpl without rebuilding.
- /// @param version the GLSL version
- /// @return the built generator
- GeneratorImpl& Build(Version version = Version()) {
- if (gen_) {
- return *gen_;
+ /// Builds the program and returns a GeneratorImpl from the program.
+ /// @note The generator is only built once. Multiple calls to Build() will
+ /// return the same GeneratorImpl without rebuilding.
+ /// @param version the GLSL version
+ /// @return the built generator
+ GeneratorImpl& Build(Version version = Version()) {
+ if (gen_) {
+ return *gen_;
+ }
+ [&]() {
+ ASSERT_TRUE(IsValid()) << "Builder program is not valid\n"
+ << diag::Formatter().format(Diagnostics());
+ }();
+ program = std::make_unique<Program>(std::move(*this));
+ [&]() {
+ ASSERT_TRUE(program->IsValid()) << diag::Formatter().format(program->Diagnostics());
+ }();
+ gen_ = std::make_unique<GeneratorImpl>(program.get(), version);
+ return *gen_;
}
- [&]() {
- ASSERT_TRUE(IsValid()) << "Builder program is not valid\n"
- << diag::Formatter().format(Diagnostics());
- }();
- program = std::make_unique<Program>(std::move(*this));
- [&]() {
- ASSERT_TRUE(program->IsValid())
- << diag::Formatter().format(program->Diagnostics());
- }();
- gen_ = std::make_unique<GeneratorImpl>(program.get(), version);
- return *gen_;
- }
- /// Builds the program, runs the program through the transform::Glsl sanitizer
- /// and returns a GeneratorImpl from the sanitized program.
- /// @note The generator is only built once. Multiple calls to Build() will
- /// return the same GeneratorImpl without rebuilding.
- /// @param version the GLSL version
- /// @param options the GLSL backend options
- /// @return the built generator
- GeneratorImpl& SanitizeAndBuild(Version version = Version(),
- const Options& options = {}) {
- if (gen_) {
- return *gen_;
- }
- diag::Formatter formatter;
- [&]() {
- ASSERT_TRUE(IsValid()) << "Builder program is not valid\n"
- << formatter.format(Diagnostics());
- }();
- program = std::make_unique<Program>(std::move(*this));
- [&]() {
- ASSERT_TRUE(program->IsValid())
- << formatter.format(program->Diagnostics());
- }();
+ /// Builds the program, runs the program through the transform::Glsl sanitizer
+ /// and returns a GeneratorImpl from the sanitized program.
+ /// @note The generator is only built once. Multiple calls to Build() will
+ /// return the same GeneratorImpl without rebuilding.
+ /// @param version the GLSL version
+ /// @param options the GLSL backend options
+ /// @return the built generator
+ GeneratorImpl& SanitizeAndBuild(Version version = Version(), const Options& options = {}) {
+ if (gen_) {
+ return *gen_;
+ }
+ diag::Formatter formatter;
+ [&]() {
+ ASSERT_TRUE(IsValid()) << "Builder program is not valid\n"
+ << formatter.format(Diagnostics());
+ }();
+ program = std::make_unique<Program>(std::move(*this));
+ [&]() { ASSERT_TRUE(program->IsValid()) << formatter.format(program->Diagnostics()); }();
- auto sanitized_result =
- Sanitize(program.get(), options, /* entry_point */ "");
- [&]() {
- ASSERT_TRUE(sanitized_result.program.IsValid())
- << formatter.format(sanitized_result.program.Diagnostics());
- }();
+ auto sanitized_result = Sanitize(program.get(), options, /* entry_point */ "");
+ [&]() {
+ ASSERT_TRUE(sanitized_result.program.IsValid())
+ << formatter.format(sanitized_result.program.Diagnostics());
+ }();
- *program = std::move(sanitized_result.program);
- gen_ = std::make_unique<GeneratorImpl>(program.get(), version);
- return *gen_;
- }
+ *program = std::move(sanitized_result.program);
+ gen_ = std::make_unique<GeneratorImpl>(program.get(), version);
+ return *gen_;
+ }
- /// The program built with a call to Build()
- std::unique_ptr<Program> program;
+ /// The program built with a call to Build()
+ std::unique_ptr<Program> program;
- private:
- std::unique_ptr<GeneratorImpl> gen_;
+ private:
+ std::unique_ptr<GeneratorImpl> gen_;
};
using TestHelper = TestHelperBase<testing::Test>;
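
The doc comments above spell out the contract the GLSL writer tests rely on: Build() and SanitizeAndBuild() construct the Program and GeneratorImpl once, and any later call returns that same generator without rebuilding. A minimal usage sketch under that contract, composed only of fixture calls already shown in this diff:

    TEST_F(GlslGeneratorImplTest, Sketch) {
        // ProgramBuilder half of the fixture: declare the AST first.
        Func("my_func", ast::VariableList{}, ty.void_(), ast::StatementList{}, ast::AttributeList{});

        // Generator half: the first call moves the builder into a Program and
        // creates the GeneratorImpl; later calls return the same instance.
        GeneratorImpl& gen = Build();

        ASSERT_TRUE(gen.Generate()) << gen.error();
        // gen.result() now holds the emitted GLSL ("#version 310 es" by default).
    }
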
diff --git a/chromium/third_party/dawn/src/tint/writer/glsl/version.h b/chromium/third_party/dawn/src/tint/writer/glsl/version.h
index 63888a487a3..ee33c16b659 100644
--- a/chromium/third_party/dawn/src/tint/writer/glsl/version.h
+++ b/chromium/third_party/dawn/src/tint/writer/glsl/version.h
@@ -21,36 +21,36 @@ namespace tint::writer::glsl {
/// A structure representing the version of GLSL to be generated.
struct Version {
- /// Is this version desktop GLSL, or GLSL ES?
- enum class Standard {
- kDesktop,
- kES,
- };
+ /// Is this version desktop GLSL, or GLSL ES?
+ enum class Standard {
+ kDesktop,
+ kES,
+ };
- /// Constructor
- /// @param standard_ Desktop or ES
- /// @param major_ the major version
- /// @param minor_ the minor version
- Version(Standard standard_, uint32_t major_, uint32_t minor_)
- : standard(standard_), major_version(major_), minor_version(minor_) {}
+ /// Constructor
+ /// @param standard_ Desktop or ES
+ /// @param major_ the major version
+ /// @param minor_ the minor version
+ Version(Standard standard_, uint32_t major_, uint32_t minor_)
+ : standard(standard_), major_version(major_), minor_version(minor_) {}
- /// Default constructor (see default values below)
- Version() = default;
+ /// Default constructor (see default values below)
+ Version() = default;
- /// @returns true if this version is GLSL ES
- bool IsES() const { return standard == Standard::kES; }
+ /// @returns true if this version is GLSL ES
+ bool IsES() const { return standard == Standard::kES; }
- /// @returns true if this version is Desktop GLSL
- bool IsDesktop() const { return standard == Standard::kDesktop; }
+ /// @returns true if this version is Desktop GLSL
+ bool IsDesktop() const { return standard == Standard::kDesktop; }
- /// Desktop or ES
- Standard standard = Standard::kES;
+ /// Desktop or ES
+ Standard standard = Standard::kES;
- /// Major GLSL version
- uint32_t major_version = 3;
+ /// Major GLSL version
+ uint32_t major_version = 3;
- /// Minor GLSL version
- uint32_t minor_version = 1;
+ /// Minor GLSL version
+ uint32_t minor_version = 1;
};
} // namespace tint::writer::glsl
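
Given the defaults above (Standard::kES, major 3, minor 1), a default-constructed Version is what produces the "#version 310 es" header seen in most GLSL test expectations, while desktop output needs an explicit version. A short sketch assuming only the constructor and fields shown above:

    Version es;                                          // kES 3.1  -> "#version 310 es"
    Version desktop(Version::Standard::kDesktop, 4, 4);  // kDesktop -> "#version 440"

    GeneratorImpl& gen = Build(desktop);                 // as in the *_Desktop tests above
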
diff --git a/chromium/third_party/dawn/src/tint/writer/hlsl/generator.cc b/chromium/third_party/dawn/src/tint/writer/hlsl/generator.cc
index 682d3711f19..b514c2b641e 100644
--- a/chromium/third_party/dawn/src/tint/writer/hlsl/generator.cc
+++ b/chromium/third_party/dawn/src/tint/writer/hlsl/generator.cc
@@ -28,34 +28,38 @@ Result::~Result() = default;
Result::Result(const Result&) = default;
Result Generate(const Program* program, const Options& options) {
- Result result;
+ Result result;
+ if (!program->IsValid()) {
+ result.error = "input program is not valid";
+ return result;
+ }
- // Sanitize the program.
- auto sanitized_result = Sanitize(program, options);
- if (!sanitized_result.program.IsValid()) {
- result.success = false;
- result.error = sanitized_result.program.Diagnostics().str();
- return result;
- }
-
- // Generate the HLSL code.
- auto impl = std::make_unique<GeneratorImpl>(&sanitized_result.program);
- result.success = impl->Generate();
- result.error = impl->error();
- result.hlsl = impl->result();
-
- // Collect the list of entry points in the sanitized program.
- for (auto* func : sanitized_result.program.AST().Functions()) {
- if (func->IsEntryPoint()) {
- auto name = sanitized_result.program.Symbols().NameFor(func->symbol);
- result.entry_points.push_back({name, func->PipelineStage()});
+ // Sanitize the program.
+ auto sanitized_result = Sanitize(program, options);
+ if (!sanitized_result.program.IsValid()) {
+ result.success = false;
+ result.error = sanitized_result.program.Diagnostics().str();
+ return result;
+ }
+
+ // Generate the HLSL code.
+ auto impl = std::make_unique<GeneratorImpl>(&sanitized_result.program);
+ result.success = impl->Generate();
+ result.error = impl->error();
+ result.hlsl = impl->result();
+
+ // Collect the list of entry points in the sanitized program.
+ for (auto* func : sanitized_result.program.AST().Functions()) {
+ if (func->IsEntryPoint()) {
+ auto name = sanitized_result.program.Symbols().NameFor(func->symbol);
+ result.entry_points.push_back({name, func->PipelineStage()});
+ }
}
- }
- result.used_array_length_from_uniform_indices =
- std::move(sanitized_result.used_array_length_from_uniform_indices);
+ result.used_array_length_from_uniform_indices =
+ std::move(sanitized_result.used_array_length_from_uniform_indices);
- return result;
+ return result;
}
} // namespace tint::writer::hlsl
diff --git a/chromium/third_party/dawn/src/tint/writer/hlsl/generator.h b/chromium/third_party/dawn/src/tint/writer/hlsl/generator.h
index beb2a886b98..a18687a709f 100644
--- a/chromium/third_party/dawn/src/tint/writer/hlsl/generator.h
+++ b/chromium/third_party/dawn/src/tint/writer/hlsl/generator.h
@@ -16,6 +16,7 @@
#define SRC_TINT_WRITER_HLSL_GENERATOR_H_
#include <memory>
+#include <optional> // NOLINT(build/include_order)
#include <string>
#include <unordered_set>
#include <utility>
@@ -30,64 +31,61 @@
namespace tint {
class Program;
} // namespace tint
-namespace tint::writer::hlsl {
-class GeneratorImpl;
-} // namespace tint::writer::hlsl
namespace tint::writer::hlsl {
/// Configuration options used for generating HLSL.
struct Options {
- /// Constructor
- Options();
- /// Destructor
- ~Options();
- /// Copy constructor
- Options(const Options&);
- /// Copy assignment
- /// @returns this Options
- Options& operator=(const Options&);
-
- /// The binding point to use for information passed via root constants.
- sem::BindingPoint root_constant_binding_point;
- /// Set to `true` to disable workgroup memory zero initialization
- bool disable_workgroup_init = false;
-  /// Set to 'true' to generate binding mappings for external textures
- bool generate_external_texture_bindings = false;
- /// Options used to specify a mapping of binding points to indices into a UBO
- /// from which to load buffer sizes.
- ArrayLengthFromUniformOptions array_length_from_uniform = {};
-
- // NOTE: Update src/tint/fuzzers/data_builder.h when adding or changing any
- // struct members.
+ /// Constructor
+ Options();
+ /// Destructor
+ ~Options();
+ /// Copy constructor
+ Options(const Options&);
+ /// Copy assignment
+ /// @returns this Options
+ Options& operator=(const Options&);
+
+ /// The binding point to use for information passed via root constants.
+ std::optional<sem::BindingPoint> root_constant_binding_point;
+ /// Set to `true` to disable workgroup memory zero initialization
+ bool disable_workgroup_init = false;
+    /// Set to 'true' to generate binding mappings for external textures
+ bool generate_external_texture_bindings = false;
+ /// Options used to specify a mapping of binding points to indices into a UBO
+ /// from which to load buffer sizes.
+ ArrayLengthFromUniformOptions array_length_from_uniform = {};
+
+ // NOTE: Update src/tint/fuzzers/data_builder.h when adding or changing any
+ // struct members.
};
/// The result produced when generating HLSL.
struct Result {
- /// Constructor
- Result();
+ /// Constructor
+ Result();
- /// Destructor
- ~Result();
+ /// Destructor
+ ~Result();
- /// Copy constructor
- Result(const Result&);
+ /// Copy constructor
+ Result(const Result&);
- /// True if generation was successful.
- bool success = false;
+ /// True if generation was successful.
+ bool success = false;
- /// The errors generated during code generation, if any.
- std::string error;
+ /// The errors generated during code generation, if any.
+ std::string error;
- /// The generated HLSL.
- std::string hlsl = "";
+ /// The generated HLSL.
+ std::string hlsl = "";
- /// The list of entry points in the generated HLSL.
- std::vector<std::pair<std::string, ast::PipelineStage>> entry_points;
+ /// The list of entry points in the generated HLSL.
+ std::vector<std::pair<std::string, ast::PipelineStage>> entry_points;
- /// Indices into the array_length_from_uniform binding that are statically
- /// used.
- std::unordered_set<uint32_t> used_array_length_from_uniform_indices;
+ /// Indices into the array_length_from_uniform binding that are statically
+ /// used.
+ std::unordered_set<uint32_t> used_array_length_from_uniform_indices;
};
/// Generate HLSL for a program, according to a set of configuration options.
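
With root_constant_binding_point now a std::optional, callers may leave it unset rather than passing a zero-valued binding point. A hedged sketch of driving the HLSL writer with the types declared above; the aggregate initialization of sem::BindingPoint from a group/binding pair is an assumption not shown in this diff:

    Options options;
    options.root_constant_binding_point = sem::BindingPoint{0, 0};  // optional; omit to leave unbound
    options.disable_workgroup_init = true;

    // 'program' is a tint::Program built and validated elsewhere.
    Result result = Generate(&program, options);
    if (!result.success) {
        // result.error carries the sanitizer or generator diagnostics.
    }
    // On success: result.hlsl, result.entry_points and
    // result.used_array_length_from_uniform_indices hold the outputs listed above.
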
diff --git a/chromium/third_party/dawn/src/tint/writer/hlsl/generator_bench.cc b/chromium/third_party/dawn/src/tint/writer/hlsl/generator_bench.cc
index 27b605bba4e..4567e2d3e45 100644
--- a/chromium/third_party/dawn/src/tint/writer/hlsl/generator_bench.cc
+++ b/chromium/third_party/dawn/src/tint/writer/hlsl/generator_bench.cc
@@ -20,18 +20,18 @@ namespace tint::writer::hlsl {
namespace {
void GenerateHLSL(benchmark::State& state, std::string input_name) {
- auto res = bench::LoadProgram(input_name);
- if (auto err = std::get_if<bench::Error>(&res)) {
- state.SkipWithError(err->msg.c_str());
- return;
- }
- auto& program = std::get<bench::ProgramAndFile>(res).program;
- for (auto _ : state) {
- auto res = Generate(&program, {});
- if (!res.error.empty()) {
- state.SkipWithError(res.error.c_str());
+ auto res = bench::LoadProgram(input_name);
+ if (auto err = std::get_if<bench::Error>(&res)) {
+ state.SkipWithError(err->msg.c_str());
+ return;
+ }
+ auto& program = std::get<bench::ProgramAndFile>(res).program;
+ for (auto _ : state) {
+ auto res = Generate(&program, {});
+ if (!res.error.empty()) {
+ state.SkipWithError(res.error.c_str());
+ }
}
- }
}
TINT_BENCHMARK_WGSL_PROGRAMS(GenerateHLSL);
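For reference, a minimal sketch of the same entry point outside the benchmark harness, mirroring the Generate(&program, {}) call above and reading the Result fields declared in generator.h; `program` is assumed to be a valid, resolved tint::Program.

#include <string>

#include "src/tint/writer/hlsl/generator.h"

// Sketch only: calling the public Generate() entry point exercised by the
// benchmark above.
bool EmitHlsl(const tint::Program& program, std::string* out_hlsl) {
    tint::writer::hlsl::Options options;  // defaults are enough for a sketch
    auto result = tint::writer::hlsl::Generate(&program, options);
    if (!result.success) {
        // result.error carries the diagnostics produced during generation.
        return false;
    }
    *out_hlsl = result.hlsl;
    return true;
}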
diff --git a/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl.cc b/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl.cc
index 008f184c50d..19af4fad521 100644
--- a/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl.cc
+++ b/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl.cc
@@ -30,18 +30,19 @@
#include "src/tint/ast/variable_decl_statement.h"
#include "src/tint/debug.h"
#include "src/tint/sem/array.h"
-#include "src/tint/sem/atomic_type.h"
+#include "src/tint/sem/atomic.h"
#include "src/tint/sem/block_statement.h"
#include "src/tint/sem/call.h"
-#include "src/tint/sem/depth_multisampled_texture_type.h"
-#include "src/tint/sem/depth_texture_type.h"
+#include "src/tint/sem/constant.h"
+#include "src/tint/sem/depth_multisampled_texture.h"
+#include "src/tint/sem/depth_texture.h"
#include "src/tint/sem/function.h"
#include "src/tint/sem/member_accessor_expression.h"
#include "src/tint/sem/module.h"
-#include "src/tint/sem/multisampled_texture_type.h"
-#include "src/tint/sem/sampled_texture_type.h"
+#include "src/tint/sem/multisampled_texture.h"
+#include "src/tint/sem/sampled_texture.h"
#include "src/tint/sem/statement.h"
-#include "src/tint/sem/storage_texture_type.h"
+#include "src/tint/sem/storage_texture.h"
#include "src/tint/sem/struct.h"
#include "src/tint/sem/type_constructor.h"
#include "src/tint/sem/type_conversion.h"
@@ -52,6 +53,7 @@
#include "src/tint/transform/calculate_array_length.h"
#include "src/tint/transform/canonicalize_entry_point_io.h"
#include "src/tint/transform/decompose_memory_access.h"
+#include "src/tint/transform/disable_uniformity_analysis.h"
#include "src/tint/transform/expand_compound_assignment.h"
#include "src/tint/transform/fold_trivial_single_use_lets.h"
#include "src/tint/transform/localize_struct_array_assignment.h"
@@ -65,6 +67,7 @@
#include "src/tint/transform/simplify_pointers.h"
#include "src/tint/transform/unshadow.h"
#include "src/tint/transform/unwind_discard_functions.h"
+#include "src/tint/transform/vectorize_scalar_matrix_constructors.h"
#include "src/tint/transform/zero_init_workgroup_memory.h"
#include "src/tint/utils/defer.h"
#include "src/tint/utils/map.h"
@@ -73,6 +76,8 @@
#include "src/tint/writer/float_to_string.h"
#include "src/tint/writer/generate_external_texture_bindings.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::hlsl {
namespace {
@@ -80,52 +85,63 @@ const char kTempNamePrefix[] = "tint_tmp";
const char kSpecConstantPrefix[] = "WGSL_SPEC_CONSTANT_";
const char* image_format_to_rwtexture_type(ast::TexelFormat image_format) {
- switch (image_format) {
- case ast::TexelFormat::kRgba8Unorm:
- case ast::TexelFormat::kRgba8Snorm:
- case ast::TexelFormat::kRgba16Float:
- case ast::TexelFormat::kR32Float:
- case ast::TexelFormat::kRg32Float:
- case ast::TexelFormat::kRgba32Float:
- return "float4";
- case ast::TexelFormat::kRgba8Uint:
- case ast::TexelFormat::kRgba16Uint:
- case ast::TexelFormat::kR32Uint:
- case ast::TexelFormat::kRg32Uint:
- case ast::TexelFormat::kRgba32Uint:
- return "uint4";
- case ast::TexelFormat::kRgba8Sint:
- case ast::TexelFormat::kRgba16Sint:
- case ast::TexelFormat::kR32Sint:
- case ast::TexelFormat::kRg32Sint:
- case ast::TexelFormat::kRgba32Sint:
- return "int4";
- default:
- return nullptr;
- }
+ switch (image_format) {
+ case ast::TexelFormat::kRgba8Unorm:
+ case ast::TexelFormat::kRgba8Snorm:
+ case ast::TexelFormat::kRgba16Float:
+ case ast::TexelFormat::kR32Float:
+ case ast::TexelFormat::kRg32Float:
+ case ast::TexelFormat::kRgba32Float:
+ return "float4";
+ case ast::TexelFormat::kRgba8Uint:
+ case ast::TexelFormat::kRgba16Uint:
+ case ast::TexelFormat::kR32Uint:
+ case ast::TexelFormat::kRg32Uint:
+ case ast::TexelFormat::kRgba32Uint:
+ return "uint4";
+ case ast::TexelFormat::kRgba8Sint:
+ case ast::TexelFormat::kRgba16Sint:
+ case ast::TexelFormat::kR32Sint:
+ case ast::TexelFormat::kRg32Sint:
+ case ast::TexelFormat::kRgba32Sint:
+ return "int4";
+ default:
+ return nullptr;
+ }
+}
+
+void PrintF32(std::ostream& out, float value) {
+    // Note: Currently inf and nan should not be constructible, but this is implemented for the day
+ // we support them.
+ if (std::isinf(value)) {
+ out << (value >= 0 ? "asfloat(0x7f800000u)" : "asfloat(0xff800000u)");
+ } else if (std::isnan(value)) {
+ out << "asfloat(0x7fc00000u)";
+ } else {
+ out << FloatToString(value) << "f";
+ }
}
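The hexadecimal constants above are the standard IEEE-754 binary32 encodings; a small host-side sketch that checks them (assumes a 32-bit float and 32-bit uint32_t, which the memcpy-based bit cast relies on):

#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstring>

// Sketch only: verifying the bit patterns emitted by PrintF32 above.
// 0x7f800000 is +infinity, 0xff800000 is -infinity and 0x7fc00000 is a quiet
// NaN in IEEE-754 binary32.
static float FloatFromBits(uint32_t bits) {
    static_assert(sizeof(float) == sizeof(uint32_t), "expects 32-bit float");
    float f;
    std::memcpy(&f, &bits, sizeof(f));
    return f;
}

static void CheckPrintF32BitPatterns() {
    assert(std::isinf(FloatFromBits(0x7f800000u)) && FloatFromBits(0x7f800000u) > 0.0f);
    assert(std::isinf(FloatFromBits(0xff800000u)) && FloatFromBits(0xff800000u) < 0.0f);
    assert(std::isnan(FloatFromBits(0x7fc00000u)));
}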
// Helper for writing " : register(RX, spaceY)", where R is the register, X is
// the binding point binding value, and Y is the binding point group value.
struct RegisterAndSpace {
- RegisterAndSpace(char r, ast::VariableBindingPoint bp)
- : reg(r), binding_point(bp) {}
+ RegisterAndSpace(char r, ast::VariableBindingPoint bp) : reg(r), binding_point(bp) {}
- const char reg;
- ast::VariableBindingPoint const binding_point;
+ const char reg;
+ ast::VariableBindingPoint const binding_point;
};
std::ostream& operator<<(std::ostream& s, const RegisterAndSpace& rs) {
- s << " : register(" << rs.reg << rs.binding_point.binding->value << ", space"
- << rs.binding_point.group->value << ")";
- return s;
+ s << " : register(" << rs.reg << rs.binding_point.binding->value << ", space"
+ << rs.binding_point.group->value << ")";
+ return s;
}
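For illustration, the annotation written by the operator above has the following shape when appended to a resource declaration; the register class 't', binding 2 and group 1 are hypothetical example values.

// Sketch only: the text produced by RegisterAndSpace above for a hypothetical
// texture binding (register class 't', binding 2, group 1).
constexpr const char* kExampleRegisterAnnotation =
    "Texture2D<float4> my_texture : register(t2, space1);";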
const char* LoopAttribute() {
- // Force loops not to be unrolled to work around FXC compilation issues when
- // it attempts and fails to unroll loops when it contains gradient operations.
- // https://docs.microsoft.com/en-us/windows/win32/direct3dhlsl/dx-graphics-hlsl-while
- return "[loop] ";
+    // Force loops not to be unrolled, to work around FXC compilation issues when
+    // it attempts and fails to unroll loops that contain gradient operations.
+ // https://docs.microsoft.com/en-us/windows/win32/direct3dhlsl/dx-graphics-hlsl-while
+ return "[loop] ";
}
} // namespace
@@ -135,105 +151,104 @@ SanitizedResult::~SanitizedResult() = default;
SanitizedResult::SanitizedResult(SanitizedResult&&) = default;
SanitizedResult Sanitize(const Program* in, const Options& options) {
- transform::Manager manager;
- transform::DataMap data;
-
- { // Builtin polyfills
- transform::BuiltinPolyfill::Builtins polyfills;
- // TODO(crbug.com/tint/1449): Some of these can map to HLSL's `firstbitlow`
- // and `firstbithigh`.
- polyfills.count_leading_zeros = true;
- polyfills.count_trailing_zeros = true;
- polyfills.extract_bits = transform::BuiltinPolyfill::Level::kFull;
- polyfills.first_leading_bit = true;
- polyfills.first_trailing_bit = true;
- polyfills.insert_bits = transform::BuiltinPolyfill::Level::kFull;
- data.Add<transform::BuiltinPolyfill::Config>(polyfills);
- manager.Add<transform::BuiltinPolyfill>();
- }
-
- // Build the config for the internal ArrayLengthFromUniform transform.
- auto& array_length_from_uniform = options.array_length_from_uniform;
- transform::ArrayLengthFromUniform::Config array_length_from_uniform_cfg(
- array_length_from_uniform.ubo_binding);
- array_length_from_uniform_cfg.bindpoint_to_size_index =
- array_length_from_uniform.bindpoint_to_size_index;
-
- if (options.generate_external_texture_bindings) {
- auto new_bindings_map = GenerateExternalTextureBindings(in);
- data.Add<transform::MultiplanarExternalTexture::NewBindingPoints>(
- new_bindings_map);
- }
- manager.Add<transform::MultiplanarExternalTexture>();
-
- manager.Add<transform::Unshadow>();
-
- // LocalizeStructArrayAssignment must come after:
- // * SimplifyPointers, because it assumes assignment to arrays in structs are
- // done directly, not indirectly.
- // TODO(crbug.com/tint/1340): See if we can get rid of the duplicate
- // SimplifyPointers transform. Can't do it right now because
- // LocalizeStructArrayAssignment introduces pointers.
- manager.Add<transform::SimplifyPointers>();
- manager.Add<transform::LocalizeStructArrayAssignment>();
-
- // Attempt to convert `loop`s into for-loops. This is to try and massage the
- // output into something that will not cause FXC to choke or misbehave.
- manager.Add<transform::FoldTrivialSingleUseLets>();
- manager.Add<transform::LoopToForLoop>();
-
- if (!options.disable_workgroup_init) {
- // ZeroInitWorkgroupMemory must come before CanonicalizeEntryPointIO as
- // ZeroInitWorkgroupMemory may inject new builtin parameters.
- manager.Add<transform::ZeroInitWorkgroupMemory>();
- }
- manager.Add<transform::CanonicalizeEntryPointIO>();
- // NumWorkgroupsFromUniform must come after CanonicalizeEntryPointIO, as it
- // assumes that num_workgroups builtins only appear as struct members and are
- // only accessed directly via member accessors.
- manager.Add<transform::NumWorkgroupsFromUniform>();
- manager.Add<transform::ExpandCompoundAssignment>();
- manager.Add<transform::PromoteSideEffectsToDecl>();
- manager.Add<transform::UnwindDiscardFunctions>();
- manager.Add<transform::SimplifyPointers>();
- manager.Add<transform::RemovePhonies>();
- // ArrayLengthFromUniform must come after InlinePointerLets and Simplify, as
- // it assumes that the form of the array length argument is &var.array.
- manager.Add<transform::ArrayLengthFromUniform>();
- data.Add<transform::ArrayLengthFromUniform::Config>(
- std::move(array_length_from_uniform_cfg));
- // DecomposeMemoryAccess must come after:
- // * InlinePointerLets, as we cannot take the address of calls to
- // DecomposeMemoryAccess::Intrinsic.
- // * Simplify, as we need to fold away the address-of and dereferences of
- // `*(&(intrinsic_load()))` expressions.
- // * RemovePhonies, as phonies can be assigned a pointer to a
- // non-constructible buffer, or dynamic array, which DMA cannot cope with.
- manager.Add<transform::DecomposeMemoryAccess>();
- // CalculateArrayLength must come after DecomposeMemoryAccess, as
- // DecomposeMemoryAccess special-cases the arrayLength() intrinsic, which
- // will be transformed by CalculateArrayLength
- manager.Add<transform::CalculateArrayLength>();
- manager.Add<transform::PromoteInitializersToConstVar>();
-
- manager.Add<transform::RemoveContinueInSwitch>();
-
- manager.Add<transform::AddEmptyEntryPoint>();
-
- data.Add<transform::CanonicalizeEntryPointIO::Config>(
- transform::CanonicalizeEntryPointIO::ShaderStyle::kHlsl);
- data.Add<transform::NumWorkgroupsFromUniform::Config>(
- options.root_constant_binding_point);
-
- auto out = manager.Run(in, data);
-
- SanitizedResult result;
- result.program = std::move(out.program);
- if (auto* res = out.data.Get<transform::ArrayLengthFromUniform::Result>()) {
- result.used_array_length_from_uniform_indices =
- std::move(res->used_size_indices);
- }
- return result;
+ transform::Manager manager;
+ transform::DataMap data;
+
+ manager.Add<transform::DisableUniformityAnalysis>();
+
+ { // Builtin polyfills
+ transform::BuiltinPolyfill::Builtins polyfills;
+ // TODO(crbug.com/tint/1449): Some of these can map to HLSL's `firstbitlow`
+ // and `firstbithigh`.
+ polyfills.count_leading_zeros = true;
+ polyfills.count_trailing_zeros = true;
+ polyfills.extract_bits = transform::BuiltinPolyfill::Level::kFull;
+ polyfills.first_leading_bit = true;
+ polyfills.first_trailing_bit = true;
+ polyfills.insert_bits = transform::BuiltinPolyfill::Level::kFull;
+ data.Add<transform::BuiltinPolyfill::Config>(polyfills);
+ manager.Add<transform::BuiltinPolyfill>();
+ }
+
+ // Build the config for the internal ArrayLengthFromUniform transform.
+ auto& array_length_from_uniform = options.array_length_from_uniform;
+ transform::ArrayLengthFromUniform::Config array_length_from_uniform_cfg(
+ array_length_from_uniform.ubo_binding);
+ array_length_from_uniform_cfg.bindpoint_to_size_index =
+ array_length_from_uniform.bindpoint_to_size_index;
+
+ if (options.generate_external_texture_bindings) {
+ auto new_bindings_map = GenerateExternalTextureBindings(in);
+ data.Add<transform::MultiplanarExternalTexture::NewBindingPoints>(new_bindings_map);
+ }
+ manager.Add<transform::MultiplanarExternalTexture>();
+
+ manager.Add<transform::Unshadow>();
+
+ // LocalizeStructArrayAssignment must come after:
+    // * SimplifyPointers, because it assumes assignments to arrays in structs are
+ // done directly, not indirectly.
+ // TODO(crbug.com/tint/1340): See if we can get rid of the duplicate
+ // SimplifyPointers transform. Can't do it right now because
+ // LocalizeStructArrayAssignment introduces pointers.
+ manager.Add<transform::SimplifyPointers>();
+ manager.Add<transform::LocalizeStructArrayAssignment>();
+
+ // Attempt to convert `loop`s into for-loops. This is to try and massage the
+ // output into something that will not cause FXC to choke or misbehave.
+ manager.Add<transform::FoldTrivialSingleUseLets>();
+ manager.Add<transform::LoopToForLoop>();
+
+ if (!options.disable_workgroup_init) {
+ // ZeroInitWorkgroupMemory must come before CanonicalizeEntryPointIO as
+ // ZeroInitWorkgroupMemory may inject new builtin parameters.
+ manager.Add<transform::ZeroInitWorkgroupMemory>();
+ }
+ manager.Add<transform::CanonicalizeEntryPointIO>();
+ // NumWorkgroupsFromUniform must come after CanonicalizeEntryPointIO, as it
+ // assumes that num_workgroups builtins only appear as struct members and are
+ // only accessed directly via member accessors.
+ manager.Add<transform::NumWorkgroupsFromUniform>();
+ manager.Add<transform::ExpandCompoundAssignment>();
+ manager.Add<transform::PromoteSideEffectsToDecl>();
+ manager.Add<transform::UnwindDiscardFunctions>();
+ manager.Add<transform::VectorizeScalarMatrixConstructors>();
+ manager.Add<transform::SimplifyPointers>();
+ manager.Add<transform::RemovePhonies>();
+ // ArrayLengthFromUniform must come after InlinePointerLets and Simplify, as
+ // it assumes that the form of the array length argument is &var.array.
+ manager.Add<transform::ArrayLengthFromUniform>();
+ data.Add<transform::ArrayLengthFromUniform::Config>(std::move(array_length_from_uniform_cfg));
+ // DecomposeMemoryAccess must come after:
+ // * InlinePointerLets, as we cannot take the address of calls to
+ // DecomposeMemoryAccess::Intrinsic.
+ // * Simplify, as we need to fold away the address-of and dereferences of
+ // `*(&(intrinsic_load()))` expressions.
+ // * RemovePhonies, as phonies can be assigned a pointer to a
+ // non-constructible buffer, or dynamic array, which DMA cannot cope with.
+ manager.Add<transform::DecomposeMemoryAccess>();
+ // CalculateArrayLength must come after DecomposeMemoryAccess, as
+ // DecomposeMemoryAccess special-cases the arrayLength() intrinsic, which
+ // will be transformed by CalculateArrayLength
+ manager.Add<transform::CalculateArrayLength>();
+ manager.Add<transform::PromoteInitializersToConstVar>();
+
+ manager.Add<transform::RemoveContinueInSwitch>();
+
+ manager.Add<transform::AddEmptyEntryPoint>();
+
+ data.Add<transform::CanonicalizeEntryPointIO::Config>(
+ transform::CanonicalizeEntryPointIO::ShaderStyle::kHlsl);
+ data.Add<transform::NumWorkgroupsFromUniform::Config>(options.root_constant_binding_point);
+
+ auto out = manager.Run(in, data);
+
+ SanitizedResult result;
+ result.program = std::move(out.program);
+ if (auto* res = out.data.Get<transform::ArrayLengthFromUniform::Result>()) {
+ result.used_array_length_from_uniform_indices = std::move(res->used_size_indices);
+ }
+ return result;
}
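A hedged sketch of how Sanitize() composes with the generator, assuming the GeneratorImpl and SanitizedResult declarations in generator_impl.h and the result() accessor that generator.cc uses to collect the emitted text:

#include <string>

#include "src/tint/writer/hlsl/generator_impl.h"

// Sketch only: running the sanitizing transform pipeline above before code
// generation. Error reporting is reduced to a bool for brevity.
bool SanitizeAndGenerate(const tint::Program* in,
                         const tint::writer::hlsl::Options& options,
                         std::string* out_hlsl) {
    auto sanitized = tint::writer::hlsl::Sanitize(in, options);
    if (!sanitized.program.IsValid()) {
        return false;
    }
    tint::writer::hlsl::GeneratorImpl impl(&sanitized.program);
    if (!impl.Generate()) {
        return false;
    }
    *out_hlsl = impl.result();  // assumed TextGenerator accessor
    return true;
}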
GeneratorImpl::GeneratorImpl(const Program* program) : TextGenerator(program) {}
@@ -241,3278 +256,3366 @@ GeneratorImpl::GeneratorImpl(const Program* program) : TextGenerator(program) {}
GeneratorImpl::~GeneratorImpl() = default;
bool GeneratorImpl::Generate() {
- const TypeInfo* last_kind = nullptr;
- size_t last_padding_line = 0;
-
- auto* mod = builder_.Sem().Module();
- for (auto* decl : mod->DependencyOrderedDeclarations()) {
- if (decl->Is<ast::Alias>()) {
- continue; // Ignore aliases.
- }
-
- // Emit a new line between declarations if the type of declaration has
- // changed, or we're about to emit a function
- auto* kind = &decl->TypeInfo();
- if (current_buffer_->lines.size() != last_padding_line) {
- if (last_kind && (last_kind != kind || decl->Is<ast::Function>())) {
- line();
- last_padding_line = current_buffer_->lines.size();
- }
- }
- last_kind = kind;
-
- bool ok = Switch(
- decl,
- [&](const ast::Variable* global) { //
- return EmitGlobalVariable(global);
- },
- [&](const ast::Struct* str) {
- auto* ty = builder_.Sem().Get(str);
- auto storage_class_uses = ty->StorageClassUsage();
- if (storage_class_uses.size() !=
- (storage_class_uses.count(ast::StorageClass::kStorage) +
- storage_class_uses.count(ast::StorageClass::kUniform))) {
- // The structure is used as something other than a storage buffer or
- // uniform buffer, so it needs to be emitted.
- // Storage buffer are read and written to via a ByteAddressBuffer
- // instead of true structure.
- // Structures used as uniform buffer are read from an array of
- // vectors instead of true structure.
- return EmitStructType(current_buffer_, ty);
- }
- return true;
- },
- [&](const ast::Function* func) {
- if (func->IsEntryPoint()) {
- return EmitEntryPointFunction(func);
- }
- return EmitFunction(func);
- },
- [&](Default) {
- TINT_ICE(Writer, diagnostics_)
- << "unhandled module-scope declaration: "
- << decl->TypeInfo().name;
- return false;
- });
+ const TypeInfo* last_kind = nullptr;
+ size_t last_padding_line = 0;
+
+ auto* mod = builder_.Sem().Module();
+ for (auto* decl : mod->DependencyOrderedDeclarations()) {
+ if (decl->Is<ast::Alias>()) {
+ continue; // Ignore aliases.
+ }
+ if (decl->Is<ast::Enable>()) {
+        // Currently we don't have to do anything for using an extension in HLSL.
+ continue;
+ }
- if (!ok) {
- return false;
+ // Emit a new line between declarations if the type of declaration has
+ // changed, or we're about to emit a function
+ auto* kind = &decl->TypeInfo();
+ if (current_buffer_->lines.size() != last_padding_line) {
+ if (last_kind && (last_kind != kind || decl->Is<ast::Function>())) {
+ line();
+ last_padding_line = current_buffer_->lines.size();
+ }
+ }
+ last_kind = kind;
+
+ bool ok = Switch(
+ decl,
+ [&](const ast::Variable* global) { //
+ return EmitGlobalVariable(global);
+ },
+ [&](const ast::Struct* str) {
+ auto* ty = builder_.Sem().Get(str);
+ auto storage_class_uses = ty->StorageClassUsage();
+ if (storage_class_uses.size() !=
+ (storage_class_uses.count(ast::StorageClass::kStorage) +
+ storage_class_uses.count(ast::StorageClass::kUniform))) {
+ // The structure is used as something other than a storage buffer or
+ // uniform buffer, so it needs to be emitted.
+                    // Storage buffers are read and written via a ByteAddressBuffer
+                    // instead of a true structure.
+                    // Structures used as uniform buffers are read from an array of
+                    // vectors instead of a true structure.
+ return EmitStructType(current_buffer_, ty);
+ }
+ return true;
+ },
+ [&](const ast::Function* func) {
+ if (func->IsEntryPoint()) {
+ return EmitEntryPointFunction(func);
+ }
+ return EmitFunction(func);
+ },
+ [&](Default) {
+ TINT_ICE(Writer, diagnostics_)
+ << "unhandled module-scope declaration: " << decl->TypeInfo().name;
+ return false;
+ });
+
+ if (!ok) {
+ return false;
+ }
}
- }
- if (!helpers_.lines.empty()) {
- current_buffer_->Insert(helpers_, 0, 0);
- }
+ if (!helpers_.lines.empty()) {
+ current_buffer_->Insert(helpers_, 0, 0);
+ }
- return true;
+ return true;
}
-bool GeneratorImpl::EmitDynamicVectorAssignment(
- const ast::AssignmentStatement* stmt,
- const sem::Vector* vec) {
- auto name =
- utils::GetOrCreate(dynamic_vector_write_, vec, [&]() -> std::string {
+bool GeneratorImpl::EmitDynamicVectorAssignment(const ast::AssignmentStatement* stmt,
+ const sem::Vector* vec) {
+ auto name = utils::GetOrCreate(dynamic_vector_write_, vec, [&]() -> std::string {
std::string fn;
{
- std::ostringstream ss;
- if (!EmitType(ss, vec, tint::ast::StorageClass::kInvalid,
- ast::Access::kUndefined, "")) {
- return "";
- }
- fn = UniqueIdentifier("set_" + ss.str());
+ std::ostringstream ss;
+ if (!EmitType(ss, vec, tint::ast::StorageClass::kInvalid, ast::Access::kUndefined,
+ "")) {
+ return "";
+ }
+ fn = UniqueIdentifier("set_" + ss.str());
}
{
- auto out = line(&helpers_);
- out << "void " << fn << "(inout ";
- if (!EmitTypeAndName(out, vec, ast::StorageClass::kInvalid,
- ast::Access::kUndefined, "vec")) {
- return "";
- }
- out << ", int idx, ";
- if (!EmitTypeAndName(out, vec->type(), ast::StorageClass::kInvalid,
- ast::Access::kUndefined, "val")) {
- return "";
- }
- out << ") {";
+ auto out = line(&helpers_);
+ out << "void " << fn << "(inout ";
+ if (!EmitTypeAndName(out, vec, ast::StorageClass::kInvalid, ast::Access::kUndefined,
+ "vec")) {
+ return "";
+ }
+ out << ", int idx, ";
+ if (!EmitTypeAndName(out, vec->type(), ast::StorageClass::kInvalid,
+ ast::Access::kUndefined, "val")) {
+ return "";
+ }
+ out << ") {";
}
{
- ScopedIndent si(&helpers_);
- auto out = line(&helpers_);
- switch (vec->Width()) {
- case 2:
- out << "vec = (idx.xx == int2(0, 1)) ? val.xx : vec;";
- break;
- case 3:
- out << "vec = (idx.xxx == int3(0, 1, 2)) ? val.xxx : vec;";
- break;
- case 4:
- out << "vec = (idx.xxxx == int4(0, 1, 2, 3)) ? val.xxxx : vec;";
- break;
- default:
- TINT_UNREACHABLE(Writer, builder_.Diagnostics())
- << "invalid vector size " << vec->Width();
- break;
- }
+ ScopedIndent si(&helpers_);
+ auto out = line(&helpers_);
+ switch (vec->Width()) {
+ case 2:
+ out << "vec = (idx.xx == int2(0, 1)) ? val.xx : vec;";
+ break;
+ case 3:
+ out << "vec = (idx.xxx == int3(0, 1, 2)) ? val.xxx : vec;";
+ break;
+ case 4:
+ out << "vec = (idx.xxxx == int4(0, 1, 2, 3)) ? val.xxxx : vec;";
+ break;
+ default:
+ TINT_UNREACHABLE(Writer, builder_.Diagnostics())
+ << "invalid vector size " << vec->Width();
+ break;
+ }
}
line(&helpers_) << "}";
line(&helpers_);
return fn;
- });
+ });
- if (name.empty()) {
- return false;
- }
+ if (name.empty()) {
+ return false;
+ }
- auto* ast_access_expr = stmt->lhs->As<ast::IndexAccessorExpression>();
+ auto* ast_access_expr = stmt->lhs->As<ast::IndexAccessorExpression>();
- auto out = line();
- out << name << "(";
- if (!EmitExpression(out, ast_access_expr->object)) {
- return false;
- }
- out << ", ";
- if (!EmitExpression(out, ast_access_expr->index)) {
- return false;
- }
- out << ", ";
- if (!EmitExpression(out, stmt->rhs)) {
- return false;
- }
- out << ");";
+ auto out = line();
+ out << name << "(";
+ if (!EmitExpression(out, ast_access_expr->object)) {
+ return false;
+ }
+ out << ", ";
+ if (!EmitExpression(out, ast_access_expr->index)) {
+ return false;
+ }
+ out << ", ";
+ if (!EmitExpression(out, stmt->rhs)) {
+ return false;
+ }
+ out << ");";
- return true;
+ return true;
}
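For reference, the helper emitted above for a dynamically indexed float3 assignment has roughly the following shape; the helper name comes from UniqueIdentifier(), so the exact spelling (and any numeric suffix) can vary.

// Sketch only: approximate HLSL emitted by EmitDynamicVectorAssignment above
// for a float3 left-hand side with a non-constant index.
constexpr const char* kExampleDynamicVectorWriteHelper = R"(
void set_float3(inout float3 vec, int idx, float val) {
  vec = (idx.xxx == int3(0, 1, 2)) ? val.xxx : vec;
}
)";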
-bool GeneratorImpl::EmitDynamicMatrixVectorAssignment(
- const ast::AssignmentStatement* stmt,
- const sem::Matrix* mat) {
- auto name = utils::GetOrCreate(
- dynamic_matrix_vector_write_, mat, [&]() -> std::string {
+bool GeneratorImpl::EmitDynamicMatrixVectorAssignment(const ast::AssignmentStatement* stmt,
+ const sem::Matrix* mat) {
+ auto name = utils::GetOrCreate(dynamic_matrix_vector_write_, mat, [&]() -> std::string {
std::string fn;
{
- std::ostringstream ss;
- if (!EmitType(ss, mat, tint::ast::StorageClass::kInvalid,
- ast::Access::kUndefined, "")) {
- return "";
- }
- fn = UniqueIdentifier("set_vector_" + ss.str());
+ std::ostringstream ss;
+ if (!EmitType(ss, mat, tint::ast::StorageClass::kInvalid, ast::Access::kUndefined,
+ "")) {
+ return "";
+ }
+ fn = UniqueIdentifier("set_vector_" + ss.str());
}
{
- auto out = line(&helpers_);
- out << "void " << fn << "(inout ";
- if (!EmitTypeAndName(out, mat, ast::StorageClass::kInvalid,
- ast::Access::kUndefined, "mat")) {
- return "";
- }
- out << ", int col, ";
- if (!EmitTypeAndName(out, mat->ColumnType(),
- ast::StorageClass::kInvalid,
- ast::Access::kUndefined, "val")) {
- return "";
- }
- out << ") {";
+ auto out = line(&helpers_);
+ out << "void " << fn << "(inout ";
+ if (!EmitTypeAndName(out, mat, ast::StorageClass::kInvalid, ast::Access::kUndefined,
+ "mat")) {
+ return "";
+ }
+ out << ", int col, ";
+ if (!EmitTypeAndName(out, mat->ColumnType(), ast::StorageClass::kInvalid,
+ ast::Access::kUndefined, "val")) {
+ return "";
+ }
+ out << ") {";
}
{
- ScopedIndent si(&helpers_);
- line(&helpers_) << "switch (col) {";
- {
- ScopedIndent si2(&helpers_);
- for (uint32_t i = 0; i < mat->columns(); ++i) {
- line(&helpers_)
- << "case " << i << ": mat[" << i << "] = val; break;";
+ ScopedIndent si(&helpers_);
+ line(&helpers_) << "switch (col) {";
+ {
+ ScopedIndent si2(&helpers_);
+ for (uint32_t i = 0; i < mat->columns(); ++i) {
+ line(&helpers_) << "case " << i << ": mat[" << i << "] = val; break;";
+ }
}
- }
- line(&helpers_) << "}";
+ line(&helpers_) << "}";
}
line(&helpers_) << "}";
line(&helpers_);
return fn;
- });
+ });
- if (name.empty()) {
- return false;
- }
+ if (name.empty()) {
+ return false;
+ }
- auto* ast_access_expr = stmt->lhs->As<ast::IndexAccessorExpression>();
+ auto* ast_access_expr = stmt->lhs->As<ast::IndexAccessorExpression>();
- auto out = line();
- out << name << "(";
- if (!EmitExpression(out, ast_access_expr->object)) {
- return false;
- }
- out << ", ";
- if (!EmitExpression(out, ast_access_expr->index)) {
- return false;
- }
- out << ", ";
- if (!EmitExpression(out, stmt->rhs)) {
- return false;
- }
- out << ");";
+ auto out = line();
+ out << name << "(";
+ if (!EmitExpression(out, ast_access_expr->object)) {
+ return false;
+ }
+ out << ", ";
+ if (!EmitExpression(out, ast_access_expr->index)) {
+ return false;
+ }
+ out << ", ";
+ if (!EmitExpression(out, stmt->rhs)) {
+ return false;
+ }
+ out << ");";
- return true;
+ return true;
}
-bool GeneratorImpl::EmitDynamicMatrixScalarAssignment(
- const ast::AssignmentStatement* stmt,
- const sem::Matrix* mat) {
- auto* lhs_col_access = stmt->lhs->As<ast::IndexAccessorExpression>();
- auto* lhs_row_access =
- lhs_col_access->object->As<ast::IndexAccessorExpression>();
+bool GeneratorImpl::EmitDynamicMatrixScalarAssignment(const ast::AssignmentStatement* stmt,
+ const sem::Matrix* mat) {
+ auto* lhs_col_access = stmt->lhs->As<ast::IndexAccessorExpression>();
+ auto* lhs_row_access = lhs_col_access->object->As<ast::IndexAccessorExpression>();
- auto name = utils::GetOrCreate(
- dynamic_matrix_scalar_write_, mat, [&]() -> std::string {
+ auto name = utils::GetOrCreate(dynamic_matrix_scalar_write_, mat, [&]() -> std::string {
std::string fn;
{
- std::ostringstream ss;
- if (!EmitType(ss, mat, tint::ast::StorageClass::kInvalid,
- ast::Access::kUndefined, "")) {
- return "";
- }
- fn = UniqueIdentifier("set_scalar_" + ss.str());
+ std::ostringstream ss;
+ if (!EmitType(ss, mat, tint::ast::StorageClass::kInvalid, ast::Access::kUndefined,
+ "")) {
+ return "";
+ }
+ fn = UniqueIdentifier("set_scalar_" + ss.str());
}
{
- auto out = line(&helpers_);
- out << "void " << fn << "(inout ";
- if (!EmitTypeAndName(out, mat, ast::StorageClass::kInvalid,
- ast::Access::kUndefined, "mat")) {
- return "";
- }
- out << ", int col, int row, ";
- if (!EmitTypeAndName(out, mat->type(), ast::StorageClass::kInvalid,
- ast::Access::kUndefined, "val")) {
- return "";
- }
- out << ") {";
+ auto out = line(&helpers_);
+ out << "void " << fn << "(inout ";
+ if (!EmitTypeAndName(out, mat, ast::StorageClass::kInvalid, ast::Access::kUndefined,
+ "mat")) {
+ return "";
+ }
+ out << ", int col, int row, ";
+ if (!EmitTypeAndName(out, mat->type(), ast::StorageClass::kInvalid,
+ ast::Access::kUndefined, "val")) {
+ return "";
+ }
+ out << ") {";
}
{
- ScopedIndent si(&helpers_);
- line(&helpers_) << "switch (col) {";
- {
- ScopedIndent si2(&helpers_);
- auto* vec =
- TypeOf(lhs_row_access->object)->UnwrapRef()->As<sem::Vector>();
- for (uint32_t i = 0; i < mat->columns(); ++i) {
- line(&helpers_) << "case " << i << ":";
- {
- auto vec_name = "mat[" + std::to_string(i) + "]";
- ScopedIndent si3(&helpers_);
- {
- auto out = line(&helpers_);
- switch (mat->rows()) {
- case 2:
- out << vec_name
- << " = (row.xx == int2(0, 1)) ? val.xx : " << vec_name
- << ";";
- break;
- case 3:
- out << vec_name
- << " = (row.xxx == int3(0, 1, 2)) ? val.xxx : "
- << vec_name << ";";
- break;
- case 4:
- out << vec_name
- << " = (row.xxxx == int4(0, 1, 2, 3)) ? val.xxxx : "
- << vec_name << ";";
- break;
- default:
- TINT_UNREACHABLE(Writer, builder_.Diagnostics())
- << "invalid vector size " << vec->Width();
- break;
- }
- }
- line(&helpers_) << "break;";
- }
- }
- }
- line(&helpers_) << "}";
+ ScopedIndent si(&helpers_);
+ line(&helpers_) << "switch (col) {";
+ {
+ ScopedIndent si2(&helpers_);
+ auto* vec = TypeOf(lhs_row_access->object)->UnwrapRef()->As<sem::Vector>();
+ for (uint32_t i = 0; i < mat->columns(); ++i) {
+ line(&helpers_) << "case " << i << ":";
+ {
+ auto vec_name = "mat[" + std::to_string(i) + "]";
+ ScopedIndent si3(&helpers_);
+ {
+ auto out = line(&helpers_);
+ switch (mat->rows()) {
+ case 2:
+ out << vec_name
+ << " = (row.xx == int2(0, 1)) ? val.xx : " << vec_name
+ << ";";
+ break;
+ case 3:
+ out << vec_name
+ << " = (row.xxx == int3(0, 1, 2)) ? val.xxx : " << vec_name
+ << ";";
+ break;
+ case 4:
+ out << vec_name
+ << " = (row.xxxx == int4(0, 1, 2, 3)) ? val.xxxx : "
+ << vec_name << ";";
+ break;
+ default:
+ TINT_UNREACHABLE(Writer, builder_.Diagnostics())
+ << "invalid vector size " << vec->Width();
+ break;
+ }
+ }
+ line(&helpers_) << "break;";
+ }
+ }
+ }
+ line(&helpers_) << "}";
}
line(&helpers_) << "}";
line(&helpers_);
return fn;
- });
+ });
- if (name.empty()) {
- return false;
- }
+ if (name.empty()) {
+ return false;
+ }
- auto out = line();
- out << name << "(";
- if (!EmitExpression(out, lhs_row_access->object)) {
- return false;
- }
- out << ", ";
- if (!EmitExpression(out, lhs_col_access->index)) {
- return false;
- }
- out << ", ";
- if (!EmitExpression(out, lhs_row_access->index)) {
- return false;
- }
- out << ", ";
- if (!EmitExpression(out, stmt->rhs)) {
- return false;
- }
- out << ");";
+ auto out = line();
+ out << name << "(";
+ if (!EmitExpression(out, lhs_row_access->object)) {
+ return false;
+ }
+ out << ", ";
+ if (!EmitExpression(out, lhs_col_access->index)) {
+ return false;
+ }
+ out << ", ";
+ if (!EmitExpression(out, lhs_row_access->index)) {
+ return false;
+ }
+ out << ", ";
+ if (!EmitExpression(out, stmt->rhs)) {
+ return false;
+ }
+ out << ");";
- return true;
+ return true;
}
-bool GeneratorImpl::EmitIndexAccessor(
- std::ostream& out,
- const ast::IndexAccessorExpression* expr) {
- if (!EmitExpression(out, expr->object)) {
- return false;
- }
- out << "[";
+bool GeneratorImpl::EmitIndexAccessor(std::ostream& out, const ast::IndexAccessorExpression* expr) {
+ if (!EmitExpression(out, expr->object)) {
+ return false;
+ }
+ out << "[";
- if (!EmitExpression(out, expr->index)) {
- return false;
- }
- out << "]";
+ if (!EmitExpression(out, expr->index)) {
+ return false;
+ }
+ out << "]";
- return true;
+ return true;
}
-bool GeneratorImpl::EmitBitcast(std::ostream& out,
- const ast::BitcastExpression* expr) {
- auto* type = TypeOf(expr);
- if (auto* vec = type->UnwrapRef()->As<sem::Vector>()) {
- type = vec->type();
- }
+bool GeneratorImpl::EmitBitcast(std::ostream& out, const ast::BitcastExpression* expr) {
+ auto* type = TypeOf(expr);
+ if (auto* vec = type->UnwrapRef()->As<sem::Vector>()) {
+ type = vec->type();
+ }
- if (!type->is_integer_scalar() && !type->is_float_scalar()) {
- diagnostics_.add_error(diag::System::Writer,
- "Unable to do bitcast to type " +
- type->FriendlyName(builder_.Symbols()));
- return false;
- }
+ if (!type->is_integer_scalar() && !type->is_float_scalar()) {
+ diagnostics_.add_error(diag::System::Writer, "Unable to do bitcast to type " +
+ type->FriendlyName(builder_.Symbols()));
+ return false;
+ }
- out << "as";
- if (!EmitType(out, type, ast::StorageClass::kNone, ast::Access::kReadWrite,
- "")) {
- return false;
- }
- out << "(";
- if (!EmitExpression(out, expr->expr)) {
- return false;
- }
- out << ")";
- return true;
+ out << "as";
+ if (!EmitType(out, type, ast::StorageClass::kNone, ast::Access::kReadWrite, "")) {
+ return false;
+ }
+ out << "(";
+ if (!EmitExpression(out, expr->expr)) {
+ return false;
+ }
+ out << ")";
+ return true;
}
bool GeneratorImpl::EmitAssign(const ast::AssignmentStatement* stmt) {
- if (auto* lhs_access = stmt->lhs->As<ast::IndexAccessorExpression>()) {
- // BUG(crbug.com/tint/1333): work around assignment of scalar to matrices
- // with at least one dynamic index
- if (auto* lhs_sub_access =
- lhs_access->object->As<ast::IndexAccessorExpression>()) {
- if (auto* mat =
- TypeOf(lhs_sub_access->object)->UnwrapRef()->As<sem::Matrix>()) {
- auto* rhs_col_idx_sem = builder_.Sem().Get(lhs_access->index);
- auto* rhs_row_idx_sem = builder_.Sem().Get(lhs_sub_access->index);
- if (!rhs_col_idx_sem->ConstantValue().IsValid() ||
- !rhs_row_idx_sem->ConstantValue().IsValid()) {
- return EmitDynamicMatrixScalarAssignment(stmt, mat);
- }
- }
- }
- // BUG(crbug.com/tint/1333): work around assignment of vector to matrices
- // with dynamic indices
- const auto* lhs_access_type = TypeOf(lhs_access->object)->UnwrapRef();
- if (auto* mat = lhs_access_type->As<sem::Matrix>()) {
- auto* lhs_index_sem = builder_.Sem().Get(lhs_access->index);
- if (!lhs_index_sem->ConstantValue().IsValid()) {
- return EmitDynamicMatrixVectorAssignment(stmt, mat);
- }
- }
- // BUG(crbug.com/tint/534): work around assignment to vectors with dynamic
- // indices
- if (auto* vec = lhs_access_type->As<sem::Vector>()) {
- auto* rhs_sem = builder_.Sem().Get(lhs_access->index);
- if (!rhs_sem->ConstantValue().IsValid()) {
- return EmitDynamicVectorAssignment(stmt, vec);
- }
- }
- }
-
- auto out = line();
- if (!EmitExpression(out, stmt->lhs)) {
- return false;
- }
- out << " = ";
- if (!EmitExpression(out, stmt->rhs)) {
- return false;
- }
- out << ";";
- return true;
-}
-
-bool GeneratorImpl::EmitExpressionOrOneIfZero(std::ostream& out,
- const ast::Expression* expr) {
- // For constants, replace literal 0 with 1.
- sem::Constant::Scalars elems;
- if (const auto& val = builder_.Sem().Get(expr)->ConstantValue()) {
- if (!val.AnyZero()) {
- return EmitExpression(out, expr);
+ if (auto* lhs_access = stmt->lhs->As<ast::IndexAccessorExpression>()) {
+ // BUG(crbug.com/tint/1333): work around assignment of scalar to matrices
+ // with at least one dynamic index
+ if (auto* lhs_sub_access = lhs_access->object->As<ast::IndexAccessorExpression>()) {
+ if (auto* mat = TypeOf(lhs_sub_access->object)->UnwrapRef()->As<sem::Matrix>()) {
+ auto* rhs_col_idx_sem = builder_.Sem().Get(lhs_access->index);
+ auto* rhs_row_idx_sem = builder_.Sem().Get(lhs_sub_access->index);
+ if (!rhs_col_idx_sem->ConstantValue().IsValid() ||
+ !rhs_row_idx_sem->ConstantValue().IsValid()) {
+ return EmitDynamicMatrixScalarAssignment(stmt, mat);
+ }
+ }
+ }
+ // BUG(crbug.com/tint/1333): work around assignment of vector to matrices
+ // with dynamic indices
+ const auto* lhs_access_type = TypeOf(lhs_access->object)->UnwrapRef();
+ if (auto* mat = lhs_access_type->As<sem::Matrix>()) {
+ auto* lhs_index_sem = builder_.Sem().Get(lhs_access->index);
+ if (!lhs_index_sem->ConstantValue().IsValid()) {
+ return EmitDynamicMatrixVectorAssignment(stmt, mat);
+ }
+ }
+ // BUG(crbug.com/tint/534): work around assignment to vectors with dynamic
+ // indices
+ if (auto* vec = lhs_access_type->As<sem::Vector>()) {
+ auto* rhs_sem = builder_.Sem().Get(lhs_access->index);
+ if (!rhs_sem->ConstantValue().IsValid()) {
+ return EmitDynamicVectorAssignment(stmt, vec);
+ }
+ }
}
- if (val.Type()->IsAnyOf<sem::I32, sem::U32>()) {
- return EmitValue(out, val.Type(), 1);
+ auto out = line();
+ if (!EmitExpression(out, stmt->lhs)) {
+ return false;
}
-
- if (auto* vec = val.Type()->As<sem::Vector>()) {
- auto* elem_ty = vec->type();
-
- if (!EmitType(out, val.Type(), ast::StorageClass::kNone,
- ast::Access::kUndefined, "")) {
+ out << " = ";
+ if (!EmitExpression(out, stmt->rhs)) {
return false;
- }
+ }
+ out << ";";
+ return true;
+}
- out << "(";
- for (size_t i = 0; i < val.Elements().size(); ++i) {
- if (i != 0) {
- out << ", ";
+bool GeneratorImpl::EmitExpressionOrOneIfZero(std::ostream& out, const ast::Expression* expr) {
+ // For constants, replace literal 0 with 1.
+ if (const auto& val = builder_.Sem().Get(expr)->ConstantValue()) {
+ if (!val.AnyZero()) {
+ return EmitExpression(out, expr);
}
- if (!val.WithScalarAt(i, [&](auto&& s) -> bool {
- // Use std::equal_to to work around -Wfloat-equal warnings
- auto equals_to =
- std::equal_to<std::remove_reference_t<decltype(s)>>{};
- bool is_zero = equals_to(s, 0);
- return EmitValue(out, elem_ty, is_zero ? 1 : static_cast<int>(s));
- })) {
- return false;
+ if (val.Type()->IsAnyOf<sem::I32, sem::U32>()) {
+ return EmitValue(out, val.Type(), 1);
}
- }
- out << ")";
- return true;
+
+ if (auto* vec = val.Type()->As<sem::Vector>()) {
+ auto* elem_ty = vec->type();
+
+ if (!EmitType(out, val.Type(), ast::StorageClass::kNone, ast::Access::kUndefined, "")) {
+ return false;
+ }
+
+ out << "(";
+ for (size_t i = 0; i < val.ElementCount(); ++i) {
+ if (i != 0) {
+ out << ", ";
+ }
+ auto s = val.Element<AInt>(i).value;
+ if (!EmitValue(out, elem_ty, (s == 0) ? 1 : static_cast<int>(s))) {
+ return false;
+ }
+ }
+ out << ")";
+ return true;
+ }
+
+ TINT_ICE(Writer, diagnostics_)
+ << "EmitExpressionOrOneIfZero expects integer scalar or vector";
+ return false;
}
- TINT_ICE(Writer, diagnostics_)
- << "EmitExpressionOrOneIfZero expects integer scalar or vector";
- return false;
- }
-
- auto* ty = TypeOf(expr)->UnwrapRef();
-
- // For non-constants, we need to emit runtime code to check if the value is 0,
- // and return 1 in that case.
- std::string zero;
- {
- std::ostringstream ss;
- EmitValue(ss, ty, 0);
- zero = ss.str();
- }
- std::string one;
- {
- std::ostringstream ss;
- EmitValue(ss, ty, 1);
- one = ss.str();
- }
-
- // For identifiers, no need for a function call as it's fine to evaluate
- // `expr` more than once.
- if (expr->Is<ast::IdentifierExpression>()) {
- out << "(";
- if (!EmitExpression(out, expr)) {
- return false;
+ auto* ty = TypeOf(expr)->UnwrapRef();
+
+ // For non-constants, we need to emit runtime code to check if the value is 0,
+ // and return 1 in that case.
+ std::string zero;
+ {
+ std::ostringstream ss;
+ EmitValue(ss, ty, 0);
+ zero = ss.str();
}
- out << " == " << zero << " ? " << one << " : ";
- if (!EmitExpression(out, expr)) {
- return false;
+ std::string one;
+ {
+ std::ostringstream ss;
+ EmitValue(ss, ty, 1);
+ one = ss.str();
}
- out << ")";
- return true;
- }
- // For non-identifier expressions, call a function to make sure `expr` is only
- // evaluated once.
- auto name =
- utils::GetOrCreate(value_or_one_if_zero_, ty, [&]() -> std::string {
+ // For identifiers, no need for a function call as it's fine to evaluate
+ // `expr` more than once.
+ if (expr->Is<ast::IdentifierExpression>()) {
+ out << "(";
+ if (!EmitExpression(out, expr)) {
+ return false;
+ }
+ out << " == " << zero << " ? " << one << " : ";
+ if (!EmitExpression(out, expr)) {
+ return false;
+ }
+ out << ")";
+ return true;
+ }
+
+ // For non-identifier expressions, call a function to make sure `expr` is only
+ // evaluated once.
+ auto name = utils::GetOrCreate(value_or_one_if_zero_, ty, [&]() -> std::string {
// Example:
// int4 tint_value_or_one_if_zero_int4(int4 value) {
        //   return value == 0 ? 1 : value;
// }
std::string ty_name;
{
- std::ostringstream ss;
- if (!EmitType(ss, ty, tint::ast::StorageClass::kInvalid,
- ast::Access::kUndefined, "")) {
- return "";
- }
- ty_name = ss.str();
+ std::ostringstream ss;
+ if (!EmitType(ss, ty, tint::ast::StorageClass::kInvalid, ast::Access::kUndefined, "")) {
+ return "";
+ }
+ ty_name = ss.str();
}
std::string fn = UniqueIdentifier("value_or_one_if_zero_" + ty_name);
- line(&helpers_) << ty_name << " " << fn << "(" << ty_name
- << " value) {";
+ line(&helpers_) << ty_name << " " << fn << "(" << ty_name << " value) {";
{
- ScopedIndent si(&helpers_);
- line(&helpers_) << "return value == " << zero << " ? " << one
- << " : value;";
+ ScopedIndent si(&helpers_);
+ line(&helpers_) << "return value == " << zero << " ? " << one << " : value;";
}
line(&helpers_) << "}";
line(&helpers_);
return fn;
- });
+ });
- if (name.empty()) {
- return false;
- }
+ if (name.empty()) {
+ return false;
+ }
- out << name << "(";
- if (!EmitExpression(out, expr)) {
- return false;
- }
- out << ")";
- return true;
+ out << name << "(";
+ if (!EmitExpression(out, expr)) {
+ return false;
+ }
+ out << ")";
+ return true;
}
-bool GeneratorImpl::EmitBinary(std::ostream& out,
- const ast::BinaryExpression* expr) {
- if (expr->op == ast::BinaryOp::kLogicalAnd ||
- expr->op == ast::BinaryOp::kLogicalOr) {
- auto name = UniqueIdentifier(kTempNamePrefix);
+bool GeneratorImpl::EmitBinary(std::ostream& out, const ast::BinaryExpression* expr) {
+ if (expr->op == ast::BinaryOp::kLogicalAnd || expr->op == ast::BinaryOp::kLogicalOr) {
+ auto name = UniqueIdentifier(kTempNamePrefix);
- {
- auto pre = line();
- pre << "bool " << name << " = ";
- if (!EmitExpression(pre, expr->lhs)) {
- return false;
- }
- pre << ";";
- }
+ {
+ auto pre = line();
+ pre << "bool " << name << " = ";
+ if (!EmitExpression(pre, expr->lhs)) {
+ return false;
+ }
+ pre << ";";
+ }
- if (expr->op == ast::BinaryOp::kLogicalOr) {
- line() << "if (!" << name << ") {";
- } else {
- line() << "if (" << name << ") {";
- }
+ if (expr->op == ast::BinaryOp::kLogicalOr) {
+ line() << "if (!" << name << ") {";
+ } else {
+ line() << "if (" << name << ") {";
+ }
- {
- ScopedIndent si(this);
- auto pre = line();
- pre << name << " = ";
- if (!EmitExpression(pre, expr->rhs)) {
- return false;
- }
- pre << ";";
+ {
+ ScopedIndent si(this);
+ auto pre = line();
+ pre << name << " = ";
+ if (!EmitExpression(pre, expr->rhs)) {
+ return false;
+ }
+ pre << ";";
+ }
+
+ line() << "}";
+
+ out << "(" << name << ")";
+ return true;
}
- line() << "}";
+ auto* lhs_type = TypeOf(expr->lhs)->UnwrapRef();
+ auto* rhs_type = TypeOf(expr->rhs)->UnwrapRef();
+ // Multiplying by a matrix requires the use of `mul` in order to get the
+ // type of multiply we desire.
+ if (expr->op == ast::BinaryOp::kMultiply &&
+ ((lhs_type->Is<sem::Vector>() && rhs_type->Is<sem::Matrix>()) ||
+ (lhs_type->Is<sem::Matrix>() && rhs_type->Is<sem::Vector>()) ||
+ (lhs_type->Is<sem::Matrix>() && rhs_type->Is<sem::Matrix>()))) {
+ // Matrices are transposed, so swap LHS and RHS.
+ out << "mul(";
+ if (!EmitExpression(out, expr->rhs)) {
+ return false;
+ }
+ out << ", ";
+ if (!EmitExpression(out, expr->lhs)) {
+ return false;
+ }
+ out << ")";
- out << "(" << name << ")";
- return true;
- }
-
- auto* lhs_type = TypeOf(expr->lhs)->UnwrapRef();
- auto* rhs_type = TypeOf(expr->rhs)->UnwrapRef();
- // Multiplying by a matrix requires the use of `mul` in order to get the
- // type of multiply we desire.
- if (expr->op == ast::BinaryOp::kMultiply &&
- ((lhs_type->Is<sem::Vector>() && rhs_type->Is<sem::Matrix>()) ||
- (lhs_type->Is<sem::Matrix>() && rhs_type->Is<sem::Vector>()) ||
- (lhs_type->Is<sem::Matrix>() && rhs_type->Is<sem::Matrix>()))) {
- // Matrices are transposed, so swap LHS and RHS.
- out << "mul(";
- if (!EmitExpression(out, expr->rhs)) {
- return false;
+ return true;
}
- out << ", ";
+
+ ScopedParen sp(out);
+
if (!EmitExpression(out, expr->lhs)) {
- return false;
+ return false;
}
- out << ")";
+ out << " ";
- return true;
- }
+ switch (expr->op) {
+ case ast::BinaryOp::kAnd:
+ out << "&";
+ break;
+ case ast::BinaryOp::kOr:
+ out << "|";
+ break;
+ case ast::BinaryOp::kXor:
+ out << "^";
+ break;
+ case ast::BinaryOp::kLogicalAnd:
+ case ast::BinaryOp::kLogicalOr: {
+ // These are both handled above.
+ TINT_UNREACHABLE(Writer, diagnostics_);
+ return false;
+ }
+ case ast::BinaryOp::kEqual:
+ out << "==";
+ break;
+ case ast::BinaryOp::kNotEqual:
+ out << "!=";
+ break;
+ case ast::BinaryOp::kLessThan:
+ out << "<";
+ break;
+ case ast::BinaryOp::kGreaterThan:
+ out << ">";
+ break;
+ case ast::BinaryOp::kLessThanEqual:
+ out << "<=";
+ break;
+ case ast::BinaryOp::kGreaterThanEqual:
+ out << ">=";
+ break;
+ case ast::BinaryOp::kShiftLeft:
+ out << "<<";
+ break;
+ case ast::BinaryOp::kShiftRight:
+ // TODO(dsinclair): MSL is based on C++14, and >> in C++14 has
+ // implementation-defined behaviour for negative LHS. We may have to
+ // generate extra code to implement WGSL-specified behaviour for negative
+ // LHS.
+ out << R"(>>)";
+ break;
- out << "(";
- TINT_DEFER(out << ")");
+ case ast::BinaryOp::kAdd:
+ out << "+";
+ break;
+ case ast::BinaryOp::kSubtract:
+ out << "-";
+ break;
+ case ast::BinaryOp::kMultiply:
+ out << "*";
+ break;
+ case ast::BinaryOp::kDivide:
+ out << "/";
+ // BUG(crbug.com/tint/1083): Integer divide/modulo by zero is a FXC
+ // compile error, and undefined behavior in WGSL.
+ if (TypeOf(expr->rhs)->UnwrapRef()->is_integer_scalar_or_vector()) {
+ out << " ";
+ return EmitExpressionOrOneIfZero(out, expr->rhs);
+ }
+ break;
+ case ast::BinaryOp::kModulo:
+ out << "%";
+ // BUG(crbug.com/tint/1083): Integer divide/modulo by zero is a FXC
+ // compile error, and undefined behavior in WGSL.
+ if (TypeOf(expr->rhs)->UnwrapRef()->is_integer_scalar_or_vector()) {
+ out << " ";
+ return EmitExpressionOrOneIfZero(out, expr->rhs);
+ }
+ break;
+ case ast::BinaryOp::kNone:
+ diagnostics_.add_error(diag::System::Writer, "missing binary operation type");
+ return false;
+ }
+ out << " ";
- if (!EmitExpression(out, expr->lhs)) {
- return false;
- }
- out << " ";
-
- switch (expr->op) {
- case ast::BinaryOp::kAnd:
- out << "&";
- break;
- case ast::BinaryOp::kOr:
- out << "|";
- break;
- case ast::BinaryOp::kXor:
- out << "^";
- break;
- case ast::BinaryOp::kLogicalAnd:
- case ast::BinaryOp::kLogicalOr: {
- // These are both handled above.
- TINT_UNREACHABLE(Writer, diagnostics_);
- return false;
- }
- case ast::BinaryOp::kEqual:
- out << "==";
- break;
- case ast::BinaryOp::kNotEqual:
- out << "!=";
- break;
- case ast::BinaryOp::kLessThan:
- out << "<";
- break;
- case ast::BinaryOp::kGreaterThan:
- out << ">";
- break;
- case ast::BinaryOp::kLessThanEqual:
- out << "<=";
- break;
- case ast::BinaryOp::kGreaterThanEqual:
- out << ">=";
- break;
- case ast::BinaryOp::kShiftLeft:
- out << "<<";
- break;
- case ast::BinaryOp::kShiftRight:
- // TODO(dsinclair): MSL is based on C++14, and >> in C++14 has
- // implementation-defined behaviour for negative LHS. We may have to
- // generate extra code to implement WGSL-specified behaviour for negative
- // LHS.
- out << R"(>>)";
- break;
-
- case ast::BinaryOp::kAdd:
- out << "+";
- break;
- case ast::BinaryOp::kSubtract:
- out << "-";
- break;
- case ast::BinaryOp::kMultiply:
- out << "*";
- break;
- case ast::BinaryOp::kDivide:
- out << "/";
- // BUG(crbug.com/tint/1083): Integer divide/modulo by zero is a FXC
- // compile error, and undefined behavior in WGSL.
- if (TypeOf(expr->rhs)->UnwrapRef()->is_integer_scalar_or_vector()) {
- out << " ";
- return EmitExpressionOrOneIfZero(out, expr->rhs);
- }
- break;
- case ast::BinaryOp::kModulo:
- out << "%";
- // BUG(crbug.com/tint/1083): Integer divide/modulo by zero is a FXC
- // compile error, and undefined behavior in WGSL.
- if (TypeOf(expr->rhs)->UnwrapRef()->is_integer_scalar_or_vector()) {
- out << " ";
- return EmitExpressionOrOneIfZero(out, expr->rhs);
- }
- break;
- case ast::BinaryOp::kNone:
- diagnostics_.add_error(diag::System::Writer,
- "missing binary operation type");
- return false;
- }
- out << " ";
-
- if (!EmitExpression(out, expr->rhs)) {
- return false;
- }
+ if (!EmitExpression(out, expr->rhs)) {
+ return false;
+ }
- return true;
+ return true;
}
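To make the operand swap above concrete: a WGSL matrix-vector product is emitted as an HLSL mul() with the operands reversed. The identifiers below are hypothetical.

// Sketch only: the operand swap performed by EmitBinary above for
// matrix/vector multiplies (WGSL input on the left, emitted HLSL on the right).
constexpr const char* kExampleMatVecWgsl = "m * v";
constexpr const char* kExampleMatVecHlsl = "mul(v, m)";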
bool GeneratorImpl::EmitStatements(const ast::StatementList& stmts) {
- for (auto* s : stmts) {
- if (!EmitStatement(s)) {
- return false;
+ for (auto* s : stmts) {
+ if (!EmitStatement(s)) {
+ return false;
+ }
}
- }
- return true;
+ return true;
}
bool GeneratorImpl::EmitStatementsWithIndent(const ast::StatementList& stmts) {
- ScopedIndent si(this);
- return EmitStatements(stmts);
+ ScopedIndent si(this);
+ return EmitStatements(stmts);
}
bool GeneratorImpl::EmitBlock(const ast::BlockStatement* stmt) {
- line() << "{";
- if (!EmitStatementsWithIndent(stmt->statements)) {
- return false;
- }
- line() << "}";
- return true;
+ line() << "{";
+ if (!EmitStatementsWithIndent(stmt->statements)) {
+ return false;
+ }
+ line() << "}";
+ return true;
}
bool GeneratorImpl::EmitBreak(const ast::BreakStatement*) {
- line() << "break;";
- return true;
-}
-
-bool GeneratorImpl::EmitCall(std::ostream& out,
- const ast::CallExpression* expr) {
- auto* call = builder_.Sem().Get(expr);
- auto* target = call->Target();
- return Switch(
- target,
- [&](const sem::Function* func) {
- return EmitFunctionCall(out, call, func);
- },
- [&](const sem::Builtin* builtin) {
- return EmitBuiltinCall(out, call, builtin);
- },
- [&](const sem::TypeConversion* conv) {
- return EmitTypeConversion(out, call, conv);
- },
- [&](const sem::TypeConstructor* ctor) {
- return EmitTypeConstructor(out, call, ctor);
- },
- [&](Default) {
- TINT_ICE(Writer, diagnostics_)
- << "unhandled call target: " << target->TypeInfo().name;
- return false;
- });
+ line() << "break;";
+ return true;
+}
+
+bool GeneratorImpl::EmitCall(std::ostream& out, const ast::CallExpression* expr) {
+ auto* call = builder_.Sem().Get<sem::Call>(expr);
+ auto* target = call->Target();
+ return Switch(
+ target, [&](const sem::Function* func) { return EmitFunctionCall(out, call, func); },
+ [&](const sem::Builtin* builtin) { return EmitBuiltinCall(out, call, builtin); },
+ [&](const sem::TypeConversion* conv) { return EmitTypeConversion(out, call, conv); },
+ [&](const sem::TypeConstructor* ctor) { return EmitTypeConstructor(out, call, ctor); },
+ [&](Default) {
+ TINT_ICE(Writer, diagnostics_) << "unhandled call target: " << target->TypeInfo().name;
+ return false;
+ });
}
bool GeneratorImpl::EmitFunctionCall(std::ostream& out,
const sem::Call* call,
const sem::Function* func) {
- auto* expr = call->Declaration();
+ auto* expr = call->Declaration();
- if (ast::HasAttribute<transform::CalculateArrayLength::BufferSizeIntrinsic>(
- func->Declaration()->attributes)) {
- // Special function generated by the CalculateArrayLength transform for
- // calling X.GetDimensions(Y)
- if (!EmitExpression(out, call->Arguments()[0]->Declaration())) {
- return false;
- }
- out << ".GetDimensions(";
- if (!EmitExpression(out, call->Arguments()[1]->Declaration())) {
- return false;
+ if (ast::HasAttribute<transform::CalculateArrayLength::BufferSizeIntrinsic>(
+ func->Declaration()->attributes)) {
+ // Special function generated by the CalculateArrayLength transform for
+ // calling X.GetDimensions(Y)
+ if (!EmitExpression(out, call->Arguments()[0]->Declaration())) {
+ return false;
+ }
+ out << ".GetDimensions(";
+ if (!EmitExpression(out, call->Arguments()[1]->Declaration())) {
+ return false;
+ }
+ out << ")";
+ return true;
}
- out << ")";
- return true;
- }
-
- if (auto* intrinsic =
- ast::GetAttribute<transform::DecomposeMemoryAccess::Intrinsic>(
- func->Declaration()->attributes)) {
- switch (intrinsic->storage_class) {
- case ast::StorageClass::kUniform:
- return EmitUniformBufferAccess(out, expr, intrinsic);
- case ast::StorageClass::kStorage:
- return EmitStorageBufferAccess(out, expr, intrinsic);
- default:
- TINT_UNREACHABLE(Writer, diagnostics_)
- << "unsupported DecomposeMemoryAccess::Intrinsic storage class:"
- << intrinsic->storage_class;
- return false;
+
+ if (auto* intrinsic = ast::GetAttribute<transform::DecomposeMemoryAccess::Intrinsic>(
+ func->Declaration()->attributes)) {
+ switch (intrinsic->storage_class) {
+ case ast::StorageClass::kUniform:
+ return EmitUniformBufferAccess(out, expr, intrinsic);
+ case ast::StorageClass::kStorage:
+ if (!intrinsic->IsAtomic()) {
+ return EmitStorageBufferAccess(out, expr, intrinsic);
+ }
+ break;
+ default:
+ TINT_UNREACHABLE(Writer, diagnostics_)
+ << "unsupported DecomposeMemoryAccess::Intrinsic storage class:"
+ << intrinsic->storage_class;
+ return false;
+ }
}
- }
- out << builder_.Symbols().NameFor(func->Declaration()->symbol) << "(";
+ out << builder_.Symbols().NameFor(func->Declaration()->symbol) << "(";
- bool first = true;
- for (auto* arg : call->Arguments()) {
- if (!first) {
- out << ", ";
- }
- first = false;
+ bool first = true;
+ for (auto* arg : call->Arguments()) {
+ if (!first) {
+ out << ", ";
+ }
+ first = false;
- if (!EmitExpression(out, arg->Declaration())) {
- return false;
+ if (!EmitExpression(out, arg->Declaration())) {
+ return false;
+ }
}
- }
- out << ")";
- return true;
+ out << ")";
+ return true;
}
bool GeneratorImpl::EmitBuiltinCall(std::ostream& out,
const sem::Call* call,
const sem::Builtin* builtin) {
- auto* expr = call->Declaration();
- if (builtin->IsTexture()) {
- return EmitTextureCall(out, call, builtin);
- }
- if (builtin->Type() == sem::BuiltinType::kSelect) {
- return EmitSelectCall(out, expr);
- }
- if (builtin->Type() == sem::BuiltinType::kModf) {
- return EmitModfCall(out, expr, builtin);
- }
- if (builtin->Type() == sem::BuiltinType::kFrexp) {
- return EmitFrexpCall(out, expr, builtin);
- }
- if (builtin->Type() == sem::BuiltinType::kDegrees) {
- return EmitDegreesCall(out, expr, builtin);
- }
- if (builtin->Type() == sem::BuiltinType::kRadians) {
- return EmitRadiansCall(out, expr, builtin);
- }
- if (builtin->IsDataPacking()) {
- return EmitDataPackingCall(out, expr, builtin);
- }
- if (builtin->IsDataUnpacking()) {
- return EmitDataUnpackingCall(out, expr, builtin);
- }
- if (builtin->IsBarrier()) {
- return EmitBarrierCall(out, builtin);
- }
- if (builtin->IsAtomic()) {
- return EmitWorkgroupAtomicCall(out, expr, builtin);
- }
- auto name = generate_builtin_name(builtin);
- if (name.empty()) {
- return false;
- }
+ const auto type = builtin->Type();
- out << name << "(";
+ auto* expr = call->Declaration();
+ if (builtin->IsTexture()) {
+ return EmitTextureCall(out, call, builtin);
+ }
+ if (type == sem::BuiltinType::kSelect) {
+ return EmitSelectCall(out, expr);
+ }
+ if (type == sem::BuiltinType::kModf) {
+ return EmitModfCall(out, expr, builtin);
+ }
+ if (type == sem::BuiltinType::kFrexp) {
+ return EmitFrexpCall(out, expr, builtin);
+ }
+ if (type == sem::BuiltinType::kDegrees) {
+ return EmitDegreesCall(out, expr, builtin);
+ }
+ if (type == sem::BuiltinType::kRadians) {
+ return EmitRadiansCall(out, expr, builtin);
+ }
+ if (builtin->IsDataPacking()) {
+ return EmitDataPackingCall(out, expr, builtin);
+ }
+ if (builtin->IsDataUnpacking()) {
+ return EmitDataUnpackingCall(out, expr, builtin);
+ }
+ if (builtin->IsBarrier()) {
+ return EmitBarrierCall(out, builtin);
+ }
+ if (builtin->IsAtomic()) {
+ return EmitWorkgroupAtomicCall(out, expr, builtin);
+ }
+ if (builtin->IsDP4a()) {
+ return EmitDP4aCall(out, expr, builtin);
+ }
- bool first = true;
- for (auto* arg : call->Arguments()) {
- if (!first) {
- out << ", ";
+ auto name = generate_builtin_name(builtin);
+ if (name.empty()) {
+ return false;
}
- first = false;
- if (!EmitExpression(out, arg->Declaration())) {
- return false;
+    // Handle single-argument builtins that only accept and return uint (no int overload). We need
+ // to explicitly cast the return value (we also cast the arg for good measure). See
+ // crbug.com/tint/1550
+ if (type == sem::BuiltinType::kCountOneBits || type == sem::BuiltinType::kReverseBits) {
+ auto* arg = call->Arguments()[0];
+ if (arg->Type()->UnwrapRef()->is_signed_scalar_or_vector()) {
+ out << "asint(" << name << "(asuint(";
+ if (!EmitExpression(out, arg->Declaration())) {
+ return false;
+ }
+ out << ")))";
+ return true;
+ }
}
- }
- out << ")";
- return true;
+ out << name << "(";
+
+ bool first = true;
+ for (auto* arg : call->Arguments()) {
+ if (!first) {
+ out << ", ";
+ }
+ first = false;
+
+ if (!EmitExpression(out, arg->Declaration())) {
+ return false;
+ }
+ }
+
+ out << ")";
+
+ return true;
}
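The uint-only handling above wraps signed arguments and results in asuint()/asint(); an illustrative pair for countOneBits, assuming generate_builtin_name maps it to HLSL countbits:

// Sketch only: the cast wrapping added above when a uint-only builtin is
// called with a signed WGSL overload (see crbug.com/tint/1550).
constexpr const char* kExampleCountBitsWgsl = "countOneBits(x)";  // x : i32
constexpr const char* kExampleCountBitsHlsl = "asint(countbits(asuint(x)))";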
bool GeneratorImpl::EmitTypeConversion(std::ostream& out,
const sem::Call* call,
const sem::TypeConversion* conv) {
- if (!EmitType(out, conv->Target(), ast::StorageClass::kNone,
- ast::Access::kReadWrite, "")) {
- return false;
- }
- out << "(";
+ if (!EmitType(out, conv->Target(), ast::StorageClass::kNone, ast::Access::kReadWrite, "")) {
+ return false;
+ }
+ out << "(";
- if (!EmitExpression(out, call->Arguments()[0]->Declaration())) {
- return false;
- }
+ if (!EmitExpression(out, call->Arguments()[0]->Declaration())) {
+ return false;
+ }
- out << ")";
- return true;
+ out << ")";
+ return true;
}
bool GeneratorImpl::EmitTypeConstructor(std::ostream& out,
const sem::Call* call,
const sem::TypeConstructor* ctor) {
- auto* type = call->Type();
-
- // If the type constructor is empty then we need to construct with the zero
- // value for all components.
- if (call->Arguments().empty()) {
- return EmitZeroValue(out, type);
- }
-
- bool brackets = type->IsAnyOf<sem::Array, sem::Struct>();
-
- // For single-value vector initializers, swizzle the scalar to the right
- // vector dimension using .x
- const bool is_single_value_vector_init =
- type->is_scalar_vector() && call->Arguments().size() == 1 &&
- ctor->Parameters()[0]->Type()->is_scalar();
-
- auto it = structure_builders_.find(As<sem::Struct>(type));
- if (it != structure_builders_.end()) {
- out << it->second << "(";
- brackets = false;
- } else if (brackets) {
- out << "{";
- } else {
- if (!EmitType(out, type, ast::StorageClass::kNone, ast::Access::kReadWrite,
- "")) {
- return false;
+ auto* type = call->Type();
+
+ // If the type constructor is empty then we need to construct with the zero
+ // value for all components.
+ if (call->Arguments().empty()) {
+ return EmitZeroValue(out, type);
}
- out << "(";
- }
- if (is_single_value_vector_init) {
- out << "(";
- }
+ if (auto* mat = call->Type()->As<sem::Matrix>()) {
+ if (ctor->Parameters().size() == 1) {
+ // Matrix constructor with single scalar.
+ auto fn = utils::GetOrCreate(matrix_scalar_ctors_, mat, [&]() -> std::string {
+ TextBuffer b;
+ TINT_DEFER(helpers_.Append(b));
+
+ auto name = UniqueIdentifier("build_mat" + std::to_string(mat->columns()) + "x" +
+ std::to_string(mat->rows()));
+ {
+ auto l = line(&b);
+ if (!EmitType(l, mat, ast::StorageClass::kNone, ast::Access::kUndefined, "")) {
+ return "";
+ }
+ l << " " << name << "(";
+ if (!EmitType(l, mat->type(), ast::StorageClass::kNone, ast::Access::kUndefined,
+ "")) {
+ return "";
+ }
+ l << " value) {";
+ }
+ {
+ ScopedIndent si(&b);
+ auto l = line(&b);
+ l << "return ";
+ if (!EmitType(l, mat, ast::StorageClass::kNone, ast::Access::kUndefined, "")) {
+ return "";
+ }
+ l << "(";
+ for (uint32_t i = 0; i < mat->columns() * mat->rows(); i++) {
+ l << ((i > 0) ? ", value" : "value");
+ }
+ l << ");";
+ }
+ line(&b) << "}";
+ return name;
+ });
+ if (fn.empty()) {
+ return false;
+ }
+ out << fn << "(";
+ if (!EmitExpression(out, call->Arguments()[0]->Declaration())) {
+ return false;
+ }
+ out << ")";
+ return true;
+ }
+ }
+
+ bool brackets = type->IsAnyOf<sem::Array, sem::Struct>();
+
+ // For single-value vector initializers, swizzle the scalar to the right
+ // vector dimension using .x
+ const bool is_single_value_vector_init = type->is_scalar_vector() &&
+ call->Arguments().size() == 1 &&
+ ctor->Parameters()[0]->Type()->is_scalar();
- bool first = true;
- for (auto* e : call->Arguments()) {
- if (!first) {
- out << ", ";
+ auto it = structure_builders_.find(As<sem::Struct>(type));
+ if (it != structure_builders_.end()) {
+ out << it->second << "(";
+ brackets = false;
+ } else if (brackets) {
+ out << "{";
+ } else {
+ if (!EmitType(out, type, ast::StorageClass::kNone, ast::Access::kReadWrite, "")) {
+ return false;
+ }
+ out << "(";
+ }
+
+ if (is_single_value_vector_init) {
+ out << "(";
}
- first = false;
- if (!EmitExpression(out, e->Declaration())) {
- return false;
+ bool first = true;
+ for (auto* e : call->Arguments()) {
+ if (!first) {
+ out << ", ";
+ }
+ first = false;
+
+ if (!EmitExpression(out, e->Declaration())) {
+ return false;
+ }
}
- }
- if (is_single_value_vector_init) {
- out << ")." << std::string(type->As<sem::Vector>()->Width(), 'x');
- }
+ if (is_single_value_vector_init) {
+ out << ")." << std::string(type->As<sem::Vector>()->Width(), 'x');
+ }
- out << (brackets ? "}" : ")");
- return true;
+ out << (brackets ? "}" : ")");
+ return true;
}
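
The single-scalar matrix constructor path above generates a helper once per matrix type; for a mat2x2<f32> it looks roughly like this (build_mat2x2 comes from UniqueIdentifier, so the real name may carry a numeric suffix):

    // WGSL: mat2x2<f32>(s), with s : f32
    float2x2 build_mat2x2(float value) {
      return float2x2(value, value, value, value);
    }

The call site then becomes build_mat2x2(s), and later constructors of the same matrix type reuse the cached helper via matrix_scalar_ctors_.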
bool GeneratorImpl::EmitUniformBufferAccess(
std::ostream& out,
const ast::CallExpression* expr,
const transform::DecomposeMemoryAccess::Intrinsic* intrinsic) {
- const auto& args = expr->args;
- auto* offset_arg = builder_.Sem().Get(args[1]);
-
- uint32_t scalar_offset_value = 0;
- std::string scalar_offset_expr;
-
- // If true, use scalar_offset_value, otherwise use scalar_offset_expr
- bool scalar_offset_constant = false;
-
- if (auto val = offset_arg->ConstantValue()) {
- TINT_ASSERT(Writer, val.Type()->Is<sem::U32>());
- scalar_offset_value = val.Elements()[0].u32;
- scalar_offset_value /= 4; // bytes -> scalar index
- scalar_offset_constant = true;
- }
-
- if (!scalar_offset_constant) {
- // UBO offset not compile-time known.
- // Calculate the scalar offset into a temporary.
- scalar_offset_expr = UniqueIdentifier("scalar_offset");
- auto pre = line();
- pre << "const uint " << scalar_offset_expr << " = (";
- if (!EmitExpression(pre, args[1])) { // offset
- return false;
- }
- pre << ") / 4;";
- }
-
- using Op = transform::DecomposeMemoryAccess::Intrinsic::Op;
- using DataType = transform::DecomposeMemoryAccess::Intrinsic::DataType;
- switch (intrinsic->op) {
- case Op::kLoad: {
- auto cast = [&](const char* to, auto&& load) {
- out << to << "(";
- auto result = load();
- out << ")";
- return result;
- };
- auto load_scalar = [&]() {
- if (!EmitExpression(out, args[0])) { // buffer
- return false;
- }
- if (scalar_offset_constant) {
- char swizzle[] = {'x', 'y', 'z', 'w'};
- out << "[" << (scalar_offset_value / 4) << "]."
- << swizzle[scalar_offset_value & 3];
- } else {
- out << "[" << scalar_offset_expr << " / 4][" << scalar_offset_expr
- << " % 4]";
+ const auto& args = expr->args;
+ auto* offset_arg = builder_.Sem().Get(args[1]);
+
+ uint32_t scalar_offset_value = 0;
+ std::string scalar_offset_expr;
+
+ // If true, use scalar_offset_value, otherwise use scalar_offset_expr
+ bool scalar_offset_constant = false;
+
+ if (auto val = offset_arg->ConstantValue()) {
+ TINT_ASSERT(Writer, val.Type()->Is<sem::U32>());
+ scalar_offset_value = static_cast<uint32_t>(val.Element<AInt>(0).value);
+ scalar_offset_value /= 4; // bytes -> scalar index
+ scalar_offset_constant = true;
+ }
+
+ if (!scalar_offset_constant) {
+ // UBO offset not compile-time known.
+ // Calculate the scalar offset into a temporary.
+ scalar_offset_expr = UniqueIdentifier("scalar_offset");
+ auto pre = line();
+ pre << "const uint " << scalar_offset_expr << " = (";
+ if (!EmitExpression(pre, args[1])) { // offset
+ return false;
}
- return true;
- };
- // Has a minimum alignment of 8 bytes, so is either .xy or .zw
- auto load_vec2 = [&] {
- if (scalar_offset_constant) {
- if (!EmitExpression(out, args[0])) { // buffer
- return false;
- }
- out << "[" << (scalar_offset_value / 4) << "]";
- out << ((scalar_offset_value & 2) == 0 ? ".xy" : ".zw");
- } else {
- std::string ubo_load = UniqueIdentifier("ubo_load");
- {
- auto pre = line();
- pre << "uint4 " << ubo_load << " = ";
- if (!EmitExpression(pre, args[0])) { // buffer
- return false;
+ pre << ") / 4;";
+ }
+
+ using Op = transform::DecomposeMemoryAccess::Intrinsic::Op;
+ using DataType = transform::DecomposeMemoryAccess::Intrinsic::DataType;
+ switch (intrinsic->op) {
+ case Op::kLoad: {
+ auto cast = [&](const char* to, auto&& load) {
+ out << to << "(";
+ auto result = load();
+ out << ")";
+ return result;
+ };
+ auto load_scalar = [&]() {
+ if (!EmitExpression(out, args[0])) { // buffer
+ return false;
+ }
+ if (scalar_offset_constant) {
+ char swizzle[] = {'x', 'y', 'z', 'w'};
+ out << "[" << (scalar_offset_value / 4) << "]."
+ << swizzle[scalar_offset_value & 3];
+ } else {
+ out << "[" << scalar_offset_expr << " / 4][" << scalar_offset_expr << " % 4]";
+ }
+ return true;
+ };
+ // Has a minimum alignment of 8 bytes, so is either .xy or .zw
+ auto load_vec2 = [&] {
+ if (scalar_offset_constant) {
+ if (!EmitExpression(out, args[0])) { // buffer
+ return false;
+ }
+ out << "[" << (scalar_offset_value / 4) << "]";
+ out << ((scalar_offset_value & 2) == 0 ? ".xy" : ".zw");
+ } else {
+ std::string ubo_load = UniqueIdentifier("ubo_load");
+ {
+ auto pre = line();
+ pre << "uint4 " << ubo_load << " = ";
+ if (!EmitExpression(pre, args[0])) { // buffer
+ return false;
+ }
+ pre << "[" << scalar_offset_expr << " / 4];";
+ }
+ out << "((" << scalar_offset_expr << " & 2) ? " << ubo_load
+ << ".zw : " << ubo_load << ".xy)";
+ }
+ return true;
+ };
+ // vec4 has a minimum alignment of 16 bytes, easiest case
+ auto load_vec4 = [&] {
+ if (!EmitExpression(out, args[0])) { // buffer
+ return false;
+ }
+ if (scalar_offset_constant) {
+ out << "[" << (scalar_offset_value / 4) << "]";
+ } else {
+ out << "[" << scalar_offset_expr << " / 4]";
+ }
+ return true;
+ };
+ // vec3 has a minimum alignment of 16 bytes, so is just a .xyz swizzle
+ auto load_vec3 = [&] {
+ if (!load_vec4()) {
+ return false;
+ }
+ out << ".xyz";
+ return true;
+ };
+ switch (intrinsic->type) {
+ case DataType::kU32:
+ return load_scalar();
+ case DataType::kF32:
+ return cast("asfloat", load_scalar);
+ case DataType::kI32:
+ return cast("asint", load_scalar);
+ case DataType::kVec2U32:
+ return load_vec2();
+ case DataType::kVec2F32:
+ return cast("asfloat", load_vec2);
+ case DataType::kVec2I32:
+ return cast("asint", load_vec2);
+ case DataType::kVec3U32:
+ return load_vec3();
+ case DataType::kVec3F32:
+ return cast("asfloat", load_vec3);
+ case DataType::kVec3I32:
+ return cast("asint", load_vec3);
+ case DataType::kVec4U32:
+ return load_vec4();
+ case DataType::kVec4F32:
+ return cast("asfloat", load_vec4);
+ case DataType::kVec4I32:
+ return cast("asint", load_vec4);
}
- pre << "[" << scalar_offset_expr << " / 4];";
- }
- out << "((" << scalar_offset_expr << " & 2) ? " << ubo_load
- << ".zw : " << ubo_load << ".xy)";
- }
- return true;
- };
- // vec4 has a minimum alignment of 16 bytes, easiest case
- auto load_vec4 = [&] {
- if (!EmitExpression(out, args[0])) { // buffer
- return false;
- }
- if (scalar_offset_constant) {
- out << "[" << (scalar_offset_value / 4) << "]";
- } else {
- out << "[" << scalar_offset_expr << " / 4]";
- }
- return true;
- };
- // vec3 has a minimum alignment of 16 bytes, so is just a .xyz swizzle
- auto load_vec3 = [&] {
- if (!load_vec4()) {
- return false;
+ TINT_UNREACHABLE(Writer, diagnostics_)
+ << "unsupported DecomposeMemoryAccess::Intrinsic::DataType: "
+ << static_cast<int>(intrinsic->type);
+ return false;
}
- out << ".xyz";
- return true;
- };
- switch (intrinsic->type) {
- case DataType::kU32:
- return load_scalar();
- case DataType::kF32:
- return cast("asfloat", load_scalar);
- case DataType::kI32:
- return cast("asint", load_scalar);
- case DataType::kVec2U32:
- return load_vec2();
- case DataType::kVec2F32:
- return cast("asfloat", load_vec2);
- case DataType::kVec2I32:
- return cast("asint", load_vec2);
- case DataType::kVec3U32:
- return load_vec3();
- case DataType::kVec3F32:
- return cast("asfloat", load_vec3);
- case DataType::kVec3I32:
- return cast("asint", load_vec3);
- case DataType::kVec4U32:
- return load_vec4();
- case DataType::kVec4F32:
- return cast("asfloat", load_vec4);
- case DataType::kVec4I32:
- return cast("asint", load_vec4);
- }
- TINT_UNREACHABLE(Writer, diagnostics_)
- << "unsupported DecomposeMemoryAccess::Intrinsic::DataType: "
- << static_cast<int>(intrinsic->type);
- return false;
- }
- default:
- break;
- }
- TINT_UNREACHABLE(Writer, diagnostics_)
- << "unsupported DecomposeMemoryAccess::Intrinsic::Op: "
- << static_cast<int>(intrinsic->op);
- return false;
+ default:
+ break;
+ }
+ TINT_UNREACHABLE(Writer, diagnostics_)
+ << "unsupported DecomposeMemoryAccess::Intrinsic::Op: " << static_cast<int>(intrinsic->op);
+ return false;
}
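
A rough illustration of the uniform-buffer load paths above, assuming the expression emitted for args[0] is a uint4 array named buffer and that the scalar_offset temporary keeps its base name:

    // f32 at a constant byte offset of 4 (scalar index 1):
    asfloat(buffer[0].y)

    // vec3<f32> at a runtime byte offset:
    const uint scalar_offset = (offset) / 4;
    asfloat(buffer[scalar_offset / 4].xyz)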
bool GeneratorImpl::EmitStorageBufferAccess(
std::ostream& out,
const ast::CallExpression* expr,
const transform::DecomposeMemoryAccess::Intrinsic* intrinsic) {
- const auto& args = expr->args;
+ const auto& args = expr->args;
- using Op = transform::DecomposeMemoryAccess::Intrinsic::Op;
- using DataType = transform::DecomposeMemoryAccess::Intrinsic::DataType;
- switch (intrinsic->op) {
- case Op::kLoad: {
- auto load = [&](const char* cast, int n) {
- if (cast) {
- out << cast << "(";
- }
- if (!EmitExpression(out, args[0])) { // buffer
- return false;
+ using Op = transform::DecomposeMemoryAccess::Intrinsic::Op;
+ using DataType = transform::DecomposeMemoryAccess::Intrinsic::DataType;
+ switch (intrinsic->op) {
+ case Op::kLoad: {
+ auto load = [&](const char* cast, int n) {
+ if (cast) {
+ out << cast << "(";
+ }
+ if (!EmitExpression(out, args[0])) { // buffer
+ return false;
+ }
+ out << ".Load";
+ if (n > 1) {
+ out << n;
+ }
+ ScopedParen sp(out);
+ if (!EmitExpression(out, args[1])) { // offset
+ return false;
+ }
+ if (cast) {
+ out << ")";
+ }
+ return true;
+ };
+ switch (intrinsic->type) {
+ case DataType::kU32:
+ return load(nullptr, 1);
+ case DataType::kF32:
+ return load("asfloat", 1);
+ case DataType::kI32:
+ return load("asint", 1);
+ case DataType::kVec2U32:
+ return load(nullptr, 2);
+ case DataType::kVec2F32:
+ return load("asfloat", 2);
+ case DataType::kVec2I32:
+ return load("asint", 2);
+ case DataType::kVec3U32:
+ return load(nullptr, 3);
+ case DataType::kVec3F32:
+ return load("asfloat", 3);
+ case DataType::kVec3I32:
+ return load("asint", 3);
+ case DataType::kVec4U32:
+ return load(nullptr, 4);
+ case DataType::kVec4F32:
+ return load("asfloat", 4);
+ case DataType::kVec4I32:
+ return load("asint", 4);
+ }
+ TINT_UNREACHABLE(Writer, diagnostics_)
+ << "unsupported DecomposeMemoryAccess::Intrinsic::DataType: "
+ << static_cast<int>(intrinsic->type);
+ return false;
}
- out << ".Load";
- if (n > 1) {
- out << n;
+
+ case Op::kStore: {
+ auto store = [&](int n) {
+ if (!EmitExpression(out, args[0])) { // buffer
+ return false;
+ }
+ out << ".Store";
+ if (n > 1) {
+ out << n;
+ }
+ ScopedParen sp1(out);
+ if (!EmitExpression(out, args[1])) { // offset
+ return false;
+ }
+ out << ", asuint";
+ ScopedParen sp2(out);
+ if (!EmitExpression(out, args[2])) { // value
+ return false;
+ }
+ return true;
+ };
+ switch (intrinsic->type) {
+ case DataType::kU32:
+ return store(1);
+ case DataType::kF32:
+ return store(1);
+ case DataType::kI32:
+ return store(1);
+ case DataType::kVec2U32:
+ return store(2);
+ case DataType::kVec2F32:
+ return store(2);
+ case DataType::kVec2I32:
+ return store(2);
+ case DataType::kVec3U32:
+ return store(3);
+ case DataType::kVec3F32:
+ return store(3);
+ case DataType::kVec3I32:
+ return store(3);
+ case DataType::kVec4U32:
+ return store(4);
+ case DataType::kVec4F32:
+ return store(4);
+ case DataType::kVec4I32:
+ return store(4);
+ }
+ TINT_UNREACHABLE(Writer, diagnostics_)
+ << "unsupported DecomposeMemoryAccess::Intrinsic::DataType: "
+ << static_cast<int>(intrinsic->type);
+ return false;
}
- ScopedParen sp(out);
- if (!EmitExpression(out, args[1])) { // offset
- return false;
+ default:
+            // Break out to the error case below.
+ // Note that atomic intrinsics are generated as functions.
+ break;
+ }
+
+ TINT_UNREACHABLE(Writer, diagnostics_)
+ << "unsupported DecomposeMemoryAccess::Intrinsic::Op: " << static_cast<int>(intrinsic->op);
+ return false;
+}
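
A sketch of the ByteAddressBuffer code the storage load/store paths above produce, with buffer, the literal offsets, and value standing in for the emitted expressions:

    // load vec3<f32> at byte offset 16:
    asfloat(buffer.Load3(16u))

    // store vec2<i32> at byte offset 8:
    buffer.Store2(8u, asuint(value))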
+
+bool GeneratorImpl::EmitStorageAtomicIntrinsic(
+ const ast::Function* func,
+ const transform::DecomposeMemoryAccess::Intrinsic* intrinsic) {
+ using Op = transform::DecomposeMemoryAccess::Intrinsic::Op;
+
+ const sem::Function* sem_func = builder_.Sem().Get(func);
+ auto* result_ty = sem_func->ReturnType();
+ const auto& params = sem_func->Parameters();
+ const auto name = builder_.Symbols().NameFor(func->symbol);
+ auto& buf = *current_buffer_;
+
+ auto rmw = [&](const char* hlsl) -> bool {
+ {
+ auto fn = line(&buf);
+ if (!EmitTypeAndName(fn, result_ty, ast::StorageClass::kNone, ast::Access::kUndefined,
+ name)) {
+ return false;
+ }
+ fn << "(RWByteAddressBuffer buffer, uint offset, ";
+ if (!EmitTypeAndName(fn, result_ty, ast::StorageClass::kNone, ast::Access::kUndefined,
+ "value")) {
+ return false;
+ }
+ fn << ") {";
}
- if (cast) {
- out << ")";
+
+ buf.IncrementIndent();
+ TINT_DEFER({
+ buf.DecrementIndent();
+ line(&buf) << "}";
+ line(&buf);
+ });
+
+ {
+ auto l = line(&buf);
+ if (!EmitTypeAndName(l, result_ty, ast::StorageClass::kNone, ast::Access::kUndefined,
+ "original_value")) {
+ return false;
+ }
+ l << " = 0;";
}
- return true;
- };
- switch (intrinsic->type) {
- case DataType::kU32:
- return load(nullptr, 1);
- case DataType::kF32:
- return load("asfloat", 1);
- case DataType::kI32:
- return load("asint", 1);
- case DataType::kVec2U32:
- return load(nullptr, 2);
- case DataType::kVec2F32:
- return load("asfloat", 2);
- case DataType::kVec2I32:
- return load("asint", 2);
- case DataType::kVec3U32:
- return load(nullptr, 3);
- case DataType::kVec3F32:
- return load("asfloat", 3);
- case DataType::kVec3I32:
- return load("asint", 3);
- case DataType::kVec4U32:
- return load(nullptr, 4);
- case DataType::kVec4F32:
- return load("asfloat", 4);
- case DataType::kVec4I32:
- return load("asint", 4);
- }
- TINT_UNREACHABLE(Writer, diagnostics_)
- << "unsupported DecomposeMemoryAccess::Intrinsic::DataType: "
- << static_cast<int>(intrinsic->type);
- return false;
- }
-
- case Op::kStore: {
- auto store = [&](int n) {
- if (!EmitExpression(out, args[0])) { // buffer
- return false;
- }
- out << ".Store";
- if (n > 1) {
- out << n;
- }
- ScopedParen sp1(out);
- if (!EmitExpression(out, args[1])) { // offset
- return false;
- }
- out << ", asuint";
- ScopedParen sp2(out);
- if (!EmitExpression(out, args[2])) { // value
- return false;
+ {
+ auto l = line(&buf);
+ l << "buffer." << hlsl << "(offset, ";
+ if (intrinsic->op == Op::kAtomicSub) {
+ l << "-";
+ }
+ l << "value, original_value);";
}
+ line(&buf) << "return original_value;";
return true;
- };
- switch (intrinsic->type) {
- case DataType::kU32:
- return store(1);
- case DataType::kF32:
- return store(1);
- case DataType::kI32:
- return store(1);
- case DataType::kVec2U32:
- return store(2);
- case DataType::kVec2F32:
- return store(2);
- case DataType::kVec2I32:
- return store(2);
- case DataType::kVec3U32:
- return store(3);
- case DataType::kVec3F32:
- return store(3);
- case DataType::kVec3I32:
- return store(3);
- case DataType::kVec4U32:
- return store(4);
- case DataType::kVec4F32:
- return store(4);
- case DataType::kVec4I32:
- return store(4);
- }
- TINT_UNREACHABLE(Writer, diagnostics_)
- << "unsupported DecomposeMemoryAccess::Intrinsic::DataType: "
- << static_cast<int>(intrinsic->type);
- return false;
- }
-
- case Op::kAtomicLoad:
- case Op::kAtomicStore:
- case Op::kAtomicAdd:
- case Op::kAtomicSub:
- case Op::kAtomicMax:
- case Op::kAtomicMin:
- case Op::kAtomicAnd:
- case Op::kAtomicOr:
- case Op::kAtomicXor:
- case Op::kAtomicExchange:
- case Op::kAtomicCompareExchangeWeak:
- return EmitStorageAtomicCall(out, expr, intrinsic);
- }
-
- TINT_UNREACHABLE(Writer, diagnostics_)
- << "unsupported DecomposeMemoryAccess::Intrinsic::Op: "
- << static_cast<int>(intrinsic->op);
- return false;
-}
-
-bool GeneratorImpl::EmitStorageAtomicCall(
- std::ostream& out,
- const ast::CallExpression* expr,
- const transform::DecomposeMemoryAccess::Intrinsic* intrinsic) {
- using Op = transform::DecomposeMemoryAccess::Intrinsic::Op;
-
- auto* result_ty = TypeOf(expr);
-
- auto& buf = helpers_;
-
- // generate_helper() generates a helper function that translates the
- // DecomposeMemoryAccess::Intrinsic call into the corresponding HLSL
- // atomic intrinsic function.
- auto generate_helper = [&]() -> std::string {
- auto rmw = [&](const char* wgsl, const char* hlsl) -> std::string {
- auto name = UniqueIdentifier(wgsl);
- {
- auto fn = line(&buf);
- if (!EmitTypeAndName(fn, result_ty, ast::StorageClass::kNone,
- ast::Access::kUndefined, name)) {
- return "";
- }
- fn << "(RWByteAddressBuffer buffer, uint offset, ";
- if (!EmitTypeAndName(fn, result_ty, ast::StorageClass::kNone,
- ast::Access::kUndefined, "value")) {
- return "";
- }
- fn << ") {";
- }
-
- buf.IncrementIndent();
- TINT_DEFER({
- buf.DecrementIndent();
- line(&buf) << "}";
- line(&buf);
- });
-
- {
- auto l = line(&buf);
- if (!EmitTypeAndName(l, result_ty, ast::StorageClass::kNone,
- ast::Access::kUndefined, "original_value")) {
- return "";
- }
- l << " = 0;";
- }
- {
- auto l = line(&buf);
- l << "buffer." << hlsl << "(offset, ";
- if (intrinsic->op == Op::kAtomicSub) {
- l << "-";
- }
- l << "value, original_value);";
- }
- line(&buf) << "return original_value;";
- return name;
};
switch (intrinsic->op) {
- case Op::kAtomicAdd:
- return rmw("atomicAdd", "InterlockedAdd");
+ case Op::kAtomicAdd:
+ return rmw("InterlockedAdd");
- case Op::kAtomicSub:
- // Use add with the operand negated.
- return rmw("atomicSub", "InterlockedAdd");
+ case Op::kAtomicSub:
+ // Use add with the operand negated.
+ return rmw("InterlockedAdd");
- case Op::kAtomicMax:
- return rmw("atomicMax", "InterlockedMax");
+ case Op::kAtomicMax:
+ return rmw("InterlockedMax");
- case Op::kAtomicMin:
- return rmw("atomicMin", "InterlockedMin");
+ case Op::kAtomicMin:
+ return rmw("InterlockedMin");
- case Op::kAtomicAnd:
- return rmw("atomicAnd", "InterlockedAnd");
+ case Op::kAtomicAnd:
+ return rmw("InterlockedAnd");
- case Op::kAtomicOr:
- return rmw("atomicOr", "InterlockedOr");
+ case Op::kAtomicOr:
+ return rmw("InterlockedOr");
- case Op::kAtomicXor:
- return rmw("atomicXor", "InterlockedXor");
+ case Op::kAtomicXor:
+ return rmw("InterlockedXor");
- case Op::kAtomicExchange:
- return rmw("atomicExchange", "InterlockedExchange");
+ case Op::kAtomicExchange:
+ return rmw("InterlockedExchange");
- case Op::kAtomicLoad: {
- // HLSL does not have an InterlockedLoad, so we emulate it with
- // InterlockedOr using 0 as the OR value
- auto name = UniqueIdentifier("atomicLoad");
- {
- auto fn = line(&buf);
- if (!EmitTypeAndName(fn, result_ty, ast::StorageClass::kNone,
- ast::Access::kUndefined, name)) {
- return "";
- }
- fn << "(RWByteAddressBuffer buffer, uint offset) {";
- }
+ case Op::kAtomicLoad: {
+ // HLSL does not have an InterlockedLoad, so we emulate it with
+ // InterlockedOr using 0 as the OR value
+ {
+ auto fn = line(&buf);
+ if (!EmitTypeAndName(fn, result_ty, ast::StorageClass::kNone,
+ ast::Access::kUndefined, name)) {
+ return false;
+ }
+ fn << "(RWByteAddressBuffer buffer, uint offset) {";
+ }
- buf.IncrementIndent();
- TINT_DEFER({
- buf.DecrementIndent();
- line(&buf) << "}";
- line(&buf);
- });
+ buf.IncrementIndent();
+ TINT_DEFER({
+ buf.DecrementIndent();
+ line(&buf) << "}";
+ line(&buf);
+ });
- {
- auto l = line(&buf);
- if (!EmitTypeAndName(l, result_ty, ast::StorageClass::kNone,
- ast::Access::kUndefined, "value")) {
- return "";
- }
- l << " = 0;";
- }
-
- line(&buf) << "buffer.InterlockedOr(offset, 0, value);";
- line(&buf) << "return value;";
- return name;
- }
- case Op::kAtomicStore: {
- // HLSL does not have an InterlockedStore, so we emulate it with
- // InterlockedExchange and discard the returned value
- auto* value_ty = TypeOf(expr->args[2])->UnwrapRef();
- auto name = UniqueIdentifier("atomicStore");
- {
- auto fn = line(&buf);
- fn << "void " << name << "(RWByteAddressBuffer buffer, uint offset, ";
- if (!EmitTypeAndName(fn, value_ty, ast::StorageClass::kNone,
- ast::Access::kUndefined, "value")) {
- return "";
- }
- fn << ") {";
+ {
+ auto l = line(&buf);
+ if (!EmitTypeAndName(l, result_ty, ast::StorageClass::kNone,
+ ast::Access::kUndefined, "value")) {
+ return false;
+ }
+ l << " = 0;";
+ }
+
+ line(&buf) << "buffer.InterlockedOr(offset, 0, value);";
+ line(&buf) << "return value;";
+ return true;
}
+ case Op::kAtomicStore: {
+ // HLSL does not have an InterlockedStore, so we emulate it with
+ // InterlockedExchange and discard the returned value
+ auto* value_ty = params[2]->Type()->UnwrapRef();
+ {
+ auto fn = line(&buf);
+ fn << "void " << name << "(RWByteAddressBuffer buffer, uint offset, ";
+ if (!EmitTypeAndName(fn, value_ty, ast::StorageClass::kNone,
+ ast::Access::kUndefined, "value")) {
+ return false;
+ }
+ fn << ") {";
+ }
- buf.IncrementIndent();
- TINT_DEFER({
- buf.DecrementIndent();
- line(&buf) << "}";
- line(&buf);
- });
+ buf.IncrementIndent();
+ TINT_DEFER({
+ buf.DecrementIndent();
+ line(&buf) << "}";
+ line(&buf);
+ });
- {
- auto l = line(&buf);
- if (!EmitTypeAndName(l, value_ty, ast::StorageClass::kNone,
- ast::Access::kUndefined, "ignored")) {
- return "";
- }
- l << ";";
- }
- line(&buf) << "buffer.InterlockedExchange(offset, value, ignored);";
- return name;
- }
- case Op::kAtomicCompareExchangeWeak: {
- auto* value_ty = TypeOf(expr->args[2])->UnwrapRef();
-
- auto name = UniqueIdentifier("atomicCompareExchangeWeak");
- {
- auto fn = line(&buf);
- if (!EmitTypeAndName(fn, result_ty, ast::StorageClass::kNone,
- ast::Access::kUndefined, name)) {
- return "";
- }
- fn << "(RWByteAddressBuffer buffer, uint offset, ";
- if (!EmitTypeAndName(fn, value_ty, ast::StorageClass::kNone,
- ast::Access::kUndefined, "compare")) {
- return "";
- }
- fn << ", ";
- if (!EmitTypeAndName(fn, value_ty, ast::StorageClass::kNone,
- ast::Access::kUndefined, "value")) {
- return "";
- }
- fn << ") {";
+ {
+ auto l = line(&buf);
+ if (!EmitTypeAndName(l, value_ty, ast::StorageClass::kNone, ast::Access::kUndefined,
+ "ignored")) {
+ return false;
+ }
+ l << ";";
+ }
+ line(&buf) << "buffer.InterlockedExchange(offset, value, ignored);";
+ return true;
}
+ case Op::kAtomicCompareExchangeWeak: {
+ // NOTE: We don't need to emit the return type struct here as DecomposeMemoryAccess
+ // already added it to the AST, and it should have already been emitted by now.
+ auto* value_ty = params[2]->Type()->UnwrapRef();
+ {
+ auto fn = line(&buf);
+ if (!EmitTypeAndName(fn, result_ty, ast::StorageClass::kNone,
+ ast::Access::kUndefined, name)) {
+ return false;
+ }
+ fn << "(RWByteAddressBuffer buffer, uint offset, ";
+ if (!EmitTypeAndName(fn, value_ty, ast::StorageClass::kNone,
+ ast::Access::kUndefined, "compare")) {
+ return false;
+ }
+ fn << ", ";
+ if (!EmitTypeAndName(fn, value_ty, ast::StorageClass::kNone,
+ ast::Access::kUndefined, "value")) {
+ return false;
+ }
+ fn << ") {";
+ }
- buf.IncrementIndent();
- TINT_DEFER({
- buf.DecrementIndent();
- line(&buf) << "}";
- line(&buf);
- });
+ buf.IncrementIndent();
+ TINT_DEFER({
+ buf.DecrementIndent();
+ line(&buf) << "}";
+ line(&buf);
+ });
+
+ { // T result = {0};
+ auto l = line(&buf);
+ if (!EmitTypeAndName(l, result_ty, ast::StorageClass::kNone,
+ ast::Access::kUndefined, "result")) {
+ return false;
+ }
+ l << "=";
+ if (!EmitZeroValue(l, result_ty)) {
+ return false;
+ }
+ l << ";";
+ }
- { // T result = {0, 0};
- auto l = line(&buf);
- if (!EmitTypeAndName(l, result_ty, ast::StorageClass::kNone,
- ast::Access::kUndefined, "result")) {
- return "";
- }
- l << " = {0, 0};";
- }
- line(&buf) << "buffer.InterlockedCompareExchange(offset, compare, "
- "value, result.x);";
- line(&buf) << "result.y = result.x == compare;";
- line(&buf) << "return result;";
- return name;
- }
- default:
- break;
+ line(&buf) << "buffer.InterlockedCompareExchange(offset, compare, value, "
+ "result.old_value);";
+ line(&buf) << "result.exchanged = result.old_value == compare;";
+ line(&buf) << "return result;";
+
+ return true;
+ }
+ default:
+ break;
}
+
TINT_UNREACHABLE(Writer, diagnostics_)
<< "unsupported atomic DecomposeMemoryAccess::Intrinsic::Op: "
<< static_cast<int>(intrinsic->op);
- return "";
- };
-
- auto func = utils::GetOrCreate(dma_intrinsics_,
- DMAIntrinsic{intrinsic->op, intrinsic->type},
- generate_helper);
- if (func.empty()) {
return false;
- }
-
- out << func;
- {
- ScopedParen sp(out);
- bool first = true;
- for (auto* arg : expr->args) {
- if (!first) {
- out << ", ";
- }
- first = false;
- if (!EmitExpression(out, arg)) {
- return false;
- }
- }
- }
-
- return true;
}
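
For an i32 atomicAdd the rmw helper above expands to roughly the following HLSL; the function name is whatever symbol the DecomposeMemoryAccess transform gave the intrinsic, so tint_atomicAdd is only a placeholder:

    int tint_atomicAdd(RWByteAddressBuffer buffer, uint offset, int value) {
      int original_value = 0;
      buffer.InterlockedAdd(offset, value, original_value);
      return original_value;
    }

atomicSub reuses the same shape, calling InterlockedAdd with -value.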
bool GeneratorImpl::EmitWorkgroupAtomicCall(std::ostream& out,
const ast::CallExpression* expr,
const sem::Builtin* builtin) {
- std::string result = UniqueIdentifier("atomic_result");
-
- if (!builtin->ReturnType()->Is<sem::Void>()) {
- auto pre = line();
- if (!EmitTypeAndName(pre, builtin->ReturnType(), ast::StorageClass::kNone,
- ast::Access::kUndefined, result)) {
- return false;
- }
- pre << " = ";
- if (!EmitZeroValue(pre, builtin->ReturnType())) {
- return false;
- }
- pre << ";";
- }
+ std::string result = UniqueIdentifier("atomic_result");
- auto call = [&](const char* name) {
- auto pre = line();
- pre << name;
-
- {
- ScopedParen sp(pre);
- for (size_t i = 0; i < expr->args.size(); i++) {
- auto* arg = expr->args[i];
- if (i > 0) {
- pre << ", ";
- }
- if (i == 1 && builtin->Type() == sem::BuiltinType::kAtomicSub) {
- // Sub uses InterlockedAdd with the operand negated.
- pre << "-";
- }
- if (!EmitExpression(pre, arg)) {
- return false;
- }
- }
-
- pre << ", " << result;
- }
-
- pre << ";";
-
- out << result;
- return true;
- };
-
- switch (builtin->Type()) {
- case sem::BuiltinType::kAtomicLoad: {
- // HLSL does not have an InterlockedLoad, so we emulate it with
- // InterlockedOr using 0 as the OR value
- auto pre = line();
- pre << "InterlockedOr";
- {
- ScopedParen sp(pre);
- if (!EmitExpression(pre, expr->args[0])) {
- return false;
- }
- pre << ", 0, " << result;
- }
- pre << ";";
-
- out << result;
- return true;
- }
- case sem::BuiltinType::kAtomicStore: {
- // HLSL does not have an InterlockedStore, so we emulate it with
- // InterlockedExchange and discard the returned value
- { // T result = 0;
+ if (!builtin->ReturnType()->Is<sem::Void>()) {
auto pre = line();
- auto* value_ty = builtin->Parameters()[1]->Type()->UnwrapRef();
- if (!EmitTypeAndName(pre, value_ty, ast::StorageClass::kNone,
+ if (!EmitTypeAndName(pre, builtin->ReturnType(), ast::StorageClass::kNone,
ast::Access::kUndefined, result)) {
- return false;
+ return false;
}
pre << " = ";
- if (!EmitZeroValue(pre, value_ty)) {
- return false;
+ if (!EmitZeroValue(pre, builtin->ReturnType())) {
+ return false;
}
pre << ";";
- }
-
- out << "InterlockedExchange";
- {
- ScopedParen sp(out);
- if (!EmitExpression(out, expr->args[0])) {
- return false;
- }
- out << ", ";
- if (!EmitExpression(out, expr->args[1])) {
- return false;
- }
- out << ", " << result;
- }
- return true;
}
- case sem::BuiltinType::kAtomicCompareExchangeWeak: {
- auto* dest = expr->args[0];
- auto* compare_value = expr->args[1];
- auto* value = expr->args[2];
-
- std::string compare = UniqueIdentifier("atomic_compare_value");
- { // T compare_value = <compare_value>;
+ auto call = [&](const char* name) {
auto pre = line();
- if (!EmitTypeAndName(pre, TypeOf(compare_value),
- ast::StorageClass::kNone, ast::Access::kUndefined,
- compare)) {
- return false;
- }
- pre << " = ";
- if (!EmitExpression(pre, compare_value)) {
- return false;
- }
- pre << ";";
- }
+ pre << name;
- { // InterlockedCompareExchange(dst, compare, value, result.x);
- auto pre = line();
- pre << "InterlockedCompareExchange";
{
- ScopedParen sp(pre);
- if (!EmitExpression(pre, dest)) {
- return false;
- }
- pre << ", " << compare << ", ";
- if (!EmitExpression(pre, value)) {
- return false;
- }
- pre << ", " << result << ".x";
+ ScopedParen sp(pre);
+ for (size_t i = 0; i < expr->args.size(); i++) {
+ auto* arg = expr->args[i];
+ if (i > 0) {
+ pre << ", ";
+ }
+ if (i == 1 && builtin->Type() == sem::BuiltinType::kAtomicSub) {
+ // Sub uses InterlockedAdd with the operand negated.
+ pre << "-";
+ }
+ if (!EmitExpression(pre, arg)) {
+ return false;
+ }
+ }
+
+ pre << ", " << result;
}
+
pre << ";";
- }
- { // result.y = result.x == compare;
- line() << result << ".y = " << result << ".x == " << compare << ";";
- }
+ out << result;
+ return true;
+ };
- out << result;
- return true;
- }
+ switch (builtin->Type()) {
+ case sem::BuiltinType::kAtomicLoad: {
+ // HLSL does not have an InterlockedLoad, so we emulate it with
+ // InterlockedOr using 0 as the OR value
+ auto pre = line();
+ pre << "InterlockedOr";
+ {
+ ScopedParen sp(pre);
+ if (!EmitExpression(pre, expr->args[0])) {
+ return false;
+ }
+ pre << ", 0, " << result;
+ }
+ pre << ";";
- case sem::BuiltinType::kAtomicAdd:
- case sem::BuiltinType::kAtomicSub:
- return call("InterlockedAdd");
+ out << result;
+ return true;
+ }
+ case sem::BuiltinType::kAtomicStore: {
+ // HLSL does not have an InterlockedStore, so we emulate it with
+ // InterlockedExchange and discard the returned value
+ { // T result = 0;
+ auto pre = line();
+ auto* value_ty = builtin->Parameters()[1]->Type()->UnwrapRef();
+ if (!EmitTypeAndName(pre, value_ty, ast::StorageClass::kNone,
+ ast::Access::kUndefined, result)) {
+ return false;
+ }
+ pre << " = ";
+ if (!EmitZeroValue(pre, value_ty)) {
+ return false;
+ }
+ pre << ";";
+ }
- case sem::BuiltinType::kAtomicMax:
- return call("InterlockedMax");
+ out << "InterlockedExchange";
+ {
+ ScopedParen sp(out);
+ if (!EmitExpression(out, expr->args[0])) {
+ return false;
+ }
+ out << ", ";
+ if (!EmitExpression(out, expr->args[1])) {
+ return false;
+ }
+ out << ", " << result;
+ }
+ return true;
+ }
+ case sem::BuiltinType::kAtomicCompareExchangeWeak: {
+ // Emit the builtin return type unique to this overload. This does not
+ // exist in the AST, so it will not be generated in Generate().
+ if (!EmitStructTypeOnce(&helpers_, builtin->ReturnType()->As<sem::Struct>())) {
+ return false;
+ }
- case sem::BuiltinType::kAtomicMin:
- return call("InterlockedMin");
+ auto* dest = expr->args[0];
+ auto* compare_value = expr->args[1];
+ auto* value = expr->args[2];
- case sem::BuiltinType::kAtomicAnd:
- return call("InterlockedAnd");
+ std::string compare = UniqueIdentifier("atomic_compare_value");
- case sem::BuiltinType::kAtomicOr:
- return call("InterlockedOr");
+ { // T compare_value = <compare_value>;
+ auto pre = line();
+ if (!EmitTypeAndName(pre, TypeOf(compare_value), ast::StorageClass::kNone,
+ ast::Access::kUndefined, compare)) {
+ return false;
+ }
+ pre << " = ";
+ if (!EmitExpression(pre, compare_value)) {
+ return false;
+ }
+ pre << ";";
+ }
- case sem::BuiltinType::kAtomicXor:
- return call("InterlockedXor");
+ { // InterlockedCompareExchange(dst, compare, value, result.old_value);
+ auto pre = line();
+ pre << "InterlockedCompareExchange";
+ {
+ ScopedParen sp(pre);
+ if (!EmitExpression(pre, dest)) {
+ return false;
+ }
+ pre << ", " << compare << ", ";
+ if (!EmitExpression(pre, value)) {
+ return false;
+ }
+ pre << ", " << result << ".old_value";
+ }
+ pre << ";";
+ }
- case sem::BuiltinType::kAtomicExchange:
- return call("InterlockedExchange");
+ // result.exchanged = result.old_value == compare;
+ line() << result << ".exchanged = " << result << ".old_value == " << compare << ";";
- default:
- break;
- }
+ out << result;
+ return true;
+ }
- TINT_UNREACHABLE(Writer, diagnostics_)
- << "unsupported atomic builtin: " << builtin->Type();
- return false;
-}
+ case sem::BuiltinType::kAtomicAdd:
+ case sem::BuiltinType::kAtomicSub:
+ return call("InterlockedAdd");
-bool GeneratorImpl::EmitSelectCall(std::ostream& out,
- const ast::CallExpression* expr) {
- auto* expr_false = expr->args[0];
- auto* expr_true = expr->args[1];
- auto* expr_cond = expr->args[2];
- ScopedParen paren(out);
- if (!EmitExpression(out, expr_cond)) {
- return false;
- }
+ case sem::BuiltinType::kAtomicMax:
+ return call("InterlockedMax");
- out << " ? ";
+ case sem::BuiltinType::kAtomicMin:
+ return call("InterlockedMin");
- if (!EmitExpression(out, expr_true)) {
- return false;
- }
+ case sem::BuiltinType::kAtomicAnd:
+ return call("InterlockedAnd");
- out << " : ";
+ case sem::BuiltinType::kAtomicOr:
+ return call("InterlockedOr");
- if (!EmitExpression(out, expr_false)) {
+ case sem::BuiltinType::kAtomicXor:
+ return call("InterlockedXor");
+
+ case sem::BuiltinType::kAtomicExchange:
+ return call("InterlockedExchange");
+
+ default:
+ break;
+ }
+
+ TINT_UNREACHABLE(Writer, diagnostics_) << "unsupported atomic builtin: " << builtin->Type();
return false;
- }
+}
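
For the workgroup atomicCompareExchangeWeak case above, the statements emitted ahead of the expression look roughly like this, where atomic_result and atomic_compare_value are the UniqueIdentifier temporaries and dest/value stand in for the emitted argument expressions:

    InterlockedCompareExchange(dest, atomic_compare_value, value, atomic_result.old_value);
    atomic_result.exchanged = atomic_result.old_value == atomic_compare_value;

The expression itself then evaluates to atomic_result, whose struct type is the builtin's return type emitted on demand through EmitStructTypeOnce.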
+
+bool GeneratorImpl::EmitSelectCall(std::ostream& out, const ast::CallExpression* expr) {
+ auto* expr_false = expr->args[0];
+ auto* expr_true = expr->args[1];
+ auto* expr_cond = expr->args[2];
+ ScopedParen paren(out);
+ if (!EmitExpression(out, expr_cond)) {
+ return false;
+ }
+
+ out << " ? ";
+
+ if (!EmitExpression(out, expr_true)) {
+ return false;
+ }
+
+ out << " : ";
- return true;
+ if (!EmitExpression(out, expr_false)) {
+ return false;
+ }
+
+ return true;
}
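
EmitSelectCall maps WGSL select directly onto the HLSL conditional operator; note the argument order, with the false value first and the condition last:

    // WGSL: select(f, t, cond)
    (cond ? t : f)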
bool GeneratorImpl::EmitModfCall(std::ostream& out,
const ast::CallExpression* expr,
const sem::Builtin* builtin) {
- return CallBuiltinHelper(
- out, expr, builtin,
- [&](TextBuffer* b, const std::vector<std::string>& params) {
- auto* ty = builtin->Parameters()[0]->Type();
- auto in = params[0];
-
- std::string width;
- if (auto* vec = ty->As<sem::Vector>()) {
- width = std::to_string(vec->Width());
- }
+ return CallBuiltinHelper(
+ out, expr, builtin, [&](TextBuffer* b, const std::vector<std::string>& params) {
+ auto* ty = builtin->Parameters()[0]->Type();
+ auto in = params[0];
+
+ std::string width;
+ if (auto* vec = ty->As<sem::Vector>()) {
+ width = std::to_string(vec->Width());
+ }
- // Emit the builtin return type unique to this overload. This does not
- // exist in the AST, so it will not be generated in Generate().
- if (!EmitStructType(&helpers_,
- builtin->ReturnType()->As<sem::Struct>())) {
- return false;
- }
+ // Emit the builtin return type unique to this overload. This does not
+ // exist in the AST, so it will not be generated in Generate().
+ if (!EmitStructType(&helpers_, builtin->ReturnType()->As<sem::Struct>())) {
+ return false;
+ }
- line(b) << "float" << width << " whole;";
- line(b) << "float" << width << " fract = modf(" << in << ", whole);";
- {
- auto l = line(b);
- if (!EmitType(l, builtin->ReturnType(), ast::StorageClass::kNone,
- ast::Access::kUndefined, "")) {
- return false;
- }
- l << " result = {fract, whole};";
- }
- line(b) << "return result;";
- return true;
- });
+ line(b) << "float" << width << " whole;";
+ line(b) << "float" << width << " fract = modf(" << in << ", whole);";
+ {
+ auto l = line(b);
+ if (!EmitType(l, builtin->ReturnType(), ast::StorageClass::kNone,
+ ast::Access::kUndefined, "")) {
+ return false;
+ }
+ l << " result = {fract, whole};";
+ }
+ line(b) << "return result;";
+ return true;
+ });
}
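
The modf helper emitted through CallBuiltinHelper comes out roughly as below for a vec2<f32> argument; the parameter name and the result struct name are owned by CallBuiltinHelper and the builtin's return type, so both are only indicative here (EmitFrexpCall follows the same pattern with frexp and an int exponent member):

    modf_result_vec2 tint_modf(float2 param_0) {
      float2 whole;
      float2 fract = modf(param_0, whole);
      modf_result_vec2 result = {fract, whole};
      return result;
    }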
bool GeneratorImpl::EmitFrexpCall(std::ostream& out,
const ast::CallExpression* expr,
const sem::Builtin* builtin) {
- return CallBuiltinHelper(
- out, expr, builtin,
- [&](TextBuffer* b, const std::vector<std::string>& params) {
- auto* ty = builtin->Parameters()[0]->Type();
- auto in = params[0];
-
- std::string width;
- if (auto* vec = ty->As<sem::Vector>()) {
- width = std::to_string(vec->Width());
- }
+ return CallBuiltinHelper(
+ out, expr, builtin, [&](TextBuffer* b, const std::vector<std::string>& params) {
+ auto* ty = builtin->Parameters()[0]->Type();
+ auto in = params[0];
+
+ std::string width;
+ if (auto* vec = ty->As<sem::Vector>()) {
+ width = std::to_string(vec->Width());
+ }
- // Emit the builtin return type unique to this overload. This does not
- // exist in the AST, so it will not be generated in Generate().
- if (!EmitStructType(&helpers_,
- builtin->ReturnType()->As<sem::Struct>())) {
- return false;
- }
+ // Emit the builtin return type unique to this overload. This does not
+ // exist in the AST, so it will not be generated in Generate().
+ if (!EmitStructType(&helpers_, builtin->ReturnType()->As<sem::Struct>())) {
+ return false;
+ }
- line(b) << "float" << width << " exp;";
- line(b) << "float" << width << " sig = frexp(" << in << ", exp);";
- {
- auto l = line(b);
- if (!EmitType(l, builtin->ReturnType(), ast::StorageClass::kNone,
- ast::Access::kUndefined, "")) {
- return false;
- }
- l << " result = {sig, int" << width << "(exp)};";
- }
- line(b) << "return result;";
- return true;
- });
+ line(b) << "float" << width << " exp;";
+ line(b) << "float" << width << " sig = frexp(" << in << ", exp);";
+ {
+ auto l = line(b);
+ if (!EmitType(l, builtin->ReturnType(), ast::StorageClass::kNone,
+ ast::Access::kUndefined, "")) {
+ return false;
+ }
+ l << " result = {sig, int" << width << "(exp)};";
+ }
+ line(b) << "return result;";
+ return true;
+ });
}
bool GeneratorImpl::EmitDegreesCall(std::ostream& out,
const ast::CallExpression* expr,
const sem::Builtin* builtin) {
- return CallBuiltinHelper(
- out, expr, builtin,
- [&](TextBuffer* b, const std::vector<std::string>& params) {
- line(b) << "return " << params[0] << " * " << std::setprecision(20)
- << sem::kRadToDeg << ";";
- return true;
- });
+ return CallBuiltinHelper(out, expr, builtin,
+ [&](TextBuffer* b, const std::vector<std::string>& params) {
+ line(b) << "return " << params[0] << " * " << std::setprecision(20)
+ << sem::kRadToDeg << ";";
+ return true;
+ });
}
bool GeneratorImpl::EmitRadiansCall(std::ostream& out,
const ast::CallExpression* expr,
const sem::Builtin* builtin) {
- return CallBuiltinHelper(
- out, expr, builtin,
- [&](TextBuffer* b, const std::vector<std::string>& params) {
- line(b) << "return " << params[0] << " * " << std::setprecision(20)
- << sem::kDegToRad << ";";
- return true;
- });
+ return CallBuiltinHelper(out, expr, builtin,
+ [&](TextBuffer* b, const std::vector<std::string>& params) {
+ line(b) << "return " << params[0] << " * " << std::setprecision(20)
+ << sem::kDegToRad << ";";
+ return true;
+ });
}
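
EmitDegreesCall and EmitRadiansCall only scale their argument by the rad/deg constants, written out with std::setprecision(20); the helper bodies are essentially the following, with the constants shortened here for readability:

    // degrees(x):
    return param_0 * 57.295779513082323;   // sem::kRadToDeg, ~180 / pi
    // radians(x):
    return param_0 * 0.017453292519943295; // sem::kDegToRad, ~pi / 180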
bool GeneratorImpl::EmitDataPackingCall(std::ostream& out,
const ast::CallExpression* expr,
const sem::Builtin* builtin) {
- return CallBuiltinHelper(
- out, expr, builtin,
- [&](TextBuffer* b, const std::vector<std::string>& params) {
- uint32_t dims = 2;
- bool is_signed = false;
- uint32_t scale = 65535;
- if (builtin->Type() == sem::BuiltinType::kPack4x8snorm ||
- builtin->Type() == sem::BuiltinType::kPack4x8unorm) {
- dims = 4;
- scale = 255;
- }
- if (builtin->Type() == sem::BuiltinType::kPack4x8snorm ||
- builtin->Type() == sem::BuiltinType::kPack2x16snorm) {
- is_signed = true;
- scale = (scale - 1) / 2;
- }
- switch (builtin->Type()) {
- case sem::BuiltinType::kPack4x8snorm:
- case sem::BuiltinType::kPack4x8unorm:
- case sem::BuiltinType::kPack2x16snorm:
- case sem::BuiltinType::kPack2x16unorm: {
- {
- auto l = line(b);
- l << (is_signed ? "" : "u") << "int" << dims
- << " i = " << (is_signed ? "" : "u") << "int" << dims
- << "(round(clamp(" << params[0] << ", "
- << (is_signed ? "-1.0" : "0.0") << ", 1.0) * " << scale
- << ".0))";
- if (is_signed) {
- l << " & " << (dims == 4 ? "0xff" : "0xffff");
- }
- l << ";";
+ return CallBuiltinHelper(
+ out, expr, builtin, [&](TextBuffer* b, const std::vector<std::string>& params) {
+ uint32_t dims = 2;
+ bool is_signed = false;
+ uint32_t scale = 65535;
+ if (builtin->Type() == sem::BuiltinType::kPack4x8snorm ||
+ builtin->Type() == sem::BuiltinType::kPack4x8unorm) {
+ dims = 4;
+ scale = 255;
}
- {
- auto l = line(b);
- l << "return ";
- if (is_signed) {
- l << "asuint";
- }
- l << "(i.x | i.y << " << (32 / dims);
- if (dims == 4) {
- l << " | i.z << 16 | i.w << 24";
- }
- l << ");";
+ if (builtin->Type() == sem::BuiltinType::kPack4x8snorm ||
+ builtin->Type() == sem::BuiltinType::kPack2x16snorm) {
+ is_signed = true;
+ scale = (scale - 1) / 2;
+ }
+ switch (builtin->Type()) {
+ case sem::BuiltinType::kPack4x8snorm:
+ case sem::BuiltinType::kPack4x8unorm:
+ case sem::BuiltinType::kPack2x16snorm:
+ case sem::BuiltinType::kPack2x16unorm: {
+ {
+ auto l = line(b);
+ l << (is_signed ? "" : "u") << "int" << dims
+ << " i = " << (is_signed ? "" : "u") << "int" << dims << "(round(clamp("
+ << params[0] << ", " << (is_signed ? "-1.0" : "0.0") << ", 1.0) * "
+ << scale << ".0))";
+ if (is_signed) {
+ l << " & " << (dims == 4 ? "0xff" : "0xffff");
+ }
+ l << ";";
+ }
+ {
+ auto l = line(b);
+ l << "return ";
+ if (is_signed) {
+ l << "asuint";
+ }
+ l << "(i.x | i.y << " << (32 / dims);
+ if (dims == 4) {
+ l << " | i.z << 16 | i.w << 24";
+ }
+ l << ");";
+ }
+ break;
+ }
+ case sem::BuiltinType::kPack2x16float: {
+ line(b) << "uint2 i = f32tof16(" << params[0] << ");";
+ line(b) << "return i.x | (i.y << 16);";
+ break;
+ }
+ default:
+ diagnostics_.add_error(diag::System::Writer,
+ "Internal error: unhandled data packing builtin");
+ return false;
}
- break;
- }
- case sem::BuiltinType::kPack2x16float: {
- line(b) << "uint2 i = f32tof16(" << params[0] << ");";
- line(b) << "return i.x | (i.y << 16);";
- break;
- }
- default:
- diagnostics_.add_error(
- diag::System::Writer,
- "Internal error: unhandled data packing builtin");
- return false;
- }
- return true;
- });
+ return true;
+ });
}
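
As a concrete example, the pack2x16float branch above produces a helper whose body is just the following (param_0 being the name CallBuiltinHelper gives the packed argument):

    uint2 i = f32tof16(param_0);
    return i.x | (i.y << 16);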
bool GeneratorImpl::EmitDataUnpackingCall(std::ostream& out,
const ast::CallExpression* expr,
const sem::Builtin* builtin) {
- return CallBuiltinHelper(
- out, expr, builtin,
- [&](TextBuffer* b, const std::vector<std::string>& params) {
- uint32_t dims = 2;
- bool is_signed = false;
- uint32_t scale = 65535;
- if (builtin->Type() == sem::BuiltinType::kUnpack4x8snorm ||
- builtin->Type() == sem::BuiltinType::kUnpack4x8unorm) {
- dims = 4;
- scale = 255;
- }
- if (builtin->Type() == sem::BuiltinType::kUnpack4x8snorm ||
- builtin->Type() == sem::BuiltinType::kUnpack2x16snorm) {
- is_signed = true;
- scale = (scale - 1) / 2;
- }
- switch (builtin->Type()) {
- case sem::BuiltinType::kUnpack4x8snorm:
- case sem::BuiltinType::kUnpack2x16snorm: {
- line(b) << "int j = int(" << params[0] << ");";
- { // Perform sign extension on the converted values.
- auto l = line(b);
- l << "int" << dims << " i = int" << dims << "(";
- if (dims == 2) {
- l << "j << 16, j) >> 16";
- } else {
- l << "j << 24, j << 16, j << 8, j) >> 24";
- }
- l << ";";
- }
- line(b) << "return clamp(float" << dims << "(i) / " << scale
- << ".0, " << (is_signed ? "-1.0" : "0.0") << ", 1.0);";
- break;
- }
- case sem::BuiltinType::kUnpack4x8unorm:
- case sem::BuiltinType::kUnpack2x16unorm: {
- line(b) << "uint j = " << params[0] << ";";
- {
- auto l = line(b);
- l << "uint" << dims << " i = uint" << dims << "(";
- l << "j & " << (dims == 2 ? "0xffff" : "0xff") << ", ";
- if (dims == 4) {
- l << "(j >> " << (32 / dims)
- << ") & 0xff, (j >> 16) & 0xff, j >> 24";
- } else {
- l << "j >> " << (32 / dims);
- }
- l << ");";
- }
- line(b) << "return float" << dims << "(i) / " << scale << ".0;";
- break;
- }
- case sem::BuiltinType::kUnpack2x16float:
- line(b) << "uint i = " << params[0] << ";";
- line(b) << "return f16tof32(uint2(i & 0xffff, i >> 16));";
- break;
- default:
- diagnostics_.add_error(
- diag::System::Writer,
- "Internal error: unhandled data packing builtin");
- return false;
- }
+ return CallBuiltinHelper(
+ out, expr, builtin, [&](TextBuffer* b, const std::vector<std::string>& params) {
+ uint32_t dims = 2;
+ bool is_signed = false;
+ uint32_t scale = 65535;
+ if (builtin->Type() == sem::BuiltinType::kUnpack4x8snorm ||
+ builtin->Type() == sem::BuiltinType::kUnpack4x8unorm) {
+ dims = 4;
+ scale = 255;
+ }
+ if (builtin->Type() == sem::BuiltinType::kUnpack4x8snorm ||
+ builtin->Type() == sem::BuiltinType::kUnpack2x16snorm) {
+ is_signed = true;
+ scale = (scale - 1) / 2;
+ }
+ switch (builtin->Type()) {
+ case sem::BuiltinType::kUnpack4x8snorm:
+ case sem::BuiltinType::kUnpack2x16snorm: {
+ line(b) << "int j = int(" << params[0] << ");";
+ { // Perform sign extension on the converted values.
+ auto l = line(b);
+ l << "int" << dims << " i = int" << dims << "(";
+ if (dims == 2) {
+ l << "j << 16, j) >> 16";
+ } else {
+ l << "j << 24, j << 16, j << 8, j) >> 24";
+ }
+ l << ";";
+ }
+ line(b) << "return clamp(float" << dims << "(i) / " << scale << ".0, "
+ << (is_signed ? "-1.0" : "0.0") << ", 1.0);";
+ break;
+ }
+ case sem::BuiltinType::kUnpack4x8unorm:
+ case sem::BuiltinType::kUnpack2x16unorm: {
+ line(b) << "uint j = " << params[0] << ";";
+ {
+ auto l = line(b);
+ l << "uint" << dims << " i = uint" << dims << "(";
+ l << "j & " << (dims == 2 ? "0xffff" : "0xff") << ", ";
+ if (dims == 4) {
+ l << "(j >> " << (32 / dims) << ") & 0xff, (j >> 16) & 0xff, j >> 24";
+ } else {
+ l << "j >> " << (32 / dims);
+ }
+ l << ");";
+ }
+ line(b) << "return float" << dims << "(i) / " << scale << ".0;";
+ break;
+ }
+ case sem::BuiltinType::kUnpack2x16float:
+ line(b) << "uint i = " << params[0] << ";";
+ line(b) << "return f16tof32(uint2(i & 0xffff, i >> 16));";
+ break;
+ default:
+ diagnostics_.add_error(diag::System::Writer,
+                                           "Internal error: unhandled data unpacking builtin");
+ return false;
+ }
- return true;
- });
+ return true;
+ });
}
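
And the matching unpack2x16float body, under the same naming assumption:

    uint i = param_0;
    return f16tof32(uint2(i & 0xffff, i >> 16));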
-bool GeneratorImpl::EmitBarrierCall(std::ostream& out,
- const sem::Builtin* builtin) {
- // TODO(crbug.com/tint/661): Combine sequential barriers to a single
- // instruction.
- if (builtin->Type() == sem::BuiltinType::kWorkgroupBarrier) {
- out << "GroupMemoryBarrierWithGroupSync()";
- } else if (builtin->Type() == sem::BuiltinType::kStorageBarrier) {
- out << "DeviceMemoryBarrierWithGroupSync()";
- } else {
- TINT_UNREACHABLE(Writer, diagnostics_)
- << "unexpected barrier builtin type " << sem::str(builtin->Type());
- return false;
- }
- return true;
+bool GeneratorImpl::EmitDP4aCall(std::ostream& out,
+ const ast::CallExpression* expr,
+ const sem::Builtin* builtin) {
+ // TODO(crbug.com/tint/1497): support the polyfill version of DP4a functions.
+ return CallBuiltinHelper(
+ out, expr, builtin, [&](TextBuffer* b, const std::vector<std::string>& params) {
+ std::string functionName;
+ switch (builtin->Type()) {
+ case sem::BuiltinType::kDot4I8Packed:
+ line(b) << "int accumulator = 0;";
+ functionName = "dot4add_i8packed";
+ break;
+ case sem::BuiltinType::kDot4U8Packed:
+ line(b) << "uint accumulator = 0u;";
+ functionName = "dot4add_u8packed";
+ break;
+ default:
+ diagnostics_.add_error(diag::System::Writer,
+ "Internal error: unhandled DP4a builtin");
+ return false;
+ }
+ line(b) << "return " << functionName << "(" << params[0] << ", " << params[1]
+ << ", accumulator);";
+
+ return true;
+ });
+}
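
The new EmitDP4aCall wraps the SM 6.4 packed-dot-product intrinsics; for dot4I8Packed the generated helper body is roughly the following (parameter names assumed):

    int accumulator = 0;
    return dot4add_i8packed(param_0, param_1, accumulator);

dot4U8Packed is identical apart from a uint accumulator and dot4add_u8packed.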
+
+bool GeneratorImpl::EmitBarrierCall(std::ostream& out, const sem::Builtin* builtin) {
+    // TODO(crbug.com/tint/661): Combine sequential barriers into a single
+ // instruction.
+ if (builtin->Type() == sem::BuiltinType::kWorkgroupBarrier) {
+ out << "GroupMemoryBarrierWithGroupSync()";
+ } else if (builtin->Type() == sem::BuiltinType::kStorageBarrier) {
+ out << "DeviceMemoryBarrierWithGroupSync()";
+ } else {
+ TINT_UNREACHABLE(Writer, diagnostics_)
+ << "unexpected barrier builtin type " << sem::str(builtin->Type());
+ return false;
+ }
+ return true;
}
bool GeneratorImpl::EmitTextureCall(std::ostream& out,
const sem::Call* call,
const sem::Builtin* builtin) {
- using Usage = sem::ParameterUsage;
+ using Usage = sem::ParameterUsage;
- auto& signature = builtin->Signature();
- auto* expr = call->Declaration();
- auto arguments = expr->args;
+ auto& signature = builtin->Signature();
+ auto* expr = call->Declaration();
+ auto arguments = expr->args;
- // Returns the argument with the given usage
- auto arg = [&](Usage usage) {
- int idx = signature.IndexOf(usage);
- return (idx >= 0) ? arguments[idx] : nullptr;
- };
+ // Returns the argument with the given usage
+ auto arg = [&](Usage usage) {
+ int idx = signature.IndexOf(usage);
+ return (idx >= 0) ? arguments[idx] : nullptr;
+ };
- auto* texture = arg(Usage::kTexture);
- if (!texture) {
- TINT_ICE(Writer, diagnostics_) << "missing texture argument";
- return false;
- }
-
- auto* texture_type = TypeOf(texture)->UnwrapRef()->As<sem::Texture>();
-
- switch (builtin->Type()) {
- case sem::BuiltinType::kTextureDimensions:
- case sem::BuiltinType::kTextureNumLayers:
- case sem::BuiltinType::kTextureNumLevels:
- case sem::BuiltinType::kTextureNumSamples: {
- // All of these builtins use the GetDimensions() method on the texture
- bool is_ms = texture_type->IsAnyOf<sem::MultisampledTexture,
- sem::DepthMultisampledTexture>();
- int num_dimensions = 0;
- std::string swizzle;
-
- switch (builtin->Type()) {
+ auto* texture = arg(Usage::kTexture);
+ if (!texture) {
+ TINT_ICE(Writer, diagnostics_) << "missing texture argument";
+ return false;
+ }
+
+ auto* texture_type = TypeOf(texture)->UnwrapRef()->As<sem::Texture>();
+
+ switch (builtin->Type()) {
case sem::BuiltinType::kTextureDimensions:
- switch (texture_type->dim()) {
- case ast::TextureDimension::kNone:
- TINT_ICE(Writer, diagnostics_) << "texture dimension is kNone";
- return false;
- case ast::TextureDimension::k1d:
- num_dimensions = 1;
- break;
- case ast::TextureDimension::k2d:
- num_dimensions = is_ms ? 3 : 2;
- swizzle = is_ms ? ".xy" : "";
- break;
- case ast::TextureDimension::k2dArray:
- num_dimensions = is_ms ? 4 : 3;
- swizzle = ".xy";
- break;
- case ast::TextureDimension::k3d:
- num_dimensions = 3;
- break;
- case ast::TextureDimension::kCube:
- num_dimensions = 2;
- break;
- case ast::TextureDimension::kCubeArray:
- num_dimensions = 3;
- swizzle = ".xy";
- break;
- }
- break;
case sem::BuiltinType::kTextureNumLayers:
- switch (texture_type->dim()) {
- default:
- TINT_ICE(Writer, diagnostics_)
- << "texture dimension is not arrayed";
- return false;
- case ast::TextureDimension::k2dArray:
- num_dimensions = is_ms ? 4 : 3;
- swizzle = ".z";
- break;
- case ast::TextureDimension::kCubeArray:
- num_dimensions = 3;
- swizzle = ".z";
- break;
- }
- break;
case sem::BuiltinType::kTextureNumLevels:
- switch (texture_type->dim()) {
- default:
- TINT_ICE(Writer, diagnostics_)
- << "texture dimension does not support mips";
- return false;
- case ast::TextureDimension::k1d:
- num_dimensions = 2;
- swizzle = ".y";
- break;
- case ast::TextureDimension::k2d:
- case ast::TextureDimension::kCube:
- num_dimensions = 3;
- swizzle = ".z";
- break;
- case ast::TextureDimension::k2dArray:
- case ast::TextureDimension::k3d:
- case ast::TextureDimension::kCubeArray:
- num_dimensions = 4;
- swizzle = ".w";
- break;
- }
- break;
- case sem::BuiltinType::kTextureNumSamples:
- switch (texture_type->dim()) {
- default:
- TINT_ICE(Writer, diagnostics_)
- << "texture dimension does not support multisampling";
- return false;
- case ast::TextureDimension::k2d:
- num_dimensions = 3;
- swizzle = ".z";
- break;
- case ast::TextureDimension::k2dArray:
- num_dimensions = 4;
- swizzle = ".w";
- break;
- }
- break;
- default:
- TINT_ICE(Writer, diagnostics_) << "unexpected builtin";
- return false;
- }
+ case sem::BuiltinType::kTextureNumSamples: {
+ // All of these builtins use the GetDimensions() method on the texture
+ bool is_ms =
+ texture_type->IsAnyOf<sem::MultisampledTexture, sem::DepthMultisampledTexture>();
+ int num_dimensions = 0;
+ std::string swizzle;
+
+ switch (builtin->Type()) {
+ case sem::BuiltinType::kTextureDimensions:
+ switch (texture_type->dim()) {
+ case ast::TextureDimension::kNone:
+ TINT_ICE(Writer, diagnostics_) << "texture dimension is kNone";
+ return false;
+ case ast::TextureDimension::k1d:
+ num_dimensions = 1;
+ break;
+ case ast::TextureDimension::k2d:
+ num_dimensions = is_ms ? 3 : 2;
+ swizzle = is_ms ? ".xy" : "";
+ break;
+ case ast::TextureDimension::k2dArray:
+ num_dimensions = is_ms ? 4 : 3;
+ swizzle = ".xy";
+ break;
+ case ast::TextureDimension::k3d:
+ num_dimensions = 3;
+ break;
+ case ast::TextureDimension::kCube:
+ num_dimensions = 2;
+ break;
+ case ast::TextureDimension::kCubeArray:
+ num_dimensions = 3;
+ swizzle = ".xy";
+ break;
+ }
+ break;
+ case sem::BuiltinType::kTextureNumLayers:
+ switch (texture_type->dim()) {
+ default:
+ TINT_ICE(Writer, diagnostics_) << "texture dimension is not arrayed";
+ return false;
+ case ast::TextureDimension::k2dArray:
+ num_dimensions = is_ms ? 4 : 3;
+ swizzle = ".z";
+ break;
+ case ast::TextureDimension::kCubeArray:
+ num_dimensions = 3;
+ swizzle = ".z";
+ break;
+ }
+ break;
+ case sem::BuiltinType::kTextureNumLevels:
+ switch (texture_type->dim()) {
+ default:
+ TINT_ICE(Writer, diagnostics_)
+ << "texture dimension does not support mips";
+ return false;
+ case ast::TextureDimension::k1d:
+ num_dimensions = 2;
+ swizzle = ".y";
+ break;
+ case ast::TextureDimension::k2d:
+ case ast::TextureDimension::kCube:
+ num_dimensions = 3;
+ swizzle = ".z";
+ break;
+ case ast::TextureDimension::k2dArray:
+ case ast::TextureDimension::k3d:
+ case ast::TextureDimension::kCubeArray:
+ num_dimensions = 4;
+ swizzle = ".w";
+ break;
+ }
+ break;
+ case sem::BuiltinType::kTextureNumSamples:
+ switch (texture_type->dim()) {
+ default:
+ TINT_ICE(Writer, diagnostics_)
+ << "texture dimension does not support multisampling";
+ return false;
+ case ast::TextureDimension::k2d:
+ num_dimensions = 3;
+ swizzle = ".z";
+ break;
+ case ast::TextureDimension::k2dArray:
+ num_dimensions = 4;
+ swizzle = ".w";
+ break;
+ }
+ break;
+ default:
+ TINT_ICE(Writer, diagnostics_) << "unexpected builtin";
+ return false;
+ }
- auto* level_arg = arg(Usage::kLevel);
+ auto* level_arg = arg(Usage::kLevel);
- if (level_arg) {
- // `NumberOfLevels` is a non-optional argument if `MipLevel` was passed.
- // Increment the number of dimensions for the temporary vector to
- // accommodate this.
- num_dimensions++;
+ if (level_arg) {
+ // `NumberOfLevels` is a non-optional argument if `MipLevel` was passed.
+ // Increment the number of dimensions for the temporary vector to
+ // accommodate this.
+ num_dimensions++;
- // If the swizzle was empty, the expression will evaluate to the whole
- // vector. As we've grown the vector by one element, we now need to
- // swizzle to keep the result expression equivalent.
- if (swizzle.empty()) {
- static constexpr const char* swizzles[] = {"", ".x", ".xy", ".xyz"};
- swizzle = swizzles[num_dimensions - 1];
- }
- }
+ // If the swizzle was empty, the expression will evaluate to the whole
+ // vector. As we've grown the vector by one element, we now need to
+ // swizzle to keep the result expression equivalent.
+ if (swizzle.empty()) {
+ static constexpr const char* swizzles[] = {"", ".x", ".xy", ".xyz"};
+ swizzle = swizzles[num_dimensions - 1];
+ }
+ }
- if (num_dimensions > 4) {
- TINT_ICE(Writer, diagnostics_)
- << "Texture query builtin temporary vector has " << num_dimensions
- << " dimensions";
- return false;
- }
+ if (num_dimensions > 4) {
+ TINT_ICE(Writer, diagnostics_) << "Texture query builtin temporary vector has "
+ << num_dimensions << " dimensions";
+ return false;
+ }
- // Declare a variable to hold the queried texture info
- auto dims = UniqueIdentifier(kTempNamePrefix);
- if (num_dimensions == 1) {
- line() << "int " << dims << ";";
- } else {
- line() << "int" << num_dimensions << " " << dims << ";";
- }
+ // Declare a variable to hold the queried texture info
+ auto dims = UniqueIdentifier(kTempNamePrefix);
+ if (num_dimensions == 1) {
+ line() << "int " << dims << ";";
+ } else {
+ line() << "int" << num_dimensions << " " << dims << ";";
+ }
- { // texture.GetDimensions(...)
- auto pre = line();
- if (!EmitExpression(pre, texture)) {
- return false;
- }
- pre << ".GetDimensions(";
+ { // texture.GetDimensions(...)
+ auto pre = line();
+ if (!EmitExpression(pre, texture)) {
+ return false;
+ }
+ pre << ".GetDimensions(";
+
+ if (level_arg) {
+ if (!EmitExpression(pre, level_arg)) {
+ return false;
+ }
+ pre << ", ";
+ } else if (builtin->Type() == sem::BuiltinType::kTextureNumLevels) {
+ pre << "0, ";
+ }
- if (level_arg) {
- if (!EmitExpression(pre, level_arg)) {
- return false;
- }
- pre << ", ";
- } else if (builtin->Type() == sem::BuiltinType::kTextureNumLevels) {
- pre << "0, ";
- }
+ if (num_dimensions == 1) {
+ pre << dims;
+ } else {
+ static constexpr char xyzw[] = {'x', 'y', 'z', 'w'};
+ if (num_dimensions < 0 || num_dimensions > 4) {
+ TINT_ICE(Writer, diagnostics_)
+ << "vector dimensions are " << num_dimensions;
+ return false;
+ }
+ for (int i = 0; i < num_dimensions; i++) {
+ if (i > 0) {
+ pre << ", ";
+ }
+ pre << dims << "." << xyzw[i];
+ }
+ }
- if (num_dimensions == 1) {
- pre << dims;
- } else {
- static constexpr char xyzw[] = {'x', 'y', 'z', 'w'};
- if (num_dimensions < 0 || num_dimensions > 4) {
- TINT_ICE(Writer, diagnostics_)
- << "vector dimensions are " << num_dimensions;
- return false;
- }
- for (int i = 0; i < num_dimensions; i++) {
- if (i > 0) {
- pre << ", ";
+ pre << ");";
}
- pre << dims << "." << xyzw[i];
- }
- }
- pre << ");";
- }
+            // The out parameters of the GetDimensions() call are now in the
+            // temporary `dims` variable. This may be packed with other data, so
+            // the final expression may require a swizzle.
+ out << dims << swizzle;
+ return true;
+ }
+ default:
+ break;
+ }
- // The out parameters of the GetDimensions() call is now in temporary
- // `dims` variable. This may be packed with other data, so the final
- // expression may require a swizzle.
- out << dims << swizzle;
- return true;
+ if (!EmitExpression(out, texture)) {
+ return false;
}
- default:
- break;
- }
- if (!EmitExpression(out, texture))
- return false;
+ // If pack_level_in_coords is true, then the mip level will be appended as the
+ // last value of the coordinates argument. If the WGSL builtin overload does
+ // not have a level parameter and pack_level_in_coords is true, then a zero
+ // mip level will be inserted.
+ bool pack_level_in_coords = false;
+
+ uint32_t hlsl_ret_width = 4u;
- // If pack_level_in_coords is true, then the mip level will be appended as the
- // last value of the coordinates argument. If the WGSL builtin overload does
- // not have a level parameter and pack_level_in_coords is true, then a zero
- // mip level will be inserted.
- bool pack_level_in_coords = false;
-
- uint32_t hlsl_ret_width = 4u;
-
- switch (builtin->Type()) {
- case sem::BuiltinType::kTextureSample:
- out << ".Sample(";
- break;
- case sem::BuiltinType::kTextureSampleBias:
- out << ".SampleBias(";
- break;
- case sem::BuiltinType::kTextureSampleLevel:
- out << ".SampleLevel(";
- break;
- case sem::BuiltinType::kTextureSampleGrad:
- out << ".SampleGrad(";
- break;
- case sem::BuiltinType::kTextureSampleCompare:
- out << ".SampleCmp(";
- hlsl_ret_width = 1;
- break;
- case sem::BuiltinType::kTextureSampleCompareLevel:
- out << ".SampleCmpLevelZero(";
- hlsl_ret_width = 1;
- break;
- case sem::BuiltinType::kTextureLoad:
- out << ".Load(";
- // Multisampled textures do not support mip-levels.
- if (!texture_type->Is<sem::MultisampledTexture>()) {
- pack_level_in_coords = true;
- }
- break;
- case sem::BuiltinType::kTextureGather:
- out << ".Gather";
- if (builtin->Parameters()[0]->Usage() ==
- sem::ParameterUsage::kComponent) {
- switch (call->Arguments()[0]->ConstantValue().Elements()[0].i32) {
- case 0:
- out << "Red";
+ switch (builtin->Type()) {
+ case sem::BuiltinType::kTextureSample:
+ out << ".Sample(";
break;
- case 1:
- out << "Green";
+ case sem::BuiltinType::kTextureSampleBias:
+ out << ".SampleBias(";
break;
- case 2:
- out << "Blue";
+ case sem::BuiltinType::kTextureSampleLevel:
+ out << ".SampleLevel(";
break;
- case 3:
- out << "Alpha";
+ case sem::BuiltinType::kTextureSampleGrad:
+ out << ".SampleGrad(";
break;
- }
- }
- out << "(";
- break;
- case sem::BuiltinType::kTextureGatherCompare:
- out << ".GatherCmp(";
- break;
- case sem::BuiltinType::kTextureStore:
- out << "[";
- break;
- default:
- diagnostics_.add_error(
- diag::System::Writer,
- "Internal compiler error: Unhandled texture builtin '" +
- std::string(builtin->str()) + "'");
- return false;
- }
-
- if (auto* sampler = arg(Usage::kSampler)) {
- if (!EmitExpression(out, sampler))
- return false;
- out << ", ";
- }
-
- auto* param_coords = arg(Usage::kCoords);
- if (!param_coords) {
- TINT_ICE(Writer, diagnostics_) << "missing coords argument";
- return false;
- }
-
- auto emit_vector_appended_with_i32_zero = [&](const ast::Expression* vector) {
- auto* i32 = builder_.create<sem::I32>();
- auto* zero = builder_.Expr(0);
- auto* stmt = builder_.Sem().Get(vector)->Stmt();
- builder_.Sem().Add(
- zero, builder_.create<sem::Expression>(zero, i32, stmt, sem::Constant{},
- /* has_side_effects */ false));
- auto* packed = AppendVector(&builder_, vector, zero);
- return EmitExpression(out, packed->Declaration());
- };
-
- auto emit_vector_appended_with_level = [&](const ast::Expression* vector) {
- if (auto* level = arg(Usage::kLevel)) {
- auto* packed = AppendVector(&builder_, vector, level);
- return EmitExpression(out, packed->Declaration());
- }
- return emit_vector_appended_with_i32_zero(vector);
- };
-
- if (auto* array_index = arg(Usage::kArrayIndex)) {
- // Array index needs to be appended to the coordinates.
- auto* packed = AppendVector(&builder_, param_coords, array_index);
- if (pack_level_in_coords) {
- // Then mip level needs to be appended to the coordinates.
- if (!emit_vector_appended_with_level(packed->Declaration())) {
- return false;
- }
- } else {
- if (!EmitExpression(out, packed->Declaration())) {
- return false;
- }
- }
- } else if (pack_level_in_coords) {
- // Mip level needs to be appended to the coordinates.
- if (!emit_vector_appended_with_level(param_coords)) {
- return false;
- }
- } else {
- if (!EmitExpression(out, param_coords)) {
- return false;
+ case sem::BuiltinType::kTextureSampleCompare:
+ out << ".SampleCmp(";
+ hlsl_ret_width = 1;
+ break;
+ case sem::BuiltinType::kTextureSampleCompareLevel:
+ out << ".SampleCmpLevelZero(";
+ hlsl_ret_width = 1;
+ break;
+ case sem::BuiltinType::kTextureLoad:
+ out << ".Load(";
+ // Multisampled textures do not support mip-levels.
+ if (!texture_type->Is<sem::MultisampledTexture>()) {
+ pack_level_in_coords = true;
+ }
+ break;
+ case sem::BuiltinType::kTextureGather:
+ out << ".Gather";
+ if (builtin->Parameters()[0]->Usage() == sem::ParameterUsage::kComponent) {
+ switch (call->Arguments()[0]->ConstantValue().Element<AInt>(0).value) {
+ case 0:
+ out << "Red";
+ break;
+ case 1:
+ out << "Green";
+ break;
+ case 2:
+ out << "Blue";
+ break;
+ case 3:
+ out << "Alpha";
+ break;
+ }
+ }
+ out << "(";
+ break;
+ case sem::BuiltinType::kTextureGatherCompare:
+ out << ".GatherCmp(";
+ break;
+ case sem::BuiltinType::kTextureStore:
+ out << "[";
+ break;
+ default:
+ diagnostics_.add_error(diag::System::Writer,
+ "Internal compiler error: Unhandled texture builtin '" +
+ std::string(builtin->str()) + "'");
+ return false;
}
- }
- for (auto usage : {Usage::kDepthRef, Usage::kBias, Usage::kLevel, Usage::kDdx,
- Usage::kDdy, Usage::kSampleIndex, Usage::kOffset}) {
- if (usage == Usage::kLevel && pack_level_in_coords) {
- continue; // mip level already packed in coordinates.
+ if (auto* sampler = arg(Usage::kSampler)) {
+ if (!EmitExpression(out, sampler)) {
+ return false;
+ }
+ out << ", ";
}
- if (auto* e = arg(usage)) {
- out << ", ";
- if (!EmitExpression(out, e)) {
+
+ auto* param_coords = arg(Usage::kCoords);
+ if (!param_coords) {
+ TINT_ICE(Writer, diagnostics_) << "missing coords argument";
return false;
- }
}
- }
- if (builtin->Type() == sem::BuiltinType::kTextureStore) {
- out << "] = ";
- if (!EmitExpression(out, arg(Usage::kValue))) {
- return false;
- }
- } else {
- out << ")";
+ auto emit_vector_appended_with_i32_zero = [&](const ast::Expression* vector) {
+ auto* i32 = builder_.create<sem::I32>();
+ auto* zero = builder_.Expr(0_i);
+ auto* stmt = builder_.Sem().Get(vector)->Stmt();
+ builder_.Sem().Add(zero, builder_.create<sem::Expression>(zero, i32, stmt, sem::Constant{},
+ /* has_side_effects */ false));
+ auto* packed = AppendVector(&builder_, vector, zero);
+ return EmitExpression(out, packed->Declaration());
+ };
- // If the builtin return type does not match the number of elements of the
- // HLSL builtin, we need to swizzle the expression to generate the correct
- // number of components.
- uint32_t wgsl_ret_width = 1;
- if (auto* vec = builtin->ReturnType()->As<sem::Vector>()) {
- wgsl_ret_width = vec->Width();
+ auto emit_vector_appended_with_level = [&](const ast::Expression* vector) {
+ if (auto* level = arg(Usage::kLevel)) {
+ auto* packed = AppendVector(&builder_, vector, level);
+ return EmitExpression(out, packed->Declaration());
+ }
+ return emit_vector_appended_with_i32_zero(vector);
+ };
+
+ if (auto* array_index = arg(Usage::kArrayIndex)) {
+ // Array index needs to be appended to the coordinates.
+ auto* packed = AppendVector(&builder_, param_coords, array_index);
+ if (pack_level_in_coords) {
+ // Then mip level needs to be appended to the coordinates.
+ if (!emit_vector_appended_with_level(packed->Declaration())) {
+ return false;
+ }
+ } else {
+ if (!EmitExpression(out, packed->Declaration())) {
+ return false;
+ }
+ }
+ } else if (pack_level_in_coords) {
+ // Mip level needs to be appended to the coordinates.
+ if (!emit_vector_appended_with_level(param_coords)) {
+ return false;
+ }
+ } else {
+ if (!EmitExpression(out, param_coords)) {
+ return false;
+ }
}
- if (wgsl_ret_width < hlsl_ret_width) {
- out << ".";
- for (uint32_t i = 0; i < wgsl_ret_width; i++) {
- out << "xyz"[i];
- }
+
+ for (auto usage : {Usage::kDepthRef, Usage::kBias, Usage::kLevel, Usage::kDdx, Usage::kDdy,
+ Usage::kSampleIndex, Usage::kOffset}) {
+ if (usage == Usage::kLevel && pack_level_in_coords) {
+ continue; // mip level already packed in coordinates.
+ }
+ if (auto* e = arg(usage)) {
+ out << ", ";
+ if (!EmitExpression(out, e)) {
+ return false;
+ }
+ }
}
- if (wgsl_ret_width > hlsl_ret_width) {
- TINT_ICE(Writer, diagnostics_)
- << "WGSL return width (" << wgsl_ret_width
- << ") is wider than HLSL return width (" << hlsl_ret_width << ") for "
- << builtin->Type();
- return false;
+
+ if (builtin->Type() == sem::BuiltinType::kTextureStore) {
+ out << "] = ";
+ if (!EmitExpression(out, arg(Usage::kValue))) {
+ return false;
+ }
+ } else {
+ out << ")";
+
+ // If the builtin return type does not match the number of elements of the
+ // HLSL builtin, we need to swizzle the expression to generate the correct
+ // number of components.
+ uint32_t wgsl_ret_width = 1;
+ if (auto* vec = builtin->ReturnType()->As<sem::Vector>()) {
+ wgsl_ret_width = vec->Width();
+ }
+ if (wgsl_ret_width < hlsl_ret_width) {
+ out << ".";
+ for (uint32_t i = 0; i < wgsl_ret_width; i++) {
+ out << "xyz"[i];
+ }
+ }
+ if (wgsl_ret_width > hlsl_ret_width) {
+ TINT_ICE(Writer, diagnostics_)
+ << "WGSL return width (" << wgsl_ret_width << ") is wider than HLSL return width ("
+ << hlsl_ret_width << ") for " << builtin->Type();
+ return false;
+ }
}
- }
- return true;
+ return true;
}
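
The query path above sizes a temporary vector for GetDimensions() and, when a level argument forces an extra element, falls back on a swizzle table so the result expression still selects only the original components. A minimal standalone C++ sketch of that selection logic, outside the patch; QuerySwizzle and kSwizzles are invented names, not Tint API:

    #include <cassert>
    #include <string>

    // Mirrors the swizzle bookkeeping used for texture query builtins: grow the
    // temporary vector by one slot for the NumberOfLevels out-parameter, then
    // pick a swizzle that keeps the result equivalent to the original vector.
    std::string QuerySwizzle(int num_dimensions, std::string swizzle) {
        num_dimensions++;  // reserve a slot for NumberOfLevels
        if (swizzle.empty()) {
            static constexpr const char* kSwizzles[] = {"", ".x", ".xy", ".xyz"};
            swizzle = kSwizzles[num_dimensions - 1];
        }
        return swizzle;
    }

    int main() {
        assert(QuerySwizzle(2, "") == ".xy");   // 2D textureDimensions with a level argument
        assert(QuerySwizzle(3, ".z") == ".z");  // arrayed query: existing swizzle is kept
        return 0;
    }
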
std::string GeneratorImpl::generate_builtin_name(const sem::Builtin* builtin) {
- switch (builtin->Type()) {
- case sem::BuiltinType::kAbs:
- case sem::BuiltinType::kAcos:
- case sem::BuiltinType::kAll:
- case sem::BuiltinType::kAny:
- case sem::BuiltinType::kAsin:
- case sem::BuiltinType::kAtan:
- case sem::BuiltinType::kAtan2:
- case sem::BuiltinType::kCeil:
- case sem::BuiltinType::kClamp:
- case sem::BuiltinType::kCos:
- case sem::BuiltinType::kCosh:
- case sem::BuiltinType::kCross:
- case sem::BuiltinType::kDeterminant:
- case sem::BuiltinType::kDistance:
- case sem::BuiltinType::kDot:
- case sem::BuiltinType::kExp:
- case sem::BuiltinType::kExp2:
- case sem::BuiltinType::kFloor:
- case sem::BuiltinType::kFrexp:
- case sem::BuiltinType::kLdexp:
- case sem::BuiltinType::kLength:
- case sem::BuiltinType::kLog:
- case sem::BuiltinType::kLog2:
- case sem::BuiltinType::kMax:
- case sem::BuiltinType::kMin:
- case sem::BuiltinType::kModf:
- case sem::BuiltinType::kNormalize:
- case sem::BuiltinType::kPow:
- case sem::BuiltinType::kReflect:
- case sem::BuiltinType::kRefract:
- case sem::BuiltinType::kRound:
- case sem::BuiltinType::kSign:
- case sem::BuiltinType::kSin:
- case sem::BuiltinType::kSinh:
- case sem::BuiltinType::kSqrt:
- case sem::BuiltinType::kStep:
- case sem::BuiltinType::kTan:
- case sem::BuiltinType::kTanh:
- case sem::BuiltinType::kTranspose:
- case sem::BuiltinType::kTrunc:
- return builtin->str();
- case sem::BuiltinType::kCountOneBits:
- return "countbits";
- case sem::BuiltinType::kDpdx:
- return "ddx";
- case sem::BuiltinType::kDpdxCoarse:
- return "ddx_coarse";
- case sem::BuiltinType::kDpdxFine:
- return "ddx_fine";
- case sem::BuiltinType::kDpdy:
- return "ddy";
- case sem::BuiltinType::kDpdyCoarse:
- return "ddy_coarse";
- case sem::BuiltinType::kDpdyFine:
- return "ddy_fine";
- case sem::BuiltinType::kFaceForward:
- return "faceforward";
- case sem::BuiltinType::kFract:
- return "frac";
- case sem::BuiltinType::kFma:
- return "mad";
- case sem::BuiltinType::kFwidth:
- case sem::BuiltinType::kFwidthCoarse:
- case sem::BuiltinType::kFwidthFine:
- return "fwidth";
- case sem::BuiltinType::kInverseSqrt:
- return "rsqrt";
- case sem::BuiltinType::kMix:
- return "lerp";
- case sem::BuiltinType::kReverseBits:
- return "reversebits";
- case sem::BuiltinType::kSmoothstep:
- case sem::BuiltinType::kSmoothStep:
- return "smoothstep";
- default:
- diagnostics_.add_error(
- diag::System::Writer,
- "Unknown builtin method: " + std::string(builtin->str()));
- }
-
- return "";
+ switch (builtin->Type()) {
+ case sem::BuiltinType::kAbs:
+ case sem::BuiltinType::kAcos:
+ case sem::BuiltinType::kAll:
+ case sem::BuiltinType::kAny:
+ case sem::BuiltinType::kAsin:
+ case sem::BuiltinType::kAtan:
+ case sem::BuiltinType::kAtan2:
+ case sem::BuiltinType::kCeil:
+ case sem::BuiltinType::kClamp:
+ case sem::BuiltinType::kCos:
+ case sem::BuiltinType::kCosh:
+ case sem::BuiltinType::kCross:
+ case sem::BuiltinType::kDeterminant:
+ case sem::BuiltinType::kDistance:
+ case sem::BuiltinType::kDot:
+ case sem::BuiltinType::kExp:
+ case sem::BuiltinType::kExp2:
+ case sem::BuiltinType::kFloor:
+ case sem::BuiltinType::kFrexp:
+ case sem::BuiltinType::kLdexp:
+ case sem::BuiltinType::kLength:
+ case sem::BuiltinType::kLog:
+ case sem::BuiltinType::kLog2:
+ case sem::BuiltinType::kMax:
+ case sem::BuiltinType::kMin:
+ case sem::BuiltinType::kModf:
+ case sem::BuiltinType::kNormalize:
+ case sem::BuiltinType::kPow:
+ case sem::BuiltinType::kReflect:
+ case sem::BuiltinType::kRefract:
+ case sem::BuiltinType::kRound:
+ case sem::BuiltinType::kSign:
+ case sem::BuiltinType::kSin:
+ case sem::BuiltinType::kSinh:
+ case sem::BuiltinType::kSqrt:
+ case sem::BuiltinType::kStep:
+ case sem::BuiltinType::kTan:
+ case sem::BuiltinType::kTanh:
+ case sem::BuiltinType::kTranspose:
+ case sem::BuiltinType::kTrunc:
+ return builtin->str();
+ case sem::BuiltinType::kCountOneBits: // uint
+ return "countbits";
+ case sem::BuiltinType::kDpdx:
+ return "ddx";
+ case sem::BuiltinType::kDpdxCoarse:
+ return "ddx_coarse";
+ case sem::BuiltinType::kDpdxFine:
+ return "ddx_fine";
+ case sem::BuiltinType::kDpdy:
+ return "ddy";
+ case sem::BuiltinType::kDpdyCoarse:
+ return "ddy_coarse";
+ case sem::BuiltinType::kDpdyFine:
+ return "ddy_fine";
+ case sem::BuiltinType::kFaceForward:
+ return "faceforward";
+ case sem::BuiltinType::kFract:
+ return "frac";
+ case sem::BuiltinType::kFma:
+ return "mad";
+ case sem::BuiltinType::kFwidth:
+ case sem::BuiltinType::kFwidthCoarse:
+ case sem::BuiltinType::kFwidthFine:
+ return "fwidth";
+ case sem::BuiltinType::kInverseSqrt:
+ return "rsqrt";
+ case sem::BuiltinType::kMix:
+ return "lerp";
+ case sem::BuiltinType::kReverseBits: // uint
+ return "reversebits";
+ case sem::BuiltinType::kSmoothstep:
+ case sem::BuiltinType::kSmoothStep:
+ return "smoothstep";
+ default:
+ diagnostics_.add_error(diag::System::Writer,
+ "Unknown builtin method: " + std::string(builtin->str()));
+ }
+
+ return "";
}
bool GeneratorImpl::EmitCase(const ast::SwitchStatement* s, size_t case_idx) {
- auto* stmt = s->body[case_idx];
- if (stmt->IsDefault()) {
- line() << "default: {";
- } else {
- for (auto* selector : stmt->selectors) {
- auto out = line();
- out << "case ";
- if (!EmitLiteral(out, selector)) {
- return false;
- }
- out << ":";
- if (selector == stmt->selectors.back()) {
- out << " {";
- }
+ auto* stmt = s->body[case_idx];
+ if (stmt->IsDefault()) {
+ line() << "default: {";
+ } else {
+ for (auto* selector : stmt->selectors) {
+ auto out = line();
+ out << "case ";
+ if (!EmitLiteral(out, selector)) {
+ return false;
+ }
+ out << ":";
+ if (selector == stmt->selectors.back()) {
+ out << " {";
+ }
+ }
}
- }
- increment_indent();
- TINT_DEFER({
- decrement_indent();
- line() << "}";
- });
+ increment_indent();
+ TINT_DEFER({
+ decrement_indent();
+ line() << "}";
+ });
- // Emit the case statement
- if (!EmitStatements(stmt->body->statements)) {
- return false;
- }
+ // Emit the case statement
+ if (!EmitStatements(stmt->body->statements)) {
+ return false;
+ }
- // Inline all fallthrough case statements. FXC cannot handle fallthroughs.
- while (tint::Is<ast::FallthroughStatement>(stmt->body->Last())) {
- case_idx++;
- stmt = s->body[case_idx];
- // Generate each fallthrough case statement in a new block. This is done to
- // prevent symbol collision of variables declared in these cases statements.
- if (!EmitBlock(stmt->body)) {
- return false;
+ // Inline all fallthrough case statements. FXC cannot handle fallthroughs.
+ while (tint::Is<ast::FallthroughStatement>(stmt->body->Last())) {
+ case_idx++;
+ stmt = s->body[case_idx];
+        // Generate each fallthrough case statement in a new block. This is done to
+        // prevent symbol collisions between variables declared in these case statements.
+ if (!EmitBlock(stmt->body)) {
+ return false;
+ }
}
- }
- if (!tint::IsAnyOf<ast::BreakStatement, ast::FallthroughStatement>(
- stmt->body->Last())) {
- line() << "break;";
- }
+ if (!tint::IsAnyOf<ast::BreakStatement, ast::FallthroughStatement>(stmt->body->Last())) {
+ line() << "break;";
+ }
- return true;
+ return true;
}
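
EmitCase() above inlines the bodies of fallthrough cases because FXC cannot compile fallthroughs. A rough standalone sketch of that inlining loop over a toy case list; the Case struct, its fields, and the always-emitted break are simplifications made up for illustration:

    #include <cstddef>
    #include <iostream>
    #include <string>
    #include <vector>

    // Toy stand-in for an AST case: a body plus a flag saying whether it ends
    // in a fallthrough statement.
    struct Case {
        std::string body;
        bool falls_through;
    };

    // Emit one case, then keep appending the bodies of following cases while the
    // current one falls through, each in its own block to avoid symbol collisions
    // between locally declared variables.
    void EmitCaseInlined(const std::vector<Case>& cases, std::size_t case_idx) {
        std::cout << "case " << case_idx << ": {\n";
        std::cout << "  " << cases[case_idx].body << "\n";
        while (cases[case_idx].falls_through) {
            case_idx++;
            std::cout << "  {\n    " << cases[case_idx].body << "\n  }\n";
        }
        std::cout << "  break;\n}\n";
    }

    int main() {
        EmitCaseInlined({{"x = 1;", true}, {"x = 2;", false}}, 0);
        return 0;
    }
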
bool GeneratorImpl::EmitContinue(const ast::ContinueStatement*) {
- if (!emit_continuing_()) {
- return false;
- }
- line() << "continue;";
- return true;
-}
-
-bool GeneratorImpl::EmitDiscard(const ast::DiscardStatement*) {
- // TODO(dsinclair): Verify this is correct when the discard semantics are
- // defined for WGSL (https://github.com/gpuweb/gpuweb/issues/361)
- line() << "discard;";
- return true;
-}
-
-bool GeneratorImpl::EmitExpression(std::ostream& out,
- const ast::Expression* expr) {
- return Switch(
- expr,
- [&](const ast::IndexAccessorExpression* a) { //
- return EmitIndexAccessor(out, a);
- },
- [&](const ast::BinaryExpression* b) { //
- return EmitBinary(out, b);
- },
- [&](const ast::BitcastExpression* b) { //
- return EmitBitcast(out, b);
- },
- [&](const ast::CallExpression* c) { //
- return EmitCall(out, c);
- },
- [&](const ast::IdentifierExpression* i) { //
- return EmitIdentifier(out, i);
- },
- [&](const ast::LiteralExpression* l) { //
- return EmitLiteral(out, l);
- },
- [&](const ast::MemberAccessorExpression* m) { //
- return EmitMemberAccessor(out, m);
- },
- [&](const ast::UnaryOpExpression* u) { //
- return EmitUnaryOp(out, u);
- },
- [&](Default) { //
- diagnostics_.add_error(
- diag::System::Writer,
- "unknown expression type: " + std::string(expr->TypeInfo().name));
+ if (!emit_continuing_()) {
return false;
- });
+ }
+ line() << "continue;";
+ return true;
}
-bool GeneratorImpl::EmitIdentifier(std::ostream& out,
- const ast::IdentifierExpression* expr) {
- out << builder_.Symbols().NameFor(expr->symbol);
- return true;
+bool GeneratorImpl::EmitDiscard(const ast::DiscardStatement*) {
+ // TODO(dsinclair): Verify this is correct when the discard semantics are
+ // defined for WGSL (https://github.com/gpuweb/gpuweb/issues/361)
+ line() << "discard;";
+ return true;
}
-bool GeneratorImpl::EmitIf(const ast::IfStatement* stmt) {
- {
- auto out = line();
- out << "if (";
- if (!EmitExpression(out, stmt->condition)) {
- return false;
+bool GeneratorImpl::EmitExpression(std::ostream& out, const ast::Expression* expr) {
+ if (auto* sem = builder_.Sem().Get(expr)) {
+ if (auto constant = sem->ConstantValue()) {
+ return EmitConstant(out, constant);
+ }
}
- out << ") {";
- }
-
- if (!EmitStatementsWithIndent(stmt->body->statements)) {
- return false;
- }
+ return Switch(
+ expr,
+ [&](const ast::IndexAccessorExpression* a) { //
+ return EmitIndexAccessor(out, a);
+ },
+ [&](const ast::BinaryExpression* b) { //
+ return EmitBinary(out, b);
+ },
+ [&](const ast::BitcastExpression* b) { //
+ return EmitBitcast(out, b);
+ },
+ [&](const ast::CallExpression* c) { //
+ return EmitCall(out, c);
+ },
+ [&](const ast::IdentifierExpression* i) { //
+ return EmitIdentifier(out, i);
+ },
+ [&](const ast::LiteralExpression* l) { //
+ return EmitLiteral(out, l);
+ },
+ [&](const ast::MemberAccessorExpression* m) { //
+ return EmitMemberAccessor(out, m);
+ },
+ [&](const ast::UnaryOpExpression* u) { //
+ return EmitUnaryOp(out, u);
+ },
+ [&](Default) { //
+ diagnostics_.add_error(diag::System::Writer, "unknown expression type: " +
+ std::string(expr->TypeInfo().name));
+ return false;
+ });
+}
- for (auto* e : stmt->else_statements) {
- if (e->condition) {
- line() << "} else {";
- increment_indent();
+bool GeneratorImpl::EmitIdentifier(std::ostream& out, const ast::IdentifierExpression* expr) {
+ out << builder_.Symbols().NameFor(expr->symbol);
+ return true;
+}
- {
+bool GeneratorImpl::EmitIf(const ast::IfStatement* stmt) {
+ {
auto out = line();
out << "if (";
- if (!EmitExpression(out, e->condition)) {
- return false;
+ if (!EmitExpression(out, stmt->condition)) {
+ return false;
}
out << ") {";
- }
- } else {
- line() << "} else {";
}
- if (!EmitStatementsWithIndent(e->body->statements)) {
- return false;
+ if (!EmitStatementsWithIndent(stmt->body->statements)) {
+ return false;
}
- }
-
- line() << "}";
- for (auto* e : stmt->else_statements) {
- if (e->condition) {
- decrement_indent();
- line() << "}";
+ if (stmt->else_statement) {
+ line() << "} else {";
+ if (auto* block = stmt->else_statement->As<ast::BlockStatement>()) {
+ if (!EmitStatementsWithIndent(block->statements)) {
+ return false;
+ }
+ } else {
+ if (!EmitStatementsWithIndent({stmt->else_statement})) {
+ return false;
+ }
+ }
}
- }
- return true;
+ line() << "}";
+
+ return true;
}
bool GeneratorImpl::EmitFunction(const ast::Function* func) {
- auto* sem = builder_.Sem().Get(func);
-
- if (ast::HasAttribute<ast::InternalAttribute>(func->attributes)) {
- // An internal function. Do not emit.
- return true;
- }
+ auto* sem = builder_.Sem().Get(func);
+
+ // Emit storage atomic helpers
+ if (auto* intrinsic =
+ ast::GetAttribute<transform::DecomposeMemoryAccess::Intrinsic>(func->attributes)) {
+ if (intrinsic->storage_class == ast::StorageClass::kStorage && intrinsic->IsAtomic()) {
+ if (!EmitStorageAtomicIntrinsic(func, intrinsic)) {
+ return false;
+ }
+ }
+ return true;
+ }
- {
- auto out = line();
- auto name = builder_.Symbols().NameFor(func->symbol);
- // If the function returns an array, then we need to declare a typedef for
- // this.
- if (sem->ReturnType()->Is<sem::Array>()) {
- auto typedef_name = UniqueIdentifier(name + "_ret");
- auto pre = line();
- pre << "typedef ";
- if (!EmitTypeAndName(pre, sem->ReturnType(), ast::StorageClass::kNone,
- ast::Access::kReadWrite, typedef_name)) {
- return false;
- }
- pre << ";";
- out << typedef_name;
- } else {
- if (!EmitType(out, sem->ReturnType(), ast::StorageClass::kNone,
- ast::Access::kReadWrite, "")) {
- return false;
- }
+ if (ast::HasAttribute<ast::InternalAttribute>(func->attributes)) {
+ // An internal function. Do not emit.
+ return true;
}
- out << " " << name << "(";
+ {
+ auto out = line();
+ auto name = builder_.Symbols().NameFor(func->symbol);
+ // If the function returns an array, then we need to declare a typedef for
+ // this.
+ if (sem->ReturnType()->Is<sem::Array>()) {
+ auto typedef_name = UniqueIdentifier(name + "_ret");
+ auto pre = line();
+ pre << "typedef ";
+ if (!EmitTypeAndName(pre, sem->ReturnType(), ast::StorageClass::kNone,
+ ast::Access::kReadWrite, typedef_name)) {
+ return false;
+ }
+ pre << ";";
+ out << typedef_name;
+ } else {
+ if (!EmitType(out, sem->ReturnType(), ast::StorageClass::kNone, ast::Access::kReadWrite,
+ "")) {
+ return false;
+ }
+ }
- bool first = true;
+ out << " " << name << "(";
- for (auto* v : sem->Parameters()) {
- if (!first) {
- out << ", ";
- }
- first = false;
-
- auto const* type = v->Type();
-
- if (auto* ptr = type->As<sem::Pointer>()) {
- // Transform pointer parameters in to `inout` parameters.
- // The WGSL spec is highly restrictive in what can be passed in pointer
- // parameters, which allows for this transformation. See:
- // https://gpuweb.github.io/gpuweb/wgsl/#function-restriction
- out << "inout ";
- type = ptr->StoreType();
- }
-
- // Note: WGSL only allows for StorageClass::kNone on parameters, however
- // the sanitizer transforms generates load / store functions for storage
- // or uniform buffers. These functions have a buffer parameter with
- // StorageClass::kStorage or StorageClass::kUniform. This is required to
- // correctly translate the parameter to a [RW]ByteAddressBuffer for
- // storage buffers and a uint4[N] for uniform buffers.
- if (!EmitTypeAndName(
- out, type, v->StorageClass(), v->Access(),
- builder_.Symbols().NameFor(v->Declaration()->symbol))) {
- return false;
- }
- }
- out << ") {";
- }
+ bool first = true;
+
+ for (auto* v : sem->Parameters()) {
+ if (!first) {
+ out << ", ";
+ }
+ first = false;
+
+ auto const* type = v->Type();
+
+ if (auto* ptr = type->As<sem::Pointer>()) {
+                // Transform pointer parameters into `inout` parameters.
+ // The WGSL spec is highly restrictive in what can be passed in pointer
+ // parameters, which allows for this transformation. See:
+ // https://gpuweb.github.io/gpuweb/wgsl/#function-restriction
+ out << "inout ";
+ type = ptr->StoreType();
+ }
- if (sem->HasDiscard() && !sem->ReturnType()->Is<sem::Void>()) {
- // BUG(crbug.com/tint/1081): work around non-void functions with discard
- // failing compilation sometimes
- if (!EmitFunctionBodyWithDiscard(func)) {
- return false;
+            // Note: WGSL only allows StorageClass::kNone on parameters; however,
+            // the sanitizer transforms generate load / store functions for storage
+ // or uniform buffers. These functions have a buffer parameter with
+ // StorageClass::kStorage or StorageClass::kUniform. This is required to
+ // correctly translate the parameter to a [RW]ByteAddressBuffer for
+ // storage buffers and a uint4[N] for uniform buffers.
+ if (!EmitTypeAndName(out, type, v->StorageClass(), v->Access(),
+ builder_.Symbols().NameFor(v->Declaration()->symbol))) {
+ return false;
+ }
+ }
+ out << ") {";
}
- } else {
- if (!EmitStatementsWithIndent(func->body->statements)) {
- return false;
+
+ if (sem->HasDiscard() && !sem->ReturnType()->Is<sem::Void>()) {
+ // BUG(crbug.com/tint/1081): work around non-void functions with discard
+ // failing compilation sometimes
+ if (!EmitFunctionBodyWithDiscard(func)) {
+ return false;
+ }
+ } else {
+ if (!EmitStatementsWithIndent(func->body->statements)) {
+ return false;
+ }
}
- }
- line() << "}";
+ line() << "}";
- return true;
+ return true;
}
bool GeneratorImpl::EmitFunctionBodyWithDiscard(const ast::Function* func) {
- // FXC sometimes fails to compile functions that discard with 'Not all control
- // paths return a value'. We work around this by wrapping the function body
- // within an "if (true) { <body> } return <default return type obj>;" so that
- // there is always an (unused) return statement.
+ // FXC sometimes fails to compile functions that discard with 'Not all control
+ // paths return a value'. We work around this by wrapping the function body
+ // within an "if (true) { <body> } return <default return type obj>;" so that
+ // there is always an (unused) return statement.
- auto* sem = builder_.Sem().Get(func);
- TINT_ASSERT(Writer, sem->HasDiscard() && !sem->ReturnType()->Is<sem::Void>());
+ auto* sem = builder_.Sem().Get(func);
+ TINT_ASSERT(Writer, sem->HasDiscard() && !sem->ReturnType()->Is<sem::Void>());
- ScopedIndent si(this);
- line() << "if (true) {";
+ ScopedIndent si(this);
+ line() << "if (true) {";
- if (!EmitStatementsWithIndent(func->body->statements)) {
- return false;
- }
+ if (!EmitStatementsWithIndent(func->body->statements)) {
+ return false;
+ }
- line() << "}";
+ line() << "}";
- // Return an unused result that matches the type of the return value
- auto name = builder_.Symbols().NameFor(builder_.Symbols().New("unused"));
- {
- auto out = line();
- if (!EmitTypeAndName(out, sem->ReturnType(), ast::StorageClass::kNone,
- ast::Access::kReadWrite, name)) {
- return false;
+ // Return an unused result that matches the type of the return value
+ auto name = builder_.Symbols().NameFor(builder_.Symbols().New("unused"));
+ {
+ auto out = line();
+ if (!EmitTypeAndName(out, sem->ReturnType(), ast::StorageClass::kNone,
+ ast::Access::kReadWrite, name)) {
+ return false;
+ }
+ out << ";";
}
- out << ";";
- }
- line() << "return " << name << ";";
+ line() << "return " << name << ";";
- return true;
+ return true;
}
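
EmitFunctionBodyWithDiscard() works around FXC's 'Not all control paths return a value' error by wrapping the body in `if (true) { ... }` and appending an unused default-valued return. A C++ sketch of that emitted shape under stated assumptions: the function and variable names are invented, and the early return stands in for the `discard;` the real output contains:

    #include <cstdio>

    // Shape of the workaround: the real body lives inside `if (true) { ... }`,
    // and a trailing return of a default value keeps FXC's control-flow analysis
    // happy even though it can never be reached.
    float ShadeOrDiscard(bool discard_fragment, float depth) {
        if (true) {
            if (discard_fragment) {
                return 0.0f;  // stands in for `discard;` in the generated HLSL
            }
            return depth;
        }
        float unused = 0.0f;  // mirrors the generated unused default-valued variable
        return unused;
    }

    int main() {
        std::printf("%f\n", ShadeOrDiscard(false, 0.5f));
        return 0;
    }
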
bool GeneratorImpl::EmitGlobalVariable(const ast::Variable* global) {
- if (global->is_const) {
- return EmitProgramConstVariable(global);
- }
-
- auto* sem = builder_.Sem().Get(global);
- switch (sem->StorageClass()) {
- case ast::StorageClass::kUniform:
- return EmitUniformVariable(sem);
- case ast::StorageClass::kStorage:
- return EmitStorageVariable(sem);
- case ast::StorageClass::kUniformConstant:
- return EmitHandleVariable(sem);
- case ast::StorageClass::kPrivate:
- return EmitPrivateVariable(sem);
- case ast::StorageClass::kWorkgroup:
- return EmitWorkgroupVariable(sem);
- default:
- break;
- }
-
- TINT_ICE(Writer, diagnostics_)
- << "unhandled storage class " << sem->StorageClass();
- return false;
+ if (global->is_const) {
+ return EmitProgramConstVariable(global);
+ }
+
+ auto* sem = builder_.Sem().Get(global);
+ switch (sem->StorageClass()) {
+ case ast::StorageClass::kUniform:
+ return EmitUniformVariable(sem);
+ case ast::StorageClass::kStorage:
+ return EmitStorageVariable(sem);
+ case ast::StorageClass::kHandle:
+ return EmitHandleVariable(sem);
+ case ast::StorageClass::kPrivate:
+ return EmitPrivateVariable(sem);
+ case ast::StorageClass::kWorkgroup:
+ return EmitWorkgroupVariable(sem);
+ default:
+ break;
+ }
+
+ TINT_ICE(Writer, diagnostics_) << "unhandled storage class " << sem->StorageClass();
+ return false;
}
bool GeneratorImpl::EmitUniformVariable(const sem::Variable* var) {
- auto* decl = var->Declaration();
- auto binding_point = decl->BindingPoint();
- auto* type = var->Type()->UnwrapRef();
- auto name = builder_.Symbols().NameFor(decl->symbol);
- line() << "cbuffer cbuffer_" << name << RegisterAndSpace('b', binding_point)
- << " {";
-
- {
- ScopedIndent si(this);
- auto out = line();
- if (!EmitTypeAndName(out, type, ast::StorageClass::kUniform, var->Access(),
- name)) {
- return false;
+ auto* decl = var->Declaration();
+ auto binding_point = decl->BindingPoint();
+ auto* type = var->Type()->UnwrapRef();
+ auto name = builder_.Symbols().NameFor(decl->symbol);
+ line() << "cbuffer cbuffer_" << name << RegisterAndSpace('b', binding_point) << " {";
+
+ {
+ ScopedIndent si(this);
+ auto out = line();
+ if (!EmitTypeAndName(out, type, ast::StorageClass::kUniform, var->Access(), name)) {
+ return false;
+ }
+ out << ";";
}
- out << ";";
- }
- line() << "};";
+ line() << "};";
- return true;
+ return true;
}
bool GeneratorImpl::EmitStorageVariable(const sem::Variable* var) {
- auto* decl = var->Declaration();
- auto* type = var->Type()->UnwrapRef();
- auto out = line();
- if (!EmitTypeAndName(out, type, ast::StorageClass::kStorage, var->Access(),
- builder_.Symbols().NameFor(decl->symbol))) {
- return false;
- }
+ auto* decl = var->Declaration();
+ auto* type = var->Type()->UnwrapRef();
+ auto out = line();
+ if (!EmitTypeAndName(out, type, ast::StorageClass::kStorage, var->Access(),
+ builder_.Symbols().NameFor(decl->symbol))) {
+ return false;
+ }
- out << RegisterAndSpace(var->Access() == ast::Access::kRead ? 't' : 'u',
- decl->BindingPoint())
- << ";";
+ out << RegisterAndSpace(var->Access() == ast::Access::kRead ? 't' : 'u', decl->BindingPoint())
+ << ";";
- return true;
+ return true;
}
bool GeneratorImpl::EmitHandleVariable(const sem::Variable* var) {
- auto* decl = var->Declaration();
- auto* unwrapped_type = var->Type()->UnwrapRef();
- auto out = line();
+ auto* decl = var->Declaration();
+ auto* unwrapped_type = var->Type()->UnwrapRef();
+ auto out = line();
- auto name = builder_.Symbols().NameFor(decl->symbol);
- auto* type = var->Type()->UnwrapRef();
- if (!EmitTypeAndName(out, type, var->StorageClass(), var->Access(), name)) {
- return false;
- }
+ auto name = builder_.Symbols().NameFor(decl->symbol);
+ auto* type = var->Type()->UnwrapRef();
+ if (!EmitTypeAndName(out, type, var->StorageClass(), var->Access(), name)) {
+ return false;
+ }
- const char* register_space = nullptr;
+ const char* register_space = nullptr;
- if (unwrapped_type->Is<sem::Texture>()) {
- register_space = "t";
- if (unwrapped_type->Is<sem::StorageTexture>()) {
- register_space = "u";
+ if (unwrapped_type->Is<sem::Texture>()) {
+ register_space = "t";
+ if (unwrapped_type->Is<sem::StorageTexture>()) {
+ register_space = "u";
+ }
+ } else if (unwrapped_type->Is<sem::Sampler>()) {
+ register_space = "s";
}
- } else if (unwrapped_type->Is<sem::Sampler>()) {
- register_space = "s";
- }
- if (register_space) {
- auto bp = decl->BindingPoint();
- out << " : register(" << register_space << bp.binding->value << ", space"
- << bp.group->value << ")";
- }
+ if (register_space) {
+ auto bp = decl->BindingPoint();
+ out << " : register(" << register_space << bp.binding->value << ", space" << bp.group->value
+ << ")";
+ }
- out << ";";
- return true;
+ out << ";";
+ return true;
}
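
EmitHandleVariable() above picks an HLSL register class from the handle type: sampled textures go to 't' registers, storage textures to 'u', and samplers to 's', then appends a register/space annotation from the binding point. A small sketch of how that suffix is assembled; RegisterString and HandleKind are illustrative names, not Tint API:

    #include <iostream>
    #include <sstream>
    #include <string>

    enum class HandleKind { kSampledTexture, kStorageTexture, kSampler };

    // Builds the " : register(<class><binding>, space<group>)" suffix appended
    // after the variable declaration.
    std::string RegisterString(HandleKind kind, int binding, int group) {
        char register_class = 't';  // sampled textures
        if (kind == HandleKind::kStorageTexture) {
            register_class = 'u';   // UAV registers for storage textures
        } else if (kind == HandleKind::kSampler) {
            register_class = 's';
        }
        std::ostringstream out;
        out << " : register(" << register_class << binding << ", space" << group << ")";
        return out.str();
    }

    int main() {
        std::cout << RegisterString(HandleKind::kStorageTexture, 0, 1) << "\n";
        // prints: " : register(u0, space1)"
        return 0;
    }
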
bool GeneratorImpl::EmitPrivateVariable(const sem::Variable* var) {
- auto* decl = var->Declaration();
- auto out = line();
+ auto* decl = var->Declaration();
+ auto out = line();
- out << "static ";
+ out << "static ";
- auto name = builder_.Symbols().NameFor(decl->symbol);
- auto* type = var->Type()->UnwrapRef();
- if (!EmitTypeAndName(out, type, var->StorageClass(), var->Access(), name)) {
- return false;
- }
-
- out << " = ";
- if (auto* constructor = decl->constructor) {
- if (!EmitExpression(out, constructor)) {
- return false;
+ auto name = builder_.Symbols().NameFor(decl->symbol);
+ auto* type = var->Type()->UnwrapRef();
+ if (!EmitTypeAndName(out, type, var->StorageClass(), var->Access(), name)) {
+ return false;
}
- } else {
- if (!EmitZeroValue(out, var->Type()->UnwrapRef())) {
- return false;
+
+ out << " = ";
+ if (auto* constructor = decl->constructor) {
+ if (!EmitExpression(out, constructor)) {
+ return false;
+ }
+ } else {
+ if (!EmitZeroValue(out, var->Type()->UnwrapRef())) {
+ return false;
+ }
}
- }
- out << ";";
- return true;
+ out << ";";
+ return true;
}
bool GeneratorImpl::EmitWorkgroupVariable(const sem::Variable* var) {
- auto* decl = var->Declaration();
- auto out = line();
+ auto* decl = var->Declaration();
+ auto out = line();
- out << "groupshared ";
+ out << "groupshared ";
- auto name = builder_.Symbols().NameFor(decl->symbol);
- auto* type = var->Type()->UnwrapRef();
- if (!EmitTypeAndName(out, type, var->StorageClass(), var->Access(), name)) {
- return false;
- }
+ auto name = builder_.Symbols().NameFor(decl->symbol);
+ auto* type = var->Type()->UnwrapRef();
+ if (!EmitTypeAndName(out, type, var->StorageClass(), var->Access(), name)) {
+ return false;
+ }
- if (auto* constructor = decl->constructor) {
- out << " = ";
- if (!EmitExpression(out, constructor)) {
- return false;
+ if (auto* constructor = decl->constructor) {
+ out << " = ";
+ if (!EmitExpression(out, constructor)) {
+ return false;
+ }
}
- }
- out << ";";
- return true;
+ out << ";";
+ return true;
}
std::string GeneratorImpl::builtin_to_attribute(ast::Builtin builtin) const {
- switch (builtin) {
- case ast::Builtin::kPosition:
- return "SV_Position";
- case ast::Builtin::kVertexIndex:
- return "SV_VertexID";
- case ast::Builtin::kInstanceIndex:
- return "SV_InstanceID";
- case ast::Builtin::kFrontFacing:
- return "SV_IsFrontFace";
- case ast::Builtin::kFragDepth:
- return "SV_Depth";
- case ast::Builtin::kLocalInvocationId:
- return "SV_GroupThreadID";
- case ast::Builtin::kLocalInvocationIndex:
- return "SV_GroupIndex";
- case ast::Builtin::kGlobalInvocationId:
- return "SV_DispatchThreadID";
- case ast::Builtin::kWorkgroupId:
- return "SV_GroupID";
- case ast::Builtin::kSampleIndex:
- return "SV_SampleIndex";
- case ast::Builtin::kSampleMask:
- return "SV_Coverage";
- default:
- break;
- }
- return "";
-}
-
-std::string GeneratorImpl::interpolation_to_modifiers(
- ast::InterpolationType type,
- ast::InterpolationSampling sampling) const {
- std::string modifiers;
- switch (type) {
- case ast::InterpolationType::kPerspective:
- modifiers += "linear ";
- break;
- case ast::InterpolationType::kLinear:
- modifiers += "noperspective ";
- break;
- case ast::InterpolationType::kFlat:
- modifiers += "nointerpolation ";
- break;
- }
- switch (sampling) {
- case ast::InterpolationSampling::kCentroid:
- modifiers += "centroid ";
- break;
- case ast::InterpolationSampling::kSample:
- modifiers += "sample ";
- break;
- case ast::InterpolationSampling::kCenter:
- case ast::InterpolationSampling::kNone:
- break;
- }
- return modifiers;
+ switch (builtin) {
+ case ast::Builtin::kPosition:
+ return "SV_Position";
+ case ast::Builtin::kVertexIndex:
+ return "SV_VertexID";
+ case ast::Builtin::kInstanceIndex:
+ return "SV_InstanceID";
+ case ast::Builtin::kFrontFacing:
+ return "SV_IsFrontFace";
+ case ast::Builtin::kFragDepth:
+ return "SV_Depth";
+ case ast::Builtin::kLocalInvocationId:
+ return "SV_GroupThreadID";
+ case ast::Builtin::kLocalInvocationIndex:
+ return "SV_GroupIndex";
+ case ast::Builtin::kGlobalInvocationId:
+ return "SV_DispatchThreadID";
+ case ast::Builtin::kWorkgroupId:
+ return "SV_GroupID";
+ case ast::Builtin::kSampleIndex:
+ return "SV_SampleIndex";
+ case ast::Builtin::kSampleMask:
+ return "SV_Coverage";
+ default:
+ break;
+ }
+ return "";
+}
+
+std::string GeneratorImpl::interpolation_to_modifiers(ast::InterpolationType type,
+ ast::InterpolationSampling sampling) const {
+ std::string modifiers;
+ switch (type) {
+ case ast::InterpolationType::kPerspective:
+ modifiers += "linear ";
+ break;
+ case ast::InterpolationType::kLinear:
+ modifiers += "noperspective ";
+ break;
+ case ast::InterpolationType::kFlat:
+ modifiers += "nointerpolation ";
+ break;
+ }
+ switch (sampling) {
+ case ast::InterpolationSampling::kCentroid:
+ modifiers += "centroid ";
+ break;
+ case ast::InterpolationSampling::kSample:
+ modifiers += "sample ";
+ break;
+ case ast::InterpolationSampling::kCenter:
+ case ast::InterpolationSampling::kNone:
+ break;
+ }
+ return modifiers;
}
bool GeneratorImpl::EmitEntryPointFunction(const ast::Function* func) {
- auto* func_sem = builder_.Sem().Get(func);
+ auto* func_sem = builder_.Sem().Get(func);
- {
- auto out = line();
- if (func->PipelineStage() == ast::PipelineStage::kCompute) {
- // Emit the workgroup_size attribute.
- auto wgsize = func_sem->WorkgroupSize();
- out << "[numthreads(";
- for (int i = 0; i < 3; i++) {
- if (i > 0) {
- out << ", ";
- }
-
- if (wgsize[i].overridable_const) {
- auto* global = builder_.Sem().Get<sem::GlobalVariable>(
- wgsize[i].overridable_const);
- if (!global->IsOverridable()) {
- TINT_ICE(Writer, builder_.Diagnostics())
- << "expected a pipeline-overridable constant";
- }
- out << kSpecConstantPrefix << global->ConstantId();
- } else {
- out << std::to_string(wgsize[i].value);
+ {
+ auto out = line();
+ if (func->PipelineStage() == ast::PipelineStage::kCompute) {
+ // Emit the workgroup_size attribute.
+ auto wgsize = func_sem->WorkgroupSize();
+ out << "[numthreads(";
+ for (int i = 0; i < 3; i++) {
+ if (i > 0) {
+ out << ", ";
+ }
+
+ if (wgsize[i].overridable_const) {
+ auto* global =
+ builder_.Sem().Get<sem::GlobalVariable>(wgsize[i].overridable_const);
+ if (!global->IsOverridable()) {
+ TINT_ICE(Writer, builder_.Diagnostics())
+ << "expected a pipeline-overridable constant";
+ }
+ out << kSpecConstantPrefix << global->ConstantId();
+ } else {
+ out << std::to_string(wgsize[i].value);
+ }
+ }
+ out << ")]" << std::endl;
}
- }
- out << ")]" << std::endl;
- }
- out << func->return_type->FriendlyName(builder_.Symbols());
+ out << func->return_type->FriendlyName(builder_.Symbols());
- out << " " << builder_.Symbols().NameFor(func->symbol) << "(";
+ out << " " << builder_.Symbols().NameFor(func->symbol) << "(";
- bool first = true;
+ bool first = true;
- // Emit entry point parameters.
- for (auto* var : func->params) {
- auto* sem = builder_.Sem().Get(var);
- auto* type = sem->Type();
- if (!type->Is<sem::Struct>()) {
- // ICE likely indicates that the CanonicalizeEntryPointIO transform was
- // not run, or a builtin parameter was added after it was run.
- TINT_ICE(Writer, diagnostics_)
- << "Unsupported non-struct entry point parameter";
- }
+ // Emit entry point parameters.
+ for (auto* var : func->params) {
+ auto* sem = builder_.Sem().Get(var);
+ auto* type = sem->Type();
+ if (!type->Is<sem::Struct>()) {
+ // ICE likely indicates that the CanonicalizeEntryPointIO transform was
+ // not run, or a builtin parameter was added after it was run.
+ TINT_ICE(Writer, diagnostics_) << "Unsupported non-struct entry point parameter";
+ }
- if (!first) {
- out << ", ";
- }
- first = false;
+ if (!first) {
+ out << ", ";
+ }
+ first = false;
- if (!EmitTypeAndName(out, type, sem->StorageClass(), sem->Access(),
- builder_.Symbols().NameFor(var->symbol))) {
- return false;
- }
- }
+ if (!EmitTypeAndName(out, type, sem->StorageClass(), sem->Access(),
+ builder_.Symbols().NameFor(var->symbol))) {
+ return false;
+ }
+ }
- out << ") {";
- }
+ out << ") {";
+ }
- {
- ScopedIndent si(this);
+ {
+ ScopedIndent si(this);
- if (!EmitStatements(func->body->statements)) {
- return false;
- }
+ if (!EmitStatements(func->body->statements)) {
+ return false;
+ }
- if (!Is<ast::ReturnStatement>(func->body->Last())) {
- ast::ReturnStatement ret(ProgramID(), Source{});
- if (!EmitStatement(&ret)) {
- return false;
- }
+ if (!Is<ast::ReturnStatement>(func->body->Last())) {
+ ast::ReturnStatement ret(ProgramID(), Source{});
+ if (!EmitStatement(&ret)) {
+ return false;
+ }
+ }
}
- }
- line() << "}";
+ line() << "}";
- return true;
+ return true;
}
-bool GeneratorImpl::EmitLiteral(std::ostream& out,
- const ast::LiteralExpression* lit) {
- return Switch(
- lit,
- [&](const ast::BoolLiteralExpression* l) {
- out << (l->value ? "true" : "false");
+bool GeneratorImpl::EmitConstant(std::ostream& out, const sem::Constant& constant) {
+ auto emit_bool = [&](size_t element_idx) {
+ out << (constant.Element<AInt>(element_idx) ? "true" : "false");
return true;
- },
- [&](const ast::FloatLiteralExpression* fl) {
- if (std::isinf(fl->value)) {
- out << (fl->value >= 0 ? "asfloat(0x7f800000u)"
- : "asfloat(0xff800000u)");
- } else if (std::isnan(fl->value)) {
- out << "asfloat(0x7fc00000u)";
- } else {
- out << FloatToString(fl->value) << "f";
- }
+ };
+ auto emit_f32 = [&](size_t element_idx) {
+ PrintF32(out, static_cast<float>(constant.Element<AFloat>(element_idx)));
return true;
- },
- [&](const ast::SintLiteralExpression* sl) {
- out << sl->value;
+ };
+ auto emit_i32 = [&](size_t element_idx) {
+ out << constant.Element<AInt>(element_idx).value;
return true;
- },
- [&](const ast::UintLiteralExpression* ul) {
- out << ul->value << "u";
+ };
+ auto emit_u32 = [&](size_t element_idx) {
+ out << constant.Element<AInt>(element_idx).value << "u";
return true;
- },
- [&](Default) {
- diagnostics_.add_error(diag::System::Writer, "unknown literal type");
- return false;
- });
-}
+ };
+ auto emit_vector = [&](const sem::Vector* vec_ty, size_t start, size_t end) {
+ if (constant.AllEqual(start, end)) {
+ {
+ ScopedParen sp(out);
+ bool ok = Switch(
+ vec_ty->type(), //
+ [&](const sem::Bool*) { return emit_bool(0); }, //
+ [&](const sem::F32*) { return emit_f32(0); }, //
+ [&](const sem::I32*) { return emit_i32(0); }, //
+ [&](const sem::U32*) { return emit_u32(0); } //
+ );
+ if (!ok) {
+ return false;
+ }
+ }
+ out << ".";
+ for (size_t i = start; i < end; i++) {
+ out << "x";
+ }
+ return true;
+ }
-bool GeneratorImpl::EmitValue(std::ostream& out,
- const sem::Type* type,
- int value) {
- return Switch(
- type,
- [&](const sem::Bool*) {
- out << (value == 0 ? "false" : "true");
- return true;
- },
- [&](const sem::F32*) {
- out << value << ".0f";
- return true;
- },
- [&](const sem::I32*) {
- out << value;
- return true;
- },
- [&](const sem::U32*) {
- out << value << "u";
- return true;
- },
- [&](const sem::Vector* vec) {
- if (!EmitType(out, type, ast::StorageClass::kNone,
- ast::Access::kReadWrite, "")) {
- return false;
+ if (!EmitType(out, vec_ty, ast::StorageClass::kNone, ast::Access::kUndefined, "")) {
+ return false;
}
+
ScopedParen sp(out);
- for (uint32_t i = 0; i < vec->Width(); i++) {
- if (i != 0) {
- out << ", ";
- }
- if (!EmitValue(out, vec->type(), value)) {
+
+ auto emit_els = [&](auto emit_el) {
+ for (size_t i = start; i < end; i++) {
+ if (i > start) {
+ out << ", ";
+ }
+ if (!emit_el(i)) {
+ return false;
+ }
+ }
+ return true;
+ };
+ return Switch(
+ vec_ty->type(), //
+ [&](const sem::Bool*) { return emit_els(emit_bool); }, //
+ [&](const sem::F32*) { return emit_els(emit_f32); }, //
+ [&](const sem::I32*) { return emit_els(emit_i32); }, //
+ [&](const sem::U32*) { return emit_els(emit_u32); }, //
+ [&](Default) {
+ diagnostics_.add_error(diag::System::Writer,
+ "unhandled constant vector element type: " +
+ builder_.FriendlyName(vec_ty->type()));
+ return false;
+ });
+ };
+ auto emit_matrix = [&](const sem::Matrix* m) {
+ if (!EmitType(out, constant.Type(), ast::StorageClass::kNone, ast::Access::kUndefined,
+ "")) {
return false;
- }
- }
- return true;
- },
- [&](const sem::Matrix* mat) {
- if (!EmitType(out, type, ast::StorageClass::kNone,
- ast::Access::kReadWrite, "")) {
- return false;
}
+
ScopedParen sp(out);
- for (uint32_t i = 0; i < (mat->rows() * mat->columns()); i++) {
- if (i != 0) {
- out << ", ";
- }
- if (!EmitValue(out, mat->type(), value)) {
- return false;
- }
+
+ for (size_t column_idx = 0; column_idx < m->columns(); column_idx++) {
+ if (column_idx > 0) {
+ out << ", ";
+ }
+ size_t start = m->rows() * column_idx;
+ size_t end = m->rows() * (column_idx + 1);
+ if (!emit_vector(m->ColumnType(), start, end)) {
+ return false;
+ }
}
return true;
- },
- [&](const sem::Struct*) {
- out << "(";
- TINT_DEFER(out << ")" << value);
- return EmitType(out, type, ast::StorageClass::kNone,
- ast::Access::kUndefined, "");
- },
- [&](const sem::Array*) {
- out << "(";
- TINT_DEFER(out << ")" << value);
- return EmitType(out, type, ast::StorageClass::kNone,
- ast::Access::kUndefined, "");
- },
- [&](Default) {
- diagnostics_.add_error(diag::System::Writer,
- "Invalid type for value emission: " +
- type->FriendlyName(builder_.Symbols()));
- return false;
- });
+ };
+ return Switch(
+ constant.Type(), //
+ [&](const sem::Bool*) { return emit_bool(0); }, //
+ [&](const sem::F32*) { return emit_f32(0); }, //
+ [&](const sem::I32*) { return emit_i32(0); }, //
+ [&](const sem::U32*) { return emit_u32(0); }, //
+ [&](const sem::Vector* v) { return emit_vector(v, 0, constant.ElementCount()); }, //
+ [&](const sem::Matrix* m) { return emit_matrix(m); },
+ [&](Default) {
+ diagnostics_.add_error(
+ diag::System::Writer,
+ "unhandled constant type: " + builder_.FriendlyName(constant.Type()));
+ return false;
+ });
+}
+
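
EmitConstant() above takes a shortcut when every lane of a vector constant is equal: it emits the scalar once in parentheses and repeats a '.x' component per lane, e.g. "(1.0f).xxx" rather than a full constructor. A standalone sketch of that splat formatting; SplatConstant is an invented helper name:

    #include <cstddef>
    #include <iostream>
    #include <sstream>
    #include <string>

    // Formats an all-equal vector constant the way the AllEqual() path does:
    // "(<scalar>)." followed by one 'x' per lane.
    std::string SplatConstant(const std::string& scalar, std::size_t lanes) {
        std::ostringstream out;
        out << "(" << scalar << ").";
        for (std::size_t i = 0; i < lanes; i++) {
            out << "x";
        }
        return out.str();
    }

    int main() {
        std::cout << SplatConstant("1.0f", 3) << "\n";  // (1.0f).xxx
        return 0;
    }
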
+bool GeneratorImpl::EmitLiteral(std::ostream& out, const ast::LiteralExpression* lit) {
+ return Switch(
+ lit,
+ [&](const ast::BoolLiteralExpression* l) {
+ out << (l->value ? "true" : "false");
+ return true;
+ },
+ [&](const ast::FloatLiteralExpression* l) {
+ PrintF32(out, static_cast<float>(l->value));
+ return true;
+ },
+ [&](const ast::IntLiteralExpression* i) {
+ out << i->value;
+ switch (i->suffix) {
+ case ast::IntLiteralExpression::Suffix::kNone:
+ case ast::IntLiteralExpression::Suffix::kI:
+ return true;
+ case ast::IntLiteralExpression::Suffix::kU:
+ out << "u";
+ return true;
+ }
+ diagnostics_.add_error(diag::System::Writer, "unknown integer literal suffix type");
+ return false;
+ },
+ [&](Default) {
+ diagnostics_.add_error(diag::System::Writer, "unknown literal type");
+ return false;
+ });
+}
+
+bool GeneratorImpl::EmitValue(std::ostream& out, const sem::Type* type, int value) {
+ return Switch(
+ type,
+ [&](const sem::Bool*) {
+ out << (value == 0 ? "false" : "true");
+ return true;
+ },
+ [&](const sem::F32*) {
+ out << value << ".0f";
+ return true;
+ },
+ [&](const sem::I32*) {
+ out << value;
+ return true;
+ },
+ [&](const sem::U32*) {
+ out << value << "u";
+ return true;
+ },
+ [&](const sem::Vector* vec) {
+ if (!EmitType(out, type, ast::StorageClass::kNone, ast::Access::kReadWrite, "")) {
+ return false;
+ }
+ ScopedParen sp(out);
+ for (uint32_t i = 0; i < vec->Width(); i++) {
+ if (i != 0) {
+ out << ", ";
+ }
+ if (!EmitValue(out, vec->type(), value)) {
+ return false;
+ }
+ }
+ return true;
+ },
+ [&](const sem::Matrix* mat) {
+ if (!EmitType(out, type, ast::StorageClass::kNone, ast::Access::kReadWrite, "")) {
+ return false;
+ }
+ ScopedParen sp(out);
+ for (uint32_t i = 0; i < (mat->rows() * mat->columns()); i++) {
+ if (i != 0) {
+ out << ", ";
+ }
+ if (!EmitValue(out, mat->type(), value)) {
+ return false;
+ }
+ }
+ return true;
+ },
+ [&](const sem::Struct*) {
+ out << "(";
+ TINT_DEFER(out << ")" << value);
+ return EmitType(out, type, ast::StorageClass::kNone, ast::Access::kUndefined, "");
+ },
+ [&](const sem::Array*) {
+ out << "(";
+ TINT_DEFER(out << ")" << value);
+ return EmitType(out, type, ast::StorageClass::kNone, ast::Access::kUndefined, "");
+ },
+ [&](Default) {
+ diagnostics_.add_error(
+ diag::System::Writer,
+ "Invalid type for value emission: " + type->FriendlyName(builder_.Symbols()));
+ return false;
+ });
}
bool GeneratorImpl::EmitZeroValue(std::ostream& out, const sem::Type* type) {
- return EmitValue(out, type, 0);
+ return EmitValue(out, type, 0);
}
bool GeneratorImpl::EmitLoop(const ast::LoopStatement* stmt) {
- auto emit_continuing = [this, stmt]() {
- if (stmt->continuing && !stmt->continuing->Empty()) {
- if (!EmitBlock(stmt->continuing)) {
- return false;
- }
- }
- return true;
- };
+ auto emit_continuing = [this, stmt]() {
+ if (stmt->continuing && !stmt->continuing->Empty()) {
+ if (!EmitBlock(stmt->continuing)) {
+ return false;
+ }
+ }
+ return true;
+ };
- TINT_SCOPED_ASSIGNMENT(emit_continuing_, emit_continuing);
- line() << LoopAttribute() << "while (true) {";
- {
- ScopedIndent si(this);
- if (!EmitStatements(stmt->body->statements)) {
- return false;
- }
- if (!emit_continuing_()) {
- return false;
+ TINT_SCOPED_ASSIGNMENT(emit_continuing_, emit_continuing);
+ line() << LoopAttribute() << "while (true) {";
+ {
+ ScopedIndent si(this);
+ if (!EmitStatements(stmt->body->statements)) {
+ return false;
+ }
+ if (!emit_continuing_()) {
+ return false;
+ }
}
- }
- line() << "}";
+ line() << "}";
- return true;
+ return true;
}
bool GeneratorImpl::EmitForLoop(const ast::ForLoopStatement* stmt) {
- // Nest a for loop with a new block. In HLSL the initializer scope is not
- // nested by the for-loop, so we may get variable redefinitions.
- line() << "{";
- increment_indent();
- TINT_DEFER({
- decrement_indent();
- line() << "}";
- });
-
- TextBuffer init_buf;
- if (auto* init = stmt->initializer) {
- TINT_SCOPED_ASSIGNMENT(current_buffer_, &init_buf);
- if (!EmitStatement(init)) {
- return false;
- }
- }
-
- TextBuffer cond_pre;
- std::stringstream cond_buf;
- if (auto* cond = stmt->condition) {
- TINT_SCOPED_ASSIGNMENT(current_buffer_, &cond_pre);
- if (!EmitExpression(cond_buf, cond)) {
- return false;
- }
- }
-
- TextBuffer cont_buf;
- if (auto* cont = stmt->continuing) {
- TINT_SCOPED_ASSIGNMENT(current_buffer_, &cont_buf);
- if (!EmitStatement(cont)) {
- return false;
- }
- }
-
- // If the for-loop has a multi-statement conditional and / or continuing, then
- // we cannot emit this as a regular for-loop in HLSL. Instead we need to
- // generate a `while(true)` loop.
- bool emit_as_loop = cond_pre.lines.size() > 0 || cont_buf.lines.size() > 1;
-
- // If the for-loop has multi-statement initializer, or is going to be emitted
- // as a `while(true)` loop, then declare the initializer statement(s) before
- // the loop.
- if (init_buf.lines.size() > 1 || (stmt->initializer && emit_as_loop)) {
- current_buffer_->Append(init_buf);
- init_buf.lines.clear(); // Don't emit the initializer again in the 'for'
- }
-
- if (emit_as_loop) {
- auto emit_continuing = [&]() {
- current_buffer_->Append(cont_buf);
- return true;
- };
-
- TINT_SCOPED_ASSIGNMENT(emit_continuing_, emit_continuing);
- line() << LoopAttribute() << "while (true) {";
+    // Nest the for loop inside a new block. In HLSL the initializer scope is not
+    // nested by the for-loop, so we may otherwise get variable redefinitions.
+ line() << "{";
increment_indent();
TINT_DEFER({
- decrement_indent();
- line() << "}";
+ decrement_indent();
+ line() << "}";
});
- if (stmt->condition) {
- current_buffer_->Append(cond_pre);
- line() << "if (!(" << cond_buf.str() << ")) { break; }";
+ TextBuffer init_buf;
+ if (auto* init = stmt->initializer) {
+ TINT_SCOPED_ASSIGNMENT(current_buffer_, &init_buf);
+ if (!EmitStatement(init)) {
+ return false;
+ }
}
- if (!EmitStatements(stmt->body->statements)) {
- return false;
+ TextBuffer cond_pre;
+ std::stringstream cond_buf;
+ if (auto* cond = stmt->condition) {
+ TINT_SCOPED_ASSIGNMENT(current_buffer_, &cond_pre);
+ if (!EmitExpression(cond_buf, cond)) {
+ return false;
+ }
}
- if (!emit_continuing_()) {
- return false;
+ TextBuffer cont_buf;
+ if (auto* cont = stmt->continuing) {
+ TINT_SCOPED_ASSIGNMENT(current_buffer_, &cont_buf);
+ if (!EmitStatement(cont)) {
+ return false;
+ }
}
- } else {
- // For-loop can be generated.
- {
- auto out = line();
- out << LoopAttribute() << "for";
- {
- ScopedParen sp(out);
- if (!init_buf.lines.empty()) {
- out << init_buf.lines[0].content << " ";
- } else {
- out << "; ";
+    // If the for-loop has a multi-statement conditional and/or continuing, then
+ // we cannot emit this as a regular for-loop in HLSL. Instead we need to
+ // generate a `while(true)` loop.
+ bool emit_as_loop = cond_pre.lines.size() > 0 || cont_buf.lines.size() > 1;
+
+    // If the for-loop has a multi-statement initializer, or is going to be emitted
+ // as a `while(true)` loop, then declare the initializer statement(s) before
+ // the loop.
+ if (init_buf.lines.size() > 1 || (stmt->initializer && emit_as_loop)) {
+ current_buffer_->Append(init_buf);
+ init_buf.lines.clear(); // Don't emit the initializer again in the 'for'
+ }
+
+ if (emit_as_loop) {
+ auto emit_continuing = [&]() {
+ current_buffer_->Append(cont_buf);
+ return true;
+ };
+
+ TINT_SCOPED_ASSIGNMENT(emit_continuing_, emit_continuing);
+ line() << LoopAttribute() << "while (true) {";
+ increment_indent();
+ TINT_DEFER({
+ decrement_indent();
+ line() << "}";
+ });
+
+ if (stmt->condition) {
+ current_buffer_->Append(cond_pre);
+ line() << "if (!(" << cond_buf.str() << ")) { break; }";
}
- out << cond_buf.str() << "; ";
+ if (!EmitStatements(stmt->body->statements)) {
+ return false;
+ }
- if (!cont_buf.lines.empty()) {
- out << TrimSuffix(cont_buf.lines[0].content, ";");
+ if (!emit_continuing_()) {
+ return false;
}
- }
- out << " {";
- }
- {
- auto emit_continuing = [] { return true; };
- TINT_SCOPED_ASSIGNMENT(emit_continuing_, emit_continuing);
- if (!EmitStatementsWithIndent(stmt->body->statements)) {
- return false;
- }
+ } else {
+ // For-loop can be generated.
+ {
+ auto out = line();
+ out << LoopAttribute() << "for";
+ {
+ ScopedParen sp(out);
+
+ if (!init_buf.lines.empty()) {
+ out << init_buf.lines[0].content << " ";
+ } else {
+ out << "; ";
+ }
+
+ out << cond_buf.str() << "; ";
+
+ if (!cont_buf.lines.empty()) {
+ out << TrimSuffix(cont_buf.lines[0].content, ";");
+ }
+ }
+ out << " {";
+ }
+ {
+ auto emit_continuing = [] { return true; };
+ TINT_SCOPED_ASSIGNMENT(emit_continuing_, emit_continuing);
+ if (!EmitStatementsWithIndent(stmt->body->statements)) {
+ return false;
+ }
+ }
+ line() << "}";
}
- line() << "}";
- }
- return true;
+ return true;
}
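When the condition needs preamble statements (`cond_pre` is non-empty) or the continuing block spans more than one statement, the function above cannot fit everything into a single `for(init; cond; cont)` header, so it hoists the initializer and lowers the loop to `while (true)` with an explicit break. The two shapes are behaviourally equivalent; a small stand-alone illustration (plain C++, not actual generator output):

    // for_loop_lowering_sketch.cc - illustration of the two loop shapes only.
    #include <cstdio>

    int main() {
        // Shape used when init, condition and continuing each fit on one line:
        for (int i = 0; i < 3; i = i + 1) {
            std::printf("for %d\n", i);
        }

        // Fallback shape: initializer hoisted before the loop, condition tested
        // with an early break, continuing statement(s) appended after the body.
        int j = 0;
        while (true) {
            if (!(j < 3)) { break; }
            std::printf("while %d\n", j);
            j = j + 1;  // the continuing block
        }
        return 0;
    }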
-bool GeneratorImpl::EmitMemberAccessor(
- std::ostream& out,
- const ast::MemberAccessorExpression* expr) {
- if (!EmitExpression(out, expr->structure)) {
- return false;
- }
- out << ".";
+bool GeneratorImpl::EmitMemberAccessor(std::ostream& out,
+ const ast::MemberAccessorExpression* expr) {
+ if (!EmitExpression(out, expr->structure)) {
+ return false;
+ }
+ out << ".";
- // Swizzles output the name directly
- if (builder_.Sem().Get(expr)->Is<sem::Swizzle>()) {
- out << builder_.Symbols().NameFor(expr->member->symbol);
- } else if (!EmitExpression(out, expr->member)) {
- return false;
- }
+ // Swizzles output the name directly
+ if (builder_.Sem().Get(expr)->Is<sem::Swizzle>()) {
+ out << builder_.Symbols().NameFor(expr->member->symbol);
+ } else if (!EmitExpression(out, expr->member)) {
+ return false;
+ }
- return true;
+ return true;
}
bool GeneratorImpl::EmitReturn(const ast::ReturnStatement* stmt) {
- if (stmt->value) {
- auto out = line();
- out << "return ";
- if (!EmitExpression(out, stmt->value)) {
- return false;
+ if (stmt->value) {
+ auto out = line();
+ out << "return ";
+ if (!EmitExpression(out, stmt->value)) {
+ return false;
+ }
+ out << ";";
+ } else {
+ line() << "return;";
}
- out << ";";
- } else {
- line() << "return;";
- }
- return true;
+ return true;
}
bool GeneratorImpl::EmitStatement(const ast::Statement* stmt) {
- return Switch(
- stmt,
- [&](const ast::AssignmentStatement* a) { //
- return EmitAssign(a);
- },
- [&](const ast::BlockStatement* b) { //
- return EmitBlock(b);
- },
- [&](const ast::BreakStatement* b) { //
- return EmitBreak(b);
- },
- [&](const ast::CallStatement* c) { //
- auto out = line();
- if (!EmitCall(out, c->expr)) {
- return false;
- }
- out << ";";
- return true;
- },
- [&](const ast::ContinueStatement* c) { //
- return EmitContinue(c);
- },
- [&](const ast::DiscardStatement* d) { //
- return EmitDiscard(d);
- },
- [&](const ast::FallthroughStatement*) { //
- line() << "/* fallthrough */";
- return true;
- },
- [&](const ast::IfStatement* i) { //
- return EmitIf(i);
- },
- [&](const ast::LoopStatement* l) { //
- return EmitLoop(l);
- },
- [&](const ast::ForLoopStatement* l) { //
- return EmitForLoop(l);
- },
- [&](const ast::ReturnStatement* r) { //
- return EmitReturn(r);
- },
- [&](const ast::SwitchStatement* s) { //
- return EmitSwitch(s);
- },
- [&](const ast::VariableDeclStatement* v) { //
- return EmitVariable(v->variable);
- },
- [&](Default) { //
- diagnostics_.add_error(
- diag::System::Writer,
- "unknown statement type: " + std::string(stmt->TypeInfo().name));
- return false;
- });
+ return Switch(
+ stmt,
+ [&](const ast::AssignmentStatement* a) { //
+ return EmitAssign(a);
+ },
+ [&](const ast::BlockStatement* b) { //
+ return EmitBlock(b);
+ },
+ [&](const ast::BreakStatement* b) { //
+ return EmitBreak(b);
+ },
+ [&](const ast::CallStatement* c) { //
+ auto out = line();
+ if (!EmitCall(out, c->expr)) {
+ return false;
+ }
+ out << ";";
+ return true;
+ },
+ [&](const ast::ContinueStatement* c) { //
+ return EmitContinue(c);
+ },
+ [&](const ast::DiscardStatement* d) { //
+ return EmitDiscard(d);
+ },
+ [&](const ast::FallthroughStatement*) { //
+ line() << "/* fallthrough */";
+ return true;
+ },
+ [&](const ast::IfStatement* i) { //
+ return EmitIf(i);
+ },
+ [&](const ast::LoopStatement* l) { //
+ return EmitLoop(l);
+ },
+ [&](const ast::ForLoopStatement* l) { //
+ return EmitForLoop(l);
+ },
+ [&](const ast::ReturnStatement* r) { //
+ return EmitReturn(r);
+ },
+ [&](const ast::SwitchStatement* s) { //
+ return EmitSwitch(s);
+ },
+ [&](const ast::VariableDeclStatement* v) { //
+ return EmitVariable(v->variable);
+ },
+ [&](Default) { //
+ diagnostics_.add_error(diag::System::Writer,
+ "unknown statement type: " + std::string(stmt->TypeInfo().name));
+ return false;
+ });
}
bool GeneratorImpl::EmitDefaultOnlySwitch(const ast::SwitchStatement* stmt) {
- TINT_ASSERT(Writer, stmt->body.size() == 1 && stmt->body[0]->IsDefault());
+ TINT_ASSERT(Writer, stmt->body.size() == 1 && stmt->body[0]->IsDefault());
- // FXC fails to compile a switch with just a default case, ignoring the
- // default case body. We work around this here by emitting the default case
- // without the switch.
+ // FXC fails to compile a switch with just a default case, ignoring the
+ // default case body. We work around this here by emitting the default case
+ // without the switch.
- // Emit the switch condition as-is in case it has side-effects (e.g.
- // function call). Note that's it's fine not to assign the result of the
- // expression.
- {
- auto out = line();
- if (!EmitExpression(out, stmt->condition)) {
- return false;
+ // Emit the switch condition as-is in case it has side-effects (e.g.
+    // function call). Note that it's fine not to assign the result of the
+ // expression.
+ {
+ auto out = line();
+ if (!EmitExpression(out, stmt->condition)) {
+ return false;
+ }
+ out << ";";
}
- out << ";";
- }
- // Emit "do { <default case body> } while(false);". We use a 'do' loop so
- // that break statements work as expected, and make it 'while (false)' in
- // case there isn't a break statement.
- line() << "do {";
- {
- ScopedIndent si(this);
- if (!EmitStatements(stmt->body[0]->body->statements)) {
- return false;
+ // Emit "do { <default case body> } while(false);". We use a 'do' loop so
+ // that break statements work as expected, and make it 'while (false)' in
+ // case there isn't a break statement.
+ line() << "do {";
+ {
+ ScopedIndent si(this);
+ if (!EmitStatements(stmt->body[0]->body->statements)) {
+ return false;
+ }
}
- }
- line() << "} while (false);";
- return true;
+ line() << "} while (false);";
+ return true;
}
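The `do { ... } while (false);` wrapper above exists purely so that `break` statements inside the default case keep their meaning once the `switch` is removed; the body runs exactly once whether or not a `break` is hit. A self-contained illustration of that equivalence (values are illustrative):

    // default_only_switch_sketch.cc - illustration of the FXC workaround shape.
    #include <cstdio>

    int main() {
        int cond = 3;
        (void)cond;  // stands in for evaluating the switch condition for its side effects
        do {
            std::printf("default case body\n");
            if (cond == 3) {
                break;  // leaves the do/while exactly as it would leave the switch
            }
            std::printf("not reached when cond == 3\n");
        } while (false);
        return 0;
    }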
bool GeneratorImpl::EmitSwitch(const ast::SwitchStatement* stmt) {
- // BUG(crbug.com/tint/1188): work around default-only switches
- if (stmt->body.size() == 1 && stmt->body[0]->IsDefault()) {
- return EmitDefaultOnlySwitch(stmt);
- }
+ // BUG(crbug.com/tint/1188): work around default-only switches
+ if (stmt->body.size() == 1 && stmt->body[0]->IsDefault()) {
+ return EmitDefaultOnlySwitch(stmt);
+ }
- { // switch(expr) {
- auto out = line();
- out << "switch(";
- if (!EmitExpression(out, stmt->condition)) {
- return false;
+ { // switch(expr) {
+ auto out = line();
+ out << "switch(";
+ if (!EmitExpression(out, stmt->condition)) {
+ return false;
+ }
+ out << ") {";
}
- out << ") {";
- }
- {
- ScopedIndent si(this);
- for (size_t i = 0; i < stmt->body.size(); i++) {
- if (!EmitCase(stmt, i)) {
- return false;
- }
+ {
+ ScopedIndent si(this);
+ for (size_t i = 0; i < stmt->body.size(); i++) {
+ if (!EmitCase(stmt, i)) {
+ return false;
+ }
+ }
}
- }
- line() << "}";
+ line() << "}";
- return true;
+ return true;
}
bool GeneratorImpl::EmitType(std::ostream& out,
@@ -3521,209 +3624,210 @@ bool GeneratorImpl::EmitType(std::ostream& out,
ast::Access access,
const std::string& name,
bool* name_printed /* = nullptr */) {
- if (name_printed) {
- *name_printed = false;
- }
- switch (storage_class) {
- case ast::StorageClass::kStorage:
- if (access != ast::Access::kRead) {
- out << "RW";
- }
- out << "ByteAddressBuffer";
- return true;
- case ast::StorageClass::kUniform: {
- auto array_length = (type->Size() + 15) / 16;
- out << "uint4 " << name << "[" << array_length << "]";
- if (name_printed) {
- *name_printed = true;
- }
- return true;
- }
- default:
- break;
- }
-
- return Switch(
- type,
- [&](const sem::Array* ary) {
- const sem::Type* base_type = ary;
- std::vector<uint32_t> sizes;
- while (auto* arr = base_type->As<sem::Array>()) {
- if (arr->IsRuntimeSized()) {
- TINT_ICE(Writer, diagnostics_)
- << "Runtime arrays may only exist in storage buffers, which "
- "should "
- "have been transformed into a ByteAddressBuffer";
- return false;
- }
- sizes.push_back(arr->Count());
- base_type = arr->ElemType();
- }
- if (!EmitType(out, base_type, storage_class, access, "")) {
- return false;
- }
- if (!name.empty()) {
- out << " " << name;
- if (name_printed) {
- *name_printed = true;
- }
- }
- for (uint32_t size : sizes) {
- out << "[" << size << "]";
- }
- return true;
- },
- [&](const sem::Bool*) {
- out << "bool";
- return true;
- },
- [&](const sem::F32*) {
- out << "float";
- return true;
- },
- [&](const sem::I32*) {
- out << "int";
- return true;
- },
- [&](const sem::Matrix* mat) {
- if (!EmitType(out, mat->type(), storage_class, access, "")) {
- return false;
- }
- // Note: HLSL's matrices are declared as <type>NxM, where N is the
- // number of rows and M is the number of columns. Despite HLSL's
- // matrices being column-major by default, the index operator and
- // constructors actually operate on row-vectors, where as WGSL operates
- // on column vectors. To simplify everything we use the transpose of the
- // matrices. See:
- // https://docs.microsoft.com/en-us/windows/win32/direct3dhlsl/dx-graphics-hlsl-per-component-math#matrix-ordering
- out << mat->columns() << "x" << mat->rows();
- return true;
- },
- [&](const sem::Pointer*) {
- TINT_ICE(Writer, diagnostics_)
- << "Attempting to emit pointer type. These should have been "
- "removed with the InlinePointerLets transform";
- return false;
- },
- [&](const sem::Sampler* sampler) {
- out << "Sampler";
- if (sampler->IsComparison()) {
- out << "Comparison";
- }
- out << "State";
- return true;
- },
- [&](const sem::Struct* str) {
- out << StructName(str);
- return true;
- },
- [&](const sem::Texture* tex) {
- if (tex->Is<sem::ExternalTexture>()) {
- TINT_ICE(Writer, diagnostics_)
- << "Multiplanar external texture transform was not run.";
- return false;
- }
-
- auto* storage = tex->As<sem::StorageTexture>();
- auto* ms = tex->As<sem::MultisampledTexture>();
- auto* depth_ms = tex->As<sem::DepthMultisampledTexture>();
- auto* sampled = tex->As<sem::SampledTexture>();
-
- if (storage && storage->access() != ast::Access::kRead) {
- out << "RW";
+ if (name_printed) {
+ *name_printed = false;
+ }
+ switch (storage_class) {
+ case ast::StorageClass::kStorage:
+ if (access != ast::Access::kRead) {
+ out << "RW";
+ }
+ out << "ByteAddressBuffer";
+ return true;
+ case ast::StorageClass::kUniform: {
+ auto array_length = (type->Size() + 15) / 16;
+ out << "uint4 " << name << "[" << array_length << "]";
+ if (name_printed) {
+ *name_printed = true;
+ }
+ return true;
}
- out << "Texture";
-
- switch (tex->dim()) {
- case ast::TextureDimension::k1d:
- out << "1D";
- break;
- case ast::TextureDimension::k2d:
- out << ((ms || depth_ms) ? "2DMS" : "2D");
- break;
- case ast::TextureDimension::k2dArray:
- out << ((ms || depth_ms) ? "2DMSArray" : "2DArray");
- break;
- case ast::TextureDimension::k3d:
- out << "3D";
- break;
- case ast::TextureDimension::kCube:
- out << "Cube";
- break;
- case ast::TextureDimension::kCubeArray:
- out << "CubeArray";
+ default:
break;
- default:
- TINT_UNREACHABLE(Writer, diagnostics_)
- << "unexpected TextureDimension " << tex->dim();
- return false;
- }
+ }
- if (storage) {
- auto* component =
- image_format_to_rwtexture_type(storage->texel_format());
- if (component == nullptr) {
- TINT_ICE(Writer, diagnostics_)
- << "Unsupported StorageTexture TexelFormat: "
- << static_cast<int>(storage->texel_format());
- return false;
- }
- out << "<" << component << ">";
- } else if (depth_ms) {
- out << "<float4>";
- } else if (sampled || ms) {
- auto* subtype = sampled ? sampled->type() : ms->type();
- out << "<";
- if (subtype->Is<sem::F32>()) {
- out << "float4";
- } else if (subtype->Is<sem::I32>()) {
- out << "int4";
- } else if (subtype->Is<sem::U32>()) {
- out << "uint4";
- } else {
+ return Switch(
+ type,
+ [&](const sem::Array* ary) {
+ const sem::Type* base_type = ary;
+ std::vector<uint32_t> sizes;
+ while (auto* arr = base_type->As<sem::Array>()) {
+ if (arr->IsRuntimeSized()) {
+ TINT_ICE(Writer, diagnostics_)
+ << "Runtime arrays may only exist in storage buffers, which "
+ "should "
+ "have been transformed into a ByteAddressBuffer";
+ return false;
+ }
+ sizes.push_back(arr->Count());
+ base_type = arr->ElemType();
+ }
+ if (!EmitType(out, base_type, storage_class, access, "")) {
+ return false;
+ }
+ if (!name.empty()) {
+ out << " " << name;
+ if (name_printed) {
+ *name_printed = true;
+ }
+ }
+ for (uint32_t size : sizes) {
+ out << "[" << size << "]";
+ }
+ return true;
+ },
+ [&](const sem::Bool*) {
+ out << "bool";
+ return true;
+ },
+ [&](const sem::F32*) {
+ out << "float";
+ return true;
+ },
+ [&](const sem::F16*) {
+ diagnostics_.add_error(diag::System::Writer,
+ "Type f16 is not completely implemented yet.");
+ return false;
+ },
+ [&](const sem::I32*) {
+ out << "int";
+ return true;
+ },
+ [&](const sem::Matrix* mat) {
+ if (!EmitType(out, mat->type(), storage_class, access, "")) {
+ return false;
+ }
+ // Note: HLSL's matrices are declared as <type>NxM, where N is the
+ // number of rows and M is the number of columns. Despite HLSL's
+ // matrices being column-major by default, the index operator and
+            // constructors actually operate on row-vectors, whereas WGSL operates
+ // on column vectors. To simplify everything we use the transpose of the
+ // matrices. See:
+ // https://docs.microsoft.com/en-us/windows/win32/direct3dhlsl/dx-graphics-hlsl-per-component-math#matrix-ordering
+ out << mat->columns() << "x" << mat->rows();
+ return true;
+ },
+ [&](const sem::Pointer*) {
TINT_ICE(Writer, diagnostics_)
- << "Unsupported multisampled texture type";
+ << "Attempting to emit pointer type. These should have been "
+ "removed with the InlinePointerLets transform";
return false;
- }
- out << ">";
- }
- return true;
- },
- [&](const sem::U32*) {
- out << "uint";
- return true;
- },
- [&](const sem::Vector* vec) {
- auto width = vec->Width();
- if (vec->type()->Is<sem::F32>() && width >= 1 && width <= 4) {
- out << "float" << width;
- } else if (vec->type()->Is<sem::I32>() && width >= 1 && width <= 4) {
- out << "int" << width;
- } else if (vec->type()->Is<sem::U32>() && width >= 1 && width <= 4) {
- out << "uint" << width;
- } else if (vec->type()->Is<sem::Bool>() && width >= 1 && width <= 4) {
- out << "bool" << width;
- } else {
- out << "vector<";
- if (!EmitType(out, vec->type(), storage_class, access, "")) {
+ },
+ [&](const sem::Sampler* sampler) {
+ out << "Sampler";
+ if (sampler->IsComparison()) {
+ out << "Comparison";
+ }
+ out << "State";
+ return true;
+ },
+ [&](const sem::Struct* str) {
+ out << StructName(str);
+ return true;
+ },
+ [&](const sem::Texture* tex) {
+ if (tex->Is<sem::ExternalTexture>()) {
+ TINT_ICE(Writer, diagnostics_)
+ << "Multiplanar external texture transform was not run.";
+ return false;
+ }
+
+ auto* storage = tex->As<sem::StorageTexture>();
+ auto* ms = tex->As<sem::MultisampledTexture>();
+ auto* depth_ms = tex->As<sem::DepthMultisampledTexture>();
+ auto* sampled = tex->As<sem::SampledTexture>();
+
+ if (storage && storage->access() != ast::Access::kRead) {
+ out << "RW";
+ }
+ out << "Texture";
+
+ switch (tex->dim()) {
+ case ast::TextureDimension::k1d:
+ out << "1D";
+ break;
+ case ast::TextureDimension::k2d:
+ out << ((ms || depth_ms) ? "2DMS" : "2D");
+ break;
+ case ast::TextureDimension::k2dArray:
+ out << ((ms || depth_ms) ? "2DMSArray" : "2DArray");
+ break;
+ case ast::TextureDimension::k3d:
+ out << "3D";
+ break;
+ case ast::TextureDimension::kCube:
+ out << "Cube";
+ break;
+ case ast::TextureDimension::kCubeArray:
+ out << "CubeArray";
+ break;
+ default:
+ TINT_UNREACHABLE(Writer, diagnostics_)
+ << "unexpected TextureDimension " << tex->dim();
+ return false;
+ }
+
+ if (storage) {
+ auto* component = image_format_to_rwtexture_type(storage->texel_format());
+ if (component == nullptr) {
+ TINT_ICE(Writer, diagnostics_) << "Unsupported StorageTexture TexelFormat: "
+ << static_cast<int>(storage->texel_format());
+ return false;
+ }
+ out << "<" << component << ">";
+ } else if (depth_ms) {
+ out << "<float4>";
+ } else if (sampled || ms) {
+ auto* subtype = sampled ? sampled->type() : ms->type();
+ out << "<";
+ if (subtype->Is<sem::F32>()) {
+ out << "float4";
+ } else if (subtype->Is<sem::I32>()) {
+ out << "int4";
+ } else if (subtype->Is<sem::U32>()) {
+ out << "uint4";
+ } else {
+ TINT_ICE(Writer, diagnostics_) << "Unsupported multisampled texture type";
+ return false;
+ }
+ out << ">";
+ }
+ return true;
+ },
+ [&](const sem::U32*) {
+ out << "uint";
+ return true;
+ },
+ [&](const sem::Vector* vec) {
+ auto width = vec->Width();
+ if (vec->type()->Is<sem::F32>() && width >= 1 && width <= 4) {
+ out << "float" << width;
+ } else if (vec->type()->Is<sem::I32>() && width >= 1 && width <= 4) {
+ out << "int" << width;
+ } else if (vec->type()->Is<sem::U32>() && width >= 1 && width <= 4) {
+ out << "uint" << width;
+ } else if (vec->type()->Is<sem::Bool>() && width >= 1 && width <= 4) {
+ out << "bool" << width;
+ } else {
+ out << "vector<";
+ if (!EmitType(out, vec->type(), storage_class, access, "")) {
+ return false;
+ }
+ out << ", " << width << ">";
+ }
+ return true;
+ },
+ [&](const sem::Atomic* atomic) {
+ return EmitType(out, atomic->Type(), storage_class, access, name);
+ },
+ [&](const sem::Void*) {
+ out << "void";
+ return true;
+ },
+ [&](Default) {
+ diagnostics_.add_error(diag::System::Writer, "unknown type in EmitType");
return false;
- }
- out << ", " << width << ">";
- }
- return true;
- },
- [&](const sem::Atomic* atomic) {
- return EmitType(out, atomic->Type(), storage_class, access, name);
- },
- [&](const sem::Void*) {
- out << "void";
- return true;
- },
- [&](Default) {
- diagnostics_.add_error(diag::System::Writer,
- "unknown type in EmitType");
- return false;
- });
+ });
}
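For the uniform storage class, EmitType above declares the variable as an array of 16-byte `uint4` registers: `(type->Size() + 15) / 16` rounds the byte size up to a whole number of registers, and later accesses index into that array. A small sketch of that sizing rule (the 20-byte example is illustrative):

    // uniform_register_count_sketch.cc - illustrates the ceil(size / 16) rule only.
    #include <cstdint>
    #include <cstdio>

    static uint32_t Uint4ArrayLength(uint32_t size_in_bytes) {
        return (size_in_bytes + 15u) / 16u;  // round up to whole uint4 registers
    }

    int main() {
        // A 20-byte uniform block needs two 16-byte registers.
        std::printf("uint4 ubo[%u];\n", static_cast<unsigned>(Uint4ArrayLength(20u)));
        return 0;
    }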
bool GeneratorImpl::EmitTypeAndName(std::ostream& out,
@@ -3731,221 +3835,216 @@ bool GeneratorImpl::EmitTypeAndName(std::ostream& out,
ast::StorageClass storage_class,
ast::Access access,
const std::string& name) {
- bool name_printed = false;
- if (!EmitType(out, type, storage_class, access, name, &name_printed)) {
- return false;
- }
- if (!name.empty() && !name_printed) {
- out << " " << name;
- }
- return true;
+ bool name_printed = false;
+ if (!EmitType(out, type, storage_class, access, name, &name_printed)) {
+ return false;
+ }
+ if (!name.empty() && !name_printed) {
+ out << " " << name;
+ }
+ return true;
}
bool GeneratorImpl::EmitStructType(TextBuffer* b, const sem::Struct* str) {
- line(b) << "struct " << StructName(str) << " {";
- {
- ScopedIndent si(b);
- for (auto* mem : str->Members()) {
- auto mem_name = builder_.Symbols().NameFor(mem->Name());
-
- auto* ty = mem->Type();
-
- auto out = line(b);
-
- std::string pre, post;
-
- if (auto* decl = mem->Declaration()) {
- for (auto* attr : decl->attributes) {
- if (auto* location = attr->As<ast::LocationAttribute>()) {
- auto& pipeline_stage_uses = str->PipelineStageUses();
- if (pipeline_stage_uses.size() != 1) {
- TINT_ICE(Writer, diagnostics_)
- << "invalid entry point IO struct uses";
- }
-
- if (pipeline_stage_uses.count(
- sem::PipelineStageUsage::kVertexInput)) {
- post += " : TEXCOORD" + std::to_string(location->value);
- } else if (pipeline_stage_uses.count(
- sem::PipelineStageUsage::kVertexOutput)) {
- post += " : TEXCOORD" + std::to_string(location->value);
- } else if (pipeline_stage_uses.count(
- sem::PipelineStageUsage::kFragmentInput)) {
- post += " : TEXCOORD" + std::to_string(location->value);
- } else if (pipeline_stage_uses.count(
- sem::PipelineStageUsage::kFragmentOutput)) {
- post += " : SV_Target" + std::to_string(location->value);
- } else {
- TINT_ICE(Writer, diagnostics_)
- << "invalid use of location attribute";
- }
- } else if (auto* builtin = attr->As<ast::BuiltinAttribute>()) {
- auto name = builtin_to_attribute(builtin->builtin);
- if (name.empty()) {
- diagnostics_.add_error(diag::System::Writer,
- "unsupported builtin");
- return false;
- }
- post += " : " + name;
- } else if (auto* interpolate =
- attr->As<ast::InterpolateAttribute>()) {
- auto mod = interpolation_to_modifiers(interpolate->type,
- interpolate->sampling);
- if (mod.empty()) {
- diagnostics_.add_error(diag::System::Writer,
- "unsupported interpolation");
- return false;
- }
- pre += mod;
-
- } else if (attr->Is<ast::InvariantAttribute>()) {
- // Note: `precise` is not exactly the same as `invariant`, but is
- // stricter and therefore provides the necessary guarantees.
- // See discussion here: https://github.com/gpuweb/gpuweb/issues/893
- pre += "precise ";
- } else if (!attr->IsAnyOf<ast::StructMemberAlignAttribute,
- ast::StructMemberOffsetAttribute,
- ast::StructMemberSizeAttribute>()) {
- TINT_ICE(Writer, diagnostics_)
- << "unhandled struct member attribute: " << attr->Name();
- return false;
- }
+ line(b) << "struct " << StructName(str) << " {";
+ {
+ ScopedIndent si(b);
+ for (auto* mem : str->Members()) {
+ auto mem_name = builder_.Symbols().NameFor(mem->Name());
+ auto* ty = mem->Type();
+ auto out = line(b);
+ std::string pre, post;
+ if (auto* decl = mem->Declaration()) {
+ for (auto* attr : decl->attributes) {
+ if (auto* location = attr->As<ast::LocationAttribute>()) {
+ auto& pipeline_stage_uses = str->PipelineStageUses();
+ if (pipeline_stage_uses.size() != 1) {
+ TINT_ICE(Writer, diagnostics_) << "invalid entry point IO struct uses";
+ }
+
+ if (pipeline_stage_uses.count(sem::PipelineStageUsage::kVertexInput)) {
+ post += " : TEXCOORD" + std::to_string(location->value);
+ } else if (pipeline_stage_uses.count(
+ sem::PipelineStageUsage::kVertexOutput)) {
+ post += " : TEXCOORD" + std::to_string(location->value);
+ } else if (pipeline_stage_uses.count(
+ sem::PipelineStageUsage::kFragmentInput)) {
+ post += " : TEXCOORD" + std::to_string(location->value);
+ } else if (pipeline_stage_uses.count(
+ sem::PipelineStageUsage::kFragmentOutput)) {
+ post += " : SV_Target" + std::to_string(location->value);
+ } else {
+ TINT_ICE(Writer, diagnostics_) << "invalid use of location attribute";
+ }
+ } else if (auto* builtin = attr->As<ast::BuiltinAttribute>()) {
+ auto name = builtin_to_attribute(builtin->builtin);
+ if (name.empty()) {
+ diagnostics_.add_error(diag::System::Writer, "unsupported builtin");
+ return false;
+ }
+ post += " : " + name;
+ } else if (auto* interpolate = attr->As<ast::InterpolateAttribute>()) {
+ auto mod =
+ interpolation_to_modifiers(interpolate->type, interpolate->sampling);
+ if (mod.empty()) {
+ diagnostics_.add_error(diag::System::Writer,
+ "unsupported interpolation");
+ return false;
+ }
+ pre += mod;
+
+ } else if (attr->Is<ast::InvariantAttribute>()) {
+ // Note: `precise` is not exactly the same as `invariant`, but is
+ // stricter and therefore provides the necessary guarantees.
+ // See discussion here: https://github.com/gpuweb/gpuweb/issues/893
+ pre += "precise ";
+ } else if (!attr->IsAnyOf<ast::StructMemberAlignAttribute,
+ ast::StructMemberOffsetAttribute,
+ ast::StructMemberSizeAttribute>()) {
+ TINT_ICE(Writer, diagnostics_)
+ << "unhandled struct member attribute: " << attr->Name();
+ return false;
+ }
+ }
+ }
+
+ out << pre;
+ if (!EmitTypeAndName(out, ty, ast::StorageClass::kNone, ast::Access::kReadWrite,
+ mem_name)) {
+ return false;
+ }
+ out << post << ";";
}
- }
+ }
+
+ line(b) << "};";
+ return true;
+}
+
+bool GeneratorImpl::EmitStructTypeOnce(TextBuffer* buffer, const sem::Struct* str) {
+ auto it = emitted_structs_.emplace(str);
+ if (!it.second) {
+ return true;
+ }
+ return EmitStructType(buffer, str);
+}
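EmitStructTypeOnce relies on `std::unordered_set::emplace` returning an `{iterator, inserted}` pair: when `inserted` is false the struct has already been written and the call is a no-op. A minimal sketch of that emit-once pattern, with hypothetical names:

    // emit_once_sketch.cc - illustration of the set-based deduplication only.
    #include <cstdio>
    #include <string>
    #include <unordered_set>

    int main() {
        std::unordered_set<std::string> emitted_structs;
        auto emit_struct_once = [&](const std::string& name) {
            if (!emitted_structs.emplace(name).second) {
                return;  // already emitted, skip
            }
            std::printf("struct %s { /* members */ };\n", name.c_str());
        };
        emit_struct_once("tint_symbol");
        emit_struct_once("tint_symbol");  // second call emits nothing
        return 0;
    }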
+
+bool GeneratorImpl::EmitUnaryOp(std::ostream& out, const ast::UnaryOpExpression* expr) {
+ switch (expr->op) {
+ case ast::UnaryOp::kIndirection:
+ case ast::UnaryOp::kAddressOf:
+ return EmitExpression(out, expr->expr);
+ case ast::UnaryOp::kComplement:
+ out << "~";
+ break;
+ case ast::UnaryOp::kNot:
+ out << "!";
+ break;
+ case ast::UnaryOp::kNegation:
+ out << "-";
+ break;
+ }
+ out << "(";
- out << pre;
- if (!EmitTypeAndName(out, ty, ast::StorageClass::kNone,
- ast::Access::kReadWrite, mem_name)) {
+ if (!EmitExpression(out, expr->expr)) {
return false;
- }
- out << post << ";";
- }
- }
-
- line(b) << "};";
-
- return true;
-}
-
-bool GeneratorImpl::EmitUnaryOp(std::ostream& out,
- const ast::UnaryOpExpression* expr) {
- switch (expr->op) {
- case ast::UnaryOp::kIndirection:
- case ast::UnaryOp::kAddressOf:
- return EmitExpression(out, expr->expr);
- case ast::UnaryOp::kComplement:
- out << "~";
- break;
- case ast::UnaryOp::kNot:
- out << "!";
- break;
- case ast::UnaryOp::kNegation:
- out << "-";
- break;
- }
- out << "(";
-
- if (!EmitExpression(out, expr->expr)) {
- return false;
- }
+ }
- out << ")";
+ out << ")";
- return true;
+ return true;
}
bool GeneratorImpl::EmitVariable(const ast::Variable* var) {
- auto* sem = builder_.Sem().Get(var);
- auto* type = sem->Type()->UnwrapRef();
+ auto* sem = builder_.Sem().Get(var);
+ auto* type = sem->Type()->UnwrapRef();
- // TODO(dsinclair): Handle variable attributes
- if (!var->attributes.empty()) {
- diagnostics_.add_error(diag::System::Writer,
- "Variable attributes are not handled yet");
- return false;
- }
-
- auto out = line();
- if (var->is_const) {
- out << "const ";
- }
- if (!EmitTypeAndName(out, type, sem->StorageClass(), sem->Access(),
- builder_.Symbols().NameFor(var->symbol))) {
- return false;
- }
-
- out << " = ";
+ // TODO(dsinclair): Handle variable attributes
+ if (!var->attributes.empty()) {
+ diagnostics_.add_error(diag::System::Writer, "Variable attributes are not handled yet");
+ return false;
+ }
- if (var->constructor) {
- if (!EmitExpression(out, var->constructor)) {
- return false;
+ auto out = line();
+ if (var->is_const) {
+ out << "const ";
+ }
+ if (!EmitTypeAndName(out, type, sem->StorageClass(), sem->Access(),
+ builder_.Symbols().NameFor(var->symbol))) {
+ return false;
}
- } else {
- if (!EmitZeroValue(out, type)) {
- return false;
+
+ out << " = ";
+
+ if (var->constructor) {
+ if (!EmitExpression(out, var->constructor)) {
+ return false;
+ }
+ } else {
+ if (!EmitZeroValue(out, type)) {
+ return false;
+ }
}
- }
- out << ";";
+ out << ";";
- return true;
+ return true;
}
bool GeneratorImpl::EmitProgramConstVariable(const ast::Variable* var) {
- for (auto* d : var->attributes) {
- if (!d->Is<ast::IdAttribute>()) {
- diagnostics_.add_error(diag::System::Writer,
- "Decorated const values not valid");
- return false;
- }
- }
- if (!var->is_const) {
- diagnostics_.add_error(diag::System::Writer, "Expected a const value");
- return false;
- }
+ for (auto* d : var->attributes) {
+ if (!d->Is<ast::IdAttribute>()) {
+ diagnostics_.add_error(diag::System::Writer, "Decorated const values not valid");
+ return false;
+ }
+ }
+ if (!var->is_const) {
+ diagnostics_.add_error(diag::System::Writer, "Expected a const value");
+ return false;
+ }
- auto* sem = builder_.Sem().Get(var);
- auto* type = sem->Type();
+ auto* sem = builder_.Sem().Get(var);
+ auto* type = sem->Type();
- auto* global = sem->As<sem::GlobalVariable>();
- if (global && global->IsOverridable()) {
- auto const_id = global->ConstantId();
+ auto* global = sem->As<sem::GlobalVariable>();
+ if (global && global->IsOverridable()) {
+ auto const_id = global->ConstantId();
- line() << "#ifndef " << kSpecConstantPrefix << const_id;
+ line() << "#ifndef " << kSpecConstantPrefix << const_id;
- if (var->constructor != nullptr) {
- auto out = line();
- out << "#define " << kSpecConstantPrefix << const_id << " ";
- if (!EmitExpression(out, var->constructor)) {
- return false;
- }
+ if (var->constructor != nullptr) {
+ auto out = line();
+ out << "#define " << kSpecConstantPrefix << const_id << " ";
+ if (!EmitExpression(out, var->constructor)) {
+ return false;
+ }
+ } else {
+ line() << "#error spec constant required for constant id " << const_id;
+ }
+ line() << "#endif";
+ {
+ auto out = line();
+ out << "static const ";
+ if (!EmitTypeAndName(out, type, sem->StorageClass(), sem->Access(),
+ builder_.Symbols().NameFor(var->symbol))) {
+ return false;
+ }
+ out << " = " << kSpecConstantPrefix << const_id << ";";
+ }
} else {
- line() << "#error spec constant required for constant id " << const_id;
- }
- line() << "#endif";
- {
- auto out = line();
- out << "static const ";
- if (!EmitTypeAndName(out, type, sem->StorageClass(), sem->Access(),
- builder_.Symbols().NameFor(var->symbol))) {
- return false;
- }
- out << " = " << kSpecConstantPrefix << const_id << ";";
- }
- } else {
- auto out = line();
- out << "static const ";
- if (!EmitTypeAndName(out, type, sem->StorageClass(), sem->Access(),
- builder_.Symbols().NameFor(var->symbol))) {
- return false;
- }
- out << " = ";
- if (!EmitExpression(out, var->constructor)) {
- return false;
+ auto out = line();
+ out << "static const ";
+ if (!EmitTypeAndName(out, type, sem->StorageClass(), sem->Access(),
+ builder_.Symbols().NameFor(var->symbol))) {
+ return false;
+ }
+ out << " = ";
+ if (!EmitExpression(out, var->constructor)) {
+ return false;
+ }
+ out << ";";
}
- out << ";";
- }
- return true;
+ return true;
}
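For an overridable (pipeline-overridable) constant, the function above wraps the default value in an `#ifndef`/`#define` guard keyed on the constant id, so the value can be replaced by predefining the macro when the generated HLSL is compiled. Roughly this shape, assuming the prefix expands to something like `WGSL_SPEC_CONSTANT_`, a constant id of 7, and an illustrative `int` constant defaulting to 42:

    // spec_constant_shape_sketch.cc - illustrative shape only; the prefix, id,
    // type, name and default value are all assumptions, not generator output.
    #include <cstdio>

    #ifndef WGSL_SPEC_CONSTANT_7
    #define WGSL_SPEC_CONSTANT_7 42  // default used when no override is supplied
    #endif
    static const int my_override = WGSL_SPEC_CONSTANT_7;

    int main() {
        // Compiling with -DWGSL_SPEC_CONSTANT_7=100 would override the default.
        std::printf("my_override = %d\n", my_override);
        return 0;
    }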
template <typename F>
@@ -3953,73 +4052,71 @@ bool GeneratorImpl::CallBuiltinHelper(std::ostream& out,
const ast::CallExpression* call,
const sem::Builtin* builtin,
F&& build) {
- // Generate the helper function if it hasn't been created already
- auto fn = utils::GetOrCreate(builtins_, builtin, [&]() -> std::string {
- TextBuffer b;
- TINT_DEFER(helpers_.Append(b));
-
- auto fn_name =
- UniqueIdentifier(std::string("tint_") + sem::str(builtin->Type()));
- std::vector<std::string> parameter_names;
- {
- auto decl = line(&b);
- if (!EmitTypeAndName(decl, builtin->ReturnType(),
- ast::StorageClass::kNone, ast::Access::kUndefined,
- fn_name)) {
- return "";
- }
- {
- ScopedParen sp(decl);
- for (auto* param : builtin->Parameters()) {
- if (!parameter_names.empty()) {
- decl << ", ";
- }
- auto param_name = "param_" + std::to_string(parameter_names.size());
- const auto* ty = param->Type();
- if (auto* ptr = ty->As<sem::Pointer>()) {
- decl << "inout ";
- ty = ptr->StoreType();
- }
- if (!EmitTypeAndName(decl, ty, ast::StorageClass::kNone,
- ast::Access::kUndefined, param_name)) {
- return "";
- }
- parameter_names.emplace_back(std::move(param_name));
- }
- }
- decl << " {";
- }
- {
- ScopedIndent si(&b);
- if (!build(&b, parameter_names)) {
- return "";
- }
- }
- line(&b) << "}";
- line(&b);
- return fn_name;
- });
+ // Generate the helper function if it hasn't been created already
+ auto fn = utils::GetOrCreate(builtins_, builtin, [&]() -> std::string {
+ TextBuffer b;
+ TINT_DEFER(helpers_.Append(b));
- if (fn.empty()) {
- return false;
- }
+ auto fn_name = UniqueIdentifier(std::string("tint_") + sem::str(builtin->Type()));
+ std::vector<std::string> parameter_names;
+ {
+ auto decl = line(&b);
+ if (!EmitTypeAndName(decl, builtin->ReturnType(), ast::StorageClass::kNone,
+ ast::Access::kUndefined, fn_name)) {
+ return "";
+ }
+ {
+ ScopedParen sp(decl);
+ for (auto* param : builtin->Parameters()) {
+ if (!parameter_names.empty()) {
+ decl << ", ";
+ }
+ auto param_name = "param_" + std::to_string(parameter_names.size());
+ const auto* ty = param->Type();
+ if (auto* ptr = ty->As<sem::Pointer>()) {
+ decl << "inout ";
+ ty = ptr->StoreType();
+ }
+ if (!EmitTypeAndName(decl, ty, ast::StorageClass::kNone,
+ ast::Access::kUndefined, param_name)) {
+ return "";
+ }
+ parameter_names.emplace_back(std::move(param_name));
+ }
+ }
+ decl << " {";
+ }
+ {
+ ScopedIndent si(&b);
+ if (!build(&b, parameter_names)) {
+ return "";
+ }
+ }
+ line(&b) << "}";
+ line(&b);
+ return fn_name;
+ });
- // Call the helper
- out << fn;
- {
- ScopedParen sp(out);
- bool first = true;
- for (auto* arg : call->args) {
- if (!first) {
- out << ", ";
- }
- first = false;
- if (!EmitExpression(out, arg)) {
+ if (fn.empty()) {
return false;
- }
}
- }
- return true;
+
+ // Call the helper
+ out << fn;
+ {
+ ScopedParen sp(out);
+ bool first = true;
+ for (auto* arg : call->args) {
+ if (!first) {
+ out << ", ";
+ }
+ first = false;
+ if (!EmitExpression(out, arg)) {
+ return false;
+ }
+ }
+ }
+ return true;
}
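CallBuiltinHelper generates each builtin's helper function at most once: the lambda passed to `utils::GetOrCreate` builds the helper body into a TextBuffer appended to `helpers_`, and the cached function name is reused at every later call site. A sketch of that get-or-create memoization, using a plain `std::map` and hypothetical names rather than the Tint utility:

    // get_or_create_sketch.cc - illustration of the memoized helper emission only.
    #include <cstdio>
    #include <map>
    #include <string>

    template <typename K, typename V, typename F>
    V& GetOrCreate(std::map<K, V>& map, const K& key, F&& build) {
        auto it = map.find(key);
        if (it == map.end()) {
            it = map.emplace(key, build()).first;  // build the value only once
        }
        return it->second;
    }

    int main() {
        std::map<std::string, std::string> builtins;  // builtin -> helper name
        auto helper_name_for = [&](const std::string& builtin) -> std::string {
            return GetOrCreate(builtins, builtin, [&] {
                std::printf("// emitting helper body for %s\n", builtin.c_str());
                return "tint_" + builtin;
            });
        };
        std::printf("%s(args);\n", helper_name_for("degrees").c_str());
        std::printf("%s(args);\n", helper_name_for("degrees").c_str());  // body emitted once
        return 0;
    }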
} // namespace tint::writer::hlsl
diff --git a/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl.h b/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl.h
index 4fcf151d7db..af7e4c98208 100644
--- a/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl.h
+++ b/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl.h
@@ -43,6 +43,7 @@
// Forward declarations
namespace tint::sem {
class Call;
+class Constant;
class Builtin;
class TypeConstructor;
class TypeConversion;
@@ -52,18 +53,18 @@ namespace tint::writer::hlsl {
/// The result of sanitizing a program for generation.
struct SanitizedResult {
- /// Constructor
- SanitizedResult();
- /// Destructor
- ~SanitizedResult();
- /// Move constructor
- SanitizedResult(SanitizedResult&&);
+ /// Constructor
+ SanitizedResult();
+ /// Destructor
+ ~SanitizedResult();
+ /// Move constructor
+ SanitizedResult(SanitizedResult&&);
- /// The sanitized program.
- Program program;
- /// Indices into the array_length_from_uniform binding that are statically
- /// used.
- std::unordered_set<uint32_t> used_array_length_from_uniform_indices;
+ /// The sanitized program.
+ Program program;
+ /// Indices into the array_length_from_uniform binding that are statically
+ /// used.
+ std::unordered_set<uint32_t> used_array_length_from_uniform_indices;
};
/// Sanitize a program in preparation for generating HLSL.
@@ -74,461 +75,468 @@ SanitizedResult Sanitize(const Program* program, const Options& options);
/// Implementation class for HLSL generator
class GeneratorImpl : public TextGenerator {
- public:
- /// Constructor
- /// @param program the program to generate
- explicit GeneratorImpl(const Program* program);
- ~GeneratorImpl();
+ public:
+ /// Constructor
+ /// @param program the program to generate
+ explicit GeneratorImpl(const Program* program);
+ ~GeneratorImpl();
- /// @returns true on successful generation; false otherwise
- bool Generate();
+ /// @returns true on successful generation; false otherwise
+ bool Generate();
- /// Handles an index accessor expression
- /// @param out the output of the expression stream
- /// @param expr the expression to emit
- /// @returns true if the index accessor was emitted
- bool EmitIndexAccessor(std::ostream& out,
- const ast::IndexAccessorExpression* expr);
- /// Handles an assignment statement
- /// @param stmt the statement to emit
- /// @returns true if the statement was emitted successfully
- bool EmitAssign(const ast::AssignmentStatement* stmt);
- /// Emits code such that if `expr` is zero, it emits one, else `expr`
- /// @param out the output of the expression stream
- /// @param expr the expression
- /// @returns true if the expression was emitted, false otherwise
- bool EmitExpressionOrOneIfZero(std::ostream& out,
- const ast::Expression* expr);
- /// Handles generating a binary expression
- /// @param out the output of the expression stream
- /// @param expr the binary expression
- /// @returns true if the expression was emitted, false otherwise
- bool EmitBinary(std::ostream& out, const ast::BinaryExpression* expr);
- /// Handles generating a bitcast expression
- /// @param out the output of the expression stream
- /// @param expr the as expression
- /// @returns true if the bitcast was emitted
- bool EmitBitcast(std::ostream& out, const ast::BitcastExpression* expr);
- /// Emits a list of statements
- /// @param stmts the statement list
- /// @returns true if the statements were emitted successfully
- bool EmitStatements(const ast::StatementList& stmts);
- /// Emits a list of statements with an indentation
- /// @param stmts the statement list
- /// @returns true if the statements were emitted successfully
- bool EmitStatementsWithIndent(const ast::StatementList& stmts);
- /// Handles a block statement
- /// @param stmt the statement to emit
- /// @returns true if the statement was emitted successfully
- bool EmitBlock(const ast::BlockStatement* stmt);
- /// Handles a break statement
- /// @param stmt the statement to emit
- /// @returns true if the statement was emitted successfully
- bool EmitBreak(const ast::BreakStatement* stmt);
- /// Handles generating a call expression
- /// @param out the output of the expression stream
- /// @param expr the call expression
- /// @returns true if the call expression is emitted
- bool EmitCall(std::ostream& out, const ast::CallExpression* expr);
- /// Handles generating a function call expression
- /// @param out the output of the expression stream
- /// @param call the call expression
- /// @param function the function being called
- /// @returns true if the expression is emitted
- bool EmitFunctionCall(std::ostream& out,
- const sem::Call* call,
- const sem::Function* function);
- /// Handles generating a builtin call expression
- /// @param out the output of the expression stream
- /// @param call the call expression
- /// @param builtin the builtin being called
- /// @returns true if the expression is emitted
- bool EmitBuiltinCall(std::ostream& out,
- const sem::Call* call,
- const sem::Builtin* builtin);
- /// Handles generating a type conversion expression
- /// @param out the output of the expression stream
- /// @param call the call expression
- /// @param conv the type conversion
- /// @returns true if the expression is emitted
- bool EmitTypeConversion(std::ostream& out,
- const sem::Call* call,
- const sem::TypeConversion* conv);
- /// Handles generating a type constructor expression
- /// @param out the output of the expression stream
- /// @param call the call expression
- /// @param ctor the type constructor
- /// @returns true if the expression is emitted
- bool EmitTypeConstructor(std::ostream& out,
- const sem::Call* call,
- const sem::TypeConstructor* ctor);
- /// Handles generating a call expression to a
- /// transform::DecomposeMemoryAccess::Intrinsic for a uniform buffer
- /// @param out the output of the expression stream
- /// @param expr the call expression
- /// @param intrinsic the transform::DecomposeMemoryAccess::Intrinsic
- /// @returns true if the call expression is emitted
- bool EmitUniformBufferAccess(
- std::ostream& out,
- const ast::CallExpression* expr,
- const transform::DecomposeMemoryAccess::Intrinsic* intrinsic);
- /// Handles generating a call expression to a
- /// transform::DecomposeMemoryAccess::Intrinsic for a storage buffer
- /// @param out the output of the expression stream
- /// @param expr the call expression
- /// @param intrinsic the transform::DecomposeMemoryAccess::Intrinsic
- /// @returns true if the call expression is emitted
- bool EmitStorageBufferAccess(
- std::ostream& out,
- const ast::CallExpression* expr,
- const transform::DecomposeMemoryAccess::Intrinsic* intrinsic);
- /// Handles generating a barrier intrinsic call
- /// @param out the output of the expression stream
- /// @param builtin the semantic information for the barrier builtin
- /// @returns true if the call expression is emitted
- bool EmitBarrierCall(std::ostream& out, const sem::Builtin* builtin);
- /// Handles generating an atomic intrinsic call for a storage buffer variable
- /// @param out the output of the expression stream
- /// @param expr the call expression
- /// @param intrinsic the atomic intrinsic
- /// @returns true if the call expression is emitted
- bool EmitStorageAtomicCall(
- std::ostream& out,
- const ast::CallExpression* expr,
- const transform::DecomposeMemoryAccess::Intrinsic* intrinsic);
- /// Handles generating an atomic intrinsic call for a workgroup variable
- /// @param out the output of the expression stream
- /// @param expr the call expression
- /// @param builtin the semantic information for the atomic builtin
- /// @returns true if the call expression is emitted
- bool EmitWorkgroupAtomicCall(std::ostream& out,
+ /// Handles an index accessor expression
+ /// @param out the output of the expression stream
+ /// @param expr the expression to emit
+ /// @returns true if the index accessor was emitted
+ bool EmitIndexAccessor(std::ostream& out, const ast::IndexAccessorExpression* expr);
+ /// Handles an assignment statement
+ /// @param stmt the statement to emit
+ /// @returns true if the statement was emitted successfully
+ bool EmitAssign(const ast::AssignmentStatement* stmt);
+ /// Emits code such that if `expr` is zero, it emits one, else `expr`
+ /// @param out the output of the expression stream
+ /// @param expr the expression
+ /// @returns true if the expression was emitted, false otherwise
+ bool EmitExpressionOrOneIfZero(std::ostream& out, const ast::Expression* expr);
+ /// Handles generating a binary expression
+ /// @param out the output of the expression stream
+ /// @param expr the binary expression
+ /// @returns true if the expression was emitted, false otherwise
+ bool EmitBinary(std::ostream& out, const ast::BinaryExpression* expr);
+ /// Handles generating a bitcast expression
+ /// @param out the output of the expression stream
+ /// @param expr the as expression
+ /// @returns true if the bitcast was emitted
+ bool EmitBitcast(std::ostream& out, const ast::BitcastExpression* expr);
+ /// Emits a list of statements
+ /// @param stmts the statement list
+ /// @returns true if the statements were emitted successfully
+ bool EmitStatements(const ast::StatementList& stmts);
+ /// Emits a list of statements with an indentation
+ /// @param stmts the statement list
+ /// @returns true if the statements were emitted successfully
+ bool EmitStatementsWithIndent(const ast::StatementList& stmts);
+ /// Handles a block statement
+ /// @param stmt the statement to emit
+ /// @returns true if the statement was emitted successfully
+ bool EmitBlock(const ast::BlockStatement* stmt);
+ /// Handles a break statement
+ /// @param stmt the statement to emit
+ /// @returns true if the statement was emitted successfully
+ bool EmitBreak(const ast::BreakStatement* stmt);
+ /// Handles generating a call expression
+ /// @param out the output of the expression stream
+ /// @param expr the call expression
+ /// @returns true if the call expression is emitted
+ bool EmitCall(std::ostream& out, const ast::CallExpression* expr);
+ /// Handles generating a function call expression
+ /// @param out the output of the expression stream
+ /// @param call the call expression
+ /// @param function the function being called
+ /// @returns true if the expression is emitted
+ bool EmitFunctionCall(std::ostream& out, const sem::Call* call, const sem::Function* function);
+ /// Handles generating a builtin call expression
+ /// @param out the output of the expression stream
+ /// @param call the call expression
+ /// @param builtin the builtin being called
+ /// @returns true if the expression is emitted
+ bool EmitBuiltinCall(std::ostream& out, const sem::Call* call, const sem::Builtin* builtin);
+ /// Handles generating a type conversion expression
+ /// @param out the output of the expression stream
+ /// @param call the call expression
+ /// @param conv the type conversion
+ /// @returns true if the expression is emitted
+ bool EmitTypeConversion(std::ostream& out,
+ const sem::Call* call,
+ const sem::TypeConversion* conv);
+ /// Handles generating a type constructor expression
+ /// @param out the output of the expression stream
+ /// @param call the call expression
+ /// @param ctor the type constructor
+ /// @returns true if the expression is emitted
+ bool EmitTypeConstructor(std::ostream& out,
+ const sem::Call* call,
+ const sem::TypeConstructor* ctor);
+ /// Handles generating a call expression to a
+ /// transform::DecomposeMemoryAccess::Intrinsic for a uniform buffer
+ /// @param out the output of the expression stream
+ /// @param expr the call expression
+ /// @param intrinsic the transform::DecomposeMemoryAccess::Intrinsic
+ /// @returns true if the call expression is emitted
+ bool EmitUniformBufferAccess(std::ostream& out,
+ const ast::CallExpression* expr,
+ const transform::DecomposeMemoryAccess::Intrinsic* intrinsic);
+ /// Handles generating a call expression to a
+ /// transform::DecomposeMemoryAccess::Intrinsic for a storage buffer
+ /// @param out the output of the expression stream
+ /// @param expr the call expression
+ /// @param intrinsic the transform::DecomposeMemoryAccess::Intrinsic
+ /// @returns true if the call expression is emitted
+ bool EmitStorageBufferAccess(std::ostream& out,
+ const ast::CallExpression* expr,
+ const transform::DecomposeMemoryAccess::Intrinsic* intrinsic);
+ /// Handles generating a barrier intrinsic call
+ /// @param out the output of the expression stream
+ /// @param builtin the semantic information for the barrier builtin
+ /// @returns true if the call expression is emitted
+ bool EmitBarrierCall(std::ostream& out, const sem::Builtin* builtin);
+ /// Handles generating an atomic intrinsic call for a storage buffer variable
+ /// @param out the output of the expression stream
+ /// @param expr the call expression
+ /// @param intrinsic the atomic intrinsic
+ /// @returns true if the call expression is emitted
+ bool EmitStorageAtomicCall(std::ostream& out,
const ast::CallExpression* expr,
- const sem::Builtin* builtin);
- /// Handles generating a call to a texture function (`textureSample`,
- /// `textureSampleGrad`, etc)
- /// @param out the output of the expression stream
- /// @param call the call expression
- /// @param builtin the semantic information for the texture builtin
- /// @returns true if the call expression is emitted
- bool EmitTextureCall(std::ostream& out,
- const sem::Call* call,
- const sem::Builtin* builtin);
- /// Handles generating a call to the `select()` builtin
- /// @param out the output of the expression stream
- /// @param expr the call expression
- /// @returns true if the call expression is emitted
- bool EmitSelectCall(std::ostream& out, const ast::CallExpression* expr);
- /// Handles generating a call to the `modf()` builtin
- /// @param out the output of the expression stream
- /// @param expr the call expression
- /// @param builtin the semantic information for the builtin
- /// @returns true if the call expression is emitted
- bool EmitModfCall(std::ostream& out,
- const ast::CallExpression* expr,
- const sem::Builtin* builtin);
- /// Handles generating a call to the `frexp()` builtin
- /// @param out the output of the expression stream
- /// @param expr the call expression
- /// @param builtin the semantic information for the builtin
- /// @returns true if the call expression is emitted
- bool EmitFrexpCall(std::ostream& out,
- const ast::CallExpression* expr,
- const sem::Builtin* builtin);
- /// Handles generating a call to the `degrees()` builtin
- /// @param out the output of the expression stream
- /// @param expr the call expression
- /// @param builtin the semantic information for the builtin
- /// @returns true if the call expression is emitted
- bool EmitDegreesCall(std::ostream& out,
- const ast::CallExpression* expr,
- const sem::Builtin* builtin);
- /// Handles generating a call to the `radians()` builtin
- /// @param out the output of the expression stream
- /// @param expr the call expression
- /// @param builtin the semantic information for the builtin
- /// @returns true if the call expression is emitted
- bool EmitRadiansCall(std::ostream& out,
+ const transform::DecomposeMemoryAccess::Intrinsic* intrinsic);
+ /// Handles generating the helper function for the atomic intrinsic function
+ /// @param func the function
+ /// @param intrinsic the atomic intrinsic
+ /// @returns true if the function is emitted
+ bool EmitStorageAtomicIntrinsic(const ast::Function* func,
+ const transform::DecomposeMemoryAccess::Intrinsic* intrinsic);
+ /// Handles generating an atomic intrinsic call for a workgroup variable
+ /// @param out the output of the expression stream
+ /// @param expr the call expression
+ /// @param builtin the semantic information for the atomic builtin
+ /// @returns true if the call expression is emitted
+ bool EmitWorkgroupAtomicCall(std::ostream& out,
+ const ast::CallExpression* expr,
+ const sem::Builtin* builtin);
+ /// Handles generating a call to a texture function (`textureSample`,
+ /// `textureSampleGrad`, etc)
+ /// @param out the output of the expression stream
+ /// @param call the call expression
+ /// @param builtin the semantic information for the texture builtin
+ /// @returns true if the call expression is emitted
+ bool EmitTextureCall(std::ostream& out, const sem::Call* call, const sem::Builtin* builtin);
+ /// Handles generating a call to the `select()` builtin
+ /// @param out the output of the expression stream
+ /// @param expr the call expression
+ /// @returns true if the call expression is emitted
+ bool EmitSelectCall(std::ostream& out, const ast::CallExpression* expr);
+ /// Handles generating a call to the `modf()` builtin
+ /// @param out the output of the expression stream
+ /// @param expr the call expression
+ /// @param builtin the semantic information for the builtin
+ /// @returns true if the call expression is emitted
+ bool EmitModfCall(std::ostream& out,
+ const ast::CallExpression* expr,
+ const sem::Builtin* builtin);
+ /// Handles generating a call to the `frexp()` builtin
+ /// @param out the output of the expression stream
+ /// @param expr the call expression
+ /// @param builtin the semantic information for the builtin
+ /// @returns true if the call expression is emitted
+ bool EmitFrexpCall(std::ostream& out,
const ast::CallExpression* expr,
const sem::Builtin* builtin);
- /// Handles generating a call to data packing builtin
- /// @param out the output of the expression stream
- /// @param expr the call expression
- /// @param builtin the semantic information for the texture builtin
- /// @returns true if the call expression is emitted
- bool EmitDataPackingCall(std::ostream& out,
- const ast::CallExpression* expr,
- const sem::Builtin* builtin);
- /// Handles generating a call to data unpacking builtin
- /// @param out the output of the expression stream
- /// @param expr the call expression
- /// @param builtin the semantic information for the texture builtin
- /// @returns true if the call expression is emitted
- bool EmitDataUnpackingCall(std::ostream& out,
+ /// Handles generating a call to the `degrees()` builtin
+ /// @param out the output of the expression stream
+ /// @param expr the call expression
+ /// @param builtin the semantic information for the builtin
+ /// @returns true if the call expression is emitted
+ bool EmitDegreesCall(std::ostream& out,
+ const ast::CallExpression* expr,
+ const sem::Builtin* builtin);
+ /// Handles generating a call to the `radians()` builtin
+ /// @param out the output of the expression stream
+ /// @param expr the call expression
+ /// @param builtin the semantic information for the builtin
+ /// @returns true if the call expression is emitted
+ bool EmitRadiansCall(std::ostream& out,
+ const ast::CallExpression* expr,
+ const sem::Builtin* builtin);
+ /// Handles generating a call to data packing builtin
+ /// @param out the output of the expression stream
+ /// @param expr the call expression
+ /// @param builtin the semantic information for the builtin
+ /// @returns true if the call expression is emitted
+ bool EmitDataPackingCall(std::ostream& out,
const ast::CallExpression* expr,
const sem::Builtin* builtin);
- /// Handles a case statement
- /// @param s the switch statement
- /// @param case_idx the index of the switch case in the switch statement
- /// @returns true if the statement was emitted successfully
- bool EmitCase(const ast::SwitchStatement* s, size_t case_idx);
- /// Handles generating a discard statement
- /// @param stmt the discard statement
- /// @returns true if the statement was successfully emitted
- bool EmitDiscard(const ast::DiscardStatement* stmt);
- /// Handles a continue statement
- /// @param stmt the statement to emit
- /// @returns true if the statement was emitted successfully
- bool EmitContinue(const ast::ContinueStatement* stmt);
- /// Handles generate an Expression
- /// @param out the output of the expression stream
- /// @param expr the expression
- /// @returns true if the expression was emitted
- bool EmitExpression(std::ostream& out, const ast::Expression* expr);
- /// Handles generating a function
- /// @param func the function to generate
- /// @returns true if the function was emitted
- bool EmitFunction(const ast::Function* func);
- /// Handles emitting the function body if it discards to work around a FXC
- /// compilation bug.
- /// @param func the function with the body to emit
- /// @returns true if the function was emitted
- bool EmitFunctionBodyWithDiscard(const ast::Function* func);
- /// Handles emitting a global variable
- /// @param global the global variable
- /// @returns true on success
- bool EmitGlobalVariable(const ast::Variable* global);
+ /// Handles generating a call to data unpacking builtin
+ /// @param out the output of the expression stream
+ /// @param expr the call expression
+ /// @param builtin the semantic information for the builtin
+ /// @returns true if the call expression is emitted
+ bool EmitDataUnpackingCall(std::ostream& out,
+ const ast::CallExpression* expr,
+ const sem::Builtin* builtin);
+ /// Handles generating a call to DP4a builtins (dot4I8Packed and dot4U8Packed)
+ /// @param out the output of the expression stream
+ /// @param expr the call expression
+ /// @param builtin the semantic information for the builtin
+ /// @returns true if the call expression is emitted
+ bool EmitDP4aCall(std::ostream& out,
+ const ast::CallExpression* expr,
+ const sem::Builtin* builtin);
+ /// Handles a case statement
+ /// @param s the switch statement
+ /// @param case_idx the index of the switch case in the switch statement
+ /// @returns true if the statement was emitted successfully
+ bool EmitCase(const ast::SwitchStatement* s, size_t case_idx);
+ /// Handles generating a discard statement
+ /// @param stmt the discard statement
+ /// @returns true if the statement was successfully emitted
+ bool EmitDiscard(const ast::DiscardStatement* stmt);
+ /// Handles a continue statement
+ /// @param stmt the statement to emit
+ /// @returns true if the statement was emitted successfully
+ bool EmitContinue(const ast::ContinueStatement* stmt);
+    /// Handles generating an Expression
+ /// @param out the output of the expression stream
+ /// @param expr the expression
+ /// @returns true if the expression was emitted
+ bool EmitExpression(std::ostream& out, const ast::Expression* expr);
+ /// Handles generating a function
+ /// @param func the function to generate
+ /// @returns true if the function was emitted
+ bool EmitFunction(const ast::Function* func);
+ /// Handles emitting the function body if it discards, to work around an FXC
+ /// compilation bug.
+ /// @param func the function with the body to emit
+ /// @returns true if the function was emitted
+ bool EmitFunctionBodyWithDiscard(const ast::Function* func);
+ /// Handles emitting a global variable
+ /// @param global the global variable
+ /// @returns true on success
+ bool EmitGlobalVariable(const ast::Variable* global);
- /// Handles emitting a global variable with the uniform storage class
- /// @param var the global variable
- /// @returns true on success
- bool EmitUniformVariable(const sem::Variable* var);
+ /// Handles emitting a global variable with the uniform storage class
+ /// @param var the global variable
+ /// @returns true on success
+ bool EmitUniformVariable(const sem::Variable* var);
- /// Handles emitting a global variable with the storage storage class
- /// @param var the global variable
- /// @returns true on success
- bool EmitStorageVariable(const sem::Variable* var);
+ /// Handles emitting a global variable with the storage storage class
+ /// @param var the global variable
+ /// @returns true on success
+ bool EmitStorageVariable(const sem::Variable* var);
- /// Handles emitting a global variable with the handle storage class
- /// @param var the global variable
- /// @returns true on success
- bool EmitHandleVariable(const sem::Variable* var);
+ /// Handles emitting a global variable with the handle storage class
+ /// @param var the global variable
+ /// @returns true on success
+ bool EmitHandleVariable(const sem::Variable* var);
- /// Handles emitting a global variable with the private storage class
- /// @param var the global variable
- /// @returns true on success
- bool EmitPrivateVariable(const sem::Variable* var);
+ /// Handles emitting a global variable with the private storage class
+ /// @param var the global variable
+ /// @returns true on success
+ bool EmitPrivateVariable(const sem::Variable* var);
- /// Handles emitting a global variable with the workgroup storage class
- /// @param var the global variable
- /// @returns true on success
- bool EmitWorkgroupVariable(const sem::Variable* var);
+ /// Handles emitting a global variable with the workgroup storage class
+ /// @param var the global variable
+ /// @returns true on success
+ bool EmitWorkgroupVariable(const sem::Variable* var);
- /// Handles emitting the entry point function
- /// @param func the entry point
- /// @returns true if the entry point function was emitted
- bool EmitEntryPointFunction(const ast::Function* func);
- /// Handles an if statement
- /// @param stmt the statement to emit
- /// @returns true if the statement was successfully emitted
- bool EmitIf(const ast::IfStatement* stmt);
- /// Handles a literal
- /// @param out the output stream
- /// @param lit the literal to emit
- /// @returns true if the literal was successfully emitted
- bool EmitLiteral(std::ostream& out, const ast::LiteralExpression* lit);
- /// Handles a loop statement
- /// @param stmt the statement to emit
- /// @returns true if the statement was emitted
- bool EmitLoop(const ast::LoopStatement* stmt);
- /// Handles a for loop statement
- /// @param stmt the statement to emit
- /// @returns true if the statement was emitted
- bool EmitForLoop(const ast::ForLoopStatement* stmt);
- /// Handles generating an identifier expression
- /// @param out the output of the expression stream
- /// @param expr the identifier expression
- /// @returns true if the identifeir was emitted
- bool EmitIdentifier(std::ostream& out, const ast::IdentifierExpression* expr);
- /// Handles a member accessor expression
- /// @param out the output of the expression stream
- /// @param expr the member accessor expression
- /// @returns true if the member accessor was emitted
- bool EmitMemberAccessor(std::ostream& out,
- const ast::MemberAccessorExpression* expr);
- /// Handles return statements
- /// @param stmt the statement to emit
- /// @returns true if the statement was successfully emitted
- bool EmitReturn(const ast::ReturnStatement* stmt);
- /// Handles statement
- /// @param stmt the statement to emit
- /// @returns true if the statement was emitted
- bool EmitStatement(const ast::Statement* stmt);
- /// Handles generating a switch statement
- /// @param stmt the statement to emit
- /// @returns true if the statement was emitted
- bool EmitSwitch(const ast::SwitchStatement* stmt);
- // Handles generating a switch statement with only a default case
- /// @param stmt the statement to emit
- /// @returns true if the statement was emitted
- bool EmitDefaultOnlySwitch(const ast::SwitchStatement* stmt);
- /// Handles generating type
- /// @param out the output stream
- /// @param type the type to generate
- /// @param storage_class the storage class of the variable
- /// @param access the access control type of the variable
- /// @param name the name of the variable, used for array emission.
- /// @param name_printed (optional) if not nullptr and an array was printed
- /// then the boolean is set to true.
- /// @returns true if the type is emitted
- bool EmitType(std::ostream& out,
- const sem::Type* type,
- ast::StorageClass storage_class,
- ast::Access access,
- const std::string& name,
- bool* name_printed = nullptr);
- /// Handles generating type and name
- /// @param out the output stream
- /// @param type the type to generate
- /// @param storage_class the storage class of the variable
- /// @param access the access control type of the variable
- /// @param name the name to emit
- /// @returns true if the type is emitted
- bool EmitTypeAndName(std::ostream& out,
- const sem::Type* type,
- ast::StorageClass storage_class,
- ast::Access access,
- const std::string& name);
- /// Handles generating a structure declaration
- /// @param buffer the text buffer that the type declaration will be written to
- /// @param ty the struct to generate
- /// @returns true if the struct is emitted
- bool EmitStructType(TextBuffer* buffer, const sem::Struct* ty);
- /// Handles a unary op expression
- /// @param out the output of the expression stream
- /// @param expr the expression to emit
- /// @returns true if the expression was emitted
- bool EmitUnaryOp(std::ostream& out, const ast::UnaryOpExpression* expr);
- /// Emits `value` for the given type
- /// @param out the output stream
- /// @param type the type to emit the value for
- /// @param value the value to emit
- /// @returns true if the value was successfully emitted.
- bool EmitValue(std::ostream& out, const sem::Type* type, int value);
- /// Emits the zero value for the given type
- /// @param out the output stream
- /// @param type the type to emit the value for
- /// @returns true if the zero value was successfully emitted.
- bool EmitZeroValue(std::ostream& out, const sem::Type* type);
- /// Handles generating a variable
- /// @param var the variable to generate
- /// @returns true if the variable was emitted
- bool EmitVariable(const ast::Variable* var);
- /// Handles generating a program scope constant variable
- /// @param var the variable to emit
- /// @returns true if the variable was emitted
- bool EmitProgramConstVariable(const ast::Variable* var);
- /// Emits call to a helper vector assignment function for the input assignment
- /// statement and vector type. This is used to work around FXC issues where
- /// assignments to vectors with dynamic indices cause compilation failures.
- /// @param stmt assignment statement that corresponds to a vector assignment
- /// via an accessor expression
- /// @param vec the vector type being assigned to
- /// @returns true on success
- bool EmitDynamicVectorAssignment(const ast::AssignmentStatement* stmt,
- const sem::Vector* vec);
- /// Emits call to a helper matrix assignment function for the input assignment
- /// statement and matrix type. This is used to work around FXC issues where
- /// assignment of a vector to a matrix with a dynamic index causes compilation
- /// failures.
- /// @param stmt assignment statement that corresponds to a matrix assignment
- /// via an accessor expression
- /// @param mat the matrix type being assigned to
- /// @returns true on success
- bool EmitDynamicMatrixVectorAssignment(const ast::AssignmentStatement* stmt,
- const sem::Matrix* mat);
- /// Emits call to a helper matrix assignment function for the input assignment
- /// statement and matrix type. This is used to work around FXC issues where
- /// assignment of a scalar to a matrix with at least one dynamic index causes
- /// compilation failures.
- /// @param stmt assignment statement that corresponds to a matrix assignment
- /// via an accessor expression
- /// @param mat the matrix type being assigned to
- /// @returns true on success
- bool EmitDynamicMatrixScalarAssignment(const ast::AssignmentStatement* stmt,
- const sem::Matrix* mat);
+ /// Handles emitting the entry point function
+ /// @param func the entry point
+ /// @returns true if the entry point function was emitted
+ bool EmitEntryPointFunction(const ast::Function* func);
+ /// Handles an if statement
+ /// @param stmt the statement to emit
+ /// @returns true if the statement was successfully emitted
+ bool EmitIf(const ast::IfStatement* stmt);
+ /// Handles a constant value
+ /// @param out the output stream
+ /// @param constant the constant value to emit
+ /// @returns true if the constant value was successfully emitted
+ bool EmitConstant(std::ostream& out, const sem::Constant& constant);
+ /// Handles a literal
+ /// @param out the output stream
+ /// @param lit the literal to emit
+ /// @returns true if the literal was successfully emitted
+ bool EmitLiteral(std::ostream& out, const ast::LiteralExpression* lit);
+ /// Handles a loop statement
+ /// @param stmt the statement to emit
+ /// @returns true if the statement was emitted
+ bool EmitLoop(const ast::LoopStatement* stmt);
+ /// Handles a for loop statement
+ /// @param stmt the statement to emit
+ /// @returns true if the statement was emitted
+ bool EmitForLoop(const ast::ForLoopStatement* stmt);
+ /// Handles generating an identifier expression
+ /// @param out the output of the expression stream
+ /// @param expr the identifier expression
+ /// @returns true if the identifier was emitted
+ bool EmitIdentifier(std::ostream& out, const ast::IdentifierExpression* expr);
+ /// Handles a member accessor expression
+ /// @param out the output of the expression stream
+ /// @param expr the member accessor expression
+ /// @returns true if the member accessor was emitted
+ bool EmitMemberAccessor(std::ostream& out, const ast::MemberAccessorExpression* expr);
+ /// Handles return statements
+ /// @param stmt the statement to emit
+ /// @returns true if the statement was successfully emitted
+ bool EmitReturn(const ast::ReturnStatement* stmt);
+ /// Handles a statement
+ /// @param stmt the statement to emit
+ /// @returns true if the statement was emitted
+ bool EmitStatement(const ast::Statement* stmt);
+ /// Handles generating a switch statement
+ /// @param stmt the statement to emit
+ /// @returns true if the statement was emitted
+ bool EmitSwitch(const ast::SwitchStatement* stmt);
+ /// Handles generating a switch statement with only a default case
+ /// @param stmt the statement to emit
+ /// @returns true if the statement was emitted
+ bool EmitDefaultOnlySwitch(const ast::SwitchStatement* stmt);
+ /// Handles generating type
+ /// @param out the output stream
+ /// @param type the type to generate
+ /// @param storage_class the storage class of the variable
+ /// @param access the access control type of the variable
+ /// @param name the name of the variable, used for array emission.
+ /// @param name_printed (optional) if not nullptr and an array was printed
+ /// then the boolean is set to true.
+ /// @returns true if the type is emitted
+ bool EmitType(std::ostream& out,
+ const sem::Type* type,
+ ast::StorageClass storage_class,
+ ast::Access access,
+ const std::string& name,
+ bool* name_printed = nullptr);
+ /// Handles generating type and name
+ /// @param out the output stream
+ /// @param type the type to generate
+ /// @param storage_class the storage class of the variable
+ /// @param access the access control type of the variable
+ /// @param name the name to emit
+ /// @returns true if the type is emitted
+ bool EmitTypeAndName(std::ostream& out,
+ const sem::Type* type,
+ ast::StorageClass storage_class,
+ ast::Access access,
+ const std::string& name);
+ /// Handles generating a structure declaration
+ /// @param buffer the text buffer that the type declaration will be written to
+ /// @param ty the struct to generate
+ /// @returns true if the struct is emitted
+ bool EmitStructType(TextBuffer* buffer, const sem::Struct* ty);
+ /// Handles generating a structure declaration only the first time called. Subsequent calls are
+ /// a no-op and return true.
+ /// @param buffer the text buffer that the type declaration will be written to
+ /// @param ty the struct to generate
+ /// @returns true if the struct is emitted
+ bool EmitStructTypeOnce(TextBuffer* buffer, const sem::Struct* ty);
+ /// Handles a unary op expression
+ /// @param out the output of the expression stream
+ /// @param expr the expression to emit
+ /// @returns true if the expression was emitted
+ bool EmitUnaryOp(std::ostream& out, const ast::UnaryOpExpression* expr);
+ /// Emits `value` for the given type
+ /// @param out the output stream
+ /// @param type the type to emit the value for
+ /// @param value the value to emit
+ /// @returns true if the value was successfully emitted.
+ bool EmitValue(std::ostream& out, const sem::Type* type, int value);
+ /// Emits the zero value for the given type
+ /// @param out the output stream
+ /// @param type the type to emit the value for
+ /// @returns true if the zero value was successfully emitted.
+ bool EmitZeroValue(std::ostream& out, const sem::Type* type);
+ /// Handles generating a variable
+ /// @param var the variable to generate
+ /// @returns true if the variable was emitted
+ bool EmitVariable(const ast::Variable* var);
+ /// Handles generating a program scope constant variable
+ /// @param var the variable to emit
+ /// @returns true if the variable was emitted
+ bool EmitProgramConstVariable(const ast::Variable* var);
+ /// Emits a call to a helper vector assignment function for the input assignment
+ /// statement and vector type. This is used to work around FXC issues where
+ /// assignments to vectors with dynamic indices cause compilation failures.
+ /// @param stmt assignment statement that corresponds to a vector assignment
+ /// via an accessor expression
+ /// @param vec the vector type being assigned to
+ /// @returns true on success
+ bool EmitDynamicVectorAssignment(const ast::AssignmentStatement* stmt, const sem::Vector* vec);
+ /// Emits a call to a helper matrix assignment function for the input assignment
+ /// statement and matrix type. This is used to work around FXC issues where
+ /// assignment of a vector to a matrix with a dynamic index causes compilation
+ /// failures.
+ /// @param stmt assignment statement that corresponds to a matrix assignment
+ /// via an accessor expression
+ /// @param mat the matrix type being assigned to
+ /// @returns true on success
+ bool EmitDynamicMatrixVectorAssignment(const ast::AssignmentStatement* stmt,
+ const sem::Matrix* mat);
+ /// Emits a call to a helper matrix assignment function for the input assignment
+ /// statement and matrix type. This is used to work around FXC issues where
+ /// assignment of a scalar to a matrix with at least one dynamic index causes
+ /// compilation failures.
+ /// @param stmt assignment statement that corresponds to a matrix assignment
+ /// via an accessor expression
+ /// @param mat the matrix type being assigned to
+ /// @returns true on success
+ bool EmitDynamicMatrixScalarAssignment(const ast::AssignmentStatement* stmt,
+ const sem::Matrix* mat);
- /// Handles generating a builtin method name
- /// @param builtin the semantic info for the builtin
- /// @returns the name or "" if not valid
- std::string generate_builtin_name(const sem::Builtin* builtin);
- /// Converts a builtin to an attribute name
- /// @param builtin the builtin to convert
- /// @returns the string name of the builtin or blank on error
- std::string builtin_to_attribute(ast::Builtin builtin) const;
+ /// Handles generating a builtin method name
+ /// @param builtin the semantic info for the builtin
+ /// @returns the name or "" if not valid
+ std::string generate_builtin_name(const sem::Builtin* builtin);
+ /// Converts a builtin to an attribute name
+ /// @param builtin the builtin to convert
+ /// @returns the string name of the builtin or blank on error
+ std::string builtin_to_attribute(ast::Builtin builtin) const;
- /// Converts interpolation attributes to a HLSL modifiers
- /// @param type the interpolation type
- /// @param sampling the interpolation sampling
- /// @returns the string name of the attribute or blank on error
- std::string interpolation_to_modifiers(
- ast::InterpolationType type,
- ast::InterpolationSampling sampling) const;
+ /// Converts interpolation attributes to HLSL modifiers
+ /// @param type the interpolation type
+ /// @param sampling the interpolation sampling
+ /// @returns the string name of the attribute or blank on error
+ std::string interpolation_to_modifiers(ast::InterpolationType type,
+ ast::InterpolationSampling sampling) const;
- private:
- enum class VarType { kIn, kOut };
+ private:
+ enum class VarType { kIn, kOut };
- struct EntryPointData {
- std::string struct_name;
- std::string var_name;
- };
+ struct EntryPointData {
+ std::string struct_name;
+ std::string var_name;
+ };
- struct DMAIntrinsic {
- transform::DecomposeMemoryAccess::Intrinsic::Op op;
- transform::DecomposeMemoryAccess::Intrinsic::DataType type;
- bool operator==(const DMAIntrinsic& rhs) const {
- return op == rhs.op && type == rhs.type;
- }
- /// Hasher is a std::hash function for DMAIntrinsic
- struct Hasher {
- /// @param i the DMAIntrinsic to hash
- /// @returns the hash of `i`
- inline std::size_t operator()(const DMAIntrinsic& i) const {
- return utils::Hash(i.op, i.type);
- }
+ struct DMAIntrinsic {
+ transform::DecomposeMemoryAccess::Intrinsic::Op op;
+ transform::DecomposeMemoryAccess::Intrinsic::DataType type;
+ bool operator==(const DMAIntrinsic& rhs) const { return op == rhs.op && type == rhs.type; }
+ /// Hasher is a std::hash function for DMAIntrinsic
+ struct Hasher {
+ /// @param i the DMAIntrinsic to hash
+ /// @returns the hash of `i`
+ inline std::size_t operator()(const DMAIntrinsic& i) const {
+ return utils::Hash(i.op, i.type);
+ }
+ };
};
- };
- /// CallBuiltinHelper will call the builtin helper function, creating it
- /// if it hasn't been built already. If the builtin needs to be built then
- /// CallBuiltinHelper will generate the function signature and will call
- /// `build` to emit the body of the function.
- /// @param out the output of the expression stream
- /// @param call the call expression
- /// @param builtin the semantic information for the builtin
- /// @param build a function with the signature:
- /// `bool(TextBuffer* buffer, const std::vector<std::string>& params)`
- /// Where:
- /// `buffer` is the body of the generated function
- /// `params` is the name of all the generated function parameters
- /// @returns true if the call expression is emitted
- template <typename F>
- bool CallBuiltinHelper(std::ostream& out,
- const ast::CallExpression* call,
- const sem::Builtin* builtin,
- F&& build);
+ /// CallBuiltinHelper will call the builtin helper function, creating it
+ /// if it hasn't been built already. If the builtin needs to be built then
+ /// CallBuiltinHelper will generate the function signature and will call
+ /// `build` to emit the body of the function.
+ /// @param out the output of the expression stream
+ /// @param call the call expression
+ /// @param builtin the semantic information for the builtin
+ /// @param build a function with the signature:
+ /// `bool(TextBuffer* buffer, const std::vector<std::string>& params)`
+ /// Where:
+ /// `buffer` is the body of the generated function
+ /// `params` holds the names of all the generated function parameters
+ /// @returns true if the call expression is emitted
+ template <typename F>
+ bool CallBuiltinHelper(std::ostream& out,
+ const ast::CallExpression* call,
+ const sem::Builtin* builtin,
+ F&& build);
- TextBuffer helpers_; // Helper functions emitted at the top of the output
- std::function<bool()> emit_continuing_;
- std::unordered_map<DMAIntrinsic, std::string, DMAIntrinsic::Hasher>
- dma_intrinsics_;
- std::unordered_map<const sem::Builtin*, std::string> builtins_;
- std::unordered_map<const sem::Struct*, std::string> structure_builders_;
- std::unordered_map<const sem::Vector*, std::string> dynamic_vector_write_;
- std::unordered_map<const sem::Matrix*, std::string>
- dynamic_matrix_vector_write_;
- std::unordered_map<const sem::Matrix*, std::string>
- dynamic_matrix_scalar_write_;
- std::unordered_map<const sem::Type*, std::string> value_or_one_if_zero_;
+ TextBuffer helpers_; // Helper functions emitted at the top of the output
+ std::function<bool()> emit_continuing_;
+ std::unordered_map<const sem::Matrix*, std::string> matrix_scalar_ctors_;
+ std::unordered_map<const sem::Builtin*, std::string> builtins_;
+ std::unordered_map<const sem::Struct*, std::string> structure_builders_;
+ std::unordered_map<const sem::Vector*, std::string> dynamic_vector_write_;
+ std::unordered_map<const sem::Matrix*, std::string> dynamic_matrix_vector_write_;
+ std::unordered_map<const sem::Matrix*, std::string> dynamic_matrix_scalar_write_;
+ std::unordered_map<const sem::Type*, std::string> value_or_one_if_zero_;
+ std::unordered_set<const sem::Struct*> emitted_structs_;
};
} // namespace tint::writer::hlsl
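The DMAIntrinsic::Hasher kept in the reformatted header above follows the standard pattern for keying a std::unordered_map on a small aggregate: an operator== plus a nested hash functor, which the class uses to cache the names of already-emitted helpers. A minimal, self-contained C++ sketch of that pattern, with a simplified Key type and an ad-hoc hash combine standing in for Tint's DecomposeMemoryAccess enums and utils::Hash:

#include <cstddef>
#include <functional>
#include <string>
#include <unordered_map>

// Simplified stand-in for DMAIntrinsic: two enum-like fields, equality, and a
// nested Hasher so the struct can be used directly as an unordered_map key.
struct Key {
    int op;
    int type;
    bool operator==(const Key& rhs) const { return op == rhs.op && type == rhs.type; }
    struct Hasher {
        std::size_t operator()(const Key& k) const {
            // Ad-hoc combine; the real code's utils::Hash(i.op, i.type) may differ.
            std::size_t h = std::hash<int>{}(k.op);
            return h ^ (std::hash<int>{}(k.type) + 0x9e3779b9u + (h << 6) + (h >> 2));
        }
    };
};

int main() {
    // Analogous to the builtins_ / structure_builders_ caches above: map a key to
    // the name of an already-emitted helper so it is only generated once.
    std::unordered_map<Key, std::string, Key::Hasher> cache;
    cache[{0, 1}] = "helper_a";
    return cache.count({0, 1}) == 1 ? 0 : 1;
}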
diff --git a/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_array_accessor_test.cc b/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_array_accessor_test.cc
index bcf3ad1a03b..bbdeb10fd15 100644
--- a/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_array_accessor_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_array_accessor_test.cc
@@ -14,21 +14,23 @@
#include "src/tint/writer/hlsl/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::hlsl {
namespace {
using HlslGeneratorImplTest_Expression = TestHelper;
TEST_F(HlslGeneratorImplTest_Expression, IndexAccessor) {
- Global("ary", ty.array<i32, 10>(), ast::StorageClass::kPrivate);
- auto* expr = IndexAccessor("ary", 5);
- WrapInFunction(expr);
+ Global("ary", ty.array<i32, 10>(), ast::StorageClass::kPrivate);
+ auto* expr = IndexAccessor("ary", 5_i);
+ WrapInFunction(expr);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
- EXPECT_EQ(out.str(), "ary[5]");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
+ EXPECT_EQ(out.str(), "ary[5]");
}
} // namespace
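These test updates (here and in the files that follow) replace bare literals such as 5, 0u and 1.f with the typed suffixes 5_i, 0_u and 1_f pulled in by `using namespace tint::number_suffixes`. A rough, self-contained sketch of how a literal suffix of that shape can be declared; the I32 wrapper and operator"" _i below are illustrative assumptions, not Tint's actual Number types or suffix operators:

#include <cstdint>

// Hypothetical 32-bit wrapper; Tint's real number types are not reproduced here.
struct I32 {
    int32_t value;
};

// User-defined literal so that 5_i yields an I32 rather than a plain int.
constexpr I32 operator"" _i(unsigned long long v) {
    return I32{static_cast<int32_t>(v)};
}

static_assert((5_i).value == 5, "5_i carries an explicitly 32-bit signed value");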
diff --git a/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_assign_test.cc b/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_assign_test.cc
index 6305dc0c095..c69cbdf9190 100644
--- a/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_assign_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_assign_test.cc
@@ -14,24 +14,26 @@
#include "src/tint/writer/hlsl/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::hlsl {
namespace {
using HlslGeneratorImplTest_Assign = TestHelper;
TEST_F(HlslGeneratorImplTest_Assign, Emit_Assign) {
- Func("fn", {}, ty.void_(),
- {
- Decl(Var("lhs", ty.i32())),
- Decl(Var("rhs", ty.i32())),
- Assign("lhs", "rhs"),
- });
-
- GeneratorImpl& gen = Build();
-
- ASSERT_TRUE(gen.Generate());
- EXPECT_EQ(gen.result(),
- R"(void fn() {
+ Func("fn", {}, ty.void_(),
+ {
+ Decl(Var("lhs", ty.i32())),
+ Decl(Var("rhs", ty.i32())),
+ Assign("lhs", "rhs"),
+ });
+
+ GeneratorImpl& gen = Build();
+
+ ASSERT_TRUE(gen.Generate());
+ EXPECT_EQ(gen.result(),
+ R"(void fn() {
int lhs = 0;
int rhs = 0;
lhs = rhs;
@@ -40,41 +42,41 @@ TEST_F(HlslGeneratorImplTest_Assign, Emit_Assign) {
}
TEST_F(HlslGeneratorImplTest_Assign, Emit_Vector_Assign_ConstantIndex) {
- Func("fn", {}, ty.void_(),
- {
- Decl(Var("lhs", ty.vec3<f32>())),
- Decl(Var("rhs", ty.f32())),
- Decl(Const("index", ty.u32(), Expr(0u))),
- Assign(IndexAccessor("lhs", "index"), "rhs"),
- });
-
- GeneratorImpl& gen = Build();
-
- ASSERT_TRUE(gen.Generate());
- EXPECT_EQ(gen.result(),
- R"(void fn() {
+ Func("fn", {}, ty.void_(),
+ {
+ Decl(Var("lhs", ty.vec3<f32>())),
+ Decl(Var("rhs", ty.f32())),
+ Decl(Let("index", ty.u32(), Expr(0_u))),
+ Assign(IndexAccessor("lhs", "index"), "rhs"),
+ });
+
+ GeneratorImpl& gen = Build();
+
+ ASSERT_TRUE(gen.Generate());
+ EXPECT_EQ(gen.result(),
+ R"(void fn() {
float3 lhs = float3(0.0f, 0.0f, 0.0f);
float rhs = 0.0f;
const uint index = 0u;
- lhs[index] = rhs;
+ lhs[0u] = rhs;
}
)");
}
TEST_F(HlslGeneratorImplTest_Assign, Emit_Vector_Assign_DynamicIndex) {
- Func("fn", {}, ty.void_(),
- {
- Decl(Var("lhs", ty.vec3<f32>())),
- Decl(Var("rhs", ty.f32())),
- Decl(Var("index", ty.u32())),
- Assign(IndexAccessor("lhs", "index"), "rhs"),
- });
-
- GeneratorImpl& gen = Build();
-
- ASSERT_TRUE(gen.Generate());
- EXPECT_EQ(gen.result(),
- R"(void set_float3(inout float3 vec, int idx, float val) {
+ Func("fn", {}, ty.void_(),
+ {
+ Decl(Var("lhs", ty.vec3<f32>())),
+ Decl(Var("rhs", ty.f32())),
+ Decl(Var("index", ty.u32())),
+ Assign(IndexAccessor("lhs", "index"), "rhs"),
+ });
+
+ GeneratorImpl& gen = Build();
+
+ ASSERT_TRUE(gen.Generate());
+ EXPECT_EQ(gen.result(),
+ R"(void set_float3(inout float3 vec, int idx, float val) {
vec = (idx.xxx == int3(0, 1, 2)) ? val.xxx : vec;
}
@@ -88,42 +90,41 @@ void fn() {
}
TEST_F(HlslGeneratorImplTest_Assign, Emit_Matrix_Assign_Vector_ConstantIndex) {
- Func("fn", {}, ty.void_(),
- {
- Decl(Var("lhs", ty.mat4x2<f32>())),
- Decl(Var("rhs", ty.vec2<f32>())),
- Decl(Const("index", ty.u32(), Expr(0u))),
- Assign(IndexAccessor("lhs", "index"), "rhs"),
- });
-
- GeneratorImpl& gen = Build();
-
- ASSERT_TRUE(gen.Generate());
- EXPECT_EQ(gen.result(),
- R"(void fn() {
+ Func("fn", {}, ty.void_(),
+ {
+ Decl(Var("lhs", ty.mat4x2<f32>())),
+ Decl(Var("rhs", ty.vec2<f32>())),
+ Decl(Let("index", ty.u32(), Expr(0_u))),
+ Assign(IndexAccessor("lhs", "index"), "rhs"),
+ });
+
+ GeneratorImpl& gen = Build();
+
+ ASSERT_TRUE(gen.Generate());
+ EXPECT_EQ(gen.result(),
+ R"(void fn() {
float4x2 lhs = float4x2(0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f);
float2 rhs = float2(0.0f, 0.0f);
const uint index = 0u;
- lhs[index] = rhs;
+ lhs[0u] = rhs;
}
)");
}
TEST_F(HlslGeneratorImplTest_Assign, Emit_Matrix_Assign_Vector_DynamicIndex) {
- Func("fn", {}, ty.void_(),
- {
- Decl(Var("lhs", ty.mat4x2<f32>())),
- Decl(Var("rhs", ty.vec2<f32>())),
- Decl(Var("index", ty.u32())),
- Assign(IndexAccessor("lhs", "index"), "rhs"),
- });
-
- GeneratorImpl& gen = Build();
-
- ASSERT_TRUE(gen.Generate());
- EXPECT_EQ(
- gen.result(),
- R"(void set_vector_float4x2(inout float4x2 mat, int col, float2 val) {
+ Func("fn", {}, ty.void_(),
+ {
+ Decl(Var("lhs", ty.mat4x2<f32>())),
+ Decl(Var("rhs", ty.vec2<f32>())),
+ Decl(Var("index", ty.u32())),
+ Assign(IndexAccessor("lhs", "index"), "rhs"),
+ });
+
+ GeneratorImpl& gen = Build();
+
+ ASSERT_TRUE(gen.Generate());
+ EXPECT_EQ(gen.result(),
+ R"(void set_vector_float4x2(inout float4x2 mat, int col, float2 val) {
switch (col) {
case 0: mat[0] = val; break;
case 1: mat[1] = val; break;
@@ -142,42 +143,41 @@ void fn() {
}
TEST_F(HlslGeneratorImplTest_Assign, Emit_Matrix_Assign_Scalar_ConstantIndex) {
- Func("fn", {}, ty.void_(),
- {
- Decl(Var("lhs", ty.mat4x2<f32>())),
- Decl(Var("rhs", ty.f32())),
- Decl(Const("index", ty.u32(), Expr(0u))),
- Assign(IndexAccessor(IndexAccessor("lhs", "index"), "index"), "rhs"),
- });
-
- GeneratorImpl& gen = Build();
-
- ASSERT_TRUE(gen.Generate());
- EXPECT_EQ(gen.result(),
- R"(void fn() {
+ Func("fn", {}, ty.void_(),
+ {
+ Decl(Var("lhs", ty.mat4x2<f32>())),
+ Decl(Var("rhs", ty.f32())),
+ Decl(Let("index", ty.u32(), Expr(0_u))),
+ Assign(IndexAccessor(IndexAccessor("lhs", "index"), "index"), "rhs"),
+ });
+
+ GeneratorImpl& gen = Build();
+
+ ASSERT_TRUE(gen.Generate());
+ EXPECT_EQ(gen.result(),
+ R"(void fn() {
float4x2 lhs = float4x2(0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f);
float rhs = 0.0f;
const uint index = 0u;
- lhs[index][index] = rhs;
+ lhs[0u][0u] = rhs;
}
)");
}
TEST_F(HlslGeneratorImplTest_Assign, Emit_Matrix_Assign_Scalar_DynamicIndex) {
- Func("fn", {}, ty.void_(),
- {
- Decl(Var("lhs", ty.mat4x2<f32>())),
- Decl(Var("rhs", ty.f32())),
- Decl(Var("index", ty.u32())),
- Assign(IndexAccessor(IndexAccessor("lhs", "index"), "index"), "rhs"),
- });
-
- GeneratorImpl& gen = Build();
-
- ASSERT_TRUE(gen.Generate());
- EXPECT_EQ(
- gen.result(),
- R"(void set_scalar_float4x2(inout float4x2 mat, int col, int row, float val) {
+ Func("fn", {}, ty.void_(),
+ {
+ Decl(Var("lhs", ty.mat4x2<f32>())),
+ Decl(Var("rhs", ty.f32())),
+ Decl(Var("index", ty.u32())),
+ Assign(IndexAccessor(IndexAccessor("lhs", "index"), "index"), "rhs"),
+ });
+
+ GeneratorImpl& gen = Build();
+
+ ASSERT_TRUE(gen.Generate());
+ EXPECT_EQ(gen.result(),
+ R"(void set_scalar_float4x2(inout float4x2 mat, int col, int row, float val) {
switch (col) {
case 0:
mat[0] = (row.xx == int2(0, 1)) ? val.xx : mat[0];
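The DynamicIndex expectations above show the shape of the FXC workaround that the EmitDynamicVectorAssignment and EmitDynamicMatrixVectorAssignment declarations document: rather than a dynamically indexed store, the generated set_float3 and set_vector_float4x2 helpers write through a per-lane select or a per-column switch. A plain C++ analogue of the vector case, for illustration only (not Tint output):

#include <array>

// Mirrors the generated HLSL helper set_float3(inout float3 vec, int idx, float val):
// the HLSL body `vec = (idx.xxx == int3(0, 1, 2)) ? val.xxx : vec;` is a per-lane
// select, written out here as an explicit loop over the three lanes.
void set_float3(std::array<float, 3>& vec, int idx, float val) {
    for (int lane = 0; lane < 3; ++lane) {
        vec[lane] = (lane == idx) ? val : vec[lane];
    }
}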
diff --git a/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_binary_test.cc b/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_binary_test.cc
index f720db7a835..58255368bb7 100644
--- a/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_binary_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_binary_test.cc
@@ -16,261 +16,247 @@
#include "src/tint/ast/variable_decl_statement.h"
#include "src/tint/writer/hlsl/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::hlsl {
namespace {
using HlslGeneratorImplTest_Binary = TestHelper;
struct BinaryData {
- const char* result;
- ast::BinaryOp op;
+ const char* result;
+ ast::BinaryOp op;
- enum Types { All = 0b11, Integer = 0b10, Float = 0b01 };
- Types valid_for = Types::All;
+ enum Types { All = 0b11, Integer = 0b10, Float = 0b01 };
+ Types valid_for = Types::All;
};
inline std::ostream& operator<<(std::ostream& out, BinaryData data) {
- out << data.op;
- return out;
+ out << data.op;
+ return out;
}
using HlslBinaryTest = TestParamHelper<BinaryData>;
TEST_P(HlslBinaryTest, Emit_f32) {
- auto params = GetParam();
+ auto params = GetParam();
- if ((params.valid_for & BinaryData::Types::Float) == 0) {
- return;
- }
+ if ((params.valid_for & BinaryData::Types::Float) == 0) {
+ return;
+ }
- // Skip ops that are illegal for this type
- if (params.op == ast::BinaryOp::kAnd || params.op == ast::BinaryOp::kOr ||
- params.op == ast::BinaryOp::kXor ||
- params.op == ast::BinaryOp::kShiftLeft ||
- params.op == ast::BinaryOp::kShiftRight) {
- return;
- }
+ // Skip ops that are illegal for this type
+ if (params.op == ast::BinaryOp::kAnd || params.op == ast::BinaryOp::kOr ||
+ params.op == ast::BinaryOp::kXor || params.op == ast::BinaryOp::kShiftLeft ||
+ params.op == ast::BinaryOp::kShiftRight) {
+ return;
+ }
- Global("left", ty.f32(), ast::StorageClass::kPrivate);
- Global("right", ty.f32(), ast::StorageClass::kPrivate);
+ Global("left", ty.f32(), ast::StorageClass::kPrivate);
+ Global("right", ty.f32(), ast::StorageClass::kPrivate);
- auto* left = Expr("left");
- auto* right = Expr("right");
+ auto* left = Expr("left");
+ auto* right = Expr("right");
- auto* expr = create<ast::BinaryExpression>(params.op, left, right);
+ auto* expr = create<ast::BinaryExpression>(params.op, left, right);
- WrapInFunction(expr);
+ WrapInFunction(expr);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
- EXPECT_EQ(out.str(), params.result);
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
+ EXPECT_EQ(out.str(), params.result);
}
TEST_P(HlslBinaryTest, Emit_u32) {
- auto params = GetParam();
+ auto params = GetParam();
- if ((params.valid_for & BinaryData::Types::Integer) == 0) {
- return;
- }
+ if ((params.valid_for & BinaryData::Types::Integer) == 0) {
+ return;
+ }
- Global("left", ty.u32(), ast::StorageClass::kPrivate);
- Global("right", ty.u32(), ast::StorageClass::kPrivate);
+ Global("left", ty.u32(), ast::StorageClass::kPrivate);
+ Global("right", ty.u32(), ast::StorageClass::kPrivate);
- auto* left = Expr("left");
- auto* right = Expr("right");
+ auto* left = Expr("left");
+ auto* right = Expr("right");
- auto* expr = create<ast::BinaryExpression>(params.op, left, right);
+ auto* expr = create<ast::BinaryExpression>(params.op, left, right);
- WrapInFunction(expr);
+ WrapInFunction(expr);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
- EXPECT_EQ(out.str(), params.result);
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
+ EXPECT_EQ(out.str(), params.result);
}
TEST_P(HlslBinaryTest, Emit_i32) {
- auto params = GetParam();
+ auto params = GetParam();
- if ((params.valid_for & BinaryData::Types::Integer) == 0) {
- return;
- }
+ if ((params.valid_for & BinaryData::Types::Integer) == 0) {
+ return;
+ }
- // Skip ops that are illegal for this type
- if (params.op == ast::BinaryOp::kShiftLeft ||
- params.op == ast::BinaryOp::kShiftRight) {
- return;
- }
+ // Skip ops that are illegal for this type
+ if (params.op == ast::BinaryOp::kShiftLeft || params.op == ast::BinaryOp::kShiftRight) {
+ return;
+ }
- Global("left", ty.i32(), ast::StorageClass::kPrivate);
- Global("right", ty.i32(), ast::StorageClass::kPrivate);
+ Global("left", ty.i32(), ast::StorageClass::kPrivate);
+ Global("right", ty.i32(), ast::StorageClass::kPrivate);
- auto* left = Expr("left");
- auto* right = Expr("right");
+ auto* left = Expr("left");
+ auto* right = Expr("right");
- auto* expr = create<ast::BinaryExpression>(params.op, left, right);
+ auto* expr = create<ast::BinaryExpression>(params.op, left, right);
- WrapInFunction(expr);
+ WrapInFunction(expr);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
- EXPECT_EQ(out.str(), params.result);
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
+ EXPECT_EQ(out.str(), params.result);
}
INSTANTIATE_TEST_SUITE_P(
HlslGeneratorImplTest,
HlslBinaryTest,
- testing::Values(
- BinaryData{"(left & right)", ast::BinaryOp::kAnd},
- BinaryData{"(left | right)", ast::BinaryOp::kOr},
- BinaryData{"(left ^ right)", ast::BinaryOp::kXor},
- BinaryData{"(left == right)", ast::BinaryOp::kEqual},
- BinaryData{"(left != right)", ast::BinaryOp::kNotEqual},
- BinaryData{"(left < right)", ast::BinaryOp::kLessThan},
- BinaryData{"(left > right)", ast::BinaryOp::kGreaterThan},
- BinaryData{"(left <= right)", ast::BinaryOp::kLessThanEqual},
- BinaryData{"(left >= right)", ast::BinaryOp::kGreaterThanEqual},
- BinaryData{"(left << right)", ast::BinaryOp::kShiftLeft},
- BinaryData{"(left >> right)", ast::BinaryOp::kShiftRight},
- BinaryData{"(left + right)", ast::BinaryOp::kAdd},
- BinaryData{"(left - right)", ast::BinaryOp::kSubtract},
- BinaryData{"(left * right)", ast::BinaryOp::kMultiply},
- // NOTE: Integer divide covered by DivOrModBy* tests below
- BinaryData{"(left / right)", ast::BinaryOp::kDivide,
- BinaryData::Types::Float},
- // NOTE: Integer modulo covered by DivOrModBy* tests below
- BinaryData{"(left % right)", ast::BinaryOp::kModulo,
- BinaryData::Types::Float}));
+ testing::Values(BinaryData{"(left & right)", ast::BinaryOp::kAnd},
+ BinaryData{"(left | right)", ast::BinaryOp::kOr},
+ BinaryData{"(left ^ right)", ast::BinaryOp::kXor},
+ BinaryData{"(left == right)", ast::BinaryOp::kEqual},
+ BinaryData{"(left != right)", ast::BinaryOp::kNotEqual},
+ BinaryData{"(left < right)", ast::BinaryOp::kLessThan},
+ BinaryData{"(left > right)", ast::BinaryOp::kGreaterThan},
+ BinaryData{"(left <= right)", ast::BinaryOp::kLessThanEqual},
+ BinaryData{"(left >= right)", ast::BinaryOp::kGreaterThanEqual},
+ BinaryData{"(left << right)", ast::BinaryOp::kShiftLeft},
+ BinaryData{"(left >> right)", ast::BinaryOp::kShiftRight},
+ BinaryData{"(left + right)", ast::BinaryOp::kAdd},
+ BinaryData{"(left - right)", ast::BinaryOp::kSubtract},
+ BinaryData{"(left * right)", ast::BinaryOp::kMultiply},
+ // NOTE: Integer divide covered by DivOrModBy* tests below
+ BinaryData{"(left / right)", ast::BinaryOp::kDivide, BinaryData::Types::Float},
+ // NOTE: Integer modulo covered by DivOrModBy* tests below
+ BinaryData{"(left % right)", ast::BinaryOp::kModulo,
+ BinaryData::Types::Float}));
TEST_F(HlslGeneratorImplTest_Binary, Multiply_VectorScalar) {
- auto* lhs = vec3<f32>(1.f, 1.f, 1.f);
- auto* rhs = Expr(1.f);
+ auto* lhs = vec3<f32>(1_f, 1_f, 1_f);
+ auto* rhs = Expr(1_f);
- auto* expr =
- create<ast::BinaryExpression>(ast::BinaryOp::kMultiply, lhs, rhs);
+ auto* expr = create<ast::BinaryExpression>(ast::BinaryOp::kMultiply, lhs, rhs);
- WrapInFunction(expr);
+ WrapInFunction(expr);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- EXPECT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
- EXPECT_EQ(out.str(),
- "(float3(1.0f, 1.0f, 1.0f) * "
- "1.0f)");
+ std::stringstream out;
+ EXPECT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
+ EXPECT_EQ(out.str(), "((1.0f).xxx * 1.0f)");
}
TEST_F(HlslGeneratorImplTest_Binary, Multiply_ScalarVector) {
- auto* lhs = Expr(1.f);
- auto* rhs = vec3<f32>(1.f, 1.f, 1.f);
+ auto* lhs = Expr(1_f);
+ auto* rhs = vec3<f32>(1_f, 1_f, 1_f);
- auto* expr =
- create<ast::BinaryExpression>(ast::BinaryOp::kMultiply, lhs, rhs);
+ auto* expr = create<ast::BinaryExpression>(ast::BinaryOp::kMultiply, lhs, rhs);
- WrapInFunction(expr);
+ WrapInFunction(expr);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- EXPECT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
- EXPECT_EQ(out.str(),
- "(1.0f * float3(1.0f, 1.0f, "
- "1.0f))");
+ std::stringstream out;
+ EXPECT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
+ EXPECT_EQ(out.str(), "(1.0f * (1.0f).xxx)");
}
TEST_F(HlslGeneratorImplTest_Binary, Multiply_MatrixScalar) {
- Global("mat", ty.mat3x3<f32>(), ast::StorageClass::kPrivate);
- auto* lhs = Expr("mat");
- auto* rhs = Expr(1.f);
+ Global("mat", ty.mat3x3<f32>(), ast::StorageClass::kPrivate);
+ auto* lhs = Expr("mat");
+ auto* rhs = Expr(1_f);
- auto* expr =
- create<ast::BinaryExpression>(ast::BinaryOp::kMultiply, lhs, rhs);
- WrapInFunction(expr);
+ auto* expr = create<ast::BinaryExpression>(ast::BinaryOp::kMultiply, lhs, rhs);
+ WrapInFunction(expr);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- EXPECT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
- EXPECT_EQ(out.str(), "(mat * 1.0f)");
+ std::stringstream out;
+ EXPECT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
+ EXPECT_EQ(out.str(), "(mat * 1.0f)");
}
TEST_F(HlslGeneratorImplTest_Binary, Multiply_ScalarMatrix) {
- Global("mat", ty.mat3x3<f32>(), ast::StorageClass::kPrivate);
- auto* lhs = Expr(1.f);
- auto* rhs = Expr("mat");
+ Global("mat", ty.mat3x3<f32>(), ast::StorageClass::kPrivate);
+ auto* lhs = Expr(1_f);
+ auto* rhs = Expr("mat");
- auto* expr =
- create<ast::BinaryExpression>(ast::BinaryOp::kMultiply, lhs, rhs);
- WrapInFunction(expr);
+ auto* expr = create<ast::BinaryExpression>(ast::BinaryOp::kMultiply, lhs, rhs);
+ WrapInFunction(expr);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- EXPECT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
- EXPECT_EQ(out.str(), "(1.0f * mat)");
+ std::stringstream out;
+ EXPECT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
+ EXPECT_EQ(out.str(), "(1.0f * mat)");
}
TEST_F(HlslGeneratorImplTest_Binary, Multiply_MatrixVector) {
- Global("mat", ty.mat3x3<f32>(), ast::StorageClass::kPrivate);
- auto* lhs = Expr("mat");
- auto* rhs = vec3<f32>(1.f, 1.f, 1.f);
+ Global("mat", ty.mat3x3<f32>(), ast::StorageClass::kPrivate);
+ auto* lhs = Expr("mat");
+ auto* rhs = vec3<f32>(1_f, 1_f, 1_f);
- auto* expr =
- create<ast::BinaryExpression>(ast::BinaryOp::kMultiply, lhs, rhs);
- WrapInFunction(expr);
+ auto* expr = create<ast::BinaryExpression>(ast::BinaryOp::kMultiply, lhs, rhs);
+ WrapInFunction(expr);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- EXPECT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
- EXPECT_EQ(out.str(), "mul(float3(1.0f, 1.0f, 1.0f), mat)");
+ std::stringstream out;
+ EXPECT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
+ EXPECT_EQ(out.str(), "mul((1.0f).xxx, mat)");
}
TEST_F(HlslGeneratorImplTest_Binary, Multiply_VectorMatrix) {
- Global("mat", ty.mat3x3<f32>(), ast::StorageClass::kPrivate);
- auto* lhs = vec3<f32>(1.f, 1.f, 1.f);
- auto* rhs = Expr("mat");
+ Global("mat", ty.mat3x3<f32>(), ast::StorageClass::kPrivate);
+ auto* lhs = vec3<f32>(1_f, 1_f, 1_f);
+ auto* rhs = Expr("mat");
- auto* expr =
- create<ast::BinaryExpression>(ast::BinaryOp::kMultiply, lhs, rhs);
- WrapInFunction(expr);
+ auto* expr = create<ast::BinaryExpression>(ast::BinaryOp::kMultiply, lhs, rhs);
+ WrapInFunction(expr);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- EXPECT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
- EXPECT_EQ(out.str(), "mul(mat, float3(1.0f, 1.0f, 1.0f))");
+ std::stringstream out;
+ EXPECT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
+ EXPECT_EQ(out.str(), "mul(mat, (1.0f).xxx)");
}
TEST_F(HlslGeneratorImplTest_Binary, Multiply_MatrixMatrix) {
- Global("lhs", ty.mat3x3<f32>(), ast::StorageClass::kPrivate);
- Global("rhs", ty.mat3x3<f32>(), ast::StorageClass::kPrivate);
+ Global("lhs", ty.mat3x3<f32>(), ast::StorageClass::kPrivate);
+ Global("rhs", ty.mat3x3<f32>(), ast::StorageClass::kPrivate);
- auto* expr = create<ast::BinaryExpression>(ast::BinaryOp::kMultiply,
- Expr("lhs"), Expr("rhs"));
- WrapInFunction(expr);
+ auto* expr = create<ast::BinaryExpression>(ast::BinaryOp::kMultiply, Expr("lhs"), Expr("rhs"));
+ WrapInFunction(expr);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- EXPECT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
- EXPECT_EQ(out.str(), "mul(rhs, lhs)");
+ std::stringstream out;
+ EXPECT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
+ EXPECT_EQ(out.str(), "mul(rhs, lhs)");
}
TEST_F(HlslGeneratorImplTest_Binary, Logical_And) {
- Global("a", ty.bool_(), ast::StorageClass::kPrivate);
- Global("b", ty.bool_(), ast::StorageClass::kPrivate);
+ Global("a", ty.bool_(), ast::StorageClass::kPrivate);
+ Global("b", ty.bool_(), ast::StorageClass::kPrivate);
- auto* expr = create<ast::BinaryExpression>(ast::BinaryOp::kLogicalAnd,
- Expr("a"), Expr("b"));
- WrapInFunction(expr);
+ auto* expr = create<ast::BinaryExpression>(ast::BinaryOp::kLogicalAnd, Expr("a"), Expr("b"));
+ WrapInFunction(expr);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
- EXPECT_EQ(out.str(), "(tint_tmp)");
- EXPECT_EQ(gen.result(), R"(bool tint_tmp = a;
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
+ EXPECT_EQ(out.str(), "(tint_tmp)");
+ EXPECT_EQ(gen.result(), R"(bool tint_tmp = a;
if (tint_tmp) {
tint_tmp = b;
}
@@ -278,26 +264,24 @@ if (tint_tmp) {
}
TEST_F(HlslGeneratorImplTest_Binary, Logical_Multi) {
- // (a && b) || (c || d)
- Global("a", ty.bool_(), ast::StorageClass::kPrivate);
- Global("b", ty.bool_(), ast::StorageClass::kPrivate);
- Global("c", ty.bool_(), ast::StorageClass::kPrivate);
- Global("d", ty.bool_(), ast::StorageClass::kPrivate);
-
- auto* expr = create<ast::BinaryExpression>(
- ast::BinaryOp::kLogicalOr,
- create<ast::BinaryExpression>(ast::BinaryOp::kLogicalAnd, Expr("a"),
- Expr("b")),
- create<ast::BinaryExpression>(ast::BinaryOp::kLogicalOr, Expr("c"),
- Expr("d")));
- WrapInFunction(expr);
-
- GeneratorImpl& gen = Build();
-
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
- EXPECT_EQ(out.str(), "(tint_tmp)");
- EXPECT_EQ(gen.result(), R"(bool tint_tmp_1 = a;
+ // (a && b) || (c || d)
+ Global("a", ty.bool_(), ast::StorageClass::kPrivate);
+ Global("b", ty.bool_(), ast::StorageClass::kPrivate);
+ Global("c", ty.bool_(), ast::StorageClass::kPrivate);
+ Global("d", ty.bool_(), ast::StorageClass::kPrivate);
+
+ auto* expr = create<ast::BinaryExpression>(
+ ast::BinaryOp::kLogicalOr,
+ create<ast::BinaryExpression>(ast::BinaryOp::kLogicalAnd, Expr("a"), Expr("b")),
+ create<ast::BinaryExpression>(ast::BinaryOp::kLogicalOr, Expr("c"), Expr("d")));
+ WrapInFunction(expr);
+
+ GeneratorImpl& gen = Build();
+
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
+ EXPECT_EQ(out.str(), "(tint_tmp)");
+ EXPECT_EQ(gen.result(), R"(bool tint_tmp_1 = a;
if (tint_tmp_1) {
tint_tmp_1 = b;
}
@@ -313,19 +297,18 @@ if (!tint_tmp) {
}
TEST_F(HlslGeneratorImplTest_Binary, Logical_Or) {
- Global("a", ty.bool_(), ast::StorageClass::kPrivate);
- Global("b", ty.bool_(), ast::StorageClass::kPrivate);
+ Global("a", ty.bool_(), ast::StorageClass::kPrivate);
+ Global("b", ty.bool_(), ast::StorageClass::kPrivate);
- auto* expr = create<ast::BinaryExpression>(ast::BinaryOp::kLogicalOr,
- Expr("a"), Expr("b"));
- WrapInFunction(expr);
+ auto* expr = create<ast::BinaryExpression>(ast::BinaryOp::kLogicalOr, Expr("a"), Expr("b"));
+ WrapInFunction(expr);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
- EXPECT_EQ(out.str(), "(tint_tmp)");
- EXPECT_EQ(gen.result(), R"(bool tint_tmp = a;
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
+ EXPECT_EQ(out.str(), "(tint_tmp)");
+ EXPECT_EQ(gen.result(), R"(bool tint_tmp = a;
if (!tint_tmp) {
tint_tmp = b;
}
@@ -333,31 +316,29 @@ if (!tint_tmp) {
}
TEST_F(HlslGeneratorImplTest_Binary, If_WithLogical) {
- // if (a && b) {
- // return 1;
- // } else if (b || c) {
- // return 2;
- // } else {
- // return 3;
- // }
-
- Global("a", ty.bool_(), ast::StorageClass::kPrivate);
- Global("b", ty.bool_(), ast::StorageClass::kPrivate);
- Global("c", ty.bool_(), ast::StorageClass::kPrivate);
-
- auto* expr = If(create<ast::BinaryExpression>(ast::BinaryOp::kLogicalAnd,
- Expr("a"), Expr("b")),
- Block(Return(1)),
- Else(create<ast::BinaryExpression>(ast::BinaryOp::kLogicalOr,
- Expr("b"), Expr("c")),
- Block(Return(2))),
- Else(Block(Return(3))));
- Func("func", {}, ty.i32(), {WrapInStatement(expr)});
-
- GeneratorImpl& gen = Build();
-
- ASSERT_TRUE(gen.EmitStatement(expr)) << gen.error();
- EXPECT_EQ(gen.result(), R"(bool tint_tmp = a;
+ // if (a && b) {
+ // return 1i;
+ // } else if (b || c) {
+ // return 2i;
+ // } else {
+ // return 3i;
+ // }
+
+ Global("a", ty.bool_(), ast::StorageClass::kPrivate);
+ Global("b", ty.bool_(), ast::StorageClass::kPrivate);
+ Global("c", ty.bool_(), ast::StorageClass::kPrivate);
+
+ auto* expr =
+ If(create<ast::BinaryExpression>(ast::BinaryOp::kLogicalAnd, Expr("a"), Expr("b")),
+ Block(Return(1_i)),
+ Else(If(create<ast::BinaryExpression>(ast::BinaryOp::kLogicalOr, Expr("b"), Expr("c")),
+ Block(Return(2_i)), Else(Block(Return(3_i))))));
+ Func("func", {}, ty.i32(), {WrapInStatement(expr)});
+
+ GeneratorImpl& gen = Build();
+
+ ASSERT_TRUE(gen.EmitStatement(expr)) << gen.error();
+ EXPECT_EQ(gen.result(), R"(bool tint_tmp = a;
if (tint_tmp) {
tint_tmp = b;
}
@@ -378,23 +359,22 @@ if ((tint_tmp)) {
}
TEST_F(HlslGeneratorImplTest_Binary, Return_WithLogical) {
- // return (a && b) || c;
+ // return (a && b) || c;
- Global("a", ty.bool_(), ast::StorageClass::kPrivate);
- Global("b", ty.bool_(), ast::StorageClass::kPrivate);
- Global("c", ty.bool_(), ast::StorageClass::kPrivate);
+ Global("a", ty.bool_(), ast::StorageClass::kPrivate);
+ Global("b", ty.bool_(), ast::StorageClass::kPrivate);
+ Global("c", ty.bool_(), ast::StorageClass::kPrivate);
- auto* expr = Return(create<ast::BinaryExpression>(
- ast::BinaryOp::kLogicalOr,
- create<ast::BinaryExpression>(ast::BinaryOp::kLogicalAnd, Expr("a"),
- Expr("b")),
- Expr("c")));
- Func("func", {}, ty.bool_(), {WrapInStatement(expr)});
+ auto* expr = Return(create<ast::BinaryExpression>(
+ ast::BinaryOp::kLogicalOr,
+ create<ast::BinaryExpression>(ast::BinaryOp::kLogicalAnd, Expr("a"), Expr("b")),
+ Expr("c")));
+ Func("func", {}, ty.bool_(), {WrapInStatement(expr)});
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.EmitStatement(expr)) << gen.error();
- EXPECT_EQ(gen.result(), R"(bool tint_tmp_1 = a;
+ ASSERT_TRUE(gen.EmitStatement(expr)) << gen.error();
+ EXPECT_EQ(gen.result(), R"(bool tint_tmp_1 = a;
if (tint_tmp_1) {
tint_tmp_1 = b;
}
@@ -407,25 +387,25 @@ return (tint_tmp);
}
TEST_F(HlslGeneratorImplTest_Binary, Assign_WithLogical) {
- // a = (b || c) && d;
+ // a = (b || c) && d;
- Global("a", ty.bool_(), ast::StorageClass::kPrivate);
- Global("b", ty.bool_(), ast::StorageClass::kPrivate);
- Global("c", ty.bool_(), ast::StorageClass::kPrivate);
- Global("d", ty.bool_(), ast::StorageClass::kPrivate);
+ Global("a", ty.bool_(), ast::StorageClass::kPrivate);
+ Global("b", ty.bool_(), ast::StorageClass::kPrivate);
+ Global("c", ty.bool_(), ast::StorageClass::kPrivate);
+ Global("d", ty.bool_(), ast::StorageClass::kPrivate);
- auto* expr = Assign(
- Expr("a"), create<ast::BinaryExpression>(
- ast::BinaryOp::kLogicalAnd,
- create<ast::BinaryExpression>(ast::BinaryOp::kLogicalOr,
- Expr("b"), Expr("c")),
- Expr("d")));
- WrapInFunction(expr);
+ auto* expr =
+ Assign(Expr("a"),
+ create<ast::BinaryExpression>(
+ ast::BinaryOp::kLogicalAnd,
+ create<ast::BinaryExpression>(ast::BinaryOp::kLogicalOr, Expr("b"), Expr("c")),
+ Expr("d")));
+ WrapInFunction(expr);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.EmitStatement(expr)) << gen.error();
- EXPECT_EQ(gen.result(), R"(bool tint_tmp_1 = b;
+ ASSERT_TRUE(gen.EmitStatement(expr)) << gen.error();
+ EXPECT_EQ(gen.result(), R"(bool tint_tmp_1 = b;
if (!tint_tmp_1) {
tint_tmp_1 = c;
}
@@ -438,26 +418,26 @@ a = (tint_tmp);
}
TEST_F(HlslGeneratorImplTest_Binary, Decl_WithLogical) {
- // var a : bool = (b && c) || d;
+ // var a : bool = (b && c) || d;
- Global("b", ty.bool_(), ast::StorageClass::kPrivate);
- Global("c", ty.bool_(), ast::StorageClass::kPrivate);
- Global("d", ty.bool_(), ast::StorageClass::kPrivate);
+ Global("b", ty.bool_(), ast::StorageClass::kPrivate);
+ Global("c", ty.bool_(), ast::StorageClass::kPrivate);
+ Global("d", ty.bool_(), ast::StorageClass::kPrivate);
- auto* var = Var("a", ty.bool_(), ast::StorageClass::kNone,
- create<ast::BinaryExpression>(
- ast::BinaryOp::kLogicalOr,
- create<ast::BinaryExpression>(ast::BinaryOp::kLogicalAnd,
- Expr("b"), Expr("c")),
- Expr("d")));
+ auto* var =
+ Var("a", ty.bool_(), ast::StorageClass::kNone,
+ create<ast::BinaryExpression>(
+ ast::BinaryOp::kLogicalOr,
+ create<ast::BinaryExpression>(ast::BinaryOp::kLogicalAnd, Expr("b"), Expr("c")),
+ Expr("d")));
- auto* decl = Decl(var);
- WrapInFunction(decl);
+ auto* decl = Decl(var);
+ WrapInFunction(decl);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.EmitStatement(decl)) << gen.error();
- EXPECT_EQ(gen.result(), R"(bool tint_tmp_1 = b;
+ ASSERT_TRUE(gen.EmitStatement(decl)) << gen.error();
+ EXPECT_EQ(gen.result(), R"(bool tint_tmp_1 = b;
if (tint_tmp_1) {
tint_tmp_1 = c;
}
@@ -470,39 +450,37 @@ bool a = (tint_tmp);
}
TEST_F(HlslGeneratorImplTest_Binary, Call_WithLogical) {
- // foo(a && b, c || d, (a || c) && (b || d))
-
- Func("foo",
- {
- Param(Sym(), ty.bool_()),
- Param(Sym(), ty.bool_()),
- Param(Sym(), ty.bool_()),
- },
- ty.void_(), ast::StatementList{}, ast::AttributeList{});
- Global("a", ty.bool_(), ast::StorageClass::kPrivate);
- Global("b", ty.bool_(), ast::StorageClass::kPrivate);
- Global("c", ty.bool_(), ast::StorageClass::kPrivate);
- Global("d", ty.bool_(), ast::StorageClass::kPrivate);
-
- ast::ExpressionList params;
- params.push_back(create<ast::BinaryExpression>(ast::BinaryOp::kLogicalAnd,
- Expr("a"), Expr("b")));
- params.push_back(create<ast::BinaryExpression>(ast::BinaryOp::kLogicalOr,
- Expr("c"), Expr("d")));
- params.push_back(create<ast::BinaryExpression>(
- ast::BinaryOp::kLogicalAnd,
- create<ast::BinaryExpression>(ast::BinaryOp::kLogicalOr, Expr("a"),
- Expr("c")),
- create<ast::BinaryExpression>(ast::BinaryOp::kLogicalOr, Expr("b"),
- Expr("d"))));
-
- auto* expr = CallStmt(Call("foo", params));
- WrapInFunction(expr);
-
- GeneratorImpl& gen = Build();
-
- ASSERT_TRUE(gen.EmitStatement(expr)) << gen.error();
- EXPECT_EQ(gen.result(), R"(bool tint_tmp = a;
+ // foo(a && b, c || d, (a || c) && (b || d))
+
+ Func("foo",
+ {
+ Param(Sym(), ty.bool_()),
+ Param(Sym(), ty.bool_()),
+ Param(Sym(), ty.bool_()),
+ },
+ ty.void_(), ast::StatementList{}, ast::AttributeList{});
+ Global("a", ty.bool_(), ast::StorageClass::kPrivate);
+ Global("b", ty.bool_(), ast::StorageClass::kPrivate);
+ Global("c", ty.bool_(), ast::StorageClass::kPrivate);
+ Global("d", ty.bool_(), ast::StorageClass::kPrivate);
+
+ ast::ExpressionList params;
+ params.push_back(
+ create<ast::BinaryExpression>(ast::BinaryOp::kLogicalAnd, Expr("a"), Expr("b")));
+ params.push_back(
+ create<ast::BinaryExpression>(ast::BinaryOp::kLogicalOr, Expr("c"), Expr("d")));
+ params.push_back(create<ast::BinaryExpression>(
+ ast::BinaryOp::kLogicalAnd,
+ create<ast::BinaryExpression>(ast::BinaryOp::kLogicalOr, Expr("a"), Expr("c")),
+ create<ast::BinaryExpression>(ast::BinaryOp::kLogicalOr, Expr("b"), Expr("d"))));
+
+ auto* expr = CallStmt(Call("foo", params));
+ WrapInFunction(expr);
+
+ GeneratorImpl& gen = Build();
+
+ ASSERT_TRUE(gen.EmitStatement(expr)) << gen.error();
+ EXPECT_EQ(gen.result(), R"(bool tint_tmp = a;
if (tint_tmp) {
tint_tmp = b;
}
@@ -529,188 +507,184 @@ foo((tint_tmp), (tint_tmp_1), (tint_tmp_2));
namespace HlslGeneratorDivMod {
struct Params {
- enum class Type { Div, Mod };
- Type type;
+ enum class Type { Div, Mod };
+ Type type;
};
struct HlslGeneratorDivModTest : TestParamHelper<Params> {
- std::string Token() {
- return GetParam().type == Params::Type::Div ? "/" : "%";
- }
+ std::string Token() { return GetParam().type == Params::Type::Div ? "/" : "%"; }
- template <typename... Args>
- auto Op(Args... args) {
- return GetParam().type == Params::Type::Div
- ? Div(std::forward<Args>(args)...)
- : Mod(std::forward<Args>(args)...);
- }
+ template <typename... Args>
+ auto Op(Args... args) {
+ return GetParam().type == Params::Type::Div ? Div(std::forward<Args>(args)...)
+ : Mod(std::forward<Args>(args)...);
+ }
};
INSTANTIATE_TEST_SUITE_P(HlslGeneratorImplTest,
HlslGeneratorDivModTest,
- testing::Values(Params{Params::Type::Div},
- Params{Params::Type::Mod}));
+ testing::Values(Params{Params::Type::Div}, Params{Params::Type::Mod}));
TEST_P(HlslGeneratorDivModTest, DivOrModByLiteralZero_i32) {
- Func("fn", {}, ty.void_(),
- {
- Decl(Var("a", ty.i32())),
- Decl(Const("r", nullptr, Op("a", 0))),
- });
+ Func("fn", {}, ty.void_(),
+ {
+ Decl(Var("a", ty.i32())),
+ Decl(Let("r", nullptr, Op("a", 0_i))),
+ });
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate());
- EXPECT_EQ(gen.result(), R"(void fn() {
+ ASSERT_TRUE(gen.Generate());
+ EXPECT_EQ(gen.result(), R"(void fn() {
int a = 0;
const int r = (a )" + Token() +
- R"( 1);
+ R"( 1);
}
)");
}
TEST_P(HlslGeneratorDivModTest, DivOrModByLiteralZero_u32) {
- Func("fn", {}, ty.void_(),
- {
- Decl(Var("a", ty.u32())),
- Decl(Const("r", nullptr, Op("a", 0u))),
- });
+ Func("fn", {}, ty.void_(),
+ {
+ Decl(Var("a", ty.u32())),
+ Decl(Let("r", nullptr, Op("a", 0_u))),
+ });
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate());
- EXPECT_EQ(gen.result(), R"(void fn() {
+ ASSERT_TRUE(gen.Generate());
+ EXPECT_EQ(gen.result(), R"(void fn() {
uint a = 0u;
const uint r = (a )" + Token() +
- R"( 1u);
+ R"( 1u);
}
)");
} // namespace HlslGeneratorDivMod
TEST_P(HlslGeneratorDivModTest, DivOrModByLiteralZero_vec_by_vec_i32) {
- Func("fn", {}, ty.void_(),
- {
- Decl(Var("a", nullptr, vec4<i32>(100, 100, 100, 100))),
- Decl(Const("r", nullptr, Op("a", vec4<i32>(50, 0, 25, 0)))),
- });
+ Func("fn", {}, ty.void_(),
+ {
+ Decl(Var("a", nullptr, vec4<i32>(100_i, 100_i, 100_i, 100_i))),
+ Decl(Let("r", nullptr, Op("a", vec4<i32>(50_i, 0_i, 25_i, 0_i)))),
+ });
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate());
- EXPECT_EQ(gen.result(), R"(void fn() {
- int4 a = int4(100, 100, 100, 100);
+ ASSERT_TRUE(gen.Generate());
+ EXPECT_EQ(gen.result(), R"(void fn() {
+ int4 a = (100).xxxx;
const int4 r = (a )" + Token() +
- R"( int4(50, 1, 25, 1));
+ R"( int4(50, 1, 25, 1));
}
)");
}
TEST_P(HlslGeneratorDivModTest, DivOrModByLiteralZero_vec_by_scalar_i32) {
- Func("fn", {}, ty.void_(),
- {
- Decl(Var("a", nullptr, vec4<i32>(100, 100, 100, 100))),
- Decl(Const("r", nullptr, Op("a", 0))),
- });
+ Func("fn", {}, ty.void_(),
+ {
+ Decl(Var("a", nullptr, vec4<i32>(100_i, 100_i, 100_i, 100_i))),
+ Decl(Let("r", nullptr, Op("a", 0_i))),
+ });
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate());
- EXPECT_EQ(gen.result(), R"(void fn() {
- int4 a = int4(100, 100, 100, 100);
+ ASSERT_TRUE(gen.Generate());
+ EXPECT_EQ(gen.result(), R"(void fn() {
+ int4 a = (100).xxxx;
const int4 r = (a )" + Token() +
- R"( 1);
+ R"( 1);
}
)");
}
TEST_P(HlslGeneratorDivModTest, DivOrModByIdentifier_i32) {
- Func("fn", {Param("b", ty.i32())}, ty.void_(),
- {
- Decl(Var("a", ty.i32())),
- Decl(Const("r", nullptr, Op("a", "b"))),
- });
+ Func("fn", {Param("b", ty.i32())}, ty.void_(),
+ {
+ Decl(Var("a", ty.i32())),
+ Decl(Let("r", nullptr, Op("a", "b"))),
+ });
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate());
- EXPECT_EQ(gen.result(), R"(void fn(int b) {
+ ASSERT_TRUE(gen.Generate());
+ EXPECT_EQ(gen.result(), R"(void fn(int b) {
int a = 0;
const int r = (a )" + Token() +
- R"( (b == 0 ? 1 : b));
+ R"( (b == 0 ? 1 : b));
}
)");
}
TEST_P(HlslGeneratorDivModTest, DivOrModByIdentifier_u32) {
- Func("fn", {Param("b", ty.u32())}, ty.void_(),
- {
- Decl(Var("a", ty.u32())),
- Decl(Const("r", nullptr, Op("a", "b"))),
- });
+ Func("fn", {Param("b", ty.u32())}, ty.void_(),
+ {
+ Decl(Var("a", ty.u32())),
+ Decl(Let("r", nullptr, Op("a", "b"))),
+ });
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate());
- EXPECT_EQ(gen.result(), R"(void fn(uint b) {
+ ASSERT_TRUE(gen.Generate());
+ EXPECT_EQ(gen.result(), R"(void fn(uint b) {
uint a = 0u;
const uint r = (a )" + Token() +
- R"( (b == 0u ? 1u : b));
+ R"( (b == 0u ? 1u : b));
}
)");
}
TEST_P(HlslGeneratorDivModTest, DivOrModByIdentifier_vec_by_vec_i32) {
- Func("fn", {Param("b", ty.vec3<i32>())}, ty.void_(),
- {
- Decl(Var("a", ty.vec3<i32>())),
- Decl(Const("r", nullptr, Op("a", "b"))),
- });
+ Func("fn", {Param("b", ty.vec3<i32>())}, ty.void_(),
+ {
+ Decl(Var("a", ty.vec3<i32>())),
+ Decl(Let("r", nullptr, Op("a", "b"))),
+ });
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate());
- EXPECT_EQ(gen.result(), R"(void fn(int3 b) {
+ ASSERT_TRUE(gen.Generate());
+ EXPECT_EQ(gen.result(), R"(void fn(int3 b) {
int3 a = int3(0, 0, 0);
const int3 r = (a )" + Token() +
- R"( (b == int3(0, 0, 0) ? int3(1, 1, 1) : b));
+ R"( (b == int3(0, 0, 0) ? int3(1, 1, 1) : b));
}
)");
}
TEST_P(HlslGeneratorDivModTest, DivOrModByIdentifier_vec_by_scalar_i32) {
- Func("fn", {Param("b", ty.i32())}, ty.void_(),
- {
- Decl(Var("a", ty.vec3<i32>())),
- Decl(Const("r", nullptr, Op("a", "b"))),
- });
+ Func("fn", {Param("b", ty.i32())}, ty.void_(),
+ {
+ Decl(Var("a", ty.vec3<i32>())),
+ Decl(Let("r", nullptr, Op("a", "b"))),
+ });
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate());
- EXPECT_EQ(gen.result(), R"(void fn(int b) {
+ ASSERT_TRUE(gen.Generate());
+ EXPECT_EQ(gen.result(), R"(void fn(int b) {
int3 a = int3(0, 0, 0);
const int3 r = (a )" + Token() +
- R"( (b == 0 ? 1 : b));
+ R"( (b == 0 ? 1 : b));
}
)");
}
TEST_P(HlslGeneratorDivModTest, DivOrModByExpression_i32) {
- Func("zero", {}, ty.i32(),
- {
- Return(Expr(0)),
- });
+ Func("zero", {}, ty.i32(),
+ {
+ Return(Expr(0_i)),
+ });
- Func("fn", {}, ty.void_(),
- {
- Decl(Var("a", ty.i32())),
- Decl(Const("r", nullptr, Op("a", Call("zero")))),
- });
+ Func("fn", {}, ty.void_(),
+ {
+ Decl(Var("a", ty.i32())),
+ Decl(Let("r", nullptr, Op("a", Call("zero")))),
+ });
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate());
- EXPECT_EQ(gen.result(), R"(int value_or_one_if_zero_int(int value) {
+ ASSERT_TRUE(gen.Generate());
+ EXPECT_EQ(gen.result(), R"(int value_or_one_if_zero_int(int value) {
return value == 0 ? 1 : value;
}
@@ -721,27 +695,27 @@ int zero() {
void fn() {
int a = 0;
const int r = (a )" + Token() +
- R"( value_or_one_if_zero_int(zero()));
+ R"( value_or_one_if_zero_int(zero()));
}
)");
}
TEST_P(HlslGeneratorDivModTest, DivOrModByExpression_u32) {
- Func("zero", {}, ty.u32(),
- {
- Return(Expr(0u)),
- });
+ Func("zero", {}, ty.u32(),
+ {
+ Return(Expr(0_u)),
+ });
- Func("fn", {}, ty.void_(),
- {
- Decl(Var("a", ty.u32())),
- Decl(Const("r", nullptr, Op("a", Call("zero")))),
- });
+ Func("fn", {}, ty.void_(),
+ {
+ Decl(Var("a", ty.u32())),
+ Decl(Let("r", nullptr, Op("a", Call("zero")))),
+ });
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate());
- EXPECT_EQ(gen.result(), R"(uint value_or_one_if_zero_uint(uint value) {
+ ASSERT_TRUE(gen.Generate());
+ EXPECT_EQ(gen.result(), R"(uint value_or_one_if_zero_uint(uint value) {
return value == 0u ? 1u : value;
}
@@ -752,58 +726,58 @@ uint zero() {
void fn() {
uint a = 0u;
const uint r = (a )" + Token() +
- R"( value_or_one_if_zero_uint(zero()));
+ R"( value_or_one_if_zero_uint(zero()));
}
)");
}
TEST_P(HlslGeneratorDivModTest, DivOrModByExpression_vec_by_vec_i32) {
- Func("zero", {}, ty.vec3<i32>(),
- {
- Return(vec3<i32>(0, 0, 0)),
- });
+ Func("zero", {}, ty.vec3<i32>(),
+ {
+ Return(vec3<i32>(0_i, 0_i, 0_i)),
+ });
- Func("fn", {}, ty.void_(),
- {
- Decl(Var("a", ty.vec3<i32>())),
- Decl(Const("r", nullptr, Op("a", Call("zero")))),
- });
+ Func("fn", {}, ty.void_(),
+ {
+ Decl(Var("a", ty.vec3<i32>())),
+ Decl(Let("r", nullptr, Op("a", Call("zero")))),
+ });
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate());
- EXPECT_EQ(gen.result(), R"(int3 value_or_one_if_zero_int3(int3 value) {
+ ASSERT_TRUE(gen.Generate());
+ EXPECT_EQ(gen.result(), R"(int3 value_or_one_if_zero_int3(int3 value) {
return value == int3(0, 0, 0) ? int3(1, 1, 1) : value;
}
int3 zero() {
- return int3(0, 0, 0);
+ return (0).xxx;
}
void fn() {
int3 a = int3(0, 0, 0);
const int3 r = (a )" + Token() +
- R"( value_or_one_if_zero_int3(zero()));
+ R"( value_or_one_if_zero_int3(zero()));
}
)");
}
TEST_P(HlslGeneratorDivModTest, DivOrModByExpression_vec_by_scalar_i32) {
- Func("zero", {}, ty.i32(),
- {
- Return(0),
- });
+ Func("zero", {}, ty.i32(),
+ {
+ Return(0_i),
+ });
- Func("fn", {}, ty.void_(),
- {
- Decl(Var("a", ty.vec3<i32>())),
- Decl(Const("r", nullptr, Op("a", Call("zero")))),
- });
+ Func("fn", {}, ty.void_(),
+ {
+ Decl(Var("a", ty.vec3<i32>())),
+ Decl(Let("r", nullptr, Op("a", Call("zero")))),
+ });
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate());
- EXPECT_EQ(gen.result(), R"(int value_or_one_if_zero_int(int value) {
+ ASSERT_TRUE(gen.Generate());
+ EXPECT_EQ(gen.result(), R"(int value_or_one_if_zero_int(int value) {
return value == 0 ? 1 : value;
}
@@ -814,7 +788,7 @@ int zero() {
void fn() {
int3 a = int3(0, 0, 0);
const int3 r = (a )" + Token() +
- R"( value_or_one_if_zero_int(zero()));
+ R"( value_or_one_if_zero_int(zero()));
}
)");
}
diff --git a/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_bitcast_test.cc b/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_bitcast_test.cc
index 19c7ed78fb1..8305d641356 100644
--- a/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_bitcast_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_bitcast_test.cc
@@ -14,42 +14,44 @@
#include "src/tint/writer/hlsl/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::hlsl {
namespace {
using HlslGeneratorImplTest_Bitcast = TestHelper;
TEST_F(HlslGeneratorImplTest_Bitcast, EmitExpression_Bitcast_Float) {
- auto* bitcast = create<ast::BitcastExpression>(ty.f32(), Expr(1));
- WrapInFunction(bitcast);
+ auto* bitcast = create<ast::BitcastExpression>(ty.f32(), Expr(1_i));
+ WrapInFunction(bitcast);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, bitcast)) << gen.error();
- EXPECT_EQ(out.str(), "asfloat(1)");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, bitcast)) << gen.error();
+ EXPECT_EQ(out.str(), "asfloat(1)");
}
TEST_F(HlslGeneratorImplTest_Bitcast, EmitExpression_Bitcast_Int) {
- auto* bitcast = create<ast::BitcastExpression>(ty.i32(), Expr(1u));
- WrapInFunction(bitcast);
+ auto* bitcast = create<ast::BitcastExpression>(ty.i32(), Expr(1_u));
+ WrapInFunction(bitcast);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, bitcast)) << gen.error();
- EXPECT_EQ(out.str(), "asint(1u)");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, bitcast)) << gen.error();
+ EXPECT_EQ(out.str(), "asint(1u)");
}
TEST_F(HlslGeneratorImplTest_Bitcast, EmitExpression_Bitcast_Uint) {
- auto* bitcast = create<ast::BitcastExpression>(ty.u32(), Expr(1));
- WrapInFunction(bitcast);
+ auto* bitcast = create<ast::BitcastExpression>(ty.u32(), Expr(1_i));
+ WrapInFunction(bitcast);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, bitcast)) << gen.error();
- EXPECT_EQ(out.str(), "asuint(1)");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, bitcast)) << gen.error();
+ EXPECT_EQ(out.str(), "asuint(1)");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_block_test.cc b/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_block_test.cc
index 2f6cd13fd71..9a6cada22b3 100644
--- a/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_block_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_block_test.cc
@@ -20,15 +20,15 @@ namespace {
using HlslGeneratorImplTest_Block = TestHelper;
TEST_F(HlslGeneratorImplTest_Block, Emit_Block) {
- auto* b = Block(create<ast::DiscardStatement>());
- WrapInFunction(b);
+ auto* b = Block(create<ast::DiscardStatement>());
+ WrapInFunction(b);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(b)) << gen.error();
- EXPECT_EQ(gen.result(), R"( {
+ ASSERT_TRUE(gen.EmitStatement(b)) << gen.error();
+ EXPECT_EQ(gen.result(), R"( {
discard;
}
)");
diff --git a/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_break_test.cc b/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_break_test.cc
index 4a2c0385985..4e4ecf1e99d 100644
--- a/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_break_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_break_test.cc
@@ -20,15 +20,15 @@ namespace {
using HlslGeneratorImplTest_Break = TestHelper;
TEST_F(HlslGeneratorImplTest_Break, Emit_Break) {
- auto* b = create<ast::BreakStatement>();
- WrapInFunction(Loop(Block(b)));
+ auto* b = create<ast::BreakStatement>();
+ WrapInFunction(Loop(Block(b)));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(b)) << gen.error();
- EXPECT_EQ(gen.result(), " break;\n");
+ ASSERT_TRUE(gen.EmitStatement(b)) << gen.error();
+ EXPECT_EQ(gen.result(), " break;\n");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_builtin_test.cc b/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_builtin_test.cc
index 95d59afa9dd..b356ccd21e7 100644
--- a/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_builtin_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_builtin_test.cc
@@ -18,275 +18,274 @@
#include "src/tint/sem/call.h"
#include "src/tint/writer/hlsl/test_helper.h"
+using ::testing::HasSubstr;
+
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::hlsl {
namespace {
using BuiltinType = sem::BuiltinType;
-
-using ::testing::HasSubstr;
-
using HlslGeneratorImplTest_Builtin = TestHelper;
enum class ParamType {
- kF32,
- kU32,
- kBool,
+ kF32,
+ kU32,
+ kBool,
};
struct BuiltinData {
- BuiltinType builtin;
- ParamType type;
- const char* hlsl_name;
+ BuiltinType builtin;
+ ParamType type;
+ const char* hlsl_name;
};
inline std::ostream& operator<<(std::ostream& out, BuiltinData data) {
- out << data.hlsl_name;
- switch (data.type) {
- case ParamType::kF32:
- out << "f32";
- break;
- case ParamType::kU32:
- out << "u32";
- break;
- case ParamType::kBool:
- out << "bool";
- break;
- }
- out << ">";
- return out;
+ out << data.hlsl_name;
+ switch (data.type) {
+ case ParamType::kF32:
+ out << "f32";
+ break;
+ case ParamType::kU32:
+ out << "u32";
+ break;
+ case ParamType::kBool:
+ out << "bool";
+ break;
+ }
+ out << ">";
+ return out;
}
const ast::CallExpression* GenerateCall(BuiltinType builtin,
ParamType type,
ProgramBuilder* builder) {
- std::string name;
- std::ostringstream str(name);
- str << builtin;
- switch (builtin) {
- case BuiltinType::kAcos:
- case BuiltinType::kAsin:
- case BuiltinType::kAtan:
- case BuiltinType::kCeil:
- case BuiltinType::kCos:
- case BuiltinType::kCosh:
- case BuiltinType::kDpdx:
- case BuiltinType::kDpdxCoarse:
- case BuiltinType::kDpdxFine:
- case BuiltinType::kDpdy:
- case BuiltinType::kDpdyCoarse:
- case BuiltinType::kDpdyFine:
- case BuiltinType::kExp:
- case BuiltinType::kExp2:
- case BuiltinType::kFloor:
- case BuiltinType::kFract:
- case BuiltinType::kFwidth:
- case BuiltinType::kFwidthCoarse:
- case BuiltinType::kFwidthFine:
- case BuiltinType::kInverseSqrt:
- case BuiltinType::kLength:
- case BuiltinType::kLog:
- case BuiltinType::kLog2:
- case BuiltinType::kNormalize:
- case BuiltinType::kRound:
- case BuiltinType::kSin:
- case BuiltinType::kSinh:
- case BuiltinType::kSqrt:
- case BuiltinType::kTan:
- case BuiltinType::kTanh:
- case BuiltinType::kTrunc:
- case BuiltinType::kSign:
- return builder->Call(str.str(), "f2");
- case BuiltinType::kLdexp:
- return builder->Call(str.str(), "f2", "i2");
- case BuiltinType::kAtan2:
- case BuiltinType::kDot:
- case BuiltinType::kDistance:
- case BuiltinType::kPow:
- case BuiltinType::kReflect:
- case BuiltinType::kStep:
- return builder->Call(str.str(), "f2", "f2");
- case BuiltinType::kCross:
- return builder->Call(str.str(), "f3", "f3");
- case BuiltinType::kFma:
- case BuiltinType::kMix:
- case BuiltinType::kFaceForward:
- case BuiltinType::kSmoothstep:
- case BuiltinType::kSmoothStep:
- return builder->Call(str.str(), "f2", "f2", "f2");
- case BuiltinType::kAll:
- case BuiltinType::kAny:
- return builder->Call(str.str(), "b2");
- case BuiltinType::kAbs:
- if (type == ParamType::kF32) {
- return builder->Call(str.str(), "f2");
- } else {
- return builder->Call(str.str(), "u2");
- }
- case BuiltinType::kCountOneBits:
- case BuiltinType::kReverseBits:
- return builder->Call(str.str(), "u2");
- case BuiltinType::kMax:
- case BuiltinType::kMin:
- if (type == ParamType::kF32) {
- return builder->Call(str.str(), "f2", "f2");
- } else {
- return builder->Call(str.str(), "u2", "u2");
- }
- case BuiltinType::kClamp:
- if (type == ParamType::kF32) {
- return builder->Call(str.str(), "f2", "f2", "f2");
- } else {
- return builder->Call(str.str(), "u2", "u2", "u2");
- }
- case BuiltinType::kSelect:
- return builder->Call(str.str(), "f2", "f2", "b2");
- case BuiltinType::kDeterminant:
- return builder->Call(str.str(), "m2x2");
- case BuiltinType::kTranspose:
- return builder->Call(str.str(), "m3x2");
- default:
- break;
- }
- return nullptr;
+ std::string name;
+ std::ostringstream str(name);
+ str << builtin;
+ switch (builtin) {
+ case BuiltinType::kAcos:
+ case BuiltinType::kAsin:
+ case BuiltinType::kAtan:
+ case BuiltinType::kCeil:
+ case BuiltinType::kCos:
+ case BuiltinType::kCosh:
+ case BuiltinType::kDpdx:
+ case BuiltinType::kDpdxCoarse:
+ case BuiltinType::kDpdxFine:
+ case BuiltinType::kDpdy:
+ case BuiltinType::kDpdyCoarse:
+ case BuiltinType::kDpdyFine:
+ case BuiltinType::kExp:
+ case BuiltinType::kExp2:
+ case BuiltinType::kFloor:
+ case BuiltinType::kFract:
+ case BuiltinType::kFwidth:
+ case BuiltinType::kFwidthCoarse:
+ case BuiltinType::kFwidthFine:
+ case BuiltinType::kInverseSqrt:
+ case BuiltinType::kLength:
+ case BuiltinType::kLog:
+ case BuiltinType::kLog2:
+ case BuiltinType::kNormalize:
+ case BuiltinType::kRound:
+ case BuiltinType::kSin:
+ case BuiltinType::kSinh:
+ case BuiltinType::kSqrt:
+ case BuiltinType::kTan:
+ case BuiltinType::kTanh:
+ case BuiltinType::kTrunc:
+ case BuiltinType::kSign:
+ return builder->Call(str.str(), "f2");
+ case BuiltinType::kLdexp:
+ return builder->Call(str.str(), "f2", "i2");
+ case BuiltinType::kAtan2:
+ case BuiltinType::kDot:
+ case BuiltinType::kDistance:
+ case BuiltinType::kPow:
+ case BuiltinType::kReflect:
+ case BuiltinType::kStep:
+ return builder->Call(str.str(), "f2", "f2");
+ case BuiltinType::kCross:
+ return builder->Call(str.str(), "f3", "f3");
+ case BuiltinType::kFma:
+ case BuiltinType::kMix:
+ case BuiltinType::kFaceForward:
+ case BuiltinType::kSmoothstep:
+ case BuiltinType::kSmoothStep:
+ return builder->Call(str.str(), "f2", "f2", "f2");
+ case BuiltinType::kAll:
+ case BuiltinType::kAny:
+ return builder->Call(str.str(), "b2");
+ case BuiltinType::kAbs:
+ if (type == ParamType::kF32) {
+ return builder->Call(str.str(), "f2");
+ } else {
+ return builder->Call(str.str(), "u2");
+ }
+ case BuiltinType::kCountOneBits:
+ case BuiltinType::kReverseBits:
+ return builder->Call(str.str(), "u2");
+ case BuiltinType::kMax:
+ case BuiltinType::kMin:
+ if (type == ParamType::kF32) {
+ return builder->Call(str.str(), "f2", "f2");
+ } else {
+ return builder->Call(str.str(), "u2", "u2");
+ }
+ case BuiltinType::kClamp:
+ if (type == ParamType::kF32) {
+ return builder->Call(str.str(), "f2", "f2", "f2");
+ } else {
+ return builder->Call(str.str(), "u2", "u2", "u2");
+ }
+ case BuiltinType::kSelect:
+ return builder->Call(str.str(), "f2", "f2", "b2");
+ case BuiltinType::kDeterminant:
+ return builder->Call(str.str(), "m2x2");
+ case BuiltinType::kTranspose:
+ return builder->Call(str.str(), "m3x2");
+ default:
+ break;
+ }
+ return nullptr;
}
using HlslBuiltinTest = TestParamHelper<BuiltinData>;
TEST_P(HlslBuiltinTest, Emit) {
- auto param = GetParam();
+ auto param = GetParam();
- Global("f2", ty.vec2<f32>(), ast::StorageClass::kPrivate);
- Global("f3", ty.vec3<f32>(), ast::StorageClass::kPrivate);
- Global("u2", ty.vec2<u32>(), ast::StorageClass::kPrivate);
- Global("i2", ty.vec2<i32>(), ast::StorageClass::kPrivate);
- Global("b2", ty.vec2<bool>(), ast::StorageClass::kPrivate);
- Global("m2x2", ty.mat2x2<f32>(), ast::StorageClass::kPrivate);
- Global("m3x2", ty.mat3x2<f32>(), ast::StorageClass::kPrivate);
+ Global("f2", ty.vec2<f32>(), ast::StorageClass::kPrivate);
+ Global("f3", ty.vec3<f32>(), ast::StorageClass::kPrivate);
+ Global("u2", ty.vec2<u32>(), ast::StorageClass::kPrivate);
+ Global("i2", ty.vec2<i32>(), ast::StorageClass::kPrivate);
+ Global("b2", ty.vec2<bool>(), ast::StorageClass::kPrivate);
+ Global("m2x2", ty.mat2x2<f32>(), ast::StorageClass::kPrivate);
+ Global("m3x2", ty.mat3x2<f32>(), ast::StorageClass::kPrivate);
- auto* call = GenerateCall(param.builtin, param.type, this);
- ASSERT_NE(nullptr, call) << "Unhandled builtin";
- Func("func", {}, ty.void_(), {CallStmt(call)},
- {create<ast::StageAttribute>(ast::PipelineStage::kFragment)});
+ auto* call = GenerateCall(param.builtin, param.type, this);
+ ASSERT_NE(nullptr, call) << "Unhandled builtin";
+ Func("func", {}, ty.void_(), {CallStmt(call)},
+ {create<ast::StageAttribute>(ast::PipelineStage::kFragment)});
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- auto* sem = program->Sem().Get(call);
- ASSERT_NE(sem, nullptr);
- auto* target = sem->Target();
- ASSERT_NE(target, nullptr);
- auto* builtin = target->As<sem::Builtin>();
- ASSERT_NE(builtin, nullptr);
+ auto* sem = program->Sem().Get<sem::Call>(call);
+ ASSERT_NE(sem, nullptr);
+ auto* target = sem->Target();
+ ASSERT_NE(target, nullptr);
+ auto* builtin = target->As<sem::Builtin>();
+ ASSERT_NE(builtin, nullptr);
- EXPECT_EQ(gen.generate_builtin_name(builtin), param.hlsl_name);
+ EXPECT_EQ(gen.generate_builtin_name(builtin), param.hlsl_name);
}
INSTANTIATE_TEST_SUITE_P(
HlslGeneratorImplTest_Builtin,
HlslBuiltinTest,
- testing::Values(
- BuiltinData{BuiltinType::kAbs, ParamType::kF32, "abs"},
- BuiltinData{BuiltinType::kAbs, ParamType::kU32, "abs"},
- BuiltinData{BuiltinType::kAcos, ParamType::kF32, "acos"},
- BuiltinData{BuiltinType::kAll, ParamType::kBool, "all"},
- BuiltinData{BuiltinType::kAny, ParamType::kBool, "any"},
- BuiltinData{BuiltinType::kAsin, ParamType::kF32, "asin"},
- BuiltinData{BuiltinType::kAtan, ParamType::kF32, "atan"},
- BuiltinData{BuiltinType::kAtan2, ParamType::kF32, "atan2"},
- BuiltinData{BuiltinType::kCeil, ParamType::kF32, "ceil"},
- BuiltinData{BuiltinType::kClamp, ParamType::kF32, "clamp"},
- BuiltinData{BuiltinType::kClamp, ParamType::kU32, "clamp"},
- BuiltinData{BuiltinType::kCos, ParamType::kF32, "cos"},
- BuiltinData{BuiltinType::kCosh, ParamType::kF32, "cosh"},
- BuiltinData{BuiltinType::kCountOneBits, ParamType::kU32, "countbits"},
- BuiltinData{BuiltinType::kCross, ParamType::kF32, "cross"},
- BuiltinData{BuiltinType::kDeterminant, ParamType::kF32, "determinant"},
- BuiltinData{BuiltinType::kDistance, ParamType::kF32, "distance"},
- BuiltinData{BuiltinType::kDot, ParamType::kF32, "dot"},
- BuiltinData{BuiltinType::kDpdx, ParamType::kF32, "ddx"},
- BuiltinData{BuiltinType::kDpdxCoarse, ParamType::kF32, "ddx_coarse"},
- BuiltinData{BuiltinType::kDpdxFine, ParamType::kF32, "ddx_fine"},
- BuiltinData{BuiltinType::kDpdy, ParamType::kF32, "ddy"},
- BuiltinData{BuiltinType::kDpdyCoarse, ParamType::kF32, "ddy_coarse"},
- BuiltinData{BuiltinType::kDpdyFine, ParamType::kF32, "ddy_fine"},
- BuiltinData{BuiltinType::kExp, ParamType::kF32, "exp"},
- BuiltinData{BuiltinType::kExp2, ParamType::kF32, "exp2"},
- BuiltinData{BuiltinType::kFaceForward, ParamType::kF32, "faceforward"},
- BuiltinData{BuiltinType::kFloor, ParamType::kF32, "floor"},
- BuiltinData{BuiltinType::kFma, ParamType::kF32, "mad"},
- BuiltinData{BuiltinType::kFract, ParamType::kF32, "frac"},
- BuiltinData{BuiltinType::kFwidth, ParamType::kF32, "fwidth"},
- BuiltinData{BuiltinType::kFwidthCoarse, ParamType::kF32, "fwidth"},
- BuiltinData{BuiltinType::kFwidthFine, ParamType::kF32, "fwidth"},
- BuiltinData{BuiltinType::kInverseSqrt, ParamType::kF32, "rsqrt"},
- BuiltinData{BuiltinType::kLdexp, ParamType::kF32, "ldexp"},
- BuiltinData{BuiltinType::kLength, ParamType::kF32, "length"},
- BuiltinData{BuiltinType::kLog, ParamType::kF32, "log"},
- BuiltinData{BuiltinType::kLog2, ParamType::kF32, "log2"},
- BuiltinData{BuiltinType::kMax, ParamType::kF32, "max"},
- BuiltinData{BuiltinType::kMax, ParamType::kU32, "max"},
- BuiltinData{BuiltinType::kMin, ParamType::kF32, "min"},
- BuiltinData{BuiltinType::kMin, ParamType::kU32, "min"},
- BuiltinData{BuiltinType::kMix, ParamType::kF32, "lerp"},
- BuiltinData{BuiltinType::kNormalize, ParamType::kF32, "normalize"},
- BuiltinData{BuiltinType::kPow, ParamType::kF32, "pow"},
- BuiltinData{BuiltinType::kReflect, ParamType::kF32, "reflect"},
- BuiltinData{BuiltinType::kReverseBits, ParamType::kU32, "reversebits"},
- BuiltinData{BuiltinType::kRound, ParamType::kU32, "round"},
- BuiltinData{BuiltinType::kSign, ParamType::kF32, "sign"},
- BuiltinData{BuiltinType::kSin, ParamType::kF32, "sin"},
- BuiltinData{BuiltinType::kSinh, ParamType::kF32, "sinh"},
- BuiltinData{BuiltinType::kSmoothstep, ParamType::kF32, "smoothstep"},
- BuiltinData{BuiltinType::kSmoothStep, ParamType::kF32, "smoothstep"},
- BuiltinData{BuiltinType::kSqrt, ParamType::kF32, "sqrt"},
- BuiltinData{BuiltinType::kStep, ParamType::kF32, "step"},
- BuiltinData{BuiltinType::kTan, ParamType::kF32, "tan"},
- BuiltinData{BuiltinType::kTanh, ParamType::kF32, "tanh"},
- BuiltinData{BuiltinType::kTranspose, ParamType::kF32, "transpose"},
- BuiltinData{BuiltinType::kTrunc, ParamType::kF32, "trunc"}));
+ testing::Values(BuiltinData{BuiltinType::kAbs, ParamType::kF32, "abs"},
+ BuiltinData{BuiltinType::kAbs, ParamType::kU32, "abs"},
+ BuiltinData{BuiltinType::kAcos, ParamType::kF32, "acos"},
+ BuiltinData{BuiltinType::kAll, ParamType::kBool, "all"},
+ BuiltinData{BuiltinType::kAny, ParamType::kBool, "any"},
+ BuiltinData{BuiltinType::kAsin, ParamType::kF32, "asin"},
+ BuiltinData{BuiltinType::kAtan, ParamType::kF32, "atan"},
+ BuiltinData{BuiltinType::kAtan2, ParamType::kF32, "atan2"},
+ BuiltinData{BuiltinType::kCeil, ParamType::kF32, "ceil"},
+ BuiltinData{BuiltinType::kClamp, ParamType::kF32, "clamp"},
+ BuiltinData{BuiltinType::kClamp, ParamType::kU32, "clamp"},
+ BuiltinData{BuiltinType::kCos, ParamType::kF32, "cos"},
+ BuiltinData{BuiltinType::kCosh, ParamType::kF32, "cosh"},
+ BuiltinData{BuiltinType::kCountOneBits, ParamType::kU32, "countbits"},
+ BuiltinData{BuiltinType::kCross, ParamType::kF32, "cross"},
+ BuiltinData{BuiltinType::kDeterminant, ParamType::kF32, "determinant"},
+ BuiltinData{BuiltinType::kDistance, ParamType::kF32, "distance"},
+ BuiltinData{BuiltinType::kDot, ParamType::kF32, "dot"},
+ BuiltinData{BuiltinType::kDpdx, ParamType::kF32, "ddx"},
+ BuiltinData{BuiltinType::kDpdxCoarse, ParamType::kF32, "ddx_coarse"},
+ BuiltinData{BuiltinType::kDpdxFine, ParamType::kF32, "ddx_fine"},
+ BuiltinData{BuiltinType::kDpdy, ParamType::kF32, "ddy"},
+ BuiltinData{BuiltinType::kDpdyCoarse, ParamType::kF32, "ddy_coarse"},
+ BuiltinData{BuiltinType::kDpdyFine, ParamType::kF32, "ddy_fine"},
+ BuiltinData{BuiltinType::kExp, ParamType::kF32, "exp"},
+ BuiltinData{BuiltinType::kExp2, ParamType::kF32, "exp2"},
+ BuiltinData{BuiltinType::kFaceForward, ParamType::kF32, "faceforward"},
+ BuiltinData{BuiltinType::kFloor, ParamType::kF32, "floor"},
+ BuiltinData{BuiltinType::kFma, ParamType::kF32, "mad"},
+ BuiltinData{BuiltinType::kFract, ParamType::kF32, "frac"},
+ BuiltinData{BuiltinType::kFwidth, ParamType::kF32, "fwidth"},
+ BuiltinData{BuiltinType::kFwidthCoarse, ParamType::kF32, "fwidth"},
+ BuiltinData{BuiltinType::kFwidthFine, ParamType::kF32, "fwidth"},
+ BuiltinData{BuiltinType::kInverseSqrt, ParamType::kF32, "rsqrt"},
+ BuiltinData{BuiltinType::kLdexp, ParamType::kF32, "ldexp"},
+ BuiltinData{BuiltinType::kLength, ParamType::kF32, "length"},
+ BuiltinData{BuiltinType::kLog, ParamType::kF32, "log"},
+ BuiltinData{BuiltinType::kLog2, ParamType::kF32, "log2"},
+ BuiltinData{BuiltinType::kMax, ParamType::kF32, "max"},
+ BuiltinData{BuiltinType::kMax, ParamType::kU32, "max"},
+ BuiltinData{BuiltinType::kMin, ParamType::kF32, "min"},
+ BuiltinData{BuiltinType::kMin, ParamType::kU32, "min"},
+ BuiltinData{BuiltinType::kMix, ParamType::kF32, "lerp"},
+ BuiltinData{BuiltinType::kNormalize, ParamType::kF32, "normalize"},
+ BuiltinData{BuiltinType::kPow, ParamType::kF32, "pow"},
+ BuiltinData{BuiltinType::kReflect, ParamType::kF32, "reflect"},
+ BuiltinData{BuiltinType::kReverseBits, ParamType::kU32, "reversebits"},
+ BuiltinData{BuiltinType::kRound, ParamType::kU32, "round"},
+ BuiltinData{BuiltinType::kSign, ParamType::kF32, "sign"},
+ BuiltinData{BuiltinType::kSin, ParamType::kF32, "sin"},
+ BuiltinData{BuiltinType::kSinh, ParamType::kF32, "sinh"},
+ BuiltinData{BuiltinType::kSmoothstep, ParamType::kF32, "smoothstep"},
+ BuiltinData{BuiltinType::kSmoothStep, ParamType::kF32, "smoothstep"},
+ BuiltinData{BuiltinType::kSqrt, ParamType::kF32, "sqrt"},
+ BuiltinData{BuiltinType::kStep, ParamType::kF32, "step"},
+ BuiltinData{BuiltinType::kTan, ParamType::kF32, "tan"},
+ BuiltinData{BuiltinType::kTanh, ParamType::kF32, "tanh"},
+ BuiltinData{BuiltinType::kTranspose, ParamType::kF32, "transpose"},
+ BuiltinData{BuiltinType::kTrunc, ParamType::kF32, "trunc"}));
TEST_F(HlslGeneratorImplTest_Builtin, Builtin_Call) {
- auto* call = Call("dot", "param1", "param2");
+ auto* call = Call("dot", "param1", "param2");
- Global("param1", ty.vec3<f32>(), ast::StorageClass::kPrivate);
- Global("param2", ty.vec3<f32>(), ast::StorageClass::kPrivate);
+ Global("param1", ty.vec3<f32>(), ast::StorageClass::kPrivate);
+ Global("param2", ty.vec3<f32>(), ast::StorageClass::kPrivate);
- WrapInFunction(CallStmt(call));
+ WrapInFunction(CallStmt(call));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, call)) << gen.error();
- EXPECT_EQ(out.str(), "dot(param1, param2)");
+ gen.increment_indent();
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, call)) << gen.error();
+ EXPECT_EQ(out.str(), "dot(param1, param2)");
}
TEST_F(HlslGeneratorImplTest_Builtin, Select_Scalar) {
- auto* call = Call("select", 1.0f, 2.0f, true);
- WrapInFunction(CallStmt(call));
- GeneratorImpl& gen = Build();
+ auto* call = Call("select", 1_f, 2_f, true);
+ WrapInFunction(CallStmt(call));
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, call)) << gen.error();
- EXPECT_EQ(out.str(), "(true ? 2.0f : 1.0f)");
+ gen.increment_indent();
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, call)) << gen.error();
+ EXPECT_EQ(out.str(), "(true ? 2.0f : 1.0f)");
}
TEST_F(HlslGeneratorImplTest_Builtin, Select_Vector) {
- auto* call =
- Call("select", vec2<i32>(1, 2), vec2<i32>(3, 4), vec2<bool>(true, false));
- WrapInFunction(CallStmt(call));
- GeneratorImpl& gen = Build();
+ auto* call = Call("select", vec2<i32>(1_i, 2_i), vec2<i32>(3_i, 4_i), vec2<bool>(true, false));
+ WrapInFunction(CallStmt(call));
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, call)) << gen.error();
- EXPECT_EQ(out.str(), "(bool2(true, false) ? int2(3, 4) : int2(1, 2))");
+ gen.increment_indent();
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, call)) << gen.error();
+ EXPECT_EQ(out.str(), "(bool2(true, false) ? int2(3, 4) : int2(1, 2))");
}
TEST_F(HlslGeneratorImplTest_Builtin, Modf_Scalar) {
- auto* call = Call("modf", 1.0f);
- WrapInFunction(CallStmt(call));
+ auto* call = Call("modf", 1_f);
+ WrapInFunction(CallStmt(call));
- GeneratorImpl& gen = SanitizeAndBuild();
+ GeneratorImpl& gen = SanitizeAndBuild();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(struct modf_result {
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(struct modf_result {
float fract;
float whole;
};
@@ -306,13 +305,13 @@ void test_function() {
}
TEST_F(HlslGeneratorImplTest_Builtin, Modf_Vector) {
- auto* call = Call("modf", vec3<f32>());
- WrapInFunction(CallStmt(call));
+ auto* call = Call("modf", vec3<f32>());
+ WrapInFunction(CallStmt(call));
- GeneratorImpl& gen = SanitizeAndBuild();
+ GeneratorImpl& gen = SanitizeAndBuild();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(struct modf_result_vec3 {
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(struct modf_result_vec3 {
float3 fract;
float3 whole;
};
@@ -325,20 +324,20 @@ modf_result_vec3 tint_modf(float3 param_0) {
[numthreads(1, 1, 1)]
void test_function() {
- tint_modf(float3(0.0f, 0.0f, 0.0f));
+ tint_modf((0.0f).xxx);
return;
}
)");
}
TEST_F(HlslGeneratorImplTest_Builtin, Frexp_Scalar_i32) {
- auto* call = Call("frexp", 1.0f);
- WrapInFunction(CallStmt(call));
+ auto* call = Call("frexp", 1_f);
+ WrapInFunction(CallStmt(call));
- GeneratorImpl& gen = SanitizeAndBuild();
+ GeneratorImpl& gen = SanitizeAndBuild();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(struct frexp_result {
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(struct frexp_result {
float sig;
int exp;
};
@@ -358,13 +357,13 @@ void test_function() {
}
TEST_F(HlslGeneratorImplTest_Builtin, Frexp_Vector_i32) {
- auto* call = Call("frexp", vec3<f32>());
- WrapInFunction(CallStmt(call));
+ auto* call = Call("frexp", vec3<f32>());
+ WrapInFunction(CallStmt(call));
- GeneratorImpl& gen = SanitizeAndBuild();
+ GeneratorImpl& gen = SanitizeAndBuild();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(struct frexp_result_vec3 {
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(struct frexp_result_vec3 {
float3 sig;
int3 exp;
};
@@ -377,21 +376,21 @@ frexp_result_vec3 tint_frexp(float3 param_0) {
[numthreads(1, 1, 1)]
void test_function() {
- tint_frexp(float3(0.0f, 0.0f, 0.0f));
+ tint_frexp((0.0f).xxx);
return;
}
)");
}
TEST_F(HlslGeneratorImplTest_Builtin, Degrees_Scalar) {
- auto* val = Var("val", ty.f32());
- auto* call = Call("degrees", val);
- WrapInFunction(val, call);
+ auto* val = Var("val", ty.f32());
+ auto* call = Call("degrees", val);
+ WrapInFunction(val, call);
- GeneratorImpl& gen = SanitizeAndBuild();
+ GeneratorImpl& gen = SanitizeAndBuild();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(float tint_degrees(float param_0) {
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(float tint_degrees(float param_0) {
return param_0 * 57.295779513082322865;
}
@@ -405,14 +404,14 @@ void test_function() {
}
TEST_F(HlslGeneratorImplTest_Builtin, Degrees_Vector) {
- auto* val = Var("val", ty.vec3<f32>());
- auto* call = Call("degrees", val);
- WrapInFunction(val, call);
+ auto* val = Var("val", ty.vec3<f32>());
+ auto* call = Call("degrees", val);
+ WrapInFunction(val, call);
- GeneratorImpl& gen = SanitizeAndBuild();
+ GeneratorImpl& gen = SanitizeAndBuild();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(float3 tint_degrees(float3 param_0) {
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(float3 tint_degrees(float3 param_0) {
return param_0 * 57.295779513082322865;
}
@@ -426,14 +425,14 @@ void test_function() {
}
TEST_F(HlslGeneratorImplTest_Builtin, Radians_Scalar) {
- auto* val = Var("val", ty.f32());
- auto* call = Call("radians", val);
- WrapInFunction(val, call);
+ auto* val = Var("val", ty.f32());
+ auto* call = Call("radians", val);
+ WrapInFunction(val, call);
- GeneratorImpl& gen = SanitizeAndBuild();
+ GeneratorImpl& gen = SanitizeAndBuild();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(float tint_radians(float param_0) {
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(float tint_radians(float param_0) {
return param_0 * 0.017453292519943295474;
}
@@ -447,14 +446,14 @@ void test_function() {
}
TEST_F(HlslGeneratorImplTest_Builtin, Radians_Vector) {
- auto* val = Var("val", ty.vec3<f32>());
- auto* call = Call("radians", val);
- WrapInFunction(val, call);
+ auto* val = Var("val", ty.vec3<f32>());
+ auto* call = Call("radians", val);
+ WrapInFunction(val, call);
- GeneratorImpl& gen = SanitizeAndBuild();
+ GeneratorImpl& gen = SanitizeAndBuild();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(float3 tint_radians(float3 param_0) {
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(float3 tint_radians(float3 param_0) {
return param_0 * 0.017453292519943295474;
}
@@ -468,13 +467,13 @@ void test_function() {
}
TEST_F(HlslGeneratorImplTest_Builtin, Pack4x8Snorm) {
- auto* call = Call("pack4x8snorm", "p1");
- Global("p1", ty.vec4<f32>(), ast::StorageClass::kPrivate);
- WrapInFunction(CallStmt(call));
- GeneratorImpl& gen = Build();
+ auto* call = Call("pack4x8snorm", "p1");
+ Global("p1", ty.vec4<f32>(), ast::StorageClass::kPrivate);
+ WrapInFunction(CallStmt(call));
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(uint tint_pack4x8snorm(float4 param_0) {
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(uint tint_pack4x8snorm(float4 param_0) {
int4 i = int4(round(clamp(param_0, -1.0, 1.0) * 127.0)) & 0xff;
return asuint(i.x | i.y << 8 | i.z << 16 | i.w << 24);
}
@@ -490,13 +489,13 @@ void test_function() {
}
TEST_F(HlslGeneratorImplTest_Builtin, Pack4x8Unorm) {
- auto* call = Call("pack4x8unorm", "p1");
- Global("p1", ty.vec4<f32>(), ast::StorageClass::kPrivate);
- WrapInFunction(CallStmt(call));
- GeneratorImpl& gen = Build();
+ auto* call = Call("pack4x8unorm", "p1");
+ Global("p1", ty.vec4<f32>(), ast::StorageClass::kPrivate);
+ WrapInFunction(CallStmt(call));
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(uint tint_pack4x8unorm(float4 param_0) {
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(uint tint_pack4x8unorm(float4 param_0) {
uint4 i = uint4(round(clamp(param_0, 0.0, 1.0) * 255.0));
return (i.x | i.y << 8 | i.z << 16 | i.w << 24);
}
@@ -512,13 +511,13 @@ void test_function() {
}
TEST_F(HlslGeneratorImplTest_Builtin, Pack2x16Snorm) {
- auto* call = Call("pack2x16snorm", "p1");
- Global("p1", ty.vec2<f32>(), ast::StorageClass::kPrivate);
- WrapInFunction(CallStmt(call));
- GeneratorImpl& gen = Build();
+ auto* call = Call("pack2x16snorm", "p1");
+ Global("p1", ty.vec2<f32>(), ast::StorageClass::kPrivate);
+ WrapInFunction(CallStmt(call));
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(uint tint_pack2x16snorm(float2 param_0) {
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(uint tint_pack2x16snorm(float2 param_0) {
int2 i = int2(round(clamp(param_0, -1.0, 1.0) * 32767.0)) & 0xffff;
return asuint(i.x | i.y << 16);
}
@@ -534,13 +533,13 @@ void test_function() {
}
TEST_F(HlslGeneratorImplTest_Builtin, Pack2x16Unorm) {
- auto* call = Call("pack2x16unorm", "p1");
- Global("p1", ty.vec2<f32>(), ast::StorageClass::kPrivate);
- WrapInFunction(CallStmt(call));
- GeneratorImpl& gen = Build();
+ auto* call = Call("pack2x16unorm", "p1");
+ Global("p1", ty.vec2<f32>(), ast::StorageClass::kPrivate);
+ WrapInFunction(CallStmt(call));
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(uint tint_pack2x16unorm(float2 param_0) {
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(uint tint_pack2x16unorm(float2 param_0) {
uint2 i = uint2(round(clamp(param_0, 0.0, 1.0) * 65535.0));
return (i.x | i.y << 16);
}
@@ -556,13 +555,13 @@ void test_function() {
}
TEST_F(HlslGeneratorImplTest_Builtin, Pack2x16Float) {
- auto* call = Call("pack2x16float", "p1");
- Global("p1", ty.vec2<f32>(), ast::StorageClass::kPrivate);
- WrapInFunction(CallStmt(call));
- GeneratorImpl& gen = Build();
+ auto* call = Call("pack2x16float", "p1");
+ Global("p1", ty.vec2<f32>(), ast::StorageClass::kPrivate);
+ WrapInFunction(CallStmt(call));
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(uint tint_pack2x16float(float2 param_0) {
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(uint tint_pack2x16float(float2 param_0) {
uint2 i = f32tof16(param_0);
return i.x | (i.y << 16);
}
@@ -578,13 +577,13 @@ void test_function() {
}
TEST_F(HlslGeneratorImplTest_Builtin, Unpack4x8Snorm) {
- auto* call = Call("unpack4x8snorm", "p1");
- Global("p1", ty.u32(), ast::StorageClass::kPrivate);
- WrapInFunction(CallStmt(call));
- GeneratorImpl& gen = Build();
+ auto* call = Call("unpack4x8snorm", "p1");
+ Global("p1", ty.u32(), ast::StorageClass::kPrivate);
+ WrapInFunction(CallStmt(call));
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(float4 tint_unpack4x8snorm(uint param_0) {
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(float4 tint_unpack4x8snorm(uint param_0) {
int j = int(param_0);
int4 i = int4(j << 24, j << 16, j << 8, j) >> 24;
return clamp(float4(i) / 127.0, -1.0, 1.0);
@@ -601,13 +600,13 @@ void test_function() {
}
TEST_F(HlslGeneratorImplTest_Builtin, Unpack4x8Unorm) {
- auto* call = Call("unpack4x8unorm", "p1");
- Global("p1", ty.u32(), ast::StorageClass::kPrivate);
- WrapInFunction(CallStmt(call));
- GeneratorImpl& gen = Build();
+ auto* call = Call("unpack4x8unorm", "p1");
+ Global("p1", ty.u32(), ast::StorageClass::kPrivate);
+ WrapInFunction(CallStmt(call));
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(float4 tint_unpack4x8unorm(uint param_0) {
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(float4 tint_unpack4x8unorm(uint param_0) {
uint j = param_0;
uint4 i = uint4(j & 0xff, (j >> 8) & 0xff, (j >> 16) & 0xff, j >> 24);
return float4(i) / 255.0;
@@ -624,13 +623,13 @@ void test_function() {
}
TEST_F(HlslGeneratorImplTest_Builtin, Unpack2x16Snorm) {
- auto* call = Call("unpack2x16snorm", "p1");
- Global("p1", ty.u32(), ast::StorageClass::kPrivate);
- WrapInFunction(CallStmt(call));
- GeneratorImpl& gen = Build();
+ auto* call = Call("unpack2x16snorm", "p1");
+ Global("p1", ty.u32(), ast::StorageClass::kPrivate);
+ WrapInFunction(CallStmt(call));
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(float2 tint_unpack2x16snorm(uint param_0) {
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(float2 tint_unpack2x16snorm(uint param_0) {
int j = int(param_0);
int2 i = int2(j << 16, j) >> 16;
return clamp(float2(i) / 32767.0, -1.0, 1.0);
@@ -647,13 +646,13 @@ void test_function() {
}
TEST_F(HlslGeneratorImplTest_Builtin, Unpack2x16Unorm) {
- auto* call = Call("unpack2x16unorm", "p1");
- Global("p1", ty.u32(), ast::StorageClass::kPrivate);
- WrapInFunction(CallStmt(call));
- GeneratorImpl& gen = Build();
+ auto* call = Call("unpack2x16unorm", "p1");
+ Global("p1", ty.u32(), ast::StorageClass::kPrivate);
+ WrapInFunction(CallStmt(call));
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(float2 tint_unpack2x16unorm(uint param_0) {
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(float2 tint_unpack2x16unorm(uint param_0) {
uint j = param_0;
uint2 i = uint2(j & 0xffff, j >> 16);
return float2(i) / 65535.0;
@@ -670,13 +669,13 @@ void test_function() {
}
TEST_F(HlslGeneratorImplTest_Builtin, Unpack2x16Float) {
- auto* call = Call("unpack2x16float", "p1");
- Global("p1", ty.u32(), ast::StorageClass::kPrivate);
- WrapInFunction(CallStmt(call));
- GeneratorImpl& gen = Build();
+ auto* call = Call("unpack2x16float", "p1");
+ Global("p1", ty.u32(), ast::StorageClass::kPrivate);
+ WrapInFunction(CallStmt(call));
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(float2 tint_unpack2x16float(uint param_0) {
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(float2 tint_unpack2x16float(uint param_0) {
uint i = param_0;
return f16tof32(uint2(i & 0xffff, i >> 16));
}
@@ -692,16 +691,16 @@ void test_function() {
}
TEST_F(HlslGeneratorImplTest_Builtin, StorageBarrier) {
- Func("main", {}, ty.void_(), {CallStmt(Call("storageBarrier"))},
- {
- Stage(ast::PipelineStage::kCompute),
- WorkgroupSize(1),
- });
+ Func("main", {}, ty.void_(), {CallStmt(Call("storageBarrier"))},
+ {
+ Stage(ast::PipelineStage::kCompute),
+ WorkgroupSize(1_i),
+ });
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"([numthreads(1, 1, 1)]
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"([numthreads(1, 1, 1)]
void main() {
DeviceMemoryBarrierWithGroupSync();
return;
@@ -710,16 +709,16 @@ void main() {
}
TEST_F(HlslGeneratorImplTest_Builtin, WorkgroupBarrier) {
- Func("main", {}, ty.void_(), {CallStmt(Call("workgroupBarrier"))},
- {
- Stage(ast::PipelineStage::kCompute),
- WorkgroupSize(1),
- });
+ Func("main", {}, ty.void_(), {CallStmt(Call("workgroupBarrier"))},
+ {
+ Stage(ast::PipelineStage::kCompute),
+ WorkgroupSize(1_i),
+ });
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"([numthreads(1, 1, 1)]
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"([numthreads(1, 1, 1)]
void main() {
GroupMemoryBarrierWithGroupSync();
return;
@@ -727,5 +726,91 @@ void main() {
)");
}
+TEST_F(HlslGeneratorImplTest_Builtin, Dot4I8Packed) {
+ Enable(ast::Extension::kChromiumExperimentalDP4a);
+
+ auto* val1 = Var("val1", ty.u32());
+ auto* val2 = Var("val2", ty.u32());
+ auto* call = Call("dot4I8Packed", val1, val2);
+ WrapInFunction(val1, val2, call);
+
+ GeneratorImpl& gen = SanitizeAndBuild();
+
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(int tint_dot4I8Packed(uint param_0, uint param_1) {
+ int accumulator = 0;
+ return dot4add_i8packed(param_0, param_1, accumulator);
+}
+
+[numthreads(1, 1, 1)]
+void test_function() {
+ uint val1 = 0u;
+ uint val2 = 0u;
+ const int tint_symbol = tint_dot4I8Packed(val1, val2);
+ return;
+}
+)");
+}
+
+TEST_F(HlslGeneratorImplTest_Builtin, Dot4U8Packed) {
+ Enable(ast::Extension::kChromiumExperimentalDP4a);
+
+ auto* val1 = Var("val1", ty.u32());
+ auto* val2 = Var("val2", ty.u32());
+ auto* call = Call("dot4U8Packed", val1, val2);
+ WrapInFunction(val1, val2, call);
+
+ GeneratorImpl& gen = SanitizeAndBuild();
+
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(uint tint_dot4U8Packed(uint param_0, uint param_1) {
+ uint accumulator = 0u;
+ return dot4add_u8packed(param_0, param_1, accumulator);
+}
+
+[numthreads(1, 1, 1)]
+void test_function() {
+ uint val1 = 0u;
+ uint val2 = 0u;
+ const uint tint_symbol = tint_dot4U8Packed(val1, val2);
+ return;
+}
+)");
+}
+
+TEST_F(HlslGeneratorImplTest_Builtin, CountOneBits) {
+ auto* val = Var("val1", ty.i32());
+ auto* call = Call("countOneBits", val);
+ WrapInFunction(val, call);
+
+ GeneratorImpl& gen = SanitizeAndBuild();
+
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"([numthreads(1, 1, 1)]
+void test_function() {
+ int val1 = 0;
+ const int tint_symbol = asint(countbits(asuint(val1)));
+ return;
+}
+)");
+}
+
+TEST_F(HlslGeneratorImplTest_Builtin, ReverseBits) {
+ auto* val = Var("val1", ty.i32());
+ auto* call = Call("reverseBits", val);
+ WrapInFunction(val, call);
+
+ GeneratorImpl& gen = SanitizeAndBuild();
+
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"([numthreads(1, 1, 1)]
+void test_function() {
+ int val1 = 0;
+ const int tint_symbol = asint(reversebits(asuint(val1)));
+ return;
+}
+)");
+}
+
} // namespace
} // namespace tint::writer::hlsl
diff --git a/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_builtin_texture_test.cc b/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_builtin_texture_test.cc
index f11dd908798..371cc1df611 100644
--- a/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_builtin_texture_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_builtin_texture_test.cc
@@ -24,369 +24,367 @@ namespace {
using ::testing::HasSubstr;
struct ExpectedResult {
- ExpectedResult(const char* o) : out(o) {} // NOLINT
- ExpectedResult(const char* p, const char* o) : pre(p), out(o) {}
+ ExpectedResult(const char* o) : out(o) {} // NOLINT
+ ExpectedResult(const char* p, const char* o) : pre(p), out(o) {}
- std::string pre;
- std::string out;
+ std::string pre;
+ std::string out;
};
-ExpectedResult expected_texture_overload(
- ast::builtin::test::ValidTextureOverload overload) {
- using ValidTextureOverload = ast::builtin::test::ValidTextureOverload;
- switch (overload) {
- case ValidTextureOverload::kDimensions1d:
- case ValidTextureOverload::kDimensionsStorageWO1d:
- return {
- R"(int tint_tmp;
+ExpectedResult expected_texture_overload(ast::builtin::test::ValidTextureOverload overload) {
+ using ValidTextureOverload = ast::builtin::test::ValidTextureOverload;
+ switch (overload) {
+ case ValidTextureOverload::kDimensions1d:
+ case ValidTextureOverload::kDimensionsStorageWO1d:
+ return {
+ R"(int tint_tmp;
tint_symbol.GetDimensions(tint_tmp);
)",
- "tint_tmp;",
- };
- case ValidTextureOverload::kDimensions2d:
- case ValidTextureOverload::kDimensionsDepth2d:
- case ValidTextureOverload::kDimensionsStorageWO2d:
- return {
- R"(int2 tint_tmp;
+ "tint_tmp;",
+ };
+ case ValidTextureOverload::kDimensions2d:
+ case ValidTextureOverload::kDimensionsDepth2d:
+ case ValidTextureOverload::kDimensionsStorageWO2d:
+ return {
+ R"(int2 tint_tmp;
tint_symbol.GetDimensions(tint_tmp.x, tint_tmp.y);
)",
- "tint_tmp;",
- };
- case ValidTextureOverload::kDimensionsDepthMultisampled2d:
- case ValidTextureOverload::kDimensionsMultisampled2d:
- return {
- R"(int3 tint_tmp;
+ "tint_tmp;",
+ };
+ case ValidTextureOverload::kDimensionsDepthMultisampled2d:
+ case ValidTextureOverload::kDimensionsMultisampled2d:
+ return {
+ R"(int3 tint_tmp;
tint_symbol.GetDimensions(tint_tmp.x, tint_tmp.y, tint_tmp.z);
)",
- "tint_tmp.xy;",
- };
+ "tint_tmp.xy;",
+ };
- case ValidTextureOverload::kDimensions2dArray:
- case ValidTextureOverload::kDimensionsDepth2dArray:
- case ValidTextureOverload::kDimensionsStorageWO2dArray:
- return {
- R"(int3 tint_tmp;
+ case ValidTextureOverload::kDimensions2dArray:
+ case ValidTextureOverload::kDimensionsDepth2dArray:
+ case ValidTextureOverload::kDimensionsStorageWO2dArray:
+ return {
+ R"(int3 tint_tmp;
tint_symbol.GetDimensions(tint_tmp.x, tint_tmp.y, tint_tmp.z);
)",
- "tint_tmp.xy;",
- };
- case ValidTextureOverload::kDimensions3d:
- case ValidTextureOverload::kDimensionsStorageWO3d:
- return {
- R"(int3 tint_tmp;
+ "tint_tmp.xy;",
+ };
+ case ValidTextureOverload::kDimensions3d:
+ case ValidTextureOverload::kDimensionsStorageWO3d:
+ return {
+ R"(int3 tint_tmp;
tint_symbol.GetDimensions(tint_tmp.x, tint_tmp.y, tint_tmp.z);
)",
- "tint_tmp;",
- };
- case ValidTextureOverload::kDimensionsCube:
- case ValidTextureOverload::kDimensionsDepthCube:
- return {
- R"(int2 tint_tmp;
+ "tint_tmp;",
+ };
+ case ValidTextureOverload::kDimensionsCube:
+ case ValidTextureOverload::kDimensionsDepthCube:
+ return {
+ R"(int2 tint_tmp;
tint_symbol.GetDimensions(tint_tmp.x, tint_tmp.y);
)",
- "tint_tmp;",
- };
- case ValidTextureOverload::kDimensionsCubeArray:
- case ValidTextureOverload::kDimensionsDepthCubeArray:
- return {
- R"(int3 tint_tmp;
+ "tint_tmp;",
+ };
+ case ValidTextureOverload::kDimensionsCubeArray:
+ case ValidTextureOverload::kDimensionsDepthCubeArray:
+ return {
+ R"(int3 tint_tmp;
tint_symbol.GetDimensions(tint_tmp.x, tint_tmp.y, tint_tmp.z);
)",
- "tint_tmp.xy;",
- };
- case ValidTextureOverload::kDimensions2dLevel:
- case ValidTextureOverload::kDimensionsDepth2dLevel:
- return {
- R"(int3 tint_tmp;
+ "tint_tmp.xy;",
+ };
+ case ValidTextureOverload::kDimensions2dLevel:
+ case ValidTextureOverload::kDimensionsDepth2dLevel:
+ return {
+ R"(int3 tint_tmp;
tint_symbol.GetDimensions(1, tint_tmp.x, tint_tmp.y, tint_tmp.z);
)",
- "tint_tmp.xy;",
- };
- case ValidTextureOverload::kDimensions2dArrayLevel:
- case ValidTextureOverload::kDimensionsDepth2dArrayLevel:
- return {
- R"(int4 tint_tmp;
+ "tint_tmp.xy;",
+ };
+ case ValidTextureOverload::kDimensions2dArrayLevel:
+ case ValidTextureOverload::kDimensionsDepth2dArrayLevel:
+ return {
+ R"(int4 tint_tmp;
tint_symbol.GetDimensions(1, tint_tmp.x, tint_tmp.y, tint_tmp.z, tint_tmp.w);
)",
- "tint_tmp.xy;",
- };
- case ValidTextureOverload::kDimensions3dLevel:
- return {
- R"(int4 tint_tmp;
+ "tint_tmp.xy;",
+ };
+ case ValidTextureOverload::kDimensions3dLevel:
+ return {
+ R"(int4 tint_tmp;
tint_symbol.GetDimensions(1, tint_tmp.x, tint_tmp.y, tint_tmp.z, tint_tmp.w);
)",
- "tint_tmp.xyz;",
- };
- case ValidTextureOverload::kDimensionsCubeLevel:
- case ValidTextureOverload::kDimensionsDepthCubeLevel:
- return {
- R"(int3 tint_tmp;
+ "tint_tmp.xyz;",
+ };
+ case ValidTextureOverload::kDimensionsCubeLevel:
+ case ValidTextureOverload::kDimensionsDepthCubeLevel:
+ return {
+ R"(int3 tint_tmp;
tint_symbol.GetDimensions(1, tint_tmp.x, tint_tmp.y, tint_tmp.z);
)",
- "tint_tmp.xy;",
- };
- case ValidTextureOverload::kDimensionsCubeArrayLevel:
- case ValidTextureOverload::kDimensionsDepthCubeArrayLevel:
- return {
- R"(int4 tint_tmp;
+ "tint_tmp.xy;",
+ };
+ case ValidTextureOverload::kDimensionsCubeArrayLevel:
+ case ValidTextureOverload::kDimensionsDepthCubeArrayLevel:
+ return {
+ R"(int4 tint_tmp;
tint_symbol.GetDimensions(1, tint_tmp.x, tint_tmp.y, tint_tmp.z, tint_tmp.w);
)",
- "tint_tmp.xy;",
- };
- case ValidTextureOverload::kGather2dF32:
- return R"(tint_symbol.GatherRed(tint_symbol_1, float2(1.0f, 2.0f)))";
- case ValidTextureOverload::kGather2dOffsetF32:
- return R"(tint_symbol.GatherRed(tint_symbol_1, float2(1.0f, 2.0f), int2(3, 4)))";
- case ValidTextureOverload::kGather2dArrayF32:
- return R"(tint_symbol.GatherRed(tint_symbol_1, float3(1.0f, 2.0f, float(3))))";
- case ValidTextureOverload::kGather2dArrayOffsetF32:
- return R"(tint_symbol.GatherRed(tint_symbol_1, float3(1.0f, 2.0f, float(3)), int2(4, 5)))";
- case ValidTextureOverload::kGatherCubeF32:
- return R"(tint_symbol.GatherRed(tint_symbol_1, float3(1.0f, 2.0f, 3.0f)))";
- case ValidTextureOverload::kGatherCubeArrayF32:
- return R"(tint_symbol.GatherRed(tint_symbol_1, float4(1.0f, 2.0f, 3.0f, float(4))))";
- case ValidTextureOverload::kGatherDepth2dF32:
- return R"(tint_symbol.Gather(tint_symbol_1, float2(1.0f, 2.0f)))";
- case ValidTextureOverload::kGatherDepth2dOffsetF32:
- return R"(tint_symbol.Gather(tint_symbol_1, float2(1.0f, 2.0f), int2(3, 4)))";
- case ValidTextureOverload::kGatherDepth2dArrayF32:
- return R"(tint_symbol.Gather(tint_symbol_1, float3(1.0f, 2.0f, float(3))))";
- case ValidTextureOverload::kGatherDepth2dArrayOffsetF32:
- return R"(tint_symbol.Gather(tint_symbol_1, float3(1.0f, 2.0f, float(3)), int2(4, 5)))";
- case ValidTextureOverload::kGatherDepthCubeF32:
- return R"(tint_symbol.Gather(tint_symbol_1, float3(1.0f, 2.0f, 3.0f)))";
- case ValidTextureOverload::kGatherDepthCubeArrayF32:
- return R"(tint_symbol.Gather(tint_symbol_1, float4(1.0f, 2.0f, 3.0f, float(4))))";
- case ValidTextureOverload::kGatherCompareDepth2dF32:
- return R"(tint_symbol.GatherCmp(tint_symbol_1, float2(1.0f, 2.0f), 3.0f))";
- case ValidTextureOverload::kGatherCompareDepth2dOffsetF32:
- return R"(tint_symbol.GatherCmp(tint_symbol_1, float2(1.0f, 2.0f), 3.0f, int2(4, 5)))";
- case ValidTextureOverload::kGatherCompareDepth2dArrayF32:
- return R"(tint_symbol.GatherCmp(tint_symbol_1, float3(1.0f, 2.0f, float(3)), 4.0f))";
- case ValidTextureOverload::kGatherCompareDepth2dArrayOffsetF32:
- return R"(tint_symbol.GatherCmp(tint_symbol_1, float3(1.0f, 2.0f, float(3)), 4.0f, int2(5, 6)))";
- case ValidTextureOverload::kGatherCompareDepthCubeF32:
- return R"(tint_symbol.GatherCmp(tint_symbol_1, float3(1.0f, 2.0f, 3.0f), 4.0f))";
- case ValidTextureOverload::kGatherCompareDepthCubeArrayF32:
- return R"(tint_symbol.GatherCmp(tint_symbol_1, float4(1.0f, 2.0f, 3.0f, float(4)), 5.0f))";
- case ValidTextureOverload::kNumLayers2dArray:
- case ValidTextureOverload::kNumLayersDepth2dArray:
- case ValidTextureOverload::kNumLayersCubeArray:
- case ValidTextureOverload::kNumLayersDepthCubeArray:
- case ValidTextureOverload::kNumLayersStorageWO2dArray:
- return {
- R"(int3 tint_tmp;
+ "tint_tmp.xy;",
+ };
+ case ValidTextureOverload::kGather2dF32:
+ return R"(tint_symbol.GatherRed(tint_symbol_1, float2(1.0f, 2.0f)))";
+ case ValidTextureOverload::kGather2dOffsetF32:
+ return R"(tint_symbol.GatherRed(tint_symbol_1, float2(1.0f, 2.0f), int2(3, 4)))";
+ case ValidTextureOverload::kGather2dArrayF32:
+ return R"(tint_symbol.GatherRed(tint_symbol_1, float3(1.0f, 2.0f, float(3))))";
+ case ValidTextureOverload::kGather2dArrayOffsetF32:
+ return R"(tint_symbol.GatherRed(tint_symbol_1, float3(1.0f, 2.0f, float(3)), int2(4, 5)))";
+ case ValidTextureOverload::kGatherCubeF32:
+ return R"(tint_symbol.GatherRed(tint_symbol_1, float3(1.0f, 2.0f, 3.0f)))";
+ case ValidTextureOverload::kGatherCubeArrayF32:
+ return R"(tint_symbol.GatherRed(tint_symbol_1, float4(1.0f, 2.0f, 3.0f, float(4))))";
+ case ValidTextureOverload::kGatherDepth2dF32:
+ return R"(tint_symbol.Gather(tint_symbol_1, float2(1.0f, 2.0f)))";
+ case ValidTextureOverload::kGatherDepth2dOffsetF32:
+ return R"(tint_symbol.Gather(tint_symbol_1, float2(1.0f, 2.0f), int2(3, 4)))";
+ case ValidTextureOverload::kGatherDepth2dArrayF32:
+ return R"(tint_symbol.Gather(tint_symbol_1, float3(1.0f, 2.0f, float(3))))";
+ case ValidTextureOverload::kGatherDepth2dArrayOffsetF32:
+ return R"(tint_symbol.Gather(tint_symbol_1, float3(1.0f, 2.0f, float(3)), int2(4, 5)))";
+ case ValidTextureOverload::kGatherDepthCubeF32:
+ return R"(tint_symbol.Gather(tint_symbol_1, float3(1.0f, 2.0f, 3.0f)))";
+ case ValidTextureOverload::kGatherDepthCubeArrayF32:
+ return R"(tint_symbol.Gather(tint_symbol_1, float4(1.0f, 2.0f, 3.0f, float(4))))";
+ case ValidTextureOverload::kGatherCompareDepth2dF32:
+ return R"(tint_symbol.GatherCmp(tint_symbol_1, float2(1.0f, 2.0f), 3.0f))";
+ case ValidTextureOverload::kGatherCompareDepth2dOffsetF32:
+ return R"(tint_symbol.GatherCmp(tint_symbol_1, float2(1.0f, 2.0f), 3.0f, int2(4, 5)))";
+ case ValidTextureOverload::kGatherCompareDepth2dArrayF32:
+ return R"(tint_symbol.GatherCmp(tint_symbol_1, float3(1.0f, 2.0f, float(3)), 4.0f))";
+ case ValidTextureOverload::kGatherCompareDepth2dArrayOffsetF32:
+ return R"(tint_symbol.GatherCmp(tint_symbol_1, float3(1.0f, 2.0f, float(3)), 4.0f, int2(5, 6)))";
+ case ValidTextureOverload::kGatherCompareDepthCubeF32:
+ return R"(tint_symbol.GatherCmp(tint_symbol_1, float3(1.0f, 2.0f, 3.0f), 4.0f))";
+ case ValidTextureOverload::kGatherCompareDepthCubeArrayF32:
+ return R"(tint_symbol.GatherCmp(tint_symbol_1, float4(1.0f, 2.0f, 3.0f, float(4)), 5.0f))";
+ case ValidTextureOverload::kNumLayers2dArray:
+ case ValidTextureOverload::kNumLayersDepth2dArray:
+ case ValidTextureOverload::kNumLayersCubeArray:
+ case ValidTextureOverload::kNumLayersDepthCubeArray:
+ case ValidTextureOverload::kNumLayersStorageWO2dArray:
+ return {
+ R"(int3 tint_tmp;
tint_symbol.GetDimensions(tint_tmp.x, tint_tmp.y, tint_tmp.z);
)",
- "tint_tmp.z;",
- };
- case ValidTextureOverload::kNumLevels2d:
- case ValidTextureOverload::kNumLevelsCube:
- case ValidTextureOverload::kNumLevelsDepth2d:
- case ValidTextureOverload::kNumLevelsDepthCube:
- return {
- R"(int3 tint_tmp;
+ "tint_tmp.z;",
+ };
+ case ValidTextureOverload::kNumLevels2d:
+ case ValidTextureOverload::kNumLevelsCube:
+ case ValidTextureOverload::kNumLevelsDepth2d:
+ case ValidTextureOverload::kNumLevelsDepthCube:
+ return {
+ R"(int3 tint_tmp;
tint_symbol.GetDimensions(0, tint_tmp.x, tint_tmp.y, tint_tmp.z);
)",
- "tint_tmp.z;",
- };
- case ValidTextureOverload::kNumLevels2dArray:
- case ValidTextureOverload::kNumLevels3d:
- case ValidTextureOverload::kNumLevelsCubeArray:
- case ValidTextureOverload::kNumLevelsDepth2dArray:
- case ValidTextureOverload::kNumLevelsDepthCubeArray:
- return {
- R"(int4 tint_tmp;
+ "tint_tmp.z;",
+ };
+ case ValidTextureOverload::kNumLevels2dArray:
+ case ValidTextureOverload::kNumLevels3d:
+ case ValidTextureOverload::kNumLevelsCubeArray:
+ case ValidTextureOverload::kNumLevelsDepth2dArray:
+ case ValidTextureOverload::kNumLevelsDepthCubeArray:
+ return {
+ R"(int4 tint_tmp;
tint_symbol.GetDimensions(0, tint_tmp.x, tint_tmp.y, tint_tmp.z, tint_tmp.w);
)",
- "tint_tmp.w;",
- };
- case ValidTextureOverload::kNumSamplesDepthMultisampled2d:
- case ValidTextureOverload::kNumSamplesMultisampled2d:
- return {
- R"(int3 tint_tmp;
+ "tint_tmp.w;",
+ };
+ case ValidTextureOverload::kNumSamplesDepthMultisampled2d:
+ case ValidTextureOverload::kNumSamplesMultisampled2d:
+ return {
+ R"(int3 tint_tmp;
tint_symbol.GetDimensions(tint_tmp.x, tint_tmp.y, tint_tmp.z);
)",
- "tint_tmp.z;",
- };
- case ValidTextureOverload::kSample1dF32:
- return R"(tint_symbol.Sample(tint_symbol_1, 1.0f);)";
- case ValidTextureOverload::kSample2dF32:
- return R"(tint_symbol.Sample(tint_symbol_1, float2(1.0f, 2.0f));)";
- case ValidTextureOverload::kSample2dOffsetF32:
- return R"(tint_symbol.Sample(tint_symbol_1, float2(1.0f, 2.0f), int2(3, 4));)";
- case ValidTextureOverload::kSample2dArrayF32:
- return R"(tint_symbol.Sample(tint_symbol_1, float3(1.0f, 2.0f, float(3)));)";
- case ValidTextureOverload::kSample2dArrayOffsetF32:
- return R"(tint_symbol.Sample(tint_symbol_1, float3(1.0f, 2.0f, float(3)), int2(4, 5));)";
- case ValidTextureOverload::kSample3dF32:
- return R"(tint_symbol.Sample(tint_symbol_1, float3(1.0f, 2.0f, 3.0f));)";
- case ValidTextureOverload::kSample3dOffsetF32:
- return R"(tint_symbol.Sample(tint_symbol_1, float3(1.0f, 2.0f, 3.0f), int3(4, 5, 6));)";
- case ValidTextureOverload::kSampleCubeF32:
- return R"(tint_symbol.Sample(tint_symbol_1, float3(1.0f, 2.0f, 3.0f));)";
- case ValidTextureOverload::kSampleCubeArrayF32:
- return R"(tint_symbol.Sample(tint_symbol_1, float4(1.0f, 2.0f, 3.0f, float(4)));)";
- case ValidTextureOverload::kSampleDepth2dF32:
- return R"(tint_symbol.Sample(tint_symbol_1, float2(1.0f, 2.0f)).x;)";
- case ValidTextureOverload::kSampleDepth2dOffsetF32:
- return R"(tint_symbol.Sample(tint_symbol_1, float2(1.0f, 2.0f), int2(3, 4)).x;)";
- case ValidTextureOverload::kSampleDepth2dArrayF32:
- return R"(tint_symbol.Sample(tint_symbol_1, float3(1.0f, 2.0f, float(3))).x;)";
- case ValidTextureOverload::kSampleDepth2dArrayOffsetF32:
- return R"(tint_symbol.Sample(tint_symbol_1, float3(1.0f, 2.0f, float(3)), int2(4, 5)).x;)";
- case ValidTextureOverload::kSampleDepthCubeF32:
- return R"(tint_symbol.Sample(tint_symbol_1, float3(1.0f, 2.0f, 3.0f)).x;)";
- case ValidTextureOverload::kSampleDepthCubeArrayF32:
- return R"(tint_symbol.Sample(tint_symbol_1, float4(1.0f, 2.0f, 3.0f, float(4))).x;)";
- case ValidTextureOverload::kSampleBias2dF32:
- return R"(tint_symbol.SampleBias(tint_symbol_1, float2(1.0f, 2.0f), 3.0f);)";
- case ValidTextureOverload::kSampleBias2dOffsetF32:
- return R"(tint_symbol.SampleBias(tint_symbol_1, float2(1.0f, 2.0f), 3.0f, int2(4, 5));)";
- case ValidTextureOverload::kSampleBias2dArrayF32:
- return R"(tint_symbol.SampleBias(tint_symbol_1, float3(1.0f, 2.0f, float(4)), 3.0f);)";
- case ValidTextureOverload::kSampleBias2dArrayOffsetF32:
- return R"(tint_symbol.SampleBias(tint_symbol_1, float3(1.0f, 2.0f, float(3)), 4.0f, int2(5, 6));)";
- case ValidTextureOverload::kSampleBias3dF32:
- return R"(tint_symbol.SampleBias(tint_symbol_1, float3(1.0f, 2.0f, 3.0f), 4.0f);)";
- case ValidTextureOverload::kSampleBias3dOffsetF32:
- return R"(tint_symbol.SampleBias(tint_symbol_1, float3(1.0f, 2.0f, 3.0f), 4.0f, int3(5, 6, 7));)";
- case ValidTextureOverload::kSampleBiasCubeF32:
- return R"(tint_symbol.SampleBias(tint_symbol_1, float3(1.0f, 2.0f, 3.0f), 4.0f);)";
- case ValidTextureOverload::kSampleBiasCubeArrayF32:
- return R"(tint_symbol.SampleBias(tint_symbol_1, float4(1.0f, 2.0f, 3.0f, float(3)), 4.0f);)";
- case ValidTextureOverload::kSampleLevel2dF32:
- return R"(tint_symbol.SampleLevel(tint_symbol_1, float2(1.0f, 2.0f), 3.0f);)";
- case ValidTextureOverload::kSampleLevel2dOffsetF32:
- return R"(tint_symbol.SampleLevel(tint_symbol_1, float2(1.0f, 2.0f), 3.0f, int2(4, 5));)";
- case ValidTextureOverload::kSampleLevel2dArrayF32:
- return R"(tint_symbol.SampleLevel(tint_symbol_1, float3(1.0f, 2.0f, float(3)), 4.0f);)";
- case ValidTextureOverload::kSampleLevel2dArrayOffsetF32:
- return R"(tint_symbol.SampleLevel(tint_symbol_1, float3(1.0f, 2.0f, float(3)), 4.0f, int2(5, 6));)";
- case ValidTextureOverload::kSampleLevel3dF32:
- return R"(tint_symbol.SampleLevel(tint_symbol_1, float3(1.0f, 2.0f, 3.0f), 4.0f);)";
- case ValidTextureOverload::kSampleLevel3dOffsetF32:
- return R"(tint_symbol.SampleLevel(tint_symbol_1, float3(1.0f, 2.0f, 3.0f), 4.0f, int3(5, 6, 7));)";
- case ValidTextureOverload::kSampleLevelCubeF32:
- return R"(tint_symbol.SampleLevel(tint_symbol_1, float3(1.0f, 2.0f, 3.0f), 4.0f);)";
- case ValidTextureOverload::kSampleLevelCubeArrayF32:
- return R"(tint_symbol.SampleLevel(tint_symbol_1, float4(1.0f, 2.0f, 3.0f, float(4)), 5.0f);)";
- case ValidTextureOverload::kSampleLevelDepth2dF32:
- return R"(tint_symbol.SampleLevel(tint_symbol_1, float2(1.0f, 2.0f), 3).x;)";
- case ValidTextureOverload::kSampleLevelDepth2dOffsetF32:
- return R"(tint_symbol.SampleLevel(tint_symbol_1, float2(1.0f, 2.0f), 3, int2(4, 5)).x;)";
- case ValidTextureOverload::kSampleLevelDepth2dArrayF32:
- return R"(tint_symbol.SampleLevel(tint_symbol_1, float3(1.0f, 2.0f, float(3)), 4).x;)";
- case ValidTextureOverload::kSampleLevelDepth2dArrayOffsetF32:
- return R"(tint_symbol.SampleLevel(tint_symbol_1, float3(1.0f, 2.0f, float(3)), 4, int2(5, 6)).x;)";
- case ValidTextureOverload::kSampleLevelDepthCubeF32:
- return R"(tint_symbol.SampleLevel(tint_symbol_1, float3(1.0f, 2.0f, 3.0f), 4).x;)";
- case ValidTextureOverload::kSampleLevelDepthCubeArrayF32:
- return R"(tint_symbol.SampleLevel(tint_symbol_1, float4(1.0f, 2.0f, 3.0f, float(4)), 5).x;)";
- case ValidTextureOverload::kSampleGrad2dF32:
- return R"(tint_symbol.SampleGrad(tint_symbol_1, float2(1.0f, 2.0f), float2(3.0f, 4.0f), float2(5.0f, 6.0f));)";
- case ValidTextureOverload::kSampleGrad2dOffsetF32:
- return R"(tint_symbol.SampleGrad(tint_symbol_1, float2(1.0f, 2.0f), float2(3.0f, 4.0f), float2(5.0f, 6.0f), int2(7, 7));)";
- case ValidTextureOverload::kSampleGrad2dArrayF32:
- return R"(tint_symbol.SampleGrad(tint_symbol_1, float3(1.0f, 2.0f, float(3)), float2(4.0f, 5.0f), float2(6.0f, 7.0f));)";
- case ValidTextureOverload::kSampleGrad2dArrayOffsetF32:
- return R"(tint_symbol.SampleGrad(tint_symbol_1, float3(1.0f, 2.0f, float(3)), float2(4.0f, 5.0f), float2(6.0f, 7.0f), int2(6, 7));)";
- case ValidTextureOverload::kSampleGrad3dF32:
- return R"(tint_symbol.SampleGrad(tint_symbol_1, float3(1.0f, 2.0f, 3.0f), float3(4.0f, 5.0f, 6.0f), float3(7.0f, 8.0f, 9.0f));)";
- case ValidTextureOverload::kSampleGrad3dOffsetF32:
- return R"(tint_symbol.SampleGrad(tint_symbol_1, float3(1.0f, 2.0f, 3.0f), float3(4.0f, 5.0f, 6.0f), float3(7.0f, 8.0f, 9.0f), int3(0, 1, 2));)";
- case ValidTextureOverload::kSampleGradCubeF32:
- return R"(tint_symbol.SampleGrad(tint_symbol_1, float3(1.0f, 2.0f, 3.0f), float3(4.0f, 5.0f, 6.0f), float3(7.0f, 8.0f, 9.0f));)";
- case ValidTextureOverload::kSampleGradCubeArrayF32:
- return R"(tint_symbol.SampleGrad(tint_symbol_1, float4(1.0f, 2.0f, 3.0f, float(4)), float3(5.0f, 6.0f, 7.0f), float3(8.0f, 9.0f, 10.0f));)";
- case ValidTextureOverload::kSampleCompareDepth2dF32:
- return R"(tint_symbol.SampleCmp(tint_symbol_1, float2(1.0f, 2.0f), 3.0f);)";
- case ValidTextureOverload::kSampleCompareDepth2dOffsetF32:
- return R"(tint_symbol.SampleCmp(tint_symbol_1, float2(1.0f, 2.0f), 3.0f, int2(4, 5));)";
- case ValidTextureOverload::kSampleCompareDepth2dArrayF32:
- return R"(tint_symbol.SampleCmp(tint_symbol_1, float3(1.0f, 2.0f, float(4)), 3.0f);)";
- case ValidTextureOverload::kSampleCompareDepth2dArrayOffsetF32:
- return R"(tint_symbol.SampleCmp(tint_symbol_1, float3(1.0f, 2.0f, float(4)), 3.0f, int2(5, 6));)";
- case ValidTextureOverload::kSampleCompareDepthCubeF32:
- return R"(tint_symbol.SampleCmp(tint_symbol_1, float3(1.0f, 2.0f, 3.0f), 4.0f);)";
- case ValidTextureOverload::kSampleCompareDepthCubeArrayF32:
- return R"(tint_symbol.SampleCmp(tint_symbol_1, float4(1.0f, 2.0f, 3.0f, float(4)), 5.0f);)";
- case ValidTextureOverload::kSampleCompareLevelDepth2dF32:
- return R"(tint_symbol.SampleCmpLevelZero(tint_symbol_1, float2(1.0f, 2.0f), 3.0f);)";
- case ValidTextureOverload::kSampleCompareLevelDepth2dOffsetF32:
- return R"(tint_symbol.SampleCmpLevelZero(tint_symbol_1, float2(1.0f, 2.0f), 3.0f, int2(4, 5));)";
- case ValidTextureOverload::kSampleCompareLevelDepth2dArrayF32:
- return R"(tint_symbol.SampleCmpLevelZero(tint_symbol_1, float3(1.0f, 2.0f, float(4)), 3.0f);)";
- case ValidTextureOverload::kSampleCompareLevelDepth2dArrayOffsetF32:
- return R"(tint_symbol.SampleCmpLevelZero(tint_symbol_1, float3(1.0f, 2.0f, float(4)), 3.0f, int2(5, 6));)";
- case ValidTextureOverload::kSampleCompareLevelDepthCubeF32:
- return R"(tint_symbol.SampleCmpLevelZero(tint_symbol_1, float3(1.0f, 2.0f, 3.0f), 4.0f);)";
- case ValidTextureOverload::kSampleCompareLevelDepthCubeArrayF32:
- return R"(tint_symbol.SampleCmpLevelZero(tint_symbol_1, float4(1.0f, 2.0f, 3.0f, float(4)), 5.0f);)";
- case ValidTextureOverload::kLoad1dLevelF32:
- case ValidTextureOverload::kLoad1dLevelU32:
- case ValidTextureOverload::kLoad1dLevelI32:
- return R"(tint_symbol.Load(int2(1, 3));)";
- case ValidTextureOverload::kLoad2dLevelF32:
- case ValidTextureOverload::kLoad2dLevelU32:
- case ValidTextureOverload::kLoad2dLevelI32:
- return R"(tint_symbol.Load(int3(1, 2, 3));)";
- case ValidTextureOverload::kLoad2dArrayLevelF32:
- case ValidTextureOverload::kLoad2dArrayLevelU32:
- case ValidTextureOverload::kLoad2dArrayLevelI32:
- case ValidTextureOverload::kLoad3dLevelF32:
- case ValidTextureOverload::kLoad3dLevelU32:
- case ValidTextureOverload::kLoad3dLevelI32:
- return R"(tint_symbol.Load(int4(1, 2, 3, 4));)";
- case ValidTextureOverload::kLoadDepthMultisampled2dF32:
- case ValidTextureOverload::kLoadMultisampled2dF32:
- case ValidTextureOverload::kLoadMultisampled2dU32:
- case ValidTextureOverload::kLoadMultisampled2dI32:
- return R"(tint_symbol.Load(int2(1, 2), 3);)";
- case ValidTextureOverload::kLoadDepth2dLevelF32:
- return R"(tint_symbol.Load(int3(1, 2, 3)).x;)";
- case ValidTextureOverload::kLoadDepth2dArrayLevelF32:
- return R"(tint_symbol.Load(int4(1, 2, 3, 4)).x;)";
- case ValidTextureOverload::kStoreWO1dRgba32float:
- return R"(tint_symbol[1] = float4(2.0f, 3.0f, 4.0f, 5.0f);)";
- case ValidTextureOverload::kStoreWO2dRgba32float:
- return R"(tint_symbol[int2(1, 2)] = float4(3.0f, 4.0f, 5.0f, 6.0f);)";
- case ValidTextureOverload::kStoreWO2dArrayRgba32float:
- return R"(tint_symbol[int3(1, 2, 3)] = float4(4.0f, 5.0f, 6.0f, 7.0f);)";
- case ValidTextureOverload::kStoreWO3dRgba32float:
- return R"(tint_symbol[int3(1, 2, 3)] = float4(4.0f, 5.0f, 6.0f, 7.0f);)";
- }
- return "<unmatched texture overload>";
+ "tint_tmp.z;",
+ };
+ case ValidTextureOverload::kSample1dF32:
+ return R"(tint_symbol.Sample(tint_symbol_1, 1.0f);)";
+ case ValidTextureOverload::kSample2dF32:
+ return R"(tint_symbol.Sample(tint_symbol_1, float2(1.0f, 2.0f));)";
+ case ValidTextureOverload::kSample2dOffsetF32:
+ return R"(tint_symbol.Sample(tint_symbol_1, float2(1.0f, 2.0f), int2(3, 4));)";
+ case ValidTextureOverload::kSample2dArrayF32:
+ return R"(tint_symbol.Sample(tint_symbol_1, float3(1.0f, 2.0f, float(3)));)";
+ case ValidTextureOverload::kSample2dArrayOffsetF32:
+ return R"(tint_symbol.Sample(tint_symbol_1, float3(1.0f, 2.0f, float(3)), int2(4, 5));)";
+ case ValidTextureOverload::kSample3dF32:
+ return R"(tint_symbol.Sample(tint_symbol_1, float3(1.0f, 2.0f, 3.0f));)";
+ case ValidTextureOverload::kSample3dOffsetF32:
+ return R"(tint_symbol.Sample(tint_symbol_1, float3(1.0f, 2.0f, 3.0f), int3(4, 5, 6));)";
+ case ValidTextureOverload::kSampleCubeF32:
+ return R"(tint_symbol.Sample(tint_symbol_1, float3(1.0f, 2.0f, 3.0f));)";
+ case ValidTextureOverload::kSampleCubeArrayF32:
+ return R"(tint_symbol.Sample(tint_symbol_1, float4(1.0f, 2.0f, 3.0f, float(4)));)";
+ case ValidTextureOverload::kSampleDepth2dF32:
+ return R"(tint_symbol.Sample(tint_symbol_1, float2(1.0f, 2.0f)).x;)";
+ case ValidTextureOverload::kSampleDepth2dOffsetF32:
+ return R"(tint_symbol.Sample(tint_symbol_1, float2(1.0f, 2.0f), int2(3, 4)).x;)";
+ case ValidTextureOverload::kSampleDepth2dArrayF32:
+ return R"(tint_symbol.Sample(tint_symbol_1, float3(1.0f, 2.0f, float(3))).x;)";
+ case ValidTextureOverload::kSampleDepth2dArrayOffsetF32:
+ return R"(tint_symbol.Sample(tint_symbol_1, float3(1.0f, 2.0f, float(3)), int2(4, 5)).x;)";
+ case ValidTextureOverload::kSampleDepthCubeF32:
+ return R"(tint_symbol.Sample(tint_symbol_1, float3(1.0f, 2.0f, 3.0f)).x;)";
+ case ValidTextureOverload::kSampleDepthCubeArrayF32:
+ return R"(tint_symbol.Sample(tint_symbol_1, float4(1.0f, 2.0f, 3.0f, float(4))).x;)";
+ case ValidTextureOverload::kSampleBias2dF32:
+ return R"(tint_symbol.SampleBias(tint_symbol_1, float2(1.0f, 2.0f), 3.0f);)";
+ case ValidTextureOverload::kSampleBias2dOffsetF32:
+ return R"(tint_symbol.SampleBias(tint_symbol_1, float2(1.0f, 2.0f), 3.0f, int2(4, 5));)";
+ case ValidTextureOverload::kSampleBias2dArrayF32:
+ return R"(tint_symbol.SampleBias(tint_symbol_1, float3(1.0f, 2.0f, float(4)), 3.0f);)";
+ case ValidTextureOverload::kSampleBias2dArrayOffsetF32:
+ return R"(tint_symbol.SampleBias(tint_symbol_1, float3(1.0f, 2.0f, float(3)), 4.0f, int2(5, 6));)";
+ case ValidTextureOverload::kSampleBias3dF32:
+ return R"(tint_symbol.SampleBias(tint_symbol_1, float3(1.0f, 2.0f, 3.0f), 4.0f);)";
+ case ValidTextureOverload::kSampleBias3dOffsetF32:
+ return R"(tint_symbol.SampleBias(tint_symbol_1, float3(1.0f, 2.0f, 3.0f), 4.0f, int3(5, 6, 7));)";
+ case ValidTextureOverload::kSampleBiasCubeF32:
+ return R"(tint_symbol.SampleBias(tint_symbol_1, float3(1.0f, 2.0f, 3.0f), 4.0f);)";
+ case ValidTextureOverload::kSampleBiasCubeArrayF32:
+ return R"(tint_symbol.SampleBias(tint_symbol_1, float4(1.0f, 2.0f, 3.0f, float(3)), 4.0f);)";
+ case ValidTextureOverload::kSampleLevel2dF32:
+ return R"(tint_symbol.SampleLevel(tint_symbol_1, float2(1.0f, 2.0f), 3.0f);)";
+ case ValidTextureOverload::kSampleLevel2dOffsetF32:
+ return R"(tint_symbol.SampleLevel(tint_symbol_1, float2(1.0f, 2.0f), 3.0f, int2(4, 5));)";
+ case ValidTextureOverload::kSampleLevel2dArrayF32:
+ return R"(tint_symbol.SampleLevel(tint_symbol_1, float3(1.0f, 2.0f, float(3)), 4.0f);)";
+ case ValidTextureOverload::kSampleLevel2dArrayOffsetF32:
+ return R"(tint_symbol.SampleLevel(tint_symbol_1, float3(1.0f, 2.0f, float(3)), 4.0f, int2(5, 6));)";
+ case ValidTextureOverload::kSampleLevel3dF32:
+ return R"(tint_symbol.SampleLevel(tint_symbol_1, float3(1.0f, 2.0f, 3.0f), 4.0f);)";
+ case ValidTextureOverload::kSampleLevel3dOffsetF32:
+ return R"(tint_symbol.SampleLevel(tint_symbol_1, float3(1.0f, 2.0f, 3.0f), 4.0f, int3(5, 6, 7));)";
+ case ValidTextureOverload::kSampleLevelCubeF32:
+ return R"(tint_symbol.SampleLevel(tint_symbol_1, float3(1.0f, 2.0f, 3.0f), 4.0f);)";
+ case ValidTextureOverload::kSampleLevelCubeArrayF32:
+ return R"(tint_symbol.SampleLevel(tint_symbol_1, float4(1.0f, 2.0f, 3.0f, float(4)), 5.0f);)";
+ case ValidTextureOverload::kSampleLevelDepth2dF32:
+ return R"(tint_symbol.SampleLevel(tint_symbol_1, float2(1.0f, 2.0f), 3).x;)";
+ case ValidTextureOverload::kSampleLevelDepth2dOffsetF32:
+ return R"(tint_symbol.SampleLevel(tint_symbol_1, float2(1.0f, 2.0f), 3, int2(4, 5)).x;)";
+ case ValidTextureOverload::kSampleLevelDepth2dArrayF32:
+ return R"(tint_symbol.SampleLevel(tint_symbol_1, float3(1.0f, 2.0f, float(3)), 4).x;)";
+ case ValidTextureOverload::kSampleLevelDepth2dArrayOffsetF32:
+ return R"(tint_symbol.SampleLevel(tint_symbol_1, float3(1.0f, 2.0f, float(3)), 4, int2(5, 6)).x;)";
+ case ValidTextureOverload::kSampleLevelDepthCubeF32:
+ return R"(tint_symbol.SampleLevel(tint_symbol_1, float3(1.0f, 2.0f, 3.0f), 4).x;)";
+ case ValidTextureOverload::kSampleLevelDepthCubeArrayF32:
+ return R"(tint_symbol.SampleLevel(tint_symbol_1, float4(1.0f, 2.0f, 3.0f, float(4)), 5).x;)";
+ case ValidTextureOverload::kSampleGrad2dF32:
+ return R"(tint_symbol.SampleGrad(tint_symbol_1, float2(1.0f, 2.0f), float2(3.0f, 4.0f), float2(5.0f, 6.0f));)";
+ case ValidTextureOverload::kSampleGrad2dOffsetF32:
+ return R"(tint_symbol.SampleGrad(tint_symbol_1, float2(1.0f, 2.0f), float2(3.0f, 4.0f), float2(5.0f, 6.0f), (7).xx);)";
+ case ValidTextureOverload::kSampleGrad2dArrayF32:
+ return R"(tint_symbol.SampleGrad(tint_symbol_1, float3(1.0f, 2.0f, float(3)), float2(4.0f, 5.0f), float2(6.0f, 7.0f));)";
+ case ValidTextureOverload::kSampleGrad2dArrayOffsetF32:
+ return R"(tint_symbol.SampleGrad(tint_symbol_1, float3(1.0f, 2.0f, float(3)), float2(4.0f, 5.0f), float2(6.0f, 7.0f), int2(6, 7));)";
+ case ValidTextureOverload::kSampleGrad3dF32:
+ return R"(tint_symbol.SampleGrad(tint_symbol_1, float3(1.0f, 2.0f, 3.0f), float3(4.0f, 5.0f, 6.0f), float3(7.0f, 8.0f, 9.0f));)";
+ case ValidTextureOverload::kSampleGrad3dOffsetF32:
+ return R"(tint_symbol.SampleGrad(tint_symbol_1, float3(1.0f, 2.0f, 3.0f), float3(4.0f, 5.0f, 6.0f), float3(7.0f, 8.0f, 9.0f), int3(0, 1, 2));)";
+ case ValidTextureOverload::kSampleGradCubeF32:
+ return R"(tint_symbol.SampleGrad(tint_symbol_1, float3(1.0f, 2.0f, 3.0f), float3(4.0f, 5.0f, 6.0f), float3(7.0f, 8.0f, 9.0f));)";
+ case ValidTextureOverload::kSampleGradCubeArrayF32:
+ return R"(tint_symbol.SampleGrad(tint_symbol_1, float4(1.0f, 2.0f, 3.0f, float(4)), float3(5.0f, 6.0f, 7.0f), float3(8.0f, 9.0f, 10.0f));)";
+ case ValidTextureOverload::kSampleCompareDepth2dF32:
+ return R"(tint_symbol.SampleCmp(tint_symbol_1, float2(1.0f, 2.0f), 3.0f);)";
+ case ValidTextureOverload::kSampleCompareDepth2dOffsetF32:
+ return R"(tint_symbol.SampleCmp(tint_symbol_1, float2(1.0f, 2.0f), 3.0f, int2(4, 5));)";
+ case ValidTextureOverload::kSampleCompareDepth2dArrayF32:
+ return R"(tint_symbol.SampleCmp(tint_symbol_1, float3(1.0f, 2.0f, float(4)), 3.0f);)";
+ case ValidTextureOverload::kSampleCompareDepth2dArrayOffsetF32:
+ return R"(tint_symbol.SampleCmp(tint_symbol_1, float3(1.0f, 2.0f, float(4)), 3.0f, int2(5, 6));)";
+ case ValidTextureOverload::kSampleCompareDepthCubeF32:
+ return R"(tint_symbol.SampleCmp(tint_symbol_1, float3(1.0f, 2.0f, 3.0f), 4.0f);)";
+ case ValidTextureOverload::kSampleCompareDepthCubeArrayF32:
+ return R"(tint_symbol.SampleCmp(tint_symbol_1, float4(1.0f, 2.0f, 3.0f, float(4)), 5.0f);)";
+ case ValidTextureOverload::kSampleCompareLevelDepth2dF32:
+ return R"(tint_symbol.SampleCmpLevelZero(tint_symbol_1, float2(1.0f, 2.0f), 3.0f);)";
+ case ValidTextureOverload::kSampleCompareLevelDepth2dOffsetF32:
+ return R"(tint_symbol.SampleCmpLevelZero(tint_symbol_1, float2(1.0f, 2.0f), 3.0f, int2(4, 5));)";
+ case ValidTextureOverload::kSampleCompareLevelDepth2dArrayF32:
+ return R"(tint_symbol.SampleCmpLevelZero(tint_symbol_1, float3(1.0f, 2.0f, float(4)), 3.0f);)";
+ case ValidTextureOverload::kSampleCompareLevelDepth2dArrayOffsetF32:
+ return R"(tint_symbol.SampleCmpLevelZero(tint_symbol_1, float3(1.0f, 2.0f, float(4)), 3.0f, int2(5, 6));)";
+ case ValidTextureOverload::kSampleCompareLevelDepthCubeF32:
+ return R"(tint_symbol.SampleCmpLevelZero(tint_symbol_1, float3(1.0f, 2.0f, 3.0f), 4.0f);)";
+ case ValidTextureOverload::kSampleCompareLevelDepthCubeArrayF32:
+ return R"(tint_symbol.SampleCmpLevelZero(tint_symbol_1, float4(1.0f, 2.0f, 3.0f, float(4)), 5.0f);)";
+ case ValidTextureOverload::kLoad1dLevelF32:
+ case ValidTextureOverload::kLoad1dLevelU32:
+ case ValidTextureOverload::kLoad1dLevelI32:
+ return R"(tint_symbol.Load(int2(1, 3));)";
+ case ValidTextureOverload::kLoad2dLevelF32:
+ case ValidTextureOverload::kLoad2dLevelU32:
+ case ValidTextureOverload::kLoad2dLevelI32:
+ return R"(tint_symbol.Load(int3(1, 2, 3));)";
+ case ValidTextureOverload::kLoad2dArrayLevelF32:
+ case ValidTextureOverload::kLoad2dArrayLevelU32:
+ case ValidTextureOverload::kLoad2dArrayLevelI32:
+ case ValidTextureOverload::kLoad3dLevelF32:
+ case ValidTextureOverload::kLoad3dLevelU32:
+ case ValidTextureOverload::kLoad3dLevelI32:
+ return R"(tint_symbol.Load(int4(1, 2, 3, 4));)";
+ case ValidTextureOverload::kLoadDepthMultisampled2dF32:
+ case ValidTextureOverload::kLoadMultisampled2dF32:
+ case ValidTextureOverload::kLoadMultisampled2dU32:
+ case ValidTextureOverload::kLoadMultisampled2dI32:
+ return R"(tint_symbol.Load(int2(1, 2), 3);)";
+ case ValidTextureOverload::kLoadDepth2dLevelF32:
+ return R"(tint_symbol.Load(int3(1, 2, 3)).x;)";
+ case ValidTextureOverload::kLoadDepth2dArrayLevelF32:
+ return R"(tint_symbol.Load(int4(1, 2, 3, 4)).x;)";
+ case ValidTextureOverload::kStoreWO1dRgba32float:
+ return R"(tint_symbol[1] = float4(2.0f, 3.0f, 4.0f, 5.0f);)";
+ case ValidTextureOverload::kStoreWO2dRgba32float:
+ return R"(tint_symbol[int2(1, 2)] = float4(3.0f, 4.0f, 5.0f, 6.0f);)";
+ case ValidTextureOverload::kStoreWO2dArrayRgba32float:
+ return R"(tint_symbol[int3(1, 2, 3)] = float4(4.0f, 5.0f, 6.0f, 7.0f);)";
+ case ValidTextureOverload::kStoreWO3dRgba32float:
+ return R"(tint_symbol[int3(1, 2, 3)] = float4(4.0f, 5.0f, 6.0f, 7.0f);)";
+ }
+ return "<unmatched texture overload>";
} // NOLINT - Ignore the length of this function
class HlslGeneratorBuiltinTextureTest
: public TestParamHelper<ast::builtin::test::TextureOverloadCase> {};
TEST_P(HlslGeneratorBuiltinTextureTest, Call) {
- auto param = GetParam();
+ auto param = GetParam();
- param.BuildTextureVariable(this);
- param.BuildSamplerVariable(this);
+ param.BuildTextureVariable(this);
+ param.BuildSamplerVariable(this);
- auto* call = Call(param.function, param.args(this));
- auto* stmt = CallStmt(call);
+ auto* call = Call(param.function, param.args(this));
+ auto* stmt = CallStmt(call);
- Func("main", {}, ty.void_(), {stmt}, {Stage(ast::PipelineStage::kFragment)});
+ Func("main", {}, ty.void_(), {stmt}, {Stage(ast::PipelineStage::kFragment)});
- GeneratorImpl& gen = SanitizeAndBuild();
+ GeneratorImpl& gen = SanitizeAndBuild();
- ASSERT_TRUE(gen.Generate()) << gen.error();
+ ASSERT_TRUE(gen.Generate()) << gen.error();
- auto expected = expected_texture_overload(param.overload);
+ auto expected = expected_texture_overload(param.overload);
- EXPECT_THAT(gen.result(), HasSubstr(expected.pre));
- EXPECT_THAT(gen.result(), HasSubstr(expected.out));
+ EXPECT_THAT(gen.result(), HasSubstr(expected.pre));
+ EXPECT_THAT(gen.result(), HasSubstr(expected.out));
}
-INSTANTIATE_TEST_SUITE_P(
- HlslGeneratorBuiltinTextureTest,
- HlslGeneratorBuiltinTextureTest,
- testing::ValuesIn(ast::builtin::test::TextureOverloadCase::ValidCases()));
+INSTANTIATE_TEST_SUITE_P(HlslGeneratorBuiltinTextureTest,
+ HlslGeneratorBuiltinTextureTest,
+ testing::ValuesIn(ast::builtin::test::TextureOverloadCase::ValidCases()));
} // namespace
} // namespace tint::writer::hlsl
diff --git a/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_call_test.cc b/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_call_test.cc
index 8a3289e9e6a..b72b0eb740f 100644
--- a/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_call_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_call_test.cc
@@ -15,62 +15,64 @@
#include "src/tint/ast/call_statement.h"
#include "src/tint/writer/hlsl/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::hlsl {
namespace {
using HlslGeneratorImplTest_Call = TestHelper;
TEST_F(HlslGeneratorImplTest_Call, EmitExpression_Call_WithoutParams) {
- Func("my_func", {}, ty.f32(), {Return(1.23f)});
+ Func("my_func", {}, ty.f32(), {Return(1.23_f)});
- auto* call = Call("my_func");
- WrapInFunction(call);
+ auto* call = Call("my_func");
+ WrapInFunction(call);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, call)) << gen.error();
- EXPECT_EQ(out.str(), "my_func()");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, call)) << gen.error();
+ EXPECT_EQ(out.str(), "my_func()");
}
TEST_F(HlslGeneratorImplTest_Call, EmitExpression_Call_WithParams) {
- Func("my_func",
- {
- Param(Sym(), ty.f32()),
- Param(Sym(), ty.f32()),
- },
- ty.f32(), {Return(1.23f)});
- Global("param1", ty.f32(), ast::StorageClass::kPrivate);
- Global("param2", ty.f32(), ast::StorageClass::kPrivate);
-
- auto* call = Call("my_func", "param1", "param2");
- WrapInFunction(call);
-
- GeneratorImpl& gen = Build();
-
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, call)) << gen.error();
- EXPECT_EQ(out.str(), "my_func(param1, param2)");
+ Func("my_func",
+ {
+ Param(Sym(), ty.f32()),
+ Param(Sym(), ty.f32()),
+ },
+ ty.f32(), {Return(1.23_f)});
+ Global("param1", ty.f32(), ast::StorageClass::kPrivate);
+ Global("param2", ty.f32(), ast::StorageClass::kPrivate);
+
+ auto* call = Call("my_func", "param1", "param2");
+ WrapInFunction(call);
+
+ GeneratorImpl& gen = Build();
+
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, call)) << gen.error();
+ EXPECT_EQ(out.str(), "my_func(param1, param2)");
}
TEST_F(HlslGeneratorImplTest_Call, EmitStatement_Call) {
- Func("my_func",
- {
- Param(Sym(), ty.f32()),
- Param(Sym(), ty.f32()),
- },
- ty.void_(), ast::StatementList{}, ast::AttributeList{});
- Global("param1", ty.f32(), ast::StorageClass::kPrivate);
- Global("param2", ty.f32(), ast::StorageClass::kPrivate);
-
- auto* call = CallStmt(Call("my_func", "param1", "param2"));
- WrapInFunction(call);
-
- GeneratorImpl& gen = Build();
-
- gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(call)) << gen.error();
- EXPECT_EQ(gen.result(), " my_func(param1, param2);\n");
+ Func("my_func",
+ {
+ Param(Sym(), ty.f32()),
+ Param(Sym(), ty.f32()),
+ },
+ ty.void_(), ast::StatementList{}, ast::AttributeList{});
+ Global("param1", ty.f32(), ast::StorageClass::kPrivate);
+ Global("param2", ty.f32(), ast::StorageClass::kPrivate);
+
+ auto* call = CallStmt(Call("my_func", "param1", "param2"));
+ WrapInFunction(call);
+
+ GeneratorImpl& gen = Build();
+
+ gen.increment_indent();
+ ASSERT_TRUE(gen.EmitStatement(call)) << gen.error();
+ EXPECT_EQ(gen.result(), " my_func(param1, param2);\n");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_case_test.cc b/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_case_test.cc
index 004ba410b0e..ee3acfcc06f 100644
--- a/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_case_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_case_test.cc
@@ -15,56 +15,56 @@
#include "src/tint/ast/fallthrough_statement.h"
#include "src/tint/writer/hlsl/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::hlsl {
namespace {
using HlslGeneratorImplTest_Case = TestHelper;
TEST_F(HlslGeneratorImplTest_Case, Emit_Case) {
- auto* s = Switch(1, Case(Expr(5), Block(create<ast::BreakStatement>())),
- DefaultCase());
- WrapInFunction(s);
+ auto* s = Switch(1_i, Case(Expr(5_i), Block(create<ast::BreakStatement>())), DefaultCase());
+ WrapInFunction(s);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitCase(s, 0)) << gen.error();
- EXPECT_EQ(gen.result(), R"( case 5: {
+ ASSERT_TRUE(gen.EmitCase(s, 0)) << gen.error();
+ EXPECT_EQ(gen.result(), R"( case 5: {
break;
}
)");
}
TEST_F(HlslGeneratorImplTest_Case, Emit_Case_BreaksByDefault) {
- auto* s = Switch(1, Case(Expr(5), Block()), DefaultCase());
- WrapInFunction(s);
+ auto* s = Switch(1_i, Case(Expr(5_i), Block()), DefaultCase());
+ WrapInFunction(s);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitCase(s, 0)) << gen.error();
- EXPECT_EQ(gen.result(), R"( case 5: {
+ ASSERT_TRUE(gen.EmitCase(s, 0)) << gen.error();
+ EXPECT_EQ(gen.result(), R"( case 5: {
break;
}
)");
}
TEST_F(HlslGeneratorImplTest_Case, Emit_Case_WithFallthrough) {
- auto* s =
- Switch(1, //
- Case(Expr(4), Block(create<ast::FallthroughStatement>())), //
- Case(Expr(5), Block(create<ast::ReturnStatement>())), //
- DefaultCase());
- WrapInFunction(s);
+ auto* s = Switch(1_i, //
+ Case(Expr(4_i), Block(create<ast::FallthroughStatement>())), //
+ Case(Expr(5_i), Block(create<ast::ReturnStatement>())), //
+ DefaultCase());
+ WrapInFunction(s);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitCase(s, 0)) << gen.error();
- EXPECT_EQ(gen.result(), R"( case 4: {
+ ASSERT_TRUE(gen.EmitCase(s, 0)) << gen.error();
+ EXPECT_EQ(gen.result(), R"( case 4: {
/* fallthrough */
{
return;
@@ -75,17 +75,16 @@ TEST_F(HlslGeneratorImplTest_Case, Emit_Case_WithFallthrough) {
}
TEST_F(HlslGeneratorImplTest_Case, Emit_Case_MultipleSelectors) {
- auto* s =
- Switch(1, Case({Expr(5), Expr(6)}, Block(create<ast::BreakStatement>())),
- DefaultCase());
- WrapInFunction(s);
+ auto* s = Switch(1_i, Case({Expr(5_i), Expr(6_i)}, Block(create<ast::BreakStatement>())),
+ DefaultCase());
+ WrapInFunction(s);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitCase(s, 0)) << gen.error();
- EXPECT_EQ(gen.result(), R"( case 5:
+ ASSERT_TRUE(gen.EmitCase(s, 0)) << gen.error();
+ EXPECT_EQ(gen.result(), R"( case 5:
case 6: {
break;
}
@@ -93,15 +92,15 @@ TEST_F(HlslGeneratorImplTest_Case, Emit_Case_MultipleSelectors) {
}
TEST_F(HlslGeneratorImplTest_Case, Emit_Case_Default) {
- auto* s = Switch(1, DefaultCase(Block(create<ast::BreakStatement>())));
- WrapInFunction(s);
+ auto* s = Switch(1_i, DefaultCase(Block(create<ast::BreakStatement>())));
+ WrapInFunction(s);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitCase(s, 0)) << gen.error();
- EXPECT_EQ(gen.result(), R"( default: {
+ ASSERT_TRUE(gen.EmitCase(s, 0_i)) << gen.error();
+ EXPECT_EQ(gen.result(), R"( default: {
break;
}
)");
diff --git a/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_cast_test.cc b/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_cast_test.cc
index 283c8590cd7..2c4690a7189 100644
--- a/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_cast_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_cast_test.cc
@@ -14,31 +14,33 @@
#include "src/tint/writer/hlsl/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::hlsl {
namespace {
using HlslGeneratorImplTest_Cast = TestHelper;
TEST_F(HlslGeneratorImplTest_Cast, EmitExpression_Cast_Scalar) {
- auto* cast = Construct<f32>(1);
- WrapInFunction(cast);
+ auto* cast = Construct<f32>(1_i);
+ WrapInFunction(cast);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, cast)) << gen.error();
- EXPECT_EQ(out.str(), "float(1)");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, cast)) << gen.error();
+ EXPECT_EQ(out.str(), "1.0f");
}
TEST_F(HlslGeneratorImplTest_Cast, EmitExpression_Cast_Vector) {
- auto* cast = vec3<f32>(vec3<i32>(1, 2, 3));
- WrapInFunction(cast);
+ auto* cast = vec3<f32>(vec3<i32>(1_i, 2_i, 3_i));
+ WrapInFunction(cast);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, cast)) << gen.error();
- EXPECT_EQ(out.str(), "float3(int3(1, 2, 3))");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, cast)) << gen.error();
+ EXPECT_EQ(out.str(), "float3(1.0f, 2.0f, 3.0f)");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_constructor_test.cc b/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_constructor_test.cc
index 8801edd0719..fbcc79b6936 100644
--- a/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_constructor_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_constructor_test.cc
@@ -15,6 +15,8 @@
#include "gmock/gmock.h"
#include "src/tint/writer/hlsl/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::hlsl {
namespace {
@@ -23,237 +25,225 @@ using ::testing::HasSubstr;
using HlslGeneratorImplTest_Constructor = TestHelper;
TEST_F(HlslGeneratorImplTest_Constructor, EmitConstructor_Bool) {
- WrapInFunction(Expr(false));
+ WrapInFunction(Expr(false));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr("false"));
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr("false"));
}
TEST_F(HlslGeneratorImplTest_Constructor, EmitConstructor_Int) {
- WrapInFunction(Expr(-12345));
+ WrapInFunction(Expr(-12345_i));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr("-12345"));
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr("-12345"));
}
TEST_F(HlslGeneratorImplTest_Constructor, EmitConstructor_UInt) {
- WrapInFunction(Expr(56779u));
+ WrapInFunction(Expr(56779_u));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr("56779u"));
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr("56779u"));
}
TEST_F(HlslGeneratorImplTest_Constructor, EmitConstructor_Float) {
- // Use a number close to 1<<30 but whose decimal representation ends in 0.
- WrapInFunction(Expr(static_cast<float>((1 << 30) - 4)));
+ // Use a number close to 1<<30 but whose decimal representation ends in 0.
+ WrapInFunction(Expr(f32((1 << 30) - 4)));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr("1073741824.0f"));
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr("1073741824.0f"));
}
TEST_F(HlslGeneratorImplTest_Constructor, EmitConstructor_Type_Float) {
- WrapInFunction(Construct<f32>(-1.2e-5f));
+ WrapInFunction(Construct<f32>(-1.2e-5_f));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr("float(-0.000012f)"));
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr("-0.000012f"));
}
TEST_F(HlslGeneratorImplTest_Constructor, EmitConstructor_Type_Bool) {
- WrapInFunction(Construct<bool>(true));
+ WrapInFunction(Construct<bool>(true));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr("bool(true)"));
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr("true"));
}
TEST_F(HlslGeneratorImplTest_Constructor, EmitConstructor_Type_Int) {
- WrapInFunction(Construct<i32>(-12345));
+ WrapInFunction(Construct<i32>(-12345_i));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr("int(-12345)"));
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr("-12345"));
}
TEST_F(HlslGeneratorImplTest_Constructor, EmitConstructor_Type_Uint) {
- WrapInFunction(Construct<u32>(12345u));
+ WrapInFunction(Construct<u32>(12345_u));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr("uint(12345u)"));
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr("12345u"));
}
TEST_F(HlslGeneratorImplTest_Constructor, EmitConstructor_Type_Vec) {
- WrapInFunction(vec3<f32>(1.f, 2.f, 3.f));
+ WrapInFunction(vec3<f32>(1_f, 2_f, 3_f));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr("float3(1.0f, 2.0f, 3.0f)"));
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr("float3(1.0f, 2.0f, 3.0f)"));
}
TEST_F(HlslGeneratorImplTest_Constructor, EmitConstructor_Type_Vec_Empty) {
- WrapInFunction(vec3<f32>());
+ WrapInFunction(vec3<f32>());
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr("float3(0.0f, 0.0f, 0.0f)"));
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr("0.0f).xxx"));
}
-TEST_F(HlslGeneratorImplTest_Constructor,
- EmitConstructor_Type_Vec_SingleScalar_Float_Literal) {
- WrapInFunction(vec3<f32>(2.0f));
+TEST_F(HlslGeneratorImplTest_Constructor, EmitConstructor_Type_Vec_SingleScalar_Float_Literal) {
+ WrapInFunction(vec3<f32>(2_f));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr("float3((2.0f).xxx)"));
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr("2.0f).xxx"));
}
-TEST_F(HlslGeneratorImplTest_Constructor,
- EmitConstructor_Type_Vec_SingleScalar_Float_Var) {
- auto* var = Var("v", nullptr, Expr(2.0f));
- auto* cast = vec3<f32>(var);
- WrapInFunction(var, cast);
+TEST_F(HlslGeneratorImplTest_Constructor, EmitConstructor_Type_Vec_SingleScalar_Float_Var) {
+ auto* var = Var("v", nullptr, Expr(2_f));
+ auto* cast = vec3<f32>(var);
+ WrapInFunction(var, cast);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr(R"(float v = 2.0f;
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr(R"(float v = 2.0f;
const float3 tint_symbol = float3((v).xxx);)"));
}
-TEST_F(HlslGeneratorImplTest_Constructor,
- EmitConstructor_Type_Vec_SingleScalar_Bool_Literal) {
- WrapInFunction(vec3<bool>(true));
+TEST_F(HlslGeneratorImplTest_Constructor, EmitConstructor_Type_Vec_SingleScalar_Bool_Literal) {
+ WrapInFunction(vec3<bool>(true));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr("bool3((true).xxx)"));
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr("(true).xxx"));
}
-TEST_F(HlslGeneratorImplTest_Constructor,
- EmitConstructor_Type_Vec_SingleScalar_Bool_Var) {
- auto* var = Var("v", nullptr, Expr(true));
- auto* cast = vec3<bool>(var);
- WrapInFunction(var, cast);
+TEST_F(HlslGeneratorImplTest_Constructor, EmitConstructor_Type_Vec_SingleScalar_Bool_Var) {
+ auto* var = Var("v", nullptr, Expr(true));
+ auto* cast = vec3<bool>(var);
+ WrapInFunction(var, cast);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr(R"(bool v = true;
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr(R"(bool v = true;
const bool3 tint_symbol = bool3((v).xxx);)"));
}
-TEST_F(HlslGeneratorImplTest_Constructor,
- EmitConstructor_Type_Vec_SingleScalar_Int) {
- WrapInFunction(vec3<i32>(2));
+TEST_F(HlslGeneratorImplTest_Constructor, EmitConstructor_Type_Vec_SingleScalar_Int) {
+ WrapInFunction(vec3<i32>(2_i));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr("int3((2).xxx)"));
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr("2).xxx"));
}
-TEST_F(HlslGeneratorImplTest_Constructor,
- EmitConstructor_Type_Vec_SingleScalar_UInt) {
- WrapInFunction(vec3<u32>(2u));
+TEST_F(HlslGeneratorImplTest_Constructor, EmitConstructor_Type_Vec_SingleScalar_UInt) {
+ WrapInFunction(vec3<u32>(2_u));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr("uint3((2u).xxx)"));
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr("2u).xxx"));
}
TEST_F(HlslGeneratorImplTest_Constructor, EmitConstructor_Type_Mat) {
- WrapInFunction(
- mat2x3<f32>(vec3<f32>(1.f, 2.f, 3.f), vec3<f32>(3.f, 4.f, 5.f)));
+ WrapInFunction(mat2x3<f32>(vec3<f32>(1_f, 2_f, 3_f), vec3<f32>(3_f, 4_f, 5_f)));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
+ ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(
- gen.result(),
- HasSubstr(
- "float2x3(float3(1.0f, 2.0f, 3.0f), float3(3.0f, 4.0f, 5.0f))"));
+ EXPECT_THAT(gen.result(),
+ HasSubstr("float2x3(float3(1.0f, 2.0f, 3.0f), float3(3.0f, 4.0f, 5.0f))"));
}
TEST_F(HlslGeneratorImplTest_Constructor, EmitConstructor_Type_Mat_Empty) {
- WrapInFunction(mat2x3<f32>());
+ WrapInFunction(mat2x3<f32>());
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
+ ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(),
- HasSubstr("float2x3(0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f)"));
+ EXPECT_THAT(gen.result(), HasSubstr("float2x3 tint_symbol = float2x3((0.0f).xxx, (0.0f).xxx)"));
}
TEST_F(HlslGeneratorImplTest_Constructor, EmitConstructor_Type_Array) {
- WrapInFunction(Construct(ty.array(ty.vec3<f32>(), 3),
- vec3<f32>(1.f, 2.f, 3.f), vec3<f32>(4.f, 5.f, 6.f),
- vec3<f32>(7.f, 8.f, 9.f)));
+ WrapInFunction(Construct(ty.array(ty.vec3<f32>(), 3_u), vec3<f32>(1_f, 2_f, 3_f),
+ vec3<f32>(4_f, 5_f, 6_f), vec3<f32>(7_f, 8_f, 9_f)));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(),
- HasSubstr("{float3(1.0f, 2.0f, 3.0f), float3(4.0f, 5.0f, 6.0f),"
- " float3(7.0f, 8.0f, 9.0f)}"));
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr("{float3(1.0f, 2.0f, 3.0f), float3(4.0f, 5.0f, 6.0f),"
+ " float3(7.0f, 8.0f, 9.0f)}"));
}
TEST_F(HlslGeneratorImplTest_Constructor, EmitConstructor_Type_Array_Empty) {
- WrapInFunction(Construct(ty.array(ty.vec3<f32>(), 3)));
+ WrapInFunction(Construct(ty.array(ty.vec3<f32>(), 3_u)));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr("(float3[3])0"));
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr("(float3[3])0"));
}
TEST_F(HlslGeneratorImplTest_Constructor, EmitConstructor_Type_Struct) {
- auto* str = Structure("S", {
- Member("a", ty.i32()),
- Member("b", ty.f32()),
- Member("c", ty.vec3<i32>()),
- });
+ auto* str = Structure("S", {
+ Member("a", ty.i32()),
+ Member("b", ty.f32()),
+ Member("c", ty.vec3<i32>()),
+ });
- WrapInFunction(Construct(ty.Of(str), 1, 2.0f, vec3<i32>(3, 4, 5)));
+ WrapInFunction(Construct(ty.Of(str), 1_i, 2_f, vec3<i32>(3_i, 4_i, 5_i)));
- GeneratorImpl& gen = SanitizeAndBuild();
+ GeneratorImpl& gen = SanitizeAndBuild();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr("{1, 2.0f, int3(3, 4, 5)}"));
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr("{1, 2.0f, int3(3, 4, 5)}"));
}
TEST_F(HlslGeneratorImplTest_Constructor, EmitConstructor_Type_Struct_Empty) {
- auto* str = Structure("S", {
- Member("a", ty.i32()),
- Member("b", ty.f32()),
- Member("c", ty.vec3<i32>()),
- });
+ auto* str = Structure("S", {
+ Member("a", ty.i32()),
+ Member("b", ty.f32()),
+ Member("c", ty.vec3<i32>()),
+ });
- WrapInFunction(Construct(ty.Of(str)));
+ WrapInFunction(Construct(ty.Of(str)));
- GeneratorImpl& gen = SanitizeAndBuild();
+ GeneratorImpl& gen = SanitizeAndBuild();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr("(S)0"));
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr("(S)0"));
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_continue_test.cc b/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_continue_test.cc
index 5a9e07da630..c7192f6e82a 100644
--- a/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_continue_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_continue_test.cc
@@ -20,16 +20,16 @@ namespace {
using HlslGeneratorImplTest_Continue = TestHelper;
TEST_F(HlslGeneratorImplTest_Continue, Emit_Continue) {
- auto* loop = Loop(Block(If(false, Block(Break())), //
- Continue()));
- WrapInFunction(loop);
+ auto* loop = Loop(Block(If(false, Block(Break())), //
+ Continue()));
+ WrapInFunction(loop);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(loop)) << gen.error();
- EXPECT_EQ(gen.result(), R"( [loop] while (true) {
+ ASSERT_TRUE(gen.EmitStatement(loop)) << gen.error();
+ EXPECT_EQ(gen.result(), R"( [loop] while (true) {
if (false) {
break;
}
diff --git a/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_discard_test.cc b/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_discard_test.cc
index e8e52a9b5da..4bc4bf9e41b 100644
--- a/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_discard_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_discard_test.cc
@@ -20,15 +20,15 @@ namespace {
using HlslGeneratorImplTest_Discard = TestHelper;
TEST_F(HlslGeneratorImplTest_Discard, Emit_Discard) {
- auto* stmt = create<ast::DiscardStatement>();
- WrapInFunction(stmt);
+ auto* stmt = create<ast::DiscardStatement>();
+ WrapInFunction(stmt);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(stmt)) << gen.error();
- EXPECT_EQ(gen.result(), " discard;\n");
+ ASSERT_TRUE(gen.EmitStatement(stmt)) << gen.error();
+ EXPECT_EQ(gen.result(), " discard;\n");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_function_test.cc b/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_function_test.cc
index 15e9e867125..c994b35b5de 100644
--- a/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_function_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_function_test.cc
@@ -20,107 +20,106 @@
using ::testing::HasSubstr;
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::hlsl {
namespace {
using HlslGeneratorImplTest_Function = TestHelper;
TEST_F(HlslGeneratorImplTest_Function, Emit_Function) {
- Func("my_func", ast::VariableList{}, ty.void_(),
- {
- Return(),
- });
+ Func("my_func", ast::VariableList{}, ty.void_(),
+ {
+ Return(),
+ });
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"( void my_func() {
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"( void my_func() {
return;
}
)");
}
TEST_F(HlslGeneratorImplTest_Function, Emit_Function_Name_Collision) {
- Func("GeometryShader", ast::VariableList{}, ty.void_(),
- {
- Return(),
- });
+ Func("GeometryShader", ast::VariableList{}, ty.void_(),
+ {
+ Return(),
+ });
- GeneratorImpl& gen = SanitizeAndBuild();
+ GeneratorImpl& gen = SanitizeAndBuild();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr(R"( void tint_symbol() {
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr(R"( void tint_symbol() {
return;
})"));
}
TEST_F(HlslGeneratorImplTest_Function, Emit_Function_WithParams) {
- Func("my_func", ast::VariableList{Param("a", ty.f32()), Param("b", ty.i32())},
- ty.void_(),
- {
- Return(),
- });
+ Func("my_func", ast::VariableList{Param("a", ty.f32()), Param("b", ty.i32())}, ty.void_(),
+ {
+ Return(),
+ });
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"( void my_func(float a, int b) {
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"( void my_func(float a, int b) {
return;
}
)");
}
-TEST_F(HlslGeneratorImplTest_Function,
- Emit_Attribute_EntryPoint_NoReturn_Void) {
- Func("main", ast::VariableList{}, ty.void_(), {/* no explicit return */},
- {
- Stage(ast::PipelineStage::kFragment),
- });
+TEST_F(HlslGeneratorImplTest_Function, Emit_Attribute_EntryPoint_NoReturn_Void) {
+ Func("main", ast::VariableList{}, ty.void_(), {/* no explicit return */},
+ {
+ Stage(ast::PipelineStage::kFragment),
+ });
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(void main() {
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(void main() {
return;
}
)");
}
TEST_F(HlslGeneratorImplTest_Function, PtrParameter) {
- // fn f(foo : ptr<function, f32>) -> f32 {
- // return *foo;
- // }
- Func("f", {Param("foo", ty.pointer<f32>(ast::StorageClass::kFunction))},
- ty.f32(), {Return(Deref("foo"))});
+ // fn f(foo : ptr<function, f32>) -> f32 {
+ // return *foo;
+ // }
+ Func("f", {Param("foo", ty.pointer<f32>(ast::StorageClass::kFunction))}, ty.f32(),
+ {Return(Deref("foo"))});
- GeneratorImpl& gen = SanitizeAndBuild();
+ GeneratorImpl& gen = SanitizeAndBuild();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr(R"(float f(inout float foo) {
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr(R"(float f(inout float foo) {
return foo;
}
)"));
}
-TEST_F(HlslGeneratorImplTest_Function,
- Emit_Attribute_EntryPoint_WithInOutVars) {
- // fn frag_main(@location(0) foo : f32) -> @location(1) f32 {
- // return foo;
- // }
- auto* foo_in = Param("foo", ty.f32(), {Location(0)});
- Func("frag_main", ast::VariableList{foo_in}, ty.f32(), {Return("foo")},
- {Stage(ast::PipelineStage::kFragment)}, {Location(1)});
-
- GeneratorImpl& gen = SanitizeAndBuild();
-
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(struct tint_symbol_1 {
+TEST_F(HlslGeneratorImplTest_Function, Emit_Attribute_EntryPoint_WithInOutVars) {
+ // fn frag_main(@location(0) foo : f32) -> @location(1) f32 {
+ // return foo;
+ // }
+ auto* foo_in = Param("foo", ty.f32(), {Location(0)});
+ Func("frag_main", ast::VariableList{foo_in}, ty.f32(), {Return("foo")},
+ {Stage(ast::PipelineStage::kFragment)}, {Location(1)});
+
+ GeneratorImpl& gen = SanitizeAndBuild();
+
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(struct tint_symbol_1 {
float foo : TEXCOORD0;
};
struct tint_symbol_2 {
@@ -140,22 +139,18 @@ tint_symbol_2 frag_main(tint_symbol_1 tint_symbol) {
)");
}
-TEST_F(HlslGeneratorImplTest_Function,
- Emit_Attribute_EntryPoint_WithInOut_Builtins) {
- // fn frag_main(@position(0) coord : vec4<f32>) -> @frag_depth f32 {
- // return coord.x;
- // }
- auto* coord_in =
- Param("coord", ty.vec4<f32>(), {Builtin(ast::Builtin::kPosition)});
- Func("frag_main", ast::VariableList{coord_in}, ty.f32(),
- {Return(MemberAccessor("coord", "x"))},
- {Stage(ast::PipelineStage::kFragment)},
- {Builtin(ast::Builtin::kFragDepth)});
-
- GeneratorImpl& gen = SanitizeAndBuild();
-
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(struct tint_symbol_1 {
+TEST_F(HlslGeneratorImplTest_Function, Emit_Attribute_EntryPoint_WithInOut_Builtins) {
+ // fn frag_main(@position(0) coord : vec4<f32>) -> @frag_depth f32 {
+ // return coord.x;
+ // }
+ auto* coord_in = Param("coord", ty.vec4<f32>(), {Builtin(ast::Builtin::kPosition)});
+ Func("frag_main", ast::VariableList{coord_in}, ty.f32(), {Return(MemberAccessor("coord", "x"))},
+ {Stage(ast::PipelineStage::kFragment)}, {Builtin(ast::Builtin::kFragDepth)});
+
+ GeneratorImpl& gen = SanitizeAndBuild();
+
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(struct tint_symbol_1 {
float4 coord : SV_Position;
};
struct tint_symbol_2 {
@@ -175,46 +170,44 @@ tint_symbol_2 frag_main(tint_symbol_1 tint_symbol) {
)");
}
-TEST_F(HlslGeneratorImplTest_Function,
- Emit_Attribute_EntryPoint_SharedStruct_DifferentStages) {
- // struct Interface {
- // @builtin(position) pos : vec4<f32>;
- // @location(1) col1 : f32;
- // @location(2) col2 : f32;
- // };
- // fn vert_main() -> Interface {
- // return Interface(vec4<f32>(), 0.4, 0.6);
- // }
- // fn frag_main(inputs : Interface) {
- // const r = inputs.col1;
- // const g = inputs.col2;
- // const p = inputs.pos;
- // }
- auto* interface_struct = Structure(
- "Interface",
- {
- Member("pos", ty.vec4<f32>(), {Builtin(ast::Builtin::kPosition)}),
- Member("col1", ty.f32(), {Location(1)}),
- Member("col2", ty.f32(), {Location(2)}),
- });
-
- Func("vert_main", {}, ty.Of(interface_struct),
- {Return(Construct(ty.Of(interface_struct), Construct(ty.vec4<f32>()),
- Expr(0.5f), Expr(0.25f)))},
- {Stage(ast::PipelineStage::kVertex)});
-
- Func("frag_main", {Param("inputs", ty.Of(interface_struct))}, ty.void_(),
- {
- Decl(Const("r", ty.f32(), MemberAccessor("inputs", "col1"))),
- Decl(Const("g", ty.f32(), MemberAccessor("inputs", "col2"))),
- Decl(Const("p", ty.vec4<f32>(), MemberAccessor("inputs", "pos"))),
- },
- {Stage(ast::PipelineStage::kFragment)});
-
- GeneratorImpl& gen = SanitizeAndBuild();
-
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(struct Interface {
+TEST_F(HlslGeneratorImplTest_Function, Emit_Attribute_EntryPoint_SharedStruct_DifferentStages) {
+ // struct Interface {
+ // @builtin(position) pos : vec4<f32>;
+ // @location(1) col1 : f32;
+ // @location(2) col2 : f32;
+ // };
+ // fn vert_main() -> Interface {
+  //   return Interface(vec4<f32>(), 0.5, 0.25);
+ // }
+ // fn frag_main(inputs : Interface) {
+ // const r = inputs.col1;
+ // const g = inputs.col2;
+ // const p = inputs.pos;
+ // }
+ auto* interface_struct = Structure(
+ "Interface", {
+ Member("pos", ty.vec4<f32>(), {Builtin(ast::Builtin::kPosition)}),
+ Member("col1", ty.f32(), {Location(1)}),
+ Member("col2", ty.f32(), {Location(2)}),
+ });
+
+ Func("vert_main", {}, ty.Of(interface_struct),
+ {Return(Construct(ty.Of(interface_struct), Construct(ty.vec4<f32>()), Expr(0.5_f),
+ Expr(0.25_f)))},
+ {Stage(ast::PipelineStage::kVertex)});
+
+ Func("frag_main", {Param("inputs", ty.Of(interface_struct))}, ty.void_(),
+ {
+ Decl(Let("r", ty.f32(), MemberAccessor("inputs", "col1"))),
+ Decl(Let("g", ty.f32(), MemberAccessor("inputs", "col2"))),
+ Decl(Let("p", ty.vec4<f32>(), MemberAccessor("inputs", "pos"))),
+ },
+ {Stage(ast::PipelineStage::kFragment)});
+
+ GeneratorImpl& gen = SanitizeAndBuild();
+
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(struct Interface {
float4 pos;
float col1;
float col2;
@@ -226,7 +219,7 @@ struct tint_symbol {
};
Interface vert_main_inner() {
- const Interface tint_symbol_3 = {float4(0.0f, 0.0f, 0.0f, 0.0f), 0.5f, 0.25f};
+ const Interface tint_symbol_3 = {(0.0f).xxxx, 0.5f, 0.25f};
return tint_symbol_3;
}
@@ -259,40 +252,37 @@ void frag_main(tint_symbol_2 tint_symbol_1) {
)");
}
-TEST_F(HlslGeneratorImplTest_Function,
- Emit_Attribute_EntryPoint_SharedStruct_HelperFunction) {
- // struct VertexOutput {
- // @builtin(position) pos : vec4<f32>;
- // };
- // fn foo(x : f32) -> VertexOutput {
- // return VertexOutput(vec4<f32>(x, x, x, 1.0));
- // }
- // fn vert_main1() -> VertexOutput {
- // return foo(0.5);
- // }
- // fn vert_main2() -> VertexOutput {
- // return foo(0.25);
- // }
- auto* vertex_output_struct = Structure(
- "VertexOutput",
- {Member("pos", ty.vec4<f32>(), {Builtin(ast::Builtin::kPosition)})});
-
- Func("foo", {Param("x", ty.f32())}, ty.Of(vertex_output_struct),
- {Return(Construct(ty.Of(vertex_output_struct),
- Construct(ty.vec4<f32>(), "x", "x", "x", Expr(1.f))))},
- {});
-
- Func("vert_main1", {}, ty.Of(vertex_output_struct),
- {Return(Call("foo", Expr(0.5f)))}, {Stage(ast::PipelineStage::kVertex)});
-
- Func("vert_main2", {}, ty.Of(vertex_output_struct),
- {Return(Call("foo", Expr(0.25f)))},
- {Stage(ast::PipelineStage::kVertex)});
-
- GeneratorImpl& gen = SanitizeAndBuild();
-
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(struct VertexOutput {
+TEST_F(HlslGeneratorImplTest_Function, Emit_Attribute_EntryPoint_SharedStruct_HelperFunction) {
+ // struct VertexOutput {
+ // @builtin(position) pos : vec4<f32>;
+ // };
+ // fn foo(x : f32) -> VertexOutput {
+ // return VertexOutput(vec4<f32>(x, x, x, 1.0));
+ // }
+ // fn vert_main1() -> VertexOutput {
+ // return foo(0.5);
+ // }
+ // fn vert_main2() -> VertexOutput {
+ // return foo(0.25);
+ // }
+ auto* vertex_output_struct = Structure(
+ "VertexOutput", {Member("pos", ty.vec4<f32>(), {Builtin(ast::Builtin::kPosition)})});
+
+ Func("foo", {Param("x", ty.f32())}, ty.Of(vertex_output_struct),
+ {Return(Construct(ty.Of(vertex_output_struct),
+ Construct(ty.vec4<f32>(), "x", "x", "x", Expr(1_f))))},
+ {});
+
+ Func("vert_main1", {}, ty.Of(vertex_output_struct), {Return(Call("foo", Expr(0.5_f)))},
+ {Stage(ast::PipelineStage::kVertex)});
+
+ Func("vert_main2", {}, ty.Of(vertex_output_struct), {Return(Call("foo", Expr(0.25_f)))},
+ {Stage(ast::PipelineStage::kVertex)});
+
+ GeneratorImpl& gen = SanitizeAndBuild();
+
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(struct VertexOutput {
float4 pos;
};
@@ -334,38 +324,37 @@ tint_symbol_1 vert_main2() {
}
TEST_F(HlslGeneratorImplTest_Function, Emit_Attribute_EntryPoint_With_Uniform) {
- auto* ubo_ty = Structure("UBO", {Member("coord", ty.vec4<f32>())});
- auto* ubo = Global("ubo", ty.Of(ubo_ty), ast::StorageClass::kUniform,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(1),
- });
+ auto* ubo_ty = Structure("UBO", {Member("coord", ty.vec4<f32>())});
+ auto* ubo = Global("ubo", ty.Of(ubo_ty), ast::StorageClass::kUniform,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(1),
+ });
+
+ Func("sub_func",
+ {
+ Param("param", ty.f32()),
+ },
+ ty.f32(),
+ {
+ Return(MemberAccessor(MemberAccessor(ubo, "coord"), "x")),
+ });
- Func("sub_func",
- {
- Param("param", ty.f32()),
- },
- ty.f32(),
- {
- Return(MemberAccessor(MemberAccessor(ubo, "coord"), "x")),
- });
-
- auto* var =
- Var("v", ty.f32(), ast::StorageClass::kNone, Call("sub_func", 1.0f));
-
- Func("frag_main", {}, ty.void_(),
- {
- Decl(var),
- Return(),
- },
- {
- Stage(ast::PipelineStage::kFragment),
- });
-
- GeneratorImpl& gen = SanitizeAndBuild();
-
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(cbuffer cbuffer_ubo : register(b0, space1) {
+ auto* var = Var("v", ty.f32(), ast::StorageClass::kNone, Call("sub_func", 1_f));
+
+ Func("frag_main", {}, ty.void_(),
+ {
+ Decl(var),
+ Return(),
+ },
+ {
+ Stage(ast::PipelineStage::kFragment),
+ });
+
+ GeneratorImpl& gen = SanitizeAndBuild();
+
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(cbuffer cbuffer_ubo : register(b0, space1) {
uint4 ubo[1];
};
@@ -380,32 +369,31 @@ void frag_main() {
)");
}
-TEST_F(HlslGeneratorImplTest_Function,
- Emit_Attribute_EntryPoint_With_UniformStruct) {
- auto* s = Structure("Uniforms", {Member("coord", ty.vec4<f32>())});
+TEST_F(HlslGeneratorImplTest_Function, Emit_Attribute_EntryPoint_With_UniformStruct) {
+ auto* s = Structure("Uniforms", {Member("coord", ty.vec4<f32>())});
- Global("uniforms", ty.Of(s), ast::StorageClass::kUniform,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(1),
- });
+ Global("uniforms", ty.Of(s), ast::StorageClass::kUniform,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(1),
+ });
- auto* var = Var("v", ty.f32(), ast::StorageClass::kNone,
- MemberAccessor(MemberAccessor("uniforms", "coord"), "x"));
+ auto* var = Var("v", ty.f32(), ast::StorageClass::kNone,
+ MemberAccessor(MemberAccessor("uniforms", "coord"), "x"));
- Func("frag_main", ast::VariableList{}, ty.void_(),
- {
- Decl(var),
- Return(),
- },
- {
- Stage(ast::PipelineStage::kFragment),
- });
+ Func("frag_main", ast::VariableList{}, ty.void_(),
+ {
+ Decl(var),
+ Return(),
+ },
+ {
+ Stage(ast::PipelineStage::kFragment),
+ });
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(cbuffer cbuffer_uniforms : register(b0, space1) {
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(cbuffer cbuffer_uniforms : register(b0, space1) {
uint4 uniforms[1];
};
@@ -416,37 +404,34 @@ void frag_main() {
)");
}
-TEST_F(HlslGeneratorImplTest_Function,
- Emit_Attribute_EntryPoint_With_RW_StorageBuffer_Read) {
- auto* s = Structure("Data", {
- Member("a", ty.i32()),
- Member("b", ty.f32()),
- });
-
- Global("coord", ty.Of(s), ast::StorageClass::kStorage,
- ast::Access::kReadWrite,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(1),
- });
+TEST_F(HlslGeneratorImplTest_Function, Emit_Attribute_EntryPoint_With_RW_StorageBuffer_Read) {
+ auto* s = Structure("Data", {
+ Member("a", ty.i32()),
+ Member("b", ty.f32()),
+ });
+
+ Global("coord", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kReadWrite,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(1),
+ });
- auto* var = Var("v", ty.f32(), ast::StorageClass::kNone,
- MemberAccessor("coord", "b"));
+ auto* var = Var("v", ty.f32(), ast::StorageClass::kNone, MemberAccessor("coord", "b"));
- Func("frag_main", ast::VariableList{}, ty.void_(),
- {
- Decl(var),
- Return(),
- },
- {
- Stage(ast::PipelineStage::kFragment),
- });
+ Func("frag_main", ast::VariableList{}, ty.void_(),
+ {
+ Decl(var),
+ Return(),
+ },
+ {
+ Stage(ast::PipelineStage::kFragment),
+ });
- GeneratorImpl& gen = SanitizeAndBuild();
+ GeneratorImpl& gen = SanitizeAndBuild();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(),
- R"(RWByteAddressBuffer coord : register(u0, space1);
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(),
+ R"(RWByteAddressBuffer coord : register(u0, space1);
void frag_main() {
float v = asfloat(coord.Load(4u));
@@ -455,36 +440,34 @@ void frag_main() {
)");
}
-TEST_F(HlslGeneratorImplTest_Function,
- Emit_Attribute_EntryPoint_With_RO_StorageBuffer_Read) {
- auto* s = Structure("Data", {
- Member("a", ty.i32()),
- Member("b", ty.f32()),
- });
-
- Global("coord", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kRead,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(1),
- });
+TEST_F(HlslGeneratorImplTest_Function, Emit_Attribute_EntryPoint_With_RO_StorageBuffer_Read) {
+ auto* s = Structure("Data", {
+ Member("a", ty.i32()),
+ Member("b", ty.f32()),
+ });
- auto* var = Var("v", ty.f32(), ast::StorageClass::kNone,
- MemberAccessor("coord", "b"));
+ Global("coord", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kRead,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(1),
+ });
- Func("frag_main", ast::VariableList{}, ty.void_(),
- {
- Decl(var),
- Return(),
- },
- {
- Stage(ast::PipelineStage::kFragment),
- });
+ auto* var = Var("v", ty.f32(), ast::StorageClass::kNone, MemberAccessor("coord", "b"));
- GeneratorImpl& gen = SanitizeAndBuild();
+ Func("frag_main", ast::VariableList{}, ty.void_(),
+ {
+ Decl(var),
+ Return(),
+ },
+ {
+ Stage(ast::PipelineStage::kFragment),
+ });
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(),
- R"(ByteAddressBuffer coord : register(t0, space1);
+ GeneratorImpl& gen = SanitizeAndBuild();
+
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(),
+ R"(ByteAddressBuffer coord : register(t0, space1);
void frag_main() {
float v = asfloat(coord.Load(4u));
@@ -493,33 +476,32 @@ void frag_main() {
)");
}
-TEST_F(HlslGeneratorImplTest_Function,
- Emit_Attribute_EntryPoint_With_WO_StorageBuffer_Store) {
- auto* s = Structure("Data", {
- Member("a", ty.i32()),
- Member("b", ty.f32()),
- });
-
- Global("coord", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kWrite,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(1),
- });
+TEST_F(HlslGeneratorImplTest_Function, Emit_Attribute_EntryPoint_With_WO_StorageBuffer_Store) {
+ auto* s = Structure("Data", {
+ Member("a", ty.i32()),
+ Member("b", ty.f32()),
+ });
- Func("frag_main", ast::VariableList{}, ty.void_(),
- {
- Assign(MemberAccessor("coord", "b"), Expr(2.0f)),
- Return(),
- },
- {
- Stage(ast::PipelineStage::kFragment),
- });
+ Global("coord", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kWrite,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(1),
+ });
+
+ Func("frag_main", ast::VariableList{}, ty.void_(),
+ {
+ Assign(MemberAccessor("coord", "b"), Expr(2_f)),
+ Return(),
+ },
+ {
+ Stage(ast::PipelineStage::kFragment),
+ });
- GeneratorImpl& gen = SanitizeAndBuild();
+ GeneratorImpl& gen = SanitizeAndBuild();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(),
- R"(RWByteAddressBuffer coord : register(u0, space1);
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(),
+ R"(RWByteAddressBuffer coord : register(u0, space1);
void frag_main() {
coord.Store(4u, asuint(2.0f));
@@ -528,34 +510,32 @@ void frag_main() {
)");
}
-TEST_F(HlslGeneratorImplTest_Function,
- Emit_Attribute_EntryPoint_With_StorageBuffer_Store) {
- auto* s = Structure("Data", {
- Member("a", ty.i32()),
- Member("b", ty.f32()),
- });
-
- Global("coord", ty.Of(s), ast::StorageClass::kStorage,
- ast::Access::kReadWrite,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(1),
- });
+TEST_F(HlslGeneratorImplTest_Function, Emit_Attribute_EntryPoint_With_StorageBuffer_Store) {
+ auto* s = Structure("Data", {
+ Member("a", ty.i32()),
+ Member("b", ty.f32()),
+ });
+
+ Global("coord", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kReadWrite,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(1),
+ });
- Func("frag_main", ast::VariableList{}, ty.void_(),
- {
- Assign(MemberAccessor("coord", "b"), Expr(2.0f)),
- Return(),
- },
- {
- Stage(ast::PipelineStage::kFragment),
- });
+ Func("frag_main", ast::VariableList{}, ty.void_(),
+ {
+ Assign(MemberAccessor("coord", "b"), Expr(2_f)),
+ Return(),
+ },
+ {
+ Stage(ast::PipelineStage::kFragment),
+ });
- GeneratorImpl& gen = SanitizeAndBuild();
+ GeneratorImpl& gen = SanitizeAndBuild();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(),
- R"(RWByteAddressBuffer coord : register(u0, space1);
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(),
+ R"(RWByteAddressBuffer coord : register(u0, space1);
void frag_main() {
coord.Store(4u, asuint(2.0f));
@@ -564,36 +544,34 @@ void frag_main() {
)");
}
-TEST_F(HlslGeneratorImplTest_Function,
- Emit_Attribute_Called_By_EntryPoint_With_Uniform) {
- auto* s = Structure("S", {Member("x", ty.f32())});
- Global("coord", ty.Of(s), ast::StorageClass::kUniform,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(1),
- });
+TEST_F(HlslGeneratorImplTest_Function, Emit_Attribute_Called_By_EntryPoint_With_Uniform) {
+ auto* s = Structure("S", {Member("x", ty.f32())});
+ Global("coord", ty.Of(s), ast::StorageClass::kUniform,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(1),
+ });
- Func("sub_func", ast::VariableList{Param("param", ty.f32())}, ty.f32(),
- {
- Return(MemberAccessor("coord", "x")),
- });
+ Func("sub_func", ast::VariableList{Param("param", ty.f32())}, ty.f32(),
+ {
+ Return(MemberAccessor("coord", "x")),
+ });
- auto* var =
- Var("v", ty.f32(), ast::StorageClass::kNone, Call("sub_func", 1.0f));
+ auto* var = Var("v", ty.f32(), ast::StorageClass::kNone, Call("sub_func", 1_f));
- Func("frag_main", ast::VariableList{}, ty.void_(),
- {
- Decl(var),
- Return(),
- },
- {
- Stage(ast::PipelineStage::kFragment),
- });
+ Func("frag_main", ast::VariableList{}, ty.void_(),
+ {
+ Decl(var),
+ Return(),
+ },
+ {
+ Stage(ast::PipelineStage::kFragment),
+ });
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(cbuffer cbuffer_coord : register(b0, space1) {
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(cbuffer cbuffer_coord : register(b0, space1) {
uint4 coord[1];
};
@@ -608,38 +586,35 @@ void frag_main() {
)");
}
-TEST_F(HlslGeneratorImplTest_Function,
- Emit_Attribute_Called_By_EntryPoint_With_StorageBuffer) {
- auto* s = Structure("S", {Member("x", ty.f32())});
- Global("coord", ty.Of(s), ast::StorageClass::kStorage,
- ast::Access::kReadWrite,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(1),
- });
+TEST_F(HlslGeneratorImplTest_Function, Emit_Attribute_Called_By_EntryPoint_With_StorageBuffer) {
+ auto* s = Structure("S", {Member("x", ty.f32())});
+ Global("coord", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kReadWrite,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(1),
+ });
- Func("sub_func", ast::VariableList{Param("param", ty.f32())}, ty.f32(),
- {
- Return(MemberAccessor("coord", "x")),
- });
+ Func("sub_func", ast::VariableList{Param("param", ty.f32())}, ty.f32(),
+ {
+ Return(MemberAccessor("coord", "x")),
+ });
- auto* var =
- Var("v", ty.f32(), ast::StorageClass::kNone, Call("sub_func", 1.0f));
+ auto* var = Var("v", ty.f32(), ast::StorageClass::kNone, Call("sub_func", 1_f));
- Func("frag_main", ast::VariableList{}, ty.void_(),
- {
- Decl(var),
- Return(),
- },
- {
- Stage(ast::PipelineStage::kFragment),
- });
+ Func("frag_main", ast::VariableList{}, ty.void_(),
+ {
+ Decl(var),
+ Return(),
+ },
+ {
+ Stage(ast::PipelineStage::kFragment),
+ });
- GeneratorImpl& gen = SanitizeAndBuild();
+ GeneratorImpl& gen = SanitizeAndBuild();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(),
- R"(RWByteAddressBuffer coord : register(u0, space1);
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(),
+ R"(RWByteAddressBuffer coord : register(u0, space1);
float sub_func(float param) {
return asfloat(coord.Load(0u));
@@ -652,74 +627,71 @@ void frag_main() {
)");
}
-TEST_F(HlslGeneratorImplTest_Function,
- Emit_Attribute_EntryPoint_WithNameCollision) {
- Func("GeometryShader", ast::VariableList{}, ty.void_(), {},
- {
- Stage(ast::PipelineStage::kFragment),
- });
+TEST_F(HlslGeneratorImplTest_Function, Emit_Attribute_EntryPoint_WithNameCollision) {
+ Func("GeometryShader", ast::VariableList{}, ty.void_(), {},
+ {
+ Stage(ast::PipelineStage::kFragment),
+ });
- GeneratorImpl& gen = SanitizeAndBuild();
+ GeneratorImpl& gen = SanitizeAndBuild();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(void tint_symbol() {
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(void tint_symbol() {
return;
}
)");
}
TEST_F(HlslGeneratorImplTest_Function, Emit_Attribute_EntryPoint_Compute) {
- Func("main", ast::VariableList{}, ty.void_(),
- {
- Return(),
- },
- {Stage(ast::PipelineStage::kCompute), WorkgroupSize(1)});
+ Func("main", ast::VariableList{}, ty.void_(),
+ {
+ Return(),
+ },
+ {Stage(ast::PipelineStage::kCompute), WorkgroupSize(1_i)});
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"([numthreads(1, 1, 1)]
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"([numthreads(1, 1, 1)]
void main() {
return;
}
)");
}
-TEST_F(HlslGeneratorImplTest_Function,
- Emit_Attribute_EntryPoint_Compute_WithWorkgroup_Literal) {
- Func("main", ast::VariableList{}, ty.void_(), {},
- {
- Stage(ast::PipelineStage::kCompute),
- WorkgroupSize(2, 4, 6),
- });
+TEST_F(HlslGeneratorImplTest_Function, Emit_Attribute_EntryPoint_Compute_WithWorkgroup_Literal) {
+ Func("main", ast::VariableList{}, ty.void_(), {},
+ {
+ Stage(ast::PipelineStage::kCompute),
+ WorkgroupSize(2_i, 4_i, 6_i),
+ });
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"([numthreads(2, 4, 6)]
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"([numthreads(2, 4, 6)]
void main() {
return;
}
)");
}
-TEST_F(HlslGeneratorImplTest_Function,
- Emit_Attribute_EntryPoint_Compute_WithWorkgroup_Const) {
- GlobalConst("width", ty.i32(), Construct(ty.i32(), 2));
- GlobalConst("height", ty.i32(), Construct(ty.i32(), 3));
- GlobalConst("depth", ty.i32(), Construct(ty.i32(), 4));
- Func("main", ast::VariableList{}, ty.void_(), {},
- {
- Stage(ast::PipelineStage::kCompute),
- WorkgroupSize("width", "height", "depth"),
- });
-
- GeneratorImpl& gen = Build();
-
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(static const int width = int(2);
-static const int height = int(3);
-static const int depth = int(4);
+TEST_F(HlslGeneratorImplTest_Function, Emit_Attribute_EntryPoint_Compute_WithWorkgroup_Const) {
+ GlobalConst("width", ty.i32(), Construct(ty.i32(), 2_i));
+ GlobalConst("height", ty.i32(), Construct(ty.i32(), 3_i));
+ GlobalConst("depth", ty.i32(), Construct(ty.i32(), 4_i));
+ Func("main", ast::VariableList{}, ty.void_(), {},
+ {
+ Stage(ast::PipelineStage::kCompute),
+ WorkgroupSize("width", "height", "depth"),
+ });
+
+ GeneratorImpl& gen = Build();
+
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(static const int width = 2;
+static const int height = 3;
+static const int depth = 4;
[numthreads(2, 3, 4)]
void main() {
@@ -730,28 +702,28 @@ void main() {
TEST_F(HlslGeneratorImplTest_Function,
Emit_Attribute_EntryPoint_Compute_WithWorkgroup_OverridableConst) {
- Override("width", ty.i32(), Construct(ty.i32(), 2), {Id(7u)});
- Override("height", ty.i32(), Construct(ty.i32(), 3), {Id(8u)});
- Override("depth", ty.i32(), Construct(ty.i32(), 4), {Id(9u)});
- Func("main", ast::VariableList{}, ty.void_(), {},
- {
- Stage(ast::PipelineStage::kCompute),
- WorkgroupSize("width", "height", "depth"),
- });
-
- GeneratorImpl& gen = Build();
-
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(#ifndef WGSL_SPEC_CONSTANT_7
-#define WGSL_SPEC_CONSTANT_7 int(2)
+ Override("width", ty.i32(), Construct(ty.i32(), 2_i), {Id(7u)});
+ Override("height", ty.i32(), Construct(ty.i32(), 3_i), {Id(8u)});
+ Override("depth", ty.i32(), Construct(ty.i32(), 4_i), {Id(9u)});
+ Func("main", ast::VariableList{}, ty.void_(), {},
+ {
+ Stage(ast::PipelineStage::kCompute),
+ WorkgroupSize("width", "height", "depth"),
+ });
+
+ GeneratorImpl& gen = Build();
+
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(#ifndef WGSL_SPEC_CONSTANT_7
+#define WGSL_SPEC_CONSTANT_7 2
#endif
static const int width = WGSL_SPEC_CONSTANT_7;
#ifndef WGSL_SPEC_CONSTANT_8
-#define WGSL_SPEC_CONSTANT_8 int(3)
+#define WGSL_SPEC_CONSTANT_8 3
#endif
static const int height = WGSL_SPEC_CONSTANT_8;
#ifndef WGSL_SPEC_CONSTANT_9
-#define WGSL_SPEC_CONSTANT_9 int(4)
+#define WGSL_SPEC_CONSTANT_9 4
#endif
static const int depth = WGSL_SPEC_CONSTANT_9;
@@ -763,30 +735,30 @@ void main() {
}
TEST_F(HlslGeneratorImplTest_Function, Emit_Function_WithArrayParams) {
- Func("my_func", ast::VariableList{Param("a", ty.array<f32, 5>())}, ty.void_(),
- {
- Return(),
- });
+ Func("my_func", ast::VariableList{Param("a", ty.array<f32, 5>())}, ty.void_(),
+ {
+ Return(),
+ });
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(void my_func(float a[5]) {
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(void my_func(float a[5]) {
return;
}
)");
}
TEST_F(HlslGeneratorImplTest_Function, Emit_Function_WithArrayReturn) {
- Func("my_func", {}, ty.array<f32, 5>(),
- {
- Return(Construct(ty.array<f32, 5>())),
- });
+ Func("my_func", {}, ty.array<f32, 5>(),
+ {
+ Return(Construct(ty.array<f32, 5>())),
+ });
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(typedef float my_func_ret[5];
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(typedef float my_func_ret[5];
my_func_ret my_func() {
return (float[5])0;
}
@@ -794,17 +766,17 @@ my_func_ret my_func() {
}
TEST_F(HlslGeneratorImplTest_Function, Emit_Function_WithDiscardAndVoidReturn) {
- Func("my_func", {Param("a", ty.i32())}, ty.void_(),
- {
- If(Equal("a", 0), //
- Block(create<ast::DiscardStatement>())),
- Return(),
- });
+ Func("my_func", {Param("a", ty.i32())}, ty.void_(),
+ {
+ If(Equal("a", 0_i), //
+ Block(create<ast::DiscardStatement>())),
+ Return(),
+ });
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(void my_func(int a) {
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(void my_func(int a) {
if ((a == 0)) {
discard;
}
@@ -813,19 +785,18 @@ TEST_F(HlslGeneratorImplTest_Function, Emit_Function_WithDiscardAndVoidReturn) {
)");
}
-TEST_F(HlslGeneratorImplTest_Function,
- Emit_Function_WithDiscardAndNonVoidReturn) {
- Func("my_func", {Param("a", ty.i32())}, ty.i32(),
- {
- If(Equal("a", 0), //
- Block(create<ast::DiscardStatement>())),
- Return(42),
- });
-
- GeneratorImpl& gen = Build();
-
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(int my_func(int a) {
+TEST_F(HlslGeneratorImplTest_Function, Emit_Function_WithDiscardAndNonVoidReturn) {
+ Func("my_func", {Param("a", ty.i32())}, ty.i32(),
+ {
+ If(Equal("a", 0_i), //
+ Block(create<ast::DiscardStatement>())),
+ Return(42_i),
+ });
+
+ GeneratorImpl& gen = Build();
+
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(int my_func(int a) {
if (true) {
if ((a == 0)) {
discard;
@@ -839,61 +810,58 @@ TEST_F(HlslGeneratorImplTest_Function,
}
// https://crbug.com/tint/297
-TEST_F(HlslGeneratorImplTest_Function,
- Emit_Multiple_EntryPoint_With_Same_ModuleVar) {
- // struct Data {
- // d : f32;
- // };
- // @binding(0) @group(0) var<storage> data : Data;
- //
- // @stage(compute) @workgroup_size(1)
- // fn a() {
- // var v = data.d;
- // return;
- // }
- //
- // @stage(compute) @workgroup_size(1)
- // fn b() {
- // var v = data.d;
- // return;
- // }
-
- auto* s = Structure("Data", {Member("d", ty.f32())});
-
- Global("data", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kReadWrite,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(0),
- });
-
- {
- auto* var = Var("v", ty.f32(), ast::StorageClass::kNone,
- MemberAccessor("data", "d"));
-
- Func("a", ast::VariableList{}, ty.void_(),
- {
- Decl(var),
- Return(),
- },
- {Stage(ast::PipelineStage::kCompute), WorkgroupSize(1)});
- }
+TEST_F(HlslGeneratorImplTest_Function, Emit_Multiple_EntryPoint_With_Same_ModuleVar) {
+ // struct Data {
+ // d : f32;
+ // };
+ // @binding(0) @group(0) var<storage> data : Data;
+ //
+ // @compute @workgroup_size(1)
+ // fn a() {
+ // var v = data.d;
+ // return;
+ // }
+ //
+ // @compute @workgroup_size(1)
+ // fn b() {
+ // var v = data.d;
+ // return;
+ // }
+
+ auto* s = Structure("Data", {Member("d", ty.f32())});
+
+ Global("data", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kReadWrite,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(0),
+ });
+
+ {
+ auto* var = Var("v", ty.f32(), ast::StorageClass::kNone, MemberAccessor("data", "d"));
+
+ Func("a", ast::VariableList{}, ty.void_(),
+ {
+ Decl(var),
+ Return(),
+ },
+ {Stage(ast::PipelineStage::kCompute), WorkgroupSize(1_i)});
+ }
- {
- auto* var = Var("v", ty.f32(), ast::StorageClass::kNone,
- MemberAccessor("data", "d"));
+ {
+ auto* var = Var("v", ty.f32(), ast::StorageClass::kNone, MemberAccessor("data", "d"));
- Func("b", ast::VariableList{}, ty.void_(),
- {
- Decl(var),
- Return(),
- },
- {Stage(ast::PipelineStage::kCompute), WorkgroupSize(1)});
- }
+ Func("b", ast::VariableList{}, ty.void_(),
+ {
+ Decl(var),
+ Return(),
+ },
+ {Stage(ast::PipelineStage::kCompute), WorkgroupSize(1_i)});
+ }
- GeneratorImpl& gen = SanitizeAndBuild();
+ GeneratorImpl& gen = SanitizeAndBuild();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(RWByteAddressBuffer data : register(u0, space0);
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(RWByteAddressBuffer data : register(u0, space0);
[numthreads(1, 1, 1)]
void a() {
diff --git a/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_identifier_test.cc b/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_identifier_test.cc
index d054bd8644d..d982e1e7dfd 100644
--- a/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_identifier_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_identifier_test.cc
@@ -20,16 +20,16 @@ namespace {
using HlslGeneratorImplTest_Identifier = TestHelper;
TEST_F(HlslGeneratorImplTest_Identifier, EmitIdentifierExpression) {
- Global("foo", ty.i32(), ast::StorageClass::kPrivate);
+ Global("foo", ty.i32(), ast::StorageClass::kPrivate);
- auto* i = Expr("foo");
- WrapInFunction(i);
+ auto* i = Expr("foo");
+ WrapInFunction(i);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, i)) << gen.error();
- EXPECT_EQ(out.str(), "foo");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, i)) << gen.error();
+ EXPECT_EQ(out.str(), "foo");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_if_test.cc b/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_if_test.cc
index f2e092cf9c8..1668d71cb8a 100644
--- a/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_if_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_if_test.cc
@@ -20,43 +20,41 @@ namespace {
using HlslGeneratorImplTest_If = TestHelper;
TEST_F(HlslGeneratorImplTest_If, Emit_If) {
- Global("cond", ty.bool_(), ast::StorageClass::kPrivate);
+ Global("cond", ty.bool_(), ast::StorageClass::kPrivate);
- auto* cond = Expr("cond");
- auto* body = Block(Return());
- auto* i = If(cond, body);
- WrapInFunction(i);
+ auto* cond = Expr("cond");
+ auto* body = Block(Return());
+ auto* i = If(cond, body);
+ WrapInFunction(i);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(i)) << gen.error();
- EXPECT_EQ(gen.result(), R"( if (cond) {
+ gen.increment_indent();
+ ASSERT_TRUE(gen.EmitStatement(i)) << gen.error();
+ EXPECT_EQ(gen.result(), R"( if (cond) {
return;
}
)");
}
TEST_F(HlslGeneratorImplTest_If, Emit_IfWithElseIf) {
- Global("cond", ty.bool_(), ast::StorageClass::kPrivate);
- Global("else_cond", ty.bool_(), ast::StorageClass::kPrivate);
+ Global("cond", ty.bool_(), ast::StorageClass::kPrivate);
+ Global("else_cond", ty.bool_(), ast::StorageClass::kPrivate);
- auto* else_cond = Expr("else_cond");
- auto* else_body = Block(Return());
+ auto* else_cond = Expr("else_cond");
+ auto* else_body = Block(Return());
- auto* cond = Expr("cond");
- auto* body = Block(Return());
- auto* i = If(
- cond, body,
- ast::ElseStatementList{create<ast::ElseStatement>(else_cond, else_body)});
- WrapInFunction(i);
+ auto* cond = Expr("cond");
+ auto* body = Block(Return());
+ auto* i = If(cond, body, Else(If(else_cond, else_body)));
+ WrapInFunction(i);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(i)) << gen.error();
- EXPECT_EQ(gen.result(), R"( if (cond) {
+ ASSERT_TRUE(gen.EmitStatement(i)) << gen.error();
+ EXPECT_EQ(gen.result(), R"( if (cond) {
return;
} else {
if (else_cond) {
@@ -67,23 +65,21 @@ TEST_F(HlslGeneratorImplTest_If, Emit_IfWithElseIf) {
}
TEST_F(HlslGeneratorImplTest_If, Emit_IfWithElse) {
- Global("cond", ty.bool_(), ast::StorageClass::kPrivate);
+ Global("cond", ty.bool_(), ast::StorageClass::kPrivate);
- auto* else_body = Block(Return());
+ auto* else_body = Block(Return());
- auto* cond = Expr("cond");
- auto* body = Block(Return());
- auto* i = If(
- cond, body,
- ast::ElseStatementList{create<ast::ElseStatement>(nullptr, else_body)});
- WrapInFunction(i);
+ auto* cond = Expr("cond");
+ auto* body = Block(Return());
+ auto* i = If(cond, body, Else(else_body));
+ WrapInFunction(i);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(i)) << gen.error();
- EXPECT_EQ(gen.result(), R"( if (cond) {
+ ASSERT_TRUE(gen.EmitStatement(i)) << gen.error();
+ EXPECT_EQ(gen.result(), R"( if (cond) {
return;
} else {
return;
@@ -92,30 +88,26 @@ TEST_F(HlslGeneratorImplTest_If, Emit_IfWithElse) {
}
TEST_F(HlslGeneratorImplTest_If, Emit_IfWithMultiple) {
- Global("cond", ty.bool_(), ast::StorageClass::kPrivate);
- Global("else_cond", ty.bool_(), ast::StorageClass::kPrivate);
+ Global("cond", ty.bool_(), ast::StorageClass::kPrivate);
+ Global("else_cond", ty.bool_(), ast::StorageClass::kPrivate);
- auto* else_cond = Expr("else_cond");
+ auto* else_cond = Expr("else_cond");
- auto* else_body = Block(Return());
+ auto* else_body = Block(Return());
- auto* else_body_2 = Block(Return());
+ auto* else_body_2 = Block(Return());
- auto* cond = Expr("cond");
- auto* body = Block(Return());
- auto* i = If(cond, body,
- ast::ElseStatementList{
- create<ast::ElseStatement>(else_cond, else_body),
- create<ast::ElseStatement>(nullptr, else_body_2),
- });
- WrapInFunction(i);
+ auto* cond = Expr("cond");
+ auto* body = Block(Return());
+ auto* i = If(cond, body, Else(If(else_cond, else_body, Else(else_body_2))));
+ WrapInFunction(i);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(i)) << gen.error();
- EXPECT_EQ(gen.result(), R"( if (cond) {
+ ASSERT_TRUE(gen.EmitStatement(i)) << gen.error();
+ EXPECT_EQ(gen.result(), R"( if (cond) {
return;
} else {
if (else_cond) {
diff --git a/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_import_test.cc b/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_import_test.cc
index ac415fe9e93..f7d544e49cd 100644
--- a/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_import_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_import_test.cc
@@ -14,33 +14,35 @@
#include "src/tint/writer/hlsl/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::hlsl {
namespace {
using HlslGeneratorImplTest_Import = TestHelper;
struct HlslImportData {
- const char* name;
- const char* hlsl_name;
+ const char* name;
+ const char* hlsl_name;
};
inline std::ostream& operator<<(std::ostream& out, HlslImportData data) {
- out << data.name;
- return out;
+ out << data.name;
+ return out;
}
using HlslImportData_SingleParamTest = TestParamHelper<HlslImportData>;
TEST_P(HlslImportData_SingleParamTest, FloatScalar) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* ident = Expr(param.name);
- auto* expr = Call(ident, 1.f);
- WrapInFunction(expr);
+ auto* ident = Expr(param.name);
+ auto* expr = Call(ident, 1_f);
+ WrapInFunction(expr);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitCall(out, expr)) << gen.error();
- EXPECT_EQ(out.str(), std::string(param.hlsl_name) + "(1.0f)");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitCall(out, expr)) << gen.error();
+ EXPECT_EQ(out.str(), std::string(param.hlsl_name) + "(1.0f)");
}
INSTANTIATE_TEST_SUITE_P(HlslGeneratorImplTest_Import,
HlslImportData_SingleParamTest,
@@ -70,16 +72,16 @@ INSTANTIATE_TEST_SUITE_P(HlslGeneratorImplTest_Import,
using HlslImportData_SingleIntParamTest = TestParamHelper<HlslImportData>;
TEST_P(HlslImportData_SingleIntParamTest, IntScalar) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* expr = Call(param.name, Expr(1));
- WrapInFunction(expr);
+ auto* expr = Call(param.name, Expr(1_i));
+ WrapInFunction(expr);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitCall(out, expr)) << gen.error();
- EXPECT_EQ(out.str(), std::string(param.hlsl_name) + "(1)");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitCall(out, expr)) << gen.error();
+ EXPECT_EQ(out.str(), std::string(param.hlsl_name) + "(1)");
}
INSTANTIATE_TEST_SUITE_P(HlslGeneratorImplTest_Import,
HlslImportData_SingleIntParamTest,
@@ -87,18 +89,17 @@ INSTANTIATE_TEST_SUITE_P(HlslGeneratorImplTest_Import,
using HlslImportData_SingleVectorParamTest = TestParamHelper<HlslImportData>;
TEST_P(HlslImportData_SingleVectorParamTest, FloatVector) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* ident = Expr(param.name);
- auto* expr = Call(ident, vec3<f32>(1.f, 2.f, 3.f));
- WrapInFunction(expr);
+ auto* ident = Expr(param.name);
+ auto* expr = Call(ident, vec3<f32>(1_f, 2_f, 3_f));
+ WrapInFunction(expr);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitCall(out, expr)) << gen.error();
- EXPECT_EQ(out.str(),
- std::string(param.hlsl_name) + "(float3(1.0f, 2.0f, 3.0f))");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitCall(out, expr)) << gen.error();
+ EXPECT_EQ(out.str(), std::string(param.hlsl_name) + "(float3(1.0f, 2.0f, 3.0f))");
}
INSTANTIATE_TEST_SUITE_P(HlslGeneratorImplTest_Import,
HlslImportData_SingleVectorParamTest,
@@ -117,8 +118,7 @@ INSTANTIATE_TEST_SUITE_P(HlslGeneratorImplTest_Import,
HlslImportData{"length", "length"},
HlslImportData{"log", "log"},
HlslImportData{"log2", "log2"},
- HlslImportData{"normalize",
- "normalize"},
+ HlslImportData{"normalize", "normalize"},
HlslImportData{"round", "round"},
HlslImportData{"sign", "sign"},
HlslImportData{"sin", "sin"},
@@ -130,16 +130,16 @@ INSTANTIATE_TEST_SUITE_P(HlslGeneratorImplTest_Import,
using HlslImportData_DualParam_ScalarTest = TestParamHelper<HlslImportData>;
TEST_P(HlslImportData_DualParam_ScalarTest, Float) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* expr = Call(param.name, 1.f, 2.f);
- WrapInFunction(expr);
+ auto* expr = Call(param.name, 1_f, 2_f);
+ WrapInFunction(expr);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitCall(out, expr)) << gen.error();
- EXPECT_EQ(out.str(), std::string(param.hlsl_name) + "(1.0f, 2.0f)");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitCall(out, expr)) << gen.error();
+ EXPECT_EQ(out.str(), std::string(param.hlsl_name) + "(1.0f, 2.0f)");
}
INSTANTIATE_TEST_SUITE_P(HlslGeneratorImplTest_Import,
HlslImportData_DualParam_ScalarTest,
@@ -152,19 +152,17 @@ INSTANTIATE_TEST_SUITE_P(HlslGeneratorImplTest_Import,
using HlslImportData_DualParam_VectorTest = TestParamHelper<HlslImportData>;
TEST_P(HlslImportData_DualParam_VectorTest, Float) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* expr =
- Call(param.name, vec3<f32>(1.f, 2.f, 3.f), vec3<f32>(4.f, 5.f, 6.f));
- WrapInFunction(expr);
+ auto* expr = Call(param.name, vec3<f32>(1_f, 2_f, 3_f), vec3<f32>(4_f, 5_f, 6_f));
+ WrapInFunction(expr);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitCall(out, expr)) << gen.error();
- EXPECT_EQ(out.str(),
- std::string(param.hlsl_name) +
- "(float3(1.0f, 2.0f, 3.0f), float3(4.0f, 5.0f, 6.0f))");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitCall(out, expr)) << gen.error();
+ EXPECT_EQ(out.str(), std::string(param.hlsl_name) +
+ "(float3(1.0f, 2.0f, 3.0f), float3(4.0f, 5.0f, 6.0f))");
}
INSTANTIATE_TEST_SUITE_P(HlslGeneratorImplTest_Import,
HlslImportData_DualParam_VectorTest,
@@ -179,16 +177,16 @@ INSTANTIATE_TEST_SUITE_P(HlslGeneratorImplTest_Import,
using HlslImportData_DualParam_Int_Test = TestParamHelper<HlslImportData>;
TEST_P(HlslImportData_DualParam_Int_Test, IntScalar) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* expr = Call(param.name, 1, 2);
- WrapInFunction(expr);
+ auto* expr = Call(param.name, 1_i, 2_i);
+ WrapInFunction(expr);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitCall(out, expr)) << gen.error();
- EXPECT_EQ(out.str(), std::string(param.hlsl_name) + "(1, 2)");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitCall(out, expr)) << gen.error();
+ EXPECT_EQ(out.str(), std::string(param.hlsl_name) + "(1, 2)");
}
INSTANTIATE_TEST_SUITE_P(HlslGeneratorImplTest_Import,
HlslImportData_DualParam_Int_Test,
@@ -197,82 +195,80 @@ INSTANTIATE_TEST_SUITE_P(HlslGeneratorImplTest_Import,
using HlslImportData_TripleParam_ScalarTest = TestParamHelper<HlslImportData>;
TEST_P(HlslImportData_TripleParam_ScalarTest, Float) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* expr = Call(param.name, 1.f, 2.f, 3.f);
- WrapInFunction(expr);
+ auto* expr = Call(param.name, 1_f, 2_f, 3_f);
+ WrapInFunction(expr);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitCall(out, expr)) << gen.error();
- EXPECT_EQ(out.str(), std::string(param.hlsl_name) + "(1.0f, 2.0f, 3.0f)");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitCall(out, expr)) << gen.error();
+ EXPECT_EQ(out.str(), std::string(param.hlsl_name) + "(1.0f, 2.0f, 3.0f)");
}
INSTANTIATE_TEST_SUITE_P(HlslGeneratorImplTest_Import,
HlslImportData_TripleParam_ScalarTest,
testing::Values(HlslImportData{"fma", "mad"},
HlslImportData{"mix", "lerp"},
HlslImportData{"clamp", "clamp"},
- HlslImportData{"smoothStep",
- "smoothstep"}));
+ HlslImportData{"smoothstep", "smoothstep"}));
using HlslImportData_TripleParam_VectorTest = TestParamHelper<HlslImportData>;
TEST_P(HlslImportData_TripleParam_VectorTest, Float) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* expr = Call(param.name, vec3<f32>(1.f, 2.f, 3.f),
- vec3<f32>(4.f, 5.f, 6.f), vec3<f32>(7.f, 8.f, 9.f));
- WrapInFunction(expr);
+ auto* expr = Call(param.name, vec3<f32>(1_f, 2_f, 3_f), vec3<f32>(4_f, 5_f, 6_f),
+ vec3<f32>(7_f, 8_f, 9_f));
+ WrapInFunction(expr);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitCall(out, expr)) << gen.error();
- EXPECT_EQ(
- out.str(),
- std::string(param.hlsl_name) +
- R"((float3(1.0f, 2.0f, 3.0f), float3(4.0f, 5.0f, 6.0f), float3(7.0f, 8.0f, 9.0f)))");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitCall(out, expr)) << gen.error();
+ EXPECT_EQ(
+ out.str(),
+ std::string(param.hlsl_name) +
+ R"((float3(1.0f, 2.0f, 3.0f), float3(4.0f, 5.0f, 6.0f), float3(7.0f, 8.0f, 9.0f)))");
}
-INSTANTIATE_TEST_SUITE_P(
- HlslGeneratorImplTest_Import,
- HlslImportData_TripleParam_VectorTest,
- testing::Values(HlslImportData{"faceForward", "faceforward"},
- HlslImportData{"fma", "mad"},
- HlslImportData{"clamp", "clamp"},
- HlslImportData{"smoothStep", "smoothstep"}));
+INSTANTIATE_TEST_SUITE_P(HlslGeneratorImplTest_Import,
+ HlslImportData_TripleParam_VectorTest,
+ testing::Values(HlslImportData{"faceForward", "faceforward"},
+ HlslImportData{"fma", "mad"},
+ HlslImportData{"clamp", "clamp"},
+ HlslImportData{"smoothstep", "smoothstep"}));
TEST_F(HlslGeneratorImplTest_Import, DISABLED_HlslImportData_FMix) {
- FAIL();
+ FAIL();
}
using HlslImportData_TripleParam_Int_Test = TestParamHelper<HlslImportData>;
TEST_P(HlslImportData_TripleParam_Int_Test, IntScalar) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* expr = Call(param.name, 1, 2, 3);
- WrapInFunction(expr);
+ auto* expr = Call(param.name, 1_i, 2_i, 3_i);
+ WrapInFunction(expr);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitCall(out, expr)) << gen.error();
- EXPECT_EQ(out.str(), std::string(param.hlsl_name) + "(1, 2, 3)");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitCall(out, expr)) << gen.error();
+ EXPECT_EQ(out.str(), std::string(param.hlsl_name) + "(1, 2, 3)");
}
INSTANTIATE_TEST_SUITE_P(HlslGeneratorImplTest_Import,
HlslImportData_TripleParam_Int_Test,
testing::Values(HlslImportData{"clamp", "clamp"}));
TEST_F(HlslGeneratorImplTest_Import, HlslImportData_Determinant) {
- Global("var", ty.mat3x3<f32>(), ast::StorageClass::kPrivate);
+ Global("var", ty.mat3x3<f32>(), ast::StorageClass::kPrivate);
- auto* expr = Call("determinant", "var");
- WrapInFunction(expr);
+ auto* expr = Call("determinant", "var");
+ WrapInFunction(expr);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitCall(out, expr)) << gen.error();
- EXPECT_EQ(out.str(), std::string("determinant(var)"));
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitCall(out, expr)) << gen.error();
+ EXPECT_EQ(out.str(), std::string("determinant(var)"));
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_loop_test.cc b/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_loop_test.cc
index 6932d3b7a3f..0bf4090c96d 100644
--- a/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_loop_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_loop_test.cc
@@ -15,44 +15,46 @@
#include "src/tint/ast/variable_decl_statement.h"
#include "src/tint/writer/hlsl/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::hlsl {
namespace {
using HlslGeneratorImplTest_Loop = TestHelper;
TEST_F(HlslGeneratorImplTest_Loop, Emit_Loop) {
- auto* body = Block(create<ast::DiscardStatement>());
- auto* continuing = Block();
- auto* l = Loop(body, continuing);
+ auto* body = Block(create<ast::DiscardStatement>());
+ auto* continuing = Block();
+ auto* l = Loop(body, continuing);
- WrapInFunction(l);
+ WrapInFunction(l);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(l)) << gen.error();
- EXPECT_EQ(gen.result(), R"( [loop] while (true) {
+ ASSERT_TRUE(gen.EmitStatement(l)) << gen.error();
+ EXPECT_EQ(gen.result(), R"( [loop] while (true) {
discard;
}
)");
}
TEST_F(HlslGeneratorImplTest_Loop, Emit_LoopWithContinuing) {
- Func("a_statement", {}, ty.void_(), {});
+ Func("a_statement", {}, ty.void_(), {});
- auto* body = Block(create<ast::DiscardStatement>());
- auto* continuing = Block(CallStmt(Call("a_statement")));
- auto* l = Loop(body, continuing);
+ auto* body = Block(create<ast::DiscardStatement>());
+ auto* continuing = Block(CallStmt(Call("a_statement")));
+ auto* l = Loop(body, continuing);
- WrapInFunction(l);
+ WrapInFunction(l);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(l)) << gen.error();
- EXPECT_EQ(gen.result(), R"( [loop] while (true) {
+ ASSERT_TRUE(gen.EmitStatement(l)) << gen.error();
+ EXPECT_EQ(gen.result(), R"( [loop] while (true) {
discard;
{
a_statement();
@@ -62,31 +64,31 @@ TEST_F(HlslGeneratorImplTest_Loop, Emit_LoopWithContinuing) {
}
TEST_F(HlslGeneratorImplTest_Loop, Emit_LoopNestedWithContinuing) {
- Func("a_statement", {}, ty.void_(), {});
+ Func("a_statement", {}, ty.void_(), {});
- Global("lhs", ty.f32(), ast::StorageClass::kPrivate);
- Global("rhs", ty.f32(), ast::StorageClass::kPrivate);
+ Global("lhs", ty.f32(), ast::StorageClass::kPrivate);
+ Global("rhs", ty.f32(), ast::StorageClass::kPrivate);
- auto* body = Block(create<ast::DiscardStatement>());
- auto* continuing = Block(CallStmt(Call("a_statement")));
- auto* inner = Loop(body, continuing);
+ auto* body = Block(create<ast::DiscardStatement>());
+ auto* continuing = Block(CallStmt(Call("a_statement")));
+ auto* inner = Loop(body, continuing);
- body = Block(inner);
+ body = Block(inner);
- auto* lhs = Expr("lhs");
- auto* rhs = Expr("rhs");
+ auto* lhs = Expr("lhs");
+ auto* rhs = Expr("rhs");
- continuing = Block(Assign(lhs, rhs));
+ continuing = Block(Assign(lhs, rhs));
- auto* outer = Loop(body, continuing);
- WrapInFunction(outer);
+ auto* outer = Loop(body, continuing);
+ WrapInFunction(outer);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(outer)) << gen.error();
- EXPECT_EQ(gen.result(), R"( [loop] while (true) {
+ ASSERT_TRUE(gen.EmitStatement(outer)) << gen.error();
+ EXPECT_EQ(gen.result(), R"( [loop] while (true) {
[loop] while (true) {
discard;
{
@@ -101,31 +103,31 @@ TEST_F(HlslGeneratorImplTest_Loop, Emit_LoopNestedWithContinuing) {
}
TEST_F(HlslGeneratorImplTest_Loop, Emit_LoopWithVarUsedInContinuing) {
- // loop {
- // var lhs : f32 = 2.4;
- // var other : f32;
- // break;
- // continuing {
- // lhs = rhs
- // }
- // }
+ // loop {
+ // var lhs : f32 = 2.4;
+ // var other : f32;
+ // break;
+ // continuing {
+ // lhs = rhs
+ // }
+ // }
- Global("rhs", ty.f32(), ast::StorageClass::kPrivate);
+ Global("rhs", ty.f32(), ast::StorageClass::kPrivate);
- auto* body = Block(Decl(Var("lhs", ty.f32(), Expr(2.4f))), //
- Decl(Var("other", ty.f32())), //
- Break());
+ auto* body = Block(Decl(Var("lhs", ty.f32(), Expr(2.4_f))), //
+ Decl(Var("other", ty.f32())), //
+ Break());
- auto* continuing = Block(Assign("lhs", "rhs"));
- auto* outer = Loop(body, continuing);
- WrapInFunction(outer);
+ auto* continuing = Block(Assign("lhs", "rhs"));
+ auto* outer = Loop(body, continuing);
+ WrapInFunction(outer);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(outer)) << gen.error();
- EXPECT_EQ(gen.result(), R"( [loop] while (true) {
+ ASSERT_TRUE(gen.EmitStatement(outer)) << gen.error();
+ EXPECT_EQ(gen.result(), R"( [loop] while (true) {
float lhs = 2.400000095f;
float other = 0.0f;
break;
@@ -137,19 +139,19 @@ TEST_F(HlslGeneratorImplTest_Loop, Emit_LoopWithVarUsedInContinuing) {
}
TEST_F(HlslGeneratorImplTest_Loop, Emit_ForLoop) {
- // for(; ; ) {
- // return;
- // }
+ // for(; ; ) {
+ // return;
+ // }
- auto* f = For(nullptr, nullptr, nullptr, Block(Return()));
- WrapInFunction(f);
+ auto* f = For(nullptr, nullptr, nullptr, Block(Return()));
+ WrapInFunction(f);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(f)) << gen.error();
- EXPECT_EQ(gen.result(), R"( {
+ ASSERT_TRUE(gen.EmitStatement(f)) << gen.error();
+ EXPECT_EQ(gen.result(), R"( {
[loop] for(; ; ) {
return;
}
@@ -158,19 +160,19 @@ TEST_F(HlslGeneratorImplTest_Loop, Emit_ForLoop) {
}
TEST_F(HlslGeneratorImplTest_Loop, Emit_ForLoopWithSimpleInit) {
- // for(var i : i32; ; ) {
- // return;
- // }
+ // for(var i : i32; ; ) {
+ // return;
+ // }
- auto* f = For(Decl(Var("i", ty.i32())), nullptr, nullptr, Block(Return()));
- WrapInFunction(f);
+ auto* f = For(Decl(Var("i", ty.i32())), nullptr, nullptr, Block(Return()));
+ WrapInFunction(f);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(f)) << gen.error();
- EXPECT_EQ(gen.result(), R"( {
+ ASSERT_TRUE(gen.EmitStatement(f)) << gen.error();
+ EXPECT_EQ(gen.result(), R"( {
[loop] for(int i = 0; ; ) {
return;
}
@@ -179,22 +181,21 @@ TEST_F(HlslGeneratorImplTest_Loop, Emit_ForLoopWithSimpleInit) {
}
TEST_F(HlslGeneratorImplTest_Loop, Emit_ForLoopWithMultiStmtInit) {
- // for(var b = true && false; ; ) {
- // return;
- // }
+ // for(var b = true && false; ; ) {
+ // return;
+ // }
- auto* multi_stmt = create<ast::BinaryExpression>(ast::BinaryOp::kLogicalAnd,
- Expr(true), Expr(false));
- auto* f = For(Decl(Var("b", nullptr, multi_stmt)), nullptr, nullptr,
- Block(Return()));
- WrapInFunction(f);
+ auto* multi_stmt =
+ create<ast::BinaryExpression>(ast::BinaryOp::kLogicalAnd, Expr(true), Expr(false));
+ auto* f = For(Decl(Var("b", nullptr, multi_stmt)), nullptr, nullptr, Block(Return()));
+ WrapInFunction(f);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(f)) << gen.error();
- EXPECT_EQ(gen.result(), R"( {
+ ASSERT_TRUE(gen.EmitStatement(f)) << gen.error();
+ EXPECT_EQ(gen.result(), R"( {
bool tint_tmp = true;
if (tint_tmp) {
tint_tmp = false;
@@ -208,19 +209,19 @@ TEST_F(HlslGeneratorImplTest_Loop, Emit_ForLoopWithMultiStmtInit) {
}
TEST_F(HlslGeneratorImplTest_Loop, Emit_ForLoopWithSimpleCond) {
- // for(; true; ) {
- // return;
- // }
+ // for(; true; ) {
+ // return;
+ // }
- auto* f = For(nullptr, true, nullptr, Block(Return()));
- WrapInFunction(f);
+ auto* f = For(nullptr, true, nullptr, Block(Return()));
+ WrapInFunction(f);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(f)) << gen.error();
- EXPECT_EQ(gen.result(), R"( {
+ ASSERT_TRUE(gen.EmitStatement(f)) << gen.error();
+ EXPECT_EQ(gen.result(), R"( {
[loop] for(; true; ) {
return;
}
@@ -229,21 +230,21 @@ TEST_F(HlslGeneratorImplTest_Loop, Emit_ForLoopWithSimpleCond) {
}
TEST_F(HlslGeneratorImplTest_Loop, Emit_ForLoopWithMultiStmtCond) {
- // for(; true && false; ) {
- // return;
- // }
+ // for(; true && false; ) {
+ // return;
+ // }
- auto* multi_stmt = create<ast::BinaryExpression>(ast::BinaryOp::kLogicalAnd,
- Expr(true), Expr(false));
- auto* f = For(nullptr, multi_stmt, nullptr, Block(Return()));
- WrapInFunction(f);
+ auto* multi_stmt =
+ create<ast::BinaryExpression>(ast::BinaryOp::kLogicalAnd, Expr(true), Expr(false));
+ auto* f = For(nullptr, multi_stmt, nullptr, Block(Return()));
+ WrapInFunction(f);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(f)) << gen.error();
- EXPECT_EQ(gen.result(), R"( {
+ ASSERT_TRUE(gen.EmitStatement(f)) << gen.error();
+ EXPECT_EQ(gen.result(), R"( {
[loop] while (true) {
bool tint_tmp = true;
if (tint_tmp) {
@@ -257,20 +258,20 @@ TEST_F(HlslGeneratorImplTest_Loop, Emit_ForLoopWithMultiStmtCond) {
}
TEST_F(HlslGeneratorImplTest_Loop, Emit_ForLoopWithSimpleCont) {
- // for(; ; i = i + 1) {
- // return;
- // }
+ // for(; ; i = i + 1i) {
+ // return;
+ // }
- auto* v = Decl(Var("i", ty.i32()));
- auto* f = For(nullptr, nullptr, Assign("i", Add("i", 1)), Block(Return()));
- WrapInFunction(v, f);
+ auto* v = Decl(Var("i", ty.i32()));
+ auto* f = For(nullptr, nullptr, Assign("i", Add("i", 1_i)), Block(Return()));
+ WrapInFunction(v, f);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(f)) << gen.error();
- EXPECT_EQ(gen.result(), R"( {
+ ASSERT_TRUE(gen.EmitStatement(f)) << gen.error();
+ EXPECT_EQ(gen.result(), R"( {
[loop] for(; ; i = (i + 1)) {
return;
}
@@ -279,22 +280,22 @@ TEST_F(HlslGeneratorImplTest_Loop, Emit_ForLoopWithSimpleCont) {
}
TEST_F(HlslGeneratorImplTest_Loop, Emit_ForLoopWithMultiStmtCont) {
- // for(; ; i = true && false) {
- // return;
- // }
+ // for(; ; i = true && false) {
+ // return;
+ // }
- auto* multi_stmt = create<ast::BinaryExpression>(ast::BinaryOp::kLogicalAnd,
- Expr(true), Expr(false));
- auto* v = Decl(Var("i", ty.bool_()));
- auto* f = For(nullptr, nullptr, Assign("i", multi_stmt), Block(Return()));
- WrapInFunction(v, f);
+ auto* multi_stmt =
+ create<ast::BinaryExpression>(ast::BinaryOp::kLogicalAnd, Expr(true), Expr(false));
+ auto* v = Decl(Var("i", ty.bool_()));
+ auto* f = For(nullptr, nullptr, Assign("i", multi_stmt), Block(Return()));
+ WrapInFunction(v, f);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(f)) << gen.error();
- EXPECT_EQ(gen.result(), R"( {
+ ASSERT_TRUE(gen.EmitStatement(f)) << gen.error();
+ EXPECT_EQ(gen.result(), R"( {
[loop] while (true) {
return;
bool tint_tmp = true;
@@ -308,20 +309,19 @@ TEST_F(HlslGeneratorImplTest_Loop, Emit_ForLoopWithMultiStmtCont) {
}
TEST_F(HlslGeneratorImplTest_Loop, Emit_ForLoopWithSimpleInitCondCont) {
- // for(var i : i32; true; i = i + 1) {
- // return;
- // }
+ // for(var i : i32; true; i = i + 1i) {
+ // return;
+ // }
- auto* f = For(Decl(Var("i", ty.i32())), true, Assign("i", Add("i", 1)),
- Block(Return()));
- WrapInFunction(f);
+ auto* f = For(Decl(Var("i", ty.i32())), true, Assign("i", Add("i", 1_i)), Block(Return()));
+ WrapInFunction(f);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(f)) << gen.error();
- EXPECT_EQ(gen.result(), R"( {
+ ASSERT_TRUE(gen.EmitStatement(f)) << gen.error();
+ EXPECT_EQ(gen.result(), R"( {
[loop] for(int i = 0; true; i = (i + 1)) {
return;
}
@@ -330,27 +330,27 @@ TEST_F(HlslGeneratorImplTest_Loop, Emit_ForLoopWithSimpleInitCondCont) {
}
TEST_F(HlslGeneratorImplTest_Loop, Emit_ForLoopWithMultiStmtInitCondCont) {
- // for(var i = true && false; true && false; i = true && false) {
- // return;
- // }
+ // for(var i = true && false; true && false; i = true && false) {
+ // return;
+ // }
- auto* multi_stmt_a = create<ast::BinaryExpression>(ast::BinaryOp::kLogicalAnd,
- Expr(true), Expr(false));
- auto* multi_stmt_b = create<ast::BinaryExpression>(ast::BinaryOp::kLogicalAnd,
- Expr(true), Expr(false));
- auto* multi_stmt_c = create<ast::BinaryExpression>(ast::BinaryOp::kLogicalAnd,
- Expr(true), Expr(false));
+ auto* multi_stmt_a =
+ create<ast::BinaryExpression>(ast::BinaryOp::kLogicalAnd, Expr(true), Expr(false));
+ auto* multi_stmt_b =
+ create<ast::BinaryExpression>(ast::BinaryOp::kLogicalAnd, Expr(true), Expr(false));
+ auto* multi_stmt_c =
+ create<ast::BinaryExpression>(ast::BinaryOp::kLogicalAnd, Expr(true), Expr(false));
- auto* f = For(Decl(Var("i", nullptr, multi_stmt_a)), multi_stmt_b,
- Assign("i", multi_stmt_c), Block(Return()));
- WrapInFunction(f);
+ auto* f = For(Decl(Var("i", nullptr, multi_stmt_a)), multi_stmt_b, Assign("i", multi_stmt_c),
+ Block(Return()));
+ WrapInFunction(f);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(f)) << gen.error();
- EXPECT_EQ(gen.result(), R"( {
+ ASSERT_TRUE(gen.EmitStatement(f)) << gen.error();
+ EXPECT_EQ(gen.result(), R"( {
bool tint_tmp = true;
if (tint_tmp) {
tint_tmp = false;
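
Throughout these reformatted Tint writer tests, bare literals such as `1`, `4` and `1.f` are replaced with the typed suffix literals `1_i`, `4_u` and `1_f`, pulled in by `using namespace tint::number_suffixes;`, so each test states explicitly whether a WGSL literal is an i32, u32 or f32. The snippet below is a minimal, self-contained sketch of how such user-defined suffixes can be declared in standard C++; the wrapper types and operators are illustrative stand-ins for this example only, not tint's actual number_suffixes implementation.

// Illustrative only: hypothetical wrapper types and literal operators in the
// style of the 1_i / 4_u / 2_f suffixes used by the updated tests.
#include <cstdint>
#include <iostream>

struct I32 { int32_t value; };
struct U32 { uint32_t value; };
struct F32 { float value; };

constexpr I32 operator""_i(unsigned long long v) { return I32{static_cast<int32_t>(v)}; }
constexpr U32 operator""_u(unsigned long long v) { return U32{static_cast<uint32_t>(v)}; }
constexpr F32 operator""_f(long double v) { return F32{static_cast<float>(v)}; }
constexpr F32 operator""_f(unsigned long long v) { return F32{static_cast<float>(v)}; }

int main() {
    auto i = 1_i;  // an i32 literal, distinguishable from u32/f32 by its type
    auto u = 4_u;  // a u32 literal
    auto f = 2_f;  // an f32 literal (integer tokens such as 2_f are accepted too)
    std::cout << i.value << " " << u.value << " " << f.value << "\n";
}
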
diff --git a/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_member_accessor_test.cc b/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_member_accessor_test.cc
index 82447674616..10e9f1096f6 100644
--- a/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_member_accessor_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_member_accessor_test.cc
@@ -16,119 +16,114 @@
#include "src/tint/ast/stage_attribute.h"
#include "src/tint/writer/hlsl/test_helper.h"
+using ::testing::HasSubstr;
+
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::hlsl {
namespace {
-using ::testing::HasSubstr;
-
-using create_type_func_ptr =
- const ast::Type* (*)(const ProgramBuilder::TypesBuilder& ty);
+using create_type_func_ptr = const ast::Type* (*)(const ProgramBuilder::TypesBuilder& ty);
inline const ast::Type* ty_i32(const ProgramBuilder::TypesBuilder& ty) {
- return ty.i32();
+ return ty.i32();
}
inline const ast::Type* ty_u32(const ProgramBuilder::TypesBuilder& ty) {
- return ty.u32();
+ return ty.u32();
}
inline const ast::Type* ty_f32(const ProgramBuilder::TypesBuilder& ty) {
- return ty.f32();
+ return ty.f32();
}
template <typename T>
inline const ast::Type* ty_vec2(const ProgramBuilder::TypesBuilder& ty) {
- return ty.vec2<T>();
+ return ty.vec2<T>();
}
template <typename T>
inline const ast::Type* ty_vec3(const ProgramBuilder::TypesBuilder& ty) {
- return ty.vec3<T>();
+ return ty.vec3<T>();
}
template <typename T>
inline const ast::Type* ty_vec4(const ProgramBuilder::TypesBuilder& ty) {
- return ty.vec4<T>();
+ return ty.vec4<T>();
}
template <typename T>
inline const ast::Type* ty_mat2x2(const ProgramBuilder::TypesBuilder& ty) {
- return ty.mat2x2<T>();
+ return ty.mat2x2<T>();
}
template <typename T>
inline const ast::Type* ty_mat2x3(const ProgramBuilder::TypesBuilder& ty) {
- return ty.mat2x3<T>();
+ return ty.mat2x3<T>();
}
template <typename T>
inline const ast::Type* ty_mat2x4(const ProgramBuilder::TypesBuilder& ty) {
- return ty.mat2x4<T>();
+ return ty.mat2x4<T>();
}
template <typename T>
inline const ast::Type* ty_mat3x2(const ProgramBuilder::TypesBuilder& ty) {
- return ty.mat3x2<T>();
+ return ty.mat3x2<T>();
}
template <typename T>
inline const ast::Type* ty_mat3x3(const ProgramBuilder::TypesBuilder& ty) {
- return ty.mat3x3<T>();
+ return ty.mat3x3<T>();
}
template <typename T>
inline const ast::Type* ty_mat3x4(const ProgramBuilder::TypesBuilder& ty) {
- return ty.mat3x4<T>();
+ return ty.mat3x4<T>();
}
template <typename T>
inline const ast::Type* ty_mat4x2(const ProgramBuilder::TypesBuilder& ty) {
- return ty.mat4x2<T>();
+ return ty.mat4x2<T>();
}
template <typename T>
inline const ast::Type* ty_mat4x3(const ProgramBuilder::TypesBuilder& ty) {
- return ty.mat4x3<T>();
+ return ty.mat4x3<T>();
}
template <typename T>
inline const ast::Type* ty_mat4x4(const ProgramBuilder::TypesBuilder& ty) {
- return ty.mat4x4<T>();
+ return ty.mat4x4<T>();
}
-using i32 = ProgramBuilder::i32;
-using u32 = ProgramBuilder::u32;
-using f32 = ProgramBuilder::f32;
-
template <typename BASE>
class HlslGeneratorImplTest_MemberAccessorBase : public BASE {
- public:
- void SetupStorageBuffer(ast::StructMemberList members) {
- ProgramBuilder& b = *this;
-
- auto* s = b.Structure("Data", members);
-
- b.Global("data", b.ty.Of(s), ast::StorageClass::kStorage,
- ast::Access::kReadWrite,
- ast::AttributeList{
- b.create<ast::BindingAttribute>(0),
- b.create<ast::GroupAttribute>(1),
- });
- }
-
- void SetupFunction(ast::StatementList statements) {
- ProgramBuilder& b = *this;
- b.Func("main", ast::VariableList{}, b.ty.void_(), statements,
- ast::AttributeList{
- b.Stage(ast::PipelineStage::kFragment),
- });
- }
+ public:
+ void SetupStorageBuffer(ast::StructMemberList members) {
+ ProgramBuilder& b = *this;
+
+ auto* s = b.Structure("Data", members);
+
+ b.Global("data", b.ty.Of(s), ast::StorageClass::kStorage, ast::Access::kReadWrite,
+ ast::AttributeList{
+ b.create<ast::BindingAttribute>(0),
+ b.create<ast::GroupAttribute>(1),
+ });
+ }
+
+ void SetupFunction(ast::StatementList statements) {
+ ProgramBuilder& b = *this;
+ b.Func("main", ast::VariableList{}, b.ty.void_(), statements,
+ ast::AttributeList{
+ b.Stage(ast::PipelineStage::kFragment),
+ });
+ }
};
-using HlslGeneratorImplTest_MemberAccessor =
- HlslGeneratorImplTest_MemberAccessorBase<TestHelper>;
+using HlslGeneratorImplTest_MemberAccessor = HlslGeneratorImplTest_MemberAccessorBase<TestHelper>;
template <typename T>
using HlslGeneratorImplTest_MemberAccessorWithParam =
HlslGeneratorImplTest_MemberAccessorBase<TestParamHelper<T>>;
TEST_F(HlslGeneratorImplTest_MemberAccessor, EmitExpression_MemberAccessor) {
- auto* s = Structure("Data", {Member("mem", ty.f32())});
- Global("str", ty.Of(s), ast::StorageClass::kPrivate);
+ auto* s = Structure("Data", {Member("mem", ty.f32())});
+ Global("str", ty.Of(s), ast::StorageClass::kPrivate);
- auto* expr = MemberAccessor("str", "mem");
- WrapInFunction(Var("expr", ty.f32(), ast::StorageClass::kNone, expr));
+ auto* expr = MemberAccessor("str", "mem");
+ WrapInFunction(Var("expr", ty.f32(), ast::StorageClass::kNone, expr));
- GeneratorImpl& gen = SanitizeAndBuild();
+ GeneratorImpl& gen = SanitizeAndBuild();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(struct Data {
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(struct Data {
float mem;
};
@@ -143,42 +138,41 @@ void test_function() {
}
struct TypeCase {
- create_type_func_ptr member_type;
- std::string expected;
+ create_type_func_ptr member_type;
+ std::string expected;
};
inline std::ostream& operator<<(std::ostream& out, TypeCase c) {
- ProgramBuilder b;
- auto* ty = c.member_type(b.ty);
- out << ty->FriendlyName(b.Symbols());
- return out;
+ ProgramBuilder b;
+ auto* ty = c.member_type(b.ty);
+ out << ty->FriendlyName(b.Symbols());
+ return out;
}
using HlslGeneratorImplTest_MemberAccessor_StorageBufferLoad =
HlslGeneratorImplTest_MemberAccessorWithParam<TypeCase>;
TEST_P(HlslGeneratorImplTest_MemberAccessor_StorageBufferLoad, Test) {
- // struct Data {
- // a : i32;
- // b : <type>;
- // };
- // var<storage> data : Data;
- // data.b;
+ // struct Data {
+ // a : i32;
+ // b : <type>;
+ // };
+ // var<storage> data : Data;
+ // data.b;
- auto p = GetParam();
+ auto p = GetParam();
- SetupStorageBuffer({
- Member("a", ty.i32()),
- Member("b", p.member_type(ty)),
- });
+ SetupStorageBuffer({
+ Member("a", ty.i32()),
+ Member("b", p.member_type(ty)),
+ });
- SetupFunction({
- Decl(Var("x", nullptr, ast::StorageClass::kNone,
- MemberAccessor("data", "b"))),
- });
+ SetupFunction({
+ Decl(Var("x", nullptr, ast::StorageClass::kNone, MemberAccessor("data", "b"))),
+ });
- GeneratorImpl& gen = SanitizeAndBuild();
+ GeneratorImpl& gen = SanitizeAndBuild();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr(p.expected));
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr(p.expected));
}
INSTANTIATE_TEST_SUITE_P(
@@ -228,87 +222,86 @@ INSTANTIATE_TEST_SUITE_P(
using HlslGeneratorImplTest_MemberAccessor_StorageBufferStore =
HlslGeneratorImplTest_MemberAccessorWithParam<TypeCase>;
TEST_P(HlslGeneratorImplTest_MemberAccessor_StorageBufferStore, Test) {
- // struct Data {
- // a : i32;
- // b : <type>;
- // };
- // var<storage> data : Data;
- // data.b = <type>();
-
- auto p = GetParam();
-
- SetupStorageBuffer({
- Member("a", ty.i32()),
- Member("b", p.member_type(ty)),
- });
-
- SetupFunction({
- Decl(Var("value", p.member_type(ty), ast::StorageClass::kNone,
- Construct(p.member_type(ty)))),
- Assign(MemberAccessor("data", "b"), Expr("value")),
- });
-
- GeneratorImpl& gen = SanitizeAndBuild();
-
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr(p.expected));
-}
-
-INSTANTIATE_TEST_SUITE_P(
- HlslGeneratorImplTest_MemberAccessor,
- HlslGeneratorImplTest_MemberAccessor_StorageBufferStore,
- testing::Values(TypeCase{ty_u32, "data.Store(4u, asuint(value))"},
- TypeCase{ty_f32, "data.Store(4u, asuint(value))"},
- TypeCase{ty_i32, "data.Store(4u, asuint(value))"},
- TypeCase{ty_vec2<u32>, "data.Store2(8u, asuint(value))"},
- TypeCase{ty_vec2<f32>, "data.Store2(8u, asuint(value))"},
- TypeCase{ty_vec2<i32>, "data.Store2(8u, asuint(value))"},
- TypeCase{ty_vec3<u32>, "data.Store3(16u, asuint(value))"},
- TypeCase{ty_vec3<f32>, "data.Store3(16u, asuint(value))"},
- TypeCase{ty_vec3<i32>, "data.Store3(16u, asuint(value))"},
- TypeCase{ty_vec4<u32>, "data.Store4(16u, asuint(value))"},
- TypeCase{ty_vec4<f32>, "data.Store4(16u, asuint(value))"},
- TypeCase{ty_vec4<i32>, "data.Store4(16u, asuint(value))"},
- TypeCase{ty_mat2x2<f32>, R"({
+ // struct Data {
+ // a : i32;
+ // b : <type>;
+ // };
+ // var<storage> data : Data;
+ // data.b = <type>();
+
+ auto p = GetParam();
+
+ SetupStorageBuffer({
+ Member("a", ty.i32()),
+ Member("b", p.member_type(ty)),
+ });
+
+ SetupFunction({
+ Decl(Var("value", p.member_type(ty), ast::StorageClass::kNone,
+ Construct(p.member_type(ty)))),
+ Assign(MemberAccessor("data", "b"), Expr("value")),
+ });
+
+ GeneratorImpl& gen = SanitizeAndBuild();
+
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr(p.expected));
+}
+
+INSTANTIATE_TEST_SUITE_P(HlslGeneratorImplTest_MemberAccessor,
+ HlslGeneratorImplTest_MemberAccessor_StorageBufferStore,
+ testing::Values(TypeCase{ty_u32, "data.Store(4u, asuint(value))"},
+ TypeCase{ty_f32, "data.Store(4u, asuint(value))"},
+ TypeCase{ty_i32, "data.Store(4u, asuint(value))"},
+ TypeCase{ty_vec2<u32>, "data.Store2(8u, asuint(value))"},
+ TypeCase{ty_vec2<f32>, "data.Store2(8u, asuint(value))"},
+ TypeCase{ty_vec2<i32>, "data.Store2(8u, asuint(value))"},
+ TypeCase{ty_vec3<u32>, "data.Store3(16u, asuint(value))"},
+ TypeCase{ty_vec3<f32>, "data.Store3(16u, asuint(value))"},
+ TypeCase{ty_vec3<i32>, "data.Store3(16u, asuint(value))"},
+ TypeCase{ty_vec4<u32>, "data.Store4(16u, asuint(value))"},
+ TypeCase{ty_vec4<f32>, "data.Store4(16u, asuint(value))"},
+ TypeCase{ty_vec4<i32>, "data.Store4(16u, asuint(value))"},
+ TypeCase{ty_mat2x2<f32>, R"({
buffer.Store2((offset + 0u), asuint(value[0u]));
buffer.Store2((offset + 8u), asuint(value[1u]));
})"},
- TypeCase{ty_mat2x3<f32>, R"({
+ TypeCase{ty_mat2x3<f32>, R"({
buffer.Store3((offset + 0u), asuint(value[0u]));
buffer.Store3((offset + 16u), asuint(value[1u]));
})"},
- TypeCase{ty_mat2x4<f32>, R"({
+ TypeCase{ty_mat2x4<f32>, R"({
buffer.Store4((offset + 0u), asuint(value[0u]));
buffer.Store4((offset + 16u), asuint(value[1u]));
})"},
- TypeCase{ty_mat3x2<f32>, R"({
+ TypeCase{ty_mat3x2<f32>, R"({
buffer.Store2((offset + 0u), asuint(value[0u]));
buffer.Store2((offset + 8u), asuint(value[1u]));
buffer.Store2((offset + 16u), asuint(value[2u]));
})"},
- TypeCase{ty_mat3x3<f32>, R"({
+ TypeCase{ty_mat3x3<f32>, R"({
buffer.Store3((offset + 0u), asuint(value[0u]));
buffer.Store3((offset + 16u), asuint(value[1u]));
buffer.Store3((offset + 32u), asuint(value[2u]));
})"},
- TypeCase{ty_mat3x4<f32>, R"({
+ TypeCase{ty_mat3x4<f32>, R"({
buffer.Store4((offset + 0u), asuint(value[0u]));
buffer.Store4((offset + 16u), asuint(value[1u]));
buffer.Store4((offset + 32u), asuint(value[2u]));
})"},
- TypeCase{ty_mat4x2<f32>, R"({
+ TypeCase{ty_mat4x2<f32>, R"({
buffer.Store2((offset + 0u), asuint(value[0u]));
buffer.Store2((offset + 8u), asuint(value[1u]));
buffer.Store2((offset + 16u), asuint(value[2u]));
buffer.Store2((offset + 24u), asuint(value[3u]));
})"},
- TypeCase{ty_mat4x3<f32>, R"({
+ TypeCase{ty_mat4x3<f32>, R"({
buffer.Store3((offset + 0u), asuint(value[0u]));
buffer.Store3((offset + 16u), asuint(value[1u]));
buffer.Store3((offset + 32u), asuint(value[2u]));
buffer.Store3((offset + 48u), asuint(value[3u]));
})"},
- TypeCase{ty_mat4x4<f32>, R"({
+ TypeCase{ty_mat4x4<f32>, R"({
buffer.Store4((offset + 0u), asuint(value[0u]));
buffer.Store4((offset + 16u), asuint(value[1u]));
buffer.Store4((offset + 32u), asuint(value[2u]));
@@ -316,28 +309,27 @@ INSTANTIATE_TEST_SUITE_P(
})"}));
TEST_F(HlslGeneratorImplTest_MemberAccessor, StorageBuffer_Store_Matrix_Empty) {
- // struct Data {
- // z : f32;
- // a : mat2x3<f32>;
- // };
- // var<storage> data : Data;
- // data.a = mat2x3<f32>();
+ // struct Data {
+ // z : f32;
+ // a : mat2x3<f32>;
+ // };
+ // var<storage> data : Data;
+ // data.a = mat2x3<f32>();
- SetupStorageBuffer({
- Member("a", ty.i32()),
- Member("b", ty.mat2x3<f32>()),
- });
+ SetupStorageBuffer({
+ Member("a", ty.i32()),
+ Member("b", ty.mat2x3<f32>()),
+ });
- SetupFunction({
- Assign(MemberAccessor("data", "b"),
- Construct(ty.mat2x3<f32>(), ast::ExpressionList{})),
- });
+ SetupFunction({
+ Assign(MemberAccessor("data", "b"), Construct(ty.mat2x3<f32>(), ast::ExpressionList{})),
+ });
- GeneratorImpl& gen = SanitizeAndBuild();
+ GeneratorImpl& gen = SanitizeAndBuild();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- auto* expected =
- R"(RWByteAddressBuffer data : register(u0, space1);
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ auto* expected =
+ R"(RWByteAddressBuffer data : register(u0, space1);
void tint_symbol(RWByteAddressBuffer buffer, uint offset, float2x3 value) {
buffer.Store3((offset + 0u), asuint(value[0u]));
@@ -345,419 +337,404 @@ void tint_symbol(RWByteAddressBuffer buffer, uint offset, float2x3 value) {
}
void main() {
- tint_symbol(data, 16u, float2x3(0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f));
+ tint_symbol(data, 16u, float2x3((0.0f).xxx, (0.0f).xxx));
return;
}
)";
- EXPECT_EQ(gen.result(), expected);
+ EXPECT_EQ(gen.result(), expected);
}
-TEST_F(HlslGeneratorImplTest_MemberAccessor,
- StorageBuffer_Load_Matrix_Single_Element) {
- // struct Data {
- // z : f32;
- // a : mat4x3<f32>;
- // };
- // var<storage> data : Data;
- // data.a[2][1];
-
- SetupStorageBuffer({
- Member("z", ty.f32()),
- Member("a", ty.mat4x3<f32>()),
- });
-
- SetupFunction({
- Decl(
- Var("x", nullptr, ast::StorageClass::kNone,
- IndexAccessor(IndexAccessor(MemberAccessor("data", "a"), 2), 1))),
- });
-
- GeneratorImpl& gen = SanitizeAndBuild();
-
- ASSERT_TRUE(gen.Generate()) << gen.error();
- auto* expected =
- R"(RWByteAddressBuffer data : register(u0, space1);
+TEST_F(HlslGeneratorImplTest_MemberAccessor, StorageBuffer_Load_Matrix_Single_Element) {
+ // struct Data {
+ // z : f32;
+ // a : mat4x3<f32>;
+ // };
+ // var<storage> data : Data;
+ // data.a[2i][1i];
+
+ SetupStorageBuffer({
+ Member("z", ty.f32()),
+ Member("a", ty.mat4x3<f32>()),
+ });
+
+ SetupFunction({
+ Decl(Var("x", nullptr, ast::StorageClass::kNone,
+ IndexAccessor(IndexAccessor(MemberAccessor("data", "a"), 2_i), 1_i))),
+ });
+
+ GeneratorImpl& gen = SanitizeAndBuild();
+
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ auto* expected =
+ R"(RWByteAddressBuffer data : register(u0, space1);
void main() {
float x = asfloat(data.Load(52u));
return;
}
)";
- EXPECT_EQ(gen.result(), expected);
+ EXPECT_EQ(gen.result(), expected);
}
TEST_F(HlslGeneratorImplTest_MemberAccessor,
EmitExpression_IndexAccessor_StorageBuffer_Load_Int_FromArray) {
- // struct Data {
- // a : array<i32, 5>;
- // };
- // var<storage> data : Data;
- // data.a[2];
+ // struct Data {
+ // a : array<i32, 5>;
+ // };
+ // var<storage> data : Data;
+ // data.a[2];
- SetupStorageBuffer({
- Member("z", ty.f32()),
- Member("a", ty.array<i32, 5>(4)),
- });
+ SetupStorageBuffer({
+ Member("z", ty.f32()),
+ Member("a", ty.array<i32, 5>(4)),
+ });
- SetupFunction({
- Decl(Var("x", nullptr, ast::StorageClass::kNone,
- IndexAccessor(MemberAccessor("data", "a"), 2))),
- });
+ SetupFunction({
+ Decl(Var("x", nullptr, ast::StorageClass::kNone,
+ IndexAccessor(MemberAccessor("data", "a"), 2_i))),
+ });
- GeneratorImpl& gen = SanitizeAndBuild();
+ GeneratorImpl& gen = SanitizeAndBuild();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- auto* expected =
- R"(RWByteAddressBuffer data : register(u0, space1);
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ auto* expected =
+ R"(RWByteAddressBuffer data : register(u0, space1);
void main() {
int x = asint(data.Load(12u));
return;
}
)";
- EXPECT_EQ(gen.result(), expected);
+ EXPECT_EQ(gen.result(), expected);
}
TEST_F(HlslGeneratorImplTest_MemberAccessor,
EmitExpression_IndexAccessor_StorageBuffer_Load_Int_FromArray_ExprIdx) {
- // struct Data {
- // a : array<i32, 5>;
- // };
- // var<storage> data : Data;
- // data.a[(2 + 4) - 3];
+ // struct Data {
+ // a : array<i32, 5>;
+ // };
+ // var<storage> data : Data;
+ // data.a[(2i + 4i) - 3i];
- SetupStorageBuffer({
- Member("z", ty.f32()),
- Member("a", ty.array<i32, 5>(4)),
- });
+ SetupStorageBuffer({
+ Member("z", ty.f32()),
+ Member("a", ty.array<i32, 5>(4)),
+ });
- SetupFunction({
- Decl(Var("x", nullptr, ast::StorageClass::kNone,
- IndexAccessor(MemberAccessor("data", "a"),
- Sub(Add(2, Expr(4)), Expr(3))))),
- });
+ SetupFunction({
+ Decl(Var("x", nullptr, ast::StorageClass::kNone,
+ IndexAccessor(MemberAccessor("data", "a"), Sub(Add(2_i, Expr(4_i)), Expr(3_i))))),
+ });
- GeneratorImpl& gen = SanitizeAndBuild();
+ GeneratorImpl& gen = SanitizeAndBuild();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- auto* expected =
- R"(RWByteAddressBuffer data : register(u0, space1);
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ auto* expected =
+ R"(RWByteAddressBuffer data : register(u0, space1);
void main() {
int x = asint(data.Load((4u + (4u * uint(((2 + 4) - 3))))));
return;
}
)";
- EXPECT_EQ(gen.result(), expected);
+ EXPECT_EQ(gen.result(), expected);
}
TEST_F(HlslGeneratorImplTest_MemberAccessor, StorageBuffer_Store_ToArray) {
- // struct Data {
- // a : array<i32, 5>;
- // };
- // var<storage> data : Data;
- // data.a[2] = 2;
+ // struct Data {
+ // a : array<i32, 5>;
+ // };
+ // var<storage> data : Data;
+ // data.a[2] = 2;
- SetupStorageBuffer({
- Member("z", ty.f32()),
- Member("a", ty.array<i32, 5>(4)),
- });
+ SetupStorageBuffer({
+ Member("z", ty.f32()),
+ Member("a", ty.array<i32, 5>(4)),
+ });
- SetupFunction({
- Assign(IndexAccessor(MemberAccessor("data", "a"), 2), 2),
- });
+ SetupFunction({
+ Assign(IndexAccessor(MemberAccessor("data", "a"), 2_i), 2_i),
+ });
- GeneratorImpl& gen = SanitizeAndBuild();
+ GeneratorImpl& gen = SanitizeAndBuild();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- auto* expected =
- R"(RWByteAddressBuffer data : register(u0, space1);
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ auto* expected =
+ R"(RWByteAddressBuffer data : register(u0, space1);
void main() {
data.Store(12u, asuint(2));
return;
}
)";
- EXPECT_EQ(gen.result(), expected);
+ EXPECT_EQ(gen.result(), expected);
}
TEST_F(HlslGeneratorImplTest_MemberAccessor, StorageBuffer_Load_MultiLevel) {
- // struct Inner {
- // a : vec3<i32>;
- // b : vec3<f32>;
- // };
- // struct Data {
- // var c : array<Inner, 4>;
- // };
- //
- // var<storage> data : Pre;
- // data.c[2].b
-
- auto* inner = Structure("Inner", {
- Member("a", ty.vec3<f32>()),
- Member("b", ty.vec3<f32>()),
- });
-
- SetupStorageBuffer({
- Member("c", ty.array(ty.Of(inner), 4, 32)),
- });
-
- SetupFunction({
- Decl(Var(
- "x", nullptr, ast::StorageClass::kNone,
- MemberAccessor(IndexAccessor(MemberAccessor("data", "c"), 2), "b"))),
- });
-
- GeneratorImpl& gen = SanitizeAndBuild();
-
- ASSERT_TRUE(gen.Generate()) << gen.error();
- auto* expected =
- R"(RWByteAddressBuffer data : register(u0, space1);
+ // struct Inner {
+ // a : vec3<i32>;
+ // b : vec3<f32>;
+ // };
+ // struct Data {
+ // var c : array<Inner, 4u>;
+ // };
+ //
+ // var<storage> data : Pre;
+ // data.c[2].b
+
+ auto* inner = Structure("Inner", {
+ Member("a", ty.vec3<f32>()),
+ Member("b", ty.vec3<f32>()),
+ });
+
+ SetupStorageBuffer({
+ Member("c", ty.array(ty.Of(inner), 4_u, 32)),
+ });
+
+ SetupFunction({
+ Decl(Var("x", nullptr, ast::StorageClass::kNone,
+ MemberAccessor(IndexAccessor(MemberAccessor("data", "c"), 2_i), "b"))),
+ });
+
+ GeneratorImpl& gen = SanitizeAndBuild();
+
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ auto* expected =
+ R"(RWByteAddressBuffer data : register(u0, space1);
void main() {
float3 x = asfloat(data.Load3(80u));
return;
}
)";
- EXPECT_EQ(gen.result(), expected);
-}
-
-TEST_F(HlslGeneratorImplTest_MemberAccessor,
- StorageBuffer_Load_MultiLevel_Swizzle) {
- // struct Inner {
- // a : vec3<i32>;
- // b : vec3<f32>;
- // };
- // struct Data {
- // var c : array<Inner, 4>;
- // };
- //
- // var<storage> data : Pre;
- // data.c[2].b.xy
-
- auto* inner = Structure("Inner", {
- Member("a", ty.vec3<f32>()),
- Member("b", ty.vec3<f32>()),
- });
-
- SetupStorageBuffer({
- Member("c", ty.array(ty.Of(inner), 4, 32)),
- });
-
- SetupFunction({
- Decl(Var("x", nullptr, ast::StorageClass::kNone,
- MemberAccessor(
- MemberAccessor(IndexAccessor(MemberAccessor("data", "c"), 2),
- "b"),
- "xy"))),
- });
-
- GeneratorImpl& gen = SanitizeAndBuild();
-
- ASSERT_TRUE(gen.Generate()) << gen.error();
- auto* expected =
- R"(RWByteAddressBuffer data : register(u0, space1);
+ EXPECT_EQ(gen.result(), expected);
+}
+
+TEST_F(HlslGeneratorImplTest_MemberAccessor, StorageBuffer_Load_MultiLevel_Swizzle) {
+ // struct Inner {
+ // a : vec3<i32>;
+ // b : vec3<f32>;
+ // };
+ // struct Data {
+ // var c : array<Inner, 4u>;
+ // };
+ //
+ // var<storage> data : Pre;
+ // data.c[2].b.xy
+
+ auto* inner = Structure("Inner", {
+ Member("a", ty.vec3<f32>()),
+ Member("b", ty.vec3<f32>()),
+ });
+
+ SetupStorageBuffer({
+ Member("c", ty.array(ty.Of(inner), 4_u, 32)),
+ });
+
+ SetupFunction({
+ Decl(Var("x", nullptr, ast::StorageClass::kNone,
+ MemberAccessor(
+ MemberAccessor(IndexAccessor(MemberAccessor("data", "c"), 2_i), "b"), "xy"))),
+ });
+
+ GeneratorImpl& gen = SanitizeAndBuild();
+
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ auto* expected =
+ R"(RWByteAddressBuffer data : register(u0, space1);
void main() {
float2 x = asfloat(data.Load3(80u)).xy;
return;
}
)";
- EXPECT_EQ(gen.result(), expected);
+ EXPECT_EQ(gen.result(), expected);
}
TEST_F(HlslGeneratorImplTest_MemberAccessor,
StorageBuffer_Load_MultiLevel_Swizzle_SingleLetter) { // NOLINT
- // struct Inner {
- // a : vec3<i32>;
- // b : vec3<f32>;
- // };
- // struct Data {
- // var c : array<Inner, 4>;
- // };
- //
- // var<storage> data : Pre;
- // data.c[2].b.g
-
- auto* inner = Structure("Inner", {
- Member("a", ty.vec3<f32>()),
- Member("b", ty.vec3<f32>()),
- });
-
- SetupStorageBuffer({
- Member("c", ty.array(ty.Of(inner), 4, 32)),
- });
-
- SetupFunction({
- Decl(Var("x", nullptr, ast::StorageClass::kNone,
- MemberAccessor(
- MemberAccessor(IndexAccessor(MemberAccessor("data", "c"), 2),
- "b"),
- "g"))),
- });
-
- GeneratorImpl& gen = SanitizeAndBuild();
-
- ASSERT_TRUE(gen.Generate()) << gen.error();
- auto* expected =
- R"(RWByteAddressBuffer data : register(u0, space1);
+ // struct Inner {
+ // a : vec3<i32>;
+ // b : vec3<f32>;
+ // };
+ // struct Data {
+ // var c : array<Inner, 4u>;
+ // };
+ //
+ // var<storage> data : Pre;
+ // data.c[2].b.g
+
+ auto* inner = Structure("Inner", {
+ Member("a", ty.vec3<f32>()),
+ Member("b", ty.vec3<f32>()),
+ });
+
+ SetupStorageBuffer({
+ Member("c", ty.array(ty.Of(inner), 4_u, 32)),
+ });
+
+ SetupFunction({
+ Decl(Var("x", nullptr, ast::StorageClass::kNone,
+ MemberAccessor(
+ MemberAccessor(IndexAccessor(MemberAccessor("data", "c"), 2_i), "b"), "g"))),
+ });
+
+ GeneratorImpl& gen = SanitizeAndBuild();
+
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ auto* expected =
+ R"(RWByteAddressBuffer data : register(u0, space1);
void main() {
float x = asfloat(data.Load(84u));
return;
}
)";
- EXPECT_EQ(gen.result(), expected);
-}
-
-TEST_F(HlslGeneratorImplTest_MemberAccessor,
- StorageBuffer_Load_MultiLevel_Index) {
- // struct Inner {
- // a : vec3<i32>;
- // b : vec3<f32>;
- // };
- // struct Data {
- // var c : array<Inner, 4>;
- // };
- //
- // var<storage> data : Pre;
- // data.c[2].b[1]
-
- auto* inner = Structure("Inner", {
- Member("a", ty.vec3<f32>()),
- Member("b", ty.vec3<f32>()),
- });
-
- SetupStorageBuffer({
- Member("c", ty.array(ty.Of(inner), 4, 32)),
- });
-
- SetupFunction({
- Decl(Var(
- "x", nullptr, ast::StorageClass::kNone,
- IndexAccessor(MemberAccessor(
- IndexAccessor(MemberAccessor("data", "c"), 2), "b"),
- 1))),
- });
-
- GeneratorImpl& gen = SanitizeAndBuild();
-
- ASSERT_TRUE(gen.Generate()) << gen.error();
- auto* expected =
- R"(RWByteAddressBuffer data : register(u0, space1);
+ EXPECT_EQ(gen.result(), expected);
+}
+
+TEST_F(HlslGeneratorImplTest_MemberAccessor, StorageBuffer_Load_MultiLevel_Index) {
+ // struct Inner {
+ // a : vec3<i32>;
+ // b : vec3<f32>;
+ // };
+ // struct Data {
+ // var c : array<Inner, 4u>;
+ // };
+ //
+ // var<storage> data : Pre;
+ // data.c[2].b[1]
+
+ auto* inner = Structure("Inner", {
+ Member("a", ty.vec3<f32>()),
+ Member("b", ty.vec3<f32>()),
+ });
+
+ SetupStorageBuffer({
+ Member("c", ty.array(ty.Of(inner), 4_u, 32)),
+ });
+
+ SetupFunction({
+ Decl(Var("x", nullptr, ast::StorageClass::kNone,
+ IndexAccessor(MemberAccessor(IndexAccessor(MemberAccessor("data", "c"), 2_i), "b"),
+ 1_i))),
+ });
+
+ GeneratorImpl& gen = SanitizeAndBuild();
+
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ auto* expected =
+ R"(RWByteAddressBuffer data : register(u0, space1);
void main() {
float x = asfloat(data.Load(84u));
return;
}
)";
- EXPECT_EQ(gen.result(), expected);
+ EXPECT_EQ(gen.result(), expected);
}
TEST_F(HlslGeneratorImplTest_MemberAccessor, StorageBuffer_Store_MultiLevel) {
- // struct Inner {
- // a : vec3<i32>;
- // b : vec3<f32>;
- // };
- // struct Data {
- // var c : array<Inner, 4>;
- // };
- //
- // var<storage> data : Pre;
- // data.c[2].b = vec3<f32>(1.f, 2.f, 3.f);
-
- auto* inner = Structure("Inner", {
- Member("a", ty.vec3<f32>()),
- Member("b", ty.vec3<f32>()),
- });
-
- SetupStorageBuffer({
- Member("c", ty.array(ty.Of(inner), 4, 32)),
- });
-
- SetupFunction({
- Assign(MemberAccessor(IndexAccessor(MemberAccessor("data", "c"), 2), "b"),
- vec3<f32>(1.f, 2.f, 3.f)),
- });
-
- GeneratorImpl& gen = SanitizeAndBuild();
-
- ASSERT_TRUE(gen.Generate()) << gen.error();
- auto* expected =
- R"(RWByteAddressBuffer data : register(u0, space1);
+ // struct Inner {
+ // a : vec3<i32>;
+ // b : vec3<f32>;
+ // };
+ // struct Data {
+ // var c : array<Inner, 4u>;
+ // };
+ //
+ // var<storage> data : Pre;
+ // data.c[2].b = vec3<f32>(1_f, 2_f, 3_f);
+
+ auto* inner = Structure("Inner", {
+ Member("a", ty.vec3<f32>()),
+ Member("b", ty.vec3<f32>()),
+ });
+
+ SetupStorageBuffer({
+ Member("c", ty.array(ty.Of(inner), 4_u, 32)),
+ });
+
+ SetupFunction({
+ Assign(MemberAccessor(IndexAccessor(MemberAccessor("data", "c"), 2_i), "b"),
+ vec3<f32>(1_f, 2_f, 3_f)),
+ });
+
+ GeneratorImpl& gen = SanitizeAndBuild();
+
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ auto* expected =
+ R"(RWByteAddressBuffer data : register(u0, space1);
void main() {
data.Store3(80u, asuint(float3(1.0f, 2.0f, 3.0f)));
return;
}
)";
- EXPECT_EQ(gen.result(), expected);
-}
-
-TEST_F(HlslGeneratorImplTest_MemberAccessor,
- StorageBuffer_Store_Swizzle_SingleLetter) {
- // struct Inner {
- // a : vec3<i32>;
- // b : vec3<f32>;
- // };
- // struct Data {
- // var c : array<Inner, 4>;
- // };
- //
- // var<storage> data : Pre;
- // data.c[2].b.y = 1.f;
-
- auto* inner = Structure("Inner", {
- Member("a", ty.vec3<i32>()),
- Member("b", ty.vec3<f32>()),
- });
-
- SetupStorageBuffer({
- Member("c", ty.array(ty.Of(inner), 4, 32)),
- });
-
- SetupFunction({
- Assign(MemberAccessor(
- MemberAccessor(IndexAccessor(MemberAccessor("data", "c"), 2),
- "b"),
- "y"),
- Expr(1.f)),
- });
-
- GeneratorImpl& gen = SanitizeAndBuild();
-
- ASSERT_TRUE(gen.Generate()) << gen.error();
- auto* expected =
- R"(RWByteAddressBuffer data : register(u0, space1);
+ EXPECT_EQ(gen.result(), expected);
+}
+
+TEST_F(HlslGeneratorImplTest_MemberAccessor, StorageBuffer_Store_Swizzle_SingleLetter) {
+ // struct Inner {
+ // a : vec3<i32>;
+ // b : vec3<f32>;
+ // };
+ // struct Data {
+ // var c : array<Inner, 4u>;
+ // };
+ //
+ // var<storage> data : Pre;
+ // data.c[2].b.y = 1.f;
+
+ auto* inner = Structure("Inner", {
+ Member("a", ty.vec3<i32>()),
+ Member("b", ty.vec3<f32>()),
+ });
+
+ SetupStorageBuffer({
+ Member("c", ty.array(ty.Of(inner), 4_u, 32)),
+ });
+
+ SetupFunction({
+ Assign(MemberAccessor(MemberAccessor(IndexAccessor(MemberAccessor("data", "c"), 2_i), "b"),
+ "y"),
+ Expr(1_f)),
+ });
+
+ GeneratorImpl& gen = SanitizeAndBuild();
+
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ auto* expected =
+ R"(RWByteAddressBuffer data : register(u0, space1);
void main() {
data.Store(84u, asuint(1.0f));
return;
}
)";
- EXPECT_EQ(gen.result(), expected);
+ EXPECT_EQ(gen.result(), expected);
}
TEST_F(HlslGeneratorImplTest_MemberAccessor, Swizzle_xyz) {
- auto* var = Var("my_vec", ty.vec4<f32>(), ast::StorageClass::kNone,
- vec4<f32>(1.f, 2.f, 3.f, 4.f));
- auto* expr = MemberAccessor("my_vec", "xyz");
- WrapInFunction(var, expr);
+ auto* var =
+ Var("my_vec", ty.vec4<f32>(), ast::StorageClass::kNone, vec4<f32>(1_f, 2_f, 3_f, 4_f));
+ auto* expr = MemberAccessor("my_vec", "xyz");
+ WrapInFunction(var, expr);
- GeneratorImpl& gen = SanitizeAndBuild();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr("my_vec.xyz"));
+ GeneratorImpl& gen = SanitizeAndBuild();
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr("my_vec.xyz"));
}
TEST_F(HlslGeneratorImplTest_MemberAccessor, Swizzle_gbr) {
- auto* var = Var("my_vec", ty.vec4<f32>(), ast::StorageClass::kNone,
- vec4<f32>(1.f, 2.f, 3.f, 4.f));
- auto* expr = MemberAccessor("my_vec", "gbr");
- WrapInFunction(var, expr);
-
- GeneratorImpl& gen = SanitizeAndBuild();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr("my_vec.gbr"));
+ auto* var =
+ Var("my_vec", ty.vec4<f32>(), ast::StorageClass::kNone, vec4<f32>(1_f, 2_f, 3_f, 4_f));
+ auto* expr = MemberAccessor("my_vec", "gbr");
+ WrapInFunction(var, expr);
+
+ GeneratorImpl& gen = SanitizeAndBuild();
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr("my_vec.gbr"));
}
} // namespace
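
The member-accessor tests above assert hard-coded byte offsets (`Load(52u)`, `Load3(80u)`, `Store(84u, ...)`) against the generated RWByteAddressBuffer code. As a quick cross-check, the sketch below reproduces those numbers from the layout implied by the test structs, assuming 4-byte scalars and 16-byte alignment for `vec3<f32>` columns; it is reader-facing arithmetic, not code from the generator.

// Offsets for: struct Data { z : f32; a : mat4x3<f32>; };  expression data.a[2][1]
#include <cstdint>

constexpr uint32_t kVec3Align = 16;   // vec3<f32> columns round up to 16 bytes
constexpr uint32_t kScalarSize = 4;   // f32 is 4 bytes
constexpr uint32_t kAOffset = 16;     // z is 4 bytes, padded to the matrix's 16-byte alignment
static_assert(kAOffset + 2 * kVec3Align + 1 * kScalarSize == 52u,
              "matches the asserted data.Load(52u)");

// Offsets for: struct Inner { a : vec3; b : vec3; };  Data { c : array<Inner, 4>, stride 32 }
constexpr uint32_t kInnerStride = 32;
constexpr uint32_t kBOffsetInInner = 16;
static_assert(2 * kInnerStride + kBOffsetInInner == 80u,
              "data.c[2].b -> Load3(80u)");
static_assert(2 * kInnerStride + kBOffsetInInner + 1 * kScalarSize == 84u,
              "data.c[2].b.y (or .g / [1]) -> Load(84u) / Store(84u, ...)");
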
diff --git a/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_module_constant_test.cc b/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_module_constant_test.cc
index 30b0b0c2273..2c967446239 100644
--- a/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_module_constant_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_module_constant_test.cc
@@ -15,31 +15,33 @@
#include "src/tint/ast/id_attribute.h"
#include "src/tint/writer/hlsl/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::hlsl {
namespace {
using HlslGeneratorImplTest_ModuleConstant = TestHelper;
TEST_F(HlslGeneratorImplTest_ModuleConstant, Emit_ModuleConstant) {
- auto* var = Const("pos", ty.array<f32, 3>(), array<f32, 3>(1.f, 2.f, 3.f));
- WrapInFunction(Decl(var));
+ auto* var = Let("pos", ty.array<f32, 3>(), array<f32, 3>(1_f, 2_f, 3_f));
+ WrapInFunction(Decl(var));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.EmitProgramConstVariable(var)) << gen.error();
- EXPECT_EQ(gen.result(), "static const float pos[3] = {1.0f, 2.0f, 3.0f};\n");
+ ASSERT_TRUE(gen.EmitProgramConstVariable(var)) << gen.error();
+ EXPECT_EQ(gen.result(), "static const float pos[3] = {1.0f, 2.0f, 3.0f};\n");
}
TEST_F(HlslGeneratorImplTest_ModuleConstant, Emit_SpecConstant) {
- auto* var = Override("pos", ty.f32(), Expr(3.0f),
- ast::AttributeList{
- Id(23),
- });
+ auto* var = Override("pos", ty.f32(), Expr(3_f),
+ ast::AttributeList{
+ Id(23),
+ });
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.EmitProgramConstVariable(var)) << gen.error();
- EXPECT_EQ(gen.result(), R"(#ifndef WGSL_SPEC_CONSTANT_23
+ ASSERT_TRUE(gen.EmitProgramConstVariable(var)) << gen.error();
+ EXPECT_EQ(gen.result(), R"(#ifndef WGSL_SPEC_CONSTANT_23
#define WGSL_SPEC_CONSTANT_23 3.0f
#endif
static const float pos = WGSL_SPEC_CONSTANT_23;
@@ -47,15 +49,15 @@ static const float pos = WGSL_SPEC_CONSTANT_23;
}
TEST_F(HlslGeneratorImplTest_ModuleConstant, Emit_SpecConstant_NoConstructor) {
- auto* var = Override("pos", ty.f32(), nullptr,
- ast::AttributeList{
- Id(23),
- });
+ auto* var = Override("pos", ty.f32(), nullptr,
+ ast::AttributeList{
+ Id(23),
+ });
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.EmitProgramConstVariable(var)) << gen.error();
- EXPECT_EQ(gen.result(), R"(#ifndef WGSL_SPEC_CONSTANT_23
+ ASSERT_TRUE(gen.EmitProgramConstVariable(var)) << gen.error();
+ EXPECT_EQ(gen.result(), R"(#ifndef WGSL_SPEC_CONSTANT_23
#error spec constant required for constant id 23
#endif
static const float pos = WGSL_SPEC_CONSTANT_23;
@@ -63,17 +65,17 @@ static const float pos = WGSL_SPEC_CONSTANT_23;
}
TEST_F(HlslGeneratorImplTest_ModuleConstant, Emit_SpecConstant_NoId) {
- auto* a = Override("a", ty.f32(), Expr(3.0f),
- ast::AttributeList{
- Id(0),
- });
- auto* b = Override("b", ty.f32(), Expr(2.0f));
+ auto* a = Override("a", ty.f32(), Expr(3_f),
+ ast::AttributeList{
+ Id(0),
+ });
+ auto* b = Override("b", ty.f32(), Expr(2_f));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.EmitProgramConstVariable(a)) << gen.error();
- ASSERT_TRUE(gen.EmitProgramConstVariable(b)) << gen.error();
- EXPECT_EQ(gen.result(), R"(#ifndef WGSL_SPEC_CONSTANT_0
+ ASSERT_TRUE(gen.EmitProgramConstVariable(a)) << gen.error();
+ ASSERT_TRUE(gen.EmitProgramConstVariable(b)) << gen.error();
+ EXPECT_EQ(gen.result(), R"(#ifndef WGSL_SPEC_CONSTANT_0
#define WGSL_SPEC_CONSTANT_0 3.0f
#endif
static const float a = WGSL_SPEC_CONSTANT_0;
diff --git a/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_return_test.cc b/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_return_test.cc
index 3f839fc1bf8..1813645206b 100644
--- a/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_return_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_return_test.cc
@@ -14,33 +14,35 @@
#include "src/tint/writer/hlsl/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::hlsl {
namespace {
using HlslGeneratorImplTest_Return = TestHelper;
TEST_F(HlslGeneratorImplTest_Return, Emit_Return) {
- auto* r = Return();
- WrapInFunction(r);
+ auto* r = Return();
+ WrapInFunction(r);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(r)) << gen.error();
- EXPECT_EQ(gen.result(), " return;\n");
+ ASSERT_TRUE(gen.EmitStatement(r)) << gen.error();
+ EXPECT_EQ(gen.result(), " return;\n");
}
TEST_F(HlslGeneratorImplTest_Return, Emit_ReturnWithValue) {
- auto* r = Return(123);
- Func("f", {}, ty.i32(), {r});
+ auto* r = Return(123_i);
+ Func("f", {}, ty.i32(), {r});
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(r)) << gen.error();
- EXPECT_EQ(gen.result(), " return 123;\n");
+ ASSERT_TRUE(gen.EmitStatement(r)) << gen.error();
+ EXPECT_EQ(gen.result(), " return 123;\n");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_sanitizer_test.cc b/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_sanitizer_test.cc
index cb31f4d1d3c..66df5352483 100644
--- a/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_sanitizer_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_sanitizer_test.cc
@@ -17,34 +17,36 @@
#include "src/tint/ast/variable_decl_statement.h"
#include "src/tint/writer/hlsl/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::hlsl {
namespace {
using HlslSanitizerTest = TestHelper;
TEST_F(HlslSanitizerTest, Call_ArrayLength) {
- auto* s = Structure("my_struct", {Member(0, "a", ty.array<f32>(4))});
- Global("b", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kRead,
+ auto* s = Structure("my_struct", {Member(0, "a", ty.array<f32>(4))});
+ Global("b", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kRead,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(1),
+ create<ast::GroupAttribute>(2),
+ });
+
+ Func("a_func", ast::VariableList{}, ty.void_(),
+ ast::StatementList{
+ Decl(Var("len", ty.u32(), ast::StorageClass::kNone,
+ Call("arrayLength", AddressOf(MemberAccessor("b", "a"))))),
+ },
ast::AttributeList{
- create<ast::BindingAttribute>(1),
- create<ast::GroupAttribute>(2),
+ Stage(ast::PipelineStage::kFragment),
});
- Func("a_func", ast::VariableList{}, ty.void_(),
- ast::StatementList{
- Decl(Var("len", ty.u32(), ast::StorageClass::kNone,
- Call("arrayLength", AddressOf(MemberAccessor("b", "a"))))),
- },
- ast::AttributeList{
- Stage(ast::PipelineStage::kFragment),
- });
-
- GeneratorImpl& gen = SanitizeAndBuild();
+ GeneratorImpl& gen = SanitizeAndBuild();
- ASSERT_TRUE(gen.Generate()) << gen.error();
+ ASSERT_TRUE(gen.Generate()) << gen.error();
- auto got = gen.result();
- auto* expect = R"(ByteAddressBuffer b : register(t1, space2);
+ auto got = gen.result();
+ auto* expect = R"(ByteAddressBuffer b : register(t1, space2);
void a_func() {
uint tint_symbol_1 = 0u;
@@ -54,35 +56,35 @@ void a_func() {
return;
}
)";
- EXPECT_EQ(expect, got);
+ EXPECT_EQ(expect, got);
}
TEST_F(HlslSanitizerTest, Call_ArrayLength_OtherMembersInStruct) {
- auto* s = Structure("my_struct", {
- Member(0, "z", ty.f32()),
- Member(4, "a", ty.array<f32>(4)),
- });
- Global("b", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kRead,
+ auto* s = Structure("my_struct", {
+ Member(0, "z", ty.f32()),
+ Member(4, "a", ty.array<f32>(4)),
+ });
+ Global("b", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kRead,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(1),
+ create<ast::GroupAttribute>(2),
+ });
+
+ Func("a_func", ast::VariableList{}, ty.void_(),
+ ast::StatementList{
+ Decl(Var("len", ty.u32(), ast::StorageClass::kNone,
+ Call("arrayLength", AddressOf(MemberAccessor("b", "a"))))),
+ },
ast::AttributeList{
- create<ast::BindingAttribute>(1),
- create<ast::GroupAttribute>(2),
+ Stage(ast::PipelineStage::kFragment),
});
- Func("a_func", ast::VariableList{}, ty.void_(),
- ast::StatementList{
- Decl(Var("len", ty.u32(), ast::StorageClass::kNone,
- Call("arrayLength", AddressOf(MemberAccessor("b", "a"))))),
- },
- ast::AttributeList{
- Stage(ast::PipelineStage::kFragment),
- });
-
- GeneratorImpl& gen = SanitizeAndBuild();
+ GeneratorImpl& gen = SanitizeAndBuild();
- ASSERT_TRUE(gen.Generate()) << gen.error();
+ ASSERT_TRUE(gen.Generate()) << gen.error();
- auto got = gen.result();
- auto* expect = R"(ByteAddressBuffer b : register(t1, space2);
+ auto got = gen.result();
+ auto* expect = R"(ByteAddressBuffer b : register(t1, space2);
void a_func() {
uint tint_symbol_1 = 0u;
@@ -93,37 +95,36 @@ void a_func() {
}
)";
- EXPECT_EQ(expect, got);
+ EXPECT_EQ(expect, got);
}
TEST_F(HlslSanitizerTest, Call_ArrayLength_ViaLets) {
- auto* s = Structure("my_struct", {Member(0, "a", ty.array<f32>(4))});
- Global("b", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kRead,
+ auto* s = Structure("my_struct", {Member(0, "a", ty.array<f32>(4))});
+ Global("b", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kRead,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(1),
+ create<ast::GroupAttribute>(2),
+ });
+
+ auto* p = Let("p", nullptr, AddressOf("b"));
+ auto* p2 = Let("p2", nullptr, AddressOf(MemberAccessor(Deref(p), "a")));
+
+ Func("a_func", ast::VariableList{}, ty.void_(),
+ ast::StatementList{
+ Decl(p),
+ Decl(p2),
+ Decl(Var("len", ty.u32(), ast::StorageClass::kNone, Call("arrayLength", p2))),
+ },
ast::AttributeList{
- create<ast::BindingAttribute>(1),
- create<ast::GroupAttribute>(2),
+ Stage(ast::PipelineStage::kFragment),
});
- auto* p = Const("p", nullptr, AddressOf("b"));
- auto* p2 = Const("p2", nullptr, AddressOf(MemberAccessor(Deref(p), "a")));
+ GeneratorImpl& gen = SanitizeAndBuild();
- Func("a_func", ast::VariableList{}, ty.void_(),
- ast::StatementList{
- Decl(p),
- Decl(p2),
- Decl(Var("len", ty.u32(), ast::StorageClass::kNone,
- Call("arrayLength", p2))),
- },
- ast::AttributeList{
- Stage(ast::PipelineStage::kFragment),
- });
+ ASSERT_TRUE(gen.Generate()) << gen.error();
- GeneratorImpl& gen = SanitizeAndBuild();
-
- ASSERT_TRUE(gen.Generate()) << gen.error();
-
- auto got = gen.result();
- auto* expect = R"(ByteAddressBuffer b : register(t1, space2);
+ auto got = gen.result();
+ auto* expect = R"(ByteAddressBuffer b : register(t1, space2);
void a_func() {
uint tint_symbol_1 = 0u;
@@ -134,43 +135,41 @@ void a_func() {
}
)";
- EXPECT_EQ(expect, got);
+ EXPECT_EQ(expect, got);
}
TEST_F(HlslSanitizerTest, Call_ArrayLength_ArrayLengthFromUniform) {
- auto* s = Structure("my_struct", {Member(0, "a", ty.array<f32>(4))});
- Global("b", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kRead,
- ast::AttributeList{
- create<ast::BindingAttribute>(1),
- create<ast::GroupAttribute>(2),
- });
- Global("c", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kRead,
+ auto* s = Structure("my_struct", {Member(0, "a", ty.array<f32>(4))});
+ Global("b", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kRead,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(1),
+ create<ast::GroupAttribute>(2),
+ });
+ Global("c", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kRead,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(2),
+ create<ast::GroupAttribute>(2),
+ });
+
+ Func("a_func", ast::VariableList{}, ty.void_(),
+ ast::StatementList{
+ Decl(Var("len", ty.u32(), ast::StorageClass::kNone,
+ Add(Call("arrayLength", AddressOf(MemberAccessor("b", "a"))),
+ Call("arrayLength", AddressOf(MemberAccessor("c", "a")))))),
+ },
ast::AttributeList{
- create<ast::BindingAttribute>(2),
- create<ast::GroupAttribute>(2),
+ Stage(ast::PipelineStage::kFragment),
});
- Func("a_func", ast::VariableList{}, ty.void_(),
- ast::StatementList{
- Decl(Var(
- "len", ty.u32(), ast::StorageClass::kNone,
- Add(Call("arrayLength", AddressOf(MemberAccessor("b", "a"))),
- Call("arrayLength", AddressOf(MemberAccessor("c", "a")))))),
- },
- ast::AttributeList{
- Stage(ast::PipelineStage::kFragment),
- });
-
- Options options;
- options.array_length_from_uniform.ubo_binding = {3, 4};
- options.array_length_from_uniform.bindpoint_to_size_index.emplace(
- sem::BindingPoint{2, 2}, 7u);
- GeneratorImpl& gen = SanitizeAndBuild(options);
-
- ASSERT_TRUE(gen.Generate()) << gen.error();
-
- auto got = gen.result();
- auto* expect = R"(cbuffer cbuffer_tint_symbol_1 : register(b4, space3) {
+ Options options;
+ options.array_length_from_uniform.ubo_binding = {3, 4};
+ options.array_length_from_uniform.bindpoint_to_size_index.emplace(sem::BindingPoint{2, 2}, 7u);
+ GeneratorImpl& gen = SanitizeAndBuild(options);
+
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+
+ auto got = gen.result();
+ auto* expect = R"(cbuffer cbuffer_tint_symbol_1 : register(b4, space3) {
uint4 tint_symbol_1[2];
};
ByteAddressBuffer b : register(t1, space2);
@@ -184,61 +183,60 @@ void a_func() {
return;
}
)";
- EXPECT_EQ(expect, got);
+ EXPECT_EQ(expect, got);
}
TEST_F(HlslSanitizerTest, PromoteArrayInitializerToConstVar) {
- auto* array_init = array<i32, 4>(1, 2, 3, 4);
- auto* array_index = IndexAccessor(array_init, 3);
- auto* pos = Var("pos", ty.i32(), ast::StorageClass::kNone, array_index);
-
- Func("main", ast::VariableList{}, ty.void_(),
- {
- Decl(pos),
- },
- {
- Stage(ast::PipelineStage::kFragment),
- });
+ auto* array_init = array<i32, 4>(1_i, 2_i, 3_i, 4_i);
+ auto* array_index = IndexAccessor(array_init, 3_i);
+ auto* pos = Var("pos", ty.i32(), ast::StorageClass::kNone, array_index);
+
+ Func("main", ast::VariableList{}, ty.void_(),
+ {
+ Decl(pos),
+ },
+ {
+ Stage(ast::PipelineStage::kFragment),
+ });
- GeneratorImpl& gen = SanitizeAndBuild();
+ GeneratorImpl& gen = SanitizeAndBuild();
- ASSERT_TRUE(gen.Generate()) << gen.error();
+ ASSERT_TRUE(gen.Generate()) << gen.error();
- auto got = gen.result();
- auto* expect = R"(void main() {
+ auto got = gen.result();
+ auto* expect = R"(void main() {
const int tint_symbol[4] = {1, 2, 3, 4};
int pos = tint_symbol[3];
return;
}
)";
- EXPECT_EQ(expect, got);
+ EXPECT_EQ(expect, got);
}
TEST_F(HlslSanitizerTest, PromoteStructInitializerToConstVar) {
- auto* str = Structure("S", {
- Member("a", ty.i32()),
- Member("b", ty.vec3<f32>()),
- Member("c", ty.i32()),
- });
- auto* struct_init = Construct(ty.Of(str), 1, vec3<f32>(2.f, 3.f, 4.f), 4);
- auto* struct_access = MemberAccessor(struct_init, "b");
- auto* pos =
- Var("pos", ty.vec3<f32>(), ast::StorageClass::kNone, struct_access);
-
- Func("main", ast::VariableList{}, ty.void_(),
- {
- Decl(pos),
- },
- {
- Stage(ast::PipelineStage::kFragment),
- });
-
- GeneratorImpl& gen = SanitizeAndBuild();
-
- ASSERT_TRUE(gen.Generate()) << gen.error();
-
- auto got = gen.result();
- auto* expect = R"(struct S {
+ auto* str = Structure("S", {
+ Member("a", ty.i32()),
+ Member("b", ty.vec3<f32>()),
+ Member("c", ty.i32()),
+ });
+ auto* struct_init = Construct(ty.Of(str), 1_i, vec3<f32>(2_f, 3_f, 4_f), 4_i);
+ auto* struct_access = MemberAccessor(struct_init, "b");
+ auto* pos = Var("pos", ty.vec3<f32>(), ast::StorageClass::kNone, struct_access);
+
+ Func("main", ast::VariableList{}, ty.void_(),
+ {
+ Decl(pos),
+ },
+ {
+ Stage(ast::PipelineStage::kFragment),
+ });
+
+ GeneratorImpl& gen = SanitizeAndBuild();
+
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+
+ auto got = gen.result();
+ auto* expect = R"(struct S {
int a;
float3 b;
int c;
@@ -250,85 +248,80 @@ void main() {
return;
}
)";
- EXPECT_EQ(expect, got);
+ EXPECT_EQ(expect, got);
}
TEST_F(HlslSanitizerTest, InlinePtrLetsBasic) {
- // var v : i32;
- // let p : ptr<function, i32> = &v;
- // let x : i32 = *p;
- auto* v = Var("v", ty.i32());
- auto* p =
- Const("p", ty.pointer<i32>(ast::StorageClass::kFunction), AddressOf(v));
- auto* x = Var("x", ty.i32(), ast::StorageClass::kNone, Deref(p));
-
- Func("main", ast::VariableList{}, ty.void_(),
- {
- Decl(v),
- Decl(p),
- Decl(x),
- },
- {
- Stage(ast::PipelineStage::kFragment),
- });
-
- GeneratorImpl& gen = SanitizeAndBuild();
-
- ASSERT_TRUE(gen.Generate()) << gen.error();
-
- auto got = gen.result();
- auto* expect = R"(void main() {
+ // var v : i32;
+ // let p : ptr<function, i32> = &v;
+ // let x : i32 = *p;
+ auto* v = Var("v", ty.i32());
+ auto* p = Let("p", ty.pointer<i32>(ast::StorageClass::kFunction), AddressOf(v));
+ auto* x = Var("x", ty.i32(), ast::StorageClass::kNone, Deref(p));
+
+ Func("main", ast::VariableList{}, ty.void_(),
+ {
+ Decl(v),
+ Decl(p),
+ Decl(x),
+ },
+ {
+ Stage(ast::PipelineStage::kFragment),
+ });
+
+ GeneratorImpl& gen = SanitizeAndBuild();
+
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+
+ auto got = gen.result();
+ auto* expect = R"(void main() {
int v = 0;
int x = v;
return;
}
)";
- EXPECT_EQ(expect, got);
+ EXPECT_EQ(expect, got);
}
TEST_F(HlslSanitizerTest, InlinePtrLetsComplexChain) {
- // var a : array<mat4x4<f32>, 4>;
- // let ap : ptr<function, array<mat4x4<f32>, 4>> = &a;
- // let mp : ptr<function, mat4x4<f32>> = &(*ap)[3];
- // let vp : ptr<function, vec4<f32>> = &(*mp)[2];
- // let v : vec4<f32> = *vp;
- auto* a = Var("a", ty.array(ty.mat4x4<f32>(), 4));
- auto* ap = Const(
- "ap",
- ty.pointer(ty.array(ty.mat4x4<f32>(), 4), ast::StorageClass::kFunction),
- AddressOf(a));
- auto* mp =
- Const("mp", ty.pointer(ty.mat4x4<f32>(), ast::StorageClass::kFunction),
- AddressOf(IndexAccessor(Deref(ap), 3)));
- auto* vp =
- Const("vp", ty.pointer(ty.vec4<f32>(), ast::StorageClass::kFunction),
- AddressOf(IndexAccessor(Deref(mp), 2)));
- auto* v = Var("v", ty.vec4<f32>(), ast::StorageClass::kNone, Deref(vp));
-
- Func("main", ast::VariableList{}, ty.void_(),
- {
- Decl(a),
- Decl(ap),
- Decl(mp),
- Decl(vp),
- Decl(v),
- },
- {
- Stage(ast::PipelineStage::kFragment),
- });
-
- GeneratorImpl& gen = SanitizeAndBuild();
-
- ASSERT_TRUE(gen.Generate()) << gen.error();
-
- auto got = gen.result();
- auto* expect = R"(void main() {
+ // var a : array<mat4x4<f32>, 4u>;
+ // let ap : ptr<function, array<mat4x4<f32>, 4u>> = &a;
+ // let mp : ptr<function, mat4x4<f32>> = &(*ap)[3i];
+ // let vp : ptr<function, vec4<f32>> = &(*mp)[2i];
+ // let v : vec4<f32> = *vp;
+ auto* a = Var("a", ty.array(ty.mat4x4<f32>(), 4_u));
+ auto* ap = Let("ap", ty.pointer(ty.array(ty.mat4x4<f32>(), 4_u), ast::StorageClass::kFunction),
+ AddressOf(a));
+ auto* mp = Let("mp", ty.pointer(ty.mat4x4<f32>(), ast::StorageClass::kFunction),
+ AddressOf(IndexAccessor(Deref(ap), 3_i)));
+ auto* vp = Let("vp", ty.pointer(ty.vec4<f32>(), ast::StorageClass::kFunction),
+ AddressOf(IndexAccessor(Deref(mp), 2_i)));
+ auto* v = Var("v", ty.vec4<f32>(), ast::StorageClass::kNone, Deref(vp));
+
+ Func("main", ast::VariableList{}, ty.void_(),
+ {
+ Decl(a),
+ Decl(ap),
+ Decl(mp),
+ Decl(vp),
+ Decl(v),
+ },
+ {
+ Stage(ast::PipelineStage::kFragment),
+ });
+
+ GeneratorImpl& gen = SanitizeAndBuild();
+
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+
+ auto got = gen.result();
+ auto* expect = R"(void main() {
float4x4 a[4] = (float4x4[4])0;
float4 v = a[3][2];
return;
}
)";
- EXPECT_EQ(expect, got);
+ EXPECT_EQ(expect, got);
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_switch_test.cc b/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_switch_test.cc
index 792b64be6d8..3ef1a7be88d 100644
--- a/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_switch_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_switch_test.cc
@@ -14,25 +14,27 @@
#include "src/tint/writer/hlsl/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::hlsl {
namespace {
using HlslGeneratorImplTest_Switch = TestHelper;
TEST_F(HlslGeneratorImplTest_Switch, Emit_Switch) {
- Global("cond", ty.i32(), ast::StorageClass::kPrivate);
- auto* s = Switch( //
- Expr("cond"), //
- Case(Expr(5), Block(Break())), //
- DefaultCase());
- WrapInFunction(s);
+ Global("cond", ty.i32(), ast::StorageClass::kPrivate);
+ auto* s = Switch( //
+ Expr("cond"), //
+ Case(Expr(5_i), Block(Break())), //
+ DefaultCase());
+ WrapInFunction(s);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(s)) << gen.error();
- EXPECT_EQ(gen.result(), R"( switch(cond) {
+ ASSERT_TRUE(gen.EmitStatement(s)) << gen.error();
+ EXPECT_EQ(gen.result(), R"( switch(cond) {
case 5: {
break;
}
@@ -44,19 +46,19 @@ TEST_F(HlslGeneratorImplTest_Switch, Emit_Switch) {
}
TEST_F(HlslGeneratorImplTest_Switch, Emit_Switch_OnlyDefaultCase) {
- Global("cond", ty.i32(), ast::StorageClass::kPrivate);
- Global("a", ty.i32(), ast::StorageClass::kPrivate);
- auto* s = Switch( //
- Expr("cond"), //
- DefaultCase(Block(Assign(Expr("a"), Expr(42)))));
- WrapInFunction(s);
+ Global("cond", ty.i32(), ast::StorageClass::kPrivate);
+ Global("a", ty.i32(), ast::StorageClass::kPrivate);
+ auto* s = Switch( //
+ Expr("cond"), //
+ DefaultCase(Block(Assign(Expr("a"), Expr(42_i)))));
+ WrapInFunction(s);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(s)) << gen.error();
- EXPECT_EQ(gen.result(), R"( cond;
+ ASSERT_TRUE(gen.EmitStatement(s)) << gen.error();
+ EXPECT_EQ(gen.result(), R"( cond;
do {
a = 42;
} while (false);
diff --git a/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_test.cc b/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_test.cc
index ea479e46aa3..a1e0edb33bf 100644
--- a/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_test.cc
@@ -19,50 +19,55 @@ namespace {
using HlslGeneratorImplTest = TestHelper;
+TEST_F(HlslGeneratorImplTest, InvalidProgram) {
+ Diagnostics().add_error(diag::System::Writer, "make the program invalid");
+ ASSERT_FALSE(IsValid());
+ auto program = std::make_unique<Program>(std::move(*this));
+ ASSERT_FALSE(program->IsValid());
+ auto result = Generate(program.get(), Options{});
+ EXPECT_EQ(result.error, "input program is not valid");
+}
+
TEST_F(HlslGeneratorImplTest, Generate) {
- Func("my_func", ast::VariableList{}, ty.void_(), ast::StatementList{},
- ast::AttributeList{});
+ Func("my_func", ast::VariableList{}, ty.void_(), ast::StatementList{}, ast::AttributeList{});
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(void my_func() {
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(void my_func() {
}
)");
}
struct HlslBuiltinData {
- ast::Builtin builtin;
- const char* attribute_name;
+ ast::Builtin builtin;
+ const char* attribute_name;
};
inline std::ostream& operator<<(std::ostream& out, HlslBuiltinData data) {
- out << data.builtin;
- return out;
+ out << data.builtin;
+ return out;
}
using HlslBuiltinConversionTest = TestParamHelper<HlslBuiltinData>;
TEST_P(HlslBuiltinConversionTest, Emit) {
- auto params = GetParam();
- GeneratorImpl& gen = Build();
+ auto params = GetParam();
+ GeneratorImpl& gen = Build();
- EXPECT_EQ(gen.builtin_to_attribute(params.builtin),
- std::string(params.attribute_name));
+ EXPECT_EQ(gen.builtin_to_attribute(params.builtin), std::string(params.attribute_name));
}
INSTANTIATE_TEST_SUITE_P(
HlslGeneratorImplTest,
HlslBuiltinConversionTest,
- testing::Values(
- HlslBuiltinData{ast::Builtin::kPosition, "SV_Position"},
- HlslBuiltinData{ast::Builtin::kVertexIndex, "SV_VertexID"},
- HlslBuiltinData{ast::Builtin::kInstanceIndex, "SV_InstanceID"},
- HlslBuiltinData{ast::Builtin::kFrontFacing, "SV_IsFrontFace"},
- HlslBuiltinData{ast::Builtin::kFragDepth, "SV_Depth"},
- HlslBuiltinData{ast::Builtin::kLocalInvocationId, "SV_GroupThreadID"},
- HlslBuiltinData{ast::Builtin::kLocalInvocationIndex, "SV_GroupIndex"},
- HlslBuiltinData{ast::Builtin::kGlobalInvocationId,
- "SV_DispatchThreadID"},
- HlslBuiltinData{ast::Builtin::kWorkgroupId, "SV_GroupID"},
- HlslBuiltinData{ast::Builtin::kSampleIndex, "SV_SampleIndex"},
- HlslBuiltinData{ast::Builtin::kSampleMask, "SV_Coverage"}));
+ testing::Values(HlslBuiltinData{ast::Builtin::kPosition, "SV_Position"},
+ HlslBuiltinData{ast::Builtin::kVertexIndex, "SV_VertexID"},
+ HlslBuiltinData{ast::Builtin::kInstanceIndex, "SV_InstanceID"},
+ HlslBuiltinData{ast::Builtin::kFrontFacing, "SV_IsFrontFace"},
+ HlslBuiltinData{ast::Builtin::kFragDepth, "SV_Depth"},
+ HlslBuiltinData{ast::Builtin::kLocalInvocationId, "SV_GroupThreadID"},
+ HlslBuiltinData{ast::Builtin::kLocalInvocationIndex, "SV_GroupIndex"},
+ HlslBuiltinData{ast::Builtin::kGlobalInvocationId, "SV_DispatchThreadID"},
+ HlslBuiltinData{ast::Builtin::kWorkgroupId, "SV_GroupID"},
+ HlslBuiltinData{ast::Builtin::kSampleIndex, "SV_SampleIndex"},
+ HlslBuiltinData{ast::Builtin::kSampleMask, "SV_Coverage"}));
} // namespace
} // namespace tint::writer::hlsl
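
The parameterized `HlslBuiltinConversionTest` above pins down how WGSL builtins map to HLSL system-value semantics. For reference, the sketch below restates that mapping as a plain lookup function; the enum and function names here are placeholders for illustration, while the actual conversion lives in `GeneratorImpl::builtin_to_attribute`.

// Reference-only sketch of the WGSL builtin -> HLSL semantic mapping exercised above.
#include <string>

enum class Builtin {
    kPosition, kVertexIndex, kInstanceIndex, kFrontFacing, kFragDepth,
    kLocalInvocationId, kLocalInvocationIndex, kGlobalInvocationId,
    kWorkgroupId, kSampleIndex, kSampleMask,
};

std::string BuiltinToHlslSemantic(Builtin b) {
    switch (b) {
        case Builtin::kPosition:             return "SV_Position";
        case Builtin::kVertexIndex:          return "SV_VertexID";
        case Builtin::kInstanceIndex:        return "SV_InstanceID";
        case Builtin::kFrontFacing:          return "SV_IsFrontFace";
        case Builtin::kFragDepth:            return "SV_Depth";
        case Builtin::kLocalInvocationId:    return "SV_GroupThreadID";
        case Builtin::kLocalInvocationIndex: return "SV_GroupIndex";
        case Builtin::kGlobalInvocationId:   return "SV_DispatchThreadID";
        case Builtin::kWorkgroupId:          return "SV_GroupID";
        case Builtin::kSampleIndex:          return "SV_SampleIndex";
        case Builtin::kSampleMask:           return "SV_Coverage";
    }
    return "";
}
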
diff --git a/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_type_test.cc b/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_type_test.cc
index bea99cefe20..b99f06eddcc 100644
--- a/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_type_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_type_test.cc
@@ -15,135 +15,133 @@
#include "gmock/gmock.h"
#include "src/tint/ast/call_statement.h"
#include "src/tint/ast/stage_attribute.h"
-#include "src/tint/sem/depth_texture_type.h"
-#include "src/tint/sem/multisampled_texture_type.h"
-#include "src/tint/sem/sampled_texture_type.h"
-#include "src/tint/sem/sampler_type.h"
-#include "src/tint/sem/storage_texture_type.h"
+#include "src/tint/sem/depth_texture.h"
+#include "src/tint/sem/multisampled_texture.h"
+#include "src/tint/sem/sampled_texture.h"
+#include "src/tint/sem/sampler.h"
+#include "src/tint/sem/storage_texture.h"
#include "src/tint/writer/hlsl/test_helper.h"
+using ::testing::HasSubstr;
+
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::hlsl {
namespace {
-using ::testing::HasSubstr;
-
using HlslGeneratorImplTest_Type = TestHelper;
TEST_F(HlslGeneratorImplTest_Type, EmitType_Array) {
- auto* arr = ty.array<bool, 4>();
- Global("G", arr, ast::StorageClass::kPrivate);
+ auto* arr = ty.array<bool, 4>();
+ Global("G", arr, ast::StorageClass::kPrivate);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitType(out, program->TypeOf(arr), ast::StorageClass::kNone,
- ast::Access::kReadWrite, "ary"))
- << gen.error();
- EXPECT_EQ(out.str(), "bool ary[4]");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitType(out, program->TypeOf(arr), ast::StorageClass::kNone,
+ ast::Access::kReadWrite, "ary"))
+ << gen.error();
+ EXPECT_EQ(out.str(), "bool ary[4]");
}
TEST_F(HlslGeneratorImplTest_Type, EmitType_ArrayOfArray) {
- auto* arr = ty.array(ty.array<bool, 4>(), 5);
- Global("G", arr, ast::StorageClass::kPrivate);
+ auto* arr = ty.array(ty.array<bool, 4>(), 5_u);
+ Global("G", arr, ast::StorageClass::kPrivate);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitType(out, program->TypeOf(arr), ast::StorageClass::kNone,
- ast::Access::kReadWrite, "ary"))
- << gen.error();
- EXPECT_EQ(out.str(), "bool ary[5][4]");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitType(out, program->TypeOf(arr), ast::StorageClass::kNone,
+ ast::Access::kReadWrite, "ary"))
+ << gen.error();
+ EXPECT_EQ(out.str(), "bool ary[5][4]");
}
TEST_F(HlslGeneratorImplTest_Type, EmitType_ArrayOfArrayOfArray) {
- auto* arr = ty.array(ty.array(ty.array<bool, 4>(), 5), 6);
- Global("G", arr, ast::StorageClass::kPrivate);
+ auto* arr = ty.array(ty.array(ty.array<bool, 4>(), 5_u), 6_u);
+ Global("G", arr, ast::StorageClass::kPrivate);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitType(out, program->TypeOf(arr), ast::StorageClass::kNone,
- ast::Access::kReadWrite, "ary"))
- << gen.error();
- EXPECT_EQ(out.str(), "bool ary[6][5][4]");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitType(out, program->TypeOf(arr), ast::StorageClass::kNone,
+ ast::Access::kReadWrite, "ary"))
+ << gen.error();
+ EXPECT_EQ(out.str(), "bool ary[6][5][4]");
}
TEST_F(HlslGeneratorImplTest_Type, EmitType_Array_WithoutName) {
- auto* arr = ty.array<bool, 4>();
- Global("G", arr, ast::StorageClass::kPrivate);
+ auto* arr = ty.array<bool, 4>();
+ Global("G", arr, ast::StorageClass::kPrivate);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitType(out, program->TypeOf(arr), ast::StorageClass::kNone,
- ast::Access::kReadWrite, ""))
- << gen.error();
- EXPECT_EQ(out.str(), "bool[4]");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitType(out, program->TypeOf(arr), ast::StorageClass::kNone,
+ ast::Access::kReadWrite, ""))
+ << gen.error();
+ EXPECT_EQ(out.str(), "bool[4]");
}
TEST_F(HlslGeneratorImplTest_Type, EmitType_Bool) {
- auto* bool_ = create<sem::Bool>();
+ auto* bool_ = create<sem::Bool>();
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitType(out, bool_, ast::StorageClass::kNone,
- ast::Access::kReadWrite, ""))
- << gen.error();
- EXPECT_EQ(out.str(), "bool");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitType(out, bool_, ast::StorageClass::kNone, ast::Access::kReadWrite, ""))
+ << gen.error();
+ EXPECT_EQ(out.str(), "bool");
}
TEST_F(HlslGeneratorImplTest_Type, EmitType_F32) {
- auto* f32 = create<sem::F32>();
+ auto* f32 = create<sem::F32>();
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitType(out, f32, ast::StorageClass::kNone,
- ast::Access::kReadWrite, ""))
- << gen.error();
- EXPECT_EQ(out.str(), "float");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitType(out, f32, ast::StorageClass::kNone, ast::Access::kReadWrite, ""))
+ << gen.error();
+ EXPECT_EQ(out.str(), "float");
}
TEST_F(HlslGeneratorImplTest_Type, EmitType_I32) {
- auto* i32 = create<sem::I32>();
+ auto* i32 = create<sem::I32>();
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitType(out, i32, ast::StorageClass::kNone,
- ast::Access::kReadWrite, ""))
- << gen.error();
- EXPECT_EQ(out.str(), "int");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitType(out, i32, ast::StorageClass::kNone, ast::Access::kReadWrite, ""))
+ << gen.error();
+ EXPECT_EQ(out.str(), "int");
}
TEST_F(HlslGeneratorImplTest_Type, EmitType_Matrix) {
- auto* f32 = create<sem::F32>();
- auto* vec3 = create<sem::Vector>(f32, 3u);
- auto* mat2x3 = create<sem::Matrix>(vec3, 2u);
+ auto* f32 = create<sem::F32>();
+ auto* vec3 = create<sem::Vector>(f32, 3u);
+ auto* mat2x3 = create<sem::Matrix>(vec3, 2u);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitType(out, mat2x3, ast::StorageClass::kNone,
- ast::Access::kReadWrite, ""))
- << gen.error();
- EXPECT_EQ(out.str(), "float2x3");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitType(out, mat2x3, ast::StorageClass::kNone, ast::Access::kReadWrite, ""))
+ << gen.error();
+ EXPECT_EQ(out.str(), "float2x3");
}
TEST_F(HlslGeneratorImplTest_Type, EmitType_StructDecl) {
- auto* s = Structure("S", {
- Member("a", ty.i32()),
- Member("b", ty.f32()),
- });
- Global("g", ty.Of(s), ast::StorageClass::kPrivate);
-
- GeneratorImpl& gen = Build();
-
- TextGenerator::TextBuffer buf;
- auto* sem_s = program->TypeOf(s)->As<sem::Struct>();
- ASSERT_TRUE(gen.EmitStructType(&buf, sem_s)) << gen.error();
- EXPECT_EQ(buf.String(), R"(struct S {
+ auto* s = Structure("S", {
+ Member("a", ty.i32()),
+ Member("b", ty.f32()),
+ });
+ Global("g", ty.Of(s), ast::StorageClass::kPrivate);
+
+ GeneratorImpl& gen = Build();
+
+ TextGenerator::TextBuffer buf;
+ auto* sem_s = program->TypeOf(s)->As<sem::Struct>();
+ ASSERT_TRUE(gen.EmitStructType(&buf, sem_s)) << gen.error();
+ EXPECT_EQ(buf.String(), R"(struct S {
int a;
float b;
};
@@ -151,50 +149,49 @@ TEST_F(HlslGeneratorImplTest_Type, EmitType_StructDecl) {
}
TEST_F(HlslGeneratorImplTest_Type, EmitType_StructDecl_OmittedIfStorageBuffer) {
- auto* s = Structure("S", {
- Member("a", ty.i32()),
- Member("b", ty.f32()),
- });
- Global("g", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kReadWrite,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(0),
- });
-
- GeneratorImpl& gen = Build();
-
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), "RWByteAddressBuffer g : register(u0, space0);\n");
+ auto* s = Structure("S", {
+ Member("a", ty.i32()),
+ Member("b", ty.f32()),
+ });
+ Global("g", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kReadWrite,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(0),
+ });
+
+ GeneratorImpl& gen = Build();
+
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), "RWByteAddressBuffer g : register(u0, space0);\n");
}
TEST_F(HlslGeneratorImplTest_Type, EmitType_Struct) {
- auto* s = Structure("S", {
- Member("a", ty.i32()),
- Member("b", ty.f32()),
- });
- Global("g", ty.Of(s), ast::StorageClass::kPrivate);
-
- GeneratorImpl& gen = Build();
-
- auto* sem_s = program->TypeOf(s)->As<sem::Struct>();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitType(out, sem_s, ast::StorageClass::kNone,
- ast::Access::kReadWrite, ""))
- << gen.error();
- EXPECT_EQ(out.str(), "S");
+ auto* s = Structure("S", {
+ Member("a", ty.i32()),
+ Member("b", ty.f32()),
+ });
+ Global("g", ty.Of(s), ast::StorageClass::kPrivate);
+
+ GeneratorImpl& gen = Build();
+
+ auto* sem_s = program->TypeOf(s)->As<sem::Struct>();
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitType(out, sem_s, ast::StorageClass::kNone, ast::Access::kReadWrite, ""))
+ << gen.error();
+ EXPECT_EQ(out.str(), "S");
}
TEST_F(HlslGeneratorImplTest_Type, EmitType_Struct_NameCollision) {
- auto* s = Structure("S", {
- Member("double", ty.i32()),
- Member("float", ty.f32()),
- });
- Global("g", ty.Of(s), ast::StorageClass::kPrivate);
+ auto* s = Structure("S", {
+ Member("double", ty.i32()),
+ Member("float", ty.f32()),
+ });
+ Global("g", ty.Of(s), ast::StorageClass::kPrivate);
- GeneratorImpl& gen = SanitizeAndBuild();
+ GeneratorImpl& gen = SanitizeAndBuild();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr(R"(struct S {
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr(R"(struct S {
int tint_symbol;
float tint_symbol_1;
};
@@ -202,18 +199,18 @@ TEST_F(HlslGeneratorImplTest_Type, EmitType_Struct_NameCollision) {
}
TEST_F(HlslGeneratorImplTest_Type, EmitType_Struct_WithOffsetAttributes) {
- auto* s = Structure("S", {
- Member("a", ty.i32(), {MemberOffset(0)}),
- Member("b", ty.f32(), {MemberOffset(8)}),
- });
- Global("g", ty.Of(s), ast::StorageClass::kPrivate);
-
- GeneratorImpl& gen = Build();
-
- TextGenerator::TextBuffer buf;
- auto* sem_s = program->TypeOf(s)->As<sem::Struct>();
- ASSERT_TRUE(gen.EmitStructType(&buf, sem_s)) << gen.error();
- EXPECT_EQ(buf.String(), R"(struct S {
+ auto* s = Structure("S", {
+ Member("a", ty.i32(), {MemberOffset(0)}),
+ Member("b", ty.f32(), {MemberOffset(8)}),
+ });
+ Global("g", ty.Of(s), ast::StorageClass::kPrivate);
+
+ GeneratorImpl& gen = Build();
+
+ TextGenerator::TextBuffer buf;
+ auto* sem_s = program->TypeOf(s)->As<sem::Struct>();
+ ASSERT_TRUE(gen.EmitStructType(&buf, sem_s)) << gen.error();
+ EXPECT_EQ(buf.String(), R"(struct S {
int a;
float b;
};
@@ -221,347 +218,323 @@ TEST_F(HlslGeneratorImplTest_Type, EmitType_Struct_WithOffsetAttributes) {
}
TEST_F(HlslGeneratorImplTest_Type, EmitType_U32) {
- auto* u32 = create<sem::U32>();
+ auto* u32 = create<sem::U32>();
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitType(out, u32, ast::StorageClass::kNone,
- ast::Access::kReadWrite, ""))
- << gen.error();
- EXPECT_EQ(out.str(), "uint");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitType(out, u32, ast::StorageClass::kNone, ast::Access::kReadWrite, ""))
+ << gen.error();
+ EXPECT_EQ(out.str(), "uint");
}
TEST_F(HlslGeneratorImplTest_Type, EmitType_Vector) {
- auto* f32 = create<sem::F32>();
- auto* vec3 = create<sem::Vector>(f32, 3u);
+ auto* f32 = create<sem::F32>();
+ auto* vec3 = create<sem::Vector>(f32, 3u);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitType(out, vec3, ast::StorageClass::kNone,
- ast::Access::kReadWrite, ""))
- << gen.error();
- EXPECT_EQ(out.str(), "float3");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitType(out, vec3, ast::StorageClass::kNone, ast::Access::kReadWrite, ""))
+ << gen.error();
+ EXPECT_EQ(out.str(), "float3");
}
TEST_F(HlslGeneratorImplTest_Type, EmitType_Void) {
- auto* void_ = create<sem::Void>();
+ auto* void_ = create<sem::Void>();
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitType(out, void_, ast::StorageClass::kNone,
- ast::Access::kReadWrite, ""))
- << gen.error();
- EXPECT_EQ(out.str(), "void");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitType(out, void_, ast::StorageClass::kNone, ast::Access::kReadWrite, ""))
+ << gen.error();
+ EXPECT_EQ(out.str(), "void");
}
TEST_F(HlslGeneratorImplTest_Type, EmitSampler) {
- auto* sampler = create<sem::Sampler>(ast::SamplerKind::kSampler);
+ auto* sampler = create<sem::Sampler>(ast::SamplerKind::kSampler);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitType(out, sampler, ast::StorageClass::kNone,
- ast::Access::kReadWrite, ""))
- << gen.error();
- EXPECT_EQ(out.str(), "SamplerState");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitType(out, sampler, ast::StorageClass::kNone, ast::Access::kReadWrite, ""))
+ << gen.error();
+ EXPECT_EQ(out.str(), "SamplerState");
}
TEST_F(HlslGeneratorImplTest_Type, EmitSamplerComparison) {
- auto* sampler = create<sem::Sampler>(ast::SamplerKind::kComparisonSampler);
+ auto* sampler = create<sem::Sampler>(ast::SamplerKind::kComparisonSampler);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitType(out, sampler, ast::StorageClass::kNone,
- ast::Access::kReadWrite, ""))
- << gen.error();
- EXPECT_EQ(out.str(), "SamplerComparisonState");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitType(out, sampler, ast::StorageClass::kNone, ast::Access::kReadWrite, ""))
+ << gen.error();
+ EXPECT_EQ(out.str(), "SamplerComparisonState");
}
struct HlslDepthTextureData {
- ast::TextureDimension dim;
- std::string result;
+ ast::TextureDimension dim;
+ std::string result;
};
inline std::ostream& operator<<(std::ostream& out, HlslDepthTextureData data) {
- out << data.dim;
- return out;
+ out << data.dim;
+ return out;
}
using HlslDepthTexturesTest = TestParamHelper<HlslDepthTextureData>;
TEST_P(HlslDepthTexturesTest, Emit) {
- auto params = GetParam();
+ auto params = GetParam();
- auto* t = ty.depth_texture(params.dim);
+ auto* t = ty.depth_texture(params.dim);
- Global("tex", t,
- ast::AttributeList{
- create<ast::BindingAttribute>(1),
- create<ast::GroupAttribute>(2),
- });
+ Global("tex", t,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(1),
+ create<ast::GroupAttribute>(2),
+ });
- Func("main", {}, ty.void_(), {CallStmt(Call("textureDimensions", "tex"))},
- {Stage(ast::PipelineStage::kFragment)});
+ Func("main", {}, ty.void_(), {CallStmt(Call("textureDimensions", "tex"))},
+ {Stage(ast::PipelineStage::kFragment)});
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr(params.result));
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr(params.result));
}
INSTANTIATE_TEST_SUITE_P(
HlslGeneratorImplTest_Type,
HlslDepthTexturesTest,
- testing::Values(
- HlslDepthTextureData{ast::TextureDimension::k2d,
- "Texture2D tex : register(t1, space2);"},
- HlslDepthTextureData{ast::TextureDimension::k2dArray,
- "Texture2DArray tex : register(t1, space2);"},
- HlslDepthTextureData{ast::TextureDimension::kCube,
- "TextureCube tex : register(t1, space2);"},
- HlslDepthTextureData{ast::TextureDimension::kCubeArray,
- "TextureCubeArray tex : register(t1, space2);"}));
+ testing::Values(HlslDepthTextureData{ast::TextureDimension::k2d,
+ "Texture2D tex : register(t1, space2);"},
+ HlslDepthTextureData{ast::TextureDimension::k2dArray,
+ "Texture2DArray tex : register(t1, space2);"},
+ HlslDepthTextureData{ast::TextureDimension::kCube,
+ "TextureCube tex : register(t1, space2);"},
+ HlslDepthTextureData{ast::TextureDimension::kCubeArray,
+ "TextureCubeArray tex : register(t1, space2);"}));
using HlslDepthMultisampledTexturesTest = TestHelper;
TEST_F(HlslDepthMultisampledTexturesTest, Emit) {
- auto* t = ty.depth_multisampled_texture(ast::TextureDimension::k2d);
+ auto* t = ty.depth_multisampled_texture(ast::TextureDimension::k2d);
- Global("tex", t,
- ast::AttributeList{
- create<ast::BindingAttribute>(1),
- create<ast::GroupAttribute>(2),
- });
+ Global("tex", t,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(1),
+ create<ast::GroupAttribute>(2),
+ });
- Func("main", {}, ty.void_(), {CallStmt(Call("textureDimensions", "tex"))},
- {Stage(ast::PipelineStage::kFragment)});
+ Func("main", {}, ty.void_(), {CallStmt(Call("textureDimensions", "tex"))},
+ {Stage(ast::PipelineStage::kFragment)});
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(),
- HasSubstr("Texture2DMS<float4> tex : register(t1, space2);"));
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr("Texture2DMS<float4> tex : register(t1, space2);"));
}
enum class TextureDataType { F32, U32, I32 };
struct HlslSampledTextureData {
- ast::TextureDimension dim;
- TextureDataType datatype;
- std::string result;
+ ast::TextureDimension dim;
+ TextureDataType datatype;
+ std::string result;
};
-inline std::ostream& operator<<(std::ostream& out,
- HlslSampledTextureData data) {
- out << data.dim;
- return out;
+inline std::ostream& operator<<(std::ostream& out, HlslSampledTextureData data) {
+ out << data.dim;
+ return out;
}
using HlslSampledTexturesTest = TestParamHelper<HlslSampledTextureData>;
TEST_P(HlslSampledTexturesTest, Emit) {
- auto params = GetParam();
-
- const ast::Type* datatype = nullptr;
- switch (params.datatype) {
- case TextureDataType::F32:
- datatype = ty.f32();
- break;
- case TextureDataType::U32:
- datatype = ty.u32();
- break;
- case TextureDataType::I32:
- datatype = ty.i32();
- break;
- }
- auto* t = ty.sampled_texture(params.dim, datatype);
-
- Global("tex", t,
- ast::AttributeList{
- create<ast::BindingAttribute>(1),
- create<ast::GroupAttribute>(2),
- });
-
- Func("main", {}, ty.void_(), {CallStmt(Call("textureDimensions", "tex"))},
- {Stage(ast::PipelineStage::kFragment)});
-
- GeneratorImpl& gen = Build();
-
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr(params.result));
+ auto params = GetParam();
+
+ const ast::Type* datatype = nullptr;
+ switch (params.datatype) {
+ case TextureDataType::F32:
+ datatype = ty.f32();
+ break;
+ case TextureDataType::U32:
+ datatype = ty.u32();
+ break;
+ case TextureDataType::I32:
+ datatype = ty.i32();
+ break;
+ }
+ auto* t = ty.sampled_texture(params.dim, datatype);
+
+ Global("tex", t,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(1),
+ create<ast::GroupAttribute>(2),
+ });
+
+ Func("main", {}, ty.void_(), {CallStmt(Call("textureDimensions", "tex"))},
+ {Stage(ast::PipelineStage::kFragment)});
+
+ GeneratorImpl& gen = Build();
+
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr(params.result));
}
-INSTANTIATE_TEST_SUITE_P(
- HlslGeneratorImplTest_Type,
- HlslSampledTexturesTest,
- testing::Values(
- HlslSampledTextureData{
- ast::TextureDimension::k1d,
- TextureDataType::F32,
- "Texture1D<float4> tex : register(t1, space2);",
- },
- HlslSampledTextureData{
- ast::TextureDimension::k2d,
- TextureDataType::F32,
- "Texture2D<float4> tex : register(t1, space2);",
- },
- HlslSampledTextureData{
- ast::TextureDimension::k2dArray,
- TextureDataType::F32,
- "Texture2DArray<float4> tex : register(t1, space2);",
- },
- HlslSampledTextureData{
- ast::TextureDimension::k3d,
- TextureDataType::F32,
- "Texture3D<float4> tex : register(t1, space2);",
- },
- HlslSampledTextureData{
- ast::TextureDimension::kCube,
- TextureDataType::F32,
- "TextureCube<float4> tex : register(t1, space2);",
- },
- HlslSampledTextureData{
- ast::TextureDimension::kCubeArray,
- TextureDataType::F32,
- "TextureCubeArray<float4> tex : register(t1, space2);",
- },
- HlslSampledTextureData{
- ast::TextureDimension::k1d,
- TextureDataType::U32,
- "Texture1D<uint4> tex : register(t1, space2);",
- },
- HlslSampledTextureData{
- ast::TextureDimension::k2d,
- TextureDataType::U32,
- "Texture2D<uint4> tex : register(t1, space2);",
- },
- HlslSampledTextureData{
- ast::TextureDimension::k2dArray,
- TextureDataType::U32,
- "Texture2DArray<uint4> tex : register(t1, space2);",
- },
- HlslSampledTextureData{
- ast::TextureDimension::k3d,
- TextureDataType::U32,
- "Texture3D<uint4> tex : register(t1, space2);",
- },
- HlslSampledTextureData{
- ast::TextureDimension::kCube,
- TextureDataType::U32,
- "TextureCube<uint4> tex : register(t1, space2);",
- },
- HlslSampledTextureData{
- ast::TextureDimension::kCubeArray,
- TextureDataType::U32,
- "TextureCubeArray<uint4> tex : register(t1, space2);",
- },
- HlslSampledTextureData{
- ast::TextureDimension::k1d,
- TextureDataType::I32,
- "Texture1D<int4> tex : register(t1, space2);",
- },
- HlslSampledTextureData{
- ast::TextureDimension::k2d,
- TextureDataType::I32,
- "Texture2D<int4> tex : register(t1, space2);",
- },
- HlslSampledTextureData{
- ast::TextureDimension::k2dArray,
- TextureDataType::I32,
- "Texture2DArray<int4> tex : register(t1, space2);",
- },
- HlslSampledTextureData{
- ast::TextureDimension::k3d,
- TextureDataType::I32,
- "Texture3D<int4> tex : register(t1, space2);",
- },
- HlslSampledTextureData{
- ast::TextureDimension::kCube,
- TextureDataType::I32,
- "TextureCube<int4> tex : register(t1, space2);",
- },
- HlslSampledTextureData{
- ast::TextureDimension::kCubeArray,
- TextureDataType::I32,
- "TextureCubeArray<int4> tex : register(t1, space2);",
- }));
+INSTANTIATE_TEST_SUITE_P(HlslGeneratorImplTest_Type,
+ HlslSampledTexturesTest,
+ testing::Values(
+ HlslSampledTextureData{
+ ast::TextureDimension::k1d,
+ TextureDataType::F32,
+ "Texture1D<float4> tex : register(t1, space2);",
+ },
+ HlslSampledTextureData{
+ ast::TextureDimension::k2d,
+ TextureDataType::F32,
+ "Texture2D<float4> tex : register(t1, space2);",
+ },
+ HlslSampledTextureData{
+ ast::TextureDimension::k2dArray,
+ TextureDataType::F32,
+ "Texture2DArray<float4> tex : register(t1, space2);",
+ },
+ HlslSampledTextureData{
+ ast::TextureDimension::k3d,
+ TextureDataType::F32,
+ "Texture3D<float4> tex : register(t1, space2);",
+ },
+ HlslSampledTextureData{
+ ast::TextureDimension::kCube,
+ TextureDataType::F32,
+ "TextureCube<float4> tex : register(t1, space2);",
+ },
+ HlslSampledTextureData{
+ ast::TextureDimension::kCubeArray,
+ TextureDataType::F32,
+ "TextureCubeArray<float4> tex : register(t1, space2);",
+ },
+ HlslSampledTextureData{
+ ast::TextureDimension::k1d,
+ TextureDataType::U32,
+ "Texture1D<uint4> tex : register(t1, space2);",
+ },
+ HlslSampledTextureData{
+ ast::TextureDimension::k2d,
+ TextureDataType::U32,
+ "Texture2D<uint4> tex : register(t1, space2);",
+ },
+ HlslSampledTextureData{
+ ast::TextureDimension::k2dArray,
+ TextureDataType::U32,
+ "Texture2DArray<uint4> tex : register(t1, space2);",
+ },
+ HlslSampledTextureData{
+ ast::TextureDimension::k3d,
+ TextureDataType::U32,
+ "Texture3D<uint4> tex : register(t1, space2);",
+ },
+ HlslSampledTextureData{
+ ast::TextureDimension::kCube,
+ TextureDataType::U32,
+ "TextureCube<uint4> tex : register(t1, space2);",
+ },
+ HlslSampledTextureData{
+ ast::TextureDimension::kCubeArray,
+ TextureDataType::U32,
+ "TextureCubeArray<uint4> tex : register(t1, space2);",
+ },
+ HlslSampledTextureData{
+ ast::TextureDimension::k1d,
+ TextureDataType::I32,
+ "Texture1D<int4> tex : register(t1, space2);",
+ },
+ HlslSampledTextureData{
+ ast::TextureDimension::k2d,
+ TextureDataType::I32,
+ "Texture2D<int4> tex : register(t1, space2);",
+ },
+ HlslSampledTextureData{
+ ast::TextureDimension::k2dArray,
+ TextureDataType::I32,
+ "Texture2DArray<int4> tex : register(t1, space2);",
+ },
+ HlslSampledTextureData{
+ ast::TextureDimension::k3d,
+ TextureDataType::I32,
+ "Texture3D<int4> tex : register(t1, space2);",
+ },
+ HlslSampledTextureData{
+ ast::TextureDimension::kCube,
+ TextureDataType::I32,
+ "TextureCube<int4> tex : register(t1, space2);",
+ },
+ HlslSampledTextureData{
+ ast::TextureDimension::kCubeArray,
+ TextureDataType::I32,
+ "TextureCubeArray<int4> tex : register(t1, space2);",
+ }));
TEST_F(HlslGeneratorImplTest_Type, EmitMultisampledTexture) {
- auto* f32 = create<sem::F32>();
- auto* s = create<sem::MultisampledTexture>(ast::TextureDimension::k2d, f32);
+ auto* f32 = create<sem::F32>();
+ auto* s = create<sem::MultisampledTexture>(ast::TextureDimension::k2d, f32);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitType(out, s, ast::StorageClass::kNone,
- ast::Access::kReadWrite, ""))
- << gen.error();
- EXPECT_EQ(out.str(), "Texture2DMS<float4>");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitType(out, s, ast::StorageClass::kNone, ast::Access::kReadWrite, ""))
+ << gen.error();
+ EXPECT_EQ(out.str(), "Texture2DMS<float4>");
}
struct HlslStorageTextureData {
- ast::TextureDimension dim;
- ast::TexelFormat imgfmt;
- std::string result;
+ ast::TextureDimension dim;
+ ast::TexelFormat imgfmt;
+ std::string result;
};
-inline std::ostream& operator<<(std::ostream& out,
- HlslStorageTextureData data) {
- out << data.dim;
- return out;
+inline std::ostream& operator<<(std::ostream& out, HlslStorageTextureData data) {
+ out << data.dim;
+ return out;
}
using HlslStorageTexturesTest = TestParamHelper<HlslStorageTextureData>;
TEST_P(HlslStorageTexturesTest, Emit) {
- auto params = GetParam();
+ auto params = GetParam();
- auto* t = ty.storage_texture(params.dim, params.imgfmt, ast::Access::kWrite);
+ auto* t = ty.storage_texture(params.dim, params.imgfmt, ast::Access::kWrite);
- Global("tex", t, ast::AttributeList{GroupAndBinding(2, 1)});
+ Global("tex", t, ast::AttributeList{GroupAndBinding(2, 1)});
- Func("main", {}, ty.void_(), {CallStmt(Call("textureDimensions", "tex"))},
- {Stage(ast::PipelineStage::kFragment)});
+ Func("main", {}, ty.void_(), {CallStmt(Call("textureDimensions", "tex"))},
+ {Stage(ast::PipelineStage::kFragment)});
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr(params.result));
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr(params.result));
}
INSTANTIATE_TEST_SUITE_P(
HlslGeneratorImplTest_Type,
HlslStorageTexturesTest,
testing::Values(
- HlslStorageTextureData{
- ast::TextureDimension::k1d, ast::TexelFormat::kRgba8Unorm,
- "RWTexture1D<float4> tex : register(u1, space2);"},
- HlslStorageTextureData{
- ast::TextureDimension::k2d, ast::TexelFormat::kRgba16Float,
- "RWTexture2D<float4> tex : register(u1, space2);"},
- HlslStorageTextureData{
- ast::TextureDimension::k2dArray, ast::TexelFormat::kR32Float,
- "RWTexture2DArray<float4> tex : register(u1, space2);"},
- HlslStorageTextureData{
- ast::TextureDimension::k3d, ast::TexelFormat::kRg32Float,
- "RWTexture3D<float4> tex : register(u1, space2);"},
- HlslStorageTextureData{
- ast::TextureDimension::k1d, ast::TexelFormat::kRgba32Float,
- "RWTexture1D<float4> tex : register(u1, space2);"},
- HlslStorageTextureData{
- ast::TextureDimension::k2d, ast::TexelFormat::kRgba16Uint,
- "RWTexture2D<uint4> tex : register(u1, space2);"},
- HlslStorageTextureData{
- ast::TextureDimension::k2dArray, ast::TexelFormat::kR32Uint,
- "RWTexture2DArray<uint4> tex : register(u1, space2);"},
- HlslStorageTextureData{
- ast::TextureDimension::k3d, ast::TexelFormat::kRg32Uint,
- "RWTexture3D<uint4> tex : register(u1, space2);"},
- HlslStorageTextureData{
- ast::TextureDimension::k1d, ast::TexelFormat::kRgba32Uint,
- "RWTexture1D<uint4> tex : register(u1, space2);"},
- HlslStorageTextureData{ast::TextureDimension::k2d,
- ast::TexelFormat::kRgba16Sint,
+ HlslStorageTextureData{ast::TextureDimension::k1d, ast::TexelFormat::kRgba8Unorm,
+ "RWTexture1D<float4> tex : register(u1, space2);"},
+ HlslStorageTextureData{ast::TextureDimension::k2d, ast::TexelFormat::kRgba16Float,
+ "RWTexture2D<float4> tex : register(u1, space2);"},
+ HlslStorageTextureData{ast::TextureDimension::k2dArray, ast::TexelFormat::kR32Float,
+ "RWTexture2DArray<float4> tex : register(u1, space2);"},
+ HlslStorageTextureData{ast::TextureDimension::k3d, ast::TexelFormat::kRg32Float,
+ "RWTexture3D<float4> tex : register(u1, space2);"},
+ HlslStorageTextureData{ast::TextureDimension::k1d, ast::TexelFormat::kRgba32Float,
+ "RWTexture1D<float4> tex : register(u1, space2);"},
+ HlslStorageTextureData{ast::TextureDimension::k2d, ast::TexelFormat::kRgba16Uint,
+ "RWTexture2D<uint4> tex : register(u1, space2);"},
+ HlslStorageTextureData{ast::TextureDimension::k2dArray, ast::TexelFormat::kR32Uint,
+ "RWTexture2DArray<uint4> tex : register(u1, space2);"},
+ HlslStorageTextureData{ast::TextureDimension::k3d, ast::TexelFormat::kRg32Uint,
+ "RWTexture3D<uint4> tex : register(u1, space2);"},
+ HlslStorageTextureData{ast::TextureDimension::k1d, ast::TexelFormat::kRgba32Uint,
+ "RWTexture1D<uint4> tex : register(u1, space2);"},
+ HlslStorageTextureData{ast::TextureDimension::k2d, ast::TexelFormat::kRgba16Sint,
"RWTexture2D<int4> tex : register(u1, space2);"},
- HlslStorageTextureData{
- ast::TextureDimension::k2dArray, ast::TexelFormat::kR32Sint,
- "RWTexture2DArray<int4> tex : register(u1, space2);"},
- HlslStorageTextureData{ast::TextureDimension::k3d,
- ast::TexelFormat::kRg32Sint,
+ HlslStorageTextureData{ast::TextureDimension::k2dArray, ast::TexelFormat::kR32Sint,
+ "RWTexture2DArray<int4> tex : register(u1, space2);"},
+ HlslStorageTextureData{ast::TextureDimension::k3d, ast::TexelFormat::kRg32Sint,
"RWTexture3D<int4> tex : register(u1, space2);"},
- HlslStorageTextureData{
- ast::TextureDimension::k1d, ast::TexelFormat::kRgba32Sint,
- "RWTexture1D<int4> tex : register(u1, space2);"}));
+ HlslStorageTextureData{ast::TextureDimension::k1d, ast::TexelFormat::kRgba32Sint,
+ "RWTexture1D<int4> tex : register(u1, space2);"}));
} // namespace
} // namespace tint::writer::hlsl
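
One detail worth noting in the texture expectations above: the BindingAttribute/GroupAttribute pair becomes an HLSL register/space annotation, with the register class chosen by resource kind. A tiny formatting sketch with hypothetical names only:

    #include <cstdio>
    #include <string>

    // Hypothetical helper reproducing the annotations expected above: binding
    // selects the register index, group selects the space, and the class letter
    // depends on the resource ('t' for sampled/depth textures, 'u' for
    // read-write storage textures).
    std::string HlslRegister(char reg_class, int binding, int group) {
        char buf[32];
        std::snprintf(buf, sizeof(buf), "register(%c%d, space%d)", reg_class, binding, group);
        return buf;
    }
    // HlslRegister('t', 1, 2) == "register(t1, space2)"
    // HlslRegister('u', 1, 2) == "register(u1, space2)"
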
diff --git a/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_unary_op_test.cc b/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_unary_op_test.cc
index 3217a0194b8..c9abf1f250e 100644
--- a/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_unary_op_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_unary_op_test.cc
@@ -20,70 +20,65 @@ namespace {
using HlslUnaryOpTest = TestHelper;
TEST_F(HlslUnaryOpTest, AddressOf) {
- Global("expr", ty.f32(), ast::StorageClass::kPrivate);
- auto* op =
- create<ast::UnaryOpExpression>(ast::UnaryOp::kAddressOf, Expr("expr"));
- WrapInFunction(op);
+ Global("expr", ty.f32(), ast::StorageClass::kPrivate);
+ auto* op = create<ast::UnaryOpExpression>(ast::UnaryOp::kAddressOf, Expr("expr"));
+ WrapInFunction(op);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, op)) << gen.error();
- EXPECT_EQ(out.str(), "expr");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, op)) << gen.error();
+ EXPECT_EQ(out.str(), "expr");
}
TEST_F(HlslUnaryOpTest, Complement) {
- Global("expr", ty.u32(), ast::StorageClass::kPrivate);
- auto* op =
- create<ast::UnaryOpExpression>(ast::UnaryOp::kComplement, Expr("expr"));
- WrapInFunction(op);
+ Global("expr", ty.u32(), ast::StorageClass::kPrivate);
+ auto* op = create<ast::UnaryOpExpression>(ast::UnaryOp::kComplement, Expr("expr"));
+ WrapInFunction(op);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, op)) << gen.error();
- EXPECT_EQ(out.str(), "~(expr)");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, op)) << gen.error();
+ EXPECT_EQ(out.str(), "~(expr)");
}
TEST_F(HlslUnaryOpTest, Indirection) {
- Global("G", ty.f32(), ast::StorageClass::kPrivate);
- auto* p = Const(
- "expr", nullptr,
- create<ast::UnaryOpExpression>(ast::UnaryOp::kAddressOf, Expr("G")));
- auto* op =
- create<ast::UnaryOpExpression>(ast::UnaryOp::kIndirection, Expr("expr"));
- WrapInFunction(p, op);
-
- GeneratorImpl& gen = Build();
-
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, op)) << gen.error();
- EXPECT_EQ(out.str(), "expr");
+ Global("G", ty.f32(), ast::StorageClass::kPrivate);
+ auto* p =
+ Let("expr", nullptr, create<ast::UnaryOpExpression>(ast::UnaryOp::kAddressOf, Expr("G")));
+ auto* op = create<ast::UnaryOpExpression>(ast::UnaryOp::kIndirection, Expr("expr"));
+ WrapInFunction(p, op);
+
+ GeneratorImpl& gen = Build();
+
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, op)) << gen.error();
+ EXPECT_EQ(out.str(), "expr");
}
TEST_F(HlslUnaryOpTest, Not) {
- Global("expr", ty.bool_(), ast::StorageClass::kPrivate);
- auto* op = create<ast::UnaryOpExpression>(ast::UnaryOp::kNot, Expr("expr"));
- WrapInFunction(op);
+ Global("expr", ty.bool_(), ast::StorageClass::kPrivate);
+ auto* op = create<ast::UnaryOpExpression>(ast::UnaryOp::kNot, Expr("expr"));
+ WrapInFunction(op);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, op)) << gen.error();
- EXPECT_EQ(out.str(), "!(expr)");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, op)) << gen.error();
+ EXPECT_EQ(out.str(), "!(expr)");
}
TEST_F(HlslUnaryOpTest, Negation) {
- Global("expr", ty.i32(), ast::StorageClass::kPrivate);
- auto* op =
- create<ast::UnaryOpExpression>(ast::UnaryOp::kNegation, Expr("expr"));
- WrapInFunction(op);
+ Global("expr", ty.i32(), ast::StorageClass::kPrivate);
+ auto* op = create<ast::UnaryOpExpression>(ast::UnaryOp::kNegation, Expr("expr"));
+ WrapInFunction(op);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, op)) << gen.error();
- EXPECT_EQ(out.str(), "-(expr)");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, op)) << gen.error();
+ EXPECT_EQ(out.str(), "-(expr)");
}
} // namespace
} // namespace tint::writer::hlsl
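
The unary-op expectations above reduce to a small mapping: address-of and indirection print the operand unchanged, while complement, not, and negation wrap it in ~(), !(), and -() respectively. A stand-alone sketch that mirrors only these expectations (hypothetical, not the generator's code):

    #include <string>

    enum class UnaryOp { kAddressOf, kIndirection, kComplement, kNot, kNegation };

    // Hypothetical: reproduces the strings the tests above expect from
    // GeneratorImpl::EmitExpression for each unary operator.
    std::string EmitUnary(UnaryOp op, const std::string& operand) {
        switch (op) {
            case UnaryOp::kAddressOf:
            case UnaryOp::kIndirection:
                return operand;  // "expr" passes through untouched
            case UnaryOp::kComplement:
                return "~(" + operand + ")";
            case UnaryOp::kNot:
                return "!(" + operand + ")";
            case UnaryOp::kNegation:
                return "-(" + operand + ")";
        }
        return operand;
    }
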
diff --git a/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_variable_decl_statement_test.cc b/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_variable_decl_statement_test.cc
index 1188aed88b5..5ed85f3a747 100644
--- a/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_variable_decl_statement_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_variable_decl_statement_test.cc
@@ -24,98 +24,94 @@ using ::testing::HasSubstr;
using HlslGeneratorImplTest_VariableDecl = TestHelper;
TEST_F(HlslGeneratorImplTest_VariableDecl, Emit_VariableDeclStatement) {
- auto* var = Var("a", ty.f32());
- auto* stmt = Decl(var);
- WrapInFunction(stmt);
+ auto* var = Var("a", ty.f32());
+ auto* stmt = Decl(var);
+ WrapInFunction(stmt);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(stmt)) << gen.error();
- EXPECT_EQ(gen.result(), " float a = 0.0f;\n");
+ ASSERT_TRUE(gen.EmitStatement(stmt)) << gen.error();
+ EXPECT_EQ(gen.result(), " float a = 0.0f;\n");
}
TEST_F(HlslGeneratorImplTest_VariableDecl, Emit_VariableDeclStatement_Const) {
- auto* var = Const("a", ty.f32(), Construct(ty.f32()));
- auto* stmt = Decl(var);
- WrapInFunction(stmt);
+ auto* var = Let("a", ty.f32(), Construct(ty.f32()));
+ auto* stmt = Decl(var);
+ WrapInFunction(stmt);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(stmt)) << gen.error();
- EXPECT_EQ(gen.result(), " const float a = 0.0f;\n");
+ ASSERT_TRUE(gen.EmitStatement(stmt)) << gen.error();
+ EXPECT_EQ(gen.result(), " const float a = 0.0f;\n");
}
TEST_F(HlslGeneratorImplTest_VariableDecl, Emit_VariableDeclStatement_Array) {
- auto* var = Var("a", ty.array<f32, 5>());
+ auto* var = Var("a", ty.array<f32, 5>());
- WrapInFunction(var, Expr("a"));
+ WrapInFunction(var, Expr("a"));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr(" float a[5] = (float[5])0;\n"));
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr(" float a[5] = (float[5])0;\n"));
}
TEST_F(HlslGeneratorImplTest_VariableDecl, Emit_VariableDeclStatement_Private) {
- Global("a", ty.f32(), ast::StorageClass::kPrivate);
+ Global("a", ty.f32(), ast::StorageClass::kPrivate);
- WrapInFunction(Expr("a"));
+ WrapInFunction(Expr("a"));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr(" static float a = 0.0f;\n"));
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr(" static float a = 0.0f;\n"));
}
-TEST_F(HlslGeneratorImplTest_VariableDecl,
- Emit_VariableDeclStatement_Initializer_Private) {
- Global("initializer", ty.f32(), ast::StorageClass::kPrivate);
- Global("a", ty.f32(), ast::StorageClass::kPrivate, Expr("initializer"));
+TEST_F(HlslGeneratorImplTest_VariableDecl, Emit_VariableDeclStatement_Initializer_Private) {
+ Global("initializer", ty.f32(), ast::StorageClass::kPrivate);
+ Global("a", ty.f32(), ast::StorageClass::kPrivate, Expr("initializer"));
- WrapInFunction(Expr("a"));
+ WrapInFunction(Expr("a"));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr(R"(float a = initializer;
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr(R"(float a = initializer;
)"));
}
-TEST_F(HlslGeneratorImplTest_VariableDecl,
- Emit_VariableDeclStatement_Initializer_ZeroVec) {
- auto* var = Var("a", ty.vec3<f32>(), ast::StorageClass::kNone, vec3<f32>());
+TEST_F(HlslGeneratorImplTest_VariableDecl, Emit_VariableDeclStatement_Initializer_ZeroVec) {
+ auto* var = Var("a", ty.vec3<f32>(), ast::StorageClass::kNone, vec3<f32>());
- auto* stmt = Decl(var);
- WrapInFunction(stmt);
+ auto* stmt = Decl(var);
+ WrapInFunction(stmt);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.EmitStatement(stmt)) << gen.error();
- EXPECT_EQ(gen.result(), R"(float3 a = float3(0.0f, 0.0f, 0.0f);
+ ASSERT_TRUE(gen.EmitStatement(stmt)) << gen.error();
+ EXPECT_EQ(gen.result(), R"(float3 a = (0.0f).xxx;
)");
}
-TEST_F(HlslGeneratorImplTest_VariableDecl,
- Emit_VariableDeclStatement_Initializer_ZeroMat) {
- auto* var =
- Var("a", ty.mat2x3<f32>(), ast::StorageClass::kNone, mat2x3<f32>());
+TEST_F(HlslGeneratorImplTest_VariableDecl, Emit_VariableDeclStatement_Initializer_ZeroMat) {
+ auto* var = Var("a", ty.mat2x3<f32>(), ast::StorageClass::kNone, mat2x3<f32>());
- auto* stmt = Decl(var);
- WrapInFunction(stmt);
+ auto* stmt = Decl(var);
+ WrapInFunction(stmt);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.EmitStatement(stmt)) << gen.error();
- EXPECT_EQ(gen.result(),
- R"(float2x3 a = float2x3(0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f);
+ ASSERT_TRUE(gen.EmitStatement(stmt)) << gen.error();
+ EXPECT_EQ(gen.result(),
+ R"(float2x3 a = float2x3((0.0f).xxx, (0.0f).xxx);
)");
}
diff --git a/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_workgroup_var_test.cc b/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_workgroup_var_test.cc
index 641b8f18906..826a545b68c 100644
--- a/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_workgroup_var_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/hlsl/generator_impl_workgroup_var_test.cc
@@ -17,40 +17,43 @@
#include "src/tint/ast/stage_attribute.h"
#include "src/tint/writer/hlsl/test_helper.h"
+using ::testing::HasSubstr;
+
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::hlsl {
namespace {
-using ::testing::HasSubstr;
using HlslGeneratorImplTest_WorkgroupVar = TestHelper;
TEST_F(HlslGeneratorImplTest_WorkgroupVar, Basic) {
- Global("wg", ty.f32(), ast::StorageClass::kWorkgroup);
+ Global("wg", ty.f32(), ast::StorageClass::kWorkgroup);
- Func("main", {}, ty.void_(), {Assign("wg", 1.2f)},
- {
- Stage(ast::PipelineStage::kCompute),
- WorkgroupSize(1),
- });
- GeneratorImpl& gen = Build();
+ Func("main", {}, ty.void_(), {Assign("wg", 1.2_f)},
+ {
+ Stage(ast::PipelineStage::kCompute),
+ WorkgroupSize(1_i),
+ });
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr("groupshared float wg;\n"));
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr("groupshared float wg;\n"));
}
TEST_F(HlslGeneratorImplTest_WorkgroupVar, Aliased) {
- auto* alias = Alias("F32", ty.f32());
+ auto* alias = Alias("F32", ty.f32());
- Global("wg", ty.Of(alias), ast::StorageClass::kWorkgroup);
+ Global("wg", ty.Of(alias), ast::StorageClass::kWorkgroup);
- Func("main", {}, ty.void_(), {Assign("wg", 1.2f)},
- {
- Stage(ast::PipelineStage::kCompute),
- WorkgroupSize(1),
- });
- GeneratorImpl& gen = Build();
+ Func("main", {}, ty.void_(), {Assign("wg", 1.2_f)},
+ {
+ Stage(ast::PipelineStage::kCompute),
+ WorkgroupSize(1_i),
+ });
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr("groupshared float wg;\n"));
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr("groupshared float wg;\n"));
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/writer/hlsl/test_helper.h b/chromium/third_party/dawn/src/tint/writer/hlsl/test_helper.h
index 089ddb99061..8fd6bae9c6d 100644
--- a/chromium/third_party/dawn/src/tint/writer/hlsl/test_helper.h
+++ b/chromium/third_party/dawn/src/tint/writer/hlsl/test_helper.h
@@ -30,80 +30,73 @@ namespace tint::writer::hlsl {
/// Helper class for testing
template <typename BODY>
class TestHelperBase : public BODY, public ProgramBuilder {
- public:
- TestHelperBase() = default;
- ~TestHelperBase() override = default;
+ public:
+ TestHelperBase() = default;
+ ~TestHelperBase() override = default;
- /// Builds the program and returns a GeneratorImpl from the program.
- /// @note The generator is only built once. Multiple calls to Build() will
- /// return the same GeneratorImpl without rebuilding.
- /// @return the built generator
- GeneratorImpl& Build() {
- if (gen_) {
- return *gen_;
+ /// Builds the program and returns a GeneratorImpl from the program.
+ /// @note The generator is only built once. Multiple calls to Build() will
+ /// return the same GeneratorImpl without rebuilding.
+ /// @return the built generator
+ GeneratorImpl& Build() {
+ if (gen_) {
+ return *gen_;
+ }
+ [&]() {
+ ASSERT_TRUE(IsValid()) << "Builder program is not valid\n"
+ << diag::Formatter().format(Diagnostics());
+ }();
+ program = std::make_unique<Program>(std::move(*this));
+ [&]() {
+ ASSERT_TRUE(program->IsValid()) << diag::Formatter().format(program->Diagnostics());
+ }();
+ gen_ = std::make_unique<GeneratorImpl>(program.get());
+ return *gen_;
}
- [&]() {
- ASSERT_TRUE(IsValid()) << "Builder program is not valid\n"
- << diag::Formatter().format(Diagnostics());
- }();
- program = std::make_unique<Program>(std::move(*this));
- [&]() {
- ASSERT_TRUE(program->IsValid())
- << diag::Formatter().format(program->Diagnostics());
- }();
- gen_ = std::make_unique<GeneratorImpl>(program.get());
- return *gen_;
- }
- /// Builds the program, runs the program through the HLSL sanitizer
- /// and returns a GeneratorImpl from the sanitized program.
- /// @param options The HLSL generator options.
- /// @note The generator is only built once. Multiple calls to Build() will
- /// return the same GeneratorImpl without rebuilding.
- /// @return the built generator
- GeneratorImpl& SanitizeAndBuild(const Options& options = {}) {
- if (gen_) {
- return *gen_;
- }
- diag::Formatter formatter;
- [&]() {
- ASSERT_TRUE(IsValid()) << "Builder program is not valid\n"
- << formatter.format(Diagnostics());
- }();
- program = std::make_unique<Program>(std::move(*this));
- [&]() {
- ASSERT_TRUE(program->IsValid())
- << formatter.format(program->Diagnostics());
- }();
+ /// Builds the program, runs the program through the HLSL sanitizer
+ /// and returns a GeneratorImpl from the sanitized program.
+ /// @param options The HLSL generator options.
+ /// @note The generator is only built once. Multiple calls to Build() will
+ /// return the same GeneratorImpl without rebuilding.
+ /// @return the built generator
+ GeneratorImpl& SanitizeAndBuild(const Options& options = {}) {
+ if (gen_) {
+ return *gen_;
+ }
+ diag::Formatter formatter;
+ [&]() {
+ ASSERT_TRUE(IsValid()) << "Builder program is not valid\n"
+ << formatter.format(Diagnostics());
+ }();
+ program = std::make_unique<Program>(std::move(*this));
+ [&]() { ASSERT_TRUE(program->IsValid()) << formatter.format(program->Diagnostics()); }();
- auto sanitized_result = Sanitize(program.get(), options);
- [&]() {
- ASSERT_TRUE(sanitized_result.program.IsValid())
- << formatter.format(sanitized_result.program.Diagnostics());
- }();
+ auto sanitized_result = Sanitize(program.get(), options);
+ [&]() {
+ ASSERT_TRUE(sanitized_result.program.IsValid())
+ << formatter.format(sanitized_result.program.Diagnostics());
+ }();
- transform::Manager transform_manager;
- transform::DataMap transform_data;
- transform_data.Add<transform::Renamer::Config>(
- transform::Renamer::Target::kHlslKeywords,
- /* preserve_unicode */ true);
- transform_manager.Add<tint::transform::Renamer>();
- auto result =
- transform_manager.Run(&sanitized_result.program, transform_data);
- [&]() {
- ASSERT_TRUE(result.program.IsValid())
- << formatter.format(result.program.Diagnostics());
- }();
- *program = std::move(result.program);
- gen_ = std::make_unique<GeneratorImpl>(program.get());
- return *gen_;
- }
+ transform::Manager transform_manager;
+ transform::DataMap transform_data;
+ transform_data.Add<transform::Renamer::Config>(transform::Renamer::Target::kHlslKeywords,
+ /* preserve_unicode */ true);
+ transform_manager.Add<tint::transform::Renamer>();
+ auto result = transform_manager.Run(&sanitized_result.program, transform_data);
+ [&]() {
+ ASSERT_TRUE(result.program.IsValid()) << formatter.format(result.program.Diagnostics());
+ }();
+ *program = std::move(result.program);
+ gen_ = std::make_unique<GeneratorImpl>(program.get());
+ return *gen_;
+ }
- /// The program built with a call to Build()
- std::unique_ptr<Program> program;
+ /// The program built with a call to Build()
+ std::unique_ptr<Program> program;
- private:
- std::unique_ptr<GeneratorImpl> gen_;
+ private:
+ std::unique_ptr<GeneratorImpl> gen_;
};
/// TestHelper is the base class for HLSL writer unit tests.
diff --git a/chromium/third_party/dawn/src/tint/writer/msl/generator.cc b/chromium/third_party/dawn/src/tint/writer/msl/generator.cc
index ee33006fdab..5cd9ac879df 100644
--- a/chromium/third_party/dawn/src/tint/writer/msl/generator.cc
+++ b/chromium/third_party/dawn/src/tint/writer/msl/generator.cc
@@ -30,29 +30,32 @@ Result::~Result() = default;
Result::Result(const Result&) = default;
Result Generate(const Program* program, const Options& options) {
- Result result;
+ Result result;
+ if (!program->IsValid()) {
+ result.error = "input program is not valid";
+ return result;
+ }
+
+ // Sanitize the program.
+ auto sanitized_result = Sanitize(program, options);
+ if (!sanitized_result.program.IsValid()) {
+ result.success = false;
+ result.error = sanitized_result.program.Diagnostics().str();
+ return result;
+ }
+ result.needs_storage_buffer_sizes = sanitized_result.needs_storage_buffer_sizes;
+ result.used_array_length_from_uniform_indices =
+ std::move(sanitized_result.used_array_length_from_uniform_indices);
+
+ // Generate the MSL code.
+ auto impl = std::make_unique<GeneratorImpl>(&sanitized_result.program);
+ result.success = impl->Generate();
+ result.error = impl->error();
+ result.msl = impl->result();
+ result.has_invariant_attribute = impl->HasInvariant();
+ result.workgroup_allocations = impl->DynamicWorkgroupAllocations();
- // Sanitize the program.
- auto sanitized_result = Sanitize(program, options);
- if (!sanitized_result.program.IsValid()) {
- result.success = false;
- result.error = sanitized_result.program.Diagnostics().str();
return result;
- }
- result.needs_storage_buffer_sizes =
- sanitized_result.needs_storage_buffer_sizes;
- result.used_array_length_from_uniform_indices =
- std::move(sanitized_result.used_array_length_from_uniform_indices);
-
- // Generate the MSL code.
- auto impl = std::make_unique<GeneratorImpl>(&sanitized_result.program);
- result.success = impl->Generate();
- result.error = impl->error();
- result.msl = impl->result();
- result.has_invariant_attribute = impl->HasInvariant();
- result.workgroup_allocations = impl->DynamicWorkgroupAllocations();
-
- return result;
}
} // namespace tint::writer::msl
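
With the reordering above, Generate() now rejects an invalid program up front, sanitizes it, and only then instantiates GeneratorImpl. A call site might look like the following sketch (assumed usage, built only from the fields this file populates):

    #include <string>

    #include "src/tint/writer/msl/generator.h"

    // Sketch only: run the MSL backend and inspect the Result fields that
    // Generate() fills in above.
    bool EmitMsl(const tint::Program* program, std::string* out_msl) {
        tint::writer::msl::Options options;  // defaults documented in generator.h
        auto result = tint::writer::msl::Generate(program, options);
        if (!result.success) {
            // result.error carries validation, sanitizer, or generator diagnostics.
            return false;
        }
        *out_msl = result.msl;
        return true;
    }
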
diff --git a/chromium/third_party/dawn/src/tint/writer/msl/generator.h b/chromium/third_party/dawn/src/tint/writer/msl/generator.h
index 1415e6becbc..d4208cc262b 100644
--- a/chromium/third_party/dawn/src/tint/writer/msl/generator.h
+++ b/chromium/third_party/dawn/src/tint/writer/msl/generator.h
@@ -28,84 +28,81 @@
namespace tint {
class Program;
} // namespace tint
-namespace tint::writer::msl {
-class GeneratorImpl;
-} // namespace tint::writer::msl
namespace tint::writer::msl {
/// Configuration options used for generating MSL.
struct Options {
- /// Constructor
- Options();
- /// Destructor
- ~Options();
- /// Copy constructor
- Options(const Options&);
- /// Copy assignment
- /// @returns this Options
- Options& operator=(const Options&);
-
- /// The index to use when generating a UBO to receive storage buffer sizes.
- /// Defaults to 30, which is the last valid buffer slot.
- uint32_t buffer_size_ubo_index = 30;
-
- /// The fixed sample mask to combine with fragment shader outputs.
- /// Defaults to 0xFFFFFFFF.
- uint32_t fixed_sample_mask = 0xFFFFFFFF;
-
- /// Set to `true` to generate a [[point_size]] attribute which is set to 1.0
- /// for all vertex shaders in the module.
- bool emit_vertex_point_size = false;
-
- /// Set to `true` to disable workgroup memory zero initialization
- bool disable_workgroup_init = false;
-
-  /// Set to 'true' to generate binding mappings for external textures
- bool generate_external_texture_bindings = false;
-
- /// Options used to specify a mapping of binding points to indices into a UBO
- /// from which to load buffer sizes.
- ArrayLengthFromUniformOptions array_length_from_uniform = {};
-
- // NOTE: Update src/tint/fuzzers/data_builder.h when adding or changing any
- // struct members.
+ /// Constructor
+ Options();
+ /// Destructor
+ ~Options();
+ /// Copy constructor
+ Options(const Options&);
+ /// Copy assignment
+ /// @returns this Options
+ Options& operator=(const Options&);
+
+ /// The index to use when generating a UBO to receive storage buffer sizes.
+ /// Defaults to 30, which is the last valid buffer slot.
+ uint32_t buffer_size_ubo_index = 30;
+
+ /// The fixed sample mask to combine with fragment shader outputs.
+ /// Defaults to 0xFFFFFFFF.
+ uint32_t fixed_sample_mask = 0xFFFFFFFF;
+
+ /// Set to `true` to generate a [[point_size]] attribute which is set to 1.0
+ /// for all vertex shaders in the module.
+ bool emit_vertex_point_size = false;
+
+ /// Set to `true` to disable workgroup memory zero initialization
+ bool disable_workgroup_init = false;
+
+    /// Set to 'true' to generate binding mappings for external textures
+ bool generate_external_texture_bindings = false;
+
+ /// Options used to specify a mapping of binding points to indices into a UBO
+ /// from which to load buffer sizes.
+ ArrayLengthFromUniformOptions array_length_from_uniform = {};
+
+ // NOTE: Update src/tint/fuzzers/data_builder.h when adding or changing any
+ // struct members.
};
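
For quick reference, the documented fields above can be overridden before calling Generate(); the values below are illustrative only:

    #include "src/tint/writer/msl/generator.h"

    // Illustrative only: every field set here is documented in the Options struct above.
    tint::writer::msl::Options MakeMslOptions() {
        tint::writer::msl::Options options;
        options.buffer_size_ubo_index = 29;       // default 30 is the last valid buffer slot
        options.fixed_sample_mask = 0x0000FFFFu;  // combined with fragment shader outputs
        options.emit_vertex_point_size = true;    // emit [[point_size]] = 1.0 in vertex shaders
        options.disable_workgroup_init = false;   // keep workgroup memory zero-initialization
        return options;
    }
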
/// The result produced when generating MSL.
struct Result {
- /// Constructor
- Result();
+ /// Constructor
+ Result();
- /// Destructor
- ~Result();
+ /// Destructor
+ ~Result();
- /// Copy constructor
- Result(const Result&);
+ /// Copy constructor
+ Result(const Result&);
- /// True if generation was successful.
- bool success = false;
+ /// True if generation was successful.
+ bool success = false;
- /// The errors generated during code generation, if any.
- std::string error;
+ /// The errors generated during code generation, if any.
+ std::string error;
- /// The generated MSL.
- std::string msl = "";
+ /// The generated MSL.
+ std::string msl = "";
- /// True if the shader needs a UBO of buffer sizes.
- bool needs_storage_buffer_sizes = false;
+ /// True if the shader needs a UBO of buffer sizes.
+ bool needs_storage_buffer_sizes = false;
- /// True if the generated shader uses the invariant attribute.
- bool has_invariant_attribute = false;
+ /// True if the generated shader uses the invariant attribute.
+ bool has_invariant_attribute = false;
- /// A map from entry point name to a list of dynamic workgroup allocations.
- /// Each entry in the vector is the size of the workgroup allocation that
- /// should be created for that index.
- std::unordered_map<std::string, std::vector<uint32_t>> workgroup_allocations;
+ /// A map from entry point name to a list of dynamic workgroup allocations.
+ /// Each entry in the vector is the size of the workgroup allocation that
+ /// should be created for that index.
+ std::unordered_map<std::string, std::vector<uint32_t>> workgroup_allocations;
- /// Indices into the array_length_from_uniform binding that are statically
- /// used.
- std::unordered_set<uint32_t> used_array_length_from_uniform_indices;
+ /// Indices into the array_length_from_uniform binding that are statically
+ /// used.
+ std::unordered_set<uint32_t> used_array_length_from_uniform_indices;
};
/// Generate MSL for a program, according to a set of configuration options. The
diff --git a/chromium/third_party/dawn/src/tint/writer/msl/generator_bench.cc b/chromium/third_party/dawn/src/tint/writer/msl/generator_bench.cc
index c9d44407016..7feb1cfdb07 100644
--- a/chromium/third_party/dawn/src/tint/writer/msl/generator_bench.cc
+++ b/chromium/third_party/dawn/src/tint/writer/msl/generator_bench.cc
@@ -20,18 +20,18 @@ namespace tint::writer::msl {
namespace {
void GenerateMSL(benchmark::State& state, std::string input_name) {
- auto res = bench::LoadProgram(input_name);
- if (auto err = std::get_if<bench::Error>(&res)) {
- state.SkipWithError(err->msg.c_str());
- return;
- }
- auto& program = std::get<bench::ProgramAndFile>(res).program;
- for (auto _ : state) {
- auto res = Generate(&program, {});
- if (!res.error.empty()) {
- state.SkipWithError(res.error.c_str());
+ auto res = bench::LoadProgram(input_name);
+ if (auto err = std::get_if<bench::Error>(&res)) {
+ state.SkipWithError(err->msg.c_str());
+ return;
+ }
+ auto& program = std::get<bench::ProgramAndFile>(res).program;
+ for (auto _ : state) {
+ auto res = Generate(&program, {});
+ if (!res.error.empty()) {
+ state.SkipWithError(res.error.c_str());
+ }
}
- }
}
TINT_BENCHMARK_WGSL_PROGRAMS(GenerateMSL);
diff --git a/chromium/third_party/dawn/src/tint/writer/msl/generator_impl.cc b/chromium/third_party/dawn/src/tint/writer/msl/generator_impl.cc
index 349ed1d4bac..3228e867d32 100644
--- a/chromium/third_party/dawn/src/tint/writer/msl/generator_impl.cc
+++ b/chromium/third_party/dawn/src/tint/writer/msl/generator_impl.cc
@@ -30,37 +30,37 @@
#include "src/tint/ast/id_attribute.h"
#include "src/tint/ast/interpolate_attribute.h"
#include "src/tint/ast/module.h"
-#include "src/tint/ast/sint_literal_expression.h"
-#include "src/tint/ast/uint_literal_expression.h"
#include "src/tint/ast/variable_decl_statement.h"
#include "src/tint/ast/void.h"
#include "src/tint/sem/array.h"
-#include "src/tint/sem/atomic_type.h"
-#include "src/tint/sem/bool_type.h"
+#include "src/tint/sem/atomic.h"
+#include "src/tint/sem/bool.h"
#include "src/tint/sem/call.h"
-#include "src/tint/sem/depth_multisampled_texture_type.h"
-#include "src/tint/sem/depth_texture_type.h"
-#include "src/tint/sem/f32_type.h"
+#include "src/tint/sem/constant.h"
+#include "src/tint/sem/depth_multisampled_texture.h"
+#include "src/tint/sem/depth_texture.h"
+#include "src/tint/sem/f32.h"
#include "src/tint/sem/function.h"
-#include "src/tint/sem/i32_type.h"
-#include "src/tint/sem/matrix_type.h"
+#include "src/tint/sem/i32.h"
+#include "src/tint/sem/matrix.h"
#include "src/tint/sem/member_accessor_expression.h"
#include "src/tint/sem/module.h"
-#include "src/tint/sem/multisampled_texture_type.h"
-#include "src/tint/sem/pointer_type.h"
-#include "src/tint/sem/reference_type.h"
-#include "src/tint/sem/sampled_texture_type.h"
-#include "src/tint/sem/storage_texture_type.h"
+#include "src/tint/sem/multisampled_texture.h"
+#include "src/tint/sem/pointer.h"
+#include "src/tint/sem/reference.h"
+#include "src/tint/sem/sampled_texture.h"
+#include "src/tint/sem/storage_texture.h"
#include "src/tint/sem/struct.h"
#include "src/tint/sem/type_constructor.h"
#include "src/tint/sem/type_conversion.h"
-#include "src/tint/sem/u32_type.h"
+#include "src/tint/sem/u32.h"
#include "src/tint/sem/variable.h"
-#include "src/tint/sem/vector_type.h"
-#include "src/tint/sem/void_type.h"
+#include "src/tint/sem/vector.h"
+#include "src/tint/sem/void.h"
#include "src/tint/transform/array_length_from_uniform.h"
#include "src/tint/transform/builtin_polyfill.h"
#include "src/tint/transform/canonicalize_entry_point_io.h"
+#include "src/tint/transform/disable_uniformity_analysis.h"
#include "src/tint/transform/expand_compound_assignment.h"
#include "src/tint/transform/manager.h"
#include "src/tint/transform/module_scope_var_to_entry_point_param.h"
@@ -83,34 +83,59 @@ namespace tint::writer::msl {
namespace {
bool last_is_break_or_fallthrough(const ast::BlockStatement* stmts) {
- return IsAnyOf<ast::BreakStatement, ast::FallthroughStatement>(stmts->Last());
+ return IsAnyOf<ast::BreakStatement, ast::FallthroughStatement>(stmts->Last());
+}
+
+void PrintF32(std::ostream& out, float value) {
+ // Note: Currently inf and nan should not be constructable, but this is implemented for the day
+ // we support them.
+ if (std::isinf(value)) {
+ out << (value >= 0 ? "INFINITY" : "-INFINITY");
+ } else if (std::isnan(value)) {
+ out << "NAN";
+ } else {
+ out << FloatToString(value) << "f";
+ }
+}
+
+void PrintI32(std::ostream& out, int32_t value) {
+ // MSL (and C++) parse `-2147483648` as a `long` because it parses unary minus and `2147483648`
+ // as separate tokens, and the latter doesn't fit into an (32-bit) `int`.
+ // WGSL, on the other hand, parses this as an `i32`.
+ // To avoid issues with `long` to `int` casts, emit `(-2147483647 - 1)` instead, which ensures
+ // the expression type is `int`.
+ if (auto int_min = std::numeric_limits<int32_t>::min(); value == int_min) {
+ out << "(" << int_min + 1 << " - 1)";
+ } else {
+ out << value;
+ }
}
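A standalone sketch of the INT_MIN workaround described in the comment above; print_i32_literal is a hypothetical name used only for illustration, not part of the patch.

    #include <cstdint>
    #include <iostream>
    #include <limits>

    // "-2147483648" would be lexed by MSL/C++ as unary minus applied to a literal
    // that no longer fits in int, so print "(-2147483647 - 1)" instead, which
    // keeps the expression type int.
    void print_i32_literal(std::ostream& out, int32_t value) {
        constexpr int32_t int_min = std::numeric_limits<int32_t>::min();
        if (value == int_min) {
            out << "(" << int_min + 1 << " - 1)";
        } else {
            out << value;
        }
    }

    int main() {
        print_i32_literal(std::cout, std::numeric_limits<int32_t>::min());  // (-2147483647 - 1)
        std::cout << "\n";
        print_i32_literal(std::cout, 42);  // 42
        std::cout << "\n";
    }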
class ScopedBitCast {
- public:
- ScopedBitCast(GeneratorImpl* generator,
- std::ostream& stream,
- const sem::Type* curr_type,
- const sem::Type* target_type)
- : s(stream) {
- auto* target_vec_type = target_type->As<sem::Vector>();
-
- // If we need to promote from scalar to vector, bitcast the scalar to the
- // vector element type.
- if (curr_type->is_scalar() && target_vec_type) {
- target_type = target_vec_type->type();
- }
-
- // Bit cast
- s << "as_type<";
- generator->EmitType(s, target_type, "");
- s << ">(";
- }
-
- ~ScopedBitCast() { s << ")"; }
-
- private:
- std::ostream& s;
+ public:
+ ScopedBitCast(GeneratorImpl* generator,
+ std::ostream& stream,
+ const sem::Type* curr_type,
+ const sem::Type* target_type)
+ : s(stream) {
+ auto* target_vec_type = target_type->As<sem::Vector>();
+
+ // If we need to promote from scalar to vector, bitcast the scalar to the
+ // vector element type.
+ if (curr_type->is_scalar() && target_vec_type) {
+ target_type = target_vec_type->type();
+ }
+
+ // Bit cast
+ s << "as_type<";
+ generator->EmitType(s, target_type, "");
+ s << ">(";
+ }
+
+ ~ScopedBitCast() { s << ")"; }
+
+ private:
+ std::ostream& s;
};
} // namespace
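The ScopedBitCast helper above relies on constructor/destructor ordering to wrap whatever is streamed in between; a stripped-down sketch of that RAII pattern (ScopedCast is an illustrative name, and the real class also handles scalar-to-vector promotion):

    #include <iostream>
    #include <string>

    // Opening "as_type<T>(" goes out in the constructor and the closing ")" in
    // the destructor, so nested scopes yield nested casts around the expression.
    class ScopedCast {
      public:
        ScopedCast(std::ostream& s, const std::string& type) : s_(s) {
            s_ << "as_type<" << type << ">(";
        }
        ~ScopedCast() { s_ << ")"; }

      private:
        std::ostream& s_;
    };

    int main() {
        // Emits: as_type<int>(as_type<uint>(a) + as_type<uint>(b))
        ScopedCast outer(std::cout, "int");
        {
            ScopedCast lhs(std::cout, "uint");
            std::cout << "a";
        }
        std::cout << " + ";
        {
            ScopedCast rhs(std::cout, "uint");
            std::cout << "b";
        }
    }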
@@ -120,94 +145,90 @@ SanitizedResult::~SanitizedResult() = default;
SanitizedResult::SanitizedResult(SanitizedResult&&) = default;
SanitizedResult Sanitize(const Program* in, const Options& options) {
- transform::Manager manager;
- transform::DataMap data;
-
- { // Builtin polyfills
- transform::BuiltinPolyfill::Builtins polyfills;
- polyfills.extract_bits =
- transform::BuiltinPolyfill::Level::kClampParameters;
- polyfills.first_leading_bit = true;
- polyfills.first_trailing_bit = true;
- polyfills.insert_bits = transform::BuiltinPolyfill::Level::kClampParameters;
- data.Add<transform::BuiltinPolyfill::Config>(polyfills);
- manager.Add<transform::BuiltinPolyfill>();
- }
-
- // Build the config for the internal ArrayLengthFromUniform transform.
- auto& array_length_from_uniform = options.array_length_from_uniform;
- transform::ArrayLengthFromUniform::Config array_length_from_uniform_cfg(
- array_length_from_uniform.ubo_binding);
- if (!array_length_from_uniform.bindpoint_to_size_index.empty()) {
- // If |array_length_from_uniform| bindings are provided, use that config.
- array_length_from_uniform_cfg.bindpoint_to_size_index =
- array_length_from_uniform.bindpoint_to_size_index;
- } else {
- // If the binding map is empty, use the deprecated |buffer_size_ubo_index|
- // and automatically choose indices using the binding numbers.
- array_length_from_uniform_cfg = transform::ArrayLengthFromUniform::Config(
- sem::BindingPoint{0, options.buffer_size_ubo_index});
- // Use the SSBO binding numbers as the indices for the buffer size lookups.
- for (auto* var : in->AST().GlobalVariables()) {
- auto* global = in->Sem().Get<sem::GlobalVariable>(var);
- if (global && global->StorageClass() == ast::StorageClass::kStorage) {
- array_length_from_uniform_cfg.bindpoint_to_size_index.emplace(
- global->BindingPoint(), global->BindingPoint().binding);
- }
- }
- }
-
- // Build the configs for the internal CanonicalizeEntryPointIO transform.
- auto entry_point_io_cfg = transform::CanonicalizeEntryPointIO::Config(
- transform::CanonicalizeEntryPointIO::ShaderStyle::kMsl,
- options.fixed_sample_mask, options.emit_vertex_point_size);
-
- if (options.generate_external_texture_bindings) {
- auto new_bindings_map = GenerateExternalTextureBindings(in);
- data.Add<transform::MultiplanarExternalTexture::NewBindingPoints>(
- new_bindings_map);
- }
- manager.Add<transform::MultiplanarExternalTexture>();
-
- manager.Add<transform::Unshadow>();
-
- if (!options.disable_workgroup_init) {
- // ZeroInitWorkgroupMemory must come before CanonicalizeEntryPointIO as
- // ZeroInitWorkgroupMemory may inject new builtin parameters.
- manager.Add<transform::ZeroInitWorkgroupMemory>();
- }
- manager.Add<transform::CanonicalizeEntryPointIO>();
- manager.Add<transform::ExpandCompoundAssignment>();
- manager.Add<transform::PromoteSideEffectsToDecl>();
- manager.Add<transform::UnwindDiscardFunctions>();
- manager.Add<transform::PromoteInitializersToConstVar>();
-
- manager.Add<transform::VectorizeScalarMatrixConstructors>();
- manager.Add<transform::WrapArraysInStructs>();
- manager.Add<transform::RemovePhonies>();
- manager.Add<transform::SimplifyPointers>();
- // ArrayLengthFromUniform must come after SimplifyPointers, as
- // it assumes that the form of the array length argument is &var.array.
- manager.Add<transform::ArrayLengthFromUniform>();
- manager.Add<transform::ModuleScopeVarToEntryPointParam>();
- data.Add<transform::ArrayLengthFromUniform::Config>(
- std::move(array_length_from_uniform_cfg));
- data.Add<transform::CanonicalizeEntryPointIO::Config>(
- std::move(entry_point_io_cfg));
- auto out = manager.Run(in, data);
-
- SanitizedResult result;
- result.program = std::move(out.program);
- if (!result.program.IsValid()) {
+ transform::Manager manager;
+ transform::DataMap data;
+
+ manager.Add<transform::DisableUniformityAnalysis>();
+
+ { // Builtin polyfills
+ transform::BuiltinPolyfill::Builtins polyfills;
+ polyfills.extract_bits = transform::BuiltinPolyfill::Level::kClampParameters;
+ polyfills.first_leading_bit = true;
+ polyfills.first_trailing_bit = true;
+ polyfills.insert_bits = transform::BuiltinPolyfill::Level::kClampParameters;
+ data.Add<transform::BuiltinPolyfill::Config>(polyfills);
+ manager.Add<transform::BuiltinPolyfill>();
+ }
+
+ // Build the config for the internal ArrayLengthFromUniform transform.
+ auto& array_length_from_uniform = options.array_length_from_uniform;
+ transform::ArrayLengthFromUniform::Config array_length_from_uniform_cfg(
+ array_length_from_uniform.ubo_binding);
+ if (!array_length_from_uniform.bindpoint_to_size_index.empty()) {
+ // If |array_length_from_uniform| bindings are provided, use that config.
+ array_length_from_uniform_cfg.bindpoint_to_size_index =
+ array_length_from_uniform.bindpoint_to_size_index;
+ } else {
+ // If the binding map is empty, use the deprecated |buffer_size_ubo_index|
+ // and automatically choose indices using the binding numbers.
+ array_length_from_uniform_cfg = transform::ArrayLengthFromUniform::Config(
+ sem::BindingPoint{0, options.buffer_size_ubo_index});
+ // Use the SSBO binding numbers as the indices for the buffer size lookups.
+ for (auto* var : in->AST().GlobalVariables()) {
+ auto* global = in->Sem().Get<sem::GlobalVariable>(var);
+ if (global && global->StorageClass() == ast::StorageClass::kStorage) {
+ array_length_from_uniform_cfg.bindpoint_to_size_index.emplace(
+ global->BindingPoint(), global->BindingPoint().binding);
+ }
+ }
+ }
+
+ // Build the configs for the internal CanonicalizeEntryPointIO transform.
+ auto entry_point_io_cfg = transform::CanonicalizeEntryPointIO::Config(
+ transform::CanonicalizeEntryPointIO::ShaderStyle::kMsl, options.fixed_sample_mask,
+ options.emit_vertex_point_size);
+
+ if (options.generate_external_texture_bindings) {
+ auto new_bindings_map = GenerateExternalTextureBindings(in);
+ data.Add<transform::MultiplanarExternalTexture::NewBindingPoints>(new_bindings_map);
+ }
+ manager.Add<transform::MultiplanarExternalTexture>();
+
+ manager.Add<transform::Unshadow>();
+
+ if (!options.disable_workgroup_init) {
+ // ZeroInitWorkgroupMemory must come before CanonicalizeEntryPointIO as
+ // ZeroInitWorkgroupMemory may inject new builtin parameters.
+ manager.Add<transform::ZeroInitWorkgroupMemory>();
+ }
+ manager.Add<transform::CanonicalizeEntryPointIO>();
+ manager.Add<transform::ExpandCompoundAssignment>();
+ manager.Add<transform::PromoteSideEffectsToDecl>();
+ manager.Add<transform::UnwindDiscardFunctions>();
+ manager.Add<transform::PromoteInitializersToConstVar>();
+
+ manager.Add<transform::VectorizeScalarMatrixConstructors>();
+ manager.Add<transform::WrapArraysInStructs>();
+ manager.Add<transform::RemovePhonies>();
+ manager.Add<transform::SimplifyPointers>();
+ // ArrayLengthFromUniform must come after SimplifyPointers, as
+ // it assumes that the form of the array length argument is &var.array.
+ manager.Add<transform::ArrayLengthFromUniform>();
+ manager.Add<transform::ModuleScopeVarToEntryPointParam>();
+ data.Add<transform::ArrayLengthFromUniform::Config>(std::move(array_length_from_uniform_cfg));
+ data.Add<transform::CanonicalizeEntryPointIO::Config>(std::move(entry_point_io_cfg));
+ auto out = manager.Run(in, data);
+
+ SanitizedResult result;
+ result.program = std::move(out.program);
+ if (!result.program.IsValid()) {
+ return result;
+ }
+ if (auto* res = out.data.Get<transform::ArrayLengthFromUniform::Result>()) {
+ result.used_array_length_from_uniform_indices = std::move(res->used_size_indices);
+ }
+ result.needs_storage_buffer_sizes = !result.used_array_length_from_uniform_indices.empty();
return result;
- }
- if (auto* res = out.data.Get<transform::ArrayLengthFromUniform::Result>()) {
- result.used_array_length_from_uniform_indices =
- std::move(res->used_size_indices);
- }
- result.needs_storage_buffer_sizes =
- !result.used_array_length_from_uniform_indices.empty();
- return result;
}
GeneratorImpl::GeneratorImpl(const Program* program) : TextGenerator(program) {}
@@ -215,2319 +236,2379 @@ GeneratorImpl::GeneratorImpl(const Program* program) : TextGenerator(program) {}
GeneratorImpl::~GeneratorImpl() = default;
bool GeneratorImpl::Generate() {
- line() << "#include <metal_stdlib>";
- line();
- line() << "using namespace metal;";
-
- auto helpers_insertion_point = current_buffer_->lines.size();
-
- auto* mod = builder_.Sem().Module();
- for (auto* decl : mod->DependencyOrderedDeclarations()) {
- bool ok = Switch(
- decl, //
- [&](const ast::Struct* str) {
- TINT_DEFER(line());
- return EmitTypeDecl(TypeOf(str));
- },
- [&](const ast::Alias*) {
- return true; // folded away by the writer
- },
- [&](const ast::Variable* var) {
- if (var->is_const) {
- TINT_DEFER(line());
- return EmitProgramConstVariable(var);
- }
- // These are pushed into the entry point by sanitizer transforms.
- TINT_ICE(Writer, diagnostics_)
- << "module-scope variables should have been handled by the MSL "
- "sanitizer";
- return false;
- },
- [&](const ast::Function* func) {
- TINT_DEFER(line());
- if (func->IsEntryPoint()) {
- return EmitEntryPointFunction(func);
- }
- return EmitFunction(func);
- },
- [&](Default) {
- // These are pushed into the entry point by sanitizer transforms.
- TINT_ICE(Writer, diagnostics_)
- << "unhandled type: " << decl->TypeInfo().name;
- return false;
- });
- if (!ok) {
- return false;
- }
- }
-
- if (!invariant_define_name_.empty()) {
- // 'invariant' attribute requires MSL 2.1 or higher.
- // WGSL can ignore the invariant attribute on pre MSL 2.1 devices.
- // See: https://github.com/gpuweb/gpuweb/issues/893#issuecomment-745537465
- line(&helpers_) << "#if __METAL_VERSION__ >= 210";
- line(&helpers_) << "#define " << invariant_define_name_ << " @invariant";
- line(&helpers_) << "#else";
- line(&helpers_) << "#define " << invariant_define_name_;
- line(&helpers_) << "#endif";
- line(&helpers_);
- }
-
- if (!helpers_.lines.empty()) {
- current_buffer_->Insert("", helpers_insertion_point++, 0);
- current_buffer_->Insert(helpers_, helpers_insertion_point++, 0);
- }
-
- return true;
+ line() << "#include <metal_stdlib>";
+ line();
+ line() << "using namespace metal;";
+
+ auto helpers_insertion_point = current_buffer_->lines.size();
+
+ auto* mod = builder_.Sem().Module();
+ for (auto* decl : mod->DependencyOrderedDeclarations()) {
+ bool ok = Switch(
+ decl, //
+ [&](const ast::Struct* str) {
+ TINT_DEFER(line());
+ return EmitTypeDecl(TypeOf(str));
+ },
+ [&](const ast::Alias*) {
+ return true; // folded away by the writer
+ },
+ [&](const ast::Variable* var) {
+ if (var->is_const) {
+ TINT_DEFER(line());
+ return EmitProgramConstVariable(var);
+ }
+ // These are pushed into the entry point by sanitizer transforms.
+ TINT_ICE(Writer, diagnostics_)
+ << "module-scope variables should have been handled by the MSL "
+ "sanitizer";
+ return false;
+ },
+ [&](const ast::Function* func) {
+ TINT_DEFER(line());
+ if (func->IsEntryPoint()) {
+ return EmitEntryPointFunction(func);
+ }
+ return EmitFunction(func);
+ },
+ [&](const ast::Enable*) {
+ // Do nothing for enabling extension in MSL
+ return true;
+ },
+ [&](Default) {
+ // These are pushed into the entry point by sanitizer transforms.
+ TINT_ICE(Writer, diagnostics_) << "unhandled type: " << decl->TypeInfo().name;
+ return false;
+ });
+ if (!ok) {
+ return false;
+ }
+ }
+
+ if (!invariant_define_name_.empty()) {
+ // 'invariant' attribute requires MSL 2.1 or higher.
+ // WGSL can ignore the invariant attribute on pre MSL 2.1 devices.
+ // See: https://github.com/gpuweb/gpuweb/issues/893#issuecomment-745537465
+ line(&helpers_) << "#if __METAL_VERSION__ >= 210";
+ line(&helpers_) << "#define " << invariant_define_name_ << " @invariant";
+ line(&helpers_) << "#else";
+ line(&helpers_) << "#define " << invariant_define_name_;
+ line(&helpers_) << "#endif";
+ line(&helpers_);
+ }
+
+ if (!helpers_.lines.empty()) {
+ current_buffer_->Insert("", helpers_insertion_point++, 0);
+ current_buffer_->Insert(helpers_, helpers_insertion_point++, 0);
+ }
+
+ return true;
}
bool GeneratorImpl::EmitTypeDecl(const sem::Type* ty) {
- if (auto* str = ty->As<sem::Struct>()) {
- if (!EmitStructType(current_buffer_, str)) {
- return false;
- }
- } else {
- diagnostics_.add_error(
- diag::System::Writer,
- "unknown alias type: " + ty->FriendlyName(builder_.Symbols()));
- return false;
- }
+ if (auto* str = ty->As<sem::Struct>()) {
+ if (!EmitStructType(current_buffer_, str)) {
+ return false;
+ }
+ } else {
+ diagnostics_.add_error(diag::System::Writer,
+ "unknown alias type: " + ty->FriendlyName(builder_.Symbols()));
+ return false;
+ }
- return true;
+ return true;
}
-bool GeneratorImpl::EmitIndexAccessor(
- std::ostream& out,
- const ast::IndexAccessorExpression* expr) {
- bool paren_lhs =
- !expr->object->IsAnyOf<ast::IndexAccessorExpression, ast::CallExpression,
- ast::IdentifierExpression,
- ast::MemberAccessorExpression>();
+bool GeneratorImpl::EmitIndexAccessor(std::ostream& out, const ast::IndexAccessorExpression* expr) {
+ bool paren_lhs =
+ !expr->object->IsAnyOf<ast::IndexAccessorExpression, ast::CallExpression,
+ ast::IdentifierExpression, ast::MemberAccessorExpression>();
- if (paren_lhs) {
- out << "(";
- }
- if (!EmitExpression(out, expr->object)) {
- return false;
- }
- if (paren_lhs) {
- out << ")";
- }
+ if (paren_lhs) {
+ out << "(";
+ }
+ if (!EmitExpression(out, expr->object)) {
+ return false;
+ }
+ if (paren_lhs) {
+ out << ")";
+ }
- out << "[";
+ out << "[";
- if (!EmitExpression(out, expr->index)) {
- return false;
- }
- out << "]";
+ if (!EmitExpression(out, expr->index)) {
+ return false;
+ }
+ out << "]";
- return true;
+ return true;
}
-bool GeneratorImpl::EmitBitcast(std::ostream& out,
- const ast::BitcastExpression* expr) {
- out << "as_type<";
- if (!EmitType(out, TypeOf(expr)->UnwrapRef(), "")) {
- return false;
- }
+bool GeneratorImpl::EmitBitcast(std::ostream& out, const ast::BitcastExpression* expr) {
+ out << "as_type<";
+ if (!EmitType(out, TypeOf(expr)->UnwrapRef(), "")) {
+ return false;
+ }
- out << ">(";
- if (!EmitExpression(out, expr->expr)) {
- return false;
- }
+ out << ">(";
+ if (!EmitExpression(out, expr->expr)) {
+ return false;
+ }
- out << ")";
- return true;
+ out << ")";
+ return true;
}
bool GeneratorImpl::EmitAssign(const ast::AssignmentStatement* stmt) {
- auto out = line();
+ auto out = line();
- if (!EmitExpression(out, stmt->lhs)) {
- return false;
- }
+ if (!EmitExpression(out, stmt->lhs)) {
+ return false;
+ }
- out << " = ";
+ out << " = ";
- if (!EmitExpression(out, stmt->rhs)) {
- return false;
- }
+ if (!EmitExpression(out, stmt->rhs)) {
+ return false;
+ }
- out << ";";
+ out << ";";
- return true;
+ return true;
}
-bool GeneratorImpl::EmitBinary(std::ostream& out,
- const ast::BinaryExpression* expr) {
- auto emit_op = [&] {
- out << " ";
+bool GeneratorImpl::EmitBinary(std::ostream& out, const ast::BinaryExpression* expr) {
+ auto emit_op = [&] {
+ out << " ";
- switch (expr->op) {
- case ast::BinaryOp::kAnd:
- out << "&";
- break;
- case ast::BinaryOp::kOr:
- out << "|";
- break;
- case ast::BinaryOp::kXor:
- out << "^";
- break;
- case ast::BinaryOp::kLogicalAnd:
- out << "&&";
- break;
- case ast::BinaryOp::kLogicalOr:
- out << "||";
- break;
- case ast::BinaryOp::kEqual:
- out << "==";
- break;
- case ast::BinaryOp::kNotEqual:
- out << "!=";
- break;
- case ast::BinaryOp::kLessThan:
- out << "<";
- break;
- case ast::BinaryOp::kGreaterThan:
- out << ">";
- break;
- case ast::BinaryOp::kLessThanEqual:
- out << "<=";
- break;
- case ast::BinaryOp::kGreaterThanEqual:
- out << ">=";
- break;
- case ast::BinaryOp::kShiftLeft:
- out << "<<";
- break;
- case ast::BinaryOp::kShiftRight:
- // TODO(dsinclair): MSL is based on C++14, and >> in C++14 has
- // implementation-defined behaviour for negative LHS. We may have to
- // generate extra code to implement WGSL-specified behaviour for
- // negative LHS.
- out << R"(>>)";
- break;
-
- case ast::BinaryOp::kAdd:
- out << "+";
- break;
- case ast::BinaryOp::kSubtract:
- out << "-";
- break;
- case ast::BinaryOp::kMultiply:
- out << "*";
- break;
- case ast::BinaryOp::kDivide:
- out << "/";
- break;
- case ast::BinaryOp::kModulo:
- out << "%";
- break;
- case ast::BinaryOp::kNone:
- diagnostics_.add_error(diag::System::Writer,
- "missing binary operation type");
- return false;
- }
- out << " ";
- return true;
- };
-
- auto signed_type_of = [&](const sem::Type* ty) -> const sem::Type* {
- if (ty->is_integer_scalar()) {
- return builder_.create<sem::I32>();
- } else if (auto* v = ty->As<sem::Vector>()) {
- return builder_.create<sem::Vector>(builder_.create<sem::I32>(),
- v->Width());
- }
- return {};
- };
-
- auto unsigned_type_of = [&](const sem::Type* ty) -> const sem::Type* {
- if (ty->is_integer_scalar()) {
- return builder_.create<sem::U32>();
- } else if (auto* v = ty->As<sem::Vector>()) {
- return builder_.create<sem::Vector>(builder_.create<sem::U32>(),
- v->Width());
- }
- return {};
- };
-
- auto* lhs_type = TypeOf(expr->lhs)->UnwrapRef();
- auto* rhs_type = TypeOf(expr->rhs)->UnwrapRef();
-
- // Handle fmod
- if (expr->op == ast::BinaryOp::kModulo &&
- lhs_type->is_float_scalar_or_vector()) {
- out << "fmod";
- ScopedParen sp(out);
- if (!EmitExpression(out, expr->lhs)) {
- return false;
- }
- out << ", ";
- if (!EmitExpression(out, expr->rhs)) {
- return false;
+ switch (expr->op) {
+ case ast::BinaryOp::kAnd:
+ out << "&";
+ break;
+ case ast::BinaryOp::kOr:
+ out << "|";
+ break;
+ case ast::BinaryOp::kXor:
+ out << "^";
+ break;
+ case ast::BinaryOp::kLogicalAnd:
+ out << "&&";
+ break;
+ case ast::BinaryOp::kLogicalOr:
+ out << "||";
+ break;
+ case ast::BinaryOp::kEqual:
+ out << "==";
+ break;
+ case ast::BinaryOp::kNotEqual:
+ out << "!=";
+ break;
+ case ast::BinaryOp::kLessThan:
+ out << "<";
+ break;
+ case ast::BinaryOp::kGreaterThan:
+ out << ">";
+ break;
+ case ast::BinaryOp::kLessThanEqual:
+ out << "<=";
+ break;
+ case ast::BinaryOp::kGreaterThanEqual:
+ out << ">=";
+ break;
+ case ast::BinaryOp::kShiftLeft:
+ out << "<<";
+ break;
+ case ast::BinaryOp::kShiftRight:
+ // TODO(dsinclair): MSL is based on C++14, and >> in C++14 has
+ // implementation-defined behaviour for negative LHS. We may have to
+ // generate extra code to implement WGSL-specified behaviour for
+ // negative LHS.
+ out << R"(>>)";
+ break;
+
+ case ast::BinaryOp::kAdd:
+ out << "+";
+ break;
+ case ast::BinaryOp::kSubtract:
+ out << "-";
+ break;
+ case ast::BinaryOp::kMultiply:
+ out << "*";
+ break;
+ case ast::BinaryOp::kDivide:
+ out << "/";
+ break;
+ case ast::BinaryOp::kModulo:
+ out << "%";
+ break;
+ case ast::BinaryOp::kNone:
+ diagnostics_.add_error(diag::System::Writer, "missing binary operation type");
+ return false;
+ }
+ out << " ";
+ return true;
+ };
+
+ auto signed_type_of = [&](const sem::Type* ty) -> const sem::Type* {
+ if (ty->is_integer_scalar()) {
+ return builder_.create<sem::I32>();
+ } else if (auto* v = ty->As<sem::Vector>()) {
+ return builder_.create<sem::Vector>(builder_.create<sem::I32>(), v->Width());
+ }
+ return {};
+ };
+
+ auto unsigned_type_of = [&](const sem::Type* ty) -> const sem::Type* {
+ if (ty->is_integer_scalar()) {
+ return builder_.create<sem::U32>();
+ } else if (auto* v = ty->As<sem::Vector>()) {
+ return builder_.create<sem::Vector>(builder_.create<sem::U32>(), v->Width());
+ }
+ return {};
+ };
+
+ auto* lhs_type = TypeOf(expr->lhs)->UnwrapRef();
+ auto* rhs_type = TypeOf(expr->rhs)->UnwrapRef();
+
+ // Handle fmod
+ if (expr->op == ast::BinaryOp::kModulo && lhs_type->is_float_scalar_or_vector()) {
+ out << "fmod";
+ ScopedParen sp(out);
+ if (!EmitExpression(out, expr->lhs)) {
+ return false;
+ }
+ out << ", ";
+ if (!EmitExpression(out, expr->rhs)) {
+ return false;
+ }
+ return true;
}
- return true;
- }
-
- // Handle +/-/* of signed values
- if ((expr->IsAdd() || expr->IsSubtract() || expr->IsMultiply()) &&
- lhs_type->is_signed_scalar_or_vector() &&
- rhs_type->is_signed_scalar_or_vector()) {
- // If lhs or rhs is a vector, use that type (support implicit scalar to
- // vector promotion)
- auto* target_type =
- lhs_type->Is<sem::Vector>()
- ? lhs_type
- : (rhs_type->Is<sem::Vector>() ? rhs_type : lhs_type);
-
- // WGSL defines behaviour for signed overflow, MSL does not. For these
- // cases, bitcast operands to unsigned, then cast result to signed.
- ScopedBitCast outer_int_cast(this, out, target_type,
- signed_type_of(target_type));
- ScopedParen sp(out);
- {
- ScopedBitCast lhs_uint_cast(this, out, lhs_type,
- unsigned_type_of(target_type));
- if (!EmitExpression(out, expr->lhs)) {
- return false;
- }
+
+ // Handle +/-/* of signed values
+ if ((expr->IsAdd() || expr->IsSubtract() || expr->IsMultiply()) &&
+ lhs_type->is_signed_scalar_or_vector() && rhs_type->is_signed_scalar_or_vector()) {
+ // If lhs or rhs is a vector, use that type (support implicit scalar to
+ // vector promotion)
+ auto* target_type = lhs_type->Is<sem::Vector>()
+ ? lhs_type
+ : (rhs_type->Is<sem::Vector>() ? rhs_type : lhs_type);
+
+ // WGSL defines behaviour for signed overflow, MSL does not. For these
+ // cases, bitcast operands to unsigned, then cast result to signed.
+ ScopedBitCast outer_int_cast(this, out, target_type, signed_type_of(target_type));
+ ScopedParen sp(out);
+ {
+ ScopedBitCast lhs_uint_cast(this, out, lhs_type, unsigned_type_of(target_type));
+ if (!EmitExpression(out, expr->lhs)) {
+ return false;
+ }
+ }
+ if (!emit_op()) {
+ return false;
+ }
+ {
+ ScopedBitCast rhs_uint_cast(this, out, rhs_type, unsigned_type_of(target_type));
+ if (!EmitExpression(out, expr->rhs)) {
+ return false;
+ }
+ }
+ return true;
}
- if (!emit_op()) {
- return false;
+
+ // Handle left bit shifting a signed value
+ // TODO(crbug.com/tint/1077): This may not be necessary. The MSL spec
+ // seems to imply that left shifting a signed value is treated the same as
+ // left shifting an unsigned value, but we need to make sure.
+ if (expr->IsShiftLeft() && lhs_type->is_signed_scalar_or_vector()) {
+ // Shift left: discards top bits, so convert first operand to unsigned
+ // first, then convert result back to signed
+ ScopedBitCast outer_int_cast(this, out, lhs_type, signed_type_of(lhs_type));
+ ScopedParen sp(out);
+ {
+ ScopedBitCast lhs_uint_cast(this, out, lhs_type, unsigned_type_of(lhs_type));
+ if (!EmitExpression(out, expr->lhs)) {
+ return false;
+ }
+ }
+ if (!emit_op()) {
+ return false;
+ }
+ if (!EmitExpression(out, expr->rhs)) {
+ return false;
+ }
+ return true;
}
- {
- ScopedBitCast rhs_uint_cast(this, out, rhs_type,
- unsigned_type_of(target_type));
- if (!EmitExpression(out, expr->rhs)) {
- return false;
- }
+
+ // Handle '&' and '|' of booleans.
+ if ((expr->IsAnd() || expr->IsOr()) && lhs_type->Is<sem::Bool>()) {
+ out << "bool";
+ ScopedParen sp(out);
+ if (!EmitExpression(out, expr->lhs)) {
+ return false;
+ }
+ if (!emit_op()) {
+ return false;
+ }
+ if (!EmitExpression(out, expr->rhs)) {
+ return false;
+ }
+ return true;
}
- return true;
- }
-
- // Handle left bit shifting a signed value
- // TODO(crbug.com/tint/1077): This may not be necessary. The MSL spec
- // seems to imply that left shifting a signed value is treated the same as
- // left shifting an unsigned value, but we need to make sure.
- if (expr->IsShiftLeft() && lhs_type->is_signed_scalar_or_vector()) {
- // Shift left: discards top bits, so convert first operand to unsigned
- // first, then convert result back to signed
- ScopedBitCast outer_int_cast(this, out, lhs_type, signed_type_of(lhs_type));
+
+ // Emit as usual
ScopedParen sp(out);
- {
- ScopedBitCast lhs_uint_cast(this, out, lhs_type,
- unsigned_type_of(lhs_type));
- if (!EmitExpression(out, expr->lhs)) {
+ if (!EmitExpression(out, expr->lhs)) {
return false;
- }
}
if (!emit_op()) {
- return false;
+ return false;
}
if (!EmitExpression(out, expr->rhs)) {
- return false;
+ return false;
}
- return true;
- }
- // Emit as usual
- ScopedParen sp(out);
- if (!EmitExpression(out, expr->lhs)) {
- return false;
- }
- if (!emit_op()) {
- return false;
- }
- if (!EmitExpression(out, expr->rhs)) {
- return false;
- }
-
- return true;
+ return true;
}
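As the comment in the hunk above notes, WGSL defines i32 overflow while MSL/C++ leave it undefined, which is why +, - and * on signed operands are routed through as_type casts to unsigned. A host-side C++ sketch of the same wrap-then-reinterpret idea (wrapping_add_i32 is illustrative, not part of the patch):

    #include <cstdint>
    #include <cstring>
    #include <iostream>

    // Do the add in unsigned arithmetic (well-defined modulo 2^32), then
    // reinterpret the bits as signed, matching WGSL's defined i32 overflow.
    int32_t wrapping_add_i32(int32_t a, int32_t b) {
        uint32_t ua, ub;
        std::memcpy(&ua, &a, sizeof ua);
        std::memcpy(&ub, &b, sizeof ub);
        uint32_t sum = ua + ub;  // wraps, never UB
        int32_t result;
        std::memcpy(&result, &sum, sizeof result);
        return result;
    }

    int main() {
        std::cout << wrapping_add_i32(0x7fffffff, 1) << "\n";  // -2147483648
    }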
bool GeneratorImpl::EmitBreak(const ast::BreakStatement*) {
- line() << "break;";
- return true;
+ line() << "break;";
+ return true;
}
-bool GeneratorImpl::EmitCall(std::ostream& out,
- const ast::CallExpression* expr) {
- auto* call = program_->Sem().Get(expr);
- auto* target = call->Target();
- return Switch(
- target,
- [&](const sem::Function* func) {
- return EmitFunctionCall(out, call, func);
- },
- [&](const sem::Builtin* builtin) {
- return EmitBuiltinCall(out, call, builtin);
- },
- [&](const sem::TypeConversion* conv) {
- return EmitTypeConversion(out, call, conv);
- },
- [&](const sem::TypeConstructor* ctor) {
- return EmitTypeConstructor(out, call, ctor);
- },
- [&](Default) {
- TINT_ICE(Writer, diagnostics_)
- << "unhandled call target: " << target->TypeInfo().name;
- return false;
- });
+bool GeneratorImpl::EmitCall(std::ostream& out, const ast::CallExpression* expr) {
+ auto* call = program_->Sem().Get<sem::Call>(expr);
+ auto* target = call->Target();
+ return Switch(
+ target, [&](const sem::Function* func) { return EmitFunctionCall(out, call, func); },
+ [&](const sem::Builtin* builtin) { return EmitBuiltinCall(out, call, builtin); },
+ [&](const sem::TypeConversion* conv) { return EmitTypeConversion(out, call, conv); },
+ [&](const sem::TypeConstructor* ctor) { return EmitTypeConstructor(out, call, ctor); },
+ [&](Default) {
+ TINT_ICE(Writer, diagnostics_) << "unhandled call target: " << target->TypeInfo().name;
+ return false;
+ });
}
bool GeneratorImpl::EmitFunctionCall(std::ostream& out,
const sem::Call* call,
const sem::Function*) {
- auto* ident = call->Declaration()->target.name;
- out << program_->Symbols().NameFor(ident->symbol) << "(";
+ auto* ident = call->Declaration()->target.name;
+ out << program_->Symbols().NameFor(ident->symbol) << "(";
- bool first = true;
- for (auto* arg : call->Arguments()) {
- if (!first) {
- out << ", ";
- }
- first = false;
+ bool first = true;
+ for (auto* arg : call->Arguments()) {
+ if (!first) {
+ out << ", ";
+ }
+ first = false;
- if (!EmitExpression(out, arg->Declaration())) {
- return false;
+ if (!EmitExpression(out, arg->Declaration())) {
+ return false;
+ }
}
- }
- out << ")";
- return true;
+ out << ")";
+ return true;
}
bool GeneratorImpl::EmitBuiltinCall(std::ostream& out,
const sem::Call* call,
const sem::Builtin* builtin) {
- auto* expr = call->Declaration();
- if (builtin->IsAtomic()) {
- return EmitAtomicCall(out, expr, builtin);
- }
- if (builtin->IsTexture()) {
- return EmitTextureCall(out, call, builtin);
- }
-
- auto name = generate_builtin_name(builtin);
-
- switch (builtin->Type()) {
- case sem::BuiltinType::kDot:
- return EmitDotCall(out, expr, builtin);
- case sem::BuiltinType::kModf:
- return EmitModfCall(out, expr, builtin);
- case sem::BuiltinType::kFrexp:
- return EmitFrexpCall(out, expr, builtin);
- case sem::BuiltinType::kDegrees:
- return EmitDegreesCall(out, expr, builtin);
- case sem::BuiltinType::kRadians:
- return EmitRadiansCall(out, expr, builtin);
-
- case sem::BuiltinType::kPack2x16float:
- case sem::BuiltinType::kUnpack2x16float: {
- if (builtin->Type() == sem::BuiltinType::kPack2x16float) {
- out << "as_type<uint>(half2(";
- } else {
- out << "float2(as_type<half2>(";
- }
- if (!EmitExpression(out, expr->args[0])) {
- return false;
- }
- out << "))";
- return true;
- }
- // TODO(crbug.com/tint/661): Combine sequential barriers to a single
- // instruction.
- case sem::BuiltinType::kStorageBarrier: {
- out << "threadgroup_barrier(mem_flags::mem_device)";
- return true;
- }
- case sem::BuiltinType::kWorkgroupBarrier: {
- out << "threadgroup_barrier(mem_flags::mem_threadgroup)";
- return true;
- }
-
- case sem::BuiltinType::kLength: {
- auto* sem = builder_.Sem().Get(expr->args[0]);
- if (sem->Type()->UnwrapRef()->is_scalar()) {
- // Emulate scalar overload using fabs(x).
- name = "fabs";
- }
- break;
- }
-
- case sem::BuiltinType::kDistance: {
- auto* sem = builder_.Sem().Get(expr->args[0]);
- if (sem->Type()->UnwrapRef()->is_scalar()) {
- // Emulate scalar overload using fabs(x - y);
- out << "fabs";
- ScopedParen sp(out);
- if (!EmitExpression(out, expr->args[0])) {
- return false;
+ auto* expr = call->Declaration();
+ if (builtin->IsAtomic()) {
+ return EmitAtomicCall(out, expr, builtin);
+ }
+ if (builtin->IsTexture()) {
+ return EmitTextureCall(out, call, builtin);
+ }
+
+ auto name = generate_builtin_name(builtin);
+
+ switch (builtin->Type()) {
+ case sem::BuiltinType::kDot:
+ return EmitDotCall(out, expr, builtin);
+ case sem::BuiltinType::kModf:
+ return EmitModfCall(out, expr, builtin);
+ case sem::BuiltinType::kFrexp:
+ return EmitFrexpCall(out, expr, builtin);
+ case sem::BuiltinType::kDegrees:
+ return EmitDegreesCall(out, expr, builtin);
+ case sem::BuiltinType::kRadians:
+ return EmitRadiansCall(out, expr, builtin);
+
+ case sem::BuiltinType::kPack2x16float:
+ case sem::BuiltinType::kUnpack2x16float: {
+ if (builtin->Type() == sem::BuiltinType::kPack2x16float) {
+ out << "as_type<uint>(half2(";
+ } else {
+ out << "float2(as_type<half2>(";
+ }
+ if (!EmitExpression(out, expr->args[0])) {
+ return false;
+ }
+ out << "))";
+ return true;
}
- out << " - ";
- if (!EmitExpression(out, expr->args[1])) {
- return false;
+ // TODO(crbug.com/tint/661): Combine sequential barriers to a single
+ // instruction.
+ case sem::BuiltinType::kStorageBarrier: {
+ out << "threadgroup_barrier(mem_flags::mem_device)";
+ return true;
+ }
+ case sem::BuiltinType::kWorkgroupBarrier: {
+ out << "threadgroup_barrier(mem_flags::mem_threadgroup)";
+ return true;
}
- return true;
- }
- break;
- }
- default:
- break;
- }
+ case sem::BuiltinType::kLength: {
+ auto* sem = builder_.Sem().Get(expr->args[0]);
+ if (sem->Type()->UnwrapRef()->is_scalar()) {
+ // Emulate scalar overload using fabs(x).
+ name = "fabs";
+ }
+ break;
+ }
- if (name.empty()) {
- return false;
- }
+ case sem::BuiltinType::kDistance: {
+ auto* sem = builder_.Sem().Get(expr->args[0]);
+ if (sem->Type()->UnwrapRef()->is_scalar()) {
+ // Emulate scalar overload using fabs(x - y);
+ out << "fabs";
+ ScopedParen sp(out);
+ if (!EmitExpression(out, expr->args[0])) {
+ return false;
+ }
+ out << " - ";
+ if (!EmitExpression(out, expr->args[1])) {
+ return false;
+ }
+ return true;
+ }
+ break;
+ }
- out << name << "(";
+ default:
+ break;
+ }
- bool first = true;
- for (auto* arg : expr->args) {
- if (!first) {
- out << ", ";
+ if (name.empty()) {
+ return false;
}
- first = false;
- if (!EmitExpression(out, arg)) {
- return false;
+ out << name << "(";
+
+ bool first = true;
+ for (auto* arg : expr->args) {
+ if (!first) {
+ out << ", ";
+ }
+ first = false;
+
+ if (!EmitExpression(out, arg)) {
+ return false;
+ }
}
- }
- out << ")";
- return true;
+ out << ")";
+ return true;
}
bool GeneratorImpl::EmitTypeConversion(std::ostream& out,
const sem::Call* call,
const sem::TypeConversion* conv) {
- if (!EmitType(out, conv->Target(), "")) {
- return false;
- }
- out << "(";
+ if (!EmitType(out, conv->Target(), "")) {
+ return false;
+ }
+ out << "(";
- if (!EmitExpression(out, call->Arguments()[0]->Declaration())) {
- return false;
- }
+ if (!EmitExpression(out, call->Arguments()[0]->Declaration())) {
+ return false;
+ }
- out << ")";
- return true;
+ out << ")";
+ return true;
}
bool GeneratorImpl::EmitTypeConstructor(std::ostream& out,
const sem::Call* call,
const sem::TypeConstructor* ctor) {
- auto* type = ctor->ReturnType();
+ auto* type = ctor->ReturnType();
- if (type->IsAnyOf<sem::Array, sem::Struct>()) {
- out << "{";
- } else {
- if (!EmitType(out, type, "")) {
- return false;
+ if (type->IsAnyOf<sem::Array, sem::Struct>()) {
+ out << "{";
+ } else {
+ if (!EmitType(out, type, "")) {
+ return false;
+ }
+ out << "(";
}
- out << "(";
- }
- int i = 0;
- for (auto* arg : call->Arguments()) {
- if (i > 0) {
- out << ", ";
- }
+ int i = 0;
+ for (auto* arg : call->Arguments()) {
+ if (i > 0) {
+ out << ", ";
+ }
- if (auto* struct_ty = type->As<sem::Struct>()) {
- // Emit field designators for structures to account for padding members.
- auto* member = struct_ty->Members()[i]->Declaration();
- auto name = program_->Symbols().NameFor(member->symbol);
- out << "." << name << "=";
- }
+ if (auto* struct_ty = type->As<sem::Struct>()) {
+ // Emit field designators for structures to account for padding members.
+ auto* member = struct_ty->Members()[i]->Declaration();
+ auto name = program_->Symbols().NameFor(member->symbol);
+ out << "." << name << "=";
+ }
- if (!EmitExpression(out, arg->Declaration())) {
- return false;
- }
+ if (!EmitExpression(out, arg->Declaration())) {
+ return false;
+ }
- i++;
- }
+ i++;
+ }
- if (type->IsAnyOf<sem::Array, sem::Struct>()) {
- out << "}";
- } else {
- out << ")";
- }
- return true;
+ if (type->IsAnyOf<sem::Array, sem::Struct>()) {
+ out << "}";
+ } else {
+ out << ")";
+ }
+ return true;
}
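EmitTypeConstructor above emits field designators so that generator-inserted padding members are skipped rather than consumed by positional arguments; a small C++20 sketch of the same effect (Padded and its pad member are purely illustrative):

    #include <iostream>

    struct Padded {
        float x;
        int pad;  // stands in for a generator-inserted padding member
        float y;
    };

    int main() {
        // With designated initializers, the undesignated member is
        // value-initialized to 0 instead of taking the second argument.
        Padded p{.x = 1.0f, .y = 2.0f};
        std::cout << p.x << " " << p.pad << " " << p.y << "\n";  // 1 0 2
    }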
bool GeneratorImpl::EmitAtomicCall(std::ostream& out,
const ast::CallExpression* expr,
const sem::Builtin* builtin) {
- auto call = [&](const std::string& name, bool append_memory_order_relaxed) {
- out << name;
- {
- ScopedParen sp(out);
- for (size_t i = 0; i < expr->args.size(); i++) {
- auto* arg = expr->args[i];
- if (i > 0) {
- out << ", ";
- }
- if (!EmitExpression(out, arg)) {
- return false;
+ auto call = [&](const std::string& name, bool append_memory_order_relaxed) {
+ out << name;
+ {
+ ScopedParen sp(out);
+ for (size_t i = 0; i < expr->args.size(); i++) {
+ auto* arg = expr->args[i];
+ if (i > 0) {
+ out << ", ";
+ }
+ if (!EmitExpression(out, arg)) {
+ return false;
+ }
+ }
+ if (append_memory_order_relaxed) {
+ out << ", memory_order_relaxed";
+ }
}
- }
- if (append_memory_order_relaxed) {
- out << ", memory_order_relaxed";
- }
- }
- return true;
- };
-
- switch (builtin->Type()) {
- case sem::BuiltinType::kAtomicLoad:
- return call("atomic_load_explicit", true);
-
- case sem::BuiltinType::kAtomicStore:
- return call("atomic_store_explicit", true);
-
- case sem::BuiltinType::kAtomicAdd:
- return call("atomic_fetch_add_explicit", true);
-
- case sem::BuiltinType::kAtomicSub:
- return call("atomic_fetch_sub_explicit", true);
-
- case sem::BuiltinType::kAtomicMax:
- return call("atomic_fetch_max_explicit", true);
-
- case sem::BuiltinType::kAtomicMin:
- return call("atomic_fetch_min_explicit", true);
-
- case sem::BuiltinType::kAtomicAnd:
- return call("atomic_fetch_and_explicit", true);
-
- case sem::BuiltinType::kAtomicOr:
- return call("atomic_fetch_or_explicit", true);
-
- case sem::BuiltinType::kAtomicXor:
- return call("atomic_fetch_xor_explicit", true);
-
- case sem::BuiltinType::kAtomicExchange:
- return call("atomic_exchange_explicit", true);
-
- case sem::BuiltinType::kAtomicCompareExchangeWeak: {
- auto* ptr_ty = TypeOf(expr->args[0])->UnwrapRef()->As<sem::Pointer>();
- auto sc = ptr_ty->StorageClass();
-
- auto func = utils::GetOrCreate(
- atomicCompareExchangeWeak_, sc, [&]() -> std::string {
- auto name = UniqueIdentifier("atomicCompareExchangeWeak");
- auto& buf = helpers_;
+ return true;
+ };
- line(&buf) << "template <typename A, typename T>";
- {
- auto f = line(&buf);
- f << "vec<T, 2> " << name << "(";
- if (!EmitStorageClass(f, sc)) {
- return "";
- }
- f << " A* atomic, T compare, T value) {";
+ switch (builtin->Type()) {
+ case sem::BuiltinType::kAtomicLoad:
+ return call("atomic_load_explicit", true);
+
+ case sem::BuiltinType::kAtomicStore:
+ return call("atomic_store_explicit", true);
+
+ case sem::BuiltinType::kAtomicAdd:
+ return call("atomic_fetch_add_explicit", true);
+
+ case sem::BuiltinType::kAtomicSub:
+ return call("atomic_fetch_sub_explicit", true);
+
+ case sem::BuiltinType::kAtomicMax:
+ return call("atomic_fetch_max_explicit", true);
+
+ case sem::BuiltinType::kAtomicMin:
+ return call("atomic_fetch_min_explicit", true);
+
+ case sem::BuiltinType::kAtomicAnd:
+ return call("atomic_fetch_and_explicit", true);
+
+ case sem::BuiltinType::kAtomicOr:
+ return call("atomic_fetch_or_explicit", true);
+
+ case sem::BuiltinType::kAtomicXor:
+ return call("atomic_fetch_xor_explicit", true);
+
+ case sem::BuiltinType::kAtomicExchange:
+ return call("atomic_exchange_explicit", true);
+
+ case sem::BuiltinType::kAtomicCompareExchangeWeak: {
+ auto* ptr_ty = TypeOf(expr->args[0])->UnwrapRef()->As<sem::Pointer>();
+ auto sc = ptr_ty->StorageClass();
+ auto* str = builtin->ReturnType()->As<sem::Struct>();
+
+ auto func = utils::GetOrCreate(
+ atomicCompareExchangeWeak_, ACEWKeyType{{sc, str}}, [&]() -> std::string {
+ // Emit the builtin return type unique to this overload. This does not
+ // exist in the AST, so it will not be generated in Generate().
+ if (!EmitStructTypeOnce(&helpers_, builtin->ReturnType()->As<sem::Struct>())) {
+ return "";
+ }
+
+ auto name = UniqueIdentifier("atomicCompareExchangeWeak");
+ auto& buf = helpers_;
+ auto* atomic_ty = builtin->Parameters()[0]->Type();
+ auto* arg_ty = builtin->Parameters()[1]->Type();
+
+ {
+ auto f = line(&buf);
+ auto str_name = StructName(builtin->ReturnType()->As<sem::Struct>());
+ f << str_name << " " << name << "(";
+ if (!EmitTypeAndName(f, atomic_ty, "atomic")) {
+ return "";
+ }
+ f << ", ";
+ if (!EmitTypeAndName(f, arg_ty, "compare")) {
+ return "";
+ }
+ f << ", ";
+ if (!EmitTypeAndName(f, arg_ty, "value")) {
+ return "";
+ }
+ f << ") {";
+ }
+
+ buf.IncrementIndent();
+ TINT_DEFER({
+ buf.DecrementIndent();
+ line(&buf) << "}";
+ line(&buf);
+ });
+
+ {
+ auto f = line(&buf);
+ if (!EmitTypeAndName(f, arg_ty, "old_value")) {
+ return "";
+ }
+ f << " = compare;";
+ }
+ line(&buf) << "bool exchanged = "
+ "atomic_compare_exchange_weak_explicit(atomic, "
+ "&old_value, value, memory_order_relaxed, "
+ "memory_order_relaxed);";
+ line(&buf) << "return {old_value, exchanged};";
+ return name;
+ });
+
+ if (func.empty()) {
+ return false;
}
+ return call(func, false);
+ }
- buf.IncrementIndent();
- TINT_DEFER({
- buf.DecrementIndent();
- line(&buf) << "}";
- line(&buf);
- });
-
- line(&buf) << "T prev_value = compare;";
- line(&buf) << "bool matched = "
- "atomic_compare_exchange_weak_explicit(atomic, "
- "&prev_value, value, memory_order_relaxed, "
- "memory_order_relaxed);";
- line(&buf) << "return {prev_value, matched};";
- return name;
- });
-
- return call(func, false);
+ default:
+ break;
}
- default:
- break;
- }
-
- TINT_UNREACHABLE(Writer, diagnostics_)
- << "unsupported atomic builtin: " << builtin->Type();
- return false;
+ TINT_UNREACHABLE(Writer, diagnostics_) << "unsupported atomic builtin: " << builtin->Type();
+ return false;
}
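A C++ analogue of the atomicCompareExchangeWeak helper that the hunk above now emits per storage class and return struct; the names below are illustrative and std::atomic stands in for the Metal atomic API.

    #include <atomic>
    #include <iostream>

    // Try a weak compare-exchange with relaxed ordering and return both the old
    // value and whether the exchange happened, mirroring the {old_value,
    // exchanged} struct in the emitted code.
    struct CompareExchangeResult {
        int old_value;
        bool exchanged;
    };

    CompareExchangeResult compare_exchange_weak_relaxed(std::atomic<int>& a,
                                                        int compare,
                                                        int value) {
        int old_value = compare;
        bool exchanged = a.compare_exchange_weak(old_value, value,
                                                 std::memory_order_relaxed,
                                                 std::memory_order_relaxed);
        return {old_value, exchanged};
    }

    int main() {
        std::atomic<int> a{1};
        auto r = compare_exchange_weak_relaxed(a, 1, 5);
        // Typically prints "1 1"; a weak CAS is allowed to fail spuriously.
        std::cout << r.old_value << " " << r.exchanged << "\n";
    }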
bool GeneratorImpl::EmitTextureCall(std::ostream& out,
const sem::Call* call,
const sem::Builtin* builtin) {
- using Usage = sem::ParameterUsage;
+ using Usage = sem::ParameterUsage;
- auto& signature = builtin->Signature();
- auto* expr = call->Declaration();
- auto& arguments = call->Arguments();
+ auto& signature = builtin->Signature();
+ auto* expr = call->Declaration();
+ auto& arguments = call->Arguments();
- // Returns the argument with the given usage
- auto arg = [&](Usage usage) {
- int idx = signature.IndexOf(usage);
- return (idx >= 0) ? arguments[idx] : nullptr;
- };
+ // Returns the argument with the given usage
+ auto arg = [&](Usage usage) {
+ int idx = signature.IndexOf(usage);
+ return (idx >= 0) ? arguments[idx] : nullptr;
+ };
- auto* texture = arg(Usage::kTexture)->Declaration();
- if (!texture) {
- TINT_ICE(Writer, diagnostics_) << "missing texture arg";
- return false;
- }
+ auto* texture = arg(Usage::kTexture)->Declaration();
+ if (!texture) {
+ TINT_ICE(Writer, diagnostics_) << "missing texture arg";
+ return false;
+ }
- auto* texture_type = TypeOf(texture)->UnwrapRef()->As<sem::Texture>();
+ auto* texture_type = TypeOf(texture)->UnwrapRef()->As<sem::Texture>();
- // Helper to emit the texture expression, wrapped in parentheses if the
- // expression includes an operator with lower precedence than the member
- // accessor used for the function calls.
- auto texture_expr = [&]() {
- bool paren_lhs =
- !texture->IsAnyOf<ast::IndexAccessorExpression, ast::CallExpression,
- ast::IdentifierExpression,
- ast::MemberAccessorExpression>();
- if (paren_lhs) {
- out << "(";
- }
- if (!EmitExpression(out, texture)) {
- return false;
- }
- if (paren_lhs) {
- out << ")";
- }
- return true;
- };
-
- // MSL requires that `lod` is a constant 0 for 1D textures.
- bool level_is_constant_zero =
- texture_type->dim() == ast::TextureDimension::k1d;
-
- switch (builtin->Type()) {
- case sem::BuiltinType::kTextureDimensions: {
- std::vector<const char*> dims;
- switch (texture_type->dim()) {
- case ast::TextureDimension::kNone:
- diagnostics_.add_error(diag::System::Writer,
- "texture dimension is kNone");
- return false;
- case ast::TextureDimension::k1d:
- dims = {"width"};
- break;
- case ast::TextureDimension::k2d:
- case ast::TextureDimension::k2dArray:
- case ast::TextureDimension::kCube:
- case ast::TextureDimension::kCubeArray:
- dims = {"width", "height"};
- break;
- case ast::TextureDimension::k3d:
- dims = {"width", "height", "depth"};
- break;
- }
-
- auto get_dim = [&](const char* name) {
- if (!texture_expr()) {
- return false;
- }
- out << ".get_" << name << "(";
- if (level_is_constant_zero) {
- out << "0";
- } else {
- if (auto* level = arg(Usage::kLevel)) {
- if (!EmitExpression(out, level->Declaration())) {
- return false;
- }
- }
+ // Helper to emit the texture expression, wrapped in parentheses if the
+ // expression includes an operator with lower precedence than the member
+ // accessor used for the function calls.
+ auto texture_expr = [&]() {
+ bool paren_lhs =
+ !texture->IsAnyOf<ast::IndexAccessorExpression, ast::CallExpression,
+ ast::IdentifierExpression, ast::MemberAccessorExpression>();
+ if (paren_lhs) {
+ out << "(";
+ }
+ if (!EmitExpression(out, texture)) {
+ return false;
+ }
+ if (paren_lhs) {
+ out << ")";
}
- out << ")";
return true;
- };
+ };
- if (dims.size() == 1) {
- out << "int(";
- get_dim(dims[0]);
- out << ")";
- } else {
- EmitType(out, TypeOf(expr)->UnwrapRef(), "");
- out << "(";
- for (size_t i = 0; i < dims.size(); i++) {
- if (i > 0) {
- out << ", ";
- }
- get_dim(dims[i]);
+ // MSL requires that `lod` is a constant 0 for 1D textures.
+ bool level_is_constant_zero = texture_type->dim() == ast::TextureDimension::k1d;
+
+ switch (builtin->Type()) {
+ case sem::BuiltinType::kTextureDimensions: {
+ std::vector<const char*> dims;
+ switch (texture_type->dim()) {
+ case ast::TextureDimension::kNone:
+ diagnostics_.add_error(diag::System::Writer, "texture dimension is kNone");
+ return false;
+ case ast::TextureDimension::k1d:
+ dims = {"width"};
+ break;
+ case ast::TextureDimension::k2d:
+ case ast::TextureDimension::k2dArray:
+ case ast::TextureDimension::kCube:
+ case ast::TextureDimension::kCubeArray:
+ dims = {"width", "height"};
+ break;
+ case ast::TextureDimension::k3d:
+ dims = {"width", "height", "depth"};
+ break;
+ }
+
+ auto get_dim = [&](const char* name) {
+ if (!texture_expr()) {
+ return false;
+ }
+ out << ".get_" << name << "(";
+ if (level_is_constant_zero) {
+ out << "0";
+ } else {
+ if (auto* level = arg(Usage::kLevel)) {
+ if (!EmitExpression(out, level->Declaration())) {
+ return false;
+ }
+ }
+ }
+ out << ")";
+ return true;
+ };
+
+ if (dims.size() == 1) {
+ out << "int(";
+ get_dim(dims[0]);
+ out << ")";
+ } else {
+ EmitType(out, TypeOf(expr)->UnwrapRef(), "");
+ out << "(";
+ for (size_t i = 0; i < dims.size(); i++) {
+ if (i > 0) {
+ out << ", ";
+ }
+ get_dim(dims[i]);
+ }
+ out << ")";
+ }
+ return true;
}
- out << ")";
- }
- return true;
- }
- case sem::BuiltinType::kTextureNumLayers: {
- out << "int(";
- if (!texture_expr()) {
- return false;
- }
- out << ".get_array_size())";
- return true;
- }
- case sem::BuiltinType::kTextureNumLevels: {
- out << "int(";
- if (!texture_expr()) {
- return false;
- }
- out << ".get_num_mip_levels())";
- return true;
+ case sem::BuiltinType::kTextureNumLayers: {
+ out << "int(";
+ if (!texture_expr()) {
+ return false;
+ }
+ out << ".get_array_size())";
+ return true;
+ }
+ case sem::BuiltinType::kTextureNumLevels: {
+ out << "int(";
+ if (!texture_expr()) {
+ return false;
+ }
+ out << ".get_num_mip_levels())";
+ return true;
+ }
+ case sem::BuiltinType::kTextureNumSamples: {
+ out << "int(";
+ if (!texture_expr()) {
+ return false;
+ }
+ out << ".get_num_samples())";
+ return true;
+ }
+ default:
+ break;
}
- case sem::BuiltinType::kTextureNumSamples: {
- out << "int(";
- if (!texture_expr()) {
+
+ if (!texture_expr()) {
return false;
- }
- out << ".get_num_samples())";
- return true;
}
- default:
- break;
- }
- if (!texture_expr()) {
- return false;
- }
-
- bool lod_param_is_named = true;
-
- switch (builtin->Type()) {
- case sem::BuiltinType::kTextureSample:
- case sem::BuiltinType::kTextureSampleBias:
- case sem::BuiltinType::kTextureSampleLevel:
- case sem::BuiltinType::kTextureSampleGrad:
- out << ".sample(";
- break;
- case sem::BuiltinType::kTextureSampleCompare:
- case sem::BuiltinType::kTextureSampleCompareLevel:
- out << ".sample_compare(";
- break;
- case sem::BuiltinType::kTextureGather:
- out << ".gather(";
- break;
- case sem::BuiltinType::kTextureGatherCompare:
- out << ".gather_compare(";
- break;
- case sem::BuiltinType::kTextureLoad:
- out << ".read(";
- lod_param_is_named = false;
- break;
- case sem::BuiltinType::kTextureStore:
- out << ".write(";
- break;
- default:
- TINT_UNREACHABLE(Writer, diagnostics_)
- << "Unhandled texture builtin '" << builtin->str() << "'";
- return false;
- }
-
- bool first_arg = true;
- auto maybe_write_comma = [&] {
- if (!first_arg) {
- out << ", ";
- }
- first_arg = false;
- };
-
- for (auto usage :
- {Usage::kValue, Usage::kSampler, Usage::kCoords, Usage::kArrayIndex,
- Usage::kDepthRef, Usage::kSampleIndex}) {
- if (auto* e = arg(usage)) {
- maybe_write_comma();
-
- // Cast the coordinates to unsigned integers if necessary.
- bool casted = false;
- if (usage == Usage::kCoords &&
- e->Type()->UnwrapRef()->is_integer_scalar_or_vector()) {
- casted = true;
- switch (texture_type->dim()) {
- case ast::TextureDimension::k1d:
- out << "uint(";
+ bool lod_param_is_named = true;
+
+ switch (builtin->Type()) {
+ case sem::BuiltinType::kTextureSample:
+ case sem::BuiltinType::kTextureSampleBias:
+ case sem::BuiltinType::kTextureSampleLevel:
+ case sem::BuiltinType::kTextureSampleGrad:
+ out << ".sample(";
break;
- case ast::TextureDimension::k2d:
- case ast::TextureDimension::k2dArray:
- out << "uint2(";
+ case sem::BuiltinType::kTextureSampleCompare:
+ case sem::BuiltinType::kTextureSampleCompareLevel:
+ out << ".sample_compare(";
break;
- case ast::TextureDimension::k3d:
- out << "uint3(";
+ case sem::BuiltinType::kTextureGather:
+ out << ".gather(";
break;
- default:
- TINT_ICE(Writer, diagnostics_)
- << "unhandled texture dimensionality";
+ case sem::BuiltinType::kTextureGatherCompare:
+ out << ".gather_compare(";
break;
+ case sem::BuiltinType::kTextureLoad:
+ out << ".read(";
+ lod_param_is_named = false;
+ break;
+ case sem::BuiltinType::kTextureStore:
+ out << ".write(";
+ break;
+ default:
+ TINT_UNREACHABLE(Writer, diagnostics_)
+ << "Unhandled texture builtin '" << builtin->str() << "'";
+ return false;
+ }
+
+ bool first_arg = true;
+ auto maybe_write_comma = [&] {
+ if (!first_arg) {
+ out << ", ";
}
- }
+ first_arg = false;
+ };
- if (!EmitExpression(out, e->Declaration()))
- return false;
+ for (auto usage : {Usage::kValue, Usage::kSampler, Usage::kCoords, Usage::kArrayIndex,
+ Usage::kDepthRef, Usage::kSampleIndex}) {
+ if (auto* e = arg(usage)) {
+ maybe_write_comma();
+
+ // Cast the coordinates to unsigned integers if necessary.
+ bool casted = false;
+ if (usage == Usage::kCoords && e->Type()->UnwrapRef()->is_integer_scalar_or_vector()) {
+ casted = true;
+ switch (texture_type->dim()) {
+ case ast::TextureDimension::k1d:
+ out << "uint(";
+ break;
+ case ast::TextureDimension::k2d:
+ case ast::TextureDimension::k2dArray:
+ out << "uint2(";
+ break;
+ case ast::TextureDimension::k3d:
+ out << "uint3(";
+ break;
+ default:
+ TINT_ICE(Writer, diagnostics_) << "unhandled texture dimensionality";
+ break;
+ }
+ }
- if (casted) {
- out << ")";
- }
- }
- }
+ if (!EmitExpression(out, e->Declaration())) {
+ return false;
+ }
- if (auto* bias = arg(Usage::kBias)) {
- maybe_write_comma();
- out << "bias(";
- if (!EmitExpression(out, bias->Declaration())) {
- return false;
+ if (casted) {
+ out << ")";
+ }
+ }
}
- out << ")";
- }
- if (auto* level = arg(Usage::kLevel)) {
- maybe_write_comma();
- if (lod_param_is_named) {
- out << "level(";
- }
- if (level_is_constant_zero) {
- out << "0";
- } else {
- if (!EmitExpression(out, level->Declaration())) {
- return false;
- }
- }
- if (lod_param_is_named) {
- out << ")";
- }
- }
- if (builtin->Type() == sem::BuiltinType::kTextureSampleCompareLevel) {
- maybe_write_comma();
- out << "level(0)";
- }
- if (auto* ddx = arg(Usage::kDdx)) {
- auto dim = texture_type->dim();
- switch (dim) {
- case ast::TextureDimension::k2d:
- case ast::TextureDimension::k2dArray:
+
+ if (auto* bias = arg(Usage::kBias)) {
maybe_write_comma();
- out << "gradient2d(";
- break;
- case ast::TextureDimension::k3d:
+ out << "bias(";
+ if (!EmitExpression(out, bias->Declaration())) {
+ return false;
+ }
+ out << ")";
+ }
+ if (auto* level = arg(Usage::kLevel)) {
maybe_write_comma();
- out << "gradient3d(";
- break;
- case ast::TextureDimension::kCube:
- case ast::TextureDimension::kCubeArray:
+ if (lod_param_is_named) {
+ out << "level(";
+ }
+ if (level_is_constant_zero) {
+ out << "0";
+ } else {
+ if (!EmitExpression(out, level->Declaration())) {
+ return false;
+ }
+ }
+ if (lod_param_is_named) {
+ out << ")";
+ }
+ }
+ if (builtin->Type() == sem::BuiltinType::kTextureSampleCompareLevel) {
maybe_write_comma();
- out << "gradientcube(";
- break;
- default: {
- std::stringstream err;
- err << "MSL does not support gradients for " << dim << " textures";
- diagnostics_.add_error(diag::System::Writer, err.str());
- return false;
- }
+ out << "level(0)";
+ }
+ if (auto* ddx = arg(Usage::kDdx)) {
+ auto dim = texture_type->dim();
+ switch (dim) {
+ case ast::TextureDimension::k2d:
+ case ast::TextureDimension::k2dArray:
+ maybe_write_comma();
+ out << "gradient2d(";
+ break;
+ case ast::TextureDimension::k3d:
+ maybe_write_comma();
+ out << "gradient3d(";
+ break;
+ case ast::TextureDimension::kCube:
+ case ast::TextureDimension::kCubeArray:
+ maybe_write_comma();
+ out << "gradientcube(";
+ break;
+ default: {
+ std::stringstream err;
+ err << "MSL does not support gradients for " << dim << " textures";
+ diagnostics_.add_error(diag::System::Writer, err.str());
+ return false;
+ }
+ }
+ if (!EmitExpression(out, ddx->Declaration())) {
+ return false;
+ }
+ out << ", ";
+ if (!EmitExpression(out, arg(Usage::kDdy)->Declaration())) {
+ return false;
+ }
+ out << ")";
}
- if (!EmitExpression(out, ddx->Declaration())) {
- return false;
+
+ bool has_offset = false;
+ if (auto* offset = arg(Usage::kOffset)) {
+ has_offset = true;
+ maybe_write_comma();
+ if (!EmitExpression(out, offset->Declaration())) {
+ return false;
+ }
}
- out << ", ";
- if (!EmitExpression(out, arg(Usage::kDdy)->Declaration())) {
- return false;
+
+ if (auto* component = arg(Usage::kComponent)) {
+ maybe_write_comma();
+ if (!has_offset) {
+ // offset argument may need to be provided if we have a component.
+ switch (texture_type->dim()) {
+ case ast::TextureDimension::k2d:
+ case ast::TextureDimension::k2dArray:
+ out << "int2(0), ";
+ break;
+ default:
+ break; // Other texture dimensions don't have an offset
+ }
+ }
+ auto c = component->ConstantValue().Element<AInt>(0);
+ switch (c.value) {
+ case 0:
+ out << "component::x";
+ break;
+ case 1:
+ out << "component::y";
+ break;
+ case 2:
+ out << "component::z";
+ break;
+ case 3:
+ out << "component::w";
+ break;
+ default:
+ TINT_ICE(Writer, diagnostics_) << "invalid textureGather component: " << c;
+ break;
+ }
}
+
out << ")";
- }
-
- bool has_offset = false;
- if (auto* offset = arg(Usage::kOffset)) {
- has_offset = true;
- maybe_write_comma();
- if (!EmitExpression(out, offset->Declaration())) {
- return false;
- }
- }
-
- if (auto* component = arg(Usage::kComponent)) {
- maybe_write_comma();
- if (!has_offset) {
- // offset argument may need to be provided if we have a component.
- switch (texture_type->dim()) {
- case ast::TextureDimension::k2d:
- case ast::TextureDimension::k2dArray:
- out << "int2(0), ";
- break;
- default:
- break; // Other texture dimensions don't have an offset
- }
- }
- auto c = component->ConstantValue().Elements()[0].i32;
- switch (c) {
- case 0:
- out << "component::x";
- break;
- case 1:
- out << "component::y";
- break;
- case 2:
- out << "component::z";
- break;
- case 3:
- out << "component::w";
- break;
- default:
- TINT_ICE(Writer, diagnostics_)
- << "invalid textureGather component: " << c;
- break;
- }
- }
-
- out << ")";
-
- return true;
+
+ return true;
}
bool GeneratorImpl::EmitDotCall(std::ostream& out,
const ast::CallExpression* expr,
const sem::Builtin* builtin) {
- auto* vec_ty = builtin->Parameters()[0]->Type()->As<sem::Vector>();
- std::string fn = "dot";
- if (vec_ty->type()->is_integer_scalar()) {
- // MSL does not have a builtin for dot() with integer vector types.
- // Generate the helper function if it hasn't been created already
- fn = utils::GetOrCreate(
- int_dot_funcs_, vec_ty->Width(), [&]() -> std::string {
- TextBuffer b;
- TINT_DEFER(helpers_.Append(b));
-
- auto fn_name =
- UniqueIdentifier("tint_dot" + std::to_string(vec_ty->Width()));
- auto v = "vec<T," + std::to_string(vec_ty->Width()) + ">";
-
- line(&b) << "template<typename T>";
- line(&b) << "T " << fn_name << "(" << v << " a, " << v << " b) {";
- {
- auto l = line(&b);
- l << " return ";
- for (uint32_t i = 0; i < vec_ty->Width(); i++) {
- if (i > 0) {
- l << " + ";
- }
- l << "a[" << i << "]*b[" << i << "]";
- }
- l << ";";
- }
- line(&b) << "}";
- return fn_name;
+ auto* vec_ty = builtin->Parameters()[0]->Type()->As<sem::Vector>();
+ std::string fn = "dot";
+ if (vec_ty->type()->is_integer_scalar()) {
+ // MSL does not have a builtin for dot() with integer vector types.
+ // Generate the helper function if it hasn't been created already
+ fn = utils::GetOrCreate(int_dot_funcs_, vec_ty->Width(), [&]() -> std::string {
+ TextBuffer b;
+ TINT_DEFER(helpers_.Append(b));
+
+ auto fn_name = UniqueIdentifier("tint_dot" + std::to_string(vec_ty->Width()));
+ auto v = "vec<T," + std::to_string(vec_ty->Width()) + ">";
+
+ line(&b) << "template<typename T>";
+ line(&b) << "T " << fn_name << "(" << v << " a, " << v << " b) {";
+ {
+ auto l = line(&b);
+ l << " return ";
+ for (uint32_t i = 0; i < vec_ty->Width(); i++) {
+ if (i > 0) {
+ l << " + ";
+ }
+ l << "a[" << i << "]*b[" << i << "]";
+ }
+ l << ";";
+ }
+ line(&b) << "}";
+ return fn_name;
});
- }
+ }
- out << fn << "(";
- if (!EmitExpression(out, expr->args[0])) {
- return false;
- }
- out << ", ";
- if (!EmitExpression(out, expr->args[1])) {
- return false;
- }
- out << ")";
- return true;
+ out << fn << "(";
+ if (!EmitExpression(out, expr->args[0])) {
+ return false;
+ }
+ out << ", ";
+ if (!EmitExpression(out, expr->args[1])) {
+ return false;
+ }
+ out << ")";
+ return true;
}
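// As a sketch of what the GetOrCreate lambda above generates for dot() on a
// two-component integer vector (the name goes through UniqueIdentifier, so
// "tint_dot2" is the usual but not guaranteed spelling):

    template<typename T>
    T tint_dot2(vec<T,2> a, vec<T,2> b) {
      return a[0]*b[0] + a[1]*b[1];
    }

// The call site is then written as tint_dot2(lhs, rhs); float vectors keep using
// the regular MSL dot() builtin.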
bool GeneratorImpl::EmitModfCall(std::ostream& out,
const ast::CallExpression* expr,
const sem::Builtin* builtin) {
- return CallBuiltinHelper(
- out, expr, builtin,
- [&](TextBuffer* b, const std::vector<std::string>& params) {
- auto* ty = builtin->Parameters()[0]->Type();
- auto in = params[0];
-
- std::string width;
- if (auto* vec = ty->As<sem::Vector>()) {
- width = std::to_string(vec->Width());
- }
+ return CallBuiltinHelper(
+ out, expr, builtin, [&](TextBuffer* b, const std::vector<std::string>& params) {
+ auto* ty = builtin->Parameters()[0]->Type();
+ auto in = params[0];
+
+ std::string width;
+ if (auto* vec = ty->As<sem::Vector>()) {
+ width = std::to_string(vec->Width());
+ }
- // Emit the builtin return type unique to this overload. This does not
- // exist in the AST, so it will not be generated in Generate().
- if (!EmitStructType(&helpers_,
- builtin->ReturnType()->As<sem::Struct>())) {
- return false;
- }
+ // Emit the builtin return type unique to this overload. This does not
+ // exist in the AST, so it will not be generated in Generate().
+ if (!EmitStructType(&helpers_, builtin->ReturnType()->As<sem::Struct>())) {
+ return false;
+ }
- line(b) << "float" << width << " whole;";
- line(b) << "float" << width << " fract = modf(" << in << ", whole);";
- line(b) << "return {fract, whole};";
- return true;
- });
+ line(b) << "float" << width << " whole;";
+ line(b) << "float" << width << " fract = modf(" << in << ", whole);";
+ line(b) << "return {fract, whole};";
+ return true;
+ });
}
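// For illustration, the lambda above writes the body of a generated wrapper
// function (the wrapper shell itself comes from CallBuiltinHelper); for a scalar
// f32 argument named "in" that body reads:

    float whole;
    float fract = modf(in, whole);
    return {fract, whole};

// EmitFrexpCall below follows the same pattern with an int "exp" out-parameter,
// and vector overloads simply widen the types (float3, int3, ...).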
bool GeneratorImpl::EmitFrexpCall(std::ostream& out,
const ast::CallExpression* expr,
const sem::Builtin* builtin) {
- return CallBuiltinHelper(
- out, expr, builtin,
- [&](TextBuffer* b, const std::vector<std::string>& params) {
- auto* ty = builtin->Parameters()[0]->Type();
- auto in = params[0];
-
- std::string width;
- if (auto* vec = ty->As<sem::Vector>()) {
- width = std::to_string(vec->Width());
- }
+ return CallBuiltinHelper(
+ out, expr, builtin, [&](TextBuffer* b, const std::vector<std::string>& params) {
+ auto* ty = builtin->Parameters()[0]->Type();
+ auto in = params[0];
+
+ std::string width;
+ if (auto* vec = ty->As<sem::Vector>()) {
+ width = std::to_string(vec->Width());
+ }
- // Emit the builtin return type unique to this overload. This does not
- // exist in the AST, so it will not be generated in Generate().
- if (!EmitStructType(&helpers_,
- builtin->ReturnType()->As<sem::Struct>())) {
- return false;
- }
+ // Emit the builtin return type unique to this overload. This does not
+ // exist in the AST, so it will not be generated in Generate().
+ if (!EmitStructType(&helpers_, builtin->ReturnType()->As<sem::Struct>())) {
+ return false;
+ }
- line(b) << "int" << width << " exp;";
- line(b) << "float" << width << " sig = frexp(" << in << ", exp);";
- line(b) << "return {sig, exp};";
- return true;
- });
+ line(b) << "int" << width << " exp;";
+ line(b) << "float" << width << " sig = frexp(" << in << ", exp);";
+ line(b) << "return {sig, exp};";
+ return true;
+ });
}
bool GeneratorImpl::EmitDegreesCall(std::ostream& out,
const ast::CallExpression* expr,
const sem::Builtin* builtin) {
- return CallBuiltinHelper(
- out, expr, builtin,
- [&](TextBuffer* b, const std::vector<std::string>& params) {
- line(b) << "return " << params[0] << " * " << std::setprecision(20)
- << sem::kRadToDeg << ";";
- return true;
- });
+ return CallBuiltinHelper(out, expr, builtin,
+ [&](TextBuffer* b, const std::vector<std::string>& params) {
+ line(b) << "return " << params[0] << " * " << std::setprecision(20)
+ << sem::kRadToDeg << ";";
+ return true;
+ });
}
bool GeneratorImpl::EmitRadiansCall(std::ostream& out,
const ast::CallExpression* expr,
const sem::Builtin* builtin) {
- return CallBuiltinHelper(
- out, expr, builtin,
- [&](TextBuffer* b, const std::vector<std::string>& params) {
- line(b) << "return " << params[0] << " * " << std::setprecision(20)
- << sem::kDegToRad << ";";
- return true;
- });
+ return CallBuiltinHelper(out, expr, builtin,
+ [&](TextBuffer* b, const std::vector<std::string>& params) {
+ line(b) << "return " << params[0] << " * " << std::setprecision(20)
+ << sem::kDegToRad << ";";
+ return true;
+ });
}
std::string GeneratorImpl::generate_builtin_name(const sem::Builtin* builtin) {
- std::string out = "";
- switch (builtin->Type()) {
- case sem::BuiltinType::kAcos:
- case sem::BuiltinType::kAll:
- case sem::BuiltinType::kAny:
- case sem::BuiltinType::kAsin:
- case sem::BuiltinType::kAtan:
- case sem::BuiltinType::kAtan2:
- case sem::BuiltinType::kCeil:
- case sem::BuiltinType::kCos:
- case sem::BuiltinType::kCosh:
- case sem::BuiltinType::kCross:
- case sem::BuiltinType::kDeterminant:
- case sem::BuiltinType::kDistance:
- case sem::BuiltinType::kDot:
- case sem::BuiltinType::kExp:
- case sem::BuiltinType::kExp2:
- case sem::BuiltinType::kFloor:
- case sem::BuiltinType::kFma:
- case sem::BuiltinType::kFract:
- case sem::BuiltinType::kFrexp:
- case sem::BuiltinType::kLength:
- case sem::BuiltinType::kLdexp:
- case sem::BuiltinType::kLog:
- case sem::BuiltinType::kLog2:
- case sem::BuiltinType::kMix:
- case sem::BuiltinType::kModf:
- case sem::BuiltinType::kNormalize:
- case sem::BuiltinType::kPow:
- case sem::BuiltinType::kReflect:
- case sem::BuiltinType::kRefract:
- case sem::BuiltinType::kSelect:
- case sem::BuiltinType::kSin:
- case sem::BuiltinType::kSinh:
- case sem::BuiltinType::kSqrt:
- case sem::BuiltinType::kStep:
- case sem::BuiltinType::kTan:
- case sem::BuiltinType::kTanh:
- case sem::BuiltinType::kTranspose:
- case sem::BuiltinType::kTrunc:
- case sem::BuiltinType::kSign:
- case sem::BuiltinType::kClamp:
- out += builtin->str();
- break;
- case sem::BuiltinType::kAbs:
- if (builtin->ReturnType()->is_float_scalar_or_vector()) {
- out += "fabs";
- } else {
- out += "abs";
- }
- break;
- case sem::BuiltinType::kCountLeadingZeros:
- out += "clz";
- break;
- case sem::BuiltinType::kCountOneBits:
- out += "popcount";
- break;
- case sem::BuiltinType::kCountTrailingZeros:
- out += "ctz";
- break;
- case sem::BuiltinType::kDpdx:
- case sem::BuiltinType::kDpdxCoarse:
- case sem::BuiltinType::kDpdxFine:
- out += "dfdx";
- break;
- case sem::BuiltinType::kDpdy:
- case sem::BuiltinType::kDpdyCoarse:
- case sem::BuiltinType::kDpdyFine:
- out += "dfdy";
- break;
- case sem::BuiltinType::kExtractBits:
- out += "extract_bits";
- break;
- case sem::BuiltinType::kInsertBits:
- out += "insert_bits";
- break;
- case sem::BuiltinType::kFwidth:
- case sem::BuiltinType::kFwidthCoarse:
- case sem::BuiltinType::kFwidthFine:
- out += "fwidth";
- break;
- case sem::BuiltinType::kMax:
- if (builtin->ReturnType()->is_float_scalar_or_vector()) {
- out += "fmax";
- } else {
- out += "max";
- }
- break;
- case sem::BuiltinType::kMin:
- if (builtin->ReturnType()->is_float_scalar_or_vector()) {
- out += "fmin";
- } else {
- out += "min";
- }
- break;
- case sem::BuiltinType::kFaceForward:
- out += "faceforward";
- break;
- case sem::BuiltinType::kPack4x8snorm:
- out += "pack_float_to_snorm4x8";
- break;
- case sem::BuiltinType::kPack4x8unorm:
- out += "pack_float_to_unorm4x8";
- break;
- case sem::BuiltinType::kPack2x16snorm:
- out += "pack_float_to_snorm2x16";
- break;
- case sem::BuiltinType::kPack2x16unorm:
- out += "pack_float_to_unorm2x16";
- break;
- case sem::BuiltinType::kReverseBits:
- out += "reverse_bits";
- break;
- case sem::BuiltinType::kRound:
- out += "rint";
- break;
- case sem::BuiltinType::kSmoothstep:
- case sem::BuiltinType::kSmoothStep:
- out += "smoothstep";
- break;
- case sem::BuiltinType::kInverseSqrt:
- out += "rsqrt";
- break;
- case sem::BuiltinType::kUnpack4x8snorm:
- out += "unpack_snorm4x8_to_float";
- break;
- case sem::BuiltinType::kUnpack4x8unorm:
- out += "unpack_unorm4x8_to_float";
- break;
- case sem::BuiltinType::kUnpack2x16snorm:
- out += "unpack_snorm2x16_to_float";
- break;
- case sem::BuiltinType::kUnpack2x16unorm:
- out += "unpack_unorm2x16_to_float";
- break;
- case sem::BuiltinType::kArrayLength:
- diagnostics_.add_error(
- diag::System::Writer,
- "Unable to translate builtin: " + std::string(builtin->str()) +
- "\nDid you forget to pass array_length_from_uniform generator "
- "options?");
- return "";
- default:
- diagnostics_.add_error(
- diag::System::Writer,
- "Unknown import method: " + std::string(builtin->str()));
- return "";
- }
- return out;
+ std::string out = "";
+ switch (builtin->Type()) {
+ case sem::BuiltinType::kAcos:
+ case sem::BuiltinType::kAll:
+ case sem::BuiltinType::kAny:
+ case sem::BuiltinType::kAsin:
+ case sem::BuiltinType::kAtan:
+ case sem::BuiltinType::kAtan2:
+ case sem::BuiltinType::kCeil:
+ case sem::BuiltinType::kCos:
+ case sem::BuiltinType::kCosh:
+ case sem::BuiltinType::kCross:
+ case sem::BuiltinType::kDeterminant:
+ case sem::BuiltinType::kDistance:
+ case sem::BuiltinType::kDot:
+ case sem::BuiltinType::kExp:
+ case sem::BuiltinType::kExp2:
+ case sem::BuiltinType::kFloor:
+ case sem::BuiltinType::kFma:
+ case sem::BuiltinType::kFract:
+ case sem::BuiltinType::kFrexp:
+ case sem::BuiltinType::kLength:
+ case sem::BuiltinType::kLdexp:
+ case sem::BuiltinType::kLog:
+ case sem::BuiltinType::kLog2:
+ case sem::BuiltinType::kMix:
+ case sem::BuiltinType::kModf:
+ case sem::BuiltinType::kNormalize:
+ case sem::BuiltinType::kPow:
+ case sem::BuiltinType::kReflect:
+ case sem::BuiltinType::kRefract:
+ case sem::BuiltinType::kSelect:
+ case sem::BuiltinType::kSin:
+ case sem::BuiltinType::kSinh:
+ case sem::BuiltinType::kSqrt:
+ case sem::BuiltinType::kStep:
+ case sem::BuiltinType::kTan:
+ case sem::BuiltinType::kTanh:
+ case sem::BuiltinType::kTranspose:
+ case sem::BuiltinType::kTrunc:
+ case sem::BuiltinType::kSign:
+ case sem::BuiltinType::kClamp:
+ out += builtin->str();
+ break;
+ case sem::BuiltinType::kAbs:
+ if (builtin->ReturnType()->is_float_scalar_or_vector()) {
+ out += "fabs";
+ } else {
+ out += "abs";
+ }
+ break;
+ case sem::BuiltinType::kCountLeadingZeros:
+ out += "clz";
+ break;
+ case sem::BuiltinType::kCountOneBits:
+ out += "popcount";
+ break;
+ case sem::BuiltinType::kCountTrailingZeros:
+ out += "ctz";
+ break;
+ case sem::BuiltinType::kDpdx:
+ case sem::BuiltinType::kDpdxCoarse:
+ case sem::BuiltinType::kDpdxFine:
+ out += "dfdx";
+ break;
+ case sem::BuiltinType::kDpdy:
+ case sem::BuiltinType::kDpdyCoarse:
+ case sem::BuiltinType::kDpdyFine:
+ out += "dfdy";
+ break;
+ case sem::BuiltinType::kExtractBits:
+ out += "extract_bits";
+ break;
+ case sem::BuiltinType::kInsertBits:
+ out += "insert_bits";
+ break;
+ case sem::BuiltinType::kFwidth:
+ case sem::BuiltinType::kFwidthCoarse:
+ case sem::BuiltinType::kFwidthFine:
+ out += "fwidth";
+ break;
+ case sem::BuiltinType::kMax:
+ if (builtin->ReturnType()->is_float_scalar_or_vector()) {
+ out += "fmax";
+ } else {
+ out += "max";
+ }
+ break;
+ case sem::BuiltinType::kMin:
+ if (builtin->ReturnType()->is_float_scalar_or_vector()) {
+ out += "fmin";
+ } else {
+ out += "min";
+ }
+ break;
+ case sem::BuiltinType::kFaceForward:
+ out += "faceforward";
+ break;
+ case sem::BuiltinType::kPack4x8snorm:
+ out += "pack_float_to_snorm4x8";
+ break;
+ case sem::BuiltinType::kPack4x8unorm:
+ out += "pack_float_to_unorm4x8";
+ break;
+ case sem::BuiltinType::kPack2x16snorm:
+ out += "pack_float_to_snorm2x16";
+ break;
+ case sem::BuiltinType::kPack2x16unorm:
+ out += "pack_float_to_unorm2x16";
+ break;
+ case sem::BuiltinType::kReverseBits:
+ out += "reverse_bits";
+ break;
+ case sem::BuiltinType::kRound:
+ out += "rint";
+ break;
+ case sem::BuiltinType::kSmoothstep:
+ case sem::BuiltinType::kSmoothStep:
+ out += "smoothstep";
+ break;
+ case sem::BuiltinType::kInverseSqrt:
+ out += "rsqrt";
+ break;
+ case sem::BuiltinType::kUnpack4x8snorm:
+ out += "unpack_snorm4x8_to_float";
+ break;
+ case sem::BuiltinType::kUnpack4x8unorm:
+ out += "unpack_unorm4x8_to_float";
+ break;
+ case sem::BuiltinType::kUnpack2x16snorm:
+ out += "unpack_snorm2x16_to_float";
+ break;
+ case sem::BuiltinType::kUnpack2x16unorm:
+ out += "unpack_unorm2x16_to_float";
+ break;
+ case sem::BuiltinType::kArrayLength:
+ diagnostics_.add_error(
+ diag::System::Writer,
+ "Unable to translate builtin: " + std::string(builtin->str()) +
+ "\nDid you forget to pass array_length_from_uniform generator "
+ "options?");
+ return "";
+ default:
+ diagnostics_.add_error(diag::System::Writer,
+ "Unknown import method: " + std::string(builtin->str()));
+ return "";
+ }
+ return out;
}
bool GeneratorImpl::EmitCase(const ast::CaseStatement* stmt) {
- if (stmt->IsDefault()) {
- line() << "default: {";
- } else {
- for (auto* selector : stmt->selectors) {
- auto out = line();
- out << "case ";
- if (!EmitLiteral(out, selector)) {
- return false;
- }
- out << ":";
- if (selector == stmt->selectors.back()) {
- out << " {";
- }
+ if (stmt->IsDefault()) {
+ line() << "default: {";
+ } else {
+ for (auto* selector : stmt->selectors) {
+ auto out = line();
+ out << "case ";
+ if (!EmitLiteral(out, selector)) {
+ return false;
+ }
+ out << ":";
+ if (selector == stmt->selectors.back()) {
+ out << " {";
+ }
+ }
}
- }
- {
- ScopedIndent si(this);
+ {
+ ScopedIndent si(this);
- for (auto* s : stmt->body->statements) {
- if (!EmitStatement(s)) {
- return false;
- }
- }
+ for (auto* s : stmt->body->statements) {
+ if (!EmitStatement(s)) {
+ return false;
+ }
+ }
- if (!last_is_break_or_fallthrough(stmt->body)) {
- line() << "break;";
+ if (!last_is_break_or_fallthrough(stmt->body)) {
+ line() << "break;";
+ }
}
- }
- line() << "}";
+ line() << "}";
- return true;
+ return true;
}
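// A sketch of the MSL that EmitCase produces for a WGSL case with two selectors
// (foo() stands in for the case body):

    case 1:
    case 2: {
      foo();
      break;
    }

// The trailing break is only appended when the body does not already end in a
// break or fallthrough statement; fallthrough itself is written out as a
// /* fallthrough */ comment by EmitStatement.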
bool GeneratorImpl::EmitContinue(const ast::ContinueStatement*) {
- if (!emit_continuing_()) {
- return false;
- }
+ if (!emit_continuing_()) {
+ return false;
+ }
- line() << "continue;";
- return true;
+ line() << "continue;";
+ return true;
}
bool GeneratorImpl::EmitZeroValue(std::ostream& out, const sem::Type* type) {
- return Switch(
- type,
- [&](const sem::Bool*) {
- out << "false";
+ return Switch(
+ type,
+ [&](const sem::Bool*) {
+ out << "false";
+ return true;
+ },
+ [&](const sem::F16*) {
+ // Placeholder for emitting f16 zero value
+ diagnostics_.add_error(diag::System::Writer,
+ "Type f16 is not completely implemented yet");
+ return false;
+ },
+ [&](const sem::F32*) {
+ out << "0.0f";
+ return true;
+ },
+ [&](const sem::I32*) {
+ out << "0";
+ return true;
+ },
+ [&](const sem::U32*) {
+ out << "0u";
+ return true;
+ },
+ [&](const sem::Vector* vec) { //
+ return EmitZeroValue(out, vec->type());
+ },
+ [&](const sem::Matrix* mat) {
+ if (!EmitType(out, mat, "")) {
+ return false;
+ }
+ ScopedParen sp(out);
+ return EmitZeroValue(out, mat->type());
+ },
+ [&](const sem::Array* arr) {
+ out << "{";
+ TINT_DEFER(out << "}");
+ return EmitZeroValue(out, arr->ElemType());
+ },
+ [&](const sem::Struct*) {
+ out << "{}";
+ return true;
+ },
+ [&](Default) {
+ diagnostics_.add_error(
+ diag::System::Writer,
+ "Invalid type for zero emission: " + type->FriendlyName(builder_.Symbols()));
+ return false;
+ });
+}
+
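// Roughly, the zero values chosen above look as follows when they land in a
// declaration (names and the struct type S are placeholders for illustration):

    float a = 0.0f;
    float3 v = 0.0f;                // vectors reuse the scalar zero; MSL splats it
    float2x3 m = float2x3(0.0f);
    float arr[4] = {0.0f};          // brace init zero-fills the remaining elements
    S s = {};

// f16 is still rejected with a "not completely implemented yet" diagnostic,
// matching the placeholder case above.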
+bool GeneratorImpl::EmitConstant(std::ostream& out, const sem::Constant& constant) {
+ auto emit_bool = [&](size_t element_idx) {
+ out << (constant.Element<AInt>(element_idx) ? "true" : "false");
return true;
- },
- [&](const sem::F32*) {
- out << "0.0f";
+ };
+ auto emit_f32 = [&](size_t element_idx) {
+ PrintF32(out, static_cast<float>(constant.Element<AFloat>(element_idx)));
return true;
- },
- [&](const sem::I32*) {
- out << "0";
+ };
+ auto emit_i32 = [&](size_t element_idx) {
+ PrintI32(out, static_cast<int32_t>(constant.Element<AInt>(element_idx).value));
return true;
- },
- [&](const sem::U32*) {
- out << "0u";
+ };
+ auto emit_u32 = [&](size_t element_idx) {
+ out << constant.Element<AInt>(element_idx).value << "u";
return true;
- },
- [&](const sem::Vector* vec) { //
- return EmitZeroValue(out, vec->type());
- },
- [&](const sem::Matrix* mat) {
- if (!EmitType(out, mat, "")) {
- return false;
+ };
+ auto emit_vector = [&](const sem::Vector* vec_ty, size_t start, size_t end) {
+ if (!EmitType(out, vec_ty, "")) {
+ return false;
}
- out << "(";
- TINT_DEFER(out << ")");
- return EmitZeroValue(out, mat->type());
- },
- [&](const sem::Array* arr) {
- out << "{";
- TINT_DEFER(out << "}");
- return EmitZeroValue(out, arr->ElemType());
- },
- [&](const sem::Struct*) {
- out << "{}";
- return true;
- },
- [&](Default) {
- diagnostics_.add_error(diag::System::Writer,
- "Invalid type for zero emission: " +
- type->FriendlyName(builder_.Symbols()));
- return false;
- });
-}
-bool GeneratorImpl::EmitLiteral(std::ostream& out,
- const ast::LiteralExpression* lit) {
- return Switch(
- lit,
- [&](const ast::BoolLiteralExpression* l) {
- out << (l->value ? "true" : "false");
- return true;
- },
- [&](const ast::FloatLiteralExpression* l) {
- if (std::isinf(l->value)) {
- out << (l->value >= 0 ? "INFINITY" : "-INFINITY");
- } else if (std::isnan(l->value)) {
- out << "NAN";
- } else {
- out << FloatToString(l->value) << "f";
+ ScopedParen sp(out);
+
+ auto emit_els = [&](auto emit_el) {
+ if (constant.AllEqual(start, end)) {
+ return emit_el(start);
+ }
+ for (size_t i = start; i < end; i++) {
+ if (i > start) {
+ out << ", ";
+ }
+ if (!emit_el(i)) {
+ return false;
+ }
+ }
+ return true;
+ };
+ return Switch(
+ vec_ty->type(), //
+ [&](const sem::Bool*) { return emit_els(emit_bool); }, //
+ [&](const sem::F32*) { return emit_els(emit_f32); }, //
+ [&](const sem::I32*) { return emit_els(emit_i32); }, //
+ [&](const sem::U32*) { return emit_els(emit_u32); }, //
+ [&](Default) {
+ diagnostics_.add_error(diag::System::Writer,
+ "unhandled constant vector element type: " +
+ builder_.FriendlyName(vec_ty->type()));
+ return false;
+ });
+ };
+ auto emit_matrix = [&](const sem::Matrix* m) {
+ if (!EmitType(out, constant.Type(), "")) {
+ return false;
}
- return true;
- },
- [&](const ast::SintLiteralExpression* l) {
- // MSL (and C++) parse `-2147483648` as a `long` because it parses
- // unary minus and `2147483648` as separate tokens, and the latter
- // doesn't fit into an (32-bit) `int`. WGSL, OTOH, parses this as an
- // `i32`. To avoid issues with `long` to `int` casts, emit
- // `(2147483647 - 1)` instead, which ensures the expression type is
- // `int`.
- const auto int_min = std::numeric_limits<int32_t>::min();
- if (l->ValueAsI32() == int_min) {
- out << "(" << int_min + 1 << " - 1)";
- } else {
- out << l->value;
+
+ ScopedParen sp(out);
+
+ for (size_t column_idx = 0; column_idx < m->columns(); column_idx++) {
+ if (column_idx > 0) {
+ out << ", ";
+ }
+ size_t start = m->rows() * column_idx;
+ size_t end = m->rows() * (column_idx + 1);
+ if (!emit_vector(m->ColumnType(), start, end)) {
+ return false;
+ }
}
return true;
- },
- [&](const ast::UintLiteralExpression* l) {
- out << l->value << "u";
- return true;
- },
- [&](Default) {
- diagnostics_.add_error(diag::System::Writer, "unknown literal type");
- return false;
- });
+ };
+ return Switch(
+ constant.Type(), //
+ [&](const sem::Bool*) { return emit_bool(0); }, //
+ [&](const sem::F32*) { return emit_f32(0); }, //
+ [&](const sem::I32*) { return emit_i32(0); }, //
+ [&](const sem::U32*) { return emit_u32(0); }, //
+ [&](const sem::Vector* v) { return emit_vector(v, 0, constant.ElementCount()); }, //
+ [&](const sem::Matrix* m) { return emit_matrix(m); }, //
+ [&](Default) {
+ diagnostics_.add_error(
+ diag::System::Writer,
+ "unhandled constant type: " + builder_.FriendlyName(constant.Type()));
+ return false;
+ });
}
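// A sketch of EmitConstant output (the exact literal spelling comes from
// PrintF32/PrintI32, assumed here to render 2.0 as 2.0f); the right-hand sides
// are what EmitConstant writes, the declarations are just framing:

    float3 a = float3(2.0f);                                         // vec3<f32>(2.0): AllEqual splat
    float3 b = float3(1.0f, 2.0f, 3.0f);                             // mixed elements
    float2x2 m = float2x2(float2(0.0f, 1.0f), float2(2.0f, 3.0f));   // per-column vectors

// Scalars are printed bare (true/false, 0, 0u, ...); only vectors and matrices
// get the type-constructor wrapping shown here.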
-bool GeneratorImpl::EmitExpression(std::ostream& out,
- const ast::Expression* expr) {
- return Switch(
- expr,
- [&](const ast::IndexAccessorExpression* a) { //
- return EmitIndexAccessor(out, a);
- },
- [&](const ast::BinaryExpression* b) { //
- return EmitBinary(out, b);
- },
- [&](const ast::BitcastExpression* b) { //
- return EmitBitcast(out, b);
- },
- [&](const ast::CallExpression* c) { //
- return EmitCall(out, c);
- },
- [&](const ast::IdentifierExpression* i) { //
- return EmitIdentifier(out, i);
- },
- [&](const ast::LiteralExpression* l) { //
- return EmitLiteral(out, l);
- },
- [&](const ast::MemberAccessorExpression* m) { //
- return EmitMemberAccessor(out, m);
- },
- [&](const ast::UnaryOpExpression* u) { //
- return EmitUnaryOp(out, u);
- },
- [&](Default) { //
- diagnostics_.add_error(
- diag::System::Writer,
- "unknown expression type: " + std::string(expr->TypeInfo().name));
- return false;
- });
+bool GeneratorImpl::EmitLiteral(std::ostream& out, const ast::LiteralExpression* lit) {
+ return Switch(
+ lit,
+ [&](const ast::BoolLiteralExpression* l) {
+ out << (l->value ? "true" : "false");
+ return true;
+ },
+ [&](const ast::FloatLiteralExpression* l) {
+ PrintF32(out, static_cast<float>(l->value));
+ return true;
+ },
+ [&](const ast::IntLiteralExpression* i) {
+ switch (i->suffix) {
+ case ast::IntLiteralExpression::Suffix::kNone:
+ case ast::IntLiteralExpression::Suffix::kI: {
+ PrintI32(out, static_cast<int32_t>(i->value));
+ return true;
+ }
+ case ast::IntLiteralExpression::Suffix::kU: {
+ out << i->value << "u";
+ return true;
+ }
+ }
+ diagnostics_.add_error(diag::System::Writer, "unknown integer literal suffix type");
+ return false;
+ },
+ [&](Default) {
+ diagnostics_.add_error(diag::System::Writer, "unknown literal type");
+ return false;
+ });
+}
+
+bool GeneratorImpl::EmitExpression(std::ostream& out, const ast::Expression* expr) {
+ if (auto* sem = builder_.Sem().Get(expr)) {
+ if (auto constant = sem->ConstantValue()) {
+ return EmitConstant(out, constant);
+ }
+ }
+ return Switch(
+ expr,
+ [&](const ast::IndexAccessorExpression* a) { //
+ return EmitIndexAccessor(out, a);
+ },
+ [&](const ast::BinaryExpression* b) { //
+ return EmitBinary(out, b);
+ },
+ [&](const ast::BitcastExpression* b) { //
+ return EmitBitcast(out, b);
+ },
+ [&](const ast::CallExpression* c) { //
+ return EmitCall(out, c);
+ },
+ [&](const ast::IdentifierExpression* i) { //
+ return EmitIdentifier(out, i);
+ },
+ [&](const ast::LiteralExpression* l) { //
+ return EmitLiteral(out, l);
+ },
+ [&](const ast::MemberAccessorExpression* m) { //
+ return EmitMemberAccessor(out, m);
+ },
+ [&](const ast::UnaryOpExpression* u) { //
+ return EmitUnaryOp(out, u);
+ },
+ [&](Default) { //
+ diagnostics_.add_error(diag::System::Writer, "unknown expression type: " +
+ std::string(expr->TypeInfo().name));
+ return false;
+ });
}
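// Net effect of the ConstantValue() early-out above: any expression the resolver
// has already const-evaluated is emitted as its folded value, so a WGSL
// subexpression such as (2 + 3) * 4 reaches the MSL output simply as 24 rather
// than as a nested tree of binary operators.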
void GeneratorImpl::EmitStage(std::ostream& out, ast::PipelineStage stage) {
- switch (stage) {
- case ast::PipelineStage::kFragment:
- out << "fragment";
- break;
- case ast::PipelineStage::kVertex:
- out << "vertex";
- break;
- case ast::PipelineStage::kCompute:
- out << "kernel";
- break;
- case ast::PipelineStage::kNone:
- break;
- }
- return;
+ switch (stage) {
+ case ast::PipelineStage::kFragment:
+ out << "fragment";
+ break;
+ case ast::PipelineStage::kVertex:
+ out << "vertex";
+ break;
+ case ast::PipelineStage::kCompute:
+ out << "kernel";
+ break;
+ case ast::PipelineStage::kNone:
+ break;
+ }
+ return;
}
bool GeneratorImpl::EmitFunction(const ast::Function* func) {
- auto* func_sem = program_->Sem().Get(func);
+ auto* func_sem = program_->Sem().Get(func);
- {
- auto out = line();
- if (!EmitType(out, func_sem->ReturnType(), "")) {
- return false;
- }
- out << " " << program_->Symbols().NameFor(func->symbol) << "(";
+ {
+ auto out = line();
+ if (!EmitType(out, func_sem->ReturnType(), "")) {
+ return false;
+ }
+ out << " " << program_->Symbols().NameFor(func->symbol) << "(";
- bool first = true;
- for (auto* v : func->params) {
- if (!first) {
- out << ", ";
- }
- first = false;
+ bool first = true;
+ for (auto* v : func->params) {
+ if (!first) {
+ out << ", ";
+ }
+ first = false;
- auto* type = program_->Sem().Get(v)->Type();
+ auto* type = program_->Sem().Get(v)->Type();
- std::string param_name =
- "const " + program_->Symbols().NameFor(v->symbol);
- if (!EmitType(out, type, param_name)) {
- return false;
- }
- // Parameter name is output as part of the type for arrays and pointers.
- if (!type->Is<sem::Array>() && !type->Is<sem::Pointer>()) {
- out << " " << program_->Symbols().NameFor(v->symbol);
- }
- }
+ std::string param_name = "const " + program_->Symbols().NameFor(v->symbol);
+ if (!EmitType(out, type, param_name)) {
+ return false;
+ }
+ // Parameter name is output as part of the type for arrays and pointers.
+ if (!type->Is<sem::Array>() && !type->Is<sem::Pointer>()) {
+ out << " " << program_->Symbols().NameFor(v->symbol);
+ }
+ }
- out << ") {";
- }
+ out << ") {";
+ }
- if (!EmitStatementsWithIndent(func->body->statements)) {
- return false;
- }
+ if (!EmitStatementsWithIndent(func->body->statements)) {
+ return false;
+ }
- line() << "}";
+ line() << "}";
- return true;
+ return true;
}
std::string GeneratorImpl::builtin_to_attribute(ast::Builtin builtin) const {
- switch (builtin) {
- case ast::Builtin::kPosition:
- return "position";
- case ast::Builtin::kVertexIndex:
- return "vertex_id";
- case ast::Builtin::kInstanceIndex:
- return "instance_id";
- case ast::Builtin::kFrontFacing:
- return "front_facing";
- case ast::Builtin::kFragDepth:
- return "depth(any)";
- case ast::Builtin::kLocalInvocationId:
- return "thread_position_in_threadgroup";
- case ast::Builtin::kLocalInvocationIndex:
- return "thread_index_in_threadgroup";
- case ast::Builtin::kGlobalInvocationId:
- return "thread_position_in_grid";
- case ast::Builtin::kWorkgroupId:
- return "threadgroup_position_in_grid";
- case ast::Builtin::kNumWorkgroups:
- return "threadgroups_per_grid";
- case ast::Builtin::kSampleIndex:
- return "sample_id";
- case ast::Builtin::kSampleMask:
- return "sample_mask";
- case ast::Builtin::kPointSize:
- return "point_size";
- default:
- break;
- }
- return "";
+ switch (builtin) {
+ case ast::Builtin::kPosition:
+ return "position";
+ case ast::Builtin::kVertexIndex:
+ return "vertex_id";
+ case ast::Builtin::kInstanceIndex:
+ return "instance_id";
+ case ast::Builtin::kFrontFacing:
+ return "front_facing";
+ case ast::Builtin::kFragDepth:
+ return "depth(any)";
+ case ast::Builtin::kLocalInvocationId:
+ return "thread_position_in_threadgroup";
+ case ast::Builtin::kLocalInvocationIndex:
+ return "thread_index_in_threadgroup";
+ case ast::Builtin::kGlobalInvocationId:
+ return "thread_position_in_grid";
+ case ast::Builtin::kWorkgroupId:
+ return "threadgroup_position_in_grid";
+ case ast::Builtin::kNumWorkgroups:
+ return "threadgroups_per_grid";
+ case ast::Builtin::kSampleIndex:
+ return "sample_id";
+ case ast::Builtin::kSampleMask:
+ return "sample_mask";
+ case ast::Builtin::kPointSize:
+ return "point_size";
+ default:
+ break;
+ }
+ return "";
}
-std::string GeneratorImpl::interpolation_to_attribute(
- ast::InterpolationType type,
- ast::InterpolationSampling sampling) const {
- std::string attr;
- switch (sampling) {
- case ast::InterpolationSampling::kCenter:
- attr = "center_";
- break;
- case ast::InterpolationSampling::kCentroid:
- attr = "centroid_";
- break;
- case ast::InterpolationSampling::kSample:
- attr = "sample_";
- break;
- case ast::InterpolationSampling::kNone:
- break;
- }
- switch (type) {
- case ast::InterpolationType::kPerspective:
- attr += "perspective";
- break;
- case ast::InterpolationType::kLinear:
- attr += "no_perspective";
- break;
- case ast::InterpolationType::kFlat:
- attr += "flat";
- break;
- }
- return attr;
+std::string GeneratorImpl::interpolation_to_attribute(ast::InterpolationType type,
+ ast::InterpolationSampling sampling) const {
+ std::string attr;
+ switch (sampling) {
+ case ast::InterpolationSampling::kCenter:
+ attr = "center_";
+ break;
+ case ast::InterpolationSampling::kCentroid:
+ attr = "centroid_";
+ break;
+ case ast::InterpolationSampling::kSample:
+ attr = "sample_";
+ break;
+ case ast::InterpolationSampling::kNone:
+ break;
+ }
+ switch (type) {
+ case ast::InterpolationType::kPerspective:
+ attr += "perspective";
+ break;
+ case ast::InterpolationType::kLinear:
+ attr += "no_perspective";
+ break;
+ case ast::InterpolationType::kFlat:
+ attr += "flat";
+ break;
+ }
+ return attr;
}
bool GeneratorImpl::EmitEntryPointFunction(const ast::Function* func) {
- auto func_name = program_->Symbols().NameFor(func->symbol);
-
- // Returns the binding index of a variable, requiring that the group
- // attribute have a value of zero.
- const uint32_t kInvalidBindingIndex = std::numeric_limits<uint32_t>::max();
- auto get_binding_index = [&](const ast::Variable* var) -> uint32_t {
- auto bp = var->BindingPoint();
- if (bp.group == nullptr || bp.binding == nullptr) {
- TINT_ICE(Writer, diagnostics_)
- << "missing binding attributes for entry point parameter";
- return kInvalidBindingIndex;
- }
- if (bp.group->value != 0) {
- TINT_ICE(Writer, diagnostics_)
- << "encountered non-zero resource group index (use "
- "BindingRemapper to fix)";
- return kInvalidBindingIndex;
- }
- return bp.binding->value;
- };
-
- {
- auto out = line();
+ auto func_name = program_->Symbols().NameFor(func->symbol);
+
+ // Returns the binding index of a variable, requiring that the group
+ // attribute have a value of zero.
+ const uint32_t kInvalidBindingIndex = std::numeric_limits<uint32_t>::max();
+ auto get_binding_index = [&](const ast::Variable* var) -> uint32_t {
+ auto bp = var->BindingPoint();
+ if (bp.group == nullptr || bp.binding == nullptr) {
+ TINT_ICE(Writer, diagnostics_)
+ << "missing binding attributes for entry point parameter";
+ return kInvalidBindingIndex;
+ }
+ if (bp.group->value != 0) {
+ TINT_ICE(Writer, diagnostics_) << "encountered non-zero resource group index (use "
+ "BindingRemapper to fix)";
+ return kInvalidBindingIndex;
+ }
+ return bp.binding->value;
+ };
- EmitStage(out, func->PipelineStage());
- out << " " << func->return_type->FriendlyName(program_->Symbols());
- out << " " << func_name << "(";
+ {
+ auto out = line();
- // Emit entry point parameters.
- bool first = true;
- for (auto* var : func->params) {
- if (!first) {
- out << ", ";
- }
- first = false;
+ EmitStage(out, func->PipelineStage());
+ out << " " << func->return_type->FriendlyName(program_->Symbols());
+ out << " " << func_name << "(";
- auto* type = program_->Sem().Get(var)->Type()->UnwrapRef();
+ // Emit entry point parameters.
+ bool first = true;
+ for (auto* var : func->params) {
+ if (!first) {
+ out << ", ";
+ }
+ first = false;
- auto param_name = program_->Symbols().NameFor(var->symbol);
- if (!EmitType(out, type, param_name)) {
- return false;
- }
- // Parameter name is output as part of the type for arrays and pointers.
- if (!type->Is<sem::Array>() && !type->Is<sem::Pointer>()) {
- out << " " << param_name;
- }
-
- if (type->Is<sem::Struct>()) {
- out << " [[stage_in]]";
- } else if (type->is_handle()) {
- uint32_t binding = get_binding_index(var);
- if (binding == kInvalidBindingIndex) {
- return false;
- }
- if (var->type->Is<ast::Sampler>()) {
- out << " [[sampler(" << binding << ")]]";
- } else if (var->type->Is<ast::Texture>()) {
- out << " [[texture(" << binding << ")]]";
- } else {
- TINT_ICE(Writer, diagnostics_)
- << "invalid handle type entry point parameter";
- return false;
- }
- } else if (auto* ptr = var->type->As<ast::Pointer>()) {
- auto sc = ptr->storage_class;
- if (sc == ast::StorageClass::kWorkgroup) {
- auto& allocations = workgroup_allocations_[func_name];
- out << " [[threadgroup(" << allocations.size() << ")]]";
- allocations.push_back(program_->Sem().Get(ptr->type)->Size());
- } else if (sc == ast::StorageClass::kStorage ||
- sc == ast::StorageClass::kUniform) {
- uint32_t binding = get_binding_index(var);
- if (binding == kInvalidBindingIndex) {
- return false;
- }
- out << " [[buffer(" << binding << ")]]";
- } else {
- TINT_ICE(Writer, diagnostics_)
- << "invalid pointer storage class for entry point parameter";
- return false;
- }
- } else {
- auto& attrs = var->attributes;
- bool builtin_found = false;
- for (auto* attr : attrs) {
- auto* builtin = attr->As<ast::BuiltinAttribute>();
- if (!builtin) {
- continue;
- }
-
- builtin_found = true;
-
- auto name = builtin_to_attribute(builtin->builtin);
- if (name.empty()) {
- diagnostics_.add_error(diag::System::Writer, "unknown builtin");
- return false;
- }
- out << " [[" << name << "]]";
- }
- if (!builtin_found) {
- TINT_ICE(Writer, diagnostics_) << "Unsupported entry point parameter";
+ auto* type = program_->Sem().Get(var)->Type()->UnwrapRef();
+
+ auto param_name = program_->Symbols().NameFor(var->symbol);
+ if (!EmitType(out, type, param_name)) {
+ return false;
+ }
+ // Parameter name is output as part of the type for arrays and pointers.
+ if (!type->Is<sem::Array>() && !type->Is<sem::Pointer>()) {
+ out << " " << param_name;
+ }
+
+ if (type->Is<sem::Struct>()) {
+ out << " [[stage_in]]";
+ } else if (type->is_handle()) {
+ uint32_t binding = get_binding_index(var);
+ if (binding == kInvalidBindingIndex) {
+ return false;
+ }
+ if (var->type->Is<ast::Sampler>()) {
+ out << " [[sampler(" << binding << ")]]";
+ } else if (var->type->Is<ast::Texture>()) {
+ out << " [[texture(" << binding << ")]]";
+ } else {
+ TINT_ICE(Writer, diagnostics_) << "invalid handle type entry point parameter";
+ return false;
+ }
+ } else if (auto* ptr = var->type->As<ast::Pointer>()) {
+ auto sc = ptr->storage_class;
+ if (sc == ast::StorageClass::kWorkgroup) {
+ auto& allocations = workgroup_allocations_[func_name];
+ out << " [[threadgroup(" << allocations.size() << ")]]";
+ allocations.push_back(program_->Sem().Get(ptr->type)->Size());
+ } else if (sc == ast::StorageClass::kStorage || sc == ast::StorageClass::kUniform) {
+ uint32_t binding = get_binding_index(var);
+ if (binding == kInvalidBindingIndex) {
+ return false;
+ }
+ out << " [[buffer(" << binding << ")]]";
+ } else {
+ TINT_ICE(Writer, diagnostics_)
+ << "invalid pointer storage class for entry point parameter";
+ return false;
+ }
+ } else {
+ auto& attrs = var->attributes;
+ bool builtin_found = false;
+ for (auto* attr : attrs) {
+ auto* builtin = attr->As<ast::BuiltinAttribute>();
+ if (!builtin) {
+ continue;
+ }
+
+ builtin_found = true;
+
+ auto name = builtin_to_attribute(builtin->builtin);
+ if (name.empty()) {
+ diagnostics_.add_error(diag::System::Writer, "unknown builtin");
+ return false;
+ }
+ out << " [[" << name << "]]";
+ }
+ if (!builtin_found) {
+ TINT_ICE(Writer, diagnostics_) << "Unsupported entry point parameter";
+ }
+ }
}
- }
+ out << ") {";
}
- out << ") {";
- }
- {
- ScopedIndent si(this);
+ {
+ ScopedIndent si(this);
- if (!EmitStatements(func->body->statements)) {
- return false;
- }
+ if (!EmitStatements(func->body->statements)) {
+ return false;
+ }
- if (!Is<ast::ReturnStatement>(func->body->Last())) {
- ast::ReturnStatement ret(ProgramID{}, Source{});
- if (!EmitStatement(&ret)) {
- return false;
- }
+ if (!Is<ast::ReturnStatement>(func->body->Last())) {
+ ast::ReturnStatement ret(ProgramID{}, Source{});
+ if (!EmitStatement(&ret)) {
+ return false;
+ }
+ }
}
- }
- line() << "}";
- return true;
+ line() << "}";
+ return true;
}
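// A sketch of a compute entry point signature assembled by the code above
// (identifiers are placeholders, and IO is assumed to have been wrapped into
// structs and module-scope resources turned into parameters by earlier
// transforms):

    kernel void comp_main(uint3 gid [[thread_position_in_grid]],
                          device SB* buf [[buffer(0)]],
                          threadgroup Tile* tile [[threadgroup(0)]]) {

// Samplers and textures get [[sampler(n)]] / [[texture(n)]], struct parameters
// get [[stage_in]], and a non-zero @group index is an internal error here: the
// BindingRemapper transform is expected to have flattened all groups to 0.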
-bool GeneratorImpl::EmitIdentifier(std::ostream& out,
- const ast::IdentifierExpression* expr) {
- out << program_->Symbols().NameFor(expr->symbol);
- return true;
+bool GeneratorImpl::EmitIdentifier(std::ostream& out, const ast::IdentifierExpression* expr) {
+ out << program_->Symbols().NameFor(expr->symbol);
+ return true;
}
bool GeneratorImpl::EmitLoop(const ast::LoopStatement* stmt) {
- auto emit_continuing = [this, stmt]() {
- if (stmt->continuing && !stmt->continuing->Empty()) {
- if (!EmitBlock(stmt->continuing)) {
- return false;
- }
- }
- return true;
- };
+ auto emit_continuing = [this, stmt]() {
+ if (stmt->continuing && !stmt->continuing->Empty()) {
+ if (!EmitBlock(stmt->continuing)) {
+ return false;
+ }
+ }
+ return true;
+ };
- TINT_SCOPED_ASSIGNMENT(emit_continuing_, emit_continuing);
- line() << "while (true) {";
- {
- ScopedIndent si(this);
- if (!EmitStatements(stmt->body->statements)) {
- return false;
- }
- if (!emit_continuing_()) {
- return false;
+ TINT_SCOPED_ASSIGNMENT(emit_continuing_, emit_continuing);
+ line() << "while (true) {";
+ {
+ ScopedIndent si(this);
+ if (!EmitStatements(stmt->body->statements)) {
+ return false;
+ }
+ if (!emit_continuing_()) {
+ return false;
+ }
}
- }
- line() << "}";
+ line() << "}";
- return true;
+ return true;
}
bool GeneratorImpl::EmitForLoop(const ast::ForLoopStatement* stmt) {
- TextBuffer init_buf;
- if (auto* init = stmt->initializer) {
- TINT_SCOPED_ASSIGNMENT(current_buffer_, &init_buf);
- if (!EmitStatement(init)) {
- return false;
- }
- }
-
- TextBuffer cond_pre;
- std::stringstream cond_buf;
- if (auto* cond = stmt->condition) {
- TINT_SCOPED_ASSIGNMENT(current_buffer_, &cond_pre);
- if (!EmitExpression(cond_buf, cond)) {
- return false;
- }
- }
-
- TextBuffer cont_buf;
- if (auto* cont = stmt->continuing) {
- TINT_SCOPED_ASSIGNMENT(current_buffer_, &cont_buf);
- if (!EmitStatement(cont)) {
- return false;
- }
- }
-
- // If the for-loop has a multi-statement conditional and / or continuing,
- // then we cannot emit this as a regular for-loop in MSL. Instead we need to
- // generate a `while(true)` loop.
- bool emit_as_loop = cond_pre.lines.size() > 0 || cont_buf.lines.size() > 1;
-
- // If the for-loop has multi-statement initializer, or is going to be
- // emitted as a `while(true)` loop, then declare the initializer
- // statement(s) before the loop in a new block.
- bool nest_in_block =
- init_buf.lines.size() > 1 || (stmt->initializer && emit_as_loop);
- if (nest_in_block) {
- line() << "{";
- increment_indent();
- current_buffer_->Append(init_buf);
- init_buf.lines.clear(); // Don't emit the initializer again in the 'for'
- }
- TINT_DEFER({
- if (nest_in_block) {
- decrement_indent();
- line() << "}";
+ TextBuffer init_buf;
+ if (auto* init = stmt->initializer) {
+ TINT_SCOPED_ASSIGNMENT(current_buffer_, &init_buf);
+ if (!EmitStatement(init)) {
+ return false;
+ }
}
- });
- if (emit_as_loop) {
- auto emit_continuing = [&]() {
- current_buffer_->Append(cont_buf);
- return true;
- };
+ TextBuffer cond_pre;
+ std::stringstream cond_buf;
+ if (auto* cond = stmt->condition) {
+ TINT_SCOPED_ASSIGNMENT(current_buffer_, &cond_pre);
+ if (!EmitExpression(cond_buf, cond)) {
+ return false;
+ }
+ }
- TINT_SCOPED_ASSIGNMENT(emit_continuing_, emit_continuing);
- line() << "while (true) {";
- increment_indent();
+ TextBuffer cont_buf;
+ if (auto* cont = stmt->continuing) {
+ TINT_SCOPED_ASSIGNMENT(current_buffer_, &cont_buf);
+ if (!EmitStatement(cont)) {
+ return false;
+ }
+ }
+
+ // If the for-loop has a multi-statement conditional and / or continuing,
+ // then we cannot emit this as a regular for-loop in MSL. Instead we need to
+ // generate a `while(true)` loop.
+ bool emit_as_loop = cond_pre.lines.size() > 0 || cont_buf.lines.size() > 1;
+
+ // If the for-loop has multi-statement initializer, or is going to be
+ // emitted as a `while(true)` loop, then declare the initializer
+ // statement(s) before the loop in a new block.
+ bool nest_in_block = init_buf.lines.size() > 1 || (stmt->initializer && emit_as_loop);
+ if (nest_in_block) {
+ line() << "{";
+ increment_indent();
+ current_buffer_->Append(init_buf);
+ init_buf.lines.clear(); // Don't emit the initializer again in the 'for'
+ }
TINT_DEFER({
- decrement_indent();
- line() << "}";
+ if (nest_in_block) {
+ decrement_indent();
+ line() << "}";
+ }
});
- if (stmt->condition) {
- current_buffer_->Append(cond_pre);
- line() << "if (!(" << cond_buf.str() << ")) { break; }";
- }
+ if (emit_as_loop) {
+ auto emit_continuing = [&]() {
+ current_buffer_->Append(cont_buf);
+ return true;
+ };
+
+ TINT_SCOPED_ASSIGNMENT(emit_continuing_, emit_continuing);
+ line() << "while (true) {";
+ increment_indent();
+ TINT_DEFER({
+ decrement_indent();
+ line() << "}";
+ });
- if (!EmitStatements(stmt->body->statements)) {
- return false;
- }
+ if (stmt->condition) {
+ current_buffer_->Append(cond_pre);
+ line() << "if (!(" << cond_buf.str() << ")) { break; }";
+ }
- if (!emit_continuing_()) {
- return false;
- }
- } else {
- // For-loop can be generated.
- {
- auto out = line();
- out << "for";
- {
- ScopedParen sp(out);
+ if (!EmitStatements(stmt->body->statements)) {
+ return false;
+ }
- if (!init_buf.lines.empty()) {
- out << init_buf.lines[0].content << " ";
- } else {
- out << "; ";
+ if (!emit_continuing_()) {
+ return false;
}
+ } else {
+ // For-loop can be generated.
+ {
+ auto out = line();
+ out << "for";
+ {
+ ScopedParen sp(out);
+
+ if (!init_buf.lines.empty()) {
+ out << init_buf.lines[0].content << " ";
+ } else {
+ out << "; ";
+ }
- out << cond_buf.str() << "; ";
+ out << cond_buf.str() << "; ";
- if (!cont_buf.lines.empty()) {
- out << TrimSuffix(cont_buf.lines[0].content, ";");
+ if (!cont_buf.lines.empty()) {
+ out << TrimSuffix(cont_buf.lines[0].content, ";");
+ }
+ }
+ out << " {";
}
- }
- out << " {";
- }
- {
- auto emit_continuing = [] { return true; };
- TINT_SCOPED_ASSIGNMENT(emit_continuing_, emit_continuing);
- if (!EmitStatementsWithIndent(stmt->body->statements)) {
- return false;
- }
+ {
+ auto emit_continuing = [] { return true; };
+ TINT_SCOPED_ASSIGNMENT(emit_continuing_, emit_continuing);
+ if (!EmitStatementsWithIndent(stmt->body->statements)) {
+ return false;
+ }
+ }
+ line() << "}";
}
- line() << "}";
- }
- return true;
+ return true;
}
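// For illustration: a WGSL for-loop whose condition or continuing block cannot be
// expressed as a single C-style clause is lowered by the path above to

    {
      int i = 0;
      while (true) {
        if (!(cond)) { break; }
        /* body */
        /* continuing statements */
      }
    }

// with the initializer hoisted into an enclosing block ("int i = 0;" and "cond"
// are placeholders), while simple loops are still printed as an ordinary
// for(init; cond; cont) { ... }.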
bool GeneratorImpl::EmitDiscard(const ast::DiscardStatement*) {
- // TODO(dsinclair): Verify this is correct when the discard semantics are
- // defined for WGSL (https://github.com/gpuweb/gpuweb/issues/361)
- line() << "discard_fragment();";
- return true;
+ // TODO(dsinclair): Verify this is correct when the discard semantics are
+ // defined for WGSL (https://github.com/gpuweb/gpuweb/issues/361)
+ line() << "discard_fragment();";
+ return true;
}
bool GeneratorImpl::EmitIf(const ast::IfStatement* stmt) {
- {
- auto out = line();
- out << "if (";
- if (!EmitExpression(out, stmt->condition)) {
- return false;
- }
- out << ") {";
- }
-
- if (!EmitStatementsWithIndent(stmt->body->statements)) {
- return false;
- }
-
- for (auto* e : stmt->else_statements) {
- if (e->condition) {
- line() << "} else {";
- increment_indent();
-
- {
+ {
auto out = line();
out << "if (";
- if (!EmitExpression(out, e->condition)) {
- return false;
+ if (!EmitExpression(out, stmt->condition)) {
+ return false;
}
out << ") {";
- }
- } else {
- line() << "} else {";
}
- if (!EmitStatementsWithIndent(e->body->statements)) {
- return false;
+ if (!EmitStatementsWithIndent(stmt->body->statements)) {
+ return false;
}
- }
-
- line() << "}";
- for (auto* e : stmt->else_statements) {
- if (e->condition) {
- decrement_indent();
- line() << "}";
+ if (stmt->else_statement) {
+ line() << "} else {";
+ if (auto* block = stmt->else_statement->As<ast::BlockStatement>()) {
+ if (!EmitStatementsWithIndent(block->statements)) {
+ return false;
+ }
+ } else {
+ if (!EmitStatementsWithIndent({stmt->else_statement})) {
+ return false;
+ }
+ }
}
- }
- return true;
-}
+ line() << "}";
-bool GeneratorImpl::EmitMemberAccessor(
- std::ostream& out,
- const ast::MemberAccessorExpression* expr) {
- auto write_lhs = [&] {
- bool paren_lhs = !expr->structure->IsAnyOf<
- ast::IndexAccessorExpression, ast::CallExpression,
- ast::IdentifierExpression, ast::MemberAccessorExpression>();
- if (paren_lhs) {
- out << "(";
- }
- if (!EmitExpression(out, expr->structure)) {
- return false;
- }
- if (paren_lhs) {
- out << ")";
- }
return true;
- };
-
- auto& sem = program_->Sem();
-
- if (auto* swizzle = sem.Get(expr)->As<sem::Swizzle>()) {
- // Metal 1.x does not support swizzling of packed vector types.
- // For single element swizzles, we can use the index operator.
- // For multi-element swizzles, we need to cast to a regular vector type
- // first. Note that we do not currently allow assignments to swizzles, so
- // the casting which will convert the l-value to r-value is fine.
- if (swizzle->Indices().size() == 1) {
- if (!write_lhs()) {
- return false;
- }
- out << "[" << swizzle->Indices()[0] << "]";
+}
+
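// Sketch of the else handling above: since the AST now carries a single
// else_statement, an else-if chain is emitted as an if nested inside the else
// block (a, b, x() and y() are placeholders):

    if (a) {
      x();
    } else {
      if (b) {
        y();
      }
    }

// A plain else whose statement is a BlockStatement has its statements emitted
// directly into the else block instead.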
+bool GeneratorImpl::EmitMemberAccessor(std::ostream& out,
+ const ast::MemberAccessorExpression* expr) {
+ auto write_lhs = [&] {
+ bool paren_lhs =
+ !expr->structure->IsAnyOf<ast::IndexAccessorExpression, ast::CallExpression,
+ ast::IdentifierExpression, ast::MemberAccessorExpression>();
+ if (paren_lhs) {
+ out << "(";
+ }
+ if (!EmitExpression(out, expr->structure)) {
+ return false;
+ }
+ if (paren_lhs) {
+ out << ")";
+ }
+ return true;
+ };
+
+ auto& sem = program_->Sem();
+
+ if (auto* swizzle = sem.Get(expr)->As<sem::Swizzle>()) {
+ // Metal 1.x does not support swizzling of packed vector types.
+ // For single element swizzles, we can use the index operator.
+ // For multi-element swizzles, we need to cast to a regular vector type
+ // first. Note that we do not currently allow assignments to swizzles, so
+ // the casting which will convert the l-value to r-value is fine.
+ if (swizzle->Indices().size() == 1) {
+ if (!write_lhs()) {
+ return false;
+ }
+ out << "[" << swizzle->Indices()[0] << "]";
+ } else {
+ if (!EmitType(out, sem.Get(expr->structure)->Type()->UnwrapRef(), "")) {
+ return false;
+ }
+ out << "(";
+ if (!write_lhs()) {
+ return false;
+ }
+ out << ")." << program_->Symbols().NameFor(expr->member->symbol);
+ }
} else {
- if (!EmitType(out, sem.Get(expr->structure)->Type()->UnwrapRef(), "")) {
- return false;
- }
- out << "(";
- if (!write_lhs()) {
- return false;
- }
- out << ")." << program_->Symbols().NameFor(expr->member->symbol);
- }
- } else {
- if (!write_lhs()) {
- return false;
- }
- out << ".";
- if (!EmitExpression(out, expr->member)) {
- return false;
+ if (!write_lhs()) {
+ return false;
+ }
+ out << ".";
+ if (!EmitExpression(out, expr->member)) {
+ return false;
+ }
}
- }
- return true;
+ return true;
}
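// Sketch of the packed-vector workaround above (str.v is a placeholder packed
// vec3 member): a single-element WGSL swizzle such as str.v.z is emitted as

    str.v[2]

// while a multi-element swizzle such as str.v.zy is routed through a cast to the
// unpacked vector type first:

    float3(str.v).zy

// because Metal 1.x cannot swizzle packed vector types directly.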
bool GeneratorImpl::EmitReturn(const ast::ReturnStatement* stmt) {
- auto out = line();
- out << "return";
- if (stmt->value) {
- out << " ";
- if (!EmitExpression(out, stmt->value)) {
- return false;
- }
- }
- out << ";";
- return true;
+ auto out = line();
+ out << "return";
+ if (stmt->value) {
+ out << " ";
+ if (!EmitExpression(out, stmt->value)) {
+ return false;
+ }
+ }
+ out << ";";
+ return true;
}
bool GeneratorImpl::EmitBlock(const ast::BlockStatement* stmt) {
- line() << "{";
+ line() << "{";
- if (!EmitStatementsWithIndent(stmt->statements)) {
- return false;
- }
+ if (!EmitStatementsWithIndent(stmt->statements)) {
+ return false;
+ }
- line() << "}";
+ line() << "}";
- return true;
+ return true;
}
bool GeneratorImpl::EmitStatement(const ast::Statement* stmt) {
- return Switch(
- stmt,
- [&](const ast::AssignmentStatement* a) { //
- return EmitAssign(a);
- },
- [&](const ast::BlockStatement* b) { //
- return EmitBlock(b);
- },
- [&](const ast::BreakStatement* b) { //
- return EmitBreak(b);
- },
- [&](const ast::CallStatement* c) { //
- auto out = line();
- if (!EmitCall(out, c->expr)) { //
- return false;
- }
- out << ";";
- return true;
- },
- [&](const ast::ContinueStatement* c) { //
- return EmitContinue(c);
- },
- [&](const ast::DiscardStatement* d) { //
- return EmitDiscard(d);
- },
- [&](const ast::FallthroughStatement*) { //
- line() << "/* fallthrough */";
- return true;
- },
- [&](const ast::IfStatement* i) { //
- return EmitIf(i);
- },
- [&](const ast::LoopStatement* l) { //
- return EmitLoop(l);
- },
- [&](const ast::ForLoopStatement* l) { //
- return EmitForLoop(l);
- },
- [&](const ast::ReturnStatement* r) { //
- return EmitReturn(r);
- },
- [&](const ast::SwitchStatement* s) { //
- return EmitSwitch(s);
- },
- [&](const ast::VariableDeclStatement* v) { //
- auto* var = program_->Sem().Get(v->variable);
- return EmitVariable(var);
- },
- [&](Default) {
- diagnostics_.add_error(
- diag::System::Writer,
- "unknown statement type: " + std::string(stmt->TypeInfo().name));
- return false;
- });
+ return Switch(
+ stmt,
+ [&](const ast::AssignmentStatement* a) { //
+ return EmitAssign(a);
+ },
+ [&](const ast::BlockStatement* b) { //
+ return EmitBlock(b);
+ },
+ [&](const ast::BreakStatement* b) { //
+ return EmitBreak(b);
+ },
+ [&](const ast::CallStatement* c) { //
+ auto out = line();
+ if (!EmitCall(out, c->expr)) { //
+ return false;
+ }
+ out << ";";
+ return true;
+ },
+ [&](const ast::ContinueStatement* c) { //
+ return EmitContinue(c);
+ },
+ [&](const ast::DiscardStatement* d) { //
+ return EmitDiscard(d);
+ },
+ [&](const ast::FallthroughStatement*) { //
+ line() << "/* fallthrough */";
+ return true;
+ },
+ [&](const ast::IfStatement* i) { //
+ return EmitIf(i);
+ },
+ [&](const ast::LoopStatement* l) { //
+ return EmitLoop(l);
+ },
+ [&](const ast::ForLoopStatement* l) { //
+ return EmitForLoop(l);
+ },
+ [&](const ast::ReturnStatement* r) { //
+ return EmitReturn(r);
+ },
+ [&](const ast::SwitchStatement* s) { //
+ return EmitSwitch(s);
+ },
+ [&](const ast::VariableDeclStatement* v) { //
+ auto* var = program_->Sem().Get(v->variable);
+ return EmitVariable(var);
+ },
+ [&](Default) {
+ diagnostics_.add_error(diag::System::Writer,
+ "unknown statement type: " + std::string(stmt->TypeInfo().name));
+ return false;
+ });
}
bool GeneratorImpl::EmitStatements(const ast::StatementList& stmts) {
- for (auto* s : stmts) {
- if (!EmitStatement(s)) {
- return false;
+ for (auto* s : stmts) {
+ if (!EmitStatement(s)) {
+ return false;
+ }
}
- }
- return true;
+ return true;
}
bool GeneratorImpl::EmitStatementsWithIndent(const ast::StatementList& stmts) {
- ScopedIndent si(this);
- return EmitStatements(stmts);
+ ScopedIndent si(this);
+ return EmitStatements(stmts);
}
bool GeneratorImpl::EmitSwitch(const ast::SwitchStatement* stmt) {
- {
- auto out = line();
- out << "switch(";
- if (!EmitExpression(out, stmt->condition)) {
- return false;
+ {
+ auto out = line();
+ out << "switch(";
+ if (!EmitExpression(out, stmt->condition)) {
+ return false;
+ }
+ out << ") {";
}
- out << ") {";
- }
- {
- ScopedIndent si(this);
- for (auto* s : stmt->body) {
- if (!EmitCase(s)) {
- return false;
- }
+ {
+ ScopedIndent si(this);
+ for (auto* s : stmt->body) {
+ if (!EmitCase(s)) {
+ return false;
+ }
+ }
}
- }
- line() << "}";
+ line() << "}";
- return true;
+ return true;
}
bool GeneratorImpl::EmitType(std::ostream& out,
const sem::Type* type,
const std::string& name,
bool* name_printed /* = nullptr */) {
- if (name_printed) {
- *name_printed = false;
- }
-
- return Switch(
- type,
- [&](const sem::Atomic* atomic) {
- if (atomic->Type()->Is<sem::I32>()) {
- out << "atomic_int";
- return true;
- }
- if (atomic->Type()->Is<sem::U32>()) {
- out << "atomic_uint";
- return true;
- }
- TINT_ICE(Writer, diagnostics_)
- << "unhandled atomic type "
- << atomic->Type()->FriendlyName(builder_.Symbols());
- return false;
- },
- [&](const sem::Array* ary) {
- const sem::Type* base_type = ary;
- std::vector<uint32_t> sizes;
- while (auto* arr = base_type->As<sem::Array>()) {
- if (arr->IsRuntimeSized()) {
- sizes.push_back(1);
- } else {
- sizes.push_back(arr->Count());
- }
- base_type = arr->ElemType();
- }
- if (!EmitType(out, base_type, "")) {
- return false;
- }
- if (!name.empty()) {
- out << " " << name;
- if (name_printed) {
- *name_printed = true;
- }
- }
- for (uint32_t size : sizes) {
- out << "[" << size << "]";
- }
- return true;
- },
- [&](const sem::Bool*) {
- out << "bool";
- return true;
- },
- [&](const sem::F32*) {
- out << "float";
- return true;
- },
- [&](const sem::I32*) {
- out << "int";
- return true;
- },
- [&](const sem::Matrix* mat) {
- if (!EmitType(out, mat->type(), "")) {
- return false;
- }
- out << mat->columns() << "x" << mat->rows();
- return true;
- },
- [&](const sem::Pointer* ptr) {
- if (ptr->Access() == ast::Access::kRead) {
- out << "const ";
- }
- if (!EmitStorageClass(out, ptr->StorageClass())) {
- return false;
- }
- out << " ";
- if (ptr->StoreType()->Is<sem::Array>()) {
- std::string inner = "(*" + name + ")";
- if (!EmitType(out, ptr->StoreType(), inner)) {
- return false;
- }
- if (name_printed) {
- *name_printed = true;
- }
- } else {
- if (!EmitType(out, ptr->StoreType(), "")) {
- return false;
- }
- out << "* " << name;
- if (name_printed) {
- *name_printed = true;
- }
- }
- return true;
- },
- [&](const sem::Sampler*) {
- out << "sampler";
- return true;
- },
- [&](const sem::Struct* str) {
- // The struct type emits as just the name. The declaration would be
- // emitted as part of emitting the declared types.
- out << StructName(str);
- return true;
- },
- [&](const sem::Texture* tex) {
- if (tex->Is<sem::ExternalTexture>()) {
- TINT_ICE(Writer, diagnostics_)
- << "Multiplanar external texture transform was not run.";
- return false;
- }
-
- if (tex->IsAnyOf<sem::DepthTexture, sem::DepthMultisampledTexture>()) {
- out << "depth";
- } else {
- out << "texture";
- }
+ if (name_printed) {
+ *name_printed = false;
+ }
- switch (tex->dim()) {
- case ast::TextureDimension::k1d:
- out << "1d";
- break;
- case ast::TextureDimension::k2d:
- out << "2d";
- break;
- case ast::TextureDimension::k2dArray:
- out << "2d_array";
- break;
- case ast::TextureDimension::k3d:
- out << "3d";
- break;
- case ast::TextureDimension::kCube:
- out << "cube";
- break;
- case ast::TextureDimension::kCubeArray:
- out << "cube_array";
- break;
- default:
+ return Switch(
+ type,
+ [&](const sem::Atomic* atomic) {
+ if (atomic->Type()->Is<sem::I32>()) {
+ out << "atomic_int";
+ return true;
+ }
+ if (atomic->Type()->Is<sem::U32>()) {
+ out << "atomic_uint";
+ return true;
+ }
+ TINT_ICE(Writer, diagnostics_)
+ << "unhandled atomic type " << atomic->Type()->FriendlyName(builder_.Symbols());
+ return false;
+ },
+ [&](const sem::Array* ary) {
+ const sem::Type* base_type = ary;
+ std::vector<uint32_t> sizes;
+ while (auto* arr = base_type->As<sem::Array>()) {
+ if (arr->IsRuntimeSized()) {
+ sizes.push_back(1);
+ } else {
+ sizes.push_back(arr->Count());
+ }
+ base_type = arr->ElemType();
+ }
+ if (!EmitType(out, base_type, "")) {
+ return false;
+ }
+ if (!name.empty()) {
+ out << " " << name;
+ if (name_printed) {
+ *name_printed = true;
+ }
+ }
+ for (uint32_t size : sizes) {
+ out << "[" << size << "]";
+ }
+ return true;
+ },
+ [&](const sem::Bool*) {
+ out << "bool";
+ return true;
+ },
+ [&](const sem::F16*) {
diagnostics_.add_error(diag::System::Writer,
- "Invalid texture dimensions");
+ "Type f16 is not completely implemented yet");
return false;
- }
- if (tex->IsAnyOf<sem::MultisampledTexture,
- sem::DepthMultisampledTexture>()) {
- out << "_ms";
- }
- out << "<";
- TINT_DEFER(out << ">");
-
- return Switch(
- tex,
- [&](const sem::DepthTexture*) {
- out << "float, access::sample";
- return true;
- },
- [&](const sem::DepthMultisampledTexture*) {
- out << "float, access::read";
- return true;
- },
- [&](const sem::StorageTexture* storage) {
- if (!EmitType(out, storage->type(), "")) {
+ },
+ [&](const sem::F32*) {
+ out << "float";
+ return true;
+ },
+ [&](const sem::I32*) {
+ out << "int";
+ return true;
+ },
+ [&](const sem::Matrix* mat) {
+ if (!EmitType(out, mat->type(), "")) {
return false;
- }
-
- std::string access_str;
- if (storage->access() == ast::Access::kRead) {
- out << ", access::read";
- } else if (storage->access() == ast::Access::kWrite) {
- out << ", access::write";
- } else {
- diagnostics_.add_error(
- diag::System::Writer,
- "Invalid access control for storage texture");
+ }
+ out << mat->columns() << "x" << mat->rows();
+ return true;
+ },
+ [&](const sem::Pointer* ptr) {
+ if (ptr->Access() == ast::Access::kRead) {
+ out << "const ";
+ }
+ if (!EmitStorageClass(out, ptr->StorageClass())) {
return false;
- }
- return true;
- },
- [&](const sem::MultisampledTexture* ms) {
- if (!EmitType(out, ms->type(), "")) {
+ }
+ out << " ";
+ if (ptr->StoreType()->Is<sem::Array>()) {
+ std::string inner = "(*" + name + ")";
+ if (!EmitType(out, ptr->StoreType(), inner)) {
+ return false;
+ }
+ if (name_printed) {
+ *name_printed = true;
+ }
+ } else {
+ if (!EmitType(out, ptr->StoreType(), "")) {
+ return false;
+ }
+ out << "* " << name;
+ if (name_printed) {
+ *name_printed = true;
+ }
+ }
+ return true;
+ },
+ [&](const sem::Sampler*) {
+ out << "sampler";
+ return true;
+ },
+ [&](const sem::Struct* str) {
+ // The struct type emits as just the name. The declaration would be
+ // emitted as part of emitting the declared types.
+ out << StructName(str);
+ return true;
+ },
+ [&](const sem::Texture* tex) {
+ if (tex->Is<sem::ExternalTexture>()) {
+ TINT_ICE(Writer, diagnostics_)
+ << "Multiplanar external texture transform was not run.";
return false;
- }
- out << ", access::read";
- return true;
- },
- [&](const sem::SampledTexture* sampled) {
- if (!EmitType(out, sampled->type(), "")) {
+ }
+
+ if (tex->IsAnyOf<sem::DepthTexture, sem::DepthMultisampledTexture>()) {
+ out << "depth";
+ } else {
+ out << "texture";
+ }
+
+ switch (tex->dim()) {
+ case ast::TextureDimension::k1d:
+ out << "1d";
+ break;
+ case ast::TextureDimension::k2d:
+ out << "2d";
+ break;
+ case ast::TextureDimension::k2dArray:
+ out << "2d_array";
+ break;
+ case ast::TextureDimension::k3d:
+ out << "3d";
+ break;
+ case ast::TextureDimension::kCube:
+ out << "cube";
+ break;
+ case ast::TextureDimension::kCubeArray:
+ out << "cube_array";
+ break;
+ default:
+ diagnostics_.add_error(diag::System::Writer, "Invalid texture dimensions");
+ return false;
+ }
+ if (tex->IsAnyOf<sem::MultisampledTexture, sem::DepthMultisampledTexture>()) {
+ out << "_ms";
+ }
+ out << "<";
+ TINT_DEFER(out << ">");
+
+ return Switch(
+ tex,
+ [&](const sem::DepthTexture*) {
+ out << "float, access::sample";
+ return true;
+ },
+ [&](const sem::DepthMultisampledTexture*) {
+ out << "float, access::read";
+ return true;
+ },
+ [&](const sem::StorageTexture* storage) {
+ if (!EmitType(out, storage->type(), "")) {
+ return false;
+ }
+
+ std::string access_str;
+ if (storage->access() == ast::Access::kRead) {
+ out << ", access::read";
+ } else if (storage->access() == ast::Access::kWrite) {
+ out << ", access::write";
+ } else {
+ diagnostics_.add_error(diag::System::Writer,
+ "Invalid access control for storage texture");
+ return false;
+ }
+ return true;
+ },
+ [&](const sem::MultisampledTexture* ms) {
+ if (!EmitType(out, ms->type(), "")) {
+ return false;
+ }
+ out << ", access::read";
+ return true;
+ },
+ [&](const sem::SampledTexture* sampled) {
+ if (!EmitType(out, sampled->type(), "")) {
+ return false;
+ }
+ out << ", access::sample";
+ return true;
+ },
+ [&](Default) {
+ diagnostics_.add_error(diag::System::Writer, "invalid texture type");
+ return false;
+ });
+ },
+ [&](const sem::U32*) {
+ out << "uint";
+ return true;
+ },
+ [&](const sem::Vector* vec) {
+ if (!EmitType(out, vec->type(), "")) {
return false;
- }
- out << ", access::sample";
- return true;
- },
- [&](Default) {
- diagnostics_.add_error(diag::System::Writer,
- "invalid texture type");
- return false;
- });
- },
- [&](const sem::U32*) {
- out << "uint";
- return true;
- },
- [&](const sem::Vector* vec) {
- if (!EmitType(out, vec->type(), "")) {
- return false;
- }
- out << vec->Width();
- return true;
- },
- [&](const sem::Void*) {
- out << "void";
- return true;
- },
- [&](Default) {
- diagnostics_.add_error(diag::System::Writer,
- "unknown type in EmitType: " +
- type->FriendlyName(builder_.Symbols()));
- return false;
- });
+ }
+ out << vec->Width();
+ return true;
+ },
+ [&](const sem::Void*) {
+ out << "void";
+ return true;
+ },
+ [&](Default) {
+ diagnostics_.add_error(
+ diag::System::Writer,
+ "unknown type in EmitType: " + type->FriendlyName(builder_.Symbols()));
+ return false;
+ });
}
bool GeneratorImpl::EmitTypeAndName(std::ostream& out,
const sem::Type* type,
const std::string& name) {
- bool name_printed = false;
- if (!EmitType(out, type, name, &name_printed)) {
- return false;
- }
- if (!name_printed) {
- out << " " << name;
- }
- return true;
+ bool name_printed = false;
+ if (!EmitType(out, type, name, &name_printed)) {
+ return false;
+ }
+ if (!name_printed) {
+ out << " " << name;
+ }
+ return true;
}
bool GeneratorImpl::EmitStorageClass(std::ostream& out, ast::StorageClass sc) {
- switch (sc) {
- case ast::StorageClass::kFunction:
- case ast::StorageClass::kPrivate:
- case ast::StorageClass::kUniformConstant:
- out << "thread";
- return true;
- case ast::StorageClass::kWorkgroup:
- out << "threadgroup";
- return true;
- case ast::StorageClass::kStorage:
- out << "device";
- return true;
- case ast::StorageClass::kUniform:
- out << "constant";
- return true;
- default:
- break;
- }
- TINT_ICE(Writer, diagnostics_) << "unhandled storage class: " << sc;
- return false;
+ switch (sc) {
+ case ast::StorageClass::kFunction:
+ case ast::StorageClass::kPrivate:
+ case ast::StorageClass::kHandle:
+ out << "thread";
+ return true;
+ case ast::StorageClass::kWorkgroup:
+ out << "threadgroup";
+ return true;
+ case ast::StorageClass::kStorage:
+ out << "device";
+ return true;
+ case ast::StorageClass::kUniform:
+ out << "constant";
+ return true;
+ default:
+ break;
+ }
+ TINT_ICE(Writer, diagnostics_) << "unhandled storage class: " << sc;
+ return false;
}
bool GeneratorImpl::EmitPackedType(std::ostream& out,
const sem::Type* type,
const std::string& name) {
- auto* vec = type->As<sem::Vector>();
- if (vec && vec->Width() == 3) {
- out << "packed_";
- if (!EmitType(out, vec, "")) {
- return false;
- }
-
- if (vec->is_float_vector() && !matrix_packed_vector_overloads_) {
- // Overload operators for matrix-vector arithmetic where the vector
- // operand is packed, as these overloads to not exist in the metal
- // namespace.
- TextBuffer b;
- TINT_DEFER(helpers_.Append(b));
- line(&b) << R"(template<typename T, int N, int M>
+ auto* vec = type->As<sem::Vector>();
+ if (vec && vec->Width() == 3) {
+ out << "packed_";
+ if (!EmitType(out, vec, "")) {
+ return false;
+ }
+
+ if (vec->is_float_vector() && !matrix_packed_vector_overloads_) {
+ // Overload operators for matrix-vector arithmetic where the vector
+            // operand is packed, as these overloads do not exist in the metal
+ // namespace.
+ TextBuffer b;
+ TINT_DEFER(helpers_.Append(b));
+ line(&b) << R"(template<typename T, int N, int M>
inline vec<T, M> operator*(matrix<T, N, M> lhs, packed_vec<T, N> rhs) {
return lhs * vec<T, N>(rhs);
}
@@ -2537,443 +2618,437 @@ inline vec<T, N> operator*(packed_vec<T, M> lhs, matrix<T, N, M> rhs) {
return vec<T, M>(lhs) * rhs;
}
)";
- matrix_packed_vector_overloads_ = true;
- }
+ matrix_packed_vector_overloads_ = true;
+ }
- return true;
- }
+ return true;
+ }
- return EmitType(out, type, name);
+ return EmitType(out, type, name);
}
bool GeneratorImpl::EmitStructType(TextBuffer* b, const sem::Struct* str) {
- line(b) << "struct " << StructName(str) << " {";
-
- bool is_host_shareable = str->IsHostShareable();
-
- // Emits a `/* 0xnnnn */` byte offset comment for a struct member.
- auto add_byte_offset_comment = [&](std::ostream& out, uint32_t offset) {
- std::ios_base::fmtflags saved_flag_state(out.flags());
- out << "/* 0x" << std::hex << std::setfill('0') << std::setw(4) << offset
- << " */ ";
- out.flags(saved_flag_state);
- };
-
- auto add_padding = [&](uint32_t size, uint32_t msl_offset) {
- std::string name;
- do {
- name = UniqueIdentifier("tint_pad");
- } while (str->FindMember(program_->Symbols().Get(name)));
-
- auto out = line(b);
- add_byte_offset_comment(out, msl_offset);
- out << "int8_t " << name << "[" << size << "];";
- };
-
- b->IncrementIndent();
-
- uint32_t msl_offset = 0;
- for (auto* mem : str->Members()) {
- auto out = line(b);
- auto mem_name = program_->Symbols().NameFor(mem->Name());
- auto wgsl_offset = mem->Offset();
-
- if (is_host_shareable) {
- if (wgsl_offset < msl_offset) {
- // Unimplementable layout
- TINT_ICE(Writer, diagnostics_)
- << "Structure member WGSL offset (" << wgsl_offset
- << ") is behind MSL offset (" << msl_offset << ")";
- return false;
- }
+ line(b) << "struct " << StructName(str) << " {";
- // Generate padding if required
- if (auto padding = wgsl_offset - msl_offset) {
- add_padding(padding, msl_offset);
- msl_offset += padding;
- }
+ bool is_host_shareable = str->IsHostShareable();
- add_byte_offset_comment(out, msl_offset);
+ // Emits a `/* 0xnnnn */` byte offset comment for a struct member.
+ auto add_byte_offset_comment = [&](std::ostream& out, uint32_t offset) {
+ std::ios_base::fmtflags saved_flag_state(out.flags());
+ out << "/* 0x" << std::hex << std::setfill('0') << std::setw(4) << offset << " */ ";
+ out.flags(saved_flag_state);
+ };
- if (!EmitPackedType(out, mem->Type(), mem_name)) {
- return false;
- }
- } else {
- if (!EmitType(out, mem->Type(), mem_name)) {
- return false;
- }
- }
+ auto add_padding = [&](uint32_t size, uint32_t msl_offset) {
+ std::string name;
+ do {
+ name = UniqueIdentifier("tint_pad");
+ } while (str->FindMember(program_->Symbols().Get(name)));
- auto* ty = mem->Type();
+ auto out = line(b);
+ add_byte_offset_comment(out, msl_offset);
+ out << "int8_t " << name << "[" << size << "];";
+ };
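+    // e.g. a 12-byte gap at byte offset 4 is emitted as
+    //   /* 0x0004 */ int8_t tint_pad[12];
+    // (the pad name may gain a numeric suffix from UniqueIdentifier).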
- // Array member name will be output with the type
- if (!ty->Is<sem::Array>()) {
- out << " " << mem_name;
- }
+ b->IncrementIndent();
- // Emit attributes
- if (auto* decl = mem->Declaration()) {
- for (auto* attr : decl->attributes) {
- bool ok = Switch(
- attr,
- [&](const ast::BuiltinAttribute* builtin) {
- auto name = builtin_to_attribute(builtin->builtin);
- if (name.empty()) {
- diagnostics_.add_error(diag::System::Writer, "unknown builtin");
+ uint32_t msl_offset = 0;
+ for (auto* mem : str->Members()) {
+ auto out = line(b);
+ auto mem_name = program_->Symbols().NameFor(mem->Name());
+ auto wgsl_offset = mem->Offset();
+
+ if (is_host_shareable) {
+ if (wgsl_offset < msl_offset) {
+ // Unimplementable layout
+ TINT_ICE(Writer, diagnostics_) << "Structure member WGSL offset (" << wgsl_offset
+ << ") is behind MSL offset (" << msl_offset << ")";
return false;
- }
- out << " [[" << name << "]]";
- return true;
- },
- [&](const ast::LocationAttribute* loc) {
- auto& pipeline_stage_uses = str->PipelineStageUses();
- if (pipeline_stage_uses.size() != 1) {
- TINT_ICE(Writer, diagnostics_)
- << "invalid entry point IO struct uses";
+ }
+
+ // Generate padding if required
+ if (auto padding = wgsl_offset - msl_offset) {
+ add_padding(padding, msl_offset);
+ msl_offset += padding;
+ }
+
+ add_byte_offset_comment(out, msl_offset);
+
+ if (!EmitPackedType(out, mem->Type(), mem_name)) {
return false;
- }
-
- if (pipeline_stage_uses.count(
- sem::PipelineStageUsage::kVertexInput)) {
- out << " [[attribute(" + std::to_string(loc->value) + ")]]";
- } else if (pipeline_stage_uses.count(
- sem::PipelineStageUsage::kVertexOutput)) {
- out << " [[user(locn" + std::to_string(loc->value) + ")]]";
- } else if (pipeline_stage_uses.count(
- sem::PipelineStageUsage::kFragmentInput)) {
- out << " [[user(locn" + std::to_string(loc->value) + ")]]";
- } else if (pipeline_stage_uses.count(
- sem::PipelineStageUsage::kFragmentOutput)) {
- out << " [[color(" + std::to_string(loc->value) + ")]]";
- } else {
- TINT_ICE(Writer, diagnostics_)
- << "invalid use of location decoration";
+ }
+ } else {
+ if (!EmitType(out, mem->Type(), mem_name)) {
return false;
- }
- return true;
- },
- [&](const ast::InterpolateAttribute* interpolate) {
- auto name = interpolation_to_attribute(interpolate->type,
- interpolate->sampling);
- if (name.empty()) {
- diagnostics_.add_error(diag::System::Writer,
- "unknown interpolation attribute");
+ }
+ }
+
+ auto* ty = mem->Type();
+
+ // Array member name will be output with the type
+ if (!ty->Is<sem::Array>()) {
+ out << " " << mem_name;
+ }
+
+ // Emit attributes
+ if (auto* decl = mem->Declaration()) {
+ for (auto* attr : decl->attributes) {
+ bool ok = Switch(
+ attr,
+ [&](const ast::BuiltinAttribute* builtin) {
+ auto name = builtin_to_attribute(builtin->builtin);
+ if (name.empty()) {
+ diagnostics_.add_error(diag::System::Writer, "unknown builtin");
+ return false;
+ }
+ out << " [[" << name << "]]";
+ return true;
+ },
+ [&](const ast::LocationAttribute* loc) {
+ auto& pipeline_stage_uses = str->PipelineStageUses();
+ if (pipeline_stage_uses.size() != 1) {
+ TINT_ICE(Writer, diagnostics_) << "invalid entry point IO struct uses";
+ return false;
+ }
+
+ if (pipeline_stage_uses.count(sem::PipelineStageUsage::kVertexInput)) {
+ out << " [[attribute(" + std::to_string(loc->value) + ")]]";
+ } else if (pipeline_stage_uses.count(
+ sem::PipelineStageUsage::kVertexOutput)) {
+ out << " [[user(locn" + std::to_string(loc->value) + ")]]";
+ } else if (pipeline_stage_uses.count(
+ sem::PipelineStageUsage::kFragmentInput)) {
+ out << " [[user(locn" + std::to_string(loc->value) + ")]]";
+ } else if (pipeline_stage_uses.count(
+ sem::PipelineStageUsage::kFragmentOutput)) {
+ out << " [[color(" + std::to_string(loc->value) + ")]]";
+ } else {
+ TINT_ICE(Writer, diagnostics_) << "invalid use of location decoration";
+ return false;
+ }
+ return true;
+ },
+ [&](const ast::InterpolateAttribute* interpolate) {
+ auto name =
+ interpolation_to_attribute(interpolate->type, interpolate->sampling);
+ if (name.empty()) {
+ diagnostics_.add_error(diag::System::Writer,
+ "unknown interpolation attribute");
+ return false;
+ }
+ out << " [[" << name << "]]";
+ return true;
+ },
+ [&](const ast::InvariantAttribute*) {
+ if (invariant_define_name_.empty()) {
+ invariant_define_name_ = UniqueIdentifier("TINT_INVARIANT");
+ }
+ out << " " << invariant_define_name_;
+ return true;
+ },
+ [&](const ast::StructMemberOffsetAttribute*) { return true; },
+ [&](const ast::StructMemberAlignAttribute*) { return true; },
+ [&](const ast::StructMemberSizeAttribute*) { return true; },
+ [&](Default) {
+ TINT_ICE(Writer, diagnostics_)
+ << "unhandled struct member attribute: " << attr->Name();
+ return false;
+ });
+ if (!ok) {
+ return false;
+ }
+ }
+ }
+
+ out << ";";
+
+ if (is_host_shareable) {
+ // Calculate new MSL offset
+ auto size_align = MslPackedTypeSizeAndAlign(ty);
+ if (msl_offset % size_align.align) {
+ TINT_ICE(Writer, diagnostics_)
+ << "Misaligned MSL structure member " << ty->FriendlyName(program_->Symbols())
+ << " " << mem_name;
return false;
- }
- out << " [[" << name << "]]";
- return true;
- },
- [&](const ast::InvariantAttribute*) {
- if (invariant_define_name_.empty()) {
- invariant_define_name_ = UniqueIdentifier("TINT_INVARIANT");
- }
- out << " " << invariant_define_name_;
- return true;
- },
- [&](const ast::StructMemberOffsetAttribute*) { return true; },
- [&](const ast::StructMemberAlignAttribute*) { return true; },
- [&](const ast::StructMemberSizeAttribute*) { return true; },
- [&](Default) {
- TINT_ICE(Writer, diagnostics_)
- << "unhandled struct member attribute: " << attr->Name();
- return false;
- });
- if (!ok) {
- return false;
+ }
+ msl_offset += size_align.size;
}
- }
}
- out << ";";
-
- if (is_host_shareable) {
- // Calculate new MSL offset
- auto size_align = MslPackedTypeSizeAndAlign(ty);
- if (msl_offset % size_align.align) {
- TINT_ICE(Writer, diagnostics_)
- << "Misaligned MSL structure member "
- << ty->FriendlyName(program_->Symbols()) << " " << mem_name;
- return false;
- }
- msl_offset += size_align.size;
+ if (is_host_shareable && str->Size() != msl_offset) {
+ add_padding(str->Size() - msl_offset, msl_offset);
}
- }
- if (is_host_shareable && str->Size() != msl_offset) {
- add_padding(str->Size() - msl_offset, msl_offset);
- }
+ b->DecrementIndent();
- b->DecrementIndent();
+ line(b) << "};";
+ return true;
+}
- line(b) << "};";
- return true;
+bool GeneratorImpl::EmitStructTypeOnce(TextBuffer* buffer, const sem::Struct* str) {
+ auto it = emitted_structs_.emplace(str);
+ if (!it.second) {
+ return true;
+ }
+ return EmitStructType(buffer, str);
}
-bool GeneratorImpl::EmitUnaryOp(std::ostream& out,
- const ast::UnaryOpExpression* expr) {
- // Handle `-e` when `e` is signed, so that we ensure that if `e` is the
- // largest negative value, it returns `e`.
- auto* expr_type = TypeOf(expr->expr)->UnwrapRef();
- if (expr->op == ast::UnaryOp::kNegation &&
- expr_type->is_signed_scalar_or_vector()) {
- auto fn =
- utils::GetOrCreate(unary_minus_funcs_, expr_type, [&]() -> std::string {
- // e.g.:
- // int tint_unary_minus(const int v) {
- // return (v == -2147483648) ? v : -v;
- // }
- TextBuffer b;
- TINT_DEFER(helpers_.Append(b));
-
- auto fn_name = UniqueIdentifier("tint_unary_minus");
- {
- auto decl = line(&b);
- if (!EmitTypeAndName(decl, expr_type, fn_name)) {
- return "";
- }
- decl << "(const ";
- if (!EmitType(decl, expr_type, "")) {
- return "";
+bool GeneratorImpl::EmitUnaryOp(std::ostream& out, const ast::UnaryOpExpression* expr) {
+ // Handle `-e` when `e` is signed, so that we ensure that if `e` is the
+ // largest negative value, it returns `e`.
+ auto* expr_type = TypeOf(expr->expr)->UnwrapRef();
+ if (expr->op == ast::UnaryOp::kNegation && expr_type->is_signed_scalar_or_vector()) {
+ auto fn = utils::GetOrCreate(unary_minus_funcs_, expr_type, [&]() -> std::string {
+ // e.g.:
+ // int tint_unary_minus(const int v) {
+ // return (v == -2147483648) ? v : -v;
+ // }
+ TextBuffer b;
+ TINT_DEFER(helpers_.Append(b));
+
+ auto fn_name = UniqueIdentifier("tint_unary_minus");
+ {
+ auto decl = line(&b);
+ if (!EmitTypeAndName(decl, expr_type, fn_name)) {
+ return "";
+ }
+ decl << "(const ";
+ if (!EmitType(decl, expr_type, "")) {
+ return "";
+ }
+ decl << " v) {";
}
- decl << " v) {";
- }
- {
- ScopedIndent si(&b);
- const auto largest_negative_value =
- std::to_string(std::numeric_limits<int32_t>::min());
- line(&b) << "return select(-v, v, v == " << largest_negative_value
- << ");";
- }
- line(&b) << "}";
- line(&b);
- return fn_name;
+ {
+ ScopedIndent si(&b);
+ const auto largest_negative_value =
+ std::to_string(std::numeric_limits<int32_t>::min());
+ line(&b) << "return select(-v, v, v == " << largest_negative_value << ");";
+ }
+ line(&b) << "}";
+ line(&b);
+ return fn_name;
});
- out << fn << "(";
+ out << fn << "(";
+ if (!EmitExpression(out, expr->expr)) {
+ return false;
+ }
+ out << ")";
+ return true;
+ }
+
+ switch (expr->op) {
+ case ast::UnaryOp::kAddressOf:
+ out << "&";
+ break;
+ case ast::UnaryOp::kComplement:
+ out << "~";
+ break;
+ case ast::UnaryOp::kIndirection:
+ out << "*";
+ break;
+ case ast::UnaryOp::kNot:
+ out << "!";
+ break;
+ case ast::UnaryOp::kNegation:
+ out << "-";
+ break;
+ }
+ out << "(";
+
if (!EmitExpression(out, expr->expr)) {
- return false;
+ return false;
}
- out << ")";
- return true;
- }
-
- switch (expr->op) {
- case ast::UnaryOp::kAddressOf:
- out << "&";
- break;
- case ast::UnaryOp::kComplement:
- out << "~";
- break;
- case ast::UnaryOp::kIndirection:
- out << "*";
- break;
- case ast::UnaryOp::kNot:
- out << "!";
- break;
- case ast::UnaryOp::kNegation:
- out << "-";
- break;
- }
- out << "(";
-
- if (!EmitExpression(out, expr->expr)) {
- return false;
- }
- out << ")";
+ out << ")";
- return true;
+ return true;
}
bool GeneratorImpl::EmitVariable(const sem::Variable* var) {
- auto* decl = var->Declaration();
-
- for (auto* attr : decl->attributes) {
- if (!attr->Is<ast::InternalAttribute>()) {
- TINT_ICE(Writer, diagnostics_) << "unexpected variable attribute";
- return false;
- }
- }
-
- auto out = line();
-
- switch (var->StorageClass()) {
- case ast::StorageClass::kFunction:
- case ast::StorageClass::kUniformConstant:
- case ast::StorageClass::kNone:
- break;
- case ast::StorageClass::kPrivate:
- out << "thread ";
- break;
- case ast::StorageClass::kWorkgroup:
- out << "threadgroup ";
- break;
- default:
- TINT_ICE(Writer, diagnostics_) << "unhandled variable storage class";
- return false;
- }
-
- auto* type = var->Type()->UnwrapRef();
-
- std::string name = program_->Symbols().NameFor(decl->symbol);
- if (decl->is_const) {
- name = "const " + name;
- }
- if (!EmitType(out, type, name)) {
- return false;
- }
- // Variable name is output as part of the type for arrays and pointers.
- if (!type->Is<sem::Array>() && !type->Is<sem::Pointer>()) {
- out << " " << name;
- }
+ auto* decl = var->Declaration();
- if (decl->constructor != nullptr) {
- out << " = ";
- if (!EmitExpression(out, decl->constructor)) {
- return false;
+ for (auto* attr : decl->attributes) {
+ if (!attr->Is<ast::InternalAttribute>()) {
+ TINT_ICE(Writer, diagnostics_) << "unexpected variable attribute";
+ return false;
+ }
}
- } else if (var->StorageClass() == ast::StorageClass::kPrivate ||
- var->StorageClass() == ast::StorageClass::kFunction ||
- var->StorageClass() == ast::StorageClass::kNone) {
- out << " = ";
- if (!EmitZeroValue(out, type)) {
- return false;
+
+ auto out = line();
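+    // e.g. a module-scope `var<private> v : f32` with no initializer is
+    // emitted roughly as `thread float v = 0.0f;`, with the initializer
+    // produced by EmitZeroValue.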
+
+ switch (var->StorageClass()) {
+ case ast::StorageClass::kFunction:
+ case ast::StorageClass::kHandle:
+ case ast::StorageClass::kNone:
+ break;
+ case ast::StorageClass::kPrivate:
+ out << "thread ";
+ break;
+ case ast::StorageClass::kWorkgroup:
+ out << "threadgroup ";
+ break;
+ default:
+ TINT_ICE(Writer, diagnostics_) << "unhandled variable storage class";
+ return false;
}
- }
- out << ";";
- return true;
+ auto* type = var->Type()->UnwrapRef();
+
+ std::string name = program_->Symbols().NameFor(decl->symbol);
+ if (decl->is_const) {
+ name = "const " + name;
+ }
+ if (!EmitType(out, type, name)) {
+ return false;
+ }
+ // Variable name is output as part of the type for arrays and pointers.
+ if (!type->Is<sem::Array>() && !type->Is<sem::Pointer>()) {
+ out << " " << name;
+ }
+
+ if (decl->constructor != nullptr) {
+ out << " = ";
+ if (!EmitExpression(out, decl->constructor)) {
+ return false;
+ }
+ } else if (var->StorageClass() == ast::StorageClass::kPrivate ||
+ var->StorageClass() == ast::StorageClass::kFunction ||
+ var->StorageClass() == ast::StorageClass::kNone) {
+ out << " = ";
+ if (!EmitZeroValue(out, type)) {
+ return false;
+ }
+ }
+ out << ";";
+
+ return true;
}
bool GeneratorImpl::EmitProgramConstVariable(const ast::Variable* var) {
- for (auto* d : var->attributes) {
- if (!d->Is<ast::IdAttribute>()) {
- diagnostics_.add_error(diag::System::Writer,
- "Decorated const values not valid");
- return false;
- }
- }
- if (!var->is_const) {
- diagnostics_.add_error(diag::System::Writer, "Expected a const value");
- return false;
- }
+ for (auto* d : var->attributes) {
+ if (!d->Is<ast::IdAttribute>()) {
+ diagnostics_.add_error(diag::System::Writer, "Decorated const values not valid");
+ return false;
+ }
+ }
+ if (!var->is_const) {
+ diagnostics_.add_error(diag::System::Writer, "Expected a const value");
+ return false;
+ }
- auto out = line();
- out << "constant ";
- auto* type = program_->Sem().Get(var)->Type()->UnwrapRef();
- if (!EmitType(out, type, program_->Symbols().NameFor(var->symbol))) {
- return false;
- }
- if (!type->Is<sem::Array>()) {
- out << " " << program_->Symbols().NameFor(var->symbol);
- }
-
- auto* global = program_->Sem().Get<sem::GlobalVariable>(var);
- if (global && global->IsOverridable()) {
- out << " [[function_constant(" << global->ConstantId() << ")]]";
- } else if (var->constructor != nullptr) {
- out << " = ";
- if (!EmitExpression(out, var->constructor)) {
- return false;
+ auto out = line();
+ out << "constant ";
+ auto* type = program_->Sem().Get(var)->Type()->UnwrapRef();
+ if (!EmitType(out, type, program_->Symbols().NameFor(var->symbol))) {
+ return false;
+ }
+ if (!type->Is<sem::Array>()) {
+ out << " " << program_->Symbols().NameFor(var->symbol);
}
- }
- out << ";";
- return true;
+ auto* global = program_->Sem().Get<sem::GlobalVariable>(var);
+ if (global && global->IsOverridable()) {
+ out << " [[function_constant(" << global->ConstantId() << ")]]";
+ } else if (var->constructor != nullptr) {
+ out << " = ";
+ if (!EmitExpression(out, var->constructor)) {
+ return false;
+ }
+ }
+ out << ";";
+
+ return true;
}
-GeneratorImpl::SizeAndAlign GeneratorImpl::MslPackedTypeSizeAndAlign(
- const sem::Type* ty) {
- return Switch(
- ty,
-
- // https://developer.apple.com/metal/Metal-Shading-Language-Specification.pdf
- // 2.1 Scalar Data Types
- [&](const sem::U32*) {
- return SizeAndAlign{4, 4};
- },
- [&](const sem::I32*) {
- return SizeAndAlign{4, 4};
- },
- [&](const sem::F32*) {
- return SizeAndAlign{4, 4};
- },
-
- [&](const sem::Vector* vec) {
- auto num_els = vec->Width();
- auto* el_ty = vec->type();
- if (el_ty->IsAnyOf<sem::U32, sem::I32, sem::F32>()) {
- // Use a packed_vec type for 3-element vectors only.
- if (num_els == 3) {
- // https://developer.apple.com/metal/Metal-Shading-Language-Specification.pdf
- // 2.2.3 Packed Vector Types
- return SizeAndAlign{num_els * 4, 4};
- } else {
- // https://developer.apple.com/metal/Metal-Shading-Language-Specification.pdf
- // 2.2 Vector Data Types
- return SizeAndAlign{num_els * 4, num_els * 4};
- }
- }
- TINT_UNREACHABLE(Writer, diagnostics_)
- << "Unhandled vector element type " << el_ty->TypeInfo().name;
- return SizeAndAlign{};
- },
+GeneratorImpl::SizeAndAlign GeneratorImpl::MslPackedTypeSizeAndAlign(const sem::Type* ty) {
+ return Switch(
+ ty,
- [&](const sem::Matrix* mat) {
// https://developer.apple.com/metal/Metal-Shading-Language-Specification.pdf
- // 2.3 Matrix Data Types
- auto cols = mat->columns();
- auto rows = mat->rows();
- auto* el_ty = mat->type();
- if (el_ty->IsAnyOf<sem::U32, sem::I32, sem::F32>()) {
- static constexpr SizeAndAlign table[] = {
- /* float2x2 */ {16, 8},
- /* float2x3 */ {32, 16},
- /* float2x4 */ {32, 16},
- /* float3x2 */ {24, 8},
- /* float3x3 */ {48, 16},
- /* float3x4 */ {48, 16},
- /* float4x2 */ {32, 8},
- /* float4x3 */ {64, 16},
- /* float4x4 */ {64, 16},
- };
- if (cols >= 2 && cols <= 4 && rows >= 2 && rows <= 4) {
- return table[(3 * (cols - 2)) + (rows - 2)];
- }
- }
-
- TINT_UNREACHABLE(Writer, diagnostics_)
- << "Unhandled matrix element type " << el_ty->TypeInfo().name;
- return SizeAndAlign{};
- },
-
- [&](const sem::Array* arr) {
- if (!arr->IsStrideImplicit()) {
- TINT_ICE(Writer, diagnostics_) << "arrays with explicit strides not "
- "exist past the SPIR-V reader";
- return SizeAndAlign{};
- }
- auto num_els = std::max<uint32_t>(arr->Count(), 1);
- return SizeAndAlign{arr->Stride() * num_els, arr->Align()};
- },
-
- [&](const sem::Struct* str) {
- // TODO(crbug.com/tint/650): There's an assumption here that MSL's
- // default structure size and alignment matches WGSL's. We need to
- // confirm this.
- return SizeAndAlign{str->Size(), str->Align()};
- },
-
- [&](const sem::Atomic* atomic) {
- return MslPackedTypeSizeAndAlign(atomic->Type());
- },
-
- [&](Default) {
- TINT_UNREACHABLE(Writer, diagnostics_)
- << "Unhandled type " << ty->TypeInfo().name;
- return SizeAndAlign{};
- });
+ // 2.1 Scalar Data Types
+ [&](const sem::U32*) {
+ return SizeAndAlign{4, 4};
+ },
+ [&](const sem::I32*) {
+ return SizeAndAlign{4, 4};
+ },
+ [&](const sem::F32*) {
+ return SizeAndAlign{4, 4};
+ },
+
+ [&](const sem::Vector* vec) {
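+            // e.g. vec2<f32> is {8, 8} and vec4<f32> is {16, 16}, while
+            // vec3<f32> uses the packed form {12, 4}.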
+ auto num_els = vec->Width();
+ auto* el_ty = vec->type();
+ if (el_ty->IsAnyOf<sem::U32, sem::I32, sem::F32>()) {
+ // Use a packed_vec type for 3-element vectors only.
+ if (num_els == 3) {
+ // https://developer.apple.com/metal/Metal-Shading-Language-Specification.pdf
+ // 2.2.3 Packed Vector Types
+ return SizeAndAlign{num_els * 4, 4};
+ } else {
+ // https://developer.apple.com/metal/Metal-Shading-Language-Specification.pdf
+ // 2.2 Vector Data Types
+ return SizeAndAlign{num_els * 4, num_els * 4};
+ }
+ }
+ TINT_UNREACHABLE(Writer, diagnostics_)
+ << "Unhandled vector element type " << el_ty->TypeInfo().name;
+ return SizeAndAlign{};
+ },
+
+ [&](const sem::Matrix* mat) {
+ // https://developer.apple.com/metal/Metal-Shading-Language-Specification.pdf
+ // 2.3 Matrix Data Types
+ auto cols = mat->columns();
+ auto rows = mat->rows();
+ auto* el_ty = mat->type();
+ if (el_ty->IsAnyOf<sem::U32, sem::I32, sem::F32>()) {
+ static constexpr SizeAndAlign table[] = {
+ /* float2x2 */ {16, 8},
+ /* float2x3 */ {32, 16},
+ /* float2x4 */ {32, 16},
+ /* float3x2 */ {24, 8},
+ /* float3x3 */ {48, 16},
+ /* float3x4 */ {48, 16},
+ /* float4x2 */ {32, 8},
+ /* float4x3 */ {64, 16},
+ /* float4x4 */ {64, 16},
+ };
+ if (cols >= 2 && cols <= 4 && rows >= 2 && rows <= 4) {
+ return table[(3 * (cols - 2)) + (rows - 2)];
+ }
+ }
+
+ TINT_UNREACHABLE(Writer, diagnostics_)
+ << "Unhandled matrix element type " << el_ty->TypeInfo().name;
+ return SizeAndAlign{};
+ },
+
+ [&](const sem::Array* arr) {
+ if (!arr->IsStrideImplicit()) {
+                TINT_ICE(Writer, diagnostics_) << "arrays with explicit strides should not "
+ "exist past the SPIR-V reader";
+ return SizeAndAlign{};
+ }
+ auto num_els = std::max<uint32_t>(arr->Count(), 1);
+ return SizeAndAlign{arr->Stride() * num_els, arr->Align()};
+ },
+
+ [&](const sem::Struct* str) {
+ // TODO(crbug.com/tint/650): There's an assumption here that MSL's
+            // default structure size and alignment match WGSL's. We need to
+ // confirm this.
+ return SizeAndAlign{str->Size(), str->Align()};
+ },
+
+ [&](const sem::Atomic* atomic) { return MslPackedTypeSizeAndAlign(atomic->Type()); },
+
+ [&](Default) {
+ TINT_UNREACHABLE(Writer, diagnostics_) << "Unhandled type " << ty->TypeInfo().name;
+ return SizeAndAlign{};
+ });
}
template <typename F>
@@ -2981,65 +3056,64 @@ bool GeneratorImpl::CallBuiltinHelper(std::ostream& out,
const ast::CallExpression* call,
const sem::Builtin* builtin,
F&& build) {
- // Generate the helper function if it hasn't been created already
- auto fn = utils::GetOrCreate(builtins_, builtin, [&]() -> std::string {
- TextBuffer b;
- TINT_DEFER(helpers_.Append(b));
-
- auto fn_name =
- UniqueIdentifier(std::string("tint_") + sem::str(builtin->Type()));
- std::vector<std::string> parameter_names;
- {
- auto decl = line(&b);
- if (!EmitTypeAndName(decl, builtin->ReturnType(), fn_name)) {
- return "";
- }
- {
- ScopedParen sp(decl);
- for (auto* param : builtin->Parameters()) {
- if (!parameter_names.empty()) {
- decl << ", ";
- }
- auto param_name = "param_" + std::to_string(parameter_names.size());
- if (!EmitTypeAndName(decl, param->Type(), param_name)) {
- return "";
- }
- parameter_names.emplace_back(std::move(param_name));
- }
- }
- decl << " {";
- }
- {
- ScopedIndent si(&b);
- if (!build(&b, parameter_names)) {
- return "";
- }
- }
- line(&b) << "}";
- line(&b);
- return fn_name;
- });
+ // Generate the helper function if it hasn't been created already
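+    // e.g. for a builtin named `degrees` the helper is named roughly
+    // `tint_degrees` (UniqueIdentifier may append a suffix), and the call
+    // site below then invokes that helper instead of the builtin directly.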
+ auto fn = utils::GetOrCreate(builtins_, builtin, [&]() -> std::string {
+ TextBuffer b;
+ TINT_DEFER(helpers_.Append(b));
- if (fn.empty()) {
- return false;
- }
+ auto fn_name = UniqueIdentifier(std::string("tint_") + sem::str(builtin->Type()));
+ std::vector<std::string> parameter_names;
+ {
+ auto decl = line(&b);
+ if (!EmitTypeAndName(decl, builtin->ReturnType(), fn_name)) {
+ return "";
+ }
+ {
+ ScopedParen sp(decl);
+ for (auto* param : builtin->Parameters()) {
+ if (!parameter_names.empty()) {
+ decl << ", ";
+ }
+ auto param_name = "param_" + std::to_string(parameter_names.size());
+ if (!EmitTypeAndName(decl, param->Type(), param_name)) {
+ return "";
+ }
+ parameter_names.emplace_back(std::move(param_name));
+ }
+ }
+ decl << " {";
+ }
+ {
+ ScopedIndent si(&b);
+ if (!build(&b, parameter_names)) {
+ return "";
+ }
+ }
+ line(&b) << "}";
+ line(&b);
+ return fn_name;
+ });
- // Call the helper
- out << fn;
- {
- ScopedParen sp(out);
- bool first = true;
- for (auto* arg : call->args) {
- if (!first) {
- out << ", ";
- }
- first = false;
- if (!EmitExpression(out, arg)) {
+ if (fn.empty()) {
return false;
- }
}
- }
- return true;
+
+ // Call the helper
+ out << fn;
+ {
+ ScopedParen sp(out);
+ bool first = true;
+ for (auto* arg : call->args) {
+ if (!first) {
+ out << ", ";
+ }
+ first = false;
+ if (!EmitExpression(out, arg)) {
+ return false;
+ }
+ }
+ }
+ return true;
}
} // namespace tint::writer::msl
diff --git a/chromium/third_party/dawn/src/tint/writer/msl/generator_impl.h b/chromium/third_party/dawn/src/tint/writer/msl/generator_impl.h
index f3cee25a297..be98a86b431 100644
--- a/chromium/third_party/dawn/src/tint/writer/msl/generator_impl.h
+++ b/chromium/third_party/dawn/src/tint/writer/msl/generator_impl.h
@@ -16,6 +16,7 @@
#define SRC_TINT_WRITER_MSL_GENERATOR_IMPL_H_
#include <string>
+#include <tuple>
#include <unordered_map>
#include <unordered_set>
#include <vector>
@@ -45,6 +46,7 @@
// Forward declarations
namespace tint::sem {
class Call;
+class Constant;
class Builtin;
class TypeConstructor;
class TypeConversion;
@@ -54,20 +56,20 @@ namespace tint::writer::msl {
/// The result of sanitizing a program for generation.
struct SanitizedResult {
- /// Constructor
- SanitizedResult();
- /// Destructor
- ~SanitizedResult();
- /// Move constructor
- SanitizedResult(SanitizedResult&&);
+ /// Constructor
+ SanitizedResult();
+ /// Destructor
+ ~SanitizedResult();
+ /// Move constructor
+ SanitizedResult(SanitizedResult&&);
- /// The sanitized program.
- Program program;
- /// True if the shader needs a UBO of buffer sizes.
- bool needs_storage_buffer_sizes = false;
- /// Indices into the array_length_from_uniform binding that are statically
- /// used.
- std::unordered_set<uint32_t> used_array_length_from_uniform_indices;
+ /// The sanitized program.
+ Program program;
+ /// True if the shader needs a UBO of buffer sizes.
+ bool needs_storage_buffer_sizes = false;
+ /// Indices into the array_length_from_uniform binding that are statically
+ /// used.
+ std::unordered_set<uint32_t> used_array_length_from_uniform_indices;
};
/// Sanitize a program in preparation for generating MSL.
@@ -78,359 +80,357 @@ SanitizedResult Sanitize(const Program* program, const Options& options);
/// Implementation class for MSL generator
class GeneratorImpl : public TextGenerator {
- public:
- /// Constructor
- /// @param program the program to generate
- explicit GeneratorImpl(const Program* program);
- ~GeneratorImpl();
+ public:
+ /// Constructor
+ /// @param program the program to generate
+ explicit GeneratorImpl(const Program* program);
+ ~GeneratorImpl();
- /// @returns true on successful generation; false otherwise
- bool Generate();
+ /// @returns true on successful generation; false otherwise
+ bool Generate();
- /// @returns true if an invariant attribute was generated
- bool HasInvariant() { return !invariant_define_name_.empty(); }
+ /// @returns true if an invariant attribute was generated
+ bool HasInvariant() { return !invariant_define_name_.empty(); }
- /// @returns a map from entry point to list of required workgroup allocations
- const std::unordered_map<std::string, std::vector<uint32_t>>&
- DynamicWorkgroupAllocations() const {
- return workgroup_allocations_;
- }
+ /// @returns a map from entry point to list of required workgroup allocations
+ const std::unordered_map<std::string, std::vector<uint32_t>>& DynamicWorkgroupAllocations()
+ const {
+ return workgroup_allocations_;
+ }
- /// Handles generating a declared type
- /// @param ty the declared type to generate
- /// @returns true if the declared type was emitted
- bool EmitTypeDecl(const sem::Type* ty);
- /// Handles an index accessor expression
- /// @param out the output of the expression stream
- /// @param expr the expression to emit
- /// @returns true if the index accessor was emitted
- bool EmitIndexAccessor(std::ostream& out,
- const ast::IndexAccessorExpression* expr);
- /// Handles an assignment statement
- /// @param stmt the statement to emit
- /// @returns true if the statement was emitted successfully
- bool EmitAssign(const ast::AssignmentStatement* stmt);
- /// Handles generating a binary expression
- /// @param out the output of the expression stream
- /// @param expr the binary expression
- /// @returns true if the expression was emitted, false otherwise
- bool EmitBinary(std::ostream& out, const ast::BinaryExpression* expr);
- /// Handles generating a bitcast expression
- /// @param out the output of the expression stream
- /// @param expr the bitcast expression
- /// @returns true if the bitcast was emitted
- bool EmitBitcast(std::ostream& out, const ast::BitcastExpression* expr);
- /// Handles a block statement
- /// @param stmt the statement to emit
- /// @returns true if the statement was emitted successfully
- bool EmitBlock(const ast::BlockStatement* stmt);
- /// Handles a break statement
- /// @param stmt the statement to emit
- /// @returns true if the statement was emitted successfully
- bool EmitBreak(const ast::BreakStatement* stmt);
- /// Handles generating a call expression
- /// @param out the output of the expression stream
- /// @param expr the call expression
- /// @returns true if the call expression is emitted
- bool EmitCall(std::ostream& out, const ast::CallExpression* expr);
- /// Handles generating a builtin call expression
- /// @param out the output of the expression stream
- /// @param call the call expression
- /// @param builtin the builtin being called
- /// @returns true if the call expression is emitted
- bool EmitBuiltinCall(std::ostream& out,
- const sem::Call* call,
- const sem::Builtin* builtin);
- /// Handles generating a type conversion expression
- /// @param out the output of the expression stream
- /// @param call the call expression
- /// @param conv the type conversion
- /// @returns true if the expression is emitted
- bool EmitTypeConversion(std::ostream& out,
- const sem::Call* call,
- const sem::TypeConversion* conv);
- /// Handles generating a type constructor
- /// @param out the output of the expression stream
- /// @param call the call expression
- /// @param ctor the type constructor
- /// @returns true if the constructor is emitted
- bool EmitTypeConstructor(std::ostream& out,
- const sem::Call* call,
- const sem::TypeConstructor* ctor);
- /// Handles generating a function call
- /// @param out the output of the expression stream
- /// @param call the call expression
- /// @param func the target function
- /// @returns true if the call is emitted
- bool EmitFunctionCall(std::ostream& out,
- const sem::Call* call,
- const sem::Function* func);
- /// Handles generating a call to an atomic function (`atomicAdd`,
- /// `atomicMax`, etc)
- /// @param out the output of the expression stream
- /// @param expr the call expression
- /// @param builtin the semantic information for the atomic builtin
- /// @returns true if the call expression is emitted
- bool EmitAtomicCall(std::ostream& out,
- const ast::CallExpression* expr,
- const sem::Builtin* builtin);
- /// Handles generating a call to a texture function (`textureSample`,
- /// `textureSampleGrad`, etc)
- /// @param out the output of the expression stream
- /// @param call the call expression
- /// @param builtin the semantic information for the texture builtin
- /// @returns true if the call expression is emitted
- bool EmitTextureCall(std::ostream& out,
- const sem::Call* call,
- const sem::Builtin* builtin);
- /// Handles generating a call to the `dot()` builtin
- /// @param out the output of the expression stream
- /// @param expr the call expression
- /// @param builtin the semantic information for the builtin
- /// @returns true if the call expression is emitted
- bool EmitDotCall(std::ostream& out,
- const ast::CallExpression* expr,
- const sem::Builtin* builtin);
- /// Handles generating a call to the `modf()` builtin
- /// @param out the output of the expression stream
- /// @param expr the call expression
- /// @param builtin the semantic information for the builtin
- /// @returns true if the call expression is emitted
- bool EmitModfCall(std::ostream& out,
- const ast::CallExpression* expr,
- const sem::Builtin* builtin);
- /// Handles generating a call to the `frexp()` builtin
- /// @param out the output of the expression stream
- /// @param expr the call expression
- /// @param builtin the semantic information for the builtin
- /// @returns true if the call expression is emitted
- bool EmitFrexpCall(std::ostream& out,
+ /// Handles generating a declared type
+ /// @param ty the declared type to generate
+ /// @returns true if the declared type was emitted
+ bool EmitTypeDecl(const sem::Type* ty);
+ /// Handles an index accessor expression
+ /// @param out the output of the expression stream
+ /// @param expr the expression to emit
+ /// @returns true if the index accessor was emitted
+ bool EmitIndexAccessor(std::ostream& out, const ast::IndexAccessorExpression* expr);
+ /// Handles an assignment statement
+ /// @param stmt the statement to emit
+ /// @returns true if the statement was emitted successfully
+ bool EmitAssign(const ast::AssignmentStatement* stmt);
+ /// Handles generating a binary expression
+ /// @param out the output of the expression stream
+ /// @param expr the binary expression
+ /// @returns true if the expression was emitted, false otherwise
+ bool EmitBinary(std::ostream& out, const ast::BinaryExpression* expr);
+ /// Handles generating a bitcast expression
+ /// @param out the output of the expression stream
+ /// @param expr the bitcast expression
+ /// @returns true if the bitcast was emitted
+ bool EmitBitcast(std::ostream& out, const ast::BitcastExpression* expr);
+ /// Handles a block statement
+ /// @param stmt the statement to emit
+ /// @returns true if the statement was emitted successfully
+ bool EmitBlock(const ast::BlockStatement* stmt);
+ /// Handles a break statement
+ /// @param stmt the statement to emit
+ /// @returns true if the statement was emitted successfully
+ bool EmitBreak(const ast::BreakStatement* stmt);
+ /// Handles generating a call expression
+ /// @param out the output of the expression stream
+ /// @param expr the call expression
+ /// @returns true if the call expression is emitted
+ bool EmitCall(std::ostream& out, const ast::CallExpression* expr);
+ /// Handles generating a builtin call expression
+ /// @param out the output of the expression stream
+ /// @param call the call expression
+ /// @param builtin the builtin being called
+ /// @returns true if the call expression is emitted
+ bool EmitBuiltinCall(std::ostream& out, const sem::Call* call, const sem::Builtin* builtin);
+ /// Handles generating a type conversion expression
+ /// @param out the output of the expression stream
+ /// @param call the call expression
+ /// @param conv the type conversion
+ /// @returns true if the expression is emitted
+ bool EmitTypeConversion(std::ostream& out,
+ const sem::Call* call,
+ const sem::TypeConversion* conv);
+ /// Handles generating a type constructor
+ /// @param out the output of the expression stream
+ /// @param call the call expression
+ /// @param ctor the type constructor
+ /// @returns true if the constructor is emitted
+ bool EmitTypeConstructor(std::ostream& out,
+ const sem::Call* call,
+ const sem::TypeConstructor* ctor);
+ /// Handles generating a function call
+ /// @param out the output of the expression stream
+ /// @param call the call expression
+ /// @param func the target function
+ /// @returns true if the call is emitted
+ bool EmitFunctionCall(std::ostream& out, const sem::Call* call, const sem::Function* func);
+ /// Handles generating a call to an atomic function (`atomicAdd`,
+ /// `atomicMax`, etc)
+ /// @param out the output of the expression stream
+ /// @param expr the call expression
+ /// @param builtin the semantic information for the atomic builtin
+ /// @returns true if the call expression is emitted
+ bool EmitAtomicCall(std::ostream& out,
+ const ast::CallExpression* expr,
+ const sem::Builtin* builtin);
+ /// Handles generating a call to a texture function (`textureSample`,
+ /// `textureSampleGrad`, etc)
+ /// @param out the output of the expression stream
+ /// @param call the call expression
+ /// @param builtin the semantic information for the texture builtin
+ /// @returns true if the call expression is emitted
+ bool EmitTextureCall(std::ostream& out, const sem::Call* call, const sem::Builtin* builtin);
+ /// Handles generating a call to the `dot()` builtin
+ /// @param out the output of the expression stream
+ /// @param expr the call expression
+ /// @param builtin the semantic information for the builtin
+ /// @returns true if the call expression is emitted
+ bool EmitDotCall(std::ostream& out,
const ast::CallExpression* expr,
const sem::Builtin* builtin);
- /// Handles generating a call to the `degrees()` builtin
- /// @param out the output of the expression stream
- /// @param expr the call expression
- /// @param builtin the semantic information for the builtin
- /// @returns true if the call expression is emitted
- bool EmitDegreesCall(std::ostream& out,
- const ast::CallExpression* expr,
- const sem::Builtin* builtin);
- /// Handles generating a call to the `radians()` builtin
- /// @param out the output of the expression stream
- /// @param expr the call expression
- /// @param builtin the semantic information for the builtin
- /// @returns true if the call expression is emitted
- bool EmitRadiansCall(std::ostream& out,
+ /// Handles generating a call to the `modf()` builtin
+ /// @param out the output of the expression stream
+ /// @param expr the call expression
+ /// @param builtin the semantic information for the builtin
+ /// @returns true if the call expression is emitted
+ bool EmitModfCall(std::ostream& out,
+ const ast::CallExpression* expr,
+ const sem::Builtin* builtin);
+ /// Handles generating a call to the `frexp()` builtin
+ /// @param out the output of the expression stream
+ /// @param expr the call expression
+ /// @param builtin the semantic information for the builtin
+ /// @returns true if the call expression is emitted
+ bool EmitFrexpCall(std::ostream& out,
const ast::CallExpression* expr,
const sem::Builtin* builtin);
- /// Handles a case statement
- /// @param stmt the statement
- /// @returns true if the statement was emitted successfully
- bool EmitCase(const ast::CaseStatement* stmt);
- /// Handles a continue statement
- /// @param stmt the statement to emit
- /// @returns true if the statement was emitted successfully
- bool EmitContinue(const ast::ContinueStatement* stmt);
- /// Handles generating a discard statement
- /// @param stmt the discard statement
- /// @returns true if the statement was successfully emitted
- bool EmitDiscard(const ast::DiscardStatement* stmt);
- /// Handles emitting the entry point function
- /// @param func the entry point function
- /// @returns true if the entry point function was emitted
- bool EmitEntryPointFunction(const ast::Function* func);
- /// Handles generate an Expression
- /// @param out the output of the expression stream
- /// @param expr the expression
- /// @returns true if the expression was emitted
- bool EmitExpression(std::ostream& out, const ast::Expression* expr);
- /// Handles generating a function
- /// @param func the function to generate
- /// @returns true if the function was emitted
- bool EmitFunction(const ast::Function* func);
- /// Handles generating an identifier expression
- /// @param out the output of the expression stream
- /// @param expr the identifier expression
- /// @returns true if the identifier was emitted
- bool EmitIdentifier(std::ostream& out, const ast::IdentifierExpression* expr);
- /// Handles an if statement
- /// @param stmt the statement to emit
- /// @returns true if the statement was successfully emitted
- bool EmitIf(const ast::IfStatement* stmt);
- /// Handles a literal
- /// @param out the output of the expression stream
- /// @param lit the literal to emit
- /// @returns true if the literal was successfully emitted
- bool EmitLiteral(std::ostream& out, const ast::LiteralExpression* lit);
- /// Handles a loop statement
- /// @param stmt the statement to emit
- /// @returns true if the statement was emitted
- bool EmitLoop(const ast::LoopStatement* stmt);
- /// Handles a for loop statement
- /// @param stmt the statement to emit
- /// @returns true if the statement was emitted
- bool EmitForLoop(const ast::ForLoopStatement* stmt);
- /// Handles a member accessor expression
- /// @param out the output of the expression stream
- /// @param expr the member accessor expression
- /// @returns true if the member accessor was emitted
- bool EmitMemberAccessor(std::ostream& out,
- const ast::MemberAccessorExpression* expr);
- /// Handles return statements
- /// @param stmt the statement to emit
- /// @returns true if the statement was successfully emitted
- bool EmitReturn(const ast::ReturnStatement* stmt);
- /// Handles emitting a pipeline stage name
- /// @param out the output of the expression stream
- /// @param stage the stage to emit
- void EmitStage(std::ostream& out, ast::PipelineStage stage);
- /// Handles statement
- /// @param stmt the statement to emit
- /// @returns true if the statement was emitted
- bool EmitStatement(const ast::Statement* stmt);
- /// Emits a list of statements
- /// @param stmts the statement list
- /// @returns true if the statements were emitted successfully
- bool EmitStatements(const ast::StatementList& stmts);
- /// Emits a list of statements with an indentation
- /// @param stmts the statement list
- /// @returns true if the statements were emitted successfully
- bool EmitStatementsWithIndent(const ast::StatementList& stmts);
- /// Handles generating a switch statement
- /// @param stmt the statement to emit
- /// @returns true if the statement was emitted
- bool EmitSwitch(const ast::SwitchStatement* stmt);
- /// Handles generating a type
- /// @param out the output of the type stream
- /// @param type the type to generate
- /// @param name the name of the variable, only used for array emission
- /// @param name_printed (optional) if not nullptr and an array was printed
- /// @returns true if the type is emitted
- bool EmitType(std::ostream& out,
- const sem::Type* type,
- const std::string& name,
- bool* name_printed = nullptr);
- /// Handles generating type and name
- /// @param out the output stream
- /// @param type the type to generate
- /// @param name the name to emit
- /// @returns true if the type is emitted
- bool EmitTypeAndName(std::ostream& out,
- const sem::Type* type,
- const std::string& name);
- /// Handles generating a storage class
- /// @param out the output of the type stream
- /// @param sc the storage class to generate
- /// @returns true if the storage class is emitted
- bool EmitStorageClass(std::ostream& out, ast::StorageClass sc);
- /// Handles generating an MSL-packed storage type.
- /// If the type does not have a packed form, the standard non-packed form is
- /// emitted.
- /// @param out the output of the type stream
- /// @param type the type to generate
- /// @param name the name of the variable, only used for array emission
- /// @returns true if the type is emitted
- bool EmitPackedType(std::ostream& out,
- const sem::Type* type,
- const std::string& name);
- /// Handles generating a struct declaration
- /// @param buffer the text buffer that the type declaration will be written to
- /// @param str the struct to generate
- /// @returns true if the struct is emitted
- bool EmitStructType(TextBuffer* buffer, const sem::Struct* str);
- /// Handles a unary op expression
- /// @param out the output of the expression stream
- /// @param expr the expression to emit
- /// @returns true if the expression was emitted
- bool EmitUnaryOp(std::ostream& out, const ast::UnaryOpExpression* expr);
- /// Handles generating a variable
- /// @param var the variable to generate
- /// @returns true if the variable was emitted
- bool EmitVariable(const sem::Variable* var);
- /// Handles generating a program scope constant variable
- /// @param var the variable to emit
- /// @returns true if the variable was emitted
- bool EmitProgramConstVariable(const ast::Variable* var);
- /// Emits the zero value for the given type
- /// @param out the output of the expression stream
- /// @param type the type to emit the value for
- /// @returns true if the zero value was successfully emitted.
- bool EmitZeroValue(std::ostream& out, const sem::Type* type);
-
- /// Handles generating a builtin name
- /// @param builtin the semantic info for the builtin
- /// @returns the name or "" if not valid
- std::string generate_builtin_name(const sem::Builtin* builtin);
+ /// Handles generating a call to the `degrees()` builtin
+ /// @param out the output of the expression stream
+ /// @param expr the call expression
+ /// @param builtin the semantic information for the builtin
+ /// @returns true if the call expression is emitted
+ bool EmitDegreesCall(std::ostream& out,
+ const ast::CallExpression* expr,
+ const sem::Builtin* builtin);
+ /// Handles generating a call to the `radians()` builtin
+ /// @param out the output of the expression stream
+ /// @param expr the call expression
+ /// @param builtin the semantic information for the builtin
+ /// @returns true if the call expression is emitted
+ bool EmitRadiansCall(std::ostream& out,
+ const ast::CallExpression* expr,
+ const sem::Builtin* builtin);
+ /// Handles a case statement
+ /// @param stmt the statement
+ /// @returns true if the statement was emitted successfully
+ bool EmitCase(const ast::CaseStatement* stmt);
+ /// Handles a continue statement
+ /// @param stmt the statement to emit
+ /// @returns true if the statement was emitted successfully
+ bool EmitContinue(const ast::ContinueStatement* stmt);
+ /// Handles generating a discard statement
+ /// @param stmt the discard statement
+ /// @returns true if the statement was successfully emitted
+ bool EmitDiscard(const ast::DiscardStatement* stmt);
+ /// Handles emitting the entry point function
+ /// @param func the entry point function
+ /// @returns true if the entry point function was emitted
+ bool EmitEntryPointFunction(const ast::Function* func);
+    /// Handles generating an Expression
+ /// @param out the output of the expression stream
+ /// @param expr the expression
+ /// @returns true if the expression was emitted
+ bool EmitExpression(std::ostream& out, const ast::Expression* expr);
+ /// Handles generating a function
+ /// @param func the function to generate
+ /// @returns true if the function was emitted
+ bool EmitFunction(const ast::Function* func);
+ /// Handles generating an identifier expression
+ /// @param out the output of the expression stream
+ /// @param expr the identifier expression
+ /// @returns true if the identifier was emitted
+ bool EmitIdentifier(std::ostream& out, const ast::IdentifierExpression* expr);
+ /// Handles an if statement
+ /// @param stmt the statement to emit
+ /// @returns true if the statement was successfully emitted
+ bool EmitIf(const ast::IfStatement* stmt);
+ /// Handles a constant value
+ /// @param out the output stream
+ /// @param constant the constant value to emit
+ /// @returns true if the constant value was successfully emitted
+ bool EmitConstant(std::ostream& out, const sem::Constant& constant);
+ /// Handles a literal
+ /// @param out the output of the expression stream
+ /// @param lit the literal to emit
+ /// @returns true if the literal was successfully emitted
+ bool EmitLiteral(std::ostream& out, const ast::LiteralExpression* lit);
+ /// Handles a loop statement
+ /// @param stmt the statement to emit
+ /// @returns true if the statement was emitted
+ bool EmitLoop(const ast::LoopStatement* stmt);
+ /// Handles a for loop statement
+ /// @param stmt the statement to emit
+ /// @returns true if the statement was emitted
+ bool EmitForLoop(const ast::ForLoopStatement* stmt);
+ /// Handles a member accessor expression
+ /// @param out the output of the expression stream
+ /// @param expr the member accessor expression
+ /// @returns true if the member accessor was emitted
+ bool EmitMemberAccessor(std::ostream& out, const ast::MemberAccessorExpression* expr);
+ /// Handles return statements
+ /// @param stmt the statement to emit
+ /// @returns true if the statement was successfully emitted
+ bool EmitReturn(const ast::ReturnStatement* stmt);
+ /// Handles emitting a pipeline stage name
+ /// @param out the output of the expression stream
+ /// @param stage the stage to emit
+ void EmitStage(std::ostream& out, ast::PipelineStage stage);
+ /// Handles statement
+ /// @param stmt the statement to emit
+ /// @returns true if the statement was emitted
+ bool EmitStatement(const ast::Statement* stmt);
+ /// Emits a list of statements
+ /// @param stmts the statement list
+ /// @returns true if the statements were emitted successfully
+ bool EmitStatements(const ast::StatementList& stmts);
+ /// Emits a list of statements with an indentation
+ /// @param stmts the statement list
+ /// @returns true if the statements were emitted successfully
+ bool EmitStatementsWithIndent(const ast::StatementList& stmts);
+ /// Handles generating a switch statement
+ /// @param stmt the statement to emit
+ /// @returns true if the statement was emitted
+ bool EmitSwitch(const ast::SwitchStatement* stmt);
+ /// Handles generating a type
+ /// @param out the output of the type stream
+ /// @param type the type to generate
+    /// @param name the name of the variable, used for array and pointer emission
+    /// @param name_printed (optional) if not nullptr, set to true if the variable
+    /// name was emitted as part of the type (arrays and pointers)
+ /// @returns true if the type is emitted
+ bool EmitType(std::ostream& out,
+ const sem::Type* type,
+ const std::string& name,
+ bool* name_printed = nullptr);
+ /// Handles generating type and name
+ /// @param out the output stream
+ /// @param type the type to generate
+ /// @param name the name to emit
+ /// @returns true if the type is emitted
+ bool EmitTypeAndName(std::ostream& out, const sem::Type* type, const std::string& name);
+ /// Handles generating a storage class
+ /// @param out the output of the type stream
+ /// @param sc the storage class to generate
+ /// @returns true if the storage class is emitted
+ bool EmitStorageClass(std::ostream& out, ast::StorageClass sc);
+ /// Handles generating an MSL-packed storage type.
+ /// If the type does not have a packed form, the standard non-packed form is
+ /// emitted.
+ /// @param out the output of the type stream
+ /// @param type the type to generate
+ /// @param name the name of the variable, only used for array emission
+ /// @returns true if the type is emitted
+ bool EmitPackedType(std::ostream& out, const sem::Type* type, const std::string& name);
+ /// Handles generating a struct declaration
+ /// @param buffer the text buffer that the type declaration will be written to
+ /// @param str the struct to generate
+ /// @returns true if the struct is emitted
+ bool EmitStructType(TextBuffer* buffer, const sem::Struct* str);
+ /// Handles generating a structure declaration. Only the first call for a given struct emits it;
+ /// subsequent calls are a no-op and return true.
+ /// @param buffer the text buffer that the type declaration will be written to
+ /// @param ty the struct to generate
+ /// @returns true if the struct is emitted
+ bool EmitStructTypeOnce(TextBuffer* buffer, const sem::Struct* ty);
+ /// Handles a unary op expression
+ /// @param out the output of the expression stream
+ /// @param expr the expression to emit
+ /// @returns true if the expression was emitted
+ bool EmitUnaryOp(std::ostream& out, const ast::UnaryOpExpression* expr);
+ /// Handles generating a variable
+ /// @param var the variable to generate
+ /// @returns true if the variable was emitted
+ bool EmitVariable(const sem::Variable* var);
+ /// Handles generating a program scope constant variable
+ /// @param var the variable to emit
+ /// @returns true if the variable was emitted
+ bool EmitProgramConstVariable(const ast::Variable* var);
+ /// Emits the zero value for the given type
+ /// @param out the output of the expression stream
+ /// @param type the type to emit the value for
+ /// @returns true if the zero value was successfully emitted.
+ bool EmitZeroValue(std::ostream& out, const sem::Type* type);
- /// Converts a builtin to an attribute name
- /// @param builtin the builtin to convert
- /// @returns the string name of the builtin or blank on error
- std::string builtin_to_attribute(ast::Builtin builtin) const;
+ /// Handles generating a builtin name
+ /// @param builtin the semantic info for the builtin
+ /// @returns the name or "" if not valid
+ std::string generate_builtin_name(const sem::Builtin* builtin);
- /// Converts interpolation attributes to an MSL attribute
- /// @param type the interpolation type
- /// @param sampling the interpolation sampling
- /// @returns the string name of the attribute or blank on error
- std::string interpolation_to_attribute(
- ast::InterpolationType type,
- ast::InterpolationSampling sampling) const;
+ /// Converts a builtin to an attribute name
+ /// @param builtin the builtin to convert
+ /// @returns the string name of the builtin or blank on error
+ std::string builtin_to_attribute(ast::Builtin builtin) const;
- private:
- // A pair of byte size and alignment `uint32_t`s.
- struct SizeAndAlign {
- uint32_t size;
- uint32_t align;
- };
+ /// Converts interpolation attributes to an MSL attribute
+ /// @param type the interpolation type
+ /// @param sampling the interpolation sampling
+ /// @returns the string name of the attribute or blank on error
+ std::string interpolation_to_attribute(ast::InterpolationType type,
+ ast::InterpolationSampling sampling) const;
- /// CallBuiltinHelper will call the builtin helper function, creating it
- /// if it hasn't been built already. If the builtin needs to be built then
- /// CallBuiltinHelper will generate the function signature and will call
- /// `build` to emit the body of the function.
- /// @param out the output of the expression stream
- /// @param call the call expression
- /// @param builtin the semantic information for the builtin
- /// @param build a function with the signature:
- /// `bool(TextBuffer* buffer, const std::vector<std::string>& params)`
- /// Where:
- /// `buffer` is the body of the generated function
- /// `params` is the name of all the generated function parameters
- /// @returns true if the call expression is emitted
- template <typename F>
- bool CallBuiltinHelper(std::ostream& out,
- const ast::CallExpression* call,
- const sem::Builtin* builtin,
- F&& build);
+ private:
+ // A pair of byte size and alignment `uint32_t`s.
+ struct SizeAndAlign {
+ uint32_t size;
+ uint32_t align;
+ };
- TextBuffer helpers_; // Helper functions emitted at the top of the output
+ /// CallBuiltinHelper will call the builtin helper function, creating it
+ /// if it hasn't been built already. If the builtin needs to be built then
+ /// CallBuiltinHelper will generate the function signature and will call
+ /// `build` to emit the body of the function.
+ /// @param out the output of the expression stream
+ /// @param call the call expression
+ /// @param builtin the semantic information for the builtin
+ /// @param build a function with the signature:
+ /// `bool(TextBuffer* buffer, const std::vector<std::string>& params)`
+ /// Where:
+ /// `buffer` is the body of the generated function
+ /// `params` holds the names of all the generated function parameters
+ /// @returns true if the call expression is emitted
+ template <typename F>
+ bool CallBuiltinHelper(std::ostream& out,
+ const ast::CallExpression* call,
+ const sem::Builtin* builtin,
+ F&& build);
- /// @returns the MSL packed type size and alignment in bytes for the given
- /// type.
- SizeAndAlign MslPackedTypeSizeAndAlign(const sem::Type* ty);
+ TextBuffer helpers_; // Helper functions emitted at the top of the output
- using StorageClassToString =
- std::unordered_map<ast::StorageClass, std::string>;
+ /// @returns the MSL packed type size and alignment in bytes for the given
+ /// type.
+ SizeAndAlign MslPackedTypeSizeAndAlign(const sem::Type* ty);
- std::function<bool()> emit_continuing_;
+ std::function<bool()> emit_continuing_;
- /// Name of atomicCompareExchangeWeak() helper for the given pointer storage
- /// class.
- StorageClassToString atomicCompareExchangeWeak_;
+ /// Name of atomicCompareExchangeWeak() helper for the given pointer storage
+ /// class and struct return type
+ using ACEWKeyType =
+ utils::UnorderedKeyWrapper<std::tuple<ast::StorageClass, const sem::Struct*>>;
+ std::unordered_map<ACEWKeyType, std::string> atomicCompareExchangeWeak_;
- /// Unique name of the 'TINT_INVARIANT' preprocessor define. Non-empty only if
- /// an invariant attribute has been generated.
- std::string invariant_define_name_;
+ /// Unique name of the 'TINT_INVARIANT' preprocessor define. Non-empty only if
+ /// an invariant attribute has been generated.
+ std::string invariant_define_name_;
- /// True if matrix-packed_vector operator overloads have been generated.
- bool matrix_packed_vector_overloads_ = false;
+ /// True if matrix-packed_vector operator overloads have been generated.
+ bool matrix_packed_vector_overloads_ = false;
- /// A map from entry point name to a list of dynamic workgroup allocations.
- /// Each entry in the vector is the size of the workgroup allocation that
- /// should be created for that index.
- std::unordered_map<std::string, std::vector<uint32_t>> workgroup_allocations_;
+ /// A map from entry point name to a list of dynamic workgroup allocations.
+ /// Each entry in the vector is the size of the workgroup allocation that
+ /// should be created for that index.
+ std::unordered_map<std::string, std::vector<uint32_t>> workgroup_allocations_;
- std::unordered_map<const sem::Builtin*, std::string> builtins_;
- std::unordered_map<const sem::Type*, std::string> unary_minus_funcs_;
- std::unordered_map<uint32_t, std::string> int_dot_funcs_;
+ std::unordered_map<const sem::Builtin*, std::string> builtins_;
+ std::unordered_map<const sem::Type*, std::string> unary_minus_funcs_;
+ std::unordered_map<uint32_t, std::string> int_dot_funcs_;
+ std::unordered_set<const sem::Struct*> emitted_structs_;
};
} // namespace tint::writer::msl
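
The CallBuiltinHelper and EmitStructTypeOnce comments above both describe the same idea: generate a helper definition at most once, cache its name, and have later call sites reuse it. A minimal standalone sketch of that pattern, assuming illustrative names (HelperRegistry, GetOrBuild) rather than Tint's real API:

#include <functional>
#include <iostream>
#include <sstream>
#include <string>
#include <unordered_map>

class HelperRegistry {
  public:
    // Returns the helper name for `key`, invoking `build` to emit its definition
    // only on the first request; later requests just return the cached name.
    std::string GetOrBuild(const std::string& key,
                           const std::function<void(std::ostream&, const std::string&)>& build) {
        if (auto it = helpers_.find(key); it != helpers_.end()) {
            return it->second;  // already emitted: no-op, reuse the name
        }
        std::string name = "tint_helper_" + std::to_string(helpers_.size());
        build(header_, name);  // emit the definition once, at the top of the output
        helpers_.emplace(key, name);
        return name;
    }

    std::string header() const { return header_.str(); }

  private:
    std::ostringstream header_;                             // plays the role of the helpers_ TextBuffer
    std::unordered_map<std::string, std::string> helpers_;  // plays the role of builtins_ etc.
};

int main() {
    HelperRegistry reg;
    auto build_degrees = [](std::ostream& out, const std::string& name) {
        out << "float " << name << "(float rad) { return rad * 57.295779513f; }\n";
    };
    // Two call sites, one emitted definition.
    std::cout << reg.GetOrBuild("degrees_f32", build_degrees) << "(x)\n";
    std::cout << reg.GetOrBuild("degrees_f32", build_degrees) << "(y)\n";
    std::cout << "---- emitted helpers ----\n" << reg.header();
}

This is also why the header above keeps maps and sets such as builtins_, unary_minus_funcs_, int_dot_funcs_ and emitted_structs_: they are the memoization keys for exactly this kind of once-only emission.
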
diff --git a/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_array_accessor_test.cc b/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_array_accessor_test.cc
index d87469ed673..7475b6768a4 100644
--- a/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_array_accessor_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_array_accessor_test.cc
@@ -14,35 +14,37 @@
#include "src/tint/writer/msl/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::msl {
namespace {
using MslGeneratorImplTest = TestHelper;
TEST_F(MslGeneratorImplTest, IndexAccessor) {
- auto* ary = Var("ary", ty.array<i32, 10>());
- auto* expr = IndexAccessor("ary", 5);
- WrapInFunction(ary, expr);
+ auto* ary = Var("ary", ty.array<i32, 10>());
+ auto* expr = IndexAccessor("ary", 5_i);
+ WrapInFunction(ary, expr);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
- EXPECT_EQ(out.str(), "ary[5]");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
+ EXPECT_EQ(out.str(), "ary[5]");
}
TEST_F(MslGeneratorImplTest, IndexAccessor_OfDref) {
- Global("ary", ty.array<i32, 10>(), ast::StorageClass::kPrivate);
+ Global("ary", ty.array<i32, 10>(), ast::StorageClass::kPrivate);
- auto* p = Const("p", nullptr, AddressOf("ary"));
- auto* expr = IndexAccessor(Deref("p"), 5);
- WrapInFunction(p, expr);
+ auto* p = Let("p", nullptr, AddressOf("ary"));
+ auto* expr = IndexAccessor(Deref("p"), 5_i);
+ WrapInFunction(p, expr);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
- EXPECT_EQ(out.str(), "(*(p))[5]");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
+ EXPECT_EQ(out.str(), "(*(p))[5]");
}
} // namespace
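
The rewritten tests use Tint's number suffixes (5_i, pulled in via using namespace tint::number_suffixes) instead of bare int literals, so each literal carries an explicit WGSL type. A minimal sketch of that user-defined-literal technique, assuming an illustrative i32 wrapper rather than Tint's real Number type:

#include <cstdint>
#include <iostream>

namespace number_suffixes {
struct i32 {
    int32_t value;
};
// '5_i' builds a typed 32-bit signed literal rather than a plain int.
constexpr i32 operator""_i(unsigned long long v) {
    return i32{static_cast<int32_t>(v)};
}
}  // namespace number_suffixes

using namespace number_suffixes;  // mirrors `using namespace tint::number_suffixes;`

int main() {
    constexpr i32 index = 5_i;  // as used by IndexAccessor("ary", 5_i) above
    std::cout << index.value << "\n";
}
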
diff --git a/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_assign_test.cc b/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_assign_test.cc
index d87a6140882..6423aaef269 100644
--- a/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_assign_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_assign_test.cc
@@ -20,17 +20,17 @@ namespace {
using MslGeneratorImplTest = TestHelper;
TEST_F(MslGeneratorImplTest, Emit_Assign) {
- auto* lhs = Var("lhs", ty.i32());
- auto* rhs = Var("rhs", ty.i32());
- auto* assign = Assign(lhs, rhs);
- WrapInFunction(lhs, rhs, assign);
+ auto* lhs = Var("lhs", ty.i32());
+ auto* rhs = Var("rhs", ty.i32());
+ auto* assign = Assign(lhs, rhs);
+ WrapInFunction(lhs, rhs, assign);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(assign)) << gen.error();
- EXPECT_EQ(gen.result(), " lhs = rhs;\n");
+ ASSERT_TRUE(gen.EmitStatement(assign)) << gen.error();
+ EXPECT_EQ(gen.result(), " lhs = rhs;\n");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_binary_test.cc b/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_binary_test.cc
index 796f1de65c9..02daae25503 100644
--- a/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_binary_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_binary_test.cc
@@ -18,82 +18,79 @@ namespace tint::writer::msl {
namespace {
struct BinaryData {
- const char* result;
- ast::BinaryOp op;
+ const char* result;
+ ast::BinaryOp op;
};
inline std::ostream& operator<<(std::ostream& out, BinaryData data) {
- out << data.op;
- return out;
+ out << data.op;
+ return out;
}
using MslBinaryTest = TestParamHelper<BinaryData>;
TEST_P(MslBinaryTest, Emit) {
- auto params = GetParam();
+ auto params = GetParam();
- auto type = [&] {
- return ((params.op == ast::BinaryOp::kLogicalAnd) ||
- (params.op == ast::BinaryOp::kLogicalOr))
- ? static_cast<const ast::Type*>(ty.bool_())
- : static_cast<const ast::Type*>(ty.u32());
- };
+ auto type = [&] {
+ return ((params.op == ast::BinaryOp::kLogicalAnd) ||
+ (params.op == ast::BinaryOp::kLogicalOr))
+ ? static_cast<const ast::Type*>(ty.bool_())
+ : static_cast<const ast::Type*>(ty.u32());
+ };
- auto* left = Var("left", type());
- auto* right = Var("right", type());
+ auto* left = Var("left", type());
+ auto* right = Var("right", type());
- auto* expr =
- create<ast::BinaryExpression>(params.op, Expr(left), Expr(right));
- WrapInFunction(left, right, expr);
+ auto* expr = create<ast::BinaryExpression>(params.op, Expr(left), Expr(right));
+ WrapInFunction(left, right, expr);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
- EXPECT_EQ(out.str(), params.result);
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
+ EXPECT_EQ(out.str(), params.result);
}
INSTANTIATE_TEST_SUITE_P(
MslGeneratorImplTest,
MslBinaryTest,
- testing::Values(
- BinaryData{"(left & right)", ast::BinaryOp::kAnd},
- BinaryData{"(left | right)", ast::BinaryOp::kOr},
- BinaryData{"(left ^ right)", ast::BinaryOp::kXor},
- BinaryData{"(left && right)", ast::BinaryOp::kLogicalAnd},
- BinaryData{"(left || right)", ast::BinaryOp::kLogicalOr},
- BinaryData{"(left == right)", ast::BinaryOp::kEqual},
- BinaryData{"(left != right)", ast::BinaryOp::kNotEqual},
- BinaryData{"(left < right)", ast::BinaryOp::kLessThan},
- BinaryData{"(left > right)", ast::BinaryOp::kGreaterThan},
- BinaryData{"(left <= right)", ast::BinaryOp::kLessThanEqual},
- BinaryData{"(left >= right)", ast::BinaryOp::kGreaterThanEqual},
- BinaryData{"(left << right)", ast::BinaryOp::kShiftLeft},
- BinaryData{"(left >> right)", ast::BinaryOp::kShiftRight},
- BinaryData{"(left + right)", ast::BinaryOp::kAdd},
- BinaryData{"(left - right)", ast::BinaryOp::kSubtract},
- BinaryData{"(left * right)", ast::BinaryOp::kMultiply},
- BinaryData{"(left / right)", ast::BinaryOp::kDivide},
- BinaryData{"(left % right)", ast::BinaryOp::kModulo}));
-
-using MslBinaryTest_SignedOverflowDefinedBehaviour =
- TestParamHelper<BinaryData>;
+ testing::Values(BinaryData{"(left & right)", ast::BinaryOp::kAnd},
+ BinaryData{"(left | right)", ast::BinaryOp::kOr},
+ BinaryData{"(left ^ right)", ast::BinaryOp::kXor},
+ BinaryData{"(left && right)", ast::BinaryOp::kLogicalAnd},
+ BinaryData{"(left || right)", ast::BinaryOp::kLogicalOr},
+ BinaryData{"(left == right)", ast::BinaryOp::kEqual},
+ BinaryData{"(left != right)", ast::BinaryOp::kNotEqual},
+ BinaryData{"(left < right)", ast::BinaryOp::kLessThan},
+ BinaryData{"(left > right)", ast::BinaryOp::kGreaterThan},
+ BinaryData{"(left <= right)", ast::BinaryOp::kLessThanEqual},
+ BinaryData{"(left >= right)", ast::BinaryOp::kGreaterThanEqual},
+ BinaryData{"(left << right)", ast::BinaryOp::kShiftLeft},
+ BinaryData{"(left >> right)", ast::BinaryOp::kShiftRight},
+ BinaryData{"(left + right)", ast::BinaryOp::kAdd},
+ BinaryData{"(left - right)", ast::BinaryOp::kSubtract},
+ BinaryData{"(left * right)", ast::BinaryOp::kMultiply},
+ BinaryData{"(left / right)", ast::BinaryOp::kDivide},
+ BinaryData{"(left % right)", ast::BinaryOp::kModulo}));
+
+using MslBinaryTest_SignedOverflowDefinedBehaviour = TestParamHelper<BinaryData>;
TEST_P(MslBinaryTest_SignedOverflowDefinedBehaviour, Emit) {
- auto params = GetParam();
+ auto params = GetParam();
- auto* a_type = ty.i32();
- auto* b_type = (params.op == ast::BinaryOp::kShiftLeft ||
- params.op == ast::BinaryOp::kShiftRight)
- ? static_cast<const ast::Type*>(ty.u32())
- : ty.i32();
+ auto* a_type = ty.i32();
+ auto* b_type =
+ (params.op == ast::BinaryOp::kShiftLeft || params.op == ast::BinaryOp::kShiftRight)
+ ? static_cast<const ast::Type*>(ty.u32())
+ : ty.i32();
- auto* a = Var("a", a_type);
- auto* b = Var("b", b_type);
+ auto* a = Var("a", a_type);
+ auto* b = Var("b", b_type);
- auto* expr = create<ast::BinaryExpression>(params.op, Expr(a), Expr(b));
- WrapInFunction(a, b, expr);
+ auto* expr = create<ast::BinaryExpression>(params.op, Expr(a), Expr(b));
+ WrapInFunction(a, b, expr);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
- EXPECT_EQ(out.str(), params.result);
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
+ EXPECT_EQ(out.str(), params.result);
}
using Op = ast::BinaryOp;
constexpr BinaryData signed_overflow_defined_behaviour_cases[] = {
@@ -102,34 +99,32 @@ constexpr BinaryData signed_overflow_defined_behaviour_cases[] = {
{"as_type<int>((as_type<uint>(a) + as_type<uint>(b)))", Op::kAdd},
{"as_type<int>((as_type<uint>(a) - as_type<uint>(b)))", Op::kSubtract},
{"as_type<int>((as_type<uint>(a) * as_type<uint>(b)))", Op::kMultiply}};
-INSTANTIATE_TEST_SUITE_P(
- MslGeneratorImplTest,
- MslBinaryTest_SignedOverflowDefinedBehaviour,
- testing::ValuesIn(signed_overflow_defined_behaviour_cases));
+INSTANTIATE_TEST_SUITE_P(MslGeneratorImplTest,
+ MslBinaryTest_SignedOverflowDefinedBehaviour,
+ testing::ValuesIn(signed_overflow_defined_behaviour_cases));
-using MslBinaryTest_SignedOverflowDefinedBehaviour_Chained =
- TestParamHelper<BinaryData>;
+using MslBinaryTest_SignedOverflowDefinedBehaviour_Chained = TestParamHelper<BinaryData>;
TEST_P(MslBinaryTest_SignedOverflowDefinedBehaviour_Chained, Emit) {
- auto params = GetParam();
+ auto params = GetParam();
- auto* a_type = ty.i32();
- auto* b_type = (params.op == ast::BinaryOp::kShiftLeft ||
- params.op == ast::BinaryOp::kShiftRight)
- ? static_cast<const ast::Type*>(ty.u32())
- : ty.i32();
+ auto* a_type = ty.i32();
+ auto* b_type =
+ (params.op == ast::BinaryOp::kShiftLeft || params.op == ast::BinaryOp::kShiftRight)
+ ? static_cast<const ast::Type*>(ty.u32())
+ : ty.i32();
- auto* a = Var("a", a_type);
- auto* b = Var("b", b_type);
+ auto* a = Var("a", a_type);
+ auto* b = Var("b", b_type);
- auto* expr1 = create<ast::BinaryExpression>(params.op, Expr(a), Expr(b));
- auto* expr2 = create<ast::BinaryExpression>(params.op, expr1, Expr(b));
- WrapInFunction(a, b, expr2);
+ auto* expr1 = create<ast::BinaryExpression>(params.op, Expr(a), Expr(b));
+ auto* expr2 = create<ast::BinaryExpression>(params.op, expr1, Expr(b));
+ WrapInFunction(a, b, expr2);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, expr2)) << gen.error();
- EXPECT_EQ(out.str(), params.result);
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, expr2)) << gen.error();
+ EXPECT_EQ(out.str(), params.result);
}
using Op = ast::BinaryOp;
constexpr BinaryData signed_overflow_defined_behaviour_chained_cases[] = {
@@ -146,37 +141,60 @@ constexpr BinaryData signed_overflow_defined_behaviour_chained_cases[] = {
{"as_type<int>((as_type<uint>(as_type<int>((as_type<uint>(a) * "
"as_type<uint>(b)))) * as_type<uint>(b)))",
Op::kMultiply}};
-INSTANTIATE_TEST_SUITE_P(
- MslGeneratorImplTest,
- MslBinaryTest_SignedOverflowDefinedBehaviour_Chained,
- testing::ValuesIn(signed_overflow_defined_behaviour_chained_cases));
+INSTANTIATE_TEST_SUITE_P(MslGeneratorImplTest,
+ MslBinaryTest_SignedOverflowDefinedBehaviour_Chained,
+ testing::ValuesIn(signed_overflow_defined_behaviour_chained_cases));
TEST_F(MslBinaryTest, ModF32) {
- auto* left = Var("left", ty.f32());
- auto* right = Var("right", ty.f32());
- auto* expr = create<ast::BinaryExpression>(ast::BinaryOp::kModulo, Expr(left),
- Expr(right));
- WrapInFunction(left, right, expr);
+ auto* left = Var("left", ty.f32());
+ auto* right = Var("right", ty.f32());
+ auto* expr = create<ast::BinaryExpression>(ast::BinaryOp::kModulo, Expr(left), Expr(right));
+ WrapInFunction(left, right, expr);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
- EXPECT_EQ(out.str(), "fmod(left, right)");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
+ EXPECT_EQ(out.str(), "fmod(left, right)");
}
TEST_F(MslBinaryTest, ModVec3F32) {
- auto* left = Var("left", ty.vec3<f32>());
- auto* right = Var("right", ty.vec3<f32>());
- auto* expr = create<ast::BinaryExpression>(ast::BinaryOp::kModulo, Expr(left),
- Expr(right));
- WrapInFunction(left, right, expr);
+ auto* left = Var("left", ty.vec3<f32>());
+ auto* right = Var("right", ty.vec3<f32>());
+ auto* expr = create<ast::BinaryExpression>(ast::BinaryOp::kModulo, Expr(left), Expr(right));
+ WrapInFunction(left, right, expr);
+
+ GeneratorImpl& gen = Build();
+
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
+ EXPECT_EQ(out.str(), "fmod(left, right)");
+}
+
+TEST_F(MslBinaryTest, BoolAnd) {
+ auto* left = Var("left", nullptr, Expr(true));
+ auto* right = Var("right", nullptr, Expr(false));
+ auto* expr = create<ast::BinaryExpression>(ast::BinaryOp::kAnd, Expr(left), Expr(right));
+ WrapInFunction(left, right, expr);
+
+ GeneratorImpl& gen = Build();
+
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
+ EXPECT_EQ(out.str(), "bool(left & right)");
+}
+
+TEST_F(MslBinaryTest, BoolOr) {
+ auto* left = Var("left", nullptr, Expr(true));
+ auto* right = Var("right", nullptr, Expr(false));
+ auto* expr = create<ast::BinaryExpression>(ast::BinaryOp::kOr, Expr(left), Expr(right));
+ WrapInFunction(left, right, expr);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
- EXPECT_EQ(out.str(), "fmod(left, right)");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
+ EXPECT_EQ(out.str(), "bool(left | right)");
}
} // namespace
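
The signed-overflow expectations above show +, - and * on i32 being routed through as_type<uint> bitcasts, so the arithmetic happens on unsigned integers (where wraparound is well defined) and the result is bitcast back. A hedged C++20 analogue of that rewrite, with std::bit_cast standing in for MSL's as_type and wrapping_add as an illustrative name:

#include <bit>
#include <cstdint>
#include <iostream>

// Analogue of the emitted as_type<int>((as_type<uint>(a) + as_type<uint>(b))).
int32_t wrapping_add(int32_t a, int32_t b) {
    // Unsigned overflow wraps modulo 2^32, so do the math there and bitcast back.
    return std::bit_cast<int32_t>(std::bit_cast<uint32_t>(a) + std::bit_cast<uint32_t>(b));
}

int main() {
    std::cout << wrapping_add(INT32_MAX, 1) << "\n";  // wraps to INT32_MIN without UB
}

The new BoolAnd/BoolOr tests capture a related quirk: in MSL, & and | on bool promote to an integer, so the generator wraps the result in bool(...) to preserve the expression's boolean type.
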
diff --git a/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_bitcast_test.cc b/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_bitcast_test.cc
index fe51305e221..c3985589d48 100644
--- a/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_bitcast_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_bitcast_test.cc
@@ -14,20 +14,22 @@
#include "src/tint/writer/msl/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::msl {
namespace {
using MslGeneratorImplTest = TestHelper;
TEST_F(MslGeneratorImplTest, EmitExpression_Bitcast) {
- auto* bitcast = create<ast::BitcastExpression>(ty.f32(), Expr(1));
- WrapInFunction(bitcast);
+ auto* bitcast = create<ast::BitcastExpression>(ty.f32(), Expr(1_i));
+ WrapInFunction(bitcast);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, bitcast)) << gen.error();
- EXPECT_EQ(out.str(), "as_type<float>(1)");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, bitcast)) << gen.error();
+ EXPECT_EQ(out.str(), "as_type<float>(1)");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_block_test.cc b/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_block_test.cc
index ac6aa5a57c7..9e73eac7c8a 100644
--- a/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_block_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_block_test.cc
@@ -20,30 +20,30 @@ namespace {
using MslGeneratorImplTest = TestHelper;
TEST_F(MslGeneratorImplTest, Emit_Block) {
- auto* b = Block(create<ast::DiscardStatement>());
- WrapInFunction(b);
+ auto* b = Block(create<ast::DiscardStatement>());
+ WrapInFunction(b);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(b)) << gen.error();
- EXPECT_EQ(gen.result(), R"( {
+ ASSERT_TRUE(gen.EmitStatement(b)) << gen.error();
+ EXPECT_EQ(gen.result(), R"( {
discard_fragment();
}
)");
}
TEST_F(MslGeneratorImplTest, Emit_Block_WithoutNewline) {
- auto* b = Block(create<ast::DiscardStatement>());
- WrapInFunction(b);
+ auto* b = Block(create<ast::DiscardStatement>());
+ WrapInFunction(b);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitBlock(b)) << gen.error();
- EXPECT_EQ(gen.result(), R"( {
+ ASSERT_TRUE(gen.EmitBlock(b)) << gen.error();
+ EXPECT_EQ(gen.result(), R"( {
discard_fragment();
}
)");
diff --git a/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_break_test.cc b/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_break_test.cc
index 806188b307e..d9d11035df1 100644
--- a/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_break_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_break_test.cc
@@ -20,15 +20,15 @@ namespace {
using MslGeneratorImplTest = TestHelper;
TEST_F(MslGeneratorImplTest, Emit_Break) {
- auto* b = create<ast::BreakStatement>();
- WrapInFunction(Loop(Block(b)));
+ auto* b = create<ast::BreakStatement>();
+ WrapInFunction(Loop(Block(b)));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(b)) << gen.error();
- EXPECT_EQ(gen.result(), " break;\n");
+ ASSERT_TRUE(gen.EmitStatement(b)) << gen.error();
+ EXPECT_EQ(gen.result(), " break;\n");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_builtin_test.cc b/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_builtin_test.cc
index 09a98a722bf..0c1f2a4bd2d 100644
--- a/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_builtin_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_builtin_test.cc
@@ -16,6 +16,8 @@
#include "src/tint/sem/call.h"
#include "src/tint/writer/msl/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::msl {
namespace {
@@ -24,177 +26,177 @@ using BuiltinType = sem::BuiltinType;
using MslGeneratorImplTest = TestHelper;
enum class ParamType {
- kF32,
- kU32,
- kBool,
+ kF32,
+ kU32,
+ kBool,
};
struct BuiltinData {
- BuiltinType builtin;
- ParamType type;
- const char* msl_name;
+ BuiltinType builtin;
+ ParamType type;
+ const char* msl_name;
};
inline std::ostream& operator<<(std::ostream& out, BuiltinData data) {
- out << data.msl_name << "<";
- switch (data.type) {
- case ParamType::kF32:
- out << "f32";
- break;
- case ParamType::kU32:
- out << "u32";
- break;
- case ParamType::kBool:
- out << "bool";
- break;
- }
- out << ">";
- return out;
+ out << data.msl_name << "<";
+ switch (data.type) {
+ case ParamType::kF32:
+ out << "f32";
+ break;
+ case ParamType::kU32:
+ out << "u32";
+ break;
+ case ParamType::kBool:
+ out << "bool";
+ break;
+ }
+ out << ">";
+ return out;
}
const ast::CallExpression* GenerateCall(BuiltinType builtin,
ParamType type,
ProgramBuilder* builder) {
- std::string name;
- std::ostringstream str(name);
- str << builtin;
- switch (builtin) {
- case BuiltinType::kAcos:
- case BuiltinType::kAsin:
- case BuiltinType::kAtan:
- case BuiltinType::kCeil:
- case BuiltinType::kCos:
- case BuiltinType::kCosh:
- case BuiltinType::kDpdx:
- case BuiltinType::kDpdxCoarse:
- case BuiltinType::kDpdxFine:
- case BuiltinType::kDpdy:
- case BuiltinType::kDpdyCoarse:
- case BuiltinType::kDpdyFine:
- case BuiltinType::kExp:
- case BuiltinType::kExp2:
- case BuiltinType::kFloor:
- case BuiltinType::kFract:
- case BuiltinType::kFwidth:
- case BuiltinType::kFwidthCoarse:
- case BuiltinType::kFwidthFine:
- case BuiltinType::kInverseSqrt:
- case BuiltinType::kLength:
- case BuiltinType::kLog:
- case BuiltinType::kLog2:
- case BuiltinType::kNormalize:
- case BuiltinType::kRound:
- case BuiltinType::kSin:
- case BuiltinType::kSinh:
- case BuiltinType::kSqrt:
- case BuiltinType::kTan:
- case BuiltinType::kTanh:
- case BuiltinType::kTrunc:
- case BuiltinType::kSign:
- return builder->Call(str.str(), "f2");
- case BuiltinType::kLdexp:
- return builder->Call(str.str(), "f2", "i2");
- case BuiltinType::kAtan2:
- case BuiltinType::kDot:
- case BuiltinType::kDistance:
- case BuiltinType::kPow:
- case BuiltinType::kReflect:
- case BuiltinType::kStep:
- return builder->Call(str.str(), "f2", "f2");
- case BuiltinType::kStorageBarrier:
- return builder->Call(str.str());
- case BuiltinType::kCross:
- return builder->Call(str.str(), "f3", "f3");
- case BuiltinType::kFma:
- case BuiltinType::kMix:
- case BuiltinType::kFaceForward:
- case BuiltinType::kSmoothstep:
- case BuiltinType::kSmoothStep:
- return builder->Call(str.str(), "f2", "f2", "f2");
- case BuiltinType::kAll:
- case BuiltinType::kAny:
- return builder->Call(str.str(), "b2");
- case BuiltinType::kAbs:
- if (type == ParamType::kF32) {
- return builder->Call(str.str(), "f2");
- } else {
- return builder->Call(str.str(), "u2");
- }
- case BuiltinType::kCountLeadingZeros:
- case BuiltinType::kCountOneBits:
- case BuiltinType::kCountTrailingZeros:
- case BuiltinType::kReverseBits:
- return builder->Call(str.str(), "u2");
- case BuiltinType::kExtractBits:
- return builder->Call(str.str(), "u2", "u1", "u1");
- case BuiltinType::kInsertBits:
- return builder->Call(str.str(), "u2", "u2", "u1", "u1");
- case BuiltinType::kMax:
- case BuiltinType::kMin:
- if (type == ParamType::kF32) {
- return builder->Call(str.str(), "f2", "f2");
- } else {
- return builder->Call(str.str(), "u2", "u2");
- }
- case BuiltinType::kClamp:
- if (type == ParamType::kF32) {
- return builder->Call(str.str(), "f2", "f2", "f2");
- } else {
- return builder->Call(str.str(), "u2", "u2", "u2");
- }
- case BuiltinType::kSelect:
- return builder->Call(str.str(), "f2", "f2", "b2");
- case BuiltinType::kDeterminant:
- return builder->Call(str.str(), "m2x2");
- case BuiltinType::kPack2x16snorm:
- case BuiltinType::kPack2x16unorm:
- return builder->Call(str.str(), "f2");
- case BuiltinType::kPack4x8snorm:
- case BuiltinType::kPack4x8unorm:
- return builder->Call(str.str(), "f4");
- case BuiltinType::kUnpack4x8snorm:
- case BuiltinType::kUnpack4x8unorm:
- case BuiltinType::kUnpack2x16snorm:
- case BuiltinType::kUnpack2x16unorm:
- return builder->Call(str.str(), "u1");
- case BuiltinType::kWorkgroupBarrier:
- return builder->Call(str.str());
- case BuiltinType::kTranspose:
- return builder->Call(str.str(), "m3x2");
- default:
- break;
- }
- return nullptr;
+ std::string name;
+ std::ostringstream str(name);
+ str << builtin;
+ switch (builtin) {
+ case BuiltinType::kAcos:
+ case BuiltinType::kAsin:
+ case BuiltinType::kAtan:
+ case BuiltinType::kCeil:
+ case BuiltinType::kCos:
+ case BuiltinType::kCosh:
+ case BuiltinType::kDpdx:
+ case BuiltinType::kDpdxCoarse:
+ case BuiltinType::kDpdxFine:
+ case BuiltinType::kDpdy:
+ case BuiltinType::kDpdyCoarse:
+ case BuiltinType::kDpdyFine:
+ case BuiltinType::kExp:
+ case BuiltinType::kExp2:
+ case BuiltinType::kFloor:
+ case BuiltinType::kFract:
+ case BuiltinType::kFwidth:
+ case BuiltinType::kFwidthCoarse:
+ case BuiltinType::kFwidthFine:
+ case BuiltinType::kInverseSqrt:
+ case BuiltinType::kLength:
+ case BuiltinType::kLog:
+ case BuiltinType::kLog2:
+ case BuiltinType::kNormalize:
+ case BuiltinType::kRound:
+ case BuiltinType::kSin:
+ case BuiltinType::kSinh:
+ case BuiltinType::kSqrt:
+ case BuiltinType::kTan:
+ case BuiltinType::kTanh:
+ case BuiltinType::kTrunc:
+ case BuiltinType::kSign:
+ return builder->Call(str.str(), "f2");
+ case BuiltinType::kLdexp:
+ return builder->Call(str.str(), "f2", "i2");
+ case BuiltinType::kAtan2:
+ case BuiltinType::kDot:
+ case BuiltinType::kDistance:
+ case BuiltinType::kPow:
+ case BuiltinType::kReflect:
+ case BuiltinType::kStep:
+ return builder->Call(str.str(), "f2", "f2");
+ case BuiltinType::kStorageBarrier:
+ return builder->Call(str.str());
+ case BuiltinType::kCross:
+ return builder->Call(str.str(), "f3", "f3");
+ case BuiltinType::kFma:
+ case BuiltinType::kMix:
+ case BuiltinType::kFaceForward:
+ case BuiltinType::kSmoothstep:
+ case BuiltinType::kSmoothStep:
+ return builder->Call(str.str(), "f2", "f2", "f2");
+ case BuiltinType::kAll:
+ case BuiltinType::kAny:
+ return builder->Call(str.str(), "b2");
+ case BuiltinType::kAbs:
+ if (type == ParamType::kF32) {
+ return builder->Call(str.str(), "f2");
+ } else {
+ return builder->Call(str.str(), "u2");
+ }
+ case BuiltinType::kCountLeadingZeros:
+ case BuiltinType::kCountOneBits:
+ case BuiltinType::kCountTrailingZeros:
+ case BuiltinType::kReverseBits:
+ return builder->Call(str.str(), "u2");
+ case BuiltinType::kExtractBits:
+ return builder->Call(str.str(), "u2", "u1", "u1");
+ case BuiltinType::kInsertBits:
+ return builder->Call(str.str(), "u2", "u2", "u1", "u1");
+ case BuiltinType::kMax:
+ case BuiltinType::kMin:
+ if (type == ParamType::kF32) {
+ return builder->Call(str.str(), "f2", "f2");
+ } else {
+ return builder->Call(str.str(), "u2", "u2");
+ }
+ case BuiltinType::kClamp:
+ if (type == ParamType::kF32) {
+ return builder->Call(str.str(), "f2", "f2", "f2");
+ } else {
+ return builder->Call(str.str(), "u2", "u2", "u2");
+ }
+ case BuiltinType::kSelect:
+ return builder->Call(str.str(), "f2", "f2", "b2");
+ case BuiltinType::kDeterminant:
+ return builder->Call(str.str(), "m2x2");
+ case BuiltinType::kPack2x16snorm:
+ case BuiltinType::kPack2x16unorm:
+ return builder->Call(str.str(), "f2");
+ case BuiltinType::kPack4x8snorm:
+ case BuiltinType::kPack4x8unorm:
+ return builder->Call(str.str(), "f4");
+ case BuiltinType::kUnpack4x8snorm:
+ case BuiltinType::kUnpack4x8unorm:
+ case BuiltinType::kUnpack2x16snorm:
+ case BuiltinType::kUnpack2x16unorm:
+ return builder->Call(str.str(), "u1");
+ case BuiltinType::kWorkgroupBarrier:
+ return builder->Call(str.str());
+ case BuiltinType::kTranspose:
+ return builder->Call(str.str(), "m3x2");
+ default:
+ break;
+ }
+ return nullptr;
}
using MslBuiltinTest = TestParamHelper<BuiltinData>;
TEST_P(MslBuiltinTest, Emit) {
- auto param = GetParam();
-
- Global("f2", ty.vec2<f32>(), ast::StorageClass::kPrivate);
- Global("f3", ty.vec3<f32>(), ast::StorageClass::kPrivate);
- Global("f4", ty.vec4<f32>(), ast::StorageClass::kPrivate);
- Global("u1", ty.u32(), ast::StorageClass::kPrivate);
- Global("u2", ty.vec2<u32>(), ast::StorageClass::kPrivate);
- Global("i2", ty.vec2<i32>(), ast::StorageClass::kPrivate);
- Global("b2", ty.vec2<bool>(), ast::StorageClass::kPrivate);
- Global("m2x2", ty.mat2x2<f32>(), ast::StorageClass::kPrivate);
- Global("m3x2", ty.mat3x2<f32>(), ast::StorageClass::kPrivate);
-
- auto* call = GenerateCall(param.builtin, param.type, this);
- ASSERT_NE(nullptr, call) << "Unhandled builtin";
- Func("func", {}, ty.void_(), {Ignore(call)},
- {create<ast::StageAttribute>(ast::PipelineStage::kFragment)});
-
- GeneratorImpl& gen = Build();
-
- auto* sem = program->Sem().Get(call);
- ASSERT_NE(sem, nullptr);
- auto* target = sem->Target();
- ASSERT_NE(target, nullptr);
- auto* builtin = target->As<sem::Builtin>();
- ASSERT_NE(builtin, nullptr);
-
- EXPECT_EQ(gen.generate_builtin_name(builtin), param.msl_name);
+ auto param = GetParam();
+
+ Global("f2", ty.vec2<f32>(), ast::StorageClass::kPrivate);
+ Global("f3", ty.vec3<f32>(), ast::StorageClass::kPrivate);
+ Global("f4", ty.vec4<f32>(), ast::StorageClass::kPrivate);
+ Global("u1", ty.u32(), ast::StorageClass::kPrivate);
+ Global("u2", ty.vec2<u32>(), ast::StorageClass::kPrivate);
+ Global("i2", ty.vec2<i32>(), ast::StorageClass::kPrivate);
+ Global("b2", ty.vec2<bool>(), ast::StorageClass::kPrivate);
+ Global("m2x2", ty.mat2x2<f32>(), ast::StorageClass::kPrivate);
+ Global("m3x2", ty.mat3x2<f32>(), ast::StorageClass::kPrivate);
+
+ auto* call = GenerateCall(param.builtin, param.type, this);
+ ASSERT_NE(nullptr, call) << "Unhandled builtin";
+ Func("func", {}, ty.void_(), {Ignore(call)},
+ {create<ast::StageAttribute>(ast::PipelineStage::kFragment)});
+
+ GeneratorImpl& gen = Build();
+
+ auto* sem = program->Sem().Get<sem::Call>(call);
+ ASSERT_NE(sem, nullptr);
+ auto* target = sem->Target();
+ ASSERT_NE(target, nullptr);
+ auto* builtin = target->As<sem::Builtin>();
+ ASSERT_NE(builtin, nullptr);
+
+ EXPECT_EQ(gen.generate_builtin_name(builtin), param.msl_name);
}
INSTANTIATE_TEST_SUITE_P(
MslGeneratorImplTest,
@@ -247,14 +249,10 @@ INSTANTIATE_TEST_SUITE_P(
BuiltinData{BuiltinType::kMin, ParamType::kF32, "fmin"},
BuiltinData{BuiltinType::kMin, ParamType::kU32, "min"},
BuiltinData{BuiltinType::kNormalize, ParamType::kF32, "normalize"},
- BuiltinData{BuiltinType::kPack4x8snorm, ParamType::kF32,
- "pack_float_to_snorm4x8"},
- BuiltinData{BuiltinType::kPack4x8unorm, ParamType::kF32,
- "pack_float_to_unorm4x8"},
- BuiltinData{BuiltinType::kPack2x16snorm, ParamType::kF32,
- "pack_float_to_snorm2x16"},
- BuiltinData{BuiltinType::kPack2x16unorm, ParamType::kF32,
- "pack_float_to_unorm2x16"},
+ BuiltinData{BuiltinType::kPack4x8snorm, ParamType::kF32, "pack_float_to_snorm4x8"},
+ BuiltinData{BuiltinType::kPack4x8unorm, ParamType::kF32, "pack_float_to_unorm4x8"},
+ BuiltinData{BuiltinType::kPack2x16snorm, ParamType::kF32, "pack_float_to_snorm2x16"},
+ BuiltinData{BuiltinType::kPack2x16unorm, ParamType::kF32, "pack_float_to_unorm2x16"},
BuiltinData{BuiltinType::kPow, ParamType::kF32, "pow"},
BuiltinData{BuiltinType::kReflect, ParamType::kF32, "reflect"},
BuiltinData{BuiltinType::kReverseBits, ParamType::kU32, "reverse_bits"},
@@ -271,60 +269,56 @@ INSTANTIATE_TEST_SUITE_P(
BuiltinData{BuiltinType::kTanh, ParamType::kF32, "tanh"},
BuiltinData{BuiltinType::kTranspose, ParamType::kF32, "transpose"},
BuiltinData{BuiltinType::kTrunc, ParamType::kF32, "trunc"},
- BuiltinData{BuiltinType::kUnpack4x8snorm, ParamType::kU32,
- "unpack_snorm4x8_to_float"},
- BuiltinData{BuiltinType::kUnpack4x8unorm, ParamType::kU32,
- "unpack_unorm4x8_to_float"},
- BuiltinData{BuiltinType::kUnpack2x16snorm, ParamType::kU32,
- "unpack_snorm2x16_to_float"},
- BuiltinData{BuiltinType::kUnpack2x16unorm, ParamType::kU32,
- "unpack_unorm2x16_to_float"}));
+ BuiltinData{BuiltinType::kUnpack4x8snorm, ParamType::kU32, "unpack_snorm4x8_to_float"},
+ BuiltinData{BuiltinType::kUnpack4x8unorm, ParamType::kU32, "unpack_unorm4x8_to_float"},
+ BuiltinData{BuiltinType::kUnpack2x16snorm, ParamType::kU32, "unpack_snorm2x16_to_float"},
+ BuiltinData{BuiltinType::kUnpack2x16unorm, ParamType::kU32, "unpack_unorm2x16_to_float"}));
TEST_F(MslGeneratorImplTest, Builtin_Call) {
- Global("param1", ty.vec2<f32>(), ast::StorageClass::kPrivate);
- Global("param2", ty.vec2<f32>(), ast::StorageClass::kPrivate);
+ Global("param1", ty.vec2<f32>(), ast::StorageClass::kPrivate);
+ Global("param2", ty.vec2<f32>(), ast::StorageClass::kPrivate);
- auto* call = Call("dot", "param1", "param2");
- WrapInFunction(CallStmt(call));
+ auto* call = Call("dot", "param1", "param2");
+ WrapInFunction(CallStmt(call));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, call)) << gen.error();
- EXPECT_EQ(out.str(), "dot(param1, param2)");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, call)) << gen.error();
+ EXPECT_EQ(out.str(), "dot(param1, param2)");
}
TEST_F(MslGeneratorImplTest, StorageBarrier) {
- auto* call = Call("storageBarrier");
- WrapInFunction(CallStmt(call));
+ auto* call = Call("storageBarrier");
+ WrapInFunction(CallStmt(call));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, call)) << gen.error();
- EXPECT_EQ(out.str(), "threadgroup_barrier(mem_flags::mem_device)");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, call)) << gen.error();
+ EXPECT_EQ(out.str(), "threadgroup_barrier(mem_flags::mem_device)");
}
TEST_F(MslGeneratorImplTest, WorkgroupBarrier) {
- auto* call = Call("workgroupBarrier");
- WrapInFunction(CallStmt(call));
+ auto* call = Call("workgroupBarrier");
+ WrapInFunction(CallStmt(call));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, call)) << gen.error();
- EXPECT_EQ(out.str(), "threadgroup_barrier(mem_flags::mem_threadgroup)");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, call)) << gen.error();
+ EXPECT_EQ(out.str(), "threadgroup_barrier(mem_flags::mem_threadgroup)");
}
TEST_F(MslGeneratorImplTest, Degrees_Scalar) {
- auto* val = Var("val", ty.f32());
- auto* call = Call("degrees", val);
- WrapInFunction(val, call);
+ auto* val = Var("val", ty.f32());
+ auto* call = Call("degrees", val);
+ WrapInFunction(val, call);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(#include <metal_stdlib>
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(#include <metal_stdlib>
using namespace metal;
@@ -342,14 +336,14 @@ kernel void test_function() {
}
TEST_F(MslGeneratorImplTest, Degrees_Vector) {
- auto* val = Var("val", ty.vec3<f32>());
- auto* call = Call("degrees", val);
- WrapInFunction(val, call);
+ auto* val = Var("val", ty.vec3<f32>());
+ auto* call = Call("degrees", val);
+ WrapInFunction(val, call);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(#include <metal_stdlib>
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(#include <metal_stdlib>
using namespace metal;
@@ -367,14 +361,14 @@ kernel void test_function() {
}
TEST_F(MslGeneratorImplTest, Radians_Scalar) {
- auto* val = Var("val", ty.f32());
- auto* call = Call("radians", val);
- WrapInFunction(val, call);
+ auto* val = Var("val", ty.f32());
+ auto* call = Call("radians", val);
+ WrapInFunction(val, call);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(#include <metal_stdlib>
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(#include <metal_stdlib>
using namespace metal;
@@ -392,14 +386,14 @@ kernel void test_function() {
}
TEST_F(MslGeneratorImplTest, Radians_Vector) {
- auto* val = Var("val", ty.vec3<f32>());
- auto* call = Call("radians", val);
- WrapInFunction(val, call);
+ auto* val = Var("val", ty.vec3<f32>());
+ auto* call = Call("radians", val);
+ WrapInFunction(val, call);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(#include <metal_stdlib>
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(#include <metal_stdlib>
using namespace metal;
@@ -417,37 +411,37 @@ kernel void test_function() {
}
TEST_F(MslGeneratorImplTest, Pack2x16Float) {
- auto* call = Call("pack2x16float", "p1");
- Global("p1", ty.vec2<f32>(), ast::StorageClass::kPrivate);
- WrapInFunction(CallStmt(call));
+ auto* call = Call("pack2x16float", "p1");
+ Global("p1", ty.vec2<f32>(), ast::StorageClass::kPrivate);
+ WrapInFunction(CallStmt(call));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, call)) << gen.error();
- EXPECT_EQ(out.str(), "as_type<uint>(half2(p1))");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, call)) << gen.error();
+ EXPECT_EQ(out.str(), "as_type<uint>(half2(p1))");
}
TEST_F(MslGeneratorImplTest, Unpack2x16Float) {
- auto* call = Call("unpack2x16float", "p1");
- Global("p1", ty.u32(), ast::StorageClass::kPrivate);
- WrapInFunction(CallStmt(call));
+ auto* call = Call("unpack2x16float", "p1");
+ Global("p1", ty.u32(), ast::StorageClass::kPrivate);
+ WrapInFunction(CallStmt(call));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, call)) << gen.error();
- EXPECT_EQ(out.str(), "float2(as_type<half2>(p1))");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, call)) << gen.error();
+ EXPECT_EQ(out.str(), "float2(as_type<half2>(p1))");
}
TEST_F(MslGeneratorImplTest, DotI32) {
- Global("v", ty.vec3<i32>(), ast::StorageClass::kPrivate);
- WrapInFunction(CallStmt(Call("dot", "v", "v")));
+ Global("v", ty.vec3<i32>(), ast::StorageClass::kPrivate);
+ WrapInFunction(CallStmt(Call("dot", "v", "v")));
- GeneratorImpl& gen = SanitizeAndBuild();
+ GeneratorImpl& gen = SanitizeAndBuild();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(#include <metal_stdlib>
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(#include <metal_stdlib>
using namespace metal;
@@ -465,19 +459,19 @@ kernel void test_function() {
}
TEST_F(MslGeneratorImplTest, Ignore) {
- Func("f", {Param("a", ty.i32()), Param("b", ty.i32()), Param("c", ty.i32())},
- ty.i32(), {Return(Mul(Add("a", "b"), "c"))});
+ Func("f", {Param("a", ty.i32()), Param("b", ty.i32()), Param("c", ty.i32())}, ty.i32(),
+ {Return(Mul(Add("a", "b"), "c"))});
- Func("func", {}, ty.void_(), {CallStmt(Call("f", 1, 2, 3))},
- {
- Stage(ast::PipelineStage::kCompute),
- WorkgroupSize(1),
- });
+ Func("func", {}, ty.void_(), {CallStmt(Call("f", 1_i, 2_i, 3_i))},
+ {
+ Stage(ast::PipelineStage::kCompute),
+ WorkgroupSize(1_i),
+ });
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(#include <metal_stdlib>
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(#include <metal_stdlib>
using namespace metal;
int f(int a, int b, int c) {
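
The EXPECT strings in this test file pin down how the MSL generator maps WGSL builtins onto MSL intrinsics: simple renames go through generate_builtin_name, while barriers and the 2x16-float packing functions are expanded by EmitExpression. A hedged summary, with the entries copied from the expectations above and the lookup table itself purely illustrative:

#include <iostream>
#include <string>
#include <utility>
#include <vector>

int main() {
    // WGSL call on the left, emitted MSL on the right, as asserted by the tests above.
    const std::vector<std::pair<std::string, std::string>> wgsl_to_msl = {
        {"storageBarrier()", "threadgroup_barrier(mem_flags::mem_device)"},
        {"workgroupBarrier()", "threadgroup_barrier(mem_flags::mem_threadgroup)"},
        {"pack2x16float(p1)", "as_type<uint>(half2(p1))"},
        {"unpack2x16float(p1)", "float2(as_type<half2>(p1))"},
        {"pack4x8snorm(v)", "pack_float_to_snorm4x8(v)"},
        {"reverseBits(v)", "reverse_bits(v)"},
    };
    for (const auto& [wgsl, msl] : wgsl_to_msl) {
        std::cout << wgsl << "  ->  " << msl << "\n";
    }
}
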
diff --git a/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_builtin_texture_test.cc b/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_builtin_texture_test.cc
index 47605268777..20813fabc6f 100644
--- a/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_builtin_texture_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_builtin_texture_test.cc
@@ -19,283 +19,281 @@
namespace tint::writer::msl {
namespace {
-std::string expected_texture_overload(
- ast::builtin::test::ValidTextureOverload overload) {
- using ValidTextureOverload = ast::builtin::test::ValidTextureOverload;
- switch (overload) {
- case ValidTextureOverload::kDimensions1d:
- case ValidTextureOverload::kDimensionsStorageWO1d:
- return R"(int(texture.get_width(0)))";
- case ValidTextureOverload::kDimensions2d:
- case ValidTextureOverload::kDimensions2dArray:
- case ValidTextureOverload::kDimensionsCube:
- case ValidTextureOverload::kDimensionsCubeArray:
- case ValidTextureOverload::kDimensionsMultisampled2d:
- case ValidTextureOverload::kDimensionsDepth2d:
- case ValidTextureOverload::kDimensionsDepth2dArray:
- case ValidTextureOverload::kDimensionsDepthCube:
- case ValidTextureOverload::kDimensionsDepthCubeArray:
- case ValidTextureOverload::kDimensionsDepthMultisampled2d:
- case ValidTextureOverload::kDimensionsStorageWO2d:
- case ValidTextureOverload::kDimensionsStorageWO2dArray:
- return R"(int2(texture.get_width(), texture.get_height()))";
- case ValidTextureOverload::kDimensions3d:
- case ValidTextureOverload::kDimensionsStorageWO3d:
- return R"(int3(texture.get_width(), texture.get_height(), texture.get_depth()))";
- case ValidTextureOverload::kDimensions2dLevel:
- case ValidTextureOverload::kDimensionsCubeLevel:
- case ValidTextureOverload::kDimensionsCubeArrayLevel:
- case ValidTextureOverload::kDimensions2dArrayLevel:
- case ValidTextureOverload::kDimensionsDepth2dLevel:
- case ValidTextureOverload::kDimensionsDepth2dArrayLevel:
- case ValidTextureOverload::kDimensionsDepthCubeLevel:
- case ValidTextureOverload::kDimensionsDepthCubeArrayLevel:
- return R"(int2(texture.get_width(1), texture.get_height(1)))";
- case ValidTextureOverload::kDimensions3dLevel:
- return R"(int3(texture.get_width(1), texture.get_height(1), texture.get_depth(1)))";
- case ValidTextureOverload::kGather2dF32:
- return R"(texture.gather(sampler, float2(1.0f, 2.0f), int2(0), component::x))";
- case ValidTextureOverload::kGather2dOffsetF32:
- return R"(texture.gather(sampler, float2(1.0f, 2.0f), int2(3, 4), component::x))";
- case ValidTextureOverload::kGather2dArrayF32:
- return R"(texture.gather(sampler, float2(1.0f, 2.0f), 3, int2(0), component::x))";
- case ValidTextureOverload::kGather2dArrayOffsetF32:
- return R"(texture.gather(sampler, float2(1.0f, 2.0f), 3, int2(4, 5), component::x))";
- case ValidTextureOverload::kGatherCubeF32:
- return R"(texture.gather(sampler, float3(1.0f, 2.0f, 3.0f), component::x))";
- case ValidTextureOverload::kGatherCubeArrayF32:
- return R"(texture.gather(sampler, float3(1.0f, 2.0f, 3.0f), 4, component::x))";
- case ValidTextureOverload::kGatherDepth2dF32:
- return R"(texture.gather(sampler, float2(1.0f, 2.0f)))";
- case ValidTextureOverload::kGatherDepth2dOffsetF32:
- return R"(texture.gather(sampler, float2(1.0f, 2.0f), int2(3, 4)))";
- case ValidTextureOverload::kGatherDepth2dArrayF32:
- return R"(texture.gather(sampler, float2(1.0f, 2.0f), 3))";
- case ValidTextureOverload::kGatherDepth2dArrayOffsetF32:
- return R"(texture.gather(sampler, float2(1.0f, 2.0f), 3, int2(4, 5)))";
- case ValidTextureOverload::kGatherDepthCubeF32:
- return R"(texture.gather(sampler, float3(1.0f, 2.0f, 3.0f)))";
- case ValidTextureOverload::kGatherDepthCubeArrayF32:
- return R"(texture.gather(sampler, float3(1.0f, 2.0f, 3.0f), 4))";
- case ValidTextureOverload::kGatherCompareDepth2dF32:
- return R"(texture.gather_compare(sampler, float2(1.0f, 2.0f), 3.0f))";
- case ValidTextureOverload::kGatherCompareDepth2dOffsetF32:
- return R"(texture.gather_compare(sampler, float2(1.0f, 2.0f), 3.0f, int2(4, 5)))";
- case ValidTextureOverload::kGatherCompareDepth2dArrayF32:
- return R"(texture.gather_compare(sampler, float2(1.0f, 2.0f), 3, 4.0f))";
- case ValidTextureOverload::kGatherCompareDepth2dArrayOffsetF32:
- return R"(texture.gather_compare(sampler, float2(1.0f, 2.0f), 3, 4.0f, int2(5, 6)))";
- case ValidTextureOverload::kGatherCompareDepthCubeF32:
- return R"(texture.gather_compare(sampler, float3(1.0f, 2.0f, 3.0f), 4.0f))";
- case ValidTextureOverload::kGatherCompareDepthCubeArrayF32:
- return R"(texture.gather_compare(sampler, float3(1.0f, 2.0f, 3.0f), 4, 5.0f))";
- case ValidTextureOverload::kNumLayers2dArray:
- case ValidTextureOverload::kNumLayersCubeArray:
- case ValidTextureOverload::kNumLayersDepth2dArray:
- case ValidTextureOverload::kNumLayersDepthCubeArray:
- case ValidTextureOverload::kNumLayersStorageWO2dArray:
- return R"(int(texture.get_array_size()))";
- case ValidTextureOverload::kNumLevels2d:
- case ValidTextureOverload::kNumLevels2dArray:
- case ValidTextureOverload::kNumLevels3d:
- case ValidTextureOverload::kNumLevelsCube:
- case ValidTextureOverload::kNumLevelsCubeArray:
- case ValidTextureOverload::kNumLevelsDepth2d:
- case ValidTextureOverload::kNumLevelsDepth2dArray:
- case ValidTextureOverload::kNumLevelsDepthCube:
- case ValidTextureOverload::kNumLevelsDepthCubeArray:
- return R"(int(texture.get_num_mip_levels()))";
- case ValidTextureOverload::kNumSamplesDepthMultisampled2d:
- case ValidTextureOverload::kNumSamplesMultisampled2d:
- return R"(int(texture.get_num_samples()))";
- case ValidTextureOverload::kSample1dF32:
- return R"(texture.sample(sampler, 1.0f))";
- case ValidTextureOverload::kSample2dF32:
- return R"(texture.sample(sampler, float2(1.0f, 2.0f)))";
- case ValidTextureOverload::kSample2dOffsetF32:
- return R"(texture.sample(sampler, float2(1.0f, 2.0f), int2(3, 4)))";
- case ValidTextureOverload::kSample2dArrayF32:
- return R"(texture.sample(sampler, float2(1.0f, 2.0f), 3))";
- case ValidTextureOverload::kSample2dArrayOffsetF32:
- return R"(texture.sample(sampler, float2(1.0f, 2.0f), 3, int2(4, 5)))";
- case ValidTextureOverload::kSample3dF32:
- return R"(texture.sample(sampler, float3(1.0f, 2.0f, 3.0f)))";
- case ValidTextureOverload::kSample3dOffsetF32:
- return R"(texture.sample(sampler, float3(1.0f, 2.0f, 3.0f), int3(4, 5, 6)))";
- case ValidTextureOverload::kSampleCubeF32:
- return R"(texture.sample(sampler, float3(1.0f, 2.0f, 3.0f)))";
- case ValidTextureOverload::kSampleCubeArrayF32:
- return R"(texture.sample(sampler, float3(1.0f, 2.0f, 3.0f), 4))";
- case ValidTextureOverload::kSampleDepth2dF32:
- return R"(texture.sample(sampler, float2(1.0f, 2.0f)))";
- case ValidTextureOverload::kSampleDepth2dOffsetF32:
- return R"(texture.sample(sampler, float2(1.0f, 2.0f), int2(3, 4)))";
- case ValidTextureOverload::kSampleDepth2dArrayF32:
- return R"(texture.sample(sampler, float2(1.0f, 2.0f), 3))";
- case ValidTextureOverload::kSampleDepth2dArrayOffsetF32:
- return R"(texture.sample(sampler, float2(1.0f, 2.0f), 3, int2(4, 5)))";
- case ValidTextureOverload::kSampleDepthCubeF32:
- return R"(texture.sample(sampler, float3(1.0f, 2.0f, 3.0f)))";
- case ValidTextureOverload::kSampleDepthCubeArrayF32:
- return R"(texture.sample(sampler, float3(1.0f, 2.0f, 3.0f), 4))";
- case ValidTextureOverload::kSampleBias2dF32:
- return R"(texture.sample(sampler, float2(1.0f, 2.0f), bias(3.0f)))";
- case ValidTextureOverload::kSampleBias2dOffsetF32:
- return R"(texture.sample(sampler, float2(1.0f, 2.0f), bias(3.0f), int2(4, 5)))";
- case ValidTextureOverload::kSampleBias2dArrayF32:
- return R"(texture.sample(sampler, float2(1.0f, 2.0f), 4, bias(3.0f)))";
- case ValidTextureOverload::kSampleBias2dArrayOffsetF32:
- return R"(texture.sample(sampler, float2(1.0f, 2.0f), 3, bias(4.0f), int2(5, 6)))";
- case ValidTextureOverload::kSampleBias3dF32:
- return R"(texture.sample(sampler, float3(1.0f, 2.0f, 3.0f), bias(4.0f)))";
- case ValidTextureOverload::kSampleBias3dOffsetF32:
- return R"(texture.sample(sampler, float3(1.0f, 2.0f, 3.0f), bias(4.0f), int3(5, 6, 7)))";
- case ValidTextureOverload::kSampleBiasCubeF32:
- return R"(texture.sample(sampler, float3(1.0f, 2.0f, 3.0f), bias(4.0f)))";
- case ValidTextureOverload::kSampleBiasCubeArrayF32:
- return R"(texture.sample(sampler, float3(1.0f, 2.0f, 3.0f), 3, bias(4.0f)))";
- case ValidTextureOverload::kSampleLevel2dF32:
- return R"(texture.sample(sampler, float2(1.0f, 2.0f), level(3.0f)))";
- case ValidTextureOverload::kSampleLevel2dOffsetF32:
- return R"(texture.sample(sampler, float2(1.0f, 2.0f), level(3.0f), int2(4, 5)))";
- case ValidTextureOverload::kSampleLevel2dArrayF32:
- return R"(texture.sample(sampler, float2(1.0f, 2.0f), 3, level(4.0f)))";
- case ValidTextureOverload::kSampleLevel2dArrayOffsetF32:
- return R"(texture.sample(sampler, float2(1.0f, 2.0f), 3, level(4.0f), int2(5, 6)))";
- case ValidTextureOverload::kSampleLevel3dF32:
- return R"(texture.sample(sampler, float3(1.0f, 2.0f, 3.0f), level(4.0f)))";
- case ValidTextureOverload::kSampleLevel3dOffsetF32:
- return R"(texture.sample(sampler, float3(1.0f, 2.0f, 3.0f), level(4.0f), int3(5, 6, 7)))";
- case ValidTextureOverload::kSampleLevelCubeF32:
- return R"(texture.sample(sampler, float3(1.0f, 2.0f, 3.0f), level(4.0f)))";
- case ValidTextureOverload::kSampleLevelCubeArrayF32:
- return R"(texture.sample(sampler, float3(1.0f, 2.0f, 3.0f), 4, level(5.0f)))";
- case ValidTextureOverload::kSampleLevelDepth2dF32:
- return R"(texture.sample(sampler, float2(1.0f, 2.0f), level(3)))";
- case ValidTextureOverload::kSampleLevelDepth2dOffsetF32:
- return R"(texture.sample(sampler, float2(1.0f, 2.0f), level(3), int2(4, 5)))";
- case ValidTextureOverload::kSampleLevelDepth2dArrayF32:
- return R"(texture.sample(sampler, float2(1.0f, 2.0f), 3, level(4)))";
- case ValidTextureOverload::kSampleLevelDepth2dArrayOffsetF32:
- return R"(texture.sample(sampler, float2(1.0f, 2.0f), 3, level(4), int2(5, 6)))";
- case ValidTextureOverload::kSampleLevelDepthCubeF32:
- return R"(texture.sample(sampler, float3(1.0f, 2.0f, 3.0f), level(4)))";
- case ValidTextureOverload::kSampleLevelDepthCubeArrayF32:
- return R"(texture.sample(sampler, float3(1.0f, 2.0f, 3.0f), 4, level(5)))";
- case ValidTextureOverload::kSampleGrad2dF32:
- return R"(texture.sample(sampler, float2(1.0f, 2.0f), gradient2d(float2(3.0f, 4.0f), float2(5.0f, 6.0f))))";
- case ValidTextureOverload::kSampleGrad2dOffsetF32:
- return R"(texture.sample(sampler, float2(1.0f, 2.0f), gradient2d(float2(3.0f, 4.0f), float2(5.0f, 6.0f)), int2(7, 7)))";
- case ValidTextureOverload::kSampleGrad2dArrayF32:
- return R"(texture.sample(sampler, float2(1.0f, 2.0f), 3, gradient2d(float2(4.0f, 5.0f), float2(6.0f, 7.0f))))";
- case ValidTextureOverload::kSampleGrad2dArrayOffsetF32:
- return R"(texture.sample(sampler, float2(1.0f, 2.0f), 3, gradient2d(float2(4.0f, 5.0f), float2(6.0f, 7.0f)), int2(6, 7)))";
- case ValidTextureOverload::kSampleGrad3dF32:
- return R"(texture.sample(sampler, float3(1.0f, 2.0f, 3.0f), gradient3d(float3(4.0f, 5.0f, 6.0f), float3(7.0f, 8.0f, 9.0f))))";
- case ValidTextureOverload::kSampleGrad3dOffsetF32:
- return R"(texture.sample(sampler, float3(1.0f, 2.0f, 3.0f), gradient3d(float3(4.0f, 5.0f, 6.0f), float3(7.0f, 8.0f, 9.0f)), int3(0, 1, 2)))";
- case ValidTextureOverload::kSampleGradCubeF32:
- return R"(texture.sample(sampler, float3(1.0f, 2.0f, 3.0f), gradientcube(float3(4.0f, 5.0f, 6.0f), float3(7.0f, 8.0f, 9.0f))))";
- case ValidTextureOverload::kSampleGradCubeArrayF32:
- return R"(texture.sample(sampler, float3(1.0f, 2.0f, 3.0f), 4, gradientcube(float3(5.0f, 6.0f, 7.0f), float3(8.0f, 9.0f, 10.0f))))";
- case ValidTextureOverload::kSampleCompareDepth2dF32:
- return R"(texture.sample_compare(sampler, float2(1.0f, 2.0f), 3.0f))";
- case ValidTextureOverload::kSampleCompareDepth2dOffsetF32:
- return R"(texture.sample_compare(sampler, float2(1.0f, 2.0f), 3.0f, int2(4, 5)))";
- case ValidTextureOverload::kSampleCompareDepth2dArrayF32:
- return R"(texture.sample_compare(sampler, float2(1.0f, 2.0f), 4, 3.0f))";
- case ValidTextureOverload::kSampleCompareDepth2dArrayOffsetF32:
- return R"(texture.sample_compare(sampler, float2(1.0f, 2.0f), 4, 3.0f, int2(5, 6)))";
- case ValidTextureOverload::kSampleCompareDepthCubeF32:
- return R"(texture.sample_compare(sampler, float3(1.0f, 2.0f, 3.0f), 4.0f))";
- case ValidTextureOverload::kSampleCompareDepthCubeArrayF32:
- return R"(texture.sample_compare(sampler, float3(1.0f, 2.0f, 3.0f), 4, 5.0f))";
- case ValidTextureOverload::kSampleCompareLevelDepth2dF32:
- return R"(texture.sample_compare(sampler, float2(1.0f, 2.0f), 3.0f))";
- case ValidTextureOverload::kSampleCompareLevelDepth2dOffsetF32:
- return R"(texture.sample_compare(sampler, float2(1.0f, 2.0f), 3.0f, int2(4, 5)))";
- case ValidTextureOverload::kSampleCompareLevelDepth2dArrayF32:
- return R"(texture.sample_compare(sampler, float2(1.0f, 2.0f), 4, 3.0f))";
- case ValidTextureOverload::kSampleCompareLevelDepth2dArrayOffsetF32:
- return R"(texture.sample_compare(sampler, float2(1.0f, 2.0f), 4, 3.0f, int2(5, 6)))";
- case ValidTextureOverload::kSampleCompareLevelDepthCubeF32:
- return R"(texture.sample_compare(sampler, float3(1.0f, 2.0f, 3.0f), 4.0f))";
- case ValidTextureOverload::kSampleCompareLevelDepthCubeArrayF32:
- return R"(texture.sample_compare(sampler, float3(1.0f, 2.0f, 3.0f), 4, 5.0f))";
- case ValidTextureOverload::kLoad1dLevelF32:
- return R"(texture.read(uint(1), 0))";
- case ValidTextureOverload::kLoad1dLevelU32:
- return R"(texture.read(uint(1), 0))";
- case ValidTextureOverload::kLoad1dLevelI32:
- return R"(texture.read(uint(1), 0))";
- case ValidTextureOverload::kLoad2dLevelF32:
- return R"(texture.read(uint2(int2(1, 2)), 3))";
- case ValidTextureOverload::kLoad2dLevelU32:
- return R"(texture.read(uint2(int2(1, 2)), 3))";
- case ValidTextureOverload::kLoad2dLevelI32:
- return R"(texture.read(uint2(int2(1, 2)), 3))";
- case ValidTextureOverload::kLoad2dArrayLevelF32:
- return R"(texture.read(uint2(int2(1, 2)), 3, 4))";
- case ValidTextureOverload::kLoad2dArrayLevelU32:
- return R"(texture.read(uint2(int2(1, 2)), 3, 4))";
- case ValidTextureOverload::kLoad2dArrayLevelI32:
- return R"(texture.read(uint2(int2(1, 2)), 3, 4))";
- case ValidTextureOverload::kLoad3dLevelF32:
- return R"(texture.read(uint3(int3(1, 2, 3)), 4))";
- case ValidTextureOverload::kLoad3dLevelU32:
- return R"(texture.read(uint3(int3(1, 2, 3)), 4))";
- case ValidTextureOverload::kLoad3dLevelI32:
- return R"(texture.read(uint3(int3(1, 2, 3)), 4))";
- case ValidTextureOverload::kLoadMultisampled2dF32:
- return R"(texture.read(uint2(int2(1, 2)), 3))";
- case ValidTextureOverload::kLoadMultisampled2dU32:
- return R"(texture.read(uint2(int2(1, 2)), 3))";
- case ValidTextureOverload::kLoadMultisampled2dI32:
- return R"(texture.read(uint2(int2(1, 2)), 3))";
- case ValidTextureOverload::kLoadDepth2dLevelF32:
- case ValidTextureOverload::kLoadDepthMultisampled2dF32:
- return R"(texture.read(uint2(int2(1, 2)), 3))";
- case ValidTextureOverload::kLoadDepth2dArrayLevelF32:
- return R"(texture.read(uint2(int2(1, 2)), 3, 4))";
- case ValidTextureOverload::kStoreWO1dRgba32float:
- return R"(texture.write(float4(2.0f, 3.0f, 4.0f, 5.0f), uint(1)))";
- case ValidTextureOverload::kStoreWO2dRgba32float:
- return R"(texture.write(float4(3.0f, 4.0f, 5.0f, 6.0f), uint2(int2(1, 2))))";
- case ValidTextureOverload::kStoreWO2dArrayRgba32float:
- return R"(texture.write(float4(4.0f, 5.0f, 6.0f, 7.0f), uint2(int2(1, 2)), 3))";
- case ValidTextureOverload::kStoreWO3dRgba32float:
- return R"(texture.write(float4(4.0f, 5.0f, 6.0f, 7.0f), uint3(int3(1, 2, 3))))";
- }
- return "<unmatched texture overload>";
+std::string expected_texture_overload(ast::builtin::test::ValidTextureOverload overload) {
+ using ValidTextureOverload = ast::builtin::test::ValidTextureOverload;
+ switch (overload) {
+ case ValidTextureOverload::kDimensions1d:
+ case ValidTextureOverload::kDimensionsStorageWO1d:
+ return R"(int(texture.get_width(0)))";
+ case ValidTextureOverload::kDimensions2d:
+ case ValidTextureOverload::kDimensions2dArray:
+ case ValidTextureOverload::kDimensionsCube:
+ case ValidTextureOverload::kDimensionsCubeArray:
+ case ValidTextureOverload::kDimensionsMultisampled2d:
+ case ValidTextureOverload::kDimensionsDepth2d:
+ case ValidTextureOverload::kDimensionsDepth2dArray:
+ case ValidTextureOverload::kDimensionsDepthCube:
+ case ValidTextureOverload::kDimensionsDepthCubeArray:
+ case ValidTextureOverload::kDimensionsDepthMultisampled2d:
+ case ValidTextureOverload::kDimensionsStorageWO2d:
+ case ValidTextureOverload::kDimensionsStorageWO2dArray:
+ return R"(int2(texture.get_width(), texture.get_height()))";
+ case ValidTextureOverload::kDimensions3d:
+ case ValidTextureOverload::kDimensionsStorageWO3d:
+ return R"(int3(texture.get_width(), texture.get_height(), texture.get_depth()))";
+ case ValidTextureOverload::kDimensions2dLevel:
+ case ValidTextureOverload::kDimensionsCubeLevel:
+ case ValidTextureOverload::kDimensionsCubeArrayLevel:
+ case ValidTextureOverload::kDimensions2dArrayLevel:
+ case ValidTextureOverload::kDimensionsDepth2dLevel:
+ case ValidTextureOverload::kDimensionsDepth2dArrayLevel:
+ case ValidTextureOverload::kDimensionsDepthCubeLevel:
+ case ValidTextureOverload::kDimensionsDepthCubeArrayLevel:
+ return R"(int2(texture.get_width(1), texture.get_height(1)))";
+ case ValidTextureOverload::kDimensions3dLevel:
+ return R"(int3(texture.get_width(1), texture.get_height(1), texture.get_depth(1)))";
+ case ValidTextureOverload::kGather2dF32:
+ return R"(texture.gather(sampler, float2(1.0f, 2.0f), int2(0), component::x))";
+ case ValidTextureOverload::kGather2dOffsetF32:
+ return R"(texture.gather(sampler, float2(1.0f, 2.0f), int2(3, 4), component::x))";
+ case ValidTextureOverload::kGather2dArrayF32:
+ return R"(texture.gather(sampler, float2(1.0f, 2.0f), 3, int2(0), component::x))";
+ case ValidTextureOverload::kGather2dArrayOffsetF32:
+ return R"(texture.gather(sampler, float2(1.0f, 2.0f), 3, int2(4, 5), component::x))";
+ case ValidTextureOverload::kGatherCubeF32:
+ return R"(texture.gather(sampler, float3(1.0f, 2.0f, 3.0f), component::x))";
+ case ValidTextureOverload::kGatherCubeArrayF32:
+ return R"(texture.gather(sampler, float3(1.0f, 2.0f, 3.0f), 4, component::x))";
+ case ValidTextureOverload::kGatherDepth2dF32:
+ return R"(texture.gather(sampler, float2(1.0f, 2.0f)))";
+ case ValidTextureOverload::kGatherDepth2dOffsetF32:
+ return R"(texture.gather(sampler, float2(1.0f, 2.0f), int2(3, 4)))";
+ case ValidTextureOverload::kGatherDepth2dArrayF32:
+ return R"(texture.gather(sampler, float2(1.0f, 2.0f), 3))";
+ case ValidTextureOverload::kGatherDepth2dArrayOffsetF32:
+ return R"(texture.gather(sampler, float2(1.0f, 2.0f), 3, int2(4, 5)))";
+ case ValidTextureOverload::kGatherDepthCubeF32:
+ return R"(texture.gather(sampler, float3(1.0f, 2.0f, 3.0f)))";
+ case ValidTextureOverload::kGatherDepthCubeArrayF32:
+ return R"(texture.gather(sampler, float3(1.0f, 2.0f, 3.0f), 4))";
+ case ValidTextureOverload::kGatherCompareDepth2dF32:
+ return R"(texture.gather_compare(sampler, float2(1.0f, 2.0f), 3.0f))";
+ case ValidTextureOverload::kGatherCompareDepth2dOffsetF32:
+ return R"(texture.gather_compare(sampler, float2(1.0f, 2.0f), 3.0f, int2(4, 5)))";
+ case ValidTextureOverload::kGatherCompareDepth2dArrayF32:
+ return R"(texture.gather_compare(sampler, float2(1.0f, 2.0f), 3, 4.0f))";
+ case ValidTextureOverload::kGatherCompareDepth2dArrayOffsetF32:
+ return R"(texture.gather_compare(sampler, float2(1.0f, 2.0f), 3, 4.0f, int2(5, 6)))";
+ case ValidTextureOverload::kGatherCompareDepthCubeF32:
+ return R"(texture.gather_compare(sampler, float3(1.0f, 2.0f, 3.0f), 4.0f))";
+ case ValidTextureOverload::kGatherCompareDepthCubeArrayF32:
+ return R"(texture.gather_compare(sampler, float3(1.0f, 2.0f, 3.0f), 4, 5.0f))";
+ case ValidTextureOverload::kNumLayers2dArray:
+ case ValidTextureOverload::kNumLayersCubeArray:
+ case ValidTextureOverload::kNumLayersDepth2dArray:
+ case ValidTextureOverload::kNumLayersDepthCubeArray:
+ case ValidTextureOverload::kNumLayersStorageWO2dArray:
+ return R"(int(texture.get_array_size()))";
+ case ValidTextureOverload::kNumLevels2d:
+ case ValidTextureOverload::kNumLevels2dArray:
+ case ValidTextureOverload::kNumLevels3d:
+ case ValidTextureOverload::kNumLevelsCube:
+ case ValidTextureOverload::kNumLevelsCubeArray:
+ case ValidTextureOverload::kNumLevelsDepth2d:
+ case ValidTextureOverload::kNumLevelsDepth2dArray:
+ case ValidTextureOverload::kNumLevelsDepthCube:
+ case ValidTextureOverload::kNumLevelsDepthCubeArray:
+ return R"(int(texture.get_num_mip_levels()))";
+ case ValidTextureOverload::kNumSamplesDepthMultisampled2d:
+ case ValidTextureOverload::kNumSamplesMultisampled2d:
+ return R"(int(texture.get_num_samples()))";
+ case ValidTextureOverload::kSample1dF32:
+ return R"(texture.sample(sampler, 1.0f))";
+ case ValidTextureOverload::kSample2dF32:
+ return R"(texture.sample(sampler, float2(1.0f, 2.0f)))";
+ case ValidTextureOverload::kSample2dOffsetF32:
+ return R"(texture.sample(sampler, float2(1.0f, 2.0f), int2(3, 4)))";
+ case ValidTextureOverload::kSample2dArrayF32:
+ return R"(texture.sample(sampler, float2(1.0f, 2.0f), 3))";
+ case ValidTextureOverload::kSample2dArrayOffsetF32:
+ return R"(texture.sample(sampler, float2(1.0f, 2.0f), 3, int2(4, 5)))";
+ case ValidTextureOverload::kSample3dF32:
+ return R"(texture.sample(sampler, float3(1.0f, 2.0f, 3.0f)))";
+ case ValidTextureOverload::kSample3dOffsetF32:
+ return R"(texture.sample(sampler, float3(1.0f, 2.0f, 3.0f), int3(4, 5, 6)))";
+ case ValidTextureOverload::kSampleCubeF32:
+ return R"(texture.sample(sampler, float3(1.0f, 2.0f, 3.0f)))";
+ case ValidTextureOverload::kSampleCubeArrayF32:
+ return R"(texture.sample(sampler, float3(1.0f, 2.0f, 3.0f), 4))";
+ case ValidTextureOverload::kSampleDepth2dF32:
+ return R"(texture.sample(sampler, float2(1.0f, 2.0f)))";
+ case ValidTextureOverload::kSampleDepth2dOffsetF32:
+ return R"(texture.sample(sampler, float2(1.0f, 2.0f), int2(3, 4)))";
+ case ValidTextureOverload::kSampleDepth2dArrayF32:
+ return R"(texture.sample(sampler, float2(1.0f, 2.0f), 3))";
+ case ValidTextureOverload::kSampleDepth2dArrayOffsetF32:
+ return R"(texture.sample(sampler, float2(1.0f, 2.0f), 3, int2(4, 5)))";
+ case ValidTextureOverload::kSampleDepthCubeF32:
+ return R"(texture.sample(sampler, float3(1.0f, 2.0f, 3.0f)))";
+ case ValidTextureOverload::kSampleDepthCubeArrayF32:
+ return R"(texture.sample(sampler, float3(1.0f, 2.0f, 3.0f), 4))";
+ case ValidTextureOverload::kSampleBias2dF32:
+ return R"(texture.sample(sampler, float2(1.0f, 2.0f), bias(3.0f)))";
+ case ValidTextureOverload::kSampleBias2dOffsetF32:
+ return R"(texture.sample(sampler, float2(1.0f, 2.0f), bias(3.0f), int2(4, 5)))";
+ case ValidTextureOverload::kSampleBias2dArrayF32:
+ return R"(texture.sample(sampler, float2(1.0f, 2.0f), 4, bias(3.0f)))";
+ case ValidTextureOverload::kSampleBias2dArrayOffsetF32:
+ return R"(texture.sample(sampler, float2(1.0f, 2.0f), 3, bias(4.0f), int2(5, 6)))";
+ case ValidTextureOverload::kSampleBias3dF32:
+ return R"(texture.sample(sampler, float3(1.0f, 2.0f, 3.0f), bias(4.0f)))";
+ case ValidTextureOverload::kSampleBias3dOffsetF32:
+ return R"(texture.sample(sampler, float3(1.0f, 2.0f, 3.0f), bias(4.0f), int3(5, 6, 7)))";
+ case ValidTextureOverload::kSampleBiasCubeF32:
+ return R"(texture.sample(sampler, float3(1.0f, 2.0f, 3.0f), bias(4.0f)))";
+ case ValidTextureOverload::kSampleBiasCubeArrayF32:
+ return R"(texture.sample(sampler, float3(1.0f, 2.0f, 3.0f), 3, bias(4.0f)))";
+ case ValidTextureOverload::kSampleLevel2dF32:
+ return R"(texture.sample(sampler, float2(1.0f, 2.0f), level(3.0f)))";
+ case ValidTextureOverload::kSampleLevel2dOffsetF32:
+ return R"(texture.sample(sampler, float2(1.0f, 2.0f), level(3.0f), int2(4, 5)))";
+ case ValidTextureOverload::kSampleLevel2dArrayF32:
+ return R"(texture.sample(sampler, float2(1.0f, 2.0f), 3, level(4.0f)))";
+ case ValidTextureOverload::kSampleLevel2dArrayOffsetF32:
+ return R"(texture.sample(sampler, float2(1.0f, 2.0f), 3, level(4.0f), int2(5, 6)))";
+ case ValidTextureOverload::kSampleLevel3dF32:
+ return R"(texture.sample(sampler, float3(1.0f, 2.0f, 3.0f), level(4.0f)))";
+ case ValidTextureOverload::kSampleLevel3dOffsetF32:
+ return R"(texture.sample(sampler, float3(1.0f, 2.0f, 3.0f), level(4.0f), int3(5, 6, 7)))";
+ case ValidTextureOverload::kSampleLevelCubeF32:
+ return R"(texture.sample(sampler, float3(1.0f, 2.0f, 3.0f), level(4.0f)))";
+ case ValidTextureOverload::kSampleLevelCubeArrayF32:
+ return R"(texture.sample(sampler, float3(1.0f, 2.0f, 3.0f), 4, level(5.0f)))";
+ case ValidTextureOverload::kSampleLevelDepth2dF32:
+ return R"(texture.sample(sampler, float2(1.0f, 2.0f), level(3)))";
+ case ValidTextureOverload::kSampleLevelDepth2dOffsetF32:
+ return R"(texture.sample(sampler, float2(1.0f, 2.0f), level(3), int2(4, 5)))";
+ case ValidTextureOverload::kSampleLevelDepth2dArrayF32:
+ return R"(texture.sample(sampler, float2(1.0f, 2.0f), 3, level(4)))";
+ case ValidTextureOverload::kSampleLevelDepth2dArrayOffsetF32:
+ return R"(texture.sample(sampler, float2(1.0f, 2.0f), 3, level(4), int2(5, 6)))";
+ case ValidTextureOverload::kSampleLevelDepthCubeF32:
+ return R"(texture.sample(sampler, float3(1.0f, 2.0f, 3.0f), level(4)))";
+ case ValidTextureOverload::kSampleLevelDepthCubeArrayF32:
+ return R"(texture.sample(sampler, float3(1.0f, 2.0f, 3.0f), 4, level(5)))";
+ case ValidTextureOverload::kSampleGrad2dF32:
+ return R"(texture.sample(sampler, float2(1.0f, 2.0f), gradient2d(float2(3.0f, 4.0f), float2(5.0f, 6.0f))))";
+ case ValidTextureOverload::kSampleGrad2dOffsetF32:
+ return R"(texture.sample(sampler, float2(1.0f, 2.0f), gradient2d(float2(3.0f, 4.0f), float2(5.0f, 6.0f)), int2(7)))";
+ case ValidTextureOverload::kSampleGrad2dArrayF32:
+ return R"(texture.sample(sampler, float2(1.0f, 2.0f), 3, gradient2d(float2(4.0f, 5.0f), float2(6.0f, 7.0f))))";
+ case ValidTextureOverload::kSampleGrad2dArrayOffsetF32:
+ return R"(texture.sample(sampler, float2(1.0f, 2.0f), 3, gradient2d(float2(4.0f, 5.0f), float2(6.0f, 7.0f)), int2(6, 7)))";
+ case ValidTextureOverload::kSampleGrad3dF32:
+ return R"(texture.sample(sampler, float3(1.0f, 2.0f, 3.0f), gradient3d(float3(4.0f, 5.0f, 6.0f), float3(7.0f, 8.0f, 9.0f))))";
+ case ValidTextureOverload::kSampleGrad3dOffsetF32:
+ return R"(texture.sample(sampler, float3(1.0f, 2.0f, 3.0f), gradient3d(float3(4.0f, 5.0f, 6.0f), float3(7.0f, 8.0f, 9.0f)), int3(0, 1, 2)))";
+ case ValidTextureOverload::kSampleGradCubeF32:
+ return R"(texture.sample(sampler, float3(1.0f, 2.0f, 3.0f), gradientcube(float3(4.0f, 5.0f, 6.0f), float3(7.0f, 8.0f, 9.0f))))";
+ case ValidTextureOverload::kSampleGradCubeArrayF32:
+ return R"(texture.sample(sampler, float3(1.0f, 2.0f, 3.0f), 4, gradientcube(float3(5.0f, 6.0f, 7.0f), float3(8.0f, 9.0f, 10.0f))))";
+ case ValidTextureOverload::kSampleCompareDepth2dF32:
+ return R"(texture.sample_compare(sampler, float2(1.0f, 2.0f), 3.0f))";
+ case ValidTextureOverload::kSampleCompareDepth2dOffsetF32:
+ return R"(texture.sample_compare(sampler, float2(1.0f, 2.0f), 3.0f, int2(4, 5)))";
+ case ValidTextureOverload::kSampleCompareDepth2dArrayF32:
+ return R"(texture.sample_compare(sampler, float2(1.0f, 2.0f), 4, 3.0f))";
+ case ValidTextureOverload::kSampleCompareDepth2dArrayOffsetF32:
+ return R"(texture.sample_compare(sampler, float2(1.0f, 2.0f), 4, 3.0f, int2(5, 6)))";
+ case ValidTextureOverload::kSampleCompareDepthCubeF32:
+ return R"(texture.sample_compare(sampler, float3(1.0f, 2.0f, 3.0f), 4.0f))";
+ case ValidTextureOverload::kSampleCompareDepthCubeArrayF32:
+ return R"(texture.sample_compare(sampler, float3(1.0f, 2.0f, 3.0f), 4, 5.0f))";
+ case ValidTextureOverload::kSampleCompareLevelDepth2dF32:
+ return R"(texture.sample_compare(sampler, float2(1.0f, 2.0f), 3.0f))";
+ case ValidTextureOverload::kSampleCompareLevelDepth2dOffsetF32:
+ return R"(texture.sample_compare(sampler, float2(1.0f, 2.0f), 3.0f, int2(4, 5)))";
+ case ValidTextureOverload::kSampleCompareLevelDepth2dArrayF32:
+ return R"(texture.sample_compare(sampler, float2(1.0f, 2.0f), 4, 3.0f))";
+ case ValidTextureOverload::kSampleCompareLevelDepth2dArrayOffsetF32:
+ return R"(texture.sample_compare(sampler, float2(1.0f, 2.0f), 4, 3.0f, int2(5, 6)))";
+ case ValidTextureOverload::kSampleCompareLevelDepthCubeF32:
+ return R"(texture.sample_compare(sampler, float3(1.0f, 2.0f, 3.0f), 4.0f))";
+ case ValidTextureOverload::kSampleCompareLevelDepthCubeArrayF32:
+ return R"(texture.sample_compare(sampler, float3(1.0f, 2.0f, 3.0f), 4, 5.0f))";
+ case ValidTextureOverload::kLoad1dLevelF32:
+ return R"(texture.read(uint(1), 0))";
+ case ValidTextureOverload::kLoad1dLevelU32:
+ return R"(texture.read(uint(1), 0))";
+ case ValidTextureOverload::kLoad1dLevelI32:
+ return R"(texture.read(uint(1), 0))";
+ case ValidTextureOverload::kLoad2dLevelF32:
+ return R"(texture.read(uint2(int2(1, 2)), 3))";
+ case ValidTextureOverload::kLoad2dLevelU32:
+ return R"(texture.read(uint2(int2(1, 2)), 3))";
+ case ValidTextureOverload::kLoad2dLevelI32:
+ return R"(texture.read(uint2(int2(1, 2)), 3))";
+ case ValidTextureOverload::kLoad2dArrayLevelF32:
+ return R"(texture.read(uint2(int2(1, 2)), 3, 4))";
+ case ValidTextureOverload::kLoad2dArrayLevelU32:
+ return R"(texture.read(uint2(int2(1, 2)), 3, 4))";
+ case ValidTextureOverload::kLoad2dArrayLevelI32:
+ return R"(texture.read(uint2(int2(1, 2)), 3, 4))";
+ case ValidTextureOverload::kLoad3dLevelF32:
+ return R"(texture.read(uint3(int3(1, 2, 3)), 4))";
+ case ValidTextureOverload::kLoad3dLevelU32:
+ return R"(texture.read(uint3(int3(1, 2, 3)), 4))";
+ case ValidTextureOverload::kLoad3dLevelI32:
+ return R"(texture.read(uint3(int3(1, 2, 3)), 4))";
+ case ValidTextureOverload::kLoadMultisampled2dF32:
+ return R"(texture.read(uint2(int2(1, 2)), 3))";
+ case ValidTextureOverload::kLoadMultisampled2dU32:
+ return R"(texture.read(uint2(int2(1, 2)), 3))";
+ case ValidTextureOverload::kLoadMultisampled2dI32:
+ return R"(texture.read(uint2(int2(1, 2)), 3))";
+ case ValidTextureOverload::kLoadDepth2dLevelF32:
+ case ValidTextureOverload::kLoadDepthMultisampled2dF32:
+ return R"(texture.read(uint2(int2(1, 2)), 3))";
+ case ValidTextureOverload::kLoadDepth2dArrayLevelF32:
+ return R"(texture.read(uint2(int2(1, 2)), 3, 4))";
+ case ValidTextureOverload::kStoreWO1dRgba32float:
+ return R"(texture.write(float4(2.0f, 3.0f, 4.0f, 5.0f), uint(1)))";
+ case ValidTextureOverload::kStoreWO2dRgba32float:
+ return R"(texture.write(float4(3.0f, 4.0f, 5.0f, 6.0f), uint2(int2(1, 2))))";
+ case ValidTextureOverload::kStoreWO2dArrayRgba32float:
+ return R"(texture.write(float4(4.0f, 5.0f, 6.0f, 7.0f), uint2(int2(1, 2)), 3))";
+ case ValidTextureOverload::kStoreWO3dRgba32float:
+ return R"(texture.write(float4(4.0f, 5.0f, 6.0f, 7.0f), uint3(int3(1, 2, 3))))";
+ }
+ return "<unmatched texture overload>";
} // NOLINT - Ignore the length of this function
class MslGeneratorBuiltinTextureTest
: public TestParamHelper<ast::builtin::test::TextureOverloadCase> {};
TEST_P(MslGeneratorBuiltinTextureTest, Call) {
- auto param = GetParam();
+ auto param = GetParam();
- param.BuildTextureVariable(this);
- param.BuildSamplerVariable(this);
+ param.BuildTextureVariable(this);
+ param.BuildSamplerVariable(this);
- auto* call = Call(Expr(param.function), param.args(this));
- auto* stmt = CallStmt(call);
+ auto* call = Call(Expr(param.function), param.args(this));
+ auto* stmt = CallStmt(call);
- Func("main", {}, ty.void_(), {stmt}, {Stage(ast::PipelineStage::kFragment)});
+ Func("main", {}, ty.void_(), {stmt}, {Stage(ast::PipelineStage::kFragment)});
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, call)) << gen.error();
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, call)) << gen.error();
- auto expected = expected_texture_overload(param.overload);
- EXPECT_EQ(expected, out.str());
+ auto expected = expected_texture_overload(param.overload);
+ EXPECT_EQ(expected, out.str());
}
-INSTANTIATE_TEST_SUITE_P(
- MslGeneratorBuiltinTextureTest,
- MslGeneratorBuiltinTextureTest,
- testing::ValuesIn(ast::builtin::test::TextureOverloadCase::ValidCases()));
+INSTANTIATE_TEST_SUITE_P(MslGeneratorBuiltinTextureTest,
+ MslGeneratorBuiltinTextureTest,
+ testing::ValuesIn(ast::builtin::test::TextureOverloadCase::ValidCases()));
} // namespace
} // namespace tint::writer::msl
diff --git a/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_call_test.cc b/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_call_test.cc
index 1b6ce7a2afb..93c9b830364 100644
--- a/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_call_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_call_test.cc
@@ -15,63 +15,65 @@
#include "src/tint/ast/call_statement.h"
#include "src/tint/writer/msl/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::msl {
namespace {
using MslGeneratorImplTest = TestHelper;
TEST_F(MslGeneratorImplTest, EmitExpression_Call_WithoutParams) {
- Func("my_func", {}, ty.f32(), {Return(1.23f)});
+ Func("my_func", {}, ty.f32(), {Return(1.23_f)});
- auto* call = Call("my_func");
- WrapInFunction(call);
+ auto* call = Call("my_func");
+ WrapInFunction(call);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, call)) << gen.error();
- EXPECT_EQ(out.str(), "my_func()");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, call)) << gen.error();
+ EXPECT_EQ(out.str(), "my_func()");
}
TEST_F(MslGeneratorImplTest, EmitExpression_Call_WithParams) {
- Func("my_func",
- {
- Param(Sym(), ty.f32()),
- Param(Sym(), ty.f32()),
- },
- ty.f32(), {Return(1.23f)});
- Global("param1", ty.f32(), ast::StorageClass::kPrivate);
- Global("param2", ty.f32(), ast::StorageClass::kPrivate);
-
- auto* call = Call("my_func", "param1", "param2");
- WrapInFunction(call);
-
- GeneratorImpl& gen = Build();
-
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, call)) << gen.error();
- EXPECT_EQ(out.str(), "my_func(param1, param2)");
+ Func("my_func",
+ {
+ Param(Sym(), ty.f32()),
+ Param(Sym(), ty.f32()),
+ },
+ ty.f32(), {Return(1.23_f)});
+ Global("param1", ty.f32(), ast::StorageClass::kPrivate);
+ Global("param2", ty.f32(), ast::StorageClass::kPrivate);
+
+ auto* call = Call("my_func", "param1", "param2");
+ WrapInFunction(call);
+
+ GeneratorImpl& gen = Build();
+
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, call)) << gen.error();
+ EXPECT_EQ(out.str(), "my_func(param1, param2)");
}
TEST_F(MslGeneratorImplTest, EmitStatement_Call) {
- Func("my_func",
- {
- Param(Sym(), ty.f32()),
- Param(Sym(), ty.f32()),
- },
- ty.void_(), ast::StatementList{}, ast::AttributeList{});
- Global("param1", ty.f32(), ast::StorageClass::kPrivate);
- Global("param2", ty.f32(), ast::StorageClass::kPrivate);
-
- auto* call = Call("my_func", "param1", "param2");
- auto* stmt = CallStmt(call);
- WrapInFunction(stmt);
-
- GeneratorImpl& gen = Build();
-
- gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(stmt)) << gen.error();
- EXPECT_EQ(gen.result(), " my_func(param1, param2);\n");
+ Func("my_func",
+ {
+ Param(Sym(), ty.f32()),
+ Param(Sym(), ty.f32()),
+ },
+ ty.void_(), ast::StatementList{}, ast::AttributeList{});
+ Global("param1", ty.f32(), ast::StorageClass::kPrivate);
+ Global("param2", ty.f32(), ast::StorageClass::kPrivate);
+
+ auto* call = Call("my_func", "param1", "param2");
+ auto* stmt = CallStmt(call);
+ WrapInFunction(stmt);
+
+ GeneratorImpl& gen = Build();
+
+ gen.increment_indent();
+ ASSERT_TRUE(gen.EmitStatement(stmt)) << gen.error();
+ EXPECT_EQ(gen.result(), " my_func(param1, param2);\n");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_case_test.cc b/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_case_test.cc
index 8822ea9c3c4..8fc1bc95b5b 100644
--- a/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_case_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_case_test.cc
@@ -15,70 +15,70 @@
#include "src/tint/ast/fallthrough_statement.h"
#include "src/tint/writer/msl/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::msl {
namespace {
using MslGeneratorImplTest = TestHelper;
TEST_F(MslGeneratorImplTest, Emit_Case) {
- auto* s = Switch(1, Case(Expr(5), Block(create<ast::BreakStatement>())),
- DefaultCase());
- WrapInFunction(s);
+ auto* s = Switch(1_i, Case(Expr(5_i), Block(create<ast::BreakStatement>())), DefaultCase());
+ WrapInFunction(s);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitCase(s->body[0])) << gen.error();
- EXPECT_EQ(gen.result(), R"( case 5: {
+ ASSERT_TRUE(gen.EmitCase(s->body[0])) << gen.error();
+ EXPECT_EQ(gen.result(), R"( case 5: {
break;
}
)");
}
TEST_F(MslGeneratorImplTest, Emit_Case_BreaksByDefault) {
- auto* s = Switch(1, Case(Expr(5), Block()), DefaultCase());
- WrapInFunction(s);
+ auto* s = Switch(1_i, Case(Expr(5_i), Block()), DefaultCase());
+ WrapInFunction(s);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitCase(s->body[0])) << gen.error();
- EXPECT_EQ(gen.result(), R"( case 5: {
+ ASSERT_TRUE(gen.EmitCase(s->body[0])) << gen.error();
+ EXPECT_EQ(gen.result(), R"( case 5: {
break;
}
)");
}
TEST_F(MslGeneratorImplTest, Emit_Case_WithFallthrough) {
- auto* s = Switch(1, Case(Expr(5), Block(create<ast::FallthroughStatement>())),
- DefaultCase());
- WrapInFunction(s);
+ auto* s =
+ Switch(1_i, Case(Expr(5_i), Block(create<ast::FallthroughStatement>())), DefaultCase());
+ WrapInFunction(s);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitCase(s->body[0])) << gen.error();
- EXPECT_EQ(gen.result(), R"( case 5: {
+ ASSERT_TRUE(gen.EmitCase(s->body[0])) << gen.error();
+ EXPECT_EQ(gen.result(), R"( case 5: {
/* fallthrough */
}
)");
}
TEST_F(MslGeneratorImplTest, Emit_Case_MultipleSelectors) {
- auto* s =
- Switch(1, Case({Expr(5), Expr(6)}, Block(create<ast::BreakStatement>())),
- DefaultCase());
- WrapInFunction(s);
+ auto* s = Switch(1_i, Case({Expr(5_i), Expr(6_i)}, Block(create<ast::BreakStatement>())),
+ DefaultCase());
+ WrapInFunction(s);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitCase(s->body[0])) << gen.error();
- EXPECT_EQ(gen.result(), R"( case 5:
+ ASSERT_TRUE(gen.EmitCase(s->body[0])) << gen.error();
+ EXPECT_EQ(gen.result(), R"( case 5:
case 6: {
break;
}
@@ -86,15 +86,15 @@ TEST_F(MslGeneratorImplTest, Emit_Case_MultipleSelectors) {
}
TEST_F(MslGeneratorImplTest, Emit_Case_Default) {
- auto* s = Switch(1, DefaultCase(Block(create<ast::BreakStatement>())));
- WrapInFunction(s);
+ auto* s = Switch(1_i, DefaultCase(Block(create<ast::BreakStatement>())));
+ WrapInFunction(s);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitCase(s->body[0])) << gen.error();
- EXPECT_EQ(gen.result(), R"( default: {
+ ASSERT_TRUE(gen.EmitCase(s->body[0])) << gen.error();
+ EXPECT_EQ(gen.result(), R"( default: {
break;
}
)");
diff --git a/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_cast_test.cc b/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_cast_test.cc
index 80497c29d29..4b9e3f28d9a 100644
--- a/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_cast_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_cast_test.cc
@@ -14,42 +14,44 @@
#include "src/tint/writer/msl/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::msl {
namespace {
using MslGeneratorImplTest = TestHelper;
TEST_F(MslGeneratorImplTest, EmitExpression_Cast_Scalar) {
- auto* cast = Construct<f32>(1);
- WrapInFunction(cast);
+ auto* cast = Construct<f32>(1_i);
+ WrapInFunction(cast);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, cast)) << gen.error();
- EXPECT_EQ(out.str(), "float(1)");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, cast)) << gen.error();
+ EXPECT_EQ(out.str(), "1.0f");
}
TEST_F(MslGeneratorImplTest, EmitExpression_Cast_Vector) {
- auto* cast = vec3<f32>(vec3<i32>(1, 2, 3));
- WrapInFunction(cast);
+ auto* cast = vec3<f32>(vec3<i32>(1_i, 2_i, 3_i));
+ WrapInFunction(cast);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, cast)) << gen.error();
- EXPECT_EQ(out.str(), "float3(int3(1, 2, 3))");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, cast)) << gen.error();
+ EXPECT_EQ(out.str(), "float3(1.0f, 2.0f, 3.0f)");
}
TEST_F(MslGeneratorImplTest, EmitExpression_Cast_IntMin) {
- auto* cast = Construct<u32>(std::numeric_limits<int32_t>::min());
- WrapInFunction(cast);
+ auto* cast = Construct<u32>(i32(std::numeric_limits<int32_t>::min()));
+ WrapInFunction(cast);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, cast)) << gen.error();
- EXPECT_EQ(out.str(), "uint((-2147483647 - 1))");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, cast)) << gen.error();
+ EXPECT_EQ(out.str(), "0u");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_constructor_test.cc b/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_constructor_test.cc
index cbe34e76993..2fa85f01cee 100644
--- a/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_constructor_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_constructor_test.cc
@@ -15,6 +15,8 @@
#include "gmock/gmock.h"
#include "src/tint/writer/msl/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::msl {
namespace {
@@ -23,163 +25,158 @@ using ::testing::HasSubstr;
using MslGeneratorImplTest = TestHelper;
TEST_F(MslGeneratorImplTest, EmitConstructor_Bool) {
- WrapInFunction(Expr(false));
+ WrapInFunction(Expr(false));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr("false"));
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr("false"));
}
TEST_F(MslGeneratorImplTest, EmitConstructor_Int) {
- WrapInFunction(Expr(-12345));
+ WrapInFunction(Expr(-12345_i));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr("-12345"));
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr("-12345"));
}
TEST_F(MslGeneratorImplTest, EmitConstructor_UInt) {
- WrapInFunction(Expr(56779u));
+ WrapInFunction(Expr(56779_u));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr("56779u"));
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr("56779u"));
}
TEST_F(MslGeneratorImplTest, EmitConstructor_Float) {
- // Use a number close to 1<<30 but whose decimal representation ends in 0.
- WrapInFunction(Expr(static_cast<float>((1 << 30) - 4)));
+ // Use a number close to 1<<30 but whose decimal representation ends in 0.
+ WrapInFunction(Expr(f32((1 << 30) - 4)));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr("1073741824.0f"));
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr("1073741824.0f"));
}
TEST_F(MslGeneratorImplTest, EmitConstructor_Type_Float) {
- WrapInFunction(Construct<f32>(-1.2e-5f));
+ WrapInFunction(Construct<f32>(-1.2e-5_f));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr("float(-0.000012f)"));
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr("-0.000012f"));
}
TEST_F(MslGeneratorImplTest, EmitConstructor_Type_Bool) {
- WrapInFunction(Construct<bool>(true));
+ WrapInFunction(Construct<bool>(true));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr("bool(true)"));
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr("true"));
}
TEST_F(MslGeneratorImplTest, EmitConstructor_Type_Int) {
- WrapInFunction(Construct<i32>(-12345));
+ WrapInFunction(Construct<i32>(-12345_i));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr("int(-12345)"));
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr("-12345"));
}
TEST_F(MslGeneratorImplTest, EmitConstructor_Type_Uint) {
- WrapInFunction(Construct<u32>(12345u));
+ WrapInFunction(Construct<u32>(12345_u));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr("uint(12345u)"));
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr("12345u"));
}
TEST_F(MslGeneratorImplTest, EmitConstructor_Type_Vec) {
- WrapInFunction(vec3<f32>(1.f, 2.f, 3.f));
+ WrapInFunction(vec3<f32>(1_f, 2_f, 3_f));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr("float3(1.0f, 2.0f, 3.0f)"));
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr("float3(1.0f, 2.0f, 3.0f)"));
}
TEST_F(MslGeneratorImplTest, EmitConstructor_Type_Vec_Empty) {
- WrapInFunction(vec3<f32>());
+ WrapInFunction(vec3<f32>());
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr("float3()"));
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr("float3(0.0f)"));
}
TEST_F(MslGeneratorImplTest, EmitConstructor_Type_Mat) {
- WrapInFunction(Construct(ty.mat2x3<f32>(), vec3<f32>(1.0f, 2.0f, 3.0f),
- vec3<f32>(3.0f, 4.0f, 5.0f)));
+ WrapInFunction(Construct(ty.mat2x3<f32>(), vec3<f32>(1_f, 2_f, 3_f), vec3<f32>(3_f, 4_f, 5_f)));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
+ ASSERT_TRUE(gen.Generate()) << gen.error();
- // A matrix of type T with n columns and m rows can also be constructed from
- // n vectors of type T with m components.
- EXPECT_THAT(
- gen.result(),
- HasSubstr(
- "float2x3(float3(1.0f, 2.0f, 3.0f), float3(3.0f, 4.0f, 5.0f))"));
+ // A matrix of type T with n columns and m rows can also be constructed from
+ // n vectors of type T with m components.
+ EXPECT_THAT(gen.result(),
+ HasSubstr("float2x3(float3(1.0f, 2.0f, 3.0f), float3(3.0f, 4.0f, 5.0f))"));
}
TEST_F(MslGeneratorImplTest, EmitConstructor_Type_Mat_Empty) {
- WrapInFunction(mat4x4<f32>());
+ WrapInFunction(mat4x4<f32>());
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr("float4x4()"));
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr("float4x4(float4(0.0f), float4(0.0f)"));
}
TEST_F(MslGeneratorImplTest, EmitConstructor_Type_Array) {
- WrapInFunction(
- Construct(ty.array(ty.vec3<f32>(), 3), vec3<f32>(1.0f, 2.0f, 3.0f),
- vec3<f32>(4.0f, 5.0f, 6.0f), vec3<f32>(7.0f, 8.0f, 9.0f)));
+ WrapInFunction(Construct(ty.array(ty.vec3<f32>(), 3_u), vec3<f32>(1_f, 2_f, 3_f),
+ vec3<f32>(4_f, 5_f, 6_f), vec3<f32>(7_f, 8_f, 9_f)));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(),
- HasSubstr("{float3(1.0f, 2.0f, 3.0f), float3(4.0f, 5.0f, 6.0f), "
- "float3(7.0f, 8.0f, 9.0f)}"));
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr("{float3(1.0f, 2.0f, 3.0f), float3(4.0f, 5.0f, 6.0f), "
+ "float3(7.0f, 8.0f, 9.0f)}"));
}
TEST_F(MslGeneratorImplTest, EmitConstructor_Type_Struct) {
- auto* str = Structure("S", {
- Member("a", ty.i32()),
- Member("b", ty.f32()),
- Member("c", ty.vec3<i32>()),
- });
+ auto* str = Structure("S", {
+ Member("a", ty.i32()),
+ Member("b", ty.f32()),
+ Member("c", ty.vec3<i32>()),
+ });
- WrapInFunction(Construct(ty.Of(str), 1, 2.0f, vec3<i32>(3, 4, 5)));
+ WrapInFunction(Construct(ty.Of(str), 1_i, 2_f, vec3<i32>(3_i, 4_i, 5_i)));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr("{.a=1, .b=2.0f, .c=int3(3, 4, 5)}"));
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr("{.a=1, .b=2.0f, .c=int3(3, 4, 5)}"));
}
TEST_F(MslGeneratorImplTest, EmitConstructor_Type_Struct_Empty) {
- auto* str = Structure("S", {
- Member("a", ty.i32()),
- Member("b", ty.f32()),
- Member("c", ty.vec3<i32>()),
- });
+ auto* str = Structure("S", {
+ Member("a", ty.i32()),
+ Member("b", ty.f32()),
+ Member("c", ty.vec3<i32>()),
+ });
- WrapInFunction(Construct(ty.Of(str)));
+ WrapInFunction(Construct(ty.Of(str)));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr("{}"));
- EXPECT_THAT(gen.result(), testing::Not(HasSubstr("{{}}")));
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr("{}"));
+ EXPECT_THAT(gen.result(), testing::Not(HasSubstr("{{}}")));
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_continue_test.cc b/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_continue_test.cc
index b6618db5add..649aae6d464 100644
--- a/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_continue_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_continue_test.cc
@@ -20,16 +20,16 @@ namespace {
using MslGeneratorImplTest = TestHelper;
TEST_F(MslGeneratorImplTest, Emit_Continue) {
- auto* loop = Loop(Block(If(false, Block(Break())), //
- Continue()));
- WrapInFunction(loop);
+ auto* loop = Loop(Block(If(false, Block(Break())), //
+ Continue()));
+ WrapInFunction(loop);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(loop)) << gen.error();
- EXPECT_EQ(gen.result(), R"( while (true) {
+ ASSERT_TRUE(gen.EmitStatement(loop)) << gen.error();
+ EXPECT_EQ(gen.result(), R"( while (true) {
if (false) {
break;
}
diff --git a/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_discard_test.cc b/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_discard_test.cc
index aeea97d0be0..5f5c17f59c6 100644
--- a/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_discard_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_discard_test.cc
@@ -20,15 +20,15 @@ namespace {
using MslGeneratorImplTest = TestHelper;
TEST_F(MslGeneratorImplTest, Emit_Discard) {
- auto* stmt = create<ast::DiscardStatement>();
- WrapInFunction(stmt);
+ auto* stmt = create<ast::DiscardStatement>();
+ WrapInFunction(stmt);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(stmt)) << gen.error();
- EXPECT_EQ(gen.result(), " discard_fragment();\n");
+ ASSERT_TRUE(gen.EmitStatement(stmt)) << gen.error();
+ EXPECT_EQ(gen.result(), " discard_fragment();\n");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_function_test.cc b/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_function_test.cc
index 41dcbf19df0..3c78522487a 100644
--- a/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_function_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_function_test.cc
@@ -16,24 +16,26 @@
#include "src/tint/ast/variable_decl_statement.h"
#include "src/tint/writer/msl/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::msl {
namespace {
using MslGeneratorImplTest = TestHelper;
TEST_F(MslGeneratorImplTest, Emit_Function) {
- Func("my_func", ast::VariableList{}, ty.void_(),
- ast::StatementList{
- Return(),
- },
- {});
+ Func("my_func", ast::VariableList{}, ty.void_(),
+ ast::StatementList{
+ Return(),
+ },
+ {});
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"( #include <metal_stdlib>
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"( #include <metal_stdlib>
using namespace metal;
void my_func() {
@@ -44,22 +46,22 @@ TEST_F(MslGeneratorImplTest, Emit_Function) {
}
TEST_F(MslGeneratorImplTest, Emit_Function_WithParams) {
- ast::VariableList params;
- params.push_back(Param("a", ty.f32()));
- params.push_back(Param("b", ty.i32()));
+ ast::VariableList params;
+ params.push_back(Param("a", ty.f32()));
+ params.push_back(Param("b", ty.i32()));
- Func("my_func", params, ty.void_(),
- ast::StatementList{
- Return(),
- },
- {});
+ Func("my_func", params, ty.void_(),
+ ast::StatementList{
+ Return(),
+ },
+ {});
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"( #include <metal_stdlib>
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"( #include <metal_stdlib>
using namespace metal;
void my_func(float a, int b) {
@@ -70,14 +72,13 @@ TEST_F(MslGeneratorImplTest, Emit_Function_WithParams) {
}
TEST_F(MslGeneratorImplTest, Emit_Attribute_EntryPoint_NoReturn_Void) {
- Func("main", ast::VariableList{}, ty.void_(),
- ast::StatementList{/* no explicit return */},
- {Stage(ast::PipelineStage::kFragment)});
+ Func("main", ast::VariableList{}, ty.void_(), ast::StatementList{/* no explicit return */},
+ {Stage(ast::PipelineStage::kFragment)});
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(#include <metal_stdlib>
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(#include <metal_stdlib>
using namespace metal;
fragment void main() {
@@ -88,17 +89,17 @@ fragment void main() {
}
TEST_F(MslGeneratorImplTest, Emit_Attribute_EntryPoint_WithInOutVars) {
- // fn frag_main(@location(0) foo : f32) -> @location(1) f32 {
- // return foo;
- // }
- auto* foo_in = Param("foo", ty.f32(), {Location(0)});
- Func("frag_main", ast::VariableList{foo_in}, ty.f32(), {Return("foo")},
- {Stage(ast::PipelineStage::kFragment)}, {Location(1)});
+ // fn frag_main(@location(0) foo : f32) -> @location(1) f32 {
+ // return foo;
+ // }
+ auto* foo_in = Param("foo", ty.f32(), {Location(0)});
+ Func("frag_main", ast::VariableList{foo_in}, ty.f32(), {Return("foo")},
+ {Stage(ast::PipelineStage::kFragment)}, {Location(1)});
- GeneratorImpl& gen = SanitizeAndBuild();
+ GeneratorImpl& gen = SanitizeAndBuild();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(#include <metal_stdlib>
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(#include <metal_stdlib>
using namespace metal;
struct tint_symbol_1 {
@@ -124,20 +125,17 @@ fragment tint_symbol_2 frag_main(tint_symbol_1 tint_symbol [[stage_in]]) {
}
TEST_F(MslGeneratorImplTest, Emit_Attribute_EntryPoint_WithInOut_Builtins) {
- // fn frag_main(@position(0) coord : vec4<f32>) -> @frag_depth f32 {
- // return coord.x;
- // }
- auto* coord_in =
- Param("coord", ty.vec4<f32>(), {Builtin(ast::Builtin::kPosition)});
- Func("frag_main", ast::VariableList{coord_in}, ty.f32(),
- {Return(MemberAccessor("coord", "x"))},
- {Stage(ast::PipelineStage::kFragment)},
- {Builtin(ast::Builtin::kFragDepth)});
+ // fn frag_main(@position(0) coord : vec4<f32>) -> @frag_depth f32 {
+ // return coord.x;
+ // }
+ auto* coord_in = Param("coord", ty.vec4<f32>(), {Builtin(ast::Builtin::kPosition)});
+ Func("frag_main", ast::VariableList{coord_in}, ty.f32(), {Return(MemberAccessor("coord", "x"))},
+ {Stage(ast::PipelineStage::kFragment)}, {Builtin(ast::Builtin::kFragDepth)});
- GeneratorImpl& gen = SanitizeAndBuild();
+ GeneratorImpl& gen = SanitizeAndBuild();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(#include <metal_stdlib>
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(#include <metal_stdlib>
using namespace metal;
struct tint_symbol {
@@ -158,46 +156,42 @@ fragment tint_symbol frag_main(float4 coord [[position]]) {
)");
}
-TEST_F(MslGeneratorImplTest,
- Emit_Attribute_EntryPoint_SharedStruct_DifferentStages) {
- // struct Interface {
- // @location(1) col1 : f32;
- // @location(2) col2 : f32;
- // @builtin(position) pos : vec4<f32>;
- // };
- // fn vert_main() -> Interface {
- // return Interface(0.4, 0.6, vec4<f32>());
- // }
- // fn frag_main(colors : Interface) {
- // const r = colors.col1;
- // const g = colors.col2;
- // }
- auto* interface_struct = Structure(
- "Interface",
- {
- Member("col1", ty.f32(), {Location(1)}),
- Member("col2", ty.f32(), {Location(2)}),
- Member("pos", ty.vec4<f32>(), {Builtin(ast::Builtin::kPosition)}),
- });
-
- Func("vert_main", {}, ty.Of(interface_struct),
- {Return(Construct(ty.Of(interface_struct), Expr(0.5f), Expr(0.25f),
- Construct(ty.vec4<f32>())))},
- {Stage(ast::PipelineStage::kVertex)});
-
- Func("frag_main", {Param("colors", ty.Of(interface_struct))}, ty.void_(),
- {
- WrapInStatement(
- Const("r", ty.f32(), MemberAccessor("colors", "col1"))),
- WrapInStatement(
- Const("g", ty.f32(), MemberAccessor("colors", "col2"))),
- },
- {Stage(ast::PipelineStage::kFragment)});
-
- GeneratorImpl& gen = SanitizeAndBuild();
-
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(#include <metal_stdlib>
+TEST_F(MslGeneratorImplTest, Emit_Attribute_EntryPoint_SharedStruct_DifferentStages) {
+ // struct Interface {
+ // @location(1) col1 : f32;
+ // @location(2) col2 : f32;
+ // @builtin(position) pos : vec4<f32>;
+ // };
+ // fn vert_main() -> Interface {
+ // return Interface(0.4, 0.6, vec4<f32>());
+ // }
+ // fn frag_main(colors : Interface) {
+ // const r = colors.col1;
+ // const g = colors.col2;
+ // }
+ auto* interface_struct = Structure(
+ "Interface", {
+ Member("col1", ty.f32(), {Location(1)}),
+ Member("col2", ty.f32(), {Location(2)}),
+ Member("pos", ty.vec4<f32>(), {Builtin(ast::Builtin::kPosition)}),
+ });
+
+ Func("vert_main", {}, ty.Of(interface_struct),
+ {Return(Construct(ty.Of(interface_struct), Expr(0.5_f), Expr(0.25_f),
+ Construct(ty.vec4<f32>())))},
+ {Stage(ast::PipelineStage::kVertex)});
+
+ Func("frag_main", {Param("colors", ty.Of(interface_struct))}, ty.void_(),
+ {
+ WrapInStatement(Let("r", ty.f32(), MemberAccessor("colors", "col1"))),
+ WrapInStatement(Let("g", ty.f32(), MemberAccessor("colors", "col2"))),
+ },
+ {Stage(ast::PipelineStage::kFragment)});
+
+ GeneratorImpl& gen = SanitizeAndBuild();
+
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(#include <metal_stdlib>
using namespace metal;
struct Interface {
@@ -213,7 +207,7 @@ struct tint_symbol {
};
Interface vert_main_inner() {
- Interface const tint_symbol_3 = {.col1=0.5f, .col2=0.25f, .pos=float4()};
+ Interface const tint_symbol_3 = {.col1=0.5f, .col2=0.25f, .pos=float4(0.0f)};
return tint_symbol_3;
}
@@ -245,41 +239,37 @@ fragment void frag_main(float4 pos [[position]], tint_symbol_2 tint_symbol_1 [[s
)");
}
-TEST_F(MslGeneratorImplTest,
- Emit_Attribute_EntryPoint_SharedStruct_HelperFunction) {
- // struct VertexOutput {
- // @builtin(position) pos : vec4<f32>;
- // };
- // fn foo(x : f32) -> VertexOutput {
- // return VertexOutput(vec4<f32>(x, x, x, 1.0));
- // }
- // fn vert_main1() -> VertexOutput {
- // return foo(0.5);
- // }
- // fn vert_main2() -> VertexOutput {
- // return foo(0.25);
- // }
- auto* vertex_output_struct = Structure(
- "VertexOutput",
- {Member("pos", ty.vec4<f32>(), {Builtin(ast::Builtin::kPosition)})});
-
- Func("foo", {Param("x", ty.f32())}, ty.Of(vertex_output_struct),
- {Return(Construct(ty.Of(vertex_output_struct),
- Construct(ty.vec4<f32>(), "x", "x", "x", Expr(1.f))))},
- {});
-
- Func("vert_main1", {}, ty.Of(vertex_output_struct),
- {Return(Expr(Call("foo", Expr(0.5f))))},
- {Stage(ast::PipelineStage::kVertex)});
-
- Func("vert_main2", {}, ty.Of(vertex_output_struct),
- {Return(Expr(Call("foo", Expr(0.25f))))},
- {Stage(ast::PipelineStage::kVertex)});
-
- GeneratorImpl& gen = SanitizeAndBuild();
-
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(#include <metal_stdlib>
+TEST_F(MslGeneratorImplTest, Emit_Attribute_EntryPoint_SharedStruct_HelperFunction) {
+ // struct VertexOutput {
+ // @builtin(position) pos : vec4<f32>;
+ // };
+ // fn foo(x : f32) -> VertexOutput {
+ // return VertexOutput(vec4<f32>(x, x, x, 1.0));
+ // }
+ // fn vert_main1() -> VertexOutput {
+ // return foo(0.5);
+ // }
+ // fn vert_main2() -> VertexOutput {
+ // return foo(0.25);
+ // }
+ auto* vertex_output_struct = Structure(
+ "VertexOutput", {Member("pos", ty.vec4<f32>(), {Builtin(ast::Builtin::kPosition)})});
+
+ Func("foo", {Param("x", ty.f32())}, ty.Of(vertex_output_struct),
+ {Return(Construct(ty.Of(vertex_output_struct),
+ Construct(ty.vec4<f32>(), "x", "x", "x", Expr(1_f))))},
+ {});
+
+ Func("vert_main1", {}, ty.Of(vertex_output_struct), {Return(Expr(Call("foo", Expr(0.5_f))))},
+ {Stage(ast::PipelineStage::kVertex)});
+
+ Func("vert_main2", {}, ty.Of(vertex_output_struct), {Return(Expr(Call("foo", Expr(0.25_f))))},
+ {Stage(ast::PipelineStage::kVertex)});
+
+ GeneratorImpl& gen = SanitizeAndBuild();
+
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(#include <metal_stdlib>
using namespace metal;
struct VertexOutput {
@@ -324,36 +314,33 @@ vertex tint_symbol_1 vert_main2() {
)");
}
-TEST_F(MslGeneratorImplTest,
- Emit_FunctionAttribute_EntryPoint_With_RW_StorageBuffer) {
- auto* s = Structure("Data", {
- Member("a", ty.i32()),
- Member("b", ty.f32()),
- });
+TEST_F(MslGeneratorImplTest, Emit_FunctionAttribute_EntryPoint_With_RW_StorageBuffer) {
+ auto* s = Structure("Data", {
+ Member("a", ty.i32()),
+ Member("b", ty.f32()),
+ });
- Global("coord", ty.Of(s), ast::StorageClass::kStorage,
- ast::Access::kReadWrite,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(0),
- });
+ Global("coord", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kReadWrite,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(0),
+ });
- auto* var = Var("v", ty.f32(), ast::StorageClass::kNone,
- MemberAccessor("coord", "b"));
+ auto* var = Var("v", ty.f32(), ast::StorageClass::kNone, MemberAccessor("coord", "b"));
- Func("frag_main", ast::VariableList{}, ty.void_(),
- ast::StatementList{
- Decl(var),
- Return(),
- },
- {
- Stage(ast::PipelineStage::kFragment),
- });
+ Func("frag_main", ast::VariableList{}, ty.void_(),
+ ast::StatementList{
+ Decl(var),
+ Return(),
+ },
+ {
+ Stage(ast::PipelineStage::kFragment),
+ });
- GeneratorImpl& gen = SanitizeAndBuild();
+ GeneratorImpl& gen = SanitizeAndBuild();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(#include <metal_stdlib>
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(#include <metal_stdlib>
using namespace metal;
struct Data {
@@ -369,35 +356,33 @@ fragment void frag_main(device Data* tint_symbol [[buffer(0)]]) {
)");
}
-TEST_F(MslGeneratorImplTest,
- Emit_FunctionAttribute_EntryPoint_With_RO_StorageBuffer) {
- auto* s = Structure("Data", {
- Member("a", ty.i32()),
- Member("b", ty.f32()),
- });
+TEST_F(MslGeneratorImplTest, Emit_FunctionAttribute_EntryPoint_With_RO_StorageBuffer) {
+ auto* s = Structure("Data", {
+ Member("a", ty.i32()),
+ Member("b", ty.f32()),
+ });
- Global("coord", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kRead,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(0),
- });
+ Global("coord", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kRead,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(0),
+ });
- auto* var = Var("v", ty.f32(), ast::StorageClass::kNone,
- MemberAccessor("coord", "b"));
+ auto* var = Var("v", ty.f32(), ast::StorageClass::kNone, MemberAccessor("coord", "b"));
- Func("frag_main", ast::VariableList{}, ty.void_(),
- ast::StatementList{
- Decl(var),
- Return(),
- },
- {
- Stage(ast::PipelineStage::kFragment),
- });
+ Func("frag_main", ast::VariableList{}, ty.void_(),
+ ast::StatementList{
+ Decl(var),
+ Return(),
+ },
+ {
+ Stage(ast::PipelineStage::kFragment),
+ });
- GeneratorImpl& gen = SanitizeAndBuild();
+ GeneratorImpl& gen = SanitizeAndBuild();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(#include <metal_stdlib>
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(#include <metal_stdlib>
using namespace metal;
struct Data {
@@ -414,38 +399,37 @@ fragment void frag_main(const device Data* tint_symbol [[buffer(0)]]) {
}
TEST_F(MslGeneratorImplTest, Emit_Attribute_Called_By_EntryPoint_With_Uniform) {
- auto* ubo_ty = Structure("UBO", {Member("coord", ty.vec4<f32>())});
- auto* ubo = Global("ubo", ty.Of(ubo_ty), ast::StorageClass::kUniform,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(0),
- });
-
- Func("sub_func",
- {
- Param("param", ty.f32()),
- },
- ty.f32(),
- {
- Return(MemberAccessor(MemberAccessor(ubo, "coord"), "x")),
- });
+ auto* ubo_ty = Structure("UBO", {Member("coord", ty.vec4<f32>())});
+ auto* ubo = Global("ubo", ty.Of(ubo_ty), ast::StorageClass::kUniform,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(0),
+ });
+
+ Func("sub_func",
+ {
+ Param("param", ty.f32()),
+ },
+ ty.f32(),
+ {
+ Return(MemberAccessor(MemberAccessor(ubo, "coord"), "x")),
+ });
- auto* var =
- Var("v", ty.f32(), ast::StorageClass::kNone, Call("sub_func", 1.0f));
+ auto* var = Var("v", ty.f32(), ast::StorageClass::kNone, Call("sub_func", 1_f));
- Func("frag_main", {}, ty.void_(),
- {
- Decl(var),
- Return(),
- },
- {
- Stage(ast::PipelineStage::kFragment),
- });
+ Func("frag_main", {}, ty.void_(),
+ {
+ Decl(var),
+ Return(),
+ },
+ {
+ Stage(ast::PipelineStage::kFragment),
+ });
- GeneratorImpl& gen = SanitizeAndBuild();
+ GeneratorImpl& gen = SanitizeAndBuild();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(#include <metal_stdlib>
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(#include <metal_stdlib>
using namespace metal;
struct UBO {
@@ -464,43 +448,40 @@ fragment void frag_main(const constant UBO* tint_symbol_1 [[buffer(0)]]) {
)");
}
-TEST_F(MslGeneratorImplTest,
- Emit_FunctionAttribute_Called_By_EntryPoint_With_RW_StorageBuffer) {
- auto* s = Structure("Data", {
- Member("a", ty.i32()),
- Member("b", ty.f32()),
- });
+TEST_F(MslGeneratorImplTest, Emit_FunctionAttribute_Called_By_EntryPoint_With_RW_StorageBuffer) {
+ auto* s = Structure("Data", {
+ Member("a", ty.i32()),
+ Member("b", ty.f32()),
+ });
- Global("coord", ty.Of(s), ast::StorageClass::kStorage,
- ast::Access::kReadWrite,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(0),
- });
+ Global("coord", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kReadWrite,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(0),
+ });
- ast::VariableList params;
- params.push_back(Param("param", ty.f32()));
+ ast::VariableList params;
+ params.push_back(Param("param", ty.f32()));
- auto body = ast::StatementList{Return(MemberAccessor("coord", "b"))};
+ auto body = ast::StatementList{Return(MemberAccessor("coord", "b"))};
- Func("sub_func", params, ty.f32(), body, {});
+ Func("sub_func", params, ty.f32(), body, {});
- auto* var =
- Var("v", ty.f32(), ast::StorageClass::kNone, Call("sub_func", 1.0f));
+ auto* var = Var("v", ty.f32(), ast::StorageClass::kNone, Call("sub_func", 1_f));
- Func("frag_main", ast::VariableList{}, ty.void_(),
- ast::StatementList{
- Decl(var),
- Return(),
- },
- {
- Stage(ast::PipelineStage::kFragment),
- });
+ Func("frag_main", ast::VariableList{}, ty.void_(),
+ ast::StatementList{
+ Decl(var),
+ Return(),
+ },
+ {
+ Stage(ast::PipelineStage::kFragment),
+ });
- GeneratorImpl& gen = SanitizeAndBuild();
+ GeneratorImpl& gen = SanitizeAndBuild();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(#include <metal_stdlib>
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(#include <metal_stdlib>
using namespace metal;
struct Data {
@@ -520,42 +501,40 @@ fragment void frag_main(device Data* tint_symbol_1 [[buffer(0)]]) {
)");
}
-TEST_F(MslGeneratorImplTest,
- Emit_FunctionAttribute_Called_By_EntryPoint_With_RO_StorageBuffer) {
- auto* s = Structure("Data", {
- Member("a", ty.i32()),
- Member("b", ty.f32()),
- });
+TEST_F(MslGeneratorImplTest, Emit_FunctionAttribute_Called_By_EntryPoint_With_RO_StorageBuffer) {
+ auto* s = Structure("Data", {
+ Member("a", ty.i32()),
+ Member("b", ty.f32()),
+ });
- Global("coord", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kRead,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(0),
- });
+ Global("coord", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kRead,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(0),
+ });
- ast::VariableList params;
- params.push_back(Param("param", ty.f32()));
+ ast::VariableList params;
+ params.push_back(Param("param", ty.f32()));
- auto body = ast::StatementList{Return(MemberAccessor("coord", "b"))};
+ auto body = ast::StatementList{Return(MemberAccessor("coord", "b"))};
- Func("sub_func", params, ty.f32(), body, {});
+ Func("sub_func", params, ty.f32(), body, {});
- auto* var =
- Var("v", ty.f32(), ast::StorageClass::kNone, Call("sub_func", 1.0f));
+ auto* var = Var("v", ty.f32(), ast::StorageClass::kNone, Call("sub_func", 1_f));
- Func("frag_main", ast::VariableList{}, ty.void_(),
- ast::StatementList{
- Decl(var),
- Return(),
- },
- {
- Stage(ast::PipelineStage::kFragment),
- });
+ Func("frag_main", ast::VariableList{}, ty.void_(),
+ ast::StatementList{
+ Decl(var),
+ Return(),
+ },
+ {
+ Stage(ast::PipelineStage::kFragment),
+ });
- GeneratorImpl& gen = SanitizeAndBuild();
+ GeneratorImpl& gen = SanitizeAndBuild();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(#include <metal_stdlib>
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(#include <metal_stdlib>
using namespace metal;
struct Data {
@@ -576,20 +555,20 @@ fragment void frag_main(const device Data* tint_symbol_1 [[buffer(0)]]) {
}
TEST_F(MslGeneratorImplTest, Emit_Function_WithArrayParams) {
- ast::VariableList params;
- params.push_back(Param("a", ty.array<f32, 5>()));
+ ast::VariableList params;
+ params.push_back(Param("a", ty.array<f32, 5>()));
- Func("my_func", params, ty.void_(),
- {
- Return(),
- });
+ Func("my_func", params, ty.void_(),
+ {
+ Return(),
+ });
- GeneratorImpl& gen = SanitizeAndBuild();
+ GeneratorImpl& gen = SanitizeAndBuild();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"( #include <metal_stdlib>
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"( #include <metal_stdlib>
using namespace metal;
struct tint_array_wrapper {
@@ -604,17 +583,17 @@ TEST_F(MslGeneratorImplTest, Emit_Function_WithArrayParams) {
}
TEST_F(MslGeneratorImplTest, Emit_Function_WithArrayReturn) {
- Func("my_func", {}, ty.array<f32, 5>(),
- {
- Return(Construct(ty.array<f32, 5>())),
- });
+ Func("my_func", {}, ty.array<f32, 5>(),
+ {
+ Return(Construct(ty.array<f32, 5>())),
+ });
- GeneratorImpl& gen = SanitizeAndBuild();
+ GeneratorImpl& gen = SanitizeAndBuild();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"( #include <metal_stdlib>
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"( #include <metal_stdlib>
using namespace metal;
struct tint_array_wrapper {
@@ -630,62 +609,58 @@ TEST_F(MslGeneratorImplTest, Emit_Function_WithArrayReturn) {
}
// https://crbug.com/tint/297
-TEST_F(MslGeneratorImplTest,
- Emit_Function_Multiple_EntryPoint_With_Same_ModuleVar) {
- // struct Data {
- // d : f32;
- // };
- // @binding(0) @group(0) var<storage> data : Data;
- //
- // @stage(compute) @workgroup_size(1)
- // fn a() {
- // return;
- // }
- //
- // @stage(compute) @workgroup_size(1)
- // fn b() {
- // return;
- // }
-
- auto* s = Structure("Data", {Member("d", ty.f32())});
-
- Global("data", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kReadWrite,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(0),
- });
-
- {
- auto* var = Var("v", ty.f32(), ast::StorageClass::kNone,
- MemberAccessor("data", "d"));
-
- Func("a", ast::VariableList{}, ty.void_(),
- ast::StatementList{
- Decl(var),
- Return(),
- },
- {
- Stage(ast::PipelineStage::kCompute),
- WorkgroupSize(1),
- });
- }
-
- {
- auto* var = Var("v", ty.f32(), ast::StorageClass::kNone,
- MemberAccessor("data", "d"));
-
- Func("b", ast::VariableList{}, ty.void_(),
- ast::StatementList{Decl(var), Return()},
- {
- Stage(ast::PipelineStage::kCompute),
- WorkgroupSize(1),
- });
- }
-
- GeneratorImpl& gen = SanitizeAndBuild();
-
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(#include <metal_stdlib>
+TEST_F(MslGeneratorImplTest, Emit_Function_Multiple_EntryPoint_With_Same_ModuleVar) {
+ // struct Data {
+ // d : f32;
+ // };
+ // @binding(0) @group(0) var<storage> data : Data;
+ //
+ // @compute @workgroup_size(1)
+ // fn a() {
+ // return;
+ // }
+ //
+ // @compute @workgroup_size(1)
+ // fn b() {
+ // return;
+ // }
+
+ auto* s = Structure("Data", {Member("d", ty.f32())});
+
+ Global("data", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kReadWrite,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(0),
+ });
+
+ {
+ auto* var = Var("v", ty.f32(), ast::StorageClass::kNone, MemberAccessor("data", "d"));
+
+ Func("a", ast::VariableList{}, ty.void_(),
+ ast::StatementList{
+ Decl(var),
+ Return(),
+ },
+ {
+ Stage(ast::PipelineStage::kCompute),
+ WorkgroupSize(1_i),
+ });
+ }
+
+ {
+ auto* var = Var("v", ty.f32(), ast::StorageClass::kNone, MemberAccessor("data", "d"));
+
+ Func("b", ast::VariableList{}, ty.void_(), ast::StatementList{Decl(var), Return()},
+ {
+ Stage(ast::PipelineStage::kCompute),
+ WorkgroupSize(1_i),
+ });
+ }
+
+ GeneratorImpl& gen = SanitizeAndBuild();
+
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(#include <metal_stdlib>
using namespace metal;
struct Data {
diff --git a/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_identifier_test.cc b/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_identifier_test.cc
index ea7a61708d4..a6257760ea6 100644
--- a/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_identifier_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_identifier_test.cc
@@ -20,16 +20,16 @@ namespace {
using MslGeneratorImplTest = TestHelper;
TEST_F(MslGeneratorImplTest, EmitIdentifierExpression) {
- auto* foo = Var("foo", ty.i32());
+ auto* foo = Var("foo", ty.i32());
- auto* i = Expr("foo");
- WrapInFunction(foo, i);
+ auto* i = Expr("foo");
+ WrapInFunction(foo, i);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, i)) << gen.error();
- EXPECT_EQ(out.str(), "foo");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, i)) << gen.error();
+ EXPECT_EQ(out.str(), "foo");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_if_test.cc b/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_if_test.cc
index ff0cabcb9b0..5138dce1724 100644
--- a/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_if_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_if_test.cc
@@ -20,33 +20,33 @@ namespace {
using MslGeneratorImplTest = TestHelper;
TEST_F(MslGeneratorImplTest, Emit_If) {
- auto* cond = Var("cond", ty.bool_());
- auto* i = If(cond, Block(Return()));
- WrapInFunction(cond, i);
+ auto* cond = Var("cond", ty.bool_());
+ auto* i = If(cond, Block(Return()));
+ WrapInFunction(cond, i);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(i)) << gen.error();
- EXPECT_EQ(gen.result(), R"( if (cond) {
+ ASSERT_TRUE(gen.EmitStatement(i)) << gen.error();
+ EXPECT_EQ(gen.result(), R"( if (cond) {
return;
}
)");
}
TEST_F(MslGeneratorImplTest, Emit_IfWithElseIf) {
- auto* cond = Var("cond", ty.bool_());
- auto* else_cond = Var("else_cond", ty.bool_());
- auto* i = If(cond, Block(Return()), Else(else_cond, Block(Return())));
- WrapInFunction(cond, else_cond, i);
+ auto* cond = Var("cond", ty.bool_());
+ auto* else_cond = Var("else_cond", ty.bool_());
+ auto* i = If(cond, Block(Return()), Else(If(else_cond, Block(Return()))));
+ WrapInFunction(cond, else_cond, i);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(i)) << gen.error();
- EXPECT_EQ(gen.result(), R"( if (cond) {
+ ASSERT_TRUE(gen.EmitStatement(i)) << gen.error();
+ EXPECT_EQ(gen.result(), R"( if (cond) {
return;
} else {
if (else_cond) {
@@ -57,16 +57,16 @@ TEST_F(MslGeneratorImplTest, Emit_IfWithElseIf) {
}
TEST_F(MslGeneratorImplTest, Emit_IfWithElse) {
- auto* cond = Var("cond", ty.bool_());
- auto* i = If(cond, Block(Return()), Else(nullptr, Block(Return())));
- WrapInFunction(cond, i);
+ auto* cond = Var("cond", ty.bool_());
+ auto* i = If(cond, Block(Return()), Else(Block(Return())));
+ WrapInFunction(cond, i);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(i)) << gen.error();
- EXPECT_EQ(gen.result(), R"( if (cond) {
+ ASSERT_TRUE(gen.EmitStatement(i)) << gen.error();
+ EXPECT_EQ(gen.result(), R"( if (cond) {
return;
} else {
return;
@@ -75,18 +75,18 @@ TEST_F(MslGeneratorImplTest, Emit_IfWithElse) {
}
TEST_F(MslGeneratorImplTest, Emit_IfWithMultiple) {
- auto* cond = Var("cond", ty.bool_());
- auto* else_cond = Var("else_cond", ty.bool_());
- auto* i = If(cond, Block(Return()), Else(else_cond, Block(Return())),
- Else(nullptr, Block(Return())));
- WrapInFunction(cond, else_cond, i);
+ auto* cond = Var("cond", ty.bool_());
+ auto* else_cond = Var("else_cond", ty.bool_());
+ auto* i =
+ If(cond, Block(Return()), Else(If(else_cond, Block(Return()), Else(Block(Return())))));
+ WrapInFunction(cond, else_cond, i);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(i)) << gen.error();
- EXPECT_EQ(gen.result(), R"( if (cond) {
+ ASSERT_TRUE(gen.EmitStatement(i)) << gen.error();
+ EXPECT_EQ(gen.result(), R"( if (cond) {
return;
} else {
if (else_cond) {
diff --git a/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_import_test.cc b/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_import_test.cc
index 6cf2f76ac46..de9353ea446 100644
--- a/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_import_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_import_test.cc
@@ -15,37 +15,39 @@
#include "src/tint/sem/call.h"
#include "src/tint/writer/msl/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::msl {
namespace {
using MslGeneratorImplTest = TestHelper;
struct MslImportData {
- const char* name;
- const char* msl_name;
+ const char* name;
+ const char* msl_name;
};
inline std::ostream& operator<<(std::ostream& out, MslImportData data) {
- out << data.name;
- return out;
+ out << data.name;
+ return out;
}
using MslImportData_SingleParamTest = TestParamHelper<MslImportData>;
TEST_P(MslImportData_SingleParamTest, FloatScalar) {
- auto param = GetParam();
- auto* call = Call(param.name, 1.f);
+ auto param = GetParam();
+ auto* call = Call(param.name, 1_f);
- // The resolver will set the builtin data for the ident
- WrapInFunction(call);
+ // The resolver will set the builtin data for the ident
+ WrapInFunction(call);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- auto* sem = program->Sem().Get(call);
- ASSERT_NE(sem, nullptr);
- auto* target = sem->Target();
- ASSERT_NE(target, nullptr);
- auto* builtin = target->As<sem::Builtin>();
- ASSERT_NE(builtin, nullptr);
+ auto* sem = program->Sem().Get<sem::Call>(call);
+ ASSERT_NE(sem, nullptr);
+ auto* target = sem->Target();
+ ASSERT_NE(target, nullptr);
+ auto* builtin = target->As<sem::Builtin>();
+ ASSERT_NE(builtin, nullptr);
- ASSERT_EQ(gen.generate_builtin_name(builtin), param.msl_name);
+ ASSERT_EQ(gen.generate_builtin_name(builtin), param.msl_name);
}
INSTANTIATE_TEST_SUITE_P(MslGeneratorImplTest,
MslImportData_SingleParamTest,
@@ -74,39 +76,39 @@ INSTANTIATE_TEST_SUITE_P(MslGeneratorImplTest,
MslImportData{"trunc", "trunc"}));
TEST_F(MslGeneratorImplTest, MslImportData_SingleParamTest_IntScalar) {
- auto* expr = Call("abs", 1);
- WrapInFunction(expr);
+ auto* expr = Call("abs", 1_i);
+ WrapInFunction(expr);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitCall(out, expr)) << gen.error();
- EXPECT_EQ(out.str(), R"(abs(1))");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitCall(out, expr)) << gen.error();
+ EXPECT_EQ(out.str(), R"(abs(1))");
}
TEST_F(MslGeneratorImplTest, MslImportData_SingleParamTest_ScalarLength) {
- auto* expr = Call("length", 2.f);
- WrapInFunction(expr);
+ auto* expr = Call("length", 2_f);
+ WrapInFunction(expr);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitCall(out, expr)) << gen.error();
- EXPECT_EQ(out.str(), R"(fabs(2.0f))");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitCall(out, expr)) << gen.error();
+ EXPECT_EQ(out.str(), R"(fabs(2.0f))");
}
using MslImportData_DualParam_ScalarTest = TestParamHelper<MslImportData>;
TEST_P(MslImportData_DualParam_ScalarTest, Float) {
- auto param = GetParam();
- auto* expr = Call(param.name, 1.0f, 2.0f);
+ auto param = GetParam();
+ auto* expr = Call(param.name, 1_f, 2_f);
- WrapInFunction(expr);
+ WrapInFunction(expr);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitCall(out, expr)) << gen.error();
- EXPECT_EQ(out.str(), std::string(param.msl_name) + "(1.0f, 2.0f)");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitCall(out, expr)) << gen.error();
+ EXPECT_EQ(out.str(), std::string(param.msl_name) + "(1.0f, 2.0f)");
}
INSTANTIATE_TEST_SUITE_P(MslGeneratorImplTest,
MslImportData_DualParam_ScalarTest,
@@ -117,31 +119,29 @@ INSTANTIATE_TEST_SUITE_P(MslGeneratorImplTest,
MslImportData{"step", "step"}));
TEST_F(MslGeneratorImplTest, MslImportData_DualParam_ScalarDistance) {
- auto* expr = Call("distance", 2.f, 3.f);
- WrapInFunction(expr);
+ auto* expr = Call("distance", 2_f, 3_f);
+ WrapInFunction(expr);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitCall(out, expr)) << gen.error();
- EXPECT_EQ(out.str(), R"(fabs(2.0f - 3.0f))");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitCall(out, expr)) << gen.error();
+ EXPECT_EQ(out.str(), R"(fabs(2.0f - 3.0f))");
}
using MslImportData_DualParam_VectorTest = TestParamHelper<MslImportData>;
TEST_P(MslImportData_DualParam_VectorTest, Float) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* expr =
- Call(param.name, vec3<f32>(1.f, 2.f, 3.f), vec3<f32>(4.f, 5.f, 6.f));
- WrapInFunction(expr);
+ auto* expr = Call(param.name, vec3<f32>(1_f, 2_f, 3_f), vec3<f32>(4_f, 5_f, 6_f));
+ WrapInFunction(expr);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitCall(out, expr)) << gen.error();
- EXPECT_EQ(out.str(),
- std::string(param.msl_name) +
- R"((float3(1.0f, 2.0f, 3.0f), float3(4.0f, 5.0f, 6.0f)))");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitCall(out, expr)) << gen.error();
+ EXPECT_EQ(out.str(), std::string(param.msl_name) +
+ R"((float3(1.0f, 2.0f, 3.0f), float3(4.0f, 5.0f, 6.0f)))");
}
INSTANTIATE_TEST_SUITE_P(MslGeneratorImplTest,
MslImportData_DualParam_VectorTest,
@@ -156,80 +156,77 @@ INSTANTIATE_TEST_SUITE_P(MslGeneratorImplTest,
using MslImportData_DualParam_Int_Test = TestParamHelper<MslImportData>;
TEST_P(MslImportData_DualParam_Int_Test, IntScalar) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* expr = Call(param.name, 1, 2);
- WrapInFunction(expr);
+ auto* expr = Call(param.name, 1_i, 2_i);
+ WrapInFunction(expr);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitCall(out, expr)) << gen.error();
- EXPECT_EQ(out.str(), std::string(param.msl_name) + "(1, 2)");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitCall(out, expr)) << gen.error();
+ EXPECT_EQ(out.str(), std::string(param.msl_name) + "(1, 2)");
}
INSTANTIATE_TEST_SUITE_P(MslGeneratorImplTest,
MslImportData_DualParam_Int_Test,
- testing::Values(MslImportData{"max", "max"},
- MslImportData{"min", "min"}));
+ testing::Values(MslImportData{"max", "max"}, MslImportData{"min", "min"}));
using MslImportData_TripleParam_ScalarTest = TestParamHelper<MslImportData>;
TEST_P(MslImportData_TripleParam_ScalarTest, Float) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* expr = Call(param.name, 1.f, 2.f, 3.f);
- WrapInFunction(expr);
+ auto* expr = Call(param.name, 1_f, 2_f, 3_f);
+ WrapInFunction(expr);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitCall(out, expr)) << gen.error();
- EXPECT_EQ(out.str(), std::string(param.msl_name) + "(1.0f, 2.0f, 3.0f)");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitCall(out, expr)) << gen.error();
+ EXPECT_EQ(out.str(), std::string(param.msl_name) + "(1.0f, 2.0f, 3.0f)");
}
INSTANTIATE_TEST_SUITE_P(MslGeneratorImplTest,
MslImportData_TripleParam_ScalarTest,
testing::Values(MslImportData{"fma", "fma"},
MslImportData{"mix", "mix"},
MslImportData{"clamp", "clamp"},
- MslImportData{"smoothStep",
- "smoothstep"}));
+ MslImportData{"smoothstep", "smoothstep"}));
using MslImportData_TripleParam_VectorTest = TestParamHelper<MslImportData>;
TEST_P(MslImportData_TripleParam_VectorTest, Float) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* expr = Call(param.name, vec3<f32>(1.f, 2.f, 3.f),
- vec3<f32>(4.f, 5.f, 6.f), vec3<f32>(7.f, 8.f, 9.f));
- WrapInFunction(expr);
+ auto* expr = Call(param.name, vec3<f32>(1_f, 2_f, 3_f), vec3<f32>(4_f, 5_f, 6_f),
+ vec3<f32>(7_f, 8_f, 9_f));
+ WrapInFunction(expr);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitCall(out, expr)) << gen.error();
- EXPECT_EQ(
- out.str(),
- std::string(param.msl_name) +
- R"((float3(1.0f, 2.0f, 3.0f), float3(4.0f, 5.0f, 6.0f), float3(7.0f, 8.0f, 9.0f)))");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitCall(out, expr)) << gen.error();
+ EXPECT_EQ(
+ out.str(),
+ std::string(param.msl_name) +
+ R"((float3(1.0f, 2.0f, 3.0f), float3(4.0f, 5.0f, 6.0f), float3(7.0f, 8.0f, 9.0f)))");
}
-INSTANTIATE_TEST_SUITE_P(
- MslGeneratorImplTest,
- MslImportData_TripleParam_VectorTest,
- testing::Values(MslImportData{"faceForward", "faceforward"},
- MslImportData{"fma", "fma"},
- MslImportData{"clamp", "clamp"},
- MslImportData{"smoothStep", "smoothstep"}));
+INSTANTIATE_TEST_SUITE_P(MslGeneratorImplTest,
+ MslImportData_TripleParam_VectorTest,
+ testing::Values(MslImportData{"faceForward", "faceforward"},
+ MslImportData{"fma", "fma"},
+ MslImportData{"clamp", "clamp"},
+ MslImportData{"smoothstep", "smoothstep"}));
using MslImportData_TripleParam_Int_Test = TestParamHelper<MslImportData>;
TEST_P(MslImportData_TripleParam_Int_Test, IntScalar) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* expr = Call(param.name, 1, 2, 3);
- WrapInFunction(expr);
+ auto* expr = Call(param.name, 1_i, 2_i, 3_i);
+ WrapInFunction(expr);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitCall(out, expr)) << gen.error();
- EXPECT_EQ(out.str(), std::string(param.msl_name) + "(1, 2, 3)");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitCall(out, expr)) << gen.error();
+ EXPECT_EQ(out.str(), std::string(param.msl_name) + "(1, 2, 3)");
}
INSTANTIATE_TEST_SUITE_P(MslGeneratorImplTest,
MslImportData_TripleParam_Int_Test,
@@ -237,17 +234,17 @@ INSTANTIATE_TEST_SUITE_P(MslGeneratorImplTest,
MslImportData{"clamp", "clamp"}));
TEST_F(MslGeneratorImplTest, MslImportData_Determinant) {
- Global("var", ty.mat3x3<f32>(), ast::StorageClass::kPrivate);
+ Global("var", ty.mat3x3<f32>(), ast::StorageClass::kPrivate);
- auto* expr = Call("determinant", "var");
+ auto* expr = Call("determinant", "var");
- WrapInFunction(expr);
+ WrapInFunction(expr);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitCall(out, expr)) << gen.error();
- EXPECT_EQ(out.str(), std::string("determinant(var)"));
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitCall(out, expr)) << gen.error();
+ EXPECT_EQ(out.str(), std::string("determinant(var)"));
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_loop_test.cc b/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_loop_test.cc
index e31b6402d25..248e7116928 100644
--- a/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_loop_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_loop_test.cc
@@ -15,42 +15,44 @@
#include "src/tint/ast/variable_decl_statement.h"
#include "src/tint/writer/msl/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::msl {
namespace {
using MslGeneratorImplTest = TestHelper;
TEST_F(MslGeneratorImplTest, Emit_Loop) {
- auto* body = Block(create<ast::DiscardStatement>());
- auto* continuing = Block();
- auto* l = Loop(body, continuing);
- WrapInFunction(l);
+ auto* body = Block(create<ast::DiscardStatement>());
+ auto* continuing = Block();
+ auto* l = Loop(body, continuing);
+ WrapInFunction(l);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(l)) << gen.error();
- EXPECT_EQ(gen.result(), R"( while (true) {
+ ASSERT_TRUE(gen.EmitStatement(l)) << gen.error();
+ EXPECT_EQ(gen.result(), R"( while (true) {
discard_fragment();
}
)");
}
TEST_F(MslGeneratorImplTest, Emit_LoopWithContinuing) {
- Func("a_statement", {}, ty.void_(), {});
+ Func("a_statement", {}, ty.void_(), {});
- auto* body = Block(create<ast::DiscardStatement>());
- auto* continuing = Block(CallStmt(Call("a_statement")));
- auto* l = Loop(body, continuing);
- WrapInFunction(l);
+ auto* body = Block(create<ast::DiscardStatement>());
+ auto* continuing = Block(CallStmt(Call("a_statement")));
+ auto* l = Loop(body, continuing);
+ WrapInFunction(l);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(l)) << gen.error();
- EXPECT_EQ(gen.result(), R"( while (true) {
+ ASSERT_TRUE(gen.EmitStatement(l)) << gen.error();
+ EXPECT_EQ(gen.result(), R"( while (true) {
discard_fragment();
{
a_statement();
@@ -60,28 +62,28 @@ TEST_F(MslGeneratorImplTest, Emit_LoopWithContinuing) {
}
TEST_F(MslGeneratorImplTest, Emit_LoopNestedWithContinuing) {
- Func("a_statement", {}, ty.void_(), {});
+ Func("a_statement", {}, ty.void_(), {});
- Global("lhs", ty.f32(), ast::StorageClass::kPrivate);
- Global("rhs", ty.f32(), ast::StorageClass::kPrivate);
+ Global("lhs", ty.f32(), ast::StorageClass::kPrivate);
+ Global("rhs", ty.f32(), ast::StorageClass::kPrivate);
- auto* body = Block(create<ast::DiscardStatement>());
- auto* continuing = Block(CallStmt(Call("a_statement")));
- auto* inner = Loop(body, continuing);
+ auto* body = Block(create<ast::DiscardStatement>());
+ auto* continuing = Block(CallStmt(Call("a_statement")));
+ auto* inner = Loop(body, continuing);
- body = Block(inner);
+ body = Block(inner);
- continuing = Block(Assign("lhs", "rhs"));
+ continuing = Block(Assign("lhs", "rhs"));
- auto* outer = Loop(body, continuing);
- WrapInFunction(outer);
+ auto* outer = Loop(body, continuing);
+ WrapInFunction(outer);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(outer)) << gen.error();
- EXPECT_EQ(gen.result(), R"( while (true) {
+ ASSERT_TRUE(gen.EmitStatement(outer)) << gen.error();
+ EXPECT_EQ(gen.result(), R"( while (true) {
while (true) {
discard_fragment();
{
@@ -96,31 +98,31 @@ TEST_F(MslGeneratorImplTest, Emit_LoopNestedWithContinuing) {
}
TEST_F(MslGeneratorImplTest, Emit_LoopWithVarUsedInContinuing) {
- // loop {
- // var lhs : f32 = 2.4;
- // var other : f32;
- // continuing {
- // lhs = rhs
- // }
- // }
- //
+ // loop {
+ // var lhs : f32 = 2.4;
+ // var other : f32;
+ // continuing {
+ // lhs = rhs
+ // }
+ // }
+ //
- Global("rhs", ty.f32(), ast::StorageClass::kPrivate);
+ Global("rhs", ty.f32(), ast::StorageClass::kPrivate);
- auto* body = Block(Decl(Var("lhs", ty.f32(), Expr(2.4f))), //
- Decl(Var("other", ty.f32())), //
- Break());
+ auto* body = Block(Decl(Var("lhs", ty.f32(), Expr(2.4_f))), //
+ Decl(Var("other", ty.f32())), //
+ Break());
- auto* continuing = Block(Assign("lhs", "rhs"));
- auto* outer = Loop(body, continuing);
- WrapInFunction(outer);
+ auto* continuing = Block(Assign("lhs", "rhs"));
+ auto* outer = Loop(body, continuing);
+ WrapInFunction(outer);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(outer)) << gen.error();
- EXPECT_EQ(gen.result(), R"( while (true) {
+ ASSERT_TRUE(gen.EmitStatement(outer)) << gen.error();
+ EXPECT_EQ(gen.result(), R"( while (true) {
float lhs = 2.400000095f;
float other = 0.0f;
break;
@@ -132,68 +134,68 @@ TEST_F(MslGeneratorImplTest, Emit_LoopWithVarUsedInContinuing) {
}
TEST_F(MslGeneratorImplTest, Emit_ForLoop) {
- // for(; ; ) {
- // return;
- // }
+ // for(; ; ) {
+ // return;
+ // }
- auto* f = For(nullptr, nullptr, nullptr, //
- Block(Return()));
- WrapInFunction(f);
+ auto* f = For(nullptr, nullptr, nullptr, //
+ Block(Return()));
+ WrapInFunction(f);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(f)) << gen.error();
- EXPECT_EQ(gen.result(), R"( for(; ; ) {
+ ASSERT_TRUE(gen.EmitStatement(f)) << gen.error();
+ EXPECT_EQ(gen.result(), R"( for(; ; ) {
return;
}
)");
}
TEST_F(MslGeneratorImplTest, Emit_ForLoopWithSimpleInit) {
- // for(var i : i32; ; ) {
- // return;
- // }
+ // for(var i : i32; ; ) {
+ // return;
+ // }
- auto* f = For(Decl(Var("i", ty.i32())), nullptr, nullptr, //
- Block(Return()));
- WrapInFunction(f);
+ auto* f = For(Decl(Var("i", ty.i32())), nullptr, nullptr, //
+ Block(Return()));
+ WrapInFunction(f);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(f)) << gen.error();
- EXPECT_EQ(gen.result(), R"( for(int i = 0; ; ) {
+ ASSERT_TRUE(gen.EmitStatement(f)) << gen.error();
+ EXPECT_EQ(gen.result(), R"( for(int i = 0; ; ) {
return;
}
)");
}
TEST_F(MslGeneratorImplTest, Emit_ForLoopWithMultiStmtInit) {
- // fn f(i : i32) {}
- //
- // var<workgroup> a : atomic<i32>;
- // for({f(1); f(2);}; ; ) {
- // return;
- // }
+ // fn f(i : i32) {}
+ //
+ // var<workgroup> a : atomic<i32>;
+ // for({f(1i); f(2i);}; ; ) {
+ // return;
+ // }
- Func("f", {Param("i", ty.i32())}, ty.void_(), {});
- auto f = [&](auto&& expr) { return CallStmt(Call("f", expr)); };
+ Func("f", {Param("i", ty.i32())}, ty.void_(), {});
+ auto f = [&](auto&& expr) { return CallStmt(Call("f", expr)); };
- Global("a", ty.atomic<i32>(), ast::StorageClass::kWorkgroup);
- auto* multi_stmt = Block(f(1), f(2));
- auto* loop = For(multi_stmt, nullptr, nullptr, //
- Block(Return()));
- WrapInFunction(loop);
+ Global("a", ty.atomic<i32>(), ast::StorageClass::kWorkgroup);
+ auto* multi_stmt = Block(f(1_i), f(2_i));
+ auto* loop = For(multi_stmt, nullptr, nullptr, //
+ Block(Return()));
+ WrapInFunction(loop);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(loop)) << gen.error();
- EXPECT_EQ(gen.result(), R"( {
+ ASSERT_TRUE(gen.EmitStatement(loop)) << gen.error();
+ EXPECT_EQ(gen.result(), R"( {
{
f(1);
f(2);
@@ -206,71 +208,70 @@ TEST_F(MslGeneratorImplTest, Emit_ForLoopWithMultiStmtInit) {
}
TEST_F(MslGeneratorImplTest, Emit_ForLoopWithSimpleCond) {
- // for(; true; ) {
- // return;
- // }
+ // for(; true; ) {
+ // return;
+ // }
- auto* f = For(nullptr, true, nullptr, //
- Block(Return()));
- WrapInFunction(f);
+ auto* f = For(nullptr, true, nullptr, //
+ Block(Return()));
+ WrapInFunction(f);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(f)) << gen.error();
- EXPECT_EQ(gen.result(), R"( for(; true; ) {
+ ASSERT_TRUE(gen.EmitStatement(f)) << gen.error();
+ EXPECT_EQ(gen.result(), R"( for(; true; ) {
return;
}
)");
}
TEST_F(MslGeneratorImplTest, Emit_ForLoopWithSimpleCont) {
- // for(; ; i = i + 1) {
- // return;
- // }
+ // for(; ; i = i + 1) {
+ // return;
+ // }
- auto* v = Decl(Var("i", ty.i32()));
- auto* f = For(nullptr, nullptr, Assign("i", Add("i", 1)), //
- Block(Return()));
- WrapInFunction(v, f);
+ auto* v = Decl(Var("i", ty.i32()));
+ auto* f = For(nullptr, nullptr, Assign("i", Add("i", 1_i)), //
+ Block(Return()));
+ WrapInFunction(v, f);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(f)) << gen.error();
- EXPECT_EQ(
- gen.result(),
- R"( for(; ; i = as_type<int>((as_type<uint>(i) + as_type<uint>(1)))) {
+ ASSERT_TRUE(gen.EmitStatement(f)) << gen.error();
+ EXPECT_EQ(gen.result(),
+ R"( for(; ; i = as_type<int>((as_type<uint>(i) + as_type<uint>(1)))) {
return;
}
)");
}
TEST_F(MslGeneratorImplTest, Emit_ForLoopWithMultiStmtCont) {
- // fn f(i : i32) {}
- //
- // var<workgroup> a : atomic<i32>;
- // for(; ; { f(1); f(2); }) {
- // return;
- // }
+ // fn f(i : i32) {}
+ //
+ // var<workgroup> a : atomic<i32>;
+ // for(; ; { f(1i); f(2i); }) {
+ // return;
+ // }
- Func("f", {Param("i", ty.i32())}, ty.void_(), {});
- auto f = [&](auto&& expr) { return CallStmt(Call("f", expr)); };
+ Func("f", {Param("i", ty.i32())}, ty.void_(), {});
+ auto f = [&](auto&& expr) { return CallStmt(Call("f", expr)); };
- Global("a", ty.atomic<i32>(), ast::StorageClass::kWorkgroup);
- auto* multi_stmt = Block(f(1), f(2));
- auto* loop = For(nullptr, nullptr, multi_stmt, //
- Block(Return()));
- WrapInFunction(loop);
+ Global("a", ty.atomic<i32>(), ast::StorageClass::kWorkgroup);
+ auto* multi_stmt = Block(f(1_i), f(2_i));
+ auto* loop = For(nullptr, nullptr, multi_stmt, //
+ Block(Return()));
+ WrapInFunction(loop);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(loop)) << gen.error();
- EXPECT_EQ(gen.result(), R"( while (true) {
+ ASSERT_TRUE(gen.EmitStatement(loop)) << gen.error();
+ EXPECT_EQ(gen.result(), R"( while (true) {
return;
{
f(1);
@@ -281,53 +282,52 @@ TEST_F(MslGeneratorImplTest, Emit_ForLoopWithMultiStmtCont) {
}
TEST_F(MslGeneratorImplTest, Emit_ForLoopWithSimpleInitCondCont) {
- // for(var i : i32; true; i = i + 1) {
- // return;
- // }
+ // for(var i : i32; true; i = i + 1) {
+ // return;
+ // }
- Func("a_statement", {}, ty.void_(), {});
+ Func("a_statement", {}, ty.void_(), {});
- auto* f = For(Decl(Var("i", ty.i32())), true, Assign("i", Add("i", 1)),
- Block(CallStmt(Call("a_statement"))));
- WrapInFunction(f);
+ auto* f = For(Decl(Var("i", ty.i32())), true, Assign("i", Add("i", 1_i)),
+ Block(CallStmt(Call("a_statement"))));
+ WrapInFunction(f);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(f)) << gen.error();
- EXPECT_EQ(
- gen.result(),
- R"( for(int i = 0; true; i = as_type<int>((as_type<uint>(i) + as_type<uint>(1)))) {
+ ASSERT_TRUE(gen.EmitStatement(f)) << gen.error();
+ EXPECT_EQ(gen.result(),
+ R"( for(int i = 0; true; i = as_type<int>((as_type<uint>(i) + as_type<uint>(1)))) {
a_statement();
}
)");
}
TEST_F(MslGeneratorImplTest, Emit_ForLoopWithMultiStmtInitCondCont) {
- // fn f(i : i32) {}
- //
- // var<workgroup> a : atomic<i32>;
- // for({ f(1); f(2); }; true; { f(3); f(4); }) {
- // return;
- // }
+ // fn f(i : i32) {}
+ //
+ // var<workgroup> a : atomic<i32>;
+ // for({ f(1i); f(2i); }; true; { f(3i); f(4i); }) {
+ // return;
+ // }
- Func("f", {Param("i", ty.i32())}, ty.void_(), {});
- auto f = [&](auto&& expr) { return CallStmt(Call("f", expr)); };
+ Func("f", {Param("i", ty.i32())}, ty.void_(), {});
+ auto f = [&](auto&& expr) { return CallStmt(Call("f", expr)); };
- Global("a", ty.atomic<i32>(), ast::StorageClass::kWorkgroup);
- auto* multi_stmt_a = Block(f(1), f(2));
- auto* multi_stmt_b = Block(f(3), f(4));
- auto* loop = For(multi_stmt_a, Expr(true), multi_stmt_b, //
- Block(Return()));
- WrapInFunction(loop);
+ Global("a", ty.atomic<i32>(), ast::StorageClass::kWorkgroup);
+ auto* multi_stmt_a = Block(f(1_i), f(2_i));
+ auto* multi_stmt_b = Block(f(3_i), f(4_i));
+ auto* loop = For(multi_stmt_a, Expr(true), multi_stmt_b, //
+ Block(Return()));
+ WrapInFunction(loop);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(loop)) << gen.error();
- EXPECT_EQ(gen.result(), R"( {
+ ASSERT_TRUE(gen.EmitStatement(loop)) << gen.error();
+ EXPECT_EQ(gen.result(), R"( {
{
f(1);
f(2);
diff --git a/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_member_accessor_test.cc b/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_member_accessor_test.cc
index 21c637efaa2..c9f3da074a7 100644
--- a/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_member_accessor_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_member_accessor_test.cc
@@ -20,40 +20,40 @@ namespace {
using MslGeneratorImplTest = TestHelper;
TEST_F(MslGeneratorImplTest, EmitExpression_MemberAccessor) {
- Global("str", ty.Of(Structure("my_str", {Member("mem", ty.f32())})),
- ast::StorageClass::kPrivate);
- auto* expr = MemberAccessor("str", "mem");
- WrapInFunction(expr);
+ Global("str", ty.Of(Structure("my_str", {Member("mem", ty.f32())})),
+ ast::StorageClass::kPrivate);
+ auto* expr = MemberAccessor("str", "mem");
+ WrapInFunction(expr);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
- EXPECT_EQ(out.str(), "str.mem");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
+ EXPECT_EQ(out.str(), "str.mem");
}
TEST_F(MslGeneratorImplTest, EmitExpression_MemberAccessor_Swizzle_xyz) {
- Global("my_vec", ty.vec4<f32>(), ast::StorageClass::kPrivate);
+ Global("my_vec", ty.vec4<f32>(), ast::StorageClass::kPrivate);
- auto* expr = MemberAccessor("my_vec", "xyz");
- WrapInFunction(expr);
+ auto* expr = MemberAccessor("my_vec", "xyz");
+ WrapInFunction(expr);
- GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
- EXPECT_EQ(out.str(), "float4(my_vec).xyz");
+ GeneratorImpl& gen = Build();
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
+ EXPECT_EQ(out.str(), "float4(my_vec).xyz");
}
TEST_F(MslGeneratorImplTest, EmitExpression_MemberAccessor_Swizzle_gbr) {
- Global("my_vec", ty.vec4<f32>(), ast::StorageClass::kPrivate);
+ Global("my_vec", ty.vec4<f32>(), ast::StorageClass::kPrivate);
- auto* expr = MemberAccessor("my_vec", "gbr");
- WrapInFunction(expr);
+ auto* expr = MemberAccessor("my_vec", "gbr");
+ WrapInFunction(expr);
- GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
- EXPECT_EQ(out.str(), "float4(my_vec).gbr");
+ GeneratorImpl& gen = Build();
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
+ EXPECT_EQ(out.str(), "float4(my_vec).gbr");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_module_constant_test.cc b/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_module_constant_test.cc
index 8f419de4017..2b70da48d3b 100644
--- a/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_module_constant_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_module_constant_test.cc
@@ -15,45 +15,46 @@
#include "src/tint/ast/id_attribute.h"
#include "src/tint/writer/msl/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::msl {
namespace {
using MslGeneratorImplTest = TestHelper;
TEST_F(MslGeneratorImplTest, Emit_ModuleConstant) {
- auto* var =
- GlobalConst("pos", ty.array<f32, 3>(), array<f32, 3>(1.f, 2.f, 3.f));
+ auto* var = GlobalConst("pos", ty.array<f32, 3>(), array<f32, 3>(1_f, 2_f, 3_f));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.EmitProgramConstVariable(var)) << gen.error();
- EXPECT_EQ(gen.result(), "constant float pos[3] = {1.0f, 2.0f, 3.0f};\n");
+ ASSERT_TRUE(gen.EmitProgramConstVariable(var)) << gen.error();
+ EXPECT_EQ(gen.result(), "constant float pos[3] = {1.0f, 2.0f, 3.0f};\n");
}
TEST_F(MslGeneratorImplTest, Emit_SpecConstant) {
- auto* var = Override("pos", ty.f32(), Expr(3.f),
- ast::AttributeList{
- Id(23),
- });
+ auto* var = Override("pos", ty.f32(), Expr(3_f),
+ ast::AttributeList{
+ Id(23),
+ });
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.EmitProgramConstVariable(var)) << gen.error();
- EXPECT_EQ(gen.result(), "constant float pos [[function_constant(23)]];\n");
+ ASSERT_TRUE(gen.EmitProgramConstVariable(var)) << gen.error();
+ EXPECT_EQ(gen.result(), "constant float pos [[function_constant(23)]];\n");
}
TEST_F(MslGeneratorImplTest, Emit_SpecConstant_NoId) {
- auto* var_a = Override("a", ty.f32(), nullptr,
- ast::AttributeList{
- Id(0),
- });
- auto* var_b = Override("b", ty.f32(), nullptr);
+ auto* var_a = Override("a", ty.f32(), nullptr,
+ ast::AttributeList{
+ Id(0),
+ });
+ auto* var_b = Override("b", ty.f32(), nullptr);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.EmitProgramConstVariable(var_a)) << gen.error();
- ASSERT_TRUE(gen.EmitProgramConstVariable(var_b)) << gen.error();
- EXPECT_EQ(gen.result(), R"(constant float a [[function_constant(0)]];
+ ASSERT_TRUE(gen.EmitProgramConstVariable(var_a)) << gen.error();
+ ASSERT_TRUE(gen.EmitProgramConstVariable(var_b)) << gen.error();
+ EXPECT_EQ(gen.result(), R"(constant float a [[function_constant(0)]];
constant float b [[function_constant(1)]];
)");
}
diff --git a/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_return_test.cc b/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_return_test.cc
index 7b846af5061..144320962b7 100644
--- a/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_return_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_return_test.cc
@@ -14,33 +14,35 @@
#include "src/tint/writer/msl/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::msl {
namespace {
using MslGeneratorImplTest = TestHelper;
TEST_F(MslGeneratorImplTest, Emit_Return) {
- auto* r = Return();
- WrapInFunction(r);
+ auto* r = Return();
+ WrapInFunction(r);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(r)) << gen.error();
- EXPECT_EQ(gen.result(), " return;\n");
+ ASSERT_TRUE(gen.EmitStatement(r)) << gen.error();
+ EXPECT_EQ(gen.result(), " return;\n");
}
TEST_F(MslGeneratorImplTest, Emit_ReturnWithValue) {
- auto* r = Return(123);
- Func("f", {}, ty.i32(), {r});
+ auto* r = Return(123_i);
+ Func("f", {}, ty.i32(), {r});
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(r)) << gen.error();
- EXPECT_EQ(gen.result(), " return 123;\n");
+ ASSERT_TRUE(gen.EmitStatement(r)) << gen.error();
+ EXPECT_EQ(gen.result(), " return 123;\n");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_sanitizer_test.cc b/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_sanitizer_test.cc
index abab3d2469e..32ddb42700f 100644
--- a/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_sanitizer_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_sanitizer_test.cc
@@ -26,28 +26,28 @@ using ::testing::HasSubstr;
using MslSanitizerTest = TestHelper;
TEST_F(MslSanitizerTest, Call_ArrayLength) {
- auto* s = Structure("my_struct", {Member(0, "a", ty.array<f32>(4))});
- Global("b", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kRead,
+ auto* s = Structure("my_struct", {Member(0, "a", ty.array<f32>(4))});
+ Global("b", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kRead,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(1),
+ create<ast::GroupAttribute>(2),
+ });
+
+ Func("a_func", ast::VariableList{}, ty.void_(),
+ ast::StatementList{
+ Decl(Var("len", ty.u32(), ast::StorageClass::kNone,
+ Call("arrayLength", AddressOf(MemberAccessor("b", "a"))))),
+ },
ast::AttributeList{
- create<ast::BindingAttribute>(1),
- create<ast::GroupAttribute>(2),
+ Stage(ast::PipelineStage::kFragment),
});
- Func("a_func", ast::VariableList{}, ty.void_(),
- ast::StatementList{
- Decl(Var("len", ty.u32(), ast::StorageClass::kNone,
- Call("arrayLength", AddressOf(MemberAccessor("b", "a"))))),
- },
- ast::AttributeList{
- Stage(ast::PipelineStage::kFragment),
- });
+ GeneratorImpl& gen = SanitizeAndBuild();
- GeneratorImpl& gen = SanitizeAndBuild();
+ ASSERT_TRUE(gen.Generate()) << gen.error();
- ASSERT_TRUE(gen.Generate()) << gen.error();
-
- auto got = gen.result();
- auto* expect = R"(#include <metal_stdlib>
+ auto got = gen.result();
+ auto* expect = R"(#include <metal_stdlib>
using namespace metal;
struct tint_symbol {
@@ -64,35 +64,35 @@ fragment void a_func(const constant tint_symbol* tint_symbol_2 [[buffer(30)]]) {
}
)";
- EXPECT_EQ(expect, got);
+ EXPECT_EQ(expect, got);
}
TEST_F(MslSanitizerTest, Call_ArrayLength_OtherMembersInStruct) {
- auto* s = Structure("my_struct", {
- Member(0, "z", ty.f32()),
- Member(4, "a", ty.array<f32>(4)),
- });
- Global("b", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kRead,
+ auto* s = Structure("my_struct", {
+ Member(0, "z", ty.f32()),
+ Member(4, "a", ty.array<f32>(4)),
+ });
+ Global("b", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kRead,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(1),
+ create<ast::GroupAttribute>(2),
+ });
+
+ Func("a_func", ast::VariableList{}, ty.void_(),
+ ast::StatementList{
+ Decl(Var("len", ty.u32(), ast::StorageClass::kNone,
+ Call("arrayLength", AddressOf(MemberAccessor("b", "a"))))),
+ },
ast::AttributeList{
- create<ast::BindingAttribute>(1),
- create<ast::GroupAttribute>(2),
+ Stage(ast::PipelineStage::kFragment),
});
- Func("a_func", ast::VariableList{}, ty.void_(),
- ast::StatementList{
- Decl(Var("len", ty.u32(), ast::StorageClass::kNone,
- Call("arrayLength", AddressOf(MemberAccessor("b", "a"))))),
- },
- ast::AttributeList{
- Stage(ast::PipelineStage::kFragment),
- });
-
- GeneratorImpl& gen = SanitizeAndBuild();
+ GeneratorImpl& gen = SanitizeAndBuild();
- ASSERT_TRUE(gen.Generate()) << gen.error();
+ ASSERT_TRUE(gen.Generate()) << gen.error();
- auto got = gen.result();
- auto* expect = R"(#include <metal_stdlib>
+ auto got = gen.result();
+ auto* expect = R"(#include <metal_stdlib>
using namespace metal;
struct tint_symbol {
@@ -111,37 +111,36 @@ fragment void a_func(const constant tint_symbol* tint_symbol_2 [[buffer(30)]]) {
)";
- EXPECT_EQ(expect, got);
+ EXPECT_EQ(expect, got);
}
TEST_F(MslSanitizerTest, Call_ArrayLength_ViaLets) {
- auto* s = Structure("my_struct", {Member(0, "a", ty.array<f32>(4))});
- Global("b", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kRead,
+ auto* s = Structure("my_struct", {Member(0, "a", ty.array<f32>(4))});
+ Global("b", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kRead,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(1),
+ create<ast::GroupAttribute>(2),
+ });
+
+ auto* p = Let("p", nullptr, AddressOf("b"));
+ auto* p2 = Let("p2", nullptr, AddressOf(MemberAccessor(Deref(p), "a")));
+
+ Func("a_func", ast::VariableList{}, ty.void_(),
+ ast::StatementList{
+ Decl(p),
+ Decl(p2),
+ Decl(Var("len", ty.u32(), ast::StorageClass::kNone, Call("arrayLength", p2))),
+ },
ast::AttributeList{
- create<ast::BindingAttribute>(1),
- create<ast::GroupAttribute>(2),
+ Stage(ast::PipelineStage::kFragment),
});
- auto* p = Const("p", nullptr, AddressOf("b"));
- auto* p2 = Const("p2", nullptr, AddressOf(MemberAccessor(Deref(p), "a")));
-
- Func("a_func", ast::VariableList{}, ty.void_(),
- ast::StatementList{
- Decl(p),
- Decl(p2),
- Decl(Var("len", ty.u32(), ast::StorageClass::kNone,
- Call("arrayLength", p2))),
- },
- ast::AttributeList{
- Stage(ast::PipelineStage::kFragment),
- });
+ GeneratorImpl& gen = SanitizeAndBuild();
- GeneratorImpl& gen = SanitizeAndBuild();
+ ASSERT_TRUE(gen.Generate()) << gen.error();
- ASSERT_TRUE(gen.Generate()) << gen.error();
-
- auto got = gen.result();
- auto* expect = R"(#include <metal_stdlib>
+ auto got = gen.result();
+ auto* expect = R"(#include <metal_stdlib>
using namespace metal;
struct tint_symbol {
@@ -159,45 +158,42 @@ fragment void a_func(const constant tint_symbol* tint_symbol_2 [[buffer(30)]]) {
)";
- EXPECT_EQ(expect, got);
+ EXPECT_EQ(expect, got);
}
TEST_F(MslSanitizerTest, Call_ArrayLength_ArrayLengthFromUniform) {
- auto* s = Structure("my_struct", {Member(0, "a", ty.array<f32>(4))});
- Global("b", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kRead,
- ast::AttributeList{
- create<ast::BindingAttribute>(1),
- create<ast::GroupAttribute>(0),
- });
- Global("c", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kRead,
+ auto* s = Structure("my_struct", {Member(0, "a", ty.array<f32>(4))});
+ Global("b", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kRead,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(1),
+ create<ast::GroupAttribute>(0),
+ });
+ Global("c", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kRead,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(2),
+ create<ast::GroupAttribute>(0),
+ });
+
+ Func("a_func", ast::VariableList{}, ty.void_(),
+ ast::StatementList{
+ Decl(Var("len", ty.u32(), ast::StorageClass::kNone,
+ Add(Call("arrayLength", AddressOf(MemberAccessor("b", "a"))),
+ Call("arrayLength", AddressOf(MemberAccessor("c", "a")))))),
+ },
ast::AttributeList{
- create<ast::BindingAttribute>(2),
- create<ast::GroupAttribute>(0),
+ Stage(ast::PipelineStage::kFragment),
});
- Func("a_func", ast::VariableList{}, ty.void_(),
- ast::StatementList{
- Decl(Var(
- "len", ty.u32(), ast::StorageClass::kNone,
- Add(Call("arrayLength", AddressOf(MemberAccessor("b", "a"))),
- Call("arrayLength", AddressOf(MemberAccessor("c", "a")))))),
- },
- ast::AttributeList{
- Stage(ast::PipelineStage::kFragment),
- });
-
- Options options;
- options.array_length_from_uniform.ubo_binding = {0, 29};
- options.array_length_from_uniform.bindpoint_to_size_index.emplace(
- sem::BindingPoint{0, 1}, 7u);
- options.array_length_from_uniform.bindpoint_to_size_index.emplace(
- sem::BindingPoint{0, 2}, 2u);
- GeneratorImpl& gen = SanitizeAndBuild(options);
-
- ASSERT_TRUE(gen.Generate()) << gen.error();
-
- auto got = gen.result();
- auto* expect = R"(#include <metal_stdlib>
+ Options options;
+ options.array_length_from_uniform.ubo_binding = {0, 29};
+ options.array_length_from_uniform.bindpoint_to_size_index.emplace(sem::BindingPoint{0, 1}, 7u);
+ options.array_length_from_uniform.bindpoint_to_size_index.emplace(sem::BindingPoint{0, 2}, 2u);
+ GeneratorImpl& gen = SanitizeAndBuild(options);
+
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+
+ auto got = gen.result();
+ auto* expect = R"(#include <metal_stdlib>
using namespace metal;
struct tint_symbol {
@@ -214,43 +210,39 @@ fragment void a_func(const constant tint_symbol* tint_symbol_2 [[buffer(29)]]) {
}
)";
- EXPECT_EQ(expect, got);
+ EXPECT_EQ(expect, got);
}
-TEST_F(MslSanitizerTest,
- Call_ArrayLength_ArrayLengthFromUniformMissingBinding) {
- auto* s = Structure("my_struct", {Member(0, "a", ty.array<f32>(4))});
- Global("b", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kRead,
+TEST_F(MslSanitizerTest, Call_ArrayLength_ArrayLengthFromUniformMissingBinding) {
+ auto* s = Structure("my_struct", {Member(0, "a", ty.array<f32>(4))});
+ Global("b", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kRead,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(1),
+ create<ast::GroupAttribute>(0),
+ });
+ Global("c", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kRead,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(2),
+ create<ast::GroupAttribute>(0),
+ });
+
+ Func("a_func", ast::VariableList{}, ty.void_(),
+ ast::StatementList{
+ Decl(Var("len", ty.u32(), ast::StorageClass::kNone,
+ Add(Call("arrayLength", AddressOf(MemberAccessor("b", "a"))),
+ Call("arrayLength", AddressOf(MemberAccessor("c", "a")))))),
+ },
ast::AttributeList{
- create<ast::BindingAttribute>(1),
- create<ast::GroupAttribute>(0),
- });
- Global("c", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kRead,
- ast::AttributeList{
- create<ast::BindingAttribute>(2),
- create<ast::GroupAttribute>(0),
+ Stage(ast::PipelineStage::kFragment),
});
- Func("a_func", ast::VariableList{}, ty.void_(),
- ast::StatementList{
- Decl(Var(
- "len", ty.u32(), ast::StorageClass::kNone,
- Add(Call("arrayLength", AddressOf(MemberAccessor("b", "a"))),
- Call("arrayLength", AddressOf(MemberAccessor("c", "a")))))),
- },
- ast::AttributeList{
- Stage(ast::PipelineStage::kFragment),
- });
-
- Options options;
- options.array_length_from_uniform.ubo_binding = {0, 29};
- options.array_length_from_uniform.bindpoint_to_size_index.emplace(
- sem::BindingPoint{0, 2}, 2u);
- GeneratorImpl& gen = SanitizeAndBuild(options);
-
- ASSERT_FALSE(gen.Generate());
- EXPECT_THAT(gen.error(),
- HasSubstr("Unable to translate builtin: arrayLength"));
+ Options options;
+ options.array_length_from_uniform.ubo_binding = {0, 29};
+ options.array_length_from_uniform.bindpoint_to_size_index.emplace(sem::BindingPoint{0, 2}, 2u);
+ GeneratorImpl& gen = SanitizeAndBuild(options);
+
+ ASSERT_FALSE(gen.Generate());
+ EXPECT_THAT(gen.error(), HasSubstr("Unable to translate builtin: arrayLength"));
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_switch_test.cc b/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_switch_test.cc
index eec4c24537a..ce8087b0a39 100644
--- a/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_switch_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_switch_test.cc
@@ -14,36 +14,38 @@
#include "src/tint/writer/msl/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::msl {
namespace {
using MslGeneratorImplTest = TestHelper;
TEST_F(MslGeneratorImplTest, Emit_Switch) {
- auto* cond = Var("cond", ty.i32());
+ auto* cond = Var("cond", ty.i32());
- auto* def_body = Block(create<ast::BreakStatement>());
- auto* def = create<ast::CaseStatement>(ast::CaseSelectorList{}, def_body);
+ auto* def_body = Block(create<ast::BreakStatement>());
+ auto* def = create<ast::CaseStatement>(ast::CaseSelectorList{}, def_body);
- ast::CaseSelectorList case_val;
- case_val.push_back(Expr(5));
+ ast::CaseSelectorList case_val;
+ case_val.push_back(Expr(5_i));
- auto* case_body = Block(create<ast::BreakStatement>());
+ auto* case_body = Block(create<ast::BreakStatement>());
- auto* case_stmt = create<ast::CaseStatement>(case_val, case_body);
+ auto* case_stmt = create<ast::CaseStatement>(case_val, case_body);
- ast::CaseStatementList body;
- body.push_back(case_stmt);
- body.push_back(def);
+ ast::CaseStatementList body;
+ body.push_back(case_stmt);
+ body.push_back(def);
- auto* s = create<ast::SwitchStatement>(Expr(cond), body);
- WrapInFunction(cond, s);
- GeneratorImpl& gen = Build();
+ auto* s = create<ast::SwitchStatement>(Expr(cond), body);
+ WrapInFunction(cond, s);
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(s)) << gen.error();
- EXPECT_EQ(gen.result(), R"( switch(cond) {
+ ASSERT_TRUE(gen.EmitStatement(s)) << gen.error();
+ EXPECT_EQ(gen.result(), R"( switch(cond) {
case 5: {
break;
}
diff --git a/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_test.cc b/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_test.cc
index cad1eb17b29..7cef2684197 100644
--- a/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_test.cc
@@ -15,22 +15,33 @@
#include "src/tint/ast/stage_attribute.h"
#include "src/tint/writer/msl/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::msl {
namespace {
using MslGeneratorImplTest = TestHelper;
+TEST_F(MslGeneratorImplTest, InvalidProgram) {
+ Diagnostics().add_error(diag::System::Writer, "make the program invalid");
+ ASSERT_FALSE(IsValid());
+ auto program = std::make_unique<Program>(std::move(*this));
+ ASSERT_FALSE(program->IsValid());
+ auto result = Generate(program.get(), Options{});
+ EXPECT_EQ(result.error, "input program is not valid");
+}
+
TEST_F(MslGeneratorImplTest, Generate) {
- Func("my_func", ast::VariableList{}, ty.void_(), ast::StatementList{},
- ast::AttributeList{
- Stage(ast::PipelineStage::kCompute),
- WorkgroupSize(1),
- });
+ Func("my_func", ast::VariableList{}, ty.void_(), ast::StatementList{},
+ ast::AttributeList{
+ Stage(ast::PipelineStage::kCompute),
+ WorkgroupSize(1_i),
+ });
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(#include <metal_stdlib>
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(#include <metal_stdlib>
using namespace metal;
kernel void my_func() {
@@ -41,56 +52,50 @@ kernel void my_func() {
}
struct MslBuiltinData {
- ast::Builtin builtin;
- const char* attribute_name;
+ ast::Builtin builtin;
+ const char* attribute_name;
};
inline std::ostream& operator<<(std::ostream& out, MslBuiltinData data) {
- out << data.builtin;
- return out;
+ out << data.builtin;
+ return out;
}
using MslBuiltinConversionTest = TestParamHelper<MslBuiltinData>;
TEST_P(MslBuiltinConversionTest, Emit) {
- auto params = GetParam();
+ auto params = GetParam();
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- EXPECT_EQ(gen.builtin_to_attribute(params.builtin),
- std::string(params.attribute_name));
+ EXPECT_EQ(gen.builtin_to_attribute(params.builtin), std::string(params.attribute_name));
}
INSTANTIATE_TEST_SUITE_P(
MslGeneratorImplTest,
MslBuiltinConversionTest,
- testing::Values(MslBuiltinData{ast::Builtin::kPosition, "position"},
- MslBuiltinData{ast::Builtin::kVertexIndex, "vertex_id"},
- MslBuiltinData{ast::Builtin::kInstanceIndex, "instance_id"},
- MslBuiltinData{ast::Builtin::kFrontFacing, "front_facing"},
- MslBuiltinData{ast::Builtin::kFragDepth, "depth(any)"},
- MslBuiltinData{ast::Builtin::kLocalInvocationId,
- "thread_position_in_threadgroup"},
- MslBuiltinData{ast::Builtin::kLocalInvocationIndex,
- "thread_index_in_threadgroup"},
- MslBuiltinData{ast::Builtin::kGlobalInvocationId,
- "thread_position_in_grid"},
- MslBuiltinData{ast::Builtin::kWorkgroupId,
- "threadgroup_position_in_grid"},
- MslBuiltinData{ast::Builtin::kNumWorkgroups,
- "threadgroups_per_grid"},
- MslBuiltinData{ast::Builtin::kSampleIndex, "sample_id"},
- MslBuiltinData{ast::Builtin::kSampleMask, "sample_mask"},
- MslBuiltinData{ast::Builtin::kPointSize, "point_size"}));
+ testing::Values(
+ MslBuiltinData{ast::Builtin::kPosition, "position"},
+ MslBuiltinData{ast::Builtin::kVertexIndex, "vertex_id"},
+ MslBuiltinData{ast::Builtin::kInstanceIndex, "instance_id"},
+ MslBuiltinData{ast::Builtin::kFrontFacing, "front_facing"},
+ MslBuiltinData{ast::Builtin::kFragDepth, "depth(any)"},
+ MslBuiltinData{ast::Builtin::kLocalInvocationId, "thread_position_in_threadgroup"},
+ MslBuiltinData{ast::Builtin::kLocalInvocationIndex, "thread_index_in_threadgroup"},
+ MslBuiltinData{ast::Builtin::kGlobalInvocationId, "thread_position_in_grid"},
+ MslBuiltinData{ast::Builtin::kWorkgroupId, "threadgroup_position_in_grid"},
+ MslBuiltinData{ast::Builtin::kNumWorkgroups, "threadgroups_per_grid"},
+ MslBuiltinData{ast::Builtin::kSampleIndex, "sample_id"},
+ MslBuiltinData{ast::Builtin::kSampleMask, "sample_mask"},
+ MslBuiltinData{ast::Builtin::kPointSize, "point_size"}));
TEST_F(MslGeneratorImplTest, HasInvariantAttribute_True) {
- auto* out = Structure(
- "Out", {Member("pos", ty.vec4<f32>(),
- {Builtin(ast::Builtin::kPosition), Invariant()})});
- Func("vert_main", ast::VariableList{}, ty.Of(out),
- {Return(Construct(ty.Of(out)))}, {Stage(ast::PipelineStage::kVertex)});
+ auto* out = Structure(
+ "Out", {Member("pos", ty.vec4<f32>(), {Builtin(ast::Builtin::kPosition), Invariant()})});
+ Func("vert_main", ast::VariableList{}, ty.Of(out), {Return(Construct(ty.Of(out)))},
+ {Stage(ast::PipelineStage::kVertex)});
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_TRUE(gen.HasInvariant());
- EXPECT_EQ(gen.result(), R"(#include <metal_stdlib>
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_TRUE(gen.HasInvariant());
+ EXPECT_EQ(gen.result(), R"(#include <metal_stdlib>
using namespace metal;
@@ -112,16 +117,16 @@ vertex Out vert_main() {
}
TEST_F(MslGeneratorImplTest, HasInvariantAttribute_False) {
- auto* out = Structure("Out", {Member("pos", ty.vec4<f32>(),
- {Builtin(ast::Builtin::kPosition)})});
- Func("vert_main", ast::VariableList{}, ty.Of(out),
- {Return(Construct(ty.Of(out)))}, {Stage(ast::PipelineStage::kVertex)});
+ auto* out =
+ Structure("Out", {Member("pos", ty.vec4<f32>(), {Builtin(ast::Builtin::kPosition)})});
+ Func("vert_main", ast::VariableList{}, ty.Of(out), {Return(Construct(ty.Of(out)))},
+ {Stage(ast::PipelineStage::kVertex)});
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_FALSE(gen.HasInvariant());
- EXPECT_EQ(gen.result(), R"(#include <metal_stdlib>
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_FALSE(gen.HasInvariant());
+ EXPECT_EQ(gen.result(), R"(#include <metal_stdlib>
using namespace metal;
struct Out {
@@ -136,15 +141,14 @@ vertex Out vert_main() {
}
TEST_F(MslGeneratorImplTest, WorkgroupMatrix) {
- Global("m", ty.mat2x2<f32>(), ast::StorageClass::kWorkgroup);
- Func("comp_main", ast::VariableList{}, ty.void_(),
- {Decl(Const("x", nullptr, Expr("m")))},
- {Stage(ast::PipelineStage::kCompute), WorkgroupSize(1)});
+ Global("m", ty.mat2x2<f32>(), ast::StorageClass::kWorkgroup);
+ Func("comp_main", ast::VariableList{}, ty.void_(), {Decl(Let("x", nullptr, Expr("m")))},
+ {Stage(ast::PipelineStage::kCompute), WorkgroupSize(1_i)});
- GeneratorImpl& gen = SanitizeAndBuild();
+ GeneratorImpl& gen = SanitizeAndBuild();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(#include <metal_stdlib>
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(#include <metal_stdlib>
using namespace metal;
struct tint_symbol_3 {
@@ -153,7 +157,7 @@ struct tint_symbol_3 {
void comp_main_inner(uint local_invocation_index, threadgroup float2x2* const tint_symbol) {
{
- *(tint_symbol) = float2x2();
+ *(tint_symbol) = float2x2(float2(0.0f), float2(0.0f));
}
threadgroup_barrier(mem_flags::mem_threadgroup);
float2x2 const x = *(tint_symbol);
@@ -167,22 +171,21 @@ kernel void comp_main(threadgroup tint_symbol_3* tint_symbol_2 [[threadgroup(0)]
)");
- auto allocations = gen.DynamicWorkgroupAllocations();
- ASSERT_TRUE(allocations.count("comp_main"));
- ASSERT_EQ(allocations["comp_main"].size(), 1u);
- EXPECT_EQ(allocations["comp_main"][0], 2u * 2u * sizeof(float));
+ auto allocations = gen.DynamicWorkgroupAllocations();
+ ASSERT_TRUE(allocations.count("comp_main"));
+ ASSERT_EQ(allocations["comp_main"].size(), 1u);
+ EXPECT_EQ(allocations["comp_main"][0], 2u * 2u * sizeof(float));
}
TEST_F(MslGeneratorImplTest, WorkgroupMatrixInArray) {
- Global("m", ty.array(ty.mat2x2<f32>(), 4), ast::StorageClass::kWorkgroup);
- Func("comp_main", ast::VariableList{}, ty.void_(),
- {Decl(Const("x", nullptr, Expr("m")))},
- {Stage(ast::PipelineStage::kCompute), WorkgroupSize(1)});
+ Global("m", ty.array(ty.mat2x2<f32>(), 4_i), ast::StorageClass::kWorkgroup);
+ Func("comp_main", ast::VariableList{}, ty.void_(), {Decl(Let("x", nullptr, Expr("m")))},
+ {Stage(ast::PipelineStage::kCompute), WorkgroupSize(1_i)});
- GeneratorImpl& gen = SanitizeAndBuild();
+ GeneratorImpl& gen = SanitizeAndBuild();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(#include <metal_stdlib>
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(#include <metal_stdlib>
using namespace metal;
struct tint_array_wrapper {
@@ -196,7 +199,7 @@ struct tint_symbol_3 {
void comp_main_inner(uint local_invocation_index, threadgroup tint_array_wrapper* const tint_symbol) {
for(uint idx = local_invocation_index; (idx < 4u); idx = (idx + 1u)) {
uint const i = idx;
- (*(tint_symbol)).arr[i] = float2x2();
+ (*(tint_symbol)).arr[i] = float2x2(float2(0.0f), float2(0.0f));
}
threadgroup_barrier(mem_flags::mem_threadgroup);
tint_array_wrapper const x = *(tint_symbol);
@@ -210,29 +213,28 @@ kernel void comp_main(threadgroup tint_symbol_3* tint_symbol_2 [[threadgroup(0)]
)");
- auto allocations = gen.DynamicWorkgroupAllocations();
- ASSERT_TRUE(allocations.count("comp_main"));
- ASSERT_EQ(allocations["comp_main"].size(), 1u);
- EXPECT_EQ(allocations["comp_main"][0], 4u * 2u * 2u * sizeof(float));
+ auto allocations = gen.DynamicWorkgroupAllocations();
+ ASSERT_TRUE(allocations.count("comp_main"));
+ ASSERT_EQ(allocations["comp_main"].size(), 1u);
+ EXPECT_EQ(allocations["comp_main"][0], 4u * 2u * 2u * sizeof(float));
}
TEST_F(MslGeneratorImplTest, WorkgroupMatrixInStruct) {
- Structure("S1", {
- Member("m1", ty.mat2x2<f32>()),
- Member("m2", ty.mat4x4<f32>()),
- });
- Structure("S2", {
- Member("s", ty.type_name("S1")),
- });
- Global("s", ty.type_name("S2"), ast::StorageClass::kWorkgroup);
- Func("comp_main", ast::VariableList{}, ty.void_(),
- {Decl(Const("x", nullptr, Expr("s")))},
- {Stage(ast::PipelineStage::kCompute), WorkgroupSize(1)});
-
- GeneratorImpl& gen = SanitizeAndBuild();
-
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(#include <metal_stdlib>
+ Structure("S1", {
+ Member("m1", ty.mat2x2<f32>()),
+ Member("m2", ty.mat4x4<f32>()),
+ });
+ Structure("S2", {
+ Member("s", ty.type_name("S1")),
+ });
+ Global("s", ty.type_name("S2"), ast::StorageClass::kWorkgroup);
+ Func("comp_main", ast::VariableList{}, ty.void_(), {Decl(Let("x", nullptr, Expr("s")))},
+ {Stage(ast::PipelineStage::kCompute), WorkgroupSize(1_i)});
+
+ GeneratorImpl& gen = SanitizeAndBuild();
+
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(#include <metal_stdlib>
using namespace metal;
struct S1 {
@@ -265,51 +267,50 @@ kernel void comp_main(threadgroup tint_symbol_4* tint_symbol_3 [[threadgroup(0)]
)");
- auto allocations = gen.DynamicWorkgroupAllocations();
- ASSERT_TRUE(allocations.count("comp_main"));
- ASSERT_EQ(allocations["comp_main"].size(), 1u);
- EXPECT_EQ(allocations["comp_main"][0],
- (2 * 2 * sizeof(float)) + (4u * 4u * sizeof(float)));
+ auto allocations = gen.DynamicWorkgroupAllocations();
+ ASSERT_TRUE(allocations.count("comp_main"));
+ ASSERT_EQ(allocations["comp_main"].size(), 1u);
+ EXPECT_EQ(allocations["comp_main"][0], (2 * 2 * sizeof(float)) + (4u * 4u * sizeof(float)));
}
TEST_F(MslGeneratorImplTest, WorkgroupMatrix_Multiples) {
- Global("m1", ty.mat2x2<f32>(), ast::StorageClass::kWorkgroup);
- Global("m2", ty.mat2x3<f32>(), ast::StorageClass::kWorkgroup);
- Global("m3", ty.mat2x4<f32>(), ast::StorageClass::kWorkgroup);
- Global("m4", ty.mat3x2<f32>(), ast::StorageClass::kWorkgroup);
- Global("m5", ty.mat3x3<f32>(), ast::StorageClass::kWorkgroup);
- Global("m6", ty.mat3x4<f32>(), ast::StorageClass::kWorkgroup);
- Global("m7", ty.mat4x2<f32>(), ast::StorageClass::kWorkgroup);
- Global("m8", ty.mat4x3<f32>(), ast::StorageClass::kWorkgroup);
- Global("m9", ty.mat4x4<f32>(), ast::StorageClass::kWorkgroup);
- Func("main1", ast::VariableList{}, ty.void_(),
- {
- Decl(Const("a1", nullptr, Expr("m1"))),
- Decl(Const("a2", nullptr, Expr("m2"))),
- Decl(Const("a3", nullptr, Expr("m3"))),
- },
- {Stage(ast::PipelineStage::kCompute), WorkgroupSize(1)});
- Func("main2", ast::VariableList{}, ty.void_(),
- {
- Decl(Const("a1", nullptr, Expr("m4"))),
- Decl(Const("a2", nullptr, Expr("m5"))),
- Decl(Const("a3", nullptr, Expr("m6"))),
- },
- {Stage(ast::PipelineStage::kCompute), WorkgroupSize(1)});
- Func("main3", ast::VariableList{}, ty.void_(),
- {
- Decl(Const("a1", nullptr, Expr("m7"))),
- Decl(Const("a2", nullptr, Expr("m8"))),
- Decl(Const("a3", nullptr, Expr("m9"))),
- },
- {Stage(ast::PipelineStage::kCompute), WorkgroupSize(1)});
- Func("main4_no_usages", ast::VariableList{}, ty.void_(), {},
- {Stage(ast::PipelineStage::kCompute), WorkgroupSize(1)});
-
- GeneratorImpl& gen = SanitizeAndBuild();
-
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(#include <metal_stdlib>
+ Global("m1", ty.mat2x2<f32>(), ast::StorageClass::kWorkgroup);
+ Global("m2", ty.mat2x3<f32>(), ast::StorageClass::kWorkgroup);
+ Global("m3", ty.mat2x4<f32>(), ast::StorageClass::kWorkgroup);
+ Global("m4", ty.mat3x2<f32>(), ast::StorageClass::kWorkgroup);
+ Global("m5", ty.mat3x3<f32>(), ast::StorageClass::kWorkgroup);
+ Global("m6", ty.mat3x4<f32>(), ast::StorageClass::kWorkgroup);
+ Global("m7", ty.mat4x2<f32>(), ast::StorageClass::kWorkgroup);
+ Global("m8", ty.mat4x3<f32>(), ast::StorageClass::kWorkgroup);
+ Global("m9", ty.mat4x4<f32>(), ast::StorageClass::kWorkgroup);
+ Func("main1", ast::VariableList{}, ty.void_(),
+ {
+ Decl(Let("a1", nullptr, Expr("m1"))),
+ Decl(Let("a2", nullptr, Expr("m2"))),
+ Decl(Let("a3", nullptr, Expr("m3"))),
+ },
+ {Stage(ast::PipelineStage::kCompute), WorkgroupSize(1_i)});
+ Func("main2", ast::VariableList{}, ty.void_(),
+ {
+ Decl(Let("a1", nullptr, Expr("m4"))),
+ Decl(Let("a2", nullptr, Expr("m5"))),
+ Decl(Let("a3", nullptr, Expr("m6"))),
+ },
+ {Stage(ast::PipelineStage::kCompute), WorkgroupSize(1_i)});
+ Func("main3", ast::VariableList{}, ty.void_(),
+ {
+ Decl(Let("a1", nullptr, Expr("m7"))),
+ Decl(Let("a2", nullptr, Expr("m8"))),
+ Decl(Let("a3", nullptr, Expr("m9"))),
+ },
+ {Stage(ast::PipelineStage::kCompute), WorkgroupSize(1_i)});
+ Func("main4_no_usages", ast::VariableList{}, ty.void_(), {},
+ {Stage(ast::PipelineStage::kCompute), WorkgroupSize(1_i)});
+
+ GeneratorImpl& gen = SanitizeAndBuild();
+
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(#include <metal_stdlib>
using namespace metal;
struct tint_symbol_7 {
@@ -332,9 +333,9 @@ struct tint_symbol_23 {
void main1_inner(uint local_invocation_index, threadgroup float2x2* const tint_symbol, threadgroup float2x3* const tint_symbol_1, threadgroup float2x4* const tint_symbol_2) {
{
- *(tint_symbol) = float2x2();
- *(tint_symbol_1) = float2x3();
- *(tint_symbol_2) = float2x4();
+ *(tint_symbol) = float2x2(float2(0.0f), float2(0.0f));
+ *(tint_symbol_1) = float2x3(float3(0.0f), float3(0.0f));
+ *(tint_symbol_2) = float2x4(float4(0.0f), float4(0.0f));
}
threadgroup_barrier(mem_flags::mem_threadgroup);
float2x2 const a1 = *(tint_symbol);
@@ -352,9 +353,9 @@ kernel void main1(threadgroup tint_symbol_7* tint_symbol_4 [[threadgroup(0)]], u
void main2_inner(uint local_invocation_index_1, threadgroup float3x2* const tint_symbol_8, threadgroup float3x3* const tint_symbol_9, threadgroup float3x4* const tint_symbol_10) {
{
- *(tint_symbol_8) = float3x2();
- *(tint_symbol_9) = float3x3();
- *(tint_symbol_10) = float3x4();
+ *(tint_symbol_8) = float3x2(float2(0.0f), float2(0.0f), float2(0.0f));
+ *(tint_symbol_9) = float3x3(float3(0.0f), float3(0.0f), float3(0.0f));
+ *(tint_symbol_10) = float3x4(float4(0.0f), float4(0.0f), float4(0.0f));
}
threadgroup_barrier(mem_flags::mem_threadgroup);
float3x2 const a1 = *(tint_symbol_8);
@@ -372,9 +373,9 @@ kernel void main2(threadgroup tint_symbol_15* tint_symbol_12 [[threadgroup(0)]],
void main3_inner(uint local_invocation_index_2, threadgroup float4x2* const tint_symbol_16, threadgroup float4x3* const tint_symbol_17, threadgroup float4x4* const tint_symbol_18) {
{
- *(tint_symbol_16) = float4x2();
- *(tint_symbol_17) = float4x3();
- *(tint_symbol_18) = float4x4();
+ *(tint_symbol_16) = float4x2(float2(0.0f), float2(0.0f), float2(0.0f), float2(0.0f));
+ *(tint_symbol_17) = float4x3(float3(0.0f), float3(0.0f), float3(0.0f), float3(0.0f));
+ *(tint_symbol_18) = float4x4(float4(0.0f), float4(0.0f), float4(0.0f), float4(0.0f));
}
threadgroup_barrier(mem_flags::mem_threadgroup);
float4x2 const a1 = *(tint_symbol_16);
@@ -396,17 +397,17 @@ kernel void main4_no_usages() {
)");
- auto allocations = gen.DynamicWorkgroupAllocations();
- ASSERT_TRUE(allocations.count("main1"));
- ASSERT_TRUE(allocations.count("main2"));
- ASSERT_TRUE(allocations.count("main3"));
- EXPECT_EQ(allocations.count("main4_no_usages"), 0u);
- ASSERT_EQ(allocations["main1"].size(), 1u);
- EXPECT_EQ(allocations["main1"][0], 20u * sizeof(float));
- ASSERT_EQ(allocations["main2"].size(), 1u);
- EXPECT_EQ(allocations["main2"][0], 32u * sizeof(float));
- ASSERT_EQ(allocations["main3"].size(), 1u);
- EXPECT_EQ(allocations["main3"][0], 40u * sizeof(float));
+ auto allocations = gen.DynamicWorkgroupAllocations();
+ ASSERT_TRUE(allocations.count("main1"));
+ ASSERT_TRUE(allocations.count("main2"));
+ ASSERT_TRUE(allocations.count("main3"));
+ EXPECT_EQ(allocations.count("main4_no_usages"), 0u);
+ ASSERT_EQ(allocations["main1"].size(), 1u);
+ EXPECT_EQ(allocations["main1"][0], 20u * sizeof(float));
+ ASSERT_EQ(allocations["main2"].size(), 1u);
+ EXPECT_EQ(allocations["main2"][0], 32u * sizeof(float));
+ ASSERT_EQ(allocations["main3"].size(), 1u);
+ EXPECT_EQ(allocations["main3"][0], 40u * sizeof(float));
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_type_test.cc b/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_type_test.cc
index 1efffbb1bd1..257468bff31 100644
--- a/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_type_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_type_test.cc
@@ -16,30 +16,32 @@
#include "gmock/gmock.h"
-#include "src/tint/sem/depth_multisampled_texture_type.h"
-#include "src/tint/sem/depth_texture_type.h"
-#include "src/tint/sem/multisampled_texture_type.h"
-#include "src/tint/sem/sampled_texture_type.h"
-#include "src/tint/sem/sampler_type.h"
-#include "src/tint/sem/storage_texture_type.h"
+#include "src/tint/sem/depth_multisampled_texture.h"
+#include "src/tint/sem/depth_texture.h"
+#include "src/tint/sem/multisampled_texture.h"
+#include "src/tint/sem/sampled_texture.h"
+#include "src/tint/sem/sampler.h"
+#include "src/tint/sem/storage_texture.h"
#include "src/tint/writer/msl/test_helper.h"
+using ::testing::HasSubstr;
+
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::msl {
namespace {
-using ::testing::HasSubstr;
-
-#define CHECK_TYPE_SIZE_AND_ALIGN(TYPE, SIZE, ALIGN) \
- static_assert(sizeof(TYPE) == SIZE, "Bad type size"); \
- static_assert(alignof(TYPE) == ALIGN, "Bad type alignment")
+#define CHECK_TYPE_SIZE_AND_ALIGN(TYPE, SIZE, ALIGN) \
+ static_assert(sizeof(TYPE) == SIZE, "Bad type size"); \
+ static_assert(alignof(TYPE) == ALIGN, "Bad type alignment")
// Declare C++ types that match the size and alignment of the types of the same
// name in MSL.
#define DECLARE_TYPE(NAME, SIZE, ALIGN) \
- struct alignas(ALIGN) NAME { \
- uint8_t _[SIZE]; \
- }; \
- CHECK_TYPE_SIZE_AND_ALIGN(NAME, SIZE, ALIGN)
+ struct alignas(ALIGN) NAME { \
+ uint8_t _[SIZE]; \
+ }; \
+ CHECK_TYPE_SIZE_AND_ALIGN(NAME, SIZE, ALIGN)
// Size and alignments taken from the MSL spec:
// https://developer.apple.com/metal/Metal-Shading-Language-Specification.pdf
@@ -60,142 +62,141 @@ using uint = unsigned int;
using MslGeneratorImplTest = TestHelper;
TEST_F(MslGeneratorImplTest, EmitType_Array) {
- auto* arr = ty.array<bool, 4>();
- Global("G", arr, ast::StorageClass::kPrivate);
+ auto* arr = ty.array<bool, 4>();
+ Global("G", arr, ast::StorageClass::kPrivate);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitType(out, program->TypeOf(arr), "ary")) << gen.error();
- EXPECT_EQ(out.str(), "bool ary[4]");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitType(out, program->TypeOf(arr), "ary")) << gen.error();
+ EXPECT_EQ(out.str(), "bool ary[4]");
}
TEST_F(MslGeneratorImplTest, EmitType_ArrayOfArray) {
- auto* a = ty.array<bool, 4>();
- auto* b = ty.array(a, 5);
- Global("G", b, ast::StorageClass::kPrivate);
+ auto* a = ty.array<bool, 4>();
+ auto* b = ty.array(a, 5_u);
+ Global("G", b, ast::StorageClass::kPrivate);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitType(out, program->TypeOf(b), "ary")) << gen.error();
- EXPECT_EQ(out.str(), "bool ary[5][4]");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitType(out, program->TypeOf(b), "ary")) << gen.error();
+ EXPECT_EQ(out.str(), "bool ary[5][4]");
}
TEST_F(MslGeneratorImplTest, EmitType_ArrayOfArrayOfArray) {
- auto* a = ty.array<bool, 4>();
- auto* b = ty.array(a, 5);
- auto* c = ty.array(b, 6);
- Global("G", c, ast::StorageClass::kPrivate);
+ auto* a = ty.array<bool, 4>();
+ auto* b = ty.array(a, 5_u);
+ auto* c = ty.array(b, 6_u);
+ Global("G", c, ast::StorageClass::kPrivate);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitType(out, program->TypeOf(c), "ary")) << gen.error();
- EXPECT_EQ(out.str(), "bool ary[6][5][4]");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitType(out, program->TypeOf(c), "ary")) << gen.error();
+ EXPECT_EQ(out.str(), "bool ary[6][5][4]");
}
TEST_F(MslGeneratorImplTest, EmitType_Array_WithoutName) {
- auto* arr = ty.array<bool, 4>();
- Global("G", arr, ast::StorageClass::kPrivate);
+ auto* arr = ty.array<bool, 4>();
+ Global("G", arr, ast::StorageClass::kPrivate);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitType(out, program->TypeOf(arr), "")) << gen.error();
- EXPECT_EQ(out.str(), "bool[4]");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitType(out, program->TypeOf(arr), "")) << gen.error();
+ EXPECT_EQ(out.str(), "bool[4]");
}
TEST_F(MslGeneratorImplTest, EmitType_RuntimeArray) {
- auto* arr = ty.array<bool, 1>();
- Global("G", arr, ast::StorageClass::kPrivate);
+ auto* arr = ty.array<bool, 1>();
+ Global("G", arr, ast::StorageClass::kPrivate);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitType(out, program->TypeOf(arr), "ary")) << gen.error();
- EXPECT_EQ(out.str(), "bool ary[1]");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitType(out, program->TypeOf(arr), "ary")) << gen.error();
+ EXPECT_EQ(out.str(), "bool ary[1]");
}
TEST_F(MslGeneratorImplTest, EmitType_Bool) {
- auto* bool_ = create<sem::Bool>();
+ auto* bool_ = create<sem::Bool>();
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitType(out, bool_, "")) << gen.error();
- EXPECT_EQ(out.str(), "bool");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitType(out, bool_, "")) << gen.error();
+ EXPECT_EQ(out.str(), "bool");
}
TEST_F(MslGeneratorImplTest, EmitType_F32) {
- auto* f32 = create<sem::F32>();
+ auto* f32 = create<sem::F32>();
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitType(out, f32, "")) << gen.error();
- EXPECT_EQ(out.str(), "float");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitType(out, f32, "")) << gen.error();
+ EXPECT_EQ(out.str(), "float");
}
TEST_F(MslGeneratorImplTest, EmitType_I32) {
- auto* i32 = create<sem::I32>();
+ auto* i32 = create<sem::I32>();
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitType(out, i32, "")) << gen.error();
- EXPECT_EQ(out.str(), "int");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitType(out, i32, "")) << gen.error();
+ EXPECT_EQ(out.str(), "int");
}
TEST_F(MslGeneratorImplTest, EmitType_Matrix) {
- auto* f32 = create<sem::F32>();
- auto* vec3 = create<sem::Vector>(f32, 3u);
- auto* mat2x3 = create<sem::Matrix>(vec3, 2u);
+ auto* f32 = create<sem::F32>();
+ auto* vec3 = create<sem::Vector>(f32, 3u);
+ auto* mat2x3 = create<sem::Matrix>(vec3, 2u);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitType(out, mat2x3, "")) << gen.error();
- EXPECT_EQ(out.str(), "float2x3");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitType(out, mat2x3, "")) << gen.error();
+ EXPECT_EQ(out.str(), "float2x3");
}
TEST_F(MslGeneratorImplTest, EmitType_Pointer) {
- auto* f32 = create<sem::F32>();
- auto* p = create<sem::Pointer>(f32, ast::StorageClass::kWorkgroup,
- ast::Access::kReadWrite);
+ auto* f32 = create<sem::F32>();
+ auto* p = create<sem::Pointer>(f32, ast::StorageClass::kWorkgroup, ast::Access::kReadWrite);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitType(out, p, "")) << gen.error();
- EXPECT_EQ(out.str(), "threadgroup float* ");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitType(out, p, "")) << gen.error();
+ EXPECT_EQ(out.str(), "threadgroup float* ");
}
TEST_F(MslGeneratorImplTest, EmitType_Struct) {
- auto* s = Structure("S", {
- Member("a", ty.i32()),
- Member("b", ty.f32()),
- });
+ auto* s = Structure("S", {
+ Member("a", ty.i32()),
+ Member("b", ty.f32()),
+ });
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitType(out, program->TypeOf(s), "")) << gen.error();
- EXPECT_EQ(out.str(), "S");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitType(out, program->TypeOf(s), "")) << gen.error();
+ EXPECT_EQ(out.str(), "S");
}
TEST_F(MslGeneratorImplTest, EmitType_StructDecl) {
- auto* s = Structure("S", {
- Member("a", ty.i32()),
- Member("b", ty.f32()),
- });
+ auto* s = Structure("S", {
+ Member("a", ty.i32()),
+ Member("b", ty.f32()),
+ });
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- TextGenerator::TextBuffer buf;
- auto* sem_s = program->TypeOf(s)->As<sem::Struct>();
- ASSERT_TRUE(gen.EmitStructType(&buf, sem_s)) << gen.error();
- EXPECT_EQ(buf.String(), R"(struct S {
+ TextGenerator::TextBuffer buf;
+ auto* sem_s = program->TypeOf(s)->As<sem::Struct>();
+ ASSERT_TRUE(gen.EmitStructType(&buf, sem_s)) << gen.error();
+ EXPECT_EQ(buf.String(), R"(struct S {
int a;
float b;
};
@@ -203,393 +204,384 @@ TEST_F(MslGeneratorImplTest, EmitType_StructDecl) {
}
TEST_F(MslGeneratorImplTest, EmitType_Struct_Layout_NonComposites) {
- auto* s = Structure(
- "S", {
- Member("a", ty.i32(), {MemberSize(32)}),
- Member("b", ty.f32(), {MemberAlign(128), MemberSize(128)}),
- Member("c", ty.vec2<f32>()),
- Member("d", ty.u32()),
- Member("e", ty.vec3<f32>()),
- Member("f", ty.u32()),
- Member("g", ty.vec4<f32>()),
- Member("h", ty.u32()),
- Member("i", ty.mat2x2<f32>()),
- Member("j", ty.u32()),
- Member("k", ty.mat2x3<f32>()),
- Member("l", ty.u32()),
- Member("m", ty.mat2x4<f32>()),
- Member("n", ty.u32()),
- Member("o", ty.mat3x2<f32>()),
- Member("p", ty.u32()),
- Member("q", ty.mat3x3<f32>()),
- Member("r", ty.u32()),
- Member("s", ty.mat3x4<f32>()),
- Member("t", ty.u32()),
- Member("u", ty.mat4x2<f32>()),
- Member("v", ty.u32()),
- Member("w", ty.mat4x3<f32>()),
- Member("x", ty.u32()),
- Member("y", ty.mat4x4<f32>()),
- Member("z", ty.f32()),
+ auto* s = Structure("S", {
+ Member("a", ty.i32(), {MemberSize(32)}),
+ Member("b", ty.f32(), {MemberAlign(128), MemberSize(128)}),
+ Member("c", ty.vec2<f32>()),
+ Member("d", ty.u32()),
+ Member("e", ty.vec3<f32>()),
+ Member("f", ty.u32()),
+ Member("g", ty.vec4<f32>()),
+ Member("h", ty.u32()),
+ Member("i", ty.mat2x2<f32>()),
+ Member("j", ty.u32()),
+ Member("k", ty.mat2x3<f32>()),
+ Member("l", ty.u32()),
+ Member("m", ty.mat2x4<f32>()),
+ Member("n", ty.u32()),
+ Member("o", ty.mat3x2<f32>()),
+ Member("p", ty.u32()),
+ Member("q", ty.mat3x3<f32>()),
+ Member("r", ty.u32()),
+ Member("s", ty.mat3x4<f32>()),
+ Member("t", ty.u32()),
+ Member("u", ty.mat4x2<f32>()),
+ Member("v", ty.u32()),
+ Member("w", ty.mat4x3<f32>()),
+ Member("x", ty.u32()),
+ Member("y", ty.mat4x4<f32>()),
+ Member("z", ty.f32()),
+ });
+
+ Global("G", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kRead,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(0),
});
- Global("G", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kRead,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(0),
- });
-
- GeneratorImpl& gen = Build();
-
- TextGenerator::TextBuffer buf;
- auto* sem_s = program->TypeOf(s)->As<sem::Struct>();
- ASSERT_TRUE(gen.EmitStructType(&buf, sem_s)) << gen.error();
-
- // ALL_FIELDS() calls the macro FIELD(ADDR, TYPE, NAME, SUFFIX)
- // for each field of the structure s.
-#define ALL_FIELDS() \
- FIELD(0x0000, int, a, /*NO SUFFIX*/) \
- FIELD(0x0004, int8_t, tint_pad, [124]) \
- FIELD(0x0080, float, b, /*NO SUFFIX*/) \
- FIELD(0x0084, int8_t, tint_pad_1, [124]) \
- FIELD(0x0100, float2, c, /*NO SUFFIX*/) \
- FIELD(0x0108, uint, d, /*NO SUFFIX*/) \
- FIELD(0x010c, int8_t, tint_pad_2, [4]) \
- FIELD(0x0110, packed_float3, e, /*NO SUFFIX*/) \
- FIELD(0x011c, uint, f, /*NO SUFFIX*/) \
- FIELD(0x0120, float4, g, /*NO SUFFIX*/) \
- FIELD(0x0130, uint, h, /*NO SUFFIX*/) \
- FIELD(0x0134, int8_t, tint_pad_3, [4]) \
- FIELD(0x0138, float2x2, i, /*NO SUFFIX*/) \
- FIELD(0x0148, uint, j, /*NO SUFFIX*/) \
- FIELD(0x014c, int8_t, tint_pad_4, [4]) \
- FIELD(0x0150, float2x3, k, /*NO SUFFIX*/) \
- FIELD(0x0170, uint, l, /*NO SUFFIX*/) \
- FIELD(0x0174, int8_t, tint_pad_5, [12]) \
- FIELD(0x0180, float2x4, m, /*NO SUFFIX*/) \
- FIELD(0x01a0, uint, n, /*NO SUFFIX*/) \
- FIELD(0x01a4, int8_t, tint_pad_6, [4]) \
- FIELD(0x01a8, float3x2, o, /*NO SUFFIX*/) \
- FIELD(0x01c0, uint, p, /*NO SUFFIX*/) \
- FIELD(0x01c4, int8_t, tint_pad_7, [12]) \
- FIELD(0x01d0, float3x3, q, /*NO SUFFIX*/) \
- FIELD(0x0200, uint, r, /*NO SUFFIX*/) \
- FIELD(0x0204, int8_t, tint_pad_8, [12]) \
- FIELD(0x0210, float3x4, s, /*NO SUFFIX*/) \
- FIELD(0x0240, uint, t, /*NO SUFFIX*/) \
- FIELD(0x0244, int8_t, tint_pad_9, [4]) \
- FIELD(0x0248, float4x2, u, /*NO SUFFIX*/) \
- FIELD(0x0268, uint, v, /*NO SUFFIX*/) \
- FIELD(0x026c, int8_t, tint_pad_10, [4]) \
- FIELD(0x0270, float4x3, w, /*NO SUFFIX*/) \
- FIELD(0x02b0, uint, x, /*NO SUFFIX*/) \
- FIELD(0x02b4, int8_t, tint_pad_11, [12]) \
- FIELD(0x02c0, float4x4, y, /*NO SUFFIX*/) \
- FIELD(0x0300, float, z, /*NO SUFFIX*/) \
- FIELD(0x0304, int8_t, tint_pad_12, [124])
-
- // Check that the generated string is as expected.
-#define FIELD(ADDR, TYPE, NAME, SUFFIX) \
- " /* " #ADDR " */ " #TYPE " " #NAME #SUFFIX ";\n"
- auto* expect = "struct S {\n" ALL_FIELDS() "};\n";
+ GeneratorImpl& gen = Build();
+
+ TextGenerator::TextBuffer buf;
+ auto* sem_s = program->TypeOf(s)->As<sem::Struct>();
+ ASSERT_TRUE(gen.EmitStructType(&buf, sem_s)) << gen.error();
+
+ // ALL_FIELDS() calls the macro FIELD(ADDR, TYPE, NAME, SUFFIX)
+ // for each field of the structure s.
+#define ALL_FIELDS() \
+ FIELD(0x0000, int, a, /*NO SUFFIX*/) \
+ FIELD(0x0004, int8_t, tint_pad, [124]) \
+ FIELD(0x0080, float, b, /*NO SUFFIX*/) \
+ FIELD(0x0084, int8_t, tint_pad_1, [124]) \
+ FIELD(0x0100, float2, c, /*NO SUFFIX*/) \
+ FIELD(0x0108, uint, d, /*NO SUFFIX*/) \
+ FIELD(0x010c, int8_t, tint_pad_2, [4]) \
+ FIELD(0x0110, packed_float3, e, /*NO SUFFIX*/) \
+ FIELD(0x011c, uint, f, /*NO SUFFIX*/) \
+ FIELD(0x0120, float4, g, /*NO SUFFIX*/) \
+ FIELD(0x0130, uint, h, /*NO SUFFIX*/) \
+ FIELD(0x0134, int8_t, tint_pad_3, [4]) \
+ FIELD(0x0138, float2x2, i, /*NO SUFFIX*/) \
+ FIELD(0x0148, uint, j, /*NO SUFFIX*/) \
+ FIELD(0x014c, int8_t, tint_pad_4, [4]) \
+ FIELD(0x0150, float2x3, k, /*NO SUFFIX*/) \
+ FIELD(0x0170, uint, l, /*NO SUFFIX*/) \
+ FIELD(0x0174, int8_t, tint_pad_5, [12]) \
+ FIELD(0x0180, float2x4, m, /*NO SUFFIX*/) \
+ FIELD(0x01a0, uint, n, /*NO SUFFIX*/) \
+ FIELD(0x01a4, int8_t, tint_pad_6, [4]) \
+ FIELD(0x01a8, float3x2, o, /*NO SUFFIX*/) \
+ FIELD(0x01c0, uint, p, /*NO SUFFIX*/) \
+ FIELD(0x01c4, int8_t, tint_pad_7, [12]) \
+ FIELD(0x01d0, float3x3, q, /*NO SUFFIX*/) \
+ FIELD(0x0200, uint, r, /*NO SUFFIX*/) \
+ FIELD(0x0204, int8_t, tint_pad_8, [12]) \
+ FIELD(0x0210, float3x4, s, /*NO SUFFIX*/) \
+ FIELD(0x0240, uint, t, /*NO SUFFIX*/) \
+ FIELD(0x0244, int8_t, tint_pad_9, [4]) \
+ FIELD(0x0248, float4x2, u, /*NO SUFFIX*/) \
+ FIELD(0x0268, uint, v, /*NO SUFFIX*/) \
+ FIELD(0x026c, int8_t, tint_pad_10, [4]) \
+ FIELD(0x0270, float4x3, w, /*NO SUFFIX*/) \
+ FIELD(0x02b0, uint, x, /*NO SUFFIX*/) \
+ FIELD(0x02b4, int8_t, tint_pad_11, [12]) \
+ FIELD(0x02c0, float4x4, y, /*NO SUFFIX*/) \
+ FIELD(0x0300, float, z, /*NO SUFFIX*/) \
+ FIELD(0x0304, int8_t, tint_pad_12, [124])
+
+ // Check that the generated string is as expected.
+#define FIELD(ADDR, TYPE, NAME, SUFFIX) " /* " #ADDR " */ " #TYPE " " #NAME #SUFFIX ";\n"
+ auto* expect = "struct S {\n" ALL_FIELDS() "};\n";
#undef FIELD
- EXPECT_EQ(buf.String(), expect);
-
- // 1.4 Metal and C++14
- // The Metal programming language is a C++14-based Specification with
- // extensions and restrictions. Refer to the C++14 Specification (also known
- // as the ISO/IEC JTC1/SC22/WG21 N4431 Language Specification) for a detailed
- // description of the language grammar.
- //
- // Tint is written in C++14, so use the compiler to verify the generated
- // layout is as expected for C++14 / MSL.
- {
- struct S {
+ EXPECT_EQ(buf.String(), expect);
+
+ // 1.4 Metal and C++14
+ // The Metal programming language is a C++14-based Specification with
+ // extensions and restrictions. Refer to the C++14 Specification (also known
+ // as the ISO/IEC JTC1/SC22/WG21 N4431 Language Specification) for a detailed
+ // description of the language grammar.
+ //
+ // Tint is written in C++14, so use the compiler to verify the generated
+ // layout is as expected for C++14 / MSL.
+ {
+ struct S {
#define FIELD(ADDR, TYPE, NAME, SUFFIX) TYPE NAME SUFFIX;
- ALL_FIELDS()
+ ALL_FIELDS()
#undef FIELD
- };
+ };
#define FIELD(ADDR, TYPE, NAME, SUFFIX) \
- EXPECT_EQ(ADDR, static_cast<int>(offsetof(S, NAME))) << "Field " << #NAME;
- ALL_FIELDS()
+ EXPECT_EQ(ADDR, static_cast<int>(offsetof(S, NAME))) << "Field " << #NAME;
+ ALL_FIELDS()
#undef FIELD
- }
+ }
#undef ALL_FIELDS
}
TEST_F(MslGeneratorImplTest, EmitType_Struct_Layout_Structures) {
- // inner_x: size(1024), align(512)
- auto* inner_x =
- Structure("inner_x", {
- Member("a", ty.i32()),
- Member("b", ty.f32(), {MemberAlign(512)}),
- });
-
- // inner_y: size(516), align(4)
- auto* inner_y =
- Structure("inner_y", {
- Member("a", ty.i32(), {MemberSize(512)}),
- Member("b", ty.f32()),
- });
-
- auto* s = Structure("S", {
- Member("a", ty.i32()),
- Member("b", ty.Of(inner_x)),
- Member("c", ty.f32()),
- Member("d", ty.Of(inner_y)),
- Member("e", ty.f32()),
- });
-
- Global("G", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kRead,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(0),
- });
-
- GeneratorImpl& gen = Build();
-
- TextGenerator::TextBuffer buf;
- auto* sem_s = program->TypeOf(s)->As<sem::Struct>();
- ASSERT_TRUE(gen.EmitStructType(&buf, sem_s)) << gen.error();
-
- // ALL_FIELDS() calls the macro FIELD(ADDR, TYPE, NAME, SUFFIX)
- // for each field of the structure s.
-#define ALL_FIELDS() \
- FIELD(0x0000, int, a, /*NO SUFFIX*/) \
- FIELD(0x0004, int8_t, tint_pad, [508]) \
- FIELD(0x0200, inner_x, b, /*NO SUFFIX*/) \
- FIELD(0x0600, float, c, /*NO SUFFIX*/) \
- FIELD(0x0604, inner_y, d, /*NO SUFFIX*/) \
- FIELD(0x0808, float, e, /*NO SUFFIX*/) \
- FIELD(0x080c, int8_t, tint_pad_1, [500])
-
- // Check that the generated string is as expected.
-#define FIELD(ADDR, TYPE, NAME, SUFFIX) \
- " /* " #ADDR " */ " #TYPE " " #NAME #SUFFIX ";\n"
- auto* expect = "struct S {\n" ALL_FIELDS() "};\n";
+ // inner_x: size(1024), align(512)
+ auto* inner_x = Structure("inner_x", {
+ Member("a", ty.i32()),
+ Member("b", ty.f32(), {MemberAlign(512)}),
+ });
+
+ // inner_y: size(516), align(4)
+ auto* inner_y = Structure("inner_y", {
+ Member("a", ty.i32(), {MemberSize(512)}),
+ Member("b", ty.f32()),
+ });
+
+ auto* s = Structure("S", {
+ Member("a", ty.i32()),
+ Member("b", ty.Of(inner_x)),
+ Member("c", ty.f32()),
+ Member("d", ty.Of(inner_y)),
+ Member("e", ty.f32()),
+ });
+
+ Global("G", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kRead,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(0),
+ });
+
+ GeneratorImpl& gen = Build();
+
+ TextGenerator::TextBuffer buf;
+ auto* sem_s = program->TypeOf(s)->As<sem::Struct>();
+ ASSERT_TRUE(gen.EmitStructType(&buf, sem_s)) << gen.error();
+
+ // ALL_FIELDS() calls the macro FIELD(ADDR, TYPE, NAME, SUFFIX)
+ // for each field of the structure s.
+#define ALL_FIELDS() \
+ FIELD(0x0000, int, a, /*NO SUFFIX*/) \
+ FIELD(0x0004, int8_t, tint_pad, [508]) \
+ FIELD(0x0200, inner_x, b, /*NO SUFFIX*/) \
+ FIELD(0x0600, float, c, /*NO SUFFIX*/) \
+ FIELD(0x0604, inner_y, d, /*NO SUFFIX*/) \
+ FIELD(0x0808, float, e, /*NO SUFFIX*/) \
+ FIELD(0x080c, int8_t, tint_pad_1, [500])
+
+ // Check that the generated string is as expected.
+#define FIELD(ADDR, TYPE, NAME, SUFFIX) " /* " #ADDR " */ " #TYPE " " #NAME #SUFFIX ";\n"
+ auto* expect = "struct S {\n" ALL_FIELDS() "};\n";
#undef FIELD
- EXPECT_EQ(buf.String(), expect);
-
- // 1.4 Metal and C++14
- // The Metal programming language is a C++14-based Specification with
- // extensions and restrictions. Refer to the C++14 Specification (also known
- // as the ISO/IEC JTC1/SC22/WG21 N4431 Language Specification) for a detailed
- // description of the language grammar.
- //
- // Tint is written in C++14, so use the compiler to verify the generated
- // layout is as expected for C++14 / MSL.
- {
- struct inner_x {
- uint32_t a;
- alignas(512) float b;
- };
- CHECK_TYPE_SIZE_AND_ALIGN(inner_x, 1024, 512);
-
- struct inner_y {
- uint32_t a[128];
- float b;
- };
- CHECK_TYPE_SIZE_AND_ALIGN(inner_y, 516, 4);
-
- struct S {
+ EXPECT_EQ(buf.String(), expect);
+
+ // 1.4 Metal and C++14
+ // The Metal programming language is a C++14-based Specification with
+ // extensions and restrictions. Refer to the C++14 Specification (also known
+ // as the ISO/IEC JTC1/SC22/WG21 N4431 Language Specification) for a detailed
+ // description of the language grammar.
+ //
+ // Tint is written in C++14, so use the compiler to verify the generated
+ // layout is as expected for C++14 / MSL.
+ {
+ struct inner_x {
+ uint32_t a;
+ alignas(512) float b;
+ };
+ CHECK_TYPE_SIZE_AND_ALIGN(inner_x, 1024, 512);
+
+ struct inner_y {
+ uint32_t a[128];
+ float b;
+ };
+ CHECK_TYPE_SIZE_AND_ALIGN(inner_y, 516, 4);
+
+ struct S {
#define FIELD(ADDR, TYPE, NAME, SUFFIX) TYPE NAME SUFFIX;
- ALL_FIELDS()
+ ALL_FIELDS()
#undef FIELD
- };
+ };
#define FIELD(ADDR, TYPE, NAME, SUFFIX) \
- EXPECT_EQ(ADDR, static_cast<int>(offsetof(S, NAME))) << "Field " << #NAME;
- ALL_FIELDS()
+ EXPECT_EQ(ADDR, static_cast<int>(offsetof(S, NAME))) << "Field " << #NAME;
+ ALL_FIELDS()
#undef FIELD
- }
+ }
#undef ALL_FIELDS
}
TEST_F(MslGeneratorImplTest, EmitType_Struct_Layout_ArrayDefaultStride) {
- // inner: size(1024), align(512)
- auto* inner =
- Structure("inner", {
- Member("a", ty.i32()),
- Member("b", ty.f32(), {MemberAlign(512)}),
- });
-
- // array_x: size(28), align(4)
- auto* array_x = ty.array<f32, 7>();
-
- // array_y: size(4096), align(512)
- auto* array_y = ty.array(ty.Of(inner), 4);
-
- // array_z: size(4), align(4)
- auto* array_z = ty.array<f32>();
-
- auto* s = Structure("S", {
- Member("a", ty.i32()),
- Member("b", array_x),
- Member("c", ty.f32()),
- Member("d", array_y),
- Member("e", ty.f32()),
- Member("f", array_z),
- });
-
- Global("G", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kRead,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(0),
- });
-
- GeneratorImpl& gen = Build();
-
- TextGenerator::TextBuffer buf;
- auto* sem_s = program->TypeOf(s)->As<sem::Struct>();
- ASSERT_TRUE(gen.EmitStructType(&buf, sem_s)) << gen.error();
-
- // ALL_FIELDS() calls the macro FIELD(ADDR, TYPE, NAME, SUFFIX)
- // for each field of the structure s.
-#define ALL_FIELDS() \
- FIELD(0x0000, int, a, /*NO SUFFIX*/) \
- FIELD(0x0004, float, b, [7]) \
- FIELD(0x0020, float, c, /*NO SUFFIX*/) \
- FIELD(0x0024, int8_t, tint_pad, [476]) \
- FIELD(0x0200, inner, d, [4]) \
- FIELD(0x1200, float, e, /*NO SUFFIX*/) \
- FIELD(0x1204, float, f, [1]) \
- FIELD(0x1208, int8_t, tint_pad_1, [504])
-
- // Check that the generated string is as expected.
-#define FIELD(ADDR, TYPE, NAME, SUFFIX) \
- " /* " #ADDR " */ " #TYPE " " #NAME #SUFFIX ";\n"
- auto* expect = "struct S {\n" ALL_FIELDS() "};\n";
-#undef FIELD
- EXPECT_EQ(buf.String(), expect);
-
- // 1.4 Metal and C++14
- // The Metal programming language is a C++14-based Specification with
- // extensions and restrictions. Refer to the C++14 Specification (also known
- // as the ISO/IEC JTC1/SC22/WG21 N4431 Language Specification) for a detailed
- // description of the language grammar.
- //
- // Tint is written in C++14, so use the compiler to verify the generated
- // layout is as expected for C++14 / MSL.
- {
- struct inner {
- uint32_t a;
- alignas(512) float b;
- };
- CHECK_TYPE_SIZE_AND_ALIGN(inner, 1024, 512);
+ // inner: size(1024), align(512)
+ auto* inner = Structure("inner", {
+ Member("a", ty.i32()),
+ Member("b", ty.f32(), {MemberAlign(512)}),
+ });
// array_x: size(28), align(4)
- using array_x = std::array<float, 7>;
- CHECK_TYPE_SIZE_AND_ALIGN(array_x, 28, 4);
+ auto* array_x = ty.array<f32, 7>();
// array_y: size(4096), align(512)
- using array_y = std::array<inner, 4>;
- CHECK_TYPE_SIZE_AND_ALIGN(array_y, 4096, 512);
+ auto* array_y = ty.array(ty.Of(inner), 4_u);
// array_z: size(4), align(4)
- using array_z = std::array<float, 1>;
- CHECK_TYPE_SIZE_AND_ALIGN(array_z, 4, 4);
+ auto* array_z = ty.array<f32>();
+
+ auto* s = Structure("S", {
+ Member("a", ty.i32()),
+ Member("b", array_x),
+ Member("c", ty.f32()),
+ Member("d", array_y),
+ Member("e", ty.f32()),
+ Member("f", array_z),
+ });
+
+ Global("G", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kRead,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(0),
+ });
+
+ GeneratorImpl& gen = Build();
+
+ TextGenerator::TextBuffer buf;
+ auto* sem_s = program->TypeOf(s)->As<sem::Struct>();
+ ASSERT_TRUE(gen.EmitStructType(&buf, sem_s)) << gen.error();
- struct S {
+ // ALL_FIELDS() calls the macro FIELD(ADDR, TYPE, NAME, SUFFIX)
+ // for each field of the structure s.
+#define ALL_FIELDS() \
+ FIELD(0x0000, int, a, /*NO SUFFIX*/) \
+ FIELD(0x0004, float, b, [7]) \
+ FIELD(0x0020, float, c, /*NO SUFFIX*/) \
+ FIELD(0x0024, int8_t, tint_pad, [476]) \
+ FIELD(0x0200, inner, d, [4]) \
+ FIELD(0x1200, float, e, /*NO SUFFIX*/) \
+ FIELD(0x1204, float, f, [1]) \
+ FIELD(0x1208, int8_t, tint_pad_1, [504])
+
+ // Check that the generated string is as expected.
+#define FIELD(ADDR, TYPE, NAME, SUFFIX) " /* " #ADDR " */ " #TYPE " " #NAME #SUFFIX ";\n"
+ auto* expect = "struct S {\n" ALL_FIELDS() "};\n";
+#undef FIELD
+ EXPECT_EQ(buf.String(), expect);
+
+ // 1.4 Metal and C++14
+ // The Metal programming language is a C++14-based Specification with
+ // extensions and restrictions. Refer to the C++14 Specification (also known
+ // as the ISO/IEC JTC1/SC22/WG21 N4431 Language Specification) for a detailed
+ // description of the language grammar.
+ //
+ // Tint is written in C++14, so use the compiler to verify the generated
+ // layout is as expected for C++14 / MSL.
+ {
+ struct inner {
+ uint32_t a;
+ alignas(512) float b;
+ };
+ CHECK_TYPE_SIZE_AND_ALIGN(inner, 1024, 512);
+
+ // array_x: size(28), align(4)
+ using array_x = std::array<float, 7>;
+ CHECK_TYPE_SIZE_AND_ALIGN(array_x, 28, 4);
+
+ // array_y: size(4096), align(512)
+ using array_y = std::array<inner, 4>;
+ CHECK_TYPE_SIZE_AND_ALIGN(array_y, 4096, 512);
+
+ // array_z: size(4), align(4)
+ using array_z = std::array<float, 1>;
+ CHECK_TYPE_SIZE_AND_ALIGN(array_z, 4, 4);
+
+ struct S {
#define FIELD(ADDR, TYPE, NAME, SUFFIX) TYPE NAME SUFFIX;
- ALL_FIELDS()
+ ALL_FIELDS()
#undef FIELD
- };
+ };
#define FIELD(ADDR, TYPE, NAME, SUFFIX) \
- EXPECT_EQ(ADDR, static_cast<int>(offsetof(S, NAME))) << "Field " << #NAME;
- ALL_FIELDS()
+ EXPECT_EQ(ADDR, static_cast<int>(offsetof(S, NAME))) << "Field " << #NAME;
+ ALL_FIELDS()
#undef FIELD
- }
+ }
#undef ALL_FIELDS
}
TEST_F(MslGeneratorImplTest, EmitType_Struct_Layout_ArrayVec3DefaultStride) {
- // array: size(64), align(16)
- auto* array = ty.array(ty.vec3<f32>(), 4);
-
- auto* s = Structure("S", {
- Member("a", ty.i32()),
- Member("b", array),
- Member("c", ty.i32()),
- });
-
- Global("G", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kRead,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(0),
- });
-
- GeneratorImpl& gen = Build();
-
- TextGenerator::TextBuffer buf;
- auto* sem_s = program->TypeOf(s)->As<sem::Struct>();
- ASSERT_TRUE(gen.EmitStructType(&buf, sem_s)) << gen.error();
-
- // ALL_FIELDS() calls the macro FIELD(ADDR, TYPE, NAME, SUFFIX)
- // for each field of the structure s.
-#define ALL_FIELDS() \
- FIELD(0x0000, int, a, /*NO SUFFIX*/) \
- FIELD(0x0004, int8_t, tint_pad, [12]) \
- FIELD(0x0010, float3, b, [4]) \
- FIELD(0x0050, int, c, /*NO SUFFIX*/) \
- FIELD(0x0054, int8_t, tint_pad_1, [12])
-
- // Check that the generated string is as expected.
-#define FIELD(ADDR, TYPE, NAME, SUFFIX) \
- " /* " #ADDR " */ " #TYPE " " #NAME #SUFFIX ";\n"
- auto* expect = "struct S {\n" ALL_FIELDS() "};\n";
+ // array: size(64), align(16)
+ auto* array = ty.array(ty.vec3<f32>(), 4_u);
+
+ auto* s = Structure("S", {
+ Member("a", ty.i32()),
+ Member("b", array),
+ Member("c", ty.i32()),
+ });
+
+ Global("G", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kRead,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(0),
+ });
+
+ GeneratorImpl& gen = Build();
+
+ TextGenerator::TextBuffer buf;
+ auto* sem_s = program->TypeOf(s)->As<sem::Struct>();
+ ASSERT_TRUE(gen.EmitStructType(&buf, sem_s)) << gen.error();
+
+ // ALL_FIELDS() calls the macro FIELD(ADDR, TYPE, NAME, SUFFIX)
+ // for each field of the structure s.
+#define ALL_FIELDS() \
+ FIELD(0x0000, int, a, /*NO SUFFIX*/) \
+ FIELD(0x0004, int8_t, tint_pad, [12]) \
+ FIELD(0x0010, float3, b, [4]) \
+ FIELD(0x0050, int, c, /*NO SUFFIX*/) \
+ FIELD(0x0054, int8_t, tint_pad_1, [12])
+
+ // Check that the generated string is as expected.
+#define FIELD(ADDR, TYPE, NAME, SUFFIX) " /* " #ADDR " */ " #TYPE " " #NAME #SUFFIX ";\n"
+ auto* expect = "struct S {\n" ALL_FIELDS() "};\n";
#undef FIELD
- EXPECT_EQ(buf.String(), expect);
+ EXPECT_EQ(buf.String(), expect);
}
TEST_F(MslGeneratorImplTest, AttemptTintPadSymbolCollision) {
- auto* s = Structure(
- "S",
- {
- // uses symbols tint_pad_[0..9] and tint_pad_[20..35]
- Member("tint_pad_2", ty.i32(), {MemberSize(32)}),
- Member("tint_pad_20", ty.f32(), {MemberAlign(128), MemberSize(128)}),
- Member("tint_pad_33", ty.vec2<f32>()),
- Member("tint_pad_1", ty.u32()),
- Member("tint_pad_3", ty.vec3<f32>()),
- Member("tint_pad_7", ty.u32()),
- Member("tint_pad_25", ty.vec4<f32>()),
- Member("tint_pad_5", ty.u32()),
- Member("tint_pad_27", ty.mat2x2<f32>()),
- Member("tint_pad_24", ty.u32()),
- Member("tint_pad_23", ty.mat2x3<f32>()),
- Member("tint_pad", ty.u32()),
- Member("tint_pad_8", ty.mat2x4<f32>()),
- Member("tint_pad_26", ty.u32()),
- Member("tint_pad_29", ty.mat3x2<f32>()),
- Member("tint_pad_6", ty.u32()),
- Member("tint_pad_22", ty.mat3x3<f32>()),
- Member("tint_pad_32", ty.u32()),
- Member("tint_pad_34", ty.mat3x4<f32>()),
- Member("tint_pad_35", ty.u32()),
- Member("tint_pad_30", ty.mat4x2<f32>()),
- Member("tint_pad_9", ty.u32()),
- Member("tint_pad_31", ty.mat4x3<f32>()),
- Member("tint_pad_28", ty.u32()),
- Member("tint_pad_4", ty.mat4x4<f32>()),
- Member("tint_pad_21", ty.f32()),
- });
-
- Global("G", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kRead,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(0),
- });
-
- GeneratorImpl& gen = Build();
-
- TextGenerator::TextBuffer buf;
- auto* sem_s = program->TypeOf(s)->As<sem::Struct>();
- ASSERT_TRUE(gen.EmitStructType(&buf, sem_s)) << gen.error();
- EXPECT_EQ(buf.String(), R"(struct S {
+ auto* s =
+ Structure("S", {
+ // uses symbols tint_pad_[0..9] and tint_pad_[20..35]
+ Member("tint_pad_2", ty.i32(), {MemberSize(32)}),
+ Member("tint_pad_20", ty.f32(), {MemberAlign(128), MemberSize(128)}),
+ Member("tint_pad_33", ty.vec2<f32>()),
+ Member("tint_pad_1", ty.u32()),
+ Member("tint_pad_3", ty.vec3<f32>()),
+ Member("tint_pad_7", ty.u32()),
+ Member("tint_pad_25", ty.vec4<f32>()),
+ Member("tint_pad_5", ty.u32()),
+ Member("tint_pad_27", ty.mat2x2<f32>()),
+ Member("tint_pad_24", ty.u32()),
+ Member("tint_pad_23", ty.mat2x3<f32>()),
+ Member("tint_pad", ty.u32()),
+ Member("tint_pad_8", ty.mat2x4<f32>()),
+ Member("tint_pad_26", ty.u32()),
+ Member("tint_pad_29", ty.mat3x2<f32>()),
+ Member("tint_pad_6", ty.u32()),
+ Member("tint_pad_22", ty.mat3x3<f32>()),
+ Member("tint_pad_32", ty.u32()),
+ Member("tint_pad_34", ty.mat3x4<f32>()),
+ Member("tint_pad_35", ty.u32()),
+ Member("tint_pad_30", ty.mat4x2<f32>()),
+ Member("tint_pad_9", ty.u32()),
+ Member("tint_pad_31", ty.mat4x3<f32>()),
+ Member("tint_pad_28", ty.u32()),
+ Member("tint_pad_4", ty.mat4x4<f32>()),
+ Member("tint_pad_21", ty.f32()),
+ });
+
+ Global("G", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kRead,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(0),
+ });
+
+ GeneratorImpl& gen = Build();
+
+ TextGenerator::TextBuffer buf;
+ auto* sem_s = program->TypeOf(s)->As<sem::Struct>();
+ ASSERT_TRUE(gen.EmitStructType(&buf, sem_s)) << gen.error();
+ EXPECT_EQ(buf.String(), R"(struct S {
/* 0x0000 */ int tint_pad_2;
/* 0x0004 */ int8_t tint_pad_10[124];
/* 0x0080 */ float tint_pad_20;
@@ -634,23 +626,23 @@ TEST_F(MslGeneratorImplTest, AttemptTintPadSymbolCollision) {
}
TEST_F(MslGeneratorImplTest, EmitType_Struct_WithAttribute) {
- auto* s = Structure("S", {
- Member("a", ty.i32()),
- Member("b", ty.f32()),
- });
-
- Global("G", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kRead,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(0),
- });
-
- GeneratorImpl& gen = Build();
-
- TextGenerator::TextBuffer buf;
- auto* sem_s = program->TypeOf(s)->As<sem::Struct>();
- ASSERT_TRUE(gen.EmitStructType(&buf, sem_s)) << gen.error();
- EXPECT_EQ(buf.String(), R"(struct S {
+ auto* s = Structure("S", {
+ Member("a", ty.i32()),
+ Member("b", ty.f32()),
+ });
+
+ Global("G", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kRead,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(0),
+ });
+
+ GeneratorImpl& gen = Build();
+
+ TextGenerator::TextBuffer buf;
+ auto* sem_s = program->TypeOf(s)->As<sem::Struct>();
+ ASSERT_TRUE(gen.EmitStructType(&buf, sem_s)) << gen.error();
+ EXPECT_EQ(buf.String(), R"(struct S {
/* 0x0000 */ int a;
/* 0x0004 */ float b;
};
@@ -658,186 +650,175 @@ TEST_F(MslGeneratorImplTest, EmitType_Struct_WithAttribute) {
}
TEST_F(MslGeneratorImplTest, EmitType_U32) {
- auto* u32 = create<sem::U32>();
+ auto* u32 = create<sem::U32>();
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitType(out, u32, "")) << gen.error();
- EXPECT_EQ(out.str(), "uint");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitType(out, u32, "")) << gen.error();
+ EXPECT_EQ(out.str(), "uint");
}
TEST_F(MslGeneratorImplTest, EmitType_Vector) {
- auto* f32 = create<sem::F32>();
- auto* vec3 = create<sem::Vector>(f32, 3u);
+ auto* f32 = create<sem::F32>();
+ auto* vec3 = create<sem::Vector>(f32, 3u);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitType(out, vec3, "")) << gen.error();
- EXPECT_EQ(out.str(), "float3");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitType(out, vec3, "")) << gen.error();
+ EXPECT_EQ(out.str(), "float3");
}
TEST_F(MslGeneratorImplTest, EmitType_Void) {
- auto* void_ = create<sem::Void>();
+ auto* void_ = create<sem::Void>();
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitType(out, void_, "")) << gen.error();
- EXPECT_EQ(out.str(), "void");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitType(out, void_, "")) << gen.error();
+ EXPECT_EQ(out.str(), "void");
}
TEST_F(MslGeneratorImplTest, EmitType_Sampler) {
- auto* sampler = create<sem::Sampler>(ast::SamplerKind::kSampler);
+ auto* sampler = create<sem::Sampler>(ast::SamplerKind::kSampler);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitType(out, sampler, "")) << gen.error();
- EXPECT_EQ(out.str(), "sampler");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitType(out, sampler, "")) << gen.error();
+ EXPECT_EQ(out.str(), "sampler");
}
TEST_F(MslGeneratorImplTest, EmitType_SamplerComparison) {
- auto* sampler = create<sem::Sampler>(ast::SamplerKind::kComparisonSampler);
+ auto* sampler = create<sem::Sampler>(ast::SamplerKind::kComparisonSampler);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitType(out, sampler, "")) << gen.error();
- EXPECT_EQ(out.str(), "sampler");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitType(out, sampler, "")) << gen.error();
+ EXPECT_EQ(out.str(), "sampler");
}
struct MslDepthTextureData {
- ast::TextureDimension dim;
- std::string result;
+ ast::TextureDimension dim;
+ std::string result;
};
inline std::ostream& operator<<(std::ostream& out, MslDepthTextureData data) {
- out << data.dim;
- return out;
+ out << data.dim;
+ return out;
}
using MslDepthTexturesTest = TestParamHelper<MslDepthTextureData>;
TEST_P(MslDepthTexturesTest, Emit) {
- auto params = GetParam();
+ auto params = GetParam();
- sem::DepthTexture s(params.dim);
+ sem::DepthTexture s(params.dim);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitType(out, &s, "")) << gen.error();
- EXPECT_EQ(out.str(), params.result);
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitType(out, &s, "")) << gen.error();
+ EXPECT_EQ(out.str(), params.result);
}
INSTANTIATE_TEST_SUITE_P(
MslGeneratorImplTest,
MslDepthTexturesTest,
- testing::Values(MslDepthTextureData{ast::TextureDimension::k2d,
- "depth2d<float, access::sample>"},
- MslDepthTextureData{ast::TextureDimension::k2dArray,
- "depth2d_array<float, access::sample>"},
- MslDepthTextureData{ast::TextureDimension::kCube,
- "depthcube<float, access::sample>"},
- MslDepthTextureData{
- ast::TextureDimension::kCubeArray,
- "depthcube_array<float, access::sample>"}));
+ testing::Values(
+ MslDepthTextureData{ast::TextureDimension::k2d, "depth2d<float, access::sample>"},
+ MslDepthTextureData{ast::TextureDimension::k2dArray,
+ "depth2d_array<float, access::sample>"},
+ MslDepthTextureData{ast::TextureDimension::kCube, "depthcube<float, access::sample>"},
+ MslDepthTextureData{ast::TextureDimension::kCubeArray,
+ "depthcube_array<float, access::sample>"}));
using MslDepthMultisampledTexturesTest = TestHelper;
TEST_F(MslDepthMultisampledTexturesTest, Emit) {
- sem::DepthMultisampledTexture s(ast::TextureDimension::k2d);
+ sem::DepthMultisampledTexture s(ast::TextureDimension::k2d);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitType(out, &s, "")) << gen.error();
- EXPECT_EQ(out.str(), "depth2d_ms<float, access::read>");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitType(out, &s, "")) << gen.error();
+ EXPECT_EQ(out.str(), "depth2d_ms<float, access::read>");
}
struct MslTextureData {
- ast::TextureDimension dim;
- std::string result;
+ ast::TextureDimension dim;
+ std::string result;
};
inline std::ostream& operator<<(std::ostream& out, MslTextureData data) {
- out << data.dim;
- return out;
+ out << data.dim;
+ return out;
}
using MslSampledtexturesTest = TestParamHelper<MslTextureData>;
TEST_P(MslSampledtexturesTest, Emit) {
- auto params = GetParam();
+ auto params = GetParam();
- auto* f32 = create<sem::F32>();
- auto* s = create<sem::SampledTexture>(params.dim, f32);
+ auto* f32 = create<sem::F32>();
+ auto* s = create<sem::SampledTexture>(params.dim, f32);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitType(out, s, "")) << gen.error();
- EXPECT_EQ(out.str(), params.result);
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitType(out, s, "")) << gen.error();
+ EXPECT_EQ(out.str(), params.result);
}
INSTANTIATE_TEST_SUITE_P(
MslGeneratorImplTest,
MslSampledtexturesTest,
- testing::Values(MslTextureData{ast::TextureDimension::k1d,
- "texture1d<float, access::sample>"},
- MslTextureData{ast::TextureDimension::k2d,
- "texture2d<float, access::sample>"},
- MslTextureData{ast::TextureDimension::k2dArray,
- "texture2d_array<float, access::sample>"},
- MslTextureData{ast::TextureDimension::k3d,
- "texture3d<float, access::sample>"},
- MslTextureData{ast::TextureDimension::kCube,
- "texturecube<float, access::sample>"},
- MslTextureData{
- ast::TextureDimension::kCubeArray,
- "texturecube_array<float, access::sample>"}));
+ testing::Values(
+ MslTextureData{ast::TextureDimension::k1d, "texture1d<float, access::sample>"},
+ MslTextureData{ast::TextureDimension::k2d, "texture2d<float, access::sample>"},
+ MslTextureData{ast::TextureDimension::k2dArray, "texture2d_array<float, access::sample>"},
+ MslTextureData{ast::TextureDimension::k3d, "texture3d<float, access::sample>"},
+ MslTextureData{ast::TextureDimension::kCube, "texturecube<float, access::sample>"},
+ MslTextureData{ast::TextureDimension::kCubeArray,
+ "texturecube_array<float, access::sample>"}));
TEST_F(MslGeneratorImplTest, Emit_TypeMultisampledTexture) {
- auto* u32 = create<sem::U32>();
- auto* ms = create<sem::MultisampledTexture>(ast::TextureDimension::k2d, u32);
+ auto* u32 = create<sem::U32>();
+ auto* ms = create<sem::MultisampledTexture>(ast::TextureDimension::k2d, u32);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitType(out, ms, "")) << gen.error();
- EXPECT_EQ(out.str(), "texture2d_ms<uint, access::read>");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitType(out, ms, "")) << gen.error();
+ EXPECT_EQ(out.str(), "texture2d_ms<uint, access::read>");
}
struct MslStorageTextureData {
- ast::TextureDimension dim;
- std::string result;
+ ast::TextureDimension dim;
+ std::string result;
};
inline std::ostream& operator<<(std::ostream& out, MslStorageTextureData data) {
- return out << data.dim;
+ return out << data.dim;
}
using MslStorageTexturesTest = TestParamHelper<MslStorageTextureData>;
TEST_P(MslStorageTexturesTest, Emit) {
- auto params = GetParam();
+ auto params = GetParam();
- auto* s = ty.storage_texture(params.dim, ast::TexelFormat::kR32Float,
- ast::Access::kWrite);
- Global("test_var", s,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(0),
- });
+ auto* s = ty.storage_texture(params.dim, ast::TexelFormat::kR32Float, ast::Access::kWrite);
+ Global("test_var", s,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(0),
+ });
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitType(out, program->TypeOf(s), "")) << gen.error();
- EXPECT_EQ(out.str(), params.result);
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitType(out, program->TypeOf(s), "")) << gen.error();
+ EXPECT_EQ(out.str(), params.result);
}
INSTANTIATE_TEST_SUITE_P(
MslGeneratorImplTest,
MslStorageTexturesTest,
- testing::Values(MslStorageTextureData{ast::TextureDimension::k1d,
- "texture1d<float, access::write>"},
- MslStorageTextureData{ast::TextureDimension::k2d,
- "texture2d<float, access::write>"},
- MslStorageTextureData{
- ast::TextureDimension::k2dArray,
- "texture2d_array<float, access::write>"},
- MslStorageTextureData{ast::TextureDimension::k3d,
- "texture3d<float, access::write>"}));
+ testing::Values(
+ MslStorageTextureData{ast::TextureDimension::k1d, "texture1d<float, access::write>"},
+ MslStorageTextureData{ast::TextureDimension::k2d, "texture2d<float, access::write>"},
+ MslStorageTextureData{ast::TextureDimension::k2dArray,
+ "texture2d_array<float, access::write>"},
+ MslStorageTextureData{ast::TextureDimension::k3d, "texture3d<float, access::write>"}));
} // namespace
} // namespace tint::writer::msl
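The two parameterized suites above pin the ast::TextureDimension to MSL type-name mapping; a standalone restatement of the sampled-texture half, using a local enum instead of Tint's types (illustration only, not code from this change):

    // Dimension -> MSL sampled-texture name, as asserted by MslSampledtexturesTest.
    #include <string>

    enum class Dim { k1d, k2d, k2dArray, k3d, kCube, kCubeArray };

    std::string MslSampledTextureName(Dim dim) {
        switch (dim) {
            case Dim::k1d:        return "texture1d<float, access::sample>";
            case Dim::k2d:        return "texture2d<float, access::sample>";
            case Dim::k2dArray:   return "texture2d_array<float, access::sample>";
            case Dim::k3d:        return "texture3d<float, access::sample>";
            case Dim::kCube:      return "texturecube<float, access::sample>";
            case Dim::kCubeArray: return "texturecube_array<float, access::sample>";
        }
        return "";
    }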
diff --git a/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_unary_op_test.cc b/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_unary_op_test.cc
index c40183d9b2d..a1aecd9e643 100644
--- a/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_unary_op_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_unary_op_test.cc
@@ -20,82 +20,77 @@ namespace {
using MslUnaryOpTest = TestHelper;
TEST_F(MslUnaryOpTest, AddressOf) {
- Global("expr", ty.f32(), ast::StorageClass::kPrivate);
- auto* op =
- create<ast::UnaryOpExpression>(ast::UnaryOp::kAddressOf, Expr("expr"));
- WrapInFunction(op);
+ Global("expr", ty.f32(), ast::StorageClass::kPrivate);
+ auto* op = create<ast::UnaryOpExpression>(ast::UnaryOp::kAddressOf, Expr("expr"));
+ WrapInFunction(op);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, op)) << gen.error();
- EXPECT_EQ(out.str(), "&(expr)");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, op)) << gen.error();
+ EXPECT_EQ(out.str(), "&(expr)");
}
TEST_F(MslUnaryOpTest, Complement) {
- Global("expr", ty.i32(), ast::StorageClass::kPrivate);
- auto* op =
- create<ast::UnaryOpExpression>(ast::UnaryOp::kComplement, Expr("expr"));
- WrapInFunction(op);
+ Global("expr", ty.i32(), ast::StorageClass::kPrivate);
+ auto* op = create<ast::UnaryOpExpression>(ast::UnaryOp::kComplement, Expr("expr"));
+ WrapInFunction(op);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, op)) << gen.error();
- EXPECT_EQ(out.str(), "~(expr)");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, op)) << gen.error();
+ EXPECT_EQ(out.str(), "~(expr)");
}
TEST_F(MslUnaryOpTest, Indirection) {
- Global("G", ty.f32(), ast::StorageClass::kPrivate);
- auto* p = Const(
- "expr", nullptr,
- create<ast::UnaryOpExpression>(ast::UnaryOp::kAddressOf, Expr("G")));
- auto* op =
- create<ast::UnaryOpExpression>(ast::UnaryOp::kIndirection, Expr("expr"));
- WrapInFunction(p, op);
-
- GeneratorImpl& gen = Build();
-
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, op)) << gen.error();
- EXPECT_EQ(out.str(), "*(expr)");
+ Global("G", ty.f32(), ast::StorageClass::kPrivate);
+ auto* p =
+ Let("expr", nullptr, create<ast::UnaryOpExpression>(ast::UnaryOp::kAddressOf, Expr("G")));
+ auto* op = create<ast::UnaryOpExpression>(ast::UnaryOp::kIndirection, Expr("expr"));
+ WrapInFunction(p, op);
+
+ GeneratorImpl& gen = Build();
+
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, op)) << gen.error();
+ EXPECT_EQ(out.str(), "*(expr)");
}
TEST_F(MslUnaryOpTest, Not) {
- Global("expr", ty.bool_(), ast::StorageClass::kPrivate);
- auto* op = create<ast::UnaryOpExpression>(ast::UnaryOp::kNot, Expr("expr"));
- WrapInFunction(op);
+ Global("expr", ty.bool_(), ast::StorageClass::kPrivate);
+ auto* op = create<ast::UnaryOpExpression>(ast::UnaryOp::kNot, Expr("expr"));
+ WrapInFunction(op);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, op)) << gen.error();
- EXPECT_EQ(out.str(), "!(expr)");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, op)) << gen.error();
+ EXPECT_EQ(out.str(), "!(expr)");
}
TEST_F(MslUnaryOpTest, Negation) {
- Global("expr", ty.i32(), ast::StorageClass::kPrivate);
- auto* op =
- create<ast::UnaryOpExpression>(ast::UnaryOp::kNegation, Expr("expr"));
- WrapInFunction(op);
+ Global("expr", ty.i32(), ast::StorageClass::kPrivate);
+ auto* op = create<ast::UnaryOpExpression>(ast::UnaryOp::kNegation, Expr("expr"));
+ WrapInFunction(op);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, op)) << gen.error();
- EXPECT_EQ(out.str(), "tint_unary_minus(expr)");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, op)) << gen.error();
+ EXPECT_EQ(out.str(), "tint_unary_minus(expr)");
}
TEST_F(MslUnaryOpTest, NegationOfIntMin) {
- auto* op = create<ast::UnaryOpExpression>(
- ast::UnaryOp::kNegation, Expr(std::numeric_limits<int32_t>::min()));
- WrapInFunction(op);
+ auto* op = create<ast::UnaryOpExpression>(ast::UnaryOp::kNegation,
+ Expr(i32(std::numeric_limits<int32_t>::min())));
+ WrapInFunction(op);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, op)) << gen.error();
- EXPECT_EQ(out.str(), "tint_unary_minus((-2147483647 - 1))");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, op)) << gen.error();
+ EXPECT_EQ(out.str(), "tint_unary_minus((-2147483647 - 1))");
}
} // namespace
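The NegationOfIntMin expectation above is worth a note: C, C++ and MSL have no negative integer literals, so the writer cannot print -2147483648 directly. A minimal check of the idiom it emits instead:

    // "-2147483648" would parse as unary minus applied to 2147483648, which does
    // not fit in a 32-bit signed int, so the generator spells INT32_MIN as
    // "(-2147483647 - 1)".
    #include <cstdint>
    #include <limits>

    static_assert(std::numeric_limits<int32_t>::min() == (-2147483647 - 1),
                  "overflow-free spelling of INT32_MIN");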
diff --git a/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_variable_decl_statement_test.cc b/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_variable_decl_statement_test.cc
index ce26ab91d6a..e41cc3b2813 100644
--- a/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_variable_decl_statement_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/msl/generator_impl_variable_decl_statement_test.cc
@@ -16,6 +16,8 @@
#include "src/tint/ast/variable_decl_statement.h"
#include "src/tint/writer/msl/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::msl {
namespace {
@@ -24,140 +26,139 @@ using ::testing::HasSubstr;
using MslGeneratorImplTest = TestHelper;
TEST_F(MslGeneratorImplTest, Emit_VariableDeclStatement) {
- auto* var = Var("a", ty.f32(), ast::StorageClass::kNone);
- auto* stmt = Decl(var);
- WrapInFunction(stmt);
+ auto* var = Var("a", ty.f32(), ast::StorageClass::kNone);
+ auto* stmt = Decl(var);
+ WrapInFunction(stmt);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(stmt)) << gen.error();
- EXPECT_EQ(gen.result(), " float a = 0.0f;\n");
+ ASSERT_TRUE(gen.EmitStatement(stmt)) << gen.error();
+ EXPECT_EQ(gen.result(), " float a = 0.0f;\n");
}
TEST_F(MslGeneratorImplTest, Emit_VariableDeclStatement_Const) {
- auto* var = Const("a", ty.f32(), Construct(ty.f32()));
- auto* stmt = Decl(var);
- WrapInFunction(stmt);
+ auto* var = Let("a", ty.f32(), Construct(ty.f32()));
+ auto* stmt = Decl(var);
+ WrapInFunction(stmt);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(stmt)) << gen.error();
- EXPECT_EQ(gen.result(), " float const a = float();\n");
+ ASSERT_TRUE(gen.EmitStatement(stmt)) << gen.error();
+ EXPECT_EQ(gen.result(), " float const a = 0.0f;\n");
}
TEST_F(MslGeneratorImplTest, Emit_VariableDeclStatement_Array) {
- auto* var = Var("a", ty.array<f32, 5>(), ast::StorageClass::kNone);
- auto* stmt = Decl(var);
- WrapInFunction(stmt);
+ auto* var = Var("a", ty.array<f32, 5>(), ast::StorageClass::kNone);
+ auto* stmt = Decl(var);
+ WrapInFunction(stmt);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(stmt)) << gen.error();
- EXPECT_EQ(gen.result(), " float a[5] = {0.0f};\n");
+ ASSERT_TRUE(gen.EmitStatement(stmt)) << gen.error();
+ EXPECT_EQ(gen.result(), " float a[5] = {0.0f};\n");
}
TEST_F(MslGeneratorImplTest, Emit_VariableDeclStatement_Struct) {
- auto* s = Structure("S", {
- Member("a", ty.f32()),
- Member("b", ty.f32()),
- });
+ auto* s = Structure("S", {
+ Member("a", ty.f32()),
+ Member("b", ty.f32()),
+ });
- auto* var = Var("a", ty.Of(s), ast::StorageClass::kNone);
- auto* stmt = Decl(var);
- WrapInFunction(stmt);
+ auto* var = Var("a", ty.Of(s), ast::StorageClass::kNone);
+ auto* stmt = Decl(var);
+ WrapInFunction(stmt);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(stmt)) << gen.error();
- EXPECT_EQ(gen.result(), R"( S a = {};
+ ASSERT_TRUE(gen.EmitStatement(stmt)) << gen.error();
+ EXPECT_EQ(gen.result(), R"( S a = {};
)");
}
TEST_F(MslGeneratorImplTest, Emit_VariableDeclStatement_Vector) {
- auto* var = Var("a", ty.vec2<f32>());
- auto* stmt = Decl(var);
- WrapInFunction(stmt);
+ auto* var = Var("a", ty.vec2<f32>());
+ auto* stmt = Decl(var);
+ WrapInFunction(stmt);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(stmt)) << gen.error();
- EXPECT_EQ(gen.result(), " float2 a = 0.0f;\n");
+ ASSERT_TRUE(gen.EmitStatement(stmt)) << gen.error();
+ EXPECT_EQ(gen.result(), " float2 a = 0.0f;\n");
}
TEST_F(MslGeneratorImplTest, Emit_VariableDeclStatement_Matrix) {
- auto* var = Var("a", ty.mat3x2<f32>());
+ auto* var = Var("a", ty.mat3x2<f32>());
- auto* stmt = Decl(var);
- WrapInFunction(stmt);
+ auto* stmt = Decl(var);
+ WrapInFunction(stmt);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(stmt)) << gen.error();
- EXPECT_EQ(gen.result(), " float3x2 a = float3x2(0.0f);\n");
+ ASSERT_TRUE(gen.EmitStatement(stmt)) << gen.error();
+ EXPECT_EQ(gen.result(), " float3x2 a = float3x2(0.0f);\n");
}
TEST_F(MslGeneratorImplTest, Emit_VariableDeclStatement_Private) {
- Global("a", ty.f32(), ast::StorageClass::kPrivate);
+ Global("a", ty.f32(), ast::StorageClass::kPrivate);
- WrapInFunction(Expr("a"));
+ WrapInFunction(Expr("a"));
- GeneratorImpl& gen = SanitizeAndBuild();
+ GeneratorImpl& gen = SanitizeAndBuild();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr("thread float tint_symbol_1 = 0.0f;\n"));
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr("thread float tint_symbol_1 = 0.0f;\n"));
}
TEST_F(MslGeneratorImplTest, Emit_VariableDeclStatement_Initializer_Private) {
- GlobalConst("initializer", ty.f32(), Expr(0.f));
- Global("a", ty.f32(), ast::StorageClass::kPrivate, Expr("initializer"));
+ GlobalConst("initializer", ty.f32(), Expr(0_f));
+ Global("a", ty.f32(), ast::StorageClass::kPrivate, Expr("initializer"));
- WrapInFunction(Expr("a"));
+ WrapInFunction(Expr("a"));
- GeneratorImpl& gen = SanitizeAndBuild();
+ GeneratorImpl& gen = SanitizeAndBuild();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(),
- HasSubstr("thread float tint_symbol_1 = initializer;\n"));
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr("thread float tint_symbol_1 = 0.0f;\n float const tint_symbol = tint_symbol_1;\n return;\n"));
}
TEST_F(MslGeneratorImplTest, Emit_VariableDeclStatement_Workgroup) {
- Global("a", ty.f32(), ast::StorageClass::kWorkgroup);
+ Global("a", ty.f32(), ast::StorageClass::kWorkgroup);
- WrapInFunction(Expr("a"));
+ WrapInFunction(Expr("a"));
- GeneratorImpl& gen = SanitizeAndBuild();
+ GeneratorImpl& gen = SanitizeAndBuild();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr("threadgroup float tint_symbol_2;\n"));
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr("threadgroup float tint_symbol_2;\n"));
}
TEST_F(MslGeneratorImplTest, Emit_VariableDeclStatement_Initializer_ZeroVec) {
- auto* zero_vec = vec3<f32>();
+ auto* zero_vec = vec3<f32>();
- auto* var = Var("a", ty.vec3<f32>(), ast::StorageClass::kNone, zero_vec);
- auto* stmt = Decl(var);
- WrapInFunction(stmt);
+ auto* var = Var("a", ty.vec3<f32>(), ast::StorageClass::kNone, zero_vec);
+ auto* stmt = Decl(var);
+ WrapInFunction(stmt);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.EmitStatement(stmt)) << gen.error();
- EXPECT_EQ(gen.result(), R"(float3 a = float3();
+ ASSERT_TRUE(gen.EmitStatement(stmt)) << gen.error();
+ EXPECT_EQ(gen.result(), R"(float3 a = float3(0.0f);
)");
}
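The changed expectations in this file all point the same way: zero-value type constructors are now printed as explicit zero literals rather than empty constructor calls (before/after MSL taken from the assertions above; illustration only):

    // before: float const a = float();    float3 a = float3();
    // after:  float const a = 0.0f;       float3 a = float3(0.0f);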
diff --git a/chromium/third_party/dawn/src/tint/writer/msl/test_helper.h b/chromium/third_party/dawn/src/tint/writer/msl/test_helper.h
index 25fb046f02f..f1b530d22b1 100644
--- a/chromium/third_party/dawn/src/tint/writer/msl/test_helper.h
+++ b/chromium/third_party/dawn/src/tint/writer/msl/test_helper.h
@@ -29,66 +29,64 @@ namespace tint::writer::msl {
/// Helper class for testing
template <typename BASE>
class TestHelperBase : public BASE, public ProgramBuilder {
- public:
- TestHelperBase() = default;
- ~TestHelperBase() override = default;
+ public:
+ TestHelperBase() = default;
+ ~TestHelperBase() override = default;
- /// Builds and returns a GeneratorImpl from the program.
- /// @note The generator is only built once. Multiple calls to Build() will
- /// return the same GeneratorImpl without rebuilding.
- /// @return the built generator
- GeneratorImpl& Build() {
- if (gen_) {
- return *gen_;
+ /// Builds and returns a GeneratorImpl from the program.
+ /// @note The generator is only built once. Multiple calls to Build() will
+ /// return the same GeneratorImpl without rebuilding.
+ /// @return the built generator
+ GeneratorImpl& Build() {
+ if (gen_) {
+ return *gen_;
+ }
+ [&]() {
+ ASSERT_TRUE(IsValid()) << "Builder program is not valid\n"
+ << diag::Formatter().format(Diagnostics());
+ }();
+ program = std::make_unique<Program>(std::move(*this));
+ [&]() {
+ ASSERT_TRUE(program->IsValid()) << diag::Formatter().format(program->Diagnostics());
+ }();
+ gen_ = std::make_unique<GeneratorImpl>(program.get());
+ return *gen_;
}
- [&]() {
- ASSERT_TRUE(IsValid()) << "Builder program is not valid\n"
- << diag::Formatter().format(Diagnostics());
- }();
- program = std::make_unique<Program>(std::move(*this));
- [&]() {
- ASSERT_TRUE(program->IsValid())
- << diag::Formatter().format(program->Diagnostics());
- }();
- gen_ = std::make_unique<GeneratorImpl>(program.get());
- return *gen_;
- }
- /// Builds the program, runs the program through the transform::Msl sanitizer
- /// and returns a GeneratorImpl from the sanitized program.
- /// @param options The MSL generator options.
- /// @note The generator is only built once. Multiple calls to Build() will
- /// return the same GeneratorImpl without rebuilding.
- /// @return the built generator
- GeneratorImpl& SanitizeAndBuild(const Options& options = {}) {
- if (gen_) {
- return *gen_;
- }
- [&]() {
- ASSERT_TRUE(IsValid()) << "Builder program is not valid\n"
- << diag::Formatter().format(Diagnostics());
- }();
- program = std::make_unique<Program>(std::move(*this));
- [&]() {
- ASSERT_TRUE(program->IsValid())
- << diag::Formatter().format(program->Diagnostics());
- }();
+ /// Builds the program, runs the program through the transform::Msl sanitizer
+ /// and returns a GeneratorImpl from the sanitized program.
+ /// @param options The MSL generator options.
+ /// @note The generator is only built once. Multiple calls to Build() will
+ /// return the same GeneratorImpl without rebuilding.
+ /// @return the built generator
+ GeneratorImpl& SanitizeAndBuild(const Options& options = {}) {
+ if (gen_) {
+ return *gen_;
+ }
+ [&]() {
+ ASSERT_TRUE(IsValid()) << "Builder program is not valid\n"
+ << diag::Formatter().format(Diagnostics());
+ }();
+ program = std::make_unique<Program>(std::move(*this));
+ [&]() {
+ ASSERT_TRUE(program->IsValid()) << diag::Formatter().format(program->Diagnostics());
+ }();
- auto result = Sanitize(program.get(), options);
- [&]() {
- ASSERT_TRUE(result.program.IsValid())
- << diag::Formatter().format(result.program.Diagnostics());
- }();
- *program = std::move(result.program);
- gen_ = std::make_unique<GeneratorImpl>(program.get());
- return *gen_;
- }
+ auto result = Sanitize(program.get(), options);
+ [&]() {
+ ASSERT_TRUE(result.program.IsValid())
+ << diag::Formatter().format(result.program.Diagnostics());
+ }();
+ *program = std::move(result.program);
+ gen_ = std::make_unique<GeneratorImpl>(program.get());
+ return *gen_;
+ }
- /// The program built with a call to Build()
- std::unique_ptr<Program> program;
+ /// The program built with a call to Build()
+ std::unique_ptr<Program> program;
- private:
- std::unique_ptr<GeneratorImpl> gen_;
+ private:
+ std::unique_ptr<GeneratorImpl> gen_;
};
using TestHelper = TestHelperBase<testing::Test>;
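A usage sketch of the helper above, mirroring the tests in this change: the program is built once, and every later call to Build() returns the cached GeneratorImpl (names as used elsewhere in this diff; this test does not exist in the tree):

    TEST_F(MslGeneratorImplTest, Sketch_BuildReturnsCachedGenerator) {
        WrapInFunction(Decl(Var("a", ty.f32(), ast::StorageClass::kNone)));

        GeneratorImpl& gen1 = Build();
        GeneratorImpl& gen2 = Build();  // second call: no rebuild, same generator
        EXPECT_EQ(&gen1, &gen2);
    }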
diff --git a/chromium/third_party/dawn/src/tint/writer/spirv/binary_writer.cc b/chromium/third_party/dawn/src/tint/writer/spirv/binary_writer.cc
index aa32f38e0e8..69ac35346e9 100644
--- a/chromium/third_party/dawn/src/tint/writer/spirv/binary_writer.cc
+++ b/chromium/third_party/dawn/src/tint/writer/spirv/binary_writer.cc
@@ -15,6 +15,7 @@
#include "src/tint/writer/spirv/binary_writer.h"
#include <cstring>
+#include <string>
namespace tint::writer::spirv {
namespace {
@@ -28,46 +29,47 @@ BinaryWriter::BinaryWriter() = default;
BinaryWriter::~BinaryWriter() = default;
void BinaryWriter::WriteBuilder(Builder* builder) {
- out_.reserve(builder->total_size());
- builder->iterate(
- [this](const Instruction& inst) { this->process_instruction(inst); });
+ out_.reserve(builder->total_size());
+ builder->iterate([this](const Instruction& inst) { this->process_instruction(inst); });
}
void BinaryWriter::WriteInstruction(const Instruction& inst) {
- process_instruction(inst);
+ process_instruction(inst);
}
void BinaryWriter::WriteHeader(uint32_t bound) {
- out_.push_back(spv::MagicNumber);
- out_.push_back(0x00010300); // Version 1.3
- out_.push_back(kGeneratorId);
- out_.push_back(bound);
- out_.push_back(0);
+ out_.push_back(spv::MagicNumber);
+ out_.push_back(0x00010300); // Version 1.3
+ out_.push_back(kGeneratorId);
+ out_.push_back(bound);
+ out_.push_back(0);
}
void BinaryWriter::process_instruction(const Instruction& inst) {
- out_.push_back(inst.word_length() << 16 |
- static_cast<uint32_t>(inst.opcode()));
- for (const auto& op : inst.operands()) {
- process_op(op);
- }
+ out_.push_back(inst.word_length() << 16 | static_cast<uint32_t>(inst.opcode()));
+ for (const auto& op : inst.operands()) {
+ process_op(op);
+ }
}
void BinaryWriter::process_op(const Operand& op) {
- if (op.IsFloat()) {
- // Allocate space for the float
- out_.push_back(0);
- auto f = op.to_f();
- uint8_t* ptr = reinterpret_cast<uint8_t*>(out_.data() + (out_.size() - 1));
- memcpy(ptr, &f, 4);
- } else if (op.IsInt()) {
- out_.push_back(op.to_i());
- } else {
- auto idx = out_.size();
- const auto& str = op.to_s();
- out_.resize(out_.size() + op.length(), 0);
- memcpy(out_.data() + idx, str.c_str(), str.size() + 1);
- }
+ if (auto* i = std::get_if<uint32_t>(&op)) {
+ out_.push_back(*i);
+ return;
+ }
+ if (auto* f = std::get_if<float>(&op)) {
+ // Allocate space for the float
+ out_.push_back(0);
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(out_.data() + (out_.size() - 1));
+ memcpy(ptr, f, 4);
+ return;
+ }
+ if (auto* str = std::get_if<std::string>(&op)) {
+ auto idx = out_.size();
+ out_.resize(out_.size() + OperandLength(op), 0);
+ memcpy(out_.data() + idx, str->c_str(), str->size() + 1);
+ return;
+ }
}
} // namespace tint::writer::spirv
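The rewritten process_op() above treats Operand as a variant of uint32_t, float and std::string. A minimal standalone model of that handling, assuming that variant shape and the usual SPIR-V 4-byte-word padding for strings (the real Operand/OperandLength live in the writer's operand.h and are not reproduced here):

    #include <cstdint>
    #include <cstring>
    #include <string>
    #include <variant>
    #include <vector>

    using Op = std::variant<uint32_t, float, std::string>;

    void Append(std::vector<uint32_t>& out, const Op& op) {
        if (auto* i = std::get_if<uint32_t>(&op)) {
            out.push_back(*i);                     // integer operands take one word
        } else if (auto* f = std::get_if<float>(&op)) {
            out.push_back(0);                      // reserve a word, then bit-copy the float
            std::memcpy(&out.back(), f, 4);
        } else if (auto* s = std::get_if<std::string>(&op)) {
            size_t words = s->size() / 4 + 1;      // NUL terminator, padded to a word boundary
            size_t idx = out.size();
            out.resize(out.size() + words, 0);
            std::memcpy(out.data() + idx, s->c_str(), s->size() + 1);
        }
    }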
diff --git a/chromium/third_party/dawn/src/tint/writer/spirv/binary_writer.h b/chromium/third_party/dawn/src/tint/writer/spirv/binary_writer.h
index a071d7cabd7..e1e7f683988 100644
--- a/chromium/third_party/dawn/src/tint/writer/spirv/binary_writer.h
+++ b/chromium/third_party/dawn/src/tint/writer/spirv/binary_writer.h
@@ -23,36 +23,36 @@ namespace tint::writer::spirv {
/// Writer to convert from builder to SPIR-V binary
class BinaryWriter {
- public:
- /// Constructor
- BinaryWriter();
- ~BinaryWriter();
+ public:
+ /// Constructor
+ BinaryWriter();
+ ~BinaryWriter();
- /// Writes the SPIR-V header.
- /// @param bound the bound to output
- void WriteHeader(uint32_t bound);
+ /// Writes the SPIR-V header.
+ /// @param bound the bound to output
+ void WriteHeader(uint32_t bound);
- /// Writes the given builder data into a binary. Note, this does not emit
- /// the SPIR-V header. You **must** call WriteHeader() before WriteBuilder()
- /// if you want the SPIR-V to be emitted.
- /// @param builder the builder to assemble from
- void WriteBuilder(Builder* builder);
+ /// Writes the given builder data into a binary. Note, this does not emit
+ /// the SPIR-V header. You **must** call WriteHeader() before WriteBuilder()
+ /// if you want the SPIR-V to be emitted.
+ /// @param builder the builder to assemble from
+ void WriteBuilder(Builder* builder);
- /// Writes the given instruction into the binary.
- /// @param inst the instruction to assemble
- void WriteInstruction(const Instruction& inst);
+ /// Writes the given instruction into the binary.
+ /// @param inst the instruction to assemble
+ void WriteInstruction(const Instruction& inst);
- /// @returns the assembled SPIR-V
- const std::vector<uint32_t>& result() const { return out_; }
+ /// @returns the assembled SPIR-V
+ const std::vector<uint32_t>& result() const { return out_; }
- /// @returns the assembled SPIR-V
- std::vector<uint32_t>& result() { return out_; }
+ /// @returns the assembled SPIR-V
+ std::vector<uint32_t>& result() { return out_; }
- private:
- void process_instruction(const Instruction& inst);
- void process_op(const Operand& op);
+ private:
+ void process_instruction(const Instruction& inst);
+ void process_op(const Operand& op);
- std::vector<uint32_t> out_;
+ std::vector<uint32_t> out_;
};
} // namespace tint::writer::spirv
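A minimal use of the class above, respecting the documented requirement that WriteHeader() precede WriteBuilder(); the Assemble() wrapper and its `bound` parameter are illustrative, not part of this change:

    // Order of operations for BinaryWriter (requires the tint SPIR-V writer headers).
    std::vector<uint32_t> Assemble(tint::writer::spirv::Builder* b, uint32_t bound) {
        tint::writer::spirv::BinaryWriter bw;
        bw.WriteHeader(bound);   // header first, or the binary lacks the magic/version words
        bw.WriteBuilder(b);      // then the instructions accumulated in the builder
        return bw.result();
    }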
diff --git a/chromium/third_party/dawn/src/tint/writer/spirv/binary_writer_test.cc b/chromium/third_party/dawn/src/tint/writer/spirv/binary_writer_test.cc
index 5d6b8b8f55c..11812cfd288 100644
--- a/chromium/third_party/dawn/src/tint/writer/spirv/binary_writer_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/spirv/binary_writer_test.cc
@@ -20,106 +20,106 @@ namespace {
using BinaryWriterTest = TestHelper;
TEST_F(BinaryWriterTest, Preamble) {
- BinaryWriter bw;
- bw.WriteHeader(5);
-
- auto res = bw.result();
- ASSERT_EQ(res.size(), 5u);
- EXPECT_EQ(res[0], spv::MagicNumber);
- EXPECT_EQ(res[1], 0x00010300u); // SPIR-V 1.3
- EXPECT_EQ(res[2], 23u << 16); // Generator ID
- EXPECT_EQ(res[3], 5u); // ID Bound
- EXPECT_EQ(res[4], 0u); // Reserved
+ BinaryWriter bw;
+ bw.WriteHeader(5);
+
+ auto res = bw.result();
+ ASSERT_EQ(res.size(), 5u);
+ EXPECT_EQ(res[0], spv::MagicNumber);
+ EXPECT_EQ(res[1], 0x00010300u); // SPIR-V 1.3
+ EXPECT_EQ(res[2], 23u << 16); // Generator ID
+ EXPECT_EQ(res[3], 5u); // ID Bound
+ EXPECT_EQ(res[4], 0u); // Reserved
}
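The five header words checked above, restated with the generator word split into its two halves (comment-level summary; the values are the ones the test asserts):

    #include <cstdint>

    struct SpvHeaderWords {
        uint32_t magic;      // spv::MagicNumber (0x07230203)
        uint32_t version;    // 0x00010300 -> SPIR-V 1.3
        uint32_t generator;  // tool ID in the high 16 bits (23 << 16 here), tool version in the low 16
        uint32_t bound;      // one past the largest result ID in the module
        uint32_t schema;     // reserved, written as 0
    };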
TEST_F(BinaryWriterTest, Float) {
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_annot(spv::Op::OpKill, {Operand::Float(2.4f)});
- BinaryWriter bw;
- bw.WriteBuilder(&b);
+ b.push_annot(spv::Op::OpKill, {Operand(2.4f)});
+ BinaryWriter bw;
+ bw.WriteBuilder(&b);
- auto res = bw.result();
- ASSERT_EQ(res.size(), 2u);
- float f;
- memcpy(&f, res.data() + 1, 4);
- EXPECT_EQ(f, 2.4f);
+ auto res = bw.result();
+ ASSERT_EQ(res.size(), 2u);
+ float f;
+ memcpy(&f, res.data() + 1, 4);
+ EXPECT_EQ(f, 2.4f);
}
TEST_F(BinaryWriterTest, Int) {
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_annot(spv::Op::OpKill, {Operand::Int(2)});
- BinaryWriter bw;
- bw.WriteBuilder(&b);
+ b.push_annot(spv::Op::OpKill, {Operand(2u)});
+ BinaryWriter bw;
+ bw.WriteBuilder(&b);
- auto res = bw.result();
- ASSERT_EQ(res.size(), 2u);
- EXPECT_EQ(res[1], 2u);
+ auto res = bw.result();
+ ASSERT_EQ(res.size(), 2u);
+ EXPECT_EQ(res[1], 2u);
}
TEST_F(BinaryWriterTest, String) {
- spirv::Builder& b = Build();
-
- b.push_annot(spv::Op::OpKill, {Operand::String("my_string")});
- BinaryWriter bw;
- bw.WriteBuilder(&b);
-
- auto res = bw.result();
- ASSERT_EQ(res.size(), 4u);
-
- uint8_t* v = reinterpret_cast<uint8_t*>(res.data() + 1);
- EXPECT_EQ(v[0], 'm');
- EXPECT_EQ(v[1], 'y');
- EXPECT_EQ(v[2], '_');
- EXPECT_EQ(v[3], 's');
- EXPECT_EQ(v[4], 't');
- EXPECT_EQ(v[5], 'r');
- EXPECT_EQ(v[6], 'i');
- EXPECT_EQ(v[7], 'n');
- EXPECT_EQ(v[8], 'g');
- EXPECT_EQ(v[9], '\0');
- EXPECT_EQ(v[10], '\0');
- EXPECT_EQ(v[11], '\0');
+ spirv::Builder& b = Build();
+
+ b.push_annot(spv::Op::OpKill, {Operand("my_string")});
+ BinaryWriter bw;
+ bw.WriteBuilder(&b);
+
+ auto res = bw.result();
+ ASSERT_EQ(res.size(), 4u);
+
+ uint8_t* v = reinterpret_cast<uint8_t*>(res.data() + 1);
+ EXPECT_EQ(v[0], 'm');
+ EXPECT_EQ(v[1], 'y');
+ EXPECT_EQ(v[2], '_');
+ EXPECT_EQ(v[3], 's');
+ EXPECT_EQ(v[4], 't');
+ EXPECT_EQ(v[5], 'r');
+ EXPECT_EQ(v[6], 'i');
+ EXPECT_EQ(v[7], 'n');
+ EXPECT_EQ(v[8], 'g');
+ EXPECT_EQ(v[9], '\0');
+ EXPECT_EQ(v[10], '\0');
+ EXPECT_EQ(v[11], '\0');
}
TEST_F(BinaryWriterTest, String_Multiple4Length) {
- spirv::Builder& b = Build();
-
- b.push_annot(spv::Op::OpKill, {Operand::String("mystring")});
- BinaryWriter bw;
- bw.WriteBuilder(&b);
-
- auto res = bw.result();
- ASSERT_EQ(res.size(), 4u);
-
- uint8_t* v = reinterpret_cast<uint8_t*>(res.data() + 1);
- EXPECT_EQ(v[0], 'm');
- EXPECT_EQ(v[1], 'y');
- EXPECT_EQ(v[2], 's');
- EXPECT_EQ(v[3], 't');
- EXPECT_EQ(v[4], 'r');
- EXPECT_EQ(v[5], 'i');
- EXPECT_EQ(v[6], 'n');
- EXPECT_EQ(v[7], 'g');
- EXPECT_EQ(v[8], '\0');
- EXPECT_EQ(v[9], '\0');
- EXPECT_EQ(v[10], '\0');
- EXPECT_EQ(v[11], '\0');
+ spirv::Builder& b = Build();
+
+ b.push_annot(spv::Op::OpKill, {Operand("mystring")});
+ BinaryWriter bw;
+ bw.WriteBuilder(&b);
+
+ auto res = bw.result();
+ ASSERT_EQ(res.size(), 4u);
+
+ uint8_t* v = reinterpret_cast<uint8_t*>(res.data() + 1);
+ EXPECT_EQ(v[0], 'm');
+ EXPECT_EQ(v[1], 'y');
+ EXPECT_EQ(v[2], 's');
+ EXPECT_EQ(v[3], 't');
+ EXPECT_EQ(v[4], 'r');
+ EXPECT_EQ(v[5], 'i');
+ EXPECT_EQ(v[6], 'n');
+ EXPECT_EQ(v[7], 'g');
+ EXPECT_EQ(v[8], '\0');
+ EXPECT_EQ(v[9], '\0');
+ EXPECT_EQ(v[10], '\0');
+ EXPECT_EQ(v[11], '\0');
}
TEST_F(BinaryWriterTest, TestInstructionWriter) {
- Instruction i1{spv::Op::OpKill, {Operand::Int(2)}};
- Instruction i2{spv::Op::OpKill, {Operand::Int(4)}};
+ Instruction i1{spv::Op::OpKill, {Operand(2u)}};
+ Instruction i2{spv::Op::OpKill, {Operand(4u)}};
- BinaryWriter bw;
- bw.WriteInstruction(i1);
- bw.WriteInstruction(i2);
+ BinaryWriter bw;
+ bw.WriteInstruction(i1);
+ bw.WriteInstruction(i2);
- auto res = bw.result();
- ASSERT_EQ(res.size(), 4u);
- EXPECT_EQ(res[1], 2u);
- EXPECT_EQ(res[3], 4u);
+ auto res = bw.result();
+ ASSERT_EQ(res.size(), 4u);
+ EXPECT_EQ(res[1], 2u);
+ EXPECT_EQ(res[3], 4u);
}
} // namespace
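The 4-word results asserted in both string tests above follow from the word-padding rule; a quick check of the arithmetic (string payload words plus one opcode/length word):

    // "my_string": 9 bytes + NUL = 10 -> 3 words;  "mystring": 8 bytes + NUL = 9 -> 3 words.
    // Add the leading (word_length << 16 | opcode) word to get the 4 words asserted above.
    constexpr unsigned StringWords(unsigned len) { return len / 4 + 1; }
    static_assert(StringWords(9) == 3 && StringWords(8) == 3, "padded string length");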
diff --git a/chromium/third_party/dawn/src/tint/writer/spirv/builder.cc b/chromium/third_party/dawn/src/tint/writer/spirv/builder.cc
index c03334489da..18f983af354 100644
--- a/chromium/third_party/dawn/src/tint/writer/spirv/builder.cc
+++ b/chromium/third_party/dawn/src/tint/writer/spirv/builder.cc
@@ -24,23 +24,25 @@
#include "src/tint/ast/internal_attribute.h"
#include "src/tint/ast/traverse_expressions.h"
#include "src/tint/sem/array.h"
-#include "src/tint/sem/atomic_type.h"
+#include "src/tint/sem/atomic.h"
#include "src/tint/sem/builtin.h"
#include "src/tint/sem/call.h"
-#include "src/tint/sem/depth_multisampled_texture_type.h"
-#include "src/tint/sem/depth_texture_type.h"
+#include "src/tint/sem/constant.h"
+#include "src/tint/sem/depth_multisampled_texture.h"
+#include "src/tint/sem/depth_texture.h"
#include "src/tint/sem/function.h"
+#include "src/tint/sem/materialize.h"
#include "src/tint/sem/member_accessor_expression.h"
#include "src/tint/sem/module.h"
-#include "src/tint/sem/multisampled_texture_type.h"
-#include "src/tint/sem/reference_type.h"
-#include "src/tint/sem/sampled_texture_type.h"
+#include "src/tint/sem/multisampled_texture.h"
+#include "src/tint/sem/reference.h"
+#include "src/tint/sem/sampled_texture.h"
#include "src/tint/sem/statement.h"
#include "src/tint/sem/struct.h"
#include "src/tint/sem/type_constructor.h"
#include "src/tint/sem/type_conversion.h"
#include "src/tint/sem/variable.h"
-#include "src/tint/sem/vector_type.h"
+#include "src/tint/sem/vector.h"
#include "src/tint/transform/add_spirv_block_attribute.h"
#include "src/tint/utils/defer.h"
#include "src/tint/utils/map.h"
@@ -54,35 +56,36 @@ using BuiltinType = sem::BuiltinType;
const char kGLSLstd450[] = "GLSL.std.450";
uint32_t size_of(const InstructionList& instructions) {
- uint32_t size = 0;
- for (const auto& inst : instructions)
- size += inst.word_length();
+ uint32_t size = 0;
+ for (const auto& inst : instructions) {
+ size += inst.word_length();
+ }
- return size;
+ return size;
}
uint32_t pipeline_stage_to_execution_model(ast::PipelineStage stage) {
- SpvExecutionModel model = SpvExecutionModelVertex;
-
- switch (stage) {
- case ast::PipelineStage::kFragment:
- model = SpvExecutionModelFragment;
- break;
- case ast::PipelineStage::kVertex:
- model = SpvExecutionModelVertex;
- break;
- case ast::PipelineStage::kCompute:
- model = SpvExecutionModelGLCompute;
- break;
- case ast::PipelineStage::kNone:
- model = SpvExecutionModelMax;
- break;
- }
- return model;
+ SpvExecutionModel model = SpvExecutionModelVertex;
+
+ switch (stage) {
+ case ast::PipelineStage::kFragment:
+ model = SpvExecutionModelFragment;
+ break;
+ case ast::PipelineStage::kVertex:
+ model = SpvExecutionModelVertex;
+ break;
+ case ast::PipelineStage::kCompute:
+ model = SpvExecutionModelGLCompute;
+ break;
+ case ast::PipelineStage::kNone:
+ model = SpvExecutionModelMax;
+ break;
+ }
+ return model;
}
bool LastIsFallthrough(const ast::BlockStatement* stmts) {
- return !stmts->Empty() && stmts->Last()->Is<ast::FallthroughStatement>();
+ return !stmts->Empty() && stmts->Last()->Is<ast::FallthroughStatement>();
}
/// Returns the matrix type that is `type` or that is wrapped by
@@ -90,151 +93,151 @@ bool LastIsFallthrough(const ast::BlockStatement* stmts) {
/// @param type the given type, which must not be null
/// @returns the nested matrix type, or nullptr if none
const sem::Matrix* GetNestedMatrixType(const sem::Type* type) {
- while (auto* arr = type->As<sem::Array>()) {
- type = arr->ElemType();
- }
- return type->As<sem::Matrix>();
+ while (auto* arr = type->As<sem::Array>()) {
+ type = arr->ElemType();
+ }
+ return type->As<sem::Matrix>();
}
uint32_t builtin_to_glsl_method(const sem::Builtin* builtin) {
- switch (builtin->Type()) {
- case BuiltinType::kAcos:
- return GLSLstd450Acos;
- case BuiltinType::kAsin:
- return GLSLstd450Asin;
- case BuiltinType::kAtan:
- return GLSLstd450Atan;
- case BuiltinType::kAtan2:
- return GLSLstd450Atan2;
- case BuiltinType::kCeil:
- return GLSLstd450Ceil;
- case BuiltinType::kClamp:
- if (builtin->ReturnType()->is_float_scalar_or_vector()) {
- return GLSLstd450NClamp;
- } else if (builtin->ReturnType()->is_unsigned_scalar_or_vector()) {
- return GLSLstd450UClamp;
- } else {
- return GLSLstd450SClamp;
- }
- case BuiltinType::kCos:
- return GLSLstd450Cos;
- case BuiltinType::kCosh:
- return GLSLstd450Cosh;
- case BuiltinType::kCross:
- return GLSLstd450Cross;
- case BuiltinType::kDegrees:
- return GLSLstd450Degrees;
- case BuiltinType::kDeterminant:
- return GLSLstd450Determinant;
- case BuiltinType::kDistance:
- return GLSLstd450Distance;
- case BuiltinType::kExp:
- return GLSLstd450Exp;
- case BuiltinType::kExp2:
- return GLSLstd450Exp2;
- case BuiltinType::kFaceForward:
- return GLSLstd450FaceForward;
- case BuiltinType::kFloor:
- return GLSLstd450Floor;
- case BuiltinType::kFma:
- return GLSLstd450Fma;
- case BuiltinType::kFract:
- return GLSLstd450Fract;
- case BuiltinType::kFrexp:
- return GLSLstd450FrexpStruct;
- case BuiltinType::kInverseSqrt:
- return GLSLstd450InverseSqrt;
- case BuiltinType::kLdexp:
- return GLSLstd450Ldexp;
- case BuiltinType::kLength:
- return GLSLstd450Length;
- case BuiltinType::kLog:
- return GLSLstd450Log;
- case BuiltinType::kLog2:
- return GLSLstd450Log2;
- case BuiltinType::kMax:
- if (builtin->ReturnType()->is_float_scalar_or_vector()) {
- return GLSLstd450NMax;
- } else if (builtin->ReturnType()->is_unsigned_scalar_or_vector()) {
- return GLSLstd450UMax;
- } else {
- return GLSLstd450SMax;
- }
- case BuiltinType::kMin:
- if (builtin->ReturnType()->is_float_scalar_or_vector()) {
- return GLSLstd450NMin;
- } else if (builtin->ReturnType()->is_unsigned_scalar_or_vector()) {
- return GLSLstd450UMin;
- } else {
- return GLSLstd450SMin;
- }
- case BuiltinType::kMix:
- return GLSLstd450FMix;
- case BuiltinType::kModf:
- return GLSLstd450ModfStruct;
- case BuiltinType::kNormalize:
- return GLSLstd450Normalize;
- case BuiltinType::kPack4x8snorm:
- return GLSLstd450PackSnorm4x8;
- case BuiltinType::kPack4x8unorm:
- return GLSLstd450PackUnorm4x8;
- case BuiltinType::kPack2x16snorm:
- return GLSLstd450PackSnorm2x16;
- case BuiltinType::kPack2x16unorm:
- return GLSLstd450PackUnorm2x16;
- case BuiltinType::kPack2x16float:
- return GLSLstd450PackHalf2x16;
- case BuiltinType::kPow:
- return GLSLstd450Pow;
- case BuiltinType::kRadians:
- return GLSLstd450Radians;
- case BuiltinType::kReflect:
- return GLSLstd450Reflect;
- case BuiltinType::kRefract:
- return GLSLstd450Refract;
- case BuiltinType::kRound:
- return GLSLstd450RoundEven;
- case BuiltinType::kSign:
- return GLSLstd450FSign;
- case BuiltinType::kSin:
- return GLSLstd450Sin;
- case BuiltinType::kSinh:
- return GLSLstd450Sinh;
- case BuiltinType::kSmoothstep:
- case BuiltinType::kSmoothStep:
- return GLSLstd450SmoothStep;
- case BuiltinType::kSqrt:
- return GLSLstd450Sqrt;
- case BuiltinType::kStep:
- return GLSLstd450Step;
- case BuiltinType::kTan:
- return GLSLstd450Tan;
- case BuiltinType::kTanh:
- return GLSLstd450Tanh;
- case BuiltinType::kTrunc:
- return GLSLstd450Trunc;
- case BuiltinType::kUnpack4x8snorm:
- return GLSLstd450UnpackSnorm4x8;
- case BuiltinType::kUnpack4x8unorm:
- return GLSLstd450UnpackUnorm4x8;
- case BuiltinType::kUnpack2x16snorm:
- return GLSLstd450UnpackSnorm2x16;
- case BuiltinType::kUnpack2x16unorm:
- return GLSLstd450UnpackUnorm2x16;
- case BuiltinType::kUnpack2x16float:
- return GLSLstd450UnpackHalf2x16;
- default:
- break;
- }
- return 0;
+ switch (builtin->Type()) {
+ case BuiltinType::kAcos:
+ return GLSLstd450Acos;
+ case BuiltinType::kAsin:
+ return GLSLstd450Asin;
+ case BuiltinType::kAtan:
+ return GLSLstd450Atan;
+ case BuiltinType::kAtan2:
+ return GLSLstd450Atan2;
+ case BuiltinType::kCeil:
+ return GLSLstd450Ceil;
+ case BuiltinType::kClamp:
+ if (builtin->ReturnType()->is_float_scalar_or_vector()) {
+ return GLSLstd450NClamp;
+ } else if (builtin->ReturnType()->is_unsigned_scalar_or_vector()) {
+ return GLSLstd450UClamp;
+ } else {
+ return GLSLstd450SClamp;
+ }
+ case BuiltinType::kCos:
+ return GLSLstd450Cos;
+ case BuiltinType::kCosh:
+ return GLSLstd450Cosh;
+ case BuiltinType::kCross:
+ return GLSLstd450Cross;
+ case BuiltinType::kDegrees:
+ return GLSLstd450Degrees;
+ case BuiltinType::kDeterminant:
+ return GLSLstd450Determinant;
+ case BuiltinType::kDistance:
+ return GLSLstd450Distance;
+ case BuiltinType::kExp:
+ return GLSLstd450Exp;
+ case BuiltinType::kExp2:
+ return GLSLstd450Exp2;
+ case BuiltinType::kFaceForward:
+ return GLSLstd450FaceForward;
+ case BuiltinType::kFloor:
+ return GLSLstd450Floor;
+ case BuiltinType::kFma:
+ return GLSLstd450Fma;
+ case BuiltinType::kFract:
+ return GLSLstd450Fract;
+ case BuiltinType::kFrexp:
+ return GLSLstd450FrexpStruct;
+ case BuiltinType::kInverseSqrt:
+ return GLSLstd450InverseSqrt;
+ case BuiltinType::kLdexp:
+ return GLSLstd450Ldexp;
+ case BuiltinType::kLength:
+ return GLSLstd450Length;
+ case BuiltinType::kLog:
+ return GLSLstd450Log;
+ case BuiltinType::kLog2:
+ return GLSLstd450Log2;
+ case BuiltinType::kMax:
+ if (builtin->ReturnType()->is_float_scalar_or_vector()) {
+ return GLSLstd450NMax;
+ } else if (builtin->ReturnType()->is_unsigned_scalar_or_vector()) {
+ return GLSLstd450UMax;
+ } else {
+ return GLSLstd450SMax;
+ }
+ case BuiltinType::kMin:
+ if (builtin->ReturnType()->is_float_scalar_or_vector()) {
+ return GLSLstd450NMin;
+ } else if (builtin->ReturnType()->is_unsigned_scalar_or_vector()) {
+ return GLSLstd450UMin;
+ } else {
+ return GLSLstd450SMin;
+ }
+ case BuiltinType::kMix:
+ return GLSLstd450FMix;
+ case BuiltinType::kModf:
+ return GLSLstd450ModfStruct;
+ case BuiltinType::kNormalize:
+ return GLSLstd450Normalize;
+ case BuiltinType::kPack4x8snorm:
+ return GLSLstd450PackSnorm4x8;
+ case BuiltinType::kPack4x8unorm:
+ return GLSLstd450PackUnorm4x8;
+ case BuiltinType::kPack2x16snorm:
+ return GLSLstd450PackSnorm2x16;
+ case BuiltinType::kPack2x16unorm:
+ return GLSLstd450PackUnorm2x16;
+ case BuiltinType::kPack2x16float:
+ return GLSLstd450PackHalf2x16;
+ case BuiltinType::kPow:
+ return GLSLstd450Pow;
+ case BuiltinType::kRadians:
+ return GLSLstd450Radians;
+ case BuiltinType::kReflect:
+ return GLSLstd450Reflect;
+ case BuiltinType::kRefract:
+ return GLSLstd450Refract;
+ case BuiltinType::kRound:
+ return GLSLstd450RoundEven;
+ case BuiltinType::kSign:
+ return GLSLstd450FSign;
+ case BuiltinType::kSin:
+ return GLSLstd450Sin;
+ case BuiltinType::kSinh:
+ return GLSLstd450Sinh;
+ case BuiltinType::kSmoothstep:
+ case BuiltinType::kSmoothStep:
+ return GLSLstd450SmoothStep;
+ case BuiltinType::kSqrt:
+ return GLSLstd450Sqrt;
+ case BuiltinType::kStep:
+ return GLSLstd450Step;
+ case BuiltinType::kTan:
+ return GLSLstd450Tan;
+ case BuiltinType::kTanh:
+ return GLSLstd450Tanh;
+ case BuiltinType::kTrunc:
+ return GLSLstd450Trunc;
+ case BuiltinType::kUnpack4x8snorm:
+ return GLSLstd450UnpackSnorm4x8;
+ case BuiltinType::kUnpack4x8unorm:
+ return GLSLstd450UnpackUnorm4x8;
+ case BuiltinType::kUnpack2x16snorm:
+ return GLSLstd450UnpackSnorm2x16;
+ case BuiltinType::kUnpack2x16unorm:
+ return GLSLstd450UnpackUnorm2x16;
+ case BuiltinType::kUnpack2x16float:
+ return GLSLstd450UnpackHalf2x16;
+ default:
+ break;
+ }
+ return 0;
}
/// @return the vector element type if ty is a vector, otherwise return ty.
const sem::Type* ElementTypeOf(const sem::Type* ty) {
- if (auto* v = ty->As<sem::Vector>()) {
- return v->type();
- }
- return ty;
+ if (auto* v = ty->As<sem::Vector>()) {
+ return v->type();
+ }
+ return ty;
}
} // namespace
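builtin_to_glsl_method() above picks the extended-instruction opcode from the builtin's return type; an example of the selection for clamp, min and max (comment-level illustration of the switch above):

    // clamp / min / max pick the float, unsigned or signed GLSL.std.450 variant:
    //   f32 or vecN<f32> return type -> GLSLstd450NClamp / NMin / NMax
    //   u32 or vecN<u32> return type -> GLSLstd450UClamp / UMin / UMax
    //   i32 or vecN<i32> return type -> GLSLstd450SClamp / SMin / SMax
    // The chosen opcode is then emitted with OpExtInst against the "GLSL.std.450" import.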
@@ -245,2939 +248,3012 @@ Builder::AccessorInfo::~AccessorInfo() {}
Builder::Builder(const Program* program, bool zero_initialize_workgroup_memory)
: builder_(ProgramBuilder::Wrap(program)),
- scope_stack_({}),
+ scope_stack_{Scope{}},
zero_initialize_workgroup_memory_(zero_initialize_workgroup_memory) {}
Builder::~Builder() = default;
bool Builder::Build() {
- push_capability(SpvCapabilityShader);
+ push_capability(SpvCapabilityShader);
- push_memory_model(spv::Op::OpMemoryModel,
- {Operand::Int(SpvAddressingModelLogical),
- Operand::Int(SpvMemoryModelGLSL450)});
+ push_memory_model(spv::Op::OpMemoryModel,
+ {U32Operand(SpvAddressingModelLogical), U32Operand(SpvMemoryModelGLSL450)});
- for (auto* var : builder_.AST().GlobalVariables()) {
- if (!GenerateGlobalVariable(var)) {
- return false;
+ for (auto ext : builder_.Sem().Module()->Extensions()) {
+ GenerateExtension(ext);
}
- }
- auto* mod = builder_.Sem().Module();
- for (auto* decl : mod->DependencyOrderedDeclarations()) {
- if (auto* func = decl->As<ast::Function>()) {
- if (!GenerateFunction(func)) {
- return false;
- }
+ for (auto* var : builder_.AST().GlobalVariables()) {
+ if (!GenerateGlobalVariable(var)) {
+ return false;
+ }
+ }
+
+ auto* mod = builder_.Sem().Module();
+ for (auto* decl : mod->DependencyOrderedDeclarations()) {
+ if (auto* func = decl->As<ast::Function>()) {
+ if (!GenerateFunction(func)) {
+ return false;
+ }
+ }
}
- }
- return true;
+ return true;
+}
+
+void Builder::RegisterVariable(const sem::Variable* var, uint32_t id) {
+ var_to_id_.emplace(var, id);
+ id_to_var_.emplace(id, var);
+}
+
+uint32_t Builder::LookupVariableID(const sem::Variable* var) {
+ auto it = var_to_id_.find(var);
+ if (it == var_to_id_.end()) {
+ error_ = "unable to find ID for variable: " +
+ builder_.Symbols().NameFor(var->Declaration()->symbol);
+ return 0;
+ }
+ return it->second;
+}
+
+void Builder::PushScope() {
+ // Push a new scope, by copying the top-most stack
+ scope_stack_.push_back(scope_stack_.back());
+}
+
+void Builder::PopScope() {
+ scope_stack_.pop_back();
}
Operand Builder::result_op() {
- return Operand::Int(next_id());
+ return Operand(next_id());
}
uint32_t Builder::total_size() const {
- // The 5 covers the magic, version, generator, id bound and reserved.
- uint32_t size = 5;
-
- size += size_of(capabilities_);
- size += size_of(extensions_);
- size += size_of(ext_imports_);
- size += size_of(memory_model_);
- size += size_of(entry_points_);
- size += size_of(execution_modes_);
- size += size_of(debug_);
- size += size_of(annotations_);
- size += size_of(types_);
- for (const auto& func : functions_) {
- size += func.word_length();
- }
-
- return size;
+ // The 5 covers the magic, version, generator, id bound and reserved.
+ uint32_t size = 5;
+
+ size += size_of(capabilities_);
+ size += size_of(extensions_);
+ size += size_of(ext_imports_);
+ size += size_of(memory_model_);
+ size += size_of(entry_points_);
+ size += size_of(execution_modes_);
+ size += size_of(debug_);
+ size += size_of(annotations_);
+ size += size_of(types_);
+ for (const auto& func : functions_) {
+ size += func.word_length();
+ }
+
+ return size;
}
void Builder::iterate(std::function<void(const Instruction&)> cb) const {
- for (const auto& inst : capabilities_) {
- cb(inst);
- }
- for (const auto& inst : extensions_) {
- cb(inst);
- }
- for (const auto& inst : ext_imports_) {
- cb(inst);
- }
- for (const auto& inst : memory_model_) {
- cb(inst);
- }
- for (const auto& inst : entry_points_) {
- cb(inst);
- }
- for (const auto& inst : execution_modes_) {
- cb(inst);
- }
- for (const auto& inst : debug_) {
- cb(inst);
- }
- for (const auto& inst : annotations_) {
- cb(inst);
- }
- for (const auto& inst : types_) {
- cb(inst);
- }
- for (const auto& func : functions_) {
- func.iterate(cb);
- }
+ for (const auto& inst : capabilities_) {
+ cb(inst);
+ }
+ for (const auto& inst : extensions_) {
+ cb(inst);
+ }
+ for (const auto& inst : ext_imports_) {
+ cb(inst);
+ }
+ for (const auto& inst : memory_model_) {
+ cb(inst);
+ }
+ for (const auto& inst : entry_points_) {
+ cb(inst);
+ }
+ for (const auto& inst : execution_modes_) {
+ cb(inst);
+ }
+ for (const auto& inst : debug_) {
+ cb(inst);
+ }
+ for (const auto& inst : annotations_) {
+ cb(inst);
+ }
+ for (const auto& inst : types_) {
+ cb(inst);
+ }
+ for (const auto& func : functions_) {
+ func.iterate(cb);
+ }
}
void Builder::push_capability(uint32_t cap) {
- if (capability_set_.count(cap) == 0) {
- capability_set_.insert(cap);
- capabilities_.push_back(
- Instruction{spv::Op::OpCapability, {Operand::Int(cap)}});
- }
+ if (capability_set_.count(cap) == 0) {
+ capability_set_.insert(cap);
+ capabilities_.push_back(Instruction{spv::Op::OpCapability, {Operand(cap)}});
+ }
}
-bool Builder::GenerateLabel(uint32_t id) {
- if (!push_function_inst(spv::Op::OpLabel, {Operand::Int(id)})) {
- return false;
- }
- current_label_id_ = id;
- return true;
+void Builder::push_extension(const char* extension) {
+ extensions_.push_back(Instruction{spv::Op::OpExtension, {Operand(extension)}});
}
-bool Builder::GenerateAssignStatement(const ast::AssignmentStatement* assign) {
- if (assign->lhs->Is<ast::PhonyExpression>()) {
- auto rhs_id = GenerateExpression(assign->rhs);
- if (rhs_id == 0) {
- return false;
+bool Builder::GenerateExtension(ast::Extension extension) {
+ /*
+ For each supported extension, push corresponding capability into the builder.
+ For example:
+ if (kind == ast::Extension::Kind::kF16) {
+ push_capability(SpvCapabilityFloat16);
+ push_capability(SpvCapabilityUniformAndStorageBuffer16BitAccess);
+ push_capability(SpvCapabilityStorageBuffer16BitAccess);
+ push_capability(SpvCapabilityStorageInputOutput16);
+ }
+ */
+ switch (extension) {
+ case ast::Extension::kChromiumExperimentalDP4a:
+ push_extension("SPV_KHR_integer_dot_product");
+ push_capability(SpvCapabilityDotProductKHR);
+ push_capability(SpvCapabilityDotProductInput4x8BitPackedKHR);
+ break;
+ default:
+ return false;
}
+
return true;
- } else {
- auto lhs_id = GenerateExpression(assign->lhs);
- if (lhs_id == 0) {
- return false;
+}
+
+bool Builder::GenerateLabel(uint32_t id) {
+ if (!push_function_inst(spv::Op::OpLabel, {Operand(id)})) {
+ return false;
}
- auto rhs_id = GenerateExpressionWithLoadIfNeeded(assign->rhs);
- if (rhs_id == 0) {
- return false;
+ current_label_id_ = id;
+ return true;
+}
+
+bool Builder::GenerateAssignStatement(const ast::AssignmentStatement* assign) {
+ if (assign->lhs->Is<ast::PhonyExpression>()) {
+ if (builder_.Sem().Get(assign->rhs)->ConstantValue()) {
+ // RHS of phony assignment is constant.
+ // Constants can't have side-effects, so just drop this.
+ return true;
+ }
+ auto rhs_id = GenerateExpression(assign->rhs);
+ if (rhs_id == 0) {
+ return false;
+ }
+ return true;
+ } else {
+ auto lhs_id = GenerateExpression(assign->lhs);
+ if (lhs_id == 0) {
+ return false;
+ }
+ auto rhs_id = GenerateExpressionWithLoadIfNeeded(assign->rhs);
+ if (rhs_id == 0) {
+ return false;
+ }
+ return GenerateStore(lhs_id, rhs_id);
}
- return GenerateStore(lhs_id, rhs_id);
- }
}
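The new early-out in GenerateAssignStatement() above drops a phony assignment whose right-hand side is a constant, since constants cannot have side effects; only non-constant right-hand sides still generate code (WGSL shown in comments, with a hypothetical call name; illustration only):

    //   _ = 1 + 2;                 // constant RHS: nothing is emitted
    //   _ = some_builtin_call();   // non-constant RHS: the expression is still generated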
bool Builder::GenerateBreakStatement(const ast::BreakStatement*) {
- if (merge_stack_.empty()) {
- error_ = "Attempted to break without a merge block";
- return false;
- }
- if (!push_function_inst(spv::Op::OpBranch,
- {Operand::Int(merge_stack_.back())})) {
- return false;
- }
- return true;
+ if (merge_stack_.empty()) {
+ error_ = "Attempted to break without a merge block";
+ return false;
+ }
+ if (!push_function_inst(spv::Op::OpBranch, {Operand(merge_stack_.back())})) {
+ return false;
+ }
+ return true;
}
bool Builder::GenerateContinueStatement(const ast::ContinueStatement*) {
- if (continue_stack_.empty()) {
- error_ = "Attempted to continue without a continue block";
- return false;
- }
- if (!push_function_inst(spv::Op::OpBranch,
- {Operand::Int(continue_stack_.back())})) {
- return false;
- }
- return true;
+ if (continue_stack_.empty()) {
+ error_ = "Attempted to continue without a continue block";
+ return false;
+ }
+ if (!push_function_inst(spv::Op::OpBranch, {Operand(continue_stack_.back())})) {
+ return false;
+ }
+ return true;
}
// TODO(dsinclair): This is generating an OpKill but the semantics of kill
// haven't been defined for WGSL yet. So, this may need to change.
// https://github.com/gpuweb/gpuweb/issues/676
bool Builder::GenerateDiscardStatement(const ast::DiscardStatement*) {
- if (!push_function_inst(spv::Op::OpKill, {})) {
- return false;
- }
- return true;
+ if (!push_function_inst(spv::Op::OpKill, {})) {
+ return false;
+ }
+ return true;
}
bool Builder::GenerateEntryPoint(const ast::Function* func, uint32_t id) {
- auto stage = pipeline_stage_to_execution_model(func->PipelineStage());
- if (stage == SpvExecutionModelMax) {
- error_ = "Unknown pipeline stage provided";
- return false;
- }
+ auto stage = pipeline_stage_to_execution_model(func->PipelineStage());
+ if (stage == SpvExecutionModelMax) {
+ error_ = "Unknown pipeline stage provided";
+ return false;
+ }
- OperandList operands = {
- Operand::Int(stage), Operand::Int(id),
- Operand::String(builder_.Symbols().NameFor(func->symbol))};
+ OperandList operands = {Operand(stage), Operand(id),
+ Operand(builder_.Symbols().NameFor(func->symbol))};
- auto* func_sem = builder_.Sem().Get(func);
- for (const auto* var : func_sem->TransitivelyReferencedGlobals()) {
- // For SPIR-V 1.3 we only output Input/output variables. If we update to
- // SPIR-V 1.4 or later this should be all variables.
- if (var->StorageClass() != ast::StorageClass::kInput &&
- var->StorageClass() != ast::StorageClass::kOutput) {
- continue;
- }
+ auto* func_sem = builder_.Sem().Get(func);
+ for (const auto* var : func_sem->TransitivelyReferencedGlobals()) {
+ // For SPIR-V 1.3 we only output Input/output variables. If we update to
+ // SPIR-V 1.4 or later this should be all variables.
+ if (var->StorageClass() != ast::StorageClass::kInput &&
+ var->StorageClass() != ast::StorageClass::kOutput) {
+ continue;
+ }
- uint32_t var_id = scope_stack_.Get(var->Declaration()->symbol);
- if (var_id == 0) {
- error_ = "unable to find ID for global variable: " +
- builder_.Symbols().NameFor(var->Declaration()->symbol);
- return false;
- }
+ uint32_t var_id = LookupVariableID(var);
+ if (var_id == 0) {
+ error_ = "unable to find ID for global variable: " +
+ builder_.Symbols().NameFor(var->Declaration()->symbol);
+ return false;
+ }
- operands.push_back(Operand::Int(var_id));
- }
- push_entry_point(spv::Op::OpEntryPoint, operands);
+ operands.push_back(Operand(var_id));
+ }
+ push_entry_point(spv::Op::OpEntryPoint, operands);
- return true;
+ return true;
}
bool Builder::GenerateExecutionModes(const ast::Function* func, uint32_t id) {
- auto* func_sem = builder_.Sem().Get(func);
-
- // WGSL fragment shader origin is upper left
- if (func->PipelineStage() == ast::PipelineStage::kFragment) {
- push_execution_mode(
- spv::Op::OpExecutionMode,
- {Operand::Int(id), Operand::Int(SpvExecutionModeOriginUpperLeft)});
- } else if (func->PipelineStage() == ast::PipelineStage::kCompute) {
- auto& wgsize = func_sem->WorkgroupSize();
-
- // Check if the workgroup_size uses pipeline-overridable constants.
- if (wgsize[0].overridable_const || wgsize[1].overridable_const ||
- wgsize[2].overridable_const) {
- if (has_overridable_workgroup_size_) {
- // Only one stage can have a pipeline-overridable workgroup size.
- // TODO(crbug.com/tint/810): Use LocalSizeId to handle this scenario.
- TINT_ICE(Writer, builder_.Diagnostics())
- << "multiple stages using pipeline-overridable workgroup sizes";
- }
- has_overridable_workgroup_size_ = true;
+ auto* func_sem = builder_.Sem().Get(func);
+
+ // WGSL fragment shader origin is upper left
+ if (func->PipelineStage() == ast::PipelineStage::kFragment) {
+ push_execution_mode(spv::Op::OpExecutionMode,
+ {Operand(id), U32Operand(SpvExecutionModeOriginUpperLeft)});
+ } else if (func->PipelineStage() == ast::PipelineStage::kCompute) {
+ auto& wgsize = func_sem->WorkgroupSize();
+
+ // Check if the workgroup_size uses pipeline-overridable constants.
+ if (wgsize[0].overridable_const || wgsize[1].overridable_const ||
+ wgsize[2].overridable_const) {
+ if (has_overridable_workgroup_size_) {
+ // Only one stage can have a pipeline-overridable workgroup size.
+ // TODO(crbug.com/tint/810): Use LocalSizeId to handle this scenario.
+ TINT_ICE(Writer, builder_.Diagnostics())
+ << "multiple stages using pipeline-overridable workgroup sizes";
+ }
+ has_overridable_workgroup_size_ = true;
- auto* vec3_u32 =
- builder_.create<sem::Vector>(builder_.create<sem::U32>(), 3u);
- uint32_t vec3_u32_type_id = GenerateTypeIfNeeded(vec3_u32);
- if (vec3_u32_type_id == 0) {
- return 0;
- }
+ auto* vec3_u32 = builder_.create<sem::Vector>(builder_.create<sem::U32>(), 3u);
+ uint32_t vec3_u32_type_id = GenerateTypeIfNeeded(vec3_u32);
+ if (vec3_u32_type_id == 0) {
+ return 0;
+ }
- OperandList wgsize_ops;
- auto wgsize_result = result_op();
- wgsize_ops.push_back(Operand::Int(vec3_u32_type_id));
- wgsize_ops.push_back(wgsize_result);
-
- // Generate OpConstant instructions for each dimension.
- for (int i = 0; i < 3; i++) {
- auto constant = ScalarConstant::U32(wgsize[i].value);
- if (wgsize[i].overridable_const) {
- // Make the constant specializable.
- auto* sem_const = builder_.Sem().Get<sem::GlobalVariable>(
- wgsize[i].overridable_const);
- if (!sem_const->IsOverridable()) {
- TINT_ICE(Writer, builder_.Diagnostics())
- << "expected a pipeline-overridable constant";
- }
- constant.is_spec_op = true;
- constant.constant_id = sem_const->ConstantId();
+ OperandList wgsize_ops;
+ auto wgsize_result = result_op();
+ wgsize_ops.push_back(Operand(vec3_u32_type_id));
+ wgsize_ops.push_back(wgsize_result);
+
+ // Generate OpConstant instructions for each dimension.
+ for (int i = 0; i < 3; i++) {
+ auto constant = ScalarConstant::U32(wgsize[i].value);
+ if (wgsize[i].overridable_const) {
+ // Make the constant specializable.
+ auto* sem_const =
+ builder_.Sem().Get<sem::GlobalVariable>(wgsize[i].overridable_const);
+ if (!sem_const->IsOverridable()) {
+ TINT_ICE(Writer, builder_.Diagnostics())
+ << "expected a pipeline-overridable constant";
+ }
+ constant.is_spec_op = true;
+ constant.constant_id = sem_const->ConstantId();
+ }
+
+ auto result = GenerateConstantIfNeeded(constant);
+ wgsize_ops.push_back(Operand(result));
+ }
+
+ // Generate the WorkgroupSize builtin.
+ push_type(spv::Op::OpSpecConstantComposite, wgsize_ops);
+ push_annot(spv::Op::OpDecorate, {wgsize_result, U32Operand(SpvDecorationBuiltIn),
+ U32Operand(SpvBuiltInWorkgroupSize)});
+ } else {
+ // Not overridable, so just use OpExecutionMode LocalSize.
+ uint32_t x = wgsize[0].value;
+ uint32_t y = wgsize[1].value;
+ uint32_t z = wgsize[2].value;
+ push_execution_mode(spv::Op::OpExecutionMode,
+ {Operand(id), U32Operand(SpvExecutionModeLocalSize), Operand(x),
+ Operand(y), Operand(z)});
}
+ }
- auto result = GenerateConstantIfNeeded(constant);
- wgsize_ops.push_back(Operand::Int(result));
- }
+ for (auto builtin : func_sem->TransitivelyReferencedBuiltinVariables()) {
+ if (builtin.second->builtin == ast::Builtin::kFragDepth) {
+ push_execution_mode(spv::Op::OpExecutionMode,
+ {Operand(id), U32Operand(SpvExecutionModeDepthReplacing)});
+ }
+ }
- // Generate the WorkgroupSize builtin.
- push_type(spv::Op::OpSpecConstantComposite, wgsize_ops);
- push_annot(spv::Op::OpDecorate,
- {wgsize_result, Operand::Int(SpvDecorationBuiltIn),
- Operand::Int(SpvBuiltInWorkgroupSize)});
- } else {
- // Not overridable, so just use OpExecutionMode LocalSize.
- uint32_t x = wgsize[0].value;
- uint32_t y = wgsize[1].value;
- uint32_t z = wgsize[2].value;
- push_execution_mode(
- spv::Op::OpExecutionMode,
- {Operand::Int(id), Operand::Int(SpvExecutionModeLocalSize),
- Operand::Int(x), Operand::Int(y), Operand::Int(z)});
- }
- }
-
- for (auto builtin : func_sem->TransitivelyReferencedBuiltinVariables()) {
- if (builtin.second->builtin == ast::Builtin::kFragDepth) {
- push_execution_mode(
- spv::Op::OpExecutionMode,
- {Operand::Int(id), Operand::Int(SpvExecutionModeDepthReplacing)});
- }
- }
-
- return true;
+ return true;
}
uint32_t Builder::GenerateExpression(const ast::Expression* expr) {
- return Switch(
- expr,
- [&](const ast::IndexAccessorExpression* a) {
- return GenerateAccessorExpression(a);
- },
- [&](const ast::BinaryExpression* b) {
- return GenerateBinaryExpression(b);
- },
- [&](const ast::BitcastExpression* b) {
- return GenerateBitcastExpression(b);
- },
- [&](const ast::CallExpression* c) { return GenerateCallExpression(c); },
- [&](const ast::IdentifierExpression* i) {
- return GenerateIdentifierExpression(i);
- },
- [&](const ast::LiteralExpression* l) {
- return GenerateLiteralIfNeeded(nullptr, l);
- },
- [&](const ast::MemberAccessorExpression* m) {
- return GenerateAccessorExpression(m);
- },
- [&](const ast::UnaryOpExpression* u) {
- return GenerateUnaryOpExpression(u);
- },
- [&](Default) {
- error_ =
- "unknown expression type: " + std::string(expr->TypeInfo().name);
- return 0;
- });
+ if (auto* sem = builder_.Sem().Get(expr)) {
+ if (auto constant = sem->ConstantValue()) {
+ return GenerateConstantIfNeeded(constant);
+ }
+ }
+ return Switch(
+ expr, //
+ [&](const ast::IndexAccessorExpression* a) { return GenerateAccessorExpression(a); },
+ [&](const ast::BinaryExpression* b) { return GenerateBinaryExpression(b); },
+ [&](const ast::BitcastExpression* b) { return GenerateBitcastExpression(b); },
+ [&](const ast::CallExpression* c) { return GenerateCallExpression(c); },
+ [&](const ast::IdentifierExpression* i) { return GenerateIdentifierExpression(i); },
+ [&](const ast::LiteralExpression* l) { return GenerateLiteralIfNeeded(nullptr, l); },
+ [&](const ast::MemberAccessorExpression* m) { return GenerateAccessorExpression(m); },
+ [&](const ast::UnaryOpExpression* u) { return GenerateUnaryOpExpression(u); },
+ [&](Default) {
+ error_ = "unknown expression type: " + std::string(expr->TypeInfo().name);
+ return 0;
+ });
}
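
The rewritten GenerateExpression first asks the semantic node for an already-folded constant value and emits that directly, only falling back to per-node dispatch for non-constant expressions. A minimal, self-contained sketch of that shape (hypothetical types, not Tint's ast/sem classes):

    #include <functional>
    #include <iostream>
    #include <optional>

    // Hypothetical stand-in expression node.
    struct Expr {
        std::optional<int> constant;            // what a resolver pass folded, if anything
        std::function<int()> generate_dynamic;  // fallback per-node code generation
    };

    int GenerateExpression(const Expr& e) {
        // Fast path: the expression is already a compile-time constant, so emit
        // it as a constant instead of generating code for the whole sub-tree.
        if (e.constant) {
            std::cout << "emit OpConstant " << *e.constant << "\n";
            return 1;  // stand-in result ID
        }
        // Otherwise dispatch per node kind, mirroring the Switch() above.
        return e.generate_dynamic();
    }

    int main() {
        Expr folded{42, {}};
        Expr dynamic{std::nullopt, [] { std::cout << "emit OpLoad ...\n"; return 2; }};
        GenerateExpression(folded);
        GenerateExpression(dynamic);
    }
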
bool Builder::GenerateFunction(const ast::Function* func_ast) {
- auto* func = builder_.Sem().Get(func_ast);
+ auto* func = builder_.Sem().Get(func_ast);
- uint32_t func_type_id = GenerateFunctionTypeIfNeeded(func);
- if (func_type_id == 0) {
- return false;
- }
+ uint32_t func_type_id = GenerateFunctionTypeIfNeeded(func);
+ if (func_type_id == 0) {
+ return false;
+ }
- auto func_op = result_op();
- auto func_id = func_op.to_i();
+ auto func_op = result_op();
+ auto func_id = std::get<uint32_t>(func_op);
- push_debug(spv::Op::OpName,
- {Operand::Int(func_id),
- Operand::String(builder_.Symbols().NameFor(func_ast->symbol))});
+ push_debug(spv::Op::OpName,
+ {Operand(func_id), Operand(builder_.Symbols().NameFor(func_ast->symbol))});
- auto ret_id = GenerateTypeIfNeeded(func->ReturnType());
- if (ret_id == 0) {
- return false;
- }
+ auto ret_id = GenerateTypeIfNeeded(func->ReturnType());
+ if (ret_id == 0) {
+ return false;
+ }
- scope_stack_.Push();
- TINT_DEFER(scope_stack_.Pop());
+ PushScope();
+ TINT_DEFER(PopScope());
- auto definition_inst = Instruction{
- spv::Op::OpFunction,
- {Operand::Int(ret_id), func_op, Operand::Int(SpvFunctionControlMaskNone),
- Operand::Int(func_type_id)}};
+ auto definition_inst = Instruction{
+ spv::Op::OpFunction,
+ {Operand(ret_id), func_op, U32Operand(SpvFunctionControlMaskNone), Operand(func_type_id)}};
- InstructionList params;
- for (auto* param : func->Parameters()) {
- auto param_op = result_op();
- auto param_id = param_op.to_i();
+ InstructionList params;
+ for (auto* param : func->Parameters()) {
+ auto param_op = result_op();
+ auto param_id = std::get<uint32_t>(param_op);
- auto param_type_id = GenerateTypeIfNeeded(param->Type());
- if (param_type_id == 0) {
- return false;
- }
+ auto param_type_id = GenerateTypeIfNeeded(param->Type());
+ if (param_type_id == 0) {
+ return false;
+ }
- push_debug(spv::Op::OpName, {Operand::Int(param_id),
- Operand::String(builder_.Symbols().NameFor(
- param->Declaration()->symbol))});
- params.push_back(Instruction{spv::Op::OpFunctionParameter,
- {Operand::Int(param_type_id), param_op}});
+ push_debug(
+ spv::Op::OpName,
+ {Operand(param_id), Operand(builder_.Symbols().NameFor(param->Declaration()->symbol))});
+ params.push_back(
+ Instruction{spv::Op::OpFunctionParameter, {Operand(param_type_id), param_op}});
- scope_stack_.Set(param->Declaration()->symbol, param_id);
- }
+ RegisterVariable(param, param_id);
+ }
- push_function(Function{definition_inst, result_op(), std::move(params)});
+ push_function(Function{definition_inst, result_op(), std::move(params)});
- for (auto* stmt : func_ast->body->statements) {
- if (!GenerateStatement(stmt)) {
- return false;
+ for (auto* stmt : func_ast->body->statements) {
+ if (!GenerateStatement(stmt)) {
+ return false;
+ }
}
- }
- if (InsideBasicBlock()) {
- if (func->ReturnType()->Is<sem::Void>()) {
- push_function_inst(spv::Op::OpReturn, {});
- } else {
- auto zero = GenerateConstantNullIfNeeded(func->ReturnType());
- push_function_inst(spv::Op::OpReturnValue, {Operand::Int(zero)});
+ if (InsideBasicBlock()) {
+ if (func->ReturnType()->Is<sem::Void>()) {
+ push_function_inst(spv::Op::OpReturn, {});
+ } else {
+ auto zero = GenerateConstantNullIfNeeded(func->ReturnType());
+ push_function_inst(spv::Op::OpReturnValue, {Operand(zero)});
+ }
}
- }
- if (func_ast->IsEntryPoint()) {
- if (!GenerateEntryPoint(func_ast, func_id)) {
- return false;
- }
- if (!GenerateExecutionModes(func_ast, func_id)) {
- return false;
+ if (func_ast->IsEntryPoint()) {
+ if (!GenerateEntryPoint(func_ast, func_id)) {
+ return false;
+ }
+ if (!GenerateExecutionModes(func_ast, func_id)) {
+ return false;
+ }
}
- }
- func_symbol_to_id_[func_ast->symbol] = func_id;
+ func_symbol_to_id_[func_ast->symbol] = func_id;
- return true;
+ return true;
}
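
Throughout the function above (and the rest of the file), Operand::Int()/Operand::String() construction and .to_i() reads are replaced by plain Operand construction, a U32Operand() helper for SPIR-V enum values, and std::get<uint32_t>(). A sketch of how such an operand can be modelled as a std::variant; the alternatives shown here are an illustrative assumption, not necessarily Tint's exact definition:

    #include <cstdint>
    #include <iostream>
    #include <string>
    #include <variant>

    // Illustrative operand type: one alternative per kind of SPIR-V word payload.
    using Operand = std::variant<uint32_t, float, std::string>;

    // SPIR-V enums (SpvDecoration*, SpvBuiltIn*, ...) are plain C enums, so a
    // small helper documents that the uint32_t alternative is intended.
    inline Operand U32Operand(int32_t i) {
        return {static_cast<uint32_t>(i)};
    }

    int main() {
        Operand id = 5u;                     // previously Operand::Int(5)
        Operand name = std::string("main");  // previously Operand::String("main")
        Operand deco = U32Operand(11);       // e.g. a SpvDecoration value

        std::cout << std::get<uint32_t>(id) << " "   // previously id.to_i()
                  << std::get<std::string>(name) << " "
                  << std::get<uint32_t>(deco) << "\n";
    }
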
uint32_t Builder::GenerateFunctionTypeIfNeeded(const sem::Function* func) {
- return utils::GetOrCreate(
- func_sig_to_id_, func->Signature(), [&]() -> uint32_t {
+ return utils::GetOrCreate(func_sig_to_id_, func->Signature(), [&]() -> uint32_t {
auto func_op = result_op();
- auto func_type_id = func_op.to_i();
+ auto func_type_id = std::get<uint32_t>(func_op);
auto ret_id = GenerateTypeIfNeeded(func->ReturnType());
if (ret_id == 0) {
- return 0;
+ return 0;
}
- OperandList ops = {func_op, Operand::Int(ret_id)};
+ OperandList ops = {func_op, Operand(ret_id)};
for (auto* param : func->Parameters()) {
- auto param_type_id = GenerateTypeIfNeeded(param->Type());
- if (param_type_id == 0) {
- return 0;
- }
- ops.push_back(Operand::Int(param_type_id));
+ auto param_type_id = GenerateTypeIfNeeded(param->Type());
+ if (param_type_id == 0) {
+ return 0;
+ }
+ ops.push_back(Operand(param_type_id));
}
push_type(spv::Op::OpTypeFunction, std::move(ops));
return func_type_id;
- });
+ });
}
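
GenerateFunctionTypeIfNeeded deduplicates OpTypeFunction declarations by memoizing one ID per function signature through utils::GetOrCreate. A generic sketch of the assumed contract of that helper (look up, otherwise create and cache), not its actual implementation:

    #include <cstdint>
    #include <iostream>
    #include <string>
    #include <unordered_map>

    // Assumed behaviour: return the cached value for key, or create and cache it.
    template <typename Map, typename Key, typename Create>
    auto GetOrCreate(Map& map, const Key& key, Create&& create) {
        auto it = map.find(key);
        if (it != map.end()) {
            return it->second;  // already generated: reuse the cached ID
        }
        auto value = create();  // generate once...
        map.emplace(key, value);
        return value;           // ...and remember it for the next lookup
    }

    int main() {
        std::unordered_map<std::string, uint32_t> func_sig_to_id;
        uint32_t next_id = 1;
        auto make = [&] { return next_id++; };

        std::cout << GetOrCreate(func_sig_to_id, std::string("fn(i32)->f32"), make) << "\n";  // 1
        std::cout << GetOrCreate(func_sig_to_id, std::string("fn(i32)->f32"), make) << "\n";  // 1 again
    }
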
bool Builder::GenerateFunctionVariable(const ast::Variable* var) {
- uint32_t init_id = 0;
- if (var->constructor) {
- init_id = GenerateExpressionWithLoadIfNeeded(var->constructor);
- if (init_id == 0) {
- return false;
+ uint32_t init_id = 0;
+ if (var->constructor) {
+ init_id = GenerateExpressionWithLoadIfNeeded(var->constructor);
+ if (init_id == 0) {
+ return false;
+ }
}
- }
- if (var->is_const) {
- if (!var->constructor) {
- error_ = "missing constructor for constant";
- return false;
+ auto* sem = builder_.Sem().Get(var);
+
+ if (var->is_const) {
+ if (!var->constructor) {
+ error_ = "missing constructor for constant";
+ return false;
+ }
+ RegisterVariable(sem, init_id);
+ return true;
}
- scope_stack_.Set(var->symbol, init_id);
- spirv_id_to_variable_[init_id] = var;
- return true;
- }
-
- auto result = result_op();
- auto var_id = result.to_i();
- auto sc = ast::StorageClass::kFunction;
- auto* type = builder_.Sem().Get(var)->Type();
- auto type_id = GenerateTypeIfNeeded(type);
- if (type_id == 0) {
- return false;
- }
- push_debug(spv::Op::OpName,
- {Operand::Int(var_id),
- Operand::String(builder_.Symbols().NameFor(var->symbol))});
+ auto result = result_op();
+ auto var_id = std::get<uint32_t>(result);
+ auto sc = ast::StorageClass::kFunction;
+ auto* type = sem->Type();
+ auto type_id = GenerateTypeIfNeeded(type);
+ if (type_id == 0) {
+ return false;
+ }
- // TODO(dsinclair) We could detect if the constructor is fully const and emit
- // an initializer value for the variable instead of doing the OpLoad.
- auto null_id = GenerateConstantNullIfNeeded(type->UnwrapRef());
- if (null_id == 0) {
- return 0;
- }
- push_function_var({Operand::Int(type_id), result,
- Operand::Int(ConvertStorageClass(sc)),
- Operand::Int(null_id)});
+ push_debug(spv::Op::OpName,
+ {Operand(var_id), Operand(builder_.Symbols().NameFor(var->symbol))});
- if (var->constructor) {
- if (!GenerateStore(var_id, init_id)) {
- return false;
+ // TODO(dsinclair) We could detect if the constructor is fully const and emit
+ // an initializer value for the variable instead of doing the OpLoad.
+ auto null_id = GenerateConstantNullIfNeeded(type->UnwrapRef());
+ if (null_id == 0) {
+ return 0;
+ }
+ push_function_var(
+ {Operand(type_id), result, U32Operand(ConvertStorageClass(sc)), Operand(null_id)});
+
+ if (var->constructor) {
+ if (!GenerateStore(var_id, init_id)) {
+ return false;
+ }
}
- }
- scope_stack_.Set(var->symbol, var_id);
- spirv_id_to_variable_[var_id] = var;
+ RegisterVariable(sem, var_id);
- return true;
+ return true;
}
bool Builder::GenerateStore(uint32_t to, uint32_t from) {
- return push_function_inst(spv::Op::OpStore,
- {Operand::Int(to), Operand::Int(from)});
+ return push_function_inst(spv::Op::OpStore, {Operand(to), Operand(from)});
}
bool Builder::GenerateGlobalVariable(const ast::Variable* var) {
- auto* sem = builder_.Sem().Get(var);
- auto* type = sem->Type()->UnwrapRef();
-
- uint32_t init_id = 0;
- if (var->constructor) {
- init_id = GenerateConstructorExpression(var, var->constructor);
- if (init_id == 0) {
- return false;
- }
- }
-
- if (var->is_const) {
- if (!var->constructor) {
- // Constants must have an initializer unless they are overridable.
- if (!var->is_overridable) {
- error_ = "missing constructor for constant";
- return false;
- }
-
- // SPIR-V requires specialization constants to have initializers.
- if (type->Is<sem::F32>()) {
- ast::FloatLiteralExpression l(ProgramID(), Source{}, 0.0f);
- init_id = GenerateLiteralIfNeeded(var, &l);
- } else if (type->Is<sem::U32>()) {
- ast::UintLiteralExpression l(ProgramID(), Source{}, 0);
- init_id = GenerateLiteralIfNeeded(var, &l);
- } else if (type->Is<sem::I32>()) {
- ast::SintLiteralExpression l(ProgramID(), Source{}, 0);
- init_id = GenerateLiteralIfNeeded(var, &l);
- } else if (type->Is<sem::Bool>()) {
- ast::BoolLiteralExpression l(ProgramID(), Source{}, false);
- init_id = GenerateLiteralIfNeeded(var, &l);
- } else {
- error_ = "invalid type for pipeline constant ID, must be scalar";
- return false;
- }
- if (init_id == 0) {
- return 0;
- }
- }
- push_debug(spv::Op::OpName,
- {Operand::Int(init_id),
- Operand::String(builder_.Symbols().NameFor(var->symbol))});
-
- scope_stack_.Set(var->symbol, init_id);
- spirv_id_to_variable_[init_id] = var;
- return true;
- }
-
- auto result = result_op();
- auto var_id = result.to_i();
-
- auto sc = sem->StorageClass() == ast::StorageClass::kNone
- ? ast::StorageClass::kPrivate
- : sem->StorageClass();
-
- auto type_id = GenerateTypeIfNeeded(sem->Type());
- if (type_id == 0) {
- return false;
- }
-
- push_debug(spv::Op::OpName,
- {Operand::Int(var_id),
- Operand::String(builder_.Symbols().NameFor(var->symbol))});
-
- OperandList ops = {Operand::Int(type_id), result,
- Operand::Int(ConvertStorageClass(sc))};
-
- if (var->constructor) {
- ops.push_back(Operand::Int(init_id));
- } else {
- auto* st = type->As<sem::StorageTexture>();
- if (st || type->Is<sem::Struct>()) {
- // type is a sem::Struct or a sem::StorageTexture
- auto access = st ? st->access() : sem->Access();
- switch (access) {
- case ast::Access::kWrite:
- push_annot(
- spv::Op::OpDecorate,
- {Operand::Int(var_id), Operand::Int(SpvDecorationNonReadable)});
- break;
- case ast::Access::kRead:
- push_annot(
- spv::Op::OpDecorate,
- {Operand::Int(var_id), Operand::Int(SpvDecorationNonWritable)});
- break;
- case ast::Access::kUndefined:
- case ast::Access::kReadWrite:
- break;
- }
- }
- if (!type->Is<sem::Sampler>()) {
- // If we don't have a constructor and we're an Output or Private
- // variable, then WGSL requires that we zero-initialize.
- // If we're a Workgroup variable, and the
- // VK_KHR_zero_initialize_workgroup_memory extension is enabled, we should
- // also zero-initialize.
- if (sem->StorageClass() == ast::StorageClass::kPrivate ||
- sem->StorageClass() == ast::StorageClass::kOutput ||
- (zero_initialize_workgroup_memory_ &&
- sem->StorageClass() == ast::StorageClass::kWorkgroup)) {
- init_id = GenerateConstantNullIfNeeded(type);
+ auto* sem = builder_.Sem().Get(var);
+ auto* type = sem->Type()->UnwrapRef();
+
+ uint32_t init_id = 0;
+ if (var->constructor) {
+ if (!var->is_overridable) {
+ auto* ctor = builder_.Sem().Get(var->constructor);
+ if (auto constant = ctor->ConstantValue()) {
+ init_id = GenerateConstantIfNeeded(std::move(constant));
+ }
+ }
if (init_id == 0) {
- return 0;
+ init_id = GenerateConstructorExpression(var, var->constructor);
+ }
+ if (init_id == 0) {
+ return false;
}
- ops.push_back(Operand::Int(init_id));
- }
}
- }
-
- push_type(spv::Op::OpVariable, std::move(ops));
- for (auto* attr : var->attributes) {
- bool ok = Switch(
- attr,
- [&](const ast::BuiltinAttribute* builtin) {
- push_annot(spv::Op::OpDecorate,
- {Operand::Int(var_id), Operand::Int(SpvDecorationBuiltIn),
- Operand::Int(ConvertBuiltin(builtin->builtin,
- sem->StorageClass()))});
- return true;
- },
- [&](const ast::LocationAttribute* location) {
- push_annot(spv::Op::OpDecorate,
- {Operand::Int(var_id), Operand::Int(SpvDecorationLocation),
- Operand::Int(location->value)});
- return true;
- },
- [&](const ast::InterpolateAttribute* interpolate) {
- AddInterpolationDecorations(var_id, interpolate->type,
- interpolate->sampling);
- return true;
- },
- [&](const ast::InvariantAttribute*) {
- push_annot(
- spv::Op::OpDecorate,
- {Operand::Int(var_id), Operand::Int(SpvDecorationInvariant)});
- return true;
- },
- [&](const ast::BindingAttribute* binding) {
- push_annot(spv::Op::OpDecorate,
- {Operand::Int(var_id), Operand::Int(SpvDecorationBinding),
- Operand::Int(binding->value)});
- return true;
- },
- [&](const ast::GroupAttribute* group) {
- push_annot(
- spv::Op::OpDecorate,
- {Operand::Int(var_id), Operand::Int(SpvDecorationDescriptorSet),
- Operand::Int(group->value)});
- return true;
- },
- [&](const ast::IdAttribute*) {
- return true; // Spec constants are handled elsewhere
- },
- [&](const ast::InternalAttribute*) {
- return true; // ignored
- },
- [&](Default) {
- error_ = "unknown attribute";
- return false;
- });
- if (!ok) {
- return false;
- }
- }
+ if (var->is_const) {
+ if (!var->constructor) {
+ // Constants must have an initializer unless they are overridable.
+ if (!var->is_overridable) {
+ error_ = "missing constructor for constant";
+ return false;
+ }
- scope_stack_.Set(var->symbol, var_id);
- spirv_id_to_variable_[var_id] = var;
- return true;
-}
+ // SPIR-V requires specialization constants to have initializers.
+ init_id = Switch(
+ type, //
+ [&](const sem::F32*) {
+ ast::FloatLiteralExpression l(ProgramID{}, Source{}, 0,
+ ast::FloatLiteralExpression::Suffix::kF);
+ return GenerateLiteralIfNeeded(var, &l);
+ },
+ [&](const sem::U32*) {
+ ast::IntLiteralExpression l(ProgramID{}, Source{}, 0,
+ ast::IntLiteralExpression::Suffix::kU);
+ return GenerateLiteralIfNeeded(var, &l);
+ },
+ [&](const sem::I32*) {
+ ast::IntLiteralExpression l(ProgramID{}, Source{}, 0,
+ ast::IntLiteralExpression::Suffix::kI);
+ return GenerateLiteralIfNeeded(var, &l);
+ },
+ [&](const sem::Bool*) {
+ ast::BoolLiteralExpression l(ProgramID{}, Source{}, false);
+ return GenerateLiteralIfNeeded(var, &l);
+ },
+ [&](Default) {
+ error_ = "invalid type for pipeline constant ID, must be scalar";
+ return 0;
+ });
+ if (init_id == 0) {
+ return 0;
+ }
+ }
+ push_debug(spv::Op::OpName,
+ {Operand(init_id), Operand(builder_.Symbols().NameFor(var->symbol))});
-bool Builder::GenerateIndexAccessor(const ast::IndexAccessorExpression* expr,
- AccessorInfo* info) {
- auto idx_id = GenerateExpressionWithLoadIfNeeded(expr->index);
- if (idx_id == 0) {
- return 0;
- }
-
- // If the source is a reference, we access chain into it.
- // In the future, pointers may support access-chaining.
- // See https://github.com/gpuweb/gpuweb/pull/1580
- if (info->source_type->Is<sem::Reference>()) {
- info->access_chain_indices.push_back(idx_id);
- info->source_type = TypeOf(expr);
- return true;
- }
+ RegisterVariable(sem, init_id);
+ return true;
+ }
- auto result_type_id = GenerateTypeIfNeeded(TypeOf(expr));
- if (result_type_id == 0) {
- return false;
- }
+ auto result = result_op();
+ auto var_id = std::get<uint32_t>(result);
- // We don't have a pointer, so we can just directly extract the value.
- auto extract = result_op();
- auto extract_id = extract.to_i();
+ auto sc = sem->StorageClass() == ast::StorageClass::kNone ? ast::StorageClass::kPrivate
+ : sem->StorageClass();
- // If the index is compile-time constant, we use OpCompositeExtract.
- auto* idx = builder_.Sem().Get(expr->index);
- if (auto idx_constval = idx->ConstantValue()) {
- if (!push_function_inst(
- spv::Op::OpCompositeExtract,
- {
- Operand::Int(result_type_id),
- extract,
- Operand::Int(info->source_id),
- Operand::Int(idx_constval.ElementAs<uint32_t>(0)),
- })) {
- return false;
+ auto type_id = GenerateTypeIfNeeded(sem->Type());
+ if (type_id == 0) {
+ return false;
}
- info->source_id = extract_id;
- info->source_type = TypeOf(expr);
+ push_debug(spv::Op::OpName,
+ {Operand(var_id), Operand(builder_.Symbols().NameFor(var->symbol))});
- return true;
- }
+ OperandList ops = {Operand(type_id), result, U32Operand(ConvertStorageClass(sc))};
- // If the source is a vector, we use OpVectorExtractDynamic.
- if (info->source_type->Is<sem::Vector>()) {
- if (!push_function_inst(
- spv::Op::OpVectorExtractDynamic,
- {Operand::Int(result_type_id), extract,
- Operand::Int(info->source_id), Operand::Int(idx_id)})) {
- return false;
+ if (var->constructor) {
+ ops.push_back(Operand(init_id));
+ } else {
+ auto* st = type->As<sem::StorageTexture>();
+ if (st || type->Is<sem::Struct>()) {
+ // type is a sem::Struct or a sem::StorageTexture
+ auto access = st ? st->access() : sem->Access();
+ switch (access) {
+ case ast::Access::kWrite:
+ push_annot(spv::Op::OpDecorate,
+ {Operand(var_id), U32Operand(SpvDecorationNonReadable)});
+ break;
+ case ast::Access::kRead:
+ push_annot(spv::Op::OpDecorate,
+ {Operand(var_id), U32Operand(SpvDecorationNonWritable)});
+ break;
+ case ast::Access::kUndefined:
+ case ast::Access::kReadWrite:
+ break;
+ }
+ }
+ if (!type->Is<sem::Sampler>()) {
+ // If we don't have a constructor and we're an Output or Private
+ // variable, then WGSL requires that we zero-initialize.
+ // If we're a Workgroup variable, and the
+ // VK_KHR_zero_initialize_workgroup_memory extension is enabled, we should
+ // also zero-initialize.
+ if (sem->StorageClass() == ast::StorageClass::kPrivate ||
+ sem->StorageClass() == ast::StorageClass::kOutput ||
+ (zero_initialize_workgroup_memory_ &&
+ sem->StorageClass() == ast::StorageClass::kWorkgroup)) {
+ init_id = GenerateConstantNullIfNeeded(type);
+ if (init_id == 0) {
+ return 0;
+ }
+ ops.push_back(Operand(init_id));
+ }
+ }
}
- info->source_id = extract_id;
- info->source_type = TypeOf(expr);
+ push_type(spv::Op::OpVariable, std::move(ops));
+
+ for (auto* attr : var->attributes) {
+ bool ok = Switch(
+ attr,
+ [&](const ast::BuiltinAttribute* builtin) {
+ push_annot(spv::Op::OpDecorate,
+ {Operand(var_id), U32Operand(SpvDecorationBuiltIn),
+ U32Operand(ConvertBuiltin(builtin->builtin, sem->StorageClass()))});
+ return true;
+ },
+ [&](const ast::LocationAttribute* location) {
+ push_annot(spv::Op::OpDecorate, {Operand(var_id), U32Operand(SpvDecorationLocation),
+ Operand(location->value)});
+ return true;
+ },
+ [&](const ast::InterpolateAttribute* interpolate) {
+ AddInterpolationDecorations(var_id, interpolate->type, interpolate->sampling);
+ return true;
+ },
+ [&](const ast::InvariantAttribute*) {
+ push_annot(spv::Op::OpDecorate,
+ {Operand(var_id), U32Operand(SpvDecorationInvariant)});
+ return true;
+ },
+ [&](const ast::BindingAttribute* binding) {
+ push_annot(spv::Op::OpDecorate, {Operand(var_id), U32Operand(SpvDecorationBinding),
+ Operand(binding->value)});
+ return true;
+ },
+ [&](const ast::GroupAttribute* group) {
+ push_annot(spv::Op::OpDecorate,
+ {Operand(var_id), U32Operand(SpvDecorationDescriptorSet),
+ Operand(group->value)});
+ return true;
+ },
+ [&](const ast::IdAttribute*) {
+ return true; // Spec constants are handled elsewhere
+ },
+ [&](const ast::InternalAttribute*) {
+ return true; // ignored
+ },
+ [&](Default) {
+ error_ = "unknown attribute";
+ return false;
+ });
+ if (!ok) {
+ return false;
+ }
+ }
+ RegisterVariable(sem, var_id);
return true;
- }
-
- TINT_ICE(Writer, builder_.Diagnostics())
- << "unsupported index accessor expression";
- return false;
}
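
In the no-constructor path of GenerateGlobalVariable above, a null-constant initializer is only attached when zero-initialization is required: always for private and output variables, and for workgroup variables only when zero_initialize_workgroup_memory_ is enabled. A compact sketch of that predicate, with a stand-in enum and an illustrative helper name:

    #include <iostream>

    enum class StorageClass { kPrivate, kOutput, kWorkgroup, kStorage, kUniform };

    // Illustrative helper mirroring the condition guarding the null initializer.
    bool NeedsZeroInit(StorageClass sc, bool zero_initialize_workgroup_memory) {
        return sc == StorageClass::kPrivate || sc == StorageClass::kOutput ||
               (zero_initialize_workgroup_memory && sc == StorageClass::kWorkgroup);
    }

    int main() {
        std::cout << NeedsZeroInit(StorageClass::kPrivate, false)    // 1
                  << NeedsZeroInit(StorageClass::kWorkgroup, false)  // 0
                  << NeedsZeroInit(StorageClass::kWorkgroup, true)   // 1
                  << NeedsZeroInit(StorageClass::kStorage, true)     // 0
                  << "\n";
    }
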
-bool Builder::GenerateMemberAccessor(const ast::MemberAccessorExpression* expr,
- AccessorInfo* info) {
- auto* expr_sem = builder_.Sem().Get(expr);
- auto* expr_type = expr_sem->Type();
-
- if (auto* access = expr_sem->As<sem::StructMemberAccess>()) {
- uint32_t idx = access->Member()->Index();
+bool Builder::GenerateIndexAccessor(const ast::IndexAccessorExpression* expr, AccessorInfo* info) {
+ auto idx_id = GenerateExpressionWithLoadIfNeeded(expr->index);
+ if (idx_id == 0) {
+ return 0;
+ }
+ // If the source is a reference, we access chain into it.
+ // In the future, pointers may support access-chaining.
+ // See https://github.com/gpuweb/gpuweb/pull/1580
if (info->source_type->Is<sem::Reference>()) {
- auto idx_id = GenerateConstantIfNeeded(ScalarConstant::U32(idx));
- if (idx_id == 0) {
- return 0;
- }
- info->access_chain_indices.push_back(idx_id);
- info->source_type = expr_type;
- } else {
- auto result_type_id = GenerateTypeIfNeeded(expr_type);
- if (result_type_id == 0) {
- return false;
- }
+ info->access_chain_indices.push_back(idx_id);
+ info->source_type = TypeOf(expr);
+ return true;
+ }
- auto extract = result_op();
- auto extract_id = extract.to_i();
- if (!push_function_inst(
- spv::Op::OpCompositeExtract,
- {Operand::Int(result_type_id), extract,
- Operand::Int(info->source_id), Operand::Int(idx)})) {
+ auto result_type_id = GenerateTypeIfNeeded(TypeOf(expr));
+ if (result_type_id == 0) {
return false;
- }
-
- info->source_id = extract_id;
- info->source_type = expr_type;
}
- return true;
- }
+ // We don't have a pointer, so we can just directly extract the value.
+ auto extract = result_op();
+ auto extract_id = std::get<uint32_t>(extract);
- if (auto* swizzle = expr_sem->As<sem::Swizzle>()) {
- // Single element swizzle is either an access chain or a composite extract
- auto& indices = swizzle->Indices();
- if (indices.size() == 1) {
- if (info->source_type->Is<sem::Reference>()) {
- auto idx_id = GenerateConstantIfNeeded(ScalarConstant::U32(indices[0]));
- if (idx_id == 0) {
- return 0;
- }
- info->access_chain_indices.push_back(idx_id);
- } else {
- auto result_type_id = GenerateTypeIfNeeded(expr_type);
- if (result_type_id == 0) {
- return 0;
+ // If the index is compile-time constant, we use OpCompositeExtract.
+ auto* idx = builder_.Sem().Get(expr->index);
+ if (auto idx_constval = idx->ConstantValue()) {
+ if (!push_function_inst(spv::Op::OpCompositeExtract,
+ {
+ Operand(result_type_id),
+ extract,
+ Operand(info->source_id),
+ Operand(idx_constval.Element<uint32_t>(0)),
+ })) {
+ return false;
}
- auto extract = result_op();
- auto extract_id = extract.to_i();
+ info->source_id = extract_id;
+ info->source_type = TypeOf(expr);
+
+ return true;
+ }
+
+ // If the source is a vector, we use OpVectorExtractDynamic.
+ if (info->source_type->Is<sem::Vector>()) {
if (!push_function_inst(
- spv::Op::OpCompositeExtract,
- {Operand::Int(result_type_id), extract,
- Operand::Int(info->source_id), Operand::Int(indices[0])})) {
- return false;
+ spv::Op::OpVectorExtractDynamic,
+ {Operand(result_type_id), extract, Operand(info->source_id), Operand(idx_id)})) {
+ return false;
}
info->source_id = extract_id;
- info->source_type = expr_type;
- }
- return true;
+ info->source_type = TypeOf(expr);
+
+ return true;
}
- // Store the type away as it may change if we run the access chain
- auto* incoming_type = info->source_type;
+ TINT_ICE(Writer, builder_.Diagnostics()) << "unsupported index accessor expression";
+ return false;
+}
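
GenerateIndexAccessor picks one of three encodings: an index into a reference is appended to the pending OpAccessChain, a compile-time-constant index into a value becomes OpCompositeExtract, and a dynamic index into a vector value becomes OpVectorExtractDynamic. A tiny sketch of that selection order, with booleans standing in for the semantic queries and an illustrative helper name:

    #include <iostream>
    #include <string>

    std::string SelectIndexOp(bool source_is_reference,
                              bool index_is_constant,
                              bool source_is_vector) {
        if (source_is_reference) {
            return "append index to OpAccessChain";
        }
        if (index_is_constant) {
            return "OpCompositeExtract";
        }
        if (source_is_vector) {
            return "OpVectorExtractDynamic";
        }
        return "internal error: unsupported index accessor expression";
    }

    int main() {
        std::cout << SelectIndexOp(true, false, false) << "\n"
                  << SelectIndexOp(false, true, false) << "\n"
                  << SelectIndexOp(false, false, true) << "\n"
                  << SelectIndexOp(false, false, false) << "\n";
    }
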
- // Multi-item extract is a VectorShuffle. We have to emit any existing
- // access chain data, then load the access chain and shuffle that.
- if (!info->access_chain_indices.empty()) {
- auto result_type_id = GenerateTypeIfNeeded(info->source_type);
- if (result_type_id == 0) {
- return 0;
- }
- auto extract = result_op();
- auto extract_id = extract.to_i();
+bool Builder::GenerateMemberAccessor(const ast::MemberAccessorExpression* expr,
+ AccessorInfo* info) {
+ auto* expr_sem = builder_.Sem().Get(expr);
+ auto* expr_type = expr_sem->Type();
- OperandList ops = {Operand::Int(result_type_id), extract,
- Operand::Int(info->source_id)};
- for (auto id : info->access_chain_indices) {
- ops.push_back(Operand::Int(id));
- }
+ if (auto* access = expr_sem->As<sem::StructMemberAccess>()) {
+ uint32_t idx = access->Member()->Index();
- if (!push_function_inst(spv::Op::OpAccessChain, ops)) {
- return false;
- }
+ if (info->source_type->Is<sem::Reference>()) {
+ auto idx_id = GenerateConstantIfNeeded(ScalarConstant::U32(idx));
+ if (idx_id == 0) {
+ return 0;
+ }
+ info->access_chain_indices.push_back(idx_id);
+ info->source_type = expr_type;
+ } else {
+ auto result_type_id = GenerateTypeIfNeeded(expr_type);
+ if (result_type_id == 0) {
+ return false;
+ }
- info->source_id = GenerateLoadIfNeeded(expr_type, extract_id);
- info->source_type = expr_type->UnwrapRef();
- info->access_chain_indices.clear();
- }
+ auto extract = result_op();
+ auto extract_id = std::get<uint32_t>(extract);
+ if (!push_function_inst(
+ spv::Op::OpCompositeExtract,
+ {Operand(result_type_id), extract, Operand(info->source_id), Operand(idx)})) {
+ return false;
+ }
- auto result_type_id = GenerateTypeIfNeeded(expr_type);
- if (result_type_id == 0) {
- return false;
+ info->source_id = extract_id;
+ info->source_type = expr_type;
+ }
+
+ return true;
}
- auto vec_id = GenerateLoadIfNeeded(incoming_type, info->source_id);
+ if (auto* swizzle = expr_sem->As<sem::Swizzle>()) {
+ // Single element swizzle is either an access chain or a composite extract
+ auto& indices = swizzle->Indices();
+ if (indices.size() == 1) {
+ if (info->source_type->Is<sem::Reference>()) {
+ auto idx_id = GenerateConstantIfNeeded(ScalarConstant::U32(indices[0]));
+ if (idx_id == 0) {
+ return 0;
+ }
+ info->access_chain_indices.push_back(idx_id);
+ } else {
+ auto result_type_id = GenerateTypeIfNeeded(expr_type);
+ if (result_type_id == 0) {
+ return 0;
+ }
+
+ auto extract = result_op();
+ auto extract_id = std::get<uint32_t>(extract);
+ if (!push_function_inst(spv::Op::OpCompositeExtract,
+ {Operand(result_type_id), extract, Operand(info->source_id),
+ Operand(indices[0])})) {
+ return false;
+ }
+
+ info->source_id = extract_id;
+ info->source_type = expr_type;
+ }
+ return true;
+ }
+
+ // Store the type away as it may change if we run the access chain
+ auto* incoming_type = info->source_type;
- auto result = result_op();
- auto result_id = result.to_i();
+ // Multi-item extract is a VectorShuffle. We have to emit any existing
+ // access chain data, then load the access chain and shuffle that.
+ if (!info->access_chain_indices.empty()) {
+ auto result_type_id = GenerateTypeIfNeeded(info->source_type);
+ if (result_type_id == 0) {
+ return 0;
+ }
+ auto extract = result_op();
+ auto extract_id = std::get<uint32_t>(extract);
- OperandList ops = {Operand::Int(result_type_id), result,
- Operand::Int(vec_id), Operand::Int(vec_id)};
+ OperandList ops = {Operand(result_type_id), extract, Operand(info->source_id)};
+ for (auto id : info->access_chain_indices) {
+ ops.push_back(Operand(id));
+ }
- for (auto idx : indices) {
- ops.push_back(Operand::Int(idx));
- }
+ if (!push_function_inst(spv::Op::OpAccessChain, ops)) {
+ return false;
+ }
+
+ info->source_id = GenerateLoadIfNeeded(expr_type, extract_id);
+ info->source_type = expr_type->UnwrapRef();
+ info->access_chain_indices.clear();
+ }
+
+ auto result_type_id = GenerateTypeIfNeeded(expr_type);
+ if (result_type_id == 0) {
+ return false;
+ }
+
+ auto vec_id = GenerateLoadIfNeeded(incoming_type, info->source_id);
- if (!push_function_inst(spv::Op::OpVectorShuffle, ops)) {
- return false;
+ auto result = result_op();
+ auto result_id = std::get<uint32_t>(result);
+
+ OperandList ops = {Operand(result_type_id), result, Operand(vec_id), Operand(vec_id)};
+
+ for (auto idx : indices) {
+ ops.push_back(Operand(idx));
+ }
+
+ if (!push_function_inst(spv::Op::OpVectorShuffle, ops)) {
+ return false;
+ }
+ info->source_id = result_id;
+ info->source_type = expr_type;
+ return true;
}
- info->source_id = result_id;
- info->source_type = expr_type;
- return true;
- }
- TINT_ICE(Writer, builder_.Diagnostics())
- << "unhandled member index type: " << expr_sem->TypeInfo().name;
- return false;
+ TINT_ICE(Writer, builder_.Diagnostics())
+ << "unhandled member index type: " << expr_sem->TypeInfo().name;
+ return false;
}
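
A multi-element swizzle (for example v.zxy) ends up as the OpVectorShuffle above, with the loaded vector supplied for both inputs and one literal lane index per output component. A small sketch of what that lane selection computes, using plain arrays in place of SPIR-V IDs:

    #include <array>
    #include <cstddef>
    #include <cstdint>
    #include <iostream>

    int main() {
        const std::array<float, 4> v = {1.0f, 2.0f, 3.0f, 4.0f};
        const std::array<uint32_t, 3> indices = {2, 0, 1};  // the lanes for .zxy

        // OpVectorShuffle %result %v %v 2 0 1: both inputs are %v, so each index
        // simply selects a lane of the same source vector.
        std::array<float, 3> result{};
        for (std::size_t i = 0; i < indices.size(); ++i) {
            result[i] = v[indices[i]];
        }

        for (float f : result) {
            std::cout << f << " ";  // prints: 3 1 2
        }
        std::cout << "\n";
    }
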
uint32_t Builder::GenerateAccessorExpression(const ast::Expression* expr) {
- if (!expr->IsAnyOf<ast::IndexAccessorExpression,
- ast::MemberAccessorExpression>()) {
- TINT_ICE(Writer, builder_.Diagnostics()) << "expression is not an accessor";
- return 0;
- }
-
- // Gather a list of all the member and index accessors that are in this chain.
- // The list is built in reverse order as that's the order we need to access
- // the chain.
- std::vector<const ast::Expression*> accessors;
- const ast::Expression* source = expr;
- while (true) {
- if (auto* array = source->As<ast::IndexAccessorExpression>()) {
- accessors.insert(accessors.begin(), source);
- source = array->object;
- } else if (auto* member = source->As<ast::MemberAccessorExpression>()) {
- accessors.insert(accessors.begin(), source);
- source = member->structure;
- } else {
- break;
+ if (!expr->IsAnyOf<ast::IndexAccessorExpression, ast::MemberAccessorExpression>()) {
+ TINT_ICE(Writer, builder_.Diagnostics()) << "expression is not an accessor";
+ return 0;
}
- }
- AccessorInfo info;
- info.source_id = GenerateExpression(source);
- if (info.source_id == 0) {
- return 0;
- }
- info.source_type = TypeOf(source);
-
- // Note: Dynamic index on array and matrix values (lets) should have been
- // promoted to storage with the VarForDynamicIndex transform.
-
- for (auto* accessor : accessors) {
- bool ok = Switch(
- accessor,
- [&](const ast::IndexAccessorExpression* array) {
- return GenerateIndexAccessor(array, &info);
- },
- [&](const ast::MemberAccessorExpression* member) {
- return GenerateMemberAccessor(member, &info);
- },
- [&](Default) {
- error_ = "invalid accessor in list: " +
- std::string(accessor->TypeInfo().name);
- return false;
- });
- if (!ok) {
- return false;
+ // Gather a list of all the member and index accessors that are in this chain.
+ // The list is built in reverse order as that's the order we need to access
+ // the chain.
+ std::vector<const ast::Expression*> accessors;
+ const ast::Expression* source = expr;
+ while (true) {
+ if (auto* array = source->As<ast::IndexAccessorExpression>()) {
+ accessors.insert(accessors.begin(), source);
+ source = array->object;
+ } else if (auto* member = source->As<ast::MemberAccessorExpression>()) {
+ accessors.insert(accessors.begin(), source);
+ source = member->structure;
+ } else {
+ break;
+ }
}
- }
- if (!info.access_chain_indices.empty()) {
- auto* type = TypeOf(expr);
- auto result_type_id = GenerateTypeIfNeeded(type);
- if (result_type_id == 0) {
- return 0;
+ AccessorInfo info;
+ info.source_id = GenerateExpression(source);
+ if (info.source_id == 0) {
+ return 0;
+ }
+ info.source_type = TypeOf(source);
+
+ // Note: Dynamic index on array and matrix values (lets) should have been
+ // promoted to storage with the VarForDynamicIndex transform.
+
+ for (auto* accessor : accessors) {
+ bool ok = Switch(
+ accessor,
+ [&](const ast::IndexAccessorExpression* array) {
+ return GenerateIndexAccessor(array, &info);
+ },
+ [&](const ast::MemberAccessorExpression* member) {
+ return GenerateMemberAccessor(member, &info);
+ },
+ [&](Default) {
+ error_ = "invalid accessor in list: " + std::string(accessor->TypeInfo().name);
+ return false;
+ });
+ if (!ok) {
+ return false;
+ }
}
- auto result = result_op();
- auto result_id = result.to_i();
+ if (!info.access_chain_indices.empty()) {
+ auto* type = TypeOf(expr);
+ auto result_type_id = GenerateTypeIfNeeded(type);
+ if (result_type_id == 0) {
+ return 0;
+ }
- OperandList ops = {Operand::Int(result_type_id), result,
- Operand::Int(info.source_id)};
- for (auto id : info.access_chain_indices) {
- ops.push_back(Operand::Int(id));
- }
+ auto result = result_op();
+ auto result_id = std::get<uint32_t>(result);
+
+ OperandList ops = {Operand(result_type_id), result, Operand(info.source_id)};
+ for (auto id : info.access_chain_indices) {
+ ops.push_back(Operand(id));
+ }
- if (!push_function_inst(spv::Op::OpAccessChain, ops)) {
- return false;
+ if (!push_function_inst(spv::Op::OpAccessChain, ops)) {
+ return false;
+ }
+ info.source_id = result_id;
}
- info.source_id = result_id;
- }
- return info.source_id;
+ return info.source_id;
}
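
GenerateAccessorExpression first flattens a chained expression such as a.b[i].c into an ordered list by following object/structure links back to the source, then replays the chain and folds any accumulated indices into a single OpAccessChain. A stand-alone sketch of the flattening step, with a hypothetical node type rather than Tint's AST classes:

    #include <iostream>
    #include <memory>
    #include <string>
    #include <vector>

    // Hypothetical accessor node: either a leaf identifier or an accessor on base.
    struct Expr {
        std::string name;            // leaf identifier, if this is the source
        std::shared_ptr<Expr> base;  // object/structure of an accessor, else null
        std::string accessor;        // ".member" or "[index]" applied to base
    };

    int main() {
        // Models the expression base.member[2].
        auto base = std::make_shared<Expr>(Expr{"base", nullptr, ""});
        auto member = std::make_shared<Expr>(Expr{"", base, ".member"});
        Expr indexed{"", member, "[2]"};

        // Build the accessor list front-to-back, walking inward from the
        // outermost accessor, like the while(true) loop above.
        std::vector<const Expr*> accessors;
        const Expr* source = &indexed;
        while (source->base) {
            accessors.insert(accessors.begin(), source);
            source = source->base.get();
        }

        std::cout << "source: " << source->name << "\n";
        for (const Expr* a : accessors) {
            std::cout << "apply " << a->accessor << "\n";  // each adds an index
        }
    }
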
-uint32_t Builder::GenerateIdentifierExpression(
- const ast::IdentifierExpression* expr) {
- uint32_t val = scope_stack_.Get(expr->symbol);
- if (val == 0) {
- error_ = "unable to find variable with identifier: " +
- builder_.Symbols().NameFor(expr->symbol);
- }
- return val;
+uint32_t Builder::GenerateIdentifierExpression(const ast::IdentifierExpression* expr) {
+ auto* sem = builder_.Sem().Get(expr);
+ if (auto* user = sem->As<sem::VariableUser>()) {
+ return LookupVariableID(user->Variable());
+ }
+ error_ = "identifier '" + builder_.Symbols().NameFor(expr->symbol) +
+ "' does not resolve to a variable";
+ return 0;
}
-uint32_t Builder::GenerateExpressionWithLoadIfNeeded(
- const sem::Expression* expr) {
- // The semantic node directly knows both the AST node and the resolved type.
- if (const auto id = GenerateExpression(expr->Declaration())) {
- return GenerateLoadIfNeeded(expr->Type(), id);
- }
- return 0;
+uint32_t Builder::GenerateExpressionWithLoadIfNeeded(const sem::Expression* expr) {
+ // The semantic node directly knows both the AST node and the resolved type.
+ if (const auto id = GenerateExpression(expr->Declaration())) {
+ return GenerateLoadIfNeeded(expr->Type(), id);
+ }
+ return 0;
}
-uint32_t Builder::GenerateExpressionWithLoadIfNeeded(
- const ast::Expression* expr) {
- if (const auto id = GenerateExpression(expr)) {
- // Perform a lookup to get the resolved type.
- return GenerateLoadIfNeeded(TypeOf(expr), id);
- }
- return 0;
+uint32_t Builder::GenerateExpressionWithLoadIfNeeded(const ast::Expression* expr) {
+ if (const auto id = GenerateExpression(expr)) {
+ // Perform a lookup to get the resolved type.
+ return GenerateLoadIfNeeded(TypeOf(expr), id);
+ }
+ return 0;
}
uint32_t Builder::GenerateLoadIfNeeded(const sem::Type* type, uint32_t id) {
- if (auto* ref = type->As<sem::Reference>()) {
- type = ref->StoreType();
- } else {
- return id;
- }
+ if (auto* ref = type->As<sem::Reference>()) {
+ type = ref->StoreType();
+ } else {
+ return id;
+ }
- auto type_id = GenerateTypeIfNeeded(type);
- auto result = result_op();
- auto result_id = result.to_i();
- if (!push_function_inst(spv::Op::OpLoad,
- {Operand::Int(type_id), result, Operand::Int(id)})) {
- return 0;
- }
- return result_id;
+ auto type_id = GenerateTypeIfNeeded(type);
+ auto result = result_op();
+ auto result_id = std::get<uint32_t>(result);
+ if (!push_function_inst(spv::Op::OpLoad, {Operand(type_id), result, Operand(id)})) {
+ return 0;
+ }
+ return result_id;
}
-uint32_t Builder::GenerateUnaryOpExpression(
- const ast::UnaryOpExpression* expr) {
- auto result = result_op();
- auto result_id = result.to_i();
-
- spv::Op op = spv::Op::OpNop;
- switch (expr->op) {
- case ast::UnaryOp::kComplement:
- op = spv::Op::OpNot;
- break;
- case ast::UnaryOp::kNegation:
- if (TypeOf(expr)->is_float_scalar_or_vector()) {
- op = spv::Op::OpFNegate;
- } else {
- op = spv::Op::OpSNegate;
- }
- break;
- case ast::UnaryOp::kNot:
- op = spv::Op::OpLogicalNot;
- break;
- case ast::UnaryOp::kAddressOf:
- case ast::UnaryOp::kIndirection:
- // Address-of converts a reference to a pointer, and dereference converts
- // a pointer to a reference. These are the same thing in SPIR-V, so this
- // is a no-op.
- return GenerateExpression(expr->expr);
- }
-
- auto val_id = GenerateExpressionWithLoadIfNeeded(expr->expr);
- if (val_id == 0) {
- return 0;
- }
+uint32_t Builder::GenerateUnaryOpExpression(const ast::UnaryOpExpression* expr) {
+ auto result = result_op();
+ auto result_id = std::get<uint32_t>(result);
+
+ spv::Op op = spv::Op::OpNop;
+ switch (expr->op) {
+ case ast::UnaryOp::kComplement:
+ op = spv::Op::OpNot;
+ break;
+ case ast::UnaryOp::kNegation:
+ if (TypeOf(expr)->is_float_scalar_or_vector()) {
+ op = spv::Op::OpFNegate;
+ } else {
+ op = spv::Op::OpSNegate;
+ }
+ break;
+ case ast::UnaryOp::kNot:
+ op = spv::Op::OpLogicalNot;
+ break;
+ case ast::UnaryOp::kAddressOf:
+ case ast::UnaryOp::kIndirection:
+ // Address-of converts a reference to a pointer, and dereference converts
+ // a pointer to a reference. These are the same thing in SPIR-V, so this
+ // is a no-op.
+ return GenerateExpression(expr->expr);
+ }
+
+ auto val_id = GenerateExpressionWithLoadIfNeeded(expr->expr);
+ if (val_id == 0) {
+ return 0;
+ }
- auto type_id = GenerateTypeIfNeeded(TypeOf(expr));
- if (type_id == 0) {
- return 0;
- }
+ auto type_id = GenerateTypeIfNeeded(TypeOf(expr));
+ if (type_id == 0) {
+ return 0;
+ }
- if (!push_function_inst(
- op, {Operand::Int(type_id), result, Operand::Int(val_id)})) {
- return false;
- }
+ if (!push_function_inst(op, {Operand(type_id), result, Operand(val_id)})) {
+ return false;
+ }
- return result_id;
+ return result_id;
}
uint32_t Builder::GetGLSLstd450Import() {
- auto where = import_name_to_id_.find(kGLSLstd450);
- if (where != import_name_to_id_.end()) {
- return where->second;
- }
+ auto where = import_name_to_id_.find(kGLSLstd450);
+ if (where != import_name_to_id_.end()) {
+ return where->second;
+ }
- // It doesn't exist yet. Generate it.
- auto result = result_op();
- auto id = result.to_i();
+ // It doesn't exist yet. Generate it.
+ auto result = result_op();
+ auto id = std::get<uint32_t>(result);
- push_ext_import(spv::Op::OpExtInstImport,
- {result, Operand::String(kGLSLstd450)});
+ push_ext_import(spv::Op::OpExtInstImport, {result, Operand(kGLSLstd450)});
- // Remember it for later.
- import_name_to_id_[kGLSLstd450] = id;
- return id;
+ // Remember it for later.
+ import_name_to_id_[kGLSLstd450] = id;
+ return id;
}
uint32_t Builder::GenerateConstructorExpression(const ast::Variable* var,
const ast::Expression* expr) {
- if (auto* literal = expr->As<ast::LiteralExpression>()) {
- return GenerateLiteralIfNeeded(var, literal);
- }
- if (auto* call = builder_.Sem().Get<sem::Call>(expr)) {
- if (call->Target()->IsAnyOf<sem::TypeConstructor, sem::TypeConversion>()) {
- return GenerateTypeConstructorOrConversion(call, var);
- }
- }
- error_ = "unknown constructor expression";
- return 0;
+ if (auto* literal = expr->As<ast::LiteralExpression>()) {
+ return GenerateLiteralIfNeeded(var, literal);
+ }
+ if (auto* call = builder_.Sem().Get<sem::Call>(expr)) {
+ if (call->Target()->IsAnyOf<sem::TypeConstructor, sem::TypeConversion>()) {
+ return GenerateTypeConstructorOrConversion(call, var);
+ }
+ }
+ error_ = "unknown constructor expression";
+ return 0;
}
bool Builder::IsConstructorConst(const ast::Expression* expr) {
- bool is_const = true;
- ast::TraverseExpressions(expr, builder_.Diagnostics(),
- [&](const ast::Expression* e) {
- if (e->Is<ast::LiteralExpression>()) {
- return ast::TraverseAction::Descend;
- }
- if (auto* ce = e->As<ast::CallExpression>()) {
- auto* call = builder_.Sem().Get(ce);
- if (call->Target()->Is<sem::TypeConstructor>()) {
- return ast::TraverseAction::Descend;
- }
- }
-
- is_const = false;
- return ast::TraverseAction::Stop;
- });
- return is_const;
+ bool is_const = true;
+ ast::TraverseExpressions(expr, builder_.Diagnostics(), [&](const ast::Expression* e) {
+ if (e->Is<ast::LiteralExpression>()) {
+ return ast::TraverseAction::Descend;
+ }
+ if (auto* ce = e->As<ast::CallExpression>()) {
+ auto* sem = builder_.Sem().Get(ce);
+ if (sem->Is<sem::Materialize>()) {
+ // Materialize can only occur on compile time expressions, so this sub-tree must be
+ // constant.
+ return ast::TraverseAction::Skip;
+ }
+ auto* call = sem->As<sem::Call>();
+ if (call->Target()->Is<sem::TypeConstructor>()) {
+ return ast::TraverseAction::Descend;
+ }
+ }
+
+ is_const = false;
+ return ast::TraverseAction::Stop;
+ });
+ return is_const;
}

-uint32_t Builder::GenerateTypeConstructorOrConversion(
- const sem::Call* call,
- const ast::Variable* var) {
- auto& args = call->Arguments();
- auto* global_var = builder_.Sem().Get<sem::GlobalVariable>(var);
- auto* result_type = call->Type();
-
- // Generate the zero initializer if there are no values provided.
- if (args.empty()) {
- if (global_var && global_var->IsOverridable()) {
- auto constant_id = global_var->ConstantId();
- if (result_type->Is<sem::I32>()) {
- return GenerateConstantIfNeeded(
- ScalarConstant::I32(0).AsSpecOp(constant_id));
- }
- if (result_type->Is<sem::U32>()) {
- return GenerateConstantIfNeeded(
- ScalarConstant::U32(0).AsSpecOp(constant_id));
- }
- if (result_type->Is<sem::F32>()) {
- return GenerateConstantIfNeeded(
- ScalarConstant::F32(0).AsSpecOp(constant_id));
- }
- if (result_type->Is<sem::Bool>()) {
- return GenerateConstantIfNeeded(
- ScalarConstant::Bool(false).AsSpecOp(constant_id));
- }
+uint32_t Builder::GenerateTypeConstructorOrConversion(const sem::Call* call,
+ const ast::Variable* var) {
+ auto& args = call->Arguments();
+ auto* global_var = builder_.Sem().Get<sem::GlobalVariable>(var);
+ auto* result_type = call->Type();
+
+ // Generate the zero initializer if there are no values provided.
+ if (args.empty()) {
+ if (global_var && global_var->IsOverridable()) {
+ auto constant_id = global_var->ConstantId();
+ if (result_type->Is<sem::I32>()) {
+ return GenerateConstantIfNeeded(ScalarConstant::I32(0).AsSpecOp(constant_id));
+ }
+ if (result_type->Is<sem::U32>()) {
+ return GenerateConstantIfNeeded(ScalarConstant::U32(0).AsSpecOp(constant_id));
+ }
+ if (result_type->Is<sem::F32>()) {
+ return GenerateConstantIfNeeded(ScalarConstant::F32(0).AsSpecOp(constant_id));
+ }
+ if (result_type->Is<sem::Bool>()) {
+ return GenerateConstantIfNeeded(ScalarConstant::Bool(false).AsSpecOp(constant_id));
+ }
+ }
+ return GenerateConstantNullIfNeeded(result_type->UnwrapRef());
}
- return GenerateConstantNullIfNeeded(result_type->UnwrapRef());
- }
- std::ostringstream out;
- out << "__const_" << result_type->FriendlyName(builder_.Symbols()) << "_";
-
- result_type = result_type->UnwrapRef();
- bool constructor_is_const = IsConstructorConst(call->Declaration());
- if (has_error()) {
- return 0;
- }
+ result_type = result_type->UnwrapRef();
+ bool constructor_is_const = IsConstructorConst(call->Declaration());
+ if (has_error()) {
+ return 0;
+ }
- bool can_cast_or_copy = result_type->is_scalar();
+ bool can_cast_or_copy = result_type->is_scalar();
- if (auto* res_vec = result_type->As<sem::Vector>()) {
- if (res_vec->type()->is_scalar()) {
- auto* value_type = args[0]->Type()->UnwrapRef();
- if (auto* val_vec = value_type->As<sem::Vector>()) {
- if (val_vec->type()->is_scalar()) {
- can_cast_or_copy = res_vec->Width() == val_vec->Width();
+ if (auto* res_vec = result_type->As<sem::Vector>()) {
+ if (res_vec->type()->is_scalar()) {
+ auto* value_type = args[0]->Type()->UnwrapRef();
+ if (auto* val_vec = value_type->As<sem::Vector>()) {
+ if (val_vec->type()->is_scalar()) {
+ can_cast_or_copy = res_vec->Width() == val_vec->Width();
+ }
+ }
}
- }
}
- }
-
- if (can_cast_or_copy) {
- return GenerateCastOrCopyOrPassthrough(result_type, args[0]->Declaration(),
- global_var);
- }
- auto type_id = GenerateTypeIfNeeded(result_type);
- if (type_id == 0) {
- return 0;
- }
-
- bool result_is_constant_composite = constructor_is_const;
- bool result_is_spec_composite = false;
-
- if (auto* vec = result_type->As<sem::Vector>()) {
- result_type = vec->type();
- }
-
- OperandList ops;
- for (auto* e : args) {
- uint32_t id = 0;
- id = GenerateExpressionWithLoadIfNeeded(e);
- if (id == 0) {
- return 0;
- }
-
- auto* value_type = e->Type()->UnwrapRef();
- // If the result and value types are the same we can just use the object.
- // If the result is not a vector then we should have validated that the
- // value type is a correctly sized vector so we can just use it directly.
- if (result_type == value_type || result_type->Is<sem::Matrix>() ||
- result_type->Is<sem::Array>() || result_type->Is<sem::Struct>()) {
- out << "_" << id;
-
- ops.push_back(Operand::Int(id));
- continue;
- }
-
- // Both scalars, but not the same type so we need to generate a conversion
- // of the value.
- if (value_type->is_scalar() && result_type->is_scalar()) {
- id = GenerateCastOrCopyOrPassthrough(result_type, args[0]->Declaration(),
- global_var);
- out << "_" << id;
- ops.push_back(Operand::Int(id));
- continue;
- }
-
- // When handling vectors as the values there a few cases to take into
- // consideration:
- // 1. Module scoped vec3<f32>(vec2<f32>(1, 2), 3) -> OpSpecConstantOp
- // 2. Function scoped vec3<f32>(vec2<f32>(1, 2), 3) -> OpCompositeExtract
- // 3. Either array<vec3<f32>, 1>(vec3<f32>(1, 2, 3)) -> use the ID.
- // -> handled above
- //
- // For cases 1 and 2, if the type is different we also may need to insert
- // a type cast.
- if (auto* vec = value_type->As<sem::Vector>()) {
- auto* vec_type = vec->type();
+ if (can_cast_or_copy) {
+ return GenerateCastOrCopyOrPassthrough(result_type, args[0]->Declaration(), global_var);
+ }
- auto value_type_id = GenerateTypeIfNeeded(vec_type);
- if (value_type_id == 0) {
+ auto type_id = GenerateTypeIfNeeded(result_type);
+ if (type_id == 0) {
return 0;
- }
+ }
- for (uint32_t i = 0; i < vec->Width(); ++i) {
- auto extract = result_op();
- auto extract_id = extract.to_i();
+ bool result_is_constant_composite = constructor_is_const;
+ bool result_is_spec_composite = false;
- if (!global_var) {
- // A non-global initializer. Case 2.
- if (!push_function_inst(spv::Op::OpCompositeExtract,
- {Operand::Int(value_type_id), extract,
- Operand::Int(id), Operand::Int(i)})) {
- return false;
- }
+ if (auto* vec = result_type->As<sem::Vector>()) {
+ result_type = vec->type();
+ }
- // We no longer have a constant composite, but have to do a
- // composite construction as these calls are inside a function.
- result_is_constant_composite = false;
- } else {
- // A global initializer, must use OpSpecConstantOp. Case 1.
- auto idx_id = GenerateConstantIfNeeded(ScalarConstant::U32(i));
- if (idx_id == 0) {
+ OperandList ops;
+ static constexpr size_t kOpsResultIdx = 1;
+ static constexpr size_t kOpsFirstValueIdx = 2;
+ ops.reserve(8);
+ ops.push_back(Operand(type_id));
+ ops.push_back(Operand(0u)); // Placeholder for the result ID
+
+ for (auto* e : args) {
+ uint32_t id = 0;
+ id = GenerateExpressionWithLoadIfNeeded(e);
+ if (id == 0) {
return 0;
- }
- push_type(spv::Op::OpSpecConstantOp,
- {Operand::Int(value_type_id), extract,
- Operand::Int(SpvOpCompositeExtract), Operand::Int(id),
- Operand::Int(idx_id)});
+ }
- result_is_spec_composite = true;
+ auto* value_type = e->Type()->UnwrapRef();
+ // If the result and value types are the same we can just use the object.
+ // If the result is not a vector then we should have validated that the
+ // value type is a correctly sized vector so we can just use it directly.
+ if (result_type == value_type || result_type->Is<sem::Matrix>() ||
+ result_type->Is<sem::Array>() || result_type->Is<sem::Struct>()) {
+ ops.push_back(Operand(id));
+ continue;
}
- out << "_" << extract_id;
- ops.push_back(Operand::Int(extract_id));
- }
- } else {
- error_ = "Unhandled type cast value type";
- return 0;
- }
- }
-
- // For a single-value vector initializer, splat the initializer value.
- auto* const init_result_type = call->Type()->UnwrapRef();
- if (args.size() == 1 && init_result_type->is_scalar_vector() &&
- args[0]->Type()->UnwrapRef()->is_scalar()) {
- size_t vec_size = init_result_type->As<sem::Vector>()->Width();
- for (size_t i = 0; i < (vec_size - 1); ++i) {
- ops.push_back(ops[0]);
- }
- }
-
- auto str = out.str();
- auto val = type_constructor_to_id_.find(str);
- if (val != type_constructor_to_id_.end()) {
- return val->second;
- }
-
- auto result = result_op();
- ops.insert(ops.begin(), result);
- ops.insert(ops.begin(), Operand::Int(type_id));
-
- type_constructor_to_id_[str] = result.to_i();
-
- if (result_is_spec_composite) {
- push_type(spv::Op::OpSpecConstantComposite, ops);
- } else if (result_is_constant_composite) {
- push_type(spv::Op::OpConstantComposite, ops);
- } else {
- if (!push_function_inst(spv::Op::OpCompositeConstruct, ops)) {
- return 0;
+ // Both scalars, but not the same type so we need to generate a conversion
+ // of the value.
+ if (value_type->is_scalar() && result_type->is_scalar()) {
+ id = GenerateCastOrCopyOrPassthrough(result_type, args[0]->Declaration(), global_var);
+ ops.push_back(Operand(id));
+ continue;
+ }
+
+        // When handling vectors as the values, there are a few cases to take into
+ // consideration:
+ // 1. Module scoped vec3<f32>(vec2<f32>(1, 2), 3) -> OpSpecConstantOp
+ // 2. Function scoped vec3<f32>(vec2<f32>(1, 2), 3) -> OpCompositeExtract
+ // 3. Either array<vec3<f32>, 1>(vec3<f32>(1, 2, 3)) -> use the ID.
+ // -> handled above
+ //
+ // For cases 1 and 2, if the type is different we also may need to insert
+ // a type cast.
+ if (auto* vec = value_type->As<sem::Vector>()) {
+ auto* vec_type = vec->type();
+
+ auto value_type_id = GenerateTypeIfNeeded(vec_type);
+ if (value_type_id == 0) {
+ return 0;
+ }
+
+ for (uint32_t i = 0; i < vec->Width(); ++i) {
+ auto extract = result_op();
+ auto extract_id = std::get<uint32_t>(extract);
+
+ if (!global_var) {
+ // A non-global initializer. Case 2.
+ if (!push_function_inst(
+ spv::Op::OpCompositeExtract,
+ {Operand(value_type_id), extract, Operand(id), Operand(i)})) {
+ return false;
+ }
+
+ // We no longer have a constant composite, but have to do a
+ // composite construction as these calls are inside a function.
+ result_is_constant_composite = false;
+ } else {
+ // A global initializer, must use OpSpecConstantOp. Case 1.
+ auto idx_id = GenerateConstantIfNeeded(ScalarConstant::U32(i));
+ if (idx_id == 0) {
+ return 0;
+ }
+ push_type(spv::Op::OpSpecConstantOp,
+ {Operand(value_type_id), extract, U32Operand(SpvOpCompositeExtract),
+ Operand(id), Operand(idx_id)});
+
+ result_is_spec_composite = true;
+ }
+
+ ops.push_back(Operand(extract_id));
+ }
+ } else {
+ error_ = "Unhandled type cast value type";
+ return 0;
+ }
+ }
+
+ // For a single-value vector initializer, splat the initializer value.
+ auto* const init_result_type = call->Type()->UnwrapRef();
+ if (args.size() == 1 && init_result_type->is_scalar_vector() &&
+ args[0]->Type()->UnwrapRef()->is_scalar()) {
+ size_t vec_size = init_result_type->As<sem::Vector>()->Width();
+ for (size_t i = 0; i < (vec_size - 1); ++i) {
+ ops.push_back(ops[kOpsFirstValueIdx]);
+ }
}
- }
- return result.to_i();
+ auto& stack = (result_is_spec_composite || result_is_constant_composite)
+ ? scope_stack_[0] // Global scope
+ : scope_stack_.back(); // Lexical scope
+
+ return utils::GetOrCreate(stack.type_ctor_to_id_, OperandListKey{ops}, [&]() -> uint32_t {
+ auto result = result_op();
+ ops[kOpsResultIdx] = result;
+
+ if (result_is_spec_composite) {
+ push_type(spv::Op::OpSpecConstantComposite, ops);
+ } else if (result_is_constant_composite) {
+ push_type(spv::Op::OpConstantComposite, ops);
+ } else {
+ if (!push_function_inst(spv::Op::OpCompositeConstruct, ops)) {
+ return 0;
+ }
+ }
+
+ return std::get<uint32_t>(result);
+ });
}
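
For a single-scalar vector constructor such as vec3<f32>(1.0), the code above splats the lone value so that the composite instruction receives one operand per component, and it reserves slot kOpsResultIdx for the result ID so the finished operand list can double as the deduplication key. A small sketch of the splat, with a plain vector of IDs standing in for the OperandList:

    #include <cstddef>
    #include <cstdint>
    #include <iostream>
    #include <vector>

    int main() {
        const uint32_t type_id = 10;   // id of vec3<f32>
        const uint32_t value_id = 42;  // id of the single scalar argument
        const std::size_t vec_width = 3;

        // ops = { result type, placeholder for the result id, values... }
        std::vector<uint32_t> ops = {type_id, 0u, value_id};
        const std::size_t kOpsFirstValueIdx = 2;

        // One scalar constructing a wider vector: repeat it per remaining component.
        if (ops.size() - kOpsFirstValueIdx == 1) {
            for (std::size_t i = 0; i < vec_width - 1; ++i) {
                ops.push_back(ops[kOpsFirstValueIdx]);
            }
        }

        for (uint32_t id : ops) {
            std::cout << "%" << id << " ";  // prints: %10 %0 %42 %42 %42
        }
        std::cout << "\n";
    }
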
-uint32_t Builder::GenerateCastOrCopyOrPassthrough(
- const sem::Type* to_type,
- const ast::Expression* from_expr,
- bool is_global_init) {
- // This should not happen as we rely on constant folding to obviate
- // casts/conversions for module-scope variables
- if (is_global_init) {
- TINT_ICE(Writer, builder_.Diagnostics())
- << "Module-level conversions are not supported. Conversions should "
- "have already been constant-folded by the FoldConstants transform.";
- return 0;
- }
+uint32_t Builder::GenerateCastOrCopyOrPassthrough(const sem::Type* to_type,
+ const ast::Expression* from_expr,
+ bool is_global_init) {
+ // This should not happen as we rely on constant folding to obviate
+ // casts/conversions for module-scope variables
+ if (is_global_init) {
+ TINT_ICE(Writer, builder_.Diagnostics())
+ << "Module-level conversions are not supported. Conversions should "
+ "have already been constant-folded by the FoldConstants transform.";
+ return 0;
+ }
+
+ auto elem_type_of = [](const sem::Type* t) -> const sem::Type* {
+ if (t->is_scalar()) {
+ return t;
+ }
+ if (auto* v = t->As<sem::Vector>()) {
+ return v->type();
+ }
+ return nullptr;
+ };
- auto elem_type_of = [](const sem::Type* t) -> const sem::Type* {
- if (t->is_scalar()) {
- return t;
+ auto result = result_op();
+ auto result_id = std::get<uint32_t>(result);
+
+ auto result_type_id = GenerateTypeIfNeeded(to_type);
+ if (result_type_id == 0) {
+ return 0;
}
- if (auto* v = t->As<sem::Vector>()) {
- return v->type();
+
+ auto val_id = GenerateExpressionWithLoadIfNeeded(from_expr);
+ if (val_id == 0) {
+ return 0;
}
- return nullptr;
- };
- auto result = result_op();
- auto result_id = result.to_i();
+ auto* from_type = TypeOf(from_expr)->UnwrapRef();
+
+ spv::Op op = spv::Op::OpNop;
+ if ((from_type->Is<sem::I32>() && to_type->Is<sem::F32>()) ||
+ (from_type->is_signed_integer_vector() && to_type->is_float_vector())) {
+ op = spv::Op::OpConvertSToF;
+ } else if ((from_type->Is<sem::U32>() && to_type->Is<sem::F32>()) ||
+ (from_type->is_unsigned_integer_vector() && to_type->is_float_vector())) {
+ op = spv::Op::OpConvertUToF;
+ } else if ((from_type->Is<sem::F32>() && to_type->Is<sem::I32>()) ||
+ (from_type->is_float_vector() && to_type->is_signed_integer_vector())) {
+ op = spv::Op::OpConvertFToS;
+ } else if ((from_type->Is<sem::F32>() && to_type->Is<sem::U32>()) ||
+ (from_type->is_float_vector() && to_type->is_unsigned_integer_vector())) {
+ op = spv::Op::OpConvertFToU;
+ } else if ((from_type->Is<sem::Bool>() && to_type->Is<sem::Bool>()) ||
+ (from_type->Is<sem::U32>() && to_type->Is<sem::U32>()) ||
+ (from_type->Is<sem::I32>() && to_type->Is<sem::I32>()) ||
+ (from_type->Is<sem::F32>() && to_type->Is<sem::F32>()) ||
+ (from_type->Is<sem::Vector>() && (from_type == to_type))) {
+ return val_id;
+ } else if ((from_type->Is<sem::I32>() && to_type->Is<sem::U32>()) ||
+ (from_type->Is<sem::U32>() && to_type->Is<sem::I32>()) ||
+ (from_type->is_signed_integer_vector() && to_type->is_unsigned_integer_vector()) ||
+ (from_type->is_unsigned_integer_vector() &&
+ to_type->is_integer_scalar_or_vector())) {
+ op = spv::Op::OpBitcast;
+ } else if ((from_type->is_numeric_scalar() && to_type->Is<sem::Bool>()) ||
+ (from_type->is_numeric_vector() && to_type->is_bool_vector())) {
+ // Convert scalar (vector) to bool (vector)
+
+ // Return the result of comparing from_expr with zero
+ uint32_t zero = GenerateConstantNullIfNeeded(from_type);
+ const auto* from_elem_type = elem_type_of(from_type);
+ op = from_elem_type->is_integer_scalar() ? spv::Op::OpINotEqual : spv::Op::OpFUnordNotEqual;
+ if (!push_function_inst(op, {Operand(result_type_id), Operand(result_id), Operand(val_id),
+ Operand(zero)})) {
+ return 0;
+ }
- auto result_type_id = GenerateTypeIfNeeded(to_type);
- if (result_type_id == 0) {
- return 0;
- }
+ return result_id;
+ } else if (from_type->is_bool_scalar_or_vector() && to_type->is_numeric_scalar_or_vector()) {
+ // Convert bool scalar/vector to numeric scalar/vector.
+ // Use the bool to select between 1 (if true) and 0 (if false).
+
+ const auto* to_elem_type = elem_type_of(to_type);
+ uint32_t one_id;
+ uint32_t zero_id;
+ if (to_elem_type->Is<sem::F32>()) {
+ zero_id = GenerateConstantIfNeeded(ScalarConstant::F32(0));
+ one_id = GenerateConstantIfNeeded(ScalarConstant::F32(1));
+ } else if (to_elem_type->Is<sem::U32>()) {
+ zero_id = GenerateConstantIfNeeded(ScalarConstant::U32(0));
+ one_id = GenerateConstantIfNeeded(ScalarConstant::U32(1));
+ } else if (to_elem_type->Is<sem::I32>()) {
+ zero_id = GenerateConstantIfNeeded(ScalarConstant::I32(0));
+ one_id = GenerateConstantIfNeeded(ScalarConstant::I32(1));
+ } else {
+ error_ = "invalid destination type for bool conversion";
+ return false;
+ }
+ if (auto* to_vec = to_type->As<sem::Vector>()) {
+ // Splat the scalars into vectors.
+ zero_id = GenerateConstantVectorSplatIfNeeded(to_vec, zero_id);
+ one_id = GenerateConstantVectorSplatIfNeeded(to_vec, one_id);
+ }
+ if (!one_id || !zero_id) {
+ return false;
+ }
- auto val_id = GenerateExpressionWithLoadIfNeeded(from_expr);
- if (val_id == 0) {
- return 0;
- }
-
- auto* from_type = TypeOf(from_expr)->UnwrapRef();
-
- spv::Op op = spv::Op::OpNop;
- if ((from_type->Is<sem::I32>() && to_type->Is<sem::F32>()) ||
- (from_type->is_signed_integer_vector() && to_type->is_float_vector())) {
- op = spv::Op::OpConvertSToF;
- } else if ((from_type->Is<sem::U32>() && to_type->Is<sem::F32>()) ||
- (from_type->is_unsigned_integer_vector() &&
- to_type->is_float_vector())) {
- op = spv::Op::OpConvertUToF;
- } else if ((from_type->Is<sem::F32>() && to_type->Is<sem::I32>()) ||
- (from_type->is_float_vector() &&
- to_type->is_signed_integer_vector())) {
- op = spv::Op::OpConvertFToS;
- } else if ((from_type->Is<sem::F32>() && to_type->Is<sem::U32>()) ||
- (from_type->is_float_vector() &&
- to_type->is_unsigned_integer_vector())) {
- op = spv::Op::OpConvertFToU;
- } else if ((from_type->Is<sem::Bool>() && to_type->Is<sem::Bool>()) ||
- (from_type->Is<sem::U32>() && to_type->Is<sem::U32>()) ||
- (from_type->Is<sem::I32>() && to_type->Is<sem::I32>()) ||
- (from_type->Is<sem::F32>() && to_type->Is<sem::F32>()) ||
- (from_type->Is<sem::Vector>() && (from_type == to_type))) {
- return val_id;
- } else if ((from_type->Is<sem::I32>() && to_type->Is<sem::U32>()) ||
- (from_type->Is<sem::U32>() && to_type->Is<sem::I32>()) ||
- (from_type->is_signed_integer_vector() &&
- to_type->is_unsigned_integer_vector()) ||
- (from_type->is_unsigned_integer_vector() &&
- to_type->is_integer_scalar_or_vector())) {
- op = spv::Op::OpBitcast;
- } else if ((from_type->is_numeric_scalar() && to_type->Is<sem::Bool>()) ||
- (from_type->is_numeric_vector() && to_type->is_bool_vector())) {
- // Convert scalar (vector) to bool (vector)
-
- // Return the result of comparing from_expr with zero
- uint32_t zero = GenerateConstantNullIfNeeded(from_type);
- const auto* from_elem_type = elem_type_of(from_type);
- op = from_elem_type->is_integer_scalar() ? spv::Op::OpINotEqual
- : spv::Op::OpFUnordNotEqual;
- if (!push_function_inst(
- op, {Operand::Int(result_type_id), Operand::Int(result_id),
- Operand::Int(val_id), Operand::Int(zero)})) {
- return 0;
- }
+ op = spv::Op::OpSelect;
+ if (!push_function_inst(op, {Operand(result_type_id), Operand(result_id), Operand(val_id),
+ Operand(one_id), Operand(zero_id)})) {
+ return 0;
+ }
- return result_id;
- } else if (from_type->is_bool_scalar_or_vector() &&
- to_type->is_numeric_scalar_or_vector()) {
- // Convert bool scalar/vector to numeric scalar/vector.
- // Use the bool to select between 1 (if true) and 0 (if false).
-
- const auto* to_elem_type = elem_type_of(to_type);
- uint32_t one_id;
- uint32_t zero_id;
- if (to_elem_type->Is<sem::F32>()) {
- ast::FloatLiteralExpression one(ProgramID(), Source{}, 1.0f);
- ast::FloatLiteralExpression zero(ProgramID(), Source{}, 0.0f);
- one_id = GenerateLiteralIfNeeded(nullptr, &one);
- zero_id = GenerateLiteralIfNeeded(nullptr, &zero);
- } else if (to_elem_type->Is<sem::U32>()) {
- ast::UintLiteralExpression one(ProgramID(), Source{}, 1);
- ast::UintLiteralExpression zero(ProgramID(), Source{}, 0);
- one_id = GenerateLiteralIfNeeded(nullptr, &one);
- zero_id = GenerateLiteralIfNeeded(nullptr, &zero);
- } else if (to_elem_type->Is<sem::I32>()) {
- ast::SintLiteralExpression one(ProgramID(), Source{}, 1);
- ast::SintLiteralExpression zero(ProgramID(), Source{}, 0);
- one_id = GenerateLiteralIfNeeded(nullptr, &one);
- zero_id = GenerateLiteralIfNeeded(nullptr, &zero);
+ return result_id;
} else {
- error_ = "invalid destination type for bool conversion";
- return false;
+ TINT_ICE(Writer, builder_.Diagnostics()) << "Invalid from_type";
}
- if (auto* to_vec = to_type->As<sem::Vector>()) {
- // Splat the scalars into vectors.
- one_id = GenerateConstantVectorSplatIfNeeded(to_vec, one_id);
- zero_id = GenerateConstantVectorSplatIfNeeded(to_vec, zero_id);
- }
- if (!one_id || !zero_id) {
- return false;
+
+ if (op == spv::Op::OpNop) {
+ error_ = "unable to determine conversion type for cast, from: " +
+ from_type->FriendlyName(builder_.Symbols()) +
+ " to: " + to_type->FriendlyName(builder_.Symbols());
+ return 0;
}
- op = spv::Op::OpSelect;
- if (!push_function_inst(
- op, {Operand::Int(result_type_id), Operand::Int(result_id),
- Operand::Int(val_id), Operand::Int(one_id),
- Operand::Int(zero_id)})) {
- return 0;
+ if (!push_function_inst(op, {Operand(result_type_id), result, Operand(val_id)})) {
+ return 0;
}
return result_id;
- } else {
- TINT_ICE(Writer, builder_.Diagnostics()) << "Invalid from_type";
- }
-
- if (op == spv::Op::OpNop) {
- error_ = "unable to determine conversion type for cast, from: " +
- from_type->FriendlyName(builder_.Symbols()) +
- " to: " + to_type->FriendlyName(builder_.Symbols());
- return 0;
- }
-
- if (!push_function_inst(
- op, {Operand::Int(result_type_id), result, Operand::Int(val_id)})) {
- return 0;
- }
-
- return result_id;
}
uint32_t Builder::GenerateLiteralIfNeeded(const ast::Variable* var,
const ast::LiteralExpression* lit) {
- ScalarConstant constant;
-
- auto* global = builder_.Sem().Get<sem::GlobalVariable>(var);
- if (global && global->IsOverridable()) {
- constant.is_spec_op = true;
- constant.constant_id = global->ConstantId();
- }
-
- Switch(
- lit,
- [&](const ast::BoolLiteralExpression* l) {
- constant.kind = ScalarConstant::Kind::kBool;
- constant.value.b = l->value;
- },
- [&](const ast::SintLiteralExpression* sl) {
- constant.kind = ScalarConstant::Kind::kI32;
- constant.value.i32 = sl->value;
- },
- [&](const ast::UintLiteralExpression* ul) {
- constant.kind = ScalarConstant::Kind::kU32;
- constant.value.u32 = ul->value;
- },
- [&](const ast::FloatLiteralExpression* fl) {
- constant.kind = ScalarConstant::Kind::kF32;
- constant.value.f32 = fl->value;
- },
- [&](Default) { error_ = "unknown literal type"; });
-
- if (!error_.empty()) {
- return false;
- }
+ ScalarConstant constant;
- return GenerateConstantIfNeeded(constant);
-}
+ auto* global = builder_.Sem().Get<sem::GlobalVariable>(var);
+ if (global && global->IsOverridable()) {
+ constant.is_spec_op = true;
+ constant.constant_id = global->ConstantId();
+ }
-uint32_t Builder::GenerateConstantIfNeeded(const ScalarConstant& constant) {
- auto it = const_to_id_.find(constant);
- if (it != const_to_id_.end()) {
- return it->second;
- }
+ Switch(
+ lit,
+ [&](const ast::BoolLiteralExpression* l) {
+ constant.kind = ScalarConstant::Kind::kBool;
+ constant.value.b = l->value;
+ },
+ [&](const ast::IntLiteralExpression* i) {
+ switch (i->suffix) {
+ case ast::IntLiteralExpression::Suffix::kNone:
+ case ast::IntLiteralExpression::Suffix::kI:
+ constant.kind = ScalarConstant::Kind::kI32;
+ constant.value.i32 = static_cast<int32_t>(i->value);
+ return;
+ case ast::IntLiteralExpression::Suffix::kU:
+ constant.kind = ScalarConstant::Kind::kU32;
+ constant.value.u32 = static_cast<uint32_t>(i->value);
+ return;
+ }
+ },
+ [&](const ast::FloatLiteralExpression* f) {
+ switch (f->suffix) {
+ case ast::FloatLiteralExpression::Suffix::kNone:
+ case ast::FloatLiteralExpression::Suffix::kF:
+ constant.kind = ScalarConstant::Kind::kF32;
+ constant.value.f32 = static_cast<float>(f->value);
+ return;
+ case ast::FloatLiteralExpression::Suffix::kH:
+ error_ = "Type f16 is not completely implemented yet";
+ }
+ },
+ [&](Default) { error_ = "unknown literal type"; });
+
+ if (!error_.empty()) {
+ return false;
+ }
- uint32_t type_id = 0;
+ return GenerateConstantIfNeeded(constant);
+}
- switch (constant.kind) {
- case ScalarConstant::Kind::kU32: {
- type_id = GenerateTypeIfNeeded(builder_.create<sem::U32>());
- break;
+uint32_t Builder::GenerateConstantIfNeeded(const sem::Constant& constant) {
+ if (constant.AllZero()) {
+ return GenerateConstantNullIfNeeded(constant.Type());
}
- case ScalarConstant::Kind::kI32: {
- type_id = GenerateTypeIfNeeded(builder_.create<sem::I32>());
- break;
+
+ static constexpr size_t kOpsResultIdx = 1; // operand index of the result
+ auto& global_scope = scope_stack_[0];
+
+ auto gen_bool = [&](size_t element_idx) {
+ bool val = constant.Element<AInt>(element_idx);
+ return GenerateConstantIfNeeded(ScalarConstant::Bool(val));
+ };
+ auto gen_f32 = [&](size_t element_idx) {
+ auto val = f32(constant.Element<AFloat>(element_idx));
+ return GenerateConstantIfNeeded(ScalarConstant::F32(val.value));
+ };
+ auto gen_i32 = [&](size_t element_idx) {
+ auto val = i32(constant.Element<AInt>(element_idx));
+ return GenerateConstantIfNeeded(ScalarConstant::I32(val.value));
+ };
+ auto gen_u32 = [&](size_t element_idx) {
+ auto val = u32(constant.Element<AInt>(element_idx));
+ return GenerateConstantIfNeeded(ScalarConstant::U32(val.value));
+ };
+ auto gen_els = [&](std::vector<Operand>& ids, size_t start, size_t end, auto gen_el) {
+ for (size_t i = start; i < end; i++) {
+ auto id = gen_el(i);
+ if (!id) {
+ return false;
+ }
+ ids.emplace_back(id);
+ }
+ return true;
+ };
+ auto gen_vector = [&](const sem::Vector* ty, size_t start, size_t end) -> uint32_t {
+ auto type_id = GenerateTypeIfNeeded(ty);
+ if (!type_id) {
+ return 0;
+ }
+
+ std::vector<Operand> ops;
+ ops.reserve(end - start + 2);
+ ops.emplace_back(type_id);
+ ops.push_back(Operand(0u)); // Placeholder for the result ID
+ auto ok = Switch(
+ constant.ElementType(), //
+ [&](const sem::Bool*) { return gen_els(ops, start, end, gen_bool); }, //
+ [&](const sem::F32*) { return gen_els(ops, start, end, gen_f32); }, //
+ [&](const sem::I32*) { return gen_els(ops, start, end, gen_i32); }, //
+ [&](const sem::U32*) { return gen_els(ops, start, end, gen_u32); }, //
+ [&](Default) {
+ error_ = "unhandled constant element type: " + builder_.FriendlyName(ty);
+ return false;
+ });
+ if (!ok) {
+ return 0;
+ }
+
+ return utils::GetOrCreate(global_scope.type_ctor_to_id_, OperandListKey{ops},
+ [&]() -> uint32_t {
+ auto result = result_op();
+ ops[kOpsResultIdx] = result;
+ push_type(spv::Op::OpConstantComposite, std::move(ops));
+ return std::get<uint32_t>(result);
+ });
+ };
+ auto gen_matrix = [&](const sem::Matrix* m) -> uint32_t {
+ auto mat_type_id = GenerateTypeIfNeeded(m);
+ if (!mat_type_id) {
+ return 0;
+ }
+
+ std::vector<Operand> ops;
+ ops.reserve(m->columns() + 2);
+ ops.emplace_back(mat_type_id);
+ ops.push_back(Operand(0u)); // Placeholder for the result ID
+
+ for (size_t column_idx = 0; column_idx < m->columns(); column_idx++) {
+ size_t start = m->rows() * column_idx;
+ size_t end = m->rows() * (column_idx + 1);
+ auto column_id = gen_vector(m->ColumnType(), start, end);
+ if (!column_id) {
+ return 0;
+ }
+ ops.emplace_back(column_id);
+ }
+
+ return utils::GetOrCreate(global_scope.type_ctor_to_id_, OperandListKey{ops},
+ [&]() -> uint32_t {
+ auto result = result_op();
+ ops[kOpsResultIdx] = result;
+ push_type(spv::Op::OpConstantComposite, std::move(ops));
+ return std::get<uint32_t>(result);
+ });
+ };
+
+ return Switch(
+ constant.Type(), //
+ [&](const sem::Bool*) { return gen_bool(0); }, //
+ [&](const sem::F32*) { return gen_f32(0); }, //
+ [&](const sem::I32*) { return gen_i32(0); }, //
+ [&](const sem::U32*) { return gen_u32(0); }, //
+ [&](const sem::Vector* v) { return gen_vector(v, 0, constant.ElementCount()); }, //
+ [&](const sem::Matrix* m) { return gen_matrix(m); }, //
+ [&](Default) {
+ error_ = "unhandled constant type: " + builder_.FriendlyName(constant.Type());
+ return false;
+ });
+}
+
+uint32_t Builder::GenerateConstantIfNeeded(const ScalarConstant& constant) {
+ auto it = const_to_id_.find(constant);
+ if (it != const_to_id_.end()) {
+ return it->second;
}
- case ScalarConstant::Kind::kF32: {
- type_id = GenerateTypeIfNeeded(builder_.create<sem::F32>());
- break;
+
+ uint32_t type_id = 0;
+
+ switch (constant.kind) {
+ case ScalarConstant::Kind::kU32: {
+ type_id = GenerateTypeIfNeeded(builder_.create<sem::U32>());
+ break;
+ }
+ case ScalarConstant::Kind::kI32: {
+ type_id = GenerateTypeIfNeeded(builder_.create<sem::I32>());
+ break;
+ }
+ case ScalarConstant::Kind::kF32: {
+ type_id = GenerateTypeIfNeeded(builder_.create<sem::F32>());
+ break;
+ }
+ case ScalarConstant::Kind::kBool: {
+ type_id = GenerateTypeIfNeeded(builder_.create<sem::Bool>());
+ break;
+ }
}
- case ScalarConstant::Kind::kBool: {
- type_id = GenerateTypeIfNeeded(builder_.create<sem::Bool>());
- break;
+
+ if (type_id == 0) {
+ return 0;
}
- }
- if (type_id == 0) {
- return 0;
- }
+ auto result = result_op();
+ auto result_id = std::get<uint32_t>(result);
- auto result = result_op();
- auto result_id = result.to_i();
+ if (constant.is_spec_op) {
+ push_annot(spv::Op::OpDecorate, {Operand(result_id), U32Operand(SpvDecorationSpecId),
+ Operand(constant.constant_id)});
+ }
- if (constant.is_spec_op) {
- push_annot(spv::Op::OpDecorate,
- {Operand::Int(result_id), Operand::Int(SpvDecorationSpecId),
- Operand::Int(constant.constant_id)});
- }
-
- switch (constant.kind) {
- case ScalarConstant::Kind::kU32: {
- push_type(
- constant.is_spec_op ? spv::Op::OpSpecConstant : spv::Op::OpConstant,
- {Operand::Int(type_id), result, Operand::Int(constant.value.u32)});
- break;
- }
- case ScalarConstant::Kind::kI32: {
- push_type(
- constant.is_spec_op ? spv::Op::OpSpecConstant : spv::Op::OpConstant,
- {Operand::Int(type_id), result, Operand::Int(constant.value.i32)});
- break;
- }
- case ScalarConstant::Kind::kF32: {
- push_type(
- constant.is_spec_op ? spv::Op::OpSpecConstant : spv::Op::OpConstant,
- {Operand::Int(type_id), result, Operand::Float(constant.value.f32)});
- break;
- }
- case ScalarConstant::Kind::kBool: {
- if (constant.value.b) {
- push_type(constant.is_spec_op ? spv::Op::OpSpecConstantTrue
- : spv::Op::OpConstantTrue,
- {Operand::Int(type_id), result});
- } else {
- push_type(constant.is_spec_op ? spv::Op::OpSpecConstantFalse
- : spv::Op::OpConstantFalse,
- {Operand::Int(type_id), result});
- }
- break;
+ switch (constant.kind) {
+ case ScalarConstant::Kind::kU32: {
+ push_type(constant.is_spec_op ? spv::Op::OpSpecConstant : spv::Op::OpConstant,
+ {Operand(type_id), result, Operand(constant.value.u32)});
+ break;
+ }
+ case ScalarConstant::Kind::kI32: {
+ push_type(constant.is_spec_op ? spv::Op::OpSpecConstant : spv::Op::OpConstant,
+ {Operand(type_id), result, U32Operand(constant.value.i32)});
+ break;
+ }
+ case ScalarConstant::Kind::kF32: {
+ push_type(constant.is_spec_op ? spv::Op::OpSpecConstant : spv::Op::OpConstant,
+ {Operand(type_id), result, Operand(constant.value.f32)});
+ break;
+ }
+ case ScalarConstant::Kind::kBool: {
+ if (constant.value.b) {
+ push_type(
+ constant.is_spec_op ? spv::Op::OpSpecConstantTrue : spv::Op::OpConstantTrue,
+ {Operand(type_id), result});
+ } else {
+ push_type(
+ constant.is_spec_op ? spv::Op::OpSpecConstantFalse : spv::Op::OpConstantFalse,
+ {Operand(type_id), result});
+ }
+ break;
+ }
}
- }
- const_to_id_[constant] = result_id;
- return result_id;
+ const_to_id_[constant] = result_id;
+ return result_id;
}
uint32_t Builder::GenerateConstantNullIfNeeded(const sem::Type* type) {
- auto type_id = GenerateTypeIfNeeded(type);
- if (type_id == 0) {
- return 0;
- }
+ auto type_id = GenerateTypeIfNeeded(type);
+ if (type_id == 0) {
+ return 0;
+ }
- return utils::GetOrCreate(const_null_to_id_, type, [&] {
- auto result = result_op();
+ return utils::GetOrCreate(const_null_to_id_, type, [&] {
+ auto result = result_op();
- push_type(spv::Op::OpConstantNull, {Operand::Int(type_id), result});
+ push_type(spv::Op::OpConstantNull, {Operand(type_id), result});
- return result.to_i();
- });
+ return std::get<uint32_t>(result);
+ });
}
-uint32_t Builder::GenerateConstantVectorSplatIfNeeded(const sem::Vector* type,
- uint32_t value_id) {
- auto type_id = GenerateTypeIfNeeded(type);
- if (type_id == 0 || value_id == 0) {
- return 0;
- }
+uint32_t Builder::GenerateConstantVectorSplatIfNeeded(const sem::Vector* type, uint32_t value_id) {
+ auto type_id = GenerateTypeIfNeeded(type);
+ if (type_id == 0 || value_id == 0) {
+ return 0;
+ }
- uint64_t key = (static_cast<uint64_t>(type->Width()) << 32) + value_id;
- return utils::GetOrCreate(const_splat_to_id_, key, [&] {
- auto result = result_op();
- auto result_id = result.to_i();
+ uint64_t key = (static_cast<uint64_t>(type->Width()) << 32) + value_id;
+ return utils::GetOrCreate(const_splat_to_id_, key, [&] {
+ auto result = result_op();
+ auto result_id = std::get<uint32_t>(result);
- OperandList ops;
- ops.push_back(Operand::Int(type_id));
- ops.push_back(result);
- for (uint32_t i = 0; i < type->Width(); i++) {
- ops.push_back(Operand::Int(value_id));
- }
- push_type(spv::Op::OpConstantComposite, ops);
+ OperandList ops;
+ ops.push_back(Operand(type_id));
+ ops.push_back(result);
+ for (uint32_t i = 0; i < type->Width(); i++) {
+ ops.push_back(Operand(value_id));
+ }
+ push_type(spv::Op::OpConstantComposite, ops);
- const_splat_to_id_[key] = result_id;
- return result_id;
- });
+ const_splat_to_id_[key] = result_id;
+ return result_id;
+ });
}
-uint32_t Builder::GenerateShortCircuitBinaryExpression(
- const ast::BinaryExpression* expr) {
- auto lhs_id = GenerateExpressionWithLoadIfNeeded(expr->lhs);
- if (lhs_id == 0) {
- return false;
- }
+uint32_t Builder::GenerateShortCircuitBinaryExpression(const ast::BinaryExpression* expr) {
+ auto lhs_id = GenerateExpressionWithLoadIfNeeded(expr->lhs);
+ if (lhs_id == 0) {
+ return false;
+ }
- // Get the ID of the basic block where control flow will diverge. It's the
- // last basic block generated for the left-hand-side of the operator.
- auto original_label_id = current_label_id_;
+ // Get the ID of the basic block where control flow will diverge. It's the
+ // last basic block generated for the left-hand-side of the operator.
+ auto original_label_id = current_label_id_;
- auto type_id = GenerateTypeIfNeeded(TypeOf(expr));
- if (type_id == 0) {
- return 0;
- }
+ auto type_id = GenerateTypeIfNeeded(TypeOf(expr));
+ if (type_id == 0) {
+ return 0;
+ }
- auto merge_block = result_op();
- auto merge_block_id = merge_block.to_i();
+ auto merge_block = result_op();
+ auto merge_block_id = std::get<uint32_t>(merge_block);
- auto block = result_op();
- auto block_id = block.to_i();
+ auto block = result_op();
+ auto block_id = std::get<uint32_t>(block);
- auto true_block_id = block_id;
- auto false_block_id = merge_block_id;
+ auto true_block_id = block_id;
+ auto false_block_id = merge_block_id;
- // For a logical or we want to only check the RHS if the LHS is failed.
- if (expr->IsLogicalOr()) {
- std::swap(true_block_id, false_block_id);
- }
+    // For a logical-or we only want to check the RHS if the LHS is false.
+ if (expr->IsLogicalOr()) {
+ std::swap(true_block_id, false_block_id);
+ }
- if (!push_function_inst(spv::Op::OpSelectionMerge,
- {Operand::Int(merge_block_id),
- Operand::Int(SpvSelectionControlMaskNone)})) {
- return 0;
- }
- if (!push_function_inst(spv::Op::OpBranchConditional,
- {Operand::Int(lhs_id), Operand::Int(true_block_id),
- Operand::Int(false_block_id)})) {
- return 0;
- }
+ if (!push_function_inst(spv::Op::OpSelectionMerge,
+ {Operand(merge_block_id), U32Operand(SpvSelectionControlMaskNone)})) {
+ return 0;
+ }
+ if (!push_function_inst(spv::Op::OpBranchConditional,
+ {Operand(lhs_id), Operand(true_block_id), Operand(false_block_id)})) {
+ return 0;
+ }
- // Output block to check the RHS
- if (!GenerateLabel(block_id)) {
- return 0;
- }
- auto rhs_id = GenerateExpressionWithLoadIfNeeded(expr->rhs);
- if (rhs_id == 0) {
- return 0;
- }
+ // Output block to check the RHS
+ if (!GenerateLabel(block_id)) {
+ return 0;
+ }
+ auto rhs_id = GenerateExpressionWithLoadIfNeeded(expr->rhs);
+ if (rhs_id == 0) {
+ return 0;
+ }
- // Get the block ID of the last basic block generated for the right-hand-side
- // expression. That block will be an immediate predecessor to the merge block.
- auto rhs_block_id = current_label_id_;
- if (!push_function_inst(spv::Op::OpBranch, {Operand::Int(merge_block_id)})) {
- return 0;
- }
+ // Get the block ID of the last basic block generated for the right-hand-side
+ // expression. That block will be an immediate predecessor to the merge block.
+ auto rhs_block_id = current_label_id_;
+ if (!push_function_inst(spv::Op::OpBranch, {Operand(merge_block_id)})) {
+ return 0;
+ }
- // Output the merge block
- if (!GenerateLabel(merge_block_id)) {
- return 0;
- }
+ // Output the merge block
+ if (!GenerateLabel(merge_block_id)) {
+ return 0;
+ }
- auto result = result_op();
- auto result_id = result.to_i();
+ auto result = result_op();
+ auto result_id = std::get<uint32_t>(result);
- if (!push_function_inst(spv::Op::OpPhi,
- {Operand::Int(type_id), result, Operand::Int(lhs_id),
- Operand::Int(original_label_id),
- Operand::Int(rhs_id), Operand::Int(rhs_block_id)})) {
- return 0;
- }
+ if (!push_function_inst(spv::Op::OpPhi,
+ {Operand(type_id), result, Operand(lhs_id), Operand(original_label_id),
+ Operand(rhs_id), Operand(rhs_block_id)})) {
+ return 0;
+ }
- return result_id;
+ return result_id;
}
uint32_t Builder::GenerateSplat(uint32_t scalar_id, const sem::Type* vec_type) {
- // Create a new vector to splat scalar into
- auto splat_vector = result_op();
- auto* splat_vector_type = builder_.create<sem::Pointer>(
- vec_type, ast::StorageClass::kFunction, ast::Access::kReadWrite);
- push_function_var(
- {Operand::Int(GenerateTypeIfNeeded(splat_vector_type)), splat_vector,
- Operand::Int(ConvertStorageClass(ast::StorageClass::kFunction)),
- Operand::Int(GenerateConstantNullIfNeeded(vec_type))});
-
- // Splat scalar into vector
- auto splat_result = result_op();
- OperandList ops;
- ops.push_back(Operand::Int(GenerateTypeIfNeeded(vec_type)));
- ops.push_back(splat_result);
- for (size_t i = 0; i < vec_type->As<sem::Vector>()->Width(); ++i) {
- ops.push_back(Operand::Int(scalar_id));
- }
- if (!push_function_inst(spv::Op::OpCompositeConstruct, ops)) {
- return 0;
- }
+ // Create a new vector to splat scalar into
+ auto splat_vector = result_op();
+ auto* splat_vector_type = builder_.create<sem::Pointer>(vec_type, ast::StorageClass::kFunction,
+ ast::Access::kReadWrite);
+ push_function_var({Operand(GenerateTypeIfNeeded(splat_vector_type)), splat_vector,
+ U32Operand(ConvertStorageClass(ast::StorageClass::kFunction)),
+ Operand(GenerateConstantNullIfNeeded(vec_type))});
+
+ // Splat scalar into vector
+ auto splat_result = result_op();
+ OperandList ops;
+ ops.push_back(Operand(GenerateTypeIfNeeded(vec_type)));
+ ops.push_back(splat_result);
+ for (size_t i = 0; i < vec_type->As<sem::Vector>()->Width(); ++i) {
+ ops.push_back(Operand(scalar_id));
+ }
+ if (!push_function_inst(spv::Op::OpCompositeConstruct, ops)) {
+ return 0;
+ }
- return splat_result.to_i();
+ return std::get<uint32_t>(splat_result);
}
uint32_t Builder::GenerateMatrixAddOrSub(uint32_t lhs_id,
uint32_t rhs_id,
const sem::Matrix* type,
spv::Op op) {
- // Example addition of two matrices:
- // %31 = OpLoad %mat3v4float %m34
- // %32 = OpLoad %mat3v4float %m34
- // %33 = OpCompositeExtract %v4float %31 0
- // %34 = OpCompositeExtract %v4float %32 0
- // %35 = OpFAdd %v4float %33 %34
- // %36 = OpCompositeExtract %v4float %31 1
- // %37 = OpCompositeExtract %v4float %32 1
- // %38 = OpFAdd %v4float %36 %37
- // %39 = OpCompositeExtract %v4float %31 2
- // %40 = OpCompositeExtract %v4float %32 2
- // %41 = OpFAdd %v4float %39 %40
- // %42 = OpCompositeConstruct %mat3v4float %35 %38 %41
-
- auto* column_type = builder_.create<sem::Vector>(type->type(), type->rows());
- auto column_type_id = GenerateTypeIfNeeded(column_type);
-
- OperandList ops;
-
- for (uint32_t i = 0; i < type->columns(); ++i) {
- // Extract column `i` from lhs mat
- auto lhs_column_id = result_op();
- if (!push_function_inst(spv::Op::OpCompositeExtract,
- {Operand::Int(column_type_id), lhs_column_id,
- Operand::Int(lhs_id), Operand::Int(i)})) {
- return 0;
- }
-
- // Extract column `i` from rhs mat
- auto rhs_column_id = result_op();
- if (!push_function_inst(spv::Op::OpCompositeExtract,
- {Operand::Int(column_type_id), rhs_column_id,
- Operand::Int(rhs_id), Operand::Int(i)})) {
- return 0;
- }
-
- // Add or subtract the two columns
- auto result = result_op();
- if (!push_function_inst(op, {Operand::Int(column_type_id), result,
- lhs_column_id, rhs_column_id})) {
- return 0;
- }
+ // Example addition of two matrices:
+ // %31 = OpLoad %mat3v4float %m34
+ // %32 = OpLoad %mat3v4float %m34
+ // %33 = OpCompositeExtract %v4float %31 0
+ // %34 = OpCompositeExtract %v4float %32 0
+ // %35 = OpFAdd %v4float %33 %34
+ // %36 = OpCompositeExtract %v4float %31 1
+ // %37 = OpCompositeExtract %v4float %32 1
+ // %38 = OpFAdd %v4float %36 %37
+ // %39 = OpCompositeExtract %v4float %31 2
+ // %40 = OpCompositeExtract %v4float %32 2
+ // %41 = OpFAdd %v4float %39 %40
+ // %42 = OpCompositeConstruct %mat3v4float %35 %38 %41
+
+ auto* column_type = builder_.create<sem::Vector>(type->type(), type->rows());
+ auto column_type_id = GenerateTypeIfNeeded(column_type);
- ops.push_back(result);
- }
+ OperandList ops;
- // Create the result matrix from the added/subtracted column vectors
- auto result_mat_id = result_op();
- ops.insert(ops.begin(), result_mat_id);
- ops.insert(ops.begin(), Operand::Int(GenerateTypeIfNeeded(type)));
- if (!push_function_inst(spv::Op::OpCompositeConstruct, ops)) {
- return 0;
- }
+ for (uint32_t i = 0; i < type->columns(); ++i) {
+ // Extract column `i` from lhs mat
+ auto lhs_column_id = result_op();
+ if (!push_function_inst(
+ spv::Op::OpCompositeExtract,
+ {Operand(column_type_id), lhs_column_id, Operand(lhs_id), Operand(i)})) {
+ return 0;
+ }
- return result_mat_id.to_i();
-}
+ // Extract column `i` from rhs mat
+ auto rhs_column_id = result_op();
+ if (!push_function_inst(
+ spv::Op::OpCompositeExtract,
+ {Operand(column_type_id), rhs_column_id, Operand(rhs_id), Operand(i)})) {
+ return 0;
+ }
-uint32_t Builder::GenerateBinaryExpression(const ast::BinaryExpression* expr) {
- // There is special logic for short circuiting operators.
- if (expr->IsLogicalAnd() || expr->IsLogicalOr()) {
- return GenerateShortCircuitBinaryExpression(expr);
- }
+ // Add or subtract the two columns
+ auto result = result_op();
+ if (!push_function_inst(op,
+ {Operand(column_type_id), result, lhs_column_id, rhs_column_id})) {
+ return 0;
+ }
- auto lhs_id = GenerateExpressionWithLoadIfNeeded(expr->lhs);
- if (lhs_id == 0) {
- return 0;
- }
+ ops.push_back(result);
+ }
- auto rhs_id = GenerateExpressionWithLoadIfNeeded(expr->rhs);
- if (rhs_id == 0) {
- return 0;
- }
+ // Create the result matrix from the added/subtracted column vectors
+ auto result_mat_id = result_op();
+ ops.insert(ops.begin(), result_mat_id);
+ ops.insert(ops.begin(), Operand(GenerateTypeIfNeeded(type)));
+ if (!push_function_inst(spv::Op::OpCompositeConstruct, ops)) {
+ return 0;
+ }
- auto result = result_op();
- auto result_id = result.to_i();
+ return std::get<uint32_t>(result_mat_id);
+}
- auto type_id = GenerateTypeIfNeeded(TypeOf(expr));
- if (type_id == 0) {
- return 0;
- }
-
- // Handle int and float and the vectors of those types. Other types
- // should have been rejected by validation.
- auto* lhs_type = TypeOf(expr->lhs)->UnwrapRef();
- auto* rhs_type = TypeOf(expr->rhs)->UnwrapRef();
-
- // Handle matrix-matrix addition and subtraction
- if ((expr->IsAdd() || expr->IsSubtract()) && lhs_type->is_float_matrix() &&
- rhs_type->is_float_matrix()) {
- auto* lhs_mat = lhs_type->As<sem::Matrix>();
- auto* rhs_mat = rhs_type->As<sem::Matrix>();
-
- // This should already have been validated by resolver
- if (lhs_mat->rows() != rhs_mat->rows() ||
- lhs_mat->columns() != rhs_mat->columns()) {
- error_ = "matrices must have same dimensionality for add or subtract";
- return 0;
- }
-
- return GenerateMatrixAddOrSub(
- lhs_id, rhs_id, lhs_mat,
- expr->IsAdd() ? spv::Op::OpFAdd : spv::Op::OpFSub);
- }
-
- // For vector-scalar arithmetic operations, splat scalar into a vector. We
- // skip this for multiply as we can use OpVectorTimesScalar.
- const bool is_float_scalar_vector_multiply =
- expr->IsMultiply() &&
- ((lhs_type->is_float_scalar() && rhs_type->is_float_vector()) ||
- (lhs_type->is_float_vector() && rhs_type->is_float_scalar()));
-
- if (expr->IsArithmetic() && !is_float_scalar_vector_multiply) {
- if (lhs_type->Is<sem::Vector>() && rhs_type->is_numeric_scalar()) {
- uint32_t splat_vector_id = GenerateSplat(rhs_id, lhs_type);
- if (splat_vector_id == 0) {
+uint32_t Builder::GenerateBinaryExpression(const ast::BinaryExpression* expr) {
+ // There is special logic for short circuiting operators.
+ if (expr->IsLogicalAnd() || expr->IsLogicalOr()) {
+ return GenerateShortCircuitBinaryExpression(expr);
+ }
+
+ auto lhs_id = GenerateExpressionWithLoadIfNeeded(expr->lhs);
+ if (lhs_id == 0) {
return 0;
- }
- rhs_id = splat_vector_id;
- rhs_type = lhs_type;
+ }
- } else if (lhs_type->is_numeric_scalar() && rhs_type->Is<sem::Vector>()) {
- uint32_t splat_vector_id = GenerateSplat(lhs_id, rhs_type);
- if (splat_vector_id == 0) {
+ auto rhs_id = GenerateExpressionWithLoadIfNeeded(expr->rhs);
+ if (rhs_id == 0) {
return 0;
- }
- lhs_id = splat_vector_id;
- lhs_type = rhs_type;
- }
- }
-
- bool lhs_is_float_or_vec = lhs_type->is_float_scalar_or_vector();
- bool lhs_is_bool_or_vec = lhs_type->is_bool_scalar_or_vector();
- bool lhs_is_integer_or_vec = lhs_type->is_integer_scalar_or_vector();
- bool lhs_is_unsigned = lhs_type->is_unsigned_scalar_or_vector();
-
- spv::Op op = spv::Op::OpNop;
- if (expr->IsAnd()) {
- if (lhs_is_integer_or_vec) {
- op = spv::Op::OpBitwiseAnd;
- } else if (lhs_is_bool_or_vec) {
- op = spv::Op::OpLogicalAnd;
- } else {
- error_ = "invalid and expression";
- return 0;
- }
- } else if (expr->IsAdd()) {
- op = lhs_is_float_or_vec ? spv::Op::OpFAdd : spv::Op::OpIAdd;
- } else if (expr->IsDivide()) {
- if (lhs_is_float_or_vec) {
- op = spv::Op::OpFDiv;
- } else if (lhs_is_unsigned) {
- op = spv::Op::OpUDiv;
- } else {
- op = spv::Op::OpSDiv;
- }
- } else if (expr->IsEqual()) {
- if (lhs_is_float_or_vec) {
- op = spv::Op::OpFOrdEqual;
- } else if (lhs_is_bool_or_vec) {
- op = spv::Op::OpLogicalEqual;
- } else if (lhs_is_integer_or_vec) {
- op = spv::Op::OpIEqual;
- } else {
- error_ = "invalid equal expression";
- return 0;
- }
- } else if (expr->IsGreaterThan()) {
- if (lhs_is_float_or_vec) {
- op = spv::Op::OpFOrdGreaterThan;
- } else if (lhs_is_unsigned) {
- op = spv::Op::OpUGreaterThan;
- } else {
- op = spv::Op::OpSGreaterThan;
}
- } else if (expr->IsGreaterThanEqual()) {
- if (lhs_is_float_or_vec) {
- op = spv::Op::OpFOrdGreaterThanEqual;
- } else if (lhs_is_unsigned) {
- op = spv::Op::OpUGreaterThanEqual;
- } else {
- op = spv::Op::OpSGreaterThanEqual;
+
+ auto result = result_op();
+ auto result_id = std::get<uint32_t>(result);
+
+ auto type_id = GenerateTypeIfNeeded(TypeOf(expr));
+ if (type_id == 0) {
+ return 0;
}
- } else if (expr->IsLessThan()) {
- if (lhs_is_float_or_vec) {
- op = spv::Op::OpFOrdLessThan;
- } else if (lhs_is_unsigned) {
- op = spv::Op::OpULessThan;
- } else {
- op = spv::Op::OpSLessThan;
+
+ // Handle int and float and the vectors of those types. Other types
+ // should have been rejected by validation.
+ auto* lhs_type = TypeOf(expr->lhs)->UnwrapRef();
+ auto* rhs_type = TypeOf(expr->rhs)->UnwrapRef();
+
+ // Handle matrix-matrix addition and subtraction
+ if ((expr->IsAdd() || expr->IsSubtract()) && lhs_type->is_float_matrix() &&
+ rhs_type->is_float_matrix()) {
+ auto* lhs_mat = lhs_type->As<sem::Matrix>();
+ auto* rhs_mat = rhs_type->As<sem::Matrix>();
+
+ // This should already have been validated by resolver
+ if (lhs_mat->rows() != rhs_mat->rows() || lhs_mat->columns() != rhs_mat->columns()) {
+ error_ = "matrices must have same dimensionality for add or subtract";
+ return 0;
+ }
+
+ return GenerateMatrixAddOrSub(lhs_id, rhs_id, lhs_mat,
+ expr->IsAdd() ? spv::Op::OpFAdd : spv::Op::OpFSub);
}
- } else if (expr->IsLessThanEqual()) {
- if (lhs_is_float_or_vec) {
- op = spv::Op::OpFOrdLessThanEqual;
- } else if (lhs_is_unsigned) {
- op = spv::Op::OpULessThanEqual;
- } else {
- op = spv::Op::OpSLessThanEqual;
+
+ // For vector-scalar arithmetic operations, splat scalar into a vector. We
+ // skip this for multiply as we can use OpVectorTimesScalar.
+ const bool is_float_scalar_vector_multiply =
+ expr->IsMultiply() && ((lhs_type->is_float_scalar() && rhs_type->is_float_vector()) ||
+ (lhs_type->is_float_vector() && rhs_type->is_float_scalar()));
+
+ if (expr->IsArithmetic() && !is_float_scalar_vector_multiply) {
+ if (lhs_type->Is<sem::Vector>() && rhs_type->is_numeric_scalar()) {
+ uint32_t splat_vector_id = GenerateSplat(rhs_id, lhs_type);
+ if (splat_vector_id == 0) {
+ return 0;
+ }
+ rhs_id = splat_vector_id;
+ rhs_type = lhs_type;
+
+ } else if (lhs_type->is_numeric_scalar() && rhs_type->Is<sem::Vector>()) {
+ uint32_t splat_vector_id = GenerateSplat(lhs_id, rhs_type);
+ if (splat_vector_id == 0) {
+ return 0;
+ }
+ lhs_id = splat_vector_id;
+ lhs_type = rhs_type;
+ }
}
- } else if (expr->IsModulo()) {
- if (lhs_is_float_or_vec) {
- op = spv::Op::OpFRem;
- } else if (lhs_is_unsigned) {
- op = spv::Op::OpUMod;
- } else {
- op = spv::Op::OpSMod;
- }
- } else if (expr->IsMultiply()) {
- if (lhs_type->is_integer_scalar_or_vector()) {
- // If the left hand side is an integer then this _has_ to be OpIMul as
- // there there is no other integer multiplication.
- op = spv::Op::OpIMul;
- } else if (lhs_type->is_float_scalar() && rhs_type->is_float_scalar()) {
- // Float scalars multiply with OpFMul
- op = spv::Op::OpFMul;
- } else if (lhs_type->is_float_vector() && rhs_type->is_float_vector()) {
- // Float vectors must be validated to be the same size and then use OpFMul
- op = spv::Op::OpFMul;
- } else if (lhs_type->is_float_scalar() && rhs_type->is_float_vector()) {
- // Scalar * Vector we need to flip lhs and rhs types
- // because OpVectorTimesScalar expects <vector>, <scalar>
- std::swap(lhs_id, rhs_id);
- op = spv::Op::OpVectorTimesScalar;
- } else if (lhs_type->is_float_vector() && rhs_type->is_float_scalar()) {
- // float vector * scalar
- op = spv::Op::OpVectorTimesScalar;
- } else if (lhs_type->is_float_scalar() && rhs_type->is_float_matrix()) {
- // Scalar * Matrix we need to flip lhs and rhs types because
- // OpMatrixTimesScalar expects <matrix>, <scalar>
- std::swap(lhs_id, rhs_id);
- op = spv::Op::OpMatrixTimesScalar;
- } else if (lhs_type->is_float_matrix() && rhs_type->is_float_scalar()) {
- // float matrix * scalar
- op = spv::Op::OpMatrixTimesScalar;
- } else if (lhs_type->is_float_vector() && rhs_type->is_float_matrix()) {
- // float vector * matrix
- op = spv::Op::OpVectorTimesMatrix;
- } else if (lhs_type->is_float_matrix() && rhs_type->is_float_vector()) {
- // float matrix * vector
- op = spv::Op::OpMatrixTimesVector;
- } else if (lhs_type->is_float_matrix() && rhs_type->is_float_matrix()) {
- // float matrix * matrix
- op = spv::Op::OpMatrixTimesMatrix;
- } else {
- error_ = "invalid multiply expression";
- return 0;
- }
- } else if (expr->IsNotEqual()) {
- if (lhs_is_float_or_vec) {
- op = spv::Op::OpFOrdNotEqual;
- } else if (lhs_is_bool_or_vec) {
- op = spv::Op::OpLogicalNotEqual;
- } else if (lhs_is_integer_or_vec) {
- op = spv::Op::OpINotEqual;
- } else {
- error_ = "invalid not-equal expression";
- return 0;
- }
- } else if (expr->IsOr()) {
- if (lhs_is_integer_or_vec) {
- op = spv::Op::OpBitwiseOr;
- } else if (lhs_is_bool_or_vec) {
- op = spv::Op::OpLogicalOr;
+
+ bool lhs_is_float_or_vec = lhs_type->is_float_scalar_or_vector();
+ bool lhs_is_bool_or_vec = lhs_type->is_bool_scalar_or_vector();
+ bool lhs_is_integer_or_vec = lhs_type->is_integer_scalar_or_vector();
+ bool lhs_is_unsigned = lhs_type->is_unsigned_scalar_or_vector();
+
+ spv::Op op = spv::Op::OpNop;
+ if (expr->IsAnd()) {
+ if (lhs_is_integer_or_vec) {
+ op = spv::Op::OpBitwiseAnd;
+ } else if (lhs_is_bool_or_vec) {
+ op = spv::Op::OpLogicalAnd;
+ } else {
+ error_ = "invalid and expression";
+ return 0;
+ }
+ } else if (expr->IsAdd()) {
+ op = lhs_is_float_or_vec ? spv::Op::OpFAdd : spv::Op::OpIAdd;
+ } else if (expr->IsDivide()) {
+ if (lhs_is_float_or_vec) {
+ op = spv::Op::OpFDiv;
+ } else if (lhs_is_unsigned) {
+ op = spv::Op::OpUDiv;
+ } else {
+ op = spv::Op::OpSDiv;
+ }
+ } else if (expr->IsEqual()) {
+ if (lhs_is_float_or_vec) {
+ op = spv::Op::OpFOrdEqual;
+ } else if (lhs_is_bool_or_vec) {
+ op = spv::Op::OpLogicalEqual;
+ } else if (lhs_is_integer_or_vec) {
+ op = spv::Op::OpIEqual;
+ } else {
+ error_ = "invalid equal expression";
+ return 0;
+ }
+ } else if (expr->IsGreaterThan()) {
+ if (lhs_is_float_or_vec) {
+ op = spv::Op::OpFOrdGreaterThan;
+ } else if (lhs_is_unsigned) {
+ op = spv::Op::OpUGreaterThan;
+ } else {
+ op = spv::Op::OpSGreaterThan;
+ }
+ } else if (expr->IsGreaterThanEqual()) {
+ if (lhs_is_float_or_vec) {
+ op = spv::Op::OpFOrdGreaterThanEqual;
+ } else if (lhs_is_unsigned) {
+ op = spv::Op::OpUGreaterThanEqual;
+ } else {
+ op = spv::Op::OpSGreaterThanEqual;
+ }
+ } else if (expr->IsLessThan()) {
+ if (lhs_is_float_or_vec) {
+ op = spv::Op::OpFOrdLessThan;
+ } else if (lhs_is_unsigned) {
+ op = spv::Op::OpULessThan;
+ } else {
+ op = spv::Op::OpSLessThan;
+ }
+ } else if (expr->IsLessThanEqual()) {
+ if (lhs_is_float_or_vec) {
+ op = spv::Op::OpFOrdLessThanEqual;
+ } else if (lhs_is_unsigned) {
+ op = spv::Op::OpULessThanEqual;
+ } else {
+ op = spv::Op::OpSLessThanEqual;
+ }
+ } else if (expr->IsModulo()) {
+ if (lhs_is_float_or_vec) {
+ op = spv::Op::OpFRem;
+ } else if (lhs_is_unsigned) {
+ op = spv::Op::OpUMod;
+ } else {
+ op = spv::Op::OpSMod;
+ }
+ } else if (expr->IsMultiply()) {
+ if (lhs_type->is_integer_scalar_or_vector()) {
+            // If the left-hand side is an integer then this _has_ to be OpIMul as
+            // there is no other integer multiplication.
+ op = spv::Op::OpIMul;
+ } else if (lhs_type->is_float_scalar() && rhs_type->is_float_scalar()) {
+ // Float scalars multiply with OpFMul
+ op = spv::Op::OpFMul;
+ } else if (lhs_type->is_float_vector() && rhs_type->is_float_vector()) {
+ // Float vectors must be validated to be the same size and then use OpFMul
+ op = spv::Op::OpFMul;
+ } else if (lhs_type->is_float_scalar() && rhs_type->is_float_vector()) {
+            // For Scalar * Vector we need to flip lhs and rhs types
+ // because OpVectorTimesScalar expects <vector>, <scalar>
+ std::swap(lhs_id, rhs_id);
+ op = spv::Op::OpVectorTimesScalar;
+ } else if (lhs_type->is_float_vector() && rhs_type->is_float_scalar()) {
+ // float vector * scalar
+ op = spv::Op::OpVectorTimesScalar;
+ } else if (lhs_type->is_float_scalar() && rhs_type->is_float_matrix()) {
+            // For Scalar * Matrix we need to flip lhs and rhs types because
+ // OpMatrixTimesScalar expects <matrix>, <scalar>
+ std::swap(lhs_id, rhs_id);
+ op = spv::Op::OpMatrixTimesScalar;
+ } else if (lhs_type->is_float_matrix() && rhs_type->is_float_scalar()) {
+ // float matrix * scalar
+ op = spv::Op::OpMatrixTimesScalar;
+ } else if (lhs_type->is_float_vector() && rhs_type->is_float_matrix()) {
+ // float vector * matrix
+ op = spv::Op::OpVectorTimesMatrix;
+ } else if (lhs_type->is_float_matrix() && rhs_type->is_float_vector()) {
+ // float matrix * vector
+ op = spv::Op::OpMatrixTimesVector;
+ } else if (lhs_type->is_float_matrix() && rhs_type->is_float_matrix()) {
+ // float matrix * matrix
+ op = spv::Op::OpMatrixTimesMatrix;
+ } else {
+ error_ = "invalid multiply expression";
+ return 0;
+ }
+ } else if (expr->IsNotEqual()) {
+ if (lhs_is_float_or_vec) {
+ op = spv::Op::OpFOrdNotEqual;
+ } else if (lhs_is_bool_or_vec) {
+ op = spv::Op::OpLogicalNotEqual;
+ } else if (lhs_is_integer_or_vec) {
+ op = spv::Op::OpINotEqual;
+ } else {
+ error_ = "invalid not-equal expression";
+ return 0;
+ }
+ } else if (expr->IsOr()) {
+ if (lhs_is_integer_or_vec) {
+ op = spv::Op::OpBitwiseOr;
+ } else if (lhs_is_bool_or_vec) {
+ op = spv::Op::OpLogicalOr;
+ } else {
+ error_ = "invalid and expression";
+ return 0;
+ }
+ } else if (expr->IsShiftLeft()) {
+ op = spv::Op::OpShiftLeftLogical;
+ } else if (expr->IsShiftRight() && lhs_type->is_signed_scalar_or_vector()) {
+ // A shift right with a signed LHS is an arithmetic shift.
+ op = spv::Op::OpShiftRightArithmetic;
+ } else if (expr->IsShiftRight()) {
+ op = spv::Op::OpShiftRightLogical;
+ } else if (expr->IsSubtract()) {
+ op = lhs_is_float_or_vec ? spv::Op::OpFSub : spv::Op::OpISub;
+ } else if (expr->IsXor()) {
+ op = spv::Op::OpBitwiseXor;
} else {
- error_ = "invalid and expression";
- return 0;
- }
- } else if (expr->IsShiftLeft()) {
- op = spv::Op::OpShiftLeftLogical;
- } else if (expr->IsShiftRight() && lhs_type->is_signed_scalar_or_vector()) {
- // A shift right with a signed LHS is an arithmetic shift.
- op = spv::Op::OpShiftRightArithmetic;
- } else if (expr->IsShiftRight()) {
- op = spv::Op::OpShiftRightLogical;
- } else if (expr->IsSubtract()) {
- op = lhs_is_float_or_vec ? spv::Op::OpFSub : spv::Op::OpISub;
- } else if (expr->IsXor()) {
- op = spv::Op::OpBitwiseXor;
- } else {
- error_ = "unknown binary expression";
- return 0;
- }
+ error_ = "unknown binary expression";
+ return 0;
+ }
- if (!push_function_inst(op, {Operand::Int(type_id), result,
- Operand::Int(lhs_id), Operand::Int(rhs_id)})) {
- return 0;
- }
- return result_id;
+ if (!push_function_inst(op, {Operand(type_id), result, Operand(lhs_id), Operand(rhs_id)})) {
+ return 0;
+ }
+ return result_id;
}
bool Builder::GenerateBlockStatement(const ast::BlockStatement* stmt) {
- scope_stack_.Push();
- TINT_DEFER(scope_stack_.Pop());
- return GenerateBlockStatementWithoutScoping(stmt);
+ PushScope();
+ TINT_DEFER(PopScope());
+ return GenerateBlockStatementWithoutScoping(stmt);
}
-bool Builder::GenerateBlockStatementWithoutScoping(
- const ast::BlockStatement* stmt) {
- for (auto* block_stmt : stmt->statements) {
- if (!GenerateStatement(block_stmt)) {
- return false;
+bool Builder::GenerateBlockStatementWithoutScoping(const ast::BlockStatement* stmt) {
+ for (auto* block_stmt : stmt->statements) {
+ if (!GenerateStatement(block_stmt)) {
+ return false;
+ }
}
- }
- return true;
+ return true;
}
uint32_t Builder::GenerateCallExpression(const ast::CallExpression* expr) {
- auto* call = builder_.Sem().Get(expr);
- auto* target = call->Target();
- return Switch(
- target,
- [&](const sem::Function* func) {
- return GenerateFunctionCall(call, func);
- },
- [&](const sem::Builtin* builtin) {
- return GenerateBuiltinCall(call, builtin);
- },
- [&](const sem::TypeConversion*) {
- return GenerateTypeConstructorOrConversion(call, nullptr);
- },
- [&](const sem::TypeConstructor*) {
- return GenerateTypeConstructorOrConversion(call, nullptr);
- },
- [&](Default) {
- TINT_ICE(Writer, builder_.Diagnostics())
- << "unhandled call target: " << target->TypeInfo().name;
- return 0;
- });
+ auto* call = builder_.Sem().Get<sem::Call>(expr);
+ auto* target = call->Target();
+ return Switch(
+ target, [&](const sem::Function* func) { return GenerateFunctionCall(call, func); },
+ [&](const sem::Builtin* builtin) { return GenerateBuiltinCall(call, builtin); },
+ [&](const sem::TypeConversion*) {
+ return GenerateTypeConstructorOrConversion(call, nullptr);
+ },
+ [&](const sem::TypeConstructor*) {
+ return GenerateTypeConstructorOrConversion(call, nullptr);
+ },
+ [&](Default) {
+ TINT_ICE(Writer, builder_.Diagnostics())
+ << "unhandled call target: " << target->TypeInfo().name;
+ return 0;
+ });
}
-uint32_t Builder::GenerateFunctionCall(const sem::Call* call,
- const sem::Function*) {
- auto* expr = call->Declaration();
- auto* ident = expr->target.name;
+uint32_t Builder::GenerateFunctionCall(const sem::Call* call, const sem::Function*) {
+ auto* expr = call->Declaration();
+ auto* ident = expr->target.name;
- auto type_id = GenerateTypeIfNeeded(call->Type());
- if (type_id == 0) {
- return 0;
- }
+ auto type_id = GenerateTypeIfNeeded(call->Type());
+ if (type_id == 0) {
+ return 0;
+ }
- auto result = result_op();
- auto result_id = result.to_i();
+ auto result = result_op();
+ auto result_id = std::get<uint32_t>(result);
- OperandList ops = {Operand::Int(type_id), result};
+ OperandList ops = {Operand(type_id), result};
- auto func_id = func_symbol_to_id_[ident->symbol];
- if (func_id == 0) {
- error_ = "unable to find called function: " +
- builder_.Symbols().NameFor(ident->symbol);
- return 0;
- }
- ops.push_back(Operand::Int(func_id));
+ auto func_id = func_symbol_to_id_[ident->symbol];
+ if (func_id == 0) {
+ error_ = "unable to find called function: " + builder_.Symbols().NameFor(ident->symbol);
+ return 0;
+ }
+ ops.push_back(Operand(func_id));
- size_t arg_idx = 0;
- for (auto* arg : expr->args) {
- auto id = GenerateExpressionWithLoadIfNeeded(arg);
- if (id == 0) {
- return 0;
+ for (auto* arg : expr->args) {
+ auto id = GenerateExpressionWithLoadIfNeeded(arg);
+ if (id == 0) {
+ return 0;
+ }
+ ops.push_back(Operand(id));
}
- ops.push_back(Operand::Int(id));
- arg_idx++;
- }
- if (!push_function_inst(spv::Op::OpFunctionCall, std::move(ops))) {
- return 0;
- }
+ if (!push_function_inst(spv::Op::OpFunctionCall, std::move(ops))) {
+ return 0;
+ }
- return result_id;
+ return result_id;
}
-uint32_t Builder::GenerateBuiltinCall(const sem::Call* call,
- const sem::Builtin* builtin) {
- auto result = result_op();
- auto result_id = result.to_i();
-
- auto result_type_id = GenerateTypeIfNeeded(builtin->ReturnType());
- if (result_type_id == 0) {
- return 0;
- }
-
- if (builtin->IsFineDerivative() || builtin->IsCoarseDerivative()) {
- push_capability(SpvCapabilityDerivativeControl);
- }
+uint32_t Builder::GenerateBuiltinCall(const sem::Call* call, const sem::Builtin* builtin) {
+ auto result = result_op();
+ auto result_id = std::get<uint32_t>(result);
- if (builtin->IsImageQuery()) {
- push_capability(SpvCapabilityImageQuery);
- }
+ auto result_type_id = GenerateTypeIfNeeded(builtin->ReturnType());
+ if (result_type_id == 0) {
+ return 0;
+ }
- if (builtin->IsTexture()) {
- if (!GenerateTextureBuiltin(call, builtin, Operand::Int(result_type_id),
- result)) {
- return 0;
+ if (builtin->IsFineDerivative() || builtin->IsCoarseDerivative()) {
+ push_capability(SpvCapabilityDerivativeControl);
}
- return result_id;
- }
- if (builtin->IsBarrier()) {
- if (!GenerateControlBarrierBuiltin(builtin)) {
- return 0;
+ if (builtin->IsImageQuery()) {
+ push_capability(SpvCapabilityImageQuery);
}
- return result_id;
- }
- if (builtin->IsAtomic()) {
- if (!GenerateAtomicBuiltin(call, builtin, Operand::Int(result_type_id),
- result)) {
- return 0;
+ if (builtin->IsTexture()) {
+ if (!GenerateTextureBuiltin(call, builtin, Operand(result_type_id), result)) {
+ return 0;
+ }
+ return result_id;
}
- return result_id;
- }
-
- // Generates the SPIR-V ID for the expression for the indexed call argument,
- // and loads it if necessary. Returns 0 on error.
- auto get_arg_as_value_id = [&](size_t i,
- bool generate_load = true) -> uint32_t {
- auto* arg = call->Arguments()[i];
- auto* param = builtin->Parameters()[i];
- auto val_id = GenerateExpression(arg->Declaration());
- if (val_id == 0) {
- return 0;
+
+ if (builtin->IsBarrier()) {
+ if (!GenerateControlBarrierBuiltin(builtin)) {
+ return 0;
+ }
+ return result_id;
}
- if (generate_load && !param->Type()->Is<sem::Pointer>()) {
- val_id = GenerateLoadIfNeeded(arg->Type(), val_id);
+ if (builtin->IsAtomic()) {
+ if (!GenerateAtomicBuiltin(call, builtin, Operand(result_type_id), result)) {
+ return 0;
+ }
+ return result_id;
}
- return val_id;
- };
- OperandList params = {Operand::Int(result_type_id), result};
- spv::Op op = spv::Op::OpNop;
+ // Generates the SPIR-V ID for the expression for the indexed call argument,
+ // and loads it if necessary. Returns 0 on error.
+ auto get_arg_as_value_id = [&](size_t i, bool generate_load = true) -> uint32_t {
+ auto* arg = call->Arguments()[i];
+ auto* param = builtin->Parameters()[i];
+ auto val_id = GenerateExpression(arg->Declaration());
+ if (val_id == 0) {
+ return 0;
+ }
- // Pushes the arguments for a GlslStd450 extended instruction, and sets op
- // to OpExtInst.
- auto glsl_std450 = [&](uint32_t inst_id) {
- auto set_id = GetGLSLstd450Import();
- params.push_back(Operand::Int(set_id));
- params.push_back(Operand::Int(inst_id));
- op = spv::Op::OpExtInst;
- };
+ if (generate_load && !param->Type()->Is<sem::Pointer>()) {
+ val_id = GenerateLoadIfNeeded(arg->Type(), val_id);
+ }
+ return val_id;
+ };
- switch (builtin->Type()) {
- case BuiltinType::kAny:
- if (builtin->Parameters()[0]->Type()->Is<sem::Bool>()) {
- // any(v: bool) just resolves to v.
- return get_arg_as_value_id(0);
- }
- op = spv::Op::OpAny;
- break;
- case BuiltinType::kAll:
- if (builtin->Parameters()[0]->Type()->Is<sem::Bool>()) {
- // all(v: bool) just resolves to v.
- return get_arg_as_value_id(0);
- }
- op = spv::Op::OpAll;
- break;
- case BuiltinType::kArrayLength: {
- auto* address_of =
- call->Arguments()[0]->Declaration()->As<ast::UnaryOpExpression>();
- if (!address_of || address_of->op != ast::UnaryOp::kAddressOf) {
- error_ = "arrayLength() expected pointer to member access, got " +
- std::string(address_of->TypeInfo().name);
- return 0;
- }
- auto* array_expr = address_of->expr;
+ OperandList params = {Operand(result_type_id), result};
+ spv::Op op = spv::Op::OpNop;
- auto* accessor = array_expr->As<ast::MemberAccessorExpression>();
- if (!accessor) {
- error_ =
- "arrayLength() expected pointer to member access, got pointer to " +
- std::string(array_expr->TypeInfo().name);
- return 0;
- }
+ // Pushes the arguments for a GlslStd450 extended instruction, and sets op
+ // to OpExtInst.
+ auto glsl_std450 = [&](uint32_t inst_id) {
+ auto set_id = GetGLSLstd450Import();
+ params.push_back(Operand(set_id));
+ params.push_back(Operand(inst_id));
+ op = spv::Op::OpExtInst;
+ };
- auto struct_id = GenerateExpression(accessor->structure);
- if (struct_id == 0) {
- return 0;
- }
- params.push_back(Operand::Int(struct_id));
+ switch (builtin->Type()) {
+ case BuiltinType::kAny:
+ if (builtin->Parameters()[0]->Type()->Is<sem::Bool>()) {
+ // any(v: bool) just resolves to v.
+ return get_arg_as_value_id(0);
+ }
+ op = spv::Op::OpAny;
+ break;
+ case BuiltinType::kAll:
+ if (builtin->Parameters()[0]->Type()->Is<sem::Bool>()) {
+ // all(v: bool) just resolves to v.
+ return get_arg_as_value_id(0);
+ }
+ op = spv::Op::OpAll;
+ break;
+ case BuiltinType::kArrayLength: {
+ auto* address_of = call->Arguments()[0]->Declaration()->As<ast::UnaryOpExpression>();
+ if (!address_of || address_of->op != ast::UnaryOp::kAddressOf) {
+ error_ = "arrayLength() expected pointer to member access, got " +
+ std::string(address_of->TypeInfo().name);
+ return 0;
+ }
+ auto* array_expr = address_of->expr;
- auto* type = TypeOf(accessor->structure)->UnwrapRef();
- if (!type->Is<sem::Struct>()) {
- error_ = "invalid type (" + type->FriendlyName(builder_.Symbols()) +
- ") for runtime array length";
- return 0;
- }
- // Runtime array must be the last member in the structure
- params.push_back(Operand::Int(uint32_t(
- type->As<sem::Struct>()->Declaration()->members.size() - 1)));
+ auto* accessor = array_expr->As<ast::MemberAccessorExpression>();
+ if (!accessor) {
+ error_ = "arrayLength() expected pointer to member access, got pointer to " +
+ std::string(array_expr->TypeInfo().name);
+ return 0;
+ }
- if (!push_function_inst(spv::Op::OpArrayLength, params)) {
- return 0;
- }
- return result_id;
- }
- case BuiltinType::kCountOneBits:
- op = spv::Op::OpBitCount;
- break;
- case BuiltinType::kDot: {
- op = spv::Op::OpDot;
- auto* vec_ty = builtin->Parameters()[0]->Type()->As<sem::Vector>();
- if (vec_ty->type()->is_integer_scalar()) {
- // TODO(crbug.com/tint/1267): OpDot requires floating-point types, but
- // WGSL also supports integer types. SPV_KHR_integer_dot_product adds
- // support for integer vectors. Use it if it is available.
- auto el_ty = Operand::Int(GenerateTypeIfNeeded(vec_ty->type()));
- auto vec_a = Operand::Int(get_arg_as_value_id(0));
- auto vec_b = Operand::Int(get_arg_as_value_id(1));
- if (vec_a.to_i() == 0 || vec_b.to_i() == 0) {
- return 0;
- }
-
- auto sum = Operand::Int(0);
- for (uint32_t i = 0; i < vec_ty->Width(); i++) {
- auto a = result_op();
- auto b = result_op();
- auto mul = result_op();
- if (!push_function_inst(spv::Op::OpCompositeExtract,
- {el_ty, a, vec_a, Operand::Int(i)}) ||
- !push_function_inst(spv::Op::OpCompositeExtract,
- {el_ty, b, vec_b, Operand::Int(i)}) ||
- !push_function_inst(spv::Op::OpIMul, {el_ty, mul, a, b})) {
- return 0;
- }
- if (i == 0) {
- sum = mul;
- } else {
- auto prev_sum = sum;
- auto is_last_el = i == (vec_ty->Width() - 1);
- sum = is_last_el ? Operand::Int(result_id) : result_op();
- if (!push_function_inst(spv::Op::OpIAdd,
- {el_ty, sum, prev_sum, mul})) {
- return 0;
+ auto struct_id = GenerateExpression(accessor->structure);
+ if (struct_id == 0) {
+ return 0;
+ }
+ params.push_back(Operand(struct_id));
+
+ auto* type = TypeOf(accessor->structure)->UnwrapRef();
+ if (!type->Is<sem::Struct>()) {
+ error_ = "invalid type (" + type->FriendlyName(builder_.Symbols()) +
+ ") for runtime array length";
+ return 0;
+ }
+ // Runtime array must be the last member in the structure
+ params.push_back(
+ Operand(uint32_t(type->As<sem::Struct>()->Declaration()->members.size() - 1)));
+
+ if (!push_function_inst(spv::Op::OpArrayLength, params)) {
+ return 0;
}
- }
+ return result_id;
}
- return result_id;
- }
- break;
- }
- case BuiltinType::kDpdx:
- op = spv::Op::OpDPdx;
- break;
- case BuiltinType::kDpdxCoarse:
- op = spv::Op::OpDPdxCoarse;
- break;
- case BuiltinType::kDpdxFine:
- op = spv::Op::OpDPdxFine;
- break;
- case BuiltinType::kDpdy:
- op = spv::Op::OpDPdy;
- break;
- case BuiltinType::kDpdyCoarse:
- op = spv::Op::OpDPdyCoarse;
- break;
- case BuiltinType::kDpdyFine:
- op = spv::Op::OpDPdyFine;
- break;
- case BuiltinType::kExtractBits:
- op = builtin->Parameters()[0]->Type()->is_unsigned_scalar_or_vector()
- ? spv::Op::OpBitFieldUExtract
- : spv::Op::OpBitFieldSExtract;
- break;
- case BuiltinType::kFwidth:
- op = spv::Op::OpFwidth;
- break;
- case BuiltinType::kFwidthCoarse:
- op = spv::Op::OpFwidthCoarse;
- break;
- case BuiltinType::kFwidthFine:
- op = spv::Op::OpFwidthFine;
- break;
- case BuiltinType::kInsertBits:
- op = spv::Op::OpBitFieldInsert;
- break;
- case BuiltinType::kMix: {
- auto std450 = Operand::Int(GetGLSLstd450Import());
-
- auto a_id = get_arg_as_value_id(0);
- auto b_id = get_arg_as_value_id(1);
- auto f_id = get_arg_as_value_id(2);
- if (!a_id || !b_id || !f_id) {
- return 0;
- }
+ case BuiltinType::kCountOneBits:
+ op = spv::Op::OpBitCount;
+ break;
+ case BuiltinType::kDot: {
+ op = spv::Op::OpDot;
+ auto* vec_ty = builtin->Parameters()[0]->Type()->As<sem::Vector>();
+ if (vec_ty->type()->is_integer_scalar()) {
+ // TODO(crbug.com/tint/1267): OpDot requires floating-point types, but
+ // WGSL also supports integer types. SPV_KHR_integer_dot_product adds
+ // support for integer vectors. Use it if it is available.
+ auto el_ty = Operand(GenerateTypeIfNeeded(vec_ty->type()));
+ auto vec_a = Operand(get_arg_as_value_id(0));
+ auto vec_b = Operand(get_arg_as_value_id(1));
+ if (std::get<uint32_t>(vec_a) == 0 || std::get<uint32_t>(vec_b) == 0) {
+ return 0;
+ }
+
+ auto sum = Operand(0u);
+ for (uint32_t i = 0; i < vec_ty->Width(); i++) {
+ auto a = result_op();
+ auto b = result_op();
+ auto mul = result_op();
+ if (!push_function_inst(spv::Op::OpCompositeExtract,
+ {el_ty, a, vec_a, Operand(i)}) ||
+ !push_function_inst(spv::Op::OpCompositeExtract,
+ {el_ty, b, vec_b, Operand(i)}) ||
+ !push_function_inst(spv::Op::OpIMul, {el_ty, mul, a, b})) {
+ return 0;
+ }
+ if (i == 0) {
+ sum = mul;
+ } else {
+ auto prev_sum = sum;
+ auto is_last_el = i == (vec_ty->Width() - 1);
+ sum = is_last_el ? Operand(result_id) : result_op();
+ if (!push_function_inst(spv::Op::OpIAdd, {el_ty, sum, prev_sum, mul})) {
+ return 0;
+ }
+ }
+ }
+ return result_id;
+ }
+ break;
+ }
+ case BuiltinType::kDpdx:
+ op = spv::Op::OpDPdx;
+ break;
+ case BuiltinType::kDpdxCoarse:
+ op = spv::Op::OpDPdxCoarse;
+ break;
+ case BuiltinType::kDpdxFine:
+ op = spv::Op::OpDPdxFine;
+ break;
+ case BuiltinType::kDpdy:
+ op = spv::Op::OpDPdy;
+ break;
+ case BuiltinType::kDpdyCoarse:
+ op = spv::Op::OpDPdyCoarse;
+ break;
+ case BuiltinType::kDpdyFine:
+ op = spv::Op::OpDPdyFine;
+ break;
+ case BuiltinType::kExtractBits:
+ op = builtin->Parameters()[0]->Type()->is_unsigned_scalar_or_vector()
+ ? spv::Op::OpBitFieldUExtract
+ : spv::Op::OpBitFieldSExtract;
+ break;
+ case BuiltinType::kFwidth:
+ op = spv::Op::OpFwidth;
+ break;
+ case BuiltinType::kFwidthCoarse:
+ op = spv::Op::OpFwidthCoarse;
+ break;
+ case BuiltinType::kFwidthFine:
+ op = spv::Op::OpFwidthFine;
+ break;
+ case BuiltinType::kInsertBits:
+ op = spv::Op::OpBitFieldInsert;
+ break;
+ case BuiltinType::kMix: {
+ auto std450 = Operand(GetGLSLstd450Import());
+
+ auto a_id = get_arg_as_value_id(0);
+ auto b_id = get_arg_as_value_id(1);
+ auto f_id = get_arg_as_value_id(2);
+ if (!a_id || !b_id || !f_id) {
+ return 0;
+ }
- // If the interpolant is scalar but the objects are vectors, we need to
- // splat the interpolant into a vector of the same size.
- auto* result_vector_type = builtin->ReturnType()->As<sem::Vector>();
- if (result_vector_type && builtin->Parameters()[2]->Type()->is_scalar()) {
- f_id = GenerateSplat(f_id, builtin->Parameters()[0]->Type());
- if (f_id == 0) {
- return 0;
+ // If the interpolant is scalar but the objects are vectors, we need to
+ // splat the interpolant into a vector of the same size.
+ auto* result_vector_type = builtin->ReturnType()->As<sem::Vector>();
+ if (result_vector_type && builtin->Parameters()[2]->Type()->is_scalar()) {
+ f_id = GenerateSplat(f_id, builtin->Parameters()[0]->Type());
+ if (f_id == 0) {
+ return 0;
+ }
+ }
+
+ if (!push_function_inst(spv::Op::OpExtInst, {Operand(result_type_id), result, std450,
+ U32Operand(GLSLstd450FMix), Operand(a_id),
+ Operand(b_id), Operand(f_id)})) {
+ return 0;
+ }
+ return result_id;
}
- }
+ case BuiltinType::kReverseBits:
+ op = spv::Op::OpBitReverse;
+ break;
+ case BuiltinType::kSelect: {
+ // Note: Argument order is different in WGSL and SPIR-V
+ auto cond_id = get_arg_as_value_id(2);
+ auto true_id = get_arg_as_value_id(1);
+ auto false_id = get_arg_as_value_id(0);
+ if (!cond_id || !true_id || !false_id) {
+ return 0;
+ }
- if (!push_function_inst(spv::Op::OpExtInst,
- {Operand::Int(result_type_id), result, std450,
- Operand::Int(GLSLstd450FMix), Operand::Int(a_id),
- Operand::Int(b_id), Operand::Int(f_id)})) {
- return 0;
- }
- return result_id;
- }
- case BuiltinType::kReverseBits:
- op = spv::Op::OpBitReverse;
- break;
- case BuiltinType::kSelect: {
- // Note: Argument order is different in WGSL and SPIR-V
- auto cond_id = get_arg_as_value_id(2);
- auto true_id = get_arg_as_value_id(1);
- auto false_id = get_arg_as_value_id(0);
- if (!cond_id || !true_id || !false_id) {
- return 0;
- }
+ // If the condition is scalar but the objects are vectors, we need to
+ // splat the condition into a vector of the same size.
+ // TODO(jrprice): If we're targeting SPIR-V 1.4, we don't need to do this.
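+ // For example, select(a, b, cond) with a, b : vec3<f32> and a scalar cond
+ // splats cond to vec3<bool> first, since pre-1.4 OpSelect requires a vector
+ // condition when the result is a vector.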
+ auto* result_vector_type = builtin->ReturnType()->As<sem::Vector>();
+ if (result_vector_type && builtin->Parameters()[2]->Type()->is_scalar()) {
+ auto* bool_vec_ty = builder_.create<sem::Vector>(builder_.create<sem::Bool>(),
+ result_vector_type->Width());
+ if (!GenerateTypeIfNeeded(bool_vec_ty)) {
+ return 0;
+ }
+ cond_id = GenerateSplat(cond_id, bool_vec_ty);
+ if (cond_id == 0) {
+ return 0;
+ }
+ }
- // If the condition is scalar but the objects are vectors, we need to
- // splat the condition into a vector of the same size.
- // TODO(jrprice): If we're targeting SPIR-V 1.4, we don't need to do this.
- auto* result_vector_type = builtin->ReturnType()->As<sem::Vector>();
- if (result_vector_type && builtin->Parameters()[2]->Type()->is_scalar()) {
- auto* bool_vec_ty = builder_.create<sem::Vector>(
- builder_.create<sem::Bool>(), result_vector_type->Width());
- if (!GenerateTypeIfNeeded(bool_vec_ty)) {
- return 0;
+ if (!push_function_inst(spv::Op::OpSelect,
+ {Operand(result_type_id), result, Operand(cond_id),
+ Operand(true_id), Operand(false_id)})) {
+ return 0;
+ }
+ return result_id;
+ }
+ case BuiltinType::kTranspose:
+ op = spv::Op::OpTranspose;
+ break;
+ case BuiltinType::kAbs:
+ if (builtin->ReturnType()->is_unsigned_scalar_or_vector()) {
+ // abs() only operates on *signed* integers.
+ // This is a no-op for unsigned integers.
+ return get_arg_as_value_id(0);
+ }
+ if (builtin->ReturnType()->is_float_scalar_or_vector()) {
+ glsl_std450(GLSLstd450FAbs);
+ } else {
+ glsl_std450(GLSLstd450SAbs);
+ }
+ break;
+ case BuiltinType::kDot4I8Packed: {
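+ // dot4I8Packed treats each u32 argument as four packed 8-bit signed
+ // integers; OpSDotKHR from SPV_KHR_integer_dot_product performs the dot
+ // product directly using the 4x8-bit packed-vector format operand.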
+ auto first_param_id = get_arg_as_value_id(0);
+ auto second_param_id = get_arg_as_value_id(1);
+ if (!push_function_inst(spv::Op::OpSDotKHR,
+ {Operand(result_type_id), result, Operand(first_param_id),
+ Operand(second_param_id),
+ Operand(static_cast<uint32_t>(
+ spv::PackedVectorFormat::PackedVectorFormat4x8BitKHR))})) {
+ return 0;
+ }
+ return result_id;
}
- cond_id = GenerateSplat(cond_id, bool_vec_ty);
- if (cond_id == 0) {
- return 0;
+ case BuiltinType::kDot4U8Packed: {
+ auto first_param_id = get_arg_as_value_id(0);
+ auto second_param_id = get_arg_as_value_id(1);
+ if (!push_function_inst(spv::Op::OpUDotKHR,
+ {Operand(result_type_id), result, Operand(first_param_id),
+ Operand(second_param_id),
+ Operand(static_cast<uint32_t>(
+ spv::PackedVectorFormat::PackedVectorFormat4x8BitKHR))})) {
+ return 0;
+ }
+ return result_id;
}
- }
+ default: {
+ auto inst_id = builtin_to_glsl_method(builtin);
+ if (inst_id == 0) {
+ error_ = "unknown method " + std::string(builtin->str());
+ return 0;
+ }
+ glsl_std450(inst_id);
+ break;
+ }
+ }
- if (!push_function_inst(
- spv::Op::OpSelect,
- {Operand::Int(result_type_id), result, Operand::Int(cond_id),
- Operand::Int(true_id), Operand::Int(false_id)})) {
+ if (op == spv::Op::OpNop) {
+ error_ = "unable to determine operator for: " + std::string(builtin->str());
return 0;
- }
- return result_id;
- }
- case BuiltinType::kTranspose:
- op = spv::Op::OpTranspose;
- break;
- case BuiltinType::kAbs:
- if (builtin->ReturnType()->is_unsigned_scalar_or_vector()) {
- // abs() only operates on *signed* integers.
- // This is a no-op for unsigned integers.
- return get_arg_as_value_id(0);
- }
- if (builtin->ReturnType()->is_float_scalar_or_vector()) {
- glsl_std450(GLSLstd450FAbs);
- } else {
- glsl_std450(GLSLstd450SAbs);
- }
- break;
- default: {
- auto inst_id = builtin_to_glsl_method(builtin);
- if (inst_id == 0) {
- error_ = "unknown method " + std::string(builtin->str());
- return 0;
- }
- glsl_std450(inst_id);
- break;
}
- }
-
- if (op == spv::Op::OpNop) {
- error_ = "unable to determine operator for: " + std::string(builtin->str());
- return 0;
- }
- for (size_t i = 0; i < call->Arguments().size(); i++) {
- if (auto val_id = get_arg_as_value_id(i)) {
- params.emplace_back(Operand::Int(val_id));
- } else {
- return 0;
+ for (size_t i = 0; i < call->Arguments().size(); i++) {
+ if (auto val_id = get_arg_as_value_id(i)) {
+ params.emplace_back(Operand(val_id));
+ } else {
+ return 0;
+ }
}
- }
- if (!push_function_inst(op, params)) {
- return 0;
- }
+ if (!push_function_inst(op, params)) {
+ return 0;
+ }
- return result_id;
+ return result_id;
}
bool Builder::GenerateTextureBuiltin(const sem::Call* call,
const sem::Builtin* builtin,
Operand result_type,
Operand result_id) {
- using Usage = sem::ParameterUsage;
-
- auto& signature = builtin->Signature();
- auto& arguments = call->Arguments();
-
- // Generates the given expression, returning the operand ID
- auto gen = [&](const sem::Expression* expr) {
- const auto val_id = GenerateExpressionWithLoadIfNeeded(expr);
- return Operand::Int(val_id);
- };
-
- // Returns the argument with the given usage
- auto arg = [&](Usage usage) {
- int idx = signature.IndexOf(usage);
- return (idx >= 0) ? arguments[idx] : nullptr;
- };
-
- // Generates the argument with the given usage, returning the operand ID
- auto gen_arg = [&](Usage usage) {
- auto* argument = arg(usage);
- if (!argument) {
- TINT_ICE(Writer, builder_.Diagnostics())
- << "missing argument " << static_cast<int>(usage);
- }
- return gen(argument);
- };
-
- auto* texture = arg(Usage::kTexture);
- if (!texture) {
- TINT_ICE(Writer, builder_.Diagnostics()) << "missing texture argument";
- }
-
- auto* texture_type = texture->Type()->UnwrapRef()->As<sem::Texture>();
-
- auto op = spv::Op::OpNop;
-
- // Custom function to call after the texture-builtin op has been generated.
- std::function<bool()> post_emission = [] { return true; };
-
- // Populate the spirv_params with common parameters
- OperandList spirv_params;
- spirv_params.reserve(8); // Enough to fit most parameter lists
-
- // Extra image operands, appended to spirv_params.
- struct ImageOperand {
- SpvImageOperandsMask mask;
- Operand operand;
- };
- std::vector<ImageOperand> image_operands;
- image_operands.reserve(4); // Enough to fit most parameter lists
-
- // Appends `result_type` and `result_id` to `spirv_params`
- auto append_result_type_and_id_to_spirv_params = [&]() {
- spirv_params.emplace_back(std::move(result_type));
- spirv_params.emplace_back(std::move(result_id));
- };
-
- // Appends a result type and id to `spirv_params`, possibly adding a
- // post_emission step.
- //
- // If the texture is a depth texture, then this function wraps the result of
- // the op with a OpCompositeExtract to evaluate to the first element of the
- // returned vector. This is done as the WGSL texture reading functions for
- // depths return a single float scalar instead of a vector.
- //
- // If the texture is not a depth texture, then this function simply delegates
- // to calling append_result_type_and_id_to_spirv_params().
- auto append_result_type_and_id_to_spirv_params_for_read = [&]() {
- if (texture_type
- ->IsAnyOf<sem::DepthTexture, sem::DepthMultisampledTexture>()) {
- auto* f32 = builder_.create<sem::F32>();
- auto* spirv_result_type = builder_.create<sem::Vector>(f32, 4u);
- auto spirv_result = result_op();
- post_emission = [=] {
- return push_function_inst(
- spv::Op::OpCompositeExtract,
- {result_type, result_id, spirv_result, Operand::Int(0)});
- };
- auto spirv_result_type_id = GenerateTypeIfNeeded(spirv_result_type);
- if (spirv_result_type_id == 0) {
- return false;
- }
- spirv_params.emplace_back(Operand::Int(spirv_result_type_id));
- spirv_params.emplace_back(spirv_result);
- return true;
+ using Usage = sem::ParameterUsage;
+
+ auto& signature = builtin->Signature();
+ auto& arguments = call->Arguments();
+
+ // Generates the given expression, returning the operand ID
+ auto gen = [&](const sem::Expression* expr) {
+ const auto val_id = GenerateExpressionWithLoadIfNeeded(expr);
+ return Operand(val_id);
+ };
+
+ // Returns the argument with the given usage
+ auto arg = [&](Usage usage) {
+ int idx = signature.IndexOf(usage);
+ return (idx >= 0) ? arguments[idx] : nullptr;
+ };
+
+ // Generates the argument with the given usage, returning the operand ID
+ auto gen_arg = [&](Usage usage) {
+ auto* argument = arg(usage);
+ if (!argument) {
+ TINT_ICE(Writer, builder_.Diagnostics())
+ << "missing argument " << static_cast<int>(usage);
+ }
+ return gen(argument);
+ };
+
+ auto* texture = arg(Usage::kTexture);
+ if (!texture) {
+ TINT_ICE(Writer, builder_.Diagnostics()) << "missing texture argument";
}
- append_result_type_and_id_to_spirv_params();
- return true;
- };
+ auto* texture_type = texture->Type()->UnwrapRef()->As<sem::Texture>();
- // Appends a result type and id to `spirv_params`, by first swizzling the
- // result of the op with `swizzle`.
- auto append_result_type_and_id_to_spirv_params_swizzled =
- [&](uint32_t spirv_result_width, std::vector<uint32_t> swizzle) {
- if (swizzle.empty()) {
- append_result_type_and_id_to_spirv_params();
- } else {
- // Assign post_emission to swizzle the result of the call to
- // OpImageQuerySize[Lod].
- auto* element_type = ElementTypeOf(call->Type());
- auto spirv_result = result_op();
- auto* spirv_result_type =
- builder_.create<sem::Vector>(element_type, spirv_result_width);
- if (swizzle.size() > 1) {
- post_emission = [=] {
- OperandList operands{
- result_type,
- result_id,
- spirv_result,
- spirv_result,
- };
- for (auto idx : swizzle) {
- operands.emplace_back(Operand::Int(idx));
- }
- return push_function_inst(spv::Op::OpVectorShuffle, operands);
- };
- } else {
+ auto op = spv::Op::OpNop;
+
+ // Custom function to call after the texture-builtin op has been generated.
+ std::function<bool()> post_emission = [] { return true; };
+
+ // Populate the spirv_params with common parameters
+ OperandList spirv_params;
+ spirv_params.reserve(8); // Enough to fit most parameter lists
+
+ // Extra image operands, appended to spirv_params.
+ struct ImageOperand {
+ SpvImageOperandsMask mask;
+ Operand operand;
+ };
+ std::vector<ImageOperand> image_operands;
+ image_operands.reserve(4); // Enough to fit most parameter lists
+
+ // Appends `result_type` and `result_id` to `spirv_params`
+ auto append_result_type_and_id_to_spirv_params = [&]() {
+ spirv_params.emplace_back(std::move(result_type));
+ spirv_params.emplace_back(std::move(result_id));
+ };
+
+ // Appends a result type and id to `spirv_params`, possibly adding a
+ // post_emission step.
+ //
+ // If the texture is a depth texture, then this function wraps the result of
+ // the op with an OpCompositeExtract to evaluate to the first element of the
+ // returned vector. This is done as the WGSL texture reading functions for
+ // depths return a single float scalar instead of a vector.
+ //
+ // If the texture is not a depth texture, then this function simply delegates
+ // to calling append_result_type_and_id_to_spirv_params().
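+ // For example, textureSample() on a texture_depth_2d is emitted as an
+ // OpImageSampleImplicitLod producing a vec4<f32>, followed by an
+ // OpCompositeExtract of component 0 to yield the f32 that WGSL returns.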
+ auto append_result_type_and_id_to_spirv_params_for_read = [&]() {
+ if (texture_type->IsAnyOf<sem::DepthTexture, sem::DepthMultisampledTexture>()) {
+ auto* f32 = builder_.create<sem::F32>();
+ auto* spirv_result_type = builder_.create<sem::Vector>(f32, 4u);
+ auto spirv_result = result_op();
post_emission = [=] {
- return push_function_inst(spv::Op::OpCompositeExtract,
- {result_type, result_id, spirv_result,
- Operand::Int(swizzle[0])});
+ return push_function_inst(spv::Op::OpCompositeExtract,
+ {result_type, result_id, spirv_result, Operand(0u)});
};
- }
- auto spirv_result_type_id = GenerateTypeIfNeeded(spirv_result_type);
- if (spirv_result_type_id == 0) {
- return false;
- }
- spirv_params.emplace_back(Operand::Int(spirv_result_type_id));
- spirv_params.emplace_back(spirv_result);
+ auto spirv_result_type_id = GenerateTypeIfNeeded(spirv_result_type);
+ if (spirv_result_type_id == 0) {
+ return false;
+ }
+ spirv_params.emplace_back(Operand(spirv_result_type_id));
+ spirv_params.emplace_back(spirv_result);
+ return true;
}
+
+ append_result_type_and_id_to_spirv_params();
return true;
- };
-
- auto append_coords_to_spirv_params = [&]() -> bool {
- if (auto* array_index = arg(Usage::kArrayIndex)) {
- // Array index needs to be appended to the coordinates.
- auto* packed = AppendVector(&builder_, arg(Usage::kCoords)->Declaration(),
- array_index->Declaration());
- auto param = GenerateExpression(packed->Declaration());
- if (param == 0) {
- return false;
- }
- spirv_params.emplace_back(Operand::Int(param));
- } else {
- spirv_params.emplace_back(gen_arg(Usage::kCoords)); // coordinates
- }
- return true;
- };
-
- auto append_image_and_coords_to_spirv_params = [&]() -> bool {
- auto sampler_param = gen_arg(Usage::kSampler);
- auto texture_param = gen_arg(Usage::kTexture);
- auto sampled_image =
- GenerateSampledImage(texture_type, texture_param, sampler_param);
-
- // Populate the spirv_params with the common parameters
- spirv_params.emplace_back(Operand::Int(sampled_image)); // sampled image
- return append_coords_to_spirv_params();
- };
-
- switch (builtin->Type()) {
- case BuiltinType::kTextureDimensions: {
- // Number of returned elements from OpImageQuerySize[Lod] may not match
- // those of textureDimensions().
- // This might be due to an extra vector scalar describing the number of
- // array elements or textureDimensions() returning a vec3 for cubes
- // when only width / height is returned by OpImageQuerySize[Lod]
- // (see https://github.com/gpuweb/gpuweb/issues/1345).
- // Handle these mismatches by swizzling the returned vector.
- std::vector<uint32_t> swizzle;
- uint32_t spirv_dims = 0;
- switch (texture_type->dim()) {
- case ast::TextureDimension::kNone:
- error_ = "texture dimension is kNone";
- return false;
- case ast::TextureDimension::k1d:
- case ast::TextureDimension::k2d:
- case ast::TextureDimension::k3d:
- case ast::TextureDimension::kCube:
- break; // No swizzle needed
- case ast::TextureDimension::kCubeArray:
- case ast::TextureDimension::k2dArray:
- swizzle = {0, 1}; // Strip array index
- spirv_dims = 3; // [width, height, array_count]
- break;
- }
+ };
- if (!append_result_type_and_id_to_spirv_params_swizzled(spirv_dims,
- swizzle)) {
- return false;
- }
+ // Appends a result type and id to `spirv_params`, by first swizzling the
+ // result of the op with `swizzle`.
+ auto append_result_type_and_id_to_spirv_params_swizzled = [&](uint32_t spirv_result_width,
+ std::vector<uint32_t> swizzle) {
+ if (swizzle.empty()) {
+ append_result_type_and_id_to_spirv_params();
+ } else {
+ // Assign post_emission to swizzle the result of the call to
+ // OpImageQuerySize[Lod].
+ auto* element_type = ElementTypeOf(call->Type());
+ auto spirv_result = result_op();
+ auto* spirv_result_type =
+ builder_.create<sem::Vector>(element_type, spirv_result_width);
+ if (swizzle.size() > 1) {
+ post_emission = [=] {
+ OperandList operands{
+ result_type,
+ result_id,
+ spirv_result,
+ spirv_result,
+ };
+ for (auto idx : swizzle) {
+ operands.emplace_back(Operand(idx));
+ }
+ return push_function_inst(spv::Op::OpVectorShuffle, operands);
+ };
+ } else {
+ post_emission = [=] {
+ return push_function_inst(
+ spv::Op::OpCompositeExtract,
+ {result_type, result_id, spirv_result, Operand(swizzle[0])});
+ };
+ }
+ auto spirv_result_type_id = GenerateTypeIfNeeded(spirv_result_type);
+ if (spirv_result_type_id == 0) {
+ return false;
+ }
+ spirv_params.emplace_back(Operand(spirv_result_type_id));
+ spirv_params.emplace_back(spirv_result);
+ }
+ return true;
+ };
- spirv_params.emplace_back(gen_arg(Usage::kTexture));
- if (texture_type->IsAnyOf<sem::MultisampledTexture, //
- sem::DepthMultisampledTexture, //
- sem::StorageTexture>()) {
- op = spv::Op::OpImageQuerySize;
- } else if (auto* level = arg(Usage::kLevel)) {
- op = spv::Op::OpImageQuerySizeLod;
- spirv_params.emplace_back(gen(level));
- } else {
- ast::SintLiteralExpression i32_0(ProgramID(), Source{}, 0);
- op = spv::Op::OpImageQuerySizeLod;
- spirv_params.emplace_back(
- Operand::Int(GenerateLiteralIfNeeded(nullptr, &i32_0)));
- }
- break;
- }
- case BuiltinType::kTextureNumLayers: {
- uint32_t spirv_dims = 0;
- switch (texture_type->dim()) {
- default:
- error_ = "texture is not arrayed";
- return false;
- case ast::TextureDimension::k2dArray:
- case ast::TextureDimension::kCubeArray:
- spirv_dims = 3;
- break;
- }
+ auto append_coords_to_spirv_params = [&]() -> bool {
+ if (auto* array_index = arg(Usage::kArrayIndex)) {
+ // Array index needs to be appended to the coordinates.
+ auto* packed = AppendVector(&builder_, arg(Usage::kCoords)->Declaration(),
+ array_index->Declaration());
+ auto param = GenerateExpression(packed->Declaration());
+ if (param == 0) {
+ return false;
+ }
+ spirv_params.emplace_back(Operand(param));
+ } else {
+ spirv_params.emplace_back(gen_arg(Usage::kCoords)); // coordinates
+ }
+ return true;
+ };
- // OpImageQuerySize[Lod] packs the array count as the last element of the
- // returned vector. Extract this.
- if (!append_result_type_and_id_to_spirv_params_swizzled(
- spirv_dims, {spirv_dims - 1})) {
- return false;
- }
+ auto append_image_and_coords_to_spirv_params = [&]() -> bool {
+ auto sampler_param = gen_arg(Usage::kSampler);
+ auto texture_param = gen_arg(Usage::kTexture);
+ auto sampled_image = GenerateSampledImage(texture_type, texture_param, sampler_param);
- spirv_params.emplace_back(gen_arg(Usage::kTexture));
+ // Populate the spirv_params with the common parameters
+ spirv_params.emplace_back(Operand(sampled_image)); // sampled image
+ return append_coords_to_spirv_params();
+ };
- if (texture_type->Is<sem::MultisampledTexture>() ||
- texture_type->Is<sem::StorageTexture>()) {
- op = spv::Op::OpImageQuerySize;
- } else {
- ast::SintLiteralExpression i32_0(ProgramID(), Source{}, 0);
- op = spv::Op::OpImageQuerySizeLod;
- spirv_params.emplace_back(
- Operand::Int(GenerateLiteralIfNeeded(nullptr, &i32_0)));
- }
- break;
- }
- case BuiltinType::kTextureNumLevels: {
- op = spv::Op::OpImageQueryLevels;
- append_result_type_and_id_to_spirv_params();
- spirv_params.emplace_back(gen_arg(Usage::kTexture));
- break;
- }
- case BuiltinType::kTextureNumSamples: {
- op = spv::Op::OpImageQuerySamples;
- append_result_type_and_id_to_spirv_params();
- spirv_params.emplace_back(gen_arg(Usage::kTexture));
- break;
- }
- case BuiltinType::kTextureLoad: {
- op = texture_type->Is<sem::StorageTexture>() ? spv::Op::OpImageRead
- : spv::Op::OpImageFetch;
- append_result_type_and_id_to_spirv_params_for_read();
- spirv_params.emplace_back(gen_arg(Usage::kTexture));
- if (!append_coords_to_spirv_params()) {
- return false;
- }
+ switch (builtin->Type()) {
+ case BuiltinType::kTextureDimensions: {
+ // The number of elements returned by OpImageQuerySize[Lod] may not match
+ // what textureDimensions() returns.
+ // This might be due to an extra vector component describing the number of
+ // array elements or textureDimensions() returning a vec3 for cubes
+ // when only width / height is returned by OpImageQuerySize[Lod]
+ // (see https://github.com/gpuweb/gpuweb/issues/1345).
+ // Handle these mismatches by swizzling the returned vector.
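+ // For example, for a texture_2d_array OpImageQuerySizeLod yields
+ // (width, height, array_count); the {0, 1} swizzle keeps just
+ // (width, height), matching textureDimensions().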
+ std::vector<uint32_t> swizzle;
+ uint32_t spirv_dims = 0;
+ switch (texture_type->dim()) {
+ case ast::TextureDimension::kNone:
+ error_ = "texture dimension is kNone";
+ return false;
+ case ast::TextureDimension::k1d:
+ case ast::TextureDimension::k2d:
+ case ast::TextureDimension::k3d:
+ case ast::TextureDimension::kCube:
+ break; // No swizzle needed
+ case ast::TextureDimension::kCubeArray:
+ case ast::TextureDimension::k2dArray:
+ swizzle = {0, 1}; // Strip array index
+ spirv_dims = 3; // [width, height, array_count]
+ break;
+ }
- if (auto* level = arg(Usage::kLevel)) {
- image_operands.emplace_back(
- ImageOperand{SpvImageOperandsLodMask, gen(level)});
- }
+ if (!append_result_type_and_id_to_spirv_params_swizzled(spirv_dims, swizzle)) {
+ return false;
+ }
- if (auto* sample_index = arg(Usage::kSampleIndex)) {
- image_operands.emplace_back(
- ImageOperand{SpvImageOperandsSampleMask, gen(sample_index)});
- }
+ spirv_params.emplace_back(gen_arg(Usage::kTexture));
+ if (texture_type->IsAnyOf<sem::MultisampledTexture, //
+ sem::DepthMultisampledTexture, //
+ sem::StorageTexture>()) {
+ op = spv::Op::OpImageQuerySize;
+ } else if (auto* level = arg(Usage::kLevel)) {
+ op = spv::Op::OpImageQuerySizeLod;
+ spirv_params.emplace_back(gen(level));
+ } else {
+ op = spv::Op::OpImageQuerySizeLod;
+ spirv_params.emplace_back(
+ Operand(GenerateConstantIfNeeded(ScalarConstant::I32(0))));
+ }
+ break;
+ }
+ case BuiltinType::kTextureNumLayers: {
+ uint32_t spirv_dims = 0;
+ switch (texture_type->dim()) {
+ default:
+ error_ = "texture is not arrayed";
+ return false;
+ case ast::TextureDimension::k2dArray:
+ case ast::TextureDimension::kCubeArray:
+ spirv_dims = 3;
+ break;
+ }
- break;
- }
- case BuiltinType::kTextureStore: {
- op = spv::Op::OpImageWrite;
- spirv_params.emplace_back(gen_arg(Usage::kTexture));
- if (!append_coords_to_spirv_params()) {
- return false;
- }
- spirv_params.emplace_back(gen_arg(Usage::kValue));
- break;
- }
- case BuiltinType::kTextureGather: {
- op = spv::Op::OpImageGather;
- append_result_type_and_id_to_spirv_params();
- if (!append_image_and_coords_to_spirv_params()) {
- return false;
- }
- if (signature.IndexOf(Usage::kComponent) < 0) {
- spirv_params.emplace_back(
- Operand::Int(GenerateConstantIfNeeded(ScalarConstant::I32(0))));
- } else {
- spirv_params.emplace_back(gen_arg(Usage::kComponent));
- }
- break;
- }
- case BuiltinType::kTextureGatherCompare: {
- op = spv::Op::OpImageDrefGather;
- append_result_type_and_id_to_spirv_params();
- if (!append_image_and_coords_to_spirv_params()) {
- return false;
- }
- spirv_params.emplace_back(gen_arg(Usage::kDepthRef));
- break;
+ // OpImageQuerySize[Lod] packs the array count as the last element of the
+ // returned vector. Extract this.
+ if (!append_result_type_and_id_to_spirv_params_swizzled(spirv_dims, {spirv_dims - 1})) {
+ return false;
+ }
+
+ spirv_params.emplace_back(gen_arg(Usage::kTexture));
+
+ if (texture_type->Is<sem::MultisampledTexture>() ||
+ texture_type->Is<sem::StorageTexture>()) {
+ op = spv::Op::OpImageQuerySize;
+ } else {
+ op = spv::Op::OpImageQuerySizeLod;
+ spirv_params.emplace_back(
+ Operand(GenerateConstantIfNeeded(ScalarConstant::I32(0))));
+ }
+ break;
+ }
+ case BuiltinType::kTextureNumLevels: {
+ op = spv::Op::OpImageQueryLevels;
+ append_result_type_and_id_to_spirv_params();
+ spirv_params.emplace_back(gen_arg(Usage::kTexture));
+ break;
+ }
+ case BuiltinType::kTextureNumSamples: {
+ op = spv::Op::OpImageQuerySamples;
+ append_result_type_and_id_to_spirv_params();
+ spirv_params.emplace_back(gen_arg(Usage::kTexture));
+ break;
+ }
+ case BuiltinType::kTextureLoad: {
+ op = texture_type->Is<sem::StorageTexture>() ? spv::Op::OpImageRead
+ : spv::Op::OpImageFetch;
+ append_result_type_and_id_to_spirv_params_for_read();
+ spirv_params.emplace_back(gen_arg(Usage::kTexture));
+ if (!append_coords_to_spirv_params()) {
+ return false;
+ }
+
+ if (auto* level = arg(Usage::kLevel)) {
+ image_operands.emplace_back(ImageOperand{SpvImageOperandsLodMask, gen(level)});
+ }
+
+ if (auto* sample_index = arg(Usage::kSampleIndex)) {
+ image_operands.emplace_back(
+ ImageOperand{SpvImageOperandsSampleMask, gen(sample_index)});
+ }
+
+ break;
+ }
+ case BuiltinType::kTextureStore: {
+ op = spv::Op::OpImageWrite;
+ spirv_params.emplace_back(gen_arg(Usage::kTexture));
+ if (!append_coords_to_spirv_params()) {
+ return false;
+ }
+ spirv_params.emplace_back(gen_arg(Usage::kValue));
+ break;
+ }
+ case BuiltinType::kTextureGather: {
+ op = spv::Op::OpImageGather;
+ append_result_type_and_id_to_spirv_params();
+ if (!append_image_and_coords_to_spirv_params()) {
+ return false;
+ }
+ if (signature.IndexOf(Usage::kComponent) < 0) {
+ spirv_params.emplace_back(
+ Operand(GenerateConstantIfNeeded(ScalarConstant::I32(0))));
+ } else {
+ spirv_params.emplace_back(gen_arg(Usage::kComponent));
+ }
+ break;
+ }
+ case BuiltinType::kTextureGatherCompare: {
+ op = spv::Op::OpImageDrefGather;
+ append_result_type_and_id_to_spirv_params();
+ if (!append_image_and_coords_to_spirv_params()) {
+ return false;
+ }
+ spirv_params.emplace_back(gen_arg(Usage::kDepthRef));
+ break;
+ }
+ case BuiltinType::kTextureSample: {
+ op = spv::Op::OpImageSampleImplicitLod;
+ append_result_type_and_id_to_spirv_params_for_read();
+ if (!append_image_and_coords_to_spirv_params()) {
+ return false;
+ }
+ break;
+ }
+ case BuiltinType::kTextureSampleBias: {
+ op = spv::Op::OpImageSampleImplicitLod;
+ append_result_type_and_id_to_spirv_params_for_read();
+ if (!append_image_and_coords_to_spirv_params()) {
+ return false;
+ }
+ image_operands.emplace_back(
+ ImageOperand{SpvImageOperandsBiasMask, gen_arg(Usage::kBias)});
+ break;
+ }
+ case BuiltinType::kTextureSampleLevel: {
+ op = spv::Op::OpImageSampleExplicitLod;
+ append_result_type_and_id_to_spirv_params_for_read();
+ if (!append_image_and_coords_to_spirv_params()) {
+ return false;
+ }
+ auto level = Operand(0u);
+ if (arg(Usage::kLevel)->Type()->UnwrapRef()->Is<sem::I32>()) {
+ // Depth textures have i32 parameters for the level, but SPIR-V expects
+ // F32. Cast.
+ auto f32_type_id = GenerateTypeIfNeeded(builder_.create<sem::F32>());
+ if (f32_type_id == 0) {
+ return 0;
+ }
+ level = result_op();
+ if (!push_function_inst(spv::Op::OpConvertSToF,
+ {Operand(f32_type_id), level, gen_arg(Usage::kLevel)})) {
+ return 0;
+ }
+ } else {
+ level = gen_arg(Usage::kLevel);
+ }
+ image_operands.emplace_back(ImageOperand{SpvImageOperandsLodMask, level});
+ break;
+ }
+ case BuiltinType::kTextureSampleGrad: {
+ op = spv::Op::OpImageSampleExplicitLod;
+ append_result_type_and_id_to_spirv_params_for_read();
+ if (!append_image_and_coords_to_spirv_params()) {
+ return false;
+ }
+ image_operands.emplace_back(
+ ImageOperand{SpvImageOperandsGradMask, gen_arg(Usage::kDdx)});
+ image_operands.emplace_back(
+ ImageOperand{SpvImageOperandsGradMask, gen_arg(Usage::kDdy)});
+ break;
+ }
+ case BuiltinType::kTextureSampleCompare: {
+ op = spv::Op::OpImageSampleDrefImplicitLod;
+ append_result_type_and_id_to_spirv_params();
+ if (!append_image_and_coords_to_spirv_params()) {
+ return false;
+ }
+ spirv_params.emplace_back(gen_arg(Usage::kDepthRef));
+ break;
+ }
+ case BuiltinType::kTextureSampleCompareLevel: {
+ op = spv::Op::OpImageSampleDrefExplicitLod;
+ append_result_type_and_id_to_spirv_params();
+ if (!append_image_and_coords_to_spirv_params()) {
+ return false;
+ }
+ spirv_params.emplace_back(gen_arg(Usage::kDepthRef));
+
+ image_operands.emplace_back(
+ ImageOperand{SpvImageOperandsLodMask,
+ Operand(GenerateConstantIfNeeded(ScalarConstant::F32(0.0)))});
+ break;
+ }
+ default:
+ TINT_UNREACHABLE(Writer, builder_.Diagnostics());
+ return false;
}
- case BuiltinType::kTextureSample: {
- op = spv::Op::OpImageSampleImplicitLod;
- append_result_type_and_id_to_spirv_params_for_read();
- if (!append_image_and_coords_to_spirv_params()) {
- return false;
- }
- break;
+
+ if (auto* offset = arg(Usage::kOffset)) {
+ image_operands.emplace_back(ImageOperand{SpvImageOperandsConstOffsetMask, gen(offset)});
}
- case BuiltinType::kTextureSampleBias: {
- op = spv::Op::OpImageSampleImplicitLod;
- append_result_type_and_id_to_spirv_params_for_read();
- if (!append_image_and_coords_to_spirv_params()) {
- return false;
- }
- image_operands.emplace_back(
- ImageOperand{SpvImageOperandsBiasMask, gen_arg(Usage::kBias)});
- break;
- }
- case BuiltinType::kTextureSampleLevel: {
- op = spv::Op::OpImageSampleExplicitLod;
- append_result_type_and_id_to_spirv_params_for_read();
- if (!append_image_and_coords_to_spirv_params()) {
- return false;
- }
- auto level = Operand::Int(0);
- if (arg(Usage::kLevel)->Type()->UnwrapRef()->Is<sem::I32>()) {
- // Depth textures have i32 parameters for the level, but SPIR-V expects
- // F32. Cast.
- auto f32_type_id = GenerateTypeIfNeeded(builder_.create<sem::F32>());
- if (f32_type_id == 0) {
- return 0;
- }
- level = result_op();
- if (!push_function_inst(
- spv::Op::OpConvertSToF,
- {Operand::Int(f32_type_id), level, gen_arg(Usage::kLevel)})) {
- return 0;
+
+ if (!image_operands.empty()) {
+ std::sort(image_operands.begin(), image_operands.end(),
+ [](auto& a, auto& b) { return a.mask < b.mask; });
+ uint32_t mask = 0;
+ for (auto& image_operand : image_operands) {
+ mask |= image_operand.mask;
+ }
+ spirv_params.emplace_back(Operand(mask));
+ for (auto& image_operand : image_operands) {
+ spirv_params.emplace_back(image_operand.operand);
}
- } else {
- level = gen_arg(Usage::kLevel);
- }
- image_operands.emplace_back(ImageOperand{SpvImageOperandsLodMask, level});
- break;
}
- case BuiltinType::kTextureSampleGrad: {
- op = spv::Op::OpImageSampleExplicitLod;
- append_result_type_and_id_to_spirv_params_for_read();
- if (!append_image_and_coords_to_spirv_params()) {
- return false;
- }
- image_operands.emplace_back(
- ImageOperand{SpvImageOperandsGradMask, gen_arg(Usage::kDdx)});
- image_operands.emplace_back(
- ImageOperand{SpvImageOperandsGradMask, gen_arg(Usage::kDdy)});
- break;
- }
- case BuiltinType::kTextureSampleCompare: {
- op = spv::Op::OpImageSampleDrefImplicitLod;
- append_result_type_and_id_to_spirv_params();
- if (!append_image_and_coords_to_spirv_params()) {
+
+ if (op == spv::Op::OpNop) {
+ error_ = "unable to determine operator for: " + std::string(builtin->str());
return false;
- }
- spirv_params.emplace_back(gen_arg(Usage::kDepthRef));
- break;
}
- case BuiltinType::kTextureSampleCompareLevel: {
- op = spv::Op::OpImageSampleDrefExplicitLod;
- append_result_type_and_id_to_spirv_params();
- if (!append_image_and_coords_to_spirv_params()) {
- return false;
- }
- spirv_params.emplace_back(gen_arg(Usage::kDepthRef));
-
- ast::FloatLiteralExpression float_0(ProgramID(), Source{}, 0.0);
- image_operands.emplace_back(ImageOperand{
- SpvImageOperandsLodMask,
- Operand::Int(GenerateLiteralIfNeeded(nullptr, &float_0))});
- break;
- }
- default:
- TINT_UNREACHABLE(Writer, builder_.Diagnostics());
- return false;
- }
-
- if (auto* offset = arg(Usage::kOffset)) {
- image_operands.emplace_back(
- ImageOperand{SpvImageOperandsConstOffsetMask, gen(offset)});
- }
-
- if (!image_operands.empty()) {
- std::sort(image_operands.begin(), image_operands.end(),
- [](auto& a, auto& b) { return a.mask < b.mask; });
- uint32_t mask = 0;
- for (auto& image_operand : image_operands) {
- mask |= image_operand.mask;
- }
- spirv_params.emplace_back(Operand::Int(mask));
- for (auto& image_operand : image_operands) {
- spirv_params.emplace_back(image_operand.operand);
- }
- }
-
- if (op == spv::Op::OpNop) {
- error_ = "unable to determine operator for: " + std::string(builtin->str());
- return false;
- }
- if (!push_function_inst(op, spirv_params)) {
- return false;
- }
+ if (!push_function_inst(op, spirv_params)) {
+ return false;
+ }
- return post_emission();
+ return post_emission();
}
bool Builder::GenerateControlBarrierBuiltin(const sem::Builtin* builtin) {
- auto const op = spv::Op::OpControlBarrier;
- uint32_t execution = 0;
- uint32_t memory = 0;
- uint32_t semantics = 0;
-
- // TODO(crbug.com/tint/661): Combine sequential barriers to a single
- // instruction.
- if (builtin->Type() == sem::BuiltinType::kWorkgroupBarrier) {
- execution = static_cast<uint32_t>(spv::Scope::Workgroup);
- memory = static_cast<uint32_t>(spv::Scope::Workgroup);
- semantics =
- static_cast<uint32_t>(spv::MemorySemanticsMask::AcquireRelease) |
- static_cast<uint32_t>(spv::MemorySemanticsMask::WorkgroupMemory);
- } else if (builtin->Type() == sem::BuiltinType::kStorageBarrier) {
- execution = static_cast<uint32_t>(spv::Scope::Workgroup);
- memory = static_cast<uint32_t>(spv::Scope::Workgroup);
- semantics =
- static_cast<uint32_t>(spv::MemorySemanticsMask::AcquireRelease) |
- static_cast<uint32_t>(spv::MemorySemanticsMask::UniformMemory);
- } else {
- error_ = "unexpected barrier builtin type ";
- error_ += sem::str(builtin->Type());
- return false;
- }
+ auto const op = spv::Op::OpControlBarrier;
+ uint32_t execution = 0;
+ uint32_t memory = 0;
+ uint32_t semantics = 0;
+
+ // TODO(crbug.com/tint/661): Combine sequential barriers to a single
+ // instruction.
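+ // For example, workgroupBarrier() lowers to an OpControlBarrier with
+ // Workgroup (2) for both the execution and memory scopes and semantics
+ // AcquireRelease | WorkgroupMemory (0x108).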
+ if (builtin->Type() == sem::BuiltinType::kWorkgroupBarrier) {
+ execution = static_cast<uint32_t>(spv::Scope::Workgroup);
+ memory = static_cast<uint32_t>(spv::Scope::Workgroup);
+ semantics = static_cast<uint32_t>(spv::MemorySemanticsMask::AcquireRelease) |
+ static_cast<uint32_t>(spv::MemorySemanticsMask::WorkgroupMemory);
+ } else if (builtin->Type() == sem::BuiltinType::kStorageBarrier) {
+ execution = static_cast<uint32_t>(spv::Scope::Workgroup);
+ memory = static_cast<uint32_t>(spv::Scope::Workgroup);
+ semantics = static_cast<uint32_t>(spv::MemorySemanticsMask::AcquireRelease) |
+ static_cast<uint32_t>(spv::MemorySemanticsMask::UniformMemory);
+ } else {
+ error_ = "unexpected barrier builtin type ";
+ error_ += sem::str(builtin->Type());
+ return false;
+ }
- auto execution_id = GenerateConstantIfNeeded(ScalarConstant::U32(execution));
- auto memory_id = GenerateConstantIfNeeded(ScalarConstant::U32(memory));
- auto semantics_id = GenerateConstantIfNeeded(ScalarConstant::U32(semantics));
- if (execution_id == 0 || memory_id == 0 || semantics_id == 0) {
- return false;
- }
+ auto execution_id = GenerateConstantIfNeeded(ScalarConstant::U32(execution));
+ auto memory_id = GenerateConstantIfNeeded(ScalarConstant::U32(memory));
+ auto semantics_id = GenerateConstantIfNeeded(ScalarConstant::U32(semantics));
+ if (execution_id == 0 || memory_id == 0 || semantics_id == 0) {
+ return false;
+ }
- return push_function_inst(op, {
- Operand::Int(execution_id),
- Operand::Int(memory_id),
- Operand::Int(semantics_id),
- });
+ return push_function_inst(op, {
+ Operand(execution_id),
+ Operand(memory_id),
+ Operand(semantics_id),
+ });
}
bool Builder::GenerateAtomicBuiltin(const sem::Call* call,
const sem::Builtin* builtin,
Operand result_type,
Operand result_id) {
- auto is_value_signed = [&] {
- return builtin->Parameters()[1]->Type()->Is<sem::I32>();
- };
-
- auto storage_class =
- builtin->Parameters()[0]->Type()->As<sem::Pointer>()->StorageClass();
-
- uint32_t memory_id = 0;
- switch (
- builtin->Parameters()[0]->Type()->As<sem::Pointer>()->StorageClass()) {
- case ast::StorageClass::kWorkgroup:
- memory_id = GenerateConstantIfNeeded(
- ScalarConstant::U32(static_cast<uint32_t>(spv::Scope::Workgroup)));
- break;
- case ast::StorageClass::kStorage:
- memory_id = GenerateConstantIfNeeded(
- ScalarConstant::U32(static_cast<uint32_t>(spv::Scope::Device)));
- break;
- default:
- TINT_UNREACHABLE(Writer, builder_.Diagnostics())
- << "unhandled atomic storage class " << storage_class;
- return false;
- }
- if (memory_id == 0) {
- return false;
- }
+ auto is_value_signed = [&] { return builtin->Parameters()[1]->Type()->Is<sem::I32>(); };
+
+ auto storage_class = builtin->Parameters()[0]->Type()->As<sem::Pointer>()->StorageClass();
+
+ uint32_t memory_id = 0;
+ switch (builtin->Parameters()[0]->Type()->As<sem::Pointer>()->StorageClass()) {
+ case ast::StorageClass::kWorkgroup:
+ memory_id = GenerateConstantIfNeeded(
+ ScalarConstant::U32(static_cast<uint32_t>(spv::Scope::Workgroup)));
+ break;
+ case ast::StorageClass::kStorage:
+ memory_id = GenerateConstantIfNeeded(
+ ScalarConstant::U32(static_cast<uint32_t>(spv::Scope::Device)));
+ break;
+ default:
+ TINT_UNREACHABLE(Writer, builder_.Diagnostics())
+ << "unhandled atomic storage class " << storage_class;
+ return false;
+ }
+ if (memory_id == 0) {
+ return false;
+ }
- uint32_t semantics_id = GenerateConstantIfNeeded(ScalarConstant::U32(
- static_cast<uint32_t>(spv::MemorySemanticsMask::MaskNone)));
- if (semantics_id == 0) {
- return false;
- }
+ uint32_t semantics_id = GenerateConstantIfNeeded(
+ ScalarConstant::U32(static_cast<uint32_t>(spv::MemorySemanticsMask::MaskNone)));
+ if (semantics_id == 0) {
+ return false;
+ }
- uint32_t pointer_id = GenerateExpression(call->Arguments()[0]->Declaration());
- if (pointer_id == 0) {
- return false;
- }
-
- uint32_t value_id = 0;
- if (call->Arguments().size() > 1) {
- value_id = GenerateExpressionWithLoadIfNeeded(call->Arguments().back());
- if (value_id == 0) {
- return false;
- }
- }
-
- Operand pointer = Operand::Int(pointer_id);
- Operand value = Operand::Int(value_id);
- Operand memory = Operand::Int(memory_id);
- Operand semantics = Operand::Int(semantics_id);
-
- switch (builtin->Type()) {
- case sem::BuiltinType::kAtomicLoad:
- return push_function_inst(spv::Op::OpAtomicLoad, {
- result_type,
- result_id,
- pointer,
- memory,
- semantics,
- });
- case sem::BuiltinType::kAtomicStore:
- return push_function_inst(spv::Op::OpAtomicStore, {
- pointer,
- memory,
- semantics,
- value,
- });
- case sem::BuiltinType::kAtomicAdd:
- return push_function_inst(spv::Op::OpAtomicIAdd, {
- result_type,
- result_id,
- pointer,
- memory,
- semantics,
- value,
- });
- case sem::BuiltinType::kAtomicSub:
- return push_function_inst(spv::Op::OpAtomicISub, {
- result_type,
- result_id,
- pointer,
- memory,
- semantics,
- value,
- });
- case sem::BuiltinType::kAtomicMax:
- return push_function_inst(
- is_value_signed() ? spv::Op::OpAtomicSMax : spv::Op::OpAtomicUMax,
- {
- result_type,
- result_id,
- pointer,
- memory,
- semantics,
- value,
- });
- case sem::BuiltinType::kAtomicMin:
- return push_function_inst(
- is_value_signed() ? spv::Op::OpAtomicSMin : spv::Op::OpAtomicUMin,
- {
- result_type,
- result_id,
- pointer,
- memory,
- semantics,
- value,
- });
- case sem::BuiltinType::kAtomicAnd:
- return push_function_inst(spv::Op::OpAtomicAnd, {
- result_type,
- result_id,
- pointer,
- memory,
- semantics,
- value,
- });
- case sem::BuiltinType::kAtomicOr:
- return push_function_inst(spv::Op::OpAtomicOr, {
- result_type,
- result_id,
- pointer,
- memory,
- semantics,
- value,
- });
- case sem::BuiltinType::kAtomicXor:
- return push_function_inst(spv::Op::OpAtomicXor, {
- result_type,
- result_id,
- pointer,
- memory,
- semantics,
- value,
- });
- case sem::BuiltinType::kAtomicExchange:
- return push_function_inst(spv::Op::OpAtomicExchange, {
+ uint32_t pointer_id = GenerateExpression(call->Arguments()[0]->Declaration());
+ if (pointer_id == 0) {
+ return false;
+ }
+
+ uint32_t value_id = 0;
+ if (call->Arguments().size() > 1) {
+ value_id = GenerateExpressionWithLoadIfNeeded(call->Arguments().back());
+ if (value_id == 0) {
+ return false;
+ }
+ }
+
+ Operand pointer = Operand(pointer_id);
+ Operand value = Operand(value_id);
+ Operand memory = Operand(memory_id);
+ Operand semantics = Operand(semantics_id);
+
+ switch (builtin->Type()) {
+ case sem::BuiltinType::kAtomicLoad:
+ return push_function_inst(spv::Op::OpAtomicLoad, {
+ result_type,
+ result_id,
+ pointer,
+ memory,
+ semantics,
+ });
+ case sem::BuiltinType::kAtomicStore:
+ return push_function_inst(spv::Op::OpAtomicStore, {
+ pointer,
+ memory,
+ semantics,
+ value,
+ });
+ case sem::BuiltinType::kAtomicAdd:
+ return push_function_inst(spv::Op::OpAtomicIAdd, {
+ result_type,
+ result_id,
+ pointer,
+ memory,
+ semantics,
+ value,
+ });
+ case sem::BuiltinType::kAtomicSub:
+ return push_function_inst(spv::Op::OpAtomicISub, {
+ result_type,
+ result_id,
+ pointer,
+ memory,
+ semantics,
+ value,
+ });
+ case sem::BuiltinType::kAtomicMax:
+ return push_function_inst(
+ is_value_signed() ? spv::Op::OpAtomicSMax : spv::Op::OpAtomicUMax, {
+ result_type,
+ result_id,
+ pointer,
+ memory,
+ semantics,
+ value,
+ });
+ case sem::BuiltinType::kAtomicMin:
+ return push_function_inst(
+ is_value_signed() ? spv::Op::OpAtomicSMin : spv::Op::OpAtomicUMin, {
+ result_type,
+ result_id,
+ pointer,
+ memory,
+ semantics,
+ value,
+ });
+ case sem::BuiltinType::kAtomicAnd:
+ return push_function_inst(spv::Op::OpAtomicAnd, {
+ result_type,
+ result_id,
+ pointer,
+ memory,
+ semantics,
+ value,
+ });
+ case sem::BuiltinType::kAtomicOr:
+ return push_function_inst(spv::Op::OpAtomicOr, {
result_type,
result_id,
pointer,
@@ -3185,1148 +3261,1055 @@ bool Builder::GenerateAtomicBuiltin(const sem::Call* call,
semantics,
value,
});
- case sem::BuiltinType::kAtomicCompareExchangeWeak: {
- auto comparator = GenerateExpression(call->Arguments()[1]->Declaration());
- if (comparator == 0) {
- return false;
- }
-
- auto* value_sem_type = TypeOf(call->Arguments()[2]->Declaration());
-
- auto value_type = GenerateTypeIfNeeded(value_sem_type);
- if (value_type == 0) {
- return false;
- }
+ case sem::BuiltinType::kAtomicXor:
+ return push_function_inst(spv::Op::OpAtomicXor, {
+ result_type,
+ result_id,
+ pointer,
+ memory,
+ semantics,
+ value,
+ });
+ case sem::BuiltinType::kAtomicExchange:
+ return push_function_inst(spv::Op::OpAtomicExchange, {
+ result_type,
+ result_id,
+ pointer,
+ memory,
+ semantics,
+ value,
+ });
+ case sem::BuiltinType::kAtomicCompareExchangeWeak: {
+ auto comparator = GenerateExpression(call->Arguments()[1]->Declaration());
+ if (comparator == 0) {
+ return false;
+ }
- auto* bool_sem_ty = builder_.create<sem::Bool>();
- auto bool_type = GenerateTypeIfNeeded(bool_sem_ty);
- if (bool_type == 0) {
- return false;
- }
+ auto* value_sem_type = call->Target()->Signature().parameters[2]->Type();
- // original_value := OpAtomicCompareExchange(pointer, memory, semantics,
- // semantics, value, comparator)
- auto original_value = result_op();
- if (!push_function_inst(spv::Op::OpAtomicCompareExchange,
- {
- Operand::Int(value_type),
- original_value,
- pointer,
- memory,
- semantics,
- semantics,
- value,
- Operand::Int(comparator),
- })) {
- return false;
- }
+ auto value_type = GenerateTypeIfNeeded(value_sem_type);
+ if (value_type == 0) {
+ return false;
+ }
- // values_equal := original_value == value
- auto values_equal = result_op();
- if (!push_function_inst(spv::Op::OpIEqual, {
- Operand::Int(bool_type),
- values_equal,
- original_value,
- value,
- })) {
- return false;
- }
+ auto* bool_sem_ty = builder_.create<sem::Bool>();
+ auto bool_type = GenerateTypeIfNeeded(bool_sem_ty);
+ if (bool_type == 0) {
+ return false;
+ }
- // zero := T(0)
- // one := T(1)
- uint32_t zero = 0;
- uint32_t one = 0;
- if (value_sem_type->Is<sem::I32>()) {
- zero = GenerateConstantIfNeeded(ScalarConstant::I32(0u));
- one = GenerateConstantIfNeeded(ScalarConstant::I32(1u));
- } else if (value_sem_type->Is<sem::U32>()) {
- zero = GenerateConstantIfNeeded(ScalarConstant::U32(0u));
- one = GenerateConstantIfNeeded(ScalarConstant::U32(1u));
- } else {
- TINT_UNREACHABLE(Writer, builder_.Diagnostics())
- << "unsupported atomic type " << value_sem_type->TypeInfo().name;
- }
- if (zero == 0 || one == 0) {
- return false;
- }
+ // original_value := OpAtomicCompareExchange(pointer, memory, semantics,
+ // semantics, value, comparator)
+ auto original_value = result_op();
+ if (!push_function_inst(spv::Op::OpAtomicCompareExchange, {
+ Operand(value_type),
+ original_value,
+ pointer,
+ memory,
+ semantics,
+ semantics,
+ value,
+ Operand(comparator),
+ })) {
+ return false;
+ }
- // xchg_success := values_equal ? one : zero
- auto xchg_success = result_op();
- if (!push_function_inst(spv::Op::OpSelect, {
- Operand::Int(value_type),
- xchg_success,
- values_equal,
- Operand::Int(one),
- Operand::Int(zero),
- })) {
- return false;
- }
+ // values_equal := original_value == value
+ auto values_equal = result_op();
+ if (!push_function_inst(spv::Op::OpIEqual, {
+ Operand(bool_type),
+ values_equal,
+ original_value,
+ value,
+ })) {
+ return false;
+ }
- // result := vec2<T>(original_value, xchg_success)
- return push_function_inst(spv::Op::OpCompositeConstruct,
- {
- result_type,
- result_id,
- original_value,
- xchg_success,
- });
- }
- default:
- TINT_UNREACHABLE(Writer, builder_.Diagnostics())
- << "unhandled atomic builtin " << builtin->Type();
- return false;
- }
+ // result := __atomic_compare_exchange_result<T>(original_value, values_equal)
+ return push_function_inst(spv::Op::OpCompositeConstruct, {
+ result_type,
+ result_id,
+ original_value,
+ values_equal,
+ });
+ }
+ default:
+ TINT_UNREACHABLE(Writer, builder_.Diagnostics())
+ << "unhandled atomic builtin " << builtin->Type();
+ return false;
+ }
}
uint32_t Builder::GenerateSampledImage(const sem::Type* texture_type,
Operand texture_operand,
Operand sampler_operand) {
- // DepthTexture is always declared as SampledTexture.
- // The Vulkan spec says: The "Depth" operand of OpTypeImage is ignored.
- // In SPIRV, 0 means not depth, 1 means depth, and 2 means unknown.
- // Using anything other than 0 is problematic on various Vulkan drivers.
- if (auto* depthTextureType = texture_type->As<sem::DepthTexture>()) {
- texture_type = builder_.create<sem::SampledTexture>(
- depthTextureType->dim(), builder_.create<sem::F32>());
- }
-
- uint32_t sampled_image_type_id = utils::GetOrCreate(
- texture_type_to_sampled_image_type_id_, texture_type, [&] {
- // We need to create the sampled image type and cache the result.
- auto sampled_image_type = result_op();
- auto texture_type_id = GenerateTypeIfNeeded(texture_type);
- push_type(spv::Op::OpTypeSampledImage,
- {sampled_image_type, Operand::Int(texture_type_id)});
- return sampled_image_type.to_i();
- });
-
- auto sampled_image = result_op();
- if (!push_function_inst(spv::Op::OpSampledImage,
- {Operand::Int(sampled_image_type_id), sampled_image,
- texture_operand, sampler_operand})) {
- return 0;
- }
+ // DepthTexture is always declared as SampledTexture.
+ // The Vulkan spec says: The "Depth" operand of OpTypeImage is ignored.
+ // In SPIRV, 0 means not depth, 1 means depth, and 2 means unknown.
+ // Using anything other than 0 is problematic on various Vulkan drivers.
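+ // For example, a texture_depth_2d is therefore sampled through an
+ // OpTypeSampledImage whose image type is the equivalent f32 2D sampled
+ // texture rather than a depth-flagged image type.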
+ if (auto* depthTextureType = texture_type->As<sem::DepthTexture>()) {
+ texture_type = builder_.create<sem::SampledTexture>(depthTextureType->dim(),
+ builder_.create<sem::F32>());
+ }
+
+ uint32_t sampled_image_type_id =
+ utils::GetOrCreate(texture_type_to_sampled_image_type_id_, texture_type, [&] {
+ // We need to create the sampled image type and cache the result.
+ auto sampled_image_type = result_op();
+ auto texture_type_id = GenerateTypeIfNeeded(texture_type);
+ push_type(spv::Op::OpTypeSampledImage, {sampled_image_type, Operand(texture_type_id)});
+ return std::get<uint32_t>(sampled_image_type);
+ });
- return sampled_image.to_i();
+ auto sampled_image = result_op();
+ if (!push_function_inst(spv::Op::OpSampledImage, {Operand(sampled_image_type_id), sampled_image,
+ texture_operand, sampler_operand})) {
+ return 0;
+ }
+
+ return std::get<uint32_t>(sampled_image);
}
-uint32_t Builder::GenerateBitcastExpression(
- const ast::BitcastExpression* expr) {
- auto result = result_op();
- auto result_id = result.to_i();
+uint32_t Builder::GenerateBitcastExpression(const ast::BitcastExpression* expr) {
+ auto result = result_op();
+ auto result_id = std::get<uint32_t>(result);
- auto result_type_id = GenerateTypeIfNeeded(TypeOf(expr));
- if (result_type_id == 0) {
- return 0;
- }
+ auto result_type_id = GenerateTypeIfNeeded(TypeOf(expr));
+ if (result_type_id == 0) {
+ return 0;
+ }
- auto val_id = GenerateExpressionWithLoadIfNeeded(expr->expr);
- if (val_id == 0) {
- return 0;
- }
+ auto val_id = GenerateExpressionWithLoadIfNeeded(expr->expr);
+ if (val_id == 0) {
+ return 0;
+ }
- // Bitcast does not allow same types, just emit a CopyObject
- auto* to_type = TypeOf(expr)->UnwrapRef();
- auto* from_type = TypeOf(expr->expr)->UnwrapRef();
- if (to_type == from_type) {
- if (!push_function_inst(
- spv::Op::OpCopyObject,
- {Operand::Int(result_type_id), result, Operand::Int(val_id)})) {
- return 0;
+ // Bitcast does not allow same types, just emit a CopyObject
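+ // (e.g. bitcast<f32>(expr) where expr is already f32 becomes OpCopyObject
+ // instead of OpBitcast).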
+ auto* to_type = TypeOf(expr)->UnwrapRef();
+ auto* from_type = TypeOf(expr->expr)->UnwrapRef();
+ if (to_type == from_type) {
+ if (!push_function_inst(spv::Op::OpCopyObject,
+ {Operand(result_type_id), result, Operand(val_id)})) {
+ return 0;
+ }
+ return result_id;
}
- return result_id;
- }
- if (!push_function_inst(spv::Op::OpBitcast, {Operand::Int(result_type_id),
- result, Operand::Int(val_id)})) {
- return 0;
- }
+ if (!push_function_inst(spv::Op::OpBitcast,
+ {Operand(result_type_id), result, Operand(val_id)})) {
+ return 0;
+ }
- return result_id;
+ return result_id;
}
-bool Builder::GenerateConditionalBlock(
- const ast::Expression* cond,
- const ast::BlockStatement* true_body,
- size_t cur_else_idx,
- const ast::ElseStatementList& else_stmts) {
- auto cond_id = GenerateExpressionWithLoadIfNeeded(cond);
- if (cond_id == 0) {
- return false;
- }
-
- auto merge_block = result_op();
- auto merge_block_id = merge_block.to_i();
+bool Builder::GenerateConditionalBlock(const ast::Expression* cond,
+ const ast::BlockStatement* true_body,
+ const ast::Statement* else_stmt) {
+ auto cond_id = GenerateExpressionWithLoadIfNeeded(cond);
+ if (cond_id == 0) {
+ return false;
+ }
- if (!push_function_inst(spv::Op::OpSelectionMerge,
- {Operand::Int(merge_block_id),
- Operand::Int(SpvSelectionControlMaskNone)})) {
- return false;
- }
+ auto merge_block = result_op();
+ auto merge_block_id = std::get<uint32_t>(merge_block);
- auto true_block = result_op();
- auto true_block_id = true_block.to_i();
+ if (!push_function_inst(spv::Op::OpSelectionMerge,
+ {Operand(merge_block_id), U32Operand(SpvSelectionControlMaskNone)})) {
+ return false;
+ }
- // if there are no more else statements we branch on false to the merge
- // block otherwise we branch to the false block
- auto false_block_id =
- cur_else_idx < else_stmts.size() ? next_id() : merge_block_id;
+ auto true_block = result_op();
+ auto true_block_id = std::get<uint32_t>(true_block);
- if (!push_function_inst(spv::Op::OpBranchConditional,
- {Operand::Int(cond_id), Operand::Int(true_block_id),
- Operand::Int(false_block_id)})) {
- return false;
- }
+ // If there is no else statement we branch on false to the merge block,
+ // otherwise we branch to the false block.
+ auto false_block_id = else_stmt ? next_id() : merge_block_id;
- // Output true block
- if (!GenerateLabel(true_block_id)) {
- return false;
- }
- if (!GenerateBlockStatement(true_body)) {
- return false;
- }
- // We only branch if the last element of the body didn't already branch.
- if (InsideBasicBlock()) {
- if (!push_function_inst(spv::Op::OpBranch,
- {Operand::Int(merge_block_id)})) {
- return false;
- }
- }
-
- // Start the false block if needed
- if (false_block_id != merge_block_id) {
- if (!GenerateLabel(false_block_id)) {
- return false;
+ if (!push_function_inst(spv::Op::OpBranchConditional,
+ {Operand(cond_id), Operand(true_block_id), Operand(false_block_id)})) {
+ return false;
}
- auto* else_stmt = else_stmts[cur_else_idx];
- // Handle the else case by just outputting the statements.
- if (!else_stmt->condition) {
- if (!GenerateBlockStatement(else_stmt->body)) {
+ // Output true block
+ if (!GenerateLabel(true_block_id)) {
return false;
- }
- } else {
- if (!GenerateConditionalBlock(else_stmt->condition, else_stmt->body,
- cur_else_idx + 1, else_stmts)) {
+ }
+ if (!GenerateBlockStatement(true_body)) {
return false;
- }
}
+ // We only branch if the last element of the body didn't already branch.
if (InsideBasicBlock()) {
- if (!push_function_inst(spv::Op::OpBranch,
- {Operand::Int(merge_block_id)})) {
- return false;
- }
+ if (!push_function_inst(spv::Op::OpBranch, {Operand(merge_block_id)})) {
+ return false;
+ }
+ }
+
+ // Start the false block if needed
+ if (false_block_id != merge_block_id) {
+ if (!GenerateLabel(false_block_id)) {
+ return false;
+ }
+
+ // Handle the else case by just outputting the statements.
+ if (auto* block = else_stmt->As<ast::BlockStatement>()) {
+ if (!GenerateBlockStatement(block)) {
+ return false;
+ }
+ } else {
+ auto* elseif = else_stmt->As<ast::IfStatement>();
+ if (!GenerateConditionalBlock(elseif->condition, elseif->body,
+ elseif->else_statement)) {
+ return false;
+ }
+ }
+ if (InsideBasicBlock()) {
+ if (!push_function_inst(spv::Op::OpBranch, {Operand(merge_block_id)})) {
+ return false;
+ }
+ }
}
- }
- // Output the merge block
- return GenerateLabel(merge_block_id);
+ // Output the merge block
+ return GenerateLabel(merge_block_id);
}
bool Builder::GenerateIfStatement(const ast::IfStatement* stmt) {
- if (!continuing_stack_.empty() &&
- stmt == continuing_stack_.back().last_statement->As<ast::IfStatement>()) {
- const ContinuingInfo& ci = continuing_stack_.back();
- // Match one of two patterns: the break-if and break-unless patterns.
- //
- // The break-if pattern:
- // continuing { ...
- // if (cond) { break; }
- // }
- //
- // The break-unless pattern:
- // continuing { ...
- // if (cond) {} else {break;}
- // }
- auto is_just_a_break = [](const ast::BlockStatement* block) {
- return block && (block->statements.size() == 1) &&
- block->Last()->Is<ast::BreakStatement>();
- };
- if (is_just_a_break(stmt->body) && stmt->else_statements.empty()) {
- // It's a break-if.
- TINT_ASSERT(Writer, !backedge_stack_.empty());
- const auto cond_id = GenerateExpressionWithLoadIfNeeded(stmt->condition);
- if (!cond_id) {
- return false;
- }
- backedge_stack_.back() =
- Backedge(spv::Op::OpBranchConditional,
- {Operand::Int(cond_id), Operand::Int(ci.break_target_id),
- Operand::Int(ci.loop_header_id)});
- return true;
- } else if (stmt->body->Empty()) {
- const auto& es = stmt->else_statements;
- if (es.size() == 1 && !es.back()->condition &&
- is_just_a_break(es.back()->body)) {
- // It's a break-unless.
- TINT_ASSERT(Writer, !backedge_stack_.empty());
- const auto cond_id =
- GenerateExpressionWithLoadIfNeeded(stmt->condition);
- if (!cond_id) {
- return false;
- }
- backedge_stack_.back() =
- Backedge(spv::Op::OpBranchConditional,
- {Operand::Int(cond_id), Operand::Int(ci.loop_header_id),
- Operand::Int(ci.break_target_id)});
- return true;
- }
+ if (!continuing_stack_.empty() &&
+ stmt == continuing_stack_.back().last_statement->As<ast::IfStatement>()) {
+ const ContinuingInfo& ci = continuing_stack_.back();
+ // Match one of two patterns: the break-if and break-unless patterns.
+ //
+ // The break-if pattern:
+ // continuing { ...
+ // if (cond) { break; }
+ // }
+ //
+ // The break-unless pattern:
+ // continuing { ...
+ // if (cond) {} else {break;}
+ // }
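+ // In the break-if case the loop backedge is emitted as an
+ // OpBranchConditional whose true target is the break target and whose
+ // false target is the loop header; break-unless swaps the two targets.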
+ auto is_just_a_break = [](const ast::BlockStatement* block) {
+ return block && (block->statements.size() == 1) &&
+ block->Last()->Is<ast::BreakStatement>();
+ };
+ if (is_just_a_break(stmt->body) && stmt->else_statement == nullptr) {
+ // It's a break-if.
+ TINT_ASSERT(Writer, !backedge_stack_.empty());
+ const auto cond_id = GenerateExpressionWithLoadIfNeeded(stmt->condition);
+ if (!cond_id) {
+ return false;
+ }
+ backedge_stack_.back() = Backedge(
+ spv::Op::OpBranchConditional,
+ {Operand(cond_id), Operand(ci.break_target_id), Operand(ci.loop_header_id)});
+ return true;
+ } else if (stmt->body->Empty()) {
+ auto* es_block = As<ast::BlockStatement>(stmt->else_statement);
+ if (es_block && is_just_a_break(es_block)) {
+ // It's a break-unless.
+ TINT_ASSERT(Writer, !backedge_stack_.empty());
+ const auto cond_id = GenerateExpressionWithLoadIfNeeded(stmt->condition);
+ if (!cond_id) {
+ return false;
+ }
+ backedge_stack_.back() = Backedge(
+ spv::Op::OpBranchConditional,
+ {Operand(cond_id), Operand(ci.loop_header_id), Operand(ci.break_target_id)});
+ return true;
+ }
+ }
}
- }
- if (!GenerateConditionalBlock(stmt->condition, stmt->body, 0,
- stmt->else_statements)) {
- return false;
- }
- return true;
+ if (!GenerateConditionalBlock(stmt->condition, stmt->body, stmt->else_statement)) {
+ return false;
+ }
+ return true;
}
bool Builder::GenerateSwitchStatement(const ast::SwitchStatement* stmt) {
- auto merge_block = result_op();
- auto merge_block_id = merge_block.to_i();
+ auto merge_block = result_op();
+ auto merge_block_id = std::get<uint32_t>(merge_block);
- merge_stack_.push_back(merge_block_id);
+ merge_stack_.push_back(merge_block_id);
- auto cond_id = GenerateExpressionWithLoadIfNeeded(stmt->condition);
- if (cond_id == 0) {
- return false;
- }
-
- auto default_block = result_op();
- auto default_block_id = default_block.to_i();
-
- OperandList params = {Operand::Int(cond_id), Operand::Int(default_block_id)};
-
- std::vector<uint32_t> case_ids;
- for (const auto* item : stmt->body) {
- if (item->IsDefault()) {
- case_ids.push_back(default_block_id);
- continue;
+ auto cond_id = GenerateExpressionWithLoadIfNeeded(stmt->condition);
+ if (cond_id == 0) {
+ return false;
}
- auto block = result_op();
- auto block_id = block.to_i();
+ auto default_block = result_op();
+ auto default_block_id = std::get<uint32_t>(default_block);
- case_ids.push_back(block_id);
- for (auto* selector : item->selectors) {
- auto* int_literal = selector->As<ast::IntLiteralExpression>();
- if (!int_literal) {
- error_ = "expected integer literal for switch case label";
- return false;
- }
-
- params.push_back(Operand::Int(int_literal->ValueAsU32()));
- params.push_back(Operand::Int(block_id));
- }
- }
+ OperandList params = {Operand(cond_id), Operand(default_block_id)};
- if (!push_function_inst(spv::Op::OpSelectionMerge,
- {Operand::Int(merge_block_id),
- Operand::Int(SpvSelectionControlMaskNone)})) {
- return false;
- }
- if (!push_function_inst(spv::Op::OpSwitch, params)) {
- return false;
- }
+ std::vector<uint32_t> case_ids;
+ for (const auto* item : stmt->body) {
+ if (item->IsDefault()) {
+ case_ids.push_back(default_block_id);
+ continue;
+ }
- bool generated_default = false;
- auto& body = stmt->body;
- // We output the case statements in order they were entered in the original
- // source. Each fallthrough goes to the next case entry, so is a forward
- // branch, otherwise the branch is to the merge block which comes after
- // the switch statement.
- for (uint32_t i = 0; i < body.size(); i++) {
- auto* item = body[i];
+ auto block = result_op();
+ auto block_id = std::get<uint32_t>(block);
- if (item->IsDefault()) {
- generated_default = true;
- }
+ case_ids.push_back(block_id);
+ for (auto* selector : item->selectors) {
+ auto* int_literal = selector->As<ast::IntLiteralExpression>();
+ if (!int_literal) {
+ error_ = "expected integer literal for switch case label";
+ return false;
+ }
- if (!GenerateLabel(case_ids[i])) {
- return false;
- }
- if (!GenerateBlockStatement(item->body)) {
- return false;
+ params.push_back(Operand(static_cast<uint32_t>(int_literal->value)));
+ params.push_back(Operand(block_id));
+ }
}
- if (LastIsFallthrough(item->body)) {
- if (i == (body.size() - 1)) {
- // This case is caught by Resolver validation
- TINT_UNREACHABLE(Writer, builder_.Diagnostics());
+ if (!push_function_inst(spv::Op::OpSelectionMerge,
+ {Operand(merge_block_id), U32Operand(SpvSelectionControlMaskNone)})) {
return false;
- }
- if (!push_function_inst(spv::Op::OpBranch,
- {Operand::Int(case_ids[i + 1])})) {
- return false;
- }
- } else if (InsideBasicBlock()) {
- if (!push_function_inst(spv::Op::OpBranch,
- {Operand::Int(merge_block_id)})) {
+ }
+ if (!push_function_inst(spv::Op::OpSwitch, params)) {
return false;
- }
}
- }
- if (!generated_default) {
- if (!GenerateLabel(default_block_id)) {
- return false;
+ bool generated_default = false;
+ auto& body = stmt->body;
+    // We output the case statements in the order they were entered in the
+    // original source. Each fallthrough goes to the next case entry, so it is a
+    // forward branch; otherwise the branch is to the merge block which comes
+    // after the switch statement.
+ for (uint32_t i = 0; i < body.size(); i++) {
+ auto* item = body[i];
+
+ if (item->IsDefault()) {
+ generated_default = true;
+ }
+
+ if (!GenerateLabel(case_ids[i])) {
+ return false;
+ }
+ if (!GenerateBlockStatement(item->body)) {
+ return false;
+ }
+
+ if (LastIsFallthrough(item->body)) {
+ if (i == (body.size() - 1)) {
+ // This case is caught by Resolver validation
+ TINT_UNREACHABLE(Writer, builder_.Diagnostics());
+ return false;
+ }
+ if (!push_function_inst(spv::Op::OpBranch, {Operand(case_ids[i + 1])})) {
+ return false;
+ }
+ } else if (InsideBasicBlock()) {
+ if (!push_function_inst(spv::Op::OpBranch, {Operand(merge_block_id)})) {
+ return false;
+ }
+ }
}
- if (!push_function_inst(spv::Op::OpBranch,
- {Operand::Int(merge_block_id)})) {
- return false;
+
+ if (!generated_default) {
+ if (!GenerateLabel(default_block_id)) {
+ return false;
+ }
+ if (!push_function_inst(spv::Op::OpBranch, {Operand(merge_block_id)})) {
+ return false;
+ }
}
- }
- merge_stack_.pop_back();
+ merge_stack_.pop_back();
- return GenerateLabel(merge_block_id);
+ return GenerateLabel(merge_block_id);
}
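As background for the params list built above: OpSwitch takes the selector id, the default block label, and then alternating (literal, label) pairs, one pair per case selector. A self-contained sketch of that operand layout, using a simplified stand-in for Operand rather than Tint's real type:

#include <cstdint>
#include <iostream>
#include <utility>
#include <variant>
#include <vector>

// Simplified stand-ins for illustration; not Tint's Operand/OperandList.
using Operand = std::variant<uint32_t, float>;
using OperandList = std::vector<Operand>;

int main() {
    uint32_t cond_id = 10, default_block_id = 20;
    // Two cases: `case 1:` -> block 21, `case 2:` -> block 22.
    std::vector<std::pair<uint32_t, uint32_t>> cases = {{1u, 21u}, {2u, 22u}};

    // OpSwitch operands: selector, default label, then (literal, label) pairs.
    OperandList params = {Operand(cond_id), Operand(default_block_id)};
    for (const auto& [literal, block_id] : cases) {
        params.push_back(Operand(literal));   // case selector value
        params.push_back(Operand(block_id));  // label id of the case block
    }
    std::cout << "OpSwitch operand count: " << params.size() << "\n";  // 2 + 2 per case
    return 0;
}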
bool Builder::GenerateReturnStatement(const ast::ReturnStatement* stmt) {
- if (stmt->value) {
- auto val_id = GenerateExpressionWithLoadIfNeeded(stmt->value);
- if (val_id == 0) {
- return false;
- }
- if (!push_function_inst(spv::Op::OpReturnValue, {Operand::Int(val_id)})) {
- return false;
- }
- } else {
- if (!push_function_inst(spv::Op::OpReturn, {})) {
- return false;
+ if (stmt->value) {
+ auto val_id = GenerateExpressionWithLoadIfNeeded(stmt->value);
+ if (val_id == 0) {
+ return false;
+ }
+ if (!push_function_inst(spv::Op::OpReturnValue, {Operand(val_id)})) {
+ return false;
+ }
+ } else {
+ if (!push_function_inst(spv::Op::OpReturn, {})) {
+ return false;
+ }
}
- }
- return true;
+ return true;
}
bool Builder::GenerateLoopStatement(const ast::LoopStatement* stmt) {
- auto loop_header = result_op();
- auto loop_header_id = loop_header.to_i();
- if (!push_function_inst(spv::Op::OpBranch, {Operand::Int(loop_header_id)})) {
- return false;
- }
- if (!GenerateLabel(loop_header_id)) {
- return false;
- }
-
- auto merge_block = result_op();
- auto merge_block_id = merge_block.to_i();
- auto continue_block = result_op();
- auto continue_block_id = continue_block.to_i();
-
- auto body_block = result_op();
- auto body_block_id = body_block.to_i();
-
- if (!push_function_inst(
- spv::Op::OpLoopMerge,
- {Operand::Int(merge_block_id), Operand::Int(continue_block_id),
- Operand::Int(SpvLoopControlMaskNone)})) {
- return false;
- }
+ auto loop_header = result_op();
+ auto loop_header_id = std::get<uint32_t>(loop_header);
+ if (!push_function_inst(spv::Op::OpBranch, {Operand(loop_header_id)})) {
+ return false;
+ }
+ if (!GenerateLabel(loop_header_id)) {
+ return false;
+ }
- continue_stack_.push_back(continue_block_id);
- merge_stack_.push_back(merge_block_id);
+ auto merge_block = result_op();
+ auto merge_block_id = std::get<uint32_t>(merge_block);
+ auto continue_block = result_op();
+ auto continue_block_id = std::get<uint32_t>(continue_block);
- // Usually, the backedge is a simple branch. This will be modified if the
- // backedge block in the continuing construct has an exiting edge.
- backedge_stack_.emplace_back(spv::Op::OpBranch,
- OperandList{Operand::Int(loop_header_id)});
+ auto body_block = result_op();
+ auto body_block_id = std::get<uint32_t>(body_block);
- if (!push_function_inst(spv::Op::OpBranch, {Operand::Int(body_block_id)})) {
- return false;
- }
- if (!GenerateLabel(body_block_id)) {
- return false;
- }
+ if (!push_function_inst(spv::Op::OpLoopMerge,
+ {Operand(merge_block_id), Operand(continue_block_id),
+ U32Operand(SpvLoopControlMaskNone)})) {
+ return false;
+ }
- // We need variables from the body to be visible in the continuing block, so
- // manage scope outside of GenerateBlockStatement.
- {
- scope_stack_.Push();
- TINT_DEFER(scope_stack_.Pop());
+ continue_stack_.push_back(continue_block_id);
+ merge_stack_.push_back(merge_block_id);
- if (!GenerateBlockStatementWithoutScoping(stmt->body)) {
- return false;
- }
+ // Usually, the backedge is a simple branch. This will be modified if the
+ // backedge block in the continuing construct has an exiting edge.
+ backedge_stack_.emplace_back(spv::Op::OpBranch, OperandList{Operand(loop_header_id)});
- // We only branch if the last element of the body didn't already branch.
- if (InsideBasicBlock()) {
- if (!push_function_inst(spv::Op::OpBranch,
- {Operand::Int(continue_block_id)})) {
+ if (!push_function_inst(spv::Op::OpBranch, {Operand(body_block_id)})) {
return false;
- }
}
-
- if (!GenerateLabel(continue_block_id)) {
- return false;
- }
- if (stmt->continuing && !stmt->continuing->Empty()) {
- continuing_stack_.emplace_back(stmt->continuing->Last(), loop_header_id,
- merge_block_id);
- if (!GenerateBlockStatementWithoutScoping(stmt->continuing)) {
+ if (!GenerateLabel(body_block_id)) {
return false;
- }
- continuing_stack_.pop_back();
}
- }
- // Generate the backedge.
- TINT_ASSERT(Writer, !backedge_stack_.empty());
- const Backedge& backedge = backedge_stack_.back();
- if (!push_function_inst(backedge.opcode, backedge.operands)) {
- return false;
- }
- backedge_stack_.pop_back();
+ // We need variables from the body to be visible in the continuing block, so
+ // manage scope outside of GenerateBlockStatement.
+ {
+ PushScope();
+ TINT_DEFER(PopScope());
- merge_stack_.pop_back();
- continue_stack_.pop_back();
+ if (!GenerateBlockStatementWithoutScoping(stmt->body)) {
+ return false;
+ }
- return GenerateLabel(merge_block_id);
-}
+ // We only branch if the last element of the body didn't already branch.
+ if (InsideBasicBlock()) {
+ if (!push_function_inst(spv::Op::OpBranch, {Operand(continue_block_id)})) {
+ return false;
+ }
+ }
-bool Builder::GenerateStatement(const ast::Statement* stmt) {
- return Switch(
- stmt,
- [&](const ast::AssignmentStatement* a) {
- return GenerateAssignStatement(a);
- },
- [&](const ast::BlockStatement* b) { //
- return GenerateBlockStatement(b);
- },
- [&](const ast::BreakStatement* b) { //
- return GenerateBreakStatement(b);
- },
- [&](const ast::CallStatement* c) {
- return GenerateCallExpression(c->expr) != 0;
- },
- [&](const ast::ContinueStatement* c) {
- return GenerateContinueStatement(c);
- },
- [&](const ast::DiscardStatement* d) {
- return GenerateDiscardStatement(d);
- },
- [&](const ast::FallthroughStatement*) {
- // Do nothing here, the fallthrough gets handled by the switch code.
- return true;
- },
- [&](const ast::IfStatement* i) { //
- return GenerateIfStatement(i);
- },
- [&](const ast::LoopStatement* l) { //
- return GenerateLoopStatement(l);
- },
- [&](const ast::ReturnStatement* r) { //
- return GenerateReturnStatement(r);
- },
- [&](const ast::SwitchStatement* s) { //
- return GenerateSwitchStatement(s);
- },
- [&](const ast::VariableDeclStatement* v) {
- return GenerateVariableDeclStatement(v);
- },
- [&](Default) {
- error_ = "Unknown statement: " + std::string(stmt->TypeInfo().name);
+ if (!GenerateLabel(continue_block_id)) {
+ return false;
+ }
+ if (stmt->continuing && !stmt->continuing->Empty()) {
+ continuing_stack_.emplace_back(stmt->continuing->Last(), loop_header_id,
+ merge_block_id);
+ if (!GenerateBlockStatementWithoutScoping(stmt->continuing)) {
+ return false;
+ }
+ continuing_stack_.pop_back();
+ }
+ }
+
+ // Generate the backedge.
+ TINT_ASSERT(Writer, !backedge_stack_.empty());
+ const Backedge& backedge = backedge_stack_.back();
+ if (!push_function_inst(backedge.opcode, backedge.operands)) {
return false;
- });
-}
+ }
+ backedge_stack_.pop_back();
-bool Builder::GenerateVariableDeclStatement(
- const ast::VariableDeclStatement* stmt) {
- return GenerateFunctionVariable(stmt->variable);
+ merge_stack_.pop_back();
+ continue_stack_.pop_back();
+
+ return GenerateLabel(merge_block_id);
}
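The loop lowering always pushes a default unconditional backedge and lets the continuing-block handling replace it with an OpBranchConditional when it spots a break-if / break-unless. A rough sketch of that "default, then possibly replaced" pattern with simplified stand-in types (Op and Backedge below are illustrative, not Tint's):

#include <cstdint>
#include <iostream>
#include <vector>

// Illustrative stand-ins; Tint's Backedge stores a spv::Op and an OperandList.
enum class Op { Branch, BranchConditional };

struct Backedge {
    Op opcode;
    std::vector<uint32_t> operands;
};

int main() {
    uint32_t loop_header_id = 3, merge_block_id = 4, cond_id = 9;
    std::vector<Backedge> backedge_stack;

    // Default backedge: an unconditional branch back to the loop header.
    backedge_stack.push_back({Op::Branch, {loop_header_id}});

    // If the continuing block ends in a break-if, the top entry is replaced by a
    // conditional branch that can exit to the merge block instead of looping.
    backedge_stack.back() = {Op::BranchConditional, {cond_id, merge_block_id, loop_header_id}};

    // When the loop ends, whatever is on top of the stack is emitted as the backedge.
    std::cout << "backedge operands: " << backedge_stack.back().operands.size() << "\n";
    return 0;
}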
-uint32_t Builder::GenerateTypeIfNeeded(const sem::Type* type) {
- if (type == nullptr) {
- error_ = "attempting to generate type from null type";
- return 0;
- }
-
- // Atomics are a type in WGSL, but aren't a distinct type in SPIR-V.
- // Just emit the type inside the atomic.
- if (auto* atomic = type->As<sem::Atomic>()) {
- return GenerateTypeIfNeeded(atomic->Type());
- }
-
- // DepthTexture is always declared as SampledTexture.
- // The Vulkan spec says: The "Depth" operand of OpTypeImage is ignored.
- // In SPIRV, 0 means not depth, 1 means depth, and 2 means unknown.
- // Using anything other than 0 is problematic on various Vulkan drivers.
- if (auto* depthTextureType = type->As<sem::DepthTexture>()) {
- type = builder_.create<sem::SampledTexture>(depthTextureType->dim(),
- builder_.create<sem::F32>());
- } else if (auto* multisampledDepthTextureType =
- type->As<sem::DepthMultisampledTexture>()) {
- type = builder_.create<sem::MultisampledTexture>(
- multisampledDepthTextureType->dim(), builder_.create<sem::F32>());
- }
-
- // Pointers and references with differing accesses should not result in a
- // different SPIR-V types, so we explicitly ignore the access.
- // Pointers and References both map to a SPIR-V pointer type.
- // Transform a Reference to a Pointer to prevent these having duplicated
- // definitions in the generated SPIR-V. Note that nested pointers and
- // references are not legal in WGSL, so only considering the top-level type is
- // fine.
- if (auto* ptr = type->As<sem::Pointer>()) {
- type = builder_.create<sem::Pointer>(ptr->StoreType(), ptr->StorageClass(),
- ast::kReadWrite);
- } else if (auto* ref = type->As<sem::Reference>()) {
- type = builder_.create<sem::Pointer>(ref->StoreType(), ref->StorageClass(),
- ast::kReadWrite);
- }
-
- return utils::GetOrCreate(type_to_id_, type, [&]() -> uint32_t {
- auto result = result_op();
- auto id = result.to_i();
- bool ok = Switch(
- type,
- [&](const sem::Array* arr) { //
- return GenerateArrayType(arr, result);
- },
- [&](const sem::Bool*) {
- push_type(spv::Op::OpTypeBool, {result});
- return true;
- },
- [&](const sem::F32*) {
- push_type(spv::Op::OpTypeFloat, {result, Operand::Int(32)});
- return true;
- },
- [&](const sem::I32*) {
- push_type(spv::Op::OpTypeInt,
- {result, Operand::Int(32), Operand::Int(1)});
- return true;
- },
- [&](const sem::Matrix* mat) { //
- return GenerateMatrixType(mat, result);
- },
- [&](const sem::Pointer* ptr) { //
- return GeneratePointerType(ptr, result);
- },
- [&](const sem::Reference* ref) { //
- return GenerateReferenceType(ref, result);
- },
- [&](const sem::Struct* str) { //
- return GenerateStructType(str, result);
+bool Builder::GenerateStatement(const ast::Statement* stmt) {
+ return Switch(
+ stmt, [&](const ast::AssignmentStatement* a) { return GenerateAssignStatement(a); },
+ [&](const ast::BlockStatement* b) { //
+ return GenerateBlockStatement(b);
},
- [&](const sem::U32*) {
- push_type(spv::Op::OpTypeInt,
- {result, Operand::Int(32), Operand::Int(0)});
- return true;
+ [&](const ast::BreakStatement* b) { //
+ return GenerateBreakStatement(b);
},
- [&](const sem::Vector* vec) { //
- return GenerateVectorType(vec, result);
+ [&](const ast::CallStatement* c) { return GenerateCallExpression(c->expr) != 0; },
+ [&](const ast::ContinueStatement* c) { return GenerateContinueStatement(c); },
+ [&](const ast::DiscardStatement* d) { return GenerateDiscardStatement(d); },
+ [&](const ast::FallthroughStatement*) {
+ // Do nothing here, the fallthrough gets handled by the switch code.
+ return true;
},
- [&](const sem::Void*) {
- push_type(spv::Op::OpTypeVoid, {result});
- return true;
+ [&](const ast::IfStatement* i) { //
+ return GenerateIfStatement(i);
},
- [&](const sem::StorageTexture* tex) {
- if (!GenerateTextureType(tex, result)) {
- return false;
- }
-
- // Register all three access types of StorageTexture names. In
- // SPIR-V, we must output a single type, while the variable is
- // annotated with the access type. Doing this ensures we de-dupe.
- type_to_id_[builder_.create<sem::StorageTexture>(
- tex->dim(), tex->texel_format(), ast::Access::kRead,
- tex->type())] = id;
- type_to_id_[builder_.create<sem::StorageTexture>(
- tex->dim(), tex->texel_format(), ast::Access::kWrite,
- tex->type())] = id;
- type_to_id_[builder_.create<sem::StorageTexture>(
- tex->dim(), tex->texel_format(), ast::Access::kReadWrite,
- tex->type())] = id;
- return true;
+ [&](const ast::LoopStatement* l) { //
+ return GenerateLoopStatement(l);
},
- [&](const sem::Texture* tex) {
- return GenerateTextureType(tex, result);
+ [&](const ast::ReturnStatement* r) { //
+ return GenerateReturnStatement(r);
},
- [&](const sem::Sampler* s) {
- push_type(spv::Op::OpTypeSampler, {result});
-
- // Register both of the sampler type names. In SPIR-V they're the same
- // sampler type, so we need to match that when we do the dedup check.
- if (s->kind() == ast::SamplerKind::kSampler) {
- type_to_id_[builder_.create<sem::Sampler>(
- ast::SamplerKind::kComparisonSampler)] = id;
- } else {
- type_to_id_[builder_.create<sem::Sampler>(
- ast::SamplerKind::kSampler)] = id;
- }
- return true;
+ [&](const ast::SwitchStatement* s) { //
+ return GenerateSwitchStatement(s);
},
+ [&](const ast::VariableDeclStatement* v) { return GenerateVariableDeclStatement(v); },
[&](Default) {
- error_ = "unable to convert type: " +
- type->FriendlyName(builder_.Symbols());
- return false;
+ error_ = "Unknown statement: " + std::string(stmt->TypeInfo().name);
+ return false;
});
+}
- if (!ok) {
- return 0;
+bool Builder::GenerateVariableDeclStatement(const ast::VariableDeclStatement* stmt) {
+ return GenerateFunctionVariable(stmt->variable);
+}
+
+uint32_t Builder::GenerateTypeIfNeeded(const sem::Type* type) {
+ if (type == nullptr) {
+ error_ = "attempting to generate type from null type";
+ return 0;
}
- return id;
- });
+ // Atomics are a type in WGSL, but aren't a distinct type in SPIR-V.
+ // Just emit the type inside the atomic.
+ if (auto* atomic = type->As<sem::Atomic>()) {
+ return GenerateTypeIfNeeded(atomic->Type());
+ }
+
+ // DepthTexture is always declared as SampledTexture.
+ // The Vulkan spec says: The "Depth" operand of OpTypeImage is ignored.
+ // In SPIRV, 0 means not depth, 1 means depth, and 2 means unknown.
+ // Using anything other than 0 is problematic on various Vulkan drivers.
+ if (auto* depthTextureType = type->As<sem::DepthTexture>()) {
+ type = builder_.create<sem::SampledTexture>(depthTextureType->dim(),
+ builder_.create<sem::F32>());
+ } else if (auto* multisampledDepthTextureType = type->As<sem::DepthMultisampledTexture>()) {
+ type = builder_.create<sem::MultisampledTexture>(multisampledDepthTextureType->dim(),
+ builder_.create<sem::F32>());
+ }
+
+    // Pointers and references with differing accesses should not result in
+    // different SPIR-V types, so we explicitly ignore the access.
+    // Pointers and References both map to a SPIR-V pointer type.
+    // Transform a Reference to a Pointer to prevent these from having duplicated
+    // definitions in the generated SPIR-V. Note that nested pointers and
+    // references are not legal in WGSL, so only considering the top-level type is
+    // fine.
+ if (auto* ptr = type->As<sem::Pointer>()) {
+ type =
+ builder_.create<sem::Pointer>(ptr->StoreType(), ptr->StorageClass(), ast::kReadWrite);
+ } else if (auto* ref = type->As<sem::Reference>()) {
+ type =
+ builder_.create<sem::Pointer>(ref->StoreType(), ref->StorageClass(), ast::kReadWrite);
+ }
+
+ return utils::GetOrCreate(type_to_id_, type, [&]() -> uint32_t {
+ auto result = result_op();
+ auto id = std::get<uint32_t>(result);
+ bool ok = Switch(
+ type,
+ [&](const sem::Array* arr) { //
+ return GenerateArrayType(arr, result);
+ },
+ [&](const sem::Bool*) {
+ push_type(spv::Op::OpTypeBool, {result});
+ return true;
+ },
+ [&](const sem::F32*) {
+ push_type(spv::Op::OpTypeFloat, {result, Operand(32u)});
+ return true;
+ },
+ [&](const sem::F16*) {
+ // Should be `push_type(spv::Op::OpTypeFloat, {result, Operand(16u)});`
+ error_ = "Type f16 is not completely implemented yet.";
+ return false;
+ },
+ [&](const sem::I32*) {
+ push_type(spv::Op::OpTypeInt, {result, Operand(32u), Operand(1u)});
+ return true;
+ },
+ [&](const sem::Matrix* mat) { //
+ return GenerateMatrixType(mat, result);
+ },
+ [&](const sem::Pointer* ptr) { //
+ return GeneratePointerType(ptr, result);
+ },
+ [&](const sem::Reference* ref) { //
+ return GenerateReferenceType(ref, result);
+ },
+ [&](const sem::Struct* str) { //
+ return GenerateStructType(str, result);
+ },
+ [&](const sem::U32*) {
+ push_type(spv::Op::OpTypeInt, {result, Operand(32u), Operand(0u)});
+ return true;
+ },
+ [&](const sem::Vector* vec) { //
+ return GenerateVectorType(vec, result);
+ },
+ [&](const sem::Void*) {
+ push_type(spv::Op::OpTypeVoid, {result});
+ return true;
+ },
+ [&](const sem::StorageTexture* tex) {
+ if (!GenerateTextureType(tex, result)) {
+ return false;
+ }
+
+ // Register all three access types of StorageTexture names. In
+ // SPIR-V, we must output a single type, while the variable is
+ // annotated with the access type. Doing this ensures we de-dupe.
+ type_to_id_[builder_.create<sem::StorageTexture>(
+ tex->dim(), tex->texel_format(), ast::Access::kRead, tex->type())] = id;
+ type_to_id_[builder_.create<sem::StorageTexture>(
+ tex->dim(), tex->texel_format(), ast::Access::kWrite, tex->type())] = id;
+ type_to_id_[builder_.create<sem::StorageTexture>(
+ tex->dim(), tex->texel_format(), ast::Access::kReadWrite, tex->type())] = id;
+ return true;
+ },
+ [&](const sem::Texture* tex) { return GenerateTextureType(tex, result); },
+ [&](const sem::Sampler* s) {
+ push_type(spv::Op::OpTypeSampler, {result});
+
+ // Register both of the sampler type names. In SPIR-V they're the same
+ // sampler type, so we need to match that when we do the dedup check.
+ if (s->kind() == ast::SamplerKind::kSampler) {
+ type_to_id_[builder_.create<sem::Sampler>(
+ ast::SamplerKind::kComparisonSampler)] = id;
+ } else {
+ type_to_id_[builder_.create<sem::Sampler>(ast::SamplerKind::kSampler)] = id;
+ }
+ return true;
+ },
+ [&](Default) {
+ error_ = "unable to convert type: " + type->FriendlyName(builder_.Symbols());
+ return false;
+ });
+
+ if (!ok) {
+ return 0;
+ }
+
+ return id;
+ });
}
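GenerateTypeIfNeeded memoizes one SPIR-V id per semantic type through utils::GetOrCreate, then pre-seeds extra keys (for example all three storage-texture access modes) so equivalent types dedupe to the same id. A from-scratch sketch of that memoization helper; a plausible shape under those assumptions, not Tint's actual implementation:

#include <cstdint>
#include <iostream>
#include <string>
#include <unordered_map>

// Illustrative GetOrCreate-style memoizer, written for this sketch only.
template <typename Map, typename Key, typename Create>
auto GetOrCreate(Map& map, const Key& key, Create&& create) {
    auto it = map.find(key);
    if (it != map.end()) {
        return it->second;  // already generated: reuse the cached id
    }
    auto value = create();  // generate (and emit the type declaration) once
    map.emplace(key, value);
    return value;
}

int main() {
    std::unordered_map<std::string, uint32_t> type_to_id;
    uint32_t next_id = 1;
    auto make_id = [&] { return next_id++; };

    uint32_t a = GetOrCreate(type_to_id, std::string("f32"), make_id);
    uint32_t b = GetOrCreate(type_to_id, std::string("f32"), make_id);  // cache hit
    std::cout << (a == b ? "deduped" : "distinct") << "\n";             // prints "deduped"
    return 0;
}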
-bool Builder::GenerateTextureType(const sem::Texture* texture,
- const Operand& result) {
- if (texture->Is<sem::ExternalTexture>()) {
- TINT_ICE(Writer, builder_.Diagnostics())
- << "Multiplanar external texture transform was not run.";
- return false;
- }
-
- uint32_t array_literal = 0u;
- const auto dim = texture->dim();
- if (dim == ast::TextureDimension::k2dArray ||
- dim == ast::TextureDimension::kCubeArray) {
- array_literal = 1u;
- }
-
- uint32_t dim_literal = SpvDim2D;
- if (dim == ast::TextureDimension::k1d) {
- dim_literal = SpvDim1D;
- if (texture->Is<sem::SampledTexture>()) {
- push_capability(SpvCapabilitySampled1D);
- } else if (texture->Is<sem::StorageTexture>()) {
- push_capability(SpvCapabilityImage1D);
- }
- }
- if (dim == ast::TextureDimension::k3d) {
- dim_literal = SpvDim3D;
- }
- if (dim == ast::TextureDimension::kCube ||
- dim == ast::TextureDimension::kCubeArray) {
- dim_literal = SpvDimCube;
- }
-
- uint32_t ms_literal = 0u;
- if (texture->IsAnyOf<sem::MultisampledTexture,
- sem::DepthMultisampledTexture>()) {
- ms_literal = 1u;
- }
-
- uint32_t depth_literal = 0u;
- // The Vulkan spec says: The "Depth" operand of OpTypeImage is ignored.
- // In SPIRV, 0 means not depth, 1 means depth, and 2 means unknown.
- // Using anything other than 0 is problematic on various Vulkan drivers.
-
- uint32_t sampled_literal = 2u;
- if (texture->IsAnyOf<sem::MultisampledTexture, sem::SampledTexture,
- sem::DepthTexture, sem::DepthMultisampledTexture>()) {
- sampled_literal = 1u;
- }
-
- if (dim == ast::TextureDimension::kCubeArray) {
- if (texture->IsAnyOf<sem::SampledTexture, sem::DepthTexture>()) {
- push_capability(SpvCapabilitySampledCubeArray);
- }
- }
-
- uint32_t type_id = Switch(
- texture,
- [&](const sem::DepthTexture*) {
- return GenerateTypeIfNeeded(builder_.create<sem::F32>());
- },
- [&](const sem::DepthMultisampledTexture*) {
- return GenerateTypeIfNeeded(builder_.create<sem::F32>());
- },
- [&](const sem::SampledTexture* t) {
- return GenerateTypeIfNeeded(t->type());
- },
- [&](const sem::MultisampledTexture* t) {
- return GenerateTypeIfNeeded(t->type());
- },
- [&](const sem::StorageTexture* t) {
- return GenerateTypeIfNeeded(t->type());
- },
- [&](Default) { return 0u; });
- if (type_id == 0u) {
- return false;
- }
+bool Builder::GenerateTextureType(const sem::Texture* texture, const Operand& result) {
+ if (texture->Is<sem::ExternalTexture>()) {
+ TINT_ICE(Writer, builder_.Diagnostics())
+ << "Multiplanar external texture transform was not run.";
+ return false;
+ }
+
+ uint32_t array_literal = 0u;
+ const auto dim = texture->dim();
+ if (dim == ast::TextureDimension::k2dArray || dim == ast::TextureDimension::kCubeArray) {
+ array_literal = 1u;
+ }
+
+ uint32_t dim_literal = SpvDim2D;
+ if (dim == ast::TextureDimension::k1d) {
+ dim_literal = SpvDim1D;
+ if (texture->Is<sem::SampledTexture>()) {
+ push_capability(SpvCapabilitySampled1D);
+ } else if (texture->Is<sem::StorageTexture>()) {
+ push_capability(SpvCapabilityImage1D);
+ }
+ }
+ if (dim == ast::TextureDimension::k3d) {
+ dim_literal = SpvDim3D;
+ }
+ if (dim == ast::TextureDimension::kCube || dim == ast::TextureDimension::kCubeArray) {
+ dim_literal = SpvDimCube;
+ }
+
+ uint32_t ms_literal = 0u;
+ if (texture->IsAnyOf<sem::MultisampledTexture, sem::DepthMultisampledTexture>()) {
+ ms_literal = 1u;
+ }
- uint32_t format_literal = SpvImageFormat_::SpvImageFormatUnknown;
- if (auto* t = texture->As<sem::StorageTexture>()) {
- format_literal = convert_texel_format_to_spv(t->texel_format());
- }
+ uint32_t depth_literal = 0u;
+ // The Vulkan spec says: The "Depth" operand of OpTypeImage is ignored.
+ // In SPIRV, 0 means not depth, 1 means depth, and 2 means unknown.
+ // Using anything other than 0 is problematic on various Vulkan drivers.
- push_type(spv::Op::OpTypeImage,
- {result, Operand::Int(type_id), Operand::Int(dim_literal),
- Operand::Int(depth_literal), Operand::Int(array_literal),
- Operand::Int(ms_literal), Operand::Int(sampled_literal),
- Operand::Int(format_literal)});
+ uint32_t sampled_literal = 2u;
+ if (texture->IsAnyOf<sem::MultisampledTexture, sem::SampledTexture, sem::DepthTexture,
+ sem::DepthMultisampledTexture>()) {
+ sampled_literal = 1u;
+ }
+
+ if (dim == ast::TextureDimension::kCubeArray) {
+ if (texture->IsAnyOf<sem::SampledTexture, sem::DepthTexture>()) {
+ push_capability(SpvCapabilitySampledCubeArray);
+ }
+ }
+
+ uint32_t type_id = Switch(
+ texture,
+ [&](const sem::DepthTexture*) { return GenerateTypeIfNeeded(builder_.create<sem::F32>()); },
+ [&](const sem::DepthMultisampledTexture*) {
+ return GenerateTypeIfNeeded(builder_.create<sem::F32>());
+ },
+ [&](const sem::SampledTexture* t) { return GenerateTypeIfNeeded(t->type()); },
+ [&](const sem::MultisampledTexture* t) { return GenerateTypeIfNeeded(t->type()); },
+ [&](const sem::StorageTexture* t) { return GenerateTypeIfNeeded(t->type()); },
+ [&](Default) { return 0u; });
+ if (type_id == 0u) {
+ return false;
+ }
+
+ uint32_t format_literal = SpvImageFormat_::SpvImageFormatUnknown;
+ if (auto* t = texture->As<sem::StorageTexture>()) {
+ format_literal = convert_texel_format_to_spv(t->texel_format());
+ }
- return true;
+ push_type(spv::Op::OpTypeImage,
+ {result, Operand(type_id), Operand(dim_literal), Operand(depth_literal),
+ Operand(array_literal), Operand(ms_literal), Operand(sampled_literal),
+ Operand(format_literal)});
+
+ return true;
}
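For reference, the OpTypeImage operands assembled above follow the SPIR-V order: sampled type, Dim, Depth, Arrayed, MS, Sampled, Image Format. A compact sketch annotating each slot with the values this writer picks; the literals are illustrative only:

#include <cstdint>
#include <iostream>
#include <vector>

int main() {
    // Illustrative literals; the real values come from the texture type.
    uint32_t sampled_type_id = 2;  // e.g. the f32 type id for depth/sampled textures
    uint32_t dim = 1;              // SpvDim1D/2D/3D/Cube, derived from the WGSL dimension
    uint32_t depth = 0;            // always 0 here: other values are problematic on drivers
    uint32_t arrayed = 1;          // 1 for 2d-array and cube-array textures
    uint32_t ms = 0;               // 1 only for multisampled textures
    uint32_t sampled = 1;          // 1 = sampled texture, 2 = storage texture
    uint32_t format = 0;           // SpvImageFormatUnknown unless a storage texture

    // OpTypeImage operand order after the result id.
    std::vector<uint32_t> operands = {sampled_type_id, dim,     depth, arrayed,
                                      ms,              sampled, format};
    std::cout << "OpTypeImage carries " << operands.size() << " operands after the result id\n";
    return 0;
}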
bool Builder::GenerateArrayType(const sem::Array* ary, const Operand& result) {
- auto elem_type = GenerateTypeIfNeeded(ary->ElemType());
- if (elem_type == 0) {
- return false;
- }
-
- auto result_id = result.to_i();
- if (ary->IsRuntimeSized()) {
- push_type(spv::Op::OpTypeRuntimeArray, {result, Operand::Int(elem_type)});
- } else {
- auto len_id = GenerateConstantIfNeeded(ScalarConstant::U32(ary->Count()));
- if (len_id == 0) {
- return false;
+ auto elem_type = GenerateTypeIfNeeded(ary->ElemType());
+ if (elem_type == 0) {
+ return false;
}
- push_type(spv::Op::OpTypeArray,
- {result, Operand::Int(elem_type), Operand::Int(len_id)});
- }
+ auto result_id = std::get<uint32_t>(result);
+ if (ary->IsRuntimeSized()) {
+ push_type(spv::Op::OpTypeRuntimeArray, {result, Operand(elem_type)});
+ } else {
+ auto len_id = GenerateConstantIfNeeded(ScalarConstant::U32(ary->Count()));
+ if (len_id == 0) {
+ return false;
+ }
+
+ push_type(spv::Op::OpTypeArray, {result, Operand(elem_type), Operand(len_id)});
+ }
- push_annot(spv::Op::OpDecorate,
- {Operand::Int(result_id), Operand::Int(SpvDecorationArrayStride),
- Operand::Int(ary->Stride())});
- return true;
+ push_annot(spv::Op::OpDecorate,
+ {Operand(result_id), U32Operand(SpvDecorationArrayStride), Operand(ary->Stride())});
+ return true;
}
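GenerateArrayType distinguishes runtime-sized arrays (OpTypeRuntimeArray, no length operand) from fixed-size arrays (OpTypeArray with a constant length id) and always emits an ArrayStride decoration. A toy sketch of that branch; the ArrayInfo type below is invented for illustration, not Tint's sem::Array:

#include <cstdint>
#include <iostream>
#include <optional>

// Invented helper type for illustration.
struct ArrayInfo {
    uint32_t elem_type_id;
    std::optional<uint32_t> count;  // std::nullopt => runtime-sized
    uint32_t stride;
};

void EmitArrayType(const ArrayInfo& ary) {
    if (!ary.count) {
        // Runtime-sized arrays have no length operand.
        std::cout << "OpTypeRuntimeArray %elem" << ary.elem_type_id << "\n";
    } else {
        // In real output the length is itself the id of an OpConstant.
        std::cout << "OpTypeArray %elem" << ary.elem_type_id << " len=" << *ary.count << "\n";
    }
    // Both forms get an ArrayStride decoration.
    std::cout << "OpDecorate ArrayStride " << ary.stride << "\n";
}

int main() {
    EmitArrayType({/*elem_type_id=*/3, /*count=*/4u, /*stride=*/16u});
    EmitArrayType({/*elem_type_id=*/3, std::nullopt, /*stride=*/16u});
    return 0;
}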
-bool Builder::GenerateMatrixType(const sem::Matrix* mat,
- const Operand& result) {
- auto* col_type = builder_.create<sem::Vector>(mat->type(), mat->rows());
- auto col_type_id = GenerateTypeIfNeeded(col_type);
- if (has_error()) {
- return false;
- }
+bool Builder::GenerateMatrixType(const sem::Matrix* mat, const Operand& result) {
+ auto* col_type = builder_.create<sem::Vector>(mat->type(), mat->rows());
+ auto col_type_id = GenerateTypeIfNeeded(col_type);
+ if (has_error()) {
+ return false;
+ }
- push_type(spv::Op::OpTypeMatrix,
- {result, Operand::Int(col_type_id), Operand::Int(mat->columns())});
- return true;
+ push_type(spv::Op::OpTypeMatrix, {result, Operand(col_type_id), Operand(mat->columns())});
+ return true;
}
-bool Builder::GeneratePointerType(const sem::Pointer* ptr,
- const Operand& result) {
- auto subtype_id = GenerateTypeIfNeeded(ptr->StoreType());
- if (subtype_id == 0) {
- return false;
- }
+bool Builder::GeneratePointerType(const sem::Pointer* ptr, const Operand& result) {
+ auto subtype_id = GenerateTypeIfNeeded(ptr->StoreType());
+ if (subtype_id == 0) {
+ return false;
+ }
- auto stg_class = ConvertStorageClass(ptr->StorageClass());
- if (stg_class == SpvStorageClassMax) {
- error_ = "invalid storage class for pointer";
- return false;
- }
+ auto stg_class = ConvertStorageClass(ptr->StorageClass());
+ if (stg_class == SpvStorageClassMax) {
+ error_ = "invalid storage class for pointer";
+ return false;
+ }
- push_type(spv::Op::OpTypePointer,
- {result, Operand::Int(stg_class), Operand::Int(subtype_id)});
+ push_type(spv::Op::OpTypePointer, {result, U32Operand(stg_class), Operand(subtype_id)});
- return true;
+ return true;
}
-bool Builder::GenerateReferenceType(const sem::Reference* ref,
- const Operand& result) {
- auto subtype_id = GenerateTypeIfNeeded(ref->StoreType());
- if (subtype_id == 0) {
- return false;
- }
+bool Builder::GenerateReferenceType(const sem::Reference* ref, const Operand& result) {
+ auto subtype_id = GenerateTypeIfNeeded(ref->StoreType());
+ if (subtype_id == 0) {
+ return false;
+ }
- auto stg_class = ConvertStorageClass(ref->StorageClass());
- if (stg_class == SpvStorageClassMax) {
- error_ = "invalid storage class for reference";
- return false;
- }
+ auto stg_class = ConvertStorageClass(ref->StorageClass());
+ if (stg_class == SpvStorageClassMax) {
+ error_ = "invalid storage class for reference";
+ return false;
+ }
- push_type(spv::Op::OpTypePointer,
- {result, Operand::Int(stg_class), Operand::Int(subtype_id)});
+ push_type(spv::Op::OpTypePointer, {result, U32Operand(stg_class), Operand(subtype_id)});
- return true;
+ return true;
}
-bool Builder::GenerateStructType(const sem::Struct* struct_type,
- const Operand& result) {
- auto struct_id = result.to_i();
-
- if (struct_type->Name().IsValid()) {
- push_debug(
- spv::Op::OpName,
- {Operand::Int(struct_id),
- Operand::String(builder_.Symbols().NameFor(struct_type->Name()))});
- }
+bool Builder::GenerateStructType(const sem::Struct* struct_type, const Operand& result) {
+ auto struct_id = std::get<uint32_t>(result);
- OperandList ops;
- ops.push_back(result);
+ if (struct_type->Name().IsValid()) {
+ push_debug(spv::Op::OpName,
+ {Operand(struct_id), Operand(builder_.Symbols().NameFor(struct_type->Name()))});
+ }
- auto* decl = struct_type->Declaration();
- if (decl &&
- ast::HasAttribute<transform::AddSpirvBlockAttribute::SpirvBlockAttribute>(
- decl->attributes)) {
- push_annot(spv::Op::OpDecorate,
- {Operand::Int(struct_id), Operand::Int(SpvDecorationBlock)});
- }
+ OperandList ops;
+ ops.push_back(result);
- for (uint32_t i = 0; i < struct_type->Members().size(); ++i) {
- auto mem_id = GenerateStructMember(struct_id, i, struct_type->Members()[i]);
- if (mem_id == 0) {
- return false;
+ auto* decl = struct_type->Declaration();
+ if (decl && ast::HasAttribute<transform::AddSpirvBlockAttribute::SpirvBlockAttribute>(
+ decl->attributes)) {
+ push_annot(spv::Op::OpDecorate, {Operand(struct_id), U32Operand(SpvDecorationBlock)});
}
- ops.push_back(Operand::Int(mem_id));
- }
+ for (uint32_t i = 0; i < struct_type->Members().size(); ++i) {
+ auto mem_id = GenerateStructMember(struct_id, i, struct_type->Members()[i]);
+ if (mem_id == 0) {
+ return false;
+ }
+
+ ops.push_back(Operand(mem_id));
+ }
- push_type(spv::Op::OpTypeStruct, std::move(ops));
- return true;
+ push_type(spv::Op::OpTypeStruct, std::move(ops));
+ return true;
}
uint32_t Builder::GenerateStructMember(uint32_t struct_id,
uint32_t idx,
const sem::StructMember* member) {
- push_debug(spv::Op::OpMemberName,
- {Operand::Int(struct_id), Operand::Int(idx),
- Operand::String(builder_.Symbols().NameFor(member->Name()))});
-
- // Note: This will generate layout annotations for *all* structs, whether or
- // not they are used in host-shareable variables. This is officially ok in
- // SPIR-V 1.0 through 1.3. If / when we migrate to using SPIR-V 1.4 we'll have
- // to only generate the layout info for structs used for certain storage
- // classes.
-
- push_annot(
- spv::Op::OpMemberDecorate,
- {Operand::Int(struct_id), Operand::Int(idx),
- Operand::Int(SpvDecorationOffset), Operand::Int(member->Offset())});
-
- // Infer and emit matrix layout.
- auto* matrix_type = GetNestedMatrixType(member->Type());
- if (matrix_type) {
- push_annot(spv::Op::OpMemberDecorate,
- {Operand::Int(struct_id), Operand::Int(idx),
- Operand::Int(SpvDecorationColMajor)});
- if (!matrix_type->type()->Is<sem::F32>()) {
- error_ = "matrix scalar element type must be f32";
- return 0;
- }
- const auto scalar_elem_size = 4;
- const auto effective_row_count = (matrix_type->rows() == 2) ? 2 : 4;
+ push_debug(spv::Op::OpMemberName, {Operand(struct_id), Operand(idx),
+ Operand(builder_.Symbols().NameFor(member->Name()))});
+
+ // Note: This will generate layout annotations for *all* structs, whether or
+ // not they are used in host-shareable variables. This is officially ok in
+ // SPIR-V 1.0 through 1.3. If / when we migrate to using SPIR-V 1.4 we'll have
+ // to only generate the layout info for structs used for certain storage
+ // classes.
+
push_annot(spv::Op::OpMemberDecorate,
- {Operand::Int(struct_id), Operand::Int(idx),
- Operand::Int(SpvDecorationMatrixStride),
- Operand::Int(effective_row_count * scalar_elem_size)});
- }
+ {Operand(struct_id), Operand(idx), U32Operand(SpvDecorationOffset),
+ Operand(member->Offset())});
+
+ // Infer and emit matrix layout.
+ auto* matrix_type = GetNestedMatrixType(member->Type());
+ if (matrix_type) {
+ push_annot(spv::Op::OpMemberDecorate,
+ {Operand(struct_id), Operand(idx), U32Operand(SpvDecorationColMajor)});
+ if (!matrix_type->type()->Is<sem::F32>()) {
+ error_ = "matrix scalar element type must be f32";
+ return 0;
+ }
+ const uint32_t scalar_elem_size = 4;
+ const uint32_t effective_row_count = (matrix_type->rows() == 2) ? 2 : 4;
+ push_annot(spv::Op::OpMemberDecorate,
+ {Operand(struct_id), Operand(idx), U32Operand(SpvDecorationMatrixStride),
+ Operand(effective_row_count * scalar_elem_size)});
+ }
- return GenerateTypeIfNeeded(member->Type());
+ return GenerateTypeIfNeeded(member->Type());
}
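The MatrixStride value emitted above follows the rule that an f32 matrix column with two rows occupies 8 bytes, while three- and four-row columns are padded to 16. A small sketch of that computation; the helper name is ours, for illustration only:

#include <cstdint>
#include <iostream>

// Column stride for an f32 matrix, mirroring the effective_row_count logic above.
uint32_t MatrixColumnStride(uint32_t rows) {
    const uint32_t scalar_elem_size = 4;                       // sizeof(f32)
    const uint32_t effective_row_count = (rows == 2) ? 2 : 4;  // 3-row columns pad to vec4
    return effective_row_count * scalar_elem_size;
}

int main() {
    std::cout << "2-row column stride: " << MatrixColumnStride(2) << " bytes\n";  // 8
    std::cout << "3-row column stride: " << MatrixColumnStride(3) << " bytes\n";  // 16
    std::cout << "4-row column stride: " << MatrixColumnStride(4) << " bytes\n";  // 16
    return 0;
}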
-bool Builder::GenerateVectorType(const sem::Vector* vec,
- const Operand& result) {
- auto type_id = GenerateTypeIfNeeded(vec->type());
- if (has_error()) {
- return false;
- }
+bool Builder::GenerateVectorType(const sem::Vector* vec, const Operand& result) {
+ auto type_id = GenerateTypeIfNeeded(vec->type());
+ if (has_error()) {
+ return false;
+ }
- push_type(spv::Op::OpTypeVector,
- {result, Operand::Int(type_id), Operand::Int(vec->Width())});
- return true;
+ push_type(spv::Op::OpTypeVector, {result, Operand(type_id), Operand(vec->Width())});
+ return true;
}
SpvStorageClass Builder::ConvertStorageClass(ast::StorageClass klass) const {
- switch (klass) {
- case ast::StorageClass::kInvalid:
- return SpvStorageClassMax;
- case ast::StorageClass::kInput:
- return SpvStorageClassInput;
- case ast::StorageClass::kOutput:
- return SpvStorageClassOutput;
- case ast::StorageClass::kUniform:
- return SpvStorageClassUniform;
- case ast::StorageClass::kWorkgroup:
- return SpvStorageClassWorkgroup;
- case ast::StorageClass::kUniformConstant:
- return SpvStorageClassUniformConstant;
- case ast::StorageClass::kStorage:
- return SpvStorageClassStorageBuffer;
- case ast::StorageClass::kPrivate:
- return SpvStorageClassPrivate;
- case ast::StorageClass::kFunction:
- return SpvStorageClassFunction;
- case ast::StorageClass::kNone:
- break;
- }
- return SpvStorageClassMax;
+ switch (klass) {
+ case ast::StorageClass::kInvalid:
+ return SpvStorageClassMax;
+ case ast::StorageClass::kInput:
+ return SpvStorageClassInput;
+ case ast::StorageClass::kOutput:
+ return SpvStorageClassOutput;
+ case ast::StorageClass::kUniform:
+ return SpvStorageClassUniform;
+ case ast::StorageClass::kWorkgroup:
+ return SpvStorageClassWorkgroup;
+ case ast::StorageClass::kHandle:
+ return SpvStorageClassUniformConstant;
+ case ast::StorageClass::kStorage:
+ return SpvStorageClassStorageBuffer;
+ case ast::StorageClass::kPrivate:
+ return SpvStorageClassPrivate;
+ case ast::StorageClass::kFunction:
+ return SpvStorageClassFunction;
+ case ast::StorageClass::kNone:
+ break;
+ }
+ return SpvStorageClassMax;
}
-SpvBuiltIn Builder::ConvertBuiltin(ast::Builtin builtin,
- ast::StorageClass storage) {
- switch (builtin) {
- case ast::Builtin::kPosition:
- if (storage == ast::StorageClass::kInput) {
- return SpvBuiltInFragCoord;
- } else if (storage == ast::StorageClass::kOutput) {
- return SpvBuiltInPosition;
- } else {
- TINT_ICE(Writer, builder_.Diagnostics())
- << "invalid storage class for builtin";
- break;
- }
- case ast::Builtin::kVertexIndex:
- return SpvBuiltInVertexIndex;
- case ast::Builtin::kInstanceIndex:
- return SpvBuiltInInstanceIndex;
- case ast::Builtin::kFrontFacing:
- return SpvBuiltInFrontFacing;
- case ast::Builtin::kFragDepth:
- return SpvBuiltInFragDepth;
- case ast::Builtin::kLocalInvocationId:
- return SpvBuiltInLocalInvocationId;
- case ast::Builtin::kLocalInvocationIndex:
- return SpvBuiltInLocalInvocationIndex;
- case ast::Builtin::kGlobalInvocationId:
- return SpvBuiltInGlobalInvocationId;
- case ast::Builtin::kPointSize:
- return SpvBuiltInPointSize;
- case ast::Builtin::kWorkgroupId:
- return SpvBuiltInWorkgroupId;
- case ast::Builtin::kNumWorkgroups:
- return SpvBuiltInNumWorkgroups;
- case ast::Builtin::kSampleIndex:
- push_capability(SpvCapabilitySampleRateShading);
- return SpvBuiltInSampleId;
- case ast::Builtin::kSampleMask:
- return SpvBuiltInSampleMask;
- case ast::Builtin::kNone:
- break;
- }
- return SpvBuiltInMax;
+SpvBuiltIn Builder::ConvertBuiltin(ast::Builtin builtin, ast::StorageClass storage) {
+ switch (builtin) {
+ case ast::Builtin::kPosition:
+ if (storage == ast::StorageClass::kInput) {
+ return SpvBuiltInFragCoord;
+ } else if (storage == ast::StorageClass::kOutput) {
+ return SpvBuiltInPosition;
+ } else {
+ TINT_ICE(Writer, builder_.Diagnostics()) << "invalid storage class for builtin";
+ break;
+ }
+ case ast::Builtin::kVertexIndex:
+ return SpvBuiltInVertexIndex;
+ case ast::Builtin::kInstanceIndex:
+ return SpvBuiltInInstanceIndex;
+ case ast::Builtin::kFrontFacing:
+ return SpvBuiltInFrontFacing;
+ case ast::Builtin::kFragDepth:
+ return SpvBuiltInFragDepth;
+ case ast::Builtin::kLocalInvocationId:
+ return SpvBuiltInLocalInvocationId;
+ case ast::Builtin::kLocalInvocationIndex:
+ return SpvBuiltInLocalInvocationIndex;
+ case ast::Builtin::kGlobalInvocationId:
+ return SpvBuiltInGlobalInvocationId;
+ case ast::Builtin::kPointSize:
+ return SpvBuiltInPointSize;
+ case ast::Builtin::kWorkgroupId:
+ return SpvBuiltInWorkgroupId;
+ case ast::Builtin::kNumWorkgroups:
+ return SpvBuiltInNumWorkgroups;
+ case ast::Builtin::kSampleIndex:
+ push_capability(SpvCapabilitySampleRateShading);
+ return SpvBuiltInSampleId;
+ case ast::Builtin::kSampleMask:
+ return SpvBuiltInSampleMask;
+ case ast::Builtin::kNone:
+ break;
+ }
+ return SpvBuiltInMax;
}
void Builder::AddInterpolationDecorations(uint32_t id,
ast::InterpolationType type,
ast::InterpolationSampling sampling) {
- switch (type) {
- case ast::InterpolationType::kLinear:
- push_annot(spv::Op::OpDecorate,
- {Operand::Int(id), Operand::Int(SpvDecorationNoPerspective)});
- break;
- case ast::InterpolationType::kFlat:
- push_annot(spv::Op::OpDecorate,
- {Operand::Int(id), Operand::Int(SpvDecorationFlat)});
- break;
- case ast::InterpolationType::kPerspective:
- break;
- }
- switch (sampling) {
- case ast::InterpolationSampling::kCentroid:
- push_annot(spv::Op::OpDecorate,
- {Operand::Int(id), Operand::Int(SpvDecorationCentroid)});
- break;
- case ast::InterpolationSampling::kSample:
- push_capability(SpvCapabilitySampleRateShading);
- push_annot(spv::Op::OpDecorate,
- {Operand::Int(id), Operand::Int(SpvDecorationSample)});
- break;
- case ast::InterpolationSampling::kCenter:
- case ast::InterpolationSampling::kNone:
- break;
- }
+ switch (type) {
+ case ast::InterpolationType::kLinear:
+ push_annot(spv::Op::OpDecorate, {Operand(id), U32Operand(SpvDecorationNoPerspective)});
+ break;
+ case ast::InterpolationType::kFlat:
+ push_annot(spv::Op::OpDecorate, {Operand(id), U32Operand(SpvDecorationFlat)});
+ break;
+ case ast::InterpolationType::kPerspective:
+ break;
+ }
+ switch (sampling) {
+ case ast::InterpolationSampling::kCentroid:
+ push_annot(spv::Op::OpDecorate, {Operand(id), U32Operand(SpvDecorationCentroid)});
+ break;
+ case ast::InterpolationSampling::kSample:
+ push_capability(SpvCapabilitySampleRateShading);
+ push_annot(spv::Op::OpDecorate, {Operand(id), U32Operand(SpvDecorationSample)});
+ break;
+ case ast::InterpolationSampling::kCenter:
+ case ast::InterpolationSampling::kNone:
+ break;
+ }
}
-SpvImageFormat Builder::convert_texel_format_to_spv(
- const ast::TexelFormat format) {
- switch (format) {
- case ast::TexelFormat::kR32Uint:
- return SpvImageFormatR32ui;
- case ast::TexelFormat::kR32Sint:
- return SpvImageFormatR32i;
- case ast::TexelFormat::kR32Float:
- return SpvImageFormatR32f;
- case ast::TexelFormat::kRgba8Unorm:
- return SpvImageFormatRgba8;
- case ast::TexelFormat::kRgba8Snorm:
- return SpvImageFormatRgba8Snorm;
- case ast::TexelFormat::kRgba8Uint:
- return SpvImageFormatRgba8ui;
- case ast::TexelFormat::kRgba8Sint:
- return SpvImageFormatRgba8i;
- case ast::TexelFormat::kRg32Uint:
- push_capability(SpvCapabilityStorageImageExtendedFormats);
- return SpvImageFormatRg32ui;
- case ast::TexelFormat::kRg32Sint:
- push_capability(SpvCapabilityStorageImageExtendedFormats);
- return SpvImageFormatRg32i;
- case ast::TexelFormat::kRg32Float:
- push_capability(SpvCapabilityStorageImageExtendedFormats);
- return SpvImageFormatRg32f;
- case ast::TexelFormat::kRgba16Uint:
- return SpvImageFormatRgba16ui;
- case ast::TexelFormat::kRgba16Sint:
- return SpvImageFormatRgba16i;
- case ast::TexelFormat::kRgba16Float:
- return SpvImageFormatRgba16f;
- case ast::TexelFormat::kRgba32Uint:
- return SpvImageFormatRgba32ui;
- case ast::TexelFormat::kRgba32Sint:
- return SpvImageFormatRgba32i;
- case ast::TexelFormat::kRgba32Float:
- return SpvImageFormatRgba32f;
- case ast::TexelFormat::kNone:
- return SpvImageFormatUnknown;
- }
- return SpvImageFormatUnknown;
+SpvImageFormat Builder::convert_texel_format_to_spv(const ast::TexelFormat format) {
+ switch (format) {
+ case ast::TexelFormat::kR32Uint:
+ return SpvImageFormatR32ui;
+ case ast::TexelFormat::kR32Sint:
+ return SpvImageFormatR32i;
+ case ast::TexelFormat::kR32Float:
+ return SpvImageFormatR32f;
+ case ast::TexelFormat::kRgba8Unorm:
+ return SpvImageFormatRgba8;
+ case ast::TexelFormat::kRgba8Snorm:
+ return SpvImageFormatRgba8Snorm;
+ case ast::TexelFormat::kRgba8Uint:
+ return SpvImageFormatRgba8ui;
+ case ast::TexelFormat::kRgba8Sint:
+ return SpvImageFormatRgba8i;
+ case ast::TexelFormat::kRg32Uint:
+ push_capability(SpvCapabilityStorageImageExtendedFormats);
+ return SpvImageFormatRg32ui;
+ case ast::TexelFormat::kRg32Sint:
+ push_capability(SpvCapabilityStorageImageExtendedFormats);
+ return SpvImageFormatRg32i;
+ case ast::TexelFormat::kRg32Float:
+ push_capability(SpvCapabilityStorageImageExtendedFormats);
+ return SpvImageFormatRg32f;
+ case ast::TexelFormat::kRgba16Uint:
+ return SpvImageFormatRgba16ui;
+ case ast::TexelFormat::kRgba16Sint:
+ return SpvImageFormatRgba16i;
+ case ast::TexelFormat::kRgba16Float:
+ return SpvImageFormatRgba16f;
+ case ast::TexelFormat::kRgba32Uint:
+ return SpvImageFormatRgba32ui;
+ case ast::TexelFormat::kRgba32Sint:
+ return SpvImageFormatRgba32i;
+ case ast::TexelFormat::kRgba32Float:
+ return SpvImageFormatRgba32f;
+ case ast::TexelFormat::kNone:
+ return SpvImageFormatUnknown;
+ }
+ return SpvImageFormatUnknown;
}
bool Builder::push_function_inst(spv::Op op, const OperandList& operands) {
- if (functions_.empty()) {
- std::ostringstream ss;
- ss << "Internal error: trying to add SPIR-V instruction " << int(op)
- << " outside a function";
- error_ = ss.str();
- return false;
- }
- functions_.back().push_inst(op, operands);
- return true;
+ if (functions_.empty()) {
+ std::ostringstream ss;
+ ss << "Internal error: trying to add SPIR-V instruction " << int(op)
+ << " outside a function";
+ error_ = ss.str();
+ return false;
+ }
+ functions_.back().push_inst(op, operands);
+ return true;
}
bool Builder::InsideBasicBlock() const {
- if (functions_.empty()) {
- return false;
- }
- const auto& instructions = functions_.back().instructions();
- if (instructions.empty()) {
- // The Function object does not explicitly represent its entry block
- // label. So return *true* because an empty list means the only
- // thing in the function is that entry block label.
+ if (functions_.empty()) {
+ return false;
+ }
+ const auto& instructions = functions_.back().instructions();
+ if (instructions.empty()) {
+ // The Function object does not explicitly represent its entry block
+ // label. So return *true* because an empty list means the only
+ // thing in the function is that entry block label.
+ return true;
+ }
+ const auto& inst = instructions.back();
+ switch (inst.opcode()) {
+ case spv::Op::OpBranch:
+ case spv::Op::OpBranchConditional:
+ case spv::Op::OpSwitch:
+ case spv::Op::OpReturn:
+ case spv::Op::OpReturnValue:
+ case spv::Op::OpUnreachable:
+ case spv::Op::OpKill:
+ case spv::Op::OpTerminateInvocation:
+ return false;
+ default:
+ break;
+ }
return true;
- }
- const auto& inst = instructions.back();
- switch (inst.opcode()) {
- case spv::Op::OpBranch:
- case spv::Op::OpBranchConditional:
- case spv::Op::OpSwitch:
- case spv::Op::OpReturn:
- case spv::Op::OpReturnValue:
- case spv::Op::OpUnreachable:
- case spv::Op::OpKill:
- case spv::Op::OpTerminateInvocation:
- return false;
- default:
- break;
- }
- return true;
}
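InsideBasicBlock reduces to "was the last emitted instruction a block terminator?", with an empty instruction list counting as being inside the implicit entry block. A simplified standalone version of that check; the Op enum is a stand-in for the SPIR-V opcodes listed in the switch above:

#include <iostream>
#include <vector>

// Stand-in for the SPIR-V opcodes named above.
enum class Op {
    Label, IAdd, Branch, BranchConditional, Switch, Return, ReturnValue,
    Unreachable, Kill, TerminateInvocation
};

bool IsBlockTerminator(Op op) {
    switch (op) {
        case Op::Branch:
        case Op::BranchConditional:
        case Op::Switch:
        case Op::Return:
        case Op::ReturnValue:
        case Op::Unreachable:
        case Op::Kill:
        case Op::TerminateInvocation:
            return true;
        default:
            return false;
    }
}

int main() {
    std::vector<Op> instructions = {Op::IAdd, Op::Branch};
    // Empty means we are still in the (implicit) entry block.
    bool inside = instructions.empty() || !IsBlockTerminator(instructions.back());
    std::cout << (inside ? "inside a block" : "block already terminated") << "\n";
    return 0;
}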
-Builder::ContinuingInfo::ContinuingInfo(
- const ast::Statement* the_last_statement,
- uint32_t loop_id,
- uint32_t break_id)
- : last_statement(the_last_statement),
- loop_header_id(loop_id),
- break_target_id(break_id) {
- TINT_ASSERT(Writer, last_statement != nullptr);
- TINT_ASSERT(Writer, loop_header_id != 0u);
- TINT_ASSERT(Writer, break_target_id != 0u);
+Builder::ContinuingInfo::ContinuingInfo(const ast::Statement* the_last_statement,
+ uint32_t loop_id,
+ uint32_t break_id)
+ : last_statement(the_last_statement), loop_header_id(loop_id), break_target_id(break_id) {
+ TINT_ASSERT(Writer, last_statement != nullptr);
+ TINT_ASSERT(Writer, loop_header_id != 0u);
+ TINT_ASSERT(Writer, break_target_id != 0u);
}
Builder::Backedge::Backedge(spv::Op the_opcode, OperandList the_operands)
: opcode(the_opcode), operands(the_operands) {}
Builder::Backedge::Backedge(const Builder::Backedge& other) = default;
-Builder::Backedge& Builder::Backedge::operator=(
- const Builder::Backedge& other) = default;
+Builder::Backedge& Builder::Backedge::operator=(const Builder::Backedge& other) = default;
Builder::Backedge::~Backedge() = default;
+Builder::Scope::Scope() = default;
+Builder::Scope::Scope(const Scope&) = default;
+Builder::Scope::~Scope() = default;
+
} // namespace tint::writer::spirv
diff --git a/chromium/third_party/dawn/src/tint/writer/spirv/builder.h b/chromium/third_party/dawn/src/tint/writer/spirv/builder.h
index 7ad9cf242d1..9866328a59b 100644
--- a/chromium/third_party/dawn/src/tint/writer/spirv/builder.h
+++ b/chromium/third_party/dawn/src/tint/writer/spirv/builder.h
@@ -36,13 +36,14 @@
#include "src/tint/program_builder.h"
#include "src/tint/scope_stack.h"
#include "src/tint/sem/builtin.h"
-#include "src/tint/sem/storage_texture_type.h"
+#include "src/tint/sem/storage_texture.h"
#include "src/tint/writer/spirv/function.h"
#include "src/tint/writer/spirv/scalar_constant.h"
// Forward declarations
namespace tint::sem {
class Call;
+class Constant;
class Reference;
class TypeConstructor;
class TypeConversion;
@@ -52,592 +53,607 @@ namespace tint::writer::spirv {
/// Builder class to create SPIR-V instructions from a module.
class Builder {
- public:
- /// Contains information for generating accessor chains
- struct AccessorInfo {
- AccessorInfo();
- ~AccessorInfo();
-
- /// The ID of the current chain source. The chain source may change as we
- /// evaluate the access chain. The chain source always points to the ID
- /// which we will use to evaluate the current set of accessors. This maybe
- /// the original variable, or maybe an intermediary if we had to evaulate
- /// the access chain early (in the case of a swizzle of an access chain).
- uint32_t source_id;
- /// The type of the current chain source. This type matches the deduced
- /// result_type of the current source defined above.
- const sem::Type* source_type;
- /// A list of access chain indices to emit. Note, we _only_ have access
- /// chain indices if the source is reference.
- std::vector<uint32_t> access_chain_indices;
- };
-
- /// Constructor
- /// @param program the program
- /// @param zero_initialize_workgroup_memory `true` to initialize all the
- /// variables in the Workgroup storage class with OpConstantNull
- Builder(const Program* program,
- bool zero_initialize_workgroup_memory = false);
- ~Builder();
-
- /// Generates the SPIR-V instructions for the given program
- /// @returns true if the SPIR-V was successfully built
- bool Build();
-
- /// @returns the error string or blank if no error was reported.
- const std::string& error() const { return error_; }
- /// @returns true if the builder encountered an error
- bool has_error() const { return !error_.empty(); }
-
- /// @returns the number of uint32_t's needed to make up the results
- uint32_t total_size() const;
-
- /// @returns the id bound for this program
- uint32_t id_bound() const { return next_id_; }
-
- /// @returns the next id to be used
- uint32_t next_id() {
- auto id = next_id_;
- next_id_ += 1;
- return id;
- }
-
- /// Iterates over all the instructions in the correct order and calls the
- /// given callback
- /// @param cb the callback to execute
- void iterate(std::function<void(const Instruction&)> cb) const;
-
- /// Adds an instruction to the list of capabilities, if the capability
- /// hasn't already been added.
- /// @param cap the capability to set
- void push_capability(uint32_t cap);
- /// @returns the capabilities
- const InstructionList& capabilities() const { return capabilities_; }
- /// Adds an instruction to the extensions
- /// @param op the op to set
- /// @param operands the operands for the instruction
- void push_extension(spv::Op op, const OperandList& operands) {
- extensions_.push_back(Instruction{op, operands});
- }
- /// @returns the extensions
- const InstructionList& extensions() const { return extensions_; }
- /// Adds an instruction to the ext import
- /// @param op the op to set
- /// @param operands the operands for the instruction
- void push_ext_import(spv::Op op, const OperandList& operands) {
- ext_imports_.push_back(Instruction{op, operands});
- }
- /// @returns the ext imports
- const InstructionList& ext_imports() const { return ext_imports_; }
- /// Adds an instruction to the memory model
- /// @param op the op to set
- /// @param operands the operands for the instruction
- void push_memory_model(spv::Op op, const OperandList& operands) {
- memory_model_.push_back(Instruction{op, operands});
- }
- /// @returns the memory model
- const InstructionList& memory_model() const { return memory_model_; }
- /// Adds an instruction to the entry points
- /// @param op the op to set
- /// @param operands the operands for the instruction
- void push_entry_point(spv::Op op, const OperandList& operands) {
- entry_points_.push_back(Instruction{op, operands});
- }
- /// @returns the entry points
- const InstructionList& entry_points() const { return entry_points_; }
- /// Adds an instruction to the execution modes
- /// @param op the op to set
- /// @param operands the operands for the instruction
- void push_execution_mode(spv::Op op, const OperandList& operands) {
- execution_modes_.push_back(Instruction{op, operands});
- }
- /// @returns the execution modes
- const InstructionList& execution_modes() const { return execution_modes_; }
- /// Adds an instruction to the debug
- /// @param op the op to set
- /// @param operands the operands for the instruction
- void push_debug(spv::Op op, const OperandList& operands) {
- debug_.push_back(Instruction{op, operands});
- }
- /// @returns the debug instructions
- const InstructionList& debug() const { return debug_; }
- /// Adds an instruction to the types
- /// @param op the op to set
- /// @param operands the operands for the instruction
- void push_type(spv::Op op, const OperandList& operands) {
- types_.push_back(Instruction{op, operands});
- }
- /// @returns the type instructions
- const InstructionList& types() const { return types_; }
- /// Adds an instruction to the annotations
- /// @param op the op to set
- /// @param operands the operands for the instruction
- void push_annot(spv::Op op, const OperandList& operands) {
- annotations_.push_back(Instruction{op, operands});
- }
- /// @returns the annotations
- const InstructionList& annots() const { return annotations_; }
-
- /// Adds a function to the builder
- /// @param func the function to add
- void push_function(const Function& func) {
- functions_.push_back(func);
- current_label_id_ = func.label_id();
- }
- /// @returns the functions
- const std::vector<Function>& functions() const { return functions_; }
- /// Pushes an instruction to the current function. If we're outside
- /// a function then issue an internal error and return false.
- /// @param op the operation
- /// @param operands the operands
- /// @returns true if we succeeded
- bool push_function_inst(spv::Op op, const OperandList& operands);
- /// Pushes a variable to the current function
- /// @param operands the variable operands
- void push_function_var(const OperandList& operands) {
- if (functions_.empty()) {
- TINT_ICE(Writer, builder_.Diagnostics())
- << "push_function_var() called without a function";
+ public:
+ /// Contains information for generating accessor chains
+ struct AccessorInfo {
+ AccessorInfo();
+ ~AccessorInfo();
+
+ /// The ID of the current chain source. The chain source may change as we
+ /// evaluate the access chain. The chain source always points to the ID
+        /// which we will use to evaluate the current set of accessors. This may be
+        /// the original variable, or an intermediary if we had to evaluate
+        /// the access chain early (in the case of a swizzle of an access chain).
+ uint32_t source_id;
+ /// The type of the current chain source. This type matches the deduced
+ /// result_type of the current source defined above.
+ const sem::Type* source_type;
+        /// A list of access chain indices to emit. Note, we _only_ have access
+        /// chain indices if the source is a reference.
+ std::vector<uint32_t> access_chain_indices;
+ };
+
+ /// Constructor
+ /// @param program the program
+ /// @param zero_initialize_workgroup_memory `true` to initialize all the
+ /// variables in the Workgroup storage class with OpConstantNull
+ explicit Builder(const Program* program, bool zero_initialize_workgroup_memory = false);
+ ~Builder();
+
+ /// Generates the SPIR-V instructions for the given program
+ /// @returns true if the SPIR-V was successfully built
+ bool Build();
+
+ /// @returns the error string or blank if no error was reported.
+ const std::string& error() const { return error_; }
+ /// @returns true if the builder encountered an error
+ bool has_error() const { return !error_.empty(); }
+
+ /// @returns the number of uint32_t's needed to make up the results
+ uint32_t total_size() const;
+
+ /// @returns the id bound for this program
+ uint32_t id_bound() const { return next_id_; }
+
+ /// @returns the next id to be used
+ uint32_t next_id() {
+ auto id = next_id_;
+ next_id_ += 1;
+ return id;
}
- functions_.back().push_var(operands);
- }
-
- /// @returns true if the current instruction insertion point is
- /// inside a basic block.
- bool InsideBasicBlock() const;
-
- /// Converts a storage class to a SPIR-V storage class.
- /// @param klass the storage class to convert
- /// @returns the SPIR-V storage class or SpvStorageClassMax on error.
- SpvStorageClass ConvertStorageClass(ast::StorageClass klass) const;
- /// Converts a builtin to a SPIR-V builtin and pushes a capability if needed.
- /// @param builtin the builtin to convert
- /// @param storage the storage class that this builtin is being used with
- /// @returns the SPIR-V builtin or SpvBuiltInMax on error.
- SpvBuiltIn ConvertBuiltin(ast::Builtin builtin, ast::StorageClass storage);
-
- /// Converts an interpolate attribute to SPIR-V decorations and pushes a
- /// capability if needed.
- /// @param id the id to decorate
- /// @param type the interpolation type
- /// @param sampling the interpolation sampling
- void AddInterpolationDecorations(uint32_t id,
- ast::InterpolationType type,
- ast::InterpolationSampling sampling);
-
- /// Generates a label for the given id. Emits an error and returns false if
- /// we're currently outside a function.
- /// @param id the id to use for the label
- /// @returns true on success.
- bool GenerateLabel(uint32_t id);
- /// Generates an assignment statement
- /// @param assign the statement to generate
- /// @returns true if the statement was successfully generated
- bool GenerateAssignStatement(const ast::AssignmentStatement* assign);
- /// Generates a block statement, wrapped in a push/pop scope
- /// @param stmt the statement to generate
- /// @returns true if the statement was successfully generated
- bool GenerateBlockStatement(const ast::BlockStatement* stmt);
- /// Generates a block statement
- /// @param stmt the statement to generate
- /// @returns true if the statement was successfully generated
- bool GenerateBlockStatementWithoutScoping(const ast::BlockStatement* stmt);
- /// Generates a break statement
- /// @param stmt the statement to generate
- /// @returns true if the statement was successfully generated
- bool GenerateBreakStatement(const ast::BreakStatement* stmt);
- /// Generates a continue statement
- /// @param stmt the statement to generate
- /// @returns true if the statement was successfully generated
- bool GenerateContinueStatement(const ast::ContinueStatement* stmt);
- /// Generates a discard statement
- /// @param stmt the statement to generate
- /// @returns true if the statement was successfully generated
- bool GenerateDiscardStatement(const ast::DiscardStatement* stmt);
- /// Generates an entry point instruction
- /// @param func the function
- /// @param id the id of the function
- /// @returns true if the instruction was generated, false otherwise
- bool GenerateEntryPoint(const ast::Function* func, uint32_t id);
- /// Generates execution modes for an entry point
- /// @param func the function
- /// @param id the id of the function
- /// @returns false on failure
- bool GenerateExecutionModes(const ast::Function* func, uint32_t id);
- /// Generates an expression
- /// @param expr the expression to generate
- /// @returns the resulting ID of the expression or 0 on error
- uint32_t GenerateExpression(const ast::Expression* expr);
- /// Generates the instructions for a function
- /// @param func the function to generate
- /// @returns true if the instructions were generated
- bool GenerateFunction(const ast::Function* func);
- /// Generates a function type if not already created
- /// @param func the function to generate for
- /// @returns the ID to use for the function type. Returns 0 on failure.
- uint32_t GenerateFunctionTypeIfNeeded(const sem::Function* func);
- /// Generates access control annotations if needed
- /// @param type the type to generate for
- /// @param struct_id the struct id
- /// @param member_idx the member index
- void GenerateMemberAccessIfNeeded(const sem::Type* type,
- uint32_t struct_id,
- uint32_t member_idx);
- /// Generates a function variable
- /// @param var the variable
- /// @returns true if the variable was generated
- bool GenerateFunctionVariable(const ast::Variable* var);
- /// Generates a global variable
- /// @param var the variable to generate
-  /// @returns true if the variable is emitted.
- bool GenerateGlobalVariable(const ast::Variable* var);
- /// Generates an index accessor expression.
- ///
- /// For more information on accessors see the "Pointer evaluation" section of
- /// the WGSL specification.
- ///
-  /// @param expr the expression to generate
- /// @returns the id of the expression or 0 on failure
- uint32_t GenerateAccessorExpression(const ast::Expression* expr);
- /// Generates an index accessor
- /// @param expr the accessor to generate
- /// @param info the current accessor information
- /// @returns true if the accessor was generated successfully
- bool GenerateIndexAccessor(const ast::IndexAccessorExpression* expr,
- AccessorInfo* info);
- /// Generates a member accessor
- /// @param expr the accessor to generate
- /// @param info the current accessor information
- /// @returns true if the accessor was generated successfully
- bool GenerateMemberAccessor(const ast::MemberAccessorExpression* expr,
- AccessorInfo* info);
- /// Generates an identifier expression
-  /// @param expr the expression to generate
- /// @returns the id of the expression or 0 on failure
- uint32_t GenerateIdentifierExpression(const ast::IdentifierExpression* expr);
- /// Generates a unary op expression
- /// @param expr the expression to generate
- /// @returns the id of the expression or 0 on failure
- uint32_t GenerateUnaryOpExpression(const ast::UnaryOpExpression* expr);
- /// Generates an if statement
- /// @param stmt the statement to generate
- /// @returns true on success
- bool GenerateIfStatement(const ast::IfStatement* stmt);
- /// Generates an import instruction for the "GLSL.std.450" extended
- /// instruction set, if one doesn't exist yet, and returns the import ID.
- /// @returns the import ID, or 0 on error.
- uint32_t GetGLSLstd450Import();
- /// Generates a constructor expression
- /// @param var the variable generated for, nullptr if no variable associated.
- /// @param expr the expression to generate
- /// @returns the ID of the expression or 0 on failure.
- uint32_t GenerateConstructorExpression(const ast::Variable* var,
- const ast::Expression* expr);
- /// Generates a literal constant if needed
- /// @param var the variable generated for, nullptr if no variable associated.
- /// @param lit the literal to generate
- /// @returns the ID on success or 0 on failure
- uint32_t GenerateLiteralIfNeeded(const ast::Variable* var,
- const ast::LiteralExpression* lit);
- /// Generates a binary expression
- /// @param expr the expression to generate
- /// @returns the expression ID on success or 0 otherwise
- uint32_t GenerateBinaryExpression(const ast::BinaryExpression* expr);
- /// Generates a bitcast expression
- /// @param expr the expression to generate
- /// @returns the expression ID on success or 0 otherwise
- uint32_t GenerateBitcastExpression(const ast::BitcastExpression* expr);
-  /// Generates a short-circuiting binary expression
- /// @param expr the expression to generate
-  /// @returns the expression ID on success or 0 otherwise
- uint32_t GenerateShortCircuitBinaryExpression(
- const ast::BinaryExpression* expr);
- /// Generates a call expression
- /// @param expr the expression to generate
- /// @returns the expression ID on success or 0 otherwise
- uint32_t GenerateCallExpression(const ast::CallExpression* expr);
- /// Handles generating a function call expression
- /// @param call the call expression
- /// @param function the function being called
- /// @returns the expression ID on success or 0 otherwise
- uint32_t GenerateFunctionCall(const sem::Call* call,
- const sem::Function* function);
- /// Handles generating a builtin call expression
- /// @param call the call expression
- /// @param builtin the builtin being called
- /// @returns the expression ID on success or 0 otherwise
- uint32_t GenerateBuiltinCall(const sem::Call* call,
- const sem::Builtin* builtin);
- /// Handles generating a type constructor or type conversion expression
- /// @param call the call expression
- /// @param var the variable that is being initialized. May be null.
- /// @returns the expression ID on success or 0 otherwise
- uint32_t GenerateTypeConstructorOrConversion(const sem::Call* call,
- const ast::Variable* var);
- /// Generates a texture builtin call. Emits an error and returns false if
- /// we're currently outside a function.
- /// @param call the call expression
- /// @param builtin the semantic information for the texture builtin
- /// @param result_type result type operand of the texture instruction
- /// @param result_id result identifier operand of the texture instruction
- /// parameters
- /// @returns true on success
- bool GenerateTextureBuiltin(const sem::Call* call,
- const sem::Builtin* builtin,
- spirv::Operand result_type,
- spirv::Operand result_id);
- /// Generates a control barrier statement.
- /// @param builtin the semantic information for the barrier builtin call
- /// @returns true on success
- bool GenerateControlBarrierBuiltin(const sem::Builtin* builtin);
- /// Generates an atomic builtin call.
- /// @param call the call expression
- /// @param builtin the semantic information for the atomic builtin call
- /// @param result_type result type operand of the texture instruction
- /// @param result_id result identifier operand of the texture instruction
- /// @returns true on success
- bool GenerateAtomicBuiltin(const sem::Call* call,
- const sem::Builtin* builtin,
- Operand result_type,
- Operand result_id);
- /// Generates a sampled image
- /// @param texture_type the texture type
- /// @param texture_operand the texture operand
- /// @param sampler_operand the sampler operand
- /// @returns the expression ID
- uint32_t GenerateSampledImage(const sem::Type* texture_type,
- Operand texture_operand,
- Operand sampler_operand);
- /// Generates a cast or object copy for the expression result,
-  /// or returns the ID generated for the expression if it is already
- /// of the right type.
-  /// @param to_type the type we're casting to
- /// @param from_expr the expression to cast
- /// @param is_global_init if this is a global initializer
- /// @returns the expression ID on success or 0 otherwise
- uint32_t GenerateCastOrCopyOrPassthrough(const sem::Type* to_type,
- const ast::Expression* from_expr,
- bool is_global_init);
- /// Generates a loop statement
- /// @param stmt the statement to generate
- /// @returns true on successful generation
- bool GenerateLoopStatement(const ast::LoopStatement* stmt);
- /// Generates a return statement
- /// @param stmt the statement to generate
- /// @returns true on success, false otherwise
- bool GenerateReturnStatement(const ast::ReturnStatement* stmt);
- /// Generates a switch statement
- /// @param stmt the statement to generate
-  /// @returns true on success, false otherwise
- bool GenerateSwitchStatement(const ast::SwitchStatement* stmt);
- /// Generates a conditional section merge block
- /// @param cond the condition
- /// @param true_body the statements making up the true block
- /// @param cur_else_idx the index of the current else statement to process
- /// @param else_stmts the list of all else statements
- /// @returns true on success, false on failure
- bool GenerateConditionalBlock(const ast::Expression* cond,
- const ast::BlockStatement* true_body,
- size_t cur_else_idx,
- const ast::ElseStatementList& else_stmts);
- /// Generates a statement
- /// @param stmt the statement to generate
- /// @returns true if the statement was generated
- bool GenerateStatement(const ast::Statement* stmt);
- /// Generates an expression. If the WGSL expression does not have reference
- /// type, then return the SPIR-V ID for the expression. Otherwise implement
- /// the WGSL Load Rule: generate an OpLoad and return the ID of the result.
- /// Returns 0 if the expression could not be generated.
- /// @param expr the semantic expression node to be generated
-  /// @returns the ID of the expression, or loaded expression
- uint32_t GenerateExpressionWithLoadIfNeeded(const sem::Expression* expr);
- /// Generates an expression. If the WGSL expression does not have reference
- /// type, then return the SPIR-V ID for the expression. Otherwise implement
- /// the WGSL Load Rule: generate an OpLoad and return the ID of the result.
- /// Returns 0 if the expression could not be generated.
- /// @param expr the AST expression to be generated
-  /// @returns the ID of the expression, or loaded expression
- uint32_t GenerateExpressionWithLoadIfNeeded(const ast::Expression* expr);
- /// Generates an OpLoad on the given ID if it has reference type in WGSL,
-  /// otherwise return the ID itself.
- /// @param type the type of the expression
-  /// @param id the SPIR-V id of the expression
- /// @returns the ID of the loaded value or `id` if type is not a reference
- uint32_t GenerateLoadIfNeeded(const sem::Type* type, uint32_t id);
- /// Generates an OpStore. Emits an error and returns false if we're
- /// currently outside a function.
-  /// @param to the ID to store to
- /// @param from the ID to store from
- /// @returns true on success
- bool GenerateStore(uint32_t to, uint32_t from);
- /// Generates a type if not already created
- /// @param type the type to create
- /// @returns the ID to use for the given type. Returns 0 on unknown type.
- uint32_t GenerateTypeIfNeeded(const sem::Type* type);
- /// Generates a texture type declaration
- /// @param texture the texture to generate
- /// @param result the result operand
- /// @returns true if the texture was successfully generated
- bool GenerateTextureType(const sem::Texture* texture, const Operand& result);
- /// Generates an array type declaration
- /// @param ary the array to generate
- /// @param result the result operand
- /// @returns true if the array was successfully generated
- bool GenerateArrayType(const sem::Array* ary, const Operand& result);
- /// Generates a matrix type declaration
- /// @param mat the matrix to generate
- /// @param result the result operand
- /// @returns true if the matrix was successfully generated
- bool GenerateMatrixType(const sem::Matrix* mat, const Operand& result);
- /// Generates a pointer type declaration
- /// @param ptr the pointer type to generate
- /// @param result the result operand
- /// @returns true if the pointer was successfully generated
- bool GeneratePointerType(const sem::Pointer* ptr, const Operand& result);
- /// Generates a reference type declaration
- /// @param ref the reference type to generate
- /// @param result the result operand
- /// @returns true if the reference was successfully generated
- bool GenerateReferenceType(const sem::Reference* ref, const Operand& result);
-  /// Generates a struct type declaration
-  /// @param struct_type the struct to generate
-  /// @param result the result operand
-  /// @returns true if the struct was successfully generated
- bool GenerateStructType(const sem::Struct* struct_type,
- const Operand& result);
- /// Generates a struct member
- /// @param struct_id the id of the parent structure
- /// @param idx the index of the member
- /// @param member the member to generate
- /// @returns the id of the struct member or 0 on error.
- uint32_t GenerateStructMember(uint32_t struct_id,
- uint32_t idx,
- const sem::StructMember* member);
- /// Generates a variable declaration statement
- /// @param stmt the statement to generate
-  /// @returns true on successful generation
- bool GenerateVariableDeclStatement(const ast::VariableDeclStatement* stmt);
- /// Generates a vector type declaration
- /// @param vec the vector to generate
- /// @param result the result operand
- /// @returns true if the vector was successfully generated
- bool GenerateVectorType(const sem::Vector* vec, const Operand& result);
-
- /// Generates instructions to splat `scalar_id` into a vector of type
- /// `vec_type`
- /// @param scalar_id scalar to splat
- /// @param vec_type type of vector
- /// @returns id of the new vector
- uint32_t GenerateSplat(uint32_t scalar_id, const sem::Type* vec_type);
-
- /// Generates instructions to add or subtract two matrices
- /// @param lhs_id id of multiplicand
- /// @param rhs_id id of multiplier
- /// @param type type of both matrices and of result
- /// @param op one of `spv::Op::OpFAdd` or `spv::Op::OpFSub`
- /// @returns id of the result matrix
- uint32_t GenerateMatrixAddOrSub(uint32_t lhs_id,
- uint32_t rhs_id,
- const sem::Matrix* type,
- spv::Op op);
-
- /// Converts TexelFormat to SPIR-V and pushes an appropriate capability.
- /// @param format AST image format type
- /// @returns SPIR-V image format type
- SpvImageFormat convert_texel_format_to_spv(const ast::TexelFormat format);
-
- /// Determines if the given type constructor is created from constant values
- /// @param expr the expression to check
- /// @returns true if the constructor is constant
- bool IsConstructorConst(const ast::Expression* expr);
-
- private:
- /// @returns an Operand with a new result ID in it. Increments the next_id_
- /// automatically.
- Operand result_op();
-
- /// @returns the resolved type of the ast::Expression `expr`
- /// @param expr the expression
- const sem::Type* TypeOf(const ast::Expression* expr) const {
- return builder_.TypeOf(expr);
- }
-
- /// Generates a scalar constant if needed
- /// @param constant the constant to generate.
- /// @returns the ID on success or 0 on failure
- uint32_t GenerateConstantIfNeeded(const ScalarConstant& constant);
-
- /// Generates a constant-null of the given type, if needed
- /// @param type the type of the constant null to generate.
- /// @returns the ID on success or 0 on failure
- uint32_t GenerateConstantNullIfNeeded(const sem::Type* type);
-
- /// Generates a vector constant splat if needed
- /// @param type the type of the vector to generate
- /// @param value_id the ID of the scalar value to splat
- /// @returns the ID on success or 0 on failure
- uint32_t GenerateConstantVectorSplatIfNeeded(const sem::Vector* type,
- uint32_t value_id);
-
- ProgramBuilder builder_;
- std::string error_;
- uint32_t next_id_ = 1;
- uint32_t current_label_id_ = 0;
- InstructionList capabilities_;
- InstructionList extensions_;
- InstructionList ext_imports_;
- InstructionList memory_model_;
- InstructionList entry_points_;
- InstructionList execution_modes_;
- InstructionList debug_;
- InstructionList types_;
- InstructionList annotations_;
- std::vector<Function> functions_;
-
- std::unordered_map<std::string, uint32_t> import_name_to_id_;
- std::unordered_map<Symbol, uint32_t> func_symbol_to_id_;
- std::unordered_map<sem::CallTargetSignature, uint32_t> func_sig_to_id_;
- std::unordered_map<const sem::Type*, uint32_t> type_to_id_;
- std::unordered_map<ScalarConstant, uint32_t> const_to_id_;
- std::unordered_map<std::string, uint32_t> type_constructor_to_id_;
- std::unordered_map<const sem::Type*, uint32_t> const_null_to_id_;
- std::unordered_map<uint64_t, uint32_t> const_splat_to_id_;
- std::unordered_map<const sem::Type*, uint32_t>
- texture_type_to_sampled_image_type_id_;
- ScopeStack<uint32_t> scope_stack_;
- std::unordered_map<uint32_t, const ast::Variable*> spirv_id_to_variable_;
- std::vector<uint32_t> merge_stack_;
- std::vector<uint32_t> continue_stack_;
- std::unordered_set<uint32_t> capability_set_;
- bool has_overridable_workgroup_size_ = false;
- bool zero_initialize_workgroup_memory_ = false;
-
- struct ContinuingInfo {
- ContinuingInfo(const ast::Statement* last_statement,
- uint32_t loop_header_id,
- uint32_t break_target_id);
-    // The last statement in the continuing block.
- const ast::Statement* const last_statement = nullptr;
- // The ID of the loop header
- const uint32_t loop_header_id = 0u;
- // The ID of the merge block for the loop.
- const uint32_t break_target_id = 0u;
- };
- // Stack of nodes, where each is the last statement in a surrounding
- // continuing block.
- std::vector<ContinuingInfo> continuing_stack_;
-
- // The instruction to emit as the backedge of a loop.
- struct Backedge {
- Backedge(spv::Op, OperandList);
- Backedge(const Backedge&);
- Backedge& operator=(const Backedge&);
- ~Backedge();
-
- spv::Op opcode;
- OperandList operands;
- };
- std::vector<Backedge> backedge_stack_;
+
+ /// Iterates over all the instructions in the correct order and calls the
+ /// given callback
+ /// @param cb the callback to execute
+ void iterate(std::function<void(const Instruction&)> cb) const;
+
+ /// Adds an instruction to the list of capabilities, if the capability
+ /// hasn't already been added.
+ /// @param cap the capability to set
+ void push_capability(uint32_t cap);
+ /// @returns the capabilities
+ const InstructionList& capabilities() const { return capabilities_; }
+ /// Adds an instruction to the extensions
+ /// @param extension the name of the extension
+ void push_extension(const char* extension);
+ /// @returns the extensions
+ const InstructionList& extensions() const { return extensions_; }
+ /// Adds an instruction to the ext import
+ /// @param op the op to set
+ /// @param operands the operands for the instruction
+ void push_ext_import(spv::Op op, const OperandList& operands) {
+ ext_imports_.push_back(Instruction{op, operands});
+ }
+ /// @returns the ext imports
+ const InstructionList& ext_imports() const { return ext_imports_; }
+ /// Adds an instruction to the memory model
+ /// @param op the op to set
+ /// @param operands the operands for the instruction
+ void push_memory_model(spv::Op op, const OperandList& operands) {
+ memory_model_.push_back(Instruction{op, operands});
+ }
+ /// @returns the memory model
+ const InstructionList& memory_model() const { return memory_model_; }
+ /// Adds an instruction to the entry points
+ /// @param op the op to set
+ /// @param operands the operands for the instruction
+ void push_entry_point(spv::Op op, const OperandList& operands) {
+ entry_points_.push_back(Instruction{op, operands});
+ }
+ /// @returns the entry points
+ const InstructionList& entry_points() const { return entry_points_; }
+ /// Adds an instruction to the execution modes
+ /// @param op the op to set
+ /// @param operands the operands for the instruction
+ void push_execution_mode(spv::Op op, const OperandList& operands) {
+ execution_modes_.push_back(Instruction{op, operands});
+ }
+ /// @returns the execution modes
+ const InstructionList& execution_modes() const { return execution_modes_; }
+ /// Adds an instruction to the debug
+ /// @param op the op to set
+ /// @param operands the operands for the instruction
+ void push_debug(spv::Op op, const OperandList& operands) {
+ debug_.push_back(Instruction{op, operands});
+ }
+ /// @returns the debug instructions
+ const InstructionList& debug() const { return debug_; }
+ /// Adds an instruction to the types
+ /// @param op the op to set
+ /// @param operands the operands for the instruction
+ void push_type(spv::Op op, const OperandList& operands) {
+ types_.push_back(Instruction{op, operands});
+ }
+ /// @returns the type instructions
+ const InstructionList& types() const { return types_; }
+ /// Adds an instruction to the annotations
+ /// @param op the op to set
+ /// @param operands the operands for the instruction
+ void push_annot(spv::Op op, const OperandList& operands) {
+ annotations_.push_back(Instruction{op, operands});
+ }
+ /// @returns the annotations
+ const InstructionList& annots() const { return annotations_; }
+
+ /// Adds a function to the builder
+ /// @param func the function to add
+ void push_function(const Function& func) {
+ functions_.push_back(func);
+ current_label_id_ = func.label_id();
+ }
+ /// @returns the functions
+ const std::vector<Function>& functions() const { return functions_; }
+ /// Pushes an instruction to the current function. If we're outside
+ /// a function then issue an internal error and return false.
+ /// @param op the operation
+ /// @param operands the operands
+ /// @returns true if we succeeded
+ bool push_function_inst(spv::Op op, const OperandList& operands);
+ /// Pushes a variable to the current function
+ /// @param operands the variable operands
+ void push_function_var(const OperandList& operands) {
+ if (functions_.empty()) {
+ TINT_ICE(Writer, builder_.Diagnostics())
+ << "push_function_var() called without a function";
+ }
+ functions_.back().push_var(operands);
+ }
+
+ /// @returns true if the current instruction insertion point is
+ /// inside a basic block.
+ bool InsideBasicBlock() const;
+
+ /// Converts a storage class to a SPIR-V storage class.
+ /// @param klass the storage class to convert
+ /// @returns the SPIR-V storage class or SpvStorageClassMax on error.
+ SpvStorageClass ConvertStorageClass(ast::StorageClass klass) const;
+ /// Converts a builtin to a SPIR-V builtin and pushes a capability if needed.
+ /// @param builtin the builtin to convert
+ /// @param storage the storage class that this builtin is being used with
+ /// @returns the SPIR-V builtin or SpvBuiltInMax on error.
+ SpvBuiltIn ConvertBuiltin(ast::Builtin builtin, ast::StorageClass storage);
+
+ /// Converts an interpolate attribute to SPIR-V decorations and pushes a
+ /// capability if needed.
+ /// @param id the id to decorate
+ /// @param type the interpolation type
+ /// @param sampling the interpolation sampling
+ void AddInterpolationDecorations(uint32_t id,
+ ast::InterpolationType type,
+ ast::InterpolationSampling sampling);
+
+ /// Generates the enabling of an extension. Emits an error and returns false if the extension is
+ /// not supported.
+ /// @param ext the extension to generate
+ /// @returns true on success.
+ bool GenerateExtension(ast::Extension ext);
+ /// Generates a label for the given id. Emits an error and returns false if
+ /// we're currently outside a function.
+ /// @param id the id to use for the label
+ /// @returns true on success.
+ bool GenerateLabel(uint32_t id);
+ /// Generates an assignment statement
+ /// @param assign the statement to generate
+ /// @returns true if the statement was successfully generated
+ bool GenerateAssignStatement(const ast::AssignmentStatement* assign);
+ /// Generates a block statement, wrapped in a push/pop scope
+ /// @param stmt the statement to generate
+ /// @returns true if the statement was successfully generated
+ bool GenerateBlockStatement(const ast::BlockStatement* stmt);
+ /// Generates a block statement
+ /// @param stmt the statement to generate
+ /// @returns true if the statement was successfully generated
+ bool GenerateBlockStatementWithoutScoping(const ast::BlockStatement* stmt);
+ /// Generates a break statement
+ /// @param stmt the statement to generate
+ /// @returns true if the statement was successfully generated
+ bool GenerateBreakStatement(const ast::BreakStatement* stmt);
+ /// Generates a continue statement
+ /// @param stmt the statement to generate
+ /// @returns true if the statement was successfully generated
+ bool GenerateContinueStatement(const ast::ContinueStatement* stmt);
+ /// Generates a discard statement
+ /// @param stmt the statement to generate
+ /// @returns true if the statement was successfully generated
+ bool GenerateDiscardStatement(const ast::DiscardStatement* stmt);
+ /// Generates an entry point instruction
+ /// @param func the function
+ /// @param id the id of the function
+ /// @returns true if the instruction was generated, false otherwise
+ bool GenerateEntryPoint(const ast::Function* func, uint32_t id);
+ /// Generates execution modes for an entry point
+ /// @param func the function
+ /// @param id the id of the function
+ /// @returns false on failure
+ bool GenerateExecutionModes(const ast::Function* func, uint32_t id);
+ /// Generates an expression
+ /// @param expr the expression to generate
+ /// @returns the resulting ID of the expression or 0 on error
+ uint32_t GenerateExpression(const ast::Expression* expr);
+ /// Generates the instructions for a function
+ /// @param func the function to generate
+ /// @returns true if the instructions were generated
+ bool GenerateFunction(const ast::Function* func);
+ /// Generates a function type if not already created
+ /// @param func the function to generate for
+ /// @returns the ID to use for the function type. Returns 0 on failure.
+ uint32_t GenerateFunctionTypeIfNeeded(const sem::Function* func);
+ /// Generates access control annotations if needed
+ /// @param type the type to generate for
+ /// @param struct_id the struct id
+ /// @param member_idx the member index
+ void GenerateMemberAccessIfNeeded(const sem::Type* type,
+ uint32_t struct_id,
+ uint32_t member_idx);
+ /// Generates a function variable
+ /// @param var the variable
+ /// @returns true if the variable was generated
+ bool GenerateFunctionVariable(const ast::Variable* var);
+ /// Generates a global variable
+ /// @param var the variable to generate
+    /// @returns true if the variable is emitted.
+ bool GenerateGlobalVariable(const ast::Variable* var);
+ /// Generates an index accessor expression.
+ ///
+ /// For more information on accessors see the "Pointer evaluation" section of
+ /// the WGSL specification.
+ ///
+    /// @param expr the expression to generate
+ /// @returns the id of the expression or 0 on failure
+ uint32_t GenerateAccessorExpression(const ast::Expression* expr);
+ /// Generates an index accessor
+ /// @param expr the accessor to generate
+ /// @param info the current accessor information
+ /// @returns true if the accessor was generated successfully
+ bool GenerateIndexAccessor(const ast::IndexAccessorExpression* expr, AccessorInfo* info);
+ /// Generates a member accessor
+ /// @param expr the accessor to generate
+ /// @param info the current accessor information
+ /// @returns true if the accessor was generated successfully
+ bool GenerateMemberAccessor(const ast::MemberAccessorExpression* expr, AccessorInfo* info);
+ /// Generates an identifier expression
+    /// @param expr the expression to generate
+ /// @returns the id of the expression or 0 on failure
+ uint32_t GenerateIdentifierExpression(const ast::IdentifierExpression* expr);
+ /// Generates a unary op expression
+ /// @param expr the expression to generate
+ /// @returns the id of the expression or 0 on failure
+ uint32_t GenerateUnaryOpExpression(const ast::UnaryOpExpression* expr);
+ /// Generates an if statement
+ /// @param stmt the statement to generate
+ /// @returns true on success
+ bool GenerateIfStatement(const ast::IfStatement* stmt);
+ /// Generates an import instruction for the "GLSL.std.450" extended
+ /// instruction set, if one doesn't exist yet, and returns the import ID.
+ /// @returns the import ID, or 0 on error.
+ uint32_t GetGLSLstd450Import();
+ /// Generates a constructor expression
+ /// @param var the variable generated for, nullptr if no variable associated.
+ /// @param expr the expression to generate
+ /// @returns the ID of the expression or 0 on failure.
+ uint32_t GenerateConstructorExpression(const ast::Variable* var, const ast::Expression* expr);
+ /// Generates a literal constant if needed
+ /// @param var the variable generated for, nullptr if no variable associated.
+ /// @param lit the literal to generate
+ /// @returns the ID on success or 0 on failure
+ uint32_t GenerateLiteralIfNeeded(const ast::Variable* var, const ast::LiteralExpression* lit);
+ /// Generates a binary expression
+ /// @param expr the expression to generate
+ /// @returns the expression ID on success or 0 otherwise
+ uint32_t GenerateBinaryExpression(const ast::BinaryExpression* expr);
+ /// Generates a bitcast expression
+ /// @param expr the expression to generate
+ /// @returns the expression ID on success or 0 otherwise
+ uint32_t GenerateBitcastExpression(const ast::BitcastExpression* expr);
+    /// Generates a short-circuiting binary expression
+ /// @param expr the expression to generate
+    /// @returns the expression ID on success or 0 otherwise
+ uint32_t GenerateShortCircuitBinaryExpression(const ast::BinaryExpression* expr);
+ /// Generates a call expression
+ /// @param expr the expression to generate
+ /// @returns the expression ID on success or 0 otherwise
+ uint32_t GenerateCallExpression(const ast::CallExpression* expr);
+ /// Handles generating a function call expression
+ /// @param call the call expression
+ /// @param function the function being called
+ /// @returns the expression ID on success or 0 otherwise
+ uint32_t GenerateFunctionCall(const sem::Call* call, const sem::Function* function);
+ /// Handles generating a builtin call expression
+ /// @param call the call expression
+ /// @param builtin the builtin being called
+ /// @returns the expression ID on success or 0 otherwise
+ uint32_t GenerateBuiltinCall(const sem::Call* call, const sem::Builtin* builtin);
+ /// Handles generating a type constructor or type conversion expression
+ /// @param call the call expression
+ /// @param var the variable that is being initialized. May be null.
+ /// @returns the expression ID on success or 0 otherwise
+ uint32_t GenerateTypeConstructorOrConversion(const sem::Call* call, const ast::Variable* var);
+ /// Generates a texture builtin call. Emits an error and returns false if
+ /// we're currently outside a function.
+ /// @param call the call expression
+ /// @param builtin the semantic information for the texture builtin
+ /// @param result_type result type operand of the texture instruction
+    /// @param result_id result identifier operand of the texture instruction
+ /// @returns true on success
+ bool GenerateTextureBuiltin(const sem::Call* call,
+ const sem::Builtin* builtin,
+ spirv::Operand result_type,
+ spirv::Operand result_id);
+ /// Generates a control barrier statement.
+ /// @param builtin the semantic information for the barrier builtin call
+ /// @returns true on success
+ bool GenerateControlBarrierBuiltin(const sem::Builtin* builtin);
+ /// Generates an atomic builtin call.
+ /// @param call the call expression
+ /// @param builtin the semantic information for the atomic builtin call
+    /// @param result_type result type operand of the atomic instruction
+    /// @param result_id result identifier operand of the atomic instruction
+ /// @returns true on success
+ bool GenerateAtomicBuiltin(const sem::Call* call,
+ const sem::Builtin* builtin,
+ Operand result_type,
+ Operand result_id);
+ /// Generates a sampled image
+ /// @param texture_type the texture type
+ /// @param texture_operand the texture operand
+ /// @param sampler_operand the sampler operand
+ /// @returns the expression ID
+ uint32_t GenerateSampledImage(const sem::Type* texture_type,
+ Operand texture_operand,
+ Operand sampler_operand);
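+    // Sketch of the expected output: the two operands are combined into a
+    // sampled image value, e.g.
+    //   %n = OpSampledImage %sampled_image_type %texture %sampler
+    // with the sampled image type cached in
+    // texture_type_to_sampled_image_type_id_. Ids here are placeholders.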
+ /// Generates a cast or object copy for the expression result,
+    /// or returns the ID generated for the expression if it is already
+ /// of the right type.
+    /// @param to_type the type we're casting to
+ /// @param from_expr the expression to cast
+ /// @param is_global_init if this is a global initializer
+ /// @returns the expression ID on success or 0 otherwise
+ uint32_t GenerateCastOrCopyOrPassthrough(const sem::Type* to_type,
+ const ast::Expression* from_expr,
+ bool is_global_init);
+ /// Generates a loop statement
+ /// @param stmt the statement to generate
+ /// @returns true on successful generation
+ bool GenerateLoopStatement(const ast::LoopStatement* stmt);
+ /// Generates a return statement
+ /// @param stmt the statement to generate
+ /// @returns true on success, false otherwise
+ bool GenerateReturnStatement(const ast::ReturnStatement* stmt);
+ /// Generates a switch statement
+ /// @param stmt the statement to generate
+    /// @returns true on success, false otherwise
+ bool GenerateSwitchStatement(const ast::SwitchStatement* stmt);
+ /// Generates a conditional section merge block
+ /// @param cond the condition
+ /// @param true_body the statements making up the true block
+ /// @param else_stmt the statement for the else block
+ /// @returns true on success, false on failure
+ bool GenerateConditionalBlock(const ast::Expression* cond,
+ const ast::BlockStatement* true_body,
+ const ast::Statement* else_stmt);
+ /// Generates a statement
+ /// @param stmt the statement to generate
+ /// @returns true if the statement was generated
+ bool GenerateStatement(const ast::Statement* stmt);
+ /// Generates an expression. If the WGSL expression does not have reference
+ /// type, then return the SPIR-V ID for the expression. Otherwise implement
+ /// the WGSL Load Rule: generate an OpLoad and return the ID of the result.
+ /// Returns 0 if the expression could not be generated.
+ /// @param expr the semantic expression node to be generated
+    /// @returns the ID of the expression, or loaded expression
+ uint32_t GenerateExpressionWithLoadIfNeeded(const sem::Expression* expr);
+ /// Generates an expression. If the WGSL expression does not have reference
+ /// type, then return the SPIR-V ID for the expression. Otherwise implement
+ /// the WGSL Load Rule: generate an OpLoad and return the ID of the result.
+ /// Returns 0 if the expression could not be generated.
+ /// @param expr the AST expression to be generated
+    /// @returns the ID of the expression, or loaded expression
+ uint32_t GenerateExpressionWithLoadIfNeeded(const ast::Expression* expr);
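+    // Example of the Load Rule (ids are illustrative): given
+    //   var idx : i32;  ... ary[idx]
+    // the identifier `idx` has reference type, so a load is emitted before the
+    // value is used as an index:
+    //   %10 = OpLoad %8 %6
+    //   %12 = OpAccessChain %11 %1 %10
+    // An expression that is not a reference is returned unchanged.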
+ /// Generates an OpLoad on the given ID if it has reference type in WGSL,
+    /// otherwise return the ID itself.
+ /// @param type the type of the expression
+    /// @param id the SPIR-V id of the expression
+ /// @returns the ID of the loaded value or `id` if type is not a reference
+ uint32_t GenerateLoadIfNeeded(const sem::Type* type, uint32_t id);
+ /// Generates an OpStore. Emits an error and returns false if we're
+ /// currently outside a function.
+    /// @param to the ID to store to
+ /// @param from the ID to store from
+ /// @returns true on success
+ bool GenerateStore(uint32_t to, uint32_t from);
+ /// Generates a type if not already created
+ /// @param type the type to create
+ /// @returns the ID to use for the given type. Returns 0 on unknown type.
+ uint32_t GenerateTypeIfNeeded(const sem::Type* type);
+ /// Generates a texture type declaration
+ /// @param texture the texture to generate
+ /// @param result the result operand
+ /// @returns true if the texture was successfully generated
+ bool GenerateTextureType(const sem::Texture* texture, const Operand& result);
+ /// Generates an array type declaration
+ /// @param ary the array to generate
+ /// @param result the result operand
+ /// @returns true if the array was successfully generated
+ bool GenerateArrayType(const sem::Array* ary, const Operand& result);
+ /// Generates a matrix type declaration
+ /// @param mat the matrix to generate
+ /// @param result the result operand
+ /// @returns true if the matrix was successfully generated
+ bool GenerateMatrixType(const sem::Matrix* mat, const Operand& result);
+ /// Generates a pointer type declaration
+ /// @param ptr the pointer type to generate
+ /// @param result the result operand
+ /// @returns true if the pointer was successfully generated
+ bool GeneratePointerType(const sem::Pointer* ptr, const Operand& result);
+ /// Generates a reference type declaration
+ /// @param ref the reference type to generate
+ /// @param result the result operand
+ /// @returns true if the reference was successfully generated
+ bool GenerateReferenceType(const sem::Reference* ref, const Operand& result);
+    /// Generates a struct type declaration
+    /// @param struct_type the struct to generate
+    /// @param result the result operand
+    /// @returns true if the struct was successfully generated
+ bool GenerateStructType(const sem::Struct* struct_type, const Operand& result);
+ /// Generates a struct member
+ /// @param struct_id the id of the parent structure
+ /// @param idx the index of the member
+ /// @param member the member to generate
+ /// @returns the id of the struct member or 0 on error.
+ uint32_t GenerateStructMember(uint32_t struct_id,
+ uint32_t idx,
+ const sem::StructMember* member);
+ /// Generates a variable declaration statement
+ /// @param stmt the statement to generate
+    /// @returns true on successful generation
+ bool GenerateVariableDeclStatement(const ast::VariableDeclStatement* stmt);
+ /// Generates a vector type declaration
+ /// @param vec the vector to generate
+ /// @param result the result operand
+ /// @returns true if the vector was successfully generated
+ bool GenerateVectorType(const sem::Vector* vec, const Operand& result);
+
+ /// Generates instructions to splat `scalar_id` into a vector of type
+ /// `vec_type`
+ /// @param scalar_id scalar to splat
+ /// @param vec_type type of vector
+ /// @returns id of the new vector
+ uint32_t GenerateSplat(uint32_t scalar_id, const sem::Type* vec_type);
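+    // Sketch, assuming a non-constant scalar: splatting %7 into vec3<f32> is
+    // expected to emit something like
+    //   %8 = OpCompositeConstruct %vec3f32 %7 %7 %7
+    // Constant splats are handled separately via
+    // GenerateConstantVectorSplatIfNeeded().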
+
+ /// Generates instructions to add or subtract two matrices
+ /// @param lhs_id id of multiplicand
+ /// @param rhs_id id of multiplier
+ /// @param type type of both matrices and of result
+ /// @param op one of `spv::Op::OpFAdd` or `spv::Op::OpFSub`
+ /// @returns id of the result matrix
+ uint32_t GenerateMatrixAddOrSub(uint32_t lhs_id,
+ uint32_t rhs_id,
+ const sem::Matrix* type,
+ spv::Op op);
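+    // Sketch of the expected expansion (SPIR-V has no whole-matrix
+    // OpFAdd/OpFSub): each column is extracted with OpCompositeExtract,
+    // combined column-wise with `op`, and the result matrix is rebuilt with
+    // OpCompositeConstruct.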
+
+ /// Converts TexelFormat to SPIR-V and pushes an appropriate capability.
+ /// @param format AST image format type
+ /// @returns SPIR-V image format type
+ SpvImageFormat convert_texel_format_to_spv(const ast::TexelFormat format);
+
+ /// Determines if the given type constructor is created from constant values
+ /// @param expr the expression to check
+ /// @returns true if the constructor is constant
+ bool IsConstructorConst(const ast::Expression* expr);
+
+ private:
+ /// @returns an Operand with a new result ID in it. Increments the next_id_
+ /// automatically.
+ Operand result_op();
+
+ /// @returns the resolved type of the ast::Expression `expr`
+ /// @param expr the expression
+ const sem::Type* TypeOf(const ast::Expression* expr) const { return builder_.TypeOf(expr); }
+
+ /// Generates a constant value if needed
+ /// @param constant the constant to generate.
+ /// @returns the ID on success or 0 on failure
+ uint32_t GenerateConstantIfNeeded(const sem::Constant& constant);
+
+ /// Generates a scalar constant if needed
+ /// @param constant the constant to generate.
+ /// @returns the ID on success or 0 on failure
+ uint32_t GenerateConstantIfNeeded(const ScalarConstant& constant);
+
+ /// Generates a constant-null of the given type, if needed
+ /// @param type the type of the constant null to generate.
+ /// @returns the ID on success or 0 on failure
+ uint32_t GenerateConstantNullIfNeeded(const sem::Type* type);
+
+ /// Generates a vector constant splat if needed
+ /// @param type the type of the vector to generate
+ /// @param value_id the ID of the scalar value to splat
+ /// @returns the ID on success or 0 on failure
+ uint32_t GenerateConstantVectorSplatIfNeeded(const sem::Vector* type, uint32_t value_id);
+
+ /// Registers the semantic variable to the given SPIR-V ID
+ /// @param var the semantic variable
+ /// @param id the generated SPIR-V identifier for the variable
+ void RegisterVariable(const sem::Variable* var, uint32_t id);
+
+ /// Looks up the SPIR-V ID for the variable, which must have been registered
+ /// with a call to RegisterVariable()
+ /// @returns the SPIR-V ID, or 0 if the variable was not found
+ uint32_t LookupVariableID(const sem::Variable* var);
+
+ /// Pushes a new scope
+ void PushScope();
+
+ /// Pops the top-most scope
+ void PopScope();
+
+ ProgramBuilder builder_;
+ std::string error_;
+ uint32_t next_id_ = 1;
+ uint32_t current_label_id_ = 0;
+ InstructionList capabilities_;
+ InstructionList extensions_;
+ InstructionList ext_imports_;
+ InstructionList memory_model_;
+ InstructionList entry_points_;
+ InstructionList execution_modes_;
+ InstructionList debug_;
+ InstructionList types_;
+ InstructionList annotations_;
+ std::vector<Function> functions_;
+
+ // Scope holds per-block information
+ struct Scope {
+ Scope();
+ Scope(const Scope&);
+ ~Scope();
+ std::unordered_map<OperandListKey, uint32_t> type_ctor_to_id_;
+ };
+
+ std::unordered_map<const sem::Variable*, uint32_t> var_to_id_;
+ std::unordered_map<uint32_t, const sem::Variable*> id_to_var_;
+ std::unordered_map<std::string, uint32_t> import_name_to_id_;
+ std::unordered_map<Symbol, uint32_t> func_symbol_to_id_;
+ std::unordered_map<sem::CallTargetSignature, uint32_t> func_sig_to_id_;
+ std::unordered_map<const sem::Type*, uint32_t> type_to_id_;
+ std::unordered_map<ScalarConstant, uint32_t> const_to_id_;
+ std::unordered_map<const sem::Type*, uint32_t> const_null_to_id_;
+ std::unordered_map<uint64_t, uint32_t> const_splat_to_id_;
+ std::unordered_map<const sem::Type*, uint32_t> texture_type_to_sampled_image_type_id_;
+ std::vector<Scope> scope_stack_;
+ std::vector<uint32_t> merge_stack_;
+ std::vector<uint32_t> continue_stack_;
+ std::unordered_set<uint32_t> capability_set_;
+ bool has_overridable_workgroup_size_ = false;
+ bool zero_initialize_workgroup_memory_ = false;
+
+ struct ContinuingInfo {
+ ContinuingInfo(const ast::Statement* last_statement,
+ uint32_t loop_header_id,
+ uint32_t break_target_id);
+        // The last statement in the continuing block.
+ const ast::Statement* const last_statement = nullptr;
+ // The ID of the loop header
+ const uint32_t loop_header_id = 0u;
+ // The ID of the merge block for the loop.
+ const uint32_t break_target_id = 0u;
+ };
+ // Stack of nodes, where each is the last statement in a surrounding
+ // continuing block.
+ std::vector<ContinuingInfo> continuing_stack_;
+
+ // The instruction to emit as the backedge of a loop.
+ struct Backedge {
+ Backedge(spv::Op, OperandList);
+ Backedge(const Backedge&);
+ Backedge& operator=(const Backedge&);
+ ~Backedge();
+
+ spv::Op opcode;
+ OperandList operands;
+ };
+ std::vector<Backedge> backedge_stack_;
};
} // namespace tint::writer::spirv
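The tests that follow drive the Builder directly; a minimal sketch of that
pattern (BuilderTest/TestHelper, the AST helpers and the number suffixes come
from the test fixtures shown below):

    TEST_F(BuilderTest, IndexAccessor_Sketch) {
        // var ary : vec3<f32>;  ary[1i]
        auto* var = Var("ary", ty.vec3<f32>());
        auto* expr = IndexAccessor("ary", 1_i);
        WrapInFunction(var, expr);

        spirv::Builder& b = Build();

        b.push_function(Function{});
        ASSERT_TRUE(b.GenerateFunctionVariable(var)) << b.error();
        EXPECT_NE(b.GenerateAccessorExpression(expr), 0u);

        // The emitted module can then be inspected via b.types(), b.annots()
        // and b.functions()[0].instructions().
    }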
diff --git a/chromium/third_party/dawn/src/tint/writer/spirv/builder_accessor_expression_test.cc b/chromium/third_party/dawn/src/tint/writer/spirv/builder_accessor_expression_test.cc
index 285d2594eea..2df1b6585ff 100644
--- a/chromium/third_party/dawn/src/tint/writer/spirv/builder_accessor_expression_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/spirv/builder_accessor_expression_test.cc
@@ -15,31 +15,33 @@
#include "src/tint/writer/spirv/spv_dump.h"
#include "src/tint/writer/spirv/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::spirv {
namespace {
using BuilderTest = TestHelper;
TEST_F(BuilderTest, IndexAccessor_VectorRef_Literal) {
- // var ary : vec3<f32>;
- // ary[1] -> ref<f32>
+ // var ary : vec3<f32>;
+ // ary[1] -> ref<f32>
- auto* var = Var("ary", ty.vec3<f32>());
+ auto* var = Var("ary", ty.vec3<f32>());
- auto* ary = Expr("ary");
- auto* idx_expr = Expr(1);
+ auto* ary = Expr("ary");
+ auto* idx_expr = Expr(1_i);
- auto* expr = IndexAccessor(ary, idx_expr);
- WrapInFunction(var, expr);
+ auto* expr = IndexAccessor(ary, idx_expr);
+ WrapInFunction(var, expr);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- ASSERT_TRUE(b.GenerateFunctionVariable(var)) << b.error();
+ b.push_function(Function{});
+ ASSERT_TRUE(b.GenerateFunctionVariable(var)) << b.error();
- EXPECT_EQ(b.GenerateAccessorExpression(expr), 9u);
+ EXPECT_EQ(b.GenerateAccessorExpression(expr), 9u);
- EXPECT_EQ(DumpInstructions(b.types()), R"(%4 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%4 = OpTypeFloat 32
%3 = OpTypeVector %4 3
%2 = OpTypePointer Function %3
%5 = OpConstantNull %3
@@ -47,37 +49,37 @@ TEST_F(BuilderTest, IndexAccessor_VectorRef_Literal) {
%7 = OpConstant %6 1
%8 = OpTypePointer Function %4
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].variables()),
- R"(%1 = OpVariable %2 Function %5
+ EXPECT_EQ(DumpInstructions(b.functions()[0].variables()),
+ R"(%1 = OpVariable %2 Function %5
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(%9 = OpAccessChain %8 %1 %7
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(%9 = OpAccessChain %8 %1 %7
)");
}
TEST_F(BuilderTest, IndexAccessor_VectorRef_Dynamic) {
- // var ary : vec3<f32>;
- // var idx : i32;
- // ary[idx] -> ref<f32>
+ // var ary : vec3<f32>;
+ // var idx : i32;
+ // ary[idx] -> ref<f32>
- auto* var = Var("ary", ty.vec3<f32>());
- auto* idx = Var("idx", ty.i32());
+ auto* var = Var("ary", ty.vec3<f32>());
+ auto* idx = Var("idx", ty.i32());
- auto* ary = Expr("ary");
- auto* idx_expr = Expr("idx");
+ auto* ary = Expr("ary");
+ auto* idx_expr = Expr("idx");
- auto* expr = IndexAccessor(ary, idx_expr);
- WrapInFunction(var, idx, expr);
+ auto* expr = IndexAccessor(ary, idx_expr);
+ WrapInFunction(var, idx, expr);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- ASSERT_TRUE(b.GenerateFunctionVariable(var)) << b.error();
- ASSERT_TRUE(b.GenerateFunctionVariable(idx)) << b.error();
+ b.push_function(Function{});
+ ASSERT_TRUE(b.GenerateFunctionVariable(var)) << b.error();
+ ASSERT_TRUE(b.GenerateFunctionVariable(idx)) << b.error();
- EXPECT_EQ(b.GenerateAccessorExpression(expr), 12u);
+ EXPECT_EQ(b.GenerateAccessorExpression(expr), 12u);
- EXPECT_EQ(DumpInstructions(b.types()), R"(%4 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%4 = OpTypeFloat 32
%3 = OpTypeVector %4 3
%2 = OpTypePointer Function %3
%5 = OpConstantNull %3
@@ -86,35 +88,35 @@ TEST_F(BuilderTest, IndexAccessor_VectorRef_Dynamic) {
%9 = OpConstantNull %8
%11 = OpTypePointer Function %4
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].variables()),
- R"(%1 = OpVariable %2 Function %5
+ EXPECT_EQ(DumpInstructions(b.functions()[0].variables()),
+ R"(%1 = OpVariable %2 Function %5
%6 = OpVariable %7 Function %9
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(%10 = OpLoad %8 %6
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(%10 = OpLoad %8 %6
%12 = OpAccessChain %11 %1 %10
)");
}
TEST_F(BuilderTest, IndexAccessor_VectorRef_Dynamic2) {
- // var ary : vec3<f32>;
- // ary[1 + 2] -> ref<f32>
+ // var ary : vec3<f32>;
+ // ary[1 + 2] -> ref<f32>
- auto* var = Var("ary", ty.vec3<f32>());
+ auto* var = Var("ary", ty.vec3<f32>());
- auto* ary = Expr("ary");
+ auto* ary = Expr("ary");
- auto* expr = IndexAccessor(ary, Add(1, 2));
- WrapInFunction(var, expr);
+ auto* expr = IndexAccessor(ary, Add(1_i, 2_i));
+ WrapInFunction(var, expr);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- ASSERT_TRUE(b.GenerateFunctionVariable(var)) << b.error();
+ b.push_function(Function{});
+ ASSERT_TRUE(b.GenerateFunctionVariable(var)) << b.error();
- EXPECT_EQ(b.GenerateAccessorExpression(expr), 11u);
+ EXPECT_EQ(b.GenerateAccessorExpression(expr), 11u);
- EXPECT_EQ(DumpInstructions(b.types()), R"(%4 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%4 = OpTypeFloat 32
%3 = OpTypeVector %4 3
%2 = OpTypePointer Function %3
%5 = OpConstantNull %3
@@ -123,34 +125,34 @@ TEST_F(BuilderTest, IndexAccessor_VectorRef_Dynamic2) {
%8 = OpConstant %6 2
%10 = OpTypePointer Function %4
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].variables()),
- R"(%1 = OpVariable %2 Function %5
+ EXPECT_EQ(DumpInstructions(b.functions()[0].variables()),
+ R"(%1 = OpVariable %2 Function %5
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(%9 = OpIAdd %6 %7 %8
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(%9 = OpIAdd %6 %7 %8
%11 = OpAccessChain %10 %1 %9
)");
}
TEST_F(BuilderTest, IndexAccessor_ArrayRef_MultiLevel) {
- auto* ary4 = ty.array(ty.vec3<f32>(), 4);
+ auto* ary4 = ty.array(ty.vec3<f32>(), 4_u);
- // var ary : array<vec3<f32>, 4>
- // ary[3][2];
+ // var ary : array<vec3<f32>, 4u>
+ // ary[3i][2i];
- auto* var = Var("ary", ary4);
+ auto* var = Var("ary", ary4);
- auto* expr = IndexAccessor(IndexAccessor("ary", 3), 2);
- WrapInFunction(var, expr);
+ auto* expr = IndexAccessor(IndexAccessor("ary", 3_i), 2_i);
+ WrapInFunction(var, expr);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- ASSERT_TRUE(b.GenerateFunctionVariable(var)) << b.error();
+ b.push_function(Function{});
+ ASSERT_TRUE(b.GenerateFunctionVariable(var)) << b.error();
- EXPECT_EQ(b.GenerateAccessorExpression(expr), 13u);
+ EXPECT_EQ(b.GenerateAccessorExpression(expr), 13u);
- EXPECT_EQ(DumpInstructions(b.types()), R"(%5 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%5 = OpTypeFloat 32
%4 = OpTypeVector %5 3
%6 = OpTypeInt 32 0
%7 = OpConstant %6 4
@@ -162,32 +164,32 @@ TEST_F(BuilderTest, IndexAccessor_ArrayRef_MultiLevel) {
%11 = OpConstant %9 2
%12 = OpTypePointer Function %5
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].variables()),
- R"(%1 = OpVariable %2 Function %8
+ EXPECT_EQ(DumpInstructions(b.functions()[0].variables()),
+ R"(%1 = OpVariable %2 Function %8
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(%13 = OpAccessChain %12 %1 %10 %11
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(%13 = OpAccessChain %12 %1 %10 %11
)");
}
TEST_F(BuilderTest, IndexAccessor_ArrayRef_ArrayWithSwizzle) {
- auto* ary4 = ty.array(ty.vec3<f32>(), 4);
+ auto* ary4 = ty.array(ty.vec3<f32>(), 4_u);
- // var a : array<vec3<f32>, 4>;
- // a[2].xy;
+ // var a : array<vec3<f32>, 4u>;
+ // a[2i].xy;
- auto* var = Var("ary", ary4);
+ auto* var = Var("ary", ary4);
- auto* expr = MemberAccessor(IndexAccessor("ary", 2), "xy");
- WrapInFunction(var, expr);
+ auto* expr = MemberAccessor(IndexAccessor("ary", 2_i), "xy");
+ WrapInFunction(var, expr);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- ASSERT_TRUE(b.GenerateFunctionVariable(var)) << b.error();
- EXPECT_EQ(b.GenerateAccessorExpression(expr), 15u);
+ b.push_function(Function{});
+ ASSERT_TRUE(b.GenerateFunctionVariable(var)) << b.error();
+ EXPECT_EQ(b.GenerateAccessorExpression(expr), 15u);
- EXPECT_EQ(DumpInstructions(b.types()), R"(%5 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%5 = OpTypeFloat 32
%4 = OpTypeVector %5 3
%6 = OpTypeInt 32 0
%7 = OpConstant %6 4
@@ -199,42 +201,42 @@ TEST_F(BuilderTest, IndexAccessor_ArrayRef_ArrayWithSwizzle) {
%11 = OpTypePointer Function %4
%13 = OpTypeVector %5 2
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].variables()),
- R"(%1 = OpVariable %2 Function %8
+ EXPECT_EQ(DumpInstructions(b.functions()[0].variables()),
+ R"(%1 = OpVariable %2 Function %8
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(%12 = OpAccessChain %11 %1 %10
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(%12 = OpAccessChain %11 %1 %10
%14 = OpLoad %4 %12
%15 = OpVectorShuffle %13 %14 %14 0 1
)");
}
TEST_F(BuilderTest, MemberAccessor) {
- // my_struct {
- // a : f32
- // b : f32
- // }
- // var ident : my_struct
- // ident.b
+ // my_struct {
+ // a : f32
+ // b : f32
+ // }
+ // var ident : my_struct
+ // ident.b
- auto* s = Structure("my_struct", {
- Member("a", ty.f32()),
- Member("b", ty.f32()),
- });
+ auto* s = Structure("my_struct", {
+ Member("a", ty.f32()),
+ Member("b", ty.f32()),
+ });
- auto* var = Var("ident", ty.Of(s));
+ auto* var = Var("ident", ty.Of(s));
- auto* expr = MemberAccessor("ident", "b");
- WrapInFunction(var, expr);
+ auto* expr = MemberAccessor("ident", "b");
+ WrapInFunction(var, expr);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- ASSERT_TRUE(b.GenerateFunctionVariable(var)) << b.error();
+ b.push_function(Function{});
+ ASSERT_TRUE(b.GenerateFunctionVariable(var)) << b.error();
- EXPECT_EQ(b.GenerateAccessorExpression(expr), 9u);
+ EXPECT_EQ(b.GenerateAccessorExpression(expr), 9u);
- EXPECT_EQ(DumpInstructions(b.types()), R"(%4 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%4 = OpTypeFloat 32
%3 = OpTypeStruct %4 %4
%2 = OpTypePointer Function %3
%5 = OpConstantNull %3
@@ -242,44 +244,44 @@ TEST_F(BuilderTest, MemberAccessor) {
%7 = OpConstant %6 1
%8 = OpTypePointer Function %4
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].variables()),
- R"(%1 = OpVariable %2 Function %5
+ EXPECT_EQ(DumpInstructions(b.functions()[0].variables()),
+ R"(%1 = OpVariable %2 Function %5
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(%9 = OpAccessChain %8 %1 %7
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(%9 = OpAccessChain %8 %1 %7
)");
}
TEST_F(BuilderTest, MemberAccessor_Nested) {
- // inner_struct {
- // a : f32
- // b : f32
- // }
- // my_struct {
- // inner : inner_struct
- // }
- //
- // var ident : my_struct
-  // ident.inner.b
- auto* inner_struct = Structure("Inner", {
- Member("a", ty.f32()),
- Member("b", ty.f32()),
- });
-
- auto* s_type = Structure("my_struct", {Member("inner", ty.Of(inner_struct))});
-
- auto* var = Var("ident", ty.Of(s_type));
- auto* expr = MemberAccessor(MemberAccessor("ident", "inner"), "b");
- WrapInFunction(var, expr);
-
- spirv::Builder& b = Build();
-
- b.push_function(Function{});
- ASSERT_TRUE(b.GenerateFunctionVariable(var)) << b.error();
-
- EXPECT_EQ(b.GenerateAccessorExpression(expr), 11u);
-
- EXPECT_EQ(DumpInstructions(b.types()), R"(%5 = OpTypeFloat 32
+ // inner_struct {
+ // a : f32
+ // b : f32
+ // }
+ // my_struct {
+ // inner : inner_struct
+ // }
+ //
+ // var ident : my_struct
+ // ident.inner.a
+ auto* inner_struct = Structure("Inner", {
+ Member("a", ty.f32()),
+ Member("b", ty.f32()),
+ });
+
+ auto* s_type = Structure("my_struct", {Member("inner", ty.Of(inner_struct))});
+
+ auto* var = Var("ident", ty.Of(s_type));
+ auto* expr = MemberAccessor(MemberAccessor("ident", "inner"), "b");
+ WrapInFunction(var, expr);
+
+ spirv::Builder& b = Build();
+
+ b.push_function(Function{});
+ ASSERT_TRUE(b.GenerateFunctionVariable(var)) << b.error();
+
+ EXPECT_EQ(b.GenerateAccessorExpression(expr), 11u);
+
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%5 = OpTypeFloat 32
%4 = OpTypeStruct %5 %5
%3 = OpTypeStruct %4
%2 = OpTypePointer Function %3
@@ -289,125 +291,124 @@ TEST_F(BuilderTest, MemberAccessor_Nested) {
%9 = OpConstant %7 1
%10 = OpTypePointer Function %5
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].variables()),
- R"(%1 = OpVariable %2 Function %6
+ EXPECT_EQ(DumpInstructions(b.functions()[0].variables()),
+ R"(%1 = OpVariable %2 Function %6
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(%11 = OpAccessChain %10 %1 %8 %9
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(%11 = OpAccessChain %10 %1 %8 %9
)");
}
TEST_F(BuilderTest, MemberAccessor_NonPointer) {
- // my_struct {
- // a : f32
- // b : f32
- // }
- // let ident : my_struct = my_struct();
- // ident.b
+ // my_struct {
+ // a : f32
+ // b : f32
+ // }
+ // let ident : my_struct = my_struct();
+ // ident.b
- auto* s = Structure("my_struct", {
- Member("a", ty.f32()),
- Member("b", ty.f32()),
- });
+ auto* s = Structure("my_struct", {
+ Member("a", ty.f32()),
+ Member("b", ty.f32()),
+ });
- auto* var = Const("ident", ty.Of(s), Construct(ty.Of(s), 0.f, 0.f));
+ auto* var = Let("ident", ty.Of(s), Construct(ty.Of(s), 0_f, 0_f));
- auto* expr = MemberAccessor("ident", "b");
- WrapInFunction(var, expr);
+ auto* expr = MemberAccessor("ident", "b");
+ WrapInFunction(var, expr);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- ASSERT_TRUE(b.GenerateFunctionVariable(var)) << b.error();
+ b.push_function(Function{});
+ ASSERT_TRUE(b.GenerateFunctionVariable(var)) << b.error();
- EXPECT_EQ(b.GenerateAccessorExpression(expr), 5u);
+ EXPECT_EQ(b.GenerateAccessorExpression(expr), 5u);
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
%1 = OpTypeStruct %2 %2
-%3 = OpConstant %2 0
+%3 = OpConstantNull %2
%4 = OpConstantComposite %1 %3 %3
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(%5 = OpCompositeExtract %2 %4 1
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(%5 = OpCompositeExtract %2 %4 1
)");
}
TEST_F(BuilderTest, MemberAccessor_Nested_NonPointer) {
- // inner_struct {
- // a : f32
- // b : f32
- // }
- // my_struct {
- // inner : inner_struct
- // }
- //
- // let ident : my_struct = my_struct();
- // ident.inner.a
- auto* inner_struct = Structure("Inner", {
- Member("a", ty.f32()),
- Member("b", ty.f32()),
- });
-
- auto* s_type = Structure("my_struct", {Member("inner", ty.Of(inner_struct))});
-
- auto* var =
- Const("ident", ty.Of(s_type),
- Construct(ty.Of(s_type), Construct(ty.Of(inner_struct), 0.f, 0.f)));
- auto* expr = MemberAccessor(MemberAccessor("ident", "inner"), "b");
- WrapInFunction(var, expr);
-
- spirv::Builder& b = Build();
-
- b.push_function(Function{});
- ASSERT_TRUE(b.GenerateFunctionVariable(var)) << b.error();
-
- EXPECT_EQ(b.GenerateAccessorExpression(expr), 8u);
-
- EXPECT_EQ(DumpInstructions(b.types()), R"(%3 = OpTypeFloat 32
+ // inner_struct {
+ // a : f32
+ // b : f32
+ // }
+ // my_struct {
+ // inner : inner_struct
+ // }
+ //
+ // let ident : my_struct = my_struct();
+ // ident.inner.a
+ auto* inner_struct = Structure("Inner", {
+ Member("a", ty.f32()),
+ Member("b", ty.f32()),
+ });
+
+ auto* s_type = Structure("my_struct", {Member("inner", ty.Of(inner_struct))});
+
+ auto* var = Let("ident", ty.Of(s_type),
+ Construct(ty.Of(s_type), Construct(ty.Of(inner_struct), 0_f, 0_f)));
+ auto* expr = MemberAccessor(MemberAccessor("ident", "inner"), "b");
+ WrapInFunction(var, expr);
+
+ spirv::Builder& b = Build();
+
+ b.push_function(Function{});
+ ASSERT_TRUE(b.GenerateFunctionVariable(var)) << b.error();
+
+ EXPECT_EQ(b.GenerateAccessorExpression(expr), 8u);
+
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%3 = OpTypeFloat 32
%2 = OpTypeStruct %3 %3
%1 = OpTypeStruct %2
-%4 = OpConstant %3 0
+%4 = OpConstantNull %3
%5 = OpConstantComposite %2 %4 %4
%6 = OpConstantComposite %1 %5
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(%7 = OpCompositeExtract %2 %6 0
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(%7 = OpCompositeExtract %2 %6 0
%8 = OpCompositeExtract %3 %7 1
)");
}
TEST_F(BuilderTest, MemberAccessor_Nested_WithAlias) {
- // struct Inner {
- // a : f32
- // b : f32
- // };
- // type Alias = Inner;
- // my_struct {
- // inner : Inner
- // }
- //
- // var ident : my_struct
- // ident.inner.a
- auto* inner_struct = Structure("Inner", {
- Member("a", ty.f32()),
- Member("b", ty.f32()),
- });
-
- auto* alias = Alias("Alias", ty.Of(inner_struct));
- auto* s_type = Structure("Outer", {Member("inner", ty.Of(alias))});
-
- auto* var = Var("ident", ty.Of(s_type));
- auto* expr = MemberAccessor(MemberAccessor("ident", "inner"), "a");
- WrapInFunction(var, expr);
-
- spirv::Builder& b = Build();
-
- b.push_function(Function{});
- ASSERT_TRUE(b.GenerateFunctionVariable(var)) << b.error();
-
- EXPECT_EQ(b.GenerateAccessorExpression(expr), 10u);
-
- EXPECT_EQ(DumpInstructions(b.types()), R"(%5 = OpTypeFloat 32
+ // struct Inner {
+ // a : f32
+ // b : f32
+ // };
+ // type Alias = Inner;
+ // my_struct {
+ // inner : Inner
+ // }
+ //
+ // var ident : my_struct
+ // ident.inner.a
+ auto* inner_struct = Structure("Inner", {
+ Member("a", ty.f32()),
+ Member("b", ty.f32()),
+ });
+
+ auto* alias = Alias("Alias", ty.Of(inner_struct));
+ auto* s_type = Structure("Outer", {Member("inner", ty.Of(alias))});
+
+ auto* var = Var("ident", ty.Of(s_type));
+ auto* expr = MemberAccessor(MemberAccessor("ident", "inner"), "a");
+ WrapInFunction(var, expr);
+
+ spirv::Builder& b = Build();
+
+ b.push_function(Function{});
+ ASSERT_TRUE(b.GenerateFunctionVariable(var)) << b.error();
+
+ EXPECT_EQ(b.GenerateAccessorExpression(expr), 10u);
+
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%5 = OpTypeFloat 32
%4 = OpTypeStruct %5 %5
%3 = OpTypeStruct %4
%2 = OpTypePointer Function %3
@@ -416,44 +417,43 @@ TEST_F(BuilderTest, MemberAccessor_Nested_WithAlias) {
%8 = OpConstant %7 0
%9 = OpTypePointer Function %5
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].variables()),
- R"(%1 = OpVariable %2 Function %6
+ EXPECT_EQ(DumpInstructions(b.functions()[0].variables()),
+ R"(%1 = OpVariable %2 Function %6
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(%10 = OpAccessChain %9 %1 %8 %8
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(%10 = OpAccessChain %9 %1 %8 %8
)");
}
TEST_F(BuilderTest, MemberAccessor_Nested_Assignment_LHS) {
- // inner_struct {
- // a : f32
- // }
- // my_struct {
- // inner : inner_struct
- // }
- //
- // var ident : my_struct
- // ident.inner.a = 2.0f;
- auto* inner_struct = Structure("Inner", {
- Member("a", ty.f32()),
- Member("b", ty.f32()),
- });
-
- auto* s_type = Structure("my_struct", {Member("inner", ty.Of(inner_struct))});
-
- auto* var = Var("ident", ty.Of(s_type));
- auto* expr =
- Assign(MemberAccessor(MemberAccessor("ident", "inner"), "a"), Expr(2.0f));
- WrapInFunction(var, expr);
-
- spirv::Builder& b = Build();
-
- b.push_function(Function{});
- ASSERT_TRUE(b.GenerateFunctionVariable(var)) << b.error();
-
- EXPECT_TRUE(b.GenerateAssignStatement(expr)) << b.error();
-
- EXPECT_EQ(DumpInstructions(b.types()), R"(%5 = OpTypeFloat 32
+ // inner_struct {
+ // a : f32
+ // }
+ // my_struct {
+ // inner : inner_struct
+ // }
+ //
+ // var ident : my_struct
+ // ident.inner.a = 2.0f;
+ auto* inner_struct = Structure("Inner", {
+ Member("a", ty.f32()),
+ Member("b", ty.f32()),
+ });
+
+ auto* s_type = Structure("my_struct", {Member("inner", ty.Of(inner_struct))});
+
+ auto* var = Var("ident", ty.Of(s_type));
+ auto* expr = Assign(MemberAccessor(MemberAccessor("ident", "inner"), "a"), Expr(2_f));
+ WrapInFunction(var, expr);
+
+ spirv::Builder& b = Build();
+
+ b.push_function(Function{});
+ ASSERT_TRUE(b.GenerateFunctionVariable(var)) << b.error();
+
+ EXPECT_TRUE(b.GenerateAssignStatement(expr)) << b.error();
+
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%5 = OpTypeFloat 32
%4 = OpTypeStruct %5 %5
%3 = OpTypeStruct %4
%2 = OpTypePointer Function %3
@@ -463,49 +463,49 @@ TEST_F(BuilderTest, MemberAccessor_Nested_Assignment_LHS) {
%9 = OpTypePointer Function %5
%11 = OpConstant %5 2
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].variables()),
- R"(%1 = OpVariable %2 Function %6
+ EXPECT_EQ(DumpInstructions(b.functions()[0].variables()),
+ R"(%1 = OpVariable %2 Function %6
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(%10 = OpAccessChain %9 %1 %8 %8
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(%10 = OpAccessChain %9 %1 %8 %8
OpStore %10 %11
)");
}
TEST_F(BuilderTest, MemberAccessor_Nested_Assignment_RHS) {
- // inner_struct {
- // a : f32
- // }
- // my_struct {
- // inner : inner_struct
- // }
- //
- // var ident : my_struct
- // var store : f32 = ident.inner.a
+ // inner_struct {
+ // a : f32
+ // }
+ // my_struct {
+ // inner : inner_struct
+ // }
+ //
+ // var ident : my_struct
+ // var store : f32 = ident.inner.a
- auto* inner_struct = Structure("Inner", {
- Member("a", ty.f32()),
- Member("b", ty.f32()),
- });
+ auto* inner_struct = Structure("Inner", {
+ Member("a", ty.f32()),
+ Member("b", ty.f32()),
+ });
- auto* s_type = Structure("my_struct", {Member("inner", ty.Of(inner_struct))});
+ auto* s_type = Structure("my_struct", {Member("inner", ty.Of(inner_struct))});
- auto* var = Var("ident", ty.Of(s_type));
- auto* store = Var("store", ty.f32());
+ auto* var = Var("ident", ty.Of(s_type));
+ auto* store = Var("store", ty.f32());
- auto* rhs = MemberAccessor(MemberAccessor("ident", "inner"), "a");
- auto* expr = Assign("store", rhs);
- WrapInFunction(var, store, expr);
+ auto* rhs = MemberAccessor(MemberAccessor("ident", "inner"), "a");
+ auto* expr = Assign("store", rhs);
+ WrapInFunction(var, store, expr);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- ASSERT_TRUE(b.GenerateFunctionVariable(var)) << b.error();
- ASSERT_TRUE(b.GenerateFunctionVariable(store)) << b.error();
+ b.push_function(Function{});
+ ASSERT_TRUE(b.GenerateFunctionVariable(var)) << b.error();
+ ASSERT_TRUE(b.GenerateFunctionVariable(store)) << b.error();
- EXPECT_TRUE(b.GenerateAssignStatement(expr)) << b.error();
+ EXPECT_TRUE(b.GenerateAssignStatement(expr)) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%5 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%5 = OpTypeFloat 32
%4 = OpTypeStruct %5 %5
%3 = OpTypeStruct %4
%2 = OpTypePointer Function %3
@@ -515,33 +515,33 @@ TEST_F(BuilderTest, MemberAccessor_Nested_Assignment_RHS) {
%10 = OpTypeInt 32 0
%11 = OpConstant %10 0
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].variables()),
- R"(%1 = OpVariable %2 Function %6
+ EXPECT_EQ(DumpInstructions(b.functions()[0].variables()),
+ R"(%1 = OpVariable %2 Function %6
%7 = OpVariable %8 Function %9
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(%12 = OpAccessChain %8 %1 %11 %11
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(%12 = OpAccessChain %8 %1 %11 %11
%13 = OpLoad %5 %12
OpStore %7 %13
)");
}
TEST_F(BuilderTest, MemberAccessor_Swizzle_Single) {
- // ident.y
+ // ident.y
- auto* var = Var("ident", ty.vec3<f32>());
+ auto* var = Var("ident", ty.vec3<f32>());
- auto* expr = MemberAccessor("ident", "y");
- WrapInFunction(var, expr);
+ auto* expr = MemberAccessor("ident", "y");
+ WrapInFunction(var, expr);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- ASSERT_TRUE(b.GenerateFunctionVariable(var)) << b.error();
+ b.push_function(Function{});
+ ASSERT_TRUE(b.GenerateFunctionVariable(var)) << b.error();
- EXPECT_EQ(b.GenerateAccessorExpression(expr), 9u);
+ EXPECT_EQ(b.GenerateAccessorExpression(expr), 9u);
- EXPECT_EQ(DumpInstructions(b.types()), R"(%4 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%4 = OpTypeFloat 32
%3 = OpTypeVector %4 3
%2 = OpTypePointer Function %3
%5 = OpConstantNull %3
@@ -549,176 +549,174 @@ TEST_F(BuilderTest, MemberAccessor_Swizzle_Single) {
%7 = OpConstant %6 1
%8 = OpTypePointer Function %4
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].variables()),
- R"(%1 = OpVariable %2 Function %5
+ EXPECT_EQ(DumpInstructions(b.functions()[0].variables()),
+ R"(%1 = OpVariable %2 Function %5
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(%9 = OpAccessChain %8 %1 %7
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(%9 = OpAccessChain %8 %1 %7
)");
}
TEST_F(BuilderTest, MemberAccessor_Swizzle_MultipleNames) {
- // ident.yx
+ // ident.yx
- auto* var = Var("ident", ty.vec3<f32>());
+ auto* var = Var("ident", ty.vec3<f32>());
- auto* expr = MemberAccessor("ident", "yx");
- WrapInFunction(var, expr);
+ auto* expr = MemberAccessor("ident", "yx");
+ WrapInFunction(var, expr);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- ASSERT_TRUE(b.GenerateFunctionVariable(var)) << b.error();
+ b.push_function(Function{});
+ ASSERT_TRUE(b.GenerateFunctionVariable(var)) << b.error();
- EXPECT_EQ(b.GenerateAccessorExpression(expr), 8u);
+ EXPECT_EQ(b.GenerateAccessorExpression(expr), 8u);
- EXPECT_EQ(DumpInstructions(b.types()), R"(%4 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%4 = OpTypeFloat 32
%3 = OpTypeVector %4 3
%2 = OpTypePointer Function %3
%5 = OpConstantNull %3
%6 = OpTypeVector %4 2
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].variables()),
- R"(%1 = OpVariable %2 Function %5
+ EXPECT_EQ(DumpInstructions(b.functions()[0].variables()),
+ R"(%1 = OpVariable %2 Function %5
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(%7 = OpLoad %3 %1
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(%7 = OpLoad %3 %1
%8 = OpVectorShuffle %6 %7 %7 1 0
)");
}
TEST_F(BuilderTest, MemberAccessor_Swizzle_of_Swizzle) {
- // ident.yxz.xz
+ // ident.yxz.xz
- auto* var = Var("ident", ty.vec3<f32>());
+ auto* var = Var("ident", ty.vec3<f32>());
- auto* expr = MemberAccessor(MemberAccessor("ident", "yxz"), "xz");
- WrapInFunction(var, expr);
+ auto* expr = MemberAccessor(MemberAccessor("ident", "yxz"), "xz");
+ WrapInFunction(var, expr);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- ASSERT_TRUE(b.GenerateFunctionVariable(var)) << b.error();
+ b.push_function(Function{});
+ ASSERT_TRUE(b.GenerateFunctionVariable(var)) << b.error();
- EXPECT_EQ(b.GenerateAccessorExpression(expr), 9u);
+ EXPECT_EQ(b.GenerateAccessorExpression(expr), 9u);
- EXPECT_EQ(DumpInstructions(b.types()), R"(%4 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%4 = OpTypeFloat 32
%3 = OpTypeVector %4 3
%2 = OpTypePointer Function %3
%5 = OpConstantNull %3
%8 = OpTypeVector %4 2
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].variables()),
- R"(%1 = OpVariable %2 Function %5
+ EXPECT_EQ(DumpInstructions(b.functions()[0].variables()),
+ R"(%1 = OpVariable %2 Function %5
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(%6 = OpLoad %3 %1
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(%6 = OpLoad %3 %1
%7 = OpVectorShuffle %3 %6 %6 1 0 2
%9 = OpVectorShuffle %8 %7 %7 0 2
)");
}
TEST_F(BuilderTest, MemberAccessor_Member_of_Swizzle) {
- // ident.yxz.x
+ // ident.yxz.x
- auto* var = Var("ident", ty.vec3<f32>());
+ auto* var = Var("ident", ty.vec3<f32>());
- auto* expr = MemberAccessor(MemberAccessor("ident", "yxz"), "x");
- WrapInFunction(var, expr);
+ auto* expr = MemberAccessor(MemberAccessor("ident", "yxz"), "x");
+ WrapInFunction(var, expr);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- ASSERT_TRUE(b.GenerateFunctionVariable(var)) << b.error();
+ b.push_function(Function{});
+ ASSERT_TRUE(b.GenerateFunctionVariable(var)) << b.error();
- EXPECT_EQ(b.GenerateAccessorExpression(expr), 8u);
+ EXPECT_EQ(b.GenerateAccessorExpression(expr), 8u);
- EXPECT_EQ(DumpInstructions(b.types()), R"(%4 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%4 = OpTypeFloat 32
%3 = OpTypeVector %4 3
%2 = OpTypePointer Function %3
%5 = OpConstantNull %3
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].variables()),
- R"(%1 = OpVariable %2 Function %5
+ EXPECT_EQ(DumpInstructions(b.functions()[0].variables()),
+ R"(%1 = OpVariable %2 Function %5
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(%6 = OpLoad %3 %1
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(%6 = OpLoad %3 %1
%7 = OpVectorShuffle %3 %6 %6 1 0 2
%8 = OpCompositeExtract %4 %7 0
)");
}
TEST_F(BuilderTest, MemberAccessor_Array_of_Swizzle) {
- // index.yxz[1]
+ // index.yxz[1i]
- auto* var = Var("ident", ty.vec3<f32>());
+ auto* var = Var("ident", ty.vec3<f32>());
- auto* expr = IndexAccessor(MemberAccessor("ident", "yxz"), 1);
- WrapInFunction(var, expr);
+ auto* expr = IndexAccessor(MemberAccessor("ident", "yxz"), 1_i);
+ WrapInFunction(var, expr);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- ASSERT_TRUE(b.GenerateFunctionVariable(var)) << b.error();
+ b.push_function(Function{});
+ ASSERT_TRUE(b.GenerateFunctionVariable(var)) << b.error();
- EXPECT_EQ(b.GenerateAccessorExpression(expr), 10u);
+ EXPECT_EQ(b.GenerateAccessorExpression(expr), 10u);
- EXPECT_EQ(DumpInstructions(b.types()), R"(%4 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%4 = OpTypeFloat 32
%3 = OpTypeVector %4 3
%2 = OpTypePointer Function %3
%5 = OpConstantNull %3
%8 = OpTypeInt 32 1
%9 = OpConstant %8 1
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].variables()),
- R"(%1 = OpVariable %2 Function %5
+ EXPECT_EQ(DumpInstructions(b.functions()[0].variables()),
+ R"(%1 = OpVariable %2 Function %5
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(%6 = OpLoad %3 %1
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(%6 = OpLoad %3 %1
%7 = OpVectorShuffle %3 %6 %6 1 0 2
%10 = OpCompositeExtract %4 %7 1
)");
}
TEST_F(BuilderTest, IndexAccessor_Mixed_ArrayAndMember) {
- // type C = struct {
- // baz : vec3<f32>
- // }
- // type B = struct {
- // bar : C;
- // }
- // type A = struct {
- // foo : array<B, 3>
- // }
- // var index : array<A, 2>
- // index[0].foo[2].bar.baz.yx
-
- auto* c_type = Structure("C", {Member("baz", ty.vec3<f32>())});
-
- auto* b_type = Structure("B", {Member("bar", ty.Of(c_type))});
- auto* b_ary_type = ty.array(ty.Of(b_type), 3);
- auto* a_type = Structure("A", {Member("foo", b_ary_type)});
-
- auto* a_ary_type = ty.array(ty.Of(a_type), 2);
- auto* var = Var("index", a_ary_type);
- auto* expr = MemberAccessor(
- MemberAccessor(
- MemberAccessor(
- IndexAccessor(MemberAccessor(IndexAccessor("index", 0), "foo"),
- 2),
- "bar"),
- "baz"),
- "yx");
- WrapInFunction(var, expr);
-
- spirv::Builder& b = Build();
-
- b.push_function(Function{});
- ASSERT_TRUE(b.GenerateFunctionVariable(var)) << b.error();
-
- EXPECT_EQ(b.GenerateAccessorExpression(expr), 22u);
-
- EXPECT_EQ(DumpInstructions(b.types()), R"(%9 = OpTypeFloat 32
+ // type C = struct {
+ // baz : vec3<f32>
+ // }
+ // type B = struct {
+ // bar : C;
+ // }
+ // type A = struct {
+ // foo : array<B, 3>
+ // }
+ // var index : array<A, 2u>
+ // index[0i].foo[2i].bar.baz.yx
+
+ auto* c_type = Structure("C", {Member("baz", ty.vec3<f32>())});
+
+ auto* b_type = Structure("B", {Member("bar", ty.Of(c_type))});
+ auto* b_ary_type = ty.array(ty.Of(b_type), 3_u);
+ auto* a_type = Structure("A", {Member("foo", b_ary_type)});
+
+ auto* a_ary_type = ty.array(ty.Of(a_type), 2_u);
+ auto* var = Var("index", a_ary_type);
+ auto* expr = MemberAccessor(
+ MemberAccessor(
+ MemberAccessor(IndexAccessor(MemberAccessor(IndexAccessor("index", 0_i), "foo"), 2_i),
+ "bar"),
+ "baz"),
+ "yx");
+ WrapInFunction(var, expr);
+
+ spirv::Builder& b = Build();
+
+ b.push_function(Function{});
+ ASSERT_TRUE(b.GenerateFunctionVariable(var)) << b.error();
+
+ EXPECT_EQ(b.GenerateAccessorExpression(expr), 22u);
+
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%9 = OpTypeFloat 32
%8 = OpTypeVector %9 3
%7 = OpTypeStruct %8
%6 = OpTypeStruct %7
@@ -731,42 +729,41 @@ TEST_F(BuilderTest, IndexAccessor_Mixed_ArrayAndMember) {
%2 = OpTypePointer Function %3
%13 = OpConstantNull %3
%14 = OpTypeInt 32 1
-%15 = OpConstant %14 0
+%15 = OpConstantNull %14
%16 = OpConstant %10 0
%17 = OpConstant %14 2
%18 = OpTypePointer Function %8
%20 = OpTypeVector %9 2
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].variables()),
- R"(%1 = OpVariable %2 Function %13
+ EXPECT_EQ(DumpInstructions(b.functions()[0].variables()),
+ R"(%1 = OpVariable %2 Function %13
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(%19 = OpAccessChain %18 %1 %15 %16 %17 %16 %16
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(%19 = OpAccessChain %18 %1 %15 %16 %17 %16 %16
%21 = OpLoad %8 %19
%22 = OpVectorShuffle %20 %21 %21 1 0
)");
}
TEST_F(BuilderTest, IndexAccessor_Of_Vec) {
- // let pos : array<vec2<f32>, 3> = array<vec2<f32>, 3>(
- // vec2<f32>(0.0, 0.5),
- // vec2<f32>(-0.5, -0.5),
- // vec2<f32>(0.5, -0.5));
- // pos[1]
+ // let pos : array<vec2<f32>, 3u> = array<vec2<f32>, 3u>(
+ // vec2<f32>(0.0, 0.5),
+ // vec2<f32>(-0.5, -0.5),
+ // vec2<f32>(0.5, -0.5));
+ // pos[1u]
- auto* var =
- Const("pos", ty.array(ty.vec2<f32>(), 3),
- Construct(ty.array(ty.vec2<f32>(), 3), vec2<f32>(0.0f, 0.5f),
- vec2<f32>(-0.5f, -0.5f), vec2<f32>(0.5f, -0.5f)));
+ auto* var = Let("pos", ty.array(ty.vec2<f32>(), 3_u),
+ Construct(ty.array(ty.vec2<f32>(), 3_u), vec2<f32>(0_f, 0.5_f),
+ vec2<f32>(-0.5_f, -0.5_f), vec2<f32>(0.5_f, -0.5_f)));
- auto* expr = IndexAccessor("pos", 1u);
- WrapInFunction(var, expr);
+ auto* expr = IndexAccessor("pos", 1_u);
+ WrapInFunction(var, expr);
- spirv::Builder& b = SanitizeAndBuild();
+ spirv::Builder& b = SanitizeAndBuild();
- ASSERT_TRUE(b.Build());
+ ASSERT_TRUE(b.Build());
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeVoid
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeVoid
%1 = OpTypeFunction %2
%7 = OpTypeFloat 32
%6 = OpTypeVector %7 2
@@ -782,35 +779,34 @@ TEST_F(BuilderTest, IndexAccessor_Of_Vec) {
%16 = OpConstantComposite %5 %12 %14 %15
%17 = OpConstant %8 1
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].variables()), R"()");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(%18 = OpCompositeExtract %6 %16 1
+ EXPECT_EQ(DumpInstructions(b.functions()[0].variables()), R"()");
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(%18 = OpCompositeExtract %6 %16 1
OpReturn
)");
- Validate(b);
+ Validate(b);
}
TEST_F(BuilderTest, IndexAccessor_Of_Array_Of_f32) {
- // let pos : array<array<f32, 2>, 3> = array<vec2<f32, 2>, 3>(
- // array<f32, 2>(0.0, 0.5),
- // array<f32, 2>(-0.5, -0.5),
- // array<f32, 2>(0.5, -0.5));
- // pos[2][1]
+ // let pos : array<array<f32, 2>, 3u> = array<vec2<f32, 2>, 3u>(
+ // array<f32, 2>(0.0, 0.5),
+ // array<f32, 2>(-0.5, -0.5),
+ // array<f32, 2>(0.5, -0.5));
+ // pos[2u][1u]
- auto* var =
- Const("pos", ty.array(ty.vec2<f32>(), 3),
- Construct(ty.array(ty.vec2<f32>(), 3), vec2<f32>(0.0f, 0.5f),
- vec2<f32>(-0.5f, -0.5f), vec2<f32>(0.5f, -0.5f)));
+ auto* var = Let("pos", ty.array(ty.vec2<f32>(), 3_u),
+ Construct(ty.array(ty.vec2<f32>(), 3_u), vec2<f32>(0_f, 0.5_f),
+ vec2<f32>(-0.5_f, -0.5_f), vec2<f32>(0.5_f, -0.5_f)));
- auto* expr = IndexAccessor(IndexAccessor("pos", 2u), 1u);
- WrapInFunction(var, expr);
+ auto* expr = IndexAccessor(IndexAccessor("pos", 2_u), 1_u);
+ WrapInFunction(var, expr);
- spirv::Builder& b = SanitizeAndBuild();
+ spirv::Builder& b = SanitizeAndBuild();
- ASSERT_TRUE(b.Build());
+ ASSERT_TRUE(b.Build());
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeVoid
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeVoid
%1 = OpTypeFunction %2
%7 = OpTypeFloat 32
%6 = OpTypeVector %7 2
@@ -827,32 +823,32 @@ TEST_F(BuilderTest, IndexAccessor_Of_Array_Of_f32) {
%17 = OpConstant %8 2
%19 = OpConstant %8 1
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].variables()), R"()");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(%18 = OpCompositeExtract %6 %16 2
+ EXPECT_EQ(DumpInstructions(b.functions()[0].variables()), R"()");
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(%18 = OpCompositeExtract %6 %16 2
%20 = OpCompositeExtract %7 %18 1
OpReturn
)");
- Validate(b);
+ Validate(b);
}
TEST_F(BuilderTest, IndexAccessor_Vec_Literal) {
- // let pos : vec2<f32> = vec2<f32>(0.0, 0.5);
- // pos[1]
+ // let pos : vec2<f32> = vec2<f32>(0.0, 0.5);
+ // pos[1]
- auto* var = Const("pos", ty.vec2<f32>(), vec2<f32>(0.0f, 0.5f));
+ auto* var = Let("pos", ty.vec2<f32>(), vec2<f32>(0_f, 0.5_f));
- auto* expr = IndexAccessor("pos", 1u);
- WrapInFunction(var, expr);
+ auto* expr = IndexAccessor("pos", 1_u);
+ WrapInFunction(var, expr);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- ASSERT_TRUE(b.GenerateGlobalVariable(var)) << b.error();
- EXPECT_EQ(b.GenerateAccessorExpression(expr), 8u) << b.error();
+ b.push_function(Function{});
+ ASSERT_TRUE(b.GenerateGlobalVariable(var)) << b.error();
+ EXPECT_EQ(b.GenerateAccessorExpression(expr), 8u) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
%1 = OpTypeVector %2 2
%3 = OpConstant %2 0
%4 = OpConstant %2 0.5
@@ -860,31 +856,31 @@ TEST_F(BuilderTest, IndexAccessor_Vec_Literal) {
%6 = OpTypeInt 32 0
%7 = OpConstant %6 1
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].variables()), "");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(%8 = OpCompositeExtract %2 %5 1
+ EXPECT_EQ(DumpInstructions(b.functions()[0].variables()), "");
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(%8 = OpCompositeExtract %2 %5 1
)");
}
TEST_F(BuilderTest, IndexAccessor_Vec_Dynamic) {
- // let pos : vec2<f32> = vec2<f32>(0.0, 0.5);
- // idx : i32
- // pos[idx]
+ // let pos : vec2<f32> = vec2<f32>(0.0, 0.5);
+ // idx : i32
+ // pos[idx]
- auto* var = Const("pos", ty.vec2<f32>(), vec2<f32>(0.0f, 0.5f));
- auto* idx = Var("idx", ty.i32());
- auto* expr = IndexAccessor("pos", idx);
+ auto* var = Let("pos", ty.vec2<f32>(), vec2<f32>(0_f, 0.5_f));
+ auto* idx = Var("idx", ty.i32());
+ auto* expr = IndexAccessor("pos", idx);
- WrapInFunction(var, idx, expr);
+ WrapInFunction(var, idx, expr);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- ASSERT_TRUE(b.GenerateGlobalVariable(var)) << b.error();
- ASSERT_TRUE(b.GenerateFunctionVariable(idx)) << b.error();
- EXPECT_EQ(b.GenerateAccessorExpression(expr), 11u) << b.error();
+ b.push_function(Function{});
+ ASSERT_TRUE(b.GenerateGlobalVariable(var)) << b.error();
+ ASSERT_TRUE(b.GenerateFunctionVariable(idx)) << b.error();
+ EXPECT_EQ(b.GenerateAccessorExpression(expr), 11u) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
%1 = OpTypeVector %2 2
%3 = OpConstant %2 0
%4 = OpConstant %2 0.5
@@ -893,74 +889,72 @@ TEST_F(BuilderTest, IndexAccessor_Vec_Dynamic) {
%7 = OpTypePointer Function %8
%9 = OpConstantNull %8
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].variables()),
- R"(%6 = OpVariable %7 Function %9
+ EXPECT_EQ(DumpInstructions(b.functions()[0].variables()),
+ R"(%6 = OpVariable %7 Function %9
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(%10 = OpLoad %8 %6
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(%10 = OpLoad %8 %6
%11 = OpVectorExtractDynamic %2 %5 %10
)");
}
TEST_F(BuilderTest, IndexAccessor_Array_Literal) {
- // let a : array<f32, 3>;
- // a[2]
+ // let a : array<f32, 3u>;
+ // a[2i]
- auto* var = Const("a", ty.array<f32, 3>(),
- Construct(ty.array<f32, 3>(), 0.0f, 0.5f, 1.0f));
- auto* expr = IndexAccessor("a", 2);
- WrapInFunction(var, expr);
+ auto* var = Let("a", ty.array<f32, 3>(), Construct(ty.array<f32, 3>(), 0_f, 0.5_f, 1_f));
+ auto* expr = IndexAccessor("a", 2_i);
+ WrapInFunction(var, expr);
- spirv::Builder& b = SanitizeAndBuild();
+ spirv::Builder& b = SanitizeAndBuild();
- ASSERT_TRUE(b.Build());
+ ASSERT_TRUE(b.Build());
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeVoid
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeVoid
%1 = OpTypeFunction %2
%6 = OpTypeFloat 32
%7 = OpTypeInt 32 0
%8 = OpConstant %7 3
%5 = OpTypeArray %6 %8
-%9 = OpConstant %6 0
+%9 = OpConstantNull %6
%10 = OpConstant %6 0.5
%11 = OpConstant %6 1
%12 = OpConstantComposite %5 %9 %10 %11
%13 = OpTypeInt 32 1
%14 = OpConstant %13 2
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].variables()), "");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(%15 = OpCompositeExtract %6 %12 2
+ EXPECT_EQ(DumpInstructions(b.functions()[0].variables()), "");
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(%15 = OpCompositeExtract %6 %12 2
OpReturn
)");
- Validate(b);
+ Validate(b);
}
TEST_F(BuilderTest, IndexAccessor_Array_Dynamic) {
- // let a : array<f32, 3>;
- // idx : i32
- // a[idx]
+ // let a : array<f32, 3>;
+ // idx : i32
+ // a[idx]
- auto* var = Const("a", ty.array<f32, 3>(),
- Construct(ty.array<f32, 3>(), 0.0f, 0.5f, 1.0f));
+ auto* var = Let("a", ty.array<f32, 3>(), Construct(ty.array<f32, 3>(), 0_f, 0.5_f, 1_f));
- auto* idx = Var("idx", ty.i32());
- auto* expr = IndexAccessor("a", idx);
+ auto* idx = Var("idx", ty.i32());
+ auto* expr = IndexAccessor("a", idx);
- WrapInFunction(var, idx, expr);
+ WrapInFunction(var, idx, expr);
- spirv::Builder& b = SanitizeAndBuild();
+ spirv::Builder& b = SanitizeAndBuild();
- ASSERT_TRUE(b.Build());
+ ASSERT_TRUE(b.Build());
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeVoid
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeVoid
%1 = OpTypeFunction %2
%6 = OpTypeFloat 32
%7 = OpTypeInt 32 0
%8 = OpConstant %7 3
%5 = OpTypeArray %6 %8
-%9 = OpConstant %6 0
+%9 = OpConstantNull %6
%10 = OpConstant %6 0.5
%11 = OpConstant %6 1
%12 = OpConstantComposite %5 %9 %10 %11
@@ -971,41 +965,40 @@ TEST_F(BuilderTest, IndexAccessor_Array_Dynamic) {
%19 = OpConstantNull %5
%21 = OpTypePointer Function %6
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].variables()),
- R"(%13 = OpVariable %14 Function %16
+ EXPECT_EQ(DumpInstructions(b.functions()[0].variables()),
+ R"(%13 = OpVariable %14 Function %16
%17 = OpVariable %18 Function %19
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(OpStore %17 %12
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(OpStore %17 %12
%20 = OpLoad %15 %13
%22 = OpAccessChain %21 %17 %20
%23 = OpLoad %6 %22
OpReturn
)");
- Validate(b);
+ Validate(b);
}
TEST_F(BuilderTest, IndexAccessor_Matrix_Dynamic) {
- // let a : mat2x2<f32>(vec2<f32>(1., 2.), vec2<f32>(3., 4.));
- // idx : i32
- // a[idx]
+ // let a : mat2x2<f32>(vec2<f32>(1., 2.), vec2<f32>(3., 4.));
+ // idx : i32
+ // a[idx]
- auto* var =
- Const("a", ty.mat2x2<f32>(),
- Construct(ty.mat2x2<f32>(), Construct(ty.vec2<f32>(), 1.f, 2.f),
- Construct(ty.vec2<f32>(), 3.f, 4.f)));
+ auto* var = Let("a", ty.mat2x2<f32>(),
+ Construct(ty.mat2x2<f32>(), Construct(ty.vec2<f32>(), 1_f, 2_f),
+ Construct(ty.vec2<f32>(), 3_f, 4_f)));
- auto* idx = Var("idx", ty.i32());
- auto* expr = IndexAccessor("a", idx);
+ auto* idx = Var("idx", ty.i32());
+ auto* expr = IndexAccessor("a", idx);
- WrapInFunction(var, idx, expr);
+ WrapInFunction(var, idx, expr);
- spirv::Builder& b = SanitizeAndBuild();
+ spirv::Builder& b = SanitizeAndBuild();
- ASSERT_TRUE(b.Build());
+ ASSERT_TRUE(b.Build());
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeVoid
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeVoid
%1 = OpTypeFunction %2
%7 = OpTypeFloat 32
%6 = OpTypeVector %7 2
@@ -1024,19 +1017,19 @@ TEST_F(BuilderTest, IndexAccessor_Matrix_Dynamic) {
%21 = OpConstantNull %5
%23 = OpTypePointer Function %6
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].variables()),
- R"(%15 = OpVariable %16 Function %18
+ EXPECT_EQ(DumpInstructions(b.functions()[0].variables()),
+ R"(%15 = OpVariable %16 Function %18
%19 = OpVariable %20 Function %21
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(OpStore %19 %14
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(OpStore %19 %14
%22 = OpLoad %17 %15
%24 = OpAccessChain %23 %19 %22
%25 = OpLoad %6 %24
OpReturn
)");
- Validate(b);
+ Validate(b);
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/writer/spirv/builder_assign_test.cc b/chromium/third_party/dawn/src/tint/writer/spirv/builder_assign_test.cc
index cd64741bc93..71c958c50dd 100644
--- a/chromium/third_party/dawn/src/tint/writer/spirv/builder_assign_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/spirv/builder_assign_test.cc
@@ -15,143 +15,141 @@
#include "src/tint/writer/spirv/spv_dump.h"
#include "src/tint/writer/spirv/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::spirv {
namespace {
using BuilderTest = TestHelper;
TEST_F(BuilderTest, Assign_Var) {
- auto* v = Global("var", ty.f32(), ast::StorageClass::kPrivate);
+ auto* v = Global("var", ty.f32(), ast::StorageClass::kPrivate);
- auto* assign = Assign("var", 1.f);
+ auto* assign = Assign("var", 1_f);
- WrapInFunction(assign);
+ WrapInFunction(assign);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- EXPECT_TRUE(b.GenerateGlobalVariable(v)) << b.error();
- ASSERT_FALSE(b.has_error()) << b.error();
+ b.push_function(Function{});
+ EXPECT_TRUE(b.GenerateGlobalVariable(v)) << b.error();
+ ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_TRUE(b.GenerateAssignStatement(assign)) << b.error();
- EXPECT_FALSE(b.has_error());
+ EXPECT_TRUE(b.GenerateAssignStatement(assign)) << b.error();
+ EXPECT_FALSE(b.has_error());
- EXPECT_EQ(DumpInstructions(b.types()), R"(%3 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%3 = OpTypeFloat 32
%2 = OpTypePointer Private %3
%4 = OpConstantNull %3
%1 = OpVariable %2 Private %4
%5 = OpConstant %3 1
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(OpStore %1 %5
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(OpStore %1 %5
)");
}
TEST_F(BuilderTest, Assign_Var_OutsideFunction_IsError) {
- auto* v = Global("var", ty.f32(), ast::StorageClass::kPrivate);
+ auto* v = Global("var", ty.f32(), ast::StorageClass::kPrivate);
- auto* assign = Assign("var", Expr(1.f));
+ auto* assign = Assign("var", Expr(1_f));
- WrapInFunction(assign);
+ WrapInFunction(assign);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- EXPECT_TRUE(b.GenerateGlobalVariable(v)) << b.error();
- ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_TRUE(b.GenerateGlobalVariable(v)) << b.error();
+ ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_FALSE(b.GenerateAssignStatement(assign)) << b.error();
- EXPECT_TRUE(b.has_error());
- EXPECT_EQ(b.error(),
- "Internal error: trying to add SPIR-V instruction 62 outside a "
- "function");
+ EXPECT_FALSE(b.GenerateAssignStatement(assign)) << b.error();
+ EXPECT_TRUE(b.has_error());
+ EXPECT_EQ(b.error(),
+ "Internal error: trying to add SPIR-V instruction 62 outside a "
+ "function");
}
TEST_F(BuilderTest, Assign_Var_ZeroConstructor) {
- auto* v = Global("var", ty.vec3<f32>(), ast::StorageClass::kPrivate);
+ auto* v = Global("var", ty.vec3<f32>(), ast::StorageClass::kPrivate);
- auto* val = vec3<f32>();
- auto* assign = Assign("var", val);
+ auto* val = vec3<f32>();
+ auto* assign = Assign("var", val);
- WrapInFunction(assign);
+ WrapInFunction(assign);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- EXPECT_TRUE(b.GenerateGlobalVariable(v)) << b.error();
- ASSERT_FALSE(b.has_error()) << b.error();
+ b.push_function(Function{});
+ EXPECT_TRUE(b.GenerateGlobalVariable(v)) << b.error();
+ ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_TRUE(b.GenerateAssignStatement(assign)) << b.error();
- EXPECT_FALSE(b.has_error());
+ EXPECT_TRUE(b.GenerateAssignStatement(assign)) << b.error();
+ EXPECT_FALSE(b.has_error());
- EXPECT_EQ(DumpInstructions(b.types()), R"(%4 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%4 = OpTypeFloat 32
%3 = OpTypeVector %4 3
%2 = OpTypePointer Private %3
%5 = OpConstantNull %3
%1 = OpVariable %2 Private %5
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(OpStore %1 %5
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(OpStore %1 %5
)");
}
-TEST_F(BuilderTest, Assign_Var_Complex_ConstructorWithExtract) {
- auto* init = vec3<f32>(vec2<f32>(1.f, 2.f), 3.f);
+TEST_F(BuilderTest, Assign_Var_Complex_ConstructorNestedVector) {
+ auto* init = vec3<f32>(vec2<f32>(1_f, 2_f), 3_f);
- auto* v = Global("var", ty.vec3<f32>(), ast::StorageClass::kPrivate);
+ auto* v = Global("var", ty.vec3<f32>(), ast::StorageClass::kPrivate);
- auto* assign = Assign("var", init);
+ auto* assign = Assign("var", init);
- WrapInFunction(assign);
+ WrapInFunction(assign);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- EXPECT_TRUE(b.GenerateGlobalVariable(v)) << b.error();
- ASSERT_FALSE(b.has_error()) << b.error();
+ b.push_function(Function{});
+ EXPECT_TRUE(b.GenerateGlobalVariable(v)) << b.error();
+ ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_TRUE(b.GenerateAssignStatement(assign)) << b.error();
- EXPECT_FALSE(b.has_error());
+ EXPECT_TRUE(b.GenerateAssignStatement(assign)) << b.error();
+ EXPECT_FALSE(b.has_error());
- EXPECT_EQ(DumpInstructions(b.types()), R"(%4 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%4 = OpTypeFloat 32
%3 = OpTypeVector %4 3
%2 = OpTypePointer Private %3
%5 = OpConstantNull %3
%1 = OpVariable %2 Private %5
-%6 = OpTypeVector %4 2
-%7 = OpConstant %4 1
-%8 = OpConstant %4 2
-%9 = OpConstantComposite %6 %7 %8
-%12 = OpConstant %4 3
+%6 = OpConstant %4 1
+%7 = OpConstant %4 2
+%8 = OpConstant %4 3
+%9 = OpConstantComposite %3 %6 %7 %8
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(%10 = OpCompositeExtract %4 %9 0
-%11 = OpCompositeExtract %4 %9 1
-%13 = OpCompositeConstruct %3 %10 %11 %12
-OpStore %1 %13
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(OpStore %1 %9
)");
}
TEST_F(BuilderTest, Assign_Var_Complex_Constructor) {
- auto* init = vec3<f32>(1.f, 2.f, 3.f);
+ auto* init = vec3<f32>(1_f, 2_f, 3_f);
- auto* v = Global("var", ty.vec3<f32>(), ast::StorageClass::kPrivate);
+ auto* v = Global("var", ty.vec3<f32>(), ast::StorageClass::kPrivate);
- auto* assign = Assign("var", init);
+ auto* assign = Assign("var", init);
- WrapInFunction(assign);
+ WrapInFunction(assign);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- EXPECT_TRUE(b.GenerateGlobalVariable(v)) << b.error();
- ASSERT_FALSE(b.has_error()) << b.error();
+ b.push_function(Function{});
+ EXPECT_TRUE(b.GenerateGlobalVariable(v)) << b.error();
+ ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_TRUE(b.GenerateAssignStatement(assign)) << b.error();
- EXPECT_FALSE(b.has_error());
+ EXPECT_TRUE(b.GenerateAssignStatement(assign)) << b.error();
+ EXPECT_FALSE(b.has_error());
- EXPECT_EQ(DumpInstructions(b.types()), R"(%4 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%4 = OpTypeFloat 32
%3 = OpTypeVector %4 3
%2 = OpTypePointer Private %3
%5 = OpConstantNull %3
@@ -161,40 +159,40 @@ TEST_F(BuilderTest, Assign_Var_Complex_Constructor) {
%8 = OpConstant %4 3
%9 = OpConstantComposite %3 %6 %7 %8
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(OpStore %1 %9
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(OpStore %1 %9
)");
}
TEST_F(BuilderTest, Assign_StructMember) {
- // my_struct {
- // a : f32
- // b : f32
- // }
- // var ident : my_struct
- // ident.b = 4.0;
+ // my_struct {
+ // a : f32
+ // b : f32
+ // }
+ // var ident : my_struct
+ // ident.b = 4.0;
- auto* s = Structure("my_struct", {
- Member("a", ty.f32()),
- Member("b", ty.f32()),
- });
+ auto* s = Structure("my_struct", {
+ Member("a", ty.f32()),
+ Member("b", ty.f32()),
+ });
- auto* v = Var("ident", ty.Of(s));
+ auto* v = Var("ident", ty.Of(s));
- auto* assign = Assign(MemberAccessor("ident", "b"), Expr(4.f));
+ auto* assign = Assign(MemberAccessor("ident", "b"), Expr(4_f));
- WrapInFunction(v, assign);
+ WrapInFunction(v, assign);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- EXPECT_TRUE(b.GenerateGlobalVariable(v)) << b.error();
- ASSERT_FALSE(b.has_error()) << b.error();
+ b.push_function(Function{});
+ EXPECT_TRUE(b.GenerateGlobalVariable(v)) << b.error();
+ ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_TRUE(b.GenerateAssignStatement(assign)) << b.error();
- EXPECT_FALSE(b.has_error());
+ EXPECT_TRUE(b.GenerateAssignStatement(assign)) << b.error();
+ EXPECT_FALSE(b.has_error());
- EXPECT_EQ(DumpInstructions(b.types()), R"(%4 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%4 = OpTypeFloat 32
%3 = OpTypeStruct %4 %4
%2 = OpTypePointer Function %3
%1 = OpVariable %2 Function
@@ -204,30 +202,30 @@ TEST_F(BuilderTest, Assign_StructMember) {
%9 = OpConstant %4 4
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(%8 = OpAccessChain %7 %1 %6
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(%8 = OpAccessChain %7 %1 %6
OpStore %8 %9
)");
}
TEST_F(BuilderTest, Assign_Vector) {
- auto* v = Global("var", ty.vec3<f32>(), ast::StorageClass::kPrivate);
+ auto* v = Global("var", ty.vec3<f32>(), ast::StorageClass::kPrivate);
- auto* val = vec3<f32>(1.f, 1.f, 3.f);
- auto* assign = Assign("var", val);
+ auto* val = vec3<f32>(1_f, 1_f, 3_f);
+ auto* assign = Assign("var", val);
- WrapInFunction(assign);
+ WrapInFunction(assign);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- EXPECT_TRUE(b.GenerateGlobalVariable(v)) << b.error();
- ASSERT_FALSE(b.has_error()) << b.error();
+ b.push_function(Function{});
+ EXPECT_TRUE(b.GenerateGlobalVariable(v)) << b.error();
+ ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_TRUE(b.GenerateAssignStatement(assign)) << b.error();
- EXPECT_FALSE(b.has_error());
+ EXPECT_TRUE(b.GenerateAssignStatement(assign)) << b.error();
+ EXPECT_FALSE(b.has_error());
- EXPECT_EQ(DumpInstructions(b.types()), R"(%4 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%4 = OpTypeFloat 32
%3 = OpTypeVector %4 3
%2 = OpTypePointer Private %3
%5 = OpConstantNull %3
@@ -237,30 +235,30 @@ TEST_F(BuilderTest, Assign_Vector) {
%8 = OpConstantComposite %3 %6 %6 %7
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(OpStore %1 %8
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(OpStore %1 %8
)");
}
TEST_F(BuilderTest, Assign_Vector_MemberByName) {
- // var.y = 1
+ // var.y = 1
- auto* v = Global("var", ty.vec3<f32>(), ast::StorageClass::kPrivate);
+ auto* v = Global("var", ty.vec3<f32>(), ast::StorageClass::kPrivate);
- auto* assign = Assign(MemberAccessor("var", "y"), Expr(1.f));
+ auto* assign = Assign(MemberAccessor("var", "y"), Expr(1_f));
- WrapInFunction(assign);
+ WrapInFunction(assign);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- EXPECT_TRUE(b.GenerateGlobalVariable(v)) << b.error();
- ASSERT_FALSE(b.has_error()) << b.error();
+ b.push_function(Function{});
+ EXPECT_TRUE(b.GenerateGlobalVariable(v)) << b.error();
+ ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_TRUE(b.GenerateAssignStatement(assign)) << b.error();
- EXPECT_FALSE(b.has_error());
+ EXPECT_TRUE(b.GenerateAssignStatement(assign)) << b.error();
+ EXPECT_FALSE(b.has_error());
- EXPECT_EQ(DumpInstructions(b.types()), R"(%4 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%4 = OpTypeFloat 32
%3 = OpTypeVector %4 3
%2 = OpTypePointer Private %3
%5 = OpConstantNull %3
@@ -271,31 +269,31 @@ TEST_F(BuilderTest, Assign_Vector_MemberByName) {
%10 = OpConstant %4 1
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(%9 = OpAccessChain %8 %1 %7
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(%9 = OpAccessChain %8 %1 %7
OpStore %9 %10
)");
}
TEST_F(BuilderTest, Assign_Vector_MemberByIndex) {
- // var[1] = 1
+ // var[1] = 1
- auto* v = Global("var", ty.vec3<f32>(), ast::StorageClass::kPrivate);
+ auto* v = Global("var", ty.vec3<f32>(), ast::StorageClass::kPrivate);
- auto* assign = Assign(IndexAccessor("var", 1), Expr(1.f));
+ auto* assign = Assign(IndexAccessor("var", 1_i), Expr(1_f));
- WrapInFunction(assign);
+ WrapInFunction(assign);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- EXPECT_TRUE(b.GenerateGlobalVariable(v)) << b.error();
- ASSERT_FALSE(b.has_error()) << b.error();
+ b.push_function(Function{});
+ EXPECT_TRUE(b.GenerateGlobalVariable(v)) << b.error();
+ ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_TRUE(b.GenerateAssignStatement(assign)) << b.error();
- EXPECT_FALSE(b.has_error());
+ EXPECT_TRUE(b.GenerateAssignStatement(assign)) << b.error();
+ EXPECT_FALSE(b.has_error());
- EXPECT_EQ(DumpInstructions(b.types()), R"(%4 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%4 = OpTypeFloat 32
%3 = OpTypeVector %4 3
%2 = OpTypePointer Private %3
%5 = OpConstantNull %3
@@ -306,8 +304,8 @@ TEST_F(BuilderTest, Assign_Vector_MemberByIndex) {
%10 = OpConstant %4 1
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(%9 = OpAccessChain %8 %1 %7
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(%9 = OpAccessChain %8 %1 %7
OpStore %9 %10
)");
}
diff --git a/chromium/third_party/dawn/src/tint/writer/spirv/builder_binary_expression_test.cc b/chromium/third_party/dawn/src/tint/writer/spirv/builder_binary_expression_test.cc
index e2768a8d8ac..abaee2e2041 100644
--- a/chromium/third_party/dawn/src/tint/writer/spirv/builder_binary_expression_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/spirv/builder_binary_expression_test.cc
@@ -15,167 +15,167 @@
#include "src/tint/writer/spirv/spv_dump.h"
#include "src/tint/writer/spirv/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::spirv {
namespace {
using BuilderTest = TestHelper;
struct BinaryData {
- ast::BinaryOp op;
- std::string name;
+ ast::BinaryOp op;
+ std::string name;
};
inline std::ostream& operator<<(std::ostream& out, BinaryData data) {
- out << data.op;
- return out;
+ out << data.op;
+ return out;
}
using BinaryArithSignedIntegerTest = TestParamHelper<BinaryData>;
TEST_P(BinaryArithSignedIntegerTest, Scalar) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* lhs = Expr(3);
- auto* rhs = Expr(4);
+ auto* lhs = Expr(3_i);
+ auto* rhs = Expr(4_i);
- auto* expr = create<ast::BinaryExpression>(param.op, lhs, rhs);
+ auto* expr = create<ast::BinaryExpression>(param.op, lhs, rhs);
- WrapInFunction(expr);
+ WrapInFunction(expr);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
+ b.push_function(Function{});
- EXPECT_EQ(b.GenerateBinaryExpression(expr), 4u) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeInt 32 1
+ EXPECT_EQ(b.GenerateBinaryExpression(expr), 4u) << b.error();
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeInt 32 1
%2 = OpConstant %1 3
%3 = OpConstant %1 4
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- "%4 = " + param.name + " %1 %2 %3\n");
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ "%4 = " + param.name + " %1 %2 %3\n");
}
TEST_P(BinaryArithSignedIntegerTest, Vector) {
- auto param = GetParam();
+ auto param = GetParam();
- // Skip ops that are illegal for this type
- if (param.op == ast::BinaryOp::kAnd || param.op == ast::BinaryOp::kOr ||
- param.op == ast::BinaryOp::kXor) {
- return;
- }
+ // Skip ops that are illegal for this type
+ if (param.op == ast::BinaryOp::kAnd || param.op == ast::BinaryOp::kOr ||
+ param.op == ast::BinaryOp::kXor) {
+ return;
+ }
- auto* lhs = vec3<i32>(1, 1, 1);
- auto* rhs = vec3<i32>(1, 1, 1);
+ auto* lhs = vec3<i32>(1_i, 1_i, 1_i);
+ auto* rhs = vec3<i32>(1_i, 1_i, 1_i);
- auto* expr = create<ast::BinaryExpression>(param.op, lhs, rhs);
+ auto* expr = create<ast::BinaryExpression>(param.op, lhs, rhs);
- WrapInFunction(expr);
+ WrapInFunction(expr);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
+ b.push_function(Function{});
- EXPECT_EQ(b.GenerateBinaryExpression(expr), 5u) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeInt 32 1
+ EXPECT_EQ(b.GenerateBinaryExpression(expr), 5u) << b.error();
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeInt 32 1
%1 = OpTypeVector %2 3
%3 = OpConstant %2 1
%4 = OpConstantComposite %1 %3 %3 %3
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- "%5 = " + param.name + " %1 %4 %4\n");
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ "%5 = " + param.name + " %1 %4 %4\n");
}
TEST_P(BinaryArithSignedIntegerTest, Scalar_Loads) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* var = Var("param", ty.i32());
- auto* expr =
- create<ast::BinaryExpression>(param.op, Expr("param"), Expr("param"));
+ auto* var = Var("param", ty.i32());
+ auto* expr = create<ast::BinaryExpression>(param.op, Expr("param"), Expr("param"));
- WrapInFunction(var, expr);
+ WrapInFunction(var, expr);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- EXPECT_TRUE(b.GenerateFunctionVariable(var)) << b.error();
- EXPECT_EQ(b.GenerateBinaryExpression(expr), 7u) << b.error();
- ASSERT_FALSE(b.has_error()) << b.error();
+ b.push_function(Function{});
+ EXPECT_TRUE(b.GenerateFunctionVariable(var)) << b.error();
+ EXPECT_EQ(b.GenerateBinaryExpression(expr), 7u) << b.error();
+ ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%3 = OpTypeInt 32 1
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%3 = OpTypeInt 32 1
%2 = OpTypePointer Function %3
%4 = OpConstantNull %3
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].variables()),
- R"(%1 = OpVariable %2 Function %4
+ EXPECT_EQ(DumpInstructions(b.functions()[0].variables()),
+ R"(%1 = OpVariable %2 Function %4
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(%5 = OpLoad %3 %1
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(%5 = OpLoad %3 %1
%6 = OpLoad %3 %1
%7 = )" + param.name +
- R"( %3 %5 %6
+ R"( %3 %5 %6
)");
}
-INSTANTIATE_TEST_SUITE_P(
- BuilderTest,
- BinaryArithSignedIntegerTest,
- // NOTE: No left and right shift as they require u32 for rhs operand
- testing::Values(BinaryData{ast::BinaryOp::kAdd, "OpIAdd"},
- BinaryData{ast::BinaryOp::kAnd, "OpBitwiseAnd"},
- BinaryData{ast::BinaryOp::kDivide, "OpSDiv"},
- BinaryData{ast::BinaryOp::kModulo, "OpSMod"},
- BinaryData{ast::BinaryOp::kMultiply, "OpIMul"},
- BinaryData{ast::BinaryOp::kOr, "OpBitwiseOr"},
- BinaryData{ast::BinaryOp::kSubtract, "OpISub"},
- BinaryData{ast::BinaryOp::kXor, "OpBitwiseXor"}));
+INSTANTIATE_TEST_SUITE_P(BuilderTest,
+ BinaryArithSignedIntegerTest,
+ // NOTE: No left and right shift as they require u32 for rhs operand
+ testing::Values(BinaryData{ast::BinaryOp::kAdd, "OpIAdd"},
+ BinaryData{ast::BinaryOp::kAnd, "OpBitwiseAnd"},
+ BinaryData{ast::BinaryOp::kDivide, "OpSDiv"},
+ BinaryData{ast::BinaryOp::kModulo, "OpSMod"},
+ BinaryData{ast::BinaryOp::kMultiply, "OpIMul"},
+ BinaryData{ast::BinaryOp::kOr, "OpBitwiseOr"},
+ BinaryData{ast::BinaryOp::kSubtract, "OpISub"},
+ BinaryData{ast::BinaryOp::kXor, "OpBitwiseXor"}));
using BinaryArithUnsignedIntegerTest = TestParamHelper<BinaryData>;
TEST_P(BinaryArithUnsignedIntegerTest, Scalar) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* lhs = Expr(3u);
- auto* rhs = Expr(4u);
+ auto* lhs = Expr(3_u);
+ auto* rhs = Expr(4_u);
- auto* expr = create<ast::BinaryExpression>(param.op, lhs, rhs);
+ auto* expr = create<ast::BinaryExpression>(param.op, lhs, rhs);
- WrapInFunction(expr);
+ WrapInFunction(expr);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
+ b.push_function(Function{});
- EXPECT_EQ(b.GenerateBinaryExpression(expr), 4u) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeInt 32 0
+ EXPECT_EQ(b.GenerateBinaryExpression(expr), 4u) << b.error();
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeInt 32 0
%2 = OpConstant %1 3
%3 = OpConstant %1 4
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- "%4 = " + param.name + " %1 %2 %3\n");
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ "%4 = " + param.name + " %1 %2 %3\n");
}
TEST_P(BinaryArithUnsignedIntegerTest, Vector) {
- auto param = GetParam();
+ auto param = GetParam();
- // Skip ops that are illegal for this type
- if (param.op == ast::BinaryOp::kAnd || param.op == ast::BinaryOp::kOr ||
- param.op == ast::BinaryOp::kXor) {
- return;
- }
+ // Skip ops that are illegal for this type
+ if (param.op == ast::BinaryOp::kAnd || param.op == ast::BinaryOp::kOr ||
+ param.op == ast::BinaryOp::kXor) {
+ return;
+ }
- auto* lhs = vec3<u32>(1u, 1u, 1u);
- auto* rhs = vec3<u32>(1u, 1u, 1u);
+ auto* lhs = vec3<u32>(1_u, 1_u, 1_u);
+ auto* rhs = vec3<u32>(1_u, 1_u, 1_u);
- auto* expr = create<ast::BinaryExpression>(param.op, lhs, rhs);
+ auto* expr = create<ast::BinaryExpression>(param.op, lhs, rhs);
- WrapInFunction(expr);
+ WrapInFunction(expr);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
+ b.push_function(Function{});
- EXPECT_EQ(b.GenerateBinaryExpression(expr), 5u) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeInt 32 0
+ EXPECT_EQ(b.GenerateBinaryExpression(expr), 5u) << b.error();
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeInt 32 0
%1 = OpTypeVector %2 3
%3 = OpConstant %2 1
%4 = OpConstantComposite %1 %3 %3 %3
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- "%5 = " + param.name + " %1 %4 %4\n");
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ "%5 = " + param.name + " %1 %4 %4\n");
}
INSTANTIATE_TEST_SUITE_P(
BuilderTest,
@@ -187,428 +187,417 @@ INSTANTIATE_TEST_SUITE_P(
BinaryData{ast::BinaryOp::kMultiply, "OpIMul"},
BinaryData{ast::BinaryOp::kOr, "OpBitwiseOr"},
BinaryData{ast::BinaryOp::kShiftLeft, "OpShiftLeftLogical"},
- BinaryData{ast::BinaryOp::kShiftRight,
- "OpShiftRightLogical"},
+ BinaryData{ast::BinaryOp::kShiftRight, "OpShiftRightLogical"},
BinaryData{ast::BinaryOp::kSubtract, "OpISub"},
BinaryData{ast::BinaryOp::kXor, "OpBitwiseXor"}));
using BinaryArithFloatTest = TestParamHelper<BinaryData>;
TEST_P(BinaryArithFloatTest, Scalar) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* lhs = Expr(3.2f);
- auto* rhs = Expr(4.5f);
+ auto* lhs = Expr(3.2_f);
+ auto* rhs = Expr(4.5_f);
- auto* expr = create<ast::BinaryExpression>(param.op, lhs, rhs);
+ auto* expr = create<ast::BinaryExpression>(param.op, lhs, rhs);
- WrapInFunction(expr);
+ WrapInFunction(expr);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
+ b.push_function(Function{});
- EXPECT_EQ(b.GenerateBinaryExpression(expr), 4u) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeFloat 32
+ EXPECT_EQ(b.GenerateBinaryExpression(expr), 4u) << b.error();
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeFloat 32
%2 = OpConstant %1 3.20000005
%3 = OpConstant %1 4.5
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- "%4 = " + param.name + " %1 %2 %3\n");
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ "%4 = " + param.name + " %1 %2 %3\n");
}
TEST_P(BinaryArithFloatTest, Vector) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* lhs = vec3<f32>(1.f, 1.f, 1.f);
- auto* rhs = vec3<f32>(1.f, 1.f, 1.f);
+ auto* lhs = vec3<f32>(1_f, 1_f, 1_f);
+ auto* rhs = vec3<f32>(1_f, 1_f, 1_f);
- auto* expr = create<ast::BinaryExpression>(param.op, lhs, rhs);
+ auto* expr = create<ast::BinaryExpression>(param.op, lhs, rhs);
- WrapInFunction(expr);
+ WrapInFunction(expr);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
+ b.push_function(Function{});
- EXPECT_EQ(b.GenerateBinaryExpression(expr), 5u) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
+ EXPECT_EQ(b.GenerateBinaryExpression(expr), 5u) << b.error();
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
%1 = OpTypeVector %2 3
%3 = OpConstant %2 1
%4 = OpConstantComposite %1 %3 %3 %3
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- "%5 = " + param.name + " %1 %4 %4\n");
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ "%5 = " + param.name + " %1 %4 %4\n");
}
-INSTANTIATE_TEST_SUITE_P(
- BuilderTest,
- BinaryArithFloatTest,
- testing::Values(BinaryData{ast::BinaryOp::kAdd, "OpFAdd"},
- BinaryData{ast::BinaryOp::kDivide, "OpFDiv"},
- BinaryData{ast::BinaryOp::kModulo, "OpFRem"},
- BinaryData{ast::BinaryOp::kMultiply, "OpFMul"},
- BinaryData{ast::BinaryOp::kSubtract, "OpFSub"}));
+INSTANTIATE_TEST_SUITE_P(BuilderTest,
+ BinaryArithFloatTest,
+ testing::Values(BinaryData{ast::BinaryOp::kAdd, "OpFAdd"},
+ BinaryData{ast::BinaryOp::kDivide, "OpFDiv"},
+ BinaryData{ast::BinaryOp::kModulo, "OpFRem"},
+ BinaryData{ast::BinaryOp::kMultiply, "OpFMul"},
+ BinaryData{ast::BinaryOp::kSubtract, "OpFSub"}));
using BinaryOperatorBoolTest = TestParamHelper<BinaryData>;
TEST_P(BinaryOperatorBoolTest, Scalar) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* lhs = Expr(true);
- auto* rhs = Expr(false);
+ auto* lhs = Expr(true);
+ auto* rhs = Expr(false);
- auto* expr = create<ast::BinaryExpression>(param.op, lhs, rhs);
+ auto* expr = create<ast::BinaryExpression>(param.op, lhs, rhs);
- WrapInFunction(expr);
+ WrapInFunction(expr);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
+ b.push_function(Function{});
- EXPECT_EQ(b.GenerateBinaryExpression(expr), 4u) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeBool
+ EXPECT_EQ(b.GenerateBinaryExpression(expr), 4u) << b.error();
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeBool
%2 = OpConstantTrue %1
-%3 = OpConstantFalse %1
+%3 = OpConstantNull %1
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- "%4 = " + param.name + " %1 %2 %3\n");
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ "%4 = " + param.name + " %1 %2 %3\n");
}
TEST_P(BinaryOperatorBoolTest, Vector) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* lhs = vec3<bool>(false, true, false);
- auto* rhs = vec3<bool>(true, false, true);
+ auto* lhs = vec3<bool>(false, true, false);
+ auto* rhs = vec3<bool>(true, false, true);
- auto* expr = create<ast::BinaryExpression>(param.op, lhs, rhs);
+ auto* expr = create<ast::BinaryExpression>(param.op, lhs, rhs);
- WrapInFunction(expr);
+ WrapInFunction(expr);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
+ b.push_function(Function{});
- EXPECT_EQ(b.GenerateBinaryExpression(expr), 7u) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeBool
+ EXPECT_EQ(b.GenerateBinaryExpression(expr), 7u) << b.error();
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeBool
%1 = OpTypeVector %2 3
%3 = OpConstantFalse %2
%4 = OpConstantTrue %2
%5 = OpConstantComposite %1 %3 %4 %3
%6 = OpConstantComposite %1 %4 %3 %4
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- "%7 = " + param.name + " %1 %5 %6\n");
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ "%7 = " + param.name + " %1 %5 %6\n");
}
-INSTANTIATE_TEST_SUITE_P(
- BuilderTest,
- BinaryOperatorBoolTest,
- testing::Values(BinaryData{ast::BinaryOp::kEqual, "OpLogicalEqual"},
- BinaryData{ast::BinaryOp::kNotEqual, "OpLogicalNotEqual"},
- BinaryData{ast::BinaryOp::kAnd, "OpLogicalAnd"},
- BinaryData{ast::BinaryOp::kOr, "OpLogicalOr"}));
+INSTANTIATE_TEST_SUITE_P(BuilderTest,
+ BinaryOperatorBoolTest,
+ testing::Values(BinaryData{ast::BinaryOp::kEqual, "OpLogicalEqual"},
+ BinaryData{ast::BinaryOp::kNotEqual, "OpLogicalNotEqual"},
+ BinaryData{ast::BinaryOp::kAnd, "OpLogicalAnd"},
+ BinaryData{ast::BinaryOp::kOr, "OpLogicalOr"}));
using BinaryCompareUnsignedIntegerTest = TestParamHelper<BinaryData>;
TEST_P(BinaryCompareUnsignedIntegerTest, Scalar) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* lhs = Expr(3u);
- auto* rhs = Expr(4u);
+ auto* lhs = Expr(3_u);
+ auto* rhs = Expr(4_u);
- auto* expr = create<ast::BinaryExpression>(param.op, lhs, rhs);
+ auto* expr = create<ast::BinaryExpression>(param.op, lhs, rhs);
- WrapInFunction(expr);
+ WrapInFunction(expr);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
+ b.push_function(Function{});
- EXPECT_EQ(b.GenerateBinaryExpression(expr), 4u) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeInt 32 0
+ EXPECT_EQ(b.GenerateBinaryExpression(expr), 4u) << b.error();
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeInt 32 0
%2 = OpConstant %1 3
%3 = OpConstant %1 4
%5 = OpTypeBool
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- "%4 = " + param.name + " %5 %2 %3\n");
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ "%4 = " + param.name + " %5 %2 %3\n");
}
TEST_P(BinaryCompareUnsignedIntegerTest, Vector) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* lhs = vec3<u32>(1u, 1u, 1u);
- auto* rhs = vec3<u32>(1u, 1u, 1u);
+ auto* lhs = vec3<u32>(1_u, 1_u, 1_u);
+ auto* rhs = vec3<u32>(1_u, 1_u, 1_u);
- auto* expr = create<ast::BinaryExpression>(param.op, lhs, rhs);
+ auto* expr = create<ast::BinaryExpression>(param.op, lhs, rhs);
- WrapInFunction(expr);
+ WrapInFunction(expr);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
+ b.push_function(Function{});
- EXPECT_EQ(b.GenerateBinaryExpression(expr), 5u) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeInt 32 0
+ EXPECT_EQ(b.GenerateBinaryExpression(expr), 5u) << b.error();
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeInt 32 0
%1 = OpTypeVector %2 3
%3 = OpConstant %2 1
%4 = OpConstantComposite %1 %3 %3 %3
%7 = OpTypeBool
%6 = OpTypeVector %7 3
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- "%5 = " + param.name + " %6 %4 %4\n");
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ "%5 = " + param.name + " %6 %4 %4\n");
}
INSTANTIATE_TEST_SUITE_P(
BuilderTest,
BinaryCompareUnsignedIntegerTest,
- testing::Values(
- BinaryData{ast::BinaryOp::kEqual, "OpIEqual"},
- BinaryData{ast::BinaryOp::kGreaterThan, "OpUGreaterThan"},
- BinaryData{ast::BinaryOp::kGreaterThanEqual, "OpUGreaterThanEqual"},
- BinaryData{ast::BinaryOp::kLessThan, "OpULessThan"},
- BinaryData{ast::BinaryOp::kLessThanEqual, "OpULessThanEqual"},
- BinaryData{ast::BinaryOp::kNotEqual, "OpINotEqual"}));
+ testing::Values(BinaryData{ast::BinaryOp::kEqual, "OpIEqual"},
+ BinaryData{ast::BinaryOp::kGreaterThan, "OpUGreaterThan"},
+ BinaryData{ast::BinaryOp::kGreaterThanEqual, "OpUGreaterThanEqual"},
+ BinaryData{ast::BinaryOp::kLessThan, "OpULessThan"},
+ BinaryData{ast::BinaryOp::kLessThanEqual, "OpULessThanEqual"},
+ BinaryData{ast::BinaryOp::kNotEqual, "OpINotEqual"}));
using BinaryCompareSignedIntegerTest = TestParamHelper<BinaryData>;
TEST_P(BinaryCompareSignedIntegerTest, Scalar) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* lhs = Expr(3);
- auto* rhs = Expr(4);
+ auto* lhs = Expr(3_i);
+ auto* rhs = Expr(4_i);
- auto* expr = create<ast::BinaryExpression>(param.op, lhs, rhs);
+ auto* expr = create<ast::BinaryExpression>(param.op, lhs, rhs);
- WrapInFunction(expr);
+ WrapInFunction(expr);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
+ b.push_function(Function{});
- EXPECT_EQ(b.GenerateBinaryExpression(expr), 4u) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeInt 32 1
+ EXPECT_EQ(b.GenerateBinaryExpression(expr), 4u) << b.error();
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeInt 32 1
%2 = OpConstant %1 3
%3 = OpConstant %1 4
%5 = OpTypeBool
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- "%4 = " + param.name + " %5 %2 %3\n");
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ "%4 = " + param.name + " %5 %2 %3\n");
}
TEST_P(BinaryCompareSignedIntegerTest, Vector) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* lhs = vec3<i32>(1, 1, 1);
- auto* rhs = vec3<i32>(1, 1, 1);
+ auto* lhs = vec3<i32>(1_i, 1_i, 1_i);
+ auto* rhs = vec3<i32>(1_i, 1_i, 1_i);
- auto* expr = create<ast::BinaryExpression>(param.op, lhs, rhs);
+ auto* expr = create<ast::BinaryExpression>(param.op, lhs, rhs);
- WrapInFunction(expr);
+ WrapInFunction(expr);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
+ b.push_function(Function{});
- EXPECT_EQ(b.GenerateBinaryExpression(expr), 5u) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeInt 32 1
+ EXPECT_EQ(b.GenerateBinaryExpression(expr), 5u) << b.error();
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeInt 32 1
%1 = OpTypeVector %2 3
%3 = OpConstant %2 1
%4 = OpConstantComposite %1 %3 %3 %3
%7 = OpTypeBool
%6 = OpTypeVector %7 3
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- "%5 = " + param.name + " %6 %4 %4\n");
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ "%5 = " + param.name + " %6 %4 %4\n");
}
INSTANTIATE_TEST_SUITE_P(
BuilderTest,
BinaryCompareSignedIntegerTest,
- testing::Values(
- BinaryData{ast::BinaryOp::kEqual, "OpIEqual"},
- BinaryData{ast::BinaryOp::kGreaterThan, "OpSGreaterThan"},
- BinaryData{ast::BinaryOp::kGreaterThanEqual, "OpSGreaterThanEqual"},
- BinaryData{ast::BinaryOp::kLessThan, "OpSLessThan"},
- BinaryData{ast::BinaryOp::kLessThanEqual, "OpSLessThanEqual"},
- BinaryData{ast::BinaryOp::kNotEqual, "OpINotEqual"}));
+ testing::Values(BinaryData{ast::BinaryOp::kEqual, "OpIEqual"},
+ BinaryData{ast::BinaryOp::kGreaterThan, "OpSGreaterThan"},
+ BinaryData{ast::BinaryOp::kGreaterThanEqual, "OpSGreaterThanEqual"},
+ BinaryData{ast::BinaryOp::kLessThan, "OpSLessThan"},
+ BinaryData{ast::BinaryOp::kLessThanEqual, "OpSLessThanEqual"},
+ BinaryData{ast::BinaryOp::kNotEqual, "OpINotEqual"}));
using BinaryCompareFloatTest = TestParamHelper<BinaryData>;
TEST_P(BinaryCompareFloatTest, Scalar) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* lhs = Expr(3.2f);
- auto* rhs = Expr(4.5f);
+ auto* lhs = Expr(3.2_f);
+ auto* rhs = Expr(4.5_f);
- auto* expr = create<ast::BinaryExpression>(param.op, lhs, rhs);
+ auto* expr = create<ast::BinaryExpression>(param.op, lhs, rhs);
- WrapInFunction(expr);
+ WrapInFunction(expr);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
+ b.push_function(Function{});
- EXPECT_EQ(b.GenerateBinaryExpression(expr), 4u) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeFloat 32
+ EXPECT_EQ(b.GenerateBinaryExpression(expr), 4u) << b.error();
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeFloat 32
%2 = OpConstant %1 3.20000005
%3 = OpConstant %1 4.5
%5 = OpTypeBool
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- "%4 = " + param.name + " %5 %2 %3\n");
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ "%4 = " + param.name + " %5 %2 %3\n");
}
TEST_P(BinaryCompareFloatTest, Vector) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* lhs = vec3<f32>(1.f, 1.f, 1.f);
- auto* rhs = vec3<f32>(1.f, 1.f, 1.f);
+ auto* lhs = vec3<f32>(1_f, 1_f, 1_f);
+ auto* rhs = vec3<f32>(1_f, 1_f, 1_f);
- auto* expr = create<ast::BinaryExpression>(param.op, lhs, rhs);
+ auto* expr = create<ast::BinaryExpression>(param.op, lhs, rhs);
- WrapInFunction(expr);
+ WrapInFunction(expr);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
+ b.push_function(Function{});
- EXPECT_EQ(b.GenerateBinaryExpression(expr), 5u) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
+ EXPECT_EQ(b.GenerateBinaryExpression(expr), 5u) << b.error();
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
%1 = OpTypeVector %2 3
%3 = OpConstant %2 1
%4 = OpConstantComposite %1 %3 %3 %3
%7 = OpTypeBool
%6 = OpTypeVector %7 3
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- "%5 = " + param.name + " %6 %4 %4\n");
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ "%5 = " + param.name + " %6 %4 %4\n");
}
INSTANTIATE_TEST_SUITE_P(
BuilderTest,
BinaryCompareFloatTest,
- testing::Values(
- BinaryData{ast::BinaryOp::kEqual, "OpFOrdEqual"},
- BinaryData{ast::BinaryOp::kGreaterThan, "OpFOrdGreaterThan"},
- BinaryData{ast::BinaryOp::kGreaterThanEqual, "OpFOrdGreaterThanEqual"},
- BinaryData{ast::BinaryOp::kLessThan, "OpFOrdLessThan"},
- BinaryData{ast::BinaryOp::kLessThanEqual, "OpFOrdLessThanEqual"},
- BinaryData{ast::BinaryOp::kNotEqual, "OpFOrdNotEqual"}));
+ testing::Values(BinaryData{ast::BinaryOp::kEqual, "OpFOrdEqual"},
+ BinaryData{ast::BinaryOp::kGreaterThan, "OpFOrdGreaterThan"},
+ BinaryData{ast::BinaryOp::kGreaterThanEqual, "OpFOrdGreaterThanEqual"},
+ BinaryData{ast::BinaryOp::kLessThan, "OpFOrdLessThan"},
+ BinaryData{ast::BinaryOp::kLessThanEqual, "OpFOrdLessThanEqual"},
+ BinaryData{ast::BinaryOp::kNotEqual, "OpFOrdNotEqual"}));
TEST_F(BuilderTest, Binary_Multiply_VectorScalar) {
- auto* lhs = vec3<f32>(1.f, 1.f, 1.f);
- auto* rhs = Expr(1.f);
+ auto* lhs = vec3<f32>(1_f, 1_f, 1_f);
+ auto* rhs = Expr(1_f);
- auto* expr =
- create<ast::BinaryExpression>(ast::BinaryOp::kMultiply, lhs, rhs);
+ auto* expr = create<ast::BinaryExpression>(ast::BinaryOp::kMultiply, lhs, rhs);
- WrapInFunction(expr);
+ WrapInFunction(expr);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
+ b.push_function(Function{});
- EXPECT_EQ(b.GenerateBinaryExpression(expr), 5u) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()),
- R"(%2 = OpTypeFloat 32
+ EXPECT_EQ(b.GenerateBinaryExpression(expr), 5u) << b.error();
+ EXPECT_EQ(DumpInstructions(b.types()),
+ R"(%2 = OpTypeFloat 32
%1 = OpTypeVector %2 3
%3 = OpConstant %2 1
%4 = OpConstantComposite %1 %3 %3 %3
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- "%5 = OpVectorTimesScalar %1 %4 %3\n");
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ "%5 = OpVectorTimesScalar %1 %4 %3\n");
}
TEST_F(BuilderTest, Binary_Multiply_ScalarVector) {
- auto* lhs = Expr(1.f);
- auto* rhs = vec3<f32>(1.f, 1.f, 1.f);
+ auto* lhs = Expr(1_f);
+ auto* rhs = vec3<f32>(1_f, 1_f, 1_f);
- auto* expr =
- create<ast::BinaryExpression>(ast::BinaryOp::kMultiply, lhs, rhs);
+ auto* expr = create<ast::BinaryExpression>(ast::BinaryOp::kMultiply, lhs, rhs);
- WrapInFunction(expr);
+ WrapInFunction(expr);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
+ b.push_function(Function{});
- EXPECT_EQ(b.GenerateBinaryExpression(expr), 5u) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()),
- R"(%1 = OpTypeFloat 32
+ EXPECT_EQ(b.GenerateBinaryExpression(expr), 5u) << b.error();
+ EXPECT_EQ(DumpInstructions(b.types()),
+ R"(%1 = OpTypeFloat 32
%2 = OpConstant %1 1
%3 = OpTypeVector %1 3
%4 = OpConstantComposite %3 %2 %2 %2
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- "%5 = OpVectorTimesScalar %3 %4 %2\n");
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ "%5 = OpVectorTimesScalar %3 %4 %2\n");
}
TEST_F(BuilderTest, Binary_Multiply_MatrixScalar) {
- auto* var = Var("mat", ty.mat3x3<f32>());
+ auto* var = Var("mat", ty.mat3x3<f32>());
- auto* expr = create<ast::BinaryExpression>(ast::BinaryOp::kMultiply,
- Expr("mat"), Expr(1.f));
+ auto* expr = create<ast::BinaryExpression>(ast::BinaryOp::kMultiply, Expr("mat"), Expr(1_f));
- WrapInFunction(var, expr);
+ WrapInFunction(var, expr);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- ASSERT_TRUE(b.GenerateGlobalVariable(var)) << b.error();
+ b.push_function(Function{});
+ ASSERT_TRUE(b.GenerateGlobalVariable(var)) << b.error();
- EXPECT_EQ(b.GenerateBinaryExpression(expr), 8u) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()),
- R"(%5 = OpTypeFloat 32
+ EXPECT_EQ(b.GenerateBinaryExpression(expr), 8u) << b.error();
+ EXPECT_EQ(DumpInstructions(b.types()),
+ R"(%5 = OpTypeFloat 32
%4 = OpTypeVector %5 3
%3 = OpTypeMatrix %4 3
%2 = OpTypePointer Function %3
%1 = OpVariable %2 Function
%7 = OpConstant %5 1
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(%6 = OpLoad %3 %1
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(%6 = OpLoad %3 %1
%8 = OpMatrixTimesScalar %3 %6 %7
)");
}
TEST_F(BuilderTest, Binary_Multiply_ScalarMatrix) {
- auto* var = Var("mat", ty.mat3x3<f32>());
+ auto* var = Var("mat", ty.mat3x3<f32>());
- auto* expr = create<ast::BinaryExpression>(ast::BinaryOp::kMultiply,
- Expr(1.f), Expr("mat"));
+ auto* expr = create<ast::BinaryExpression>(ast::BinaryOp::kMultiply, Expr(1_f), Expr("mat"));
- WrapInFunction(var, expr);
+ WrapInFunction(var, expr);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- ASSERT_TRUE(b.GenerateGlobalVariable(var)) << b.error();
+ b.push_function(Function{});
+ ASSERT_TRUE(b.GenerateGlobalVariable(var)) << b.error();
- EXPECT_EQ(b.GenerateBinaryExpression(expr), 8u) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()),
- R"(%5 = OpTypeFloat 32
+ EXPECT_EQ(b.GenerateBinaryExpression(expr), 8u) << b.error();
+ EXPECT_EQ(DumpInstructions(b.types()),
+ R"(%5 = OpTypeFloat 32
%4 = OpTypeVector %5 3
%3 = OpTypeMatrix %4 3
%2 = OpTypePointer Function %3
%1 = OpVariable %2 Function
%6 = OpConstant %5 1
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(%7 = OpLoad %3 %1
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(%7 = OpLoad %3 %1
%8 = OpMatrixTimesScalar %3 %7 %6
)");
}
TEST_F(BuilderTest, Binary_Multiply_MatrixVector) {
- auto* var = Var("mat", ty.mat3x3<f32>());
- auto* rhs = vec3<f32>(1.f, 1.f, 1.f);
+ auto* var = Var("mat", ty.mat3x3<f32>());
+ auto* rhs = vec3<f32>(1_f, 1_f, 1_f);
- auto* expr =
- create<ast::BinaryExpression>(ast::BinaryOp::kMultiply, Expr("mat"), rhs);
+ auto* expr = create<ast::BinaryExpression>(ast::BinaryOp::kMultiply, Expr("mat"), rhs);
- WrapInFunction(var, expr);
+ WrapInFunction(var, expr);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- ASSERT_TRUE(b.GenerateGlobalVariable(var)) << b.error();
+ b.push_function(Function{});
+ ASSERT_TRUE(b.GenerateGlobalVariable(var)) << b.error();
- EXPECT_EQ(b.GenerateBinaryExpression(expr), 9u) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()),
- R"(%5 = OpTypeFloat 32
+ EXPECT_EQ(b.GenerateBinaryExpression(expr), 9u) << b.error();
+ EXPECT_EQ(DumpInstructions(b.types()),
+ R"(%5 = OpTypeFloat 32
%4 = OpTypeVector %5 3
%3 = OpTypeMatrix %4 3
%2 = OpTypePointer Function %3
@@ -616,29 +605,28 @@ TEST_F(BuilderTest, Binary_Multiply_MatrixVector) {
%7 = OpConstant %5 1
%8 = OpConstantComposite %4 %7 %7 %7
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(%6 = OpLoad %3 %1
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(%6 = OpLoad %3 %1
%9 = OpMatrixTimesVector %4 %6 %8
)");
}
TEST_F(BuilderTest, Binary_Multiply_VectorMatrix) {
- auto* var = Var("mat", ty.mat3x3<f32>());
- auto* lhs = vec3<f32>(1.f, 1.f, 1.f);
+ auto* var = Var("mat", ty.mat3x3<f32>());
+ auto* lhs = vec3<f32>(1_f, 1_f, 1_f);
- auto* expr =
- create<ast::BinaryExpression>(ast::BinaryOp::kMultiply, lhs, Expr("mat"));
+ auto* expr = create<ast::BinaryExpression>(ast::BinaryOp::kMultiply, lhs, Expr("mat"));
- WrapInFunction(var, expr);
+ WrapInFunction(var, expr);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- ASSERT_TRUE(b.GenerateGlobalVariable(var)) << b.error();
+ b.push_function(Function{});
+ ASSERT_TRUE(b.GenerateGlobalVariable(var)) << b.error();
- EXPECT_EQ(b.GenerateBinaryExpression(expr), 9u) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()),
- R"(%5 = OpTypeFloat 32
+ EXPECT_EQ(b.GenerateBinaryExpression(expr), 9u) << b.error();
+ EXPECT_EQ(DumpInstructions(b.types()),
+ R"(%5 = OpTypeFloat 32
%4 = OpTypeVector %5 3
%3 = OpTypeMatrix %4 3
%2 = OpTypePointer Function %3
@@ -646,68 +634,64 @@ TEST_F(BuilderTest, Binary_Multiply_VectorMatrix) {
%6 = OpConstant %5 1
%7 = OpConstantComposite %4 %6 %6 %6
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(%8 = OpLoad %3 %1
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(%8 = OpLoad %3 %1
%9 = OpVectorTimesMatrix %4 %7 %8
)");
}
TEST_F(BuilderTest, Binary_Multiply_MatrixMatrix) {
- auto* var = Var("mat", ty.mat3x3<f32>());
+ auto* var = Var("mat", ty.mat3x3<f32>());
- auto* expr = create<ast::BinaryExpression>(ast::BinaryOp::kMultiply,
- Expr("mat"), Expr("mat"));
+ auto* expr = create<ast::BinaryExpression>(ast::BinaryOp::kMultiply, Expr("mat"), Expr("mat"));
- WrapInFunction(var, expr);
+ WrapInFunction(var, expr);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- ASSERT_TRUE(b.GenerateGlobalVariable(var)) << b.error();
+ b.push_function(Function{});
+ ASSERT_TRUE(b.GenerateGlobalVariable(var)) << b.error();
- EXPECT_EQ(b.GenerateBinaryExpression(expr), 8u) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()),
- R"(%5 = OpTypeFloat 32
+ EXPECT_EQ(b.GenerateBinaryExpression(expr), 8u) << b.error();
+ EXPECT_EQ(DumpInstructions(b.types()),
+ R"(%5 = OpTypeFloat 32
%4 = OpTypeVector %5 3
%3 = OpTypeMatrix %4 3
%2 = OpTypePointer Function %3
%1 = OpVariable %2 Function
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(%6 = OpLoad %3 %1
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(%6 = OpLoad %3 %1
%7 = OpLoad %3 %1
%8 = OpMatrixTimesMatrix %3 %6 %7
)");
}
TEST_F(BuilderTest, Binary_LogicalAnd) {
- auto* lhs =
- create<ast::BinaryExpression>(ast::BinaryOp::kEqual, Expr(1), Expr(2));
+ auto* lhs = create<ast::BinaryExpression>(ast::BinaryOp::kEqual, Expr(1_i), Expr(2_i));
- auto* rhs =
- create<ast::BinaryExpression>(ast::BinaryOp::kEqual, Expr(3), Expr(4));
+ auto* rhs = create<ast::BinaryExpression>(ast::BinaryOp::kEqual, Expr(3_i), Expr(4_i));
- auto* expr =
- create<ast::BinaryExpression>(ast::BinaryOp::kLogicalAnd, lhs, rhs);
+ auto* expr = create<ast::BinaryExpression>(ast::BinaryOp::kLogicalAnd, lhs, rhs);
- WrapInFunction(expr);
+ WrapInFunction(expr);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- b.GenerateLabel(b.next_id());
+ b.push_function(Function{});
+ b.GenerateLabel(b.next_id());
- EXPECT_EQ(b.GenerateBinaryExpression(expr), 12u) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()),
- R"(%2 = OpTypeInt 32 1
+ EXPECT_EQ(b.GenerateBinaryExpression(expr), 12u) << b.error();
+ EXPECT_EQ(DumpInstructions(b.types()),
+ R"(%2 = OpTypeInt 32 1
%3 = OpConstant %2 1
%4 = OpConstant %2 2
%6 = OpTypeBool
%9 = OpConstant %2 3
%10 = OpConstant %2 4
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(%1 = OpLabel
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(%1 = OpLabel
%5 = OpIEqual %6 %3 %4
OpSelectionMerge %7 None
OpBranchConditional %5 %8 %7
@@ -720,34 +704,31 @@ OpBranch %7
}
TEST_F(BuilderTest, Binary_LogicalAnd_WithLoads) {
- auto* a_var =
- Global("a", ty.bool_(), ast::StorageClass::kPrivate, Expr(true));
- auto* b_var =
- Global("b", ty.bool_(), ast::StorageClass::kPrivate, Expr(false));
+ auto* a_var = Global("a", ty.bool_(), ast::StorageClass::kPrivate, Expr(true));
+ auto* b_var = Global("b", ty.bool_(), ast::StorageClass::kPrivate, Expr(false));
- auto* expr = create<ast::BinaryExpression>(ast::BinaryOp::kLogicalAnd,
- Expr("a"), Expr("b"));
+ auto* expr = create<ast::BinaryExpression>(ast::BinaryOp::kLogicalAnd, Expr("a"), Expr("b"));
- WrapInFunction(expr);
+ WrapInFunction(expr);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- b.GenerateLabel(b.next_id());
+ b.push_function(Function{});
+ b.GenerateLabel(b.next_id());
- ASSERT_TRUE(b.GenerateGlobalVariable(a_var)) << b.error();
- ASSERT_TRUE(b.GenerateGlobalVariable(b_var)) << b.error();
+ ASSERT_TRUE(b.GenerateGlobalVariable(a_var)) << b.error();
+ ASSERT_TRUE(b.GenerateGlobalVariable(b_var)) << b.error();
- EXPECT_EQ(b.GenerateBinaryExpression(expr), 12u) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeBool
+ EXPECT_EQ(b.GenerateBinaryExpression(expr), 12u) << b.error();
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeBool
%3 = OpConstantTrue %2
%5 = OpTypePointer Private %2
%4 = OpVariable %5 Private %3
-%6 = OpConstantFalse %2
+%6 = OpConstantNull %2
%7 = OpVariable %5 Private %6
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(%1 = OpLabel
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(%1 = OpLabel
%8 = OpLoad %2 %4
OpSelectionMerge %9 None
OpBranchConditional %8 %10 %9
@@ -760,30 +741,30 @@ OpBranch %9
}
TEST_F(BuilderTest, Binary_logicalOr_Nested_LogicalAnd) {
- // Test an expression like
- // a || (b && c)
- // From: crbug.com/tint/355
+ // Test an expression like
+ // a || (b && c)
+ // From: crbug.com/tint/355
- auto* logical_and_expr = create<ast::BinaryExpression>(
- ast::BinaryOp::kLogicalAnd, Expr(true), Expr(false));
+ auto* logical_and_expr =
+ create<ast::BinaryExpression>(ast::BinaryOp::kLogicalAnd, Expr(true), Expr(false));
- auto* expr = create<ast::BinaryExpression>(ast::BinaryOp::kLogicalOr,
- Expr(true), logical_and_expr);
+ auto* expr =
+ create<ast::BinaryExpression>(ast::BinaryOp::kLogicalOr, Expr(true), logical_and_expr);
- WrapInFunction(expr);
+ WrapInFunction(expr);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- b.GenerateLabel(b.next_id());
+ b.push_function(Function{});
+ b.GenerateLabel(b.next_id());
- EXPECT_EQ(b.GenerateBinaryExpression(expr), 10u) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeBool
+ EXPECT_EQ(b.GenerateBinaryExpression(expr), 10u) << b.error();
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeBool
%3 = OpConstantTrue %2
-%8 = OpConstantFalse %2
+%8 = OpConstantNull %2
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(%1 = OpLabel
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(%1 = OpLabel
OpSelectionMerge %4 None
OpBranchConditional %3 %4 %5
%5 = OpLabel
@@ -800,30 +781,30 @@ OpBranch %4
}
TEST_F(BuilderTest, Binary_logicalAnd_Nested_LogicalOr) {
- // Test an expression like
- // a && (b || c)
- // From: crbug.com/tint/355
+ // Test an expression like
+ // a && (b || c)
+ // From: crbug.com/tint/355
- auto* logical_or_expr = create<ast::BinaryExpression>(
- ast::BinaryOp::kLogicalOr, Expr(true), Expr(false));
+ auto* logical_or_expr =
+ create<ast::BinaryExpression>(ast::BinaryOp::kLogicalOr, Expr(true), Expr(false));
- auto* expr = create<ast::BinaryExpression>(ast::BinaryOp::kLogicalAnd,
- Expr(true), logical_or_expr);
+ auto* expr =
+ create<ast::BinaryExpression>(ast::BinaryOp::kLogicalAnd, Expr(true), logical_or_expr);
- WrapInFunction(expr);
+ WrapInFunction(expr);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- b.GenerateLabel(b.next_id());
+ b.push_function(Function{});
+ b.GenerateLabel(b.next_id());
- EXPECT_EQ(b.GenerateBinaryExpression(expr), 10u) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeBool
+ EXPECT_EQ(b.GenerateBinaryExpression(expr), 10u) << b.error();
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeBool
%3 = OpConstantTrue %2
-%8 = OpConstantFalse %2
+%8 = OpConstantNull %2
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(%1 = OpLabel
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(%1 = OpLabel
OpSelectionMerge %4 None
OpBranchConditional %3 %5 %4
%5 = OpLabel
@@ -840,33 +821,30 @@ OpBranch %4
}
TEST_F(BuilderTest, Binary_LogicalOr) {
- auto* lhs =
- create<ast::BinaryExpression>(ast::BinaryOp::kEqual, Expr(1), Expr(2));
+ auto* lhs = create<ast::BinaryExpression>(ast::BinaryOp::kEqual, Expr(1_i), Expr(2_i));
- auto* rhs =
- create<ast::BinaryExpression>(ast::BinaryOp::kEqual, Expr(3), Expr(4));
+ auto* rhs = create<ast::BinaryExpression>(ast::BinaryOp::kEqual, Expr(3_i), Expr(4_i));
- auto* expr =
- create<ast::BinaryExpression>(ast::BinaryOp::kLogicalOr, lhs, rhs);
+ auto* expr = create<ast::BinaryExpression>(ast::BinaryOp::kLogicalOr, lhs, rhs);
- WrapInFunction(expr);
+ WrapInFunction(expr);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- b.GenerateLabel(b.next_id());
+ b.push_function(Function{});
+ b.GenerateLabel(b.next_id());
- EXPECT_EQ(b.GenerateBinaryExpression(expr), 12u) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()),
- R"(%2 = OpTypeInt 32 1
+ EXPECT_EQ(b.GenerateBinaryExpression(expr), 12u) << b.error();
+ EXPECT_EQ(DumpInstructions(b.types()),
+ R"(%2 = OpTypeInt 32 1
%3 = OpConstant %2 1
%4 = OpConstant %2 2
%6 = OpTypeBool
%9 = OpConstant %2 3
%10 = OpConstant %2 4
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(%1 = OpLabel
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(%1 = OpLabel
%5 = OpIEqual %6 %3 %4
OpSelectionMerge %7 None
OpBranchConditional %5 %7 %8
@@ -879,34 +857,31 @@ OpBranch %7
}
TEST_F(BuilderTest, Binary_LogicalOr_WithLoads) {
- auto* a_var =
- Global("a", ty.bool_(), ast::StorageClass::kPrivate, Expr(true));
- auto* b_var =
- Global("b", ty.bool_(), ast::StorageClass::kPrivate, Expr(false));
+ auto* a_var = Global("a", ty.bool_(), ast::StorageClass::kPrivate, Expr(true));
+ auto* b_var = Global("b", ty.bool_(), ast::StorageClass::kPrivate, Expr(false));
- auto* expr = create<ast::BinaryExpression>(ast::BinaryOp::kLogicalOr,
- Expr("a"), Expr("b"));
+ auto* expr = create<ast::BinaryExpression>(ast::BinaryOp::kLogicalOr, Expr("a"), Expr("b"));
- WrapInFunction(expr);
+ WrapInFunction(expr);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- b.GenerateLabel(b.next_id());
+ b.push_function(Function{});
+ b.GenerateLabel(b.next_id());
- ASSERT_TRUE(b.GenerateGlobalVariable(a_var)) << b.error();
- ASSERT_TRUE(b.GenerateGlobalVariable(b_var)) << b.error();
+ ASSERT_TRUE(b.GenerateGlobalVariable(a_var)) << b.error();
+ ASSERT_TRUE(b.GenerateGlobalVariable(b_var)) << b.error();
- EXPECT_EQ(b.GenerateBinaryExpression(expr), 12u) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeBool
+ EXPECT_EQ(b.GenerateBinaryExpression(expr), 12u) << b.error();
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeBool
%3 = OpConstantTrue %2
%5 = OpTypePointer Private %2
%4 = OpVariable %5 Private %3
-%6 = OpConstantFalse %2
+%6 = OpConstantNull %2
%7 = OpVariable %5 Private %6
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(%1 = OpLabel
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(%1 = OpLabel
%8 = OpLoad %2 %4
OpSelectionMerge %9 None
OpBranchConditional %8 %9 %10
@@ -921,64 +896,62 @@ OpBranch %9
namespace BinaryArithVectorScalar {
enum class Type { f32, i32, u32 };
-static const ast::Expression* MakeVectorExpr(ProgramBuilder* builder,
- Type type) {
- switch (type) {
- case Type::f32:
- return builder->vec3<ProgramBuilder::f32>(1.f, 1.f, 1.f);
- case Type::i32:
- return builder->vec3<ProgramBuilder::i32>(1, 1, 1);
- case Type::u32:
- return builder->vec3<ProgramBuilder::u32>(1u, 1u, 1u);
- }
- return nullptr;
+static const ast::Expression* MakeVectorExpr(ProgramBuilder* builder, Type type) {
+ switch (type) {
+ case Type::f32:
+ return builder->vec3<f32>(1_f, 1_f, 1_f);
+ case Type::i32:
+ return builder->vec3<i32>(1_i, 1_i, 1_i);
+ case Type::u32:
+ return builder->vec3<u32>(1_u, 1_u, 1_u);
+ }
+ return nullptr;
}
-static const ast::Expression* MakeScalarExpr(ProgramBuilder* builder,
- Type type) {
- switch (type) {
- case Type::f32:
- return builder->Expr(1.f);
- case Type::i32:
- return builder->Expr(1);
- case Type::u32:
- return builder->Expr(1u);
- }
- return nullptr;
+static const ast::Expression* MakeScalarExpr(ProgramBuilder* builder, Type type) {
+ switch (type) {
+ case Type::f32:
+ return builder->Expr(1_f);
+ case Type::i32:
+ return builder->Expr(1_i);
+ case Type::u32:
+ return builder->Expr(1_u);
+ }
+ return nullptr;
}
static std::string OpTypeDecl(Type type) {
- switch (type) {
- case Type::f32:
- return "OpTypeFloat 32";
- case Type::i32:
- return "OpTypeInt 32 1";
- case Type::u32:
- return "OpTypeInt 32 0";
- }
- return {};
+ switch (type) {
+ case Type::f32:
+ return "OpTypeFloat 32";
+ case Type::i32:
+ return "OpTypeInt 32 1";
+ case Type::u32:
+ return "OpTypeInt 32 0";
+ }
+ return {};
}
struct Param {
- Type type;
- ast::BinaryOp op;
- std::string name;
+ Type type;
+ ast::BinaryOp op;
+ std::string name;
};
using BinaryArithVectorScalarTest = TestParamHelper<Param>;
TEST_P(BinaryArithVectorScalarTest, VectorScalar) {
- auto& param = GetParam();
+ auto& param = GetParam();
- const ast::Expression* lhs = MakeVectorExpr(this, param.type);
- const ast::Expression* rhs = MakeScalarExpr(this, param.type);
- std::string op_type_decl = OpTypeDecl(param.type);
+ const ast::Expression* lhs = MakeVectorExpr(this, param.type);
+ const ast::Expression* rhs = MakeScalarExpr(this, param.type);
+ std::string op_type_decl = OpTypeDecl(param.type);
- auto* expr = create<ast::BinaryExpression>(param.op, lhs, rhs);
+ auto* expr = create<ast::BinaryExpression>(param.op, lhs, rhs);
- WrapInFunction(expr);
+ WrapInFunction(expr);
- spirv::Builder& b = Build();
- ASSERT_TRUE(b.Build()) << b.error();
+ spirv::Builder& b = Build();
+ ASSERT_TRUE(b.Build()) << b.error();
- EXPECT_EQ(DumpBuilder(b), R"(OpCapability Shader
+ EXPECT_EQ(DumpBuilder(b), R"(OpCapability Shader
OpMemoryModel Logical GLSL450
OpEntryPoint GLCompute %3 "test_function"
OpExecutionMode %3 LocalSize 1 1 1
@@ -1000,23 +973,23 @@ OpReturn
OpFunctionEnd
)");
- Validate(b);
+ Validate(b);
}
TEST_P(BinaryArithVectorScalarTest, ScalarVector) {
- auto& param = GetParam();
+ auto& param = GetParam();
- const ast::Expression* lhs = MakeScalarExpr(this, param.type);
- const ast::Expression* rhs = MakeVectorExpr(this, param.type);
- std::string op_type_decl = OpTypeDecl(param.type);
+ const ast::Expression* lhs = MakeScalarExpr(this, param.type);
+ const ast::Expression* rhs = MakeVectorExpr(this, param.type);
+ std::string op_type_decl = OpTypeDecl(param.type);
- auto* expr = create<ast::BinaryExpression>(param.op, lhs, rhs);
+ auto* expr = create<ast::BinaryExpression>(param.op, lhs, rhs);
- WrapInFunction(expr);
+ WrapInFunction(expr);
- spirv::Builder& b = Build();
- ASSERT_TRUE(b.Build()) << b.error();
+ spirv::Builder& b = Build();
+ ASSERT_TRUE(b.Build()) << b.error();
- EXPECT_EQ(DumpBuilder(b), R"(OpCapability Shader
+ EXPECT_EQ(DumpBuilder(b), R"(OpCapability Shader
OpMemoryModel Logical GLSL450
OpEntryPoint GLCompute %3 "test_function"
OpExecutionMode %3 LocalSize 1 1 1
@@ -1038,48 +1011,47 @@ OpReturn
OpFunctionEnd
)");
- Validate(b);
+ Validate(b);
}
-INSTANTIATE_TEST_SUITE_P(
- BuilderTest,
- BinaryArithVectorScalarTest,
- testing::Values(Param{Type::f32, ast::BinaryOp::kAdd, "OpFAdd"},
- Param{Type::f32, ast::BinaryOp::kDivide, "OpFDiv"},
- // NOTE: Modulo not allowed on mixed float scalar-vector
- // Param{Type::f32, ast::BinaryOp::kModulo, "OpFMod"},
- // NOTE: We test f32 multiplies separately as we emit
- // OpVectorTimesScalar for this case
- // Param{Type::i32, ast::BinaryOp::kMultiply, "OpIMul"},
- Param{Type::f32, ast::BinaryOp::kSubtract, "OpFSub"},
-
- Param{Type::i32, ast::BinaryOp::kAdd, "OpIAdd"},
- Param{Type::i32, ast::BinaryOp::kDivide, "OpSDiv"},
- Param{Type::i32, ast::BinaryOp::kModulo, "OpSMod"},
- Param{Type::i32, ast::BinaryOp::kMultiply, "OpIMul"},
- Param{Type::i32, ast::BinaryOp::kSubtract, "OpISub"},
-
- Param{Type::u32, ast::BinaryOp::kAdd, "OpIAdd"},
- Param{Type::u32, ast::BinaryOp::kDivide, "OpUDiv"},
- Param{Type::u32, ast::BinaryOp::kModulo, "OpUMod"},
- Param{Type::u32, ast::BinaryOp::kMultiply, "OpIMul"},
- Param{Type::u32, ast::BinaryOp::kSubtract, "OpISub"}));
+INSTANTIATE_TEST_SUITE_P(BuilderTest,
+ BinaryArithVectorScalarTest,
+ testing::Values(Param{Type::f32, ast::BinaryOp::kAdd, "OpFAdd"},
+ Param{Type::f32, ast::BinaryOp::kDivide, "OpFDiv"},
+ // NOTE: Modulo not allowed on mixed float scalar-vector
+ // Param{Type::f32, ast::BinaryOp::kModulo, "OpFMod"},
+ // NOTE: We test f32 multiplies separately as we emit
+ // OpVectorTimesScalar for this case
+ // Param{Type::i32, ast::BinaryOp::kMultiply, "OpIMul"},
+ Param{Type::f32, ast::BinaryOp::kSubtract, "OpFSub"},
+
+ Param{Type::i32, ast::BinaryOp::kAdd, "OpIAdd"},
+ Param{Type::i32, ast::BinaryOp::kDivide, "OpSDiv"},
+ Param{Type::i32, ast::BinaryOp::kModulo, "OpSMod"},
+ Param{Type::i32, ast::BinaryOp::kMultiply, "OpIMul"},
+ Param{Type::i32, ast::BinaryOp::kSubtract, "OpISub"},
+
+ Param{Type::u32, ast::BinaryOp::kAdd, "OpIAdd"},
+ Param{Type::u32, ast::BinaryOp::kDivide, "OpUDiv"},
+ Param{Type::u32, ast::BinaryOp::kModulo, "OpUMod"},
+ Param{Type::u32, ast::BinaryOp::kMultiply, "OpIMul"},
+ Param{Type::u32, ast::BinaryOp::kSubtract, "OpISub"}));
using BinaryArithVectorScalarMultiplyTest = TestParamHelper<Param>;
TEST_P(BinaryArithVectorScalarMultiplyTest, VectorScalar) {
- auto& param = GetParam();
+ auto& param = GetParam();
- const ast::Expression* lhs = MakeVectorExpr(this, param.type);
- const ast::Expression* rhs = MakeScalarExpr(this, param.type);
- std::string op_type_decl = OpTypeDecl(param.type);
+ const ast::Expression* lhs = MakeVectorExpr(this, param.type);
+ const ast::Expression* rhs = MakeScalarExpr(this, param.type);
+ std::string op_type_decl = OpTypeDecl(param.type);
- auto* expr = create<ast::BinaryExpression>(param.op, lhs, rhs);
+ auto* expr = create<ast::BinaryExpression>(param.op, lhs, rhs);
- WrapInFunction(expr);
+ WrapInFunction(expr);
- spirv::Builder& b = Build();
- ASSERT_TRUE(b.Build()) << b.error();
+ spirv::Builder& b = Build();
+ ASSERT_TRUE(b.Build()) << b.error();
- EXPECT_EQ(DumpBuilder(b), R"(OpCapability Shader
+ EXPECT_EQ(DumpBuilder(b), R"(OpCapability Shader
OpMemoryModel Logical GLSL450
OpEntryPoint GLCompute %3 "test_function"
OpExecutionMode %3 LocalSize 1 1 1
@@ -1097,23 +1069,23 @@ OpReturn
OpFunctionEnd
)");
- Validate(b);
+ Validate(b);
}
TEST_P(BinaryArithVectorScalarMultiplyTest, ScalarVector) {
- auto& param = GetParam();
+ auto& param = GetParam();
- const ast::Expression* lhs = MakeScalarExpr(this, param.type);
- const ast::Expression* rhs = MakeVectorExpr(this, param.type);
- std::string op_type_decl = OpTypeDecl(param.type);
+ const ast::Expression* lhs = MakeScalarExpr(this, param.type);
+ const ast::Expression* rhs = MakeVectorExpr(this, param.type);
+ std::string op_type_decl = OpTypeDecl(param.type);
- auto* expr = create<ast::BinaryExpression>(param.op, lhs, rhs);
+ auto* expr = create<ast::BinaryExpression>(param.op, lhs, rhs);
- WrapInFunction(expr);
+ WrapInFunction(expr);
- spirv::Builder& b = Build();
- ASSERT_TRUE(b.Build()) << b.error();
+ spirv::Builder& b = Build();
+ ASSERT_TRUE(b.Build()) << b.error();
- EXPECT_EQ(DumpBuilder(b), R"(OpCapability Shader
+ EXPECT_EQ(DumpBuilder(b), R"(OpCapability Shader
OpMemoryModel Logical GLSL450
OpEntryPoint GLCompute %3 "test_function"
OpExecutionMode %3 LocalSize 1 1 1
@@ -1131,37 +1103,36 @@ OpReturn
OpFunctionEnd
)");
- Validate(b);
+ Validate(b);
}
INSTANTIATE_TEST_SUITE_P(BuilderTest,
BinaryArithVectorScalarMultiplyTest,
- testing::Values(Param{
- Type::f32, ast::BinaryOp::kMultiply, "OpFMul"}));
+ testing::Values(Param{Type::f32, ast::BinaryOp::kMultiply, "OpFMul"}));
} // namespace BinaryArithVectorScalar
namespace BinaryArithMatrixMatrix {
struct Param {
- ast::BinaryOp op;
- std::string name;
+ ast::BinaryOp op;
+ std::string name;
};
using BinaryArithMatrixMatrix = TestParamHelper<Param>;
TEST_P(BinaryArithMatrixMatrix, AddOrSubtract) {
- auto& param = GetParam();
+ auto& param = GetParam();
- const ast::Expression* lhs = mat3x4<f32>();
- const ast::Expression* rhs = mat3x4<f32>();
+ const ast::Expression* lhs = mat3x4<f32>();
+ const ast::Expression* rhs = mat3x4<f32>();
- auto* expr = create<ast::BinaryExpression>(param.op, lhs, rhs);
+ auto* expr = create<ast::BinaryExpression>(param.op, lhs, rhs);
- WrapInFunction(expr);
+ WrapInFunction(expr);
- spirv::Builder& b = Build();
- ASSERT_TRUE(b.Build()) << b.error();
+ spirv::Builder& b = Build();
+ ASSERT_TRUE(b.Build()) << b.error();
- EXPECT_EQ(DumpBuilder(b), R"(OpCapability Shader
+ EXPECT_EQ(DumpBuilder(b), R"(OpCapability Shader
OpMemoryModel Logical GLSL450
OpEntryPoint GLCompute %3 "test_function"
OpExecutionMode %3 LocalSize 1 1 1
@@ -1188,7 +1159,7 @@ OpReturn
OpFunctionEnd
)");
- Validate(b);
+ Validate(b);
}
INSTANTIATE_TEST_SUITE_P( //
BuilderTest,
@@ -1198,19 +1169,19 @@ INSTANTIATE_TEST_SUITE_P( //
using BinaryArithMatrixMatrixMultiply = TestParamHelper<Param>;
TEST_P(BinaryArithMatrixMatrixMultiply, Multiply) {
- auto& param = GetParam();
+ auto& param = GetParam();
- const ast::Expression* lhs = mat3x4<f32>();
- const ast::Expression* rhs = mat4x3<f32>();
+ const ast::Expression* lhs = mat3x4<f32>();
+ const ast::Expression* rhs = mat4x3<f32>();
- auto* expr = create<ast::BinaryExpression>(param.op, lhs, rhs);
+ auto* expr = create<ast::BinaryExpression>(param.op, lhs, rhs);
- WrapInFunction(expr);
+ WrapInFunction(expr);
- spirv::Builder& b = Build();
- ASSERT_TRUE(b.Build()) << b.error();
+ spirv::Builder& b = Build();
+ ASSERT_TRUE(b.Build()) << b.error();
- EXPECT_EQ(DumpBuilder(b), R"(OpCapability Shader
+ EXPECT_EQ(DumpBuilder(b), R"(OpCapability Shader
OpMemoryModel Logical GLSL450
OpEntryPoint GLCompute %3 "test_function"
OpExecutionMode %3 LocalSize 1 1 1
@@ -1232,7 +1203,7 @@ OpReturn
OpFunctionEnd
)");
- Validate(b);
+ Validate(b);
}
INSTANTIATE_TEST_SUITE_P( //
BuilderTest,
diff --git a/chromium/third_party/dawn/src/tint/writer/spirv/builder_bitcast_expression_test.cc b/chromium/third_party/dawn/src/tint/writer/spirv/builder_bitcast_expression_test.cc
index 655ed33ae11..9dff5f675e2 100644
--- a/chromium/third_party/dawn/src/tint/writer/spirv/builder_bitcast_expression_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/spirv/builder_bitcast_expression_test.cc
@@ -15,45 +15,47 @@
#include "src/tint/writer/spirv/spv_dump.h"
#include "src/tint/writer/spirv/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::spirv {
namespace {
using BuilderTest = TestHelper;
TEST_F(BuilderTest, Bitcast) {
- auto* bitcast = create<ast::BitcastExpression>(ty.u32(), Expr(2.4f));
+ auto* bitcast = create<ast::BitcastExpression>(ty.u32(), Expr(2.4_f));
- WrapInFunction(bitcast);
+ WrapInFunction(bitcast);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- EXPECT_EQ(b.GenerateBitcastExpression(bitcast), 1u);
+ b.push_function(Function{});
+ EXPECT_EQ(b.GenerateBitcastExpression(bitcast), 1u);
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeInt 32 0
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeInt 32 0
%3 = OpTypeFloat 32
%4 = OpConstant %3 2.4000001
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(%1 = OpBitcast %2 %4
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(%1 = OpBitcast %2 %4
)");
}
TEST_F(BuilderTest, Bitcast_DuplicateType) {
- auto* bitcast = create<ast::BitcastExpression>(ty.f32(), Expr(2.4f));
+ auto* bitcast = create<ast::BitcastExpression>(ty.f32(), Expr(2.4_f));
- WrapInFunction(bitcast);
+ WrapInFunction(bitcast);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- EXPECT_EQ(b.GenerateBitcastExpression(bitcast), 1u);
+ b.push_function(Function{});
+ EXPECT_EQ(b.GenerateBitcastExpression(bitcast), 1u);
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
%3 = OpConstant %2 2.4000001
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(%1 = OpCopyObject %2 %3
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(%1 = OpCopyObject %2 %3
)");
}
diff --git a/chromium/third_party/dawn/src/tint/writer/spirv/builder_block_test.cc b/chromium/third_party/dawn/src/tint/writer/spirv/builder_block_test.cc
index 462161b06c7..85a2371fe05 100644
--- a/chromium/third_party/dawn/src/tint/writer/spirv/builder_block_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/spirv/builder_block_test.cc
@@ -15,30 +15,31 @@
#include "src/tint/writer/spirv/spv_dump.h"
#include "src/tint/writer/spirv/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::spirv {
namespace {
using BuilderTest = TestHelper;
TEST_F(BuilderTest, Block) {
- // Note, this test uses shadow variables which aren't allowed in WGSL but
- // serves to prove the block code is pushing new scopes as needed.
- auto* inner = Block(Decl(Var("var", ty.f32(), ast::StorageClass::kNone)),
- Assign("var", 2.f));
- auto* outer = Block(Decl(Var("var", ty.f32(), ast::StorageClass::kNone)),
- Assign("var", 1.f), inner, Assign("var", 3.f));
+ // Note, this test uses shadow variables which aren't allowed in WGSL but
+ // serves to prove the block code is pushing new scopes as needed.
+ auto* inner = Block(Decl(Var("var", ty.f32(), ast::StorageClass::kNone)), Assign("var", 2_f));
+ auto* outer = Block(Decl(Var("var", ty.f32(), ast::StorageClass::kNone)), Assign("var", 1_f),
+ inner, Assign("var", 3_f));
- WrapInFunction(outer);
+ WrapInFunction(outer);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- ASSERT_FALSE(b.has_error()) << b.error();
+ b.push_function(Function{});
+ ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_TRUE(b.GenerateStatement(outer)) << b.error();
- EXPECT_FALSE(b.has_error());
+ EXPECT_TRUE(b.GenerateStatement(outer)) << b.error();
+ EXPECT_FALSE(b.has_error());
- EXPECT_EQ(DumpInstructions(b.types()), R"(%3 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%3 = OpTypeFloat 32
%2 = OpTypePointer Function %3
%4 = OpConstantNull %3
%5 = OpConstant %3 1
@@ -46,13 +47,13 @@ TEST_F(BuilderTest, Block) {
%8 = OpConstant %3 3
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].variables()),
- R"(%1 = OpVariable %2 Function %4
+ EXPECT_EQ(DumpInstructions(b.functions()[0].variables()),
+ R"(%1 = OpVariable %2 Function %4
%6 = OpVariable %2 Function %4
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(OpStore %1 %5
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(OpStore %1 %5
OpStore %6 %7
OpStore %1 %8
)");
diff --git a/chromium/third_party/dawn/src/tint/writer/spirv/builder_builtin_test.cc b/chromium/third_party/dawn/src/tint/writer/spirv/builder_builtin_test.cc
index aec6fdffb50..4f6dca8c980 100644
--- a/chromium/third_party/dawn/src/tint/writer/spirv/builder_builtin_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/spirv/builder_builtin_test.cc
@@ -14,11 +14,13 @@
#include "src/tint/ast/call_statement.h"
#include "src/tint/ast/stage_attribute.h"
-#include "src/tint/sem/depth_texture_type.h"
+#include "src/tint/sem/depth_texture.h"
#include "src/tint/utils/string.h"
#include "src/tint/writer/spirv/spv_dump.h"
#include "src/tint/writer/spirv/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::spirv {
namespace {
@@ -28,30 +30,30 @@ template <typename T>
using BuiltinBuilderTestWithParam = TestParamHelper<T>;
struct BuiltinData {
- std::string name;
- std::string op;
+ std::string name;
+ std::string op;
};
inline std::ostream& operator<<(std::ostream& out, BuiltinData data) {
- out << data.name;
- return out;
+ out << data.name;
+ return out;
}
using BuiltinBoolTest = BuiltinBuilderTestWithParam<BuiltinData>;
TEST_P(BuiltinBoolTest, Call_Bool_Scalar) {
- auto param = GetParam();
- auto* var = Global("v", ty.bool_(), ast::StorageClass::kPrivate);
- auto* expr = Call(param.name, "v");
- auto* func = Func("a_func", {}, ty.void_(),
- {
- Assign(Phony(), expr),
- });
+ auto param = GetParam();
+ auto* var = Global("v", ty.bool_(), ast::StorageClass::kPrivate);
+ auto* expr = Call(param.name, "v");
+ auto* func = Func("a_func", {}, ty.void_(),
+ {
+ Assign(Phony(), expr),
+ });
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- ASSERT_TRUE(b.GenerateGlobalVariable(var)) << b.error();
- ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
+ ASSERT_TRUE(b.GenerateGlobalVariable(var)) << b.error();
+ ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%3 = OpTypeBool
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%3 = OpTypeBool
%2 = OpTypePointer Private %3
%4 = OpConstantNull %3
%1 = OpVariable %2 Private %4
@@ -59,26 +61,25 @@ TEST_P(BuiltinBoolTest, Call_Bool_Scalar) {
%5 = OpTypeFunction %6
)");
- // both any and all are 'passthrough' for scalar booleans
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- "%10 = OpLoad %3 %1\nOpReturn\n");
+ // both any and all are 'passthrough' for scalar booleans
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()), "%10 = OpLoad %3 %1\nOpReturn\n");
}
TEST_P(BuiltinBoolTest, Call_Bool_Vector) {
- auto param = GetParam();
- auto* var = Global("v", ty.vec3<bool>(), ast::StorageClass::kPrivate);
- auto* expr = Call(param.name, "v");
- auto* func = Func("a_func", {}, ty.void_(),
- {
- Assign(Phony(), expr),
- });
+ auto param = GetParam();
+ auto* var = Global("v", ty.vec3<bool>(), ast::StorageClass::kPrivate);
+ auto* expr = Call(param.name, "v");
+ auto* func = Func("a_func", {}, ty.void_(),
+ {
+ Assign(Phony(), expr),
+ });
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- ASSERT_TRUE(b.GenerateGlobalVariable(var)) << b.error();
- ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
+ ASSERT_TRUE(b.GenerateGlobalVariable(var)) << b.error();
+ ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%4 = OpTypeBool
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%4 = OpTypeBool
%3 = OpTypeVector %4 3
%2 = OpTypePointer Private %3
%5 = OpConstantNull %3
@@ -87,34 +88,33 @@ TEST_P(BuiltinBoolTest, Call_Bool_Vector) {
%6 = OpTypeFunction %7
)");
- auto expected = utils::ReplaceAll(R"(%11 = OpLoad %3 %1
+ auto expected = utils::ReplaceAll(R"(%11 = OpLoad %3 %1
%10 = ${op} %4 %11
OpReturn
)",
- "${op}", param.op);
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()), expected);
+ "${op}", param.op);
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()), expected);
}
INSTANTIATE_TEST_SUITE_P(BuiltinBuilderTest,
BuiltinBoolTest,
- testing::Values(BuiltinData{"any", "OpAny"},
- BuiltinData{"all", "OpAll"}));
+ testing::Values(BuiltinData{"any", "OpAny"}, BuiltinData{"all", "OpAll"}));
using BuiltinIntTest = BuiltinBuilderTestWithParam<BuiltinData>;
TEST_P(BuiltinIntTest, Call_SInt_Scalar) {
- auto param = GetParam();
- auto* var = Global("v", ty.i32(), ast::StorageClass::kPrivate);
- auto* expr = Call(param.name, "v");
- auto* func = Func("a_func", {}, ty.void_(),
- {
- Assign(Phony(), expr),
- });
+ auto param = GetParam();
+ auto* var = Global("v", ty.i32(), ast::StorageClass::kPrivate);
+ auto* expr = Call(param.name, "v");
+ auto* func = Func("a_func", {}, ty.void_(),
+ {
+ Assign(Phony(), expr),
+ });
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- ASSERT_TRUE(b.GenerateGlobalVariable(var)) << b.error();
- ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
+ ASSERT_TRUE(b.GenerateGlobalVariable(var)) << b.error();
+ ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%3 = OpTypeInt 32 1
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%3 = OpTypeInt 32 1
%2 = OpTypePointer Private %3
%4 = OpConstantNull %3
%1 = OpVariable %2 Private %4
@@ -122,29 +122,29 @@ TEST_P(BuiltinIntTest, Call_SInt_Scalar) {
%5 = OpTypeFunction %6
)");
- auto expected = utils::ReplaceAll(R"(%10 = OpLoad %3 %1
+ auto expected = utils::ReplaceAll(R"(%10 = OpLoad %3 %1
%9 = ${op} %3 %10
OpReturn
)",
- "${op}", param.op);
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()), expected);
+ "${op}", param.op);
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()), expected);
}
TEST_P(BuiltinIntTest, Call_SInt_Vector) {
- auto param = GetParam();
- auto* var = Global("v", ty.vec3<i32>(), ast::StorageClass::kPrivate);
- auto* expr = Call(param.name, "v");
- auto* func = Func("a_func", {}, ty.void_(),
- {
- Assign(Phony(), expr),
- });
+ auto param = GetParam();
+ auto* var = Global("v", ty.vec3<i32>(), ast::StorageClass::kPrivate);
+ auto* expr = Call(param.name, "v");
+ auto* func = Func("a_func", {}, ty.void_(),
+ {
+ Assign(Phony(), expr),
+ });
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- ASSERT_TRUE(b.GenerateGlobalVariable(var)) << b.error();
- ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
+ ASSERT_TRUE(b.GenerateGlobalVariable(var)) << b.error();
+ ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%4 = OpTypeInt 32 1
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%4 = OpTypeInt 32 1
%3 = OpTypeVector %4 3
%2 = OpTypePointer Private %3
%5 = OpConstantNull %3
@@ -153,29 +153,29 @@ TEST_P(BuiltinIntTest, Call_SInt_Vector) {
%6 = OpTypeFunction %7
)");
- auto expected = utils::ReplaceAll(R"(%11 = OpLoad %3 %1
+ auto expected = utils::ReplaceAll(R"(%11 = OpLoad %3 %1
%10 = ${op} %3 %11
OpReturn
)",
- "${op}", param.op);
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()), expected);
+ "${op}", param.op);
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()), expected);
}
TEST_P(BuiltinIntTest, Call_UInt_Scalar) {
- auto param = GetParam();
- auto* var = Global("v", ty.u32(), ast::StorageClass::kPrivate);
- auto* expr = Call(param.name, "v");
- auto* func = Func("a_func", {}, ty.void_(),
- {
- Assign(Phony(), expr),
- });
+ auto param = GetParam();
+ auto* var = Global("v", ty.u32(), ast::StorageClass::kPrivate);
+ auto* expr = Call(param.name, "v");
+ auto* func = Func("a_func", {}, ty.void_(),
+ {
+ Assign(Phony(), expr),
+ });
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- ASSERT_TRUE(b.GenerateGlobalVariable(var)) << b.error();
- ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
+ ASSERT_TRUE(b.GenerateGlobalVariable(var)) << b.error();
+ ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%3 = OpTypeInt 32 0
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%3 = OpTypeInt 32 0
%2 = OpTypePointer Private %3
%4 = OpConstantNull %3
%1 = OpVariable %2 Private %4
@@ -183,29 +183,29 @@ TEST_P(BuiltinIntTest, Call_UInt_Scalar) {
%5 = OpTypeFunction %6
)");
- auto expected = utils::ReplaceAll(R"(%10 = OpLoad %3 %1
+ auto expected = utils::ReplaceAll(R"(%10 = OpLoad %3 %1
%9 = ${op} %3 %10
OpReturn
)",
- "${op}", param.op);
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()), expected);
+ "${op}", param.op);
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()), expected);
}
TEST_P(BuiltinIntTest, Call_UInt_Vector) {
- auto param = GetParam();
- auto* var = Global("v", ty.vec3<u32>(), ast::StorageClass::kPrivate);
- auto* expr = Call(param.name, "v");
- auto* func = Func("a_func", {}, ty.void_(),
- {
- Assign(Phony(), expr),
- });
+ auto param = GetParam();
+ auto* var = Global("v", ty.vec3<u32>(), ast::StorageClass::kPrivate);
+ auto* expr = Call(param.name, "v");
+ auto* func = Func("a_func", {}, ty.void_(),
+ {
+ Assign(Phony(), expr),
+ });
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- ASSERT_TRUE(b.GenerateGlobalVariable(var)) << b.error();
- ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
+ ASSERT_TRUE(b.GenerateGlobalVariable(var)) << b.error();
+ ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%4 = OpTypeInt 32 0
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%4 = OpTypeInt 32 0
%3 = OpTypeVector %4 3
%2 = OpTypePointer Private %3
%5 = OpConstantNull %3
@@ -214,33 +214,32 @@ TEST_P(BuiltinIntTest, Call_UInt_Vector) {
%6 = OpTypeFunction %7
)");
- auto expected = utils::ReplaceAll(R"(%11 = OpLoad %3 %1
+ auto expected = utils::ReplaceAll(R"(%11 = OpLoad %3 %1
%10 = ${op} %3 %11
OpReturn
)",
- "${op}", param.op);
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()), expected);
+ "${op}", param.op);
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()), expected);
}
-INSTANTIATE_TEST_SUITE_P(
- BuiltinBuilderTest,
- BuiltinIntTest,
- testing::Values(BuiltinData{"countOneBits", "OpBitCount"},
- BuiltinData{"reverseBits", "OpBitReverse"}));
+INSTANTIATE_TEST_SUITE_P(BuiltinBuilderTest,
+ BuiltinIntTest,
+ testing::Values(BuiltinData{"countOneBits", "OpBitCount"},
+ BuiltinData{"reverseBits", "OpBitReverse"}));
TEST_F(BuiltinBuilderTest, Call_Dot_F32) {
- auto* var = Global("v", ty.vec3<f32>(), ast::StorageClass::kPrivate);
- auto* expr = Call("dot", "v", "v");
- auto* func = Func("a_func", {}, ty.void_(),
- {
- Assign(Phony(), expr),
- });
+ auto* var = Global("v", ty.vec3<f32>(), ast::StorageClass::kPrivate);
+ auto* expr = Call("dot", "v", "v");
+ auto* func = Func("a_func", {}, ty.void_(),
+ {
+ Assign(Phony(), expr),
+ });
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- ASSERT_TRUE(b.GenerateGlobalVariable(var)) << b.error();
- ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
+ ASSERT_TRUE(b.GenerateGlobalVariable(var)) << b.error();
+ ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%4 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%4 = OpTypeFloat 32
%3 = OpTypeVector %4 3
%2 = OpTypePointer Private %3
%5 = OpConstantNull %3
@@ -248,8 +247,8 @@ TEST_F(BuiltinBuilderTest, Call_Dot_F32) {
%7 = OpTypeVoid
%6 = OpTypeFunction %7
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(%11 = OpLoad %3 %1
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(%11 = OpLoad %3 %1
%12 = OpLoad %3 %1
%10 = OpDot %4 %11 %12
OpReturn
@@ -257,19 +256,19 @@ OpReturn
}
TEST_F(BuiltinBuilderTest, Call_Dot_U32) {
- auto* var = Global("v", ty.vec3<u32>(), ast::StorageClass::kPrivate);
- auto* expr = Call("dot", "v", "v");
- auto* func = Func("a_func", {}, ty.void_(),
- {
- Assign(Phony(), expr),
- });
+ auto* var = Global("v", ty.vec3<u32>(), ast::StorageClass::kPrivate);
+ auto* expr = Call("dot", "v", "v");
+ auto* func = Func("a_func", {}, ty.void_(),
+ {
+ Assign(Phony(), expr),
+ });
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- ASSERT_TRUE(b.GenerateGlobalVariable(var)) << b.error();
- ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
+ ASSERT_TRUE(b.GenerateGlobalVariable(var)) << b.error();
+ ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%4 = OpTypeInt 32 0
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%4 = OpTypeInt 32 0
%3 = OpTypeVector %4 3
%2 = OpTypePointer Private %3
%5 = OpConstantNull %3
@@ -277,8 +276,8 @@ TEST_F(BuiltinBuilderTest, Call_Dot_U32) {
%7 = OpTypeVoid
%6 = OpTypeFunction %7
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(%11 = OpLoad %3 %1
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(%11 = OpLoad %3 %1
%12 = OpLoad %3 %1
%13 = OpCompositeExtract %4 %11 0
%14 = OpCompositeExtract %4 %12 0
@@ -296,19 +295,19 @@ OpReturn
}
TEST_F(BuiltinBuilderTest, Call_Dot_I32) {
- auto* var = Global("v", ty.vec3<i32>(), ast::StorageClass::kPrivate);
- auto* expr = Call("dot", "v", "v");
- auto* func = Func("a_func", {}, ty.void_(),
- {
- Assign(Phony(), expr),
- });
+ auto* var = Global("v", ty.vec3<i32>(), ast::StorageClass::kPrivate);
+ auto* expr = Call("dot", "v", "v");
+ auto* func = Func("a_func", {}, ty.void_(),
+ {
+ Assign(Phony(), expr),
+ });
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- ASSERT_TRUE(b.GenerateGlobalVariable(var)) << b.error();
- ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
+ ASSERT_TRUE(b.GenerateGlobalVariable(var)) << b.error();
+ ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%4 = OpTypeInt 32 1
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%4 = OpTypeInt 32 1
%3 = OpTypeVector %4 3
%2 = OpTypePointer Private %3
%5 = OpConstantNull %3
@@ -316,8 +315,8 @@ TEST_F(BuiltinBuilderTest, Call_Dot_I32) {
%7 = OpTypeVoid
%6 = OpTypeFunction %7
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(%11 = OpLoad %3 %1
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(%11 = OpLoad %3 %1
%12 = OpLoad %3 %1
%13 = OpCompositeExtract %4 %11 0
%14 = OpCompositeExtract %4 %12 0
@@ -336,18 +335,18 @@ OpReturn
using BuiltinDeriveTest = BuiltinBuilderTestWithParam<BuiltinData>;
TEST_P(BuiltinDeriveTest, Call_Derivative_Scalar) {
- auto param = GetParam();
- auto* var = Global("v", ty.f32(), ast::StorageClass::kPrivate);
- auto* expr = Call(param.name, "v");
- auto* func = Func("func", {}, ty.void_(), {CallStmt(expr)},
- {Stage(ast::PipelineStage::kFragment)});
+ auto param = GetParam();
+ auto* var = Global("v", ty.f32(), ast::StorageClass::kPrivate);
+ auto* expr = Call(param.name, "v");
+ auto* func =
+ Func("func", {}, ty.void_(), {CallStmt(expr)}, {Stage(ast::PipelineStage::kFragment)});
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- ASSERT_TRUE(b.GenerateGlobalVariable(var)) << b.error();
- ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
+ ASSERT_TRUE(b.GenerateGlobalVariable(var)) << b.error();
+ ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%3 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%3 = OpTypeFloat 32
%2 = OpTypePointer Private %3
%4 = OpConstantNull %3
%1 = OpVariable %2 Private %4
@@ -355,33 +354,33 @@ TEST_P(BuiltinDeriveTest, Call_Derivative_Scalar) {
%5 = OpTypeFunction %6
)");
- auto expected = utils::ReplaceAll(R"(%10 = OpLoad %3 %1
+ auto expected = utils::ReplaceAll(R"(%10 = OpLoad %3 %1
%9 = ${op} %3 %10
OpReturn
)",
- "${op}", param.op);
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()), expected);
+ "${op}", param.op);
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()), expected);
}
TEST_P(BuiltinDeriveTest, Call_Derivative_Vector) {
- auto param = GetParam();
- auto* var = Global("v", ty.vec3<f32>(), ast::StorageClass::kPrivate);
- auto* expr = Call(param.name, "v");
- auto* func = Func("func", {}, ty.void_(), {CallStmt(expr)},
- {Stage(ast::PipelineStage::kFragment)});
+ auto param = GetParam();
+ auto* var = Global("v", ty.vec3<f32>(), ast::StorageClass::kPrivate);
+ auto* expr = Call(param.name, "v");
+ auto* func =
+ Func("func", {}, ty.void_(), {CallStmt(expr)}, {Stage(ast::PipelineStage::kFragment)});
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- ASSERT_TRUE(b.GenerateGlobalVariable(var)) << b.error();
- ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
+ ASSERT_TRUE(b.GenerateGlobalVariable(var)) << b.error();
+ ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
- if (param.name != "dpdx" && param.name != "dpdy" && param.name != "fwidth") {
- EXPECT_EQ(DumpInstructions(b.capabilities()),
- R"(OpCapability DerivativeControl
+ if (param.name != "dpdx" && param.name != "dpdy" && param.name != "fwidth") {
+ EXPECT_EQ(DumpInstructions(b.capabilities()),
+ R"(OpCapability DerivativeControl
)");
- }
+ }
- EXPECT_EQ(DumpInstructions(b.types()), R"(%4 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%4 = OpTypeFloat 32
%3 = OpTypeVector %4 3
%2 = OpTypePointer Private %3
%5 = OpConstantNull %3
@@ -390,44 +389,42 @@ TEST_P(BuiltinDeriveTest, Call_Derivative_Vector) {
%6 = OpTypeFunction %7
)");
- auto expected = utils::ReplaceAll(R"(%11 = OpLoad %3 %1
+ auto expected = utils::ReplaceAll(R"(%11 = OpLoad %3 %1
%10 = ${op} %3 %11
OpReturn
)",
- "${op}", param.op);
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()), expected);
-}
-INSTANTIATE_TEST_SUITE_P(
- BuiltinBuilderTest,
- BuiltinDeriveTest,
- testing::Values(BuiltinData{"dpdx", "OpDPdx"},
- BuiltinData{"dpdxFine", "OpDPdxFine"},
- BuiltinData{"dpdxCoarse", "OpDPdxCoarse"},
- BuiltinData{"dpdy", "OpDPdy"},
- BuiltinData{"dpdyFine", "OpDPdyFine"},
- BuiltinData{"dpdyCoarse", "OpDPdyCoarse"},
- BuiltinData{"fwidth", "OpFwidth"},
- BuiltinData{"fwidthFine", "OpFwidthFine"},
- BuiltinData{"fwidthCoarse", "OpFwidthCoarse"}));
+ "${op}", param.op);
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()), expected);
+}
+INSTANTIATE_TEST_SUITE_P(BuiltinBuilderTest,
+ BuiltinDeriveTest,
+ testing::Values(BuiltinData{"dpdx", "OpDPdx"},
+ BuiltinData{"dpdxFine", "OpDPdxFine"},
+ BuiltinData{"dpdxCoarse", "OpDPdxCoarse"},
+ BuiltinData{"dpdy", "OpDPdy"},
+ BuiltinData{"dpdyFine", "OpDPdyFine"},
+ BuiltinData{"dpdyCoarse", "OpDPdyCoarse"},
+ BuiltinData{"fwidth", "OpFwidth"},
+ BuiltinData{"fwidthFine", "OpFwidthFine"},
+ BuiltinData{"fwidthCoarse", "OpFwidthCoarse"}));
TEST_F(BuiltinBuilderTest, Call_Select) {
- auto* v3 = Global("v3", ty.vec3<f32>(), ast::StorageClass::kPrivate);
+ auto* v3 = Global("v3", ty.vec3<f32>(), ast::StorageClass::kPrivate);
- auto* bool_v3 =
- Global("bool_v3", ty.vec3<bool>(), ast::StorageClass::kPrivate);
- auto* expr = Call("select", "v3", "v3", "bool_v3");
- auto* func = Func("a_func", {}, ty.void_(),
- {
- Assign(Phony(), expr),
- });
+ auto* bool_v3 = Global("bool_v3", ty.vec3<bool>(), ast::StorageClass::kPrivate);
+ auto* expr = Call("select", "v3", "v3", "bool_v3");
+ auto* func = Func("a_func", {}, ty.void_(),
+ {
+ Assign(Phony(), expr),
+ });
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- ASSERT_TRUE(b.GenerateGlobalVariable(v3)) << b.error();
- ASSERT_TRUE(b.GenerateGlobalVariable(bool_v3)) << b.error();
- ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
+ ASSERT_TRUE(b.GenerateGlobalVariable(v3)) << b.error();
+ ASSERT_TRUE(b.GenerateGlobalVariable(bool_v3)) << b.error();
+ ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%4 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%4 = OpTypeFloat 32
%3 = OpTypeVector %4 3
%2 = OpTypePointer Private %3
%5 = OpConstantNull %3
@@ -440,8 +437,8 @@ TEST_F(BuiltinBuilderTest, Call_Select) {
%12 = OpTypeVoid
%11 = OpTypeFunction %12
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(%16 = OpLoad %8 %6
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(%16 = OpLoad %8 %6
%17 = OpLoad %3 %1
%18 = OpLoad %3 %1
%15 = OpSelect %3 %16 %17 %18
@@ -451,40 +448,38 @@ OpReturn
// This tests that we do not push OpTypeSampledImage and float_0 type twice.
TEST_F(BuiltinBuilderTest, Call_TextureSampleCompare_Twice) {
- auto* s = ty.sampler(ast::SamplerKind::kComparisonSampler);
- auto* t = ty.depth_texture(ast::TextureDimension::k2d);
-
- auto* tex = Global("texture", t,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(0),
- });
-
- auto* sampler = Global("sampler", s,
- ast::AttributeList{
- create<ast::BindingAttribute>(1),
- create<ast::GroupAttribute>(0),
- });
+ auto* s = ty.sampler(ast::SamplerKind::kComparisonSampler);
+ auto* t = ty.depth_texture(ast::TextureDimension::k2d);
+
+ auto* tex = Global("texture", t,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(0),
+ });
+
+ auto* sampler = Global("sampler", s,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(1),
+ create<ast::GroupAttribute>(0),
+ });
- auto* expr1 = Call("textureSampleCompare", "texture", "sampler",
- vec2<f32>(1.0f, 2.0f), 2.0f);
- auto* expr2 = Call("textureSampleCompare", "texture", "sampler",
- vec2<f32>(1.0f, 2.0f), 2.0f);
+ auto* expr1 = Call("textureSampleCompare", "texture", "sampler", vec2<f32>(1_f, 2_f), 2_f);
+ auto* expr2 = Call("textureSampleCompare", "texture", "sampler", vec2<f32>(1_f, 2_f), 2_f);
- Func("f1", {}, ty.void_(), {CallStmt(expr1)}, {});
- Func("f2", {}, ty.void_(), {CallStmt(expr2)}, {});
+ Func("f1", {}, ty.void_(), {CallStmt(expr1)}, {});
+ Func("f2", {}, ty.void_(), {CallStmt(expr2)}, {});
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
+ b.push_function(Function{});
- ASSERT_TRUE(b.GenerateGlobalVariable(tex)) << b.error();
- ASSERT_TRUE(b.GenerateGlobalVariable(sampler)) << b.error();
+ ASSERT_TRUE(b.GenerateGlobalVariable(tex)) << b.error();
+ ASSERT_TRUE(b.GenerateGlobalVariable(sampler)) << b.error();
- EXPECT_EQ(b.GenerateExpression(expr1), 8u) << b.error();
- EXPECT_EQ(b.GenerateExpression(expr2), 17u) << b.error();
+ EXPECT_EQ(b.GenerateExpression(expr1), 8u) << b.error();
+ EXPECT_EQ(b.GenerateExpression(expr2), 17u) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%4 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%4 = OpTypeFloat 32
%3 = OpTypeImage %4 2D 0 0 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
%1 = OpVariable %2 UniformConstant
@@ -498,8 +493,8 @@ TEST_F(BuiltinBuilderTest, Call_TextureSampleCompare_Twice) {
%16 = OpConstantComposite %13 %14 %15
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(%9 = OpLoad %7 %5
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(%9 = OpLoad %7 %5
%10 = OpLoad %3 %1
%12 = OpSampledImage %11 %10 %9
%8 = OpImageSampleDrefImplicitLod %4 %12 %16 %15
@@ -511,19 +506,21 @@ TEST_F(BuiltinBuilderTest, Call_TextureSampleCompare_Twice) {
}
TEST_F(BuiltinBuilderTest, Call_GLSLMethod_WithLoad) {
- auto* var = Global("ident", ty.f32(), ast::StorageClass::kPrivate);
- auto* expr = Call("round", "ident");
- auto* func = Func("a_func", {}, ty.void_(),
- {
- Assign(Phony(), expr),
- });
+ auto* var = Global("ident", ty.f32(), ast::StorageClass::kPrivate);
+ auto* expr = Call("round", "ident");
+ auto* func = Func("a_func", {}, ty.void_(),
+ {
+ Assign(Phony(), expr),
+ });
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- ASSERT_TRUE(b.GenerateGlobalVariable(var)) << b.error();
- ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
+ ASSERT_TRUE(b.GenerateGlobalVariable(var)) << b.error();
+ ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
- EXPECT_EQ(DumpBuilder(b), R"(%10 = OpExtInstImport "GLSL.std.450"
+ auto got = DumpBuilder(b);
+ auto expect =
+ R"(%10 = OpExtInstImport "GLSL.std.450"
OpName %1 "ident"
OpName %7 "a_func"
%3 = OpTypeFloat 32
@@ -538,24 +535,25 @@ OpName %7 "a_func"
%9 = OpExtInst %3 %10 RoundEven %11
OpReturn
OpFunctionEnd
-)");
+)";
+ EXPECT_EQ(got, expect);
}
-using Builtin_Builtin_SingleParam_Float_Test =
- BuiltinBuilderTestWithParam<BuiltinData>;
+using Builtin_Builtin_SingleParam_Float_Test = BuiltinBuilderTestWithParam<BuiltinData>;
TEST_P(Builtin_Builtin_SingleParam_Float_Test, Call_Scalar) {
- auto param = GetParam();
- auto* expr = Call(param.name, 1.0f);
- auto* func = Func("a_func", {}, ty.void_(),
- {
- Assign(Phony(), expr),
- });
+ auto param = GetParam();
+ auto* expr = Call(param.name, 1_f);
+ auto* func = Func("a_func", {}, ty.void_(),
+ {
+ Assign(Phony(), expr),
+ });
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
+ ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
- EXPECT_EQ(DumpBuilder(b), R"(%7 = OpExtInstImport "GLSL.std.450"
+ auto got = DumpBuilder(b);
+ auto expect = R"(%7 = OpExtInstImport "GLSL.std.450"
OpName %3 "a_func"
%2 = OpTypeVoid
%1 = OpTypeFunction %2
@@ -563,26 +561,29 @@ OpName %3 "a_func"
%8 = OpConstant %6 1
%3 = OpFunction %2 None %1
%4 = OpLabel
-%5 = OpExtInst %6 %7 )" + param.op +
- R"( %8
+%5 = OpExtInst %6 %7 )" +
+ param.op +
+ R"( %8
OpReturn
OpFunctionEnd
-)");
+)";
+ EXPECT_EQ(got, expect);
}
TEST_P(Builtin_Builtin_SingleParam_Float_Test, Call_Vector) {
- auto param = GetParam();
- auto* expr = Call(param.name, vec2<f32>(1.0f, 1.0f));
- auto* func = Func("a_func", {}, ty.void_(),
- {
- Assign(Phony(), expr),
- });
+ auto param = GetParam();
+ auto* expr = Call(param.name, vec2<f32>(1_f, 1_f));
+ auto* func = Func("a_func", {}, ty.void_(),
+ {
+ Assign(Phony(), expr),
+ });
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
+ ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
- EXPECT_EQ(DumpBuilder(b), R"(%8 = OpExtInstImport "GLSL.std.450"
+ auto got = DumpBuilder(b);
+ auto expect = R"(%8 = OpExtInstImport "GLSL.std.450"
OpName %3 "a_func"
%2 = OpTypeVoid
%1 = OpTypeFunction %2
@@ -592,11 +593,13 @@ OpName %3 "a_func"
%10 = OpConstantComposite %6 %9 %9
%3 = OpFunction %2 None %1
%4 = OpLabel
-%5 = OpExtInst %6 %8 )" + param.op +
- R"( %10
+%5 = OpExtInst %6 %8 )" +
+ param.op +
+ R"( %10
OpReturn
OpFunctionEnd
-)");
+)";
+ EXPECT_EQ(got, expect);
}
INSTANTIATE_TEST_SUITE_P(BuiltinBuilderTest,
Builtin_Builtin_SingleParam_Float_Test,
@@ -612,8 +615,7 @@ INSTANTIATE_TEST_SUITE_P(BuiltinBuilderTest,
BuiltinData{"exp2", "Exp2"},
BuiltinData{"floor", "Floor"},
BuiltinData{"fract", "Fract"},
- BuiltinData{"inverseSqrt",
- "InverseSqrt"},
+ BuiltinData{"inverseSqrt", "InverseSqrt"},
BuiltinData{"log", "Log"},
BuiltinData{"log2", "Log2"},
BuiltinData{"radians", "Radians"},
@@ -627,17 +629,18 @@ INSTANTIATE_TEST_SUITE_P(BuiltinBuilderTest,
BuiltinData{"trunc", "Trunc"}));
TEST_F(BuiltinBuilderTest, Call_Length_Scalar) {
- auto* expr = Call("length", 1.0f);
- auto* func = Func("a_func", {}, ty.void_(),
- {
- Assign(Phony(), expr),
- });
+ auto* expr = Call("length", 1_f);
+ auto* func = Func("a_func", {}, ty.void_(),
+ {
+ Assign(Phony(), expr),
+ });
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
+ ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
- EXPECT_EQ(DumpBuilder(b), R"(%7 = OpExtInstImport "GLSL.std.450"
+ auto got = DumpBuilder(b);
+ auto expect = R"(%7 = OpExtInstImport "GLSL.std.450"
OpName %3 "a_func"
%2 = OpTypeVoid
%1 = OpTypeFunction %2
@@ -648,21 +651,23 @@ OpName %3 "a_func"
%5 = OpExtInst %6 %7 Length %8
OpReturn
OpFunctionEnd
-)");
+)";
+ EXPECT_EQ(got, expect);
}
TEST_F(BuiltinBuilderTest, Call_Length_Vector) {
- auto* expr = Call("length", vec2<f32>(1.0f, 1.0f));
- auto* func = Func("a_func", {}, ty.void_(),
- {
- Assign(Phony(), expr),
- });
+ auto* expr = Call("length", vec2<f32>(1_f, 1_f));
+ auto* func = Func("a_func", {}, ty.void_(),
+ {
+ Assign(Phony(), expr),
+ });
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
+ ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
- EXPECT_EQ(DumpBuilder(b), R"(%7 = OpExtInstImport "GLSL.std.450"
+ auto got = DumpBuilder(b);
+ auto expect = R"(%7 = OpExtInstImport "GLSL.std.450"
OpName %3 "a_func"
%2 = OpTypeVoid
%1 = OpTypeFunction %2
@@ -675,21 +680,23 @@ OpName %3 "a_func"
%5 = OpExtInst %6 %7 Length %10
OpReturn
OpFunctionEnd
-)");
+)";
+ EXPECT_EQ(got, expect);
}
TEST_F(BuiltinBuilderTest, Call_Normalize) {
- auto* expr = Call("normalize", vec2<f32>(1.0f, 1.0f));
- auto* func = Func("a_func", {}, ty.void_(),
- {
- Assign(Phony(), expr),
- });
+ auto* expr = Call("normalize", vec2<f32>(1_f, 1_f));
+ auto* func = Func("a_func", {}, ty.void_(),
+ {
+ Assign(Phony(), expr),
+ });
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
+ ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
- EXPECT_EQ(DumpBuilder(b), R"(%8 = OpExtInstImport "GLSL.std.450"
+ auto got = DumpBuilder(b);
+ auto expect = R"(%8 = OpExtInstImport "GLSL.std.450"
OpName %3 "a_func"
%2 = OpTypeVoid
%1 = OpTypeFunction %2
@@ -702,65 +709,89 @@ OpName %3 "a_func"
%5 = OpExtInst %6 %8 Normalize %10
OpReturn
OpFunctionEnd
-)");
+)";
+ EXPECT_EQ(got, expect);
}
-using Builtin_Builtin_DualParam_Float_Test =
- BuiltinBuilderTestWithParam<BuiltinData>;
+using Builtin_Builtin_DualParam_Float_Test = BuiltinBuilderTestWithParam<BuiltinData>;
TEST_P(Builtin_Builtin_DualParam_Float_Test, Call_Scalar) {
- auto param = GetParam();
- auto* expr = Call(param.name, 1.0f, 1.0f);
- auto* func = Func("a_func", {}, ty.void_(),
- {
- Assign(Phony(), expr),
- });
+ auto param = GetParam();
+ auto* scalar = Var("scalar", nullptr, Expr(1_f));
+ auto* expr = Call(param.name, scalar, scalar);
+ auto* func = Func("a_func", {}, ty.void_(),
+ {
+ Decl(scalar),
+ Assign(Phony(), expr),
+ });
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
+ ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
- EXPECT_EQ(DumpBuilder(b), R"(%7 = OpExtInstImport "GLSL.std.450"
+ auto got = DumpBuilder(b);
+ auto expect = R"(%11 = OpExtInstImport "GLSL.std.450"
OpName %3 "a_func"
+OpName %7 "scalar"
%2 = OpTypeVoid
%1 = OpTypeFunction %2
-%6 = OpTypeFloat 32
-%8 = OpConstant %6 1
+%5 = OpTypeFloat 32
+%6 = OpConstant %5 1
+%8 = OpTypePointer Function %5
+%9 = OpConstantNull %5
%3 = OpFunction %2 None %1
%4 = OpLabel
-%5 = OpExtInst %6 %7 )" + param.op +
- R"( %8 %8
+%7 = OpVariable %8 Function %9
+OpStore %7 %6
+%12 = OpLoad %5 %7
+%13 = OpLoad %5 %7
+%10 = OpExtInst %5 %11 )" +
+ param.op +
+ R"( %12 %13
OpReturn
OpFunctionEnd
-)");
+)";
+ EXPECT_EQ(got, expect);
}
TEST_P(Builtin_Builtin_DualParam_Float_Test, Call_Vector) {
- auto param = GetParam();
- auto* expr = Call(param.name, vec2<f32>(1.0f, 1.0f), vec2<f32>(1.0f, 1.0f));
- auto* func = Func("a_func", {}, ty.void_(),
- {
- Assign(Phony(), expr),
- });
+ auto param = GetParam();
+ auto* vec = Var("vec", nullptr, vec2<f32>(1_f, 1_f));
+ auto* expr = Call(param.name, vec, vec);
+ auto* func = Func("a_func", {}, ty.void_(),
+ {
+ Decl(vec),
+ Assign(Phony(), expr),
+ });
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
+ ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
- EXPECT_EQ(DumpBuilder(b), R"(%8 = OpExtInstImport "GLSL.std.450"
+ auto got = DumpBuilder(b);
+ auto expect = R"(%13 = OpExtInstImport "GLSL.std.450"
OpName %3 "a_func"
+OpName %9 "vec"
%2 = OpTypeVoid
%1 = OpTypeFunction %2
-%7 = OpTypeFloat 32
-%6 = OpTypeVector %7 2
-%9 = OpConstant %7 1
-%10 = OpConstantComposite %6 %9 %9
+%6 = OpTypeFloat 32
+%5 = OpTypeVector %6 2
+%7 = OpConstant %6 1
+%8 = OpConstantComposite %5 %7 %7
+%10 = OpTypePointer Function %5
+%11 = OpConstantNull %5
%3 = OpFunction %2 None %1
%4 = OpLabel
-%5 = OpExtInst %6 %8 )" + param.op +
- R"( %10 %10
+%9 = OpVariable %10 Function %11
+OpStore %9 %8
+%14 = OpLoad %5 %9
+%15 = OpLoad %5 %9
+%12 = OpExtInst %5 %13 )" +
+ param.op +
+ R"( %14 %15
OpReturn
OpFunctionEnd
-)");
+)";
+ EXPECT_EQ(got, expect);
}
INSTANTIATE_TEST_SUITE_P(BuiltinBuilderTest,
Builtin_Builtin_DualParam_Float_Test,
@@ -771,17 +802,18 @@ INSTANTIATE_TEST_SUITE_P(BuiltinBuilderTest,
BuiltinData{"step", "Step"}));
TEST_F(BuiltinBuilderTest, Call_Reflect_Vector) {
- auto* expr = Call("reflect", vec2<f32>(1.0f, 1.0f), vec2<f32>(1.0f, 1.0f));
- auto* func = Func("a_func", {}, ty.void_(),
- {
- Assign(Phony(), expr),
- });
+ auto* expr = Call("reflect", vec2<f32>(1_f, 1_f), vec2<f32>(1_f, 1_f));
+ auto* func = Func("a_func", {}, ty.void_(),
+ {
+ Assign(Phony(), expr),
+ });
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
+ ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
- EXPECT_EQ(DumpBuilder(b), R"(%8 = OpExtInstImport "GLSL.std.450"
+ auto got = DumpBuilder(b);
+ auto expect = R"(%8 = OpExtInstImport "GLSL.std.450"
OpName %3 "a_func"
%2 = OpTypeVoid
%1 = OpTypeFunction %2
@@ -794,21 +826,23 @@ OpName %3 "a_func"
%5 = OpExtInst %6 %8 Reflect %10 %10
OpReturn
OpFunctionEnd
-)");
+)";
+ EXPECT_EQ(got, expect);
}
TEST_F(BuiltinBuilderTest, Call_Distance_Scalar) {
- auto* expr = Call("distance", 1.0f, 1.0f);
- auto* func = Func("a_func", {}, ty.void_(),
- {
- Assign(Phony(), expr),
- });
+ auto* expr = Call("distance", 1_f, 1_f);
+ auto* func = Func("a_func", {}, ty.void_(),
+ {
+ Assign(Phony(), expr),
+ });
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
+ ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
- EXPECT_EQ(DumpBuilder(b), R"(%7 = OpExtInstImport "GLSL.std.450"
+ auto got = DumpBuilder(b);
+ auto expect = R"(%7 = OpExtInstImport "GLSL.std.450"
OpName %3 "a_func"
%2 = OpTypeVoid
%1 = OpTypeFunction %2
@@ -819,21 +853,23 @@ OpName %3 "a_func"
%5 = OpExtInst %6 %7 Distance %8 %8
OpReturn
OpFunctionEnd
-)");
+)";
+ EXPECT_EQ(got, expect);
}
TEST_F(BuiltinBuilderTest, Call_Distance_Vector) {
- auto* expr = Call("distance", vec2<f32>(1.0f, 1.0f), vec2<f32>(1.0f, 1.0f));
- auto* func = Func("a_func", {}, ty.void_(),
- {
- Assign(Phony(), expr),
- });
+ auto* expr = Call("distance", vec2<f32>(1_f, 1_f), vec2<f32>(1_f, 1_f));
+ auto* func = Func("a_func", {}, ty.void_(),
+ {
+ Assign(Phony(), expr),
+ });
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
+ ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
- EXPECT_EQ(DumpBuilder(b), R"(%7 = OpExtInstImport "GLSL.std.450"
+ auto got = DumpBuilder(b);
+ auto expect = R"(%7 = OpExtInstImport "GLSL.std.450"
OpName %3 "a_func"
%2 = OpTypeVoid
%1 = OpTypeFunction %2
@@ -846,22 +882,23 @@ OpName %3 "a_func"
%5 = OpExtInst %6 %7 Distance %10 %10
OpReturn
OpFunctionEnd
-)");
+)";
+ EXPECT_EQ(got, expect);
}
TEST_F(BuiltinBuilderTest, Call_Cross) {
- auto* expr =
- Call("cross", vec3<f32>(1.0f, 1.0f, 1.0f), vec3<f32>(1.0f, 1.0f, 1.0f));
- auto* func = Func("a_func", {}, ty.void_(),
- {
- Assign(Phony(), expr),
- });
+ auto* expr = Call("cross", vec3<f32>(1_f, 1_f, 1_f), vec3<f32>(1_f, 1_f, 1_f));
+ auto* func = Func("a_func", {}, ty.void_(),
+ {
+ Assign(Phony(), expr),
+ });
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
+ ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
- EXPECT_EQ(DumpBuilder(b), R"(%8 = OpExtInstImport "GLSL.std.450"
+ auto got = DumpBuilder(b);
+ auto expect = R"(%8 = OpExtInstImport "GLSL.std.450"
OpName %3 "a_func"
%2 = OpTypeVoid
%1 = OpTypeFunction %2
@@ -874,24 +911,25 @@ OpName %3 "a_func"
%5 = OpExtInst %6 %8 Cross %10 %10
OpReturn
OpFunctionEnd
-)");
+)";
+ EXPECT_EQ(got, expect);
}
-using Builtin_Builtin_ThreeParam_Float_Test =
- BuiltinBuilderTestWithParam<BuiltinData>;
+using Builtin_Builtin_ThreeParam_Float_Test = BuiltinBuilderTestWithParam<BuiltinData>;
TEST_P(Builtin_Builtin_ThreeParam_Float_Test, Call_Scalar) {
- auto param = GetParam();
- auto* expr = Call(param.name, 1.0f, 1.0f, 1.0f);
- auto* func = Func("a_func", {}, ty.void_(),
- {
- Assign(Phony(), expr),
- });
+ auto param = GetParam();
+ auto* expr = Call(param.name, 1_f, 1_f, 1_f);
+ auto* func = Func("a_func", {}, ty.void_(),
+ {
+ Assign(Phony(), expr),
+ });
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
+ ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
- EXPECT_EQ(DumpBuilder(b), R"(%7 = OpExtInstImport "GLSL.std.450"
+ auto got = DumpBuilder(b);
+ auto expect = R"(%7 = OpExtInstImport "GLSL.std.450"
OpName %3 "a_func"
%2 = OpTypeVoid
%1 = OpTypeFunction %2
@@ -899,27 +937,29 @@ OpName %3 "a_func"
%8 = OpConstant %6 1
%3 = OpFunction %2 None %1
%4 = OpLabel
-%5 = OpExtInst %6 %7 )" + param.op +
- R"( %8 %8 %8
+%5 = OpExtInst %6 %7 )" +
+ param.op +
+ R"( %8 %8 %8
OpReturn
OpFunctionEnd
-)");
+)";
+ EXPECT_EQ(got, expect);
}
TEST_P(Builtin_Builtin_ThreeParam_Float_Test, Call_Vector) {
- auto param = GetParam();
- auto* expr = Call(param.name, vec2<f32>(1.0f, 1.0f), vec2<f32>(1.0f, 1.0f),
- vec2<f32>(1.0f, 1.0f));
- auto* func = Func("a_func", {}, ty.void_(),
- {
- Assign(Phony(), expr),
- });
+ auto param = GetParam();
+ auto* expr = Call(param.name, vec2<f32>(1_f, 1_f), vec2<f32>(1_f, 1_f), vec2<f32>(1_f, 1_f));
+ auto* func = Func("a_func", {}, ty.void_(),
+ {
+ Assign(Phony(), expr),
+ });
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
+ ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
- EXPECT_EQ(DumpBuilder(b), R"(%8 = OpExtInstImport "GLSL.std.450"
+ auto got = DumpBuilder(b);
+ auto expect = R"(%8 = OpExtInstImport "GLSL.std.450"
OpName %3 "a_func"
%2 = OpTypeVoid
%1 = OpTypeFunction %2
@@ -929,11 +969,13 @@ OpName %3 "a_func"
%10 = OpConstantComposite %6 %9 %9
%3 = OpFunction %2 None %1
%4 = OpLabel
-%5 = OpExtInst %6 %8 )" + param.op +
- R"( %10 %10 %10
+%5 = OpExtInst %6 %8 )" +
+ param.op +
+ R"( %10 %10 %10
OpReturn
OpFunctionEnd
-)");
+)";
+ EXPECT_EQ(got, expect);
}
INSTANTIATE_TEST_SUITE_P(BuiltinBuilderTest,
Builtin_Builtin_ThreeParam_Float_Test,
@@ -941,22 +983,21 @@ INSTANTIATE_TEST_SUITE_P(BuiltinBuilderTest,
BuiltinData{"fma", "Fma"},
BuiltinData{"mix", "FMix"},
- BuiltinData{"smoothStep",
- "SmoothStep"}));
+ BuiltinData{"smoothstep", "SmoothStep"}));
TEST_F(BuiltinBuilderTest, Call_FaceForward_Vector) {
- auto* expr = Call("faceForward", vec2<f32>(1.0f, 1.0f), vec2<f32>(1.0f, 1.0f),
- vec2<f32>(1.0f, 1.0f));
- auto* func = Func("a_func", {}, ty.void_(),
- {
- Assign(Phony(), expr),
- });
+ auto* expr = Call("faceForward", vec2<f32>(1_f, 1_f), vec2<f32>(1_f, 1_f), vec2<f32>(1_f, 1_f));
+ auto* func = Func("a_func", {}, ty.void_(),
+ {
+ Assign(Phony(), expr),
+ });
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
+ ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
- EXPECT_EQ(DumpBuilder(b), R"(%8 = OpExtInstImport "GLSL.std.450"
+ auto got = DumpBuilder(b);
+ auto expect = R"(%8 = OpExtInstImport "GLSL.std.450"
OpName %3 "a_func"
%2 = OpTypeVoid
%1 = OpTypeFunction %2
@@ -969,24 +1010,25 @@ OpName %3 "a_func"
%5 = OpExtInst %6 %8 FaceForward %10 %10 %10
OpReturn
OpFunctionEnd
-)");
+)";
+ EXPECT_EQ(got, expect);
}
-using Builtin_Builtin_SingleParam_Sint_Test =
- BuiltinBuilderTestWithParam<BuiltinData>;
+using Builtin_Builtin_SingleParam_Sint_Test = BuiltinBuilderTestWithParam<BuiltinData>;
TEST_P(Builtin_Builtin_SingleParam_Sint_Test, Call_Scalar) {
- auto param = GetParam();
- auto* expr = Call(param.name, 1);
- auto* func = Func("a_func", {}, ty.void_(),
- {
- Assign(Phony(), expr),
- });
+ auto param = GetParam();
+ auto* expr = Call(param.name, 1_i);
+ auto* func = Func("a_func", {}, ty.void_(),
+ {
+ Assign(Phony(), expr),
+ });
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
+ ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
- EXPECT_EQ(DumpBuilder(b), R"(%7 = OpExtInstImport "GLSL.std.450"
+ auto got = DumpBuilder(b);
+ auto expect = R"(%7 = OpExtInstImport "GLSL.std.450"
OpName %3 "a_func"
%2 = OpTypeVoid
%1 = OpTypeFunction %2
@@ -994,26 +1036,29 @@ OpName %3 "a_func"
%8 = OpConstant %6 1
%3 = OpFunction %2 None %1
%4 = OpLabel
-%5 = OpExtInst %6 %7 )" + param.op +
- R"( %8
+%5 = OpExtInst %6 %7 )" +
+ param.op +
+ R"( %8
OpReturn
OpFunctionEnd
-)");
+)";
+ EXPECT_EQ(got, expect);
}
TEST_P(Builtin_Builtin_SingleParam_Sint_Test, Call_Vector) {
- auto param = GetParam();
- auto* expr = Call(param.name, vec2<i32>(1, 1));
- auto* func = Func("a_func", {}, ty.void_(),
- {
- Assign(Phony(), expr),
- });
+ auto param = GetParam();
+ auto* expr = Call(param.name, vec2<i32>(1_i, 1_i));
+ auto* func = Func("a_func", {}, ty.void_(),
+ {
+ Assign(Phony(), expr),
+ });
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
+ ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
- EXPECT_EQ(DumpBuilder(b), R"(%8 = OpExtInstImport "GLSL.std.450"
+ auto got = DumpBuilder(b);
+ auto expect = R"(%8 = OpExtInstImport "GLSL.std.450"
OpName %3 "a_func"
%2 = OpTypeVoid
%1 = OpTypeFunction %2
@@ -1023,11 +1068,13 @@ OpName %3 "a_func"
%10 = OpConstantComposite %6 %9 %9
%3 = OpFunction %2 None %1
%4 = OpLabel
-%5 = OpExtInst %6 %8 )" + param.op +
- R"( %10
+%5 = OpExtInst %6 %8 )" +
+ param.op +
+ R"( %10
OpReturn
OpFunctionEnd
-)");
+)";
+ EXPECT_EQ(got, expect);
}
INSTANTIATE_TEST_SUITE_P(BuiltinBuilderTest,
Builtin_Builtin_SingleParam_Sint_Test,
@@ -1036,17 +1083,18 @@ INSTANTIATE_TEST_SUITE_P(BuiltinBuilderTest,
// Calling abs() on an unsigned integer scalar / vector is a no-op.
using Builtin_Builtin_Abs_Uint_Test = BuiltinBuilderTest;
TEST_F(Builtin_Builtin_Abs_Uint_Test, Call_Scalar) {
- auto* expr = Call("abs", 1u);
- auto* func = Func("a_func", {}, ty.void_(),
- {
- Assign(Phony(), expr),
- });
+ auto* expr = Call("abs", Expr(1_u));
+ auto* func = Func("a_func", {}, ty.void_(),
+ {
+ Assign(Phony(), expr),
+ });
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
+ ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
- EXPECT_EQ(DumpBuilder(b), R"(OpName %3 "a_func"
+ auto got = DumpBuilder(b);
+ auto expect = R"(OpName %3 "a_func"
%2 = OpTypeVoid
%1 = OpTypeFunction %2
%6 = OpTypeInt 32 0
@@ -1055,21 +1103,23 @@ TEST_F(Builtin_Builtin_Abs_Uint_Test, Call_Scalar) {
%4 = OpLabel
OpReturn
OpFunctionEnd
-)");
+)";
+ EXPECT_EQ(got, expect);
}
TEST_F(Builtin_Builtin_Abs_Uint_Test, Call_Vector) {
- auto* expr = Call("abs", vec2<u32>(1u, 1u));
- auto* func = Func("a_func", {}, ty.void_(),
- {
- Assign(Phony(), expr),
- });
+ auto* expr = Call("abs", vec2<u32>(1_u, 1_u));
+ auto* func = Func("a_func", {}, ty.void_(),
+ {
+ Assign(Phony(), expr),
+ });
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
+ ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
- EXPECT_EQ(DumpBuilder(b), R"(OpName %3 "a_func"
+ auto got = DumpBuilder(b);
+ auto expect = R"(OpName %3 "a_func"
%2 = OpTypeVoid
%1 = OpTypeFunction %2
%7 = OpTypeInt 32 0
@@ -1080,148 +1130,193 @@ TEST_F(Builtin_Builtin_Abs_Uint_Test, Call_Vector) {
%4 = OpLabel
OpReturn
OpFunctionEnd
-)");
+)";
+ EXPECT_EQ(got, expect);
}
-using Builtin_Builtin_DualParam_SInt_Test =
- BuiltinBuilderTestWithParam<BuiltinData>;
+using Builtin_Builtin_DualParam_SInt_Test = BuiltinBuilderTestWithParam<BuiltinData>;
TEST_P(Builtin_Builtin_DualParam_SInt_Test, Call_Scalar) {
- auto param = GetParam();
- auto* expr = Call(param.name, 1, 1);
- auto* func = Func("a_func", {}, ty.void_(),
- {
- Assign(Phony(), expr),
- });
+ auto param = GetParam();
+ auto* scalar = Var("scalar", nullptr, Expr(1_i));
+ auto* expr = Call(param.name, scalar, scalar);
+ auto* func = Func("a_func", {}, ty.void_(),
+ {
+ Decl(scalar),
+ Assign(Phony(), expr),
+ });
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
+ ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
- EXPECT_EQ(DumpBuilder(b), R"(%7 = OpExtInstImport "GLSL.std.450"
+ auto got = DumpBuilder(b);
+ auto expect = R"(%11 = OpExtInstImport "GLSL.std.450"
OpName %3 "a_func"
+OpName %7 "scalar"
%2 = OpTypeVoid
%1 = OpTypeFunction %2
-%6 = OpTypeInt 32 1
-%8 = OpConstant %6 1
+%5 = OpTypeInt 32 1
+%6 = OpConstant %5 1
+%8 = OpTypePointer Function %5
+%9 = OpConstantNull %5
%3 = OpFunction %2 None %1
%4 = OpLabel
-%5 = OpExtInst %6 %7 )" + param.op +
- R"( %8 %8
+%7 = OpVariable %8 Function %9
+OpStore %7 %6
+%12 = OpLoad %5 %7
+%13 = OpLoad %5 %7
+%10 = OpExtInst %5 %11 )" +
+ param.op +
+ R"( %12 %13
OpReturn
OpFunctionEnd
-)");
+)";
+ EXPECT_EQ(got, expect);
}
TEST_P(Builtin_Builtin_DualParam_SInt_Test, Call_Vector) {
- auto param = GetParam();
- auto* expr = Call(param.name, vec2<i32>(1, 1), vec2<i32>(1, 1));
- auto* func = Func("a_func", {}, ty.void_(),
- {
- Assign(Phony(), expr),
- });
+ auto param = GetParam();
+ auto* vec = Var("vec", nullptr, vec2<i32>(1_i, 1_i));
+ auto* expr = Call(param.name, vec, vec);
+ auto* func = Func("a_func", {}, ty.void_(),
+ {
+ Decl(vec),
+ Assign(Phony(), expr),
+ });
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
+ ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
- EXPECT_EQ(DumpBuilder(b), R"(%8 = OpExtInstImport "GLSL.std.450"
+ auto got = DumpBuilder(b);
+ auto expect = R"(%13 = OpExtInstImport "GLSL.std.450"
OpName %3 "a_func"
+OpName %9 "vec"
%2 = OpTypeVoid
%1 = OpTypeFunction %2
-%7 = OpTypeInt 32 1
-%6 = OpTypeVector %7 2
-%9 = OpConstant %7 1
-%10 = OpConstantComposite %6 %9 %9
+%6 = OpTypeInt 32 1
+%5 = OpTypeVector %6 2
+%7 = OpConstant %6 1
+%8 = OpConstantComposite %5 %7 %7
+%10 = OpTypePointer Function %5
+%11 = OpConstantNull %5
%3 = OpFunction %2 None %1
%4 = OpLabel
-%5 = OpExtInst %6 %8 )" + param.op +
- R"( %10 %10
+%9 = OpVariable %10 Function %11
+OpStore %9 %8
+%14 = OpLoad %5 %9
+%15 = OpLoad %5 %9
+%12 = OpExtInst %5 %13 )" +
+ param.op +
+ R"( %14 %15
OpReturn
OpFunctionEnd
-)");
+)";
+ EXPECT_EQ(got, expect);
}
INSTANTIATE_TEST_SUITE_P(BuiltinBuilderTest,
Builtin_Builtin_DualParam_SInt_Test,
- testing::Values(BuiltinData{"max", "SMax"},
- BuiltinData{"min", "SMin"}));
+ testing::Values(BuiltinData{"max", "SMax"}, BuiltinData{"min", "SMin"}));
-using Builtin_Builtin_DualParam_UInt_Test =
- BuiltinBuilderTestWithParam<BuiltinData>;
+using Builtin_Builtin_DualParam_UInt_Test = BuiltinBuilderTestWithParam<BuiltinData>;
TEST_P(Builtin_Builtin_DualParam_UInt_Test, Call_Scalar) {
- auto param = GetParam();
- auto* expr = Call(param.name, 1u, 1u);
- auto* func = Func("a_func", {}, ty.void_(),
- {
- Assign(Phony(), expr),
- });
+ auto param = GetParam();
+ auto* scalar = Var("scalar", nullptr, Expr(1_u));
+ auto* expr = Call(param.name, scalar, scalar);
+ auto* func = Func("a_func", {}, ty.void_(),
+ {
+ Decl(scalar),
+ Assign(Phony(), expr),
+ });
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
+ ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
- EXPECT_EQ(DumpBuilder(b), R"(%7 = OpExtInstImport "GLSL.std.450"
+ auto got = DumpBuilder(b);
+ auto expect = R"(%11 = OpExtInstImport "GLSL.std.450"
OpName %3 "a_func"
+OpName %7 "scalar"
%2 = OpTypeVoid
%1 = OpTypeFunction %2
-%6 = OpTypeInt 32 0
-%8 = OpConstant %6 1
+%5 = OpTypeInt 32 0
+%6 = OpConstant %5 1
+%8 = OpTypePointer Function %5
+%9 = OpConstantNull %5
%3 = OpFunction %2 None %1
%4 = OpLabel
-%5 = OpExtInst %6 %7 )" + param.op +
- R"( %8 %8
+%7 = OpVariable %8 Function %9
+OpStore %7 %6
+%12 = OpLoad %5 %7
+%13 = OpLoad %5 %7
+%10 = OpExtInst %5 %11 )" +
+ param.op +
+ R"( %12 %13
OpReturn
OpFunctionEnd
-)");
+)";
+ EXPECT_EQ(got, expect);
}
TEST_P(Builtin_Builtin_DualParam_UInt_Test, Call_Vector) {
- auto param = GetParam();
- auto* expr = Call(param.name, vec2<u32>(1u, 1u), vec2<u32>(1u, 1u));
- auto* func = Func("a_func", {}, ty.void_(),
- {
- Assign(Phony(), expr),
- });
+ auto param = GetParam();
+ auto* vec = Var("vec", nullptr, vec2<u32>(1_u, 1_u));
+ auto* expr = Call(param.name, vec, vec);
+ auto* func = Func("a_func", {}, ty.void_(),
+ {
+ Decl(vec),
+ Assign(Phony(), expr),
+ });
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
+ ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
- EXPECT_EQ(DumpBuilder(b), R"(%8 = OpExtInstImport "GLSL.std.450"
+ auto got = DumpBuilder(b);
+ auto expect = R"(%13 = OpExtInstImport "GLSL.std.450"
OpName %3 "a_func"
+OpName %9 "vec"
%2 = OpTypeVoid
%1 = OpTypeFunction %2
-%7 = OpTypeInt 32 0
-%6 = OpTypeVector %7 2
-%9 = OpConstant %7 1
-%10 = OpConstantComposite %6 %9 %9
+%6 = OpTypeInt 32 0
+%5 = OpTypeVector %6 2
+%7 = OpConstant %6 1
+%8 = OpConstantComposite %5 %7 %7
+%10 = OpTypePointer Function %5
+%11 = OpConstantNull %5
%3 = OpFunction %2 None %1
%4 = OpLabel
-%5 = OpExtInst %6 %8 )" + param.op +
- R"( %10 %10
+%9 = OpVariable %10 Function %11
+OpStore %9 %8
+%14 = OpLoad %5 %9
+%15 = OpLoad %5 %9
+%12 = OpExtInst %5 %13 )" +
+ param.op +
+ R"( %14 %15
OpReturn
OpFunctionEnd
-)");
+)";
+ EXPECT_EQ(got, expect);
}
INSTANTIATE_TEST_SUITE_P(BuiltinBuilderTest,
Builtin_Builtin_DualParam_UInt_Test,
- testing::Values(BuiltinData{"max", "UMax"},
- BuiltinData{"min", "UMin"}));
+ testing::Values(BuiltinData{"max", "UMax"}, BuiltinData{"min", "UMin"}));
-using Builtin_Builtin_ThreeParam_Sint_Test =
- BuiltinBuilderTestWithParam<BuiltinData>;
+using Builtin_Builtin_ThreeParam_Sint_Test = BuiltinBuilderTestWithParam<BuiltinData>;
TEST_P(Builtin_Builtin_ThreeParam_Sint_Test, Call_Scalar) {
- auto param = GetParam();
- auto* expr = Call(param.name, 1, 1, 1);
- auto* func = Func("a_func", {}, ty.void_(),
- {
- Assign(Phony(), expr),
- });
+ auto param = GetParam();
+ auto* expr = Call(param.name, 1_i, 1_i, 1_i);
+ auto* func = Func("a_func", {}, ty.void_(),
+ {
+ Assign(Phony(), expr),
+ });
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
+ ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
- EXPECT_EQ(DumpBuilder(b), R"(%7 = OpExtInstImport "GLSL.std.450"
+ auto got = DumpBuilder(b);
+ auto expect = R"(%7 = OpExtInstImport "GLSL.std.450"
OpName %3 "a_func"
%2 = OpTypeVoid
%1 = OpTypeFunction %2
@@ -1229,27 +1324,29 @@ OpName %3 "a_func"
%8 = OpConstant %6 1
%3 = OpFunction %2 None %1
%4 = OpLabel
-%5 = OpExtInst %6 %7 )" + param.op +
- R"( %8 %8 %8
+%5 = OpExtInst %6 %7 )" +
+ param.op +
+ R"( %8 %8 %8
OpReturn
OpFunctionEnd
-)");
+)";
+ EXPECT_EQ(got, expect);
}
TEST_P(Builtin_Builtin_ThreeParam_Sint_Test, Call_Vector) {
- auto param = GetParam();
- auto* expr =
- Call(param.name, vec2<i32>(1, 1), vec2<i32>(1, 1), vec2<i32>(1, 1));
- auto* func = Func("a_func", {}, ty.void_(),
- {
- Assign(Phony(), expr),
- });
+ auto param = GetParam();
+ auto* expr = Call(param.name, vec2<i32>(1_i, 1_i), vec2<i32>(1_i, 1_i), vec2<i32>(1_i, 1_i));
+ auto* func = Func("a_func", {}, ty.void_(),
+ {
+ Assign(Phony(), expr),
+ });
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
+ ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
- EXPECT_EQ(DumpBuilder(b), R"(%8 = OpExtInstImport "GLSL.std.450"
+ auto got = DumpBuilder(b);
+ auto expect = R"(%8 = OpExtInstImport "GLSL.std.450"
OpName %3 "a_func"
%2 = OpTypeVoid
%1 = OpTypeFunction %2
@@ -1259,31 +1356,33 @@ OpName %3 "a_func"
%10 = OpConstantComposite %6 %9 %9
%3 = OpFunction %2 None %1
%4 = OpLabel
-%5 = OpExtInst %6 %8 )" + param.op +
- R"( %10 %10 %10
+%5 = OpExtInst %6 %8 )" +
+ param.op +
+ R"( %10 %10 %10
OpReturn
OpFunctionEnd
-)");
+)";
+ EXPECT_EQ(got, expect);
}
INSTANTIATE_TEST_SUITE_P(BuiltinBuilderTest,
Builtin_Builtin_ThreeParam_Sint_Test,
testing::Values(BuiltinData{"clamp", "SClamp"}));
-using Builtin_Builtin_ThreeParam_Uint_Test =
- BuiltinBuilderTestWithParam<BuiltinData>;
+using Builtin_Builtin_ThreeParam_Uint_Test = BuiltinBuilderTestWithParam<BuiltinData>;
TEST_P(Builtin_Builtin_ThreeParam_Uint_Test, Call_Scalar) {
- auto param = GetParam();
- auto* expr = Call(param.name, 1u, 1u, 1u);
- auto* func = Func("a_func", {}, ty.void_(),
- {
- Assign(Phony(), expr),
- });
+ auto param = GetParam();
+ auto* expr = Call(param.name, 1_u, 1_u, 1_u);
+ auto* func = Func("a_func", {}, ty.void_(),
+ {
+ Assign(Phony(), expr),
+ });
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
+ ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
- EXPECT_EQ(DumpBuilder(b), R"(%7 = OpExtInstImport "GLSL.std.450"
+ auto got = DumpBuilder(b);
+ auto expect = R"(%7 = OpExtInstImport "GLSL.std.450"
OpName %3 "a_func"
%2 = OpTypeVoid
%1 = OpTypeFunction %2
@@ -1291,27 +1390,29 @@ OpName %3 "a_func"
%8 = OpConstant %6 1
%3 = OpFunction %2 None %1
%4 = OpLabel
-%5 = OpExtInst %6 %7 )" + param.op +
- R"( %8 %8 %8
+%5 = OpExtInst %6 %7 )" +
+ param.op +
+ R"( %8 %8 %8
OpReturn
OpFunctionEnd
-)");
+)";
+ EXPECT_EQ(got, expect);
}
TEST_P(Builtin_Builtin_ThreeParam_Uint_Test, Call_Vector) {
- auto param = GetParam();
- auto* expr =
- Call(param.name, vec2<u32>(1u, 1u), vec2<u32>(1u, 1u), vec2<u32>(1u, 1u));
- auto* func = Func("a_func", {}, ty.void_(),
- {
- Assign(Phony(), expr),
- });
+ auto param = GetParam();
+ auto* expr = Call(param.name, vec2<u32>(1_u, 1_u), vec2<u32>(1_u, 1_u), vec2<u32>(1_u, 1_u));
+ auto* func = Func("a_func", {}, ty.void_(),
+ {
+ Assign(Phony(), expr),
+ });
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
+ ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
- EXPECT_EQ(DumpBuilder(b), R"(%8 = OpExtInstImport "GLSL.std.450"
+ auto got = DumpBuilder(b);
+ auto expect = R"(%8 = OpExtInstImport "GLSL.std.450"
OpName %3 "a_func"
%2 = OpTypeVoid
%1 = OpTypeFunction %2
@@ -1321,26 +1422,27 @@ OpName %3 "a_func"
%10 = OpConstantComposite %6 %9 %9
%3 = OpFunction %2 None %1
%4 = OpLabel
-%5 = OpExtInst %6 %8 )" + param.op +
- R"( %10 %10 %10
+%5 = OpExtInst %6 %8 )" +
+ param.op +
+ R"( %10 %10 %10
OpReturn
OpFunctionEnd
-)");
+)";
+ EXPECT_EQ(got, expect);
}
INSTANTIATE_TEST_SUITE_P(BuiltinBuilderTest,
Builtin_Builtin_ThreeParam_Uint_Test,
testing::Values(BuiltinData{"clamp", "UClamp"}));
TEST_F(BuiltinBuilderTest, Call_Modf) {
- auto* expr = Call("modf", vec2<f32>(1.0f, 2.0f));
- Func("a_func", {}, ty.void_(), {CallStmt(expr)},
- {Stage(ast::PipelineStage::kFragment)});
+ auto* expr = Call("modf", vec2<f32>(1_f, 2_f));
+ Func("a_func", {}, ty.void_(), {CallStmt(expr)}, {Stage(ast::PipelineStage::kFragment)});
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- ASSERT_TRUE(b.Build()) << b.error();
- auto got = DumpBuilder(b);
- auto* expect = R"(OpCapability Shader
+ ASSERT_TRUE(b.Build()) << b.error();
+ auto got = DumpBuilder(b);
+ auto* expect = R"(OpCapability Shader
%9 = OpExtInstImport "GLSL.std.450"
OpMemoryModel Logical GLSL450
OpEntryPoint Fragment %3 "a_func"
@@ -1365,21 +1467,20 @@ OpMemberDecorate %6 1 Offset 8
OpReturn
OpFunctionEnd
)";
- EXPECT_EQ(expect, got);
+ EXPECT_EQ(expect, got);
- Validate(b);
+ Validate(b);
}
TEST_F(BuiltinBuilderTest, Call_Frexp) {
- auto* expr = Call("frexp", vec2<f32>(1.0f, 2.0f));
- Func("a_func", {}, ty.void_(), {CallStmt(expr)},
- {Stage(ast::PipelineStage::kFragment)});
+ auto* expr = Call("frexp", vec2<f32>(1_f, 2_f));
+ Func("a_func", {}, ty.void_(), {CallStmt(expr)}, {Stage(ast::PipelineStage::kFragment)});
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- ASSERT_TRUE(b.Build()) << b.error();
- auto got = DumpBuilder(b);
- auto* expect = R"(OpCapability Shader
+ ASSERT_TRUE(b.Build()) << b.error();
+ auto got = DumpBuilder(b);
+ auto* expect = R"(OpCapability Shader
%11 = OpExtInstImport "GLSL.std.450"
OpMemoryModel Logical GLSL450
OpEntryPoint Fragment %3 "a_func"
@@ -1406,25 +1507,26 @@ OpMemberDecorate %6 1 Offset 8
OpReturn
OpFunctionEnd
)";
- EXPECT_EQ(expect, got);
+ EXPECT_EQ(expect, got);
- Validate(b);
+ Validate(b);
}
TEST_F(BuiltinBuilderTest, Call_Determinant) {
- auto* var = Global("var", ty.mat3x3<f32>(), ast::StorageClass::kPrivate);
- auto* expr = Call("determinant", "var");
- auto* func = Func("a_func", {}, ty.void_(),
- {
- Assign(Phony(), expr),
- });
+ auto* var = Global("var", ty.mat3x3<f32>(), ast::StorageClass::kPrivate);
+ auto* expr = Call("determinant", "var");
+ auto* func = Func("a_func", {}, ty.void_(),
+ {
+ Assign(Phony(), expr),
+ });
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- ASSERT_TRUE(b.GenerateGlobalVariable(var)) << b.error();
- ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
+ ASSERT_TRUE(b.GenerateGlobalVariable(var)) << b.error();
+ ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
- EXPECT_EQ(DumpBuilder(b), R"(%12 = OpExtInstImport "GLSL.std.450"
+ auto got = DumpBuilder(b);
+ auto expect = R"(%12 = OpExtInstImport "GLSL.std.450"
OpName %1 "var"
OpName %9 "a_func"
%5 = OpTypeFloat 32
@@ -1441,23 +1543,25 @@ OpName %9 "a_func"
%11 = OpExtInst %5 %12 Determinant %13
OpReturn
OpFunctionEnd
-)");
+)";
+ EXPECT_EQ(got, expect);
}
TEST_F(BuiltinBuilderTest, Call_Transpose) {
- auto* var = Global("var", ty.mat2x3<f32>(), ast::StorageClass::kPrivate);
- auto* expr = Call("transpose", "var");
- auto* func = Func("a_func", {}, ty.void_(),
- {
- Assign(Phony(), expr),
- });
+ auto* var = Global("var", ty.mat2x3<f32>(), ast::StorageClass::kPrivate);
+ auto* expr = Call("transpose", "var");
+ auto* func = Func("a_func", {}, ty.void_(),
+ {
+ Assign(Phony(), expr),
+ });
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- ASSERT_TRUE(b.GenerateGlobalVariable(var)) << b.error();
- ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
+ ASSERT_TRUE(b.GenerateGlobalVariable(var)) << b.error();
+ ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
- EXPECT_EQ(DumpBuilder(b), R"(OpName %1 "var"
+ auto got = DumpBuilder(b);
+ auto expect = R"(OpName %1 "var"
OpName %9 "a_func"
%5 = OpTypeFloat 32
%4 = OpTypeVector %5 3
@@ -1475,33 +1579,34 @@ OpName %9 "a_func"
%11 = OpTranspose %12 %14
OpReturn
OpFunctionEnd
-)");
+)";
+ EXPECT_EQ(got, expect);
}
TEST_F(BuiltinBuilderTest, Call_ArrayLength) {
- auto* s = Structure("my_struct", {Member("a", ty.array<f32>(4))});
- Global("b", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kRead,
+ auto* s = Structure("my_struct", {Member("a", ty.array<f32>(4))});
+ Global("b", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kRead,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(1),
+ create<ast::GroupAttribute>(2),
+ });
+ auto* expr = Call("arrayLength", AddressOf(MemberAccessor("b", "a")));
+
+ Func("a_func", {}, ty.void_(),
+ ast::StatementList{
+ CallStmt(expr),
+ },
ast::AttributeList{
- create<ast::BindingAttribute>(1),
- create<ast::GroupAttribute>(2),
+ Stage(ast::PipelineStage::kFragment),
});
- auto* expr = Call("arrayLength", AddressOf(MemberAccessor("b", "a")));
- Func("a_func", {}, ty.void_(),
- ast::StatementList{
- CallStmt(expr),
- },
- ast::AttributeList{
- Stage(ast::PipelineStage::kFragment),
- });
+ spirv::Builder& b = SanitizeAndBuild();
- spirv::Builder& b = SanitizeAndBuild();
+ ASSERT_TRUE(b.Build()) << b.error();
- ASSERT_TRUE(b.Build()) << b.error();
+ ASSERT_EQ(b.functions().size(), 1_u);
- ASSERT_EQ(b.functions().size(), 1u);
-
- auto* expected_types = R"(%5 = OpTypeFloat 32
+ auto* expected_types = R"(%5 = OpTypeFloat 32
%4 = OpTypeRuntimeArray %5
%3 = OpTypeStruct %4
%2 = OpTypePointer StorageBuffer %3
@@ -1510,45 +1615,45 @@ TEST_F(BuiltinBuilderTest, Call_ArrayLength) {
%6 = OpTypeFunction %7
%11 = OpTypeInt 32 0
)";
- auto got_types = DumpInstructions(b.types());
- EXPECT_EQ(expected_types, got_types);
+ auto got_types = DumpInstructions(b.types());
+ EXPECT_EQ(expected_types, got_types);
- auto* expected_instructions = R"(%10 = OpArrayLength %11 %1 0
+ auto* expected_instructions = R"(%10 = OpArrayLength %11 %1 0
OpReturn
)";
- auto got_instructions = DumpInstructions(b.functions()[0].instructions());
- EXPECT_EQ(expected_instructions, got_instructions);
+ auto got_instructions = DumpInstructions(b.functions()[0].instructions());
+ EXPECT_EQ(expected_instructions, got_instructions);
- Validate(b);
+ Validate(b);
}
TEST_F(BuiltinBuilderTest, Call_ArrayLength_OtherMembersInStruct) {
- auto* s = Structure("my_struct", {
- Member("z", ty.f32()),
- Member(4, "a", ty.array<f32>(4)),
- });
- Global("b", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kRead,
+ auto* s = Structure("my_struct", {
+ Member("z", ty.f32()),
+ Member(4, "a", ty.array<f32>(4)),
+ });
+ Global("b", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kRead,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(1),
+ create<ast::GroupAttribute>(2),
+ });
+ auto* expr = Call("arrayLength", AddressOf(MemberAccessor("b", "a")));
+
+ Func("a_func", {}, ty.void_(),
+ ast::StatementList{
+ CallStmt(expr),
+ },
ast::AttributeList{
- create<ast::BindingAttribute>(1),
- create<ast::GroupAttribute>(2),
+ Stage(ast::PipelineStage::kFragment),
});
- auto* expr = Call("arrayLength", AddressOf(MemberAccessor("b", "a")));
-
- Func("a_func", {}, ty.void_(),
- ast::StatementList{
- CallStmt(expr),
- },
- ast::AttributeList{
- Stage(ast::PipelineStage::kFragment),
- });
- spirv::Builder& b = SanitizeAndBuild();
+ spirv::Builder& b = SanitizeAndBuild();
- ASSERT_TRUE(b.Build()) << b.error();
+ ASSERT_TRUE(b.Build()) << b.error();
- ASSERT_EQ(b.functions().size(), 1u);
+ ASSERT_EQ(b.functions().size(), 1_u);
- auto* expected_types = R"(%4 = OpTypeFloat 32
+ auto* expected_types = R"(%4 = OpTypeFloat 32
%5 = OpTypeRuntimeArray %4
%3 = OpTypeStruct %4 %5
%2 = OpTypePointer StorageBuffer %3
@@ -1557,47 +1662,47 @@ TEST_F(BuiltinBuilderTest, Call_ArrayLength_OtherMembersInStruct) {
%6 = OpTypeFunction %7
%11 = OpTypeInt 32 0
)";
- auto got_types = DumpInstructions(b.types());
- EXPECT_EQ(expected_types, got_types);
+ auto got_types = DumpInstructions(b.types());
+ EXPECT_EQ(expected_types, got_types);
- auto* expected_instructions = R"(%10 = OpArrayLength %11 %1 1
+ auto* expected_instructions = R"(%10 = OpArrayLength %11 %1 1
OpReturn
)";
- auto got_instructions = DumpInstructions(b.functions()[0].instructions());
- EXPECT_EQ(expected_instructions, got_instructions);
+ auto got_instructions = DumpInstructions(b.functions()[0].instructions());
+ EXPECT_EQ(expected_instructions, got_instructions);
- Validate(b);
+ Validate(b);
}
TEST_F(BuiltinBuilderTest, Call_ArrayLength_ViaLets) {
- auto* s = Structure("my_struct", {Member("a", ty.array<f32>(4))});
- Global("b", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kRead,
+ auto* s = Structure("my_struct", {Member("a", ty.array<f32>(4))});
+ Global("b", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kRead,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(1),
+ create<ast::GroupAttribute>(2),
+ });
+
+ auto* p = Let("p", nullptr, AddressOf("b"));
+ auto* p2 = Let("p2", nullptr, AddressOf(MemberAccessor(Deref(p), "a")));
+ auto* expr = Call("arrayLength", p2);
+
+ Func("a_func", {}, ty.void_(),
+ ast::StatementList{
+ Decl(p),
+ Decl(p2),
+ CallStmt(expr),
+ },
ast::AttributeList{
- create<ast::BindingAttribute>(1),
- create<ast::GroupAttribute>(2),
+ Stage(ast::PipelineStage::kFragment),
});
- auto* p = Const("p", nullptr, AddressOf("b"));
- auto* p2 = Const("p2", nullptr, AddressOf(MemberAccessor(Deref(p), "a")));
- auto* expr = Call("arrayLength", p2);
-
- Func("a_func", {}, ty.void_(),
- ast::StatementList{
- Decl(p),
- Decl(p2),
- CallStmt(expr),
- },
- ast::AttributeList{
- Stage(ast::PipelineStage::kFragment),
- });
-
- spirv::Builder& b = SanitizeAndBuild();
+ spirv::Builder& b = SanitizeAndBuild();
- ASSERT_TRUE(b.Build()) << b.error();
+ ASSERT_TRUE(b.Build()) << b.error();
- ASSERT_EQ(b.functions().size(), 1u);
+ ASSERT_EQ(b.functions().size(), 1_u);
- auto* expected_types = R"(%5 = OpTypeFloat 32
+ auto* expected_types = R"(%5 = OpTypeFloat 32
%4 = OpTypeRuntimeArray %5
%3 = OpTypeStruct %4
%2 = OpTypePointer StorageBuffer %3
@@ -1606,60 +1711,60 @@ TEST_F(BuiltinBuilderTest, Call_ArrayLength_ViaLets) {
%6 = OpTypeFunction %7
%11 = OpTypeInt 32 0
)";
- auto got_types = DumpInstructions(b.types());
- EXPECT_EQ(expected_types, got_types);
+ auto got_types = DumpInstructions(b.types());
+ EXPECT_EQ(expected_types, got_types);
- auto* expected_instructions = R"(%10 = OpArrayLength %11 %1 0
+ auto* expected_instructions = R"(%10 = OpArrayLength %11 %1 0
OpReturn
)";
- auto got_instructions = DumpInstructions(b.functions()[0].instructions());
- EXPECT_EQ(expected_instructions, got_instructions);
+ auto got_instructions = DumpInstructions(b.functions()[0].instructions());
+ EXPECT_EQ(expected_instructions, got_instructions);
- Validate(b);
+ Validate(b);
}
TEST_F(BuiltinBuilderTest, Call_ArrayLength_ViaLets_WithPtrNoise) {
- // struct my_struct {
- // a : array<f32>;
- // };
- // @binding(1) @group(2) var<storage, read> b : my_struct;
- //
- // fn a_func() {
- // let p = &*&b;
- // let p2 = &*p;
- // let p3 = &((*p).a);
- // arrayLength(&*p3);
- // }
- auto* s = Structure("my_struct", {Member("a", ty.array<f32>(4))});
- Global("b", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kRead,
+ // struct my_struct {
+ // a : array<f32>;
+ // };
+ // @binding(1) @group(2) var<storage, read> b : my_struct;
+ //
+ // fn a_func() {
+ // let p = &*&b;
+ // let p2 = &*p;
+ // let p3 = &((*p).a);
+ // arrayLength(&*p3);
+ // }
+ auto* s = Structure("my_struct", {Member("a", ty.array<f32>(4))});
+ Global("b", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kRead,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(1),
+ create<ast::GroupAttribute>(2),
+ });
+
+ auto* p = Let("p", nullptr, AddressOf(Deref(AddressOf("b"))));
+ auto* p2 = Let("p2", nullptr, AddressOf(Deref(p)));
+ auto* p3 = Let("p3", nullptr, AddressOf(MemberAccessor(Deref(p2), "a")));
+ auto* expr = Call("arrayLength", AddressOf(Deref(p3)));
+
+ Func("a_func", {}, ty.void_(),
+ ast::StatementList{
+ Decl(p),
+ Decl(p2),
+ Decl(p3),
+ CallStmt(expr),
+ },
ast::AttributeList{
- create<ast::BindingAttribute>(1),
- create<ast::GroupAttribute>(2),
+ Stage(ast::PipelineStage::kFragment),
});
- auto* p = Const("p", nullptr, AddressOf(Deref(AddressOf("b"))));
- auto* p2 = Const("p2", nullptr, AddressOf(Deref(p)));
- auto* p3 = Const("p3", nullptr, AddressOf(MemberAccessor(Deref(p2), "a")));
- auto* expr = Call("arrayLength", AddressOf(Deref(p3)));
+ spirv::Builder& b = SanitizeAndBuild();
- Func("a_func", {}, ty.void_(),
- ast::StatementList{
- Decl(p),
- Decl(p2),
- Decl(p3),
- CallStmt(expr),
- },
- ast::AttributeList{
- Stage(ast::PipelineStage::kFragment),
- });
+ ASSERT_TRUE(b.Build()) << b.error();
- spirv::Builder& b = SanitizeAndBuild();
+ ASSERT_EQ(b.functions().size(), 1_u);
- ASSERT_TRUE(b.Build()) << b.error();
-
- ASSERT_EQ(b.functions().size(), 1u);
-
- auto* expected_types = R"(%5 = OpTypeFloat 32
+ auto* expected_types = R"(%5 = OpTypeFloat 32
%4 = OpTypeRuntimeArray %5
%3 = OpTypeStruct %4
%2 = OpTypePointer StorageBuffer %3
@@ -1668,56 +1773,54 @@ TEST_F(BuiltinBuilderTest, Call_ArrayLength_ViaLets_WithPtrNoise) {
%6 = OpTypeFunction %7
%11 = OpTypeInt 32 0
)";
- auto got_types = DumpInstructions(b.types());
- EXPECT_EQ(expected_types, got_types);
+ auto got_types = DumpInstructions(b.types());
+ EXPECT_EQ(expected_types, got_types);
- auto* expected_instructions = R"(%10 = OpArrayLength %11 %1 0
+ auto* expected_instructions = R"(%10 = OpArrayLength %11 %1 0
OpReturn
)";
- auto got_instructions = DumpInstructions(b.functions()[0].instructions());
- EXPECT_EQ(expected_instructions, got_instructions);
+ auto got_instructions = DumpInstructions(b.functions()[0].instructions());
+ EXPECT_EQ(expected_instructions, got_instructions);
- Validate(b);
+ Validate(b);
}
TEST_F(BuiltinBuilderTest, Call_AtomicLoad) {
- // struct S {
- // u : atomic<u32>;
- // i : atomic<i32>;
- // }
- //
- // @binding(1) @group(2) var<storage, read_write> b : S;
- //
- // fn a_func() {
- // let u : u32 = atomicLoad(&b.u);
- // let i : i32 = atomicLoad(&b.i);
- // }
- auto* s = Structure("S", {
- Member("u", ty.atomic<u32>()),
- Member("i", ty.atomic<i32>()),
- });
- Global("b", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kReadWrite,
- ast::AttributeList{
- create<ast::BindingAttribute>(1),
- create<ast::GroupAttribute>(2),
- });
-
- Func("a_func", {}, ty.void_(),
- ast::StatementList{
- Decl(Const("u", ty.u32(),
- Call("atomicLoad", AddressOf(MemberAccessor("b", "u"))))),
- Decl(Const("i", ty.i32(),
- Call("atomicLoad", AddressOf(MemberAccessor("b", "i"))))),
- },
- ast::AttributeList{Stage(ast::PipelineStage::kFragment)});
-
- spirv::Builder& b = SanitizeAndBuild();
-
- ASSERT_TRUE(b.Build()) << b.error();
-
- ASSERT_EQ(b.functions().size(), 1u);
-
- auto* expected_types = R"(%4 = OpTypeInt 32 0
+ // struct S {
+ // u : atomic<u32>;
+ // i : atomic<i32>;
+ // }
+ //
+ // @binding(1) @group(2) var<storage, read_write> b : S;
+ //
+ // fn a_func() {
+ // let u : u32 = atomicLoad(&b.u);
+ // let i : i32 = atomicLoad(&b.i);
+ // }
+ auto* s = Structure("S", {
+ Member("u", ty.atomic<u32>()),
+ Member("i", ty.atomic<i32>()),
+ });
+ Global("b", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kReadWrite,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(1),
+ create<ast::GroupAttribute>(2),
+ });
+
+ Func("a_func", {}, ty.void_(),
+ ast::StatementList{
+ Decl(Let("u", ty.u32(), Call("atomicLoad", AddressOf(MemberAccessor("b", "u"))))),
+ Decl(Let("i", ty.i32(), Call("atomicLoad", AddressOf(MemberAccessor("b", "i"))))),
+ },
+ ast::AttributeList{Stage(ast::PipelineStage::kFragment)});
+
+ spirv::Builder& b = SanitizeAndBuild();
+
+ ASSERT_TRUE(b.Build()) << b.error();
+
+ ASSERT_EQ(b.functions().size(), 1_u);
+
+ auto* expected_types = R"(%4 = OpTypeInt 32 0
%5 = OpTypeInt 32 1
%3 = OpTypeStruct %4 %5
%2 = OpTypePointer StorageBuffer %3
@@ -1729,63 +1832,61 @@ TEST_F(BuiltinBuilderTest, Call_AtomicLoad) {
%14 = OpTypePointer StorageBuffer %4
%18 = OpTypePointer StorageBuffer %5
)";
- auto got_types = DumpInstructions(b.types());
- EXPECT_EQ(expected_types, got_types);
+ auto got_types = DumpInstructions(b.types());
+ EXPECT_EQ(expected_types, got_types);
- auto* expected_instructions = R"(%15 = OpAccessChain %14 %1 %12
+ auto* expected_instructions = R"(%15 = OpAccessChain %14 %1 %12
%10 = OpAtomicLoad %4 %15 %11 %12
%19 = OpAccessChain %18 %1 %11
%16 = OpAtomicLoad %5 %19 %11 %12
OpReturn
)";
- auto got_instructions = DumpInstructions(b.functions()[0].instructions());
- EXPECT_EQ(expected_instructions, got_instructions);
+ auto got_instructions = DumpInstructions(b.functions()[0].instructions());
+ EXPECT_EQ(expected_instructions, got_instructions);
- Validate(b);
+ Validate(b);
}
TEST_F(BuiltinBuilderTest, Call_AtomicStore) {
- // struct S {
- // u : atomic<u32>;
- // i : atomic<i32>;
- // }
- //
- // @binding(1) @group(2) var<storage, read_write> b : S;
- //
- // fn a_func() {
- // var u = 1u;
- // var i = 2;
- // atomicStore(&b.u, u);
- // atomicStore(&b.i, i);
- // }
- auto* s = Structure("S", {
- Member("u", ty.atomic<u32>()),
- Member("i", ty.atomic<i32>()),
- });
- Global("b", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kReadWrite,
- ast::AttributeList{
- create<ast::BindingAttribute>(1),
- create<ast::GroupAttribute>(2),
- });
-
- Func("a_func", {}, ty.void_(),
- ast::StatementList{
- Decl(Var("u", nullptr, Expr(1u))),
- Decl(Var("i", nullptr, Expr(2))),
- CallStmt(
- Call("atomicStore", AddressOf(MemberAccessor("b", "u")), "u")),
- CallStmt(
- Call("atomicStore", AddressOf(MemberAccessor("b", "i")), "i")),
- },
- ast::AttributeList{Stage(ast::PipelineStage::kFragment)});
-
- spirv::Builder& b = SanitizeAndBuild();
-
- ASSERT_TRUE(b.Build()) << b.error();
-
- ASSERT_EQ(b.functions().size(), 1u);
-
- auto* expected_types = R"(%4 = OpTypeInt 32 0
+ // struct S {
+ // u : atomic<u32>;
+ // i : atomic<i32>;
+ // }
+ //
+ // @binding(1) @group(2) var<storage, read_write> b : S;
+ //
+ // fn a_func() {
+  //  var u = 1u;
+ // var i = 2;
+ // atomicStore(&b.u, u);
+ // atomicStore(&b.i, i);
+ // }
+ auto* s = Structure("S", {
+ Member("u", ty.atomic<u32>()),
+ Member("i", ty.atomic<i32>()),
+ });
+ Global("b", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kReadWrite,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(1),
+ create<ast::GroupAttribute>(2),
+ });
+
+ Func("a_func", {}, ty.void_(),
+ ast::StatementList{
+ Decl(Var("u", nullptr, Expr(1_u))),
+ Decl(Var("i", nullptr, Expr(2_i))),
+ CallStmt(Call("atomicStore", AddressOf(MemberAccessor("b", "u")), "u")),
+ CallStmt(Call("atomicStore", AddressOf(MemberAccessor("b", "i")), "i")),
+ },
+ ast::AttributeList{Stage(ast::PipelineStage::kFragment)});
+
+ spirv::Builder& b = SanitizeAndBuild();
+
+ ASSERT_TRUE(b.Build()) << b.error();
+
+ ASSERT_EQ(b.functions().size(), 1_u);
+
+ auto* expected_types = R"(%4 = OpTypeInt 32 0
%5 = OpTypeInt 32 1
%3 = OpTypeStruct %4 %5
%2 = OpTypePointer StorageBuffer %3
@@ -1802,10 +1903,10 @@ TEST_F(BuiltinBuilderTest, Call_AtomicStore) {
%21 = OpTypePointer StorageBuffer %4
%26 = OpTypePointer StorageBuffer %5
)";
- auto got_types = DumpInstructions(b.types());
- EXPECT_EQ(expected_types, got_types);
+ auto got_types = DumpInstructions(b.types());
+ EXPECT_EQ(expected_types, got_types);
- auto* expected_instructions = R"(OpStore %11 %10
+ auto* expected_instructions = R"(OpStore %11 %10
OpStore %15 %14
%22 = OpAccessChain %21 %1 %19
%23 = OpLoad %4 %11
@@ -1815,49 +1916,48 @@ OpAtomicStore %22 %10 %19 %23
OpAtomicStore %27 %10 %19 %28
OpReturn
)";
- auto got_instructions = DumpInstructions(b.functions()[0].instructions());
- EXPECT_EQ(expected_instructions, got_instructions);
+ auto got_instructions = DumpInstructions(b.functions()[0].instructions());
+ EXPECT_EQ(expected_instructions, got_instructions);
- Validate(b);
+ Validate(b);
}
using Builtin_Builtin_AtomicRMW_i32 = BuiltinBuilderTestWithParam<BuiltinData>;
TEST_P(Builtin_Builtin_AtomicRMW_i32, Test) {
- // struct S {
- // v : atomic<i32>;
- // }
- //
- // @binding(1) @group(2) var<storage, read_write> b : S;
- //
- // fn a_func() {
- // var v = 10;
- // let x : i32 = atomicOP(&b.v, v);
- // }
- auto* s = Structure("S", {
- Member("v", ty.atomic<i32>()),
- });
- Global("b", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kReadWrite,
- ast::AttributeList{
- create<ast::BindingAttribute>(1),
- create<ast::GroupAttribute>(2),
- });
-
- Func("a_func", {}, ty.void_(),
- ast::StatementList{
- Decl(Var("v", nullptr, Expr(10))),
- Decl(Const("x", ty.i32(),
- Call(GetParam().name, AddressOf(MemberAccessor("b", "v")),
- "v"))),
- },
- ast::AttributeList{Stage(ast::PipelineStage::kFragment)});
-
- spirv::Builder& b = SanitizeAndBuild();
-
- ASSERT_TRUE(b.Build()) << b.error();
-
- ASSERT_EQ(b.functions().size(), 1u);
-
- std::string expected_types = R"(%4 = OpTypeInt 32 1
+ // struct S {
+ // v : atomic<i32>;
+ // }
+ //
+ // @binding(1) @group(2) var<storage, read_write> b : S;
+ //
+ // fn a_func() {
+ // var v = 10;
+ // let x : i32 = atomicOP(&b.v, v);
+ // }
+ auto* s = Structure("S", {
+ Member("v", ty.atomic<i32>()),
+ });
+ Global("b", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kReadWrite,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(1),
+ create<ast::GroupAttribute>(2),
+ });
+
+ Func("a_func", {}, ty.void_(),
+ ast::StatementList{
+ Decl(Var("v", nullptr, Expr(10_i))),
+ Decl(Let("x", ty.i32(),
+ Call(GetParam().name, AddressOf(MemberAccessor("b", "v")), "v"))),
+ },
+ ast::AttributeList{Stage(ast::PipelineStage::kFragment)});
+
+ spirv::Builder& b = SanitizeAndBuild();
+
+ ASSERT_TRUE(b.Build()) << b.error();
+
+ ASSERT_EQ(b.functions().size(), 1_u);
+
+ std::string expected_types = R"(%4 = OpTypeInt 32 1
%3 = OpTypeStruct %4
%2 = OpTypePointer StorageBuffer %3
%1 = OpVariable %2 StorageBuffer
@@ -1871,68 +1971,66 @@ TEST_P(Builtin_Builtin_AtomicRMW_i32, Test) {
%16 = OpConstant %14 0
%18 = OpTypePointer StorageBuffer %4
)";
- auto got_types = DumpInstructions(b.types());
- EXPECT_EQ(expected_types, got_types);
+ auto got_types = DumpInstructions(b.types());
+ EXPECT_EQ(expected_types, got_types);
- std::string expected_instructions = R"(OpStore %10 %9
+ std::string expected_instructions = R"(OpStore %10 %9
%19 = OpAccessChain %18 %1 %16
%20 = OpLoad %4 %10
)";
- expected_instructions += "%13 = " + GetParam().op + " %4 %19 %15 %16 %20\n";
- expected_instructions += "OpReturn\n";
+ expected_instructions += "%13 = " + GetParam().op + " %4 %19 %15 %16 %20\n";
+ expected_instructions += "OpReturn\n";
- auto got_instructions = DumpInstructions(b.functions()[0].instructions());
- EXPECT_EQ(expected_instructions, got_instructions);
+ auto got_instructions = DumpInstructions(b.functions()[0].instructions());
+ EXPECT_EQ(expected_instructions, got_instructions);
- Validate(b);
+ Validate(b);
}
-INSTANTIATE_TEST_SUITE_P(
- BuiltinBuilderTest,
- Builtin_Builtin_AtomicRMW_i32,
- testing::Values(BuiltinData{"atomicAdd", "OpAtomicIAdd"},
- BuiltinData{"atomicMax", "OpAtomicSMax"},
- BuiltinData{"atomicMin", "OpAtomicSMin"},
- BuiltinData{"atomicAnd", "OpAtomicAnd"},
- BuiltinData{"atomicOr", "OpAtomicOr"},
- BuiltinData{"atomicXor", "OpAtomicXor"}));
+INSTANTIATE_TEST_SUITE_P(BuiltinBuilderTest,
+ Builtin_Builtin_AtomicRMW_i32,
+ testing::Values(BuiltinData{"atomicAdd", "OpAtomicIAdd"},
+ BuiltinData{"atomicMax", "OpAtomicSMax"},
+ BuiltinData{"atomicMin", "OpAtomicSMin"},
+ BuiltinData{"atomicAnd", "OpAtomicAnd"},
+ BuiltinData{"atomicOr", "OpAtomicOr"},
+ BuiltinData{"atomicXor", "OpAtomicXor"}));
using Builtin_Builtin_AtomicRMW_u32 = BuiltinBuilderTestWithParam<BuiltinData>;
TEST_P(Builtin_Builtin_AtomicRMW_u32, Test) {
- // struct S {
- // v : atomic<u32>;
- // }
- //
- // @binding(1) @group(2) var<storage, read_write> b : S;
- //
- // fn a_func() {
- // var v = 10u;
- // let x : u32 = atomicOP(&b.v, v);
- // }
- auto* s = Structure("S", {
- Member("v", ty.atomic<u32>()),
- });
- Global("b", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kReadWrite,
- ast::AttributeList{
- create<ast::BindingAttribute>(1),
- create<ast::GroupAttribute>(2),
- });
-
- Func("a_func", {}, ty.void_(),
- ast::StatementList{
- Decl(Var("v", nullptr, Expr(10u))),
- Decl(Const("x", ty.u32(),
- Call(GetParam().name, AddressOf(MemberAccessor("b", "v")),
- "v"))),
- },
- ast::AttributeList{Stage(ast::PipelineStage::kFragment)});
-
- spirv::Builder& b = SanitizeAndBuild();
-
- ASSERT_TRUE(b.Build()) << b.error();
-
- ASSERT_EQ(b.functions().size(), 1u);
-
- std::string expected_types = R"(%4 = OpTypeInt 32 0
+ // struct S {
+ // v : atomic<u32>;
+ // }
+ //
+ // @binding(1) @group(2) var<storage, read_write> b : S;
+ //
+ // fn a_func() {
+ // var v = 10u;
+ // let x : u32 = atomicOP(&b.v, v);
+ // }
+ auto* s = Structure("S", {
+ Member("v", ty.atomic<u32>()),
+ });
+ Global("b", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kReadWrite,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(1),
+ create<ast::GroupAttribute>(2),
+ });
+
+ Func("a_func", {}, ty.void_(),
+ ast::StatementList{
+ Decl(Var("v", nullptr, Expr(10_u))),
+ Decl(Let("x", ty.u32(),
+ Call(GetParam().name, AddressOf(MemberAccessor("b", "v")), "v"))),
+ },
+ ast::AttributeList{Stage(ast::PipelineStage::kFragment)});
+
+ spirv::Builder& b = SanitizeAndBuild();
+
+ ASSERT_TRUE(b.Build()) << b.error();
+
+ ASSERT_EQ(b.functions().size(), 1_u);
+
+ std::string expected_types = R"(%4 = OpTypeInt 32 0
%3 = OpTypeStruct %4
%2 = OpTypePointer StorageBuffer %3
%1 = OpVariable %2 StorageBuffer
@@ -1945,75 +2043,72 @@ TEST_P(Builtin_Builtin_AtomicRMW_u32, Test) {
%15 = OpConstant %4 0
%17 = OpTypePointer StorageBuffer %4
)";
- auto got_types = DumpInstructions(b.types());
- EXPECT_EQ(expected_types, got_types);
+ auto got_types = DumpInstructions(b.types());
+ EXPECT_EQ(expected_types, got_types);
- std::string expected_instructions = R"(OpStore %10 %9
+ std::string expected_instructions = R"(OpStore %10 %9
%18 = OpAccessChain %17 %1 %15
%19 = OpLoad %4 %10
)";
- expected_instructions += "%13 = " + GetParam().op + " %4 %18 %14 %15 %19\n";
- expected_instructions += "OpReturn\n";
+ expected_instructions += "%13 = " + GetParam().op + " %4 %18 %14 %15 %19\n";
+ expected_instructions += "OpReturn\n";
- auto got_instructions = DumpInstructions(b.functions()[0].instructions());
- EXPECT_EQ(expected_instructions, got_instructions);
+ auto got_instructions = DumpInstructions(b.functions()[0].instructions());
+ EXPECT_EQ(expected_instructions, got_instructions);
- Validate(b);
+ Validate(b);
}
-INSTANTIATE_TEST_SUITE_P(
- BuiltinBuilderTest,
- Builtin_Builtin_AtomicRMW_u32,
- testing::Values(BuiltinData{"atomicAdd", "OpAtomicIAdd"},
- BuiltinData{"atomicMax", "OpAtomicUMax"},
- BuiltinData{"atomicMin", "OpAtomicUMin"},
- BuiltinData{"atomicAnd", "OpAtomicAnd"},
- BuiltinData{"atomicOr", "OpAtomicOr"},
- BuiltinData{"atomicXor", "OpAtomicXor"}));
+INSTANTIATE_TEST_SUITE_P(BuiltinBuilderTest,
+ Builtin_Builtin_AtomicRMW_u32,
+ testing::Values(BuiltinData{"atomicAdd", "OpAtomicIAdd"},
+ BuiltinData{"atomicMax", "OpAtomicUMax"},
+ BuiltinData{"atomicMin", "OpAtomicUMin"},
+ BuiltinData{"atomicAnd", "OpAtomicAnd"},
+ BuiltinData{"atomicOr", "OpAtomicOr"},
+ BuiltinData{"atomicXor", "OpAtomicXor"}));
TEST_F(BuiltinBuilderTest, Call_AtomicExchange) {
- // struct S {
- // u : atomic<u32>;
- // i : atomic<i32>;
- // }
- //
- // @binding(1) @group(2) var<storage, read_write> b : S;
- //
- // fn a_func() {
- // var u = 10u;
- // var i = 10;
- // let r : u32 = atomicExchange(&b.u, u);
- // let s : i32 = atomicExchange(&b.i, i);
- // }
- auto* s = Structure("S", {
- Member("u", ty.atomic<u32>()),
- Member("i", ty.atomic<i32>()),
- });
- Global("b", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kReadWrite,
- ast::AttributeList{
- create<ast::BindingAttribute>(1),
- create<ast::GroupAttribute>(2),
- });
-
- Func("a_func", {}, ty.void_(),
- ast::StatementList{
- Decl(Var("u", nullptr, Expr(10u))),
- Decl(Var("i", nullptr, Expr(10))),
- Decl(Const("r", ty.u32(),
- Call("atomicExchange",
- AddressOf(MemberAccessor("b", "u")), "u"))),
- Decl(Const("s", ty.i32(),
- Call("atomicExchange",
- AddressOf(MemberAccessor("b", "i")), "i"))),
- },
- ast::AttributeList{Stage(ast::PipelineStage::kFragment)});
-
- spirv::Builder& b = SanitizeAndBuild();
-
- ASSERT_TRUE(b.Build()) << b.error();
-
- ASSERT_EQ(b.functions().size(), 1u);
-
- auto* expected_types = R"(%4 = OpTypeInt 32 0
+ // struct S {
+ // u : atomic<u32>;
+ // i : atomic<i32>;
+ // }
+ //
+ // @binding(1) @group(2) var<storage, read_write> b : S;
+ //
+ // fn a_func() {
+ // var u = 10u;
+ // var i = 10i;
+ // let r : u32 = atomicExchange(&b.u, u);
+ // let s : i32 = atomicExchange(&b.i, i);
+ // }
+ auto* s = Structure("S", {
+ Member("u", ty.atomic<u32>()),
+ Member("i", ty.atomic<i32>()),
+ });
+ Global("b", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kReadWrite,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(1),
+ create<ast::GroupAttribute>(2),
+ });
+
+ Func("a_func", {}, ty.void_(),
+ ast::StatementList{
+ Decl(Var("u", nullptr, Expr(10_u))),
+ Decl(Var("i", nullptr, Expr(10_i))),
+ Decl(Let("r", ty.u32(),
+ Call("atomicExchange", AddressOf(MemberAccessor("b", "u")), "u"))),
+ Decl(Let("s", ty.i32(),
+ Call("atomicExchange", AddressOf(MemberAccessor("b", "i")), "i"))),
+ },
+ ast::AttributeList{Stage(ast::PipelineStage::kFragment)});
+
+ spirv::Builder& b = SanitizeAndBuild();
+
+ ASSERT_TRUE(b.Build()) << b.error();
+
+ ASSERT_EQ(b.functions().size(), 1_u);
+
+ auto* expected_types = R"(%4 = OpTypeInt 32 0
%5 = OpTypeInt 32 1
%3 = OpTypeStruct %4 %5
%2 = OpTypePointer StorageBuffer %3
@@ -2031,10 +2126,10 @@ TEST_F(BuiltinBuilderTest, Call_AtomicExchange) {
%22 = OpTypePointer StorageBuffer %4
%27 = OpTypePointer StorageBuffer %5
)";
- auto got_types = DumpInstructions(b.types());
- EXPECT_EQ(expected_types, got_types);
+ auto got_types = DumpInstructions(b.types());
+ EXPECT_EQ(expected_types, got_types);
- auto* expected_instructions = R"(OpStore %11 %10
+ auto* expected_instructions = R"(OpStore %11 %10
OpStore %15 %14
%23 = OpAccessChain %22 %1 %20
%24 = OpLoad %4 %11
@@ -2044,109 +2139,105 @@ OpStore %15 %14
%25 = OpAtomicExchange %5 %28 %19 %20 %29
OpReturn
)";
- auto got_instructions = DumpInstructions(b.functions()[0].instructions());
- EXPECT_EQ(expected_instructions, got_instructions);
+ auto got_instructions = DumpInstructions(b.functions()[0].instructions());
+ EXPECT_EQ(expected_instructions, got_instructions);
- Validate(b);
+ Validate(b);
}
TEST_F(BuiltinBuilderTest, Call_AtomicCompareExchangeWeak) {
- // struct S {
- // u : atomic<u32>;
- // i : atomic<i32>;
- // }
- //
- // @binding(1) @group(2) var<storage, read_write> b : S;
- //
- // fn a_func() {
- // let u : vec2<u32> = atomicCompareExchangeWeak(&b.u, 10u);
- // let i : vec2<i32> = atomicCompareExchangeWeak(&b.i, 10);
- // }
- auto* s = Structure("S", {
- Member("u", ty.atomic<u32>()),
- Member("i", ty.atomic<i32>()),
- });
- Global("b", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kReadWrite,
- ast::AttributeList{
- create<ast::BindingAttribute>(1),
- create<ast::GroupAttribute>(2),
- });
-
- Func("a_func", {}, ty.void_(),
- ast::StatementList{
- Decl(Const("u", ty.vec2<u32>(),
- Call("atomicCompareExchangeWeak",
- AddressOf(MemberAccessor("b", "u")), 10u, 20u))),
- Decl(Const("i", ty.vec2<i32>(),
- Call("atomicCompareExchangeWeak",
- AddressOf(MemberAccessor("b", "i")), 10, 20))),
- },
- ast::AttributeList{Stage(ast::PipelineStage::kFragment)});
-
- spirv::Builder& b = SanitizeAndBuild();
-
- ASSERT_TRUE(b.Build()) << b.error();
-
- ASSERT_EQ(b.functions().size(), 1u);
-
- auto* expected_types = R"(%4 = OpTypeInt 32 0
+ // struct S {
+ // u : atomic<u32>,
+ // i : atomic<i32>,
+ // }
+ //
+ // @binding(1) @group(2) var<storage, read_write> b : S;
+ //
+ // fn a_func() {
+ // let u = atomicCompareExchangeWeak(&b.u, 10u, 20u);
+  //  let i = atomicCompareExchangeWeak(&b.i, 10, 20);
+ // }
+ auto* s = Structure("S", {
+ Member("u", ty.atomic<u32>()),
+ Member("i", ty.atomic<i32>()),
+ });
+ Global("b", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kReadWrite,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(1),
+ create<ast::GroupAttribute>(2),
+ });
+
+ Func("a_func", {}, ty.void_(),
+ ast::StatementList{
+ Decl(Let("u", nullptr,
+ Call("atomicCompareExchangeWeak", AddressOf(MemberAccessor("b", "u")), 10_u,
+ 20_u))),
+ Decl(Let("i", nullptr,
+ Call("atomicCompareExchangeWeak", AddressOf(MemberAccessor("b", "i")), 10_i,
+ 20_i))),
+ },
+ ast::AttributeList{Stage(ast::PipelineStage::kFragment)});
+
+ spirv::Builder& b = SanitizeAndBuild();
+
+ ASSERT_TRUE(b.Build()) << b.error();
+
+ ASSERT_EQ(b.functions().size(), 1_u);
+
+ auto* expected_types = R"(%4 = OpTypeInt 32 0
%5 = OpTypeInt 32 1
%3 = OpTypeStruct %4 %5
%2 = OpTypePointer StorageBuffer %3
%1 = OpVariable %2 StorageBuffer
%7 = OpTypeVoid
%6 = OpTypeFunction %7
-%11 = OpTypeVector %4 2
-%12 = OpConstant %4 1
-%13 = OpConstant %4 0
-%15 = OpTypePointer StorageBuffer %4
-%17 = OpConstant %4 20
-%18 = OpConstant %4 10
-%19 = OpTypeBool
-%24 = OpTypeVector %5 2
-%26 = OpTypePointer StorageBuffer %5
-%28 = OpConstant %5 20
-%29 = OpConstant %5 10
-%32 = OpConstant %5 0
-%33 = OpConstant %5 1
+%12 = OpTypeBool
+%11 = OpTypeStruct %4 %12
+%13 = OpConstant %4 1
+%14 = OpConstant %4 0
+%16 = OpTypePointer StorageBuffer %4
+%18 = OpConstant %4 20
+%19 = OpConstant %4 10
+%23 = OpTypeStruct %5 %12
+%25 = OpTypePointer StorageBuffer %5
+%27 = OpConstant %5 20
+%28 = OpConstant %5 10
)";
- auto got_types = DumpInstructions(b.types());
- EXPECT_EQ(expected_types, got_types);
-
- auto* expected_instructions = R"(%16 = OpAccessChain %15 %1 %13
-%20 = OpAtomicCompareExchange %4 %16 %12 %13 %13 %17 %18
-%21 = OpIEqual %19 %20 %17
-%22 = OpSelect %4 %21 %12 %13
-%10 = OpCompositeConstruct %11 %20 %22
-%27 = OpAccessChain %26 %1 %12
-%30 = OpAtomicCompareExchange %5 %27 %12 %13 %13 %28 %29
-%31 = OpIEqual %19 %30 %28
-%34 = OpSelect %5 %31 %33 %32
-%23 = OpCompositeConstruct %24 %30 %34
+ auto got_types = DumpInstructions(b.types());
+ EXPECT_EQ(expected_types, got_types);
+
+ auto* expected_instructions = R"(%17 = OpAccessChain %16 %1 %14
+%20 = OpAtomicCompareExchange %4 %17 %13 %14 %14 %18 %19
+%21 = OpIEqual %12 %20 %18
+%10 = OpCompositeConstruct %11 %20 %21
+%26 = OpAccessChain %25 %1 %13
+%29 = OpAtomicCompareExchange %5 %26 %13 %14 %14 %27 %28
+%30 = OpIEqual %12 %29 %27
+%22 = OpCompositeConstruct %23 %29 %30
OpReturn
)";
- auto got_instructions = DumpInstructions(b.functions()[0].instructions());
- EXPECT_EQ(expected_instructions, got_instructions);
+ auto got_instructions = DumpInstructions(b.functions()[0].instructions());
+ EXPECT_EQ(expected_instructions, got_instructions);
- Validate(b);
+ Validate(b);
}
-using Builtin_Builtin_DataPacking_Test =
- BuiltinBuilderTestWithParam<BuiltinData>;
+using Builtin_Builtin_DataPacking_Test = BuiltinBuilderTestWithParam<BuiltinData>;
TEST_P(Builtin_Builtin_DataPacking_Test, Binary) {
- auto param = GetParam();
+ auto param = GetParam();
- bool pack4 = param.name == "pack4x8snorm" || param.name == "pack4x8unorm";
- auto* call = pack4 ? Call(param.name, vec4<float>(1.0f, 1.0f, 1.0f, 1.0f))
- : Call(param.name, vec2<float>(1.0f, 1.0f));
- auto* func = Func("a_func", {}, ty.void_(), {CallStmt(call)});
+ bool pack4 = param.name == "pack4x8snorm" || param.name == "pack4x8unorm";
+ auto* call = pack4 ? Call(param.name, vec4<f32>(1_f, 1_f, 1_f, 1_f))
+ : Call(param.name, vec2<f32>(1_f, 1_f));
+ auto* func = Func("a_func", {}, ty.void_(), {CallStmt(call)});
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
+ ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
- if (pack4) {
- EXPECT_EQ(DumpBuilder(b), R"(%7 = OpExtInstImport "GLSL.std.450"
+ if (pack4) {
+ auto got = DumpBuilder(b);
+ auto expect = R"(%7 = OpExtInstImport "GLSL.std.450"
OpName %3 "a_func"
%2 = OpTypeVoid
%1 = OpTypeFunction %2
@@ -2157,13 +2248,16 @@ OpName %3 "a_func"
%11 = OpConstantComposite %8 %10 %10 %10 %10
%3 = OpFunction %2 None %1
%4 = OpLabel
-%5 = OpExtInst %6 %7 )" + param.op +
- R"( %11
+%5 = OpExtInst %6 %7 )" +
+ param.op +
+ R"( %11
OpReturn
OpFunctionEnd
-)");
- } else {
- EXPECT_EQ(DumpBuilder(b), R"(%7 = OpExtInstImport "GLSL.std.450"
+)";
+ EXPECT_EQ(got, expect);
+ } else {
+ auto got = DumpBuilder(b);
+ auto expect = R"(%7 = OpExtInstImport "GLSL.std.450"
OpName %3 "a_func"
%2 = OpTypeVoid
%1 = OpTypeFunction %2
@@ -2174,37 +2268,38 @@ OpName %3 "a_func"
%11 = OpConstantComposite %8 %10 %10
%3 = OpFunction %2 None %1
%4 = OpLabel
-%5 = OpExtInst %6 %7 )" + param.op +
- R"( %11
+%5 = OpExtInst %6 %7 )" +
+ param.op +
+ R"( %11
OpReturn
OpFunctionEnd
-)");
- }
+)";
+ EXPECT_EQ(got, expect);
+ }
}
-INSTANTIATE_TEST_SUITE_P(
- BuiltinBuilderTest,
- Builtin_Builtin_DataPacking_Test,
- testing::Values(BuiltinData{"pack4x8snorm", "PackSnorm4x8"},
- BuiltinData{"pack4x8unorm", "PackUnorm4x8"},
- BuiltinData{"pack2x16snorm", "PackSnorm2x16"},
- BuiltinData{"pack2x16unorm", "PackUnorm2x16"},
- BuiltinData{"pack2x16float", "PackHalf2x16"}));
-
-using Builtin_Builtin_DataUnpacking_Test =
- BuiltinBuilderTestWithParam<BuiltinData>;
+INSTANTIATE_TEST_SUITE_P(BuiltinBuilderTest,
+ Builtin_Builtin_DataPacking_Test,
+ testing::Values(BuiltinData{"pack4x8snorm", "PackSnorm4x8"},
+ BuiltinData{"pack4x8unorm", "PackUnorm4x8"},
+ BuiltinData{"pack2x16snorm", "PackSnorm2x16"},
+ BuiltinData{"pack2x16unorm", "PackUnorm2x16"},
+ BuiltinData{"pack2x16float", "PackHalf2x16"}));
+
+using Builtin_Builtin_DataUnpacking_Test = BuiltinBuilderTestWithParam<BuiltinData>;
TEST_P(Builtin_Builtin_DataUnpacking_Test, Binary) {
- auto param = GetParam();
+ auto param = GetParam();
- bool pack4 = param.name == "unpack4x8snorm" || param.name == "unpack4x8unorm";
- auto* func = Func("a_func", {}, ty.void_(), {CallStmt(Call(param.name, 1u))});
+ bool pack4 = param.name == "unpack4x8snorm" || param.name == "unpack4x8unorm";
+ auto* func = Func("a_func", {}, ty.void_(), {CallStmt(Call(param.name, 1_u))});
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
+ ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
- if (pack4) {
- EXPECT_EQ(DumpBuilder(b), R"(%8 = OpExtInstImport "GLSL.std.450"
+ if (pack4) {
+ auto got = DumpBuilder(b);
+ auto expect = R"(%8 = OpExtInstImport "GLSL.std.450"
OpName %3 "a_func"
%2 = OpTypeVoid
%1 = OpTypeFunction %2
@@ -2214,13 +2309,16 @@ OpName %3 "a_func"
%10 = OpConstant %9 1
%3 = OpFunction %2 None %1
%4 = OpLabel
-%5 = OpExtInst %6 %8 )" + param.op +
- R"( %10
+%5 = OpExtInst %6 %8 )" +
+ param.op +
+ R"( %10
OpReturn
OpFunctionEnd
-)");
- } else {
- EXPECT_EQ(DumpBuilder(b), R"(%8 = OpExtInstImport "GLSL.std.450"
+)";
+ EXPECT_EQ(got, expect);
+ } else {
+ auto got = DumpBuilder(b);
+ auto expect = R"(%8 = OpExtInstImport "GLSL.std.450"
OpName %3 "a_func"
%2 = OpTypeVoid
%1 = OpTypeFunction %2
@@ -2230,103 +2328,105 @@ OpName %3 "a_func"
%10 = OpConstant %9 1
%3 = OpFunction %2 None %1
%4 = OpLabel
-%5 = OpExtInst %6 %8 )" + param.op +
- R"( %10
+%5 = OpExtInst %6 %8 )" +
+ param.op +
+ R"( %10
OpReturn
OpFunctionEnd
-)");
- }
+)";
+ EXPECT_EQ(got, expect);
+ }
}
-INSTANTIATE_TEST_SUITE_P(
- BuiltinBuilderTest,
- Builtin_Builtin_DataUnpacking_Test,
- testing::Values(BuiltinData{"unpack4x8snorm", "UnpackSnorm4x8"},
- BuiltinData{"unpack4x8unorm", "UnpackUnorm4x8"},
- BuiltinData{"unpack2x16snorm", "UnpackSnorm2x16"},
- BuiltinData{"unpack2x16unorm", "UnpackUnorm2x16"},
- BuiltinData{"unpack2x16float", "UnpackHalf2x16"}));
+INSTANTIATE_TEST_SUITE_P(BuiltinBuilderTest,
+ Builtin_Builtin_DataUnpacking_Test,
+ testing::Values(BuiltinData{"unpack4x8snorm", "UnpackSnorm4x8"},
+ BuiltinData{"unpack4x8unorm", "UnpackUnorm4x8"},
+ BuiltinData{"unpack2x16snorm", "UnpackSnorm2x16"},
+ BuiltinData{"unpack2x16unorm", "UnpackUnorm2x16"},
+ BuiltinData{"unpack2x16float", "UnpackHalf2x16"}));
TEST_F(BuiltinBuilderTest, Call_WorkgroupBarrier) {
- Func("f", {}, ty.void_(),
- ast::StatementList{
- CallStmt(Call("workgroupBarrier")),
- },
- ast::AttributeList{
- Stage(ast::PipelineStage::kCompute),
- WorkgroupSize(1),
- });
+ Func("f", {}, ty.void_(),
+ ast::StatementList{
+ CallStmt(Call("workgroupBarrier")),
+ },
+ ast::AttributeList{
+ Stage(ast::PipelineStage::kCompute),
+ WorkgroupSize(1_i),
+ });
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- ASSERT_TRUE(b.Build()) << b.error();
+ ASSERT_TRUE(b.Build()) << b.error();
- ASSERT_EQ(b.functions().size(), 1u);
+ ASSERT_EQ(b.functions().size(), 1_u);
- auto* expected_types = R"(%2 = OpTypeVoid
+ auto* expected_types = R"(%2 = OpTypeVoid
%1 = OpTypeFunction %2
%6 = OpTypeInt 32 0
%7 = OpConstant %6 2
%8 = OpConstant %6 264
)";
- auto got_types = DumpInstructions(b.types());
- EXPECT_EQ(expected_types, got_types);
+ auto got_types = DumpInstructions(b.types());
+ EXPECT_EQ(expected_types, got_types);
- auto* expected_instructions = R"(OpControlBarrier %7 %7 %8
+ auto* expected_instructions = R"(OpControlBarrier %7 %7 %8
OpReturn
)";
- auto got_instructions = DumpInstructions(b.functions()[0].instructions());
- EXPECT_EQ(expected_instructions, got_instructions);
+ auto got_instructions = DumpInstructions(b.functions()[0].instructions());
+ EXPECT_EQ(expected_instructions, got_instructions);
- Validate(b);
+ Validate(b);
}
TEST_F(BuiltinBuilderTest, Call_StorageBarrier) {
- Func("f", {}, ty.void_(),
- ast::StatementList{
- CallStmt(Call("storageBarrier")),
- },
- ast::AttributeList{
- Stage(ast::PipelineStage::kCompute),
- WorkgroupSize(1),
- });
+ Func("f", {}, ty.void_(),
+ ast::StatementList{
+ CallStmt(Call("storageBarrier")),
+ },
+ ast::AttributeList{
+ Stage(ast::PipelineStage::kCompute),
+ WorkgroupSize(1_i),
+ });
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- ASSERT_TRUE(b.Build()) << b.error();
+ ASSERT_TRUE(b.Build()) << b.error();
- ASSERT_EQ(b.functions().size(), 1u);
+ ASSERT_EQ(b.functions().size(), 1_u);
- auto* expected_types = R"(%2 = OpTypeVoid
+ auto* expected_types = R"(%2 = OpTypeVoid
%1 = OpTypeFunction %2
%6 = OpTypeInt 32 0
%7 = OpConstant %6 2
%8 = OpConstant %6 72
)";
- auto got_types = DumpInstructions(b.types());
- EXPECT_EQ(expected_types, got_types);
+ auto got_types = DumpInstructions(b.types());
+ EXPECT_EQ(expected_types, got_types);
- auto* expected_instructions = R"(OpControlBarrier %7 %7 %8
+ auto* expected_instructions = R"(OpControlBarrier %7 %7 %8
OpReturn
)";
- auto got_instructions = DumpInstructions(b.functions()[0].instructions());
- EXPECT_EQ(expected_instructions, got_instructions);
+ auto got_instructions = DumpInstructions(b.functions()[0].instructions());
+ EXPECT_EQ(expected_instructions, got_instructions);
- Validate(b);
+ Validate(b);
}
TEST_F(BuiltinBuilderTest, Call_ExtractBits_i32) {
- auto* v = Var("v", ty.i32());
- auto* offset = Var("offset", ty.u32());
- auto* count = Var("count", ty.u32());
- auto* call = Call("extractBits", v, offset, count);
- auto* func = WrapInFunction(v, offset, count, call);
+ auto* v = Var("v", ty.i32());
+ auto* offset = Var("offset", ty.u32());
+ auto* count = Var("count", ty.u32());
+ auto* call = Call("extractBits", v, offset, count);
+ auto* func = WrapInFunction(v, offset, count, call);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
+ ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
- EXPECT_EQ(DumpBuilder(b), R"(OpEntryPoint GLCompute %3 "test_function"
+ auto got = DumpBuilder(b);
+ auto expect = R"(OpEntryPoint GLCompute %3 "test_function"
OpExecutionMode %3 LocalSize 1 1 1
OpName %3 "test_function"
OpName %5 "v"
@@ -2351,21 +2451,23 @@ OpName %13 "count"
%14 = OpBitFieldSExtract %7 %15 %16 %17
OpReturn
OpFunctionEnd
-)");
+)";
+ EXPECT_EQ(got, expect);
}
TEST_F(BuiltinBuilderTest, Call_ExtractBits_u32) {
- auto* v = Var("v", ty.u32());
- auto* offset = Var("offset", ty.u32());
- auto* count = Var("count", ty.u32());
- auto* call = Call("extractBits", v, offset, count);
- auto* func = WrapInFunction(v, offset, count, call);
+ auto* v = Var("v", ty.u32());
+ auto* offset = Var("offset", ty.u32());
+ auto* count = Var("count", ty.u32());
+ auto* call = Call("extractBits", v, offset, count);
+ auto* func = WrapInFunction(v, offset, count, call);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
+ ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
- EXPECT_EQ(DumpBuilder(b), R"(OpEntryPoint GLCompute %3 "test_function"
+ auto got = DumpBuilder(b);
+ auto expect = R"(OpEntryPoint GLCompute %3 "test_function"
OpExecutionMode %3 LocalSize 1 1 1
OpName %3 "test_function"
OpName %5 "v"
@@ -2387,21 +2489,23 @@ OpName %10 "count"
%11 = OpBitFieldUExtract %7 %12 %13 %14
OpReturn
OpFunctionEnd
-)");
+)";
+ EXPECT_EQ(got, expect);
}
TEST_F(BuiltinBuilderTest, Call_ExtractBits_vec3_i32) {
- auto* v = Var("v", ty.vec3<i32>());
- auto* offset = Var("offset", ty.u32());
- auto* count = Var("count", ty.u32());
- auto* call = Call("extractBits", v, offset, count);
- auto* func = WrapInFunction(v, offset, count, call);
+ auto* v = Var("v", ty.vec3<i32>());
+ auto* offset = Var("offset", ty.u32());
+ auto* count = Var("count", ty.u32());
+ auto* call = Call("extractBits", v, offset, count);
+ auto* func = WrapInFunction(v, offset, count, call);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
+ ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
- EXPECT_EQ(DumpBuilder(b), R"(OpEntryPoint GLCompute %3 "test_function"
+ auto got = DumpBuilder(b);
+ auto expect = R"(OpEntryPoint GLCompute %3 "test_function"
OpExecutionMode %3 LocalSize 1 1 1
OpName %3 "test_function"
OpName %5 "v"
@@ -2427,21 +2531,23 @@ OpName %14 "count"
%15 = OpBitFieldSExtract %7 %16 %17 %18
OpReturn
OpFunctionEnd
-)");
+)";
+ EXPECT_EQ(got, expect);
}
TEST_F(BuiltinBuilderTest, Call_ExtractBits_vec3_u32) {
- auto* v = Var("v", ty.vec3<u32>());
- auto* offset = Var("offset", ty.u32());
- auto* count = Var("count", ty.u32());
- auto* call = Call("extractBits", v, offset, count);
- auto* func = WrapInFunction(v, offset, count, call);
+ auto* v = Var("v", ty.vec3<u32>());
+ auto* offset = Var("offset", ty.u32());
+ auto* count = Var("count", ty.u32());
+ auto* call = Call("extractBits", v, offset, count);
+ auto* func = WrapInFunction(v, offset, count, call);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
+ ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
- EXPECT_EQ(DumpBuilder(b), R"(OpEntryPoint GLCompute %3 "test_function"
+ auto got = DumpBuilder(b);
+ auto expect = R"(OpEntryPoint GLCompute %3 "test_function"
OpExecutionMode %3 LocalSize 1 1 1
OpName %3 "test_function"
OpName %5 "v"
@@ -2466,22 +2572,24 @@ OpName %13 "count"
%14 = OpBitFieldUExtract %7 %15 %16 %17
OpReturn
OpFunctionEnd
-)");
+)";
+ EXPECT_EQ(got, expect);
}
TEST_F(BuiltinBuilderTest, Call_InsertBits_i32) {
- auto* v = Var("v", ty.i32());
- auto* n = Var("n", ty.i32());
- auto* offset = Var("offset", ty.u32());
- auto* count = Var("count", ty.u32());
- auto* call = Call("insertBits", v, n, offset, count);
- auto* func = WrapInFunction(v, n, offset, count, call);
+ auto* v = Var("v", ty.i32());
+ auto* n = Var("n", ty.i32());
+ auto* offset = Var("offset", ty.u32());
+ auto* count = Var("count", ty.u32());
+ auto* call = Call("insertBits", v, n, offset, count);
+ auto* func = WrapInFunction(v, n, offset, count, call);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
+ ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
- EXPECT_EQ(DumpBuilder(b), R"(OpEntryPoint GLCompute %3 "test_function"
+ auto got = DumpBuilder(b);
+ auto expect = R"(OpEntryPoint GLCompute %3 "test_function"
OpExecutionMode %3 LocalSize 1 1 1
OpName %3 "test_function"
OpName %5 "v"
@@ -2509,22 +2617,24 @@ OpName %14 "count"
%15 = OpBitFieldInsert %7 %16 %17 %18 %19
OpReturn
OpFunctionEnd
-)");
+)";
+ EXPECT_EQ(got, expect);
}
TEST_F(BuiltinBuilderTest, Call_InsertBits_u32) {
- auto* v = Var("v", ty.u32());
- auto* n = Var("n", ty.u32());
- auto* offset = Var("offset", ty.u32());
- auto* count = Var("count", ty.u32());
- auto* call = Call("insertBits", v, n, offset, count);
- auto* func = WrapInFunction(v, n, offset, count, call);
+ auto* v = Var("v", ty.u32());
+ auto* n = Var("n", ty.u32());
+ auto* offset = Var("offset", ty.u32());
+ auto* count = Var("count", ty.u32());
+ auto* call = Call("insertBits", v, n, offset, count);
+ auto* func = WrapInFunction(v, n, offset, count, call);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
+ ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
- EXPECT_EQ(DumpBuilder(b), R"(OpEntryPoint GLCompute %3 "test_function"
+ auto got = DumpBuilder(b);
+ auto expect = R"(OpEntryPoint GLCompute %3 "test_function"
OpExecutionMode %3 LocalSize 1 1 1
OpName %3 "test_function"
OpName %5 "v"
@@ -2549,22 +2659,24 @@ OpName %11 "count"
%12 = OpBitFieldInsert %7 %13 %14 %15 %16
OpReturn
OpFunctionEnd
-)");
+)";
+ EXPECT_EQ(got, expect);
}
TEST_F(BuiltinBuilderTest, Call_InsertBits_vec3_i32) {
- auto* v = Var("v", ty.vec3<i32>());
- auto* n = Var("n", ty.vec3<i32>());
- auto* offset = Var("offset", ty.u32());
- auto* count = Var("count", ty.u32());
- auto* call = Call("insertBits", v, n, offset, count);
- auto* func = WrapInFunction(v, n, offset, count, call);
+ auto* v = Var("v", ty.vec3<i32>());
+ auto* n = Var("n", ty.vec3<i32>());
+ auto* offset = Var("offset", ty.u32());
+ auto* count = Var("count", ty.u32());
+ auto* call = Call("insertBits", v, n, offset, count);
+ auto* func = WrapInFunction(v, n, offset, count, call);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
+ ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
- EXPECT_EQ(DumpBuilder(b), R"(OpEntryPoint GLCompute %3 "test_function"
+ auto got = DumpBuilder(b);
+ auto expect = R"(OpEntryPoint GLCompute %3 "test_function"
OpExecutionMode %3 LocalSize 1 1 1
OpName %3 "test_function"
OpName %5 "v"
@@ -2593,22 +2705,24 @@ OpName %15 "count"
%16 = OpBitFieldInsert %7 %17 %18 %19 %20
OpReturn
OpFunctionEnd
-)");
+)";
+ EXPECT_EQ(got, expect);
}
TEST_F(BuiltinBuilderTest, Call_InsertBits_vec3_u32) {
- auto* v = Var("v", ty.vec3<u32>());
- auto* n = Var("n", ty.vec3<u32>());
- auto* offset = Var("offset", ty.u32());
- auto* count = Var("count", ty.u32());
- auto* call = Call("insertBits", v, n, offset, count);
- auto* func = WrapInFunction(v, n, offset, count, call);
+ auto* v = Var("v", ty.vec3<u32>());
+ auto* n = Var("n", ty.vec3<u32>());
+ auto* offset = Var("offset", ty.u32());
+ auto* count = Var("count", ty.u32());
+ auto* call = Call("insertBits", v, n, offset, count);
+ auto* func = WrapInFunction(v, n, offset, count, call);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
+ ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
- EXPECT_EQ(DumpBuilder(b), R"(OpEntryPoint GLCompute %3 "test_function"
+ auto got = DumpBuilder(b);
+ auto expect = R"(OpEntryPoint GLCompute %3 "test_function"
OpExecutionMode %3 LocalSize 1 1 1
OpName %3 "test_function"
OpName %5 "v"
@@ -2636,7 +2750,87 @@ OpName %14 "count"
%15 = OpBitFieldInsert %7 %16 %17 %18 %19
OpReturn
OpFunctionEnd
-)");
+)";
+ EXPECT_EQ(got, expect);
+}
+
+TEST_F(BuiltinBuilderTest, Call_Dot4I8Packed) {
+ auto* ext =
+ create<ast::Enable>(Source{Source::Range{Source::Location{10, 2}, Source::Location{10, 5}}},
+ ast::Extension::kChromiumExperimentalDP4a);
+ AST().AddEnable(ext);
+
+ auto* val1 = Var("val1", ty.u32());
+ auto* val2 = Var("val2", ty.u32());
+ auto* call = Call("dot4I8Packed", val1, val2);
+ auto* func = WrapInFunction(val1, val2, call);
+
+ spirv::Builder& b = Build();
+
+ ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
+
+ auto got = DumpBuilder(b);
+ auto expect = R"(OpEntryPoint GLCompute %3 "test_function"
+OpExecutionMode %3 LocalSize 1 1 1
+OpName %3 "test_function"
+OpName %5 "val1"
+OpName %9 "val2"
+%2 = OpTypeVoid
+%1 = OpTypeFunction %2
+%7 = OpTypeInt 32 0
+%6 = OpTypePointer Function %7
+%8 = OpConstantNull %7
+%11 = OpTypeInt 32 1
+%3 = OpFunction %2 None %1
+%4 = OpLabel
+%5 = OpVariable %6 Function %8
+%9 = OpVariable %6 Function %8
+%12 = OpLoad %7 %5
+%13 = OpLoad %7 %9
+%10 = OpSDot %11 %12 %13 PackedVectorFormat4x8Bit
+OpReturn
+OpFunctionEnd
+)";
+ EXPECT_EQ(got, expect);
+}
+
+TEST_F(BuiltinBuilderTest, Call_Dot4U8Packed) {
+ auto* ext =
+ create<ast::Enable>(Source{Source::Range{Source::Location{10, 2}, Source::Location{10, 5}}},
+ ast::Extension::kChromiumExperimentalDP4a);
+ AST().AddEnable(ext);
+
+ auto* val1 = Var("val1", ty.u32());
+ auto* val2 = Var("val2", ty.u32());
+ auto* call = Call("dot4U8Packed", val1, val2);
+ auto* func = WrapInFunction(val1, val2, call);
+
+ spirv::Builder& b = Build();
+
+ ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
+
+ auto got = DumpBuilder(b);
+ auto expect = R"(OpEntryPoint GLCompute %3 "test_function"
+OpExecutionMode %3 LocalSize 1 1 1
+OpName %3 "test_function"
+OpName %5 "val1"
+OpName %9 "val2"
+%2 = OpTypeVoid
+%1 = OpTypeFunction %2
+%7 = OpTypeInt 32 0
+%6 = OpTypePointer Function %7
+%8 = OpConstantNull %7
+%3 = OpFunction %2 None %1
+%4 = OpLabel
+%5 = OpVariable %6 Function %8
+%9 = OpVariable %6 Function %8
+%11 = OpLoad %7 %5
+%12 = OpLoad %7 %9
+%10 = OpUDot %7 %11 %12 PackedVectorFormat4x8Bit
+OpReturn
+OpFunctionEnd
+)";
+ EXPECT_EQ(got, expect);
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/writer/spirv/builder_builtin_texture_test.cc b/chromium/third_party/dawn/src/tint/writer/spirv/builder_builtin_texture_test.cc
index 9a2269da01f..b4abfcf141a 100644
--- a/chromium/third_party/dawn/src/tint/writer/spirv/builder_builtin_texture_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/spirv/builder_builtin_texture_test.cc
@@ -23,18 +23,18 @@ namespace tint::writer::spirv {
namespace {
struct expected_texture_overload_spirv {
- std::string types;
- std::string instructions;
- std::string capabilities;
+ std::string types;
+ std::string instructions;
+ std::string capabilities;
};
expected_texture_overload_spirv expected_texture_overload(
ast::builtin::test::ValidTextureOverload overload) {
- using ValidTextureOverload = ast::builtin::test::ValidTextureOverload;
- switch (overload) {
- case ValidTextureOverload::kDimensions1d:
- return {
- R"(
+ using ValidTextureOverload = ast::builtin::test::ValidTextureOverload;
+ switch (overload) {
+ case ValidTextureOverload::kDimensions1d:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 1D 0 0 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -45,17 +45,17 @@ expected_texture_overload_spirv expected_texture_overload(
%9 = OpTypeInt 32 1
%11 = OpConstant %9 0
)",
- R"(
+ R"(
%10 = OpLoad %3 %1
%8 = OpImageQuerySizeLod %9 %10 %11
)",
- R"(
+ R"(
OpCapability Sampled1D
OpCapability ImageQuery
)"};
- case ValidTextureOverload::kDimensions2d:
- return {
- R"(
+ case ValidTextureOverload::kDimensions2d:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 2D 0 0 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -67,16 +67,16 @@ OpCapability ImageQuery
%9 = OpTypeVector %10 2
%12 = OpConstant %10 0
)",
- R"(
+ R"(
%11 = OpLoad %3 %1
%8 = OpImageQuerySizeLod %9 %11 %12
)",
- R"(
+ R"(
OpCapability ImageQuery
)"};
- case ValidTextureOverload::kDimensions2dLevel:
- return {
- R"(
+ case ValidTextureOverload::kDimensions2dLevel:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 2D 0 0 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -88,16 +88,16 @@ OpCapability ImageQuery
%9 = OpTypeVector %10 2
%12 = OpConstant %10 1
)",
- R"(
+ R"(
%11 = OpLoad %3 %1
%8 = OpImageQuerySizeLod %9 %11 %12
)",
- R"(
+ R"(
OpCapability ImageQuery
)"};
- case ValidTextureOverload::kDimensions2dArray:
- return {
- R"(
+ case ValidTextureOverload::kDimensions2dArray:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 2D 0 1 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -110,17 +110,17 @@ OpCapability ImageQuery
%12 = OpTypeVector %10 3
%14 = OpConstant %10 0
)",
- R"(
+ R"(
%13 = OpLoad %3 %1
%11 = OpImageQuerySizeLod %12 %13 %14
%8 = OpVectorShuffle %9 %11 %11 0 1
)",
- R"(
+ R"(
OpCapability ImageQuery
)"};
- case ValidTextureOverload::kDimensions2dArrayLevel:
- return {
- R"(
+ case ValidTextureOverload::kDimensions2dArrayLevel:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 2D 0 1 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -133,17 +133,17 @@ OpCapability ImageQuery
%12 = OpTypeVector %10 3
%14 = OpConstant %10 1
)",
- R"(
+ R"(
%13 = OpLoad %3 %1
%11 = OpImageQuerySizeLod %12 %13 %14
%8 = OpVectorShuffle %9 %11 %11 0 1
)",
- R"(
+ R"(
OpCapability ImageQuery
)"};
- case ValidTextureOverload::kDimensions3d:
- return {
- R"(
+ case ValidTextureOverload::kDimensions3d:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 3D 0 0 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -155,16 +155,16 @@ OpCapability ImageQuery
%9 = OpTypeVector %10 3
%12 = OpConstant %10 0
)",
- R"(
+ R"(
%11 = OpLoad %3 %1
%8 = OpImageQuerySizeLod %9 %11 %12
)",
- R"(
+ R"(
OpCapability ImageQuery
)"};
- case ValidTextureOverload::kDimensions3dLevel:
- return {
- R"(
+ case ValidTextureOverload::kDimensions3dLevel:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 3D 0 0 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -176,16 +176,16 @@ OpCapability ImageQuery
%9 = OpTypeVector %10 3
%12 = OpConstant %10 1
)",
- R"(
+ R"(
%11 = OpLoad %3 %1
%8 = OpImageQuerySizeLod %9 %11 %12
)",
- R"(
+ R"(
OpCapability ImageQuery
)"};
- case ValidTextureOverload::kDimensionsCube:
- return {
- R"(
+ case ValidTextureOverload::kDimensionsCube:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 Cube 0 0 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -197,16 +197,16 @@ OpCapability ImageQuery
%9 = OpTypeVector %10 2
%12 = OpConstant %10 0
)",
- R"(
+ R"(
%11 = OpLoad %3 %1
%8 = OpImageQuerySizeLod %9 %11 %12
)",
- R"(
+ R"(
OpCapability ImageQuery
)"};
- case ValidTextureOverload::kDimensionsCubeLevel:
- return {
- R"(
+ case ValidTextureOverload::kDimensionsCubeLevel:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 Cube 0 0 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -218,16 +218,16 @@ OpCapability ImageQuery
%9 = OpTypeVector %10 2
%12 = OpConstant %10 1
)",
- R"(
+ R"(
%11 = OpLoad %3 %1
%8 = OpImageQuerySizeLod %9 %11 %12
)",
- R"(
+ R"(
OpCapability ImageQuery
)"};
- case ValidTextureOverload::kDimensionsCubeArray:
- return {
- R"(
+ case ValidTextureOverload::kDimensionsCubeArray:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 Cube 0 1 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -240,18 +240,18 @@ OpCapability ImageQuery
%12 = OpTypeVector %10 3
%14 = OpConstant %10 0
)",
- R"(
+ R"(
%13 = OpLoad %3 %1
%11 = OpImageQuerySizeLod %12 %13 %14
%8 = OpVectorShuffle %9 %11 %11 0 1
)",
- R"(
+ R"(
OpCapability SampledCubeArray
OpCapability ImageQuery
)"};
- case ValidTextureOverload::kDimensionsCubeArrayLevel:
- return {
- R"(
+ case ValidTextureOverload::kDimensionsCubeArrayLevel:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 Cube 0 1 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -264,18 +264,18 @@ OpCapability ImageQuery
%12 = OpTypeVector %10 3
%14 = OpConstant %10 1
)",
- R"(
+ R"(
%13 = OpLoad %3 %1
%11 = OpImageQuerySizeLod %12 %13 %14
%8 = OpVectorShuffle %9 %11 %11 0 1
)",
- R"(
+ R"(
OpCapability SampledCubeArray
OpCapability ImageQuery
)"};
- case ValidTextureOverload::kDimensionsMultisampled2d:
- return {
- R"(
+ case ValidTextureOverload::kDimensionsMultisampled2d:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 2D 0 0 1 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -286,16 +286,16 @@ OpCapability ImageQuery
%10 = OpTypeInt 32 1
%9 = OpTypeVector %10 2
)",
- R"(
+ R"(
%11 = OpLoad %3 %1
%8 = OpImageQuerySize %9 %11
)",
- R"(
+ R"(
OpCapability ImageQuery
)"};
- case ValidTextureOverload::kDimensionsDepth2d:
- return {
- R"(
+ case ValidTextureOverload::kDimensionsDepth2d:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 2D 0 0 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -307,16 +307,16 @@ OpCapability ImageQuery
%9 = OpTypeVector %10 2
%12 = OpConstant %10 0
)",
- R"(
+ R"(
%11 = OpLoad %3 %1
%8 = OpImageQuerySizeLod %9 %11 %12
)",
- R"(
+ R"(
OpCapability ImageQuery
)"};
- case ValidTextureOverload::kDimensionsDepth2dLevel:
- return {
- R"(
+ case ValidTextureOverload::kDimensionsDepth2dLevel:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 2D 0 0 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -328,16 +328,16 @@ OpCapability ImageQuery
%9 = OpTypeVector %10 2
%12 = OpConstant %10 1
)",
- R"(
+ R"(
%11 = OpLoad %3 %1
%8 = OpImageQuerySizeLod %9 %11 %12
)",
- R"(
+ R"(
OpCapability ImageQuery
)"};
- case ValidTextureOverload::kDimensionsDepth2dArray:
- return {
- R"(
+ case ValidTextureOverload::kDimensionsDepth2dArray:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 2D 0 1 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -350,17 +350,17 @@ OpCapability ImageQuery
%12 = OpTypeVector %10 3
%14 = OpConstant %10 0
)",
- R"(
+ R"(
%13 = OpLoad %3 %1
%11 = OpImageQuerySizeLod %12 %13 %14
%8 = OpVectorShuffle %9 %11 %11 0 1
)",
- R"(
+ R"(
OpCapability ImageQuery
)"};
- case ValidTextureOverload::kDimensionsDepth2dArrayLevel:
- return {
- R"(
+ case ValidTextureOverload::kDimensionsDepth2dArrayLevel:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 2D 0 1 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -373,17 +373,17 @@ OpCapability ImageQuery
%12 = OpTypeVector %10 3
%14 = OpConstant %10 1
)",
- R"(
+ R"(
%13 = OpLoad %3 %1
%11 = OpImageQuerySizeLod %12 %13 %14
%8 = OpVectorShuffle %9 %11 %11 0 1
)",
- R"(
+ R"(
OpCapability ImageQuery
)"};
- case ValidTextureOverload::kDimensionsDepthCube:
- return {
- R"(
+ case ValidTextureOverload::kDimensionsDepthCube:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 Cube 0 0 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -395,16 +395,16 @@ OpCapability ImageQuery
%9 = OpTypeVector %10 2
%12 = OpConstant %10 0
)",
- R"(
+ R"(
%11 = OpLoad %3 %1
%8 = OpImageQuerySizeLod %9 %11 %12
)",
- R"(
+ R"(
OpCapability ImageQuery
)"};
- case ValidTextureOverload::kDimensionsDepthCubeLevel:
- return {
- R"(
+ case ValidTextureOverload::kDimensionsDepthCubeLevel:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 Cube 0 0 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -416,16 +416,16 @@ OpCapability ImageQuery
%9 = OpTypeVector %10 2
%12 = OpConstant %10 1
)",
- R"(
+ R"(
%11 = OpLoad %3 %1
%8 = OpImageQuerySizeLod %9 %11 %12
)",
- R"(
+ R"(
OpCapability ImageQuery
)"};
- case ValidTextureOverload::kDimensionsDepthCubeArray:
- return {
- R"(
+ case ValidTextureOverload::kDimensionsDepthCubeArray:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 Cube 0 1 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -438,18 +438,18 @@ OpCapability ImageQuery
%12 = OpTypeVector %10 3
%14 = OpConstant %10 0
)",
- R"(
+ R"(
%13 = OpLoad %3 %1
%11 = OpImageQuerySizeLod %12 %13 %14
%8 = OpVectorShuffle %9 %11 %11 0 1
)",
- R"(
+ R"(
OpCapability SampledCubeArray
OpCapability ImageQuery
)"};
- case ValidTextureOverload::kDimensionsDepthCubeArrayLevel:
- return {
- R"(
+ case ValidTextureOverload::kDimensionsDepthCubeArrayLevel:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 Cube 0 1 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -462,18 +462,18 @@ OpCapability ImageQuery
%12 = OpTypeVector %10 3
%14 = OpConstant %10 1
)",
- R"(
+ R"(
%13 = OpLoad %3 %1
%11 = OpImageQuerySizeLod %12 %13 %14
%8 = OpVectorShuffle %9 %11 %11 0 1
)",
- R"(
+ R"(
OpCapability SampledCubeArray
OpCapability ImageQuery
)"};
- case ValidTextureOverload::kDimensionsDepthMultisampled2d:
- return {
- R"(
+ case ValidTextureOverload::kDimensionsDepthMultisampled2d:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 2D 0 0 1 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -484,16 +484,16 @@ OpCapability ImageQuery
%10 = OpTypeInt 32 1
%9 = OpTypeVector %10 2
)",
- R"(
+ R"(
%11 = OpLoad %3 %1
%8 = OpImageQuerySize %9 %11
)",
- R"(
+ R"(
OpCapability ImageQuery
)"};
- case ValidTextureOverload::kDimensionsStorageWO1d:
- return {
- R"(
+ case ValidTextureOverload::kDimensionsStorageWO1d:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 1D 0 0 0 2 Rgba32f
%2 = OpTypePointer UniformConstant %3
@@ -503,17 +503,17 @@ OpCapability ImageQuery
%5 = OpVariable %6 UniformConstant
%9 = OpTypeInt 32 1
)",
- R"(
+ R"(
%10 = OpLoad %3 %1
%8 = OpImageQuerySize %9 %10
)",
- R"(
+ R"(
OpCapability Image1D
OpCapability ImageQuery
)"};
- case ValidTextureOverload::kDimensionsStorageWO2d:
- return {
- R"(
+ case ValidTextureOverload::kDimensionsStorageWO2d:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 2D 0 0 0 2 Rgba32f
%2 = OpTypePointer UniformConstant %3
@@ -524,16 +524,16 @@ OpCapability ImageQuery
%10 = OpTypeInt 32 1
%9 = OpTypeVector %10 2
)",
- R"(
+ R"(
%11 = OpLoad %3 %1
%8 = OpImageQuerySize %9 %11
)",
- R"(
+ R"(
OpCapability ImageQuery
)"};
- case ValidTextureOverload::kDimensionsStorageWO2dArray:
- return {
- R"(
+ case ValidTextureOverload::kDimensionsStorageWO2dArray:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 2D 0 1 0 2 Rgba32f
%2 = OpTypePointer UniformConstant %3
@@ -545,17 +545,17 @@ OpCapability ImageQuery
%9 = OpTypeVector %10 2
%12 = OpTypeVector %10 3
)",
- R"(
+ R"(
%13 = OpLoad %3 %1
%11 = OpImageQuerySize %12 %13
%8 = OpVectorShuffle %9 %11 %11 0 1
)",
- R"(
+ R"(
OpCapability ImageQuery
)"};
- case ValidTextureOverload::kDimensionsStorageWO3d:
- return {
- R"(
+ case ValidTextureOverload::kDimensionsStorageWO3d:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 3D 0 0 0 2 Rgba32f
%2 = OpTypePointer UniformConstant %3
@@ -566,15 +566,15 @@ OpCapability ImageQuery
%10 = OpTypeInt 32 1
%9 = OpTypeVector %10 3
)",
- R"(
+ R"(
%11 = OpLoad %3 %1
%8 = OpImageQuerySize %9 %11
)",
- R"(
+ R"(
OpCapability ImageQuery
)"};
- case ValidTextureOverload::kGather2dF32:
- return {R"(
+ case ValidTextureOverload::kGather2dF32:
+ return {R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 2D 0 0 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -589,18 +589,18 @@ OpCapability ImageQuery
%16 = OpConstant %4 2
%17 = OpConstantComposite %14 %15 %16
%18 = OpTypeInt 32 1
-%19 = OpConstant %18 0
+%19 = OpConstantNull %18
)",
- R"(
+ R"(
%10 = OpLoad %7 %5
%11 = OpLoad %3 %1
%13 = OpSampledImage %12 %11 %10
%8 = OpImageGather %9 %13 %17 %19
)",
- R"(
+ R"(
)"};
- case ValidTextureOverload::kGather2dOffsetF32:
- return {R"(
+ case ValidTextureOverload::kGather2dOffsetF32:
+ return {R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 2D 0 0 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -615,22 +615,22 @@ OpCapability ImageQuery
%16 = OpConstant %4 2
%17 = OpConstantComposite %14 %15 %16
%18 = OpTypeInt 32 1
-%19 = OpConstant %18 0
+%19 = OpConstantNull %18
%20 = OpTypeVector %18 2
%21 = OpConstant %18 3
%22 = OpConstant %18 4
%23 = OpConstantComposite %20 %21 %22
)",
- R"(
+ R"(
%10 = OpLoad %7 %5
%11 = OpLoad %3 %1
%13 = OpSampledImage %12 %11 %10
%8 = OpImageGather %9 %13 %17 %19 ConstOffset %23
)",
- R"(
+ R"(
)"};
- case ValidTextureOverload::kGather2dArrayF32:
- return {R"(
+ case ValidTextureOverload::kGather2dArrayF32:
+ return {R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 2D 0 1 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -645,9 +645,9 @@ OpCapability ImageQuery
%16 = OpConstant %4 2
%18 = OpTypeInt 32 1
%19 = OpConstant %18 3
-%21 = OpConstant %18 0
+%21 = OpConstantNull %18
)",
- R"(
+ R"(
%10 = OpLoad %7 %5
%11 = OpLoad %3 %1
%13 = OpSampledImage %12 %11 %10
@@ -655,10 +655,10 @@ OpCapability ImageQuery
%20 = OpCompositeConstruct %14 %15 %16 %17
%8 = OpImageGather %9 %13 %20 %21
)",
- R"(
+ R"(
)"};
- case ValidTextureOverload::kGather2dArrayOffsetF32:
- return {R"(
+ case ValidTextureOverload::kGather2dArrayOffsetF32:
+ return {R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 2D 0 1 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -673,13 +673,13 @@ OpCapability ImageQuery
%16 = OpConstant %4 2
%18 = OpTypeInt 32 1
%19 = OpConstant %18 3
-%21 = OpConstant %18 0
+%21 = OpConstantNull %18
%22 = OpTypeVector %18 2
%23 = OpConstant %18 4
%24 = OpConstant %18 5
%25 = OpConstantComposite %22 %23 %24
)",
- R"(
+ R"(
%10 = OpLoad %7 %5
%11 = OpLoad %3 %1
%13 = OpSampledImage %12 %11 %10
@@ -687,10 +687,10 @@ OpCapability ImageQuery
%20 = OpCompositeConstruct %14 %15 %16 %17
%8 = OpImageGather %9 %13 %20 %21 ConstOffset %25
)",
- R"(
+ R"(
)"};
- case ValidTextureOverload::kGatherCubeF32:
- return {R"(
+ case ValidTextureOverload::kGatherCubeF32:
+ return {R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 Cube 0 0 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -706,18 +706,18 @@ OpCapability ImageQuery
%17 = OpConstant %4 3
%18 = OpConstantComposite %14 %15 %16 %17
%19 = OpTypeInt 32 1
-%20 = OpConstant %19 0
+%20 = OpConstantNull %19
)",
- R"(
+ R"(
%10 = OpLoad %7 %5
%11 = OpLoad %3 %1
%13 = OpSampledImage %12 %11 %10
%8 = OpImageGather %9 %13 %18 %20
)",
- R"(
+ R"(
)"};
- case ValidTextureOverload::kGatherCubeArrayF32:
- return {R"(
+ case ValidTextureOverload::kGatherCubeArrayF32:
+ return {R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 Cube 0 1 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -732,9 +732,9 @@ OpCapability ImageQuery
%16 = OpConstant %4 3
%18 = OpTypeInt 32 1
%19 = OpConstant %18 4
-%21 = OpConstant %18 0
+%21 = OpConstantNull %18
)",
- R"(
+ R"(
%10 = OpLoad %7 %5
%11 = OpLoad %3 %1
%13 = OpSampledImage %12 %11 %10
@@ -742,11 +742,11 @@ OpCapability ImageQuery
%20 = OpCompositeConstruct %9 %14 %15 %16 %17
%8 = OpImageGather %9 %13 %20 %21
)",
- R"(
+ R"(
OpCapability SampledCubeArray
)"};
- case ValidTextureOverload::kGatherDepth2dF32:
- return {R"(
+ case ValidTextureOverload::kGatherDepth2dF32:
+ return {R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 2D 0 0 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -763,16 +763,16 @@ OpCapability SampledCubeArray
%18 = OpTypeInt 32 1
%19 = OpConstant %18 0
)",
- R"(
+ R"(
%10 = OpLoad %7 %5
%11 = OpLoad %3 %1
%13 = OpSampledImage %12 %11 %10
%8 = OpImageGather %9 %13 %17 %19
)",
- R"(
+ R"(
)"};
- case ValidTextureOverload::kGatherDepth2dOffsetF32:
- return {R"(
+ case ValidTextureOverload::kGatherDepth2dOffsetF32:
+ return {R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 2D 0 0 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -793,16 +793,16 @@ OpCapability SampledCubeArray
%22 = OpConstant %18 4
%23 = OpConstantComposite %20 %21 %22
)",
- R"(
+ R"(
%10 = OpLoad %7 %5
%11 = OpLoad %3 %1
%13 = OpSampledImage %12 %11 %10
%8 = OpImageGather %9 %13 %17 %19 ConstOffset %23
)",
- R"(
+ R"(
)"};
- case ValidTextureOverload::kGatherDepth2dArrayF32:
- return {R"(
+ case ValidTextureOverload::kGatherDepth2dArrayF32:
+ return {R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 2D 0 1 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -819,7 +819,7 @@ OpCapability SampledCubeArray
%19 = OpConstant %18 3
%21 = OpConstant %18 0
)",
- R"(
+ R"(
%10 = OpLoad %7 %5
%11 = OpLoad %3 %1
%13 = OpSampledImage %12 %11 %10
@@ -827,10 +827,10 @@ OpCapability SampledCubeArray
%20 = OpCompositeConstruct %14 %15 %16 %17
%8 = OpImageGather %9 %13 %20 %21
)",
- R"(
+ R"(
)"};
- case ValidTextureOverload::kGatherDepth2dArrayOffsetF32:
- return {R"(
+ case ValidTextureOverload::kGatherDepth2dArrayOffsetF32:
+ return {R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 2D 0 1 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -851,7 +851,7 @@ OpCapability SampledCubeArray
%24 = OpConstant %18 5
%25 = OpConstantComposite %22 %23 %24
)",
- R"(
+ R"(
%10 = OpLoad %7 %5
%11 = OpLoad %3 %1
%13 = OpSampledImage %12 %11 %10
@@ -859,10 +859,10 @@ OpCapability SampledCubeArray
%20 = OpCompositeConstruct %14 %15 %16 %17
%8 = OpImageGather %9 %13 %20 %21 ConstOffset %25
)",
- R"(
+ R"(
)"};
- case ValidTextureOverload::kGatherDepthCubeF32:
- return {R"(
+ case ValidTextureOverload::kGatherDepthCubeF32:
+ return {R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 Cube 0 0 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -880,16 +880,16 @@ OpCapability SampledCubeArray
%19 = OpTypeInt 32 1
%20 = OpConstant %19 0
)",
- R"(
+ R"(
%10 = OpLoad %7 %5
%11 = OpLoad %3 %1
%13 = OpSampledImage %12 %11 %10
%8 = OpImageGather %9 %13 %18 %20
)",
- R"(
+ R"(
)"};
- case ValidTextureOverload::kGatherDepthCubeArrayF32:
- return {R"(
+ case ValidTextureOverload::kGatherDepthCubeArrayF32:
+ return {R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 Cube 0 1 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -906,7 +906,7 @@ OpCapability SampledCubeArray
%19 = OpConstant %18 4
%21 = OpConstant %18 0
)",
- R"(
+ R"(
%10 = OpLoad %7 %5
%11 = OpLoad %3 %1
%13 = OpSampledImage %12 %11 %10
@@ -914,11 +914,11 @@ OpCapability SampledCubeArray
%20 = OpCompositeConstruct %9 %14 %15 %16 %17
%8 = OpImageGather %9 %13 %20 %21
)",
- R"(
+ R"(
OpCapability SampledCubeArray
)"};
- case ValidTextureOverload::kGatherCompareDepth2dF32:
- return {R"(
+ case ValidTextureOverload::kGatherCompareDepth2dF32:
+ return {R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 2D 0 0 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -934,16 +934,16 @@ OpCapability SampledCubeArray
%17 = OpConstantComposite %14 %15 %16
%18 = OpConstant %4 3
)",
- R"(
+ R"(
%10 = OpLoad %7 %5
%11 = OpLoad %3 %1
%13 = OpSampledImage %12 %11 %10
%8 = OpImageDrefGather %9 %13 %17 %18
)",
- R"(
+ R"(
)"};
- case ValidTextureOverload::kGatherCompareDepth2dOffsetF32:
- return {R"(
+ case ValidTextureOverload::kGatherCompareDepth2dOffsetF32:
+ return {R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 2D 0 0 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -964,16 +964,16 @@ OpCapability SampledCubeArray
%22 = OpConstant %20 5
%23 = OpConstantComposite %19 %21 %22
)",
- R"(
+ R"(
%10 = OpLoad %7 %5
%11 = OpLoad %3 %1
%13 = OpSampledImage %12 %11 %10
%8 = OpImageDrefGather %9 %13 %17 %18 ConstOffset %23
)",
- R"(
+ R"(
)"};
- case ValidTextureOverload::kGatherCompareDepth2dArrayF32:
- return {R"(
+ case ValidTextureOverload::kGatherCompareDepth2dArrayF32:
+ return {R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 2D 0 1 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -990,7 +990,7 @@ OpCapability SampledCubeArray
%19 = OpConstant %18 3
%21 = OpConstant %4 4
)",
- R"(
+ R"(
%10 = OpLoad %7 %5
%11 = OpLoad %3 %1
%13 = OpSampledImage %12 %11 %10
@@ -998,10 +998,10 @@ OpCapability SampledCubeArray
%20 = OpCompositeConstruct %14 %15 %16 %17
%8 = OpImageDrefGather %9 %13 %20 %21
)",
- R"(
+ R"(
)"};
- case ValidTextureOverload::kGatherCompareDepth2dArrayOffsetF32:
- return {R"(
+ case ValidTextureOverload::kGatherCompareDepth2dArrayOffsetF32:
+ return {R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 2D 0 1 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -1022,7 +1022,7 @@ OpCapability SampledCubeArray
%24 = OpConstant %18 6
%25 = OpConstantComposite %22 %23 %24
)",
- R"(
+ R"(
%10 = OpLoad %7 %5
%11 = OpLoad %3 %1
%13 = OpSampledImage %12 %11 %10
@@ -1030,10 +1030,10 @@ OpCapability SampledCubeArray
%20 = OpCompositeConstruct %14 %15 %16 %17
%8 = OpImageDrefGather %9 %13 %20 %21 ConstOffset %25
)",
- R"(
+ R"(
)"};
- case ValidTextureOverload::kGatherCompareDepthCubeF32:
- return {R"(
+ case ValidTextureOverload::kGatherCompareDepthCubeF32:
+ return {R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 Cube 0 0 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -1050,16 +1050,16 @@ OpCapability SampledCubeArray
%18 = OpConstantComposite %14 %15 %16 %17
%19 = OpConstant %4 4
)",
- R"(
+ R"(
%10 = OpLoad %7 %5
%11 = OpLoad %3 %1
%13 = OpSampledImage %12 %11 %10
%8 = OpImageDrefGather %9 %13 %18 %19
)",
- R"(
+ R"(
)"};
- case ValidTextureOverload::kGatherCompareDepthCubeArrayF32:
- return {R"(
+ case ValidTextureOverload::kGatherCompareDepthCubeArrayF32:
+ return {R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 Cube 0 1 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -1076,7 +1076,7 @@ OpCapability SampledCubeArray
%19 = OpConstant %18 4
%21 = OpConstant %4 5
)",
- R"(
+ R"(
%10 = OpLoad %7 %5
%11 = OpLoad %3 %1
%13 = OpSampledImage %12 %11 %10
@@ -1084,11 +1084,11 @@ OpCapability SampledCubeArray
%20 = OpCompositeConstruct %9 %14 %15 %16 %17
%8 = OpImageDrefGather %9 %13 %20 %21
)",
- R"(
+ R"(
OpCapability SampledCubeArray
)"};
- case ValidTextureOverload::kNumLayers2dArray:
- return {R"(
+ case ValidTextureOverload::kNumLayers2dArray:
+ return {R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 2D 0 1 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -1100,16 +1100,16 @@ OpCapability SampledCubeArray
%11 = OpTypeVector %9 3
%13 = OpConstant %9 0
)",
- R"(
+ R"(
%12 = OpLoad %3 %1
%10 = OpImageQuerySizeLod %11 %12 %13
%8 = OpCompositeExtract %9 %10 2
)",
- R"(
+ R"(
OpCapability ImageQuery
)"};
- case ValidTextureOverload::kNumLayersCubeArray:
- return {R"(
+ case ValidTextureOverload::kNumLayersCubeArray:
+ return {R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 Cube 0 1 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -1121,17 +1121,17 @@ OpCapability ImageQuery
%11 = OpTypeVector %9 3
%13 = OpConstant %9 0
)",
- R"(
+ R"(
%12 = OpLoad %3 %1
%10 = OpImageQuerySizeLod %11 %12 %13
%8 = OpCompositeExtract %9 %10 2
)",
- R"(
+ R"(
OpCapability SampledCubeArray
OpCapability ImageQuery
)"};
- case ValidTextureOverload::kNumLayersDepth2dArray:
- return {R"(
+ case ValidTextureOverload::kNumLayersDepth2dArray:
+ return {R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 2D 0 1 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -1143,16 +1143,16 @@ OpCapability ImageQuery
%11 = OpTypeVector %9 3
%13 = OpConstant %9 0
)",
- R"(
+ R"(
%12 = OpLoad %3 %1
%10 = OpImageQuerySizeLod %11 %12 %13
%8 = OpCompositeExtract %9 %10 2
)",
- R"(
+ R"(
OpCapability ImageQuery
)"};
- case ValidTextureOverload::kNumLayersDepthCubeArray:
- return {R"(
+ case ValidTextureOverload::kNumLayersDepthCubeArray:
+ return {R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 Cube 0 1 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -1164,17 +1164,17 @@ OpCapability ImageQuery
%11 = OpTypeVector %9 3
%13 = OpConstant %9 0
)",
- R"(
+ R"(
%12 = OpLoad %3 %1
%10 = OpImageQuerySizeLod %11 %12 %13
%8 = OpCompositeExtract %9 %10 2
)",
- R"(
+ R"(
OpCapability SampledCubeArray
OpCapability ImageQuery
)"};
- case ValidTextureOverload::kNumLayersStorageWO2dArray:
- return {R"(
+ case ValidTextureOverload::kNumLayersStorageWO2dArray:
+ return {R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 2D 0 1 0 2 Rgba32f
%2 = OpTypePointer UniformConstant %3
@@ -1185,16 +1185,16 @@ OpCapability ImageQuery
%9 = OpTypeInt 32 1
%11 = OpTypeVector %9 3
)",
- R"(
+ R"(
%12 = OpLoad %3 %1
%10 = OpImageQuerySize %11 %12
%8 = OpCompositeExtract %9 %10 2
)",
- R"(
+ R"(
OpCapability ImageQuery
)"};
- case ValidTextureOverload::kNumLevels2d:
- return {R"(
+ case ValidTextureOverload::kNumLevels2d:
+ return {R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 2D 0 0 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -1204,15 +1204,15 @@ OpCapability ImageQuery
%5 = OpVariable %6 UniformConstant
%9 = OpTypeInt 32 1
)",
- R"(
+ R"(
%10 = OpLoad %3 %1
%8 = OpImageQueryLevels %9 %10
)",
- R"(
+ R"(
OpCapability ImageQuery
)"};
- case ValidTextureOverload::kNumLevels2dArray:
- return {R"(
+ case ValidTextureOverload::kNumLevels2dArray:
+ return {R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 2D 0 1 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -1222,15 +1222,15 @@ OpCapability ImageQuery
%5 = OpVariable %6 UniformConstant
%9 = OpTypeInt 32 1
)",
- R"(
+ R"(
%10 = OpLoad %3 %1
%8 = OpImageQueryLevels %9 %10
)",
- R"(
+ R"(
OpCapability ImageQuery
)"};
- case ValidTextureOverload::kNumLevels3d:
- return {R"(
+ case ValidTextureOverload::kNumLevels3d:
+ return {R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 3D 0 0 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -1240,15 +1240,15 @@ OpCapability ImageQuery
%5 = OpVariable %6 UniformConstant
%9 = OpTypeInt 32 1
)",
- R"(
+ R"(
%10 = OpLoad %3 %1
%8 = OpImageQueryLevels %9 %10
)",
- R"(
+ R"(
OpCapability ImageQuery
)"};
- case ValidTextureOverload::kNumLevelsCube:
- return {R"(
+ case ValidTextureOverload::kNumLevelsCube:
+ return {R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 Cube 0 0 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -1258,15 +1258,15 @@ OpCapability ImageQuery
%5 = OpVariable %6 UniformConstant
%9 = OpTypeInt 32 1
)",
- R"(
+ R"(
%10 = OpLoad %3 %1
%8 = OpImageQueryLevels %9 %10
)",
- R"(
+ R"(
OpCapability ImageQuery
)"};
- case ValidTextureOverload::kNumLevelsCubeArray:
- return {R"(
+ case ValidTextureOverload::kNumLevelsCubeArray:
+ return {R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 Cube 0 1 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -1276,16 +1276,16 @@ OpCapability ImageQuery
%5 = OpVariable %6 UniformConstant
%9 = OpTypeInt 32 1
)",
- R"(
+ R"(
%10 = OpLoad %3 %1
%8 = OpImageQueryLevels %9 %10
)",
- R"(
+ R"(
OpCapability SampledCubeArray
OpCapability ImageQuery
)"};
- case ValidTextureOverload::kNumLevelsDepth2d:
- return {R"(
+ case ValidTextureOverload::kNumLevelsDepth2d:
+ return {R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 2D 0 0 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -1295,15 +1295,15 @@ OpCapability ImageQuery
%5 = OpVariable %6 UniformConstant
%9 = OpTypeInt 32 1
)",
- R"(
+ R"(
%10 = OpLoad %3 %1
%8 = OpImageQueryLevels %9 %10
)",
- R"(
+ R"(
OpCapability ImageQuery
)"};
- case ValidTextureOverload::kNumLevelsDepth2dArray:
- return {R"(
+ case ValidTextureOverload::kNumLevelsDepth2dArray:
+ return {R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 2D 0 1 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -1313,15 +1313,15 @@ OpCapability ImageQuery
%5 = OpVariable %6 UniformConstant
%9 = OpTypeInt 32 1
)",
- R"(
+ R"(
%10 = OpLoad %3 %1
%8 = OpImageQueryLevels %9 %10
)",
- R"(
+ R"(
OpCapability ImageQuery
)"};
- case ValidTextureOverload::kNumLevelsDepthCube:
- return {R"(
+ case ValidTextureOverload::kNumLevelsDepthCube:
+ return {R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 Cube 0 0 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -1331,15 +1331,15 @@ OpCapability ImageQuery
%5 = OpVariable %6 UniformConstant
%9 = OpTypeInt 32 1
)",
- R"(
+ R"(
%10 = OpLoad %3 %1
%8 = OpImageQueryLevels %9 %10
)",
- R"(
+ R"(
OpCapability ImageQuery
)"};
- case ValidTextureOverload::kNumLevelsDepthCubeArray:
- return {R"(
+ case ValidTextureOverload::kNumLevelsDepthCubeArray:
+ return {R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 Cube 0 1 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -1349,16 +1349,16 @@ OpCapability ImageQuery
%5 = OpVariable %6 UniformConstant
%9 = OpTypeInt 32 1
)",
- R"(
+ R"(
%10 = OpLoad %3 %1
%8 = OpImageQueryLevels %9 %10
)",
- R"(
+ R"(
OpCapability SampledCubeArray
OpCapability ImageQuery
)"};
- case ValidTextureOverload::kNumSamplesMultisampled2d:
- return {R"(
+ case ValidTextureOverload::kNumSamplesMultisampled2d:
+ return {R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 2D 0 0 1 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -1368,15 +1368,15 @@ OpCapability ImageQuery
%5 = OpVariable %6 UniformConstant
%9 = OpTypeInt 32 1
)",
- R"(
+ R"(
%10 = OpLoad %3 %1
%8 = OpImageQuerySamples %9 %10
)",
- R"(
+ R"(
OpCapability ImageQuery
)"};
- case ValidTextureOverload::kNumSamplesDepthMultisampled2d:
- return {R"(
+ case ValidTextureOverload::kNumSamplesDepthMultisampled2d:
+ return {R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 2D 0 0 1 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -1386,16 +1386,16 @@ OpCapability ImageQuery
%5 = OpVariable %6 UniformConstant
%9 = OpTypeInt 32 1
)",
- R"(
+ R"(
%10 = OpLoad %3 %1
%8 = OpImageQuerySamples %9 %10
)",
- R"(
+ R"(
OpCapability ImageQuery
)"};
- case ValidTextureOverload::kSample1dF32:
- return {
- R"(
+ case ValidTextureOverload::kSample1dF32:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 1D 0 0 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -1407,18 +1407,18 @@ OpCapability ImageQuery
%12 = OpTypeSampledImage %3
%14 = OpConstant %4 1
)",
- R"(
+ R"(
%10 = OpLoad %7 %5
%11 = OpLoad %3 %1
%13 = OpSampledImage %12 %11 %10
%8 = OpImageSampleImplicitLod %9 %13 %14
)",
- R"(
+ R"(
OpCapability Sampled1D
)"};
- case ValidTextureOverload::kSample2dF32:
- return {
- R"(
+ case ValidTextureOverload::kSample2dF32:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 2D 0 0 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -1433,17 +1433,17 @@ OpCapability Sampled1D
%16 = OpConstant %4 2
%17 = OpConstantComposite %14 %15 %16
)",
- R"(
+ R"(
%10 = OpLoad %7 %5
%11 = OpLoad %3 %1
%13 = OpSampledImage %12 %11 %10
%8 = OpImageSampleImplicitLod %9 %13 %17
)",
- R"(
+ R"(
)"};
- case ValidTextureOverload::kSample2dOffsetF32:
- return {
- R"(
+ case ValidTextureOverload::kSample2dOffsetF32:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 2D 0 0 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -1463,17 +1463,17 @@ OpCapability Sampled1D
%21 = OpConstant %19 4
%22 = OpConstantComposite %18 %20 %21
)",
- R"(
+ R"(
%10 = OpLoad %7 %5
%11 = OpLoad %3 %1
%13 = OpSampledImage %12 %11 %10
%8 = OpImageSampleImplicitLod %9 %13 %17 ConstOffset %22
)",
- R"(
+ R"(
)"};
- case ValidTextureOverload::kSample2dArrayF32:
- return {
- R"(
+ case ValidTextureOverload::kSample2dArrayF32:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 2D 0 1 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -1489,7 +1489,7 @@ OpCapability Sampled1D
%18 = OpTypeInt 32 1
%19 = OpConstant %18 3
)",
- R"(
+ R"(
%10 = OpLoad %7 %5
%11 = OpLoad %3 %1
%13 = OpSampledImage %12 %11 %10
@@ -1497,11 +1497,11 @@ OpCapability Sampled1D
%20 = OpCompositeConstruct %14 %15 %16 %17
%8 = OpImageSampleImplicitLod %9 %13 %20
)",
- R"(
+ R"(
)"};
- case ValidTextureOverload::kSample2dArrayOffsetF32:
- return {
- R"(
+ case ValidTextureOverload::kSample2dArrayOffsetF32:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 2D 0 1 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -1521,7 +1521,7 @@ OpCapability Sampled1D
%23 = OpConstant %18 5
%24 = OpConstantComposite %21 %22 %23
)",
- R"(
+ R"(
%10 = OpLoad %7 %5
%11 = OpLoad %3 %1
%13 = OpSampledImage %12 %11 %10
@@ -1529,11 +1529,11 @@ OpCapability Sampled1D
%20 = OpCompositeConstruct %14 %15 %16 %17
%8 = OpImageSampleImplicitLod %9 %13 %20 ConstOffset %24
)",
- R"(
+ R"(
)"};
- case ValidTextureOverload::kSample3dF32:
- return {
- R"(
+ case ValidTextureOverload::kSample3dF32:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 3D 0 0 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -1549,17 +1549,17 @@ OpCapability Sampled1D
%17 = OpConstant %4 3
%18 = OpConstantComposite %14 %15 %16 %17
)",
- R"(
+ R"(
%10 = OpLoad %7 %5
%11 = OpLoad %3 %1
%13 = OpSampledImage %12 %11 %10
%8 = OpImageSampleImplicitLod %9 %13 %18
)",
- R"(
+ R"(
)"};
- case ValidTextureOverload::kSample3dOffsetF32:
- return {
- R"(
+ case ValidTextureOverload::kSample3dOffsetF32:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 3D 0 0 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -1581,17 +1581,17 @@ OpCapability Sampled1D
%23 = OpConstant %20 6
%24 = OpConstantComposite %19 %21 %22 %23
)",
- R"(
+ R"(
%10 = OpLoad %7 %5
%11 = OpLoad %3 %1
%13 = OpSampledImage %12 %11 %10
%8 = OpImageSampleImplicitLod %9 %13 %18 ConstOffset %24
)",
- R"(
+ R"(
)"};
- case ValidTextureOverload::kSampleCubeF32:
- return {
- R"(
+ case ValidTextureOverload::kSampleCubeF32:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 Cube 0 0 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -1607,17 +1607,17 @@ OpCapability Sampled1D
%17 = OpConstant %4 3
%18 = OpConstantComposite %14 %15 %16 %17
)",
- R"(
+ R"(
%10 = OpLoad %7 %5
%11 = OpLoad %3 %1
%13 = OpSampledImage %12 %11 %10
%8 = OpImageSampleImplicitLod %9 %13 %18
)",
- R"(
+ R"(
)"};
- case ValidTextureOverload::kSampleCubeArrayF32:
- return {
- R"(
+ case ValidTextureOverload::kSampleCubeArrayF32:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 Cube 0 1 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -1633,7 +1633,7 @@ OpCapability Sampled1D
%18 = OpTypeInt 32 1
%19 = OpConstant %18 4
)",
- R"(
+ R"(
%10 = OpLoad %7 %5
%11 = OpLoad %3 %1
%13 = OpSampledImage %12 %11 %10
@@ -1641,12 +1641,12 @@ OpCapability Sampled1D
%20 = OpCompositeConstruct %9 %14 %15 %16 %17
%8 = OpImageSampleImplicitLod %9 %13 %20
)",
- R"(
+ R"(
OpCapability SampledCubeArray
)"};
- case ValidTextureOverload::kSampleDepth2dF32:
- return {
- R"(
+ case ValidTextureOverload::kSampleDepth2dF32:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 2D 0 0 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -1661,18 +1661,18 @@ OpCapability SampledCubeArray
%17 = OpConstant %4 2
%18 = OpConstantComposite %15 %16 %17
)",
- R"(
+ R"(
%11 = OpLoad %7 %5
%12 = OpLoad %3 %1
%14 = OpSampledImage %13 %12 %11
%9 = OpImageSampleImplicitLod %10 %14 %18
%8 = OpCompositeExtract %4 %9 0
)",
- R"(
+ R"(
)"};
- case ValidTextureOverload::kSampleDepth2dOffsetF32:
- return {
- R"(
+ case ValidTextureOverload::kSampleDepth2dOffsetF32:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 2D 0 0 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -1692,18 +1692,18 @@ OpCapability SampledCubeArray
%22 = OpConstant %20 4
%23 = OpConstantComposite %19 %21 %22
)",
- R"(
+ R"(
%11 = OpLoad %7 %5
%12 = OpLoad %3 %1
%14 = OpSampledImage %13 %12 %11
%9 = OpImageSampleImplicitLod %10 %14 %18 ConstOffset %23
%8 = OpCompositeExtract %4 %9 0
)",
- R"(
+ R"(
)"};
- case ValidTextureOverload::kSampleDepth2dArrayF32:
- return {
- R"(
+ case ValidTextureOverload::kSampleDepth2dArrayF32:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 2D 0 1 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -1719,7 +1719,7 @@ OpCapability SampledCubeArray
%19 = OpTypeInt 32 1
%20 = OpConstant %19 3
)",
- R"(
+ R"(
%11 = OpLoad %7 %5
%12 = OpLoad %3 %1
%14 = OpSampledImage %13 %12 %11
@@ -1728,11 +1728,11 @@ OpCapability SampledCubeArray
%9 = OpImageSampleImplicitLod %10 %14 %21
%8 = OpCompositeExtract %4 %9 0
)",
- R"(
+ R"(
)"};
- case ValidTextureOverload::kSampleDepth2dArrayOffsetF32:
- return {
- R"(
+ case ValidTextureOverload::kSampleDepth2dArrayOffsetF32:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 2D 0 1 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -1752,7 +1752,7 @@ OpCapability SampledCubeArray
%24 = OpConstant %19 5
%25 = OpConstantComposite %22 %23 %24
)",
- R"(
+ R"(
%11 = OpLoad %7 %5
%12 = OpLoad %3 %1
%14 = OpSampledImage %13 %12 %11
@@ -1761,11 +1761,11 @@ OpCapability SampledCubeArray
%9 = OpImageSampleImplicitLod %10 %14 %21 ConstOffset %25
%8 = OpCompositeExtract %4 %9 0
)",
- R"(
+ R"(
)"};
- case ValidTextureOverload::kSampleDepthCubeF32:
- return {
- R"(
+ case ValidTextureOverload::kSampleDepthCubeF32:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 Cube 0 0 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -1781,18 +1781,18 @@ OpCapability SampledCubeArray
%18 = OpConstant %4 3
%19 = OpConstantComposite %15 %16 %17 %18
)",
- R"(
+ R"(
%11 = OpLoad %7 %5
%12 = OpLoad %3 %1
%14 = OpSampledImage %13 %12 %11
%9 = OpImageSampleImplicitLod %10 %14 %19
%8 = OpCompositeExtract %4 %9 0
)",
- R"(
+ R"(
)"};
- case ValidTextureOverload::kSampleDepthCubeArrayF32:
- return {
- R"(
+ case ValidTextureOverload::kSampleDepthCubeArrayF32:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 Cube 0 1 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -1808,7 +1808,7 @@ OpCapability SampledCubeArray
%19 = OpTypeInt 32 1
%20 = OpConstant %19 4
)",
- R"(
+ R"(
%11 = OpLoad %7 %5
%12 = OpLoad %3 %1
%14 = OpSampledImage %13 %12 %11
@@ -1817,12 +1817,12 @@ OpCapability SampledCubeArray
%9 = OpImageSampleImplicitLod %10 %14 %21
%8 = OpCompositeExtract %4 %9 0
)",
- R"(
+ R"(
OpCapability SampledCubeArray
)"};
- case ValidTextureOverload::kSampleBias2dF32:
- return {
- R"(
+ case ValidTextureOverload::kSampleBias2dF32:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 2D 0 0 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -1838,17 +1838,17 @@ OpCapability SampledCubeArray
%17 = OpConstantComposite %14 %15 %16
%18 = OpConstant %4 3
)",
- R"(
+ R"(
%10 = OpLoad %7 %5
%11 = OpLoad %3 %1
%13 = OpSampledImage %12 %11 %10
%8 = OpImageSampleImplicitLod %9 %13 %17 Bias %18
)",
- R"(
+ R"(
)"};
- case ValidTextureOverload::kSampleBias2dOffsetF32:
- return {
- R"(
+ case ValidTextureOverload::kSampleBias2dOffsetF32:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 2D 0 0 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -1869,17 +1869,17 @@ OpCapability SampledCubeArray
%22 = OpConstant %20 5
%23 = OpConstantComposite %19 %21 %22
)",
- R"(
+ R"(
%10 = OpLoad %7 %5
%11 = OpLoad %3 %1
%13 = OpSampledImage %12 %11 %10
%8 = OpImageSampleImplicitLod %9 %13 %17 Bias|ConstOffset %18 %23
)",
- R"(
+ R"(
)"};
- case ValidTextureOverload::kSampleBias2dArrayF32:
- return {
- R"(
+ case ValidTextureOverload::kSampleBias2dArrayF32:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 2D 0 1 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -1896,7 +1896,7 @@ OpCapability SampledCubeArray
%19 = OpConstant %18 4
%21 = OpConstant %4 3
)",
- R"(
+ R"(
%10 = OpLoad %7 %5
%11 = OpLoad %3 %1
%13 = OpSampledImage %12 %11 %10
@@ -1904,11 +1904,11 @@ OpCapability SampledCubeArray
%20 = OpCompositeConstruct %14 %15 %16 %17
%8 = OpImageSampleImplicitLod %9 %13 %20 Bias %21
)",
- R"(
+ R"(
)"};
- case ValidTextureOverload::kSampleBias2dArrayOffsetF32:
- return {
- R"(
+ case ValidTextureOverload::kSampleBias2dArrayOffsetF32:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 2D 0 1 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -1929,7 +1929,7 @@ OpCapability SampledCubeArray
%24 = OpConstant %18 6
%25 = OpConstantComposite %22 %23 %24
)",
- R"(
+ R"(
%10 = OpLoad %7 %5
%11 = OpLoad %3 %1
%13 = OpSampledImage %12 %11 %10
@@ -1937,11 +1937,11 @@ OpCapability SampledCubeArray
%20 = OpCompositeConstruct %14 %15 %16 %17
%8 = OpImageSampleImplicitLod %9 %13 %20 Bias|ConstOffset %21 %25
)",
- R"(
+ R"(
)"};
- case ValidTextureOverload::kSampleBias3dF32:
- return {
- R"(
+ case ValidTextureOverload::kSampleBias3dF32:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 3D 0 0 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -1958,17 +1958,17 @@ OpCapability SampledCubeArray
%18 = OpConstantComposite %14 %15 %16 %17
%19 = OpConstant %4 4
)",
- R"(
+ R"(
%10 = OpLoad %7 %5
%11 = OpLoad %3 %1
%13 = OpSampledImage %12 %11 %10
%8 = OpImageSampleImplicitLod %9 %13 %18 Bias %19
)",
- R"(
+ R"(
)"};
- case ValidTextureOverload::kSampleBias3dOffsetF32:
- return {
- R"(
+ case ValidTextureOverload::kSampleBias3dOffsetF32:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 3D 0 0 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -1991,17 +1991,17 @@ OpCapability SampledCubeArray
%24 = OpConstant %21 7
%25 = OpConstantComposite %20 %22 %23 %24
)",
- R"(
+ R"(
%10 = OpLoad %7 %5
%11 = OpLoad %3 %1
%13 = OpSampledImage %12 %11 %10
%8 = OpImageSampleImplicitLod %9 %13 %18 Bias|ConstOffset %19 %25
)",
- R"(
+ R"(
)"};
- case ValidTextureOverload::kSampleBiasCubeF32:
- return {
- R"(
+ case ValidTextureOverload::kSampleBiasCubeF32:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 Cube 0 0 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -2018,17 +2018,17 @@ OpCapability SampledCubeArray
%18 = OpConstantComposite %14 %15 %16 %17
%19 = OpConstant %4 4
)",
- R"(
+ R"(
%10 = OpLoad %7 %5
%11 = OpLoad %3 %1
%13 = OpSampledImage %12 %11 %10
%8 = OpImageSampleImplicitLod %9 %13 %18 Bias %19
)",
- R"(
+ R"(
)"};
- case ValidTextureOverload::kSampleBiasCubeArrayF32:
- return {
- R"(
+ case ValidTextureOverload::kSampleBiasCubeArrayF32:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 Cube 0 1 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -2045,7 +2045,7 @@ OpCapability SampledCubeArray
%19 = OpConstant %18 3
%21 = OpConstant %4 4
)",
- R"(
+ R"(
%10 = OpLoad %7 %5
%11 = OpLoad %3 %1
%13 = OpSampledImage %12 %11 %10
@@ -2053,12 +2053,12 @@ OpCapability SampledCubeArray
%20 = OpCompositeConstruct %9 %14 %15 %16 %17
%8 = OpImageSampleImplicitLod %9 %13 %20 Bias %21
)",
- R"(
+ R"(
OpCapability SampledCubeArray
)"};
- case ValidTextureOverload::kSampleLevel2dF32:
- return {
- R"(
+ case ValidTextureOverload::kSampleLevel2dF32:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 2D 0 0 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -2074,17 +2074,17 @@ OpCapability SampledCubeArray
%17 = OpConstantComposite %14 %15 %16
%18 = OpConstant %4 3
)",
- R"(
+ R"(
%10 = OpLoad %7 %5
%11 = OpLoad %3 %1
%13 = OpSampledImage %12 %11 %10
%8 = OpImageSampleExplicitLod %9 %13 %17 Lod %18
)",
- R"(
+ R"(
)"};
- case ValidTextureOverload::kSampleLevel2dOffsetF32:
- return {
- R"(
+ case ValidTextureOverload::kSampleLevel2dOffsetF32:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 2D 0 0 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -2105,17 +2105,17 @@ OpCapability SampledCubeArray
%22 = OpConstant %20 5
%23 = OpConstantComposite %19 %21 %22
)",
- R"(
+ R"(
%10 = OpLoad %7 %5
%11 = OpLoad %3 %1
%13 = OpSampledImage %12 %11 %10
%8 = OpImageSampleExplicitLod %9 %13 %17 Lod|ConstOffset %18 %23
)",
- R"(
+ R"(
)"};
- case ValidTextureOverload::kSampleLevel2dArrayF32:
- return {
- R"(
+ case ValidTextureOverload::kSampleLevel2dArrayF32:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 2D 0 1 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -2132,7 +2132,7 @@ OpCapability SampledCubeArray
%19 = OpConstant %18 3
%21 = OpConstant %4 4
)",
- R"(
+ R"(
%10 = OpLoad %7 %5
%11 = OpLoad %3 %1
%13 = OpSampledImage %12 %11 %10
@@ -2140,11 +2140,11 @@ OpCapability SampledCubeArray
%20 = OpCompositeConstruct %14 %15 %16 %17
%8 = OpImageSampleExplicitLod %9 %13 %20 Lod %21
)",
- R"(
+ R"(
)"};
- case ValidTextureOverload::kSampleLevel2dArrayOffsetF32:
- return {
- R"(
+ case ValidTextureOverload::kSampleLevel2dArrayOffsetF32:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 2D 0 1 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -2165,7 +2165,7 @@ OpCapability SampledCubeArray
%24 = OpConstant %18 6
%25 = OpConstantComposite %22 %23 %24
)",
- R"(
+ R"(
%10 = OpLoad %7 %5
%11 = OpLoad %3 %1
%13 = OpSampledImage %12 %11 %10
@@ -2173,11 +2173,11 @@ OpCapability SampledCubeArray
%20 = OpCompositeConstruct %14 %15 %16 %17
%8 = OpImageSampleExplicitLod %9 %13 %20 Lod|ConstOffset %21 %25
)",
- R"(
+ R"(
)"};
- case ValidTextureOverload::kSampleLevel3dF32:
- return {
- R"(
+ case ValidTextureOverload::kSampleLevel3dF32:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 3D 0 0 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -2194,17 +2194,17 @@ OpCapability SampledCubeArray
%18 = OpConstantComposite %14 %15 %16 %17
%19 = OpConstant %4 4
)",
- R"(
+ R"(
%10 = OpLoad %7 %5
%11 = OpLoad %3 %1
%13 = OpSampledImage %12 %11 %10
%8 = OpImageSampleExplicitLod %9 %13 %18 Lod %19
)",
- R"(
+ R"(
)"};
- case ValidTextureOverload::kSampleLevel3dOffsetF32:
- return {
- R"(
+ case ValidTextureOverload::kSampleLevel3dOffsetF32:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 3D 0 0 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -2227,17 +2227,17 @@ OpCapability SampledCubeArray
%24 = OpConstant %21 7
%25 = OpConstantComposite %20 %22 %23 %24
)",
- R"(
+ R"(
%10 = OpLoad %7 %5
%11 = OpLoad %3 %1
%13 = OpSampledImage %12 %11 %10
%8 = OpImageSampleExplicitLod %9 %13 %18 Lod|ConstOffset %19 %25
)",
- R"(
+ R"(
)"};
- case ValidTextureOverload::kSampleLevelCubeF32:
- return {
- R"(
+ case ValidTextureOverload::kSampleLevelCubeF32:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 Cube 0 0 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -2254,17 +2254,17 @@ OpCapability SampledCubeArray
%18 = OpConstantComposite %14 %15 %16 %17
%19 = OpConstant %4 4
)",
- R"(
+ R"(
%10 = OpLoad %7 %5
%11 = OpLoad %3 %1
%13 = OpSampledImage %12 %11 %10
%8 = OpImageSampleExplicitLod %9 %13 %18 Lod %19
)",
- R"(
+ R"(
)"};
- case ValidTextureOverload::kSampleLevelCubeArrayF32:
- return {
- R"(
+ case ValidTextureOverload::kSampleLevelCubeArrayF32:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 Cube 0 1 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -2281,7 +2281,7 @@ OpCapability SampledCubeArray
%19 = OpConstant %18 4
%21 = OpConstant %4 5
)",
- R"(
+ R"(
%10 = OpLoad %7 %5
%11 = OpLoad %3 %1
%13 = OpSampledImage %12 %11 %10
@@ -2289,12 +2289,12 @@ OpCapability SampledCubeArray
%20 = OpCompositeConstruct %9 %14 %15 %16 %17
%8 = OpImageSampleExplicitLod %9 %13 %20 Lod %21
)",
- R"(
+ R"(
OpCapability SampledCubeArray
)"};
- case ValidTextureOverload::kSampleLevelDepth2dF32:
- return {
- R"(
+ case ValidTextureOverload::kSampleLevelDepth2dF32:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 2D 0 0 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -2311,7 +2311,7 @@ OpCapability SampledCubeArray
%20 = OpTypeInt 32 1
%21 = OpConstant %20 3
)",
- R"(
+ R"(
%11 = OpLoad %7 %5
%12 = OpLoad %3 %1
%14 = OpSampledImage %13 %12 %11
@@ -2319,11 +2319,11 @@ OpCapability SampledCubeArray
%9 = OpImageSampleExplicitLod %10 %14 %18 Lod %19
%8 = OpCompositeExtract %4 %9 0
)",
- R"(
+ R"(
)"};
- case ValidTextureOverload::kSampleLevelDepth2dOffsetF32:
- return {
- R"(
+ case ValidTextureOverload::kSampleLevelDepth2dOffsetF32:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 2D 0 0 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -2344,7 +2344,7 @@ OpCapability SampledCubeArray
%24 = OpConstant %20 5
%25 = OpConstantComposite %22 %23 %24
)",
- R"(
+ R"(
%11 = OpLoad %7 %5
%12 = OpLoad %3 %1
%14 = OpSampledImage %13 %12 %11
@@ -2352,11 +2352,11 @@ OpCapability SampledCubeArray
%9 = OpImageSampleExplicitLod %10 %14 %18 Lod|ConstOffset %19 %25
%8 = OpCompositeExtract %4 %9 0
)",
- R"(
+ R"(
)"};
- case ValidTextureOverload::kSampleLevelDepth2dArrayF32:
- return {
- R"(
+ case ValidTextureOverload::kSampleLevelDepth2dArrayF32:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 2D 0 1 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -2373,7 +2373,7 @@ OpCapability SampledCubeArray
%20 = OpConstant %19 3
%23 = OpConstant %19 4
)",
- R"(
+ R"(
%11 = OpLoad %7 %5
%12 = OpLoad %3 %1
%14 = OpSampledImage %13 %12 %11
@@ -2383,11 +2383,11 @@ OpCapability SampledCubeArray
%9 = OpImageSampleExplicitLod %10 %14 %21 Lod %22
%8 = OpCompositeExtract %4 %9 0
)",
- R"(
+ R"(
)"};
- case ValidTextureOverload::kSampleLevelDepth2dArrayOffsetF32:
- return {
- R"(
+ case ValidTextureOverload::kSampleLevelDepth2dArrayOffsetF32:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 2D 0 1 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -2408,7 +2408,7 @@ OpCapability SampledCubeArray
%26 = OpConstant %19 6
%27 = OpConstantComposite %24 %25 %26
)",
- R"(
+ R"(
%11 = OpLoad %7 %5
%12 = OpLoad %3 %1
%14 = OpSampledImage %13 %12 %11
@@ -2418,11 +2418,11 @@ OpCapability SampledCubeArray
%9 = OpImageSampleExplicitLod %10 %14 %21 Lod|ConstOffset %22 %27
%8 = OpCompositeExtract %4 %9 0
)",
- R"(
+ R"(
)"};
- case ValidTextureOverload::kSampleLevelDepthCubeF32:
- return {
- R"(
+ case ValidTextureOverload::kSampleLevelDepthCubeF32:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 Cube 0 0 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -2440,7 +2440,7 @@ OpCapability SampledCubeArray
%21 = OpTypeInt 32 1
%22 = OpConstant %21 4
)",
- R"(
+ R"(
%11 = OpLoad %7 %5
%12 = OpLoad %3 %1
%14 = OpSampledImage %13 %12 %11
@@ -2448,11 +2448,11 @@ OpCapability SampledCubeArray
%9 = OpImageSampleExplicitLod %10 %14 %19 Lod %20
%8 = OpCompositeExtract %4 %9 0
)",
- R"(
+ R"(
)"};
- case ValidTextureOverload::kSampleLevelDepthCubeArrayF32:
- return {
- R"(
+ case ValidTextureOverload::kSampleLevelDepthCubeArrayF32:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 Cube 0 1 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -2469,7 +2469,7 @@ OpCapability SampledCubeArray
%20 = OpConstant %19 4
%23 = OpConstant %19 5
)",
- R"(
+ R"(
%11 = OpLoad %7 %5
%12 = OpLoad %3 %1
%14 = OpSampledImage %13 %12 %11
@@ -2479,12 +2479,12 @@ OpCapability SampledCubeArray
%9 = OpImageSampleExplicitLod %10 %14 %21 Lod %22
%8 = OpCompositeExtract %4 %9 0
)",
- R"(
+ R"(
OpCapability SampledCubeArray
)"};
- case ValidTextureOverload::kSampleGrad2dF32:
- return {
- R"(
+ case ValidTextureOverload::kSampleGrad2dF32:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 2D 0 0 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -2505,17 +2505,17 @@ OpCapability SampledCubeArray
%22 = OpConstant %4 6
%23 = OpConstantComposite %14 %21 %22
)",
- R"(
+ R"(
%10 = OpLoad %7 %5
%11 = OpLoad %3 %1
%13 = OpSampledImage %12 %11 %10
%8 = OpImageSampleExplicitLod %9 %13 %17 Grad %20 %23
)",
- R"(
+ R"(
)"};
- case ValidTextureOverload::kSampleGrad2dOffsetF32:
- return {
- R"(
+ case ValidTextureOverload::kSampleGrad2dOffsetF32:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 2D 0 0 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -2540,17 +2540,17 @@ OpCapability SampledCubeArray
%26 = OpConstant %25 7
%27 = OpConstantComposite %24 %26 %26
)",
- R"(
+ R"(
%10 = OpLoad %7 %5
%11 = OpLoad %3 %1
%13 = OpSampledImage %12 %11 %10
%8 = OpImageSampleExplicitLod %9 %13 %17 Grad|ConstOffset %20 %23 %27
)",
- R"(
+ R"(
)"};
- case ValidTextureOverload::kSampleGrad2dArrayF32:
- return {
- R"(
+ case ValidTextureOverload::kSampleGrad2dArrayF32:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 2D 0 1 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -2573,7 +2573,7 @@ OpCapability SampledCubeArray
%26 = OpConstant %4 7
%27 = OpConstantComposite %21 %25 %26
)",
- R"(
+ R"(
%10 = OpLoad %7 %5
%11 = OpLoad %3 %1
%13 = OpSampledImage %12 %11 %10
@@ -2581,11 +2581,11 @@ OpCapability SampledCubeArray
%20 = OpCompositeConstruct %14 %15 %16 %17
%8 = OpImageSampleExplicitLod %9 %13 %20 Grad %24 %27
)",
- R"(
+ R"(
)"};
- case ValidTextureOverload::kSampleGrad2dArrayOffsetF32:
- return {
- R"(
+ case ValidTextureOverload::kSampleGrad2dArrayOffsetF32:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 2D 0 1 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -2612,7 +2612,7 @@ OpCapability SampledCubeArray
%30 = OpConstant %18 7
%31 = OpConstantComposite %28 %29 %30
)",
- R"(
+ R"(
%10 = OpLoad %7 %5
%11 = OpLoad %3 %1
%13 = OpSampledImage %12 %11 %10
@@ -2620,11 +2620,11 @@ OpCapability SampledCubeArray
%20 = OpCompositeConstruct %14 %15 %16 %17
%8 = OpImageSampleExplicitLod %9 %13 %20 Grad|ConstOffset %24 %27 %31
)",
- R"(
+ R"(
)"};
- case ValidTextureOverload::kSampleGrad3dF32:
- return {
- R"(
+ case ValidTextureOverload::kSampleGrad3dF32:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 3D 0 0 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -2648,17 +2648,17 @@ OpCapability SampledCubeArray
%25 = OpConstant %4 9
%26 = OpConstantComposite %14 %23 %24 %25
)",
- R"(
+ R"(
%10 = OpLoad %7 %5
%11 = OpLoad %3 %1
%13 = OpSampledImage %12 %11 %10
%8 = OpImageSampleExplicitLod %9 %13 %18 Grad %22 %26
)",
- R"(
+ R"(
)"};
- case ValidTextureOverload::kSampleGrad3dOffsetF32:
- return {
- R"(
+ case ValidTextureOverload::kSampleGrad3dOffsetF32:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 3D 0 0 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -2688,17 +2688,17 @@ OpCapability SampledCubeArray
%31 = OpConstant %28 2
%32 = OpConstantComposite %27 %29 %30 %31
)",
- R"(
+ R"(
%10 = OpLoad %7 %5
%11 = OpLoad %3 %1
%13 = OpSampledImage %12 %11 %10
%8 = OpImageSampleExplicitLod %9 %13 %18 Grad|ConstOffset %22 %26 %32
)",
- R"(
+ R"(
)"};
- case ValidTextureOverload::kSampleGradCubeF32:
- return {
- R"(
+ case ValidTextureOverload::kSampleGradCubeF32:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 Cube 0 0 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -2722,17 +2722,17 @@ OpCapability SampledCubeArray
%25 = OpConstant %4 9
%26 = OpConstantComposite %14 %23 %24 %25
)",
- R"(
+ R"(
%10 = OpLoad %7 %5
%11 = OpLoad %3 %1
%13 = OpSampledImage %12 %11 %10
%8 = OpImageSampleExplicitLod %9 %13 %18 Grad %22 %26
)",
- R"(
+ R"(
)"};
- case ValidTextureOverload::kSampleGradCubeArrayF32:
- return {
- R"(
+ case ValidTextureOverload::kSampleGradCubeArrayF32:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 Cube 0 1 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -2757,7 +2757,7 @@ OpCapability SampledCubeArray
%28 = OpConstant %4 10
%29 = OpConstantComposite %21 %26 %27 %28
)",
- R"(
+ R"(
%10 = OpLoad %7 %5
%11 = OpLoad %3 %1
%13 = OpSampledImage %12 %11 %10
@@ -2765,12 +2765,12 @@ OpCapability SampledCubeArray
%20 = OpCompositeConstruct %9 %14 %15 %16 %17
%8 = OpImageSampleExplicitLod %9 %13 %20 Grad %25 %29
)",
- R"(
+ R"(
OpCapability SampledCubeArray
)"};
- case ValidTextureOverload::kSampleCompareDepth2dF32:
- return {
- R"(
+ case ValidTextureOverload::kSampleCompareDepth2dF32:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 2D 0 0 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -2785,17 +2785,17 @@ OpCapability SampledCubeArray
%16 = OpConstantComposite %13 %14 %15
%17 = OpConstant %4 3
)",
- R"(
+ R"(
%9 = OpLoad %7 %5
%10 = OpLoad %3 %1
%12 = OpSampledImage %11 %10 %9
%8 = OpImageSampleDrefImplicitLod %4 %12 %16 %17
)",
- R"(
+ R"(
)"};
- case ValidTextureOverload::kSampleCompareDepth2dOffsetF32:
- return {
- R"(
+ case ValidTextureOverload::kSampleCompareDepth2dOffsetF32:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 2D 0 0 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -2815,17 +2815,17 @@ OpCapability SampledCubeArray
%21 = OpConstant %19 5
%22 = OpConstantComposite %18 %20 %21
)",
- R"(
+ R"(
%9 = OpLoad %7 %5
%10 = OpLoad %3 %1
%12 = OpSampledImage %11 %10 %9
%8 = OpImageSampleDrefImplicitLod %4 %12 %16 %17 ConstOffset %22
)",
- R"(
+ R"(
)"};
- case ValidTextureOverload::kSampleCompareDepth2dArrayF32:
- return {
- R"(
+ case ValidTextureOverload::kSampleCompareDepth2dArrayF32:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 2D 0 1 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -2841,7 +2841,7 @@ OpCapability SampledCubeArray
%18 = OpConstant %17 4
%20 = OpConstant %4 3
)",
- R"(
+ R"(
%9 = OpLoad %7 %5
%10 = OpLoad %3 %1
%12 = OpSampledImage %11 %10 %9
@@ -2849,11 +2849,11 @@ OpCapability SampledCubeArray
%19 = OpCompositeConstruct %13 %14 %15 %16
%8 = OpImageSampleDrefImplicitLod %4 %12 %19 %20
)",
- R"(
+ R"(
)"};
- case ValidTextureOverload::kSampleCompareDepth2dArrayOffsetF32:
- return {
- R"(
+ case ValidTextureOverload::kSampleCompareDepth2dArrayOffsetF32:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 2D 0 1 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -2873,7 +2873,7 @@ OpCapability SampledCubeArray
%23 = OpConstant %17 6
%24 = OpConstantComposite %21 %22 %23
)",
- R"(
+ R"(
%9 = OpLoad %7 %5
%10 = OpLoad %3 %1
%12 = OpSampledImage %11 %10 %9
@@ -2881,11 +2881,11 @@ OpCapability SampledCubeArray
%19 = OpCompositeConstruct %13 %14 %15 %16
%8 = OpImageSampleDrefImplicitLod %4 %12 %19 %20 ConstOffset %24
)",
- R"(
+ R"(
)"};
- case ValidTextureOverload::kSampleCompareDepthCubeF32:
- return {
- R"(
+ case ValidTextureOverload::kSampleCompareDepthCubeF32:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 Cube 0 0 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -2901,17 +2901,17 @@ OpCapability SampledCubeArray
%17 = OpConstantComposite %13 %14 %15 %16
%18 = OpConstant %4 4
)",
- R"(
+ R"(
%9 = OpLoad %7 %5
%10 = OpLoad %3 %1
%12 = OpSampledImage %11 %10 %9
%8 = OpImageSampleDrefImplicitLod %4 %12 %17 %18
)",
- R"(
+ R"(
)"};
- case ValidTextureOverload::kSampleCompareDepthCubeArrayF32:
- return {
- R"(
+ case ValidTextureOverload::kSampleCompareDepthCubeArrayF32:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 Cube 0 1 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -2928,7 +2928,7 @@ OpCapability SampledCubeArray
%19 = OpConstant %18 4
%21 = OpConstant %4 5
)",
- R"(
+ R"(
%9 = OpLoad %7 %5
%10 = OpLoad %3 %1
%12 = OpSampledImage %11 %10 %9
@@ -2936,12 +2936,12 @@ OpCapability SampledCubeArray
%20 = OpCompositeConstruct %13 %14 %15 %16 %17
%8 = OpImageSampleDrefImplicitLod %4 %12 %20 %21
)",
- R"(
+ R"(
OpCapability SampledCubeArray
)"};
- case ValidTextureOverload::kSampleCompareLevelDepth2dF32:
- return {
- R"(
+ case ValidTextureOverload::kSampleCompareLevelDepth2dF32:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 2D 0 0 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -2957,17 +2957,17 @@ OpCapability SampledCubeArray
%17 = OpConstant %4 3
%18 = OpConstant %4 0
)",
- R"(
+ R"(
%9 = OpLoad %7 %5
%10 = OpLoad %3 %1
%12 = OpSampledImage %11 %10 %9
%8 = OpImageSampleDrefExplicitLod %4 %12 %16 %17 Lod %18
)",
- R"(
+ R"(
)"};
- case ValidTextureOverload::kSampleCompareLevelDepth2dOffsetF32:
- return {
- R"(
+ case ValidTextureOverload::kSampleCompareLevelDepth2dOffsetF32:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 2D 0 0 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -2988,17 +2988,17 @@ OpCapability SampledCubeArray
%22 = OpConstant %20 5
%23 = OpConstantComposite %19 %21 %22
)",
- R"(
+ R"(
%9 = OpLoad %7 %5
%10 = OpLoad %3 %1
%12 = OpSampledImage %11 %10 %9
%8 = OpImageSampleDrefExplicitLod %4 %12 %16 %17 Lod|ConstOffset %18 %23
)",
- R"(
+ R"(
)"};
- case ValidTextureOverload::kSampleCompareLevelDepth2dArrayF32:
- return {
- R"(
+ case ValidTextureOverload::kSampleCompareLevelDepth2dArrayF32:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 2D 0 1 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -3015,7 +3015,7 @@ OpCapability SampledCubeArray
%20 = OpConstant %4 3
%21 = OpConstant %4 0
)",
- R"(
+ R"(
%9 = OpLoad %7 %5
%10 = OpLoad %3 %1
%12 = OpSampledImage %11 %10 %9
@@ -3023,11 +3023,11 @@ OpCapability SampledCubeArray
%19 = OpCompositeConstruct %13 %14 %15 %16
%8 = OpImageSampleDrefExplicitLod %4 %12 %19 %20 Lod %21
)",
- R"(
+ R"(
)"};
- case ValidTextureOverload::kSampleCompareLevelDepth2dArrayOffsetF32:
- return {
- R"(
+ case ValidTextureOverload::kSampleCompareLevelDepth2dArrayOffsetF32:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 2D 0 1 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -3048,7 +3048,7 @@ OpCapability SampledCubeArray
%24 = OpConstant %17 6
%25 = OpConstantComposite %22 %23 %24
)",
- R"(
+ R"(
%9 = OpLoad %7 %5
%10 = OpLoad %3 %1
%12 = OpSampledImage %11 %10 %9
@@ -3056,11 +3056,11 @@ OpCapability SampledCubeArray
%19 = OpCompositeConstruct %13 %14 %15 %16
%8 = OpImageSampleDrefExplicitLod %4 %12 %19 %20 Lod|ConstOffset %21 %25
)",
- R"(
+ R"(
)"};
- case ValidTextureOverload::kSampleCompareLevelDepthCubeF32:
- return {
- R"(
+ case ValidTextureOverload::kSampleCompareLevelDepthCubeF32:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 Cube 0 0 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -3077,17 +3077,17 @@ OpCapability SampledCubeArray
%18 = OpConstant %4 4
%19 = OpConstant %4 0
)",
- R"(
+ R"(
%9 = OpLoad %7 %5
%10 = OpLoad %3 %1
%12 = OpSampledImage %11 %10 %9
%8 = OpImageSampleDrefExplicitLod %4 %12 %17 %18 Lod %19
)",
- R"(
+ R"(
)"};
- case ValidTextureOverload::kSampleCompareLevelDepthCubeArrayF32:
- return {
- R"(
+ case ValidTextureOverload::kSampleCompareLevelDepthCubeArrayF32:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 Cube 0 1 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -3105,7 +3105,7 @@ OpCapability SampledCubeArray
%21 = OpConstant %4 5
%22 = OpConstant %4 0
)",
- R"(
+ R"(
%9 = OpLoad %7 %5
%10 = OpLoad %3 %1
%12 = OpSampledImage %11 %10 %9
@@ -3113,12 +3113,12 @@ OpCapability SampledCubeArray
%20 = OpCompositeConstruct %13 %14 %15 %16 %17
%8 = OpImageSampleDrefExplicitLod %4 %12 %20 %21 Lod %22
)",
- R"(
+ R"(
OpCapability SampledCubeArray
)"};
- case ValidTextureOverload::kLoad1dLevelF32:
- return {
- R"(
+ case ValidTextureOverload::kLoad1dLevelF32:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 1D 0 0 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -3131,16 +3131,16 @@ OpCapability SampledCubeArray
%12 = OpConstant %11 1
%13 = OpConstant %11 3
)",
- R"(
+ R"(
%10 = OpLoad %3 %1
%8 = OpImageFetch %9 %10 %12 Lod %13
)",
- R"(
+ R"(
OpCapability Sampled1D
)"};
- case ValidTextureOverload::kLoad1dLevelU32:
- return {
- R"(
+ case ValidTextureOverload::kLoad1dLevelU32:
+ return {
+ R"(
%4 = OpTypeInt 32 0
%3 = OpTypeImage %4 1D 0 0 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -3153,16 +3153,16 @@ OpCapability Sampled1D
%12 = OpConstant %11 1
%13 = OpConstant %11 3
)",
- R"(
+ R"(
%10 = OpLoad %3 %1
%8 = OpImageFetch %9 %10 %12 Lod %13
)",
- R"(
+ R"(
OpCapability Sampled1D
)"};
- case ValidTextureOverload::kLoad1dLevelI32:
- return {
- R"(
+ case ValidTextureOverload::kLoad1dLevelI32:
+ return {
+ R"(
%4 = OpTypeInt 32 1
%3 = OpTypeImage %4 1D 0 0 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -3174,16 +3174,16 @@ OpCapability Sampled1D
%11 = OpConstant %4 1
%12 = OpConstant %4 3
)",
- R"(
+ R"(
%10 = OpLoad %3 %1
%8 = OpImageFetch %9 %10 %11 Lod %12
)",
- R"(
+ R"(
OpCapability Sampled1D
)"};
- case ValidTextureOverload::kLoad2dLevelF32:
- return {
- R"(
+ case ValidTextureOverload::kLoad2dLevelF32:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 2D 0 0 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -3199,15 +3199,15 @@ OpCapability Sampled1D
%15 = OpConstantComposite %11 %13 %14
%16 = OpConstant %12 3
)",
- R"(
+ R"(
%10 = OpLoad %3 %1
%8 = OpImageFetch %9 %10 %15 Lod %16
)",
- R"(
+ R"(
)"};
- case ValidTextureOverload::kLoad2dLevelU32:
- return {
- R"(
+ case ValidTextureOverload::kLoad2dLevelU32:
+ return {
+ R"(
%4 = OpTypeInt 32 0
%3 = OpTypeImage %4 2D 0 0 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -3223,15 +3223,15 @@ OpCapability Sampled1D
%15 = OpConstantComposite %11 %13 %14
%16 = OpConstant %12 3
)",
- R"(
+ R"(
%10 = OpLoad %3 %1
%8 = OpImageFetch %9 %10 %15 Lod %16
)",
- R"(
+ R"(
)"};
- case ValidTextureOverload::kLoad2dLevelI32:
- return {
- R"(
+ case ValidTextureOverload::kLoad2dLevelI32:
+ return {
+ R"(
%4 = OpTypeInt 32 1
%3 = OpTypeImage %4 2D 0 0 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -3246,15 +3246,15 @@ OpCapability Sampled1D
%14 = OpConstantComposite %11 %12 %13
%15 = OpConstant %4 3
)",
- R"(
+ R"(
%10 = OpLoad %3 %1
%8 = OpImageFetch %9 %10 %14 Lod %15
)",
- R"(
+ R"(
)"};
- case ValidTextureOverload::kLoad2dArrayLevelF32:
- return {
- R"(
+ case ValidTextureOverload::kLoad2dArrayLevelF32:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 2D 0 1 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -3271,15 +3271,15 @@ OpCapability Sampled1D
%16 = OpConstantComposite %11 %13 %14 %15
%17 = OpConstant %12 4
)",
- R"(
+ R"(
%10 = OpLoad %3 %1
%8 = OpImageFetch %9 %10 %16 Lod %17
)",
- R"(
+ R"(
)"};
- case ValidTextureOverload::kLoad2dArrayLevelU32:
- return {
- R"(
+ case ValidTextureOverload::kLoad2dArrayLevelU32:
+ return {
+ R"(
%4 = OpTypeInt 32 0
%3 = OpTypeImage %4 2D 0 1 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -3296,15 +3296,15 @@ OpCapability Sampled1D
%16 = OpConstantComposite %11 %13 %14 %15
%17 = OpConstant %12 4
)",
- R"(
+ R"(
%10 = OpLoad %3 %1
%8 = OpImageFetch %9 %10 %16 Lod %17
)",
- R"(
+ R"(
)"};
- case ValidTextureOverload::kLoad2dArrayLevelI32:
- return {
- R"(
+ case ValidTextureOverload::kLoad2dArrayLevelI32:
+ return {
+ R"(
%4 = OpTypeInt 32 1
%3 = OpTypeImage %4 2D 0 1 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -3320,15 +3320,15 @@ OpCapability Sampled1D
%15 = OpConstantComposite %11 %12 %13 %14
%16 = OpConstant %4 4
)",
- R"(
+ R"(
%10 = OpLoad %3 %1
%8 = OpImageFetch %9 %10 %15 Lod %16
)",
- R"(
+ R"(
)"};
- case ValidTextureOverload::kLoad3dLevelF32:
- return {
- R"(
+ case ValidTextureOverload::kLoad3dLevelF32:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 3D 0 0 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -3345,15 +3345,15 @@ OpCapability Sampled1D
%16 = OpConstantComposite %11 %13 %14 %15
%17 = OpConstant %12 4
)",
- R"(
+ R"(
%10 = OpLoad %3 %1
%8 = OpImageFetch %9 %10 %16 Lod %17
)",
- R"(
+ R"(
)"};
- case ValidTextureOverload::kLoad3dLevelU32:
- return {
- R"(
+ case ValidTextureOverload::kLoad3dLevelU32:
+ return {
+ R"(
%4 = OpTypeInt 32 0
%3 = OpTypeImage %4 3D 0 0 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -3370,15 +3370,15 @@ OpCapability Sampled1D
%16 = OpConstantComposite %11 %13 %14 %15
%17 = OpConstant %12 4
)",
- R"(
+ R"(
%10 = OpLoad %3 %1
%8 = OpImageFetch %9 %10 %16 Lod %17
)",
- R"(
+ R"(
)"};
- case ValidTextureOverload::kLoad3dLevelI32:
- return {
- R"(
+ case ValidTextureOverload::kLoad3dLevelI32:
+ return {
+ R"(
%4 = OpTypeInt 32 1
%3 = OpTypeImage %4 3D 0 0 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -3394,15 +3394,15 @@ OpCapability Sampled1D
%15 = OpConstantComposite %11 %12 %13 %14
%16 = OpConstant %4 4
)",
- R"(
+ R"(
%10 = OpLoad %3 %1
%8 = OpImageFetch %9 %10 %15 Lod %16
)",
- R"(
+ R"(
)"};
- case ValidTextureOverload::kLoadMultisampled2dF32:
- return {
- R"(
+ case ValidTextureOverload::kLoadMultisampled2dF32:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 2D 0 0 1 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -3418,15 +3418,15 @@ OpCapability Sampled1D
%15 = OpConstantComposite %11 %13 %14
%16 = OpConstant %12 3
)",
- R"(
+ R"(
%10 = OpLoad %3 %1
%8 = OpImageFetch %9 %10 %15 Sample %16
)",
- R"(
+ R"(
)"};
- case ValidTextureOverload::kLoadMultisampled2dU32:
- return {
- R"(
+ case ValidTextureOverload::kLoadMultisampled2dU32:
+ return {
+ R"(
%4 = OpTypeInt 32 0
%3 = OpTypeImage %4 2D 0 0 1 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -3442,15 +3442,15 @@ OpCapability Sampled1D
%15 = OpConstantComposite %11 %13 %14
%16 = OpConstant %12 3
)",
- R"(
+ R"(
%10 = OpLoad %3 %1
%8 = OpImageFetch %9 %10 %15 Sample %16
)",
- R"(
+ R"(
)"};
- case ValidTextureOverload::kLoadMultisampled2dI32:
- return {
- R"(
+ case ValidTextureOverload::kLoadMultisampled2dI32:
+ return {
+ R"(
%4 = OpTypeInt 32 1
%3 = OpTypeImage %4 2D 0 0 1 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -3465,15 +3465,15 @@ OpCapability Sampled1D
%14 = OpConstantComposite %11 %12 %13
%15 = OpConstant %4 3
)",
- R"(
+ R"(
%10 = OpLoad %3 %1
%8 = OpImageFetch %9 %10 %14 Sample %15
)",
- R"(
+ R"(
)"};
- case ValidTextureOverload::kLoadDepth2dLevelF32:
- return {
- R"(
+ case ValidTextureOverload::kLoadDepth2dLevelF32:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 2D 0 0 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -3489,16 +3489,16 @@ OpCapability Sampled1D
%16 = OpConstantComposite %12 %14 %15
%17 = OpConstant %13 3
)",
- R"(
+ R"(
%11 = OpLoad %3 %1
%9 = OpImageFetch %10 %11 %16 Lod %17
%8 = OpCompositeExtract %4 %9 0
)",
- R"(
+ R"(
)"};
- case ValidTextureOverload::kLoadDepth2dArrayLevelF32:
- return {
- R"(
+ case ValidTextureOverload::kLoadDepth2dArrayLevelF32:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 2D 0 1 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -3515,16 +3515,16 @@ OpCapability Sampled1D
%17 = OpConstantComposite %12 %14 %15 %16
%18 = OpConstant %13 4
)",
- R"(
+ R"(
%11 = OpLoad %3 %1
%9 = OpImageFetch %10 %11 %17 Lod %18
%8 = OpCompositeExtract %4 %9 0
)",
- R"(
+ R"(
)"};
- case ValidTextureOverload::kLoadDepthMultisampled2dF32:
- return {
- R"(
+ case ValidTextureOverload::kLoadDepthMultisampled2dF32:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 2D 0 1 0 1 Unknown
%2 = OpTypePointer UniformConstant %3
@@ -3541,16 +3541,16 @@ OpCapability Sampled1D
%17 = OpConstantComposite %12 %14 %15 %16
%18 = OpConstant %13 4
)",
- R"(
+ R"(
%11 = OpLoad %3 %1
%9 = OpImageFetch %10 %11 %17 Sample %18
%8 = OpCompositeExtract %4 %9 0
)",
- R"(
+ R"(
)"};
- case ValidTextureOverload::kStoreWO1dRgba32float:
- return {
- R"(
+ case ValidTextureOverload::kStoreWO1dRgba32float:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 1D 0 0 0 2 Rgba32f
%2 = OpTypePointer UniformConstant %3
@@ -3568,16 +3568,16 @@ OpCapability Sampled1D
%17 = OpConstant %4 5
%18 = OpConstantComposite %13 %14 %15 %16 %17
)",
- R"(
+ R"(
%10 = OpLoad %3 %1
OpImageWrite %10 %12 %18
)",
- R"(
+ R"(
OpCapability Image1D
)"};
- case ValidTextureOverload::kStoreWO2dRgba32float:
- return {
- R"(
+ case ValidTextureOverload::kStoreWO2dRgba32float:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 2D 0 0 0 2 Rgba32f
%2 = OpTypePointer UniformConstant %3
@@ -3598,15 +3598,15 @@ OpCapability Image1D
%20 = OpConstant %4 6
%21 = OpConstantComposite %16 %17 %18 %19 %20
)",
- R"(
+ R"(
%10 = OpLoad %3 %1
OpImageWrite %10 %15 %21
)",
- R"(
+ R"(
)"};
- case ValidTextureOverload::kStoreWO2dArrayRgba32float:
- return {
- R"(
+ case ValidTextureOverload::kStoreWO2dArrayRgba32float:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 2D 0 1 0 2 Rgba32f
%2 = OpTypePointer UniformConstant %3
@@ -3628,15 +3628,15 @@ OpImageWrite %10 %15 %21
%21 = OpConstant %4 7
%22 = OpConstantComposite %17 %18 %19 %20 %21
)",
- R"(
+ R"(
%10 = OpLoad %3 %1
OpImageWrite %10 %16 %22
)",
- R"(
+ R"(
)"};
- case ValidTextureOverload::kStoreWO3dRgba32float:
- return {
- R"(
+ case ValidTextureOverload::kStoreWO3dRgba32float:
+ return {
+ R"(
%4 = OpTypeFloat 32
%3 = OpTypeImage %4 3D 0 0 0 2 Rgba32f
%2 = OpTypePointer UniformConstant %3
@@ -3658,93 +3658,89 @@ OpImageWrite %10 %16 %22
%21 = OpConstant %4 7
%22 = OpConstantComposite %17 %18 %19 %20 %21
)",
- R"(
+ R"(
%10 = OpLoad %3 %1
OpImageWrite %10 %16 %22
)",
- R"(
+ R"(
)"};
- }
+ }
- return {"<unmatched texture overload>", "<unmatched texture overload>",
- "<unmatched texture overload>"};
+ return {"<unmatched texture overload>", "<unmatched texture overload>",
+ "<unmatched texture overload>"};
} // NOLINT - Ignore the length of this function
-using BuiltinTextureTest =
- TestParamHelper<ast::builtin::test::TextureOverloadCase>;
+using BuiltinTextureTest = TestParamHelper<ast::builtin::test::TextureOverloadCase>;
-INSTANTIATE_TEST_SUITE_P(
- BuiltinTextureTest,
- BuiltinTextureTest,
- testing::ValuesIn(ast::builtin::test::TextureOverloadCase::ValidCases()));
+INSTANTIATE_TEST_SUITE_P(BuiltinTextureTest,
+ BuiltinTextureTest,
+ testing::ValuesIn(ast::builtin::test::TextureOverloadCase::ValidCases()));
TEST_P(BuiltinTextureTest, Call) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* texture = param.BuildTextureVariable(this);
- auto* sampler = param.BuildSamplerVariable(this);
+ auto* texture = param.BuildTextureVariable(this);
+ auto* sampler = param.BuildSamplerVariable(this);
- auto* call = Call(param.function, param.args(this));
- auto* stmt = CallStmt(call);
- Func("func", {}, ty.void_(), {stmt}, {Stage(ast::PipelineStage::kFragment)});
+ auto* call = Call(param.function, param.args(this));
+ auto* stmt = CallStmt(call);
+ Func("func", {}, ty.void_(), {stmt}, {Stage(ast::PipelineStage::kFragment)});
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- ASSERT_TRUE(b.GenerateGlobalVariable(texture)) << b.error();
- ASSERT_TRUE(b.GenerateGlobalVariable(sampler)) << b.error();
+ b.push_function(Function{});
+ ASSERT_TRUE(b.GenerateGlobalVariable(texture)) << b.error();
+ ASSERT_TRUE(b.GenerateGlobalVariable(sampler)) << b.error();
- EXPECT_EQ(b.GenerateExpression(call), 8u) << b.error();
+ EXPECT_EQ(b.GenerateExpression(call), 8u) << b.error();
- auto expected = expected_texture_overload(param.overload);
- EXPECT_EQ(expected.types, "\n" + DumpInstructions(b.types()));
- EXPECT_EQ(expected.instructions,
- "\n" + DumpInstructions(b.functions()[0].instructions()));
- EXPECT_EQ(expected.capabilities, "\n" + DumpInstructions(b.capabilities()));
+ auto expected = expected_texture_overload(param.overload);
+ EXPECT_EQ(expected.types, "\n" + DumpInstructions(b.types()));
+ EXPECT_EQ(expected.instructions, "\n" + DumpInstructions(b.functions()[0].instructions()));
+ EXPECT_EQ(expected.capabilities, "\n" + DumpInstructions(b.capabilities()));
}
// Check the SPIRV generated passes validation
TEST_P(BuiltinTextureTest, ValidateSPIRV) {
- auto param = GetParam();
+ auto param = GetParam();
- param.BuildTextureVariable(this);
- param.BuildSamplerVariable(this);
+ param.BuildTextureVariable(this);
+ param.BuildSamplerVariable(this);
- auto* call = Call(param.function, param.args(this));
+ auto* call = Call(param.function, param.args(this));
- auto* stmt = CallStmt(call);
- Func("main", {}, ty.void_(), {stmt}, {Stage(ast::PipelineStage::kFragment)});
+ auto* stmt = CallStmt(call);
+ Func("main", {}, ty.void_(), {stmt}, {Stage(ast::PipelineStage::kFragment)});
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- ASSERT_TRUE(b.Build()) << b.error();
+ ASSERT_TRUE(b.Build()) << b.error();
- Validate(b);
+ Validate(b);
}
TEST_P(BuiltinTextureTest, OutsideFunction_IsError) {
- auto param = GetParam();
+ auto param = GetParam();
- // The point of this test is to try to generate the texture
- // builtin call outside a function.
+ // The point of this test is to try to generate the texture
+ // builtin call outside a function.
- auto* texture = param.BuildTextureVariable(this);
- auto* sampler = param.BuildSamplerVariable(this);
+ auto* texture = param.BuildTextureVariable(this);
+ auto* sampler = param.BuildSamplerVariable(this);
- auto* call = Call(param.function, param.args(this));
- auto* stmt = CallStmt(call);
- Func("func", {}, ty.void_(), {stmt},
- {create<ast::StageAttribute>(ast::PipelineStage::kFragment)});
+ auto* call = Call(param.function, param.args(this));
+ auto* stmt = CallStmt(call);
+ Func("func", {}, ty.void_(), {stmt},
+ {create<ast::StageAttribute>(ast::PipelineStage::kFragment)});
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- ASSERT_TRUE(b.GenerateGlobalVariable(texture)) << b.error();
- ASSERT_TRUE(b.GenerateGlobalVariable(sampler)) << b.error();
- EXPECT_EQ(b.GenerateExpression(call), 0u);
- EXPECT_THAT(b.error(),
- ::testing::StartsWith(
- "Internal error: trying to add SPIR-V instruction "));
- EXPECT_THAT(b.error(), ::testing::EndsWith(" outside a function"));
+ ASSERT_TRUE(b.GenerateGlobalVariable(texture)) << b.error();
+ ASSERT_TRUE(b.GenerateGlobalVariable(sampler)) << b.error();
+ EXPECT_EQ(b.GenerateExpression(call), 0u);
+ EXPECT_THAT(b.error(),
+ ::testing::StartsWith("Internal error: trying to add SPIR-V instruction "));
+ EXPECT_THAT(b.error(), ::testing::EndsWith(" outside a function"));
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/writer/spirv/builder_call_test.cc b/chromium/third_party/dawn/src/tint/writer/spirv/builder_call_test.cc
index 77b169f7c49..b81ca963f64 100644
--- a/chromium/third_party/dawn/src/tint/writer/spirv/builder_call_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/spirv/builder_call_test.cc
@@ -17,26 +17,27 @@
#include "src/tint/writer/spirv/spv_dump.h"
#include "src/tint/writer/spirv/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::spirv {
namespace {
using BuilderTest = TestHelper;
TEST_F(BuilderTest, Expression_Call) {
- ast::VariableList func_params;
- func_params.push_back(Param("a", ty.f32()));
- func_params.push_back(Param("b", ty.f32()));
+ ast::VariableList func_params;
+ func_params.push_back(Param("a", ty.f32()));
+ func_params.push_back(Param("b", ty.f32()));
- auto* a_func = Func("a_func", func_params, ty.f32(), {Return(Add("a", "b"))});
- auto* func =
- Func("main", {}, ty.void_(), {Assign(Phony(), Call("a_func", 1.f, 1.f))});
+ auto* a_func = Func("a_func", func_params, ty.f32(), {Return(Add("a", "b"))});
+ auto* func = Func("main", {}, ty.void_(), {Assign(Phony(), Call("a_func", 1_f, 1_f))});
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- ASSERT_TRUE(b.GenerateFunction(a_func)) << b.error();
- ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
+ ASSERT_TRUE(b.GenerateFunction(a_func)) << b.error();
+ ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
- EXPECT_EQ(DumpBuilder(b), R"(OpName %3 "a_func"
+ EXPECT_EQ(DumpBuilder(b), R"(OpName %3 "a_func"
OpName %4 "a"
OpName %5 "b"
OpName %10 "main"
@@ -61,21 +62,20 @@ OpFunctionEnd
}
TEST_F(BuilderTest, Statement_Call) {
- ast::VariableList func_params;
- func_params.push_back(Param("a", ty.f32()));
- func_params.push_back(Param("b", ty.f32()));
+ ast::VariableList func_params;
+ func_params.push_back(Param("a", ty.f32()));
+ func_params.push_back(Param("b", ty.f32()));
- auto* a_func = Func("a_func", func_params, ty.f32(), {Return(Add("a", "b"))});
+ auto* a_func = Func("a_func", func_params, ty.f32(), {Return(Add("a", "b"))});
- auto* func =
- Func("main", {}, ty.void_(), {CallStmt(Call("a_func", 1.f, 1.f))});
+ auto* func = Func("main", {}, ty.void_(), {CallStmt(Call("a_func", 1_f, 1_f))});
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- ASSERT_TRUE(b.GenerateFunction(a_func)) << b.error();
- ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
+ ASSERT_TRUE(b.GenerateFunction(a_func)) << b.error();
+ ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
- EXPECT_EQ(DumpBuilder(b), R"(OpName %3 "a_func"
+ EXPECT_EQ(DumpBuilder(b), R"(OpName %3 "a_func"
OpName %4 "a"
OpName %5 "b"
OpName %10 "main"
diff --git a/chromium/third_party/dawn/src/tint/writer/spirv/builder_constructor_expression_test.cc b/chromium/third_party/dawn/src/tint/writer/spirv/builder_constructor_expression_test.cc
index 495f1c4f5eb..d1fba8fb303 100644
--- a/chromium/third_party/dawn/src/tint/writer/spirv/builder_constructor_expression_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/spirv/builder_constructor_expression_test.cc
@@ -15,48 +15,37 @@
#include "src/tint/writer/spirv/spv_dump.h"
#include "src/tint/writer/spirv/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::spirv {
namespace {
using SpvBuilderConstructorTest = TestHelper;
TEST_F(SpvBuilderConstructorTest, Const) {
- auto* c = Expr(42.2f);
- auto* g = Global("g", ty.f32(), c, ast::StorageClass::kPrivate);
+ auto* c = Expr(42.2_f);
+ auto* g = Global("g", ty.f32(), c, ast::StorageClass::kPrivate);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- EXPECT_EQ(b.GenerateConstructorExpression(g, c), 2u);
- ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(b.GenerateConstructorExpression(g, c), 2u);
+ ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeFloat 32
%2 = OpConstant %1 42.2000008
)");
}
-TEST_F(SpvBuilderConstructorTest, Type_WithCasts_OutsideFunction_IsError) {
- auto* t = Construct<f32>(Construct<u32>(1));
- WrapInFunction(t);
-
- spirv::Builder& b = Build();
-
- EXPECT_EQ(b.GenerateExpression(t), 0u);
- EXPECT_TRUE(b.has_error()) << b.error();
- EXPECT_EQ(b.error(),
- "Internal error: trying to add SPIR-V instruction 124 outside a "
- "function");
-}
-
TEST_F(SpvBuilderConstructorTest, Type) {
- auto* t = vec3<f32>(1.0f, 1.0f, 3.0f);
- WrapInFunction(t);
+ auto* t = vec3<f32>(1_f, 1_f, 3_f);
+ WrapInFunction(t);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- EXPECT_EQ(b.GenerateConstructorExpression(nullptr, t), 5u);
- ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(b.GenerateConstructorExpression(nullptr, t), 5u);
+ ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
%1 = OpTypeVector %2 3
%3 = OpConstant %2 1
%4 = OpConstant %2 3
@@ -65,633 +54,1065 @@ TEST_F(SpvBuilderConstructorTest, Type) {
}
TEST_F(SpvBuilderConstructorTest, Type_WithCasts) {
- auto* t = vec2<f32>(Construct<f32>(1), Construct<f32>(1));
- WrapInFunction(t);
+ auto* t = vec2<f32>(Construct<f32>(1_i), Construct<f32>(1_i));
+ WrapInFunction(t);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
+ b.push_function(Function{});
- EXPECT_EQ(b.GenerateExpression(t), 7u);
- ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(b.GenerateExpression(t), 4u);
+ ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
%1 = OpTypeVector %2 2
-%4 = OpTypeInt 32 1
-%5 = OpConstant %4 1
-)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(%3 = OpConvertSToF %2 %5
-%6 = OpConvertSToF %2 %5
-%7 = OpCompositeConstruct %1 %3 %6
+%3 = OpConstant %2 1
+%4 = OpConstantComposite %1 %3 %3
)");
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()), R"()");
}
TEST_F(SpvBuilderConstructorTest, Type_WithAlias) {
- // type Int = i32
- // cast<Int>(2.3f)
+ // type Int = i32
+ // cast<Int>(2.3f)
- auto* alias = Alias("Int", ty.i32());
- auto* cast = Construct(ty.Of(alias), 2.3f);
- WrapInFunction(cast);
+ auto* alias = Alias("Int", ty.i32());
+ auto* cast = Construct(ty.Of(alias), 2.3_f);
+ WrapInFunction(cast);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- EXPECT_EQ(b.GenerateExpression(cast), 1u);
+ b.push_function(Function{});
+ EXPECT_EQ(b.GenerateExpression(cast), 2u);
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeInt 32 1
-%3 = OpTypeFloat 32
-%4 = OpConstant %3 2.29999995
-)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(%1 = OpConvertFToS %2 %4
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeInt 32 1
+%2 = OpConstant %1 2
)");
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()), R"()");
}
TEST_F(SpvBuilderConstructorTest, Type_IdentifierExpression_Param) {
- auto* var = Var("ident", ty.f32());
+ auto* var = Var("ident", ty.f32());
- auto* t = vec2<f32>(1.0f, "ident");
- WrapInFunction(var, t);
+ auto* t = vec2<f32>(1_f, "ident");
+ WrapInFunction(var, t);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- ASSERT_TRUE(b.GenerateFunctionVariable(var)) << b.error();
+ b.push_function(Function{});
+ ASSERT_TRUE(b.GenerateFunctionVariable(var)) << b.error();
- EXPECT_EQ(b.GenerateExpression(t), 8u);
- ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(b.GenerateExpression(t), 8u);
+ ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%3 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%3 = OpTypeFloat 32
%2 = OpTypePointer Function %3
%4 = OpConstantNull %3
%5 = OpTypeVector %3 2
%6 = OpConstant %3 1
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].variables()),
- R"(%1 = OpVariable %2 Function %4
+ EXPECT_EQ(DumpInstructions(b.functions()[0].variables()),
+ R"(%1 = OpVariable %2 Function %4
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(%7 = OpLoad %3 %1
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(%7 = OpLoad %3 %1
%8 = OpCompositeConstruct %5 %6 %7
)");
}
TEST_F(SpvBuilderConstructorTest, Vector_Bitcast_Params) {
- auto* t = vec2<u32>(Construct<u32>(1), Construct<u32>(1));
- WrapInFunction(t);
+ auto* var = Var("v", nullptr, vec3<f32>(1_f, 2_f, 3_f));
+ auto* cast = Bitcast(ty.vec3<u32>(), var);
+ WrapInFunction(var, cast);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
+ b.push_function(Function{});
+ ASSERT_TRUE(b.GenerateFunctionVariable(var)) << b.error();
+ ASSERT_EQ(b.GenerateExpression(cast), 10u) << b.error();
- EXPECT_EQ(b.GenerateExpression(t), 7u);
- ASSERT_FALSE(b.has_error()) << b.error();
-
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeInt 32 0
-%1 = OpTypeVector %2 2
-%4 = OpTypeInt 32 1
-%5 = OpConstant %4 1
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
+%1 = OpTypeVector %2 3
+%3 = OpConstant %2 1
+%4 = OpConstant %2 2
+%5 = OpConstant %2 3
+%6 = OpConstantComposite %1 %3 %4 %5
+%8 = OpTypePointer Function %1
+%9 = OpConstantNull %1
+%12 = OpTypeInt 32 0
+%11 = OpTypeVector %12 3
)");
-
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(%3 = OpBitcast %2 %5
-%6 = OpBitcast %2 %5
-%7 = OpCompositeConstruct %1 %3 %6
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(OpStore %7 %6
+%13 = OpLoad %1 %7
+%10 = OpBitcast %11 %13
)");
}
TEST_F(SpvBuilderConstructorTest, Type_Bool_With_Bool) {
- auto* cast = Construct<bool>(true);
- WrapInFunction(cast);
+ auto* cast = Construct<bool>(true);
+ WrapInFunction(cast);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
+ b.push_function(Function{});
- EXPECT_EQ(b.GenerateExpression(cast), 3u);
- ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(b.GenerateExpression(cast), 2u);
+ ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeBool
-%3 = OpConstantTrue %2
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeBool
+%2 = OpConstantTrue %1
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()), R"()");
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()), R"()");
}
TEST_F(SpvBuilderConstructorTest, Type_I32_With_I32) {
- auto* cast = Construct<i32>(2);
- WrapInFunction(cast);
+ auto* cast = Construct<i32>(2_i);
+ WrapInFunction(cast);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- EXPECT_EQ(b.GenerateExpression(cast), 3u);
+ b.push_function(Function{});
+ EXPECT_EQ(b.GenerateExpression(cast), 2u);
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeInt 32 1
-%3 = OpConstant %2 2
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeInt 32 1
+%2 = OpConstant %1 2
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()), R"()");
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()), R"()");
}
TEST_F(SpvBuilderConstructorTest, Type_U32_With_U32) {
- auto* cast = Construct<u32>(2u);
- WrapInFunction(cast);
+ auto* cast = Construct<u32>(2_u);
+ WrapInFunction(cast);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- EXPECT_EQ(b.GenerateExpression(cast), 3u);
+ b.push_function(Function{});
+ EXPECT_EQ(b.GenerateExpression(cast), 2u);
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeInt 32 0
-%3 = OpConstant %2 2
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeInt 32 0
+%2 = OpConstant %1 2
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()), R"()");
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()), R"()");
}
TEST_F(SpvBuilderConstructorTest, Type_F32_With_F32) {
- auto* cast = Construct<f32>(2.0f);
- WrapInFunction(cast);
+ auto* cast = Construct<f32>(2_f);
+ WrapInFunction(cast);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- EXPECT_EQ(b.GenerateExpression(cast), 3u);
+ b.push_function(Function{});
+ EXPECT_EQ(b.GenerateExpression(cast), 2u);
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
-%3 = OpConstant %2 2
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeFloat 32
+%2 = OpConstant %1 2
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()), R"()");
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()), R"()");
}
TEST_F(SpvBuilderConstructorTest, Type_Vec2_With_Bool_Literal) {
- auto* cast = vec2<bool>(true);
- WrapInFunction(cast);
+ auto* cast = vec2<bool>(true);
+ WrapInFunction(cast);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- EXPECT_EQ(b.GenerateExpression(cast), 4u);
+ b.push_function(Function{});
+ EXPECT_EQ(b.GenerateExpression(cast), 4u);
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeBool
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeBool
%1 = OpTypeVector %2 2
%3 = OpConstantTrue %2
%4 = OpConstantComposite %1 %3 %3
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()), R"()");
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()), R"()");
}
TEST_F(SpvBuilderConstructorTest, Type_Vec2_With_Bool_Var) {
- auto* var = Var("v", nullptr, Expr(true));
- auto* cast = vec2<bool>(var);
- WrapInFunction(var, cast);
+ auto* var = Var("v", nullptr, Expr(true));
+ auto* cast = vec2<bool>(var);
+ WrapInFunction(var, cast);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- ASSERT_TRUE(b.GenerateFunctionVariable(var)) << b.error();
- ASSERT_EQ(b.GenerateExpression(cast), 8u) << b.error();
+ b.push_function(Function{});
+ ASSERT_TRUE(b.GenerateFunctionVariable(var)) << b.error();
+ ASSERT_EQ(b.GenerateExpression(cast), 8u) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeBool
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeBool
%2 = OpConstantTrue %1
%4 = OpTypePointer Function %1
%5 = OpConstantNull %1
%6 = OpTypeVector %1 2
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(OpStore %3 %2
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(OpStore %3 %2
%7 = OpLoad %1 %3
%8 = OpCompositeConstruct %6 %7 %7
)");
}
TEST_F(SpvBuilderConstructorTest, Type_Vec2_With_F32_Literal) {
- auto* cast = vec2<f32>(2.0f);
- WrapInFunction(cast);
+ auto* cast = vec2<f32>(2_f);
+ WrapInFunction(cast);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- EXPECT_EQ(b.GenerateExpression(cast), 4u);
+ b.push_function(Function{});
+ EXPECT_EQ(b.GenerateExpression(cast), 4u);
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
%1 = OpTypeVector %2 2
%3 = OpConstant %2 2
%4 = OpConstantComposite %1 %3 %3
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()), R"()");
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()), R"()");
}
-TEST_F(SpvBuilderConstructorTest, Type_Vec2_With_F32_Var) {
- auto* var = Var("v", nullptr, Expr(2.0f));
- auto* cast = vec2<f32>(var);
- WrapInFunction(var, cast);
+TEST_F(SpvBuilderConstructorTest, Type_Vec2_With_F32_F32) {
+ auto* var = Decl(Var("x", ty.f32(), Expr(2_f)));
+ auto* cast = vec2<f32>("x", "x");
+ WrapInFunction(var, cast);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- ASSERT_TRUE(b.GenerateFunctionVariable(var)) << b.error();
- ASSERT_EQ(b.GenerateExpression(cast), 8u) << b.error();
+ b.push_function(Function{});
+ EXPECT_TRUE(b.GenerateStatement(var));
+ EXPECT_EQ(b.GenerateExpression(cast), 9u);
- EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeFloat 32
%2 = OpConstant %1 2
%4 = OpTypePointer Function %1
%5 = OpConstantNull %1
%6 = OpTypeVector %1 2
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()), R"(OpStore %3 %2
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(OpStore %3 %2
%7 = OpLoad %1 %3
-%8 = OpCompositeConstruct %6 %7 %7
+%8 = OpLoad %1 %3
+%9 = OpCompositeConstruct %6 %7 %8
)");
}
-TEST_F(SpvBuilderConstructorTest, Type_Vec2_With_F32_F32) {
- auto* cast = vec2<f32>(2.0f, 2.0f);
- WrapInFunction(cast);
+TEST_F(SpvBuilderConstructorTest, Type_Vec2_With_F32_F32_Const) {
+ auto* cast = vec2<f32>(1_f, 2_f);
+ WrapInFunction(cast);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- EXPECT_EQ(b.GenerateExpression(cast), 4u);
+ b.push_function(Function{});
+ EXPECT_EQ(b.GenerateExpression(cast), 5u);
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
%1 = OpTypeVector %2 2
-%3 = OpConstant %2 2
-%4 = OpConstantComposite %1 %3 %3
+%3 = OpConstant %2 1
+%4 = OpConstant %2 2
+%5 = OpConstantComposite %1 %3 %4
)");
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()), R"()");
}
TEST_F(SpvBuilderConstructorTest, Type_Vec2_With_Vec2) {
- auto* value = vec2<f32>(2.0f, 2.0f);
- auto* cast = vec2<f32>(value);
- WrapInFunction(cast);
+ auto* var = Decl(Var("x", ty.vec2<f32>(), vec2<f32>(1_f, 2_f)));
+ auto* cast = vec2<f32>("x");
+ WrapInFunction(var, cast);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- EXPECT_EQ(b.GenerateExpression(cast), 5u);
+ b.push_function(Function{});
+ EXPECT_TRUE(b.GenerateStatement(var));
+ EXPECT_EQ(b.GenerateExpression(cast), 10u);
- EXPECT_EQ(DumpInstructions(b.types()), R"(%3 = OpTypeFloat 32
-%2 = OpTypeVector %3 2
-%4 = OpConstant %3 2
-%5 = OpConstantComposite %2 %4 %4
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
+%1 = OpTypeVector %2 2
+%3 = OpConstant %2 1
+%4 = OpConstant %2 2
+%5 = OpConstantComposite %1 %3 %4
+%7 = OpTypePointer Function %1
+%8 = OpConstantNull %1
+)");
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(OpStore %6 %5
+%10 = OpLoad %1 %6
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()), R"()");
+}
+
+TEST_F(SpvBuilderConstructorTest, Type_Vec2_With_Vec2_Const) {
+ auto* cast = vec2<f32>(vec2<f32>(1_f, 2_f));
+ WrapInFunction(cast);
+
+ spirv::Builder& b = Build();
+
+ b.push_function(Function{});
+ EXPECT_EQ(b.GenerateExpression(cast), 5u);
+
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
+%1 = OpTypeVector %2 2
+%3 = OpConstant %2 1
+%4 = OpConstant %2 2
+%5 = OpConstantComposite %1 %3 %4
+)");
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()), R"()");
}
TEST_F(SpvBuilderConstructorTest, Type_Vec3_With_F32) {
- auto* cast = vec3<f32>(2.0f);
- WrapInFunction(cast);
+ auto* var = Decl(Var("x", ty.f32(), Expr(2_f)));
+ auto* cast = vec3<f32>("x", "x", "x");
+ WrapInFunction(var, cast);
+
+ spirv::Builder& b = Build();
+
+ b.push_function(Function{});
+ EXPECT_TRUE(b.GenerateStatement(var));
+ EXPECT_EQ(b.GenerateExpression(cast), 10u);
+
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeFloat 32
+%2 = OpConstant %1 2
+%4 = OpTypePointer Function %1
+%5 = OpConstantNull %1
+%6 = OpTypeVector %1 3
+)");
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(OpStore %3 %2
+%7 = OpLoad %1 %3
+%8 = OpLoad %1 %3
+%9 = OpLoad %1 %3
+%10 = OpCompositeConstruct %6 %7 %8 %9
+)");
+}
- spirv::Builder& b = Build();
+TEST_F(SpvBuilderConstructorTest, Type_Vec3_With_F32_Const) {
+ auto* cast = vec3<f32>(1_f, 2_f, 3_f);
+ WrapInFunction(cast);
- b.push_function(Function{});
- EXPECT_EQ(b.GenerateExpression(cast), 4u);
+ spirv::Builder& b = Build();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
+ b.push_function(Function{});
+ EXPECT_EQ(b.GenerateExpression(cast), 6u);
+
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
%1 = OpTypeVector %2 3
-%3 = OpConstant %2 2
-%4 = OpConstantComposite %1 %3 %3 %3
+%3 = OpConstant %2 1
+%4 = OpConstant %2 2
+%5 = OpConstant %2 3
+%6 = OpConstantComposite %1 %3 %4 %5
)");
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()), R"()");
}
TEST_F(SpvBuilderConstructorTest, Type_Vec3_With_Bool) {
- auto* cast = vec3<bool>(true);
- WrapInFunction(cast);
+ auto* var = Decl(Var("x", ty.bool_(), Expr(true)));
+ auto* cast = vec3<bool>("x", "x", "x");
+ WrapInFunction(var, cast);
+
+ spirv::Builder& b = Build();
+
+ b.push_function(Function{});
+ EXPECT_TRUE(b.GenerateStatement(var));
+ EXPECT_EQ(b.GenerateExpression(cast), 10u);
+
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeBool
+%2 = OpConstantTrue %1
+%4 = OpTypePointer Function %1
+%5 = OpConstantNull %1
+%6 = OpTypeVector %1 3
+)");
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(OpStore %3 %2
+%7 = OpLoad %1 %3
+%8 = OpLoad %1 %3
+%9 = OpLoad %1 %3
+%10 = OpCompositeConstruct %6 %7 %8 %9
+)");
+}
+
+TEST_F(SpvBuilderConstructorTest, Type_Vec3_With_Bool_Const) {
+ auto* cast = vec3<bool>(true, false, true);
+ WrapInFunction(cast);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- EXPECT_EQ(b.GenerateExpression(cast), 4u);
+ b.push_function(Function{});
+ EXPECT_EQ(b.GenerateExpression(cast), 5u);
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeBool
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeBool
%1 = OpTypeVector %2 3
%3 = OpConstantTrue %2
-%4 = OpConstantComposite %1 %3 %3 %3
+%4 = OpConstantFalse %2
+%5 = OpConstantComposite %1 %3 %4 %3
)");
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()), R"()");
}
TEST_F(SpvBuilderConstructorTest, Type_Vec3_With_F32_F32_F32) {
- auto* cast = vec3<f32>(2.0f, 2.0f, 2.0f);
- WrapInFunction(cast);
+ auto* var = Decl(Var("x", ty.f32(), Expr(2_f)));
+ auto* cast = vec3<f32>("x", "x", "x");
+ WrapInFunction(var, cast);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- EXPECT_EQ(b.GenerateExpression(cast), 4u);
+ b.push_function(Function{});
+ EXPECT_TRUE(b.GenerateStatement(var));
+ EXPECT_EQ(b.GenerateExpression(cast), 10u);
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeFloat 32
+%2 = OpConstant %1 2
+%4 = OpTypePointer Function %1
+%5 = OpConstantNull %1
+%6 = OpTypeVector %1 3
+)");
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(OpStore %3 %2
+%7 = OpLoad %1 %3
+%8 = OpLoad %1 %3
+%9 = OpLoad %1 %3
+%10 = OpCompositeConstruct %6 %7 %8 %9
+)");
+}
+
+TEST_F(SpvBuilderConstructorTest, Type_Vec3_With_F32_F32_F32_Const) {
+ auto* cast = vec3<f32>(1_f, 2_f, 3_f);
+ WrapInFunction(cast);
+
+ spirv::Builder& b = Build();
+
+ b.push_function(Function{});
+ EXPECT_EQ(b.GenerateExpression(cast), 6u);
+
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
%1 = OpTypeVector %2 3
-%3 = OpConstant %2 2
-%4 = OpConstantComposite %1 %3 %3 %3
+%3 = OpConstant %2 1
+%4 = OpConstant %2 2
+%5 = OpConstant %2 3
+%6 = OpConstantComposite %1 %3 %4 %5
)");
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()), R"()");
}
TEST_F(SpvBuilderConstructorTest, Type_Vec3_With_F32_Vec2) {
- auto* cast = vec3<f32>(2.0f, vec2<f32>(2.0f, 2.0f));
- WrapInFunction(cast);
+ auto* var = Decl(Var("x", ty.vec2<f32>(), vec2<f32>(2_f, 3_f)));
+ auto* cast = vec3<f32>(1_f, "x");
+ WrapInFunction(var, cast);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- EXPECT_EQ(b.GenerateExpression(cast), 8u);
+ b.push_function(Function{});
+ EXPECT_TRUE(b.GenerateStatement(var));
+ EXPECT_EQ(b.GenerateExpression(cast), 14u);
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
-%1 = OpTypeVector %2 3
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
+%1 = OpTypeVector %2 2
%3 = OpConstant %2 2
-%4 = OpTypeVector %2 2
-%5 = OpConstantComposite %4 %3 %3
+%4 = OpConstant %2 3
+%5 = OpConstantComposite %1 %3 %4
+%7 = OpTypePointer Function %1
+%8 = OpConstantNull %1
+%9 = OpTypeVector %2 3
+%10 = OpConstant %2 1
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(%6 = OpCompositeExtract %2 %5 0
-%7 = OpCompositeExtract %2 %5 1
-%8 = OpCompositeConstruct %1 %3 %6 %7
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(OpStore %6 %5
+%11 = OpLoad %1 %6
+%12 = OpCompositeExtract %2 %11 0
+%13 = OpCompositeExtract %2 %11 1
+%14 = OpCompositeConstruct %9 %10 %12 %13
)");
}
-TEST_F(SpvBuilderConstructorTest, Type_Vec3_With_Vec2_F32) {
- auto* cast = vec3<f32>(vec2<f32>(2.0f, 2.0f), 2.0f);
- WrapInFunction(cast);
+TEST_F(SpvBuilderConstructorTest, Type_Vec3_With_F32_Vec2_Const) {
+ auto* cast = vec3<f32>(1_f, vec2<f32>(2_f, 3_f));
+ WrapInFunction(cast);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- EXPECT_EQ(b.GenerateExpression(cast), 8u);
+ b.push_function(Function{});
+ EXPECT_EQ(b.GenerateExpression(cast), 6u);
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
%1 = OpTypeVector %2 3
-%3 = OpTypeVector %2 2
+%3 = OpConstant %2 1
%4 = OpConstant %2 2
-%5 = OpConstantComposite %3 %4 %4
+%5 = OpConstant %2 3
+%6 = OpConstantComposite %1 %3 %4 %5
+)");
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()), R"()");
+}
+
+TEST_F(SpvBuilderConstructorTest, Type_Vec3_With_Vec2_F32) {
+ auto* var = Decl(Var("x", ty.vec2<f32>(), vec2<f32>(1_f, 2_f)));
+ auto* cast = vec3<f32>("x", 3_f);
+ WrapInFunction(var, cast);
+
+ spirv::Builder& b = Build();
+
+ b.push_function(Function{});
+ EXPECT_TRUE(b.GenerateStatement(var));
+ EXPECT_EQ(b.GenerateExpression(cast), 14u);
+
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
+%1 = OpTypeVector %2 2
+%3 = OpConstant %2 1
+%4 = OpConstant %2 2
+%5 = OpConstantComposite %1 %3 %4
+%7 = OpTypePointer Function %1
+%8 = OpConstantNull %1
+%9 = OpTypeVector %2 3
+%13 = OpConstant %2 3
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(%6 = OpCompositeExtract %2 %5 0
-%7 = OpCompositeExtract %2 %5 1
-%8 = OpCompositeConstruct %1 %6 %7 %4
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(OpStore %6 %5
+%10 = OpLoad %1 %6
+%11 = OpCompositeExtract %2 %10 0
+%12 = OpCompositeExtract %2 %10 1
+%14 = OpCompositeConstruct %9 %11 %12 %13
+)");
+}
+
+TEST_F(SpvBuilderConstructorTest, Type_Vec3_With_Vec2_F32_Const) {
+ auto* cast = vec3<f32>(vec2<f32>(1_f, 2_f), 3_f);
+ WrapInFunction(cast);
+
+ spirv::Builder& b = Build();
+
+ b.push_function(Function{});
+ EXPECT_EQ(b.GenerateExpression(cast), 6u);
+
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
+%1 = OpTypeVector %2 3
+%3 = OpConstant %2 1
+%4 = OpConstant %2 2
+%5 = OpConstant %2 3
+%6 = OpConstantComposite %1 %3 %4 %5
)");
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()), R"()");
}
TEST_F(SpvBuilderConstructorTest, Type_Vec3_With_Vec3) {
- auto* value = vec3<f32>(2.0f, 2.0f, 2.0f);
- auto* cast = vec3<f32>(value);
- WrapInFunction(cast);
+ auto* var = Decl(Var("x", ty.vec3<f32>(), vec3<f32>(1_f, 2_f, 3_f)));
+ auto* cast = vec3<f32>("x");
+ WrapInFunction(var, cast);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- EXPECT_EQ(b.GenerateExpression(cast), 5u);
+ b.push_function(Function{});
+ EXPECT_TRUE(b.GenerateStatement(var));
+ EXPECT_EQ(b.GenerateExpression(cast), 11u);
- EXPECT_EQ(DumpInstructions(b.types()), R"(%3 = OpTypeFloat 32
-%2 = OpTypeVector %3 3
-%4 = OpConstant %3 2
-%5 = OpConstantComposite %2 %4 %4 %4
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
+%1 = OpTypeVector %2 3
+%3 = OpConstant %2 1
+%4 = OpConstant %2 2
+%5 = OpConstant %2 3
+%6 = OpConstantComposite %1 %3 %4 %5
+%8 = OpTypePointer Function %1
+%9 = OpConstantNull %1
+)");
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(OpStore %7 %6
+%11 = OpLoad %1 %7
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()), R"()");
+}
+
+TEST_F(SpvBuilderConstructorTest, Type_Vec3_With_Vec3_Const) {
+ auto* cast = vec3<f32>(vec3<f32>(1_f, 2_f, 3_f));
+ WrapInFunction(cast);
+
+ spirv::Builder& b = Build();
+
+ b.push_function(Function{});
+ EXPECT_EQ(b.GenerateExpression(cast), 6u);
+
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
+%1 = OpTypeVector %2 3
+%3 = OpConstant %2 1
+%4 = OpConstant %2 2
+%5 = OpConstant %2 3
+%6 = OpConstantComposite %1 %3 %4 %5
+)");
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()), R"()");
}
TEST_F(SpvBuilderConstructorTest, Type_Vec4_With_Bool) {
- auto* cast = vec4<bool>(true);
- WrapInFunction(cast);
+ auto* var = Decl(Var("x", ty.bool_(), Expr(true)));
+ auto* cast = vec4<bool>("x");
+ WrapInFunction(var, cast);
+
+ spirv::Builder& b = Build();
+
+ b.push_function(Function{});
+ EXPECT_TRUE(b.GenerateStatement(var));
+ EXPECT_EQ(b.GenerateExpression(cast), 8u);
+
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeBool
+%2 = OpConstantTrue %1
+%4 = OpTypePointer Function %1
+%5 = OpConstantNull %1
+%6 = OpTypeVector %1 4
+)");
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(OpStore %3 %2
+%7 = OpLoad %1 %3
+%8 = OpCompositeConstruct %6 %7 %7 %7 %7
+)");
+}
- spirv::Builder& b = Build();
+TEST_F(SpvBuilderConstructorTest, Type_Vec4_With_Bool_Const) {
+ auto* cast = vec4<bool>(true);
+ WrapInFunction(cast);
- b.push_function(Function{});
- EXPECT_EQ(b.GenerateExpression(cast), 4u);
+ spirv::Builder& b = Build();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeBool
+ b.push_function(Function{});
+ EXPECT_EQ(b.GenerateExpression(cast), 4u);
+
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeBool
%1 = OpTypeVector %2 4
%3 = OpConstantTrue %2
%4 = OpConstantComposite %1 %3 %3 %3 %3
)");
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()), R"()");
}
TEST_F(SpvBuilderConstructorTest, Type_Vec4_With_F32) {
- auto* cast = vec4<f32>(2.0f);
- WrapInFunction(cast);
+ auto* var = Decl(Var("x", ty.f32(), Expr(2_f)));
+ auto* cast = vec4<f32>("x");
+ WrapInFunction(var, cast);
+
+ spirv::Builder& b = Build();
+
+ b.push_function(Function{});
+ EXPECT_TRUE(b.GenerateStatement(var));
+ EXPECT_EQ(b.GenerateExpression(cast), 8u);
+
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeFloat 32
+%2 = OpConstant %1 2
+%4 = OpTypePointer Function %1
+%5 = OpConstantNull %1
+%6 = OpTypeVector %1 4
+)");
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(OpStore %3 %2
+%7 = OpLoad %1 %3
+%8 = OpCompositeConstruct %6 %7 %7 %7 %7
+)");
+}
- spirv::Builder& b = Build();
+TEST_F(SpvBuilderConstructorTest, Type_Vec4_With_F32_Const) {
+ auto* cast = vec4<f32>(2_f);
+ WrapInFunction(cast);
- b.push_function(Function{});
- EXPECT_EQ(b.GenerateExpression(cast), 4u);
+ spirv::Builder& b = Build();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
+ b.push_function(Function{});
+ EXPECT_EQ(b.GenerateExpression(cast), 4u);
+
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
%1 = OpTypeVector %2 4
%3 = OpConstant %2 2
%4 = OpConstantComposite %1 %3 %3 %3 %3
)");
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()), R"()");
}
TEST_F(SpvBuilderConstructorTest, Type_Vec4_With_F32_F32_F32_F32) {
- auto* cast = vec4<f32>(2.0f, 2.0f, 2.0f, 2.0f);
- WrapInFunction(cast);
+ auto* var = Decl(Var("x", ty.f32(), Expr(2_f)));
+ auto* cast = vec4<f32>("x", "x", "x", "x");
+ WrapInFunction(var, cast);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- EXPECT_EQ(b.GenerateExpression(cast), 4u);
+ b.push_function(Function{});
+ EXPECT_TRUE(b.GenerateStatement(var));
+ EXPECT_EQ(b.GenerateExpression(cast), 11u);
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeFloat 32
+%2 = OpConstant %1 2
+%4 = OpTypePointer Function %1
+%5 = OpConstantNull %1
+%6 = OpTypeVector %1 4
+)");
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(OpStore %3 %2
+%7 = OpLoad %1 %3
+%8 = OpLoad %1 %3
+%9 = OpLoad %1 %3
+%10 = OpLoad %1 %3
+%11 = OpCompositeConstruct %6 %7 %8 %9 %10
+)");
+}
+
+TEST_F(SpvBuilderConstructorTest, Type_Vec4_With_F32_F32_F32_F32_Const) {
+ auto* cast = vec4<f32>(1_f, 2_f, 3_f, 4_f);
+ WrapInFunction(cast);
+
+ spirv::Builder& b = Build();
+
+ b.push_function(Function{});
+ EXPECT_EQ(b.GenerateExpression(cast), 7u);
+
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
%1 = OpTypeVector %2 4
-%3 = OpConstant %2 2
-%4 = OpConstantComposite %1 %3 %3 %3 %3
+%3 = OpConstant %2 1
+%4 = OpConstant %2 2
+%5 = OpConstant %2 3
+%6 = OpConstant %2 4
+%7 = OpConstantComposite %1 %3 %4 %5 %6
)");
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()), R"()");
}
TEST_F(SpvBuilderConstructorTest, Type_Vec4_With_F32_F32_Vec2) {
- auto* cast = vec4<f32>(2.0f, 2.0f, vec2<f32>(2.0f, 2.0f));
- WrapInFunction(cast);
+ auto* var = Decl(Var("x", ty.vec2<f32>(), vec2<f32>(1_f, 2_f)));
+ auto* cast = vec4<f32>(1_f, 2_f, "x");
+ WrapInFunction(var, cast);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- EXPECT_EQ(b.GenerateExpression(cast), 8u);
+ b.push_function(Function{});
+ EXPECT_TRUE(b.GenerateStatement(var));
+ EXPECT_EQ(b.GenerateExpression(cast), 13u);
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
-%1 = OpTypeVector %2 4
-%3 = OpConstant %2 2
-%4 = OpTypeVector %2 2
-%5 = OpConstantComposite %4 %3 %3
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
+%1 = OpTypeVector %2 2
+%3 = OpConstant %2 1
+%4 = OpConstant %2 2
+%5 = OpConstantComposite %1 %3 %4
+%7 = OpTypePointer Function %1
+%8 = OpConstantNull %1
+%9 = OpTypeVector %2 4
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(%6 = OpCompositeExtract %2 %5 0
-%7 = OpCompositeExtract %2 %5 1
-%8 = OpCompositeConstruct %1 %3 %3 %6 %7
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(OpStore %6 %5
+%10 = OpLoad %1 %6
+%11 = OpCompositeExtract %2 %10 0
+%12 = OpCompositeExtract %2 %10 1
+%13 = OpCompositeConstruct %9 %3 %4 %11 %12
)");
}
-TEST_F(SpvBuilderConstructorTest, Type_Vec4_With_F32_Vec2_F32) {
- auto* cast = vec4<f32>(2.0f, vec2<f32>(2.0f, 2.0f), 2.0f);
- WrapInFunction(cast);
+TEST_F(SpvBuilderConstructorTest, Type_Vec4_With_F32_F32_Vec2_Const) {
+ auto* cast = vec4<f32>(1_f, 2_f, vec2<f32>(3_f, 4_f));
+ WrapInFunction(cast);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- EXPECT_EQ(b.GenerateExpression(cast), 8u);
+ b.push_function(Function{});
+ EXPECT_EQ(b.GenerateExpression(cast), 7u);
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
%1 = OpTypeVector %2 4
+%3 = OpConstant %2 1
+%4 = OpConstant %2 2
+%5 = OpConstant %2 3
+%6 = OpConstant %2 4
+%7 = OpConstantComposite %1 %3 %4 %5 %6
+)");
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()), R"()");
+}
+
+TEST_F(SpvBuilderConstructorTest, Type_Vec4_With_F32_Vec2_F32) {
+ auto* var = Decl(Var("x", ty.vec2<f32>(), vec2<f32>(2_f, 3_f)));
+ auto* cast = vec4<f32>(1_f, "x", 4_f);
+ WrapInFunction(var, cast);
+
+ spirv::Builder& b = Build();
+
+ b.push_function(Function{});
+ EXPECT_TRUE(b.GenerateStatement(var));
+ EXPECT_EQ(b.GenerateExpression(cast), 15u);
+
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
+%1 = OpTypeVector %2 2
%3 = OpConstant %2 2
-%4 = OpTypeVector %2 2
-%5 = OpConstantComposite %4 %3 %3
+%4 = OpConstant %2 3
+%5 = OpConstantComposite %1 %3 %4
+%7 = OpTypePointer Function %1
+%8 = OpConstantNull %1
+%9 = OpTypeVector %2 4
+%10 = OpConstant %2 1
+%14 = OpConstant %2 4
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(%6 = OpCompositeExtract %2 %5 0
-%7 = OpCompositeExtract %2 %5 1
-%8 = OpCompositeConstruct %1 %3 %6 %7 %3
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(OpStore %6 %5
+%11 = OpLoad %1 %6
+%12 = OpCompositeExtract %2 %11 0
+%13 = OpCompositeExtract %2 %11 1
+%15 = OpCompositeConstruct %9 %10 %12 %13 %14
)");
}
-TEST_F(SpvBuilderConstructorTest, Type_Vec4_With_Vec2_F32_F32) {
- auto* cast = vec4<f32>(vec2<f32>(2.0f, 2.0f), 2.0f, 2.0f);
- WrapInFunction(cast);
+TEST_F(SpvBuilderConstructorTest, Type_Vec4_With_F32_Vec2_F32_Const) {
+ auto* cast = vec4<f32>(1_f, vec2<f32>(2_f, 3_f), 4_f);
+ WrapInFunction(cast);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- EXPECT_EQ(b.GenerateExpression(cast), 8u);
+ b.push_function(Function{});
+ EXPECT_EQ(b.GenerateExpression(cast), 7u);
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
%1 = OpTypeVector %2 4
-%3 = OpTypeVector %2 2
+%3 = OpConstant %2 1
%4 = OpConstant %2 2
-%5 = OpConstantComposite %3 %4 %4
+%5 = OpConstant %2 3
+%6 = OpConstant %2 4
+%7 = OpConstantComposite %1 %3 %4 %5 %6
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(%6 = OpCompositeExtract %2 %5 0
-%7 = OpCompositeExtract %2 %5 1
-%8 = OpCompositeConstruct %1 %6 %7 %4 %4
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()), R"()");
+}
+
+TEST_F(SpvBuilderConstructorTest, Type_Vec4_With_Vec2_F32_F32) {
+ auto* var = Decl(Var("x", ty.vec2<f32>(), vec2<f32>(1_f, 2_f)));
+ auto* cast = vec4<f32>("x", 3_f, 4_f);
+ WrapInFunction(var, cast);
+
+ spirv::Builder& b = Build();
+
+ b.push_function(Function{});
+ EXPECT_TRUE(b.GenerateStatement(var));
+ EXPECT_EQ(b.GenerateExpression(cast), 15u);
+
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
+%1 = OpTypeVector %2 2
+%3 = OpConstant %2 1
+%4 = OpConstant %2 2
+%5 = OpConstantComposite %1 %3 %4
+%7 = OpTypePointer Function %1
+%8 = OpConstantNull %1
+%9 = OpTypeVector %2 4
+%13 = OpConstant %2 3
+%14 = OpConstant %2 4
+)");
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(OpStore %6 %5
+%10 = OpLoad %1 %6
+%11 = OpCompositeExtract %2 %10 0
+%12 = OpCompositeExtract %2 %10 1
+%15 = OpCompositeConstruct %9 %11 %12 %13 %14
)");
}
-TEST_F(SpvBuilderConstructorTest, Type_Vec4_With_Vec2_Vec2) {
- auto* cast = vec4<f32>(vec2<f32>(2.0f, 2.0f), vec2<f32>(2.0f, 2.0f));
- WrapInFunction(cast);
+TEST_F(SpvBuilderConstructorTest, Type_Vec4_With_Vec2_F32_F32_Const) {
+ auto* cast = vec4<f32>(vec2<f32>(1_f, 2_f), 3_f, 4_f);
+ WrapInFunction(cast);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- EXPECT_EQ(b.GenerateExpression(cast), 10u);
+ b.push_function(Function{});
+ EXPECT_EQ(b.GenerateExpression(cast), 7u);
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
%1 = OpTypeVector %2 4
-%3 = OpTypeVector %2 2
+%3 = OpConstant %2 1
%4 = OpConstant %2 2
-%5 = OpConstantComposite %3 %4 %4
+%5 = OpConstant %2 3
+%6 = OpConstant %2 4
+%7 = OpConstantComposite %1 %3 %4 %5 %6
+)");
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()), R"()");
+}
+
+TEST_F(SpvBuilderConstructorTest, Type_Vec4_With_Vec2_Vec2) {
+ auto* var = Decl(Var("x", ty.vec2<f32>(), vec2<f32>(1_f, 2_f)));
+ auto* cast = vec4<f32>("x", "x");
+ WrapInFunction(var, cast);
+
+ spirv::Builder& b = Build();
+
+ b.push_function(Function{});
+ EXPECT_TRUE(b.GenerateStatement(var));
+ EXPECT_EQ(b.GenerateExpression(cast), 16u);
+
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
+%1 = OpTypeVector %2 2
+%3 = OpConstant %2 1
+%4 = OpConstant %2 2
+%5 = OpConstantComposite %1 %3 %4
+%7 = OpTypePointer Function %1
+%8 = OpConstantNull %1
+%9 = OpTypeVector %2 4
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(%6 = OpCompositeExtract %2 %5 0
-%7 = OpCompositeExtract %2 %5 1
-%8 = OpCompositeExtract %2 %5 0
-%9 = OpCompositeExtract %2 %5 1
-%10 = OpCompositeConstruct %1 %6 %7 %8 %9
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(OpStore %6 %5
+%10 = OpLoad %1 %6
+%11 = OpCompositeExtract %2 %10 0
+%12 = OpCompositeExtract %2 %10 1
+%13 = OpLoad %1 %6
+%14 = OpCompositeExtract %2 %13 0
+%15 = OpCompositeExtract %2 %13 1
+%16 = OpCompositeConstruct %9 %11 %12 %14 %15
)");
}
-TEST_F(SpvBuilderConstructorTest, Type_Vec4_With_F32_Vec3) {
- auto* cast = vec4<f32>(2.0f, vec3<f32>(2.0f, 2.0f, 2.0f));
- WrapInFunction(cast);
+TEST_F(SpvBuilderConstructorTest, Type_Vec4_With_Vec2_Vec2_Const) {
+ auto* cast = vec4<f32>(vec2<f32>(1_f, 2_f), vec2<f32>(1_f, 2_f));
+ WrapInFunction(cast);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- EXPECT_EQ(b.GenerateExpression(cast), 9u);
+ b.push_function(Function{});
+ EXPECT_EQ(b.GenerateExpression(cast), 5u);
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
%1 = OpTypeVector %2 4
+%3 = OpConstant %2 1
+%4 = OpConstant %2 2
+%5 = OpConstantComposite %1 %3 %4 %3 %4
+)");
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()), R"()");
+}
+
+TEST_F(SpvBuilderConstructorTest, Type_Vec4_With_F32_Vec3) {
+ auto* var = Decl(Var("x", ty.vec3<f32>(), vec3<f32>(2_f, 2_f, 2_f)));
+ auto* cast = vec4<f32>(2_f, "x");
+ WrapInFunction(var, cast);
+
+ spirv::Builder& b = Build();
+
+ b.push_function(Function{});
+ EXPECT_TRUE(b.GenerateStatement(var));
+ EXPECT_EQ(b.GenerateExpression(cast), 13u);
+
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
+%1 = OpTypeVector %2 3
%3 = OpConstant %2 2
-%4 = OpTypeVector %2 3
-%5 = OpConstantComposite %4 %3 %3 %3
+%4 = OpConstantComposite %1 %3 %3 %3
+%6 = OpTypePointer Function %1
+%7 = OpConstantNull %1
+%8 = OpTypeVector %2 4
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(%6 = OpCompositeExtract %2 %5 0
-%7 = OpCompositeExtract %2 %5 1
-%8 = OpCompositeExtract %2 %5 2
-%9 = OpCompositeConstruct %1 %3 %6 %7 %8
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(OpStore %5 %4
+%9 = OpLoad %1 %5
+%10 = OpCompositeExtract %2 %9 0
+%11 = OpCompositeExtract %2 %9 1
+%12 = OpCompositeExtract %2 %9 2
+%13 = OpCompositeConstruct %8 %3 %10 %11 %12
)");
}
-TEST_F(SpvBuilderConstructorTest, Type_Vec4_With_Vec3_F32) {
- auto* cast = vec4<f32>(vec3<f32>(2.0f, 2.0f, 2.0f), 2.0f);
- WrapInFunction(cast);
+TEST_F(SpvBuilderConstructorTest, Type_Vec4_With_F32_Vec3_Const) {
+ auto* cast = vec4<f32>(2_f, vec3<f32>(2_f, 2_f, 2_f));
+ WrapInFunction(cast);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- EXPECT_EQ(b.GenerateExpression(cast), 9u);
+ b.push_function(Function{});
+ EXPECT_EQ(b.GenerateExpression(cast), 4u);
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
%1 = OpTypeVector %2 4
-%3 = OpTypeVector %2 3
-%4 = OpConstant %2 2
-%5 = OpConstantComposite %3 %4 %4 %4
+%3 = OpConstant %2 2
+%4 = OpConstantComposite %1 %3 %3 %3 %3
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(%6 = OpCompositeExtract %2 %5 0
-%7 = OpCompositeExtract %2 %5 1
-%8 = OpCompositeExtract %2 %5 2
-%9 = OpCompositeConstruct %1 %6 %7 %8 %4
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()), R"()");
+}
+
+TEST_F(SpvBuilderConstructorTest, Type_Vec4_With_Vec3_F32) {
+ auto* var = Decl(Var("x", ty.vec3<f32>(), vec3<f32>(2_f, 2_f, 2_f)));
+ auto* cast = vec4<f32>("x", 2_f);
+ WrapInFunction(var, cast);
+
+ spirv::Builder& b = Build();
+
+ b.push_function(Function{});
+ EXPECT_TRUE(b.GenerateStatement(var));
+ EXPECT_EQ(b.GenerateExpression(cast), 13u);
+
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
+%1 = OpTypeVector %2 3
+%3 = OpConstant %2 2
+%4 = OpConstantComposite %1 %3 %3 %3
+%6 = OpTypePointer Function %1
+%7 = OpConstantNull %1
+%8 = OpTypeVector %2 4
+)");
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(OpStore %5 %4
+%9 = OpLoad %1 %5
+%10 = OpCompositeExtract %2 %9 0
+%11 = OpCompositeExtract %2 %9 1
+%12 = OpCompositeExtract %2 %9 2
+%13 = OpCompositeConstruct %8 %10 %11 %12 %3
+)");
+}
+
+TEST_F(SpvBuilderConstructorTest, Type_Vec4_With_Vec3_F32_Const) {
+ auto* cast = vec4<f32>(vec3<f32>(2_f, 2_f, 2_f), 2_f);
+ WrapInFunction(cast);
+
+ spirv::Builder& b = Build();
+
+ b.push_function(Function{});
+ EXPECT_EQ(b.GenerateExpression(cast), 4u);
+
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
+%1 = OpTypeVector %2 4
+%3 = OpConstant %2 2
+%4 = OpConstantComposite %1 %3 %3 %3 %3
)");
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()), R"()");
}
TEST_F(SpvBuilderConstructorTest, Type_Vec4_With_Vec4) {
- auto* value = vec4<f32>(2.0f, 2.0f, 2.0f, 2.0f);
- auto* cast = vec4<f32>(value);
- WrapInFunction(cast);
+ auto* value = vec4<f32>(2_f, 2_f, 2_f, 2_f);
+ auto* cast = vec4<f32>(value);
+ WrapInFunction(cast);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- EXPECT_EQ(b.GenerateExpression(cast), 5u);
+ b.push_function(Function{});
+ EXPECT_EQ(b.GenerateExpression(cast), 4u);
- EXPECT_EQ(DumpInstructions(b.types()), R"(%3 = OpTypeFloat 32
-%2 = OpTypeVector %3 4
-%4 = OpConstant %3 2
-%5 = OpConstantComposite %2 %4 %4 %4 %4
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
+%1 = OpTypeVector %2 4
+%3 = OpConstant %2 2
+%4 = OpConstantComposite %1 %3 %3 %3 %3
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()), R"()");
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()), R"()");
}
TEST_F(SpvBuilderConstructorTest, Type_ModuleScope_F32_With_F32) {
- auto* ctor = Construct<f32>(2.0f);
- GlobalConst("g", ty.f32(), ctor);
+ auto* ctor = Construct<f32>(2_f);
+ GlobalConst("g", ty.f32(), ctor);
- spirv::Builder& b = SanitizeAndBuild();
- ASSERT_TRUE(b.Build());
+ spirv::Builder& b = SanitizeAndBuild();
+ ASSERT_TRUE(b.Build());
- EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeFloat 32
%2 = OpConstant %1 2
%4 = OpTypeVoid
%3 = OpTypeFunction %4
)");
- Validate(b);
+ Validate(b);
}
TEST_F(SpvBuilderConstructorTest, Type_ModuleScope_U32_With_F32) {
- auto* ctor = Construct<u32>(1.5f);
- GlobalConst("g", ty.u32(), ctor);
+ auto* ctor = Construct<u32>(1.5_f);
+ GlobalConst("g", ty.u32(), ctor);
- spirv::Builder& b = SanitizeAndBuild();
- ASSERT_TRUE(b.Build());
+ spirv::Builder& b = SanitizeAndBuild();
+ ASSERT_TRUE(b.Build());
- EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeInt 32 0
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeInt 32 0
%2 = OpConstant %1 1
%4 = OpTypeVoid
%3 = OpTypeFunction %4
)");
- Validate(b);
+ Validate(b);
}
TEST_F(SpvBuilderConstructorTest, Type_ModuleScope_Vec2_With_F32) {
- auto* cast = vec2<f32>(2.0f);
- auto* g = Global("g", ty.vec2<f32>(), cast, ast::StorageClass::kPrivate);
+ auto* cast = vec2<f32>(2_f);
+ auto* g = Global("g", ty.vec2<f32>(), cast, ast::StorageClass::kPrivate);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- EXPECT_EQ(b.GenerateConstructorExpression(g, cast), 4u);
+ b.push_function(Function{});
+ EXPECT_EQ(b.GenerateConstructorExpression(g, cast), 4u);
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
%1 = OpTypeVector %2 2
%3 = OpConstant %2 2
%4 = OpConstantComposite %1 %3 %3
@@ -699,13 +1120,13 @@ TEST_F(SpvBuilderConstructorTest, Type_ModuleScope_Vec2_With_F32) {
}
TEST_F(SpvBuilderConstructorTest, Type_ModuleScope_Vec2_With_Vec2) {
- auto* cast = vec2<f32>(vec2<f32>(2.0f, 2.0f));
- GlobalConst("a", ty.vec2<f32>(), cast);
+ auto* cast = vec2<f32>(vec2<f32>(2_f, 2_f));
+ GlobalConst("a", ty.vec2<f32>(), cast);
- spirv::Builder& b = SanitizeAndBuild();
- ASSERT_TRUE(b.Build());
+ spirv::Builder& b = SanitizeAndBuild();
+ ASSERT_TRUE(b.Build());
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
%1 = OpTypeVector %2 2
%3 = OpConstant %2 2
%4 = OpConstantComposite %1 %3 %3
@@ -713,17 +1134,17 @@ TEST_F(SpvBuilderConstructorTest, Type_ModuleScope_Vec2_With_Vec2) {
%5 = OpTypeFunction %6
)");
- Validate(b);
+ Validate(b);
}
TEST_F(SpvBuilderConstructorTest, Type_ModuleScope_Vec3_With_Vec3) {
- auto* cast = vec3<f32>(vec3<f32>(2.0f, 2.0f, 2.0f));
- GlobalConst("a", ty.vec3<f32>(), cast);
+ auto* cast = vec3<f32>(vec3<f32>(2_f, 2_f, 2_f));
+ GlobalConst("a", ty.vec3<f32>(), cast);
- spirv::Builder& b = SanitizeAndBuild();
- ASSERT_TRUE(b.Build());
+ spirv::Builder& b = SanitizeAndBuild();
+ ASSERT_TRUE(b.Build());
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
%1 = OpTypeVector %2 3
%3 = OpConstant %2 2
%4 = OpConstantComposite %1 %3 %3 %3
@@ -731,17 +1152,17 @@ TEST_F(SpvBuilderConstructorTest, Type_ModuleScope_Vec3_With_Vec3) {
%5 = OpTypeFunction %6
)");
- Validate(b);
+ Validate(b);
}
TEST_F(SpvBuilderConstructorTest, Type_ModuleScope_Vec4_With_Vec4) {
- auto* cast = vec4<f32>(vec4<f32>(2.0f, 2.0f, 2.0f, 2.0f));
- GlobalConst("a", ty.vec4<f32>(), cast);
+ auto* cast = vec4<f32>(vec4<f32>(2_f, 2_f, 2_f, 2_f));
+ GlobalConst("a", ty.vec4<f32>(), cast);
- spirv::Builder& b = SanitizeAndBuild();
- ASSERT_TRUE(b.Build());
+ spirv::Builder& b = SanitizeAndBuild();
+ ASSERT_TRUE(b.Build());
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
%1 = OpTypeVector %2 4
%3 = OpConstant %2 2
%4 = OpConstantComposite %1 %3 %3 %3 %3
@@ -749,19 +1170,19 @@ TEST_F(SpvBuilderConstructorTest, Type_ModuleScope_Vec4_With_Vec4) {
%5 = OpTypeFunction %6
)");
- Validate(b);
+ Validate(b);
}
TEST_F(SpvBuilderConstructorTest, Type_ModuleScope_Vec3_With_F32) {
- auto* cast = vec3<f32>(2.0f);
- auto* g = Global("g", ty.vec3<f32>(), cast, ast::StorageClass::kPrivate);
+ auto* cast = vec3<f32>(2_f);
+ auto* g = Global("g", ty.vec3<f32>(), cast, ast::StorageClass::kPrivate);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- EXPECT_EQ(b.GenerateConstructorExpression(g, cast), 4u);
+ b.push_function(Function{});
+ EXPECT_EQ(b.GenerateConstructorExpression(g, cast), 4u);
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
%1 = OpTypeVector %2 3
%3 = OpConstant %2 2
%4 = OpConstantComposite %1 %3 %3 %3
@@ -769,15 +1190,15 @@ TEST_F(SpvBuilderConstructorTest, Type_ModuleScope_Vec3_With_F32) {
}
TEST_F(SpvBuilderConstructorTest, Type_ModuleScope_Vec3_With_F32_Vec2) {
- auto* cast = vec3<f32>(2.0f, vec2<f32>(2.0f, 2.0f));
- auto* g = Global("g", ty.vec3<f32>(), cast, ast::StorageClass::kPrivate);
+ auto* cast = vec3<f32>(2_f, vec2<f32>(2_f, 2_f));
+ auto* g = Global("g", ty.vec3<f32>(), cast, ast::StorageClass::kPrivate);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- EXPECT_EQ(b.GenerateConstructorExpression(g, cast), 11u);
+ b.push_function(Function{});
+ EXPECT_EQ(b.GenerateConstructorExpression(g, cast), 11u);
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
%1 = OpTypeVector %2 3
%3 = OpConstant %2 2
%4 = OpTypeVector %2 2
@@ -792,15 +1213,15 @@ TEST_F(SpvBuilderConstructorTest, Type_ModuleScope_Vec3_With_F32_Vec2) {
}
TEST_F(SpvBuilderConstructorTest, Type_ModuleScope_Vec3_With_Vec2_F32) {
- auto* cast = vec3<f32>(vec2<f32>(2.0f, 2.0f), 2.0f);
- auto* g = Global("g", ty.vec3<f32>(), cast, ast::StorageClass::kPrivate);
+ auto* cast = vec3<f32>(vec2<f32>(2_f, 2_f), 2_f);
+ auto* g = Global("g", ty.vec3<f32>(), cast, ast::StorageClass::kPrivate);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- EXPECT_EQ(b.GenerateConstructorExpression(g, cast), 11u);
+ b.push_function(Function{});
+ EXPECT_EQ(b.GenerateConstructorExpression(g, cast), 11u);
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
%1 = OpTypeVector %2 3
%3 = OpTypeVector %2 2
%4 = OpConstant %2 2
@@ -815,15 +1236,15 @@ TEST_F(SpvBuilderConstructorTest, Type_ModuleScope_Vec3_With_Vec2_F32) {
}
TEST_F(SpvBuilderConstructorTest, Type_ModuleScope_Vec4_With_F32) {
- auto* cast = vec4<f32>(2.0f);
- auto* g = Global("g", ty.vec4<f32>(), cast, ast::StorageClass::kPrivate);
+ auto* cast = vec4<f32>(2_f);
+ auto* g = Global("g", ty.vec4<f32>(), cast, ast::StorageClass::kPrivate);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- EXPECT_EQ(b.GenerateConstructorExpression(g, cast), 4u);
+ b.push_function(Function{});
+ EXPECT_EQ(b.GenerateConstructorExpression(g, cast), 4u);
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
%1 = OpTypeVector %2 4
%3 = OpConstant %2 2
%4 = OpConstantComposite %1 %3 %3 %3 %3
@@ -831,15 +1252,15 @@ TEST_F(SpvBuilderConstructorTest, Type_ModuleScope_Vec4_With_F32) {
}
TEST_F(SpvBuilderConstructorTest, Type_ModuleScope_Vec4_With_F32_F32_Vec2) {
- auto* cast = vec4<f32>(2.0f, 2.0f, vec2<f32>(2.0f, 2.0f));
- auto* g = Global("g", ty.vec4<f32>(), cast, ast::StorageClass::kPrivate);
+ auto* cast = vec4<f32>(2_f, 2_f, vec2<f32>(2_f, 2_f));
+ auto* g = Global("g", ty.vec4<f32>(), cast, ast::StorageClass::kPrivate);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- EXPECT_EQ(b.GenerateConstructorExpression(g, cast), 11u);
+ b.push_function(Function{});
+ EXPECT_EQ(b.GenerateConstructorExpression(g, cast), 11u);
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
%1 = OpTypeVector %2 4
%3 = OpConstant %2 2
%4 = OpTypeVector %2 2
@@ -854,15 +1275,15 @@ TEST_F(SpvBuilderConstructorTest, Type_ModuleScope_Vec4_With_F32_F32_Vec2) {
}
TEST_F(SpvBuilderConstructorTest, Type_ModuleScope_Vec4_With_F32_Vec2_F32) {
- auto* cast = vec4<f32>(2.0f, vec2<f32>(2.0f, 2.0f), 2.0f);
- auto* g = Global("g", ty.vec4<f32>(), cast, ast::StorageClass::kPrivate);
+ auto* cast = vec4<f32>(2_f, vec2<f32>(2_f, 2_f), 2_f);
+ auto* g = Global("g", ty.vec4<f32>(), cast, ast::StorageClass::kPrivate);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- EXPECT_EQ(b.GenerateConstructorExpression(g, cast), 11u);
+ b.push_function(Function{});
+ EXPECT_EQ(b.GenerateConstructorExpression(g, cast), 11u);
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
%1 = OpTypeVector %2 4
%3 = OpConstant %2 2
%4 = OpTypeVector %2 2
@@ -877,15 +1298,15 @@ TEST_F(SpvBuilderConstructorTest, Type_ModuleScope_Vec4_With_F32_Vec2_F32) {
}
TEST_F(SpvBuilderConstructorTest, Type_ModuleScope_Vec4_With_Vec2_F32_F32) {
- auto* cast = vec4<f32>(vec2<f32>(2.0f, 2.0f), 2.0f, 2.0f);
- auto* g = Global("g", ty.vec4<f32>(), cast, ast::StorageClass::kPrivate);
+ auto* cast = vec4<f32>(vec2<f32>(2_f, 2_f), 2_f, 2_f);
+ auto* g = Global("g", ty.vec4<f32>(), cast, ast::StorageClass::kPrivate);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- EXPECT_EQ(b.GenerateConstructorExpression(g, cast), 11u);
+ b.push_function(Function{});
+ EXPECT_EQ(b.GenerateConstructorExpression(g, cast), 11u);
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
%1 = OpTypeVector %2 4
%3 = OpTypeVector %2 2
%4 = OpConstant %2 2
@@ -900,15 +1321,15 @@ TEST_F(SpvBuilderConstructorTest, Type_ModuleScope_Vec4_With_Vec2_F32_F32) {
}
TEST_F(SpvBuilderConstructorTest, Type_ModuleScope_Vec4_With_Vec2_Vec2) {
- auto* cast = vec4<f32>(vec2<f32>(2.0f, 2.0f), vec2<f32>(2.0f, 2.0f));
- auto* g = Global("g", ty.vec4<f32>(), cast, ast::StorageClass::kPrivate);
+ auto* cast = vec4<f32>(vec2<f32>(2_f, 2_f), vec2<f32>(2_f, 2_f));
+ auto* g = Global("g", ty.vec4<f32>(), cast, ast::StorageClass::kPrivate);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- EXPECT_EQ(b.GenerateConstructorExpression(g, cast), 13u);
+ b.push_function(Function{});
+ EXPECT_EQ(b.GenerateConstructorExpression(g, cast), 13u);
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
%1 = OpTypeVector %2 4
%3 = OpTypeVector %2 2
%4 = OpConstant %2 2
@@ -925,15 +1346,15 @@ TEST_F(SpvBuilderConstructorTest, Type_ModuleScope_Vec4_With_Vec2_Vec2) {
}
TEST_F(SpvBuilderConstructorTest, Type_ModuleScope_Vec4_With_F32_Vec3) {
- auto* cast = vec4<f32>(2.0f, vec3<f32>(2.0f, 2.0f, 2.0f));
- auto* g = Global("g", ty.vec4<f32>(), cast, ast::StorageClass::kPrivate);
+ auto* cast = vec4<f32>(2_f, vec3<f32>(2_f, 2_f, 2_f));
+ auto* g = Global("g", ty.vec4<f32>(), cast, ast::StorageClass::kPrivate);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- EXPECT_EQ(b.GenerateConstructorExpression(g, cast), 13u);
+ b.push_function(Function{});
+ EXPECT_EQ(b.GenerateConstructorExpression(g, cast), 13u);
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
%1 = OpTypeVector %2 4
%3 = OpConstant %2 2
%4 = OpTypeVector %2 3
@@ -950,15 +1371,15 @@ TEST_F(SpvBuilderConstructorTest, Type_ModuleScope_Vec4_With_F32_Vec3) {
}
TEST_F(SpvBuilderConstructorTest, Type_ModuleScope_Vec4_With_Vec3_F32) {
- auto* cast = vec4<f32>(vec3<f32>(2.0f, 2.0f, 2.0f), 2.0f);
- auto* g = Global("g", ty.vec4<f32>(), cast, ast::StorageClass::kPrivate);
+ auto* cast = vec4<f32>(vec3<f32>(2_f, 2_f, 2_f), 2_f);
+ auto* g = Global("g", ty.vec4<f32>(), cast, ast::StorageClass::kPrivate);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- EXPECT_EQ(b.GenerateConstructorExpression(g, cast), 13u);
+ b.push_function(Function{});
+ EXPECT_EQ(b.GenerateConstructorExpression(g, cast), 13u);
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
%1 = OpTypeVector %2 4
%3 = OpTypeVector %2 3
%4 = OpConstant %2 2
@@ -975,15 +1396,15 @@ TEST_F(SpvBuilderConstructorTest, Type_ModuleScope_Vec4_With_Vec3_F32) {
}
TEST_F(SpvBuilderConstructorTest, Type_Mat2x2_With_Vec2_Vec2) {
- auto* cast = mat2x2<f32>(vec2<f32>(2.0f, 2.0f), vec2<f32>(2.0f, 2.0f));
- WrapInFunction(cast);
+ auto* cast = mat2x2<f32>(vec2<f32>(2_f, 2_f), vec2<f32>(2_f, 2_f));
+ WrapInFunction(cast);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- EXPECT_EQ(b.GenerateExpression(cast), 6u);
+ b.push_function(Function{});
+ EXPECT_EQ(b.GenerateExpression(cast), 6u);
- EXPECT_EQ(DumpInstructions(b.types()), R"(%3 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%3 = OpTypeFloat 32
%2 = OpTypeVector %3 2
%1 = OpTypeMatrix %2 2
%4 = OpConstant %3 2
@@ -993,16 +1414,15 @@ TEST_F(SpvBuilderConstructorTest, Type_Mat2x2_With_Vec2_Vec2) {
}
TEST_F(SpvBuilderConstructorTest, Type_Mat3x2_With_Vec2_Vec2_Vec2) {
- auto* cast = mat3x2<f32>(vec2<f32>(2.0f, 2.0f), vec2<f32>(2.0f, 2.0f),
- vec2<f32>(2.0f, 2.0f));
- WrapInFunction(cast);
+ auto* cast = mat3x2<f32>(vec2<f32>(2_f, 2_f), vec2<f32>(2_f, 2_f), vec2<f32>(2_f, 2_f));
+ WrapInFunction(cast);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- EXPECT_EQ(b.GenerateExpression(cast), 6u);
+ b.push_function(Function{});
+ EXPECT_EQ(b.GenerateExpression(cast), 6u);
- EXPECT_EQ(DumpInstructions(b.types()), R"(%3 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%3 = OpTypeFloat 32
%2 = OpTypeVector %3 2
%1 = OpTypeMatrix %2 3
%4 = OpConstant %3 2
@@ -1012,16 +1432,16 @@ TEST_F(SpvBuilderConstructorTest, Type_Mat3x2_With_Vec2_Vec2_Vec2) {
}
TEST_F(SpvBuilderConstructorTest, Type_Mat4x2_With_Vec2_Vec2_Vec2_Vec2) {
- auto* cast = mat4x2<f32>(vec2<f32>(2.0f, 2.0f), vec2<f32>(2.0f, 2.0f),
- vec2<f32>(2.0f, 2.0f), vec2<f32>(2.0f, 2.0f));
- WrapInFunction(cast);
+ auto* cast = mat4x2<f32>(vec2<f32>(2_f, 2_f), vec2<f32>(2_f, 2_f), vec2<f32>(2_f, 2_f),
+ vec2<f32>(2_f, 2_f));
+ WrapInFunction(cast);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- EXPECT_EQ(b.GenerateExpression(cast), 6u);
+ b.push_function(Function{});
+ EXPECT_EQ(b.GenerateExpression(cast), 6u);
- EXPECT_EQ(DumpInstructions(b.types()), R"(%3 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%3 = OpTypeFloat 32
%2 = OpTypeVector %3 2
%1 = OpTypeMatrix %2 4
%4 = OpConstant %3 2
@@ -1031,16 +1451,15 @@ TEST_F(SpvBuilderConstructorTest, Type_Mat4x2_With_Vec2_Vec2_Vec2_Vec2) {
}
TEST_F(SpvBuilderConstructorTest, Type_Mat2x3_With_Vec3_Vec3) {
- auto* cast =
- mat2x3<f32>(vec3<f32>(2.0f, 2.0f, 2.0f), vec3<f32>(2.0f, 2.0f, 2.0f));
- WrapInFunction(cast);
+ auto* cast = mat2x3<f32>(vec3<f32>(2_f, 2_f, 2_f), vec3<f32>(2_f, 2_f, 2_f));
+ WrapInFunction(cast);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- EXPECT_EQ(b.GenerateExpression(cast), 6u);
+ b.push_function(Function{});
+ EXPECT_EQ(b.GenerateExpression(cast), 6u);
- EXPECT_EQ(DumpInstructions(b.types()), R"(%3 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%3 = OpTypeFloat 32
%2 = OpTypeVector %3 3
%1 = OpTypeMatrix %2 2
%4 = OpConstant %3 2
@@ -1050,17 +1469,16 @@ TEST_F(SpvBuilderConstructorTest, Type_Mat2x3_With_Vec3_Vec3) {
}
TEST_F(SpvBuilderConstructorTest, Type_Mat3x3_With_Vec3_Vec3_Vec3) {
- auto* cast =
- mat3x3<f32>(vec3<f32>(2.0f, 2.0f, 2.0f), vec3<f32>(2.0f, 2.0f, 2.0f),
- vec3<f32>(2.0f, 2.0f, 2.0f));
- WrapInFunction(cast);
+ auto* cast =
+ mat3x3<f32>(vec3<f32>(2_f, 2_f, 2_f), vec3<f32>(2_f, 2_f, 2_f), vec3<f32>(2_f, 2_f, 2_f));
+ WrapInFunction(cast);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- EXPECT_EQ(b.GenerateExpression(cast), 6u);
+ b.push_function(Function{});
+ EXPECT_EQ(b.GenerateExpression(cast), 6u);
- EXPECT_EQ(DumpInstructions(b.types()), R"(%3 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%3 = OpTypeFloat 32
%2 = OpTypeVector %3 3
%1 = OpTypeMatrix %2 3
%4 = OpConstant %3 2
@@ -1070,17 +1488,16 @@ TEST_F(SpvBuilderConstructorTest, Type_Mat3x3_With_Vec3_Vec3_Vec3) {
}
TEST_F(SpvBuilderConstructorTest, Type_Mat4x3_With_Vec3_Vec3_Vec3_Vec3) {
- auto* cast =
- mat4x3<f32>(vec3<f32>(2.0f, 2.0f, 2.0f), vec3<f32>(2.0f, 2.0f, 2.0f),
- vec3<f32>(2.0f, 2.0f, 2.0f), vec3<f32>(2.0f, 2.0f, 2.0f));
- WrapInFunction(cast);
+ auto* cast = mat4x3<f32>(vec3<f32>(2_f, 2_f, 2_f), vec3<f32>(2_f, 2_f, 2_f),
+ vec3<f32>(2_f, 2_f, 2_f), vec3<f32>(2_f, 2_f, 2_f));
+ WrapInFunction(cast);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- EXPECT_EQ(b.GenerateExpression(cast), 6u);
+ b.push_function(Function{});
+ EXPECT_EQ(b.GenerateExpression(cast), 6u);
- EXPECT_EQ(DumpInstructions(b.types()), R"(%3 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%3 = OpTypeFloat 32
%2 = OpTypeVector %3 3
%1 = OpTypeMatrix %2 4
%4 = OpConstant %3 2
@@ -1090,16 +1507,15 @@ TEST_F(SpvBuilderConstructorTest, Type_Mat4x3_With_Vec3_Vec3_Vec3_Vec3) {
}
TEST_F(SpvBuilderConstructorTest, Type_Mat2x4_With_Vec4_Vec4) {
- auto* cast = mat2x4<f32>(vec4<f32>(2.0f, 2.0f, 2.0f, 2.0f),
- vec4<f32>(2.0f, 2.0f, 2.0f, 2.0f));
- WrapInFunction(cast);
+ auto* cast = mat2x4<f32>(vec4<f32>(2_f, 2_f, 2_f, 2_f), vec4<f32>(2_f, 2_f, 2_f, 2_f));
+ WrapInFunction(cast);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- EXPECT_EQ(b.GenerateExpression(cast), 6u);
+ b.push_function(Function{});
+ EXPECT_EQ(b.GenerateExpression(cast), 6u);
- EXPECT_EQ(DumpInstructions(b.types()), R"(%3 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%3 = OpTypeFloat 32
%2 = OpTypeVector %3 4
%1 = OpTypeMatrix %2 2
%4 = OpConstant %3 2
@@ -1109,17 +1525,16 @@ TEST_F(SpvBuilderConstructorTest, Type_Mat2x4_With_Vec4_Vec4) {
}
TEST_F(SpvBuilderConstructorTest, Type_Mat3x4_With_Vec4_Vec4_Vec4) {
- auto* cast = mat3x4<f32>(vec4<f32>(2.0f, 2.0f, 2.0f, 2.0f),
- vec4<f32>(2.0f, 2.0f, 2.0f, 2.0f),
- vec4<f32>(2.0f, 2.0f, 2.0f, 2.0f));
- WrapInFunction(cast);
+ auto* cast = mat3x4<f32>(vec4<f32>(2_f, 2_f, 2_f, 2_f), vec4<f32>(2_f, 2_f, 2_f, 2_f),
+ vec4<f32>(2_f, 2_f, 2_f, 2_f));
+ WrapInFunction(cast);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- EXPECT_EQ(b.GenerateExpression(cast), 6u);
+ b.push_function(Function{});
+ EXPECT_EQ(b.GenerateExpression(cast), 6u);
- EXPECT_EQ(DumpInstructions(b.types()), R"(%3 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%3 = OpTypeFloat 32
%2 = OpTypeVector %3 4
%1 = OpTypeMatrix %2 3
%4 = OpConstant %3 2
@@ -1129,17 +1544,16 @@ TEST_F(SpvBuilderConstructorTest, Type_Mat3x4_With_Vec4_Vec4_Vec4) {
}
TEST_F(SpvBuilderConstructorTest, Type_Mat4x4_With_Vec4_Vec4_Vec4_Vec4) {
- auto* cast = mat4x4<f32>(
- vec4<f32>(2.0f, 2.0f, 2.0f, 2.0f), vec4<f32>(2.0f, 2.0f, 2.0f, 2.0f),
- vec4<f32>(2.0f, 2.0f, 2.0f, 2.0f), vec4<f32>(2.0f, 2.0f, 2.0f, 2.0f));
- WrapInFunction(cast);
+ auto* cast = mat4x4<f32>(vec4<f32>(2_f, 2_f, 2_f, 2_f), vec4<f32>(2_f, 2_f, 2_f, 2_f),
+ vec4<f32>(2_f, 2_f, 2_f, 2_f), vec4<f32>(2_f, 2_f, 2_f, 2_f));
+ WrapInFunction(cast);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- EXPECT_EQ(b.GenerateExpression(cast), 6u);
+ b.push_function(Function{});
+ EXPECT_EQ(b.GenerateExpression(cast), 6u);
- EXPECT_EQ(DumpInstructions(b.types()), R"(%3 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%3 = OpTypeFloat 32
%2 = OpTypeVector %3 4
%1 = OpTypeMatrix %2 4
%4 = OpConstant %3 2
@@ -1149,15 +1563,15 @@ TEST_F(SpvBuilderConstructorTest, Type_Mat4x4_With_Vec4_Vec4_Vec4_Vec4) {
}
TEST_F(SpvBuilderConstructorTest, Type_Array_5_F32) {
- auto* cast = array<f32, 5>(2.0f, 2.0f, 2.0f, 2.0f, 2.0f);
- WrapInFunction(cast);
+ auto* cast = array<f32, 5>(2_f, 2_f, 2_f, 2_f, 2_f);
+ WrapInFunction(cast);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- EXPECT_EQ(b.GenerateExpression(cast), 6u);
+ b.push_function(Function{});
+ EXPECT_EQ(b.GenerateExpression(cast), 6u);
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
%3 = OpTypeInt 32 0
%4 = OpConstant %3 5
%1 = OpTypeArray %2 %4
@@ -1167,15 +1581,15 @@ TEST_F(SpvBuilderConstructorTest, Type_Array_5_F32) {
}
TEST_F(SpvBuilderConstructorTest, Type_Array_2_Vec3) {
- auto* first = vec3<f32>(1.f, 2.f, 3.f);
- auto* second = vec3<f32>(1.f, 2.f, 3.f);
- auto* t = Construct(ty.array(ty.vec3<f32>(), 2), first, second);
- WrapInFunction(t);
- spirv::Builder& b = Build();
-
- b.push_function(Function{});
- EXPECT_EQ(b.GenerateExpression(t), 10u);
- EXPECT_EQ(DumpInstructions(b.types()), R"(%3 = OpTypeFloat 32
+ auto* first = vec3<f32>(1_f, 2_f, 3_f);
+ auto* second = vec3<f32>(1_f, 2_f, 3_f);
+ auto* t = Construct(ty.array(ty.vec3<f32>(), 2_u), first, second);
+ WrapInFunction(t);
+ spirv::Builder& b = Build();
+
+ b.push_function(Function{});
+ EXPECT_EQ(b.GenerateExpression(t), 10u);
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%3 = OpTypeFloat 32
%2 = OpTypeVector %3 3
%4 = OpTypeInt 32 0
%5 = OpConstant %4 2
@@ -1189,21 +1603,21 @@ TEST_F(SpvBuilderConstructorTest, Type_Array_2_Vec3) {
}
TEST_F(SpvBuilderConstructorTest, CommonInitializer_TwoVectors) {
- auto* v1 = vec3<f32>(2.0f, 2.0f, 2.0f);
- auto* v2 = vec3<f32>(2.0f, 2.0f, 2.0f);
- ast::StatementList stmts = {
- WrapInStatement(v1),
- WrapInStatement(v2),
- };
- WrapInFunction(stmts);
+ auto* v1 = vec3<f32>(2_f, 2_f, 2_f);
+ auto* v2 = vec3<f32>(2_f, 2_f, 2_f);
+ ast::StatementList stmts = {
+ WrapInStatement(v1),
+ WrapInStatement(v2),
+ };
+ WrapInFunction(stmts);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- EXPECT_EQ(b.GenerateExpression(v1), 4u);
- EXPECT_EQ(b.GenerateExpression(v2), 4u);
+ b.push_function(Function{});
+ EXPECT_EQ(b.GenerateExpression(v1), 4u);
+ EXPECT_EQ(b.GenerateExpression(v2), 4u);
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
%1 = OpTypeVector %2 3
%3 = OpConstant %2 2
%4 = OpConstantComposite %1 %3 %3 %3
@@ -1211,21 +1625,21 @@ TEST_F(SpvBuilderConstructorTest, CommonInitializer_TwoVectors) {
}
TEST_F(SpvBuilderConstructorTest, CommonInitializer_TwoArrays) {
- auto* a1 = array<f32, 3>(2.0f, 2.0f, 2.0f);
- auto* a2 = array<f32, 3>(2.0f, 2.0f, 2.0f);
- ast::StatementList stmts = {
- WrapInStatement(a1),
- WrapInStatement(a2),
- };
- WrapInFunction(stmts);
+ auto* a1 = array<f32, 3>(2_f, 2_f, 2_f);
+ auto* a2 = array<f32, 3>(2_f, 2_f, 2_f);
+ ast::StatementList stmts = {
+ WrapInStatement(a1),
+ WrapInStatement(a2),
+ };
+ WrapInFunction(stmts);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- EXPECT_EQ(b.GenerateExpression(a1), 6u);
- EXPECT_EQ(b.GenerateExpression(a2), 6u);
+ b.push_function(Function{});
+ EXPECT_EQ(b.GenerateExpression(a1), 6u);
+ EXPECT_EQ(b.GenerateExpression(a2), 6u);
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
%3 = OpTypeInt 32 0
%4 = OpConstant %3 3
%1 = OpTypeArray %2 %4
@@ -1235,23 +1649,23 @@ TEST_F(SpvBuilderConstructorTest, CommonInitializer_TwoArrays) {
}
TEST_F(SpvBuilderConstructorTest, CommonInitializer_Array_VecArray) {
- // Test that initializers of different types with the same values produce
- // different OpConstantComposite instructions.
- // crbug.com/tint/777
- auto* a1 = array<f32, 2>(1.0f, 2.0f);
- auto* a2 = vec2<f32>(1.0f, 2.0f);
- ast::StatementList stmts = {
- WrapInStatement(a1),
- WrapInStatement(a2),
- };
- WrapInFunction(stmts);
- spirv::Builder& b = Build();
-
- b.push_function(Function{});
- EXPECT_EQ(b.GenerateExpression(a1), 7u);
- EXPECT_EQ(b.GenerateExpression(a2), 9u);
-
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
+ // Test that initializers of different types with the same values produce
+ // different OpConstantComposite instructions.
+ // crbug.com/tint/777
+ auto* a1 = array<f32, 2>(1_f, 2_f);
+ auto* a2 = vec2<f32>(1_f, 2_f);
+ ast::StatementList stmts = {
+ WrapInStatement(a1),
+ WrapInStatement(a2),
+ };
+ WrapInFunction(stmts);
+ spirv::Builder& b = Build();
+
+ b.push_function(Function{});
+ EXPECT_EQ(b.GenerateExpression(a1), 7u);
+ EXPECT_EQ(b.GenerateExpression(a2), 9u);
+
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
%3 = OpTypeInt 32 0
%4 = OpConstant %3 2
%1 = OpTypeArray %2 %4
@@ -1264,22 +1678,22 @@ TEST_F(SpvBuilderConstructorTest, CommonInitializer_Array_VecArray) {
}
TEST_F(SpvBuilderConstructorTest, Type_Struct) {
- auto* s = Structure("my_struct", {
- Member("a", ty.f32()),
- Member("b", ty.vec3<f32>()),
- });
+ auto* s = Structure("my_struct", {
+ Member("a", ty.f32()),
+ Member("b", ty.vec3<f32>()),
+ });
- auto* t = Construct(ty.Of(s), 2.0f, vec3<f32>(2.0f, 2.0f, 2.0f));
- WrapInFunction(t);
+ auto* t = Construct(ty.Of(s), 2_f, vec3<f32>(2_f, 2_f, 2_f));
+ WrapInFunction(t);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
+ b.push_function(Function{});
- EXPECT_EQ(b.GenerateExpression(t), 6u);
- ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(b.GenerateExpression(t), 6u);
+ ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
%3 = OpTypeVector %2 3
%1 = OpTypeStruct %2 %3
%4 = OpConstant %2 2
@@ -1289,104 +1703,104 @@ TEST_F(SpvBuilderConstructorTest, Type_Struct) {
}
TEST_F(SpvBuilderConstructorTest, Type_ZeroInit_F32) {
- auto* t = Construct<f32>();
+ auto* t = Construct<f32>();
- WrapInFunction(t);
+ WrapInFunction(t);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
+ b.push_function(Function{});
- EXPECT_EQ(b.GenerateExpression(t), 2u);
- ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(b.GenerateExpression(t), 2u);
+ ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeFloat 32
%2 = OpConstantNull %1
)");
}
TEST_F(SpvBuilderConstructorTest, Type_ZeroInit_I32) {
- auto* t = Construct<i32>();
+ auto* t = Construct<i32>();
- WrapInFunction(t);
+ WrapInFunction(t);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
+ b.push_function(Function{});
- EXPECT_EQ(b.GenerateExpression(t), 2u);
- ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(b.GenerateExpression(t), 2u);
+ ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeInt 32 1
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeInt 32 1
%2 = OpConstantNull %1
)");
}
TEST_F(SpvBuilderConstructorTest, Type_ZeroInit_U32) {
- auto* t = Construct<u32>();
+ auto* t = Construct<u32>();
- WrapInFunction(t);
+ WrapInFunction(t);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
+ b.push_function(Function{});
- EXPECT_EQ(b.GenerateExpression(t), 2u);
- ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(b.GenerateExpression(t), 2u);
+ ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeInt 32 0
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeInt 32 0
%2 = OpConstantNull %1
)");
}
TEST_F(SpvBuilderConstructorTest, Type_ZeroInit_Bool) {
- auto* t = Construct<bool>();
+ auto* t = Construct<bool>();
- WrapInFunction(t);
+ WrapInFunction(t);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
+ b.push_function(Function{});
- EXPECT_EQ(b.GenerateExpression(t), 2u);
- ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(b.GenerateExpression(t), 2u);
+ ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeBool
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeBool
%2 = OpConstantNull %1
)");
}
TEST_F(SpvBuilderConstructorTest, Type_ZeroInit_Vector) {
- auto* t = vec2<i32>();
+ auto* t = vec2<i32>();
- WrapInFunction(t);
+ WrapInFunction(t);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
+ b.push_function(Function{});
- EXPECT_EQ(b.GenerateExpression(t), 3u);
- ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(b.GenerateExpression(t), 3u);
+ ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeInt 32 1
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeInt 32 1
%1 = OpTypeVector %2 2
%3 = OpConstantNull %1
)");
}
TEST_F(SpvBuilderConstructorTest, Type_ZeroInit_Matrix) {
- auto* t = mat4x2<f32>();
+ auto* t = mat4x2<f32>();
- WrapInFunction(t);
+ WrapInFunction(t);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
+ b.push_function(Function{});
- EXPECT_EQ(b.GenerateExpression(t), 4u);
- ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(b.GenerateExpression(t), 4u);
+ ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%3 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%3 = OpTypeFloat 32
%2 = OpTypeVector %3 2
%1 = OpTypeMatrix %2 4
%4 = OpConstantNull %1
@@ -1394,18 +1808,18 @@ TEST_F(SpvBuilderConstructorTest, Type_ZeroInit_Matrix) {
}
TEST_F(SpvBuilderConstructorTest, Type_ZeroInit_Array) {
- auto* t = array<i32, 2>();
+ auto* t = array<i32, 2>();
- WrapInFunction(t);
+ WrapInFunction(t);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
+ b.push_function(Function{});
- EXPECT_EQ(b.GenerateExpression(t), 5u);
- ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(b.GenerateExpression(t), 5u);
+ ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeInt 32 1
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeInt 32 1
%3 = OpTypeInt 32 0
%4 = OpConstant %3 2
%1 = OpTypeArray %2 %4
@@ -1414,144 +1828,180 @@ TEST_F(SpvBuilderConstructorTest, Type_ZeroInit_Array) {
}
TEST_F(SpvBuilderConstructorTest, Type_ZeroInit_Struct) {
- auto* s = Structure("my_struct", {Member("a", ty.f32())});
- auto* t = Construct(ty.Of(s));
- WrapInFunction(t);
+ auto* s = Structure("my_struct", {Member("a", ty.f32())});
+ auto* t = Construct(ty.Of(s));
+ WrapInFunction(t);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
+ b.push_function(Function{});
- EXPECT_EQ(b.GenerateExpression(t), 3u);
- ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(b.GenerateExpression(t), 3u);
+ ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
%1 = OpTypeStruct %2
%3 = OpConstantNull %1
)");
}
TEST_F(SpvBuilderConstructorTest, Type_Convert_U32_To_I32) {
- auto* cast = Construct<i32>(2u);
- WrapInFunction(cast);
+ auto* var = Decl(Var("x", ty.u32(), Expr(2_u)));
+ auto* cast = Construct<i32>("x");
+ WrapInFunction(var, cast);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- EXPECT_EQ(b.GenerateExpression(cast), 1u);
+ b.push_function(Function{});
+ EXPECT_TRUE(b.GenerateStatement(var)) << b.error();
+ EXPECT_EQ(b.GenerateExpression(cast), 6u) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeInt 32 1
-%3 = OpTypeInt 32 0
-%4 = OpConstant %3 2
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeInt 32 0
+%2 = OpConstant %1 2
+%4 = OpTypePointer Function %1
+%5 = OpConstantNull %1
+%7 = OpTypeInt 32 1
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(%1 = OpBitcast %2 %4
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(OpStore %3 %2
+%8 = OpLoad %1 %3
+%6 = OpBitcast %7 %8
)");
}
TEST_F(SpvBuilderConstructorTest, Type_Convert_I32_To_U32) {
- auto* cast = Construct<u32>(2);
- WrapInFunction(cast);
+ auto* var = Decl(Var("x", ty.i32(), Expr(2_i)));
+ auto* cast = Construct<u32>("x");
+ WrapInFunction(var, cast);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- EXPECT_EQ(b.GenerateExpression(cast), 1u);
+ b.push_function(Function{});
+ EXPECT_TRUE(b.GenerateStatement(var)) << b.error();
+ EXPECT_EQ(b.GenerateExpression(cast), 6u) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeInt 32 0
-%3 = OpTypeInt 32 1
-%4 = OpConstant %3 2
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeInt 32 1
+%2 = OpConstant %1 2
+%4 = OpTypePointer Function %1
+%5 = OpConstantNull %1
+%7 = OpTypeInt 32 0
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(%1 = OpBitcast %2 %4
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(OpStore %3 %2
+%8 = OpLoad %1 %3
+%6 = OpBitcast %7 %8
)");
}
TEST_F(SpvBuilderConstructorTest, Type_Convert_F32_To_I32) {
- auto* cast = Construct<i32>(2.4f);
- WrapInFunction(cast);
+ auto* var = Decl(Var("x", ty.f32(), Expr(2.4_f)));
+ auto* cast = Construct<i32>("x");
+ WrapInFunction(var, cast);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- EXPECT_EQ(b.GenerateExpression(cast), 1u);
+ b.push_function(Function{});
+ EXPECT_TRUE(b.GenerateStatement(var)) << b.error();
+ EXPECT_EQ(b.GenerateExpression(cast), 6u) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeInt 32 1
-%3 = OpTypeFloat 32
-%4 = OpConstant %3 2.4000001
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeFloat 32
+%2 = OpConstant %1 2.4000001
+%4 = OpTypePointer Function %1
+%5 = OpConstantNull %1
+%7 = OpTypeInt 32 1
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(%1 = OpConvertFToS %2 %4
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(OpStore %3 %2
+%8 = OpLoad %1 %3
+%6 = OpConvertFToS %7 %8
)");
}
TEST_F(SpvBuilderConstructorTest, Type_Convert_F32_To_U32) {
- auto* cast = Construct<u32>(2.4f);
- WrapInFunction(cast);
+ auto* var = Decl(Var("x", ty.f32(), Expr(2.4_f)));
+ auto* cast = Construct<u32>("x");
+ WrapInFunction(var, cast);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- EXPECT_EQ(b.GenerateExpression(cast), 1u);
+ b.push_function(Function{});
+ EXPECT_TRUE(b.GenerateStatement(var)) << b.error();
+ EXPECT_EQ(b.GenerateExpression(cast), 6u) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeInt 32 0
-%3 = OpTypeFloat 32
-%4 = OpConstant %3 2.4000001
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeFloat 32
+%2 = OpConstant %1 2.4000001
+%4 = OpTypePointer Function %1
+%5 = OpConstantNull %1
+%7 = OpTypeInt 32 0
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(%1 = OpConvertFToU %2 %4
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(OpStore %3 %2
+%8 = OpLoad %1 %3
+%6 = OpConvertFToU %7 %8
)");
}
TEST_F(SpvBuilderConstructorTest, Type_Convert_I32_To_F32) {
- auto* cast = Construct<f32>(2);
- WrapInFunction(cast);
+ auto* var = Decl(Var("x", ty.i32(), Expr(2_i)));
+ auto* cast = Construct<f32>("x");
+ WrapInFunction(var, cast);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- EXPECT_EQ(b.GenerateExpression(cast), 1u);
+ b.push_function(Function{});
+ EXPECT_TRUE(b.GenerateStatement(var)) << b.error();
+ EXPECT_EQ(b.GenerateExpression(cast), 6u) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
-%3 = OpTypeInt 32 1
-%4 = OpConstant %3 2
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeInt 32 1
+%2 = OpConstant %1 2
+%4 = OpTypePointer Function %1
+%5 = OpConstantNull %1
+%7 = OpTypeFloat 32
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(%1 = OpConvertSToF %2 %4
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(OpStore %3 %2
+%8 = OpLoad %1 %3
+%6 = OpConvertSToF %7 %8
)");
}
TEST_F(SpvBuilderConstructorTest, Type_Convert_U32_To_F32) {
- auto* cast = Construct<f32>(2u);
- WrapInFunction(cast);
+ auto* var = Decl(Var("x", ty.u32(), Expr(2_u)));
+ auto* cast = Construct<f32>("x");
+ WrapInFunction(var, cast);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- EXPECT_EQ(b.GenerateExpression(cast), 1u);
+ b.push_function(Function{});
+ EXPECT_TRUE(b.GenerateStatement(var)) << b.error();
+ EXPECT_EQ(b.GenerateExpression(cast), 6u) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
-%3 = OpTypeInt 32 0
-%4 = OpConstant %3 2
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeInt 32 0
+%2 = OpConstant %1 2
+%4 = OpTypePointer Function %1
+%5 = OpConstantNull %1
+%7 = OpTypeFloat 32
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(%1 = OpConvertUToF %2 %4
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(OpStore %3 %2
+%8 = OpLoad %1 %3
+%6 = OpConvertUToF %7 %8
)");
}
TEST_F(SpvBuilderConstructorTest, Type_Convert_Vectors_U32_to_I32) {
- auto* var = Global("i", ty.vec3<u32>(), ast::StorageClass::kPrivate);
+ auto* var = Global("i", ty.vec3<u32>(), ast::StorageClass::kPrivate);
- auto* cast = vec3<i32>("i");
- WrapInFunction(cast);
+ auto* cast = vec3<i32>("i");
+ WrapInFunction(cast);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- ASSERT_TRUE(b.GenerateGlobalVariable(var)) << b.error();
- EXPECT_EQ(b.GenerateExpression(cast), 6u) << b.error();
+ b.push_function(Function{});
+ ASSERT_TRUE(b.GenerateGlobalVariable(var)) << b.error();
+ EXPECT_EQ(b.GenerateExpression(cast), 6u) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%4 = OpTypeInt 32 0
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%4 = OpTypeInt 32 0
%3 = OpTypeVector %4 3
%2 = OpTypePointer Private %3
%5 = OpConstantNull %3
@@ -1559,25 +2009,25 @@ TEST_F(SpvBuilderConstructorTest, Type_Convert_Vectors_U32_to_I32) {
%8 = OpTypeInt 32 1
%7 = OpTypeVector %8 3
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(%9 = OpLoad %3 %1
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(%9 = OpLoad %3 %1
%6 = OpBitcast %7 %9
)");
}
TEST_F(SpvBuilderConstructorTest, Type_Convert_Vectors_F32_to_I32) {
- auto* var = Global("i", ty.vec3<f32>(), ast::StorageClass::kPrivate);
+ auto* var = Global("i", ty.vec3<f32>(), ast::StorageClass::kPrivate);
- auto* cast = vec3<i32>("i");
- WrapInFunction(cast);
+ auto* cast = vec3<i32>("i");
+ WrapInFunction(cast);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- ASSERT_TRUE(b.GenerateGlobalVariable(var)) << b.error();
- EXPECT_EQ(b.GenerateExpression(cast), 6u) << b.error();
+ b.push_function(Function{});
+ ASSERT_TRUE(b.GenerateGlobalVariable(var)) << b.error();
+ EXPECT_EQ(b.GenerateExpression(cast), 6u) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%4 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%4 = OpTypeFloat 32
%3 = OpTypeVector %4 3
%2 = OpTypePointer Private %3
%5 = OpConstantNull %3
@@ -1585,25 +2035,25 @@ TEST_F(SpvBuilderConstructorTest, Type_Convert_Vectors_F32_to_I32) {
%8 = OpTypeInt 32 1
%7 = OpTypeVector %8 3
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(%9 = OpLoad %3 %1
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(%9 = OpLoad %3 %1
%6 = OpConvertFToS %7 %9
)");
}
TEST_F(SpvBuilderConstructorTest, Type_Convert_Vectors_I32_to_U32) {
- auto* var = Global("i", ty.vec3<i32>(), ast::StorageClass::kPrivate);
+ auto* var = Global("i", ty.vec3<i32>(), ast::StorageClass::kPrivate);
- auto* cast = vec3<u32>("i");
- WrapInFunction(cast);
+ auto* cast = vec3<u32>("i");
+ WrapInFunction(cast);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- ASSERT_TRUE(b.GenerateGlobalVariable(var)) << b.error();
- EXPECT_EQ(b.GenerateExpression(cast), 6u) << b.error();
+ b.push_function(Function{});
+ ASSERT_TRUE(b.GenerateGlobalVariable(var)) << b.error();
+ EXPECT_EQ(b.GenerateExpression(cast), 6u) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%4 = OpTypeInt 32 1
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%4 = OpTypeInt 32 1
%3 = OpTypeVector %4 3
%2 = OpTypePointer Private %3
%5 = OpConstantNull %3
@@ -1611,25 +2061,25 @@ TEST_F(SpvBuilderConstructorTest, Type_Convert_Vectors_I32_to_U32) {
%8 = OpTypeInt 32 0
%7 = OpTypeVector %8 3
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(%9 = OpLoad %3 %1
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(%9 = OpLoad %3 %1
%6 = OpBitcast %7 %9
)");
}
TEST_F(SpvBuilderConstructorTest, Type_Convert_Vectors_F32_to_U32) {
- auto* var = Global("i", ty.vec3<f32>(), ast::StorageClass::kPrivate);
+ auto* var = Global("i", ty.vec3<f32>(), ast::StorageClass::kPrivate);
- auto* cast = vec3<u32>("i");
- WrapInFunction(cast);
+ auto* cast = vec3<u32>("i");
+ WrapInFunction(cast);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- ASSERT_TRUE(b.GenerateGlobalVariable(var)) << b.error();
- EXPECT_EQ(b.GenerateExpression(cast), 6u) << b.error();
+ b.push_function(Function{});
+ ASSERT_TRUE(b.GenerateGlobalVariable(var)) << b.error();
+ EXPECT_EQ(b.GenerateExpression(cast), 6u) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%4 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%4 = OpTypeFloat 32
%3 = OpTypeVector %4 3
%2 = OpTypePointer Private %3
%5 = OpConstantNull %3
@@ -1637,25 +2087,25 @@ TEST_F(SpvBuilderConstructorTest, Type_Convert_Vectors_F32_to_U32) {
%8 = OpTypeInt 32 0
%7 = OpTypeVector %8 3
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(%9 = OpLoad %3 %1
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(%9 = OpLoad %3 %1
%6 = OpConvertFToU %7 %9
)");
}
TEST_F(SpvBuilderConstructorTest, Type_Convert_Vectors_I32_to_F32) {
- auto* var = Global("i", ty.vec3<i32>(), ast::StorageClass::kPrivate);
+ auto* var = Global("i", ty.vec3<i32>(), ast::StorageClass::kPrivate);
- auto* cast = vec3<f32>("i");
- WrapInFunction(cast);
+ auto* cast = vec3<f32>("i");
+ WrapInFunction(cast);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- ASSERT_TRUE(b.GenerateGlobalVariable(var)) << b.error();
- EXPECT_EQ(b.GenerateExpression(cast), 6u) << b.error();
+ b.push_function(Function{});
+ ASSERT_TRUE(b.GenerateGlobalVariable(var)) << b.error();
+ EXPECT_EQ(b.GenerateExpression(cast), 6u) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%4 = OpTypeInt 32 1
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%4 = OpTypeInt 32 1
%3 = OpTypeVector %4 3
%2 = OpTypePointer Private %3
%5 = OpConstantNull %3
@@ -1663,25 +2113,25 @@ TEST_F(SpvBuilderConstructorTest, Type_Convert_Vectors_I32_to_F32) {
%8 = OpTypeFloat 32
%7 = OpTypeVector %8 3
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(%9 = OpLoad %3 %1
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(%9 = OpLoad %3 %1
%6 = OpConvertSToF %7 %9
)");
}
TEST_F(SpvBuilderConstructorTest, Type_Convert_Vectors_U32_to_F32) {
- auto* var = Global("i", ty.vec3<u32>(), ast::StorageClass::kPrivate);
+ auto* var = Global("i", ty.vec3<u32>(), ast::StorageClass::kPrivate);
- auto* cast = vec3<f32>("i");
- WrapInFunction(cast);
+ auto* cast = vec3<f32>("i");
+ WrapInFunction(cast);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- ASSERT_TRUE(b.GenerateGlobalVariable(var)) << b.error();
- EXPECT_EQ(b.GenerateExpression(cast), 6u) << b.error();
+ b.push_function(Function{});
+ ASSERT_TRUE(b.GenerateGlobalVariable(var)) << b.error();
+ EXPECT_EQ(b.GenerateExpression(cast), 6u) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%4 = OpTypeInt 32 0
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%4 = OpTypeInt 32 0
%3 = OpTypeVector %4 3
%2 = OpTypePointer Private %3
%5 = OpConstantNull %3
@@ -1689,166 +2139,250 @@ TEST_F(SpvBuilderConstructorTest, Type_Convert_Vectors_U32_to_F32) {
%8 = OpTypeFloat 32
%7 = OpTypeVector %8 3
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(%9 = OpLoad %3 %1
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(%9 = OpLoad %3 %1
%6 = OpConvertUToF %7 %9
)");
}
-TEST_F(SpvBuilderConstructorTest,
- IsConstructorConst_GlobalVectorWithAllConstConstructors) {
- // vec3<f32>(1.0, 2.0, 3.0) -> true
- auto* t = vec3<f32>(1.f, 2.f, 3.f);
- WrapInFunction(t);
+TEST_F(SpvBuilderConstructorTest, IsConstructorConst_GlobalVectorWithAllConstConstructors) {
+ // vec3<f32>(1.0, 2.0, 3.0) -> true
+ auto* t = vec3<f32>(1_f, 2_f, 3_f);
+ WrapInFunction(t);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- EXPECT_TRUE(b.IsConstructorConst(t));
- EXPECT_FALSE(b.has_error());
+ EXPECT_TRUE(b.IsConstructorConst(t));
+ EXPECT_FALSE(b.has_error());
}
-TEST_F(SpvBuilderConstructorTest,
- IsConstructorConst_GlobalArrayWithAllConstConstructors) {
- // array<vec3<f32>, 2>(vec3<f32>(1.0, 2.0, 3.0), vec3<f32>(1.0, 2.0, 3.0))
- // -> true
- auto* t = Construct(ty.array(ty.vec3<f32>(), 2), vec3<f32>(1.f, 2.f, 3.f),
- vec3<f32>(1.f, 2.f, 3.f));
- WrapInFunction(t);
+TEST_F(SpvBuilderConstructorTest, IsConstructorConst_GlobalArrayWithAllConstConstructors) {
+ // array<vec3<f32>, 2u>(vec3<f32>(1.0, 2.0, 3.0), vec3<f32>(1.0, 2.0, 3.0))
+ // -> true
+ auto* t = Construct(ty.array(ty.vec3<f32>(), 2_u), vec3<f32>(1_f, 2_f, 3_f),
+ vec3<f32>(1_f, 2_f, 3_f));
+ WrapInFunction(t);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- EXPECT_TRUE(b.IsConstructorConst(t));
- EXPECT_FALSE(b.has_error());
+ EXPECT_TRUE(b.IsConstructorConst(t));
+ EXPECT_FALSE(b.has_error());
}
-TEST_F(SpvBuilderConstructorTest,
- IsConstructorConst_GlobalVectorWithMatchingTypeConstructors) {
- // vec2<f32>(f32(1.0), f32(2.0)) -> false
+TEST_F(SpvBuilderConstructorTest, IsConstructorConst_GlobalVectorWithMatchingTypeConstructors) {
+ // vec2<f32>(f32(1.0), f32(2.0)) -> false
- auto* t = vec2<f32>(Construct<f32>(1.f), Construct<f32>(2.f));
- WrapInFunction(t);
+ auto* t = vec2<f32>(Construct<f32>(1_f), Construct<f32>(2_f));
+ WrapInFunction(t);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- EXPECT_TRUE(b.IsConstructorConst(t));
- EXPECT_FALSE(b.has_error());
+ EXPECT_TRUE(b.IsConstructorConst(t));
+ EXPECT_FALSE(b.has_error());
}
-TEST_F(SpvBuilderConstructorTest,
- IsConstructorConst_GlobalWithTypeConversionConstructor) {
- // vec2<f32>(f32(1), f32(2)) -> false
+TEST_F(SpvBuilderConstructorTest, IsConstructorConst_GlobalWithTypeConversionConstructor) {
+ // vec2<f32>(f32(1), f32(2)) -> false
- auto* t = vec2<f32>(Construct<f32>(1), Construct<f32>(2));
- WrapInFunction(t);
+ auto* t = vec2<f32>(Construct<f32>(1_i), Construct<f32>(2_i));
+ WrapInFunction(t);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- EXPECT_FALSE(b.IsConstructorConst(t));
- EXPECT_FALSE(b.has_error());
+ EXPECT_FALSE(b.IsConstructorConst(t));
+ EXPECT_FALSE(b.has_error());
}
-TEST_F(SpvBuilderConstructorTest,
- IsConstructorConst_VectorWithAllConstConstructors) {
- // vec3<f32>(1.0, 2.0, 3.0) -> true
+TEST_F(SpvBuilderConstructorTest, IsConstructorConst_VectorWithAllConstConstructors) {
+ // vec3<f32>(1.0, 2.0, 3.0) -> true
- auto* t = vec3<f32>(1.f, 2.f, 3.f);
- WrapInFunction(t);
+ auto* t = vec3<f32>(1_f, 2_f, 3_f);
+ WrapInFunction(t);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- EXPECT_TRUE(b.IsConstructorConst(t));
- EXPECT_FALSE(b.has_error());
+ EXPECT_TRUE(b.IsConstructorConst(t));
+ EXPECT_FALSE(b.has_error());
}
TEST_F(SpvBuilderConstructorTest, IsConstructorConst_Vector_WithIdent) {
- // vec3<f32>(a, b, c) -> false
+ // vec3<f32>(a, b, c) -> false
- Global("a", ty.f32(), ast::StorageClass::kPrivate);
- Global("b", ty.f32(), ast::StorageClass::kPrivate);
- Global("c", ty.f32(), ast::StorageClass::kPrivate);
+ Global("a", ty.f32(), ast::StorageClass::kPrivate);
+ Global("b", ty.f32(), ast::StorageClass::kPrivate);
+ Global("c", ty.f32(), ast::StorageClass::kPrivate);
- auto* t = vec3<f32>("a", "b", "c");
- WrapInFunction(t);
+ auto* t = vec3<f32>("a", "b", "c");
+ WrapInFunction(t);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- EXPECT_FALSE(b.IsConstructorConst(t));
- EXPECT_FALSE(b.has_error());
+ EXPECT_FALSE(b.IsConstructorConst(t));
+ EXPECT_FALSE(b.has_error());
}
-TEST_F(SpvBuilderConstructorTest,
- IsConstructorConst_ArrayWithAllConstConstructors) {
- // array<vec3<f32>, 2>(vec3<f32>(1.0, 2.0, 3.0), vec3<f32>(1.0, 2.0, 3.0))
- // -> true
+TEST_F(SpvBuilderConstructorTest, IsConstructorConst_ArrayWithAllConstConstructors) {
+ // array<vec3<f32>, 2u>(vec3<f32>(1.0, 2.0, 3.0), vec3<f32>(1.0, 2.0, 3.0))
+ // -> true
- auto* first = vec3<f32>(1.f, 2.f, 3.f);
- auto* second = vec3<f32>(1.f, 2.f, 3.f);
+ auto* first = vec3<f32>(1_f, 2_f, 3_f);
+ auto* second = vec3<f32>(1_f, 2_f, 3_f);
- auto* t = Construct(ty.array(ty.vec3<f32>(), 2), first, second);
- WrapInFunction(t);
+ auto* t = Construct(ty.array(ty.vec3<f32>(), 2_u), first, second);
+ WrapInFunction(t);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- EXPECT_TRUE(b.IsConstructorConst(t));
- EXPECT_FALSE(b.has_error());
+ EXPECT_TRUE(b.IsConstructorConst(t));
+ EXPECT_FALSE(b.has_error());
}
-TEST_F(SpvBuilderConstructorTest,
- IsConstructorConst_VectorWithTypeConversionConstConstructors) {
- // vec2<f32>(f32(1), f32(2)) -> false
+TEST_F(SpvBuilderConstructorTest, IsConstructorConst_VectorWithTypeConversionConstConstructors) {
+ // vec2<f32>(f32(1), f32(2)) -> false
- auto* t = vec2<f32>(Construct<f32>(1), Construct<f32>(2));
- WrapInFunction(t);
+ auto* t = vec2<f32>(Construct<f32>(1_i), Construct<f32>(2_i));
+ WrapInFunction(t);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- EXPECT_FALSE(b.IsConstructorConst(t));
- EXPECT_FALSE(b.has_error());
+ EXPECT_FALSE(b.IsConstructorConst(t));
+ EXPECT_FALSE(b.has_error());
}
TEST_F(SpvBuilderConstructorTest, IsConstructorConst_BitCastScalars) {
- auto* t = vec2<u32>(Construct<u32>(1), Construct<u32>(1));
- WrapInFunction(t);
+ auto* t = vec2<u32>(Construct<u32>(1_i), Construct<u32>(1_i));
+ WrapInFunction(t);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- EXPECT_FALSE(b.IsConstructorConst(t));
- EXPECT_FALSE(b.has_error());
+ EXPECT_FALSE(b.IsConstructorConst(t));
+ EXPECT_FALSE(b.has_error());
}
TEST_F(SpvBuilderConstructorTest, IsConstructorConst_Struct) {
- auto* s = Structure("my_struct", {
- Member("a", ty.f32()),
- Member("b", ty.vec3<f32>()),
- });
-
- auto* t = Construct(ty.Of(s), 2.f, vec3<f32>(2.f, 2.f, 2.f));
- WrapInFunction(t);
-
- spirv::Builder& b = Build();
-
- EXPECT_TRUE(b.IsConstructorConst(t));
- EXPECT_FALSE(b.has_error());
+ auto* s = Structure("my_struct", {
+ Member("a", ty.f32()),
+ Member("b", ty.vec3<f32>()),
+ });
+
+ auto* t = Construct(ty.Of(s), 2_f, vec3<f32>(2_f, 2_f, 2_f));
+ WrapInFunction(t);
+
+ spirv::Builder& b = Build();
+
+ EXPECT_TRUE(b.IsConstructorConst(t));
+ EXPECT_FALSE(b.has_error());
+}
+
+TEST_F(SpvBuilderConstructorTest, IsConstructorConst_Struct_WithIdentSubExpression) {
+ auto* s = Structure("my_struct", {
+ Member("a", ty.f32()),
+ Member("b", ty.vec3<f32>()),
+ });
+
+ Global("a", ty.f32(), ast::StorageClass::kPrivate);
+ Global("b", ty.vec3<f32>(), ast::StorageClass::kPrivate);
+
+ auto* t = Construct(ty.Of(s), "a", "b");
+ WrapInFunction(t);
+
+ spirv::Builder& b = Build();
+
+ EXPECT_FALSE(b.IsConstructorConst(t));
+ EXPECT_FALSE(b.has_error());
+}
+
+TEST_F(SpvBuilderConstructorTest, ConstantCompositeScoping) {
+ // if (true) {
+ // let x = vec3<f32>(1.0, 2.0, 3.0);
+ // }
+ // let y = vec3<f32>(1.0, 2.0, 3.0); // Reuses the ID 'x'
+
+ WrapInFunction(If(true, Block(Decl(Let("x", nullptr, vec3<f32>(1_f, 2_f, 3_f))))),
+ Decl(Let("y", nullptr, vec3<f32>(1_f, 2_f, 3_f))));
+
+ spirv::Builder& b = SanitizeAndBuild();
+ ASSERT_TRUE(b.Build());
+
+ EXPECT_EQ(DumpBuilder(b), R"(OpCapability Shader
+OpMemoryModel Logical GLSL450
+OpEntryPoint GLCompute %3 "test_function"
+OpExecutionMode %3 LocalSize 1 1 1
+OpName %3 "test_function"
+%2 = OpTypeVoid
+%1 = OpTypeFunction %2
+%5 = OpTypeBool
+%6 = OpConstantTrue %5
+%10 = OpTypeFloat 32
+%9 = OpTypeVector %10 3
+%11 = OpConstant %10 1
+%12 = OpConstant %10 2
+%13 = OpConstant %10 3
+%14 = OpConstantComposite %9 %11 %12 %13
+%3 = OpFunction %2 None %1
+%4 = OpLabel
+OpSelectionMerge %7 None
+OpBranchConditional %6 %8 %7
+%8 = OpLabel
+OpBranch %7
+%7 = OpLabel
+OpReturn
+OpFunctionEnd
+)");
+ Validate(b);
+}
+
+// TODO(crbug.com/tint/1155) Implement when overrides are fully implemented.
+// TEST_F(SpvBuilderConstructorTest, SpecConstantCompositeScoping)
+
+TEST_F(SpvBuilderConstructorTest, CompositeConstructScoping) {
+ // var one = 1.0;
+ // if (true) {
+ // let x = vec3<f32>(one, 2.0, 3.0);
+ // }
+ // let y = vec3<f32>(one, 2.0, 3.0); // Mustn't reuse the ID 'x'
+
+ WrapInFunction(Decl(Var("one", nullptr, Expr(1_f))),
+ If(true, Block(Decl(Let("x", nullptr, vec3<f32>("one", 2_f, 3_f))))),
+ Decl(Let("y", nullptr, vec3<f32>("one", 2_f, 3_f))));
+
+ spirv::Builder& b = SanitizeAndBuild();
+ ASSERT_TRUE(b.Build());
+
+ EXPECT_EQ(DumpBuilder(b), R"(OpCapability Shader
+OpMemoryModel Logical GLSL450
+OpEntryPoint GLCompute %3 "test_function"
+OpExecutionMode %3 LocalSize 1 1 1
+OpName %3 "test_function"
+OpName %7 "one"
+%2 = OpTypeVoid
+%1 = OpTypeFunction %2
+%5 = OpTypeFloat 32
+%6 = OpConstant %5 1
+%8 = OpTypePointer Function %5
+%9 = OpConstantNull %5
+%10 = OpTypeBool
+%11 = OpConstantTrue %10
+%14 = OpTypeVector %5 3
+%16 = OpConstant %5 2
+%17 = OpConstant %5 3
+%3 = OpFunction %2 None %1
+%4 = OpLabel
+%7 = OpVariable %8 Function %9
+OpStore %7 %6
+OpSelectionMerge %12 None
+OpBranchConditional %11 %13 %12
+%13 = OpLabel
+%15 = OpLoad %5 %7
+%18 = OpCompositeConstruct %14 %15 %16 %17
+OpBranch %12
+%12 = OpLabel
+%19 = OpLoad %5 %7
+%20 = OpCompositeConstruct %14 %19 %16 %17
+OpReturn
+OpFunctionEnd
+)");
+ Validate(b);
}
-
-TEST_F(SpvBuilderConstructorTest,
- IsConstructorConst_Struct_WithIdentSubExpression) {
- auto* s = Structure("my_struct", {
- Member("a", ty.f32()),
- Member("b", ty.vec3<f32>()),
- });
-
- Global("a", ty.f32(), ast::StorageClass::kPrivate);
- Global("b", ty.vec3<f32>(), ast::StorageClass::kPrivate);
-
- auto* t = Construct(ty.Of(s), "a", "b");
- WrapInFunction(t);
-
- spirv::Builder& b = Build();
-
- EXPECT_FALSE(b.IsConstructorConst(t));
- EXPECT_FALSE(b.has_error());
-}
-
} // namespace
} // namespace tint::writer::spirv
diff --git a/chromium/third_party/dawn/src/tint/writer/spirv/builder_discard_test.cc b/chromium/third_party/dawn/src/tint/writer/spirv/builder_discard_test.cc
index baa566f6910..8c49abe1f19 100644
--- a/chromium/third_party/dawn/src/tint/writer/spirv/builder_discard_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/spirv/builder_discard_test.cc
@@ -21,14 +21,14 @@ namespace {
using BuilderTest = TestHelper;
TEST_F(BuilderTest, Discard) {
- auto* expr = create<ast::DiscardStatement>();
- WrapInFunction(expr);
+ auto* expr = create<ast::DiscardStatement>();
+ WrapInFunction(expr);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- EXPECT_TRUE(b.GenerateStatement(expr)) << b.error();
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()), R"(OpKill
+ b.push_function(Function{});
+ EXPECT_TRUE(b.GenerateStatement(expr)) << b.error();
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()), R"(OpKill
)");
}
diff --git a/chromium/third_party/dawn/src/tint/writer/spirv/builder_entry_point_test.cc b/chromium/third_party/dawn/src/tint/writer/spirv/builder_entry_point_test.cc
index 23c5e675a99..a407aa7fb9b 100644
--- a/chromium/third_party/dawn/src/tint/writer/spirv/builder_entry_point_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/spirv/builder_entry_point_test.cc
@@ -23,41 +23,42 @@
#include "src/tint/ast/storage_class.h"
#include "src/tint/ast/variable.h"
#include "src/tint/program.h"
-#include "src/tint/sem/f32_type.h"
-#include "src/tint/sem/vector_type.h"
+#include "src/tint/sem/f32.h"
+#include "src/tint/sem/vector.h"
#include "src/tint/writer/spirv/builder.h"
#include "src/tint/writer/spirv/spv_dump.h"
#include "src/tint/writer/spirv/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::spirv {
namespace {
using BuilderTest = TestHelper;
TEST_F(BuilderTest, EntryPoint_Parameters) {
- // @stage(fragment)
- // fn frag_main(@builtin(position) coord : vec4<f32>,
- // @location(1) loc1 : f32) {
- // var col : f32 = (coord.x * loc1);
- // }
- auto* coord =
- Param("coord", ty.vec4<f32>(), {Builtin(ast::Builtin::kPosition)});
- auto* loc1 = Param("loc1", ty.f32(), {Location(1u)});
- auto* mul = Mul(Expr(MemberAccessor("coord", "x")), Expr("loc1"));
- auto* col = Var("col", ty.f32(), ast::StorageClass::kNone, mul);
- Func("frag_main", ast::VariableList{coord, loc1}, ty.void_(),
- ast::StatementList{WrapInStatement(col)},
- ast::AttributeList{
- Stage(ast::PipelineStage::kFragment),
- });
+ // @fragment
+ // fn frag_main(@builtin(position) coord : vec4<f32>,
+ // @location(1) loc1 : f32) {
+ // var col : f32 = (coord.x * loc1);
+ // }
+ auto* coord = Param("coord", ty.vec4<f32>(), {Builtin(ast::Builtin::kPosition)});
+ auto* loc1 = Param("loc1", ty.f32(), {Location(1u)});
+ auto* mul = Mul(Expr(MemberAccessor("coord", "x")), Expr("loc1"));
+ auto* col = Var("col", ty.f32(), ast::StorageClass::kNone, mul);
+ Func("frag_main", ast::VariableList{coord, loc1}, ty.void_(),
+ ast::StatementList{WrapInStatement(col)},
+ ast::AttributeList{
+ Stage(ast::PipelineStage::kFragment),
+ });
- spirv::Builder& b = SanitizeAndBuild();
+ spirv::Builder& b = SanitizeAndBuild();
- ASSERT_TRUE(b.Build());
+ ASSERT_TRUE(b.Build());
- // Test that "coord" and "loc1" get hoisted out to global variables with the
- // Input storage class, retaining their decorations.
- EXPECT_EQ(DumpBuilder(b), R"(OpCapability Shader
+ // Test that "coord" and "loc1" get hoisted out to global variables with the
+ // Input storage class, retaining their decorations.
+ EXPECT_EQ(DumpBuilder(b), R"(OpCapability Shader
OpMemoryModel Logical GLSL450
OpEntryPoint Fragment %19 "frag_main" %1 %5
OpExecutionMode %19 OriginUpperLeft
@@ -100,38 +101,38 @@ OpReturn
OpFunctionEnd
)");
- Validate(b);
+ Validate(b);
}
TEST_F(BuilderTest, EntryPoint_ReturnValue) {
- // @stage(fragment)
- // fn frag_main(@location(0) @interpolate(flat) loc_in : u32)
- // -> @location(0) f32 {
- // if (loc_in > 10) {
- // return 0.5;
- // }
- // return 1.0;
- // }
- auto* loc_in = Param("loc_in", ty.u32(), {Location(0), Flat()});
- auto* cond = create<ast::BinaryExpression>(ast::BinaryOp::kGreaterThan,
- Expr("loc_in"), Expr(10u));
- Func("frag_main", ast::VariableList{loc_in}, ty.f32(),
- ast::StatementList{
- If(cond, Block(Return(0.5f))),
- Return(1.0f),
- },
- ast::AttributeList{
- Stage(ast::PipelineStage::kFragment),
- },
- ast::AttributeList{Location(0)});
+ // @fragment
+ // fn frag_main(@location(0) @interpolate(flat) loc_in : u32)
+ // -> @location(0) f32 {
+ // if (loc_in > 10) {
+ // return 0.5;
+ // }
+ // return 1.0;
+ // }
+ auto* loc_in = Param("loc_in", ty.u32(), {Location(0), Flat()});
+ auto* cond =
+ create<ast::BinaryExpression>(ast::BinaryOp::kGreaterThan, Expr("loc_in"), Expr(10_u));
+ Func("frag_main", ast::VariableList{loc_in}, ty.f32(),
+ ast::StatementList{
+ If(cond, Block(Return(0.5_f))),
+ Return(1_f),
+ },
+ ast::AttributeList{
+ Stage(ast::PipelineStage::kFragment),
+ },
+ ast::AttributeList{Location(0)});
- spirv::Builder& b = SanitizeAndBuild();
+ spirv::Builder& b = SanitizeAndBuild();
- ASSERT_TRUE(b.Build());
+ ASSERT_TRUE(b.Build());
- // Test that the return value gets hoisted out to a global variable with the
- // Output storage class, and the return statements are replaced with stores.
- EXPECT_EQ(DumpBuilder(b), R"(OpCapability Shader
+ // Test that the return value gets hoisted out to a global variable with the
+ // Output storage class, and the return statements are replaced with stores.
+ EXPECT_EQ(DumpBuilder(b), R"(OpCapability Shader
OpMemoryModel Logical GLSL450
OpEntryPoint Fragment %21 "frag_main" %1 %4
OpExecutionMode %21 OriginUpperLeft
@@ -177,49 +178,46 @@ OpReturn
OpFunctionEnd
)");
- Validate(b);
+ Validate(b);
}
TEST_F(BuilderTest, EntryPoint_SharedStruct) {
- // struct Interface {
- // @location(1) value : f32;
- // @builtin(position) pos : vec4<f32>;
- // };
- //
- // @stage(vertex)
- // fn vert_main() -> Interface {
- // return Interface(42.0, vec4<f32>());
- // }
- //
- // @stage(fragment)
- // fn frag_main(inputs : Interface) -> @builtin(frag_depth) f32 {
- // return inputs.value;
- // }
+ // struct Interface {
+ // @location(1) value : f32;
+ // @builtin(position) pos : vec4<f32>;
+ // };
+ //
+ // @vertex
+ // fn vert_main() -> Interface {
+ // return Interface(42.0, vec4<f32>());
+ // }
+ //
+ // @fragment
+ // fn frag_main(inputs : Interface) -> @builtin(frag_depth) f32 {
+ // return inputs.value;
+ // }
- auto* interface = Structure(
- "Interface",
- {
- Member("value", ty.f32(), ast::AttributeList{Location(1u)}),
- Member("pos", ty.vec4<f32>(),
- ast::AttributeList{Builtin(ast::Builtin::kPosition)}),
- });
+ auto* interface = Structure(
+ "Interface",
+ {
+ Member("value", ty.f32(), ast::AttributeList{Location(1u)}),
+ Member("pos", ty.vec4<f32>(), ast::AttributeList{Builtin(ast::Builtin::kPosition)}),
+ });
- auto* vert_retval =
- Construct(ty.Of(interface), 42.f, Construct(ty.vec4<f32>()));
- Func("vert_main", ast::VariableList{}, ty.Of(interface),
- {Return(vert_retval)}, {Stage(ast::PipelineStage::kVertex)});
+ auto* vert_retval = Construct(ty.Of(interface), 42_f, Construct(ty.vec4<f32>()));
+ Func("vert_main", ast::VariableList{}, ty.Of(interface), {Return(vert_retval)},
+ {Stage(ast::PipelineStage::kVertex)});
- auto* frag_inputs = Param("inputs", ty.Of(interface));
- Func("frag_main", ast::VariableList{frag_inputs}, ty.f32(),
- {Return(MemberAccessor(Expr("inputs"), "value"))},
- {Stage(ast::PipelineStage::kFragment)},
- {Builtin(ast::Builtin::kFragDepth)});
+ auto* frag_inputs = Param("inputs", ty.Of(interface));
+ Func("frag_main", ast::VariableList{frag_inputs}, ty.f32(),
+ {Return(MemberAccessor(Expr("inputs"), "value"))}, {Stage(ast::PipelineStage::kFragment)},
+ {Builtin(ast::Builtin::kFragDepth)});
- spirv::Builder& b = SanitizeAndBuild();
+ spirv::Builder& b = SanitizeAndBuild();
- ASSERT_TRUE(b.Build()) << b.error();
+ ASSERT_TRUE(b.Build()) << b.error();
- EXPECT_EQ(DumpBuilder(b), R"(OpCapability Shader
+ EXPECT_EQ(DumpBuilder(b), R"(OpCapability Shader
OpMemoryModel Logical GLSL450
OpEntryPoint Vertex %23 "vert_main" %1 %5 %9
OpEntryPoint Fragment %34 "frag_main" %10 %12 %14
@@ -300,23 +298,22 @@ OpReturn
OpFunctionEnd
)");
- Validate(b);
+ Validate(b);
}
TEST_F(BuilderTest, SampleIndex_SampleRateShadingCapability) {
- Func("main",
- {Param("sample_index", ty.u32(), {Builtin(ast::Builtin::kSampleIndex)})},
- ty.void_(), {}, {Stage(ast::PipelineStage::kFragment)});
+ Func("main", {Param("sample_index", ty.u32(), {Builtin(ast::Builtin::kSampleIndex)})},
+ ty.void_(), {}, {Stage(ast::PipelineStage::kFragment)});
- spirv::Builder& b = SanitizeAndBuild();
+ spirv::Builder& b = SanitizeAndBuild();
- ASSERT_TRUE(b.Build()) << b.error();
+ ASSERT_TRUE(b.Build()) << b.error();
- // Make sure we generate the SampleRateShading capability.
- EXPECT_EQ(DumpInstructions(b.capabilities()),
- "OpCapability Shader\n"
- "OpCapability SampleRateShading\n");
- EXPECT_EQ(DumpInstructions(b.annots()), "OpDecorate %1 BuiltIn SampleId\n");
+ // Make sure we generate the SampleRateShading capability.
+ EXPECT_EQ(DumpInstructions(b.capabilities()),
+ "OpCapability Shader\n"
+ "OpCapability SampleRateShading\n");
+ EXPECT_EQ(DumpInstructions(b.annots()), "OpDecorate %1 BuiltIn SampleId\n");
}
} // namespace
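
Aside on the literal-suffix migration seen throughout the hunks above (bare `10u`, `0.5f`, `1` becoming `10_u`, `0.5_f`, `1_i` under `using namespace tint::number_suffixes`): the sketch below is a minimal, standalone illustration of the general C++ user-defined-literal pattern this relies on, assuming nothing about Tint's real definitions. The `I32`/`U32`/`F32` wrappers and operator bodies are invented for the example only; they simply show how such suffixes let a test spell out the width and signedness of every constant instead of leaning on default literal types.

// Minimal sketch, not Tint's actual number_suffixes implementation.
#include <cstdint>

struct I32 { int32_t value; };   // hypothetical 32-bit signed wrapper
struct U32 { uint32_t value; };  // hypothetical 32-bit unsigned wrapper
struct F32 { float value; };     // hypothetical 32-bit float wrapper

constexpr I32 operator""_i(unsigned long long v) { return {static_cast<int32_t>(v)}; }
constexpr U32 operator""_u(unsigned long long v) { return {static_cast<uint32_t>(v)}; }
constexpr F32 operator""_f(long double v) { return {static_cast<float>(v)}; }
constexpr F32 operator""_f(unsigned long long v) { return {static_cast<float>(v)}; }

int main() {
    constexpr I32 size = 2_i;     // explicitly signed 32-bit constant
    constexpr U32 index = 10_u;   // explicitly unsigned 32-bit constant
    constexpr F32 depth = 0.5_f;  // explicitly 32-bit float constant
    return (size.value == 2 && index.value == 10u && depth.value == 0.5f) ? 0 : 1;
}

The payoff of the pattern, as the diff suggests, is that an integer literal like `1_i` versus `1_u` carries its intended type into builder calls such as WorkgroupSize(), rather than being inferred from a bare `1`.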
diff --git a/chromium/third_party/dawn/src/tint/writer/spirv/builder_format_conversion_test.cc b/chromium/third_party/dawn/src/tint/writer/spirv/builder_format_conversion_test.cc
index 396b5ac1678..dbb4f443ba8 100644
--- a/chromium/third_party/dawn/src/tint/writer/spirv/builder_format_conversion_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/spirv/builder_format_conversion_test.cc
@@ -19,73 +19,72 @@ namespace tint::writer::spirv {
namespace {
struct TestData {
- ast::TexelFormat ast_format;
- SpvImageFormat_ spv_format;
- bool extended_format = false;
+ ast::TexelFormat ast_format;
+ SpvImageFormat_ spv_format;
+ bool extended_format = false;
};
inline std::ostream& operator<<(std::ostream& out, TestData data) {
- out << data.ast_format;
- return out;
+ out << data.ast_format;
+ return out;
}
using ImageFormatConversionTest = TestParamHelper<TestData>;
TEST_P(ImageFormatConversionTest, ImageFormatConversion) {
- auto param = GetParam();
+ auto param = GetParam();
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- EXPECT_EQ(b.convert_texel_format_to_spv(param.ast_format), param.spv_format);
+ EXPECT_EQ(b.convert_texel_format_to_spv(param.ast_format), param.spv_format);
- if (param.extended_format) {
- EXPECT_EQ(DumpInstructions(b.capabilities()),
- R"(OpCapability StorageImageExtendedFormats
+ if (param.extended_format) {
+ EXPECT_EQ(DumpInstructions(b.capabilities()),
+ R"(OpCapability StorageImageExtendedFormats
)");
- } else {
- EXPECT_EQ(DumpInstructions(b.capabilities()), "");
- }
+ } else {
+ EXPECT_EQ(DumpInstructions(b.capabilities()), "");
+ }
}
-INSTANTIATE_TEST_SUITE_P(
- BuilderTest,
- ImageFormatConversionTest,
- testing::Values(
- /* WGSL unsupported formats
- TestData{ast::TexelFormat::kR8Unorm, SpvImageFormatR8, true},
- TestData{ast::TexelFormat::kR8Snorm, SpvImageFormatR8Snorm, true},
- TestData{ast::TexelFormat::kR8Uint, SpvImageFormatR8ui, true},
- TestData{ast::TexelFormat::kR8Sint, SpvImageFormatR8i, true},
- TestData{ast::TexelFormat::kR16Uint, SpvImageFormatR16ui, true},
- TestData{ast::TexelFormat::kR16Sint, SpvImageFormatR16i, true},
- TestData{ast::TexelFormat::kR16Float, SpvImageFormatR16f, true},
- TestData{ast::TexelFormat::kRg8Unorm, SpvImageFormatRg8, true},
- TestData{ast::TexelFormat::kRg8Snorm, SpvImageFormatRg8Snorm, true},
- TestData{ast::TexelFormat::kRg8Uint, SpvImageFormatRg8ui, true},
- TestData{ast::TexelFormat::kRg8Sint, SpvImageFormatRg8i, true},
- TestData{ast::TexelFormat::kRg16Uint, SpvImageFormatRg16ui, true},
- TestData{ast::TexelFormat::kRg16Sint, SpvImageFormatRg16i, true},
- TestData{ast::TexelFormat::kRg16Float, SpvImageFormatRg16f, true},
- TestData{ast::TexelFormat::kRgba8UnormSrgb, SpvImageFormatUnknown},
- TestData{ast::TexelFormat::kBgra8Unorm, SpvImageFormatUnknown},
- TestData{ast::TexelFormat::kBgra8UnormSrgb, SpvImageFormatUnknown},
- TestData{ast::TexelFormat::kRgb10A2Unorm, SpvImageFormatRgb10A2, true},
- TestData{ast::TexelFormat::kRg11B10Float, SpvImageFormatR11fG11fB10f, true},
-*/
- TestData{ast::TexelFormat::kR32Uint, SpvImageFormatR32ui},
- TestData{ast::TexelFormat::kR32Sint, SpvImageFormatR32i},
- TestData{ast::TexelFormat::kR32Float, SpvImageFormatR32f},
- TestData{ast::TexelFormat::kRgba8Unorm, SpvImageFormatRgba8},
- TestData{ast::TexelFormat::kRgba8Snorm, SpvImageFormatRgba8Snorm},
- TestData{ast::TexelFormat::kRgba8Uint, SpvImageFormatRgba8ui},
- TestData{ast::TexelFormat::kRgba8Sint, SpvImageFormatRgba8i},
- TestData{ast::TexelFormat::kRg32Uint, SpvImageFormatRg32ui, true},
- TestData{ast::TexelFormat::kRg32Sint, SpvImageFormatRg32i, true},
- TestData{ast::TexelFormat::kRg32Float, SpvImageFormatRg32f, true},
- TestData{ast::TexelFormat::kRgba16Uint, SpvImageFormatRgba16ui},
- TestData{ast::TexelFormat::kRgba16Sint, SpvImageFormatRgba16i},
- TestData{ast::TexelFormat::kRgba16Float, SpvImageFormatRgba16f},
- TestData{ast::TexelFormat::kRgba32Uint, SpvImageFormatRgba32ui},
- TestData{ast::TexelFormat::kRgba32Sint, SpvImageFormatRgba32i},
- TestData{ast::TexelFormat::kRgba32Float, SpvImageFormatRgba32f}));
+INSTANTIATE_TEST_SUITE_P(BuilderTest,
+ ImageFormatConversionTest,
+ testing::Values(
+ /* WGSL unsupported formats
+ TestData{ast::TexelFormat::kR8Unorm, SpvImageFormatR8, true},
+ TestData{ast::TexelFormat::kR8Snorm, SpvImageFormatR8Snorm, true},
+ TestData{ast::TexelFormat::kR8Uint, SpvImageFormatR8ui, true},
+ TestData{ast::TexelFormat::kR8Sint, SpvImageFormatR8i, true},
+ TestData{ast::TexelFormat::kR16Uint, SpvImageFormatR16ui, true},
+ TestData{ast::TexelFormat::kR16Sint, SpvImageFormatR16i, true},
+ TestData{ast::TexelFormat::kR16Float, SpvImageFormatR16f, true},
+ TestData{ast::TexelFormat::kRg8Unorm, SpvImageFormatRg8, true},
+ TestData{ast::TexelFormat::kRg8Snorm, SpvImageFormatRg8Snorm, true},
+ TestData{ast::TexelFormat::kRg8Uint, SpvImageFormatRg8ui, true},
+ TestData{ast::TexelFormat::kRg8Sint, SpvImageFormatRg8i, true},
+ TestData{ast::TexelFormat::kRg16Uint, SpvImageFormatRg16ui, true},
+ TestData{ast::TexelFormat::kRg16Sint, SpvImageFormatRg16i, true},
+ TestData{ast::TexelFormat::kRg16Float, SpvImageFormatRg16f, true},
+ TestData{ast::TexelFormat::kRgba8UnormSrgb, SpvImageFormatUnknown},
+ TestData{ast::TexelFormat::kBgra8Unorm, SpvImageFormatUnknown},
+ TestData{ast::TexelFormat::kBgra8UnormSrgb, SpvImageFormatUnknown},
+ TestData{ast::TexelFormat::kRgb10A2Unorm, SpvImageFormatRgb10A2, true},
+ TestData{ast::TexelFormat::kRg11B10Float, SpvImageFormatR11fG11fB10f, true},
+ */
+ TestData{ast::TexelFormat::kR32Uint, SpvImageFormatR32ui},
+ TestData{ast::TexelFormat::kR32Sint, SpvImageFormatR32i},
+ TestData{ast::TexelFormat::kR32Float, SpvImageFormatR32f},
+ TestData{ast::TexelFormat::kRgba8Unorm, SpvImageFormatRgba8},
+ TestData{ast::TexelFormat::kRgba8Snorm, SpvImageFormatRgba8Snorm},
+ TestData{ast::TexelFormat::kRgba8Uint, SpvImageFormatRgba8ui},
+ TestData{ast::TexelFormat::kRgba8Sint, SpvImageFormatRgba8i},
+ TestData{ast::TexelFormat::kRg32Uint, SpvImageFormatRg32ui, true},
+ TestData{ast::TexelFormat::kRg32Sint, SpvImageFormatRg32i, true},
+ TestData{ast::TexelFormat::kRg32Float, SpvImageFormatRg32f, true},
+ TestData{ast::TexelFormat::kRgba16Uint, SpvImageFormatRgba16ui},
+ TestData{ast::TexelFormat::kRgba16Sint, SpvImageFormatRgba16i},
+ TestData{ast::TexelFormat::kRgba16Float, SpvImageFormatRgba16f},
+ TestData{ast::TexelFormat::kRgba32Uint, SpvImageFormatRgba32ui},
+ TestData{ast::TexelFormat::kRgba32Sint, SpvImageFormatRgba32i},
+ TestData{ast::TexelFormat::kRgba32Float, SpvImageFormatRgba32f}));
} // namespace
} // namespace tint::writer::spirv
diff --git a/chromium/third_party/dawn/src/tint/writer/spirv/builder_function_attribute_test.cc b/chromium/third_party/dawn/src/tint/writer/spirv/builder_function_attribute_test.cc
index 8a9e82a2fbc..e5bb43da5fc 100644
--- a/chromium/third_party/dawn/src/tint/writer/spirv/builder_function_attribute_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/spirv/builder_function_attribute_test.cc
@@ -17,165 +17,162 @@
#include "src/tint/writer/spirv/spv_dump.h"
#include "src/tint/writer/spirv/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::spirv {
namespace {
using BuilderTest = TestHelper;
TEST_F(BuilderTest, Attribute_Stage) {
- auto* func = Func("main", {}, ty.void_(), ast::StatementList{},
- ast::AttributeList{
- Stage(ast::PipelineStage::kFragment),
- });
+ auto* func = Func("main", {}, ty.void_(), ast::StatementList{},
+ ast::AttributeList{
+ Stage(ast::PipelineStage::kFragment),
+ });
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
- EXPECT_EQ(DumpInstructions(b.entry_points()),
- R"(OpEntryPoint Fragment %3 "main"
+ ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
+ EXPECT_EQ(DumpInstructions(b.entry_points()),
+ R"(OpEntryPoint Fragment %3 "main"
)");
}
struct FunctionStageData {
- ast::PipelineStage stage;
- SpvExecutionModel model;
+ ast::PipelineStage stage;
+ SpvExecutionModel model;
};
inline std::ostream& operator<<(std::ostream& out, FunctionStageData data) {
- out << data.stage;
- return out;
+ out << data.stage;
+ return out;
}
using Attribute_StageTest = TestParamHelper<FunctionStageData>;
TEST_P(Attribute_StageTest, Emit) {
- auto params = GetParam();
-
- const ast::Variable* var = nullptr;
- const ast::Type* ret_type = nullptr;
- ast::AttributeList ret_type_attrs;
- ast::StatementList body;
- if (params.stage == ast::PipelineStage::kVertex) {
- ret_type = ty.vec4<f32>();
- ret_type_attrs.push_back(Builtin(ast::Builtin::kPosition));
- body.push_back(Return(Construct(ty.vec4<f32>())));
- } else {
- ret_type = ty.void_();
- }
-
- auto deco_list = ast::AttributeList{Stage(params.stage)};
- if (params.stage == ast::PipelineStage::kCompute) {
- deco_list.push_back(WorkgroupSize(1));
- }
-
- auto* func = Func("main", {}, ret_type, body, deco_list, ret_type_attrs);
-
- spirv::Builder& b = Build();
-
- if (var) {
- ASSERT_TRUE(b.GenerateGlobalVariable(var)) << b.error();
- }
- ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
-
- auto preamble = b.entry_points();
- ASSERT_GE(preamble.size(), 1u);
- EXPECT_EQ(preamble[0].opcode(), spv::Op::OpEntryPoint);
-
- ASSERT_GE(preamble[0].operands().size(), 3u);
- EXPECT_EQ(preamble[0].operands()[0].to_i(),
- static_cast<uint32_t>(params.model));
+ auto params = GetParam();
+
+ const ast::Variable* var = nullptr;
+ const ast::Type* ret_type = nullptr;
+ ast::AttributeList ret_type_attrs;
+ ast::StatementList body;
+ if (params.stage == ast::PipelineStage::kVertex) {
+ ret_type = ty.vec4<f32>();
+ ret_type_attrs.push_back(Builtin(ast::Builtin::kPosition));
+ body.push_back(Return(Construct(ty.vec4<f32>())));
+ } else {
+ ret_type = ty.void_();
+ }
+
+ auto deco_list = ast::AttributeList{Stage(params.stage)};
+ if (params.stage == ast::PipelineStage::kCompute) {
+ deco_list.push_back(WorkgroupSize(1_i));
+ }
+
+ auto* func = Func("main", {}, ret_type, body, deco_list, ret_type_attrs);
+
+ spirv::Builder& b = Build();
+
+ if (var) {
+ ASSERT_TRUE(b.GenerateGlobalVariable(var)) << b.error();
+ }
+ ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
+
+ auto preamble = b.entry_points();
+ ASSERT_GE(preamble.size(), 1u);
+ EXPECT_EQ(preamble[0].opcode(), spv::Op::OpEntryPoint);
+
+ ASSERT_GE(preamble[0].operands().size(), 3u);
+ EXPECT_EQ(std::get<uint32_t>(preamble[0].operands()[0]), static_cast<uint32_t>(params.model));
}
INSTANTIATE_TEST_SUITE_P(
BuilderTest,
Attribute_StageTest,
- testing::Values(FunctionStageData{ast::PipelineStage::kVertex,
- SpvExecutionModelVertex},
- FunctionStageData{ast::PipelineStage::kFragment,
- SpvExecutionModelFragment},
- FunctionStageData{ast::PipelineStage::kCompute,
- SpvExecutionModelGLCompute}));
+ testing::Values(FunctionStageData{ast::PipelineStage::kVertex, SpvExecutionModelVertex},
+ FunctionStageData{ast::PipelineStage::kFragment, SpvExecutionModelFragment},
+ FunctionStageData{ast::PipelineStage::kCompute, SpvExecutionModelGLCompute}));
TEST_F(BuilderTest, Decoration_ExecutionMode_Fragment_OriginUpperLeft) {
- auto* func = Func("main", {}, ty.void_(), ast::StatementList{},
- ast::AttributeList{
- Stage(ast::PipelineStage::kFragment),
- });
+ auto* func = Func("main", {}, ty.void_(), ast::StatementList{},
+ ast::AttributeList{
+ Stage(ast::PipelineStage::kFragment),
+ });
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- ASSERT_TRUE(b.GenerateExecutionModes(func, 3)) << b.error();
- EXPECT_EQ(DumpInstructions(b.execution_modes()),
- R"(OpExecutionMode %3 OriginUpperLeft
+ ASSERT_TRUE(b.GenerateExecutionModes(func, 3)) << b.error();
+ EXPECT_EQ(DumpInstructions(b.execution_modes()),
+ R"(OpExecutionMode %3 OriginUpperLeft
)");
}
TEST_F(BuilderTest, Decoration_ExecutionMode_WorkgroupSize_Default) {
- auto* func = Func("main", {}, ty.void_(), ast::StatementList{},
- ast::AttributeList{Stage(ast::PipelineStage::kCompute),
- WorkgroupSize(1)});
+ auto* func = Func("main", {}, ty.void_(), ast::StatementList{},
+ ast::AttributeList{Stage(ast::PipelineStage::kCompute), WorkgroupSize(1_i)});
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- ASSERT_TRUE(b.GenerateExecutionModes(func, 3)) << b.error();
- EXPECT_EQ(DumpInstructions(b.execution_modes()),
- R"(OpExecutionMode %3 LocalSize 1 1 1
+ ASSERT_TRUE(b.GenerateExecutionModes(func, 3)) << b.error();
+ EXPECT_EQ(DumpInstructions(b.execution_modes()),
+ R"(OpExecutionMode %3 LocalSize 1 1 1
)");
}
TEST_F(BuilderTest, Decoration_ExecutionMode_WorkgroupSize_Literals) {
- auto* func = Func("main", {}, ty.void_(), ast::StatementList{},
- ast::AttributeList{
- WorkgroupSize(2, 4, 6),
- Stage(ast::PipelineStage::kCompute),
- });
+ auto* func = Func("main", {}, ty.void_(), ast::StatementList{},
+ ast::AttributeList{
+ WorkgroupSize(2_i, 4_i, 6_i),
+ Stage(ast::PipelineStage::kCompute),
+ });
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- ASSERT_TRUE(b.GenerateExecutionModes(func, 3)) << b.error();
- EXPECT_EQ(DumpInstructions(b.execution_modes()),
- R"(OpExecutionMode %3 LocalSize 2 4 6
+ ASSERT_TRUE(b.GenerateExecutionModes(func, 3)) << b.error();
+ EXPECT_EQ(DumpInstructions(b.execution_modes()),
+ R"(OpExecutionMode %3 LocalSize 2 4 6
)");
}
TEST_F(BuilderTest, Decoration_ExecutionMode_WorkgroupSize_Const) {
- GlobalConst("width", ty.i32(), Construct(ty.i32(), 2));
- GlobalConst("height", ty.i32(), Construct(ty.i32(), 3));
- GlobalConst("depth", ty.i32(), Construct(ty.i32(), 4));
- auto* func = Func("main", {}, ty.void_(), ast::StatementList{},
- ast::AttributeList{
- WorkgroupSize("width", "height", "depth"),
- Stage(ast::PipelineStage::kCompute),
- });
-
- spirv::Builder& b = Build();
-
- ASSERT_TRUE(b.GenerateExecutionModes(func, 3)) << b.error();
- EXPECT_EQ(DumpInstructions(b.execution_modes()),
- R"(OpExecutionMode %3 LocalSize 2 3 4
+ GlobalConst("width", ty.i32(), Construct(ty.i32(), 2_i));
+ GlobalConst("height", ty.i32(), Construct(ty.i32(), 3_i));
+ GlobalConst("depth", ty.i32(), Construct(ty.i32(), 4_i));
+ auto* func = Func("main", {}, ty.void_(), ast::StatementList{},
+ ast::AttributeList{
+ WorkgroupSize("width", "height", "depth"),
+ Stage(ast::PipelineStage::kCompute),
+ });
+
+ spirv::Builder& b = Build();
+
+ ASSERT_TRUE(b.GenerateExecutionModes(func, 3)) << b.error();
+ EXPECT_EQ(DumpInstructions(b.execution_modes()),
+ R"(OpExecutionMode %3 LocalSize 2 3 4
)");
}
TEST_F(BuilderTest, Decoration_ExecutionMode_WorkgroupSize_OverridableConst) {
- Override("width", ty.i32(), Construct(ty.i32(), 2), {Id(7u)});
- Override("height", ty.i32(), Construct(ty.i32(), 3), {Id(8u)});
- Override("depth", ty.i32(), Construct(ty.i32(), 4), {Id(9u)});
- auto* func = Func("main", {}, ty.void_(), ast::StatementList{},
- ast::AttributeList{
- WorkgroupSize("width", "height", "depth"),
- Stage(ast::PipelineStage::kCompute),
- });
-
- spirv::Builder& b = Build();
-
- ASSERT_TRUE(b.GenerateExecutionModes(func, 3)) << b.error();
- EXPECT_EQ(DumpInstructions(b.execution_modes()), "");
- EXPECT_EQ(DumpInstructions(b.types()),
- R"(%2 = OpTypeInt 32 0
+ Override("width", ty.i32(), Construct(ty.i32(), 2_i), {Id(7u)});
+ Override("height", ty.i32(), Construct(ty.i32(), 3_i), {Id(8u)});
+ Override("depth", ty.i32(), Construct(ty.i32(), 4_i), {Id(9u)});
+ auto* func = Func("main", {}, ty.void_(), ast::StatementList{},
+ ast::AttributeList{
+ WorkgroupSize("width", "height", "depth"),
+ Stage(ast::PipelineStage::kCompute),
+ });
+
+ spirv::Builder& b = Build();
+
+ ASSERT_TRUE(b.GenerateExecutionModes(func, 3)) << b.error();
+ EXPECT_EQ(DumpInstructions(b.execution_modes()), "");
+ EXPECT_EQ(DumpInstructions(b.types()),
+ R"(%2 = OpTypeInt 32 0
%1 = OpTypeVector %2 3
%4 = OpSpecConstant %2 2
%5 = OpSpecConstant %2 3
%6 = OpSpecConstant %2 4
%3 = OpSpecConstantComposite %1 %4 %5 %6
)");
- EXPECT_EQ(DumpInstructions(b.annots()),
- R"(OpDecorate %4 SpecId 7
+ EXPECT_EQ(DumpInstructions(b.annots()),
+ R"(OpDecorate %4 SpecId 7
OpDecorate %5 SpecId 8
OpDecorate %6 SpecId 9
OpDecorate %3 BuiltIn WorkgroupSize
@@ -183,49 +180,49 @@ OpDecorate %3 BuiltIn WorkgroupSize
}
TEST_F(BuilderTest, Decoration_ExecutionMode_WorkgroupSize_LiteralAndConst) {
- Override("height", ty.i32(), Construct(ty.i32(), 2), {Id(7u)});
- GlobalConst("depth", ty.i32(), Construct(ty.i32(), 3));
- auto* func = Func("main", {}, ty.void_(), ast::StatementList{},
- ast::AttributeList{
- WorkgroupSize(4, "height", "depth"),
- Stage(ast::PipelineStage::kCompute),
- });
-
- spirv::Builder& b = Build();
-
- ASSERT_TRUE(b.GenerateExecutionModes(func, 3)) << b.error();
- EXPECT_EQ(DumpInstructions(b.execution_modes()), "");
- EXPECT_EQ(DumpInstructions(b.types()),
- R"(%2 = OpTypeInt 32 0
+ Override("height", ty.i32(), Construct(ty.i32(), 2_i), {Id(7u)});
+ GlobalConst("depth", ty.i32(), Construct(ty.i32(), 3_i));
+ auto* func = Func("main", {}, ty.void_(), ast::StatementList{},
+ ast::AttributeList{
+ WorkgroupSize(4_i, "height", "depth"),
+ Stage(ast::PipelineStage::kCompute),
+ });
+
+ spirv::Builder& b = Build();
+
+ ASSERT_TRUE(b.GenerateExecutionModes(func, 3)) << b.error();
+ EXPECT_EQ(DumpInstructions(b.execution_modes()), "");
+ EXPECT_EQ(DumpInstructions(b.types()),
+ R"(%2 = OpTypeInt 32 0
%1 = OpTypeVector %2 3
%4 = OpConstant %2 4
%5 = OpSpecConstant %2 2
%6 = OpConstant %2 3
%3 = OpSpecConstantComposite %1 %4 %5 %6
)");
- EXPECT_EQ(DumpInstructions(b.annots()),
- R"(OpDecorate %5 SpecId 7
+ EXPECT_EQ(DumpInstructions(b.annots()),
+ R"(OpDecorate %5 SpecId 7
OpDecorate %3 BuiltIn WorkgroupSize
)");
}
TEST_F(BuilderTest, Decoration_ExecutionMode_MultipleFragment) {
- auto* func1 = Func("main1", {}, ty.void_(), ast::StatementList{},
- ast::AttributeList{
- Stage(ast::PipelineStage::kFragment),
- });
-
- auto* func2 = Func("main2", {}, ty.void_(), ast::StatementList{},
- ast::AttributeList{
- Stage(ast::PipelineStage::kFragment),
- });
-
- spirv::Builder& b = Build();
-
- ASSERT_TRUE(b.GenerateFunction(func1)) << b.error();
- ASSERT_TRUE(b.GenerateFunction(func2)) << b.error();
- EXPECT_EQ(DumpBuilder(b),
- R"(OpEntryPoint Fragment %3 "main1"
+ auto* func1 = Func("main1", {}, ty.void_(), ast::StatementList{},
+ ast::AttributeList{
+ Stage(ast::PipelineStage::kFragment),
+ });
+
+ auto* func2 = Func("main2", {}, ty.void_(), ast::StatementList{},
+ ast::AttributeList{
+ Stage(ast::PipelineStage::kFragment),
+ });
+
+ spirv::Builder& b = Build();
+
+ ASSERT_TRUE(b.GenerateFunction(func1)) << b.error();
+ ASSERT_TRUE(b.GenerateFunction(func2)) << b.error();
+ EXPECT_EQ(DumpBuilder(b),
+ R"(OpEntryPoint Fragment %3 "main1"
OpEntryPoint Fragment %5 "main2"
OpExecutionMode %3 OriginUpperLeft
OpExecutionMode %5 OriginUpperLeft
@@ -245,21 +242,21 @@ OpFunctionEnd
}
TEST_F(BuilderTest, Decoration_ExecutionMode_FragDepth) {
- Func("main", ast::VariableList{}, ty.f32(),
- ast::StatementList{
- Return(Expr(1.f)),
- },
- ast::AttributeList{Stage(ast::PipelineStage::kFragment)},
- ast::AttributeList{
- Builtin(ast::Builtin::kFragDepth),
- });
+ Func("main", ast::VariableList{}, ty.f32(),
+ ast::StatementList{
+ Return(Expr(1_f)),
+ },
+ ast::AttributeList{Stage(ast::PipelineStage::kFragment)},
+ ast::AttributeList{
+ Builtin(ast::Builtin::kFragDepth),
+ });
- spirv::Builder& b = SanitizeAndBuild();
+ spirv::Builder& b = SanitizeAndBuild();
- ASSERT_TRUE(b.Build());
+ ASSERT_TRUE(b.Build());
- EXPECT_EQ(DumpInstructions(b.execution_modes()),
- R"(OpExecutionMode %11 OriginUpperLeft
+ EXPECT_EQ(DumpInstructions(b.execution_modes()),
+ R"(OpExecutionMode %11 OriginUpperLeft
OpExecutionMode %11 DepthReplacing
)");
}
diff --git a/chromium/third_party/dawn/src/tint/writer/spirv/builder_function_test.cc b/chromium/third_party/dawn/src/tint/writer/spirv/builder_function_test.cc
index 23dd0c8a42e..2fd1c32696e 100644
--- a/chromium/third_party/dawn/src/tint/writer/spirv/builder_function_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/spirv/builder_function_test.cc
@@ -16,19 +16,21 @@
#include "src/tint/writer/spirv/spv_dump.h"
#include "src/tint/writer/spirv/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::spirv {
namespace {
using BuilderTest = TestHelper;
TEST_F(BuilderTest, Function_Empty) {
- Func("a_func", {}, ty.void_(), ast::StatementList{}, ast::AttributeList{});
+ Func("a_func", {}, ty.void_(), ast::StatementList{}, ast::AttributeList{});
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- auto* func = program->AST().Functions()[0];
- ASSERT_TRUE(b.GenerateFunction(func));
- EXPECT_EQ(DumpBuilder(b), R"(OpName %3 "a_func"
+ auto* func = program->AST().Functions()[0];
+ ASSERT_TRUE(b.GenerateFunction(func));
+ EXPECT_EQ(DumpBuilder(b), R"(OpName %3 "a_func"
%2 = OpTypeVoid
%1 = OpTypeFunction %2
%3 = OpFunction %2 None %1
@@ -39,17 +41,17 @@ OpFunctionEnd
}
TEST_F(BuilderTest, Function_Terminator_Return) {
- Func("a_func", {}, ty.void_(),
- ast::StatementList{
- Return(),
- },
- ast::AttributeList{});
+ Func("a_func", {}, ty.void_(),
+ ast::StatementList{
+ Return(),
+ },
+ ast::AttributeList{});
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- auto* func = program->AST().Functions()[0];
- ASSERT_TRUE(b.GenerateFunction(func));
- EXPECT_EQ(DumpBuilder(b), R"(OpName %3 "a_func"
+ auto* func = program->AST().Functions()[0];
+ ASSERT_TRUE(b.GenerateFunction(func));
+ EXPECT_EQ(DumpBuilder(b), R"(OpName %3 "a_func"
%2 = OpTypeVoid
%1 = OpTypeFunction %2
%3 = OpFunction %2 None %1
@@ -60,19 +62,18 @@ OpFunctionEnd
}
TEST_F(BuilderTest, Function_Terminator_ReturnValue) {
- Global("a", ty.f32(), ast::StorageClass::kPrivate);
+ Global("a", ty.f32(), ast::StorageClass::kPrivate);
- Func("a_func", {}, ty.f32(), ast::StatementList{Return("a")},
- ast::AttributeList{});
+ Func("a_func", {}, ty.f32(), ast::StatementList{Return("a")}, ast::AttributeList{});
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- auto* var_a = program->AST().GlobalVariables()[0];
- auto* func = program->AST().Functions()[0];
+ auto* var_a = program->AST().GlobalVariables()[0];
+ auto* func = program->AST().Functions()[0];
- ASSERT_TRUE(b.GenerateGlobalVariable(var_a)) << b.error();
- ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
- EXPECT_EQ(DumpBuilder(b), R"(OpName %1 "a"
+ ASSERT_TRUE(b.GenerateGlobalVariable(var_a)) << b.error();
+ ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
+ EXPECT_EQ(DumpBuilder(b), R"(OpName %1 "a"
OpName %6 "a_func"
%3 = OpTypeFloat 32
%2 = OpTypePointer Private %3
@@ -88,17 +89,17 @@ OpFunctionEnd
}
TEST_F(BuilderTest, Function_Terminator_Discard) {
- Func("a_func", {}, ty.void_(),
- ast::StatementList{
- create<ast::DiscardStatement>(),
- },
- ast::AttributeList{});
+ Func("a_func", {}, ty.void_(),
+ ast::StatementList{
+ create<ast::DiscardStatement>(),
+ },
+ ast::AttributeList{});
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- auto* func = program->AST().Functions()[0];
- ASSERT_TRUE(b.GenerateFunction(func));
- EXPECT_EQ(DumpBuilder(b), R"(OpName %3 "a_func"
+ auto* func = program->AST().Functions()[0];
+ ASSERT_TRUE(b.GenerateFunction(func));
+ EXPECT_EQ(DumpBuilder(b), R"(OpName %3 "a_func"
%2 = OpTypeVoid
%1 = OpTypeFunction %2
%3 = OpFunction %2 None %1
@@ -109,16 +110,15 @@ OpFunctionEnd
}
TEST_F(BuilderTest, Function_WithParams) {
- ast::VariableList params = {Param("a", ty.f32()), Param("b", ty.i32())};
+ ast::VariableList params = {Param("a", ty.f32()), Param("b", ty.i32())};
- Func("a_func", params, ty.f32(), ast::StatementList{Return("a")},
- ast::AttributeList{});
+ Func("a_func", params, ty.f32(), ast::StatementList{Return("a")}, ast::AttributeList{});
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- auto* func = program->AST().Functions()[0];
- ASSERT_TRUE(b.GenerateFunction(func));
- EXPECT_EQ(DumpBuilder(b), R"(OpName %4 "a_func"
+ auto* func = program->AST().Functions()[0];
+ ASSERT_TRUE(b.GenerateFunction(func));
+ EXPECT_EQ(DumpBuilder(b), R"(OpName %4 "a_func"
OpName %5 "a"
OpName %6 "b"
%2 = OpTypeFloat 32
@@ -134,17 +134,17 @@ OpFunctionEnd
}
TEST_F(BuilderTest, Function_WithBody) {
- Func("a_func", {}, ty.void_(),
- ast::StatementList{
- Return(),
- },
- ast::AttributeList{});
+ Func("a_func", {}, ty.void_(),
+ ast::StatementList{
+ Return(),
+ },
+ ast::AttributeList{});
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- auto* func = program->AST().Functions()[0];
- ASSERT_TRUE(b.GenerateFunction(func));
- EXPECT_EQ(DumpBuilder(b), R"(OpName %3 "a_func"
+ auto* func = program->AST().Functions()[0];
+ ASSERT_TRUE(b.GenerateFunction(func));
+ EXPECT_EQ(DumpBuilder(b), R"(OpName %3 "a_func"
%2 = OpTypeVoid
%1 = OpTypeFunction %2
%3 = OpFunction %2 None %1
@@ -155,87 +155,81 @@ OpFunctionEnd
}
TEST_F(BuilderTest, FunctionType) {
- Func("a_func", {}, ty.void_(), ast::StatementList{}, ast::AttributeList{});
+ Func("a_func", {}, ty.void_(), ast::StatementList{}, ast::AttributeList{});
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- auto* func = program->AST().Functions()[0];
- ASSERT_TRUE(b.GenerateFunction(func));
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeVoid
+ auto* func = program->AST().Functions()[0];
+ ASSERT_TRUE(b.GenerateFunction(func));
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeVoid
%1 = OpTypeFunction %2
)");
}
TEST_F(BuilderTest, FunctionType_DeDuplicate) {
- auto* func1 = Func("a_func", {}, ty.void_(), ast::StatementList{},
- ast::AttributeList{});
- auto* func2 = Func("b_func", {}, ty.void_(), ast::StatementList{},
- ast::AttributeList{});
+ auto* func1 = Func("a_func", {}, ty.void_(), ast::StatementList{}, ast::AttributeList{});
+ auto* func2 = Func("b_func", {}, ty.void_(), ast::StatementList{}, ast::AttributeList{});
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- ASSERT_TRUE(b.GenerateFunction(func1));
- ASSERT_TRUE(b.GenerateFunction(func2));
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeVoid
+ ASSERT_TRUE(b.GenerateFunction(func1));
+ ASSERT_TRUE(b.GenerateFunction(func2));
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeVoid
%1 = OpTypeFunction %2
)");
}
// https://crbug.com/tint/297
TEST_F(BuilderTest, Emit_Multiple_EntryPoint_With_Same_ModuleVar) {
- // struct Data {
- // d : f32;
- // };
- // @binding(0) @group(0) var<storage> data : Data;
- //
- // @stage(compute) @workgroup_size(1)
- // fn a() {
- // return;
- // }
- //
- // @stage(compute) @workgroup_size(1)
- // fn b() {
- // return;
- // }
-
- auto* s = Structure("Data", {Member("d", ty.f32())});
-
- Global("data", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kReadWrite,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(0),
- });
-
- {
- auto* var = Var("v", ty.f32(), ast::StorageClass::kNone,
- MemberAccessor("data", "d"));
-
- Func("a", ast::VariableList{}, ty.void_(),
- ast::StatementList{
- Decl(var),
- Return(),
- },
- ast::AttributeList{Stage(ast::PipelineStage::kCompute),
- WorkgroupSize(1)});
- }
-
- {
- auto* var = Var("v", ty.f32(), ast::StorageClass::kNone,
- MemberAccessor("data", "d"));
-
- Func("b", ast::VariableList{}, ty.void_(),
- ast::StatementList{
- Decl(var),
- Return(),
- },
- ast::AttributeList{Stage(ast::PipelineStage::kCompute),
- WorkgroupSize(1)});
- }
-
- spirv::Builder& b = SanitizeAndBuild();
-
- ASSERT_TRUE(b.Build());
- EXPECT_EQ(DumpBuilder(b), R"(OpCapability Shader
+ // struct Data {
+ // d : f32;
+ // };
+ // @binding(0) @group(0) var<storage> data : Data;
+ //
+ // @compute @workgroup_size(1)
+ // fn a() {
+ // return;
+ // }
+ //
+ // @compute @workgroup_size(1)
+ // fn b() {
+ // return;
+ // }
+
+ auto* s = Structure("Data", {Member("d", ty.f32())});
+
+ Global("data", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kReadWrite,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(0),
+ });
+
+ {
+ auto* var = Var("v", ty.f32(), ast::StorageClass::kNone, MemberAccessor("data", "d"));
+
+ Func("a", ast::VariableList{}, ty.void_(),
+ ast::StatementList{
+ Decl(var),
+ Return(),
+ },
+ ast::AttributeList{Stage(ast::PipelineStage::kCompute), WorkgroupSize(1_i)});
+ }
+
+ {
+ auto* var = Var("v", ty.f32(), ast::StorageClass::kNone, MemberAccessor("data", "d"));
+
+ Func("b", ast::VariableList{}, ty.void_(),
+ ast::StatementList{
+ Decl(var),
+ Return(),
+ },
+ ast::AttributeList{Stage(ast::PipelineStage::kCompute), WorkgroupSize(1_i)});
+ }
+
+ spirv::Builder& b = SanitizeAndBuild();
+
+ ASSERT_TRUE(b.Build());
+ EXPECT_EQ(DumpBuilder(b), R"(OpCapability Shader
OpMemoryModel Logical GLSL450
OpEntryPoint GLCompute %7 "a"
OpEntryPoint GLCompute %17 "b"
diff --git a/chromium/third_party/dawn/src/tint/writer/spirv/builder_function_variable_test.cc b/chromium/third_party/dawn/src/tint/writer/spirv/builder_function_variable_test.cc
index 6264b27e036..da72233cd7a 100644
--- a/chromium/third_party/dawn/src/tint/writer/spirv/builder_function_variable_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/spirv/builder_function_variable_test.cc
@@ -15,46 +15,48 @@
#include "src/tint/writer/spirv/spv_dump.h"
#include "src/tint/writer/spirv/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::spirv {
namespace {
using BuilderTest = TestHelper;
TEST_F(BuilderTest, FunctionVar_NoStorageClass) {
- auto* v = Var("var", ty.f32(), ast::StorageClass::kFunction);
- WrapInFunction(v);
+ auto* v = Var("var", ty.f32(), ast::StorageClass::kFunction);
+ WrapInFunction(v);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- EXPECT_TRUE(b.GenerateFunctionVariable(v)) << b.error();
- EXPECT_EQ(DumpInstructions(b.debug()), R"(OpName %1 "var"
+ b.push_function(Function{});
+ EXPECT_TRUE(b.GenerateFunctionVariable(v)) << b.error();
+ EXPECT_EQ(DumpInstructions(b.debug()), R"(OpName %1 "var"
)");
- EXPECT_EQ(DumpInstructions(b.types()), R"(%3 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%3 = OpTypeFloat 32
%2 = OpTypePointer Function %3
%4 = OpConstantNull %3
)");
- const auto& func = b.functions()[0];
- EXPECT_EQ(DumpInstructions(func.variables()),
- R"(%1 = OpVariable %2 Function %4
+ const auto& func = b.functions()[0];
+ EXPECT_EQ(DumpInstructions(func.variables()),
+ R"(%1 = OpVariable %2 Function %4
)");
}
TEST_F(BuilderTest, FunctionVar_WithConstantConstructor) {
- auto* init = vec3<f32>(1.f, 1.f, 3.f);
- auto* v = Var("var", ty.vec3<f32>(), ast::StorageClass::kFunction, init);
- WrapInFunction(v);
+ auto* init = vec3<f32>(1_f, 1_f, 3_f);
+ auto* v = Var("var", ty.vec3<f32>(), ast::StorageClass::kFunction, init);
+ WrapInFunction(v);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- EXPECT_TRUE(b.GenerateFunctionVariable(v)) << b.error();
- ASSERT_FALSE(b.has_error()) << b.error();
+ b.push_function(Function{});
+ EXPECT_TRUE(b.GenerateFunctionVariable(v)) << b.error();
+ ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(DumpInstructions(b.debug()), R"(OpName %6 "var"
+ EXPECT_EQ(DumpInstructions(b.debug()), R"(OpName %6 "var"
)");
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
%1 = OpTypeVector %2 3
%3 = OpConstant %2 1
%4 = OpConstant %2 3
@@ -62,128 +64,128 @@ TEST_F(BuilderTest, FunctionVar_WithConstantConstructor) {
%7 = OpTypePointer Function %1
%8 = OpConstantNull %1
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].variables()),
- R"(%6 = OpVariable %7 Function %8
+ EXPECT_EQ(DumpInstructions(b.functions()[0].variables()),
+ R"(%6 = OpVariable %7 Function %8
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(OpStore %6 %5
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(OpStore %6 %5
)");
}
TEST_F(BuilderTest, FunctionVar_WithNonConstantConstructor) {
- auto* init = vec2<f32>(1.f, Add(3.f, 3.f));
+ auto* init = vec2<f32>(1_f, Add(3_f, 3_f));
- auto* v = Var("var", ty.vec2<f32>(), ast::StorageClass::kNone, init);
- WrapInFunction(v);
+ auto* v = Var("var", ty.vec2<f32>(), ast::StorageClass::kNone, init);
+ WrapInFunction(v);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- EXPECT_TRUE(b.GenerateFunctionVariable(v)) << b.error();
- ASSERT_FALSE(b.has_error()) << b.error();
+ b.push_function(Function{});
+ EXPECT_TRUE(b.GenerateFunctionVariable(v)) << b.error();
+ ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(DumpInstructions(b.debug()), R"(OpName %7 "var"
+ EXPECT_EQ(DumpInstructions(b.debug()), R"(OpName %7 "var"
)");
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
%1 = OpTypeVector %2 2
%3 = OpConstant %2 1
%4 = OpConstant %2 3
%8 = OpTypePointer Function %1
%9 = OpConstantNull %1
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].variables()),
- R"(%7 = OpVariable %8 Function %9
+ EXPECT_EQ(DumpInstructions(b.functions()[0].variables()),
+ R"(%7 = OpVariable %8 Function %9
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(%5 = OpFAdd %2 %4 %4
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(%5 = OpFAdd %2 %4 %4
%6 = OpCompositeConstruct %1 %3 %5
OpStore %7 %6
)");
}
TEST_F(BuilderTest, FunctionVar_WithNonConstantConstructorLoadedFromVar) {
- // var v : f32 = 1.0;
- // var v2 : f32 = v; // Should generate the load and store automatically.
+ // var v : f32 = 1.0;
+ // var v2 : f32 = v; // Should generate the load and store automatically.
- auto* v = Var("v", ty.f32(), ast::StorageClass::kNone, Expr(1.f));
+ auto* v = Var("v", ty.f32(), ast::StorageClass::kNone, Expr(1_f));
- auto* v2 = Var("v2", ty.f32(), ast::StorageClass::kNone, Expr("v"));
- WrapInFunction(v, v2);
+ auto* v2 = Var("v2", ty.f32(), ast::StorageClass::kNone, Expr("v"));
+ WrapInFunction(v, v2);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- EXPECT_TRUE(b.GenerateFunctionVariable(v)) << b.error();
- EXPECT_TRUE(b.GenerateFunctionVariable(v2)) << b.error();
- ASSERT_FALSE(b.has_error()) << b.error();
+ b.push_function(Function{});
+ EXPECT_TRUE(b.GenerateFunctionVariable(v)) << b.error();
+ EXPECT_TRUE(b.GenerateFunctionVariable(v2)) << b.error();
+ ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(DumpInstructions(b.debug()), R"(OpName %3 "v"
+ EXPECT_EQ(DumpInstructions(b.debug()), R"(OpName %3 "v"
OpName %7 "v2"
)");
- EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeFloat 32
%2 = OpConstant %1 1
%4 = OpTypePointer Function %1
%5 = OpConstantNull %1
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].variables()),
- R"(%3 = OpVariable %4 Function %5
+ EXPECT_EQ(DumpInstructions(b.functions()[0].variables()),
+ R"(%3 = OpVariable %4 Function %5
%7 = OpVariable %4 Function %5
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(OpStore %3 %2
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(OpStore %3 %2
%6 = OpLoad %1 %3
OpStore %7 %6
)");
}
TEST_F(BuilderTest, FunctionVar_ConstWithVarInitializer) {
- // var v : f32 = 1.0;
- // let v2 : f32 = v; // Should generate the load
+ // var v : f32 = 1.0;
+ // let v2 : f32 = v; // Should generate the load
- auto* v = Var("v", ty.f32(), ast::StorageClass::kNone, Expr(1.f));
+ auto* v = Var("v", ty.f32(), ast::StorageClass::kNone, Expr(1_f));
- auto* v2 = Var("v2", ty.f32(), ast::StorageClass::kNone, Expr("v"));
- WrapInFunction(v, v2);
+ auto* v2 = Var("v2", ty.f32(), ast::StorageClass::kNone, Expr("v"));
+ WrapInFunction(v, v2);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- EXPECT_TRUE(b.GenerateFunctionVariable(v)) << b.error();
- EXPECT_TRUE(b.GenerateFunctionVariable(v2)) << b.error();
- ASSERT_FALSE(b.has_error()) << b.error();
+ b.push_function(Function{});
+ EXPECT_TRUE(b.GenerateFunctionVariable(v)) << b.error();
+ EXPECT_TRUE(b.GenerateFunctionVariable(v2)) << b.error();
+ ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(DumpInstructions(b.debug()), R"(OpName %3 "v"
+ EXPECT_EQ(DumpInstructions(b.debug()), R"(OpName %3 "v"
OpName %7 "v2"
)");
- EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeFloat 32
%2 = OpConstant %1 1
%4 = OpTypePointer Function %1
%5 = OpConstantNull %1
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].variables()),
- R"(%3 = OpVariable %4 Function %5
+ EXPECT_EQ(DumpInstructions(b.functions()[0].variables()),
+ R"(%3 = OpVariable %4 Function %5
%7 = OpVariable %4 Function %5
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(OpStore %3 %2
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(OpStore %3 %2
%6 = OpLoad %1 %3
OpStore %7 %6
)");
}
TEST_F(BuilderTest, FunctionVar_Const) {
- auto* init = vec3<f32>(1.f, 1.f, 3.f);
+ auto* init = vec3<f32>(1_f, 1_f, 3_f);
- auto* v = Const("var", ty.vec3<f32>(), init);
+ auto* v = Let("var", ty.vec3<f32>(), init);
- WrapInFunction(v);
+ WrapInFunction(v);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- EXPECT_TRUE(b.GenerateFunctionVariable(v)) << b.error();
- ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_TRUE(b.GenerateFunctionVariable(v)) << b.error();
+ ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
%1 = OpTypeVector %2 3
%3 = OpConstant %2 1
%4 = OpConstant %2 3
diff --git a/chromium/third_party/dawn/src/tint/writer/spirv/builder_global_variable_test.cc b/chromium/third_party/dawn/src/tint/writer/spirv/builder_global_variable_test.cc
index 16ae30bff3e..9b30bb168ab 100644
--- a/chromium/third_party/dawn/src/tint/writer/spirv/builder_global_variable_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/spirv/builder_global_variable_test.cc
@@ -17,20 +17,22 @@
#include "src/tint/writer/spirv/spv_dump.h"
#include "src/tint/writer/spirv/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::spirv {
namespace {
using BuilderTest = TestHelper;
TEST_F(BuilderTest, GlobalVar_WithStorageClass) {
- auto* v = Global("var", ty.f32(), ast::StorageClass::kPrivate);
+ auto* v = Global("var", ty.f32(), ast::StorageClass::kPrivate);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- EXPECT_TRUE(b.GenerateGlobalVariable(v)) << b.error();
- EXPECT_EQ(DumpInstructions(b.debug()), R"(OpName %1 "var"
+ EXPECT_TRUE(b.GenerateGlobalVariable(v)) << b.error();
+ EXPECT_EQ(DumpInstructions(b.debug()), R"(OpName %1 "var"
)");
- EXPECT_EQ(DumpInstructions(b.types()), R"(%3 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%3 = OpTypeFloat 32
%2 = OpTypePointer Private %3
%4 = OpConstantNull %3
%1 = OpVariable %2 Private %4
@@ -38,18 +40,18 @@ TEST_F(BuilderTest, GlobalVar_WithStorageClass) {
}
TEST_F(BuilderTest, GlobalVar_WithConstructor) {
- auto* init = vec3<f32>(1.f, 1.f, 3.f);
+ auto* init = vec3<f32>(1_f, 1_f, 3_f);
- auto* v = Global("var", ty.vec3<f32>(), ast::StorageClass::kPrivate, init);
+ auto* v = Global("var", ty.vec3<f32>(), ast::StorageClass::kPrivate, init);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- EXPECT_TRUE(b.GenerateGlobalVariable(v)) << b.error();
- ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_TRUE(b.GenerateGlobalVariable(v)) << b.error();
+ ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(DumpInstructions(b.debug()), R"(OpName %6 "var"
+ EXPECT_EQ(DumpInstructions(b.debug()), R"(OpName %6 "var"
)");
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
%1 = OpTypeVector %2 3
%3 = OpConstant %2 1
%4 = OpConstant %2 3
@@ -60,18 +62,18 @@ TEST_F(BuilderTest, GlobalVar_WithConstructor) {
}
TEST_F(BuilderTest, GlobalVar_Const) {
- auto* init = vec3<f32>(1.f, 1.f, 3.f);
+ auto* init = vec3<f32>(1_f, 1_f, 3_f);
- auto* v = GlobalConst("var", ty.vec3<f32>(), init);
+ auto* v = GlobalConst("var", ty.vec3<f32>(), init);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- EXPECT_TRUE(b.GenerateGlobalVariable(v)) << b.error();
- ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_TRUE(b.GenerateGlobalVariable(v)) << b.error();
+ ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(DumpInstructions(b.debug()), R"(OpName %5 "var"
+ EXPECT_EQ(DumpInstructions(b.debug()), R"(OpName %5 "var"
)");
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
%1 = OpTypeVector %2 3
%3 = OpConstant %2 1
%4 = OpConstant %2 3
@@ -80,16 +82,16 @@ TEST_F(BuilderTest, GlobalVar_Const) {
}
TEST_F(BuilderTest, GlobalVar_Complex_Constructor) {
- auto* init = vec3<f32>(ast::ExpressionList{Expr(1.f), Expr(2.f), Expr(3.f)});
+ auto* init = vec3<f32>(1_f, 2_f, 3_f);
- auto* v = GlobalConst("var", ty.vec3<f32>(), init);
+ auto* v = GlobalConst("var", ty.vec3<f32>(), init);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- EXPECT_TRUE(b.GenerateGlobalVariable(v)) << b.error();
- ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_TRUE(b.GenerateGlobalVariable(v)) << b.error();
+ ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
%1 = OpTypeVector %2 3
%3 = OpConstant %2 1
%4 = OpConstant %2 2
@@ -98,248 +100,238 @@ TEST_F(BuilderTest, GlobalVar_Complex_Constructor) {
)");
}
-TEST_F(BuilderTest, GlobalVar_Complex_ConstructorWithExtract) {
- auto* init = vec3<f32>(vec2<f32>(1.f, 2.f), 3.f);
+TEST_F(BuilderTest, GlobalVar_Complex_ConstructorNestedVector) {
+ auto* init = vec3<f32>(vec2<f32>(1_f, 2_f), 3_f);
- auto* v = GlobalConst("var", ty.vec3<f32>(), init);
+ auto* v = GlobalConst("var", ty.vec3<f32>(), init);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- EXPECT_TRUE(b.GenerateGlobalVariable(v)) << b.error();
- ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_TRUE(b.GenerateGlobalVariable(v)) << b.error();
+ ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
%1 = OpTypeVector %2 3
-%3 = OpTypeVector %2 2
-%4 = OpConstant %2 1
-%5 = OpConstant %2 2
-%6 = OpConstantComposite %3 %4 %5
-%8 = OpTypeInt 32 0
-%9 = OpConstant %8 0
-%7 = OpSpecConstantOp %2 CompositeExtract %6 9
-%11 = OpConstant %8 1
-%10 = OpSpecConstantOp %2 CompositeExtract %6 11
-%12 = OpConstant %2 3
-%13 = OpSpecConstantComposite %1 %7 %10 %12
+%3 = OpConstant %2 1
+%4 = OpConstant %2 2
+%5 = OpConstant %2 3
+%6 = OpConstantComposite %1 %3 %4 %5
)");
}
TEST_F(BuilderTest, GlobalVar_WithBindingAndGroup) {
- auto* v = Global("var", ty.sampler(ast::SamplerKind::kSampler),
- ast::StorageClass::kNone, nullptr,
- ast::AttributeList{
- create<ast::BindingAttribute>(2),
- create<ast::GroupAttribute>(3),
- });
+ auto* v =
+ Global("var", ty.sampler(ast::SamplerKind::kSampler), ast::StorageClass::kNone, nullptr,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(2),
+ create<ast::GroupAttribute>(3),
+ });
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- EXPECT_TRUE(b.GenerateGlobalVariable(v)) << b.error();
- EXPECT_EQ(DumpInstructions(b.debug()), R"(OpName %1 "var"
+ EXPECT_TRUE(b.GenerateGlobalVariable(v)) << b.error();
+ EXPECT_EQ(DumpInstructions(b.debug()), R"(OpName %1 "var"
)");
- EXPECT_EQ(DumpInstructions(b.annots()), R"(OpDecorate %1 Binding 2
+ EXPECT_EQ(DumpInstructions(b.annots()), R"(OpDecorate %1 Binding 2
OpDecorate %1 DescriptorSet 3
)");
- EXPECT_EQ(DumpInstructions(b.types()), R"(%3 = OpTypeSampler
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%3 = OpTypeSampler
%2 = OpTypePointer UniformConstant %3
%1 = OpVariable %2 UniformConstant
)");
}
TEST_F(BuilderTest, GlobalVar_Override_Bool) {
- auto* v = Override("var", ty.bool_(), Expr(true),
- ast::AttributeList{
- Id(1200),
- });
+ auto* v = Override("var", ty.bool_(), Expr(true),
+ ast::AttributeList{
+ Id(1200),
+ });
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- EXPECT_TRUE(b.GenerateGlobalVariable(v)) << b.error();
- EXPECT_EQ(DumpInstructions(b.debug()), R"(OpName %2 "var"
+ EXPECT_TRUE(b.GenerateGlobalVariable(v)) << b.error();
+ EXPECT_EQ(DumpInstructions(b.debug()), R"(OpName %2 "var"
)");
- EXPECT_EQ(DumpInstructions(b.annots()), R"(OpDecorate %2 SpecId 1200
+ EXPECT_EQ(DumpInstructions(b.annots()), R"(OpDecorate %2 SpecId 1200
)");
- EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeBool
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeBool
%2 = OpSpecConstantTrue %1
)");
}
TEST_F(BuilderTest, GlobalVar_Override_Bool_ZeroValue) {
- auto* v = Override("var", ty.bool_(), Construct<bool>(),
- ast::AttributeList{
- Id(1200),
- });
+ auto* v = Override("var", ty.bool_(), Construct<bool>(),
+ ast::AttributeList{
+ Id(1200),
+ });
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- EXPECT_TRUE(b.GenerateGlobalVariable(v)) << b.error();
- EXPECT_EQ(DumpInstructions(b.debug()), R"(OpName %2 "var"
+ EXPECT_TRUE(b.GenerateGlobalVariable(v)) << b.error();
+ EXPECT_EQ(DumpInstructions(b.debug()), R"(OpName %2 "var"
)");
- EXPECT_EQ(DumpInstructions(b.annots()), R"(OpDecorate %2 SpecId 1200
+ EXPECT_EQ(DumpInstructions(b.annots()), R"(OpDecorate %2 SpecId 1200
)");
- EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeBool
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeBool
%2 = OpSpecConstantFalse %1
)");
}
TEST_F(BuilderTest, GlobalVar_Override_Bool_NoConstructor) {
- auto* v = Override("var", ty.bool_(), nullptr,
- ast::AttributeList{
- Id(1200),
- });
+ auto* v = Override("var", ty.bool_(), nullptr,
+ ast::AttributeList{
+ Id(1200),
+ });
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- EXPECT_TRUE(b.GenerateGlobalVariable(v)) << b.error();
- EXPECT_EQ(DumpInstructions(b.debug()), R"(OpName %2 "var"
+ EXPECT_TRUE(b.GenerateGlobalVariable(v)) << b.error();
+ EXPECT_EQ(DumpInstructions(b.debug()), R"(OpName %2 "var"
)");
- EXPECT_EQ(DumpInstructions(b.annots()), R"(OpDecorate %2 SpecId 1200
+ EXPECT_EQ(DumpInstructions(b.annots()), R"(OpDecorate %2 SpecId 1200
)");
- EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeBool
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeBool
%2 = OpSpecConstantFalse %1
)");
}
TEST_F(BuilderTest, GlobalVar_Override_Scalar) {
- auto* v = Override("var", ty.f32(), Expr(2.f),
- ast::AttributeList{
- Id(0),
- });
+ auto* v = Override("var", ty.f32(), Expr(2_f),
+ ast::AttributeList{
+ Id(0),
+ });
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- EXPECT_TRUE(b.GenerateGlobalVariable(v)) << b.error();
- EXPECT_EQ(DumpInstructions(b.debug()), R"(OpName %2 "var"
+ EXPECT_TRUE(b.GenerateGlobalVariable(v)) << b.error();
+ EXPECT_EQ(DumpInstructions(b.debug()), R"(OpName %2 "var"
)");
- EXPECT_EQ(DumpInstructions(b.annots()), R"(OpDecorate %2 SpecId 0
+ EXPECT_EQ(DumpInstructions(b.annots()), R"(OpDecorate %2 SpecId 0
)");
- EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeFloat 32
%2 = OpSpecConstant %1 2
)");
}
TEST_F(BuilderTest, GlobalVar_Override_Scalar_ZeroValue) {
- auto* v = Override("var", ty.f32(), Construct<f32>(),
- ast::AttributeList{
- Id(0),
- });
+ auto* v = Override("var", ty.f32(), Construct<f32>(),
+ ast::AttributeList{
+ Id(0),
+ });
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- EXPECT_TRUE(b.GenerateGlobalVariable(v)) << b.error();
- EXPECT_EQ(DumpInstructions(b.debug()), R"(OpName %2 "var"
+ EXPECT_TRUE(b.GenerateGlobalVariable(v)) << b.error();
+ EXPECT_EQ(DumpInstructions(b.debug()), R"(OpName %2 "var"
)");
- EXPECT_EQ(DumpInstructions(b.annots()), R"(OpDecorate %2 SpecId 0
+ EXPECT_EQ(DumpInstructions(b.annots()), R"(OpDecorate %2 SpecId 0
)");
- EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeFloat 32
%2 = OpSpecConstant %1 0
)");
}
TEST_F(BuilderTest, GlobalVar_Override_Scalar_F32_NoConstructor) {
- auto* v = Override("var", ty.f32(), nullptr,
- ast::AttributeList{
- Id(0),
- });
+ auto* v = Override("var", ty.f32(), nullptr,
+ ast::AttributeList{
+ Id(0),
+ });
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- EXPECT_TRUE(b.GenerateGlobalVariable(v)) << b.error();
- EXPECT_EQ(DumpInstructions(b.debug()), R"(OpName %2 "var"
+ EXPECT_TRUE(b.GenerateGlobalVariable(v)) << b.error();
+ EXPECT_EQ(DumpInstructions(b.debug()), R"(OpName %2 "var"
)");
- EXPECT_EQ(DumpInstructions(b.annots()), R"(OpDecorate %2 SpecId 0
+ EXPECT_EQ(DumpInstructions(b.annots()), R"(OpDecorate %2 SpecId 0
)");
- EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeFloat 32
%2 = OpSpecConstant %1 0
)");
}
TEST_F(BuilderTest, GlobalVar_Override_Scalar_I32_NoConstructor) {
- auto* v = Override("var", ty.i32(), nullptr,
- ast::AttributeList{
- Id(0),
- });
+ auto* v = Override("var", ty.i32(), nullptr,
+ ast::AttributeList{
+ Id(0),
+ });
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- EXPECT_TRUE(b.GenerateGlobalVariable(v)) << b.error();
- EXPECT_EQ(DumpInstructions(b.debug()), R"(OpName %2 "var"
+ EXPECT_TRUE(b.GenerateGlobalVariable(v)) << b.error();
+ EXPECT_EQ(DumpInstructions(b.debug()), R"(OpName %2 "var"
)");
- EXPECT_EQ(DumpInstructions(b.annots()), R"(OpDecorate %2 SpecId 0
+ EXPECT_EQ(DumpInstructions(b.annots()), R"(OpDecorate %2 SpecId 0
)");
- EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeInt 32 1
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeInt 32 1
%2 = OpSpecConstant %1 0
)");
}
TEST_F(BuilderTest, GlobalVar_Override_Scalar_U32_NoConstructor) {
- auto* v = Override("var", ty.u32(), nullptr,
- ast::AttributeList{
- Id(0),
- });
+ auto* v = Override("var", ty.u32(), nullptr,
+ ast::AttributeList{
+ Id(0),
+ });
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- EXPECT_TRUE(b.GenerateGlobalVariable(v)) << b.error();
- EXPECT_EQ(DumpInstructions(b.debug()), R"(OpName %2 "var"
+ EXPECT_TRUE(b.GenerateGlobalVariable(v)) << b.error();
+ EXPECT_EQ(DumpInstructions(b.debug()), R"(OpName %2 "var"
)");
- EXPECT_EQ(DumpInstructions(b.annots()), R"(OpDecorate %2 SpecId 0
+ EXPECT_EQ(DumpInstructions(b.annots()), R"(OpDecorate %2 SpecId 0
)");
- EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeInt 32 0
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeInt 32 0
%2 = OpSpecConstant %1 0
)");
}
TEST_F(BuilderTest, GlobalVar_Override_NoId) {
- auto* var_a = Override("a", ty.bool_(), Expr(true),
- ast::AttributeList{
- Id(0),
- });
- auto* var_b = Override("b", ty.bool_(), Expr(false));
+ auto* var_a = Override("a", ty.bool_(), Expr(true),
+ ast::AttributeList{
+ Id(0),
+ });
+ auto* var_b = Override("b", ty.bool_(), Expr(false));
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- EXPECT_TRUE(b.GenerateGlobalVariable(var_a)) << b.error();
- EXPECT_TRUE(b.GenerateGlobalVariable(var_b)) << b.error();
- EXPECT_EQ(DumpInstructions(b.debug()), R"(OpName %2 "a"
+ EXPECT_TRUE(b.GenerateGlobalVariable(var_a)) << b.error();
+ EXPECT_TRUE(b.GenerateGlobalVariable(var_b)) << b.error();
+ EXPECT_EQ(DumpInstructions(b.debug()), R"(OpName %2 "a"
OpName %3 "b"
)");
- EXPECT_EQ(DumpInstructions(b.annots()), R"(OpDecorate %2 SpecId 0
+ EXPECT_EQ(DumpInstructions(b.annots()), R"(OpDecorate %2 SpecId 0
OpDecorate %3 SpecId 1
)");
- EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeBool
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeBool
%2 = OpSpecConstantTrue %1
%3 = OpSpecConstantFalse %1
)");
}
struct BuiltinData {
- ast::Builtin builtin;
- ast::StorageClass storage;
- SpvBuiltIn result;
+ ast::Builtin builtin;
+ ast::StorageClass storage;
+ SpvBuiltIn result;
};
inline std::ostream& operator<<(std::ostream& out, BuiltinData data) {
- out << data.builtin;
- return out;
+ out << data.builtin;
+ return out;
}
using BuiltinDataTest = TestParamHelper<BuiltinData>;
TEST_P(BuiltinDataTest, Convert) {
- auto params = GetParam();
+ auto params = GetParam();
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- EXPECT_EQ(b.ConvertBuiltin(params.builtin, params.storage), params.result);
+ EXPECT_EQ(b.ConvertBuiltin(params.builtin, params.storage), params.result);
}
INSTANTIATE_TEST_SUITE_P(
BuilderTest_Type,
BuiltinDataTest,
testing::Values(
- BuiltinData{ast::Builtin::kNone, ast::StorageClass::kNone,
- SpvBuiltInMax},
- BuiltinData{ast::Builtin::kPosition, ast::StorageClass::kInput,
- SpvBuiltInFragCoord},
- BuiltinData{ast::Builtin::kPosition, ast::StorageClass::kOutput,
- SpvBuiltInPosition},
+ BuiltinData{ast::Builtin::kNone, ast::StorageClass::kNone, SpvBuiltInMax},
+ BuiltinData{ast::Builtin::kPosition, ast::StorageClass::kInput, SpvBuiltInFragCoord},
+ BuiltinData{ast::Builtin::kPosition, ast::StorageClass::kOutput, SpvBuiltInPosition},
BuiltinData{
ast::Builtin::kVertexIndex,
ast::StorageClass::kInput,
@@ -347,62 +339,56 @@ INSTANTIATE_TEST_SUITE_P(
},
BuiltinData{ast::Builtin::kInstanceIndex, ast::StorageClass::kInput,
SpvBuiltInInstanceIndex},
- BuiltinData{ast::Builtin::kFrontFacing, ast::StorageClass::kInput,
- SpvBuiltInFrontFacing},
- BuiltinData{ast::Builtin::kFragDepth, ast::StorageClass::kOutput,
- SpvBuiltInFragDepth},
+ BuiltinData{ast::Builtin::kFrontFacing, ast::StorageClass::kInput, SpvBuiltInFrontFacing},
+ BuiltinData{ast::Builtin::kFragDepth, ast::StorageClass::kOutput, SpvBuiltInFragDepth},
BuiltinData{ast::Builtin::kLocalInvocationId, ast::StorageClass::kInput,
SpvBuiltInLocalInvocationId},
- BuiltinData{ast::Builtin::kLocalInvocationIndex,
- ast::StorageClass::kInput, SpvBuiltInLocalInvocationIndex},
- BuiltinData{ast::Builtin::kGlobalInvocationId,
- ast::StorageClass::kInput, SpvBuiltInGlobalInvocationId},
- BuiltinData{ast::Builtin::kWorkgroupId, ast::StorageClass::kInput,
- SpvBuiltInWorkgroupId},
+ BuiltinData{ast::Builtin::kLocalInvocationIndex, ast::StorageClass::kInput,
+ SpvBuiltInLocalInvocationIndex},
+ BuiltinData{ast::Builtin::kGlobalInvocationId, ast::StorageClass::kInput,
+ SpvBuiltInGlobalInvocationId},
+ BuiltinData{ast::Builtin::kWorkgroupId, ast::StorageClass::kInput, SpvBuiltInWorkgroupId},
BuiltinData{ast::Builtin::kNumWorkgroups, ast::StorageClass::kInput,
SpvBuiltInNumWorkgroups},
- BuiltinData{ast::Builtin::kSampleIndex, ast::StorageClass::kInput,
- SpvBuiltInSampleId},
- BuiltinData{ast::Builtin::kSampleMask, ast::StorageClass::kInput,
- SpvBuiltInSampleMask},
- BuiltinData{ast::Builtin::kSampleMask, ast::StorageClass::kOutput,
- SpvBuiltInSampleMask}));
+ BuiltinData{ast::Builtin::kSampleIndex, ast::StorageClass::kInput, SpvBuiltInSampleId},
+ BuiltinData{ast::Builtin::kSampleMask, ast::StorageClass::kInput, SpvBuiltInSampleMask},
+ BuiltinData{ast::Builtin::kSampleMask, ast::StorageClass::kOutput, SpvBuiltInSampleMask}));
TEST_F(BuilderTest, GlobalVar_DeclReadOnly) {
- // struct A {
- // a : i32;
- // };
- // var b<storage, read> : A
-
- auto* A = Structure("A", {
- Member("a", ty.i32()),
- Member("b", ty.i32()),
- });
+ // struct A {
+ // a : i32;
+ // };
+ // var b<storage, read> : A
+
+ auto* A = Structure("A", {
+ Member("a", ty.i32()),
+ Member("b", ty.i32()),
+ });
- Global("b", ty.Of(A), ast::StorageClass::kStorage, ast::Access::kRead,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(0),
- });
+ Global("b", ty.Of(A), ast::StorageClass::kStorage, ast::Access::kRead,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(0),
+ });
- spirv::Builder& b = SanitizeAndBuild();
+ spirv::Builder& b = SanitizeAndBuild();
- ASSERT_TRUE(b.Build());
+ ASSERT_TRUE(b.Build());
- EXPECT_EQ(DumpInstructions(b.annots()), R"(OpDecorate %3 Block
+ EXPECT_EQ(DumpInstructions(b.annots()), R"(OpDecorate %3 Block
OpMemberDecorate %3 0 Offset 0
OpMemberDecorate %3 1 Offset 4
OpDecorate %1 NonWritable
OpDecorate %1 Binding 0
OpDecorate %1 DescriptorSet 0
)");
- EXPECT_EQ(DumpInstructions(b.debug()), R"(OpName %3 "A"
+ EXPECT_EQ(DumpInstructions(b.debug()), R"(OpName %3 "A"
OpMemberName %3 0 "a"
OpMemberName %3 1 "b"
OpName %1 "b"
OpName %7 "unused_entry_point"
)");
- EXPECT_EQ(DumpInstructions(b.types()), R"(%4 = OpTypeInt 32 1
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%4 = OpTypeInt 32 1
%3 = OpTypeStruct %4 %4
%2 = OpTypePointer StorageBuffer %3
%1 = OpVariable %2 StorageBuffer
@@ -412,36 +398,36 @@ OpName %7 "unused_entry_point"
}
TEST_F(BuilderTest, GlobalVar_TypeAliasDeclReadOnly) {
- // struct A {
- // a : i32;
- // };
- // type B = A;
- // var b<storage, read> : B
+ // struct A {
+ // a : i32;
+ // };
+ // type B = A;
+ // var b<storage, read> : B
- auto* A = Structure("A", {Member("a", ty.i32())});
- auto* B = Alias("B", ty.Of(A));
- Global("b", ty.Of(B), ast::StorageClass::kStorage, ast::Access::kRead,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(0),
- });
+ auto* A = Structure("A", {Member("a", ty.i32())});
+ auto* B = Alias("B", ty.Of(A));
+ Global("b", ty.Of(B), ast::StorageClass::kStorage, ast::Access::kRead,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(0),
+ });
- spirv::Builder& b = SanitizeAndBuild();
+ spirv::Builder& b = SanitizeAndBuild();
- ASSERT_TRUE(b.Build());
+ ASSERT_TRUE(b.Build());
- EXPECT_EQ(DumpInstructions(b.annots()), R"(OpDecorate %3 Block
+ EXPECT_EQ(DumpInstructions(b.annots()), R"(OpDecorate %3 Block
OpMemberDecorate %3 0 Offset 0
OpDecorate %1 NonWritable
OpDecorate %1 Binding 0
OpDecorate %1 DescriptorSet 0
)");
- EXPECT_EQ(DumpInstructions(b.debug()), R"(OpName %3 "A"
+ EXPECT_EQ(DumpInstructions(b.debug()), R"(OpName %3 "A"
OpMemberName %3 0 "a"
OpName %1 "b"
OpName %7 "unused_entry_point"
)");
- EXPECT_EQ(DumpInstructions(b.types()), R"(%4 = OpTypeInt 32 1
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%4 = OpTypeInt 32 1
%3 = OpTypeStruct %4
%2 = OpTypePointer StorageBuffer %3
%1 = OpVariable %2 StorageBuffer
@@ -451,36 +437,36 @@ OpName %7 "unused_entry_point"
}
TEST_F(BuilderTest, GlobalVar_TypeAliasAssignReadOnly) {
- // struct A {
- // a : i32;
- // };
- // type B = A;
- // var<storage, read> b : B
+ // struct A {
+ // a : i32;
+ // };
+ // type B = A;
+ // var<storage, read> b : B
- auto* A = Structure("A", {Member("a", ty.i32())});
- auto* B = Alias("B", ty.Of(A));
- Global("b", ty.Of(B), ast::StorageClass::kStorage, ast::Access::kRead,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(0),
- });
+ auto* A = Structure("A", {Member("a", ty.i32())});
+ auto* B = Alias("B", ty.Of(A));
+ Global("b", ty.Of(B), ast::StorageClass::kStorage, ast::Access::kRead,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(0),
+ });
- spirv::Builder& b = SanitizeAndBuild();
+ spirv::Builder& b = SanitizeAndBuild();
- ASSERT_TRUE(b.Build());
+ ASSERT_TRUE(b.Build());
- EXPECT_EQ(DumpInstructions(b.annots()), R"(OpDecorate %3 Block
+ EXPECT_EQ(DumpInstructions(b.annots()), R"(OpDecorate %3 Block
OpMemberDecorate %3 0 Offset 0
OpDecorate %1 NonWritable
OpDecorate %1 Binding 0
OpDecorate %1 DescriptorSet 0
)");
- EXPECT_EQ(DumpInstructions(b.debug()), R"(OpName %3 "A"
+ EXPECT_EQ(DumpInstructions(b.debug()), R"(OpName %3 "A"
OpMemberName %3 0 "a"
OpName %1 "b"
OpName %7 "unused_entry_point"
)");
- EXPECT_EQ(DumpInstructions(b.types()), R"(%4 = OpTypeInt 32 1
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%4 = OpTypeInt 32 1
%3 = OpTypeStruct %4
%2 = OpTypePointer StorageBuffer %3
%1 = OpVariable %2 StorageBuffer
@@ -490,30 +476,30 @@ OpName %7 "unused_entry_point"
}
TEST_F(BuilderTest, GlobalVar_TwoVarDeclReadOnly) {
- // struct A {
- // a : i32;
- // };
- // var<storage, read> b : A
- // var<storage, read_write> c : A
-
- auto* A = Structure("A", {Member("a", ty.i32())});
- Global("b", ty.Of(A), ast::StorageClass::kStorage, ast::Access::kRead,
- ast::AttributeList{
- create<ast::GroupAttribute>(0),
- create<ast::BindingAttribute>(0),
- });
- Global("c", ty.Of(A), ast::StorageClass::kStorage, ast::Access::kReadWrite,
- ast::AttributeList{
- create<ast::GroupAttribute>(1),
- create<ast::BindingAttribute>(0),
- });
-
- spirv::Builder& b = SanitizeAndBuild();
-
- ASSERT_TRUE(b.Build());
-
- EXPECT_EQ(DumpInstructions(b.annots()),
- R"(OpDecorate %3 Block
+ // struct A {
+ // a : i32;
+ // };
+ // var<storage, read> b : A
+ // var<storage, read_write> c : A
+
+ auto* A = Structure("A", {Member("a", ty.i32())});
+ Global("b", ty.Of(A), ast::StorageClass::kStorage, ast::Access::kRead,
+ ast::AttributeList{
+ create<ast::GroupAttribute>(0),
+ create<ast::BindingAttribute>(0),
+ });
+ Global("c", ty.Of(A), ast::StorageClass::kStorage, ast::Access::kReadWrite,
+ ast::AttributeList{
+ create<ast::GroupAttribute>(1),
+ create<ast::BindingAttribute>(0),
+ });
+
+ spirv::Builder& b = SanitizeAndBuild();
+
+ ASSERT_TRUE(b.Build());
+
+ EXPECT_EQ(DumpInstructions(b.annots()),
+ R"(OpDecorate %3 Block
OpMemberDecorate %3 0 Offset 0
OpDecorate %1 NonWritable
OpDecorate %1 DescriptorSet 0
@@ -521,13 +507,13 @@ OpDecorate %1 Binding 0
OpDecorate %5 DescriptorSet 1
OpDecorate %5 Binding 0
)");
- EXPECT_EQ(DumpInstructions(b.debug()), R"(OpName %3 "A"
+ EXPECT_EQ(DumpInstructions(b.debug()), R"(OpName %3 "A"
OpMemberName %3 0 "a"
OpName %1 "b"
OpName %5 "c"
OpName %8 "unused_entry_point"
)");
- EXPECT_EQ(DumpInstructions(b.types()), R"(%4 = OpTypeInt 32 1
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%4 = OpTypeInt 32 1
%3 = OpTypeStruct %4
%2 = OpTypePointer StorageBuffer %3
%1 = OpVariable %2 StorageBuffer
@@ -538,27 +524,26 @@ OpName %8 "unused_entry_point"
}
TEST_F(BuilderTest, GlobalVar_TextureStorageWriteOnly) {
- // var<uniform_constant> a : texture_storage_2d<r32uint, write>;
+ // var<uniform_constant> a : texture_storage_2d<r32uint, write>;
- auto* type =
- ty.storage_texture(ast::TextureDimension::k2d, ast::TexelFormat::kR32Uint,
- ast::Access::kWrite);
+ auto* type = ty.storage_texture(ast::TextureDimension::k2d, ast::TexelFormat::kR32Uint,
+ ast::Access::kWrite);
- auto* var_a = Global("a", type,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(0),
- });
+ auto* var_a = Global("a", type,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(0),
+ });
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- EXPECT_TRUE(b.GenerateGlobalVariable(var_a)) << b.error();
+ EXPECT_TRUE(b.GenerateGlobalVariable(var_a)) << b.error();
- EXPECT_EQ(DumpInstructions(b.annots()), R"(OpDecorate %1 NonReadable
+ EXPECT_EQ(DumpInstructions(b.annots()), R"(OpDecorate %1 NonReadable
OpDecorate %1 Binding 0
OpDecorate %1 DescriptorSet 0
)");
- EXPECT_EQ(DumpInstructions(b.types()), R"(%4 = OpTypeInt 32 0
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%4 = OpTypeInt 32 0
%3 = OpTypeImage %4 2D 0 0 0 2 R32ui
%2 = OpTypePointer UniformConstant %3
%1 = OpVariable %2 UniformConstant
@@ -570,42 +555,40 @@ OpDecorate %1 DescriptorSet 0
// Test disabled as storage textures currently only support 'write' access. In
// the future we'll likely support read_write.
TEST_F(BuilderTest, DISABLED_GlobalVar_TextureStorageWithDifferentAccess) {
- // var<uniform_constant> a : texture_storage_2d<r32uint, read_write>;
- // var<uniform_constant> b : texture_storage_2d<r32uint, write>;
+ // var<uniform_constant> a : texture_storage_2d<r32uint, read_write>;
+ // var<uniform_constant> b : texture_storage_2d<r32uint, write>;
- auto* type_a =
- ty.storage_texture(ast::TextureDimension::k2d, ast::TexelFormat::kR32Uint,
- ast::Access::kReadWrite);
- auto* var_a = Global("a", type_a, ast::StorageClass::kNone,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(0),
- });
+ auto* type_a = ty.storage_texture(ast::TextureDimension::k2d, ast::TexelFormat::kR32Uint,
+ ast::Access::kReadWrite);
+ auto* var_a = Global("a", type_a, ast::StorageClass::kNone,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(0),
+ });
- auto* type_b =
- ty.storage_texture(ast::TextureDimension::k2d, ast::TexelFormat::kR32Uint,
- ast::Access::kWrite);
- auto* var_b = Global("b", type_b, ast::StorageClass::kNone,
- ast::AttributeList{
- create<ast::BindingAttribute>(1),
- create<ast::GroupAttribute>(0),
- });
+ auto* type_b = ty.storage_texture(ast::TextureDimension::k2d, ast::TexelFormat::kR32Uint,
+ ast::Access::kWrite);
+ auto* var_b = Global("b", type_b, ast::StorageClass::kNone,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(1),
+ create<ast::GroupAttribute>(0),
+ });
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- EXPECT_TRUE(b.GenerateGlobalVariable(var_a)) << b.error();
- EXPECT_TRUE(b.GenerateGlobalVariable(var_b)) << b.error();
+ EXPECT_TRUE(b.GenerateGlobalVariable(var_a)) << b.error();
+ EXPECT_TRUE(b.GenerateGlobalVariable(var_b)) << b.error();
- EXPECT_EQ(DumpInstructions(b.annots()), R"(OpDecorate %1 NonWritable
+ EXPECT_EQ(DumpInstructions(b.annots()), R"(OpDecorate %1 NonWritable
OpDecorate %1 Binding 0
OpDecorate %1 DescriptorSet 0
OpDecorate %5 NonReadable
OpDecorate %5 Binding 1
OpDecorate %5 DescriptorSet 0
)");
- // There must only be one OpTypeImage declaration with the same
- // arguments
- EXPECT_EQ(DumpInstructions(b.types()), R"(%4 = OpTypeInt 32 0
+ // There must only be one OpTypeImage declaration with the same
+ // arguments
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%4 = OpTypeInt 32 0
%3 = OpTypeImage %4 2D 0 0 0 2 R32ui
%2 = OpTypePointer UniformConstant %3
%1 = OpVariable %2 UniformConstant
@@ -615,31 +598,30 @@ OpDecorate %5 DescriptorSet 0
}
TEST_F(BuilderTest, GlobalVar_WorkgroupWithZeroInit) {
- auto* type_scalar = ty.i32();
- auto* var_scalar = Global("a", type_scalar, ast::StorageClass::kWorkgroup);
+ auto* type_scalar = ty.i32();
+ auto* var_scalar = Global("a", type_scalar, ast::StorageClass::kWorkgroup);
- auto* type_array = ty.array<f32, 16>();
- auto* var_array = Global("b", type_array, ast::StorageClass::kWorkgroup);
+ auto* type_array = ty.array<f32, 16>();
+ auto* var_array = Global("b", type_array, ast::StorageClass::kWorkgroup);
- auto* type_struct = Structure("C", {
- Member("a", ty.i32()),
- Member("b", ty.i32()),
- });
- auto* var_struct =
- Global("c", ty.Of(type_struct), ast::StorageClass::kWorkgroup);
+ auto* type_struct = Structure("C", {
+ Member("a", ty.i32()),
+ Member("b", ty.i32()),
+ });
+ auto* var_struct = Global("c", ty.Of(type_struct), ast::StorageClass::kWorkgroup);
- program = std::make_unique<Program>(std::move(*this));
+ program = std::make_unique<Program>(std::move(*this));
- constexpr bool kZeroInitializeWorkgroupMemory = true;
- std::unique_ptr<spirv::Builder> b = std::make_unique<spirv::Builder>(
- program.get(), kZeroInitializeWorkgroupMemory);
+ constexpr bool kZeroInitializeWorkgroupMemory = true;
+ std::unique_ptr<spirv::Builder> b =
+ std::make_unique<spirv::Builder>(program.get(), kZeroInitializeWorkgroupMemory);
- EXPECT_TRUE(b->GenerateGlobalVariable(var_scalar)) << b->error();
- EXPECT_TRUE(b->GenerateGlobalVariable(var_array)) << b->error();
- EXPECT_TRUE(b->GenerateGlobalVariable(var_struct)) << b->error();
- ASSERT_FALSE(b->has_error()) << b->error();
+ EXPECT_TRUE(b->GenerateGlobalVariable(var_scalar)) << b->error();
+ EXPECT_TRUE(b->GenerateGlobalVariable(var_array)) << b->error();
+ EXPECT_TRUE(b->GenerateGlobalVariable(var_struct)) << b->error();
+ ASSERT_FALSE(b->has_error()) << b->error();
- EXPECT_EQ(DumpInstructions(b->types()), R"(%3 = OpTypeInt 32 1
+ EXPECT_EQ(DumpInstructions(b->types()), R"(%3 = OpTypeInt 32 1
%2 = OpTypePointer Workgroup %3
%4 = OpConstantNull %3
%1 = OpVariable %2 Workgroup %4
diff --git a/chromium/third_party/dawn/src/tint/writer/spirv/builder_ident_expression_test.cc b/chromium/third_party/dawn/src/tint/writer/spirv/builder_ident_expression_test.cc
index 48d35a35bbb..e2e9826a27b 100644
--- a/chromium/third_party/dawn/src/tint/writer/spirv/builder_ident_expression_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/spirv/builder_ident_expression_test.cc
@@ -15,145 +15,145 @@
#include "src/tint/writer/spirv/spv_dump.h"
#include "src/tint/writer/spirv/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::spirv {
namespace {
using BuilderTest = TestHelper;
TEST_F(BuilderTest, IdentifierExpression_GlobalConst) {
- auto* init = vec3<f32>(1.f, 1.f, 3.f);
+ auto* init = vec3<f32>(1_f, 1_f, 3_f);
- auto* v = GlobalConst("var", ty.vec3<f32>(), init);
+ auto* v = GlobalConst("var", ty.vec3<f32>(), init);
- auto* expr = Expr("var");
- WrapInFunction(expr);
+ auto* expr = Expr("var");
+ WrapInFunction(expr);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- EXPECT_TRUE(b.GenerateGlobalVariable(v)) << b.error();
- ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_TRUE(b.GenerateGlobalVariable(v)) << b.error();
+ ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
%1 = OpTypeVector %2 3
%3 = OpConstant %2 1
%4 = OpConstant %2 3
%5 = OpConstantComposite %1 %3 %3 %4
)");
- EXPECT_EQ(b.GenerateIdentifierExpression(expr), 5u);
+ EXPECT_EQ(b.GenerateIdentifierExpression(expr), 5u);
}
TEST_F(BuilderTest, IdentifierExpression_GlobalVar) {
- auto* v = Global("var", ty.f32(), ast::StorageClass::kPrivate);
+ auto* v = Global("var", ty.f32(), ast::StorageClass::kPrivate);
- auto* expr = Expr("var");
- WrapInFunction(expr);
+ auto* expr = Expr("var");
+ WrapInFunction(expr);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- EXPECT_TRUE(b.GenerateGlobalVariable(v)) << b.error();
- EXPECT_EQ(DumpInstructions(b.debug()), R"(OpName %1 "var"
+ b.push_function(Function{});
+ EXPECT_TRUE(b.GenerateGlobalVariable(v)) << b.error();
+ EXPECT_EQ(DumpInstructions(b.debug()), R"(OpName %1 "var"
)");
- EXPECT_EQ(DumpInstructions(b.types()), R"(%3 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%3 = OpTypeFloat 32
%2 = OpTypePointer Private %3
%4 = OpConstantNull %3
%1 = OpVariable %2 Private %4
)");
- EXPECT_EQ(b.GenerateIdentifierExpression(expr), 1u);
+ EXPECT_EQ(b.GenerateIdentifierExpression(expr), 1u);
}
TEST_F(BuilderTest, IdentifierExpression_FunctionConst) {
- auto* init = vec3<f32>(1.f, 1.f, 3.f);
+ auto* init = vec3<f32>(1_f, 1_f, 3_f);
- auto* v = Const("var", ty.vec3<f32>(), init);
+ auto* v = Let("var", ty.vec3<f32>(), init);
- auto* expr = Expr("var");
- WrapInFunction(v, expr);
+ auto* expr = Expr("var");
+ WrapInFunction(v, expr);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- EXPECT_TRUE(b.GenerateFunctionVariable(v)) << b.error();
- ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_TRUE(b.GenerateFunctionVariable(v)) << b.error();
+ ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
%1 = OpTypeVector %2 3
%3 = OpConstant %2 1
%4 = OpConstant %2 3
%5 = OpConstantComposite %1 %3 %3 %4
)");
- EXPECT_EQ(b.GenerateIdentifierExpression(expr), 5u);
+ EXPECT_EQ(b.GenerateIdentifierExpression(expr), 5u);
}
TEST_F(BuilderTest, IdentifierExpression_FunctionVar) {
- auto* v = Var("var", ty.f32(), ast::StorageClass::kFunction);
- auto* expr = Expr("var");
- WrapInFunction(v, expr);
+ auto* v = Var("var", ty.f32(), ast::StorageClass::kFunction);
+ auto* expr = Expr("var");
+ WrapInFunction(v, expr);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- EXPECT_TRUE(b.GenerateFunctionVariable(v)) << b.error();
- EXPECT_EQ(DumpInstructions(b.debug()), R"(OpName %1 "var"
+ b.push_function(Function{});
+ EXPECT_TRUE(b.GenerateFunctionVariable(v)) << b.error();
+ EXPECT_EQ(DumpInstructions(b.debug()), R"(OpName %1 "var"
)");
- EXPECT_EQ(DumpInstructions(b.types()), R"(%3 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%3 = OpTypeFloat 32
%2 = OpTypePointer Function %3
%4 = OpConstantNull %3
)");
- const auto& func = b.functions()[0];
- EXPECT_EQ(DumpInstructions(func.variables()),
- R"(%1 = OpVariable %2 Function %4
+ const auto& func = b.functions()[0];
+ EXPECT_EQ(DumpInstructions(func.variables()),
+ R"(%1 = OpVariable %2 Function %4
)");
- EXPECT_EQ(b.GenerateIdentifierExpression(expr), 1u);
+ EXPECT_EQ(b.GenerateIdentifierExpression(expr), 1u);
}
TEST_F(BuilderTest, IdentifierExpression_Load) {
- auto* var = Global("var", ty.i32(), ast::StorageClass::kPrivate);
+ auto* var = Global("var", ty.i32(), ast::StorageClass::kPrivate);
- auto* expr = Add("var", "var");
- WrapInFunction(expr);
+ auto* expr = Add("var", "var");
+ WrapInFunction(expr);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- ASSERT_TRUE(b.GenerateGlobalVariable(var)) << b.error();
+ b.push_function(Function{});
+ ASSERT_TRUE(b.GenerateGlobalVariable(var)) << b.error();
- EXPECT_EQ(b.GenerateBinaryExpression(expr->As<ast::BinaryExpression>()), 7u)
- << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%3 = OpTypeInt 32 1
+ EXPECT_EQ(b.GenerateBinaryExpression(expr->As<ast::BinaryExpression>()), 7u) << b.error();
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%3 = OpTypeInt 32 1
%2 = OpTypePointer Private %3
%4 = OpConstantNull %3
%1 = OpVariable %2 Private %4
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(%5 = OpLoad %3 %1
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(%5 = OpLoad %3 %1
%6 = OpLoad %3 %1
%7 = OpIAdd %3 %5 %6
)");
}
TEST_F(BuilderTest, IdentifierExpression_NoLoadConst) {
- auto* var = GlobalConst("var", ty.i32(), Expr(2));
+ auto* var = GlobalConst("var", ty.i32(), Expr(2_i));
- auto* expr = Add("var", "var");
- WrapInFunction(expr);
+ auto* expr = Add("var", "var");
+ WrapInFunction(expr);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- ASSERT_TRUE(b.GenerateGlobalVariable(var)) << b.error();
+ b.push_function(Function{});
+ ASSERT_TRUE(b.GenerateGlobalVariable(var)) << b.error();
- EXPECT_EQ(b.GenerateBinaryExpression(expr->As<ast::BinaryExpression>()), 3u)
- << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeInt 32 1
+ EXPECT_EQ(b.GenerateBinaryExpression(expr->As<ast::BinaryExpression>()), 3u) << b.error();
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeInt 32 1
%2 = OpConstant %1 2
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(%3 = OpIAdd %1 %2 %2
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(%3 = OpIAdd %1 %2 %2
)");
}
diff --git a/chromium/third_party/dawn/src/tint/writer/spirv/builder_if_test.cc b/chromium/third_party/dawn/src/tint/writer/spirv/builder_if_test.cc
index 661bbd8658c..8c7c8eef0f2 100644
--- a/chromium/third_party/dawn/src/tint/writer/spirv/builder_if_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/spirv/builder_if_test.cc
@@ -15,27 +15,29 @@
#include "src/tint/writer/spirv/spv_dump.h"
#include "src/tint/writer/spirv/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::spirv {
namespace {
using BuilderTest = TestHelper;
TEST_F(BuilderTest, If_Empty) {
- // if (true) {
- // }
- auto* expr = If(true, Block());
- WrapInFunction(expr);
+ // if (true) {
+ // }
+ auto* expr = If(true, Block());
+ WrapInFunction(expr);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
+ b.push_function(Function{});
- EXPECT_TRUE(b.GenerateIfStatement(expr)) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeBool
+ EXPECT_TRUE(b.GenerateIfStatement(expr)) << b.error();
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeBool
%2 = OpConstantTrue %1
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(OpSelectionMerge %3 None
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(OpSelectionMerge %3 None
OpBranchConditional %2 %4 %3
%4 = OpLabel
OpBranch %3
@@ -44,40 +46,40 @@ OpBranch %3
}
TEST_F(BuilderTest, If_Empty_OutsideFunction_IsError) {
- // Outside a function.
- // if (true) {
- // }
+ // Outside a function.
+ // if (true) {
+ // }
- auto* block = Block();
- auto* expr = If(true, block);
- WrapInFunction(expr);
+ auto* block = Block();
+ auto* expr = If(true, block);
+ WrapInFunction(expr);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- EXPECT_FALSE(b.GenerateIfStatement(expr)) << b.error();
- EXPECT_TRUE(b.has_error());
- EXPECT_EQ(b.error(),
- "Internal error: trying to add SPIR-V instruction 247 outside a "
- "function");
+ EXPECT_FALSE(b.GenerateIfStatement(expr)) << b.error();
+ EXPECT_TRUE(b.has_error());
+ EXPECT_EQ(b.error(),
+ "Internal error: trying to add SPIR-V instruction 247 outside a "
+ "function");
}
TEST_F(BuilderTest, If_WithStatements) {
- // if (true) {
- // v = 2;
- // }
+ // if (true) {
+ // v = 2;
+ // }
- auto* var = Global("v", ty.i32(), ast::StorageClass::kPrivate);
- auto* body = Block(Assign("v", 2));
- auto* expr = If(true, body);
- WrapInFunction(expr);
+ auto* var = Global("v", ty.i32(), ast::StorageClass::kPrivate);
+ auto* body = Block(Assign("v", 2_i));
+ auto* expr = If(true, body);
+ WrapInFunction(expr);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- ASSERT_TRUE(b.GenerateGlobalVariable(var)) << b.error();
+ b.push_function(Function{});
+ ASSERT_TRUE(b.GenerateGlobalVariable(var)) << b.error();
- EXPECT_TRUE(b.GenerateIfStatement(expr)) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%3 = OpTypeInt 32 1
+ EXPECT_TRUE(b.GenerateIfStatement(expr)) << b.error();
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%3 = OpTypeInt 32 1
%2 = OpTypePointer Private %3
%4 = OpConstantNull %3
%1 = OpVariable %2 Private %4
@@ -85,8 +87,8 @@ TEST_F(BuilderTest, If_WithStatements) {
%6 = OpConstantTrue %5
%9 = OpConstant %3 2
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(OpSelectionMerge %7 None
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(OpSelectionMerge %7 None
OpBranchConditional %6 %8 %7
%8 = OpLabel
OpStore %1 %9
@@ -96,26 +98,26 @@ OpBranch %7
}
TEST_F(BuilderTest, If_WithElse) {
- // if (true) {
- // v = 2;
- // } else {
- // v = 3;
- // }
+ // if (true) {
+ // v = 2i;
+ // } else {
+ // v = 3i;
+ // }
- auto* var = Global("v", ty.i32(), ast::StorageClass::kPrivate);
- auto* body = Block(Assign("v", 2));
- auto* else_body = Block(Assign("v", 3));
+ auto* var = Global("v", ty.i32(), ast::StorageClass::kPrivate);
+ auto* body = Block(Assign("v", 2_i));
+ auto* else_body = Block(Assign("v", 3_i));
- auto* expr = If(true, body, Else(else_body));
- WrapInFunction(expr);
+ auto* expr = If(true, body, Else(else_body));
+ WrapInFunction(expr);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- ASSERT_TRUE(b.GenerateGlobalVariable(var)) << b.error();
+ b.push_function(Function{});
+ ASSERT_TRUE(b.GenerateGlobalVariable(var)) << b.error();
- EXPECT_TRUE(b.GenerateIfStatement(expr)) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%3 = OpTypeInt 32 1
+ EXPECT_TRUE(b.GenerateIfStatement(expr)) << b.error();
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%3 = OpTypeInt 32 1
%2 = OpTypePointer Private %3
%4 = OpConstantNull %3
%1 = OpVariable %2 Private %4
@@ -124,8 +126,8 @@ TEST_F(BuilderTest, If_WithElse) {
%10 = OpConstant %3 2
%11 = OpConstant %3 3
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(OpSelectionMerge %7 None
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(OpSelectionMerge %7 None
OpBranchConditional %6 %8 %9
%8 = OpLabel
OpStore %1 %10
@@ -138,26 +140,26 @@ OpBranch %7
}
TEST_F(BuilderTest, If_WithElseIf) {
- // if (true) {
- // v = 2;
- // } else if (true) {
- // v = 3;
- // }
+ // if (true) {
+ // v = 2i;
+ // } else if (true) {
+ // v = 3i;
+ // }
- auto* var = Global("v", ty.i32(), ast::StorageClass::kPrivate);
- auto* body = Block(Assign("v", 2));
- auto* else_body = Block(Assign("v", 3));
+ auto* var = Global("v", ty.i32(), ast::StorageClass::kPrivate);
+ auto* body = Block(Assign("v", 2_i));
+ auto* else_body = Block(Assign("v", 3_i));
- auto* expr = If(true, body, Else(true, else_body));
- WrapInFunction(expr);
+ auto* expr = If(true, body, Else(If(true, else_body)));
+ WrapInFunction(expr);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- ASSERT_TRUE(b.GenerateGlobalVariable(var)) << b.error();
+ b.push_function(Function{});
+ ASSERT_TRUE(b.GenerateGlobalVariable(var)) << b.error();
- EXPECT_TRUE(b.GenerateIfStatement(expr)) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%3 = OpTypeInt 32 1
+ EXPECT_TRUE(b.GenerateIfStatement(expr)) << b.error();
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%3 = OpTypeInt 32 1
%2 = OpTypePointer Private %3
%4 = OpConstantNull %3
%1 = OpVariable %2 Private %4
@@ -166,8 +168,8 @@ TEST_F(BuilderTest, If_WithElseIf) {
%10 = OpConstant %3 2
%13 = OpConstant %3 3
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(OpSelectionMerge %7 None
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(OpSelectionMerge %7 None
OpBranchConditional %6 %8 %9
%8 = OpLabel
OpStore %1 %10
@@ -185,35 +187,35 @@ OpBranch %7
}
TEST_F(BuilderTest, If_WithMultiple) {
- // if (true) {
- // v = 2;
- // } else if (true) {
- // v = 3;
- // } else if (false) {
- // v = 4;
- // } else {
- // v = 5;
- // }
-
- auto* var = Global("v", ty.i32(), ast::StorageClass::kPrivate);
- auto* body = Block(Assign("v", 2));
- auto* elseif_1_body = Block(Assign("v", 3));
- auto* elseif_2_body = Block(Assign("v", 4));
- auto* else_body = Block(Assign("v", 5));
-
- auto* expr = If(true, body, //
- Else(true, elseif_1_body), //
- Else(false, elseif_2_body), //
- Else(else_body));
- WrapInFunction(expr);
-
- spirv::Builder& b = Build();
-
- b.push_function(Function{});
- ASSERT_TRUE(b.GenerateGlobalVariable(var)) << b.error();
-
- EXPECT_TRUE(b.GenerateIfStatement(expr)) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%3 = OpTypeInt 32 1
+ // if (true) {
+ // v = 2i;
+ // } else if (true) {
+ // v = 3i;
+ // } else if (false) {
+ // v = 4i;
+ // } else {
+ // v = 5i;
+ // }
+
+ auto* var = Global("v", ty.i32(), ast::StorageClass::kPrivate);
+ auto* body = Block(Assign("v", 2_i));
+ auto* elseif_1_body = Block(Assign("v", 3_i));
+ auto* elseif_2_body = Block(Assign("v", 4_i));
+ auto* else_body = Block(Assign("v", 5_i));
+
+ auto* expr = If(true, body, //
+ Else(If(true, elseif_1_body, //
+ Else(If(false, elseif_2_body, //
+ Else(else_body))))));
+ WrapInFunction(expr);
+
+ spirv::Builder& b = Build();
+
+ b.push_function(Function{});
+ ASSERT_TRUE(b.GenerateGlobalVariable(var)) << b.error();
+
+ EXPECT_TRUE(b.GenerateIfStatement(expr)) << b.error();
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%3 = OpTypeInt 32 1
%2 = OpTypePointer Private %3
%4 = OpConstantNull %3
%1 = OpVariable %2 Private %4
@@ -221,12 +223,12 @@ TEST_F(BuilderTest, If_WithMultiple) {
%6 = OpConstantTrue %5
%10 = OpConstant %3 2
%14 = OpConstant %3 3
-%15 = OpConstantFalse %5
+%15 = OpConstantNull %5
%19 = OpConstant %3 4
%20 = OpConstant %3 5
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(OpSelectionMerge %7 None
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(OpSelectionMerge %7 None
OpBranchConditional %6 %8 %9
%8 = OpLabel
OpStore %1 %10
@@ -255,31 +257,31 @@ OpBranch %7
}
TEST_F(BuilderTest, If_WithBreak) {
- // loop {
- // if (true) {
- // break;
- // }
- // }
+ // loop {
+ // if (true) {
+ // break;
+ // }
+ // }
- auto* if_body = Block(Break());
+ auto* if_body = Block(Break());
- auto* if_stmt = If(true, if_body);
+ auto* if_stmt = If(true, if_body);
- auto* loop_body = Block(if_stmt);
+ auto* loop_body = Block(if_stmt);
- auto* expr = Loop(loop_body, Block());
- WrapInFunction(expr);
+ auto* expr = Loop(loop_body, Block());
+ WrapInFunction(expr);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
+ b.push_function(Function{});
- EXPECT_TRUE(b.GenerateLoopStatement(expr)) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%5 = OpTypeBool
+ EXPECT_TRUE(b.GenerateLoopStatement(expr)) << b.error();
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%5 = OpTypeBool
%6 = OpConstantTrue %5
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(OpBranch %1
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(OpBranch %1
%1 = OpLabel
OpLoopMerge %2 %3 None
OpBranch %4
@@ -297,31 +299,31 @@ OpBranch %1
}
TEST_F(BuilderTest, If_WithElseBreak) {
- // loop {
- // if (true) {
- // } else {
- // break;
- // }
- // }
- auto* else_body = Block(Break());
+ // loop {
+ // if (true) {
+ // } else {
+ // break;
+ // }
+ // }
+ auto* else_body = Block(Break());
- auto* if_stmt = If(true, Block(), Else(else_body));
+ auto* if_stmt = If(true, Block(), Else(else_body));
- auto* loop_body = Block(if_stmt);
+ auto* loop_body = Block(if_stmt);
- auto* expr = Loop(loop_body, Block());
- WrapInFunction(expr);
+ auto* expr = Loop(loop_body, Block());
+ WrapInFunction(expr);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
+ b.push_function(Function{});
- EXPECT_TRUE(b.GenerateLoopStatement(expr)) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%5 = OpTypeBool
+ EXPECT_TRUE(b.GenerateLoopStatement(expr)) << b.error();
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%5 = OpTypeBool
%6 = OpConstantTrue %5
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(OpBranch %1
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(OpBranch %1
%1 = OpLabel
OpLoopMerge %2 %3 None
OpBranch %4
@@ -341,29 +343,29 @@ OpBranch %1
}
TEST_F(BuilderTest, If_WithContinueAndBreak) {
- // loop {
- // if (true) {
- // continue;
- // } else {
- // break;
- // }
- // }
+ // loop {
+ // if (true) {
+ // continue;
+ // } else {
+ // break;
+ // }
+ // }
- auto* if_stmt = If(true, Block(Continue()), Else(Block(Break())));
+ auto* if_stmt = If(true, Block(Continue()), Else(Block(Break())));
- auto* expr = Loop(Block(if_stmt), Block());
- WrapInFunction(expr);
+ auto* expr = Loop(Block(if_stmt), Block());
+ WrapInFunction(expr);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
+ b.push_function(Function{});
- EXPECT_TRUE(b.GenerateLoopStatement(expr)) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%5 = OpTypeBool
+ EXPECT_TRUE(b.GenerateLoopStatement(expr)) << b.error();
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%5 = OpTypeBool
%6 = OpConstantTrue %5
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(OpBranch %1
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(OpBranch %1
%1 = OpLabel
OpLoopMerge %2 %3 None
OpBranch %4
@@ -383,32 +385,32 @@ OpBranch %1
}
TEST_F(BuilderTest, If_WithElseContinue) {
- // loop {
- // if (true) {
- // } else {
- // continue;
- // }
- // break;
- // }
- auto* else_body = Block(create<ast::ContinueStatement>());
+ // loop {
+ // if (true) {
+ // } else {
+ // continue;
+ // }
+ // break;
+ // }
+ auto* else_body = Block(create<ast::ContinueStatement>());
- auto* if_stmt = If(true, Block(), Else(else_body));
+ auto* if_stmt = If(true, Block(), Else(else_body));
- auto* loop_body = Block(if_stmt, Break());
+ auto* loop_body = Block(if_stmt, Break());
- auto* expr = Loop(loop_body, Block());
- WrapInFunction(expr);
+ auto* expr = Loop(loop_body, Block());
+ WrapInFunction(expr);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
+ b.push_function(Function{});
- EXPECT_TRUE(b.GenerateLoopStatement(expr)) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%5 = OpTypeBool
+ EXPECT_TRUE(b.GenerateLoopStatement(expr)) << b.error();
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%5 = OpTypeBool
%6 = OpConstantTrue %5
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(OpBranch %1
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(OpBranch %1
%1 = OpLabel
OpLoopMerge %2 %3 None
OpBranch %4
@@ -428,22 +430,22 @@ OpBranch %1
}
TEST_F(BuilderTest, If_WithReturn) {
- // if (true) {
- // return;
- // }
+ // if (true) {
+ // return;
+ // }
- auto* fn = Func("f", {}, ty.void_(), {If(true, Block(Return()))});
+ auto* fn = Func("f", {}, ty.void_(), {If(true, Block(Return()))});
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- EXPECT_TRUE(b.GenerateFunction(fn)) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeVoid
+ EXPECT_TRUE(b.GenerateFunction(fn)) << b.error();
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeVoid
%1 = OpTypeFunction %2
%5 = OpTypeBool
%6 = OpConstantTrue %5
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(OpSelectionMerge %7 None
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(OpSelectionMerge %7 None
OpBranchConditional %6 %8 %7
%8 = OpLabel
OpReturn
@@ -453,27 +455,27 @@ OpReturn
}
TEST_F(BuilderTest, If_WithReturnValue) {
- // if (true) {
- // return false;
- // }
- // return true;
+ // if (true) {
+ // return false;
+ // }
+ // return true;
- auto* fn = Func("f", {}, ty.bool_(),
- {
- If(true, Block(Return(false))),
- Return(true),
- });
+ auto* fn = Func("f", {}, ty.bool_(),
+ {
+ If(true, Block(Return(false))),
+ Return(true),
+ });
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- EXPECT_TRUE(b.GenerateFunction(fn)) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeBool
+ EXPECT_TRUE(b.GenerateFunction(fn)) << b.error();
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeBool
%1 = OpTypeFunction %2
%5 = OpConstantTrue %2
-%8 = OpConstantFalse %2
+%8 = OpConstantNull %2
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(OpSelectionMerge %6 None
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(OpSelectionMerge %6 None
OpBranchConditional %5 %7 %6
%7 = OpLabel
OpReturnValue %8
@@ -483,29 +485,29 @@ OpReturnValue %5
}
TEST_F(BuilderTest, IfElse_BothReturn) {
- // if (true) {
- // return true;
- // } else {
- // return true;
- // }
-
- auto* fn = Func("f", {}, ty.bool_(),
- {
- If(true, //
- Block(Return(true)), //
- Else(Block(Return(true)))),
- });
-
- spirv::Builder& b = Build();
-
- EXPECT_TRUE(b.GenerateFunction(fn)) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeBool
+ // if (true) {
+ // return true;
+ // } else {
+ // return true;
+ // }
+
+ auto* fn = Func("f", {}, ty.bool_(),
+ {
+ If(true, //
+ Block(Return(true)), //
+ Else(Block(Return(true)))),
+ });
+
+ spirv::Builder& b = Build();
+
+ EXPECT_TRUE(b.GenerateFunction(fn)) << b.error();
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeBool
%1 = OpTypeFunction %2
%5 = OpConstantTrue %2
%9 = OpConstantNull %2
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(OpSelectionMerge %6 None
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(OpSelectionMerge %6 None
OpBranchConditional %5 %7 %8
%7 = OpLabel
OpReturnValue %5
@@ -517,33 +519,33 @@ OpReturnValue %9
}
TEST_F(BuilderTest, If_WithNestedBlockReturnValue) {
- // if (true) {
- // {
- // {
- // {
- // return false;
- // }
- // }
- // }
- // }
- // return true;
-
- auto* fn = Func("f", {}, ty.bool_(),
- {
- If(true, Block(Block(Block(Block(Return(false)))))),
- Return(true),
- });
-
- spirv::Builder& b = Build();
-
- EXPECT_TRUE(b.GenerateFunction(fn)) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeBool
+ // if (true) {
+ // {
+ // {
+ // {
+ // return false;
+ // }
+ // }
+ // }
+ // }
+ // return true;
+
+ auto* fn = Func("f", {}, ty.bool_(),
+ {
+ If(true, Block(Block(Block(Block(Return(false)))))),
+ Return(true),
+ });
+
+ spirv::Builder& b = Build();
+
+ EXPECT_TRUE(b.GenerateFunction(fn)) << b.error();
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeBool
%1 = OpTypeFunction %2
%5 = OpConstantTrue %2
-%8 = OpConstantFalse %2
+%8 = OpConstantNull %2
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(OpSelectionMerge %6 None
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(OpSelectionMerge %6 None
OpBranchConditional %5 %7 %6
%7 = OpLabel
OpReturnValue %8
@@ -553,26 +555,26 @@ OpReturnValue %5
}
TEST_F(BuilderTest, If_WithLoad_Bug327) {
- // var a : bool;
- // if (a) {
- // }
+ // var a : bool;
+ // if (a) {
+ // }
- auto* var = Global("a", ty.bool_(), ast::StorageClass::kPrivate);
- auto* fn = Func("f", {}, ty.void_(), {If("a", Block())});
+ auto* var = Global("a", ty.bool_(), ast::StorageClass::kPrivate);
+ auto* fn = Func("f", {}, ty.void_(), {If("a", Block())});
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- ASSERT_TRUE(b.GenerateGlobalVariable(var)) << b.error();
- EXPECT_TRUE(b.GenerateFunction(fn)) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%3 = OpTypeBool
+ ASSERT_TRUE(b.GenerateGlobalVariable(var)) << b.error();
+ EXPECT_TRUE(b.GenerateFunction(fn)) << b.error();
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%3 = OpTypeBool
%2 = OpTypePointer Private %3
%4 = OpConstantNull %3
%1 = OpVariable %2 Private %4
%6 = OpTypeVoid
%5 = OpTypeFunction %6
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(%9 = OpLoad %3 %1
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(%9 = OpLoad %3 %1
OpSelectionMerge %10 None
OpBranchConditional %9 %11 %10
%11 = OpLabel
@@ -583,26 +585,26 @@ OpReturn
}
TEST_F(BuilderTest, If_ElseIf_WithReturn) {
- // crbug.com/tint/1315
- // if (false) {
- // } else if (true) {
- // return;
- // }
+ // crbug.com/tint/1315
+ // if (false) {
+ // } else if (true) {
+ // return;
+ // }
- auto* if_stmt = If(false, Block(), Else(true, Block(Return())));
- auto* fn = Func("f", {}, ty.void_(), {if_stmt});
+ auto* if_stmt = If(false, Block(), Else(If(true, Block(Return()))));
+ auto* fn = Func("f", {}, ty.void_(), {if_stmt});
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- EXPECT_TRUE(b.GenerateFunction(fn)) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeVoid
+ EXPECT_TRUE(b.GenerateFunction(fn)) << b.error();
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeVoid
%1 = OpTypeFunction %2
%5 = OpTypeBool
-%6 = OpConstantFalse %5
+%6 = OpConstantNull %5
%10 = OpConstantTrue %5
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(OpSelectionMerge %7 None
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(OpSelectionMerge %7 None
OpBranchConditional %6 %8 %9
%8 = OpLabel
OpBranch %7
@@ -619,28 +621,28 @@ OpReturn
}
TEST_F(BuilderTest, Loop_If_ElseIf_WithBreak) {
- // crbug.com/tint/1315
- // loop {
- // if (false) {
- // } else if (true) {
- // break;
- // }
- // }
+ // crbug.com/tint/1315
+ // loop {
+ // if (false) {
+ // } else if (true) {
+ // break;
+ // }
+ // }
- auto* if_stmt = If(false, Block(), Else(true, Block(Break())));
- auto* fn = Func("f", {}, ty.void_(), {Loop(Block(if_stmt))});
+ auto* if_stmt = If(false, Block(), Else(If(true, Block(Break()))));
+ auto* fn = Func("f", {}, ty.void_(), {Loop(Block(if_stmt))});
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- EXPECT_TRUE(b.GenerateFunction(fn)) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeVoid
+ EXPECT_TRUE(b.GenerateFunction(fn)) << b.error();
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeVoid
%1 = OpTypeFunction %2
%9 = OpTypeBool
-%10 = OpConstantFalse %9
+%10 = OpConstantNull %9
%14 = OpConstantTrue %9
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(OpBranch %5
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(OpBranch %5
%5 = OpLabel
OpLoopMerge %6 %7 None
OpBranch %8
diff --git a/chromium/third_party/dawn/src/tint/writer/spirv/builder_literal_test.cc b/chromium/third_party/dawn/src/tint/writer/spirv/builder_literal_test.cc
index 49378efe085..218db86919a 100644
--- a/chromium/third_party/dawn/src/tint/writer/spirv/builder_literal_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/spirv/builder_literal_test.cc
@@ -15,148 +15,150 @@
#include "src/tint/writer/spirv/spv_dump.h"
#include "src/tint/writer/spirv/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::spirv {
using BuilderTest = TestHelper;
TEST_F(BuilderTest, Literal_Bool_True) {
- auto* b_true = create<ast::BoolLiteralExpression>(true);
- WrapInFunction(b_true);
+ auto* b_true = create<ast::BoolLiteralExpression>(true);
+ WrapInFunction(b_true);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- auto id = b.GenerateLiteralIfNeeded(nullptr, b_true);
- ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(2u, id);
+ auto id = b.GenerateLiteralIfNeeded(nullptr, b_true);
+ ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(2u, id);
- EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeBool
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeBool
%2 = OpConstantTrue %1
)");
}
TEST_F(BuilderTest, Literal_Bool_False) {
- auto* b_false = create<ast::BoolLiteralExpression>(false);
- WrapInFunction(b_false);
+ auto* b_false = create<ast::BoolLiteralExpression>(false);
+ WrapInFunction(b_false);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- auto id = b.GenerateLiteralIfNeeded(nullptr, b_false);
- ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(2u, id);
+ auto id = b.GenerateLiteralIfNeeded(nullptr, b_false);
+ ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(2u, id);
- EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeBool
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeBool
%2 = OpConstantFalse %1
)");
}
TEST_F(BuilderTest, Literal_Bool_Dedup) {
- auto* b_true = create<ast::BoolLiteralExpression>(true);
- auto* b_false = create<ast::BoolLiteralExpression>(false);
- WrapInFunction(b_true, b_false);
+ auto* b_true = create<ast::BoolLiteralExpression>(true);
+ auto* b_false = create<ast::BoolLiteralExpression>(false);
+ WrapInFunction(b_true, b_false);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- ASSERT_NE(b.GenerateLiteralIfNeeded(nullptr, b_true), 0u);
- ASSERT_FALSE(b.has_error()) << b.error();
- ASSERT_NE(b.GenerateLiteralIfNeeded(nullptr, b_false), 0u);
- ASSERT_FALSE(b.has_error()) << b.error();
- ASSERT_NE(b.GenerateLiteralIfNeeded(nullptr, b_true), 0u);
- ASSERT_FALSE(b.has_error()) << b.error();
+ ASSERT_NE(b.GenerateLiteralIfNeeded(nullptr, b_true), 0u);
+ ASSERT_FALSE(b.has_error()) << b.error();
+ ASSERT_NE(b.GenerateLiteralIfNeeded(nullptr, b_false), 0u);
+ ASSERT_FALSE(b.has_error()) << b.error();
+ ASSERT_NE(b.GenerateLiteralIfNeeded(nullptr, b_true), 0u);
+ ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeBool
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeBool
%2 = OpConstantTrue %1
%3 = OpConstantFalse %1
)");
}
TEST_F(BuilderTest, Literal_I32) {
- auto* i = create<ast::SintLiteralExpression>(-23);
- WrapInFunction(i);
- spirv::Builder& b = Build();
+ auto* i = Expr(-23_i);
+ WrapInFunction(i);
+ spirv::Builder& b = Build();
- auto id = b.GenerateLiteralIfNeeded(nullptr, i);
- ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(2u, id);
+ auto id = b.GenerateLiteralIfNeeded(nullptr, i);
+ ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(2u, id);
- EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeInt 32 1
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeInt 32 1
%2 = OpConstant %1 -23
)");
}
TEST_F(BuilderTest, Literal_I32_Dedup) {
- auto* i1 = create<ast::SintLiteralExpression>(-23);
- auto* i2 = create<ast::SintLiteralExpression>(-23);
- WrapInFunction(i1, i2);
+ auto* i1 = Expr(-23_i);
+ auto* i2 = Expr(-23_i);
+ WrapInFunction(i1, i2);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- ASSERT_NE(b.GenerateLiteralIfNeeded(nullptr, i1), 0u);
- ASSERT_NE(b.GenerateLiteralIfNeeded(nullptr, i2), 0u);
- ASSERT_FALSE(b.has_error()) << b.error();
+ ASSERT_NE(b.GenerateLiteralIfNeeded(nullptr, i1), 0u);
+ ASSERT_NE(b.GenerateLiteralIfNeeded(nullptr, i2), 0u);
+ ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeInt 32 1
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeInt 32 1
%2 = OpConstant %1 -23
)");
}
TEST_F(BuilderTest, Literal_U32) {
- auto* i = create<ast::UintLiteralExpression>(23);
- WrapInFunction(i);
+ auto* i = Expr(23_u);
+ WrapInFunction(i);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- auto id = b.GenerateLiteralIfNeeded(nullptr, i);
- ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(2u, id);
+ auto id = b.GenerateLiteralIfNeeded(nullptr, i);
+ ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(2u, id);
- EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeInt 32 0
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeInt 32 0
%2 = OpConstant %1 23
)");
}
TEST_F(BuilderTest, Literal_U32_Dedup) {
- auto* i1 = create<ast::UintLiteralExpression>(23);
- auto* i2 = create<ast::UintLiteralExpression>(23);
- WrapInFunction(i1, i2);
+ auto* i1 = Expr(23_u);
+ auto* i2 = Expr(23_u);
+ WrapInFunction(i1, i2);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- ASSERT_NE(b.GenerateLiteralIfNeeded(nullptr, i1), 0u);
- ASSERT_NE(b.GenerateLiteralIfNeeded(nullptr, i2), 0u);
- ASSERT_FALSE(b.has_error()) << b.error();
+ ASSERT_NE(b.GenerateLiteralIfNeeded(nullptr, i1), 0u);
+ ASSERT_NE(b.GenerateLiteralIfNeeded(nullptr, i2), 0u);
+ ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeInt 32 0
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeInt 32 0
%2 = OpConstant %1 23
)");
}
TEST_F(BuilderTest, Literal_F32) {
- auto* i = create<ast::FloatLiteralExpression>(23.245f);
- WrapInFunction(i);
+ auto* i = create<ast::FloatLiteralExpression>(23.245, ast::FloatLiteralExpression::Suffix::kF);
+ WrapInFunction(i);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- auto id = b.GenerateLiteralIfNeeded(nullptr, i);
- ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(2u, id);
+ auto id = b.GenerateLiteralIfNeeded(nullptr, i);
+ ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(2u, id);
- EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeFloat 32
%2 = OpConstant %1 23.2450008
)");
}
TEST_F(BuilderTest, Literal_F32_Dedup) {
- auto* i1 = create<ast::FloatLiteralExpression>(23.245f);
- auto* i2 = create<ast::FloatLiteralExpression>(23.245f);
- WrapInFunction(i1, i2);
+ auto* i1 = create<ast::FloatLiteralExpression>(23.245, ast::FloatLiteralExpression::Suffix::kF);
+ auto* i2 = create<ast::FloatLiteralExpression>(23.245, ast::FloatLiteralExpression::Suffix::kF);
+ WrapInFunction(i1, i2);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- ASSERT_NE(b.GenerateLiteralIfNeeded(nullptr, i1), 0u);
- ASSERT_NE(b.GenerateLiteralIfNeeded(nullptr, i2), 0u);
- ASSERT_FALSE(b.has_error()) << b.error();
+ ASSERT_NE(b.GenerateLiteralIfNeeded(nullptr, i1), 0u);
+ ASSERT_NE(b.GenerateLiteralIfNeeded(nullptr, i2), 0u);
+ ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%1 = OpTypeFloat 32
%2 = OpConstant %1 23.2450008
)");
}
diff --git a/chromium/third_party/dawn/src/tint/writer/spirv/builder_loop_test.cc b/chromium/third_party/dawn/src/tint/writer/spirv/builder_loop_test.cc
index 555c1671850..b4c2baa5e54 100644
--- a/chromium/third_party/dawn/src/tint/writer/spirv/builder_loop_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/spirv/builder_loop_test.cc
@@ -15,26 +15,28 @@
#include "src/tint/writer/spirv/spv_dump.h"
#include "src/tint/writer/spirv/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::spirv {
namespace {
using BuilderTest = TestHelper;
TEST_F(BuilderTest, Loop_Empty) {
- // loop {
- // break;
- // }
+ // loop {
+ // break;
+ // }
- auto* loop = Loop(Block(Break()), Block());
- WrapInFunction(loop);
+ auto* loop = Loop(Block(Break()), Block());
+ WrapInFunction(loop);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
+ b.push_function(Function{});
- EXPECT_TRUE(b.GenerateLoopStatement(loop)) << b.error();
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(OpBranch %1
+ EXPECT_TRUE(b.GenerateLoopStatement(loop)) << b.error();
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(OpBranch %1
%1 = OpLabel
OpLoopMerge %2 %3 None
OpBranch %4
@@ -47,32 +49,32 @@ OpBranch %1
}
TEST_F(BuilderTest, Loop_WithoutContinuing) {
- // loop {
- // v = 2;
- // break;
- // }
+ // loop {
+ // v = 2i;
+ // break;
+ // }
- auto* var = Global("v", ty.i32(), ast::StorageClass::kPrivate);
- auto* body = Block(Assign("v", 2), //
- Break());
+ auto* var = Global("v", ty.i32(), ast::StorageClass::kPrivate);
+ auto* body = Block(Assign("v", 2_i), //
+ Break());
- auto* loop = Loop(body, Block());
- WrapInFunction(loop);
+ auto* loop = Loop(body, Block());
+ WrapInFunction(loop);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- ASSERT_TRUE(b.GenerateGlobalVariable(var)) << b.error();
+ b.push_function(Function{});
+ ASSERT_TRUE(b.GenerateGlobalVariable(var)) << b.error();
- EXPECT_TRUE(b.GenerateLoopStatement(loop)) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%3 = OpTypeInt 32 1
+ EXPECT_TRUE(b.GenerateLoopStatement(loop)) << b.error();
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%3 = OpTypeInt 32 1
%2 = OpTypePointer Private %3
%4 = OpConstantNull %3
%1 = OpVariable %2 Private %4
%9 = OpConstant %3 2
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(OpBranch %5
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(OpBranch %5
%5 = OpLabel
OpLoopMerge %6 %7 None
OpBranch %8
@@ -86,37 +88,37 @@ OpBranch %5
}
TEST_F(BuilderTest, Loop_WithContinuing) {
- // loop {
- // a = 2;
- // break;
- // continuing {
- // a = 3;
- // }
- // }
+ // loop {
+ // a = 2i;
+ // break;
+ // continuing {
+ // a = 3i;
+ // }
+ // }
- auto* var = Global("v", ty.i32(), ast::StorageClass::kPrivate);
- auto* body = Block(Assign("v", 2), //
- Break());
- auto* continuing = Block(Assign("v", 3));
+ auto* var = Global("v", ty.i32(), ast::StorageClass::kPrivate);
+ auto* body = Block(Assign("v", 2_i), //
+ Break());
+ auto* continuing = Block(Assign("v", 3_i));
- auto* loop = Loop(body, continuing);
- WrapInFunction(loop);
+ auto* loop = Loop(body, continuing);
+ WrapInFunction(loop);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- ASSERT_TRUE(b.GenerateGlobalVariable(var)) << b.error();
+ b.push_function(Function{});
+ ASSERT_TRUE(b.GenerateGlobalVariable(var)) << b.error();
- EXPECT_TRUE(b.GenerateLoopStatement(loop)) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%3 = OpTypeInt 32 1
+ EXPECT_TRUE(b.GenerateLoopStatement(loop)) << b.error();
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%3 = OpTypeInt 32 1
%2 = OpTypePointer Private %3
%4 = OpConstantNull %3
%1 = OpVariable %2 Private %4
%9 = OpConstant %3 2
%10 = OpConstant %3 3
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(OpBranch %5
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(OpBranch %5
%5 = OpLabel
OpLoopMerge %6 %7 None
OpBranch %8
@@ -131,33 +133,33 @@ OpBranch %5
}
TEST_F(BuilderTest, Loop_WithBodyVariableAccessInContinuing) {
- // loop {
- // var a : i32;
- // break;
- // continuing {
- // a = 3;
- // }
- // }
+ // loop {
+ // var a : i32;
+ // break;
+ // continuing {
+ // a = 3i;
+ // }
+ // }
- auto* body = Block(Decl(Var("a", ty.i32())), //
- Break());
- auto* continuing = Block(Assign("a", 3));
+ auto* body = Block(Decl(Var("a", ty.i32())), //
+ Break());
+ auto* continuing = Block(Assign("a", 3_i));
- auto* loop = Loop(body, continuing);
- WrapInFunction(loop);
+ auto* loop = Loop(body, continuing);
+ WrapInFunction(loop);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- EXPECT_TRUE(b.GenerateLoopStatement(loop)) << b.error();
+ b.push_function(Function{});
+ EXPECT_TRUE(b.GenerateLoopStatement(loop)) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%7 = OpTypeInt 32 1
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%7 = OpTypeInt 32 1
%6 = OpTypePointer Function %7
%8 = OpConstantNull %7
%9 = OpConstant %7 3
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(OpBranch %1
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(OpBranch %1
%1 = OpLabel
OpLoopMerge %2 %3 None
OpBranch %4
@@ -171,22 +173,22 @@ OpBranch %1
}
TEST_F(BuilderTest, Loop_WithContinue) {
- // loop {
- // if (false) { break; }
- // continue;
- // }
- auto* body = Block(If(false, Block(Break())), //
- Continue());
- auto* loop = Loop(body, Block());
- WrapInFunction(loop);
-
- spirv::Builder& b = Build();
-
- b.push_function(Function{});
-
- EXPECT_TRUE(b.GenerateLoopStatement(loop)) << b.error();
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(OpBranch %1
+ // loop {
+ // if (false) { break; }
+ // continue;
+ // }
+ auto* body = Block(If(false, Block(Break())), //
+ Continue());
+ auto* loop = Loop(body, Block());
+ WrapInFunction(loop);
+
+ spirv::Builder& b = Build();
+
+ b.push_function(Function{});
+
+ EXPECT_TRUE(b.GenerateLoopStatement(loop)) << b.error();
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(OpBranch %1
%1 = OpLabel
OpLoopMerge %2 %3 None
OpBranch %4
@@ -204,20 +206,20 @@ OpBranch %1
}
TEST_F(BuilderTest, Loop_WithBreak) {
- // loop {
- // break;
- // }
- auto* body = Block(create<ast::BreakStatement>());
- auto* loop = Loop(body, Block());
- WrapInFunction(loop);
+ // loop {
+ // break;
+ // }
+ auto* body = Block(create<ast::BreakStatement>());
+ auto* loop = Loop(body, Block());
+ WrapInFunction(loop);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
+ b.push_function(Function{});
- EXPECT_TRUE(b.GenerateLoopStatement(loop)) << b.error();
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(OpBranch %1
+ EXPECT_TRUE(b.GenerateLoopStatement(loop)) << b.error();
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(OpBranch %1
%1 = OpLabel
OpLoopMerge %2 %3 None
OpBranch %4
@@ -230,28 +232,27 @@ OpBranch %1
}
TEST_F(BuilderTest, Loop_WithContinuing_BreakIf) {
- // loop {
- // continuing {
- // if (true) { break; }
- // }
- // }
+ // loop {
+ // continuing {
+ // if (true) { break; }
+ // }
+ // }
- auto* if_stmt = create<ast::IfStatement>(Expr(true), Block(Break()),
- ast::ElseStatementList{});
- auto* continuing = Block(if_stmt);
- auto* loop = Loop(Block(), continuing);
- WrapInFunction(loop);
+ auto* if_stmt = If(Expr(true), Block(Break()));
+ auto* continuing = Block(if_stmt);
+ auto* loop = Loop(Block(), continuing);
+ WrapInFunction(loop);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
+ b.push_function(Function{});
- EXPECT_TRUE(b.GenerateLoopStatement(loop)) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%5 = OpTypeBool
+ EXPECT_TRUE(b.GenerateLoopStatement(loop)) << b.error();
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%5 = OpTypeBool
%6 = OpConstantTrue %5
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(OpBranch %1
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(OpBranch %1
%1 = OpLabel
OpLoopMerge %2 %3 None
OpBranch %4
@@ -264,28 +265,26 @@ OpBranchConditional %6 %2 %1
}
TEST_F(BuilderTest, Loop_WithContinuing_BreakUnless) {
- // loop {
- // continuing {
- // if (true) {} else { break; }
- // }
- // }
- auto* if_stmt = create<ast::IfStatement>(
- Expr(true), Block(),
- ast::ElseStatementList{Else(nullptr, Block(Break()))});
- auto* continuing = Block(if_stmt);
- auto* loop = Loop(Block(), continuing);
- WrapInFunction(loop);
-
- spirv::Builder& b = Build();
-
- b.push_function(Function{});
-
- EXPECT_TRUE(b.GenerateLoopStatement(loop)) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%5 = OpTypeBool
+ // loop {
+ // continuing {
+ // if (true) {} else { break; }
+ // }
+ // }
+ auto* if_stmt = If(Expr(true), Block(), Else(Block(Break())));
+ auto* continuing = Block(if_stmt);
+ auto* loop = Loop(Block(), continuing);
+ WrapInFunction(loop);
+
+ spirv::Builder& b = Build();
+
+ b.push_function(Function{});
+
+ EXPECT_TRUE(b.GenerateLoopStatement(loop)) << b.error();
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%5 = OpTypeBool
%6 = OpConstantTrue %5
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(OpBranch %1
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(OpBranch %1
%1 = OpLabel
OpLoopMerge %2 %3 None
OpBranch %4
@@ -298,31 +297,31 @@ OpBranchConditional %6 %1 %2
}
TEST_F(BuilderTest, Loop_WithContinuing_BreakIf_ConditionIsVar) {
- // loop {
- // continuing {
- // var cond = true;
- // if (cond) { break; }
- // }
- // }
+ // loop {
+ // continuing {
+ // var cond = true;
+ // if (cond) { break; }
+ // }
+ // }
- auto* cond_var = Decl(Var("cond", nullptr, Expr(true)));
- auto* if_stmt = If(Expr("cond"), Block(Break()), ast::ElseStatementList{});
- auto* continuing = Block(cond_var, if_stmt);
- auto* loop = Loop(Block(), continuing);
- WrapInFunction(loop);
+ auto* cond_var = Decl(Var("cond", nullptr, Expr(true)));
+ auto* if_stmt = If(Expr("cond"), Block(Break()));
+ auto* continuing = Block(cond_var, if_stmt);
+ auto* loop = Loop(Block(), continuing);
+ WrapInFunction(loop);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
+ b.push_function(Function{});
- EXPECT_TRUE(b.GenerateLoopStatement(loop)) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%5 = OpTypeBool
+ EXPECT_TRUE(b.GenerateLoopStatement(loop)) << b.error();
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%5 = OpTypeBool
%6 = OpConstantTrue %5
%8 = OpTypePointer Function %5
%9 = OpConstantNull %5
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(OpBranch %1
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(OpBranch %1
%1 = OpLabel
OpLoopMerge %2 %3 None
OpBranch %4
@@ -337,31 +336,30 @@ OpBranchConditional %10 %2 %1
}
TEST_F(BuilderTest, Loop_WithContinuing_BreakUnless_ConditionIsVar) {
- // loop {
- // continuing {
- // var cond = true;
- // if (cond) {} else { break; }
- // }
- // }
- auto* cond_var = Decl(Var("cond", nullptr, Expr(true)));
- auto* if_stmt = If(Expr("cond"), Block(),
- ast::ElseStatementList{Else(nullptr, Block(Break()))});
- auto* continuing = Block(cond_var, if_stmt);
- auto* loop = Loop(Block(), continuing);
- WrapInFunction(loop);
-
- spirv::Builder& b = Build();
-
- b.push_function(Function{});
-
- EXPECT_TRUE(b.GenerateLoopStatement(loop)) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%5 = OpTypeBool
+ // loop {
+ // continuing {
+ // var cond = true;
+ // if (cond) {} else { break; }
+ // }
+ // }
+ auto* cond_var = Decl(Var("cond", nullptr, Expr(true)));
+ auto* if_stmt = If(Expr("cond"), Block(), Else(Block(Break())));
+ auto* continuing = Block(cond_var, if_stmt);
+ auto* loop = Loop(Block(), continuing);
+ WrapInFunction(loop);
+
+ spirv::Builder& b = Build();
+
+ b.push_function(Function{});
+
+ EXPECT_TRUE(b.GenerateLoopStatement(loop)) << b.error();
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%5 = OpTypeBool
%6 = OpConstantTrue %5
%8 = OpTypePointer Function %5
%9 = OpConstantNull %5
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(OpBranch %1
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(OpBranch %1
%1 = OpLabel
OpLoopMerge %2 %3 None
OpBranch %4
@@ -376,40 +374,38 @@ OpBranchConditional %10 %1 %2
}
TEST_F(BuilderTest, Loop_WithContinuing_BreakIf_Nested) {
- // Make sure the right backedge and break target are used.
- // loop {
- // continuing {
- // loop {
- // continuing {
- // if (true) { break; }
- // }
- // }
- // if (true) { break; }
- // }
- // }
-
- auto* inner_if_stmt = create<ast::IfStatement>(Expr(true), Block(Break()),
- ast::ElseStatementList{});
- auto* inner_continuing = Block(inner_if_stmt);
- auto* inner_loop = Loop(Block(), inner_continuing);
-
- auto* outer_if_stmt = create<ast::IfStatement>(Expr(true), Block(Break()),
- ast::ElseStatementList{});
- auto* outer_continuing = Block(inner_loop, outer_if_stmt);
- auto* outer_loop = Loop(Block(), outer_continuing);
-
- WrapInFunction(outer_loop);
-
- spirv::Builder& b = Build();
-
- b.push_function(Function{});
-
- EXPECT_TRUE(b.GenerateLoopStatement(outer_loop)) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%9 = OpTypeBool
+ // Make sure the right backedge and break target are used.
+ // loop {
+ // continuing {
+ // loop {
+ // continuing {
+ // if (true) { break; }
+ // }
+ // }
+ // if (true) { break; }
+ // }
+ // }
+
+ auto* inner_if_stmt = If(Expr(true), Block(Break()));
+ auto* inner_continuing = Block(inner_if_stmt);
+ auto* inner_loop = Loop(Block(), inner_continuing);
+
+ auto* outer_if_stmt = If(Expr(true), Block(Break()));
+ auto* outer_continuing = Block(inner_loop, outer_if_stmt);
+ auto* outer_loop = Loop(Block(), outer_continuing);
+
+ WrapInFunction(outer_loop);
+
+ spirv::Builder& b = Build();
+
+ b.push_function(Function{});
+
+ EXPECT_TRUE(b.GenerateLoopStatement(outer_loop)) << b.error();
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%9 = OpTypeBool
%10 = OpConstantTrue %9
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(OpBranch %1
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(OpBranch %1
%1 = OpLabel
OpLoopMerge %2 %3 None
OpBranch %4
@@ -431,42 +427,38 @@ OpBranchConditional %10 %2 %1
}
TEST_F(BuilderTest, Loop_WithContinuing_BreakUnless_Nested) {
- // Make sure the right backedge and break target are used.
- // loop {
- // continuing {
- // loop {
- // continuing {
- // if (true) {} else { break; }
- // }
- // }
- // if (true) {} else { break; }
- // }
- // }
-
- auto* inner_if_stmt = create<ast::IfStatement>(
- Expr(true), Block(),
- ast::ElseStatementList{Else(nullptr, Block(Break()))});
- auto* inner_continuing = Block(inner_if_stmt);
- auto* inner_loop = Loop(Block(), inner_continuing);
-
- auto* outer_if_stmt = create<ast::IfStatement>(
- Expr(true), Block(),
- ast::ElseStatementList{Else(nullptr, Block(Break()))});
- auto* outer_continuing = Block(inner_loop, outer_if_stmt);
- auto* outer_loop = Loop(Block(), outer_continuing);
-
- WrapInFunction(outer_loop);
-
- spirv::Builder& b = Build();
-
- b.push_function(Function{});
-
- EXPECT_TRUE(b.GenerateLoopStatement(outer_loop)) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%9 = OpTypeBool
+ // Make sure the right backedge and break target are used.
+ // loop {
+ // continuing {
+ // loop {
+ // continuing {
+ // if (true) {} else { break; }
+ // }
+ // }
+ // if (true) {} else { break; }
+ // }
+ // }
+
+ auto* inner_if_stmt = If(Expr(true), Block(), Else(Block(Break())));
+ auto* inner_continuing = Block(inner_if_stmt);
+ auto* inner_loop = Loop(Block(), inner_continuing);
+
+ auto* outer_if_stmt = If(Expr(true), Block(), Else(Block(Break())));
+ auto* outer_continuing = Block(inner_loop, outer_if_stmt);
+ auto* outer_loop = Loop(Block(), outer_continuing);
+
+ WrapInFunction(outer_loop);
+
+ spirv::Builder& b = Build();
+
+ b.push_function(Function{});
+
+ EXPECT_TRUE(b.GenerateLoopStatement(outer_loop)) << b.error();
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%9 = OpTypeBool
%10 = OpConstantTrue %9
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(OpBranch %1
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(OpBranch %1
%1 = OpLabel
OpLoopMerge %2 %3 None
OpBranch %4
diff --git a/chromium/third_party/dawn/src/tint/writer/spirv/builder_return_test.cc b/chromium/third_party/dawn/src/tint/writer/spirv/builder_return_test.cc
index 3b0eb4a0fc9..0027311e3cb 100644
--- a/chromium/third_party/dawn/src/tint/writer/spirv/builder_return_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/spirv/builder_return_test.cc
@@ -15,70 +15,72 @@
#include "src/tint/writer/spirv/spv_dump.h"
#include "src/tint/writer/spirv/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::spirv {
namespace {
using BuilderTest = TestHelper;
TEST_F(BuilderTest, Return) {
- auto* ret = Return();
- WrapInFunction(ret);
+ auto* ret = Return();
+ WrapInFunction(ret);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- EXPECT_TRUE(b.GenerateReturnStatement(ret));
- ASSERT_FALSE(b.has_error()) << b.error();
+ b.push_function(Function{});
+ EXPECT_TRUE(b.GenerateReturnStatement(ret));
+ ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()), R"(OpReturn
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()), R"(OpReturn
)");
}
TEST_F(BuilderTest, Return_WithValue) {
- auto* val = vec3<f32>(1.f, 1.f, 3.f);
+ auto* val = vec3<f32>(1_f, 1_f, 3_f);
- auto* ret = Return(val);
- Func("test", {}, ty.vec3<f32>(), {ret}, {});
+ auto* ret = Return(val);
+ Func("test", {}, ty.vec3<f32>(), {ret}, {});
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- EXPECT_TRUE(b.GenerateReturnStatement(ret));
- ASSERT_FALSE(b.has_error()) << b.error();
+ b.push_function(Function{});
+ EXPECT_TRUE(b.GenerateReturnStatement(ret));
+ ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
%1 = OpTypeVector %2 3
%3 = OpConstant %2 1
%4 = OpConstant %2 3
%5 = OpConstantComposite %1 %3 %3 %4
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(OpReturnValue %5
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(OpReturnValue %5
)");
}
TEST_F(BuilderTest, Return_WithValue_GeneratesLoad) {
- auto* var = Var("param", ty.f32());
+ auto* var = Var("param", ty.f32());
- auto* ret = Return(var);
- Func("test", {}, ty.f32(), {Decl(var), ret}, {});
+ auto* ret = Return(var);
+ Func("test", {}, ty.f32(), {Decl(var), ret}, {});
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- EXPECT_TRUE(b.GenerateFunctionVariable(var)) << b.error();
- EXPECT_TRUE(b.GenerateReturnStatement(ret)) << b.error();
- ASSERT_FALSE(b.has_error()) << b.error();
+ b.push_function(Function{});
+ EXPECT_TRUE(b.GenerateFunctionVariable(var)) << b.error();
+ EXPECT_TRUE(b.GenerateReturnStatement(ret)) << b.error();
+ ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%3 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%3 = OpTypeFloat 32
%2 = OpTypePointer Function %3
%4 = OpConstantNull %3
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].variables()),
- R"(%1 = OpVariable %2 Function %4
+ EXPECT_EQ(DumpInstructions(b.functions()[0].variables()),
+ R"(%1 = OpVariable %2 Function %4
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(%5 = OpLoad %3 %1
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(%5 = OpLoad %3 %1
OpReturnValue %5
)");
}
diff --git a/chromium/third_party/dawn/src/tint/writer/spirv/builder_switch_test.cc b/chromium/third_party/dawn/src/tint/writer/spirv/builder_switch_test.cc
index 8fd79213fb2..7ddd1bdac8d 100644
--- a/chromium/third_party/dawn/src/tint/writer/spirv/builder_switch_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/spirv/builder_switch_test.cc
@@ -16,29 +16,31 @@
#include "src/tint/writer/spirv/spv_dump.h"
#include "src/tint/writer/spirv/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::spirv {
namespace {
using BuilderTest = TestHelper;
TEST_F(BuilderTest, Switch_Empty) {
- // switch (1) {
- // default: {}
- // }
+ // switch (1i) {
+ // default: {}
+ // }
- auto* expr = Switch(1, DefaultCase());
- WrapInFunction(expr);
+ auto* expr = Switch(1_i, DefaultCase());
+ WrapInFunction(expr);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
+ b.push_function(Function{});
- EXPECT_TRUE(b.GenerateSwitchStatement(expr)) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeInt 32 1
+ EXPECT_TRUE(b.GenerateSwitchStatement(expr)) << b.error();
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeInt 32 1
%3 = OpConstant %2 1
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(OpSelectionMerge %1 None
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(OpSelectionMerge %1 None
OpSwitch %3 %4
%4 = OpLabel
OpBranch %1
@@ -47,32 +49,32 @@ OpBranch %1
}
TEST_F(BuilderTest, Switch_WithCase) {
- // switch(a) {
- // case 1:
- // v = 1;
- // case 2:
- // v = 2;
- // default: {}
- // }
-
- auto* v = Global("v", ty.i32(), ast::StorageClass::kPrivate);
- auto* a = Global("a", ty.i32(), ast::StorageClass::kPrivate);
-
- auto* func = Func("a_func", {}, ty.void_(),
- {
- Switch("a", //
- Case(Expr(1), Block(Assign("v", 1))), //
- Case(Expr(2), Block(Assign("v", 2))), //
- DefaultCase()),
- });
-
- spirv::Builder& b = Build();
-
- ASSERT_TRUE(b.GenerateGlobalVariable(v)) << b.error();
- ASSERT_TRUE(b.GenerateGlobalVariable(a)) << b.error();
- ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
-
- EXPECT_EQ(DumpBuilder(b), R"(OpName %1 "v"
+ // switch(a) {
+ // case 1i:
+ // v = 1i;
+ // case 2i:
+ // v = 2i;
+ // default: {}
+ // }
+
+ auto* v = Global("v", ty.i32(), ast::StorageClass::kPrivate);
+ auto* a = Global("a", ty.i32(), ast::StorageClass::kPrivate);
+
+ auto* func = Func("a_func", {}, ty.void_(),
+ {
+ Switch("a", //
+ Case(Expr(1_i), Block(Assign("v", 1_i))), //
+ Case(Expr(2_i), Block(Assign("v", 2_i))), //
+ DefaultCase()),
+ });
+
+ spirv::Builder& b = Build();
+
+ ASSERT_TRUE(b.GenerateGlobalVariable(v)) << b.error();
+ ASSERT_TRUE(b.GenerateGlobalVariable(a)) << b.error();
+ ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
+
+ EXPECT_EQ(DumpBuilder(b), R"(OpName %1 "v"
OpName %5 "a"
OpName %8 "a_func"
%3 = OpTypeInt 32 1
@@ -104,32 +106,32 @@ OpFunctionEnd
}
TEST_F(BuilderTest, Switch_WithCase_Unsigned) {
- // switch(a) {
- // case 1u:
- // v = 1;
- // case 2u:
- // v = 2;
- // default: {}
- // }
-
- auto* v = Global("v", ty.i32(), ast::StorageClass::kPrivate);
- auto* a = Global("a", ty.u32(), ast::StorageClass::kPrivate);
-
- auto* func = Func("a_func", {}, ty.void_(),
- {
- Switch("a", //
- Case(Expr(1u), Block(Assign("v", 1))), //
- Case(Expr(2u), Block(Assign("v", 2))), //
- DefaultCase()),
- });
-
- spirv::Builder& b = Build();
-
- ASSERT_TRUE(b.GenerateGlobalVariable(v)) << b.error();
- ASSERT_TRUE(b.GenerateGlobalVariable(a)) << b.error();
- ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
-
- EXPECT_EQ(DumpBuilder(b), R"(OpName %1 "v"
+ // switch(a) {
+ // case 1u:
+ // v = 1i;
+ // case 2u:
+ // v = 2i;
+ // default: {}
+ // }
+
+ auto* v = Global("v", ty.i32(), ast::StorageClass::kPrivate);
+ auto* a = Global("a", ty.u32(), ast::StorageClass::kPrivate);
+
+ auto* func = Func("a_func", {}, ty.void_(),
+ {
+ Switch("a", //
+ Case(Expr(1_u), Block(Assign("v", 1_i))), //
+ Case(Expr(2_u), Block(Assign("v", 2_i))), //
+ DefaultCase()),
+ });
+
+ spirv::Builder& b = Build();
+
+ ASSERT_TRUE(b.GenerateGlobalVariable(v)) << b.error();
+ ASSERT_TRUE(b.GenerateGlobalVariable(a)) << b.error();
+ ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
+
+ EXPECT_EQ(DumpBuilder(b), R"(OpName %1 "v"
OpName %5 "a"
OpName %11 "a_func"
%3 = OpTypeInt 32 1
@@ -164,27 +166,27 @@ OpFunctionEnd
}
TEST_F(BuilderTest, Switch_WithDefault) {
- // switch(true) {
- // default: {}
- // v = 1;
- // }
+ // switch(true) {
+ // default: {}
+ // v = 1i;
+ // }
- auto* v = Global("v", ty.i32(), ast::StorageClass::kPrivate);
- auto* a = Global("a", ty.i32(), ast::StorageClass::kPrivate);
+ auto* v = Global("v", ty.i32(), ast::StorageClass::kPrivate);
+ auto* a = Global("a", ty.i32(), ast::StorageClass::kPrivate);
- auto* func = Func("a_func", {}, ty.void_(),
- {
- Switch("a", //
- DefaultCase(Block(Assign("v", 1)))), //
- });
+ auto* func = Func("a_func", {}, ty.void_(),
+ {
+ Switch("a", //
+ DefaultCase(Block(Assign("v", 1_i)))), //
+ });
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- ASSERT_TRUE(b.GenerateGlobalVariable(v)) << b.error();
- ASSERT_TRUE(b.GenerateGlobalVariable(a)) << b.error();
- ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
+ ASSERT_TRUE(b.GenerateGlobalVariable(v)) << b.error();
+ ASSERT_TRUE(b.GenerateGlobalVariable(a)) << b.error();
+ ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
- EXPECT_EQ(DumpBuilder(b), R"(OpName %1 "v"
+ EXPECT_EQ(DumpBuilder(b), R"(OpName %1 "v"
OpName %5 "a"
OpName %8 "a_func"
%3 = OpTypeInt 32 1
@@ -210,35 +212,35 @@ OpFunctionEnd
}
TEST_F(BuilderTest, Switch_WithCaseAndDefault) {
- // switch(a) {
- // case 1:
- // v = 1;
- // case 2, 3:
- // v = 2;
- // default: {}
- // v = 3;
- // }
-
- auto* v = Global("v", ty.i32(), ast::StorageClass::kPrivate);
- auto* a = Global("a", ty.i32(), ast::StorageClass::kPrivate);
-
- auto* func = Func("a_func", {}, ty.void_(),
- {
- Switch(Expr("a"), //
- Case(Expr(1), //
- Block(Assign("v", 1))), //
- Case({Expr(2), Expr(3)}, //
- Block(Assign("v", 2))), //
- DefaultCase(Block(Assign("v", 3)))),
- });
-
- spirv::Builder& b = Build();
-
- ASSERT_TRUE(b.GenerateGlobalVariable(v)) << b.error();
- ASSERT_TRUE(b.GenerateGlobalVariable(a)) << b.error();
- ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
-
- EXPECT_EQ(DumpBuilder(b), R"(OpName %1 "v"
+ // switch(a) {
+ // case 1i:
+ // v = 1i;
+ // case 2i, 3i:
+ // v = 2i;
+ // default: {}
+ // v = 3i;
+ // }
+
+ auto* v = Global("v", ty.i32(), ast::StorageClass::kPrivate);
+ auto* a = Global("a", ty.i32(), ast::StorageClass::kPrivate);
+
+ auto* func = Func("a_func", {}, ty.void_(),
+ {
+ Switch(Expr("a"), //
+ Case(Expr(1_i), //
+ Block(Assign("v", 1_i))), //
+ Case({Expr(2_i), Expr(3_i)}, //
+ Block(Assign("v", 2_i))), //
+ DefaultCase(Block(Assign("v", 3_i)))),
+ });
+
+ spirv::Builder& b = Build();
+
+ ASSERT_TRUE(b.GenerateGlobalVariable(v)) << b.error();
+ ASSERT_TRUE(b.GenerateGlobalVariable(a)) << b.error();
+ ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
+
+ EXPECT_EQ(DumpBuilder(b), R"(OpName %1 "v"
OpName %5 "a"
OpName %8 "a_func"
%3 = OpTypeInt 32 1
@@ -272,36 +274,36 @@ OpFunctionEnd
}
TEST_F(BuilderTest, Switch_CaseWithFallthrough) {
- // switch(a) {
- // case 1:
- // v = 1;
- // fallthrough;
- // case 2:
- // v = 2;
- // default: {}
- // v = 3;
- // }
-
- auto* v = Global("v", ty.i32(), ast::StorageClass::kPrivate);
- auto* a = Global("a", ty.i32(), ast::StorageClass::kPrivate);
-
- auto* func = Func("a_func", {}, ty.void_(),
- {
- Switch(Expr("a"), //
- Case(Expr(1), //
- Block(Assign("v", 1), Fallthrough())), //
- Case(Expr(2), //
- Block(Assign("v", 2))), //
- DefaultCase(Block(Assign("v", 3)))),
- });
-
- spirv::Builder& b = Build();
-
- ASSERT_TRUE(b.GenerateGlobalVariable(v)) << b.error();
- ASSERT_TRUE(b.GenerateGlobalVariable(a)) << b.error();
- ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
-
- EXPECT_EQ(DumpBuilder(b), R"(OpName %1 "v"
+ // switch(a) {
+ // case 1i:
+ // v = 1i;
+ // fallthrough;
+ // case 2i:
+ // v = 2i;
+ // default: {}
+ // v = 3i;
+ // }
+
+ auto* v = Global("v", ty.i32(), ast::StorageClass::kPrivate);
+ auto* a = Global("a", ty.i32(), ast::StorageClass::kPrivate);
+
+ auto* func = Func("a_func", {}, ty.void_(),
+ {
+ Switch(Expr("a"), //
+ Case(Expr(1_i), //
+ Block(Assign("v", 1_i), Fallthrough())), //
+ Case(Expr(2_i), //
+ Block(Assign("v", 2_i))), //
+ DefaultCase(Block(Assign("v", 3_i)))),
+ });
+
+ spirv::Builder& b = Build();
+
+ ASSERT_TRUE(b.GenerateGlobalVariable(v)) << b.error();
+ ASSERT_TRUE(b.GenerateGlobalVariable(a)) << b.error();
+ ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
+
+ EXPECT_EQ(DumpBuilder(b), R"(OpName %1 "v"
OpName %5 "a"
OpName %8 "a_func"
%3 = OpTypeInt 32 1
@@ -335,36 +337,35 @@ OpFunctionEnd
}
TEST_F(BuilderTest, Switch_WithNestedBreak) {
- // switch (a) {
- // case 1:
- // if (true) {
- // break;
- // }
- // v = 1;
- // default: {}
- // }
-
- auto* v = Global("v", ty.i32(), ast::StorageClass::kPrivate);
- auto* a = Global("a", ty.i32(), ast::StorageClass::kPrivate);
-
- auto* func = Func(
- "a_func", {}, ty.void_(),
- {
- Switch("a", //
- Case(Expr(1), //
- Block( //
- If(Expr(true), Block(create<ast::BreakStatement>())),
- Assign("v", 1))),
- DefaultCase()),
- });
-
- spirv::Builder& b = Build();
-
- ASSERT_TRUE(b.GenerateGlobalVariable(v)) << b.error();
- ASSERT_TRUE(b.GenerateGlobalVariable(a)) << b.error();
- ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
-
- EXPECT_EQ(DumpBuilder(b), R"(OpName %1 "v"
+ // switch (a) {
+ // case 1:
+ // if (true) {
+ // break;
+ // }
+ // v = 1i;
+ // default: {}
+ // }
+
+ auto* v = Global("v", ty.i32(), ast::StorageClass::kPrivate);
+ auto* a = Global("a", ty.i32(), ast::StorageClass::kPrivate);
+
+ auto* func = Func("a_func", {}, ty.void_(),
+ {
+ Switch("a", //
+ Case(Expr(1_i), //
+ Block( //
+ If(Expr(true), Block(create<ast::BreakStatement>())),
+ Assign("v", 1_i))),
+ DefaultCase()),
+ });
+
+ spirv::Builder& b = Build();
+
+ ASSERT_TRUE(b.GenerateGlobalVariable(v)) << b.error();
+ ASSERT_TRUE(b.GenerateGlobalVariable(a)) << b.error();
+ ASSERT_TRUE(b.GenerateFunction(func)) << b.error();
+
+ EXPECT_EQ(DumpBuilder(b), R"(OpName %1 "v"
OpName %5 "a"
OpName %8 "a_func"
%3 = OpTypeInt 32 1
@@ -399,30 +400,30 @@ OpFunctionEnd
}
TEST_F(BuilderTest, Switch_AllReturn) {
- // switch (1) {
- // case 1: {
- // return 1;
- // }
- // case 2: {
- // fallthrough;
- // }
- // default: {
- // return 3;
- // }
- // }
-
- auto* fn = Func("f", {}, ty.i32(),
- {
- Switch(1, //
- Case(Expr(1), Block(Return(1))), //
- Case(Expr(2), Block(Fallthrough())), //
- DefaultCase(Block(Return(3)))),
- });
-
- spirv::Builder& b = Build();
-
- EXPECT_TRUE(b.GenerateFunction(fn)) << b.error();
- EXPECT_EQ(DumpBuilder(b), R"(OpName %3 "f"
+ // switch (1i) {
+ // case 1i: {
+ // return 1i;
+ // }
+ // case 2i: {
+ // fallthrough;
+ // }
+ // default: {
+ // return 3i;
+ // }
+ // }
+
+ auto* fn = Func("f", {}, ty.i32(),
+ {
+ Switch(1_i, //
+ Case(Expr(1_i), Block(Return(1_i))), //
+ Case(Expr(2_i), Block(Fallthrough())), //
+ DefaultCase(Block(Return(3_i)))),
+ });
+
+ spirv::Builder& b = Build();
+
+ EXPECT_TRUE(b.GenerateFunction(fn)) << b.error();
+ EXPECT_EQ(DumpBuilder(b), R"(OpName %3 "f"
%2 = OpTypeInt 32 1
%1 = OpTypeFunction %2
%6 = OpConstant %2 1
diff --git a/chromium/third_party/dawn/src/tint/writer/spirv/builder_test.cc b/chromium/third_party/dawn/src/tint/writer/spirv/builder_test.cc
index 59938ad56dc..24d5b725f52 100644
--- a/chromium/third_party/dawn/src/tint/writer/spirv/builder_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/spirv/builder_test.cc
@@ -20,24 +20,41 @@ namespace {
using BuilderTest = TestHelper;
+TEST_F(BuilderTest, InvalidProgram) {
+ Diagnostics().add_error(diag::System::Writer, "make the program invalid");
+ ASSERT_FALSE(IsValid());
+ auto program = std::make_unique<Program>(std::move(*this));
+ ASSERT_FALSE(program->IsValid());
+ auto result = Generate(program.get(), Options{});
+ EXPECT_EQ(result.error, "input program is not valid");
+}
+
TEST_F(BuilderTest, TracksIdBounds) {
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- for (size_t i = 0; i < 5; i++) {
- EXPECT_EQ(b.next_id(), i + 1);
- }
+ for (size_t i = 0; i < 5; i++) {
+ EXPECT_EQ(b.next_id(), i + 1);
+ }
- EXPECT_EQ(6u, b.id_bound());
+ EXPECT_EQ(6u, b.id_bound());
}
TEST_F(BuilderTest, Capabilities_Dedup) {
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
+
+ b.push_capability(SpvCapabilityShader);
+ b.push_capability(SpvCapabilityShader);
+ b.push_capability(SpvCapabilityShader);
+
+ EXPECT_EQ(DumpInstructions(b.capabilities()), "OpCapability Shader\n");
+}
+
+TEST_F(BuilderTest, DeclareExtension) {
+ spirv::Builder& b = Build();
- b.push_capability(SpvCapabilityShader);
- b.push_capability(SpvCapabilityShader);
- b.push_capability(SpvCapabilityShader);
+ b.push_extension("SPV_KHR_integer_dot_product");
- EXPECT_EQ(DumpInstructions(b.capabilities()), "OpCapability Shader\n");
+ EXPECT_EQ(DumpInstructions(b.extensions()), "OpExtension \"SPV_KHR_integer_dot_product\"\n");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/writer/spirv/builder_type_test.cc b/chromium/third_party/dawn/src/tint/writer/spirv/builder_type_test.cc
index 44e778b30cf..8dc048e998c 100644
--- a/chromium/third_party/dawn/src/tint/writer/spirv/builder_type_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/spirv/builder_type_test.cc
@@ -12,68 +12,70 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-#include "src/tint/sem/depth_texture_type.h"
-#include "src/tint/sem/multisampled_texture_type.h"
-#include "src/tint/sem/sampled_texture_type.h"
+#include "src/tint/sem/depth_texture.h"
+#include "src/tint/sem/multisampled_texture.h"
+#include "src/tint/sem/sampled_texture.h"
#include "src/tint/writer/spirv/spv_dump.h"
#include "src/tint/writer/spirv/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::spirv {
namespace {
using BuilderTest_Type = TestHelper;
TEST_F(BuilderTest_Type, GenerateRuntimeArray) {
- auto* ary = ty.array(ty.i32());
- auto* str = Structure("S", {Member("x", ary)});
- Global("a", ty.Of(str), ast::StorageClass::kStorage, ast::Access::kRead,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(0),
- });
+ auto* ary = ty.array(ty.i32());
+ auto* str = Structure("S", {Member("x", ary)});
+ Global("a", ty.Of(str), ast::StorageClass::kStorage, ast::Access::kRead,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(0),
+ });
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- auto id = b.GenerateTypeIfNeeded(program->TypeOf(ary));
- ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(1u, id);
+ auto id = b.GenerateTypeIfNeeded(program->TypeOf(ary));
+ ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(1u, id);
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeInt 32 1
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeInt 32 1
%1 = OpTypeRuntimeArray %2
)");
}
TEST_F(BuilderTest_Type, ReturnsGeneratedRuntimeArray) {
- auto* ary = ty.array(ty.i32());
- auto* str = Structure("S", {Member("x", ary)});
- Global("a", ty.Of(str), ast::StorageClass::kStorage, ast::Access::kRead,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(0),
- });
+ auto* ary = ty.array(ty.i32());
+ auto* str = Structure("S", {Member("x", ary)});
+ Global("a", ty.Of(str), ast::StorageClass::kStorage, ast::Access::kRead,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(0),
+ });
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- EXPECT_EQ(b.GenerateTypeIfNeeded(program->TypeOf(ary)), 1u);
- EXPECT_EQ(b.GenerateTypeIfNeeded(program->TypeOf(ary)), 1u);
- ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(b.GenerateTypeIfNeeded(program->TypeOf(ary)), 1u);
+ EXPECT_EQ(b.GenerateTypeIfNeeded(program->TypeOf(ary)), 1u);
+ ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeInt 32 1
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeInt 32 1
%1 = OpTypeRuntimeArray %2
)");
}
TEST_F(BuilderTest_Type, GenerateArray) {
- auto* ary = ty.array(ty.i32(), 4);
- Global("a", ary, ast::StorageClass::kPrivate);
+ auto* ary = ty.array(ty.i32(), 4_u);
+ Global("a", ary, ast::StorageClass::kPrivate);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- auto id = b.GenerateTypeIfNeeded(program->TypeOf(ary));
- ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(1u, id);
+ auto id = b.GenerateTypeIfNeeded(program->TypeOf(ary));
+ ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(1u, id);
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeInt 32 1
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeInt 32 1
%3 = OpTypeInt 32 0
%4 = OpConstant %3 4
%1 = OpTypeArray %2 %4
@@ -81,19 +83,19 @@ TEST_F(BuilderTest_Type, GenerateArray) {
}
TEST_F(BuilderTest_Type, GenerateArray_WithStride) {
- auto* ary = ty.array(ty.i32(), 4, 16u);
- Global("a", ary, ast::StorageClass::kPrivate);
+ auto* ary = ty.array(ty.i32(), 4_u, 16u);
+ Global("a", ary, ast::StorageClass::kPrivate);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- auto id = b.GenerateTypeIfNeeded(program->TypeOf(ary));
- ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(1u, id);
+ auto id = b.GenerateTypeIfNeeded(program->TypeOf(ary));
+ ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(1u, id);
- EXPECT_EQ(DumpInstructions(b.annots()), R"(OpDecorate %1 ArrayStride 16
+ EXPECT_EQ(DumpInstructions(b.annots()), R"(OpDecorate %1 ArrayStride 16
)");
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeInt 32 1
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeInt 32 1
%3 = OpTypeInt 32 0
%4 = OpConstant %3 4
%1 = OpTypeArray %2 %4
@@ -101,16 +103,16 @@ TEST_F(BuilderTest_Type, GenerateArray_WithStride) {
}
TEST_F(BuilderTest_Type, ReturnsGeneratedArray) {
- auto* ary = ty.array(ty.i32(), 4);
- Global("a", ary, ast::StorageClass::kPrivate);
+ auto* ary = ty.array(ty.i32(), 4_u);
+ Global("a", ary, ast::StorageClass::kPrivate);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- EXPECT_EQ(b.GenerateTypeIfNeeded(program->TypeOf(ary)), 1u);
- EXPECT_EQ(b.GenerateTypeIfNeeded(program->TypeOf(ary)), 1u);
- ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(b.GenerateTypeIfNeeded(program->TypeOf(ary)), 1u);
+ EXPECT_EQ(b.GenerateTypeIfNeeded(program->TypeOf(ary)), 1u);
+ ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeInt 32 1
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeInt 32 1
%3 = OpTypeInt 32 0
%4 = OpConstant %3 4
%1 = OpTypeArray %2 %4
@@ -118,204 +120,202 @@ TEST_F(BuilderTest_Type, ReturnsGeneratedArray) {
}
TEST_F(BuilderTest_Type, GenerateBool) {
- auto* bool_ = create<sem::Bool>();
+ auto* bool_ = create<sem::Bool>();
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- auto id = b.GenerateTypeIfNeeded(bool_);
- ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(id, 1u);
+ auto id = b.GenerateTypeIfNeeded(bool_);
+ ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(id, 1u);
- ASSERT_EQ(b.types().size(), 1u);
- EXPECT_EQ(DumpInstruction(b.types()[0]), R"(%1 = OpTypeBool
+ ASSERT_EQ(b.types().size(), 1u);
+ EXPECT_EQ(DumpInstruction(b.types()[0]), R"(%1 = OpTypeBool
)");
}
TEST_F(BuilderTest_Type, ReturnsGeneratedBool) {
- auto* bool_ = create<sem::Bool>();
- auto* i32 = create<sem::I32>();
+ auto* bool_ = create<sem::Bool>();
+ auto* i32 = create<sem::I32>();
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- EXPECT_EQ(b.GenerateTypeIfNeeded(bool_), 1u);
- ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(b.GenerateTypeIfNeeded(i32), 2u);
- ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(b.GenerateTypeIfNeeded(bool_), 1u);
- ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(b.GenerateTypeIfNeeded(bool_), 1u);
+ ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(b.GenerateTypeIfNeeded(i32), 2u);
+ ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(b.GenerateTypeIfNeeded(bool_), 1u);
+ ASSERT_FALSE(b.has_error()) << b.error();
}
TEST_F(BuilderTest_Type, GenerateF32) {
- auto* f32 = create<sem::F32>();
+ auto* f32 = create<sem::F32>();
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- auto id = b.GenerateTypeIfNeeded(f32);
- ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(id, 1u);
+ auto id = b.GenerateTypeIfNeeded(f32);
+ ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(id, 1u);
- ASSERT_EQ(b.types().size(), 1u);
- EXPECT_EQ(DumpInstruction(b.types()[0]), R"(%1 = OpTypeFloat 32
+ ASSERT_EQ(b.types().size(), 1u);
+ EXPECT_EQ(DumpInstruction(b.types()[0]), R"(%1 = OpTypeFloat 32
)");
}
TEST_F(BuilderTest_Type, ReturnsGeneratedF32) {
- auto* f32 = create<sem::F32>();
- auto* i32 = create<sem::I32>();
+ auto* f32 = create<sem::F32>();
+ auto* i32 = create<sem::I32>();
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- EXPECT_EQ(b.GenerateTypeIfNeeded(f32), 1u);
- ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(b.GenerateTypeIfNeeded(i32), 2u);
- ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(b.GenerateTypeIfNeeded(f32), 1u);
- ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(b.GenerateTypeIfNeeded(f32), 1u);
+ ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(b.GenerateTypeIfNeeded(i32), 2u);
+ ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(b.GenerateTypeIfNeeded(f32), 1u);
+ ASSERT_FALSE(b.has_error()) << b.error();
}
TEST_F(BuilderTest_Type, GenerateI32) {
- auto* i32 = create<sem::I32>();
+ auto* i32 = create<sem::I32>();
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- auto id = b.GenerateTypeIfNeeded(i32);
- ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(id, 1u);
+ auto id = b.GenerateTypeIfNeeded(i32);
+ ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(id, 1u);
- ASSERT_EQ(b.types().size(), 1u);
- EXPECT_EQ(DumpInstruction(b.types()[0]), R"(%1 = OpTypeInt 32 1
+ ASSERT_EQ(b.types().size(), 1u);
+ EXPECT_EQ(DumpInstruction(b.types()[0]), R"(%1 = OpTypeInt 32 1
)");
}
TEST_F(BuilderTest_Type, ReturnsGeneratedI32) {
- auto* f32 = create<sem::F32>();
- auto* i32 = create<sem::I32>();
+ auto* f32 = create<sem::F32>();
+ auto* i32 = create<sem::I32>();
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- EXPECT_EQ(b.GenerateTypeIfNeeded(i32), 1u);
- ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(b.GenerateTypeIfNeeded(f32), 2u);
- ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(b.GenerateTypeIfNeeded(i32), 1u);
- ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(b.GenerateTypeIfNeeded(i32), 1u);
+ ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(b.GenerateTypeIfNeeded(f32), 2u);
+ ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(b.GenerateTypeIfNeeded(i32), 1u);
+ ASSERT_FALSE(b.has_error()) << b.error();
}
TEST_F(BuilderTest_Type, GenerateMatrix) {
- auto* f32 = create<sem::F32>();
- auto* vec3 = create<sem::Vector>(f32, 3u);
- auto* mat2x3 = create<sem::Matrix>(vec3, 2u);
+ auto* f32 = create<sem::F32>();
+ auto* vec3 = create<sem::Vector>(f32, 3u);
+ auto* mat2x3 = create<sem::Matrix>(vec3, 2u);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- auto id = b.GenerateTypeIfNeeded(mat2x3);
- ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(id, 1u);
+ auto id = b.GenerateTypeIfNeeded(mat2x3);
+ ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(id, 1u);
- EXPECT_EQ(b.types().size(), 3u);
- EXPECT_EQ(DumpInstructions(b.types()), R"(%3 = OpTypeFloat 32
+ EXPECT_EQ(b.types().size(), 3u);
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%3 = OpTypeFloat 32
%2 = OpTypeVector %3 3
%1 = OpTypeMatrix %2 2
)");
}
TEST_F(BuilderTest_Type, ReturnsGeneratedMatrix) {
- auto* i32 = create<sem::I32>();
- auto* col = create<sem::Vector>(i32, 4u);
- auto* mat = create<sem::Matrix>(col, 3u);
+ auto* i32 = create<sem::I32>();
+ auto* col = create<sem::Vector>(i32, 4u);
+ auto* mat = create<sem::Matrix>(col, 3u);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- EXPECT_EQ(b.GenerateTypeIfNeeded(mat), 1u);
- ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(b.GenerateTypeIfNeeded(i32), 3u);
- ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(b.GenerateTypeIfNeeded(mat), 1u);
- ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(b.GenerateTypeIfNeeded(mat), 1u);
+ ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(b.GenerateTypeIfNeeded(i32), 3u);
+ ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(b.GenerateTypeIfNeeded(mat), 1u);
+ ASSERT_FALSE(b.has_error()) << b.error();
}
TEST_F(BuilderTest_Type, GeneratePtr) {
- auto* i32 = create<sem::I32>();
- auto* ptr = create<sem::Pointer>(i32, ast::StorageClass::kOutput,
- ast::Access::kReadWrite);
+ auto* i32 = create<sem::I32>();
+ auto* ptr = create<sem::Pointer>(i32, ast::StorageClass::kOutput, ast::Access::kReadWrite);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- auto id = b.GenerateTypeIfNeeded(ptr);
- ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(1u, id);
+ auto id = b.GenerateTypeIfNeeded(ptr);
+ ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(1u, id);
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeInt 32 1
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeInt 32 1
%1 = OpTypePointer Output %2
)");
}
TEST_F(BuilderTest_Type, ReturnsGeneratedPtr) {
- auto* i32 = create<sem::I32>();
- auto* ptr = create<sem::Pointer>(i32, ast::StorageClass::kOutput,
- ast::Access::kReadWrite);
+ auto* i32 = create<sem::I32>();
+ auto* ptr = create<sem::Pointer>(i32, ast::StorageClass::kOutput, ast::Access::kReadWrite);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- EXPECT_EQ(b.GenerateTypeIfNeeded(ptr), 1u);
- EXPECT_EQ(b.GenerateTypeIfNeeded(ptr), 1u);
+ EXPECT_EQ(b.GenerateTypeIfNeeded(ptr), 1u);
+ EXPECT_EQ(b.GenerateTypeIfNeeded(ptr), 1u);
}
TEST_F(BuilderTest_Type, GenerateStruct) {
- auto* s = Structure("my_struct", {Member("a", ty.f32())});
+ auto* s = Structure("my_struct", {Member("a", ty.f32())});
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- auto id = b.GenerateTypeIfNeeded(program->TypeOf(s));
- ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(id, 1u);
+ auto id = b.GenerateTypeIfNeeded(program->TypeOf(s));
+ ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(id, 1u);
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
%1 = OpTypeStruct %2
)");
- EXPECT_EQ(DumpInstructions(b.debug()), R"(OpName %1 "my_struct"
+ EXPECT_EQ(DumpInstructions(b.debug()), R"(OpName %1 "my_struct"
OpMemberName %1 0 "a"
)");
}
TEST_F(BuilderTest_Type, GenerateStruct_DecoratedMembers) {
- auto* s = Structure("S", {
- Member("a", ty.f32()),
- Member("b", ty.f32(), {MemberAlign(8)}),
- });
+ auto* s = Structure("S", {
+ Member("a", ty.f32()),
+ Member("b", ty.f32(), {MemberAlign(8)}),
+ });
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- auto id = b.GenerateTypeIfNeeded(program->TypeOf(s));
- ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(id, 1u);
+ auto id = b.GenerateTypeIfNeeded(program->TypeOf(s));
+ ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(id, 1u);
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
%1 = OpTypeStruct %2 %2
)");
- EXPECT_EQ(DumpInstructions(b.debug()), R"(OpName %1 "S"
+ EXPECT_EQ(DumpInstructions(b.debug()), R"(OpName %1 "S"
OpMemberName %1 0 "a"
OpMemberName %1 1 "b"
)");
- EXPECT_EQ(DumpInstructions(b.annots()), R"(OpMemberDecorate %1 0 Offset 0
+ EXPECT_EQ(DumpInstructions(b.annots()), R"(OpMemberDecorate %1 0 Offset 0
OpMemberDecorate %1 1 Offset 8
)");
}
TEST_F(BuilderTest_Type, GenerateStruct_NonLayout_Matrix) {
- auto* s = Structure("S", {
- Member("a", ty.mat2x2<f32>()),
- Member("b", ty.mat2x3<f32>()),
- Member("c", ty.mat4x4<f32>()),
- });
+ auto* s = Structure("S", {
+ Member("a", ty.mat2x2<f32>()),
+ Member("b", ty.mat2x3<f32>()),
+ Member("c", ty.mat4x4<f32>()),
+ });
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- auto id = b.GenerateTypeIfNeeded(program->TypeOf(s));
- ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(id, 1u);
+ auto id = b.GenerateTypeIfNeeded(program->TypeOf(s));
+ ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(id, 1u);
- EXPECT_EQ(DumpInstructions(b.types()), R"(%4 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%4 = OpTypeFloat 32
%3 = OpTypeVector %4 2
%2 = OpTypeMatrix %3 2
%6 = OpTypeVector %4 3
@@ -324,12 +324,12 @@ TEST_F(BuilderTest_Type, GenerateStruct_NonLayout_Matrix) {
%7 = OpTypeMatrix %8 4
%1 = OpTypeStruct %2 %5 %7
)");
- EXPECT_EQ(DumpInstructions(b.debug()), R"(OpName %1 "S"
+ EXPECT_EQ(DumpInstructions(b.debug()), R"(OpName %1 "S"
OpMemberName %1 0 "a"
OpMemberName %1 1 "b"
OpMemberName %1 2 "c"
)");
- EXPECT_EQ(DumpInstructions(b.annots()), R"(OpMemberDecorate %1 0 Offset 0
+ EXPECT_EQ(DumpInstructions(b.annots()), R"(OpMemberDecorate %1 0 Offset 0
OpMemberDecorate %1 0 ColMajor
OpMemberDecorate %1 0 MatrixStride 8
OpMemberDecorate %1 1 Offset 16
@@ -342,20 +342,20 @@ OpMemberDecorate %1 2 MatrixStride 16
}
TEST_F(BuilderTest_Type, GenerateStruct_DecoratedMembers_LayoutMatrix) {
- // We have to infer layout for matrix when it also has an offset.
- auto* s = Structure("S", {
- Member("a", ty.mat2x2<f32>()),
- Member("b", ty.mat2x3<f32>()),
- Member("c", ty.mat4x4<f32>()),
- });
+ // We have to infer layout for matrix when it also has an offset.
+ auto* s = Structure("S", {
+ Member("a", ty.mat2x2<f32>()),
+ Member("b", ty.mat2x3<f32>()),
+ Member("c", ty.mat4x4<f32>()),
+ });
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- auto id = b.GenerateTypeIfNeeded(program->TypeOf(s));
- ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(id, 1u);
+ auto id = b.GenerateTypeIfNeeded(program->TypeOf(s));
+ ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(id, 1u);
- EXPECT_EQ(DumpInstructions(b.types()), R"(%4 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%4 = OpTypeFloat 32
%3 = OpTypeVector %4 2
%2 = OpTypeMatrix %3 2
%6 = OpTypeVector %4 3
@@ -364,12 +364,12 @@ TEST_F(BuilderTest_Type, GenerateStruct_DecoratedMembers_LayoutMatrix) {
%7 = OpTypeMatrix %8 4
%1 = OpTypeStruct %2 %5 %7
)");
- EXPECT_EQ(DumpInstructions(b.debug()), R"(OpName %1 "S"
+ EXPECT_EQ(DumpInstructions(b.debug()), R"(OpName %1 "S"
OpMemberName %1 0 "a"
OpMemberName %1 1 "b"
OpMemberName %1 2 "c"
)");
- EXPECT_EQ(DumpInstructions(b.annots()), R"(OpMemberDecorate %1 0 Offset 0
+ EXPECT_EQ(DumpInstructions(b.annots()), R"(OpMemberDecorate %1 0 Offset 0
OpMemberDecorate %1 0 ColMajor
OpMemberDecorate %1 0 MatrixStride 8
OpMemberDecorate %1 1 Offset 16
@@ -382,26 +382,26 @@ OpMemberDecorate %1 2 MatrixStride 16
}
TEST_F(BuilderTest_Type, GenerateStruct_DecoratedMembers_LayoutArraysOfMatrix) {
- // We have to infer layout for matrix when it also has an offset.
- // The decoration goes on the struct member, even if the matrix is buried
- // in levels of arrays.
- auto* arr_mat2x2 = ty.array(ty.mat2x2<f32>(), 1); // Singly nested array
- auto* arr_arr_mat2x3 = ty.array(ty.mat2x3<f32>(), 1); // Doubly nested array
- auto* rtarr_mat4x4 = ty.array(ty.mat4x4<f32>()); // Runtime array
-
- auto* s = Structure("S", {
- Member("a", arr_mat2x2),
- Member("b", arr_arr_mat2x3),
- Member("c", rtarr_mat4x4),
- });
-
- spirv::Builder& b = Build();
-
- auto id = b.GenerateTypeIfNeeded(program->TypeOf(s));
- ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(id, 1u);
-
- EXPECT_EQ(DumpInstructions(b.types()), R"(%5 = OpTypeFloat 32
+ // We have to infer layout for matrix when it also has an offset.
+ // The decoration goes on the struct member, even if the matrix is buried
+ // in levels of arrays.
+ auto* arr_mat2x2 = ty.array(ty.mat2x2<f32>(), 1_u); // Singly nested array
+ auto* arr_arr_mat2x3 = ty.array(ty.mat2x3<f32>(), 1_u); // Doubly nested array
+ auto* rtarr_mat4x4 = ty.array(ty.mat4x4<f32>()); // Runtime array
+
+ auto* s = Structure("S", {
+ Member("a", arr_mat2x2),
+ Member("b", arr_arr_mat2x3),
+ Member("c", rtarr_mat4x4),
+ });
+
+ spirv::Builder& b = Build();
+
+ auto id = b.GenerateTypeIfNeeded(program->TypeOf(s));
+ ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(id, 1u);
+
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%5 = OpTypeFloat 32
%4 = OpTypeVector %5 2
%3 = OpTypeMatrix %4 2
%6 = OpTypeInt 32 0
@@ -415,12 +415,12 @@ TEST_F(BuilderTest_Type, GenerateStruct_DecoratedMembers_LayoutArraysOfMatrix) {
%11 = OpTypeRuntimeArray %12
%1 = OpTypeStruct %2 %8 %11
)");
- EXPECT_EQ(DumpInstructions(b.debug()), R"(OpName %1 "S"
+ EXPECT_EQ(DumpInstructions(b.debug()), R"(OpName %1 "S"
OpMemberName %1 0 "a"
OpMemberName %1 1 "b"
OpMemberName %1 2 "c"
)");
- EXPECT_EQ(DumpInstructions(b.annots()), R"(OpMemberDecorate %1 0 Offset 0
+ EXPECT_EQ(DumpInstructions(b.annots()), R"(OpMemberDecorate %1 0 Offset 0
OpMemberDecorate %1 0 ColMajor
OpMemberDecorate %1 0 MatrixStride 8
OpDecorate %2 ArrayStride 16
@@ -436,526 +436,512 @@ OpDecorate %11 ArrayStride 64
}
TEST_F(BuilderTest_Type, GenerateU32) {
- auto* u32 = create<sem::U32>();
+ auto* u32 = create<sem::U32>();
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- auto id = b.GenerateTypeIfNeeded(u32);
- ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(id, 1u);
+ auto id = b.GenerateTypeIfNeeded(u32);
+ ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(id, 1u);
- ASSERT_EQ(b.types().size(), 1u);
- EXPECT_EQ(DumpInstruction(b.types()[0]), R"(%1 = OpTypeInt 32 0
+ ASSERT_EQ(b.types().size(), 1u);
+ EXPECT_EQ(DumpInstruction(b.types()[0]), R"(%1 = OpTypeInt 32 0
)");
}
TEST_F(BuilderTest_Type, ReturnsGeneratedU32) {
- auto* u32 = create<sem::U32>();
- auto* f32 = create<sem::F32>();
+ auto* u32 = create<sem::U32>();
+ auto* f32 = create<sem::F32>();
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- EXPECT_EQ(b.GenerateTypeIfNeeded(u32), 1u);
- ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(b.GenerateTypeIfNeeded(f32), 2u);
- ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(b.GenerateTypeIfNeeded(u32), 1u);
- ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(b.GenerateTypeIfNeeded(u32), 1u);
+ ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(b.GenerateTypeIfNeeded(f32), 2u);
+ ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(b.GenerateTypeIfNeeded(u32), 1u);
+ ASSERT_FALSE(b.has_error()) << b.error();
}
TEST_F(BuilderTest_Type, GenerateVector) {
- auto* vec = create<sem::Vector>(create<sem::F32>(), 3u);
+ auto* vec = create<sem::Vector>(create<sem::F32>(), 3u);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- auto id = b.GenerateTypeIfNeeded(vec);
- ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(id, 1u);
+ auto id = b.GenerateTypeIfNeeded(vec);
+ ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(id, 1u);
- EXPECT_EQ(b.types().size(), 2u);
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
+ EXPECT_EQ(b.types().size(), 2u);
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
%1 = OpTypeVector %2 3
)");
}
TEST_F(BuilderTest_Type, ReturnsGeneratedVector) {
- auto* i32 = create<sem::I32>();
- auto* vec = create<sem::Vector>(i32, 3u);
+ auto* i32 = create<sem::I32>();
+ auto* vec = create<sem::Vector>(i32, 3u);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- EXPECT_EQ(b.GenerateTypeIfNeeded(vec), 1u);
- ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(b.GenerateTypeIfNeeded(i32), 2u);
- ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(b.GenerateTypeIfNeeded(vec), 1u);
- ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(b.GenerateTypeIfNeeded(vec), 1u);
+ ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(b.GenerateTypeIfNeeded(i32), 2u);
+ ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(b.GenerateTypeIfNeeded(vec), 1u);
+ ASSERT_FALSE(b.has_error()) << b.error();
}
TEST_F(BuilderTest_Type, GenerateVoid) {
- auto* void_ = create<sem::Void>();
+ auto* void_ = create<sem::Void>();
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- auto id = b.GenerateTypeIfNeeded(void_);
- ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(id, 1u);
+ auto id = b.GenerateTypeIfNeeded(void_);
+ ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(id, 1u);
- ASSERT_EQ(b.types().size(), 1u);
- EXPECT_EQ(DumpInstruction(b.types()[0]), R"(%1 = OpTypeVoid
+ ASSERT_EQ(b.types().size(), 1u);
+ EXPECT_EQ(DumpInstruction(b.types()[0]), R"(%1 = OpTypeVoid
)");
}
TEST_F(BuilderTest_Type, ReturnsGeneratedVoid) {
- auto* void_ = create<sem::Void>();
- auto* i32 = create<sem::I32>();
+ auto* void_ = create<sem::Void>();
+ auto* i32 = create<sem::I32>();
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- EXPECT_EQ(b.GenerateTypeIfNeeded(void_), 1u);
- ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(b.GenerateTypeIfNeeded(i32), 2u);
- ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(b.GenerateTypeIfNeeded(void_), 1u);
- ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(b.GenerateTypeIfNeeded(void_), 1u);
+ ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(b.GenerateTypeIfNeeded(i32), 2u);
+ ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(b.GenerateTypeIfNeeded(void_), 1u);
+ ASSERT_FALSE(b.has_error()) << b.error();
}
struct PtrData {
- ast::StorageClass ast_class;
- SpvStorageClass result;
+ ast::StorageClass ast_class;
+ SpvStorageClass result;
};
inline std::ostream& operator<<(std::ostream& out, PtrData data) {
- out << data.ast_class;
- return out;
+ out << data.ast_class;
+ return out;
}
using PtrDataTest = TestParamHelper<PtrData>;
TEST_P(PtrDataTest, ConvertStorageClass) {
- auto params = GetParam();
+ auto params = GetParam();
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- EXPECT_EQ(b.ConvertStorageClass(params.ast_class), params.result);
+ EXPECT_EQ(b.ConvertStorageClass(params.ast_class), params.result);
}
INSTANTIATE_TEST_SUITE_P(
BuilderTest_Type,
PtrDataTest,
- testing::Values(
- PtrData{ast::StorageClass::kNone, SpvStorageClassMax},
- PtrData{ast::StorageClass::kInput, SpvStorageClassInput},
- PtrData{ast::StorageClass::kOutput, SpvStorageClassOutput},
- PtrData{ast::StorageClass::kUniform, SpvStorageClassUniform},
- PtrData{ast::StorageClass::kWorkgroup, SpvStorageClassWorkgroup},
- PtrData{ast::StorageClass::kUniformConstant,
- SpvStorageClassUniformConstant},
- PtrData{ast::StorageClass::kStorage, SpvStorageClassStorageBuffer},
- PtrData{ast::StorageClass::kPrivate, SpvStorageClassPrivate},
- PtrData{ast::StorageClass::kFunction, SpvStorageClassFunction}));
+ testing::Values(PtrData{ast::StorageClass::kNone, SpvStorageClassMax},
+ PtrData{ast::StorageClass::kInput, SpvStorageClassInput},
+ PtrData{ast::StorageClass::kOutput, SpvStorageClassOutput},
+ PtrData{ast::StorageClass::kUniform, SpvStorageClassUniform},
+ PtrData{ast::StorageClass::kWorkgroup, SpvStorageClassWorkgroup},
+ PtrData{ast::StorageClass::kHandle, SpvStorageClassUniformConstant},
+ PtrData{ast::StorageClass::kStorage, SpvStorageClassStorageBuffer},
+ PtrData{ast::StorageClass::kPrivate, SpvStorageClassPrivate},
+ PtrData{ast::StorageClass::kFunction, SpvStorageClassFunction}));
TEST_F(BuilderTest_Type, DepthTexture_Generate_2d) {
- auto* two_d = create<sem::DepthTexture>(ast::TextureDimension::k2d);
+ auto* two_d = create<sem::DepthTexture>(ast::TextureDimension::k2d);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- auto id_two_d = b.GenerateTypeIfNeeded(two_d);
- ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(1u, id_two_d);
+ auto id_two_d = b.GenerateTypeIfNeeded(two_d);
+ ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(1u, id_two_d);
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
%1 = OpTypeImage %2 2D 0 0 0 1 Unknown
)");
}
TEST_F(BuilderTest_Type, DepthTexture_Generate_2dArray) {
- auto* two_d_array =
- create<sem::DepthTexture>(ast::TextureDimension::k2dArray);
+ auto* two_d_array = create<sem::DepthTexture>(ast::TextureDimension::k2dArray);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- auto id_two_d_array = b.GenerateTypeIfNeeded(two_d_array);
- ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(1u, id_two_d_array);
+ auto id_two_d_array = b.GenerateTypeIfNeeded(two_d_array);
+ ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(1u, id_two_d_array);
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
%1 = OpTypeImage %2 2D 0 1 0 1 Unknown
)");
}
TEST_F(BuilderTest_Type, DepthTexture_Generate_Cube) {
- auto* cube = create<sem::DepthTexture>(ast::TextureDimension::kCube);
+ auto* cube = create<sem::DepthTexture>(ast::TextureDimension::kCube);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- auto id_cube = b.GenerateTypeIfNeeded(cube);
- ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(1u, id_cube);
+ auto id_cube = b.GenerateTypeIfNeeded(cube);
+ ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(1u, id_cube);
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
%1 = OpTypeImage %2 Cube 0 0 0 1 Unknown
)");
- EXPECT_EQ(DumpInstructions(b.capabilities()), "");
+ EXPECT_EQ(DumpInstructions(b.capabilities()), "");
}
TEST_F(BuilderTest_Type, DepthTexture_Generate_CubeArray) {
- auto* cube_array =
- create<sem::DepthTexture>(ast::TextureDimension::kCubeArray);
+ auto* cube_array = create<sem::DepthTexture>(ast::TextureDimension::kCubeArray);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- auto id_cube_array = b.GenerateTypeIfNeeded(cube_array);
- ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(1u, id_cube_array);
+ auto id_cube_array = b.GenerateTypeIfNeeded(cube_array);
+ ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(1u, id_cube_array);
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
%1 = OpTypeImage %2 Cube 0 1 0 1 Unknown
)");
- EXPECT_EQ(DumpInstructions(b.capabilities()),
- R"(OpCapability SampledCubeArray
+ EXPECT_EQ(DumpInstructions(b.capabilities()),
+ R"(OpCapability SampledCubeArray
)");
}
TEST_F(BuilderTest_Type, MultisampledTexture_Generate_2d_i32) {
- auto* i32 = create<sem::I32>();
- auto* ms = create<sem::MultisampledTexture>(ast::TextureDimension::k2d, i32);
+ auto* i32 = create<sem::I32>();
+ auto* ms = create<sem::MultisampledTexture>(ast::TextureDimension::k2d, i32);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- EXPECT_EQ(1u, b.GenerateTypeIfNeeded(ms));
- ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeInt 32 1
+ EXPECT_EQ(1u, b.GenerateTypeIfNeeded(ms));
+ ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeInt 32 1
%1 = OpTypeImage %2 2D 0 0 1 1 Unknown
)");
}
TEST_F(BuilderTest_Type, MultisampledTexture_Generate_2d_u32) {
- auto* u32 = create<sem::U32>();
- auto* ms = create<sem::MultisampledTexture>(ast::TextureDimension::k2d, u32);
+ auto* u32 = create<sem::U32>();
+ auto* ms = create<sem::MultisampledTexture>(ast::TextureDimension::k2d, u32);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- EXPECT_EQ(b.GenerateTypeIfNeeded(ms), 1u);
- ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()),
- R"(%2 = OpTypeInt 32 0
+ EXPECT_EQ(b.GenerateTypeIfNeeded(ms), 1u);
+ ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(DumpInstructions(b.types()),
+ R"(%2 = OpTypeInt 32 0
%1 = OpTypeImage %2 2D 0 0 1 1 Unknown
)");
}
TEST_F(BuilderTest_Type, MultisampledTexture_Generate_2d_f32) {
- auto* f32 = create<sem::F32>();
- auto* ms = create<sem::MultisampledTexture>(ast::TextureDimension::k2d, f32);
+ auto* f32 = create<sem::F32>();
+ auto* ms = create<sem::MultisampledTexture>(ast::TextureDimension::k2d, f32);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- EXPECT_EQ(b.GenerateTypeIfNeeded(ms), 1u);
- ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()),
- R"(%2 = OpTypeFloat 32
+ EXPECT_EQ(b.GenerateTypeIfNeeded(ms), 1u);
+ ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(DumpInstructions(b.types()),
+ R"(%2 = OpTypeFloat 32
%1 = OpTypeImage %2 2D 0 0 1 1 Unknown
)");
}
TEST_F(BuilderTest_Type, SampledTexture_Generate_1d_i32) {
- auto* s = create<sem::SampledTexture>(ast::TextureDimension::k1d,
- create<sem::I32>());
+ auto* s = create<sem::SampledTexture>(ast::TextureDimension::k1d, create<sem::I32>());
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- EXPECT_EQ(b.GenerateTypeIfNeeded(s), 1u);
- ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()),
- R"(%2 = OpTypeInt 32 1
+ EXPECT_EQ(b.GenerateTypeIfNeeded(s), 1u);
+ ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(DumpInstructions(b.types()),
+ R"(%2 = OpTypeInt 32 1
%1 = OpTypeImage %2 1D 0 0 0 1 Unknown
)");
- EXPECT_EQ(DumpInstructions(b.capabilities()),
- R"(OpCapability Sampled1D
+ EXPECT_EQ(DumpInstructions(b.capabilities()),
+ R"(OpCapability Sampled1D
)");
}
TEST_F(BuilderTest_Type, SampledTexture_Generate_1d_u32) {
- auto* u32 = create<sem::U32>();
- auto* s = create<sem::SampledTexture>(ast::TextureDimension::k1d, u32);
+ auto* u32 = create<sem::U32>();
+ auto* s = create<sem::SampledTexture>(ast::TextureDimension::k1d, u32);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- EXPECT_EQ(b.GenerateTypeIfNeeded(s), 1u);
- ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()),
- R"(%2 = OpTypeInt 32 0
+ EXPECT_EQ(b.GenerateTypeIfNeeded(s), 1u);
+ ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(DumpInstructions(b.types()),
+ R"(%2 = OpTypeInt 32 0
%1 = OpTypeImage %2 1D 0 0 0 1 Unknown
)");
- EXPECT_EQ(DumpInstructions(b.capabilities()),
- R"(OpCapability Sampled1D
+ EXPECT_EQ(DumpInstructions(b.capabilities()),
+ R"(OpCapability Sampled1D
)");
}
TEST_F(BuilderTest_Type, SampledTexture_Generate_1d_f32) {
- auto* f32 = create<sem::F32>();
- auto* s = create<sem::SampledTexture>(ast::TextureDimension::k1d, f32);
+ auto* f32 = create<sem::F32>();
+ auto* s = create<sem::SampledTexture>(ast::TextureDimension::k1d, f32);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- EXPECT_EQ(b.GenerateTypeIfNeeded(s), 1u);
- ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()),
- R"(%2 = OpTypeFloat 32
+ EXPECT_EQ(b.GenerateTypeIfNeeded(s), 1u);
+ ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(DumpInstructions(b.types()),
+ R"(%2 = OpTypeFloat 32
%1 = OpTypeImage %2 1D 0 0 0 1 Unknown
)");
- EXPECT_EQ(DumpInstructions(b.capabilities()),
- R"(OpCapability Sampled1D
+ EXPECT_EQ(DumpInstructions(b.capabilities()),
+ R"(OpCapability Sampled1D
)");
}
TEST_F(BuilderTest_Type, SampledTexture_Generate_2d) {
- auto* f32 = create<sem::F32>();
- auto* s = create<sem::SampledTexture>(ast::TextureDimension::k2d, f32);
+ auto* f32 = create<sem::F32>();
+ auto* s = create<sem::SampledTexture>(ast::TextureDimension::k2d, f32);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- EXPECT_EQ(b.GenerateTypeIfNeeded(s), 1u);
- ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()),
- R"(%2 = OpTypeFloat 32
+ EXPECT_EQ(b.GenerateTypeIfNeeded(s), 1u);
+ ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(DumpInstructions(b.types()),
+ R"(%2 = OpTypeFloat 32
%1 = OpTypeImage %2 2D 0 0 0 1 Unknown
)");
}
TEST_F(BuilderTest_Type, SampledTexture_Generate_2d_array) {
- auto* f32 = create<sem::F32>();
- auto* s = create<sem::SampledTexture>(ast::TextureDimension::k2dArray, f32);
+ auto* f32 = create<sem::F32>();
+ auto* s = create<sem::SampledTexture>(ast::TextureDimension::k2dArray, f32);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- EXPECT_EQ(b.GenerateTypeIfNeeded(s), 1u);
- ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()),
- R"(%2 = OpTypeFloat 32
+ EXPECT_EQ(b.GenerateTypeIfNeeded(s), 1u);
+ ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(DumpInstructions(b.types()),
+ R"(%2 = OpTypeFloat 32
%1 = OpTypeImage %2 2D 0 1 0 1 Unknown
)");
}
TEST_F(BuilderTest_Type, SampledTexture_Generate_3d) {
- auto* f32 = create<sem::F32>();
- auto* s = create<sem::SampledTexture>(ast::TextureDimension::k3d, f32);
+ auto* f32 = create<sem::F32>();
+ auto* s = create<sem::SampledTexture>(ast::TextureDimension::k3d, f32);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- EXPECT_EQ(b.GenerateTypeIfNeeded(s), 1u);
- ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()),
- R"(%2 = OpTypeFloat 32
+ EXPECT_EQ(b.GenerateTypeIfNeeded(s), 1u);
+ ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(DumpInstructions(b.types()),
+ R"(%2 = OpTypeFloat 32
%1 = OpTypeImage %2 3D 0 0 0 1 Unknown
)");
}
TEST_F(BuilderTest_Type, SampledTexture_Generate_Cube) {
- auto* f32 = create<sem::F32>();
- auto* s = create<sem::SampledTexture>(ast::TextureDimension::kCube, f32);
+ auto* f32 = create<sem::F32>();
+ auto* s = create<sem::SampledTexture>(ast::TextureDimension::kCube, f32);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- EXPECT_EQ(b.GenerateTypeIfNeeded(s), 1u);
- ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()),
- R"(%2 = OpTypeFloat 32
+ EXPECT_EQ(b.GenerateTypeIfNeeded(s), 1u);
+ ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(DumpInstructions(b.types()),
+ R"(%2 = OpTypeFloat 32
%1 = OpTypeImage %2 Cube 0 0 0 1 Unknown
)");
- EXPECT_EQ(DumpInstructions(b.capabilities()), "");
+ EXPECT_EQ(DumpInstructions(b.capabilities()), "");
}
TEST_F(BuilderTest_Type, SampledTexture_Generate_CubeArray) {
- auto* f32 = create<sem::F32>();
- auto* s = create<sem::SampledTexture>(ast::TextureDimension::kCubeArray, f32);
+ auto* f32 = create<sem::F32>();
+ auto* s = create<sem::SampledTexture>(ast::TextureDimension::kCubeArray, f32);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- EXPECT_EQ(b.GenerateTypeIfNeeded(s), 1u);
- ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()),
- R"(%2 = OpTypeFloat 32
+ EXPECT_EQ(b.GenerateTypeIfNeeded(s), 1u);
+ ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(DumpInstructions(b.types()),
+ R"(%2 = OpTypeFloat 32
%1 = OpTypeImage %2 Cube 0 1 0 1 Unknown
)");
- EXPECT_EQ(DumpInstructions(b.capabilities()),
- R"(OpCapability SampledCubeArray
+ EXPECT_EQ(DumpInstructions(b.capabilities()),
+ R"(OpCapability SampledCubeArray
)");
}
TEST_F(BuilderTest_Type, StorageTexture_Generate_1d) {
- auto* s =
- ty.storage_texture(ast::TextureDimension::k1d,
- ast::TexelFormat::kR32Float, ast::Access::kWrite);
+ auto* s = ty.storage_texture(ast::TextureDimension::k1d, ast::TexelFormat::kR32Float,
+ ast::Access::kWrite);
- Global("test_var", s,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(0),
- });
+ Global("test_var", s,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(0),
+ });
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- EXPECT_EQ(b.GenerateTypeIfNeeded(program->TypeOf(s)), 1u);
- ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
+ EXPECT_EQ(b.GenerateTypeIfNeeded(program->TypeOf(s)), 1u);
+ ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
%1 = OpTypeImage %2 1D 0 0 0 2 R32f
)");
}
TEST_F(BuilderTest_Type, StorageTexture_Generate_2d) {
- auto* s =
- ty.storage_texture(ast::TextureDimension::k2d,
- ast::TexelFormat::kR32Float, ast::Access::kWrite);
+ auto* s = ty.storage_texture(ast::TextureDimension::k2d, ast::TexelFormat::kR32Float,
+ ast::Access::kWrite);
- Global("test_var", s,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(0),
- });
+ Global("test_var", s,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(0),
+ });
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- EXPECT_EQ(b.GenerateTypeIfNeeded(program->TypeOf(s)), 1u);
- ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
+ EXPECT_EQ(b.GenerateTypeIfNeeded(program->TypeOf(s)), 1u);
+ ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
%1 = OpTypeImage %2 2D 0 0 0 2 R32f
)");
}
TEST_F(BuilderTest_Type, StorageTexture_Generate_2dArray) {
- auto* s =
- ty.storage_texture(ast::TextureDimension::k2dArray,
- ast::TexelFormat::kR32Float, ast::Access::kWrite);
+ auto* s = ty.storage_texture(ast::TextureDimension::k2dArray, ast::TexelFormat::kR32Float,
+ ast::Access::kWrite);
- Global("test_var", s,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(0),
- });
+ Global("test_var", s,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(0),
+ });
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- EXPECT_EQ(b.GenerateTypeIfNeeded(program->TypeOf(s)), 1u);
- ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
+ EXPECT_EQ(b.GenerateTypeIfNeeded(program->TypeOf(s)), 1u);
+ ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
%1 = OpTypeImage %2 2D 0 1 0 2 R32f
)");
}
TEST_F(BuilderTest_Type, StorageTexture_Generate_3d) {
- auto* s =
- ty.storage_texture(ast::TextureDimension::k3d,
- ast::TexelFormat::kR32Float, ast::Access::kWrite);
+ auto* s = ty.storage_texture(ast::TextureDimension::k3d, ast::TexelFormat::kR32Float,
+ ast::Access::kWrite);
- Global("test_var", s,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(0),
- });
+ Global("test_var", s,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(0),
+ });
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- EXPECT_EQ(b.GenerateTypeIfNeeded(program->TypeOf(s)), 1u);
- ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
+ EXPECT_EQ(b.GenerateTypeIfNeeded(program->TypeOf(s)), 1u);
+ ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
%1 = OpTypeImage %2 3D 0 0 0 2 R32f
)");
}
-TEST_F(BuilderTest_Type,
- StorageTexture_Generate_SampledTypeFloat_Format_r32float) {
- auto* s =
- ty.storage_texture(ast::TextureDimension::k2d,
- ast::TexelFormat::kR32Float, ast::Access::kWrite);
+TEST_F(BuilderTest_Type, StorageTexture_Generate_SampledTypeFloat_Format_r32float) {
+ auto* s = ty.storage_texture(ast::TextureDimension::k2d, ast::TexelFormat::kR32Float,
+ ast::Access::kWrite);
- Global("test_var", s,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(0),
- });
+ Global("test_var", s,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(0),
+ });
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- EXPECT_EQ(b.GenerateTypeIfNeeded(program->TypeOf(s)), 1u);
- ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
+ EXPECT_EQ(b.GenerateTypeIfNeeded(program->TypeOf(s)), 1u);
+ ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
%1 = OpTypeImage %2 2D 0 0 0 2 R32f
)");
}
-TEST_F(BuilderTest_Type,
- StorageTexture_Generate_SampledTypeSint_Format_r32sint) {
- auto* s = ty.storage_texture(ast::TextureDimension::k2d,
- ast::TexelFormat::kR32Sint, ast::Access::kWrite);
+TEST_F(BuilderTest_Type, StorageTexture_Generate_SampledTypeSint_Format_r32sint) {
+ auto* s = ty.storage_texture(ast::TextureDimension::k2d, ast::TexelFormat::kR32Sint,
+ ast::Access::kWrite);
- Global("test_var", s,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(0),
- });
+ Global("test_var", s,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(0),
+ });
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- EXPECT_EQ(b.GenerateTypeIfNeeded(program->TypeOf(s)), 1u);
- ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeInt 32 1
+ EXPECT_EQ(b.GenerateTypeIfNeeded(program->TypeOf(s)), 1u);
+ ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeInt 32 1
%1 = OpTypeImage %2 2D 0 0 0 2 R32i
)");
}
-TEST_F(BuilderTest_Type,
- StorageTexture_Generate_SampledTypeUint_Format_r32uint) {
- auto* s = ty.storage_texture(ast::TextureDimension::k2d,
- ast::TexelFormat::kR32Uint, ast::Access::kWrite);
+TEST_F(BuilderTest_Type, StorageTexture_Generate_SampledTypeUint_Format_r32uint) {
+ auto* s = ty.storage_texture(ast::TextureDimension::k2d, ast::TexelFormat::kR32Uint,
+ ast::Access::kWrite);
- Global("test_var", s,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(0),
- });
+ Global("test_var", s,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(0),
+ });
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- EXPECT_EQ(b.GenerateTypeIfNeeded(program->TypeOf(s)), 1u);
- ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeInt 32 0
+ EXPECT_EQ(b.GenerateTypeIfNeeded(program->TypeOf(s)), 1u);
+ ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeInt 32 0
%1 = OpTypeImage %2 2D 0 0 0 2 R32ui
)");
}
TEST_F(BuilderTest_Type, Sampler) {
- auto* sampler = create<sem::Sampler>(ast::SamplerKind::kSampler);
+ auto* sampler = create<sem::Sampler>(ast::SamplerKind::kSampler);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- EXPECT_EQ(b.GenerateTypeIfNeeded(sampler), 1u);
- ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), "%1 = OpTypeSampler\n");
+ EXPECT_EQ(b.GenerateTypeIfNeeded(sampler), 1u);
+ ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(DumpInstructions(b.types()), "%1 = OpTypeSampler\n");
}
TEST_F(BuilderTest_Type, ComparisonSampler) {
- auto* sampler = create<sem::Sampler>(ast::SamplerKind::kComparisonSampler);
+ auto* sampler = create<sem::Sampler>(ast::SamplerKind::kComparisonSampler);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- EXPECT_EQ(b.GenerateTypeIfNeeded(sampler), 1u);
- ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), "%1 = OpTypeSampler\n");
+ EXPECT_EQ(b.GenerateTypeIfNeeded(sampler), 1u);
+ ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(DumpInstructions(b.types()), "%1 = OpTypeSampler\n");
}
TEST_F(BuilderTest_Type, Dedup_Sampler_And_ComparisonSampler) {
- auto* comp_sampler =
- create<sem::Sampler>(ast::SamplerKind::kComparisonSampler);
- auto* sampler = create<sem::Sampler>(ast::SamplerKind::kSampler);
+ auto* comp_sampler = create<sem::Sampler>(ast::SamplerKind::kComparisonSampler);
+ auto* sampler = create<sem::Sampler>(ast::SamplerKind::kSampler);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- EXPECT_EQ(b.GenerateTypeIfNeeded(comp_sampler), 1u);
+ EXPECT_EQ(b.GenerateTypeIfNeeded(comp_sampler), 1u);
- EXPECT_EQ(b.GenerateTypeIfNeeded(sampler), 1u);
+ EXPECT_EQ(b.GenerateTypeIfNeeded(sampler), 1u);
- ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), "%1 = OpTypeSampler\n");
+ ASSERT_FALSE(b.has_error()) << b.error();
+ EXPECT_EQ(DumpInstructions(b.types()), "%1 = OpTypeSampler\n");
}
} // namespace
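A note on reading the expected strings in the texture-type tests above: they follow the OpTypeImage operand order from the SPIR-V specification (Sampled Type, Dim, Depth, Arrayed, MS, Sampled, Format). The annotation below is an editorial reading aid rather than part of the patch:

// %1 = OpTypeImage %2 2D 0 0 1 1 Unknown    (multisampled texture, f32 sampled type)
//      sampled type %2, Dim = 2D, Depth = 0, Arrayed = 0,
//      MS = 1 (multisampled), Sampled = 1 (used with a sampler), Format = Unknown
//
// %1 = OpTypeImage %2 2D 0 0 0 2 R32f       (storage texture, r32float)
//      identical layout, but Sampled = 2 (accessed without a sampler)
//      and an explicit image format, R32f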
diff --git a/chromium/third_party/dawn/src/tint/writer/spirv/builder_unary_op_expression_test.cc b/chromium/third_party/dawn/src/tint/writer/spirv/builder_unary_op_expression_test.cc
index ae08c947c9a..e28eb78fa51 100644
--- a/chromium/third_party/dawn/src/tint/writer/spirv/builder_unary_op_expression_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/spirv/builder_unary_op_expression_test.cc
@@ -15,101 +15,100 @@
#include "src/tint/writer/spirv/spv_dump.h"
#include "src/tint/writer/spirv/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::spirv {
namespace {
using BuilderTest = TestHelper;
TEST_F(BuilderTest, UnaryOp_Negation_Integer) {
- auto* expr = create<ast::UnaryOpExpression>(ast::UnaryOp::kNegation, Expr(1));
- WrapInFunction(expr);
+ auto* expr = create<ast::UnaryOpExpression>(ast::UnaryOp::kNegation, Expr(1_i));
+ WrapInFunction(expr);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- EXPECT_EQ(b.GenerateUnaryOpExpression(expr), 1u) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeInt 32 1
+ b.push_function(Function{});
+ EXPECT_EQ(b.GenerateUnaryOpExpression(expr), 1u) << b.error();
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeInt 32 1
%3 = OpConstant %2 1
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(%1 = OpSNegate %2 %3
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(%1 = OpSNegate %2 %3
)");
}
TEST_F(BuilderTest, UnaryOp_Negation_Float) {
- auto* expr =
- create<ast::UnaryOpExpression>(ast::UnaryOp::kNegation, Expr(1.f));
- WrapInFunction(expr);
+ auto* expr = create<ast::UnaryOpExpression>(ast::UnaryOp::kNegation, Expr(1_f));
+ WrapInFunction(expr);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- EXPECT_EQ(b.GenerateUnaryOpExpression(expr), 1u) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
+ b.push_function(Function{});
+ EXPECT_EQ(b.GenerateUnaryOpExpression(expr), 1u) << b.error();
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeFloat 32
%3 = OpConstant %2 1
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(%1 = OpFNegate %2 %3
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(%1 = OpFNegate %2 %3
)");
}
TEST_F(BuilderTest, UnaryOp_Complement) {
- auto* expr =
- create<ast::UnaryOpExpression>(ast::UnaryOp::kComplement, Expr(1));
- WrapInFunction(expr);
+ auto* expr = create<ast::UnaryOpExpression>(ast::UnaryOp::kComplement, Expr(1_i));
+ WrapInFunction(expr);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- EXPECT_EQ(b.GenerateUnaryOpExpression(expr), 1u) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeInt 32 1
+ b.push_function(Function{});
+ EXPECT_EQ(b.GenerateUnaryOpExpression(expr), 1u) << b.error();
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeInt 32 1
%3 = OpConstant %2 1
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(%1 = OpNot %2 %3
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(%1 = OpNot %2 %3
)");
}
TEST_F(BuilderTest, UnaryOp_Not) {
- auto* expr = create<ast::UnaryOpExpression>(ast::UnaryOp::kNot, Expr(false));
- WrapInFunction(expr);
+ auto* expr = create<ast::UnaryOpExpression>(ast::UnaryOp::kNot, Expr(false));
+ WrapInFunction(expr);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- EXPECT_EQ(b.GenerateUnaryOpExpression(expr), 1u) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeBool
-%3 = OpConstantFalse %2
+ b.push_function(Function{});
+ EXPECT_EQ(b.GenerateUnaryOpExpression(expr), 1u) << b.error();
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%2 = OpTypeBool
+%3 = OpConstantNull %2
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(%1 = OpLogicalNot %2 %3
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(%1 = OpLogicalNot %2 %3
)");
}
TEST_F(BuilderTest, UnaryOp_LoadRequired) {
- auto* var = Var("param", ty.vec3<f32>());
+ auto* var = Var("param", ty.vec3<f32>());
- auto* expr =
- create<ast::UnaryOpExpression>(ast::UnaryOp::kNegation, Expr("param"));
- WrapInFunction(var, expr);
+ auto* expr = create<ast::UnaryOpExpression>(ast::UnaryOp::kNegation, Expr("param"));
+ WrapInFunction(var, expr);
- spirv::Builder& b = Build();
+ spirv::Builder& b = Build();
- b.push_function(Function{});
- EXPECT_TRUE(b.GenerateFunctionVariable(var)) << b.error();
- EXPECT_EQ(b.GenerateUnaryOpExpression(expr), 6u) << b.error();
- ASSERT_FALSE(b.has_error()) << b.error();
+ b.push_function(Function{});
+ EXPECT_TRUE(b.GenerateFunctionVariable(var)) << b.error();
+ EXPECT_EQ(b.GenerateUnaryOpExpression(expr), 6u) << b.error();
+ ASSERT_FALSE(b.has_error()) << b.error();
- EXPECT_EQ(DumpInstructions(b.types()), R"(%4 = OpTypeFloat 32
+ EXPECT_EQ(DumpInstructions(b.types()), R"(%4 = OpTypeFloat 32
%3 = OpTypeVector %4 3
%2 = OpTypePointer Function %3
%5 = OpConstantNull %3
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].variables()),
- R"(%1 = OpVariable %2 Function %5
+ EXPECT_EQ(DumpInstructions(b.functions()[0].variables()),
+ R"(%1 = OpVariable %2 Function %5
)");
- EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
- R"(%7 = OpLoad %3 %1
+ EXPECT_EQ(DumpInstructions(b.functions()[0].instructions()),
+ R"(%7 = OpLoad %3 %1
%6 = OpFNegate %3 %7
)");
}
diff --git a/chromium/third_party/dawn/src/tint/writer/spirv/function.cc b/chromium/third_party/dawn/src/tint/writer/spirv/function.cc
index c7dc85588b7..871ef3f7b96 100644
--- a/chromium/third_party/dawn/src/tint/writer/spirv/function.cc
+++ b/chromium/third_party/dawn/src/tint/writer/spirv/function.cc
@@ -16,9 +16,7 @@
namespace tint::writer::spirv {
-Function::Function()
- : declaration_(Instruction{spv::Op::OpNop, {}}),
- label_op_(Operand::Int(0)) {}
+Function::Function() : declaration_(Instruction{spv::Op::OpNop, {}}), label_op_(Operand(0u)) {}
Function::Function(const Instruction& declaration,
const Operand& label_op,
@@ -30,22 +28,22 @@ Function::Function(const Function& other) = default;
Function::~Function() = default;
void Function::iterate(std::function<void(const Instruction&)> cb) const {
- cb(declaration_);
+ cb(declaration_);
- for (const auto& param : params_) {
- cb(param);
- }
+ for (const auto& param : params_) {
+ cb(param);
+ }
- cb(Instruction{spv::Op::OpLabel, {label_op_}});
+ cb(Instruction{spv::Op::OpLabel, {label_op_}});
- for (const auto& var : vars_) {
- cb(var);
- }
- for (const auto& inst : instructions_) {
- cb(inst);
- }
+ for (const auto& var : vars_) {
+ cb(var);
+ }
+ for (const auto& inst : instructions_) {
+ cb(inst);
+ }
- cb(Instruction{spv::Op::OpFunctionEnd, {}});
+ cb(Instruction{spv::Op::OpFunctionEnd, {}});
}
} // namespace tint::writer::spirv
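The reflowed iterate() above fixes the emission order of a function: declaration, parameters, the entry-block OpLabel, variables, body instructions, then OpFunctionEnd. A minimal caller sketch, with the helper name DumpOpcodes being a hypothetical addition:

#include <cstdint>
#include <iostream>

#include "src/tint/writer/spirv/function.h"

// Hypothetical helper: prints opcodes in the order iterate() visits them,
// i.e. OpFunction, OpFunctionParameter..., OpLabel, OpVariable..., the body
// instructions, and finally OpFunctionEnd.
void DumpOpcodes(const tint::writer::spirv::Function& fn) {
    fn.iterate([](const tint::writer::spirv::Instruction& inst) {
        std::cout << static_cast<uint32_t>(inst.opcode()) << "\n";
    });
}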
diff --git a/chromium/third_party/dawn/src/tint/writer/spirv/function.h b/chromium/third_party/dawn/src/tint/writer/spirv/function.h
index de6f8a48769..05012f2788b 100644
--- a/chromium/third_party/dawn/src/tint/writer/spirv/function.h
+++ b/chromium/third_party/dawn/src/tint/writer/spirv/function.h
@@ -23,73 +23,73 @@ namespace tint::writer::spirv {
/// A SPIR-V function
class Function {
- public:
- /// Constructor for testing purposes
- /// This creates a bad declaration, so won't generate correct SPIR-V
- Function();
-
- /// Constructor
- /// @param declaration the function declaration
- /// @param label_op the operand for function's entry block label
- /// @param params the function parameters
- Function(const Instruction& declaration,
- const Operand& label_op,
- const InstructionList& params);
- /// Copy constructor
- /// @param other the function to copy
- Function(const Function& other);
- ~Function();
-
- /// Iterates over the function call the cb on each instruction
- /// @param cb the callback to call
- void iterate(std::function<void(const Instruction&)> cb) const;
-
- /// @returns the declaration
- const Instruction& declaration() const { return declaration_; }
-
- /// @returns the label ID for the function entry block
- uint32_t label_id() const { return label_op_.to_i(); }
-
- /// Adds an instruction to the instruction list
- /// @param op the op to set
- /// @param operands the operands for the instruction
- void push_inst(spv::Op op, const OperandList& operands) {
- instructions_.push_back(Instruction{op, operands});
- }
- /// @returns the instruction list
- const InstructionList& instructions() const { return instructions_; }
-
- /// Adds a variable to the variable list
- /// @param operands the operands for the variable
- void push_var(const OperandList& operands) {
- vars_.push_back(Instruction{spv::Op::OpVariable, operands});
- }
- /// @returns the variable list
- const InstructionList& variables() const { return vars_; }
-
- /// @returns the word length of the function
- uint32_t word_length() const {
- // 1 for the Label and 1 for the FunctionEnd
- uint32_t size = 2 + declaration_.word_length();
-
- for (const auto& param : params_) {
- size += param.word_length();
+ public:
+ /// Constructor for testing purposes
+ /// This creates a bad declaration, so won't generate correct SPIR-V
+ Function();
+
+ /// Constructor
+ /// @param declaration the function declaration
+ /// @param label_op the operand for function's entry block label
+ /// @param params the function parameters
+ Function(const Instruction& declaration,
+ const Operand& label_op,
+ const InstructionList& params);
+ /// Copy constructor
+ /// @param other the function to copy
+ Function(const Function& other);
+ ~Function();
+
+ /// Iterates over the function call the cb on each instruction
+ /// @param cb the callback to call
+ void iterate(std::function<void(const Instruction&)> cb) const;
+
+ /// @returns the declaration
+ const Instruction& declaration() const { return declaration_; }
+
+ /// @returns the label ID for the function entry block
+ uint32_t label_id() const { return std::get<uint32_t>(label_op_); }
+
+ /// Adds an instruction to the instruction list
+ /// @param op the op to set
+ /// @param operands the operands for the instruction
+ void push_inst(spv::Op op, const OperandList& operands) {
+ instructions_.push_back(Instruction{op, operands});
}
- for (const auto& var : vars_) {
- size += var.word_length();
+ /// @returns the instruction list
+ const InstructionList& instructions() const { return instructions_; }
+
+ /// Adds a variable to the variable list
+ /// @param operands the operands for the variable
+ void push_var(const OperandList& operands) {
+ vars_.push_back(Instruction{spv::Op::OpVariable, operands});
}
- for (const auto& inst : instructions_) {
- size += inst.word_length();
+ /// @returns the variable list
+ const InstructionList& variables() const { return vars_; }
+
+ /// @returns the word length of the function
+ uint32_t word_length() const {
+ // 1 for the Label and 1 for the FunctionEnd
+ uint32_t size = 2 + declaration_.word_length();
+
+ for (const auto& param : params_) {
+ size += param.word_length();
+ }
+ for (const auto& var : vars_) {
+ size += var.word_length();
+ }
+ for (const auto& inst : instructions_) {
+ size += inst.word_length();
+ }
+ return size;
}
- return size;
- }
-
- private:
- Instruction declaration_;
- Operand label_op_;
- InstructionList params_;
- InstructionList vars_;
- InstructionList instructions_;
+
+ private:
+ Instruction declaration_;
+ Operand label_op_;
+ InstructionList params_;
+ InstructionList vars_;
+ InstructionList instructions_;
};
} // namespace tint::writer::spirv
diff --git a/chromium/third_party/dawn/src/tint/writer/spirv/generator.cc b/chromium/third_party/dawn/src/tint/writer/spirv/generator.cc
index 04f6e151294..93ac6e1f671 100644
--- a/chromium/third_party/dawn/src/tint/writer/spirv/generator.cc
+++ b/chromium/third_party/dawn/src/tint/writer/spirv/generator.cc
@@ -25,28 +25,31 @@ Result::~Result() = default;
Result::Result(const Result&) = default;
Result Generate(const Program* program, const Options& options) {
- Result result;
+ Result result;
+ if (!program->IsValid()) {
+ result.error = "input program is not valid";
+ return result;
+ }
+
+ // Sanitize the program.
+ auto sanitized_result = Sanitize(program, options);
+ if (!sanitized_result.program.IsValid()) {
+ result.success = false;
+ result.error = sanitized_result.program.Diagnostics().str();
+ return result;
+ }
+
+ // Generate the SPIR-V code.
+ bool zero_initialize_workgroup_memory =
+ !options.disable_workgroup_init && options.use_zero_initialize_workgroup_memory_extension;
+
+ auto impl = std::make_unique<GeneratorImpl>(&sanitized_result.program,
+ zero_initialize_workgroup_memory);
+ result.success = impl->Generate();
+ result.error = impl->error();
+ result.spirv = std::move(impl->result());
- // Sanitize the program.
- auto sanitized_result = Sanitize(program, options);
- if (!sanitized_result.program.IsValid()) {
- result.success = false;
- result.error = sanitized_result.program.Diagnostics().str();
return result;
- }
-
- // Generate the SPIR-V code.
- bool zero_initialize_workgroup_memory =
- !options.disable_workgroup_init &&
- options.use_zero_initialize_workgroup_memory_extension;
-
- auto impl = std::make_unique<GeneratorImpl>(&sanitized_result.program,
- zero_initialize_workgroup_memory);
- result.success = impl->Generate();
- result.error = impl->error();
- result.spirv = std::move(impl->result());
-
- return result;
}
} // namespace tint::writer::spirv
diff --git a/chromium/third_party/dawn/src/tint/writer/spirv/generator.h b/chromium/third_party/dawn/src/tint/writer/spirv/generator.h
index 81428b42c7e..ce871ffc9a6 100644
--- a/chromium/third_party/dawn/src/tint/writer/spirv/generator.h
+++ b/chromium/third_party/dawn/src/tint/writer/spirv/generator.h
@@ -34,40 +34,40 @@ namespace tint::writer::spirv {
/// Configuration options used for generating SPIR-V.
struct Options {
- /// Set to `true` to generate a PointSize builtin and have it set to 1.0
- /// from all vertex shaders in the module.
- bool emit_vertex_point_size = true;
+ /// Set to `true` to generate a PointSize builtin and have it set to 1.0
+ /// from all vertex shaders in the module.
+ bool emit_vertex_point_size = true;
- /// Set to `true` to disable workgroup memory zero initialization
- bool disable_workgroup_init = false;
+ /// Set to `true` to disable workgroup memory zero initialization
+ bool disable_workgroup_init = false;
- /// Set to 'true' to generates binding mappings for external textures
- bool generate_external_texture_bindings = false;
+ /// Set to 'true' to generates binding mappings for external textures
+ bool generate_external_texture_bindings = false;
- /// Set to `true` to initialize workgroup memory with OpConstantNull when
- /// VK_KHR_zero_initialize_workgroup_memory is enabled.
- bool use_zero_initialize_workgroup_memory_extension = false;
+ /// Set to `true` to initialize workgroup memory with OpConstantNull when
+ /// VK_KHR_zero_initialize_workgroup_memory is enabled.
+ bool use_zero_initialize_workgroup_memory_extension = false;
};
/// The result produced when generating SPIR-V.
struct Result {
- /// Constructor
- Result();
+ /// Constructor
+ Result();
- /// Destructor
- ~Result();
+ /// Destructor
+ ~Result();
- /// Copy constructor
- Result(const Result&);
+ /// Copy constructor
+ Result(const Result&);
- /// True if generation was successful.
- bool success = false;
+ /// True if generation was successful.
+ bool success = false;
- /// The errors generated during code generation, if any.
- std::string error;
+ /// The errors generated during code generation, if any.
+ std::string error;
- /// The generated SPIR-V.
- std::vector<uint32_t> spirv;
+ /// The generated SPIR-V.
+ std::vector<uint32_t> spirv;
};
/// Generate SPIR-V for a program, according to a set of configuration options.
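Combined with the Generate() flow in generator.cc above, a minimal calling sequence looks roughly like the sketch below; the CompileToSpirv wrapper and the way the tint::Program is obtained are assumptions, and only the writer-side calls come from this change:

#include <cstdint>
#include <iostream>
#include <vector>

#include "src/tint/writer/spirv/generator.h"

// Hypothetical wrapper: `program` is assumed to be an already parsed and
// resolved tint::Program.
std::vector<uint32_t> CompileToSpirv(const tint::Program& program) {
    tint::writer::spirv::Options options;
    options.emit_vertex_point_size = true;   // defaults shown in generator.h
    options.disable_workgroup_init = false;

    auto result = tint::writer::spirv::Generate(&program, options);
    if (!result.success) {
        std::cerr << result.error << "\n";   // e.g. "input program is not valid"
        return {};
    }
    return result.spirv;                     // SPIR-V module as 32-bit words
}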
diff --git a/chromium/third_party/dawn/src/tint/writer/spirv/generator_bench.cc b/chromium/third_party/dawn/src/tint/writer/spirv/generator_bench.cc
index 65897402fb9..4aac8bc2110 100644
--- a/chromium/third_party/dawn/src/tint/writer/spirv/generator_bench.cc
+++ b/chromium/third_party/dawn/src/tint/writer/spirv/generator_bench.cc
@@ -20,18 +20,18 @@ namespace tint::writer::spirv {
namespace {
void GenerateSPIRV(benchmark::State& state, std::string input_name) {
- auto res = bench::LoadProgram(input_name);
- if (auto err = std::get_if<bench::Error>(&res)) {
- state.SkipWithError(err->msg.c_str());
- return;
- }
- auto& program = std::get<bench::ProgramAndFile>(res).program;
- for (auto _ : state) {
- auto res = Generate(&program, {});
- if (!res.error.empty()) {
- state.SkipWithError(res.error.c_str());
+ auto res = bench::LoadProgram(input_name);
+ if (auto err = std::get_if<bench::Error>(&res)) {
+ state.SkipWithError(err->msg.c_str());
+ return;
+ }
+ auto& program = std::get<bench::ProgramAndFile>(res).program;
+ for (auto _ : state) {
+ auto res = Generate(&program, {});
+ if (!res.error.empty()) {
+ state.SkipWithError(res.error.c_str());
+ }
}
- }
}
TINT_BENCHMARK_WGSL_PROGRAMS(GenerateSPIRV);
diff --git a/chromium/third_party/dawn/src/tint/writer/spirv/generator_impl.cc b/chromium/third_party/dawn/src/tint/writer/spirv/generator_impl.cc
index 33188085b2c..b8ca89c4b15 100644
--- a/chromium/third_party/dawn/src/tint/writer/spirv/generator_impl.cc
+++ b/chromium/third_party/dawn/src/tint/writer/spirv/generator_impl.cc
@@ -21,8 +21,8 @@
#include "src/tint/transform/add_spirv_block_attribute.h"
#include "src/tint/transform/builtin_polyfill.h"
#include "src/tint/transform/canonicalize_entry_point_io.h"
+#include "src/tint/transform/disable_uniformity_analysis.h"
#include "src/tint/transform/expand_compound_assignment.h"
-#include "src/tint/transform/fold_constants.h"
#include "src/tint/transform/for_loop_to_loop.h"
#include "src/tint/transform/manager.h"
#include "src/tint/transform/promote_side_effects_to_decl.h"
@@ -38,83 +38,80 @@
namespace tint::writer::spirv {
SanitizedResult Sanitize(const Program* in, const Options& options) {
- transform::Manager manager;
- transform::DataMap data;
-
- { // Builtin polyfills
- transform::BuiltinPolyfill::Builtins polyfills;
- polyfills.count_leading_zeros = true;
- polyfills.count_trailing_zeros = true;
- polyfills.extract_bits =
- transform::BuiltinPolyfill::Level::kClampParameters;
- polyfills.first_leading_bit = true;
- polyfills.first_trailing_bit = true;
- polyfills.insert_bits = transform::BuiltinPolyfill::Level::kClampParameters;
- data.Add<transform::BuiltinPolyfill::Config>(polyfills);
- manager.Add<transform::BuiltinPolyfill>();
- }
-
- if (options.generate_external_texture_bindings) {
- auto new_bindings_map = GenerateExternalTextureBindings(in);
- data.Add<transform::MultiplanarExternalTexture::NewBindingPoints>(
- new_bindings_map);
- }
- manager.Add<transform::MultiplanarExternalTexture>();
-
- manager.Add<transform::Unshadow>();
- bool disable_workgroup_init_in_sanitizer =
- options.disable_workgroup_init ||
- options.use_zero_initialize_workgroup_memory_extension;
- if (!disable_workgroup_init_in_sanitizer) {
- manager.Add<transform::ZeroInitWorkgroupMemory>();
- }
- manager.Add<transform::RemoveUnreachableStatements>();
- manager.Add<transform::ExpandCompoundAssignment>();
- manager.Add<transform::PromoteSideEffectsToDecl>();
- manager.Add<transform::UnwindDiscardFunctions>();
- manager.Add<transform::SimplifyPointers>(); // Required for arrayLength()
- manager.Add<transform::FoldConstants>();
- manager.Add<transform::VectorizeScalarMatrixConstructors>();
- manager.Add<transform::ForLoopToLoop>(); // Must come after
- // ZeroInitWorkgroupMemory
- manager.Add<transform::CanonicalizeEntryPointIO>();
- manager.Add<transform::AddEmptyEntryPoint>();
- manager.Add<transform::AddSpirvBlockAttribute>();
- manager.Add<transform::VarForDynamicIndex>();
-
- data.Add<transform::CanonicalizeEntryPointIO::Config>(
- transform::CanonicalizeEntryPointIO::Config(
- transform::CanonicalizeEntryPointIO::ShaderStyle::kSpirv, 0xFFFFFFFF,
- options.emit_vertex_point_size));
-
- SanitizedResult result;
- result.program = std::move(manager.Run(in, data).program);
- return result;
+ transform::Manager manager;
+ transform::DataMap data;
+
+ manager.Add<transform::DisableUniformityAnalysis>();
+
+ { // Builtin polyfills
+ transform::BuiltinPolyfill::Builtins polyfills;
+ polyfills.count_leading_zeros = true;
+ polyfills.count_trailing_zeros = true;
+ polyfills.extract_bits = transform::BuiltinPolyfill::Level::kClampParameters;
+ polyfills.first_leading_bit = true;
+ polyfills.first_trailing_bit = true;
+ polyfills.insert_bits = transform::BuiltinPolyfill::Level::kClampParameters;
+ data.Add<transform::BuiltinPolyfill::Config>(polyfills);
+ manager.Add<transform::BuiltinPolyfill>();
+ }
+
+ if (options.generate_external_texture_bindings) {
+ auto new_bindings_map = GenerateExternalTextureBindings(in);
+ data.Add<transform::MultiplanarExternalTexture::NewBindingPoints>(new_bindings_map);
+ }
+ manager.Add<transform::MultiplanarExternalTexture>();
+
+ manager.Add<transform::Unshadow>();
+ bool disable_workgroup_init_in_sanitizer =
+ options.disable_workgroup_init || options.use_zero_initialize_workgroup_memory_extension;
+ if (!disable_workgroup_init_in_sanitizer) {
+ manager.Add<transform::ZeroInitWorkgroupMemory>();
+ }
+ manager.Add<transform::RemoveUnreachableStatements>();
+ manager.Add<transform::ExpandCompoundAssignment>();
+ manager.Add<transform::PromoteSideEffectsToDecl>();
+ manager.Add<transform::UnwindDiscardFunctions>();
+ manager.Add<transform::SimplifyPointers>(); // Required for arrayLength()
+ manager.Add<transform::VectorizeScalarMatrixConstructors>();
+ manager.Add<transform::ForLoopToLoop>(); // Must come after
+ // ZeroInitWorkgroupMemory
+ manager.Add<transform::CanonicalizeEntryPointIO>();
+ manager.Add<transform::AddEmptyEntryPoint>();
+ manager.Add<transform::AddSpirvBlockAttribute>();
+ manager.Add<transform::VarForDynamicIndex>();
+
+ data.Add<transform::CanonicalizeEntryPointIO::Config>(
+ transform::CanonicalizeEntryPointIO::Config(
+ transform::CanonicalizeEntryPointIO::ShaderStyle::kSpirv, 0xFFFFFFFF,
+ options.emit_vertex_point_size));
+
+ SanitizedResult result;
+ result.program = std::move(manager.Run(in, data).program);
+ return result;
}
-GeneratorImpl::GeneratorImpl(const Program* program,
- bool zero_initialize_workgroup_memory)
+GeneratorImpl::GeneratorImpl(const Program* program, bool zero_initialize_workgroup_memory)
: builder_(program, zero_initialize_workgroup_memory) {}
bool GeneratorImpl::Generate() {
- if (builder_.Build()) {
- writer_.WriteHeader(builder_.id_bound());
- writer_.WriteBuilder(&builder_);
- return true;
- }
- return false;
+ if (builder_.Build()) {
+ writer_.WriteHeader(builder_.id_bound());
+ writer_.WriteBuilder(&builder_);
+ return true;
+ }
+ return false;
}
const std::vector<uint32_t>& GeneratorImpl::result() const {
- return writer_.result();
+ return writer_.result();
}
std::vector<uint32_t>& GeneratorImpl::result() {
- return writer_.result();
+ return writer_.result();
}
std::string GeneratorImpl::error() const {
- return builder_.error();
+ return builder_.error();
}
} // namespace tint::writer::spirv
diff --git a/chromium/third_party/dawn/src/tint/writer/spirv/generator_impl.h b/chromium/third_party/dawn/src/tint/writer/spirv/generator_impl.h
index 2e02af1eb1f..106e2f8133a 100644
--- a/chromium/third_party/dawn/src/tint/writer/spirv/generator_impl.h
+++ b/chromium/third_party/dawn/src/tint/writer/spirv/generator_impl.h
@@ -27,8 +27,8 @@ namespace tint::writer::spirv {
/// The result of sanitizing a program for generation.
struct SanitizedResult {
- /// The sanitized program.
- Program program;
+ /// The sanitized program.
+ Program program;
};
/// Sanitize a program in preparation for generating SPIR-V.
@@ -38,28 +38,28 @@ SanitizedResult Sanitize(const Program* program, const Options& options);
/// Implementation class for SPIR-V generator
class GeneratorImpl {
- public:
- /// Constructor
- /// @param program the program to generate
- /// @param zero_initialize_workgroup_memory `true` to initialize all the
- /// variables in the Workgroup storage class with OpConstantNull
- GeneratorImpl(const Program* program, bool zero_initialize_workgroup_memory);
+ public:
+ /// Constructor
+ /// @param program the program to generate
+ /// @param zero_initialize_workgroup_memory `true` to initialize all the
+ /// variables in the Workgroup storage class with OpConstantNull
+ GeneratorImpl(const Program* program, bool zero_initialize_workgroup_memory);
- /// @returns true on successful generation; false otherwise
- bool Generate();
+ /// @returns true on successful generation; false otherwise
+ bool Generate();
- /// @returns the result data
- const std::vector<uint32_t>& result() const;
+ /// @returns the result data
+ const std::vector<uint32_t>& result() const;
- /// @returns the result data
- std::vector<uint32_t>& result();
+ /// @returns the result data
+ std::vector<uint32_t>& result();
- /// @returns the error
- std::string error() const;
+ /// @returns the error
+ std::string error() const;
- private:
- Builder builder_;
- BinaryWriter writer_;
+ private:
+ Builder builder_;
+ BinaryWriter writer_;
};
} // namespace tint::writer::spirv
diff --git a/chromium/third_party/dawn/src/tint/writer/spirv/instruction.cc b/chromium/third_party/dawn/src/tint/writer/spirv/instruction.cc
index 4be648b37a0..8b883c1c724 100644
--- a/chromium/third_party/dawn/src/tint/writer/spirv/instruction.cc
+++ b/chromium/third_party/dawn/src/tint/writer/spirv/instruction.cc
@@ -26,11 +26,11 @@ Instruction::Instruction(const Instruction&) = default;
Instruction::~Instruction() = default;
uint32_t Instruction::word_length() const {
- uint32_t size = 1; // Initial 1 for the op and size
- for (const auto& op : operands_) {
- size += op.length();
- }
- return size;
+ uint32_t size = 1; // Initial 1 for the op and size
+ for (const auto& op : operands_) {
+ size += OperandLength(op);
+ }
+ return size;
}
} // namespace tint::writer::spirv
diff --git a/chromium/third_party/dawn/src/tint/writer/spirv/instruction.h b/chromium/third_party/dawn/src/tint/writer/spirv/instruction.h
index 7fd126c6ea2..2beae59d9c5 100644
--- a/chromium/third_party/dawn/src/tint/writer/spirv/instruction.h
+++ b/chromium/third_party/dawn/src/tint/writer/spirv/instruction.h
@@ -24,27 +24,27 @@ namespace tint::writer::spirv {
/// A single SPIR-V instruction
class Instruction {
- public:
- /// Constructor
- /// @param op the op to generate
- /// @param operands the operand values for the instruction
- Instruction(spv::Op op, OperandList operands);
- /// Copy Constructor
- Instruction(const Instruction&);
- ~Instruction();
-
- /// @returns the instructions op
- spv::Op opcode() const { return op_; }
-
- /// @returns the instructions operands
- const OperandList& operands() const { return operands_; }
-
- /// @returns the number of uint32_t's needed to hold the instruction
- uint32_t word_length() const;
-
- private:
- spv::Op op_ = spv::Op::OpNop;
- OperandList operands_;
+ public:
+ /// Constructor
+ /// @param op the op to generate
+ /// @param operands the operand values for the instruction
+ Instruction(spv::Op op, OperandList operands);
+ /// Copy Constructor
+ Instruction(const Instruction&);
+ ~Instruction();
+
+ /// @returns the instructions op
+ spv::Op opcode() const { return op_; }
+
+ /// @returns the instructions operands
+ const OperandList& operands() const { return operands_; }
+
+ /// @returns the number of uint32_t's needed to hold the instruction
+ uint32_t word_length() const;
+
+ private:
+ spv::Op op_ = spv::Op::OpNop;
+ OperandList operands_;
};
/// A list of instructions
diff --git a/chromium/third_party/dawn/src/tint/writer/spirv/instruction_test.cc b/chromium/third_party/dawn/src/tint/writer/spirv/instruction_test.cc
index 2c919f05b16..65460c8fbd1 100644
--- a/chromium/third_party/dawn/src/tint/writer/spirv/instruction_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/spirv/instruction_test.cc
@@ -14,6 +14,8 @@
#include "src/tint/writer/spirv/instruction.h"
+#include <string>
+
#include "gtest/gtest.h"
namespace tint::writer::spirv {
@@ -22,26 +24,24 @@ namespace {
using InstructionTest = testing::Test;
TEST_F(InstructionTest, Create) {
- Instruction i(spv::Op::OpEntryPoint, {Operand::Float(1.2f), Operand::Int(1),
- Operand::String("my_str")});
- EXPECT_EQ(i.opcode(), spv::Op::OpEntryPoint);
- ASSERT_EQ(i.operands().size(), 3u);
+ Instruction i(spv::Op::OpEntryPoint, {Operand(1.2f), Operand(1u), Operand("my_str")});
+ EXPECT_EQ(i.opcode(), spv::Op::OpEntryPoint);
+ ASSERT_EQ(i.operands().size(), 3u);
- const auto& ops = i.operands();
- EXPECT_TRUE(ops[0].IsFloat());
- EXPECT_FLOAT_EQ(ops[0].to_f(), 1.2f);
+ const auto& ops = i.operands();
+ ASSERT_TRUE(std::holds_alternative<float>(ops[0]));
+ EXPECT_FLOAT_EQ(std::get<float>(ops[0]), 1.2f);
- EXPECT_TRUE(ops[1].IsInt());
- EXPECT_EQ(ops[1].to_i(), 1u);
+ ASSERT_TRUE(std::holds_alternative<uint32_t>(ops[1]));
+ EXPECT_EQ(std::get<uint32_t>(ops[1]), 1u);
- EXPECT_TRUE(ops[2].IsString());
- EXPECT_EQ(ops[2].to_s(), "my_str");
+ ASSERT_TRUE(std::holds_alternative<std::string>(ops[2]));
+ EXPECT_EQ(std::get<std::string>(ops[2]), "my_str");
}
TEST_F(InstructionTest, Length) {
- Instruction i(spv::Op::OpEntryPoint, {Operand::Float(1.2f), Operand::Int(1),
- Operand::String("my_str")});
- EXPECT_EQ(i.word_length(), 5u);
+ Instruction i(spv::Op::OpEntryPoint, {Operand(1.2f), Operand(1u), Operand("my_str")});
+ EXPECT_EQ(i.word_length(), 5u);
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/writer/spirv/operand.cc b/chromium/third_party/dawn/src/tint/writer/spirv/operand.cc
index 7d2724997c1..501307f0dc9 100644
--- a/chromium/third_party/dawn/src/tint/writer/spirv/operand.cc
+++ b/chromium/third_party/dawn/src/tint/writer/spirv/operand.cc
@@ -16,46 +16,14 @@
namespace tint::writer::spirv {
-// static
-Operand Operand::Float(float val) {
- Operand o(Kind::kFloat);
- o.set_float(val);
- return o;
-}
-
-// static
-Operand Operand::Int(uint32_t val) {
- Operand o(Kind::kInt);
- o.set_int(val);
- return o;
-}
-
-// static
-Operand Operand::String(const std::string& val) {
- Operand o(Kind::kString);
- o.set_string(val);
- return o;
-}
-
-Operand::Operand(Kind kind) : kind_(kind) {}
-
-Operand::~Operand() = default;
-
-uint32_t Operand::length() const {
- uint32_t val = 0;
- switch (kind_) {
- case Kind::kFloat:
- case Kind::kInt:
- val = 1;
- break;
- case Kind::kString:
- // SPIR-V always nul-terminates strings. The length is rounded up to a
- // multiple of 4 bytes with 0 bytes padding the end. Accounting for the
- // nul terminator is why '+ 4u' is used here instead of '+ 3u'.
- val = static_cast<uint32_t>((str_val_.length() + 4u) >> 2);
- break;
- }
- return val;
+uint32_t OperandLength(const Operand& o) {
+ if (auto* str = std::get_if<std::string>(&o)) {
+ // SPIR-V always nul-terminates strings. The length is rounded up to a
+ // multiple of 4 bytes with 0 bytes padding the end. Accounting for the
+ // nul terminator is why '+ 4u' is used here instead of '+ 3u'.
+ return static_cast<uint32_t>((str->length() + 4u) >> 2);
+ }
+ return 1;
}
} // namespace tint::writer::spirv
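A quick worked example of the string rule in OperandLength() above, consistent with the operand and instruction tests later in this change (the assert-based form is illustrative):

#include <cassert>

#include "src/tint/writer/spirv/operand.h"

void OperandLengthExamples() {
    using tint::writer::spirv::Operand;
    using tint::writer::spirv::OperandLength;

    // "my string": 9 characters plus the nul terminator = 10 bytes,
    // padded up to 12 bytes = 3 words.
    assert(OperandLength(Operand("my string")) == 3u);

    // "": only the nul terminator, padded to 4 bytes = 1 word.
    assert(OperandLength(Operand("")) == 1u);

    // Numeric operands always occupy a single 32-bit word.
    assert(OperandLength(Operand(1.2f)) == 1u);
    assert(OperandLength(Operand(1u)) == 1u);
}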
diff --git a/chromium/third_party/dawn/src/tint/writer/spirv/operand.h b/chromium/third_party/dawn/src/tint/writer/spirv/operand.h
index 46a5deb55bc..3174d0cc301 100644
--- a/chromium/third_party/dawn/src/tint/writer/spirv/operand.h
+++ b/chromium/third_party/dawn/src/tint/writer/spirv/operand.h
@@ -15,81 +15,47 @@
#ifndef SRC_TINT_WRITER_SPIRV_OPERAND_H_
#define SRC_TINT_WRITER_SPIRV_OPERAND_H_
+#include <cstring>
#include <string>
+// TODO(https://crbug.com/dawn/1379) Update cpplint and remove NOLINT
+#include <variant> // NOLINT(build/include_order)
#include <vector>
+#include "src/tint/utils/hash.h"
+
namespace tint::writer::spirv {
/// A single SPIR-V instruction operand
-class Operand {
- public:
- /// The kind of the operand
- // Note, the `kInt` will cover most cases as things like IDs in SPIR-V are
- // just ints for the purpose of converting to binary.
- enum class Kind { kInt = 0, kFloat, kString };
-
- /// Creates a float operand
- /// @param val the float value
- /// @returns the operand
- static Operand Float(float val);
- /// Creates an int operand
- /// @param val the int value
- /// @returns the operand
- static Operand Int(uint32_t val);
- /// Creates a string operand
- /// @param val the string value
- /// @returns the operand
- static Operand String(const std::string& val);
-
- /// Constructor
- /// @param kind the type of operand
- explicit Operand(Kind kind);
- /// Copy Constructor
- Operand(const Operand&) = default;
- ~Operand();
-
- /// Copy assignment
- /// @param b the operand to copy
- /// @returns a copy of this operand
- Operand& operator=(const Operand& b) = default;
-
- /// Sets the float value
- /// @param val the value to set
- void set_float(float val) { float_val_ = val; }
- /// Sets the int value
- /// @param val the value to set
- void set_int(uint32_t val) { int_val_ = val; }
- /// Sets the string value
- /// @param val the value to set
- void set_string(const std::string& val) { str_val_ = val; }
-
- /// @returns true if this is a float operand
- bool IsFloat() const { return kind_ == Kind::kFloat; }
- /// @returns true if this is an integer operand
- bool IsInt() const { return kind_ == Kind::kInt; }
- /// @returns true if this is a string operand
- bool IsString() const { return kind_ == Kind::kString; }
+using Operand = std::variant<uint32_t, float, std::string>;
- /// @returns the number of uint32_t's needed for this operand
- uint32_t length() const;
+// Helper for returning an uint32_t Operand with the provided integer value.
+template <typename T>
+inline Operand U32Operand(T val) {
+ return Operand{static_cast<uint32_t>(val)};
+}
- /// @returns the float value
- float to_f() const { return float_val_; }
- /// @returns the int value
- uint32_t to_i() const { return int_val_; }
- /// @returns the string value
- const std::string& to_s() const { return str_val_; }
-
- private:
- Kind kind_ = Kind::kInt;
- float float_val_ = 0.0;
- uint32_t int_val_ = 0;
- std::string str_val_;
-};
+/// @returns the number of uint32_t's needed for this operand
+uint32_t OperandLength(const Operand& o);
/// A list of operands
using OperandList = std::vector<Operand>;
+using OperandListKey = utils::UnorderedKeyWrapper<OperandList>;
+
} // namespace tint::writer::spirv
+namespace std {
+
+/// Custom std::hash specialization for tint::writer::spirv::Operand
+template <>
+class hash<tint::writer::spirv::Operand> {
+ public:
+ /// @param o the Operand
+ /// @return the hash value
+ inline std::size_t operator()(const tint::writer::spirv::Operand& o) const {
+ return std::visit([](auto v) { return tint::utils::Hash(v); }, o);
+ }
+};
+
+} // namespace std
#endif // SRC_TINT_WRITER_SPIRV_OPERAND_H_
diff --git a/chromium/third_party/dawn/src/tint/writer/spirv/operand_test.cc b/chromium/third_party/dawn/src/tint/writer/spirv/operand_test.cc
index 9a688e90561..ed284064b3d 100644
--- a/chromium/third_party/dawn/src/tint/writer/spirv/operand_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/spirv/operand_test.cc
@@ -22,41 +22,41 @@ namespace {
using OperandTest = testing::Test;
TEST_F(OperandTest, CreateFloat) {
- auto o = Operand::Float(1.2f);
- EXPECT_TRUE(o.IsFloat());
- EXPECT_FLOAT_EQ(o.to_f(), 1.2f);
+ auto o = Operand(1.2f);
+ ASSERT_TRUE(std::holds_alternative<float>(o));
+ EXPECT_FLOAT_EQ(std::get<float>(o), 1.2f);
}
TEST_F(OperandTest, CreateInt) {
- auto o = Operand::Int(1);
- EXPECT_TRUE(o.IsInt());
- EXPECT_EQ(o.to_i(), 1u);
+ auto o = Operand(1u);
+ ASSERT_TRUE(std::holds_alternative<uint32_t>(o));
+ EXPECT_EQ(std::get<uint32_t>(o), 1u);
}
TEST_F(OperandTest, CreateString) {
- auto o = Operand::String("my string");
- EXPECT_TRUE(o.IsString());
- EXPECT_EQ(o.to_s(), "my string");
+ auto o = Operand("my string");
+ ASSERT_TRUE(std::holds_alternative<std::string>(o));
+ EXPECT_EQ(std::get<std::string>(o), "my string");
}
TEST_F(OperandTest, Length_Float) {
- auto o = Operand::Float(1.2f);
- EXPECT_EQ(o.length(), 1u);
+ auto o = Operand(1.2f);
+ EXPECT_EQ(OperandLength(o), 1u);
}
TEST_F(OperandTest, Length_Int) {
- auto o = Operand::Int(1);
- EXPECT_EQ(o.length(), 1u);
+ auto o = U32Operand(1);
+ EXPECT_EQ(OperandLength(o), 1u);
}
TEST_F(OperandTest, Length_String) {
- auto o = Operand::String("my string");
- EXPECT_EQ(o.length(), 3u);
+ auto o = Operand("my string");
+ EXPECT_EQ(OperandLength(o), 3u);
}
TEST_F(OperandTest, Length_String_Empty) {
- auto o = Operand::String("");
- EXPECT_EQ(o.length(), 1u);
+ auto o = Operand("");
+ EXPECT_EQ(OperandLength(o), 1u);
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/writer/spirv/scalar_constant.h b/chromium/third_party/dawn/src/tint/writer/spirv/scalar_constant.h
index abb1a0dcf62..14bcefbc174 100644
--- a/chromium/third_party/dawn/src/tint/writer/spirv/scalar_constant.h
+++ b/chromium/third_party/dawn/src/tint/writer/spirv/scalar_constant.h
@@ -31,97 +31,95 @@ namespace tint::writer::spirv {
/// ScalarConstant represents a scalar constant value
struct ScalarConstant {
- /// The constant value
- union Value {
- /// The value as a bool
- bool b;
- /// The value as a uint32_t
- uint32_t u32;
- /// The value as a int32_t
- int32_t i32;
- /// The value as a float
- float f32;
-
- /// The value that is wide enough to encompass all other types (including
- /// future 64-bit data types).
- uint64_t u64;
- };
-
- /// The kind of constant
- enum class Kind { kBool, kU32, kI32, kF32 };
-
- /// Constructor
- inline ScalarConstant() { value.u64 = 0; }
-
- /// @param value the value of the constant
- /// @returns a new ScalarConstant with the provided value and kind Kind::kU32
- static inline ScalarConstant U32(uint32_t value) {
- ScalarConstant c;
- c.value.u32 = value;
- c.kind = Kind::kU32;
- return c;
- }
-
- /// @param value the value of the constant
- /// @returns a new ScalarConstant with the provided value and kind Kind::kI32
- static inline ScalarConstant I32(int32_t value) {
- ScalarConstant c;
- c.value.i32 = value;
- c.kind = Kind::kI32;
- return c;
- }
-
- /// @param value the value of the constant
- /// @returns a new ScalarConstant with the provided value and kind Kind::kI32
- static inline ScalarConstant F32(float value) {
- ScalarConstant c;
- c.value.f32 = value;
- c.kind = Kind::kF32;
- return c;
- }
-
- /// @param value the value of the constant
- /// @returns a new ScalarConstant with the provided value and kind Kind::kBool
- static inline ScalarConstant Bool(bool value) {
- ScalarConstant c;
- c.value.b = value;
- c.kind = Kind::kBool;
- return c;
- }
-
- /// Equality operator
- /// @param rhs the ScalarConstant to compare against
- /// @returns true if this ScalarConstant is equal to `rhs`
- inline bool operator==(const ScalarConstant& rhs) const {
- return value.u64 == rhs.value.u64 && kind == rhs.kind &&
- is_spec_op == rhs.is_spec_op && constant_id == rhs.constant_id;
- }
-
- /// Inequality operator
- /// @param rhs the ScalarConstant to compare against
- /// @returns true if this ScalarConstant is not equal to `rhs`
- inline bool operator!=(const ScalarConstant& rhs) const {
- return !(*this == rhs);
- }
-
- /// @returns this ScalarConstant as a specialization op with the given
- /// specialization constant identifier
- /// @param id the constant identifier
- ScalarConstant AsSpecOp(uint32_t id) const {
- auto ret = *this;
- ret.is_spec_op = true;
- ret.constant_id = id;
- return ret;
- }
-
- /// The constant value
- Value value;
- /// The constant value kind
- Kind kind = Kind::kBool;
- /// True if the constant is a specialization op
- bool is_spec_op = false;
- /// The identifier if a specialization op
- uint32_t constant_id = 0;
+ /// The constant value
+ union Value {
+ /// The value as a bool
+ bool b;
+ /// The value as a uint32_t
+ uint32_t u32;
+ /// The value as a int32_t
+ int32_t i32;
+ /// The value as a float
+ float f32;
+
+ /// The value that is wide enough to encompass all other types (including
+ /// future 64-bit data types).
+ uint64_t u64;
+ };
+
+ /// The kind of constant
+ enum class Kind { kBool, kU32, kI32, kF32 };
+
+ /// Constructor
+ inline ScalarConstant() { value.u64 = 0; }
+
+ /// @param value the value of the constant
+ /// @returns a new ScalarConstant with the provided value and kind Kind::kU32
+ static inline ScalarConstant U32(uint32_t value) {
+ ScalarConstant c;
+ c.value.u32 = value;
+ c.kind = Kind::kU32;
+ return c;
+ }
+
+ /// @param value the value of the constant
+ /// @returns a new ScalarConstant with the provided value and kind Kind::kI32
+ static inline ScalarConstant I32(int32_t value) {
+ ScalarConstant c;
+ c.value.i32 = value;
+ c.kind = Kind::kI32;
+ return c;
+ }
+
+ /// @param value the value of the constant
+ /// @returns a new ScalarConstant with the provided value and kind Kind::kI32
+ static inline ScalarConstant F32(float value) {
+ ScalarConstant c;
+ c.value.f32 = value;
+ c.kind = Kind::kF32;
+ return c;
+ }
+
+ /// @param value the value of the constant
+ /// @returns a new ScalarConstant with the provided value and kind Kind::kBool
+ static inline ScalarConstant Bool(bool value) {
+ ScalarConstant c;
+ c.value.b = value;
+ c.kind = Kind::kBool;
+ return c;
+ }
+
+ /// Equality operator
+ /// @param rhs the ScalarConstant to compare against
+ /// @returns true if this ScalarConstant is equal to `rhs`
+ inline bool operator==(const ScalarConstant& rhs) const {
+ return value.u64 == rhs.value.u64 && kind == rhs.kind && is_spec_op == rhs.is_spec_op &&
+ constant_id == rhs.constant_id;
+ }
+
+ /// Inequality operator
+ /// @param rhs the ScalarConstant to compare against
+ /// @returns true if this ScalarConstant is not equal to `rhs`
+ inline bool operator!=(const ScalarConstant& rhs) const { return !(*this == rhs); }
+
+ /// @returns this ScalarConstant as a specialization op with the given
+ /// specialization constant identifier
+ /// @param id the constant identifier
+ ScalarConstant AsSpecOp(uint32_t id) const {
+ auto ret = *this;
+ ret.is_spec_op = true;
+ ret.constant_id = id;
+ return ret;
+ }
+
+ /// The constant value
+ Value value;
+ /// The constant value kind
+ Kind kind = Kind::kBool;
+ /// True if the constant is a specialization op
+ bool is_spec_op = false;
+ /// The identifier if a specialization op
+ uint32_t constant_id = 0;
};
} // namespace tint::writer::spirv
@@ -132,15 +130,14 @@ namespace std {
/// keys for std::unordered_map and std::unordered_set.
template <>
class hash<tint::writer::spirv::ScalarConstant> {
- public:
- /// @param c the ScalarConstant
- /// @return the Symbol internal value
- inline std::size_t operator()(
- const tint::writer::spirv::ScalarConstant& c) const {
- uint32_t value = 0;
- std::memcpy(&value, &c.value, sizeof(value));
- return tint::utils::Hash(value, c.kind);
- }
+ public:
+ /// @param c the ScalarConstant
+ /// @return the Symbol internal value
+ inline std::size_t operator()(const tint::writer::spirv::ScalarConstant& c) const {
+ uint32_t value = 0;
+ std::memcpy(&value, &c.value, sizeof(value));
+ return tint::utils::Hash(value, c.kind);
+ }
};
} // namespace std
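For context, a minimal usage sketch of the ScalarConstant helpers declared above, including the std::hash specialization (not part of the patch; the include path and function name are assumed for illustration):

#include <unordered_set>
#include "src/tint/writer/spirv/scalar_constant.h"

using tint::writer::spirv::ScalarConstant;

void ScalarConstantSketch() {
    // Each factory tags the union with the matching Kind.
    auto u = ScalarConstant::U32(42u);    // kind == Kind::kU32
    auto f = ScalarConstant::F32(1.5f);   // kind == Kind::kF32
    auto b = ScalarConstant::Bool(true);  // kind == Kind::kBool

    // AsSpecOp() returns a copy flagged as a specialization constant;
    // is_spec_op and constant_id participate in operator==, so u != spec.
    auto spec = u.AsSpecOp(7u);

    // The std::hash specialization enables use in unordered containers.
    std::unordered_set<ScalarConstant> seen;
    seen.insert(u);
    seen.insert(f);
    seen.insert(b);
    seen.insert(spec);
}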
diff --git a/chromium/third_party/dawn/src/tint/writer/spirv/scalar_constant_test.cc b/chromium/third_party/dawn/src/tint/writer/spirv/scalar_constant_test.cc
index 253f05b86fa..196e600e148 100644
--- a/chromium/third_party/dawn/src/tint/writer/spirv/scalar_constant_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/spirv/scalar_constant_test.cc
@@ -21,35 +21,35 @@ namespace {
using SpirvScalarConstantTest = TestHelper;
TEST_F(SpirvScalarConstantTest, Equality) {
- ScalarConstant a{};
- ScalarConstant b{};
- EXPECT_EQ(a, b);
-
- a.kind = ScalarConstant::Kind::kU32;
- EXPECT_NE(a, b);
- b.kind = ScalarConstant::Kind::kU32;
- EXPECT_EQ(a, b);
-
- a.value.b = true;
- EXPECT_NE(a, b);
- b.value.b = true;
- EXPECT_EQ(a, b);
-
- a.is_spec_op = true;
- EXPECT_NE(a, b);
- b.is_spec_op = true;
- EXPECT_EQ(a, b);
-
- a.constant_id = 3;
- EXPECT_NE(a, b);
- b.constant_id = 3;
- EXPECT_EQ(a, b);
+ ScalarConstant a{};
+ ScalarConstant b{};
+ EXPECT_EQ(a, b);
+
+ a.kind = ScalarConstant::Kind::kU32;
+ EXPECT_NE(a, b);
+ b.kind = ScalarConstant::Kind::kU32;
+ EXPECT_EQ(a, b);
+
+ a.value.b = true;
+ EXPECT_NE(a, b);
+ b.value.b = true;
+ EXPECT_EQ(a, b);
+
+ a.is_spec_op = true;
+ EXPECT_NE(a, b);
+ b.is_spec_op = true;
+ EXPECT_EQ(a, b);
+
+ a.constant_id = 3;
+ EXPECT_NE(a, b);
+ b.constant_id = 3;
+ EXPECT_EQ(a, b);
}
TEST_F(SpirvScalarConstantTest, U32) {
- auto c = ScalarConstant::U32(123);
- EXPECT_EQ(c.value.u32, 123u);
- EXPECT_EQ(c.kind, ScalarConstant::Kind::kU32);
+ auto c = ScalarConstant::U32(123);
+ EXPECT_EQ(c.value.u32, 123u);
+ EXPECT_EQ(c.kind, ScalarConstant::Kind::kU32);
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/writer/spirv/spv_dump.cc b/chromium/third_party/dawn/src/tint/writer/spirv/spv_dump.cc
index 21dc1a54ab5..0f95efea93c 100644
--- a/chromium/third_party/dawn/src/tint/writer/spirv/spv_dump.cc
+++ b/chromium/third_party/dawn/src/tint/writer/spirv/spv_dump.cc
@@ -21,65 +21,64 @@ namespace tint::writer::spirv {
namespace {
std::string Disassemble(const std::vector<uint32_t>& data) {
- std::string spv_errors;
- spv_target_env target_env = SPV_ENV_UNIVERSAL_1_0;
+ std::string spv_errors;
+ spv_target_env target_env = SPV_ENV_UNIVERSAL_1_0;
- auto msg_consumer = [&spv_errors](spv_message_level_t level, const char*,
- const spv_position_t& position,
- const char* message) {
- switch (level) {
- case SPV_MSG_FATAL:
- case SPV_MSG_INTERNAL_ERROR:
- case SPV_MSG_ERROR:
- spv_errors += "error: line " + std::to_string(position.index) + ": " +
- message + "\n";
- break;
- case SPV_MSG_WARNING:
- spv_errors += "warning: line " + std::to_string(position.index) + ": " +
- message + "\n";
- break;
- case SPV_MSG_INFO:
- spv_errors += "info: line " + std::to_string(position.index) + ": " +
- message + "\n";
- break;
- case SPV_MSG_DEBUG:
- break;
- }
- };
+ auto msg_consumer = [&spv_errors](spv_message_level_t level, const char*,
+ const spv_position_t& position, const char* message) {
+ switch (level) {
+ case SPV_MSG_FATAL:
+ case SPV_MSG_INTERNAL_ERROR:
+ case SPV_MSG_ERROR:
+ spv_errors +=
+ "error: line " + std::to_string(position.index) + ": " + message + "\n";
+ break;
+ case SPV_MSG_WARNING:
+ spv_errors +=
+ "warning: line " + std::to_string(position.index) + ": " + message + "\n";
+ break;
+ case SPV_MSG_INFO:
+ spv_errors +=
+ "info: line " + std::to_string(position.index) + ": " + message + "\n";
+ break;
+ case SPV_MSG_DEBUG:
+ break;
+ }
+ };
- spvtools::SpirvTools tools(target_env);
- tools.SetMessageConsumer(msg_consumer);
+ spvtools::SpirvTools tools(target_env);
+ tools.SetMessageConsumer(msg_consumer);
- std::string result;
- if (!tools.Disassemble(data, &result, SPV_BINARY_TO_TEXT_OPTION_NO_HEADER)) {
- return "*** Invalid SPIR-V ***\n" + spv_errors;
- }
- return result;
+ std::string result;
+ if (!tools.Disassemble(data, &result, SPV_BINARY_TO_TEXT_OPTION_NO_HEADER)) {
+ return "*** Invalid SPIR-V ***\n" + spv_errors;
+ }
+ return result;
}
} // namespace
std::string DumpBuilder(Builder& builder) {
- BinaryWriter writer;
- writer.WriteHeader(builder.id_bound());
- writer.WriteBuilder(&builder);
- return Disassemble(writer.result());
+ BinaryWriter writer;
+ writer.WriteHeader(builder.id_bound());
+ writer.WriteBuilder(&builder);
+ return Disassemble(writer.result());
}
std::string DumpInstruction(const Instruction& inst) {
- BinaryWriter writer;
- writer.WriteHeader(kDefaultMaxIdBound);
- writer.WriteInstruction(inst);
- return Disassemble(writer.result());
+ BinaryWriter writer;
+ writer.WriteHeader(kDefaultMaxIdBound);
+ writer.WriteInstruction(inst);
+ return Disassemble(writer.result());
}
std::string DumpInstructions(const InstructionList& insts) {
- BinaryWriter writer;
- writer.WriteHeader(kDefaultMaxIdBound);
- for (const auto& inst : insts) {
- writer.WriteInstruction(inst);
- }
- return Disassemble(writer.result());
+ BinaryWriter writer;
+ writer.WriteHeader(kDefaultMaxIdBound);
+ for (const auto& inst : insts) {
+ writer.WriteInstruction(inst);
+ }
+ return Disassemble(writer.result());
}
} // namespace tint::writer::spirv
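These dump helpers are typically used by the SPIR-V writer tests to turn generated instructions into assembly text for comparison. A minimal sketch (the header path is assumed from this patch's file layout):

#include <string>
#include "src/tint/writer/spirv/spv_dump.h"

// Disassembles everything the builder has produced so far into
// human-readable SPIR-V assembly, suitable for test expectations.
std::string DumpForTest(tint::writer::spirv::Builder& b) {
    return tint::writer::spirv::DumpBuilder(b);
}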
diff --git a/chromium/third_party/dawn/src/tint/writer/spirv/test_helper.h b/chromium/third_party/dawn/src/tint/writer/spirv/test_helper.h
index c8052b22b34..2e587602b8e 100644
--- a/chromium/third_party/dawn/src/tint/writer/spirv/test_helper.h
+++ b/chromium/third_party/dawn/src/tint/writer/spirv/test_helper.h
@@ -29,103 +29,100 @@ namespace tint::writer::spirv {
/// Helper class for testing
template <typename BASE>
class TestHelperBase : public ProgramBuilder, public BASE {
- public:
- TestHelperBase() = default;
- ~TestHelperBase() override = default;
+ public:
+ TestHelperBase() = default;
+ ~TestHelperBase() override = default;
- /// Builds and returns a spirv::Builder from the program.
- /// @note The spirv::Builder is only built once. Multiple calls to Build()
- /// will return the same spirv::Builder without rebuilding.
- /// @return the built spirv::Builder
- spirv::Builder& Build() {
- if (spirv_builder) {
- return *spirv_builder;
+ /// Builds and returns a spirv::Builder from the program.
+ /// @note The spirv::Builder is only built once. Multiple calls to Build()
+ /// will return the same spirv::Builder without rebuilding.
+ /// @return the built spirv::Builder
+ spirv::Builder& Build() {
+ if (spirv_builder) {
+ return *spirv_builder;
+ }
+ [&]() {
+ ASSERT_TRUE(IsValid()) << "Builder program is not valid\n"
+ << diag::Formatter().format(Diagnostics());
+ }();
+ program = std::make_unique<Program>(std::move(*this));
+ [&]() {
+ ASSERT_TRUE(program->IsValid()) << diag::Formatter().format(program->Diagnostics());
+ }();
+ spirv_builder = std::make_unique<spirv::Builder>(program.get());
+ return *spirv_builder;
}
- [&]() {
- ASSERT_TRUE(IsValid()) << "Builder program is not valid\n"
- << diag::Formatter().format(Diagnostics());
- }();
- program = std::make_unique<Program>(std::move(*this));
- [&]() {
- ASSERT_TRUE(program->IsValid())
- << diag::Formatter().format(program->Diagnostics());
- }();
- spirv_builder = std::make_unique<spirv::Builder>(program.get());
- return *spirv_builder;
- }
- /// Builds the program, runs the program through the transform::Spirv
- /// sanitizer and returns a spirv::Builder from the sanitized program.
- /// @param options The SPIR-V generator options.
- /// @note The spirv::Builder is only built once. Multiple calls to Build()
- /// will return the same spirv::Builder without rebuilding.
- /// @return the built spirv::Builder
- spirv::Builder& SanitizeAndBuild(const Options& options = {}) {
- if (spirv_builder) {
- return *spirv_builder;
+ /// Builds the program, runs the program through the transform::Spirv
+ /// sanitizer and returns a spirv::Builder from the sanitized program.
+ /// @param options The SPIR-V generator options.
+ /// @note The spirv::Builder is only built once. Multiple calls to Build()
+ /// will return the same spirv::Builder without rebuilding.
+ /// @return the built spirv::Builder
+ spirv::Builder& SanitizeAndBuild(const Options& options = {}) {
+ if (spirv_builder) {
+ return *spirv_builder;
+ }
+ [&]() {
+ ASSERT_TRUE(IsValid()) << "Builder program is not valid\n"
+ << diag::Formatter().format(Diagnostics());
+ }();
+ program = std::make_unique<Program>(std::move(*this));
+ [&]() {
+ ASSERT_TRUE(program->IsValid()) << diag::Formatter().format(program->Diagnostics());
+ }();
+ auto result = Sanitize(program.get(), options);
+ [&]() {
+ ASSERT_TRUE(result.program.IsValid())
+ << diag::Formatter().format(result.program.Diagnostics());
+ }();
+ *program = std::move(result.program);
+ spirv_builder = std::make_unique<spirv::Builder>(program.get());
+ return *spirv_builder;
}
- [&]() {
- ASSERT_TRUE(IsValid()) << "Builder program is not valid\n"
- << diag::Formatter().format(Diagnostics());
- }();
- program = std::make_unique<Program>(std::move(*this));
- [&]() {
- ASSERT_TRUE(program->IsValid())
- << diag::Formatter().format(program->Diagnostics());
- }();
- auto result = Sanitize(program.get(), options);
- [&]() {
- ASSERT_TRUE(result.program.IsValid())
- << diag::Formatter().format(result.program.Diagnostics());
- }();
- *program = std::move(result.program);
- spirv_builder = std::make_unique<spirv::Builder>(program.get());
- return *spirv_builder;
- }
- /// Validate passes the generated SPIR-V of the builder `b` to the SPIR-V
- /// Tools Validator. If the validator finds problems the test will fail.
- /// @param b the spirv::Builder containing the built SPIR-V module
- void Validate(spirv::Builder& b) {
- BinaryWriter writer;
- writer.WriteHeader(b.id_bound());
- writer.WriteBuilder(&b);
- auto binary = writer.result();
+ /// Validate passes the generated SPIR-V of the builder `b` to the SPIR-V
+ /// Tools Validator. If the validator finds problems the test will fail.
+ /// @param b the spirv::Builder containing the built SPIR-V module
+ void Validate(spirv::Builder& b) {
+ BinaryWriter writer;
+ writer.WriteHeader(b.id_bound());
+ writer.WriteBuilder(&b);
+ auto binary = writer.result();
- std::string spv_errors;
- auto msg_consumer = [&spv_errors](spv_message_level_t level, const char*,
- const spv_position_t& position,
- const char* message) {
- switch (level) {
- case SPV_MSG_FATAL:
- case SPV_MSG_INTERNAL_ERROR:
- case SPV_MSG_ERROR:
- spv_errors += "error: line " + std::to_string(position.index) + ": " +
- message + "\n";
- break;
- case SPV_MSG_WARNING:
- spv_errors += "warning: line " + std::to_string(position.index) +
- ": " + message + "\n";
- break;
- case SPV_MSG_INFO:
- spv_errors += "info: line " + std::to_string(position.index) + ": " +
- message + "\n";
- break;
- case SPV_MSG_DEBUG:
- break;
- }
- };
+ std::string spv_errors;
+ auto msg_consumer = [&spv_errors](spv_message_level_t level, const char*,
+ const spv_position_t& position, const char* message) {
+ switch (level) {
+ case SPV_MSG_FATAL:
+ case SPV_MSG_INTERNAL_ERROR:
+ case SPV_MSG_ERROR:
+ spv_errors +=
+ "error: line " + std::to_string(position.index) + ": " + message + "\n";
+ break;
+ case SPV_MSG_WARNING:
+ spv_errors +=
+ "warning: line " + std::to_string(position.index) + ": " + message + "\n";
+ break;
+ case SPV_MSG_INFO:
+ spv_errors +=
+ "info: line " + std::to_string(position.index) + ": " + message + "\n";
+ break;
+ case SPV_MSG_DEBUG:
+ break;
+ }
+ };
- spvtools::SpirvTools tools(SPV_ENV_VULKAN_1_2);
- tools.SetMessageConsumer(msg_consumer);
- ASSERT_TRUE(tools.Validate(binary)) << spv_errors;
- }
+ spvtools::SpirvTools tools(SPV_ENV_VULKAN_1_2);
+ tools.SetMessageConsumer(msg_consumer);
+ ASSERT_TRUE(tools.Validate(binary)) << spv_errors;
+ }
- /// The program built with a call to Build()
- std::unique_ptr<Program> program;
+ /// The program built with a call to Build()
+ std::unique_ptr<Program> program;
- private:
- std::unique_ptr<spirv::Builder> spirv_builder;
+ private:
+ std::unique_ptr<spirv::Builder> spirv_builder;
};
using TestHelper = TestHelperBase<testing::Test>;
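As a sketch of how this helper is driven in the writer tests (assuming the spirv::Builder interface exposes Build() and error(), which is not shown in this hunk):

using BuilderTest = tint::writer::spirv::TestHelper;

TEST_F(BuilderTest, BuildAndValidateEmptyModule) {
    // Build() moves this ProgramBuilder into a Program and constructs a
    // spirv::Builder for it; repeated calls return the same builder.
    auto& b = Build();
    ASSERT_TRUE(b.Build()) << b.error();

    // Validate() feeds the generated binary to the SPIR-V Tools validator
    // and fails the test if it reports any problem.
    Validate(b);
}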
diff --git a/chromium/third_party/dawn/src/tint/writer/text.h b/chromium/third_party/dawn/src/tint/writer/text.h
index 71af064b28f..2bbf1c29aa3 100644
--- a/chromium/third_party/dawn/src/tint/writer/text.h
+++ b/chromium/third_party/dawn/src/tint/writer/text.h
@@ -23,11 +23,11 @@ namespace tint::writer {
/// Class to generate text source
class Text : public Writer {
- public:
- ~Text() override;
+ public:
+ ~Text() override;
- /// @returns the result data
- virtual std::string result() const = 0;
+ /// @returns the result data
+ virtual std::string result() const = 0;
};
} // namespace tint::writer
diff --git a/chromium/third_party/dawn/src/tint/writer/text_generator.cc b/chromium/third_party/dawn/src/tint/writer/text_generator.cc
index 8623fd5dd90..5e4e93f801b 100644
--- a/chromium/third_party/dawn/src/tint/writer/text_generator.cc
+++ b/chromium/third_party/dawn/src/tint/writer/text_generator.cc
@@ -27,127 +27,118 @@ TextGenerator::TextGenerator(const Program* program)
TextGenerator::~TextGenerator() = default;
std::string TextGenerator::UniqueIdentifier(const std::string& prefix) {
- return builder_.Symbols().NameFor(builder_.Symbols().New(prefix));
+ return builder_.Symbols().NameFor(builder_.Symbols().New(prefix));
}
std::string TextGenerator::StructName(const sem::Struct* s) {
- auto name = builder_.Symbols().NameFor(s->Name());
- if (name.size() > 1 && name[0] == '_' && name[1] == '_') {
- name = utils::GetOrCreate(builtin_struct_names_, s,
- [&] { return UniqueIdentifier(name.substr(2)); });
- }
- return name;
-}
-
-std::string TextGenerator::TrimSuffix(std::string str,
- const std::string& suffix) {
- if (str.size() >= suffix.size()) {
- if (str.substr(str.size() - suffix.size(), suffix.size()) == suffix) {
- return str.substr(0, str.size() - suffix.size());
+ auto name = builder_.Symbols().NameFor(s->Name());
+ if (name.size() > 1 && name[0] == '_' && name[1] == '_') {
+ name = utils::GetOrCreate(builtin_struct_names_, s,
+ [&] { return UniqueIdentifier(name.substr(2)); });
}
- }
- return str;
+ return name;
+}
+
+std::string TextGenerator::TrimSuffix(std::string str, const std::string& suffix) {
+ if (str.size() >= suffix.size()) {
+ if (str.substr(str.size() - suffix.size(), suffix.size()) == suffix) {
+ return str.substr(0, str.size() - suffix.size());
+ }
+ }
+ return str;
}
TextGenerator::LineWriter::LineWriter(TextBuffer* buf) : buffer(buf) {}
TextGenerator::LineWriter::LineWriter(LineWriter&& other) {
- buffer = other.buffer;
- other.buffer = nullptr;
+ buffer = other.buffer;
+ other.buffer = nullptr;
}
TextGenerator::LineWriter::~LineWriter() {
- if (buffer) {
- buffer->Append(os.str());
- }
+ if (buffer) {
+ buffer->Append(os.str());
+ }
}
TextGenerator::TextBuffer::TextBuffer() = default;
TextGenerator::TextBuffer::~TextBuffer() = default;
void TextGenerator::TextBuffer::IncrementIndent() {
- current_indent += 2;
+ current_indent += 2;
}
void TextGenerator::TextBuffer::DecrementIndent() {
- current_indent = std::max(2u, current_indent) - 2u;
+ current_indent = std::max(2u, current_indent) - 2u;
}
void TextGenerator::TextBuffer::Append(const std::string& line) {
- lines.emplace_back(Line{current_indent, line});
+ lines.emplace_back(Line{current_indent, line});
}
-void TextGenerator::TextBuffer::Insert(const std::string& line,
- size_t before,
- uint32_t indent) {
- if (before >= lines.size()) {
- diag::List d;
- TINT_ICE(Writer, d)
- << "TextBuffer::Insert() called with before >= lines.size()\n"
- << " before:" << before << "\n"
- << " lines.size(): " << lines.size();
- return;
- }
- lines.insert(lines.begin() + before, Line{indent, line});
+void TextGenerator::TextBuffer::Insert(const std::string& line, size_t before, uint32_t indent) {
+ if (before >= lines.size()) {
+ diag::List d;
+ TINT_ICE(Writer, d) << "TextBuffer::Insert() called with before >= lines.size()\n"
+ << " before:" << before << "\n"
+ << " lines.size(): " << lines.size();
+ return;
+ }
+ lines.insert(lines.begin() + before, Line{indent, line});
}
void TextGenerator::TextBuffer::Append(const TextBuffer& tb) {
- for (auto& line : tb.lines) {
- // TODO(bclayton): inefficent, consider optimizing
- lines.emplace_back(Line{current_indent + line.indent, line.content});
- }
-}
-
-void TextGenerator::TextBuffer::Insert(const TextBuffer& tb,
- size_t before,
- uint32_t indent) {
- if (before >= lines.size()) {
- diag::List d;
- TINT_ICE(Writer, d)
- << "TextBuffer::Insert() called with before >= lines.size()\n"
- << " before:" << before << "\n"
- << " lines.size(): " << lines.size();
- return;
- }
- size_t idx = 0;
- for (auto& line : tb.lines) {
- // TODO(bclayton): inefficent, consider optimizing
- lines.insert(lines.begin() + before + idx,
- Line{indent + line.indent, line.content});
- idx++;
- }
+ for (auto& line : tb.lines) {
+ // TODO(bclayton): inefficient, consider optimizing
+ lines.emplace_back(Line{current_indent + line.indent, line.content});
+ }
+}
+
+void TextGenerator::TextBuffer::Insert(const TextBuffer& tb, size_t before, uint32_t indent) {
+ if (before >= lines.size()) {
+ diag::List d;
+ TINT_ICE(Writer, d) << "TextBuffer::Insert() called with before >= lines.size()\n"
+ << " before:" << before << "\n"
+ << " lines.size(): " << lines.size();
+ return;
+ }
+ size_t idx = 0;
+ for (auto& line : tb.lines) {
+ // TODO(bclayton): inefficient, consider optimizing
+ lines.insert(lines.begin() + before + idx, Line{indent + line.indent, line.content});
+ idx++;
+ }
}
std::string TextGenerator::TextBuffer::String(uint32_t indent /* = 0 */) const {
- std::stringstream ss;
- for (auto& line : lines) {
- if (!line.content.empty()) {
- for (uint32_t i = 0; i < indent + line.indent; i++) {
- ss << " ";
- }
- ss << line.content;
+ std::stringstream ss;
+ for (auto& line : lines) {
+ if (!line.content.empty()) {
+ for (uint32_t i = 0; i < indent + line.indent; i++) {
+ ss << " ";
+ }
+ ss << line.content;
+ }
+ ss << std::endl;
}
- ss << std::endl;
- }
- return ss.str();
+ return ss.str();
}
TextGenerator::ScopedParen::ScopedParen(std::ostream& stream) : s(stream) {
- s << "(";
+ s << "(";
}
TextGenerator::ScopedParen::~ScopedParen() {
- s << ")";
+ s << ")";
}
TextGenerator::ScopedIndent::ScopedIndent(TextGenerator* generator)
: ScopedIndent(generator->current_buffer_) {}
-TextGenerator::ScopedIndent::ScopedIndent(TextBuffer* buffer)
- : buffer_(buffer) {
- buffer_->IncrementIndent();
+TextGenerator::ScopedIndent::ScopedIndent(TextBuffer* buffer) : buffer_(buffer) {
+ buffer_->IncrementIndent();
}
TextGenerator::ScopedIndent::~ScopedIndent() {
- buffer_->DecrementIndent();
+ buffer_->DecrementIndent();
}
} // namespace tint::writer
diff --git a/chromium/third_party/dawn/src/tint/writer/text_generator.h b/chromium/third_party/dawn/src/tint/writer/text_generator.h
index 67501abda2a..2f4b5789b5d 100644
--- a/chromium/third_party/dawn/src/tint/writer/text_generator.h
+++ b/chromium/third_party/dawn/src/tint/writer/text_generator.h
@@ -28,211 +28,207 @@ namespace tint::writer {
/// Helper methods for generators which are creating text output
class TextGenerator {
- public:
- /// Line holds a single line of text
- struct Line {
- /// The indentation of the line in blankspace
- uint32_t indent = 0;
- /// The content of the line, without a trailing newline character
- std::string content;
- };
-
- /// TextBuffer holds a list of lines of text.
- struct TextBuffer {
- // Constructor
- TextBuffer();
-
- // Destructor
- ~TextBuffer();
-
- /// IncrementIndent increases the indentation of lines that will be written
- /// to the TextBuffer
- void IncrementIndent();
-
- /// DecrementIndent decreases the indentation of lines that will be written
- /// to the TextBuffer
- void DecrementIndent();
-
- /// Appends the line to the end of the TextBuffer
- /// @param line the line to append to the TextBuffer
- void Append(const std::string& line);
-
- /// Inserts the line to the TextBuffer before the line with index `before`
- /// @param line the line to append to the TextBuffer
- /// @param before the zero-based index of the line to insert the text before
- /// @param indent the indentation to apply to the inserted lines
- void Insert(const std::string& line, size_t before, uint32_t indent);
-
- /// Appends the lines of `tb` to the end of this TextBuffer
- /// @param tb the TextBuffer to append to the end of this TextBuffer
- void Append(const TextBuffer& tb);
-
- /// Inserts the lines of `tb` to the TextBuffer before the line with index
- /// `before`
- /// @param tb the TextBuffer to insert into this TextBuffer
- /// @param before the zero-based index of the line to insert the text before
- /// @param indent the indentation to apply to the inserted lines
- void Insert(const TextBuffer& tb, size_t before, uint32_t indent);
-
- /// @returns the buffer's content as a single string
- /// @param indent additional indentation to apply to each line
- std::string String(uint32_t indent = 0) const;
-
- /// The current indentation of the TextBuffer. Lines appended to the
- /// TextBuffer will use this indentation.
- uint32_t current_indent = 0;
-
- /// The lines
- std::vector<Line> lines;
- };
-
- /// Constructor
- /// @param program the program used by the generator
- explicit TextGenerator(const Program* program);
- ~TextGenerator();
-
- /// Increment the emitter indent level
- void increment_indent() { current_buffer_->IncrementIndent(); }
- /// Decrement the emitter indent level
- void decrement_indent() { current_buffer_->DecrementIndent(); }
-
- /// @returns the result data
- std::string result() const { return main_buffer_.String(); }
-
- /// @returns the list of diagnostics raised by the generator.
- const diag::List& Diagnostics() const { return diagnostics_; }
-
- /// @returns the error
- std::string error() const { return diagnostics_.str(); }
-
- /// @return a new, unique identifier with the given prefix.
- /// @param prefix optional prefix to apply to the generated identifier. If
- /// empty "tint_symbol" will be used.
- std::string UniqueIdentifier(const std::string& prefix = "");
-
- /// @param s the semantic structure
- /// @returns the name of the structure, taking special care of builtin
- /// structures that start with double underscores. If the structure is a
- /// builtin, then the returned name will be a unique name without the leading
- /// underscores.
- std::string StructName(const sem::Struct* s);
-
- /// @param str the string
- /// @param suffix the suffix to remove
- /// @return returns str without the provided trailing suffix string. If str
- /// doesn't end with suffix, str is returned unchanged.
- std::string TrimSuffix(std::string str, const std::string& suffix);
-
- protected:
- /// LineWriter is a helper that acts as a string buffer, who's content is
- /// emitted to the TextBuffer as a single line on destruction.
- struct LineWriter {
- public:
+ public:
+ /// Line holds a single line of text
+ struct Line {
+ /// The indentation of the line in blankspace
+ uint32_t indent = 0;
+ /// The content of the line, without a trailing newline character
+ std::string content;
+ };
+
+ /// TextBuffer holds a list of lines of text.
+ struct TextBuffer {
+ // Constructor
+ TextBuffer();
+
+ // Destructor
+ ~TextBuffer();
+
+ /// IncrementIndent increases the indentation of lines that will be written
+ /// to the TextBuffer
+ void IncrementIndent();
+
+ /// DecrementIndent decreases the indentation of lines that will be written
+ /// to the TextBuffer
+ void DecrementIndent();
+
+ /// Appends the line to the end of the TextBuffer
+ /// @param line the line to append to the TextBuffer
+ void Append(const std::string& line);
+
+ /// Inserts the line to the TextBuffer before the line with index `before`
+ /// @param line the line to append to the TextBuffer
+ /// @param before the zero-based index of the line to insert the text before
+ /// @param indent the indentation to apply to the inserted lines
+ void Insert(const std::string& line, size_t before, uint32_t indent);
+
+ /// Appends the lines of `tb` to the end of this TextBuffer
+ /// @param tb the TextBuffer to append to the end of this TextBuffer
+ void Append(const TextBuffer& tb);
+
+ /// Inserts the lines of `tb` to the TextBuffer before the line with index
+ /// `before`
+ /// @param tb the TextBuffer to insert into this TextBuffer
+ /// @param before the zero-based index of the line to insert the text before
+ /// @param indent the indentation to apply to the inserted lines
+ void Insert(const TextBuffer& tb, size_t before, uint32_t indent);
+
+ /// @returns the buffer's content as a single string
+ /// @param indent additional indentation to apply to each line
+ std::string String(uint32_t indent = 0) const;
+
+ /// The current indentation of the TextBuffer. Lines appended to the
+ /// TextBuffer will use this indentation.
+ uint32_t current_indent = 0;
+
+ /// The lines
+ std::vector<Line> lines;
+ };
+
/// Constructor
- /// @param buffer the TextBuffer that the LineWriter will append its
- /// content to on destruction, at the end of the buffer.
- explicit LineWriter(TextBuffer* buffer);
-
- /// Move constructor
- /// @param rhs the LineWriter to move
- LineWriter(LineWriter&& rhs);
- /// Destructor
- ~LineWriter();
-
- /// @returns the ostringstream
- operator std::ostream&() { return os; }
-
- /// @param rhs the value to write to the line
- /// @returns the ostream so calls can be chained
- template <typename T>
- std::ostream& operator<<(T&& rhs) {
- return os << std::forward<T>(rhs);
+ /// @param program the program used by the generator
+ explicit TextGenerator(const Program* program);
+ ~TextGenerator();
+
+ /// Increment the emitter indent level
+ void increment_indent() { current_buffer_->IncrementIndent(); }
+ /// Decrement the emitter indent level
+ void decrement_indent() { current_buffer_->DecrementIndent(); }
+
+ /// @returns the result data
+ std::string result() const { return main_buffer_.String(); }
+
+ /// @returns the list of diagnostics raised by the generator.
+ const diag::List& Diagnostics() const { return diagnostics_; }
+
+ /// @returns the error
+ std::string error() const { return diagnostics_.str(); }
+
+ /// @return a new, unique identifier with the given prefix.
+ /// @param prefix optional prefix to apply to the generated identifier. If
+ /// empty "tint_symbol" will be used.
+ std::string UniqueIdentifier(const std::string& prefix = "");
+
+ /// @param s the semantic structure
+ /// @returns the name of the structure, taking special care of builtin
+ /// structures that start with double underscores. If the structure is a
+ /// builtin, then the returned name will be a unique name without the leading
+ /// underscores.
+ std::string StructName(const sem::Struct* s);
+
+ /// @param str the string
+ /// @param suffix the suffix to remove
+ /// @return returns str without the provided trailing suffix string. If str
+ /// doesn't end with suffix, str is returned unchanged.
+ std::string TrimSuffix(std::string str, const std::string& suffix);
+
+ protected:
+ /// LineWriter is a helper that acts as a string buffer, whose content is
+ /// emitted to the TextBuffer as a single line on destruction.
+ struct LineWriter {
+ public:
+ /// Constructor
+ /// @param buffer the TextBuffer that the LineWriter will append its
+ /// content to on destruction, at the end of the buffer.
+ explicit LineWriter(TextBuffer* buffer);
+
+ /// Move constructor
+ /// @param rhs the LineWriter to move
+ LineWriter(LineWriter&& rhs);
+ /// Destructor
+ ~LineWriter();
+
+ /// @returns the ostringstream
+ operator std::ostream&() { return os; }
+
+ /// @param rhs the value to write to the line
+ /// @returns the ostream so calls can be chained
+ template <typename T>
+ std::ostream& operator<<(T&& rhs) {
+ return os << std::forward<T>(rhs);
+ }
+
+ private:
+ LineWriter(const LineWriter&) = delete;
+ LineWriter& operator=(const LineWriter&) = delete;
+
+ std::ostringstream os;
+ TextBuffer* buffer;
+ };
+
+ /// Helper for writing a '(' on construction and a ')' destruction.
+ struct ScopedParen {
+ /// Constructor
+ /// @param stream the std::ostream that will be written to
+ explicit ScopedParen(std::ostream& stream);
+ /// Destructor
+ ~ScopedParen();
+
+ private:
+ ScopedParen(ScopedParen&& rhs) = delete;
+ ScopedParen(const ScopedParen&) = delete;
+ ScopedParen& operator=(const ScopedParen&) = delete;
+ std::ostream& s;
+ };
+
+ /// Helper for incrementing indentation on construction and decrementing
+ /// indentation on destruction.
+ struct ScopedIndent {
+ /// Constructor
+ /// @param buffer the TextBuffer that the ScopedIndent will indent
+ explicit ScopedIndent(TextBuffer* buffer);
+ /// Constructor
+ /// @param generator ScopedIndent will indent the generator's
+ /// `current_buffer_`
+ explicit ScopedIndent(TextGenerator* generator);
+ /// Destructor
+ ~ScopedIndent();
+
+ private:
+ ScopedIndent(ScopedIndent&& rhs) = delete;
+ ScopedIndent(const ScopedIndent&) = delete;
+ ScopedIndent& operator=(const ScopedIndent&) = delete;
+ TextBuffer* buffer_;
+ };
+
+ /// @returns the resolved type of the ast::Expression `expr`
+ /// @param expr the expression
+ const sem::Type* TypeOf(const ast::Expression* expr) const { return builder_.TypeOf(expr); }
+
+ /// @returns the resolved type of the ast::Type `type`
+ /// @param type the type
+ const sem::Type* TypeOf(const ast::Type* type) const { return builder_.TypeOf(type); }
+
+ /// @returns the resolved type of the ast::TypeDecl `type_decl`
+ /// @param type_decl the type
+ const sem::Type* TypeOf(const ast::TypeDecl* type_decl) const {
+ return builder_.TypeOf(type_decl);
}
- private:
- LineWriter(const LineWriter&) = delete;
- LineWriter& operator=(const LineWriter&) = delete;
-
- std::ostringstream os;
- TextBuffer* buffer;
- };
-
- /// Helper for writing a '(' on construction and a ')' destruction.
- struct ScopedParen {
- /// Constructor
- /// @param stream the std::ostream that will be written to
- explicit ScopedParen(std::ostream& stream);
- /// Destructor
- ~ScopedParen();
-
- private:
- ScopedParen(ScopedParen&& rhs) = delete;
- ScopedParen(const ScopedParen&) = delete;
- ScopedParen& operator=(const ScopedParen&) = delete;
- std::ostream& s;
- };
-
- /// Helper for incrementing indentation on construction and decrementing
- /// indentation on destruction.
- struct ScopedIndent {
- /// Constructor
- /// @param buffer the TextBuffer that the ScopedIndent will indent
- explicit ScopedIndent(TextBuffer* buffer);
- /// Constructor
- /// @param generator ScopedIndent will indent the generator's
- /// `current_buffer_`
- explicit ScopedIndent(TextGenerator* generator);
- /// Destructor
- ~ScopedIndent();
-
- private:
- ScopedIndent(ScopedIndent&& rhs) = delete;
- ScopedIndent(const ScopedIndent&) = delete;
- ScopedIndent& operator=(const ScopedIndent&) = delete;
- TextBuffer* buffer_;
- };
-
- /// @returns the resolved type of the ast::Expression `expr`
- /// @param expr the expression
- const sem::Type* TypeOf(const ast::Expression* expr) const {
- return builder_.TypeOf(expr);
- }
-
- /// @returns the resolved type of the ast::Type `type`
- /// @param type the type
- const sem::Type* TypeOf(const ast::Type* type) const {
- return builder_.TypeOf(type);
- }
-
- /// @returns the resolved type of the ast::TypeDecl `type_decl`
- /// @param type_decl the type
- const sem::Type* TypeOf(const ast::TypeDecl* type_decl) const {
- return builder_.TypeOf(type_decl);
- }
-
- /// @returns a new LineWriter, used for buffering and writing a line to
- /// the end of #current_buffer_.
- LineWriter line() { return LineWriter(current_buffer_); }
-
- /// @param buffer the TextBuffer to write the line to
- /// @returns a new LineWriter, used for buffering and writing a line to
- /// the end of `buffer`.
- static LineWriter line(TextBuffer* buffer) { return LineWriter(buffer); }
-
- /// The program
- Program const* const program_;
- /// A ProgramBuilder that thinly wraps program_
- ProgramBuilder builder_;
- /// Diagnostics generated by the generator
- diag::List diagnostics_;
- /// The buffer the TextGenerator is currently appending lines to
- TextBuffer* current_buffer_ = &main_buffer_;
-
- private:
- /// The primary text buffer that the generator will emit
- TextBuffer main_buffer_;
- /// Map of builtin structure to unique generated name
- std::unordered_map<const sem::Struct*, std::string> builtin_struct_names_;
+ /// @returns a new LineWriter, used for buffering and writing a line to
+ /// the end of #current_buffer_.
+ LineWriter line() { return LineWriter(current_buffer_); }
+
+ /// @param buffer the TextBuffer to write the line to
+ /// @returns a new LineWriter, used for buffering and writing a line to
+ /// the end of `buffer`.
+ static LineWriter line(TextBuffer* buffer) { return LineWriter(buffer); }
+
+ /// The program
+ Program const* const program_;
+ /// A ProgramBuilder that thinly wraps program_
+ ProgramBuilder builder_;
+ /// Diagnostics generated by the generator
+ diag::List diagnostics_;
+ /// The buffer the TextGenerator is currently appending lines to
+ TextBuffer* current_buffer_ = &main_buffer_;
+
+ private:
+ /// The primary text buffer that the generator will emit
+ TextBuffer main_buffer_;
+ /// Map of builtin structure to unique generated name
+ std::unordered_map<const sem::Struct*, std::string> builtin_struct_names_;
};
} // namespace tint::writer
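A small standalone sketch of the TextBuffer indentation model documented above (inside the generators this is normally driven through line() and ScopedIndent; the include path is assumed):

#include <string>
#include "src/tint/writer/text_generator.h"

using TextBuffer = tint::writer::TextGenerator::TextBuffer;

std::string TextBufferSketch() {
    TextBuffer buf;
    buf.Append("fn main() {");  // recorded at the current indent (0)
    buf.IncrementIndent();      // lines appended from here gain two spaces
    buf.Append("return;");
    buf.DecrementIndent();
    buf.Append("}");
    // String() renders each stored line with its recorded indentation.
    return buf.String();        // "fn main() {\n  return;\n}\n"
}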
diff --git a/chromium/third_party/dawn/src/tint/writer/text_generator_test.cc b/chromium/third_party/dawn/src/tint/writer/text_generator_test.cc
index fdd74d6c004..1bfa41fc2b8 100644
--- a/chromium/third_party/dawn/src/tint/writer/text_generator_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/text_generator_test.cc
@@ -20,26 +20,26 @@ namespace tint::writer {
namespace {
TEST(TextGeneratorTest, UniqueIdentifier) {
- Program program(ProgramBuilder{});
+ Program program(ProgramBuilder{});
- TextGenerator gen(&program);
+ TextGenerator gen(&program);
- ASSERT_EQ(gen.UniqueIdentifier("ident"), "ident");
- ASSERT_EQ(gen.UniqueIdentifier("ident"), "ident_1");
+ ASSERT_EQ(gen.UniqueIdentifier("ident"), "ident");
+ ASSERT_EQ(gen.UniqueIdentifier("ident"), "ident_1");
}
TEST(TextGeneratorTest, UniqueIdentifier_ConflictWithExisting) {
- ProgramBuilder builder;
- builder.Symbols().Register("ident_1");
- builder.Symbols().Register("ident_2");
- Program program(std::move(builder));
+ ProgramBuilder builder;
+ builder.Symbols().Register("ident_1");
+ builder.Symbols().Register("ident_2");
+ Program program(std::move(builder));
- TextGenerator gen(&program);
+ TextGenerator gen(&program);
- ASSERT_EQ(gen.UniqueIdentifier("ident"), "ident");
- ASSERT_EQ(gen.UniqueIdentifier("ident"), "ident_3");
- ASSERT_EQ(gen.UniqueIdentifier("ident"), "ident_4");
- ASSERT_EQ(gen.UniqueIdentifier("ident"), "ident_5");
+ ASSERT_EQ(gen.UniqueIdentifier("ident"), "ident");
+ ASSERT_EQ(gen.UniqueIdentifier("ident"), "ident_3");
+ ASSERT_EQ(gen.UniqueIdentifier("ident"), "ident_4");
+ ASSERT_EQ(gen.UniqueIdentifier("ident"), "ident_5");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/writer/wgsl/generator.cc b/chromium/third_party/dawn/src/tint/writer/wgsl/generator.cc
index 4d007251431..623f3a68b8f 100644
--- a/chromium/third_party/dawn/src/tint/writer/wgsl/generator.cc
+++ b/chromium/third_party/dawn/src/tint/writer/wgsl/generator.cc
@@ -22,15 +22,15 @@ Result::~Result() = default;
Result::Result(const Result&) = default;
Result Generate(const Program* program, const Options&) {
- Result result;
+ Result result;
- // Generate the WGSL code.
- auto impl = std::make_unique<GeneratorImpl>(program);
- result.success = impl->Generate();
- result.error = impl->error();
- result.wgsl = impl->result();
+ // Generate the WGSL code.
+ auto impl = std::make_unique<GeneratorImpl>(program);
+ result.success = impl->Generate();
+ result.error = impl->error();
+ result.wgsl = impl->result();
- return result;
+ return result;
}
} // namespace tint::writer::wgsl
diff --git a/chromium/third_party/dawn/src/tint/writer/wgsl/generator.h b/chromium/third_party/dawn/src/tint/writer/wgsl/generator.h
index 62fe2e1b756..5f249c7ddda 100644
--- a/chromium/third_party/dawn/src/tint/writer/wgsl/generator.h
+++ b/chromium/third_party/dawn/src/tint/writer/wgsl/generator.h
@@ -34,23 +34,23 @@ struct Options {};
/// The result produced when generating WGSL.
struct Result {
- /// Constructor
- Result();
+ /// Constructor
+ Result();
- /// Destructor
- ~Result();
+ /// Destructor
+ ~Result();
- /// Copy constructor
- Result(const Result&);
+ /// Copy constructor
+ Result(const Result&);
- /// True if generation was successful.
- bool success = false;
+ /// True if generation was successful.
+ bool success = false;
- /// The errors generated during code generation, if any.
- std::string error;
+ /// The errors generated during code generation, if any.
+ std::string error;
- /// The generated WGSL.
- std::string wgsl = "";
+ /// The generated WGSL.
+ std::string wgsl = "";
};
/// Generate WGSL for a program, according to a set of configuration options.
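For reference, a minimal sketch of calling this entry point and consuming the Result fields shown above (assumes a valid tint::Program; the wrapper name is illustrative):

#include <string>
#include "src/tint/writer/wgsl/generator.h"

std::string EmitWgsl(const tint::Program* program) {
    tint::writer::wgsl::Options options;
    auto result = tint::writer::wgsl::Generate(program, options);
    if (!result.success) {
        // Diagnostics are reported as a single string in Result::error.
        return "error: " + result.error;
    }
    return result.wgsl;
}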
diff --git a/chromium/third_party/dawn/src/tint/writer/wgsl/generator_bench.cc b/chromium/third_party/dawn/src/tint/writer/wgsl/generator_bench.cc
index cb8cb68d022..a9efacc0295 100644
--- a/chromium/third_party/dawn/src/tint/writer/wgsl/generator_bench.cc
+++ b/chromium/third_party/dawn/src/tint/writer/wgsl/generator_bench.cc
@@ -20,18 +20,18 @@ namespace tint::writer::wgsl {
namespace {
void GenerateWGSL(benchmark::State& state, std::string input_name) {
- auto res = bench::LoadProgram(input_name);
- if (auto err = std::get_if<bench::Error>(&res)) {
- state.SkipWithError(err->msg.c_str());
- return;
- }
- auto& program = std::get<bench::ProgramAndFile>(res).program;
- for (auto _ : state) {
- auto res = Generate(&program, {});
- if (!res.error.empty()) {
- state.SkipWithError(res.error.c_str());
+ auto res = bench::LoadProgram(input_name);
+ if (auto err = std::get_if<bench::Error>(&res)) {
+ state.SkipWithError(err->msg.c_str());
+ return;
+ }
+ auto& program = std::get<bench::ProgramAndFile>(res).program;
+ for (auto _ : state) {
+ auto res = Generate(&program, {});
+ if (!res.error.empty()) {
+ state.SkipWithError(res.error.c_str());
+ }
}
- }
}
TINT_BENCHMARK_WGSL_PROGRAMS(GenerateWGSL);
diff --git a/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl.cc b/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl.cc
index d3174c10cbc..5e0ce8c16f0 100644
--- a/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl.cc
+++ b/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl.cc
@@ -37,7 +37,6 @@
#include "src/tint/ast/multisampled_texture.h"
#include "src/tint/ast/pointer.h"
#include "src/tint/ast/sampled_texture.h"
-#include "src/tint/ast/sint_literal_expression.h"
#include "src/tint/ast/stage_attribute.h"
#include "src/tint/ast/storage_texture.h"
#include "src/tint/ast/stride_attribute.h"
@@ -46,7 +45,6 @@
#include "src/tint/ast/struct_member_size_attribute.h"
#include "src/tint/ast/type_name.h"
#include "src/tint/ast/u32.h"
-#include "src/tint/ast/uint_literal_expression.h"
#include "src/tint/ast/variable_decl_statement.h"
#include "src/tint/ast/vector.h"
#include "src/tint/ast/void.h"
@@ -63,1164 +61,1160 @@ GeneratorImpl::GeneratorImpl(const Program* program) : TextGenerator(program) {}
GeneratorImpl::~GeneratorImpl() = default;
bool GeneratorImpl::Generate() {
- // Generate global declarations in the order they appear in the module.
- for (auto* decl : program_->AST().GlobalDeclarations()) {
- if (!Switch(
- decl, //
- [&](const ast::TypeDecl* td) { return EmitTypeDecl(td); },
- [&](const ast::Function* func) { return EmitFunction(func); },
- [&](const ast::Variable* var) { return EmitVariable(line(), var); },
- [&](Default) {
- TINT_UNREACHABLE(Writer, diagnostics_);
- return false;
- })) {
- return false;
+ // Generate enable directives before any other global declarations.
+ for (auto enable : program_->AST().Enables()) {
+ if (!EmitEnable(enable)) {
+ return false;
+ }
}
- if (decl != program_->AST().GlobalDeclarations().back()) {
- line();
+ if (!program_->AST().Enables().empty()) {
+ line();
}
- }
+ // Generate global declarations in the order they appear in the module.
+ for (auto* decl : program_->AST().GlobalDeclarations()) {
+ if (decl->Is<ast::Enable>()) {
+ continue;
+ }
+ if (!Switch(
+ decl, //
+ [&](const ast::TypeDecl* td) { return EmitTypeDecl(td); },
+ [&](const ast::Function* func) { return EmitFunction(func); },
+ [&](const ast::Variable* var) { return EmitVariable(line(), var); },
+ [&](Default) {
+ TINT_UNREACHABLE(Writer, diagnostics_);
+ return false;
+ })) {
+ return false;
+ }
+ if (decl != program_->AST().GlobalDeclarations().back()) {
+ line();
+ }
+ }
+
+ return true;
+}
- return true;
+bool GeneratorImpl::EmitEnable(const ast::Enable* enable) {
+ auto out = line();
+ out << "enable " << enable->extension << ";";
+ return true;
}
bool GeneratorImpl::EmitTypeDecl(const ast::TypeDecl* ty) {
- return Switch(
- ty,
- [&](const ast::Alias* alias) { //
- auto out = line();
- out << "type " << program_->Symbols().NameFor(alias->name) << " = ";
- if (!EmitType(out, alias->type)) {
- return false;
- }
- out << ";";
- return true;
- },
- [&](const ast::Struct* str) { //
- return EmitStructType(str);
- },
- [&](Default) { //
- diagnostics_.add_error(
- diag::System::Writer,
- "unknown declared type: " + std::string(ty->TypeInfo().name));
- return false;
- });
+ return Switch(
+ ty,
+ [&](const ast::Alias* alias) { //
+ auto out = line();
+ out << "type " << program_->Symbols().NameFor(alias->name) << " = ";
+ if (!EmitType(out, alias->type)) {
+ return false;
+ }
+ out << ";";
+ return true;
+ },
+ [&](const ast::Struct* str) { //
+ return EmitStructType(str);
+ },
+ [&](Default) { //
+ diagnostics_.add_error(diag::System::Writer,
+ "unknown declared type: " + std::string(ty->TypeInfo().name));
+ return false;
+ });
}
-bool GeneratorImpl::EmitExpression(std::ostream& out,
- const ast::Expression* expr) {
- return Switch(
- expr,
- [&](const ast::IndexAccessorExpression* a) { //
- return EmitIndexAccessor(out, a);
- },
- [&](const ast::BinaryExpression* b) { //
- return EmitBinary(out, b);
- },
- [&](const ast::BitcastExpression* b) { //
- return EmitBitcast(out, b);
- },
- [&](const ast::CallExpression* c) { //
- return EmitCall(out, c);
- },
- [&](const ast::IdentifierExpression* i) { //
- return EmitIdentifier(out, i);
- },
- [&](const ast::LiteralExpression* l) { //
- return EmitLiteral(out, l);
- },
- [&](const ast::MemberAccessorExpression* m) { //
- return EmitMemberAccessor(out, m);
- },
- [&](const ast::PhonyExpression*) { //
- out << "_";
- return true;
- },
- [&](const ast::UnaryOpExpression* u) { //
- return EmitUnaryOp(out, u);
- },
- [&](Default) {
- diagnostics_.add_error(diag::System::Writer, "unknown expression type");
- return false;
- });
+bool GeneratorImpl::EmitExpression(std::ostream& out, const ast::Expression* expr) {
+ return Switch(
+ expr,
+ [&](const ast::IndexAccessorExpression* a) { //
+ return EmitIndexAccessor(out, a);
+ },
+ [&](const ast::BinaryExpression* b) { //
+ return EmitBinary(out, b);
+ },
+ [&](const ast::BitcastExpression* b) { //
+ return EmitBitcast(out, b);
+ },
+ [&](const ast::CallExpression* c) { //
+ return EmitCall(out, c);
+ },
+ [&](const ast::IdentifierExpression* i) { //
+ return EmitIdentifier(out, i);
+ },
+ [&](const ast::LiteralExpression* l) { //
+ return EmitLiteral(out, l);
+ },
+ [&](const ast::MemberAccessorExpression* m) { //
+ return EmitMemberAccessor(out, m);
+ },
+ [&](const ast::PhonyExpression*) { //
+ out << "_";
+ return true;
+ },
+ [&](const ast::UnaryOpExpression* u) { //
+ return EmitUnaryOp(out, u);
+ },
+ [&](Default) {
+ diagnostics_.add_error(diag::System::Writer, "unknown expression type");
+ return false;
+ });
}
-bool GeneratorImpl::EmitIndexAccessor(
- std::ostream& out,
- const ast::IndexAccessorExpression* expr) {
- bool paren_lhs =
- !expr->object->IsAnyOf<ast::IndexAccessorExpression, ast::CallExpression,
- ast::IdentifierExpression,
- ast::MemberAccessorExpression>();
- if (paren_lhs) {
- out << "(";
- }
- if (!EmitExpression(out, expr->object)) {
- return false;
- }
- if (paren_lhs) {
- out << ")";
- }
- out << "[";
+bool GeneratorImpl::EmitIndexAccessor(std::ostream& out, const ast::IndexAccessorExpression* expr) {
+ bool paren_lhs =
+ !expr->object->IsAnyOf<ast::IndexAccessorExpression, ast::CallExpression,
+ ast::IdentifierExpression, ast::MemberAccessorExpression>();
+ if (paren_lhs) {
+ out << "(";
+ }
+ if (!EmitExpression(out, expr->object)) {
+ return false;
+ }
+ if (paren_lhs) {
+ out << ")";
+ }
+ out << "[";
- if (!EmitExpression(out, expr->index)) {
- return false;
- }
- out << "]";
+ if (!EmitExpression(out, expr->index)) {
+ return false;
+ }
+ out << "]";
- return true;
+ return true;
}
-bool GeneratorImpl::EmitMemberAccessor(
- std::ostream& out,
- const ast::MemberAccessorExpression* expr) {
- bool paren_lhs =
- !expr->structure->IsAnyOf<ast::IndexAccessorExpression,
- ast::CallExpression, ast::IdentifierExpression,
- ast::MemberAccessorExpression>();
- if (paren_lhs) {
- out << "(";
- }
- if (!EmitExpression(out, expr->structure)) {
- return false;
- }
- if (paren_lhs) {
- out << ")";
- }
+bool GeneratorImpl::EmitMemberAccessor(std::ostream& out,
+ const ast::MemberAccessorExpression* expr) {
+ bool paren_lhs =
+ !expr->structure->IsAnyOf<ast::IndexAccessorExpression, ast::CallExpression,
+ ast::IdentifierExpression, ast::MemberAccessorExpression>();
+ if (paren_lhs) {
+ out << "(";
+ }
+ if (!EmitExpression(out, expr->structure)) {
+ return false;
+ }
+ if (paren_lhs) {
+ out << ")";
+ }
- out << ".";
+ out << ".";
- return EmitExpression(out, expr->member);
+ return EmitExpression(out, expr->member);
}
-bool GeneratorImpl::EmitBitcast(std::ostream& out,
- const ast::BitcastExpression* expr) {
- out << "bitcast<";
- if (!EmitType(out, expr->type)) {
- return false;
- }
+bool GeneratorImpl::EmitBitcast(std::ostream& out, const ast::BitcastExpression* expr) {
+ out << "bitcast<";
+ if (!EmitType(out, expr->type)) {
+ return false;
+ }
- out << ">(";
- if (!EmitExpression(out, expr->expr)) {
- return false;
- }
+ out << ">(";
+ if (!EmitExpression(out, expr->expr)) {
+ return false;
+ }
- out << ")";
- return true;
+ out << ")";
+ return true;
}
-bool GeneratorImpl::EmitCall(std::ostream& out,
- const ast::CallExpression* expr) {
- if (expr->target.name) {
- if (!EmitExpression(out, expr->target.name)) {
- return false;
- }
- } else if (expr->target.type) {
- if (!EmitType(out, expr->target.type)) {
- return false;
- }
- } else {
- TINT_ICE(Writer, diagnostics_)
- << "CallExpression target had neither a name or type";
- return false;
- }
- out << "(";
-
- bool first = true;
- const auto& args = expr->args;
- for (auto* arg : args) {
- if (!first) {
- out << ", ";
+bool GeneratorImpl::EmitCall(std::ostream& out, const ast::CallExpression* expr) {
+ if (expr->target.name) {
+ if (!EmitExpression(out, expr->target.name)) {
+ return false;
+ }
+ } else if (expr->target.type) {
+ if (!EmitType(out, expr->target.type)) {
+ return false;
+ }
+ } else {
+ TINT_ICE(Writer, diagnostics_) << "CallExpression target had neither a name nor a type";
+ return false;
}
- first = false;
+ out << "(";
- if (!EmitExpression(out, arg)) {
- return false;
+ bool first = true;
+ const auto& args = expr->args;
+ for (auto* arg : args) {
+ if (!first) {
+ out << ", ";
+ }
+ first = false;
+
+ if (!EmitExpression(out, arg)) {
+ return false;
+ }
}
- }
- out << ")";
+ out << ")";
- return true;
+ return true;
}
-bool GeneratorImpl::EmitLiteral(std::ostream& out,
- const ast::LiteralExpression* lit) {
- return Switch(
- lit,
- [&](const ast::BoolLiteralExpression* bl) { //
- out << (bl->value ? "true" : "false");
- return true;
- },
- [&](const ast::FloatLiteralExpression* fl) { //
- out << FloatToBitPreservingString(fl->value);
- return true;
- },
- [&](const ast::SintLiteralExpression* sl) { //
- out << sl->value;
- return true;
- },
- [&](const ast::UintLiteralExpression* ul) { //
- out << ul->value << "u";
- return true;
- },
- [&](Default) { //
- diagnostics_.add_error(diag::System::Writer, "unknown literal type");
- return false;
- });
+bool GeneratorImpl::EmitLiteral(std::ostream& out, const ast::LiteralExpression* lit) {
+ return Switch(
+ lit,
+ [&](const ast::BoolLiteralExpression* l) { //
+ out << (l->value ? "true" : "false");
+ return true;
+ },
+ [&](const ast::FloatLiteralExpression* l) { //
+ out << FloatToBitPreservingString(static_cast<float>(l->value)) << l->suffix;
+ return true;
+ },
+ [&](const ast::IntLiteralExpression* l) { //
+ out << l->value << l->suffix;
+ return true;
+ },
+ [&](Default) { //
+ diagnostics_.add_error(diag::System::Writer, "unknown literal type");
+ return false;
+ });
}
-bool GeneratorImpl::EmitIdentifier(std::ostream& out,
- const ast::IdentifierExpression* expr) {
- out << program_->Symbols().NameFor(expr->symbol);
- return true;
+bool GeneratorImpl::EmitIdentifier(std::ostream& out, const ast::IdentifierExpression* expr) {
+ out << program_->Symbols().NameFor(expr->symbol);
+ return true;
}
bool GeneratorImpl::EmitFunction(const ast::Function* func) {
- if (func->attributes.size()) {
- if (!EmitAttributes(line(), func->attributes)) {
- return false;
+ if (func->attributes.size()) {
+ if (!EmitAttributes(line(), func->attributes)) {
+ return false;
+ }
}
- }
- {
- auto out = line();
- out << "fn " << program_->Symbols().NameFor(func->symbol) << "(";
+ {
+ auto out = line();
+ out << "fn " << program_->Symbols().NameFor(func->symbol) << "(";
- bool first = true;
- for (auto* v : func->params) {
- if (!first) {
- out << ", ";
- }
- first = false;
-
- if (!v->attributes.empty()) {
- if (!EmitAttributes(out, v->attributes)) {
- return false;
- }
- out << " ";
- }
+ bool first = true;
+ for (auto* v : func->params) {
+ if (!first) {
+ out << ", ";
+ }
+ first = false;
- out << program_->Symbols().NameFor(v->symbol) << " : ";
+ if (!v->attributes.empty()) {
+ if (!EmitAttributes(out, v->attributes)) {
+ return false;
+ }
+ out << " ";
+ }
- if (!EmitType(out, v->type)) {
- return false;
- }
- }
+ out << program_->Symbols().NameFor(v->symbol) << " : ";
- out << ")";
+ if (!EmitType(out, v->type)) {
+ return false;
+ }
+ }
- if (!func->return_type->Is<ast::Void>() ||
- !func->return_type_attributes.empty()) {
- out << " -> ";
+ out << ")";
- if (!func->return_type_attributes.empty()) {
- if (!EmitAttributes(out, func->return_type_attributes)) {
- return false;
+ if (!func->return_type->Is<ast::Void>() || !func->return_type_attributes.empty()) {
+ out << " -> ";
+
+ if (!func->return_type_attributes.empty()) {
+ if (!EmitAttributes(out, func->return_type_attributes)) {
+ return false;
+ }
+ out << " ";
+ }
+
+ if (!EmitType(out, func->return_type)) {
+ return false;
+ }
}
- out << " ";
- }
- if (!EmitType(out, func->return_type)) {
- return false;
- }
+ if (func->body) {
+ out << " {";
+ }
}
if (func->body) {
- out << " {";
- }
- }
-
- if (func->body) {
- if (!EmitStatementsWithIndent(func->body->statements)) {
- return false;
+ if (!EmitStatementsWithIndent(func->body->statements)) {
+ return false;
+ }
+ line() << "}";
}
- line() << "}";
- }
- return true;
+ return true;
}
-bool GeneratorImpl::EmitImageFormat(std::ostream& out,
- const ast::TexelFormat fmt) {
- switch (fmt) {
- case ast::TexelFormat::kNone:
- diagnostics_.add_error(diag::System::Writer, "unknown image format");
- return false;
- default:
- out << fmt;
- }
- return true;
+bool GeneratorImpl::EmitImageFormat(std::ostream& out, const ast::TexelFormat fmt) {
+ switch (fmt) {
+ case ast::TexelFormat::kNone:
+ diagnostics_.add_error(diag::System::Writer, "unknown image format");
+ return false;
+ default:
+ out << fmt;
+ }
+ return true;
}
bool GeneratorImpl::EmitAccess(std::ostream& out, const ast::Access access) {
- switch (access) {
- case ast::Access::kRead:
- out << "read";
- return true;
- case ast::Access::kWrite:
- out << "write";
- return true;
- case ast::Access::kReadWrite:
- out << "read_write";
- return true;
- default:
- break;
- }
- diagnostics_.add_error(diag::System::Writer, "unknown access");
- return false;
+ switch (access) {
+ case ast::Access::kRead:
+ out << "read";
+ return true;
+ case ast::Access::kWrite:
+ out << "write";
+ return true;
+ case ast::Access::kReadWrite:
+ out << "read_write";
+ return true;
+ default:
+ break;
+ }
+ diagnostics_.add_error(diag::System::Writer, "unknown access");
+ return false;
}
bool GeneratorImpl::EmitType(std::ostream& out, const ast::Type* ty) {
- return Switch(
- ty,
- [&](const ast::Array* ary) {
- for (auto* attr : ary->attributes) {
- if (auto* stride = attr->As<ast::StrideAttribute>()) {
- out << "@stride(" << stride->stride << ") ";
- }
- }
-
- out << "array<";
- if (!EmitType(out, ary->type)) {
- return false;
- }
+ return Switch(
+ ty,
+ [&](const ast::Array* ary) {
+ for (auto* attr : ary->attributes) {
+ if (auto* stride = attr->As<ast::StrideAttribute>()) {
+ out << "@stride(" << stride->stride << ") ";
+ }
+ }
- if (!ary->IsRuntimeArray()) {
- out << ", ";
- if (!EmitExpression(out, ary->count)) {
- return false;
- }
- }
+ out << "array<";
+ if (!EmitType(out, ary->type)) {
+ return false;
+ }
- out << ">";
- return true;
- },
- [&](const ast::Bool*) {
- out << "bool";
- return true;
- },
- [&](const ast::F32*) {
- out << "f32";
- return true;
- },
- [&](const ast::I32*) {
- out << "i32";
- return true;
- },
- [&](const ast::Matrix* mat) {
- out << "mat" << mat->columns << "x" << mat->rows;
- if (auto* el_ty = mat->type) {
- out << "<";
- if (!EmitType(out, el_ty)) {
- return false;
- }
- out << ">";
- }
- return true;
- },
- [&](const ast::Pointer* ptr) {
- out << "ptr<" << ptr->storage_class << ", ";
- if (!EmitType(out, ptr->type)) {
- return false;
- }
- if (ptr->access != ast::Access::kUndefined) {
- out << ", ";
- if (!EmitAccess(out, ptr->access)) {
- return false;
- }
- }
- out << ">";
- return true;
- },
- [&](const ast::Atomic* atomic) {
- out << "atomic<";
- if (!EmitType(out, atomic->type)) {
- return false;
- }
- out << ">";
- return true;
- },
- [&](const ast::Sampler* sampler) {
- out << "sampler";
-
- if (sampler->IsComparison()) {
- out << "_comparison";
- }
- return true;
- },
- [&](const ast::ExternalTexture*) {
- out << "texture_external";
- return true;
- },
- [&](const ast::Texture* texture) {
- out << "texture_";
- bool ok = Switch(
- texture,
- [&](const ast::DepthTexture*) { //
- out << "depth_";
- return true;
- },
- [&](const ast::DepthMultisampledTexture*) { //
- out << "depth_multisampled_";
- return true;
- },
- [&](const ast::SampledTexture*) { //
- /* nothing to emit */
- return true;
- },
- [&](const ast::MultisampledTexture*) { //
- out << "multisampled_";
- return true;
- },
- [&](const ast::StorageTexture*) { //
- out << "storage_";
- return true;
- },
- [&](Default) { //
- diagnostics_.add_error(diag::System::Writer,
- "unknown texture type");
- return false;
- });
- if (!ok) {
- return false;
- }
+ if (!ary->IsRuntimeArray()) {
+ out << ", ";
+ if (!EmitExpression(out, ary->count)) {
+ return false;
+ }
+ }
- switch (texture->dim) {
- case ast::TextureDimension::k1d:
- out << "1d";
- break;
- case ast::TextureDimension::k2d:
- out << "2d";
- break;
- case ast::TextureDimension::k2dArray:
- out << "2d_array";
- break;
- case ast::TextureDimension::k3d:
- out << "3d";
- break;
- case ast::TextureDimension::kCube:
- out << "cube";
- break;
- case ast::TextureDimension::kCubeArray:
- out << "cube_array";
- break;
- default:
+ out << ">";
+ return true;
+ },
+ [&](const ast::Bool*) {
+ out << "bool";
+ return true;
+ },
+ [&](const ast::F32*) {
+ out << "f32";
+ return true;
+ },
+ [&](const ast::F16*) {
diagnostics_.add_error(diag::System::Writer,
- "unknown texture dimension");
+ "Type f16 is not completely implemented yet.");
return false;
- }
-
- return Switch(
- texture,
- [&](const ast::SampledTexture* sampled) { //
- out << "<";
- if (!EmitType(out, sampled->type)) {
- return false;
- }
- out << ">";
- return true;
- },
- [&](const ast::MultisampledTexture* ms) { //
- out << "<";
- if (!EmitType(out, ms->type)) {
+ },
+ [&](const ast::I32*) {
+ out << "i32";
+ return true;
+ },
+ [&](const ast::Matrix* mat) {
+ out << "mat" << mat->columns << "x" << mat->rows;
+ if (auto* el_ty = mat->type) {
+ out << "<";
+ if (!EmitType(out, el_ty)) {
+ return false;
+ }
+ out << ">";
+ }
+ return true;
+ },
+ [&](const ast::Pointer* ptr) {
+ out << "ptr<" << ptr->storage_class << ", ";
+ if (!EmitType(out, ptr->type)) {
return false;
- }
- out << ">";
- return true;
- },
- [&](const ast::StorageTexture* storage) { //
- out << "<";
- if (!EmitImageFormat(out, storage->format)) {
+ }
+ if (ptr->access != ast::Access::kUndefined) {
+ out << ", ";
+ if (!EmitAccess(out, ptr->access)) {
+ return false;
+ }
+ }
+ out << ">";
+ return true;
+ },
+ [&](const ast::Atomic* atomic) {
+ out << "atomic<";
+ if (!EmitType(out, atomic->type)) {
return false;
- }
- out << ", ";
- if (!EmitAccess(out, storage->access)) {
+ }
+ out << ">";
+ return true;
+ },
+ [&](const ast::Sampler* sampler) {
+ out << "sampler";
+
+ if (sampler->IsComparison()) {
+ out << "_comparison";
+ }
+ return true;
+ },
+ [&](const ast::ExternalTexture*) {
+ out << "texture_external";
+ return true;
+ },
+ [&](const ast::Texture* texture) {
+ out << "texture_";
+ bool ok = Switch(
+ texture,
+ [&](const ast::DepthTexture*) { //
+ out << "depth_";
+ return true;
+ },
+ [&](const ast::DepthMultisampledTexture*) { //
+ out << "depth_multisampled_";
+ return true;
+ },
+ [&](const ast::SampledTexture*) { //
+ /* nothing to emit */
+ return true;
+ },
+ [&](const ast::MultisampledTexture*) { //
+ out << "multisampled_";
+ return true;
+ },
+ [&](const ast::StorageTexture*) { //
+ out << "storage_";
+ return true;
+ },
+ [&](Default) { //
+ diagnostics_.add_error(diag::System::Writer, "unknown texture type");
+ return false;
+ });
+ if (!ok) {
return false;
- }
- out << ">";
- return true;
- },
- [&](Default) { //
- return true;
- });
- },
- [&](const ast::U32*) {
- out << "u32";
- return true;
- },
- [&](const ast::Vector* vec) {
- out << "vec" << vec->width;
- if (auto* el_ty = vec->type) {
- out << "<";
- if (!EmitType(out, el_ty)) {
+ }
+
+ switch (texture->dim) {
+ case ast::TextureDimension::k1d:
+ out << "1d";
+ break;
+ case ast::TextureDimension::k2d:
+ out << "2d";
+ break;
+ case ast::TextureDimension::k2dArray:
+ out << "2d_array";
+ break;
+ case ast::TextureDimension::k3d:
+ out << "3d";
+ break;
+ case ast::TextureDimension::kCube:
+ out << "cube";
+ break;
+ case ast::TextureDimension::kCubeArray:
+ out << "cube_array";
+ break;
+ default:
+ diagnostics_.add_error(diag::System::Writer, "unknown texture dimension");
+ return false;
+ }
+
+ return Switch(
+ texture,
+ [&](const ast::SampledTexture* sampled) { //
+ out << "<";
+ if (!EmitType(out, sampled->type)) {
+ return false;
+ }
+ out << ">";
+ return true;
+ },
+ [&](const ast::MultisampledTexture* ms) { //
+ out << "<";
+ if (!EmitType(out, ms->type)) {
+ return false;
+ }
+ out << ">";
+ return true;
+ },
+ [&](const ast::StorageTexture* storage) { //
+ out << "<";
+ if (!EmitImageFormat(out, storage->format)) {
+ return false;
+ }
+ out << ", ";
+ if (!EmitAccess(out, storage->access)) {
+ return false;
+ }
+ out << ">";
+ return true;
+ },
+ [&](Default) { //
+ return true;
+ });
+ },
+ [&](const ast::U32*) {
+ out << "u32";
+ return true;
+ },
+ [&](const ast::Vector* vec) {
+ out << "vec" << vec->width;
+ if (auto* el_ty = vec->type) {
+ out << "<";
+ if (!EmitType(out, el_ty)) {
+ return false;
+ }
+ out << ">";
+ }
+ return true;
+ },
+ [&](const ast::Void*) {
+ out << "void";
+ return true;
+ },
+ [&](const ast::TypeName* tn) {
+ out << program_->Symbols().NameFor(tn->name);
+ return true;
+ },
+ [&](Default) {
+ diagnostics_.add_error(diag::System::Writer,
+ "unknown type in EmitType: " + std::string(ty->TypeInfo().name));
return false;
- }
- out << ">";
- }
- return true;
- },
- [&](const ast::Void*) {
- out << "void";
- return true;
- },
- [&](const ast::TypeName* tn) {
- out << program_->Symbols().NameFor(tn->name);
- return true;
- },
- [&](Default) {
- diagnostics_.add_error(
- diag::System::Writer,
- "unknown type in EmitType: " + std::string(ty->TypeInfo().name));
- return false;
- });
+ });
}
bool GeneratorImpl::EmitStructType(const ast::Struct* str) {
- if (str->attributes.size()) {
- if (!EmitAttributes(line(), str->attributes)) {
- return false;
- }
- }
- line() << "struct " << program_->Symbols().NameFor(str->name) << " {";
-
- auto add_padding = [&](uint32_t size) {
- line() << "@size(" << size << ")";
-
- // Note: u32 is the smallest primitive we currently support. When WGSL
- // supports smaller types, this will need to be updated.
- line() << UniqueIdentifier("padding") << " : u32,";
- };
-
- increment_indent();
- uint32_t offset = 0;
- for (auto* mem : str->members) {
- // TODO(crbug.com/tint/798) move the @offset attribute handling to the
- // transform::Wgsl sanitizer.
- if (auto* mem_sem = program_->Sem().Get(mem)) {
- offset = utils::RoundUp(mem_sem->Align(), offset);
- if (uint32_t padding = mem_sem->Offset() - offset) {
- add_padding(padding);
- offset += padding;
- }
- offset += mem_sem->Size();
+ if (str->attributes.size()) {
+ if (!EmitAttributes(line(), str->attributes)) {
+ return false;
+ }
}
+ line() << "struct " << program_->Symbols().NameFor(str->name) << " {";
+
+ auto add_padding = [&](uint32_t size) {
+ line() << "@size(" << size << ")";
+
+ // Note: u32 is the smallest primitive we currently support. When WGSL
+ // supports smaller types, this will need to be updated.
+ line() << UniqueIdentifier("padding") << " : u32,";
+ };
+
+ increment_indent();
+ uint32_t offset = 0;
+ for (auto* mem : str->members) {
+ // TODO(crbug.com/tint/798) move the @offset attribute handling to the
+ // transform::Wgsl sanitizer.
+ if (auto* mem_sem = program_->Sem().Get(mem)) {
+ offset = utils::RoundUp(mem_sem->Align(), offset);
+ if (uint32_t padding = mem_sem->Offset() - offset) {
+ add_padding(padding);
+ offset += padding;
+ }
+ offset += mem_sem->Size();
+ }
- // Offset attributes no longer exist in the WGSL spec, but are emitted
- // by the SPIR-V reader and are consumed by the Resolver(). These should not
- // be emitted, but instead struct padding fields should be emitted.
- ast::AttributeList attributes_sanitized;
- attributes_sanitized.reserve(mem->attributes.size());
- for (auto* attr : mem->attributes) {
- if (!attr->Is<ast::StructMemberOffsetAttribute>()) {
- attributes_sanitized.emplace_back(attr);
- }
- }
+ // Offset attributes no longer exist in the WGSL spec, but are emitted
+ // by the SPIR-V reader and are consumed by the Resolver(). These should not
+ // be emitted, but instead struct padding fields should be emitted.
+ ast::AttributeList attributes_sanitized;
+ attributes_sanitized.reserve(mem->attributes.size());
+ for (auto* attr : mem->attributes) {
+ if (!attr->Is<ast::StructMemberOffsetAttribute>()) {
+ attributes_sanitized.emplace_back(attr);
+ }
+ }
- if (!attributes_sanitized.empty()) {
- if (!EmitAttributes(line(), attributes_sanitized)) {
- return false;
- }
- }
+ if (!attributes_sanitized.empty()) {
+ if (!EmitAttributes(line(), attributes_sanitized)) {
+ return false;
+ }
+ }
- auto out = line();
- out << program_->Symbols().NameFor(mem->symbol) << " : ";
- if (!EmitType(out, mem->type)) {
- return false;
+ auto out = line();
+ out << program_->Symbols().NameFor(mem->symbol) << " : ";
+ if (!EmitType(out, mem->type)) {
+ return false;
+ }
+ out << ",";
}
- out << ",";
- }
- decrement_indent();
+ decrement_indent();
- line() << "}";
- return true;
+ line() << "}";
+ return true;
}
bool GeneratorImpl::EmitVariable(std::ostream& out, const ast::Variable* var) {
- if (!var->attributes.empty()) {
- if (!EmitAttributes(out, var->attributes)) {
- return false;
+ if (!var->attributes.empty()) {
+ if (!EmitAttributes(out, var->attributes)) {
+ return false;
+ }
+ out << " ";
}
- out << " ";
- }
-
- if (var->is_overridable) {
- out << "override";
- } else if (var->is_const) {
- out << "let";
- } else {
- out << "var";
- auto sc = var->declared_storage_class;
- auto ac = var->declared_access;
- if (sc != ast::StorageClass::kNone || ac != ast::Access::kUndefined) {
- out << "<" << sc;
- if (ac != ast::Access::kUndefined) {
- out << ", ";
- if (!EmitAccess(out, ac)) {
- return false;
+
+ if (var->is_overridable) {
+ out << "override";
+ } else if (var->is_const) {
+ out << "let";
+ } else {
+ out << "var";
+ auto sc = var->declared_storage_class;
+ auto ac = var->declared_access;
+ if (sc != ast::StorageClass::kNone || ac != ast::Access::kUndefined) {
+ out << "<" << sc;
+ if (ac != ast::Access::kUndefined) {
+ out << ", ";
+ if (!EmitAccess(out, ac)) {
+ return false;
+ }
+ }
+ out << ">";
}
- }
- out << ">";
}
- }
- out << " " << program_->Symbols().NameFor(var->symbol);
+ out << " " << program_->Symbols().NameFor(var->symbol);
- if (auto* ty = var->type) {
- out << " : ";
- if (!EmitType(out, ty)) {
- return false;
+ if (auto* ty = var->type) {
+ out << " : ";
+ if (!EmitType(out, ty)) {
+ return false;
+ }
}
- }
- if (var->constructor != nullptr) {
- out << " = ";
- if (!EmitExpression(out, var->constructor)) {
- return false;
+ if (var->constructor != nullptr) {
+ out << " = ";
+ if (!EmitExpression(out, var->constructor)) {
+ return false;
+ }
}
- }
- out << ";";
+ out << ";";
- return true;
+ return true;
}
-bool GeneratorImpl::EmitAttributes(std::ostream& out,
- const ast::AttributeList& attrs) {
- bool first = true;
- for (auto* attr : attrs) {
- if (!first) {
- out << " ";
- }
- first = false;
- out << "@";
- bool ok = Switch(
- attr,
- [&](const ast::WorkgroupAttribute* workgroup) {
- auto values = workgroup->Values();
- out << "workgroup_size(";
- for (int i = 0; i < 3; i++) {
- if (values[i]) {
- if (i > 0) {
- out << ", ";
- }
- if (!EmitExpression(out, values[i])) {
+bool GeneratorImpl::EmitAttributes(std::ostream& out, const ast::AttributeList& attrs) {
+ bool first = true;
+ for (auto* attr : attrs) {
+ if (!first) {
+ out << " ";
+ }
+ first = false;
+ out << "@";
+ bool ok = Switch(
+ attr,
+ [&](const ast::WorkgroupAttribute* workgroup) {
+ auto values = workgroup->Values();
+ out << "workgroup_size(";
+ for (int i = 0; i < 3; i++) {
+ if (values[i]) {
+ if (i > 0) {
+ out << ", ";
+ }
+ if (!EmitExpression(out, values[i])) {
+ return false;
+ }
+ }
+ }
+ out << ")";
+ return true;
+ },
+ [&](const ast::StageAttribute* stage) {
+ out << stage->stage;
+ return true;
+ },
+ [&](const ast::BindingAttribute* binding) {
+ out << "binding(" << binding->value << ")";
+ return true;
+ },
+ [&](const ast::GroupAttribute* group) {
+ out << "group(" << group->value << ")";
+ return true;
+ },
+ [&](const ast::LocationAttribute* location) {
+ out << "location(" << location->value << ")";
+ return true;
+ },
+ [&](const ast::BuiltinAttribute* builtin) {
+ out << "builtin(" << builtin->builtin << ")";
+ return true;
+ },
+ [&](const ast::InterpolateAttribute* interpolate) {
+ out << "interpolate(" << interpolate->type;
+ if (interpolate->sampling != ast::InterpolationSampling::kNone) {
+ out << ", " << interpolate->sampling;
+ }
+ out << ")";
+ return true;
+ },
+ [&](const ast::InvariantAttribute*) {
+ out << "invariant";
+ return true;
+ },
+ [&](const ast::IdAttribute* override_deco) {
+ out << "id(" << override_deco->value << ")";
+ return true;
+ },
+ [&](const ast::StructMemberSizeAttribute* size) {
+ out << "size(" << size->size << ")";
+ return true;
+ },
+ [&](const ast::StructMemberAlignAttribute* align) {
+ out << "align(" << align->align << ")";
+ return true;
+ },
+ [&](const ast::StrideAttribute* stride) {
+ out << "stride(" << stride->stride << ")";
+ return true;
+ },
+ [&](const ast::InternalAttribute* internal) {
+ out << "internal(" << internal->InternalName() << ")";
+ return true;
+ },
+ [&](Default) {
+ TINT_ICE(Writer, diagnostics_)
+ << "Unsupported attribute '" << attr->TypeInfo().name << "'";
return false;
- }
- }
- }
- out << ")";
- return true;
- },
- [&](const ast::StageAttribute* stage) {
- out << "stage(" << stage->stage << ")";
- return true;
- },
- [&](const ast::BindingAttribute* binding) {
- out << "binding(" << binding->value << ")";
- return true;
- },
- [&](const ast::GroupAttribute* group) {
- out << "group(" << group->value << ")";
- return true;
- },
- [&](const ast::LocationAttribute* location) {
- out << "location(" << location->value << ")";
- return true;
- },
- [&](const ast::BuiltinAttribute* builtin) {
- out << "builtin(" << builtin->builtin << ")";
- return true;
- },
- [&](const ast::InterpolateAttribute* interpolate) {
- out << "interpolate(" << interpolate->type;
- if (interpolate->sampling != ast::InterpolationSampling::kNone) {
- out << ", " << interpolate->sampling;
- }
- out << ")";
- return true;
- },
- [&](const ast::InvariantAttribute*) {
- out << "invariant";
- return true;
- },
- [&](const ast::IdAttribute* override_deco) {
- out << "id(" << override_deco->value << ")";
- return true;
- },
- [&](const ast::StructMemberSizeAttribute* size) {
- out << "size(" << size->size << ")";
- return true;
- },
- [&](const ast::StructMemberAlignAttribute* align) {
- out << "align(" << align->align << ")";
- return true;
- },
- [&](const ast::StrideAttribute* stride) {
- out << "stride(" << stride->stride << ")";
- return true;
- },
- [&](const ast::InternalAttribute* internal) {
- out << "internal(" << internal->InternalName() << ")";
- return true;
- },
- [&](Default) {
- TINT_ICE(Writer, diagnostics_)
- << "Unsupported attribute '" << attr->TypeInfo().name << "'";
- return false;
- });
+ });
- if (!ok) {
- return false;
+ if (!ok) {
+ return false;
+ }
}
- }
- return true;
+ return true;
}
-bool GeneratorImpl::EmitBinary(std::ostream& out,
- const ast::BinaryExpression* expr) {
- out << "(";
+bool GeneratorImpl::EmitBinary(std::ostream& out, const ast::BinaryExpression* expr) {
+ out << "(";
- if (!EmitExpression(out, expr->lhs)) {
- return false;
- }
- out << " ";
- if (!EmitBinaryOp(out, expr->op)) {
- return false;
- }
- out << " ";
+ if (!EmitExpression(out, expr->lhs)) {
+ return false;
+ }
+ out << " ";
+ if (!EmitBinaryOp(out, expr->op)) {
+ return false;
+ }
+ out << " ";
- if (!EmitExpression(out, expr->rhs)) {
- return false;
- }
+ if (!EmitExpression(out, expr->rhs)) {
+ return false;
+ }
- out << ")";
- return true;
+ out << ")";
+ return true;
}
bool GeneratorImpl::EmitBinaryOp(std::ostream& out, const ast::BinaryOp op) {
- switch (op) {
- case ast::BinaryOp::kAnd:
- out << "&";
- break;
- case ast::BinaryOp::kOr:
- out << "|";
- break;
- case ast::BinaryOp::kXor:
- out << "^";
- break;
- case ast::BinaryOp::kLogicalAnd:
- out << "&&";
- break;
- case ast::BinaryOp::kLogicalOr:
- out << "||";
- break;
- case ast::BinaryOp::kEqual:
- out << "==";
- break;
- case ast::BinaryOp::kNotEqual:
- out << "!=";
- break;
- case ast::BinaryOp::kLessThan:
- out << "<";
- break;
- case ast::BinaryOp::kGreaterThan:
- out << ">";
- break;
- case ast::BinaryOp::kLessThanEqual:
- out << "<=";
- break;
- case ast::BinaryOp::kGreaterThanEqual:
- out << ">=";
- break;
- case ast::BinaryOp::kShiftLeft:
- out << "<<";
- break;
- case ast::BinaryOp::kShiftRight:
- out << ">>";
- break;
- case ast::BinaryOp::kAdd:
- out << "+";
- break;
- case ast::BinaryOp::kSubtract:
- out << "-";
- break;
- case ast::BinaryOp::kMultiply:
- out << "*";
- break;
- case ast::BinaryOp::kDivide:
- out << "/";
- break;
- case ast::BinaryOp::kModulo:
- out << "%";
- break;
- case ast::BinaryOp::kNone:
- diagnostics_.add_error(diag::System::Writer,
- "missing binary operation type");
- return false;
- }
- return true;
+ switch (op) {
+ case ast::BinaryOp::kAnd:
+ out << "&";
+ break;
+ case ast::BinaryOp::kOr:
+ out << "|";
+ break;
+ case ast::BinaryOp::kXor:
+ out << "^";
+ break;
+ case ast::BinaryOp::kLogicalAnd:
+ out << "&&";
+ break;
+ case ast::BinaryOp::kLogicalOr:
+ out << "||";
+ break;
+ case ast::BinaryOp::kEqual:
+ out << "==";
+ break;
+ case ast::BinaryOp::kNotEqual:
+ out << "!=";
+ break;
+ case ast::BinaryOp::kLessThan:
+ out << "<";
+ break;
+ case ast::BinaryOp::kGreaterThan:
+ out << ">";
+ break;
+ case ast::BinaryOp::kLessThanEqual:
+ out << "<=";
+ break;
+ case ast::BinaryOp::kGreaterThanEqual:
+ out << ">=";
+ break;
+ case ast::BinaryOp::kShiftLeft:
+ out << "<<";
+ break;
+ case ast::BinaryOp::kShiftRight:
+ out << ">>";
+ break;
+ case ast::BinaryOp::kAdd:
+ out << "+";
+ break;
+ case ast::BinaryOp::kSubtract:
+ out << "-";
+ break;
+ case ast::BinaryOp::kMultiply:
+ out << "*";
+ break;
+ case ast::BinaryOp::kDivide:
+ out << "/";
+ break;
+ case ast::BinaryOp::kModulo:
+ out << "%";
+ break;
+ case ast::BinaryOp::kNone:
+ diagnostics_.add_error(diag::System::Writer, "missing binary operation type");
+ return false;
+ }
+ return true;
}
-bool GeneratorImpl::EmitUnaryOp(std::ostream& out,
- const ast::UnaryOpExpression* expr) {
- switch (expr->op) {
- case ast::UnaryOp::kAddressOf:
- out << "&";
- break;
- case ast::UnaryOp::kComplement:
- out << "~";
- break;
- case ast::UnaryOp::kIndirection:
- out << "*";
- break;
- case ast::UnaryOp::kNot:
- out << "!";
- break;
- case ast::UnaryOp::kNegation:
- out << "-";
- break;
- }
- out << "(";
-
- if (!EmitExpression(out, expr->expr)) {
- return false;
- }
+bool GeneratorImpl::EmitUnaryOp(std::ostream& out, const ast::UnaryOpExpression* expr) {
+ switch (expr->op) {
+ case ast::UnaryOp::kAddressOf:
+ out << "&";
+ break;
+ case ast::UnaryOp::kComplement:
+ out << "~";
+ break;
+ case ast::UnaryOp::kIndirection:
+ out << "*";
+ break;
+ case ast::UnaryOp::kNot:
+ out << "!";
+ break;
+ case ast::UnaryOp::kNegation:
+ out << "-";
+ break;
+ }
+ out << "(";
- out << ")";
+ if (!EmitExpression(out, expr->expr)) {
+ return false;
+ }
+
+ out << ")";
- return true;
+ return true;
}
bool GeneratorImpl::EmitBlock(const ast::BlockStatement* stmt) {
- line() << "{";
- if (!EmitStatementsWithIndent(stmt->statements)) {
- return false;
- }
- line() << "}";
+ line() << "{";
+ if (!EmitStatementsWithIndent(stmt->statements)) {
+ return false;
+ }
+ line() << "}";
- return true;
+ return true;
}
bool GeneratorImpl::EmitStatement(const ast::Statement* stmt) {
- return Switch(
- stmt, //
- [&](const ast::AssignmentStatement* a) { return EmitAssign(a); },
- [&](const ast::BlockStatement* b) { return EmitBlock(b); },
- [&](const ast::BreakStatement* b) { return EmitBreak(b); },
- [&](const ast::CallStatement* c) {
- auto out = line();
- if (!EmitCall(out, c->expr)) {
- return false;
- }
- out << ";";
- return true;
- },
- [&](const ast::CompoundAssignmentStatement* c) {
- return EmitCompoundAssign(c);
- },
- [&](const ast::ContinueStatement* c) { return EmitContinue(c); },
- [&](const ast::DiscardStatement* d) { return EmitDiscard(d); },
- [&](const ast::FallthroughStatement* f) { return EmitFallthrough(f); },
- [&](const ast::IfStatement* i) { return EmitIf(i); },
- [&](const ast::IncrementDecrementStatement* l) {
- return EmitIncrementDecrement(l);
- },
- [&](const ast::LoopStatement* l) { return EmitLoop(l); },
- [&](const ast::ForLoopStatement* l) { return EmitForLoop(l); },
- [&](const ast::ReturnStatement* r) { return EmitReturn(r); },
- [&](const ast::SwitchStatement* s) { return EmitSwitch(s); },
- [&](const ast::VariableDeclStatement* v) {
- return EmitVariable(line(), v->variable);
- },
- [&](Default) {
- diagnostics_.add_error(
- diag::System::Writer,
- "unknown statement type: " + std::string(stmt->TypeInfo().name));
- return false;
- });
+ return Switch(
+ stmt, //
+ [&](const ast::AssignmentStatement* a) { return EmitAssign(a); },
+ [&](const ast::BlockStatement* b) { return EmitBlock(b); },
+ [&](const ast::BreakStatement* b) { return EmitBreak(b); },
+ [&](const ast::CallStatement* c) {
+ auto out = line();
+ if (!EmitCall(out, c->expr)) {
+ return false;
+ }
+ out << ";";
+ return true;
+ },
+ [&](const ast::CompoundAssignmentStatement* c) { return EmitCompoundAssign(c); },
+ [&](const ast::ContinueStatement* c) { return EmitContinue(c); },
+ [&](const ast::DiscardStatement* d) { return EmitDiscard(d); },
+ [&](const ast::FallthroughStatement* f) { return EmitFallthrough(f); },
+ [&](const ast::IfStatement* i) { return EmitIf(i); },
+ [&](const ast::IncrementDecrementStatement* l) { return EmitIncrementDecrement(l); },
+ [&](const ast::LoopStatement* l) { return EmitLoop(l); },
+ [&](const ast::ForLoopStatement* l) { return EmitForLoop(l); },
+ [&](const ast::ReturnStatement* r) { return EmitReturn(r); },
+ [&](const ast::SwitchStatement* s) { return EmitSwitch(s); },
+ [&](const ast::VariableDeclStatement* v) { return EmitVariable(line(), v->variable); },
+ [&](Default) {
+ diagnostics_.add_error(diag::System::Writer,
+ "unknown statement type: " + std::string(stmt->TypeInfo().name));
+ return false;
+ });
}
bool GeneratorImpl::EmitStatements(const ast::StatementList& stmts) {
- for (auto* s : stmts) {
- if (!EmitStatement(s)) {
- return false;
+ for (auto* s : stmts) {
+ if (!EmitStatement(s)) {
+ return false;
+ }
}
- }
- return true;
+ return true;
}
bool GeneratorImpl::EmitStatementsWithIndent(const ast::StatementList& stmts) {
- ScopedIndent si(this);
- return EmitStatements(stmts);
+ ScopedIndent si(this);
+ return EmitStatements(stmts);
}
bool GeneratorImpl::EmitAssign(const ast::AssignmentStatement* stmt) {
- auto out = line();
+ auto out = line();
- if (!EmitExpression(out, stmt->lhs)) {
- return false;
- }
+ if (!EmitExpression(out, stmt->lhs)) {
+ return false;
+ }
- out << " = ";
+ out << " = ";
- if (!EmitExpression(out, stmt->rhs)) {
- return false;
- }
+ if (!EmitExpression(out, stmt->rhs)) {
+ return false;
+ }
- out << ";";
+ out << ";";
- return true;
+ return true;
}
bool GeneratorImpl::EmitBreak(const ast::BreakStatement*) {
- line() << "break;";
- return true;
+ line() << "break;";
+ return true;
}
bool GeneratorImpl::EmitCase(const ast::CaseStatement* stmt) {
- if (stmt->IsDefault()) {
- line() << "default: {";
- } else {
- auto out = line();
- out << "case ";
+ if (stmt->IsDefault()) {
+ line() << "default: {";
+ } else {
+ auto out = line();
+ out << "case ";
- bool first = true;
- for (auto* selector : stmt->selectors) {
- if (!first) {
- out << ", ";
- }
+ bool first = true;
+ for (auto* selector : stmt->selectors) {
+ if (!first) {
+ out << ", ";
+ }
- first = false;
- if (!EmitLiteral(out, selector)) {
- return false;
- }
+ first = false;
+ if (!EmitLiteral(out, selector)) {
+ return false;
+ }
+ }
+ out << ": {";
}
- out << ": {";
- }
- if (!EmitStatementsWithIndent(stmt->body->statements)) {
- return false;
- }
+ if (!EmitStatementsWithIndent(stmt->body->statements)) {
+ return false;
+ }
- line() << "}";
- return true;
+ line() << "}";
+ return true;
}
-bool GeneratorImpl::EmitCompoundAssign(
- const ast::CompoundAssignmentStatement* stmt) {
- auto out = line();
+bool GeneratorImpl::EmitCompoundAssign(const ast::CompoundAssignmentStatement* stmt) {
+ auto out = line();
- if (!EmitExpression(out, stmt->lhs)) {
- return false;
- }
+ if (!EmitExpression(out, stmt->lhs)) {
+ return false;
+ }
- out << " ";
- if (!EmitBinaryOp(out, stmt->op)) {
- return false;
- }
- out << "= ";
+ out << " ";
+ if (!EmitBinaryOp(out, stmt->op)) {
+ return false;
+ }
+ out << "= ";
- if (!EmitExpression(out, stmt->rhs)) {
- return false;
- }
+ if (!EmitExpression(out, stmt->rhs)) {
+ return false;
+ }
- out << ";";
+ out << ";";
- return true;
+ return true;
}
bool GeneratorImpl::EmitContinue(const ast::ContinueStatement*) {
- line() << "continue;";
- return true;
+ line() << "continue;";
+ return true;
}
bool GeneratorImpl::EmitFallthrough(const ast::FallthroughStatement*) {
- line() << "fallthrough;";
- return true;
+ line() << "fallthrough;";
+ return true;
}
bool GeneratorImpl::EmitIf(const ast::IfStatement* stmt) {
- {
- auto out = line();
- out << "if (";
- if (!EmitExpression(out, stmt->condition)) {
- return false;
+ {
+ auto out = line();
+ out << "if (";
+ if (!EmitExpression(out, stmt->condition)) {
+ return false;
+ }
+ out << ") {";
}
- out << ") {";
- }
-
- if (!EmitStatementsWithIndent(stmt->body->statements)) {
- return false;
- }
- for (auto* e : stmt->else_statements) {
- if (e->condition) {
- auto out = line();
- out << "} else if (";
- if (!EmitExpression(out, e->condition)) {
+ if (!EmitStatementsWithIndent(stmt->body->statements)) {
return false;
- }
- out << ") {";
- } else {
- line() << "} else {";
}
- if (!EmitStatementsWithIndent(e->body->statements)) {
- return false;
+ const ast::Statement* e = stmt->else_statement;
+ while (e) {
+ if (auto* elseif = e->As<ast::IfStatement>()) {
+ {
+ auto out = line();
+ out << "} else if (";
+ if (!EmitExpression(out, elseif->condition)) {
+ return false;
+ }
+ out << ") {";
+ }
+ if (!EmitStatementsWithIndent(elseif->body->statements)) {
+ return false;
+ }
+ e = elseif->else_statement;
+ } else {
+ line() << "} else {";
+ if (!EmitStatementsWithIndent(e->As<ast::BlockStatement>()->statements)) {
+ return false;
+ }
+ break;
+ }
}
- }
- line() << "}";
+ line() << "}";
- return true;
+ return true;
}
-bool GeneratorImpl::EmitIncrementDecrement(
- const ast::IncrementDecrementStatement* stmt) {
- auto out = line();
- if (!EmitExpression(out, stmt->lhs)) {
- return false;
- }
- out << (stmt->increment ? "++" : "--") << ";";
- return true;
+bool GeneratorImpl::EmitIncrementDecrement(const ast::IncrementDecrementStatement* stmt) {
+ auto out = line();
+ if (!EmitExpression(out, stmt->lhs)) {
+ return false;
+ }
+ out << (stmt->increment ? "++" : "--") << ";";
+ return true;
}
bool GeneratorImpl::EmitDiscard(const ast::DiscardStatement*) {
- line() << "discard;";
- return true;
+ line() << "discard;";
+ return true;
}
bool GeneratorImpl::EmitLoop(const ast::LoopStatement* stmt) {
- line() << "loop {";
- increment_indent();
+ line() << "loop {";
+ increment_indent();
- if (!EmitStatements(stmt->body->statements)) {
- return false;
- }
+ if (!EmitStatements(stmt->body->statements)) {
+ return false;
+ }
- if (stmt->continuing && !stmt->continuing->Empty()) {
- line();
- line() << "continuing {";
- if (!EmitStatementsWithIndent(stmt->continuing->statements)) {
- return false;
+ if (stmt->continuing && !stmt->continuing->Empty()) {
+ line();
+ line() << "continuing {";
+ if (!EmitStatementsWithIndent(stmt->continuing->statements)) {
+ return false;
+ }
+ line() << "}";
}
- line() << "}";
- }
- decrement_indent();
- line() << "}";
+ decrement_indent();
+ line() << "}";
- return true;
+ return true;
}
bool GeneratorImpl::EmitForLoop(const ast::ForLoopStatement* stmt) {
- TextBuffer init_buf;
- if (auto* init = stmt->initializer) {
- TINT_SCOPED_ASSIGNMENT(current_buffer_, &init_buf);
- if (!EmitStatement(init)) {
- return false;
+ TextBuffer init_buf;
+ if (auto* init = stmt->initializer) {
+ TINT_SCOPED_ASSIGNMENT(current_buffer_, &init_buf);
+ if (!EmitStatement(init)) {
+ return false;
+ }
}
- }
- TextBuffer cont_buf;
- if (auto* cont = stmt->continuing) {
- TINT_SCOPED_ASSIGNMENT(current_buffer_, &cont_buf);
- if (!EmitStatement(cont)) {
- return false;
+ TextBuffer cont_buf;
+ if (auto* cont = stmt->continuing) {
+ TINT_SCOPED_ASSIGNMENT(current_buffer_, &cont_buf);
+ if (!EmitStatement(cont)) {
+ return false;
+ }
}
- }
- {
- auto out = line();
- out << "for";
{
- ScopedParen sp(out);
- switch (init_buf.lines.size()) {
- case 0: // No initializer
- break;
- case 1: // Single line initializer statement
- out << TrimSuffix(init_buf.lines[0].content, ";");
- break;
- default: // Block initializer statement
- for (size_t i = 1; i < init_buf.lines.size(); i++) {
- // Indent all by the first line
- init_buf.lines[i].indent += current_buffer_->current_indent;
- }
- out << TrimSuffix(init_buf.String(), "\n");
- break;
- }
-
- out << "; ";
-
- if (auto* cond = stmt->condition) {
- if (!EmitExpression(out, cond)) {
- return false;
+ auto out = line();
+ out << "for";
+ {
+ ScopedParen sp(out);
+ switch (init_buf.lines.size()) {
+ case 0: // No initializer
+ break;
+ case 1: // Single line initializer statement
+ out << TrimSuffix(init_buf.lines[0].content, ";");
+ break;
+ default: // Block initializer statement
+ for (size_t i = 1; i < init_buf.lines.size(); i++) {
+ // Indent all by the first line
+ init_buf.lines[i].indent += current_buffer_->current_indent;
+ }
+ out << TrimSuffix(init_buf.String(), "\n");
+ break;
+ }
+
+ out << "; ";
+
+ if (auto* cond = stmt->condition) {
+ if (!EmitExpression(out, cond)) {
+ return false;
+ }
+ }
+
+ out << "; ";
+
+ switch (cont_buf.lines.size()) {
+ case 0: // No continuing
+ break;
+ case 1: // Single line continuing statement
+ out << TrimSuffix(cont_buf.lines[0].content, ";");
+ break;
+ default: // Block continuing statement
+ for (size_t i = 1; i < cont_buf.lines.size(); i++) {
+ // Indent all by the first line
+ cont_buf.lines[i].indent += current_buffer_->current_indent;
+ }
+ out << TrimSuffix(cont_buf.String(), "\n");
+ break;
+ }
}
- }
-
- out << "; ";
-
- switch (cont_buf.lines.size()) {
- case 0: // No continuing
- break;
- case 1: // Single line continuing statement
- out << TrimSuffix(cont_buf.lines[0].content, ";");
- break;
- default: // Block continuing statement
- for (size_t i = 1; i < cont_buf.lines.size(); i++) {
- // Indent all by the first line
- cont_buf.lines[i].indent += current_buffer_->current_indent;
- }
- out << TrimSuffix(cont_buf.String(), "\n");
- break;
- }
+ out << " {";
}
- out << " {";
- }
- if (!EmitStatementsWithIndent(stmt->body->statements)) {
- return false;
- }
+ if (!EmitStatementsWithIndent(stmt->body->statements)) {
+ return false;
+ }
- line() << "}";
+ line() << "}";
- return true;
+ return true;
}
bool GeneratorImpl::EmitReturn(const ast::ReturnStatement* stmt) {
- auto out = line();
- out << "return";
- if (stmt->value) {
- out << " ";
- if (!EmitExpression(out, stmt->value)) {
- return false;
+ auto out = line();
+ out << "return";
+ if (stmt->value) {
+ out << " ";
+ if (!EmitExpression(out, stmt->value)) {
+ return false;
+ }
}
- }
- out << ";";
- return true;
+ out << ";";
+ return true;
}
bool GeneratorImpl::EmitSwitch(const ast::SwitchStatement* stmt) {
- {
- auto out = line();
- out << "switch(";
- if (!EmitExpression(out, stmt->condition)) {
- return false;
+ {
+ auto out = line();
+ out << "switch(";
+ if (!EmitExpression(out, stmt->condition)) {
+ return false;
+ }
+ out << ") {";
}
- out << ") {";
- }
- {
- ScopedIndent si(this);
- for (auto* s : stmt->body) {
- if (!EmitCase(s)) {
- return false;
- }
+ {
+ ScopedIndent si(this);
+ for (auto* s : stmt->body) {
+ if (!EmitCase(s)) {
+ return false;
+ }
+ }
}
- }
- line() << "}";
- return true;
+ line() << "}";
+ return true;
}
} // namespace tint::writer::wgsl
diff --git a/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl.h b/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl.h
index bee8d65e4b4..a17e2da70fb 100644
--- a/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl.h
+++ b/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl.h
@@ -34,7 +34,7 @@
#include "src/tint/ast/switch_statement.h"
#include "src/tint/ast/unary_op_expression.h"
#include "src/tint/program.h"
-#include "src/tint/sem/storage_texture_type.h"
+#include "src/tint/sem/storage_texture.h"
#include "src/tint/sem/struct.h"
#include "src/tint/writer/text_generator.h"
@@ -42,173 +42,175 @@ namespace tint::writer::wgsl {
/// Implementation class for WGSL generator
class GeneratorImpl : public TextGenerator {
- public:
- /// Constructor
- /// @param program the program
- explicit GeneratorImpl(const Program* program);
- ~GeneratorImpl();
+ public:
+ /// Constructor
+ /// @param program the program
+ explicit GeneratorImpl(const Program* program);
+ ~GeneratorImpl();
- /// Generates the result data
- /// @returns true on successful generation; false otherwise
- bool Generate();
+ /// Generates the result data
+ /// @returns true on successful generation; false otherwise
+ bool Generate();
- /// Handles generating a declared type
- /// @param ty the declared type to generate
- /// @returns true if the declared type was emitted
- bool EmitTypeDecl(const ast::TypeDecl* ty);
- /// Handles an index accessor expression
- /// @param out the output of the expression stream
- /// @param expr the expression to emit
- /// @returns true if the index accessor was emitted
- bool EmitIndexAccessor(std::ostream& out,
- const ast::IndexAccessorExpression* expr);
- /// Handles an assignment statement
- /// @param stmt the statement to emit
- /// @returns true if the statement was emitted successfully
- bool EmitAssign(const ast::AssignmentStatement* stmt);
- /// Handles generating a binary expression
- /// @param out the output of the expression stream
- /// @param expr the binary expression
- /// @returns true if the expression was emitted, false otherwise
- bool EmitBinary(std::ostream& out, const ast::BinaryExpression* expr);
- /// Handles generating a binary operator
- /// @param out the output of the expression stream
- /// @param op the binary operator
- /// @returns true if the operator was emitted, false otherwise
- bool EmitBinaryOp(std::ostream& out, const ast::BinaryOp op);
- /// Handles generating a bitcast expression
- /// @param out the output of the expression stream
- /// @param expr the bitcast expression
- /// @returns true if the bitcast was emitted
- bool EmitBitcast(std::ostream& out, const ast::BitcastExpression* expr);
- /// Handles a block statement
- /// @param stmt the statement to emit
- /// @returns true if the statement was emitted successfully
- bool EmitBlock(const ast::BlockStatement* stmt);
- /// Handles a break statement
- /// @param stmt the statement to emit
- /// @returns true if the statement was emitted successfully
- bool EmitBreak(const ast::BreakStatement* stmt);
- /// Handles generating a call expression
- /// @param out the output of the expression stream
- /// @param expr the call expression
- /// @returns true if the call expression is emitted
- bool EmitCall(std::ostream& out, const ast::CallExpression* expr);
- /// Handles a case statement
- /// @param stmt the statement
-  /// @returns true if the statement was emitted successfully
- bool EmitCase(const ast::CaseStatement* stmt);
- /// Handles a compound assignment statement
- /// @param stmt the statement to emit
- /// @returns true if the statement was emitted successfully
- bool EmitCompoundAssign(const ast::CompoundAssignmentStatement* stmt);
- /// Handles generating a literal expression
- /// @param out the output of the expression stream
-  /// @param expr the literal expression
- /// @returns true if the literal expression is emitted
- bool EmitLiteral(std::ostream& out, const ast::LiteralExpression* expr);
- /// Handles a continue statement
- /// @param stmt the statement to emit
- /// @returns true if the statement was emitted successfully
- bool EmitContinue(const ast::ContinueStatement* stmt);
-  /// Handles generating an Expression
- /// @param out the output of the expression stream
- /// @param expr the expression
- /// @returns true if the expression was emitted
- bool EmitExpression(std::ostream& out, const ast::Expression* expr);
- /// Handles generating a fallthrough statement
- /// @param stmt the fallthrough statement
- /// @returns true if the statement was successfully emitted
- bool EmitFallthrough(const ast::FallthroughStatement* stmt);
- /// Handles generating a function
- /// @param func the function to generate
- /// @returns true if the function was emitted
- bool EmitFunction(const ast::Function* func);
- /// Handles generating an identifier expression
- /// @param out the output of the expression stream
- /// @param expr the identifier expression
- /// @returns true if the identifier was emitted
- bool EmitIdentifier(std::ostream& out, const ast::IdentifierExpression* expr);
- /// Handles an if statement
- /// @param stmt the statement to emit
- /// @returns true if the statement was successfully emitted
- bool EmitIf(const ast::IfStatement* stmt);
- /// Handles an increment/decrement statement
- /// @param stmt the statement to emit
- /// @returns true if the statement was successfully emitted
- bool EmitIncrementDecrement(const ast::IncrementDecrementStatement* stmt);
- /// Handles generating a discard statement
- /// @param stmt the discard statement
- /// @returns true if the statement was successfully emitted
- bool EmitDiscard(const ast::DiscardStatement* stmt);
- /// Handles a loop statement
- /// @param stmt the statement to emit
-  /// @returns true if the statement was emitted
- bool EmitLoop(const ast::LoopStatement* stmt);
- /// Handles a for-loop statement
- /// @param stmt the statement to emit
-  /// @returns true if the statement was emitted
- bool EmitForLoop(const ast::ForLoopStatement* stmt);
- /// Handles a member accessor expression
- /// @param out the output of the expression stream
- /// @param expr the member accessor expression
- /// @returns true if the member accessor was emitted
- bool EmitMemberAccessor(std::ostream& out,
- const ast::MemberAccessorExpression* expr);
- /// Handles return statements
- /// @param stmt the statement to emit
- /// @returns true if the statement was successfully emitted
- bool EmitReturn(const ast::ReturnStatement* stmt);
-  /// Handles a statement
- /// @param stmt the statement to emit
- /// @returns true if the statement was emitted
- bool EmitStatement(const ast::Statement* stmt);
- /// Handles a statement list
- /// @param stmts the statements to emit
- /// @returns true if the statements were emitted
- bool EmitStatements(const ast::StatementList& stmts);
- /// Handles a statement list with an increased indentation
- /// @param stmts the statements to emit
- /// @returns true if the statements were emitted
- bool EmitStatementsWithIndent(const ast::StatementList& stmts);
- /// Handles generating a switch statement
- /// @param stmt the statement to emit
- /// @returns true if the statement was emitted
- bool EmitSwitch(const ast::SwitchStatement* stmt);
- /// Handles generating type
- /// @param out the output of the expression stream
- /// @param type the type to generate
- /// @returns true if the type is emitted
- bool EmitType(std::ostream& out, const ast::Type* type);
- /// Handles generating a struct declaration
- /// @param str the struct
- /// @returns true if the struct is emitted
- bool EmitStructType(const ast::Struct* str);
- /// Handles emitting an image format
- /// @param out the output of the expression stream
- /// @param fmt the format to generate
- /// @returns true if the format is emitted
- bool EmitImageFormat(std::ostream& out, const ast::TexelFormat fmt);
- /// Handles emitting an access control
- /// @param out the output of the expression stream
- /// @param access the access to generate
- /// @returns true if the access is emitted
- bool EmitAccess(std::ostream& out, const ast::Access access);
- /// Handles a unary op expression
- /// @param out the output of the expression stream
- /// @param expr the expression to emit
- /// @returns true if the expression was emitted
- bool EmitUnaryOp(std::ostream& out, const ast::UnaryOpExpression* expr);
- /// Handles generating a variable
- /// @param out the output of the expression stream
- /// @param var the variable to generate
- /// @returns true if the variable was emitted
- bool EmitVariable(std::ostream& out, const ast::Variable* var);
-  /// Handles generating an attribute list
- /// @param out the output of the expression stream
- /// @param attrs the attribute list
- /// @returns true if the attributes were emitted
- bool EmitAttributes(std::ostream& out, const ast::AttributeList& attrs);
+    /// Handles generating an enable directive
+ /// @param enable the enable node
+ /// @returns true if the enable directive was emitted
+ bool EmitEnable(const ast::Enable* enable);
+ /// Handles generating a declared type
+ /// @param ty the declared type to generate
+ /// @returns true if the declared type was emitted
+ bool EmitTypeDecl(const ast::TypeDecl* ty);
+ /// Handles an index accessor expression
+ /// @param out the output of the expression stream
+ /// @param expr the expression to emit
+ /// @returns true if the index accessor was emitted
+ bool EmitIndexAccessor(std::ostream& out, const ast::IndexAccessorExpression* expr);
+ /// Handles an assignment statement
+ /// @param stmt the statement to emit
+ /// @returns true if the statement was emitted successfully
+ bool EmitAssign(const ast::AssignmentStatement* stmt);
+ /// Handles generating a binary expression
+ /// @param out the output of the expression stream
+ /// @param expr the binary expression
+ /// @returns true if the expression was emitted, false otherwise
+ bool EmitBinary(std::ostream& out, const ast::BinaryExpression* expr);
+ /// Handles generating a binary operator
+ /// @param out the output of the expression stream
+ /// @param op the binary operator
+ /// @returns true if the operator was emitted, false otherwise
+ bool EmitBinaryOp(std::ostream& out, const ast::BinaryOp op);
+ /// Handles generating a bitcast expression
+ /// @param out the output of the expression stream
+ /// @param expr the bitcast expression
+ /// @returns true if the bitcast was emitted
+ bool EmitBitcast(std::ostream& out, const ast::BitcastExpression* expr);
+ /// Handles a block statement
+ /// @param stmt the statement to emit
+ /// @returns true if the statement was emitted successfully
+ bool EmitBlock(const ast::BlockStatement* stmt);
+ /// Handles a break statement
+ /// @param stmt the statement to emit
+ /// @returns true if the statement was emitted successfully
+ bool EmitBreak(const ast::BreakStatement* stmt);
+ /// Handles generating a call expression
+ /// @param out the output of the expression stream
+ /// @param expr the call expression
+ /// @returns true if the call expression is emitted
+ bool EmitCall(std::ostream& out, const ast::CallExpression* expr);
+ /// Handles a case statement
+ /// @param stmt the statement
+    /// @returns true if the statement was emitted successfully
+ bool EmitCase(const ast::CaseStatement* stmt);
+ /// Handles a compound assignment statement
+ /// @param stmt the statement to emit
+ /// @returns true if the statement was emitted successfully
+ bool EmitCompoundAssign(const ast::CompoundAssignmentStatement* stmt);
+ /// Handles generating a literal expression
+ /// @param out the output of the expression stream
+    /// @param expr the literal expression
+ /// @returns true if the literal expression is emitted
+ bool EmitLiteral(std::ostream& out, const ast::LiteralExpression* expr);
+ /// Handles a continue statement
+ /// @param stmt the statement to emit
+ /// @returns true if the statement was emitted successfully
+ bool EmitContinue(const ast::ContinueStatement* stmt);
+    /// Handles generating an Expression
+ /// @param out the output of the expression stream
+ /// @param expr the expression
+ /// @returns true if the expression was emitted
+ bool EmitExpression(std::ostream& out, const ast::Expression* expr);
+ /// Handles generating a fallthrough statement
+ /// @param stmt the fallthrough statement
+ /// @returns true if the statement was successfully emitted
+ bool EmitFallthrough(const ast::FallthroughStatement* stmt);
+ /// Handles generating a function
+ /// @param func the function to generate
+ /// @returns true if the function was emitted
+ bool EmitFunction(const ast::Function* func);
+ /// Handles generating an identifier expression
+ /// @param out the output of the expression stream
+ /// @param expr the identifier expression
+ /// @returns true if the identifier was emitted
+ bool EmitIdentifier(std::ostream& out, const ast::IdentifierExpression* expr);
+ /// Handles an if statement
+ /// @param stmt the statement to emit
+ /// @returns true if the statement was successfully emitted
+ bool EmitIf(const ast::IfStatement* stmt);
+ /// Handles an increment/decrement statement
+ /// @param stmt the statement to emit
+ /// @returns true if the statement was successfully emitted
+ bool EmitIncrementDecrement(const ast::IncrementDecrementStatement* stmt);
+ /// Handles generating a discard statement
+ /// @param stmt the discard statement
+ /// @returns true if the statement was successfully emitted
+ bool EmitDiscard(const ast::DiscardStatement* stmt);
+ /// Handles a loop statement
+ /// @param stmt the statement to emit
+    /// @returns true if the statement was emitted
+ bool EmitLoop(const ast::LoopStatement* stmt);
+ /// Handles a for-loop statement
+ /// @param stmt the statement to emit
+    /// @returns true if the statement was emitted
+ bool EmitForLoop(const ast::ForLoopStatement* stmt);
+ /// Handles a member accessor expression
+ /// @param out the output of the expression stream
+ /// @param expr the member accessor expression
+ /// @returns true if the member accessor was emitted
+ bool EmitMemberAccessor(std::ostream& out, const ast::MemberAccessorExpression* expr);
+ /// Handles return statements
+ /// @param stmt the statement to emit
+ /// @returns true if the statement was successfully emitted
+ bool EmitReturn(const ast::ReturnStatement* stmt);
+    /// Handles a statement
+ /// @param stmt the statement to emit
+ /// @returns true if the statement was emitted
+ bool EmitStatement(const ast::Statement* stmt);
+ /// Handles a statement list
+ /// @param stmts the statements to emit
+ /// @returns true if the statements were emitted
+ bool EmitStatements(const ast::StatementList& stmts);
+ /// Handles a statement list with an increased indentation
+ /// @param stmts the statements to emit
+ /// @returns true if the statements were emitted
+ bool EmitStatementsWithIndent(const ast::StatementList& stmts);
+ /// Handles generating a switch statement
+ /// @param stmt the statement to emit
+ /// @returns true if the statement was emitted
+ bool EmitSwitch(const ast::SwitchStatement* stmt);
+ /// Handles generating type
+ /// @param out the output of the expression stream
+ /// @param type the type to generate
+ /// @returns true if the type is emitted
+ bool EmitType(std::ostream& out, const ast::Type* type);
+ /// Handles generating a struct declaration
+ /// @param str the struct
+ /// @returns true if the struct is emitted
+ bool EmitStructType(const ast::Struct* str);
+ /// Handles emitting an image format
+ /// @param out the output of the expression stream
+ /// @param fmt the format to generate
+ /// @returns true if the format is emitted
+ bool EmitImageFormat(std::ostream& out, const ast::TexelFormat fmt);
+ /// Handles emitting an access control
+ /// @param out the output of the expression stream
+ /// @param access the access to generate
+ /// @returns true if the access is emitted
+ bool EmitAccess(std::ostream& out, const ast::Access access);
+ /// Handles a unary op expression
+ /// @param out the output of the expression stream
+ /// @param expr the expression to emit
+ /// @returns true if the expression was emitted
+ bool EmitUnaryOp(std::ostream& out, const ast::UnaryOpExpression* expr);
+ /// Handles generating a variable
+ /// @param out the output of the expression stream
+ /// @param var the variable to generate
+ /// @returns true if the variable was emitted
+ bool EmitVariable(std::ostream& out, const ast::Variable* var);
+    /// Handles generating an attribute list
+ /// @param out the output of the expression stream
+ /// @param attrs the attribute list
+ /// @returns true if the attributes were emitted
+ bool EmitAttributes(std::ostream& out, const ast::AttributeList& attrs);
};
} // namespace tint::writer::wgsl
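
The re-indented generator_impl.h above declares the WGSL writer's entry points: the constructor taking a const tint::Program*, Generate(), and the per-node Emit* methods that the unit tests below exercise directly. As a minimal sketch only (it is not part of this change, the helper name EmitWgslOrError is hypothetical, and it assumes an already-built, resolved tint::Program), the generator can be driven end to end as follows; error() and result() are the TextGenerator accessors the tests below also use:

    // Minimal usage sketch (not part of this change): emit a program as WGSL text.
    #include <string>

    #include "src/tint/writer/wgsl/generator_impl.h"

    std::string EmitWgslOrError(const tint::Program* program) {
        tint::writer::wgsl::GeneratorImpl gen(program);  // constructor declared above
        if (!gen.Generate()) {
            // Generate() returns false on failure; diagnostics are reported via error().
            return gen.error();
        }
        return gen.result();  // the emitted WGSL source
    }
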
diff --git a/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_alias_type_test.cc b/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_alias_type_test.cc
index bc6c365efb9..92a6868f794 100644
--- a/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_alias_type_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_alias_type_test.cc
@@ -20,28 +20,28 @@ namespace {
using WgslGeneratorImplTest = TestHelper;
TEST_F(WgslGeneratorImplTest, EmitAlias_F32) {
- auto* alias = Alias("a", ty.f32());
+ auto* alias = Alias("a", ty.f32());
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.EmitTypeDecl(alias)) << gen.error();
- EXPECT_EQ(gen.result(), R"(type a = f32;
+ ASSERT_TRUE(gen.EmitTypeDecl(alias)) << gen.error();
+ EXPECT_EQ(gen.result(), R"(type a = f32;
)");
}
TEST_F(WgslGeneratorImplTest, EmitTypeDecl_Struct) {
- auto* s = Structure("A", {
- Member("a", ty.f32()),
- Member("b", ty.i32()),
- });
+ auto* s = Structure("A", {
+ Member("a", ty.f32()),
+ Member("b", ty.i32()),
+ });
- auto* alias = Alias("B", ty.Of(s));
+ auto* alias = Alias("B", ty.Of(s));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.EmitTypeDecl(s)) << gen.error();
- ASSERT_TRUE(gen.EmitTypeDecl(alias)) << gen.error();
- EXPECT_EQ(gen.result(), R"(struct A {
+ ASSERT_TRUE(gen.EmitTypeDecl(s)) << gen.error();
+ ASSERT_TRUE(gen.EmitTypeDecl(alias)) << gen.error();
+ EXPECT_EQ(gen.result(), R"(struct A {
a : f32,
b : i32,
}
@@ -50,17 +50,17 @@ type B = A;
}
TEST_F(WgslGeneratorImplTest, EmitAlias_ToStruct) {
- auto* s = Structure("A", {
- Member("a", ty.f32()),
- Member("b", ty.i32()),
- });
+ auto* s = Structure("A", {
+ Member("a", ty.f32()),
+ Member("b", ty.i32()),
+ });
- auto* alias = Alias("B", ty.Of(s));
+ auto* alias = Alias("B", ty.Of(s));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.EmitTypeDecl(alias)) << gen.error();
- EXPECT_EQ(gen.result(), R"(type B = A;
+ ASSERT_TRUE(gen.EmitTypeDecl(alias)) << gen.error();
+ EXPECT_EQ(gen.result(), R"(type B = A;
)");
}
diff --git a/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_array_accessor_test.cc b/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_array_accessor_test.cc
index f0fc79fae4f..2b4e8b43346 100644
--- a/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_array_accessor_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_array_accessor_test.cc
@@ -14,35 +14,37 @@
#include "src/tint/writer/wgsl/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::wgsl {
namespace {
using WgslGeneratorImplTest = TestHelper;
TEST_F(WgslGeneratorImplTest, IndexAccessor) {
- Global("ary", ty.array<i32, 10>(), ast::StorageClass::kPrivate);
- auto* expr = IndexAccessor("ary", 5);
- WrapInFunction(expr);
+ Global("ary", ty.array<i32, 10>(), ast::StorageClass::kPrivate);
+ auto* expr = IndexAccessor("ary", 5_i);
+ WrapInFunction(expr);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
- EXPECT_EQ(out.str(), "ary[5]");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
+ EXPECT_EQ(out.str(), "ary[5i]");
}
TEST_F(WgslGeneratorImplTest, IndexAccessor_OfDref) {
- Global("ary", ty.array<i32, 10>(), ast::StorageClass::kPrivate);
+ Global("ary", ty.array<i32, 10>(), ast::StorageClass::kPrivate);
- auto* p = Const("p", nullptr, AddressOf("ary"));
- auto* expr = IndexAccessor(Deref("p"), 5);
- WrapInFunction(p, expr);
+ auto* p = Let("p", nullptr, AddressOf("ary"));
+ auto* expr = IndexAccessor(Deref("p"), 5_i);
+ WrapInFunction(p, expr);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
- EXPECT_EQ(out.str(), "(*(p))[5]");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
+ EXPECT_EQ(out.str(), "(*(p))[5i]");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_assign_test.cc b/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_assign_test.cc
index 39680ef3183..8d5e75efa1d 100644
--- a/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_assign_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_assign_test.cc
@@ -20,17 +20,17 @@ namespace {
using WgslGeneratorImplTest = TestHelper;
TEST_F(WgslGeneratorImplTest, Emit_Assign) {
- auto* lhs = Global("lhs", ty.i32(), ast::StorageClass::kPrivate);
- auto* rhs = Global("rhs", ty.i32(), ast::StorageClass::kPrivate);
- auto* assign = Assign(lhs, rhs);
- WrapInFunction(assign);
+ auto* lhs = Global("lhs", ty.i32(), ast::StorageClass::kPrivate);
+ auto* rhs = Global("rhs", ty.i32(), ast::StorageClass::kPrivate);
+ auto* assign = Assign(lhs, rhs);
+ WrapInFunction(assign);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(assign)) << gen.error();
- EXPECT_EQ(gen.result(), " lhs = rhs;\n");
+ ASSERT_TRUE(gen.EmitStatement(assign)) << gen.error();
+ EXPECT_EQ(gen.result(), " lhs = rhs;\n");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_binary_test.cc b/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_binary_test.cc
index 7999d99404b..acc41807551 100644
--- a/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_binary_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_binary_test.cc
@@ -18,62 +18,60 @@ namespace tint::writer::wgsl {
namespace {
struct BinaryData {
- const char* result;
- ast::BinaryOp op;
+ const char* result;
+ ast::BinaryOp op;
};
inline std::ostream& operator<<(std::ostream& out, BinaryData data) {
- out << data.op;
- return out;
+ out << data.op;
+ return out;
}
using WgslBinaryTest = TestParamHelper<BinaryData>;
TEST_P(WgslBinaryTest, Emit) {
- auto params = GetParam();
+ auto params = GetParam();
- auto op_ty = [&]() -> const ast::Type* {
- if (params.op == ast::BinaryOp::kLogicalAnd ||
- params.op == ast::BinaryOp::kLogicalOr) {
- return ty.bool_();
- } else {
- return ty.u32();
- }
- };
+ auto op_ty = [&]() -> const ast::Type* {
+ if (params.op == ast::BinaryOp::kLogicalAnd || params.op == ast::BinaryOp::kLogicalOr) {
+ return ty.bool_();
+ } else {
+ return ty.u32();
+ }
+ };
- Global("left", op_ty(), ast::StorageClass::kPrivate);
- Global("right", op_ty(), ast::StorageClass::kPrivate);
- auto* left = Expr("left");
- auto* right = Expr("right");
+ Global("left", op_ty(), ast::StorageClass::kPrivate);
+ Global("right", op_ty(), ast::StorageClass::kPrivate);
+ auto* left = Expr("left");
+ auto* right = Expr("right");
- auto* expr = create<ast::BinaryExpression>(params.op, left, right);
- WrapInFunction(expr);
+ auto* expr = create<ast::BinaryExpression>(params.op, left, right);
+ WrapInFunction(expr);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
- EXPECT_EQ(out.str(), params.result);
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
+ EXPECT_EQ(out.str(), params.result);
}
INSTANTIATE_TEST_SUITE_P(
WgslGeneratorImplTest,
WgslBinaryTest,
- testing::Values(
- BinaryData{"(left & right)", ast::BinaryOp::kAnd},
- BinaryData{"(left | right)", ast::BinaryOp::kOr},
- BinaryData{"(left ^ right)", ast::BinaryOp::kXor},
- BinaryData{"(left && right)", ast::BinaryOp::kLogicalAnd},
- BinaryData{"(left || right)", ast::BinaryOp::kLogicalOr},
- BinaryData{"(left == right)", ast::BinaryOp::kEqual},
- BinaryData{"(left != right)", ast::BinaryOp::kNotEqual},
- BinaryData{"(left < right)", ast::BinaryOp::kLessThan},
- BinaryData{"(left > right)", ast::BinaryOp::kGreaterThan},
- BinaryData{"(left <= right)", ast::BinaryOp::kLessThanEqual},
- BinaryData{"(left >= right)", ast::BinaryOp::kGreaterThanEqual},
- BinaryData{"(left << right)", ast::BinaryOp::kShiftLeft},
- BinaryData{"(left >> right)", ast::BinaryOp::kShiftRight},
- BinaryData{"(left + right)", ast::BinaryOp::kAdd},
- BinaryData{"(left - right)", ast::BinaryOp::kSubtract},
- BinaryData{"(left * right)", ast::BinaryOp::kMultiply},
- BinaryData{"(left / right)", ast::BinaryOp::kDivide},
- BinaryData{"(left % right)", ast::BinaryOp::kModulo}));
+ testing::Values(BinaryData{"(left & right)", ast::BinaryOp::kAnd},
+ BinaryData{"(left | right)", ast::BinaryOp::kOr},
+ BinaryData{"(left ^ right)", ast::BinaryOp::kXor},
+ BinaryData{"(left && right)", ast::BinaryOp::kLogicalAnd},
+ BinaryData{"(left || right)", ast::BinaryOp::kLogicalOr},
+ BinaryData{"(left == right)", ast::BinaryOp::kEqual},
+ BinaryData{"(left != right)", ast::BinaryOp::kNotEqual},
+ BinaryData{"(left < right)", ast::BinaryOp::kLessThan},
+ BinaryData{"(left > right)", ast::BinaryOp::kGreaterThan},
+ BinaryData{"(left <= right)", ast::BinaryOp::kLessThanEqual},
+ BinaryData{"(left >= right)", ast::BinaryOp::kGreaterThanEqual},
+ BinaryData{"(left << right)", ast::BinaryOp::kShiftLeft},
+ BinaryData{"(left >> right)", ast::BinaryOp::kShiftRight},
+ BinaryData{"(left + right)", ast::BinaryOp::kAdd},
+ BinaryData{"(left - right)", ast::BinaryOp::kSubtract},
+ BinaryData{"(left * right)", ast::BinaryOp::kMultiply},
+ BinaryData{"(left / right)", ast::BinaryOp::kDivide},
+ BinaryData{"(left % right)", ast::BinaryOp::kModulo}));
} // namespace
} // namespace tint::writer::wgsl
diff --git a/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_bitcast_test.cc b/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_bitcast_test.cc
index 08d3cb3135b..c71a8d1e7e7 100644
--- a/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_bitcast_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_bitcast_test.cc
@@ -14,20 +14,22 @@
#include "src/tint/writer/wgsl/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::wgsl {
namespace {
using WgslGeneratorImplTest = TestHelper;
TEST_F(WgslGeneratorImplTest, EmitExpression_Bitcast) {
- auto* bitcast = create<ast::BitcastExpression>(ty.f32(), Expr(1));
- WrapInFunction(bitcast);
+ auto* bitcast = create<ast::BitcastExpression>(ty.f32(), Expr(1_i));
+ WrapInFunction(bitcast);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, bitcast)) << gen.error();
- EXPECT_EQ(out.str(), "bitcast<f32>(1)");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, bitcast)) << gen.error();
+ EXPECT_EQ(out.str(), "bitcast<f32>(1i)");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_block_test.cc b/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_block_test.cc
index f3ba848da7a..ae0120056fc 100644
--- a/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_block_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_block_test.cc
@@ -20,15 +20,15 @@ namespace {
using WgslGeneratorImplTest = TestHelper;
TEST_F(WgslGeneratorImplTest, Emit_Block) {
- auto* b = Block(create<ast::DiscardStatement>());
- WrapInFunction(b);
+ auto* b = Block(create<ast::DiscardStatement>());
+ WrapInFunction(b);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(b)) << gen.error();
- EXPECT_EQ(gen.result(), R"( {
+ ASSERT_TRUE(gen.EmitStatement(b)) << gen.error();
+ EXPECT_EQ(gen.result(), R"( {
discard;
}
)");
diff --git a/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_break_test.cc b/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_break_test.cc
index a8952777749..8f7275a353c 100644
--- a/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_break_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_break_test.cc
@@ -20,15 +20,15 @@ namespace {
using WgslGeneratorImplTest = TestHelper;
TEST_F(WgslGeneratorImplTest, Emit_Break) {
- auto* b = create<ast::BreakStatement>();
- WrapInFunction(Loop(Block(b)));
+ auto* b = create<ast::BreakStatement>();
+ WrapInFunction(Loop(Block(b)));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(b)) << gen.error();
- EXPECT_EQ(gen.result(), " break;\n");
+ ASSERT_TRUE(gen.EmitStatement(b)) << gen.error();
+ EXPECT_EQ(gen.result(), " break;\n");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_call_test.cc b/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_call_test.cc
index 449e117fd78..6a7e0774369 100644
--- a/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_call_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_call_test.cc
@@ -15,63 +15,65 @@
#include "src/tint/ast/call_statement.h"
#include "src/tint/writer/wgsl/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::wgsl {
namespace {
using WgslGeneratorImplTest = TestHelper;
TEST_F(WgslGeneratorImplTest, EmitExpression_Call_WithoutParams) {
- Func("my_func", {}, ty.f32(), {Return(1.23f)});
+ Func("my_func", {}, ty.f32(), {Return(1.23_f)});
- auto* call = Call("my_func");
- WrapInFunction(call);
+ auto* call = Call("my_func");
+ WrapInFunction(call);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, call)) << gen.error();
- EXPECT_EQ(out.str(), "my_func()");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, call)) << gen.error();
+ EXPECT_EQ(out.str(), "my_func()");
}
TEST_F(WgslGeneratorImplTest, EmitExpression_Call_WithParams) {
- Func("my_func",
- {
- Param(Sym(), ty.f32()),
- Param(Sym(), ty.f32()),
- },
- ty.f32(), {Return(1.23f)});
- Global("param1", ty.f32(), ast::StorageClass::kPrivate);
- Global("param2", ty.f32(), ast::StorageClass::kPrivate);
-
- auto* call = Call("my_func", "param1", "param2");
- WrapInFunction(call);
-
- GeneratorImpl& gen = Build();
-
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, call)) << gen.error();
- EXPECT_EQ(out.str(), "my_func(param1, param2)");
+ Func("my_func",
+ {
+ Param(Sym(), ty.f32()),
+ Param(Sym(), ty.f32()),
+ },
+ ty.f32(), {Return(1.23_f)});
+ Global("param1", ty.f32(), ast::StorageClass::kPrivate);
+ Global("param2", ty.f32(), ast::StorageClass::kPrivate);
+
+ auto* call = Call("my_func", "param1", "param2");
+ WrapInFunction(call);
+
+ GeneratorImpl& gen = Build();
+
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, call)) << gen.error();
+ EXPECT_EQ(out.str(), "my_func(param1, param2)");
}
TEST_F(WgslGeneratorImplTest, EmitStatement_Call) {
- Func("my_func",
- {
- Param(Sym(), ty.f32()),
- Param(Sym(), ty.f32()),
- },
- ty.void_(), ast::StatementList{}, ast::AttributeList{});
- Global("param1", ty.f32(), ast::StorageClass::kPrivate);
- Global("param2", ty.f32(), ast::StorageClass::kPrivate);
-
- auto* call = Call("my_func", "param1", "param2");
- auto* stmt = CallStmt(call);
- WrapInFunction(stmt);
-
- GeneratorImpl& gen = Build();
-
- gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(stmt)) << gen.error();
- EXPECT_EQ(gen.result(), " my_func(param1, param2);\n");
+ Func("my_func",
+ {
+ Param(Sym(), ty.f32()),
+ Param(Sym(), ty.f32()),
+ },
+ ty.void_(), ast::StatementList{}, ast::AttributeList{});
+ Global("param1", ty.f32(), ast::StorageClass::kPrivate);
+ Global("param2", ty.f32(), ast::StorageClass::kPrivate);
+
+ auto* call = Call("my_func", "param1", "param2");
+ auto* stmt = CallStmt(call);
+ WrapInFunction(stmt);
+
+ GeneratorImpl& gen = Build();
+
+ gen.increment_indent();
+ ASSERT_TRUE(gen.EmitStatement(stmt)) << gen.error();
+ EXPECT_EQ(gen.result(), " my_func(param1, param2);\n");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_case_test.cc b/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_case_test.cc
index 66155bad809..c12997e3ec6 100644
--- a/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_case_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_case_test.cc
@@ -14,54 +14,54 @@
#include "src/tint/writer/wgsl/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::wgsl {
namespace {
using WgslGeneratorImplTest = TestHelper;
TEST_F(WgslGeneratorImplTest, Emit_Case) {
- auto* s = Switch(1, Case(Expr(5), Block(create<ast::BreakStatement>())),
- DefaultCase());
- WrapInFunction(s);
+ auto* s = Switch(1_i, Case(Expr(5_i), Block(create<ast::BreakStatement>())), DefaultCase());
+ WrapInFunction(s);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitCase(s->body[0])) << gen.error();
- EXPECT_EQ(gen.result(), R"( case 5: {
+ ASSERT_TRUE(gen.EmitCase(s->body[0])) << gen.error();
+ EXPECT_EQ(gen.result(), R"( case 5i: {
break;
}
)");
}
TEST_F(WgslGeneratorImplTest, Emit_Case_MultipleSelectors) {
- auto* s =
- Switch(1, Case({Expr(5), Expr(6)}, Block(create<ast::BreakStatement>())),
- DefaultCase());
- WrapInFunction(s);
+ auto* s = Switch(1_i, Case({Expr(5_i), Expr(6_i)}, Block(create<ast::BreakStatement>())),
+ DefaultCase());
+ WrapInFunction(s);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitCase(s->body[0])) << gen.error();
- EXPECT_EQ(gen.result(), R"( case 5, 6: {
+ ASSERT_TRUE(gen.EmitCase(s->body[0])) << gen.error();
+ EXPECT_EQ(gen.result(), R"( case 5i, 6i: {
break;
}
)");
}
TEST_F(WgslGeneratorImplTest, Emit_Case_Default) {
- auto* s = Switch(1, DefaultCase(Block(create<ast::BreakStatement>())));
- WrapInFunction(s);
+ auto* s = Switch(1_i, DefaultCase(Block(create<ast::BreakStatement>())));
+ WrapInFunction(s);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitCase(s->body[0])) << gen.error();
- EXPECT_EQ(gen.result(), R"( default: {
+ ASSERT_TRUE(gen.EmitCase(s->body[0])) << gen.error();
+ EXPECT_EQ(gen.result(), R"( default: {
break;
}
)");
diff --git a/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_cast_test.cc b/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_cast_test.cc
index 3e71c32ef68..c423943e40f 100644
--- a/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_cast_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_cast_test.cc
@@ -14,31 +14,33 @@
#include "src/tint/writer/wgsl/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::wgsl {
namespace {
using WgslGeneratorImplTest = TestHelper;
TEST_F(WgslGeneratorImplTest, EmitExpression_Cast_Scalar) {
- auto* cast = Construct<f32>(1);
- WrapInFunction(cast);
+ auto* cast = Construct<f32>(1_i);
+ WrapInFunction(cast);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, cast)) << gen.error();
- EXPECT_EQ(out.str(), "f32(1)");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, cast)) << gen.error();
+ EXPECT_EQ(out.str(), "f32(1i)");
}
TEST_F(WgslGeneratorImplTest, EmitExpression_Cast_Vector) {
- auto* cast = vec3<f32>(vec3<i32>(1, 2, 3));
- WrapInFunction(cast);
+ auto* cast = vec3<f32>(vec3<i32>(1_i, 2_i, 3_i));
+ WrapInFunction(cast);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, cast)) << gen.error();
- EXPECT_EQ(out.str(), "vec3<f32>(vec3<i32>(1, 2, 3))");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, cast)) << gen.error();
+ EXPECT_EQ(out.str(), "vec3<f32>(vec3<i32>(1i, 2i, 3i))");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_constructor_test.cc b/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_constructor_test.cc
index 7b99e436df8..ae9f2b74f6d 100644
--- a/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_constructor_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_constructor_test.cc
@@ -15,117 +15,117 @@
#include "gmock/gmock.h"
#include "src/tint/writer/wgsl/test_helper.h"
+using ::testing::HasSubstr;
+
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::wgsl {
namespace {
-using ::testing::HasSubstr;
-
using WgslGeneratorImplTest = TestHelper;
TEST_F(WgslGeneratorImplTest, EmitConstructor_Bool) {
- WrapInFunction(Expr(false));
+ WrapInFunction(Expr(false));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr("false"));
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr("false"));
}
TEST_F(WgslGeneratorImplTest, EmitConstructor_Int) {
- WrapInFunction(Expr(-12345));
+ WrapInFunction(Expr(-12345_i));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr("-12345"));
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr("-12345"));
}
TEST_F(WgslGeneratorImplTest, EmitConstructor_UInt) {
- WrapInFunction(Expr(56779u));
+ WrapInFunction(Expr(56779_u));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr("56779u"));
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr("56779u"));
}
TEST_F(WgslGeneratorImplTest, EmitConstructor_Float) {
- // Use a number close to 1<<30 but whose decimal representation ends in 0.
- WrapInFunction(Expr(static_cast<float>((1 << 30) - 4)));
+ // Use a number close to 1<<30 but whose decimal representation ends in 0.
+ WrapInFunction(Expr(f32((1 << 30) - 4)));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr("1073741824.0"));
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr("1073741824.0f"));
}
TEST_F(WgslGeneratorImplTest, EmitConstructor_Type_Float) {
- WrapInFunction(Construct<f32>(Expr(-1.2e-5f)));
+ WrapInFunction(Construct<f32>(Expr(-1.2e-5_f)));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr("f32(-0.000012)"));
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr("f32(-0.000012f)"));
}
TEST_F(WgslGeneratorImplTest, EmitConstructor_Type_Bool) {
- WrapInFunction(Construct<bool>(true));
+ WrapInFunction(Construct<bool>(true));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr("bool(true)"));
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr("bool(true)"));
}
TEST_F(WgslGeneratorImplTest, EmitConstructor_Type_Int) {
- WrapInFunction(Construct<i32>(-12345));
+ WrapInFunction(Construct<i32>(-12345_i));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr("i32(-12345)"));
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr("i32(-12345i)"));
}
TEST_F(WgslGeneratorImplTest, EmitConstructor_Type_Uint) {
- WrapInFunction(Construct<u32>(12345u));
+ WrapInFunction(Construct<u32>(12345_u));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr("u32(12345u)"));
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr("u32(12345u)"));
}
TEST_F(WgslGeneratorImplTest, EmitConstructor_Type_Vec) {
- WrapInFunction(vec3<f32>(1.f, 2.f, 3.f));
+ WrapInFunction(vec3<f32>(1_f, 2_f, 3_f));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr("vec3<f32>(1.0, 2.0, 3.0)"));
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr("vec3<f32>(1.0f, 2.0f, 3.0f)"));
}
TEST_F(WgslGeneratorImplTest, EmitConstructor_Type_Mat) {
- WrapInFunction(
- mat2x3<f32>(vec3<f32>(1.f, 2.f, 3.f), vec3<f32>(3.f, 4.f, 5.f)));
+ WrapInFunction(mat2x3<f32>(vec3<f32>(1_f, 2_f, 3_f), vec3<f32>(3_f, 4_f, 5_f)));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(), HasSubstr("mat2x3<f32>(vec3<f32>(1.0, 2.0, 3.0), "
- "vec3<f32>(3.0, 4.0, 5.0))"));
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(), HasSubstr("mat2x3<f32>(vec3<f32>(1.0f, 2.0f, 3.0f), "
+ "vec3<f32>(3.0f, 4.0f, 5.0f))"));
}
TEST_F(WgslGeneratorImplTest, EmitConstructor_Type_Array) {
- WrapInFunction(
- Construct(ty.array(ty.vec3<f32>(), 3), vec3<f32>(1.0f, 2.0f, 3.0f),
- vec3<f32>(4.0f, 5.0f, 6.0f), vec3<f32>(7.0f, 8.0f, 9.0f)));
+ WrapInFunction(Construct(ty.array(ty.vec3<f32>(), 3_u), vec3<f32>(1_f, 2_f, 3_f),
+ vec3<f32>(4_f, 5_f, 6_f), vec3<f32>(7_f, 8_f, 9_f)));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_THAT(gen.result(),
- HasSubstr("array<vec3<f32>, 3>(vec3<f32>(1.0, 2.0, 3.0), "
- "vec3<f32>(4.0, 5.0, 6.0), vec3<f32>(7.0, 8.0, 9.0))"));
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_THAT(gen.result(),
+ HasSubstr("array<vec3<f32>, 3u>(vec3<f32>(1.0f, 2.0f, 3.0f), "
+ "vec3<f32>(4.0f, 5.0f, 6.0f), vec3<f32>(7.0f, 8.0f, 9.0f))"));
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_continue_test.cc b/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_continue_test.cc
index 252a84256a5..46f2bbd2fc5 100644
--- a/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_continue_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_continue_test.cc
@@ -20,17 +20,17 @@ namespace {
using WgslGeneratorImplTest = TestHelper;
TEST_F(WgslGeneratorImplTest, Emit_Continue) {
- auto* c = Continue();
+ auto* c = Continue();
- WrapInFunction(Loop(Block(If(false, Block(Break())), //
- c)));
+ WrapInFunction(Loop(Block(If(false, Block(Break())), //
+ c)));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(c)) << gen.error();
- EXPECT_EQ(gen.result(), " continue;\n");
+ ASSERT_TRUE(gen.EmitStatement(c)) << gen.error();
+ EXPECT_EQ(gen.result(), " continue;\n");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_discard_test.cc b/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_discard_test.cc
index 8e508d3d13f..db176e9aec2 100644
--- a/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_discard_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_discard_test.cc
@@ -20,15 +20,15 @@ namespace {
using WgslGeneratorImplTest = TestHelper;
TEST_F(WgslGeneratorImplTest, Emit_Discard) {
- auto* stmt = create<ast::DiscardStatement>();
- WrapInFunction(stmt);
+ auto* stmt = create<ast::DiscardStatement>();
+ WrapInFunction(stmt);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(stmt)) << gen.error();
- EXPECT_EQ(gen.result(), " discard;\n");
+ ASSERT_TRUE(gen.EmitStatement(stmt)) << gen.error();
+ EXPECT_EQ(gen.result(), " discard;\n");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_enable_test.cc b/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_enable_test.cc
new file mode 100644
index 00000000000..503a9a0022c
--- /dev/null
+++ b/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_enable_test.cc
@@ -0,0 +1,33 @@
+// Copyright 2022 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/tint/writer/wgsl/test_helper.h"
+
+namespace tint::writer::wgsl {
+namespace {
+
+using WgslGeneratorImplTest = TestHelper;
+
+TEST_F(WgslGeneratorImplTest, Emit_Enable) {
+ auto* enable = Enable(ast::Extension::kF16);
+
+ GeneratorImpl& gen = Build();
+
+ ASSERT_TRUE(gen.EmitEnable(enable));
+ EXPECT_EQ(gen.result(), R"(enable f16;
+)");
+}
+
+} // namespace
+} // namespace tint::writer::wgsl
diff --git a/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_fallthrough_test.cc b/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_fallthrough_test.cc
index 5925460ab97..2b120514b2a 100644
--- a/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_fallthrough_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_fallthrough_test.cc
@@ -14,23 +14,25 @@
#include "src/tint/writer/wgsl/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::wgsl {
namespace {
using WgslGeneratorImplTest = TestHelper;
TEST_F(WgslGeneratorImplTest, Emit_Fallthrough) {
- auto* f = create<ast::FallthroughStatement>();
- WrapInFunction(Switch(1, //
- Case(Expr(1), Block(f)), //
- DefaultCase()));
+ auto* f = create<ast::FallthroughStatement>();
+ WrapInFunction(Switch(1_i, //
+ Case(Expr(1_i), Block(f)), //
+ DefaultCase()));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(f)) << gen.error();
- EXPECT_EQ(gen.result(), " fallthrough;\n");
+ ASSERT_TRUE(gen.EmitStatement(f)) << gen.error();
+ EXPECT_EQ(gen.result(), " fallthrough;\n");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_function_test.cc b/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_function_test.cc
index 5c053acfbc4..e34292bb157 100644
--- a/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_function_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_function_test.cc
@@ -17,85 +17,83 @@
#include "src/tint/ast/workgroup_attribute.h"
#include "src/tint/writer/wgsl/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::wgsl {
namespace {
using WgslGeneratorImplTest = TestHelper;
TEST_F(WgslGeneratorImplTest, Emit_Function) {
- auto* func = Func("my_func", ast::VariableList{}, ty.void_(),
- ast::StatementList{
- Return(),
- },
- ast::AttributeList{});
+ auto* func = Func("my_func", ast::VariableList{}, ty.void_(),
+ ast::StatementList{
+ Return(),
+ },
+ ast::AttributeList{});
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitFunction(func));
- EXPECT_EQ(gen.result(), R"( fn my_func() {
+ ASSERT_TRUE(gen.EmitFunction(func));
+ EXPECT_EQ(gen.result(), R"( fn my_func() {
return;
}
)");
}
TEST_F(WgslGeneratorImplTest, Emit_Function_WithParams) {
- auto* func = Func(
- "my_func", ast::VariableList{Param("a", ty.f32()), Param("b", ty.i32())},
- ty.void_(),
- ast::StatementList{
- Return(),
- },
- ast::AttributeList{});
+ auto* func =
+ Func("my_func", ast::VariableList{Param("a", ty.f32()), Param("b", ty.i32())}, ty.void_(),
+ ast::StatementList{
+ Return(),
+ },
+ ast::AttributeList{});
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitFunction(func));
- EXPECT_EQ(gen.result(), R"( fn my_func(a : f32, b : i32) {
+ ASSERT_TRUE(gen.EmitFunction(func));
+ EXPECT_EQ(gen.result(), R"( fn my_func(a : f32, b : i32) {
return;
}
)");
}
TEST_F(WgslGeneratorImplTest, Emit_Function_WithAttribute_WorkgroupSize) {
- auto* func = Func("my_func", ast::VariableList{}, ty.void_(),
- ast::StatementList{Return()},
- ast::AttributeList{
- Stage(ast::PipelineStage::kCompute),
- WorkgroupSize(2, 4, 6),
- });
+ auto* func = Func("my_func", ast::VariableList{}, ty.void_(), ast::StatementList{Return()},
+ ast::AttributeList{
+ Stage(ast::PipelineStage::kCompute),
+ WorkgroupSize(2_i, 4_i, 6_i),
+ });
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitFunction(func));
- EXPECT_EQ(gen.result(), R"( @stage(compute) @workgroup_size(2, 4, 6)
+ ASSERT_TRUE(gen.EmitFunction(func));
+ EXPECT_EQ(gen.result(), R"( @compute @workgroup_size(2i, 4i, 6i)
fn my_func() {
return;
}
)");
}
-TEST_F(WgslGeneratorImplTest,
- Emit_Function_WithAttribute_WorkgroupSize_WithIdent) {
- GlobalConst("height", ty.i32(), Expr(2));
- auto* func = Func("my_func", ast::VariableList{}, ty.void_(),
- ast::StatementList{Return()},
- ast::AttributeList{
- Stage(ast::PipelineStage::kCompute),
- WorkgroupSize(2, "height"),
- });
+TEST_F(WgslGeneratorImplTest, Emit_Function_WithAttribute_WorkgroupSize_WithIdent) {
+ GlobalConst("height", ty.i32(), Expr(2_i));
+ auto* func = Func("my_func", ast::VariableList{}, ty.void_(), ast::StatementList{Return()},
+ ast::AttributeList{
+ Stage(ast::PipelineStage::kCompute),
+ WorkgroupSize(2_i, "height"),
+ });
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitFunction(func));
- EXPECT_EQ(gen.result(), R"( @stage(compute) @workgroup_size(2, height)
+ ASSERT_TRUE(gen.EmitFunction(func));
+ EXPECT_EQ(gen.result(), R"( @compute @workgroup_size(2i, height)
fn my_func() {
return;
}
@@ -103,122 +101,118 @@ TEST_F(WgslGeneratorImplTest,
}
TEST_F(WgslGeneratorImplTest, Emit_Function_EntryPoint_Parameters) {
- auto* vec4 = ty.vec4<f32>();
- auto* coord = Param("coord", vec4, {Builtin(ast::Builtin::kPosition)});
- auto* loc1 = Param("loc1", ty.f32(), {Location(1u)});
- auto* func = Func("frag_main", ast::VariableList{coord, loc1}, ty.void_(),
- ast::StatementList{},
- ast::AttributeList{
- Stage(ast::PipelineStage::kFragment),
- });
+ auto* vec4 = ty.vec4<f32>();
+ auto* coord = Param("coord", vec4, {Builtin(ast::Builtin::kPosition)});
+ auto* loc1 = Param("loc1", ty.f32(), {Location(1u)});
+ auto* func = Func("frag_main", ast::VariableList{coord, loc1}, ty.void_(), ast::StatementList{},
+ ast::AttributeList{
+ Stage(ast::PipelineStage::kFragment),
+ });
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitFunction(func));
- EXPECT_EQ(gen.result(), R"( @stage(fragment)
+ ASSERT_TRUE(gen.EmitFunction(func));
+ EXPECT_EQ(gen.result(), R"( @fragment
fn frag_main(@builtin(position) coord : vec4<f32>, @location(1) loc1 : f32) {
}
)");
}
TEST_F(WgslGeneratorImplTest, Emit_Function_EntryPoint_ReturnValue) {
- auto* func = Func("frag_main", ast::VariableList{}, ty.f32(),
- ast::StatementList{
- Return(1.f),
- },
- ast::AttributeList{
- Stage(ast::PipelineStage::kFragment),
- },
- ast::AttributeList{
- Location(1u),
- });
-
- GeneratorImpl& gen = Build();
-
- gen.increment_indent();
-
- ASSERT_TRUE(gen.EmitFunction(func));
- EXPECT_EQ(gen.result(), R"( @stage(fragment)
+ auto* func = Func("frag_main", ast::VariableList{}, ty.f32(),
+ ast::StatementList{
+ Return(1_f),
+ },
+ ast::AttributeList{
+ Stage(ast::PipelineStage::kFragment),
+ },
+ ast::AttributeList{
+ Location(1u),
+ });
+
+ GeneratorImpl& gen = Build();
+
+ gen.increment_indent();
+
+ ASSERT_TRUE(gen.EmitFunction(func));
+ EXPECT_EQ(gen.result(), R"( @fragment
fn frag_main() -> @location(1) f32 {
- return 1.0;
+ return 1.0f;
}
)");
}
// https://crbug.com/tint/297
-TEST_F(WgslGeneratorImplTest,
- Emit_Function_Multiple_EntryPoint_With_Same_ModuleVar) {
- // struct Data {
- // d : f32;
- // };
- // @binding(0) @group(0) var<storage> data : Data;
- //
- // @stage(compute) @workgroup_size(1)
- // fn a() {
- // return;
- // }
- //
- // @stage(compute) @workgroup_size(1)
- // fn b() {
- // return;
- // }
-
- auto* s = Structure("Data", {Member("d", ty.f32())});
-
- Global("data", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kReadWrite,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(0),
- });
-
- {
- auto* var = Var("v", ty.f32(), ast::StorageClass::kNone,
- MemberAccessor("data", "d"));
-
- Func("a", ast::VariableList{}, ty.void_(),
- ast::StatementList{
- Decl(var),
- Return(),
- },
- ast::AttributeList{
- Stage(ast::PipelineStage::kCompute),
- WorkgroupSize(1),
- });
- }
-
- {
- auto* var = Var("v", ty.f32(), ast::StorageClass::kNone,
- MemberAccessor("data", "d"));
-
- Func("b", ast::VariableList{}, ty.void_(),
- ast::StatementList{
- Decl(var),
- Return(),
- },
- ast::AttributeList{
- Stage(ast::PipelineStage::kCompute),
- WorkgroupSize(1),
- });
- }
-
- GeneratorImpl& gen = Build();
-
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(struct Data {
+TEST_F(WgslGeneratorImplTest, Emit_Function_Multiple_EntryPoint_With_Same_ModuleVar) {
+ // struct Data {
+ // d : f32;
+ // };
+ // @binding(0) @group(0) var<storage> data : Data;
+ //
+ // @compute @workgroup_size(1)
+ // fn a() {
+ // return;
+ // }
+ //
+ // @compute @workgroup_size(1)
+ // fn b() {
+ // return;
+ // }
+
+ auto* s = Structure("Data", {Member("d", ty.f32())});
+
+ Global("data", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kReadWrite,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(0),
+ });
+
+ {
+ auto* var = Var("v", ty.f32(), ast::StorageClass::kNone, MemberAccessor("data", "d"));
+
+ Func("a", ast::VariableList{}, ty.void_(),
+ ast::StatementList{
+ Decl(var),
+ Return(),
+ },
+ ast::AttributeList{
+ Stage(ast::PipelineStage::kCompute),
+ WorkgroupSize(1_i),
+ });
+ }
+
+ {
+ auto* var = Var("v", ty.f32(), ast::StorageClass::kNone, MemberAccessor("data", "d"));
+
+ Func("b", ast::VariableList{}, ty.void_(),
+ ast::StatementList{
+ Decl(var),
+ Return(),
+ },
+ ast::AttributeList{
+ Stage(ast::PipelineStage::kCompute),
+ WorkgroupSize(1_i),
+ });
+ }
+
+ GeneratorImpl& gen = Build();
+
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(struct Data {
d : f32,
}
@binding(0) @group(0) var<storage, read_write> data : Data;
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1i)
fn a() {
var v : f32 = data.d;
return;
}
-@stage(compute) @workgroup_size(1)
+@compute @workgroup_size(1i)
fn b() {
var v : f32 = data.d;
return;
diff --git a/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_global_decl_test.cc b/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_global_decl_test.cc
index 251c577eb6d..48f1276790c 100644
--- a/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_global_decl_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_global_decl_test.cc
@@ -14,26 +14,28 @@
#include "src/tint/ast/stage_attribute.h"
#include "src/tint/ast/variable_decl_statement.h"
-#include "src/tint/sem/sampled_texture_type.h"
+#include "src/tint/sem/sampled_texture.h"
#include "src/tint/writer/wgsl/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::wgsl {
namespace {
using WgslGeneratorImplTest = TestHelper;
TEST_F(WgslGeneratorImplTest, Emit_GlobalDeclAfterFunction) {
- auto* func_var = Var("a", ty.f32());
- WrapInFunction(func_var);
+ auto* func_var = Var("a", ty.f32());
+ WrapInFunction(func_var);
- Global("a", ty.f32(), ast::StorageClass::kPrivate);
+ Global("a", ty.f32(), ast::StorageClass::kPrivate);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"( @stage(compute) @workgroup_size(1, 1, 1)
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"( @compute @workgroup_size(1i, 1i, 1i)
fn test_function() {
var a : f32;
}
@@ -43,37 +45,37 @@ TEST_F(WgslGeneratorImplTest, Emit_GlobalDeclAfterFunction) {
}
TEST_F(WgslGeneratorImplTest, Emit_GlobalsInterleaved) {
- Global("a0", ty.f32(), ast::StorageClass::kPrivate);
+ Global("a0", ty.f32(), ast::StorageClass::kPrivate);
- auto* s0 = Structure("S0", {Member("a", ty.i32())});
+ auto* s0 = Structure("S0", {Member("a", ty.i32())});
- Func("func", ast::VariableList{}, ty.f32(),
- ast::StatementList{
- Return("a0"),
- },
- ast::AttributeList{});
+ Func("func", ast::VariableList{}, ty.f32(),
+ ast::StatementList{
+ Return("a0"),
+ },
+ ast::AttributeList{});
- Global("a1", ty.f32(), ast::StorageClass::kPrivate);
+ Global("a1", ty.f32(), ast::StorageClass::kPrivate);
- auto* s1 = Structure("S1", {Member("a", ty.i32())});
+ auto* s1 = Structure("S1", {Member("a", ty.i32())});
- Func("main", ast::VariableList{}, ty.void_(),
- ast::StatementList{
- Decl(Var("s0", ty.Of(s0))),
- Decl(Var("s1", ty.Of(s1))),
- Assign("a1", Call("func")),
- },
- ast::AttributeList{
- Stage(ast::PipelineStage::kCompute),
- WorkgroupSize(1),
- });
+ Func("main", ast::VariableList{}, ty.void_(),
+ ast::StatementList{
+ Decl(Var("s0", ty.Of(s0))),
+ Decl(Var("s1", ty.Of(s1))),
+ Assign("a1", Call("func")),
+ },
+ ast::AttributeList{
+ Stage(ast::PipelineStage::kCompute),
+ WorkgroupSize(1_i),
+ });
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"( var<private> a0 : f32;
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"( var<private> a0 : f32;
struct S0 {
a : i32,
@@ -89,7 +91,7 @@ TEST_F(WgslGeneratorImplTest, Emit_GlobalsInterleaved) {
a : i32,
}
- @stage(compute) @workgroup_size(1)
+ @compute @workgroup_size(1i)
fn main() {
var s0 : S0;
var s1 : S1;
@@ -99,46 +101,46 @@ TEST_F(WgslGeneratorImplTest, Emit_GlobalsInterleaved) {
}
TEST_F(WgslGeneratorImplTest, Emit_Global_Sampler) {
- Global("s", ty.sampler(ast::SamplerKind::kSampler),
- ast::AttributeList{
- create<ast::GroupAttribute>(0),
- create<ast::BindingAttribute>(0),
- });
+ Global("s", ty.sampler(ast::SamplerKind::kSampler),
+ ast::AttributeList{
+ create<ast::GroupAttribute>(0),
+ create<ast::BindingAttribute>(0),
+ });
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), " @group(0) @binding(0) var s : sampler;\n");
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), " @group(0) @binding(0) var s : sampler;\n");
}
TEST_F(WgslGeneratorImplTest, Emit_Global_Texture) {
- auto* st = ty.sampled_texture(ast::TextureDimension::k1d, ty.f32());
- Global("t", st,
- ast::AttributeList{
- create<ast::GroupAttribute>(0),
- create<ast::BindingAttribute>(0),
- });
+ auto* st = ty.sampled_texture(ast::TextureDimension::k1d, ty.f32());
+ Global("t", st,
+ ast::AttributeList{
+ create<ast::GroupAttribute>(0),
+ create<ast::BindingAttribute>(0),
+ });
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), " @group(0) @binding(0) var t : texture_1d<f32>;\n");
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), " @group(0) @binding(0) var t : texture_1d<f32>;\n");
}
TEST_F(WgslGeneratorImplTest, Emit_OverridableConstants) {
- Override("a", ty.f32(), nullptr);
- Override("b", ty.f32(), nullptr, {Id(7u)});
+ Override("a", ty.f32(), nullptr);
+ Override("b", ty.f32(), nullptr, {Id(7u)});
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"( override a : f32;
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"( override a : f32;
@id(7) override b : f32;
)");
diff --git a/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_identifier_test.cc b/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_identifier_test.cc
index c1cd64d78f2..c6bccf671a7 100644
--- a/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_identifier_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_identifier_test.cc
@@ -20,15 +20,15 @@ namespace {
using WgslGeneratorImplTest = TestHelper;
TEST_F(WgslGeneratorImplTest, EmitIdentifierExpression_Single) {
- Global("glsl", ty.f32(), ast::StorageClass::kPrivate);
- auto* i = Expr("glsl");
- WrapInFunction(i);
+ Global("glsl", ty.f32(), ast::StorageClass::kPrivate);
+ auto* i = Expr("glsl");
+ WrapInFunction(i);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, i)) << gen.error();
- EXPECT_EQ(out.str(), "glsl");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, i)) << gen.error();
+ EXPECT_EQ(out.str(), "glsl");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_if_test.cc b/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_if_test.cc
index 48180190bcb..18c0ffddc46 100644
--- a/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_if_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_if_test.cc
@@ -20,44 +20,42 @@ namespace {
using WgslGeneratorImplTest = TestHelper;
TEST_F(WgslGeneratorImplTest, Emit_If) {
- Global("cond", ty.bool_(), ast::StorageClass::kPrivate);
+ Global("cond", ty.bool_(), ast::StorageClass::kPrivate);
- auto* cond = Expr("cond");
- auto* body = Block(Return());
- auto* i = If(cond, body);
- WrapInFunction(i);
+ auto* cond = Expr("cond");
+ auto* body = Block(Return());
+ auto* i = If(cond, body);
+ WrapInFunction(i);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(i)) << gen.error();
- EXPECT_EQ(gen.result(), R"( if (cond) {
+ ASSERT_TRUE(gen.EmitStatement(i)) << gen.error();
+ EXPECT_EQ(gen.result(), R"( if (cond) {
return;
}
)");
}
TEST_F(WgslGeneratorImplTest, Emit_IfWithElseIf) {
- Global("cond", ty.bool_(), ast::StorageClass::kPrivate);
- Global("else_cond", ty.bool_(), ast::StorageClass::kPrivate);
+ Global("cond", ty.bool_(), ast::StorageClass::kPrivate);
+ Global("else_cond", ty.bool_(), ast::StorageClass::kPrivate);
- auto* else_cond = Expr("else_cond");
- auto* else_body = Block(Return());
+ auto* else_cond = Expr("else_cond");
+ auto* else_body = Block(Return());
- auto* cond = Expr("cond");
- auto* body = Block(Return());
- auto* i = If(
- cond, body,
- ast::ElseStatementList{create<ast::ElseStatement>(else_cond, else_body)});
- WrapInFunction(i);
+ auto* cond = Expr("cond");
+ auto* body = Block(Return());
+ auto* i = If(cond, body, Else(If(else_cond, else_body)));
+ WrapInFunction(i);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(i)) << gen.error();
- EXPECT_EQ(gen.result(), R"( if (cond) {
+ ASSERT_TRUE(gen.EmitStatement(i)) << gen.error();
+ EXPECT_EQ(gen.result(), R"( if (cond) {
return;
} else if (else_cond) {
return;
@@ -66,23 +64,21 @@ TEST_F(WgslGeneratorImplTest, Emit_IfWithElseIf) {
}
TEST_F(WgslGeneratorImplTest, Emit_IfWithElse) {
- Global("cond", ty.bool_(), ast::StorageClass::kPrivate);
+ Global("cond", ty.bool_(), ast::StorageClass::kPrivate);
- auto* else_body = Block(Return());
+ auto* else_body = Block(Return());
- auto* cond = Expr("cond");
- auto* body = Block(Return());
- auto* i = If(
- cond, body,
- ast::ElseStatementList{create<ast::ElseStatement>(nullptr, else_body)});
- WrapInFunction(i);
+ auto* cond = Expr("cond");
+ auto* body = Block(Return());
+ auto* i = If(cond, body, Else(else_body));
+ WrapInFunction(i);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(i)) << gen.error();
- EXPECT_EQ(gen.result(), R"( if (cond) {
+ ASSERT_TRUE(gen.EmitStatement(i)) << gen.error();
+ EXPECT_EQ(gen.result(), R"( if (cond) {
return;
} else {
return;
@@ -91,30 +87,26 @@ TEST_F(WgslGeneratorImplTest, Emit_IfWithElse) {
}
TEST_F(WgslGeneratorImplTest, Emit_IfWithMultiple) {
- Global("cond", ty.bool_(), ast::StorageClass::kPrivate);
- Global("else_cond", ty.bool_(), ast::StorageClass::kPrivate);
+ Global("cond", ty.bool_(), ast::StorageClass::kPrivate);
+ Global("else_cond", ty.bool_(), ast::StorageClass::kPrivate);
- auto* else_cond = Expr("else_cond");
+ auto* else_cond = Expr("else_cond");
- auto* else_body = Block(Return());
+ auto* else_body = Block(Return());
- auto* else_body_2 = Block(Return());
+ auto* else_body_2 = Block(Return());
- auto* cond = Expr("cond");
- auto* body = Block(Return());
- auto* i = If(cond, body,
- ast::ElseStatementList{
- create<ast::ElseStatement>(else_cond, else_body),
- create<ast::ElseStatement>(nullptr, else_body_2),
- });
- WrapInFunction(i);
+ auto* cond = Expr("cond");
+ auto* body = Block(Return());
+ auto* i = If(cond, body, Else(If(else_cond, else_body, Else(else_body_2))));
+ WrapInFunction(i);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(i)) << gen.error();
- EXPECT_EQ(gen.result(), R"( if (cond) {
+ ASSERT_TRUE(gen.EmitStatement(i)) << gen.error();
+ EXPECT_EQ(gen.result(), R"( if (cond) {
return;
} else if (else_cond) {
return;
diff --git a/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_literal_test.cc b/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_literal_test.cc
index dd3ace89770..dd715d4fbe4 100644
--- a/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_literal_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_literal_test.cc
@@ -16,6 +16,8 @@
#include "src/tint/writer/wgsl/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::wgsl {
namespace {
@@ -23,78 +25,76 @@ namespace {
// - 0 sign if sign is 0, 1 otherwise
// - 'exponent_bits' is placed in the exponent space.
// So, the exponent bias must already be included.
-float MakeFloat(int sign, int biased_exponent, int mantissa) {
- const uint32_t sign_bit = sign ? 0x80000000u : 0u;
- // The binary32 exponent is 8 bits, just below the sign.
- const uint32_t exponent_bits = (biased_exponent & 0xffu) << 23;
- // The mantissa is the bottom 23 bits.
- const uint32_t mantissa_bits = (mantissa & 0x7fffffu);
-
- uint32_t bits = sign_bit | exponent_bits | mantissa_bits;
- float result = 0.0f;
- static_assert(sizeof(result) == sizeof(bits),
- "expected float and uint32_t to be the same size");
- std::memcpy(&result, &bits, sizeof(bits));
- return result;
+f32 MakeFloat(int sign, int biased_exponent, int mantissa) {
+ const uint32_t sign_bit = sign ? 0x80000000u : 0u;
+ // The binary32 exponent is 8 bits, just below the sign.
+ const uint32_t exponent_bits = (biased_exponent & 0xffu) << 23;
+ // The mantissa is the bottom 23 bits.
+ const uint32_t mantissa_bits = (mantissa & 0x7fffffu);
+
+ uint32_t bits = sign_bit | exponent_bits | mantissa_bits;
+ float result = 0.0f;
+ static_assert(sizeof(result) == sizeof(bits),
+ "expected float and uint32_t to be the same size");
+ std::memcpy(&result, &bits, sizeof(bits));
+ return f32(result);
}
struct FloatData {
- float value;
- std::string expected;
+ f32 value;
+ std::string expected;
};
inline std::ostream& operator<<(std::ostream& out, FloatData data) {
- out << "{" << data.value << "," << data.expected << "}";
- return out;
+ out << "{" << data.value << "," << data.expected << "}";
+ return out;
}
using WgslGenerator_FloatLiteralTest = TestParamHelper<FloatData>;
TEST_P(WgslGenerator_FloatLiteralTest, Emit) {
- auto* v = Expr(GetParam().value);
+ auto* v = Expr(GetParam().value);
- SetResolveOnBuild(false);
- GeneratorImpl& gen = Build();
+ SetResolveOnBuild(false);
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitLiteral(out, v)) << gen.error();
- EXPECT_EQ(out.str(), GetParam().expected);
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitLiteral(out, v)) << gen.error();
+ EXPECT_EQ(out.str(), GetParam().expected);
}
INSTANTIATE_TEST_SUITE_P(Zero,
WgslGenerator_FloatLiteralTest,
::testing::ValuesIn(std::vector<FloatData>{
- {0.0f, "0.0"},
- {MakeFloat(0, 0, 0), "0.0"},
- {MakeFloat(1, 0, 0), "-0.0"}}));
+ {0_f, "0.0f"},
+ {MakeFloat(0, 0, 0), "0.0f"},
+ {MakeFloat(1, 0, 0), "-0.0f"}}));
INSTANTIATE_TEST_SUITE_P(Normal,
WgslGenerator_FloatLiteralTest,
- ::testing::ValuesIn(std::vector<FloatData>{
- {1.0f, "1.0"},
- {-1.0f, "-1.0"},
- {101.375, "101.375"}}));
+ ::testing::ValuesIn(std::vector<FloatData>{{1_f, "1.0f"},
+ {-1_f, "-1.0f"},
+ {101.375_f, "101.375f"}}));
-INSTANTIATE_TEST_SUITE_P(
- Subnormal,
- WgslGenerator_FloatLiteralTest,
- ::testing::ValuesIn(std::vector<FloatData>{
- {MakeFloat(0, 0, 1), "0x1p-149"}, // Smallest
- {MakeFloat(1, 0, 1), "-0x1p-149"},
- {MakeFloat(0, 0, 2), "0x1p-148"},
- {MakeFloat(1, 0, 2), "-0x1p-148"},
- {MakeFloat(0, 0, 0x7fffff), "0x1.fffffcp-127"}, // Largest
- {MakeFloat(1, 0, 0x7fffff), "-0x1.fffffcp-127"}, // Largest
- {MakeFloat(0, 0, 0xcafebe), "0x1.2bfaf8p-127"}, // Scattered bits
- {MakeFloat(1, 0, 0xcafebe), "-0x1.2bfaf8p-127"}, // Scattered bits
- {MakeFloat(0, 0, 0xaaaaa), "0x1.55554p-130"}, // Scattered bits
- {MakeFloat(1, 0, 0xaaaaa), "-0x1.55554p-130"}, // Scattered bits
- }));
+INSTANTIATE_TEST_SUITE_P(Subnormal,
+ WgslGenerator_FloatLiteralTest,
+ ::testing::ValuesIn(std::vector<FloatData>{
+ {MakeFloat(0, 0, 1), "0x1p-149f"}, // Smallest
+ {MakeFloat(1, 0, 1), "-0x1p-149f"},
+ {MakeFloat(0, 0, 2), "0x1p-148f"},
+ {MakeFloat(1, 0, 2), "-0x1p-148f"},
+ {MakeFloat(0, 0, 0x7fffff), "0x1.fffffcp-127f"}, // Largest
+ {MakeFloat(1, 0, 0x7fffff), "-0x1.fffffcp-127f"}, // Largest
+ {MakeFloat(0, 0, 0xcafebe), "0x1.2bfaf8p-127f"}, // Scattered bits
+ {MakeFloat(1, 0, 0xcafebe), "-0x1.2bfaf8p-127f"}, // Scattered bits
+ {MakeFloat(0, 0, 0xaaaaa), "0x1.55554p-130f"}, // Scattered bits
+ {MakeFloat(1, 0, 0xaaaaa), "-0x1.55554p-130f"}, // Scattered bits
+ }));
INSTANTIATE_TEST_SUITE_P(Infinity,
WgslGenerator_FloatLiteralTest,
::testing::ValuesIn(std::vector<FloatData>{
- {MakeFloat(0, 255, 0), "0x1p+128"},
- {MakeFloat(1, 255, 0), "-0x1p+128"}}));
+ {MakeFloat(0, 255, 0), "0x1p+128f"},
+ {MakeFloat(1, 255, 0), "-0x1p+128f"}}));
INSTANTIATE_TEST_SUITE_P(
// TODO(dneto): It's unclear how Infinity and NaN should be handled.
@@ -109,20 +109,20 @@ INSTANTIATE_TEST_SUITE_P(
WgslGenerator_FloatLiteralTest,
::testing::ValuesIn(std::vector<FloatData>{
// LSB only. Smallest mantissa.
- {MakeFloat(0, 255, 1), "0x1.000002p+128"}, // Smallest mantissa
- {MakeFloat(1, 255, 1), "-0x1.000002p+128"},
+ {MakeFloat(0, 255, 1), "0x1.000002p+128f"}, // Smallest mantissa
+ {MakeFloat(1, 255, 1), "-0x1.000002p+128f"},
// MSB only.
- {MakeFloat(0, 255, 0x400000), "0x1.8p+128"},
- {MakeFloat(1, 255, 0x400000), "-0x1.8p+128"},
+ {MakeFloat(0, 255, 0x400000), "0x1.8p+128f"},
+ {MakeFloat(1, 255, 0x400000), "-0x1.8p+128f"},
// All 1s in the mantissa.
- {MakeFloat(0, 255, 0x7fffff), "0x1.fffffep+128"},
- {MakeFloat(1, 255, 0x7fffff), "-0x1.fffffep+128"},
+ {MakeFloat(0, 255, 0x7fffff), "0x1.fffffep+128f"},
+ {MakeFloat(1, 255, 0x7fffff), "-0x1.fffffep+128f"},
// Scattered bits, with 0 in top mantissa bit.
- {MakeFloat(0, 255, 0x20101f), "0x1.40203ep+128"},
- {MakeFloat(1, 255, 0x20101f), "-0x1.40203ep+128"},
+ {MakeFloat(0, 255, 0x20101f), "0x1.40203ep+128f"},
+ {MakeFloat(1, 255, 0x20101f), "-0x1.40203ep+128f"},
// Scattered bits, with 1 in top mantissa bit.
- {MakeFloat(0, 255, 0x40101f), "0x1.80203ep+128"},
- {MakeFloat(1, 255, 0x40101f), "-0x1.80203ep+128"}}));
+ {MakeFloat(0, 255, 0x40101f), "0x1.80203ep+128f"},
+ {MakeFloat(1, 255, 0x40101f), "-0x1.80203ep+128f"}}));
} // namespace
} // namespace tint::writer::wgsl
diff --git a/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_loop_test.cc b/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_loop_test.cc
index 71b331a0f8b..2d6a8f4a7af 100644
--- a/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_loop_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_loop_test.cc
@@ -14,44 +14,46 @@
#include "src/tint/writer/wgsl/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::wgsl {
namespace {
using WgslGeneratorImplTest = TestHelper;
TEST_F(WgslGeneratorImplTest, Emit_Loop) {
- auto* body = Block(create<ast::DiscardStatement>());
- auto* continuing = Block();
- auto* l = Loop(body, continuing);
+ auto* body = Block(create<ast::DiscardStatement>());
+ auto* continuing = Block();
+ auto* l = Loop(body, continuing);
- WrapInFunction(l);
+ WrapInFunction(l);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(l)) << gen.error();
- EXPECT_EQ(gen.result(), R"( loop {
+ ASSERT_TRUE(gen.EmitStatement(l)) << gen.error();
+ EXPECT_EQ(gen.result(), R"( loop {
discard;
}
)");
}
TEST_F(WgslGeneratorImplTest, Emit_LoopWithContinuing) {
- Func("a_statement", {}, ty.void_(), {});
+ Func("a_statement", {}, ty.void_(), {});
- auto* body = Block(create<ast::DiscardStatement>());
- auto* continuing = Block(CallStmt(Call("a_statement")));
- auto* l = Loop(body, continuing);
+ auto* body = Block(create<ast::DiscardStatement>());
+ auto* continuing = Block(CallStmt(Call("a_statement")));
+ auto* l = Loop(body, continuing);
- WrapInFunction(l);
+ WrapInFunction(l);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(l)) << gen.error();
- EXPECT_EQ(gen.result(), R"( loop {
+ ASSERT_TRUE(gen.EmitStatement(l)) << gen.error();
+ EXPECT_EQ(gen.result(), R"( loop {
discard;
continuing {
@@ -62,23 +64,23 @@ TEST_F(WgslGeneratorImplTest, Emit_LoopWithContinuing) {
}
TEST_F(WgslGeneratorImplTest, Emit_ForLoopWithMultiStmtInit) {
- // var<workgroup> a : atomic<i32>;
- // for({ignore(1); ignore(2);}; ; ) {
- // return;
- // }
- Global("a", ty.atomic<i32>(), ast::StorageClass::kWorkgroup);
- auto* multi_stmt = Block(Ignore(1), Ignore(2));
- auto* f = For(multi_stmt, nullptr, nullptr, Block(Return()));
- WrapInFunction(f);
-
- GeneratorImpl& gen = Build();
-
- gen.increment_indent();
-
- ASSERT_TRUE(gen.EmitStatement(f)) << gen.error();
- EXPECT_EQ(gen.result(), R"( for({
- _ = 1;
- _ = 2;
+ // var<workgroup> a : atomic<i32>;
+ // for({ignore(1i); ignore(2i);}; ; ) {
+ // return;
+ // }
+ Global("a", ty.atomic<i32>(), ast::StorageClass::kWorkgroup);
+ auto* multi_stmt = Block(Ignore(1_i), Ignore(2_i));
+ auto* f = For(multi_stmt, nullptr, nullptr, Block(Return()));
+ WrapInFunction(f);
+
+ GeneratorImpl& gen = Build();
+
+ gen.increment_indent();
+
+ ASSERT_TRUE(gen.EmitStatement(f)) << gen.error();
+ EXPECT_EQ(gen.result(), R"( for({
+ _ = 1i;
+ _ = 2i;
}; ; ) {
return;
}
@@ -86,63 +88,63 @@ TEST_F(WgslGeneratorImplTest, Emit_ForLoopWithMultiStmtInit) {
}
TEST_F(WgslGeneratorImplTest, Emit_ForLoopWithSimpleCond) {
- // for(; true; ) {
- // return;
- // }
+ // for(; true; ) {
+ // return;
+ // }
- auto* f = For(nullptr, true, nullptr, Block(Return()));
- WrapInFunction(f);
+ auto* f = For(nullptr, true, nullptr, Block(Return()));
+ WrapInFunction(f);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(f)) << gen.error();
- EXPECT_EQ(gen.result(), R"( for(; true; ) {
+ ASSERT_TRUE(gen.EmitStatement(f)) << gen.error();
+ EXPECT_EQ(gen.result(), R"( for(; true; ) {
return;
}
)");
}
TEST_F(WgslGeneratorImplTest, Emit_ForLoopWithSimpleCont) {
- // for(; ; i = i + 1) {
- // return;
- // }
+ // for(; ; i = i + 1i) {
+ // return;
+ // }
- auto* v = Decl(Var("i", ty.i32()));
- auto* f = For(nullptr, nullptr, Assign("i", Add("i", 1)), Block(Return()));
- WrapInFunction(v, f);
+ auto* v = Decl(Var("i", ty.i32()));
+ auto* f = For(nullptr, nullptr, Assign("i", Add("i", 1_i)), Block(Return()));
+ WrapInFunction(v, f);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(f)) << gen.error();
- EXPECT_EQ(gen.result(), R"( for(; ; i = (i + 1)) {
+ ASSERT_TRUE(gen.EmitStatement(f)) << gen.error();
+ EXPECT_EQ(gen.result(), R"( for(; ; i = (i + 1i)) {
return;
}
)");
}
TEST_F(WgslGeneratorImplTest, Emit_ForLoopWithMultiStmtCont) {
- // var<workgroup> a : atomic<i32>;
- // for(; ; { ignore(1); ignore(2); }) {
- // return;
- // }
+ // var<workgroup> a : atomic<i32>;
+ // for(; ; { ignore(1i); ignore(2i); }) {
+ // return;
+ // }
- Global("a", ty.atomic<i32>(), ast::StorageClass::kWorkgroup);
- auto* multi_stmt = Block(Ignore(1), Ignore(2));
- auto* f = For(nullptr, nullptr, multi_stmt, Block(Return()));
- WrapInFunction(f);
+ Global("a", ty.atomic<i32>(), ast::StorageClass::kWorkgroup);
+ auto* multi_stmt = Block(Ignore(1_i), Ignore(2_i));
+ auto* f = For(nullptr, nullptr, multi_stmt, Block(Return()));
+ WrapInFunction(f);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(f)) << gen.error();
- EXPECT_EQ(gen.result(), R"( for(; ; {
- _ = 1;
- _ = 2;
+ ASSERT_TRUE(gen.EmitStatement(f)) << gen.error();
+ EXPECT_EQ(gen.result(), R"( for(; ; {
+ _ = 1i;
+ _ = 2i;
}) {
return;
}
@@ -150,47 +152,46 @@ TEST_F(WgslGeneratorImplTest, Emit_ForLoopWithMultiStmtCont) {
}
TEST_F(WgslGeneratorImplTest, Emit_ForLoopWithSimpleInitCondCont) {
- // for(var i : i32; true; i = i + 1) {
- // return;
- // }
+ // for(var i : i32; true; i = i + 1i) {
+ // return;
+ // }
- auto* f = For(Decl(Var("i", ty.i32())), true, Assign("i", Add("i", 1)),
- Block(Return()));
- WrapInFunction(f);
+ auto* f = For(Decl(Var("i", ty.i32())), true, Assign("i", Add("i", 1_i)), Block(Return()));
+ WrapInFunction(f);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(f)) << gen.error();
- EXPECT_EQ(gen.result(), R"( for(var i : i32; true; i = (i + 1)) {
+ ASSERT_TRUE(gen.EmitStatement(f)) << gen.error();
+ EXPECT_EQ(gen.result(), R"( for(var i : i32; true; i = (i + 1i)) {
return;
}
)");
}
TEST_F(WgslGeneratorImplTest, Emit_ForLoopWithMultiStmtInitCondCont) {
- // var<workgroup> a : atomic<i32>;
- // for({ ignore(1); ignore(2); }; true; { ignore(3); ignore(4); }) {
- // return;
- // }
- Global("a", ty.atomic<i32>(), ast::StorageClass::kWorkgroup);
- auto* multi_stmt_a = Block(Ignore(1), Ignore(2));
- auto* multi_stmt_b = Block(Ignore(3), Ignore(4));
- auto* f = For(multi_stmt_a, Expr(true), multi_stmt_b, Block(Return()));
- WrapInFunction(f);
-
- GeneratorImpl& gen = Build();
-
- gen.increment_indent();
-
- ASSERT_TRUE(gen.EmitStatement(f)) << gen.error();
- EXPECT_EQ(gen.result(), R"( for({
- _ = 1;
- _ = 2;
+ // var<workgroup> a : atomic<i32>;
+ // for({ ignore(1i); ignore(2i); }; true; { ignore(3i); ignore(4i); }) {
+ // return;
+ // }
+ Global("a", ty.atomic<i32>(), ast::StorageClass::kWorkgroup);
+ auto* multi_stmt_a = Block(Ignore(1_i), Ignore(2_i));
+ auto* multi_stmt_b = Block(Ignore(3_i), Ignore(4_i));
+ auto* f = For(multi_stmt_a, Expr(true), multi_stmt_b, Block(Return()));
+ WrapInFunction(f);
+
+ GeneratorImpl& gen = Build();
+
+ gen.increment_indent();
+
+ ASSERT_TRUE(gen.EmitStatement(f)) << gen.error();
+ EXPECT_EQ(gen.result(), R"( for({
+ _ = 1i;
+ _ = 2i;
}; true; {
- _ = 3;
- _ = 4;
+ _ = 3i;
+ _ = 4i;
}) {
return;
}
diff --git a/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_member_accessor_test.cc b/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_member_accessor_test.cc
index 6e9d036a8ba..d8bd3495f13 100644
--- a/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_member_accessor_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_member_accessor_test.cc
@@ -20,32 +20,32 @@ namespace {
using WgslGeneratorImplTest = TestHelper;
TEST_F(WgslGeneratorImplTest, EmitExpression_MemberAccessor) {
- auto* s = Structure("Data", {Member("mem", ty.f32())});
- Global("str", ty.Of(s), ast::StorageClass::kPrivate);
+ auto* s = Structure("Data", {Member("mem", ty.f32())});
+ Global("str", ty.Of(s), ast::StorageClass::kPrivate);
- auto* expr = MemberAccessor("str", "mem");
- WrapInFunction(expr);
+ auto* expr = MemberAccessor("str", "mem");
+ WrapInFunction(expr);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
- EXPECT_EQ(out.str(), "str.mem");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
+ EXPECT_EQ(out.str(), "str.mem");
}
TEST_F(WgslGeneratorImplTest, EmitExpression_MemberAccessor_OfDref) {
- auto* s = Structure("Data", {Member("mem", ty.f32())});
- Global("str", ty.Of(s), ast::StorageClass::kPrivate);
+ auto* s = Structure("Data", {Member("mem", ty.f32())});
+ Global("str", ty.Of(s), ast::StorageClass::kPrivate);
- auto* p = Const("p", nullptr, AddressOf("str"));
- auto* expr = MemberAccessor(Deref("p"), "mem");
- WrapInFunction(p, expr);
+ auto* p = Let("p", nullptr, AddressOf("str"));
+ auto* expr = MemberAccessor(Deref("p"), "mem");
+ WrapInFunction(p, expr);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
- EXPECT_EQ(out.str(), "(*(p)).mem");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, expr)) << gen.error();
+ EXPECT_EQ(out.str(), "(*(p)).mem");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_return_test.cc b/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_return_test.cc
index 68f4103b14f..ed1027d63b4 100644
--- a/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_return_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_return_test.cc
@@ -14,33 +14,35 @@
#include "src/tint/writer/wgsl/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::wgsl {
namespace {
using WgslGeneratorImplTest = TestHelper;
TEST_F(WgslGeneratorImplTest, Emit_Return) {
- auto* r = Return();
- WrapInFunction(r);
+ auto* r = Return();
+ WrapInFunction(r);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(r)) << gen.error();
- EXPECT_EQ(gen.result(), " return;\n");
+ ASSERT_TRUE(gen.EmitStatement(r)) << gen.error();
+ EXPECT_EQ(gen.result(), " return;\n");
}
TEST_F(WgslGeneratorImplTest, Emit_ReturnWithValue) {
- auto* r = Return(123);
- Func("f", {}, ty.i32(), {r});
+ auto* r = Return(123_i);
+ Func("f", {}, ty.i32(), {r});
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(r)) << gen.error();
- EXPECT_EQ(gen.result(), " return 123;\n");
+ ASSERT_TRUE(gen.EmitStatement(r)) << gen.error();
+ EXPECT_EQ(gen.result(), " return 123i;\n");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_switch_test.cc b/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_switch_test.cc
index 4d722c5e868..11424f0f83b 100644
--- a/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_switch_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_switch_test.cc
@@ -14,39 +14,41 @@
#include "src/tint/writer/wgsl/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::wgsl {
namespace {
using WgslGeneratorImplTest = TestHelper;
TEST_F(WgslGeneratorImplTest, Emit_Switch) {
- Global("cond", ty.i32(), ast::StorageClass::kPrivate);
+ Global("cond", ty.i32(), ast::StorageClass::kPrivate);
- auto* def_body = Block(create<ast::BreakStatement>());
- auto* def = create<ast::CaseStatement>(ast::CaseSelectorList{}, def_body);
+ auto* def_body = Block(create<ast::BreakStatement>());
+ auto* def = create<ast::CaseStatement>(ast::CaseSelectorList{}, def_body);
- ast::CaseSelectorList case_val;
- case_val.push_back(Expr(5));
+ ast::CaseSelectorList case_val;
+ case_val.push_back(Expr(5_i));
- auto* case_body = Block(create<ast::BreakStatement>());
+ auto* case_body = Block(create<ast::BreakStatement>());
- auto* case_stmt = create<ast::CaseStatement>(case_val, case_body);
+ auto* case_stmt = create<ast::CaseStatement>(case_val, case_body);
- ast::CaseStatementList body;
- body.push_back(case_stmt);
- body.push_back(def);
+ ast::CaseStatementList body;
+ body.push_back(case_stmt);
+ body.push_back(def);
- auto* cond = Expr("cond");
- auto* s = create<ast::SwitchStatement>(cond, body);
- WrapInFunction(s);
+ auto* cond = Expr("cond");
+ auto* s = create<ast::SwitchStatement>(cond, body);
+ WrapInFunction(s);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(s)) << gen.error();
- EXPECT_EQ(gen.result(), R"( switch(cond) {
- case 5: {
+ ASSERT_TRUE(gen.EmitStatement(s)) << gen.error();
+ EXPECT_EQ(gen.result(), R"( switch(cond) {
+ case 5i: {
break;
}
default: {
diff --git a/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_test.cc b/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_test.cc
index b526f554929..f8972c70864 100644
--- a/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_test.cc
@@ -21,13 +21,12 @@ namespace {
using WgslGeneratorImplTest = TestHelper;
TEST_F(WgslGeneratorImplTest, Generate) {
- Func("my_func", ast::VariableList{}, ty.void_(), ast::StatementList{},
- ast::AttributeList{});
+ Func("my_func", ast::VariableList{}, ty.void_(), ast::StatementList{}, ast::AttributeList{});
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.Generate()) << gen.error();
- EXPECT_EQ(gen.result(), R"(fn my_func() {
+ ASSERT_TRUE(gen.Generate()) << gen.error();
+ EXPECT_EQ(gen.result(), R"(fn my_func() {
}
)");
}
diff --git a/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_type_test.cc b/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_type_test.cc
index 8dc9addaaf6..0973ffadadf 100644
--- a/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_type_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_type_test.cc
@@ -12,153 +12,154 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-#include "src/tint/sem/depth_texture_type.h"
-#include "src/tint/sem/multisampled_texture_type.h"
-#include "src/tint/sem/sampled_texture_type.h"
+#include "src/tint/sem/depth_texture.h"
+#include "src/tint/sem/multisampled_texture.h"
+#include "src/tint/sem/sampled_texture.h"
#include "src/tint/writer/wgsl/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::wgsl {
namespace {
using WgslGeneratorImplTest = TestHelper;
TEST_F(WgslGeneratorImplTest, EmitType_Alias) {
- auto* alias = Alias("alias", ty.f32());
- auto* alias_ty = ty.Of(alias);
- WrapInFunction(Var("make_reachable", alias_ty));
+ auto* alias = Alias("alias", ty.f32());
+ auto* alias_ty = ty.Of(alias);
+ WrapInFunction(Var("make_reachable", alias_ty));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitType(out, alias_ty)) << gen.error();
- EXPECT_EQ(out.str(), "alias");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitType(out, alias_ty)) << gen.error();
+ EXPECT_EQ(out.str(), "alias");
}
TEST_F(WgslGeneratorImplTest, EmitType_Array) {
- auto* arr = ty.array<bool, 4>();
- Alias("make_type_reachable", arr);
+ auto* arr = ty.array<bool, 4u>();
+ Alias("make_type_reachable", arr);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitType(out, arr)) << gen.error();
- EXPECT_EQ(out.str(), "array<bool, 4>");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitType(out, arr)) << gen.error();
+ EXPECT_EQ(out.str(), "array<bool, 4u>");
}
TEST_F(WgslGeneratorImplTest, EmitType_Array_Attribute) {
- auto* a = ty.array(ty.bool_(), 4, 16u);
- Alias("make_type_reachable", a);
+ auto* a = ty.array(ty.bool_(), 4_u, 16u);
+ Alias("make_type_reachable", a);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitType(out, a)) << gen.error();
- EXPECT_EQ(out.str(), "@stride(16) array<bool, 4>");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitType(out, a)) << gen.error();
+ EXPECT_EQ(out.str(), "@stride(16) array<bool, 4u>");
}
TEST_F(WgslGeneratorImplTest, EmitType_RuntimeArray) {
- auto* a = ty.array(ty.bool_());
- Alias("make_type_reachable", a);
+ auto* a = ty.array(ty.bool_());
+ Alias("make_type_reachable", a);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitType(out, a)) << gen.error();
- EXPECT_EQ(out.str(), "array<bool>");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitType(out, a)) << gen.error();
+ EXPECT_EQ(out.str(), "array<bool>");
}
TEST_F(WgslGeneratorImplTest, EmitType_Bool) {
- auto* bool_ = ty.bool_();
- Alias("make_type_reachable", bool_);
+ auto* bool_ = ty.bool_();
+ Alias("make_type_reachable", bool_);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitType(out, bool_)) << gen.error();
- EXPECT_EQ(out.str(), "bool");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitType(out, bool_)) << gen.error();
+ EXPECT_EQ(out.str(), "bool");
}
TEST_F(WgslGeneratorImplTest, EmitType_F32) {
- auto* f32 = ty.f32();
- Alias("make_type_reachable", f32);
+ auto* f32 = ty.f32();
+ Alias("make_type_reachable", f32);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitType(out, f32)) << gen.error();
- EXPECT_EQ(out.str(), "f32");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitType(out, f32)) << gen.error();
+ EXPECT_EQ(out.str(), "f32");
}
TEST_F(WgslGeneratorImplTest, EmitType_I32) {
- auto* i32 = ty.i32();
- Alias("make_type_reachable", i32);
+ auto* i32 = ty.i32();
+ Alias("make_type_reachable", i32);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitType(out, i32)) << gen.error();
- EXPECT_EQ(out.str(), "i32");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitType(out, i32)) << gen.error();
+ EXPECT_EQ(out.str(), "i32");
}
TEST_F(WgslGeneratorImplTest, EmitType_Matrix) {
- auto* mat2x3 = ty.mat2x3<f32>();
- Alias("make_type_reachable", mat2x3);
+ auto* mat2x3 = ty.mat2x3<f32>();
+ Alias("make_type_reachable", mat2x3);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitType(out, mat2x3)) << gen.error();
- EXPECT_EQ(out.str(), "mat2x3<f32>");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitType(out, mat2x3)) << gen.error();
+ EXPECT_EQ(out.str(), "mat2x3<f32>");
}
TEST_F(WgslGeneratorImplTest, EmitType_Pointer) {
- auto* p = ty.pointer<f32>(ast::StorageClass::kWorkgroup);
- Alias("make_type_reachable", p);
+ auto* p = ty.pointer<f32>(ast::StorageClass::kWorkgroup);
+ Alias("make_type_reachable", p);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitType(out, p)) << gen.error();
- EXPECT_EQ(out.str(), "ptr<workgroup, f32>");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitType(out, p)) << gen.error();
+ EXPECT_EQ(out.str(), "ptr<workgroup, f32>");
}
TEST_F(WgslGeneratorImplTest, EmitType_PointerAccessMode) {
- auto* p =
- ty.pointer<f32>(ast::StorageClass::kStorage, ast::Access::kReadWrite);
- Alias("make_type_reachable", p);
+ auto* p = ty.pointer<f32>(ast::StorageClass::kStorage, ast::Access::kReadWrite);
+ Alias("make_type_reachable", p);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitType(out, p)) << gen.error();
- EXPECT_EQ(out.str(), "ptr<storage, f32, read_write>");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitType(out, p)) << gen.error();
+ EXPECT_EQ(out.str(), "ptr<storage, f32, read_write>");
}
TEST_F(WgslGeneratorImplTest, EmitType_Struct) {
- auto* s = Structure("S", {
- Member("a", ty.i32()),
- Member("b", ty.f32()),
- });
- auto* s_ty = ty.Of(s);
- WrapInFunction(Var("make_reachable", s_ty));
+ auto* s = Structure("S", {
+ Member("a", ty.i32()),
+ Member("b", ty.f32()),
+ });
+ auto* s_ty = ty.Of(s);
+ WrapInFunction(Var("make_reachable", s_ty));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitType(out, s_ty)) << gen.error();
- EXPECT_EQ(out.str(), "S");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitType(out, s_ty)) << gen.error();
+ EXPECT_EQ(out.str(), "S");
}
TEST_F(WgslGeneratorImplTest, EmitType_StructOffsetDecl) {
- auto* s = Structure("S", {
- Member("a", ty.i32(), {MemberOffset(8)}),
- Member("b", ty.f32(), {MemberOffset(16)}),
- });
+ auto* s = Structure("S", {
+ Member("a", ty.i32(), {MemberOffset(8)}),
+ Member("b", ty.f32(), {MemberOffset(16)}),
+ });
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.EmitStructType(s)) << gen.error();
- EXPECT_EQ(gen.result(), R"(struct S {
+ ASSERT_TRUE(gen.EmitStructType(s)) << gen.error();
+ EXPECT_EQ(gen.result(), R"(struct S {
@size(8)
padding : u32,
a : i32,
@@ -170,16 +171,15 @@ TEST_F(WgslGeneratorImplTest, EmitType_StructOffsetDecl) {
}
TEST_F(WgslGeneratorImplTest, EmitType_StructOffsetDecl_WithSymbolCollisions) {
- auto* s =
- Structure("S", {
- Member("tint_0_padding", ty.i32(), {MemberOffset(8)}),
- Member("tint_2_padding", ty.f32(), {MemberOffset(16)}),
- });
+ auto* s = Structure("S", {
+ Member("tint_0_padding", ty.i32(), {MemberOffset(8)}),
+ Member("tint_2_padding", ty.f32(), {MemberOffset(16)}),
+ });
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.EmitStructType(s)) << gen.error();
- EXPECT_EQ(gen.result(), R"(struct S {
+ ASSERT_TRUE(gen.EmitStructType(s)) << gen.error();
+ EXPECT_EQ(gen.result(), R"(struct S {
@size(8)
padding : u32,
tint_0_padding : i32,
@@ -191,15 +191,15 @@ TEST_F(WgslGeneratorImplTest, EmitType_StructOffsetDecl_WithSymbolCollisions) {
}
TEST_F(WgslGeneratorImplTest, EmitType_StructAlignDecl) {
- auto* s = Structure("S", {
- Member("a", ty.i32(), {MemberAlign(8)}),
- Member("b", ty.f32(), {MemberAlign(16)}),
- });
+ auto* s = Structure("S", {
+ Member("a", ty.i32(), {MemberAlign(8)}),
+ Member("b", ty.f32(), {MemberAlign(16)}),
+ });
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.EmitStructType(s)) << gen.error();
- EXPECT_EQ(gen.result(), R"(struct S {
+ ASSERT_TRUE(gen.EmitStructType(s)) << gen.error();
+ EXPECT_EQ(gen.result(), R"(struct S {
@align(8)
a : i32,
@align(16)
@@ -209,15 +209,15 @@ TEST_F(WgslGeneratorImplTest, EmitType_StructAlignDecl) {
}
TEST_F(WgslGeneratorImplTest, EmitType_StructSizeDecl) {
- auto* s = Structure("S", {
- Member("a", ty.i32(), {MemberSize(16)}),
- Member("b", ty.f32(), {MemberSize(32)}),
- });
+ auto* s = Structure("S", {
+ Member("a", ty.i32(), {MemberSize(16)}),
+ Member("b", ty.f32(), {MemberSize(32)}),
+ });
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.EmitStructType(s)) << gen.error();
- EXPECT_EQ(gen.result(), R"(struct S {
+ ASSERT_TRUE(gen.EmitStructType(s)) << gen.error();
+ EXPECT_EQ(gen.result(), R"(struct S {
@size(16)
a : i32,
@size(32)
@@ -227,15 +227,15 @@ TEST_F(WgslGeneratorImplTest, EmitType_StructSizeDecl) {
}
TEST_F(WgslGeneratorImplTest, EmitType_Struct_WithAttribute) {
- auto* s = Structure("S", {
- Member("a", ty.i32()),
- Member("b", ty.f32(), {MemberAlign(8)}),
- });
+ auto* s = Structure("S", {
+ Member("a", ty.i32()),
+ Member("b", ty.f32(), {MemberAlign(8)}),
+ });
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.EmitStructType(s)) << gen.error();
- EXPECT_EQ(gen.result(), R"(struct S {
+ ASSERT_TRUE(gen.EmitStructType(s)) << gen.error();
+ EXPECT_EQ(gen.result(), R"(struct S {
a : i32,
@align(8)
b : f32,
@@ -244,15 +244,14 @@ TEST_F(WgslGeneratorImplTest, EmitType_Struct_WithAttribute) {
}
TEST_F(WgslGeneratorImplTest, EmitType_Struct_WithEntryPointAttributes) {
- auto* s = Structure(
- "S", ast::StructMemberList{
- Member("a", ty.u32(), {Builtin(ast::Builtin::kVertexIndex)}),
- Member("b", ty.f32(), {Location(2u)})});
+ auto* s = Structure(
+ "S", ast::StructMemberList{Member("a", ty.u32(), {Builtin(ast::Builtin::kVertexIndex)}),
+ Member("b", ty.f32(), {Location(2u)})});
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- ASSERT_TRUE(gen.EmitStructType(s)) << gen.error();
- EXPECT_EQ(gen.result(), R"(struct S {
+ ASSERT_TRUE(gen.EmitStructType(s)) << gen.error();
+ EXPECT_EQ(gen.result(), R"(struct S {
@builtin(vertex_index)
a : u32,
@location(2)
@@ -262,258 +261,250 @@ TEST_F(WgslGeneratorImplTest, EmitType_Struct_WithEntryPointAttributes) {
}
TEST_F(WgslGeneratorImplTest, EmitType_U32) {
- auto* u32 = ty.u32();
- Alias("make_type_reachable", u32);
+ auto* u32 = ty.u32();
+ Alias("make_type_reachable", u32);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitType(out, u32)) << gen.error();
- EXPECT_EQ(out.str(), "u32");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitType(out, u32)) << gen.error();
+ EXPECT_EQ(out.str(), "u32");
}
TEST_F(WgslGeneratorImplTest, EmitType_Vector) {
- auto* vec3 = ty.vec3<f32>();
- Alias("make_type_reachable", vec3);
+ auto* vec3 = ty.vec3<f32>();
+ Alias("make_type_reachable", vec3);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitType(out, vec3)) << gen.error();
- EXPECT_EQ(out.str(), "vec3<f32>");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitType(out, vec3)) << gen.error();
+ EXPECT_EQ(out.str(), "vec3<f32>");
}
struct TextureData {
- ast::TextureDimension dim;
- const char* name;
+ ast::TextureDimension dim;
+ const char* name;
};
inline std::ostream& operator<<(std::ostream& out, TextureData data) {
- out << data.name;
- return out;
+ out << data.name;
+ return out;
}
using WgslGenerator_DepthTextureTest = TestParamHelper<TextureData>;
TEST_P(WgslGenerator_DepthTextureTest, EmitType_DepthTexture) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* d = ty.depth_texture(param.dim);
- Alias("make_type_reachable", d);
+ auto* d = ty.depth_texture(param.dim);
+ Alias("make_type_reachable", d);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitType(out, d)) << gen.error();
- EXPECT_EQ(out.str(), param.name);
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitType(out, d)) << gen.error();
+ EXPECT_EQ(out.str(), param.name);
}
INSTANTIATE_TEST_SUITE_P(
WgslGeneratorImplTest,
WgslGenerator_DepthTextureTest,
- testing::Values(
- TextureData{ast::TextureDimension::k2d, "texture_depth_2d"},
- TextureData{ast::TextureDimension::k2dArray, "texture_depth_2d_array"},
- TextureData{ast::TextureDimension::kCube, "texture_depth_cube"},
- TextureData{ast::TextureDimension::kCubeArray,
- "texture_depth_cube_array"}));
+ testing::Values(TextureData{ast::TextureDimension::k2d, "texture_depth_2d"},
+ TextureData{ast::TextureDimension::k2dArray, "texture_depth_2d_array"},
+ TextureData{ast::TextureDimension::kCube, "texture_depth_cube"},
+ TextureData{ast::TextureDimension::kCubeArray, "texture_depth_cube_array"}));
using WgslGenerator_SampledTextureTest = TestParamHelper<TextureData>;
TEST_P(WgslGenerator_SampledTextureTest, EmitType_SampledTexture_F32) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* t = ty.sampled_texture(param.dim, ty.f32());
- Alias("make_type_reachable", t);
+ auto* t = ty.sampled_texture(param.dim, ty.f32());
+ Alias("make_type_reachable", t);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitType(out, t)) << gen.error();
- EXPECT_EQ(out.str(), std::string(param.name) + "<f32>");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitType(out, t)) << gen.error();
+ EXPECT_EQ(out.str(), std::string(param.name) + "<f32>");
}
TEST_P(WgslGenerator_SampledTextureTest, EmitType_SampledTexture_I32) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* t = ty.sampled_texture(param.dim, ty.i32());
- Alias("make_type_reachable", t);
+ auto* t = ty.sampled_texture(param.dim, ty.i32());
+ Alias("make_type_reachable", t);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitType(out, t)) << gen.error();
- EXPECT_EQ(out.str(), std::string(param.name) + "<i32>");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitType(out, t)) << gen.error();
+ EXPECT_EQ(out.str(), std::string(param.name) + "<i32>");
}
TEST_P(WgslGenerator_SampledTextureTest, EmitType_SampledTexture_U32) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* t = ty.sampled_texture(param.dim, ty.u32());
- Alias("make_type_reachable", t);
+ auto* t = ty.sampled_texture(param.dim, ty.u32());
+ Alias("make_type_reachable", t);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitType(out, t)) << gen.error();
- EXPECT_EQ(out.str(), std::string(param.name) + "<u32>");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitType(out, t)) << gen.error();
+ EXPECT_EQ(out.str(), std::string(param.name) + "<u32>");
}
INSTANTIATE_TEST_SUITE_P(
WgslGeneratorImplTest,
WgslGenerator_SampledTextureTest,
- testing::Values(
- TextureData{ast::TextureDimension::k1d, "texture_1d"},
- TextureData{ast::TextureDimension::k2d, "texture_2d"},
- TextureData{ast::TextureDimension::k2dArray, "texture_2d_array"},
- TextureData{ast::TextureDimension::k3d, "texture_3d"},
- TextureData{ast::TextureDimension::kCube, "texture_cube"},
- TextureData{ast::TextureDimension::kCubeArray, "texture_cube_array"}));
+ testing::Values(TextureData{ast::TextureDimension::k1d, "texture_1d"},
+ TextureData{ast::TextureDimension::k2d, "texture_2d"},
+ TextureData{ast::TextureDimension::k2dArray, "texture_2d_array"},
+ TextureData{ast::TextureDimension::k3d, "texture_3d"},
+ TextureData{ast::TextureDimension::kCube, "texture_cube"},
+ TextureData{ast::TextureDimension::kCubeArray, "texture_cube_array"}));
using WgslGenerator_MultiampledTextureTest = TestParamHelper<TextureData>;
TEST_P(WgslGenerator_MultiampledTextureTest, EmitType_MultisampledTexture_F32) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* t = ty.multisampled_texture(param.dim, ty.f32());
- Alias("make_type_reachable", t);
+ auto* t = ty.multisampled_texture(param.dim, ty.f32());
+ Alias("make_type_reachable", t);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitType(out, t)) << gen.error();
- EXPECT_EQ(out.str(), std::string(param.name) + "<f32>");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitType(out, t)) << gen.error();
+ EXPECT_EQ(out.str(), std::string(param.name) + "<f32>");
}
TEST_P(WgslGenerator_MultiampledTextureTest, EmitType_MultisampledTexture_I32) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* t = ty.multisampled_texture(param.dim, ty.i32());
- Alias("make_type_reachable", t);
+ auto* t = ty.multisampled_texture(param.dim, ty.i32());
+ Alias("make_type_reachable", t);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitType(out, t)) << gen.error();
- EXPECT_EQ(out.str(), std::string(param.name) + "<i32>");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitType(out, t)) << gen.error();
+ EXPECT_EQ(out.str(), std::string(param.name) + "<i32>");
}
TEST_P(WgslGenerator_MultiampledTextureTest, EmitType_MultisampledTexture_U32) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* t = ty.multisampled_texture(param.dim, ty.u32());
- Alias("make_type_reachable", t);
+ auto* t = ty.multisampled_texture(param.dim, ty.u32());
+ Alias("make_type_reachable", t);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitType(out, t)) << gen.error();
- EXPECT_EQ(out.str(), std::string(param.name) + "<u32>");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitType(out, t)) << gen.error();
+ EXPECT_EQ(out.str(), std::string(param.name) + "<u32>");
}
INSTANTIATE_TEST_SUITE_P(WgslGeneratorImplTest,
WgslGenerator_MultiampledTextureTest,
- testing::Values(TextureData{
- ast::TextureDimension::k2d,
- "texture_multisampled_2d"}));
+ testing::Values(TextureData{ast::TextureDimension::k2d,
+ "texture_multisampled_2d"}));
struct StorageTextureData {
- ast::TexelFormat fmt;
- ast::TextureDimension dim;
- ast::Access access;
- const char* name;
+ ast::TexelFormat fmt;
+ ast::TextureDimension dim;
+ ast::Access access;
+ const char* name;
};
inline std::ostream& operator<<(std::ostream& out, StorageTextureData data) {
- out << data.name;
- return out;
+ out << data.name;
+ return out;
}
using WgslGenerator_StorageTextureTest = TestParamHelper<StorageTextureData>;
TEST_P(WgslGenerator_StorageTextureTest, EmitType_StorageTexture) {
- auto param = GetParam();
+ auto param = GetParam();
- auto* t = ty.storage_texture(param.dim, param.fmt, param.access);
- Global("g", t,
- ast::AttributeList{
- create<ast::BindingAttribute>(1),
- create<ast::GroupAttribute>(2),
- });
+ auto* t = ty.storage_texture(param.dim, param.fmt, param.access);
+ Global("g", t,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(1),
+ create<ast::GroupAttribute>(2),
+ });
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitType(out, t)) << gen.error();
- EXPECT_EQ(out.str(), param.name);
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitType(out, t)) << gen.error();
+ EXPECT_EQ(out.str(), param.name);
}
INSTANTIATE_TEST_SUITE_P(
WgslGeneratorImplTest,
WgslGenerator_StorageTextureTest,
- testing::Values(
- StorageTextureData{ast::TexelFormat::kRgba8Sint,
- ast::TextureDimension::k1d, ast::Access::kWrite,
- "texture_storage_1d<rgba8sint, write>"},
- StorageTextureData{ast::TexelFormat::kRgba8Sint,
- ast::TextureDimension::k2d, ast::Access::kWrite,
- "texture_storage_2d<rgba8sint, write>"},
- StorageTextureData{ast::TexelFormat::kRgba8Sint,
- ast::TextureDimension::k2dArray, ast::Access::kWrite,
- "texture_storage_2d_array<rgba8sint, write>"},
- StorageTextureData{ast::TexelFormat::kRgba8Sint,
- ast::TextureDimension::k3d, ast::Access::kWrite,
- "texture_storage_3d<rgba8sint, write>"}));
+ testing::Values(StorageTextureData{ast::TexelFormat::kRgba8Sint, ast::TextureDimension::k1d,
+ ast::Access::kWrite, "texture_storage_1d<rgba8sint, write>"},
+ StorageTextureData{ast::TexelFormat::kRgba8Sint, ast::TextureDimension::k2d,
+ ast::Access::kWrite, "texture_storage_2d<rgba8sint, write>"},
+ StorageTextureData{ast::TexelFormat::kRgba8Sint,
+ ast::TextureDimension::k2dArray, ast::Access::kWrite,
+ "texture_storage_2d_array<rgba8sint, write>"},
+ StorageTextureData{ast::TexelFormat::kRgba8Sint, ast::TextureDimension::k3d,
+ ast::Access::kWrite,
+ "texture_storage_3d<rgba8sint, write>"}));
struct ImageFormatData {
- ast::TexelFormat fmt;
- const char* name;
+ ast::TexelFormat fmt;
+ const char* name;
};
inline std::ostream& operator<<(std::ostream& out, ImageFormatData data) {
- out << data.name;
- return out;
+ out << data.name;
+ return out;
}
using WgslGenerator_ImageFormatTest = TestParamHelper<ImageFormatData>;
TEST_P(WgslGenerator_ImageFormatTest, EmitType_StorageTexture_ImageFormat) {
- auto param = GetParam();
+ auto param = GetParam();
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitImageFormat(out, param.fmt)) << gen.error();
- EXPECT_EQ(out.str(), param.name);
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitImageFormat(out, param.fmt)) << gen.error();
+ EXPECT_EQ(out.str(), param.name);
}
INSTANTIATE_TEST_SUITE_P(
WgslGeneratorImplTest,
WgslGenerator_ImageFormatTest,
- testing::Values(
- ImageFormatData{ast::TexelFormat::kR32Uint, "r32uint"},
- ImageFormatData{ast::TexelFormat::kR32Sint, "r32sint"},
- ImageFormatData{ast::TexelFormat::kR32Float, "r32float"},
- ImageFormatData{ast::TexelFormat::kRgba8Unorm, "rgba8unorm"},
- ImageFormatData{ast::TexelFormat::kRgba8Snorm, "rgba8snorm"},
- ImageFormatData{ast::TexelFormat::kRgba8Uint, "rgba8uint"},
- ImageFormatData{ast::TexelFormat::kRgba8Sint, "rgba8sint"},
- ImageFormatData{ast::TexelFormat::kRg32Uint, "rg32uint"},
- ImageFormatData{ast::TexelFormat::kRg32Sint, "rg32sint"},
- ImageFormatData{ast::TexelFormat::kRg32Float, "rg32float"},
- ImageFormatData{ast::TexelFormat::kRgba16Uint, "rgba16uint"},
- ImageFormatData{ast::TexelFormat::kRgba16Sint, "rgba16sint"},
- ImageFormatData{ast::TexelFormat::kRgba16Float, "rgba16float"},
- ImageFormatData{ast::TexelFormat::kRgba32Uint, "rgba32uint"},
- ImageFormatData{ast::TexelFormat::kRgba32Sint, "rgba32sint"},
- ImageFormatData{ast::TexelFormat::kRgba32Float, "rgba32float"}));
+ testing::Values(ImageFormatData{ast::TexelFormat::kR32Uint, "r32uint"},
+ ImageFormatData{ast::TexelFormat::kR32Sint, "r32sint"},
+ ImageFormatData{ast::TexelFormat::kR32Float, "r32float"},
+ ImageFormatData{ast::TexelFormat::kRgba8Unorm, "rgba8unorm"},
+ ImageFormatData{ast::TexelFormat::kRgba8Snorm, "rgba8snorm"},
+ ImageFormatData{ast::TexelFormat::kRgba8Uint, "rgba8uint"},
+ ImageFormatData{ast::TexelFormat::kRgba8Sint, "rgba8sint"},
+ ImageFormatData{ast::TexelFormat::kRg32Uint, "rg32uint"},
+ ImageFormatData{ast::TexelFormat::kRg32Sint, "rg32sint"},
+ ImageFormatData{ast::TexelFormat::kRg32Float, "rg32float"},
+ ImageFormatData{ast::TexelFormat::kRgba16Uint, "rgba16uint"},
+ ImageFormatData{ast::TexelFormat::kRgba16Sint, "rgba16sint"},
+ ImageFormatData{ast::TexelFormat::kRgba16Float, "rgba16float"},
+ ImageFormatData{ast::TexelFormat::kRgba32Uint, "rgba32uint"},
+ ImageFormatData{ast::TexelFormat::kRgba32Sint, "rgba32sint"},
+ ImageFormatData{ast::TexelFormat::kRgba32Float, "rgba32float"}));
TEST_F(WgslGeneratorImplTest, EmitType_Sampler) {
- auto* sampler = ty.sampler(ast::SamplerKind::kSampler);
- Alias("make_type_reachable", sampler);
+ auto* sampler = ty.sampler(ast::SamplerKind::kSampler);
+ Alias("make_type_reachable", sampler);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitType(out, sampler)) << gen.error();
- EXPECT_EQ(out.str(), "sampler");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitType(out, sampler)) << gen.error();
+ EXPECT_EQ(out.str(), "sampler");
}
TEST_F(WgslGeneratorImplTest, EmitType_SamplerComparison) {
- auto* sampler = ty.sampler(ast::SamplerKind::kComparisonSampler);
- Alias("make_type_reachable", sampler);
+ auto* sampler = ty.sampler(ast::SamplerKind::kComparisonSampler);
+ Alias("make_type_reachable", sampler);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitType(out, sampler)) << gen.error();
- EXPECT_EQ(out.str(), "sampler_comparison");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitType(out, sampler)) << gen.error();
+ EXPECT_EQ(out.str(), "sampler_comparison");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_unary_op_test.cc b/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_unary_op_test.cc
index 1d19ca9b960..2c46b44621b 100644
--- a/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_unary_op_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_unary_op_test.cc
@@ -20,70 +20,65 @@ namespace {
using WgslUnaryOpTest = TestHelper;
TEST_F(WgslUnaryOpTest, AddressOf) {
- Global("expr", ty.f32(), ast::StorageClass::kPrivate);
- auto* op =
- create<ast::UnaryOpExpression>(ast::UnaryOp::kAddressOf, Expr("expr"));
- WrapInFunction(op);
+ Global("expr", ty.f32(), ast::StorageClass::kPrivate);
+ auto* op = create<ast::UnaryOpExpression>(ast::UnaryOp::kAddressOf, Expr("expr"));
+ WrapInFunction(op);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, op)) << gen.error();
- EXPECT_EQ(out.str(), "&(expr)");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, op)) << gen.error();
+ EXPECT_EQ(out.str(), "&(expr)");
}
TEST_F(WgslUnaryOpTest, Complement) {
- Global("expr", ty.u32(), ast::StorageClass::kPrivate);
- auto* op =
- create<ast::UnaryOpExpression>(ast::UnaryOp::kComplement, Expr("expr"));
- WrapInFunction(op);
+ Global("expr", ty.u32(), ast::StorageClass::kPrivate);
+ auto* op = create<ast::UnaryOpExpression>(ast::UnaryOp::kComplement, Expr("expr"));
+ WrapInFunction(op);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, op)) << gen.error();
- EXPECT_EQ(out.str(), "~(expr)");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, op)) << gen.error();
+ EXPECT_EQ(out.str(), "~(expr)");
}
TEST_F(WgslUnaryOpTest, Indirection) {
- Global("G", ty.f32(), ast::StorageClass::kPrivate);
- auto* p = Const(
- "expr", nullptr,
- create<ast::UnaryOpExpression>(ast::UnaryOp::kAddressOf, Expr("G")));
- auto* op =
- create<ast::UnaryOpExpression>(ast::UnaryOp::kIndirection, Expr("expr"));
- WrapInFunction(p, op);
-
- GeneratorImpl& gen = Build();
-
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, op)) << gen.error();
- EXPECT_EQ(out.str(), "*(expr)");
+ Global("G", ty.f32(), ast::StorageClass::kPrivate);
+ auto* p =
+ Let("expr", nullptr, create<ast::UnaryOpExpression>(ast::UnaryOp::kAddressOf, Expr("G")));
+ auto* op = create<ast::UnaryOpExpression>(ast::UnaryOp::kIndirection, Expr("expr"));
+ WrapInFunction(p, op);
+
+ GeneratorImpl& gen = Build();
+
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, op)) << gen.error();
+ EXPECT_EQ(out.str(), "*(expr)");
}
TEST_F(WgslUnaryOpTest, Not) {
- Global("expr", ty.bool_(), ast::StorageClass::kPrivate);
- auto* op = create<ast::UnaryOpExpression>(ast::UnaryOp::kNot, Expr("expr"));
- WrapInFunction(op);
+ Global("expr", ty.bool_(), ast::StorageClass::kPrivate);
+ auto* op = create<ast::UnaryOpExpression>(ast::UnaryOp::kNot, Expr("expr"));
+ WrapInFunction(op);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, op)) << gen.error();
- EXPECT_EQ(out.str(), "!(expr)");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, op)) << gen.error();
+ EXPECT_EQ(out.str(), "!(expr)");
}
TEST_F(WgslUnaryOpTest, Negation) {
- Global("expr", ty.i32(), ast::StorageClass::kPrivate);
- auto* op =
- create<ast::UnaryOpExpression>(ast::UnaryOp::kNegation, Expr("expr"));
- WrapInFunction(op);
+ Global("expr", ty.i32(), ast::StorageClass::kPrivate);
+ auto* op = create<ast::UnaryOpExpression>(ast::UnaryOp::kNegation, Expr("expr"));
+ WrapInFunction(op);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitExpression(out, op)) << gen.error();
- EXPECT_EQ(out.str(), "-(expr)");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitExpression(out, op)) << gen.error();
+ EXPECT_EQ(out.str(), "-(expr)");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_variable_decl_statement_test.cc b/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_variable_decl_statement_test.cc
index 5d8a026d468..a68932cf59b 100644
--- a/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_variable_decl_statement_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_variable_decl_statement_test.cc
@@ -15,37 +15,39 @@
#include "src/tint/ast/variable_decl_statement.h"
#include "src/tint/writer/wgsl/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::wgsl {
namespace {
using WgslGeneratorImplTest = TestHelper;
TEST_F(WgslGeneratorImplTest, Emit_VariableDeclStatement) {
- auto* var = Var("a", ty.f32());
+ auto* var = Var("a", ty.f32());
- auto* stmt = Decl(var);
- WrapInFunction(stmt);
+ auto* stmt = Decl(var);
+ WrapInFunction(stmt);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(stmt)) << gen.error();
- EXPECT_EQ(gen.result(), " var a : f32;\n");
+ ASSERT_TRUE(gen.EmitStatement(stmt)) << gen.error();
+ EXPECT_EQ(gen.result(), " var a : f32;\n");
}
TEST_F(WgslGeneratorImplTest, Emit_VariableDeclStatement_InferredType) {
- auto* var = Var("a", nullptr, ast::StorageClass::kNone, Expr(123));
+ auto* var = Var("a", nullptr, ast::StorageClass::kNone, Expr(123_i));
- auto* stmt = Decl(var);
- WrapInFunction(stmt);
+ auto* stmt = Decl(var);
+ WrapInFunction(stmt);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- gen.increment_indent();
+ gen.increment_indent();
- ASSERT_TRUE(gen.EmitStatement(stmt)) << gen.error();
- EXPECT_EQ(gen.result(), " var a = 123;\n");
+ ASSERT_TRUE(gen.EmitStatement(stmt)) << gen.error();
+ EXPECT_EQ(gen.result(), " var a = 123i;\n");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_variable_test.cc b/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_variable_test.cc
index 8d239edb357..83af7c25ea9 100644
--- a/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_variable_test.cc
+++ b/chromium/third_party/dawn/src/tint/writer/wgsl/generator_impl_variable_test.cc
@@ -14,114 +14,111 @@
#include "src/tint/writer/wgsl/test_helper.h"
+using namespace tint::number_suffixes; // NOLINT
+
namespace tint::writer::wgsl {
namespace {
using WgslGeneratorImplTest = TestHelper;
TEST_F(WgslGeneratorImplTest, EmitVariable) {
- auto* v = Global("a", ty.f32(), ast::StorageClass::kPrivate);
+ auto* v = Global("a", ty.f32(), ast::StorageClass::kPrivate);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitVariable(out, v)) << gen.error();
- EXPECT_EQ(out.str(), R"(var<private> a : f32;)");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitVariable(out, v)) << gen.error();
+ EXPECT_EQ(out.str(), R"(var<private> a : f32;)");
}
TEST_F(WgslGeneratorImplTest, EmitVariable_StorageClass) {
- auto* v = Global("a", ty.f32(), ast::StorageClass::kPrivate);
+ auto* v = Global("a", ty.f32(), ast::StorageClass::kPrivate);
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitVariable(out, v)) << gen.error();
- EXPECT_EQ(out.str(), R"(var<private> a : f32;)");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitVariable(out, v)) << gen.error();
+ EXPECT_EQ(out.str(), R"(var<private> a : f32;)");
}
TEST_F(WgslGeneratorImplTest, EmitVariable_Access_Read) {
- auto* s = Structure("S", {Member("a", ty.i32())});
- auto* v =
- Global("a", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kRead,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(0),
- });
-
- GeneratorImpl& gen = Build();
-
- std::stringstream out;
- ASSERT_TRUE(gen.EmitVariable(out, v)) << gen.error();
- EXPECT_EQ(out.str(), R"(@binding(0) @group(0) var<storage, read> a : S;)");
+ auto* s = Structure("S", {Member("a", ty.i32())});
+ auto* v = Global("a", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kRead,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(0),
+ });
+
+ GeneratorImpl& gen = Build();
+
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitVariable(out, v)) << gen.error();
+ EXPECT_EQ(out.str(), R"(@binding(0) @group(0) var<storage, read> a : S;)");
}
TEST_F(WgslGeneratorImplTest, EmitVariable_Access_Write) {
- auto* s = Structure("S", {Member("a", ty.i32())});
- auto* v =
- Global("a", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kWrite,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(0),
- });
-
- GeneratorImpl& gen = Build();
-
- std::stringstream out;
- ASSERT_TRUE(gen.EmitVariable(out, v)) << gen.error();
- EXPECT_EQ(out.str(), R"(@binding(0) @group(0) var<storage, write> a : S;)");
+ auto* s = Structure("S", {Member("a", ty.i32())});
+ auto* v = Global("a", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kWrite,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(0),
+ });
+
+ GeneratorImpl& gen = Build();
+
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitVariable(out, v)) << gen.error();
+ EXPECT_EQ(out.str(), R"(@binding(0) @group(0) var<storage, write> a : S;)");
}
TEST_F(WgslGeneratorImplTest, EmitVariable_Access_ReadWrite) {
- auto* s = Structure("S", {Member("a", ty.i32())});
- auto* v = Global("a", ty.Of(s), ast::StorageClass::kStorage,
- ast::Access::kReadWrite,
- ast::AttributeList{
- create<ast::BindingAttribute>(0),
- create<ast::GroupAttribute>(0),
- });
-
- GeneratorImpl& gen = Build();
-
- std::stringstream out;
- ASSERT_TRUE(gen.EmitVariable(out, v)) << gen.error();
- EXPECT_EQ(out.str(),
- R"(@binding(0) @group(0) var<storage, read_write> a : S;)");
+ auto* s = Structure("S", {Member("a", ty.i32())});
+ auto* v = Global("a", ty.Of(s), ast::StorageClass::kStorage, ast::Access::kReadWrite,
+ ast::AttributeList{
+ create<ast::BindingAttribute>(0),
+ create<ast::GroupAttribute>(0),
+ });
+
+ GeneratorImpl& gen = Build();
+
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitVariable(out, v)) << gen.error();
+ EXPECT_EQ(out.str(), R"(@binding(0) @group(0) var<storage, read_write> a : S;)");
}
TEST_F(WgslGeneratorImplTest, EmitVariable_Decorated) {
- auto* v = Global("a", ty.sampler(ast::SamplerKind::kSampler),
- ast::StorageClass::kNone, nullptr,
- ast::AttributeList{
- create<ast::GroupAttribute>(1),
- create<ast::BindingAttribute>(2),
- });
-
- GeneratorImpl& gen = Build();
-
- std::stringstream out;
- ASSERT_TRUE(gen.EmitVariable(out, v)) << gen.error();
- EXPECT_EQ(out.str(), R"(@group(1) @binding(2) var a : sampler;)");
+ auto* v = Global("a", ty.sampler(ast::SamplerKind::kSampler), ast::StorageClass::kNone, nullptr,
+ ast::AttributeList{
+ create<ast::GroupAttribute>(1),
+ create<ast::BindingAttribute>(2),
+ });
+
+ GeneratorImpl& gen = Build();
+
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitVariable(out, v)) << gen.error();
+ EXPECT_EQ(out.str(), R"(@group(1) @binding(2) var a : sampler;)");
}
TEST_F(WgslGeneratorImplTest, EmitVariable_Constructor) {
- auto* v = Global("a", ty.f32(), ast::StorageClass::kPrivate, Expr(1.0f));
+ auto* v = Global("a", ty.f32(), ast::StorageClass::kPrivate, Expr(1_f));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitVariable(out, v)) << gen.error();
- EXPECT_EQ(out.str(), R"(var<private> a : f32 = 1.0;)");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitVariable(out, v)) << gen.error();
+ EXPECT_EQ(out.str(), R"(var<private> a : f32 = 1.0f;)");
}
TEST_F(WgslGeneratorImplTest, EmitVariable_Const) {
- auto* v = Const("a", ty.f32(), Expr(1.0f));
- WrapInFunction(Decl(v));
+ auto* v = Let("a", ty.f32(), Expr(1_f));
+ WrapInFunction(Decl(v));
- GeneratorImpl& gen = Build();
+ GeneratorImpl& gen = Build();
- std::stringstream out;
- ASSERT_TRUE(gen.EmitVariable(out, v)) << gen.error();
- EXPECT_EQ(out.str(), R"(let a : f32 = 1.0;)");
+ std::stringstream out;
+ ASSERT_TRUE(gen.EmitVariable(out, v)) << gen.error();
+ EXPECT_EQ(out.str(), R"(let a : f32 = 1.0f;)");
}
} // namespace
diff --git a/chromium/third_party/dawn/src/tint/writer/wgsl/test_helper.h b/chromium/third_party/dawn/src/tint/writer/wgsl/test_helper.h
index ca320c450a3..4cf1e93c6bf 100644
--- a/chromium/third_party/dawn/src/tint/writer/wgsl/test_helper.h
+++ b/chromium/third_party/dawn/src/tint/writer/wgsl/test_helper.h
@@ -27,34 +27,31 @@ namespace tint::writer::wgsl {
/// Helper class for testing
template <typename BASE>
class TestHelperBase : public BASE, public ProgramBuilder {
- public:
- TestHelperBase() = default;
-
- ~TestHelperBase() override = default;
-
- /// Builds and returns a GeneratorImpl from the program.
- /// @note The generator is only built once. Multiple calls to Build() will
- /// return the same GeneratorImpl without rebuilding.
- /// @return the built generator
- GeneratorImpl& Build() {
- if (gen_) {
- return *gen_;
+ public:
+ TestHelperBase() = default;
+
+ ~TestHelperBase() override = default;
+
+ /// Builds and returns a GeneratorImpl from the program.
+ /// @note The generator is only built once. Multiple calls to Build() will
+ /// return the same GeneratorImpl without rebuilding.
+ /// @return the built generator
+ GeneratorImpl& Build() {
+ if (gen_) {
+ return *gen_;
+ }
+ program = std::make_unique<Program>(std::move(*this));
+ diag::Formatter formatter;
+ [&]() { ASSERT_TRUE(program->IsValid()) << formatter.format(program->Diagnostics()); }();
+ gen_ = std::make_unique<GeneratorImpl>(program.get());
+ return *gen_;
}
- program = std::make_unique<Program>(std::move(*this));
- diag::Formatter formatter;
- [&]() {
- ASSERT_TRUE(program->IsValid())
- << formatter.format(program->Diagnostics());
- }();
- gen_ = std::make_unique<GeneratorImpl>(program.get());
- return *gen_;
- }
- /// The program built with a call to Build()
- std::unique_ptr<Program> program;
+ /// The program built with a call to Build()
+ std::unique_ptr<Program> program;
- private:
- std::unique_ptr<GeneratorImpl> gen_;
+ private:
+ std::unique_ptr<GeneratorImpl> gen_;
};
using TestHelper = TestHelperBase<testing::Test>;
diff --git a/chromium/third_party/dawn/src/tint/writer/writer.h b/chromium/third_party/dawn/src/tint/writer/writer.h
index d09622b89c1..ea7016ef5fe 100644
--- a/chromium/third_party/dawn/src/tint/writer/writer.h
+++ b/chromium/third_party/dawn/src/tint/writer/writer.h
@@ -21,23 +21,23 @@ namespace tint::writer {
/// Base class for the output writers
class Writer {
- public:
- virtual ~Writer();
+ public:
+ virtual ~Writer();
- /// @returns the writer error string
- const std::string& error() const { return error_; }
+ /// @returns the writer error string
+ const std::string& error() const { return error_; }
- /// Converts the module into the desired format
- /// @returns true on success; false on failure
- virtual bool Generate() = 0;
+ /// Converts the module into the desired format
+ /// @returns true on success; false on failure
+ virtual bool Generate() = 0;
- protected:
- /// Sets the error string
- /// @param msg the error message
- void set_error(const std::string& msg) { error_ = msg; }
+ protected:
+ /// Sets the error string
+ /// @param msg the error message
+ void set_error(const std::string& msg) { error_ = msg; }
- /// An error message, if an error was encountered
- std::string error_;
+ /// An error message, if an error was encountered
+ std::string error_;
};
} // namespace tint::writer
diff --git a/chromium/third_party/dawn/test/tint/BUILD.gn b/chromium/third_party/dawn/test/tint/BUILD.gn
index bb217df4853..c75f50e467d 100644
--- a/chromium/third_party/dawn/test/tint/BUILD.gn
+++ b/chromium/third_party/dawn/test/tint/BUILD.gn
@@ -163,8 +163,10 @@ tint_unittests_source_set("tint_unittests_ast_src") {
"../../src/tint/ast/depth_multisampled_texture_test.cc",
"../../src/tint/ast/depth_texture_test.cc",
"../../src/tint/ast/discard_statement_test.cc",
- "../../src/tint/ast/else_statement_test.cc",
+ "../../src/tint/ast/enable_test.cc",
+ "../../src/tint/ast/extension_test.cc",
"../../src/tint/ast/external_texture_test.cc",
+ "../../src/tint/ast/f16_test.cc",
"../../src/tint/ast/f32_test.cc",
"../../src/tint/ast/fallthrough_statement_test.cc",
"../../src/tint/ast/float_literal_expression_test.cc",
@@ -192,7 +194,6 @@ tint_unittests_source_set("tint_unittests_ast_src") {
"../../src/tint/ast/return_statement_test.cc",
"../../src/tint/ast/sampled_texture_test.cc",
"../../src/tint/ast/sampler_test.cc",
- "../../src/tint/ast/sint_literal_expression_test.cc",
"../../src/tint/ast/stage_attribute_test.cc",
"../../src/tint/ast/storage_texture_test.cc",
"../../src/tint/ast/stride_attribute_test.cc",
@@ -206,7 +207,6 @@ tint_unittests_source_set("tint_unittests_ast_src") {
"../../src/tint/ast/texture_test.cc",
"../../src/tint/ast/traverse_expressions_test.cc",
"../../src/tint/ast/u32_test.cc",
- "../../src/tint/ast/uint_literal_expression_test.cc",
"../../src/tint/ast/unary_op_expression_test.cc",
"../../src/tint/ast/variable_decl_statement_test.cc",
"../../src/tint/ast/variable_test.cc",
@@ -254,8 +254,10 @@ tint_unittests_source_set("tint_unittests_resolver_src") {
"../../src/tint/resolver/function_validation_test.cc",
"../../src/tint/resolver/host_shareable_validation_test.cc",
"../../src/tint/resolver/increment_decrement_validation_test.cc",
+ "../../src/tint/resolver/intrinsic_table_test.cc",
"../../src/tint/resolver/is_host_shareable_test.cc",
"../../src/tint/resolver/is_storeable_test.cc",
+ "../../src/tint/resolver/materialize_test.cc",
"../../src/tint/resolver/pipeline_overridable_constant_test.cc",
"../../src/tint/resolver/ptr_ref_test.cc",
"../../src/tint/resolver/ptr_ref_validation_test.cc",
@@ -265,6 +267,7 @@ tint_unittests_source_set("tint_unittests_resolver_src") {
"../../src/tint/resolver/resolver_test_helper.cc",
"../../src/tint/resolver/resolver_test_helper.h",
"../../src/tint/resolver/side_effects_test.cc",
+ "../../src/tint/resolver/source_variable_test.cc",
"../../src/tint/resolver/storage_class_layout_validation_test.cc",
"../../src/tint/resolver/storage_class_validation_test.cc",
"../../src/tint/resolver/struct_layout_test.cc",
@@ -272,7 +275,9 @@ tint_unittests_source_set("tint_unittests_resolver_src") {
"../../src/tint/resolver/struct_storage_class_use_test.cc",
"../../src/tint/resolver/type_constructor_validation_test.cc",
"../../src/tint/resolver/type_validation_test.cc",
+ "../../src/tint/resolver/uniformity_test.cc",
"../../src/tint/resolver/validation_test.cc",
+ "../../src/tint/resolver/validator_is_storeable_test.cc",
"../../src/tint/resolver/var_let_test.cc",
"../../src/tint/resolver/var_let_validation_test.cc",
]
@@ -281,27 +286,31 @@ tint_unittests_source_set("tint_unittests_resolver_src") {
tint_unittests_source_set("tint_unittests_sem_src") {
sources = [
- "../../src/tint/sem/atomic_type_test.cc",
- "../../src/tint/sem/bool_type_test.cc",
+ "../../src/tint/sem/atomic_test.cc",
+ "../../src/tint/sem/bool_test.cc",
"../../src/tint/sem/builtin_test.cc",
- "../../src/tint/sem/depth_multisampled_texture_type_test.cc",
- "../../src/tint/sem/depth_texture_type_test.cc",
- "../../src/tint/sem/external_texture_type_test.cc",
- "../../src/tint/sem/f32_type_test.cc",
- "../../src/tint/sem/i32_type_test.cc",
- "../../src/tint/sem/matrix_type_test.cc",
- "../../src/tint/sem/multisampled_texture_type_test.cc",
- "../../src/tint/sem/pointer_type_test.cc",
- "../../src/tint/sem/reference_type_test.cc",
- "../../src/tint/sem/sampled_texture_type_test.cc",
- "../../src/tint/sem/sampler_type_test.cc",
+ "../../src/tint/sem/constant_test.cc",
+ "../../src/tint/sem/depth_multisampled_texture_test.cc",
+ "../../src/tint/sem/depth_texture_test.cc",
+ "../../src/tint/sem/expression_test.cc",
+ "../../src/tint/sem/external_texture_test.cc",
+ "../../src/tint/sem/f16_test.cc",
+ "../../src/tint/sem/f32_test.cc",
+ "../../src/tint/sem/i32_test.cc",
+ "../../src/tint/sem/matrix_test.cc",
+ "../../src/tint/sem/multisampled_texture_test.cc",
+ "../../src/tint/sem/pointer_test.cc",
+ "../../src/tint/sem/reference_test.cc",
+ "../../src/tint/sem/sampled_texture_test.cc",
+ "../../src/tint/sem/sampler_test.cc",
"../../src/tint/sem/sem_array_test.cc",
"../../src/tint/sem/sem_struct_test.cc",
- "../../src/tint/sem/storage_texture_type_test.cc",
- "../../src/tint/sem/texture_type_test.cc",
+ "../../src/tint/sem/storage_texture_test.cc",
+ "../../src/tint/sem/texture_test.cc",
+ "../../src/tint/sem/type_test.cc",
"../../src/tint/sem/type_manager_test.cc",
- "../../src/tint/sem/u32_type_test.cc",
- "../../src/tint/sem/vector_type_test.cc",
+ "../../src/tint/sem/u32_test.cc",
+ "../../src/tint/sem/vector_test.cc",
]
}
@@ -322,9 +331,9 @@ tint_unittests_source_set("tint_unittests_transform_src") {
"../../src/tint/transform/decompose_memory_access_test.cc",
"../../src/tint/transform/decompose_strided_array_test.cc",
"../../src/tint/transform/decompose_strided_matrix_test.cc",
+ "../../src/tint/transform/disable_uniformity_analysis_test.cc",
"../../src/tint/transform/expand_compound_assignment_test.cc",
"../../src/tint/transform/first_index_offset_test.cc",
- "../../src/tint/transform/fold_constants_test.cc",
"../../src/tint/transform/fold_trivial_single_use_lets_test.cc",
"../../src/tint/transform/for_loop_to_loop_test.cc",
"../../src/tint/transform/localize_struct_array_assignment_test.cc",
@@ -357,6 +366,7 @@ tint_unittests_source_set("tint_unittests_transform_src") {
tint_unittests_source_set("tint_unittests_utils_src") {
sources = [
+ "../../src/tint/utils/bitcast_test.cc",
"../../src/tint/utils/crc32_test.cc",
"../../src/tint/utils/defer_test.cc",
"../../src/tint/utils/enum_set_test.cc",
@@ -365,6 +375,7 @@ tint_unittests_source_set("tint_unittests_utils_src") {
"../../src/tint/utils/io/tmpfile_test.cc",
"../../src/tint/utils/map_test.cc",
"../../src/tint/utils/math_test.cc",
+ "../../src/tint/utils/result_test.cc",
"../../src/tint/utils/reverse_test.cc",
"../../src/tint/utils/scoped_assignment_test.cc",
"../../src/tint/utils/string_test.cc",
@@ -377,6 +388,7 @@ tint_unittests_source_set("tint_unittests_utils_src") {
tint_unittests_source_set("tint_unittests_writer_src") {
sources = [
"../../src/tint/writer/append_vector_test.cc",
+ "../../src/tint/writer/flatten_bindings_test.cc",
"../../src/tint/writer/float_to_string_test.cc",
"../../src/tint/writer/generate_external_texture_bindings_test.cc",
"../../src/tint/writer/text_generator_test.cc",
@@ -482,13 +494,13 @@ tint_unittests_source_set("tint_unittests_wgsl_reader_src") {
"../../src/tint/reader/wgsl/parser_impl_const_literal_test.cc",
"../../src/tint/reader/wgsl/parser_impl_continue_stmt_test.cc",
"../../src/tint/reader/wgsl/parser_impl_continuing_stmt_test.cc",
- "../../src/tint/reader/wgsl/parser_impl_depth_texture_type_test.cc",
- "../../src/tint/reader/wgsl/parser_impl_elseif_stmt_test.cc",
+ "../../src/tint/reader/wgsl/parser_impl_depth_texture_test.cc",
+ "../../src/tint/reader/wgsl/parser_impl_enable_directive_test.cc",
"../../src/tint/reader/wgsl/parser_impl_equality_expression_test.cc",
"../../src/tint/reader/wgsl/parser_impl_error_msg_test.cc",
"../../src/tint/reader/wgsl/parser_impl_error_resync_test.cc",
"../../src/tint/reader/wgsl/parser_impl_exclusive_or_expression_test.cc",
- "../../src/tint/reader/wgsl/parser_impl_external_texture_type_test.cc",
+ "../../src/tint/reader/wgsl/parser_impl_external_texture_test.cc",
"../../src/tint/reader/wgsl/parser_impl_for_stmt_test.cc",
"../../src/tint/reader/wgsl/parser_impl_function_attribute_list_test.cc",
"../../src/tint/reader/wgsl/parser_impl_function_attribute_test.cc",
@@ -510,14 +522,14 @@ tint_unittests_source_set("tint_unittests_wgsl_reader_src") {
"../../src/tint/reader/wgsl/parser_impl_primary_expression_test.cc",
"../../src/tint/reader/wgsl/parser_impl_relational_expression_test.cc",
"../../src/tint/reader/wgsl/parser_impl_reserved_keyword_test.cc",
- "../../src/tint/reader/wgsl/parser_impl_sampled_texture_type_test.cc",
- "../../src/tint/reader/wgsl/parser_impl_sampler_type_test.cc",
+ "../../src/tint/reader/wgsl/parser_impl_sampled_texture_test.cc",
+ "../../src/tint/reader/wgsl/parser_impl_sampler_test.cc",
"../../src/tint/reader/wgsl/parser_impl_shift_expression_test.cc",
"../../src/tint/reader/wgsl/parser_impl_singular_expression_test.cc",
"../../src/tint/reader/wgsl/parser_impl_statement_test.cc",
"../../src/tint/reader/wgsl/parser_impl_statements_test.cc",
"../../src/tint/reader/wgsl/parser_impl_storage_class_test.cc",
- "../../src/tint/reader/wgsl/parser_impl_storage_texture_type_test.cc",
+ "../../src/tint/reader/wgsl/parser_impl_storage_texture_test.cc",
"../../src/tint/reader/wgsl/parser_impl_struct_attribute_decl_test.cc",
"../../src/tint/reader/wgsl/parser_impl_struct_body_decl_test.cc",
"../../src/tint/reader/wgsl/parser_impl_struct_decl_test.cc",
@@ -530,7 +542,7 @@ tint_unittests_source_set("tint_unittests_wgsl_reader_src") {
"../../src/tint/reader/wgsl/parser_impl_test_helper.cc",
"../../src/tint/reader/wgsl/parser_impl_test_helper.h",
"../../src/tint/reader/wgsl/parser_impl_texel_format_test.cc",
- "../../src/tint/reader/wgsl/parser_impl_texture_sampler_types_test.cc",
+ "../../src/tint/reader/wgsl/parser_impl_texture_sampler_test.cc",
"../../src/tint/reader/wgsl/parser_impl_type_alias_test.cc",
"../../src/tint/reader/wgsl/parser_impl_type_decl_test.cc",
"../../src/tint/reader/wgsl/parser_impl_unary_expression_test.cc",
@@ -562,6 +574,7 @@ tint_unittests_source_set("tint_unittests_wgsl_writer_src") {
"../../src/tint/writer/wgsl/generator_impl_constructor_test.cc",
"../../src/tint/writer/wgsl/generator_impl_continue_test.cc",
"../../src/tint/writer/wgsl/generator_impl_discard_test.cc",
+ "../../src/tint/writer/wgsl/generator_impl_enable_test.cc",
"../../src/tint/writer/wgsl/generator_impl_fallthrough_test.cc",
"../../src/tint/writer/wgsl/generator_impl_function_test.cc",
"../../src/tint/writer/wgsl/generator_impl_global_decl_test.cc",
@@ -710,11 +723,11 @@ tint_unittests_source_set("tint_unittests_glsl_writer_src") {
tint_unittests_source_set("tint_unittests_core_src") {
sources = [
- "../../src/tint/builtin_table_test.cc",
"../../src/tint/castable_test.cc",
"../../src/tint/clone_context_test.cc",
"../../src/tint/debug_test.cc",
"../../src/tint/demangler_test.cc",
+ "../../src/tint/number_test.cc",
"../../src/tint/program_builder_test.cc",
"../../src/tint/program_test.cc",
"../../src/tint/scope_stack_test.cc",
diff --git a/chromium/third_party/dawn/third_party/CMakeLists.txt b/chromium/third_party/dawn/third_party/CMakeLists.txt
index d516352adc3..a8dcf80227d 100644
--- a/chromium/third_party/dawn/third_party/CMakeLists.txt
+++ b/chromium/third_party/dawn/third_party/CMakeLists.txt
@@ -23,24 +23,12 @@ if (NOT TARGET SPIRV-Headers)
add_subdirectory(${DAWN_SPIRV_HEADERS_DIR} "${CMAKE_CURRENT_BINARY_DIR}/spirv-headers")
endif()
-if(${TINT_BUILD_GLSL_WRITER})
- if(${TINT_BUILD_SAMPLES})
- add_subdirectory("${DAWN_THIRD_PARTY_DIR}/vulkan-deps/glslang/src" "${CMAKE_CURRENT_BINARY_DIR}/glslang" EXCLUDE_FROM_ALL)
- endif()
-endif()
-
# Needs to come before SPIR-V Tools
if ((${TINT_BUILD_SPIRV_TOOLS_FUZZER} OR ${TINT_BUILD_AST_FUZZER}) AND
- (NOT TARGET protobuf::libprotobuf OR NOT TARGET protobuf::protoc))
- set(protobuf_BUILD_TESTS OFF CACHE BOOL "Controls whether protobuf tests are built" FORCE)
- set(protobuf_MSVC_STATIC_RUNTIME OFF CACHE BOOL "Controls whether a protobuf static runtime is built" FORCE)
- add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/protobuf/cmake)
-endif()
-
-if(${TINT_BUILD_SPV_READER} OR ${TINT_BUILD_SPV_WRITER})
- if (NOT IS_DIRECTORY "${SPIRV-Headers_SOURCE_DIR}")
- set(SPIRV-Headers_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/spirv-headers CACHE STRING "Source directory for SPIR-V headers")
- endif()
+ (NOT TARGET protobuf::libprotobuf OR NOT TARGET protobuf::protoc))
+ set(protobuf_BUILD_TESTS OFF CACHE BOOL "Controls whether protobuf tests are built" FORCE)
+ set(protobuf_MSVC_STATIC_RUNTIME OFF CACHE BOOL "Controls whether a protobuf static runtime is built" FORCE)
+ add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/protobuf/cmake)
endif()
if (NOT TARGET SPIRV-Tools)
@@ -49,17 +37,22 @@ if (NOT TARGET SPIRV-Tools)
set(SKIP_SPIRV_TOOLS_INSTALL ON CACHE BOOL "" FORCE)
if(${TINT_BUILD_SPV_READER} OR ${TINT_BUILD_SPV_WRITER})
- set(SPIRV_SKIP_TESTS ON CACHE BOOL "Controls whether SPIR-V tests are run" FORCE)
- set(SPIRV_WERROR OFF CACHE BOOL OFF FORCE)
- if (${TINT_BUILD_SPIRV_TOOLS_FUZZER})
- set(SPIRV_BUILD_FUZZER ON CACHE BOOL "Controls whether spirv-fuzz is built" FORCE)
- endif()
+ set(SPIRV_SKIP_TESTS ON CACHE BOOL "Controls whether SPIR-V tests are run" FORCE)
+ set(SPIRV_WERROR OFF CACHE BOOL OFF FORCE)
+ if (${TINT_BUILD_SPIRV_TOOLS_FUZZER})
+ set(SPIRV_BUILD_FUZZER ON CACHE BOOL "Controls whether spirv-fuzz is built" FORCE)
+ endif()
endif()
message(STATUS "Dawn: using SPIRV-Tools at ${DAWN_SPIRV_TOOLS_DIR}")
add_subdirectory(${DAWN_SPIRV_TOOLS_DIR} "${CMAKE_CURRENT_BINARY_DIR}/spirv-tools" EXCLUDE_FROM_ALL)
endif()
+if(NOT TARGET glslang AND ${TINT_BUILD_GLSL_WRITER} AND ${TINT_BUILD_SAMPLES})
+ set(SKIP_GLSLANG_INSTALL ON CACHE BOOL "" FORCE)
+ add_subdirectory("${DAWN_THIRD_PARTY_DIR}/vulkan-deps/glslang/src" "${CMAKE_CURRENT_BINARY_DIR}/glslang" EXCLUDE_FROM_ALL)
+endif()
+
if (NOT TARGET glfw)
set(GLFW_BUILD_DOCS OFF CACHE BOOL "" FORCE)
set(GLFW_BUILD_TESTS OFF CACHE BOOL "" FORCE)
@@ -96,7 +89,7 @@ target_sources(dawn_vulkan_headers INTERFACE
)
target_include_directories(dawn_vulkan_headers INTERFACE "${CMAKE_CURRENT_SOURCE_DIR}/khronos")
-if (${DAWN_ENABLE_SWIFTSHADER} AND NOT TARGET vk_swiftshader)
+if (NOT TARGET vk_swiftshader AND ${DAWN_ENABLE_SWIFTSHADER})
set(SWIFTSHADER_BUILD_TESTS OFF CACHE BOOL "" FORCE)
set(SWIFTSHADER_BUILD_BENCHMARKS OFF CACHE BOOL "" FORCE)
@@ -105,11 +98,11 @@ if (${DAWN_ENABLE_SWIFTSHADER} AND NOT TARGET vk_swiftshader)
endif()
if (${TINT_BUILD_BENCHMARKS})
- set(BENCHMARK_ENABLE_TESTING FALSE CACHE BOOL FALSE FORCE)
- add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/benchmark EXCLUDE_FROM_ALL)
+ set(BENCHMARK_ENABLE_TESTING FALSE CACHE BOOL FALSE FORCE)
+ add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/benchmark EXCLUDE_FROM_ALL)
endif()
-if (${TINT_BUILD_TESTS} AND NOT TARGET gmock)
- set(gtest_force_shared_crt ON CACHE BOOL "Controls whether a shared run-time library should be used even when Google Test is built as static library" FORCE)
- add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/googletest EXCLUDE_FROM_ALL)
+if (NOT TARGET gmock AND ${TINT_BUILD_TESTS})
+ set(gtest_force_shared_crt ON CACHE BOOL "Controls whether a shared run-time library should be used even when Google Test is built as static library" FORCE)
+ add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/googletest EXCLUDE_FROM_ALL)
endif()
diff --git a/chromium/third_party/dawn/third_party/glfw/.appveyor.yml b/chromium/third_party/dawn/third_party/glfw/.appveyor.yml
new file mode 100644
index 00000000000..2742949bfdc
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/.appveyor.yml
@@ -0,0 +1,47 @@
+image:
+ - Visual Studio 2015
+branches:
+ only:
+ - ci
+ - master
+ - latest
+ - 3.3-stable
+skip_tags: true
+environment:
+ matrix:
+ - GENERATOR: MinGW Makefiles
+ BUILD_SHARED_LIBS: ON
+ CFLAGS: -Werror
+ - GENERATOR: MinGW Makefiles
+ BUILD_SHARED_LIBS: OFF
+ CFLAGS: -Werror
+ - GENERATOR: Visual Studio 10 2010
+ BUILD_SHARED_LIBS: ON
+ CFLAGS: /WX
+ - GENERATOR: Visual Studio 10 2010
+ BUILD_SHARED_LIBS: OFF
+ CFLAGS: /WX
+matrix:
+ fast_finish: true
+for:
+-
+ matrix:
+ only:
+ - GENERATOR: MinGW Makefiles
+ build_script:
+ - set PATH=%PATH:C:\Program Files\Git\usr\bin=C:\MinGW\bin%
+ - cmake -S . -B build -G "%GENERATOR%" -DBUILD_SHARED_LIBS=%BUILD_SHARED_LIBS%
+ - cmake --build build
+-
+ matrix:
+ only:
+ - GENERATOR: Visual Studio 10 2010
+ build_script:
+ - cmake -S . -B build -G "%GENERATOR%" -DBUILD_SHARED_LIBS=%BUILD_SHARED_LIBS%
+ - cmake --build build --target glfw
+notifications:
+ - provider: Email
+ to:
+ - ci@glfw.org
+ on_build_failure: true
+ on_build_success: false
diff --git a/chromium/third_party/dawn/third_party/glfw/.github/CODEOWNERS b/chromium/third_party/dawn/third_party/glfw/.github/CODEOWNERS
new file mode 100644
index 00000000000..018808ba0ef
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/.github/CODEOWNERS
@@ -0,0 +1,10 @@
+
+* @elmindreda
+
+src/wl_* @linkmauve
+
+docs/*.css @glfw/webdev
+docs/*.scss @glfw/webdev
+docs/*.html @glfw/webdev
+docs/*.xml @glfw/webdev
+
diff --git a/chromium/third_party/dawn/third_party/glfw/.github/workflows/build.yml b/chromium/third_party/dawn/third_party/glfw/.github/workflows/build.yml
new file mode 100644
index 00000000000..7f798851b1d
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/.github/workflows/build.yml
@@ -0,0 +1,93 @@
+name: Build
+on:
+ pull_request:
+ push:
+ branches: [ ci, master, latest, 3.3-stable ]
+permissions:
+ statuses: write
+ contents: read
+
+jobs:
+ build-linux-x11-clang:
+ name: X11 (Linux, Clang)
+ runs-on: ubuntu-latest
+ env:
+ CC: clang
+ CFLAGS: -Werror
+ steps:
+ - uses: actions/checkout@v2
+ - name: Install dependencies
+ run: |
+ sudo apt update
+ sudo apt install libxrandr-dev libxinerama-dev libxcursor-dev libxi-dev libxext-dev
+
+ - name: Configure static library
+ run: cmake -S . -B build-static
+ - name: Build static library
+ run: cmake --build build-static --parallel
+
+ - name: Configure shared library
+ run: cmake -S . -B build-shared -D BUILD_SHARED_LIBS=ON
+ - name: Build shared library
+ run: cmake --build build-shared --parallel
+
+ build-linux-full-clang:
+ name: X11+Wayland (Linux, Clang)
+ runs-on: ubuntu-latest
+ env:
+ CC: clang
+ CFLAGS: -Werror
+ steps:
+ - uses: actions/checkout@v2
+ - name: Install dependencies
+ run: |
+ sudo apt update
+ sudo apt install libxrandr-dev libxinerama-dev libxcursor-dev libxi-dev libxext-dev wayland-protocols libwayland-dev libxkbcommon-dev
+
+ - name: Configure static library
+ run: cmake -S . -B build-static -D GLFW_BUILD_WAYLAND=ON
+ - name: Build static library
+ run: cmake --build build-static --parallel
+
+ - name: Configure shared library
+ run: cmake -S . -B build-shared -D GLFW_BUILD_WAYLAND=ON -D BUILD_SHARED_LIBS=ON
+ - name: Build shared library
+ run: cmake --build build-shared --parallel
+
+ build-macos-cocoa-clang:
+ name: Cocoa (macOS, Clang)
+ runs-on: macos-latest
+ env:
+ CFLAGS: -Werror
+ MACOSX_DEPLOYMENT_TARGET: 10.8
+ steps:
+ - uses: actions/checkout@v2
+
+ - name: Configure static library
+ run: cmake -S . -B build-static
+ - name: Build static library
+ run: cmake --build build-static --parallel
+
+ - name: Configure shared library
+ run: cmake -S . -B build-shared -D BUILD_SHARED_LIBS=ON
+ - name: Build shared library
+ run: cmake --build build-shared --parallel
+
+ build-windows-win32-vs2022:
+ name: Win32 (Windows, VS2022)
+ runs-on: windows-latest
+ env:
+ CFLAGS: /WX
+ steps:
+ - uses: actions/checkout@v2
+
+ - name: Configure static library
+ run: cmake -S . -B build-static -G "Visual Studio 17 2022"
+ - name: Build static library
+ run: cmake --build build-static --parallel
+
+ - name: Configure shared library
+ run: cmake -S . -B build-shared -G "Visual Studio 17 2022" -D BUILD_SHARED_LIBS=ON
+ - name: Build shared library
+ run: cmake --build build-shared --parallel
+
diff --git a/chromium/third_party/dawn/third_party/glfw/.mailmap b/chromium/third_party/dawn/third_party/glfw/.mailmap
new file mode 100644
index 00000000000..96d8a9b77f5
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/.mailmap
@@ -0,0 +1,10 @@
+Camilla Löwy <elmindreda@glfw.org> <elmindreda@users.sourceforge.net>
+Camilla Löwy <elmindreda@glfw.org> <elmindreda@elmindreda.org>
+Camilla Löwy <elmindreda@glfw.org>
+
+Emmanuel Gil Peyrot <linkmauve@linkmauve.fr>
+
+Marcus Geelnard <m@bitsnbites.eu> <marcus256@users.sourceforge.net>
+Marcus Geelnard <m@bitsnbites.eu> <marcus@geelnards-pc.(none)>
+Marcus Geelnard <m@bitsnbites.eu>
+
diff --git a/chromium/third_party/dawn/third_party/glfw/CMake/GenerateMappings.cmake b/chromium/third_party/dawn/third_party/glfw/CMake/GenerateMappings.cmake
new file mode 100644
index 00000000000..c8c9e23f290
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/CMake/GenerateMappings.cmake
@@ -0,0 +1,48 @@
+# Usage:
+# cmake -P GenerateMappings.cmake <path/to/mappings.h.in> <path/to/mappings.h>
+
+set(source_url "https://raw.githubusercontent.com/gabomdq/SDL_GameControllerDB/master/gamecontrollerdb.txt")
+set(source_path "${CMAKE_CURRENT_BINARY_DIR}/gamecontrollerdb.txt")
+set(template_path "${CMAKE_ARGV3}")
+set(target_path "${CMAKE_ARGV4}")
+
+if (NOT EXISTS "${template_path}")
+ message(FATAL_ERROR "Failed to find template file ${template_path}")
+endif()
+
+file(DOWNLOAD "${source_url}" "${source_path}"
+ STATUS download_status
+ TLS_VERIFY on)
+
+list(GET download_status 0 status_code)
+list(GET download_status 1 status_message)
+
+if (status_code)
+ message(FATAL_ERROR "Failed to download ${source_url}: ${status_message}")
+endif()
+
+file(STRINGS "${source_path}" lines)
+foreach(line ${lines})
+ if (line MATCHES "^[0-9a-fA-F]")
+ if (line MATCHES "platform:Windows")
+ if (GLFW_WIN32_MAPPINGS)
+ string(APPEND GLFW_WIN32_MAPPINGS "\n")
+ endif()
+ string(APPEND GLFW_WIN32_MAPPINGS "\"${line}\",")
+ elseif (line MATCHES "platform:Mac OS X")
+ if (GLFW_COCOA_MAPPINGS)
+ string(APPEND GLFW_COCOA_MAPPINGS "\n")
+ endif()
+ string(APPEND GLFW_COCOA_MAPPINGS "\"${line}\",")
+ elseif (line MATCHES "platform:Linux")
+ if (GLFW_LINUX_MAPPINGS)
+ string(APPEND GLFW_LINUX_MAPPINGS "\n")
+ endif()
+ string(APPEND GLFW_LINUX_MAPPINGS "\"${line}\",")
+ endif()
+ endif()
+endforeach()
+
+configure_file("${template_path}" "${target_path}" @ONLY NEWLINE_STYLE UNIX)
+file(REMOVE "${source_path}")
+
diff --git a/chromium/third_party/dawn/third_party/glfw/CMake/Info.plist.in b/chromium/third_party/dawn/third_party/glfw/CMake/Info.plist.in
new file mode 100644
index 00000000000..684ad79087f
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/CMake/Info.plist.in
@@ -0,0 +1,38 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+ <key>CFBundleDevelopmentRegion</key>
+ <string>English</string>
+ <key>CFBundleExecutable</key>
+ <string>${MACOSX_BUNDLE_EXECUTABLE_NAME}</string>
+ <key>CFBundleGetInfoString</key>
+ <string>${MACOSX_BUNDLE_INFO_STRING}</string>
+ <key>CFBundleIconFile</key>
+ <string>${MACOSX_BUNDLE_ICON_FILE}</string>
+ <key>CFBundleIdentifier</key>
+ <string>${MACOSX_BUNDLE_GUI_IDENTIFIER}</string>
+ <key>CFBundleInfoDictionaryVersion</key>
+ <string>6.0</string>
+ <key>CFBundleLongVersionString</key>
+ <string>${MACOSX_BUNDLE_LONG_VERSION_STRING}</string>
+ <key>CFBundleName</key>
+ <string>${MACOSX_BUNDLE_BUNDLE_NAME}</string>
+ <key>CFBundlePackageType</key>
+ <string>APPL</string>
+ <key>CFBundleShortVersionString</key>
+ <string>${MACOSX_BUNDLE_SHORT_VERSION_STRING}</string>
+ <key>CFBundleSignature</key>
+ <string>????</string>
+ <key>CFBundleVersion</key>
+ <string>${MACOSX_BUNDLE_BUNDLE_VERSION}</string>
+ <key>CSResourcesFileMapped</key>
+ <true/>
+ <key>LSRequiresCarbon</key>
+ <true/>
+ <key>NSHumanReadableCopyright</key>
+ <string>${MACOSX_BUNDLE_COPYRIGHT}</string>
+ <key>NSHighResolutionCapable</key>
+ <true/>
+</dict>
+</plist>
diff --git a/chromium/third_party/dawn/third_party/glfw/CMake/cmake_uninstall.cmake.in b/chromium/third_party/dawn/third_party/glfw/CMake/cmake_uninstall.cmake.in
new file mode 100644
index 00000000000..5ecc476db22
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/CMake/cmake_uninstall.cmake.in
@@ -0,0 +1,29 @@
+
+if (NOT EXISTS "@GLFW_BINARY_DIR@/install_manifest.txt")
+ message(FATAL_ERROR "Cannot find install manifest: \"@GLFW_BINARY_DIR@/install_manifest.txt\"")
+endif()
+
+file(READ "@GLFW_BINARY_DIR@/install_manifest.txt" files)
+string(REGEX REPLACE "\n" ";" files "${files}")
+
+foreach (file ${files})
+ message(STATUS "Uninstalling \"$ENV{DESTDIR}${file}\"")
+ if (EXISTS "$ENV{DESTDIR}${file}")
+ exec_program("@CMAKE_COMMAND@" ARGS "-E remove \"$ENV{DESTDIR}${file}\""
+ OUTPUT_VARIABLE rm_out
+ RETURN_VALUE rm_retval)
+ if (NOT "${rm_retval}" STREQUAL 0)
+ MESSAGE(FATAL_ERROR "Problem when removing \"$ENV{DESTDIR}${file}\"")
+ endif()
+ elseif (IS_SYMLINK "$ENV{DESTDIR}${file}")
+ EXEC_PROGRAM("@CMAKE_COMMAND@" ARGS "-E remove \"$ENV{DESTDIR}${file}\""
+ OUTPUT_VARIABLE rm_out
+ RETURN_VALUE rm_retval)
+ if (NOT "${rm_retval}" STREQUAL 0)
+ message(FATAL_ERROR "Problem when removing symlink \"$ENV{DESTDIR}${file}\"")
+ endif()
+ else()
+ message(STATUS "File \"$ENV{DESTDIR}${file}\" does not exist.")
+ endif()
+endforeach()
+
diff --git a/chromium/third_party/dawn/third_party/glfw/CMake/glfw3.pc.in b/chromium/third_party/dawn/third_party/glfw/CMake/glfw3.pc.in
new file mode 100644
index 00000000000..37f4efd91ff
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/CMake/glfw3.pc.in
@@ -0,0 +1,13 @@
+prefix=@CMAKE_INSTALL_PREFIX@
+exec_prefix=${prefix}
+includedir=@CMAKE_INSTALL_FULL_INCLUDEDIR@
+libdir=@CMAKE_INSTALL_FULL_LIBDIR@
+
+Name: GLFW
+Description: A multi-platform library for OpenGL, window and input
+Version: @GLFW_VERSION@
+URL: https://www.glfw.org/
+Requires.private: @GLFW_PKG_CONFIG_REQUIRES_PRIVATE@
+Libs: -L${libdir} -l@GLFW_LIB_NAME@
+Libs.private: @GLFW_PKG_CONFIG_LIBS_PRIVATE@
+Cflags: -I${includedir}
diff --git a/chromium/third_party/dawn/third_party/glfw/CMake/glfw3Config.cmake.in b/chromium/third_party/dawn/third_party/glfw/CMake/glfw3Config.cmake.in
new file mode 100644
index 00000000000..4a13a88b9eb
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/CMake/glfw3Config.cmake.in
@@ -0,0 +1,3 @@
+include(CMakeFindDependencyMacro)
+find_dependency(Threads)
+include("${CMAKE_CURRENT_LIST_DIR}/glfw3Targets.cmake")
diff --git a/chromium/third_party/dawn/third_party/glfw/CMake/i686-w64-mingw32-clang.cmake b/chromium/third_party/dawn/third_party/glfw/CMake/i686-w64-mingw32-clang.cmake
new file mode 100644
index 00000000000..8726b2382fb
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/CMake/i686-w64-mingw32-clang.cmake
@@ -0,0 +1,13 @@
+# Define the environment for cross-compiling with 32-bit MinGW-w64 Clang
+SET(CMAKE_SYSTEM_NAME Windows) # Target system name
+SET(CMAKE_SYSTEM_VERSION 1)
+SET(CMAKE_C_COMPILER "i686-w64-mingw32-clang")
+SET(CMAKE_CXX_COMPILER "i686-w64-mingw32-clang++")
+SET(CMAKE_RC_COMPILER "i686-w64-mingw32-windres")
+SET(CMAKE_RANLIB "i686-w64-mingw32-ranlib")
+
+# Configure the behaviour of the find commands
+SET(CMAKE_FIND_ROOT_PATH "/usr/i686-w64-mingw32")
+SET(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
+SET(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)
+SET(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)
diff --git a/chromium/third_party/dawn/third_party/glfw/CMake/i686-w64-mingw32.cmake b/chromium/third_party/dawn/third_party/glfw/CMake/i686-w64-mingw32.cmake
new file mode 100644
index 00000000000..2ca4dcd9559
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/CMake/i686-w64-mingw32.cmake
@@ -0,0 +1,13 @@
+# Define the environment for cross-compiling with 32-bit MinGW-w64 GCC
+SET(CMAKE_SYSTEM_NAME Windows) # Target system name
+SET(CMAKE_SYSTEM_VERSION 1)
+SET(CMAKE_C_COMPILER "i686-w64-mingw32-gcc")
+SET(CMAKE_CXX_COMPILER "i686-w64-mingw32-g++")
+SET(CMAKE_RC_COMPILER "i686-w64-mingw32-windres")
+SET(CMAKE_RANLIB "i686-w64-mingw32-ranlib")
+
+# Configure the behaviour of the find commands
+SET(CMAKE_FIND_ROOT_PATH "/usr/i686-w64-mingw32")
+SET(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
+SET(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)
+SET(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)
diff --git a/chromium/third_party/dawn/third_party/glfw/CMake/modules/FindEpollShim.cmake b/chromium/third_party/dawn/third_party/glfw/CMake/modules/FindEpollShim.cmake
new file mode 100644
index 00000000000..f34d07090ef
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/CMake/modules/FindEpollShim.cmake
@@ -0,0 +1,17 @@
+# Find EpollShim
+# Once done, this will define
+#
+# EPOLLSHIM_FOUND - System has EpollShim
+# EPOLLSHIM_INCLUDE_DIRS - The EpollShim include directories
+# EPOLLSHIM_LIBRARIES - The libraries needed to use EpollShim
+
+find_path(EPOLLSHIM_INCLUDE_DIRS NAMES sys/epoll.h sys/timerfd.h HINTS /usr/local/include/libepoll-shim)
+find_library(EPOLLSHIM_LIBRARIES NAMES epoll-shim libepoll-shim HINTS /usr/local/lib)
+
+if (EPOLLSHIM_INCLUDE_DIRS AND EPOLLSHIM_LIBRARIES)
+ set(EPOLLSHIM_FOUND TRUE)
+endif (EPOLLSHIM_INCLUDE_DIRS AND EPOLLSHIM_LIBRARIES)
+
+include(FindPackageHandleStandardArgs)
+find_package_handle_standard_args(EpollShim DEFAULT_MSG EPOLLSHIM_LIBRARIES EPOLLSHIM_INCLUDE_DIRS)
+mark_as_advanced(EPOLLSHIM_INCLUDE_DIRS EPOLLSHIM_LIBRARIES)
diff --git a/chromium/third_party/dawn/third_party/glfw/CMake/modules/FindOSMesa.cmake b/chromium/third_party/dawn/third_party/glfw/CMake/modules/FindOSMesa.cmake
new file mode 100644
index 00000000000..3194bd91aba
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/CMake/modules/FindOSMesa.cmake
@@ -0,0 +1,18 @@
+# Try to find OSMesa on a Unix system
+#
+# This will define:
+#
+# OSMESA_LIBRARIES - Link these to use OSMesa
+# OSMESA_INCLUDE_DIR - Include directory for OSMesa
+#
+# Copyright (c) 2014 Brandon Schaefer <brandon.schaefer@canonical.com>
+
+if (NOT WIN32)
+
+ find_package (PkgConfig)
+ pkg_check_modules (PKG_OSMESA QUIET osmesa)
+
+ set (OSMESA_INCLUDE_DIR ${PKG_OSMESA_INCLUDE_DIRS})
+ set (OSMESA_LIBRARIES ${PKG_OSMESA_LIBRARIES})
+
+endif ()
diff --git a/chromium/third_party/dawn/third_party/glfw/CMake/x86_64-w64-mingw32-clang.cmake b/chromium/third_party/dawn/third_party/glfw/CMake/x86_64-w64-mingw32-clang.cmake
new file mode 100644
index 00000000000..60f7914df9c
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/CMake/x86_64-w64-mingw32-clang.cmake
@@ -0,0 +1,13 @@
+# Define the environment for cross-compiling with 64-bit MinGW-w64 Clang
+SET(CMAKE_SYSTEM_NAME Windows) # Target system name
+SET(CMAKE_SYSTEM_VERSION 1)
+SET(CMAKE_C_COMPILER "x86_64-w64-mingw32-clang")
+SET(CMAKE_CXX_COMPILER "x86_64-w64-mingw32-clang++")
+SET(CMAKE_RC_COMPILER "x86_64-w64-mingw32-windres")
+SET(CMAKE_RANLIB "x86_64-w64-mingw32-ranlib")
+
+# Configure the behaviour of the find commands
+SET(CMAKE_FIND_ROOT_PATH "/usr/x86_64-w64-mingw32")
+SET(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
+SET(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)
+SET(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)
diff --git a/chromium/third_party/dawn/third_party/glfw/CMake/x86_64-w64-mingw32.cmake b/chromium/third_party/dawn/third_party/glfw/CMake/x86_64-w64-mingw32.cmake
new file mode 100644
index 00000000000..063e845aac4
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/CMake/x86_64-w64-mingw32.cmake
@@ -0,0 +1,13 @@
+# Define the environment for cross-compiling with 64-bit MinGW-w64 GCC
+SET(CMAKE_SYSTEM_NAME Windows) # Target system name
+SET(CMAKE_SYSTEM_VERSION 1)
+SET(CMAKE_C_COMPILER "x86_64-w64-mingw32-gcc")
+SET(CMAKE_CXX_COMPILER "x86_64-w64-mingw32-g++")
+SET(CMAKE_RC_COMPILER "x86_64-w64-mingw32-windres")
+SET(CMAKE_RANLIB "x86_64-w64-mingw32-ranlib")
+
+# Configure the behaviour of the find commands
+SET(CMAKE_FIND_ROOT_PATH "/usr/x86_64-w64-mingw32")
+SET(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
+SET(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)
+SET(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)
diff --git a/chromium/third_party/dawn/third_party/glfw/CMakeLists.txt b/chromium/third_party/dawn/third_party/glfw/CMakeLists.txt
new file mode 100644
index 00000000000..f5e538bf7cc
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/CMakeLists.txt
@@ -0,0 +1,179 @@
+cmake_minimum_required(VERSION 3.4...3.20 FATAL_ERROR)
+
+project(GLFW VERSION 3.4.0 LANGUAGES C)
+
+set(CMAKE_LEGACY_CYGWIN_WIN32 OFF)
+
+if (POLICY CMP0054)
+ cmake_policy(SET CMP0054 NEW)
+endif()
+
+if (POLICY CMP0069)
+ cmake_policy(SET CMP0069 NEW)
+endif()
+
+if (POLICY CMP0077)
+ cmake_policy(SET CMP0077 NEW)
+endif()
+
+set_property(GLOBAL PROPERTY USE_FOLDERS ON)
+
+if (CMAKE_SOURCE_DIR STREQUAL CMAKE_CURRENT_SOURCE_DIR)
+ set(GLFW_STANDALONE TRUE)
+endif()
+
+option(BUILD_SHARED_LIBS "Build shared libraries" OFF)
+option(GLFW_BUILD_EXAMPLES "Build the GLFW example programs" ${GLFW_STANDALONE})
+option(GLFW_BUILD_TESTS "Build the GLFW test programs" ${GLFW_STANDALONE})
+option(GLFW_BUILD_DOCS "Build the GLFW documentation" ON)
+option(GLFW_INSTALL "Generate installation target" ON)
+
+include(GNUInstallDirs)
+include(CMakeDependentOption)
+
+if (GLFW_USE_OSMESA)
+ message(FATAL_ERROR "GLFW_USE_OSMESA has been removed; set the GLFW_PLATFORM init hint")
+endif()
+
+cmake_dependent_option(GLFW_BUILD_WIN32 "Build support for Win32" ON "WIN32" OFF)
+cmake_dependent_option(GLFW_BUILD_COCOA "Build support for Cocoa" ON "APPLE" OFF)
+cmake_dependent_option(GLFW_BUILD_X11 "Build support for X11" ON "UNIX;NOT APPLE" OFF)
+cmake_dependent_option(GLFW_BUILD_WAYLAND "Build support for Wayland"
+ "${GLFW_USE_WAYLAND}" "UNIX;NOT APPLE" OFF)
+
+cmake_dependent_option(GLFW_USE_HYBRID_HPG "Force use of high-performance GPU on hybrid systems" OFF
+ "WIN32" OFF)
+cmake_dependent_option(USE_MSVC_RUNTIME_LIBRARY_DLL "Use MSVC runtime library DLL" ON
+ "MSVC" OFF)
+
+set(GLFW_LIBRARY_TYPE "${GLFW_LIBRARY_TYPE}" CACHE STRING
+ "Library type override for GLFW (SHARED, STATIC, OBJECT, or empty to follow BUILD_SHARED_LIBS)")
+
+if (GLFW_LIBRARY_TYPE)
+ if (GLFW_LIBRARY_TYPE STREQUAL "SHARED")
+ set(GLFW_BUILD_SHARED_LIBRARY TRUE)
+ else()
+ set(GLFW_BUILD_SHARED_LIBRARY FALSE)
+ endif()
+else()
+ set(GLFW_BUILD_SHARED_LIBRARY ${BUILD_SHARED_LIBS})
+endif()
+
+list(APPEND CMAKE_MODULE_PATH "${GLFW_SOURCE_DIR}/CMake/modules")
+
+find_package(Threads REQUIRED)
+
+if (GLFW_BUILD_DOCS)
+ set(DOXYGEN_SKIP_DOT TRUE)
+ find_package(Doxygen)
+endif()
+
+#--------------------------------------------------------------------
+# Report backend selection
+#--------------------------------------------------------------------
+if (GLFW_BUILD_WIN32)
+ message(STATUS "Including Win32 support")
+endif()
+if (GLFW_BUILD_COCOA)
+ message(STATUS "Including Cocoa support")
+endif()
+if (GLFW_BUILD_WAYLAND)
+ message(STATUS "Including Wayland support")
+endif()
+if (GLFW_BUILD_X11)
+ message(STATUS "Including X11 support")
+endif()
+
+#--------------------------------------------------------------------
+# Apply Microsoft C runtime library option
+# This is here because it also applies to tests and examples
+#--------------------------------------------------------------------
+if (MSVC AND NOT USE_MSVC_RUNTIME_LIBRARY_DLL)
+ if (CMAKE_VERSION VERSION_LESS 3.15)
+ foreach (flag CMAKE_C_FLAGS
+ CMAKE_C_FLAGS_DEBUG
+ CMAKE_C_FLAGS_RELEASE
+ CMAKE_C_FLAGS_MINSIZEREL
+ CMAKE_C_FLAGS_RELWITHDEBINFO)
+
+ if (flag MATCHES "/MD")
+ string(REGEX REPLACE "/MD" "/MT" ${flag} "${${flag}}")
+ endif()
+ if (flag MATCHES "/MDd")
+ string(REGEX REPLACE "/MDd" "/MTd" ${flag} "${${flag}}")
+ endif()
+
+ endforeach()
+ else()
+ set(CMAKE_MSVC_RUNTIME_LIBRARY "MultiThreaded$<$<CONFIG:Debug>:Debug>")
+ endif()
+endif()
+
+#--------------------------------------------------------------------
+# Create generated files
+#--------------------------------------------------------------------
+include(CMakePackageConfigHelpers)
+
+set(GLFW_CONFIG_PATH "${CMAKE_INSTALL_LIBDIR}/cmake/glfw3")
+
+configure_package_config_file(CMake/glfw3Config.cmake.in
+ src/glfw3Config.cmake
+ INSTALL_DESTINATION "${GLFW_CONFIG_PATH}"
+ NO_CHECK_REQUIRED_COMPONENTS_MACRO)
+
+write_basic_package_version_file(src/glfw3ConfigVersion.cmake
+ VERSION ${GLFW_VERSION}
+ COMPATIBILITY SameMajorVersion)
+
+#--------------------------------------------------------------------
+# Add subdirectories
+#--------------------------------------------------------------------
+add_subdirectory(src)
+
+if (GLFW_BUILD_EXAMPLES)
+ add_subdirectory(examples)
+endif()
+
+if (GLFW_BUILD_TESTS)
+ add_subdirectory(tests)
+endif()
+
+if (DOXYGEN_FOUND AND GLFW_BUILD_DOCS)
+ add_subdirectory(docs)
+endif()
+
+#--------------------------------------------------------------------
+# Install files other than the library
+# The library is installed by src/CMakeLists.txt
+#--------------------------------------------------------------------
+if (GLFW_INSTALL)
+ install(DIRECTORY include/GLFW DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}
+ FILES_MATCHING PATTERN glfw3.h PATTERN glfw3native.h)
+
+ install(FILES "${GLFW_BINARY_DIR}/src/glfw3Config.cmake"
+ "${GLFW_BINARY_DIR}/src/glfw3ConfigVersion.cmake"
+ DESTINATION "${GLFW_CONFIG_PATH}")
+
+ install(EXPORT glfwTargets FILE glfw3Targets.cmake
+ EXPORT_LINK_INTERFACE_LIBRARIES
+ DESTINATION "${GLFW_CONFIG_PATH}")
+ install(FILES "${GLFW_BINARY_DIR}/src/glfw3.pc"
+ DESTINATION "${CMAKE_INSTALL_LIBDIR}/pkgconfig")
+
+ if (DOXYGEN_FOUND AND GLFW_BUILD_DOCS)
+ install(DIRECTORY "${GLFW_BINARY_DIR}/docs/html"
+ DESTINATION "${CMAKE_INSTALL_DOCDIR}")
+ endif()
+
+ # Only generate this target if no higher-level project already has
+ if (NOT TARGET uninstall)
+ configure_file(CMake/cmake_uninstall.cmake.in
+ cmake_uninstall.cmake IMMEDIATE @ONLY)
+
+ add_custom_target(uninstall
+ "${CMAKE_COMMAND}" -P
+ "${GLFW_BINARY_DIR}/cmake_uninstall.cmake")
+ set_target_properties(uninstall PROPERTIES FOLDER "GLFW3")
+ endif()
+endif()
+
diff --git a/chromium/third_party/dawn/third_party/glfw/CONTRIBUTORS.md b/chromium/third_party/dawn/third_party/glfw/CONTRIBUTORS.md
new file mode 100644
index 00000000000..030abf7cc88
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/CONTRIBUTORS.md
@@ -0,0 +1,248 @@
+# Acknowledgements
+
+GLFW exists because people around the world donated their time and lent their
+skills. This list only includes contributions to the main repository and
+excludes other invaluable contributions like language bindings and text and
+video tutorials.
+
+ - Bobyshev Alexander
+ - Laurent Aphecetche
+ - Matt Arsenault
+ - ashishgamedev
+ - David Avedissian
+ - Luca Bacci
+ - Keith Bauer
+ - John Bartholomew
+ - Coşku Baş
+ - Niklas Behrens
+ - Andrew Belt
+ - Nevyn Bengtsson
+ - Niklas Bergström
+ - Denis Bernard
+ - Doug Binks
+ - blanco
+ - Waris Boonyasiriwat
+ - Kyle Brenneman
+ - Rok Breulj
+ - Kai Burjack
+ - Martin Capitanio
+ - Nicolas Caramelli
+ - David Carlier
+ - Arturo Castro
+ - Chi-kwan Chan
+ - TheChocolateOre
+ - Joseph Chua
+ - Ian Clarkson
+ - Michał Cichoń
+ - Lambert Clara
+ - Anna Clarke
+ - Josh Codd
+ - Yaron Cohen-Tal
+ - Omar Cornut
+ - Andrew Corrigan
+ - Bailey Cosier
+ - Noel Cower
+ - CuriouserThing
+ - Jason Daly
+ - danhambleton
+ - Jarrod Davis
+ - Olivier Delannoy
+ - Paul R. Deppe
+ - Michael Dickens
+ - Роман Донченко
+ - Mario Dorn
+ - Wolfgang Draxinger
+ - Jonathan Dummer
+ - Ralph Eastwood
+ - Fredrik Ehnbom
+ - Robin Eklind
+ - Jan Ekström
+ - Siavash Eliasi
+ - Ahmad Fatoum
+ - Felipe Ferreira
+ - Michael Fogleman
+ - Jason Francis
+ - Gerald Franz
+ - Mário Freitas
+ - GeO4d
+ - Marcus Geelnard
+ - ghuser404
+ - Charles Giessen
+ - Ryan C. Gordon
+ - Stephen Gowen
+ - Kovid Goyal
+ - Kevin Grandemange
+ - Eloi Marín Gratacós
+ - Stefan Gustavson
+ - Andrew Gutekanst
+ - Stephen Gutekanst
+ - Jonathan Hale
+ - hdf89shfdfs
+ - Sylvain Hellegouarch
+ - Matthew Henry
+ - heromyth
+ - Lucas Hinderberger
+ - Paul Holden
+ - Warren Hu
+ - Charles Huber
+ - Brent Huisman
+ - illustris
+ - InKryption
+ - IntellectualKitty
+ - Aaron Jacobs
+ - JannikGM
+ - Erik S. V. Jansson
+ - jjYBdx4IL
+ - Toni Jovanoski
+ - Arseny Kapoulkine
+ - Cem Karan
+ - Osman Keskin
+ - Koray Kilinc
+ - Josh Kilmer
+ - Byunghoon Kim
+ - Cameron King
+ - Peter Knut
+ - Christoph Kubisch
+ - Yuri Kunde Schlesner
+ - Rokas Kupstys
+ - Konstantin Käfer
+ - Eric Larson
+ - Francis Lecavalier
+ - Jong Won Lee
+ - Robin Leffmann
+ - Glenn Lewis
+ - Shane Liesegang
+ - Anders Lindqvist
+ - Leon Linhart
+ - Marco Lizza
+ - Eyal Lotem
+ - Aaron Loucks
+ - Luflosi
+ - lukect
+ - Tristam MacDonald
+ - Hans Mackowiak
+ - Дмитри Малышев
+ - Zbigniew Mandziejewicz
+ - Adam Marcus
+ - Célestin Marot
+ - Kyle McDonald
+ - David V. McKay
+ - David Medlock
+ - Bryce Mehring
+ - Jonathan Mercier
+ - Marcel Metz
+ - Liam Middlebrook
+ - Ave Milia
+ - Jonathan Miller
+ - Kenneth Miller
+ - Bruce Mitchener
+ - Jack Moffitt
+ - Jeff Molofee
+ - Alexander Monakov
+ - Pierre Morel
+ - Jon Morton
+ - Pierre Moulon
+ - Martins Mozeiko
+ - Pascal Muetschard
+ - Julian Møller
+ - ndogxj
+ - n3rdopolis
+ - Kristian Nielsen
+ - Kamil Nowakowski
+ - onox
+ - Denis Ovod
+ - Ozzy
+ - Andri Pálsson
+ - luz paz
+ - Peoro
+ - Braden Pellett
+ - Christopher Pelloux
+ - Arturo J. Pérez
+ - Vladimir Perminov
+ - Anthony Pesch
+ - Orson Peters
+ - Emmanuel Gil Peyrot
+ - Cyril Pichard
+ - Pilzschaf
+ - Keith Pitt
+ - Stanislav Podgorskiy
+ - Konstantin Podsvirov
+ - Nathan Poirier
+ - Alexandre Pretyman
+ - Pablo Prietz
+ - przemekmirek
+ - pthom
+ - Guillaume Racicot
+ - Philip Rideout
+ - Eddie Ringle
+ - Max Risuhin
+ - Jorge Rodriguez
+ - Jari Ronkainen
+ - Luca Rood
+ - Ed Ropple
+ - Aleksey Rybalkin
+ - Mikko Rytkönen
+ - Riku Salminen
+ - Brandon Schaefer
+ - Sebastian Schuberth
+ - Christian Sdunek
+ - Matt Sealey
+ - Steve Sexton
+ - Arkady Shapkin
+ - Ali Sherief
+ - Yoshiki Shibukawa
+ - Dmitri Shuralyov
+ - Joao da Silva
+ - Daniel Sieger
+ - Daniel Skorupski
+ - Slemmie
+ - Anthony Smith
+ - Bradley Smith
+ - Cliff Smolinsky
+ - Patrick Snape
+ - Erlend Sogge Heggen
+ - Olivier Sohn
+ - Julian Squires
+ - Johannes Stein
+ - Pontus Stenetorp
+ - Michael Stocker
+ - Justin Stoecker
+ - Elviss Strazdins
+ - Paul Sultana
+ - Nathan Sweet
+ - TTK-Bandit
+ - Jared Tiala
+ - Sergey Tikhomirov
+ - Arthur Tombs
+ - TronicLabs
+ - Ioannis Tsakpinis
+ - Samuli Tuomola
+ - Matthew Turner
+ - urraka
+ - Elias Vanderstuyft
+ - Stef Velzel
+ - Jari Vetoniemi
+ - Ricardo Vieira
+ - Nicholas Vitovitch
+ - Simon Voordouw
+ - Corentin Wallez
+ - Torsten Walluhn
+ - Patrick Walton
+ - Xo Wang
+ - Jay Weisskopf
+ - Frank Wille
+ - Andy Williams
+ - Joel Winarske
+ - Richard A. Wilkes
+ - Tatsuya Yatagawa
+ - Ryogo Yoshimura
+ - Lukas Zanner
+ - Andrey Zholos
+ - Aihui Zhu
+ - Santi Zupancic
+ - Jonas Ådahl
+ - Lasse Öörni
+ - Leonard König
+ - All the unmentioned and anonymous contributors in the GLFW community, for bug
+ reports, patches, feedback, testing and encouragement
+
diff --git a/chromium/third_party/dawn/third_party/glfw/LICENSE.md b/chromium/third_party/dawn/third_party/glfw/LICENSE.md
new file mode 100644
index 00000000000..7494a3f689c
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/LICENSE.md
@@ -0,0 +1,23 @@
+Copyright (c) 2002-2006 Marcus Geelnard
+
+Copyright (c) 2006-2019 Camilla Löwy
+
+This software is provided 'as-is', without any express or implied
+warranty. In no event will the authors be held liable for any damages
+arising from the use of this software.
+
+Permission is granted to anyone to use this software for any purpose,
+including commercial applications, and to alter it and redistribute it
+freely, subject to the following restrictions:
+
+1. The origin of this software must not be misrepresented; you must not
+ claim that you wrote the original software. If you use this software
+ in a product, an acknowledgment in the product documentation would
+ be appreciated but is not required.
+
+2. Altered source versions must be plainly marked as such, and must not
+ be misrepresented as being the original software.
+
+3. This notice may not be removed or altered from any source
+ distribution.
+
diff --git a/chromium/third_party/dawn/third_party/glfw/README.md b/chromium/third_party/dawn/third_party/glfw/README.md
new file mode 100644
index 00000000000..72cbef36f46
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/README.md
@@ -0,0 +1,361 @@
+# GLFW
+
+[![Build status](https://github.com/glfw/glfw/actions/workflows/build.yml/badge.svg)](https://github.com/glfw/glfw/actions)
+[![Build status](https://ci.appveyor.com/api/projects/status/0kf0ct9831i5l6sp/branch/master?svg=true)](https://ci.appveyor.com/project/elmindreda/glfw)
+[![Coverity Scan](https://scan.coverity.com/projects/4884/badge.svg)](https://scan.coverity.com/projects/glfw-glfw)
+
+## Introduction
+
+GLFW is an Open Source, multi-platform library for OpenGL, OpenGL ES and Vulkan
+application development. It provides a simple, platform-independent API for
+creating windows, contexts and surfaces, reading input, handling events, etc.
+
+GLFW natively supports Windows, macOS and Linux and other Unix-like systems. On
+Linux both X11 and Wayland are supported.
+
+GLFW is licensed under the [zlib/libpng
+license](https://www.glfw.org/license.html).
+
+You can [download](https://www.glfw.org/download.html) the latest stable release
+as source or Windows binaries, or fetch the `latest` branch from GitHub. Each
+release starting with 3.0 also has a corresponding [annotated
+tag](https://github.com/glfw/glfw/releases) with source and binary archives.
+
+The [documentation](https://www.glfw.org/docs/latest/) is available online and is
+included in all source and binary archives. See the [release
+notes](https://www.glfw.org/docs/latest/news.html) for new features, caveats and
+deprecations in the latest release. For more details see the [version
+history](https://www.glfw.org/changelog.html).
+
+The `master` branch is the stable integration branch and _should_ always compile
+and run on all supported platforms, although details of newly added features may
+change until they have been included in a release. New features and many bug
+fixes live in [other branches](https://github.com/glfw/glfw/branches/all) until
+they are stable enough to merge.
+
+If you are new to GLFW, you may find the
+[tutorial](https://www.glfw.org/docs/latest/quick.html) for GLFW 3 useful. If
+you have used GLFW 2 in the past, there is a [transition
+guide](https://www.glfw.org/docs/latest/moving.html) for moving to the GLFW
+3 API.
+
+GLFW exists because of the contributions of [many people](CONTRIBUTORS.md)
+around the world, whether by reporting bugs, providing community support, adding
+features, reviewing or testing code, debugging, proofreading docs, suggesting
+features or fixing bugs.
+
+
+## Compiling GLFW
+
+GLFW itself requires only the headers and libraries for your OS and window
+system. It does not need the headers for any context creation API (WGL, GLX,
+EGL, NSGL, OSMesa) or rendering API (OpenGL, OpenGL ES, Vulkan) to enable
+support for them.
+
+GLFW supports compilation on Windows with Visual C++ 2010 and later, MinGW and
+MinGW-w64, on macOS with Clang and on Linux and other Unix-like systems with GCC
+and Clang. It will likely compile in other environments as well, but this is
+not regularly tested.
+
+There are [pre-compiled Windows binaries](https://www.glfw.org/download.html)
+available for all supported compilers.
+
+See the [compilation guide](https://www.glfw.org/docs/latest/compile.html) for
+more information about how to compile GLFW yourself.
+
+
+## Using GLFW
+
+See the [documentation](https://www.glfw.org/docs/latest/) for tutorials, guides
+and the API reference.
+
+
+## Contributing to GLFW
+
+See the [contribution
+guide](https://github.com/glfw/glfw/blob/master/docs/CONTRIBUTING.md) for
+more information.
+
+
+## System requirements
+
+GLFW supports Windows XP and later and macOS 10.8 and later. Linux and other
+Unix-like systems running the X Window System are supported even without
+a desktop environment or modern extensions, although some features require
+a running window or clipboard manager. The OSMesa backend requires Mesa 6.3.
+
+See the [compatibility guide](https://www.glfw.org/docs/latest/compat.html)
+in the documentation for more information.
+
+
+## Dependencies
+
+GLFW itself needs only CMake 3.1 or later and the headers and libraries for your
+OS and window system.
+
+The examples and test programs depend on a number of tiny libraries. These are
+located in the `deps/` directory.
+
+ - [getopt\_port](https://github.com/kimgr/getopt_port/) for examples
+ with command-line options
+ - [TinyCThread](https://github.com/tinycthread/tinycthread) for threaded
+ examples
+ - [glad2](https://github.com/Dav1dde/glad) for loading OpenGL and Vulkan
+ functions
+ - [linmath.h](https://github.com/datenwolf/linmath.h) for linear algebra in
+ examples
+ - [Nuklear](https://github.com/Immediate-Mode-UI/Nuklear) for test and example UI
+ - [stb\_image\_write](https://github.com/nothings/stb) for writing images to disk
+
+The documentation is generated with [Doxygen](https://doxygen.org/) if CMake can
+find that tool.
+
+
+## Reporting bugs
+
+Bugs are reported to our [issue tracker](https://github.com/glfw/glfw/issues).
+Please check the [contribution
+guide](https://github.com/glfw/glfw/blob/master/docs/CONTRIBUTING.md) for
+information on what to include when reporting a bug.
+
+
+## Changelog
+
+ - Added `GLFW_PLATFORM` init hint for runtime platform selection (#1958)
+ - Added `GLFW_ANY_PLATFORM`, `GLFW_PLATFORM_WIN32`, `GLFW_PLATFORM_COCOA`,
+ `GLFW_PLATFORM_WAYLAND`, `GLFW_PLATFORM_X11` and `GLFW_PLATFORM_NULL` symbols to
+ specify the desired platform (#1958)
+ - Added `glfwGetPlatform` function to query what platform was selected (#1655,#1958)
+ - Added `glfwPlatformSupported` function to query if a platform is supported
+ (#1655,#1958)
+ - Added `glfwInitAllocator` for setting a custom memory allocator (#544,#1628,#1947)
+ - Added `GLFWallocator` struct and `GLFWallocatefun`, `GLFWreallocatefun` and
+ `GLFWdeallocatefun` types (#544,#1628,#1947)
+ - Added `glfwInitVulkanLoader` for using a non-default Vulkan loader (#1374,#1890)
+ - Added `GLFW_RESIZE_NWSE_CURSOR`, `GLFW_RESIZE_NESW_CURSOR`,
+ `GLFW_RESIZE_ALL_CURSOR` and `GLFW_NOT_ALLOWED_CURSOR` cursor shapes (#427)
+ - Added `GLFW_RESIZE_EW_CURSOR` alias for `GLFW_HRESIZE_CURSOR` (#427)
+ - Added `GLFW_RESIZE_NS_CURSOR` alias for `GLFW_VRESIZE_CURSOR` (#427)
+ - Added `GLFW_POINTING_HAND_CURSOR` alias for `GLFW_HAND_CURSOR` (#427)
+ - Added `GLFW_MOUSE_PASSTHROUGH` window hint for letting mouse input pass
+ through the window (#1236,#1568)
+ - Added `GLFW_PLATFORM_UNAVAILABLE` error for platform detection failures (#1958)
+ - Added `GLFW_FEATURE_UNAVAILABLE` error for platform limitations (#1692)
+ - Added `GLFW_FEATURE_UNIMPLEMENTED` error for incomplete backends (#1692)
+ - Added `GLFW_ANGLE_PLATFORM_TYPE` init hint and `GLFW_ANGLE_PLATFORM_TYPE_*`
+ values to select ANGLE backend (#1380)
+ - Added `GLFW_X11_XCB_VULKAN_SURFACE` init hint for selecting X11 Vulkan
+ surface extension (#1793)
+ - Added `GLFW_BUILD_WIN32` CMake option for enabling Win32 support (#1958)
+ - Added `GLFW_BUILD_COCOA` CMake option for enabling Cocoa support (#1958)
+ - Added `GLFW_BUILD_X11` CMake option for enabling X11 support (#1958)
+ - Added `GLFW_LIBRARY_TYPE` CMake variable for overriding the library type
+ (#279,#1307,#1497,#1574,#1928)
+ - Added `GLFW_PKG_CONFIG_REQUIRES_PRIVATE` and `GLFW_PKG_CONFIG_LIBS_PRIVATE` CMake
+ variables exposing pkg-config dependencies (#1307)
+ - Made joystick subsystem initialize at first use (#1284,#1646)
+ - Made `GLFW_DOUBLEBUFFER` a read-only window attribute
+ - Updated the minimum required CMake version to 3.1
+ - Updated gamepad mappings from upstream
+ - Disabled tests and examples by default when built as a CMake subdirectory
+ - Renamed `GLFW_USE_WAYLAND` CMake option to `GLFW_BUILD_WAYLAND` (#1958)
+ - Removed `GLFW_USE_OSMESA` CMake option enabling the Null platform (#1958)
+ - Removed CMake generated configuration header
+ - Bugfix: The CMake config-file package used an absolute path and was not
+ relocatable (#1470)
+ - Bugfix: Video modes with a duplicate screen area were discarded (#1555,#1556)
+ - Bugfix: Compiling with -Wextra-semi caused warnings (#1440)
+ - Bugfix: Built-in mappings failed because some OEMs re-used VID/PID (#1583)
+ - Bugfix: Some extension loader headers did not prevent default OpenGL header
+ inclusion (#1695)
+ - Bugfix: Buffers were swapped at creation on single-buffered windows (#1873)
+ - Bugfix: Gamepad mapping updates could spam `GLFW_INVALID_VALUE` due to
+ incompatible controllers sharing hardware ID (#1763)
+ - Bugfix: Native access functions for context handles did not check that the API matched
+ - Bugfix: `glfwMakeContextCurrent` would access TLS slot before initialization
+ - Bugfix: `glfwSetGammaRamp` could emit `GLFW_INVALID_VALUE` before initialization
+ - [Win32] Added the `GLFW_WIN32_KEYBOARD_MENU` window hint for enabling access
+ to the window menu
+ - [Win32] Added a version info resource to the GLFW DLL
+ - [Win32] Disabled framebuffer transparency on Windows 7 when DWM windows are
+ opaque (#1512)
+ - [Win32] Bugfix: `GLFW_INCLUDE_VULKAN` plus `VK_USE_PLATFORM_WIN32_KHR` caused
+ symbol redefinition (#1524)
+ - [Win32] Bugfix: The cursor position event was emitted before its cursor enter
+ event (#1490)
+ - [Win32] Bugfix: The window hint `GLFW_MAXIMIZED` did not move or resize the
+ window (#1499)
+ - [Win32] Bugfix: Disabled cursor mode interfered with some non-client actions
+ - [Win32] Bugfix: Super key was not released after Win+V hotkey (#1622)
+ - [Win32] Bugfix: `glfwGetKeyName` could access out of bounds and return an
+ invalid pointer
+ - [Win32] Bugfix: Some synthetic key events were reported as `GLFW_KEY_UNKNOWN`
+ (#1623)
+ - [Win32] Bugfix: Non-BMP Unicode codepoint input was reported as UTF-16
+ - [Win32] Bugfix: Monitor functions could return invalid values after
+ configuration change (#1761)
+ - [Win32] Bugfix: Initialization would segfault on Windows 8 (not 8.1) (#1775)
+ - [Win32] Bugfix: Duplicate size events were not filtered (#1610)
+ - [Win32] Bugfix: Full screen windows were incorrectly resized by DPI changes
+ (#1582)
+ - [Win32] Bugfix: `GLFW_SCALE_TO_MONITOR` had no effect on systems older than
+ Windows 10 version 1703 (#1511)
+ - [Win32] Bugfix: `USE_MSVC_RUNTIME_LIBRARY_DLL` had no effect on CMake 3.15 or
+ later (#1783,#1796)
+ - [Win32] Bugfix: Compilation with LLVM for Windows failed (#1807,#1824,#1874)
+ - [Win32] Bugfix: The foreground lock timeout was overridden, ignoring the user
+ - [Win32] Bugfix: Content scale queries could fail silently (#1615)
+ - [Win32] Bugfix: Content scales could have garbage values if monitor was recently
+ disconnected (#1615)
+ - [Win32] Bugfix: A window created maximized and undecorated would cover the whole
+ monitor (#1806)
+ - [Win32] Bugfix: The default restored window position was lost when creating a maximized
+ window
+ - [Win32] Bugfix: `glfwMaximizeWindow` would make a hidden window visible
+ - [Win32] Bugfix: `Alt+PrtSc` would emit `GLFW_KEY_UNKNOWN` and a different
+ scancode than `PrtSc` (#1993)
+ - [Win32] Bugfix: `GLFW_KEY_PAUSE` scancode from `glfwGetKeyScancode` did not
+ match event scancode (#1993)
+ - [Win32] Bugfix: Instance-local operations used executable instance (#469,#1296,#1395)
+ - [Win32] Bugfix: The OSMesa library was not unloaded on termination
+ - [Cocoa] Added support for `VK_EXT_metal_surface` (#1619)
+ - [Cocoa] Added locating the Vulkan loader at runtime in an application bundle
+ - [Cocoa] Moved main menu creation to GLFW initialization time (#1649)
+ - [Cocoa] Changed `EGLNativeWindowType` from `NSView` to `CALayer` (#1169)
+ - [Cocoa] Changed F13 key to report Print Screen for cross-platform consistency
+ (#1786)
+ - [Cocoa] Removed dependency on the CoreVideo framework
+ - [Cocoa] Bugfix: `glfwSetWindowSize` used a bottom-left anchor point (#1553)
+ - [Cocoa] Bugfix: Window remained on screen after destruction until event poll
+ (#1412)
+ - [Cocoa] Bugfix: Event processing before window creation would assert (#1543)
+ - [Cocoa] Bugfix: Undecorated windows could not be iconified on recent macOS
+ - [Cocoa] Bugfix: Touching event queue from secondary thread before main thread
+ would abort (#1649)
+ - [Cocoa] Bugfix: Non-BMP Unicode codepoint input was reported as UTF-16
+ (#1635)
+ - [Cocoa] Bugfix: Failing to retrieve the refresh rate of built-in displays
+ could leak memory
+ - [Cocoa] Bugfix: Objective-C files were compiled as C with CMake 3.19 (#1787)
+ - [Cocoa] Bugfix: Duplicate video modes were not filtered out (#1830)
+ - [Cocoa] Bugfix: Menu bar was not clickable on macOS 10.15+ until it lost and
+ regained focus (#1648,#1802)
+ - [Cocoa] Bugfix: Monitor name query could segfault on macOS 11 (#1809,#1833)
+ - [Cocoa] Bugfix: The install name of the installed dylib was relative (#1504)
+ - [Cocoa] Bugfix: The MoltenVK layer contents scale was updated only after
+ related events were emitted
+ - [Cocoa] Bugfix: Moving the cursor programmatically would freeze it for
+ a fraction of a second (#1962)
+ - [Cocoa] Bugfix: `kIOMasterPortDefault` was deprecated in macOS 12.0 (#1980)
+ - [Cocoa] Bugfix: `kUTTypeURL` was deprecated in macOS 12.0 (#2003)
+ - [Cocoa] Bugfix: A connected Apple AirPlay would emit a useless error (#1791)
+ - [Cocoa] Bugfix: The EGL and OSMesa libraries were not unloaded on termination
+ - [X11] Bugfix: The CMake files did not check for the XInput headers (#1480)
+ - [X11] Bugfix: Key names were not updated when the keyboard layout changed
+ (#1462,#1528)
+ - [X11] Bugfix: Decorations could not be enabled after window creation (#1566)
+ - [X11] Bugfix: Content scale fallback value could be inconsistent (#1578)
+ - [X11] Bugfix: `glfwMaximizeWindow` had no effect on hidden windows
+ - [X11] Bugfix: Clearing `GLFW_FLOATING` on a hidden window caused invalid read
+ - [X11] Bugfix: Changing `GLFW_FLOATING` on a hidden window could silently fail
+ - [X11] Bugfix: Disabled cursor mode was interrupted by indicator windows
+ - [X11] Bugfix: Monitor physical dimensions could be reported as zero mm
+ - [X11] Bugfix: Window position events were not emitted during resizing (#1613)
+ - [X11] Bugfix: `glfwFocusWindow` could terminate on older WMs or without a WM
+ - [X11] Bugfix: Querying a disconnected monitor could segfault (#1602)
+ - [X11] Bugfix: IME input of CJK was broken for "C" locale (#1587,#1636)
+ - [X11] Bugfix: Termination would segfault if the IM had been destroyed
+ - [X11] Bugfix: Any IM started after initialization would not be detected
+ - [X11] Bugfix: Xlib errors caused by other parts of the application could be
+ reported as GLFW errors
+ - [X11] Bugfix: A handle race condition could cause a `BadWindow` error (#1633)
+ - [X11] Bugfix: XKB path used keysyms instead of physical locations for
+ non-printable keys (#1598)
+ - [X11] Bugfix: Function keys were mapped to `GLFW_KEY_UNKNOWN` for some layout
+ combinations (#1598)
+ - [X11] Bugfix: Keys pressed simultaneously with others were not always
+ reported (#1112,#1415,#1472,#1616)
+ - [X11] Bugfix: Some window attributes were not applied on leaving fullscreen
+ (#1863)
+ - [X11] Bugfix: Changing `GLFW_FLOATING` could leak memory
+ - [X11] Bugfix: Icon pixel format conversion worked only by accident, relying on
+ undefined behavior (#1986)
+ - [X11] Bugfix: Dynamic loading on OpenBSD failed due to soname differences
+ - [X11] Bugfix: Waiting for events would fail if file descriptor was too large
+ (#2024)
+ - [X11] Bugfix: Joystick events could lead to busy-waiting (#1872)
+ - [X11] Bugfix: `glfwWaitEvents*` did not continue for joystick events
+ - [X11] Bugfix: `glfwPostEmptyEvent` could be ignored due to race condition
+ (#379,#1281,#1285,#2033)
+ - [X11] Bugfix: Dynamic loading on NetBSD failed due to soname differences
+ - [X11] Bugfix: Left shift of int constant relied on undefined behavior (#1951)
+ - [X11] Bugfix: The OSMesa library was not unloaded on termination
+ - [X11] Bugfix: A malformed response during selection transfer could cause a segfault
+ - [Wayland] Added dynamic loading of all Wayland libraries
+ - [Wayland] Added support for key names via xkbcommon
+ - [Wayland] Added support for file path drop events (#2040)
+ - [Wayland] Removed support for `wl_shell` (#1443)
+ - [Wayland] Bugfix: The `GLFW_HAND_CURSOR` shape used the wrong image (#1432)
+ - [Wayland] Bugfix: `CLOCK_MONOTONIC` was not correctly enabled
+ - [Wayland] Bugfix: Repeated keys could be reported with `NULL` window (#1704)
+ - [Wayland] Bugfix: Retrieving partial framebuffer size would segfault
+ - [Wayland] Bugfix: Scrolling offsets were inverted compared to other platforms
+ (#1463)
+ - [Wayland] Bugfix: Client-Side Decorations were destroyed in the wrong order
+ (#1798)
+ - [Wayland] Bugfix: Monitor physical size could report zero (#1784,#1792)
+ - [Wayland] Bugfix: Some keys were not repeating in Wayland (#1908)
+ - [Wayland] Bugfix: Non-arrow cursors are offset from the hotspot (#1706,#1899)
+ - [Wayland] Bugfix: The `O_CLOEXEC` flag was not defined on FreeBSD
+ - [Wayland] Bugfix: Key repeat could lead to a race condition (#1710)
+ - [Wayland] Bugfix: Activating a window would emit two input focus events
+ - [Wayland] Bugfix: Disable key repeat mechanism when window loses input focus
+ - [Wayland] Bugfix: Window hiding and showing did not work (#1492,#1731)
+ - [Wayland] Bugfix: A key being repeated was not released when window lost focus
+ - [Wayland] Bugfix: Showing a hidden window did not emit a window refresh event
+ - [Wayland] Bugfix: Full screen window creation did not ignore `GLFW_VISIBLE`
+ - [Wayland] Bugfix: Some keys were reported as wrong key or `GLFW_KEY_UNKNOWN`
+ - [Wayland] Bugfix: Text input did not repeat along with key repeat
+ - [Wayland] Bugfix: `glfwPostEmptyEvent` sometimes had no effect (#1520,#1521)
+ - [Wayland] Bugfix: `glfwSetClipboardString` would fail if set to result of
+ `glfwGetClipboardString`
+ - [Wayland] Bugfix: Data source creation error would cause double free at termination
+ - [Wayland] Bugfix: Partial writes of clipboard string would cause beginning to repeat
+ - [Wayland] Bugfix: Some errors would cause clipboard string transfer to hang
+ - [Wayland] Bugfix: Drag and drop data was misinterpreted as clipboard string
+ - [Wayland] Bugfix: MIME type matching was not performed for clipboard string
+ - [Wayland] Bugfix: The OSMesa library was not unloaded on termination
+ - [Wayland] Bugfix: `glfwCreateWindow` could emit `GLFW_FEATURE_UNAVAILABLE`
+ - [POSIX] Removed use of deprecated function `gettimeofday`
+ - [POSIX] Bugfix: `CLOCK_MONOTONIC` was not correctly tested for or enabled
+ - [WGL] Disabled the DWM swap interval hack for Windows 8 and later (#1072)
+ - [NSGL] Removed enforcement of forward-compatible flag for core contexts
+ - [NSGL] Bugfix: `GLFW_COCOA_RETINA_FRAMEBUFFER` had no effect on newer
+ macOS versions (#1442)
+ - [NSGL] Bugfix: Workaround for swap interval on 10.14 broke on 10.12 (#1483)
+ - [NSGL] Bugfix: Defining `GL_SILENCE_DEPRECATION` externally caused
+ a duplicate definition warning (#1840)
+ - [EGL] Added platform selection via the `EGL_EXT_platform_base` extension
+ (#442)
+ - [EGL] Added ANGLE backend selection via `EGL_ANGLE_platform_angle` extension
+ (#1380)
+ - [EGL] Bugfix: The `GLFW_DOUBLEBUFFER` context attribute was ignored (#1843)
+ - [GLX] Bugfix: Context creation failed if GLX 1.4 was not exported by GLX library
+
+
+## Contact
+
+On [glfw.org](https://www.glfw.org/) you can find the latest version of GLFW, as
+well as news, documentation and other information about the project.
+
+If you have questions related to the use of GLFW, we have a
+[forum](https://discourse.glfw.org/), and the `#glfw` IRC channel on
+[Libera.Chat](https://libera.chat/).
+
+If you have a bug to report, a patch to submit or a feature you'd like to
+request, please file it in the
+[issue tracker](https://github.com/glfw/glfw/issues) on GitHub.
+
+Finally, if you're interested in helping out with the development of GLFW or
+porting it to your favorite platform, join us on the forum, GitHub or IRC.
+
diff --git a/chromium/third_party/dawn/third_party/glfw/deps/getopt.c b/chromium/third_party/dawn/third_party/glfw/deps/getopt.c
new file mode 100644
index 00000000000..9743046f910
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/deps/getopt.c
@@ -0,0 +1,230 @@
+/* Copyright (c) 2012, Kim Gräsman
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Kim Gräsman nor the names of contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL KIM GRÄSMAN BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "getopt.h"
+
+#include <stddef.h>
+#include <string.h>
+
+const int no_argument = 0;
+const int required_argument = 1;
+const int optional_argument = 2;
+
+char* optarg;
+int optopt;
+/* The variable optind [...] shall be initialized to 1 by the system. */
+int optind = 1;
+int opterr;
+
+static char* optcursor = NULL;
+
+/* Implemented based on [1] and [2] for optional arguments.
+ optopt is handled FreeBSD-style, per [3].
+ Other GNU and FreeBSD extensions are purely accidental.
+
+[1] http://pubs.opengroup.org/onlinepubs/000095399/functions/getopt.html
+[2] http://www.kernel.org/doc/man-pages/online/pages/man3/getopt.3.html
+[3] http://www.freebsd.org/cgi/man.cgi?query=getopt&sektion=3&manpath=FreeBSD+9.0-RELEASE
+*/
+int getopt(int argc, char* const argv[], const char* optstring) {
+ int optchar = -1;
+ const char* optdecl = NULL;
+
+ optarg = NULL;
+ opterr = 0;
+ optopt = 0;
+
+ /* Unspecified, but we need it to avoid overrunning the argv bounds. */
+ if (optind >= argc)
+ goto no_more_optchars;
+
+ /* If, when getopt() is called argv[optind] is a null pointer, getopt()
+ shall return -1 without changing optind. */
+ if (argv[optind] == NULL)
+ goto no_more_optchars;
+
+ /* If, when getopt() is called *argv[optind] is not the character '-',
+ getopt() shall return -1 without changing optind. */
+ if (*argv[optind] != '-')
+ goto no_more_optchars;
+
+ /* If, when getopt() is called argv[optind] points to the string "-",
+ getopt() shall return -1 without changing optind. */
+ if (strcmp(argv[optind], "-") == 0)
+ goto no_more_optchars;
+
+ /* If, when getopt() is called argv[optind] points to the string "--",
+ getopt() shall return -1 after incrementing optind. */
+ if (strcmp(argv[optind], "--") == 0) {
+ ++optind;
+ goto no_more_optchars;
+ }
+
+ if (optcursor == NULL || *optcursor == '\0')
+ optcursor = argv[optind] + 1;
+
+ optchar = *optcursor;
+
+ /* FreeBSD: The variable optopt saves the last known option character
+ returned by getopt(). */
+ optopt = optchar;
+
+ /* The getopt() function shall return the next option character (if one is
+ found) from argv that matches a character in optstring, if there is
+ one that matches. */
+ optdecl = strchr(optstring, optchar);
+ if (optdecl) {
+ /* [I]f a character is followed by a colon, the option takes an
+ argument. */
+ if (optdecl[1] == ':') {
+ optarg = ++optcursor;
+ if (*optarg == '\0') {
+ /* GNU extension: Two colons mean an option takes an
+ optional arg; if there is text in the current argv-element
+ (i.e., in the same word as the option name itself, for example,
+ "-oarg"), then it is returned in optarg, otherwise optarg is set
+ to zero. */
+ if (optdecl[2] != ':') {
+ /* If the option was the last character in the string pointed to by
+ an element of argv, then optarg shall contain the next element
+ of argv, and optind shall be incremented by 2. If the resulting
+ value of optind is greater than argc, this indicates a missing
+ option-argument, and getopt() shall return an error indication.
+
+ Otherwise, optarg shall point to the string following the
+ option character in that element of argv, and optind shall be
+ incremented by 1.
+ */
+ if (++optind < argc) {
+ optarg = argv[optind];
+ } else {
+ /* If it detects a missing option-argument, it shall return the
+ colon character ( ':' ) if the first character of optstring
+ was a colon, or a question-mark character ( '?' ) otherwise.
+ */
+ optarg = NULL;
+ optchar = (optstring[0] == ':') ? ':' : '?';
+ }
+ } else {
+ optarg = NULL;
+ }
+ }
+
+ optcursor = NULL;
+ }
+ } else {
+ /* If getopt() encounters an option character that is not contained in
+ optstring, it shall return the question-mark ( '?' ) character. */
+ optchar = '?';
+ }
+
+ if (optcursor == NULL || *++optcursor == '\0')
+ ++optind;
+
+ return optchar;
+
+no_more_optchars:
+ optcursor = NULL;
+ return -1;
+}
+
+/* Implementation based on [1].
+
+[1] http://www.kernel.org/doc/man-pages/online/pages/man3/getopt.3.html
+*/
+int getopt_long(int argc, char* const argv[], const char* optstring,
+ const struct option* longopts, int* longindex) {
+ const struct option* o = longopts;
+ const struct option* match = NULL;
+ int num_matches = 0;
+ size_t argument_name_length = 0;
+ const char* current_argument = NULL;
+ int retval = -1;
+
+ optarg = NULL;
+ optopt = 0;
+
+ if (optind >= argc)
+ return -1;
+
+ if (strlen(argv[optind]) < 3 || strncmp(argv[optind], "--", 2) != 0)
+ return getopt(argc, argv, optstring);
+
+ /* It's an option; starts with -- and is longer than two chars. */
+ current_argument = argv[optind] + 2;
+ argument_name_length = strcspn(current_argument, "=");
+ for (; o->name; ++o) {
+ if (strncmp(o->name, current_argument, argument_name_length) == 0) {
+ match = o;
+ ++num_matches;
+ }
+ }
+
+ if (num_matches == 1) {
+ /* If longindex is not NULL, it points to a variable which is set to the
+ index of the long option relative to longopts. */
+ if (longindex)
+ *longindex = (int) (match - longopts);
+
+ /* If flag is NULL, then getopt_long() shall return val.
+ Otherwise, getopt_long() returns 0, and flag shall point to a variable
+ which shall be set to val if the option is found, but left unchanged if
+ the option is not found. */
+ if (match->flag)
+ *(match->flag) = match->val;
+
+ retval = match->flag ? 0 : match->val;
+
+ if (match->has_arg != no_argument) {
+ optarg = strchr(argv[optind], '=');
+ if (optarg != NULL)
+ ++optarg;
+
+ if (match->has_arg == required_argument) {
+ /* Only scan the next argv for required arguments. Behavior is not
+ specified, but has been observed with Ubuntu and Mac OSX. */
+ if (optarg == NULL && ++optind < argc) {
+ optarg = argv[optind];
+ }
+
+ if (optarg == NULL)
+ retval = ':';
+ }
+ } else if (strchr(argv[optind], '=')) {
+ /* An argument was provided to a non-argument option.
+ I haven't seen this specified explicitly, but both GNU and BSD-based
+ implementations show this behavior.
+ */
+ retval = '?';
+ }
+ } else {
+ /* Unknown option or ambiguous match. */
+ retval = '?';
+ }
+
+ ++optind;
+ return retval;
+}
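
Editorial note, not part of the diff: a minimal, hypothetical driver for the getopt/getopt_long port added above, sketching how a caller would typically consume it. The option names "verbose" and "output" and the include path are assumptions for illustration only; the literal 0/1 has_arg values mirror this port's no_argument/required_argument constants, which are extern const ints and so cannot appear in a static initializer.

#include <stdio.h>
#include "getopt.h"   /* the port declared in deps/getopt.h */

int main(int argc, char* argv[]) {
    /* Long-option table; a NULL name terminates it, matching the loop in getopt_long(). */
    const struct option longopts[] = {
        {"verbose", 0 /* no_argument */,       NULL, 'v'},
        {"output",  1 /* required_argument */, NULL, 'o'},
        {NULL, 0, NULL, 0}
    };
    int c;
    while ((c = getopt_long(argc, argv, "vo:", longopts, NULL)) != -1) {
        switch (c) {
            case 'v': printf("verbose on\n"); break;
            case 'o': printf("output file: %s\n", optarg); break;
            case ':': /* missing required argument (returned by getopt_long in this port) */
            case '?': /* unknown or ambiguous option */
            default:  return 1;
        }
    }
    /* optind now indexes the first non-option argument. */
    printf("first non-option argument index: %d\n", optind);
    return 0;
}
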
diff --git a/chromium/third_party/dawn/third_party/glfw/deps/getopt.h b/chromium/third_party/dawn/third_party/glfw/deps/getopt.h
new file mode 100644
index 00000000000..e1eb540fd9d
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/deps/getopt.h
@@ -0,0 +1,57 @@
+/* Copyright (c) 2012, Kim Gräsman
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Kim Gräsman nor the names of contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL KIM GRÄSMAN BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef INCLUDED_GETOPT_PORT_H
+#define INCLUDED_GETOPT_PORT_H
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+extern const int no_argument;
+extern const int required_argument;
+extern const int optional_argument;
+
+extern char* optarg;
+extern int optind, opterr, optopt;
+
+struct option {
+ const char* name;
+ int has_arg;
+ int* flag;
+ int val;
+};
+
+int getopt(int argc, char* const argv[], const char* optstring);
+
+int getopt_long(int argc, char* const argv[],
+ const char* optstring, const struct option* longopts, int* longindex);
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif // INCLUDED_GETOPT_PORT_H
diff --git a/chromium/third_party/dawn/third_party/glfw/deps/glad/gl.h b/chromium/third_party/dawn/third_party/glfw/deps/glad/gl.h
new file mode 100644
index 00000000000..b421fe08064
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/deps/glad/gl.h
@@ -0,0 +1,5996 @@
+/**
+ * Loader generated by glad 2.0.0-beta on Tue Aug 24 22:51:07 2021
+ *
+ * Generator: C/C++
+ * Specification: gl
+ * Extensions: 3
+ *
+ * APIs:
+ * - gl:compatibility=3.3
+ *
+ * Options:
+ * - ALIAS = False
+ * - DEBUG = False
+ * - HEADER_ONLY = True
+ * - LOADER = False
+ * - MX = False
+ * - MX_GLOBAL = False
+ * - ON_DEMAND = False
+ *
+ * Commandline:
+ * --api='gl:compatibility=3.3' --extensions='GL_ARB_multisample,GL_ARB_robustness,GL_KHR_debug' c --header-only
+ *
+ * Online:
+ * http://glad.sh/#api=gl%3Acompatibility%3D3.3&extensions=GL_ARB_multisample%2CGL_ARB_robustness%2CGL_KHR_debug&generator=c&options=HEADER_ONLY
+ *
+ */
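
Editorial note, not part of the generated header or the diff: because this loader was generated with HEADER_ONLY=True, the usual glad 2 convention is to compile the implementation into exactly one translation unit and then resolve the GL entry points through a proc-address callback. A minimal sketch, assuming GLFW supplies that callback, a current OpenGL context already exists, and the function name below is purely illustrative:

/* In exactly one .c/.cpp file: */
#define GLAD_GL_IMPLEMENTATION
#include "glad/gl.h"
#define GLFW_INCLUDE_NONE   /* keep GLFW from pulling in system GL headers */
#include <GLFW/glfw3.h>
#include <stdio.h>

int load_gl_after_context_is_current(void) {
    /* gladLoadGL resolves every GL entry point through the supplied callback
       and returns the packed context version (see GLAD_MAKE_VERSION above),
       or 0 on failure. */
    int version = gladLoadGL((GLADloadfunc) glfwGetProcAddress);
    if (version == 0) {
        fprintf(stderr, "Failed to load OpenGL functions\n");
        return 0;
    }
    printf("Loaded OpenGL %d.%d\n",
           GLAD_VERSION_MAJOR(version), GLAD_VERSION_MINOR(version));
    return 1;
}
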
+
+#ifndef GLAD_GL_H_
+#define GLAD_GL_H_
+
+#ifdef __clang__
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wreserved-id-macro"
+#endif
+#ifdef __gl_h_
+ #error OpenGL (gl.h) header already included (API: gl), remove previous include!
+#endif
+#define __gl_h_ 1
+#ifdef __gl3_h_
+ #error OpenGL (gl3.h) header already included (API: gl), remove previous include!
+#endif
+#define __gl3_h_ 1
+#ifdef __glext_h_
+ #error OpenGL (glext.h) header already included (API: gl), remove previous include!
+#endif
+#define __glext_h_ 1
+#ifdef __gl3ext_h_
+ #error OpenGL (gl3ext.h) header already included (API: gl), remove previous include!
+#endif
+#define __gl3ext_h_ 1
+#ifdef __clang__
+#pragma clang diagnostic pop
+#endif
+
+#define GLAD_GL
+#define GLAD_OPTION_GL_HEADER_ONLY
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef GLAD_PLATFORM_H_
+#define GLAD_PLATFORM_H_
+
+#ifndef GLAD_PLATFORM_WIN32
+ #if defined(_WIN32) || defined(__WIN32__) || defined(WIN32) || defined(__MINGW32__)
+ #define GLAD_PLATFORM_WIN32 1
+ #else
+ #define GLAD_PLATFORM_WIN32 0
+ #endif
+#endif
+
+#ifndef GLAD_PLATFORM_APPLE
+ #ifdef __APPLE__
+ #define GLAD_PLATFORM_APPLE 1
+ #else
+ #define GLAD_PLATFORM_APPLE 0
+ #endif
+#endif
+
+#ifndef GLAD_PLATFORM_EMSCRIPTEN
+ #ifdef __EMSCRIPTEN__
+ #define GLAD_PLATFORM_EMSCRIPTEN 1
+ #else
+ #define GLAD_PLATFORM_EMSCRIPTEN 0
+ #endif
+#endif
+
+#ifndef GLAD_PLATFORM_UWP
+ #if defined(_MSC_VER) && !defined(GLAD_INTERNAL_HAVE_WINAPIFAMILY)
+ #ifdef __has_include
+ #if __has_include(<winapifamily.h>)
+ #define GLAD_INTERNAL_HAVE_WINAPIFAMILY 1
+ #endif
+ #elif _MSC_VER >= 1700 && !_USING_V110_SDK71_
+ #define GLAD_INTERNAL_HAVE_WINAPIFAMILY 1
+ #endif
+ #endif
+
+ #ifdef GLAD_INTERNAL_HAVE_WINAPIFAMILY
+ #include <winapifamily.h>
+ #if !WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP) && WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP)
+ #define GLAD_PLATFORM_UWP 1
+ #endif
+ #endif
+
+ #ifndef GLAD_PLATFORM_UWP
+ #define GLAD_PLATFORM_UWP 0
+ #endif
+#endif
+
+#ifdef __GNUC__
+ #define GLAD_GNUC_EXTENSION __extension__
+#else
+ #define GLAD_GNUC_EXTENSION
+#endif
+
+#ifndef GLAD_API_CALL
+ #if defined(GLAD_API_CALL_EXPORT)
+ #if GLAD_PLATFORM_WIN32 || defined(__CYGWIN__)
+ #if defined(GLAD_API_CALL_EXPORT_BUILD)
+ #if defined(__GNUC__)
+ #define GLAD_API_CALL __attribute__ ((dllexport)) extern
+ #else
+ #define GLAD_API_CALL __declspec(dllexport) extern
+ #endif
+ #else
+ #if defined(__GNUC__)
+ #define GLAD_API_CALL __attribute__ ((dllimport)) extern
+ #else
+ #define GLAD_API_CALL __declspec(dllimport) extern
+ #endif
+ #endif
+ #elif defined(__GNUC__) && defined(GLAD_API_CALL_EXPORT_BUILD)
+ #define GLAD_API_CALL __attribute__ ((visibility ("default"))) extern
+ #else
+ #define GLAD_API_CALL extern
+ #endif
+ #else
+ #define GLAD_API_CALL extern
+ #endif
+#endif
+
+#ifdef APIENTRY
+ #define GLAD_API_PTR APIENTRY
+#elif GLAD_PLATFORM_WIN32
+ #define GLAD_API_PTR __stdcall
+#else
+ #define GLAD_API_PTR
+#endif
+
+#ifndef GLAPI
+#define GLAPI GLAD_API_CALL
+#endif
+
+#ifndef GLAPIENTRY
+#define GLAPIENTRY GLAD_API_PTR
+#endif
+
+#define GLAD_MAKE_VERSION(major, minor) (major * 10000 + minor)
+#define GLAD_VERSION_MAJOR(version) (version / 10000)
+#define GLAD_VERSION_MINOR(version) (version % 10000)
+
+#define GLAD_GENERATOR_VERSION "2.0.0-beta"
+
+typedef void (*GLADapiproc)(void);
+
+typedef GLADapiproc (*GLADloadfunc)(const char *name);
+typedef GLADapiproc (*GLADuserptrloadfunc)(void *userptr, const char *name);
+
+typedef void (*GLADprecallback)(const char *name, GLADapiproc apiproc, int len_args, ...);
+typedef void (*GLADpostcallback)(void *ret, const char *name, GLADapiproc apiproc, int len_args, ...);
+
+#endif /* GLAD_PLATFORM_H_ */
+
+#define GL_2D 0x0600
+#define GL_2_BYTES 0x1407
+#define GL_3D 0x0601
+#define GL_3D_COLOR 0x0602
+#define GL_3D_COLOR_TEXTURE 0x0603
+#define GL_3_BYTES 0x1408
+#define GL_4D_COLOR_TEXTURE 0x0604
+#define GL_4_BYTES 0x1409
+#define GL_ACCUM 0x0100
+#define GL_ACCUM_ALPHA_BITS 0x0D5B
+#define GL_ACCUM_BLUE_BITS 0x0D5A
+#define GL_ACCUM_BUFFER_BIT 0x00000200
+#define GL_ACCUM_CLEAR_VALUE 0x0B80
+#define GL_ACCUM_GREEN_BITS 0x0D59
+#define GL_ACCUM_RED_BITS 0x0D58
+#define GL_ACTIVE_ATTRIBUTES 0x8B89
+#define GL_ACTIVE_ATTRIBUTE_MAX_LENGTH 0x8B8A
+#define GL_ACTIVE_TEXTURE 0x84E0
+#define GL_ACTIVE_UNIFORMS 0x8B86
+#define GL_ACTIVE_UNIFORM_BLOCKS 0x8A36
+#define GL_ACTIVE_UNIFORM_BLOCK_MAX_NAME_LENGTH 0x8A35
+#define GL_ACTIVE_UNIFORM_MAX_LENGTH 0x8B87
+#define GL_ADD 0x0104
+#define GL_ADD_SIGNED 0x8574
+#define GL_ALIASED_LINE_WIDTH_RANGE 0x846E
+#define GL_ALIASED_POINT_SIZE_RANGE 0x846D
+#define GL_ALL_ATTRIB_BITS 0xFFFFFFFF
+#define GL_ALPHA 0x1906
+#define GL_ALPHA12 0x803D
+#define GL_ALPHA16 0x803E
+#define GL_ALPHA4 0x803B
+#define GL_ALPHA8 0x803C
+#define GL_ALPHA_BIAS 0x0D1D
+#define GL_ALPHA_BITS 0x0D55
+#define GL_ALPHA_INTEGER 0x8D97
+#define GL_ALPHA_SCALE 0x0D1C
+#define GL_ALPHA_TEST 0x0BC0
+#define GL_ALPHA_TEST_FUNC 0x0BC1
+#define GL_ALPHA_TEST_REF 0x0BC2
+#define GL_ALREADY_SIGNALED 0x911A
+#define GL_ALWAYS 0x0207
+#define GL_AMBIENT 0x1200
+#define GL_AMBIENT_AND_DIFFUSE 0x1602
+#define GL_AND 0x1501
+#define GL_AND_INVERTED 0x1504
+#define GL_AND_REVERSE 0x1502
+#define GL_ANY_SAMPLES_PASSED 0x8C2F
+#define GL_ARRAY_BUFFER 0x8892
+#define GL_ARRAY_BUFFER_BINDING 0x8894
+#define GL_ATTACHED_SHADERS 0x8B85
+#define GL_ATTRIB_STACK_DEPTH 0x0BB0
+#define GL_AUTO_NORMAL 0x0D80
+#define GL_AUX0 0x0409
+#define GL_AUX1 0x040A
+#define GL_AUX2 0x040B
+#define GL_AUX3 0x040C
+#define GL_AUX_BUFFERS 0x0C00
+#define GL_BACK 0x0405
+#define GL_BACK_LEFT 0x0402
+#define GL_BACK_RIGHT 0x0403
+#define GL_BGR 0x80E0
+#define GL_BGRA 0x80E1
+#define GL_BGRA_INTEGER 0x8D9B
+#define GL_BGR_INTEGER 0x8D9A
+#define GL_BITMAP 0x1A00
+#define GL_BITMAP_TOKEN 0x0704
+#define GL_BLEND 0x0BE2
+#define GL_BLEND_COLOR 0x8005
+#define GL_BLEND_DST 0x0BE0
+#define GL_BLEND_DST_ALPHA 0x80CA
+#define GL_BLEND_DST_RGB 0x80C8
+#define GL_BLEND_EQUATION 0x8009
+#define GL_BLEND_EQUATION_ALPHA 0x883D
+#define GL_BLEND_EQUATION_RGB 0x8009
+#define GL_BLEND_SRC 0x0BE1
+#define GL_BLEND_SRC_ALPHA 0x80CB
+#define GL_BLEND_SRC_RGB 0x80C9
+#define GL_BLUE 0x1905
+#define GL_BLUE_BIAS 0x0D1B
+#define GL_BLUE_BITS 0x0D54
+#define GL_BLUE_INTEGER 0x8D96
+#define GL_BLUE_SCALE 0x0D1A
+#define GL_BOOL 0x8B56
+#define GL_BOOL_VEC2 0x8B57
+#define GL_BOOL_VEC3 0x8B58
+#define GL_BOOL_VEC4 0x8B59
+#define GL_BUFFER 0x82E0
+#define GL_BUFFER_ACCESS 0x88BB
+#define GL_BUFFER_ACCESS_FLAGS 0x911F
+#define GL_BUFFER_MAPPED 0x88BC
+#define GL_BUFFER_MAP_LENGTH 0x9120
+#define GL_BUFFER_MAP_OFFSET 0x9121
+#define GL_BUFFER_MAP_POINTER 0x88BD
+#define GL_BUFFER_SIZE 0x8764
+#define GL_BUFFER_USAGE 0x8765
+#define GL_BYTE 0x1400
+#define GL_C3F_V3F 0x2A24
+#define GL_C4F_N3F_V3F 0x2A26
+#define GL_C4UB_V2F 0x2A22
+#define GL_C4UB_V3F 0x2A23
+#define GL_CCW 0x0901
+#define GL_CLAMP 0x2900
+#define GL_CLAMP_FRAGMENT_COLOR 0x891B
+#define GL_CLAMP_READ_COLOR 0x891C
+#define GL_CLAMP_TO_BORDER 0x812D
+#define GL_CLAMP_TO_EDGE 0x812F
+#define GL_CLAMP_VERTEX_COLOR 0x891A
+#define GL_CLEAR 0x1500
+#define GL_CLIENT_ACTIVE_TEXTURE 0x84E1
+#define GL_CLIENT_ALL_ATTRIB_BITS 0xFFFFFFFF
+#define GL_CLIENT_ATTRIB_STACK_DEPTH 0x0BB1
+#define GL_CLIENT_PIXEL_STORE_BIT 0x00000001
+#define GL_CLIENT_VERTEX_ARRAY_BIT 0x00000002
+#define GL_CLIP_DISTANCE0 0x3000
+#define GL_CLIP_DISTANCE1 0x3001
+#define GL_CLIP_DISTANCE2 0x3002
+#define GL_CLIP_DISTANCE3 0x3003
+#define GL_CLIP_DISTANCE4 0x3004
+#define GL_CLIP_DISTANCE5 0x3005
+#define GL_CLIP_DISTANCE6 0x3006
+#define GL_CLIP_DISTANCE7 0x3007
+#define GL_CLIP_PLANE0 0x3000
+#define GL_CLIP_PLANE1 0x3001
+#define GL_CLIP_PLANE2 0x3002
+#define GL_CLIP_PLANE3 0x3003
+#define GL_CLIP_PLANE4 0x3004
+#define GL_CLIP_PLANE5 0x3005
+#define GL_COEFF 0x0A00
+#define GL_COLOR 0x1800
+#define GL_COLOR_ARRAY 0x8076
+#define GL_COLOR_ARRAY_BUFFER_BINDING 0x8898
+#define GL_COLOR_ARRAY_POINTER 0x8090
+#define GL_COLOR_ARRAY_SIZE 0x8081
+#define GL_COLOR_ARRAY_STRIDE 0x8083
+#define GL_COLOR_ARRAY_TYPE 0x8082
+#define GL_COLOR_ATTACHMENT0 0x8CE0
+#define GL_COLOR_ATTACHMENT1 0x8CE1
+#define GL_COLOR_ATTACHMENT10 0x8CEA
+#define GL_COLOR_ATTACHMENT11 0x8CEB
+#define GL_COLOR_ATTACHMENT12 0x8CEC
+#define GL_COLOR_ATTACHMENT13 0x8CED
+#define GL_COLOR_ATTACHMENT14 0x8CEE
+#define GL_COLOR_ATTACHMENT15 0x8CEF
+#define GL_COLOR_ATTACHMENT16 0x8CF0
+#define GL_COLOR_ATTACHMENT17 0x8CF1
+#define GL_COLOR_ATTACHMENT18 0x8CF2
+#define GL_COLOR_ATTACHMENT19 0x8CF3
+#define GL_COLOR_ATTACHMENT2 0x8CE2
+#define GL_COLOR_ATTACHMENT20 0x8CF4
+#define GL_COLOR_ATTACHMENT21 0x8CF5
+#define GL_COLOR_ATTACHMENT22 0x8CF6
+#define GL_COLOR_ATTACHMENT23 0x8CF7
+#define GL_COLOR_ATTACHMENT24 0x8CF8
+#define GL_COLOR_ATTACHMENT25 0x8CF9
+#define GL_COLOR_ATTACHMENT26 0x8CFA
+#define GL_COLOR_ATTACHMENT27 0x8CFB
+#define GL_COLOR_ATTACHMENT28 0x8CFC
+#define GL_COLOR_ATTACHMENT29 0x8CFD
+#define GL_COLOR_ATTACHMENT3 0x8CE3
+#define GL_COLOR_ATTACHMENT30 0x8CFE
+#define GL_COLOR_ATTACHMENT31 0x8CFF
+#define GL_COLOR_ATTACHMENT4 0x8CE4
+#define GL_COLOR_ATTACHMENT5 0x8CE5
+#define GL_COLOR_ATTACHMENT6 0x8CE6
+#define GL_COLOR_ATTACHMENT7 0x8CE7
+#define GL_COLOR_ATTACHMENT8 0x8CE8
+#define GL_COLOR_ATTACHMENT9 0x8CE9
+#define GL_COLOR_BUFFER_BIT 0x00004000
+#define GL_COLOR_CLEAR_VALUE 0x0C22
+#define GL_COLOR_INDEX 0x1900
+#define GL_COLOR_INDEXES 0x1603
+#define GL_COLOR_LOGIC_OP 0x0BF2
+#define GL_COLOR_MATERIAL 0x0B57
+#define GL_COLOR_MATERIAL_FACE 0x0B55
+#define GL_COLOR_MATERIAL_PARAMETER 0x0B56
+#define GL_COLOR_SUM 0x8458
+#define GL_COLOR_WRITEMASK 0x0C23
+#define GL_COMBINE 0x8570
+#define GL_COMBINE_ALPHA 0x8572
+#define GL_COMBINE_RGB 0x8571
+#define GL_COMPARE_REF_TO_TEXTURE 0x884E
+#define GL_COMPARE_R_TO_TEXTURE 0x884E
+#define GL_COMPILE 0x1300
+#define GL_COMPILE_AND_EXECUTE 0x1301
+#define GL_COMPILE_STATUS 0x8B81
+#define GL_COMPRESSED_ALPHA 0x84E9
+#define GL_COMPRESSED_INTENSITY 0x84EC
+#define GL_COMPRESSED_LUMINANCE 0x84EA
+#define GL_COMPRESSED_LUMINANCE_ALPHA 0x84EB
+#define GL_COMPRESSED_RED 0x8225
+#define GL_COMPRESSED_RED_RGTC1 0x8DBB
+#define GL_COMPRESSED_RG 0x8226
+#define GL_COMPRESSED_RGB 0x84ED
+#define GL_COMPRESSED_RGBA 0x84EE
+#define GL_COMPRESSED_RG_RGTC2 0x8DBD
+#define GL_COMPRESSED_SIGNED_RED_RGTC1 0x8DBC
+#define GL_COMPRESSED_SIGNED_RG_RGTC2 0x8DBE
+#define GL_COMPRESSED_SLUMINANCE 0x8C4A
+#define GL_COMPRESSED_SLUMINANCE_ALPHA 0x8C4B
+#define GL_COMPRESSED_SRGB 0x8C48
+#define GL_COMPRESSED_SRGB_ALPHA 0x8C49
+#define GL_COMPRESSED_TEXTURE_FORMATS 0x86A3
+#define GL_CONDITION_SATISFIED 0x911C
+#define GL_CONSTANT 0x8576
+#define GL_CONSTANT_ALPHA 0x8003
+#define GL_CONSTANT_ATTENUATION 0x1207
+#define GL_CONSTANT_COLOR 0x8001
+#define GL_CONTEXT_COMPATIBILITY_PROFILE_BIT 0x00000002
+#define GL_CONTEXT_CORE_PROFILE_BIT 0x00000001
+#define GL_CONTEXT_FLAGS 0x821E
+#define GL_CONTEXT_FLAG_DEBUG_BIT 0x00000002
+#define GL_CONTEXT_FLAG_FORWARD_COMPATIBLE_BIT 0x00000001
+#define GL_CONTEXT_FLAG_ROBUST_ACCESS_BIT_ARB 0x00000004
+#define GL_CONTEXT_PROFILE_MASK 0x9126
+#define GL_COORD_REPLACE 0x8862
+#define GL_COPY 0x1503
+#define GL_COPY_INVERTED 0x150C
+#define GL_COPY_PIXEL_TOKEN 0x0706
+#define GL_COPY_READ_BUFFER 0x8F36
+#define GL_COPY_WRITE_BUFFER 0x8F37
+#define GL_CULL_FACE 0x0B44
+#define GL_CULL_FACE_MODE 0x0B45
+#define GL_CURRENT_BIT 0x00000001
+#define GL_CURRENT_COLOR 0x0B00
+#define GL_CURRENT_FOG_COORD 0x8453
+#define GL_CURRENT_FOG_COORDINATE 0x8453
+#define GL_CURRENT_INDEX 0x0B01
+#define GL_CURRENT_NORMAL 0x0B02
+#define GL_CURRENT_PROGRAM 0x8B8D
+#define GL_CURRENT_QUERY 0x8865
+#define GL_CURRENT_RASTER_COLOR 0x0B04
+#define GL_CURRENT_RASTER_DISTANCE 0x0B09
+#define GL_CURRENT_RASTER_INDEX 0x0B05
+#define GL_CURRENT_RASTER_POSITION 0x0B07
+#define GL_CURRENT_RASTER_POSITION_VALID 0x0B08
+#define GL_CURRENT_RASTER_SECONDARY_COLOR 0x845F
+#define GL_CURRENT_RASTER_TEXTURE_COORDS 0x0B06
+#define GL_CURRENT_SECONDARY_COLOR 0x8459
+#define GL_CURRENT_TEXTURE_COORDS 0x0B03
+#define GL_CURRENT_VERTEX_ATTRIB 0x8626
+#define GL_CW 0x0900
+#define GL_DEBUG_CALLBACK_FUNCTION 0x8244
+#define GL_DEBUG_CALLBACK_USER_PARAM 0x8245
+#define GL_DEBUG_GROUP_STACK_DEPTH 0x826D
+#define GL_DEBUG_LOGGED_MESSAGES 0x9145
+#define GL_DEBUG_NEXT_LOGGED_MESSAGE_LENGTH 0x8243
+#define GL_DEBUG_OUTPUT 0x92E0
+#define GL_DEBUG_OUTPUT_SYNCHRONOUS 0x8242
+#define GL_DEBUG_SEVERITY_HIGH 0x9146
+#define GL_DEBUG_SEVERITY_LOW 0x9148
+#define GL_DEBUG_SEVERITY_MEDIUM 0x9147
+#define GL_DEBUG_SEVERITY_NOTIFICATION 0x826B
+#define GL_DEBUG_SOURCE_API 0x8246
+#define GL_DEBUG_SOURCE_APPLICATION 0x824A
+#define GL_DEBUG_SOURCE_OTHER 0x824B
+#define GL_DEBUG_SOURCE_SHADER_COMPILER 0x8248
+#define GL_DEBUG_SOURCE_THIRD_PARTY 0x8249
+#define GL_DEBUG_SOURCE_WINDOW_SYSTEM 0x8247
+#define GL_DEBUG_TYPE_DEPRECATED_BEHAVIOR 0x824D
+#define GL_DEBUG_TYPE_ERROR 0x824C
+#define GL_DEBUG_TYPE_MARKER 0x8268
+#define GL_DEBUG_TYPE_OTHER 0x8251
+#define GL_DEBUG_TYPE_PERFORMANCE 0x8250
+#define GL_DEBUG_TYPE_POP_GROUP 0x826A
+#define GL_DEBUG_TYPE_PORTABILITY 0x824F
+#define GL_DEBUG_TYPE_PUSH_GROUP 0x8269
+#define GL_DEBUG_TYPE_UNDEFINED_BEHAVIOR 0x824E
+#define GL_DECAL 0x2101
+#define GL_DECR 0x1E03
+#define GL_DECR_WRAP 0x8508
+#define GL_DELETE_STATUS 0x8B80
+#define GL_DEPTH 0x1801
+#define GL_DEPTH24_STENCIL8 0x88F0
+#define GL_DEPTH32F_STENCIL8 0x8CAD
+#define GL_DEPTH_ATTACHMENT 0x8D00
+#define GL_DEPTH_BIAS 0x0D1F
+#define GL_DEPTH_BITS 0x0D56
+#define GL_DEPTH_BUFFER_BIT 0x00000100
+#define GL_DEPTH_CLAMP 0x864F
+#define GL_DEPTH_CLEAR_VALUE 0x0B73
+#define GL_DEPTH_COMPONENT 0x1902
+#define GL_DEPTH_COMPONENT16 0x81A5
+#define GL_DEPTH_COMPONENT24 0x81A6
+#define GL_DEPTH_COMPONENT32 0x81A7
+#define GL_DEPTH_COMPONENT32F 0x8CAC
+#define GL_DEPTH_FUNC 0x0B74
+#define GL_DEPTH_RANGE 0x0B70
+#define GL_DEPTH_SCALE 0x0D1E
+#define GL_DEPTH_STENCIL 0x84F9
+#define GL_DEPTH_STENCIL_ATTACHMENT 0x821A
+#define GL_DEPTH_TEST 0x0B71
+#define GL_DEPTH_TEXTURE_MODE 0x884B
+#define GL_DEPTH_WRITEMASK 0x0B72
+#define GL_DIFFUSE 0x1201
+#define GL_DISPLAY_LIST 0x82E7
+#define GL_DITHER 0x0BD0
+#define GL_DOMAIN 0x0A02
+#define GL_DONT_CARE 0x1100
+#define GL_DOT3_RGB 0x86AE
+#define GL_DOT3_RGBA 0x86AF
+#define GL_DOUBLE 0x140A
+#define GL_DOUBLEBUFFER 0x0C32
+#define GL_DRAW_BUFFER 0x0C01
+#define GL_DRAW_BUFFER0 0x8825
+#define GL_DRAW_BUFFER1 0x8826
+#define GL_DRAW_BUFFER10 0x882F
+#define GL_DRAW_BUFFER11 0x8830
+#define GL_DRAW_BUFFER12 0x8831
+#define GL_DRAW_BUFFER13 0x8832
+#define GL_DRAW_BUFFER14 0x8833
+#define GL_DRAW_BUFFER15 0x8834
+#define GL_DRAW_BUFFER2 0x8827
+#define GL_DRAW_BUFFER3 0x8828
+#define GL_DRAW_BUFFER4 0x8829
+#define GL_DRAW_BUFFER5 0x882A
+#define GL_DRAW_BUFFER6 0x882B
+#define GL_DRAW_BUFFER7 0x882C
+#define GL_DRAW_BUFFER8 0x882D
+#define GL_DRAW_BUFFER9 0x882E
+#define GL_DRAW_FRAMEBUFFER 0x8CA9
+#define GL_DRAW_FRAMEBUFFER_BINDING 0x8CA6
+#define GL_DRAW_PIXEL_TOKEN 0x0705
+#define GL_DST_ALPHA 0x0304
+#define GL_DST_COLOR 0x0306
+#define GL_DYNAMIC_COPY 0x88EA
+#define GL_DYNAMIC_DRAW 0x88E8
+#define GL_DYNAMIC_READ 0x88E9
+#define GL_EDGE_FLAG 0x0B43
+#define GL_EDGE_FLAG_ARRAY 0x8079
+#define GL_EDGE_FLAG_ARRAY_BUFFER_BINDING 0x889B
+#define GL_EDGE_FLAG_ARRAY_POINTER 0x8093
+#define GL_EDGE_FLAG_ARRAY_STRIDE 0x808C
+#define GL_ELEMENT_ARRAY_BUFFER 0x8893
+#define GL_ELEMENT_ARRAY_BUFFER_BINDING 0x8895
+#define GL_EMISSION 0x1600
+#define GL_ENABLE_BIT 0x00002000
+#define GL_EQUAL 0x0202
+#define GL_EQUIV 0x1509
+#define GL_EVAL_BIT 0x00010000
+#define GL_EXP 0x0800
+#define GL_EXP2 0x0801
+#define GL_EXTENSIONS 0x1F03
+#define GL_EYE_LINEAR 0x2400
+#define GL_EYE_PLANE 0x2502
+#define GL_FALSE 0
+#define GL_FASTEST 0x1101
+#define GL_FEEDBACK 0x1C01
+#define GL_FEEDBACK_BUFFER_POINTER 0x0DF0
+#define GL_FEEDBACK_BUFFER_SIZE 0x0DF1
+#define GL_FEEDBACK_BUFFER_TYPE 0x0DF2
+#define GL_FILL 0x1B02
+#define GL_FIRST_VERTEX_CONVENTION 0x8E4D
+#define GL_FIXED_ONLY 0x891D
+#define GL_FLAT 0x1D00
+#define GL_FLOAT 0x1406
+#define GL_FLOAT_32_UNSIGNED_INT_24_8_REV 0x8DAD
+#define GL_FLOAT_MAT2 0x8B5A
+#define GL_FLOAT_MAT2x3 0x8B65
+#define GL_FLOAT_MAT2x4 0x8B66
+#define GL_FLOAT_MAT3 0x8B5B
+#define GL_FLOAT_MAT3x2 0x8B67
+#define GL_FLOAT_MAT3x4 0x8B68
+#define GL_FLOAT_MAT4 0x8B5C
+#define GL_FLOAT_MAT4x2 0x8B69
+#define GL_FLOAT_MAT4x3 0x8B6A
+#define GL_FLOAT_VEC2 0x8B50
+#define GL_FLOAT_VEC3 0x8B51
+#define GL_FLOAT_VEC4 0x8B52
+#define GL_FOG 0x0B60
+#define GL_FOG_BIT 0x00000080
+#define GL_FOG_COLOR 0x0B66
+#define GL_FOG_COORD 0x8451
+#define GL_FOG_COORDINATE 0x8451
+#define GL_FOG_COORDINATE_ARRAY 0x8457
+#define GL_FOG_COORDINATE_ARRAY_BUFFER_BINDING 0x889D
+#define GL_FOG_COORDINATE_ARRAY_POINTER 0x8456
+#define GL_FOG_COORDINATE_ARRAY_STRIDE 0x8455
+#define GL_FOG_COORDINATE_ARRAY_TYPE 0x8454
+#define GL_FOG_COORDINATE_SOURCE 0x8450
+#define GL_FOG_COORD_ARRAY 0x8457
+#define GL_FOG_COORD_ARRAY_BUFFER_BINDING 0x889D
+#define GL_FOG_COORD_ARRAY_POINTER 0x8456
+#define GL_FOG_COORD_ARRAY_STRIDE 0x8455
+#define GL_FOG_COORD_ARRAY_TYPE 0x8454
+#define GL_FOG_COORD_SRC 0x8450
+#define GL_FOG_DENSITY 0x0B62
+#define GL_FOG_END 0x0B64
+#define GL_FOG_HINT 0x0C54
+#define GL_FOG_INDEX 0x0B61
+#define GL_FOG_MODE 0x0B65
+#define GL_FOG_START 0x0B63
+#define GL_FRAGMENT_DEPTH 0x8452
+#define GL_FRAGMENT_SHADER 0x8B30
+#define GL_FRAGMENT_SHADER_DERIVATIVE_HINT 0x8B8B
+#define GL_FRAMEBUFFER 0x8D40
+#define GL_FRAMEBUFFER_ATTACHMENT_ALPHA_SIZE 0x8215
+#define GL_FRAMEBUFFER_ATTACHMENT_BLUE_SIZE 0x8214
+#define GL_FRAMEBUFFER_ATTACHMENT_COLOR_ENCODING 0x8210
+#define GL_FRAMEBUFFER_ATTACHMENT_COMPONENT_TYPE 0x8211
+#define GL_FRAMEBUFFER_ATTACHMENT_DEPTH_SIZE 0x8216
+#define GL_FRAMEBUFFER_ATTACHMENT_GREEN_SIZE 0x8213
+#define GL_FRAMEBUFFER_ATTACHMENT_LAYERED 0x8DA7
+#define GL_FRAMEBUFFER_ATTACHMENT_OBJECT_NAME 0x8CD1
+#define GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE 0x8CD0
+#define GL_FRAMEBUFFER_ATTACHMENT_RED_SIZE 0x8212
+#define GL_FRAMEBUFFER_ATTACHMENT_STENCIL_SIZE 0x8217
+#define GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_CUBE_MAP_FACE 0x8CD3
+#define GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LAYER 0x8CD4
+#define GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LEVEL 0x8CD2
+#define GL_FRAMEBUFFER_BINDING 0x8CA6
+#define GL_FRAMEBUFFER_COMPLETE 0x8CD5
+#define GL_FRAMEBUFFER_DEFAULT 0x8218
+#define GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT 0x8CD6
+#define GL_FRAMEBUFFER_INCOMPLETE_DRAW_BUFFER 0x8CDB
+#define GL_FRAMEBUFFER_INCOMPLETE_LAYER_TARGETS 0x8DA8
+#define GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT 0x8CD7
+#define GL_FRAMEBUFFER_INCOMPLETE_MULTISAMPLE 0x8D56
+#define GL_FRAMEBUFFER_INCOMPLETE_READ_BUFFER 0x8CDC
+#define GL_FRAMEBUFFER_SRGB 0x8DB9
+#define GL_FRAMEBUFFER_UNDEFINED 0x8219
+#define GL_FRAMEBUFFER_UNSUPPORTED 0x8CDD
+#define GL_FRONT 0x0404
+#define GL_FRONT_AND_BACK 0x0408
+#define GL_FRONT_FACE 0x0B46
+#define GL_FRONT_LEFT 0x0400
+#define GL_FRONT_RIGHT 0x0401
+#define GL_FUNC_ADD 0x8006
+#define GL_FUNC_REVERSE_SUBTRACT 0x800B
+#define GL_FUNC_SUBTRACT 0x800A
+#define GL_GENERATE_MIPMAP 0x8191
+#define GL_GENERATE_MIPMAP_HINT 0x8192
+#define GL_GEOMETRY_INPUT_TYPE 0x8917
+#define GL_GEOMETRY_OUTPUT_TYPE 0x8918
+#define GL_GEOMETRY_SHADER 0x8DD9
+#define GL_GEOMETRY_VERTICES_OUT 0x8916
+#define GL_GEQUAL 0x0206
+#define GL_GREATER 0x0204
+#define GL_GREEN 0x1904
+#define GL_GREEN_BIAS 0x0D19
+#define GL_GREEN_BITS 0x0D53
+#define GL_GREEN_INTEGER 0x8D95
+#define GL_GREEN_SCALE 0x0D18
+#define GL_GUILTY_CONTEXT_RESET_ARB 0x8253
+#define GL_HALF_FLOAT 0x140B
+#define GL_HINT_BIT 0x00008000
+#define GL_INCR 0x1E02
+#define GL_INCR_WRAP 0x8507
+#define GL_INDEX 0x8222
+#define GL_INDEX_ARRAY 0x8077
+#define GL_INDEX_ARRAY_BUFFER_BINDING 0x8899
+#define GL_INDEX_ARRAY_POINTER 0x8091
+#define GL_INDEX_ARRAY_STRIDE 0x8086
+#define GL_INDEX_ARRAY_TYPE 0x8085
+#define GL_INDEX_BITS 0x0D51
+#define GL_INDEX_CLEAR_VALUE 0x0C20
+#define GL_INDEX_LOGIC_OP 0x0BF1
+#define GL_INDEX_MODE 0x0C30
+#define GL_INDEX_OFFSET 0x0D13
+#define GL_INDEX_SHIFT 0x0D12
+#define GL_INDEX_WRITEMASK 0x0C21
+#define GL_INFO_LOG_LENGTH 0x8B84
+#define GL_INNOCENT_CONTEXT_RESET_ARB 0x8254
+#define GL_INT 0x1404
+#define GL_INTENSITY 0x8049
+#define GL_INTENSITY12 0x804C
+#define GL_INTENSITY16 0x804D
+#define GL_INTENSITY4 0x804A
+#define GL_INTENSITY8 0x804B
+#define GL_INTERLEAVED_ATTRIBS 0x8C8C
+#define GL_INTERPOLATE 0x8575
+#define GL_INT_2_10_10_10_REV 0x8D9F
+#define GL_INT_SAMPLER_1D 0x8DC9
+#define GL_INT_SAMPLER_1D_ARRAY 0x8DCE
+#define GL_INT_SAMPLER_2D 0x8DCA
+#define GL_INT_SAMPLER_2D_ARRAY 0x8DCF
+#define GL_INT_SAMPLER_2D_MULTISAMPLE 0x9109
+#define GL_INT_SAMPLER_2D_MULTISAMPLE_ARRAY 0x910C
+#define GL_INT_SAMPLER_2D_RECT 0x8DCD
+#define GL_INT_SAMPLER_3D 0x8DCB
+#define GL_INT_SAMPLER_BUFFER 0x8DD0
+#define GL_INT_SAMPLER_CUBE 0x8DCC
+#define GL_INT_VEC2 0x8B53
+#define GL_INT_VEC3 0x8B54
+#define GL_INT_VEC4 0x8B55
+#define GL_INVALID_ENUM 0x0500
+#define GL_INVALID_FRAMEBUFFER_OPERATION 0x0506
+#define GL_INVALID_INDEX 0xFFFFFFFF
+#define GL_INVALID_OPERATION 0x0502
+#define GL_INVALID_VALUE 0x0501
+#define GL_INVERT 0x150A
+#define GL_KEEP 0x1E00
+#define GL_LAST_VERTEX_CONVENTION 0x8E4E
+#define GL_LEFT 0x0406
+#define GL_LEQUAL 0x0203
+#define GL_LESS 0x0201
+#define GL_LIGHT0 0x4000
+#define GL_LIGHT1 0x4001
+#define GL_LIGHT2 0x4002
+#define GL_LIGHT3 0x4003
+#define GL_LIGHT4 0x4004
+#define GL_LIGHT5 0x4005
+#define GL_LIGHT6 0x4006
+#define GL_LIGHT7 0x4007
+#define GL_LIGHTING 0x0B50
+#define GL_LIGHTING_BIT 0x00000040
+#define GL_LIGHT_MODEL_AMBIENT 0x0B53
+#define GL_LIGHT_MODEL_COLOR_CONTROL 0x81F8
+#define GL_LIGHT_MODEL_LOCAL_VIEWER 0x0B51
+#define GL_LIGHT_MODEL_TWO_SIDE 0x0B52
+#define GL_LINE 0x1B01
+#define GL_LINEAR 0x2601
+#define GL_LINEAR_ATTENUATION 0x1208
+#define GL_LINEAR_MIPMAP_LINEAR 0x2703
+#define GL_LINEAR_MIPMAP_NEAREST 0x2701
+#define GL_LINES 0x0001
+#define GL_LINES_ADJACENCY 0x000A
+#define GL_LINE_BIT 0x00000004
+#define GL_LINE_LOOP 0x0002
+#define GL_LINE_RESET_TOKEN 0x0707
+#define GL_LINE_SMOOTH 0x0B20
+#define GL_LINE_SMOOTH_HINT 0x0C52
+#define GL_LINE_STIPPLE 0x0B24
+#define GL_LINE_STIPPLE_PATTERN 0x0B25
+#define GL_LINE_STIPPLE_REPEAT 0x0B26
+#define GL_LINE_STRIP 0x0003
+#define GL_LINE_STRIP_ADJACENCY 0x000B
+#define GL_LINE_TOKEN 0x0702
+#define GL_LINE_WIDTH 0x0B21
+#define GL_LINE_WIDTH_GRANULARITY 0x0B23
+#define GL_LINE_WIDTH_RANGE 0x0B22
+#define GL_LINK_STATUS 0x8B82
+#define GL_LIST_BASE 0x0B32
+#define GL_LIST_BIT 0x00020000
+#define GL_LIST_INDEX 0x0B33
+#define GL_LIST_MODE 0x0B30
+#define GL_LOAD 0x0101
+#define GL_LOGIC_OP 0x0BF1
+#define GL_LOGIC_OP_MODE 0x0BF0
+#define GL_LOSE_CONTEXT_ON_RESET_ARB 0x8252
+#define GL_LOWER_LEFT 0x8CA1
+#define GL_LUMINANCE 0x1909
+#define GL_LUMINANCE12 0x8041
+#define GL_LUMINANCE12_ALPHA12 0x8047
+#define GL_LUMINANCE12_ALPHA4 0x8046
+#define GL_LUMINANCE16 0x8042
+#define GL_LUMINANCE16_ALPHA16 0x8048
+#define GL_LUMINANCE4 0x803F
+#define GL_LUMINANCE4_ALPHA4 0x8043
+#define GL_LUMINANCE6_ALPHA2 0x8044
+#define GL_LUMINANCE8 0x8040
+#define GL_LUMINANCE8_ALPHA8 0x8045
+#define GL_LUMINANCE_ALPHA 0x190A
+#define GL_MAJOR_VERSION 0x821B
+#define GL_MAP1_COLOR_4 0x0D90
+#define GL_MAP1_GRID_DOMAIN 0x0DD0
+#define GL_MAP1_GRID_SEGMENTS 0x0DD1
+#define GL_MAP1_INDEX 0x0D91
+#define GL_MAP1_NORMAL 0x0D92
+#define GL_MAP1_TEXTURE_COORD_1 0x0D93
+#define GL_MAP1_TEXTURE_COORD_2 0x0D94
+#define GL_MAP1_TEXTURE_COORD_3 0x0D95
+#define GL_MAP1_TEXTURE_COORD_4 0x0D96
+#define GL_MAP1_VERTEX_3 0x0D97
+#define GL_MAP1_VERTEX_4 0x0D98
+#define GL_MAP2_COLOR_4 0x0DB0
+#define GL_MAP2_GRID_DOMAIN 0x0DD2
+#define GL_MAP2_GRID_SEGMENTS 0x0DD3
+#define GL_MAP2_INDEX 0x0DB1
+#define GL_MAP2_NORMAL 0x0DB2
+#define GL_MAP2_TEXTURE_COORD_1 0x0DB3
+#define GL_MAP2_TEXTURE_COORD_2 0x0DB4
+#define GL_MAP2_TEXTURE_COORD_3 0x0DB5
+#define GL_MAP2_TEXTURE_COORD_4 0x0DB6
+#define GL_MAP2_VERTEX_3 0x0DB7
+#define GL_MAP2_VERTEX_4 0x0DB8
+#define GL_MAP_COLOR 0x0D10
+#define GL_MAP_FLUSH_EXPLICIT_BIT 0x0010
+#define GL_MAP_INVALIDATE_BUFFER_BIT 0x0008
+#define GL_MAP_INVALIDATE_RANGE_BIT 0x0004
+#define GL_MAP_READ_BIT 0x0001
+#define GL_MAP_STENCIL 0x0D11
+#define GL_MAP_UNSYNCHRONIZED_BIT 0x0020
+#define GL_MAP_WRITE_BIT 0x0002
+#define GL_MATRIX_MODE 0x0BA0
+#define GL_MAX 0x8008
+#define GL_MAX_3D_TEXTURE_SIZE 0x8073
+#define GL_MAX_ARRAY_TEXTURE_LAYERS 0x88FF
+#define GL_MAX_ATTRIB_STACK_DEPTH 0x0D35
+#define GL_MAX_CLIENT_ATTRIB_STACK_DEPTH 0x0D3B
+#define GL_MAX_CLIP_DISTANCES 0x0D32
+#define GL_MAX_CLIP_PLANES 0x0D32
+#define GL_MAX_COLOR_ATTACHMENTS 0x8CDF
+#define GL_MAX_COLOR_TEXTURE_SAMPLES 0x910E
+#define GL_MAX_COMBINED_FRAGMENT_UNIFORM_COMPONENTS 0x8A33
+#define GL_MAX_COMBINED_GEOMETRY_UNIFORM_COMPONENTS 0x8A32
+#define GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS 0x8B4D
+#define GL_MAX_COMBINED_UNIFORM_BLOCKS 0x8A2E
+#define GL_MAX_COMBINED_VERTEX_UNIFORM_COMPONENTS 0x8A31
+#define GL_MAX_CUBE_MAP_TEXTURE_SIZE 0x851C
+#define GL_MAX_DEBUG_GROUP_STACK_DEPTH 0x826C
+#define GL_MAX_DEBUG_LOGGED_MESSAGES 0x9144
+#define GL_MAX_DEBUG_MESSAGE_LENGTH 0x9143
+#define GL_MAX_DEPTH_TEXTURE_SAMPLES 0x910F
+#define GL_MAX_DRAW_BUFFERS 0x8824
+#define GL_MAX_DUAL_SOURCE_DRAW_BUFFERS 0x88FC
+#define GL_MAX_ELEMENTS_INDICES 0x80E9
+#define GL_MAX_ELEMENTS_VERTICES 0x80E8
+#define GL_MAX_EVAL_ORDER 0x0D30
+#define GL_MAX_FRAGMENT_INPUT_COMPONENTS 0x9125
+#define GL_MAX_FRAGMENT_UNIFORM_BLOCKS 0x8A2D
+#define GL_MAX_FRAGMENT_UNIFORM_COMPONENTS 0x8B49
+#define GL_MAX_GEOMETRY_INPUT_COMPONENTS 0x9123
+#define GL_MAX_GEOMETRY_OUTPUT_COMPONENTS 0x9124
+#define GL_MAX_GEOMETRY_OUTPUT_VERTICES 0x8DE0
+#define GL_MAX_GEOMETRY_TEXTURE_IMAGE_UNITS 0x8C29
+#define GL_MAX_GEOMETRY_TOTAL_OUTPUT_COMPONENTS 0x8DE1
+#define GL_MAX_GEOMETRY_UNIFORM_BLOCKS 0x8A2C
+#define GL_MAX_GEOMETRY_UNIFORM_COMPONENTS 0x8DDF
+#define GL_MAX_INTEGER_SAMPLES 0x9110
+#define GL_MAX_LABEL_LENGTH 0x82E8
+#define GL_MAX_LIGHTS 0x0D31
+#define GL_MAX_LIST_NESTING 0x0B31
+#define GL_MAX_MODELVIEW_STACK_DEPTH 0x0D36
+#define GL_MAX_NAME_STACK_DEPTH 0x0D37
+#define GL_MAX_PIXEL_MAP_TABLE 0x0D34
+#define GL_MAX_PROGRAM_TEXEL_OFFSET 0x8905
+#define GL_MAX_PROJECTION_STACK_DEPTH 0x0D38
+#define GL_MAX_RECTANGLE_TEXTURE_SIZE 0x84F8
+#define GL_MAX_RENDERBUFFER_SIZE 0x84E8
+#define GL_MAX_SAMPLES 0x8D57
+#define GL_MAX_SAMPLE_MASK_WORDS 0x8E59
+#define GL_MAX_SERVER_WAIT_TIMEOUT 0x9111
+#define GL_MAX_TEXTURE_BUFFER_SIZE 0x8C2B
+#define GL_MAX_TEXTURE_COORDS 0x8871
+#define GL_MAX_TEXTURE_IMAGE_UNITS 0x8872
+#define GL_MAX_TEXTURE_LOD_BIAS 0x84FD
+#define GL_MAX_TEXTURE_SIZE 0x0D33
+#define GL_MAX_TEXTURE_STACK_DEPTH 0x0D39
+#define GL_MAX_TEXTURE_UNITS 0x84E2
+#define GL_MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS 0x8C8A
+#define GL_MAX_TRANSFORM_FEEDBACK_SEPARATE_ATTRIBS 0x8C8B
+#define GL_MAX_TRANSFORM_FEEDBACK_SEPARATE_COMPONENTS 0x8C80
+#define GL_MAX_UNIFORM_BLOCK_SIZE 0x8A30
+#define GL_MAX_UNIFORM_BUFFER_BINDINGS 0x8A2F
+#define GL_MAX_VARYING_COMPONENTS 0x8B4B
+#define GL_MAX_VARYING_FLOATS 0x8B4B
+#define GL_MAX_VERTEX_ATTRIBS 0x8869
+#define GL_MAX_VERTEX_OUTPUT_COMPONENTS 0x9122
+#define GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS 0x8B4C
+#define GL_MAX_VERTEX_UNIFORM_BLOCKS 0x8A2B
+#define GL_MAX_VERTEX_UNIFORM_COMPONENTS 0x8B4A
+#define GL_MAX_VIEWPORT_DIMS 0x0D3A
+#define GL_MIN 0x8007
+#define GL_MINOR_VERSION 0x821C
+#define GL_MIN_PROGRAM_TEXEL_OFFSET 0x8904
+#define GL_MIRRORED_REPEAT 0x8370
+#define GL_MODELVIEW 0x1700
+#define GL_MODELVIEW_MATRIX 0x0BA6
+#define GL_MODELVIEW_STACK_DEPTH 0x0BA3
+#define GL_MODULATE 0x2100
+#define GL_MULT 0x0103
+#define GL_MULTISAMPLE 0x809D
+#define GL_MULTISAMPLE_ARB 0x809D
+#define GL_MULTISAMPLE_BIT 0x20000000
+#define GL_MULTISAMPLE_BIT_ARB 0x20000000
+#define GL_N3F_V3F 0x2A25
+#define GL_NAME_STACK_DEPTH 0x0D70
+#define GL_NAND 0x150E
+#define GL_NEAREST 0x2600
+#define GL_NEAREST_MIPMAP_LINEAR 0x2702
+#define GL_NEAREST_MIPMAP_NEAREST 0x2700
+#define GL_NEVER 0x0200
+#define GL_NICEST 0x1102
+#define GL_NONE 0
+#define GL_NOOP 0x1505
+#define GL_NOR 0x1508
+#define GL_NORMALIZE 0x0BA1
+#define GL_NORMAL_ARRAY 0x8075
+#define GL_NORMAL_ARRAY_BUFFER_BINDING 0x8897
+#define GL_NORMAL_ARRAY_POINTER 0x808F
+#define GL_NORMAL_ARRAY_STRIDE 0x807F
+#define GL_NORMAL_ARRAY_TYPE 0x807E
+#define GL_NORMAL_MAP 0x8511
+#define GL_NOTEQUAL 0x0205
+#define GL_NO_ERROR 0
+#define GL_NO_RESET_NOTIFICATION_ARB 0x8261
+#define GL_NUM_COMPRESSED_TEXTURE_FORMATS 0x86A2
+#define GL_NUM_EXTENSIONS 0x821D
+#define GL_OBJECT_LINEAR 0x2401
+#define GL_OBJECT_PLANE 0x2501
+#define GL_OBJECT_TYPE 0x9112
+#define GL_ONE 1
+#define GL_ONE_MINUS_CONSTANT_ALPHA 0x8004
+#define GL_ONE_MINUS_CONSTANT_COLOR 0x8002
+#define GL_ONE_MINUS_DST_ALPHA 0x0305
+#define GL_ONE_MINUS_DST_COLOR 0x0307
+#define GL_ONE_MINUS_SRC1_ALPHA 0x88FB
+#define GL_ONE_MINUS_SRC1_COLOR 0x88FA
+#define GL_ONE_MINUS_SRC_ALPHA 0x0303
+#define GL_ONE_MINUS_SRC_COLOR 0x0301
+#define GL_OPERAND0_ALPHA 0x8598
+#define GL_OPERAND0_RGB 0x8590
+#define GL_OPERAND1_ALPHA 0x8599
+#define GL_OPERAND1_RGB 0x8591
+#define GL_OPERAND2_ALPHA 0x859A
+#define GL_OPERAND2_RGB 0x8592
+#define GL_OR 0x1507
+#define GL_ORDER 0x0A01
+#define GL_OR_INVERTED 0x150D
+#define GL_OR_REVERSE 0x150B
+#define GL_OUT_OF_MEMORY 0x0505
+#define GL_PACK_ALIGNMENT 0x0D05
+#define GL_PACK_IMAGE_HEIGHT 0x806C
+#define GL_PACK_LSB_FIRST 0x0D01
+#define GL_PACK_ROW_LENGTH 0x0D02
+#define GL_PACK_SKIP_IMAGES 0x806B
+#define GL_PACK_SKIP_PIXELS 0x0D04
+#define GL_PACK_SKIP_ROWS 0x0D03
+#define GL_PACK_SWAP_BYTES 0x0D00
+#define GL_PASS_THROUGH_TOKEN 0x0700
+#define GL_PERSPECTIVE_CORRECTION_HINT 0x0C50
+#define GL_PIXEL_MAP_A_TO_A 0x0C79
+#define GL_PIXEL_MAP_A_TO_A_SIZE 0x0CB9
+#define GL_PIXEL_MAP_B_TO_B 0x0C78
+#define GL_PIXEL_MAP_B_TO_B_SIZE 0x0CB8
+#define GL_PIXEL_MAP_G_TO_G 0x0C77
+#define GL_PIXEL_MAP_G_TO_G_SIZE 0x0CB7
+#define GL_PIXEL_MAP_I_TO_A 0x0C75
+#define GL_PIXEL_MAP_I_TO_A_SIZE 0x0CB5
+#define GL_PIXEL_MAP_I_TO_B 0x0C74
+#define GL_PIXEL_MAP_I_TO_B_SIZE 0x0CB4
+#define GL_PIXEL_MAP_I_TO_G 0x0C73
+#define GL_PIXEL_MAP_I_TO_G_SIZE 0x0CB3
+#define GL_PIXEL_MAP_I_TO_I 0x0C70
+#define GL_PIXEL_MAP_I_TO_I_SIZE 0x0CB0
+#define GL_PIXEL_MAP_I_TO_R 0x0C72
+#define GL_PIXEL_MAP_I_TO_R_SIZE 0x0CB2
+#define GL_PIXEL_MAP_R_TO_R 0x0C76
+#define GL_PIXEL_MAP_R_TO_R_SIZE 0x0CB6
+#define GL_PIXEL_MAP_S_TO_S 0x0C71
+#define GL_PIXEL_MAP_S_TO_S_SIZE 0x0CB1
+#define GL_PIXEL_MODE_BIT 0x00000020
+#define GL_PIXEL_PACK_BUFFER 0x88EB
+#define GL_PIXEL_PACK_BUFFER_BINDING 0x88ED
+#define GL_PIXEL_UNPACK_BUFFER 0x88EC
+#define GL_PIXEL_UNPACK_BUFFER_BINDING 0x88EF
+#define GL_POINT 0x1B00
+#define GL_POINTS 0x0000
+#define GL_POINT_BIT 0x00000002
+#define GL_POINT_DISTANCE_ATTENUATION 0x8129
+#define GL_POINT_FADE_THRESHOLD_SIZE 0x8128
+#define GL_POINT_SIZE 0x0B11
+#define GL_POINT_SIZE_GRANULARITY 0x0B13
+#define GL_POINT_SIZE_MAX 0x8127
+#define GL_POINT_SIZE_MIN 0x8126
+#define GL_POINT_SIZE_RANGE 0x0B12
+#define GL_POINT_SMOOTH 0x0B10
+#define GL_POINT_SMOOTH_HINT 0x0C51
+#define GL_POINT_SPRITE 0x8861
+#define GL_POINT_SPRITE_COORD_ORIGIN 0x8CA0
+#define GL_POINT_TOKEN 0x0701
+#define GL_POLYGON 0x0009
+#define GL_POLYGON_BIT 0x00000008
+#define GL_POLYGON_MODE 0x0B40
+#define GL_POLYGON_OFFSET_FACTOR 0x8038
+#define GL_POLYGON_OFFSET_FILL 0x8037
+#define GL_POLYGON_OFFSET_LINE 0x2A02
+#define GL_POLYGON_OFFSET_POINT 0x2A01
+#define GL_POLYGON_OFFSET_UNITS 0x2A00
+#define GL_POLYGON_SMOOTH 0x0B41
+#define GL_POLYGON_SMOOTH_HINT 0x0C53
+#define GL_POLYGON_STIPPLE 0x0B42
+#define GL_POLYGON_STIPPLE_BIT 0x00000010
+#define GL_POLYGON_TOKEN 0x0703
+#define GL_POSITION 0x1203
+#define GL_PREVIOUS 0x8578
+#define GL_PRIMARY_COLOR 0x8577
+#define GL_PRIMITIVES_GENERATED 0x8C87
+#define GL_PRIMITIVE_RESTART 0x8F9D
+#define GL_PRIMITIVE_RESTART_INDEX 0x8F9E
+#define GL_PROGRAM 0x82E2
+#define GL_PROGRAM_PIPELINE 0x82E4
+#define GL_PROGRAM_POINT_SIZE 0x8642
+#define GL_PROJECTION 0x1701
+#define GL_PROJECTION_MATRIX 0x0BA7
+#define GL_PROJECTION_STACK_DEPTH 0x0BA4
+#define GL_PROVOKING_VERTEX 0x8E4F
+#define GL_PROXY_TEXTURE_1D 0x8063
+#define GL_PROXY_TEXTURE_1D_ARRAY 0x8C19
+#define GL_PROXY_TEXTURE_2D 0x8064
+#define GL_PROXY_TEXTURE_2D_ARRAY 0x8C1B
+#define GL_PROXY_TEXTURE_2D_MULTISAMPLE 0x9101
+#define GL_PROXY_TEXTURE_2D_MULTISAMPLE_ARRAY 0x9103
+#define GL_PROXY_TEXTURE_3D 0x8070
+#define GL_PROXY_TEXTURE_CUBE_MAP 0x851B
+#define GL_PROXY_TEXTURE_RECTANGLE 0x84F7
+#define GL_Q 0x2003
+#define GL_QUADRATIC_ATTENUATION 0x1209
+#define GL_QUADS 0x0007
+#define GL_QUADS_FOLLOW_PROVOKING_VERTEX_CONVENTION 0x8E4C
+#define GL_QUAD_STRIP 0x0008
+#define GL_QUERY 0x82E3
+#define GL_QUERY_BY_REGION_NO_WAIT 0x8E16
+#define GL_QUERY_BY_REGION_WAIT 0x8E15
+#define GL_QUERY_COUNTER_BITS 0x8864
+#define GL_QUERY_NO_WAIT 0x8E14
+#define GL_QUERY_RESULT 0x8866
+#define GL_QUERY_RESULT_AVAILABLE 0x8867
+#define GL_QUERY_WAIT 0x8E13
+#define GL_R 0x2002
+#define GL_R11F_G11F_B10F 0x8C3A
+#define GL_R16 0x822A
+#define GL_R16F 0x822D
+#define GL_R16I 0x8233
+#define GL_R16UI 0x8234
+#define GL_R16_SNORM 0x8F98
+#define GL_R32F 0x822E
+#define GL_R32I 0x8235
+#define GL_R32UI 0x8236
+#define GL_R3_G3_B2 0x2A10
+#define GL_R8 0x8229
+#define GL_R8I 0x8231
+#define GL_R8UI 0x8232
+#define GL_R8_SNORM 0x8F94
+#define GL_RASTERIZER_DISCARD 0x8C89
+#define GL_READ_BUFFER 0x0C02
+#define GL_READ_FRAMEBUFFER 0x8CA8
+#define GL_READ_FRAMEBUFFER_BINDING 0x8CAA
+#define GL_READ_ONLY 0x88B8
+#define GL_READ_WRITE 0x88BA
+#define GL_RED 0x1903
+#define GL_RED_BIAS 0x0D15
+#define GL_RED_BITS 0x0D52
+#define GL_RED_INTEGER 0x8D94
+#define GL_RED_SCALE 0x0D14
+#define GL_REFLECTION_MAP 0x8512
+#define GL_RENDER 0x1C00
+#define GL_RENDERBUFFER 0x8D41
+#define GL_RENDERBUFFER_ALPHA_SIZE 0x8D53
+#define GL_RENDERBUFFER_BINDING 0x8CA7
+#define GL_RENDERBUFFER_BLUE_SIZE 0x8D52
+#define GL_RENDERBUFFER_DEPTH_SIZE 0x8D54
+#define GL_RENDERBUFFER_GREEN_SIZE 0x8D51
+#define GL_RENDERBUFFER_HEIGHT 0x8D43
+#define GL_RENDERBUFFER_INTERNAL_FORMAT 0x8D44
+#define GL_RENDERBUFFER_RED_SIZE 0x8D50
+#define GL_RENDERBUFFER_SAMPLES 0x8CAB
+#define GL_RENDERBUFFER_STENCIL_SIZE 0x8D55
+#define GL_RENDERBUFFER_WIDTH 0x8D42
+#define GL_RENDERER 0x1F01
+#define GL_RENDER_MODE 0x0C40
+#define GL_REPEAT 0x2901
+#define GL_REPLACE 0x1E01
+#define GL_RESCALE_NORMAL 0x803A
+#define GL_RESET_NOTIFICATION_STRATEGY_ARB 0x8256
+#define GL_RETURN 0x0102
+#define GL_RG 0x8227
+#define GL_RG16 0x822C
+#define GL_RG16F 0x822F
+#define GL_RG16I 0x8239
+#define GL_RG16UI 0x823A
+#define GL_RG16_SNORM 0x8F99
+#define GL_RG32F 0x8230
+#define GL_RG32I 0x823B
+#define GL_RG32UI 0x823C
+#define GL_RG8 0x822B
+#define GL_RG8I 0x8237
+#define GL_RG8UI 0x8238
+#define GL_RG8_SNORM 0x8F95
+#define GL_RGB 0x1907
+#define GL_RGB10 0x8052
+#define GL_RGB10_A2 0x8059
+#define GL_RGB10_A2UI 0x906F
+#define GL_RGB12 0x8053
+#define GL_RGB16 0x8054
+#define GL_RGB16F 0x881B
+#define GL_RGB16I 0x8D89
+#define GL_RGB16UI 0x8D77
+#define GL_RGB16_SNORM 0x8F9A
+#define GL_RGB32F 0x8815
+#define GL_RGB32I 0x8D83
+#define GL_RGB32UI 0x8D71
+#define GL_RGB4 0x804F
+#define GL_RGB5 0x8050
+#define GL_RGB5_A1 0x8057
+#define GL_RGB8 0x8051
+#define GL_RGB8I 0x8D8F
+#define GL_RGB8UI 0x8D7D
+#define GL_RGB8_SNORM 0x8F96
+#define GL_RGB9_E5 0x8C3D
+#define GL_RGBA 0x1908
+#define GL_RGBA12 0x805A
+#define GL_RGBA16 0x805B
+#define GL_RGBA16F 0x881A
+#define GL_RGBA16I 0x8D88
+#define GL_RGBA16UI 0x8D76
+#define GL_RGBA16_SNORM 0x8F9B
+#define GL_RGBA2 0x8055
+#define GL_RGBA32F 0x8814
+#define GL_RGBA32I 0x8D82
+#define GL_RGBA32UI 0x8D70
+#define GL_RGBA4 0x8056
+#define GL_RGBA8 0x8058
+#define GL_RGBA8I 0x8D8E
+#define GL_RGBA8UI 0x8D7C
+#define GL_RGBA8_SNORM 0x8F97
+#define GL_RGBA_INTEGER 0x8D99
+#define GL_RGBA_MODE 0x0C31
+#define GL_RGB_INTEGER 0x8D98
+#define GL_RGB_SCALE 0x8573
+#define GL_RG_INTEGER 0x8228
+#define GL_RIGHT 0x0407
+#define GL_S 0x2000
+#define GL_SAMPLER 0x82E6
+#define GL_SAMPLER_1D 0x8B5D
+#define GL_SAMPLER_1D_ARRAY 0x8DC0
+#define GL_SAMPLER_1D_ARRAY_SHADOW 0x8DC3
+#define GL_SAMPLER_1D_SHADOW 0x8B61
+#define GL_SAMPLER_2D 0x8B5E
+#define GL_SAMPLER_2D_ARRAY 0x8DC1
+#define GL_SAMPLER_2D_ARRAY_SHADOW 0x8DC4
+#define GL_SAMPLER_2D_MULTISAMPLE 0x9108
+#define GL_SAMPLER_2D_MULTISAMPLE_ARRAY 0x910B
+#define GL_SAMPLER_2D_RECT 0x8B63
+#define GL_SAMPLER_2D_RECT_SHADOW 0x8B64
+#define GL_SAMPLER_2D_SHADOW 0x8B62
+#define GL_SAMPLER_3D 0x8B5F
+#define GL_SAMPLER_BINDING 0x8919
+#define GL_SAMPLER_BUFFER 0x8DC2
+#define GL_SAMPLER_CUBE 0x8B60
+#define GL_SAMPLER_CUBE_SHADOW 0x8DC5
+#define GL_SAMPLES 0x80A9
+#define GL_SAMPLES_ARB 0x80A9
+#define GL_SAMPLES_PASSED 0x8914
+#define GL_SAMPLE_ALPHA_TO_COVERAGE 0x809E
+#define GL_SAMPLE_ALPHA_TO_COVERAGE_ARB 0x809E
+#define GL_SAMPLE_ALPHA_TO_ONE 0x809F
+#define GL_SAMPLE_ALPHA_TO_ONE_ARB 0x809F
+#define GL_SAMPLE_BUFFERS 0x80A8
+#define GL_SAMPLE_BUFFERS_ARB 0x80A8
+#define GL_SAMPLE_COVERAGE 0x80A0
+#define GL_SAMPLE_COVERAGE_ARB 0x80A0
+#define GL_SAMPLE_COVERAGE_INVERT 0x80AB
+#define GL_SAMPLE_COVERAGE_INVERT_ARB 0x80AB
+#define GL_SAMPLE_COVERAGE_VALUE 0x80AA
+#define GL_SAMPLE_COVERAGE_VALUE_ARB 0x80AA
+#define GL_SAMPLE_MASK 0x8E51
+#define GL_SAMPLE_MASK_VALUE 0x8E52
+#define GL_SAMPLE_POSITION 0x8E50
+#define GL_SCISSOR_BIT 0x00080000
+#define GL_SCISSOR_BOX 0x0C10
+#define GL_SCISSOR_TEST 0x0C11
+#define GL_SECONDARY_COLOR_ARRAY 0x845E
+#define GL_SECONDARY_COLOR_ARRAY_BUFFER_BINDING 0x889C
+#define GL_SECONDARY_COLOR_ARRAY_POINTER 0x845D
+#define GL_SECONDARY_COLOR_ARRAY_SIZE 0x845A
+#define GL_SECONDARY_COLOR_ARRAY_STRIDE 0x845C
+#define GL_SECONDARY_COLOR_ARRAY_TYPE 0x845B
+#define GL_SELECT 0x1C02
+#define GL_SELECTION_BUFFER_POINTER 0x0DF3
+#define GL_SELECTION_BUFFER_SIZE 0x0DF4
+#define GL_SEPARATE_ATTRIBS 0x8C8D
+#define GL_SEPARATE_SPECULAR_COLOR 0x81FA
+#define GL_SET 0x150F
+#define GL_SHADER 0x82E1
+#define GL_SHADER_SOURCE_LENGTH 0x8B88
+#define GL_SHADER_TYPE 0x8B4F
+#define GL_SHADE_MODEL 0x0B54
+#define GL_SHADING_LANGUAGE_VERSION 0x8B8C
+#define GL_SHININESS 0x1601
+#define GL_SHORT 0x1402
+#define GL_SIGNALED 0x9119
+#define GL_SIGNED_NORMALIZED 0x8F9C
+#define GL_SINGLE_COLOR 0x81F9
+#define GL_SLUMINANCE 0x8C46
+#define GL_SLUMINANCE8 0x8C47
+#define GL_SLUMINANCE8_ALPHA8 0x8C45
+#define GL_SLUMINANCE_ALPHA 0x8C44
+#define GL_SMOOTH 0x1D01
+#define GL_SMOOTH_LINE_WIDTH_GRANULARITY 0x0B23
+#define GL_SMOOTH_LINE_WIDTH_RANGE 0x0B22
+#define GL_SMOOTH_POINT_SIZE_GRANULARITY 0x0B13
+#define GL_SMOOTH_POINT_SIZE_RANGE 0x0B12
+#define GL_SOURCE0_ALPHA 0x8588
+#define GL_SOURCE0_RGB 0x8580
+#define GL_SOURCE1_ALPHA 0x8589
+#define GL_SOURCE1_RGB 0x8581
+#define GL_SOURCE2_ALPHA 0x858A
+#define GL_SOURCE2_RGB 0x8582
+#define GL_SPECULAR 0x1202
+#define GL_SPHERE_MAP 0x2402
+#define GL_SPOT_CUTOFF 0x1206
+#define GL_SPOT_DIRECTION 0x1204
+#define GL_SPOT_EXPONENT 0x1205
+#define GL_SRC0_ALPHA 0x8588
+#define GL_SRC0_RGB 0x8580
+#define GL_SRC1_ALPHA 0x8589
+#define GL_SRC1_COLOR 0x88F9
+#define GL_SRC1_RGB 0x8581
+#define GL_SRC2_ALPHA 0x858A
+#define GL_SRC2_RGB 0x8582
+#define GL_SRC_ALPHA 0x0302
+#define GL_SRC_ALPHA_SATURATE 0x0308
+#define GL_SRC_COLOR 0x0300
+#define GL_SRGB 0x8C40
+#define GL_SRGB8 0x8C41
+#define GL_SRGB8_ALPHA8 0x8C43
+#define GL_SRGB_ALPHA 0x8C42
+#define GL_STACK_OVERFLOW 0x0503
+#define GL_STACK_UNDERFLOW 0x0504
+#define GL_STATIC_COPY 0x88E6
+#define GL_STATIC_DRAW 0x88E4
+#define GL_STATIC_READ 0x88E5
+#define GL_STENCIL 0x1802
+#define GL_STENCIL_ATTACHMENT 0x8D20
+#define GL_STENCIL_BACK_FAIL 0x8801
+#define GL_STENCIL_BACK_FUNC 0x8800
+#define GL_STENCIL_BACK_PASS_DEPTH_FAIL 0x8802
+#define GL_STENCIL_BACK_PASS_DEPTH_PASS 0x8803
+#define GL_STENCIL_BACK_REF 0x8CA3
+#define GL_STENCIL_BACK_VALUE_MASK 0x8CA4
+#define GL_STENCIL_BACK_WRITEMASK 0x8CA5
+#define GL_STENCIL_BITS 0x0D57
+#define GL_STENCIL_BUFFER_BIT 0x00000400
+#define GL_STENCIL_CLEAR_VALUE 0x0B91
+#define GL_STENCIL_FAIL 0x0B94
+#define GL_STENCIL_FUNC 0x0B92
+#define GL_STENCIL_INDEX 0x1901
+#define GL_STENCIL_INDEX1 0x8D46
+#define GL_STENCIL_INDEX16 0x8D49
+#define GL_STENCIL_INDEX4 0x8D47
+#define GL_STENCIL_INDEX8 0x8D48
+#define GL_STENCIL_PASS_DEPTH_FAIL 0x0B95
+#define GL_STENCIL_PASS_DEPTH_PASS 0x0B96
+#define GL_STENCIL_REF 0x0B97
+#define GL_STENCIL_TEST 0x0B90
+#define GL_STENCIL_VALUE_MASK 0x0B93
+#define GL_STENCIL_WRITEMASK 0x0B98
+#define GL_STEREO 0x0C33
+#define GL_STREAM_COPY 0x88E2
+#define GL_STREAM_DRAW 0x88E0
+#define GL_STREAM_READ 0x88E1
+#define GL_SUBPIXEL_BITS 0x0D50
+#define GL_SUBTRACT 0x84E7
+#define GL_SYNC_CONDITION 0x9113
+#define GL_SYNC_FENCE 0x9116
+#define GL_SYNC_FLAGS 0x9115
+#define GL_SYNC_FLUSH_COMMANDS_BIT 0x00000001
+#define GL_SYNC_GPU_COMMANDS_COMPLETE 0x9117
+#define GL_SYNC_STATUS 0x9114
+#define GL_T 0x2001
+#define GL_T2F_C3F_V3F 0x2A2A
+#define GL_T2F_C4F_N3F_V3F 0x2A2C
+#define GL_T2F_C4UB_V3F 0x2A29
+#define GL_T2F_N3F_V3F 0x2A2B
+#define GL_T2F_V3F 0x2A27
+#define GL_T4F_C4F_N3F_V4F 0x2A2D
+#define GL_T4F_V4F 0x2A28
+#define GL_TEXTURE 0x1702
+#define GL_TEXTURE0 0x84C0
+#define GL_TEXTURE1 0x84C1
+#define GL_TEXTURE10 0x84CA
+#define GL_TEXTURE11 0x84CB
+#define GL_TEXTURE12 0x84CC
+#define GL_TEXTURE13 0x84CD
+#define GL_TEXTURE14 0x84CE
+#define GL_TEXTURE15 0x84CF
+#define GL_TEXTURE16 0x84D0
+#define GL_TEXTURE17 0x84D1
+#define GL_TEXTURE18 0x84D2
+#define GL_TEXTURE19 0x84D3
+#define GL_TEXTURE2 0x84C2
+#define GL_TEXTURE20 0x84D4
+#define GL_TEXTURE21 0x84D5
+#define GL_TEXTURE22 0x84D6
+#define GL_TEXTURE23 0x84D7
+#define GL_TEXTURE24 0x84D8
+#define GL_TEXTURE25 0x84D9
+#define GL_TEXTURE26 0x84DA
+#define GL_TEXTURE27 0x84DB
+#define GL_TEXTURE28 0x84DC
+#define GL_TEXTURE29 0x84DD
+#define GL_TEXTURE3 0x84C3
+#define GL_TEXTURE30 0x84DE
+#define GL_TEXTURE31 0x84DF
+#define GL_TEXTURE4 0x84C4
+#define GL_TEXTURE5 0x84C5
+#define GL_TEXTURE6 0x84C6
+#define GL_TEXTURE7 0x84C7
+#define GL_TEXTURE8 0x84C8
+#define GL_TEXTURE9 0x84C9
+#define GL_TEXTURE_1D 0x0DE0
+#define GL_TEXTURE_1D_ARRAY 0x8C18
+#define GL_TEXTURE_2D 0x0DE1
+#define GL_TEXTURE_2D_ARRAY 0x8C1A
+#define GL_TEXTURE_2D_MULTISAMPLE 0x9100
+#define GL_TEXTURE_2D_MULTISAMPLE_ARRAY 0x9102
+#define GL_TEXTURE_3D 0x806F
+#define GL_TEXTURE_ALPHA_SIZE 0x805F
+#define GL_TEXTURE_ALPHA_TYPE 0x8C13
+#define GL_TEXTURE_BASE_LEVEL 0x813C
+#define GL_TEXTURE_BINDING_1D 0x8068
+#define GL_TEXTURE_BINDING_1D_ARRAY 0x8C1C
+#define GL_TEXTURE_BINDING_2D 0x8069
+#define GL_TEXTURE_BINDING_2D_ARRAY 0x8C1D
+#define GL_TEXTURE_BINDING_2D_MULTISAMPLE 0x9104
+#define GL_TEXTURE_BINDING_2D_MULTISAMPLE_ARRAY 0x9105
+#define GL_TEXTURE_BINDING_3D 0x806A
+#define GL_TEXTURE_BINDING_BUFFER 0x8C2C
+#define GL_TEXTURE_BINDING_CUBE_MAP 0x8514
+#define GL_TEXTURE_BINDING_RECTANGLE 0x84F6
+#define GL_TEXTURE_BIT 0x00040000
+#define GL_TEXTURE_BLUE_SIZE 0x805E
+#define GL_TEXTURE_BLUE_TYPE 0x8C12
+#define GL_TEXTURE_BORDER 0x1005
+#define GL_TEXTURE_BORDER_COLOR 0x1004
+#define GL_TEXTURE_BUFFER 0x8C2A
+#define GL_TEXTURE_BUFFER_DATA_STORE_BINDING 0x8C2D
+#define GL_TEXTURE_COMPARE_FUNC 0x884D
+#define GL_TEXTURE_COMPARE_MODE 0x884C
+#define GL_TEXTURE_COMPONENTS 0x1003
+#define GL_TEXTURE_COMPRESSED 0x86A1
+#define GL_TEXTURE_COMPRESSED_IMAGE_SIZE 0x86A0
+#define GL_TEXTURE_COMPRESSION_HINT 0x84EF
+#define GL_TEXTURE_COORD_ARRAY 0x8078
+#define GL_TEXTURE_COORD_ARRAY_BUFFER_BINDING 0x889A
+#define GL_TEXTURE_COORD_ARRAY_POINTER 0x8092
+#define GL_TEXTURE_COORD_ARRAY_SIZE 0x8088
+#define GL_TEXTURE_COORD_ARRAY_STRIDE 0x808A
+#define GL_TEXTURE_COORD_ARRAY_TYPE 0x8089
+#define GL_TEXTURE_CUBE_MAP 0x8513
+#define GL_TEXTURE_CUBE_MAP_NEGATIVE_X 0x8516
+#define GL_TEXTURE_CUBE_MAP_NEGATIVE_Y 0x8518
+#define GL_TEXTURE_CUBE_MAP_NEGATIVE_Z 0x851A
+#define GL_TEXTURE_CUBE_MAP_POSITIVE_X 0x8515
+#define GL_TEXTURE_CUBE_MAP_POSITIVE_Y 0x8517
+#define GL_TEXTURE_CUBE_MAP_POSITIVE_Z 0x8519
+#define GL_TEXTURE_CUBE_MAP_SEAMLESS 0x884F
+#define GL_TEXTURE_DEPTH 0x8071
+#define GL_TEXTURE_DEPTH_SIZE 0x884A
+#define GL_TEXTURE_DEPTH_TYPE 0x8C16
+#define GL_TEXTURE_ENV 0x2300
+#define GL_TEXTURE_ENV_COLOR 0x2201
+#define GL_TEXTURE_ENV_MODE 0x2200
+#define GL_TEXTURE_FILTER_CONTROL 0x8500
+#define GL_TEXTURE_FIXED_SAMPLE_LOCATIONS 0x9107
+#define GL_TEXTURE_GEN_MODE 0x2500
+#define GL_TEXTURE_GEN_Q 0x0C63
+#define GL_TEXTURE_GEN_R 0x0C62
+#define GL_TEXTURE_GEN_S 0x0C60
+#define GL_TEXTURE_GEN_T 0x0C61
+#define GL_TEXTURE_GREEN_SIZE 0x805D
+#define GL_TEXTURE_GREEN_TYPE 0x8C11
+#define GL_TEXTURE_HEIGHT 0x1001
+#define GL_TEXTURE_INTENSITY_SIZE 0x8061
+#define GL_TEXTURE_INTENSITY_TYPE 0x8C15
+#define GL_TEXTURE_INTERNAL_FORMAT 0x1003
+#define GL_TEXTURE_LOD_BIAS 0x8501
+#define GL_TEXTURE_LUMINANCE_SIZE 0x8060
+#define GL_TEXTURE_LUMINANCE_TYPE 0x8C14
+#define GL_TEXTURE_MAG_FILTER 0x2800
+#define GL_TEXTURE_MATRIX 0x0BA8
+#define GL_TEXTURE_MAX_LEVEL 0x813D
+#define GL_TEXTURE_MAX_LOD 0x813B
+#define GL_TEXTURE_MIN_FILTER 0x2801
+#define GL_TEXTURE_MIN_LOD 0x813A
+#define GL_TEXTURE_PRIORITY 0x8066
+#define GL_TEXTURE_RECTANGLE 0x84F5
+#define GL_TEXTURE_RED_SIZE 0x805C
+#define GL_TEXTURE_RED_TYPE 0x8C10
+#define GL_TEXTURE_RESIDENT 0x8067
+#define GL_TEXTURE_SAMPLES 0x9106
+#define GL_TEXTURE_SHARED_SIZE 0x8C3F
+#define GL_TEXTURE_STACK_DEPTH 0x0BA5
+#define GL_TEXTURE_STENCIL_SIZE 0x88F1
+#define GL_TEXTURE_SWIZZLE_A 0x8E45
+#define GL_TEXTURE_SWIZZLE_B 0x8E44
+#define GL_TEXTURE_SWIZZLE_G 0x8E43
+#define GL_TEXTURE_SWIZZLE_R 0x8E42
+#define GL_TEXTURE_SWIZZLE_RGBA 0x8E46
+#define GL_TEXTURE_WIDTH 0x1000
+#define GL_TEXTURE_WRAP_R 0x8072
+#define GL_TEXTURE_WRAP_S 0x2802
+#define GL_TEXTURE_WRAP_T 0x2803
+#define GL_TIMEOUT_EXPIRED 0x911B
+#define GL_TIMEOUT_IGNORED 0xFFFFFFFFFFFFFFFF
+#define GL_TIMESTAMP 0x8E28
+#define GL_TIME_ELAPSED 0x88BF
+#define GL_TRANSFORM_BIT 0x00001000
+#define GL_TRANSFORM_FEEDBACK_BUFFER 0x8C8E
+#define GL_TRANSFORM_FEEDBACK_BUFFER_BINDING 0x8C8F
+#define GL_TRANSFORM_FEEDBACK_BUFFER_MODE 0x8C7F
+#define GL_TRANSFORM_FEEDBACK_BUFFER_SIZE 0x8C85
+#define GL_TRANSFORM_FEEDBACK_BUFFER_START 0x8C84
+#define GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN 0x8C88
+#define GL_TRANSFORM_FEEDBACK_VARYINGS 0x8C83
+#define GL_TRANSFORM_FEEDBACK_VARYING_MAX_LENGTH 0x8C76
+#define GL_TRANSPOSE_COLOR_MATRIX 0x84E6
+#define GL_TRANSPOSE_MODELVIEW_MATRIX 0x84E3
+#define GL_TRANSPOSE_PROJECTION_MATRIX 0x84E4
+#define GL_TRANSPOSE_TEXTURE_MATRIX 0x84E5
+#define GL_TRIANGLES 0x0004
+#define GL_TRIANGLES_ADJACENCY 0x000C
+#define GL_TRIANGLE_FAN 0x0006
+#define GL_TRIANGLE_STRIP 0x0005
+#define GL_TRIANGLE_STRIP_ADJACENCY 0x000D
+#define GL_TRUE 1
+#define GL_UNIFORM_ARRAY_STRIDE 0x8A3C
+#define GL_UNIFORM_BLOCK_ACTIVE_UNIFORMS 0x8A42
+#define GL_UNIFORM_BLOCK_ACTIVE_UNIFORM_INDICES 0x8A43
+#define GL_UNIFORM_BLOCK_BINDING 0x8A3F
+#define GL_UNIFORM_BLOCK_DATA_SIZE 0x8A40
+#define GL_UNIFORM_BLOCK_INDEX 0x8A3A
+#define GL_UNIFORM_BLOCK_NAME_LENGTH 0x8A41
+#define GL_UNIFORM_BLOCK_REFERENCED_BY_FRAGMENT_SHADER 0x8A46
+#define GL_UNIFORM_BLOCK_REFERENCED_BY_GEOMETRY_SHADER 0x8A45
+#define GL_UNIFORM_BLOCK_REFERENCED_BY_VERTEX_SHADER 0x8A44
+#define GL_UNIFORM_BUFFER 0x8A11
+#define GL_UNIFORM_BUFFER_BINDING 0x8A28
+#define GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT 0x8A34
+#define GL_UNIFORM_BUFFER_SIZE 0x8A2A
+#define GL_UNIFORM_BUFFER_START 0x8A29
+#define GL_UNIFORM_IS_ROW_MAJOR 0x8A3E
+#define GL_UNIFORM_MATRIX_STRIDE 0x8A3D
+#define GL_UNIFORM_NAME_LENGTH 0x8A39
+#define GL_UNIFORM_OFFSET 0x8A3B
+#define GL_UNIFORM_SIZE 0x8A38
+#define GL_UNIFORM_TYPE 0x8A37
+#define GL_UNKNOWN_CONTEXT_RESET_ARB 0x8255
+#define GL_UNPACK_ALIGNMENT 0x0CF5
+#define GL_UNPACK_IMAGE_HEIGHT 0x806E
+#define GL_UNPACK_LSB_FIRST 0x0CF1
+#define GL_UNPACK_ROW_LENGTH 0x0CF2
+#define GL_UNPACK_SKIP_IMAGES 0x806D
+#define GL_UNPACK_SKIP_PIXELS 0x0CF4
+#define GL_UNPACK_SKIP_ROWS 0x0CF3
+#define GL_UNPACK_SWAP_BYTES 0x0CF0
+#define GL_UNSIGNALED 0x9118
+#define GL_UNSIGNED_BYTE 0x1401
+#define GL_UNSIGNED_BYTE_2_3_3_REV 0x8362
+#define GL_UNSIGNED_BYTE_3_3_2 0x8032
+#define GL_UNSIGNED_INT 0x1405
+#define GL_UNSIGNED_INT_10F_11F_11F_REV 0x8C3B
+#define GL_UNSIGNED_INT_10_10_10_2 0x8036
+#define GL_UNSIGNED_INT_24_8 0x84FA
+#define GL_UNSIGNED_INT_2_10_10_10_REV 0x8368
+#define GL_UNSIGNED_INT_5_9_9_9_REV 0x8C3E
+#define GL_UNSIGNED_INT_8_8_8_8 0x8035
+#define GL_UNSIGNED_INT_8_8_8_8_REV 0x8367
+#define GL_UNSIGNED_INT_SAMPLER_1D 0x8DD1
+#define GL_UNSIGNED_INT_SAMPLER_1D_ARRAY 0x8DD6
+#define GL_UNSIGNED_INT_SAMPLER_2D 0x8DD2
+#define GL_UNSIGNED_INT_SAMPLER_2D_ARRAY 0x8DD7
+#define GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE 0x910A
+#define GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE_ARRAY 0x910D
+#define GL_UNSIGNED_INT_SAMPLER_2D_RECT 0x8DD5
+#define GL_UNSIGNED_INT_SAMPLER_3D 0x8DD3
+#define GL_UNSIGNED_INT_SAMPLER_BUFFER 0x8DD8
+#define GL_UNSIGNED_INT_SAMPLER_CUBE 0x8DD4
+#define GL_UNSIGNED_INT_VEC2 0x8DC6
+#define GL_UNSIGNED_INT_VEC3 0x8DC7
+#define GL_UNSIGNED_INT_VEC4 0x8DC8
+#define GL_UNSIGNED_NORMALIZED 0x8C17
+#define GL_UNSIGNED_SHORT 0x1403
+#define GL_UNSIGNED_SHORT_1_5_5_5_REV 0x8366
+#define GL_UNSIGNED_SHORT_4_4_4_4 0x8033
+#define GL_UNSIGNED_SHORT_4_4_4_4_REV 0x8365
+#define GL_UNSIGNED_SHORT_5_5_5_1 0x8034
+#define GL_UNSIGNED_SHORT_5_6_5 0x8363
+#define GL_UNSIGNED_SHORT_5_6_5_REV 0x8364
+#define GL_UPPER_LEFT 0x8CA2
+#define GL_V2F 0x2A20
+#define GL_V3F 0x2A21
+#define GL_VALIDATE_STATUS 0x8B83
+#define GL_VENDOR 0x1F00
+#define GL_VERSION 0x1F02
+#define GL_VERTEX_ARRAY 0x8074
+#define GL_VERTEX_ARRAY_BINDING 0x85B5
+#define GL_VERTEX_ARRAY_BUFFER_BINDING 0x8896
+#define GL_VERTEX_ARRAY_POINTER 0x808E
+#define GL_VERTEX_ARRAY_SIZE 0x807A
+#define GL_VERTEX_ARRAY_STRIDE 0x807C
+#define GL_VERTEX_ARRAY_TYPE 0x807B
+#define GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING 0x889F
+#define GL_VERTEX_ATTRIB_ARRAY_DIVISOR 0x88FE
+#define GL_VERTEX_ATTRIB_ARRAY_ENABLED 0x8622
+#define GL_VERTEX_ATTRIB_ARRAY_INTEGER 0x88FD
+#define GL_VERTEX_ATTRIB_ARRAY_NORMALIZED 0x886A
+#define GL_VERTEX_ATTRIB_ARRAY_POINTER 0x8645
+#define GL_VERTEX_ATTRIB_ARRAY_SIZE 0x8623
+#define GL_VERTEX_ATTRIB_ARRAY_STRIDE 0x8624
+#define GL_VERTEX_ATTRIB_ARRAY_TYPE 0x8625
+#define GL_VERTEX_PROGRAM_POINT_SIZE 0x8642
+#define GL_VERTEX_PROGRAM_TWO_SIDE 0x8643
+#define GL_VERTEX_SHADER 0x8B31
+#define GL_VIEWPORT 0x0BA2
+#define GL_VIEWPORT_BIT 0x00000800
+#define GL_WAIT_FAILED 0x911D
+#define GL_WEIGHT_ARRAY_BUFFER_BINDING 0x889E
+#define GL_WRITE_ONLY 0x88B9
+#define GL_XOR 0x1506
+#define GL_ZERO 0
+#define GL_ZOOM_X 0x0D16
+#define GL_ZOOM_Y 0x0D17
+
+
+#ifndef __khrplatform_h_
+#define __khrplatform_h_
+
+/*
+** Copyright (c) 2008-2018 The Khronos Group Inc.
+**
+** Permission is hereby granted, free of charge, to any person obtaining a
+** copy of this software and/or associated documentation files (the
+** "Materials"), to deal in the Materials without restriction, including
+** without limitation the rights to use, copy, modify, merge, publish,
+** distribute, sublicense, and/or sell copies of the Materials, and to
+** permit persons to whom the Materials are furnished to do so, subject to
+** the following conditions:
+**
+** The above copyright notice and this permission notice shall be included
+** in all copies or substantial portions of the Materials.
+**
+** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+** EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+** MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+** IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+** CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+** TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+** MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+*/
+
+/* Khronos platform-specific types and definitions.
+ *
+ * The master copy of khrplatform.h is maintained in the Khronos EGL
+ * Registry repository at https://github.com/KhronosGroup/EGL-Registry
+ * The last semantic modification to khrplatform.h was at commit ID:
+ * 67a3e0864c2d75ea5287b9f3d2eb74a745936692
+ *
+ * Adopters may modify this file to suit their platform. Adopters are
+ * encouraged to submit platform specific modifications to the Khronos
+ * group so that they can be included in future versions of this file.
+ * Please submit changes by filing pull requests or issues on
+ * the EGL Registry repository linked above.
+ *
+ *
+ * See the Implementer's Guidelines for information about where this file
+ * should be located on your system and for more details of its use:
+ * http://www.khronos.org/registry/implementers_guide.pdf
+ *
+ * This file should be included as
+ * #include <KHR/khrplatform.h>
+ * by Khronos client API header files that use its types and defines.
+ *
+ * The types in khrplatform.h should only be used to define API-specific types.
+ *
+ * Types defined in khrplatform.h:
+ * khronos_int8_t signed 8 bit
+ * khronos_uint8_t unsigned 8 bit
+ * khronos_int16_t signed 16 bit
+ * khronos_uint16_t unsigned 16 bit
+ * khronos_int32_t signed 32 bit
+ * khronos_uint32_t unsigned 32 bit
+ * khronos_int64_t signed 64 bit
+ * khronos_uint64_t unsigned 64 bit
+ * khronos_intptr_t signed same number of bits as a pointer
+ * khronos_uintptr_t unsigned same number of bits as a pointer
+ * khronos_ssize_t signed size
+ * khronos_usize_t unsigned size
+ * khronos_float_t signed 32 bit floating point
+ * khronos_time_ns_t unsigned 64 bit time in nanoseconds
+ * khronos_utime_nanoseconds_t unsigned time interval or absolute time in
+ * nanoseconds
+ * khronos_stime_nanoseconds_t signed time interval in nanoseconds
+ * khronos_boolean_enum_t enumerated boolean type. This should
+ * only be used as a base type when a client API's boolean type is
+ * an enum. Client APIs which use an integer or other type for
+ * booleans cannot use this as the base type for their boolean.
+ *
+ * Tokens defined in khrplatform.h:
+ *
+ * KHRONOS_FALSE, KHRONOS_TRUE Enumerated boolean false/true values.
+ *
+ * KHRONOS_SUPPORT_INT64 is 1 if 64 bit integers are supported; otherwise 0.
+ * KHRONOS_SUPPORT_FLOAT is 1 if floats are supported; otherwise 0.
+ *
+ * Calling convention macros defined in this file:
+ * KHRONOS_APICALL
+ * KHRONOS_GLAD_API_PTR
+ * KHRONOS_APIATTRIBUTES
+ *
+ * These may be used in function prototypes as:
+ *
+ * KHRONOS_APICALL void KHRONOS_GLAD_API_PTR funcname(
+ * int arg1,
+ * int arg2) KHRONOS_APIATTRIBUTES;
+ */
+
+#if defined(__SCITECH_SNAP__) && !defined(KHRONOS_STATIC)
+# define KHRONOS_STATIC 1
+#endif
+
+/*-------------------------------------------------------------------------
+ * Definition of KHRONOS_APICALL
+ *-------------------------------------------------------------------------
+ * This precedes the return type of the function in the function prototype.
+ */
+#if defined(KHRONOS_STATIC)
+ /* If the preprocessor constant KHRONOS_STATIC is defined, make the
+ * header compatible with static linking. */
+# define KHRONOS_APICALL
+#elif defined(_WIN32)
+# define KHRONOS_APICALL __declspec(dllimport)
+#elif defined (__SYMBIAN32__)
+# define KHRONOS_APICALL IMPORT_C
+#elif defined(__ANDROID__)
+# define KHRONOS_APICALL __attribute__((visibility("default")))
+#else
+# define KHRONOS_APICALL
+#endif
+
+/*-------------------------------------------------------------------------
+ * Definition of KHRONOS_GLAD_API_PTR
+ *-------------------------------------------------------------------------
+ * This follows the return type of the function and precedes the function
+ * name in the function prototype.
+ */
+#if defined(_WIN32) && !defined(_WIN32_WCE) && !defined(__SCITECH_SNAP__)
+ /* Win32 but not WinCE */
+# define KHRONOS_GLAD_API_PTR __stdcall
+#else
+# define KHRONOS_GLAD_API_PTR
+#endif
+
+/*-------------------------------------------------------------------------
+ * Definition of KHRONOS_APIATTRIBUTES
+ *-------------------------------------------------------------------------
+ * This follows the closing parenthesis of the function prototype arguments.
+ */
+#if defined (__ARMCC_2__)
+#define KHRONOS_APIATTRIBUTES __softfp
+#else
+#define KHRONOS_APIATTRIBUTES
+#endif
+
+/*-------------------------------------------------------------------------
+ * basic type definitions
+ *-----------------------------------------------------------------------*/
+#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L) || defined(__GNUC__) || defined(__SCO__) || defined(__USLC__)
+
+
+/*
+ * Using <stdint.h>
+ */
+#include <stdint.h>
+typedef int32_t khronos_int32_t;
+typedef uint32_t khronos_uint32_t;
+typedef int64_t khronos_int64_t;
+typedef uint64_t khronos_uint64_t;
+#define KHRONOS_SUPPORT_INT64 1
+#define KHRONOS_SUPPORT_FLOAT 1
+
+#elif defined(__VMS ) || defined(__sgi)
+
+/*
+ * Using <inttypes.h>
+ */
+#include <inttypes.h>
+typedef int32_t khronos_int32_t;
+typedef uint32_t khronos_uint32_t;
+typedef int64_t khronos_int64_t;
+typedef uint64_t khronos_uint64_t;
+#define KHRONOS_SUPPORT_INT64 1
+#define KHRONOS_SUPPORT_FLOAT 1
+
+#elif defined(_WIN32) && !defined(__SCITECH_SNAP__)
+
+/*
+ * Win32
+ */
+typedef __int32 khronos_int32_t;
+typedef unsigned __int32 khronos_uint32_t;
+typedef __int64 khronos_int64_t;
+typedef unsigned __int64 khronos_uint64_t;
+#define KHRONOS_SUPPORT_INT64 1
+#define KHRONOS_SUPPORT_FLOAT 1
+
+#elif defined(__sun__) || defined(__digital__)
+
+/*
+ * Sun or Digital
+ */
+typedef int khronos_int32_t;
+typedef unsigned int khronos_uint32_t;
+#if defined(__arch64__) || defined(_LP64)
+typedef long int khronos_int64_t;
+typedef unsigned long int khronos_uint64_t;
+#else
+typedef long long int khronos_int64_t;
+typedef unsigned long long int khronos_uint64_t;
+#endif /* __arch64__ */
+#define KHRONOS_SUPPORT_INT64 1
+#define KHRONOS_SUPPORT_FLOAT 1
+
+#elif 0
+
+/*
+ * Hypothetical platform with no float or int64 support
+ */
+typedef int khronos_int32_t;
+typedef unsigned int khronos_uint32_t;
+#define KHRONOS_SUPPORT_INT64 0
+#define KHRONOS_SUPPORT_FLOAT 0
+
+#else
+
+/*
+ * Generic fallback
+ */
+#include <stdint.h>
+typedef int32_t khronos_int32_t;
+typedef uint32_t khronos_uint32_t;
+typedef int64_t khronos_int64_t;
+typedef uint64_t khronos_uint64_t;
+#define KHRONOS_SUPPORT_INT64 1
+#define KHRONOS_SUPPORT_FLOAT 1
+
+#endif
+
+
+/*
+ * Types that are (so far) the same on all platforms
+ */
+typedef signed char khronos_int8_t;
+typedef unsigned char khronos_uint8_t;
+typedef signed short int khronos_int16_t;
+typedef unsigned short int khronos_uint16_t;
+
+/*
+ * Types that differ between LLP64 and LP64 architectures - in LLP64,
+ * pointers are 64 bits, but 'long' is still 32 bits. Win64 appears
+ * to be the only LLP64 architecture in current use.
+ */
+#ifdef _WIN64
+typedef signed long long int khronos_intptr_t;
+typedef unsigned long long int khronos_uintptr_t;
+typedef signed long long int khronos_ssize_t;
+typedef unsigned long long int khronos_usize_t;
+#else
+typedef signed long int khronos_intptr_t;
+typedef unsigned long int khronos_uintptr_t;
+typedef signed long int khronos_ssize_t;
+typedef unsigned long int khronos_usize_t;
+#endif
+
+#if KHRONOS_SUPPORT_FLOAT
+/*
+ * Float type
+ */
+typedef float khronos_float_t;
+#endif
+
+#if KHRONOS_SUPPORT_INT64
+/* Time types
+ *
+ * These types can be used to represent a time interval in nanoseconds or
+ * an absolute Unadjusted System Time. Unadjusted System Time is the number
+ * of nanoseconds since some arbitrary system event (e.g. since the last
+ * time the system booted). The Unadjusted System Time is an unsigned
+ * 64 bit value that wraps back to 0 every 584 years. Time intervals
+ * may be either signed or unsigned.
+ */
+typedef khronos_uint64_t khronos_utime_nanoseconds_t;
+typedef khronos_int64_t khronos_stime_nanoseconds_t;
+#endif
+
+/*
+ * Dummy value used to pad enum types to 32 bits.
+ */
+#ifndef KHRONOS_MAX_ENUM
+#define KHRONOS_MAX_ENUM 0x7FFFFFFF
+#endif
+
+/*
+ * Enumerated boolean type
+ *
+ * Values other than zero should be considered to be true. Therefore
+ * comparisons should not be made against KHRONOS_TRUE.
+ */
+typedef enum {
+ KHRONOS_FALSE = 0,
+ KHRONOS_TRUE = 1,
+ KHRONOS_BOOLEAN_ENUM_FORCE_SIZE = KHRONOS_MAX_ENUM
+} khronos_boolean_enum_t;
+
+#endif /* __khrplatform_h_ */
+
+typedef unsigned int GLenum;
+
+typedef unsigned char GLboolean;
+
+typedef unsigned int GLbitfield;
+
+typedef void GLvoid;
+
+typedef khronos_int8_t GLbyte;
+
+typedef khronos_uint8_t GLubyte;
+
+typedef khronos_int16_t GLshort;
+
+typedef khronos_uint16_t GLushort;
+
+typedef int GLint;
+
+typedef unsigned int GLuint;
+
+typedef khronos_int32_t GLclampx;
+
+typedef int GLsizei;
+
+typedef khronos_float_t GLfloat;
+
+typedef khronos_float_t GLclampf;
+
+typedef double GLdouble;
+
+typedef double GLclampd;
+
+typedef void *GLeglClientBufferEXT;
+
+typedef void *GLeglImageOES;
+
+typedef char GLchar;
+
+typedef char GLcharARB;
+
+#ifdef __APPLE__
+typedef void *GLhandleARB;
+#else
+typedef unsigned int GLhandleARB;
+#endif
+
+typedef khronos_uint16_t GLhalf;
+
+typedef khronos_uint16_t GLhalfARB;
+
+typedef khronos_int32_t GLfixed;
+
+#if defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && (__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ > 1060)
+typedef khronos_intptr_t GLintptr;
+#else
+typedef khronos_intptr_t GLintptr;
+#endif
+
+#if defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && (__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ > 1060)
+typedef khronos_intptr_t GLintptrARB;
+#else
+typedef khronos_intptr_t GLintptrARB;
+#endif
+
+#if defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && (__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ > 1060)
+typedef khronos_ssize_t GLsizeiptr;
+#else
+typedef khronos_ssize_t GLsizeiptr;
+#endif
+
+#if defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && (__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ > 1060)
+typedef khronos_ssize_t GLsizeiptrARB;
+#else
+typedef khronos_ssize_t GLsizeiptrARB;
+#endif
+
+typedef khronos_int64_t GLint64;
+
+typedef khronos_int64_t GLint64EXT;
+
+typedef khronos_uint64_t GLuint64;
+
+typedef khronos_uint64_t GLuint64EXT;
+
+typedef struct __GLsync *GLsync;
+
+struct _cl_context;
+
+struct _cl_event;
+
+typedef void (GLAD_API_PTR *GLDEBUGPROC)(GLenum source,GLenum type,GLuint id,GLenum severity,GLsizei length,const GLchar *message,const void *userParam);
+
+typedef void (GLAD_API_PTR *GLDEBUGPROCARB)(GLenum source,GLenum type,GLuint id,GLenum severity,GLsizei length,const GLchar *message,const void *userParam);
+
+typedef void (GLAD_API_PTR *GLDEBUGPROCKHR)(GLenum source,GLenum type,GLuint id,GLenum severity,GLsizei length,const GLchar *message,const void *userParam);
+
+typedef void (GLAD_API_PTR *GLDEBUGPROCAMD)(GLuint id,GLenum category,GLenum severity,GLsizei length,const GLchar *message,void *userParam);
+
+typedef unsigned short GLhalfNV;
+
+typedef GLintptr GLvdpauSurfaceNV;
+
+typedef void (GLAD_API_PTR *GLVULKANPROCNV)(void);
+
+
+
+#define GL_VERSION_1_0 1
+GLAD_API_CALL int GLAD_GL_VERSION_1_0;
+#define GL_VERSION_1_1 1
+GLAD_API_CALL int GLAD_GL_VERSION_1_1;
+#define GL_VERSION_1_2 1
+GLAD_API_CALL int GLAD_GL_VERSION_1_2;
+#define GL_VERSION_1_3 1
+GLAD_API_CALL int GLAD_GL_VERSION_1_3;
+#define GL_VERSION_1_4 1
+GLAD_API_CALL int GLAD_GL_VERSION_1_4;
+#define GL_VERSION_1_5 1
+GLAD_API_CALL int GLAD_GL_VERSION_1_5;
+#define GL_VERSION_2_0 1
+GLAD_API_CALL int GLAD_GL_VERSION_2_0;
+#define GL_VERSION_2_1 1
+GLAD_API_CALL int GLAD_GL_VERSION_2_1;
+#define GL_VERSION_3_0 1
+GLAD_API_CALL int GLAD_GL_VERSION_3_0;
+#define GL_VERSION_3_1 1
+GLAD_API_CALL int GLAD_GL_VERSION_3_1;
+#define GL_VERSION_3_2 1
+GLAD_API_CALL int GLAD_GL_VERSION_3_2;
+#define GL_VERSION_3_3 1
+GLAD_API_CALL int GLAD_GL_VERSION_3_3;
+#define GL_ARB_multisample 1
+GLAD_API_CALL int GLAD_GL_ARB_multisample;
+#define GL_ARB_robustness 1
+GLAD_API_CALL int GLAD_GL_ARB_robustness;
+#define GL_KHR_debug 1
+GLAD_API_CALL int GLAD_GL_KHR_debug;
+
+
+typedef void (GLAD_API_PTR *PFNGLACCUMPROC)(GLenum op, GLfloat value);
+typedef void (GLAD_API_PTR *PFNGLACTIVETEXTUREPROC)(GLenum texture);
+typedef void (GLAD_API_PTR *PFNGLALPHAFUNCPROC)(GLenum func, GLfloat ref);
+typedef GLboolean (GLAD_API_PTR *PFNGLARETEXTURESRESIDENTPROC)(GLsizei n, const GLuint * textures, GLboolean * residences);
+typedef void (GLAD_API_PTR *PFNGLARRAYELEMENTPROC)(GLint i);
+typedef void (GLAD_API_PTR *PFNGLATTACHSHADERPROC)(GLuint program, GLuint shader);
+typedef void (GLAD_API_PTR *PFNGLBEGINPROC)(GLenum mode);
+typedef void (GLAD_API_PTR *PFNGLBEGINCONDITIONALRENDERPROC)(GLuint id, GLenum mode);
+typedef void (GLAD_API_PTR *PFNGLBEGINQUERYPROC)(GLenum target, GLuint id);
+typedef void (GLAD_API_PTR *PFNGLBEGINTRANSFORMFEEDBACKPROC)(GLenum primitiveMode);
+typedef void (GLAD_API_PTR *PFNGLBINDATTRIBLOCATIONPROC)(GLuint program, GLuint index, const GLchar * name);
+typedef void (GLAD_API_PTR *PFNGLBINDBUFFERPROC)(GLenum target, GLuint buffer);
+typedef void (GLAD_API_PTR *PFNGLBINDBUFFERBASEPROC)(GLenum target, GLuint index, GLuint buffer);
+typedef void (GLAD_API_PTR *PFNGLBINDBUFFERRANGEPROC)(GLenum target, GLuint index, GLuint buffer, GLintptr offset, GLsizeiptr size);
+typedef void (GLAD_API_PTR *PFNGLBINDFRAGDATALOCATIONPROC)(GLuint program, GLuint color, const GLchar * name);
+typedef void (GLAD_API_PTR *PFNGLBINDFRAGDATALOCATIONINDEXEDPROC)(GLuint program, GLuint colorNumber, GLuint index, const GLchar * name);
+typedef void (GLAD_API_PTR *PFNGLBINDFRAMEBUFFERPROC)(GLenum target, GLuint framebuffer);
+typedef void (GLAD_API_PTR *PFNGLBINDRENDERBUFFERPROC)(GLenum target, GLuint renderbuffer);
+typedef void (GLAD_API_PTR *PFNGLBINDSAMPLERPROC)(GLuint unit, GLuint sampler);
+typedef void (GLAD_API_PTR *PFNGLBINDTEXTUREPROC)(GLenum target, GLuint texture);
+typedef void (GLAD_API_PTR *PFNGLBINDVERTEXARRAYPROC)(GLuint array);
+typedef void (GLAD_API_PTR *PFNGLBITMAPPROC)(GLsizei width, GLsizei height, GLfloat xorig, GLfloat yorig, GLfloat xmove, GLfloat ymove, const GLubyte * bitmap);
+typedef void (GLAD_API_PTR *PFNGLBLENDCOLORPROC)(GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha);
+typedef void (GLAD_API_PTR *PFNGLBLENDEQUATIONPROC)(GLenum mode);
+typedef void (GLAD_API_PTR *PFNGLBLENDEQUATIONSEPARATEPROC)(GLenum modeRGB, GLenum modeAlpha);
+typedef void (GLAD_API_PTR *PFNGLBLENDFUNCPROC)(GLenum sfactor, GLenum dfactor);
+typedef void (GLAD_API_PTR *PFNGLBLENDFUNCSEPARATEPROC)(GLenum sfactorRGB, GLenum dfactorRGB, GLenum sfactorAlpha, GLenum dfactorAlpha);
+typedef void (GLAD_API_PTR *PFNGLBLITFRAMEBUFFERPROC)(GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, GLbitfield mask, GLenum filter);
+typedef void (GLAD_API_PTR *PFNGLBUFFERDATAPROC)(GLenum target, GLsizeiptr size, const void * data, GLenum usage);
+typedef void (GLAD_API_PTR *PFNGLBUFFERSUBDATAPROC)(GLenum target, GLintptr offset, GLsizeiptr size, const void * data);
+typedef void (GLAD_API_PTR *PFNGLCALLLISTPROC)(GLuint list);
+typedef void (GLAD_API_PTR *PFNGLCALLLISTSPROC)(GLsizei n, GLenum type, const void * lists);
+typedef GLenum (GLAD_API_PTR *PFNGLCHECKFRAMEBUFFERSTATUSPROC)(GLenum target);
+typedef void (GLAD_API_PTR *PFNGLCLAMPCOLORPROC)(GLenum target, GLenum clamp);
+typedef void (GLAD_API_PTR *PFNGLCLEARPROC)(GLbitfield mask);
+typedef void (GLAD_API_PTR *PFNGLCLEARACCUMPROC)(GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha);
+typedef void (GLAD_API_PTR *PFNGLCLEARBUFFERFIPROC)(GLenum buffer, GLint drawbuffer, GLfloat depth, GLint stencil);
+typedef void (GLAD_API_PTR *PFNGLCLEARBUFFERFVPROC)(GLenum buffer, GLint drawbuffer, const GLfloat * value);
+typedef void (GLAD_API_PTR *PFNGLCLEARBUFFERIVPROC)(GLenum buffer, GLint drawbuffer, const GLint * value);
+typedef void (GLAD_API_PTR *PFNGLCLEARBUFFERUIVPROC)(GLenum buffer, GLint drawbuffer, const GLuint * value);
+typedef void (GLAD_API_PTR *PFNGLCLEARCOLORPROC)(GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha);
+typedef void (GLAD_API_PTR *PFNGLCLEARDEPTHPROC)(GLdouble depth);
+typedef void (GLAD_API_PTR *PFNGLCLEARINDEXPROC)(GLfloat c);
+typedef void (GLAD_API_PTR *PFNGLCLEARSTENCILPROC)(GLint s);
+typedef void (GLAD_API_PTR *PFNGLCLIENTACTIVETEXTUREPROC)(GLenum texture);
+typedef GLenum (GLAD_API_PTR *PFNGLCLIENTWAITSYNCPROC)(GLsync sync, GLbitfield flags, GLuint64 timeout);
+typedef void (GLAD_API_PTR *PFNGLCLIPPLANEPROC)(GLenum plane, const GLdouble * equation);
+typedef void (GLAD_API_PTR *PFNGLCOLOR3BPROC)(GLbyte red, GLbyte green, GLbyte blue);
+typedef void (GLAD_API_PTR *PFNGLCOLOR3BVPROC)(const GLbyte * v);
+typedef void (GLAD_API_PTR *PFNGLCOLOR3DPROC)(GLdouble red, GLdouble green, GLdouble blue);
+typedef void (GLAD_API_PTR *PFNGLCOLOR3DVPROC)(const GLdouble * v);
+typedef void (GLAD_API_PTR *PFNGLCOLOR3FPROC)(GLfloat red, GLfloat green, GLfloat blue);
+typedef void (GLAD_API_PTR *PFNGLCOLOR3FVPROC)(const GLfloat * v);
+typedef void (GLAD_API_PTR *PFNGLCOLOR3IPROC)(GLint red, GLint green, GLint blue);
+typedef void (GLAD_API_PTR *PFNGLCOLOR3IVPROC)(const GLint * v);
+typedef void (GLAD_API_PTR *PFNGLCOLOR3SPROC)(GLshort red, GLshort green, GLshort blue);
+typedef void (GLAD_API_PTR *PFNGLCOLOR3SVPROC)(const GLshort * v);
+typedef void (GLAD_API_PTR *PFNGLCOLOR3UBPROC)(GLubyte red, GLubyte green, GLubyte blue);
+typedef void (GLAD_API_PTR *PFNGLCOLOR3UBVPROC)(const GLubyte * v);
+typedef void (GLAD_API_PTR *PFNGLCOLOR3UIPROC)(GLuint red, GLuint green, GLuint blue);
+typedef void (GLAD_API_PTR *PFNGLCOLOR3UIVPROC)(const GLuint * v);
+typedef void (GLAD_API_PTR *PFNGLCOLOR3USPROC)(GLushort red, GLushort green, GLushort blue);
+typedef void (GLAD_API_PTR *PFNGLCOLOR3USVPROC)(const GLushort * v);
+typedef void (GLAD_API_PTR *PFNGLCOLOR4BPROC)(GLbyte red, GLbyte green, GLbyte blue, GLbyte alpha);
+typedef void (GLAD_API_PTR *PFNGLCOLOR4BVPROC)(const GLbyte * v);
+typedef void (GLAD_API_PTR *PFNGLCOLOR4DPROC)(GLdouble red, GLdouble green, GLdouble blue, GLdouble alpha);
+typedef void (GLAD_API_PTR *PFNGLCOLOR4DVPROC)(const GLdouble * v);
+typedef void (GLAD_API_PTR *PFNGLCOLOR4FPROC)(GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha);
+typedef void (GLAD_API_PTR *PFNGLCOLOR4FVPROC)(const GLfloat * v);
+typedef void (GLAD_API_PTR *PFNGLCOLOR4IPROC)(GLint red, GLint green, GLint blue, GLint alpha);
+typedef void (GLAD_API_PTR *PFNGLCOLOR4IVPROC)(const GLint * v);
+typedef void (GLAD_API_PTR *PFNGLCOLOR4SPROC)(GLshort red, GLshort green, GLshort blue, GLshort alpha);
+typedef void (GLAD_API_PTR *PFNGLCOLOR4SVPROC)(const GLshort * v);
+typedef void (GLAD_API_PTR *PFNGLCOLOR4UBPROC)(GLubyte red, GLubyte green, GLubyte blue, GLubyte alpha);
+typedef void (GLAD_API_PTR *PFNGLCOLOR4UBVPROC)(const GLubyte * v);
+typedef void (GLAD_API_PTR *PFNGLCOLOR4UIPROC)(GLuint red, GLuint green, GLuint blue, GLuint alpha);
+typedef void (GLAD_API_PTR *PFNGLCOLOR4UIVPROC)(const GLuint * v);
+typedef void (GLAD_API_PTR *PFNGLCOLOR4USPROC)(GLushort red, GLushort green, GLushort blue, GLushort alpha);
+typedef void (GLAD_API_PTR *PFNGLCOLOR4USVPROC)(const GLushort * v);
+typedef void (GLAD_API_PTR *PFNGLCOLORMASKPROC)(GLboolean red, GLboolean green, GLboolean blue, GLboolean alpha);
+typedef void (GLAD_API_PTR *PFNGLCOLORMASKIPROC)(GLuint index, GLboolean r, GLboolean g, GLboolean b, GLboolean a);
+typedef void (GLAD_API_PTR *PFNGLCOLORMATERIALPROC)(GLenum face, GLenum mode);
+typedef void (GLAD_API_PTR *PFNGLCOLORP3UIPROC)(GLenum type, GLuint color);
+typedef void (GLAD_API_PTR *PFNGLCOLORP3UIVPROC)(GLenum type, const GLuint * color);
+typedef void (GLAD_API_PTR *PFNGLCOLORP4UIPROC)(GLenum type, GLuint color);
+typedef void (GLAD_API_PTR *PFNGLCOLORP4UIVPROC)(GLenum type, const GLuint * color);
+typedef void (GLAD_API_PTR *PFNGLCOLORPOINTERPROC)(GLint size, GLenum type, GLsizei stride, const void * pointer);
+typedef void (GLAD_API_PTR *PFNGLCOMPILESHADERPROC)(GLuint shader);
+typedef void (GLAD_API_PTR *PFNGLCOMPRESSEDTEXIMAGE1DPROC)(GLenum target, GLint level, GLenum internalformat, GLsizei width, GLint border, GLsizei imageSize, const void * data);
+typedef void (GLAD_API_PTR *PFNGLCOMPRESSEDTEXIMAGE2DPROC)(GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLint border, GLsizei imageSize, const void * data);
+typedef void (GLAD_API_PTR *PFNGLCOMPRESSEDTEXIMAGE3DPROC)(GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLsizei imageSize, const void * data);
+typedef void (GLAD_API_PTR *PFNGLCOMPRESSEDTEXSUBIMAGE1DPROC)(GLenum target, GLint level, GLint xoffset, GLsizei width, GLenum format, GLsizei imageSize, const void * data);
+typedef void (GLAD_API_PTR *PFNGLCOMPRESSEDTEXSUBIMAGE2DPROC)(GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLsizei imageSize, const void * data);
+typedef void (GLAD_API_PTR *PFNGLCOMPRESSEDTEXSUBIMAGE3DPROC)(GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLsizei imageSize, const void * data);
+typedef void (GLAD_API_PTR *PFNGLCOPYBUFFERSUBDATAPROC)(GLenum readTarget, GLenum writeTarget, GLintptr readOffset, GLintptr writeOffset, GLsizeiptr size);
+typedef void (GLAD_API_PTR *PFNGLCOPYPIXELSPROC)(GLint x, GLint y, GLsizei width, GLsizei height, GLenum type);
+typedef void (GLAD_API_PTR *PFNGLCOPYTEXIMAGE1DPROC)(GLenum target, GLint level, GLenum internalformat, GLint x, GLint y, GLsizei width, GLint border);
+typedef void (GLAD_API_PTR *PFNGLCOPYTEXIMAGE2DPROC)(GLenum target, GLint level, GLenum internalformat, GLint x, GLint y, GLsizei width, GLsizei height, GLint border);
+typedef void (GLAD_API_PTR *PFNGLCOPYTEXSUBIMAGE1DPROC)(GLenum target, GLint level, GLint xoffset, GLint x, GLint y, GLsizei width);
+typedef void (GLAD_API_PTR *PFNGLCOPYTEXSUBIMAGE2DPROC)(GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint x, GLint y, GLsizei width, GLsizei height);
+typedef void (GLAD_API_PTR *PFNGLCOPYTEXSUBIMAGE3DPROC)(GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLint x, GLint y, GLsizei width, GLsizei height);
+typedef GLuint (GLAD_API_PTR *PFNGLCREATEPROGRAMPROC)(void);
+typedef GLuint (GLAD_API_PTR *PFNGLCREATESHADERPROC)(GLenum type);
+typedef void (GLAD_API_PTR *PFNGLCULLFACEPROC)(GLenum mode);
+typedef void (GLAD_API_PTR *PFNGLDEBUGMESSAGECALLBACKPROC)(GLDEBUGPROC callback, const void * userParam);
+typedef void (GLAD_API_PTR *PFNGLDEBUGMESSAGECONTROLPROC)(GLenum source, GLenum type, GLenum severity, GLsizei count, const GLuint * ids, GLboolean enabled);
+typedef void (GLAD_API_PTR *PFNGLDEBUGMESSAGEINSERTPROC)(GLenum source, GLenum type, GLuint id, GLenum severity, GLsizei length, const GLchar * buf);
+typedef void (GLAD_API_PTR *PFNGLDELETEBUFFERSPROC)(GLsizei n, const GLuint * buffers);
+typedef void (GLAD_API_PTR *PFNGLDELETEFRAMEBUFFERSPROC)(GLsizei n, const GLuint * framebuffers);
+typedef void (GLAD_API_PTR *PFNGLDELETELISTSPROC)(GLuint list, GLsizei range);
+typedef void (GLAD_API_PTR *PFNGLDELETEPROGRAMPROC)(GLuint program);
+typedef void (GLAD_API_PTR *PFNGLDELETEQUERIESPROC)(GLsizei n, const GLuint * ids);
+typedef void (GLAD_API_PTR *PFNGLDELETERENDERBUFFERSPROC)(GLsizei n, const GLuint * renderbuffers);
+typedef void (GLAD_API_PTR *PFNGLDELETESAMPLERSPROC)(GLsizei count, const GLuint * samplers);
+typedef void (GLAD_API_PTR *PFNGLDELETESHADERPROC)(GLuint shader);
+typedef void (GLAD_API_PTR *PFNGLDELETESYNCPROC)(GLsync sync);
+typedef void (GLAD_API_PTR *PFNGLDELETETEXTURESPROC)(GLsizei n, const GLuint * textures);
+typedef void (GLAD_API_PTR *PFNGLDELETEVERTEXARRAYSPROC)(GLsizei n, const GLuint * arrays);
+typedef void (GLAD_API_PTR *PFNGLDEPTHFUNCPROC)(GLenum func);
+typedef void (GLAD_API_PTR *PFNGLDEPTHMASKPROC)(GLboolean flag);
+typedef void (GLAD_API_PTR *PFNGLDEPTHRANGEPROC)(GLdouble n, GLdouble f);
+typedef void (GLAD_API_PTR *PFNGLDETACHSHADERPROC)(GLuint program, GLuint shader);
+typedef void (GLAD_API_PTR *PFNGLDISABLEPROC)(GLenum cap);
+typedef void (GLAD_API_PTR *PFNGLDISABLECLIENTSTATEPROC)(GLenum array);
+typedef void (GLAD_API_PTR *PFNGLDISABLEVERTEXATTRIBARRAYPROC)(GLuint index);
+typedef void (GLAD_API_PTR *PFNGLDISABLEIPROC)(GLenum target, GLuint index);
+typedef void (GLAD_API_PTR *PFNGLDRAWARRAYSPROC)(GLenum mode, GLint first, GLsizei count);
+typedef void (GLAD_API_PTR *PFNGLDRAWARRAYSINSTANCEDPROC)(GLenum mode, GLint first, GLsizei count, GLsizei instancecount);
+typedef void (GLAD_API_PTR *PFNGLDRAWBUFFERPROC)(GLenum buf);
+typedef void (GLAD_API_PTR *PFNGLDRAWBUFFERSPROC)(GLsizei n, const GLenum * bufs);
+typedef void (GLAD_API_PTR *PFNGLDRAWELEMENTSPROC)(GLenum mode, GLsizei count, GLenum type, const void * indices);
+typedef void (GLAD_API_PTR *PFNGLDRAWELEMENTSBASEVERTEXPROC)(GLenum mode, GLsizei count, GLenum type, const void * indices, GLint basevertex);
+typedef void (GLAD_API_PTR *PFNGLDRAWELEMENTSINSTANCEDPROC)(GLenum mode, GLsizei count, GLenum type, const void * indices, GLsizei instancecount);
+typedef void (GLAD_API_PTR *PFNGLDRAWELEMENTSINSTANCEDBASEVERTEXPROC)(GLenum mode, GLsizei count, GLenum type, const void * indices, GLsizei instancecount, GLint basevertex);
+typedef void (GLAD_API_PTR *PFNGLDRAWPIXELSPROC)(GLsizei width, GLsizei height, GLenum format, GLenum type, const void * pixels);
+typedef void (GLAD_API_PTR *PFNGLDRAWRANGEELEMENTSPROC)(GLenum mode, GLuint start, GLuint end, GLsizei count, GLenum type, const void * indices);
+typedef void (GLAD_API_PTR *PFNGLDRAWRANGEELEMENTSBASEVERTEXPROC)(GLenum mode, GLuint start, GLuint end, GLsizei count, GLenum type, const void * indices, GLint basevertex);
+typedef void (GLAD_API_PTR *PFNGLEDGEFLAGPROC)(GLboolean flag);
+typedef void (GLAD_API_PTR *PFNGLEDGEFLAGPOINTERPROC)(GLsizei stride, const void * pointer);
+typedef void (GLAD_API_PTR *PFNGLEDGEFLAGVPROC)(const GLboolean * flag);
+typedef void (GLAD_API_PTR *PFNGLENABLEPROC)(GLenum cap);
+typedef void (GLAD_API_PTR *PFNGLENABLECLIENTSTATEPROC)(GLenum array);
+typedef void (GLAD_API_PTR *PFNGLENABLEVERTEXATTRIBARRAYPROC)(GLuint index);
+typedef void (GLAD_API_PTR *PFNGLENABLEIPROC)(GLenum target, GLuint index);
+typedef void (GLAD_API_PTR *PFNGLENDPROC)(void);
+typedef void (GLAD_API_PTR *PFNGLENDCONDITIONALRENDERPROC)(void);
+typedef void (GLAD_API_PTR *PFNGLENDLISTPROC)(void);
+typedef void (GLAD_API_PTR *PFNGLENDQUERYPROC)(GLenum target);
+typedef void (GLAD_API_PTR *PFNGLENDTRANSFORMFEEDBACKPROC)(void);
+typedef void (GLAD_API_PTR *PFNGLEVALCOORD1DPROC)(GLdouble u);
+typedef void (GLAD_API_PTR *PFNGLEVALCOORD1DVPROC)(const GLdouble * u);
+typedef void (GLAD_API_PTR *PFNGLEVALCOORD1FPROC)(GLfloat u);
+typedef void (GLAD_API_PTR *PFNGLEVALCOORD1FVPROC)(const GLfloat * u);
+typedef void (GLAD_API_PTR *PFNGLEVALCOORD2DPROC)(GLdouble u, GLdouble v);
+typedef void (GLAD_API_PTR *PFNGLEVALCOORD2DVPROC)(const GLdouble * u);
+typedef void (GLAD_API_PTR *PFNGLEVALCOORD2FPROC)(GLfloat u, GLfloat v);
+typedef void (GLAD_API_PTR *PFNGLEVALCOORD2FVPROC)(const GLfloat * u);
+typedef void (GLAD_API_PTR *PFNGLEVALMESH1PROC)(GLenum mode, GLint i1, GLint i2);
+typedef void (GLAD_API_PTR *PFNGLEVALMESH2PROC)(GLenum mode, GLint i1, GLint i2, GLint j1, GLint j2);
+typedef void (GLAD_API_PTR *PFNGLEVALPOINT1PROC)(GLint i);
+typedef void (GLAD_API_PTR *PFNGLEVALPOINT2PROC)(GLint i, GLint j);
+typedef void (GLAD_API_PTR *PFNGLFEEDBACKBUFFERPROC)(GLsizei size, GLenum type, GLfloat * buffer);
+typedef GLsync (GLAD_API_PTR *PFNGLFENCESYNCPROC)(GLenum condition, GLbitfield flags);
+typedef void (GLAD_API_PTR *PFNGLFINISHPROC)(void);
+typedef void (GLAD_API_PTR *PFNGLFLUSHPROC)(void);
+typedef void (GLAD_API_PTR *PFNGLFLUSHMAPPEDBUFFERRANGEPROC)(GLenum target, GLintptr offset, GLsizeiptr length);
+typedef void (GLAD_API_PTR *PFNGLFOGCOORDPOINTERPROC)(GLenum type, GLsizei stride, const void * pointer);
+typedef void (GLAD_API_PTR *PFNGLFOGCOORDDPROC)(GLdouble coord);
+typedef void (GLAD_API_PTR *PFNGLFOGCOORDDVPROC)(const GLdouble * coord);
+typedef void (GLAD_API_PTR *PFNGLFOGCOORDFPROC)(GLfloat coord);
+typedef void (GLAD_API_PTR *PFNGLFOGCOORDFVPROC)(const GLfloat * coord);
+typedef void (GLAD_API_PTR *PFNGLFOGFPROC)(GLenum pname, GLfloat param);
+typedef void (GLAD_API_PTR *PFNGLFOGFVPROC)(GLenum pname, const GLfloat * params);
+typedef void (GLAD_API_PTR *PFNGLFOGIPROC)(GLenum pname, GLint param);
+typedef void (GLAD_API_PTR *PFNGLFOGIVPROC)(GLenum pname, const GLint * params);
+typedef void (GLAD_API_PTR *PFNGLFRAMEBUFFERRENDERBUFFERPROC)(GLenum target, GLenum attachment, GLenum renderbuffertarget, GLuint renderbuffer);
+typedef void (GLAD_API_PTR *PFNGLFRAMEBUFFERTEXTUREPROC)(GLenum target, GLenum attachment, GLuint texture, GLint level);
+typedef void (GLAD_API_PTR *PFNGLFRAMEBUFFERTEXTURE1DPROC)(GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level);
+typedef void (GLAD_API_PTR *PFNGLFRAMEBUFFERTEXTURE2DPROC)(GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level);
+typedef void (GLAD_API_PTR *PFNGLFRAMEBUFFERTEXTURE3DPROC)(GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level, GLint zoffset);
+typedef void (GLAD_API_PTR *PFNGLFRAMEBUFFERTEXTURELAYERPROC)(GLenum target, GLenum attachment, GLuint texture, GLint level, GLint layer);
+typedef void (GLAD_API_PTR *PFNGLFRONTFACEPROC)(GLenum mode);
+typedef void (GLAD_API_PTR *PFNGLFRUSTUMPROC)(GLdouble left, GLdouble right, GLdouble bottom, GLdouble top, GLdouble zNear, GLdouble zFar);
+typedef void (GLAD_API_PTR *PFNGLGENBUFFERSPROC)(GLsizei n, GLuint * buffers);
+typedef void (GLAD_API_PTR *PFNGLGENFRAMEBUFFERSPROC)(GLsizei n, GLuint * framebuffers);
+typedef GLuint (GLAD_API_PTR *PFNGLGENLISTSPROC)(GLsizei range);
+typedef void (GLAD_API_PTR *PFNGLGENQUERIESPROC)(GLsizei n, GLuint * ids);
+typedef void (GLAD_API_PTR *PFNGLGENRENDERBUFFERSPROC)(GLsizei n, GLuint * renderbuffers);
+typedef void (GLAD_API_PTR *PFNGLGENSAMPLERSPROC)(GLsizei count, GLuint * samplers);
+typedef void (GLAD_API_PTR *PFNGLGENTEXTURESPROC)(GLsizei n, GLuint * textures);
+typedef void (GLAD_API_PTR *PFNGLGENVERTEXARRAYSPROC)(GLsizei n, GLuint * arrays);
+typedef void (GLAD_API_PTR *PFNGLGENERATEMIPMAPPROC)(GLenum target);
+typedef void (GLAD_API_PTR *PFNGLGETACTIVEATTRIBPROC)(GLuint program, GLuint index, GLsizei bufSize, GLsizei * length, GLint * size, GLenum * type, GLchar * name);
+typedef void (GLAD_API_PTR *PFNGLGETACTIVEUNIFORMPROC)(GLuint program, GLuint index, GLsizei bufSize, GLsizei * length, GLint * size, GLenum * type, GLchar * name);
+typedef void (GLAD_API_PTR *PFNGLGETACTIVEUNIFORMBLOCKNAMEPROC)(GLuint program, GLuint uniformBlockIndex, GLsizei bufSize, GLsizei * length, GLchar * uniformBlockName);
+typedef void (GLAD_API_PTR *PFNGLGETACTIVEUNIFORMBLOCKIVPROC)(GLuint program, GLuint uniformBlockIndex, GLenum pname, GLint * params);
+typedef void (GLAD_API_PTR *PFNGLGETACTIVEUNIFORMNAMEPROC)(GLuint program, GLuint uniformIndex, GLsizei bufSize, GLsizei * length, GLchar * uniformName);
+typedef void (GLAD_API_PTR *PFNGLGETACTIVEUNIFORMSIVPROC)(GLuint program, GLsizei uniformCount, const GLuint * uniformIndices, GLenum pname, GLint * params);
+typedef void (GLAD_API_PTR *PFNGLGETATTACHEDSHADERSPROC)(GLuint program, GLsizei maxCount, GLsizei * count, GLuint * shaders);
+typedef GLint (GLAD_API_PTR *PFNGLGETATTRIBLOCATIONPROC)(GLuint program, const GLchar * name);
+typedef void (GLAD_API_PTR *PFNGLGETBOOLEANI_VPROC)(GLenum target, GLuint index, GLboolean * data);
+typedef void (GLAD_API_PTR *PFNGLGETBOOLEANVPROC)(GLenum pname, GLboolean * data);
+typedef void (GLAD_API_PTR *PFNGLGETBUFFERPARAMETERI64VPROC)(GLenum target, GLenum pname, GLint64 * params);
+typedef void (GLAD_API_PTR *PFNGLGETBUFFERPARAMETERIVPROC)(GLenum target, GLenum pname, GLint * params);
+typedef void (GLAD_API_PTR *PFNGLGETBUFFERPOINTERVPROC)(GLenum target, GLenum pname, void ** params);
+typedef void (GLAD_API_PTR *PFNGLGETBUFFERSUBDATAPROC)(GLenum target, GLintptr offset, GLsizeiptr size, void * data);
+typedef void (GLAD_API_PTR *PFNGLGETCLIPPLANEPROC)(GLenum plane, GLdouble * equation);
+typedef void (GLAD_API_PTR *PFNGLGETCOMPRESSEDTEXIMAGEPROC)(GLenum target, GLint level, void * img);
+typedef GLuint (GLAD_API_PTR *PFNGLGETDEBUGMESSAGELOGPROC)(GLuint count, GLsizei bufSize, GLenum * sources, GLenum * types, GLuint * ids, GLenum * severities, GLsizei * lengths, GLchar * messageLog);
+typedef void (GLAD_API_PTR *PFNGLGETDOUBLEVPROC)(GLenum pname, GLdouble * data);
+typedef GLenum (GLAD_API_PTR *PFNGLGETERRORPROC)(void);
+typedef void (GLAD_API_PTR *PFNGLGETFLOATVPROC)(GLenum pname, GLfloat * data);
+typedef GLint (GLAD_API_PTR *PFNGLGETFRAGDATAINDEXPROC)(GLuint program, const GLchar * name);
+typedef GLint (GLAD_API_PTR *PFNGLGETFRAGDATALOCATIONPROC)(GLuint program, const GLchar * name);
+typedef void (GLAD_API_PTR *PFNGLGETFRAMEBUFFERATTACHMENTPARAMETERIVPROC)(GLenum target, GLenum attachment, GLenum pname, GLint * params);
+typedef GLenum (GLAD_API_PTR *PFNGLGETGRAPHICSRESETSTATUSARBPROC)(void);
+typedef void (GLAD_API_PTR *PFNGLGETINTEGER64I_VPROC)(GLenum target, GLuint index, GLint64 * data);
+typedef void (GLAD_API_PTR *PFNGLGETINTEGER64VPROC)(GLenum pname, GLint64 * data);
+typedef void (GLAD_API_PTR *PFNGLGETINTEGERI_VPROC)(GLenum target, GLuint index, GLint * data);
+typedef void (GLAD_API_PTR *PFNGLGETINTEGERVPROC)(GLenum pname, GLint * data);
+typedef void (GLAD_API_PTR *PFNGLGETLIGHTFVPROC)(GLenum light, GLenum pname, GLfloat * params);
+typedef void (GLAD_API_PTR *PFNGLGETLIGHTIVPROC)(GLenum light, GLenum pname, GLint * params);
+typedef void (GLAD_API_PTR *PFNGLGETMAPDVPROC)(GLenum target, GLenum query, GLdouble * v);
+typedef void (GLAD_API_PTR *PFNGLGETMAPFVPROC)(GLenum target, GLenum query, GLfloat * v);
+typedef void (GLAD_API_PTR *PFNGLGETMAPIVPROC)(GLenum target, GLenum query, GLint * v);
+typedef void (GLAD_API_PTR *PFNGLGETMATERIALFVPROC)(GLenum face, GLenum pname, GLfloat * params);
+typedef void (GLAD_API_PTR *PFNGLGETMATERIALIVPROC)(GLenum face, GLenum pname, GLint * params);
+typedef void (GLAD_API_PTR *PFNGLGETMULTISAMPLEFVPROC)(GLenum pname, GLuint index, GLfloat * val);
+typedef void (GLAD_API_PTR *PFNGLGETOBJECTLABELPROC)(GLenum identifier, GLuint name, GLsizei bufSize, GLsizei * length, GLchar * label);
+typedef void (GLAD_API_PTR *PFNGLGETOBJECTPTRLABELPROC)(const void * ptr, GLsizei bufSize, GLsizei * length, GLchar * label);
+typedef void (GLAD_API_PTR *PFNGLGETPIXELMAPFVPROC)(GLenum map, GLfloat * values);
+typedef void (GLAD_API_PTR *PFNGLGETPIXELMAPUIVPROC)(GLenum map, GLuint * values);
+typedef void (GLAD_API_PTR *PFNGLGETPIXELMAPUSVPROC)(GLenum map, GLushort * values);
+typedef void (GLAD_API_PTR *PFNGLGETPOINTERVPROC)(GLenum pname, void ** params);
+typedef void (GLAD_API_PTR *PFNGLGETPOLYGONSTIPPLEPROC)(GLubyte * mask);
+typedef void (GLAD_API_PTR *PFNGLGETPROGRAMINFOLOGPROC)(GLuint program, GLsizei bufSize, GLsizei * length, GLchar * infoLog);
+typedef void (GLAD_API_PTR *PFNGLGETPROGRAMIVPROC)(GLuint program, GLenum pname, GLint * params);
+typedef void (GLAD_API_PTR *PFNGLGETQUERYOBJECTI64VPROC)(GLuint id, GLenum pname, GLint64 * params);
+typedef void (GLAD_API_PTR *PFNGLGETQUERYOBJECTIVPROC)(GLuint id, GLenum pname, GLint * params);
+typedef void (GLAD_API_PTR *PFNGLGETQUERYOBJECTUI64VPROC)(GLuint id, GLenum pname, GLuint64 * params);
+typedef void (GLAD_API_PTR *PFNGLGETQUERYOBJECTUIVPROC)(GLuint id, GLenum pname, GLuint * params);
+typedef void (GLAD_API_PTR *PFNGLGETQUERYIVPROC)(GLenum target, GLenum pname, GLint * params);
+typedef void (GLAD_API_PTR *PFNGLGETRENDERBUFFERPARAMETERIVPROC)(GLenum target, GLenum pname, GLint * params);
+typedef void (GLAD_API_PTR *PFNGLGETSAMPLERPARAMETERIIVPROC)(GLuint sampler, GLenum pname, GLint * params);
+typedef void (GLAD_API_PTR *PFNGLGETSAMPLERPARAMETERIUIVPROC)(GLuint sampler, GLenum pname, GLuint * params);
+typedef void (GLAD_API_PTR *PFNGLGETSAMPLERPARAMETERFVPROC)(GLuint sampler, GLenum pname, GLfloat * params);
+typedef void (GLAD_API_PTR *PFNGLGETSAMPLERPARAMETERIVPROC)(GLuint sampler, GLenum pname, GLint * params);
+typedef void (GLAD_API_PTR *PFNGLGETSHADERINFOLOGPROC)(GLuint shader, GLsizei bufSize, GLsizei * length, GLchar * infoLog);
+typedef void (GLAD_API_PTR *PFNGLGETSHADERSOURCEPROC)(GLuint shader, GLsizei bufSize, GLsizei * length, GLchar * source);
+typedef void (GLAD_API_PTR *PFNGLGETSHADERIVPROC)(GLuint shader, GLenum pname, GLint * params);
+typedef const GLubyte * (GLAD_API_PTR *PFNGLGETSTRINGPROC)(GLenum name);
+typedef const GLubyte * (GLAD_API_PTR *PFNGLGETSTRINGIPROC)(GLenum name, GLuint index);
+typedef void (GLAD_API_PTR *PFNGLGETSYNCIVPROC)(GLsync sync, GLenum pname, GLsizei count, GLsizei * length, GLint * values);
+typedef void (GLAD_API_PTR *PFNGLGETTEXENVFVPROC)(GLenum target, GLenum pname, GLfloat * params);
+typedef void (GLAD_API_PTR *PFNGLGETTEXENVIVPROC)(GLenum target, GLenum pname, GLint * params);
+typedef void (GLAD_API_PTR *PFNGLGETTEXGENDVPROC)(GLenum coord, GLenum pname, GLdouble * params);
+typedef void (GLAD_API_PTR *PFNGLGETTEXGENFVPROC)(GLenum coord, GLenum pname, GLfloat * params);
+typedef void (GLAD_API_PTR *PFNGLGETTEXGENIVPROC)(GLenum coord, GLenum pname, GLint * params);
+typedef void (GLAD_API_PTR *PFNGLGETTEXIMAGEPROC)(GLenum target, GLint level, GLenum format, GLenum type, void * pixels);
+typedef void (GLAD_API_PTR *PFNGLGETTEXLEVELPARAMETERFVPROC)(GLenum target, GLint level, GLenum pname, GLfloat * params);
+typedef void (GLAD_API_PTR *PFNGLGETTEXLEVELPARAMETERIVPROC)(GLenum target, GLint level, GLenum pname, GLint * params);
+typedef void (GLAD_API_PTR *PFNGLGETTEXPARAMETERIIVPROC)(GLenum target, GLenum pname, GLint * params);
+typedef void (GLAD_API_PTR *PFNGLGETTEXPARAMETERIUIVPROC)(GLenum target, GLenum pname, GLuint * params);
+typedef void (GLAD_API_PTR *PFNGLGETTEXPARAMETERFVPROC)(GLenum target, GLenum pname, GLfloat * params);
+typedef void (GLAD_API_PTR *PFNGLGETTEXPARAMETERIVPROC)(GLenum target, GLenum pname, GLint * params);
+typedef void (GLAD_API_PTR *PFNGLGETTRANSFORMFEEDBACKVARYINGPROC)(GLuint program, GLuint index, GLsizei bufSize, GLsizei * length, GLsizei * size, GLenum * type, GLchar * name);
+typedef GLuint (GLAD_API_PTR *PFNGLGETUNIFORMBLOCKINDEXPROC)(GLuint program, const GLchar * uniformBlockName);
+typedef void (GLAD_API_PTR *PFNGLGETUNIFORMINDICESPROC)(GLuint program, GLsizei uniformCount, const GLchar *const* uniformNames, GLuint * uniformIndices);
+typedef GLint (GLAD_API_PTR *PFNGLGETUNIFORMLOCATIONPROC)(GLuint program, const GLchar * name);
+typedef void (GLAD_API_PTR *PFNGLGETUNIFORMFVPROC)(GLuint program, GLint location, GLfloat * params);
+typedef void (GLAD_API_PTR *PFNGLGETUNIFORMIVPROC)(GLuint program, GLint location, GLint * params);
+typedef void (GLAD_API_PTR *PFNGLGETUNIFORMUIVPROC)(GLuint program, GLint location, GLuint * params);
+typedef void (GLAD_API_PTR *PFNGLGETVERTEXATTRIBIIVPROC)(GLuint index, GLenum pname, GLint * params);
+typedef void (GLAD_API_PTR *PFNGLGETVERTEXATTRIBIUIVPROC)(GLuint index, GLenum pname, GLuint * params);
+typedef void (GLAD_API_PTR *PFNGLGETVERTEXATTRIBPOINTERVPROC)(GLuint index, GLenum pname, void ** pointer);
+typedef void (GLAD_API_PTR *PFNGLGETVERTEXATTRIBDVPROC)(GLuint index, GLenum pname, GLdouble * params);
+typedef void (GLAD_API_PTR *PFNGLGETVERTEXATTRIBFVPROC)(GLuint index, GLenum pname, GLfloat * params);
+typedef void (GLAD_API_PTR *PFNGLGETVERTEXATTRIBIVPROC)(GLuint index, GLenum pname, GLint * params);
+typedef void (GLAD_API_PTR *PFNGLGETNCOLORTABLEARBPROC)(GLenum target, GLenum format, GLenum type, GLsizei bufSize, void * table);
+typedef void (GLAD_API_PTR *PFNGLGETNCOMPRESSEDTEXIMAGEARBPROC)(GLenum target, GLint lod, GLsizei bufSize, void * img);
+typedef void (GLAD_API_PTR *PFNGLGETNCONVOLUTIONFILTERARBPROC)(GLenum target, GLenum format, GLenum type, GLsizei bufSize, void * image);
+typedef void (GLAD_API_PTR *PFNGLGETNHISTOGRAMARBPROC)(GLenum target, GLboolean reset, GLenum format, GLenum type, GLsizei bufSize, void * values);
+typedef void (GLAD_API_PTR *PFNGLGETNMAPDVARBPROC)(GLenum target, GLenum query, GLsizei bufSize, GLdouble * v);
+typedef void (GLAD_API_PTR *PFNGLGETNMAPFVARBPROC)(GLenum target, GLenum query, GLsizei bufSize, GLfloat * v);
+typedef void (GLAD_API_PTR *PFNGLGETNMAPIVARBPROC)(GLenum target, GLenum query, GLsizei bufSize, GLint * v);
+typedef void (GLAD_API_PTR *PFNGLGETNMINMAXARBPROC)(GLenum target, GLboolean reset, GLenum format, GLenum type, GLsizei bufSize, void * values);
+typedef void (GLAD_API_PTR *PFNGLGETNPIXELMAPFVARBPROC)(GLenum map, GLsizei bufSize, GLfloat * values);
+typedef void (GLAD_API_PTR *PFNGLGETNPIXELMAPUIVARBPROC)(GLenum map, GLsizei bufSize, GLuint * values);
+typedef void (GLAD_API_PTR *PFNGLGETNPIXELMAPUSVARBPROC)(GLenum map, GLsizei bufSize, GLushort * values);
+typedef void (GLAD_API_PTR *PFNGLGETNPOLYGONSTIPPLEARBPROC)(GLsizei bufSize, GLubyte * pattern);
+typedef void (GLAD_API_PTR *PFNGLGETNSEPARABLEFILTERARBPROC)(GLenum target, GLenum format, GLenum type, GLsizei rowBufSize, void * row, GLsizei columnBufSize, void * column, void * span);
+typedef void (GLAD_API_PTR *PFNGLGETNTEXIMAGEARBPROC)(GLenum target, GLint level, GLenum format, GLenum type, GLsizei bufSize, void * img);
+typedef void (GLAD_API_PTR *PFNGLGETNUNIFORMDVARBPROC)(GLuint program, GLint location, GLsizei bufSize, GLdouble * params);
+typedef void (GLAD_API_PTR *PFNGLGETNUNIFORMFVARBPROC)(GLuint program, GLint location, GLsizei bufSize, GLfloat * params);
+typedef void (GLAD_API_PTR *PFNGLGETNUNIFORMIVARBPROC)(GLuint program, GLint location, GLsizei bufSize, GLint * params);
+typedef void (GLAD_API_PTR *PFNGLGETNUNIFORMUIVARBPROC)(GLuint program, GLint location, GLsizei bufSize, GLuint * params);
+typedef void (GLAD_API_PTR *PFNGLHINTPROC)(GLenum target, GLenum mode);
+typedef void (GLAD_API_PTR *PFNGLINDEXMASKPROC)(GLuint mask);
+typedef void (GLAD_API_PTR *PFNGLINDEXPOINTERPROC)(GLenum type, GLsizei stride, const void * pointer);
+typedef void (GLAD_API_PTR *PFNGLINDEXDPROC)(GLdouble c);
+typedef void (GLAD_API_PTR *PFNGLINDEXDVPROC)(const GLdouble * c);
+typedef void (GLAD_API_PTR *PFNGLINDEXFPROC)(GLfloat c);
+typedef void (GLAD_API_PTR *PFNGLINDEXFVPROC)(const GLfloat * c);
+typedef void (GLAD_API_PTR *PFNGLINDEXIPROC)(GLint c);
+typedef void (GLAD_API_PTR *PFNGLINDEXIVPROC)(const GLint * c);
+typedef void (GLAD_API_PTR *PFNGLINDEXSPROC)(GLshort c);
+typedef void (GLAD_API_PTR *PFNGLINDEXSVPROC)(const GLshort * c);
+typedef void (GLAD_API_PTR *PFNGLINDEXUBPROC)(GLubyte c);
+typedef void (GLAD_API_PTR *PFNGLINDEXUBVPROC)(const GLubyte * c);
+typedef void (GLAD_API_PTR *PFNGLINITNAMESPROC)(void);
+typedef void (GLAD_API_PTR *PFNGLINTERLEAVEDARRAYSPROC)(GLenum format, GLsizei stride, const void * pointer);
+typedef GLboolean (GLAD_API_PTR *PFNGLISBUFFERPROC)(GLuint buffer);
+typedef GLboolean (GLAD_API_PTR *PFNGLISENABLEDPROC)(GLenum cap);
+typedef GLboolean (GLAD_API_PTR *PFNGLISENABLEDIPROC)(GLenum target, GLuint index);
+typedef GLboolean (GLAD_API_PTR *PFNGLISFRAMEBUFFERPROC)(GLuint framebuffer);
+typedef GLboolean (GLAD_API_PTR *PFNGLISLISTPROC)(GLuint list);
+typedef GLboolean (GLAD_API_PTR *PFNGLISPROGRAMPROC)(GLuint program);
+typedef GLboolean (GLAD_API_PTR *PFNGLISQUERYPROC)(GLuint id);
+typedef GLboolean (GLAD_API_PTR *PFNGLISRENDERBUFFERPROC)(GLuint renderbuffer);
+typedef GLboolean (GLAD_API_PTR *PFNGLISSAMPLERPROC)(GLuint sampler);
+typedef GLboolean (GLAD_API_PTR *PFNGLISSHADERPROC)(GLuint shader);
+typedef GLboolean (GLAD_API_PTR *PFNGLISSYNCPROC)(GLsync sync);
+typedef GLboolean (GLAD_API_PTR *PFNGLISTEXTUREPROC)(GLuint texture);
+typedef GLboolean (GLAD_API_PTR *PFNGLISVERTEXARRAYPROC)(GLuint array);
+typedef void (GLAD_API_PTR *PFNGLLIGHTMODELFPROC)(GLenum pname, GLfloat param);
+typedef void (GLAD_API_PTR *PFNGLLIGHTMODELFVPROC)(GLenum pname, const GLfloat * params);
+typedef void (GLAD_API_PTR *PFNGLLIGHTMODELIPROC)(GLenum pname, GLint param);
+typedef void (GLAD_API_PTR *PFNGLLIGHTMODELIVPROC)(GLenum pname, const GLint * params);
+typedef void (GLAD_API_PTR *PFNGLLIGHTFPROC)(GLenum light, GLenum pname, GLfloat param);
+typedef void (GLAD_API_PTR *PFNGLLIGHTFVPROC)(GLenum light, GLenum pname, const GLfloat * params);
+typedef void (GLAD_API_PTR *PFNGLLIGHTIPROC)(GLenum light, GLenum pname, GLint param);
+typedef void (GLAD_API_PTR *PFNGLLIGHTIVPROC)(GLenum light, GLenum pname, const GLint * params);
+typedef void (GLAD_API_PTR *PFNGLLINESTIPPLEPROC)(GLint factor, GLushort pattern);
+typedef void (GLAD_API_PTR *PFNGLLINEWIDTHPROC)(GLfloat width);
+typedef void (GLAD_API_PTR *PFNGLLINKPROGRAMPROC)(GLuint program);
+typedef void (GLAD_API_PTR *PFNGLLISTBASEPROC)(GLuint base);
+typedef void (GLAD_API_PTR *PFNGLLOADIDENTITYPROC)(void);
+typedef void (GLAD_API_PTR *PFNGLLOADMATRIXDPROC)(const GLdouble * m);
+typedef void (GLAD_API_PTR *PFNGLLOADMATRIXFPROC)(const GLfloat * m);
+typedef void (GLAD_API_PTR *PFNGLLOADNAMEPROC)(GLuint name);
+typedef void (GLAD_API_PTR *PFNGLLOADTRANSPOSEMATRIXDPROC)(const GLdouble * m);
+typedef void (GLAD_API_PTR *PFNGLLOADTRANSPOSEMATRIXFPROC)(const GLfloat * m);
+typedef void (GLAD_API_PTR *PFNGLLOGICOPPROC)(GLenum opcode);
+typedef void (GLAD_API_PTR *PFNGLMAP1DPROC)(GLenum target, GLdouble u1, GLdouble u2, GLint stride, GLint order, const GLdouble * points);
+typedef void (GLAD_API_PTR *PFNGLMAP1FPROC)(GLenum target, GLfloat u1, GLfloat u2, GLint stride, GLint order, const GLfloat * points);
+typedef void (GLAD_API_PTR *PFNGLMAP2DPROC)(GLenum target, GLdouble u1, GLdouble u2, GLint ustride, GLint uorder, GLdouble v1, GLdouble v2, GLint vstride, GLint vorder, const GLdouble * points);
+typedef void (GLAD_API_PTR *PFNGLMAP2FPROC)(GLenum target, GLfloat u1, GLfloat u2, GLint ustride, GLint uorder, GLfloat v1, GLfloat v2, GLint vstride, GLint vorder, const GLfloat * points);
+typedef void * (GLAD_API_PTR *PFNGLMAPBUFFERPROC)(GLenum target, GLenum access);
+typedef void * (GLAD_API_PTR *PFNGLMAPBUFFERRANGEPROC)(GLenum target, GLintptr offset, GLsizeiptr length, GLbitfield access);
+typedef void (GLAD_API_PTR *PFNGLMAPGRID1DPROC)(GLint un, GLdouble u1, GLdouble u2);
+typedef void (GLAD_API_PTR *PFNGLMAPGRID1FPROC)(GLint un, GLfloat u1, GLfloat u2);
+typedef void (GLAD_API_PTR *PFNGLMAPGRID2DPROC)(GLint un, GLdouble u1, GLdouble u2, GLint vn, GLdouble v1, GLdouble v2);
+typedef void (GLAD_API_PTR *PFNGLMAPGRID2FPROC)(GLint un, GLfloat u1, GLfloat u2, GLint vn, GLfloat v1, GLfloat v2);
+typedef void (GLAD_API_PTR *PFNGLMATERIALFPROC)(GLenum face, GLenum pname, GLfloat param);
+typedef void (GLAD_API_PTR *PFNGLMATERIALFVPROC)(GLenum face, GLenum pname, const GLfloat * params);
+typedef void (GLAD_API_PTR *PFNGLMATERIALIPROC)(GLenum face, GLenum pname, GLint param);
+typedef void (GLAD_API_PTR *PFNGLMATERIALIVPROC)(GLenum face, GLenum pname, const GLint * params);
+typedef void (GLAD_API_PTR *PFNGLMATRIXMODEPROC)(GLenum mode);
+typedef void (GLAD_API_PTR *PFNGLMULTMATRIXDPROC)(const GLdouble * m);
+typedef void (GLAD_API_PTR *PFNGLMULTMATRIXFPROC)(const GLfloat * m);
+typedef void (GLAD_API_PTR *PFNGLMULTTRANSPOSEMATRIXDPROC)(const GLdouble * m);
+typedef void (GLAD_API_PTR *PFNGLMULTTRANSPOSEMATRIXFPROC)(const GLfloat * m);
+typedef void (GLAD_API_PTR *PFNGLMULTIDRAWARRAYSPROC)(GLenum mode, const GLint * first, const GLsizei * count, GLsizei drawcount);
+typedef void (GLAD_API_PTR *PFNGLMULTIDRAWELEMENTSPROC)(GLenum mode, const GLsizei * count, GLenum type, const void *const* indices, GLsizei drawcount);
+typedef void (GLAD_API_PTR *PFNGLMULTIDRAWELEMENTSBASEVERTEXPROC)(GLenum mode, const GLsizei * count, GLenum type, const void *const* indices, GLsizei drawcount, const GLint * basevertex);
+typedef void (GLAD_API_PTR *PFNGLMULTITEXCOORD1DPROC)(GLenum target, GLdouble s);
+typedef void (GLAD_API_PTR *PFNGLMULTITEXCOORD1DVPROC)(GLenum target, const GLdouble * v);
+typedef void (GLAD_API_PTR *PFNGLMULTITEXCOORD1FPROC)(GLenum target, GLfloat s);
+typedef void (GLAD_API_PTR *PFNGLMULTITEXCOORD1FVPROC)(GLenum target, const GLfloat * v);
+typedef void (GLAD_API_PTR *PFNGLMULTITEXCOORD1IPROC)(GLenum target, GLint s);
+typedef void (GLAD_API_PTR *PFNGLMULTITEXCOORD1IVPROC)(GLenum target, const GLint * v);
+typedef void (GLAD_API_PTR *PFNGLMULTITEXCOORD1SPROC)(GLenum target, GLshort s);
+typedef void (GLAD_API_PTR *PFNGLMULTITEXCOORD1SVPROC)(GLenum target, const GLshort * v);
+typedef void (GLAD_API_PTR *PFNGLMULTITEXCOORD2DPROC)(GLenum target, GLdouble s, GLdouble t);
+typedef void (GLAD_API_PTR *PFNGLMULTITEXCOORD2DVPROC)(GLenum target, const GLdouble * v);
+typedef void (GLAD_API_PTR *PFNGLMULTITEXCOORD2FPROC)(GLenum target, GLfloat s, GLfloat t);
+typedef void (GLAD_API_PTR *PFNGLMULTITEXCOORD2FVPROC)(GLenum target, const GLfloat * v);
+typedef void (GLAD_API_PTR *PFNGLMULTITEXCOORD2IPROC)(GLenum target, GLint s, GLint t);
+typedef void (GLAD_API_PTR *PFNGLMULTITEXCOORD2IVPROC)(GLenum target, const GLint * v);
+typedef void (GLAD_API_PTR *PFNGLMULTITEXCOORD2SPROC)(GLenum target, GLshort s, GLshort t);
+typedef void (GLAD_API_PTR *PFNGLMULTITEXCOORD2SVPROC)(GLenum target, const GLshort * v);
+typedef void (GLAD_API_PTR *PFNGLMULTITEXCOORD3DPROC)(GLenum target, GLdouble s, GLdouble t, GLdouble r);
+typedef void (GLAD_API_PTR *PFNGLMULTITEXCOORD3DVPROC)(GLenum target, const GLdouble * v);
+typedef void (GLAD_API_PTR *PFNGLMULTITEXCOORD3FPROC)(GLenum target, GLfloat s, GLfloat t, GLfloat r);
+typedef void (GLAD_API_PTR *PFNGLMULTITEXCOORD3FVPROC)(GLenum target, const GLfloat * v);
+typedef void (GLAD_API_PTR *PFNGLMULTITEXCOORD3IPROC)(GLenum target, GLint s, GLint t, GLint r);
+typedef void (GLAD_API_PTR *PFNGLMULTITEXCOORD3IVPROC)(GLenum target, const GLint * v);
+typedef void (GLAD_API_PTR *PFNGLMULTITEXCOORD3SPROC)(GLenum target, GLshort s, GLshort t, GLshort r);
+typedef void (GLAD_API_PTR *PFNGLMULTITEXCOORD3SVPROC)(GLenum target, const GLshort * v);
+typedef void (GLAD_API_PTR *PFNGLMULTITEXCOORD4DPROC)(GLenum target, GLdouble s, GLdouble t, GLdouble r, GLdouble q);
+typedef void (GLAD_API_PTR *PFNGLMULTITEXCOORD4DVPROC)(GLenum target, const GLdouble * v);
+typedef void (GLAD_API_PTR *PFNGLMULTITEXCOORD4FPROC)(GLenum target, GLfloat s, GLfloat t, GLfloat r, GLfloat q);
+typedef void (GLAD_API_PTR *PFNGLMULTITEXCOORD4FVPROC)(GLenum target, const GLfloat * v);
+typedef void (GLAD_API_PTR *PFNGLMULTITEXCOORD4IPROC)(GLenum target, GLint s, GLint t, GLint r, GLint q);
+typedef void (GLAD_API_PTR *PFNGLMULTITEXCOORD4IVPROC)(GLenum target, const GLint * v);
+typedef void (GLAD_API_PTR *PFNGLMULTITEXCOORD4SPROC)(GLenum target, GLshort s, GLshort t, GLshort r, GLshort q);
+typedef void (GLAD_API_PTR *PFNGLMULTITEXCOORD4SVPROC)(GLenum target, const GLshort * v);
+typedef void (GLAD_API_PTR *PFNGLMULTITEXCOORDP1UIPROC)(GLenum texture, GLenum type, GLuint coords);
+typedef void (GLAD_API_PTR *PFNGLMULTITEXCOORDP1UIVPROC)(GLenum texture, GLenum type, const GLuint * coords);
+typedef void (GLAD_API_PTR *PFNGLMULTITEXCOORDP2UIPROC)(GLenum texture, GLenum type, GLuint coords);
+typedef void (GLAD_API_PTR *PFNGLMULTITEXCOORDP2UIVPROC)(GLenum texture, GLenum type, const GLuint * coords);
+typedef void (GLAD_API_PTR *PFNGLMULTITEXCOORDP3UIPROC)(GLenum texture, GLenum type, GLuint coords);
+typedef void (GLAD_API_PTR *PFNGLMULTITEXCOORDP3UIVPROC)(GLenum texture, GLenum type, const GLuint * coords);
+typedef void (GLAD_API_PTR *PFNGLMULTITEXCOORDP4UIPROC)(GLenum texture, GLenum type, GLuint coords);
+typedef void (GLAD_API_PTR *PFNGLMULTITEXCOORDP4UIVPROC)(GLenum texture, GLenum type, const GLuint * coords);
+typedef void (GLAD_API_PTR *PFNGLNEWLISTPROC)(GLuint list, GLenum mode);
+typedef void (GLAD_API_PTR *PFNGLNORMAL3BPROC)(GLbyte nx, GLbyte ny, GLbyte nz);
+typedef void (GLAD_API_PTR *PFNGLNORMAL3BVPROC)(const GLbyte * v);
+typedef void (GLAD_API_PTR *PFNGLNORMAL3DPROC)(GLdouble nx, GLdouble ny, GLdouble nz);
+typedef void (GLAD_API_PTR *PFNGLNORMAL3DVPROC)(const GLdouble * v);
+typedef void (GLAD_API_PTR *PFNGLNORMAL3FPROC)(GLfloat nx, GLfloat ny, GLfloat nz);
+typedef void (GLAD_API_PTR *PFNGLNORMAL3FVPROC)(const GLfloat * v);
+typedef void (GLAD_API_PTR *PFNGLNORMAL3IPROC)(GLint nx, GLint ny, GLint nz);
+typedef void (GLAD_API_PTR *PFNGLNORMAL3IVPROC)(const GLint * v);
+typedef void (GLAD_API_PTR *PFNGLNORMAL3SPROC)(GLshort nx, GLshort ny, GLshort nz);
+typedef void (GLAD_API_PTR *PFNGLNORMAL3SVPROC)(const GLshort * v);
+typedef void (GLAD_API_PTR *PFNGLNORMALP3UIPROC)(GLenum type, GLuint coords);
+typedef void (GLAD_API_PTR *PFNGLNORMALP3UIVPROC)(GLenum type, const GLuint * coords);
+typedef void (GLAD_API_PTR *PFNGLNORMALPOINTERPROC)(GLenum type, GLsizei stride, const void * pointer);
+typedef void (GLAD_API_PTR *PFNGLOBJECTLABELPROC)(GLenum identifier, GLuint name, GLsizei length, const GLchar * label);
+typedef void (GLAD_API_PTR *PFNGLOBJECTPTRLABELPROC)(const void * ptr, GLsizei length, const GLchar * label);
+typedef void (GLAD_API_PTR *PFNGLORTHOPROC)(GLdouble left, GLdouble right, GLdouble bottom, GLdouble top, GLdouble zNear, GLdouble zFar);
+typedef void (GLAD_API_PTR *PFNGLPASSTHROUGHPROC)(GLfloat token);
+typedef void (GLAD_API_PTR *PFNGLPIXELMAPFVPROC)(GLenum map, GLsizei mapsize, const GLfloat * values);
+typedef void (GLAD_API_PTR *PFNGLPIXELMAPUIVPROC)(GLenum map, GLsizei mapsize, const GLuint * values);
+typedef void (GLAD_API_PTR *PFNGLPIXELMAPUSVPROC)(GLenum map, GLsizei mapsize, const GLushort * values);
+typedef void (GLAD_API_PTR *PFNGLPIXELSTOREFPROC)(GLenum pname, GLfloat param);
+typedef void (GLAD_API_PTR *PFNGLPIXELSTOREIPROC)(GLenum pname, GLint param);
+typedef void (GLAD_API_PTR *PFNGLPIXELTRANSFERFPROC)(GLenum pname, GLfloat param);
+typedef void (GLAD_API_PTR *PFNGLPIXELTRANSFERIPROC)(GLenum pname, GLint param);
+typedef void (GLAD_API_PTR *PFNGLPIXELZOOMPROC)(GLfloat xfactor, GLfloat yfactor);
+typedef void (GLAD_API_PTR *PFNGLPOINTPARAMETERFPROC)(GLenum pname, GLfloat param);
+typedef void (GLAD_API_PTR *PFNGLPOINTPARAMETERFVPROC)(GLenum pname, const GLfloat * params);
+typedef void (GLAD_API_PTR *PFNGLPOINTPARAMETERIPROC)(GLenum pname, GLint param);
+typedef void (GLAD_API_PTR *PFNGLPOINTPARAMETERIVPROC)(GLenum pname, const GLint * params);
+typedef void (GLAD_API_PTR *PFNGLPOINTSIZEPROC)(GLfloat size);
+typedef void (GLAD_API_PTR *PFNGLPOLYGONMODEPROC)(GLenum face, GLenum mode);
+typedef void (GLAD_API_PTR *PFNGLPOLYGONOFFSETPROC)(GLfloat factor, GLfloat units);
+typedef void (GLAD_API_PTR *PFNGLPOLYGONSTIPPLEPROC)(const GLubyte * mask);
+typedef void (GLAD_API_PTR *PFNGLPOPATTRIBPROC)(void);
+typedef void (GLAD_API_PTR *PFNGLPOPCLIENTATTRIBPROC)(void);
+typedef void (GLAD_API_PTR *PFNGLPOPDEBUGGROUPPROC)(void);
+typedef void (GLAD_API_PTR *PFNGLPOPMATRIXPROC)(void);
+typedef void (GLAD_API_PTR *PFNGLPOPNAMEPROC)(void);
+typedef void (GLAD_API_PTR *PFNGLPRIMITIVERESTARTINDEXPROC)(GLuint index);
+typedef void (GLAD_API_PTR *PFNGLPRIORITIZETEXTURESPROC)(GLsizei n, const GLuint * textures, const GLfloat * priorities);
+typedef void (GLAD_API_PTR *PFNGLPROVOKINGVERTEXPROC)(GLenum mode);
+typedef void (GLAD_API_PTR *PFNGLPUSHATTRIBPROC)(GLbitfield mask);
+typedef void (GLAD_API_PTR *PFNGLPUSHCLIENTATTRIBPROC)(GLbitfield mask);
+typedef void (GLAD_API_PTR *PFNGLPUSHDEBUGGROUPPROC)(GLenum source, GLuint id, GLsizei length, const GLchar * message);
+typedef void (GLAD_API_PTR *PFNGLPUSHMATRIXPROC)(void);
+typedef void (GLAD_API_PTR *PFNGLPUSHNAMEPROC)(GLuint name);
+typedef void (GLAD_API_PTR *PFNGLQUERYCOUNTERPROC)(GLuint id, GLenum target);
+typedef void (GLAD_API_PTR *PFNGLRASTERPOS2DPROC)(GLdouble x, GLdouble y);
+typedef void (GLAD_API_PTR *PFNGLRASTERPOS2DVPROC)(const GLdouble * v);
+typedef void (GLAD_API_PTR *PFNGLRASTERPOS2FPROC)(GLfloat x, GLfloat y);
+typedef void (GLAD_API_PTR *PFNGLRASTERPOS2FVPROC)(const GLfloat * v);
+typedef void (GLAD_API_PTR *PFNGLRASTERPOS2IPROC)(GLint x, GLint y);
+typedef void (GLAD_API_PTR *PFNGLRASTERPOS2IVPROC)(const GLint * v);
+typedef void (GLAD_API_PTR *PFNGLRASTERPOS2SPROC)(GLshort x, GLshort y);
+typedef void (GLAD_API_PTR *PFNGLRASTERPOS2SVPROC)(const GLshort * v);
+typedef void (GLAD_API_PTR *PFNGLRASTERPOS3DPROC)(GLdouble x, GLdouble y, GLdouble z);
+typedef void (GLAD_API_PTR *PFNGLRASTERPOS3DVPROC)(const GLdouble * v);
+typedef void (GLAD_API_PTR *PFNGLRASTERPOS3FPROC)(GLfloat x, GLfloat y, GLfloat z);
+typedef void (GLAD_API_PTR *PFNGLRASTERPOS3FVPROC)(const GLfloat * v);
+typedef void (GLAD_API_PTR *PFNGLRASTERPOS3IPROC)(GLint x, GLint y, GLint z);
+typedef void (GLAD_API_PTR *PFNGLRASTERPOS3IVPROC)(const GLint * v);
+typedef void (GLAD_API_PTR *PFNGLRASTERPOS3SPROC)(GLshort x, GLshort y, GLshort z);
+typedef void (GLAD_API_PTR *PFNGLRASTERPOS3SVPROC)(const GLshort * v);
+typedef void (GLAD_API_PTR *PFNGLRASTERPOS4DPROC)(GLdouble x, GLdouble y, GLdouble z, GLdouble w);
+typedef void (GLAD_API_PTR *PFNGLRASTERPOS4DVPROC)(const GLdouble * v);
+typedef void (GLAD_API_PTR *PFNGLRASTERPOS4FPROC)(GLfloat x, GLfloat y, GLfloat z, GLfloat w);
+typedef void (GLAD_API_PTR *PFNGLRASTERPOS4FVPROC)(const GLfloat * v);
+typedef void (GLAD_API_PTR *PFNGLRASTERPOS4IPROC)(GLint x, GLint y, GLint z, GLint w);
+typedef void (GLAD_API_PTR *PFNGLRASTERPOS4IVPROC)(const GLint * v);
+typedef void (GLAD_API_PTR *PFNGLRASTERPOS4SPROC)(GLshort x, GLshort y, GLshort z, GLshort w);
+typedef void (GLAD_API_PTR *PFNGLRASTERPOS4SVPROC)(const GLshort * v);
+typedef void (GLAD_API_PTR *PFNGLREADBUFFERPROC)(GLenum src);
+typedef void (GLAD_API_PTR *PFNGLREADPIXELSPROC)(GLint x, GLint y, GLsizei width, GLsizei height, GLenum format, GLenum type, void * pixels);
+typedef void (GLAD_API_PTR *PFNGLREADNPIXELSARBPROC)(GLint x, GLint y, GLsizei width, GLsizei height, GLenum format, GLenum type, GLsizei bufSize, void * data);
+typedef void (GLAD_API_PTR *PFNGLRECTDPROC)(GLdouble x1, GLdouble y1, GLdouble x2, GLdouble y2);
+typedef void (GLAD_API_PTR *PFNGLRECTDVPROC)(const GLdouble * v1, const GLdouble * v2);
+typedef void (GLAD_API_PTR *PFNGLRECTFPROC)(GLfloat x1, GLfloat y1, GLfloat x2, GLfloat y2);
+typedef void (GLAD_API_PTR *PFNGLRECTFVPROC)(const GLfloat * v1, const GLfloat * v2);
+typedef void (GLAD_API_PTR *PFNGLRECTIPROC)(GLint x1, GLint y1, GLint x2, GLint y2);
+typedef void (GLAD_API_PTR *PFNGLRECTIVPROC)(const GLint * v1, const GLint * v2);
+typedef void (GLAD_API_PTR *PFNGLRECTSPROC)(GLshort x1, GLshort y1, GLshort x2, GLshort y2);
+typedef void (GLAD_API_PTR *PFNGLRECTSVPROC)(const GLshort * v1, const GLshort * v2);
+typedef GLint (GLAD_API_PTR *PFNGLRENDERMODEPROC)(GLenum mode);
+typedef void (GLAD_API_PTR *PFNGLRENDERBUFFERSTORAGEPROC)(GLenum target, GLenum internalformat, GLsizei width, GLsizei height);
+typedef void (GLAD_API_PTR *PFNGLRENDERBUFFERSTORAGEMULTISAMPLEPROC)(GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height);
+typedef void (GLAD_API_PTR *PFNGLROTATEDPROC)(GLdouble angle, GLdouble x, GLdouble y, GLdouble z);
+typedef void (GLAD_API_PTR *PFNGLROTATEFPROC)(GLfloat angle, GLfloat x, GLfloat y, GLfloat z);
+typedef void (GLAD_API_PTR *PFNGLSAMPLECOVERAGEPROC)(GLfloat value, GLboolean invert);
+typedef void (GLAD_API_PTR *PFNGLSAMPLECOVERAGEARBPROC)(GLfloat value, GLboolean invert);
+typedef void (GLAD_API_PTR *PFNGLSAMPLEMASKIPROC)(GLuint maskNumber, GLbitfield mask);
+typedef void (GLAD_API_PTR *PFNGLSAMPLERPARAMETERIIVPROC)(GLuint sampler, GLenum pname, const GLint * param);
+typedef void (GLAD_API_PTR *PFNGLSAMPLERPARAMETERIUIVPROC)(GLuint sampler, GLenum pname, const GLuint * param);
+typedef void (GLAD_API_PTR *PFNGLSAMPLERPARAMETERFPROC)(GLuint sampler, GLenum pname, GLfloat param);
+typedef void (GLAD_API_PTR *PFNGLSAMPLERPARAMETERFVPROC)(GLuint sampler, GLenum pname, const GLfloat * param);
+typedef void (GLAD_API_PTR *PFNGLSAMPLERPARAMETERIPROC)(GLuint sampler, GLenum pname, GLint param);
+typedef void (GLAD_API_PTR *PFNGLSAMPLERPARAMETERIVPROC)(GLuint sampler, GLenum pname, const GLint * param);
+typedef void (GLAD_API_PTR *PFNGLSCALEDPROC)(GLdouble x, GLdouble y, GLdouble z);
+typedef void (GLAD_API_PTR *PFNGLSCALEFPROC)(GLfloat x, GLfloat y, GLfloat z);
+typedef void (GLAD_API_PTR *PFNGLSCISSORPROC)(GLint x, GLint y, GLsizei width, GLsizei height);
+typedef void (GLAD_API_PTR *PFNGLSECONDARYCOLOR3BPROC)(GLbyte red, GLbyte green, GLbyte blue);
+typedef void (GLAD_API_PTR *PFNGLSECONDARYCOLOR3BVPROC)(const GLbyte * v);
+typedef void (GLAD_API_PTR *PFNGLSECONDARYCOLOR3DPROC)(GLdouble red, GLdouble green, GLdouble blue);
+typedef void (GLAD_API_PTR *PFNGLSECONDARYCOLOR3DVPROC)(const GLdouble * v);
+typedef void (GLAD_API_PTR *PFNGLSECONDARYCOLOR3FPROC)(GLfloat red, GLfloat green, GLfloat blue);
+typedef void (GLAD_API_PTR *PFNGLSECONDARYCOLOR3FVPROC)(const GLfloat * v);
+typedef void (GLAD_API_PTR *PFNGLSECONDARYCOLOR3IPROC)(GLint red, GLint green, GLint blue);
+typedef void (GLAD_API_PTR *PFNGLSECONDARYCOLOR3IVPROC)(const GLint * v);
+typedef void (GLAD_API_PTR *PFNGLSECONDARYCOLOR3SPROC)(GLshort red, GLshort green, GLshort blue);
+typedef void (GLAD_API_PTR *PFNGLSECONDARYCOLOR3SVPROC)(const GLshort * v);
+typedef void (GLAD_API_PTR *PFNGLSECONDARYCOLOR3UBPROC)(GLubyte red, GLubyte green, GLubyte blue);
+typedef void (GLAD_API_PTR *PFNGLSECONDARYCOLOR3UBVPROC)(const GLubyte * v);
+typedef void (GLAD_API_PTR *PFNGLSECONDARYCOLOR3UIPROC)(GLuint red, GLuint green, GLuint blue);
+typedef void (GLAD_API_PTR *PFNGLSECONDARYCOLOR3UIVPROC)(const GLuint * v);
+typedef void (GLAD_API_PTR *PFNGLSECONDARYCOLOR3USPROC)(GLushort red, GLushort green, GLushort blue);
+typedef void (GLAD_API_PTR *PFNGLSECONDARYCOLOR3USVPROC)(const GLushort * v);
+typedef void (GLAD_API_PTR *PFNGLSECONDARYCOLORP3UIPROC)(GLenum type, GLuint color);
+typedef void (GLAD_API_PTR *PFNGLSECONDARYCOLORP3UIVPROC)(GLenum type, const GLuint * color);
+typedef void (GLAD_API_PTR *PFNGLSECONDARYCOLORPOINTERPROC)(GLint size, GLenum type, GLsizei stride, const void * pointer);
+typedef void (GLAD_API_PTR *PFNGLSELECTBUFFERPROC)(GLsizei size, GLuint * buffer);
+typedef void (GLAD_API_PTR *PFNGLSHADEMODELPROC)(GLenum mode);
+typedef void (GLAD_API_PTR *PFNGLSHADERSOURCEPROC)(GLuint shader, GLsizei count, const GLchar *const* string, const GLint * length);
+typedef void (GLAD_API_PTR *PFNGLSTENCILFUNCPROC)(GLenum func, GLint ref, GLuint mask);
+typedef void (GLAD_API_PTR *PFNGLSTENCILFUNCSEPARATEPROC)(GLenum face, GLenum func, GLint ref, GLuint mask);
+typedef void (GLAD_API_PTR *PFNGLSTENCILMASKPROC)(GLuint mask);
+typedef void (GLAD_API_PTR *PFNGLSTENCILMASKSEPARATEPROC)(GLenum face, GLuint mask);
+typedef void (GLAD_API_PTR *PFNGLSTENCILOPPROC)(GLenum fail, GLenum zfail, GLenum zpass);
+typedef void (GLAD_API_PTR *PFNGLSTENCILOPSEPARATEPROC)(GLenum face, GLenum sfail, GLenum dpfail, GLenum dppass);
+typedef void (GLAD_API_PTR *PFNGLTEXBUFFERPROC)(GLenum target, GLenum internalformat, GLuint buffer);
+typedef void (GLAD_API_PTR *PFNGLTEXCOORD1DPROC)(GLdouble s);
+typedef void (GLAD_API_PTR *PFNGLTEXCOORD1DVPROC)(const GLdouble * v);
+typedef void (GLAD_API_PTR *PFNGLTEXCOORD1FPROC)(GLfloat s);
+typedef void (GLAD_API_PTR *PFNGLTEXCOORD1FVPROC)(const GLfloat * v);
+typedef void (GLAD_API_PTR *PFNGLTEXCOORD1IPROC)(GLint s);
+typedef void (GLAD_API_PTR *PFNGLTEXCOORD1IVPROC)(const GLint * v);
+typedef void (GLAD_API_PTR *PFNGLTEXCOORD1SPROC)(GLshort s);
+typedef void (GLAD_API_PTR *PFNGLTEXCOORD1SVPROC)(const GLshort * v);
+typedef void (GLAD_API_PTR *PFNGLTEXCOORD2DPROC)(GLdouble s, GLdouble t);
+typedef void (GLAD_API_PTR *PFNGLTEXCOORD2DVPROC)(const GLdouble * v);
+typedef void (GLAD_API_PTR *PFNGLTEXCOORD2FPROC)(GLfloat s, GLfloat t);
+typedef void (GLAD_API_PTR *PFNGLTEXCOORD2FVPROC)(const GLfloat * v);
+typedef void (GLAD_API_PTR *PFNGLTEXCOORD2IPROC)(GLint s, GLint t);
+typedef void (GLAD_API_PTR *PFNGLTEXCOORD2IVPROC)(const GLint * v);
+typedef void (GLAD_API_PTR *PFNGLTEXCOORD2SPROC)(GLshort s, GLshort t);
+typedef void (GLAD_API_PTR *PFNGLTEXCOORD2SVPROC)(const GLshort * v);
+typedef void (GLAD_API_PTR *PFNGLTEXCOORD3DPROC)(GLdouble s, GLdouble t, GLdouble r);
+typedef void (GLAD_API_PTR *PFNGLTEXCOORD3DVPROC)(const GLdouble * v);
+typedef void (GLAD_API_PTR *PFNGLTEXCOORD3FPROC)(GLfloat s, GLfloat t, GLfloat r);
+typedef void (GLAD_API_PTR *PFNGLTEXCOORD3FVPROC)(const GLfloat * v);
+typedef void (GLAD_API_PTR *PFNGLTEXCOORD3IPROC)(GLint s, GLint t, GLint r);
+typedef void (GLAD_API_PTR *PFNGLTEXCOORD3IVPROC)(const GLint * v);
+typedef void (GLAD_API_PTR *PFNGLTEXCOORD3SPROC)(GLshort s, GLshort t, GLshort r);
+typedef void (GLAD_API_PTR *PFNGLTEXCOORD3SVPROC)(const GLshort * v);
+typedef void (GLAD_API_PTR *PFNGLTEXCOORD4DPROC)(GLdouble s, GLdouble t, GLdouble r, GLdouble q);
+typedef void (GLAD_API_PTR *PFNGLTEXCOORD4DVPROC)(const GLdouble * v);
+typedef void (GLAD_API_PTR *PFNGLTEXCOORD4FPROC)(GLfloat s, GLfloat t, GLfloat r, GLfloat q);
+typedef void (GLAD_API_PTR *PFNGLTEXCOORD4FVPROC)(const GLfloat * v);
+typedef void (GLAD_API_PTR *PFNGLTEXCOORD4IPROC)(GLint s, GLint t, GLint r, GLint q);
+typedef void (GLAD_API_PTR *PFNGLTEXCOORD4IVPROC)(const GLint * v);
+typedef void (GLAD_API_PTR *PFNGLTEXCOORD4SPROC)(GLshort s, GLshort t, GLshort r, GLshort q);
+typedef void (GLAD_API_PTR *PFNGLTEXCOORD4SVPROC)(const GLshort * v);
+typedef void (GLAD_API_PTR *PFNGLTEXCOORDP1UIPROC)(GLenum type, GLuint coords);
+typedef void (GLAD_API_PTR *PFNGLTEXCOORDP1UIVPROC)(GLenum type, const GLuint * coords);
+typedef void (GLAD_API_PTR *PFNGLTEXCOORDP2UIPROC)(GLenum type, GLuint coords);
+typedef void (GLAD_API_PTR *PFNGLTEXCOORDP2UIVPROC)(GLenum type, const GLuint * coords);
+typedef void (GLAD_API_PTR *PFNGLTEXCOORDP3UIPROC)(GLenum type, GLuint coords);
+typedef void (GLAD_API_PTR *PFNGLTEXCOORDP3UIVPROC)(GLenum type, const GLuint * coords);
+typedef void (GLAD_API_PTR *PFNGLTEXCOORDP4UIPROC)(GLenum type, GLuint coords);
+typedef void (GLAD_API_PTR *PFNGLTEXCOORDP4UIVPROC)(GLenum type, const GLuint * coords);
+typedef void (GLAD_API_PTR *PFNGLTEXCOORDPOINTERPROC)(GLint size, GLenum type, GLsizei stride, const void * pointer);
+typedef void (GLAD_API_PTR *PFNGLTEXENVFPROC)(GLenum target, GLenum pname, GLfloat param);
+typedef void (GLAD_API_PTR *PFNGLTEXENVFVPROC)(GLenum target, GLenum pname, const GLfloat * params);
+typedef void (GLAD_API_PTR *PFNGLTEXENVIPROC)(GLenum target, GLenum pname, GLint param);
+typedef void (GLAD_API_PTR *PFNGLTEXENVIVPROC)(GLenum target, GLenum pname, const GLint * params);
+typedef void (GLAD_API_PTR *PFNGLTEXGENDPROC)(GLenum coord, GLenum pname, GLdouble param);
+typedef void (GLAD_API_PTR *PFNGLTEXGENDVPROC)(GLenum coord, GLenum pname, const GLdouble * params);
+typedef void (GLAD_API_PTR *PFNGLTEXGENFPROC)(GLenum coord, GLenum pname, GLfloat param);
+typedef void (GLAD_API_PTR *PFNGLTEXGENFVPROC)(GLenum coord, GLenum pname, const GLfloat * params);
+typedef void (GLAD_API_PTR *PFNGLTEXGENIPROC)(GLenum coord, GLenum pname, GLint param);
+typedef void (GLAD_API_PTR *PFNGLTEXGENIVPROC)(GLenum coord, GLenum pname, const GLint * params);
+typedef void (GLAD_API_PTR *PFNGLTEXIMAGE1DPROC)(GLenum target, GLint level, GLint internalformat, GLsizei width, GLint border, GLenum format, GLenum type, const void * pixels);
+typedef void (GLAD_API_PTR *PFNGLTEXIMAGE2DPROC)(GLenum target, GLint level, GLint internalformat, GLsizei width, GLsizei height, GLint border, GLenum format, GLenum type, const void * pixels);
+typedef void (GLAD_API_PTR *PFNGLTEXIMAGE2DMULTISAMPLEPROC)(GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height, GLboolean fixedsamplelocations);
+typedef void (GLAD_API_PTR *PFNGLTEXIMAGE3DPROC)(GLenum target, GLint level, GLint internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLenum format, GLenum type, const void * pixels);
+typedef void (GLAD_API_PTR *PFNGLTEXIMAGE3DMULTISAMPLEPROC)(GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLboolean fixedsamplelocations);
+typedef void (GLAD_API_PTR *PFNGLTEXPARAMETERIIVPROC)(GLenum target, GLenum pname, const GLint * params);
+typedef void (GLAD_API_PTR *PFNGLTEXPARAMETERIUIVPROC)(GLenum target, GLenum pname, const GLuint * params);
+typedef void (GLAD_API_PTR *PFNGLTEXPARAMETERFPROC)(GLenum target, GLenum pname, GLfloat param);
+typedef void (GLAD_API_PTR *PFNGLTEXPARAMETERFVPROC)(GLenum target, GLenum pname, const GLfloat * params);
+typedef void (GLAD_API_PTR *PFNGLTEXPARAMETERIPROC)(GLenum target, GLenum pname, GLint param);
+typedef void (GLAD_API_PTR *PFNGLTEXPARAMETERIVPROC)(GLenum target, GLenum pname, const GLint * params);
+typedef void (GLAD_API_PTR *PFNGLTEXSUBIMAGE1DPROC)(GLenum target, GLint level, GLint xoffset, GLsizei width, GLenum format, GLenum type, const void * pixels);
+typedef void (GLAD_API_PTR *PFNGLTEXSUBIMAGE2DPROC)(GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLenum type, const void * pixels);
+typedef void (GLAD_API_PTR *PFNGLTEXSUBIMAGE3DPROC)(GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, const void * pixels);
+typedef void (GLAD_API_PTR *PFNGLTRANSFORMFEEDBACKVARYINGSPROC)(GLuint program, GLsizei count, const GLchar *const* varyings, GLenum bufferMode);
+typedef void (GLAD_API_PTR *PFNGLTRANSLATEDPROC)(GLdouble x, GLdouble y, GLdouble z);
+typedef void (GLAD_API_PTR *PFNGLTRANSLATEFPROC)(GLfloat x, GLfloat y, GLfloat z);
+typedef void (GLAD_API_PTR *PFNGLUNIFORM1FPROC)(GLint location, GLfloat v0);
+typedef void (GLAD_API_PTR *PFNGLUNIFORM1FVPROC)(GLint location, GLsizei count, const GLfloat * value);
+typedef void (GLAD_API_PTR *PFNGLUNIFORM1IPROC)(GLint location, GLint v0);
+typedef void (GLAD_API_PTR *PFNGLUNIFORM1IVPROC)(GLint location, GLsizei count, const GLint * value);
+typedef void (GLAD_API_PTR *PFNGLUNIFORM1UIPROC)(GLint location, GLuint v0);
+typedef void (GLAD_API_PTR *PFNGLUNIFORM1UIVPROC)(GLint location, GLsizei count, const GLuint * value);
+typedef void (GLAD_API_PTR *PFNGLUNIFORM2FPROC)(GLint location, GLfloat v0, GLfloat v1);
+typedef void (GLAD_API_PTR *PFNGLUNIFORM2FVPROC)(GLint location, GLsizei count, const GLfloat * value);
+typedef void (GLAD_API_PTR *PFNGLUNIFORM2IPROC)(GLint location, GLint v0, GLint v1);
+typedef void (GLAD_API_PTR *PFNGLUNIFORM2IVPROC)(GLint location, GLsizei count, const GLint * value);
+typedef void (GLAD_API_PTR *PFNGLUNIFORM2UIPROC)(GLint location, GLuint v0, GLuint v1);
+typedef void (GLAD_API_PTR *PFNGLUNIFORM2UIVPROC)(GLint location, GLsizei count, const GLuint * value);
+typedef void (GLAD_API_PTR *PFNGLUNIFORM3FPROC)(GLint location, GLfloat v0, GLfloat v1, GLfloat v2);
+typedef void (GLAD_API_PTR *PFNGLUNIFORM3FVPROC)(GLint location, GLsizei count, const GLfloat * value);
+typedef void (GLAD_API_PTR *PFNGLUNIFORM3IPROC)(GLint location, GLint v0, GLint v1, GLint v2);
+typedef void (GLAD_API_PTR *PFNGLUNIFORM3IVPROC)(GLint location, GLsizei count, const GLint * value);
+typedef void (GLAD_API_PTR *PFNGLUNIFORM3UIPROC)(GLint location, GLuint v0, GLuint v1, GLuint v2);
+typedef void (GLAD_API_PTR *PFNGLUNIFORM3UIVPROC)(GLint location, GLsizei count, const GLuint * value);
+typedef void (GLAD_API_PTR *PFNGLUNIFORM4FPROC)(GLint location, GLfloat v0, GLfloat v1, GLfloat v2, GLfloat v3);
+typedef void (GLAD_API_PTR *PFNGLUNIFORM4FVPROC)(GLint location, GLsizei count, const GLfloat * value);
+typedef void (GLAD_API_PTR *PFNGLUNIFORM4IPROC)(GLint location, GLint v0, GLint v1, GLint v2, GLint v3);
+typedef void (GLAD_API_PTR *PFNGLUNIFORM4IVPROC)(GLint location, GLsizei count, const GLint * value);
+typedef void (GLAD_API_PTR *PFNGLUNIFORM4UIPROC)(GLint location, GLuint v0, GLuint v1, GLuint v2, GLuint v3);
+typedef void (GLAD_API_PTR *PFNGLUNIFORM4UIVPROC)(GLint location, GLsizei count, const GLuint * value);
+typedef void (GLAD_API_PTR *PFNGLUNIFORMBLOCKBINDINGPROC)(GLuint program, GLuint uniformBlockIndex, GLuint uniformBlockBinding);
+typedef void (GLAD_API_PTR *PFNGLUNIFORMMATRIX2FVPROC)(GLint location, GLsizei count, GLboolean transpose, const GLfloat * value);
+typedef void (GLAD_API_PTR *PFNGLUNIFORMMATRIX2X3FVPROC)(GLint location, GLsizei count, GLboolean transpose, const GLfloat * value);
+typedef void (GLAD_API_PTR *PFNGLUNIFORMMATRIX2X4FVPROC)(GLint location, GLsizei count, GLboolean transpose, const GLfloat * value);
+typedef void (GLAD_API_PTR *PFNGLUNIFORMMATRIX3FVPROC)(GLint location, GLsizei count, GLboolean transpose, const GLfloat * value);
+typedef void (GLAD_API_PTR *PFNGLUNIFORMMATRIX3X2FVPROC)(GLint location, GLsizei count, GLboolean transpose, const GLfloat * value);
+typedef void (GLAD_API_PTR *PFNGLUNIFORMMATRIX3X4FVPROC)(GLint location, GLsizei count, GLboolean transpose, const GLfloat * value);
+typedef void (GLAD_API_PTR *PFNGLUNIFORMMATRIX4FVPROC)(GLint location, GLsizei count, GLboolean transpose, const GLfloat * value);
+typedef void (GLAD_API_PTR *PFNGLUNIFORMMATRIX4X2FVPROC)(GLint location, GLsizei count, GLboolean transpose, const GLfloat * value);
+typedef void (GLAD_API_PTR *PFNGLUNIFORMMATRIX4X3FVPROC)(GLint location, GLsizei count, GLboolean transpose, const GLfloat * value);
+typedef GLboolean (GLAD_API_PTR *PFNGLUNMAPBUFFERPROC)(GLenum target);
+typedef void (GLAD_API_PTR *PFNGLUSEPROGRAMPROC)(GLuint program);
+typedef void (GLAD_API_PTR *PFNGLVALIDATEPROGRAMPROC)(GLuint program);
+typedef void (GLAD_API_PTR *PFNGLVERTEX2DPROC)(GLdouble x, GLdouble y);
+typedef void (GLAD_API_PTR *PFNGLVERTEX2DVPROC)(const GLdouble * v);
+typedef void (GLAD_API_PTR *PFNGLVERTEX2FPROC)(GLfloat x, GLfloat y);
+typedef void (GLAD_API_PTR *PFNGLVERTEX2FVPROC)(const GLfloat * v);
+typedef void (GLAD_API_PTR *PFNGLVERTEX2IPROC)(GLint x, GLint y);
+typedef void (GLAD_API_PTR *PFNGLVERTEX2IVPROC)(const GLint * v);
+typedef void (GLAD_API_PTR *PFNGLVERTEX2SPROC)(GLshort x, GLshort y);
+typedef void (GLAD_API_PTR *PFNGLVERTEX2SVPROC)(const GLshort * v);
+typedef void (GLAD_API_PTR *PFNGLVERTEX3DPROC)(GLdouble x, GLdouble y, GLdouble z);
+typedef void (GLAD_API_PTR *PFNGLVERTEX3DVPROC)(const GLdouble * v);
+typedef void (GLAD_API_PTR *PFNGLVERTEX3FPROC)(GLfloat x, GLfloat y, GLfloat z);
+typedef void (GLAD_API_PTR *PFNGLVERTEX3FVPROC)(const GLfloat * v);
+typedef void (GLAD_API_PTR *PFNGLVERTEX3IPROC)(GLint x, GLint y, GLint z);
+typedef void (GLAD_API_PTR *PFNGLVERTEX3IVPROC)(const GLint * v);
+typedef void (GLAD_API_PTR *PFNGLVERTEX3SPROC)(GLshort x, GLshort y, GLshort z);
+typedef void (GLAD_API_PTR *PFNGLVERTEX3SVPROC)(const GLshort * v);
+typedef void (GLAD_API_PTR *PFNGLVERTEX4DPROC)(GLdouble x, GLdouble y, GLdouble z, GLdouble w);
+typedef void (GLAD_API_PTR *PFNGLVERTEX4DVPROC)(const GLdouble * v);
+typedef void (GLAD_API_PTR *PFNGLVERTEX4FPROC)(GLfloat x, GLfloat y, GLfloat z, GLfloat w);
+typedef void (GLAD_API_PTR *PFNGLVERTEX4FVPROC)(const GLfloat * v);
+typedef void (GLAD_API_PTR *PFNGLVERTEX4IPROC)(GLint x, GLint y, GLint z, GLint w);
+typedef void (GLAD_API_PTR *PFNGLVERTEX4IVPROC)(const GLint * v);
+typedef void (GLAD_API_PTR *PFNGLVERTEX4SPROC)(GLshort x, GLshort y, GLshort z, GLshort w);
+typedef void (GLAD_API_PTR *PFNGLVERTEX4SVPROC)(const GLshort * v);
+typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIB1DPROC)(GLuint index, GLdouble x);
+typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIB1DVPROC)(GLuint index, const GLdouble * v);
+typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIB1FPROC)(GLuint index, GLfloat x);
+typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIB1FVPROC)(GLuint index, const GLfloat * v);
+typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIB1SPROC)(GLuint index, GLshort x);
+typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIB1SVPROC)(GLuint index, const GLshort * v);
+typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIB2DPROC)(GLuint index, GLdouble x, GLdouble y);
+typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIB2DVPROC)(GLuint index, const GLdouble * v);
+typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIB2FPROC)(GLuint index, GLfloat x, GLfloat y);
+typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIB2FVPROC)(GLuint index, const GLfloat * v);
+typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIB2SPROC)(GLuint index, GLshort x, GLshort y);
+typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIB2SVPROC)(GLuint index, const GLshort * v);
+typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIB3DPROC)(GLuint index, GLdouble x, GLdouble y, GLdouble z);
+typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIB3DVPROC)(GLuint index, const GLdouble * v);
+typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIB3FPROC)(GLuint index, GLfloat x, GLfloat y, GLfloat z);
+typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIB3FVPROC)(GLuint index, const GLfloat * v);
+typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIB3SPROC)(GLuint index, GLshort x, GLshort y, GLshort z);
+typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIB3SVPROC)(GLuint index, const GLshort * v);
+typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIB4NBVPROC)(GLuint index, const GLbyte * v);
+typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIB4NIVPROC)(GLuint index, const GLint * v);
+typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIB4NSVPROC)(GLuint index, const GLshort * v);
+typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIB4NUBPROC)(GLuint index, GLubyte x, GLubyte y, GLubyte z, GLubyte w);
+typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIB4NUBVPROC)(GLuint index, const GLubyte * v);
+typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIB4NUIVPROC)(GLuint index, const GLuint * v);
+typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIB4NUSVPROC)(GLuint index, const GLushort * v);
+typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIB4BVPROC)(GLuint index, const GLbyte * v);
+typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIB4DPROC)(GLuint index, GLdouble x, GLdouble y, GLdouble z, GLdouble w);
+typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIB4DVPROC)(GLuint index, const GLdouble * v);
+typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIB4FPROC)(GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w);
+typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIB4FVPROC)(GLuint index, const GLfloat * v);
+typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIB4IVPROC)(GLuint index, const GLint * v);
+typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIB4SPROC)(GLuint index, GLshort x, GLshort y, GLshort z, GLshort w);
+typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIB4SVPROC)(GLuint index, const GLshort * v);
+typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIB4UBVPROC)(GLuint index, const GLubyte * v);
+typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIB4UIVPROC)(GLuint index, const GLuint * v);
+typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIB4USVPROC)(GLuint index, const GLushort * v);
+typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIBDIVISORPROC)(GLuint index, GLuint divisor);
+typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIBI1IPROC)(GLuint index, GLint x);
+typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIBI1IVPROC)(GLuint index, const GLint * v);
+typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIBI1UIPROC)(GLuint index, GLuint x);
+typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIBI1UIVPROC)(GLuint index, const GLuint * v);
+typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIBI2IPROC)(GLuint index, GLint x, GLint y);
+typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIBI2IVPROC)(GLuint index, const GLint * v);
+typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIBI2UIPROC)(GLuint index, GLuint x, GLuint y);
+typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIBI2UIVPROC)(GLuint index, const GLuint * v);
+typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIBI3IPROC)(GLuint index, GLint x, GLint y, GLint z);
+typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIBI3IVPROC)(GLuint index, const GLint * v);
+typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIBI3UIPROC)(GLuint index, GLuint x, GLuint y, GLuint z);
+typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIBI3UIVPROC)(GLuint index, const GLuint * v);
+typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIBI4BVPROC)(GLuint index, const GLbyte * v);
+typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIBI4IPROC)(GLuint index, GLint x, GLint y, GLint z, GLint w);
+typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIBI4IVPROC)(GLuint index, const GLint * v);
+typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIBI4SVPROC)(GLuint index, const GLshort * v);
+typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIBI4UBVPROC)(GLuint index, const GLubyte * v);
+typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIBI4UIPROC)(GLuint index, GLuint x, GLuint y, GLuint z, GLuint w);
+typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIBI4UIVPROC)(GLuint index, const GLuint * v);
+typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIBI4USVPROC)(GLuint index, const GLushort * v);
+typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIBIPOINTERPROC)(GLuint index, GLint size, GLenum type, GLsizei stride, const void * pointer);
+typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIBP1UIPROC)(GLuint index, GLenum type, GLboolean normalized, GLuint value);
+typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIBP1UIVPROC)(GLuint index, GLenum type, GLboolean normalized, const GLuint * value);
+typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIBP2UIPROC)(GLuint index, GLenum type, GLboolean normalized, GLuint value);
+typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIBP2UIVPROC)(GLuint index, GLenum type, GLboolean normalized, const GLuint * value);
+typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIBP3UIPROC)(GLuint index, GLenum type, GLboolean normalized, GLuint value);
+typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIBP3UIVPROC)(GLuint index, GLenum type, GLboolean normalized, const GLuint * value);
+typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIBP4UIPROC)(GLuint index, GLenum type, GLboolean normalized, GLuint value);
+typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIBP4UIVPROC)(GLuint index, GLenum type, GLboolean normalized, const GLuint * value);
+typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIBPOINTERPROC)(GLuint index, GLint size, GLenum type, GLboolean normalized, GLsizei stride, const void * pointer);
+typedef void (GLAD_API_PTR *PFNGLVERTEXP2UIPROC)(GLenum type, GLuint value);
+typedef void (GLAD_API_PTR *PFNGLVERTEXP2UIVPROC)(GLenum type, const GLuint * value);
+typedef void (GLAD_API_PTR *PFNGLVERTEXP3UIPROC)(GLenum type, GLuint value);
+typedef void (GLAD_API_PTR *PFNGLVERTEXP3UIVPROC)(GLenum type, const GLuint * value);
+typedef void (GLAD_API_PTR *PFNGLVERTEXP4UIPROC)(GLenum type, GLuint value);
+typedef void (GLAD_API_PTR *PFNGLVERTEXP4UIVPROC)(GLenum type, const GLuint * value);
+typedef void (GLAD_API_PTR *PFNGLVERTEXPOINTERPROC)(GLint size, GLenum type, GLsizei stride, const void * pointer);
+typedef void (GLAD_API_PTR *PFNGLVIEWPORTPROC)(GLint x, GLint y, GLsizei width, GLsizei height);
+typedef void (GLAD_API_PTR *PFNGLWAITSYNCPROC)(GLsync sync, GLbitfield flags, GLuint64 timeout);
+typedef void (GLAD_API_PTR *PFNGLWINDOWPOS2DPROC)(GLdouble x, GLdouble y);
+typedef void (GLAD_API_PTR *PFNGLWINDOWPOS2DVPROC)(const GLdouble * v);
+typedef void (GLAD_API_PTR *PFNGLWINDOWPOS2FPROC)(GLfloat x, GLfloat y);
+typedef void (GLAD_API_PTR *PFNGLWINDOWPOS2FVPROC)(const GLfloat * v);
+typedef void (GLAD_API_PTR *PFNGLWINDOWPOS2IPROC)(GLint x, GLint y);
+typedef void (GLAD_API_PTR *PFNGLWINDOWPOS2IVPROC)(const GLint * v);
+typedef void (GLAD_API_PTR *PFNGLWINDOWPOS2SPROC)(GLshort x, GLshort y);
+typedef void (GLAD_API_PTR *PFNGLWINDOWPOS2SVPROC)(const GLshort * v);
+typedef void (GLAD_API_PTR *PFNGLWINDOWPOS3DPROC)(GLdouble x, GLdouble y, GLdouble z);
+typedef void (GLAD_API_PTR *PFNGLWINDOWPOS3DVPROC)(const GLdouble * v);
+typedef void (GLAD_API_PTR *PFNGLWINDOWPOS3FPROC)(GLfloat x, GLfloat y, GLfloat z);
+typedef void (GLAD_API_PTR *PFNGLWINDOWPOS3FVPROC)(const GLfloat * v);
+typedef void (GLAD_API_PTR *PFNGLWINDOWPOS3IPROC)(GLint x, GLint y, GLint z);
+typedef void (GLAD_API_PTR *PFNGLWINDOWPOS3IVPROC)(const GLint * v);
+typedef void (GLAD_API_PTR *PFNGLWINDOWPOS3SPROC)(GLshort x, GLshort y, GLshort z);
+typedef void (GLAD_API_PTR *PFNGLWINDOWPOS3SVPROC)(const GLshort * v);
+
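[editorial aside, not part of the patch] The typedef block above declares one GLAD_API_PTR function-pointer type per OpenGL entry point; the block that follows pairs each type with a GLAD_API_CALL global pointer named glad_glX plus a `#define glX glad_glX` alias, so application code keeps calling the familiar glX names while the loader fills the pointers in at run time. A minimal usage sketch, assuming a GLAD2-style gladLoadGL entry point and GLFW's glfwGetProcAddress as the resolver (neither appears in this patch; the file name and window setup are purely illustrative):

/* sketch.c -- illustrative only, not generated by GLAD and not part of this diff */
#include <glad/gl.h>     /* the generated header shown in this patch */
#include <GLFW/glfw3.h>  /* assumed provider of a current GL context and proc resolver */

int main(void) {
    if (!glfwInit())
        return 1;
    GLFWwindow *win = glfwCreateWindow(640, 480, "demo", NULL, NULL);
    glfwMakeContextCurrent(win);

    /* Resolve every glad_gl* pointer declared in this header through the platform loader. */
    if (!gladLoadGL((GLADloadfunc) glfwGetProcAddress))
        return 1;

    /* glGenBuffers expands to glad_glGenBuffers via the #define aliases in this header. */
    GLuint vbo;
    glGenBuffers(1, &vbo);

    glfwDestroyWindow(win);
    glfwTerminate();
    return 0;
}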
+GLAD_API_CALL PFNGLACCUMPROC glad_glAccum;
+#define glAccum glad_glAccum
+GLAD_API_CALL PFNGLACTIVETEXTUREPROC glad_glActiveTexture;
+#define glActiveTexture glad_glActiveTexture
+GLAD_API_CALL PFNGLALPHAFUNCPROC glad_glAlphaFunc;
+#define glAlphaFunc glad_glAlphaFunc
+GLAD_API_CALL PFNGLARETEXTURESRESIDENTPROC glad_glAreTexturesResident;
+#define glAreTexturesResident glad_glAreTexturesResident
+GLAD_API_CALL PFNGLARRAYELEMENTPROC glad_glArrayElement;
+#define glArrayElement glad_glArrayElement
+GLAD_API_CALL PFNGLATTACHSHADERPROC glad_glAttachShader;
+#define glAttachShader glad_glAttachShader
+GLAD_API_CALL PFNGLBEGINPROC glad_glBegin;
+#define glBegin glad_glBegin
+GLAD_API_CALL PFNGLBEGINCONDITIONALRENDERPROC glad_glBeginConditionalRender;
+#define glBeginConditionalRender glad_glBeginConditionalRender
+GLAD_API_CALL PFNGLBEGINQUERYPROC glad_glBeginQuery;
+#define glBeginQuery glad_glBeginQuery
+GLAD_API_CALL PFNGLBEGINTRANSFORMFEEDBACKPROC glad_glBeginTransformFeedback;
+#define glBeginTransformFeedback glad_glBeginTransformFeedback
+GLAD_API_CALL PFNGLBINDATTRIBLOCATIONPROC glad_glBindAttribLocation;
+#define glBindAttribLocation glad_glBindAttribLocation
+GLAD_API_CALL PFNGLBINDBUFFERPROC glad_glBindBuffer;
+#define glBindBuffer glad_glBindBuffer
+GLAD_API_CALL PFNGLBINDBUFFERBASEPROC glad_glBindBufferBase;
+#define glBindBufferBase glad_glBindBufferBase
+GLAD_API_CALL PFNGLBINDBUFFERRANGEPROC glad_glBindBufferRange;
+#define glBindBufferRange glad_glBindBufferRange
+GLAD_API_CALL PFNGLBINDFRAGDATALOCATIONPROC glad_glBindFragDataLocation;
+#define glBindFragDataLocation glad_glBindFragDataLocation
+GLAD_API_CALL PFNGLBINDFRAGDATALOCATIONINDEXEDPROC glad_glBindFragDataLocationIndexed;
+#define glBindFragDataLocationIndexed glad_glBindFragDataLocationIndexed
+GLAD_API_CALL PFNGLBINDFRAMEBUFFERPROC glad_glBindFramebuffer;
+#define glBindFramebuffer glad_glBindFramebuffer
+GLAD_API_CALL PFNGLBINDRENDERBUFFERPROC glad_glBindRenderbuffer;
+#define glBindRenderbuffer glad_glBindRenderbuffer
+GLAD_API_CALL PFNGLBINDSAMPLERPROC glad_glBindSampler;
+#define glBindSampler glad_glBindSampler
+GLAD_API_CALL PFNGLBINDTEXTUREPROC glad_glBindTexture;
+#define glBindTexture glad_glBindTexture
+GLAD_API_CALL PFNGLBINDVERTEXARRAYPROC glad_glBindVertexArray;
+#define glBindVertexArray glad_glBindVertexArray
+GLAD_API_CALL PFNGLBITMAPPROC glad_glBitmap;
+#define glBitmap glad_glBitmap
+GLAD_API_CALL PFNGLBLENDCOLORPROC glad_glBlendColor;
+#define glBlendColor glad_glBlendColor
+GLAD_API_CALL PFNGLBLENDEQUATIONPROC glad_glBlendEquation;
+#define glBlendEquation glad_glBlendEquation
+GLAD_API_CALL PFNGLBLENDEQUATIONSEPARATEPROC glad_glBlendEquationSeparate;
+#define glBlendEquationSeparate glad_glBlendEquationSeparate
+GLAD_API_CALL PFNGLBLENDFUNCPROC glad_glBlendFunc;
+#define glBlendFunc glad_glBlendFunc
+GLAD_API_CALL PFNGLBLENDFUNCSEPARATEPROC glad_glBlendFuncSeparate;
+#define glBlendFuncSeparate glad_glBlendFuncSeparate
+GLAD_API_CALL PFNGLBLITFRAMEBUFFERPROC glad_glBlitFramebuffer;
+#define glBlitFramebuffer glad_glBlitFramebuffer
+GLAD_API_CALL PFNGLBUFFERDATAPROC glad_glBufferData;
+#define glBufferData glad_glBufferData
+GLAD_API_CALL PFNGLBUFFERSUBDATAPROC glad_glBufferSubData;
+#define glBufferSubData glad_glBufferSubData
+GLAD_API_CALL PFNGLCALLLISTPROC glad_glCallList;
+#define glCallList glad_glCallList
+GLAD_API_CALL PFNGLCALLLISTSPROC glad_glCallLists;
+#define glCallLists glad_glCallLists
+GLAD_API_CALL PFNGLCHECKFRAMEBUFFERSTATUSPROC glad_glCheckFramebufferStatus;
+#define glCheckFramebufferStatus glad_glCheckFramebufferStatus
+GLAD_API_CALL PFNGLCLAMPCOLORPROC glad_glClampColor;
+#define glClampColor glad_glClampColor
+GLAD_API_CALL PFNGLCLEARPROC glad_glClear;
+#define glClear glad_glClear
+GLAD_API_CALL PFNGLCLEARACCUMPROC glad_glClearAccum;
+#define glClearAccum glad_glClearAccum
+GLAD_API_CALL PFNGLCLEARBUFFERFIPROC glad_glClearBufferfi;
+#define glClearBufferfi glad_glClearBufferfi
+GLAD_API_CALL PFNGLCLEARBUFFERFVPROC glad_glClearBufferfv;
+#define glClearBufferfv glad_glClearBufferfv
+GLAD_API_CALL PFNGLCLEARBUFFERIVPROC glad_glClearBufferiv;
+#define glClearBufferiv glad_glClearBufferiv
+GLAD_API_CALL PFNGLCLEARBUFFERUIVPROC glad_glClearBufferuiv;
+#define glClearBufferuiv glad_glClearBufferuiv
+GLAD_API_CALL PFNGLCLEARCOLORPROC glad_glClearColor;
+#define glClearColor glad_glClearColor
+GLAD_API_CALL PFNGLCLEARDEPTHPROC glad_glClearDepth;
+#define glClearDepth glad_glClearDepth
+GLAD_API_CALL PFNGLCLEARINDEXPROC glad_glClearIndex;
+#define glClearIndex glad_glClearIndex
+GLAD_API_CALL PFNGLCLEARSTENCILPROC glad_glClearStencil;
+#define glClearStencil glad_glClearStencil
+GLAD_API_CALL PFNGLCLIENTACTIVETEXTUREPROC glad_glClientActiveTexture;
+#define glClientActiveTexture glad_glClientActiveTexture
+GLAD_API_CALL PFNGLCLIENTWAITSYNCPROC glad_glClientWaitSync;
+#define glClientWaitSync glad_glClientWaitSync
+GLAD_API_CALL PFNGLCLIPPLANEPROC glad_glClipPlane;
+#define glClipPlane glad_glClipPlane
+GLAD_API_CALL PFNGLCOLOR3BPROC glad_glColor3b;
+#define glColor3b glad_glColor3b
+GLAD_API_CALL PFNGLCOLOR3BVPROC glad_glColor3bv;
+#define glColor3bv glad_glColor3bv
+GLAD_API_CALL PFNGLCOLOR3DPROC glad_glColor3d;
+#define glColor3d glad_glColor3d
+GLAD_API_CALL PFNGLCOLOR3DVPROC glad_glColor3dv;
+#define glColor3dv glad_glColor3dv
+GLAD_API_CALL PFNGLCOLOR3FPROC glad_glColor3f;
+#define glColor3f glad_glColor3f
+GLAD_API_CALL PFNGLCOLOR3FVPROC glad_glColor3fv;
+#define glColor3fv glad_glColor3fv
+GLAD_API_CALL PFNGLCOLOR3IPROC glad_glColor3i;
+#define glColor3i glad_glColor3i
+GLAD_API_CALL PFNGLCOLOR3IVPROC glad_glColor3iv;
+#define glColor3iv glad_glColor3iv
+GLAD_API_CALL PFNGLCOLOR3SPROC glad_glColor3s;
+#define glColor3s glad_glColor3s
+GLAD_API_CALL PFNGLCOLOR3SVPROC glad_glColor3sv;
+#define glColor3sv glad_glColor3sv
+GLAD_API_CALL PFNGLCOLOR3UBPROC glad_glColor3ub;
+#define glColor3ub glad_glColor3ub
+GLAD_API_CALL PFNGLCOLOR3UBVPROC glad_glColor3ubv;
+#define glColor3ubv glad_glColor3ubv
+GLAD_API_CALL PFNGLCOLOR3UIPROC glad_glColor3ui;
+#define glColor3ui glad_glColor3ui
+GLAD_API_CALL PFNGLCOLOR3UIVPROC glad_glColor3uiv;
+#define glColor3uiv glad_glColor3uiv
+GLAD_API_CALL PFNGLCOLOR3USPROC glad_glColor3us;
+#define glColor3us glad_glColor3us
+GLAD_API_CALL PFNGLCOLOR3USVPROC glad_glColor3usv;
+#define glColor3usv glad_glColor3usv
+GLAD_API_CALL PFNGLCOLOR4BPROC glad_glColor4b;
+#define glColor4b glad_glColor4b
+GLAD_API_CALL PFNGLCOLOR4BVPROC glad_glColor4bv;
+#define glColor4bv glad_glColor4bv
+GLAD_API_CALL PFNGLCOLOR4DPROC glad_glColor4d;
+#define glColor4d glad_glColor4d
+GLAD_API_CALL PFNGLCOLOR4DVPROC glad_glColor4dv;
+#define glColor4dv glad_glColor4dv
+GLAD_API_CALL PFNGLCOLOR4FPROC glad_glColor4f;
+#define glColor4f glad_glColor4f
+GLAD_API_CALL PFNGLCOLOR4FVPROC glad_glColor4fv;
+#define glColor4fv glad_glColor4fv
+GLAD_API_CALL PFNGLCOLOR4IPROC glad_glColor4i;
+#define glColor4i glad_glColor4i
+GLAD_API_CALL PFNGLCOLOR4IVPROC glad_glColor4iv;
+#define glColor4iv glad_glColor4iv
+GLAD_API_CALL PFNGLCOLOR4SPROC glad_glColor4s;
+#define glColor4s glad_glColor4s
+GLAD_API_CALL PFNGLCOLOR4SVPROC glad_glColor4sv;
+#define glColor4sv glad_glColor4sv
+GLAD_API_CALL PFNGLCOLOR4UBPROC glad_glColor4ub;
+#define glColor4ub glad_glColor4ub
+GLAD_API_CALL PFNGLCOLOR4UBVPROC glad_glColor4ubv;
+#define glColor4ubv glad_glColor4ubv
+GLAD_API_CALL PFNGLCOLOR4UIPROC glad_glColor4ui;
+#define glColor4ui glad_glColor4ui
+GLAD_API_CALL PFNGLCOLOR4UIVPROC glad_glColor4uiv;
+#define glColor4uiv glad_glColor4uiv
+GLAD_API_CALL PFNGLCOLOR4USPROC glad_glColor4us;
+#define glColor4us glad_glColor4us
+GLAD_API_CALL PFNGLCOLOR4USVPROC glad_glColor4usv;
+#define glColor4usv glad_glColor4usv
+GLAD_API_CALL PFNGLCOLORMASKPROC glad_glColorMask;
+#define glColorMask glad_glColorMask
+GLAD_API_CALL PFNGLCOLORMASKIPROC glad_glColorMaski;
+#define glColorMaski glad_glColorMaski
+GLAD_API_CALL PFNGLCOLORMATERIALPROC glad_glColorMaterial;
+#define glColorMaterial glad_glColorMaterial
+GLAD_API_CALL PFNGLCOLORP3UIPROC glad_glColorP3ui;
+#define glColorP3ui glad_glColorP3ui
+GLAD_API_CALL PFNGLCOLORP3UIVPROC glad_glColorP3uiv;
+#define glColorP3uiv glad_glColorP3uiv
+GLAD_API_CALL PFNGLCOLORP4UIPROC glad_glColorP4ui;
+#define glColorP4ui glad_glColorP4ui
+GLAD_API_CALL PFNGLCOLORP4UIVPROC glad_glColorP4uiv;
+#define glColorP4uiv glad_glColorP4uiv
+GLAD_API_CALL PFNGLCOLORPOINTERPROC glad_glColorPointer;
+#define glColorPointer glad_glColorPointer
+GLAD_API_CALL PFNGLCOMPILESHADERPROC glad_glCompileShader;
+#define glCompileShader glad_glCompileShader
+GLAD_API_CALL PFNGLCOMPRESSEDTEXIMAGE1DPROC glad_glCompressedTexImage1D;
+#define glCompressedTexImage1D glad_glCompressedTexImage1D
+GLAD_API_CALL PFNGLCOMPRESSEDTEXIMAGE2DPROC glad_glCompressedTexImage2D;
+#define glCompressedTexImage2D glad_glCompressedTexImage2D
+GLAD_API_CALL PFNGLCOMPRESSEDTEXIMAGE3DPROC glad_glCompressedTexImage3D;
+#define glCompressedTexImage3D glad_glCompressedTexImage3D
+GLAD_API_CALL PFNGLCOMPRESSEDTEXSUBIMAGE1DPROC glad_glCompressedTexSubImage1D;
+#define glCompressedTexSubImage1D glad_glCompressedTexSubImage1D
+GLAD_API_CALL PFNGLCOMPRESSEDTEXSUBIMAGE2DPROC glad_glCompressedTexSubImage2D;
+#define glCompressedTexSubImage2D glad_glCompressedTexSubImage2D
+GLAD_API_CALL PFNGLCOMPRESSEDTEXSUBIMAGE3DPROC glad_glCompressedTexSubImage3D;
+#define glCompressedTexSubImage3D glad_glCompressedTexSubImage3D
+GLAD_API_CALL PFNGLCOPYBUFFERSUBDATAPROC glad_glCopyBufferSubData;
+#define glCopyBufferSubData glad_glCopyBufferSubData
+GLAD_API_CALL PFNGLCOPYPIXELSPROC glad_glCopyPixels;
+#define glCopyPixels glad_glCopyPixels
+GLAD_API_CALL PFNGLCOPYTEXIMAGE1DPROC glad_glCopyTexImage1D;
+#define glCopyTexImage1D glad_glCopyTexImage1D
+GLAD_API_CALL PFNGLCOPYTEXIMAGE2DPROC glad_glCopyTexImage2D;
+#define glCopyTexImage2D glad_glCopyTexImage2D
+GLAD_API_CALL PFNGLCOPYTEXSUBIMAGE1DPROC glad_glCopyTexSubImage1D;
+#define glCopyTexSubImage1D glad_glCopyTexSubImage1D
+GLAD_API_CALL PFNGLCOPYTEXSUBIMAGE2DPROC glad_glCopyTexSubImage2D;
+#define glCopyTexSubImage2D glad_glCopyTexSubImage2D
+GLAD_API_CALL PFNGLCOPYTEXSUBIMAGE3DPROC glad_glCopyTexSubImage3D;
+#define glCopyTexSubImage3D glad_glCopyTexSubImage3D
+GLAD_API_CALL PFNGLCREATEPROGRAMPROC glad_glCreateProgram;
+#define glCreateProgram glad_glCreateProgram
+GLAD_API_CALL PFNGLCREATESHADERPROC glad_glCreateShader;
+#define glCreateShader glad_glCreateShader
+GLAD_API_CALL PFNGLCULLFACEPROC glad_glCullFace;
+#define glCullFace glad_glCullFace
+GLAD_API_CALL PFNGLDEBUGMESSAGECALLBACKPROC glad_glDebugMessageCallback;
+#define glDebugMessageCallback glad_glDebugMessageCallback
+GLAD_API_CALL PFNGLDEBUGMESSAGECONTROLPROC glad_glDebugMessageControl;
+#define glDebugMessageControl glad_glDebugMessageControl
+GLAD_API_CALL PFNGLDEBUGMESSAGEINSERTPROC glad_glDebugMessageInsert;
+#define glDebugMessageInsert glad_glDebugMessageInsert
+GLAD_API_CALL PFNGLDELETEBUFFERSPROC glad_glDeleteBuffers;
+#define glDeleteBuffers glad_glDeleteBuffers
+GLAD_API_CALL PFNGLDELETEFRAMEBUFFERSPROC glad_glDeleteFramebuffers;
+#define glDeleteFramebuffers glad_glDeleteFramebuffers
+GLAD_API_CALL PFNGLDELETELISTSPROC glad_glDeleteLists;
+#define glDeleteLists glad_glDeleteLists
+GLAD_API_CALL PFNGLDELETEPROGRAMPROC glad_glDeleteProgram;
+#define glDeleteProgram glad_glDeleteProgram
+GLAD_API_CALL PFNGLDELETEQUERIESPROC glad_glDeleteQueries;
+#define glDeleteQueries glad_glDeleteQueries
+GLAD_API_CALL PFNGLDELETERENDERBUFFERSPROC glad_glDeleteRenderbuffers;
+#define glDeleteRenderbuffers glad_glDeleteRenderbuffers
+GLAD_API_CALL PFNGLDELETESAMPLERSPROC glad_glDeleteSamplers;
+#define glDeleteSamplers glad_glDeleteSamplers
+GLAD_API_CALL PFNGLDELETESHADERPROC glad_glDeleteShader;
+#define glDeleteShader glad_glDeleteShader
+GLAD_API_CALL PFNGLDELETESYNCPROC glad_glDeleteSync;
+#define glDeleteSync glad_glDeleteSync
+GLAD_API_CALL PFNGLDELETETEXTURESPROC glad_glDeleteTextures;
+#define glDeleteTextures glad_glDeleteTextures
+GLAD_API_CALL PFNGLDELETEVERTEXARRAYSPROC glad_glDeleteVertexArrays;
+#define glDeleteVertexArrays glad_glDeleteVertexArrays
+GLAD_API_CALL PFNGLDEPTHFUNCPROC glad_glDepthFunc;
+#define glDepthFunc glad_glDepthFunc
+GLAD_API_CALL PFNGLDEPTHMASKPROC glad_glDepthMask;
+#define glDepthMask glad_glDepthMask
+GLAD_API_CALL PFNGLDEPTHRANGEPROC glad_glDepthRange;
+#define glDepthRange glad_glDepthRange
+GLAD_API_CALL PFNGLDETACHSHADERPROC glad_glDetachShader;
+#define glDetachShader glad_glDetachShader
+GLAD_API_CALL PFNGLDISABLEPROC glad_glDisable;
+#define glDisable glad_glDisable
+GLAD_API_CALL PFNGLDISABLECLIENTSTATEPROC glad_glDisableClientState;
+#define glDisableClientState glad_glDisableClientState
+GLAD_API_CALL PFNGLDISABLEVERTEXATTRIBARRAYPROC glad_glDisableVertexAttribArray;
+#define glDisableVertexAttribArray glad_glDisableVertexAttribArray
+GLAD_API_CALL PFNGLDISABLEIPROC glad_glDisablei;
+#define glDisablei glad_glDisablei
+GLAD_API_CALL PFNGLDRAWARRAYSPROC glad_glDrawArrays;
+#define glDrawArrays glad_glDrawArrays
+GLAD_API_CALL PFNGLDRAWARRAYSINSTANCEDPROC glad_glDrawArraysInstanced;
+#define glDrawArraysInstanced glad_glDrawArraysInstanced
+GLAD_API_CALL PFNGLDRAWBUFFERPROC glad_glDrawBuffer;
+#define glDrawBuffer glad_glDrawBuffer
+GLAD_API_CALL PFNGLDRAWBUFFERSPROC glad_glDrawBuffers;
+#define glDrawBuffers glad_glDrawBuffers
+GLAD_API_CALL PFNGLDRAWELEMENTSPROC glad_glDrawElements;
+#define glDrawElements glad_glDrawElements
+GLAD_API_CALL PFNGLDRAWELEMENTSBASEVERTEXPROC glad_glDrawElementsBaseVertex;
+#define glDrawElementsBaseVertex glad_glDrawElementsBaseVertex
+GLAD_API_CALL PFNGLDRAWELEMENTSINSTANCEDPROC glad_glDrawElementsInstanced;
+#define glDrawElementsInstanced glad_glDrawElementsInstanced
+GLAD_API_CALL PFNGLDRAWELEMENTSINSTANCEDBASEVERTEXPROC glad_glDrawElementsInstancedBaseVertex;
+#define glDrawElementsInstancedBaseVertex glad_glDrawElementsInstancedBaseVertex
+GLAD_API_CALL PFNGLDRAWPIXELSPROC glad_glDrawPixels;
+#define glDrawPixels glad_glDrawPixels
+GLAD_API_CALL PFNGLDRAWRANGEELEMENTSPROC glad_glDrawRangeElements;
+#define glDrawRangeElements glad_glDrawRangeElements
+GLAD_API_CALL PFNGLDRAWRANGEELEMENTSBASEVERTEXPROC glad_glDrawRangeElementsBaseVertex;
+#define glDrawRangeElementsBaseVertex glad_glDrawRangeElementsBaseVertex
+GLAD_API_CALL PFNGLEDGEFLAGPROC glad_glEdgeFlag;
+#define glEdgeFlag glad_glEdgeFlag
+GLAD_API_CALL PFNGLEDGEFLAGPOINTERPROC glad_glEdgeFlagPointer;
+#define glEdgeFlagPointer glad_glEdgeFlagPointer
+GLAD_API_CALL PFNGLEDGEFLAGVPROC glad_glEdgeFlagv;
+#define glEdgeFlagv glad_glEdgeFlagv
+GLAD_API_CALL PFNGLENABLEPROC glad_glEnable;
+#define glEnable glad_glEnable
+GLAD_API_CALL PFNGLENABLECLIENTSTATEPROC glad_glEnableClientState;
+#define glEnableClientState glad_glEnableClientState
+GLAD_API_CALL PFNGLENABLEVERTEXATTRIBARRAYPROC glad_glEnableVertexAttribArray;
+#define glEnableVertexAttribArray glad_glEnableVertexAttribArray
+GLAD_API_CALL PFNGLENABLEIPROC glad_glEnablei;
+#define glEnablei glad_glEnablei
+GLAD_API_CALL PFNGLENDPROC glad_glEnd;
+#define glEnd glad_glEnd
+GLAD_API_CALL PFNGLENDCONDITIONALRENDERPROC glad_glEndConditionalRender;
+#define glEndConditionalRender glad_glEndConditionalRender
+GLAD_API_CALL PFNGLENDLISTPROC glad_glEndList;
+#define glEndList glad_glEndList
+GLAD_API_CALL PFNGLENDQUERYPROC glad_glEndQuery;
+#define glEndQuery glad_glEndQuery
+GLAD_API_CALL PFNGLENDTRANSFORMFEEDBACKPROC glad_glEndTransformFeedback;
+#define glEndTransformFeedback glad_glEndTransformFeedback
+GLAD_API_CALL PFNGLEVALCOORD1DPROC glad_glEvalCoord1d;
+#define glEvalCoord1d glad_glEvalCoord1d
+GLAD_API_CALL PFNGLEVALCOORD1DVPROC glad_glEvalCoord1dv;
+#define glEvalCoord1dv glad_glEvalCoord1dv
+GLAD_API_CALL PFNGLEVALCOORD1FPROC glad_glEvalCoord1f;
+#define glEvalCoord1f glad_glEvalCoord1f
+GLAD_API_CALL PFNGLEVALCOORD1FVPROC glad_glEvalCoord1fv;
+#define glEvalCoord1fv glad_glEvalCoord1fv
+GLAD_API_CALL PFNGLEVALCOORD2DPROC glad_glEvalCoord2d;
+#define glEvalCoord2d glad_glEvalCoord2d
+GLAD_API_CALL PFNGLEVALCOORD2DVPROC glad_glEvalCoord2dv;
+#define glEvalCoord2dv glad_glEvalCoord2dv
+GLAD_API_CALL PFNGLEVALCOORD2FPROC glad_glEvalCoord2f;
+#define glEvalCoord2f glad_glEvalCoord2f
+GLAD_API_CALL PFNGLEVALCOORD2FVPROC glad_glEvalCoord2fv;
+#define glEvalCoord2fv glad_glEvalCoord2fv
+GLAD_API_CALL PFNGLEVALMESH1PROC glad_glEvalMesh1;
+#define glEvalMesh1 glad_glEvalMesh1
+GLAD_API_CALL PFNGLEVALMESH2PROC glad_glEvalMesh2;
+#define glEvalMesh2 glad_glEvalMesh2
+GLAD_API_CALL PFNGLEVALPOINT1PROC glad_glEvalPoint1;
+#define glEvalPoint1 glad_glEvalPoint1
+GLAD_API_CALL PFNGLEVALPOINT2PROC glad_glEvalPoint2;
+#define glEvalPoint2 glad_glEvalPoint2
+GLAD_API_CALL PFNGLFEEDBACKBUFFERPROC glad_glFeedbackBuffer;
+#define glFeedbackBuffer glad_glFeedbackBuffer
+GLAD_API_CALL PFNGLFENCESYNCPROC glad_glFenceSync;
+#define glFenceSync glad_glFenceSync
+GLAD_API_CALL PFNGLFINISHPROC glad_glFinish;
+#define glFinish glad_glFinish
+GLAD_API_CALL PFNGLFLUSHPROC glad_glFlush;
+#define glFlush glad_glFlush
+GLAD_API_CALL PFNGLFLUSHMAPPEDBUFFERRANGEPROC glad_glFlushMappedBufferRange;
+#define glFlushMappedBufferRange glad_glFlushMappedBufferRange
+GLAD_API_CALL PFNGLFOGCOORDPOINTERPROC glad_glFogCoordPointer;
+#define glFogCoordPointer glad_glFogCoordPointer
+GLAD_API_CALL PFNGLFOGCOORDDPROC glad_glFogCoordd;
+#define glFogCoordd glad_glFogCoordd
+GLAD_API_CALL PFNGLFOGCOORDDVPROC glad_glFogCoorddv;
+#define glFogCoorddv glad_glFogCoorddv
+GLAD_API_CALL PFNGLFOGCOORDFPROC glad_glFogCoordf;
+#define glFogCoordf glad_glFogCoordf
+GLAD_API_CALL PFNGLFOGCOORDFVPROC glad_glFogCoordfv;
+#define glFogCoordfv glad_glFogCoordfv
+GLAD_API_CALL PFNGLFOGFPROC glad_glFogf;
+#define glFogf glad_glFogf
+GLAD_API_CALL PFNGLFOGFVPROC glad_glFogfv;
+#define glFogfv glad_glFogfv
+GLAD_API_CALL PFNGLFOGIPROC glad_glFogi;
+#define glFogi glad_glFogi
+GLAD_API_CALL PFNGLFOGIVPROC glad_glFogiv;
+#define glFogiv glad_glFogiv
+GLAD_API_CALL PFNGLFRAMEBUFFERRENDERBUFFERPROC glad_glFramebufferRenderbuffer;
+#define glFramebufferRenderbuffer glad_glFramebufferRenderbuffer
+GLAD_API_CALL PFNGLFRAMEBUFFERTEXTUREPROC glad_glFramebufferTexture;
+#define glFramebufferTexture glad_glFramebufferTexture
+GLAD_API_CALL PFNGLFRAMEBUFFERTEXTURE1DPROC glad_glFramebufferTexture1D;
+#define glFramebufferTexture1D glad_glFramebufferTexture1D
+GLAD_API_CALL PFNGLFRAMEBUFFERTEXTURE2DPROC glad_glFramebufferTexture2D;
+#define glFramebufferTexture2D glad_glFramebufferTexture2D
+GLAD_API_CALL PFNGLFRAMEBUFFERTEXTURE3DPROC glad_glFramebufferTexture3D;
+#define glFramebufferTexture3D glad_glFramebufferTexture3D
+GLAD_API_CALL PFNGLFRAMEBUFFERTEXTURELAYERPROC glad_glFramebufferTextureLayer;
+#define glFramebufferTextureLayer glad_glFramebufferTextureLayer
+GLAD_API_CALL PFNGLFRONTFACEPROC glad_glFrontFace;
+#define glFrontFace glad_glFrontFace
+GLAD_API_CALL PFNGLFRUSTUMPROC glad_glFrustum;
+#define glFrustum glad_glFrustum
+GLAD_API_CALL PFNGLGENBUFFERSPROC glad_glGenBuffers;
+#define glGenBuffers glad_glGenBuffers
+GLAD_API_CALL PFNGLGENFRAMEBUFFERSPROC glad_glGenFramebuffers;
+#define glGenFramebuffers glad_glGenFramebuffers
+GLAD_API_CALL PFNGLGENLISTSPROC glad_glGenLists;
+#define glGenLists glad_glGenLists
+GLAD_API_CALL PFNGLGENQUERIESPROC glad_glGenQueries;
+#define glGenQueries glad_glGenQueries
+GLAD_API_CALL PFNGLGENRENDERBUFFERSPROC glad_glGenRenderbuffers;
+#define glGenRenderbuffers glad_glGenRenderbuffers
+GLAD_API_CALL PFNGLGENSAMPLERSPROC glad_glGenSamplers;
+#define glGenSamplers glad_glGenSamplers
+GLAD_API_CALL PFNGLGENTEXTURESPROC glad_glGenTextures;
+#define glGenTextures glad_glGenTextures
+GLAD_API_CALL PFNGLGENVERTEXARRAYSPROC glad_glGenVertexArrays;
+#define glGenVertexArrays glad_glGenVertexArrays
+GLAD_API_CALL PFNGLGENERATEMIPMAPPROC glad_glGenerateMipmap;
+#define glGenerateMipmap glad_glGenerateMipmap
+GLAD_API_CALL PFNGLGETACTIVEATTRIBPROC glad_glGetActiveAttrib;
+#define glGetActiveAttrib glad_glGetActiveAttrib
+GLAD_API_CALL PFNGLGETACTIVEUNIFORMPROC glad_glGetActiveUniform;
+#define glGetActiveUniform glad_glGetActiveUniform
+GLAD_API_CALL PFNGLGETACTIVEUNIFORMBLOCKNAMEPROC glad_glGetActiveUniformBlockName;
+#define glGetActiveUniformBlockName glad_glGetActiveUniformBlockName
+GLAD_API_CALL PFNGLGETACTIVEUNIFORMBLOCKIVPROC glad_glGetActiveUniformBlockiv;
+#define glGetActiveUniformBlockiv glad_glGetActiveUniformBlockiv
+GLAD_API_CALL PFNGLGETACTIVEUNIFORMNAMEPROC glad_glGetActiveUniformName;
+#define glGetActiveUniformName glad_glGetActiveUniformName
+GLAD_API_CALL PFNGLGETACTIVEUNIFORMSIVPROC glad_glGetActiveUniformsiv;
+#define glGetActiveUniformsiv glad_glGetActiveUniformsiv
+GLAD_API_CALL PFNGLGETATTACHEDSHADERSPROC glad_glGetAttachedShaders;
+#define glGetAttachedShaders glad_glGetAttachedShaders
+GLAD_API_CALL PFNGLGETATTRIBLOCATIONPROC glad_glGetAttribLocation;
+#define glGetAttribLocation glad_glGetAttribLocation
+GLAD_API_CALL PFNGLGETBOOLEANI_VPROC glad_glGetBooleani_v;
+#define glGetBooleani_v glad_glGetBooleani_v
+GLAD_API_CALL PFNGLGETBOOLEANVPROC glad_glGetBooleanv;
+#define glGetBooleanv glad_glGetBooleanv
+GLAD_API_CALL PFNGLGETBUFFERPARAMETERI64VPROC glad_glGetBufferParameteri64v;
+#define glGetBufferParameteri64v glad_glGetBufferParameteri64v
+GLAD_API_CALL PFNGLGETBUFFERPARAMETERIVPROC glad_glGetBufferParameteriv;
+#define glGetBufferParameteriv glad_glGetBufferParameteriv
+GLAD_API_CALL PFNGLGETBUFFERPOINTERVPROC glad_glGetBufferPointerv;
+#define glGetBufferPointerv glad_glGetBufferPointerv
+GLAD_API_CALL PFNGLGETBUFFERSUBDATAPROC glad_glGetBufferSubData;
+#define glGetBufferSubData glad_glGetBufferSubData
+GLAD_API_CALL PFNGLGETCLIPPLANEPROC glad_glGetClipPlane;
+#define glGetClipPlane glad_glGetClipPlane
+GLAD_API_CALL PFNGLGETCOMPRESSEDTEXIMAGEPROC glad_glGetCompressedTexImage;
+#define glGetCompressedTexImage glad_glGetCompressedTexImage
+GLAD_API_CALL PFNGLGETDEBUGMESSAGELOGPROC glad_glGetDebugMessageLog;
+#define glGetDebugMessageLog glad_glGetDebugMessageLog
+GLAD_API_CALL PFNGLGETDOUBLEVPROC glad_glGetDoublev;
+#define glGetDoublev glad_glGetDoublev
+GLAD_API_CALL PFNGLGETERRORPROC glad_glGetError;
+#define glGetError glad_glGetError
+GLAD_API_CALL PFNGLGETFLOATVPROC glad_glGetFloatv;
+#define glGetFloatv glad_glGetFloatv
+GLAD_API_CALL PFNGLGETFRAGDATAINDEXPROC glad_glGetFragDataIndex;
+#define glGetFragDataIndex glad_glGetFragDataIndex
+GLAD_API_CALL PFNGLGETFRAGDATALOCATIONPROC glad_glGetFragDataLocation;
+#define glGetFragDataLocation glad_glGetFragDataLocation
+GLAD_API_CALL PFNGLGETFRAMEBUFFERATTACHMENTPARAMETERIVPROC glad_glGetFramebufferAttachmentParameteriv;
+#define glGetFramebufferAttachmentParameteriv glad_glGetFramebufferAttachmentParameteriv
+GLAD_API_CALL PFNGLGETGRAPHICSRESETSTATUSARBPROC glad_glGetGraphicsResetStatusARB;
+#define glGetGraphicsResetStatusARB glad_glGetGraphicsResetStatusARB
+GLAD_API_CALL PFNGLGETINTEGER64I_VPROC glad_glGetInteger64i_v;
+#define glGetInteger64i_v glad_glGetInteger64i_v
+GLAD_API_CALL PFNGLGETINTEGER64VPROC glad_glGetInteger64v;
+#define glGetInteger64v glad_glGetInteger64v
+GLAD_API_CALL PFNGLGETINTEGERI_VPROC glad_glGetIntegeri_v;
+#define glGetIntegeri_v glad_glGetIntegeri_v
+GLAD_API_CALL PFNGLGETINTEGERVPROC glad_glGetIntegerv;
+#define glGetIntegerv glad_glGetIntegerv
+GLAD_API_CALL PFNGLGETLIGHTFVPROC glad_glGetLightfv;
+#define glGetLightfv glad_glGetLightfv
+GLAD_API_CALL PFNGLGETLIGHTIVPROC glad_glGetLightiv;
+#define glGetLightiv glad_glGetLightiv
+GLAD_API_CALL PFNGLGETMAPDVPROC glad_glGetMapdv;
+#define glGetMapdv glad_glGetMapdv
+GLAD_API_CALL PFNGLGETMAPFVPROC glad_glGetMapfv;
+#define glGetMapfv glad_glGetMapfv
+GLAD_API_CALL PFNGLGETMAPIVPROC glad_glGetMapiv;
+#define glGetMapiv glad_glGetMapiv
+GLAD_API_CALL PFNGLGETMATERIALFVPROC glad_glGetMaterialfv;
+#define glGetMaterialfv glad_glGetMaterialfv
+GLAD_API_CALL PFNGLGETMATERIALIVPROC glad_glGetMaterialiv;
+#define glGetMaterialiv glad_glGetMaterialiv
+GLAD_API_CALL PFNGLGETMULTISAMPLEFVPROC glad_glGetMultisamplefv;
+#define glGetMultisamplefv glad_glGetMultisamplefv
+GLAD_API_CALL PFNGLGETOBJECTLABELPROC glad_glGetObjectLabel;
+#define glGetObjectLabel glad_glGetObjectLabel
+GLAD_API_CALL PFNGLGETOBJECTPTRLABELPROC glad_glGetObjectPtrLabel;
+#define glGetObjectPtrLabel glad_glGetObjectPtrLabel
+GLAD_API_CALL PFNGLGETPIXELMAPFVPROC glad_glGetPixelMapfv;
+#define glGetPixelMapfv glad_glGetPixelMapfv
+GLAD_API_CALL PFNGLGETPIXELMAPUIVPROC glad_glGetPixelMapuiv;
+#define glGetPixelMapuiv glad_glGetPixelMapuiv
+GLAD_API_CALL PFNGLGETPIXELMAPUSVPROC glad_glGetPixelMapusv;
+#define glGetPixelMapusv glad_glGetPixelMapusv
+GLAD_API_CALL PFNGLGETPOINTERVPROC glad_glGetPointerv;
+#define glGetPointerv glad_glGetPointerv
+GLAD_API_CALL PFNGLGETPOLYGONSTIPPLEPROC glad_glGetPolygonStipple;
+#define glGetPolygonStipple glad_glGetPolygonStipple
+GLAD_API_CALL PFNGLGETPROGRAMINFOLOGPROC glad_glGetProgramInfoLog;
+#define glGetProgramInfoLog glad_glGetProgramInfoLog
+GLAD_API_CALL PFNGLGETPROGRAMIVPROC glad_glGetProgramiv;
+#define glGetProgramiv glad_glGetProgramiv
+GLAD_API_CALL PFNGLGETQUERYOBJECTI64VPROC glad_glGetQueryObjecti64v;
+#define glGetQueryObjecti64v glad_glGetQueryObjecti64v
+GLAD_API_CALL PFNGLGETQUERYOBJECTIVPROC glad_glGetQueryObjectiv;
+#define glGetQueryObjectiv glad_glGetQueryObjectiv
+GLAD_API_CALL PFNGLGETQUERYOBJECTUI64VPROC glad_glGetQueryObjectui64v;
+#define glGetQueryObjectui64v glad_glGetQueryObjectui64v
+GLAD_API_CALL PFNGLGETQUERYOBJECTUIVPROC glad_glGetQueryObjectuiv;
+#define glGetQueryObjectuiv glad_glGetQueryObjectuiv
+GLAD_API_CALL PFNGLGETQUERYIVPROC glad_glGetQueryiv;
+#define glGetQueryiv glad_glGetQueryiv
+GLAD_API_CALL PFNGLGETRENDERBUFFERPARAMETERIVPROC glad_glGetRenderbufferParameteriv;
+#define glGetRenderbufferParameteriv glad_glGetRenderbufferParameteriv
+GLAD_API_CALL PFNGLGETSAMPLERPARAMETERIIVPROC glad_glGetSamplerParameterIiv;
+#define glGetSamplerParameterIiv glad_glGetSamplerParameterIiv
+GLAD_API_CALL PFNGLGETSAMPLERPARAMETERIUIVPROC glad_glGetSamplerParameterIuiv;
+#define glGetSamplerParameterIuiv glad_glGetSamplerParameterIuiv
+GLAD_API_CALL PFNGLGETSAMPLERPARAMETERFVPROC glad_glGetSamplerParameterfv;
+#define glGetSamplerParameterfv glad_glGetSamplerParameterfv
+GLAD_API_CALL PFNGLGETSAMPLERPARAMETERIVPROC glad_glGetSamplerParameteriv;
+#define glGetSamplerParameteriv glad_glGetSamplerParameteriv
+GLAD_API_CALL PFNGLGETSHADERINFOLOGPROC glad_glGetShaderInfoLog;
+#define glGetShaderInfoLog glad_glGetShaderInfoLog
+GLAD_API_CALL PFNGLGETSHADERSOURCEPROC glad_glGetShaderSource;
+#define glGetShaderSource glad_glGetShaderSource
+GLAD_API_CALL PFNGLGETSHADERIVPROC glad_glGetShaderiv;
+#define glGetShaderiv glad_glGetShaderiv
+GLAD_API_CALL PFNGLGETSTRINGPROC glad_glGetString;
+#define glGetString glad_glGetString
+GLAD_API_CALL PFNGLGETSTRINGIPROC glad_glGetStringi;
+#define glGetStringi glad_glGetStringi
+GLAD_API_CALL PFNGLGETSYNCIVPROC glad_glGetSynciv;
+#define glGetSynciv glad_glGetSynciv
+GLAD_API_CALL PFNGLGETTEXENVFVPROC glad_glGetTexEnvfv;
+#define glGetTexEnvfv glad_glGetTexEnvfv
+GLAD_API_CALL PFNGLGETTEXENVIVPROC glad_glGetTexEnviv;
+#define glGetTexEnviv glad_glGetTexEnviv
+GLAD_API_CALL PFNGLGETTEXGENDVPROC glad_glGetTexGendv;
+#define glGetTexGendv glad_glGetTexGendv
+GLAD_API_CALL PFNGLGETTEXGENFVPROC glad_glGetTexGenfv;
+#define glGetTexGenfv glad_glGetTexGenfv
+GLAD_API_CALL PFNGLGETTEXGENIVPROC glad_glGetTexGeniv;
+#define glGetTexGeniv glad_glGetTexGeniv
+GLAD_API_CALL PFNGLGETTEXIMAGEPROC glad_glGetTexImage;
+#define glGetTexImage glad_glGetTexImage
+GLAD_API_CALL PFNGLGETTEXLEVELPARAMETERFVPROC glad_glGetTexLevelParameterfv;
+#define glGetTexLevelParameterfv glad_glGetTexLevelParameterfv
+GLAD_API_CALL PFNGLGETTEXLEVELPARAMETERIVPROC glad_glGetTexLevelParameteriv;
+#define glGetTexLevelParameteriv glad_glGetTexLevelParameteriv
+GLAD_API_CALL PFNGLGETTEXPARAMETERIIVPROC glad_glGetTexParameterIiv;
+#define glGetTexParameterIiv glad_glGetTexParameterIiv
+GLAD_API_CALL PFNGLGETTEXPARAMETERIUIVPROC glad_glGetTexParameterIuiv;
+#define glGetTexParameterIuiv glad_glGetTexParameterIuiv
+GLAD_API_CALL PFNGLGETTEXPARAMETERFVPROC glad_glGetTexParameterfv;
+#define glGetTexParameterfv glad_glGetTexParameterfv
+GLAD_API_CALL PFNGLGETTEXPARAMETERIVPROC glad_glGetTexParameteriv;
+#define glGetTexParameteriv glad_glGetTexParameteriv
+GLAD_API_CALL PFNGLGETTRANSFORMFEEDBACKVARYINGPROC glad_glGetTransformFeedbackVarying;
+#define glGetTransformFeedbackVarying glad_glGetTransformFeedbackVarying
+GLAD_API_CALL PFNGLGETUNIFORMBLOCKINDEXPROC glad_glGetUniformBlockIndex;
+#define glGetUniformBlockIndex glad_glGetUniformBlockIndex
+GLAD_API_CALL PFNGLGETUNIFORMINDICESPROC glad_glGetUniformIndices;
+#define glGetUniformIndices glad_glGetUniformIndices
+GLAD_API_CALL PFNGLGETUNIFORMLOCATIONPROC glad_glGetUniformLocation;
+#define glGetUniformLocation glad_glGetUniformLocation
+GLAD_API_CALL PFNGLGETUNIFORMFVPROC glad_glGetUniformfv;
+#define glGetUniformfv glad_glGetUniformfv
+GLAD_API_CALL PFNGLGETUNIFORMIVPROC glad_glGetUniformiv;
+#define glGetUniformiv glad_glGetUniformiv
+GLAD_API_CALL PFNGLGETUNIFORMUIVPROC glad_glGetUniformuiv;
+#define glGetUniformuiv glad_glGetUniformuiv
+GLAD_API_CALL PFNGLGETVERTEXATTRIBIIVPROC glad_glGetVertexAttribIiv;
+#define glGetVertexAttribIiv glad_glGetVertexAttribIiv
+GLAD_API_CALL PFNGLGETVERTEXATTRIBIUIVPROC glad_glGetVertexAttribIuiv;
+#define glGetVertexAttribIuiv glad_glGetVertexAttribIuiv
+GLAD_API_CALL PFNGLGETVERTEXATTRIBPOINTERVPROC glad_glGetVertexAttribPointerv;
+#define glGetVertexAttribPointerv glad_glGetVertexAttribPointerv
+GLAD_API_CALL PFNGLGETVERTEXATTRIBDVPROC glad_glGetVertexAttribdv;
+#define glGetVertexAttribdv glad_glGetVertexAttribdv
+GLAD_API_CALL PFNGLGETVERTEXATTRIBFVPROC glad_glGetVertexAttribfv;
+#define glGetVertexAttribfv glad_glGetVertexAttribfv
+GLAD_API_CALL PFNGLGETVERTEXATTRIBIVPROC glad_glGetVertexAttribiv;
+#define glGetVertexAttribiv glad_glGetVertexAttribiv
+GLAD_API_CALL PFNGLGETNCOLORTABLEARBPROC glad_glGetnColorTableARB;
+#define glGetnColorTableARB glad_glGetnColorTableARB
+GLAD_API_CALL PFNGLGETNCOMPRESSEDTEXIMAGEARBPROC glad_glGetnCompressedTexImageARB;
+#define glGetnCompressedTexImageARB glad_glGetnCompressedTexImageARB
+GLAD_API_CALL PFNGLGETNCONVOLUTIONFILTERARBPROC glad_glGetnConvolutionFilterARB;
+#define glGetnConvolutionFilterARB glad_glGetnConvolutionFilterARB
+GLAD_API_CALL PFNGLGETNHISTOGRAMARBPROC glad_glGetnHistogramARB;
+#define glGetnHistogramARB glad_glGetnHistogramARB
+GLAD_API_CALL PFNGLGETNMAPDVARBPROC glad_glGetnMapdvARB;
+#define glGetnMapdvARB glad_glGetnMapdvARB
+GLAD_API_CALL PFNGLGETNMAPFVARBPROC glad_glGetnMapfvARB;
+#define glGetnMapfvARB glad_glGetnMapfvARB
+GLAD_API_CALL PFNGLGETNMAPIVARBPROC glad_glGetnMapivARB;
+#define glGetnMapivARB glad_glGetnMapivARB
+GLAD_API_CALL PFNGLGETNMINMAXARBPROC glad_glGetnMinmaxARB;
+#define glGetnMinmaxARB glad_glGetnMinmaxARB
+GLAD_API_CALL PFNGLGETNPIXELMAPFVARBPROC glad_glGetnPixelMapfvARB;
+#define glGetnPixelMapfvARB glad_glGetnPixelMapfvARB
+GLAD_API_CALL PFNGLGETNPIXELMAPUIVARBPROC glad_glGetnPixelMapuivARB;
+#define glGetnPixelMapuivARB glad_glGetnPixelMapuivARB
+GLAD_API_CALL PFNGLGETNPIXELMAPUSVARBPROC glad_glGetnPixelMapusvARB;
+#define glGetnPixelMapusvARB glad_glGetnPixelMapusvARB
+GLAD_API_CALL PFNGLGETNPOLYGONSTIPPLEARBPROC glad_glGetnPolygonStippleARB;
+#define glGetnPolygonStippleARB glad_glGetnPolygonStippleARB
+GLAD_API_CALL PFNGLGETNSEPARABLEFILTERARBPROC glad_glGetnSeparableFilterARB;
+#define glGetnSeparableFilterARB glad_glGetnSeparableFilterARB
+GLAD_API_CALL PFNGLGETNTEXIMAGEARBPROC glad_glGetnTexImageARB;
+#define glGetnTexImageARB glad_glGetnTexImageARB
+GLAD_API_CALL PFNGLGETNUNIFORMDVARBPROC glad_glGetnUniformdvARB;
+#define glGetnUniformdvARB glad_glGetnUniformdvARB
+GLAD_API_CALL PFNGLGETNUNIFORMFVARBPROC glad_glGetnUniformfvARB;
+#define glGetnUniformfvARB glad_glGetnUniformfvARB
+GLAD_API_CALL PFNGLGETNUNIFORMIVARBPROC glad_glGetnUniformivARB;
+#define glGetnUniformivARB glad_glGetnUniformivARB
+GLAD_API_CALL PFNGLGETNUNIFORMUIVARBPROC glad_glGetnUniformuivARB;
+#define glGetnUniformuivARB glad_glGetnUniformuivARB
+GLAD_API_CALL PFNGLHINTPROC glad_glHint;
+#define glHint glad_glHint
+GLAD_API_CALL PFNGLINDEXMASKPROC glad_glIndexMask;
+#define glIndexMask glad_glIndexMask
+GLAD_API_CALL PFNGLINDEXPOINTERPROC glad_glIndexPointer;
+#define glIndexPointer glad_glIndexPointer
+GLAD_API_CALL PFNGLINDEXDPROC glad_glIndexd;
+#define glIndexd glad_glIndexd
+GLAD_API_CALL PFNGLINDEXDVPROC glad_glIndexdv;
+#define glIndexdv glad_glIndexdv
+GLAD_API_CALL PFNGLINDEXFPROC glad_glIndexf;
+#define glIndexf glad_glIndexf
+GLAD_API_CALL PFNGLINDEXFVPROC glad_glIndexfv;
+#define glIndexfv glad_glIndexfv
+GLAD_API_CALL PFNGLINDEXIPROC glad_glIndexi;
+#define glIndexi glad_glIndexi
+GLAD_API_CALL PFNGLINDEXIVPROC glad_glIndexiv;
+#define glIndexiv glad_glIndexiv
+GLAD_API_CALL PFNGLINDEXSPROC glad_glIndexs;
+#define glIndexs glad_glIndexs
+GLAD_API_CALL PFNGLINDEXSVPROC glad_glIndexsv;
+#define glIndexsv glad_glIndexsv
+GLAD_API_CALL PFNGLINDEXUBPROC glad_glIndexub;
+#define glIndexub glad_glIndexub
+GLAD_API_CALL PFNGLINDEXUBVPROC glad_glIndexubv;
+#define glIndexubv glad_glIndexubv
+GLAD_API_CALL PFNGLINITNAMESPROC glad_glInitNames;
+#define glInitNames glad_glInitNames
+GLAD_API_CALL PFNGLINTERLEAVEDARRAYSPROC glad_glInterleavedArrays;
+#define glInterleavedArrays glad_glInterleavedArrays
+GLAD_API_CALL PFNGLISBUFFERPROC glad_glIsBuffer;
+#define glIsBuffer glad_glIsBuffer
+GLAD_API_CALL PFNGLISENABLEDPROC glad_glIsEnabled;
+#define glIsEnabled glad_glIsEnabled
+GLAD_API_CALL PFNGLISENABLEDIPROC glad_glIsEnabledi;
+#define glIsEnabledi glad_glIsEnabledi
+GLAD_API_CALL PFNGLISFRAMEBUFFERPROC glad_glIsFramebuffer;
+#define glIsFramebuffer glad_glIsFramebuffer
+GLAD_API_CALL PFNGLISLISTPROC glad_glIsList;
+#define glIsList glad_glIsList
+GLAD_API_CALL PFNGLISPROGRAMPROC glad_glIsProgram;
+#define glIsProgram glad_glIsProgram
+GLAD_API_CALL PFNGLISQUERYPROC glad_glIsQuery;
+#define glIsQuery glad_glIsQuery
+GLAD_API_CALL PFNGLISRENDERBUFFERPROC glad_glIsRenderbuffer;
+#define glIsRenderbuffer glad_glIsRenderbuffer
+GLAD_API_CALL PFNGLISSAMPLERPROC glad_glIsSampler;
+#define glIsSampler glad_glIsSampler
+GLAD_API_CALL PFNGLISSHADERPROC glad_glIsShader;
+#define glIsShader glad_glIsShader
+GLAD_API_CALL PFNGLISSYNCPROC glad_glIsSync;
+#define glIsSync glad_glIsSync
+GLAD_API_CALL PFNGLISTEXTUREPROC glad_glIsTexture;
+#define glIsTexture glad_glIsTexture
+GLAD_API_CALL PFNGLISVERTEXARRAYPROC glad_glIsVertexArray;
+#define glIsVertexArray glad_glIsVertexArray
+GLAD_API_CALL PFNGLLIGHTMODELFPROC glad_glLightModelf;
+#define glLightModelf glad_glLightModelf
+GLAD_API_CALL PFNGLLIGHTMODELFVPROC glad_glLightModelfv;
+#define glLightModelfv glad_glLightModelfv
+GLAD_API_CALL PFNGLLIGHTMODELIPROC glad_glLightModeli;
+#define glLightModeli glad_glLightModeli
+GLAD_API_CALL PFNGLLIGHTMODELIVPROC glad_glLightModeliv;
+#define glLightModeliv glad_glLightModeliv
+GLAD_API_CALL PFNGLLIGHTFPROC glad_glLightf;
+#define glLightf glad_glLightf
+GLAD_API_CALL PFNGLLIGHTFVPROC glad_glLightfv;
+#define glLightfv glad_glLightfv
+GLAD_API_CALL PFNGLLIGHTIPROC glad_glLighti;
+#define glLighti glad_glLighti
+GLAD_API_CALL PFNGLLIGHTIVPROC glad_glLightiv;
+#define glLightiv glad_glLightiv
+GLAD_API_CALL PFNGLLINESTIPPLEPROC glad_glLineStipple;
+#define glLineStipple glad_glLineStipple
+GLAD_API_CALL PFNGLLINEWIDTHPROC glad_glLineWidth;
+#define glLineWidth glad_glLineWidth
+GLAD_API_CALL PFNGLLINKPROGRAMPROC glad_glLinkProgram;
+#define glLinkProgram glad_glLinkProgram
+GLAD_API_CALL PFNGLLISTBASEPROC glad_glListBase;
+#define glListBase glad_glListBase
+GLAD_API_CALL PFNGLLOADIDENTITYPROC glad_glLoadIdentity;
+#define glLoadIdentity glad_glLoadIdentity
+GLAD_API_CALL PFNGLLOADMATRIXDPROC glad_glLoadMatrixd;
+#define glLoadMatrixd glad_glLoadMatrixd
+GLAD_API_CALL PFNGLLOADMATRIXFPROC glad_glLoadMatrixf;
+#define glLoadMatrixf glad_glLoadMatrixf
+GLAD_API_CALL PFNGLLOADNAMEPROC glad_glLoadName;
+#define glLoadName glad_glLoadName
+GLAD_API_CALL PFNGLLOADTRANSPOSEMATRIXDPROC glad_glLoadTransposeMatrixd;
+#define glLoadTransposeMatrixd glad_glLoadTransposeMatrixd
+GLAD_API_CALL PFNGLLOADTRANSPOSEMATRIXFPROC glad_glLoadTransposeMatrixf;
+#define glLoadTransposeMatrixf glad_glLoadTransposeMatrixf
+GLAD_API_CALL PFNGLLOGICOPPROC glad_glLogicOp;
+#define glLogicOp glad_glLogicOp
+GLAD_API_CALL PFNGLMAP1DPROC glad_glMap1d;
+#define glMap1d glad_glMap1d
+GLAD_API_CALL PFNGLMAP1FPROC glad_glMap1f;
+#define glMap1f glad_glMap1f
+GLAD_API_CALL PFNGLMAP2DPROC glad_glMap2d;
+#define glMap2d glad_glMap2d
+GLAD_API_CALL PFNGLMAP2FPROC glad_glMap2f;
+#define glMap2f glad_glMap2f
+GLAD_API_CALL PFNGLMAPBUFFERPROC glad_glMapBuffer;
+#define glMapBuffer glad_glMapBuffer
+GLAD_API_CALL PFNGLMAPBUFFERRANGEPROC glad_glMapBufferRange;
+#define glMapBufferRange glad_glMapBufferRange
+GLAD_API_CALL PFNGLMAPGRID1DPROC glad_glMapGrid1d;
+#define glMapGrid1d glad_glMapGrid1d
+GLAD_API_CALL PFNGLMAPGRID1FPROC glad_glMapGrid1f;
+#define glMapGrid1f glad_glMapGrid1f
+GLAD_API_CALL PFNGLMAPGRID2DPROC glad_glMapGrid2d;
+#define glMapGrid2d glad_glMapGrid2d
+GLAD_API_CALL PFNGLMAPGRID2FPROC glad_glMapGrid2f;
+#define glMapGrid2f glad_glMapGrid2f
+GLAD_API_CALL PFNGLMATERIALFPROC glad_glMaterialf;
+#define glMaterialf glad_glMaterialf
+GLAD_API_CALL PFNGLMATERIALFVPROC glad_glMaterialfv;
+#define glMaterialfv glad_glMaterialfv
+GLAD_API_CALL PFNGLMATERIALIPROC glad_glMateriali;
+#define glMateriali glad_glMateriali
+GLAD_API_CALL PFNGLMATERIALIVPROC glad_glMaterialiv;
+#define glMaterialiv glad_glMaterialiv
+GLAD_API_CALL PFNGLMATRIXMODEPROC glad_glMatrixMode;
+#define glMatrixMode glad_glMatrixMode
+GLAD_API_CALL PFNGLMULTMATRIXDPROC glad_glMultMatrixd;
+#define glMultMatrixd glad_glMultMatrixd
+GLAD_API_CALL PFNGLMULTMATRIXFPROC glad_glMultMatrixf;
+#define glMultMatrixf glad_glMultMatrixf
+GLAD_API_CALL PFNGLMULTTRANSPOSEMATRIXDPROC glad_glMultTransposeMatrixd;
+#define glMultTransposeMatrixd glad_glMultTransposeMatrixd
+GLAD_API_CALL PFNGLMULTTRANSPOSEMATRIXFPROC glad_glMultTransposeMatrixf;
+#define glMultTransposeMatrixf glad_glMultTransposeMatrixf
+GLAD_API_CALL PFNGLMULTIDRAWARRAYSPROC glad_glMultiDrawArrays;
+#define glMultiDrawArrays glad_glMultiDrawArrays
+GLAD_API_CALL PFNGLMULTIDRAWELEMENTSPROC glad_glMultiDrawElements;
+#define glMultiDrawElements glad_glMultiDrawElements
+GLAD_API_CALL PFNGLMULTIDRAWELEMENTSBASEVERTEXPROC glad_glMultiDrawElementsBaseVertex;
+#define glMultiDrawElementsBaseVertex glad_glMultiDrawElementsBaseVertex
+GLAD_API_CALL PFNGLMULTITEXCOORD1DPROC glad_glMultiTexCoord1d;
+#define glMultiTexCoord1d glad_glMultiTexCoord1d
+GLAD_API_CALL PFNGLMULTITEXCOORD1DVPROC glad_glMultiTexCoord1dv;
+#define glMultiTexCoord1dv glad_glMultiTexCoord1dv
+GLAD_API_CALL PFNGLMULTITEXCOORD1FPROC glad_glMultiTexCoord1f;
+#define glMultiTexCoord1f glad_glMultiTexCoord1f
+GLAD_API_CALL PFNGLMULTITEXCOORD1FVPROC glad_glMultiTexCoord1fv;
+#define glMultiTexCoord1fv glad_glMultiTexCoord1fv
+GLAD_API_CALL PFNGLMULTITEXCOORD1IPROC glad_glMultiTexCoord1i;
+#define glMultiTexCoord1i glad_glMultiTexCoord1i
+GLAD_API_CALL PFNGLMULTITEXCOORD1IVPROC glad_glMultiTexCoord1iv;
+#define glMultiTexCoord1iv glad_glMultiTexCoord1iv
+GLAD_API_CALL PFNGLMULTITEXCOORD1SPROC glad_glMultiTexCoord1s;
+#define glMultiTexCoord1s glad_glMultiTexCoord1s
+GLAD_API_CALL PFNGLMULTITEXCOORD1SVPROC glad_glMultiTexCoord1sv;
+#define glMultiTexCoord1sv glad_glMultiTexCoord1sv
+GLAD_API_CALL PFNGLMULTITEXCOORD2DPROC glad_glMultiTexCoord2d;
+#define glMultiTexCoord2d glad_glMultiTexCoord2d
+GLAD_API_CALL PFNGLMULTITEXCOORD2DVPROC glad_glMultiTexCoord2dv;
+#define glMultiTexCoord2dv glad_glMultiTexCoord2dv
+GLAD_API_CALL PFNGLMULTITEXCOORD2FPROC glad_glMultiTexCoord2f;
+#define glMultiTexCoord2f glad_glMultiTexCoord2f
+GLAD_API_CALL PFNGLMULTITEXCOORD2FVPROC glad_glMultiTexCoord2fv;
+#define glMultiTexCoord2fv glad_glMultiTexCoord2fv
+GLAD_API_CALL PFNGLMULTITEXCOORD2IPROC glad_glMultiTexCoord2i;
+#define glMultiTexCoord2i glad_glMultiTexCoord2i
+GLAD_API_CALL PFNGLMULTITEXCOORD2IVPROC glad_glMultiTexCoord2iv;
+#define glMultiTexCoord2iv glad_glMultiTexCoord2iv
+GLAD_API_CALL PFNGLMULTITEXCOORD2SPROC glad_glMultiTexCoord2s;
+#define glMultiTexCoord2s glad_glMultiTexCoord2s
+GLAD_API_CALL PFNGLMULTITEXCOORD2SVPROC glad_glMultiTexCoord2sv;
+#define glMultiTexCoord2sv glad_glMultiTexCoord2sv
+GLAD_API_CALL PFNGLMULTITEXCOORD3DPROC glad_glMultiTexCoord3d;
+#define glMultiTexCoord3d glad_glMultiTexCoord3d
+GLAD_API_CALL PFNGLMULTITEXCOORD3DVPROC glad_glMultiTexCoord3dv;
+#define glMultiTexCoord3dv glad_glMultiTexCoord3dv
+GLAD_API_CALL PFNGLMULTITEXCOORD3FPROC glad_glMultiTexCoord3f;
+#define glMultiTexCoord3f glad_glMultiTexCoord3f
+GLAD_API_CALL PFNGLMULTITEXCOORD3FVPROC glad_glMultiTexCoord3fv;
+#define glMultiTexCoord3fv glad_glMultiTexCoord3fv
+GLAD_API_CALL PFNGLMULTITEXCOORD3IPROC glad_glMultiTexCoord3i;
+#define glMultiTexCoord3i glad_glMultiTexCoord3i
+GLAD_API_CALL PFNGLMULTITEXCOORD3IVPROC glad_glMultiTexCoord3iv;
+#define glMultiTexCoord3iv glad_glMultiTexCoord3iv
+GLAD_API_CALL PFNGLMULTITEXCOORD3SPROC glad_glMultiTexCoord3s;
+#define glMultiTexCoord3s glad_glMultiTexCoord3s
+GLAD_API_CALL PFNGLMULTITEXCOORD3SVPROC glad_glMultiTexCoord3sv;
+#define glMultiTexCoord3sv glad_glMultiTexCoord3sv
+GLAD_API_CALL PFNGLMULTITEXCOORD4DPROC glad_glMultiTexCoord4d;
+#define glMultiTexCoord4d glad_glMultiTexCoord4d
+GLAD_API_CALL PFNGLMULTITEXCOORD4DVPROC glad_glMultiTexCoord4dv;
+#define glMultiTexCoord4dv glad_glMultiTexCoord4dv
+GLAD_API_CALL PFNGLMULTITEXCOORD4FPROC glad_glMultiTexCoord4f;
+#define glMultiTexCoord4f glad_glMultiTexCoord4f
+GLAD_API_CALL PFNGLMULTITEXCOORD4FVPROC glad_glMultiTexCoord4fv;
+#define glMultiTexCoord4fv glad_glMultiTexCoord4fv
+GLAD_API_CALL PFNGLMULTITEXCOORD4IPROC glad_glMultiTexCoord4i;
+#define glMultiTexCoord4i glad_glMultiTexCoord4i
+GLAD_API_CALL PFNGLMULTITEXCOORD4IVPROC glad_glMultiTexCoord4iv;
+#define glMultiTexCoord4iv glad_glMultiTexCoord4iv
+GLAD_API_CALL PFNGLMULTITEXCOORD4SPROC glad_glMultiTexCoord4s;
+#define glMultiTexCoord4s glad_glMultiTexCoord4s
+GLAD_API_CALL PFNGLMULTITEXCOORD4SVPROC glad_glMultiTexCoord4sv;
+#define glMultiTexCoord4sv glad_glMultiTexCoord4sv
+GLAD_API_CALL PFNGLMULTITEXCOORDP1UIPROC glad_glMultiTexCoordP1ui;
+#define glMultiTexCoordP1ui glad_glMultiTexCoordP1ui
+GLAD_API_CALL PFNGLMULTITEXCOORDP1UIVPROC glad_glMultiTexCoordP1uiv;
+#define glMultiTexCoordP1uiv glad_glMultiTexCoordP1uiv
+GLAD_API_CALL PFNGLMULTITEXCOORDP2UIPROC glad_glMultiTexCoordP2ui;
+#define glMultiTexCoordP2ui glad_glMultiTexCoordP2ui
+GLAD_API_CALL PFNGLMULTITEXCOORDP2UIVPROC glad_glMultiTexCoordP2uiv;
+#define glMultiTexCoordP2uiv glad_glMultiTexCoordP2uiv
+GLAD_API_CALL PFNGLMULTITEXCOORDP3UIPROC glad_glMultiTexCoordP3ui;
+#define glMultiTexCoordP3ui glad_glMultiTexCoordP3ui
+GLAD_API_CALL PFNGLMULTITEXCOORDP3UIVPROC glad_glMultiTexCoordP3uiv;
+#define glMultiTexCoordP3uiv glad_glMultiTexCoordP3uiv
+GLAD_API_CALL PFNGLMULTITEXCOORDP4UIPROC glad_glMultiTexCoordP4ui;
+#define glMultiTexCoordP4ui glad_glMultiTexCoordP4ui
+GLAD_API_CALL PFNGLMULTITEXCOORDP4UIVPROC glad_glMultiTexCoordP4uiv;
+#define glMultiTexCoordP4uiv glad_glMultiTexCoordP4uiv
+GLAD_API_CALL PFNGLNEWLISTPROC glad_glNewList;
+#define glNewList glad_glNewList
+GLAD_API_CALL PFNGLNORMAL3BPROC glad_glNormal3b;
+#define glNormal3b glad_glNormal3b
+GLAD_API_CALL PFNGLNORMAL3BVPROC glad_glNormal3bv;
+#define glNormal3bv glad_glNormal3bv
+GLAD_API_CALL PFNGLNORMAL3DPROC glad_glNormal3d;
+#define glNormal3d glad_glNormal3d
+GLAD_API_CALL PFNGLNORMAL3DVPROC glad_glNormal3dv;
+#define glNormal3dv glad_glNormal3dv
+GLAD_API_CALL PFNGLNORMAL3FPROC glad_glNormal3f;
+#define glNormal3f glad_glNormal3f
+GLAD_API_CALL PFNGLNORMAL3FVPROC glad_glNormal3fv;
+#define glNormal3fv glad_glNormal3fv
+GLAD_API_CALL PFNGLNORMAL3IPROC glad_glNormal3i;
+#define glNormal3i glad_glNormal3i
+GLAD_API_CALL PFNGLNORMAL3IVPROC glad_glNormal3iv;
+#define glNormal3iv glad_glNormal3iv
+GLAD_API_CALL PFNGLNORMAL3SPROC glad_glNormal3s;
+#define glNormal3s glad_glNormal3s
+GLAD_API_CALL PFNGLNORMAL3SVPROC glad_glNormal3sv;
+#define glNormal3sv glad_glNormal3sv
+GLAD_API_CALL PFNGLNORMALP3UIPROC glad_glNormalP3ui;
+#define glNormalP3ui glad_glNormalP3ui
+GLAD_API_CALL PFNGLNORMALP3UIVPROC glad_glNormalP3uiv;
+#define glNormalP3uiv glad_glNormalP3uiv
+GLAD_API_CALL PFNGLNORMALPOINTERPROC glad_glNormalPointer;
+#define glNormalPointer glad_glNormalPointer
+GLAD_API_CALL PFNGLOBJECTLABELPROC glad_glObjectLabel;
+#define glObjectLabel glad_glObjectLabel
+GLAD_API_CALL PFNGLOBJECTPTRLABELPROC glad_glObjectPtrLabel;
+#define glObjectPtrLabel glad_glObjectPtrLabel
+GLAD_API_CALL PFNGLORTHOPROC glad_glOrtho;
+#define glOrtho glad_glOrtho
+GLAD_API_CALL PFNGLPASSTHROUGHPROC glad_glPassThrough;
+#define glPassThrough glad_glPassThrough
+GLAD_API_CALL PFNGLPIXELMAPFVPROC glad_glPixelMapfv;
+#define glPixelMapfv glad_glPixelMapfv
+GLAD_API_CALL PFNGLPIXELMAPUIVPROC glad_glPixelMapuiv;
+#define glPixelMapuiv glad_glPixelMapuiv
+GLAD_API_CALL PFNGLPIXELMAPUSVPROC glad_glPixelMapusv;
+#define glPixelMapusv glad_glPixelMapusv
+GLAD_API_CALL PFNGLPIXELSTOREFPROC glad_glPixelStoref;
+#define glPixelStoref glad_glPixelStoref
+GLAD_API_CALL PFNGLPIXELSTOREIPROC glad_glPixelStorei;
+#define glPixelStorei glad_glPixelStorei
+GLAD_API_CALL PFNGLPIXELTRANSFERFPROC glad_glPixelTransferf;
+#define glPixelTransferf glad_glPixelTransferf
+GLAD_API_CALL PFNGLPIXELTRANSFERIPROC glad_glPixelTransferi;
+#define glPixelTransferi glad_glPixelTransferi
+GLAD_API_CALL PFNGLPIXELZOOMPROC glad_glPixelZoom;
+#define glPixelZoom glad_glPixelZoom
+GLAD_API_CALL PFNGLPOINTPARAMETERFPROC glad_glPointParameterf;
+#define glPointParameterf glad_glPointParameterf
+GLAD_API_CALL PFNGLPOINTPARAMETERFVPROC glad_glPointParameterfv;
+#define glPointParameterfv glad_glPointParameterfv
+GLAD_API_CALL PFNGLPOINTPARAMETERIPROC glad_glPointParameteri;
+#define glPointParameteri glad_glPointParameteri
+GLAD_API_CALL PFNGLPOINTPARAMETERIVPROC glad_glPointParameteriv;
+#define glPointParameteriv glad_glPointParameteriv
+GLAD_API_CALL PFNGLPOINTSIZEPROC glad_glPointSize;
+#define glPointSize glad_glPointSize
+GLAD_API_CALL PFNGLPOLYGONMODEPROC glad_glPolygonMode;
+#define glPolygonMode glad_glPolygonMode
+GLAD_API_CALL PFNGLPOLYGONOFFSETPROC glad_glPolygonOffset;
+#define glPolygonOffset glad_glPolygonOffset
+GLAD_API_CALL PFNGLPOLYGONSTIPPLEPROC glad_glPolygonStipple;
+#define glPolygonStipple glad_glPolygonStipple
+GLAD_API_CALL PFNGLPOPATTRIBPROC glad_glPopAttrib;
+#define glPopAttrib glad_glPopAttrib
+GLAD_API_CALL PFNGLPOPCLIENTATTRIBPROC glad_glPopClientAttrib;
+#define glPopClientAttrib glad_glPopClientAttrib
+GLAD_API_CALL PFNGLPOPDEBUGGROUPPROC glad_glPopDebugGroup;
+#define glPopDebugGroup glad_glPopDebugGroup
+GLAD_API_CALL PFNGLPOPMATRIXPROC glad_glPopMatrix;
+#define glPopMatrix glad_glPopMatrix
+GLAD_API_CALL PFNGLPOPNAMEPROC glad_glPopName;
+#define glPopName glad_glPopName
+GLAD_API_CALL PFNGLPRIMITIVERESTARTINDEXPROC glad_glPrimitiveRestartIndex;
+#define glPrimitiveRestartIndex glad_glPrimitiveRestartIndex
+GLAD_API_CALL PFNGLPRIORITIZETEXTURESPROC glad_glPrioritizeTextures;
+#define glPrioritizeTextures glad_glPrioritizeTextures
+GLAD_API_CALL PFNGLPROVOKINGVERTEXPROC glad_glProvokingVertex;
+#define glProvokingVertex glad_glProvokingVertex
+GLAD_API_CALL PFNGLPUSHATTRIBPROC glad_glPushAttrib;
+#define glPushAttrib glad_glPushAttrib
+GLAD_API_CALL PFNGLPUSHCLIENTATTRIBPROC glad_glPushClientAttrib;
+#define glPushClientAttrib glad_glPushClientAttrib
+GLAD_API_CALL PFNGLPUSHDEBUGGROUPPROC glad_glPushDebugGroup;
+#define glPushDebugGroup glad_glPushDebugGroup
+GLAD_API_CALL PFNGLPUSHMATRIXPROC glad_glPushMatrix;
+#define glPushMatrix glad_glPushMatrix
+GLAD_API_CALL PFNGLPUSHNAMEPROC glad_glPushName;
+#define glPushName glad_glPushName
+GLAD_API_CALL PFNGLQUERYCOUNTERPROC glad_glQueryCounter;
+#define glQueryCounter glad_glQueryCounter
+GLAD_API_CALL PFNGLRASTERPOS2DPROC glad_glRasterPos2d;
+#define glRasterPos2d glad_glRasterPos2d
+GLAD_API_CALL PFNGLRASTERPOS2DVPROC glad_glRasterPos2dv;
+#define glRasterPos2dv glad_glRasterPos2dv
+GLAD_API_CALL PFNGLRASTERPOS2FPROC glad_glRasterPos2f;
+#define glRasterPos2f glad_glRasterPos2f
+GLAD_API_CALL PFNGLRASTERPOS2FVPROC glad_glRasterPos2fv;
+#define glRasterPos2fv glad_glRasterPos2fv
+GLAD_API_CALL PFNGLRASTERPOS2IPROC glad_glRasterPos2i;
+#define glRasterPos2i glad_glRasterPos2i
+GLAD_API_CALL PFNGLRASTERPOS2IVPROC glad_glRasterPos2iv;
+#define glRasterPos2iv glad_glRasterPos2iv
+GLAD_API_CALL PFNGLRASTERPOS2SPROC glad_glRasterPos2s;
+#define glRasterPos2s glad_glRasterPos2s
+GLAD_API_CALL PFNGLRASTERPOS2SVPROC glad_glRasterPos2sv;
+#define glRasterPos2sv glad_glRasterPos2sv
+GLAD_API_CALL PFNGLRASTERPOS3DPROC glad_glRasterPos3d;
+#define glRasterPos3d glad_glRasterPos3d
+GLAD_API_CALL PFNGLRASTERPOS3DVPROC glad_glRasterPos3dv;
+#define glRasterPos3dv glad_glRasterPos3dv
+GLAD_API_CALL PFNGLRASTERPOS3FPROC glad_glRasterPos3f;
+#define glRasterPos3f glad_glRasterPos3f
+GLAD_API_CALL PFNGLRASTERPOS3FVPROC glad_glRasterPos3fv;
+#define glRasterPos3fv glad_glRasterPos3fv
+GLAD_API_CALL PFNGLRASTERPOS3IPROC glad_glRasterPos3i;
+#define glRasterPos3i glad_glRasterPos3i
+GLAD_API_CALL PFNGLRASTERPOS3IVPROC glad_glRasterPos3iv;
+#define glRasterPos3iv glad_glRasterPos3iv
+GLAD_API_CALL PFNGLRASTERPOS3SPROC glad_glRasterPos3s;
+#define glRasterPos3s glad_glRasterPos3s
+GLAD_API_CALL PFNGLRASTERPOS3SVPROC glad_glRasterPos3sv;
+#define glRasterPos3sv glad_glRasterPos3sv
+GLAD_API_CALL PFNGLRASTERPOS4DPROC glad_glRasterPos4d;
+#define glRasterPos4d glad_glRasterPos4d
+GLAD_API_CALL PFNGLRASTERPOS4DVPROC glad_glRasterPos4dv;
+#define glRasterPos4dv glad_glRasterPos4dv
+GLAD_API_CALL PFNGLRASTERPOS4FPROC glad_glRasterPos4f;
+#define glRasterPos4f glad_glRasterPos4f
+GLAD_API_CALL PFNGLRASTERPOS4FVPROC glad_glRasterPos4fv;
+#define glRasterPos4fv glad_glRasterPos4fv
+GLAD_API_CALL PFNGLRASTERPOS4IPROC glad_glRasterPos4i;
+#define glRasterPos4i glad_glRasterPos4i
+GLAD_API_CALL PFNGLRASTERPOS4IVPROC glad_glRasterPos4iv;
+#define glRasterPos4iv glad_glRasterPos4iv
+GLAD_API_CALL PFNGLRASTERPOS4SPROC glad_glRasterPos4s;
+#define glRasterPos4s glad_glRasterPos4s
+GLAD_API_CALL PFNGLRASTERPOS4SVPROC glad_glRasterPos4sv;
+#define glRasterPos4sv glad_glRasterPos4sv
+GLAD_API_CALL PFNGLREADBUFFERPROC glad_glReadBuffer;
+#define glReadBuffer glad_glReadBuffer
+GLAD_API_CALL PFNGLREADPIXELSPROC glad_glReadPixels;
+#define glReadPixels glad_glReadPixels
+GLAD_API_CALL PFNGLREADNPIXELSARBPROC glad_glReadnPixelsARB;
+#define glReadnPixelsARB glad_glReadnPixelsARB
+GLAD_API_CALL PFNGLRECTDPROC glad_glRectd;
+#define glRectd glad_glRectd
+GLAD_API_CALL PFNGLRECTDVPROC glad_glRectdv;
+#define glRectdv glad_glRectdv
+GLAD_API_CALL PFNGLRECTFPROC glad_glRectf;
+#define glRectf glad_glRectf
+GLAD_API_CALL PFNGLRECTFVPROC glad_glRectfv;
+#define glRectfv glad_glRectfv
+GLAD_API_CALL PFNGLRECTIPROC glad_glRecti;
+#define glRecti glad_glRecti
+GLAD_API_CALL PFNGLRECTIVPROC glad_glRectiv;
+#define glRectiv glad_glRectiv
+GLAD_API_CALL PFNGLRECTSPROC glad_glRects;
+#define glRects glad_glRects
+GLAD_API_CALL PFNGLRECTSVPROC glad_glRectsv;
+#define glRectsv glad_glRectsv
+GLAD_API_CALL PFNGLRENDERMODEPROC glad_glRenderMode;
+#define glRenderMode glad_glRenderMode
+GLAD_API_CALL PFNGLRENDERBUFFERSTORAGEPROC glad_glRenderbufferStorage;
+#define glRenderbufferStorage glad_glRenderbufferStorage
+GLAD_API_CALL PFNGLRENDERBUFFERSTORAGEMULTISAMPLEPROC glad_glRenderbufferStorageMultisample;
+#define glRenderbufferStorageMultisample glad_glRenderbufferStorageMultisample
+GLAD_API_CALL PFNGLROTATEDPROC glad_glRotated;
+#define glRotated glad_glRotated
+GLAD_API_CALL PFNGLROTATEFPROC glad_glRotatef;
+#define glRotatef glad_glRotatef
+GLAD_API_CALL PFNGLSAMPLECOVERAGEPROC glad_glSampleCoverage;
+#define glSampleCoverage glad_glSampleCoverage
+GLAD_API_CALL PFNGLSAMPLECOVERAGEARBPROC glad_glSampleCoverageARB;
+#define glSampleCoverageARB glad_glSampleCoverageARB
+GLAD_API_CALL PFNGLSAMPLEMASKIPROC glad_glSampleMaski;
+#define glSampleMaski glad_glSampleMaski
+GLAD_API_CALL PFNGLSAMPLERPARAMETERIIVPROC glad_glSamplerParameterIiv;
+#define glSamplerParameterIiv glad_glSamplerParameterIiv
+GLAD_API_CALL PFNGLSAMPLERPARAMETERIUIVPROC glad_glSamplerParameterIuiv;
+#define glSamplerParameterIuiv glad_glSamplerParameterIuiv
+GLAD_API_CALL PFNGLSAMPLERPARAMETERFPROC glad_glSamplerParameterf;
+#define glSamplerParameterf glad_glSamplerParameterf
+GLAD_API_CALL PFNGLSAMPLERPARAMETERFVPROC glad_glSamplerParameterfv;
+#define glSamplerParameterfv glad_glSamplerParameterfv
+GLAD_API_CALL PFNGLSAMPLERPARAMETERIPROC glad_glSamplerParameteri;
+#define glSamplerParameteri glad_glSamplerParameteri
+GLAD_API_CALL PFNGLSAMPLERPARAMETERIVPROC glad_glSamplerParameteriv;
+#define glSamplerParameteriv glad_glSamplerParameteriv
+GLAD_API_CALL PFNGLSCALEDPROC glad_glScaled;
+#define glScaled glad_glScaled
+GLAD_API_CALL PFNGLSCALEFPROC glad_glScalef;
+#define glScalef glad_glScalef
+GLAD_API_CALL PFNGLSCISSORPROC glad_glScissor;
+#define glScissor glad_glScissor
+GLAD_API_CALL PFNGLSECONDARYCOLOR3BPROC glad_glSecondaryColor3b;
+#define glSecondaryColor3b glad_glSecondaryColor3b
+GLAD_API_CALL PFNGLSECONDARYCOLOR3BVPROC glad_glSecondaryColor3bv;
+#define glSecondaryColor3bv glad_glSecondaryColor3bv
+GLAD_API_CALL PFNGLSECONDARYCOLOR3DPROC glad_glSecondaryColor3d;
+#define glSecondaryColor3d glad_glSecondaryColor3d
+GLAD_API_CALL PFNGLSECONDARYCOLOR3DVPROC glad_glSecondaryColor3dv;
+#define glSecondaryColor3dv glad_glSecondaryColor3dv
+GLAD_API_CALL PFNGLSECONDARYCOLOR3FPROC glad_glSecondaryColor3f;
+#define glSecondaryColor3f glad_glSecondaryColor3f
+GLAD_API_CALL PFNGLSECONDARYCOLOR3FVPROC glad_glSecondaryColor3fv;
+#define glSecondaryColor3fv glad_glSecondaryColor3fv
+GLAD_API_CALL PFNGLSECONDARYCOLOR3IPROC glad_glSecondaryColor3i;
+#define glSecondaryColor3i glad_glSecondaryColor3i
+GLAD_API_CALL PFNGLSECONDARYCOLOR3IVPROC glad_glSecondaryColor3iv;
+#define glSecondaryColor3iv glad_glSecondaryColor3iv
+GLAD_API_CALL PFNGLSECONDARYCOLOR3SPROC glad_glSecondaryColor3s;
+#define glSecondaryColor3s glad_glSecondaryColor3s
+GLAD_API_CALL PFNGLSECONDARYCOLOR3SVPROC glad_glSecondaryColor3sv;
+#define glSecondaryColor3sv glad_glSecondaryColor3sv
+GLAD_API_CALL PFNGLSECONDARYCOLOR3UBPROC glad_glSecondaryColor3ub;
+#define glSecondaryColor3ub glad_glSecondaryColor3ub
+GLAD_API_CALL PFNGLSECONDARYCOLOR3UBVPROC glad_glSecondaryColor3ubv;
+#define glSecondaryColor3ubv glad_glSecondaryColor3ubv
+GLAD_API_CALL PFNGLSECONDARYCOLOR3UIPROC glad_glSecondaryColor3ui;
+#define glSecondaryColor3ui glad_glSecondaryColor3ui
+GLAD_API_CALL PFNGLSECONDARYCOLOR3UIVPROC glad_glSecondaryColor3uiv;
+#define glSecondaryColor3uiv glad_glSecondaryColor3uiv
+GLAD_API_CALL PFNGLSECONDARYCOLOR3USPROC glad_glSecondaryColor3us;
+#define glSecondaryColor3us glad_glSecondaryColor3us
+GLAD_API_CALL PFNGLSECONDARYCOLOR3USVPROC glad_glSecondaryColor3usv;
+#define glSecondaryColor3usv glad_glSecondaryColor3usv
+GLAD_API_CALL PFNGLSECONDARYCOLORP3UIPROC glad_glSecondaryColorP3ui;
+#define glSecondaryColorP3ui glad_glSecondaryColorP3ui
+GLAD_API_CALL PFNGLSECONDARYCOLORP3UIVPROC glad_glSecondaryColorP3uiv;
+#define glSecondaryColorP3uiv glad_glSecondaryColorP3uiv
+GLAD_API_CALL PFNGLSECONDARYCOLORPOINTERPROC glad_glSecondaryColorPointer;
+#define glSecondaryColorPointer glad_glSecondaryColorPointer
+GLAD_API_CALL PFNGLSELECTBUFFERPROC glad_glSelectBuffer;
+#define glSelectBuffer glad_glSelectBuffer
+GLAD_API_CALL PFNGLSHADEMODELPROC glad_glShadeModel;
+#define glShadeModel glad_glShadeModel
+GLAD_API_CALL PFNGLSHADERSOURCEPROC glad_glShaderSource;
+#define glShaderSource glad_glShaderSource
+GLAD_API_CALL PFNGLSTENCILFUNCPROC glad_glStencilFunc;
+#define glStencilFunc glad_glStencilFunc
+GLAD_API_CALL PFNGLSTENCILFUNCSEPARATEPROC glad_glStencilFuncSeparate;
+#define glStencilFuncSeparate glad_glStencilFuncSeparate
+GLAD_API_CALL PFNGLSTENCILMASKPROC glad_glStencilMask;
+#define glStencilMask glad_glStencilMask
+GLAD_API_CALL PFNGLSTENCILMASKSEPARATEPROC glad_glStencilMaskSeparate;
+#define glStencilMaskSeparate glad_glStencilMaskSeparate
+GLAD_API_CALL PFNGLSTENCILOPPROC glad_glStencilOp;
+#define glStencilOp glad_glStencilOp
+GLAD_API_CALL PFNGLSTENCILOPSEPARATEPROC glad_glStencilOpSeparate;
+#define glStencilOpSeparate glad_glStencilOpSeparate
+GLAD_API_CALL PFNGLTEXBUFFERPROC glad_glTexBuffer;
+#define glTexBuffer glad_glTexBuffer
+GLAD_API_CALL PFNGLTEXCOORD1DPROC glad_glTexCoord1d;
+#define glTexCoord1d glad_glTexCoord1d
+GLAD_API_CALL PFNGLTEXCOORD1DVPROC glad_glTexCoord1dv;
+#define glTexCoord1dv glad_glTexCoord1dv
+GLAD_API_CALL PFNGLTEXCOORD1FPROC glad_glTexCoord1f;
+#define glTexCoord1f glad_glTexCoord1f
+GLAD_API_CALL PFNGLTEXCOORD1FVPROC glad_glTexCoord1fv;
+#define glTexCoord1fv glad_glTexCoord1fv
+GLAD_API_CALL PFNGLTEXCOORD1IPROC glad_glTexCoord1i;
+#define glTexCoord1i glad_glTexCoord1i
+GLAD_API_CALL PFNGLTEXCOORD1IVPROC glad_glTexCoord1iv;
+#define glTexCoord1iv glad_glTexCoord1iv
+GLAD_API_CALL PFNGLTEXCOORD1SPROC glad_glTexCoord1s;
+#define glTexCoord1s glad_glTexCoord1s
+GLAD_API_CALL PFNGLTEXCOORD1SVPROC glad_glTexCoord1sv;
+#define glTexCoord1sv glad_glTexCoord1sv
+GLAD_API_CALL PFNGLTEXCOORD2DPROC glad_glTexCoord2d;
+#define glTexCoord2d glad_glTexCoord2d
+GLAD_API_CALL PFNGLTEXCOORD2DVPROC glad_glTexCoord2dv;
+#define glTexCoord2dv glad_glTexCoord2dv
+GLAD_API_CALL PFNGLTEXCOORD2FPROC glad_glTexCoord2f;
+#define glTexCoord2f glad_glTexCoord2f
+GLAD_API_CALL PFNGLTEXCOORD2FVPROC glad_glTexCoord2fv;
+#define glTexCoord2fv glad_glTexCoord2fv
+GLAD_API_CALL PFNGLTEXCOORD2IPROC glad_glTexCoord2i;
+#define glTexCoord2i glad_glTexCoord2i
+GLAD_API_CALL PFNGLTEXCOORD2IVPROC glad_glTexCoord2iv;
+#define glTexCoord2iv glad_glTexCoord2iv
+GLAD_API_CALL PFNGLTEXCOORD2SPROC glad_glTexCoord2s;
+#define glTexCoord2s glad_glTexCoord2s
+GLAD_API_CALL PFNGLTEXCOORD2SVPROC glad_glTexCoord2sv;
+#define glTexCoord2sv glad_glTexCoord2sv
+GLAD_API_CALL PFNGLTEXCOORD3DPROC glad_glTexCoord3d;
+#define glTexCoord3d glad_glTexCoord3d
+GLAD_API_CALL PFNGLTEXCOORD3DVPROC glad_glTexCoord3dv;
+#define glTexCoord3dv glad_glTexCoord3dv
+GLAD_API_CALL PFNGLTEXCOORD3FPROC glad_glTexCoord3f;
+#define glTexCoord3f glad_glTexCoord3f
+GLAD_API_CALL PFNGLTEXCOORD3FVPROC glad_glTexCoord3fv;
+#define glTexCoord3fv glad_glTexCoord3fv
+GLAD_API_CALL PFNGLTEXCOORD3IPROC glad_glTexCoord3i;
+#define glTexCoord3i glad_glTexCoord3i
+GLAD_API_CALL PFNGLTEXCOORD3IVPROC glad_glTexCoord3iv;
+#define glTexCoord3iv glad_glTexCoord3iv
+GLAD_API_CALL PFNGLTEXCOORD3SPROC glad_glTexCoord3s;
+#define glTexCoord3s glad_glTexCoord3s
+GLAD_API_CALL PFNGLTEXCOORD3SVPROC glad_glTexCoord3sv;
+#define glTexCoord3sv glad_glTexCoord3sv
+GLAD_API_CALL PFNGLTEXCOORD4DPROC glad_glTexCoord4d;
+#define glTexCoord4d glad_glTexCoord4d
+GLAD_API_CALL PFNGLTEXCOORD4DVPROC glad_glTexCoord4dv;
+#define glTexCoord4dv glad_glTexCoord4dv
+GLAD_API_CALL PFNGLTEXCOORD4FPROC glad_glTexCoord4f;
+#define glTexCoord4f glad_glTexCoord4f
+GLAD_API_CALL PFNGLTEXCOORD4FVPROC glad_glTexCoord4fv;
+#define glTexCoord4fv glad_glTexCoord4fv
+GLAD_API_CALL PFNGLTEXCOORD4IPROC glad_glTexCoord4i;
+#define glTexCoord4i glad_glTexCoord4i
+GLAD_API_CALL PFNGLTEXCOORD4IVPROC glad_glTexCoord4iv;
+#define glTexCoord4iv glad_glTexCoord4iv
+GLAD_API_CALL PFNGLTEXCOORD4SPROC glad_glTexCoord4s;
+#define glTexCoord4s glad_glTexCoord4s
+GLAD_API_CALL PFNGLTEXCOORD4SVPROC glad_glTexCoord4sv;
+#define glTexCoord4sv glad_glTexCoord4sv
+GLAD_API_CALL PFNGLTEXCOORDP1UIPROC glad_glTexCoordP1ui;
+#define glTexCoordP1ui glad_glTexCoordP1ui
+GLAD_API_CALL PFNGLTEXCOORDP1UIVPROC glad_glTexCoordP1uiv;
+#define glTexCoordP1uiv glad_glTexCoordP1uiv
+GLAD_API_CALL PFNGLTEXCOORDP2UIPROC glad_glTexCoordP2ui;
+#define glTexCoordP2ui glad_glTexCoordP2ui
+GLAD_API_CALL PFNGLTEXCOORDP2UIVPROC glad_glTexCoordP2uiv;
+#define glTexCoordP2uiv glad_glTexCoordP2uiv
+GLAD_API_CALL PFNGLTEXCOORDP3UIPROC glad_glTexCoordP3ui;
+#define glTexCoordP3ui glad_glTexCoordP3ui
+GLAD_API_CALL PFNGLTEXCOORDP3UIVPROC glad_glTexCoordP3uiv;
+#define glTexCoordP3uiv glad_glTexCoordP3uiv
+GLAD_API_CALL PFNGLTEXCOORDP4UIPROC glad_glTexCoordP4ui;
+#define glTexCoordP4ui glad_glTexCoordP4ui
+GLAD_API_CALL PFNGLTEXCOORDP4UIVPROC glad_glTexCoordP4uiv;
+#define glTexCoordP4uiv glad_glTexCoordP4uiv
+GLAD_API_CALL PFNGLTEXCOORDPOINTERPROC glad_glTexCoordPointer;
+#define glTexCoordPointer glad_glTexCoordPointer
+GLAD_API_CALL PFNGLTEXENVFPROC glad_glTexEnvf;
+#define glTexEnvf glad_glTexEnvf
+GLAD_API_CALL PFNGLTEXENVFVPROC glad_glTexEnvfv;
+#define glTexEnvfv glad_glTexEnvfv
+GLAD_API_CALL PFNGLTEXENVIPROC glad_glTexEnvi;
+#define glTexEnvi glad_glTexEnvi
+GLAD_API_CALL PFNGLTEXENVIVPROC glad_glTexEnviv;
+#define glTexEnviv glad_glTexEnviv
+GLAD_API_CALL PFNGLTEXGENDPROC glad_glTexGend;
+#define glTexGend glad_glTexGend
+GLAD_API_CALL PFNGLTEXGENDVPROC glad_glTexGendv;
+#define glTexGendv glad_glTexGendv
+GLAD_API_CALL PFNGLTEXGENFPROC glad_glTexGenf;
+#define glTexGenf glad_glTexGenf
+GLAD_API_CALL PFNGLTEXGENFVPROC glad_glTexGenfv;
+#define glTexGenfv glad_glTexGenfv
+GLAD_API_CALL PFNGLTEXGENIPROC glad_glTexGeni;
+#define glTexGeni glad_glTexGeni
+GLAD_API_CALL PFNGLTEXGENIVPROC glad_glTexGeniv;
+#define glTexGeniv glad_glTexGeniv
+GLAD_API_CALL PFNGLTEXIMAGE1DPROC glad_glTexImage1D;
+#define glTexImage1D glad_glTexImage1D
+GLAD_API_CALL PFNGLTEXIMAGE2DPROC glad_glTexImage2D;
+#define glTexImage2D glad_glTexImage2D
+GLAD_API_CALL PFNGLTEXIMAGE2DMULTISAMPLEPROC glad_glTexImage2DMultisample;
+#define glTexImage2DMultisample glad_glTexImage2DMultisample
+GLAD_API_CALL PFNGLTEXIMAGE3DPROC glad_glTexImage3D;
+#define glTexImage3D glad_glTexImage3D
+GLAD_API_CALL PFNGLTEXIMAGE3DMULTISAMPLEPROC glad_glTexImage3DMultisample;
+#define glTexImage3DMultisample glad_glTexImage3DMultisample
+GLAD_API_CALL PFNGLTEXPARAMETERIIVPROC glad_glTexParameterIiv;
+#define glTexParameterIiv glad_glTexParameterIiv
+GLAD_API_CALL PFNGLTEXPARAMETERIUIVPROC glad_glTexParameterIuiv;
+#define glTexParameterIuiv glad_glTexParameterIuiv
+GLAD_API_CALL PFNGLTEXPARAMETERFPROC glad_glTexParameterf;
+#define glTexParameterf glad_glTexParameterf
+GLAD_API_CALL PFNGLTEXPARAMETERFVPROC glad_glTexParameterfv;
+#define glTexParameterfv glad_glTexParameterfv
+GLAD_API_CALL PFNGLTEXPARAMETERIPROC glad_glTexParameteri;
+#define glTexParameteri glad_glTexParameteri
+GLAD_API_CALL PFNGLTEXPARAMETERIVPROC glad_glTexParameteriv;
+#define glTexParameteriv glad_glTexParameteriv
+GLAD_API_CALL PFNGLTEXSUBIMAGE1DPROC glad_glTexSubImage1D;
+#define glTexSubImage1D glad_glTexSubImage1D
+GLAD_API_CALL PFNGLTEXSUBIMAGE2DPROC glad_glTexSubImage2D;
+#define glTexSubImage2D glad_glTexSubImage2D
+GLAD_API_CALL PFNGLTEXSUBIMAGE3DPROC glad_glTexSubImage3D;
+#define glTexSubImage3D glad_glTexSubImage3D
+GLAD_API_CALL PFNGLTRANSFORMFEEDBACKVARYINGSPROC glad_glTransformFeedbackVaryings;
+#define glTransformFeedbackVaryings glad_glTransformFeedbackVaryings
+GLAD_API_CALL PFNGLTRANSLATEDPROC glad_glTranslated;
+#define glTranslated glad_glTranslated
+GLAD_API_CALL PFNGLTRANSLATEFPROC glad_glTranslatef;
+#define glTranslatef glad_glTranslatef
+GLAD_API_CALL PFNGLUNIFORM1FPROC glad_glUniform1f;
+#define glUniform1f glad_glUniform1f
+GLAD_API_CALL PFNGLUNIFORM1FVPROC glad_glUniform1fv;
+#define glUniform1fv glad_glUniform1fv
+GLAD_API_CALL PFNGLUNIFORM1IPROC glad_glUniform1i;
+#define glUniform1i glad_glUniform1i
+GLAD_API_CALL PFNGLUNIFORM1IVPROC glad_glUniform1iv;
+#define glUniform1iv glad_glUniform1iv
+GLAD_API_CALL PFNGLUNIFORM1UIPROC glad_glUniform1ui;
+#define glUniform1ui glad_glUniform1ui
+GLAD_API_CALL PFNGLUNIFORM1UIVPROC glad_glUniform1uiv;
+#define glUniform1uiv glad_glUniform1uiv
+GLAD_API_CALL PFNGLUNIFORM2FPROC glad_glUniform2f;
+#define glUniform2f glad_glUniform2f
+GLAD_API_CALL PFNGLUNIFORM2FVPROC glad_glUniform2fv;
+#define glUniform2fv glad_glUniform2fv
+GLAD_API_CALL PFNGLUNIFORM2IPROC glad_glUniform2i;
+#define glUniform2i glad_glUniform2i
+GLAD_API_CALL PFNGLUNIFORM2IVPROC glad_glUniform2iv;
+#define glUniform2iv glad_glUniform2iv
+GLAD_API_CALL PFNGLUNIFORM2UIPROC glad_glUniform2ui;
+#define glUniform2ui glad_glUniform2ui
+GLAD_API_CALL PFNGLUNIFORM2UIVPROC glad_glUniform2uiv;
+#define glUniform2uiv glad_glUniform2uiv
+GLAD_API_CALL PFNGLUNIFORM3FPROC glad_glUniform3f;
+#define glUniform3f glad_glUniform3f
+GLAD_API_CALL PFNGLUNIFORM3FVPROC glad_glUniform3fv;
+#define glUniform3fv glad_glUniform3fv
+GLAD_API_CALL PFNGLUNIFORM3IPROC glad_glUniform3i;
+#define glUniform3i glad_glUniform3i
+GLAD_API_CALL PFNGLUNIFORM3IVPROC glad_glUniform3iv;
+#define glUniform3iv glad_glUniform3iv
+GLAD_API_CALL PFNGLUNIFORM3UIPROC glad_glUniform3ui;
+#define glUniform3ui glad_glUniform3ui
+GLAD_API_CALL PFNGLUNIFORM3UIVPROC glad_glUniform3uiv;
+#define glUniform3uiv glad_glUniform3uiv
+GLAD_API_CALL PFNGLUNIFORM4FPROC glad_glUniform4f;
+#define glUniform4f glad_glUniform4f
+GLAD_API_CALL PFNGLUNIFORM4FVPROC glad_glUniform4fv;
+#define glUniform4fv glad_glUniform4fv
+GLAD_API_CALL PFNGLUNIFORM4IPROC glad_glUniform4i;
+#define glUniform4i glad_glUniform4i
+GLAD_API_CALL PFNGLUNIFORM4IVPROC glad_glUniform4iv;
+#define glUniform4iv glad_glUniform4iv
+GLAD_API_CALL PFNGLUNIFORM4UIPROC glad_glUniform4ui;
+#define glUniform4ui glad_glUniform4ui
+GLAD_API_CALL PFNGLUNIFORM4UIVPROC glad_glUniform4uiv;
+#define glUniform4uiv glad_glUniform4uiv
+GLAD_API_CALL PFNGLUNIFORMBLOCKBINDINGPROC glad_glUniformBlockBinding;
+#define glUniformBlockBinding glad_glUniformBlockBinding
+GLAD_API_CALL PFNGLUNIFORMMATRIX2FVPROC glad_glUniformMatrix2fv;
+#define glUniformMatrix2fv glad_glUniformMatrix2fv
+GLAD_API_CALL PFNGLUNIFORMMATRIX2X3FVPROC glad_glUniformMatrix2x3fv;
+#define glUniformMatrix2x3fv glad_glUniformMatrix2x3fv
+GLAD_API_CALL PFNGLUNIFORMMATRIX2X4FVPROC glad_glUniformMatrix2x4fv;
+#define glUniformMatrix2x4fv glad_glUniformMatrix2x4fv
+GLAD_API_CALL PFNGLUNIFORMMATRIX3FVPROC glad_glUniformMatrix3fv;
+#define glUniformMatrix3fv glad_glUniformMatrix3fv
+GLAD_API_CALL PFNGLUNIFORMMATRIX3X2FVPROC glad_glUniformMatrix3x2fv;
+#define glUniformMatrix3x2fv glad_glUniformMatrix3x2fv
+GLAD_API_CALL PFNGLUNIFORMMATRIX3X4FVPROC glad_glUniformMatrix3x4fv;
+#define glUniformMatrix3x4fv glad_glUniformMatrix3x4fv
+GLAD_API_CALL PFNGLUNIFORMMATRIX4FVPROC glad_glUniformMatrix4fv;
+#define glUniformMatrix4fv glad_glUniformMatrix4fv
+GLAD_API_CALL PFNGLUNIFORMMATRIX4X2FVPROC glad_glUniformMatrix4x2fv;
+#define glUniformMatrix4x2fv glad_glUniformMatrix4x2fv
+GLAD_API_CALL PFNGLUNIFORMMATRIX4X3FVPROC glad_glUniformMatrix4x3fv;
+#define glUniformMatrix4x3fv glad_glUniformMatrix4x3fv
+GLAD_API_CALL PFNGLUNMAPBUFFERPROC glad_glUnmapBuffer;
+#define glUnmapBuffer glad_glUnmapBuffer
+GLAD_API_CALL PFNGLUSEPROGRAMPROC glad_glUseProgram;
+#define glUseProgram glad_glUseProgram
+GLAD_API_CALL PFNGLVALIDATEPROGRAMPROC glad_glValidateProgram;
+#define glValidateProgram glad_glValidateProgram
+GLAD_API_CALL PFNGLVERTEX2DPROC glad_glVertex2d;
+#define glVertex2d glad_glVertex2d
+GLAD_API_CALL PFNGLVERTEX2DVPROC glad_glVertex2dv;
+#define glVertex2dv glad_glVertex2dv
+GLAD_API_CALL PFNGLVERTEX2FPROC glad_glVertex2f;
+#define glVertex2f glad_glVertex2f
+GLAD_API_CALL PFNGLVERTEX2FVPROC glad_glVertex2fv;
+#define glVertex2fv glad_glVertex2fv
+GLAD_API_CALL PFNGLVERTEX2IPROC glad_glVertex2i;
+#define glVertex2i glad_glVertex2i
+GLAD_API_CALL PFNGLVERTEX2IVPROC glad_glVertex2iv;
+#define glVertex2iv glad_glVertex2iv
+GLAD_API_CALL PFNGLVERTEX2SPROC glad_glVertex2s;
+#define glVertex2s glad_glVertex2s
+GLAD_API_CALL PFNGLVERTEX2SVPROC glad_glVertex2sv;
+#define glVertex2sv glad_glVertex2sv
+GLAD_API_CALL PFNGLVERTEX3DPROC glad_glVertex3d;
+#define glVertex3d glad_glVertex3d
+GLAD_API_CALL PFNGLVERTEX3DVPROC glad_glVertex3dv;
+#define glVertex3dv glad_glVertex3dv
+GLAD_API_CALL PFNGLVERTEX3FPROC glad_glVertex3f;
+#define glVertex3f glad_glVertex3f
+GLAD_API_CALL PFNGLVERTEX3FVPROC glad_glVertex3fv;
+#define glVertex3fv glad_glVertex3fv
+GLAD_API_CALL PFNGLVERTEX3IPROC glad_glVertex3i;
+#define glVertex3i glad_glVertex3i
+GLAD_API_CALL PFNGLVERTEX3IVPROC glad_glVertex3iv;
+#define glVertex3iv glad_glVertex3iv
+GLAD_API_CALL PFNGLVERTEX3SPROC glad_glVertex3s;
+#define glVertex3s glad_glVertex3s
+GLAD_API_CALL PFNGLVERTEX3SVPROC glad_glVertex3sv;
+#define glVertex3sv glad_glVertex3sv
+GLAD_API_CALL PFNGLVERTEX4DPROC glad_glVertex4d;
+#define glVertex4d glad_glVertex4d
+GLAD_API_CALL PFNGLVERTEX4DVPROC glad_glVertex4dv;
+#define glVertex4dv glad_glVertex4dv
+GLAD_API_CALL PFNGLVERTEX4FPROC glad_glVertex4f;
+#define glVertex4f glad_glVertex4f
+GLAD_API_CALL PFNGLVERTEX4FVPROC glad_glVertex4fv;
+#define glVertex4fv glad_glVertex4fv
+GLAD_API_CALL PFNGLVERTEX4IPROC glad_glVertex4i;
+#define glVertex4i glad_glVertex4i
+GLAD_API_CALL PFNGLVERTEX4IVPROC glad_glVertex4iv;
+#define glVertex4iv glad_glVertex4iv
+GLAD_API_CALL PFNGLVERTEX4SPROC glad_glVertex4s;
+#define glVertex4s glad_glVertex4s
+GLAD_API_CALL PFNGLVERTEX4SVPROC glad_glVertex4sv;
+#define glVertex4sv glad_glVertex4sv
+GLAD_API_CALL PFNGLVERTEXATTRIB1DPROC glad_glVertexAttrib1d;
+#define glVertexAttrib1d glad_glVertexAttrib1d
+GLAD_API_CALL PFNGLVERTEXATTRIB1DVPROC glad_glVertexAttrib1dv;
+#define glVertexAttrib1dv glad_glVertexAttrib1dv
+GLAD_API_CALL PFNGLVERTEXATTRIB1FPROC glad_glVertexAttrib1f;
+#define glVertexAttrib1f glad_glVertexAttrib1f
+GLAD_API_CALL PFNGLVERTEXATTRIB1FVPROC glad_glVertexAttrib1fv;
+#define glVertexAttrib1fv glad_glVertexAttrib1fv
+GLAD_API_CALL PFNGLVERTEXATTRIB1SPROC glad_glVertexAttrib1s;
+#define glVertexAttrib1s glad_glVertexAttrib1s
+GLAD_API_CALL PFNGLVERTEXATTRIB1SVPROC glad_glVertexAttrib1sv;
+#define glVertexAttrib1sv glad_glVertexAttrib1sv
+GLAD_API_CALL PFNGLVERTEXATTRIB2DPROC glad_glVertexAttrib2d;
+#define glVertexAttrib2d glad_glVertexAttrib2d
+GLAD_API_CALL PFNGLVERTEXATTRIB2DVPROC glad_glVertexAttrib2dv;
+#define glVertexAttrib2dv glad_glVertexAttrib2dv
+GLAD_API_CALL PFNGLVERTEXATTRIB2FPROC glad_glVertexAttrib2f;
+#define glVertexAttrib2f glad_glVertexAttrib2f
+GLAD_API_CALL PFNGLVERTEXATTRIB2FVPROC glad_glVertexAttrib2fv;
+#define glVertexAttrib2fv glad_glVertexAttrib2fv
+GLAD_API_CALL PFNGLVERTEXATTRIB2SPROC glad_glVertexAttrib2s;
+#define glVertexAttrib2s glad_glVertexAttrib2s
+GLAD_API_CALL PFNGLVERTEXATTRIB2SVPROC glad_glVertexAttrib2sv;
+#define glVertexAttrib2sv glad_glVertexAttrib2sv
+GLAD_API_CALL PFNGLVERTEXATTRIB3DPROC glad_glVertexAttrib3d;
+#define glVertexAttrib3d glad_glVertexAttrib3d
+GLAD_API_CALL PFNGLVERTEXATTRIB3DVPROC glad_glVertexAttrib3dv;
+#define glVertexAttrib3dv glad_glVertexAttrib3dv
+GLAD_API_CALL PFNGLVERTEXATTRIB3FPROC glad_glVertexAttrib3f;
+#define glVertexAttrib3f glad_glVertexAttrib3f
+GLAD_API_CALL PFNGLVERTEXATTRIB3FVPROC glad_glVertexAttrib3fv;
+#define glVertexAttrib3fv glad_glVertexAttrib3fv
+GLAD_API_CALL PFNGLVERTEXATTRIB3SPROC glad_glVertexAttrib3s;
+#define glVertexAttrib3s glad_glVertexAttrib3s
+GLAD_API_CALL PFNGLVERTEXATTRIB3SVPROC glad_glVertexAttrib3sv;
+#define glVertexAttrib3sv glad_glVertexAttrib3sv
+GLAD_API_CALL PFNGLVERTEXATTRIB4NBVPROC glad_glVertexAttrib4Nbv;
+#define glVertexAttrib4Nbv glad_glVertexAttrib4Nbv
+GLAD_API_CALL PFNGLVERTEXATTRIB4NIVPROC glad_glVertexAttrib4Niv;
+#define glVertexAttrib4Niv glad_glVertexAttrib4Niv
+GLAD_API_CALL PFNGLVERTEXATTRIB4NSVPROC glad_glVertexAttrib4Nsv;
+#define glVertexAttrib4Nsv glad_glVertexAttrib4Nsv
+GLAD_API_CALL PFNGLVERTEXATTRIB4NUBPROC glad_glVertexAttrib4Nub;
+#define glVertexAttrib4Nub glad_glVertexAttrib4Nub
+GLAD_API_CALL PFNGLVERTEXATTRIB4NUBVPROC glad_glVertexAttrib4Nubv;
+#define glVertexAttrib4Nubv glad_glVertexAttrib4Nubv
+GLAD_API_CALL PFNGLVERTEXATTRIB4NUIVPROC glad_glVertexAttrib4Nuiv;
+#define glVertexAttrib4Nuiv glad_glVertexAttrib4Nuiv
+GLAD_API_CALL PFNGLVERTEXATTRIB4NUSVPROC glad_glVertexAttrib4Nusv;
+#define glVertexAttrib4Nusv glad_glVertexAttrib4Nusv
+GLAD_API_CALL PFNGLVERTEXATTRIB4BVPROC glad_glVertexAttrib4bv;
+#define glVertexAttrib4bv glad_glVertexAttrib4bv
+GLAD_API_CALL PFNGLVERTEXATTRIB4DPROC glad_glVertexAttrib4d;
+#define glVertexAttrib4d glad_glVertexAttrib4d
+GLAD_API_CALL PFNGLVERTEXATTRIB4DVPROC glad_glVertexAttrib4dv;
+#define glVertexAttrib4dv glad_glVertexAttrib4dv
+GLAD_API_CALL PFNGLVERTEXATTRIB4FPROC glad_glVertexAttrib4f;
+#define glVertexAttrib4f glad_glVertexAttrib4f
+GLAD_API_CALL PFNGLVERTEXATTRIB4FVPROC glad_glVertexAttrib4fv;
+#define glVertexAttrib4fv glad_glVertexAttrib4fv
+GLAD_API_CALL PFNGLVERTEXATTRIB4IVPROC glad_glVertexAttrib4iv;
+#define glVertexAttrib4iv glad_glVertexAttrib4iv
+GLAD_API_CALL PFNGLVERTEXATTRIB4SPROC glad_glVertexAttrib4s;
+#define glVertexAttrib4s glad_glVertexAttrib4s
+GLAD_API_CALL PFNGLVERTEXATTRIB4SVPROC glad_glVertexAttrib4sv;
+#define glVertexAttrib4sv glad_glVertexAttrib4sv
+GLAD_API_CALL PFNGLVERTEXATTRIB4UBVPROC glad_glVertexAttrib4ubv;
+#define glVertexAttrib4ubv glad_glVertexAttrib4ubv
+GLAD_API_CALL PFNGLVERTEXATTRIB4UIVPROC glad_glVertexAttrib4uiv;
+#define glVertexAttrib4uiv glad_glVertexAttrib4uiv
+GLAD_API_CALL PFNGLVERTEXATTRIB4USVPROC glad_glVertexAttrib4usv;
+#define glVertexAttrib4usv glad_glVertexAttrib4usv
+GLAD_API_CALL PFNGLVERTEXATTRIBDIVISORPROC glad_glVertexAttribDivisor;
+#define glVertexAttribDivisor glad_glVertexAttribDivisor
+GLAD_API_CALL PFNGLVERTEXATTRIBI1IPROC glad_glVertexAttribI1i;
+#define glVertexAttribI1i glad_glVertexAttribI1i
+GLAD_API_CALL PFNGLVERTEXATTRIBI1IVPROC glad_glVertexAttribI1iv;
+#define glVertexAttribI1iv glad_glVertexAttribI1iv
+GLAD_API_CALL PFNGLVERTEXATTRIBI1UIPROC glad_glVertexAttribI1ui;
+#define glVertexAttribI1ui glad_glVertexAttribI1ui
+GLAD_API_CALL PFNGLVERTEXATTRIBI1UIVPROC glad_glVertexAttribI1uiv;
+#define glVertexAttribI1uiv glad_glVertexAttribI1uiv
+GLAD_API_CALL PFNGLVERTEXATTRIBI2IPROC glad_glVertexAttribI2i;
+#define glVertexAttribI2i glad_glVertexAttribI2i
+GLAD_API_CALL PFNGLVERTEXATTRIBI2IVPROC glad_glVertexAttribI2iv;
+#define glVertexAttribI2iv glad_glVertexAttribI2iv
+GLAD_API_CALL PFNGLVERTEXATTRIBI2UIPROC glad_glVertexAttribI2ui;
+#define glVertexAttribI2ui glad_glVertexAttribI2ui
+GLAD_API_CALL PFNGLVERTEXATTRIBI2UIVPROC glad_glVertexAttribI2uiv;
+#define glVertexAttribI2uiv glad_glVertexAttribI2uiv
+GLAD_API_CALL PFNGLVERTEXATTRIBI3IPROC glad_glVertexAttribI3i;
+#define glVertexAttribI3i glad_glVertexAttribI3i
+GLAD_API_CALL PFNGLVERTEXATTRIBI3IVPROC glad_glVertexAttribI3iv;
+#define glVertexAttribI3iv glad_glVertexAttribI3iv
+GLAD_API_CALL PFNGLVERTEXATTRIBI3UIPROC glad_glVertexAttribI3ui;
+#define glVertexAttribI3ui glad_glVertexAttribI3ui
+GLAD_API_CALL PFNGLVERTEXATTRIBI3UIVPROC glad_glVertexAttribI3uiv;
+#define glVertexAttribI3uiv glad_glVertexAttribI3uiv
+GLAD_API_CALL PFNGLVERTEXATTRIBI4BVPROC glad_glVertexAttribI4bv;
+#define glVertexAttribI4bv glad_glVertexAttribI4bv
+GLAD_API_CALL PFNGLVERTEXATTRIBI4IPROC glad_glVertexAttribI4i;
+#define glVertexAttribI4i glad_glVertexAttribI4i
+GLAD_API_CALL PFNGLVERTEXATTRIBI4IVPROC glad_glVertexAttribI4iv;
+#define glVertexAttribI4iv glad_glVertexAttribI4iv
+GLAD_API_CALL PFNGLVERTEXATTRIBI4SVPROC glad_glVertexAttribI4sv;
+#define glVertexAttribI4sv glad_glVertexAttribI4sv
+GLAD_API_CALL PFNGLVERTEXATTRIBI4UBVPROC glad_glVertexAttribI4ubv;
+#define glVertexAttribI4ubv glad_glVertexAttribI4ubv
+GLAD_API_CALL PFNGLVERTEXATTRIBI4UIPROC glad_glVertexAttribI4ui;
+#define glVertexAttribI4ui glad_glVertexAttribI4ui
+GLAD_API_CALL PFNGLVERTEXATTRIBI4UIVPROC glad_glVertexAttribI4uiv;
+#define glVertexAttribI4uiv glad_glVertexAttribI4uiv
+GLAD_API_CALL PFNGLVERTEXATTRIBI4USVPROC glad_glVertexAttribI4usv;
+#define glVertexAttribI4usv glad_glVertexAttribI4usv
+GLAD_API_CALL PFNGLVERTEXATTRIBIPOINTERPROC glad_glVertexAttribIPointer;
+#define glVertexAttribIPointer glad_glVertexAttribIPointer
+GLAD_API_CALL PFNGLVERTEXATTRIBP1UIPROC glad_glVertexAttribP1ui;
+#define glVertexAttribP1ui glad_glVertexAttribP1ui
+GLAD_API_CALL PFNGLVERTEXATTRIBP1UIVPROC glad_glVertexAttribP1uiv;
+#define glVertexAttribP1uiv glad_glVertexAttribP1uiv
+GLAD_API_CALL PFNGLVERTEXATTRIBP2UIPROC glad_glVertexAttribP2ui;
+#define glVertexAttribP2ui glad_glVertexAttribP2ui
+GLAD_API_CALL PFNGLVERTEXATTRIBP2UIVPROC glad_glVertexAttribP2uiv;
+#define glVertexAttribP2uiv glad_glVertexAttribP2uiv
+GLAD_API_CALL PFNGLVERTEXATTRIBP3UIPROC glad_glVertexAttribP3ui;
+#define glVertexAttribP3ui glad_glVertexAttribP3ui
+GLAD_API_CALL PFNGLVERTEXATTRIBP3UIVPROC glad_glVertexAttribP3uiv;
+#define glVertexAttribP3uiv glad_glVertexAttribP3uiv
+GLAD_API_CALL PFNGLVERTEXATTRIBP4UIPROC glad_glVertexAttribP4ui;
+#define glVertexAttribP4ui glad_glVertexAttribP4ui
+GLAD_API_CALL PFNGLVERTEXATTRIBP4UIVPROC glad_glVertexAttribP4uiv;
+#define glVertexAttribP4uiv glad_glVertexAttribP4uiv
+GLAD_API_CALL PFNGLVERTEXATTRIBPOINTERPROC glad_glVertexAttribPointer;
+#define glVertexAttribPointer glad_glVertexAttribPointer
+GLAD_API_CALL PFNGLVERTEXP2UIPROC glad_glVertexP2ui;
+#define glVertexP2ui glad_glVertexP2ui
+GLAD_API_CALL PFNGLVERTEXP2UIVPROC glad_glVertexP2uiv;
+#define glVertexP2uiv glad_glVertexP2uiv
+GLAD_API_CALL PFNGLVERTEXP3UIPROC glad_glVertexP3ui;
+#define glVertexP3ui glad_glVertexP3ui
+GLAD_API_CALL PFNGLVERTEXP3UIVPROC glad_glVertexP3uiv;
+#define glVertexP3uiv glad_glVertexP3uiv
+GLAD_API_CALL PFNGLVERTEXP4UIPROC glad_glVertexP4ui;
+#define glVertexP4ui glad_glVertexP4ui
+GLAD_API_CALL PFNGLVERTEXP4UIVPROC glad_glVertexP4uiv;
+#define glVertexP4uiv glad_glVertexP4uiv
+GLAD_API_CALL PFNGLVERTEXPOINTERPROC glad_glVertexPointer;
+#define glVertexPointer glad_glVertexPointer
+GLAD_API_CALL PFNGLVIEWPORTPROC glad_glViewport;
+#define glViewport glad_glViewport
+GLAD_API_CALL PFNGLWAITSYNCPROC glad_glWaitSync;
+#define glWaitSync glad_glWaitSync
+GLAD_API_CALL PFNGLWINDOWPOS2DPROC glad_glWindowPos2d;
+#define glWindowPos2d glad_glWindowPos2d
+GLAD_API_CALL PFNGLWINDOWPOS2DVPROC glad_glWindowPos2dv;
+#define glWindowPos2dv glad_glWindowPos2dv
+GLAD_API_CALL PFNGLWINDOWPOS2FPROC glad_glWindowPos2f;
+#define glWindowPos2f glad_glWindowPos2f
+GLAD_API_CALL PFNGLWINDOWPOS2FVPROC glad_glWindowPos2fv;
+#define glWindowPos2fv glad_glWindowPos2fv
+GLAD_API_CALL PFNGLWINDOWPOS2IPROC glad_glWindowPos2i;
+#define glWindowPos2i glad_glWindowPos2i
+GLAD_API_CALL PFNGLWINDOWPOS2IVPROC glad_glWindowPos2iv;
+#define glWindowPos2iv glad_glWindowPos2iv
+GLAD_API_CALL PFNGLWINDOWPOS2SPROC glad_glWindowPos2s;
+#define glWindowPos2s glad_glWindowPos2s
+GLAD_API_CALL PFNGLWINDOWPOS2SVPROC glad_glWindowPos2sv;
+#define glWindowPos2sv glad_glWindowPos2sv
+GLAD_API_CALL PFNGLWINDOWPOS3DPROC glad_glWindowPos3d;
+#define glWindowPos3d glad_glWindowPos3d
+GLAD_API_CALL PFNGLWINDOWPOS3DVPROC glad_glWindowPos3dv;
+#define glWindowPos3dv glad_glWindowPos3dv
+GLAD_API_CALL PFNGLWINDOWPOS3FPROC glad_glWindowPos3f;
+#define glWindowPos3f glad_glWindowPos3f
+GLAD_API_CALL PFNGLWINDOWPOS3FVPROC glad_glWindowPos3fv;
+#define glWindowPos3fv glad_glWindowPos3fv
+GLAD_API_CALL PFNGLWINDOWPOS3IPROC glad_glWindowPos3i;
+#define glWindowPos3i glad_glWindowPos3i
+GLAD_API_CALL PFNGLWINDOWPOS3IVPROC glad_glWindowPos3iv;
+#define glWindowPos3iv glad_glWindowPos3iv
+GLAD_API_CALL PFNGLWINDOWPOS3SPROC glad_glWindowPos3s;
+#define glWindowPos3s glad_glWindowPos3s
+GLAD_API_CALL PFNGLWINDOWPOS3SVPROC glad_glWindowPos3sv;
+#define glWindowPos3sv glad_glWindowPos3sv
+
+
+
+
+
+GLAD_API_CALL int gladLoadGLUserPtr( GLADuserptrloadfunc load, void *userptr);
+GLAD_API_CALL int gladLoadGL( GLADloadfunc load);
+
+
+
+#ifdef __cplusplus
+}
+#endif
+#endif
+
+/* Source */
+#ifdef GLAD_GL_IMPLEMENTATION
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#ifndef GLAD_IMPL_UTIL_C_
+#define GLAD_IMPL_UTIL_C_
+
+#ifdef _MSC_VER
+#define GLAD_IMPL_UTIL_SSCANF sscanf_s
+#else
+#define GLAD_IMPL_UTIL_SSCANF sscanf
+#endif
+
+#endif /* GLAD_IMPL_UTIL_C_ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+
+int GLAD_GL_VERSION_1_0 = 0;
+int GLAD_GL_VERSION_1_1 = 0;
+int GLAD_GL_VERSION_1_2 = 0;
+int GLAD_GL_VERSION_1_3 = 0;
+int GLAD_GL_VERSION_1_4 = 0;
+int GLAD_GL_VERSION_1_5 = 0;
+int GLAD_GL_VERSION_2_0 = 0;
+int GLAD_GL_VERSION_2_1 = 0;
+int GLAD_GL_VERSION_3_0 = 0;
+int GLAD_GL_VERSION_3_1 = 0;
+int GLAD_GL_VERSION_3_2 = 0;
+int GLAD_GL_VERSION_3_3 = 0;
+int GLAD_GL_ARB_multisample = 0;
+int GLAD_GL_ARB_robustness = 0;
+int GLAD_GL_KHR_debug = 0;
+
+
+
+PFNGLACCUMPROC glad_glAccum = NULL;
+PFNGLACTIVETEXTUREPROC glad_glActiveTexture = NULL;
+PFNGLALPHAFUNCPROC glad_glAlphaFunc = NULL;
+PFNGLARETEXTURESRESIDENTPROC glad_glAreTexturesResident = NULL;
+PFNGLARRAYELEMENTPROC glad_glArrayElement = NULL;
+PFNGLATTACHSHADERPROC glad_glAttachShader = NULL;
+PFNGLBEGINPROC glad_glBegin = NULL;
+PFNGLBEGINCONDITIONALRENDERPROC glad_glBeginConditionalRender = NULL;
+PFNGLBEGINQUERYPROC glad_glBeginQuery = NULL;
+PFNGLBEGINTRANSFORMFEEDBACKPROC glad_glBeginTransformFeedback = NULL;
+PFNGLBINDATTRIBLOCATIONPROC glad_glBindAttribLocation = NULL;
+PFNGLBINDBUFFERPROC glad_glBindBuffer = NULL;
+PFNGLBINDBUFFERBASEPROC glad_glBindBufferBase = NULL;
+PFNGLBINDBUFFERRANGEPROC glad_glBindBufferRange = NULL;
+PFNGLBINDFRAGDATALOCATIONPROC glad_glBindFragDataLocation = NULL;
+PFNGLBINDFRAGDATALOCATIONINDEXEDPROC glad_glBindFragDataLocationIndexed = NULL;
+PFNGLBINDFRAMEBUFFERPROC glad_glBindFramebuffer = NULL;
+PFNGLBINDRENDERBUFFERPROC glad_glBindRenderbuffer = NULL;
+PFNGLBINDSAMPLERPROC glad_glBindSampler = NULL;
+PFNGLBINDTEXTUREPROC glad_glBindTexture = NULL;
+PFNGLBINDVERTEXARRAYPROC glad_glBindVertexArray = NULL;
+PFNGLBITMAPPROC glad_glBitmap = NULL;
+PFNGLBLENDCOLORPROC glad_glBlendColor = NULL;
+PFNGLBLENDEQUATIONPROC glad_glBlendEquation = NULL;
+PFNGLBLENDEQUATIONSEPARATEPROC glad_glBlendEquationSeparate = NULL;
+PFNGLBLENDFUNCPROC glad_glBlendFunc = NULL;
+PFNGLBLENDFUNCSEPARATEPROC glad_glBlendFuncSeparate = NULL;
+PFNGLBLITFRAMEBUFFERPROC glad_glBlitFramebuffer = NULL;
+PFNGLBUFFERDATAPROC glad_glBufferData = NULL;
+PFNGLBUFFERSUBDATAPROC glad_glBufferSubData = NULL;
+PFNGLCALLLISTPROC glad_glCallList = NULL;
+PFNGLCALLLISTSPROC glad_glCallLists = NULL;
+PFNGLCHECKFRAMEBUFFERSTATUSPROC glad_glCheckFramebufferStatus = NULL;
+PFNGLCLAMPCOLORPROC glad_glClampColor = NULL;
+PFNGLCLEARPROC glad_glClear = NULL;
+PFNGLCLEARACCUMPROC glad_glClearAccum = NULL;
+PFNGLCLEARBUFFERFIPROC glad_glClearBufferfi = NULL;
+PFNGLCLEARBUFFERFVPROC glad_glClearBufferfv = NULL;
+PFNGLCLEARBUFFERIVPROC glad_glClearBufferiv = NULL;
+PFNGLCLEARBUFFERUIVPROC glad_glClearBufferuiv = NULL;
+PFNGLCLEARCOLORPROC glad_glClearColor = NULL;
+PFNGLCLEARDEPTHPROC glad_glClearDepth = NULL;
+PFNGLCLEARINDEXPROC glad_glClearIndex = NULL;
+PFNGLCLEARSTENCILPROC glad_glClearStencil = NULL;
+PFNGLCLIENTACTIVETEXTUREPROC glad_glClientActiveTexture = NULL;
+PFNGLCLIENTWAITSYNCPROC glad_glClientWaitSync = NULL;
+PFNGLCLIPPLANEPROC glad_glClipPlane = NULL;
+PFNGLCOLOR3BPROC glad_glColor3b = NULL;
+PFNGLCOLOR3BVPROC glad_glColor3bv = NULL;
+PFNGLCOLOR3DPROC glad_glColor3d = NULL;
+PFNGLCOLOR3DVPROC glad_glColor3dv = NULL;
+PFNGLCOLOR3FPROC glad_glColor3f = NULL;
+PFNGLCOLOR3FVPROC glad_glColor3fv = NULL;
+PFNGLCOLOR3IPROC glad_glColor3i = NULL;
+PFNGLCOLOR3IVPROC glad_glColor3iv = NULL;
+PFNGLCOLOR3SPROC glad_glColor3s = NULL;
+PFNGLCOLOR3SVPROC glad_glColor3sv = NULL;
+PFNGLCOLOR3UBPROC glad_glColor3ub = NULL;
+PFNGLCOLOR3UBVPROC glad_glColor3ubv = NULL;
+PFNGLCOLOR3UIPROC glad_glColor3ui = NULL;
+PFNGLCOLOR3UIVPROC glad_glColor3uiv = NULL;
+PFNGLCOLOR3USPROC glad_glColor3us = NULL;
+PFNGLCOLOR3USVPROC glad_glColor3usv = NULL;
+PFNGLCOLOR4BPROC glad_glColor4b = NULL;
+PFNGLCOLOR4BVPROC glad_glColor4bv = NULL;
+PFNGLCOLOR4DPROC glad_glColor4d = NULL;
+PFNGLCOLOR4DVPROC glad_glColor4dv = NULL;
+PFNGLCOLOR4FPROC glad_glColor4f = NULL;
+PFNGLCOLOR4FVPROC glad_glColor4fv = NULL;
+PFNGLCOLOR4IPROC glad_glColor4i = NULL;
+PFNGLCOLOR4IVPROC glad_glColor4iv = NULL;
+PFNGLCOLOR4SPROC glad_glColor4s = NULL;
+PFNGLCOLOR4SVPROC glad_glColor4sv = NULL;
+PFNGLCOLOR4UBPROC glad_glColor4ub = NULL;
+PFNGLCOLOR4UBVPROC glad_glColor4ubv = NULL;
+PFNGLCOLOR4UIPROC glad_glColor4ui = NULL;
+PFNGLCOLOR4UIVPROC glad_glColor4uiv = NULL;
+PFNGLCOLOR4USPROC glad_glColor4us = NULL;
+PFNGLCOLOR4USVPROC glad_glColor4usv = NULL;
+PFNGLCOLORMASKPROC glad_glColorMask = NULL;
+PFNGLCOLORMASKIPROC glad_glColorMaski = NULL;
+PFNGLCOLORMATERIALPROC glad_glColorMaterial = NULL;
+PFNGLCOLORP3UIPROC glad_glColorP3ui = NULL;
+PFNGLCOLORP3UIVPROC glad_glColorP3uiv = NULL;
+PFNGLCOLORP4UIPROC glad_glColorP4ui = NULL;
+PFNGLCOLORP4UIVPROC glad_glColorP4uiv = NULL;
+PFNGLCOLORPOINTERPROC glad_glColorPointer = NULL;
+PFNGLCOMPILESHADERPROC glad_glCompileShader = NULL;
+PFNGLCOMPRESSEDTEXIMAGE1DPROC glad_glCompressedTexImage1D = NULL;
+PFNGLCOMPRESSEDTEXIMAGE2DPROC glad_glCompressedTexImage2D = NULL;
+PFNGLCOMPRESSEDTEXIMAGE3DPROC glad_glCompressedTexImage3D = NULL;
+PFNGLCOMPRESSEDTEXSUBIMAGE1DPROC glad_glCompressedTexSubImage1D = NULL;
+PFNGLCOMPRESSEDTEXSUBIMAGE2DPROC glad_glCompressedTexSubImage2D = NULL;
+PFNGLCOMPRESSEDTEXSUBIMAGE3DPROC glad_glCompressedTexSubImage3D = NULL;
+PFNGLCOPYBUFFERSUBDATAPROC glad_glCopyBufferSubData = NULL;
+PFNGLCOPYPIXELSPROC glad_glCopyPixels = NULL;
+PFNGLCOPYTEXIMAGE1DPROC glad_glCopyTexImage1D = NULL;
+PFNGLCOPYTEXIMAGE2DPROC glad_glCopyTexImage2D = NULL;
+PFNGLCOPYTEXSUBIMAGE1DPROC glad_glCopyTexSubImage1D = NULL;
+PFNGLCOPYTEXSUBIMAGE2DPROC glad_glCopyTexSubImage2D = NULL;
+PFNGLCOPYTEXSUBIMAGE3DPROC glad_glCopyTexSubImage3D = NULL;
+PFNGLCREATEPROGRAMPROC glad_glCreateProgram = NULL;
+PFNGLCREATESHADERPROC glad_glCreateShader = NULL;
+PFNGLCULLFACEPROC glad_glCullFace = NULL;
+PFNGLDEBUGMESSAGECALLBACKPROC glad_glDebugMessageCallback = NULL;
+PFNGLDEBUGMESSAGECONTROLPROC glad_glDebugMessageControl = NULL;
+PFNGLDEBUGMESSAGEINSERTPROC glad_glDebugMessageInsert = NULL;
+PFNGLDELETEBUFFERSPROC glad_glDeleteBuffers = NULL;
+PFNGLDELETEFRAMEBUFFERSPROC glad_glDeleteFramebuffers = NULL;
+PFNGLDELETELISTSPROC glad_glDeleteLists = NULL;
+PFNGLDELETEPROGRAMPROC glad_glDeleteProgram = NULL;
+PFNGLDELETEQUERIESPROC glad_glDeleteQueries = NULL;
+PFNGLDELETERENDERBUFFERSPROC glad_glDeleteRenderbuffers = NULL;
+PFNGLDELETESAMPLERSPROC glad_glDeleteSamplers = NULL;
+PFNGLDELETESHADERPROC glad_glDeleteShader = NULL;
+PFNGLDELETESYNCPROC glad_glDeleteSync = NULL;
+PFNGLDELETETEXTURESPROC glad_glDeleteTextures = NULL;
+PFNGLDELETEVERTEXARRAYSPROC glad_glDeleteVertexArrays = NULL;
+PFNGLDEPTHFUNCPROC glad_glDepthFunc = NULL;
+PFNGLDEPTHMASKPROC glad_glDepthMask = NULL;
+PFNGLDEPTHRANGEPROC glad_glDepthRange = NULL;
+PFNGLDETACHSHADERPROC glad_glDetachShader = NULL;
+PFNGLDISABLEPROC glad_glDisable = NULL;
+PFNGLDISABLECLIENTSTATEPROC glad_glDisableClientState = NULL;
+PFNGLDISABLEVERTEXATTRIBARRAYPROC glad_glDisableVertexAttribArray = NULL;
+PFNGLDISABLEIPROC glad_glDisablei = NULL;
+PFNGLDRAWARRAYSPROC glad_glDrawArrays = NULL;
+PFNGLDRAWARRAYSINSTANCEDPROC glad_glDrawArraysInstanced = NULL;
+PFNGLDRAWBUFFERPROC glad_glDrawBuffer = NULL;
+PFNGLDRAWBUFFERSPROC glad_glDrawBuffers = NULL;
+PFNGLDRAWELEMENTSPROC glad_glDrawElements = NULL;
+PFNGLDRAWELEMENTSBASEVERTEXPROC glad_glDrawElementsBaseVertex = NULL;
+PFNGLDRAWELEMENTSINSTANCEDPROC glad_glDrawElementsInstanced = NULL;
+PFNGLDRAWELEMENTSINSTANCEDBASEVERTEXPROC glad_glDrawElementsInstancedBaseVertex = NULL;
+PFNGLDRAWPIXELSPROC glad_glDrawPixels = NULL;
+PFNGLDRAWRANGEELEMENTSPROC glad_glDrawRangeElements = NULL;
+PFNGLDRAWRANGEELEMENTSBASEVERTEXPROC glad_glDrawRangeElementsBaseVertex = NULL;
+PFNGLEDGEFLAGPROC glad_glEdgeFlag = NULL;
+PFNGLEDGEFLAGPOINTERPROC glad_glEdgeFlagPointer = NULL;
+PFNGLEDGEFLAGVPROC glad_glEdgeFlagv = NULL;
+PFNGLENABLEPROC glad_glEnable = NULL;
+PFNGLENABLECLIENTSTATEPROC glad_glEnableClientState = NULL;
+PFNGLENABLEVERTEXATTRIBARRAYPROC glad_glEnableVertexAttribArray = NULL;
+PFNGLENABLEIPROC glad_glEnablei = NULL;
+PFNGLENDPROC glad_glEnd = NULL;
+PFNGLENDCONDITIONALRENDERPROC glad_glEndConditionalRender = NULL;
+PFNGLENDLISTPROC glad_glEndList = NULL;
+PFNGLENDQUERYPROC glad_glEndQuery = NULL;
+PFNGLENDTRANSFORMFEEDBACKPROC glad_glEndTransformFeedback = NULL;
+PFNGLEVALCOORD1DPROC glad_glEvalCoord1d = NULL;
+PFNGLEVALCOORD1DVPROC glad_glEvalCoord1dv = NULL;
+PFNGLEVALCOORD1FPROC glad_glEvalCoord1f = NULL;
+PFNGLEVALCOORD1FVPROC glad_glEvalCoord1fv = NULL;
+PFNGLEVALCOORD2DPROC glad_glEvalCoord2d = NULL;
+PFNGLEVALCOORD2DVPROC glad_glEvalCoord2dv = NULL;
+PFNGLEVALCOORD2FPROC glad_glEvalCoord2f = NULL;
+PFNGLEVALCOORD2FVPROC glad_glEvalCoord2fv = NULL;
+PFNGLEVALMESH1PROC glad_glEvalMesh1 = NULL;
+PFNGLEVALMESH2PROC glad_glEvalMesh2 = NULL;
+PFNGLEVALPOINT1PROC glad_glEvalPoint1 = NULL;
+PFNGLEVALPOINT2PROC glad_glEvalPoint2 = NULL;
+PFNGLFEEDBACKBUFFERPROC glad_glFeedbackBuffer = NULL;
+PFNGLFENCESYNCPROC glad_glFenceSync = NULL;
+PFNGLFINISHPROC glad_glFinish = NULL;
+PFNGLFLUSHPROC glad_glFlush = NULL;
+PFNGLFLUSHMAPPEDBUFFERRANGEPROC glad_glFlushMappedBufferRange = NULL;
+PFNGLFOGCOORDPOINTERPROC glad_glFogCoordPointer = NULL;
+PFNGLFOGCOORDDPROC glad_glFogCoordd = NULL;
+PFNGLFOGCOORDDVPROC glad_glFogCoorddv = NULL;
+PFNGLFOGCOORDFPROC glad_glFogCoordf = NULL;
+PFNGLFOGCOORDFVPROC glad_glFogCoordfv = NULL;
+PFNGLFOGFPROC glad_glFogf = NULL;
+PFNGLFOGFVPROC glad_glFogfv = NULL;
+PFNGLFOGIPROC glad_glFogi = NULL;
+PFNGLFOGIVPROC glad_glFogiv = NULL;
+PFNGLFRAMEBUFFERRENDERBUFFERPROC glad_glFramebufferRenderbuffer = NULL;
+PFNGLFRAMEBUFFERTEXTUREPROC glad_glFramebufferTexture = NULL;
+PFNGLFRAMEBUFFERTEXTURE1DPROC glad_glFramebufferTexture1D = NULL;
+PFNGLFRAMEBUFFERTEXTURE2DPROC glad_glFramebufferTexture2D = NULL;
+PFNGLFRAMEBUFFERTEXTURE3DPROC glad_glFramebufferTexture3D = NULL;
+PFNGLFRAMEBUFFERTEXTURELAYERPROC glad_glFramebufferTextureLayer = NULL;
+PFNGLFRONTFACEPROC glad_glFrontFace = NULL;
+PFNGLFRUSTUMPROC glad_glFrustum = NULL;
+PFNGLGENBUFFERSPROC glad_glGenBuffers = NULL;
+PFNGLGENFRAMEBUFFERSPROC glad_glGenFramebuffers = NULL;
+PFNGLGENLISTSPROC glad_glGenLists = NULL;
+PFNGLGENQUERIESPROC glad_glGenQueries = NULL;
+PFNGLGENRENDERBUFFERSPROC glad_glGenRenderbuffers = NULL;
+PFNGLGENSAMPLERSPROC glad_glGenSamplers = NULL;
+PFNGLGENTEXTURESPROC glad_glGenTextures = NULL;
+PFNGLGENVERTEXARRAYSPROC glad_glGenVertexArrays = NULL;
+PFNGLGENERATEMIPMAPPROC glad_glGenerateMipmap = NULL;
+PFNGLGETACTIVEATTRIBPROC glad_glGetActiveAttrib = NULL;
+PFNGLGETACTIVEUNIFORMPROC glad_glGetActiveUniform = NULL;
+PFNGLGETACTIVEUNIFORMBLOCKNAMEPROC glad_glGetActiveUniformBlockName = NULL;
+PFNGLGETACTIVEUNIFORMBLOCKIVPROC glad_glGetActiveUniformBlockiv = NULL;
+PFNGLGETACTIVEUNIFORMNAMEPROC glad_glGetActiveUniformName = NULL;
+PFNGLGETACTIVEUNIFORMSIVPROC glad_glGetActiveUniformsiv = NULL;
+PFNGLGETATTACHEDSHADERSPROC glad_glGetAttachedShaders = NULL;
+PFNGLGETATTRIBLOCATIONPROC glad_glGetAttribLocation = NULL;
+PFNGLGETBOOLEANI_VPROC glad_glGetBooleani_v = NULL;
+PFNGLGETBOOLEANVPROC glad_glGetBooleanv = NULL;
+PFNGLGETBUFFERPARAMETERI64VPROC glad_glGetBufferParameteri64v = NULL;
+PFNGLGETBUFFERPARAMETERIVPROC glad_glGetBufferParameteriv = NULL;
+PFNGLGETBUFFERPOINTERVPROC glad_glGetBufferPointerv = NULL;
+PFNGLGETBUFFERSUBDATAPROC glad_glGetBufferSubData = NULL;
+PFNGLGETCLIPPLANEPROC glad_glGetClipPlane = NULL;
+PFNGLGETCOMPRESSEDTEXIMAGEPROC glad_glGetCompressedTexImage = NULL;
+PFNGLGETDEBUGMESSAGELOGPROC glad_glGetDebugMessageLog = NULL;
+PFNGLGETDOUBLEVPROC glad_glGetDoublev = NULL;
+PFNGLGETERRORPROC glad_glGetError = NULL;
+PFNGLGETFLOATVPROC glad_glGetFloatv = NULL;
+PFNGLGETFRAGDATAINDEXPROC glad_glGetFragDataIndex = NULL;
+PFNGLGETFRAGDATALOCATIONPROC glad_glGetFragDataLocation = NULL;
+PFNGLGETFRAMEBUFFERATTACHMENTPARAMETERIVPROC glad_glGetFramebufferAttachmentParameteriv = NULL;
+PFNGLGETGRAPHICSRESETSTATUSARBPROC glad_glGetGraphicsResetStatusARB = NULL;
+PFNGLGETINTEGER64I_VPROC glad_glGetInteger64i_v = NULL;
+PFNGLGETINTEGER64VPROC glad_glGetInteger64v = NULL;
+PFNGLGETINTEGERI_VPROC glad_glGetIntegeri_v = NULL;
+PFNGLGETINTEGERVPROC glad_glGetIntegerv = NULL;
+PFNGLGETLIGHTFVPROC glad_glGetLightfv = NULL;
+PFNGLGETLIGHTIVPROC glad_glGetLightiv = NULL;
+PFNGLGETMAPDVPROC glad_glGetMapdv = NULL;
+PFNGLGETMAPFVPROC glad_glGetMapfv = NULL;
+PFNGLGETMAPIVPROC glad_glGetMapiv = NULL;
+PFNGLGETMATERIALFVPROC glad_glGetMaterialfv = NULL;
+PFNGLGETMATERIALIVPROC glad_glGetMaterialiv = NULL;
+PFNGLGETMULTISAMPLEFVPROC glad_glGetMultisamplefv = NULL;
+PFNGLGETOBJECTLABELPROC glad_glGetObjectLabel = NULL;
+PFNGLGETOBJECTPTRLABELPROC glad_glGetObjectPtrLabel = NULL;
+PFNGLGETPIXELMAPFVPROC glad_glGetPixelMapfv = NULL;
+PFNGLGETPIXELMAPUIVPROC glad_glGetPixelMapuiv = NULL;
+PFNGLGETPIXELMAPUSVPROC glad_glGetPixelMapusv = NULL;
+PFNGLGETPOINTERVPROC glad_glGetPointerv = NULL;
+PFNGLGETPOLYGONSTIPPLEPROC glad_glGetPolygonStipple = NULL;
+PFNGLGETPROGRAMINFOLOGPROC glad_glGetProgramInfoLog = NULL;
+PFNGLGETPROGRAMIVPROC glad_glGetProgramiv = NULL;
+PFNGLGETQUERYOBJECTI64VPROC glad_glGetQueryObjecti64v = NULL;
+PFNGLGETQUERYOBJECTIVPROC glad_glGetQueryObjectiv = NULL;
+PFNGLGETQUERYOBJECTUI64VPROC glad_glGetQueryObjectui64v = NULL;
+PFNGLGETQUERYOBJECTUIVPROC glad_glGetQueryObjectuiv = NULL;
+PFNGLGETQUERYIVPROC glad_glGetQueryiv = NULL;
+PFNGLGETRENDERBUFFERPARAMETERIVPROC glad_glGetRenderbufferParameteriv = NULL;
+PFNGLGETSAMPLERPARAMETERIIVPROC glad_glGetSamplerParameterIiv = NULL;
+PFNGLGETSAMPLERPARAMETERIUIVPROC glad_glGetSamplerParameterIuiv = NULL;
+PFNGLGETSAMPLERPARAMETERFVPROC glad_glGetSamplerParameterfv = NULL;
+PFNGLGETSAMPLERPARAMETERIVPROC glad_glGetSamplerParameteriv = NULL;
+PFNGLGETSHADERINFOLOGPROC glad_glGetShaderInfoLog = NULL;
+PFNGLGETSHADERSOURCEPROC glad_glGetShaderSource = NULL;
+PFNGLGETSHADERIVPROC glad_glGetShaderiv = NULL;
+PFNGLGETSTRINGPROC glad_glGetString = NULL;
+PFNGLGETSTRINGIPROC glad_glGetStringi = NULL;
+PFNGLGETSYNCIVPROC glad_glGetSynciv = NULL;
+PFNGLGETTEXENVFVPROC glad_glGetTexEnvfv = NULL;
+PFNGLGETTEXENVIVPROC glad_glGetTexEnviv = NULL;
+PFNGLGETTEXGENDVPROC glad_glGetTexGendv = NULL;
+PFNGLGETTEXGENFVPROC glad_glGetTexGenfv = NULL;
+PFNGLGETTEXGENIVPROC glad_glGetTexGeniv = NULL;
+PFNGLGETTEXIMAGEPROC glad_glGetTexImage = NULL;
+PFNGLGETTEXLEVELPARAMETERFVPROC glad_glGetTexLevelParameterfv = NULL;
+PFNGLGETTEXLEVELPARAMETERIVPROC glad_glGetTexLevelParameteriv = NULL;
+PFNGLGETTEXPARAMETERIIVPROC glad_glGetTexParameterIiv = NULL;
+PFNGLGETTEXPARAMETERIUIVPROC glad_glGetTexParameterIuiv = NULL;
+PFNGLGETTEXPARAMETERFVPROC glad_glGetTexParameterfv = NULL;
+PFNGLGETTEXPARAMETERIVPROC glad_glGetTexParameteriv = NULL;
+PFNGLGETTRANSFORMFEEDBACKVARYINGPROC glad_glGetTransformFeedbackVarying = NULL;
+PFNGLGETUNIFORMBLOCKINDEXPROC glad_glGetUniformBlockIndex = NULL;
+PFNGLGETUNIFORMINDICESPROC glad_glGetUniformIndices = NULL;
+PFNGLGETUNIFORMLOCATIONPROC glad_glGetUniformLocation = NULL;
+PFNGLGETUNIFORMFVPROC glad_glGetUniformfv = NULL;
+PFNGLGETUNIFORMIVPROC glad_glGetUniformiv = NULL;
+PFNGLGETUNIFORMUIVPROC glad_glGetUniformuiv = NULL;
+PFNGLGETVERTEXATTRIBIIVPROC glad_glGetVertexAttribIiv = NULL;
+PFNGLGETVERTEXATTRIBIUIVPROC glad_glGetVertexAttribIuiv = NULL;
+PFNGLGETVERTEXATTRIBPOINTERVPROC glad_glGetVertexAttribPointerv = NULL;
+PFNGLGETVERTEXATTRIBDVPROC glad_glGetVertexAttribdv = NULL;
+PFNGLGETVERTEXATTRIBFVPROC glad_glGetVertexAttribfv = NULL;
+PFNGLGETVERTEXATTRIBIVPROC glad_glGetVertexAttribiv = NULL;
+PFNGLGETNCOLORTABLEARBPROC glad_glGetnColorTableARB = NULL;
+PFNGLGETNCOMPRESSEDTEXIMAGEARBPROC glad_glGetnCompressedTexImageARB = NULL;
+PFNGLGETNCONVOLUTIONFILTERARBPROC glad_glGetnConvolutionFilterARB = NULL;
+PFNGLGETNHISTOGRAMARBPROC glad_glGetnHistogramARB = NULL;
+PFNGLGETNMAPDVARBPROC glad_glGetnMapdvARB = NULL;
+PFNGLGETNMAPFVARBPROC glad_glGetnMapfvARB = NULL;
+PFNGLGETNMAPIVARBPROC glad_glGetnMapivARB = NULL;
+PFNGLGETNMINMAXARBPROC glad_glGetnMinmaxARB = NULL;
+PFNGLGETNPIXELMAPFVARBPROC glad_glGetnPixelMapfvARB = NULL;
+PFNGLGETNPIXELMAPUIVARBPROC glad_glGetnPixelMapuivARB = NULL;
+PFNGLGETNPIXELMAPUSVARBPROC glad_glGetnPixelMapusvARB = NULL;
+PFNGLGETNPOLYGONSTIPPLEARBPROC glad_glGetnPolygonStippleARB = NULL;
+PFNGLGETNSEPARABLEFILTERARBPROC glad_glGetnSeparableFilterARB = NULL;
+PFNGLGETNTEXIMAGEARBPROC glad_glGetnTexImageARB = NULL;
+PFNGLGETNUNIFORMDVARBPROC glad_glGetnUniformdvARB = NULL;
+PFNGLGETNUNIFORMFVARBPROC glad_glGetnUniformfvARB = NULL;
+PFNGLGETNUNIFORMIVARBPROC glad_glGetnUniformivARB = NULL;
+PFNGLGETNUNIFORMUIVARBPROC glad_glGetnUniformuivARB = NULL;
+PFNGLHINTPROC glad_glHint = NULL;
+PFNGLINDEXMASKPROC glad_glIndexMask = NULL;
+PFNGLINDEXPOINTERPROC glad_glIndexPointer = NULL;
+PFNGLINDEXDPROC glad_glIndexd = NULL;
+PFNGLINDEXDVPROC glad_glIndexdv = NULL;
+PFNGLINDEXFPROC glad_glIndexf = NULL;
+PFNGLINDEXFVPROC glad_glIndexfv = NULL;
+PFNGLINDEXIPROC glad_glIndexi = NULL;
+PFNGLINDEXIVPROC glad_glIndexiv = NULL;
+PFNGLINDEXSPROC glad_glIndexs = NULL;
+PFNGLINDEXSVPROC glad_glIndexsv = NULL;
+PFNGLINDEXUBPROC glad_glIndexub = NULL;
+PFNGLINDEXUBVPROC glad_glIndexubv = NULL;
+PFNGLINITNAMESPROC glad_glInitNames = NULL;
+PFNGLINTERLEAVEDARRAYSPROC glad_glInterleavedArrays = NULL;
+PFNGLISBUFFERPROC glad_glIsBuffer = NULL;
+PFNGLISENABLEDPROC glad_glIsEnabled = NULL;
+PFNGLISENABLEDIPROC glad_glIsEnabledi = NULL;
+PFNGLISFRAMEBUFFERPROC glad_glIsFramebuffer = NULL;
+PFNGLISLISTPROC glad_glIsList = NULL;
+PFNGLISPROGRAMPROC glad_glIsProgram = NULL;
+PFNGLISQUERYPROC glad_glIsQuery = NULL;
+PFNGLISRENDERBUFFERPROC glad_glIsRenderbuffer = NULL;
+PFNGLISSAMPLERPROC glad_glIsSampler = NULL;
+PFNGLISSHADERPROC glad_glIsShader = NULL;
+PFNGLISSYNCPROC glad_glIsSync = NULL;
+PFNGLISTEXTUREPROC glad_glIsTexture = NULL;
+PFNGLISVERTEXARRAYPROC glad_glIsVertexArray = NULL;
+PFNGLLIGHTMODELFPROC glad_glLightModelf = NULL;
+PFNGLLIGHTMODELFVPROC glad_glLightModelfv = NULL;
+PFNGLLIGHTMODELIPROC glad_glLightModeli = NULL;
+PFNGLLIGHTMODELIVPROC glad_glLightModeliv = NULL;
+PFNGLLIGHTFPROC glad_glLightf = NULL;
+PFNGLLIGHTFVPROC glad_glLightfv = NULL;
+PFNGLLIGHTIPROC glad_glLighti = NULL;
+PFNGLLIGHTIVPROC glad_glLightiv = NULL;
+PFNGLLINESTIPPLEPROC glad_glLineStipple = NULL;
+PFNGLLINEWIDTHPROC glad_glLineWidth = NULL;
+PFNGLLINKPROGRAMPROC glad_glLinkProgram = NULL;
+PFNGLLISTBASEPROC glad_glListBase = NULL;
+PFNGLLOADIDENTITYPROC glad_glLoadIdentity = NULL;
+PFNGLLOADMATRIXDPROC glad_glLoadMatrixd = NULL;
+PFNGLLOADMATRIXFPROC glad_glLoadMatrixf = NULL;
+PFNGLLOADNAMEPROC glad_glLoadName = NULL;
+PFNGLLOADTRANSPOSEMATRIXDPROC glad_glLoadTransposeMatrixd = NULL;
+PFNGLLOADTRANSPOSEMATRIXFPROC glad_glLoadTransposeMatrixf = NULL;
+PFNGLLOGICOPPROC glad_glLogicOp = NULL;
+PFNGLMAP1DPROC glad_glMap1d = NULL;
+PFNGLMAP1FPROC glad_glMap1f = NULL;
+PFNGLMAP2DPROC glad_glMap2d = NULL;
+PFNGLMAP2FPROC glad_glMap2f = NULL;
+PFNGLMAPBUFFERPROC glad_glMapBuffer = NULL;
+PFNGLMAPBUFFERRANGEPROC glad_glMapBufferRange = NULL;
+PFNGLMAPGRID1DPROC glad_glMapGrid1d = NULL;
+PFNGLMAPGRID1FPROC glad_glMapGrid1f = NULL;
+PFNGLMAPGRID2DPROC glad_glMapGrid2d = NULL;
+PFNGLMAPGRID2FPROC glad_glMapGrid2f = NULL;
+PFNGLMATERIALFPROC glad_glMaterialf = NULL;
+PFNGLMATERIALFVPROC glad_glMaterialfv = NULL;
+PFNGLMATERIALIPROC glad_glMateriali = NULL;
+PFNGLMATERIALIVPROC glad_glMaterialiv = NULL;
+PFNGLMATRIXMODEPROC glad_glMatrixMode = NULL;
+PFNGLMULTMATRIXDPROC glad_glMultMatrixd = NULL;
+PFNGLMULTMATRIXFPROC glad_glMultMatrixf = NULL;
+PFNGLMULTTRANSPOSEMATRIXDPROC glad_glMultTransposeMatrixd = NULL;
+PFNGLMULTTRANSPOSEMATRIXFPROC glad_glMultTransposeMatrixf = NULL;
+PFNGLMULTIDRAWARRAYSPROC glad_glMultiDrawArrays = NULL;
+PFNGLMULTIDRAWELEMENTSPROC glad_glMultiDrawElements = NULL;
+PFNGLMULTIDRAWELEMENTSBASEVERTEXPROC glad_glMultiDrawElementsBaseVertex = NULL;
+PFNGLMULTITEXCOORD1DPROC glad_glMultiTexCoord1d = NULL;
+PFNGLMULTITEXCOORD1DVPROC glad_glMultiTexCoord1dv = NULL;
+PFNGLMULTITEXCOORD1FPROC glad_glMultiTexCoord1f = NULL;
+PFNGLMULTITEXCOORD1FVPROC glad_glMultiTexCoord1fv = NULL;
+PFNGLMULTITEXCOORD1IPROC glad_glMultiTexCoord1i = NULL;
+PFNGLMULTITEXCOORD1IVPROC glad_glMultiTexCoord1iv = NULL;
+PFNGLMULTITEXCOORD1SPROC glad_glMultiTexCoord1s = NULL;
+PFNGLMULTITEXCOORD1SVPROC glad_glMultiTexCoord1sv = NULL;
+PFNGLMULTITEXCOORD2DPROC glad_glMultiTexCoord2d = NULL;
+PFNGLMULTITEXCOORD2DVPROC glad_glMultiTexCoord2dv = NULL;
+PFNGLMULTITEXCOORD2FPROC glad_glMultiTexCoord2f = NULL;
+PFNGLMULTITEXCOORD2FVPROC glad_glMultiTexCoord2fv = NULL;
+PFNGLMULTITEXCOORD2IPROC glad_glMultiTexCoord2i = NULL;
+PFNGLMULTITEXCOORD2IVPROC glad_glMultiTexCoord2iv = NULL;
+PFNGLMULTITEXCOORD2SPROC glad_glMultiTexCoord2s = NULL;
+PFNGLMULTITEXCOORD2SVPROC glad_glMultiTexCoord2sv = NULL;
+PFNGLMULTITEXCOORD3DPROC glad_glMultiTexCoord3d = NULL;
+PFNGLMULTITEXCOORD3DVPROC glad_glMultiTexCoord3dv = NULL;
+PFNGLMULTITEXCOORD3FPROC glad_glMultiTexCoord3f = NULL;
+PFNGLMULTITEXCOORD3FVPROC glad_glMultiTexCoord3fv = NULL;
+PFNGLMULTITEXCOORD3IPROC glad_glMultiTexCoord3i = NULL;
+PFNGLMULTITEXCOORD3IVPROC glad_glMultiTexCoord3iv = NULL;
+PFNGLMULTITEXCOORD3SPROC glad_glMultiTexCoord3s = NULL;
+PFNGLMULTITEXCOORD3SVPROC glad_glMultiTexCoord3sv = NULL;
+PFNGLMULTITEXCOORD4DPROC glad_glMultiTexCoord4d = NULL;
+PFNGLMULTITEXCOORD4DVPROC glad_glMultiTexCoord4dv = NULL;
+PFNGLMULTITEXCOORD4FPROC glad_glMultiTexCoord4f = NULL;
+PFNGLMULTITEXCOORD4FVPROC glad_glMultiTexCoord4fv = NULL;
+PFNGLMULTITEXCOORD4IPROC glad_glMultiTexCoord4i = NULL;
+PFNGLMULTITEXCOORD4IVPROC glad_glMultiTexCoord4iv = NULL;
+PFNGLMULTITEXCOORD4SPROC glad_glMultiTexCoord4s = NULL;
+PFNGLMULTITEXCOORD4SVPROC glad_glMultiTexCoord4sv = NULL;
+PFNGLMULTITEXCOORDP1UIPROC glad_glMultiTexCoordP1ui = NULL;
+PFNGLMULTITEXCOORDP1UIVPROC glad_glMultiTexCoordP1uiv = NULL;
+PFNGLMULTITEXCOORDP2UIPROC glad_glMultiTexCoordP2ui = NULL;
+PFNGLMULTITEXCOORDP2UIVPROC glad_glMultiTexCoordP2uiv = NULL;
+PFNGLMULTITEXCOORDP3UIPROC glad_glMultiTexCoordP3ui = NULL;
+PFNGLMULTITEXCOORDP3UIVPROC glad_glMultiTexCoordP3uiv = NULL;
+PFNGLMULTITEXCOORDP4UIPROC glad_glMultiTexCoordP4ui = NULL;
+PFNGLMULTITEXCOORDP4UIVPROC glad_glMultiTexCoordP4uiv = NULL;
+PFNGLNEWLISTPROC glad_glNewList = NULL;
+PFNGLNORMAL3BPROC glad_glNormal3b = NULL;
+PFNGLNORMAL3BVPROC glad_glNormal3bv = NULL;
+PFNGLNORMAL3DPROC glad_glNormal3d = NULL;
+PFNGLNORMAL3DVPROC glad_glNormal3dv = NULL;
+PFNGLNORMAL3FPROC glad_glNormal3f = NULL;
+PFNGLNORMAL3FVPROC glad_glNormal3fv = NULL;
+PFNGLNORMAL3IPROC glad_glNormal3i = NULL;
+PFNGLNORMAL3IVPROC glad_glNormal3iv = NULL;
+PFNGLNORMAL3SPROC glad_glNormal3s = NULL;
+PFNGLNORMAL3SVPROC glad_glNormal3sv = NULL;
+PFNGLNORMALP3UIPROC glad_glNormalP3ui = NULL;
+PFNGLNORMALP3UIVPROC glad_glNormalP3uiv = NULL;
+PFNGLNORMALPOINTERPROC glad_glNormalPointer = NULL;
+PFNGLOBJECTLABELPROC glad_glObjectLabel = NULL;
+PFNGLOBJECTPTRLABELPROC glad_glObjectPtrLabel = NULL;
+PFNGLORTHOPROC glad_glOrtho = NULL;
+PFNGLPASSTHROUGHPROC glad_glPassThrough = NULL;
+PFNGLPIXELMAPFVPROC glad_glPixelMapfv = NULL;
+PFNGLPIXELMAPUIVPROC glad_glPixelMapuiv = NULL;
+PFNGLPIXELMAPUSVPROC glad_glPixelMapusv = NULL;
+PFNGLPIXELSTOREFPROC glad_glPixelStoref = NULL;
+PFNGLPIXELSTOREIPROC glad_glPixelStorei = NULL;
+PFNGLPIXELTRANSFERFPROC glad_glPixelTransferf = NULL;
+PFNGLPIXELTRANSFERIPROC glad_glPixelTransferi = NULL;
+PFNGLPIXELZOOMPROC glad_glPixelZoom = NULL;
+PFNGLPOINTPARAMETERFPROC glad_glPointParameterf = NULL;
+PFNGLPOINTPARAMETERFVPROC glad_glPointParameterfv = NULL;
+PFNGLPOINTPARAMETERIPROC glad_glPointParameteri = NULL;
+PFNGLPOINTPARAMETERIVPROC glad_glPointParameteriv = NULL;
+PFNGLPOINTSIZEPROC glad_glPointSize = NULL;
+PFNGLPOLYGONMODEPROC glad_glPolygonMode = NULL;
+PFNGLPOLYGONOFFSETPROC glad_glPolygonOffset = NULL;
+PFNGLPOLYGONSTIPPLEPROC glad_glPolygonStipple = NULL;
+PFNGLPOPATTRIBPROC glad_glPopAttrib = NULL;
+PFNGLPOPCLIENTATTRIBPROC glad_glPopClientAttrib = NULL;
+PFNGLPOPDEBUGGROUPPROC glad_glPopDebugGroup = NULL;
+PFNGLPOPMATRIXPROC glad_glPopMatrix = NULL;
+PFNGLPOPNAMEPROC glad_glPopName = NULL;
+PFNGLPRIMITIVERESTARTINDEXPROC glad_glPrimitiveRestartIndex = NULL;
+PFNGLPRIORITIZETEXTURESPROC glad_glPrioritizeTextures = NULL;
+PFNGLPROVOKINGVERTEXPROC glad_glProvokingVertex = NULL;
+PFNGLPUSHATTRIBPROC glad_glPushAttrib = NULL;
+PFNGLPUSHCLIENTATTRIBPROC glad_glPushClientAttrib = NULL;
+PFNGLPUSHDEBUGGROUPPROC glad_glPushDebugGroup = NULL;
+PFNGLPUSHMATRIXPROC glad_glPushMatrix = NULL;
+PFNGLPUSHNAMEPROC glad_glPushName = NULL;
+PFNGLQUERYCOUNTERPROC glad_glQueryCounter = NULL;
+PFNGLRASTERPOS2DPROC glad_glRasterPos2d = NULL;
+PFNGLRASTERPOS2DVPROC glad_glRasterPos2dv = NULL;
+PFNGLRASTERPOS2FPROC glad_glRasterPos2f = NULL;
+PFNGLRASTERPOS2FVPROC glad_glRasterPos2fv = NULL;
+PFNGLRASTERPOS2IPROC glad_glRasterPos2i = NULL;
+PFNGLRASTERPOS2IVPROC glad_glRasterPos2iv = NULL;
+PFNGLRASTERPOS2SPROC glad_glRasterPos2s = NULL;
+PFNGLRASTERPOS2SVPROC glad_glRasterPos2sv = NULL;
+PFNGLRASTERPOS3DPROC glad_glRasterPos3d = NULL;
+PFNGLRASTERPOS3DVPROC glad_glRasterPos3dv = NULL;
+PFNGLRASTERPOS3FPROC glad_glRasterPos3f = NULL;
+PFNGLRASTERPOS3FVPROC glad_glRasterPos3fv = NULL;
+PFNGLRASTERPOS3IPROC glad_glRasterPos3i = NULL;
+PFNGLRASTERPOS3IVPROC glad_glRasterPos3iv = NULL;
+PFNGLRASTERPOS3SPROC glad_glRasterPos3s = NULL;
+PFNGLRASTERPOS3SVPROC glad_glRasterPos3sv = NULL;
+PFNGLRASTERPOS4DPROC glad_glRasterPos4d = NULL;
+PFNGLRASTERPOS4DVPROC glad_glRasterPos4dv = NULL;
+PFNGLRASTERPOS4FPROC glad_glRasterPos4f = NULL;
+PFNGLRASTERPOS4FVPROC glad_glRasterPos4fv = NULL;
+PFNGLRASTERPOS4IPROC glad_glRasterPos4i = NULL;
+PFNGLRASTERPOS4IVPROC glad_glRasterPos4iv = NULL;
+PFNGLRASTERPOS4SPROC glad_glRasterPos4s = NULL;
+PFNGLRASTERPOS4SVPROC glad_glRasterPos4sv = NULL;
+PFNGLREADBUFFERPROC glad_glReadBuffer = NULL;
+PFNGLREADPIXELSPROC glad_glReadPixels = NULL;
+PFNGLREADNPIXELSARBPROC glad_glReadnPixelsARB = NULL;
+PFNGLRECTDPROC glad_glRectd = NULL;
+PFNGLRECTDVPROC glad_glRectdv = NULL;
+PFNGLRECTFPROC glad_glRectf = NULL;
+PFNGLRECTFVPROC glad_glRectfv = NULL;
+PFNGLRECTIPROC glad_glRecti = NULL;
+PFNGLRECTIVPROC glad_glRectiv = NULL;
+PFNGLRECTSPROC glad_glRects = NULL;
+PFNGLRECTSVPROC glad_glRectsv = NULL;
+PFNGLRENDERMODEPROC glad_glRenderMode = NULL;
+PFNGLRENDERBUFFERSTORAGEPROC glad_glRenderbufferStorage = NULL;
+PFNGLRENDERBUFFERSTORAGEMULTISAMPLEPROC glad_glRenderbufferStorageMultisample = NULL;
+PFNGLROTATEDPROC glad_glRotated = NULL;
+PFNGLROTATEFPROC glad_glRotatef = NULL;
+PFNGLSAMPLECOVERAGEPROC glad_glSampleCoverage = NULL;
+PFNGLSAMPLECOVERAGEARBPROC glad_glSampleCoverageARB = NULL;
+PFNGLSAMPLEMASKIPROC glad_glSampleMaski = NULL;
+PFNGLSAMPLERPARAMETERIIVPROC glad_glSamplerParameterIiv = NULL;
+PFNGLSAMPLERPARAMETERIUIVPROC glad_glSamplerParameterIuiv = NULL;
+PFNGLSAMPLERPARAMETERFPROC glad_glSamplerParameterf = NULL;
+PFNGLSAMPLERPARAMETERFVPROC glad_glSamplerParameterfv = NULL;
+PFNGLSAMPLERPARAMETERIPROC glad_glSamplerParameteri = NULL;
+PFNGLSAMPLERPARAMETERIVPROC glad_glSamplerParameteriv = NULL;
+PFNGLSCALEDPROC glad_glScaled = NULL;
+PFNGLSCALEFPROC glad_glScalef = NULL;
+PFNGLSCISSORPROC glad_glScissor = NULL;
+PFNGLSECONDARYCOLOR3BPROC glad_glSecondaryColor3b = NULL;
+PFNGLSECONDARYCOLOR3BVPROC glad_glSecondaryColor3bv = NULL;
+PFNGLSECONDARYCOLOR3DPROC glad_glSecondaryColor3d = NULL;
+PFNGLSECONDARYCOLOR3DVPROC glad_glSecondaryColor3dv = NULL;
+PFNGLSECONDARYCOLOR3FPROC glad_glSecondaryColor3f = NULL;
+PFNGLSECONDARYCOLOR3FVPROC glad_glSecondaryColor3fv = NULL;
+PFNGLSECONDARYCOLOR3IPROC glad_glSecondaryColor3i = NULL;
+PFNGLSECONDARYCOLOR3IVPROC glad_glSecondaryColor3iv = NULL;
+PFNGLSECONDARYCOLOR3SPROC glad_glSecondaryColor3s = NULL;
+PFNGLSECONDARYCOLOR3SVPROC glad_glSecondaryColor3sv = NULL;
+PFNGLSECONDARYCOLOR3UBPROC glad_glSecondaryColor3ub = NULL;
+PFNGLSECONDARYCOLOR3UBVPROC glad_glSecondaryColor3ubv = NULL;
+PFNGLSECONDARYCOLOR3UIPROC glad_glSecondaryColor3ui = NULL;
+PFNGLSECONDARYCOLOR3UIVPROC glad_glSecondaryColor3uiv = NULL;
+PFNGLSECONDARYCOLOR3USPROC glad_glSecondaryColor3us = NULL;
+PFNGLSECONDARYCOLOR3USVPROC glad_glSecondaryColor3usv = NULL;
+PFNGLSECONDARYCOLORP3UIPROC glad_glSecondaryColorP3ui = NULL;
+PFNGLSECONDARYCOLORP3UIVPROC glad_glSecondaryColorP3uiv = NULL;
+PFNGLSECONDARYCOLORPOINTERPROC glad_glSecondaryColorPointer = NULL;
+PFNGLSELECTBUFFERPROC glad_glSelectBuffer = NULL;
+PFNGLSHADEMODELPROC glad_glShadeModel = NULL;
+PFNGLSHADERSOURCEPROC glad_glShaderSource = NULL;
+PFNGLSTENCILFUNCPROC glad_glStencilFunc = NULL;
+PFNGLSTENCILFUNCSEPARATEPROC glad_glStencilFuncSeparate = NULL;
+PFNGLSTENCILMASKPROC glad_glStencilMask = NULL;
+PFNGLSTENCILMASKSEPARATEPROC glad_glStencilMaskSeparate = NULL;
+PFNGLSTENCILOPPROC glad_glStencilOp = NULL;
+PFNGLSTENCILOPSEPARATEPROC glad_glStencilOpSeparate = NULL;
+PFNGLTEXBUFFERPROC glad_glTexBuffer = NULL;
+PFNGLTEXCOORD1DPROC glad_glTexCoord1d = NULL;
+PFNGLTEXCOORD1DVPROC glad_glTexCoord1dv = NULL;
+PFNGLTEXCOORD1FPROC glad_glTexCoord1f = NULL;
+PFNGLTEXCOORD1FVPROC glad_glTexCoord1fv = NULL;
+PFNGLTEXCOORD1IPROC glad_glTexCoord1i = NULL;
+PFNGLTEXCOORD1IVPROC glad_glTexCoord1iv = NULL;
+PFNGLTEXCOORD1SPROC glad_glTexCoord1s = NULL;
+PFNGLTEXCOORD1SVPROC glad_glTexCoord1sv = NULL;
+PFNGLTEXCOORD2DPROC glad_glTexCoord2d = NULL;
+PFNGLTEXCOORD2DVPROC glad_glTexCoord2dv = NULL;
+PFNGLTEXCOORD2FPROC glad_glTexCoord2f = NULL;
+PFNGLTEXCOORD2FVPROC glad_glTexCoord2fv = NULL;
+PFNGLTEXCOORD2IPROC glad_glTexCoord2i = NULL;
+PFNGLTEXCOORD2IVPROC glad_glTexCoord2iv = NULL;
+PFNGLTEXCOORD2SPROC glad_glTexCoord2s = NULL;
+PFNGLTEXCOORD2SVPROC glad_glTexCoord2sv = NULL;
+PFNGLTEXCOORD3DPROC glad_glTexCoord3d = NULL;
+PFNGLTEXCOORD3DVPROC glad_glTexCoord3dv = NULL;
+PFNGLTEXCOORD3FPROC glad_glTexCoord3f = NULL;
+PFNGLTEXCOORD3FVPROC glad_glTexCoord3fv = NULL;
+PFNGLTEXCOORD3IPROC glad_glTexCoord3i = NULL;
+PFNGLTEXCOORD3IVPROC glad_glTexCoord3iv = NULL;
+PFNGLTEXCOORD3SPROC glad_glTexCoord3s = NULL;
+PFNGLTEXCOORD3SVPROC glad_glTexCoord3sv = NULL;
+PFNGLTEXCOORD4DPROC glad_glTexCoord4d = NULL;
+PFNGLTEXCOORD4DVPROC glad_glTexCoord4dv = NULL;
+PFNGLTEXCOORD4FPROC glad_glTexCoord4f = NULL;
+PFNGLTEXCOORD4FVPROC glad_glTexCoord4fv = NULL;
+PFNGLTEXCOORD4IPROC glad_glTexCoord4i = NULL;
+PFNGLTEXCOORD4IVPROC glad_glTexCoord4iv = NULL;
+PFNGLTEXCOORD4SPROC glad_glTexCoord4s = NULL;
+PFNGLTEXCOORD4SVPROC glad_glTexCoord4sv = NULL;
+PFNGLTEXCOORDP1UIPROC glad_glTexCoordP1ui = NULL;
+PFNGLTEXCOORDP1UIVPROC glad_glTexCoordP1uiv = NULL;
+PFNGLTEXCOORDP2UIPROC glad_glTexCoordP2ui = NULL;
+PFNGLTEXCOORDP2UIVPROC glad_glTexCoordP2uiv = NULL;
+PFNGLTEXCOORDP3UIPROC glad_glTexCoordP3ui = NULL;
+PFNGLTEXCOORDP3UIVPROC glad_glTexCoordP3uiv = NULL;
+PFNGLTEXCOORDP4UIPROC glad_glTexCoordP4ui = NULL;
+PFNGLTEXCOORDP4UIVPROC glad_glTexCoordP4uiv = NULL;
+PFNGLTEXCOORDPOINTERPROC glad_glTexCoordPointer = NULL;
+PFNGLTEXENVFPROC glad_glTexEnvf = NULL;
+PFNGLTEXENVFVPROC glad_glTexEnvfv = NULL;
+PFNGLTEXENVIPROC glad_glTexEnvi = NULL;
+PFNGLTEXENVIVPROC glad_glTexEnviv = NULL;
+PFNGLTEXGENDPROC glad_glTexGend = NULL;
+PFNGLTEXGENDVPROC glad_glTexGendv = NULL;
+PFNGLTEXGENFPROC glad_glTexGenf = NULL;
+PFNGLTEXGENFVPROC glad_glTexGenfv = NULL;
+PFNGLTEXGENIPROC glad_glTexGeni = NULL;
+PFNGLTEXGENIVPROC glad_glTexGeniv = NULL;
+PFNGLTEXIMAGE1DPROC glad_glTexImage1D = NULL;
+PFNGLTEXIMAGE2DPROC glad_glTexImage2D = NULL;
+PFNGLTEXIMAGE2DMULTISAMPLEPROC glad_glTexImage2DMultisample = NULL;
+PFNGLTEXIMAGE3DPROC glad_glTexImage3D = NULL;
+PFNGLTEXIMAGE3DMULTISAMPLEPROC glad_glTexImage3DMultisample = NULL;
+PFNGLTEXPARAMETERIIVPROC glad_glTexParameterIiv = NULL;
+PFNGLTEXPARAMETERIUIVPROC glad_glTexParameterIuiv = NULL;
+PFNGLTEXPARAMETERFPROC glad_glTexParameterf = NULL;
+PFNGLTEXPARAMETERFVPROC glad_glTexParameterfv = NULL;
+PFNGLTEXPARAMETERIPROC glad_glTexParameteri = NULL;
+PFNGLTEXPARAMETERIVPROC glad_glTexParameteriv = NULL;
+PFNGLTEXSUBIMAGE1DPROC glad_glTexSubImage1D = NULL;
+PFNGLTEXSUBIMAGE2DPROC glad_glTexSubImage2D = NULL;
+PFNGLTEXSUBIMAGE3DPROC glad_glTexSubImage3D = NULL;
+PFNGLTRANSFORMFEEDBACKVARYINGSPROC glad_glTransformFeedbackVaryings = NULL;
+PFNGLTRANSLATEDPROC glad_glTranslated = NULL;
+PFNGLTRANSLATEFPROC glad_glTranslatef = NULL;
+PFNGLUNIFORM1FPROC glad_glUniform1f = NULL;
+PFNGLUNIFORM1FVPROC glad_glUniform1fv = NULL;
+PFNGLUNIFORM1IPROC glad_glUniform1i = NULL;
+PFNGLUNIFORM1IVPROC glad_glUniform1iv = NULL;
+PFNGLUNIFORM1UIPROC glad_glUniform1ui = NULL;
+PFNGLUNIFORM1UIVPROC glad_glUniform1uiv = NULL;
+PFNGLUNIFORM2FPROC glad_glUniform2f = NULL;
+PFNGLUNIFORM2FVPROC glad_glUniform2fv = NULL;
+PFNGLUNIFORM2IPROC glad_glUniform2i = NULL;
+PFNGLUNIFORM2IVPROC glad_glUniform2iv = NULL;
+PFNGLUNIFORM2UIPROC glad_glUniform2ui = NULL;
+PFNGLUNIFORM2UIVPROC glad_glUniform2uiv = NULL;
+PFNGLUNIFORM3FPROC glad_glUniform3f = NULL;
+PFNGLUNIFORM3FVPROC glad_glUniform3fv = NULL;
+PFNGLUNIFORM3IPROC glad_glUniform3i = NULL;
+PFNGLUNIFORM3IVPROC glad_glUniform3iv = NULL;
+PFNGLUNIFORM3UIPROC glad_glUniform3ui = NULL;
+PFNGLUNIFORM3UIVPROC glad_glUniform3uiv = NULL;
+PFNGLUNIFORM4FPROC glad_glUniform4f = NULL;
+PFNGLUNIFORM4FVPROC glad_glUniform4fv = NULL;
+PFNGLUNIFORM4IPROC glad_glUniform4i = NULL;
+PFNGLUNIFORM4IVPROC glad_glUniform4iv = NULL;
+PFNGLUNIFORM4UIPROC glad_glUniform4ui = NULL;
+PFNGLUNIFORM4UIVPROC glad_glUniform4uiv = NULL;
+PFNGLUNIFORMBLOCKBINDINGPROC glad_glUniformBlockBinding = NULL;
+PFNGLUNIFORMMATRIX2FVPROC glad_glUniformMatrix2fv = NULL;
+PFNGLUNIFORMMATRIX2X3FVPROC glad_glUniformMatrix2x3fv = NULL;
+PFNGLUNIFORMMATRIX2X4FVPROC glad_glUniformMatrix2x4fv = NULL;
+PFNGLUNIFORMMATRIX3FVPROC glad_glUniformMatrix3fv = NULL;
+PFNGLUNIFORMMATRIX3X2FVPROC glad_glUniformMatrix3x2fv = NULL;
+PFNGLUNIFORMMATRIX3X4FVPROC glad_glUniformMatrix3x4fv = NULL;
+PFNGLUNIFORMMATRIX4FVPROC glad_glUniformMatrix4fv = NULL;
+PFNGLUNIFORMMATRIX4X2FVPROC glad_glUniformMatrix4x2fv = NULL;
+PFNGLUNIFORMMATRIX4X3FVPROC glad_glUniformMatrix4x3fv = NULL;
+PFNGLUNMAPBUFFERPROC glad_glUnmapBuffer = NULL;
+PFNGLUSEPROGRAMPROC glad_glUseProgram = NULL;
+PFNGLVALIDATEPROGRAMPROC glad_glValidateProgram = NULL;
+PFNGLVERTEX2DPROC glad_glVertex2d = NULL;
+PFNGLVERTEX2DVPROC glad_glVertex2dv = NULL;
+PFNGLVERTEX2FPROC glad_glVertex2f = NULL;
+PFNGLVERTEX2FVPROC glad_glVertex2fv = NULL;
+PFNGLVERTEX2IPROC glad_glVertex2i = NULL;
+PFNGLVERTEX2IVPROC glad_glVertex2iv = NULL;
+PFNGLVERTEX2SPROC glad_glVertex2s = NULL;
+PFNGLVERTEX2SVPROC glad_glVertex2sv = NULL;
+PFNGLVERTEX3DPROC glad_glVertex3d = NULL;
+PFNGLVERTEX3DVPROC glad_glVertex3dv = NULL;
+PFNGLVERTEX3FPROC glad_glVertex3f = NULL;
+PFNGLVERTEX3FVPROC glad_glVertex3fv = NULL;
+PFNGLVERTEX3IPROC glad_glVertex3i = NULL;
+PFNGLVERTEX3IVPROC glad_glVertex3iv = NULL;
+PFNGLVERTEX3SPROC glad_glVertex3s = NULL;
+PFNGLVERTEX3SVPROC glad_glVertex3sv = NULL;
+PFNGLVERTEX4DPROC glad_glVertex4d = NULL;
+PFNGLVERTEX4DVPROC glad_glVertex4dv = NULL;
+PFNGLVERTEX4FPROC glad_glVertex4f = NULL;
+PFNGLVERTEX4FVPROC glad_glVertex4fv = NULL;
+PFNGLVERTEX4IPROC glad_glVertex4i = NULL;
+PFNGLVERTEX4IVPROC glad_glVertex4iv = NULL;
+PFNGLVERTEX4SPROC glad_glVertex4s = NULL;
+PFNGLVERTEX4SVPROC glad_glVertex4sv = NULL;
+PFNGLVERTEXATTRIB1DPROC glad_glVertexAttrib1d = NULL;
+PFNGLVERTEXATTRIB1DVPROC glad_glVertexAttrib1dv = NULL;
+PFNGLVERTEXATTRIB1FPROC glad_glVertexAttrib1f = NULL;
+PFNGLVERTEXATTRIB1FVPROC glad_glVertexAttrib1fv = NULL;
+PFNGLVERTEXATTRIB1SPROC glad_glVertexAttrib1s = NULL;
+PFNGLVERTEXATTRIB1SVPROC glad_glVertexAttrib1sv = NULL;
+PFNGLVERTEXATTRIB2DPROC glad_glVertexAttrib2d = NULL;
+PFNGLVERTEXATTRIB2DVPROC glad_glVertexAttrib2dv = NULL;
+PFNGLVERTEXATTRIB2FPROC glad_glVertexAttrib2f = NULL;
+PFNGLVERTEXATTRIB2FVPROC glad_glVertexAttrib2fv = NULL;
+PFNGLVERTEXATTRIB2SPROC glad_glVertexAttrib2s = NULL;
+PFNGLVERTEXATTRIB2SVPROC glad_glVertexAttrib2sv = NULL;
+PFNGLVERTEXATTRIB3DPROC glad_glVertexAttrib3d = NULL;
+PFNGLVERTEXATTRIB3DVPROC glad_glVertexAttrib3dv = NULL;
+PFNGLVERTEXATTRIB3FPROC glad_glVertexAttrib3f = NULL;
+PFNGLVERTEXATTRIB3FVPROC glad_glVertexAttrib3fv = NULL;
+PFNGLVERTEXATTRIB3SPROC glad_glVertexAttrib3s = NULL;
+PFNGLVERTEXATTRIB3SVPROC glad_glVertexAttrib3sv = NULL;
+PFNGLVERTEXATTRIB4NBVPROC glad_glVertexAttrib4Nbv = NULL;
+PFNGLVERTEXATTRIB4NIVPROC glad_glVertexAttrib4Niv = NULL;
+PFNGLVERTEXATTRIB4NSVPROC glad_glVertexAttrib4Nsv = NULL;
+PFNGLVERTEXATTRIB4NUBPROC glad_glVertexAttrib4Nub = NULL;
+PFNGLVERTEXATTRIB4NUBVPROC glad_glVertexAttrib4Nubv = NULL;
+PFNGLVERTEXATTRIB4NUIVPROC glad_glVertexAttrib4Nuiv = NULL;
+PFNGLVERTEXATTRIB4NUSVPROC glad_glVertexAttrib4Nusv = NULL;
+PFNGLVERTEXATTRIB4BVPROC glad_glVertexAttrib4bv = NULL;
+PFNGLVERTEXATTRIB4DPROC glad_glVertexAttrib4d = NULL;
+PFNGLVERTEXATTRIB4DVPROC glad_glVertexAttrib4dv = NULL;
+PFNGLVERTEXATTRIB4FPROC glad_glVertexAttrib4f = NULL;
+PFNGLVERTEXATTRIB4FVPROC glad_glVertexAttrib4fv = NULL;
+PFNGLVERTEXATTRIB4IVPROC glad_glVertexAttrib4iv = NULL;
+PFNGLVERTEXATTRIB4SPROC glad_glVertexAttrib4s = NULL;
+PFNGLVERTEXATTRIB4SVPROC glad_glVertexAttrib4sv = NULL;
+PFNGLVERTEXATTRIB4UBVPROC glad_glVertexAttrib4ubv = NULL;
+PFNGLVERTEXATTRIB4UIVPROC glad_glVertexAttrib4uiv = NULL;
+PFNGLVERTEXATTRIB4USVPROC glad_glVertexAttrib4usv = NULL;
+PFNGLVERTEXATTRIBDIVISORPROC glad_glVertexAttribDivisor = NULL;
+PFNGLVERTEXATTRIBI1IPROC glad_glVertexAttribI1i = NULL;
+PFNGLVERTEXATTRIBI1IVPROC glad_glVertexAttribI1iv = NULL;
+PFNGLVERTEXATTRIBI1UIPROC glad_glVertexAttribI1ui = NULL;
+PFNGLVERTEXATTRIBI1UIVPROC glad_glVertexAttribI1uiv = NULL;
+PFNGLVERTEXATTRIBI2IPROC glad_glVertexAttribI2i = NULL;
+PFNGLVERTEXATTRIBI2IVPROC glad_glVertexAttribI2iv = NULL;
+PFNGLVERTEXATTRIBI2UIPROC glad_glVertexAttribI2ui = NULL;
+PFNGLVERTEXATTRIBI2UIVPROC glad_glVertexAttribI2uiv = NULL;
+PFNGLVERTEXATTRIBI3IPROC glad_glVertexAttribI3i = NULL;
+PFNGLVERTEXATTRIBI3IVPROC glad_glVertexAttribI3iv = NULL;
+PFNGLVERTEXATTRIBI3UIPROC glad_glVertexAttribI3ui = NULL;
+PFNGLVERTEXATTRIBI3UIVPROC glad_glVertexAttribI3uiv = NULL;
+PFNGLVERTEXATTRIBI4BVPROC glad_glVertexAttribI4bv = NULL;
+PFNGLVERTEXATTRIBI4IPROC glad_glVertexAttribI4i = NULL;
+PFNGLVERTEXATTRIBI4IVPROC glad_glVertexAttribI4iv = NULL;
+PFNGLVERTEXATTRIBI4SVPROC glad_glVertexAttribI4sv = NULL;
+PFNGLVERTEXATTRIBI4UBVPROC glad_glVertexAttribI4ubv = NULL;
+PFNGLVERTEXATTRIBI4UIPROC glad_glVertexAttribI4ui = NULL;
+PFNGLVERTEXATTRIBI4UIVPROC glad_glVertexAttribI4uiv = NULL;
+PFNGLVERTEXATTRIBI4USVPROC glad_glVertexAttribI4usv = NULL;
+PFNGLVERTEXATTRIBIPOINTERPROC glad_glVertexAttribIPointer = NULL;
+PFNGLVERTEXATTRIBP1UIPROC glad_glVertexAttribP1ui = NULL;
+PFNGLVERTEXATTRIBP1UIVPROC glad_glVertexAttribP1uiv = NULL;
+PFNGLVERTEXATTRIBP2UIPROC glad_glVertexAttribP2ui = NULL;
+PFNGLVERTEXATTRIBP2UIVPROC glad_glVertexAttribP2uiv = NULL;
+PFNGLVERTEXATTRIBP3UIPROC glad_glVertexAttribP3ui = NULL;
+PFNGLVERTEXATTRIBP3UIVPROC glad_glVertexAttribP3uiv = NULL;
+PFNGLVERTEXATTRIBP4UIPROC glad_glVertexAttribP4ui = NULL;
+PFNGLVERTEXATTRIBP4UIVPROC glad_glVertexAttribP4uiv = NULL;
+PFNGLVERTEXATTRIBPOINTERPROC glad_glVertexAttribPointer = NULL;
+PFNGLVERTEXP2UIPROC glad_glVertexP2ui = NULL;
+PFNGLVERTEXP2UIVPROC glad_glVertexP2uiv = NULL;
+PFNGLVERTEXP3UIPROC glad_glVertexP3ui = NULL;
+PFNGLVERTEXP3UIVPROC glad_glVertexP3uiv = NULL;
+PFNGLVERTEXP4UIPROC glad_glVertexP4ui = NULL;
+PFNGLVERTEXP4UIVPROC glad_glVertexP4uiv = NULL;
+PFNGLVERTEXPOINTERPROC glad_glVertexPointer = NULL;
+PFNGLVIEWPORTPROC glad_glViewport = NULL;
+PFNGLWAITSYNCPROC glad_glWaitSync = NULL;
+PFNGLWINDOWPOS2DPROC glad_glWindowPos2d = NULL;
+PFNGLWINDOWPOS2DVPROC glad_glWindowPos2dv = NULL;
+PFNGLWINDOWPOS2FPROC glad_glWindowPos2f = NULL;
+PFNGLWINDOWPOS2FVPROC glad_glWindowPos2fv = NULL;
+PFNGLWINDOWPOS2IPROC glad_glWindowPos2i = NULL;
+PFNGLWINDOWPOS2IVPROC glad_glWindowPos2iv = NULL;
+PFNGLWINDOWPOS2SPROC glad_glWindowPos2s = NULL;
+PFNGLWINDOWPOS2SVPROC glad_glWindowPos2sv = NULL;
+PFNGLWINDOWPOS3DPROC glad_glWindowPos3d = NULL;
+PFNGLWINDOWPOS3DVPROC glad_glWindowPos3dv = NULL;
+PFNGLWINDOWPOS3FPROC glad_glWindowPos3f = NULL;
+PFNGLWINDOWPOS3FVPROC glad_glWindowPos3fv = NULL;
+PFNGLWINDOWPOS3IPROC glad_glWindowPos3i = NULL;
+PFNGLWINDOWPOS3IVPROC glad_glWindowPos3iv = NULL;
+PFNGLWINDOWPOS3SPROC glad_glWindowPos3s = NULL;
+PFNGLWINDOWPOS3SVPROC glad_glWindowPos3sv = NULL;
+
+
+static void glad_gl_load_GL_VERSION_1_0( GLADuserptrloadfunc load, void* userptr) {
+ if(!GLAD_GL_VERSION_1_0) return;
+ glad_glAccum = (PFNGLACCUMPROC) load(userptr, "glAccum");
+ glad_glAlphaFunc = (PFNGLALPHAFUNCPROC) load(userptr, "glAlphaFunc");
+ glad_glBegin = (PFNGLBEGINPROC) load(userptr, "glBegin");
+ glad_glBitmap = (PFNGLBITMAPPROC) load(userptr, "glBitmap");
+ glad_glBlendFunc = (PFNGLBLENDFUNCPROC) load(userptr, "glBlendFunc");
+ glad_glCallList = (PFNGLCALLLISTPROC) load(userptr, "glCallList");
+ glad_glCallLists = (PFNGLCALLLISTSPROC) load(userptr, "glCallLists");
+ glad_glClear = (PFNGLCLEARPROC) load(userptr, "glClear");
+ glad_glClearAccum = (PFNGLCLEARACCUMPROC) load(userptr, "glClearAccum");
+ glad_glClearColor = (PFNGLCLEARCOLORPROC) load(userptr, "glClearColor");
+ glad_glClearDepth = (PFNGLCLEARDEPTHPROC) load(userptr, "glClearDepth");
+ glad_glClearIndex = (PFNGLCLEARINDEXPROC) load(userptr, "glClearIndex");
+ glad_glClearStencil = (PFNGLCLEARSTENCILPROC) load(userptr, "glClearStencil");
+ glad_glClipPlane = (PFNGLCLIPPLANEPROC) load(userptr, "glClipPlane");
+ glad_glColor3b = (PFNGLCOLOR3BPROC) load(userptr, "glColor3b");
+ glad_glColor3bv = (PFNGLCOLOR3BVPROC) load(userptr, "glColor3bv");
+ glad_glColor3d = (PFNGLCOLOR3DPROC) load(userptr, "glColor3d");
+ glad_glColor3dv = (PFNGLCOLOR3DVPROC) load(userptr, "glColor3dv");
+ glad_glColor3f = (PFNGLCOLOR3FPROC) load(userptr, "glColor3f");
+ glad_glColor3fv = (PFNGLCOLOR3FVPROC) load(userptr, "glColor3fv");
+ glad_glColor3i = (PFNGLCOLOR3IPROC) load(userptr, "glColor3i");
+ glad_glColor3iv = (PFNGLCOLOR3IVPROC) load(userptr, "glColor3iv");
+ glad_glColor3s = (PFNGLCOLOR3SPROC) load(userptr, "glColor3s");
+ glad_glColor3sv = (PFNGLCOLOR3SVPROC) load(userptr, "glColor3sv");
+ glad_glColor3ub = (PFNGLCOLOR3UBPROC) load(userptr, "glColor3ub");
+ glad_glColor3ubv = (PFNGLCOLOR3UBVPROC) load(userptr, "glColor3ubv");
+ glad_glColor3ui = (PFNGLCOLOR3UIPROC) load(userptr, "glColor3ui");
+ glad_glColor3uiv = (PFNGLCOLOR3UIVPROC) load(userptr, "glColor3uiv");
+ glad_glColor3us = (PFNGLCOLOR3USPROC) load(userptr, "glColor3us");
+ glad_glColor3usv = (PFNGLCOLOR3USVPROC) load(userptr, "glColor3usv");
+ glad_glColor4b = (PFNGLCOLOR4BPROC) load(userptr, "glColor4b");
+ glad_glColor4bv = (PFNGLCOLOR4BVPROC) load(userptr, "glColor4bv");
+ glad_glColor4d = (PFNGLCOLOR4DPROC) load(userptr, "glColor4d");
+ glad_glColor4dv = (PFNGLCOLOR4DVPROC) load(userptr, "glColor4dv");
+ glad_glColor4f = (PFNGLCOLOR4FPROC) load(userptr, "glColor4f");
+ glad_glColor4fv = (PFNGLCOLOR4FVPROC) load(userptr, "glColor4fv");
+ glad_glColor4i = (PFNGLCOLOR4IPROC) load(userptr, "glColor4i");
+ glad_glColor4iv = (PFNGLCOLOR4IVPROC) load(userptr, "glColor4iv");
+ glad_glColor4s = (PFNGLCOLOR4SPROC) load(userptr, "glColor4s");
+ glad_glColor4sv = (PFNGLCOLOR4SVPROC) load(userptr, "glColor4sv");
+ glad_glColor4ub = (PFNGLCOLOR4UBPROC) load(userptr, "glColor4ub");
+ glad_glColor4ubv = (PFNGLCOLOR4UBVPROC) load(userptr, "glColor4ubv");
+ glad_glColor4ui = (PFNGLCOLOR4UIPROC) load(userptr, "glColor4ui");
+ glad_glColor4uiv = (PFNGLCOLOR4UIVPROC) load(userptr, "glColor4uiv");
+ glad_glColor4us = (PFNGLCOLOR4USPROC) load(userptr, "glColor4us");
+ glad_glColor4usv = (PFNGLCOLOR4USVPROC) load(userptr, "glColor4usv");
+ glad_glColorMask = (PFNGLCOLORMASKPROC) load(userptr, "glColorMask");
+ glad_glColorMaterial = (PFNGLCOLORMATERIALPROC) load(userptr, "glColorMaterial");
+ glad_glCopyPixels = (PFNGLCOPYPIXELSPROC) load(userptr, "glCopyPixels");
+ glad_glCullFace = (PFNGLCULLFACEPROC) load(userptr, "glCullFace");
+ glad_glDeleteLists = (PFNGLDELETELISTSPROC) load(userptr, "glDeleteLists");
+ glad_glDepthFunc = (PFNGLDEPTHFUNCPROC) load(userptr, "glDepthFunc");
+ glad_glDepthMask = (PFNGLDEPTHMASKPROC) load(userptr, "glDepthMask");
+ glad_glDepthRange = (PFNGLDEPTHRANGEPROC) load(userptr, "glDepthRange");
+ glad_glDisable = (PFNGLDISABLEPROC) load(userptr, "glDisable");
+ glad_glDrawBuffer = (PFNGLDRAWBUFFERPROC) load(userptr, "glDrawBuffer");
+ glad_glDrawPixels = (PFNGLDRAWPIXELSPROC) load(userptr, "glDrawPixels");
+ glad_glEdgeFlag = (PFNGLEDGEFLAGPROC) load(userptr, "glEdgeFlag");
+ glad_glEdgeFlagv = (PFNGLEDGEFLAGVPROC) load(userptr, "glEdgeFlagv");
+ glad_glEnable = (PFNGLENABLEPROC) load(userptr, "glEnable");
+ glad_glEnd = (PFNGLENDPROC) load(userptr, "glEnd");
+ glad_glEndList = (PFNGLENDLISTPROC) load(userptr, "glEndList");
+ glad_glEvalCoord1d = (PFNGLEVALCOORD1DPROC) load(userptr, "glEvalCoord1d");
+ glad_glEvalCoord1dv = (PFNGLEVALCOORD1DVPROC) load(userptr, "glEvalCoord1dv");
+ glad_glEvalCoord1f = (PFNGLEVALCOORD1FPROC) load(userptr, "glEvalCoord1f");
+ glad_glEvalCoord1fv = (PFNGLEVALCOORD1FVPROC) load(userptr, "glEvalCoord1fv");
+ glad_glEvalCoord2d = (PFNGLEVALCOORD2DPROC) load(userptr, "glEvalCoord2d");
+ glad_glEvalCoord2dv = (PFNGLEVALCOORD2DVPROC) load(userptr, "glEvalCoord2dv");
+ glad_glEvalCoord2f = (PFNGLEVALCOORD2FPROC) load(userptr, "glEvalCoord2f");
+ glad_glEvalCoord2fv = (PFNGLEVALCOORD2FVPROC) load(userptr, "glEvalCoord2fv");
+ glad_glEvalMesh1 = (PFNGLEVALMESH1PROC) load(userptr, "glEvalMesh1");
+ glad_glEvalMesh2 = (PFNGLEVALMESH2PROC) load(userptr, "glEvalMesh2");
+ glad_glEvalPoint1 = (PFNGLEVALPOINT1PROC) load(userptr, "glEvalPoint1");
+ glad_glEvalPoint2 = (PFNGLEVALPOINT2PROC) load(userptr, "glEvalPoint2");
+ glad_glFeedbackBuffer = (PFNGLFEEDBACKBUFFERPROC) load(userptr, "glFeedbackBuffer");
+ glad_glFinish = (PFNGLFINISHPROC) load(userptr, "glFinish");
+ glad_glFlush = (PFNGLFLUSHPROC) load(userptr, "glFlush");
+ glad_glFogf = (PFNGLFOGFPROC) load(userptr, "glFogf");
+ glad_glFogfv = (PFNGLFOGFVPROC) load(userptr, "glFogfv");
+ glad_glFogi = (PFNGLFOGIPROC) load(userptr, "glFogi");
+ glad_glFogiv = (PFNGLFOGIVPROC) load(userptr, "glFogiv");
+ glad_glFrontFace = (PFNGLFRONTFACEPROC) load(userptr, "glFrontFace");
+ glad_glFrustum = (PFNGLFRUSTUMPROC) load(userptr, "glFrustum");
+ glad_glGenLists = (PFNGLGENLISTSPROC) load(userptr, "glGenLists");
+ glad_glGetBooleanv = (PFNGLGETBOOLEANVPROC) load(userptr, "glGetBooleanv");
+ glad_glGetClipPlane = (PFNGLGETCLIPPLANEPROC) load(userptr, "glGetClipPlane");
+ glad_glGetDoublev = (PFNGLGETDOUBLEVPROC) load(userptr, "glGetDoublev");
+ glad_glGetError = (PFNGLGETERRORPROC) load(userptr, "glGetError");
+ glad_glGetFloatv = (PFNGLGETFLOATVPROC) load(userptr, "glGetFloatv");
+ glad_glGetIntegerv = (PFNGLGETINTEGERVPROC) load(userptr, "glGetIntegerv");
+ glad_glGetLightfv = (PFNGLGETLIGHTFVPROC) load(userptr, "glGetLightfv");
+ glad_glGetLightiv = (PFNGLGETLIGHTIVPROC) load(userptr, "glGetLightiv");
+ glad_glGetMapdv = (PFNGLGETMAPDVPROC) load(userptr, "glGetMapdv");
+ glad_glGetMapfv = (PFNGLGETMAPFVPROC) load(userptr, "glGetMapfv");
+ glad_glGetMapiv = (PFNGLGETMAPIVPROC) load(userptr, "glGetMapiv");
+ glad_glGetMaterialfv = (PFNGLGETMATERIALFVPROC) load(userptr, "glGetMaterialfv");
+ glad_glGetMaterialiv = (PFNGLGETMATERIALIVPROC) load(userptr, "glGetMaterialiv");
+ glad_glGetPixelMapfv = (PFNGLGETPIXELMAPFVPROC) load(userptr, "glGetPixelMapfv");
+ glad_glGetPixelMapuiv = (PFNGLGETPIXELMAPUIVPROC) load(userptr, "glGetPixelMapuiv");
+ glad_glGetPixelMapusv = (PFNGLGETPIXELMAPUSVPROC) load(userptr, "glGetPixelMapusv");
+ glad_glGetPolygonStipple = (PFNGLGETPOLYGONSTIPPLEPROC) load(userptr, "glGetPolygonStipple");
+ glad_glGetString = (PFNGLGETSTRINGPROC) load(userptr, "glGetString");
+ glad_glGetTexEnvfv = (PFNGLGETTEXENVFVPROC) load(userptr, "glGetTexEnvfv");
+ glad_glGetTexEnviv = (PFNGLGETTEXENVIVPROC) load(userptr, "glGetTexEnviv");
+ glad_glGetTexGendv = (PFNGLGETTEXGENDVPROC) load(userptr, "glGetTexGendv");
+ glad_glGetTexGenfv = (PFNGLGETTEXGENFVPROC) load(userptr, "glGetTexGenfv");
+ glad_glGetTexGeniv = (PFNGLGETTEXGENIVPROC) load(userptr, "glGetTexGeniv");
+ glad_glGetTexImage = (PFNGLGETTEXIMAGEPROC) load(userptr, "glGetTexImage");
+ glad_glGetTexLevelParameterfv = (PFNGLGETTEXLEVELPARAMETERFVPROC) load(userptr, "glGetTexLevelParameterfv");
+ glad_glGetTexLevelParameteriv = (PFNGLGETTEXLEVELPARAMETERIVPROC) load(userptr, "glGetTexLevelParameteriv");
+ glad_glGetTexParameterfv = (PFNGLGETTEXPARAMETERFVPROC) load(userptr, "glGetTexParameterfv");
+ glad_glGetTexParameteriv = (PFNGLGETTEXPARAMETERIVPROC) load(userptr, "glGetTexParameteriv");
+ glad_glHint = (PFNGLHINTPROC) load(userptr, "glHint");
+ glad_glIndexMask = (PFNGLINDEXMASKPROC) load(userptr, "glIndexMask");
+ glad_glIndexd = (PFNGLINDEXDPROC) load(userptr, "glIndexd");
+ glad_glIndexdv = (PFNGLINDEXDVPROC) load(userptr, "glIndexdv");
+ glad_glIndexf = (PFNGLINDEXFPROC) load(userptr, "glIndexf");
+ glad_glIndexfv = (PFNGLINDEXFVPROC) load(userptr, "glIndexfv");
+ glad_glIndexi = (PFNGLINDEXIPROC) load(userptr, "glIndexi");
+ glad_glIndexiv = (PFNGLINDEXIVPROC) load(userptr, "glIndexiv");
+ glad_glIndexs = (PFNGLINDEXSPROC) load(userptr, "glIndexs");
+ glad_glIndexsv = (PFNGLINDEXSVPROC) load(userptr, "glIndexsv");
+ glad_glInitNames = (PFNGLINITNAMESPROC) load(userptr, "glInitNames");
+ glad_glIsEnabled = (PFNGLISENABLEDPROC) load(userptr, "glIsEnabled");
+ glad_glIsList = (PFNGLISLISTPROC) load(userptr, "glIsList");
+ glad_glLightModelf = (PFNGLLIGHTMODELFPROC) load(userptr, "glLightModelf");
+ glad_glLightModelfv = (PFNGLLIGHTMODELFVPROC) load(userptr, "glLightModelfv");
+ glad_glLightModeli = (PFNGLLIGHTMODELIPROC) load(userptr, "glLightModeli");
+ glad_glLightModeliv = (PFNGLLIGHTMODELIVPROC) load(userptr, "glLightModeliv");
+ glad_glLightf = (PFNGLLIGHTFPROC) load(userptr, "glLightf");
+ glad_glLightfv = (PFNGLLIGHTFVPROC) load(userptr, "glLightfv");
+ glad_glLighti = (PFNGLLIGHTIPROC) load(userptr, "glLighti");
+ glad_glLightiv = (PFNGLLIGHTIVPROC) load(userptr, "glLightiv");
+ glad_glLineStipple = (PFNGLLINESTIPPLEPROC) load(userptr, "glLineStipple");
+ glad_glLineWidth = (PFNGLLINEWIDTHPROC) load(userptr, "glLineWidth");
+ glad_glListBase = (PFNGLLISTBASEPROC) load(userptr, "glListBase");
+ glad_glLoadIdentity = (PFNGLLOADIDENTITYPROC) load(userptr, "glLoadIdentity");
+ glad_glLoadMatrixd = (PFNGLLOADMATRIXDPROC) load(userptr, "glLoadMatrixd");
+ glad_glLoadMatrixf = (PFNGLLOADMATRIXFPROC) load(userptr, "glLoadMatrixf");
+ glad_glLoadName = (PFNGLLOADNAMEPROC) load(userptr, "glLoadName");
+ glad_glLogicOp = (PFNGLLOGICOPPROC) load(userptr, "glLogicOp");
+ glad_glMap1d = (PFNGLMAP1DPROC) load(userptr, "glMap1d");
+ glad_glMap1f = (PFNGLMAP1FPROC) load(userptr, "glMap1f");
+ glad_glMap2d = (PFNGLMAP2DPROC) load(userptr, "glMap2d");
+ glad_glMap2f = (PFNGLMAP2FPROC) load(userptr, "glMap2f");
+ glad_glMapGrid1d = (PFNGLMAPGRID1DPROC) load(userptr, "glMapGrid1d");
+ glad_glMapGrid1f = (PFNGLMAPGRID1FPROC) load(userptr, "glMapGrid1f");
+ glad_glMapGrid2d = (PFNGLMAPGRID2DPROC) load(userptr, "glMapGrid2d");
+ glad_glMapGrid2f = (PFNGLMAPGRID2FPROC) load(userptr, "glMapGrid2f");
+ glad_glMaterialf = (PFNGLMATERIALFPROC) load(userptr, "glMaterialf");
+ glad_glMaterialfv = (PFNGLMATERIALFVPROC) load(userptr, "glMaterialfv");
+ glad_glMateriali = (PFNGLMATERIALIPROC) load(userptr, "glMateriali");
+ glad_glMaterialiv = (PFNGLMATERIALIVPROC) load(userptr, "glMaterialiv");
+ glad_glMatrixMode = (PFNGLMATRIXMODEPROC) load(userptr, "glMatrixMode");
+ glad_glMultMatrixd = (PFNGLMULTMATRIXDPROC) load(userptr, "glMultMatrixd");
+ glad_glMultMatrixf = (PFNGLMULTMATRIXFPROC) load(userptr, "glMultMatrixf");
+ glad_glNewList = (PFNGLNEWLISTPROC) load(userptr, "glNewList");
+ glad_glNormal3b = (PFNGLNORMAL3BPROC) load(userptr, "glNormal3b");
+ glad_glNormal3bv = (PFNGLNORMAL3BVPROC) load(userptr, "glNormal3bv");
+ glad_glNormal3d = (PFNGLNORMAL3DPROC) load(userptr, "glNormal3d");
+ glad_glNormal3dv = (PFNGLNORMAL3DVPROC) load(userptr, "glNormal3dv");
+ glad_glNormal3f = (PFNGLNORMAL3FPROC) load(userptr, "glNormal3f");
+ glad_glNormal3fv = (PFNGLNORMAL3FVPROC) load(userptr, "glNormal3fv");
+ glad_glNormal3i = (PFNGLNORMAL3IPROC) load(userptr, "glNormal3i");
+ glad_glNormal3iv = (PFNGLNORMAL3IVPROC) load(userptr, "glNormal3iv");
+ glad_glNormal3s = (PFNGLNORMAL3SPROC) load(userptr, "glNormal3s");
+ glad_glNormal3sv = (PFNGLNORMAL3SVPROC) load(userptr, "glNormal3sv");
+ glad_glOrtho = (PFNGLORTHOPROC) load(userptr, "glOrtho");
+ glad_glPassThrough = (PFNGLPASSTHROUGHPROC) load(userptr, "glPassThrough");
+ glad_glPixelMapfv = (PFNGLPIXELMAPFVPROC) load(userptr, "glPixelMapfv");
+ glad_glPixelMapuiv = (PFNGLPIXELMAPUIVPROC) load(userptr, "glPixelMapuiv");
+ glad_glPixelMapusv = (PFNGLPIXELMAPUSVPROC) load(userptr, "glPixelMapusv");
+ glad_glPixelStoref = (PFNGLPIXELSTOREFPROC) load(userptr, "glPixelStoref");
+ glad_glPixelStorei = (PFNGLPIXELSTOREIPROC) load(userptr, "glPixelStorei");
+ glad_glPixelTransferf = (PFNGLPIXELTRANSFERFPROC) load(userptr, "glPixelTransferf");
+ glad_glPixelTransferi = (PFNGLPIXELTRANSFERIPROC) load(userptr, "glPixelTransferi");
+ glad_glPixelZoom = (PFNGLPIXELZOOMPROC) load(userptr, "glPixelZoom");
+ glad_glPointSize = (PFNGLPOINTSIZEPROC) load(userptr, "glPointSize");
+ glad_glPolygonMode = (PFNGLPOLYGONMODEPROC) load(userptr, "glPolygonMode");
+ glad_glPolygonStipple = (PFNGLPOLYGONSTIPPLEPROC) load(userptr, "glPolygonStipple");
+ glad_glPopAttrib = (PFNGLPOPATTRIBPROC) load(userptr, "glPopAttrib");
+ glad_glPopMatrix = (PFNGLPOPMATRIXPROC) load(userptr, "glPopMatrix");
+ glad_glPopName = (PFNGLPOPNAMEPROC) load(userptr, "glPopName");
+ glad_glPushAttrib = (PFNGLPUSHATTRIBPROC) load(userptr, "glPushAttrib");
+ glad_glPushMatrix = (PFNGLPUSHMATRIXPROC) load(userptr, "glPushMatrix");
+ glad_glPushName = (PFNGLPUSHNAMEPROC) load(userptr, "glPushName");
+ glad_glRasterPos2d = (PFNGLRASTERPOS2DPROC) load(userptr, "glRasterPos2d");
+ glad_glRasterPos2dv = (PFNGLRASTERPOS2DVPROC) load(userptr, "glRasterPos2dv");
+ glad_glRasterPos2f = (PFNGLRASTERPOS2FPROC) load(userptr, "glRasterPos2f");
+ glad_glRasterPos2fv = (PFNGLRASTERPOS2FVPROC) load(userptr, "glRasterPos2fv");
+ glad_glRasterPos2i = (PFNGLRASTERPOS2IPROC) load(userptr, "glRasterPos2i");
+ glad_glRasterPos2iv = (PFNGLRASTERPOS2IVPROC) load(userptr, "glRasterPos2iv");
+ glad_glRasterPos2s = (PFNGLRASTERPOS2SPROC) load(userptr, "glRasterPos2s");
+ glad_glRasterPos2sv = (PFNGLRASTERPOS2SVPROC) load(userptr, "glRasterPos2sv");
+ glad_glRasterPos3d = (PFNGLRASTERPOS3DPROC) load(userptr, "glRasterPos3d");
+ glad_glRasterPos3dv = (PFNGLRASTERPOS3DVPROC) load(userptr, "glRasterPos3dv");
+ glad_glRasterPos3f = (PFNGLRASTERPOS3FPROC) load(userptr, "glRasterPos3f");
+ glad_glRasterPos3fv = (PFNGLRASTERPOS3FVPROC) load(userptr, "glRasterPos3fv");
+ glad_glRasterPos3i = (PFNGLRASTERPOS3IPROC) load(userptr, "glRasterPos3i");
+ glad_glRasterPos3iv = (PFNGLRASTERPOS3IVPROC) load(userptr, "glRasterPos3iv");
+ glad_glRasterPos3s = (PFNGLRASTERPOS3SPROC) load(userptr, "glRasterPos3s");
+ glad_glRasterPos3sv = (PFNGLRASTERPOS3SVPROC) load(userptr, "glRasterPos3sv");
+ glad_glRasterPos4d = (PFNGLRASTERPOS4DPROC) load(userptr, "glRasterPos4d");
+ glad_glRasterPos4dv = (PFNGLRASTERPOS4DVPROC) load(userptr, "glRasterPos4dv");
+ glad_glRasterPos4f = (PFNGLRASTERPOS4FPROC) load(userptr, "glRasterPos4f");
+ glad_glRasterPos4fv = (PFNGLRASTERPOS4FVPROC) load(userptr, "glRasterPos4fv");
+ glad_glRasterPos4i = (PFNGLRASTERPOS4IPROC) load(userptr, "glRasterPos4i");
+ glad_glRasterPos4iv = (PFNGLRASTERPOS4IVPROC) load(userptr, "glRasterPos4iv");
+ glad_glRasterPos4s = (PFNGLRASTERPOS4SPROC) load(userptr, "glRasterPos4s");
+ glad_glRasterPos4sv = (PFNGLRASTERPOS4SVPROC) load(userptr, "glRasterPos4sv");
+ glad_glReadBuffer = (PFNGLREADBUFFERPROC) load(userptr, "glReadBuffer");
+ glad_glReadPixels = (PFNGLREADPIXELSPROC) load(userptr, "glReadPixels");
+ glad_glRectd = (PFNGLRECTDPROC) load(userptr, "glRectd");
+ glad_glRectdv = (PFNGLRECTDVPROC) load(userptr, "glRectdv");
+ glad_glRectf = (PFNGLRECTFPROC) load(userptr, "glRectf");
+ glad_glRectfv = (PFNGLRECTFVPROC) load(userptr, "glRectfv");
+ glad_glRecti = (PFNGLRECTIPROC) load(userptr, "glRecti");
+ glad_glRectiv = (PFNGLRECTIVPROC) load(userptr, "glRectiv");
+ glad_glRects = (PFNGLRECTSPROC) load(userptr, "glRects");
+ glad_glRectsv = (PFNGLRECTSVPROC) load(userptr, "glRectsv");
+ glad_glRenderMode = (PFNGLRENDERMODEPROC) load(userptr, "glRenderMode");
+ glad_glRotated = (PFNGLROTATEDPROC) load(userptr, "glRotated");
+ glad_glRotatef = (PFNGLROTATEFPROC) load(userptr, "glRotatef");
+ glad_glScaled = (PFNGLSCALEDPROC) load(userptr, "glScaled");
+ glad_glScalef = (PFNGLSCALEFPROC) load(userptr, "glScalef");
+ glad_glScissor = (PFNGLSCISSORPROC) load(userptr, "glScissor");
+ glad_glSelectBuffer = (PFNGLSELECTBUFFERPROC) load(userptr, "glSelectBuffer");
+ glad_glShadeModel = (PFNGLSHADEMODELPROC) load(userptr, "glShadeModel");
+ glad_glStencilFunc = (PFNGLSTENCILFUNCPROC) load(userptr, "glStencilFunc");
+ glad_glStencilMask = (PFNGLSTENCILMASKPROC) load(userptr, "glStencilMask");
+ glad_glStencilOp = (PFNGLSTENCILOPPROC) load(userptr, "glStencilOp");
+ glad_glTexCoord1d = (PFNGLTEXCOORD1DPROC) load(userptr, "glTexCoord1d");
+ glad_glTexCoord1dv = (PFNGLTEXCOORD1DVPROC) load(userptr, "glTexCoord1dv");
+ glad_glTexCoord1f = (PFNGLTEXCOORD1FPROC) load(userptr, "glTexCoord1f");
+ glad_glTexCoord1fv = (PFNGLTEXCOORD1FVPROC) load(userptr, "glTexCoord1fv");
+ glad_glTexCoord1i = (PFNGLTEXCOORD1IPROC) load(userptr, "glTexCoord1i");
+ glad_glTexCoord1iv = (PFNGLTEXCOORD1IVPROC) load(userptr, "glTexCoord1iv");
+ glad_glTexCoord1s = (PFNGLTEXCOORD1SPROC) load(userptr, "glTexCoord1s");
+ glad_glTexCoord1sv = (PFNGLTEXCOORD1SVPROC) load(userptr, "glTexCoord1sv");
+ glad_glTexCoord2d = (PFNGLTEXCOORD2DPROC) load(userptr, "glTexCoord2d");
+ glad_glTexCoord2dv = (PFNGLTEXCOORD2DVPROC) load(userptr, "glTexCoord2dv");
+ glad_glTexCoord2f = (PFNGLTEXCOORD2FPROC) load(userptr, "glTexCoord2f");
+ glad_glTexCoord2fv = (PFNGLTEXCOORD2FVPROC) load(userptr, "glTexCoord2fv");
+ glad_glTexCoord2i = (PFNGLTEXCOORD2IPROC) load(userptr, "glTexCoord2i");
+ glad_glTexCoord2iv = (PFNGLTEXCOORD2IVPROC) load(userptr, "glTexCoord2iv");
+ glad_glTexCoord2s = (PFNGLTEXCOORD2SPROC) load(userptr, "glTexCoord2s");
+ glad_glTexCoord2sv = (PFNGLTEXCOORD2SVPROC) load(userptr, "glTexCoord2sv");
+ glad_glTexCoord3d = (PFNGLTEXCOORD3DPROC) load(userptr, "glTexCoord3d");
+ glad_glTexCoord3dv = (PFNGLTEXCOORD3DVPROC) load(userptr, "glTexCoord3dv");
+ glad_glTexCoord3f = (PFNGLTEXCOORD3FPROC) load(userptr, "glTexCoord3f");
+ glad_glTexCoord3fv = (PFNGLTEXCOORD3FVPROC) load(userptr, "glTexCoord3fv");
+ glad_glTexCoord3i = (PFNGLTEXCOORD3IPROC) load(userptr, "glTexCoord3i");
+ glad_glTexCoord3iv = (PFNGLTEXCOORD3IVPROC) load(userptr, "glTexCoord3iv");
+ glad_glTexCoord3s = (PFNGLTEXCOORD3SPROC) load(userptr, "glTexCoord3s");
+ glad_glTexCoord3sv = (PFNGLTEXCOORD3SVPROC) load(userptr, "glTexCoord3sv");
+ glad_glTexCoord4d = (PFNGLTEXCOORD4DPROC) load(userptr, "glTexCoord4d");
+ glad_glTexCoord4dv = (PFNGLTEXCOORD4DVPROC) load(userptr, "glTexCoord4dv");
+ glad_glTexCoord4f = (PFNGLTEXCOORD4FPROC) load(userptr, "glTexCoord4f");
+ glad_glTexCoord4fv = (PFNGLTEXCOORD4FVPROC) load(userptr, "glTexCoord4fv");
+ glad_glTexCoord4i = (PFNGLTEXCOORD4IPROC) load(userptr, "glTexCoord4i");
+ glad_glTexCoord4iv = (PFNGLTEXCOORD4IVPROC) load(userptr, "glTexCoord4iv");
+ glad_glTexCoord4s = (PFNGLTEXCOORD4SPROC) load(userptr, "glTexCoord4s");
+ glad_glTexCoord4sv = (PFNGLTEXCOORD4SVPROC) load(userptr, "glTexCoord4sv");
+ glad_glTexEnvf = (PFNGLTEXENVFPROC) load(userptr, "glTexEnvf");
+ glad_glTexEnvfv = (PFNGLTEXENVFVPROC) load(userptr, "glTexEnvfv");
+ glad_glTexEnvi = (PFNGLTEXENVIPROC) load(userptr, "glTexEnvi");
+ glad_glTexEnviv = (PFNGLTEXENVIVPROC) load(userptr, "glTexEnviv");
+ glad_glTexGend = (PFNGLTEXGENDPROC) load(userptr, "glTexGend");
+ glad_glTexGendv = (PFNGLTEXGENDVPROC) load(userptr, "glTexGendv");
+ glad_glTexGenf = (PFNGLTEXGENFPROC) load(userptr, "glTexGenf");
+ glad_glTexGenfv = (PFNGLTEXGENFVPROC) load(userptr, "glTexGenfv");
+ glad_glTexGeni = (PFNGLTEXGENIPROC) load(userptr, "glTexGeni");
+ glad_glTexGeniv = (PFNGLTEXGENIVPROC) load(userptr, "glTexGeniv");
+ glad_glTexImage1D = (PFNGLTEXIMAGE1DPROC) load(userptr, "glTexImage1D");
+ glad_glTexImage2D = (PFNGLTEXIMAGE2DPROC) load(userptr, "glTexImage2D");
+ glad_glTexParameterf = (PFNGLTEXPARAMETERFPROC) load(userptr, "glTexParameterf");
+ glad_glTexParameterfv = (PFNGLTEXPARAMETERFVPROC) load(userptr, "glTexParameterfv");
+ glad_glTexParameteri = (PFNGLTEXPARAMETERIPROC) load(userptr, "glTexParameteri");
+ glad_glTexParameteriv = (PFNGLTEXPARAMETERIVPROC) load(userptr, "glTexParameteriv");
+ glad_glTranslated = (PFNGLTRANSLATEDPROC) load(userptr, "glTranslated");
+ glad_glTranslatef = (PFNGLTRANSLATEFPROC) load(userptr, "glTranslatef");
+ glad_glVertex2d = (PFNGLVERTEX2DPROC) load(userptr, "glVertex2d");
+ glad_glVertex2dv = (PFNGLVERTEX2DVPROC) load(userptr, "glVertex2dv");
+ glad_glVertex2f = (PFNGLVERTEX2FPROC) load(userptr, "glVertex2f");
+ glad_glVertex2fv = (PFNGLVERTEX2FVPROC) load(userptr, "glVertex2fv");
+ glad_glVertex2i = (PFNGLVERTEX2IPROC) load(userptr, "glVertex2i");
+ glad_glVertex2iv = (PFNGLVERTEX2IVPROC) load(userptr, "glVertex2iv");
+ glad_glVertex2s = (PFNGLVERTEX2SPROC) load(userptr, "glVertex2s");
+ glad_glVertex2sv = (PFNGLVERTEX2SVPROC) load(userptr, "glVertex2sv");
+ glad_glVertex3d = (PFNGLVERTEX3DPROC) load(userptr, "glVertex3d");
+ glad_glVertex3dv = (PFNGLVERTEX3DVPROC) load(userptr, "glVertex3dv");
+ glad_glVertex3f = (PFNGLVERTEX3FPROC) load(userptr, "glVertex3f");
+ glad_glVertex3fv = (PFNGLVERTEX3FVPROC) load(userptr, "glVertex3fv");
+ glad_glVertex3i = (PFNGLVERTEX3IPROC) load(userptr, "glVertex3i");
+ glad_glVertex3iv = (PFNGLVERTEX3IVPROC) load(userptr, "glVertex3iv");
+ glad_glVertex3s = (PFNGLVERTEX3SPROC) load(userptr, "glVertex3s");
+ glad_glVertex3sv = (PFNGLVERTEX3SVPROC) load(userptr, "glVertex3sv");
+ glad_glVertex4d = (PFNGLVERTEX4DPROC) load(userptr, "glVertex4d");
+ glad_glVertex4dv = (PFNGLVERTEX4DVPROC) load(userptr, "glVertex4dv");
+ glad_glVertex4f = (PFNGLVERTEX4FPROC) load(userptr, "glVertex4f");
+ glad_glVertex4fv = (PFNGLVERTEX4FVPROC) load(userptr, "glVertex4fv");
+ glad_glVertex4i = (PFNGLVERTEX4IPROC) load(userptr, "glVertex4i");
+ glad_glVertex4iv = (PFNGLVERTEX4IVPROC) load(userptr, "glVertex4iv");
+ glad_glVertex4s = (PFNGLVERTEX4SPROC) load(userptr, "glVertex4s");
+ glad_glVertex4sv = (PFNGLVERTEX4SVPROC) load(userptr, "glVertex4sv");
+ glad_glViewport = (PFNGLVIEWPORTPROC) load(userptr, "glViewport");
+}
+static void glad_gl_load_GL_VERSION_1_1( GLADuserptrloadfunc load, void* userptr) {
+ if(!GLAD_GL_VERSION_1_1) return;
+ glad_glAreTexturesResident = (PFNGLARETEXTURESRESIDENTPROC) load(userptr, "glAreTexturesResident");
+ glad_glArrayElement = (PFNGLARRAYELEMENTPROC) load(userptr, "glArrayElement");
+ glad_glBindTexture = (PFNGLBINDTEXTUREPROC) load(userptr, "glBindTexture");
+ glad_glColorPointer = (PFNGLCOLORPOINTERPROC) load(userptr, "glColorPointer");
+ glad_glCopyTexImage1D = (PFNGLCOPYTEXIMAGE1DPROC) load(userptr, "glCopyTexImage1D");
+ glad_glCopyTexImage2D = (PFNGLCOPYTEXIMAGE2DPROC) load(userptr, "glCopyTexImage2D");
+ glad_glCopyTexSubImage1D = (PFNGLCOPYTEXSUBIMAGE1DPROC) load(userptr, "glCopyTexSubImage1D");
+ glad_glCopyTexSubImage2D = (PFNGLCOPYTEXSUBIMAGE2DPROC) load(userptr, "glCopyTexSubImage2D");
+ glad_glDeleteTextures = (PFNGLDELETETEXTURESPROC) load(userptr, "glDeleteTextures");
+ glad_glDisableClientState = (PFNGLDISABLECLIENTSTATEPROC) load(userptr, "glDisableClientState");
+ glad_glDrawArrays = (PFNGLDRAWARRAYSPROC) load(userptr, "glDrawArrays");
+ glad_glDrawElements = (PFNGLDRAWELEMENTSPROC) load(userptr, "glDrawElements");
+ glad_glEdgeFlagPointer = (PFNGLEDGEFLAGPOINTERPROC) load(userptr, "glEdgeFlagPointer");
+ glad_glEnableClientState = (PFNGLENABLECLIENTSTATEPROC) load(userptr, "glEnableClientState");
+ glad_glGenTextures = (PFNGLGENTEXTURESPROC) load(userptr, "glGenTextures");
+ glad_glGetPointerv = (PFNGLGETPOINTERVPROC) load(userptr, "glGetPointerv");
+ glad_glIndexPointer = (PFNGLINDEXPOINTERPROC) load(userptr, "glIndexPointer");
+ glad_glIndexub = (PFNGLINDEXUBPROC) load(userptr, "glIndexub");
+ glad_glIndexubv = (PFNGLINDEXUBVPROC) load(userptr, "glIndexubv");
+ glad_glInterleavedArrays = (PFNGLINTERLEAVEDARRAYSPROC) load(userptr, "glInterleavedArrays");
+ glad_glIsTexture = (PFNGLISTEXTUREPROC) load(userptr, "glIsTexture");
+ glad_glNormalPointer = (PFNGLNORMALPOINTERPROC) load(userptr, "glNormalPointer");
+ glad_glPolygonOffset = (PFNGLPOLYGONOFFSETPROC) load(userptr, "glPolygonOffset");
+ glad_glPopClientAttrib = (PFNGLPOPCLIENTATTRIBPROC) load(userptr, "glPopClientAttrib");
+ glad_glPrioritizeTextures = (PFNGLPRIORITIZETEXTURESPROC) load(userptr, "glPrioritizeTextures");
+ glad_glPushClientAttrib = (PFNGLPUSHCLIENTATTRIBPROC) load(userptr, "glPushClientAttrib");
+ glad_glTexCoordPointer = (PFNGLTEXCOORDPOINTERPROC) load(userptr, "glTexCoordPointer");
+ glad_glTexSubImage1D = (PFNGLTEXSUBIMAGE1DPROC) load(userptr, "glTexSubImage1D");
+ glad_glTexSubImage2D = (PFNGLTEXSUBIMAGE2DPROC) load(userptr, "glTexSubImage2D");
+ glad_glVertexPointer = (PFNGLVERTEXPOINTERPROC) load(userptr, "glVertexPointer");
+}
+static void glad_gl_load_GL_VERSION_1_2( GLADuserptrloadfunc load, void* userptr) {
+ if(!GLAD_GL_VERSION_1_2) return;
+ glad_glCopyTexSubImage3D = (PFNGLCOPYTEXSUBIMAGE3DPROC) load(userptr, "glCopyTexSubImage3D");
+ glad_glDrawRangeElements = (PFNGLDRAWRANGEELEMENTSPROC) load(userptr, "glDrawRangeElements");
+ glad_glTexImage3D = (PFNGLTEXIMAGE3DPROC) load(userptr, "glTexImage3D");
+ glad_glTexSubImage3D = (PFNGLTEXSUBIMAGE3DPROC) load(userptr, "glTexSubImage3D");
+}
+static void glad_gl_load_GL_VERSION_1_3( GLADuserptrloadfunc load, void* userptr) {
+ if(!GLAD_GL_VERSION_1_3) return;
+ glad_glActiveTexture = (PFNGLACTIVETEXTUREPROC) load(userptr, "glActiveTexture");
+ glad_glClientActiveTexture = (PFNGLCLIENTACTIVETEXTUREPROC) load(userptr, "glClientActiveTexture");
+ glad_glCompressedTexImage1D = (PFNGLCOMPRESSEDTEXIMAGE1DPROC) load(userptr, "glCompressedTexImage1D");
+ glad_glCompressedTexImage2D = (PFNGLCOMPRESSEDTEXIMAGE2DPROC) load(userptr, "glCompressedTexImage2D");
+ glad_glCompressedTexImage3D = (PFNGLCOMPRESSEDTEXIMAGE3DPROC) load(userptr, "glCompressedTexImage3D");
+ glad_glCompressedTexSubImage1D = (PFNGLCOMPRESSEDTEXSUBIMAGE1DPROC) load(userptr, "glCompressedTexSubImage1D");
+ glad_glCompressedTexSubImage2D = (PFNGLCOMPRESSEDTEXSUBIMAGE2DPROC) load(userptr, "glCompressedTexSubImage2D");
+ glad_glCompressedTexSubImage3D = (PFNGLCOMPRESSEDTEXSUBIMAGE3DPROC) load(userptr, "glCompressedTexSubImage3D");
+ glad_glGetCompressedTexImage = (PFNGLGETCOMPRESSEDTEXIMAGEPROC) load(userptr, "glGetCompressedTexImage");
+ glad_glLoadTransposeMatrixd = (PFNGLLOADTRANSPOSEMATRIXDPROC) load(userptr, "glLoadTransposeMatrixd");
+ glad_glLoadTransposeMatrixf = (PFNGLLOADTRANSPOSEMATRIXFPROC) load(userptr, "glLoadTransposeMatrixf");
+ glad_glMultTransposeMatrixd = (PFNGLMULTTRANSPOSEMATRIXDPROC) load(userptr, "glMultTransposeMatrixd");
+ glad_glMultTransposeMatrixf = (PFNGLMULTTRANSPOSEMATRIXFPROC) load(userptr, "glMultTransposeMatrixf");
+ glad_glMultiTexCoord1d = (PFNGLMULTITEXCOORD1DPROC) load(userptr, "glMultiTexCoord1d");
+ glad_glMultiTexCoord1dv = (PFNGLMULTITEXCOORD1DVPROC) load(userptr, "glMultiTexCoord1dv");
+ glad_glMultiTexCoord1f = (PFNGLMULTITEXCOORD1FPROC) load(userptr, "glMultiTexCoord1f");
+ glad_glMultiTexCoord1fv = (PFNGLMULTITEXCOORD1FVPROC) load(userptr, "glMultiTexCoord1fv");
+ glad_glMultiTexCoord1i = (PFNGLMULTITEXCOORD1IPROC) load(userptr, "glMultiTexCoord1i");
+ glad_glMultiTexCoord1iv = (PFNGLMULTITEXCOORD1IVPROC) load(userptr, "glMultiTexCoord1iv");
+ glad_glMultiTexCoord1s = (PFNGLMULTITEXCOORD1SPROC) load(userptr, "glMultiTexCoord1s");
+ glad_glMultiTexCoord1sv = (PFNGLMULTITEXCOORD1SVPROC) load(userptr, "glMultiTexCoord1sv");
+ glad_glMultiTexCoord2d = (PFNGLMULTITEXCOORD2DPROC) load(userptr, "glMultiTexCoord2d");
+ glad_glMultiTexCoord2dv = (PFNGLMULTITEXCOORD2DVPROC) load(userptr, "glMultiTexCoord2dv");
+ glad_glMultiTexCoord2f = (PFNGLMULTITEXCOORD2FPROC) load(userptr, "glMultiTexCoord2f");
+ glad_glMultiTexCoord2fv = (PFNGLMULTITEXCOORD2FVPROC) load(userptr, "glMultiTexCoord2fv");
+ glad_glMultiTexCoord2i = (PFNGLMULTITEXCOORD2IPROC) load(userptr, "glMultiTexCoord2i");
+ glad_glMultiTexCoord2iv = (PFNGLMULTITEXCOORD2IVPROC) load(userptr, "glMultiTexCoord2iv");
+ glad_glMultiTexCoord2s = (PFNGLMULTITEXCOORD2SPROC) load(userptr, "glMultiTexCoord2s");
+ glad_glMultiTexCoord2sv = (PFNGLMULTITEXCOORD2SVPROC) load(userptr, "glMultiTexCoord2sv");
+ glad_glMultiTexCoord3d = (PFNGLMULTITEXCOORD3DPROC) load(userptr, "glMultiTexCoord3d");
+ glad_glMultiTexCoord3dv = (PFNGLMULTITEXCOORD3DVPROC) load(userptr, "glMultiTexCoord3dv");
+ glad_glMultiTexCoord3f = (PFNGLMULTITEXCOORD3FPROC) load(userptr, "glMultiTexCoord3f");
+ glad_glMultiTexCoord3fv = (PFNGLMULTITEXCOORD3FVPROC) load(userptr, "glMultiTexCoord3fv");
+ glad_glMultiTexCoord3i = (PFNGLMULTITEXCOORD3IPROC) load(userptr, "glMultiTexCoord3i");
+ glad_glMultiTexCoord3iv = (PFNGLMULTITEXCOORD3IVPROC) load(userptr, "glMultiTexCoord3iv");
+ glad_glMultiTexCoord3s = (PFNGLMULTITEXCOORD3SPROC) load(userptr, "glMultiTexCoord3s");
+ glad_glMultiTexCoord3sv = (PFNGLMULTITEXCOORD3SVPROC) load(userptr, "glMultiTexCoord3sv");
+ glad_glMultiTexCoord4d = (PFNGLMULTITEXCOORD4DPROC) load(userptr, "glMultiTexCoord4d");
+ glad_glMultiTexCoord4dv = (PFNGLMULTITEXCOORD4DVPROC) load(userptr, "glMultiTexCoord4dv");
+ glad_glMultiTexCoord4f = (PFNGLMULTITEXCOORD4FPROC) load(userptr, "glMultiTexCoord4f");
+ glad_glMultiTexCoord4fv = (PFNGLMULTITEXCOORD4FVPROC) load(userptr, "glMultiTexCoord4fv");
+ glad_glMultiTexCoord4i = (PFNGLMULTITEXCOORD4IPROC) load(userptr, "glMultiTexCoord4i");
+ glad_glMultiTexCoord4iv = (PFNGLMULTITEXCOORD4IVPROC) load(userptr, "glMultiTexCoord4iv");
+ glad_glMultiTexCoord4s = (PFNGLMULTITEXCOORD4SPROC) load(userptr, "glMultiTexCoord4s");
+ glad_glMultiTexCoord4sv = (PFNGLMULTITEXCOORD4SVPROC) load(userptr, "glMultiTexCoord4sv");
+ glad_glSampleCoverage = (PFNGLSAMPLECOVERAGEPROC) load(userptr, "glSampleCoverage");
+}
+static void glad_gl_load_GL_VERSION_1_4( GLADuserptrloadfunc load, void* userptr) {
+ if(!GLAD_GL_VERSION_1_4) return;
+ glad_glBlendColor = (PFNGLBLENDCOLORPROC) load(userptr, "glBlendColor");
+ glad_glBlendEquation = (PFNGLBLENDEQUATIONPROC) load(userptr, "glBlendEquation");
+ glad_glBlendFuncSeparate = (PFNGLBLENDFUNCSEPARATEPROC) load(userptr, "glBlendFuncSeparate");
+ glad_glFogCoordPointer = (PFNGLFOGCOORDPOINTERPROC) load(userptr, "glFogCoordPointer");
+ glad_glFogCoordd = (PFNGLFOGCOORDDPROC) load(userptr, "glFogCoordd");
+ glad_glFogCoorddv = (PFNGLFOGCOORDDVPROC) load(userptr, "glFogCoorddv");
+ glad_glFogCoordf = (PFNGLFOGCOORDFPROC) load(userptr, "glFogCoordf");
+ glad_glFogCoordfv = (PFNGLFOGCOORDFVPROC) load(userptr, "glFogCoordfv");
+ glad_glMultiDrawArrays = (PFNGLMULTIDRAWARRAYSPROC) load(userptr, "glMultiDrawArrays");
+ glad_glMultiDrawElements = (PFNGLMULTIDRAWELEMENTSPROC) load(userptr, "glMultiDrawElements");
+ glad_glPointParameterf = (PFNGLPOINTPARAMETERFPROC) load(userptr, "glPointParameterf");
+ glad_glPointParameterfv = (PFNGLPOINTPARAMETERFVPROC) load(userptr, "glPointParameterfv");
+ glad_glPointParameteri = (PFNGLPOINTPARAMETERIPROC) load(userptr, "glPointParameteri");
+ glad_glPointParameteriv = (PFNGLPOINTPARAMETERIVPROC) load(userptr, "glPointParameteriv");
+ glad_glSecondaryColor3b = (PFNGLSECONDARYCOLOR3BPROC) load(userptr, "glSecondaryColor3b");
+ glad_glSecondaryColor3bv = (PFNGLSECONDARYCOLOR3BVPROC) load(userptr, "glSecondaryColor3bv");
+ glad_glSecondaryColor3d = (PFNGLSECONDARYCOLOR3DPROC) load(userptr, "glSecondaryColor3d");
+ glad_glSecondaryColor3dv = (PFNGLSECONDARYCOLOR3DVPROC) load(userptr, "glSecondaryColor3dv");
+ glad_glSecondaryColor3f = (PFNGLSECONDARYCOLOR3FPROC) load(userptr, "glSecondaryColor3f");
+ glad_glSecondaryColor3fv = (PFNGLSECONDARYCOLOR3FVPROC) load(userptr, "glSecondaryColor3fv");
+ glad_glSecondaryColor3i = (PFNGLSECONDARYCOLOR3IPROC) load(userptr, "glSecondaryColor3i");
+ glad_glSecondaryColor3iv = (PFNGLSECONDARYCOLOR3IVPROC) load(userptr, "glSecondaryColor3iv");
+ glad_glSecondaryColor3s = (PFNGLSECONDARYCOLOR3SPROC) load(userptr, "glSecondaryColor3s");
+ glad_glSecondaryColor3sv = (PFNGLSECONDARYCOLOR3SVPROC) load(userptr, "glSecondaryColor3sv");
+ glad_glSecondaryColor3ub = (PFNGLSECONDARYCOLOR3UBPROC) load(userptr, "glSecondaryColor3ub");
+ glad_glSecondaryColor3ubv = (PFNGLSECONDARYCOLOR3UBVPROC) load(userptr, "glSecondaryColor3ubv");
+ glad_glSecondaryColor3ui = (PFNGLSECONDARYCOLOR3UIPROC) load(userptr, "glSecondaryColor3ui");
+ glad_glSecondaryColor3uiv = (PFNGLSECONDARYCOLOR3UIVPROC) load(userptr, "glSecondaryColor3uiv");
+ glad_glSecondaryColor3us = (PFNGLSECONDARYCOLOR3USPROC) load(userptr, "glSecondaryColor3us");
+ glad_glSecondaryColor3usv = (PFNGLSECONDARYCOLOR3USVPROC) load(userptr, "glSecondaryColor3usv");
+ glad_glSecondaryColorPointer = (PFNGLSECONDARYCOLORPOINTERPROC) load(userptr, "glSecondaryColorPointer");
+ glad_glWindowPos2d = (PFNGLWINDOWPOS2DPROC) load(userptr, "glWindowPos2d");
+ glad_glWindowPos2dv = (PFNGLWINDOWPOS2DVPROC) load(userptr, "glWindowPos2dv");
+ glad_glWindowPos2f = (PFNGLWINDOWPOS2FPROC) load(userptr, "glWindowPos2f");
+ glad_glWindowPos2fv = (PFNGLWINDOWPOS2FVPROC) load(userptr, "glWindowPos2fv");
+ glad_glWindowPos2i = (PFNGLWINDOWPOS2IPROC) load(userptr, "glWindowPos2i");
+ glad_glWindowPos2iv = (PFNGLWINDOWPOS2IVPROC) load(userptr, "glWindowPos2iv");
+ glad_glWindowPos2s = (PFNGLWINDOWPOS2SPROC) load(userptr, "glWindowPos2s");
+ glad_glWindowPos2sv = (PFNGLWINDOWPOS2SVPROC) load(userptr, "glWindowPos2sv");
+ glad_glWindowPos3d = (PFNGLWINDOWPOS3DPROC) load(userptr, "glWindowPos3d");
+ glad_glWindowPos3dv = (PFNGLWINDOWPOS3DVPROC) load(userptr, "glWindowPos3dv");
+ glad_glWindowPos3f = (PFNGLWINDOWPOS3FPROC) load(userptr, "glWindowPos3f");
+ glad_glWindowPos3fv = (PFNGLWINDOWPOS3FVPROC) load(userptr, "glWindowPos3fv");
+ glad_glWindowPos3i = (PFNGLWINDOWPOS3IPROC) load(userptr, "glWindowPos3i");
+ glad_glWindowPos3iv = (PFNGLWINDOWPOS3IVPROC) load(userptr, "glWindowPos3iv");
+ glad_glWindowPos3s = (PFNGLWINDOWPOS3SPROC) load(userptr, "glWindowPos3s");
+ glad_glWindowPos3sv = (PFNGLWINDOWPOS3SVPROC) load(userptr, "glWindowPos3sv");
+}
+static void glad_gl_load_GL_VERSION_1_5( GLADuserptrloadfunc load, void* userptr) {
+ if(!GLAD_GL_VERSION_1_5) return;
+ glad_glBeginQuery = (PFNGLBEGINQUERYPROC) load(userptr, "glBeginQuery");
+ glad_glBindBuffer = (PFNGLBINDBUFFERPROC) load(userptr, "glBindBuffer");
+ glad_glBufferData = (PFNGLBUFFERDATAPROC) load(userptr, "glBufferData");
+ glad_glBufferSubData = (PFNGLBUFFERSUBDATAPROC) load(userptr, "glBufferSubData");
+ glad_glDeleteBuffers = (PFNGLDELETEBUFFERSPROC) load(userptr, "glDeleteBuffers");
+ glad_glDeleteQueries = (PFNGLDELETEQUERIESPROC) load(userptr, "glDeleteQueries");
+ glad_glEndQuery = (PFNGLENDQUERYPROC) load(userptr, "glEndQuery");
+ glad_glGenBuffers = (PFNGLGENBUFFERSPROC) load(userptr, "glGenBuffers");
+ glad_glGenQueries = (PFNGLGENQUERIESPROC) load(userptr, "glGenQueries");
+ glad_glGetBufferParameteriv = (PFNGLGETBUFFERPARAMETERIVPROC) load(userptr, "glGetBufferParameteriv");
+ glad_glGetBufferPointerv = (PFNGLGETBUFFERPOINTERVPROC) load(userptr, "glGetBufferPointerv");
+ glad_glGetBufferSubData = (PFNGLGETBUFFERSUBDATAPROC) load(userptr, "glGetBufferSubData");
+ glad_glGetQueryObjectiv = (PFNGLGETQUERYOBJECTIVPROC) load(userptr, "glGetQueryObjectiv");
+ glad_glGetQueryObjectuiv = (PFNGLGETQUERYOBJECTUIVPROC) load(userptr, "glGetQueryObjectuiv");
+ glad_glGetQueryiv = (PFNGLGETQUERYIVPROC) load(userptr, "glGetQueryiv");
+ glad_glIsBuffer = (PFNGLISBUFFERPROC) load(userptr, "glIsBuffer");
+ glad_glIsQuery = (PFNGLISQUERYPROC) load(userptr, "glIsQuery");
+ glad_glMapBuffer = (PFNGLMAPBUFFERPROC) load(userptr, "glMapBuffer");
+ glad_glUnmapBuffer = (PFNGLUNMAPBUFFERPROC) load(userptr, "glUnmapBuffer");
+}
+static void glad_gl_load_GL_VERSION_2_0( GLADuserptrloadfunc load, void* userptr) {
+ if(!GLAD_GL_VERSION_2_0) return;
+ glad_glAttachShader = (PFNGLATTACHSHADERPROC) load(userptr, "glAttachShader");
+ glad_glBindAttribLocation = (PFNGLBINDATTRIBLOCATIONPROC) load(userptr, "glBindAttribLocation");
+ glad_glBlendEquationSeparate = (PFNGLBLENDEQUATIONSEPARATEPROC) load(userptr, "glBlendEquationSeparate");
+ glad_glCompileShader = (PFNGLCOMPILESHADERPROC) load(userptr, "glCompileShader");
+ glad_glCreateProgram = (PFNGLCREATEPROGRAMPROC) load(userptr, "glCreateProgram");
+ glad_glCreateShader = (PFNGLCREATESHADERPROC) load(userptr, "glCreateShader");
+ glad_glDeleteProgram = (PFNGLDELETEPROGRAMPROC) load(userptr, "glDeleteProgram");
+ glad_glDeleteShader = (PFNGLDELETESHADERPROC) load(userptr, "glDeleteShader");
+ glad_glDetachShader = (PFNGLDETACHSHADERPROC) load(userptr, "glDetachShader");
+ glad_glDisableVertexAttribArray = (PFNGLDISABLEVERTEXATTRIBARRAYPROC) load(userptr, "glDisableVertexAttribArray");
+ glad_glDrawBuffers = (PFNGLDRAWBUFFERSPROC) load(userptr, "glDrawBuffers");
+ glad_glEnableVertexAttribArray = (PFNGLENABLEVERTEXATTRIBARRAYPROC) load(userptr, "glEnableVertexAttribArray");
+ glad_glGetActiveAttrib = (PFNGLGETACTIVEATTRIBPROC) load(userptr, "glGetActiveAttrib");
+ glad_glGetActiveUniform = (PFNGLGETACTIVEUNIFORMPROC) load(userptr, "glGetActiveUniform");
+ glad_glGetAttachedShaders = (PFNGLGETATTACHEDSHADERSPROC) load(userptr, "glGetAttachedShaders");
+ glad_glGetAttribLocation = (PFNGLGETATTRIBLOCATIONPROC) load(userptr, "glGetAttribLocation");
+ glad_glGetProgramInfoLog = (PFNGLGETPROGRAMINFOLOGPROC) load(userptr, "glGetProgramInfoLog");
+ glad_glGetProgramiv = (PFNGLGETPROGRAMIVPROC) load(userptr, "glGetProgramiv");
+ glad_glGetShaderInfoLog = (PFNGLGETSHADERINFOLOGPROC) load(userptr, "glGetShaderInfoLog");
+ glad_glGetShaderSource = (PFNGLGETSHADERSOURCEPROC) load(userptr, "glGetShaderSource");
+ glad_glGetShaderiv = (PFNGLGETSHADERIVPROC) load(userptr, "glGetShaderiv");
+ glad_glGetUniformLocation = (PFNGLGETUNIFORMLOCATIONPROC) load(userptr, "glGetUniformLocation");
+ glad_glGetUniformfv = (PFNGLGETUNIFORMFVPROC) load(userptr, "glGetUniformfv");
+ glad_glGetUniformiv = (PFNGLGETUNIFORMIVPROC) load(userptr, "glGetUniformiv");
+ glad_glGetVertexAttribPointerv = (PFNGLGETVERTEXATTRIBPOINTERVPROC) load(userptr, "glGetVertexAttribPointerv");
+ glad_glGetVertexAttribdv = (PFNGLGETVERTEXATTRIBDVPROC) load(userptr, "glGetVertexAttribdv");
+ glad_glGetVertexAttribfv = (PFNGLGETVERTEXATTRIBFVPROC) load(userptr, "glGetVertexAttribfv");
+ glad_glGetVertexAttribiv = (PFNGLGETVERTEXATTRIBIVPROC) load(userptr, "glGetVertexAttribiv");
+ glad_glIsProgram = (PFNGLISPROGRAMPROC) load(userptr, "glIsProgram");
+ glad_glIsShader = (PFNGLISSHADERPROC) load(userptr, "glIsShader");
+ glad_glLinkProgram = (PFNGLLINKPROGRAMPROC) load(userptr, "glLinkProgram");
+ glad_glShaderSource = (PFNGLSHADERSOURCEPROC) load(userptr, "glShaderSource");
+ glad_glStencilFuncSeparate = (PFNGLSTENCILFUNCSEPARATEPROC) load(userptr, "glStencilFuncSeparate");
+ glad_glStencilMaskSeparate = (PFNGLSTENCILMASKSEPARATEPROC) load(userptr, "glStencilMaskSeparate");
+ glad_glStencilOpSeparate = (PFNGLSTENCILOPSEPARATEPROC) load(userptr, "glStencilOpSeparate");
+ glad_glUniform1f = (PFNGLUNIFORM1FPROC) load(userptr, "glUniform1f");
+ glad_glUniform1fv = (PFNGLUNIFORM1FVPROC) load(userptr, "glUniform1fv");
+ glad_glUniform1i = (PFNGLUNIFORM1IPROC) load(userptr, "glUniform1i");
+ glad_glUniform1iv = (PFNGLUNIFORM1IVPROC) load(userptr, "glUniform1iv");
+ glad_glUniform2f = (PFNGLUNIFORM2FPROC) load(userptr, "glUniform2f");
+ glad_glUniform2fv = (PFNGLUNIFORM2FVPROC) load(userptr, "glUniform2fv");
+ glad_glUniform2i = (PFNGLUNIFORM2IPROC) load(userptr, "glUniform2i");
+ glad_glUniform2iv = (PFNGLUNIFORM2IVPROC) load(userptr, "glUniform2iv");
+ glad_glUniform3f = (PFNGLUNIFORM3FPROC) load(userptr, "glUniform3f");
+ glad_glUniform3fv = (PFNGLUNIFORM3FVPROC) load(userptr, "glUniform3fv");
+ glad_glUniform3i = (PFNGLUNIFORM3IPROC) load(userptr, "glUniform3i");
+ glad_glUniform3iv = (PFNGLUNIFORM3IVPROC) load(userptr, "glUniform3iv");
+ glad_glUniform4f = (PFNGLUNIFORM4FPROC) load(userptr, "glUniform4f");
+ glad_glUniform4fv = (PFNGLUNIFORM4FVPROC) load(userptr, "glUniform4fv");
+ glad_glUniform4i = (PFNGLUNIFORM4IPROC) load(userptr, "glUniform4i");
+ glad_glUniform4iv = (PFNGLUNIFORM4IVPROC) load(userptr, "glUniform4iv");
+ glad_glUniformMatrix2fv = (PFNGLUNIFORMMATRIX2FVPROC) load(userptr, "glUniformMatrix2fv");
+ glad_glUniformMatrix3fv = (PFNGLUNIFORMMATRIX3FVPROC) load(userptr, "glUniformMatrix3fv");
+ glad_glUniformMatrix4fv = (PFNGLUNIFORMMATRIX4FVPROC) load(userptr, "glUniformMatrix4fv");
+ glad_glUseProgram = (PFNGLUSEPROGRAMPROC) load(userptr, "glUseProgram");
+ glad_glValidateProgram = (PFNGLVALIDATEPROGRAMPROC) load(userptr, "glValidateProgram");
+ glad_glVertexAttrib1d = (PFNGLVERTEXATTRIB1DPROC) load(userptr, "glVertexAttrib1d");
+ glad_glVertexAttrib1dv = (PFNGLVERTEXATTRIB1DVPROC) load(userptr, "glVertexAttrib1dv");
+ glad_glVertexAttrib1f = (PFNGLVERTEXATTRIB1FPROC) load(userptr, "glVertexAttrib1f");
+ glad_glVertexAttrib1fv = (PFNGLVERTEXATTRIB1FVPROC) load(userptr, "glVertexAttrib1fv");
+ glad_glVertexAttrib1s = (PFNGLVERTEXATTRIB1SPROC) load(userptr, "glVertexAttrib1s");
+ glad_glVertexAttrib1sv = (PFNGLVERTEXATTRIB1SVPROC) load(userptr, "glVertexAttrib1sv");
+ glad_glVertexAttrib2d = (PFNGLVERTEXATTRIB2DPROC) load(userptr, "glVertexAttrib2d");
+ glad_glVertexAttrib2dv = (PFNGLVERTEXATTRIB2DVPROC) load(userptr, "glVertexAttrib2dv");
+ glad_glVertexAttrib2f = (PFNGLVERTEXATTRIB2FPROC) load(userptr, "glVertexAttrib2f");
+ glad_glVertexAttrib2fv = (PFNGLVERTEXATTRIB2FVPROC) load(userptr, "glVertexAttrib2fv");
+ glad_glVertexAttrib2s = (PFNGLVERTEXATTRIB2SPROC) load(userptr, "glVertexAttrib2s");
+ glad_glVertexAttrib2sv = (PFNGLVERTEXATTRIB2SVPROC) load(userptr, "glVertexAttrib2sv");
+ glad_glVertexAttrib3d = (PFNGLVERTEXATTRIB3DPROC) load(userptr, "glVertexAttrib3d");
+ glad_glVertexAttrib3dv = (PFNGLVERTEXATTRIB3DVPROC) load(userptr, "glVertexAttrib3dv");
+ glad_glVertexAttrib3f = (PFNGLVERTEXATTRIB3FPROC) load(userptr, "glVertexAttrib3f");
+ glad_glVertexAttrib3fv = (PFNGLVERTEXATTRIB3FVPROC) load(userptr, "glVertexAttrib3fv");
+ glad_glVertexAttrib3s = (PFNGLVERTEXATTRIB3SPROC) load(userptr, "glVertexAttrib3s");
+ glad_glVertexAttrib3sv = (PFNGLVERTEXATTRIB3SVPROC) load(userptr, "glVertexAttrib3sv");
+ glad_glVertexAttrib4Nbv = (PFNGLVERTEXATTRIB4NBVPROC) load(userptr, "glVertexAttrib4Nbv");
+ glad_glVertexAttrib4Niv = (PFNGLVERTEXATTRIB4NIVPROC) load(userptr, "glVertexAttrib4Niv");
+ glad_glVertexAttrib4Nsv = (PFNGLVERTEXATTRIB4NSVPROC) load(userptr, "glVertexAttrib4Nsv");
+ glad_glVertexAttrib4Nub = (PFNGLVERTEXATTRIB4NUBPROC) load(userptr, "glVertexAttrib4Nub");
+ glad_glVertexAttrib4Nubv = (PFNGLVERTEXATTRIB4NUBVPROC) load(userptr, "glVertexAttrib4Nubv");
+ glad_glVertexAttrib4Nuiv = (PFNGLVERTEXATTRIB4NUIVPROC) load(userptr, "glVertexAttrib4Nuiv");
+ glad_glVertexAttrib4Nusv = (PFNGLVERTEXATTRIB4NUSVPROC) load(userptr, "glVertexAttrib4Nusv");
+ glad_glVertexAttrib4bv = (PFNGLVERTEXATTRIB4BVPROC) load(userptr, "glVertexAttrib4bv");
+ glad_glVertexAttrib4d = (PFNGLVERTEXATTRIB4DPROC) load(userptr, "glVertexAttrib4d");
+ glad_glVertexAttrib4dv = (PFNGLVERTEXATTRIB4DVPROC) load(userptr, "glVertexAttrib4dv");
+ glad_glVertexAttrib4f = (PFNGLVERTEXATTRIB4FPROC) load(userptr, "glVertexAttrib4f");
+ glad_glVertexAttrib4fv = (PFNGLVERTEXATTRIB4FVPROC) load(userptr, "glVertexAttrib4fv");
+ glad_glVertexAttrib4iv = (PFNGLVERTEXATTRIB4IVPROC) load(userptr, "glVertexAttrib4iv");
+ glad_glVertexAttrib4s = (PFNGLVERTEXATTRIB4SPROC) load(userptr, "glVertexAttrib4s");
+ glad_glVertexAttrib4sv = (PFNGLVERTEXATTRIB4SVPROC) load(userptr, "glVertexAttrib4sv");
+ glad_glVertexAttrib4ubv = (PFNGLVERTEXATTRIB4UBVPROC) load(userptr, "glVertexAttrib4ubv");
+ glad_glVertexAttrib4uiv = (PFNGLVERTEXATTRIB4UIVPROC) load(userptr, "glVertexAttrib4uiv");
+ glad_glVertexAttrib4usv = (PFNGLVERTEXATTRIB4USVPROC) load(userptr, "glVertexAttrib4usv");
+ glad_glVertexAttribPointer = (PFNGLVERTEXATTRIBPOINTERPROC) load(userptr, "glVertexAttribPointer");
+}
+static void glad_gl_load_GL_VERSION_2_1( GLADuserptrloadfunc load, void* userptr) {
+ if(!GLAD_GL_VERSION_2_1) return;
+ glad_glUniformMatrix2x3fv = (PFNGLUNIFORMMATRIX2X3FVPROC) load(userptr, "glUniformMatrix2x3fv");
+ glad_glUniformMatrix2x4fv = (PFNGLUNIFORMMATRIX2X4FVPROC) load(userptr, "glUniformMatrix2x4fv");
+ glad_glUniformMatrix3x2fv = (PFNGLUNIFORMMATRIX3X2FVPROC) load(userptr, "glUniformMatrix3x2fv");
+ glad_glUniformMatrix3x4fv = (PFNGLUNIFORMMATRIX3X4FVPROC) load(userptr, "glUniformMatrix3x4fv");
+ glad_glUniformMatrix4x2fv = (PFNGLUNIFORMMATRIX4X2FVPROC) load(userptr, "glUniformMatrix4x2fv");
+ glad_glUniformMatrix4x3fv = (PFNGLUNIFORMMATRIX4X3FVPROC) load(userptr, "glUniformMatrix4x3fv");
+}
+static void glad_gl_load_GL_VERSION_3_0( GLADuserptrloadfunc load, void* userptr) {
+ if(!GLAD_GL_VERSION_3_0) return;
+ glad_glBeginConditionalRender = (PFNGLBEGINCONDITIONALRENDERPROC) load(userptr, "glBeginConditionalRender");
+ glad_glBeginTransformFeedback = (PFNGLBEGINTRANSFORMFEEDBACKPROC) load(userptr, "glBeginTransformFeedback");
+ glad_glBindBufferBase = (PFNGLBINDBUFFERBASEPROC) load(userptr, "glBindBufferBase");
+ glad_glBindBufferRange = (PFNGLBINDBUFFERRANGEPROC) load(userptr, "glBindBufferRange");
+ glad_glBindFragDataLocation = (PFNGLBINDFRAGDATALOCATIONPROC) load(userptr, "glBindFragDataLocation");
+ glad_glBindFramebuffer = (PFNGLBINDFRAMEBUFFERPROC) load(userptr, "glBindFramebuffer");
+ glad_glBindRenderbuffer = (PFNGLBINDRENDERBUFFERPROC) load(userptr, "glBindRenderbuffer");
+ glad_glBindVertexArray = (PFNGLBINDVERTEXARRAYPROC) load(userptr, "glBindVertexArray");
+ glad_glBlitFramebuffer = (PFNGLBLITFRAMEBUFFERPROC) load(userptr, "glBlitFramebuffer");
+ glad_glCheckFramebufferStatus = (PFNGLCHECKFRAMEBUFFERSTATUSPROC) load(userptr, "glCheckFramebufferStatus");
+ glad_glClampColor = (PFNGLCLAMPCOLORPROC) load(userptr, "glClampColor");
+ glad_glClearBufferfi = (PFNGLCLEARBUFFERFIPROC) load(userptr, "glClearBufferfi");
+ glad_glClearBufferfv = (PFNGLCLEARBUFFERFVPROC) load(userptr, "glClearBufferfv");
+ glad_glClearBufferiv = (PFNGLCLEARBUFFERIVPROC) load(userptr, "glClearBufferiv");
+ glad_glClearBufferuiv = (PFNGLCLEARBUFFERUIVPROC) load(userptr, "glClearBufferuiv");
+ glad_glColorMaski = (PFNGLCOLORMASKIPROC) load(userptr, "glColorMaski");
+ glad_glDeleteFramebuffers = (PFNGLDELETEFRAMEBUFFERSPROC) load(userptr, "glDeleteFramebuffers");
+ glad_glDeleteRenderbuffers = (PFNGLDELETERENDERBUFFERSPROC) load(userptr, "glDeleteRenderbuffers");
+ glad_glDeleteVertexArrays = (PFNGLDELETEVERTEXARRAYSPROC) load(userptr, "glDeleteVertexArrays");
+ glad_glDisablei = (PFNGLDISABLEIPROC) load(userptr, "glDisablei");
+ glad_glEnablei = (PFNGLENABLEIPROC) load(userptr, "glEnablei");
+ glad_glEndConditionalRender = (PFNGLENDCONDITIONALRENDERPROC) load(userptr, "glEndConditionalRender");
+ glad_glEndTransformFeedback = (PFNGLENDTRANSFORMFEEDBACKPROC) load(userptr, "glEndTransformFeedback");
+ glad_glFlushMappedBufferRange = (PFNGLFLUSHMAPPEDBUFFERRANGEPROC) load(userptr, "glFlushMappedBufferRange");
+ glad_glFramebufferRenderbuffer = (PFNGLFRAMEBUFFERRENDERBUFFERPROC) load(userptr, "glFramebufferRenderbuffer");
+ glad_glFramebufferTexture1D = (PFNGLFRAMEBUFFERTEXTURE1DPROC) load(userptr, "glFramebufferTexture1D");
+ glad_glFramebufferTexture2D = (PFNGLFRAMEBUFFERTEXTURE2DPROC) load(userptr, "glFramebufferTexture2D");
+ glad_glFramebufferTexture3D = (PFNGLFRAMEBUFFERTEXTURE3DPROC) load(userptr, "glFramebufferTexture3D");
+ glad_glFramebufferTextureLayer = (PFNGLFRAMEBUFFERTEXTURELAYERPROC) load(userptr, "glFramebufferTextureLayer");
+ glad_glGenFramebuffers = (PFNGLGENFRAMEBUFFERSPROC) load(userptr, "glGenFramebuffers");
+ glad_glGenRenderbuffers = (PFNGLGENRENDERBUFFERSPROC) load(userptr, "glGenRenderbuffers");
+ glad_glGenVertexArrays = (PFNGLGENVERTEXARRAYSPROC) load(userptr, "glGenVertexArrays");
+ glad_glGenerateMipmap = (PFNGLGENERATEMIPMAPPROC) load(userptr, "glGenerateMipmap");
+ glad_glGetBooleani_v = (PFNGLGETBOOLEANI_VPROC) load(userptr, "glGetBooleani_v");
+ glad_glGetFragDataLocation = (PFNGLGETFRAGDATALOCATIONPROC) load(userptr, "glGetFragDataLocation");
+ glad_glGetFramebufferAttachmentParameteriv = (PFNGLGETFRAMEBUFFERATTACHMENTPARAMETERIVPROC) load(userptr, "glGetFramebufferAttachmentParameteriv");
+ glad_glGetIntegeri_v = (PFNGLGETINTEGERI_VPROC) load(userptr, "glGetIntegeri_v");
+ glad_glGetRenderbufferParameteriv = (PFNGLGETRENDERBUFFERPARAMETERIVPROC) load(userptr, "glGetRenderbufferParameteriv");
+ glad_glGetStringi = (PFNGLGETSTRINGIPROC) load(userptr, "glGetStringi");
+ glad_glGetTexParameterIiv = (PFNGLGETTEXPARAMETERIIVPROC) load(userptr, "glGetTexParameterIiv");
+ glad_glGetTexParameterIuiv = (PFNGLGETTEXPARAMETERIUIVPROC) load(userptr, "glGetTexParameterIuiv");
+ glad_glGetTransformFeedbackVarying = (PFNGLGETTRANSFORMFEEDBACKVARYINGPROC) load(userptr, "glGetTransformFeedbackVarying");
+ glad_glGetUniformuiv = (PFNGLGETUNIFORMUIVPROC) load(userptr, "glGetUniformuiv");
+ glad_glGetVertexAttribIiv = (PFNGLGETVERTEXATTRIBIIVPROC) load(userptr, "glGetVertexAttribIiv");
+ glad_glGetVertexAttribIuiv = (PFNGLGETVERTEXATTRIBIUIVPROC) load(userptr, "glGetVertexAttribIuiv");
+ glad_glIsEnabledi = (PFNGLISENABLEDIPROC) load(userptr, "glIsEnabledi");
+ glad_glIsFramebuffer = (PFNGLISFRAMEBUFFERPROC) load(userptr, "glIsFramebuffer");
+ glad_glIsRenderbuffer = (PFNGLISRENDERBUFFERPROC) load(userptr, "glIsRenderbuffer");
+ glad_glIsVertexArray = (PFNGLISVERTEXARRAYPROC) load(userptr, "glIsVertexArray");
+ glad_glMapBufferRange = (PFNGLMAPBUFFERRANGEPROC) load(userptr, "glMapBufferRange");
+ glad_glRenderbufferStorage = (PFNGLRENDERBUFFERSTORAGEPROC) load(userptr, "glRenderbufferStorage");
+ glad_glRenderbufferStorageMultisample = (PFNGLRENDERBUFFERSTORAGEMULTISAMPLEPROC) load(userptr, "glRenderbufferStorageMultisample");
+ glad_glTexParameterIiv = (PFNGLTEXPARAMETERIIVPROC) load(userptr, "glTexParameterIiv");
+ glad_glTexParameterIuiv = (PFNGLTEXPARAMETERIUIVPROC) load(userptr, "glTexParameterIuiv");
+ glad_glTransformFeedbackVaryings = (PFNGLTRANSFORMFEEDBACKVARYINGSPROC) load(userptr, "glTransformFeedbackVaryings");
+ glad_glUniform1ui = (PFNGLUNIFORM1UIPROC) load(userptr, "glUniform1ui");
+ glad_glUniform1uiv = (PFNGLUNIFORM1UIVPROC) load(userptr, "glUniform1uiv");
+ glad_glUniform2ui = (PFNGLUNIFORM2UIPROC) load(userptr, "glUniform2ui");
+ glad_glUniform2uiv = (PFNGLUNIFORM2UIVPROC) load(userptr, "glUniform2uiv");
+ glad_glUniform3ui = (PFNGLUNIFORM3UIPROC) load(userptr, "glUniform3ui");
+ glad_glUniform3uiv = (PFNGLUNIFORM3UIVPROC) load(userptr, "glUniform3uiv");
+ glad_glUniform4ui = (PFNGLUNIFORM4UIPROC) load(userptr, "glUniform4ui");
+ glad_glUniform4uiv = (PFNGLUNIFORM4UIVPROC) load(userptr, "glUniform4uiv");
+ glad_glVertexAttribI1i = (PFNGLVERTEXATTRIBI1IPROC) load(userptr, "glVertexAttribI1i");
+ glad_glVertexAttribI1iv = (PFNGLVERTEXATTRIBI1IVPROC) load(userptr, "glVertexAttribI1iv");
+ glad_glVertexAttribI1ui = (PFNGLVERTEXATTRIBI1UIPROC) load(userptr, "glVertexAttribI1ui");
+ glad_glVertexAttribI1uiv = (PFNGLVERTEXATTRIBI1UIVPROC) load(userptr, "glVertexAttribI1uiv");
+ glad_glVertexAttribI2i = (PFNGLVERTEXATTRIBI2IPROC) load(userptr, "glVertexAttribI2i");
+ glad_glVertexAttribI2iv = (PFNGLVERTEXATTRIBI2IVPROC) load(userptr, "glVertexAttribI2iv");
+ glad_glVertexAttribI2ui = (PFNGLVERTEXATTRIBI2UIPROC) load(userptr, "glVertexAttribI2ui");
+ glad_glVertexAttribI2uiv = (PFNGLVERTEXATTRIBI2UIVPROC) load(userptr, "glVertexAttribI2uiv");
+ glad_glVertexAttribI3i = (PFNGLVERTEXATTRIBI3IPROC) load(userptr, "glVertexAttribI3i");
+ glad_glVertexAttribI3iv = (PFNGLVERTEXATTRIBI3IVPROC) load(userptr, "glVertexAttribI3iv");
+ glad_glVertexAttribI3ui = (PFNGLVERTEXATTRIBI3UIPROC) load(userptr, "glVertexAttribI3ui");
+ glad_glVertexAttribI3uiv = (PFNGLVERTEXATTRIBI3UIVPROC) load(userptr, "glVertexAttribI3uiv");
+ glad_glVertexAttribI4bv = (PFNGLVERTEXATTRIBI4BVPROC) load(userptr, "glVertexAttribI4bv");
+ glad_glVertexAttribI4i = (PFNGLVERTEXATTRIBI4IPROC) load(userptr, "glVertexAttribI4i");
+ glad_glVertexAttribI4iv = (PFNGLVERTEXATTRIBI4IVPROC) load(userptr, "glVertexAttribI4iv");
+ glad_glVertexAttribI4sv = (PFNGLVERTEXATTRIBI4SVPROC) load(userptr, "glVertexAttribI4sv");
+ glad_glVertexAttribI4ubv = (PFNGLVERTEXATTRIBI4UBVPROC) load(userptr, "glVertexAttribI4ubv");
+ glad_glVertexAttribI4ui = (PFNGLVERTEXATTRIBI4UIPROC) load(userptr, "glVertexAttribI4ui");
+ glad_glVertexAttribI4uiv = (PFNGLVERTEXATTRIBI4UIVPROC) load(userptr, "glVertexAttribI4uiv");
+ glad_glVertexAttribI4usv = (PFNGLVERTEXATTRIBI4USVPROC) load(userptr, "glVertexAttribI4usv");
+ glad_glVertexAttribIPointer = (PFNGLVERTEXATTRIBIPOINTERPROC) load(userptr, "glVertexAttribIPointer");
+}
+static void glad_gl_load_GL_VERSION_3_1( GLADuserptrloadfunc load, void* userptr) {
+ if(!GLAD_GL_VERSION_3_1) return;
+ glad_glBindBufferBase = (PFNGLBINDBUFFERBASEPROC) load(userptr, "glBindBufferBase");
+ glad_glBindBufferRange = (PFNGLBINDBUFFERRANGEPROC) load(userptr, "glBindBufferRange");
+ glad_glCopyBufferSubData = (PFNGLCOPYBUFFERSUBDATAPROC) load(userptr, "glCopyBufferSubData");
+ glad_glDrawArraysInstanced = (PFNGLDRAWARRAYSINSTANCEDPROC) load(userptr, "glDrawArraysInstanced");
+ glad_glDrawElementsInstanced = (PFNGLDRAWELEMENTSINSTANCEDPROC) load(userptr, "glDrawElementsInstanced");
+ glad_glGetActiveUniformBlockName = (PFNGLGETACTIVEUNIFORMBLOCKNAMEPROC) load(userptr, "glGetActiveUniformBlockName");
+ glad_glGetActiveUniformBlockiv = (PFNGLGETACTIVEUNIFORMBLOCKIVPROC) load(userptr, "glGetActiveUniformBlockiv");
+ glad_glGetActiveUniformName = (PFNGLGETACTIVEUNIFORMNAMEPROC) load(userptr, "glGetActiveUniformName");
+ glad_glGetActiveUniformsiv = (PFNGLGETACTIVEUNIFORMSIVPROC) load(userptr, "glGetActiveUniformsiv");
+ glad_glGetIntegeri_v = (PFNGLGETINTEGERI_VPROC) load(userptr, "glGetIntegeri_v");
+ glad_glGetUniformBlockIndex = (PFNGLGETUNIFORMBLOCKINDEXPROC) load(userptr, "glGetUniformBlockIndex");
+ glad_glGetUniformIndices = (PFNGLGETUNIFORMINDICESPROC) load(userptr, "glGetUniformIndices");
+ glad_glPrimitiveRestartIndex = (PFNGLPRIMITIVERESTARTINDEXPROC) load(userptr, "glPrimitiveRestartIndex");
+ glad_glTexBuffer = (PFNGLTEXBUFFERPROC) load(userptr, "glTexBuffer");
+ glad_glUniformBlockBinding = (PFNGLUNIFORMBLOCKBINDINGPROC) load(userptr, "glUniformBlockBinding");
+}
+static void glad_gl_load_GL_VERSION_3_2( GLADuserptrloadfunc load, void* userptr) {
+ if(!GLAD_GL_VERSION_3_2) return;
+ glad_glClientWaitSync = (PFNGLCLIENTWAITSYNCPROC) load(userptr, "glClientWaitSync");
+ glad_glDeleteSync = (PFNGLDELETESYNCPROC) load(userptr, "glDeleteSync");
+ glad_glDrawElementsBaseVertex = (PFNGLDRAWELEMENTSBASEVERTEXPROC) load(userptr, "glDrawElementsBaseVertex");
+ glad_glDrawElementsInstancedBaseVertex = (PFNGLDRAWELEMENTSINSTANCEDBASEVERTEXPROC) load(userptr, "glDrawElementsInstancedBaseVertex");
+ glad_glDrawRangeElementsBaseVertex = (PFNGLDRAWRANGEELEMENTSBASEVERTEXPROC) load(userptr, "glDrawRangeElementsBaseVertex");
+ glad_glFenceSync = (PFNGLFENCESYNCPROC) load(userptr, "glFenceSync");
+ glad_glFramebufferTexture = (PFNGLFRAMEBUFFERTEXTUREPROC) load(userptr, "glFramebufferTexture");
+ glad_glGetBufferParameteri64v = (PFNGLGETBUFFERPARAMETERI64VPROC) load(userptr, "glGetBufferParameteri64v");
+ glad_glGetInteger64i_v = (PFNGLGETINTEGER64I_VPROC) load(userptr, "glGetInteger64i_v");
+ glad_glGetInteger64v = (PFNGLGETINTEGER64VPROC) load(userptr, "glGetInteger64v");
+ glad_glGetMultisamplefv = (PFNGLGETMULTISAMPLEFVPROC) load(userptr, "glGetMultisamplefv");
+ glad_glGetSynciv = (PFNGLGETSYNCIVPROC) load(userptr, "glGetSynciv");
+ glad_glIsSync = (PFNGLISSYNCPROC) load(userptr, "glIsSync");
+ glad_glMultiDrawElementsBaseVertex = (PFNGLMULTIDRAWELEMENTSBASEVERTEXPROC) load(userptr, "glMultiDrawElementsBaseVertex");
+ glad_glProvokingVertex = (PFNGLPROVOKINGVERTEXPROC) load(userptr, "glProvokingVertex");
+ glad_glSampleMaski = (PFNGLSAMPLEMASKIPROC) load(userptr, "glSampleMaski");
+ glad_glTexImage2DMultisample = (PFNGLTEXIMAGE2DMULTISAMPLEPROC) load(userptr, "glTexImage2DMultisample");
+ glad_glTexImage3DMultisample = (PFNGLTEXIMAGE3DMULTISAMPLEPROC) load(userptr, "glTexImage3DMultisample");
+ glad_glWaitSync = (PFNGLWAITSYNCPROC) load(userptr, "glWaitSync");
+}
+static void glad_gl_load_GL_VERSION_3_3( GLADuserptrloadfunc load, void* userptr) {
+ if(!GLAD_GL_VERSION_3_3) return;
+ glad_glBindFragDataLocationIndexed = (PFNGLBINDFRAGDATALOCATIONINDEXEDPROC) load(userptr, "glBindFragDataLocationIndexed");
+ glad_glBindSampler = (PFNGLBINDSAMPLERPROC) load(userptr, "glBindSampler");
+ glad_glColorP3ui = (PFNGLCOLORP3UIPROC) load(userptr, "glColorP3ui");
+ glad_glColorP3uiv = (PFNGLCOLORP3UIVPROC) load(userptr, "glColorP3uiv");
+ glad_glColorP4ui = (PFNGLCOLORP4UIPROC) load(userptr, "glColorP4ui");
+ glad_glColorP4uiv = (PFNGLCOLORP4UIVPROC) load(userptr, "glColorP4uiv");
+ glad_glDeleteSamplers = (PFNGLDELETESAMPLERSPROC) load(userptr, "glDeleteSamplers");
+ glad_glGenSamplers = (PFNGLGENSAMPLERSPROC) load(userptr, "glGenSamplers");
+ glad_glGetFragDataIndex = (PFNGLGETFRAGDATAINDEXPROC) load(userptr, "glGetFragDataIndex");
+ glad_glGetQueryObjecti64v = (PFNGLGETQUERYOBJECTI64VPROC) load(userptr, "glGetQueryObjecti64v");
+ glad_glGetQueryObjectui64v = (PFNGLGETQUERYOBJECTUI64VPROC) load(userptr, "glGetQueryObjectui64v");
+ glad_glGetSamplerParameterIiv = (PFNGLGETSAMPLERPARAMETERIIVPROC) load(userptr, "glGetSamplerParameterIiv");
+ glad_glGetSamplerParameterIuiv = (PFNGLGETSAMPLERPARAMETERIUIVPROC) load(userptr, "glGetSamplerParameterIuiv");
+ glad_glGetSamplerParameterfv = (PFNGLGETSAMPLERPARAMETERFVPROC) load(userptr, "glGetSamplerParameterfv");
+ glad_glGetSamplerParameteriv = (PFNGLGETSAMPLERPARAMETERIVPROC) load(userptr, "glGetSamplerParameteriv");
+ glad_glIsSampler = (PFNGLISSAMPLERPROC) load(userptr, "glIsSampler");
+ glad_glMultiTexCoordP1ui = (PFNGLMULTITEXCOORDP1UIPROC) load(userptr, "glMultiTexCoordP1ui");
+ glad_glMultiTexCoordP1uiv = (PFNGLMULTITEXCOORDP1UIVPROC) load(userptr, "glMultiTexCoordP1uiv");
+ glad_glMultiTexCoordP2ui = (PFNGLMULTITEXCOORDP2UIPROC) load(userptr, "glMultiTexCoordP2ui");
+ glad_glMultiTexCoordP2uiv = (PFNGLMULTITEXCOORDP2UIVPROC) load(userptr, "glMultiTexCoordP2uiv");
+ glad_glMultiTexCoordP3ui = (PFNGLMULTITEXCOORDP3UIPROC) load(userptr, "glMultiTexCoordP3ui");
+ glad_glMultiTexCoordP3uiv = (PFNGLMULTITEXCOORDP3UIVPROC) load(userptr, "glMultiTexCoordP3uiv");
+ glad_glMultiTexCoordP4ui = (PFNGLMULTITEXCOORDP4UIPROC) load(userptr, "glMultiTexCoordP4ui");
+ glad_glMultiTexCoordP4uiv = (PFNGLMULTITEXCOORDP4UIVPROC) load(userptr, "glMultiTexCoordP4uiv");
+ glad_glNormalP3ui = (PFNGLNORMALP3UIPROC) load(userptr, "glNormalP3ui");
+ glad_glNormalP3uiv = (PFNGLNORMALP3UIVPROC) load(userptr, "glNormalP3uiv");
+ glad_glQueryCounter = (PFNGLQUERYCOUNTERPROC) load(userptr, "glQueryCounter");
+ glad_glSamplerParameterIiv = (PFNGLSAMPLERPARAMETERIIVPROC) load(userptr, "glSamplerParameterIiv");
+ glad_glSamplerParameterIuiv = (PFNGLSAMPLERPARAMETERIUIVPROC) load(userptr, "glSamplerParameterIuiv");
+ glad_glSamplerParameterf = (PFNGLSAMPLERPARAMETERFPROC) load(userptr, "glSamplerParameterf");
+ glad_glSamplerParameterfv = (PFNGLSAMPLERPARAMETERFVPROC) load(userptr, "glSamplerParameterfv");
+ glad_glSamplerParameteri = (PFNGLSAMPLERPARAMETERIPROC) load(userptr, "glSamplerParameteri");
+ glad_glSamplerParameteriv = (PFNGLSAMPLERPARAMETERIVPROC) load(userptr, "glSamplerParameteriv");
+ glad_glSecondaryColorP3ui = (PFNGLSECONDARYCOLORP3UIPROC) load(userptr, "glSecondaryColorP3ui");
+ glad_glSecondaryColorP3uiv = (PFNGLSECONDARYCOLORP3UIVPROC) load(userptr, "glSecondaryColorP3uiv");
+ glad_glTexCoordP1ui = (PFNGLTEXCOORDP1UIPROC) load(userptr, "glTexCoordP1ui");
+ glad_glTexCoordP1uiv = (PFNGLTEXCOORDP1UIVPROC) load(userptr, "glTexCoordP1uiv");
+ glad_glTexCoordP2ui = (PFNGLTEXCOORDP2UIPROC) load(userptr, "glTexCoordP2ui");
+ glad_glTexCoordP2uiv = (PFNGLTEXCOORDP2UIVPROC) load(userptr, "glTexCoordP2uiv");
+ glad_glTexCoordP3ui = (PFNGLTEXCOORDP3UIPROC) load(userptr, "glTexCoordP3ui");
+ glad_glTexCoordP3uiv = (PFNGLTEXCOORDP3UIVPROC) load(userptr, "glTexCoordP3uiv");
+ glad_glTexCoordP4ui = (PFNGLTEXCOORDP4UIPROC) load(userptr, "glTexCoordP4ui");
+ glad_glTexCoordP4uiv = (PFNGLTEXCOORDP4UIVPROC) load(userptr, "glTexCoordP4uiv");
+ glad_glVertexAttribDivisor = (PFNGLVERTEXATTRIBDIVISORPROC) load(userptr, "glVertexAttribDivisor");
+ glad_glVertexAttribP1ui = (PFNGLVERTEXATTRIBP1UIPROC) load(userptr, "glVertexAttribP1ui");
+ glad_glVertexAttribP1uiv = (PFNGLVERTEXATTRIBP1UIVPROC) load(userptr, "glVertexAttribP1uiv");
+ glad_glVertexAttribP2ui = (PFNGLVERTEXATTRIBP2UIPROC) load(userptr, "glVertexAttribP2ui");
+ glad_glVertexAttribP2uiv = (PFNGLVERTEXATTRIBP2UIVPROC) load(userptr, "glVertexAttribP2uiv");
+ glad_glVertexAttribP3ui = (PFNGLVERTEXATTRIBP3UIPROC) load(userptr, "glVertexAttribP3ui");
+ glad_glVertexAttribP3uiv = (PFNGLVERTEXATTRIBP3UIVPROC) load(userptr, "glVertexAttribP3uiv");
+ glad_glVertexAttribP4ui = (PFNGLVERTEXATTRIBP4UIPROC) load(userptr, "glVertexAttribP4ui");
+ glad_glVertexAttribP4uiv = (PFNGLVERTEXATTRIBP4UIVPROC) load(userptr, "glVertexAttribP4uiv");
+ glad_glVertexP2ui = (PFNGLVERTEXP2UIPROC) load(userptr, "glVertexP2ui");
+ glad_glVertexP2uiv = (PFNGLVERTEXP2UIVPROC) load(userptr, "glVertexP2uiv");
+ glad_glVertexP3ui = (PFNGLVERTEXP3UIPROC) load(userptr, "glVertexP3ui");
+ glad_glVertexP3uiv = (PFNGLVERTEXP3UIVPROC) load(userptr, "glVertexP3uiv");
+ glad_glVertexP4ui = (PFNGLVERTEXP4UIPROC) load(userptr, "glVertexP4ui");
+ glad_glVertexP4uiv = (PFNGLVERTEXP4UIVPROC) load(userptr, "glVertexP4uiv");
+}
+static void glad_gl_load_GL_ARB_multisample( GLADuserptrloadfunc load, void* userptr) {
+ if(!GLAD_GL_ARB_multisample) return;
+ glad_glSampleCoverageARB = (PFNGLSAMPLECOVERAGEARBPROC) load(userptr, "glSampleCoverageARB");
+}
+static void glad_gl_load_GL_ARB_robustness( GLADuserptrloadfunc load, void* userptr) {
+ if(!GLAD_GL_ARB_robustness) return;
+ glad_glGetGraphicsResetStatusARB = (PFNGLGETGRAPHICSRESETSTATUSARBPROC) load(userptr, "glGetGraphicsResetStatusARB");
+ glad_glGetnColorTableARB = (PFNGLGETNCOLORTABLEARBPROC) load(userptr, "glGetnColorTableARB");
+ glad_glGetnCompressedTexImageARB = (PFNGLGETNCOMPRESSEDTEXIMAGEARBPROC) load(userptr, "glGetnCompressedTexImageARB");
+ glad_glGetnConvolutionFilterARB = (PFNGLGETNCONVOLUTIONFILTERARBPROC) load(userptr, "glGetnConvolutionFilterARB");
+ glad_glGetnHistogramARB = (PFNGLGETNHISTOGRAMARBPROC) load(userptr, "glGetnHistogramARB");
+ glad_glGetnMapdvARB = (PFNGLGETNMAPDVARBPROC) load(userptr, "glGetnMapdvARB");
+ glad_glGetnMapfvARB = (PFNGLGETNMAPFVARBPROC) load(userptr, "glGetnMapfvARB");
+ glad_glGetnMapivARB = (PFNGLGETNMAPIVARBPROC) load(userptr, "glGetnMapivARB");
+ glad_glGetnMinmaxARB = (PFNGLGETNMINMAXARBPROC) load(userptr, "glGetnMinmaxARB");
+ glad_glGetnPixelMapfvARB = (PFNGLGETNPIXELMAPFVARBPROC) load(userptr, "glGetnPixelMapfvARB");
+ glad_glGetnPixelMapuivARB = (PFNGLGETNPIXELMAPUIVARBPROC) load(userptr, "glGetnPixelMapuivARB");
+ glad_glGetnPixelMapusvARB = (PFNGLGETNPIXELMAPUSVARBPROC) load(userptr, "glGetnPixelMapusvARB");
+ glad_glGetnPolygonStippleARB = (PFNGLGETNPOLYGONSTIPPLEARBPROC) load(userptr, "glGetnPolygonStippleARB");
+ glad_glGetnSeparableFilterARB = (PFNGLGETNSEPARABLEFILTERARBPROC) load(userptr, "glGetnSeparableFilterARB");
+ glad_glGetnTexImageARB = (PFNGLGETNTEXIMAGEARBPROC) load(userptr, "glGetnTexImageARB");
+ glad_glGetnUniformdvARB = (PFNGLGETNUNIFORMDVARBPROC) load(userptr, "glGetnUniformdvARB");
+ glad_glGetnUniformfvARB = (PFNGLGETNUNIFORMFVARBPROC) load(userptr, "glGetnUniformfvARB");
+ glad_glGetnUniformivARB = (PFNGLGETNUNIFORMIVARBPROC) load(userptr, "glGetnUniformivARB");
+ glad_glGetnUniformuivARB = (PFNGLGETNUNIFORMUIVARBPROC) load(userptr, "glGetnUniformuivARB");
+ glad_glReadnPixelsARB = (PFNGLREADNPIXELSARBPROC) load(userptr, "glReadnPixelsARB");
+}
+static void glad_gl_load_GL_KHR_debug( GLADuserptrloadfunc load, void* userptr) {
+ if(!GLAD_GL_KHR_debug) return;
+ glad_glDebugMessageCallback = (PFNGLDEBUGMESSAGECALLBACKPROC) load(userptr, "glDebugMessageCallback");
+ glad_glDebugMessageControl = (PFNGLDEBUGMESSAGECONTROLPROC) load(userptr, "glDebugMessageControl");
+ glad_glDebugMessageInsert = (PFNGLDEBUGMESSAGEINSERTPROC) load(userptr, "glDebugMessageInsert");
+ glad_glGetDebugMessageLog = (PFNGLGETDEBUGMESSAGELOGPROC) load(userptr, "glGetDebugMessageLog");
+ glad_glGetObjectLabel = (PFNGLGETOBJECTLABELPROC) load(userptr, "glGetObjectLabel");
+ glad_glGetObjectPtrLabel = (PFNGLGETOBJECTPTRLABELPROC) load(userptr, "glGetObjectPtrLabel");
+ glad_glGetPointerv = (PFNGLGETPOINTERVPROC) load(userptr, "glGetPointerv");
+ glad_glObjectLabel = (PFNGLOBJECTLABELPROC) load(userptr, "glObjectLabel");
+ glad_glObjectPtrLabel = (PFNGLOBJECTPTRLABELPROC) load(userptr, "glObjectPtrLabel");
+ glad_glPopDebugGroup = (PFNGLPOPDEBUGGROUPPROC) load(userptr, "glPopDebugGroup");
+ glad_glPushDebugGroup = (PFNGLPUSHDEBUGGROUPPROC) load(userptr, "glPushDebugGroup");
+}
+
+
+
+#if defined(GL_ES_VERSION_3_0) || defined(GL_VERSION_3_0)
+#define GLAD_GL_IS_SOME_NEW_VERSION 1
+#else
+#define GLAD_GL_IS_SOME_NEW_VERSION 0
+#endif
+
+static int glad_gl_get_extensions( int version, const char **out_exts, unsigned int *out_num_exts_i, char ***out_exts_i) {
+#if GLAD_GL_IS_SOME_NEW_VERSION
+ if(GLAD_VERSION_MAJOR(version) < 3) {
+#else
+ (void) version;
+ (void) out_num_exts_i;
+ (void) out_exts_i;
+#endif
+ if (glad_glGetString == NULL) {
+ return 0;
+ }
+ *out_exts = (const char *)glad_glGetString(GL_EXTENSIONS);
+#if GLAD_GL_IS_SOME_NEW_VERSION
+ } else {
+ unsigned int index = 0;
+ unsigned int num_exts_i = 0;
+ char **exts_i = NULL;
+ if (glad_glGetStringi == NULL || glad_glGetIntegerv == NULL) {
+ return 0;
+ }
+ glad_glGetIntegerv(GL_NUM_EXTENSIONS, (int*) &num_exts_i);
+ if (num_exts_i > 0) {
+ exts_i = (char **) malloc(num_exts_i * (sizeof *exts_i));
+ }
+ if (exts_i == NULL) {
+ return 0;
+ }
+ for(index = 0; index < num_exts_i; index++) {
+ const char *gl_str_tmp = (const char*) glad_glGetStringi(GL_EXTENSIONS, index);
+ size_t len = strlen(gl_str_tmp) + 1;
+
+ char *local_str = (char*) malloc(len * sizeof(char));
+ if(local_str != NULL) {
+ memcpy(local_str, gl_str_tmp, len * sizeof(char));
+ }
+
+ exts_i[index] = local_str;
+ }
+
+ *out_num_exts_i = num_exts_i;
+ *out_exts_i = exts_i;
+ }
+#endif
+ return 1;
+}
+static void glad_gl_free_extensions(char **exts_i, unsigned int num_exts_i) {
+ if (exts_i != NULL) {
+ unsigned int index;
+ for(index = 0; index < num_exts_i; index++) {
+ free((void *) (exts_i[index]));
+ }
+ free((void *)exts_i);
+ exts_i = NULL;
+ }
+}
+static int glad_gl_has_extension(int version, const char *exts, unsigned int num_exts_i, char **exts_i, const char *ext) {
+ if(GLAD_VERSION_MAJOR(version) < 3 || !GLAD_GL_IS_SOME_NEW_VERSION) {
+ const char *extensions;
+ const char *loc;
+ const char *terminator;
+ extensions = exts;
+ if(extensions == NULL || ext == NULL) {
+ return 0;
+ }
+ while(1) {
+ loc = strstr(extensions, ext);
+ if(loc == NULL) {
+ return 0;
+ }
+ terminator = loc + strlen(ext);
+ if((loc == extensions || *(loc - 1) == ' ') &&
+ (*terminator == ' ' || *terminator == '\0')) {
+ return 1;
+ }
+ extensions = terminator;
+ }
+ } else {
+ unsigned int index;
+ for(index = 0; index < num_exts_i; index++) {
+ const char *e = exts_i[index];
+ if(strcmp(e, ext) == 0) {
+ return 1;
+ }
+ }
+ }
+ return 0;
+}
+
+static GLADapiproc glad_gl_get_proc_from_userptr(void *userptr, const char* name) {
+ return (GLAD_GNUC_EXTENSION (GLADapiproc (*)(const char *name)) userptr)(name);
+}
+
+static int glad_gl_find_extensions_gl( int version) {
+ const char *exts = NULL;
+ unsigned int num_exts_i = 0;
+ char **exts_i = NULL;
+ if (!glad_gl_get_extensions(version, &exts, &num_exts_i, &exts_i)) return 0;
+
+ GLAD_GL_ARB_multisample = glad_gl_has_extension(version, exts, num_exts_i, exts_i, "GL_ARB_multisample");
+ GLAD_GL_ARB_robustness = glad_gl_has_extension(version, exts, num_exts_i, exts_i, "GL_ARB_robustness");
+ GLAD_GL_KHR_debug = glad_gl_has_extension(version, exts, num_exts_i, exts_i, "GL_KHR_debug");
+
+ glad_gl_free_extensions(exts_i, num_exts_i);
+
+ return 1;
+}
+
+static int glad_gl_find_core_gl(void) {
+ int i;
+ const char* version;
+ const char* prefixes[] = {
+ "OpenGL ES-CM ",
+ "OpenGL ES-CL ",
+ "OpenGL ES ",
+ "OpenGL SC ",
+ NULL
+ };
+ int major = 0;
+ int minor = 0;
+ version = (const char*) glad_glGetString(GL_VERSION);
+ if (!version) return 0;
+ for (i = 0; prefixes[i]; i++) {
+ const size_t length = strlen(prefixes[i]);
+ if (strncmp(version, prefixes[i], length) == 0) {
+ version += length;
+ break;
+ }
+ }
+
+ GLAD_IMPL_UTIL_SSCANF(version, "%d.%d", &major, &minor);
+
+ GLAD_GL_VERSION_1_0 = (major == 1 && minor >= 0) || major > 1;
+ GLAD_GL_VERSION_1_1 = (major == 1 && minor >= 1) || major > 1;
+ GLAD_GL_VERSION_1_2 = (major == 1 && minor >= 2) || major > 1;
+ GLAD_GL_VERSION_1_3 = (major == 1 && minor >= 3) || major > 1;
+ GLAD_GL_VERSION_1_4 = (major == 1 && minor >= 4) || major > 1;
+ GLAD_GL_VERSION_1_5 = (major == 1 && minor >= 5) || major > 1;
+ GLAD_GL_VERSION_2_0 = (major == 2 && minor >= 0) || major > 2;
+ GLAD_GL_VERSION_2_1 = (major == 2 && minor >= 1) || major > 2;
+ GLAD_GL_VERSION_3_0 = (major == 3 && minor >= 0) || major > 3;
+ GLAD_GL_VERSION_3_1 = (major == 3 && minor >= 1) || major > 3;
+ GLAD_GL_VERSION_3_2 = (major == 3 && minor >= 2) || major > 3;
+ GLAD_GL_VERSION_3_3 = (major == 3 && minor >= 3) || major > 3;
+
+ return GLAD_MAKE_VERSION(major, minor);
+}
+
+int gladLoadGLUserPtr( GLADuserptrloadfunc load, void *userptr) {
+ int version;
+
+ glad_glGetString = (PFNGLGETSTRINGPROC) load(userptr, "glGetString");
+ if(glad_glGetString == NULL) return 0;
+ if(glad_glGetString(GL_VERSION) == NULL) return 0;
+ version = glad_gl_find_core_gl();
+
+ glad_gl_load_GL_VERSION_1_0(load, userptr);
+ glad_gl_load_GL_VERSION_1_1(load, userptr);
+ glad_gl_load_GL_VERSION_1_2(load, userptr);
+ glad_gl_load_GL_VERSION_1_3(load, userptr);
+ glad_gl_load_GL_VERSION_1_4(load, userptr);
+ glad_gl_load_GL_VERSION_1_5(load, userptr);
+ glad_gl_load_GL_VERSION_2_0(load, userptr);
+ glad_gl_load_GL_VERSION_2_1(load, userptr);
+ glad_gl_load_GL_VERSION_3_0(load, userptr);
+ glad_gl_load_GL_VERSION_3_1(load, userptr);
+ glad_gl_load_GL_VERSION_3_2(load, userptr);
+ glad_gl_load_GL_VERSION_3_3(load, userptr);
+
+ if (!glad_gl_find_extensions_gl(version)) return 0;
+ glad_gl_load_GL_ARB_multisample(load, userptr);
+ glad_gl_load_GL_ARB_robustness(load, userptr);
+ glad_gl_load_GL_KHR_debug(load, userptr);
+
+
+
+ return version;
+}
+
+
+int gladLoadGL( GLADloadfunc load) {
+ return gladLoadGLUserPtr( glad_gl_get_proc_from_userptr, GLAD_GNUC_EXTENSION (void*) load);
+}
+
+
+
+
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* GLAD_GL_IMPLEMENTATION */
+
diff --git a/chromium/third_party/dawn/third_party/glfw/deps/glad/gles2.h b/chromium/third_party/dawn/third_party/glfw/deps/glad/gles2.h
new file mode 100644
index 00000000000..d67f11078e2
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/deps/glad/gles2.h
@@ -0,0 +1,1805 @@
+/**
+ * Loader generated by glad 2.0.0-beta on Tue Aug 24 22:51:42 2021
+ *
+ * Generator: C/C++
+ * Specification: gl
+ * Extensions: 0
+ *
+ * APIs:
+ * - gles2=2.0
+ *
+ * Options:
+ * - ALIAS = False
+ * - DEBUG = False
+ * - HEADER_ONLY = True
+ * - LOADER = False
+ * - MX = False
+ * - MX_GLOBAL = False
+ * - ON_DEMAND = False
+ *
+ * Commandline:
+ * --api='gles2=2.0' --extensions='' c --header-only
+ *
+ * Online:
+ * http://glad.sh/#api=gles2%3D2.0&extensions=&generator=c&options=HEADER_ONLY
+ *
+ */
+
+#ifndef GLAD_GLES2_H_
+#define GLAD_GLES2_H_
+
+#ifdef __clang__
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wreserved-id-macro"
+#endif
+#ifdef __gl2_h_
+ #error OpenGL ES 2 header already included (API: gles2), remove previous include!
+#endif
+#define __gl2_h_ 1
+#ifdef __gl3_h_
+ #error OpenGL ES 3 header already included (API: gles2), remove previous include!
+#endif
+#define __gl3_h_ 1
+#ifdef __clang__
+#pragma clang diagnostic pop
+#endif
+
+#define GLAD_GLES2
+#define GLAD_OPTION_GLES2_HEADER_ONLY
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef GLAD_PLATFORM_H_
+#define GLAD_PLATFORM_H_
+
+#ifndef GLAD_PLATFORM_WIN32
+ #if defined(_WIN32) || defined(__WIN32__) || defined(WIN32) || defined(__MINGW32__)
+ #define GLAD_PLATFORM_WIN32 1
+ #else
+ #define GLAD_PLATFORM_WIN32 0
+ #endif
+#endif
+
+#ifndef GLAD_PLATFORM_APPLE
+ #ifdef __APPLE__
+ #define GLAD_PLATFORM_APPLE 1
+ #else
+ #define GLAD_PLATFORM_APPLE 0
+ #endif
+#endif
+
+#ifndef GLAD_PLATFORM_EMSCRIPTEN
+ #ifdef __EMSCRIPTEN__
+ #define GLAD_PLATFORM_EMSCRIPTEN 1
+ #else
+ #define GLAD_PLATFORM_EMSCRIPTEN 0
+ #endif
+#endif
+
+#ifndef GLAD_PLATFORM_UWP
+ #if defined(_MSC_VER) && !defined(GLAD_INTERNAL_HAVE_WINAPIFAMILY)
+ #ifdef __has_include
+ #if __has_include(<winapifamily.h>)
+ #define GLAD_INTERNAL_HAVE_WINAPIFAMILY 1
+ #endif
+ #elif _MSC_VER >= 1700 && !_USING_V110_SDK71_
+ #define GLAD_INTERNAL_HAVE_WINAPIFAMILY 1
+ #endif
+ #endif
+
+ #ifdef GLAD_INTERNAL_HAVE_WINAPIFAMILY
+ #include <winapifamily.h>
+ #if !WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP) && WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP)
+ #define GLAD_PLATFORM_UWP 1
+ #endif
+ #endif
+
+ #ifndef GLAD_PLATFORM_UWP
+ #define GLAD_PLATFORM_UWP 0
+ #endif
+#endif
+
+#ifdef __GNUC__
+ #define GLAD_GNUC_EXTENSION __extension__
+#else
+ #define GLAD_GNUC_EXTENSION
+#endif
+
+#ifndef GLAD_API_CALL
+ #if defined(GLAD_API_CALL_EXPORT)
+ #if GLAD_PLATFORM_WIN32 || defined(__CYGWIN__)
+ #if defined(GLAD_API_CALL_EXPORT_BUILD)
+ #if defined(__GNUC__)
+ #define GLAD_API_CALL __attribute__ ((dllexport)) extern
+ #else
+ #define GLAD_API_CALL __declspec(dllexport) extern
+ #endif
+ #else
+ #if defined(__GNUC__)
+ #define GLAD_API_CALL __attribute__ ((dllimport)) extern
+ #else
+ #define GLAD_API_CALL __declspec(dllimport) extern
+ #endif
+ #endif
+ #elif defined(__GNUC__) && defined(GLAD_API_CALL_EXPORT_BUILD)
+ #define GLAD_API_CALL __attribute__ ((visibility ("default"))) extern
+ #else
+ #define GLAD_API_CALL extern
+ #endif
+ #else
+ #define GLAD_API_CALL extern
+ #endif
+#endif
+
+#ifdef APIENTRY
+ #define GLAD_API_PTR APIENTRY
+#elif GLAD_PLATFORM_WIN32
+ #define GLAD_API_PTR __stdcall
+#else
+ #define GLAD_API_PTR
+#endif
+
+#ifndef GLAPI
+#define GLAPI GLAD_API_CALL
+#endif
+
+#ifndef GLAPIENTRY
+#define GLAPIENTRY GLAD_API_PTR
+#endif
+
+#define GLAD_MAKE_VERSION(major, minor) (major * 10000 + minor)
+#define GLAD_VERSION_MAJOR(version) (version / 10000)
+#define GLAD_VERSION_MINOR(version) (version % 10000)
+
+#define GLAD_GENERATOR_VERSION "2.0.0-beta"
+
+typedef void (*GLADapiproc)(void);
+
+typedef GLADapiproc (*GLADloadfunc)(const char *name);
+typedef GLADapiproc (*GLADuserptrloadfunc)(void *userptr, const char *name);
+
+typedef void (*GLADprecallback)(const char *name, GLADapiproc apiproc, int len_args, ...);
+typedef void (*GLADpostcallback)(void *ret, const char *name, GLADapiproc apiproc, int len_args, ...);
+
+#endif /* GLAD_PLATFORM_H_ */
+
+#define GL_ACTIVE_ATTRIBUTES 0x8B89
+#define GL_ACTIVE_ATTRIBUTE_MAX_LENGTH 0x8B8A
+#define GL_ACTIVE_TEXTURE 0x84E0
+#define GL_ACTIVE_UNIFORMS 0x8B86
+#define GL_ACTIVE_UNIFORM_MAX_LENGTH 0x8B87
+#define GL_ALIASED_LINE_WIDTH_RANGE 0x846E
+#define GL_ALIASED_POINT_SIZE_RANGE 0x846D
+#define GL_ALPHA 0x1906
+#define GL_ALPHA_BITS 0x0D55
+#define GL_ALWAYS 0x0207
+#define GL_ARRAY_BUFFER 0x8892
+#define GL_ARRAY_BUFFER_BINDING 0x8894
+#define GL_ATTACHED_SHADERS 0x8B85
+#define GL_BACK 0x0405
+#define GL_BLEND 0x0BE2
+#define GL_BLEND_COLOR 0x8005
+#define GL_BLEND_DST_ALPHA 0x80CA
+#define GL_BLEND_DST_RGB 0x80C8
+#define GL_BLEND_EQUATION 0x8009
+#define GL_BLEND_EQUATION_ALPHA 0x883D
+#define GL_BLEND_EQUATION_RGB 0x8009
+#define GL_BLEND_SRC_ALPHA 0x80CB
+#define GL_BLEND_SRC_RGB 0x80C9
+#define GL_BLUE_BITS 0x0D54
+#define GL_BOOL 0x8B56
+#define GL_BOOL_VEC2 0x8B57
+#define GL_BOOL_VEC3 0x8B58
+#define GL_BOOL_VEC4 0x8B59
+#define GL_BUFFER_SIZE 0x8764
+#define GL_BUFFER_USAGE 0x8765
+#define GL_BYTE 0x1400
+#define GL_CCW 0x0901
+#define GL_CLAMP_TO_EDGE 0x812F
+#define GL_COLOR_ATTACHMENT0 0x8CE0
+#define GL_COLOR_BUFFER_BIT 0x00004000
+#define GL_COLOR_CLEAR_VALUE 0x0C22
+#define GL_COLOR_WRITEMASK 0x0C23
+#define GL_COMPILE_STATUS 0x8B81
+#define GL_COMPRESSED_TEXTURE_FORMATS 0x86A3
+#define GL_CONSTANT_ALPHA 0x8003
+#define GL_CONSTANT_COLOR 0x8001
+#define GL_CULL_FACE 0x0B44
+#define GL_CULL_FACE_MODE 0x0B45
+#define GL_CURRENT_PROGRAM 0x8B8D
+#define GL_CURRENT_VERTEX_ATTRIB 0x8626
+#define GL_CW 0x0900
+#define GL_DECR 0x1E03
+#define GL_DECR_WRAP 0x8508
+#define GL_DELETE_STATUS 0x8B80
+#define GL_DEPTH_ATTACHMENT 0x8D00
+#define GL_DEPTH_BITS 0x0D56
+#define GL_DEPTH_BUFFER_BIT 0x00000100
+#define GL_DEPTH_CLEAR_VALUE 0x0B73
+#define GL_DEPTH_COMPONENT 0x1902
+#define GL_DEPTH_COMPONENT16 0x81A5
+#define GL_DEPTH_FUNC 0x0B74
+#define GL_DEPTH_RANGE 0x0B70
+#define GL_DEPTH_TEST 0x0B71
+#define GL_DEPTH_WRITEMASK 0x0B72
+#define GL_DITHER 0x0BD0
+#define GL_DONT_CARE 0x1100
+#define GL_DST_ALPHA 0x0304
+#define GL_DST_COLOR 0x0306
+#define GL_DYNAMIC_DRAW 0x88E8
+#define GL_ELEMENT_ARRAY_BUFFER 0x8893
+#define GL_ELEMENT_ARRAY_BUFFER_BINDING 0x8895
+#define GL_EQUAL 0x0202
+#define GL_EXTENSIONS 0x1F03
+#define GL_FALSE 0
+#define GL_FASTEST 0x1101
+#define GL_FIXED 0x140C
+#define GL_FLOAT 0x1406
+#define GL_FLOAT_MAT2 0x8B5A
+#define GL_FLOAT_MAT3 0x8B5B
+#define GL_FLOAT_MAT4 0x8B5C
+#define GL_FLOAT_VEC2 0x8B50
+#define GL_FLOAT_VEC3 0x8B51
+#define GL_FLOAT_VEC4 0x8B52
+#define GL_FRAGMENT_SHADER 0x8B30
+#define GL_FRAMEBUFFER 0x8D40
+#define GL_FRAMEBUFFER_ATTACHMENT_OBJECT_NAME 0x8CD1
+#define GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE 0x8CD0
+#define GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_CUBE_MAP_FACE 0x8CD3
+#define GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LEVEL 0x8CD2
+#define GL_FRAMEBUFFER_BINDING 0x8CA6
+#define GL_FRAMEBUFFER_COMPLETE 0x8CD5
+#define GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT 0x8CD6
+#define GL_FRAMEBUFFER_INCOMPLETE_DIMENSIONS 0x8CD9
+#define GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT 0x8CD7
+#define GL_FRAMEBUFFER_UNSUPPORTED 0x8CDD
+#define GL_FRONT 0x0404
+#define GL_FRONT_AND_BACK 0x0408
+#define GL_FRONT_FACE 0x0B46
+#define GL_FUNC_ADD 0x8006
+#define GL_FUNC_REVERSE_SUBTRACT 0x800B
+#define GL_FUNC_SUBTRACT 0x800A
+#define GL_GENERATE_MIPMAP_HINT 0x8192
+#define GL_GEQUAL 0x0206
+#define GL_GREATER 0x0204
+#define GL_GREEN_BITS 0x0D53
+#define GL_HIGH_FLOAT 0x8DF2
+#define GL_HIGH_INT 0x8DF5
+#define GL_IMPLEMENTATION_COLOR_READ_FORMAT 0x8B9B
+#define GL_IMPLEMENTATION_COLOR_READ_TYPE 0x8B9A
+#define GL_INCR 0x1E02
+#define GL_INCR_WRAP 0x8507
+#define GL_INFO_LOG_LENGTH 0x8B84
+#define GL_INT 0x1404
+#define GL_INT_VEC2 0x8B53
+#define GL_INT_VEC3 0x8B54
+#define GL_INT_VEC4 0x8B55
+#define GL_INVALID_ENUM 0x0500
+#define GL_INVALID_FRAMEBUFFER_OPERATION 0x0506
+#define GL_INVALID_OPERATION 0x0502
+#define GL_INVALID_VALUE 0x0501
+#define GL_INVERT 0x150A
+#define GL_KEEP 0x1E00
+#define GL_LEQUAL 0x0203
+#define GL_LESS 0x0201
+#define GL_LINEAR 0x2601
+#define GL_LINEAR_MIPMAP_LINEAR 0x2703
+#define GL_LINEAR_MIPMAP_NEAREST 0x2701
+#define GL_LINES 0x0001
+#define GL_LINE_LOOP 0x0002
+#define GL_LINE_STRIP 0x0003
+#define GL_LINE_WIDTH 0x0B21
+#define GL_LINK_STATUS 0x8B82
+#define GL_LOW_FLOAT 0x8DF0
+#define GL_LOW_INT 0x8DF3
+#define GL_LUMINANCE 0x1909
+#define GL_LUMINANCE_ALPHA 0x190A
+#define GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS 0x8B4D
+#define GL_MAX_CUBE_MAP_TEXTURE_SIZE 0x851C
+#define GL_MAX_FRAGMENT_UNIFORM_VECTORS 0x8DFD
+#define GL_MAX_RENDERBUFFER_SIZE 0x84E8
+#define GL_MAX_TEXTURE_IMAGE_UNITS 0x8872
+#define GL_MAX_TEXTURE_SIZE 0x0D33
+#define GL_MAX_VARYING_VECTORS 0x8DFC
+#define GL_MAX_VERTEX_ATTRIBS 0x8869
+#define GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS 0x8B4C
+#define GL_MAX_VERTEX_UNIFORM_VECTORS 0x8DFB
+#define GL_MAX_VIEWPORT_DIMS 0x0D3A
+#define GL_MEDIUM_FLOAT 0x8DF1
+#define GL_MEDIUM_INT 0x8DF4
+#define GL_MIRRORED_REPEAT 0x8370
+#define GL_NEAREST 0x2600
+#define GL_NEAREST_MIPMAP_LINEAR 0x2702
+#define GL_NEAREST_MIPMAP_NEAREST 0x2700
+#define GL_NEVER 0x0200
+#define GL_NICEST 0x1102
+#define GL_NONE 0
+#define GL_NOTEQUAL 0x0205
+#define GL_NO_ERROR 0
+#define GL_NUM_COMPRESSED_TEXTURE_FORMATS 0x86A2
+#define GL_NUM_SHADER_BINARY_FORMATS 0x8DF9
+#define GL_ONE 1
+#define GL_ONE_MINUS_CONSTANT_ALPHA 0x8004
+#define GL_ONE_MINUS_CONSTANT_COLOR 0x8002
+#define GL_ONE_MINUS_DST_ALPHA 0x0305
+#define GL_ONE_MINUS_DST_COLOR 0x0307
+#define GL_ONE_MINUS_SRC_ALPHA 0x0303
+#define GL_ONE_MINUS_SRC_COLOR 0x0301
+#define GL_OUT_OF_MEMORY 0x0505
+#define GL_PACK_ALIGNMENT 0x0D05
+#define GL_POINTS 0x0000
+#define GL_POLYGON_OFFSET_FACTOR 0x8038
+#define GL_POLYGON_OFFSET_FILL 0x8037
+#define GL_POLYGON_OFFSET_UNITS 0x2A00
+#define GL_RED_BITS 0x0D52
+#define GL_RENDERBUFFER 0x8D41
+#define GL_RENDERBUFFER_ALPHA_SIZE 0x8D53
+#define GL_RENDERBUFFER_BINDING 0x8CA7
+#define GL_RENDERBUFFER_BLUE_SIZE 0x8D52
+#define GL_RENDERBUFFER_DEPTH_SIZE 0x8D54
+#define GL_RENDERBUFFER_GREEN_SIZE 0x8D51
+#define GL_RENDERBUFFER_HEIGHT 0x8D43
+#define GL_RENDERBUFFER_INTERNAL_FORMAT 0x8D44
+#define GL_RENDERBUFFER_RED_SIZE 0x8D50
+#define GL_RENDERBUFFER_STENCIL_SIZE 0x8D55
+#define GL_RENDERBUFFER_WIDTH 0x8D42
+#define GL_RENDERER 0x1F01
+#define GL_REPEAT 0x2901
+#define GL_REPLACE 0x1E01
+#define GL_RGB 0x1907
+#define GL_RGB565 0x8D62
+#define GL_RGB5_A1 0x8057
+#define GL_RGBA 0x1908
+#define GL_RGBA4 0x8056
+#define GL_SAMPLER_2D 0x8B5E
+#define GL_SAMPLER_CUBE 0x8B60
+#define GL_SAMPLES 0x80A9
+#define GL_SAMPLE_ALPHA_TO_COVERAGE 0x809E
+#define GL_SAMPLE_BUFFERS 0x80A8
+#define GL_SAMPLE_COVERAGE 0x80A0
+#define GL_SAMPLE_COVERAGE_INVERT 0x80AB
+#define GL_SAMPLE_COVERAGE_VALUE 0x80AA
+#define GL_SCISSOR_BOX 0x0C10
+#define GL_SCISSOR_TEST 0x0C11
+#define GL_SHADER_BINARY_FORMATS 0x8DF8
+#define GL_SHADER_COMPILER 0x8DFA
+#define GL_SHADER_SOURCE_LENGTH 0x8B88
+#define GL_SHADER_TYPE 0x8B4F
+#define GL_SHADING_LANGUAGE_VERSION 0x8B8C
+#define GL_SHORT 0x1402
+#define GL_SRC_ALPHA 0x0302
+#define GL_SRC_ALPHA_SATURATE 0x0308
+#define GL_SRC_COLOR 0x0300
+#define GL_STATIC_DRAW 0x88E4
+#define GL_STENCIL_ATTACHMENT 0x8D20
+#define GL_STENCIL_BACK_FAIL 0x8801
+#define GL_STENCIL_BACK_FUNC 0x8800
+#define GL_STENCIL_BACK_PASS_DEPTH_FAIL 0x8802
+#define GL_STENCIL_BACK_PASS_DEPTH_PASS 0x8803
+#define GL_STENCIL_BACK_REF 0x8CA3
+#define GL_STENCIL_BACK_VALUE_MASK 0x8CA4
+#define GL_STENCIL_BACK_WRITEMASK 0x8CA5
+#define GL_STENCIL_BITS 0x0D57
+#define GL_STENCIL_BUFFER_BIT 0x00000400
+#define GL_STENCIL_CLEAR_VALUE 0x0B91
+#define GL_STENCIL_FAIL 0x0B94
+#define GL_STENCIL_FUNC 0x0B92
+#define GL_STENCIL_INDEX8 0x8D48
+#define GL_STENCIL_PASS_DEPTH_FAIL 0x0B95
+#define GL_STENCIL_PASS_DEPTH_PASS 0x0B96
+#define GL_STENCIL_REF 0x0B97
+#define GL_STENCIL_TEST 0x0B90
+#define GL_STENCIL_VALUE_MASK 0x0B93
+#define GL_STENCIL_WRITEMASK 0x0B98
+#define GL_STREAM_DRAW 0x88E0
+#define GL_SUBPIXEL_BITS 0x0D50
+#define GL_TEXTURE 0x1702
+#define GL_TEXTURE0 0x84C0
+#define GL_TEXTURE1 0x84C1
+#define GL_TEXTURE10 0x84CA
+#define GL_TEXTURE11 0x84CB
+#define GL_TEXTURE12 0x84CC
+#define GL_TEXTURE13 0x84CD
+#define GL_TEXTURE14 0x84CE
+#define GL_TEXTURE15 0x84CF
+#define GL_TEXTURE16 0x84D0
+#define GL_TEXTURE17 0x84D1
+#define GL_TEXTURE18 0x84D2
+#define GL_TEXTURE19 0x84D3
+#define GL_TEXTURE2 0x84C2
+#define GL_TEXTURE20 0x84D4
+#define GL_TEXTURE21 0x84D5
+#define GL_TEXTURE22 0x84D6
+#define GL_TEXTURE23 0x84D7
+#define GL_TEXTURE24 0x84D8
+#define GL_TEXTURE25 0x84D9
+#define GL_TEXTURE26 0x84DA
+#define GL_TEXTURE27 0x84DB
+#define GL_TEXTURE28 0x84DC
+#define GL_TEXTURE29 0x84DD
+#define GL_TEXTURE3 0x84C3
+#define GL_TEXTURE30 0x84DE
+#define GL_TEXTURE31 0x84DF
+#define GL_TEXTURE4 0x84C4
+#define GL_TEXTURE5 0x84C5
+#define GL_TEXTURE6 0x84C6
+#define GL_TEXTURE7 0x84C7
+#define GL_TEXTURE8 0x84C8
+#define GL_TEXTURE9 0x84C9
+#define GL_TEXTURE_2D 0x0DE1
+#define GL_TEXTURE_BINDING_2D 0x8069
+#define GL_TEXTURE_BINDING_CUBE_MAP 0x8514
+#define GL_TEXTURE_CUBE_MAP 0x8513
+#define GL_TEXTURE_CUBE_MAP_NEGATIVE_X 0x8516
+#define GL_TEXTURE_CUBE_MAP_NEGATIVE_Y 0x8518
+#define GL_TEXTURE_CUBE_MAP_NEGATIVE_Z 0x851A
+#define GL_TEXTURE_CUBE_MAP_POSITIVE_X 0x8515
+#define GL_TEXTURE_CUBE_MAP_POSITIVE_Y 0x8517
+#define GL_TEXTURE_CUBE_MAP_POSITIVE_Z 0x8519
+#define GL_TEXTURE_MAG_FILTER 0x2800
+#define GL_TEXTURE_MIN_FILTER 0x2801
+#define GL_TEXTURE_WRAP_S 0x2802
+#define GL_TEXTURE_WRAP_T 0x2803
+#define GL_TRIANGLES 0x0004
+#define GL_TRIANGLE_FAN 0x0006
+#define GL_TRIANGLE_STRIP 0x0005
+#define GL_TRUE 1
+#define GL_UNPACK_ALIGNMENT 0x0CF5
+#define GL_UNSIGNED_BYTE 0x1401
+#define GL_UNSIGNED_INT 0x1405
+#define GL_UNSIGNED_SHORT 0x1403
+#define GL_UNSIGNED_SHORT_4_4_4_4 0x8033
+#define GL_UNSIGNED_SHORT_5_5_5_1 0x8034
+#define GL_UNSIGNED_SHORT_5_6_5 0x8363
+#define GL_VALIDATE_STATUS 0x8B83
+#define GL_VENDOR 0x1F00
+#define GL_VERSION 0x1F02
+#define GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING 0x889F
+#define GL_VERTEX_ATTRIB_ARRAY_ENABLED 0x8622
+#define GL_VERTEX_ATTRIB_ARRAY_NORMALIZED 0x886A
+#define GL_VERTEX_ATTRIB_ARRAY_POINTER 0x8645
+#define GL_VERTEX_ATTRIB_ARRAY_SIZE 0x8623
+#define GL_VERTEX_ATTRIB_ARRAY_STRIDE 0x8624
+#define GL_VERTEX_ATTRIB_ARRAY_TYPE 0x8625
+#define GL_VERTEX_SHADER 0x8B31
+#define GL_VIEWPORT 0x0BA2
+#define GL_ZERO 0
+
+
+#ifndef __khrplatform_h_
+#define __khrplatform_h_
+
+/*
+** Copyright (c) 2008-2018 The Khronos Group Inc.
+**
+** Permission is hereby granted, free of charge, to any person obtaining a
+** copy of this software and/or associated documentation files (the
+** "Materials"), to deal in the Materials without restriction, including
+** without limitation the rights to use, copy, modify, merge, publish,
+** distribute, sublicense, and/or sell copies of the Materials, and to
+** permit persons to whom the Materials are furnished to do so, subject to
+** the following conditions:
+**
+** The above copyright notice and this permission notice shall be included
+** in all copies or substantial portions of the Materials.
+**
+** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+** EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+** MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+** IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+** CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+** TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+** MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+*/
+
+/* Khronos platform-specific types and definitions.
+ *
+ * The master copy of khrplatform.h is maintained in the Khronos EGL
+ * Registry repository at https://github.com/KhronosGroup/EGL-Registry
+ * The last semantic modification to khrplatform.h was at commit ID:
+ * 67a3e0864c2d75ea5287b9f3d2eb74a745936692
+ *
+ * Adopters may modify this file to suit their platform. Adopters are
+ * encouraged to submit platform specific modifications to the Khronos
+ * group so that they can be included in future versions of this file.
+ * Please submit changes by filing pull requests or issues on
+ * the EGL Registry repository linked above.
+ *
+ *
+ * See the Implementer's Guidelines for information about where this file
+ * should be located on your system and for more details of its use:
+ * http://www.khronos.org/registry/implementers_guide.pdf
+ *
+ * This file should be included as
+ * #include <KHR/khrplatform.h>
+ * by Khronos client API header files that use its types and defines.
+ *
+ * The types in khrplatform.h should only be used to define API-specific types.
+ *
+ * Types defined in khrplatform.h:
+ * khronos_int8_t signed 8 bit
+ * khronos_uint8_t unsigned 8 bit
+ * khronos_int16_t signed 16 bit
+ * khronos_uint16_t unsigned 16 bit
+ * khronos_int32_t signed 32 bit
+ * khronos_uint32_t unsigned 32 bit
+ * khronos_int64_t signed 64 bit
+ * khronos_uint64_t unsigned 64 bit
+ * khronos_intptr_t signed same number of bits as a pointer
+ * khronos_uintptr_t unsigned same number of bits as a pointer
+ * khronos_ssize_t signed size
+ * khronos_usize_t unsigned size
+ * khronos_float_t signed 32 bit floating point
+ * khronos_time_ns_t unsigned 64 bit time in nanoseconds
+ * khronos_utime_nanoseconds_t unsigned time interval or absolute time in
+ * nanoseconds
+ * khronos_stime_nanoseconds_t signed time interval in nanoseconds
+ * khronos_boolean_enum_t enumerated boolean type. This should
+ * only be used as a base type when a client API's boolean type is
+ * an enum. Client APIs which use an integer or other type for
+ * booleans cannot use this as the base type for their boolean.
+ *
+ * Tokens defined in khrplatform.h:
+ *
+ * KHRONOS_FALSE, KHRONOS_TRUE Enumerated boolean false/true values.
+ *
+ * KHRONOS_SUPPORT_INT64 is 1 if 64 bit integers are supported; otherwise 0.
+ * KHRONOS_SUPPORT_FLOAT is 1 if floats are supported; otherwise 0.
+ *
+ * Calling convention macros defined in this file:
+ * KHRONOS_APICALL
+ * KHRONOS_GLAD_API_PTR
+ * KHRONOS_APIATTRIBUTES
+ *
+ * These may be used in function prototypes as:
+ *
+ * KHRONOS_APICALL void KHRONOS_GLAD_API_PTR funcname(
+ * int arg1,
+ * int arg2) KHRONOS_APIATTRIBUTES;
+ */
+
+#if defined(__SCITECH_SNAP__) && !defined(KHRONOS_STATIC)
+# define KHRONOS_STATIC 1
+#endif
+
+/*-------------------------------------------------------------------------
+ * Definition of KHRONOS_APICALL
+ *-------------------------------------------------------------------------
+ * This precedes the return type of the function in the function prototype.
+ */
+#if defined(KHRONOS_STATIC)
+ /* If the preprocessor constant KHRONOS_STATIC is defined, make the
+ * header compatible with static linking. */
+# define KHRONOS_APICALL
+#elif defined(_WIN32)
+# define KHRONOS_APICALL __declspec(dllimport)
+#elif defined (__SYMBIAN32__)
+# define KHRONOS_APICALL IMPORT_C
+#elif defined(__ANDROID__)
+# define KHRONOS_APICALL __attribute__((visibility("default")))
+#else
+# define KHRONOS_APICALL
+#endif
+
+/*-------------------------------------------------------------------------
+ * Definition of KHRONOS_GLAD_API_PTR
+ *-------------------------------------------------------------------------
+ * This follows the return type of the function and precedes the function
+ * name in the function prototype.
+ */
+#if defined(_WIN32) && !defined(_WIN32_WCE) && !defined(__SCITECH_SNAP__)
+ /* Win32 but not WinCE */
+# define KHRONOS_GLAD_API_PTR __stdcall
+#else
+# define KHRONOS_GLAD_API_PTR
+#endif
+
+/*-------------------------------------------------------------------------
+ * Definition of KHRONOS_APIATTRIBUTES
+ *-------------------------------------------------------------------------
+ * This follows the closing parenthesis of the function prototype arguments.
+ */
+#if defined (__ARMCC_2__)
+#define KHRONOS_APIATTRIBUTES __softfp
+#else
+#define KHRONOS_APIATTRIBUTES
+#endif
+
+/*-------------------------------------------------------------------------
+ * basic type definitions
+ *-----------------------------------------------------------------------*/
+#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L) || defined(__GNUC__) || defined(__SCO__) || defined(__USLC__)
+
+
+/*
+ * Using <stdint.h>
+ */
+#include <stdint.h>
+typedef int32_t khronos_int32_t;
+typedef uint32_t khronos_uint32_t;
+typedef int64_t khronos_int64_t;
+typedef uint64_t khronos_uint64_t;
+#define KHRONOS_SUPPORT_INT64 1
+#define KHRONOS_SUPPORT_FLOAT 1
+
+#elif defined(__VMS ) || defined(__sgi)
+
+/*
+ * Using <inttypes.h>
+ */
+#include <inttypes.h>
+typedef int32_t khronos_int32_t;
+typedef uint32_t khronos_uint32_t;
+typedef int64_t khronos_int64_t;
+typedef uint64_t khronos_uint64_t;
+#define KHRONOS_SUPPORT_INT64 1
+#define KHRONOS_SUPPORT_FLOAT 1
+
+#elif defined(_WIN32) && !defined(__SCITECH_SNAP__)
+
+/*
+ * Win32
+ */
+typedef __int32 khronos_int32_t;
+typedef unsigned __int32 khronos_uint32_t;
+typedef __int64 khronos_int64_t;
+typedef unsigned __int64 khronos_uint64_t;
+#define KHRONOS_SUPPORT_INT64 1
+#define KHRONOS_SUPPORT_FLOAT 1
+
+#elif defined(__sun__) || defined(__digital__)
+
+/*
+ * Sun or Digital
+ */
+typedef int khronos_int32_t;
+typedef unsigned int khronos_uint32_t;
+#if defined(__arch64__) || defined(_LP64)
+typedef long int khronos_int64_t;
+typedef unsigned long int khronos_uint64_t;
+#else
+typedef long long int khronos_int64_t;
+typedef unsigned long long int khronos_uint64_t;
+#endif /* __arch64__ */
+#define KHRONOS_SUPPORT_INT64 1
+#define KHRONOS_SUPPORT_FLOAT 1
+
+#elif 0
+
+/*
+ * Hypothetical platform with no float or int64 support
+ */
+typedef int khronos_int32_t;
+typedef unsigned int khronos_uint32_t;
+#define KHRONOS_SUPPORT_INT64 0
+#define KHRONOS_SUPPORT_FLOAT 0
+
+#else
+
+/*
+ * Generic fallback
+ */
+#include <stdint.h>
+typedef int32_t khronos_int32_t;
+typedef uint32_t khronos_uint32_t;
+typedef int64_t khronos_int64_t;
+typedef uint64_t khronos_uint64_t;
+#define KHRONOS_SUPPORT_INT64 1
+#define KHRONOS_SUPPORT_FLOAT 1
+
+#endif
+
+
+/*
+ * Types that are (so far) the same on all platforms
+ */
+typedef signed char khronos_int8_t;
+typedef unsigned char khronos_uint8_t;
+typedef signed short int khronos_int16_t;
+typedef unsigned short int khronos_uint16_t;
+
+/*
+ * Types that differ between LLP64 and LP64 architectures - in LLP64,
+ * pointers are 64 bits, but 'long' is still 32 bits. Win64 appears
+ * to be the only LLP64 architecture in current use.
+ */
+#ifdef _WIN64
+typedef signed long long int khronos_intptr_t;
+typedef unsigned long long int khronos_uintptr_t;
+typedef signed long long int khronos_ssize_t;
+typedef unsigned long long int khronos_usize_t;
+#else
+typedef signed long int khronos_intptr_t;
+typedef unsigned long int khronos_uintptr_t;
+typedef signed long int khronos_ssize_t;
+typedef unsigned long int khronos_usize_t;
+#endif
+
+#if KHRONOS_SUPPORT_FLOAT
+/*
+ * Float type
+ */
+typedef float khronos_float_t;
+#endif
+
+#if KHRONOS_SUPPORT_INT64
+/* Time types
+ *
+ * These types can be used to represent a time interval in nanoseconds or
+ * an absolute Unadjusted System Time. Unadjusted System Time is the number
+ * of nanoseconds since some arbitrary system event (e.g. since the last
+ * time the system booted). The Unadjusted System Time is an unsigned
+ * 64 bit value that wraps back to 0 every 584 years. Time intervals
+ * may be either signed or unsigned.
+ */
+typedef khronos_uint64_t khronos_utime_nanoseconds_t;
+typedef khronos_int64_t khronos_stime_nanoseconds_t;
+#endif
+
+/*
+ * Dummy value used to pad enum types to 32 bits.
+ */
+#ifndef KHRONOS_MAX_ENUM
+#define KHRONOS_MAX_ENUM 0x7FFFFFFF
+#endif
+
+/*
+ * Enumerated boolean type
+ *
+ * Values other than zero should be considered to be true. Therefore
+ * comparisons should not be made against KHRONOS_TRUE.
+ */
+typedef enum {
+ KHRONOS_FALSE = 0,
+ KHRONOS_TRUE = 1,
+ KHRONOS_BOOLEAN_ENUM_FORCE_SIZE = KHRONOS_MAX_ENUM
+} khronos_boolean_enum_t;
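+
+/* Per the note above, test khronos_boolean_enum_t values for truth rather than
+ * comparing against KHRONOS_TRUE, i.e. write `if (flag)` instead of
+ * `if (flag == KHRONOS_TRUE)`. */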
+
+#endif /* __khrplatform_h_ */
+
+typedef unsigned int GLenum;
+
+typedef unsigned char GLboolean;
+
+typedef unsigned int GLbitfield;
+
+typedef void GLvoid;
+
+typedef khronos_int8_t GLbyte;
+
+typedef khronos_uint8_t GLubyte;
+
+typedef khronos_int16_t GLshort;
+
+typedef khronos_uint16_t GLushort;
+
+typedef int GLint;
+
+typedef unsigned int GLuint;
+
+typedef khronos_int32_t GLclampx;
+
+typedef int GLsizei;
+
+typedef khronos_float_t GLfloat;
+
+typedef khronos_float_t GLclampf;
+
+typedef double GLdouble;
+
+typedef double GLclampd;
+
+typedef void *GLeglClientBufferEXT;
+
+typedef void *GLeglImageOES;
+
+typedef char GLchar;
+
+typedef char GLcharARB;
+
+#ifdef __APPLE__
+typedef void *GLhandleARB;
+#else
+typedef unsigned int GLhandleARB;
+#endif
+
+typedef khronos_uint16_t GLhalf;
+
+typedef khronos_uint16_t GLhalfARB;
+
+typedef khronos_int32_t GLfixed;
+
+#if defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && (__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ > 1060)
+typedef khronos_intptr_t GLintptr;
+#else
+typedef khronos_intptr_t GLintptr;
+#endif
+
+#if defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && (__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ > 1060)
+typedef khronos_intptr_t GLintptrARB;
+#else
+typedef khronos_intptr_t GLintptrARB;
+#endif
+
+#if defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && (__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ > 1060)
+typedef khronos_ssize_t GLsizeiptr;
+#else
+typedef khronos_ssize_t GLsizeiptr;
+#endif
+
+#if defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && (__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ > 1060)
+typedef khronos_ssize_t GLsizeiptrARB;
+#else
+typedef khronos_ssize_t GLsizeiptrARB;
+#endif
+
+typedef khronos_int64_t GLint64;
+
+typedef khronos_int64_t GLint64EXT;
+
+typedef khronos_uint64_t GLuint64;
+
+typedef khronos_uint64_t GLuint64EXT;
+
+typedef struct __GLsync *GLsync;
+
+struct _cl_context;
+
+struct _cl_event;
+
+typedef void (GLAD_API_PTR *GLDEBUGPROC)(GLenum source,GLenum type,GLuint id,GLenum severity,GLsizei length,const GLchar *message,const void *userParam);
+
+typedef void (GLAD_API_PTR *GLDEBUGPROCARB)(GLenum source,GLenum type,GLuint id,GLenum severity,GLsizei length,const GLchar *message,const void *userParam);
+
+typedef void (GLAD_API_PTR *GLDEBUGPROCKHR)(GLenum source,GLenum type,GLuint id,GLenum severity,GLsizei length,const GLchar *message,const void *userParam);
+
+typedef void (GLAD_API_PTR *GLDEBUGPROCAMD)(GLuint id,GLenum category,GLenum severity,GLsizei length,const GLchar *message,void *userParam);
+
+typedef unsigned short GLhalfNV;
+
+typedef GLintptr GLvdpauSurfaceNV;
+
+typedef void (GLAD_API_PTR *GLVULKANPROCNV)(void);
+
+
+
+#define GL_ES_VERSION_2_0 1
+GLAD_API_CALL int GLAD_GL_ES_VERSION_2_0;
+
+
+typedef void (GLAD_API_PTR *PFNGLACTIVETEXTUREPROC)(GLenum texture);
+typedef void (GLAD_API_PTR *PFNGLATTACHSHADERPROC)(GLuint program, GLuint shader);
+typedef void (GLAD_API_PTR *PFNGLBINDATTRIBLOCATIONPROC)(GLuint program, GLuint index, const GLchar * name);
+typedef void (GLAD_API_PTR *PFNGLBINDBUFFERPROC)(GLenum target, GLuint buffer);
+typedef void (GLAD_API_PTR *PFNGLBINDFRAMEBUFFERPROC)(GLenum target, GLuint framebuffer);
+typedef void (GLAD_API_PTR *PFNGLBINDRENDERBUFFERPROC)(GLenum target, GLuint renderbuffer);
+typedef void (GLAD_API_PTR *PFNGLBINDTEXTUREPROC)(GLenum target, GLuint texture);
+typedef void (GLAD_API_PTR *PFNGLBLENDCOLORPROC)(GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha);
+typedef void (GLAD_API_PTR *PFNGLBLENDEQUATIONPROC)(GLenum mode);
+typedef void (GLAD_API_PTR *PFNGLBLENDEQUATIONSEPARATEPROC)(GLenum modeRGB, GLenum modeAlpha);
+typedef void (GLAD_API_PTR *PFNGLBLENDFUNCPROC)(GLenum sfactor, GLenum dfactor);
+typedef void (GLAD_API_PTR *PFNGLBLENDFUNCSEPARATEPROC)(GLenum sfactorRGB, GLenum dfactorRGB, GLenum sfactorAlpha, GLenum dfactorAlpha);
+typedef void (GLAD_API_PTR *PFNGLBUFFERDATAPROC)(GLenum target, GLsizeiptr size, const void * data, GLenum usage);
+typedef void (GLAD_API_PTR *PFNGLBUFFERSUBDATAPROC)(GLenum target, GLintptr offset, GLsizeiptr size, const void * data);
+typedef GLenum (GLAD_API_PTR *PFNGLCHECKFRAMEBUFFERSTATUSPROC)(GLenum target);
+typedef void (GLAD_API_PTR *PFNGLCLEARPROC)(GLbitfield mask);
+typedef void (GLAD_API_PTR *PFNGLCLEARCOLORPROC)(GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha);
+typedef void (GLAD_API_PTR *PFNGLCLEARDEPTHFPROC)(GLfloat d);
+typedef void (GLAD_API_PTR *PFNGLCLEARSTENCILPROC)(GLint s);
+typedef void (GLAD_API_PTR *PFNGLCOLORMASKPROC)(GLboolean red, GLboolean green, GLboolean blue, GLboolean alpha);
+typedef void (GLAD_API_PTR *PFNGLCOMPILESHADERPROC)(GLuint shader);
+typedef void (GLAD_API_PTR *PFNGLCOMPRESSEDTEXIMAGE2DPROC)(GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLint border, GLsizei imageSize, const void * data);
+typedef void (GLAD_API_PTR *PFNGLCOMPRESSEDTEXSUBIMAGE2DPROC)(GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLsizei imageSize, const void * data);
+typedef void (GLAD_API_PTR *PFNGLCOPYTEXIMAGE2DPROC)(GLenum target, GLint level, GLenum internalformat, GLint x, GLint y, GLsizei width, GLsizei height, GLint border);
+typedef void (GLAD_API_PTR *PFNGLCOPYTEXSUBIMAGE2DPROC)(GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint x, GLint y, GLsizei width, GLsizei height);
+typedef GLuint (GLAD_API_PTR *PFNGLCREATEPROGRAMPROC)(void);
+typedef GLuint (GLAD_API_PTR *PFNGLCREATESHADERPROC)(GLenum type);
+typedef void (GLAD_API_PTR *PFNGLCULLFACEPROC)(GLenum mode);
+typedef void (GLAD_API_PTR *PFNGLDELETEBUFFERSPROC)(GLsizei n, const GLuint * buffers);
+typedef void (GLAD_API_PTR *PFNGLDELETEFRAMEBUFFERSPROC)(GLsizei n, const GLuint * framebuffers);
+typedef void (GLAD_API_PTR *PFNGLDELETEPROGRAMPROC)(GLuint program);
+typedef void (GLAD_API_PTR *PFNGLDELETERENDERBUFFERSPROC)(GLsizei n, const GLuint * renderbuffers);
+typedef void (GLAD_API_PTR *PFNGLDELETESHADERPROC)(GLuint shader);
+typedef void (GLAD_API_PTR *PFNGLDELETETEXTURESPROC)(GLsizei n, const GLuint * textures);
+typedef void (GLAD_API_PTR *PFNGLDEPTHFUNCPROC)(GLenum func);
+typedef void (GLAD_API_PTR *PFNGLDEPTHMASKPROC)(GLboolean flag);
+typedef void (GLAD_API_PTR *PFNGLDEPTHRANGEFPROC)(GLfloat n, GLfloat f);
+typedef void (GLAD_API_PTR *PFNGLDETACHSHADERPROC)(GLuint program, GLuint shader);
+typedef void (GLAD_API_PTR *PFNGLDISABLEPROC)(GLenum cap);
+typedef void (GLAD_API_PTR *PFNGLDISABLEVERTEXATTRIBARRAYPROC)(GLuint index);
+typedef void (GLAD_API_PTR *PFNGLDRAWARRAYSPROC)(GLenum mode, GLint first, GLsizei count);
+typedef void (GLAD_API_PTR *PFNGLDRAWELEMENTSPROC)(GLenum mode, GLsizei count, GLenum type, const void * indices);
+typedef void (GLAD_API_PTR *PFNGLENABLEPROC)(GLenum cap);
+typedef void (GLAD_API_PTR *PFNGLENABLEVERTEXATTRIBARRAYPROC)(GLuint index);
+typedef void (GLAD_API_PTR *PFNGLFINISHPROC)(void);
+typedef void (GLAD_API_PTR *PFNGLFLUSHPROC)(void);
+typedef void (GLAD_API_PTR *PFNGLFRAMEBUFFERRENDERBUFFERPROC)(GLenum target, GLenum attachment, GLenum renderbuffertarget, GLuint renderbuffer);
+typedef void (GLAD_API_PTR *PFNGLFRAMEBUFFERTEXTURE2DPROC)(GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level);
+typedef void (GLAD_API_PTR *PFNGLFRONTFACEPROC)(GLenum mode);
+typedef void (GLAD_API_PTR *PFNGLGENBUFFERSPROC)(GLsizei n, GLuint * buffers);
+typedef void (GLAD_API_PTR *PFNGLGENFRAMEBUFFERSPROC)(GLsizei n, GLuint * framebuffers);
+typedef void (GLAD_API_PTR *PFNGLGENRENDERBUFFERSPROC)(GLsizei n, GLuint * renderbuffers);
+typedef void (GLAD_API_PTR *PFNGLGENTEXTURESPROC)(GLsizei n, GLuint * textures);
+typedef void (GLAD_API_PTR *PFNGLGENERATEMIPMAPPROC)(GLenum target);
+typedef void (GLAD_API_PTR *PFNGLGETACTIVEATTRIBPROC)(GLuint program, GLuint index, GLsizei bufSize, GLsizei * length, GLint * size, GLenum * type, GLchar * name);
+typedef void (GLAD_API_PTR *PFNGLGETACTIVEUNIFORMPROC)(GLuint program, GLuint index, GLsizei bufSize, GLsizei * length, GLint * size, GLenum * type, GLchar * name);
+typedef void (GLAD_API_PTR *PFNGLGETATTACHEDSHADERSPROC)(GLuint program, GLsizei maxCount, GLsizei * count, GLuint * shaders);
+typedef GLint (GLAD_API_PTR *PFNGLGETATTRIBLOCATIONPROC)(GLuint program, const GLchar * name);
+typedef void (GLAD_API_PTR *PFNGLGETBOOLEANVPROC)(GLenum pname, GLboolean * data);
+typedef void (GLAD_API_PTR *PFNGLGETBUFFERPARAMETERIVPROC)(GLenum target, GLenum pname, GLint * params);
+typedef GLenum (GLAD_API_PTR *PFNGLGETERRORPROC)(void);
+typedef void (GLAD_API_PTR *PFNGLGETFLOATVPROC)(GLenum pname, GLfloat * data);
+typedef void (GLAD_API_PTR *PFNGLGETFRAMEBUFFERATTACHMENTPARAMETERIVPROC)(GLenum target, GLenum attachment, GLenum pname, GLint * params);
+typedef void (GLAD_API_PTR *PFNGLGETINTEGERVPROC)(GLenum pname, GLint * data);
+typedef void (GLAD_API_PTR *PFNGLGETPROGRAMINFOLOGPROC)(GLuint program, GLsizei bufSize, GLsizei * length, GLchar * infoLog);
+typedef void (GLAD_API_PTR *PFNGLGETPROGRAMIVPROC)(GLuint program, GLenum pname, GLint * params);
+typedef void (GLAD_API_PTR *PFNGLGETRENDERBUFFERPARAMETERIVPROC)(GLenum target, GLenum pname, GLint * params);
+typedef void (GLAD_API_PTR *PFNGLGETSHADERINFOLOGPROC)(GLuint shader, GLsizei bufSize, GLsizei * length, GLchar * infoLog);
+typedef void (GLAD_API_PTR *PFNGLGETSHADERPRECISIONFORMATPROC)(GLenum shadertype, GLenum precisiontype, GLint * range, GLint * precision);
+typedef void (GLAD_API_PTR *PFNGLGETSHADERSOURCEPROC)(GLuint shader, GLsizei bufSize, GLsizei * length, GLchar * source);
+typedef void (GLAD_API_PTR *PFNGLGETSHADERIVPROC)(GLuint shader, GLenum pname, GLint * params);
+typedef const GLubyte * (GLAD_API_PTR *PFNGLGETSTRINGPROC)(GLenum name);
+typedef void (GLAD_API_PTR *PFNGLGETTEXPARAMETERFVPROC)(GLenum target, GLenum pname, GLfloat * params);
+typedef void (GLAD_API_PTR *PFNGLGETTEXPARAMETERIVPROC)(GLenum target, GLenum pname, GLint * params);
+typedef GLint (GLAD_API_PTR *PFNGLGETUNIFORMLOCATIONPROC)(GLuint program, const GLchar * name);
+typedef void (GLAD_API_PTR *PFNGLGETUNIFORMFVPROC)(GLuint program, GLint location, GLfloat * params);
+typedef void (GLAD_API_PTR *PFNGLGETUNIFORMIVPROC)(GLuint program, GLint location, GLint * params);
+typedef void (GLAD_API_PTR *PFNGLGETVERTEXATTRIBPOINTERVPROC)(GLuint index, GLenum pname, void ** pointer);
+typedef void (GLAD_API_PTR *PFNGLGETVERTEXATTRIBFVPROC)(GLuint index, GLenum pname, GLfloat * params);
+typedef void (GLAD_API_PTR *PFNGLGETVERTEXATTRIBIVPROC)(GLuint index, GLenum pname, GLint * params);
+typedef void (GLAD_API_PTR *PFNGLHINTPROC)(GLenum target, GLenum mode);
+typedef GLboolean (GLAD_API_PTR *PFNGLISBUFFERPROC)(GLuint buffer);
+typedef GLboolean (GLAD_API_PTR *PFNGLISENABLEDPROC)(GLenum cap);
+typedef GLboolean (GLAD_API_PTR *PFNGLISFRAMEBUFFERPROC)(GLuint framebuffer);
+typedef GLboolean (GLAD_API_PTR *PFNGLISPROGRAMPROC)(GLuint program);
+typedef GLboolean (GLAD_API_PTR *PFNGLISRENDERBUFFERPROC)(GLuint renderbuffer);
+typedef GLboolean (GLAD_API_PTR *PFNGLISSHADERPROC)(GLuint shader);
+typedef GLboolean (GLAD_API_PTR *PFNGLISTEXTUREPROC)(GLuint texture);
+typedef void (GLAD_API_PTR *PFNGLLINEWIDTHPROC)(GLfloat width);
+typedef void (GLAD_API_PTR *PFNGLLINKPROGRAMPROC)(GLuint program);
+typedef void (GLAD_API_PTR *PFNGLPIXELSTOREIPROC)(GLenum pname, GLint param);
+typedef void (GLAD_API_PTR *PFNGLPOLYGONOFFSETPROC)(GLfloat factor, GLfloat units);
+typedef void (GLAD_API_PTR *PFNGLREADPIXELSPROC)(GLint x, GLint y, GLsizei width, GLsizei height, GLenum format, GLenum type, void * pixels);
+typedef void (GLAD_API_PTR *PFNGLRELEASESHADERCOMPILERPROC)(void);
+typedef void (GLAD_API_PTR *PFNGLRENDERBUFFERSTORAGEPROC)(GLenum target, GLenum internalformat, GLsizei width, GLsizei height);
+typedef void (GLAD_API_PTR *PFNGLSAMPLECOVERAGEPROC)(GLfloat value, GLboolean invert);
+typedef void (GLAD_API_PTR *PFNGLSCISSORPROC)(GLint x, GLint y, GLsizei width, GLsizei height);
+typedef void (GLAD_API_PTR *PFNGLSHADERBINARYPROC)(GLsizei count, const GLuint * shaders, GLenum binaryFormat, const void * binary, GLsizei length);
+typedef void (GLAD_API_PTR *PFNGLSHADERSOURCEPROC)(GLuint shader, GLsizei count, const GLchar *const* string, const GLint * length);
+typedef void (GLAD_API_PTR *PFNGLSTENCILFUNCPROC)(GLenum func, GLint ref, GLuint mask);
+typedef void (GLAD_API_PTR *PFNGLSTENCILFUNCSEPARATEPROC)(GLenum face, GLenum func, GLint ref, GLuint mask);
+typedef void (GLAD_API_PTR *PFNGLSTENCILMASKPROC)(GLuint mask);
+typedef void (GLAD_API_PTR *PFNGLSTENCILMASKSEPARATEPROC)(GLenum face, GLuint mask);
+typedef void (GLAD_API_PTR *PFNGLSTENCILOPPROC)(GLenum fail, GLenum zfail, GLenum zpass);
+typedef void (GLAD_API_PTR *PFNGLSTENCILOPSEPARATEPROC)(GLenum face, GLenum sfail, GLenum dpfail, GLenum dppass);
+typedef void (GLAD_API_PTR *PFNGLTEXIMAGE2DPROC)(GLenum target, GLint level, GLint internalformat, GLsizei width, GLsizei height, GLint border, GLenum format, GLenum type, const void * pixels);
+typedef void (GLAD_API_PTR *PFNGLTEXPARAMETERFPROC)(GLenum target, GLenum pname, GLfloat param);
+typedef void (GLAD_API_PTR *PFNGLTEXPARAMETERFVPROC)(GLenum target, GLenum pname, const GLfloat * params);
+typedef void (GLAD_API_PTR *PFNGLTEXPARAMETERIPROC)(GLenum target, GLenum pname, GLint param);
+typedef void (GLAD_API_PTR *PFNGLTEXPARAMETERIVPROC)(GLenum target, GLenum pname, const GLint * params);
+typedef void (GLAD_API_PTR *PFNGLTEXSUBIMAGE2DPROC)(GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLenum type, const void * pixels);
+typedef void (GLAD_API_PTR *PFNGLUNIFORM1FPROC)(GLint location, GLfloat v0);
+typedef void (GLAD_API_PTR *PFNGLUNIFORM1FVPROC)(GLint location, GLsizei count, const GLfloat * value);
+typedef void (GLAD_API_PTR *PFNGLUNIFORM1IPROC)(GLint location, GLint v0);
+typedef void (GLAD_API_PTR *PFNGLUNIFORM1IVPROC)(GLint location, GLsizei count, const GLint * value);
+typedef void (GLAD_API_PTR *PFNGLUNIFORM2FPROC)(GLint location, GLfloat v0, GLfloat v1);
+typedef void (GLAD_API_PTR *PFNGLUNIFORM2FVPROC)(GLint location, GLsizei count, const GLfloat * value);
+typedef void (GLAD_API_PTR *PFNGLUNIFORM2IPROC)(GLint location, GLint v0, GLint v1);
+typedef void (GLAD_API_PTR *PFNGLUNIFORM2IVPROC)(GLint location, GLsizei count, const GLint * value);
+typedef void (GLAD_API_PTR *PFNGLUNIFORM3FPROC)(GLint location, GLfloat v0, GLfloat v1, GLfloat v2);
+typedef void (GLAD_API_PTR *PFNGLUNIFORM3FVPROC)(GLint location, GLsizei count, const GLfloat * value);
+typedef void (GLAD_API_PTR *PFNGLUNIFORM3IPROC)(GLint location, GLint v0, GLint v1, GLint v2);
+typedef void (GLAD_API_PTR *PFNGLUNIFORM3IVPROC)(GLint location, GLsizei count, const GLint * value);
+typedef void (GLAD_API_PTR *PFNGLUNIFORM4FPROC)(GLint location, GLfloat v0, GLfloat v1, GLfloat v2, GLfloat v3);
+typedef void (GLAD_API_PTR *PFNGLUNIFORM4FVPROC)(GLint location, GLsizei count, const GLfloat * value);
+typedef void (GLAD_API_PTR *PFNGLUNIFORM4IPROC)(GLint location, GLint v0, GLint v1, GLint v2, GLint v3);
+typedef void (GLAD_API_PTR *PFNGLUNIFORM4IVPROC)(GLint location, GLsizei count, const GLint * value);
+typedef void (GLAD_API_PTR *PFNGLUNIFORMMATRIX2FVPROC)(GLint location, GLsizei count, GLboolean transpose, const GLfloat * value);
+typedef void (GLAD_API_PTR *PFNGLUNIFORMMATRIX3FVPROC)(GLint location, GLsizei count, GLboolean transpose, const GLfloat * value);
+typedef void (GLAD_API_PTR *PFNGLUNIFORMMATRIX4FVPROC)(GLint location, GLsizei count, GLboolean transpose, const GLfloat * value);
+typedef void (GLAD_API_PTR *PFNGLUSEPROGRAMPROC)(GLuint program);
+typedef void (GLAD_API_PTR *PFNGLVALIDATEPROGRAMPROC)(GLuint program);
+typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIB1FPROC)(GLuint index, GLfloat x);
+typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIB1FVPROC)(GLuint index, const GLfloat * v);
+typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIB2FPROC)(GLuint index, GLfloat x, GLfloat y);
+typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIB2FVPROC)(GLuint index, const GLfloat * v);
+typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIB3FPROC)(GLuint index, GLfloat x, GLfloat y, GLfloat z);
+typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIB3FVPROC)(GLuint index, const GLfloat * v);
+typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIB4FPROC)(GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w);
+typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIB4FVPROC)(GLuint index, const GLfloat * v);
+typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIBPOINTERPROC)(GLuint index, GLint size, GLenum type, GLboolean normalized, GLsizei stride, const void * pointer);
+typedef void (GLAD_API_PTR *PFNGLVIEWPORTPROC)(GLint x, GLint y, GLsizei width, GLsizei height);
+
+GLAD_API_CALL PFNGLACTIVETEXTUREPROC glad_glActiveTexture;
+#define glActiveTexture glad_glActiveTexture
+GLAD_API_CALL PFNGLATTACHSHADERPROC glad_glAttachShader;
+#define glAttachShader glad_glAttachShader
+GLAD_API_CALL PFNGLBINDATTRIBLOCATIONPROC glad_glBindAttribLocation;
+#define glBindAttribLocation glad_glBindAttribLocation
+GLAD_API_CALL PFNGLBINDBUFFERPROC glad_glBindBuffer;
+#define glBindBuffer glad_glBindBuffer
+GLAD_API_CALL PFNGLBINDFRAMEBUFFERPROC glad_glBindFramebuffer;
+#define glBindFramebuffer glad_glBindFramebuffer
+GLAD_API_CALL PFNGLBINDRENDERBUFFERPROC glad_glBindRenderbuffer;
+#define glBindRenderbuffer glad_glBindRenderbuffer
+GLAD_API_CALL PFNGLBINDTEXTUREPROC glad_glBindTexture;
+#define glBindTexture glad_glBindTexture
+GLAD_API_CALL PFNGLBLENDCOLORPROC glad_glBlendColor;
+#define glBlendColor glad_glBlendColor
+GLAD_API_CALL PFNGLBLENDEQUATIONPROC glad_glBlendEquation;
+#define glBlendEquation glad_glBlendEquation
+GLAD_API_CALL PFNGLBLENDEQUATIONSEPARATEPROC glad_glBlendEquationSeparate;
+#define glBlendEquationSeparate glad_glBlendEquationSeparate
+GLAD_API_CALL PFNGLBLENDFUNCPROC glad_glBlendFunc;
+#define glBlendFunc glad_glBlendFunc
+GLAD_API_CALL PFNGLBLENDFUNCSEPARATEPROC glad_glBlendFuncSeparate;
+#define glBlendFuncSeparate glad_glBlendFuncSeparate
+GLAD_API_CALL PFNGLBUFFERDATAPROC glad_glBufferData;
+#define glBufferData glad_glBufferData
+GLAD_API_CALL PFNGLBUFFERSUBDATAPROC glad_glBufferSubData;
+#define glBufferSubData glad_glBufferSubData
+GLAD_API_CALL PFNGLCHECKFRAMEBUFFERSTATUSPROC glad_glCheckFramebufferStatus;
+#define glCheckFramebufferStatus glad_glCheckFramebufferStatus
+GLAD_API_CALL PFNGLCLEARPROC glad_glClear;
+#define glClear glad_glClear
+GLAD_API_CALL PFNGLCLEARCOLORPROC glad_glClearColor;
+#define glClearColor glad_glClearColor
+GLAD_API_CALL PFNGLCLEARDEPTHFPROC glad_glClearDepthf;
+#define glClearDepthf glad_glClearDepthf
+GLAD_API_CALL PFNGLCLEARSTENCILPROC glad_glClearStencil;
+#define glClearStencil glad_glClearStencil
+GLAD_API_CALL PFNGLCOLORMASKPROC glad_glColorMask;
+#define glColorMask glad_glColorMask
+GLAD_API_CALL PFNGLCOMPILESHADERPROC glad_glCompileShader;
+#define glCompileShader glad_glCompileShader
+GLAD_API_CALL PFNGLCOMPRESSEDTEXIMAGE2DPROC glad_glCompressedTexImage2D;
+#define glCompressedTexImage2D glad_glCompressedTexImage2D
+GLAD_API_CALL PFNGLCOMPRESSEDTEXSUBIMAGE2DPROC glad_glCompressedTexSubImage2D;
+#define glCompressedTexSubImage2D glad_glCompressedTexSubImage2D
+GLAD_API_CALL PFNGLCOPYTEXIMAGE2DPROC glad_glCopyTexImage2D;
+#define glCopyTexImage2D glad_glCopyTexImage2D
+GLAD_API_CALL PFNGLCOPYTEXSUBIMAGE2DPROC glad_glCopyTexSubImage2D;
+#define glCopyTexSubImage2D glad_glCopyTexSubImage2D
+GLAD_API_CALL PFNGLCREATEPROGRAMPROC glad_glCreateProgram;
+#define glCreateProgram glad_glCreateProgram
+GLAD_API_CALL PFNGLCREATESHADERPROC glad_glCreateShader;
+#define glCreateShader glad_glCreateShader
+GLAD_API_CALL PFNGLCULLFACEPROC glad_glCullFace;
+#define glCullFace glad_glCullFace
+GLAD_API_CALL PFNGLDELETEBUFFERSPROC glad_glDeleteBuffers;
+#define glDeleteBuffers glad_glDeleteBuffers
+GLAD_API_CALL PFNGLDELETEFRAMEBUFFERSPROC glad_glDeleteFramebuffers;
+#define glDeleteFramebuffers glad_glDeleteFramebuffers
+GLAD_API_CALL PFNGLDELETEPROGRAMPROC glad_glDeleteProgram;
+#define glDeleteProgram glad_glDeleteProgram
+GLAD_API_CALL PFNGLDELETERENDERBUFFERSPROC glad_glDeleteRenderbuffers;
+#define glDeleteRenderbuffers glad_glDeleteRenderbuffers
+GLAD_API_CALL PFNGLDELETESHADERPROC glad_glDeleteShader;
+#define glDeleteShader glad_glDeleteShader
+GLAD_API_CALL PFNGLDELETETEXTURESPROC glad_glDeleteTextures;
+#define glDeleteTextures glad_glDeleteTextures
+GLAD_API_CALL PFNGLDEPTHFUNCPROC glad_glDepthFunc;
+#define glDepthFunc glad_glDepthFunc
+GLAD_API_CALL PFNGLDEPTHMASKPROC glad_glDepthMask;
+#define glDepthMask glad_glDepthMask
+GLAD_API_CALL PFNGLDEPTHRANGEFPROC glad_glDepthRangef;
+#define glDepthRangef glad_glDepthRangef
+GLAD_API_CALL PFNGLDETACHSHADERPROC glad_glDetachShader;
+#define glDetachShader glad_glDetachShader
+GLAD_API_CALL PFNGLDISABLEPROC glad_glDisable;
+#define glDisable glad_glDisable
+GLAD_API_CALL PFNGLDISABLEVERTEXATTRIBARRAYPROC glad_glDisableVertexAttribArray;
+#define glDisableVertexAttribArray glad_glDisableVertexAttribArray
+GLAD_API_CALL PFNGLDRAWARRAYSPROC glad_glDrawArrays;
+#define glDrawArrays glad_glDrawArrays
+GLAD_API_CALL PFNGLDRAWELEMENTSPROC glad_glDrawElements;
+#define glDrawElements glad_glDrawElements
+GLAD_API_CALL PFNGLENABLEPROC glad_glEnable;
+#define glEnable glad_glEnable
+GLAD_API_CALL PFNGLENABLEVERTEXATTRIBARRAYPROC glad_glEnableVertexAttribArray;
+#define glEnableVertexAttribArray glad_glEnableVertexAttribArray
+GLAD_API_CALL PFNGLFINISHPROC glad_glFinish;
+#define glFinish glad_glFinish
+GLAD_API_CALL PFNGLFLUSHPROC glad_glFlush;
+#define glFlush glad_glFlush
+GLAD_API_CALL PFNGLFRAMEBUFFERRENDERBUFFERPROC glad_glFramebufferRenderbuffer;
+#define glFramebufferRenderbuffer glad_glFramebufferRenderbuffer
+GLAD_API_CALL PFNGLFRAMEBUFFERTEXTURE2DPROC glad_glFramebufferTexture2D;
+#define glFramebufferTexture2D glad_glFramebufferTexture2D
+GLAD_API_CALL PFNGLFRONTFACEPROC glad_glFrontFace;
+#define glFrontFace glad_glFrontFace
+GLAD_API_CALL PFNGLGENBUFFERSPROC glad_glGenBuffers;
+#define glGenBuffers glad_glGenBuffers
+GLAD_API_CALL PFNGLGENFRAMEBUFFERSPROC glad_glGenFramebuffers;
+#define glGenFramebuffers glad_glGenFramebuffers
+GLAD_API_CALL PFNGLGENRENDERBUFFERSPROC glad_glGenRenderbuffers;
+#define glGenRenderbuffers glad_glGenRenderbuffers
+GLAD_API_CALL PFNGLGENTEXTURESPROC glad_glGenTextures;
+#define glGenTextures glad_glGenTextures
+GLAD_API_CALL PFNGLGENERATEMIPMAPPROC glad_glGenerateMipmap;
+#define glGenerateMipmap glad_glGenerateMipmap
+GLAD_API_CALL PFNGLGETACTIVEATTRIBPROC glad_glGetActiveAttrib;
+#define glGetActiveAttrib glad_glGetActiveAttrib
+GLAD_API_CALL PFNGLGETACTIVEUNIFORMPROC glad_glGetActiveUniform;
+#define glGetActiveUniform glad_glGetActiveUniform
+GLAD_API_CALL PFNGLGETATTACHEDSHADERSPROC glad_glGetAttachedShaders;
+#define glGetAttachedShaders glad_glGetAttachedShaders
+GLAD_API_CALL PFNGLGETATTRIBLOCATIONPROC glad_glGetAttribLocation;
+#define glGetAttribLocation glad_glGetAttribLocation
+GLAD_API_CALL PFNGLGETBOOLEANVPROC glad_glGetBooleanv;
+#define glGetBooleanv glad_glGetBooleanv
+GLAD_API_CALL PFNGLGETBUFFERPARAMETERIVPROC glad_glGetBufferParameteriv;
+#define glGetBufferParameteriv glad_glGetBufferParameteriv
+GLAD_API_CALL PFNGLGETERRORPROC glad_glGetError;
+#define glGetError glad_glGetError
+GLAD_API_CALL PFNGLGETFLOATVPROC glad_glGetFloatv;
+#define glGetFloatv glad_glGetFloatv
+GLAD_API_CALL PFNGLGETFRAMEBUFFERATTACHMENTPARAMETERIVPROC glad_glGetFramebufferAttachmentParameteriv;
+#define glGetFramebufferAttachmentParameteriv glad_glGetFramebufferAttachmentParameteriv
+GLAD_API_CALL PFNGLGETINTEGERVPROC glad_glGetIntegerv;
+#define glGetIntegerv glad_glGetIntegerv
+GLAD_API_CALL PFNGLGETPROGRAMINFOLOGPROC glad_glGetProgramInfoLog;
+#define glGetProgramInfoLog glad_glGetProgramInfoLog
+GLAD_API_CALL PFNGLGETPROGRAMIVPROC glad_glGetProgramiv;
+#define glGetProgramiv glad_glGetProgramiv
+GLAD_API_CALL PFNGLGETRENDERBUFFERPARAMETERIVPROC glad_glGetRenderbufferParameteriv;
+#define glGetRenderbufferParameteriv glad_glGetRenderbufferParameteriv
+GLAD_API_CALL PFNGLGETSHADERINFOLOGPROC glad_glGetShaderInfoLog;
+#define glGetShaderInfoLog glad_glGetShaderInfoLog
+GLAD_API_CALL PFNGLGETSHADERPRECISIONFORMATPROC glad_glGetShaderPrecisionFormat;
+#define glGetShaderPrecisionFormat glad_glGetShaderPrecisionFormat
+GLAD_API_CALL PFNGLGETSHADERSOURCEPROC glad_glGetShaderSource;
+#define glGetShaderSource glad_glGetShaderSource
+GLAD_API_CALL PFNGLGETSHADERIVPROC glad_glGetShaderiv;
+#define glGetShaderiv glad_glGetShaderiv
+GLAD_API_CALL PFNGLGETSTRINGPROC glad_glGetString;
+#define glGetString glad_glGetString
+GLAD_API_CALL PFNGLGETTEXPARAMETERFVPROC glad_glGetTexParameterfv;
+#define glGetTexParameterfv glad_glGetTexParameterfv
+GLAD_API_CALL PFNGLGETTEXPARAMETERIVPROC glad_glGetTexParameteriv;
+#define glGetTexParameteriv glad_glGetTexParameteriv
+GLAD_API_CALL PFNGLGETUNIFORMLOCATIONPROC glad_glGetUniformLocation;
+#define glGetUniformLocation glad_glGetUniformLocation
+GLAD_API_CALL PFNGLGETUNIFORMFVPROC glad_glGetUniformfv;
+#define glGetUniformfv glad_glGetUniformfv
+GLAD_API_CALL PFNGLGETUNIFORMIVPROC glad_glGetUniformiv;
+#define glGetUniformiv glad_glGetUniformiv
+GLAD_API_CALL PFNGLGETVERTEXATTRIBPOINTERVPROC glad_glGetVertexAttribPointerv;
+#define glGetVertexAttribPointerv glad_glGetVertexAttribPointerv
+GLAD_API_CALL PFNGLGETVERTEXATTRIBFVPROC glad_glGetVertexAttribfv;
+#define glGetVertexAttribfv glad_glGetVertexAttribfv
+GLAD_API_CALL PFNGLGETVERTEXATTRIBIVPROC glad_glGetVertexAttribiv;
+#define glGetVertexAttribiv glad_glGetVertexAttribiv
+GLAD_API_CALL PFNGLHINTPROC glad_glHint;
+#define glHint glad_glHint
+GLAD_API_CALL PFNGLISBUFFERPROC glad_glIsBuffer;
+#define glIsBuffer glad_glIsBuffer
+GLAD_API_CALL PFNGLISENABLEDPROC glad_glIsEnabled;
+#define glIsEnabled glad_glIsEnabled
+GLAD_API_CALL PFNGLISFRAMEBUFFERPROC glad_glIsFramebuffer;
+#define glIsFramebuffer glad_glIsFramebuffer
+GLAD_API_CALL PFNGLISPROGRAMPROC glad_glIsProgram;
+#define glIsProgram glad_glIsProgram
+GLAD_API_CALL PFNGLISRENDERBUFFERPROC glad_glIsRenderbuffer;
+#define glIsRenderbuffer glad_glIsRenderbuffer
+GLAD_API_CALL PFNGLISSHADERPROC glad_glIsShader;
+#define glIsShader glad_glIsShader
+GLAD_API_CALL PFNGLISTEXTUREPROC glad_glIsTexture;
+#define glIsTexture glad_glIsTexture
+GLAD_API_CALL PFNGLLINEWIDTHPROC glad_glLineWidth;
+#define glLineWidth glad_glLineWidth
+GLAD_API_CALL PFNGLLINKPROGRAMPROC glad_glLinkProgram;
+#define glLinkProgram glad_glLinkProgram
+GLAD_API_CALL PFNGLPIXELSTOREIPROC glad_glPixelStorei;
+#define glPixelStorei glad_glPixelStorei
+GLAD_API_CALL PFNGLPOLYGONOFFSETPROC glad_glPolygonOffset;
+#define glPolygonOffset glad_glPolygonOffset
+GLAD_API_CALL PFNGLREADPIXELSPROC glad_glReadPixels;
+#define glReadPixels glad_glReadPixels
+GLAD_API_CALL PFNGLRELEASESHADERCOMPILERPROC glad_glReleaseShaderCompiler;
+#define glReleaseShaderCompiler glad_glReleaseShaderCompiler
+GLAD_API_CALL PFNGLRENDERBUFFERSTORAGEPROC glad_glRenderbufferStorage;
+#define glRenderbufferStorage glad_glRenderbufferStorage
+GLAD_API_CALL PFNGLSAMPLECOVERAGEPROC glad_glSampleCoverage;
+#define glSampleCoverage glad_glSampleCoverage
+GLAD_API_CALL PFNGLSCISSORPROC glad_glScissor;
+#define glScissor glad_glScissor
+GLAD_API_CALL PFNGLSHADERBINARYPROC glad_glShaderBinary;
+#define glShaderBinary glad_glShaderBinary
+GLAD_API_CALL PFNGLSHADERSOURCEPROC glad_glShaderSource;
+#define glShaderSource glad_glShaderSource
+GLAD_API_CALL PFNGLSTENCILFUNCPROC glad_glStencilFunc;
+#define glStencilFunc glad_glStencilFunc
+GLAD_API_CALL PFNGLSTENCILFUNCSEPARATEPROC glad_glStencilFuncSeparate;
+#define glStencilFuncSeparate glad_glStencilFuncSeparate
+GLAD_API_CALL PFNGLSTENCILMASKPROC glad_glStencilMask;
+#define glStencilMask glad_glStencilMask
+GLAD_API_CALL PFNGLSTENCILMASKSEPARATEPROC glad_glStencilMaskSeparate;
+#define glStencilMaskSeparate glad_glStencilMaskSeparate
+GLAD_API_CALL PFNGLSTENCILOPPROC glad_glStencilOp;
+#define glStencilOp glad_glStencilOp
+GLAD_API_CALL PFNGLSTENCILOPSEPARATEPROC glad_glStencilOpSeparate;
+#define glStencilOpSeparate glad_glStencilOpSeparate
+GLAD_API_CALL PFNGLTEXIMAGE2DPROC glad_glTexImage2D;
+#define glTexImage2D glad_glTexImage2D
+GLAD_API_CALL PFNGLTEXPARAMETERFPROC glad_glTexParameterf;
+#define glTexParameterf glad_glTexParameterf
+GLAD_API_CALL PFNGLTEXPARAMETERFVPROC glad_glTexParameterfv;
+#define glTexParameterfv glad_glTexParameterfv
+GLAD_API_CALL PFNGLTEXPARAMETERIPROC glad_glTexParameteri;
+#define glTexParameteri glad_glTexParameteri
+GLAD_API_CALL PFNGLTEXPARAMETERIVPROC glad_glTexParameteriv;
+#define glTexParameteriv glad_glTexParameteriv
+GLAD_API_CALL PFNGLTEXSUBIMAGE2DPROC glad_glTexSubImage2D;
+#define glTexSubImage2D glad_glTexSubImage2D
+GLAD_API_CALL PFNGLUNIFORM1FPROC glad_glUniform1f;
+#define glUniform1f glad_glUniform1f
+GLAD_API_CALL PFNGLUNIFORM1FVPROC glad_glUniform1fv;
+#define glUniform1fv glad_glUniform1fv
+GLAD_API_CALL PFNGLUNIFORM1IPROC glad_glUniform1i;
+#define glUniform1i glad_glUniform1i
+GLAD_API_CALL PFNGLUNIFORM1IVPROC glad_glUniform1iv;
+#define glUniform1iv glad_glUniform1iv
+GLAD_API_CALL PFNGLUNIFORM2FPROC glad_glUniform2f;
+#define glUniform2f glad_glUniform2f
+GLAD_API_CALL PFNGLUNIFORM2FVPROC glad_glUniform2fv;
+#define glUniform2fv glad_glUniform2fv
+GLAD_API_CALL PFNGLUNIFORM2IPROC glad_glUniform2i;
+#define glUniform2i glad_glUniform2i
+GLAD_API_CALL PFNGLUNIFORM2IVPROC glad_glUniform2iv;
+#define glUniform2iv glad_glUniform2iv
+GLAD_API_CALL PFNGLUNIFORM3FPROC glad_glUniform3f;
+#define glUniform3f glad_glUniform3f
+GLAD_API_CALL PFNGLUNIFORM3FVPROC glad_glUniform3fv;
+#define glUniform3fv glad_glUniform3fv
+GLAD_API_CALL PFNGLUNIFORM3IPROC glad_glUniform3i;
+#define glUniform3i glad_glUniform3i
+GLAD_API_CALL PFNGLUNIFORM3IVPROC glad_glUniform3iv;
+#define glUniform3iv glad_glUniform3iv
+GLAD_API_CALL PFNGLUNIFORM4FPROC glad_glUniform4f;
+#define glUniform4f glad_glUniform4f
+GLAD_API_CALL PFNGLUNIFORM4FVPROC glad_glUniform4fv;
+#define glUniform4fv glad_glUniform4fv
+GLAD_API_CALL PFNGLUNIFORM4IPROC glad_glUniform4i;
+#define glUniform4i glad_glUniform4i
+GLAD_API_CALL PFNGLUNIFORM4IVPROC glad_glUniform4iv;
+#define glUniform4iv glad_glUniform4iv
+GLAD_API_CALL PFNGLUNIFORMMATRIX2FVPROC glad_glUniformMatrix2fv;
+#define glUniformMatrix2fv glad_glUniformMatrix2fv
+GLAD_API_CALL PFNGLUNIFORMMATRIX3FVPROC glad_glUniformMatrix3fv;
+#define glUniformMatrix3fv glad_glUniformMatrix3fv
+GLAD_API_CALL PFNGLUNIFORMMATRIX4FVPROC glad_glUniformMatrix4fv;
+#define glUniformMatrix4fv glad_glUniformMatrix4fv
+GLAD_API_CALL PFNGLUSEPROGRAMPROC glad_glUseProgram;
+#define glUseProgram glad_glUseProgram
+GLAD_API_CALL PFNGLVALIDATEPROGRAMPROC glad_glValidateProgram;
+#define glValidateProgram glad_glValidateProgram
+GLAD_API_CALL PFNGLVERTEXATTRIB1FPROC glad_glVertexAttrib1f;
+#define glVertexAttrib1f glad_glVertexAttrib1f
+GLAD_API_CALL PFNGLVERTEXATTRIB1FVPROC glad_glVertexAttrib1fv;
+#define glVertexAttrib1fv glad_glVertexAttrib1fv
+GLAD_API_CALL PFNGLVERTEXATTRIB2FPROC glad_glVertexAttrib2f;
+#define glVertexAttrib2f glad_glVertexAttrib2f
+GLAD_API_CALL PFNGLVERTEXATTRIB2FVPROC glad_glVertexAttrib2fv;
+#define glVertexAttrib2fv glad_glVertexAttrib2fv
+GLAD_API_CALL PFNGLVERTEXATTRIB3FPROC glad_glVertexAttrib3f;
+#define glVertexAttrib3f glad_glVertexAttrib3f
+GLAD_API_CALL PFNGLVERTEXATTRIB3FVPROC glad_glVertexAttrib3fv;
+#define glVertexAttrib3fv glad_glVertexAttrib3fv
+GLAD_API_CALL PFNGLVERTEXATTRIB4FPROC glad_glVertexAttrib4f;
+#define glVertexAttrib4f glad_glVertexAttrib4f
+GLAD_API_CALL PFNGLVERTEXATTRIB4FVPROC glad_glVertexAttrib4fv;
+#define glVertexAttrib4fv glad_glVertexAttrib4fv
+GLAD_API_CALL PFNGLVERTEXATTRIBPOINTERPROC glad_glVertexAttribPointer;
+#define glVertexAttribPointer glad_glVertexAttribPointer
+GLAD_API_CALL PFNGLVIEWPORTPROC glad_glViewport;
+#define glViewport glad_glViewport
+
+
+
+
+
+GLAD_API_CALL int gladLoadGLES2UserPtr( GLADuserptrloadfunc load, void *userptr);
+GLAD_API_CALL int gladLoadGLES2( GLADloadfunc load);
+
+
+
+#ifdef __cplusplus
+}
+#endif
+#endif
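+
+/* Usage sketch for the entry points declared above, assuming an EGL context is
+ * current and eglGetProcAddress is available; the helper name is illustrative
+ * and not part of this header:
+ *
+ *     #include <EGL/egl.h>
+ *
+ *     static GLADapiproc egl_loader(const char *name) {
+ *         return (GLADapiproc) eglGetProcAddress(name);
+ *     }
+ *
+ *     int version = gladLoadGLES2(egl_loader);
+ *     Non-zero on success; unpack with GLAD_VERSION_MAJOR / GLAD_VERSION_MINOR.
+ *
+ * gladLoadGLES2UserPtr() behaves the same way but forwards its void *userptr to
+ * the supplied GLADuserptrloadfunc on every lookup. */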
+
+/* Source */
+#ifdef GLAD_GLES2_IMPLEMENTATION
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#ifndef GLAD_IMPL_UTIL_C_
+#define GLAD_IMPL_UTIL_C_
+
+#ifdef _MSC_VER
+#define GLAD_IMPL_UTIL_SSCANF sscanf_s
+#else
+#define GLAD_IMPL_UTIL_SSCANF sscanf
+#endif
+
+#endif /* GLAD_IMPL_UTIL_C_ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+
+int GLAD_GL_ES_VERSION_2_0 = 0;
+
+
+
+PFNGLACTIVETEXTUREPROC glad_glActiveTexture = NULL;
+PFNGLATTACHSHADERPROC glad_glAttachShader = NULL;
+PFNGLBINDATTRIBLOCATIONPROC glad_glBindAttribLocation = NULL;
+PFNGLBINDBUFFERPROC glad_glBindBuffer = NULL;
+PFNGLBINDFRAMEBUFFERPROC glad_glBindFramebuffer = NULL;
+PFNGLBINDRENDERBUFFERPROC glad_glBindRenderbuffer = NULL;
+PFNGLBINDTEXTUREPROC glad_glBindTexture = NULL;
+PFNGLBLENDCOLORPROC glad_glBlendColor = NULL;
+PFNGLBLENDEQUATIONPROC glad_glBlendEquation = NULL;
+PFNGLBLENDEQUATIONSEPARATEPROC glad_glBlendEquationSeparate = NULL;
+PFNGLBLENDFUNCPROC glad_glBlendFunc = NULL;
+PFNGLBLENDFUNCSEPARATEPROC glad_glBlendFuncSeparate = NULL;
+PFNGLBUFFERDATAPROC glad_glBufferData = NULL;
+PFNGLBUFFERSUBDATAPROC glad_glBufferSubData = NULL;
+PFNGLCHECKFRAMEBUFFERSTATUSPROC glad_glCheckFramebufferStatus = NULL;
+PFNGLCLEARPROC glad_glClear = NULL;
+PFNGLCLEARCOLORPROC glad_glClearColor = NULL;
+PFNGLCLEARDEPTHFPROC glad_glClearDepthf = NULL;
+PFNGLCLEARSTENCILPROC glad_glClearStencil = NULL;
+PFNGLCOLORMASKPROC glad_glColorMask = NULL;
+PFNGLCOMPILESHADERPROC glad_glCompileShader = NULL;
+PFNGLCOMPRESSEDTEXIMAGE2DPROC glad_glCompressedTexImage2D = NULL;
+PFNGLCOMPRESSEDTEXSUBIMAGE2DPROC glad_glCompressedTexSubImage2D = NULL;
+PFNGLCOPYTEXIMAGE2DPROC glad_glCopyTexImage2D = NULL;
+PFNGLCOPYTEXSUBIMAGE2DPROC glad_glCopyTexSubImage2D = NULL;
+PFNGLCREATEPROGRAMPROC glad_glCreateProgram = NULL;
+PFNGLCREATESHADERPROC glad_glCreateShader = NULL;
+PFNGLCULLFACEPROC glad_glCullFace = NULL;
+PFNGLDELETEBUFFERSPROC glad_glDeleteBuffers = NULL;
+PFNGLDELETEFRAMEBUFFERSPROC glad_glDeleteFramebuffers = NULL;
+PFNGLDELETEPROGRAMPROC glad_glDeleteProgram = NULL;
+PFNGLDELETERENDERBUFFERSPROC glad_glDeleteRenderbuffers = NULL;
+PFNGLDELETESHADERPROC glad_glDeleteShader = NULL;
+PFNGLDELETETEXTURESPROC glad_glDeleteTextures = NULL;
+PFNGLDEPTHFUNCPROC glad_glDepthFunc = NULL;
+PFNGLDEPTHMASKPROC glad_glDepthMask = NULL;
+PFNGLDEPTHRANGEFPROC glad_glDepthRangef = NULL;
+PFNGLDETACHSHADERPROC glad_glDetachShader = NULL;
+PFNGLDISABLEPROC glad_glDisable = NULL;
+PFNGLDISABLEVERTEXATTRIBARRAYPROC glad_glDisableVertexAttribArray = NULL;
+PFNGLDRAWARRAYSPROC glad_glDrawArrays = NULL;
+PFNGLDRAWELEMENTSPROC glad_glDrawElements = NULL;
+PFNGLENABLEPROC glad_glEnable = NULL;
+PFNGLENABLEVERTEXATTRIBARRAYPROC glad_glEnableVertexAttribArray = NULL;
+PFNGLFINISHPROC glad_glFinish = NULL;
+PFNGLFLUSHPROC glad_glFlush = NULL;
+PFNGLFRAMEBUFFERRENDERBUFFERPROC glad_glFramebufferRenderbuffer = NULL;
+PFNGLFRAMEBUFFERTEXTURE2DPROC glad_glFramebufferTexture2D = NULL;
+PFNGLFRONTFACEPROC glad_glFrontFace = NULL;
+PFNGLGENBUFFERSPROC glad_glGenBuffers = NULL;
+PFNGLGENFRAMEBUFFERSPROC glad_glGenFramebuffers = NULL;
+PFNGLGENRENDERBUFFERSPROC glad_glGenRenderbuffers = NULL;
+PFNGLGENTEXTURESPROC glad_glGenTextures = NULL;
+PFNGLGENERATEMIPMAPPROC glad_glGenerateMipmap = NULL;
+PFNGLGETACTIVEATTRIBPROC glad_glGetActiveAttrib = NULL;
+PFNGLGETACTIVEUNIFORMPROC glad_glGetActiveUniform = NULL;
+PFNGLGETATTACHEDSHADERSPROC glad_glGetAttachedShaders = NULL;
+PFNGLGETATTRIBLOCATIONPROC glad_glGetAttribLocation = NULL;
+PFNGLGETBOOLEANVPROC glad_glGetBooleanv = NULL;
+PFNGLGETBUFFERPARAMETERIVPROC glad_glGetBufferParameteriv = NULL;
+PFNGLGETERRORPROC glad_glGetError = NULL;
+PFNGLGETFLOATVPROC glad_glGetFloatv = NULL;
+PFNGLGETFRAMEBUFFERATTACHMENTPARAMETERIVPROC glad_glGetFramebufferAttachmentParameteriv = NULL;
+PFNGLGETINTEGERVPROC glad_glGetIntegerv = NULL;
+PFNGLGETPROGRAMINFOLOGPROC glad_glGetProgramInfoLog = NULL;
+PFNGLGETPROGRAMIVPROC glad_glGetProgramiv = NULL;
+PFNGLGETRENDERBUFFERPARAMETERIVPROC glad_glGetRenderbufferParameteriv = NULL;
+PFNGLGETSHADERINFOLOGPROC glad_glGetShaderInfoLog = NULL;
+PFNGLGETSHADERPRECISIONFORMATPROC glad_glGetShaderPrecisionFormat = NULL;
+PFNGLGETSHADERSOURCEPROC glad_glGetShaderSource = NULL;
+PFNGLGETSHADERIVPROC glad_glGetShaderiv = NULL;
+PFNGLGETSTRINGPROC glad_glGetString = NULL;
+PFNGLGETTEXPARAMETERFVPROC glad_glGetTexParameterfv = NULL;
+PFNGLGETTEXPARAMETERIVPROC glad_glGetTexParameteriv = NULL;
+PFNGLGETUNIFORMLOCATIONPROC glad_glGetUniformLocation = NULL;
+PFNGLGETUNIFORMFVPROC glad_glGetUniformfv = NULL;
+PFNGLGETUNIFORMIVPROC glad_glGetUniformiv = NULL;
+PFNGLGETVERTEXATTRIBPOINTERVPROC glad_glGetVertexAttribPointerv = NULL;
+PFNGLGETVERTEXATTRIBFVPROC glad_glGetVertexAttribfv = NULL;
+PFNGLGETVERTEXATTRIBIVPROC glad_glGetVertexAttribiv = NULL;
+PFNGLHINTPROC glad_glHint = NULL;
+PFNGLISBUFFERPROC glad_glIsBuffer = NULL;
+PFNGLISENABLEDPROC glad_glIsEnabled = NULL;
+PFNGLISFRAMEBUFFERPROC glad_glIsFramebuffer = NULL;
+PFNGLISPROGRAMPROC glad_glIsProgram = NULL;
+PFNGLISRENDERBUFFERPROC glad_glIsRenderbuffer = NULL;
+PFNGLISSHADERPROC glad_glIsShader = NULL;
+PFNGLISTEXTUREPROC glad_glIsTexture = NULL;
+PFNGLLINEWIDTHPROC glad_glLineWidth = NULL;
+PFNGLLINKPROGRAMPROC glad_glLinkProgram = NULL;
+PFNGLPIXELSTOREIPROC glad_glPixelStorei = NULL;
+PFNGLPOLYGONOFFSETPROC glad_glPolygonOffset = NULL;
+PFNGLREADPIXELSPROC glad_glReadPixels = NULL;
+PFNGLRELEASESHADERCOMPILERPROC glad_glReleaseShaderCompiler = NULL;
+PFNGLRENDERBUFFERSTORAGEPROC glad_glRenderbufferStorage = NULL;
+PFNGLSAMPLECOVERAGEPROC glad_glSampleCoverage = NULL;
+PFNGLSCISSORPROC glad_glScissor = NULL;
+PFNGLSHADERBINARYPROC glad_glShaderBinary = NULL;
+PFNGLSHADERSOURCEPROC glad_glShaderSource = NULL;
+PFNGLSTENCILFUNCPROC glad_glStencilFunc = NULL;
+PFNGLSTENCILFUNCSEPARATEPROC glad_glStencilFuncSeparate = NULL;
+PFNGLSTENCILMASKPROC glad_glStencilMask = NULL;
+PFNGLSTENCILMASKSEPARATEPROC glad_glStencilMaskSeparate = NULL;
+PFNGLSTENCILOPPROC glad_glStencilOp = NULL;
+PFNGLSTENCILOPSEPARATEPROC glad_glStencilOpSeparate = NULL;
+PFNGLTEXIMAGE2DPROC glad_glTexImage2D = NULL;
+PFNGLTEXPARAMETERFPROC glad_glTexParameterf = NULL;
+PFNGLTEXPARAMETERFVPROC glad_glTexParameterfv = NULL;
+PFNGLTEXPARAMETERIPROC glad_glTexParameteri = NULL;
+PFNGLTEXPARAMETERIVPROC glad_glTexParameteriv = NULL;
+PFNGLTEXSUBIMAGE2DPROC glad_glTexSubImage2D = NULL;
+PFNGLUNIFORM1FPROC glad_glUniform1f = NULL;
+PFNGLUNIFORM1FVPROC glad_glUniform1fv = NULL;
+PFNGLUNIFORM1IPROC glad_glUniform1i = NULL;
+PFNGLUNIFORM1IVPROC glad_glUniform1iv = NULL;
+PFNGLUNIFORM2FPROC glad_glUniform2f = NULL;
+PFNGLUNIFORM2FVPROC glad_glUniform2fv = NULL;
+PFNGLUNIFORM2IPROC glad_glUniform2i = NULL;
+PFNGLUNIFORM2IVPROC glad_glUniform2iv = NULL;
+PFNGLUNIFORM3FPROC glad_glUniform3f = NULL;
+PFNGLUNIFORM3FVPROC glad_glUniform3fv = NULL;
+PFNGLUNIFORM3IPROC glad_glUniform3i = NULL;
+PFNGLUNIFORM3IVPROC glad_glUniform3iv = NULL;
+PFNGLUNIFORM4FPROC glad_glUniform4f = NULL;
+PFNGLUNIFORM4FVPROC glad_glUniform4fv = NULL;
+PFNGLUNIFORM4IPROC glad_glUniform4i = NULL;
+PFNGLUNIFORM4IVPROC glad_glUniform4iv = NULL;
+PFNGLUNIFORMMATRIX2FVPROC glad_glUniformMatrix2fv = NULL;
+PFNGLUNIFORMMATRIX3FVPROC glad_glUniformMatrix3fv = NULL;
+PFNGLUNIFORMMATRIX4FVPROC glad_glUniformMatrix4fv = NULL;
+PFNGLUSEPROGRAMPROC glad_glUseProgram = NULL;
+PFNGLVALIDATEPROGRAMPROC glad_glValidateProgram = NULL;
+PFNGLVERTEXATTRIB1FPROC glad_glVertexAttrib1f = NULL;
+PFNGLVERTEXATTRIB1FVPROC glad_glVertexAttrib1fv = NULL;
+PFNGLVERTEXATTRIB2FPROC glad_glVertexAttrib2f = NULL;
+PFNGLVERTEXATTRIB2FVPROC glad_glVertexAttrib2fv = NULL;
+PFNGLVERTEXATTRIB3FPROC glad_glVertexAttrib3f = NULL;
+PFNGLVERTEXATTRIB3FVPROC glad_glVertexAttrib3fv = NULL;
+PFNGLVERTEXATTRIB4FPROC glad_glVertexAttrib4f = NULL;
+PFNGLVERTEXATTRIB4FVPROC glad_glVertexAttrib4fv = NULL;
+PFNGLVERTEXATTRIBPOINTERPROC glad_glVertexAttribPointer = NULL;
+PFNGLVIEWPORTPROC glad_glViewport = NULL;
+
+
+static void glad_gl_load_GL_ES_VERSION_2_0( GLADuserptrloadfunc load, void* userptr) {
+ if(!GLAD_GL_ES_VERSION_2_0) return;
+ glad_glActiveTexture = (PFNGLACTIVETEXTUREPROC) load(userptr, "glActiveTexture");
+ glad_glAttachShader = (PFNGLATTACHSHADERPROC) load(userptr, "glAttachShader");
+ glad_glBindAttribLocation = (PFNGLBINDATTRIBLOCATIONPROC) load(userptr, "glBindAttribLocation");
+ glad_glBindBuffer = (PFNGLBINDBUFFERPROC) load(userptr, "glBindBuffer");
+ glad_glBindFramebuffer = (PFNGLBINDFRAMEBUFFERPROC) load(userptr, "glBindFramebuffer");
+ glad_glBindRenderbuffer = (PFNGLBINDRENDERBUFFERPROC) load(userptr, "glBindRenderbuffer");
+ glad_glBindTexture = (PFNGLBINDTEXTUREPROC) load(userptr, "glBindTexture");
+ glad_glBlendColor = (PFNGLBLENDCOLORPROC) load(userptr, "glBlendColor");
+ glad_glBlendEquation = (PFNGLBLENDEQUATIONPROC) load(userptr, "glBlendEquation");
+ glad_glBlendEquationSeparate = (PFNGLBLENDEQUATIONSEPARATEPROC) load(userptr, "glBlendEquationSeparate");
+ glad_glBlendFunc = (PFNGLBLENDFUNCPROC) load(userptr, "glBlendFunc");
+ glad_glBlendFuncSeparate = (PFNGLBLENDFUNCSEPARATEPROC) load(userptr, "glBlendFuncSeparate");
+ glad_glBufferData = (PFNGLBUFFERDATAPROC) load(userptr, "glBufferData");
+ glad_glBufferSubData = (PFNGLBUFFERSUBDATAPROC) load(userptr, "glBufferSubData");
+ glad_glCheckFramebufferStatus = (PFNGLCHECKFRAMEBUFFERSTATUSPROC) load(userptr, "glCheckFramebufferStatus");
+ glad_glClear = (PFNGLCLEARPROC) load(userptr, "glClear");
+ glad_glClearColor = (PFNGLCLEARCOLORPROC) load(userptr, "glClearColor");
+ glad_glClearDepthf = (PFNGLCLEARDEPTHFPROC) load(userptr, "glClearDepthf");
+ glad_glClearStencil = (PFNGLCLEARSTENCILPROC) load(userptr, "glClearStencil");
+ glad_glColorMask = (PFNGLCOLORMASKPROC) load(userptr, "glColorMask");
+ glad_glCompileShader = (PFNGLCOMPILESHADERPROC) load(userptr, "glCompileShader");
+ glad_glCompressedTexImage2D = (PFNGLCOMPRESSEDTEXIMAGE2DPROC) load(userptr, "glCompressedTexImage2D");
+ glad_glCompressedTexSubImage2D = (PFNGLCOMPRESSEDTEXSUBIMAGE2DPROC) load(userptr, "glCompressedTexSubImage2D");
+ glad_glCopyTexImage2D = (PFNGLCOPYTEXIMAGE2DPROC) load(userptr, "glCopyTexImage2D");
+ glad_glCopyTexSubImage2D = (PFNGLCOPYTEXSUBIMAGE2DPROC) load(userptr, "glCopyTexSubImage2D");
+ glad_glCreateProgram = (PFNGLCREATEPROGRAMPROC) load(userptr, "glCreateProgram");
+ glad_glCreateShader = (PFNGLCREATESHADERPROC) load(userptr, "glCreateShader");
+ glad_glCullFace = (PFNGLCULLFACEPROC) load(userptr, "glCullFace");
+ glad_glDeleteBuffers = (PFNGLDELETEBUFFERSPROC) load(userptr, "glDeleteBuffers");
+ glad_glDeleteFramebuffers = (PFNGLDELETEFRAMEBUFFERSPROC) load(userptr, "glDeleteFramebuffers");
+ glad_glDeleteProgram = (PFNGLDELETEPROGRAMPROC) load(userptr, "glDeleteProgram");
+ glad_glDeleteRenderbuffers = (PFNGLDELETERENDERBUFFERSPROC) load(userptr, "glDeleteRenderbuffers");
+ glad_glDeleteShader = (PFNGLDELETESHADERPROC) load(userptr, "glDeleteShader");
+ glad_glDeleteTextures = (PFNGLDELETETEXTURESPROC) load(userptr, "glDeleteTextures");
+ glad_glDepthFunc = (PFNGLDEPTHFUNCPROC) load(userptr, "glDepthFunc");
+ glad_glDepthMask = (PFNGLDEPTHMASKPROC) load(userptr, "glDepthMask");
+ glad_glDepthRangef = (PFNGLDEPTHRANGEFPROC) load(userptr, "glDepthRangef");
+ glad_glDetachShader = (PFNGLDETACHSHADERPROC) load(userptr, "glDetachShader");
+ glad_glDisable = (PFNGLDISABLEPROC) load(userptr, "glDisable");
+ glad_glDisableVertexAttribArray = (PFNGLDISABLEVERTEXATTRIBARRAYPROC) load(userptr, "glDisableVertexAttribArray");
+ glad_glDrawArrays = (PFNGLDRAWARRAYSPROC) load(userptr, "glDrawArrays");
+ glad_glDrawElements = (PFNGLDRAWELEMENTSPROC) load(userptr, "glDrawElements");
+ glad_glEnable = (PFNGLENABLEPROC) load(userptr, "glEnable");
+ glad_glEnableVertexAttribArray = (PFNGLENABLEVERTEXATTRIBARRAYPROC) load(userptr, "glEnableVertexAttribArray");
+ glad_glFinish = (PFNGLFINISHPROC) load(userptr, "glFinish");
+ glad_glFlush = (PFNGLFLUSHPROC) load(userptr, "glFlush");
+ glad_glFramebufferRenderbuffer = (PFNGLFRAMEBUFFERRENDERBUFFERPROC) load(userptr, "glFramebufferRenderbuffer");
+ glad_glFramebufferTexture2D = (PFNGLFRAMEBUFFERTEXTURE2DPROC) load(userptr, "glFramebufferTexture2D");
+ glad_glFrontFace = (PFNGLFRONTFACEPROC) load(userptr, "glFrontFace");
+ glad_glGenBuffers = (PFNGLGENBUFFERSPROC) load(userptr, "glGenBuffers");
+ glad_glGenFramebuffers = (PFNGLGENFRAMEBUFFERSPROC) load(userptr, "glGenFramebuffers");
+ glad_glGenRenderbuffers = (PFNGLGENRENDERBUFFERSPROC) load(userptr, "glGenRenderbuffers");
+ glad_glGenTextures = (PFNGLGENTEXTURESPROC) load(userptr, "glGenTextures");
+ glad_glGenerateMipmap = (PFNGLGENERATEMIPMAPPROC) load(userptr, "glGenerateMipmap");
+ glad_glGetActiveAttrib = (PFNGLGETACTIVEATTRIBPROC) load(userptr, "glGetActiveAttrib");
+ glad_glGetActiveUniform = (PFNGLGETACTIVEUNIFORMPROC) load(userptr, "glGetActiveUniform");
+ glad_glGetAttachedShaders = (PFNGLGETATTACHEDSHADERSPROC) load(userptr, "glGetAttachedShaders");
+ glad_glGetAttribLocation = (PFNGLGETATTRIBLOCATIONPROC) load(userptr, "glGetAttribLocation");
+ glad_glGetBooleanv = (PFNGLGETBOOLEANVPROC) load(userptr, "glGetBooleanv");
+ glad_glGetBufferParameteriv = (PFNGLGETBUFFERPARAMETERIVPROC) load(userptr, "glGetBufferParameteriv");
+ glad_glGetError = (PFNGLGETERRORPROC) load(userptr, "glGetError");
+ glad_glGetFloatv = (PFNGLGETFLOATVPROC) load(userptr, "glGetFloatv");
+ glad_glGetFramebufferAttachmentParameteriv = (PFNGLGETFRAMEBUFFERATTACHMENTPARAMETERIVPROC) load(userptr, "glGetFramebufferAttachmentParameteriv");
+ glad_glGetIntegerv = (PFNGLGETINTEGERVPROC) load(userptr, "glGetIntegerv");
+ glad_glGetProgramInfoLog = (PFNGLGETPROGRAMINFOLOGPROC) load(userptr, "glGetProgramInfoLog");
+ glad_glGetProgramiv = (PFNGLGETPROGRAMIVPROC) load(userptr, "glGetProgramiv");
+ glad_glGetRenderbufferParameteriv = (PFNGLGETRENDERBUFFERPARAMETERIVPROC) load(userptr, "glGetRenderbufferParameteriv");
+ glad_glGetShaderInfoLog = (PFNGLGETSHADERINFOLOGPROC) load(userptr, "glGetShaderInfoLog");
+ glad_glGetShaderPrecisionFormat = (PFNGLGETSHADERPRECISIONFORMATPROC) load(userptr, "glGetShaderPrecisionFormat");
+ glad_glGetShaderSource = (PFNGLGETSHADERSOURCEPROC) load(userptr, "glGetShaderSource");
+ glad_glGetShaderiv = (PFNGLGETSHADERIVPROC) load(userptr, "glGetShaderiv");
+ glad_glGetString = (PFNGLGETSTRINGPROC) load(userptr, "glGetString");
+ glad_glGetTexParameterfv = (PFNGLGETTEXPARAMETERFVPROC) load(userptr, "glGetTexParameterfv");
+ glad_glGetTexParameteriv = (PFNGLGETTEXPARAMETERIVPROC) load(userptr, "glGetTexParameteriv");
+ glad_glGetUniformLocation = (PFNGLGETUNIFORMLOCATIONPROC) load(userptr, "glGetUniformLocation");
+ glad_glGetUniformfv = (PFNGLGETUNIFORMFVPROC) load(userptr, "glGetUniformfv");
+ glad_glGetUniformiv = (PFNGLGETUNIFORMIVPROC) load(userptr, "glGetUniformiv");
+ glad_glGetVertexAttribPointerv = (PFNGLGETVERTEXATTRIBPOINTERVPROC) load(userptr, "glGetVertexAttribPointerv");
+ glad_glGetVertexAttribfv = (PFNGLGETVERTEXATTRIBFVPROC) load(userptr, "glGetVertexAttribfv");
+ glad_glGetVertexAttribiv = (PFNGLGETVERTEXATTRIBIVPROC) load(userptr, "glGetVertexAttribiv");
+ glad_glHint = (PFNGLHINTPROC) load(userptr, "glHint");
+ glad_glIsBuffer = (PFNGLISBUFFERPROC) load(userptr, "glIsBuffer");
+ glad_glIsEnabled = (PFNGLISENABLEDPROC) load(userptr, "glIsEnabled");
+ glad_glIsFramebuffer = (PFNGLISFRAMEBUFFERPROC) load(userptr, "glIsFramebuffer");
+ glad_glIsProgram = (PFNGLISPROGRAMPROC) load(userptr, "glIsProgram");
+ glad_glIsRenderbuffer = (PFNGLISRENDERBUFFERPROC) load(userptr, "glIsRenderbuffer");
+ glad_glIsShader = (PFNGLISSHADERPROC) load(userptr, "glIsShader");
+ glad_glIsTexture = (PFNGLISTEXTUREPROC) load(userptr, "glIsTexture");
+ glad_glLineWidth = (PFNGLLINEWIDTHPROC) load(userptr, "glLineWidth");
+ glad_glLinkProgram = (PFNGLLINKPROGRAMPROC) load(userptr, "glLinkProgram");
+ glad_glPixelStorei = (PFNGLPIXELSTOREIPROC) load(userptr, "glPixelStorei");
+ glad_glPolygonOffset = (PFNGLPOLYGONOFFSETPROC) load(userptr, "glPolygonOffset");
+ glad_glReadPixels = (PFNGLREADPIXELSPROC) load(userptr, "glReadPixels");
+ glad_glReleaseShaderCompiler = (PFNGLRELEASESHADERCOMPILERPROC) load(userptr, "glReleaseShaderCompiler");
+ glad_glRenderbufferStorage = (PFNGLRENDERBUFFERSTORAGEPROC) load(userptr, "glRenderbufferStorage");
+ glad_glSampleCoverage = (PFNGLSAMPLECOVERAGEPROC) load(userptr, "glSampleCoverage");
+ glad_glScissor = (PFNGLSCISSORPROC) load(userptr, "glScissor");
+ glad_glShaderBinary = (PFNGLSHADERBINARYPROC) load(userptr, "glShaderBinary");
+ glad_glShaderSource = (PFNGLSHADERSOURCEPROC) load(userptr, "glShaderSource");
+ glad_glStencilFunc = (PFNGLSTENCILFUNCPROC) load(userptr, "glStencilFunc");
+ glad_glStencilFuncSeparate = (PFNGLSTENCILFUNCSEPARATEPROC) load(userptr, "glStencilFuncSeparate");
+ glad_glStencilMask = (PFNGLSTENCILMASKPROC) load(userptr, "glStencilMask");
+ glad_glStencilMaskSeparate = (PFNGLSTENCILMASKSEPARATEPROC) load(userptr, "glStencilMaskSeparate");
+ glad_glStencilOp = (PFNGLSTENCILOPPROC) load(userptr, "glStencilOp");
+ glad_glStencilOpSeparate = (PFNGLSTENCILOPSEPARATEPROC) load(userptr, "glStencilOpSeparate");
+ glad_glTexImage2D = (PFNGLTEXIMAGE2DPROC) load(userptr, "glTexImage2D");
+ glad_glTexParameterf = (PFNGLTEXPARAMETERFPROC) load(userptr, "glTexParameterf");
+ glad_glTexParameterfv = (PFNGLTEXPARAMETERFVPROC) load(userptr, "glTexParameterfv");
+ glad_glTexParameteri = (PFNGLTEXPARAMETERIPROC) load(userptr, "glTexParameteri");
+ glad_glTexParameteriv = (PFNGLTEXPARAMETERIVPROC) load(userptr, "glTexParameteriv");
+ glad_glTexSubImage2D = (PFNGLTEXSUBIMAGE2DPROC) load(userptr, "glTexSubImage2D");
+ glad_glUniform1f = (PFNGLUNIFORM1FPROC) load(userptr, "glUniform1f");
+ glad_glUniform1fv = (PFNGLUNIFORM1FVPROC) load(userptr, "glUniform1fv");
+ glad_glUniform1i = (PFNGLUNIFORM1IPROC) load(userptr, "glUniform1i");
+ glad_glUniform1iv = (PFNGLUNIFORM1IVPROC) load(userptr, "glUniform1iv");
+ glad_glUniform2f = (PFNGLUNIFORM2FPROC) load(userptr, "glUniform2f");
+ glad_glUniform2fv = (PFNGLUNIFORM2FVPROC) load(userptr, "glUniform2fv");
+ glad_glUniform2i = (PFNGLUNIFORM2IPROC) load(userptr, "glUniform2i");
+ glad_glUniform2iv = (PFNGLUNIFORM2IVPROC) load(userptr, "glUniform2iv");
+ glad_glUniform3f = (PFNGLUNIFORM3FPROC) load(userptr, "glUniform3f");
+ glad_glUniform3fv = (PFNGLUNIFORM3FVPROC) load(userptr, "glUniform3fv");
+ glad_glUniform3i = (PFNGLUNIFORM3IPROC) load(userptr, "glUniform3i");
+ glad_glUniform3iv = (PFNGLUNIFORM3IVPROC) load(userptr, "glUniform3iv");
+ glad_glUniform4f = (PFNGLUNIFORM4FPROC) load(userptr, "glUniform4f");
+ glad_glUniform4fv = (PFNGLUNIFORM4FVPROC) load(userptr, "glUniform4fv");
+ glad_glUniform4i = (PFNGLUNIFORM4IPROC) load(userptr, "glUniform4i");
+ glad_glUniform4iv = (PFNGLUNIFORM4IVPROC) load(userptr, "glUniform4iv");
+ glad_glUniformMatrix2fv = (PFNGLUNIFORMMATRIX2FVPROC) load(userptr, "glUniformMatrix2fv");
+ glad_glUniformMatrix3fv = (PFNGLUNIFORMMATRIX3FVPROC) load(userptr, "glUniformMatrix3fv");
+ glad_glUniformMatrix4fv = (PFNGLUNIFORMMATRIX4FVPROC) load(userptr, "glUniformMatrix4fv");
+ glad_glUseProgram = (PFNGLUSEPROGRAMPROC) load(userptr, "glUseProgram");
+ glad_glValidateProgram = (PFNGLVALIDATEPROGRAMPROC) load(userptr, "glValidateProgram");
+ glad_glVertexAttrib1f = (PFNGLVERTEXATTRIB1FPROC) load(userptr, "glVertexAttrib1f");
+ glad_glVertexAttrib1fv = (PFNGLVERTEXATTRIB1FVPROC) load(userptr, "glVertexAttrib1fv");
+ glad_glVertexAttrib2f = (PFNGLVERTEXATTRIB2FPROC) load(userptr, "glVertexAttrib2f");
+ glad_glVertexAttrib2fv = (PFNGLVERTEXATTRIB2FVPROC) load(userptr, "glVertexAttrib2fv");
+ glad_glVertexAttrib3f = (PFNGLVERTEXATTRIB3FPROC) load(userptr, "glVertexAttrib3f");
+ glad_glVertexAttrib3fv = (PFNGLVERTEXATTRIB3FVPROC) load(userptr, "glVertexAttrib3fv");
+ glad_glVertexAttrib4f = (PFNGLVERTEXATTRIB4FPROC) load(userptr, "glVertexAttrib4f");
+ glad_glVertexAttrib4fv = (PFNGLVERTEXATTRIB4FVPROC) load(userptr, "glVertexAttrib4fv");
+ glad_glVertexAttribPointer = (PFNGLVERTEXATTRIBPOINTERPROC) load(userptr, "glVertexAttribPointer");
+ glad_glViewport = (PFNGLVIEWPORTPROC) load(userptr, "glViewport");
+}
+
+
+
+#if defined(GL_ES_VERSION_3_0) || defined(GL_VERSION_3_0)
+#define GLAD_GL_IS_SOME_NEW_VERSION 1
+#else
+#define GLAD_GL_IS_SOME_NEW_VERSION 0
+#endif
+
+static int glad_gl_get_extensions( int version, const char **out_exts, unsigned int *out_num_exts_i, char ***out_exts_i) {
+#if GLAD_GL_IS_SOME_NEW_VERSION
+ if(GLAD_VERSION_MAJOR(version) < 3) {
+#else
+ (void) version;
+ (void) out_num_exts_i;
+ (void) out_exts_i;
+#endif
+ if (glad_glGetString == NULL) {
+ return 0;
+ }
+ *out_exts = (const char *)glad_glGetString(GL_EXTENSIONS);
+#if GLAD_GL_IS_SOME_NEW_VERSION
+ } else {
+ unsigned int index = 0;
+ unsigned int num_exts_i = 0;
+ char **exts_i = NULL;
+ if (glad_glGetStringi == NULL || glad_glGetIntegerv == NULL) {
+ return 0;
+ }
+ glad_glGetIntegerv(GL_NUM_EXTENSIONS, (int*) &num_exts_i);
+ if (num_exts_i > 0) {
+ exts_i = (char **) malloc(num_exts_i * (sizeof *exts_i));
+ }
+ if (exts_i == NULL) {
+ return 0;
+ }
+ for(index = 0; index < num_exts_i; index++) {
+ const char *gl_str_tmp = (const char*) glad_glGetStringi(GL_EXTENSIONS, index);
+ size_t len = strlen(gl_str_tmp) + 1;
+
+ char *local_str = (char*) malloc(len * sizeof(char));
+ if(local_str != NULL) {
+ memcpy(local_str, gl_str_tmp, len * sizeof(char));
+ }
+
+ exts_i[index] = local_str;
+ }
+
+ *out_num_exts_i = num_exts_i;
+ *out_exts_i = exts_i;
+ }
+#endif
+ return 1;
+}
+static void glad_gl_free_extensions(char **exts_i, unsigned int num_exts_i) {
+ if (exts_i != NULL) {
+ unsigned int index;
+ for(index = 0; index < num_exts_i; index++) {
+ free((void *) (exts_i[index]));
+ }
+ free((void *)exts_i);
+ exts_i = NULL;
+ }
+}
+static int glad_gl_has_extension(int version, const char *exts, unsigned int num_exts_i, char **exts_i, const char *ext) {
+ if(GLAD_VERSION_MAJOR(version) < 3 || !GLAD_GL_IS_SOME_NEW_VERSION) {
+ const char *extensions;
+ const char *loc;
+ const char *terminator;
+ extensions = exts;
+ if(extensions == NULL || ext == NULL) {
+ return 0;
+ }
+ while(1) {
+ loc = strstr(extensions, ext);
+ if(loc == NULL) {
+ return 0;
+ }
+ terminator = loc + strlen(ext);
+ if((loc == extensions || *(loc - 1) == ' ') &&
+ (*terminator == ' ' || *terminator == '\0')) {
+ return 1;
+ }
+ extensions = terminator;
+ }
+ } else {
+ unsigned int index;
+ for(index = 0; index < num_exts_i; index++) {
+ const char *e = exts_i[index];
+ if(strcmp(e, ext) == 0) {
+ return 1;
+ }
+ }
+ }
+ return 0;
+}
+
+static GLADapiproc glad_gl_get_proc_from_userptr(void *userptr, const char* name) {
+ return (GLAD_GNUC_EXTENSION (GLADapiproc (*)(const char *name)) userptr)(name);
+}
+
+static int glad_gl_find_extensions_gles2( int version) {
+ const char *exts = NULL;
+ unsigned int num_exts_i = 0;
+ char **exts_i = NULL;
+ if (!glad_gl_get_extensions(version, &exts, &num_exts_i, &exts_i)) return 0;
+
+ (void) glad_gl_has_extension;
+
+ glad_gl_free_extensions(exts_i, num_exts_i);
+
+ return 1;
+}
+
+static int glad_gl_find_core_gles2(void) {
+ int i;
+ const char* version;
+ const char* prefixes[] = {
+ "OpenGL ES-CM ",
+ "OpenGL ES-CL ",
+ "OpenGL ES ",
+ "OpenGL SC ",
+ NULL
+ };
+ int major = 0;
+ int minor = 0;
+ version = (const char*) glad_glGetString(GL_VERSION);
+ if (!version) return 0;
+ for (i = 0; prefixes[i]; i++) {
+ const size_t length = strlen(prefixes[i]);
+ if (strncmp(version, prefixes[i], length) == 0) {
+ version += length;
+ break;
+ }
+ }
+
+ GLAD_IMPL_UTIL_SSCANF(version, "%d.%d", &major, &minor);
+
+ GLAD_GL_ES_VERSION_2_0 = (major == 2 && minor >= 0) || major > 2;
+
+ return GLAD_MAKE_VERSION(major, minor);
+}
+
+int gladLoadGLES2UserPtr( GLADuserptrloadfunc load, void *userptr) {
+ int version;
+
+ glad_glGetString = (PFNGLGETSTRINGPROC) load(userptr, "glGetString");
+ if(glad_glGetString == NULL) return 0;
+ if(glad_glGetString(GL_VERSION) == NULL) return 0;
+ version = glad_gl_find_core_gles2();
+
+ glad_gl_load_GL_ES_VERSION_2_0(load, userptr);
+
+ if (!glad_gl_find_extensions_gles2(version)) return 0;
+
+
+
+ return version;
+}
+
+
+int gladLoadGLES2( GLADloadfunc load) {
+ return gladLoadGLES2UserPtr( glad_gl_get_proc_from_userptr, GLAD_GNUC_EXTENSION (void*) load);
+}
+
+
+
+
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* GLAD_GLES2_IMPLEMENTATION */
+
diff --git a/chromium/third_party/dawn/third_party/glfw/deps/glad/vulkan.h b/chromium/third_party/dawn/third_party/glfw/deps/glad/vulkan.h
new file mode 100644
index 00000000000..9e78dad9293
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/deps/glad/vulkan.h
@@ -0,0 +1,4612 @@
+/**
+ * Loader generated by glad 2.0.0-beta on Wed Aug 25 21:20:29 2021
+ *
+ * Generator: C/C++
+ * Specification: vk
+ * Extensions: 3
+ *
+ * APIs:
+ * - vulkan=1.1
+ *
+ * Options:
+ * - ALIAS = False
+ * - DEBUG = False
+ * - HEADER_ONLY = True
+ * - LOADER = False
+ * - MX = False
+ * - MX_GLOBAL = False
+ * - ON_DEMAND = False
+ *
+ * Commandline:
+ * --api='vulkan=1.1' --extensions='VK_EXT_debug_report,VK_KHR_surface,VK_KHR_swapchain' c --header-only
+ *
+ * Online:
+ * http://glad.sh/#api=vulkan%3D1.1&extensions=VK_EXT_debug_report%2CVK_KHR_surface%2CVK_KHR_swapchain&generator=c&options=HEADER_ONLY
+ *
+ */
+
+#ifndef GLAD_VULKAN_H_
+#define GLAD_VULKAN_H_
+
+#ifdef VULKAN_H_
+ #error header already included (API: vulkan), remove previous include!
+#endif
+#define VULKAN_H_ 1
+
+#ifdef VULKAN_CORE_H_
+ #error header already included (API: vulkan), remove previous include!
+#endif
+#define VULKAN_CORE_H_ 1
+
+
+#define GLAD_VULKAN
+#define GLAD_OPTION_VULKAN_HEADER_ONLY
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef GLAD_PLATFORM_H_
+#define GLAD_PLATFORM_H_
+
+#ifndef GLAD_PLATFORM_WIN32
+ #if defined(_WIN32) || defined(__WIN32__) || defined(WIN32) || defined(__MINGW32__)
+ #define GLAD_PLATFORM_WIN32 1
+ #else
+ #define GLAD_PLATFORM_WIN32 0
+ #endif
+#endif
+
+#ifndef GLAD_PLATFORM_APPLE
+ #ifdef __APPLE__
+ #define GLAD_PLATFORM_APPLE 1
+ #else
+ #define GLAD_PLATFORM_APPLE 0
+ #endif
+#endif
+
+#ifndef GLAD_PLATFORM_EMSCRIPTEN
+ #ifdef __EMSCRIPTEN__
+ #define GLAD_PLATFORM_EMSCRIPTEN 1
+ #else
+ #define GLAD_PLATFORM_EMSCRIPTEN 0
+ #endif
+#endif
+
+#ifndef GLAD_PLATFORM_UWP
+ #if defined(_MSC_VER) && !defined(GLAD_INTERNAL_HAVE_WINAPIFAMILY)
+ #ifdef __has_include
+ #if __has_include(<winapifamily.h>)
+ #define GLAD_INTERNAL_HAVE_WINAPIFAMILY 1
+ #endif
+ #elif _MSC_VER >= 1700 && !_USING_V110_SDK71_
+ #define GLAD_INTERNAL_HAVE_WINAPIFAMILY 1
+ #endif
+ #endif
+
+ #ifdef GLAD_INTERNAL_HAVE_WINAPIFAMILY
+ #include <winapifamily.h>
+ #if !WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP) && WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP)
+ #define GLAD_PLATFORM_UWP 1
+ #endif
+ #endif
+
+ #ifndef GLAD_PLATFORM_UWP
+ #define GLAD_PLATFORM_UWP 0
+ #endif
+#endif
+
+#ifdef __GNUC__
+ #define GLAD_GNUC_EXTENSION __extension__
+#else
+ #define GLAD_GNUC_EXTENSION
+#endif
+
+#ifndef GLAD_API_CALL
+ #if defined(GLAD_API_CALL_EXPORT)
+ #if GLAD_PLATFORM_WIN32 || defined(__CYGWIN__)
+ #if defined(GLAD_API_CALL_EXPORT_BUILD)
+ #if defined(__GNUC__)
+ #define GLAD_API_CALL __attribute__ ((dllexport)) extern
+ #else
+ #define GLAD_API_CALL __declspec(dllexport) extern
+ #endif
+ #else
+ #if defined(__GNUC__)
+ #define GLAD_API_CALL __attribute__ ((dllimport)) extern
+ #else
+ #define GLAD_API_CALL __declspec(dllimport) extern
+ #endif
+ #endif
+ #elif defined(__GNUC__) && defined(GLAD_API_CALL_EXPORT_BUILD)
+ #define GLAD_API_CALL __attribute__ ((visibility ("default"))) extern
+ #else
+ #define GLAD_API_CALL extern
+ #endif
+ #else
+ #define GLAD_API_CALL extern
+ #endif
+#endif
+
+#ifdef APIENTRY
+ #define GLAD_API_PTR APIENTRY
+#elif GLAD_PLATFORM_WIN32
+ #define GLAD_API_PTR __stdcall
+#else
+ #define GLAD_API_PTR
+#endif
+
+#ifndef GLAPI
+#define GLAPI GLAD_API_CALL
+#endif
+
+#ifndef GLAPIENTRY
+#define GLAPIENTRY GLAD_API_PTR
+#endif
+
+#define GLAD_MAKE_VERSION(major, minor) (major * 10000 + minor)
+#define GLAD_VERSION_MAJOR(version) (version / 10000)
+#define GLAD_VERSION_MINOR(version) (version % 10000)
+
+#define GLAD_GENERATOR_VERSION "2.0.0-beta"
+
+typedef void (*GLADapiproc)(void);
+
+typedef GLADapiproc (*GLADloadfunc)(const char *name);
+typedef GLADapiproc (*GLADuserptrloadfunc)(void *userptr, const char *name);
+
+typedef void (*GLADprecallback)(const char *name, GLADapiproc apiproc, int len_args, ...);
+typedef void (*GLADpostcallback)(void *ret, const char *name, GLADapiproc apiproc, int len_args, ...);
+
+#endif /* GLAD_PLATFORM_H_ */
+
+#define VK_ATTACHMENT_UNUSED (~0U)
+#define VK_EXT_DEBUG_REPORT_EXTENSION_NAME "VK_EXT_debug_report"
+#define VK_EXT_DEBUG_REPORT_SPEC_VERSION 9
+#define VK_FALSE 0
+#define VK_KHR_SURFACE_EXTENSION_NAME "VK_KHR_surface"
+#define VK_KHR_SURFACE_SPEC_VERSION 25
+#define VK_KHR_SWAPCHAIN_EXTENSION_NAME "VK_KHR_swapchain"
+#define VK_KHR_SWAPCHAIN_SPEC_VERSION 70
+#define VK_LOD_CLAMP_NONE 1000.0f
+#define VK_LUID_SIZE 8
+#define VK_MAX_DESCRIPTION_SIZE 256
+#define VK_MAX_DEVICE_GROUP_SIZE 32
+#define VK_MAX_EXTENSION_NAME_SIZE 256
+#define VK_MAX_MEMORY_HEAPS 16
+#define VK_MAX_MEMORY_TYPES 32
+#define VK_MAX_PHYSICAL_DEVICE_NAME_SIZE 256
+#define VK_QUEUE_FAMILY_EXTERNAL (~0U-1)
+#define VK_QUEUE_FAMILY_IGNORED (~0U)
+#define VK_REMAINING_ARRAY_LAYERS (~0U)
+#define VK_REMAINING_MIP_LEVELS (~0U)
+#define VK_SUBPASS_EXTERNAL (~0U)
+#define VK_TRUE 1
+#define VK_UUID_SIZE 16
+#define VK_WHOLE_SIZE (~0ULL)
+
+
+/* */
+/* File: vk_platform.h */
+/* */
+/*
+** Copyright (c) 2014-2020 The Khronos Group Inc.
+**
+** SPDX-License-Identifier: Apache-2.0
+*/
+
+
+#ifndef VK_PLATFORM_H_
+#define VK_PLATFORM_H_
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif /* __cplusplus */
+
+/*
+***************************************************************************************************
+* Platform-specific directives and type declarations
+***************************************************************************************************
+*/
+
+/* Platform-specific calling convention macros.
+ *
+ * Platforms should define these so that Vulkan clients call Vulkan commands
+ * with the same calling conventions that the Vulkan implementation expects.
+ *
+ * VKAPI_ATTR - Placed before the return type in function declarations.
+ * Useful for C++11 and GCC/Clang-style function attribute syntax.
+ * VKAPI_CALL - Placed after the return type in function declarations.
+ * Useful for MSVC-style calling convention syntax.
+ * VKAPI_PTR - Placed between the '(' and '*' in function pointer types.
+ *
+ * Function declaration: VKAPI_ATTR void VKAPI_CALL vkCommand(void);
+ * Function pointer type: typedef void (VKAPI_PTR *PFN_vkCommand)(void);
+ */
+#if defined(_WIN32)
+ /* On Windows, Vulkan commands use the stdcall convention */
+ #define VKAPI_ATTR
+ #define VKAPI_CALL __stdcall
+ #define VKAPI_PTR VKAPI_CALL
+#elif defined(__ANDROID__) && defined(__ARM_ARCH) && __ARM_ARCH < 7
+ #error "Vulkan isn't supported for the 'armeabi' NDK ABI"
+#elif defined(__ANDROID__) && defined(__ARM_ARCH) && __ARM_ARCH >= 7 && defined(__ARM_32BIT_STATE)
+ /* On Android 32-bit ARM targets, Vulkan functions use the "hardfloat" */
+ /* calling convention, i.e. float parameters are passed in registers. This */
+ /* is true even if the rest of the application passes floats on the stack, */
+ /* as it does by default when compiling for the armeabi-v7a NDK ABI. */
+ #define VKAPI_ATTR __attribute__((pcs("aapcs-vfp")))
+ #define VKAPI_CALL
+ #define VKAPI_PTR VKAPI_ATTR
+#else
+ /* On other platforms, use the default calling convention */
+ #define VKAPI_ATTR
+ #define VKAPI_CALL
+ #define VKAPI_PTR
+#endif
+
+#include <stddef.h>
+
+#if !defined(VK_NO_STDINT_H)
+ #if defined(_MSC_VER) && (_MSC_VER < 1600)
+ typedef signed __int8 int8_t;
+ typedef unsigned __int8 uint8_t;
+ typedef signed __int16 int16_t;
+ typedef unsigned __int16 uint16_t;
+ typedef signed __int32 int32_t;
+ typedef unsigned __int32 uint32_t;
+ typedef signed __int64 int64_t;
+ typedef unsigned __int64 uint64_t;
+ #else
+ #include <stdint.h>
+ #endif
+#endif /* !defined(VK_NO_STDINT_H) */
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif /* __cplusplus */
+
+#endif
+
+#define VK_MAKE_VERSION(major, minor, patch) \
+ ((((uint32_t)(major)) << 22) | (((uint32_t)(minor)) << 12) | ((uint32_t)(patch)))
+
+#define VK_VERSION_MAJOR(version) ((uint32_t)(version) >> 22)
+
+#define VK_VERSION_MINOR(version) (((uint32_t)(version) >> 12) & 0x3ff)
+
+#define VK_VERSION_PATCH(version) ((uint32_t)(version) & 0xfff)
+
+/* DEPRECATED: This define has been removed. Specific version defines (e.g. VK_API_VERSION_1_0), or the VK_MAKE_VERSION macro, should be used instead. */
+/*#define VK_API_VERSION VK_MAKE_VERSION(1, 0, 0) // Patch version should always be set to 0 */
+
+/* Vulkan 1.0 version number */
+#define VK_API_VERSION_1_0 VK_MAKE_VERSION(1, 0, 0)/* Patch version should always be set to 0 */
+
+/* Vulkan 1.1 version number */
+#define VK_API_VERSION_1_1 VK_MAKE_VERSION(1, 1, 0)/* Patch version should always be set to 0 */
+
+/* Version of this file */
+#define VK_HEADER_VERSION 152
+
+/* Complete version of this file */
+#define VK_HEADER_VERSION_COMPLETE VK_MAKE_VERSION(1, 2, VK_HEADER_VERSION)
+
+#define VK_DEFINE_HANDLE(object) typedef struct object##_T* object;
+
+#if !defined(VK_DEFINE_NON_DISPATCHABLE_HANDLE)
+#if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__) ) || defined(_M_X64) || defined(__ia64) || defined (_M_IA64) || defined(__aarch64__) || defined(__powerpc64__)
+ #define VK_DEFINE_NON_DISPATCHABLE_HANDLE(object) typedef struct object##_T *object;
+#else
+ #define VK_DEFINE_NON_DISPATCHABLE_HANDLE(object) typedef uint64_t object;
+#endif
+#endif
+
+#define VK_NULL_HANDLE 0
+
+
+
+
+
+
+
+
+
+VK_DEFINE_HANDLE(VkInstance)
+
+VK_DEFINE_HANDLE(VkPhysicalDevice)
+
+VK_DEFINE_HANDLE(VkDevice)
+
+VK_DEFINE_HANDLE(VkQueue)
+
+VK_DEFINE_HANDLE(VkCommandBuffer)
+
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDeviceMemory)
+
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkCommandPool)
+
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkBuffer)
+
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkBufferView)
+
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkImage)
+
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkImageView)
+
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkShaderModule)
+
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkPipeline)
+
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkPipelineLayout)
+
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkSampler)
+
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDescriptorSet)
+
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDescriptorSetLayout)
+
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDescriptorPool)
+
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkFence)
+
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkSemaphore)
+
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkEvent)
+
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkQueryPool)
+
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkFramebuffer)
+
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkRenderPass)
+
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkPipelineCache)
+
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDescriptorUpdateTemplate)
+
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkSamplerYcbcrConversion)
+
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkSurfaceKHR)
+
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkSwapchainKHR)
+
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDebugReportCallbackEXT)
+
+typedef enum VkAttachmentLoadOp {
+ VK_ATTACHMENT_LOAD_OP_LOAD = 0,
+ VK_ATTACHMENT_LOAD_OP_CLEAR = 1,
+ VK_ATTACHMENT_LOAD_OP_DONT_CARE = 2
+} VkAttachmentLoadOp;
+
+typedef enum VkAttachmentStoreOp {
+ VK_ATTACHMENT_STORE_OP_STORE = 0,
+ VK_ATTACHMENT_STORE_OP_DONT_CARE = 1
+} VkAttachmentStoreOp;
+
+typedef enum VkBlendFactor {
+ VK_BLEND_FACTOR_ZERO = 0,
+ VK_BLEND_FACTOR_ONE = 1,
+ VK_BLEND_FACTOR_SRC_COLOR = 2,
+ VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR = 3,
+ VK_BLEND_FACTOR_DST_COLOR = 4,
+ VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR = 5,
+ VK_BLEND_FACTOR_SRC_ALPHA = 6,
+ VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA = 7,
+ VK_BLEND_FACTOR_DST_ALPHA = 8,
+ VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA = 9,
+ VK_BLEND_FACTOR_CONSTANT_COLOR = 10,
+ VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR = 11,
+ VK_BLEND_FACTOR_CONSTANT_ALPHA = 12,
+ VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA = 13,
+ VK_BLEND_FACTOR_SRC_ALPHA_SATURATE = 14,
+ VK_BLEND_FACTOR_SRC1_COLOR = 15,
+ VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR = 16,
+ VK_BLEND_FACTOR_SRC1_ALPHA = 17,
+ VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA = 18
+} VkBlendFactor;
+
+typedef enum VkBlendOp {
+ VK_BLEND_OP_ADD = 0,
+ VK_BLEND_OP_SUBTRACT = 1,
+ VK_BLEND_OP_REVERSE_SUBTRACT = 2,
+ VK_BLEND_OP_MIN = 3,
+ VK_BLEND_OP_MAX = 4
+} VkBlendOp;
+
+typedef enum VkBorderColor {
+ VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK = 0,
+ VK_BORDER_COLOR_INT_TRANSPARENT_BLACK = 1,
+ VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK = 2,
+ VK_BORDER_COLOR_INT_OPAQUE_BLACK = 3,
+ VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE = 4,
+ VK_BORDER_COLOR_INT_OPAQUE_WHITE = 5
+} VkBorderColor;
+
+
+
+
+typedef enum VkPipelineCacheHeaderVersion {
+ VK_PIPELINE_CACHE_HEADER_VERSION_ONE = 1
+} VkPipelineCacheHeaderVersion;
+
+
+
+
+typedef enum VkDeviceQueueCreateFlagBits {
+ VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT = 1
+} VkDeviceQueueCreateFlagBits;
+
+typedef enum VkBufferCreateFlagBits {
+ VK_BUFFER_CREATE_SPARSE_BINDING_BIT = 1,
+ VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT = 2,
+ VK_BUFFER_CREATE_SPARSE_ALIASED_BIT = 4,
+ VK_BUFFER_CREATE_PROTECTED_BIT = 8
+} VkBufferCreateFlagBits;
+
+typedef enum VkBufferUsageFlagBits {
+ VK_BUFFER_USAGE_TRANSFER_SRC_BIT = 1,
+ VK_BUFFER_USAGE_TRANSFER_DST_BIT = 2,
+ VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT = 4,
+ VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT = 8,
+ VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT = 16,
+ VK_BUFFER_USAGE_STORAGE_BUFFER_BIT = 32,
+ VK_BUFFER_USAGE_INDEX_BUFFER_BIT = 64,
+ VK_BUFFER_USAGE_VERTEX_BUFFER_BIT = 128,
+ VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT = 256
+} VkBufferUsageFlagBits;
+
+typedef enum VkColorComponentFlagBits {
+ VK_COLOR_COMPONENT_R_BIT = 1,
+ VK_COLOR_COMPONENT_G_BIT = 2,
+ VK_COLOR_COMPONENT_B_BIT = 4,
+ VK_COLOR_COMPONENT_A_BIT = 8
+} VkColorComponentFlagBits;
+
+typedef enum VkComponentSwizzle {
+ VK_COMPONENT_SWIZZLE_IDENTITY = 0,
+ VK_COMPONENT_SWIZZLE_ZERO = 1,
+ VK_COMPONENT_SWIZZLE_ONE = 2,
+ VK_COMPONENT_SWIZZLE_R = 3,
+ VK_COMPONENT_SWIZZLE_G = 4,
+ VK_COMPONENT_SWIZZLE_B = 5,
+ VK_COMPONENT_SWIZZLE_A = 6
+} VkComponentSwizzle;
+
+typedef enum VkCommandPoolCreateFlagBits {
+ VK_COMMAND_POOL_CREATE_TRANSIENT_BIT = 1,
+ VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT = 2,
+ VK_COMMAND_POOL_CREATE_PROTECTED_BIT = 4
+} VkCommandPoolCreateFlagBits;
+
+typedef enum VkCommandPoolResetFlagBits {
+ VK_COMMAND_POOL_RESET_RELEASE_RESOURCES_BIT = 1
+} VkCommandPoolResetFlagBits;
+
+typedef enum VkCommandBufferResetFlagBits {
+ VK_COMMAND_BUFFER_RESET_RELEASE_RESOURCES_BIT = 1
+} VkCommandBufferResetFlagBits;
+
+typedef enum VkCommandBufferLevel {
+ VK_COMMAND_BUFFER_LEVEL_PRIMARY = 0,
+ VK_COMMAND_BUFFER_LEVEL_SECONDARY = 1
+} VkCommandBufferLevel;
+
+typedef enum VkCommandBufferUsageFlagBits {
+ VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT = 1,
+ VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT = 2,
+ VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT = 4
+} VkCommandBufferUsageFlagBits;
+
+typedef enum VkCompareOp {
+ VK_COMPARE_OP_NEVER = 0,
+ VK_COMPARE_OP_LESS = 1,
+ VK_COMPARE_OP_EQUAL = 2,
+ VK_COMPARE_OP_LESS_OR_EQUAL = 3,
+ VK_COMPARE_OP_GREATER = 4,
+ VK_COMPARE_OP_NOT_EQUAL = 5,
+ VK_COMPARE_OP_GREATER_OR_EQUAL = 6,
+ VK_COMPARE_OP_ALWAYS = 7
+} VkCompareOp;
+
+typedef enum VkCullModeFlagBits {
+ VK_CULL_MODE_NONE = 0,
+ VK_CULL_MODE_FRONT_BIT = 1,
+ VK_CULL_MODE_BACK_BIT = 2,
+ VK_CULL_MODE_FRONT_AND_BACK = 0x00000003
+} VkCullModeFlagBits;
+
+typedef enum VkDescriptorType {
+ VK_DESCRIPTOR_TYPE_SAMPLER = 0,
+ VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER = 1,
+ VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE = 2,
+ VK_DESCRIPTOR_TYPE_STORAGE_IMAGE = 3,
+ VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER = 4,
+ VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER = 5,
+ VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER = 6,
+ VK_DESCRIPTOR_TYPE_STORAGE_BUFFER = 7,
+ VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC = 8,
+ VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC = 9,
+ VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT = 10
+} VkDescriptorType;
+
+typedef enum VkDynamicState {
+ VK_DYNAMIC_STATE_VIEWPORT = 0,
+ VK_DYNAMIC_STATE_SCISSOR = 1,
+ VK_DYNAMIC_STATE_LINE_WIDTH = 2,
+ VK_DYNAMIC_STATE_DEPTH_BIAS = 3,
+ VK_DYNAMIC_STATE_BLEND_CONSTANTS = 4,
+ VK_DYNAMIC_STATE_DEPTH_BOUNDS = 5,
+ VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK = 6,
+ VK_DYNAMIC_STATE_STENCIL_WRITE_MASK = 7,
+ VK_DYNAMIC_STATE_STENCIL_REFERENCE = 8
+} VkDynamicState;
+
+typedef enum VkFenceCreateFlagBits {
+ VK_FENCE_CREATE_SIGNALED_BIT = 1
+} VkFenceCreateFlagBits;
+
+typedef enum VkPolygonMode {
+ VK_POLYGON_MODE_FILL = 0,
+ VK_POLYGON_MODE_LINE = 1,
+ VK_POLYGON_MODE_POINT = 2
+} VkPolygonMode;
+
+typedef enum VkFormat {
+ VK_FORMAT_UNDEFINED = 0,
+ VK_FORMAT_R4G4_UNORM_PACK8 = 1,
+ VK_FORMAT_R4G4B4A4_UNORM_PACK16 = 2,
+ VK_FORMAT_B4G4R4A4_UNORM_PACK16 = 3,
+ VK_FORMAT_R5G6B5_UNORM_PACK16 = 4,
+ VK_FORMAT_B5G6R5_UNORM_PACK16 = 5,
+ VK_FORMAT_R5G5B5A1_UNORM_PACK16 = 6,
+ VK_FORMAT_B5G5R5A1_UNORM_PACK16 = 7,
+ VK_FORMAT_A1R5G5B5_UNORM_PACK16 = 8,
+ VK_FORMAT_R8_UNORM = 9,
+ VK_FORMAT_R8_SNORM = 10,
+ VK_FORMAT_R8_USCALED = 11,
+ VK_FORMAT_R8_SSCALED = 12,
+ VK_FORMAT_R8_UINT = 13,
+ VK_FORMAT_R8_SINT = 14,
+ VK_FORMAT_R8_SRGB = 15,
+ VK_FORMAT_R8G8_UNORM = 16,
+ VK_FORMAT_R8G8_SNORM = 17,
+ VK_FORMAT_R8G8_USCALED = 18,
+ VK_FORMAT_R8G8_SSCALED = 19,
+ VK_FORMAT_R8G8_UINT = 20,
+ VK_FORMAT_R8G8_SINT = 21,
+ VK_FORMAT_R8G8_SRGB = 22,
+ VK_FORMAT_R8G8B8_UNORM = 23,
+ VK_FORMAT_R8G8B8_SNORM = 24,
+ VK_FORMAT_R8G8B8_USCALED = 25,
+ VK_FORMAT_R8G8B8_SSCALED = 26,
+ VK_FORMAT_R8G8B8_UINT = 27,
+ VK_FORMAT_R8G8B8_SINT = 28,
+ VK_FORMAT_R8G8B8_SRGB = 29,
+ VK_FORMAT_B8G8R8_UNORM = 30,
+ VK_FORMAT_B8G8R8_SNORM = 31,
+ VK_FORMAT_B8G8R8_USCALED = 32,
+ VK_FORMAT_B8G8R8_SSCALED = 33,
+ VK_FORMAT_B8G8R8_UINT = 34,
+ VK_FORMAT_B8G8R8_SINT = 35,
+ VK_FORMAT_B8G8R8_SRGB = 36,
+ VK_FORMAT_R8G8B8A8_UNORM = 37,
+ VK_FORMAT_R8G8B8A8_SNORM = 38,
+ VK_FORMAT_R8G8B8A8_USCALED = 39,
+ VK_FORMAT_R8G8B8A8_SSCALED = 40,
+ VK_FORMAT_R8G8B8A8_UINT = 41,
+ VK_FORMAT_R8G8B8A8_SINT = 42,
+ VK_FORMAT_R8G8B8A8_SRGB = 43,
+ VK_FORMAT_B8G8R8A8_UNORM = 44,
+ VK_FORMAT_B8G8R8A8_SNORM = 45,
+ VK_FORMAT_B8G8R8A8_USCALED = 46,
+ VK_FORMAT_B8G8R8A8_SSCALED = 47,
+ VK_FORMAT_B8G8R8A8_UINT = 48,
+ VK_FORMAT_B8G8R8A8_SINT = 49,
+ VK_FORMAT_B8G8R8A8_SRGB = 50,
+ VK_FORMAT_A8B8G8R8_UNORM_PACK32 = 51,
+ VK_FORMAT_A8B8G8R8_SNORM_PACK32 = 52,
+ VK_FORMAT_A8B8G8R8_USCALED_PACK32 = 53,
+ VK_FORMAT_A8B8G8R8_SSCALED_PACK32 = 54,
+ VK_FORMAT_A8B8G8R8_UINT_PACK32 = 55,
+ VK_FORMAT_A8B8G8R8_SINT_PACK32 = 56,
+ VK_FORMAT_A8B8G8R8_SRGB_PACK32 = 57,
+ VK_FORMAT_A2R10G10B10_UNORM_PACK32 = 58,
+ VK_FORMAT_A2R10G10B10_SNORM_PACK32 = 59,
+ VK_FORMAT_A2R10G10B10_USCALED_PACK32 = 60,
+ VK_FORMAT_A2R10G10B10_SSCALED_PACK32 = 61,
+ VK_FORMAT_A2R10G10B10_UINT_PACK32 = 62,
+ VK_FORMAT_A2R10G10B10_SINT_PACK32 = 63,
+ VK_FORMAT_A2B10G10R10_UNORM_PACK32 = 64,
+ VK_FORMAT_A2B10G10R10_SNORM_PACK32 = 65,
+ VK_FORMAT_A2B10G10R10_USCALED_PACK32 = 66,
+ VK_FORMAT_A2B10G10R10_SSCALED_PACK32 = 67,
+ VK_FORMAT_A2B10G10R10_UINT_PACK32 = 68,
+ VK_FORMAT_A2B10G10R10_SINT_PACK32 = 69,
+ VK_FORMAT_R16_UNORM = 70,
+ VK_FORMAT_R16_SNORM = 71,
+ VK_FORMAT_R16_USCALED = 72,
+ VK_FORMAT_R16_SSCALED = 73,
+ VK_FORMAT_R16_UINT = 74,
+ VK_FORMAT_R16_SINT = 75,
+ VK_FORMAT_R16_SFLOAT = 76,
+ VK_FORMAT_R16G16_UNORM = 77,
+ VK_FORMAT_R16G16_SNORM = 78,
+ VK_FORMAT_R16G16_USCALED = 79,
+ VK_FORMAT_R16G16_SSCALED = 80,
+ VK_FORMAT_R16G16_UINT = 81,
+ VK_FORMAT_R16G16_SINT = 82,
+ VK_FORMAT_R16G16_SFLOAT = 83,
+ VK_FORMAT_R16G16B16_UNORM = 84,
+ VK_FORMAT_R16G16B16_SNORM = 85,
+ VK_FORMAT_R16G16B16_USCALED = 86,
+ VK_FORMAT_R16G16B16_SSCALED = 87,
+ VK_FORMAT_R16G16B16_UINT = 88,
+ VK_FORMAT_R16G16B16_SINT = 89,
+ VK_FORMAT_R16G16B16_SFLOAT = 90,
+ VK_FORMAT_R16G16B16A16_UNORM = 91,
+ VK_FORMAT_R16G16B16A16_SNORM = 92,
+ VK_FORMAT_R16G16B16A16_USCALED = 93,
+ VK_FORMAT_R16G16B16A16_SSCALED = 94,
+ VK_FORMAT_R16G16B16A16_UINT = 95,
+ VK_FORMAT_R16G16B16A16_SINT = 96,
+ VK_FORMAT_R16G16B16A16_SFLOAT = 97,
+ VK_FORMAT_R32_UINT = 98,
+ VK_FORMAT_R32_SINT = 99,
+ VK_FORMAT_R32_SFLOAT = 100,
+ VK_FORMAT_R32G32_UINT = 101,
+ VK_FORMAT_R32G32_SINT = 102,
+ VK_FORMAT_R32G32_SFLOAT = 103,
+ VK_FORMAT_R32G32B32_UINT = 104,
+ VK_FORMAT_R32G32B32_SINT = 105,
+ VK_FORMAT_R32G32B32_SFLOAT = 106,
+ VK_FORMAT_R32G32B32A32_UINT = 107,
+ VK_FORMAT_R32G32B32A32_SINT = 108,
+ VK_FORMAT_R32G32B32A32_SFLOAT = 109,
+ VK_FORMAT_R64_UINT = 110,
+ VK_FORMAT_R64_SINT = 111,
+ VK_FORMAT_R64_SFLOAT = 112,
+ VK_FORMAT_R64G64_UINT = 113,
+ VK_FORMAT_R64G64_SINT = 114,
+ VK_FORMAT_R64G64_SFLOAT = 115,
+ VK_FORMAT_R64G64B64_UINT = 116,
+ VK_FORMAT_R64G64B64_SINT = 117,
+ VK_FORMAT_R64G64B64_SFLOAT = 118,
+ VK_FORMAT_R64G64B64A64_UINT = 119,
+ VK_FORMAT_R64G64B64A64_SINT = 120,
+ VK_FORMAT_R64G64B64A64_SFLOAT = 121,
+ VK_FORMAT_B10G11R11_UFLOAT_PACK32 = 122,
+ VK_FORMAT_E5B9G9R9_UFLOAT_PACK32 = 123,
+ VK_FORMAT_D16_UNORM = 124,
+ VK_FORMAT_X8_D24_UNORM_PACK32 = 125,
+ VK_FORMAT_D32_SFLOAT = 126,
+ VK_FORMAT_S8_UINT = 127,
+ VK_FORMAT_D16_UNORM_S8_UINT = 128,
+ VK_FORMAT_D24_UNORM_S8_UINT = 129,
+ VK_FORMAT_D32_SFLOAT_S8_UINT = 130,
+ VK_FORMAT_BC1_RGB_UNORM_BLOCK = 131,
+ VK_FORMAT_BC1_RGB_SRGB_BLOCK = 132,
+ VK_FORMAT_BC1_RGBA_UNORM_BLOCK = 133,
+ VK_FORMAT_BC1_RGBA_SRGB_BLOCK = 134,
+ VK_FORMAT_BC2_UNORM_BLOCK = 135,
+ VK_FORMAT_BC2_SRGB_BLOCK = 136,
+ VK_FORMAT_BC3_UNORM_BLOCK = 137,
+ VK_FORMAT_BC3_SRGB_BLOCK = 138,
+ VK_FORMAT_BC4_UNORM_BLOCK = 139,
+ VK_FORMAT_BC4_SNORM_BLOCK = 140,
+ VK_FORMAT_BC5_UNORM_BLOCK = 141,
+ VK_FORMAT_BC5_SNORM_BLOCK = 142,
+ VK_FORMAT_BC6H_UFLOAT_BLOCK = 143,
+ VK_FORMAT_BC6H_SFLOAT_BLOCK = 144,
+ VK_FORMAT_BC7_UNORM_BLOCK = 145,
+ VK_FORMAT_BC7_SRGB_BLOCK = 146,
+ VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK = 147,
+ VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK = 148,
+ VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK = 149,
+ VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK = 150,
+ VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK = 151,
+ VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK = 152,
+ VK_FORMAT_EAC_R11_UNORM_BLOCK = 153,
+ VK_FORMAT_EAC_R11_SNORM_BLOCK = 154,
+ VK_FORMAT_EAC_R11G11_UNORM_BLOCK = 155,
+ VK_FORMAT_EAC_R11G11_SNORM_BLOCK = 156,
+ VK_FORMAT_ASTC_4x4_UNORM_BLOCK = 157,
+ VK_FORMAT_ASTC_4x4_SRGB_BLOCK = 158,
+ VK_FORMAT_ASTC_5x4_UNORM_BLOCK = 159,
+ VK_FORMAT_ASTC_5x4_SRGB_BLOCK = 160,
+ VK_FORMAT_ASTC_5x5_UNORM_BLOCK = 161,
+ VK_FORMAT_ASTC_5x5_SRGB_BLOCK = 162,
+ VK_FORMAT_ASTC_6x5_UNORM_BLOCK = 163,
+ VK_FORMAT_ASTC_6x5_SRGB_BLOCK = 164,
+ VK_FORMAT_ASTC_6x6_UNORM_BLOCK = 165,
+ VK_FORMAT_ASTC_6x6_SRGB_BLOCK = 166,
+ VK_FORMAT_ASTC_8x5_UNORM_BLOCK = 167,
+ VK_FORMAT_ASTC_8x5_SRGB_BLOCK = 168,
+ VK_FORMAT_ASTC_8x6_UNORM_BLOCK = 169,
+ VK_FORMAT_ASTC_8x6_SRGB_BLOCK = 170,
+ VK_FORMAT_ASTC_8x8_UNORM_BLOCK = 171,
+ VK_FORMAT_ASTC_8x8_SRGB_BLOCK = 172,
+ VK_FORMAT_ASTC_10x5_UNORM_BLOCK = 173,
+ VK_FORMAT_ASTC_10x5_SRGB_BLOCK = 174,
+ VK_FORMAT_ASTC_10x6_UNORM_BLOCK = 175,
+ VK_FORMAT_ASTC_10x6_SRGB_BLOCK = 176,
+ VK_FORMAT_ASTC_10x8_UNORM_BLOCK = 177,
+ VK_FORMAT_ASTC_10x8_SRGB_BLOCK = 178,
+ VK_FORMAT_ASTC_10x10_UNORM_BLOCK = 179,
+ VK_FORMAT_ASTC_10x10_SRGB_BLOCK = 180,
+ VK_FORMAT_ASTC_12x10_UNORM_BLOCK = 181,
+ VK_FORMAT_ASTC_12x10_SRGB_BLOCK = 182,
+ VK_FORMAT_ASTC_12x12_UNORM_BLOCK = 183,
+ VK_FORMAT_ASTC_12x12_SRGB_BLOCK = 184,
+ VK_FORMAT_G8B8G8R8_422_UNORM = 1000156000,
+ VK_FORMAT_B8G8R8G8_422_UNORM = 1000156001,
+ VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM = 1000156002,
+ VK_FORMAT_G8_B8R8_2PLANE_420_UNORM = 1000156003,
+ VK_FORMAT_G8_B8_R8_3PLANE_422_UNORM = 1000156004,
+ VK_FORMAT_G8_B8R8_2PLANE_422_UNORM = 1000156005,
+ VK_FORMAT_G8_B8_R8_3PLANE_444_UNORM = 1000156006,
+ VK_FORMAT_R10X6_UNORM_PACK16 = 1000156007,
+ VK_FORMAT_R10X6G10X6_UNORM_2PACK16 = 1000156008,
+ VK_FORMAT_R10X6G10X6B10X6A10X6_UNORM_4PACK16 = 1000156009,
+ VK_FORMAT_G10X6B10X6G10X6R10X6_422_UNORM_4PACK16 = 1000156010,
+ VK_FORMAT_B10X6G10X6R10X6G10X6_422_UNORM_4PACK16 = 1000156011,
+ VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16 = 1000156012,
+ VK_FORMAT_G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16 = 1000156013,
+ VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16 = 1000156014,
+ VK_FORMAT_G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16 = 1000156015,
+ VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16 = 1000156016,
+ VK_FORMAT_R12X4_UNORM_PACK16 = 1000156017,
+ VK_FORMAT_R12X4G12X4_UNORM_2PACK16 = 1000156018,
+ VK_FORMAT_R12X4G12X4B12X4A12X4_UNORM_4PACK16 = 1000156019,
+ VK_FORMAT_G12X4B12X4G12X4R12X4_422_UNORM_4PACK16 = 1000156020,
+ VK_FORMAT_B12X4G12X4R12X4G12X4_422_UNORM_4PACK16 = 1000156021,
+ VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16 = 1000156022,
+ VK_FORMAT_G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16 = 1000156023,
+ VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16 = 1000156024,
+ VK_FORMAT_G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16 = 1000156025,
+ VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16 = 1000156026,
+ VK_FORMAT_G16B16G16R16_422_UNORM = 1000156027,
+ VK_FORMAT_B16G16R16G16_422_UNORM = 1000156028,
+ VK_FORMAT_G16_B16_R16_3PLANE_420_UNORM = 1000156029,
+ VK_FORMAT_G16_B16R16_2PLANE_420_UNORM = 1000156030,
+ VK_FORMAT_G16_B16_R16_3PLANE_422_UNORM = 1000156031,
+ VK_FORMAT_G16_B16R16_2PLANE_422_UNORM = 1000156032,
+ VK_FORMAT_G16_B16_R16_3PLANE_444_UNORM = 1000156033
+} VkFormat;
+
+typedef enum VkFormatFeatureFlagBits {
+ VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT = 1,
+ VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT = 2,
+ VK_FORMAT_FEATURE_STORAGE_IMAGE_ATOMIC_BIT = 4,
+ VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT = 8,
+ VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT = 16,
+ VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_ATOMIC_BIT = 32,
+ VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT = 64,
+ VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT = 128,
+ VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT = 256,
+ VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT = 512,
+ VK_FORMAT_FEATURE_BLIT_SRC_BIT = 1024,
+ VK_FORMAT_FEATURE_BLIT_DST_BIT = 2048,
+ VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT = 4096,
+ VK_FORMAT_FEATURE_TRANSFER_SRC_BIT = 16384,
+ VK_FORMAT_FEATURE_TRANSFER_DST_BIT = 32768,
+ VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT = 131072,
+ VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER_BIT = 262144,
+ VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER_BIT = 524288,
+ VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_BIT = 1048576,
+ VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE_BIT = 2097152,
+ VK_FORMAT_FEATURE_DISJOINT_BIT = 4194304,
+ VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT = 8388608
+} VkFormatFeatureFlagBits;
+
+typedef enum VkFrontFace {
+ VK_FRONT_FACE_COUNTER_CLOCKWISE = 0,
+ VK_FRONT_FACE_CLOCKWISE = 1
+} VkFrontFace;
+
+typedef enum VkImageAspectFlagBits {
+ VK_IMAGE_ASPECT_COLOR_BIT = 1,
+ VK_IMAGE_ASPECT_DEPTH_BIT = 2,
+ VK_IMAGE_ASPECT_STENCIL_BIT = 4,
+ VK_IMAGE_ASPECT_METADATA_BIT = 8,
+ VK_IMAGE_ASPECT_PLANE_0_BIT = 16,
+ VK_IMAGE_ASPECT_PLANE_1_BIT = 32,
+ VK_IMAGE_ASPECT_PLANE_2_BIT = 64
+} VkImageAspectFlagBits;
+
+typedef enum VkImageCreateFlagBits {
+ VK_IMAGE_CREATE_SPARSE_BINDING_BIT = 1,
+ VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT = 2,
+ VK_IMAGE_CREATE_SPARSE_ALIASED_BIT = 4,
+ VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT = 8,
+ VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT = 16,
+ VK_IMAGE_CREATE_ALIAS_BIT = 1024,
+ VK_IMAGE_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT = 64,
+ VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT = 32,
+ VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT = 128,
+ VK_IMAGE_CREATE_EXTENDED_USAGE_BIT = 256,
+ VK_IMAGE_CREATE_PROTECTED_BIT = 2048,
+ VK_IMAGE_CREATE_DISJOINT_BIT = 512
+} VkImageCreateFlagBits;
+
+typedef enum VkImageLayout {
+ VK_IMAGE_LAYOUT_UNDEFINED = 0,
+ VK_IMAGE_LAYOUT_GENERAL = 1,
+ VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL = 2,
+ VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL = 3,
+ VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL = 4,
+ VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL = 5,
+ VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL = 6,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL = 7,
+ VK_IMAGE_LAYOUT_PREINITIALIZED = 8,
+ VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL = 1000117000,
+ VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL = 1000117001,
+ VK_IMAGE_LAYOUT_PRESENT_SRC_KHR = 1000001002
+} VkImageLayout;
+
+typedef enum VkImageTiling {
+ VK_IMAGE_TILING_OPTIMAL = 0,
+ VK_IMAGE_TILING_LINEAR = 1
+} VkImageTiling;
+
+typedef enum VkImageType {
+ VK_IMAGE_TYPE_1D = 0,
+ VK_IMAGE_TYPE_2D = 1,
+ VK_IMAGE_TYPE_3D = 2
+} VkImageType;
+
+typedef enum VkImageUsageFlagBits {
+ VK_IMAGE_USAGE_TRANSFER_SRC_BIT = 1,
+ VK_IMAGE_USAGE_TRANSFER_DST_BIT = 2,
+ VK_IMAGE_USAGE_SAMPLED_BIT = 4,
+ VK_IMAGE_USAGE_STORAGE_BIT = 8,
+ VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT = 16,
+ VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT = 32,
+ VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT = 64,
+ VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT = 128
+} VkImageUsageFlagBits;
+
+
+typedef enum VkImageViewType {
+ VK_IMAGE_VIEW_TYPE_1D = 0,
+ VK_IMAGE_VIEW_TYPE_2D = 1,
+ VK_IMAGE_VIEW_TYPE_3D = 2,
+ VK_IMAGE_VIEW_TYPE_CUBE = 3,
+ VK_IMAGE_VIEW_TYPE_1D_ARRAY = 4,
+ VK_IMAGE_VIEW_TYPE_2D_ARRAY = 5,
+ VK_IMAGE_VIEW_TYPE_CUBE_ARRAY = 6
+} VkImageViewType;
+
+typedef enum VkSharingMode {
+ VK_SHARING_MODE_EXCLUSIVE = 0,
+ VK_SHARING_MODE_CONCURRENT = 1
+} VkSharingMode;
+
+typedef enum VkIndexType {
+ VK_INDEX_TYPE_UINT16 = 0,
+ VK_INDEX_TYPE_UINT32 = 1
+} VkIndexType;
+
+typedef enum VkLogicOp {
+ VK_LOGIC_OP_CLEAR = 0,
+ VK_LOGIC_OP_AND = 1,
+ VK_LOGIC_OP_AND_REVERSE = 2,
+ VK_LOGIC_OP_COPY = 3,
+ VK_LOGIC_OP_AND_INVERTED = 4,
+ VK_LOGIC_OP_NO_OP = 5,
+ VK_LOGIC_OP_XOR = 6,
+ VK_LOGIC_OP_OR = 7,
+ VK_LOGIC_OP_NOR = 8,
+ VK_LOGIC_OP_EQUIVALENT = 9,
+ VK_LOGIC_OP_INVERT = 10,
+ VK_LOGIC_OP_OR_REVERSE = 11,
+ VK_LOGIC_OP_COPY_INVERTED = 12,
+ VK_LOGIC_OP_OR_INVERTED = 13,
+ VK_LOGIC_OP_NAND = 14,
+ VK_LOGIC_OP_SET = 15
+} VkLogicOp;
+
+typedef enum VkMemoryHeapFlagBits {
+ VK_MEMORY_HEAP_DEVICE_LOCAL_BIT = 1,
+ VK_MEMORY_HEAP_MULTI_INSTANCE_BIT = 2
+} VkMemoryHeapFlagBits;
+
+typedef enum VkAccessFlagBits {
+ VK_ACCESS_INDIRECT_COMMAND_READ_BIT = 1,
+ VK_ACCESS_INDEX_READ_BIT = 2,
+ VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT = 4,
+ VK_ACCESS_UNIFORM_READ_BIT = 8,
+ VK_ACCESS_INPUT_ATTACHMENT_READ_BIT = 16,
+ VK_ACCESS_SHADER_READ_BIT = 32,
+ VK_ACCESS_SHADER_WRITE_BIT = 64,
+ VK_ACCESS_COLOR_ATTACHMENT_READ_BIT = 128,
+ VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT = 256,
+ VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT = 512,
+ VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT = 1024,
+ VK_ACCESS_TRANSFER_READ_BIT = 2048,
+ VK_ACCESS_TRANSFER_WRITE_BIT = 4096,
+ VK_ACCESS_HOST_READ_BIT = 8192,
+ VK_ACCESS_HOST_WRITE_BIT = 16384,
+ VK_ACCESS_MEMORY_READ_BIT = 32768,
+ VK_ACCESS_MEMORY_WRITE_BIT = 65536
+} VkAccessFlagBits;
+
+typedef enum VkMemoryPropertyFlagBits {
+ VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT = 1,
+ VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT = 2,
+ VK_MEMORY_PROPERTY_HOST_COHERENT_BIT = 4,
+ VK_MEMORY_PROPERTY_HOST_CACHED_BIT = 8,
+ VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT = 16,
+ VK_MEMORY_PROPERTY_PROTECTED_BIT = 32
+} VkMemoryPropertyFlagBits;
+
+typedef enum VkPhysicalDeviceType {
+ VK_PHYSICAL_DEVICE_TYPE_OTHER = 0,
+ VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU = 1,
+ VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU = 2,
+ VK_PHYSICAL_DEVICE_TYPE_VIRTUAL_GPU = 3,
+ VK_PHYSICAL_DEVICE_TYPE_CPU = 4
+} VkPhysicalDeviceType;
+
+typedef enum VkPipelineBindPoint {
+ VK_PIPELINE_BIND_POINT_GRAPHICS = 0,
+ VK_PIPELINE_BIND_POINT_COMPUTE = 1
+} VkPipelineBindPoint;
+
+typedef enum VkPipelineCreateFlagBits {
+ VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT = 1,
+ VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT = 2,
+ VK_PIPELINE_CREATE_DERIVATIVE_BIT = 4,
+ VK_PIPELINE_CREATE_VIEW_INDEX_FROM_DEVICE_INDEX_BIT = 8,
+ VK_PIPELINE_CREATE_DISPATCH_BASE_BIT = 16,
+ VK_PIPELINE_CREATE_DISPATCH_BASE = VK_PIPELINE_CREATE_DISPATCH_BASE_BIT
+} VkPipelineCreateFlagBits;
+
+typedef enum VkPrimitiveTopology {
+ VK_PRIMITIVE_TOPOLOGY_POINT_LIST = 0,
+ VK_PRIMITIVE_TOPOLOGY_LINE_LIST = 1,
+ VK_PRIMITIVE_TOPOLOGY_LINE_STRIP = 2,
+ VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST = 3,
+ VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP = 4,
+ VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN = 5,
+ VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY = 6,
+ VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY = 7,
+ VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY = 8,
+ VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY = 9,
+ VK_PRIMITIVE_TOPOLOGY_PATCH_LIST = 10
+} VkPrimitiveTopology;
+
+typedef enum VkQueryControlFlagBits {
+ VK_QUERY_CONTROL_PRECISE_BIT = 1
+} VkQueryControlFlagBits;
+
+typedef enum VkQueryPipelineStatisticFlagBits {
+ VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_VERTICES_BIT = 1,
+ VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_PRIMITIVES_BIT = 2,
+ VK_QUERY_PIPELINE_STATISTIC_VERTEX_SHADER_INVOCATIONS_BIT = 4,
+ VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_INVOCATIONS_BIT = 8,
+ VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_PRIMITIVES_BIT = 16,
+ VK_QUERY_PIPELINE_STATISTIC_CLIPPING_INVOCATIONS_BIT = 32,
+ VK_QUERY_PIPELINE_STATISTIC_CLIPPING_PRIMITIVES_BIT = 64,
+ VK_QUERY_PIPELINE_STATISTIC_FRAGMENT_SHADER_INVOCATIONS_BIT = 128,
+ VK_QUERY_PIPELINE_STATISTIC_TESSELLATION_CONTROL_SHADER_PATCHES_BIT = 256,
+ VK_QUERY_PIPELINE_STATISTIC_TESSELLATION_EVALUATION_SHADER_INVOCATIONS_BIT = 512,
+ VK_QUERY_PIPELINE_STATISTIC_COMPUTE_SHADER_INVOCATIONS_BIT = 1024
+} VkQueryPipelineStatisticFlagBits;
+
+typedef enum VkQueryResultFlagBits {
+ VK_QUERY_RESULT_64_BIT = 1,
+ VK_QUERY_RESULT_WAIT_BIT = 2,
+ VK_QUERY_RESULT_WITH_AVAILABILITY_BIT = 4,
+ VK_QUERY_RESULT_PARTIAL_BIT = 8
+} VkQueryResultFlagBits;
+
+typedef enum VkQueryType {
+ VK_QUERY_TYPE_OCCLUSION = 0,
+ VK_QUERY_TYPE_PIPELINE_STATISTICS = 1,
+ VK_QUERY_TYPE_TIMESTAMP = 2
+} VkQueryType;
+
+typedef enum VkQueueFlagBits {
+ VK_QUEUE_GRAPHICS_BIT = 1,
+ VK_QUEUE_COMPUTE_BIT = 2,
+ VK_QUEUE_TRANSFER_BIT = 4,
+ VK_QUEUE_SPARSE_BINDING_BIT = 8,
+ VK_QUEUE_PROTECTED_BIT = 16
+} VkQueueFlagBits;
+
+typedef enum VkSubpassContents {
+ VK_SUBPASS_CONTENTS_INLINE = 0,
+ VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS = 1
+} VkSubpassContents;
+
+typedef enum VkResult {
+ VK_SUCCESS = 0,
+ VK_NOT_READY = 1,
+ VK_TIMEOUT = 2,
+ VK_EVENT_SET = 3,
+ VK_EVENT_RESET = 4,
+ VK_INCOMPLETE = 5,
+ VK_ERROR_OUT_OF_HOST_MEMORY = -1,
+ VK_ERROR_OUT_OF_DEVICE_MEMORY = -2,
+ VK_ERROR_INITIALIZATION_FAILED = -3,
+ VK_ERROR_DEVICE_LOST = -4,
+ VK_ERROR_MEMORY_MAP_FAILED = -5,
+ VK_ERROR_LAYER_NOT_PRESENT = -6,
+ VK_ERROR_EXTENSION_NOT_PRESENT = -7,
+ VK_ERROR_FEATURE_NOT_PRESENT = -8,
+ VK_ERROR_INCOMPATIBLE_DRIVER = -9,
+ VK_ERROR_TOO_MANY_OBJECTS = -10,
+ VK_ERROR_FORMAT_NOT_SUPPORTED = -11,
+ VK_ERROR_FRAGMENTED_POOL = -12,
+ VK_ERROR_UNKNOWN = -13,
+ VK_ERROR_OUT_OF_POOL_MEMORY = -1000069000,
+ VK_ERROR_INVALID_EXTERNAL_HANDLE = -1000072003,
+ VK_ERROR_SURFACE_LOST_KHR = -1000000000,
+ VK_ERROR_NATIVE_WINDOW_IN_USE_KHR = -1000000001,
+ VK_SUBOPTIMAL_KHR = 1000001003,
+ VK_ERROR_OUT_OF_DATE_KHR = -1000001004,
+ VK_ERROR_VALIDATION_FAILED_EXT = -1000011001
+} VkResult;
+
+typedef enum VkShaderStageFlagBits {
+ VK_SHADER_STAGE_VERTEX_BIT = 1,
+ VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT = 2,
+ VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT = 4,
+ VK_SHADER_STAGE_GEOMETRY_BIT = 8,
+ VK_SHADER_STAGE_FRAGMENT_BIT = 16,
+ VK_SHADER_STAGE_COMPUTE_BIT = 32,
+ VK_SHADER_STAGE_ALL_GRAPHICS = 0x0000001F,
+ VK_SHADER_STAGE_ALL = 0x7FFFFFFF
+} VkShaderStageFlagBits;
+
+typedef enum VkSparseMemoryBindFlagBits {
+ VK_SPARSE_MEMORY_BIND_METADATA_BIT = 1
+} VkSparseMemoryBindFlagBits;
+
+typedef enum VkStencilFaceFlagBits {
+ VK_STENCIL_FACE_FRONT_BIT = 1,
+ VK_STENCIL_FACE_BACK_BIT = 2,
+ VK_STENCIL_FACE_FRONT_AND_BACK = 0x00000003,
+ VK_STENCIL_FRONT_AND_BACK = VK_STENCIL_FACE_FRONT_AND_BACK
+} VkStencilFaceFlagBits;
+
+typedef enum VkStencilOp {
+ VK_STENCIL_OP_KEEP = 0,
+ VK_STENCIL_OP_ZERO = 1,
+ VK_STENCIL_OP_REPLACE = 2,
+ VK_STENCIL_OP_INCREMENT_AND_CLAMP = 3,
+ VK_STENCIL_OP_DECREMENT_AND_CLAMP = 4,
+ VK_STENCIL_OP_INVERT = 5,
+ VK_STENCIL_OP_INCREMENT_AND_WRAP = 6,
+ VK_STENCIL_OP_DECREMENT_AND_WRAP = 7
+} VkStencilOp;
+
+typedef enum VkStructureType {
+ VK_STRUCTURE_TYPE_APPLICATION_INFO = 0,
+ VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO = 1,
+ VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO = 2,
+ VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO = 3,
+ VK_STRUCTURE_TYPE_SUBMIT_INFO = 4,
+ VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO = 5,
+ VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE = 6,
+ VK_STRUCTURE_TYPE_BIND_SPARSE_INFO = 7,
+ VK_STRUCTURE_TYPE_FENCE_CREATE_INFO = 8,
+ VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO = 9,
+ VK_STRUCTURE_TYPE_EVENT_CREATE_INFO = 10,
+ VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO = 11,
+ VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO = 12,
+ VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO = 13,
+ VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO = 14,
+ VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO = 15,
+ VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO = 16,
+ VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO = 17,
+ VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO = 18,
+ VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO = 19,
+ VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO = 20,
+ VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO = 21,
+ VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO = 22,
+ VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO = 23,
+ VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO = 24,
+ VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO = 25,
+ VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO = 26,
+ VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO = 27,
+ VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO = 28,
+ VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO = 29,
+ VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO = 30,
+ VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO = 31,
+ VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO = 32,
+ VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO = 33,
+ VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO = 34,
+ VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET = 35,
+ VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET = 36,
+ VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO = 37,
+ VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO = 38,
+ VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO = 39,
+ VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO = 40,
+ VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO = 41,
+ VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO = 42,
+ VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO = 43,
+ VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER = 44,
+ VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER = 45,
+ VK_STRUCTURE_TYPE_MEMORY_BARRIER = 46,
+ VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO = 47,
+ VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO = 48,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_PROPERTIES = 1000094000,
+ VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO = 1000157000,
+ VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO = 1000157001,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES = 1000083000,
+ VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS = 1000127000,
+ VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO = 1000127001,
+ VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO = 1000060000,
+ VK_STRUCTURE_TYPE_DEVICE_GROUP_RENDER_PASS_BEGIN_INFO = 1000060003,
+ VK_STRUCTURE_TYPE_DEVICE_GROUP_COMMAND_BUFFER_BEGIN_INFO = 1000060004,
+ VK_STRUCTURE_TYPE_DEVICE_GROUP_SUBMIT_INFO = 1000060005,
+ VK_STRUCTURE_TYPE_DEVICE_GROUP_BIND_SPARSE_INFO = 1000060006,
+ VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO = 1000060013,
+ VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_DEVICE_GROUP_INFO = 1000060014,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GROUP_PROPERTIES = 1000070000,
+ VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO = 1000070001,
+ VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2 = 1000146000,
+ VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2 = 1000146001,
+ VK_STRUCTURE_TYPE_IMAGE_SPARSE_MEMORY_REQUIREMENTS_INFO_2 = 1000146002,
+ VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2 = 1000146003,
+ VK_STRUCTURE_TYPE_SPARSE_IMAGE_MEMORY_REQUIREMENTS_2 = 1000146004,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2 = 1000059000,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2 = 1000059001,
+ VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_2 = 1000059002,
+ VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2 = 1000059003,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2 = 1000059004,
+ VK_STRUCTURE_TYPE_QUEUE_FAMILY_PROPERTIES_2 = 1000059005,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2 = 1000059006,
+ VK_STRUCTURE_TYPE_SPARSE_IMAGE_FORMAT_PROPERTIES_2 = 1000059007,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SPARSE_IMAGE_FORMAT_INFO_2 = 1000059008,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES = 1000117000,
+ VK_STRUCTURE_TYPE_RENDER_PASS_INPUT_ATTACHMENT_ASPECT_CREATE_INFO = 1000117001,
+ VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO = 1000117002,
+ VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_DOMAIN_ORIGIN_STATE_CREATE_INFO = 1000117003,
+ VK_STRUCTURE_TYPE_RENDER_PASS_MULTIVIEW_CREATE_INFO = 1000053000,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES = 1000053001,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES = 1000053002,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES = 1000120000,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES,
+ VK_STRUCTURE_TYPE_PROTECTED_SUBMIT_INFO = 1000145000,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES = 1000145001,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_PROPERTIES = 1000145002,
+ VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2 = 1000145003,
+ VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_CREATE_INFO = 1000156000,
+ VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO = 1000156001,
+ VK_STRUCTURE_TYPE_BIND_IMAGE_PLANE_MEMORY_INFO = 1000156002,
+ VK_STRUCTURE_TYPE_IMAGE_PLANE_MEMORY_REQUIREMENTS_INFO = 1000156003,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES = 1000156004,
+ VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_IMAGE_FORMAT_PROPERTIES = 1000156005,
+ VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO = 1000085000,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO = 1000071000,
+ VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES = 1000071001,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_BUFFER_INFO = 1000071002,
+ VK_STRUCTURE_TYPE_EXTERNAL_BUFFER_PROPERTIES = 1000071003,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES = 1000071004,
+ VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO = 1000072000,
+ VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO = 1000072001,
+ VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO = 1000072002,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_FENCE_INFO = 1000112000,
+ VK_STRUCTURE_TYPE_EXTERNAL_FENCE_PROPERTIES = 1000112001,
+ VK_STRUCTURE_TYPE_EXPORT_FENCE_CREATE_INFO = 1000113000,
+ VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO = 1000077000,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO = 1000076000,
+ VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES = 1000076001,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES = 1000168000,
+ VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_SUPPORT = 1000168001,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES = 1000063000,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETER_FEATURES = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES,
+ VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR = 1000001000,
+ VK_STRUCTURE_TYPE_PRESENT_INFO_KHR = 1000001001,
+ VK_STRUCTURE_TYPE_DEVICE_GROUP_PRESENT_CAPABILITIES_KHR = 1000060007,
+ VK_STRUCTURE_TYPE_IMAGE_SWAPCHAIN_CREATE_INFO_KHR = 1000060008,
+ VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_SWAPCHAIN_INFO_KHR = 1000060009,
+ VK_STRUCTURE_TYPE_ACQUIRE_NEXT_IMAGE_INFO_KHR = 1000060010,
+ VK_STRUCTURE_TYPE_DEVICE_GROUP_PRESENT_INFO_KHR = 1000060011,
+ VK_STRUCTURE_TYPE_DEVICE_GROUP_SWAPCHAIN_CREATE_INFO_KHR = 1000060012,
+ VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT = 1000011000,
+ VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT = VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT
+} VkStructureType;
+
+typedef enum VkSystemAllocationScope {
+ VK_SYSTEM_ALLOCATION_SCOPE_COMMAND = 0,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT = 1,
+ VK_SYSTEM_ALLOCATION_SCOPE_CACHE = 2,
+ VK_SYSTEM_ALLOCATION_SCOPE_DEVICE = 3,
+ VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE = 4
+} VkSystemAllocationScope;
+
+typedef enum VkInternalAllocationType {
+ VK_INTERNAL_ALLOCATION_TYPE_EXECUTABLE = 0
+} VkInternalAllocationType;
+
+typedef enum VkSamplerAddressMode {
+ VK_SAMPLER_ADDRESS_MODE_REPEAT = 0,
+ VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT = 1,
+ VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE = 2,
+ VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER = 3
+} VkSamplerAddressMode;
+
+typedef enum VkFilter {
+ VK_FILTER_NEAREST = 0,
+ VK_FILTER_LINEAR = 1
+} VkFilter;
+
+typedef enum VkSamplerMipmapMode {
+ VK_SAMPLER_MIPMAP_MODE_NEAREST = 0,
+ VK_SAMPLER_MIPMAP_MODE_LINEAR = 1
+} VkSamplerMipmapMode;
+
+typedef enum VkVertexInputRate {
+ VK_VERTEX_INPUT_RATE_VERTEX = 0,
+ VK_VERTEX_INPUT_RATE_INSTANCE = 1
+} VkVertexInputRate;
+
+typedef enum VkPipelineStageFlagBits {
+ VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT = 1,
+ VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT = 2,
+ VK_PIPELINE_STAGE_VERTEX_INPUT_BIT = 4,
+ VK_PIPELINE_STAGE_VERTEX_SHADER_BIT = 8,
+ VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT = 16,
+ VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT = 32,
+ VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT = 64,
+ VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT = 128,
+ VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT = 256,
+ VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT = 512,
+ VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT = 1024,
+ VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT = 2048,
+ VK_PIPELINE_STAGE_TRANSFER_BIT = 4096,
+ VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT = 8192,
+ VK_PIPELINE_STAGE_HOST_BIT = 16384,
+ VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT = 32768,
+ VK_PIPELINE_STAGE_ALL_COMMANDS_BIT = 65536
+} VkPipelineStageFlagBits;
+
+typedef enum VkSparseImageFormatFlagBits {
+ VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT = 1,
+ VK_SPARSE_IMAGE_FORMAT_ALIGNED_MIP_SIZE_BIT = 2,
+ VK_SPARSE_IMAGE_FORMAT_NONSTANDARD_BLOCK_SIZE_BIT = 4
+} VkSparseImageFormatFlagBits;
+
+typedef enum VkSampleCountFlagBits {
+ VK_SAMPLE_COUNT_1_BIT = 1,
+ VK_SAMPLE_COUNT_2_BIT = 2,
+ VK_SAMPLE_COUNT_4_BIT = 4,
+ VK_SAMPLE_COUNT_8_BIT = 8,
+ VK_SAMPLE_COUNT_16_BIT = 16,
+ VK_SAMPLE_COUNT_32_BIT = 32,
+ VK_SAMPLE_COUNT_64_BIT = 64
+} VkSampleCountFlagBits;
+
+typedef enum VkAttachmentDescriptionFlagBits {
+ VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT = 1
+} VkAttachmentDescriptionFlagBits;
+
+typedef enum VkDescriptorPoolCreateFlagBits {
+ VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT = 1
+} VkDescriptorPoolCreateFlagBits;
+
+typedef enum VkDependencyFlagBits {
+ VK_DEPENDENCY_BY_REGION_BIT = 1,
+ VK_DEPENDENCY_DEVICE_GROUP_BIT = 4,
+ VK_DEPENDENCY_VIEW_LOCAL_BIT = 2
+} VkDependencyFlagBits;
+
+typedef enum VkObjectType {
+ VK_OBJECT_TYPE_UNKNOWN = 0,
+ VK_OBJECT_TYPE_INSTANCE = 1,
+ VK_OBJECT_TYPE_PHYSICAL_DEVICE = 2,
+ VK_OBJECT_TYPE_DEVICE = 3,
+ VK_OBJECT_TYPE_QUEUE = 4,
+ VK_OBJECT_TYPE_SEMAPHORE = 5,
+ VK_OBJECT_TYPE_COMMAND_BUFFER = 6,
+ VK_OBJECT_TYPE_FENCE = 7,
+ VK_OBJECT_TYPE_DEVICE_MEMORY = 8,
+ VK_OBJECT_TYPE_BUFFER = 9,
+ VK_OBJECT_TYPE_IMAGE = 10,
+ VK_OBJECT_TYPE_EVENT = 11,
+ VK_OBJECT_TYPE_QUERY_POOL = 12,
+ VK_OBJECT_TYPE_BUFFER_VIEW = 13,
+ VK_OBJECT_TYPE_IMAGE_VIEW = 14,
+ VK_OBJECT_TYPE_SHADER_MODULE = 15,
+ VK_OBJECT_TYPE_PIPELINE_CACHE = 16,
+ VK_OBJECT_TYPE_PIPELINE_LAYOUT = 17,
+ VK_OBJECT_TYPE_RENDER_PASS = 18,
+ VK_OBJECT_TYPE_PIPELINE = 19,
+ VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT = 20,
+ VK_OBJECT_TYPE_SAMPLER = 21,
+ VK_OBJECT_TYPE_DESCRIPTOR_POOL = 22,
+ VK_OBJECT_TYPE_DESCRIPTOR_SET = 23,
+ VK_OBJECT_TYPE_FRAMEBUFFER = 24,
+ VK_OBJECT_TYPE_COMMAND_POOL = 25,
+ VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION = 1000156000,
+ VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE = 1000085000,
+ VK_OBJECT_TYPE_SURFACE_KHR = 1000000000,
+ VK_OBJECT_TYPE_SWAPCHAIN_KHR = 1000001000,
+ VK_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT = 1000011000
+} VkObjectType;
+
+typedef enum VkDescriptorUpdateTemplateType {
+ VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET = 0
+} VkDescriptorUpdateTemplateType;
+
+
+typedef enum VkPointClippingBehavior {
+ VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES = 0,
+ VK_POINT_CLIPPING_BEHAVIOR_USER_CLIP_PLANES_ONLY = 1
+} VkPointClippingBehavior;
+
+
+typedef enum VkColorSpaceKHR {
+ VK_COLOR_SPACE_SRGB_NONLINEAR_KHR = 0,
+ VK_COLORSPACE_SRGB_NONLINEAR_KHR = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR
+} VkColorSpaceKHR;
+
+typedef enum VkCompositeAlphaFlagBitsKHR {
+ VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR = 1,
+ VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR = 2,
+ VK_COMPOSITE_ALPHA_POST_MULTIPLIED_BIT_KHR = 4,
+ VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR = 8
+} VkCompositeAlphaFlagBitsKHR;
+
+typedef enum VkPresentModeKHR {
+ VK_PRESENT_MODE_IMMEDIATE_KHR = 0,
+ VK_PRESENT_MODE_MAILBOX_KHR = 1,
+ VK_PRESENT_MODE_FIFO_KHR = 2,
+ VK_PRESENT_MODE_FIFO_RELAXED_KHR = 3
+} VkPresentModeKHR;
+
+typedef enum VkSurfaceTransformFlagBitsKHR {
+ VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR = 1,
+ VK_SURFACE_TRANSFORM_ROTATE_90_BIT_KHR = 2,
+ VK_SURFACE_TRANSFORM_ROTATE_180_BIT_KHR = 4,
+ VK_SURFACE_TRANSFORM_ROTATE_270_BIT_KHR = 8,
+ VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_BIT_KHR = 16,
+ VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_90_BIT_KHR = 32,
+ VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_180_BIT_KHR = 64,
+ VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_270_BIT_KHR = 128,
+ VK_SURFACE_TRANSFORM_INHERIT_BIT_KHR = 256
+} VkSurfaceTransformFlagBitsKHR;
+
+typedef enum VkDebugReportFlagBitsEXT {
+ VK_DEBUG_REPORT_INFORMATION_BIT_EXT = 1,
+ VK_DEBUG_REPORT_WARNING_BIT_EXT = 2,
+ VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT = 4,
+ VK_DEBUG_REPORT_ERROR_BIT_EXT = 8,
+ VK_DEBUG_REPORT_DEBUG_BIT_EXT = 16
+} VkDebugReportFlagBitsEXT;
+
+typedef enum VkDebugReportObjectTypeEXT {
+ VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT = 0,
+ VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT = 1,
+ VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT = 2,
+ VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT = 3,
+ VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT = 4,
+ VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT = 5,
+ VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT = 6,
+ VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT = 7,
+ VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT = 8,
+ VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT = 9,
+ VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT = 10,
+ VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT = 11,
+ VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT = 12,
+ VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT = 13,
+ VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT = 14,
+ VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT = 15,
+ VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT = 16,
+ VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT = 17,
+ VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT = 18,
+ VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT = 19,
+ VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT = 20,
+ VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT = 21,
+ VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT = 22,
+ VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT = 23,
+ VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT = 24,
+ VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT = 25,
+ VK_DEBUG_REPORT_OBJECT_TYPE_SURFACE_KHR_EXT = 26,
+ VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT = 27,
+ VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT_EXT = 28,
+ VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_EXT = VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT_EXT,
+ VK_DEBUG_REPORT_OBJECT_TYPE_DISPLAY_KHR_EXT = 29,
+ VK_DEBUG_REPORT_OBJECT_TYPE_DISPLAY_MODE_KHR_EXT = 30,
+ VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT_EXT = 33,
+ VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT = VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT_EXT,
+ VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_EXT = 1000156000,
+ VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_EXT = 1000085000
+} VkDebugReportObjectTypeEXT;
+
+typedef enum VkExternalMemoryHandleTypeFlagBits {
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT = 1,
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT = 2,
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT = 4,
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_BIT = 8,
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_KMT_BIT = 16,
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP_BIT = 32,
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE_BIT = 64
+} VkExternalMemoryHandleTypeFlagBits;
+
+typedef enum VkExternalMemoryFeatureFlagBits {
+ VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT = 1,
+ VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT = 2,
+ VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT = 4
+} VkExternalMemoryFeatureFlagBits;
+
+typedef enum VkExternalSemaphoreHandleTypeFlagBits {
+ VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT = 1,
+ VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT = 2,
+ VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT = 4,
+ VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE_BIT = 8,
+ VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_FENCE_BIT = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE_BIT,
+ VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT = 16
+} VkExternalSemaphoreHandleTypeFlagBits;
+
+typedef enum VkExternalSemaphoreFeatureFlagBits {
+ VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT = 1,
+ VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT = 2
+} VkExternalSemaphoreFeatureFlagBits;
+
+typedef enum VkSemaphoreImportFlagBits {
+ VK_SEMAPHORE_IMPORT_TEMPORARY_BIT = 1
+} VkSemaphoreImportFlagBits;
+
+typedef enum VkExternalFenceHandleTypeFlagBits {
+ VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT = 1,
+ VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_BIT = 2,
+ VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT = 4,
+ VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT = 8
+} VkExternalFenceHandleTypeFlagBits;
+
+typedef enum VkExternalFenceFeatureFlagBits {
+ VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT = 1,
+ VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT = 2
+} VkExternalFenceFeatureFlagBits;
+
+typedef enum VkFenceImportFlagBits {
+ VK_FENCE_IMPORT_TEMPORARY_BIT = 1
+} VkFenceImportFlagBits;
+
+typedef enum VkPeerMemoryFeatureFlagBits {
+ VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT = 1,
+ VK_PEER_MEMORY_FEATURE_COPY_DST_BIT = 2,
+ VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT = 4,
+ VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT = 8
+} VkPeerMemoryFeatureFlagBits;
+
+typedef enum VkMemoryAllocateFlagBits {
+ VK_MEMORY_ALLOCATE_DEVICE_MASK_BIT = 1
+} VkMemoryAllocateFlagBits;
+
+typedef enum VkDeviceGroupPresentModeFlagBitsKHR {
+ VK_DEVICE_GROUP_PRESENT_MODE_LOCAL_BIT_KHR = 1,
+ VK_DEVICE_GROUP_PRESENT_MODE_REMOTE_BIT_KHR = 2,
+ VK_DEVICE_GROUP_PRESENT_MODE_SUM_BIT_KHR = 4,
+ VK_DEVICE_GROUP_PRESENT_MODE_LOCAL_MULTI_DEVICE_BIT_KHR = 8
+} VkDeviceGroupPresentModeFlagBitsKHR;
+
+typedef enum VkSwapchainCreateFlagBitsKHR {
+ VK_SWAPCHAIN_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT_KHR = 1,
+ VK_SWAPCHAIN_CREATE_PROTECTED_BIT_KHR = 2
+} VkSwapchainCreateFlagBitsKHR;
+
+typedef enum VkSubgroupFeatureFlagBits {
+ VK_SUBGROUP_FEATURE_BASIC_BIT = 1,
+ VK_SUBGROUP_FEATURE_VOTE_BIT = 2,
+ VK_SUBGROUP_FEATURE_ARITHMETIC_BIT = 4,
+ VK_SUBGROUP_FEATURE_BALLOT_BIT = 8,
+ VK_SUBGROUP_FEATURE_SHUFFLE_BIT = 16,
+ VK_SUBGROUP_FEATURE_SHUFFLE_RELATIVE_BIT = 32,
+ VK_SUBGROUP_FEATURE_CLUSTERED_BIT = 64,
+ VK_SUBGROUP_FEATURE_QUAD_BIT = 128
+} VkSubgroupFeatureFlagBits;
+
+typedef enum VkTessellationDomainOrigin {
+ VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT = 0,
+ VK_TESSELLATION_DOMAIN_ORIGIN_LOWER_LEFT = 1
+} VkTessellationDomainOrigin;
+
+typedef enum VkSamplerYcbcrModelConversion {
+ VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY = 0,
+ VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_IDENTITY = 1,
+ VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709 = 2,
+ VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601 = 3,
+ VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020 = 4
+} VkSamplerYcbcrModelConversion;
+
+typedef enum VkSamplerYcbcrRange {
+ VK_SAMPLER_YCBCR_RANGE_ITU_FULL = 0,
+ VK_SAMPLER_YCBCR_RANGE_ITU_NARROW = 1
+} VkSamplerYcbcrRange;
+
+typedef enum VkChromaLocation {
+ VK_CHROMA_LOCATION_COSITED_EVEN = 0,
+ VK_CHROMA_LOCATION_MIDPOINT = 1
+} VkChromaLocation;
+
+typedef enum VkVendorId {
+ VK_VENDOR_ID_VIV = 0x10001,
+ VK_VENDOR_ID_VSI = 0x10002,
+ VK_VENDOR_ID_KAZAN = 0x10003,
+ VK_VENDOR_ID_CODEPLAY = 0x10004,
+ VK_VENDOR_ID_MESA = 0x10005
+} VkVendorId;
+
+typedef void (VKAPI_PTR *PFN_vkInternalAllocationNotification)(
+ void* pUserData,
+ size_t size,
+ VkInternalAllocationType allocationType,
+ VkSystemAllocationScope allocationScope);
+
+typedef void (VKAPI_PTR *PFN_vkInternalFreeNotification)(
+ void* pUserData,
+ size_t size,
+ VkInternalAllocationType allocationType,
+ VkSystemAllocationScope allocationScope);
+
+typedef void* (VKAPI_PTR *PFN_vkReallocationFunction)(
+ void* pUserData,
+ void* pOriginal,
+ size_t size,
+ size_t alignment,
+ VkSystemAllocationScope allocationScope);
+
+typedef void* (VKAPI_PTR *PFN_vkAllocationFunction)(
+ void* pUserData,
+ size_t size,
+ size_t alignment,
+ VkSystemAllocationScope allocationScope);
+
+typedef void (VKAPI_PTR *PFN_vkFreeFunction)(
+ void* pUserData,
+ void* pMemory);
+
+typedef void (VKAPI_PTR *PFN_vkVoidFunction)(void);
+
+typedef struct VkBaseOutStructure {
+ VkStructureType sType;
+ struct VkBaseOutStructure * pNext;
+} VkBaseOutStructure;
+
+typedef struct VkBaseInStructure {
+ VkStructureType sType;
+ const struct VkBaseInStructure * pNext;
+} VkBaseInStructure;
+
+typedef struct VkOffset2D {
+ int32_t x;
+ int32_t y;
+} VkOffset2D;
+
+typedef struct VkOffset3D {
+ int32_t x;
+ int32_t y;
+ int32_t z;
+} VkOffset3D;
+
+typedef struct VkExtent2D {
+ uint32_t width;
+ uint32_t height;
+} VkExtent2D;
+
+typedef struct VkExtent3D {
+ uint32_t width;
+ uint32_t height;
+ uint32_t depth;
+} VkExtent3D;
+
+typedef struct VkViewport {
+ float x;
+ float y;
+ float width;
+ float height;
+ float minDepth;
+ float maxDepth;
+} VkViewport;
+
+typedef struct VkRect2D {
+ VkOffset2D offset;
+ VkExtent2D extent;
+} VkRect2D;
+
+typedef struct VkClearRect {
+ VkRect2D rect;
+ uint32_t baseArrayLayer;
+ uint32_t layerCount;
+} VkClearRect;
+
+typedef struct VkComponentMapping {
+ VkComponentSwizzle r;
+ VkComponentSwizzle g;
+ VkComponentSwizzle b;
+ VkComponentSwizzle a;
+} VkComponentMapping;
+
+typedef struct VkExtensionProperties {
+ char extensionName [ VK_MAX_EXTENSION_NAME_SIZE ];
+ uint32_t specVersion;
+} VkExtensionProperties;
+
+typedef struct VkLayerProperties {
+ char layerName [ VK_MAX_EXTENSION_NAME_SIZE ];
+ uint32_t specVersion;
+ uint32_t implementationVersion;
+ char description [ VK_MAX_DESCRIPTION_SIZE ];
+} VkLayerProperties;
+
+typedef struct VkApplicationInfo {
+ VkStructureType sType;
+ const void * pNext;
+ const char * pApplicationName;
+ uint32_t applicationVersion;
+ const char * pEngineName;
+ uint32_t engineVersion;
+ uint32_t apiVersion;
+} VkApplicationInfo;
+
+typedef struct VkAllocationCallbacks {
+ void * pUserData;
+ PFN_vkAllocationFunction pfnAllocation;
+ PFN_vkReallocationFunction pfnReallocation;
+ PFN_vkFreeFunction pfnFree;
+ PFN_vkInternalAllocationNotification pfnInternalAllocation;
+ PFN_vkInternalFreeNotification pfnInternalFree;
+} VkAllocationCallbacks;
+
+typedef struct VkDescriptorImageInfo {
+ VkSampler sampler;
+ VkImageView imageView;
+ VkImageLayout imageLayout;
+} VkDescriptorImageInfo;
+
+typedef struct VkCopyDescriptorSet {
+ VkStructureType sType;
+ const void * pNext;
+ VkDescriptorSet srcSet;
+ uint32_t srcBinding;
+ uint32_t srcArrayElement;
+ VkDescriptorSet dstSet;
+ uint32_t dstBinding;
+ uint32_t dstArrayElement;
+ uint32_t descriptorCount;
+} VkCopyDescriptorSet;
+
+typedef struct VkDescriptorPoolSize {
+ VkDescriptorType type;
+ uint32_t descriptorCount;
+} VkDescriptorPoolSize;
+
+typedef struct VkDescriptorSetAllocateInfo {
+ VkStructureType sType;
+ const void * pNext;
+ VkDescriptorPool descriptorPool;
+ uint32_t descriptorSetCount;
+ const VkDescriptorSetLayout * pSetLayouts;
+} VkDescriptorSetAllocateInfo;
+
+typedef struct VkSpecializationMapEntry {
+ uint32_t constantID;
+ uint32_t offset;
+ size_t size;
+} VkSpecializationMapEntry;
+
+typedef struct VkSpecializationInfo {
+ uint32_t mapEntryCount;
+ const VkSpecializationMapEntry * pMapEntries;
+ size_t dataSize;
+ const void * pData;
+} VkSpecializationInfo;
+
+typedef struct VkVertexInputBindingDescription {
+ uint32_t binding;
+ uint32_t stride;
+ VkVertexInputRate inputRate;
+} VkVertexInputBindingDescription;
+
+typedef struct VkVertexInputAttributeDescription {
+ uint32_t location;
+ uint32_t binding;
+ VkFormat format;
+ uint32_t offset;
+} VkVertexInputAttributeDescription;
+
+typedef struct VkStencilOpState {
+ VkStencilOp failOp;
+ VkStencilOp passOp;
+ VkStencilOp depthFailOp;
+ VkCompareOp compareOp;
+ uint32_t compareMask;
+ uint32_t writeMask;
+ uint32_t reference;
+} VkStencilOpState;
+
+typedef struct VkCommandBufferAllocateInfo {
+ VkStructureType sType;
+ const void * pNext;
+ VkCommandPool commandPool;
+ VkCommandBufferLevel level;
+ uint32_t commandBufferCount;
+} VkCommandBufferAllocateInfo;
+
+typedef union VkClearColorValue {
+ float float32 [4];
+ int32_t int32 [4];
+ uint32_t uint32 [4];
+} VkClearColorValue;
+
+typedef struct VkClearDepthStencilValue {
+ float depth;
+ uint32_t stencil;
+} VkClearDepthStencilValue;
+
+typedef union VkClearValue {
+ VkClearColorValue color;
+ VkClearDepthStencilValue depthStencil;
+} VkClearValue;
+
+typedef struct VkAttachmentReference {
+ uint32_t attachment;
+ VkImageLayout layout;
+} VkAttachmentReference;
+
+typedef struct VkDrawIndirectCommand {
+ uint32_t vertexCount;
+ uint32_t instanceCount;
+ uint32_t firstVertex;
+ uint32_t firstInstance;
+} VkDrawIndirectCommand;
+
+typedef struct VkDrawIndexedIndirectCommand {
+ uint32_t indexCount;
+ uint32_t instanceCount;
+ uint32_t firstIndex;
+ int32_t vertexOffset;
+ uint32_t firstInstance;
+} VkDrawIndexedIndirectCommand;
+
+typedef struct VkDispatchIndirectCommand {
+ uint32_t x;
+ uint32_t y;
+ uint32_t z;
+} VkDispatchIndirectCommand;
+
+typedef struct VkSurfaceFormatKHR {
+ VkFormat format;
+ VkColorSpaceKHR colorSpace;
+} VkSurfaceFormatKHR;
+
+typedef struct VkPresentInfoKHR {
+ VkStructureType sType;
+ const void * pNext;
+ uint32_t waitSemaphoreCount;
+ const VkSemaphore * pWaitSemaphores;
+ uint32_t swapchainCount;
+ const VkSwapchainKHR * pSwapchains;
+ const uint32_t * pImageIndices;
+ VkResult * pResults;
+} VkPresentInfoKHR;
+
+typedef struct VkPhysicalDeviceExternalImageFormatInfo {
+ VkStructureType sType;
+ const void * pNext;
+ VkExternalMemoryHandleTypeFlagBits handleType;
+} VkPhysicalDeviceExternalImageFormatInfo;
+
+typedef struct VkPhysicalDeviceExternalSemaphoreInfo {
+ VkStructureType sType;
+ const void * pNext;
+ VkExternalSemaphoreHandleTypeFlagBits handleType;
+} VkPhysicalDeviceExternalSemaphoreInfo;
+
+typedef struct VkPhysicalDeviceExternalFenceInfo {
+ VkStructureType sType;
+ const void * pNext;
+ VkExternalFenceHandleTypeFlagBits handleType;
+} VkPhysicalDeviceExternalFenceInfo;
+
+typedef struct VkPhysicalDeviceMultiviewProperties {
+ VkStructureType sType;
+ void * pNext;
+ uint32_t maxMultiviewViewCount;
+ uint32_t maxMultiviewInstanceIndex;
+} VkPhysicalDeviceMultiviewProperties;
+
+typedef struct VkRenderPassMultiviewCreateInfo {
+ VkStructureType sType;
+ const void * pNext;
+ uint32_t subpassCount;
+ const uint32_t * pViewMasks;
+ uint32_t dependencyCount;
+ const int32_t * pViewOffsets;
+ uint32_t correlationMaskCount;
+ const uint32_t * pCorrelationMasks;
+} VkRenderPassMultiviewCreateInfo;
+
+typedef struct VkBindBufferMemoryDeviceGroupInfo {
+ VkStructureType sType;
+ const void * pNext;
+ uint32_t deviceIndexCount;
+ const uint32_t * pDeviceIndices;
+} VkBindBufferMemoryDeviceGroupInfo;
+
+typedef struct VkBindImageMemoryDeviceGroupInfo {
+ VkStructureType sType;
+ const void * pNext;
+ uint32_t deviceIndexCount;
+ const uint32_t * pDeviceIndices;
+ uint32_t splitInstanceBindRegionCount;
+ const VkRect2D * pSplitInstanceBindRegions;
+} VkBindImageMemoryDeviceGroupInfo;
+
+typedef struct VkDeviceGroupRenderPassBeginInfo {
+ VkStructureType sType;
+ const void * pNext;
+ uint32_t deviceMask;
+ uint32_t deviceRenderAreaCount;
+ const VkRect2D * pDeviceRenderAreas;
+} VkDeviceGroupRenderPassBeginInfo;
+
+typedef struct VkDeviceGroupCommandBufferBeginInfo {
+ VkStructureType sType;
+ const void * pNext;
+ uint32_t deviceMask;
+} VkDeviceGroupCommandBufferBeginInfo;
+
+typedef struct VkDeviceGroupSubmitInfo {
+ VkStructureType sType;
+ const void * pNext;
+ uint32_t waitSemaphoreCount;
+ const uint32_t * pWaitSemaphoreDeviceIndices;
+ uint32_t commandBufferCount;
+ const uint32_t * pCommandBufferDeviceMasks;
+ uint32_t signalSemaphoreCount;
+ const uint32_t * pSignalSemaphoreDeviceIndices;
+} VkDeviceGroupSubmitInfo;
+
+typedef struct VkDeviceGroupBindSparseInfo {
+ VkStructureType sType;
+ const void * pNext;
+ uint32_t resourceDeviceIndex;
+ uint32_t memoryDeviceIndex;
+} VkDeviceGroupBindSparseInfo;
+
+typedef struct VkImageSwapchainCreateInfoKHR {
+ VkStructureType sType;
+ const void * pNext;
+ VkSwapchainKHR swapchain;
+} VkImageSwapchainCreateInfoKHR;
+
+typedef struct VkBindImageMemorySwapchainInfoKHR {
+ VkStructureType sType;
+ const void * pNext;
+ VkSwapchainKHR swapchain;
+ uint32_t imageIndex;
+} VkBindImageMemorySwapchainInfoKHR;
+
+typedef struct VkAcquireNextImageInfoKHR {
+ VkStructureType sType;
+ const void * pNext;
+ VkSwapchainKHR swapchain;
+ uint64_t timeout;
+ VkSemaphore semaphore;
+ VkFence fence;
+ uint32_t deviceMask;
+} VkAcquireNextImageInfoKHR;
+
+typedef struct VkDeviceGroupPresentInfoKHR {
+ VkStructureType sType;
+ const void * pNext;
+ uint32_t swapchainCount;
+ const uint32_t * pDeviceMasks;
+ VkDeviceGroupPresentModeFlagBitsKHR mode;
+} VkDeviceGroupPresentInfoKHR;
+
+typedef struct VkDeviceGroupDeviceCreateInfo {
+ VkStructureType sType;
+ const void * pNext;
+ uint32_t physicalDeviceCount;
+ const VkPhysicalDevice * pPhysicalDevices;
+} VkDeviceGroupDeviceCreateInfo;
+
+typedef struct VkDescriptorUpdateTemplateEntry {
+ uint32_t dstBinding;
+ uint32_t dstArrayElement;
+ uint32_t descriptorCount;
+ VkDescriptorType descriptorType;
+ size_t offset;
+ size_t stride;
+} VkDescriptorUpdateTemplateEntry;
+
+typedef struct VkBufferMemoryRequirementsInfo2 {
+ VkStructureType sType;
+ const void * pNext;
+ VkBuffer buffer;
+} VkBufferMemoryRequirementsInfo2;
+
+typedef struct VkImageMemoryRequirementsInfo2 {
+ VkStructureType sType;
+ const void * pNext;
+ VkImage image;
+} VkImageMemoryRequirementsInfo2;
+
+typedef struct VkImageSparseMemoryRequirementsInfo2 {
+ VkStructureType sType;
+ const void * pNext;
+ VkImage image;
+} VkImageSparseMemoryRequirementsInfo2;
+
+typedef struct VkPhysicalDevicePointClippingProperties {
+ VkStructureType sType;
+ void * pNext;
+ VkPointClippingBehavior pointClippingBehavior;
+} VkPhysicalDevicePointClippingProperties;
+
+typedef struct VkMemoryDedicatedAllocateInfo {
+ VkStructureType sType;
+ const void * pNext;
+ VkImage image;
+ VkBuffer buffer;
+} VkMemoryDedicatedAllocateInfo;
+
+typedef struct VkPipelineTessellationDomainOriginStateCreateInfo {
+ VkStructureType sType;
+ const void * pNext;
+ VkTessellationDomainOrigin domainOrigin;
+} VkPipelineTessellationDomainOriginStateCreateInfo;
+
+typedef struct VkSamplerYcbcrConversionInfo {
+ VkStructureType sType;
+ const void * pNext;
+ VkSamplerYcbcrConversion conversion;
+} VkSamplerYcbcrConversionInfo;
+
+typedef struct VkBindImagePlaneMemoryInfo {
+ VkStructureType sType;
+ const void * pNext;
+ VkImageAspectFlagBits planeAspect;
+} VkBindImagePlaneMemoryInfo;
+
+typedef struct VkImagePlaneMemoryRequirementsInfo {
+ VkStructureType sType;
+ const void * pNext;
+ VkImageAspectFlagBits planeAspect;
+} VkImagePlaneMemoryRequirementsInfo;
+
+typedef struct VkSamplerYcbcrConversionImageFormatProperties {
+ VkStructureType sType;
+ void * pNext;
+ uint32_t combinedImageSamplerDescriptorCount;
+} VkSamplerYcbcrConversionImageFormatProperties;
+
+typedef uint32_t VkSampleMask;
+
+typedef uint32_t VkBool32;
+
+typedef uint32_t VkFlags;
+
+typedef uint64_t VkDeviceSize;
+
+typedef uint64_t VkDeviceAddress;
+
+typedef VkFlags VkFramebufferCreateFlags;
+
+typedef VkFlags VkQueryPoolCreateFlags;
+
+typedef VkFlags VkRenderPassCreateFlags;
+
+typedef VkFlags VkSamplerCreateFlags;
+
+typedef VkFlags VkPipelineLayoutCreateFlags;
+
+typedef VkFlags VkPipelineCacheCreateFlags;
+
+typedef VkFlags VkPipelineDepthStencilStateCreateFlags;
+
+typedef VkFlags VkPipelineDynamicStateCreateFlags;
+
+typedef VkFlags VkPipelineColorBlendStateCreateFlags;
+
+typedef VkFlags VkPipelineMultisampleStateCreateFlags;
+
+typedef VkFlags VkPipelineRasterizationStateCreateFlags;
+
+typedef VkFlags VkPipelineViewportStateCreateFlags;
+
+typedef VkFlags VkPipelineTessellationStateCreateFlags;
+
+typedef VkFlags VkPipelineInputAssemblyStateCreateFlags;
+
+typedef VkFlags VkPipelineVertexInputStateCreateFlags;
+
+typedef VkFlags VkPipelineShaderStageCreateFlags;
+
+typedef VkFlags VkDescriptorSetLayoutCreateFlags;
+
+typedef VkFlags VkBufferViewCreateFlags;
+
+typedef VkFlags VkInstanceCreateFlags;
+
+typedef VkFlags VkDeviceCreateFlags;
+
+typedef VkFlags VkDeviceQueueCreateFlags;
+
+typedef VkFlags VkQueueFlags;
+
+typedef VkFlags VkMemoryPropertyFlags;
+
+typedef VkFlags VkMemoryHeapFlags;
+
+typedef VkFlags VkAccessFlags;
+
+typedef VkFlags VkBufferUsageFlags;
+
+typedef VkFlags VkBufferCreateFlags;
+
+typedef VkFlags VkShaderStageFlags;
+
+typedef VkFlags VkImageUsageFlags;
+
+typedef VkFlags VkImageCreateFlags;
+
+typedef VkFlags VkImageViewCreateFlags;
+
+typedef VkFlags VkPipelineCreateFlags;
+
+typedef VkFlags VkColorComponentFlags;
+
+typedef VkFlags VkFenceCreateFlags;
+
+typedef VkFlags VkSemaphoreCreateFlags;
+
+typedef VkFlags VkFormatFeatureFlags;
+
+typedef VkFlags VkQueryControlFlags;
+
+typedef VkFlags VkQueryResultFlags;
+
+typedef VkFlags VkShaderModuleCreateFlags;
+
+typedef VkFlags VkEventCreateFlags;
+
+typedef VkFlags VkCommandPoolCreateFlags;
+
+typedef VkFlags VkCommandPoolResetFlags;
+
+typedef VkFlags VkCommandBufferResetFlags;
+
+typedef VkFlags VkCommandBufferUsageFlags;
+
+typedef VkFlags VkQueryPipelineStatisticFlags;
+
+typedef VkFlags VkMemoryMapFlags;
+
+typedef VkFlags VkImageAspectFlags;
+
+typedef VkFlags VkSparseMemoryBindFlags;
+
+typedef VkFlags VkSparseImageFormatFlags;
+
+typedef VkFlags VkSubpassDescriptionFlags;
+
+typedef VkFlags VkPipelineStageFlags;
+
+typedef VkFlags VkSampleCountFlags;
+
+typedef VkFlags VkAttachmentDescriptionFlags;
+
+typedef VkFlags VkStencilFaceFlags;
+
+typedef VkFlags VkCullModeFlags;
+
+typedef VkFlags VkDescriptorPoolCreateFlags;
+
+typedef VkFlags VkDescriptorPoolResetFlags;
+
+typedef VkFlags VkDependencyFlags;
+
+typedef VkFlags VkSubgroupFeatureFlags;
+
+typedef VkFlags VkDescriptorUpdateTemplateCreateFlags;
+
+typedef VkFlags VkCompositeAlphaFlagsKHR;
+
+typedef VkFlags VkSurfaceTransformFlagsKHR;
+
+typedef VkFlags VkSwapchainCreateFlagsKHR;
+
+typedef VkFlags VkPeerMemoryFeatureFlags;
+
+typedef VkFlags VkMemoryAllocateFlags;
+
+typedef VkFlags VkDeviceGroupPresentModeFlagsKHR;
+
+typedef VkFlags VkDebugReportFlagsEXT;
+
+typedef VkFlags VkCommandPoolTrimFlags;
+
+typedef VkFlags VkExternalMemoryHandleTypeFlags;
+
+typedef VkFlags VkExternalMemoryFeatureFlags;
+
+typedef VkFlags VkExternalSemaphoreHandleTypeFlags;
+
+typedef VkFlags VkExternalSemaphoreFeatureFlags;
+
+typedef VkFlags VkSemaphoreImportFlags;
+
+typedef VkFlags VkExternalFenceHandleTypeFlags;
+
+typedef VkFlags VkExternalFenceFeatureFlags;
+
+typedef VkFlags VkFenceImportFlags;
+
+typedef VkBool32 (VKAPI_PTR *PFN_vkDebugReportCallbackEXT)(
+ VkDebugReportFlagsEXT flags,
+ VkDebugReportObjectTypeEXT objectType,
+ uint64_t object,
+ size_t location,
+ int32_t messageCode,
+ const char* pLayerPrefix,
+ const char* pMessage,
+ void* pUserData);
+
+typedef struct VkDeviceQueueCreateInfo {
+ VkStructureType sType;
+ const void * pNext;
+ VkDeviceQueueCreateFlags flags;
+ uint32_t queueFamilyIndex;
+ uint32_t queueCount;
+ const float * pQueuePriorities;
+} VkDeviceQueueCreateInfo;
+
+typedef struct VkInstanceCreateInfo {
+ VkStructureType sType;
+ const void * pNext;
+ VkInstanceCreateFlags flags;
+ const VkApplicationInfo * pApplicationInfo;
+ uint32_t enabledLayerCount;
+ const char * const* ppEnabledLayerNames;
+ uint32_t enabledExtensionCount;
+ const char * const* ppEnabledExtensionNames;
+} VkInstanceCreateInfo;
+
+typedef struct VkQueueFamilyProperties {
+ VkQueueFlags queueFlags;
+ uint32_t queueCount;
+ uint32_t timestampValidBits;
+ VkExtent3D minImageTransferGranularity;
+} VkQueueFamilyProperties;
+
+typedef struct VkMemoryAllocateInfo {
+ VkStructureType sType;
+ const void * pNext;
+ VkDeviceSize allocationSize;
+ uint32_t memoryTypeIndex;
+} VkMemoryAllocateInfo;
+
+typedef struct VkMemoryRequirements {
+ VkDeviceSize size;
+ VkDeviceSize alignment;
+ uint32_t memoryTypeBits;
+} VkMemoryRequirements;
+
+typedef struct VkSparseImageFormatProperties {
+ VkImageAspectFlags aspectMask;
+ VkExtent3D imageGranularity;
+ VkSparseImageFormatFlags flags;
+} VkSparseImageFormatProperties;
+
+typedef struct VkSparseImageMemoryRequirements {
+ VkSparseImageFormatProperties formatProperties;
+ uint32_t imageMipTailFirstLod;
+ VkDeviceSize imageMipTailSize;
+ VkDeviceSize imageMipTailOffset;
+ VkDeviceSize imageMipTailStride;
+} VkSparseImageMemoryRequirements;
+
+typedef struct VkMemoryType {
+ VkMemoryPropertyFlags propertyFlags;
+ uint32_t heapIndex;
+} VkMemoryType;
+
+typedef struct VkMemoryHeap {
+ VkDeviceSize size;
+ VkMemoryHeapFlags flags;
+} VkMemoryHeap;
+
+typedef struct VkMappedMemoryRange {
+ VkStructureType sType;
+ const void * pNext;
+ VkDeviceMemory memory;
+ VkDeviceSize offset;
+ VkDeviceSize size;
+} VkMappedMemoryRange;
+
+typedef struct VkFormatProperties {
+ VkFormatFeatureFlags linearTilingFeatures;
+ VkFormatFeatureFlags optimalTilingFeatures;
+ VkFormatFeatureFlags bufferFeatures;
+} VkFormatProperties;
+
+typedef struct VkImageFormatProperties {
+ VkExtent3D maxExtent;
+ uint32_t maxMipLevels;
+ uint32_t maxArrayLayers;
+ VkSampleCountFlags sampleCounts;
+ VkDeviceSize maxResourceSize;
+} VkImageFormatProperties;
+
+typedef struct VkDescriptorBufferInfo {
+ VkBuffer buffer;
+ VkDeviceSize offset;
+ VkDeviceSize range;
+} VkDescriptorBufferInfo;
+
+typedef struct VkWriteDescriptorSet {
+ VkStructureType sType;
+ const void * pNext;
+ VkDescriptorSet dstSet;
+ uint32_t dstBinding;
+ uint32_t dstArrayElement;
+ uint32_t descriptorCount;
+ VkDescriptorType descriptorType;
+ const VkDescriptorImageInfo * pImageInfo;
+ const VkDescriptorBufferInfo * pBufferInfo;
+ const VkBufferView * pTexelBufferView;
+} VkWriteDescriptorSet;
+
+typedef struct VkBufferCreateInfo {
+ VkStructureType sType;
+ const void * pNext;
+ VkBufferCreateFlags flags;
+ VkDeviceSize size;
+ VkBufferUsageFlags usage;
+ VkSharingMode sharingMode;
+ uint32_t queueFamilyIndexCount;
+ const uint32_t * pQueueFamilyIndices;
+} VkBufferCreateInfo;
+
+typedef struct VkBufferViewCreateInfo {
+ VkStructureType sType;
+ const void * pNext;
+ VkBufferViewCreateFlags flags;
+ VkBuffer buffer;
+ VkFormat format;
+ VkDeviceSize offset;
+ VkDeviceSize range;
+} VkBufferViewCreateInfo;
+
+typedef struct VkImageSubresource {
+ VkImageAspectFlags aspectMask;
+ uint32_t mipLevel;
+ uint32_t arrayLayer;
+} VkImageSubresource;
+
+typedef struct VkImageSubresourceLayers {
+ VkImageAspectFlags aspectMask;
+ uint32_t mipLevel;
+ uint32_t baseArrayLayer;
+ uint32_t layerCount;
+} VkImageSubresourceLayers;
+
+typedef struct VkImageSubresourceRange {
+ VkImageAspectFlags aspectMask;
+ uint32_t baseMipLevel;
+ uint32_t levelCount;
+ uint32_t baseArrayLayer;
+ uint32_t layerCount;
+} VkImageSubresourceRange;
+
+typedef struct VkMemoryBarrier {
+ VkStructureType sType;
+ const void * pNext;
+ VkAccessFlags srcAccessMask;
+ VkAccessFlags dstAccessMask;
+} VkMemoryBarrier;
+
+typedef struct VkBufferMemoryBarrier {
+ VkStructureType sType;
+ const void * pNext;
+ VkAccessFlags srcAccessMask;
+ VkAccessFlags dstAccessMask;
+ uint32_t srcQueueFamilyIndex;
+ uint32_t dstQueueFamilyIndex;
+ VkBuffer buffer;
+ VkDeviceSize offset;
+ VkDeviceSize size;
+} VkBufferMemoryBarrier;
+
+typedef struct VkImageMemoryBarrier {
+ VkStructureType sType;
+ const void * pNext;
+ VkAccessFlags srcAccessMask;
+ VkAccessFlags dstAccessMask;
+ VkImageLayout oldLayout;
+ VkImageLayout newLayout;
+ uint32_t srcQueueFamilyIndex;
+ uint32_t dstQueueFamilyIndex;
+ VkImage image;
+ VkImageSubresourceRange subresourceRange;
+} VkImageMemoryBarrier;
+
+typedef struct VkImageCreateInfo {
+ VkStructureType sType;
+ const void * pNext;
+ VkImageCreateFlags flags;
+ VkImageType imageType;
+ VkFormat format;
+ VkExtent3D extent;
+ uint32_t mipLevels;
+ uint32_t arrayLayers;
+ VkSampleCountFlagBits samples;
+ VkImageTiling tiling;
+ VkImageUsageFlags usage;
+ VkSharingMode sharingMode;
+ uint32_t queueFamilyIndexCount;
+ const uint32_t * pQueueFamilyIndices;
+ VkImageLayout initialLayout;
+} VkImageCreateInfo;
+
+typedef struct VkSubresourceLayout {
+ VkDeviceSize offset;
+ VkDeviceSize size;
+ VkDeviceSize rowPitch;
+ VkDeviceSize arrayPitch;
+ VkDeviceSize depthPitch;
+} VkSubresourceLayout;
+
+typedef struct VkImageViewCreateInfo {
+ VkStructureType sType;
+ const void * pNext;
+ VkImageViewCreateFlags flags;
+ VkImage image;
+ VkImageViewType viewType;
+ VkFormat format;
+ VkComponentMapping components;
+ VkImageSubresourceRange subresourceRange;
+} VkImageViewCreateInfo;
+
+typedef struct VkBufferCopy {
+ VkDeviceSize srcOffset;
+ VkDeviceSize dstOffset;
+ VkDeviceSize size;
+} VkBufferCopy;
+
+typedef struct VkSparseMemoryBind {
+ VkDeviceSize resourceOffset;
+ VkDeviceSize size;
+ VkDeviceMemory memory;
+ VkDeviceSize memoryOffset;
+ VkSparseMemoryBindFlags flags;
+} VkSparseMemoryBind;
+
+typedef struct VkSparseImageMemoryBind {
+ VkImageSubresource subresource;
+ VkOffset3D offset;
+ VkExtent3D extent;
+ VkDeviceMemory memory;
+ VkDeviceSize memoryOffset;
+ VkSparseMemoryBindFlags flags;
+} VkSparseImageMemoryBind;
+
+typedef struct VkSparseBufferMemoryBindInfo {
+ VkBuffer buffer;
+ uint32_t bindCount;
+ const VkSparseMemoryBind * pBinds;
+} VkSparseBufferMemoryBindInfo;
+
+typedef struct VkSparseImageOpaqueMemoryBindInfo {
+ VkImage image;
+ uint32_t bindCount;
+ const VkSparseMemoryBind * pBinds;
+} VkSparseImageOpaqueMemoryBindInfo;
+
+typedef struct VkSparseImageMemoryBindInfo {
+ VkImage image;
+ uint32_t bindCount;
+ const VkSparseImageMemoryBind * pBinds;
+} VkSparseImageMemoryBindInfo;
+
+typedef struct VkBindSparseInfo {
+ VkStructureType sType;
+ const void * pNext;
+ uint32_t waitSemaphoreCount;
+ const VkSemaphore * pWaitSemaphores;
+ uint32_t bufferBindCount;
+ const VkSparseBufferMemoryBindInfo * pBufferBinds;
+ uint32_t imageOpaqueBindCount;
+ const VkSparseImageOpaqueMemoryBindInfo * pImageOpaqueBinds;
+ uint32_t imageBindCount;
+ const VkSparseImageMemoryBindInfo * pImageBinds;
+ uint32_t signalSemaphoreCount;
+ const VkSemaphore * pSignalSemaphores;
+} VkBindSparseInfo;
+
+typedef struct VkImageCopy {
+ VkImageSubresourceLayers srcSubresource;
+ VkOffset3D srcOffset;
+ VkImageSubresourceLayers dstSubresource;
+ VkOffset3D dstOffset;
+ VkExtent3D extent;
+} VkImageCopy;
+
+typedef struct VkImageBlit {
+ VkImageSubresourceLayers srcSubresource;
+ VkOffset3D srcOffsets [2];
+ VkImageSubresourceLayers dstSubresource;
+ VkOffset3D dstOffsets [2];
+} VkImageBlit;
+
+typedef struct VkBufferImageCopy {
+ VkDeviceSize bufferOffset;
+ uint32_t bufferRowLength;
+ uint32_t bufferImageHeight;
+ VkImageSubresourceLayers imageSubresource;
+ VkOffset3D imageOffset;
+ VkExtent3D imageExtent;
+} VkBufferImageCopy;
+
+typedef struct VkImageResolve {
+ VkImageSubresourceLayers srcSubresource;
+ VkOffset3D srcOffset;
+ VkImageSubresourceLayers dstSubresource;
+ VkOffset3D dstOffset;
+ VkExtent3D extent;
+} VkImageResolve;
+
+typedef struct VkShaderModuleCreateInfo {
+ VkStructureType sType;
+ const void * pNext;
+ VkShaderModuleCreateFlags flags;
+ size_t codeSize;
+ const uint32_t * pCode;
+} VkShaderModuleCreateInfo;
+
+typedef struct VkDescriptorSetLayoutBinding {
+ uint32_t binding;
+ VkDescriptorType descriptorType;
+ uint32_t descriptorCount;
+ VkShaderStageFlags stageFlags;
+ const VkSampler * pImmutableSamplers;
+} VkDescriptorSetLayoutBinding;
+
+typedef struct VkDescriptorSetLayoutCreateInfo {
+ VkStructureType sType;
+ const void * pNext;
+ VkDescriptorSetLayoutCreateFlags flags;
+ uint32_t bindingCount;
+ const VkDescriptorSetLayoutBinding * pBindings;
+} VkDescriptorSetLayoutCreateInfo;
+
+typedef struct VkDescriptorPoolCreateInfo {
+ VkStructureType sType;
+ const void * pNext;
+ VkDescriptorPoolCreateFlags flags;
+ uint32_t maxSets;
+ uint32_t poolSizeCount;
+ const VkDescriptorPoolSize * pPoolSizes;
+} VkDescriptorPoolCreateInfo;
+
+typedef struct VkPipelineShaderStageCreateInfo {
+ VkStructureType sType;
+ const void * pNext;
+ VkPipelineShaderStageCreateFlags flags;
+ VkShaderStageFlagBits stage;
+ VkShaderModule module;
+ const char * pName;
+ const VkSpecializationInfo * pSpecializationInfo;
+} VkPipelineShaderStageCreateInfo;
+
+typedef struct VkComputePipelineCreateInfo {
+ VkStructureType sType;
+ const void * pNext;
+ VkPipelineCreateFlags flags;
+ VkPipelineShaderStageCreateInfo stage;
+ VkPipelineLayout layout;
+ VkPipeline basePipelineHandle;
+ int32_t basePipelineIndex;
+} VkComputePipelineCreateInfo;
+
+typedef struct VkPipelineVertexInputStateCreateInfo {
+ VkStructureType sType;
+ const void * pNext;
+ VkPipelineVertexInputStateCreateFlags flags;
+ uint32_t vertexBindingDescriptionCount;
+ const VkVertexInputBindingDescription * pVertexBindingDescriptions;
+ uint32_t vertexAttributeDescriptionCount;
+ const VkVertexInputAttributeDescription * pVertexAttributeDescriptions;
+} VkPipelineVertexInputStateCreateInfo;
+
+typedef struct VkPipelineInputAssemblyStateCreateInfo {
+ VkStructureType sType;
+ const void * pNext;
+ VkPipelineInputAssemblyStateCreateFlags flags;
+ VkPrimitiveTopology topology;
+ VkBool32 primitiveRestartEnable;
+} VkPipelineInputAssemblyStateCreateInfo;
+
+typedef struct VkPipelineTessellationStateCreateInfo {
+ VkStructureType sType;
+ const void * pNext;
+ VkPipelineTessellationStateCreateFlags flags;
+ uint32_t patchControlPoints;
+} VkPipelineTessellationStateCreateInfo;
+
+typedef struct VkPipelineViewportStateCreateInfo {
+ VkStructureType sType;
+ const void * pNext;
+ VkPipelineViewportStateCreateFlags flags;
+ uint32_t viewportCount;
+ const VkViewport * pViewports;
+ uint32_t scissorCount;
+ const VkRect2D * pScissors;
+} VkPipelineViewportStateCreateInfo;
+
+typedef struct VkPipelineRasterizationStateCreateInfo {
+ VkStructureType sType;
+ const void * pNext;
+ VkPipelineRasterizationStateCreateFlags flags;
+ VkBool32 depthClampEnable;
+ VkBool32 rasterizerDiscardEnable;
+ VkPolygonMode polygonMode;
+ VkCullModeFlags cullMode;
+ VkFrontFace frontFace;
+ VkBool32 depthBiasEnable;
+ float depthBiasConstantFactor;
+ float depthBiasClamp;
+ float depthBiasSlopeFactor;
+ float lineWidth;
+} VkPipelineRasterizationStateCreateInfo;
+
+typedef struct VkPipelineMultisampleStateCreateInfo {
+ VkStructureType sType;
+ const void * pNext;
+ VkPipelineMultisampleStateCreateFlags flags;
+ VkSampleCountFlagBits rasterizationSamples;
+ VkBool32 sampleShadingEnable;
+ float minSampleShading;
+ const VkSampleMask * pSampleMask;
+ VkBool32 alphaToCoverageEnable;
+ VkBool32 alphaToOneEnable;
+} VkPipelineMultisampleStateCreateInfo;
+
+typedef struct VkPipelineColorBlendAttachmentState {
+ VkBool32 blendEnable;
+ VkBlendFactor srcColorBlendFactor;
+ VkBlendFactor dstColorBlendFactor;
+ VkBlendOp colorBlendOp;
+ VkBlendFactor srcAlphaBlendFactor;
+ VkBlendFactor dstAlphaBlendFactor;
+ VkBlendOp alphaBlendOp;
+ VkColorComponentFlags colorWriteMask;
+} VkPipelineColorBlendAttachmentState;
+
+typedef struct VkPipelineColorBlendStateCreateInfo {
+ VkStructureType sType;
+ const void * pNext;
+ VkPipelineColorBlendStateCreateFlags flags;
+ VkBool32 logicOpEnable;
+ VkLogicOp logicOp;
+ uint32_t attachmentCount;
+ const VkPipelineColorBlendAttachmentState * pAttachments;
+ float blendConstants [4];
+} VkPipelineColorBlendStateCreateInfo;
+
+typedef struct VkPipelineDynamicStateCreateInfo {
+ VkStructureType sType;
+ const void * pNext;
+ VkPipelineDynamicStateCreateFlags flags;
+ uint32_t dynamicStateCount;
+ const VkDynamicState * pDynamicStates;
+} VkPipelineDynamicStateCreateInfo;
+
+typedef struct VkPipelineDepthStencilStateCreateInfo {
+ VkStructureType sType;
+ const void * pNext;
+ VkPipelineDepthStencilStateCreateFlags flags;
+ VkBool32 depthTestEnable;
+ VkBool32 depthWriteEnable;
+ VkCompareOp depthCompareOp;
+ VkBool32 depthBoundsTestEnable;
+ VkBool32 stencilTestEnable;
+ VkStencilOpState front;
+ VkStencilOpState back;
+ float minDepthBounds;
+ float maxDepthBounds;
+} VkPipelineDepthStencilStateCreateInfo;
+
+typedef struct VkGraphicsPipelineCreateInfo {
+ VkStructureType sType;
+ const void * pNext;
+ VkPipelineCreateFlags flags;
+ uint32_t stageCount;
+ const VkPipelineShaderStageCreateInfo * pStages;
+ const VkPipelineVertexInputStateCreateInfo * pVertexInputState;
+ const VkPipelineInputAssemblyStateCreateInfo * pInputAssemblyState;
+ const VkPipelineTessellationStateCreateInfo * pTessellationState;
+ const VkPipelineViewportStateCreateInfo * pViewportState;
+ const VkPipelineRasterizationStateCreateInfo * pRasterizationState;
+ const VkPipelineMultisampleStateCreateInfo * pMultisampleState;
+ const VkPipelineDepthStencilStateCreateInfo * pDepthStencilState;
+ const VkPipelineColorBlendStateCreateInfo * pColorBlendState;
+ const VkPipelineDynamicStateCreateInfo * pDynamicState;
+ VkPipelineLayout layout;
+ VkRenderPass renderPass;
+ uint32_t subpass;
+ VkPipeline basePipelineHandle;
+ int32_t basePipelineIndex;
+} VkGraphicsPipelineCreateInfo;
+
+typedef struct VkPipelineCacheCreateInfo {
+ VkStructureType sType;
+ const void * pNext;
+ VkPipelineCacheCreateFlags flags;
+ size_t initialDataSize;
+ const void * pInitialData;
+} VkPipelineCacheCreateInfo;
+
+typedef struct VkPushConstantRange {
+ VkShaderStageFlags stageFlags;
+ uint32_t offset;
+ uint32_t size;
+} VkPushConstantRange;
+
+typedef struct VkPipelineLayoutCreateInfo {
+ VkStructureType sType;
+ const void * pNext;
+ VkPipelineLayoutCreateFlags flags;
+ uint32_t setLayoutCount;
+ const VkDescriptorSetLayout * pSetLayouts;
+ uint32_t pushConstantRangeCount;
+ const VkPushConstantRange * pPushConstantRanges;
+} VkPipelineLayoutCreateInfo;
+
+typedef struct VkSamplerCreateInfo {
+ VkStructureType sType;
+ const void * pNext;
+ VkSamplerCreateFlags flags;
+ VkFilter magFilter;
+ VkFilter minFilter;
+ VkSamplerMipmapMode mipmapMode;
+ VkSamplerAddressMode addressModeU;
+ VkSamplerAddressMode addressModeV;
+ VkSamplerAddressMode addressModeW;
+ float mipLodBias;
+ VkBool32 anisotropyEnable;
+ float maxAnisotropy;
+ VkBool32 compareEnable;
+ VkCompareOp compareOp;
+ float minLod;
+ float maxLod;
+ VkBorderColor borderColor;
+ VkBool32 unnormalizedCoordinates;
+} VkSamplerCreateInfo;
+
+typedef struct VkCommandPoolCreateInfo {
+ VkStructureType sType;
+ const void * pNext;
+ VkCommandPoolCreateFlags flags;
+ uint32_t queueFamilyIndex;
+} VkCommandPoolCreateInfo;
+
+typedef struct VkCommandBufferInheritanceInfo {
+ VkStructureType sType;
+ const void * pNext;
+ VkRenderPass renderPass;
+ uint32_t subpass;
+ VkFramebuffer framebuffer;
+ VkBool32 occlusionQueryEnable;
+ VkQueryControlFlags queryFlags;
+ VkQueryPipelineStatisticFlags pipelineStatistics;
+} VkCommandBufferInheritanceInfo;
+
+typedef struct VkCommandBufferBeginInfo {
+ VkStructureType sType;
+ const void * pNext;
+ VkCommandBufferUsageFlags flags;
+ const VkCommandBufferInheritanceInfo * pInheritanceInfo;
+} VkCommandBufferBeginInfo;
+
+typedef struct VkRenderPassBeginInfo {
+ VkStructureType sType;
+ const void * pNext;
+ VkRenderPass renderPass;
+ VkFramebuffer framebuffer;
+ VkRect2D renderArea;
+ uint32_t clearValueCount;
+ const VkClearValue * pClearValues;
+} VkRenderPassBeginInfo;
+
+typedef struct VkClearAttachment {
+ VkImageAspectFlags aspectMask;
+ uint32_t colorAttachment;
+ VkClearValue clearValue;
+} VkClearAttachment;
+
+typedef struct VkAttachmentDescription {
+ VkAttachmentDescriptionFlags flags;
+ VkFormat format;
+ VkSampleCountFlagBits samples;
+ VkAttachmentLoadOp loadOp;
+ VkAttachmentStoreOp storeOp;
+ VkAttachmentLoadOp stencilLoadOp;
+ VkAttachmentStoreOp stencilStoreOp;
+ VkImageLayout initialLayout;
+ VkImageLayout finalLayout;
+} VkAttachmentDescription;
+
+typedef struct VkSubpassDescription {
+ VkSubpassDescriptionFlags flags;
+ VkPipelineBindPoint pipelineBindPoint;
+ uint32_t inputAttachmentCount;
+ const VkAttachmentReference * pInputAttachments;
+ uint32_t colorAttachmentCount;
+ const VkAttachmentReference * pColorAttachments;
+ const VkAttachmentReference * pResolveAttachments;
+ const VkAttachmentReference * pDepthStencilAttachment;
+ uint32_t preserveAttachmentCount;
+ const uint32_t * pPreserveAttachments;
+} VkSubpassDescription;
+
+typedef struct VkSubpassDependency {
+ uint32_t srcSubpass;
+ uint32_t dstSubpass;
+ VkPipelineStageFlags srcStageMask;
+ VkPipelineStageFlags dstStageMask;
+ VkAccessFlags srcAccessMask;
+ VkAccessFlags dstAccessMask;
+ VkDependencyFlags dependencyFlags;
+} VkSubpassDependency;
+
+typedef struct VkRenderPassCreateInfo {
+ VkStructureType sType;
+ const void * pNext;
+ VkRenderPassCreateFlags flags;
+ uint32_t attachmentCount;
+ const VkAttachmentDescription * pAttachments;
+ uint32_t subpassCount;
+ const VkSubpassDescription * pSubpasses;
+ uint32_t dependencyCount;
+ const VkSubpassDependency * pDependencies;
+} VkRenderPassCreateInfo;
+
+typedef struct VkEventCreateInfo {
+ VkStructureType sType;
+ const void * pNext;
+ VkEventCreateFlags flags;
+} VkEventCreateInfo;
+
+typedef struct VkFenceCreateInfo {
+ VkStructureType sType;
+ const void * pNext;
+ VkFenceCreateFlags flags;
+} VkFenceCreateInfo;
+
+typedef struct VkPhysicalDeviceFeatures {
+ VkBool32 robustBufferAccess;
+ VkBool32 fullDrawIndexUint32;
+ VkBool32 imageCubeArray;
+ VkBool32 independentBlend;
+ VkBool32 geometryShader;
+ VkBool32 tessellationShader;
+ VkBool32 sampleRateShading;
+ VkBool32 dualSrcBlend;
+ VkBool32 logicOp;
+ VkBool32 multiDrawIndirect;
+ VkBool32 drawIndirectFirstInstance;
+ VkBool32 depthClamp;
+ VkBool32 depthBiasClamp;
+ VkBool32 fillModeNonSolid;
+ VkBool32 depthBounds;
+ VkBool32 wideLines;
+ VkBool32 largePoints;
+ VkBool32 alphaToOne;
+ VkBool32 multiViewport;
+ VkBool32 samplerAnisotropy;
+ VkBool32 textureCompressionETC2;
+ VkBool32 textureCompressionASTC_LDR;
+ VkBool32 textureCompressionBC;
+ VkBool32 occlusionQueryPrecise;
+ VkBool32 pipelineStatisticsQuery;
+ VkBool32 vertexPipelineStoresAndAtomics;
+ VkBool32 fragmentStoresAndAtomics;
+ VkBool32 shaderTessellationAndGeometryPointSize;
+ VkBool32 shaderImageGatherExtended;
+ VkBool32 shaderStorageImageExtendedFormats;
+ VkBool32 shaderStorageImageMultisample;
+ VkBool32 shaderStorageImageReadWithoutFormat;
+ VkBool32 shaderStorageImageWriteWithoutFormat;
+ VkBool32 shaderUniformBufferArrayDynamicIndexing;
+ VkBool32 shaderSampledImageArrayDynamicIndexing;
+ VkBool32 shaderStorageBufferArrayDynamicIndexing;
+ VkBool32 shaderStorageImageArrayDynamicIndexing;
+ VkBool32 shaderClipDistance;
+ VkBool32 shaderCullDistance;
+ VkBool32 shaderFloat64;
+ VkBool32 shaderInt64;
+ VkBool32 shaderInt16;
+ VkBool32 shaderResourceResidency;
+ VkBool32 shaderResourceMinLod;
+ VkBool32 sparseBinding;
+ VkBool32 sparseResidencyBuffer;
+ VkBool32 sparseResidencyImage2D;
+ VkBool32 sparseResidencyImage3D;
+ VkBool32 sparseResidency2Samples;
+ VkBool32 sparseResidency4Samples;
+ VkBool32 sparseResidency8Samples;
+ VkBool32 sparseResidency16Samples;
+ VkBool32 sparseResidencyAliased;
+ VkBool32 variableMultisampleRate;
+ VkBool32 inheritedQueries;
+} VkPhysicalDeviceFeatures;
+
+typedef struct VkPhysicalDeviceSparseProperties {
+ VkBool32 residencyStandard2DBlockShape;
+ VkBool32 residencyStandard2DMultisampleBlockShape;
+ VkBool32 residencyStandard3DBlockShape;
+ VkBool32 residencyAlignedMipSize;
+ VkBool32 residencyNonResidentStrict;
+} VkPhysicalDeviceSparseProperties;
+
+typedef struct VkPhysicalDeviceLimits {
+ uint32_t maxImageDimension1D;
+ uint32_t maxImageDimension2D;
+ uint32_t maxImageDimension3D;
+ uint32_t maxImageDimensionCube;
+ uint32_t maxImageArrayLayers;
+ uint32_t maxTexelBufferElements;
+ uint32_t maxUniformBufferRange;
+ uint32_t maxStorageBufferRange;
+ uint32_t maxPushConstantsSize;
+ uint32_t maxMemoryAllocationCount;
+ uint32_t maxSamplerAllocationCount;
+ VkDeviceSize bufferImageGranularity;
+ VkDeviceSize sparseAddressSpaceSize;
+ uint32_t maxBoundDescriptorSets;
+ uint32_t maxPerStageDescriptorSamplers;
+ uint32_t maxPerStageDescriptorUniformBuffers;
+ uint32_t maxPerStageDescriptorStorageBuffers;
+ uint32_t maxPerStageDescriptorSampledImages;
+ uint32_t maxPerStageDescriptorStorageImages;
+ uint32_t maxPerStageDescriptorInputAttachments;
+ uint32_t maxPerStageResources;
+ uint32_t maxDescriptorSetSamplers;
+ uint32_t maxDescriptorSetUniformBuffers;
+ uint32_t maxDescriptorSetUniformBuffersDynamic;
+ uint32_t maxDescriptorSetStorageBuffers;
+ uint32_t maxDescriptorSetStorageBuffersDynamic;
+ uint32_t maxDescriptorSetSampledImages;
+ uint32_t maxDescriptorSetStorageImages;
+ uint32_t maxDescriptorSetInputAttachments;
+ uint32_t maxVertexInputAttributes;
+ uint32_t maxVertexInputBindings;
+ uint32_t maxVertexInputAttributeOffset;
+ uint32_t maxVertexInputBindingStride;
+ uint32_t maxVertexOutputComponents;
+ uint32_t maxTessellationGenerationLevel;
+ uint32_t maxTessellationPatchSize;
+ uint32_t maxTessellationControlPerVertexInputComponents;
+ uint32_t maxTessellationControlPerVertexOutputComponents;
+ uint32_t maxTessellationControlPerPatchOutputComponents;
+ uint32_t maxTessellationControlTotalOutputComponents;
+ uint32_t maxTessellationEvaluationInputComponents;
+ uint32_t maxTessellationEvaluationOutputComponents;
+ uint32_t maxGeometryShaderInvocations;
+ uint32_t maxGeometryInputComponents;
+ uint32_t maxGeometryOutputComponents;
+ uint32_t maxGeometryOutputVertices;
+ uint32_t maxGeometryTotalOutputComponents;
+ uint32_t maxFragmentInputComponents;
+ uint32_t maxFragmentOutputAttachments;
+ uint32_t maxFragmentDualSrcAttachments;
+ uint32_t maxFragmentCombinedOutputResources;
+ uint32_t maxComputeSharedMemorySize;
+ uint32_t maxComputeWorkGroupCount [3];
+ uint32_t maxComputeWorkGroupInvocations;
+ uint32_t maxComputeWorkGroupSize [3];
+ uint32_t subPixelPrecisionBits;
+ uint32_t subTexelPrecisionBits;
+ uint32_t mipmapPrecisionBits;
+ uint32_t maxDrawIndexedIndexValue;
+ uint32_t maxDrawIndirectCount;
+ float maxSamplerLodBias;
+ float maxSamplerAnisotropy;
+ uint32_t maxViewports;
+ uint32_t maxViewportDimensions [2];
+ float viewportBoundsRange [2];
+ uint32_t viewportSubPixelBits;
+ size_t minMemoryMapAlignment;
+ VkDeviceSize minTexelBufferOffsetAlignment;
+ VkDeviceSize minUniformBufferOffsetAlignment;
+ VkDeviceSize minStorageBufferOffsetAlignment;
+ int32_t minTexelOffset;
+ uint32_t maxTexelOffset;
+ int32_t minTexelGatherOffset;
+ uint32_t maxTexelGatherOffset;
+ float minInterpolationOffset;
+ float maxInterpolationOffset;
+ uint32_t subPixelInterpolationOffsetBits;
+ uint32_t maxFramebufferWidth;
+ uint32_t maxFramebufferHeight;
+ uint32_t maxFramebufferLayers;
+ VkSampleCountFlags framebufferColorSampleCounts;
+ VkSampleCountFlags framebufferDepthSampleCounts;
+ VkSampleCountFlags framebufferStencilSampleCounts;
+ VkSampleCountFlags framebufferNoAttachmentsSampleCounts;
+ uint32_t maxColorAttachments;
+ VkSampleCountFlags sampledImageColorSampleCounts;
+ VkSampleCountFlags sampledImageIntegerSampleCounts;
+ VkSampleCountFlags sampledImageDepthSampleCounts;
+ VkSampleCountFlags sampledImageStencilSampleCounts;
+ VkSampleCountFlags storageImageSampleCounts;
+ uint32_t maxSampleMaskWords;
+ VkBool32 timestampComputeAndGraphics;
+ float timestampPeriod;
+ uint32_t maxClipDistances;
+ uint32_t maxCullDistances;
+ uint32_t maxCombinedClipAndCullDistances;
+ uint32_t discreteQueuePriorities;
+ float pointSizeRange [2];
+ float lineWidthRange [2];
+ float pointSizeGranularity;
+ float lineWidthGranularity;
+ VkBool32 strictLines;
+ VkBool32 standardSampleLocations;
+ VkDeviceSize optimalBufferCopyOffsetAlignment;
+ VkDeviceSize optimalBufferCopyRowPitchAlignment;
+ VkDeviceSize nonCoherentAtomSize;
+} VkPhysicalDeviceLimits;
+
+typedef struct VkSemaphoreCreateInfo {
+ VkStructureType sType;
+ const void * pNext;
+ VkSemaphoreCreateFlags flags;
+} VkSemaphoreCreateInfo;
+
+typedef struct VkQueryPoolCreateInfo {
+ VkStructureType sType;
+ const void * pNext;
+ VkQueryPoolCreateFlags flags;
+ VkQueryType queryType;
+ uint32_t queryCount;
+ VkQueryPipelineStatisticFlags pipelineStatistics;
+} VkQueryPoolCreateInfo;
+
+typedef struct VkFramebufferCreateInfo {
+ VkStructureType sType;
+ const void * pNext;
+ VkFramebufferCreateFlags flags;
+ VkRenderPass renderPass;
+ uint32_t attachmentCount;
+ const VkImageView * pAttachments;
+ uint32_t width;
+ uint32_t height;
+ uint32_t layers;
+} VkFramebufferCreateInfo;
+
+typedef struct VkSubmitInfo {
+ VkStructureType sType;
+ const void * pNext;
+ uint32_t waitSemaphoreCount;
+ const VkSemaphore * pWaitSemaphores;
+ const VkPipelineStageFlags * pWaitDstStageMask;
+ uint32_t commandBufferCount;
+ const VkCommandBuffer * pCommandBuffers;
+ uint32_t signalSemaphoreCount;
+ const VkSemaphore * pSignalSemaphores;
+} VkSubmitInfo;
+
+typedef struct VkSurfaceCapabilitiesKHR {
+ uint32_t minImageCount;
+ uint32_t maxImageCount;
+ VkExtent2D currentExtent;
+ VkExtent2D minImageExtent;
+ VkExtent2D maxImageExtent;
+ uint32_t maxImageArrayLayers;
+ VkSurfaceTransformFlagsKHR supportedTransforms;
+ VkSurfaceTransformFlagBitsKHR currentTransform;
+ VkCompositeAlphaFlagsKHR supportedCompositeAlpha;
+ VkImageUsageFlags supportedUsageFlags;
+} VkSurfaceCapabilitiesKHR;
+
+typedef struct VkSwapchainCreateInfoKHR {
+ VkStructureType sType;
+ const void * pNext;
+ VkSwapchainCreateFlagsKHR flags;
+ VkSurfaceKHR surface;
+ uint32_t minImageCount;
+ VkFormat imageFormat;
+ VkColorSpaceKHR imageColorSpace;
+ VkExtent2D imageExtent;
+ uint32_t imageArrayLayers;
+ VkImageUsageFlags imageUsage;
+ VkSharingMode imageSharingMode;
+ uint32_t queueFamilyIndexCount;
+ const uint32_t * pQueueFamilyIndices;
+ VkSurfaceTransformFlagBitsKHR preTransform;
+ VkCompositeAlphaFlagBitsKHR compositeAlpha;
+ VkPresentModeKHR presentMode;
+ VkBool32 clipped;
+ VkSwapchainKHR oldSwapchain;
+} VkSwapchainCreateInfoKHR;
+
+typedef struct VkDebugReportCallbackCreateInfoEXT {
+ VkStructureType sType;
+ const void * pNext;
+ VkDebugReportFlagsEXT flags;
+ PFN_vkDebugReportCallbackEXT pfnCallback;
+ void * pUserData;
+} VkDebugReportCallbackCreateInfoEXT;
+
+typedef struct VkPhysicalDeviceFeatures2 {
+ VkStructureType sType;
+ void * pNext;
+ VkPhysicalDeviceFeatures features;
+} VkPhysicalDeviceFeatures2;
+
+typedef struct VkFormatProperties2 {
+ VkStructureType sType;
+ void * pNext;
+ VkFormatProperties formatProperties;
+} VkFormatProperties2;
+
+typedef struct VkImageFormatProperties2 {
+ VkStructureType sType;
+ void * pNext;
+ VkImageFormatProperties imageFormatProperties;
+} VkImageFormatProperties2;
+
+typedef struct VkPhysicalDeviceImageFormatInfo2 {
+ VkStructureType sType;
+ const void * pNext;
+ VkFormat format;
+ VkImageType type;
+ VkImageTiling tiling;
+ VkImageUsageFlags usage;
+ VkImageCreateFlags flags;
+} VkPhysicalDeviceImageFormatInfo2;
+
+typedef struct VkQueueFamilyProperties2 {
+ VkStructureType sType;
+ void * pNext;
+ VkQueueFamilyProperties queueFamilyProperties;
+} VkQueueFamilyProperties2;
+
+typedef struct VkSparseImageFormatProperties2 {
+ VkStructureType sType;
+ void * pNext;
+ VkSparseImageFormatProperties properties;
+} VkSparseImageFormatProperties2;
+
+typedef struct VkPhysicalDeviceSparseImageFormatInfo2 {
+ VkStructureType sType;
+ const void * pNext;
+ VkFormat format;
+ VkImageType type;
+ VkSampleCountFlagBits samples;
+ VkImageUsageFlags usage;
+ VkImageTiling tiling;
+} VkPhysicalDeviceSparseImageFormatInfo2;
+
+typedef struct VkPhysicalDeviceVariablePointersFeatures {
+ VkStructureType sType;
+ void * pNext;
+ VkBool32 variablePointersStorageBuffer;
+ VkBool32 variablePointers;
+} VkPhysicalDeviceVariablePointersFeatures;
+
+typedef struct VkPhysicalDeviceVariablePointersFeatures VkPhysicalDeviceVariablePointerFeatures;
+
+typedef struct VkExternalMemoryProperties {
+ VkExternalMemoryFeatureFlags externalMemoryFeatures;
+ VkExternalMemoryHandleTypeFlags exportFromImportedHandleTypes;
+ VkExternalMemoryHandleTypeFlags compatibleHandleTypes;
+} VkExternalMemoryProperties;
+
+typedef struct VkExternalImageFormatProperties {
+ VkStructureType sType;
+ void * pNext;
+ VkExternalMemoryProperties externalMemoryProperties;
+} VkExternalImageFormatProperties;
+
+typedef struct VkPhysicalDeviceExternalBufferInfo {
+ VkStructureType sType;
+ const void * pNext;
+ VkBufferCreateFlags flags;
+ VkBufferUsageFlags usage;
+ VkExternalMemoryHandleTypeFlagBits handleType;
+} VkPhysicalDeviceExternalBufferInfo;
+
+typedef struct VkExternalBufferProperties {
+ VkStructureType sType;
+ void * pNext;
+ VkExternalMemoryProperties externalMemoryProperties;
+} VkExternalBufferProperties;
+
+typedef struct VkPhysicalDeviceIDProperties {
+ VkStructureType sType;
+ void * pNext;
+ uint8_t deviceUUID [ VK_UUID_SIZE ];
+ uint8_t driverUUID [ VK_UUID_SIZE ];
+ uint8_t deviceLUID [ VK_LUID_SIZE ];
+ uint32_t deviceNodeMask;
+ VkBool32 deviceLUIDValid;
+} VkPhysicalDeviceIDProperties;
+
+typedef struct VkExternalMemoryImageCreateInfo {
+ VkStructureType sType;
+ const void * pNext;
+ VkExternalMemoryHandleTypeFlags handleTypes;
+} VkExternalMemoryImageCreateInfo;
+
+typedef struct VkExternalMemoryBufferCreateInfo {
+ VkStructureType sType;
+ const void * pNext;
+ VkExternalMemoryHandleTypeFlags handleTypes;
+} VkExternalMemoryBufferCreateInfo;
+
+typedef struct VkExportMemoryAllocateInfo {
+ VkStructureType sType;
+ const void * pNext;
+ VkExternalMemoryHandleTypeFlags handleTypes;
+} VkExportMemoryAllocateInfo;
+
+typedef struct VkExternalSemaphoreProperties {
+ VkStructureType sType;
+ void * pNext;
+ VkExternalSemaphoreHandleTypeFlags exportFromImportedHandleTypes;
+ VkExternalSemaphoreHandleTypeFlags compatibleHandleTypes;
+ VkExternalSemaphoreFeatureFlags externalSemaphoreFeatures;
+} VkExternalSemaphoreProperties;
+
+typedef struct VkExportSemaphoreCreateInfo {
+ VkStructureType sType;
+ const void * pNext;
+ VkExternalSemaphoreHandleTypeFlags handleTypes;
+} VkExportSemaphoreCreateInfo;
+
+typedef struct VkExternalFenceProperties {
+ VkStructureType sType;
+ void * pNext;
+ VkExternalFenceHandleTypeFlags exportFromImportedHandleTypes;
+ VkExternalFenceHandleTypeFlags compatibleHandleTypes;
+ VkExternalFenceFeatureFlags externalFenceFeatures;
+} VkExternalFenceProperties;
+
+typedef struct VkExportFenceCreateInfo {
+ VkStructureType sType;
+ const void * pNext;
+ VkExternalFenceHandleTypeFlags handleTypes;
+} VkExportFenceCreateInfo;
+
+typedef struct VkPhysicalDeviceMultiviewFeatures {
+ VkStructureType sType;
+ void * pNext;
+ VkBool32 multiview;
+ VkBool32 multiviewGeometryShader;
+ VkBool32 multiviewTessellationShader;
+} VkPhysicalDeviceMultiviewFeatures;
+
+typedef struct VkPhysicalDeviceGroupProperties {
+ VkStructureType sType;
+ void * pNext;
+ uint32_t physicalDeviceCount;
+ VkPhysicalDevice physicalDevices [ VK_MAX_DEVICE_GROUP_SIZE ];
+ VkBool32 subsetAllocation;
+} VkPhysicalDeviceGroupProperties;
+
+typedef struct VkMemoryAllocateFlagsInfo {
+ VkStructureType sType;
+ const void * pNext;
+ VkMemoryAllocateFlags flags;
+ uint32_t deviceMask;
+} VkMemoryAllocateFlagsInfo;
+
+typedef struct VkBindBufferMemoryInfo {
+ VkStructureType sType;
+ const void * pNext;
+ VkBuffer buffer;
+ VkDeviceMemory memory;
+ VkDeviceSize memoryOffset;
+} VkBindBufferMemoryInfo;
+
+typedef struct VkBindImageMemoryInfo {
+ VkStructureType sType;
+ const void * pNext;
+ VkImage image;
+ VkDeviceMemory memory;
+ VkDeviceSize memoryOffset;
+} VkBindImageMemoryInfo;
+
+typedef struct VkDeviceGroupPresentCapabilitiesKHR {
+ VkStructureType sType;
+ const void * pNext;
+ uint32_t presentMask [ VK_MAX_DEVICE_GROUP_SIZE ];
+ VkDeviceGroupPresentModeFlagsKHR modes;
+} VkDeviceGroupPresentCapabilitiesKHR;
+
+typedef struct VkDeviceGroupSwapchainCreateInfoKHR {
+ VkStructureType sType;
+ const void * pNext;
+ VkDeviceGroupPresentModeFlagsKHR modes;
+} VkDeviceGroupSwapchainCreateInfoKHR;
+
+typedef struct VkDescriptorUpdateTemplateCreateInfo {
+ VkStructureType sType;
+ const void * pNext;
+ VkDescriptorUpdateTemplateCreateFlags flags;
+ uint32_t descriptorUpdateEntryCount;
+ const VkDescriptorUpdateTemplateEntry * pDescriptorUpdateEntries;
+ VkDescriptorUpdateTemplateType templateType;
+ VkDescriptorSetLayout descriptorSetLayout;
+ VkPipelineBindPoint pipelineBindPoint;
+ VkPipelineLayout pipelineLayout;
+ uint32_t set;
+} VkDescriptorUpdateTemplateCreateInfo;
+
+typedef struct VkInputAttachmentAspectReference {
+ uint32_t subpass;
+ uint32_t inputAttachmentIndex;
+ VkImageAspectFlags aspectMask;
+} VkInputAttachmentAspectReference;
+
+typedef struct VkRenderPassInputAttachmentAspectCreateInfo {
+ VkStructureType sType;
+ const void * pNext;
+ uint32_t aspectReferenceCount;
+ const VkInputAttachmentAspectReference * pAspectReferences;
+} VkRenderPassInputAttachmentAspectCreateInfo;
+
+typedef struct VkPhysicalDevice16BitStorageFeatures {
+ VkStructureType sType;
+ void * pNext;
+ VkBool32 storageBuffer16BitAccess;
+ VkBool32 uniformAndStorageBuffer16BitAccess;
+ VkBool32 storagePushConstant16;
+ VkBool32 storageInputOutput16;
+} VkPhysicalDevice16BitStorageFeatures;
+
+typedef struct VkPhysicalDeviceSubgroupProperties {
+ VkStructureType sType;
+ void * pNext;
+ uint32_t subgroupSize;
+ VkShaderStageFlags supportedStages;
+ VkSubgroupFeatureFlags supportedOperations;
+ VkBool32 quadOperationsInAllStages;
+} VkPhysicalDeviceSubgroupProperties;
+
+typedef struct VkMemoryRequirements2 {
+ VkStructureType sType;
+ void * pNext;
+ VkMemoryRequirements memoryRequirements;
+} VkMemoryRequirements2;
+
+typedef struct VkSparseImageMemoryRequirements2 {
+ VkStructureType sType;
+ void * pNext;
+ VkSparseImageMemoryRequirements memoryRequirements;
+} VkSparseImageMemoryRequirements2;
+
+typedef struct VkMemoryDedicatedRequirements {
+ VkStructureType sType;
+ void * pNext;
+ VkBool32 prefersDedicatedAllocation;
+ VkBool32 requiresDedicatedAllocation;
+} VkMemoryDedicatedRequirements;
+
+typedef struct VkImageViewUsageCreateInfo {
+ VkStructureType sType;
+ const void * pNext;
+ VkImageUsageFlags usage;
+} VkImageViewUsageCreateInfo;
+
+typedef struct VkSamplerYcbcrConversionCreateInfo {
+ VkStructureType sType;
+ const void * pNext;
+ VkFormat format;
+ VkSamplerYcbcrModelConversion ycbcrModel;
+ VkSamplerYcbcrRange ycbcrRange;
+ VkComponentMapping components;
+ VkChromaLocation xChromaOffset;
+ VkChromaLocation yChromaOffset;
+ VkFilter chromaFilter;
+ VkBool32 forceExplicitReconstruction;
+} VkSamplerYcbcrConversionCreateInfo;
+
+typedef struct VkPhysicalDeviceSamplerYcbcrConversionFeatures {
+ VkStructureType sType;
+ void * pNext;
+ VkBool32 samplerYcbcrConversion;
+} VkPhysicalDeviceSamplerYcbcrConversionFeatures;
+
+typedef struct VkProtectedSubmitInfo {
+ VkStructureType sType;
+ const void * pNext;
+ VkBool32 protectedSubmit;
+} VkProtectedSubmitInfo;
+
+typedef struct VkPhysicalDeviceProtectedMemoryFeatures {
+ VkStructureType sType;
+ void * pNext;
+ VkBool32 protectedMemory;
+} VkPhysicalDeviceProtectedMemoryFeatures;
+
+typedef struct VkPhysicalDeviceProtectedMemoryProperties {
+ VkStructureType sType;
+ void * pNext;
+ VkBool32 protectedNoFault;
+} VkPhysicalDeviceProtectedMemoryProperties;
+
+typedef struct VkDeviceQueueInfo2 {
+ VkStructureType sType;
+ const void * pNext;
+ VkDeviceQueueCreateFlags flags;
+ uint32_t queueFamilyIndex;
+ uint32_t queueIndex;
+} VkDeviceQueueInfo2;
+
+typedef struct VkPhysicalDeviceMaintenance3Properties {
+ VkStructureType sType;
+ void * pNext;
+ uint32_t maxPerSetDescriptors;
+ VkDeviceSize maxMemoryAllocationSize;
+} VkPhysicalDeviceMaintenance3Properties;
+
+typedef struct VkDescriptorSetLayoutSupport {
+ VkStructureType sType;
+ void * pNext;
+ VkBool32 supported;
+} VkDescriptorSetLayoutSupport;
+
+typedef struct VkPhysicalDeviceShaderDrawParametersFeatures {
+ VkStructureType sType;
+ void * pNext;
+ VkBool32 shaderDrawParameters;
+} VkPhysicalDeviceShaderDrawParametersFeatures;
+
+typedef struct VkPhysicalDeviceShaderDrawParametersFeatures VkPhysicalDeviceShaderDrawParameterFeatures;
+
+typedef struct VkPhysicalDeviceProperties {
+ uint32_t apiVersion;
+ uint32_t driverVersion;
+ uint32_t vendorID;
+ uint32_t deviceID;
+ VkPhysicalDeviceType deviceType;
+ char deviceName [ VK_MAX_PHYSICAL_DEVICE_NAME_SIZE ];
+ uint8_t pipelineCacheUUID [ VK_UUID_SIZE ];
+ VkPhysicalDeviceLimits limits;
+ VkPhysicalDeviceSparseProperties sparseProperties;
+} VkPhysicalDeviceProperties;
+
+typedef struct VkDeviceCreateInfo {
+ VkStructureType sType;
+ const void * pNext;
+ VkDeviceCreateFlags flags;
+ uint32_t queueCreateInfoCount;
+ const VkDeviceQueueCreateInfo * pQueueCreateInfos;
+ uint32_t enabledLayerCount;
+ const char * const* ppEnabledLayerNames;
+ uint32_t enabledExtensionCount;
+ const char * const* ppEnabledExtensionNames;
+ const VkPhysicalDeviceFeatures * pEnabledFeatures;
+} VkDeviceCreateInfo;
+
+typedef struct VkPhysicalDeviceMemoryProperties {
+ uint32_t memoryTypeCount;
+ VkMemoryType memoryTypes [ VK_MAX_MEMORY_TYPES ];
+ uint32_t memoryHeapCount;
+ VkMemoryHeap memoryHeaps [ VK_MAX_MEMORY_HEAPS ];
+} VkPhysicalDeviceMemoryProperties;
+
+typedef struct VkPhysicalDeviceProperties2 {
+ VkStructureType sType;
+ void * pNext;
+ VkPhysicalDeviceProperties properties;
+} VkPhysicalDeviceProperties2;
+
+typedef struct VkPhysicalDeviceMemoryProperties2 {
+ VkStructureType sType;
+ void * pNext;
+ VkPhysicalDeviceMemoryProperties memoryProperties;
+} VkPhysicalDeviceMemoryProperties2;
+
+
+
+#define VK_VERSION_1_0 1
+GLAD_API_CALL int GLAD_VK_VERSION_1_0;
+#define VK_VERSION_1_1 1
+GLAD_API_CALL int GLAD_VK_VERSION_1_1;
+#define VK_EXT_debug_report 1
+GLAD_API_CALL int GLAD_VK_EXT_debug_report;
+#define VK_KHR_surface 1
+GLAD_API_CALL int GLAD_VK_KHR_surface;
+#define VK_KHR_swapchain 1
+GLAD_API_CALL int GLAD_VK_KHR_swapchain;
+
+
+typedef VkResult (GLAD_API_PTR *PFN_vkAcquireNextImage2KHR)(VkDevice device, const VkAcquireNextImageInfoKHR * pAcquireInfo, uint32_t * pImageIndex);
+typedef VkResult (GLAD_API_PTR *PFN_vkAcquireNextImageKHR)(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout, VkSemaphore semaphore, VkFence fence, uint32_t * pImageIndex);
+typedef VkResult (GLAD_API_PTR *PFN_vkAllocateCommandBuffers)(VkDevice device, const VkCommandBufferAllocateInfo * pAllocateInfo, VkCommandBuffer * pCommandBuffers);
+typedef VkResult (GLAD_API_PTR *PFN_vkAllocateDescriptorSets)(VkDevice device, const VkDescriptorSetAllocateInfo * pAllocateInfo, VkDescriptorSet * pDescriptorSets);
+typedef VkResult (GLAD_API_PTR *PFN_vkAllocateMemory)(VkDevice device, const VkMemoryAllocateInfo * pAllocateInfo, const VkAllocationCallbacks * pAllocator, VkDeviceMemory * pMemory);
+typedef VkResult (GLAD_API_PTR *PFN_vkBeginCommandBuffer)(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo * pBeginInfo);
+typedef VkResult (GLAD_API_PTR *PFN_vkBindBufferMemory)(VkDevice device, VkBuffer buffer, VkDeviceMemory memory, VkDeviceSize memoryOffset);
+typedef VkResult (GLAD_API_PTR *PFN_vkBindBufferMemory2)(VkDevice device, uint32_t bindInfoCount, const VkBindBufferMemoryInfo * pBindInfos);
+typedef VkResult (GLAD_API_PTR *PFN_vkBindImageMemory)(VkDevice device, VkImage image, VkDeviceMemory memory, VkDeviceSize memoryOffset);
+typedef VkResult (GLAD_API_PTR *PFN_vkBindImageMemory2)(VkDevice device, uint32_t bindInfoCount, const VkBindImageMemoryInfo * pBindInfos);
+typedef void (GLAD_API_PTR *PFN_vkCmdBeginQuery)(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query, VkQueryControlFlags flags);
+typedef void (GLAD_API_PTR *PFN_vkCmdBeginRenderPass)(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo * pRenderPassBegin, VkSubpassContents contents);
+typedef void (GLAD_API_PTR *PFN_vkCmdBindDescriptorSets)(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t firstSet, uint32_t descriptorSetCount, const VkDescriptorSet * pDescriptorSets, uint32_t dynamicOffsetCount, const uint32_t * pDynamicOffsets);
+typedef void (GLAD_API_PTR *PFN_vkCmdBindIndexBuffer)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType);
+typedef void (GLAD_API_PTR *PFN_vkCmdBindPipeline)(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline);
+typedef void (GLAD_API_PTR *PFN_vkCmdBindVertexBuffers)(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer * pBuffers, const VkDeviceSize * pOffsets);
+typedef void (GLAD_API_PTR *PFN_vkCmdBlitImage)(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageBlit * pRegions, VkFilter filter);
+typedef void (GLAD_API_PTR *PFN_vkCmdClearAttachments)(VkCommandBuffer commandBuffer, uint32_t attachmentCount, const VkClearAttachment * pAttachments, uint32_t rectCount, const VkClearRect * pRects);
+typedef void (GLAD_API_PTR *PFN_vkCmdClearColorImage)(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout, const VkClearColorValue * pColor, uint32_t rangeCount, const VkImageSubresourceRange * pRanges);
+typedef void (GLAD_API_PTR *PFN_vkCmdClearDepthStencilImage)(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout, const VkClearDepthStencilValue * pDepthStencil, uint32_t rangeCount, const VkImageSubresourceRange * pRanges);
+typedef void (GLAD_API_PTR *PFN_vkCmdCopyBuffer)(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer, uint32_t regionCount, const VkBufferCopy * pRegions);
+typedef void (GLAD_API_PTR *PFN_vkCmdCopyBufferToImage)(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkBufferImageCopy * pRegions);
+typedef void (GLAD_API_PTR *PFN_vkCmdCopyImage)(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageCopy * pRegions);
+typedef void (GLAD_API_PTR *PFN_vkCmdCopyImageToBuffer)(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkBuffer dstBuffer, uint32_t regionCount, const VkBufferImageCopy * pRegions);
+typedef void (GLAD_API_PTR *PFN_vkCmdCopyQueryPoolResults)(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride, VkQueryResultFlags flags);
+typedef void (GLAD_API_PTR *PFN_vkCmdDispatch)(VkCommandBuffer commandBuffer, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ);
+typedef void (GLAD_API_PTR *PFN_vkCmdDispatchBase)(VkCommandBuffer commandBuffer, uint32_t baseGroupX, uint32_t baseGroupY, uint32_t baseGroupZ, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ);
+typedef void (GLAD_API_PTR *PFN_vkCmdDispatchIndirect)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset);
+typedef void (GLAD_API_PTR *PFN_vkCmdDraw)(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex, uint32_t firstInstance);
+typedef void (GLAD_API_PTR *PFN_vkCmdDrawIndexed)(VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance);
+typedef void (GLAD_API_PTR *PFN_vkCmdDrawIndexedIndirect)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t drawCount, uint32_t stride);
+typedef void (GLAD_API_PTR *PFN_vkCmdDrawIndirect)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t drawCount, uint32_t stride);
+typedef void (GLAD_API_PTR *PFN_vkCmdEndQuery)(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query);
+typedef void (GLAD_API_PTR *PFN_vkCmdEndRenderPass)(VkCommandBuffer commandBuffer);
+typedef void (GLAD_API_PTR *PFN_vkCmdExecuteCommands)(VkCommandBuffer commandBuffer, uint32_t commandBufferCount, const VkCommandBuffer * pCommandBuffers);
+typedef void (GLAD_API_PTR *PFN_vkCmdFillBuffer)(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize size, uint32_t data);
+typedef void (GLAD_API_PTR *PFN_vkCmdNextSubpass)(VkCommandBuffer commandBuffer, VkSubpassContents contents);
+typedef void (GLAD_API_PTR *PFN_vkCmdPipelineBarrier)(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount, const VkMemoryBarrier * pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier * pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier * pImageMemoryBarriers);
+typedef void (GLAD_API_PTR *PFN_vkCmdPushConstants)(VkCommandBuffer commandBuffer, VkPipelineLayout layout, VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size, const void * pValues);
+typedef void (GLAD_API_PTR *PFN_vkCmdResetEvent)(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask);
+typedef void (GLAD_API_PTR *PFN_vkCmdResetQueryPool)(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount);
+typedef void (GLAD_API_PTR *PFN_vkCmdResolveImage)(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageResolve * pRegions);
+typedef void (GLAD_API_PTR *PFN_vkCmdSetBlendConstants)(VkCommandBuffer commandBuffer, const float blendConstants [4]);
+typedef void (GLAD_API_PTR *PFN_vkCmdSetDepthBias)(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp, float depthBiasSlopeFactor);
+typedef void (GLAD_API_PTR *PFN_vkCmdSetDepthBounds)(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds);
+typedef void (GLAD_API_PTR *PFN_vkCmdSetDeviceMask)(VkCommandBuffer commandBuffer, uint32_t deviceMask);
+typedef void (GLAD_API_PTR *PFN_vkCmdSetEvent)(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask);
+typedef void (GLAD_API_PTR *PFN_vkCmdSetLineWidth)(VkCommandBuffer commandBuffer, float lineWidth);
+typedef void (GLAD_API_PTR *PFN_vkCmdSetScissor)(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount, const VkRect2D * pScissors);
+typedef void (GLAD_API_PTR *PFN_vkCmdSetStencilCompareMask)(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t compareMask);
+typedef void (GLAD_API_PTR *PFN_vkCmdSetStencilReference)(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference);
+typedef void (GLAD_API_PTR *PFN_vkCmdSetStencilWriteMask)(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask);
+typedef void (GLAD_API_PTR *PFN_vkCmdSetViewport)(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewport * pViewports);
+typedef void (GLAD_API_PTR *PFN_vkCmdUpdateBuffer)(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize dataSize, const void * pData);
+typedef void (GLAD_API_PTR *PFN_vkCmdWaitEvents)(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent * pEvents, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const VkMemoryBarrier * pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier * pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier * pImageMemoryBarriers);
+typedef void (GLAD_API_PTR *PFN_vkCmdWriteTimestamp)(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkQueryPool queryPool, uint32_t query);
+typedef VkResult (GLAD_API_PTR *PFN_vkCreateBuffer)(VkDevice device, const VkBufferCreateInfo * pCreateInfo, const VkAllocationCallbacks * pAllocator, VkBuffer * pBuffer);
+typedef VkResult (GLAD_API_PTR *PFN_vkCreateBufferView)(VkDevice device, const VkBufferViewCreateInfo * pCreateInfo, const VkAllocationCallbacks * pAllocator, VkBufferView * pView);
+typedef VkResult (GLAD_API_PTR *PFN_vkCreateCommandPool)(VkDevice device, const VkCommandPoolCreateInfo * pCreateInfo, const VkAllocationCallbacks * pAllocator, VkCommandPool * pCommandPool);
+typedef VkResult (GLAD_API_PTR *PFN_vkCreateComputePipelines)(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkComputePipelineCreateInfo * pCreateInfos, const VkAllocationCallbacks * pAllocator, VkPipeline * pPipelines);
+typedef VkResult (GLAD_API_PTR *PFN_vkCreateDebugReportCallbackEXT)(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT * pCreateInfo, const VkAllocationCallbacks * pAllocator, VkDebugReportCallbackEXT * pCallback);
+typedef VkResult (GLAD_API_PTR *PFN_vkCreateDescriptorPool)(VkDevice device, const VkDescriptorPoolCreateInfo * pCreateInfo, const VkAllocationCallbacks * pAllocator, VkDescriptorPool * pDescriptorPool);
+typedef VkResult (GLAD_API_PTR *PFN_vkCreateDescriptorSetLayout)(VkDevice device, const VkDescriptorSetLayoutCreateInfo * pCreateInfo, const VkAllocationCallbacks * pAllocator, VkDescriptorSetLayout * pSetLayout);
+typedef VkResult (GLAD_API_PTR *PFN_vkCreateDescriptorUpdateTemplate)(VkDevice device, const VkDescriptorUpdateTemplateCreateInfo * pCreateInfo, const VkAllocationCallbacks * pAllocator, VkDescriptorUpdateTemplate * pDescriptorUpdateTemplate);
+typedef VkResult (GLAD_API_PTR *PFN_vkCreateDevice)(VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo * pCreateInfo, const VkAllocationCallbacks * pAllocator, VkDevice * pDevice);
+typedef VkResult (GLAD_API_PTR *PFN_vkCreateEvent)(VkDevice device, const VkEventCreateInfo * pCreateInfo, const VkAllocationCallbacks * pAllocator, VkEvent * pEvent);
+typedef VkResult (GLAD_API_PTR *PFN_vkCreateFence)(VkDevice device, const VkFenceCreateInfo * pCreateInfo, const VkAllocationCallbacks * pAllocator, VkFence * pFence);
+typedef VkResult (GLAD_API_PTR *PFN_vkCreateFramebuffer)(VkDevice device, const VkFramebufferCreateInfo * pCreateInfo, const VkAllocationCallbacks * pAllocator, VkFramebuffer * pFramebuffer);
+typedef VkResult (GLAD_API_PTR *PFN_vkCreateGraphicsPipelines)(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkGraphicsPipelineCreateInfo * pCreateInfos, const VkAllocationCallbacks * pAllocator, VkPipeline * pPipelines);
+typedef VkResult (GLAD_API_PTR *PFN_vkCreateImage)(VkDevice device, const VkImageCreateInfo * pCreateInfo, const VkAllocationCallbacks * pAllocator, VkImage * pImage);
+typedef VkResult (GLAD_API_PTR *PFN_vkCreateImageView)(VkDevice device, const VkImageViewCreateInfo * pCreateInfo, const VkAllocationCallbacks * pAllocator, VkImageView * pView);
+typedef VkResult (GLAD_API_PTR *PFN_vkCreateInstance)(const VkInstanceCreateInfo * pCreateInfo, const VkAllocationCallbacks * pAllocator, VkInstance * pInstance);
+typedef VkResult (GLAD_API_PTR *PFN_vkCreatePipelineCache)(VkDevice device, const VkPipelineCacheCreateInfo * pCreateInfo, const VkAllocationCallbacks * pAllocator, VkPipelineCache * pPipelineCache);
+typedef VkResult (GLAD_API_PTR *PFN_vkCreatePipelineLayout)(VkDevice device, const VkPipelineLayoutCreateInfo * pCreateInfo, const VkAllocationCallbacks * pAllocator, VkPipelineLayout * pPipelineLayout);
+typedef VkResult (GLAD_API_PTR *PFN_vkCreateQueryPool)(VkDevice device, const VkQueryPoolCreateInfo * pCreateInfo, const VkAllocationCallbacks * pAllocator, VkQueryPool * pQueryPool);
+typedef VkResult (GLAD_API_PTR *PFN_vkCreateRenderPass)(VkDevice device, const VkRenderPassCreateInfo * pCreateInfo, const VkAllocationCallbacks * pAllocator, VkRenderPass * pRenderPass);
+typedef VkResult (GLAD_API_PTR *PFN_vkCreateSampler)(VkDevice device, const VkSamplerCreateInfo * pCreateInfo, const VkAllocationCallbacks * pAllocator, VkSampler * pSampler);
+typedef VkResult (GLAD_API_PTR *PFN_vkCreateSamplerYcbcrConversion)(VkDevice device, const VkSamplerYcbcrConversionCreateInfo * pCreateInfo, const VkAllocationCallbacks * pAllocator, VkSamplerYcbcrConversion * pYcbcrConversion);
+typedef VkResult (GLAD_API_PTR *PFN_vkCreateSemaphore)(VkDevice device, const VkSemaphoreCreateInfo * pCreateInfo, const VkAllocationCallbacks * pAllocator, VkSemaphore * pSemaphore);
+typedef VkResult (GLAD_API_PTR *PFN_vkCreateShaderModule)(VkDevice device, const VkShaderModuleCreateInfo * pCreateInfo, const VkAllocationCallbacks * pAllocator, VkShaderModule * pShaderModule);
+typedef VkResult (GLAD_API_PTR *PFN_vkCreateSwapchainKHR)(VkDevice device, const VkSwapchainCreateInfoKHR * pCreateInfo, const VkAllocationCallbacks * pAllocator, VkSwapchainKHR * pSwapchain);
+typedef void (GLAD_API_PTR *PFN_vkDebugReportMessageEXT)(VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objectType, uint64_t object, size_t location, int32_t messageCode, const char * pLayerPrefix, const char * pMessage);
+typedef void (GLAD_API_PTR *PFN_vkDestroyBuffer)(VkDevice device, VkBuffer buffer, const VkAllocationCallbacks * pAllocator);
+typedef void (GLAD_API_PTR *PFN_vkDestroyBufferView)(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks * pAllocator);
+typedef void (GLAD_API_PTR *PFN_vkDestroyCommandPool)(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks * pAllocator);
+typedef void (GLAD_API_PTR *PFN_vkDestroyDebugReportCallbackEXT)(VkInstance instance, VkDebugReportCallbackEXT callback, const VkAllocationCallbacks * pAllocator);
+typedef void (GLAD_API_PTR *PFN_vkDestroyDescriptorPool)(VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks * pAllocator);
+typedef void (GLAD_API_PTR *PFN_vkDestroyDescriptorSetLayout)(VkDevice device, VkDescriptorSetLayout descriptorSetLayout, const VkAllocationCallbacks * pAllocator);
+typedef void (GLAD_API_PTR *PFN_vkDestroyDescriptorUpdateTemplate)(VkDevice device, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const VkAllocationCallbacks * pAllocator);
+typedef void (GLAD_API_PTR *PFN_vkDestroyDevice)(VkDevice device, const VkAllocationCallbacks * pAllocator);
+typedef void (GLAD_API_PTR *PFN_vkDestroyEvent)(VkDevice device, VkEvent event, const VkAllocationCallbacks * pAllocator);
+typedef void (GLAD_API_PTR *PFN_vkDestroyFence)(VkDevice device, VkFence fence, const VkAllocationCallbacks * pAllocator);
+typedef void (GLAD_API_PTR *PFN_vkDestroyFramebuffer)(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks * pAllocator);
+typedef void (GLAD_API_PTR *PFN_vkDestroyImage)(VkDevice device, VkImage image, const VkAllocationCallbacks * pAllocator);
+typedef void (GLAD_API_PTR *PFN_vkDestroyImageView)(VkDevice device, VkImageView imageView, const VkAllocationCallbacks * pAllocator);
+typedef void (GLAD_API_PTR *PFN_vkDestroyInstance)(VkInstance instance, const VkAllocationCallbacks * pAllocator);
+typedef void (GLAD_API_PTR *PFN_vkDestroyPipeline)(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks * pAllocator);
+typedef void (GLAD_API_PTR *PFN_vkDestroyPipelineCache)(VkDevice device, VkPipelineCache pipelineCache, const VkAllocationCallbacks * pAllocator);
+typedef void (GLAD_API_PTR *PFN_vkDestroyPipelineLayout)(VkDevice device, VkPipelineLayout pipelineLayout, const VkAllocationCallbacks * pAllocator);
+typedef void (GLAD_API_PTR *PFN_vkDestroyQueryPool)(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks * pAllocator);
+typedef void (GLAD_API_PTR *PFN_vkDestroyRenderPass)(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks * pAllocator);
+typedef void (GLAD_API_PTR *PFN_vkDestroySampler)(VkDevice device, VkSampler sampler, const VkAllocationCallbacks * pAllocator);
+typedef void (GLAD_API_PTR *PFN_vkDestroySamplerYcbcrConversion)(VkDevice device, VkSamplerYcbcrConversion ycbcrConversion, const VkAllocationCallbacks * pAllocator);
+typedef void (GLAD_API_PTR *PFN_vkDestroySemaphore)(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks * pAllocator);
+typedef void (GLAD_API_PTR *PFN_vkDestroyShaderModule)(VkDevice device, VkShaderModule shaderModule, const VkAllocationCallbacks * pAllocator);
+typedef void (GLAD_API_PTR *PFN_vkDestroySurfaceKHR)(VkInstance instance, VkSurfaceKHR surface, const VkAllocationCallbacks * pAllocator);
+typedef void (GLAD_API_PTR *PFN_vkDestroySwapchainKHR)(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks * pAllocator);
+typedef VkResult (GLAD_API_PTR *PFN_vkDeviceWaitIdle)(VkDevice device);
+typedef VkResult (GLAD_API_PTR *PFN_vkEndCommandBuffer)(VkCommandBuffer commandBuffer);
+typedef VkResult (GLAD_API_PTR *PFN_vkEnumerateDeviceExtensionProperties)(VkPhysicalDevice physicalDevice, const char * pLayerName, uint32_t * pPropertyCount, VkExtensionProperties * pProperties);
+typedef VkResult (GLAD_API_PTR *PFN_vkEnumerateDeviceLayerProperties)(VkPhysicalDevice physicalDevice, uint32_t * pPropertyCount, VkLayerProperties * pProperties);
+typedef VkResult (GLAD_API_PTR *PFN_vkEnumerateInstanceExtensionProperties)(const char * pLayerName, uint32_t * pPropertyCount, VkExtensionProperties * pProperties);
+typedef VkResult (GLAD_API_PTR *PFN_vkEnumerateInstanceLayerProperties)(uint32_t * pPropertyCount, VkLayerProperties * pProperties);
+typedef VkResult (GLAD_API_PTR *PFN_vkEnumerateInstanceVersion)(uint32_t * pApiVersion);
+typedef VkResult (GLAD_API_PTR *PFN_vkEnumeratePhysicalDeviceGroups)(VkInstance instance, uint32_t * pPhysicalDeviceGroupCount, VkPhysicalDeviceGroupProperties * pPhysicalDeviceGroupProperties);
+typedef VkResult (GLAD_API_PTR *PFN_vkEnumeratePhysicalDevices)(VkInstance instance, uint32_t * pPhysicalDeviceCount, VkPhysicalDevice * pPhysicalDevices);
+typedef VkResult (GLAD_API_PTR *PFN_vkFlushMappedMemoryRanges)(VkDevice device, uint32_t memoryRangeCount, const VkMappedMemoryRange * pMemoryRanges);
+typedef void (GLAD_API_PTR *PFN_vkFreeCommandBuffers)(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount, const VkCommandBuffer * pCommandBuffers);
+typedef VkResult (GLAD_API_PTR *PFN_vkFreeDescriptorSets)(VkDevice device, VkDescriptorPool descriptorPool, uint32_t descriptorSetCount, const VkDescriptorSet * pDescriptorSets);
+typedef void (GLAD_API_PTR *PFN_vkFreeMemory)(VkDevice device, VkDeviceMemory memory, const VkAllocationCallbacks * pAllocator);
+typedef void (GLAD_API_PTR *PFN_vkGetBufferMemoryRequirements)(VkDevice device, VkBuffer buffer, VkMemoryRequirements * pMemoryRequirements);
+typedef void (GLAD_API_PTR *PFN_vkGetBufferMemoryRequirements2)(VkDevice device, const VkBufferMemoryRequirementsInfo2 * pInfo, VkMemoryRequirements2 * pMemoryRequirements);
+typedef void (GLAD_API_PTR *PFN_vkGetDescriptorSetLayoutSupport)(VkDevice device, const VkDescriptorSetLayoutCreateInfo * pCreateInfo, VkDescriptorSetLayoutSupport * pSupport);
+typedef void (GLAD_API_PTR *PFN_vkGetDeviceGroupPeerMemoryFeatures)(VkDevice device, uint32_t heapIndex, uint32_t localDeviceIndex, uint32_t remoteDeviceIndex, VkPeerMemoryFeatureFlags * pPeerMemoryFeatures);
+typedef VkResult (GLAD_API_PTR *PFN_vkGetDeviceGroupPresentCapabilitiesKHR)(VkDevice device, VkDeviceGroupPresentCapabilitiesKHR * pDeviceGroupPresentCapabilities);
+typedef VkResult (GLAD_API_PTR *PFN_vkGetDeviceGroupSurfacePresentModesKHR)(VkDevice device, VkSurfaceKHR surface, VkDeviceGroupPresentModeFlagsKHR * pModes);
+typedef void (GLAD_API_PTR *PFN_vkGetDeviceMemoryCommitment)(VkDevice device, VkDeviceMemory memory, VkDeviceSize * pCommittedMemoryInBytes);
+typedef PFN_vkVoidFunction (GLAD_API_PTR *PFN_vkGetDeviceProcAddr)(VkDevice device, const char * pName);
+typedef void (GLAD_API_PTR *PFN_vkGetDeviceQueue)(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue * pQueue);
+typedef void (GLAD_API_PTR *PFN_vkGetDeviceQueue2)(VkDevice device, const VkDeviceQueueInfo2 * pQueueInfo, VkQueue * pQueue);
+typedef VkResult (GLAD_API_PTR *PFN_vkGetEventStatus)(VkDevice device, VkEvent event);
+typedef VkResult (GLAD_API_PTR *PFN_vkGetFenceStatus)(VkDevice device, VkFence fence);
+typedef void (GLAD_API_PTR *PFN_vkGetImageMemoryRequirements)(VkDevice device, VkImage image, VkMemoryRequirements * pMemoryRequirements);
+typedef void (GLAD_API_PTR *PFN_vkGetImageMemoryRequirements2)(VkDevice device, const VkImageMemoryRequirementsInfo2 * pInfo, VkMemoryRequirements2 * pMemoryRequirements);
+typedef void (GLAD_API_PTR *PFN_vkGetImageSparseMemoryRequirements)(VkDevice device, VkImage image, uint32_t * pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements * pSparseMemoryRequirements);
+typedef void (GLAD_API_PTR *PFN_vkGetImageSparseMemoryRequirements2)(VkDevice device, const VkImageSparseMemoryRequirementsInfo2 * pInfo, uint32_t * pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements2 * pSparseMemoryRequirements);
+typedef void (GLAD_API_PTR *PFN_vkGetImageSubresourceLayout)(VkDevice device, VkImage image, const VkImageSubresource * pSubresource, VkSubresourceLayout * pLayout);
+typedef PFN_vkVoidFunction (GLAD_API_PTR *PFN_vkGetInstanceProcAddr)(VkInstance instance, const char * pName);
+typedef void (GLAD_API_PTR *PFN_vkGetPhysicalDeviceExternalBufferProperties)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalBufferInfo * pExternalBufferInfo, VkExternalBufferProperties * pExternalBufferProperties);
+typedef void (GLAD_API_PTR *PFN_vkGetPhysicalDeviceExternalFenceProperties)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalFenceInfo * pExternalFenceInfo, VkExternalFenceProperties * pExternalFenceProperties);
+typedef void (GLAD_API_PTR *PFN_vkGetPhysicalDeviceExternalSemaphoreProperties)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalSemaphoreInfo * pExternalSemaphoreInfo, VkExternalSemaphoreProperties * pExternalSemaphoreProperties);
+typedef void (GLAD_API_PTR *PFN_vkGetPhysicalDeviceFeatures)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceFeatures * pFeatures);
+typedef void (GLAD_API_PTR *PFN_vkGetPhysicalDeviceFeatures2)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceFeatures2 * pFeatures);
+typedef void (GLAD_API_PTR *PFN_vkGetPhysicalDeviceFormatProperties)(VkPhysicalDevice physicalDevice, VkFormat format, VkFormatProperties * pFormatProperties);
+typedef void (GLAD_API_PTR *PFN_vkGetPhysicalDeviceFormatProperties2)(VkPhysicalDevice physicalDevice, VkFormat format, VkFormatProperties2 * pFormatProperties);
+typedef VkResult (GLAD_API_PTR *PFN_vkGetPhysicalDeviceImageFormatProperties)(VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, VkImageTiling tiling, VkImageUsageFlags usage, VkImageCreateFlags flags, VkImageFormatProperties * pImageFormatProperties);
+typedef VkResult (GLAD_API_PTR *PFN_vkGetPhysicalDeviceImageFormatProperties2)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceImageFormatInfo2 * pImageFormatInfo, VkImageFormatProperties2 * pImageFormatProperties);
+typedef void (GLAD_API_PTR *PFN_vkGetPhysicalDeviceMemoryProperties)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties * pMemoryProperties);
+typedef void (GLAD_API_PTR *PFN_vkGetPhysicalDeviceMemoryProperties2)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties2 * pMemoryProperties);
+typedef VkResult (GLAD_API_PTR *PFN_vkGetPhysicalDevicePresentRectanglesKHR)(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, uint32_t * pRectCount, VkRect2D * pRects);
+typedef void (GLAD_API_PTR *PFN_vkGetPhysicalDeviceProperties)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties * pProperties);
+typedef void (GLAD_API_PTR *PFN_vkGetPhysicalDeviceProperties2)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties2 * pProperties);
+typedef void (GLAD_API_PTR *PFN_vkGetPhysicalDeviceQueueFamilyProperties)(VkPhysicalDevice physicalDevice, uint32_t * pQueueFamilyPropertyCount, VkQueueFamilyProperties * pQueueFamilyProperties);
+typedef void (GLAD_API_PTR *PFN_vkGetPhysicalDeviceQueueFamilyProperties2)(VkPhysicalDevice physicalDevice, uint32_t * pQueueFamilyPropertyCount, VkQueueFamilyProperties2 * pQueueFamilyProperties);
+typedef void (GLAD_API_PTR *PFN_vkGetPhysicalDeviceSparseImageFormatProperties)(VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, VkSampleCountFlagBits samples, VkImageUsageFlags usage, VkImageTiling tiling, uint32_t * pPropertyCount, VkSparseImageFormatProperties * pProperties);
+typedef void (GLAD_API_PTR *PFN_vkGetPhysicalDeviceSparseImageFormatProperties2)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSparseImageFormatInfo2 * pFormatInfo, uint32_t * pPropertyCount, VkSparseImageFormatProperties2 * pProperties);
+typedef VkResult (GLAD_API_PTR *PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR)(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, VkSurfaceCapabilitiesKHR * pSurfaceCapabilities);
+typedef VkResult (GLAD_API_PTR *PFN_vkGetPhysicalDeviceSurfaceFormatsKHR)(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, uint32_t * pSurfaceFormatCount, VkSurfaceFormatKHR * pSurfaceFormats);
+typedef VkResult (GLAD_API_PTR *PFN_vkGetPhysicalDeviceSurfacePresentModesKHR)(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, uint32_t * pPresentModeCount, VkPresentModeKHR * pPresentModes);
+typedef VkResult (GLAD_API_PTR *PFN_vkGetPhysicalDeviceSurfaceSupportKHR)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, VkSurfaceKHR surface, VkBool32 * pSupported);
+typedef VkResult (GLAD_API_PTR *PFN_vkGetPipelineCacheData)(VkDevice device, VkPipelineCache pipelineCache, size_t * pDataSize, void * pData);
+typedef VkResult (GLAD_API_PTR *PFN_vkGetQueryPoolResults)(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, size_t dataSize, void * pData, VkDeviceSize stride, VkQueryResultFlags flags);
+typedef void (GLAD_API_PTR *PFN_vkGetRenderAreaGranularity)(VkDevice device, VkRenderPass renderPass, VkExtent2D * pGranularity);
+typedef VkResult (GLAD_API_PTR *PFN_vkGetSwapchainImagesKHR)(VkDevice device, VkSwapchainKHR swapchain, uint32_t * pSwapchainImageCount, VkImage * pSwapchainImages);
+typedef VkResult (GLAD_API_PTR *PFN_vkInvalidateMappedMemoryRanges)(VkDevice device, uint32_t memoryRangeCount, const VkMappedMemoryRange * pMemoryRanges);
+typedef VkResult (GLAD_API_PTR *PFN_vkMapMemory)(VkDevice device, VkDeviceMemory memory, VkDeviceSize offset, VkDeviceSize size, VkMemoryMapFlags flags, void ** ppData);
+typedef VkResult (GLAD_API_PTR *PFN_vkMergePipelineCaches)(VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount, const VkPipelineCache * pSrcCaches);
+typedef VkResult (GLAD_API_PTR *PFN_vkQueueBindSparse)(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo * pBindInfo, VkFence fence);
+typedef VkResult (GLAD_API_PTR *PFN_vkQueuePresentKHR)(VkQueue queue, const VkPresentInfoKHR * pPresentInfo);
+typedef VkResult (GLAD_API_PTR *PFN_vkQueueSubmit)(VkQueue queue, uint32_t submitCount, const VkSubmitInfo * pSubmits, VkFence fence);
+typedef VkResult (GLAD_API_PTR *PFN_vkQueueWaitIdle)(VkQueue queue);
+typedef VkResult (GLAD_API_PTR *PFN_vkResetCommandBuffer)(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags);
+typedef VkResult (GLAD_API_PTR *PFN_vkResetCommandPool)(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags);
+typedef VkResult (GLAD_API_PTR *PFN_vkResetDescriptorPool)(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags);
+typedef VkResult (GLAD_API_PTR *PFN_vkResetEvent)(VkDevice device, VkEvent event);
+typedef VkResult (GLAD_API_PTR *PFN_vkResetFences)(VkDevice device, uint32_t fenceCount, const VkFence * pFences);
+typedef VkResult (GLAD_API_PTR *PFN_vkSetEvent)(VkDevice device, VkEvent event);
+typedef void (GLAD_API_PTR *PFN_vkTrimCommandPool)(VkDevice device, VkCommandPool commandPool, VkCommandPoolTrimFlags flags);
+typedef void (GLAD_API_PTR *PFN_vkUnmapMemory)(VkDevice device, VkDeviceMemory memory);
+typedef void (GLAD_API_PTR *PFN_vkUpdateDescriptorSetWithTemplate)(VkDevice device, VkDescriptorSet descriptorSet, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const void * pData);
+typedef void (GLAD_API_PTR *PFN_vkUpdateDescriptorSets)(VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet * pDescriptorWrites, uint32_t descriptorCopyCount, const VkCopyDescriptorSet * pDescriptorCopies);
+typedef VkResult (GLAD_API_PTR *PFN_vkWaitForFences)(VkDevice device, uint32_t fenceCount, const VkFence * pFences, VkBool32 waitAll, uint64_t timeout);
+
+GLAD_API_CALL PFN_vkAcquireNextImage2KHR glad_vkAcquireNextImage2KHR;
+#define vkAcquireNextImage2KHR glad_vkAcquireNextImage2KHR
+GLAD_API_CALL PFN_vkAcquireNextImageKHR glad_vkAcquireNextImageKHR;
+#define vkAcquireNextImageKHR glad_vkAcquireNextImageKHR
+GLAD_API_CALL PFN_vkAllocateCommandBuffers glad_vkAllocateCommandBuffers;
+#define vkAllocateCommandBuffers glad_vkAllocateCommandBuffers
+GLAD_API_CALL PFN_vkAllocateDescriptorSets glad_vkAllocateDescriptorSets;
+#define vkAllocateDescriptorSets glad_vkAllocateDescriptorSets
+GLAD_API_CALL PFN_vkAllocateMemory glad_vkAllocateMemory;
+#define vkAllocateMemory glad_vkAllocateMemory
+GLAD_API_CALL PFN_vkBeginCommandBuffer glad_vkBeginCommandBuffer;
+#define vkBeginCommandBuffer glad_vkBeginCommandBuffer
+GLAD_API_CALL PFN_vkBindBufferMemory glad_vkBindBufferMemory;
+#define vkBindBufferMemory glad_vkBindBufferMemory
+GLAD_API_CALL PFN_vkBindBufferMemory2 glad_vkBindBufferMemory2;
+#define vkBindBufferMemory2 glad_vkBindBufferMemory2
+GLAD_API_CALL PFN_vkBindImageMemory glad_vkBindImageMemory;
+#define vkBindImageMemory glad_vkBindImageMemory
+GLAD_API_CALL PFN_vkBindImageMemory2 glad_vkBindImageMemory2;
+#define vkBindImageMemory2 glad_vkBindImageMemory2
+GLAD_API_CALL PFN_vkCmdBeginQuery glad_vkCmdBeginQuery;
+#define vkCmdBeginQuery glad_vkCmdBeginQuery
+GLAD_API_CALL PFN_vkCmdBeginRenderPass glad_vkCmdBeginRenderPass;
+#define vkCmdBeginRenderPass glad_vkCmdBeginRenderPass
+GLAD_API_CALL PFN_vkCmdBindDescriptorSets glad_vkCmdBindDescriptorSets;
+#define vkCmdBindDescriptorSets glad_vkCmdBindDescriptorSets
+GLAD_API_CALL PFN_vkCmdBindIndexBuffer glad_vkCmdBindIndexBuffer;
+#define vkCmdBindIndexBuffer glad_vkCmdBindIndexBuffer
+GLAD_API_CALL PFN_vkCmdBindPipeline glad_vkCmdBindPipeline;
+#define vkCmdBindPipeline glad_vkCmdBindPipeline
+GLAD_API_CALL PFN_vkCmdBindVertexBuffers glad_vkCmdBindVertexBuffers;
+#define vkCmdBindVertexBuffers glad_vkCmdBindVertexBuffers
+GLAD_API_CALL PFN_vkCmdBlitImage glad_vkCmdBlitImage;
+#define vkCmdBlitImage glad_vkCmdBlitImage
+GLAD_API_CALL PFN_vkCmdClearAttachments glad_vkCmdClearAttachments;
+#define vkCmdClearAttachments glad_vkCmdClearAttachments
+GLAD_API_CALL PFN_vkCmdClearColorImage glad_vkCmdClearColorImage;
+#define vkCmdClearColorImage glad_vkCmdClearColorImage
+GLAD_API_CALL PFN_vkCmdClearDepthStencilImage glad_vkCmdClearDepthStencilImage;
+#define vkCmdClearDepthStencilImage glad_vkCmdClearDepthStencilImage
+GLAD_API_CALL PFN_vkCmdCopyBuffer glad_vkCmdCopyBuffer;
+#define vkCmdCopyBuffer glad_vkCmdCopyBuffer
+GLAD_API_CALL PFN_vkCmdCopyBufferToImage glad_vkCmdCopyBufferToImage;
+#define vkCmdCopyBufferToImage glad_vkCmdCopyBufferToImage
+GLAD_API_CALL PFN_vkCmdCopyImage glad_vkCmdCopyImage;
+#define vkCmdCopyImage glad_vkCmdCopyImage
+GLAD_API_CALL PFN_vkCmdCopyImageToBuffer glad_vkCmdCopyImageToBuffer;
+#define vkCmdCopyImageToBuffer glad_vkCmdCopyImageToBuffer
+GLAD_API_CALL PFN_vkCmdCopyQueryPoolResults glad_vkCmdCopyQueryPoolResults;
+#define vkCmdCopyQueryPoolResults glad_vkCmdCopyQueryPoolResults
+GLAD_API_CALL PFN_vkCmdDispatch glad_vkCmdDispatch;
+#define vkCmdDispatch glad_vkCmdDispatch
+GLAD_API_CALL PFN_vkCmdDispatchBase glad_vkCmdDispatchBase;
+#define vkCmdDispatchBase glad_vkCmdDispatchBase
+GLAD_API_CALL PFN_vkCmdDispatchIndirect glad_vkCmdDispatchIndirect;
+#define vkCmdDispatchIndirect glad_vkCmdDispatchIndirect
+GLAD_API_CALL PFN_vkCmdDraw glad_vkCmdDraw;
+#define vkCmdDraw glad_vkCmdDraw
+GLAD_API_CALL PFN_vkCmdDrawIndexed glad_vkCmdDrawIndexed;
+#define vkCmdDrawIndexed glad_vkCmdDrawIndexed
+GLAD_API_CALL PFN_vkCmdDrawIndexedIndirect glad_vkCmdDrawIndexedIndirect;
+#define vkCmdDrawIndexedIndirect glad_vkCmdDrawIndexedIndirect
+GLAD_API_CALL PFN_vkCmdDrawIndirect glad_vkCmdDrawIndirect;
+#define vkCmdDrawIndirect glad_vkCmdDrawIndirect
+GLAD_API_CALL PFN_vkCmdEndQuery glad_vkCmdEndQuery;
+#define vkCmdEndQuery glad_vkCmdEndQuery
+GLAD_API_CALL PFN_vkCmdEndRenderPass glad_vkCmdEndRenderPass;
+#define vkCmdEndRenderPass glad_vkCmdEndRenderPass
+GLAD_API_CALL PFN_vkCmdExecuteCommands glad_vkCmdExecuteCommands;
+#define vkCmdExecuteCommands glad_vkCmdExecuteCommands
+GLAD_API_CALL PFN_vkCmdFillBuffer glad_vkCmdFillBuffer;
+#define vkCmdFillBuffer glad_vkCmdFillBuffer
+GLAD_API_CALL PFN_vkCmdNextSubpass glad_vkCmdNextSubpass;
+#define vkCmdNextSubpass glad_vkCmdNextSubpass
+GLAD_API_CALL PFN_vkCmdPipelineBarrier glad_vkCmdPipelineBarrier;
+#define vkCmdPipelineBarrier glad_vkCmdPipelineBarrier
+GLAD_API_CALL PFN_vkCmdPushConstants glad_vkCmdPushConstants;
+#define vkCmdPushConstants glad_vkCmdPushConstants
+GLAD_API_CALL PFN_vkCmdResetEvent glad_vkCmdResetEvent;
+#define vkCmdResetEvent glad_vkCmdResetEvent
+GLAD_API_CALL PFN_vkCmdResetQueryPool glad_vkCmdResetQueryPool;
+#define vkCmdResetQueryPool glad_vkCmdResetQueryPool
+GLAD_API_CALL PFN_vkCmdResolveImage glad_vkCmdResolveImage;
+#define vkCmdResolveImage glad_vkCmdResolveImage
+GLAD_API_CALL PFN_vkCmdSetBlendConstants glad_vkCmdSetBlendConstants;
+#define vkCmdSetBlendConstants glad_vkCmdSetBlendConstants
+GLAD_API_CALL PFN_vkCmdSetDepthBias glad_vkCmdSetDepthBias;
+#define vkCmdSetDepthBias glad_vkCmdSetDepthBias
+GLAD_API_CALL PFN_vkCmdSetDepthBounds glad_vkCmdSetDepthBounds;
+#define vkCmdSetDepthBounds glad_vkCmdSetDepthBounds
+GLAD_API_CALL PFN_vkCmdSetDeviceMask glad_vkCmdSetDeviceMask;
+#define vkCmdSetDeviceMask glad_vkCmdSetDeviceMask
+GLAD_API_CALL PFN_vkCmdSetEvent glad_vkCmdSetEvent;
+#define vkCmdSetEvent glad_vkCmdSetEvent
+GLAD_API_CALL PFN_vkCmdSetLineWidth glad_vkCmdSetLineWidth;
+#define vkCmdSetLineWidth glad_vkCmdSetLineWidth
+GLAD_API_CALL PFN_vkCmdSetScissor glad_vkCmdSetScissor;
+#define vkCmdSetScissor glad_vkCmdSetScissor
+GLAD_API_CALL PFN_vkCmdSetStencilCompareMask glad_vkCmdSetStencilCompareMask;
+#define vkCmdSetStencilCompareMask glad_vkCmdSetStencilCompareMask
+GLAD_API_CALL PFN_vkCmdSetStencilReference glad_vkCmdSetStencilReference;
+#define vkCmdSetStencilReference glad_vkCmdSetStencilReference
+GLAD_API_CALL PFN_vkCmdSetStencilWriteMask glad_vkCmdSetStencilWriteMask;
+#define vkCmdSetStencilWriteMask glad_vkCmdSetStencilWriteMask
+GLAD_API_CALL PFN_vkCmdSetViewport glad_vkCmdSetViewport;
+#define vkCmdSetViewport glad_vkCmdSetViewport
+GLAD_API_CALL PFN_vkCmdUpdateBuffer glad_vkCmdUpdateBuffer;
+#define vkCmdUpdateBuffer glad_vkCmdUpdateBuffer
+GLAD_API_CALL PFN_vkCmdWaitEvents glad_vkCmdWaitEvents;
+#define vkCmdWaitEvents glad_vkCmdWaitEvents
+GLAD_API_CALL PFN_vkCmdWriteTimestamp glad_vkCmdWriteTimestamp;
+#define vkCmdWriteTimestamp glad_vkCmdWriteTimestamp
+GLAD_API_CALL PFN_vkCreateBuffer glad_vkCreateBuffer;
+#define vkCreateBuffer glad_vkCreateBuffer
+GLAD_API_CALL PFN_vkCreateBufferView glad_vkCreateBufferView;
+#define vkCreateBufferView glad_vkCreateBufferView
+GLAD_API_CALL PFN_vkCreateCommandPool glad_vkCreateCommandPool;
+#define vkCreateCommandPool glad_vkCreateCommandPool
+GLAD_API_CALL PFN_vkCreateComputePipelines glad_vkCreateComputePipelines;
+#define vkCreateComputePipelines glad_vkCreateComputePipelines
+GLAD_API_CALL PFN_vkCreateDebugReportCallbackEXT glad_vkCreateDebugReportCallbackEXT;
+#define vkCreateDebugReportCallbackEXT glad_vkCreateDebugReportCallbackEXT
+GLAD_API_CALL PFN_vkCreateDescriptorPool glad_vkCreateDescriptorPool;
+#define vkCreateDescriptorPool glad_vkCreateDescriptorPool
+GLAD_API_CALL PFN_vkCreateDescriptorSetLayout glad_vkCreateDescriptorSetLayout;
+#define vkCreateDescriptorSetLayout glad_vkCreateDescriptorSetLayout
+GLAD_API_CALL PFN_vkCreateDescriptorUpdateTemplate glad_vkCreateDescriptorUpdateTemplate;
+#define vkCreateDescriptorUpdateTemplate glad_vkCreateDescriptorUpdateTemplate
+GLAD_API_CALL PFN_vkCreateDevice glad_vkCreateDevice;
+#define vkCreateDevice glad_vkCreateDevice
+GLAD_API_CALL PFN_vkCreateEvent glad_vkCreateEvent;
+#define vkCreateEvent glad_vkCreateEvent
+GLAD_API_CALL PFN_vkCreateFence glad_vkCreateFence;
+#define vkCreateFence glad_vkCreateFence
+GLAD_API_CALL PFN_vkCreateFramebuffer glad_vkCreateFramebuffer;
+#define vkCreateFramebuffer glad_vkCreateFramebuffer
+GLAD_API_CALL PFN_vkCreateGraphicsPipelines glad_vkCreateGraphicsPipelines;
+#define vkCreateGraphicsPipelines glad_vkCreateGraphicsPipelines
+GLAD_API_CALL PFN_vkCreateImage glad_vkCreateImage;
+#define vkCreateImage glad_vkCreateImage
+GLAD_API_CALL PFN_vkCreateImageView glad_vkCreateImageView;
+#define vkCreateImageView glad_vkCreateImageView
+GLAD_API_CALL PFN_vkCreateInstance glad_vkCreateInstance;
+#define vkCreateInstance glad_vkCreateInstance
+GLAD_API_CALL PFN_vkCreatePipelineCache glad_vkCreatePipelineCache;
+#define vkCreatePipelineCache glad_vkCreatePipelineCache
+GLAD_API_CALL PFN_vkCreatePipelineLayout glad_vkCreatePipelineLayout;
+#define vkCreatePipelineLayout glad_vkCreatePipelineLayout
+GLAD_API_CALL PFN_vkCreateQueryPool glad_vkCreateQueryPool;
+#define vkCreateQueryPool glad_vkCreateQueryPool
+GLAD_API_CALL PFN_vkCreateRenderPass glad_vkCreateRenderPass;
+#define vkCreateRenderPass glad_vkCreateRenderPass
+GLAD_API_CALL PFN_vkCreateSampler glad_vkCreateSampler;
+#define vkCreateSampler glad_vkCreateSampler
+GLAD_API_CALL PFN_vkCreateSamplerYcbcrConversion glad_vkCreateSamplerYcbcrConversion;
+#define vkCreateSamplerYcbcrConversion glad_vkCreateSamplerYcbcrConversion
+GLAD_API_CALL PFN_vkCreateSemaphore glad_vkCreateSemaphore;
+#define vkCreateSemaphore glad_vkCreateSemaphore
+GLAD_API_CALL PFN_vkCreateShaderModule glad_vkCreateShaderModule;
+#define vkCreateShaderModule glad_vkCreateShaderModule
+GLAD_API_CALL PFN_vkCreateSwapchainKHR glad_vkCreateSwapchainKHR;
+#define vkCreateSwapchainKHR glad_vkCreateSwapchainKHR
+GLAD_API_CALL PFN_vkDebugReportMessageEXT glad_vkDebugReportMessageEXT;
+#define vkDebugReportMessageEXT glad_vkDebugReportMessageEXT
+GLAD_API_CALL PFN_vkDestroyBuffer glad_vkDestroyBuffer;
+#define vkDestroyBuffer glad_vkDestroyBuffer
+GLAD_API_CALL PFN_vkDestroyBufferView glad_vkDestroyBufferView;
+#define vkDestroyBufferView glad_vkDestroyBufferView
+GLAD_API_CALL PFN_vkDestroyCommandPool glad_vkDestroyCommandPool;
+#define vkDestroyCommandPool glad_vkDestroyCommandPool
+GLAD_API_CALL PFN_vkDestroyDebugReportCallbackEXT glad_vkDestroyDebugReportCallbackEXT;
+#define vkDestroyDebugReportCallbackEXT glad_vkDestroyDebugReportCallbackEXT
+GLAD_API_CALL PFN_vkDestroyDescriptorPool glad_vkDestroyDescriptorPool;
+#define vkDestroyDescriptorPool glad_vkDestroyDescriptorPool
+GLAD_API_CALL PFN_vkDestroyDescriptorSetLayout glad_vkDestroyDescriptorSetLayout;
+#define vkDestroyDescriptorSetLayout glad_vkDestroyDescriptorSetLayout
+GLAD_API_CALL PFN_vkDestroyDescriptorUpdateTemplate glad_vkDestroyDescriptorUpdateTemplate;
+#define vkDestroyDescriptorUpdateTemplate glad_vkDestroyDescriptorUpdateTemplate
+GLAD_API_CALL PFN_vkDestroyDevice glad_vkDestroyDevice;
+#define vkDestroyDevice glad_vkDestroyDevice
+GLAD_API_CALL PFN_vkDestroyEvent glad_vkDestroyEvent;
+#define vkDestroyEvent glad_vkDestroyEvent
+GLAD_API_CALL PFN_vkDestroyFence glad_vkDestroyFence;
+#define vkDestroyFence glad_vkDestroyFence
+GLAD_API_CALL PFN_vkDestroyFramebuffer glad_vkDestroyFramebuffer;
+#define vkDestroyFramebuffer glad_vkDestroyFramebuffer
+GLAD_API_CALL PFN_vkDestroyImage glad_vkDestroyImage;
+#define vkDestroyImage glad_vkDestroyImage
+GLAD_API_CALL PFN_vkDestroyImageView glad_vkDestroyImageView;
+#define vkDestroyImageView glad_vkDestroyImageView
+GLAD_API_CALL PFN_vkDestroyInstance glad_vkDestroyInstance;
+#define vkDestroyInstance glad_vkDestroyInstance
+GLAD_API_CALL PFN_vkDestroyPipeline glad_vkDestroyPipeline;
+#define vkDestroyPipeline glad_vkDestroyPipeline
+GLAD_API_CALL PFN_vkDestroyPipelineCache glad_vkDestroyPipelineCache;
+#define vkDestroyPipelineCache glad_vkDestroyPipelineCache
+GLAD_API_CALL PFN_vkDestroyPipelineLayout glad_vkDestroyPipelineLayout;
+#define vkDestroyPipelineLayout glad_vkDestroyPipelineLayout
+GLAD_API_CALL PFN_vkDestroyQueryPool glad_vkDestroyQueryPool;
+#define vkDestroyQueryPool glad_vkDestroyQueryPool
+GLAD_API_CALL PFN_vkDestroyRenderPass glad_vkDestroyRenderPass;
+#define vkDestroyRenderPass glad_vkDestroyRenderPass
+GLAD_API_CALL PFN_vkDestroySampler glad_vkDestroySampler;
+#define vkDestroySampler glad_vkDestroySampler
+GLAD_API_CALL PFN_vkDestroySamplerYcbcrConversion glad_vkDestroySamplerYcbcrConversion;
+#define vkDestroySamplerYcbcrConversion glad_vkDestroySamplerYcbcrConversion
+GLAD_API_CALL PFN_vkDestroySemaphore glad_vkDestroySemaphore;
+#define vkDestroySemaphore glad_vkDestroySemaphore
+GLAD_API_CALL PFN_vkDestroyShaderModule glad_vkDestroyShaderModule;
+#define vkDestroyShaderModule glad_vkDestroyShaderModule
+GLAD_API_CALL PFN_vkDestroySurfaceKHR glad_vkDestroySurfaceKHR;
+#define vkDestroySurfaceKHR glad_vkDestroySurfaceKHR
+GLAD_API_CALL PFN_vkDestroySwapchainKHR glad_vkDestroySwapchainKHR;
+#define vkDestroySwapchainKHR glad_vkDestroySwapchainKHR
+GLAD_API_CALL PFN_vkDeviceWaitIdle glad_vkDeviceWaitIdle;
+#define vkDeviceWaitIdle glad_vkDeviceWaitIdle
+GLAD_API_CALL PFN_vkEndCommandBuffer glad_vkEndCommandBuffer;
+#define vkEndCommandBuffer glad_vkEndCommandBuffer
+GLAD_API_CALL PFN_vkEnumerateDeviceExtensionProperties glad_vkEnumerateDeviceExtensionProperties;
+#define vkEnumerateDeviceExtensionProperties glad_vkEnumerateDeviceExtensionProperties
+GLAD_API_CALL PFN_vkEnumerateDeviceLayerProperties glad_vkEnumerateDeviceLayerProperties;
+#define vkEnumerateDeviceLayerProperties glad_vkEnumerateDeviceLayerProperties
+GLAD_API_CALL PFN_vkEnumerateInstanceExtensionProperties glad_vkEnumerateInstanceExtensionProperties;
+#define vkEnumerateInstanceExtensionProperties glad_vkEnumerateInstanceExtensionProperties
+GLAD_API_CALL PFN_vkEnumerateInstanceLayerProperties glad_vkEnumerateInstanceLayerProperties;
+#define vkEnumerateInstanceLayerProperties glad_vkEnumerateInstanceLayerProperties
+GLAD_API_CALL PFN_vkEnumerateInstanceVersion glad_vkEnumerateInstanceVersion;
+#define vkEnumerateInstanceVersion glad_vkEnumerateInstanceVersion
+GLAD_API_CALL PFN_vkEnumeratePhysicalDeviceGroups glad_vkEnumeratePhysicalDeviceGroups;
+#define vkEnumeratePhysicalDeviceGroups glad_vkEnumeratePhysicalDeviceGroups
+GLAD_API_CALL PFN_vkEnumeratePhysicalDevices glad_vkEnumeratePhysicalDevices;
+#define vkEnumeratePhysicalDevices glad_vkEnumeratePhysicalDevices
+GLAD_API_CALL PFN_vkFlushMappedMemoryRanges glad_vkFlushMappedMemoryRanges;
+#define vkFlushMappedMemoryRanges glad_vkFlushMappedMemoryRanges
+GLAD_API_CALL PFN_vkFreeCommandBuffers glad_vkFreeCommandBuffers;
+#define vkFreeCommandBuffers glad_vkFreeCommandBuffers
+GLAD_API_CALL PFN_vkFreeDescriptorSets glad_vkFreeDescriptorSets;
+#define vkFreeDescriptorSets glad_vkFreeDescriptorSets
+GLAD_API_CALL PFN_vkFreeMemory glad_vkFreeMemory;
+#define vkFreeMemory glad_vkFreeMemory
+GLAD_API_CALL PFN_vkGetBufferMemoryRequirements glad_vkGetBufferMemoryRequirements;
+#define vkGetBufferMemoryRequirements glad_vkGetBufferMemoryRequirements
+GLAD_API_CALL PFN_vkGetBufferMemoryRequirements2 glad_vkGetBufferMemoryRequirements2;
+#define vkGetBufferMemoryRequirements2 glad_vkGetBufferMemoryRequirements2
+GLAD_API_CALL PFN_vkGetDescriptorSetLayoutSupport glad_vkGetDescriptorSetLayoutSupport;
+#define vkGetDescriptorSetLayoutSupport glad_vkGetDescriptorSetLayoutSupport
+GLAD_API_CALL PFN_vkGetDeviceGroupPeerMemoryFeatures glad_vkGetDeviceGroupPeerMemoryFeatures;
+#define vkGetDeviceGroupPeerMemoryFeatures glad_vkGetDeviceGroupPeerMemoryFeatures
+GLAD_API_CALL PFN_vkGetDeviceGroupPresentCapabilitiesKHR glad_vkGetDeviceGroupPresentCapabilitiesKHR;
+#define vkGetDeviceGroupPresentCapabilitiesKHR glad_vkGetDeviceGroupPresentCapabilitiesKHR
+GLAD_API_CALL PFN_vkGetDeviceGroupSurfacePresentModesKHR glad_vkGetDeviceGroupSurfacePresentModesKHR;
+#define vkGetDeviceGroupSurfacePresentModesKHR glad_vkGetDeviceGroupSurfacePresentModesKHR
+GLAD_API_CALL PFN_vkGetDeviceMemoryCommitment glad_vkGetDeviceMemoryCommitment;
+#define vkGetDeviceMemoryCommitment glad_vkGetDeviceMemoryCommitment
+GLAD_API_CALL PFN_vkGetDeviceProcAddr glad_vkGetDeviceProcAddr;
+#define vkGetDeviceProcAddr glad_vkGetDeviceProcAddr
+GLAD_API_CALL PFN_vkGetDeviceQueue glad_vkGetDeviceQueue;
+#define vkGetDeviceQueue glad_vkGetDeviceQueue
+GLAD_API_CALL PFN_vkGetDeviceQueue2 glad_vkGetDeviceQueue2;
+#define vkGetDeviceQueue2 glad_vkGetDeviceQueue2
+GLAD_API_CALL PFN_vkGetEventStatus glad_vkGetEventStatus;
+#define vkGetEventStatus glad_vkGetEventStatus
+GLAD_API_CALL PFN_vkGetFenceStatus glad_vkGetFenceStatus;
+#define vkGetFenceStatus glad_vkGetFenceStatus
+GLAD_API_CALL PFN_vkGetImageMemoryRequirements glad_vkGetImageMemoryRequirements;
+#define vkGetImageMemoryRequirements glad_vkGetImageMemoryRequirements
+GLAD_API_CALL PFN_vkGetImageMemoryRequirements2 glad_vkGetImageMemoryRequirements2;
+#define vkGetImageMemoryRequirements2 glad_vkGetImageMemoryRequirements2
+GLAD_API_CALL PFN_vkGetImageSparseMemoryRequirements glad_vkGetImageSparseMemoryRequirements;
+#define vkGetImageSparseMemoryRequirements glad_vkGetImageSparseMemoryRequirements
+GLAD_API_CALL PFN_vkGetImageSparseMemoryRequirements2 glad_vkGetImageSparseMemoryRequirements2;
+#define vkGetImageSparseMemoryRequirements2 glad_vkGetImageSparseMemoryRequirements2
+GLAD_API_CALL PFN_vkGetImageSubresourceLayout glad_vkGetImageSubresourceLayout;
+#define vkGetImageSubresourceLayout glad_vkGetImageSubresourceLayout
+GLAD_API_CALL PFN_vkGetInstanceProcAddr glad_vkGetInstanceProcAddr;
+#define vkGetInstanceProcAddr glad_vkGetInstanceProcAddr
+GLAD_API_CALL PFN_vkGetPhysicalDeviceExternalBufferProperties glad_vkGetPhysicalDeviceExternalBufferProperties;
+#define vkGetPhysicalDeviceExternalBufferProperties glad_vkGetPhysicalDeviceExternalBufferProperties
+GLAD_API_CALL PFN_vkGetPhysicalDeviceExternalFenceProperties glad_vkGetPhysicalDeviceExternalFenceProperties;
+#define vkGetPhysicalDeviceExternalFenceProperties glad_vkGetPhysicalDeviceExternalFenceProperties
+GLAD_API_CALL PFN_vkGetPhysicalDeviceExternalSemaphoreProperties glad_vkGetPhysicalDeviceExternalSemaphoreProperties;
+#define vkGetPhysicalDeviceExternalSemaphoreProperties glad_vkGetPhysicalDeviceExternalSemaphoreProperties
+GLAD_API_CALL PFN_vkGetPhysicalDeviceFeatures glad_vkGetPhysicalDeviceFeatures;
+#define vkGetPhysicalDeviceFeatures glad_vkGetPhysicalDeviceFeatures
+GLAD_API_CALL PFN_vkGetPhysicalDeviceFeatures2 glad_vkGetPhysicalDeviceFeatures2;
+#define vkGetPhysicalDeviceFeatures2 glad_vkGetPhysicalDeviceFeatures2
+GLAD_API_CALL PFN_vkGetPhysicalDeviceFormatProperties glad_vkGetPhysicalDeviceFormatProperties;
+#define vkGetPhysicalDeviceFormatProperties glad_vkGetPhysicalDeviceFormatProperties
+GLAD_API_CALL PFN_vkGetPhysicalDeviceFormatProperties2 glad_vkGetPhysicalDeviceFormatProperties2;
+#define vkGetPhysicalDeviceFormatProperties2 glad_vkGetPhysicalDeviceFormatProperties2
+GLAD_API_CALL PFN_vkGetPhysicalDeviceImageFormatProperties glad_vkGetPhysicalDeviceImageFormatProperties;
+#define vkGetPhysicalDeviceImageFormatProperties glad_vkGetPhysicalDeviceImageFormatProperties
+GLAD_API_CALL PFN_vkGetPhysicalDeviceImageFormatProperties2 glad_vkGetPhysicalDeviceImageFormatProperties2;
+#define vkGetPhysicalDeviceImageFormatProperties2 glad_vkGetPhysicalDeviceImageFormatProperties2
+GLAD_API_CALL PFN_vkGetPhysicalDeviceMemoryProperties glad_vkGetPhysicalDeviceMemoryProperties;
+#define vkGetPhysicalDeviceMemoryProperties glad_vkGetPhysicalDeviceMemoryProperties
+GLAD_API_CALL PFN_vkGetPhysicalDeviceMemoryProperties2 glad_vkGetPhysicalDeviceMemoryProperties2;
+#define vkGetPhysicalDeviceMemoryProperties2 glad_vkGetPhysicalDeviceMemoryProperties2
+GLAD_API_CALL PFN_vkGetPhysicalDevicePresentRectanglesKHR glad_vkGetPhysicalDevicePresentRectanglesKHR;
+#define vkGetPhysicalDevicePresentRectanglesKHR glad_vkGetPhysicalDevicePresentRectanglesKHR
+GLAD_API_CALL PFN_vkGetPhysicalDeviceProperties glad_vkGetPhysicalDeviceProperties;
+#define vkGetPhysicalDeviceProperties glad_vkGetPhysicalDeviceProperties
+GLAD_API_CALL PFN_vkGetPhysicalDeviceProperties2 glad_vkGetPhysicalDeviceProperties2;
+#define vkGetPhysicalDeviceProperties2 glad_vkGetPhysicalDeviceProperties2
+GLAD_API_CALL PFN_vkGetPhysicalDeviceQueueFamilyProperties glad_vkGetPhysicalDeviceQueueFamilyProperties;
+#define vkGetPhysicalDeviceQueueFamilyProperties glad_vkGetPhysicalDeviceQueueFamilyProperties
+GLAD_API_CALL PFN_vkGetPhysicalDeviceQueueFamilyProperties2 glad_vkGetPhysicalDeviceQueueFamilyProperties2;
+#define vkGetPhysicalDeviceQueueFamilyProperties2 glad_vkGetPhysicalDeviceQueueFamilyProperties2
+GLAD_API_CALL PFN_vkGetPhysicalDeviceSparseImageFormatProperties glad_vkGetPhysicalDeviceSparseImageFormatProperties;
+#define vkGetPhysicalDeviceSparseImageFormatProperties glad_vkGetPhysicalDeviceSparseImageFormatProperties
+GLAD_API_CALL PFN_vkGetPhysicalDeviceSparseImageFormatProperties2 glad_vkGetPhysicalDeviceSparseImageFormatProperties2;
+#define vkGetPhysicalDeviceSparseImageFormatProperties2 glad_vkGetPhysicalDeviceSparseImageFormatProperties2
+GLAD_API_CALL PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR glad_vkGetPhysicalDeviceSurfaceCapabilitiesKHR;
+#define vkGetPhysicalDeviceSurfaceCapabilitiesKHR glad_vkGetPhysicalDeviceSurfaceCapabilitiesKHR
+GLAD_API_CALL PFN_vkGetPhysicalDeviceSurfaceFormatsKHR glad_vkGetPhysicalDeviceSurfaceFormatsKHR;
+#define vkGetPhysicalDeviceSurfaceFormatsKHR glad_vkGetPhysicalDeviceSurfaceFormatsKHR
+GLAD_API_CALL PFN_vkGetPhysicalDeviceSurfacePresentModesKHR glad_vkGetPhysicalDeviceSurfacePresentModesKHR;
+#define vkGetPhysicalDeviceSurfacePresentModesKHR glad_vkGetPhysicalDeviceSurfacePresentModesKHR
+GLAD_API_CALL PFN_vkGetPhysicalDeviceSurfaceSupportKHR glad_vkGetPhysicalDeviceSurfaceSupportKHR;
+#define vkGetPhysicalDeviceSurfaceSupportKHR glad_vkGetPhysicalDeviceSurfaceSupportKHR
+GLAD_API_CALL PFN_vkGetPipelineCacheData glad_vkGetPipelineCacheData;
+#define vkGetPipelineCacheData glad_vkGetPipelineCacheData
+GLAD_API_CALL PFN_vkGetQueryPoolResults glad_vkGetQueryPoolResults;
+#define vkGetQueryPoolResults glad_vkGetQueryPoolResults
+GLAD_API_CALL PFN_vkGetRenderAreaGranularity glad_vkGetRenderAreaGranularity;
+#define vkGetRenderAreaGranularity glad_vkGetRenderAreaGranularity
+GLAD_API_CALL PFN_vkGetSwapchainImagesKHR glad_vkGetSwapchainImagesKHR;
+#define vkGetSwapchainImagesKHR glad_vkGetSwapchainImagesKHR
+GLAD_API_CALL PFN_vkInvalidateMappedMemoryRanges glad_vkInvalidateMappedMemoryRanges;
+#define vkInvalidateMappedMemoryRanges glad_vkInvalidateMappedMemoryRanges
+GLAD_API_CALL PFN_vkMapMemory glad_vkMapMemory;
+#define vkMapMemory glad_vkMapMemory
+GLAD_API_CALL PFN_vkMergePipelineCaches glad_vkMergePipelineCaches;
+#define vkMergePipelineCaches glad_vkMergePipelineCaches
+GLAD_API_CALL PFN_vkQueueBindSparse glad_vkQueueBindSparse;
+#define vkQueueBindSparse glad_vkQueueBindSparse
+GLAD_API_CALL PFN_vkQueuePresentKHR glad_vkQueuePresentKHR;
+#define vkQueuePresentKHR glad_vkQueuePresentKHR
+GLAD_API_CALL PFN_vkQueueSubmit glad_vkQueueSubmit;
+#define vkQueueSubmit glad_vkQueueSubmit
+GLAD_API_CALL PFN_vkQueueWaitIdle glad_vkQueueWaitIdle;
+#define vkQueueWaitIdle glad_vkQueueWaitIdle
+GLAD_API_CALL PFN_vkResetCommandBuffer glad_vkResetCommandBuffer;
+#define vkResetCommandBuffer glad_vkResetCommandBuffer
+GLAD_API_CALL PFN_vkResetCommandPool glad_vkResetCommandPool;
+#define vkResetCommandPool glad_vkResetCommandPool
+GLAD_API_CALL PFN_vkResetDescriptorPool glad_vkResetDescriptorPool;
+#define vkResetDescriptorPool glad_vkResetDescriptorPool
+GLAD_API_CALL PFN_vkResetEvent glad_vkResetEvent;
+#define vkResetEvent glad_vkResetEvent
+GLAD_API_CALL PFN_vkResetFences glad_vkResetFences;
+#define vkResetFences glad_vkResetFences
+GLAD_API_CALL PFN_vkSetEvent glad_vkSetEvent;
+#define vkSetEvent glad_vkSetEvent
+GLAD_API_CALL PFN_vkTrimCommandPool glad_vkTrimCommandPool;
+#define vkTrimCommandPool glad_vkTrimCommandPool
+GLAD_API_CALL PFN_vkUnmapMemory glad_vkUnmapMemory;
+#define vkUnmapMemory glad_vkUnmapMemory
+GLAD_API_CALL PFN_vkUpdateDescriptorSetWithTemplate glad_vkUpdateDescriptorSetWithTemplate;
+#define vkUpdateDescriptorSetWithTemplate glad_vkUpdateDescriptorSetWithTemplate
+GLAD_API_CALL PFN_vkUpdateDescriptorSets glad_vkUpdateDescriptorSets;
+#define vkUpdateDescriptorSets glad_vkUpdateDescriptorSets
+GLAD_API_CALL PFN_vkWaitForFences glad_vkWaitForFences;
+#define vkWaitForFences glad_vkWaitForFences
+
+
+
+
+
+GLAD_API_CALL int gladLoadVulkanUserPtr( VkPhysicalDevice physical_device, GLADuserptrloadfunc load, void *userptr);
+GLAD_API_CALL int gladLoadVulkan( VkPhysicalDevice physical_device, GLADloadfunc load);
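+
+/*
+ * Example (illustrative sketch only, not part of the generated loader): define
+ * GLAD_VULKAN_IMPLEMENTATION in exactly one translation unit, then resolve the
+ * entry points declared above through a loader callback, for instance one built
+ * on GLFW. The include path and the glfw_vk_loader wrapper are placeholders.
+ *
+ *   #define GLAD_VULKAN_IMPLEMENTATION
+ *   #include "glad/vulkan.h"
+ *   #include <GLFW/glfw3.h>
+ *
+ *   static GLADapiproc glfw_vk_loader(void *instance, const char *name) {
+ *       return (GLADapiproc) glfwGetInstanceProcAddress((VkInstance) instance, name);
+ *   }
+ *
+ *   int version = gladLoadVulkanUserPtr(NULL, glfw_vk_loader, NULL);
+ *   // Once a VkInstance and VkPhysicalDevice exist, call it again with the
+ *   // instance as userptr and the device as first argument so device-level
+ *   // and extension functions are resolved as well.
+ */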
+
+
+
+#ifdef __cplusplus
+}
+#endif
+#endif
+
+/* Source */
+#ifdef GLAD_VULKAN_IMPLEMENTATION
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#ifndef GLAD_IMPL_UTIL_C_
+#define GLAD_IMPL_UTIL_C_
+
+#ifdef _MSC_VER
+#define GLAD_IMPL_UTIL_SSCANF sscanf_s
+#else
+#define GLAD_IMPL_UTIL_SSCANF sscanf
+#endif
+
+#endif /* GLAD_IMPL_UTIL_C_ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+
+int GLAD_VK_VERSION_1_0 = 0;
+int GLAD_VK_VERSION_1_1 = 0;
+int GLAD_VK_EXT_debug_report = 0;
+int GLAD_VK_KHR_surface = 0;
+int GLAD_VK_KHR_swapchain = 0;
+
+
+
+PFN_vkAcquireNextImage2KHR glad_vkAcquireNextImage2KHR = NULL;
+PFN_vkAcquireNextImageKHR glad_vkAcquireNextImageKHR = NULL;
+PFN_vkAllocateCommandBuffers glad_vkAllocateCommandBuffers = NULL;
+PFN_vkAllocateDescriptorSets glad_vkAllocateDescriptorSets = NULL;
+PFN_vkAllocateMemory glad_vkAllocateMemory = NULL;
+PFN_vkBeginCommandBuffer glad_vkBeginCommandBuffer = NULL;
+PFN_vkBindBufferMemory glad_vkBindBufferMemory = NULL;
+PFN_vkBindBufferMemory2 glad_vkBindBufferMemory2 = NULL;
+PFN_vkBindImageMemory glad_vkBindImageMemory = NULL;
+PFN_vkBindImageMemory2 glad_vkBindImageMemory2 = NULL;
+PFN_vkCmdBeginQuery glad_vkCmdBeginQuery = NULL;
+PFN_vkCmdBeginRenderPass glad_vkCmdBeginRenderPass = NULL;
+PFN_vkCmdBindDescriptorSets glad_vkCmdBindDescriptorSets = NULL;
+PFN_vkCmdBindIndexBuffer glad_vkCmdBindIndexBuffer = NULL;
+PFN_vkCmdBindPipeline glad_vkCmdBindPipeline = NULL;
+PFN_vkCmdBindVertexBuffers glad_vkCmdBindVertexBuffers = NULL;
+PFN_vkCmdBlitImage glad_vkCmdBlitImage = NULL;
+PFN_vkCmdClearAttachments glad_vkCmdClearAttachments = NULL;
+PFN_vkCmdClearColorImage glad_vkCmdClearColorImage = NULL;
+PFN_vkCmdClearDepthStencilImage glad_vkCmdClearDepthStencilImage = NULL;
+PFN_vkCmdCopyBuffer glad_vkCmdCopyBuffer = NULL;
+PFN_vkCmdCopyBufferToImage glad_vkCmdCopyBufferToImage = NULL;
+PFN_vkCmdCopyImage glad_vkCmdCopyImage = NULL;
+PFN_vkCmdCopyImageToBuffer glad_vkCmdCopyImageToBuffer = NULL;
+PFN_vkCmdCopyQueryPoolResults glad_vkCmdCopyQueryPoolResults = NULL;
+PFN_vkCmdDispatch glad_vkCmdDispatch = NULL;
+PFN_vkCmdDispatchBase glad_vkCmdDispatchBase = NULL;
+PFN_vkCmdDispatchIndirect glad_vkCmdDispatchIndirect = NULL;
+PFN_vkCmdDraw glad_vkCmdDraw = NULL;
+PFN_vkCmdDrawIndexed glad_vkCmdDrawIndexed = NULL;
+PFN_vkCmdDrawIndexedIndirect glad_vkCmdDrawIndexedIndirect = NULL;
+PFN_vkCmdDrawIndirect glad_vkCmdDrawIndirect = NULL;
+PFN_vkCmdEndQuery glad_vkCmdEndQuery = NULL;
+PFN_vkCmdEndRenderPass glad_vkCmdEndRenderPass = NULL;
+PFN_vkCmdExecuteCommands glad_vkCmdExecuteCommands = NULL;
+PFN_vkCmdFillBuffer glad_vkCmdFillBuffer = NULL;
+PFN_vkCmdNextSubpass glad_vkCmdNextSubpass = NULL;
+PFN_vkCmdPipelineBarrier glad_vkCmdPipelineBarrier = NULL;
+PFN_vkCmdPushConstants glad_vkCmdPushConstants = NULL;
+PFN_vkCmdResetEvent glad_vkCmdResetEvent = NULL;
+PFN_vkCmdResetQueryPool glad_vkCmdResetQueryPool = NULL;
+PFN_vkCmdResolveImage glad_vkCmdResolveImage = NULL;
+PFN_vkCmdSetBlendConstants glad_vkCmdSetBlendConstants = NULL;
+PFN_vkCmdSetDepthBias glad_vkCmdSetDepthBias = NULL;
+PFN_vkCmdSetDepthBounds glad_vkCmdSetDepthBounds = NULL;
+PFN_vkCmdSetDeviceMask glad_vkCmdSetDeviceMask = NULL;
+PFN_vkCmdSetEvent glad_vkCmdSetEvent = NULL;
+PFN_vkCmdSetLineWidth glad_vkCmdSetLineWidth = NULL;
+PFN_vkCmdSetScissor glad_vkCmdSetScissor = NULL;
+PFN_vkCmdSetStencilCompareMask glad_vkCmdSetStencilCompareMask = NULL;
+PFN_vkCmdSetStencilReference glad_vkCmdSetStencilReference = NULL;
+PFN_vkCmdSetStencilWriteMask glad_vkCmdSetStencilWriteMask = NULL;
+PFN_vkCmdSetViewport glad_vkCmdSetViewport = NULL;
+PFN_vkCmdUpdateBuffer glad_vkCmdUpdateBuffer = NULL;
+PFN_vkCmdWaitEvents glad_vkCmdWaitEvents = NULL;
+PFN_vkCmdWriteTimestamp glad_vkCmdWriteTimestamp = NULL;
+PFN_vkCreateBuffer glad_vkCreateBuffer = NULL;
+PFN_vkCreateBufferView glad_vkCreateBufferView = NULL;
+PFN_vkCreateCommandPool glad_vkCreateCommandPool = NULL;
+PFN_vkCreateComputePipelines glad_vkCreateComputePipelines = NULL;
+PFN_vkCreateDebugReportCallbackEXT glad_vkCreateDebugReportCallbackEXT = NULL;
+PFN_vkCreateDescriptorPool glad_vkCreateDescriptorPool = NULL;
+PFN_vkCreateDescriptorSetLayout glad_vkCreateDescriptorSetLayout = NULL;
+PFN_vkCreateDescriptorUpdateTemplate glad_vkCreateDescriptorUpdateTemplate = NULL;
+PFN_vkCreateDevice glad_vkCreateDevice = NULL;
+PFN_vkCreateEvent glad_vkCreateEvent = NULL;
+PFN_vkCreateFence glad_vkCreateFence = NULL;
+PFN_vkCreateFramebuffer glad_vkCreateFramebuffer = NULL;
+PFN_vkCreateGraphicsPipelines glad_vkCreateGraphicsPipelines = NULL;
+PFN_vkCreateImage glad_vkCreateImage = NULL;
+PFN_vkCreateImageView glad_vkCreateImageView = NULL;
+PFN_vkCreateInstance glad_vkCreateInstance = NULL;
+PFN_vkCreatePipelineCache glad_vkCreatePipelineCache = NULL;
+PFN_vkCreatePipelineLayout glad_vkCreatePipelineLayout = NULL;
+PFN_vkCreateQueryPool glad_vkCreateQueryPool = NULL;
+PFN_vkCreateRenderPass glad_vkCreateRenderPass = NULL;
+PFN_vkCreateSampler glad_vkCreateSampler = NULL;
+PFN_vkCreateSamplerYcbcrConversion glad_vkCreateSamplerYcbcrConversion = NULL;
+PFN_vkCreateSemaphore glad_vkCreateSemaphore = NULL;
+PFN_vkCreateShaderModule glad_vkCreateShaderModule = NULL;
+PFN_vkCreateSwapchainKHR glad_vkCreateSwapchainKHR = NULL;
+PFN_vkDebugReportMessageEXT glad_vkDebugReportMessageEXT = NULL;
+PFN_vkDestroyBuffer glad_vkDestroyBuffer = NULL;
+PFN_vkDestroyBufferView glad_vkDestroyBufferView = NULL;
+PFN_vkDestroyCommandPool glad_vkDestroyCommandPool = NULL;
+PFN_vkDestroyDebugReportCallbackEXT glad_vkDestroyDebugReportCallbackEXT = NULL;
+PFN_vkDestroyDescriptorPool glad_vkDestroyDescriptorPool = NULL;
+PFN_vkDestroyDescriptorSetLayout glad_vkDestroyDescriptorSetLayout = NULL;
+PFN_vkDestroyDescriptorUpdateTemplate glad_vkDestroyDescriptorUpdateTemplate = NULL;
+PFN_vkDestroyDevice glad_vkDestroyDevice = NULL;
+PFN_vkDestroyEvent glad_vkDestroyEvent = NULL;
+PFN_vkDestroyFence glad_vkDestroyFence = NULL;
+PFN_vkDestroyFramebuffer glad_vkDestroyFramebuffer = NULL;
+PFN_vkDestroyImage glad_vkDestroyImage = NULL;
+PFN_vkDestroyImageView glad_vkDestroyImageView = NULL;
+PFN_vkDestroyInstance glad_vkDestroyInstance = NULL;
+PFN_vkDestroyPipeline glad_vkDestroyPipeline = NULL;
+PFN_vkDestroyPipelineCache glad_vkDestroyPipelineCache = NULL;
+PFN_vkDestroyPipelineLayout glad_vkDestroyPipelineLayout = NULL;
+PFN_vkDestroyQueryPool glad_vkDestroyQueryPool = NULL;
+PFN_vkDestroyRenderPass glad_vkDestroyRenderPass = NULL;
+PFN_vkDestroySampler glad_vkDestroySampler = NULL;
+PFN_vkDestroySamplerYcbcrConversion glad_vkDestroySamplerYcbcrConversion = NULL;
+PFN_vkDestroySemaphore glad_vkDestroySemaphore = NULL;
+PFN_vkDestroyShaderModule glad_vkDestroyShaderModule = NULL;
+PFN_vkDestroySurfaceKHR glad_vkDestroySurfaceKHR = NULL;
+PFN_vkDestroySwapchainKHR glad_vkDestroySwapchainKHR = NULL;
+PFN_vkDeviceWaitIdle glad_vkDeviceWaitIdle = NULL;
+PFN_vkEndCommandBuffer glad_vkEndCommandBuffer = NULL;
+PFN_vkEnumerateDeviceExtensionProperties glad_vkEnumerateDeviceExtensionProperties = NULL;
+PFN_vkEnumerateDeviceLayerProperties glad_vkEnumerateDeviceLayerProperties = NULL;
+PFN_vkEnumerateInstanceExtensionProperties glad_vkEnumerateInstanceExtensionProperties = NULL;
+PFN_vkEnumerateInstanceLayerProperties glad_vkEnumerateInstanceLayerProperties = NULL;
+PFN_vkEnumerateInstanceVersion glad_vkEnumerateInstanceVersion = NULL;
+PFN_vkEnumeratePhysicalDeviceGroups glad_vkEnumeratePhysicalDeviceGroups = NULL;
+PFN_vkEnumeratePhysicalDevices glad_vkEnumeratePhysicalDevices = NULL;
+PFN_vkFlushMappedMemoryRanges glad_vkFlushMappedMemoryRanges = NULL;
+PFN_vkFreeCommandBuffers glad_vkFreeCommandBuffers = NULL;
+PFN_vkFreeDescriptorSets glad_vkFreeDescriptorSets = NULL;
+PFN_vkFreeMemory glad_vkFreeMemory = NULL;
+PFN_vkGetBufferMemoryRequirements glad_vkGetBufferMemoryRequirements = NULL;
+PFN_vkGetBufferMemoryRequirements2 glad_vkGetBufferMemoryRequirements2 = NULL;
+PFN_vkGetDescriptorSetLayoutSupport glad_vkGetDescriptorSetLayoutSupport = NULL;
+PFN_vkGetDeviceGroupPeerMemoryFeatures glad_vkGetDeviceGroupPeerMemoryFeatures = NULL;
+PFN_vkGetDeviceGroupPresentCapabilitiesKHR glad_vkGetDeviceGroupPresentCapabilitiesKHR = NULL;
+PFN_vkGetDeviceGroupSurfacePresentModesKHR glad_vkGetDeviceGroupSurfacePresentModesKHR = NULL;
+PFN_vkGetDeviceMemoryCommitment glad_vkGetDeviceMemoryCommitment = NULL;
+PFN_vkGetDeviceProcAddr glad_vkGetDeviceProcAddr = NULL;
+PFN_vkGetDeviceQueue glad_vkGetDeviceQueue = NULL;
+PFN_vkGetDeviceQueue2 glad_vkGetDeviceQueue2 = NULL;
+PFN_vkGetEventStatus glad_vkGetEventStatus = NULL;
+PFN_vkGetFenceStatus glad_vkGetFenceStatus = NULL;
+PFN_vkGetImageMemoryRequirements glad_vkGetImageMemoryRequirements = NULL;
+PFN_vkGetImageMemoryRequirements2 glad_vkGetImageMemoryRequirements2 = NULL;
+PFN_vkGetImageSparseMemoryRequirements glad_vkGetImageSparseMemoryRequirements = NULL;
+PFN_vkGetImageSparseMemoryRequirements2 glad_vkGetImageSparseMemoryRequirements2 = NULL;
+PFN_vkGetImageSubresourceLayout glad_vkGetImageSubresourceLayout = NULL;
+PFN_vkGetInstanceProcAddr glad_vkGetInstanceProcAddr = NULL;
+PFN_vkGetPhysicalDeviceExternalBufferProperties glad_vkGetPhysicalDeviceExternalBufferProperties = NULL;
+PFN_vkGetPhysicalDeviceExternalFenceProperties glad_vkGetPhysicalDeviceExternalFenceProperties = NULL;
+PFN_vkGetPhysicalDeviceExternalSemaphoreProperties glad_vkGetPhysicalDeviceExternalSemaphoreProperties = NULL;
+PFN_vkGetPhysicalDeviceFeatures glad_vkGetPhysicalDeviceFeatures = NULL;
+PFN_vkGetPhysicalDeviceFeatures2 glad_vkGetPhysicalDeviceFeatures2 = NULL;
+PFN_vkGetPhysicalDeviceFormatProperties glad_vkGetPhysicalDeviceFormatProperties = NULL;
+PFN_vkGetPhysicalDeviceFormatProperties2 glad_vkGetPhysicalDeviceFormatProperties2 = NULL;
+PFN_vkGetPhysicalDeviceImageFormatProperties glad_vkGetPhysicalDeviceImageFormatProperties = NULL;
+PFN_vkGetPhysicalDeviceImageFormatProperties2 glad_vkGetPhysicalDeviceImageFormatProperties2 = NULL;
+PFN_vkGetPhysicalDeviceMemoryProperties glad_vkGetPhysicalDeviceMemoryProperties = NULL;
+PFN_vkGetPhysicalDeviceMemoryProperties2 glad_vkGetPhysicalDeviceMemoryProperties2 = NULL;
+PFN_vkGetPhysicalDevicePresentRectanglesKHR glad_vkGetPhysicalDevicePresentRectanglesKHR = NULL;
+PFN_vkGetPhysicalDeviceProperties glad_vkGetPhysicalDeviceProperties = NULL;
+PFN_vkGetPhysicalDeviceProperties2 glad_vkGetPhysicalDeviceProperties2 = NULL;
+PFN_vkGetPhysicalDeviceQueueFamilyProperties glad_vkGetPhysicalDeviceQueueFamilyProperties = NULL;
+PFN_vkGetPhysicalDeviceQueueFamilyProperties2 glad_vkGetPhysicalDeviceQueueFamilyProperties2 = NULL;
+PFN_vkGetPhysicalDeviceSparseImageFormatProperties glad_vkGetPhysicalDeviceSparseImageFormatProperties = NULL;
+PFN_vkGetPhysicalDeviceSparseImageFormatProperties2 glad_vkGetPhysicalDeviceSparseImageFormatProperties2 = NULL;
+PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR glad_vkGetPhysicalDeviceSurfaceCapabilitiesKHR = NULL;
+PFN_vkGetPhysicalDeviceSurfaceFormatsKHR glad_vkGetPhysicalDeviceSurfaceFormatsKHR = NULL;
+PFN_vkGetPhysicalDeviceSurfacePresentModesKHR glad_vkGetPhysicalDeviceSurfacePresentModesKHR = NULL;
+PFN_vkGetPhysicalDeviceSurfaceSupportKHR glad_vkGetPhysicalDeviceSurfaceSupportKHR = NULL;
+PFN_vkGetPipelineCacheData glad_vkGetPipelineCacheData = NULL;
+PFN_vkGetQueryPoolResults glad_vkGetQueryPoolResults = NULL;
+PFN_vkGetRenderAreaGranularity glad_vkGetRenderAreaGranularity = NULL;
+PFN_vkGetSwapchainImagesKHR glad_vkGetSwapchainImagesKHR = NULL;
+PFN_vkInvalidateMappedMemoryRanges glad_vkInvalidateMappedMemoryRanges = NULL;
+PFN_vkMapMemory glad_vkMapMemory = NULL;
+PFN_vkMergePipelineCaches glad_vkMergePipelineCaches = NULL;
+PFN_vkQueueBindSparse glad_vkQueueBindSparse = NULL;
+PFN_vkQueuePresentKHR glad_vkQueuePresentKHR = NULL;
+PFN_vkQueueSubmit glad_vkQueueSubmit = NULL;
+PFN_vkQueueWaitIdle glad_vkQueueWaitIdle = NULL;
+PFN_vkResetCommandBuffer glad_vkResetCommandBuffer = NULL;
+PFN_vkResetCommandPool glad_vkResetCommandPool = NULL;
+PFN_vkResetDescriptorPool glad_vkResetDescriptorPool = NULL;
+PFN_vkResetEvent glad_vkResetEvent = NULL;
+PFN_vkResetFences glad_vkResetFences = NULL;
+PFN_vkSetEvent glad_vkSetEvent = NULL;
+PFN_vkTrimCommandPool glad_vkTrimCommandPool = NULL;
+PFN_vkUnmapMemory glad_vkUnmapMemory = NULL;
+PFN_vkUpdateDescriptorSetWithTemplate glad_vkUpdateDescriptorSetWithTemplate = NULL;
+PFN_vkUpdateDescriptorSets glad_vkUpdateDescriptorSets = NULL;
+PFN_vkWaitForFences glad_vkWaitForFences = NULL;
+
+
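+/* Each glad_vk_load_* helper below pulls one block of entry points through the
+ * caller-supplied load callback, and is a no-op unless the corresponding
+ * GLAD_VK_* flag was set by the version/extension detection further down. */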
+static void glad_vk_load_VK_VERSION_1_0( GLADuserptrloadfunc load, void* userptr) {
+ if(!GLAD_VK_VERSION_1_0) return;
+ glad_vkAllocateCommandBuffers = (PFN_vkAllocateCommandBuffers) load(userptr, "vkAllocateCommandBuffers");
+ glad_vkAllocateDescriptorSets = (PFN_vkAllocateDescriptorSets) load(userptr, "vkAllocateDescriptorSets");
+ glad_vkAllocateMemory = (PFN_vkAllocateMemory) load(userptr, "vkAllocateMemory");
+ glad_vkBeginCommandBuffer = (PFN_vkBeginCommandBuffer) load(userptr, "vkBeginCommandBuffer");
+ glad_vkBindBufferMemory = (PFN_vkBindBufferMemory) load(userptr, "vkBindBufferMemory");
+ glad_vkBindImageMemory = (PFN_vkBindImageMemory) load(userptr, "vkBindImageMemory");
+ glad_vkCmdBeginQuery = (PFN_vkCmdBeginQuery) load(userptr, "vkCmdBeginQuery");
+ glad_vkCmdBeginRenderPass = (PFN_vkCmdBeginRenderPass) load(userptr, "vkCmdBeginRenderPass");
+ glad_vkCmdBindDescriptorSets = (PFN_vkCmdBindDescriptorSets) load(userptr, "vkCmdBindDescriptorSets");
+ glad_vkCmdBindIndexBuffer = (PFN_vkCmdBindIndexBuffer) load(userptr, "vkCmdBindIndexBuffer");
+ glad_vkCmdBindPipeline = (PFN_vkCmdBindPipeline) load(userptr, "vkCmdBindPipeline");
+ glad_vkCmdBindVertexBuffers = (PFN_vkCmdBindVertexBuffers) load(userptr, "vkCmdBindVertexBuffers");
+ glad_vkCmdBlitImage = (PFN_vkCmdBlitImage) load(userptr, "vkCmdBlitImage");
+ glad_vkCmdClearAttachments = (PFN_vkCmdClearAttachments) load(userptr, "vkCmdClearAttachments");
+ glad_vkCmdClearColorImage = (PFN_vkCmdClearColorImage) load(userptr, "vkCmdClearColorImage");
+ glad_vkCmdClearDepthStencilImage = (PFN_vkCmdClearDepthStencilImage) load(userptr, "vkCmdClearDepthStencilImage");
+ glad_vkCmdCopyBuffer = (PFN_vkCmdCopyBuffer) load(userptr, "vkCmdCopyBuffer");
+ glad_vkCmdCopyBufferToImage = (PFN_vkCmdCopyBufferToImage) load(userptr, "vkCmdCopyBufferToImage");
+ glad_vkCmdCopyImage = (PFN_vkCmdCopyImage) load(userptr, "vkCmdCopyImage");
+ glad_vkCmdCopyImageToBuffer = (PFN_vkCmdCopyImageToBuffer) load(userptr, "vkCmdCopyImageToBuffer");
+ glad_vkCmdCopyQueryPoolResults = (PFN_vkCmdCopyQueryPoolResults) load(userptr, "vkCmdCopyQueryPoolResults");
+ glad_vkCmdDispatch = (PFN_vkCmdDispatch) load(userptr, "vkCmdDispatch");
+ glad_vkCmdDispatchIndirect = (PFN_vkCmdDispatchIndirect) load(userptr, "vkCmdDispatchIndirect");
+ glad_vkCmdDraw = (PFN_vkCmdDraw) load(userptr, "vkCmdDraw");
+ glad_vkCmdDrawIndexed = (PFN_vkCmdDrawIndexed) load(userptr, "vkCmdDrawIndexed");
+ glad_vkCmdDrawIndexedIndirect = (PFN_vkCmdDrawIndexedIndirect) load(userptr, "vkCmdDrawIndexedIndirect");
+ glad_vkCmdDrawIndirect = (PFN_vkCmdDrawIndirect) load(userptr, "vkCmdDrawIndirect");
+ glad_vkCmdEndQuery = (PFN_vkCmdEndQuery) load(userptr, "vkCmdEndQuery");
+ glad_vkCmdEndRenderPass = (PFN_vkCmdEndRenderPass) load(userptr, "vkCmdEndRenderPass");
+ glad_vkCmdExecuteCommands = (PFN_vkCmdExecuteCommands) load(userptr, "vkCmdExecuteCommands");
+ glad_vkCmdFillBuffer = (PFN_vkCmdFillBuffer) load(userptr, "vkCmdFillBuffer");
+ glad_vkCmdNextSubpass = (PFN_vkCmdNextSubpass) load(userptr, "vkCmdNextSubpass");
+ glad_vkCmdPipelineBarrier = (PFN_vkCmdPipelineBarrier) load(userptr, "vkCmdPipelineBarrier");
+ glad_vkCmdPushConstants = (PFN_vkCmdPushConstants) load(userptr, "vkCmdPushConstants");
+ glad_vkCmdResetEvent = (PFN_vkCmdResetEvent) load(userptr, "vkCmdResetEvent");
+ glad_vkCmdResetQueryPool = (PFN_vkCmdResetQueryPool) load(userptr, "vkCmdResetQueryPool");
+ glad_vkCmdResolveImage = (PFN_vkCmdResolveImage) load(userptr, "vkCmdResolveImage");
+ glad_vkCmdSetBlendConstants = (PFN_vkCmdSetBlendConstants) load(userptr, "vkCmdSetBlendConstants");
+ glad_vkCmdSetDepthBias = (PFN_vkCmdSetDepthBias) load(userptr, "vkCmdSetDepthBias");
+ glad_vkCmdSetDepthBounds = (PFN_vkCmdSetDepthBounds) load(userptr, "vkCmdSetDepthBounds");
+ glad_vkCmdSetEvent = (PFN_vkCmdSetEvent) load(userptr, "vkCmdSetEvent");
+ glad_vkCmdSetLineWidth = (PFN_vkCmdSetLineWidth) load(userptr, "vkCmdSetLineWidth");
+ glad_vkCmdSetScissor = (PFN_vkCmdSetScissor) load(userptr, "vkCmdSetScissor");
+ glad_vkCmdSetStencilCompareMask = (PFN_vkCmdSetStencilCompareMask) load(userptr, "vkCmdSetStencilCompareMask");
+ glad_vkCmdSetStencilReference = (PFN_vkCmdSetStencilReference) load(userptr, "vkCmdSetStencilReference");
+ glad_vkCmdSetStencilWriteMask = (PFN_vkCmdSetStencilWriteMask) load(userptr, "vkCmdSetStencilWriteMask");
+ glad_vkCmdSetViewport = (PFN_vkCmdSetViewport) load(userptr, "vkCmdSetViewport");
+ glad_vkCmdUpdateBuffer = (PFN_vkCmdUpdateBuffer) load(userptr, "vkCmdUpdateBuffer");
+ glad_vkCmdWaitEvents = (PFN_vkCmdWaitEvents) load(userptr, "vkCmdWaitEvents");
+ glad_vkCmdWriteTimestamp = (PFN_vkCmdWriteTimestamp) load(userptr, "vkCmdWriteTimestamp");
+ glad_vkCreateBuffer = (PFN_vkCreateBuffer) load(userptr, "vkCreateBuffer");
+ glad_vkCreateBufferView = (PFN_vkCreateBufferView) load(userptr, "vkCreateBufferView");
+ glad_vkCreateCommandPool = (PFN_vkCreateCommandPool) load(userptr, "vkCreateCommandPool");
+ glad_vkCreateComputePipelines = (PFN_vkCreateComputePipelines) load(userptr, "vkCreateComputePipelines");
+ glad_vkCreateDescriptorPool = (PFN_vkCreateDescriptorPool) load(userptr, "vkCreateDescriptorPool");
+ glad_vkCreateDescriptorSetLayout = (PFN_vkCreateDescriptorSetLayout) load(userptr, "vkCreateDescriptorSetLayout");
+ glad_vkCreateDevice = (PFN_vkCreateDevice) load(userptr, "vkCreateDevice");
+ glad_vkCreateEvent = (PFN_vkCreateEvent) load(userptr, "vkCreateEvent");
+ glad_vkCreateFence = (PFN_vkCreateFence) load(userptr, "vkCreateFence");
+ glad_vkCreateFramebuffer = (PFN_vkCreateFramebuffer) load(userptr, "vkCreateFramebuffer");
+ glad_vkCreateGraphicsPipelines = (PFN_vkCreateGraphicsPipelines) load(userptr, "vkCreateGraphicsPipelines");
+ glad_vkCreateImage = (PFN_vkCreateImage) load(userptr, "vkCreateImage");
+ glad_vkCreateImageView = (PFN_vkCreateImageView) load(userptr, "vkCreateImageView");
+ glad_vkCreateInstance = (PFN_vkCreateInstance) load(userptr, "vkCreateInstance");
+ glad_vkCreatePipelineCache = (PFN_vkCreatePipelineCache) load(userptr, "vkCreatePipelineCache");
+ glad_vkCreatePipelineLayout = (PFN_vkCreatePipelineLayout) load(userptr, "vkCreatePipelineLayout");
+ glad_vkCreateQueryPool = (PFN_vkCreateQueryPool) load(userptr, "vkCreateQueryPool");
+ glad_vkCreateRenderPass = (PFN_vkCreateRenderPass) load(userptr, "vkCreateRenderPass");
+ glad_vkCreateSampler = (PFN_vkCreateSampler) load(userptr, "vkCreateSampler");
+ glad_vkCreateSemaphore = (PFN_vkCreateSemaphore) load(userptr, "vkCreateSemaphore");
+ glad_vkCreateShaderModule = (PFN_vkCreateShaderModule) load(userptr, "vkCreateShaderModule");
+ glad_vkDestroyBuffer = (PFN_vkDestroyBuffer) load(userptr, "vkDestroyBuffer");
+ glad_vkDestroyBufferView = (PFN_vkDestroyBufferView) load(userptr, "vkDestroyBufferView");
+ glad_vkDestroyCommandPool = (PFN_vkDestroyCommandPool) load(userptr, "vkDestroyCommandPool");
+ glad_vkDestroyDescriptorPool = (PFN_vkDestroyDescriptorPool) load(userptr, "vkDestroyDescriptorPool");
+ glad_vkDestroyDescriptorSetLayout = (PFN_vkDestroyDescriptorSetLayout) load(userptr, "vkDestroyDescriptorSetLayout");
+ glad_vkDestroyDevice = (PFN_vkDestroyDevice) load(userptr, "vkDestroyDevice");
+ glad_vkDestroyEvent = (PFN_vkDestroyEvent) load(userptr, "vkDestroyEvent");
+ glad_vkDestroyFence = (PFN_vkDestroyFence) load(userptr, "vkDestroyFence");
+ glad_vkDestroyFramebuffer = (PFN_vkDestroyFramebuffer) load(userptr, "vkDestroyFramebuffer");
+ glad_vkDestroyImage = (PFN_vkDestroyImage) load(userptr, "vkDestroyImage");
+ glad_vkDestroyImageView = (PFN_vkDestroyImageView) load(userptr, "vkDestroyImageView");
+ glad_vkDestroyInstance = (PFN_vkDestroyInstance) load(userptr, "vkDestroyInstance");
+ glad_vkDestroyPipeline = (PFN_vkDestroyPipeline) load(userptr, "vkDestroyPipeline");
+ glad_vkDestroyPipelineCache = (PFN_vkDestroyPipelineCache) load(userptr, "vkDestroyPipelineCache");
+ glad_vkDestroyPipelineLayout = (PFN_vkDestroyPipelineLayout) load(userptr, "vkDestroyPipelineLayout");
+ glad_vkDestroyQueryPool = (PFN_vkDestroyQueryPool) load(userptr, "vkDestroyQueryPool");
+ glad_vkDestroyRenderPass = (PFN_vkDestroyRenderPass) load(userptr, "vkDestroyRenderPass");
+ glad_vkDestroySampler = (PFN_vkDestroySampler) load(userptr, "vkDestroySampler");
+ glad_vkDestroySemaphore = (PFN_vkDestroySemaphore) load(userptr, "vkDestroySemaphore");
+ glad_vkDestroyShaderModule = (PFN_vkDestroyShaderModule) load(userptr, "vkDestroyShaderModule");
+ glad_vkDeviceWaitIdle = (PFN_vkDeviceWaitIdle) load(userptr, "vkDeviceWaitIdle");
+ glad_vkEndCommandBuffer = (PFN_vkEndCommandBuffer) load(userptr, "vkEndCommandBuffer");
+ glad_vkEnumerateDeviceExtensionProperties = (PFN_vkEnumerateDeviceExtensionProperties) load(userptr, "vkEnumerateDeviceExtensionProperties");
+ glad_vkEnumerateDeviceLayerProperties = (PFN_vkEnumerateDeviceLayerProperties) load(userptr, "vkEnumerateDeviceLayerProperties");
+ glad_vkEnumerateInstanceExtensionProperties = (PFN_vkEnumerateInstanceExtensionProperties) load(userptr, "vkEnumerateInstanceExtensionProperties");
+ glad_vkEnumerateInstanceLayerProperties = (PFN_vkEnumerateInstanceLayerProperties) load(userptr, "vkEnumerateInstanceLayerProperties");
+ glad_vkEnumeratePhysicalDevices = (PFN_vkEnumeratePhysicalDevices) load(userptr, "vkEnumeratePhysicalDevices");
+ glad_vkFlushMappedMemoryRanges = (PFN_vkFlushMappedMemoryRanges) load(userptr, "vkFlushMappedMemoryRanges");
+ glad_vkFreeCommandBuffers = (PFN_vkFreeCommandBuffers) load(userptr, "vkFreeCommandBuffers");
+ glad_vkFreeDescriptorSets = (PFN_vkFreeDescriptorSets) load(userptr, "vkFreeDescriptorSets");
+ glad_vkFreeMemory = (PFN_vkFreeMemory) load(userptr, "vkFreeMemory");
+ glad_vkGetBufferMemoryRequirements = (PFN_vkGetBufferMemoryRequirements) load(userptr, "vkGetBufferMemoryRequirements");
+ glad_vkGetDeviceMemoryCommitment = (PFN_vkGetDeviceMemoryCommitment) load(userptr, "vkGetDeviceMemoryCommitment");
+ glad_vkGetDeviceProcAddr = (PFN_vkGetDeviceProcAddr) load(userptr, "vkGetDeviceProcAddr");
+ glad_vkGetDeviceQueue = (PFN_vkGetDeviceQueue) load(userptr, "vkGetDeviceQueue");
+ glad_vkGetEventStatus = (PFN_vkGetEventStatus) load(userptr, "vkGetEventStatus");
+ glad_vkGetFenceStatus = (PFN_vkGetFenceStatus) load(userptr, "vkGetFenceStatus");
+ glad_vkGetImageMemoryRequirements = (PFN_vkGetImageMemoryRequirements) load(userptr, "vkGetImageMemoryRequirements");
+ glad_vkGetImageSparseMemoryRequirements = (PFN_vkGetImageSparseMemoryRequirements) load(userptr, "vkGetImageSparseMemoryRequirements");
+ glad_vkGetImageSubresourceLayout = (PFN_vkGetImageSubresourceLayout) load(userptr, "vkGetImageSubresourceLayout");
+ glad_vkGetInstanceProcAddr = (PFN_vkGetInstanceProcAddr) load(userptr, "vkGetInstanceProcAddr");
+ glad_vkGetPhysicalDeviceFeatures = (PFN_vkGetPhysicalDeviceFeatures) load(userptr, "vkGetPhysicalDeviceFeatures");
+ glad_vkGetPhysicalDeviceFormatProperties = (PFN_vkGetPhysicalDeviceFormatProperties) load(userptr, "vkGetPhysicalDeviceFormatProperties");
+ glad_vkGetPhysicalDeviceImageFormatProperties = (PFN_vkGetPhysicalDeviceImageFormatProperties) load(userptr, "vkGetPhysicalDeviceImageFormatProperties");
+ glad_vkGetPhysicalDeviceMemoryProperties = (PFN_vkGetPhysicalDeviceMemoryProperties) load(userptr, "vkGetPhysicalDeviceMemoryProperties");
+ glad_vkGetPhysicalDeviceProperties = (PFN_vkGetPhysicalDeviceProperties) load(userptr, "vkGetPhysicalDeviceProperties");
+ glad_vkGetPhysicalDeviceQueueFamilyProperties = (PFN_vkGetPhysicalDeviceQueueFamilyProperties) load(userptr, "vkGetPhysicalDeviceQueueFamilyProperties");
+ glad_vkGetPhysicalDeviceSparseImageFormatProperties = (PFN_vkGetPhysicalDeviceSparseImageFormatProperties) load(userptr, "vkGetPhysicalDeviceSparseImageFormatProperties");
+ glad_vkGetPipelineCacheData = (PFN_vkGetPipelineCacheData) load(userptr, "vkGetPipelineCacheData");
+ glad_vkGetQueryPoolResults = (PFN_vkGetQueryPoolResults) load(userptr, "vkGetQueryPoolResults");
+ glad_vkGetRenderAreaGranularity = (PFN_vkGetRenderAreaGranularity) load(userptr, "vkGetRenderAreaGranularity");
+ glad_vkInvalidateMappedMemoryRanges = (PFN_vkInvalidateMappedMemoryRanges) load(userptr, "vkInvalidateMappedMemoryRanges");
+ glad_vkMapMemory = (PFN_vkMapMemory) load(userptr, "vkMapMemory");
+ glad_vkMergePipelineCaches = (PFN_vkMergePipelineCaches) load(userptr, "vkMergePipelineCaches");
+ glad_vkQueueBindSparse = (PFN_vkQueueBindSparse) load(userptr, "vkQueueBindSparse");
+ glad_vkQueueSubmit = (PFN_vkQueueSubmit) load(userptr, "vkQueueSubmit");
+ glad_vkQueueWaitIdle = (PFN_vkQueueWaitIdle) load(userptr, "vkQueueWaitIdle");
+ glad_vkResetCommandBuffer = (PFN_vkResetCommandBuffer) load(userptr, "vkResetCommandBuffer");
+ glad_vkResetCommandPool = (PFN_vkResetCommandPool) load(userptr, "vkResetCommandPool");
+ glad_vkResetDescriptorPool = (PFN_vkResetDescriptorPool) load(userptr, "vkResetDescriptorPool");
+ glad_vkResetEvent = (PFN_vkResetEvent) load(userptr, "vkResetEvent");
+ glad_vkResetFences = (PFN_vkResetFences) load(userptr, "vkResetFences");
+ glad_vkSetEvent = (PFN_vkSetEvent) load(userptr, "vkSetEvent");
+ glad_vkUnmapMemory = (PFN_vkUnmapMemory) load(userptr, "vkUnmapMemory");
+ glad_vkUpdateDescriptorSets = (PFN_vkUpdateDescriptorSets) load(userptr, "vkUpdateDescriptorSets");
+ glad_vkWaitForFences = (PFN_vkWaitForFences) load(userptr, "vkWaitForFences");
+}
+static void glad_vk_load_VK_VERSION_1_1( GLADuserptrloadfunc load, void* userptr) {
+ if(!GLAD_VK_VERSION_1_1) return;
+ glad_vkBindBufferMemory2 = (PFN_vkBindBufferMemory2) load(userptr, "vkBindBufferMemory2");
+ glad_vkBindImageMemory2 = (PFN_vkBindImageMemory2) load(userptr, "vkBindImageMemory2");
+ glad_vkCmdDispatchBase = (PFN_vkCmdDispatchBase) load(userptr, "vkCmdDispatchBase");
+ glad_vkCmdSetDeviceMask = (PFN_vkCmdSetDeviceMask) load(userptr, "vkCmdSetDeviceMask");
+ glad_vkCreateDescriptorUpdateTemplate = (PFN_vkCreateDescriptorUpdateTemplate) load(userptr, "vkCreateDescriptorUpdateTemplate");
+ glad_vkCreateSamplerYcbcrConversion = (PFN_vkCreateSamplerYcbcrConversion) load(userptr, "vkCreateSamplerYcbcrConversion");
+ glad_vkDestroyDescriptorUpdateTemplate = (PFN_vkDestroyDescriptorUpdateTemplate) load(userptr, "vkDestroyDescriptorUpdateTemplate");
+ glad_vkDestroySamplerYcbcrConversion = (PFN_vkDestroySamplerYcbcrConversion) load(userptr, "vkDestroySamplerYcbcrConversion");
+ glad_vkEnumerateInstanceVersion = (PFN_vkEnumerateInstanceVersion) load(userptr, "vkEnumerateInstanceVersion");
+ glad_vkEnumeratePhysicalDeviceGroups = (PFN_vkEnumeratePhysicalDeviceGroups) load(userptr, "vkEnumeratePhysicalDeviceGroups");
+ glad_vkGetBufferMemoryRequirements2 = (PFN_vkGetBufferMemoryRequirements2) load(userptr, "vkGetBufferMemoryRequirements2");
+ glad_vkGetDescriptorSetLayoutSupport = (PFN_vkGetDescriptorSetLayoutSupport) load(userptr, "vkGetDescriptorSetLayoutSupport");
+ glad_vkGetDeviceGroupPeerMemoryFeatures = (PFN_vkGetDeviceGroupPeerMemoryFeatures) load(userptr, "vkGetDeviceGroupPeerMemoryFeatures");
+ glad_vkGetDeviceQueue2 = (PFN_vkGetDeviceQueue2) load(userptr, "vkGetDeviceQueue2");
+ glad_vkGetImageMemoryRequirements2 = (PFN_vkGetImageMemoryRequirements2) load(userptr, "vkGetImageMemoryRequirements2");
+ glad_vkGetImageSparseMemoryRequirements2 = (PFN_vkGetImageSparseMemoryRequirements2) load(userptr, "vkGetImageSparseMemoryRequirements2");
+ glad_vkGetPhysicalDeviceExternalBufferProperties = (PFN_vkGetPhysicalDeviceExternalBufferProperties) load(userptr, "vkGetPhysicalDeviceExternalBufferProperties");
+ glad_vkGetPhysicalDeviceExternalFenceProperties = (PFN_vkGetPhysicalDeviceExternalFenceProperties) load(userptr, "vkGetPhysicalDeviceExternalFenceProperties");
+ glad_vkGetPhysicalDeviceExternalSemaphoreProperties = (PFN_vkGetPhysicalDeviceExternalSemaphoreProperties) load(userptr, "vkGetPhysicalDeviceExternalSemaphoreProperties");
+ glad_vkGetPhysicalDeviceFeatures2 = (PFN_vkGetPhysicalDeviceFeatures2) load(userptr, "vkGetPhysicalDeviceFeatures2");
+ glad_vkGetPhysicalDeviceFormatProperties2 = (PFN_vkGetPhysicalDeviceFormatProperties2) load(userptr, "vkGetPhysicalDeviceFormatProperties2");
+ glad_vkGetPhysicalDeviceImageFormatProperties2 = (PFN_vkGetPhysicalDeviceImageFormatProperties2) load(userptr, "vkGetPhysicalDeviceImageFormatProperties2");
+ glad_vkGetPhysicalDeviceMemoryProperties2 = (PFN_vkGetPhysicalDeviceMemoryProperties2) load(userptr, "vkGetPhysicalDeviceMemoryProperties2");
+ glad_vkGetPhysicalDeviceProperties2 = (PFN_vkGetPhysicalDeviceProperties2) load(userptr, "vkGetPhysicalDeviceProperties2");
+ glad_vkGetPhysicalDeviceQueueFamilyProperties2 = (PFN_vkGetPhysicalDeviceQueueFamilyProperties2) load(userptr, "vkGetPhysicalDeviceQueueFamilyProperties2");
+ glad_vkGetPhysicalDeviceSparseImageFormatProperties2 = (PFN_vkGetPhysicalDeviceSparseImageFormatProperties2) load(userptr, "vkGetPhysicalDeviceSparseImageFormatProperties2");
+ glad_vkTrimCommandPool = (PFN_vkTrimCommandPool) load(userptr, "vkTrimCommandPool");
+ glad_vkUpdateDescriptorSetWithTemplate = (PFN_vkUpdateDescriptorSetWithTemplate) load(userptr, "vkUpdateDescriptorSetWithTemplate");
+}
+static void glad_vk_load_VK_EXT_debug_report( GLADuserptrloadfunc load, void* userptr) {
+ if(!GLAD_VK_EXT_debug_report) return;
+ glad_vkCreateDebugReportCallbackEXT = (PFN_vkCreateDebugReportCallbackEXT) load(userptr, "vkCreateDebugReportCallbackEXT");
+ glad_vkDebugReportMessageEXT = (PFN_vkDebugReportMessageEXT) load(userptr, "vkDebugReportMessageEXT");
+ glad_vkDestroyDebugReportCallbackEXT = (PFN_vkDestroyDebugReportCallbackEXT) load(userptr, "vkDestroyDebugReportCallbackEXT");
+}
+static void glad_vk_load_VK_KHR_surface( GLADuserptrloadfunc load, void* userptr) {
+ if(!GLAD_VK_KHR_surface) return;
+ glad_vkDestroySurfaceKHR = (PFN_vkDestroySurfaceKHR) load(userptr, "vkDestroySurfaceKHR");
+ glad_vkGetPhysicalDeviceSurfaceCapabilitiesKHR = (PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR) load(userptr, "vkGetPhysicalDeviceSurfaceCapabilitiesKHR");
+ glad_vkGetPhysicalDeviceSurfaceFormatsKHR = (PFN_vkGetPhysicalDeviceSurfaceFormatsKHR) load(userptr, "vkGetPhysicalDeviceSurfaceFormatsKHR");
+ glad_vkGetPhysicalDeviceSurfacePresentModesKHR = (PFN_vkGetPhysicalDeviceSurfacePresentModesKHR) load(userptr, "vkGetPhysicalDeviceSurfacePresentModesKHR");
+ glad_vkGetPhysicalDeviceSurfaceSupportKHR = (PFN_vkGetPhysicalDeviceSurfaceSupportKHR) load(userptr, "vkGetPhysicalDeviceSurfaceSupportKHR");
+}
+static void glad_vk_load_VK_KHR_swapchain( GLADuserptrloadfunc load, void* userptr) {
+ if(!GLAD_VK_KHR_swapchain) return;
+ glad_vkAcquireNextImage2KHR = (PFN_vkAcquireNextImage2KHR) load(userptr, "vkAcquireNextImage2KHR");
+ glad_vkAcquireNextImageKHR = (PFN_vkAcquireNextImageKHR) load(userptr, "vkAcquireNextImageKHR");
+ glad_vkCreateSwapchainKHR = (PFN_vkCreateSwapchainKHR) load(userptr, "vkCreateSwapchainKHR");
+ glad_vkDestroySwapchainKHR = (PFN_vkDestroySwapchainKHR) load(userptr, "vkDestroySwapchainKHR");
+ glad_vkGetDeviceGroupPresentCapabilitiesKHR = (PFN_vkGetDeviceGroupPresentCapabilitiesKHR) load(userptr, "vkGetDeviceGroupPresentCapabilitiesKHR");
+ glad_vkGetDeviceGroupSurfacePresentModesKHR = (PFN_vkGetDeviceGroupSurfacePresentModesKHR) load(userptr, "vkGetDeviceGroupSurfacePresentModesKHR");
+ glad_vkGetPhysicalDevicePresentRectanglesKHR = (PFN_vkGetPhysicalDevicePresentRectanglesKHR) load(userptr, "vkGetPhysicalDevicePresentRectanglesKHR");
+ glad_vkGetSwapchainImagesKHR = (PFN_vkGetSwapchainImagesKHR) load(userptr, "vkGetSwapchainImagesKHR");
+ glad_vkQueuePresentKHR = (PFN_vkQueuePresentKHR) load(userptr, "vkQueuePresentKHR");
+}
+
+
+
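+/* Collects the names of all available instance extensions, plus device extensions
+ * when a physical device is given, into one heap-allocated string array; callers
+ * release it with glad_vk_free_extensions below. Returns 1 on success, 0 otherwise. */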
+static int glad_vk_get_extensions( VkPhysicalDevice physical_device, uint32_t *out_extension_count, char ***out_extensions) {
+ uint32_t i;
+ uint32_t instance_extension_count = 0;
+ uint32_t device_extension_count = 0;
+ uint32_t max_extension_count = 0;
+ uint32_t total_extension_count = 0;
+ char **extensions = NULL;
+ VkExtensionProperties *ext_properties = NULL;
+ VkResult result;
+
+ if (glad_vkEnumerateInstanceExtensionProperties == NULL || (physical_device != NULL && glad_vkEnumerateDeviceExtensionProperties == NULL)) {
+ return 0;
+ }
+
+ result = glad_vkEnumerateInstanceExtensionProperties(NULL, &instance_extension_count, NULL);
+ if (result != VK_SUCCESS) {
+ return 0;
+ }
+
+ if (physical_device != NULL) {
+ result = glad_vkEnumerateDeviceExtensionProperties(physical_device, NULL, &device_extension_count, NULL);
+ if (result != VK_SUCCESS) {
+ return 0;
+ }
+ }
+
+ total_extension_count = instance_extension_count + device_extension_count;
+ if (total_extension_count <= 0) {
+ return 0;
+ }
+
+ max_extension_count = instance_extension_count > device_extension_count
+ ? instance_extension_count : device_extension_count;
+
+ ext_properties = (VkExtensionProperties*) malloc(max_extension_count * sizeof(VkExtensionProperties));
+ if (ext_properties == NULL) {
+ goto glad_vk_get_extensions_error;
+ }
+
+ result = glad_vkEnumerateInstanceExtensionProperties(NULL, &instance_extension_count, ext_properties);
+ if (result != VK_SUCCESS) {
+ goto glad_vk_get_extensions_error;
+ }
+
+ extensions = (char**) calloc(total_extension_count, sizeof(char*));
+ if (extensions == NULL) {
+ goto glad_vk_get_extensions_error;
+ }
+
+ for (i = 0; i < instance_extension_count; ++i) {
+ VkExtensionProperties ext = ext_properties[i];
+
+ size_t extension_name_length = strlen(ext.extensionName) + 1;
+ extensions[i] = (char*) malloc(extension_name_length * sizeof(char));
+ if (extensions[i] == NULL) {
+ goto glad_vk_get_extensions_error;
+ }
+ memcpy(extensions[i], ext.extensionName, extension_name_length * sizeof(char));
+ }
+
+ if (physical_device != NULL) {
+ result = glad_vkEnumerateDeviceExtensionProperties(physical_device, NULL, &device_extension_count, ext_properties);
+ if (result != VK_SUCCESS) {
+ goto glad_vk_get_extensions_error;
+ }
+
+ for (i = 0; i < device_extension_count; ++i) {
+ VkExtensionProperties ext = ext_properties[i];
+
+ size_t extension_name_length = strlen(ext.extensionName) + 1;
+ extensions[instance_extension_count + i] = (char*) malloc(extension_name_length * sizeof(char));
+ if (extensions[instance_extension_count + i] == NULL) {
+ goto glad_vk_get_extensions_error;
+ }
+ memcpy(extensions[instance_extension_count + i], ext.extensionName, extension_name_length * sizeof(char));
+ }
+ }
+
+ free((void*) ext_properties);
+
+ *out_extension_count = total_extension_count;
+ *out_extensions = extensions;
+
+ return 1;
+
+glad_vk_get_extensions_error:
+ free((void*) ext_properties);
+ if (extensions != NULL) {
+ for (i = 0; i < total_extension_count; ++i) {
+ free((void*) extensions[i]);
+ }
+ free(extensions);
+ }
+ return 0;
+}
+
+static void glad_vk_free_extensions(uint32_t extension_count, char **extensions) {
+ uint32_t i;
+
+ for(i = 0; i < extension_count ; ++i) {
+ free((void*) (extensions[i]));
+ }
+
+ free((void*) extensions);
+}
+
+static int glad_vk_has_extension(const char *name, uint32_t extension_count, char **extensions) {
+ uint32_t i;
+
+ for (i = 0; i < extension_count; ++i) {
+ if(extensions[i] != NULL && strcmp(name, extensions[i]) == 0) {
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+static GLADapiproc glad_vk_get_proc_from_userptr(void *userptr, const char* name) {
+ return (GLAD_GNUC_EXTENSION (GLADapiproc (*)(const char *name)) userptr)(name);
+}
+
+static int glad_vk_find_extensions_vulkan( VkPhysicalDevice physical_device) {
+ uint32_t extension_count = 0;
+ char **extensions = NULL;
+ if (!glad_vk_get_extensions(physical_device, &extension_count, &extensions)) return 0;
+
+ GLAD_VK_EXT_debug_report = glad_vk_has_extension("VK_EXT_debug_report", extension_count, extensions);
+ GLAD_VK_KHR_surface = glad_vk_has_extension("VK_KHR_surface", extension_count, extensions);
+ GLAD_VK_KHR_swapchain = glad_vk_has_extension("VK_KHR_swapchain", extension_count, extensions);
+
+ (void) glad_vk_has_extension;
+
+ glad_vk_free_extensions(extension_count, extensions);
+
+ return 1;
+}
+
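+/* Determines the core version to load: vkEnumerateInstanceVersion is used when the
+ * loader exposes it (Vulkan 1.1+); if a physical device is supplied, its reported
+ * apiVersion takes precedence. */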
+static int glad_vk_find_core_vulkan( VkPhysicalDevice physical_device) {
+ int major = 1;
+ int minor = 0;
+
+#ifdef VK_VERSION_1_1
+ if (glad_vkEnumerateInstanceVersion != NULL) {
+ uint32_t version;
+ VkResult result;
+
+ result = glad_vkEnumerateInstanceVersion(&version);
+ if (result == VK_SUCCESS) {
+ major = (int) VK_VERSION_MAJOR(version);
+ minor = (int) VK_VERSION_MINOR(version);
+ }
+ }
+#endif
+
+ if (physical_device != NULL && glad_vkGetPhysicalDeviceProperties != NULL) {
+ VkPhysicalDeviceProperties properties;
+ glad_vkGetPhysicalDeviceProperties(physical_device, &properties);
+
+ major = (int) VK_VERSION_MAJOR(properties.apiVersion);
+ minor = (int) VK_VERSION_MINOR(properties.apiVersion);
+ }
+
+ GLAD_VK_VERSION_1_0 = (major == 1 && minor >= 0) || major > 1;
+ GLAD_VK_VERSION_1_1 = (major == 1 && minor >= 1) || major > 1;
+
+ return GLAD_MAKE_VERSION(major, minor);
+}
+
+int gladLoadVulkanUserPtr( VkPhysicalDevice physical_device, GLADuserptrloadfunc load, void *userptr) {
+ int version;
+
+#ifdef VK_VERSION_1_1
+ glad_vkEnumerateInstanceVersion = (PFN_vkEnumerateInstanceVersion) load(userptr, "vkEnumerateInstanceVersion");
+#endif
+ version = glad_vk_find_core_vulkan( physical_device);
+ if (!version) {
+ return 0;
+ }
+
+ glad_vk_load_VK_VERSION_1_0(load, userptr);
+ glad_vk_load_VK_VERSION_1_1(load, userptr);
+
+ if (!glad_vk_find_extensions_vulkan( physical_device)) return 0;
+ glad_vk_load_VK_EXT_debug_report(load, userptr);
+ glad_vk_load_VK_KHR_surface(load, userptr);
+ glad_vk_load_VK_KHR_swapchain(load, userptr);
+
+
+ return version;
+}
+
+
+int gladLoadVulkan( VkPhysicalDevice physical_device, GLADloadfunc load) {
+ return gladLoadVulkanUserPtr( physical_device, glad_vk_get_proc_from_userptr, GLAD_GNUC_EXTENSION (void*) load);
+}
+
+
+
+
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* GLAD_VULKAN_IMPLEMENTATION */
+
diff --git a/chromium/third_party/dawn/third_party/glfw/deps/linmath.h b/chromium/third_party/dawn/third_party/glfw/deps/linmath.h
new file mode 100644
index 00000000000..5c802656538
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/deps/linmath.h
@@ -0,0 +1,606 @@
+#ifndef LINMATH_H
+#define LINMATH_H
+
+#include <string.h>
+#include <math.h>
+
+/* 2021-03-21 Camilla Löwy <elmindreda@elmindreda.org>
+ * - Replaced double constants with float equivalents
+ */
+
+#ifdef LINMATH_NO_INLINE
+#define LINMATH_H_FUNC static
+#else
+#define LINMATH_H_FUNC static inline
+#endif
+
+#define LINMATH_H_DEFINE_VEC(n) \
+typedef float vec##n[n]; \
+LINMATH_H_FUNC void vec##n##_add(vec##n r, vec##n const a, vec##n const b) \
+{ \
+ int i; \
+ for(i=0; i<n; ++i) \
+ r[i] = a[i] + b[i]; \
+} \
+LINMATH_H_FUNC void vec##n##_sub(vec##n r, vec##n const a, vec##n const b) \
+{ \
+ int i; \
+ for(i=0; i<n; ++i) \
+ r[i] = a[i] - b[i]; \
+} \
+LINMATH_H_FUNC void vec##n##_scale(vec##n r, vec##n const v, float const s) \
+{ \
+ int i; \
+ for(i=0; i<n; ++i) \
+ r[i] = v[i] * s; \
+} \
+LINMATH_H_FUNC float vec##n##_mul_inner(vec##n const a, vec##n const b) \
+{ \
+ float p = 0.f; \
+ int i; \
+ for(i=0; i<n; ++i) \
+ p += b[i]*a[i]; \
+ return p; \
+} \
+LINMATH_H_FUNC float vec##n##_len(vec##n const v) \
+{ \
+ return sqrtf(vec##n##_mul_inner(v,v)); \
+} \
+LINMATH_H_FUNC void vec##n##_norm(vec##n r, vec##n const v) \
+{ \
+ float k = 1.f / vec##n##_len(v); \
+ vec##n##_scale(r, v, k); \
+} \
+LINMATH_H_FUNC void vec##n##_min(vec##n r, vec##n const a, vec##n const b) \
+{ \
+ int i; \
+ for(i=0; i<n; ++i) \
+ r[i] = a[i]<b[i] ? a[i] : b[i]; \
+} \
+LINMATH_H_FUNC void vec##n##_max(vec##n r, vec##n const a, vec##n const b) \
+{ \
+ int i; \
+ for(i=0; i<n; ++i) \
+ r[i] = a[i]>b[i] ? a[i] : b[i]; \
+} \
+LINMATH_H_FUNC void vec##n##_dup(vec##n r, vec##n const src) \
+{ \
+ int i; \
+ for(i=0; i<n; ++i) \
+ r[i] = src[i]; \
+}
+
+LINMATH_H_DEFINE_VEC(2)
+LINMATH_H_DEFINE_VEC(3)
+LINMATH_H_DEFINE_VEC(4)
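+
+/* The macro above instantiates vec2, vec3 and vec4 as plain float arrays with
+ * element-wise add/sub/scale, dot product, length, normalization, min/max and copy.
+ * A minimal usage sketch:
+ *
+ *   vec3 a = {1.f, 0.f, 0.f}, b = {0.f, 1.f, 0.f}, c;
+ *   vec3_add(c, a, b);               // c = (1, 1, 0)
+ *   float d = vec3_mul_inner(a, b);  // dot product, 0.f here
+ *   float len = vec3_len(c);         // sqrtf(2.f)
+ */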
+
+LINMATH_H_FUNC void vec3_mul_cross(vec3 r, vec3 const a, vec3 const b)
+{
+ r[0] = a[1]*b[2] - a[2]*b[1];
+ r[1] = a[2]*b[0] - a[0]*b[2];
+ r[2] = a[0]*b[1] - a[1]*b[0];
+}
+
+LINMATH_H_FUNC void vec3_reflect(vec3 r, vec3 const v, vec3 const n)
+{
+ float p = 2.f * vec3_mul_inner(v, n);
+ int i;
+ for(i=0;i<3;++i)
+ r[i] = v[i] - p*n[i];
+}
+
+LINMATH_H_FUNC void vec4_mul_cross(vec4 r, vec4 const a, vec4 const b)
+{
+ r[0] = a[1]*b[2] - a[2]*b[1];
+ r[1] = a[2]*b[0] - a[0]*b[2];
+ r[2] = a[0]*b[1] - a[1]*b[0];
+ r[3] = 1.f;
+}
+
+LINMATH_H_FUNC void vec4_reflect(vec4 r, vec4 const v, vec4 const n)
+{
+ float p = 2.f*vec4_mul_inner(v, n);
+ int i;
+ for(i=0;i<4;++i)
+ r[i] = v[i] - p*n[i];
+}
+
+typedef vec4 mat4x4[4];
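+/* Matrices are stored column-major: M[c] is column c and M[c][r] is the element in
+ * row r of that column, so a transform's translation lives in M[3] (see
+ * mat4x4_translate below). */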
+LINMATH_H_FUNC void mat4x4_identity(mat4x4 M)
+{
+ int i, j;
+ for(i=0; i<4; ++i)
+ for(j=0; j<4; ++j)
+ M[i][j] = i==j ? 1.f : 0.f;
+}
+LINMATH_H_FUNC void mat4x4_dup(mat4x4 M, mat4x4 const N)
+{
+ int i;
+ for(i=0; i<4; ++i)
+ vec4_dup(M[i], N[i]);
+}
+LINMATH_H_FUNC void mat4x4_row(vec4 r, mat4x4 const M, int i)
+{
+ int k;
+ for(k=0; k<4; ++k)
+ r[k] = M[k][i];
+}
+LINMATH_H_FUNC void mat4x4_col(vec4 r, mat4x4 const M, int i)
+{
+ int k;
+ for(k=0; k<4; ++k)
+ r[k] = M[i][k];
+}
+LINMATH_H_FUNC void mat4x4_transpose(mat4x4 M, mat4x4 const N)
+{
+    // Note: in-place transposition is not supported; if the destination and
+    // source alias, copy the source matrix first and pass the copy as N.
+ int i, j;
+ for(j=0; j<4; ++j)
+ for(i=0; i<4; ++i)
+ M[i][j] = N[j][i];
+}
+LINMATH_H_FUNC void mat4x4_add(mat4x4 M, mat4x4 const a, mat4x4 const b)
+{
+ int i;
+ for(i=0; i<4; ++i)
+ vec4_add(M[i], a[i], b[i]);
+}
+LINMATH_H_FUNC void mat4x4_sub(mat4x4 M, mat4x4 const a, mat4x4 const b)
+{
+ int i;
+ for(i=0; i<4; ++i)
+ vec4_sub(M[i], a[i], b[i]);
+}
+LINMATH_H_FUNC void mat4x4_scale(mat4x4 M, mat4x4 const a, float k)
+{
+ int i;
+ for(i=0; i<4; ++i)
+ vec4_scale(M[i], a[i], k);
+}
+LINMATH_H_FUNC void mat4x4_scale_aniso(mat4x4 M, mat4x4 const a, float x, float y, float z)
+{
+ vec4_scale(M[0], a[0], x);
+ vec4_scale(M[1], a[1], y);
+ vec4_scale(M[2], a[2], z);
+ vec4_dup(M[3], a[3]);
+}
+LINMATH_H_FUNC void mat4x4_mul(mat4x4 M, mat4x4 const a, mat4x4 const b)
+{
+ mat4x4 temp;
+ int k, r, c;
+ for(c=0; c<4; ++c) for(r=0; r<4; ++r) {
+ temp[c][r] = 0.f;
+ for(k=0; k<4; ++k)
+ temp[c][r] += a[k][r] * b[c][k];
+ }
+ mat4x4_dup(M, temp);
+}
+LINMATH_H_FUNC void mat4x4_mul_vec4(vec4 r, mat4x4 const M, vec4 const v)
+{
+ int i, j;
+ for(j=0; j<4; ++j) {
+ r[j] = 0.f;
+ for(i=0; i<4; ++i)
+ r[j] += M[i][j] * v[i];
+ }
+}
+LINMATH_H_FUNC void mat4x4_translate(mat4x4 T, float x, float y, float z)
+{
+ mat4x4_identity(T);
+ T[3][0] = x;
+ T[3][1] = y;
+ T[3][2] = z;
+}
+LINMATH_H_FUNC void mat4x4_translate_in_place(mat4x4 M, float x, float y, float z)
+{
+ vec4 t = {x, y, z, 0};
+ vec4 r;
+ int i;
+ for (i = 0; i < 4; ++i) {
+ mat4x4_row(r, M, i);
+ M[3][i] += vec4_mul_inner(r, t);
+ }
+}
+LINMATH_H_FUNC void mat4x4_from_vec3_mul_outer(mat4x4 M, vec3 const a, vec3 const b)
+{
+ int i, j;
+ for(i=0; i<4; ++i) for(j=0; j<4; ++j)
+ M[i][j] = i<3 && j<3 ? a[i] * b[j] : 0.f;
+}
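+/* mat4x4_rotate builds an axis-angle rotation via Rodrigues' formula, combining the
+ * outer product u*u^T, (I - u*u^T)*cos(angle) and a skew-symmetric term scaled by
+ * sin(angle); for a near-zero axis it falls back to copying M. */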
+LINMATH_H_FUNC void mat4x4_rotate(mat4x4 R, mat4x4 const M, float x, float y, float z, float angle)
+{
+ float s = sinf(angle);
+ float c = cosf(angle);
+ vec3 u = {x, y, z};
+
+ if(vec3_len(u) > 1e-4) {
+ vec3_norm(u, u);
+ mat4x4 T;
+ mat4x4_from_vec3_mul_outer(T, u, u);
+
+ mat4x4 S = {
+ { 0, u[2], -u[1], 0},
+ {-u[2], 0, u[0], 0},
+ { u[1], -u[0], 0, 0},
+ { 0, 0, 0, 0}
+ };
+ mat4x4_scale(S, S, s);
+
+ mat4x4 C;
+ mat4x4_identity(C);
+ mat4x4_sub(C, C, T);
+
+ mat4x4_scale(C, C, c);
+
+ mat4x4_add(T, T, C);
+ mat4x4_add(T, T, S);
+
+ T[3][3] = 1.f;
+ mat4x4_mul(R, M, T);
+ } else {
+ mat4x4_dup(R, M);
+ }
+}
+LINMATH_H_FUNC void mat4x4_rotate_X(mat4x4 Q, mat4x4 const M, float angle)
+{
+ float s = sinf(angle);
+ float c = cosf(angle);
+ mat4x4 R = {
+ {1.f, 0.f, 0.f, 0.f},
+ {0.f, c, s, 0.f},
+ {0.f, -s, c, 0.f},
+ {0.f, 0.f, 0.f, 1.f}
+ };
+ mat4x4_mul(Q, M, R);
+}
+LINMATH_H_FUNC void mat4x4_rotate_Y(mat4x4 Q, mat4x4 const M, float angle)
+{
+ float s = sinf(angle);
+ float c = cosf(angle);
+ mat4x4 R = {
+ { c, 0.f, -s, 0.f},
+ { 0.f, 1.f, 0.f, 0.f},
+ { s, 0.f, c, 0.f},
+ { 0.f, 0.f, 0.f, 1.f}
+ };
+ mat4x4_mul(Q, M, R);
+}
+LINMATH_H_FUNC void mat4x4_rotate_Z(mat4x4 Q, mat4x4 const M, float angle)
+{
+ float s = sinf(angle);
+ float c = cosf(angle);
+ mat4x4 R = {
+ { c, s, 0.f, 0.f},
+ { -s, c, 0.f, 0.f},
+ { 0.f, 0.f, 1.f, 0.f},
+ { 0.f, 0.f, 0.f, 1.f}
+ };
+ mat4x4_mul(Q, M, R);
+}
+LINMATH_H_FUNC void mat4x4_invert(mat4x4 T, mat4x4 const M)
+{
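+	/* Cofactor (Cramer's rule) approach: s[] holds the 2x2 minors of the first
+	 * two columns, c[] those of the last two; the determinant expands as
+	 * s0*c5 - s1*c4 + s2*c3 + s3*c2 - s4*c1 + s5*c0. */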
+ float s[6];
+ float c[6];
+ s[0] = M[0][0]*M[1][1] - M[1][0]*M[0][1];
+ s[1] = M[0][0]*M[1][2] - M[1][0]*M[0][2];
+ s[2] = M[0][0]*M[1][3] - M[1][0]*M[0][3];
+ s[3] = M[0][1]*M[1][2] - M[1][1]*M[0][2];
+ s[4] = M[0][1]*M[1][3] - M[1][1]*M[0][3];
+ s[5] = M[0][2]*M[1][3] - M[1][2]*M[0][3];
+
+ c[0] = M[2][0]*M[3][1] - M[3][0]*M[2][1];
+ c[1] = M[2][0]*M[3][2] - M[3][0]*M[2][2];
+ c[2] = M[2][0]*M[3][3] - M[3][0]*M[2][3];
+ c[3] = M[2][1]*M[3][2] - M[3][1]*M[2][2];
+ c[4] = M[2][1]*M[3][3] - M[3][1]*M[2][3];
+ c[5] = M[2][2]*M[3][3] - M[3][2]*M[2][3];
+
+ /* Assumes it is invertible */
+ float idet = 1.0f/( s[0]*c[5]-s[1]*c[4]+s[2]*c[3]+s[3]*c[2]-s[4]*c[1]+s[5]*c[0] );
+
+ T[0][0] = ( M[1][1] * c[5] - M[1][2] * c[4] + M[1][3] * c[3]) * idet;
+ T[0][1] = (-M[0][1] * c[5] + M[0][2] * c[4] - M[0][3] * c[3]) * idet;
+ T[0][2] = ( M[3][1] * s[5] - M[3][2] * s[4] + M[3][3] * s[3]) * idet;
+ T[0][3] = (-M[2][1] * s[5] + M[2][2] * s[4] - M[2][3] * s[3]) * idet;
+
+ T[1][0] = (-M[1][0] * c[5] + M[1][2] * c[2] - M[1][3] * c[1]) * idet;
+ T[1][1] = ( M[0][0] * c[5] - M[0][2] * c[2] + M[0][3] * c[1]) * idet;
+ T[1][2] = (-M[3][0] * s[5] + M[3][2] * s[2] - M[3][3] * s[1]) * idet;
+ T[1][3] = ( M[2][0] * s[5] - M[2][2] * s[2] + M[2][3] * s[1]) * idet;
+
+ T[2][0] = ( M[1][0] * c[4] - M[1][1] * c[2] + M[1][3] * c[0]) * idet;
+ T[2][1] = (-M[0][0] * c[4] + M[0][1] * c[2] - M[0][3] * c[0]) * idet;
+ T[2][2] = ( M[3][0] * s[4] - M[3][1] * s[2] + M[3][3] * s[0]) * idet;
+ T[2][3] = (-M[2][0] * s[4] + M[2][1] * s[2] - M[2][3] * s[0]) * idet;
+
+ T[3][0] = (-M[1][0] * c[3] + M[1][1] * c[1] - M[1][2] * c[0]) * idet;
+ T[3][1] = ( M[0][0] * c[3] - M[0][1] * c[1] + M[0][2] * c[0]) * idet;
+ T[3][2] = (-M[3][0] * s[3] + M[3][1] * s[1] - M[3][2] * s[0]) * idet;
+ T[3][3] = ( M[2][0] * s[3] - M[2][1] * s[1] + M[2][2] * s[0]) * idet;
+}
+LINMATH_H_FUNC void mat4x4_orthonormalize(mat4x4 R, mat4x4 const M)
+{
+ mat4x4_dup(R, M);
+ float s = 1.f;
+ vec3 h;
+
+ vec3_norm(R[2], R[2]);
+
+ s = vec3_mul_inner(R[1], R[2]);
+ vec3_scale(h, R[2], s);
+ vec3_sub(R[1], R[1], h);
+ vec3_norm(R[1], R[1]);
+
+ s = vec3_mul_inner(R[0], R[2]);
+ vec3_scale(h, R[2], s);
+ vec3_sub(R[0], R[0], h);
+
+ s = vec3_mul_inner(R[0], R[1]);
+ vec3_scale(h, R[1], s);
+ vec3_sub(R[0], R[0], h);
+ vec3_norm(R[0], R[0]);
+}
+
+LINMATH_H_FUNC void mat4x4_frustum(mat4x4 M, float l, float r, float b, float t, float n, float f)
+{
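+	/* Same matrix as the classic OpenGL glFrustum (right-handed, NDC z in [-1, 1]). */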
+ M[0][0] = 2.f*n/(r-l);
+ M[0][1] = M[0][2] = M[0][3] = 0.f;
+
+ M[1][1] = 2.f*n/(t-b);
+ M[1][0] = M[1][2] = M[1][3] = 0.f;
+
+ M[2][0] = (r+l)/(r-l);
+ M[2][1] = (t+b)/(t-b);
+ M[2][2] = -(f+n)/(f-n);
+ M[2][3] = -1.f;
+
+ M[3][2] = -2.f*(f*n)/(f-n);
+ M[3][0] = M[3][1] = M[3][3] = 0.f;
+}
+LINMATH_H_FUNC void mat4x4_ortho(mat4x4 M, float l, float r, float b, float t, float n, float f)
+{
+ M[0][0] = 2.f/(r-l);
+ M[0][1] = M[0][2] = M[0][3] = 0.f;
+
+ M[1][1] = 2.f/(t-b);
+ M[1][0] = M[1][2] = M[1][3] = 0.f;
+
+ M[2][2] = -2.f/(f-n);
+ M[2][0] = M[2][1] = M[2][3] = 0.f;
+
+ M[3][0] = -(r+l)/(r-l);
+ M[3][1] = -(t+b)/(t-b);
+ M[3][2] = -(f+n)/(f-n);
+ M[3][3] = 1.f;
+}
+LINMATH_H_FUNC void mat4x4_perspective(mat4x4 m, float y_fov, float aspect, float n, float f)
+{
+	/* NOTE: Degrees are an inconvenient unit to work with.
+	 * linmath.h uses radians for everything! */
+ float const a = 1.f / tanf(y_fov / 2.f);
+
+ m[0][0] = a / aspect;
+ m[0][1] = 0.f;
+ m[0][2] = 0.f;
+ m[0][3] = 0.f;
+
+ m[1][0] = 0.f;
+ m[1][1] = a;
+ m[1][2] = 0.f;
+ m[1][3] = 0.f;
+
+ m[2][0] = 0.f;
+ m[2][1] = 0.f;
+ m[2][2] = -((f + n) / (f - n));
+ m[2][3] = -1.f;
+
+ m[3][0] = 0.f;
+ m[3][1] = 0.f;
+ m[3][2] = -((2.f * f * n) / (f - n));
+ m[3][3] = 0.f;
+}
+LINMATH_H_FUNC void mat4x4_look_at(mat4x4 m, vec3 const eye, vec3 const center, vec3 const up)
+{
+ /* Adapted from Android's OpenGL Matrix.java. */
+ /* See the OpenGL GLUT documentation for gluLookAt for a description */
+ /* of the algorithm. We implement it in a straightforward way: */
+
+	/* TODO: The negation of f can be spared by swapping the order of
+	 * operands in the following cross products in the right way. */
+ vec3 f;
+ vec3_sub(f, center, eye);
+ vec3_norm(f, f);
+
+ vec3 s;
+ vec3_mul_cross(s, f, up);
+ vec3_norm(s, s);
+
+ vec3 t;
+ vec3_mul_cross(t, s, f);
+
+ m[0][0] = s[0];
+ m[0][1] = t[0];
+ m[0][2] = -f[0];
+ m[0][3] = 0.f;
+
+ m[1][0] = s[1];
+ m[1][1] = t[1];
+ m[1][2] = -f[1];
+ m[1][3] = 0.f;
+
+ m[2][0] = s[2];
+ m[2][1] = t[2];
+ m[2][2] = -f[2];
+ m[2][3] = 0.f;
+
+ m[3][0] = 0.f;
+ m[3][1] = 0.f;
+ m[3][2] = 0.f;
+ m[3][3] = 1.f;
+
+ mat4x4_translate_in_place(m, -eye[0], -eye[1], -eye[2]);
+}
+
+typedef float quat[4];
+#define quat_add vec4_add
+#define quat_sub vec4_sub
+#define quat_norm vec4_norm
+#define quat_scale vec4_scale
+#define quat_mul_inner vec4_mul_inner
+
+LINMATH_H_FUNC void quat_identity(quat q)
+{
+ q[0] = q[1] = q[2] = 0.f;
+ q[3] = 1.f;
+}
+LINMATH_H_FUNC void quat_mul(quat r, quat const p, quat const q)
+{
+ vec3 w;
+ vec3_mul_cross(r, p, q);
+ vec3_scale(w, p, q[3]);
+ vec3_add(r, r, w);
+ vec3_scale(w, q, p[3]);
+ vec3_add(r, r, w);
+ r[3] = p[3]*q[3] - vec3_mul_inner(p, q);
+}
+LINMATH_H_FUNC void quat_conj(quat r, quat const q)
+{
+ int i;
+ for(i=0; i<3; ++i)
+ r[i] = -q[i];
+ r[3] = q[3];
+}
+LINMATH_H_FUNC void quat_rotate(quat r, float angle, vec3 const axis) {
+ vec3 axis_norm;
+ vec3_norm(axis_norm, axis);
+ float s = sinf(angle / 2);
+ float c = cosf(angle / 2);
+ vec3_scale(r, axis_norm, s);
+ r[3] = c;
+}
+LINMATH_H_FUNC void quat_mul_vec3(vec3 r, quat const q, vec3 const v)
+{
+/*
+ * Method by Fabian 'ryg' Giessen (of Farbrausch):
+ *   t  = 2 * cross(q.xyz, v)
+ *   v' = v + q.w * t + cross(q.xyz, t)
+ */
+ vec3 t;
+ vec3 q_xyz = {q[0], q[1], q[2]};
+ vec3 u = {q[0], q[1], q[2]};
+
+ vec3_mul_cross(t, q_xyz, v);
+ vec3_scale(t, t, 2);
+
+ vec3_mul_cross(u, q_xyz, t);
+ vec3_scale(t, t, q[3]);
+
+ vec3_add(r, v, t);
+ vec3_add(r, r, u);
+}
+LINMATH_H_FUNC void mat4x4_from_quat(mat4x4 M, quat const q)
+{
+ float a = q[3];
+ float b = q[0];
+ float c = q[1];
+ float d = q[2];
+ float a2 = a*a;
+ float b2 = b*b;
+ float c2 = c*c;
+ float d2 = d*d;
+
+ M[0][0] = a2 + b2 - c2 - d2;
+ M[0][1] = 2.f*(b*c + a*d);
+ M[0][2] = 2.f*(b*d - a*c);
+ M[0][3] = 0.f;
+
+	M[1][0] = 2.f*(b*c - a*d);
+ M[1][1] = a2 - b2 + c2 - d2;
+ M[1][2] = 2.f*(c*d + a*b);
+ M[1][3] = 0.f;
+
+ M[2][0] = 2.f*(b*d + a*c);
+ M[2][1] = 2.f*(c*d - a*b);
+ M[2][2] = a2 - b2 - c2 + d2;
+ M[2][3] = 0.f;
+
+ M[3][0] = M[3][1] = M[3][2] = 0.f;
+ M[3][3] = 1.f;
+}
+
+LINMATH_H_FUNC void mat4x4o_mul_quat(mat4x4 R, mat4x4 const M, quat const q)
+{
+/* XXX: The way this is written only works for orthogonal matrices. */
+/* TODO: Take care of non-orthogonal case. */
+ quat_mul_vec3(R[0], q, M[0]);
+ quat_mul_vec3(R[1], q, M[1]);
+ quat_mul_vec3(R[2], q, M[2]);
+
+ R[3][0] = R[3][1] = R[3][2] = 0.f;
+ R[0][3] = M[0][3];
+ R[1][3] = M[1][3];
+ R[2][3] = M[2][3];
+ R[3][3] = M[3][3]; // typically 1.0, but here we make it general
+}
+LINMATH_H_FUNC void quat_from_mat4x4(quat q, mat4x4 const M)
+{
+ float r=0.f;
+ int i;
+
+ int perm[] = { 0, 1, 2, 0, 1 };
+ int *p = perm;
+
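+	/* Find the largest diagonal element; its index selects the permutation used below. */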
+ for(i = 0; i<3; i++) {
+ float m = M[i][i];
+ if( m < r )
+ continue;
+		r = m;
+ p = &perm[i];
+ }
+
+ r = sqrtf(1.f + M[p[0]][p[0]] - M[p[1]][p[1]] - M[p[2]][p[2]] );
+
+ if(r < 1e-6) {
+ q[0] = 1.f;
+ q[1] = q[2] = q[3] = 0.f;
+ return;
+ }
+
+ q[0] = r/2.f;
+ q[1] = (M[p[0]][p[1]] - M[p[1]][p[0]])/(2.f*r);
+ q[2] = (M[p[2]][p[0]] - M[p[0]][p[2]])/(2.f*r);
+ q[3] = (M[p[2]][p[1]] - M[p[1]][p[2]])/(2.f*r);
+}
+
+LINMATH_H_FUNC void mat4x4_arcball(mat4x4 R, mat4x4 const M, vec2 const _a, vec2 const _b, float s)
+{
+ vec2 a; memcpy(a, _a, sizeof(a));
+ vec2 b; memcpy(b, _b, sizeof(b));
+
+ float z_a = 0.f;
+ float z_b = 0.f;
+
+ if(vec2_len(a) < 1.f) {
+ z_a = sqrtf(1.f - vec2_mul_inner(a, a));
+ } else {
+ vec2_norm(a, a);
+ }
+
+ if(vec2_len(b) < 1.f) {
+ z_b = sqrtf(1.f - vec2_mul_inner(b, b));
+ } else {
+ vec2_norm(b, b);
+ }
+
+ vec3 a_ = {a[0], a[1], z_a};
+ vec3 b_ = {b[0], b[1], z_b};
+
+ vec3 c_;
+ vec3_mul_cross(c_, a_, b_);
+
+	float const angle = acosf(vec3_mul_inner(a_, b_)) * s;
+ mat4x4_rotate(R, M, c_[0], c_[1], c_[2], angle);
+}
+#endif
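
For reviewers skimming the vendored header above: the fragment below is not part of the patch, only a minimal sketch of how GLFW's samples typically combine these helpers to build a model-view-projection matrix. The function name and the numeric inputs are invented for illustration; angles are radians and matrices are column-major, as in the header itself.

    #include "linmath.h"

    /* Hypothetical helper: MVP = projection * model for an object spinning about Z. */
    static void build_mvp(mat4x4 mvp, float aspect, float angle)
    {
        mat4x4 m, p;
        mat4x4_identity(m);
        mat4x4_rotate_Z(m, m, angle);                        /* model rotation, in radians  */
        mat4x4_perspective(p, 1.047f, aspect, 0.1f, 100.f);  /* ~60 degree vertical FOV     */
        mat4x4_mul(mvp, p, m);                               /* column-major product p * m  */
    }
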
diff --git a/chromium/third_party/dawn/third_party/glfw/deps/mingw/_mingw_dxhelper.h b/chromium/third_party/dawn/third_party/glfw/deps/mingw/_mingw_dxhelper.h
new file mode 100644
index 00000000000..849e2914649
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/deps/mingw/_mingw_dxhelper.h
@@ -0,0 +1,117 @@
+/**
+ * This file has no copyright assigned and is placed in the Public Domain.
+ * This file is part of the mingw-w64 runtime package.
+ * No warranty is given; refer to the file DISCLAIMER within this package.
+ */
+
+#if defined(_MSC_VER) && !defined(_MSC_EXTENSIONS)
+#define NONAMELESSUNION 1
+#endif
+#if defined(NONAMELESSSTRUCT) && \
+ !defined(NONAMELESSUNION)
+#define NONAMELESSUNION 1
+#endif
+#if defined(NONAMELESSUNION) && \
+ !defined(NONAMELESSSTRUCT)
+#define NONAMELESSSTRUCT 1
+#endif
+#if !defined(__GNU_EXTENSION)
+#if defined(__GNUC__) || defined(__GNUG__)
+#define __GNU_EXTENSION __extension__
+#else
+#define __GNU_EXTENSION
+#endif
+#endif /* __extension__ */
+
+#ifndef __ANONYMOUS_DEFINED
+#define __ANONYMOUS_DEFINED
+#if defined(__GNUC__) || defined(__GNUG__)
+#define _ANONYMOUS_UNION __extension__
+#define _ANONYMOUS_STRUCT __extension__
+#else
+#define _ANONYMOUS_UNION
+#define _ANONYMOUS_STRUCT
+#endif
+#ifndef NONAMELESSUNION
+#define _UNION_NAME(x)
+#define _STRUCT_NAME(x)
+#else /* NONAMELESSUNION */
+#define _UNION_NAME(x) x
+#define _STRUCT_NAME(x) x
+#endif
+#endif /* __ANONYMOUS_DEFINED */
+
+#ifndef DUMMYUNIONNAME
+# ifdef NONAMELESSUNION
+# define DUMMYUNIONNAME u
+# define DUMMYUNIONNAME1 u1 /* Wine uses this variant */
+# define DUMMYUNIONNAME2 u2
+# define DUMMYUNIONNAME3 u3
+# define DUMMYUNIONNAME4 u4
+# define DUMMYUNIONNAME5 u5
+# define DUMMYUNIONNAME6 u6
+# define DUMMYUNIONNAME7 u7
+# define DUMMYUNIONNAME8 u8
+# define DUMMYUNIONNAME9 u9
+# else /* NONAMELESSUNION */
+# define DUMMYUNIONNAME
+# define DUMMYUNIONNAME1 /* Wine uses this variant */
+# define DUMMYUNIONNAME2
+# define DUMMYUNIONNAME3
+# define DUMMYUNIONNAME4
+# define DUMMYUNIONNAME5
+# define DUMMYUNIONNAME6
+# define DUMMYUNIONNAME7
+# define DUMMYUNIONNAME8
+# define DUMMYUNIONNAME9
+# endif
+#endif /* DUMMYUNIONNAME */
+
+#if !defined(DUMMYUNIONNAME1) /* MinGW does not define this one */
+# ifdef NONAMELESSUNION
+# define DUMMYUNIONNAME1 u1 /* Wine uses this variant */
+# else
+# define DUMMYUNIONNAME1 /* Wine uses this variant */
+# endif
+#endif /* DUMMYUNIONNAME1 */
+
+#ifndef DUMMYSTRUCTNAME
+# ifdef NONAMELESSUNION
+# define DUMMYSTRUCTNAME s
+# define DUMMYSTRUCTNAME1 s1 /* Wine uses this variant */
+# define DUMMYSTRUCTNAME2 s2
+# define DUMMYSTRUCTNAME3 s3
+# define DUMMYSTRUCTNAME4 s4
+# define DUMMYSTRUCTNAME5 s5
+# else
+# define DUMMYSTRUCTNAME
+# define DUMMYSTRUCTNAME1 /* Wine uses this variant */
+# define DUMMYSTRUCTNAME2
+# define DUMMYSTRUCTNAME3
+# define DUMMYSTRUCTNAME4
+# define DUMMYSTRUCTNAME5
+# endif
+#endif /* DUMMYSTRUCTNAME */
+
+/* These are for compatibility with the Wine source tree */
+
+#ifndef WINELIB_NAME_AW
+# ifdef __MINGW_NAME_AW
+# define WINELIB_NAME_AW __MINGW_NAME_AW
+# else
+# ifdef UNICODE
+# define WINELIB_NAME_AW(func) func##W
+# else
+# define WINELIB_NAME_AW(func) func##A
+# endif
+# endif
+#endif /* WINELIB_NAME_AW */
+
+#ifndef DECL_WINELIB_TYPE_AW
+# ifdef __MINGW_TYPEDEF_AW
+# define DECL_WINELIB_TYPE_AW __MINGW_TYPEDEF_AW
+# else
+# define DECL_WINELIB_TYPE_AW(type) typedef WINELIB_NAME_AW(type) type;
+# endif
+#endif /* DECL_WINELIB_TYPE_AW */
+
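
For context only (again, not part of the patch): the DUMMYUNIONNAME / _ANONYMOUS_UNION machinery above lets a single header declare structs with anonymous unions on compilers that support them and named unions elsewhere, which is how dinput.h below uses it. The struct and member names in this sketch are invented; it assumes <windows.h> has already been included for DWORD and LONG.

    #include <windows.h>
    #include <_mingw_dxhelper.h>

    typedef struct EXAMPLE_EVENT {
        DWORD dwType;
        __GNU_EXTENSION union {
            LONG  lValue;    /* payload when dwType selects the signed form   */
            DWORD dwValue;   /* payload when dwType selects the unsigned form */
        } DUMMYUNIONNAME;
    } EXAMPLE_EVENT;

    /* With NONAMELESSUNION defined the field is reached as e.u.lValue; otherwise
     * the union is anonymous and the same field is simply e.lValue. */
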
diff --git a/chromium/third_party/dawn/third_party/glfw/deps/mingw/dinput.h b/chromium/third_party/dawn/third_party/glfw/deps/mingw/dinput.h
new file mode 100644
index 00000000000..b5754802be7
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/deps/mingw/dinput.h
@@ -0,0 +1,2467 @@
+/*
+ * Copyright (C) the Wine project
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
+ */
+
+#ifndef __DINPUT_INCLUDED__
+#define __DINPUT_INCLUDED__
+
+#define COM_NO_WINDOWS_H
+#include <objbase.h>
+#include <_mingw_dxhelper.h>
+
+#ifndef DIRECTINPUT_VERSION
+#define DIRECTINPUT_VERSION 0x0800
+#endif
+
+/* Classes */
+DEFINE_GUID(CLSID_DirectInput, 0x25E609E0,0xB259,0x11CF,0xBF,0xC7,0x44,0x45,0x53,0x54,0x00,0x00);
+DEFINE_GUID(CLSID_DirectInputDevice, 0x25E609E1,0xB259,0x11CF,0xBF,0xC7,0x44,0x45,0x53,0x54,0x00,0x00);
+
+DEFINE_GUID(CLSID_DirectInput8, 0x25E609E4,0xB259,0x11CF,0xBF,0xC7,0x44,0x45,0x53,0x54,0x00,0x00);
+DEFINE_GUID(CLSID_DirectInputDevice8, 0x25E609E5,0xB259,0x11CF,0xBF,0xC7,0x44,0x45,0x53,0x54,0x00,0x00);
+
+/* Interfaces */
+DEFINE_GUID(IID_IDirectInputA, 0x89521360,0xAA8A,0x11CF,0xBF,0xC7,0x44,0x45,0x53,0x54,0x00,0x00);
+DEFINE_GUID(IID_IDirectInputW, 0x89521361,0xAA8A,0x11CF,0xBF,0xC7,0x44,0x45,0x53,0x54,0x00,0x00);
+DEFINE_GUID(IID_IDirectInput2A, 0x5944E662,0xAA8A,0x11CF,0xBF,0xC7,0x44,0x45,0x53,0x54,0x00,0x00);
+DEFINE_GUID(IID_IDirectInput2W, 0x5944E663,0xAA8A,0x11CF,0xBF,0xC7,0x44,0x45,0x53,0x54,0x00,0x00);
+DEFINE_GUID(IID_IDirectInput7A, 0x9A4CB684,0x236D,0x11D3,0x8E,0x9D,0x00,0xC0,0x4F,0x68,0x44,0xAE);
+DEFINE_GUID(IID_IDirectInput7W, 0x9A4CB685,0x236D,0x11D3,0x8E,0x9D,0x00,0xC0,0x4F,0x68,0x44,0xAE);
+DEFINE_GUID(IID_IDirectInput8A, 0xBF798030,0x483A,0x4DA2,0xAA,0x99,0x5D,0x64,0xED,0x36,0x97,0x00);
+DEFINE_GUID(IID_IDirectInput8W, 0xBF798031,0x483A,0x4DA2,0xAA,0x99,0x5D,0x64,0xED,0x36,0x97,0x00);
+DEFINE_GUID(IID_IDirectInputDeviceA, 0x5944E680,0xC92E,0x11CF,0xBF,0xC7,0x44,0x45,0x53,0x54,0x00,0x00);
+DEFINE_GUID(IID_IDirectInputDeviceW, 0x5944E681,0xC92E,0x11CF,0xBF,0xC7,0x44,0x45,0x53,0x54,0x00,0x00);
+DEFINE_GUID(IID_IDirectInputDevice2A, 0x5944E682,0xC92E,0x11CF,0xBF,0xC7,0x44,0x45,0x53,0x54,0x00,0x00);
+DEFINE_GUID(IID_IDirectInputDevice2W, 0x5944E683,0xC92E,0x11CF,0xBF,0xC7,0x44,0x45,0x53,0x54,0x00,0x00);
+DEFINE_GUID(IID_IDirectInputDevice7A, 0x57D7C6BC,0x2356,0x11D3,0x8E,0x9D,0x00,0xC0,0x4F,0x68,0x44,0xAE);
+DEFINE_GUID(IID_IDirectInputDevice7W, 0x57D7C6BD,0x2356,0x11D3,0x8E,0x9D,0x00,0xC0,0x4F,0x68,0x44,0xAE);
+DEFINE_GUID(IID_IDirectInputDevice8A, 0x54D41080,0xDC15,0x4833,0xA4,0x1B,0x74,0x8F,0x73,0xA3,0x81,0x79);
+DEFINE_GUID(IID_IDirectInputDevice8W, 0x54D41081,0xDC15,0x4833,0xA4,0x1B,0x74,0x8F,0x73,0xA3,0x81,0x79);
+DEFINE_GUID(IID_IDirectInputEffect, 0xE7E1F7C0,0x88D2,0x11D0,0x9A,0xD0,0x00,0xA0,0xC9,0xA0,0x6E,0x35);
+
+/* Predefined object types */
+DEFINE_GUID(GUID_XAxis, 0xA36D02E0,0xC9F3,0x11CF,0xBF,0xC7,0x44,0x45,0x53,0x54,0x00,0x00);
+DEFINE_GUID(GUID_YAxis, 0xA36D02E1,0xC9F3,0x11CF,0xBF,0xC7,0x44,0x45,0x53,0x54,0x00,0x00);
+DEFINE_GUID(GUID_ZAxis, 0xA36D02E2,0xC9F3,0x11CF,0xBF,0xC7,0x44,0x45,0x53,0x54,0x00,0x00);
+DEFINE_GUID(GUID_RxAxis,0xA36D02F4,0xC9F3,0x11CF,0xBF,0xC7,0x44,0x45,0x53,0x54,0x00,0x00);
+DEFINE_GUID(GUID_RyAxis,0xA36D02F5,0xC9F3,0x11CF,0xBF,0xC7,0x44,0x45,0x53,0x54,0x00,0x00);
+DEFINE_GUID(GUID_RzAxis,0xA36D02E3,0xC9F3,0x11CF,0xBF,0xC7,0x44,0x45,0x53,0x54,0x00,0x00);
+DEFINE_GUID(GUID_Slider,0xA36D02E4,0xC9F3,0x11CF,0xBF,0xC7,0x44,0x45,0x53,0x54,0x00,0x00);
+DEFINE_GUID(GUID_Button,0xA36D02F0,0xC9F3,0x11CF,0xBF,0xC7,0x44,0x45,0x53,0x54,0x00,0x00);
+DEFINE_GUID(GUID_Key, 0x55728220,0xD33C,0x11CF,0xBF,0xC7,0x44,0x45,0x53,0x54,0x00,0x00);
+DEFINE_GUID(GUID_POV, 0xA36D02F2,0xC9F3,0x11CF,0xBF,0xC7,0x44,0x45,0x53,0x54,0x00,0x00);
+DEFINE_GUID(GUID_Unknown,0xA36D02F3,0xC9F3,0x11CF,0xBF,0xC7,0x44,0x45,0x53,0x54,0x00,0x00);
+
+/* Predefined product GUIDs */
+DEFINE_GUID(GUID_SysMouse, 0x6F1D2B60,0xD5A0,0x11CF,0xBF,0xC7,0x44,0x45,0x53,0x54,0x00,0x00);
+DEFINE_GUID(GUID_SysKeyboard, 0x6F1D2B61,0xD5A0,0x11CF,0xBF,0xC7,0x44,0x45,0x53,0x54,0x00,0x00);
+DEFINE_GUID(GUID_Joystick, 0x6F1D2B70,0xD5A0,0x11CF,0xBF,0xC7,0x44,0x45,0x53,0x54,0x00,0x00);
+DEFINE_GUID(GUID_SysMouseEm, 0x6F1D2B80,0xD5A0,0x11CF,0xBF,0xC7,0x44,0x45,0x53,0x54,0x00,0x00);
+DEFINE_GUID(GUID_SysMouseEm2, 0x6F1D2B81,0xD5A0,0x11CF,0xBF,0xC7,0x44,0x45,0x53,0x54,0x00,0x00);
+DEFINE_GUID(GUID_SysKeyboardEm, 0x6F1D2B82,0xD5A0,0x11CF,0xBF,0xC7,0x44,0x45,0x53,0x54,0x00,0x00);
+DEFINE_GUID(GUID_SysKeyboardEm2,0x6F1D2B83,0xD5A0,0x11CF,0xBF,0xC7,0x44,0x45,0x53,0x54,0x00,0x00);
+
+/* predefined forcefeedback effects */
+DEFINE_GUID(GUID_ConstantForce, 0x13541C20,0x8E33,0x11D0,0x9A,0xD0,0x00,0xA0,0xC9,0xA0,0x6E,0x35);
+DEFINE_GUID(GUID_RampForce, 0x13541C21,0x8E33,0x11D0,0x9A,0xD0,0x00,0xA0,0xC9,0xA0,0x6E,0x35);
+DEFINE_GUID(GUID_Square, 0x13541C22,0x8E33,0x11D0,0x9A,0xD0,0x00,0xA0,0xC9,0xA0,0x6E,0x35);
+DEFINE_GUID(GUID_Sine, 0x13541C23,0x8E33,0x11D0,0x9A,0xD0,0x00,0xA0,0xC9,0xA0,0x6E,0x35);
+DEFINE_GUID(GUID_Triangle, 0x13541C24,0x8E33,0x11D0,0x9A,0xD0,0x00,0xA0,0xC9,0xA0,0x6E,0x35);
+DEFINE_GUID(GUID_SawtoothUp, 0x13541C25,0x8E33,0x11D0,0x9A,0xD0,0x00,0xA0,0xC9,0xA0,0x6E,0x35);
+DEFINE_GUID(GUID_SawtoothDown, 0x13541C26,0x8E33,0x11D0,0x9A,0xD0,0x00,0xA0,0xC9,0xA0,0x6E,0x35);
+DEFINE_GUID(GUID_Spring, 0x13541C27,0x8E33,0x11D0,0x9A,0xD0,0x00,0xA0,0xC9,0xA0,0x6E,0x35);
+DEFINE_GUID(GUID_Damper, 0x13541C28,0x8E33,0x11D0,0x9A,0xD0,0x00,0xA0,0xC9,0xA0,0x6E,0x35);
+DEFINE_GUID(GUID_Inertia, 0x13541C29,0x8E33,0x11D0,0x9A,0xD0,0x00,0xA0,0xC9,0xA0,0x6E,0x35);
+DEFINE_GUID(GUID_Friction, 0x13541C2A,0x8E33,0x11D0,0x9A,0xD0,0x00,0xA0,0xC9,0xA0,0x6E,0x35);
+DEFINE_GUID(GUID_CustomForce, 0x13541C2B,0x8E33,0x11D0,0x9A,0xD0,0x00,0xA0,0xC9,0xA0,0x6E,0x35);
+
+typedef struct IDirectInputA *LPDIRECTINPUTA;
+typedef struct IDirectInputW *LPDIRECTINPUTW;
+typedef struct IDirectInput2A *LPDIRECTINPUT2A;
+typedef struct IDirectInput2W *LPDIRECTINPUT2W;
+typedef struct IDirectInput7A *LPDIRECTINPUT7A;
+typedef struct IDirectInput7W *LPDIRECTINPUT7W;
+#if DIRECTINPUT_VERSION >= 0x0800
+typedef struct IDirectInput8A *LPDIRECTINPUT8A;
+typedef struct IDirectInput8W *LPDIRECTINPUT8W;
+#endif /* DI8 */
+typedef struct IDirectInputDeviceA *LPDIRECTINPUTDEVICEA;
+typedef struct IDirectInputDeviceW *LPDIRECTINPUTDEVICEW;
+#if DIRECTINPUT_VERSION >= 0x0500
+typedef struct IDirectInputDevice2A *LPDIRECTINPUTDEVICE2A;
+typedef struct IDirectInputDevice2W *LPDIRECTINPUTDEVICE2W;
+#endif /* DI5 */
+#if DIRECTINPUT_VERSION >= 0x0700
+typedef struct IDirectInputDevice7A *LPDIRECTINPUTDEVICE7A;
+typedef struct IDirectInputDevice7W *LPDIRECTINPUTDEVICE7W;
+#endif /* DI7 */
+#if DIRECTINPUT_VERSION >= 0x0800
+typedef struct IDirectInputDevice8A *LPDIRECTINPUTDEVICE8A;
+typedef struct IDirectInputDevice8W *LPDIRECTINPUTDEVICE8W;
+#endif /* DI8 */
+#if DIRECTINPUT_VERSION >= 0x0500
+typedef struct IDirectInputEffect *LPDIRECTINPUTEFFECT;
+#endif /* DI5 */
+typedef struct SysKeyboardA *LPSYSKEYBOARDA;
+typedef struct SysMouseA *LPSYSMOUSEA;
+
+#define IID_IDirectInput WINELIB_NAME_AW(IID_IDirectInput)
+#define IDirectInput WINELIB_NAME_AW(IDirectInput)
+DECL_WINELIB_TYPE_AW(LPDIRECTINPUT)
+#define IID_IDirectInput2 WINELIB_NAME_AW(IID_IDirectInput2)
+#define IDirectInput2 WINELIB_NAME_AW(IDirectInput2)
+DECL_WINELIB_TYPE_AW(LPDIRECTINPUT2)
+#define IID_IDirectInput7 WINELIB_NAME_AW(IID_IDirectInput7)
+#define IDirectInput7 WINELIB_NAME_AW(IDirectInput7)
+DECL_WINELIB_TYPE_AW(LPDIRECTINPUT7)
+#if DIRECTINPUT_VERSION >= 0x0800
+#define IID_IDirectInput8 WINELIB_NAME_AW(IID_IDirectInput8)
+#define IDirectInput8 WINELIB_NAME_AW(IDirectInput8)
+DECL_WINELIB_TYPE_AW(LPDIRECTINPUT8)
+#endif /* DI8 */
+#define IID_IDirectInputDevice WINELIB_NAME_AW(IID_IDirectInputDevice)
+#define IDirectInputDevice WINELIB_NAME_AW(IDirectInputDevice)
+DECL_WINELIB_TYPE_AW(LPDIRECTINPUTDEVICE)
+#if DIRECTINPUT_VERSION >= 0x0500
+#define IID_IDirectInputDevice2 WINELIB_NAME_AW(IID_IDirectInputDevice2)
+#define IDirectInputDevice2 WINELIB_NAME_AW(IDirectInputDevice2)
+DECL_WINELIB_TYPE_AW(LPDIRECTINPUTDEVICE2)
+#endif /* DI5 */
+#if DIRECTINPUT_VERSION >= 0x0700
+#define IID_IDirectInputDevice7 WINELIB_NAME_AW(IID_IDirectInputDevice7)
+#define IDirectInputDevice7 WINELIB_NAME_AW(IDirectInputDevice7)
+DECL_WINELIB_TYPE_AW(LPDIRECTINPUTDEVICE7)
+#endif /* DI7 */
+#if DIRECTINPUT_VERSION >= 0x0800
+#define IID_IDirectInputDevice8 WINELIB_NAME_AW(IID_IDirectInputDevice8)
+#define IDirectInputDevice8 WINELIB_NAME_AW(IDirectInputDevice8)
+DECL_WINELIB_TYPE_AW(LPDIRECTINPUTDEVICE8)
+#endif /* DI8 */
+
+#define DI_OK S_OK
+#define DI_NOTATTACHED S_FALSE
+#define DI_BUFFEROVERFLOW S_FALSE
+#define DI_PROPNOEFFECT S_FALSE
+#define DI_NOEFFECT S_FALSE
+#define DI_POLLEDDEVICE ((HRESULT)0x00000002L)
+#define DI_DOWNLOADSKIPPED ((HRESULT)0x00000003L)
+#define DI_EFFECTRESTARTED ((HRESULT)0x00000004L)
+#define DI_TRUNCATED ((HRESULT)0x00000008L)
+#define DI_SETTINGSNOTSAVED ((HRESULT)0x0000000BL)
+#define DI_TRUNCATEDANDRESTARTED ((HRESULT)0x0000000CL)
+#define DI_WRITEPROTECT ((HRESULT)0x00000013L)
+
+#define DIERR_OLDDIRECTINPUTVERSION \
+ MAKE_HRESULT(SEVERITY_ERROR, FACILITY_WIN32, ERROR_OLD_WIN_VERSION)
+#define DIERR_BETADIRECTINPUTVERSION \
+ MAKE_HRESULT(SEVERITY_ERROR, FACILITY_WIN32, ERROR_RMODE_APP)
+#define DIERR_BADDRIVERVER \
+ MAKE_HRESULT(SEVERITY_ERROR, FACILITY_WIN32, ERROR_BAD_DRIVER_LEVEL)
+#define DIERR_DEVICENOTREG REGDB_E_CLASSNOTREG
+#define DIERR_NOTFOUND \
+ MAKE_HRESULT(SEVERITY_ERROR, FACILITY_WIN32, ERROR_FILE_NOT_FOUND)
+#define DIERR_OBJECTNOTFOUND \
+ MAKE_HRESULT(SEVERITY_ERROR, FACILITY_WIN32, ERROR_FILE_NOT_FOUND)
+#define DIERR_INVALIDPARAM E_INVALIDARG
+#define DIERR_NOINTERFACE E_NOINTERFACE
+#define DIERR_GENERIC E_FAIL
+#define DIERR_OUTOFMEMORY E_OUTOFMEMORY
+#define DIERR_UNSUPPORTED E_NOTIMPL
+#define DIERR_NOTINITIALIZED \
+ MAKE_HRESULT(SEVERITY_ERROR, FACILITY_WIN32, ERROR_NOT_READY)
+#define DIERR_ALREADYINITIALIZED \
+ MAKE_HRESULT(SEVERITY_ERROR, FACILITY_WIN32, ERROR_ALREADY_INITIALIZED)
+#define DIERR_NOAGGREGATION CLASS_E_NOAGGREGATION
+#define DIERR_OTHERAPPHASPRIO E_ACCESSDENIED
+#define DIERR_INPUTLOST \
+ MAKE_HRESULT(SEVERITY_ERROR, FACILITY_WIN32, ERROR_READ_FAULT)
+#define DIERR_ACQUIRED \
+ MAKE_HRESULT(SEVERITY_ERROR, FACILITY_WIN32, ERROR_BUSY)
+#define DIERR_NOTACQUIRED \
+ MAKE_HRESULT(SEVERITY_ERROR, FACILITY_WIN32, ERROR_INVALID_ACCESS)
+#define DIERR_READONLY E_ACCESSDENIED
+#define DIERR_HANDLEEXISTS E_ACCESSDENIED
+#ifndef E_PENDING
+#define E_PENDING 0x8000000AL
+#endif
+#define DIERR_INSUFFICIENTPRIVS 0x80040200L
+#define DIERR_DEVICEFULL 0x80040201L
+#define DIERR_MOREDATA 0x80040202L
+#define DIERR_NOTDOWNLOADED 0x80040203L
+#define DIERR_HASEFFECTS 0x80040204L
+#define DIERR_NOTEXCLUSIVEACQUIRED 0x80040205L
+#define DIERR_INCOMPLETEEFFECT 0x80040206L
+#define DIERR_NOTBUFFERED 0x80040207L
+#define DIERR_EFFECTPLAYING 0x80040208L
+#define DIERR_UNPLUGGED 0x80040209L
+#define DIERR_REPORTFULL 0x8004020AL
+#define DIERR_MAPFILEFAIL 0x8004020BL
+
+#define DIENUM_STOP 0
+#define DIENUM_CONTINUE 1
+
+#define DIEDFL_ALLDEVICES 0x00000000
+#define DIEDFL_ATTACHEDONLY 0x00000001
+#define DIEDFL_FORCEFEEDBACK 0x00000100
+#define DIEDFL_INCLUDEALIASES 0x00010000
+#define DIEDFL_INCLUDEPHANTOMS 0x00020000
+#define DIEDFL_INCLUDEHIDDEN 0x00040000
+
+#define DIDEVTYPE_DEVICE 1
+#define DIDEVTYPE_MOUSE 2
+#define DIDEVTYPE_KEYBOARD 3
+#define DIDEVTYPE_JOYSTICK 4
+#define DIDEVTYPE_HID 0x00010000
+
+#define DI8DEVCLASS_ALL 0
+#define DI8DEVCLASS_DEVICE 1
+#define DI8DEVCLASS_POINTER 2
+#define DI8DEVCLASS_KEYBOARD 3
+#define DI8DEVCLASS_GAMECTRL 4
+
+#define DI8DEVTYPE_DEVICE 0x11
+#define DI8DEVTYPE_MOUSE 0x12
+#define DI8DEVTYPE_KEYBOARD 0x13
+#define DI8DEVTYPE_JOYSTICK 0x14
+#define DI8DEVTYPE_GAMEPAD 0x15
+#define DI8DEVTYPE_DRIVING 0x16
+#define DI8DEVTYPE_FLIGHT 0x17
+#define DI8DEVTYPE_1STPERSON 0x18
+#define DI8DEVTYPE_DEVICECTRL 0x19
+#define DI8DEVTYPE_SCREENPOINTER 0x1A
+#define DI8DEVTYPE_REMOTE 0x1B
+#define DI8DEVTYPE_SUPPLEMENTAL 0x1C
+
+#define DIDEVTYPEMOUSE_UNKNOWN 1
+#define DIDEVTYPEMOUSE_TRADITIONAL 2
+#define DIDEVTYPEMOUSE_FINGERSTICK 3
+#define DIDEVTYPEMOUSE_TOUCHPAD 4
+#define DIDEVTYPEMOUSE_TRACKBALL 5
+
+#define DIDEVTYPEKEYBOARD_UNKNOWN 0
+#define DIDEVTYPEKEYBOARD_PCXT 1
+#define DIDEVTYPEKEYBOARD_OLIVETTI 2
+#define DIDEVTYPEKEYBOARD_PCAT 3
+#define DIDEVTYPEKEYBOARD_PCENH 4
+#define DIDEVTYPEKEYBOARD_NOKIA1050 5
+#define DIDEVTYPEKEYBOARD_NOKIA9140 6
+#define DIDEVTYPEKEYBOARD_NEC98 7
+#define DIDEVTYPEKEYBOARD_NEC98LAPTOP 8
+#define DIDEVTYPEKEYBOARD_NEC98106 9
+#define DIDEVTYPEKEYBOARD_JAPAN106 10
+#define DIDEVTYPEKEYBOARD_JAPANAX 11
+#define DIDEVTYPEKEYBOARD_J3100 12
+
+#define DIDEVTYPEJOYSTICK_UNKNOWN 1
+#define DIDEVTYPEJOYSTICK_TRADITIONAL 2
+#define DIDEVTYPEJOYSTICK_FLIGHTSTICK 3
+#define DIDEVTYPEJOYSTICK_GAMEPAD 4
+#define DIDEVTYPEJOYSTICK_RUDDER 5
+#define DIDEVTYPEJOYSTICK_WHEEL 6
+#define DIDEVTYPEJOYSTICK_HEADTRACKER 7
+
+#define DI8DEVTYPEMOUSE_UNKNOWN 1
+#define DI8DEVTYPEMOUSE_TRADITIONAL 2
+#define DI8DEVTYPEMOUSE_FINGERSTICK 3
+#define DI8DEVTYPEMOUSE_TOUCHPAD 4
+#define DI8DEVTYPEMOUSE_TRACKBALL 5
+#define DI8DEVTYPEMOUSE_ABSOLUTE 6
+
+#define DI8DEVTYPEKEYBOARD_UNKNOWN 0
+#define DI8DEVTYPEKEYBOARD_PCXT 1
+#define DI8DEVTYPEKEYBOARD_OLIVETTI 2
+#define DI8DEVTYPEKEYBOARD_PCAT 3
+#define DI8DEVTYPEKEYBOARD_PCENH 4
+#define DI8DEVTYPEKEYBOARD_NOKIA1050 5
+#define DI8DEVTYPEKEYBOARD_NOKIA9140 6
+#define DI8DEVTYPEKEYBOARD_NEC98 7
+#define DI8DEVTYPEKEYBOARD_NEC98LAPTOP 8
+#define DI8DEVTYPEKEYBOARD_NEC98106 9
+#define DI8DEVTYPEKEYBOARD_JAPAN106 10
+#define DI8DEVTYPEKEYBOARD_JAPANAX 11
+#define DI8DEVTYPEKEYBOARD_J3100 12
+
+#define DI8DEVTYPE_LIMITEDGAMESUBTYPE 1
+
+#define DI8DEVTYPEJOYSTICK_LIMITED DI8DEVTYPE_LIMITEDGAMESUBTYPE
+#define DI8DEVTYPEJOYSTICK_STANDARD 2
+
+#define DI8DEVTYPEGAMEPAD_LIMITED DI8DEVTYPE_LIMITEDGAMESUBTYPE
+#define DI8DEVTYPEGAMEPAD_STANDARD 2
+#define DI8DEVTYPEGAMEPAD_TILT 3
+
+#define DI8DEVTYPEDRIVING_LIMITED DI8DEVTYPE_LIMITEDGAMESUBTYPE
+#define DI8DEVTYPEDRIVING_COMBINEDPEDALS 2
+#define DI8DEVTYPEDRIVING_DUALPEDALS 3
+#define DI8DEVTYPEDRIVING_THREEPEDALS 4
+#define DI8DEVTYPEDRIVING_HANDHELD 5
+
+#define DI8DEVTYPEFLIGHT_LIMITED DI8DEVTYPE_LIMITEDGAMESUBTYPE
+#define DI8DEVTYPEFLIGHT_STICK 2
+#define DI8DEVTYPEFLIGHT_YOKE 3
+#define DI8DEVTYPEFLIGHT_RC 4
+
+#define DI8DEVTYPE1STPERSON_LIMITED DI8DEVTYPE_LIMITEDGAMESUBTYPE
+#define DI8DEVTYPE1STPERSON_UNKNOWN 2
+#define DI8DEVTYPE1STPERSON_SIXDOF 3
+#define DI8DEVTYPE1STPERSON_SHOOTER 4
+
+#define DI8DEVTYPESCREENPTR_UNKNOWN 2
+#define DI8DEVTYPESCREENPTR_LIGHTGUN 3
+#define DI8DEVTYPESCREENPTR_LIGHTPEN 4
+#define DI8DEVTYPESCREENPTR_TOUCH 5
+
+#define DI8DEVTYPEREMOTE_UNKNOWN 2
+
+#define DI8DEVTYPEDEVICECTRL_UNKNOWN 2
+#define DI8DEVTYPEDEVICECTRL_COMMSSELECTION 3
+#define DI8DEVTYPEDEVICECTRL_COMMSSELECTION_HARDWIRED 4
+
+#define DI8DEVTYPESUPPLEMENTAL_UNKNOWN 2
+#define DI8DEVTYPESUPPLEMENTAL_2NDHANDCONTROLLER 3
+#define DI8DEVTYPESUPPLEMENTAL_HEADTRACKER 4
+#define DI8DEVTYPESUPPLEMENTAL_HANDTRACKER 5
+#define DI8DEVTYPESUPPLEMENTAL_SHIFTSTICKGATE 6
+#define DI8DEVTYPESUPPLEMENTAL_SHIFTER 7
+#define DI8DEVTYPESUPPLEMENTAL_THROTTLE 8
+#define DI8DEVTYPESUPPLEMENTAL_SPLITTHROTTLE 9
+#define DI8DEVTYPESUPPLEMENTAL_COMBINEDPEDALS 10
+#define DI8DEVTYPESUPPLEMENTAL_DUALPEDALS 11
+#define DI8DEVTYPESUPPLEMENTAL_THREEPEDALS 12
+#define DI8DEVTYPESUPPLEMENTAL_RUDDERPEDALS 13
+
+#define GET_DIDEVICE_TYPE(dwDevType) LOBYTE(dwDevType)
+#define GET_DIDEVICE_SUBTYPE(dwDevType) HIBYTE(dwDevType)
+
+typedef struct DIDEVICEOBJECTINSTANCE_DX3A {
+ DWORD dwSize;
+ GUID guidType;
+ DWORD dwOfs;
+ DWORD dwType;
+ DWORD dwFlags;
+ CHAR tszName[MAX_PATH];
+} DIDEVICEOBJECTINSTANCE_DX3A, *LPDIDEVICEOBJECTINSTANCE_DX3A;
+typedef const DIDEVICEOBJECTINSTANCE_DX3A *LPCDIDEVICEOBJECTINSTANCE_DX3A;
+typedef struct DIDEVICEOBJECTINSTANCE_DX3W {
+ DWORD dwSize;
+ GUID guidType;
+ DWORD dwOfs;
+ DWORD dwType;
+ DWORD dwFlags;
+ WCHAR tszName[MAX_PATH];
+} DIDEVICEOBJECTINSTANCE_DX3W, *LPDIDEVICEOBJECTINSTANCE_DX3W;
+typedef const DIDEVICEOBJECTINSTANCE_DX3W *LPCDIDEVICEOBJECTINSTANCE_DX3W;
+
+DECL_WINELIB_TYPE_AW(DIDEVICEOBJECTINSTANCE_DX3)
+DECL_WINELIB_TYPE_AW(LPDIDEVICEOBJECTINSTANCE_DX3)
+DECL_WINELIB_TYPE_AW(LPCDIDEVICEOBJECTINSTANCE_DX3)
+
+typedef struct DIDEVICEOBJECTINSTANCEA {
+ DWORD dwSize;
+ GUID guidType;
+ DWORD dwOfs;
+ DWORD dwType;
+ DWORD dwFlags;
+ CHAR tszName[MAX_PATH];
+#if(DIRECTINPUT_VERSION >= 0x0500)
+ DWORD dwFFMaxForce;
+ DWORD dwFFForceResolution;
+ WORD wCollectionNumber;
+ WORD wDesignatorIndex;
+ WORD wUsagePage;
+ WORD wUsage;
+ DWORD dwDimension;
+ WORD wExponent;
+ WORD wReserved;
+#endif /* DIRECTINPUT_VERSION >= 0x0500 */
+} DIDEVICEOBJECTINSTANCEA, *LPDIDEVICEOBJECTINSTANCEA;
+typedef const DIDEVICEOBJECTINSTANCEA *LPCDIDEVICEOBJECTINSTANCEA;
+
+typedef struct DIDEVICEOBJECTINSTANCEW {
+ DWORD dwSize;
+ GUID guidType;
+ DWORD dwOfs;
+ DWORD dwType;
+ DWORD dwFlags;
+ WCHAR tszName[MAX_PATH];
+#if(DIRECTINPUT_VERSION >= 0x0500)
+ DWORD dwFFMaxForce;
+ DWORD dwFFForceResolution;
+ WORD wCollectionNumber;
+ WORD wDesignatorIndex;
+ WORD wUsagePage;
+ WORD wUsage;
+ DWORD dwDimension;
+ WORD wExponent;
+ WORD wReserved;
+#endif /* DIRECTINPUT_VERSION >= 0x0500 */
+} DIDEVICEOBJECTINSTANCEW, *LPDIDEVICEOBJECTINSTANCEW;
+typedef const DIDEVICEOBJECTINSTANCEW *LPCDIDEVICEOBJECTINSTANCEW;
+
+DECL_WINELIB_TYPE_AW(DIDEVICEOBJECTINSTANCE)
+DECL_WINELIB_TYPE_AW(LPDIDEVICEOBJECTINSTANCE)
+DECL_WINELIB_TYPE_AW(LPCDIDEVICEOBJECTINSTANCE)
+
+typedef struct DIDEVICEINSTANCE_DX3A {
+ DWORD dwSize;
+ GUID guidInstance;
+ GUID guidProduct;
+ DWORD dwDevType;
+ CHAR tszInstanceName[MAX_PATH];
+ CHAR tszProductName[MAX_PATH];
+} DIDEVICEINSTANCE_DX3A, *LPDIDEVICEINSTANCE_DX3A;
+typedef const DIDEVICEINSTANCE_DX3A *LPCDIDEVICEINSTANCE_DX3A;
+typedef struct DIDEVICEINSTANCE_DX3W {
+ DWORD dwSize;
+ GUID guidInstance;
+ GUID guidProduct;
+ DWORD dwDevType;
+ WCHAR tszInstanceName[MAX_PATH];
+ WCHAR tszProductName[MAX_PATH];
+} DIDEVICEINSTANCE_DX3W, *LPDIDEVICEINSTANCE_DX3W;
+typedef const DIDEVICEINSTANCE_DX3W *LPCDIDEVICEINSTANCE_DX3W;
+
+DECL_WINELIB_TYPE_AW(DIDEVICEINSTANCE_DX3)
+DECL_WINELIB_TYPE_AW(LPDIDEVICEINSTANCE_DX3)
+DECL_WINELIB_TYPE_AW(LPCDIDEVICEINSTANCE_DX3)
+
+typedef struct DIDEVICEINSTANCEA {
+ DWORD dwSize;
+ GUID guidInstance;
+ GUID guidProduct;
+ DWORD dwDevType;
+ CHAR tszInstanceName[MAX_PATH];
+ CHAR tszProductName[MAX_PATH];
+#if(DIRECTINPUT_VERSION >= 0x0500)
+ GUID guidFFDriver;
+ WORD wUsagePage;
+ WORD wUsage;
+#endif /* DIRECTINPUT_VERSION >= 0x0500 */
+} DIDEVICEINSTANCEA, *LPDIDEVICEINSTANCEA;
+typedef const DIDEVICEINSTANCEA *LPCDIDEVICEINSTANCEA;
+
+typedef struct DIDEVICEINSTANCEW {
+ DWORD dwSize;
+ GUID guidInstance;
+ GUID guidProduct;
+ DWORD dwDevType;
+ WCHAR tszInstanceName[MAX_PATH];
+ WCHAR tszProductName[MAX_PATH];
+#if(DIRECTINPUT_VERSION >= 0x0500)
+ GUID guidFFDriver;
+ WORD wUsagePage;
+ WORD wUsage;
+#endif /* DIRECTINPUT_VERSION >= 0x0500 */
+} DIDEVICEINSTANCEW, *LPDIDEVICEINSTANCEW;
+typedef const DIDEVICEINSTANCEW *LPCDIDEVICEINSTANCEW;
+
+DECL_WINELIB_TYPE_AW(DIDEVICEINSTANCE)
+DECL_WINELIB_TYPE_AW(LPDIDEVICEINSTANCE)
+DECL_WINELIB_TYPE_AW(LPCDIDEVICEINSTANCE)
+
+typedef BOOL (CALLBACK *LPDIENUMDEVICESCALLBACKA)(LPCDIDEVICEINSTANCEA,LPVOID);
+typedef BOOL (CALLBACK *LPDIENUMDEVICESCALLBACKW)(LPCDIDEVICEINSTANCEW,LPVOID);
+DECL_WINELIB_TYPE_AW(LPDIENUMDEVICESCALLBACK)
+
+#define DIEDBS_MAPPEDPRI1 0x00000001
+#define DIEDBS_MAPPEDPRI2 0x00000002
+#define DIEDBS_RECENTDEVICE 0x00000010
+#define DIEDBS_NEWDEVICE 0x00000020
+
+#define DIEDBSFL_ATTACHEDONLY 0x00000000
+#define DIEDBSFL_THISUSER 0x00000010
+#define DIEDBSFL_FORCEFEEDBACK DIEDFL_FORCEFEEDBACK
+#define DIEDBSFL_AVAILABLEDEVICES 0x00001000
+#define DIEDBSFL_MULTIMICEKEYBOARDS 0x00002000
+#define DIEDBSFL_NONGAMINGDEVICES 0x00004000
+#define DIEDBSFL_VALID 0x00007110
+
+#if DIRECTINPUT_VERSION >= 0x0800
+typedef BOOL (CALLBACK *LPDIENUMDEVICESBYSEMANTICSCBA)(LPCDIDEVICEINSTANCEA,LPDIRECTINPUTDEVICE8A,DWORD,DWORD,LPVOID);
+typedef BOOL (CALLBACK *LPDIENUMDEVICESBYSEMANTICSCBW)(LPCDIDEVICEINSTANCEW,LPDIRECTINPUTDEVICE8W,DWORD,DWORD,LPVOID);
+DECL_WINELIB_TYPE_AW(LPDIENUMDEVICESBYSEMANTICSCB)
+#endif
+
+typedef BOOL (CALLBACK *LPDICONFIGUREDEVICESCALLBACK)(LPUNKNOWN,LPVOID);
+
+typedef BOOL (CALLBACK *LPDIENUMDEVICEOBJECTSCALLBACKA)(LPCDIDEVICEOBJECTINSTANCEA,LPVOID);
+typedef BOOL (CALLBACK *LPDIENUMDEVICEOBJECTSCALLBACKW)(LPCDIDEVICEOBJECTINSTANCEW,LPVOID);
+DECL_WINELIB_TYPE_AW(LPDIENUMDEVICEOBJECTSCALLBACK)
+
+#if DIRECTINPUT_VERSION >= 0x0500
+typedef BOOL (CALLBACK *LPDIENUMCREATEDEFFECTOBJECTSCALLBACK)(LPDIRECTINPUTEFFECT, LPVOID);
+#endif
+
+#define DIK_ESCAPE 0x01
+#define DIK_1 0x02
+#define DIK_2 0x03
+#define DIK_3 0x04
+#define DIK_4 0x05
+#define DIK_5 0x06
+#define DIK_6 0x07
+#define DIK_7 0x08
+#define DIK_8 0x09
+#define DIK_9 0x0A
+#define DIK_0 0x0B
+#define DIK_MINUS 0x0C /* - on main keyboard */
+#define DIK_EQUALS 0x0D
+#define DIK_BACK 0x0E /* backspace */
+#define DIK_TAB 0x0F
+#define DIK_Q 0x10
+#define DIK_W 0x11
+#define DIK_E 0x12
+#define DIK_R 0x13
+#define DIK_T 0x14
+#define DIK_Y 0x15
+#define DIK_U 0x16
+#define DIK_I 0x17
+#define DIK_O 0x18
+#define DIK_P 0x19
+#define DIK_LBRACKET 0x1A
+#define DIK_RBRACKET 0x1B
+#define DIK_RETURN 0x1C /* Enter on main keyboard */
+#define DIK_LCONTROL 0x1D
+#define DIK_A 0x1E
+#define DIK_S 0x1F
+#define DIK_D 0x20
+#define DIK_F 0x21
+#define DIK_G 0x22
+#define DIK_H 0x23
+#define DIK_J 0x24
+#define DIK_K 0x25
+#define DIK_L 0x26
+#define DIK_SEMICOLON 0x27
+#define DIK_APOSTROPHE 0x28
+#define DIK_GRAVE 0x29 /* accent grave */
+#define DIK_LSHIFT 0x2A
+#define DIK_BACKSLASH 0x2B
+#define DIK_Z 0x2C
+#define DIK_X 0x2D
+#define DIK_C 0x2E
+#define DIK_V 0x2F
+#define DIK_B 0x30
+#define DIK_N 0x31
+#define DIK_M 0x32
+#define DIK_COMMA 0x33
+#define DIK_PERIOD 0x34 /* . on main keyboard */
+#define DIK_SLASH 0x35 /* / on main keyboard */
+#define DIK_RSHIFT 0x36
+#define DIK_MULTIPLY 0x37 /* * on numeric keypad */
+#define DIK_LMENU 0x38 /* left Alt */
+#define DIK_SPACE 0x39
+#define DIK_CAPITAL 0x3A
+#define DIK_F1 0x3B
+#define DIK_F2 0x3C
+#define DIK_F3 0x3D
+#define DIK_F4 0x3E
+#define DIK_F5 0x3F
+#define DIK_F6 0x40
+#define DIK_F7 0x41
+#define DIK_F8 0x42
+#define DIK_F9 0x43
+#define DIK_F10 0x44
+#define DIK_NUMLOCK 0x45
+#define DIK_SCROLL 0x46 /* Scroll Lock */
+#define DIK_NUMPAD7 0x47
+#define DIK_NUMPAD8 0x48
+#define DIK_NUMPAD9 0x49
+#define DIK_SUBTRACT 0x4A /* - on numeric keypad */
+#define DIK_NUMPAD4 0x4B
+#define DIK_NUMPAD5 0x4C
+#define DIK_NUMPAD6 0x4D
+#define DIK_ADD 0x4E /* + on numeric keypad */
+#define DIK_NUMPAD1 0x4F
+#define DIK_NUMPAD2 0x50
+#define DIK_NUMPAD3 0x51
+#define DIK_NUMPAD0 0x52
+#define DIK_DECIMAL 0x53 /* . on numeric keypad */
+#define DIK_OEM_102 0x56 /* < > | on UK/Germany keyboards */
+#define DIK_F11 0x57
+#define DIK_F12 0x58
+#define DIK_F13 0x64 /* (NEC PC98) */
+#define DIK_F14 0x65 /* (NEC PC98) */
+#define DIK_F15 0x66 /* (NEC PC98) */
+#define DIK_KANA 0x70 /* (Japanese keyboard) */
+#define DIK_ABNT_C1 0x73 /* / ? on Portuguese (Brazilian) keyboards */
+#define DIK_CONVERT 0x79 /* (Japanese keyboard) */
+#define DIK_NOCONVERT 0x7B /* (Japanese keyboard) */
+#define DIK_YEN 0x7D /* (Japanese keyboard) */
+#define DIK_ABNT_C2 0x7E /* Numpad . on Portuguese (Brazilian) keyboards */
+#define DIK_NUMPADEQUALS 0x8D /* = on numeric keypad (NEC PC98) */
+#define DIK_CIRCUMFLEX 0x90 /* (Japanese keyboard) */
+#define DIK_AT 0x91 /* (NEC PC98) */
+#define DIK_COLON 0x92 /* (NEC PC98) */
+#define DIK_UNDERLINE 0x93 /* (NEC PC98) */
+#define DIK_KANJI 0x94 /* (Japanese keyboard) */
+#define DIK_STOP 0x95 /* (NEC PC98) */
+#define DIK_AX 0x96 /* (Japan AX) */
+#define DIK_UNLABELED 0x97 /* (J3100) */
+#define DIK_NEXTTRACK 0x99 /* Next Track */
+#define DIK_NUMPADENTER 0x9C /* Enter on numeric keypad */
+#define DIK_RCONTROL 0x9D
+#define DIK_MUTE 0xA0 /* Mute */
+#define DIK_CALCULATOR 0xA1 /* Calculator */
+#define DIK_PLAYPAUSE 0xA2 /* Play / Pause */
+#define DIK_MEDIASTOP 0xA4 /* Media Stop */
+#define DIK_VOLUMEDOWN 0xAE /* Volume - */
+#define DIK_VOLUMEUP 0xB0 /* Volume + */
+#define DIK_WEBHOME 0xB2 /* Web home */
+#define DIK_NUMPADCOMMA 0xB3 /* , on numeric keypad (NEC PC98) */
+#define DIK_DIVIDE 0xB5 /* / on numeric keypad */
+#define DIK_SYSRQ 0xB7
+#define DIK_RMENU 0xB8 /* right Alt */
+#define DIK_PAUSE 0xC5 /* Pause */
+#define DIK_HOME 0xC7 /* Home on arrow keypad */
+#define DIK_UP 0xC8 /* UpArrow on arrow keypad */
+#define DIK_PRIOR 0xC9 /* PgUp on arrow keypad */
+#define DIK_LEFT 0xCB /* LeftArrow on arrow keypad */
+#define DIK_RIGHT 0xCD /* RightArrow on arrow keypad */
+#define DIK_END 0xCF /* End on arrow keypad */
+#define DIK_DOWN 0xD0 /* DownArrow on arrow keypad */
+#define DIK_NEXT 0xD1 /* PgDn on arrow keypad */
+#define DIK_INSERT 0xD2 /* Insert on arrow keypad */
+#define DIK_DELETE 0xD3 /* Delete on arrow keypad */
+#define DIK_LWIN 0xDB /* Left Windows key */
+#define DIK_RWIN 0xDC /* Right Windows key */
+#define DIK_APPS 0xDD /* AppMenu key */
+#define DIK_POWER 0xDE
+#define DIK_SLEEP 0xDF
+#define DIK_WAKE 0xE3 /* System Wake */
+#define DIK_WEBSEARCH 0xE5 /* Web Search */
+#define DIK_WEBFAVORITES 0xE6 /* Web Favorites */
+#define DIK_WEBREFRESH 0xE7 /* Web Refresh */
+#define DIK_WEBSTOP 0xE8 /* Web Stop */
+#define DIK_WEBFORWARD 0xE9 /* Web Forward */
+#define DIK_WEBBACK 0xEA /* Web Back */
+#define DIK_MYCOMPUTER 0xEB /* My Computer */
+#define DIK_MAIL 0xEC /* Mail */
+#define DIK_MEDIASELECT 0xED /* Media Select */
+
+#define DIK_BACKSPACE DIK_BACK /* backspace */
+#define DIK_NUMPADSTAR DIK_MULTIPLY /* * on numeric keypad */
+#define DIK_LALT DIK_LMENU /* left Alt */
+#define DIK_CAPSLOCK DIK_CAPITAL /* CapsLock */
+#define DIK_NUMPADMINUS DIK_SUBTRACT /* - on numeric keypad */
+#define DIK_NUMPADPLUS DIK_ADD /* + on numeric keypad */
+#define DIK_NUMPADPERIOD DIK_DECIMAL /* . on numeric keypad */
+#define DIK_NUMPADSLASH DIK_DIVIDE /* / on numeric keypad */
+#define DIK_RALT DIK_RMENU /* right Alt */
+#define DIK_UPARROW DIK_UP /* UpArrow on arrow keypad */
+#define DIK_PGUP DIK_PRIOR /* PgUp on arrow keypad */
+#define DIK_LEFTARROW DIK_LEFT /* LeftArrow on arrow keypad */
+#define DIK_RIGHTARROW DIK_RIGHT /* RightArrow on arrow keypad */
+#define DIK_DOWNARROW DIK_DOWN /* DownArrow on arrow keypad */
+#define DIK_PGDN DIK_NEXT /* PgDn on arrow keypad */
+
+#define DIDFT_ALL 0x00000000
+#define DIDFT_RELAXIS 0x00000001
+#define DIDFT_ABSAXIS 0x00000002
+#define DIDFT_AXIS 0x00000003
+#define DIDFT_PSHBUTTON 0x00000004
+#define DIDFT_TGLBUTTON 0x00000008
+#define DIDFT_BUTTON 0x0000000C
+#define DIDFT_POV 0x00000010
+#define DIDFT_COLLECTION 0x00000040
+#define DIDFT_NODATA 0x00000080
+#define DIDFT_ANYINSTANCE 0x00FFFF00
+#define DIDFT_INSTANCEMASK DIDFT_ANYINSTANCE
+#define DIDFT_MAKEINSTANCE(n) ((WORD)(n) << 8)
+#define DIDFT_GETTYPE(n) LOBYTE(n)
+#define DIDFT_GETINSTANCE(n) LOWORD((n) >> 8)
+#define DIDFT_FFACTUATOR 0x01000000
+#define DIDFT_FFEFFECTTRIGGER 0x02000000
+#if DIRECTINPUT_VERSION >= 0x050a
+#define DIDFT_OUTPUT 0x10000000
+#define DIDFT_VENDORDEFINED 0x04000000
+#define DIDFT_ALIAS 0x08000000
+#endif /* DI5a */
+#ifndef DIDFT_OPTIONAL
+#define DIDFT_OPTIONAL 0x80000000
+#endif
+#define DIDFT_ENUMCOLLECTION(n) ((WORD)(n) << 8)
+#define DIDFT_NOCOLLECTION 0x00FFFF00
+
+#define DIDF_ABSAXIS 0x00000001
+#define DIDF_RELAXIS 0x00000002
+
+#define DIGDD_PEEK 0x00000001
+
+#define DISEQUENCE_COMPARE(dwSq1,cmp,dwSq2) ((int)((dwSq1) - (dwSq2)) cmp 0)
+
+typedef struct DIDEVICEOBJECTDATA_DX3 {
+ DWORD dwOfs;
+ DWORD dwData;
+ DWORD dwTimeStamp;
+ DWORD dwSequence;
+} DIDEVICEOBJECTDATA_DX3,*LPDIDEVICEOBJECTDATA_DX3;
+typedef const DIDEVICEOBJECTDATA_DX3 *LPCDIDEVICEOBJECTDATA_DX3;
+
+typedef struct DIDEVICEOBJECTDATA {
+ DWORD dwOfs;
+ DWORD dwData;
+ DWORD dwTimeStamp;
+ DWORD dwSequence;
+#if(DIRECTINPUT_VERSION >= 0x0800)
+ UINT_PTR uAppData;
+#endif /* DIRECTINPUT_VERSION >= 0x0800 */
+} DIDEVICEOBJECTDATA, *LPDIDEVICEOBJECTDATA;
+typedef const DIDEVICEOBJECTDATA *LPCDIDEVICEOBJECTDATA;
+
+typedef struct _DIOBJECTDATAFORMAT {
+ const GUID *pguid;
+ DWORD dwOfs;
+ DWORD dwType;
+ DWORD dwFlags;
+} DIOBJECTDATAFORMAT, *LPDIOBJECTDATAFORMAT;
+typedef const DIOBJECTDATAFORMAT *LPCDIOBJECTDATAFORMAT;
+
+typedef struct _DIDATAFORMAT {
+ DWORD dwSize;
+ DWORD dwObjSize;
+ DWORD dwFlags;
+ DWORD dwDataSize;
+ DWORD dwNumObjs;
+ LPDIOBJECTDATAFORMAT rgodf;
+} DIDATAFORMAT, *LPDIDATAFORMAT;
+typedef const DIDATAFORMAT *LPCDIDATAFORMAT;
+
+#if DIRECTINPUT_VERSION >= 0x0500
+#define DIDOI_FFACTUATOR 0x00000001
+#define DIDOI_FFEFFECTTRIGGER 0x00000002
+#define DIDOI_POLLED 0x00008000
+#define DIDOI_ASPECTPOSITION 0x00000100
+#define DIDOI_ASPECTVELOCITY 0x00000200
+#define DIDOI_ASPECTACCEL 0x00000300
+#define DIDOI_ASPECTFORCE 0x00000400
+#define DIDOI_ASPECTMASK 0x00000F00
+#endif /* DI5 */
+#if DIRECTINPUT_VERSION >= 0x050a
+#define DIDOI_GUIDISUSAGE 0x00010000
+#endif /* DI5a */
+
+typedef struct DIPROPHEADER {
+ DWORD dwSize;
+ DWORD dwHeaderSize;
+ DWORD dwObj;
+ DWORD dwHow;
+} DIPROPHEADER,*LPDIPROPHEADER;
+typedef const DIPROPHEADER *LPCDIPROPHEADER;
+
+#define DIPH_DEVICE 0
+#define DIPH_BYOFFSET 1
+#define DIPH_BYID 2
+#if DIRECTINPUT_VERSION >= 0x050a
+#define DIPH_BYUSAGE 3
+
+#define DIMAKEUSAGEDWORD(UsagePage, Usage) (DWORD)MAKELONG(Usage, UsagePage)
+#endif /* DI5a */
+
+typedef struct DIPROPDWORD {
+ DIPROPHEADER diph;
+ DWORD dwData;
+} DIPROPDWORD, *LPDIPROPDWORD;
+typedef const DIPROPDWORD *LPCDIPROPDWORD;
+
+typedef struct DIPROPRANGE {
+ DIPROPHEADER diph;
+ LONG lMin;
+ LONG lMax;
+} DIPROPRANGE, *LPDIPROPRANGE;
+typedef const DIPROPRANGE *LPCDIPROPRANGE;
+
+#define DIPROPRANGE_NOMIN ((LONG)0x80000000)
+#define DIPROPRANGE_NOMAX ((LONG)0x7FFFFFFF)
+
+#if DIRECTINPUT_VERSION >= 0x050a
+typedef struct DIPROPCAL {
+ DIPROPHEADER diph;
+ LONG lMin;
+ LONG lCenter;
+ LONG lMax;
+} DIPROPCAL, *LPDIPROPCAL;
+typedef const DIPROPCAL *LPCDIPROPCAL;
+
+typedef struct DIPROPCALPOV {
+ DIPROPHEADER diph;
+ LONG lMin[5];
+ LONG lMax[5];
+} DIPROPCALPOV, *LPDIPROPCALPOV;
+typedef const DIPROPCALPOV *LPCDIPROPCALPOV;
+
+typedef struct DIPROPGUIDANDPATH {
+ DIPROPHEADER diph;
+ GUID guidClass;
+ WCHAR wszPath[MAX_PATH];
+} DIPROPGUIDANDPATH, *LPDIPROPGUIDANDPATH;
+typedef const DIPROPGUIDANDPATH *LPCDIPROPGUIDANDPATH;
+
+typedef struct DIPROPSTRING {
+ DIPROPHEADER diph;
+ WCHAR wsz[MAX_PATH];
+} DIPROPSTRING, *LPDIPROPSTRING;
+typedef const DIPROPSTRING *LPCDIPROPSTRING;
+#endif /* DI5a */
+
+#if DIRECTINPUT_VERSION >= 0x0800
+typedef struct DIPROPPOINTER {
+ DIPROPHEADER diph;
+ UINT_PTR uData;
+} DIPROPPOINTER, *LPDIPROPPOINTER;
+typedef const DIPROPPOINTER *LPCDIPROPPOINTER;
+#endif /* DI8 */
+
+/* special property GUIDs */
+#ifdef __cplusplus
+#define MAKEDIPROP(prop) (*(const GUID *)(prop))
+#else
+#define MAKEDIPROP(prop) ((REFGUID)(prop))
+#endif
+#define DIPROP_BUFFERSIZE MAKEDIPROP(1)
+#define DIPROP_AXISMODE MAKEDIPROP(2)
+
+#define DIPROPAXISMODE_ABS 0
+#define DIPROPAXISMODE_REL 1
+
+#define DIPROP_GRANULARITY MAKEDIPROP(3)
+#define DIPROP_RANGE MAKEDIPROP(4)
+#define DIPROP_DEADZONE MAKEDIPROP(5)
+#define DIPROP_SATURATION MAKEDIPROP(6)
+#define DIPROP_FFGAIN MAKEDIPROP(7)
+#define DIPROP_FFLOAD MAKEDIPROP(8)
+#define DIPROP_AUTOCENTER MAKEDIPROP(9)
+
+#define DIPROPAUTOCENTER_OFF 0
+#define DIPROPAUTOCENTER_ON 1
+
+#define DIPROP_CALIBRATIONMODE MAKEDIPROP(10)
+
+#define DIPROPCALIBRATIONMODE_COOKED 0
+#define DIPROPCALIBRATIONMODE_RAW 1
+
+#if DIRECTINPUT_VERSION >= 0x050a
+#define DIPROP_CALIBRATION MAKEDIPROP(11)
+#define DIPROP_GUIDANDPATH MAKEDIPROP(12)
+#define DIPROP_INSTANCENAME MAKEDIPROP(13)
+#define DIPROP_PRODUCTNAME MAKEDIPROP(14)
+#endif
+
+#if DIRECTINPUT_VERSION >= 0x5B2
+#define DIPROP_JOYSTICKID MAKEDIPROP(15)
+#define DIPROP_GETPORTDISPLAYNAME MAKEDIPROP(16)
+#endif
+
+#if DIRECTINPUT_VERSION >= 0x0700
+#define DIPROP_PHYSICALRANGE MAKEDIPROP(18)
+#define DIPROP_LOGICALRANGE MAKEDIPROP(19)
+#endif
+
+#if(DIRECTINPUT_VERSION >= 0x0800)
+#define DIPROP_KEYNAME MAKEDIPROP(20)
+#define DIPROP_CPOINTS MAKEDIPROP(21)
+#define DIPROP_APPDATA MAKEDIPROP(22)
+#define DIPROP_SCANCODE MAKEDIPROP(23)
+#define DIPROP_VIDPID MAKEDIPROP(24)
+#define DIPROP_USERNAME MAKEDIPROP(25)
+#define DIPROP_TYPENAME MAKEDIPROP(26)
+
+#define MAXCPOINTSNUM 8
+
+typedef struct _CPOINT {
+ LONG lP;
+ DWORD dwLog;
+} CPOINT, *PCPOINT;
+
+typedef struct DIPROPCPOINTS {
+ DIPROPHEADER diph;
+ DWORD dwCPointsNum;
+ CPOINT cp[MAXCPOINTSNUM];
+} DIPROPCPOINTS, *LPDIPROPCPOINTS;
+typedef const DIPROPCPOINTS *LPCDIPROPCPOINTS;
+#endif /* DI8 */
+
+
+typedef struct DIDEVCAPS_DX3 {
+ DWORD dwSize;
+ DWORD dwFlags;
+ DWORD dwDevType;
+ DWORD dwAxes;
+ DWORD dwButtons;
+ DWORD dwPOVs;
+} DIDEVCAPS_DX3, *LPDIDEVCAPS_DX3;
+
+typedef struct DIDEVCAPS {
+ DWORD dwSize;
+ DWORD dwFlags;
+ DWORD dwDevType;
+ DWORD dwAxes;
+ DWORD dwButtons;
+ DWORD dwPOVs;
+#if(DIRECTINPUT_VERSION >= 0x0500)
+ DWORD dwFFSamplePeriod;
+ DWORD dwFFMinTimeResolution;
+ DWORD dwFirmwareRevision;
+ DWORD dwHardwareRevision;
+ DWORD dwFFDriverVersion;
+#endif /* DIRECTINPUT_VERSION >= 0x0500 */
+} DIDEVCAPS,*LPDIDEVCAPS;
+
+#define DIDC_ATTACHED 0x00000001
+#define DIDC_POLLEDDEVICE 0x00000002
+#define DIDC_EMULATED 0x00000004
+#define DIDC_POLLEDDATAFORMAT 0x00000008
+#define DIDC_FORCEFEEDBACK 0x00000100
+#define DIDC_FFATTACK 0x00000200
+#define DIDC_FFFADE 0x00000400
+#define DIDC_SATURATION 0x00000800
+#define DIDC_POSNEGCOEFFICIENTS 0x00001000
+#define DIDC_POSNEGSATURATION 0x00002000
+#define DIDC_DEADBAND 0x00004000
+#define DIDC_STARTDELAY 0x00008000
+#define DIDC_ALIAS 0x00010000
+#define DIDC_PHANTOM 0x00020000
+#define DIDC_HIDDEN 0x00040000
+
+
+/* SetCooperativeLevel dwFlags */
+#define DISCL_EXCLUSIVE 0x00000001
+#define DISCL_NONEXCLUSIVE 0x00000002
+#define DISCL_FOREGROUND 0x00000004
+#define DISCL_BACKGROUND 0x00000008
+#define DISCL_NOWINKEY 0x00000010
+
+#if (DIRECTINPUT_VERSION >= 0x0500)
+/* Device FF flags */
+#define DISFFC_RESET 0x00000001
+#define DISFFC_STOPALL 0x00000002
+#define DISFFC_PAUSE 0x00000004
+#define DISFFC_CONTINUE 0x00000008
+#define DISFFC_SETACTUATORSON 0x00000010
+#define DISFFC_SETACTUATORSOFF 0x00000020
+
+#define DIGFFS_EMPTY 0x00000001
+#define DIGFFS_STOPPED 0x00000002
+#define DIGFFS_PAUSED 0x00000004
+#define DIGFFS_ACTUATORSON 0x00000010
+#define DIGFFS_ACTUATORSOFF 0x00000020
+#define DIGFFS_POWERON 0x00000040
+#define DIGFFS_POWEROFF 0x00000080
+#define DIGFFS_SAFETYSWITCHON 0x00000100
+#define DIGFFS_SAFETYSWITCHOFF 0x00000200
+#define DIGFFS_USERFFSWITCHON 0x00000400
+#define DIGFFS_USERFFSWITCHOFF 0x00000800
+#define DIGFFS_DEVICELOST 0x80000000
+
+/* Effect flags */
+#define DIEFT_ALL 0x00000000
+
+#define DIEFT_CONSTANTFORCE 0x00000001
+#define DIEFT_RAMPFORCE 0x00000002
+#define DIEFT_PERIODIC 0x00000003
+#define DIEFT_CONDITION 0x00000004
+#define DIEFT_CUSTOMFORCE 0x00000005
+#define DIEFT_HARDWARE 0x000000FF
+#define DIEFT_FFATTACK 0x00000200
+#define DIEFT_FFFADE 0x00000400
+#define DIEFT_SATURATION 0x00000800
+#define DIEFT_POSNEGCOEFFICIENTS 0x00001000
+#define DIEFT_POSNEGSATURATION 0x00002000
+#define DIEFT_DEADBAND 0x00004000
+#define DIEFT_STARTDELAY 0x00008000
+#define DIEFT_GETTYPE(n) LOBYTE(n)
+
+#define DIEFF_OBJECTIDS 0x00000001
+#define DIEFF_OBJECTOFFSETS 0x00000002
+#define DIEFF_CARTESIAN 0x00000010
+#define DIEFF_POLAR 0x00000020
+#define DIEFF_SPHERICAL 0x00000040
+
+#define DIEP_DURATION 0x00000001
+#define DIEP_SAMPLEPERIOD 0x00000002
+#define DIEP_GAIN 0x00000004
+#define DIEP_TRIGGERBUTTON 0x00000008
+#define DIEP_TRIGGERREPEATINTERVAL 0x00000010
+#define DIEP_AXES 0x00000020
+#define DIEP_DIRECTION 0x00000040
+#define DIEP_ENVELOPE 0x00000080
+#define DIEP_TYPESPECIFICPARAMS 0x00000100
+#if(DIRECTINPUT_VERSION >= 0x0600)
+#define DIEP_STARTDELAY 0x00000200
+#define DIEP_ALLPARAMS_DX5 0x000001FF
+#define DIEP_ALLPARAMS 0x000003FF
+#else
+#define DIEP_ALLPARAMS 0x000001FF
+#endif /* DIRECTINPUT_VERSION >= 0x0600 */
+#define DIEP_START 0x20000000
+#define DIEP_NORESTART 0x40000000
+#define DIEP_NODOWNLOAD 0x80000000
+#define DIEB_NOTRIGGER 0xFFFFFFFF
+
+#define DIES_SOLO 0x00000001
+#define DIES_NODOWNLOAD 0x80000000
+
+#define DIEGES_PLAYING 0x00000001
+#define DIEGES_EMULATED 0x00000002
+
+#define DI_DEGREES 100
+#define DI_FFNOMINALMAX 10000
+#define DI_SECONDS 1000000
+
+typedef struct DICONSTANTFORCE {
+ LONG lMagnitude;
+} DICONSTANTFORCE, *LPDICONSTANTFORCE;
+typedef const DICONSTANTFORCE *LPCDICONSTANTFORCE;
+
+typedef struct DIRAMPFORCE {
+ LONG lStart;
+ LONG lEnd;
+} DIRAMPFORCE, *LPDIRAMPFORCE;
+typedef const DIRAMPFORCE *LPCDIRAMPFORCE;
+
+typedef struct DIPERIODIC {
+ DWORD dwMagnitude;
+ LONG lOffset;
+ DWORD dwPhase;
+ DWORD dwPeriod;
+} DIPERIODIC, *LPDIPERIODIC;
+typedef const DIPERIODIC *LPCDIPERIODIC;
+
+typedef struct DICONDITION {
+ LONG lOffset;
+ LONG lPositiveCoefficient;
+ LONG lNegativeCoefficient;
+ DWORD dwPositiveSaturation;
+ DWORD dwNegativeSaturation;
+ LONG lDeadBand;
+} DICONDITION, *LPDICONDITION;
+typedef const DICONDITION *LPCDICONDITION;
+
+typedef struct DICUSTOMFORCE {
+ DWORD cChannels;
+ DWORD dwSamplePeriod;
+ DWORD cSamples;
+ LPLONG rglForceData;
+} DICUSTOMFORCE, *LPDICUSTOMFORCE;
+typedef const DICUSTOMFORCE *LPCDICUSTOMFORCE;
+
+typedef struct DIENVELOPE {
+ DWORD dwSize;
+ DWORD dwAttackLevel;
+ DWORD dwAttackTime;
+ DWORD dwFadeLevel;
+ DWORD dwFadeTime;
+} DIENVELOPE, *LPDIENVELOPE;
+typedef const DIENVELOPE *LPCDIENVELOPE;
+
+typedef struct DIEFFECT_DX5 {
+ DWORD dwSize;
+ DWORD dwFlags;
+ DWORD dwDuration;
+ DWORD dwSamplePeriod;
+ DWORD dwGain;
+ DWORD dwTriggerButton;
+ DWORD dwTriggerRepeatInterval;
+ DWORD cAxes;
+ LPDWORD rgdwAxes;
+ LPLONG rglDirection;
+ LPDIENVELOPE lpEnvelope;
+ DWORD cbTypeSpecificParams;
+ LPVOID lpvTypeSpecificParams;
+} DIEFFECT_DX5, *LPDIEFFECT_DX5;
+typedef const DIEFFECT_DX5 *LPCDIEFFECT_DX5;
+
+typedef struct DIEFFECT {
+ DWORD dwSize;
+ DWORD dwFlags;
+ DWORD dwDuration;
+ DWORD dwSamplePeriod;
+ DWORD dwGain;
+ DWORD dwTriggerButton;
+ DWORD dwTriggerRepeatInterval;
+ DWORD cAxes;
+ LPDWORD rgdwAxes;
+ LPLONG rglDirection;
+ LPDIENVELOPE lpEnvelope;
+ DWORD cbTypeSpecificParams;
+ LPVOID lpvTypeSpecificParams;
+#if(DIRECTINPUT_VERSION >= 0x0600)
+ DWORD dwStartDelay;
+#endif /* DIRECTINPUT_VERSION >= 0x0600 */
+} DIEFFECT, *LPDIEFFECT;
+typedef const DIEFFECT *LPCDIEFFECT;
+typedef DIEFFECT DIEFFECT_DX6;
+typedef LPDIEFFECT LPDIEFFECT_DX6;
+
+typedef struct DIEFFECTINFOA {
+ DWORD dwSize;
+ GUID guid;
+ DWORD dwEffType;
+ DWORD dwStaticParams;
+ DWORD dwDynamicParams;
+ CHAR tszName[MAX_PATH];
+} DIEFFECTINFOA, *LPDIEFFECTINFOA;
+typedef const DIEFFECTINFOA *LPCDIEFFECTINFOA;
+
+typedef struct DIEFFECTINFOW {
+ DWORD dwSize;
+ GUID guid;
+ DWORD dwEffType;
+ DWORD dwStaticParams;
+ DWORD dwDynamicParams;
+ WCHAR tszName[MAX_PATH];
+} DIEFFECTINFOW, *LPDIEFFECTINFOW;
+typedef const DIEFFECTINFOW *LPCDIEFFECTINFOW;
+
+DECL_WINELIB_TYPE_AW(DIEFFECTINFO)
+DECL_WINELIB_TYPE_AW(LPDIEFFECTINFO)
+DECL_WINELIB_TYPE_AW(LPCDIEFFECTINFO)
+
+typedef BOOL (CALLBACK *LPDIENUMEFFECTSCALLBACKA)(LPCDIEFFECTINFOA, LPVOID);
+typedef BOOL (CALLBACK *LPDIENUMEFFECTSCALLBACKW)(LPCDIEFFECTINFOW, LPVOID);
+
+typedef struct DIEFFESCAPE {
+ DWORD dwSize;
+ DWORD dwCommand;
+ LPVOID lpvInBuffer;
+ DWORD cbInBuffer;
+ LPVOID lpvOutBuffer;
+ DWORD cbOutBuffer;
+} DIEFFESCAPE, *LPDIEFFESCAPE;
+
+typedef struct DIJOYSTATE {
+ LONG lX;
+ LONG lY;
+ LONG lZ;
+ LONG lRx;
+ LONG lRy;
+ LONG lRz;
+ LONG rglSlider[2];
+ DWORD rgdwPOV[4];
+ BYTE rgbButtons[32];
+} DIJOYSTATE, *LPDIJOYSTATE;
+
+typedef struct DIJOYSTATE2 {
+ LONG lX;
+ LONG lY;
+ LONG lZ;
+ LONG lRx;
+ LONG lRy;
+ LONG lRz;
+ LONG rglSlider[2];
+ DWORD rgdwPOV[4];
+ BYTE rgbButtons[128];
+ LONG lVX; /* 'v' as in velocity */
+ LONG lVY;
+ LONG lVZ;
+ LONG lVRx;
+ LONG lVRy;
+ LONG lVRz;
+ LONG rglVSlider[2];
+ LONG lAX; /* 'a' as in acceleration */
+ LONG lAY;
+ LONG lAZ;
+ LONG lARx;
+ LONG lARy;
+ LONG lARz;
+ LONG rglASlider[2];
+ LONG lFX; /* 'f' as in force */
+ LONG lFY;
+ LONG lFZ;
+ LONG lFRx; /* 'fr' as in rotational force aka torque */
+ LONG lFRy;
+ LONG lFRz;
+ LONG rglFSlider[2];
+} DIJOYSTATE2, *LPDIJOYSTATE2;
+
+#define DIJOFS_X FIELD_OFFSET(DIJOYSTATE, lX)
+#define DIJOFS_Y FIELD_OFFSET(DIJOYSTATE, lY)
+#define DIJOFS_Z FIELD_OFFSET(DIJOYSTATE, lZ)
+#define DIJOFS_RX FIELD_OFFSET(DIJOYSTATE, lRx)
+#define DIJOFS_RY FIELD_OFFSET(DIJOYSTATE, lRy)
+#define DIJOFS_RZ FIELD_OFFSET(DIJOYSTATE, lRz)
+#define DIJOFS_SLIDER(n) (FIELD_OFFSET(DIJOYSTATE, rglSlider) + \
+ (n) * sizeof(LONG))
+#define DIJOFS_POV(n) (FIELD_OFFSET(DIJOYSTATE, rgdwPOV) + \
+ (n) * sizeof(DWORD))
+#define DIJOFS_BUTTON(n) (FIELD_OFFSET(DIJOYSTATE, rgbButtons) + (n))
+#define DIJOFS_BUTTON0 DIJOFS_BUTTON(0)
+#define DIJOFS_BUTTON1 DIJOFS_BUTTON(1)
+#define DIJOFS_BUTTON2 DIJOFS_BUTTON(2)
+#define DIJOFS_BUTTON3 DIJOFS_BUTTON(3)
+#define DIJOFS_BUTTON4 DIJOFS_BUTTON(4)
+#define DIJOFS_BUTTON5 DIJOFS_BUTTON(5)
+#define DIJOFS_BUTTON6 DIJOFS_BUTTON(6)
+#define DIJOFS_BUTTON7 DIJOFS_BUTTON(7)
+#define DIJOFS_BUTTON8 DIJOFS_BUTTON(8)
+#define DIJOFS_BUTTON9 DIJOFS_BUTTON(9)
+#define DIJOFS_BUTTON10 DIJOFS_BUTTON(10)
+#define DIJOFS_BUTTON11 DIJOFS_BUTTON(11)
+#define DIJOFS_BUTTON12 DIJOFS_BUTTON(12)
+#define DIJOFS_BUTTON13 DIJOFS_BUTTON(13)
+#define DIJOFS_BUTTON14 DIJOFS_BUTTON(14)
+#define DIJOFS_BUTTON15 DIJOFS_BUTTON(15)
+#define DIJOFS_BUTTON16 DIJOFS_BUTTON(16)
+#define DIJOFS_BUTTON17 DIJOFS_BUTTON(17)
+#define DIJOFS_BUTTON18 DIJOFS_BUTTON(18)
+#define DIJOFS_BUTTON19 DIJOFS_BUTTON(19)
+#define DIJOFS_BUTTON20 DIJOFS_BUTTON(20)
+#define DIJOFS_BUTTON21 DIJOFS_BUTTON(21)
+#define DIJOFS_BUTTON22 DIJOFS_BUTTON(22)
+#define DIJOFS_BUTTON23 DIJOFS_BUTTON(23)
+#define DIJOFS_BUTTON24 DIJOFS_BUTTON(24)
+#define DIJOFS_BUTTON25 DIJOFS_BUTTON(25)
+#define DIJOFS_BUTTON26 DIJOFS_BUTTON(26)
+#define DIJOFS_BUTTON27 DIJOFS_BUTTON(27)
+#define DIJOFS_BUTTON28 DIJOFS_BUTTON(28)
+#define DIJOFS_BUTTON29 DIJOFS_BUTTON(29)
+#define DIJOFS_BUTTON30 DIJOFS_BUTTON(30)
+#define DIJOFS_BUTTON31 DIJOFS_BUTTON(31)
+#endif /* DIRECTINPUT_VERSION >= 0x0500 */
+
+/* DInput 7 structures, types */
+#if(DIRECTINPUT_VERSION >= 0x0700)
+typedef struct DIFILEEFFECT {
+ DWORD dwSize;
+ GUID GuidEffect;
+ LPCDIEFFECT lpDiEffect;
+ CHAR szFriendlyName[MAX_PATH];
+} DIFILEEFFECT, *LPDIFILEEFFECT;
+
+typedef const DIFILEEFFECT *LPCDIFILEEFFECT;
+typedef BOOL (CALLBACK *LPDIENUMEFFECTSINFILECALLBACK)(LPCDIFILEEFFECT , LPVOID);
+#endif /* DIRECTINPUT_VERSION >= 0x0700 */
+
+/* DInput 8 structures and types */
+#if DIRECTINPUT_VERSION >= 0x0800
+typedef struct _DIACTIONA {
+ UINT_PTR uAppData;
+ DWORD dwSemantic;
+ DWORD dwFlags;
+ __GNU_EXTENSION union {
+ LPCSTR lptszActionName;
+ UINT uResIdString;
+ } DUMMYUNIONNAME;
+ GUID guidInstance;
+ DWORD dwObjID;
+ DWORD dwHow;
+} DIACTIONA, *LPDIACTIONA;
+typedef const DIACTIONA *LPCDIACTIONA;
+
+typedef struct _DIACTIONW {
+ UINT_PTR uAppData;
+ DWORD dwSemantic;
+ DWORD dwFlags;
+ __GNU_EXTENSION union {
+ LPCWSTR lptszActionName;
+ UINT uResIdString;
+ } DUMMYUNIONNAME;
+ GUID guidInstance;
+ DWORD dwObjID;
+ DWORD dwHow;
+} DIACTIONW, *LPDIACTIONW;
+typedef const DIACTIONW *LPCDIACTIONW;
+
+DECL_WINELIB_TYPE_AW(DIACTION)
+DECL_WINELIB_TYPE_AW(LPDIACTION)
+DECL_WINELIB_TYPE_AW(LPCDIACTION)
+
+#define DIA_FORCEFEEDBACK 0x00000001
+#define DIA_APPMAPPED 0x00000002
+#define DIA_APPNOMAP 0x00000004
+#define DIA_NORANGE 0x00000008
+#define DIA_APPFIXED 0x00000010
+
+#define DIAH_UNMAPPED 0x00000000
+#define DIAH_USERCONFIG 0x00000001
+#define DIAH_APPREQUESTED 0x00000002
+#define DIAH_HWAPP 0x00000004
+#define DIAH_HWDEFAULT 0x00000008
+#define DIAH_DEFAULT 0x00000020
+#define DIAH_ERROR 0x80000000
+
+typedef struct _DIACTIONFORMATA {
+ DWORD dwSize;
+ DWORD dwActionSize;
+ DWORD dwDataSize;
+ DWORD dwNumActions;
+ LPDIACTIONA rgoAction;
+ GUID guidActionMap;
+ DWORD dwGenre;
+ DWORD dwBufferSize;
+ LONG lAxisMin;
+ LONG lAxisMax;
+ HINSTANCE hInstString;
+ FILETIME ftTimeStamp;
+ DWORD dwCRC;
+ CHAR tszActionMap[MAX_PATH];
+} DIACTIONFORMATA, *LPDIACTIONFORMATA;
+typedef const DIACTIONFORMATA *LPCDIACTIONFORMATA;
+
+typedef struct _DIACTIONFORMATW {
+ DWORD dwSize;
+ DWORD dwActionSize;
+ DWORD dwDataSize;
+ DWORD dwNumActions;
+ LPDIACTIONW rgoAction;
+ GUID guidActionMap;
+ DWORD dwGenre;
+ DWORD dwBufferSize;
+ LONG lAxisMin;
+ LONG lAxisMax;
+ HINSTANCE hInstString;
+ FILETIME ftTimeStamp;
+ DWORD dwCRC;
+ WCHAR tszActionMap[MAX_PATH];
+} DIACTIONFORMATW, *LPDIACTIONFORMATW;
+typedef const DIACTIONFORMATW *LPCDIACTIONFORMATW;
+
+DECL_WINELIB_TYPE_AW(DIACTIONFORMAT)
+DECL_WINELIB_TYPE_AW(LPDIACTIONFORMAT)
+DECL_WINELIB_TYPE_AW(LPCDIACTIONFORMAT)
+
+#define DIAFTS_NEWDEVICELOW 0xFFFFFFFF
+#define DIAFTS_NEWDEVICEHIGH 0xFFFFFFFF
+#define DIAFTS_UNUSEDDEVICELOW 0x00000000
+#define DIAFTS_UNUSEDDEVICEHIGH 0x00000000
+
+#define DIDBAM_DEFAULT 0x00000000
+#define DIDBAM_PRESERVE 0x00000001
+#define DIDBAM_INITIALIZE 0x00000002
+#define DIDBAM_HWDEFAULTS 0x00000004
+
+#define DIDSAM_DEFAULT 0x00000000
+#define DIDSAM_NOUSER 0x00000001
+#define DIDSAM_FORCESAVE 0x00000002
+
+#define DICD_DEFAULT 0x00000000
+#define DICD_EDIT 0x00000001
+
+#ifndef D3DCOLOR_DEFINED
+typedef DWORD D3DCOLOR;
+#define D3DCOLOR_DEFINED
+#endif
+
+typedef struct _DICOLORSET {
+ DWORD dwSize;
+ D3DCOLOR cTextFore;
+ D3DCOLOR cTextHighlight;
+ D3DCOLOR cCalloutLine;
+ D3DCOLOR cCalloutHighlight;
+ D3DCOLOR cBorder;
+ D3DCOLOR cControlFill;
+ D3DCOLOR cHighlightFill;
+ D3DCOLOR cAreaFill;
+} DICOLORSET, *LPDICOLORSET;
+typedef const DICOLORSET *LPCDICOLORSET;
+
+typedef struct _DICONFIGUREDEVICESPARAMSA {
+ DWORD dwSize;
+ DWORD dwcUsers;
+ LPSTR lptszUserNames;
+ DWORD dwcFormats;
+ LPDIACTIONFORMATA lprgFormats;
+ HWND hwnd;
+ DICOLORSET dics;
+ LPUNKNOWN lpUnkDDSTarget;
+} DICONFIGUREDEVICESPARAMSA, *LPDICONFIGUREDEVICESPARAMSA;
+typedef const DICONFIGUREDEVICESPARAMSA *LPCDICONFIGUREDEVICESPARAMSA;
+
+typedef struct _DICONFIGUREDEVICESPARAMSW {
+ DWORD dwSize;
+ DWORD dwcUsers;
+ LPWSTR lptszUserNames;
+ DWORD dwcFormats;
+ LPDIACTIONFORMATW lprgFormats;
+ HWND hwnd;
+ DICOLORSET dics;
+ LPUNKNOWN lpUnkDDSTarget;
+} DICONFIGUREDEVICESPARAMSW, *LPDICONFIGUREDEVICESPARAMSW;
+typedef const DICONFIGUREDEVICESPARAMSW *LPCDICONFIGUREDEVICESPARAMSW;
+
+DECL_WINELIB_TYPE_AW(DICONFIGUREDEVICESPARAMS)
+DECL_WINELIB_TYPE_AW(LPDICONFIGUREDEVICESPARAMS)
+DECL_WINELIB_TYPE_AW(LPCDICONFIGUREDEVICESPARAMS)
+
+#define DIDIFT_CONFIGURATION 0x00000001
+#define DIDIFT_OVERLAY 0x00000002
+
+#define DIDAL_CENTERED 0x00000000
+#define DIDAL_LEFTALIGNED 0x00000001
+#define DIDAL_RIGHTALIGNED 0x00000002
+#define DIDAL_MIDDLE 0x00000000
+#define DIDAL_TOPALIGNED 0x00000004
+#define DIDAL_BOTTOMALIGNED 0x00000008
+
+typedef struct _DIDEVICEIMAGEINFOA {
+ CHAR tszImagePath[MAX_PATH];
+ DWORD dwFlags;
+ DWORD dwViewID;
+ RECT rcOverlay;
+ DWORD dwObjID;
+ DWORD dwcValidPts;
+ POINT rgptCalloutLine[5];
+ RECT rcCalloutRect;
+ DWORD dwTextAlign;
+} DIDEVICEIMAGEINFOA, *LPDIDEVICEIMAGEINFOA;
+typedef const DIDEVICEIMAGEINFOA *LPCDIDEVICEIMAGEINFOA;
+
+typedef struct _DIDEVICEIMAGEINFOW {
+ WCHAR tszImagePath[MAX_PATH];
+ DWORD dwFlags;
+ DWORD dwViewID;
+ RECT rcOverlay;
+ DWORD dwObjID;
+ DWORD dwcValidPts;
+ POINT rgptCalloutLine[5];
+ RECT rcCalloutRect;
+ DWORD dwTextAlign;
+} DIDEVICEIMAGEINFOW, *LPDIDEVICEIMAGEINFOW;
+typedef const DIDEVICEIMAGEINFOW *LPCDIDEVICEIMAGEINFOW;
+
+DECL_WINELIB_TYPE_AW(DIDEVICEIMAGEINFO)
+DECL_WINELIB_TYPE_AW(LPDIDEVICEIMAGEINFO)
+DECL_WINELIB_TYPE_AW(LPCDIDEVICEIMAGEINFO)
+
+typedef struct _DIDEVICEIMAGEINFOHEADERA {
+ DWORD dwSize;
+ DWORD dwSizeImageInfo;
+ DWORD dwcViews;
+ DWORD dwcButtons;
+ DWORD dwcAxes;
+ DWORD dwcPOVs;
+ DWORD dwBufferSize;
+ DWORD dwBufferUsed;
+ LPDIDEVICEIMAGEINFOA lprgImageInfoArray;
+} DIDEVICEIMAGEINFOHEADERA, *LPDIDEVICEIMAGEINFOHEADERA;
+typedef const DIDEVICEIMAGEINFOHEADERA *LPCDIDEVICEIMAGEINFOHEADERA;
+
+typedef struct _DIDEVICEIMAGEINFOHEADERW {
+ DWORD dwSize;
+ DWORD dwSizeImageInfo;
+ DWORD dwcViews;
+ DWORD dwcButtons;
+ DWORD dwcAxes;
+ DWORD dwcPOVs;
+ DWORD dwBufferSize;
+ DWORD dwBufferUsed;
+ LPDIDEVICEIMAGEINFOW lprgImageInfoArray;
+} DIDEVICEIMAGEINFOHEADERW, *LPDIDEVICEIMAGEINFOHEADERW;
+typedef const DIDEVICEIMAGEINFOHEADERW *LPCDIDEVICEIMAGEINFOHEADERW;
+
+DECL_WINELIB_TYPE_AW(DIDEVICEIMAGEINFOHEADER)
+DECL_WINELIB_TYPE_AW(LPDIDEVICEIMAGEINFOHEADER)
+DECL_WINELIB_TYPE_AW(LPCDIDEVICEIMAGEINFOHEADER)
+
+#endif /* DI8 */
+
+
+/*****************************************************************************
+ * IDirectInputEffect interface
+ */
+#if (DIRECTINPUT_VERSION >= 0x0500)
+#undef INTERFACE
+#define INTERFACE IDirectInputEffect
+DECLARE_INTERFACE_(IDirectInputEffect,IUnknown)
+{
+ /*** IUnknown methods ***/
+ STDMETHOD_(HRESULT,QueryInterface)(THIS_ REFIID riid, void** ppvObject) PURE;
+ STDMETHOD_(ULONG,AddRef)(THIS) PURE;
+ STDMETHOD_(ULONG,Release)(THIS) PURE;
+ /*** IDirectInputEffect methods ***/
+ STDMETHOD(Initialize)(THIS_ HINSTANCE, DWORD, REFGUID) PURE;
+ STDMETHOD(GetEffectGuid)(THIS_ LPGUID) PURE;
+ STDMETHOD(GetParameters)(THIS_ LPDIEFFECT, DWORD) PURE;
+ STDMETHOD(SetParameters)(THIS_ LPCDIEFFECT, DWORD) PURE;
+ STDMETHOD(Start)(THIS_ DWORD, DWORD) PURE;
+ STDMETHOD(Stop)(THIS) PURE;
+ STDMETHOD(GetEffectStatus)(THIS_ LPDWORD) PURE;
+ STDMETHOD(Download)(THIS) PURE;
+ STDMETHOD(Unload)(THIS) PURE;
+ STDMETHOD(Escape)(THIS_ LPDIEFFESCAPE) PURE;
+};
+
+#if !defined(__cplusplus) || defined(CINTERFACE)
+/*** IUnknown methods ***/
+#define IDirectInputEffect_QueryInterface(p,a,b) (p)->lpVtbl->QueryInterface(p,a,b)
+#define IDirectInputEffect_AddRef(p) (p)->lpVtbl->AddRef(p)
+#define IDirectInputEffect_Release(p) (p)->lpVtbl->Release(p)
+/*** IDirectInputEffect methods ***/
+#define IDirectInputEffect_Initialize(p,a,b,c) (p)->lpVtbl->Initialize(p,a,b,c)
+#define IDirectInputEffect_GetEffectGuid(p,a) (p)->lpVtbl->GetEffectGuid(p,a)
+#define IDirectInputEffect_GetParameters(p,a,b) (p)->lpVtbl->GetParameters(p,a,b)
+#define IDirectInputEffect_SetParameters(p,a,b) (p)->lpVtbl->SetParameters(p,a,b)
+#define IDirectInputEffect_Start(p,a,b) (p)->lpVtbl->Start(p,a,b)
+#define IDirectInputEffect_Stop(p) (p)->lpVtbl->Stop(p)
+#define IDirectInputEffect_GetEffectStatus(p,a) (p)->lpVtbl->GetEffectStatus(p,a)
+#define IDirectInputEffect_Download(p) (p)->lpVtbl->Download(p)
+#define IDirectInputEffect_Unload(p) (p)->lpVtbl->Unload(p)
+#define IDirectInputEffect_Escape(p,a) (p)->lpVtbl->Escape(p,a)
+#else
+/*** IUnknown methods ***/
+#define IDirectInputEffect_QueryInterface(p,a,b) (p)->QueryInterface(a,b)
+#define IDirectInputEffect_AddRef(p) (p)->AddRef()
+#define IDirectInputEffect_Release(p) (p)->Release()
+/*** IDirectInputEffect methods ***/
+#define IDirectInputEffect_Initialize(p,a,b,c) (p)->Initialize(a,b,c)
+#define IDirectInputEffect_GetEffectGuid(p,a) (p)->GetEffectGuid(a)
+#define IDirectInputEffect_GetParameters(p,a,b) (p)->GetParameters(a,b)
+#define IDirectInputEffect_SetParameters(p,a,b) (p)->SetParameters(a,b)
+#define IDirectInputEffect_Start(p,a,b) (p)->Start(a,b)
+#define IDirectInputEffect_Stop(p) (p)->Stop()
+#define IDirectInputEffect_GetEffectStatus(p,a) (p)->GetEffectStatus(a)
+#define IDirectInputEffect_Download(p) (p)->Download()
+#define IDirectInputEffect_Unload(p) (p)->Unload()
+#define IDirectInputEffect_Escape(p,a) (p)->Escape(a)
+#endif
+
+#endif /* DI5 */
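
The accessor macros above let the same call sites compile either as C (dispatching through lpVtbl) or as C++ (calling the methods directly). A minimal sketch of driving a force-feedback effect through them, assuming `effect` was obtained beforehand from IDirectInputDevice2::CreateEffect (declared further below) on an acquired, force-feedback capable device; DIEGES_PLAYING is defined elsewhere in this header:

    DWORD status = 0;
    /* Upload the effect to the device and play a single iteration. */
    if (SUCCEEDED(IDirectInputEffect_Download(effect)) &&
        SUCCEEDED(IDirectInputEffect_Start(effect, 1, 0))) {
        IDirectInputEffect_GetEffectStatus(effect, &status);
        if (status & DIEGES_PLAYING)
            IDirectInputEffect_Stop(effect);  /* stop it once no longer wanted */
    }
    IDirectInputEffect_Release(effect);
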
+
+
+/*****************************************************************************
+ * IDirectInputDeviceA interface
+ */
+#undef INTERFACE
+#define INTERFACE IDirectInputDeviceA
+DECLARE_INTERFACE_(IDirectInputDeviceA,IUnknown)
+{
+ /*** IUnknown methods ***/
+ STDMETHOD_(HRESULT,QueryInterface)(THIS_ REFIID riid, void** ppvObject) PURE;
+ STDMETHOD_(ULONG,AddRef)(THIS) PURE;
+ STDMETHOD_(ULONG,Release)(THIS) PURE;
+ /*** IDirectInputDeviceA methods ***/
+ STDMETHOD(GetCapabilities)(THIS_ LPDIDEVCAPS lpDIDevCaps) PURE;
+ STDMETHOD(EnumObjects)(THIS_ LPDIENUMDEVICEOBJECTSCALLBACKA lpCallback, LPVOID pvRef, DWORD dwFlags) PURE;
+ STDMETHOD(GetProperty)(THIS_ REFGUID rguidProp, LPDIPROPHEADER pdiph) PURE;
+ STDMETHOD(SetProperty)(THIS_ REFGUID rguidProp, LPCDIPROPHEADER pdiph) PURE;
+ STDMETHOD(Acquire)(THIS) PURE;
+ STDMETHOD(Unacquire)(THIS) PURE;
+ STDMETHOD(GetDeviceState)(THIS_ DWORD cbData, LPVOID lpvData) PURE;
+ STDMETHOD(GetDeviceData)(THIS_ DWORD cbObjectData, LPDIDEVICEOBJECTDATA rgdod, LPDWORD pdwInOut, DWORD dwFlags) PURE;
+ STDMETHOD(SetDataFormat)(THIS_ LPCDIDATAFORMAT lpdf) PURE;
+ STDMETHOD(SetEventNotification)(THIS_ HANDLE hEvent) PURE;
+ STDMETHOD(SetCooperativeLevel)(THIS_ HWND hwnd, DWORD dwFlags) PURE;
+ STDMETHOD(GetObjectInfo)(THIS_ LPDIDEVICEOBJECTINSTANCEA pdidoi, DWORD dwObj, DWORD dwHow) PURE;
+ STDMETHOD(GetDeviceInfo)(THIS_ LPDIDEVICEINSTANCEA pdidi) PURE;
+ STDMETHOD(RunControlPanel)(THIS_ HWND hwndOwner, DWORD dwFlags) PURE;
+ STDMETHOD(Initialize)(THIS_ HINSTANCE hinst, DWORD dwVersion, REFGUID rguid) PURE;
+};
+
+/*****************************************************************************
+ * IDirectInputDeviceW interface
+ */
+#undef INTERFACE
+#define INTERFACE IDirectInputDeviceW
+DECLARE_INTERFACE_(IDirectInputDeviceW,IUnknown)
+{
+ /*** IUnknown methods ***/
+ STDMETHOD_(HRESULT,QueryInterface)(THIS_ REFIID riid, void** ppvObject) PURE;
+ STDMETHOD_(ULONG,AddRef)(THIS) PURE;
+ STDMETHOD_(ULONG,Release)(THIS) PURE;
+ /*** IDirectInputDeviceW methods ***/
+ STDMETHOD(GetCapabilities)(THIS_ LPDIDEVCAPS lpDIDevCaps) PURE;
+ STDMETHOD(EnumObjects)(THIS_ LPDIENUMDEVICEOBJECTSCALLBACKW lpCallback, LPVOID pvRef, DWORD dwFlags) PURE;
+ STDMETHOD(GetProperty)(THIS_ REFGUID rguidProp, LPDIPROPHEADER pdiph) PURE;
+ STDMETHOD(SetProperty)(THIS_ REFGUID rguidProp, LPCDIPROPHEADER pdiph) PURE;
+ STDMETHOD(Acquire)(THIS) PURE;
+ STDMETHOD(Unacquire)(THIS) PURE;
+ STDMETHOD(GetDeviceState)(THIS_ DWORD cbData, LPVOID lpvData) PURE;
+ STDMETHOD(GetDeviceData)(THIS_ DWORD cbObjectData, LPDIDEVICEOBJECTDATA rgdod, LPDWORD pdwInOut, DWORD dwFlags) PURE;
+ STDMETHOD(SetDataFormat)(THIS_ LPCDIDATAFORMAT lpdf) PURE;
+ STDMETHOD(SetEventNotification)(THIS_ HANDLE hEvent) PURE;
+ STDMETHOD(SetCooperativeLevel)(THIS_ HWND hwnd, DWORD dwFlags) PURE;
+ STDMETHOD(GetObjectInfo)(THIS_ LPDIDEVICEOBJECTINSTANCEW pdidoi, DWORD dwObj, DWORD dwHow) PURE;
+ STDMETHOD(GetDeviceInfo)(THIS_ LPDIDEVICEINSTANCEW pdidi) PURE;
+ STDMETHOD(RunControlPanel)(THIS_ HWND hwndOwner, DWORD dwFlags) PURE;
+ STDMETHOD(Initialize)(THIS_ HINSTANCE hinst, DWORD dwVersion, REFGUID rguid) PURE;
+};
+
+#if !defined(__cplusplus) || defined(CINTERFACE)
+/*** IUnknown methods ***/
+#define IDirectInputDevice_QueryInterface(p,a,b) (p)->lpVtbl->QueryInterface(p,a,b)
+#define IDirectInputDevice_AddRef(p) (p)->lpVtbl->AddRef(p)
+#define IDirectInputDevice_Release(p) (p)->lpVtbl->Release(p)
+/*** IDirectInputDevice methods ***/
+#define IDirectInputDevice_GetCapabilities(p,a) (p)->lpVtbl->GetCapabilities(p,a)
+#define IDirectInputDevice_EnumObjects(p,a,b,c) (p)->lpVtbl->EnumObjects(p,a,b,c)
+#define IDirectInputDevice_GetProperty(p,a,b) (p)->lpVtbl->GetProperty(p,a,b)
+#define IDirectInputDevice_SetProperty(p,a,b) (p)->lpVtbl->SetProperty(p,a,b)
+#define IDirectInputDevice_Acquire(p) (p)->lpVtbl->Acquire(p)
+#define IDirectInputDevice_Unacquire(p) (p)->lpVtbl->Unacquire(p)
+#define IDirectInputDevice_GetDeviceState(p,a,b) (p)->lpVtbl->GetDeviceState(p,a,b)
+#define IDirectInputDevice_GetDeviceData(p,a,b,c,d) (p)->lpVtbl->GetDeviceData(p,a,b,c,d)
+#define IDirectInputDevice_SetDataFormat(p,a) (p)->lpVtbl->SetDataFormat(p,a)
+#define IDirectInputDevice_SetEventNotification(p,a) (p)->lpVtbl->SetEventNotification(p,a)
+#define IDirectInputDevice_SetCooperativeLevel(p,a,b) (p)->lpVtbl->SetCooperativeLevel(p,a,b)
+#define IDirectInputDevice_GetObjectInfo(p,a,b,c) (p)->lpVtbl->GetObjectInfo(p,a,b,c)
+#define IDirectInputDevice_GetDeviceInfo(p,a) (p)->lpVtbl->GetDeviceInfo(p,a)
+#define IDirectInputDevice_RunControlPanel(p,a,b) (p)->lpVtbl->RunControlPanel(p,a,b)
+#define IDirectInputDevice_Initialize(p,a,b,c) (p)->lpVtbl->Initialize(p,a,b,c)
+#else
+/*** IUnknown methods ***/
+#define IDirectInputDevice_QueryInterface(p,a,b) (p)->QueryInterface(a,b)
+#define IDirectInputDevice_AddRef(p) (p)->AddRef()
+#define IDirectInputDevice_Release(p) (p)->Release()
+/*** IDirectInputDevice methods ***/
+#define IDirectInputDevice_GetCapabilities(p,a) (p)->GetCapabilities(a)
+#define IDirectInputDevice_EnumObjects(p,a,b,c) (p)->EnumObjects(a,b,c)
+#define IDirectInputDevice_GetProperty(p,a,b) (p)->GetProperty(a,b)
+#define IDirectInputDevice_SetProperty(p,a,b) (p)->SetProperty(a,b)
+#define IDirectInputDevice_Acquire(p) (p)->Acquire()
+#define IDirectInputDevice_Unacquire(p) (p)->Unacquire()
+#define IDirectInputDevice_GetDeviceState(p,a,b) (p)->GetDeviceState(a,b)
+#define IDirectInputDevice_GetDeviceData(p,a,b,c,d) (p)->GetDeviceData(a,b,c,d)
+#define IDirectInputDevice_SetDataFormat(p,a) (p)->SetDataFormat(a)
+#define IDirectInputDevice_SetEventNotification(p,a) (p)->SetEventNotification(a)
+#define IDirectInputDevice_SetCooperativeLevel(p,a,b) (p)->SetCooperativeLevel(a,b)
+#define IDirectInputDevice_GetObjectInfo(p,a,b,c) (p)->GetObjectInfo(a,b,c)
+#define IDirectInputDevice_GetDeviceInfo(p,a) (p)->GetDeviceInfo(a)
+#define IDirectInputDevice_RunControlPanel(p,a,b) (p)->RunControlPanel(a,b)
+#define IDirectInputDevice_Initialize(p,a,b,c) (p)->Initialize(a,b,c)
+#endif
+
+
+#if (DIRECTINPUT_VERSION >= 0x0500)
+/*****************************************************************************
+ * IDirectInputDevice2A interface
+ */
+#undef INTERFACE
+#define INTERFACE IDirectInputDevice2A
+DECLARE_INTERFACE_(IDirectInputDevice2A,IDirectInputDeviceA)
+{
+ /*** IUnknown methods ***/
+ STDMETHOD_(HRESULT,QueryInterface)(THIS_ REFIID riid, void** ppvObject) PURE;
+ STDMETHOD_(ULONG,AddRef)(THIS) PURE;
+ STDMETHOD_(ULONG,Release)(THIS) PURE;
+ /*** IDirectInputDeviceA methods ***/
+ STDMETHOD(GetCapabilities)(THIS_ LPDIDEVCAPS lpDIDevCaps) PURE;
+ STDMETHOD(EnumObjects)(THIS_ LPDIENUMDEVICEOBJECTSCALLBACKA lpCallback, LPVOID pvRef, DWORD dwFlags) PURE;
+ STDMETHOD(GetProperty)(THIS_ REFGUID rguidProp, LPDIPROPHEADER pdiph) PURE;
+ STDMETHOD(SetProperty)(THIS_ REFGUID rguidProp, LPCDIPROPHEADER pdiph) PURE;
+ STDMETHOD(Acquire)(THIS) PURE;
+ STDMETHOD(Unacquire)(THIS) PURE;
+ STDMETHOD(GetDeviceState)(THIS_ DWORD cbData, LPVOID lpvData) PURE;
+ STDMETHOD(GetDeviceData)(THIS_ DWORD cbObjectData, LPDIDEVICEOBJECTDATA rgdod, LPDWORD pdwInOut, DWORD dwFlags) PURE;
+ STDMETHOD(SetDataFormat)(THIS_ LPCDIDATAFORMAT lpdf) PURE;
+ STDMETHOD(SetEventNotification)(THIS_ HANDLE hEvent) PURE;
+ STDMETHOD(SetCooperativeLevel)(THIS_ HWND hwnd, DWORD dwFlags) PURE;
+ STDMETHOD(GetObjectInfo)(THIS_ LPDIDEVICEOBJECTINSTANCEA pdidoi, DWORD dwObj, DWORD dwHow) PURE;
+ STDMETHOD(GetDeviceInfo)(THIS_ LPDIDEVICEINSTANCEA pdidi) PURE;
+ STDMETHOD(RunControlPanel)(THIS_ HWND hwndOwner, DWORD dwFlags) PURE;
+ STDMETHOD(Initialize)(THIS_ HINSTANCE hinst, DWORD dwVersion, REFGUID rguid) PURE;
+ /*** IDirectInputDevice2A methods ***/
+ STDMETHOD(CreateEffect)(THIS_ REFGUID rguid, LPCDIEFFECT lpeff, LPDIRECTINPUTEFFECT *ppdeff, LPUNKNOWN punkOuter) PURE;
+ STDMETHOD(EnumEffects)(THIS_ LPDIENUMEFFECTSCALLBACKA lpCallback, LPVOID pvRef, DWORD dwEffType) PURE;
+ STDMETHOD(GetEffectInfo)(THIS_ LPDIEFFECTINFOA pdei, REFGUID rguid) PURE;
+ STDMETHOD(GetForceFeedbackState)(THIS_ LPDWORD pdwOut) PURE;
+ STDMETHOD(SendForceFeedbackCommand)(THIS_ DWORD dwFlags) PURE;
+ STDMETHOD(EnumCreatedEffectObjects)(THIS_ LPDIENUMCREATEDEFFECTOBJECTSCALLBACK lpCallback, LPVOID pvRef, DWORD fl) PURE;
+ STDMETHOD(Escape)(THIS_ LPDIEFFESCAPE pesc) PURE;
+ STDMETHOD(Poll)(THIS) PURE;
+ STDMETHOD(SendDeviceData)(THIS_ DWORD cbObjectData, LPCDIDEVICEOBJECTDATA rgdod, LPDWORD pdwInOut, DWORD fl) PURE;
+};
+
+/*****************************************************************************
+ * IDirectInputDevice2W interface
+ */
+#undef INTERFACE
+#define INTERFACE IDirectInputDevice2W
+DECLARE_INTERFACE_(IDirectInputDevice2W,IDirectInputDeviceW)
+{
+ /*** IUnknown methods ***/
+ STDMETHOD_(HRESULT,QueryInterface)(THIS_ REFIID riid, void** ppvObject) PURE;
+ STDMETHOD_(ULONG,AddRef)(THIS) PURE;
+ STDMETHOD_(ULONG,Release)(THIS) PURE;
+ /*** IDirectInputDeviceW methods ***/
+ STDMETHOD(GetCapabilities)(THIS_ LPDIDEVCAPS lpDIDevCaps) PURE;
+ STDMETHOD(EnumObjects)(THIS_ LPDIENUMDEVICEOBJECTSCALLBACKW lpCallback, LPVOID pvRef, DWORD dwFlags) PURE;
+ STDMETHOD(GetProperty)(THIS_ REFGUID rguidProp, LPDIPROPHEADER pdiph) PURE;
+ STDMETHOD(SetProperty)(THIS_ REFGUID rguidProp, LPCDIPROPHEADER pdiph) PURE;
+ STDMETHOD(Acquire)(THIS) PURE;
+ STDMETHOD(Unacquire)(THIS) PURE;
+ STDMETHOD(GetDeviceState)(THIS_ DWORD cbData, LPVOID lpvData) PURE;
+ STDMETHOD(GetDeviceData)(THIS_ DWORD cbObjectData, LPDIDEVICEOBJECTDATA rgdod, LPDWORD pdwInOut, DWORD dwFlags) PURE;
+ STDMETHOD(SetDataFormat)(THIS_ LPCDIDATAFORMAT lpdf) PURE;
+ STDMETHOD(SetEventNotification)(THIS_ HANDLE hEvent) PURE;
+ STDMETHOD(SetCooperativeLevel)(THIS_ HWND hwnd, DWORD dwFlags) PURE;
+ STDMETHOD(GetObjectInfo)(THIS_ LPDIDEVICEOBJECTINSTANCEW pdidoi, DWORD dwObj, DWORD dwHow) PURE;
+ STDMETHOD(GetDeviceInfo)(THIS_ LPDIDEVICEINSTANCEW pdidi) PURE;
+ STDMETHOD(RunControlPanel)(THIS_ HWND hwndOwner, DWORD dwFlags) PURE;
+ STDMETHOD(Initialize)(THIS_ HINSTANCE hinst, DWORD dwVersion, REFGUID rguid) PURE;
+ /*** IDirectInputDevice2W methods ***/
+ STDMETHOD(CreateEffect)(THIS_ REFGUID rguid, LPCDIEFFECT lpeff, LPDIRECTINPUTEFFECT *ppdeff, LPUNKNOWN punkOuter) PURE;
+ STDMETHOD(EnumEffects)(THIS_ LPDIENUMEFFECTSCALLBACKW lpCallback, LPVOID pvRef, DWORD dwEffType) PURE;
+ STDMETHOD(GetEffectInfo)(THIS_ LPDIEFFECTINFOW pdei, REFGUID rguid) PURE;
+ STDMETHOD(GetForceFeedbackState)(THIS_ LPDWORD pdwOut) PURE;
+ STDMETHOD(SendForceFeedbackCommand)(THIS_ DWORD dwFlags) PURE;
+ STDMETHOD(EnumCreatedEffectObjects)(THIS_ LPDIENUMCREATEDEFFECTOBJECTSCALLBACK lpCallback, LPVOID pvRef, DWORD fl) PURE;
+ STDMETHOD(Escape)(THIS_ LPDIEFFESCAPE pesc) PURE;
+ STDMETHOD(Poll)(THIS) PURE;
+ STDMETHOD(SendDeviceData)(THIS_ DWORD cbObjectData, LPCDIDEVICEOBJECTDATA rgdod, LPDWORD pdwInOut, DWORD fl) PURE;
+};
+
+#if !defined(__cplusplus) || defined(CINTERFACE)
+/*** IUnknown methods ***/
+#define IDirectInputDevice2_QueryInterface(p,a,b) (p)->lpVtbl->QueryInterface(p,a,b)
+#define IDirectInputDevice2_AddRef(p) (p)->lpVtbl->AddRef(p)
+#define IDirectInputDevice2_Release(p) (p)->lpVtbl->Release(p)
+/*** IDirectInputDevice methods ***/
+#define IDirectInputDevice2_GetCapabilities(p,a) (p)->lpVtbl->GetCapabilities(p,a)
+#define IDirectInputDevice2_EnumObjects(p,a,b,c) (p)->lpVtbl->EnumObjects(p,a,b,c)
+#define IDirectInputDevice2_GetProperty(p,a,b) (p)->lpVtbl->GetProperty(p,a,b)
+#define IDirectInputDevice2_SetProperty(p,a,b) (p)->lpVtbl->SetProperty(p,a,b)
+#define IDirectInputDevice2_Acquire(p) (p)->lpVtbl->Acquire(p)
+#define IDirectInputDevice2_Unacquire(p) (p)->lpVtbl->Unacquire(p)
+#define IDirectInputDevice2_GetDeviceState(p,a,b) (p)->lpVtbl->GetDeviceState(p,a,b)
+#define IDirectInputDevice2_GetDeviceData(p,a,b,c,d) (p)->lpVtbl->GetDeviceData(p,a,b,c,d)
+#define IDirectInputDevice2_SetDataFormat(p,a) (p)->lpVtbl->SetDataFormat(p,a)
+#define IDirectInputDevice2_SetEventNotification(p,a) (p)->lpVtbl->SetEventNotification(p,a)
+#define IDirectInputDevice2_SetCooperativeLevel(p,a,b) (p)->lpVtbl->SetCooperativeLevel(p,a,b)
+#define IDirectInputDevice2_GetObjectInfo(p,a,b,c) (p)->lpVtbl->GetObjectInfo(p,a,b,c)
+#define IDirectInputDevice2_GetDeviceInfo(p,a) (p)->lpVtbl->GetDeviceInfo(p,a)
+#define IDirectInputDevice2_RunControlPanel(p,a,b) (p)->lpVtbl->RunControlPanel(p,a,b)
+#define IDirectInputDevice2_Initialize(p,a,b,c) (p)->lpVtbl->Initialize(p,a,b,c)
+/*** IDirectInputDevice2 methods ***/
+#define IDirectInputDevice2_CreateEffect(p,a,b,c,d) (p)->lpVtbl->CreateEffect(p,a,b,c,d)
+#define IDirectInputDevice2_EnumEffects(p,a,b,c) (p)->lpVtbl->EnumEffects(p,a,b,c)
+#define IDirectInputDevice2_GetEffectInfo(p,a,b) (p)->lpVtbl->GetEffectInfo(p,a,b)
+#define IDirectInputDevice2_GetForceFeedbackState(p,a) (p)->lpVtbl->GetForceFeedbackState(p,a)
+#define IDirectInputDevice2_SendForceFeedbackCommand(p,a) (p)->lpVtbl->SendForceFeedbackCommand(p,a)
+#define IDirectInputDevice2_EnumCreatedEffectObjects(p,a,b,c) (p)->lpVtbl->EnumCreatedEffectObjects(p,a,b,c)
+#define IDirectInputDevice2_Escape(p,a) (p)->lpVtbl->Escape(p,a)
+#define IDirectInputDevice2_Poll(p) (p)->lpVtbl->Poll(p)
+#define IDirectInputDevice2_SendDeviceData(p,a,b,c,d) (p)->lpVtbl->SendDeviceData(p,a,b,c,d)
+#else
+/*** IUnknown methods ***/
+#define IDirectInputDevice2_QueryInterface(p,a,b) (p)->QueryInterface(a,b)
+#define IDirectInputDevice2_AddRef(p) (p)->AddRef()
+#define IDirectInputDevice2_Release(p) (p)->Release()
+/*** IDirectInputDevice methods ***/
+#define IDirectInputDevice2_GetCapabilities(p,a) (p)->GetCapabilities(a)
+#define IDirectInputDevice2_EnumObjects(p,a,b,c) (p)->EnumObjects(a,b,c)
+#define IDirectInputDevice2_GetProperty(p,a,b) (p)->GetProperty(a,b)
+#define IDirectInputDevice2_SetProperty(p,a,b) (p)->SetProperty(a,b)
+#define IDirectInputDevice2_Acquire(p) (p)->Acquire()
+#define IDirectInputDevice2_Unacquire(p) (p)->Unacquire()
+#define IDirectInputDevice2_GetDeviceState(p,a,b) (p)->GetDeviceState(a,b)
+#define IDirectInputDevice2_GetDeviceData(p,a,b,c,d) (p)->GetDeviceData(a,b,c,d)
+#define IDirectInputDevice2_SetDataFormat(p,a) (p)->SetDataFormat(a)
+#define IDirectInputDevice2_SetEventNotification(p,a) (p)->SetEventNotification(a)
+#define IDirectInputDevice2_SetCooperativeLevel(p,a,b) (p)->SetCooperativeLevel(a,b)
+#define IDirectInputDevice2_GetObjectInfo(p,a,b,c) (p)->GetObjectInfo(a,b,c)
+#define IDirectInputDevice2_GetDeviceInfo(p,a) (p)->GetDeviceInfo(a)
+#define IDirectInputDevice2_RunControlPanel(p,a,b) (p)->RunControlPanel(a,b)
+#define IDirectInputDevice2_Initialize(p,a,b,c) (p)->Initialize(a,b,c)
+/*** IDirectInputDevice2 methods ***/
+#define IDirectInputDevice2_CreateEffect(p,a,b,c,d) (p)->CreateEffect(a,b,c,d)
+#define IDirectInputDevice2_EnumEffects(p,a,b,c) (p)->EnumEffects(a,b,c)
+#define IDirectInputDevice2_GetEffectInfo(p,a,b) (p)->GetEffectInfo(a,b)
+#define IDirectInputDevice2_GetForceFeedbackState(p,a) (p)->GetForceFeedbackState(a)
+#define IDirectInputDevice2_SendForceFeedbackCommand(p,a) (p)->SendForceFeedbackCommand(a)
+#define IDirectInputDevice2_EnumCreatedEffectObjects(p,a,b,c) (p)->EnumCreatedEffectObjects(a,b,c)
+#define IDirectInputDevice2_Escape(p,a) (p)->Escape(a)
+#define IDirectInputDevice2_Poll(p) (p)->Poll()
+#define IDirectInputDevice2_SendDeviceData(p,a,b,c,d) (p)->SendDeviceData(a,b,c,d)
+#endif
+#endif /* DI5 */
+
+#if DIRECTINPUT_VERSION >= 0x0700
+/*****************************************************************************
+ * IDirectInputDevice7A interface
+ */
+#undef INTERFACE
+#define INTERFACE IDirectInputDevice7A
+DECLARE_INTERFACE_(IDirectInputDevice7A,IDirectInputDevice2A)
+{
+ /*** IUnknown methods ***/
+ STDMETHOD_(HRESULT,QueryInterface)(THIS_ REFIID riid, void** ppvObject) PURE;
+ STDMETHOD_(ULONG,AddRef)(THIS) PURE;
+ STDMETHOD_(ULONG,Release)(THIS) PURE;
+ /*** IDirectInputDeviceA methods ***/
+ STDMETHOD(GetCapabilities)(THIS_ LPDIDEVCAPS lpDIDevCaps) PURE;
+ STDMETHOD(EnumObjects)(THIS_ LPDIENUMDEVICEOBJECTSCALLBACKA lpCallback, LPVOID pvRef, DWORD dwFlags) PURE;
+ STDMETHOD(GetProperty)(THIS_ REFGUID rguidProp, LPDIPROPHEADER pdiph) PURE;
+ STDMETHOD(SetProperty)(THIS_ REFGUID rguidProp, LPCDIPROPHEADER pdiph) PURE;
+ STDMETHOD(Acquire)(THIS) PURE;
+ STDMETHOD(Unacquire)(THIS) PURE;
+ STDMETHOD(GetDeviceState)(THIS_ DWORD cbData, LPVOID lpvData) PURE;
+ STDMETHOD(GetDeviceData)(THIS_ DWORD cbObjectData, LPDIDEVICEOBJECTDATA rgdod, LPDWORD pdwInOut, DWORD dwFlags) PURE;
+ STDMETHOD(SetDataFormat)(THIS_ LPCDIDATAFORMAT lpdf) PURE;
+ STDMETHOD(SetEventNotification)(THIS_ HANDLE hEvent) PURE;
+ STDMETHOD(SetCooperativeLevel)(THIS_ HWND hwnd, DWORD dwFlags) PURE;
+ STDMETHOD(GetObjectInfo)(THIS_ LPDIDEVICEOBJECTINSTANCEA pdidoi, DWORD dwObj, DWORD dwHow) PURE;
+ STDMETHOD(GetDeviceInfo)(THIS_ LPDIDEVICEINSTANCEA pdidi) PURE;
+ STDMETHOD(RunControlPanel)(THIS_ HWND hwndOwner, DWORD dwFlags) PURE;
+ STDMETHOD(Initialize)(THIS_ HINSTANCE hinst, DWORD dwVersion, REFGUID rguid) PURE;
+ /*** IDirectInputDevice2A methods ***/
+ STDMETHOD(CreateEffect)(THIS_ REFGUID rguid, LPCDIEFFECT lpeff, LPDIRECTINPUTEFFECT *ppdeff, LPUNKNOWN punkOuter) PURE;
+ STDMETHOD(EnumEffects)(THIS_ LPDIENUMEFFECTSCALLBACKA lpCallback, LPVOID pvRef, DWORD dwEffType) PURE;
+ STDMETHOD(GetEffectInfo)(THIS_ LPDIEFFECTINFOA pdei, REFGUID rguid) PURE;
+ STDMETHOD(GetForceFeedbackState)(THIS_ LPDWORD pdwOut) PURE;
+ STDMETHOD(SendForceFeedbackCommand)(THIS_ DWORD dwFlags) PURE;
+ STDMETHOD(EnumCreatedEffectObjects)(THIS_ LPDIENUMCREATEDEFFECTOBJECTSCALLBACK lpCallback, LPVOID pvRef, DWORD fl) PURE;
+ STDMETHOD(Escape)(THIS_ LPDIEFFESCAPE pesc) PURE;
+ STDMETHOD(Poll)(THIS) PURE;
+ STDMETHOD(SendDeviceData)(THIS_ DWORD cbObjectData, LPCDIDEVICEOBJECTDATA rgdod, LPDWORD pdwInOut, DWORD fl) PURE;
+ /*** IDirectInputDevice7A methods ***/
+ STDMETHOD(EnumEffectsInFile)(THIS_ LPCSTR lpszFileName,LPDIENUMEFFECTSINFILECALLBACK pec,LPVOID pvRef,DWORD dwFlags) PURE;
+ STDMETHOD(WriteEffectToFile)(THIS_ LPCSTR lpszFileName,DWORD dwEntries,LPDIFILEEFFECT rgDiFileEft,DWORD dwFlags) PURE;
+};
+
+/*****************************************************************************
+ * IDirectInputDevice7W interface
+ */
+#undef INTERFACE
+#define INTERFACE IDirectInputDevice7W
+DECLARE_INTERFACE_(IDirectInputDevice7W,IDirectInputDevice2W)
+{
+ /*** IUnknown methods ***/
+ STDMETHOD_(HRESULT,QueryInterface)(THIS_ REFIID riid, void** ppvObject) PURE;
+ STDMETHOD_(ULONG,AddRef)(THIS) PURE;
+ STDMETHOD_(ULONG,Release)(THIS) PURE;
+ /*** IDirectInputDeviceW methods ***/
+ STDMETHOD(GetCapabilities)(THIS_ LPDIDEVCAPS lpDIDevCaps) PURE;
+ STDMETHOD(EnumObjects)(THIS_ LPDIENUMDEVICEOBJECTSCALLBACKW lpCallback, LPVOID pvRef, DWORD dwFlags) PURE;
+ STDMETHOD(GetProperty)(THIS_ REFGUID rguidProp, LPDIPROPHEADER pdiph) PURE;
+ STDMETHOD(SetProperty)(THIS_ REFGUID rguidProp, LPCDIPROPHEADER pdiph) PURE;
+ STDMETHOD(Acquire)(THIS) PURE;
+ STDMETHOD(Unacquire)(THIS) PURE;
+ STDMETHOD(GetDeviceState)(THIS_ DWORD cbData, LPVOID lpvData) PURE;
+ STDMETHOD(GetDeviceData)(THIS_ DWORD cbObjectData, LPDIDEVICEOBJECTDATA rgdod, LPDWORD pdwInOut, DWORD dwFlags) PURE;
+ STDMETHOD(SetDataFormat)(THIS_ LPCDIDATAFORMAT lpdf) PURE;
+ STDMETHOD(SetEventNotification)(THIS_ HANDLE hEvent) PURE;
+ STDMETHOD(SetCooperativeLevel)(THIS_ HWND hwnd, DWORD dwFlags) PURE;
+ STDMETHOD(GetObjectInfo)(THIS_ LPDIDEVICEOBJECTINSTANCEW pdidoi, DWORD dwObj, DWORD dwHow) PURE;
+ STDMETHOD(GetDeviceInfo)(THIS_ LPDIDEVICEINSTANCEW pdidi) PURE;
+ STDMETHOD(RunControlPanel)(THIS_ HWND hwndOwner, DWORD dwFlags) PURE;
+ STDMETHOD(Initialize)(THIS_ HINSTANCE hinst, DWORD dwVersion, REFGUID rguid) PURE;
+ /*** IDirectInputDevice2W methods ***/
+ STDMETHOD(CreateEffect)(THIS_ REFGUID rguid, LPCDIEFFECT lpeff, LPDIRECTINPUTEFFECT *ppdeff, LPUNKNOWN punkOuter) PURE;
+ STDMETHOD(EnumEffects)(THIS_ LPDIENUMEFFECTSCALLBACKW lpCallback, LPVOID pvRef, DWORD dwEffType) PURE;
+ STDMETHOD(GetEffectInfo)(THIS_ LPDIEFFECTINFOW pdei, REFGUID rguid) PURE;
+ STDMETHOD(GetForceFeedbackState)(THIS_ LPDWORD pdwOut) PURE;
+ STDMETHOD(SendForceFeedbackCommand)(THIS_ DWORD dwFlags) PURE;
+ STDMETHOD(EnumCreatedEffectObjects)(THIS_ LPDIENUMCREATEDEFFECTOBJECTSCALLBACK lpCallback, LPVOID pvRef, DWORD fl) PURE;
+ STDMETHOD(Escape)(THIS_ LPDIEFFESCAPE pesc) PURE;
+ STDMETHOD(Poll)(THIS) PURE;
+ STDMETHOD(SendDeviceData)(THIS_ DWORD cbObjectData, LPCDIDEVICEOBJECTDATA rgdod, LPDWORD pdwInOut, DWORD fl) PURE;
+ /*** IDirectInputDevice7W methods ***/
+ STDMETHOD(EnumEffectsInFile)(THIS_ LPCWSTR lpszFileName,LPDIENUMEFFECTSINFILECALLBACK pec,LPVOID pvRef,DWORD dwFlags) PURE;
+ STDMETHOD(WriteEffectToFile)(THIS_ LPCWSTR lpszFileName,DWORD dwEntries,LPDIFILEEFFECT rgDiFileEft,DWORD dwFlags) PURE;
+};
+
+#if !defined(__cplusplus) || defined(CINTERFACE)
+/*** IUnknown methods ***/
+#define IDirectInputDevice7_QueryInterface(p,a,b) (p)->lpVtbl->QueryInterface(p,a,b)
+#define IDirectInputDevice7_AddRef(p) (p)->lpVtbl->AddRef(p)
+#define IDirectInputDevice7_Release(p) (p)->lpVtbl->Release(p)
+/*** IDirectInputDevice methods ***/
+#define IDirectInputDevice7_GetCapabilities(p,a) (p)->lpVtbl->GetCapabilities(p,a)
+#define IDirectInputDevice7_EnumObjects(p,a,b,c) (p)->lpVtbl->EnumObjects(p,a,b,c)
+#define IDirectInputDevice7_GetProperty(p,a,b) (p)->lpVtbl->GetProperty(p,a,b)
+#define IDirectInputDevice7_SetProperty(p,a,b) (p)->lpVtbl->SetProperty(p,a,b)
+#define IDirectInputDevice7_Acquire(p) (p)->lpVtbl->Acquire(p)
+#define IDirectInputDevice7_Unacquire(p) (p)->lpVtbl->Unacquire(p)
+#define IDirectInputDevice7_GetDeviceState(p,a,b) (p)->lpVtbl->GetDeviceState(p,a,b)
+#define IDirectInputDevice7_GetDeviceData(p,a,b,c,d) (p)->lpVtbl->GetDeviceData(p,a,b,c,d)
+#define IDirectInputDevice7_SetDataFormat(p,a) (p)->lpVtbl->SetDataFormat(p,a)
+#define IDirectInputDevice7_SetEventNotification(p,a) (p)->lpVtbl->SetEventNotification(p,a)
+#define IDirectInputDevice7_SetCooperativeLevel(p,a,b) (p)->lpVtbl->SetCooperativeLevel(p,a,b)
+#define IDirectInputDevice7_GetObjectInfo(p,a,b,c) (p)->lpVtbl->GetObjectInfo(p,a,b,c)
+#define IDirectInputDevice7_GetDeviceInfo(p,a) (p)->lpVtbl->GetDeviceInfo(p,a)
+#define IDirectInputDevice7_RunControlPanel(p,a,b) (p)->lpVtbl->RunControlPanel(p,a,b)
+#define IDirectInputDevice7_Initialize(p,a,b,c) (p)->lpVtbl->Initialize(p,a,b,c)
+/*** IDirectInputDevice2 methods ***/
+#define IDirectInputDevice7_CreateEffect(p,a,b,c,d) (p)->lpVtbl->CreateEffect(p,a,b,c,d)
+#define IDirectInputDevice7_EnumEffects(p,a,b,c) (p)->lpVtbl->EnumEffects(p,a,b,c)
+#define IDirectInputDevice7_GetEffectInfo(p,a,b) (p)->lpVtbl->GetEffectInfo(p,a,b)
+#define IDirectInputDevice7_GetForceFeedbackState(p,a) (p)->lpVtbl->GetForceFeedbackState(p,a)
+#define IDirectInputDevice7_SendForceFeedbackCommand(p,a) (p)->lpVtbl->SendForceFeedbackCommand(p,a)
+#define IDirectInputDevice7_EnumCreatedEffectObjects(p,a,b,c) (p)->lpVtbl->EnumCreatedEffectObjects(p,a,b,c)
+#define IDirectInputDevice7_Escape(p,a) (p)->lpVtbl->Escape(p,a)
+#define IDirectInputDevice7_Poll(p) (p)->lpVtbl->Poll(p)
+#define IDirectInputDevice7_SendDeviceData(p,a,b,c,d) (p)->lpVtbl->SendDeviceData(p,a,b,c,d)
+/*** IDirectInputDevice7 methods ***/
+#define IDirectInputDevice7_EnumEffectsInFile(p,a,b,c,d) (p)->lpVtbl->EnumEffectsInFile(p,a,b,c,d)
+#define IDirectInputDevice7_WriteEffectToFile(p,a,b,c,d) (p)->lpVtbl->WriteEffectToFile(p,a,b,c,d)
+#else
+/*** IUnknown methods ***/
+#define IDirectInputDevice7_QueryInterface(p,a,b) (p)->QueryInterface(a,b)
+#define IDirectInputDevice7_AddRef(p) (p)->AddRef()
+#define IDirectInputDevice7_Release(p) (p)->Release()
+/*** IDirectInputDevice methods ***/
+#define IDirectInputDevice7_GetCapabilities(p,a) (p)->GetCapabilities(a)
+#define IDirectInputDevice7_EnumObjects(p,a,b,c) (p)->EnumObjects(a,b,c)
+#define IDirectInputDevice7_GetProperty(p,a,b) (p)->GetProperty(a,b)
+#define IDirectInputDevice7_SetProperty(p,a,b) (p)->SetProperty(a,b)
+#define IDirectInputDevice7_Acquire(p) (p)->Acquire()
+#define IDirectInputDevice7_Unacquire(p) (p)->Unacquire()
+#define IDirectInputDevice7_GetDeviceState(p,a,b) (p)->GetDeviceState(a,b)
+#define IDirectInputDevice7_GetDeviceData(p,a,b,c,d) (p)->GetDeviceData(a,b,c,d)
+#define IDirectInputDevice7_SetDataFormat(p,a) (p)->SetDataFormat(a)
+#define IDirectInputDevice7_SetEventNotification(p,a) (p)->SetEventNotification(a)
+#define IDirectInputDevice7_SetCooperativeLevel(p,a,b) (p)->SetCooperativeLevel(a,b)
+#define IDirectInputDevice7_GetObjectInfo(p,a,b,c) (p)->GetObjectInfo(a,b,c)
+#define IDirectInputDevice7_GetDeviceInfo(p,a) (p)->GetDeviceInfo(a)
+#define IDirectInputDevice7_RunControlPanel(p,a,b) (p)->RunControlPanel(a,b)
+#define IDirectInputDevice7_Initialize(p,a,b,c) (p)->Initialize(a,b,c)
+/*** IDirectInputDevice2 methods ***/
+#define IDirectInputDevice7_CreateEffect(p,a,b,c,d) (p)->CreateEffect(a,b,c,d)
+#define IDirectInputDevice7_EnumEffects(p,a,b,c) (p)->EnumEffects(a,b,c)
+#define IDirectInputDevice7_GetEffectInfo(p,a,b) (p)->GetEffectInfo(a,b)
+#define IDirectInputDevice7_GetForceFeedbackState(p,a) (p)->GetForceFeedbackState(a)
+#define IDirectInputDevice7_SendForceFeedbackCommand(p,a) (p)->SendForceFeedbackCommand(a)
+#define IDirectInputDevice7_EnumCreatedEffectObjects(p,a,b,c) (p)->EnumCreatedEffectObjects(a,b,c)
+#define IDirectInputDevice7_Escape(p,a) (p)->Escape(a)
+#define IDirectInputDevice7_Poll(p) (p)->Poll()
+#define IDirectInputDevice7_SendDeviceData(p,a,b,c,d) (p)->SendDeviceData(a,b,c,d)
+/*** IDirectInputDevice7 methods ***/
+#define IDirectInputDevice7_EnumEffectsInFile(p,a,b,c,d) (p)->EnumEffectsInFile(a,b,c,d)
+#define IDirectInputDevice7_WriteEffectToFile(p,a,b,c,d) (p)->WriteEffectToFile(a,b,c,d)
+#endif
+
+#endif /* DI7 */
+
+#if DIRECTINPUT_VERSION >= 0x0800
+/*****************************************************************************
+ * IDirectInputDevice8A interface
+ */
+#undef INTERFACE
+#define INTERFACE IDirectInputDevice8A
+DECLARE_INTERFACE_(IDirectInputDevice8A,IDirectInputDevice7A)
+{
+ /*** IUnknown methods ***/
+ STDMETHOD_(HRESULT,QueryInterface)(THIS_ REFIID riid, void** ppvObject) PURE;
+ STDMETHOD_(ULONG,AddRef)(THIS) PURE;
+ STDMETHOD_(ULONG,Release)(THIS) PURE;
+ /*** IDirectInputDeviceA methods ***/
+ STDMETHOD(GetCapabilities)(THIS_ LPDIDEVCAPS lpDIDevCaps) PURE;
+ STDMETHOD(EnumObjects)(THIS_ LPDIENUMDEVICEOBJECTSCALLBACKA lpCallback, LPVOID pvRef, DWORD dwFlags) PURE;
+ STDMETHOD(GetProperty)(THIS_ REFGUID rguidProp, LPDIPROPHEADER pdiph) PURE;
+ STDMETHOD(SetProperty)(THIS_ REFGUID rguidProp, LPCDIPROPHEADER pdiph) PURE;
+ STDMETHOD(Acquire)(THIS) PURE;
+ STDMETHOD(Unacquire)(THIS) PURE;
+ STDMETHOD(GetDeviceState)(THIS_ DWORD cbData, LPVOID lpvData) PURE;
+ STDMETHOD(GetDeviceData)(THIS_ DWORD cbObjectData, LPDIDEVICEOBJECTDATA rgdod, LPDWORD pdwInOut, DWORD dwFlags) PURE;
+ STDMETHOD(SetDataFormat)(THIS_ LPCDIDATAFORMAT lpdf) PURE;
+ STDMETHOD(SetEventNotification)(THIS_ HANDLE hEvent) PURE;
+ STDMETHOD(SetCooperativeLevel)(THIS_ HWND hwnd, DWORD dwFlags) PURE;
+ STDMETHOD(GetObjectInfo)(THIS_ LPDIDEVICEOBJECTINSTANCEA pdidoi, DWORD dwObj, DWORD dwHow) PURE;
+ STDMETHOD(GetDeviceInfo)(THIS_ LPDIDEVICEINSTANCEA pdidi) PURE;
+ STDMETHOD(RunControlPanel)(THIS_ HWND hwndOwner, DWORD dwFlags) PURE;
+ STDMETHOD(Initialize)(THIS_ HINSTANCE hinst, DWORD dwVersion, REFGUID rguid) PURE;
+ /*** IDirectInputDevice2A methods ***/
+ STDMETHOD(CreateEffect)(THIS_ REFGUID rguid, LPCDIEFFECT lpeff, LPDIRECTINPUTEFFECT *ppdeff, LPUNKNOWN punkOuter) PURE;
+ STDMETHOD(EnumEffects)(THIS_ LPDIENUMEFFECTSCALLBACKA lpCallback, LPVOID pvRef, DWORD dwEffType) PURE;
+ STDMETHOD(GetEffectInfo)(THIS_ LPDIEFFECTINFOA pdei, REFGUID rguid) PURE;
+ STDMETHOD(GetForceFeedbackState)(THIS_ LPDWORD pdwOut) PURE;
+ STDMETHOD(SendForceFeedbackCommand)(THIS_ DWORD dwFlags) PURE;
+ STDMETHOD(EnumCreatedEffectObjects)(THIS_ LPDIENUMCREATEDEFFECTOBJECTSCALLBACK lpCallback, LPVOID pvRef, DWORD fl) PURE;
+ STDMETHOD(Escape)(THIS_ LPDIEFFESCAPE pesc) PURE;
+ STDMETHOD(Poll)(THIS) PURE;
+ STDMETHOD(SendDeviceData)(THIS_ DWORD cbObjectData, LPCDIDEVICEOBJECTDATA rgdod, LPDWORD pdwInOut, DWORD fl) PURE;
+ /*** IDirectInputDevice7A methods ***/
+ STDMETHOD(EnumEffectsInFile)(THIS_ LPCSTR lpszFileName,LPDIENUMEFFECTSINFILECALLBACK pec,LPVOID pvRef,DWORD dwFlags) PURE;
+ STDMETHOD(WriteEffectToFile)(THIS_ LPCSTR lpszFileName,DWORD dwEntries,LPDIFILEEFFECT rgDiFileEft,DWORD dwFlags) PURE;
+ /*** IDirectInputDevice8A methods ***/
+ STDMETHOD(BuildActionMap)(THIS_ LPDIACTIONFORMATA lpdiaf, LPCSTR lpszUserName, DWORD dwFlags) PURE;
+ STDMETHOD(SetActionMap)(THIS_ LPDIACTIONFORMATA lpdiaf, LPCSTR lpszUserName, DWORD dwFlags) PURE;
+ STDMETHOD(GetImageInfo)(THIS_ LPDIDEVICEIMAGEINFOHEADERA lpdiDevImageInfoHeader) PURE;
+};
+
+/*****************************************************************************
+ * IDirectInputDevice8W interface
+ */
+#undef INTERFACE
+#define INTERFACE IDirectInputDevice8W
+DECLARE_INTERFACE_(IDirectInputDevice8W,IDirectInputDevice7W)
+{
+ /*** IUnknown methods ***/
+ STDMETHOD_(HRESULT,QueryInterface)(THIS_ REFIID riid, void** ppvObject) PURE;
+ STDMETHOD_(ULONG,AddRef)(THIS) PURE;
+ STDMETHOD_(ULONG,Release)(THIS) PURE;
+ /*** IDirectInputDeviceW methods ***/
+ STDMETHOD(GetCapabilities)(THIS_ LPDIDEVCAPS lpDIDevCaps) PURE;
+ STDMETHOD(EnumObjects)(THIS_ LPDIENUMDEVICEOBJECTSCALLBACKW lpCallback, LPVOID pvRef, DWORD dwFlags) PURE;
+ STDMETHOD(GetProperty)(THIS_ REFGUID rguidProp, LPDIPROPHEADER pdiph) PURE;
+ STDMETHOD(SetProperty)(THIS_ REFGUID rguidProp, LPCDIPROPHEADER pdiph) PURE;
+ STDMETHOD(Acquire)(THIS) PURE;
+ STDMETHOD(Unacquire)(THIS) PURE;
+ STDMETHOD(GetDeviceState)(THIS_ DWORD cbData, LPVOID lpvData) PURE;
+ STDMETHOD(GetDeviceData)(THIS_ DWORD cbObjectData, LPDIDEVICEOBJECTDATA rgdod, LPDWORD pdwInOut, DWORD dwFlags) PURE;
+ STDMETHOD(SetDataFormat)(THIS_ LPCDIDATAFORMAT lpdf) PURE;
+ STDMETHOD(SetEventNotification)(THIS_ HANDLE hEvent) PURE;
+ STDMETHOD(SetCooperativeLevel)(THIS_ HWND hwnd, DWORD dwFlags) PURE;
+ STDMETHOD(GetObjectInfo)(THIS_ LPDIDEVICEOBJECTINSTANCEW pdidoi, DWORD dwObj, DWORD dwHow) PURE;
+ STDMETHOD(GetDeviceInfo)(THIS_ LPDIDEVICEINSTANCEW pdidi) PURE;
+ STDMETHOD(RunControlPanel)(THIS_ HWND hwndOwner, DWORD dwFlags) PURE;
+ STDMETHOD(Initialize)(THIS_ HINSTANCE hinst, DWORD dwVersion, REFGUID rguid) PURE;
+ /*** IDirectInputDevice2W methods ***/
+ STDMETHOD(CreateEffect)(THIS_ REFGUID rguid, LPCDIEFFECT lpeff, LPDIRECTINPUTEFFECT *ppdeff, LPUNKNOWN punkOuter) PURE;
+ STDMETHOD(EnumEffects)(THIS_ LPDIENUMEFFECTSCALLBACKW lpCallback, LPVOID pvRef, DWORD dwEffType) PURE;
+ STDMETHOD(GetEffectInfo)(THIS_ LPDIEFFECTINFOW pdei, REFGUID rguid) PURE;
+ STDMETHOD(GetForceFeedbackState)(THIS_ LPDWORD pdwOut) PURE;
+ STDMETHOD(SendForceFeedbackCommand)(THIS_ DWORD dwFlags) PURE;
+ STDMETHOD(EnumCreatedEffectObjects)(THIS_ LPDIENUMCREATEDEFFECTOBJECTSCALLBACK lpCallback, LPVOID pvRef, DWORD fl) PURE;
+ STDMETHOD(Escape)(THIS_ LPDIEFFESCAPE pesc) PURE;
+ STDMETHOD(Poll)(THIS) PURE;
+ STDMETHOD(SendDeviceData)(THIS_ DWORD cbObjectData, LPCDIDEVICEOBJECTDATA rgdod, LPDWORD pdwInOut, DWORD fl) PURE;
+ /*** IDirectInputDevice7W methods ***/
+ STDMETHOD(EnumEffectsInFile)(THIS_ LPCWSTR lpszFileName,LPDIENUMEFFECTSINFILECALLBACK pec,LPVOID pvRef,DWORD dwFlags) PURE;
+ STDMETHOD(WriteEffectToFile)(THIS_ LPCWSTR lpszFileName,DWORD dwEntries,LPDIFILEEFFECT rgDiFileEft,DWORD dwFlags) PURE;
+ /*** IDirectInputDevice8W methods ***/
+ STDMETHOD(BuildActionMap)(THIS_ LPDIACTIONFORMATW lpdiaf, LPCWSTR lpszUserName, DWORD dwFlags) PURE;
+ STDMETHOD(SetActionMap)(THIS_ LPDIACTIONFORMATW lpdiaf, LPCWSTR lpszUserName, DWORD dwFlags) PURE;
+ STDMETHOD(GetImageInfo)(THIS_ LPDIDEVICEIMAGEINFOHEADERW lpdiDevImageInfoHeader) PURE;
+};
+
+#if !defined(__cplusplus) || defined(CINTERFACE)
+/*** IUnknown methods ***/
+#define IDirectInputDevice8_QueryInterface(p,a,b) (p)->lpVtbl->QueryInterface(p,a,b)
+#define IDirectInputDevice8_AddRef(p) (p)->lpVtbl->AddRef(p)
+#define IDirectInputDevice8_Release(p) (p)->lpVtbl->Release(p)
+/*** IDirectInputDevice methods ***/
+#define IDirectInputDevice8_GetCapabilities(p,a) (p)->lpVtbl->GetCapabilities(p,a)
+#define IDirectInputDevice8_EnumObjects(p,a,b,c) (p)->lpVtbl->EnumObjects(p,a,b,c)
+#define IDirectInputDevice8_GetProperty(p,a,b) (p)->lpVtbl->GetProperty(p,a,b)
+#define IDirectInputDevice8_SetProperty(p,a,b) (p)->lpVtbl->SetProperty(p,a,b)
+#define IDirectInputDevice8_Acquire(p) (p)->lpVtbl->Acquire(p)
+#define IDirectInputDevice8_Unacquire(p) (p)->lpVtbl->Unacquire(p)
+#define IDirectInputDevice8_GetDeviceState(p,a,b) (p)->lpVtbl->GetDeviceState(p,a,b)
+#define IDirectInputDevice8_GetDeviceData(p,a,b,c,d) (p)->lpVtbl->GetDeviceData(p,a,b,c,d)
+#define IDirectInputDevice8_SetDataFormat(p,a) (p)->lpVtbl->SetDataFormat(p,a)
+#define IDirectInputDevice8_SetEventNotification(p,a) (p)->lpVtbl->SetEventNotification(p,a)
+#define IDirectInputDevice8_SetCooperativeLevel(p,a,b) (p)->lpVtbl->SetCooperativeLevel(p,a,b)
+#define IDirectInputDevice8_GetObjectInfo(p,a,b,c) (p)->lpVtbl->GetObjectInfo(p,a,b,c)
+#define IDirectInputDevice8_GetDeviceInfo(p,a) (p)->lpVtbl->GetDeviceInfo(p,a)
+#define IDirectInputDevice8_RunControlPanel(p,a,b) (p)->lpVtbl->RunControlPanel(p,a,b)
+#define IDirectInputDevice8_Initialize(p,a,b,c) (p)->lpVtbl->Initialize(p,a,b,c)
+/*** IDirectInputDevice2 methods ***/
+#define IDirectInputDevice8_CreateEffect(p,a,b,c,d) (p)->lpVtbl->CreateEffect(p,a,b,c,d)
+#define IDirectInputDevice8_EnumEffects(p,a,b,c) (p)->lpVtbl->EnumEffects(p,a,b,c)
+#define IDirectInputDevice8_GetEffectInfo(p,a,b) (p)->lpVtbl->GetEffectInfo(p,a,b)
+#define IDirectInputDevice8_GetForceFeedbackState(p,a) (p)->lpVtbl->GetForceFeedbackState(p,a)
+#define IDirectInputDevice8_SendForceFeedbackCommand(p,a) (p)->lpVtbl->SendForceFeedbackCommand(p,a)
+#define IDirectInputDevice8_EnumCreatedEffectObjects(p,a,b,c) (p)->lpVtbl->EnumCreatedEffectObjects(p,a,b,c)
+#define IDirectInputDevice8_Escape(p,a) (p)->lpVtbl->Escape(p,a)
+#define IDirectInputDevice8_Poll(p) (p)->lpVtbl->Poll(p)
+#define IDirectInputDevice8_SendDeviceData(p,a,b,c,d) (p)->lpVtbl->SendDeviceData(p,a,b,c,d)
+/*** IDirectInputDevice7 methods ***/
+#define IDirectInputDevice8_EnumEffectsInFile(p,a,b,c,d) (p)->lpVtbl->EnumEffectsInFile(p,a,b,c,d)
+#define IDirectInputDevice8_WriteEffectToFile(p,a,b,c,d) (p)->lpVtbl->WriteEffectToFile(p,a,b,c,d)
+/*** IDirectInputDevice8 methods ***/
+#define IDirectInputDevice8_BuildActionMap(p,a,b,c) (p)->lpVtbl->BuildActionMap(p,a,b,c)
+#define IDirectInputDevice8_SetActionMap(p,a,b,c) (p)->lpVtbl->SetActionMap(p,a,b,c)
+#define IDirectInputDevice8_GetImageInfo(p,a) (p)->lpVtbl->GetImageInfo(p,a)
+#else
+/*** IUnknown methods ***/
+#define IDirectInputDevice8_QueryInterface(p,a,b) (p)->QueryInterface(a,b)
+#define IDirectInputDevice8_AddRef(p) (p)->AddRef()
+#define IDirectInputDevice8_Release(p) (p)->Release()
+/*** IDirectInputDevice methods ***/
+#define IDirectInputDevice8_GetCapabilities(p,a) (p)->GetCapabilities(a)
+#define IDirectInputDevice8_EnumObjects(p,a,b,c) (p)->EnumObjects(a,b,c)
+#define IDirectInputDevice8_GetProperty(p,a,b) (p)->GetProperty(a,b)
+#define IDirectInputDevice8_SetProperty(p,a,b) (p)->SetProperty(a,b)
+#define IDirectInputDevice8_Acquire(p) (p)->Acquire()
+#define IDirectInputDevice8_Unacquire(p) (p)->Unacquire()
+#define IDirectInputDevice8_GetDeviceState(p,a,b) (p)->GetDeviceState(a,b)
+#define IDirectInputDevice8_GetDeviceData(p,a,b,c,d) (p)->GetDeviceData(a,b,c,d)
+#define IDirectInputDevice8_SetDataFormat(p,a) (p)->SetDataFormat(a)
+#define IDirectInputDevice8_SetEventNotification(p,a) (p)->SetEventNotification(a)
+#define IDirectInputDevice8_SetCooperativeLevel(p,a,b) (p)->SetCooperativeLevel(a,b)
+#define IDirectInputDevice8_GetObjectInfo(p,a,b,c) (p)->GetObjectInfo(a,b,c)
+#define IDirectInputDevice8_GetDeviceInfo(p,a) (p)->GetDeviceInfo(a)
+#define IDirectInputDevice8_RunControlPanel(p,a,b) (p)->RunControlPanel(a,b)
+#define IDirectInputDevice8_Initialize(p,a,b,c) (p)->Initialize(a,b,c)
+/*** IDirectInputDevice2 methods ***/
+#define IDirectInputDevice8_CreateEffect(p,a,b,c,d) (p)->CreateEffect(a,b,c,d)
+#define IDirectInputDevice8_EnumEffects(p,a,b,c) (p)->EnumEffects(a,b,c)
+#define IDirectInputDevice8_GetEffectInfo(p,a,b) (p)->GetEffectInfo(a,b)
+#define IDirectInputDevice8_GetForceFeedbackState(p,a) (p)->GetForceFeedbackState(a)
+#define IDirectInputDevice8_SendForceFeedbackCommand(p,a) (p)->SendForceFeedbackCommand(a)
+#define IDirectInputDevice8_EnumCreatedEffectObjects(p,a,b,c) (p)->EnumCreatedEffectObjects(a,b,c)
+#define IDirectInputDevice8_Escape(p,a) (p)->Escape(a)
+#define IDirectInputDevice8_Poll(p) (p)->Poll()
+#define IDirectInputDevice8_SendDeviceData(p,a,b,c,d) (p)->SendDeviceData(a,b,c,d)
+/*** IDirectInputDevice7 methods ***/
+#define IDirectInputDevice8_EnumEffectsInFile(p,a,b,c,d) (p)->EnumEffectsInFile(a,b,c,d)
+#define IDirectInputDevice8_WriteEffectToFile(p,a,b,c,d) (p)->WriteEffectToFile(a,b,c,d)
+/*** IDirectInputDevice8 methods ***/
+#define IDirectInputDevice8_BuildActionMap(p,a,b,c) (p)->BuildActionMap(a,b,c)
+#define IDirectInputDevice8_SetActionMap(p,a,b,c) (p)->SetActionMap(a,b,c)
+#define IDirectInputDevice8_GetImageInfo(p,a) (p)->GetImageInfo(a)
+#endif
+
+#endif /* DI8 */
+
+/* "Standard" Mouse report... */
+typedef struct DIMOUSESTATE {
+ LONG lX;
+ LONG lY;
+ LONG lZ;
+ BYTE rgbButtons[4];
+} DIMOUSESTATE;
+
+#if DIRECTINPUT_VERSION >= 0x0700
+/* "Standard" Mouse report for DInput 7... */
+typedef struct DIMOUSESTATE2 {
+ LONG lX;
+ LONG lY;
+ LONG lZ;
+ BYTE rgbButtons[8];
+} DIMOUSESTATE2;
+#endif /* DI7 */
+
+#define DIMOFS_X FIELD_OFFSET(DIMOUSESTATE, lX)
+#define DIMOFS_Y FIELD_OFFSET(DIMOUSESTATE, lY)
+#define DIMOFS_Z FIELD_OFFSET(DIMOUSESTATE, lZ)
+#define DIMOFS_BUTTON0 (FIELD_OFFSET(DIMOUSESTATE, rgbButtons) + 0)
+#define DIMOFS_BUTTON1 (FIELD_OFFSET(DIMOUSESTATE, rgbButtons) + 1)
+#define DIMOFS_BUTTON2 (FIELD_OFFSET(DIMOUSESTATE, rgbButtons) + 2)
+#define DIMOFS_BUTTON3 (FIELD_OFFSET(DIMOUSESTATE, rgbButtons) + 3)
+#if DIRECTINPUT_VERSION >= 0x0700
+#define DIMOFS_BUTTON4 (FIELD_OFFSET(DIMOUSESTATE2, rgbButtons) + 4)
+#define DIMOFS_BUTTON5 (FIELD_OFFSET(DIMOUSESTATE2, rgbButtons) + 5)
+#define DIMOFS_BUTTON6 (FIELD_OFFSET(DIMOUSESTATE2, rgbButtons) + 6)
+#define DIMOFS_BUTTON7 (FIELD_OFFSET(DIMOUSESTATE2, rgbButtons) + 7)
+#endif /* DI7 */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+extern const DIDATAFORMAT c_dfDIMouse;
+#if DIRECTINPUT_VERSION >= 0x0700
+extern const DIDATAFORMAT c_dfDIMouse2; /* DX 7 */
+#endif /* DI7 */
+extern const DIDATAFORMAT c_dfDIKeyboard;
+#if DIRECTINPUT_VERSION >= 0x0500
+extern const DIDATAFORMAT c_dfDIJoystick;
+extern const DIDATAFORMAT c_dfDIJoystick2;
+#endif /* DI5 */
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
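
Each predefined data format pairs with one of the state layouts above: c_dfDIMouse with DIMOUSESTATE, c_dfDIKeyboard with a 256-byte key array, and c_dfDIJoystick2 with DIJOYSTATE2. A minimal polling sketch, assuming `dev` is an IDirectInputDevice2A* for a joystick and that device creation, cooperative level and error recovery are handled elsewhere; fire() is a hypothetical game action:

    DIJOYSTATE2 js;
    IDirectInputDevice2_SetDataFormat(dev, &c_dfDIJoystick2);
    IDirectInputDevice2_Acquire(dev);
    for (;;) {                            /* per-frame input loop */
        IDirectInputDevice2_Poll(dev);    /* needed for polled devices */
        if (FAILED(IDirectInputDevice2_GetDeviceState(dev, sizeof(DIJOYSTATE2), &js)))
            break;                        /* e.g. DIERR_INPUTLOST: re-Acquire in real code */
        if (js.rgbButtons[0] & 0x80)
            fire();
    }
    IDirectInputDevice2_Unacquire(dev);
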
+
+/*****************************************************************************
+ * IDirectInputA interface
+ */
+#undef INTERFACE
+#define INTERFACE IDirectInputA
+DECLARE_INTERFACE_(IDirectInputA,IUnknown)
+{
+ /*** IUnknown methods ***/
+ STDMETHOD_(HRESULT,QueryInterface)(THIS_ REFIID riid, void** ppvObject) PURE;
+ STDMETHOD_(ULONG,AddRef)(THIS) PURE;
+ STDMETHOD_(ULONG,Release)(THIS) PURE;
+ /*** IDirectInputA methods ***/
+ STDMETHOD(CreateDevice)(THIS_ REFGUID rguid, LPDIRECTINPUTDEVICEA *lplpDirectInputDevice, LPUNKNOWN pUnkOuter) PURE;
+ STDMETHOD(EnumDevices)(THIS_ DWORD dwDevType, LPDIENUMDEVICESCALLBACKA lpCallback, LPVOID pvRef, DWORD dwFlags) PURE;
+ STDMETHOD(GetDeviceStatus)(THIS_ REFGUID rguidInstance) PURE;
+ STDMETHOD(RunControlPanel)(THIS_ HWND hwndOwner, DWORD dwFlags) PURE;
+ STDMETHOD(Initialize)(THIS_ HINSTANCE hinst, DWORD dwVersion) PURE;
+};
+
+/*****************************************************************************
+ * IDirectInputW interface
+ */
+#undef INTERFACE
+#define INTERFACE IDirectInputW
+DECLARE_INTERFACE_(IDirectInputW,IUnknown)
+{
+ /*** IUnknown methods ***/
+ STDMETHOD_(HRESULT,QueryInterface)(THIS_ REFIID riid, void** ppvObject) PURE;
+ STDMETHOD_(ULONG,AddRef)(THIS) PURE;
+ STDMETHOD_(ULONG,Release)(THIS) PURE;
+ /*** IDirectInputW methods ***/
+ STDMETHOD(CreateDevice)(THIS_ REFGUID rguid, LPDIRECTINPUTDEVICEW *lplpDirectInputDevice, LPUNKNOWN pUnkOuter) PURE;
+ STDMETHOD(EnumDevices)(THIS_ DWORD dwDevType, LPDIENUMDEVICESCALLBACKW lpCallback, LPVOID pvRef, DWORD dwFlags) PURE;
+ STDMETHOD(GetDeviceStatus)(THIS_ REFGUID rguidInstance) PURE;
+ STDMETHOD(RunControlPanel)(THIS_ HWND hwndOwner, DWORD dwFlags) PURE;
+ STDMETHOD(Initialize)(THIS_ HINSTANCE hinst, DWORD dwVersion) PURE;
+};
+
+#if !defined(__cplusplus) || defined(CINTERFACE)
+/*** IUnknown methods ***/
+#define IDirectInput_QueryInterface(p,a,b) (p)->lpVtbl->QueryInterface(p,a,b)
+#define IDirectInput_AddRef(p) (p)->lpVtbl->AddRef(p)
+#define IDirectInput_Release(p) (p)->lpVtbl->Release(p)
+/*** IDirectInput methods ***/
+#define IDirectInput_CreateDevice(p,a,b,c) (p)->lpVtbl->CreateDevice(p,a,b,c)
+#define IDirectInput_EnumDevices(p,a,b,c,d) (p)->lpVtbl->EnumDevices(p,a,b,c,d)
+#define IDirectInput_GetDeviceStatus(p,a) (p)->lpVtbl->GetDeviceStatus(p,a)
+#define IDirectInput_RunControlPanel(p,a,b) (p)->lpVtbl->RunControlPanel(p,a,b)
+#define IDirectInput_Initialize(p,a,b) (p)->lpVtbl->Initialize(p,a,b)
+#else
+/*** IUnknown methods ***/
+#define IDirectInput_QueryInterface(p,a,b) (p)->QueryInterface(a,b)
+#define IDirectInput_AddRef(p) (p)->AddRef()
+#define IDirectInput_Release(p) (p)->Release()
+/*** IDirectInput methods ***/
+#define IDirectInput_CreateDevice(p,a,b,c) (p)->CreateDevice(a,b,c)
+#define IDirectInput_EnumDevices(p,a,b,c,d) (p)->EnumDevices(a,b,c,d)
+#define IDirectInput_GetDeviceStatus(p,a) (p)->GetDeviceStatus(a)
+#define IDirectInput_RunControlPanel(p,a,b) (p)->RunControlPanel(a,b)
+#define IDirectInput_Initialize(p,a,b) (p)->Initialize(a,b)
+#endif
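
A sketch of the classic (pre-DirectInput 8) creation path that ends up going through the IDirectInput macros above; hInstance and hwnd are assumed to come from the application, error handling is omitted, and DirectInputCreateA, GUID_SysKeyboard, DISCL_* and DIK_* are declared elsewhere in this header:

    LPDIRECTINPUTA di = NULL;
    LPDIRECTINPUTDEVICEA kbd = NULL;
    BYTE keys[256];

    DirectInputCreateA(hInstance, DIRECTINPUT_VERSION, &di, NULL);
    IDirectInput_CreateDevice(di, &GUID_SysKeyboard, &kbd, NULL);
    IDirectInputDevice_SetDataFormat(kbd, &c_dfDIKeyboard);
    IDirectInputDevice_SetCooperativeLevel(kbd, hwnd, DISCL_FOREGROUND | DISCL_NONEXCLUSIVE);
    IDirectInputDevice_Acquire(kbd);
    IDirectInputDevice_GetDeviceState(kbd, sizeof(keys), keys);
    if (keys[DIK_SPACE] & 0x80) {
        /* space bar is currently down */
    }
    /* Unacquire the device and Release both objects on shutdown. */
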
+
+/*****************************************************************************
+ * IDirectInput2A interface
+ */
+#undef INTERFACE
+#define INTERFACE IDirectInput2A
+DECLARE_INTERFACE_(IDirectInput2A,IDirectInputA)
+{
+ /*** IUnknown methods ***/
+ STDMETHOD_(HRESULT,QueryInterface)(THIS_ REFIID riid, void** ppvObject) PURE;
+ STDMETHOD_(ULONG,AddRef)(THIS) PURE;
+ STDMETHOD_(ULONG,Release)(THIS) PURE;
+ /*** IDirectInputA methods ***/
+ STDMETHOD(CreateDevice)(THIS_ REFGUID rguid, LPDIRECTINPUTDEVICEA *lplpDirectInputDevice, LPUNKNOWN pUnkOuter) PURE;
+ STDMETHOD(EnumDevices)(THIS_ DWORD dwDevType, LPDIENUMDEVICESCALLBACKA lpCallback, LPVOID pvRef, DWORD dwFlags) PURE;
+ STDMETHOD(GetDeviceStatus)(THIS_ REFGUID rguidInstance) PURE;
+ STDMETHOD(RunControlPanel)(THIS_ HWND hwndOwner, DWORD dwFlags) PURE;
+ STDMETHOD(Initialize)(THIS_ HINSTANCE hinst, DWORD dwVersion) PURE;
+ /*** IDirectInput2A methods ***/
+ STDMETHOD(FindDevice)(THIS_ REFGUID rguid, LPCSTR pszName, LPGUID pguidInstance) PURE;
+};
+
+/*****************************************************************************
+ * IDirectInput2W interface
+ */
+#undef INTERFACE
+#define INTERFACE IDirectInput2W
+DECLARE_INTERFACE_(IDirectInput2W,IDirectInputW)
+{
+ /*** IUnknown methods ***/
+ STDMETHOD_(HRESULT,QueryInterface)(THIS_ REFIID riid, void** ppvObject) PURE;
+ STDMETHOD_(ULONG,AddRef)(THIS) PURE;
+ STDMETHOD_(ULONG,Release)(THIS) PURE;
+ /*** IDirectInputW methods ***/
+ STDMETHOD(CreateDevice)(THIS_ REFGUID rguid, LPDIRECTINPUTDEVICEW *lplpDirectInputDevice, LPUNKNOWN pUnkOuter) PURE;
+ STDMETHOD(EnumDevices)(THIS_ DWORD dwDevType, LPDIENUMDEVICESCALLBACKW lpCallback, LPVOID pvRef, DWORD dwFlags) PURE;
+ STDMETHOD(GetDeviceStatus)(THIS_ REFGUID rguidInstance) PURE;
+ STDMETHOD(RunControlPanel)(THIS_ HWND hwndOwner, DWORD dwFlags) PURE;
+ STDMETHOD(Initialize)(THIS_ HINSTANCE hinst, DWORD dwVersion) PURE;
+ /*** IDirectInput2W methods ***/
+ STDMETHOD(FindDevice)(THIS_ REFGUID rguid, LPCWSTR pszName, LPGUID pguidInstance) PURE;
+};
+
+#if !defined(__cplusplus) || defined(CINTERFACE)
+/*** IUnknown methods ***/
+#define IDirectInput2_QueryInterface(p,a,b) (p)->lpVtbl->QueryInterface(p,a,b)
+#define IDirectInput2_AddRef(p) (p)->lpVtbl->AddRef(p)
+#define IDirectInput2_Release(p) (p)->lpVtbl->Release(p)
+/*** IDirectInput methods ***/
+#define IDirectInput2_CreateDevice(p,a,b,c) (p)->lpVtbl->CreateDevice(p,a,b,c)
+#define IDirectInput2_EnumDevices(p,a,b,c,d) (p)->lpVtbl->EnumDevices(p,a,b,c,d)
+#define IDirectInput2_GetDeviceStatus(p,a) (p)->lpVtbl->GetDeviceStatus(p,a)
+#define IDirectInput2_RunControlPanel(p,a,b) (p)->lpVtbl->RunControlPanel(p,a,b)
+#define IDirectInput2_Initialize(p,a,b) (p)->lpVtbl->Initialize(p,a,b)
+/*** IDirectInput2 methods ***/
+#define IDirectInput2_FindDevice(p,a,b,c) (p)->lpVtbl->FindDevice(p,a,b,c)
+#else
+/*** IUnknown methods ***/
+#define IDirectInput2_QueryInterface(p,a,b) (p)->QueryInterface(a,b)
+#define IDirectInput2_AddRef(p) (p)->AddRef()
+#define IDirectInput2_Release(p) (p)->Release()
+/*** IDirectInput methods ***/
+#define IDirectInput2_CreateDevice(p,a,b,c) (p)->CreateDevice(a,b,c)
+#define IDirectInput2_EnumDevices(p,a,b,c,d) (p)->EnumDevices(a,b,c,d)
+#define IDirectInput2_GetDeviceStatus(p,a) (p)->GetDeviceStatus(a)
+#define IDirectInput2_RunControlPanel(p,a,b) (p)->RunControlPanel(a,b)
+#define IDirectInput2_Initialize(p,a,b) (p)->Initialize(a,b)
+/*** IDirectInput2 methods ***/
+#define IDirectInput2_FindDevice(p,a,b,c) (p)->FindDevice(a,b,c)
+#endif
+
+/*****************************************************************************
+ * IDirectInput7A interface
+ */
+#undef INTERFACE
+#define INTERFACE IDirectInput7A
+DECLARE_INTERFACE_(IDirectInput7A,IDirectInput2A)
+{
+ /*** IUnknown methods ***/
+ STDMETHOD_(HRESULT,QueryInterface)(THIS_ REFIID riid, void** ppvObject) PURE;
+ STDMETHOD_(ULONG,AddRef)(THIS) PURE;
+ STDMETHOD_(ULONG,Release)(THIS) PURE;
+ /*** IDirectInputA methods ***/
+ STDMETHOD(CreateDevice)(THIS_ REFGUID rguid, LPDIRECTINPUTDEVICEA *lplpDirectInputDevice, LPUNKNOWN pUnkOuter) PURE;
+ STDMETHOD(EnumDevices)(THIS_ DWORD dwDevType, LPDIENUMDEVICESCALLBACKA lpCallback, LPVOID pvRef, DWORD dwFlags) PURE;
+ STDMETHOD(GetDeviceStatus)(THIS_ REFGUID rguidInstance) PURE;
+ STDMETHOD(RunControlPanel)(THIS_ HWND hwndOwner, DWORD dwFlags) PURE;
+ STDMETHOD(Initialize)(THIS_ HINSTANCE hinst, DWORD dwVersion) PURE;
+ /*** IDirectInput2A methods ***/
+ STDMETHOD(FindDevice)(THIS_ REFGUID rguid, LPCSTR pszName, LPGUID pguidInstance) PURE;
+ /*** IDirectInput7A methods ***/
+ STDMETHOD(CreateDeviceEx)(THIS_ REFGUID rguid, REFIID riid, LPVOID *pvOut, LPUNKNOWN lpUnknownOuter) PURE;
+};
+
+/*****************************************************************************
+ * IDirectInput7W interface
+ */
+#undef INTERFACE
+#define INTERFACE IDirectInput7W
+DECLARE_INTERFACE_(IDirectInput7W,IDirectInput2W)
+{
+ /*** IUnknown methods ***/
+ STDMETHOD_(HRESULT,QueryInterface)(THIS_ REFIID riid, void** ppvObject) PURE;
+ STDMETHOD_(ULONG,AddRef)(THIS) PURE;
+ STDMETHOD_(ULONG,Release)(THIS) PURE;
+ /*** IDirectInputW methods ***/
+ STDMETHOD(CreateDevice)(THIS_ REFGUID rguid, LPDIRECTINPUTDEVICEW *lplpDirectInputDevice, LPUNKNOWN pUnkOuter) PURE;
+ STDMETHOD(EnumDevices)(THIS_ DWORD dwDevType, LPDIENUMDEVICESCALLBACKW lpCallback, LPVOID pvRef, DWORD dwFlags) PURE;
+ STDMETHOD(GetDeviceStatus)(THIS_ REFGUID rguidInstance) PURE;
+ STDMETHOD(RunControlPanel)(THIS_ HWND hwndOwner, DWORD dwFlags) PURE;
+ STDMETHOD(Initialize)(THIS_ HINSTANCE hinst, DWORD dwVersion) PURE;
+ /*** IDirectInput2W methods ***/
+ STDMETHOD(FindDevice)(THIS_ REFGUID rguid, LPCWSTR pszName, LPGUID pguidInstance) PURE;
+ /*** IDirectInput7W methods ***/
+ STDMETHOD(CreateDeviceEx)(THIS_ REFGUID rguid, REFIID riid, LPVOID *pvOut, LPUNKNOWN lpUnknownOuter) PURE;
+};
+
+#if !defined(__cplusplus) || defined(CINTERFACE)
+/*** IUnknown methods ***/
+#define IDirectInput7_QueryInterface(p,a,b) (p)->lpVtbl->QueryInterface(p,a,b)
+#define IDirectInput7_AddRef(p) (p)->lpVtbl->AddRef(p)
+#define IDirectInput7_Release(p) (p)->lpVtbl->Release(p)
+/*** IDirectInput methods ***/
+#define IDirectInput7_CreateDevice(p,a,b,c) (p)->lpVtbl->CreateDevice(p,a,b,c)
+#define IDirectInput7_EnumDevices(p,a,b,c,d) (p)->lpVtbl->EnumDevices(p,a,b,c,d)
+#define IDirectInput7_GetDeviceStatus(p,a) (p)->lpVtbl->GetDeviceStatus(p,a)
+#define IDirectInput7_RunControlPanel(p,a,b) (p)->lpVtbl->RunControlPanel(p,a,b)
+#define IDirectInput7_Initialize(p,a,b) (p)->lpVtbl->Initialize(p,a,b)
+/*** IDirectInput2 methods ***/
+#define IDirectInput7_FindDevice(p,a,b,c) (p)->lpVtbl->FindDevice(p,a,b,c)
+/*** IDirectInput7 methods ***/
+#define IDirectInput7_CreateDeviceEx(p,a,b,c,d) (p)->lpVtbl->CreateDeviceEx(p,a,b,c,d)
+#else
+/*** IUnknown methods ***/
+#define IDirectInput7_QueryInterface(p,a,b) (p)->QueryInterface(a,b)
+#define IDirectInput7_AddRef(p) (p)->AddRef()
+#define IDirectInput7_Release(p) (p)->Release()
+/*** IDirectInput methods ***/
+#define IDirectInput7_CreateDevice(p,a,b,c) (p)->CreateDevice(a,b,c)
+#define IDirectInput7_EnumDevices(p,a,b,c,d) (p)->EnumDevices(a,b,c,d)
+#define IDirectInput7_GetDeviceStatus(p,a) (p)->GetDeviceStatus(a)
+#define IDirectInput7_RunControlPanel(p,a,b) (p)->RunControlPanel(a,b)
+#define IDirectInput7_Initialize(p,a,b) (p)->Initialize(a,b)
+/*** IDirectInput2 methods ***/
+#define IDirectInput7_FindDevice(p,a,b,c) (p)->FindDevice(a,b,c)
+/*** IDirectInput7 methods ***/
+#define IDirectInput7_CreateDeviceEx(p,a,b,c,d) (p)->CreateDeviceEx(a,b,c,d)
+#endif
+
+
+#if DIRECTINPUT_VERSION >= 0x0800
+/*****************************************************************************
+ * IDirectInput8A interface
+ */
+#undef INTERFACE
+#define INTERFACE IDirectInput8A
+DECLARE_INTERFACE_(IDirectInput8A,IUnknown)
+{
+ /*** IUnknown methods ***/
+ STDMETHOD_(HRESULT,QueryInterface)(THIS_ REFIID riid, void** ppvObject) PURE;
+ STDMETHOD_(ULONG,AddRef)(THIS) PURE;
+ STDMETHOD_(ULONG,Release)(THIS) PURE;
+ /*** IDirectInput8A methods ***/
+ STDMETHOD(CreateDevice)(THIS_ REFGUID rguid, LPDIRECTINPUTDEVICE8A *lplpDirectInputDevice, LPUNKNOWN pUnkOuter) PURE;
+ STDMETHOD(EnumDevices)(THIS_ DWORD dwDevType, LPDIENUMDEVICESCALLBACKA lpCallback, LPVOID pvRef, DWORD dwFlags) PURE;
+ STDMETHOD(GetDeviceStatus)(THIS_ REFGUID rguidInstance) PURE;
+ STDMETHOD(RunControlPanel)(THIS_ HWND hwndOwner, DWORD dwFlags) PURE;
+ STDMETHOD(Initialize)(THIS_ HINSTANCE hinst, DWORD dwVersion) PURE;
+ STDMETHOD(FindDevice)(THIS_ REFGUID rguid, LPCSTR pszName, LPGUID pguidInstance) PURE;
+ STDMETHOD(EnumDevicesBySemantics)(THIS_ LPCSTR ptszUserName, LPDIACTIONFORMATA lpdiActionFormat, LPDIENUMDEVICESBYSEMANTICSCBA lpCallback, LPVOID pvRef, DWORD dwFlags) PURE;
+ STDMETHOD(ConfigureDevices)(THIS_ LPDICONFIGUREDEVICESCALLBACK lpdiCallback, LPDICONFIGUREDEVICESPARAMSA lpdiCDParams, DWORD dwFlags, LPVOID pvRefData) PURE;
+};
+
+/*****************************************************************************
+ * IDirectInput8W interface
+ */
+#undef INTERFACE
+#define INTERFACE IDirectInput8W
+DECLARE_INTERFACE_(IDirectInput8W,IUnknown)
+{
+ /*** IUnknown methods ***/
+ STDMETHOD_(HRESULT,QueryInterface)(THIS_ REFIID riid, void** ppvObject) PURE;
+ STDMETHOD_(ULONG,AddRef)(THIS) PURE;
+ STDMETHOD_(ULONG,Release)(THIS) PURE;
+ /*** IDirectInput8W methods ***/
+ STDMETHOD(CreateDevice)(THIS_ REFGUID rguid, LPDIRECTINPUTDEVICE8W *lplpDirectInputDevice, LPUNKNOWN pUnkOuter) PURE;
+ STDMETHOD(EnumDevices)(THIS_ DWORD dwDevType, LPDIENUMDEVICESCALLBACKW lpCallback, LPVOID pvRef, DWORD dwFlags) PURE;
+ STDMETHOD(GetDeviceStatus)(THIS_ REFGUID rguidInstance) PURE;
+ STDMETHOD(RunControlPanel)(THIS_ HWND hwndOwner, DWORD dwFlags) PURE;
+ STDMETHOD(Initialize)(THIS_ HINSTANCE hinst, DWORD dwVersion) PURE;
+ STDMETHOD(FindDevice)(THIS_ REFGUID rguid, LPCWSTR pszName, LPGUID pguidInstance) PURE;
+ STDMETHOD(EnumDevicesBySemantics)(THIS_ LPCWSTR ptszUserName, LPDIACTIONFORMATW lpdiActionFormat, LPDIENUMDEVICESBYSEMANTICSCBW lpCallback, LPVOID pvRef, DWORD dwFlags) PURE;
+ STDMETHOD(ConfigureDevices)(THIS_ LPDICONFIGUREDEVICESCALLBACK lpdiCallback, LPDICONFIGUREDEVICESPARAMSW lpdiCDParams, DWORD dwFlags, LPVOID pvRefData) PURE;
+};
+#undef INTERFACE
+
+#if !defined(__cplusplus) || defined(CINTERFACE)
+/*** IUnknown methods ***/
+#define IDirectInput8_QueryInterface(p,a,b) (p)->lpVtbl->QueryInterface(p,a,b)
+#define IDirectInput8_AddRef(p) (p)->lpVtbl->AddRef(p)
+#define IDirectInput8_Release(p) (p)->lpVtbl->Release(p)
+/*** IDirectInput8 methods ***/
+#define IDirectInput8_CreateDevice(p,a,b,c) (p)->lpVtbl->CreateDevice(p,a,b,c)
+#define IDirectInput8_EnumDevices(p,a,b,c,d) (p)->lpVtbl->EnumDevices(p,a,b,c,d)
+#define IDirectInput8_GetDeviceStatus(p,a) (p)->lpVtbl->GetDeviceStatus(p,a)
+#define IDirectInput8_RunControlPanel(p,a,b) (p)->lpVtbl->RunControlPanel(p,a,b)
+#define IDirectInput8_Initialize(p,a,b) (p)->lpVtbl->Initialize(p,a,b)
+#define IDirectInput8_FindDevice(p,a,b,c) (p)->lpVtbl->FindDevice(p,a,b,c)
+#define IDirectInput8_EnumDevicesBySemantics(p,a,b,c,d,e) (p)->lpVtbl->EnumDevicesBySemantics(p,a,b,c,d,e)
+#define IDirectInput8_ConfigureDevices(p,a,b,c,d) (p)->lpVtbl->ConfigureDevices(p,a,b,c,d)
+#else
+/*** IUnknown methods ***/
+#define IDirectInput8_QueryInterface(p,a,b) (p)->QueryInterface(a,b)
+#define IDirectInput8_AddRef(p) (p)->AddRef()
+#define IDirectInput8_Release(p) (p)->Release()
+/*** IDirectInput8 methods ***/
+#define IDirectInput8_CreateDevice(p,a,b,c) (p)->CreateDevice(a,b,c)
+#define IDirectInput8_EnumDevices(p,a,b,c,d) (p)->EnumDevices(a,b,c,d)
+#define IDirectInput8_GetDeviceStatus(p,a) (p)->GetDeviceStatus(a)
+#define IDirectInput8_RunControlPanel(p,a,b) (p)->RunControlPanel(a,b)
+#define IDirectInput8_Initialize(p,a,b) (p)->Initialize(a,b)
+#define IDirectInput8_FindDevice(p,a,b,c) (p)->FindDevice(a,b,c)
+#define IDirectInput8_EnumDevicesBySemantics(p,a,b,c,d,e) (p)->EnumDevicesBySemantics(a,b,c,d,e)
+#define IDirectInput8_ConfigureDevices(p,a,b,c,d) (p)->ConfigureDevices(a,b,c,d)
+#endif
+
+#endif /* DI8 */
+
+/* Export functions */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if DIRECTINPUT_VERSION >= 0x0800
+HRESULT WINAPI DirectInput8Create(HINSTANCE,DWORD,REFIID,LPVOID *,LPUNKNOWN);
+#else /* DI < 8 */
+HRESULT WINAPI DirectInputCreateA(HINSTANCE,DWORD,LPDIRECTINPUTA *,LPUNKNOWN);
+HRESULT WINAPI DirectInputCreateW(HINSTANCE,DWORD,LPDIRECTINPUTW *,LPUNKNOWN);
+#define DirectInputCreate WINELIB_NAME_AW(DirectInputCreate)
+
+HRESULT WINAPI DirectInputCreateEx(HINSTANCE,DWORD,REFIID,LPVOID *,LPUNKNOWN);
+#endif /* DI8 */
+
+#ifdef __cplusplus
+};
+#endif
+
+#endif /* __DINPUT_INCLUDED__ */
diff --git a/chromium/third_party/dawn/third_party/glfw/deps/mingw/xinput.h b/chromium/third_party/dawn/third_party/glfw/deps/mingw/xinput.h
new file mode 100644
index 00000000000..d3ca726ce20
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/deps/mingw/xinput.h
@@ -0,0 +1,239 @@
+/*
+ * The Wine project - Xinput Joystick Library
+ * Copyright 2008 Andrew Fenn
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
+ */
+
+#ifndef __WINE_XINPUT_H
+#define __WINE_XINPUT_H
+
+#include <windef.h>
+
+/*
+ * Bitmasks for the joystick buttons. These determine what has
+ * been pressed on the joystick and need to be mapped to
+ * whatever device you're using instead of an Xbox 360
+ * controller.
+ */
+
+#define XINPUT_GAMEPAD_DPAD_UP 0x0001
+#define XINPUT_GAMEPAD_DPAD_DOWN 0x0002
+#define XINPUT_GAMEPAD_DPAD_LEFT 0x0004
+#define XINPUT_GAMEPAD_DPAD_RIGHT 0x0008
+#define XINPUT_GAMEPAD_START 0x0010
+#define XINPUT_GAMEPAD_BACK 0x0020
+#define XINPUT_GAMEPAD_LEFT_THUMB 0x0040
+#define XINPUT_GAMEPAD_RIGHT_THUMB 0x0080
+#define XINPUT_GAMEPAD_LEFT_SHOULDER 0x0100
+#define XINPUT_GAMEPAD_RIGHT_SHOULDER 0x0200
+#define XINPUT_GAMEPAD_A 0x1000
+#define XINPUT_GAMEPAD_B 0x2000
+#define XINPUT_GAMEPAD_X 0x4000
+#define XINPUT_GAMEPAD_Y 0x8000
+
+/*
+ * Defines the flags used to determine if the user is pushing
+ * down on a button, not holding a button, etc
+ */
+
+#define XINPUT_KEYSTROKE_KEYDOWN 0x0001
+#define XINPUT_KEYSTROKE_KEYUP 0x0002
+#define XINPUT_KEYSTROKE_REPEAT 0x0004
+
+/*
+ * Defines the codes which are returned by XInputGetKeystroke
+ */
+
+#define VK_PAD_A 0x5800
+#define VK_PAD_B 0x5801
+#define VK_PAD_X 0x5802
+#define VK_PAD_Y 0x5803
+#define VK_PAD_RSHOULDER 0x5804
+#define VK_PAD_LSHOULDER 0x5805
+#define VK_PAD_LTRIGGER 0x5806
+#define VK_PAD_RTRIGGER 0x5807
+#define VK_PAD_DPAD_UP 0x5810
+#define VK_PAD_DPAD_DOWN 0x5811
+#define VK_PAD_DPAD_LEFT 0x5812
+#define VK_PAD_DPAD_RIGHT 0x5813
+#define VK_PAD_START 0x5814
+#define VK_PAD_BACK 0x5815
+#define VK_PAD_LTHUMB_PRESS 0x5816
+#define VK_PAD_RTHUMB_PRESS 0x5817
+#define VK_PAD_LTHUMB_UP 0x5820
+#define VK_PAD_LTHUMB_DOWN 0x5821
+#define VK_PAD_LTHUMB_RIGHT 0x5822
+#define VK_PAD_LTHUMB_LEFT 0x5823
+#define VK_PAD_LTHUMB_UPLEFT 0x5824
+#define VK_PAD_LTHUMB_UPRIGHT 0x5825
+#define VK_PAD_LTHUMB_DOWNRIGHT 0x5826
+#define VK_PAD_LTHUMB_DOWNLEFT 0x5827
+#define VK_PAD_RTHUMB_UP 0x5830
+#define VK_PAD_RTHUMB_DOWN 0x5831
+#define VK_PAD_RTHUMB_RIGHT 0x5832
+#define VK_PAD_RTHUMB_LEFT 0x5833
+#define VK_PAD_RTHUMB_UPLEFT 0x5834
+#define VK_PAD_RTHUMB_UPRIGHT 0x5835
+#define VK_PAD_RTHUMB_DOWNRIGHT 0x5836
+#define VK_PAD_RTHUMB_DOWNLEFT 0x5837
+
+/*
+ * Deadzones apply to the analogue joystick controls on the
+ * joypad and determine when input should be treated as centred
+ * on the pad. They act as a threshold that stops the joypad
+ * from controlling the game when the player isn't touching the
+ * controls.
+ */
+
+#define XINPUT_GAMEPAD_LEFT_THUMB_DEADZONE 7849
+#define XINPUT_GAMEPAD_RIGHT_THUMB_DEADZONE 8689
+#define XINPUT_GAMEPAD_TRIGGER_THRESHOLD 30
+
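+/*
+ * A minimal usage sketch: poll pad 0 with XInputGetState (declared at the
+ * end of this header) and ignore stick input that falls inside the
+ * left-thumb deadzone. A return value of 0 (ERROR_SUCCESS) means the
+ * controller is connected.
+ *
+ *   XINPUT_STATE state;
+ *   if (XInputGetState(0, &state) == 0) {
+ *       SHORT lx = state.Gamepad.sThumbLX;
+ *       SHORT ly = state.Gamepad.sThumbLY;
+ *       if (lx > -XINPUT_GAMEPAD_LEFT_THUMB_DEADZONE &&
+ *           lx <  XINPUT_GAMEPAD_LEFT_THUMB_DEADZONE)
+ *           lx = 0;
+ *       if (ly > -XINPUT_GAMEPAD_LEFT_THUMB_DEADZONE &&
+ *           ly <  XINPUT_GAMEPAD_LEFT_THUMB_DEADZONE)
+ *           ly = 0;
+ *   }
+ */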
+
+/*
+ * Defines what type of abilities the joystick has.
+ * DEVTYPE_GAMEPAD is available for all joysticks, however
+ * there may be more specific identifiers for other joysticks
+ * which are being used.
+ */
+
+#define XINPUT_DEVTYPE_GAMEPAD 0x01
+#define XINPUT_DEVSUBTYPE_GAMEPAD 0x01
+#define XINPUT_DEVSUBTYPE_WHEEL 0x02
+#define XINPUT_DEVSUBTYPE_ARCADE_STICK 0x03
+#define XINPUT_DEVSUBTYPE_FLIGHT_SICK 0x04
+#define XINPUT_DEVSUBTYPE_DANCE_PAD 0x05
+#define XINPUT_DEVSUBTYPE_GUITAR 0x06
+#define XINPUT_DEVSUBTYPE_DRUM_KIT 0x08
+
+/*
+ * These are used with the XInputGetCapabilities function to
+ * determine the abilities of the joystick which has been
+ * plugged in.
+ */
+
+#define XINPUT_CAPS_VOICE_SUPPORTED 0x0004
+#define XINPUT_FLAG_GAMEPAD 0x00000001
+
+/*
+ * Defines the status of the battery if one is used in the
+ * attached joystick. The first two define if the joystick
+ * supports a battery. Disconnected means that the joystick
+ * isn't connected. Wired shows that the joystick is a wired
+ * joystick.
+ */
+
+#define BATTERY_DEVTYPE_GAMEPAD 0x00
+#define BATTERY_DEVTYPE_HEADSET 0x01
+#define BATTERY_TYPE_DISCONNECTED 0x00
+#define BATTERY_TYPE_WIRED 0x01
+#define BATTERY_TYPE_ALKALINE 0x02
+#define BATTERY_TYPE_NIMH 0x03
+#define BATTERY_TYPE_UNKNOWN 0xFF
+#define BATTERY_LEVEL_EMPTY 0x00
+#define BATTERY_LEVEL_LOW 0x01
+#define BATTERY_LEVEL_MEDIUM 0x02
+#define BATTERY_LEVEL_FULL 0x03
+
+/*
+ * How many joysticks can be used with this library. Games that
+ * use the xinput library will not go over this number.
+ */
+
+#define XUSER_MAX_COUNT 4
+#define XUSER_INDEX_ANY 0x000000FF
+
+/*
+ * Defines the structure of an xbox 360 joystick.
+ */
+
+typedef struct _XINPUT_GAMEPAD {
+ WORD wButtons;
+ BYTE bLeftTrigger;
+ BYTE bRightTrigger;
+ SHORT sThumbLX;
+ SHORT sThumbLY;
+ SHORT sThumbRX;
+ SHORT sThumbRY;
+} XINPUT_GAMEPAD, *PXINPUT_GAMEPAD;
+
+typedef struct _XINPUT_STATE {
+ DWORD dwPacketNumber;
+ XINPUT_GAMEPAD Gamepad;
+} XINPUT_STATE, *PXINPUT_STATE;
+
+/*
+ * Defines the structure of how much vibration is set on both the
+ * right and left motors in a joystick. If you're not using a 360
+ * joystick you will have to map these to your device.
+ */
+
+typedef struct _XINPUT_VIBRATION {
+ WORD wLeftMotorSpeed;
+ WORD wRightMotorSpeed;
+} XINPUT_VIBRATION, *PXINPUT_VIBRATION;
+
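+/*
+ * A minimal usage sketch: run both motors of pad 0 at roughly half speed
+ * using XInputSetState (declared at the end of this header). Motor speeds
+ * range from 0 (off) to 65535 (full speed).
+ *
+ *   XINPUT_VIBRATION vib;
+ *   vib.wLeftMotorSpeed  = 32768;
+ *   vib.wRightMotorSpeed = 32768;
+ *   XInputSetState(0, &vib);
+ */
+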
+/*
+ * Defines the structure for what kind of abilities the joystick
+ * has. Such abilities include whether the joystick can send and
+ * receive audio, whether it is in fact a driving wheel, or
+ * perhaps whether it is some kind of dance pad or guitar.
+ */
+
+typedef struct _XINPUT_CAPABILITIES {
+ BYTE Type;
+ BYTE SubType;
+ WORD Flags;
+ XINPUT_GAMEPAD Gamepad;
+ XINPUT_VIBRATION Vibration;
+} XINPUT_CAPABILITIES, *PXINPUT_CAPABILITIES;
+
+/*
+ * Defines the structure for a joystick input event which is
+ * retrieved using the function XInputGetKeystroke
+ */
+typedef struct _XINPUT_KEYSTROKE {
+ WORD VirtualKey;
+ WCHAR Unicode;
+ WORD Flags;
+ BYTE UserIndex;
+ BYTE HidCode;
+} XINPUT_KEYSTROKE, *PXINPUT_KEYSTROKE;
+
+typedef struct _XINPUT_BATTERY_INFORMATION
+{
+ BYTE BatteryType;
+ BYTE BatteryLevel;
+} XINPUT_BATTERY_INFORMATION, *PXINPUT_BATTERY_INFORMATION;
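+
+/*
+ * A minimal usage sketch: query the battery of the gamepad on pad 0 with
+ * XInputGetBatteryInformation (declared below); a return value of 0
+ * (ERROR_SUCCESS) means the query succeeded. warn_player_about_battery()
+ * is a placeholder for your own handling.
+ *
+ *   XINPUT_BATTERY_INFORMATION info;
+ *   if (XInputGetBatteryInformation(0, BATTERY_DEVTYPE_GAMEPAD, &info) == 0
+ *       && info.BatteryType != BATTERY_TYPE_WIRED
+ *       && info.BatteryLevel <= BATTERY_LEVEL_LOW)
+ *       warn_player_about_battery();
+ */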
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void WINAPI XInputEnable(WINBOOL);
+DWORD WINAPI XInputSetState(DWORD, XINPUT_VIBRATION*);
+DWORD WINAPI XInputGetState(DWORD, XINPUT_STATE*);
+DWORD WINAPI XInputGetKeystroke(DWORD, DWORD, PXINPUT_KEYSTROKE);
+DWORD WINAPI XInputGetCapabilities(DWORD, DWORD, XINPUT_CAPABILITIES*);
+DWORD WINAPI XInputGetDSoundAudioDeviceGuids(DWORD, GUID*, GUID*);
+DWORD WINAPI XInputGetBatteryInformation(DWORD, BYTE, XINPUT_BATTERY_INFORMATION*);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __WINE_XINPUT_H */
diff --git a/chromium/third_party/dawn/third_party/glfw/deps/nuklear.h b/chromium/third_party/dawn/third_party/glfw/deps/nuklear.h
new file mode 100644
index 00000000000..f2eb9dfa2c3
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/deps/nuklear.h
@@ -0,0 +1,25778 @@
+/*
+/// # Nuklear
+/// ![](https://cloud.githubusercontent.com/assets/8057201/11761525/ae06f0ca-a0c6-11e5-819d-5610b25f6ef4.gif)
+///
+/// ## Contents
+/// 1. About section
+/// 2. Highlights section
+/// 3. Features section
+/// 4. Usage section
+/// 1. Flags section
+/// 2. Constants section
+/// 3. Dependencies section
+/// 5. Example section
+/// 6. API section
+/// 1. Context section
+/// 2. Input section
+/// 3. Drawing section
+/// 4. Window section
+/// 5. Layouting section
+/// 6. Groups section
+/// 7. Tree section
+/// 8. Properties section
+/// 7. License section
+/// 8. Changelog section
+/// 9. Gallery section
+/// 10. Credits section
+///
+/// ## About
+/// This is a minimal state immediate mode graphical user interface toolkit
+/// written in ANSI C and licensed under public domain. It was designed as a simple
+/// embeddable user interface for applications and does not have any dependencies,
+/// a default render backend or OS window and input handling, but instead provides a very modular
+/// library approach by using simple input state for input and draw
+/// commands describing primitive shapes as output. So instead of providing a
+/// layered library that tries to abstract over a number of platforms and
+/// render backends, it only focuses on the actual UI.
+///
+/// ## Highlights
+/// - Graphical user interface toolkit
+/// - Single header library
+/// - Written in C89 (a.k.a. ANSI C or ISO C90)
+/// - Small codebase (~18kLOC)
+/// - Focus on portability, efficiency and simplicity
+/// - No dependencies (not even the standard library if not wanted)
+/// - Fully skinnable and customizable
+/// - Low memory footprint with total memory control if needed or wanted
+/// - UTF-8 support
+/// - No global or hidden state
+/// - Customizable library modules (you can compile and use only what you need)
+/// - Optional font baker and vertex buffer output
+///
+/// ## Features
+/// - Absolutely no platform dependent code
+/// - Memory management control ranging from/to
+/// - Ease of use by allocating everything from standard library
+/// - Control every byte of memory inside the library
+/// - Font handling control ranging from/to
+/// - Use your own font implementation for everything
+/// - Use this library's internal font baking and handling API
+/// - Drawing output control ranging from/to
+/// - Simple shapes for more high level APIs which already have drawing capabilities
+/// - Hardware accessible anti-aliased vertex buffer output
+/// - Customizable colors and properties ranging from/to
+/// - Simple changes to color by filling a simple color table
+/// - Complete control with ability to use skinning to decorate widgets
+/// - Bendable UI library with widgets ranging from/to
+/// - Basic widgets like buttons, checkboxes, sliders, ...
+/// - Advanced widgets like abstract comboboxes, contextual menus,...
+/// - Compile time configuration to only compile what you need
+/// - Subset which can be used if you do not want to link or use the standard library
+/// - Can be easily modified to only update on user input instead of frame updates
+///
+/// ## Usage
+/// This library is self contained in one single header file and can be used either
+/// in header only mode or in implementation mode. The header only mode is used
+/// by default when included and allows including this header in other headers
+/// and does not contain the actual implementation. <br /><br />
+///
+/// The implementation mode requires defining the preprocessor macro
+/// NK_IMPLEMENTATION in *one* .c/.cpp file before #includeing this file, e.g.:
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~C
+/// #define NK_IMPLEMENTATION
+/// #include "nuklear.h"
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Also optionally define the symbols listed in the sections below
+/// in header and implementation mode if you want to use additional functionality
+/// or need more control over the library.
+///
+/// !!! WARNING
+/// Every time nuklear is included, define the same compiler flags. This is very important; not doing so could lead to compiler errors or, even worse, stack corruption.
+///
+/// ### Flags
+/// Flag | Description
+/// --------------------------------|------------------------------------------
+/// NK_PRIVATE | If defined declares all functions as static, so they can only be accessed inside the file that contains the implementation
+/// NK_INCLUDE_FIXED_TYPES | If defined it will include header `<stdint.h>` for fixed sized types otherwise nuklear tries to select the correct type. If that fails it will throw a compiler error and you have to select the correct types yourself.
+/// NK_INCLUDE_DEFAULT_ALLOCATOR | If defined it will include header `<stdlib.h>` and provide additional functions to use this library without caring for memory allocation control and therefore ease memory management.
+/// NK_INCLUDE_STANDARD_IO | If defined it will include header `<stdio.h>` and provide additional functions depending on file loading.
+/// NK_INCLUDE_STANDARD_VARARGS | If defined it will include header `<stdarg.h>` and provide additional functions that take variable arguments.
+/// NK_INCLUDE_VERTEX_BUFFER_OUTPUT | Defining this adds a vertex draw command list backend to this library, which allows you to convert queue commands into vertex draw commands. This is mainly if you need a hardware accessible format for OpenGL, DirectX, Vulkan, Metal,...
+/// NK_INCLUDE_FONT_BAKING | Defining this adds `stb_truetype` and `stb_rect_pack` implementation to this library and provides font baking and rendering. If you already have font handling or do not want to use this font handler you don't have to define it.
+/// NK_INCLUDE_DEFAULT_FONT | Defining this adds the default font: ProggyClean.ttf into this library which can be loaded into a font atlas and allows using this library without having a truetype font
+/// NK_INCLUDE_COMMAND_USERDATA | Defining this adds a userdata pointer into each command. Can be useful for example if you want to provide custom shaders depending on the used widget. Can be combined with the style structures.
+/// NK_BUTTON_TRIGGER_ON_RELEASE | Different platforms require button clicks occurring either on buttons being pressed (up to down) or released (down to up). By default this library will react on buttons being pressed, but if you define this it will only trigger if a button is released.
+/// NK_ZERO_COMMAND_MEMORY | Defining this will zero out memory for each drawing command added to a drawing queue (inside nk_command_buffer_push). Zeroing command memory is very useful for fast checking (using memcmp) if command buffers are equal and avoid drawing frames when nothing on screen has changed since previous frame.
+/// NK_UINT_DRAW_INDEX | Defining this will set the size of vertex index elements when using NK_INCLUDE_VERTEX_BUFFER_OUTPUT to 32bit instead of the default of 16bit
+/// NK_KEYSTATE_BASED_INPUT | Define this if your backend uses key state for each frame rather than key press/release events
+///
+/// !!! WARNING
+/// The following flags will pull in the standard C library:
+/// - NK_INCLUDE_DEFAULT_ALLOCATOR
+/// - NK_INCLUDE_STANDARD_IO
+/// - NK_INCLUDE_STANDARD_VARARGS
+///
+/// !!! WARNING
+/// The following flags, if defined, need to be defined for both header and implementation (one way to keep them in sync is sketched below):
+/// - NK_INCLUDE_FIXED_TYPES
+/// - NK_INCLUDE_DEFAULT_ALLOCATOR
+/// - NK_INCLUDE_STANDARD_VARARGS
+/// - NK_INCLUDE_VERTEX_BUFFER_OUTPUT
+/// - NK_INCLUDE_FONT_BAKING
+/// - NK_INCLUDE_DEFAULT_FONT
+/// - NK_INCLUDE_COMMAND_USERDATA
+/// - NK_UINT_DRAW_INDEX
+///
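+/// One way to keep these flags consistent is to funnel every include of this
+/// header through a single project header. In the sketch below `nuklear_config.h`
+/// and `nuklear_impl.c` are hypothetical file names:
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// // nuklear_config.h -- the one place where the flags are chosen
+/// #define NK_INCLUDE_FIXED_TYPES
+/// #define NK_INCLUDE_DEFAULT_ALLOCATOR
+/// #define NK_INCLUDE_VERTEX_BUFFER_OUTPUT
+/// #define NK_INCLUDE_FONT_BAKING
+/// #include "nuklear.h"
+///
+/// // nuklear_impl.c -- exactly one translation unit additionally defines
+/// // NK_IMPLEMENTATION before pulling in the shared header
+/// #define NK_IMPLEMENTATION
+/// #include "nuklear_config.h"
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///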
+/// ### Constants
+/// Define | Description
+/// --------------------------------|---------------------------------------
+/// NK_BUFFER_DEFAULT_INITIAL_SIZE | Initial buffer size allocated by all buffers while using the default allocator functions included by defining NK_INCLUDE_DEFAULT_ALLOCATOR. If you don't want to allocate the default 4k memory then redefine it.
+/// NK_MAX_NUMBER_BUFFER | Maximum buffer size for the conversion buffer between float and string. Under normal circumstances this should be more than sufficient.
+/// NK_INPUT_MAX | Defines the max number of bytes which can be added as text input in one frame. Under normal circumstances this should be more than sufficient.
+///
+/// !!! WARNING
+/// The following constants if defined need to be defined for both header and implementation:
+/// - NK_MAX_NUMBER_BUFFER
+/// - NK_BUFFER_DEFAULT_INITIAL_SIZE
+/// - NK_INPUT_MAX
+///
+/// ### Dependencies
+/// Function | Description
+/// ------------|---------------------------------------------------------------
+/// NK_ASSERT | If you don't define this, nuklear will use <assert.h> with assert().
+/// NK_MEMSET | You can define this to 'memset' or your own memset implementation replacement. If not nuklear will use its own version.
+/// NK_MEMCPY | You can define this to 'memcpy' or your own memcpy implementation replacement. If not nuklear will use its own version.
+/// NK_SQRT | You can define this to 'sqrt' or your own sqrt implementation replacement. If not nuklear will use its own slow and not highly accurate version.
+/// NK_SIN | You can define this to 'sinf' or your own sine implementation replacement. If not nuklear will use its own approximation implementation.
+/// NK_COS | You can define this to 'cosf' or your own cosine implementation replacement. If not nuklear will use its own approximation implementation.
+/// NK_STRTOD | You can define this to `strtod` or your own string to double conversion implementation replacement. If not defined nuklear will use its own imprecise and possibly unsafe version (does not handle nan or infinity!).
+/// NK_DTOA | You can define this to `dtoa` or your own double to string conversion implementation replacement. If not defined nuklear will use its own imprecise and possibly unsafe version (does not handle nan or infinity!).
+/// NK_VSNPRINTF| If you define `NK_INCLUDE_STANDARD_VARARGS` as well as `NK_INCLUDE_STANDARD_IO` and want to be safe, define this to `vsnprintf` on compilers supporting later versions of C or C++. By default nuklear will check for your stdlib version in C as well as compiler version in C++. If `vsnprintf` is available it will be used directly. If not defined and if you have older versions of C or C++ it will be defined to `vsprintf`, which is unsafe.
+///
+/// !!! WARNING
+/// The following dependencies will pull in the standard C library if not redefined:
+/// - NK_ASSERT
+///
+/// !!! WARNING
+/// The following dependencies if defined need to be defined for both header and implementation:
+/// - NK_ASSERT
+///
+/// !!! WARNING
+/// The following dependencies, if defined, need to be defined only for the implementation part (a sketch follows below):
+/// - NK_MEMSET
+/// - NK_MEMCPY
+/// - NK_SQRT
+/// - NK_SIN
+/// - NK_COS
+/// - NK_STRTOD
+/// - NK_DTOA
+/// - NK_VSNPRINTF
+///
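+/// A sketch of overriding some of these dependencies. `my_assert` is a
+/// hypothetical project function; the remaining overrides simply forward to
+/// the C standard library:
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// // NK_ASSERT has to be visible to every include of nuklear.h
+/// #define NK_ASSERT(expr) my_assert(expr)
+///
+/// // the remaining defines are only needed in the implementation file
+/// #include <string.h>
+/// #include <math.h>
+/// #define NK_MEMSET memset
+/// #define NK_MEMCPY memcpy
+/// #define NK_SIN sinf
+/// #define NK_COS cosf
+/// #define NK_IMPLEMENTATION
+/// #include "nuklear.h"
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///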
+/// ## Example
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// // init gui state
+/// enum {EASY, HARD};
+/// static int op = EASY;
+/// static float value = 0.6f;
+/// static int i = 20;
+/// struct nk_context ctx;
+///
+/// nk_init_fixed(&ctx, calloc(1, MAX_MEMORY), MAX_MEMORY, &font);
+/// if (nk_begin(&ctx, "Show", nk_rect(50, 50, 220, 220),
+/// NK_WINDOW_BORDER|NK_WINDOW_MOVABLE|NK_WINDOW_CLOSABLE)) {
+/// // fixed widget pixel width
+/// nk_layout_row_static(&ctx, 30, 80, 1);
+/// if (nk_button_label(&ctx, "button")) {
+/// // event handling
+/// }
+///
+/// // fixed widget window ratio width
+/// nk_layout_row_dynamic(&ctx, 30, 2);
+/// if (nk_option_label(&ctx, "easy", op == EASY)) op = EASY;
+/// if (nk_option_label(&ctx, "hard", op == HARD)) op = HARD;
+///
+/// // custom widget pixel width
+/// nk_layout_row_begin(&ctx, NK_STATIC, 30, 2);
+/// {
+/// nk_layout_row_push(&ctx, 50);
+/// nk_label(&ctx, "Volume:", NK_TEXT_LEFT);
+/// nk_layout_row_push(&ctx, 110);
+/// nk_slider_float(&ctx, 0, &value, 1.0f, 0.1f);
+/// }
+/// nk_layout_row_end(&ctx);
+/// }
+/// nk_end(&ctx);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// ![](https://cloud.githubusercontent.com/assets/8057201/10187981/584ecd68-675c-11e5-897c-822ef534a876.png)
+///
+/// ## API
+///
+*/
+#ifndef NK_SINGLE_FILE
+ #define NK_SINGLE_FILE
+#endif
+
+#ifndef NK_NUKLEAR_H_
+#define NK_NUKLEAR_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+/*
+ * ==============================================================
+ *
+ * CONSTANTS
+ *
+ * ===============================================================
+ */
+#define NK_UNDEFINED (-1.0f)
+#define NK_UTF_INVALID 0xFFFD /* internal invalid utf8 rune */
+#define NK_UTF_SIZE 4 /* describes the number of bytes a glyph consists of*/
+#ifndef NK_INPUT_MAX
+ #define NK_INPUT_MAX 16
+#endif
+#ifndef NK_MAX_NUMBER_BUFFER
+ #define NK_MAX_NUMBER_BUFFER 64
+#endif
+#ifndef NK_SCROLLBAR_HIDING_TIMEOUT
+ #define NK_SCROLLBAR_HIDING_TIMEOUT 4.0f
+#endif
+/*
+ * ==============================================================
+ *
+ * HELPER
+ *
+ * ===============================================================
+ */
+#ifndef NK_API
+ #ifdef NK_PRIVATE
+ #if (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199409L))
+ #define NK_API static inline
+ #elif defined(__cplusplus)
+ #define NK_API static inline
+ #else
+ #define NK_API static
+ #endif
+ #else
+ #define NK_API extern
+ #endif
+#endif
+#ifndef NK_LIB
+ #ifdef NK_SINGLE_FILE
+ #define NK_LIB static
+ #else
+ #define NK_LIB extern
+ #endif
+#endif
+
+#define NK_INTERN static
+#define NK_STORAGE static
+#define NK_GLOBAL static
+
+#define NK_FLAG(x) (1 << (x))
+#define NK_STRINGIFY(x) #x
+#define NK_MACRO_STRINGIFY(x) NK_STRINGIFY(x)
+#define NK_STRING_JOIN_IMMEDIATE(arg1, arg2) arg1 ## arg2
+#define NK_STRING_JOIN_DELAY(arg1, arg2) NK_STRING_JOIN_IMMEDIATE(arg1, arg2)
+#define NK_STRING_JOIN(arg1, arg2) NK_STRING_JOIN_DELAY(arg1, arg2)
+
+#ifdef _MSC_VER
+ #define NK_UNIQUE_NAME(name) NK_STRING_JOIN(name,__COUNTER__)
+#else
+ #define NK_UNIQUE_NAME(name) NK_STRING_JOIN(name,__LINE__)
+#endif
+
+#ifndef NK_STATIC_ASSERT
+ #define NK_STATIC_ASSERT(exp) typedef char NK_UNIQUE_NAME(_dummy_array)[(exp)?1:-1]
+#endif
+
+#ifndef NK_FILE_LINE
+#ifdef _MSC_VER
+ #define NK_FILE_LINE __FILE__ ":" NK_MACRO_STRINGIFY(__COUNTER__)
+#else
+ #define NK_FILE_LINE __FILE__ ":" NK_MACRO_STRINGIFY(__LINE__)
+#endif
+#endif
+
+#define NK_MIN(a,b) ((a) < (b) ? (a) : (b))
+#define NK_MAX(a,b) ((a) < (b) ? (b) : (a))
+#define NK_CLAMP(i,v,x) (NK_MAX(NK_MIN(v,x), i))
+
+#ifdef NK_INCLUDE_STANDARD_VARARGS
+ #include <stdarg.h> /* valist, va_start, va_end, ... */
+ #if defined(_MSC_VER) && (_MSC_VER >= 1600) /* VS 2010 and above */
+ #include <sal.h>
+ #define NK_PRINTF_FORMAT_STRING _Printf_format_string_
+ #else
+ #define NK_PRINTF_FORMAT_STRING
+ #endif
+ #if defined(__GNUC__)
+ #define NK_PRINTF_VARARG_FUNC(fmtargnumber) __attribute__((format(__printf__, fmtargnumber, fmtargnumber+1)))
+ #define NK_PRINTF_VALIST_FUNC(fmtargnumber) __attribute__((format(__printf__, fmtargnumber, 0)))
+ #else
+ #define NK_PRINTF_VARARG_FUNC(fmtargnumber)
+ #define NK_PRINTF_VALIST_FUNC(fmtargnumber)
+ #endif
+#endif
+
+/*
+ * ===============================================================
+ *
+ * BASIC
+ *
+ * ===============================================================
+ */
+#ifdef NK_INCLUDE_FIXED_TYPES
+ #include <stdint.h>
+ #define NK_INT8 int8_t
+ #define NK_UINT8 uint8_t
+ #define NK_INT16 int16_t
+ #define NK_UINT16 uint16_t
+ #define NK_INT32 int32_t
+ #define NK_UINT32 uint32_t
+ #define NK_SIZE_TYPE uintptr_t
+ #define NK_POINTER_TYPE uintptr_t
+#else
+ #ifndef NK_INT8
+ #define NK_INT8 signed char
+ #endif
+ #ifndef NK_UINT8
+ #define NK_UINT8 unsigned char
+ #endif
+ #ifndef NK_INT16
+ #define NK_INT16 signed short
+ #endif
+ #ifndef NK_UINT16
+ #define NK_UINT16 unsigned short
+ #endif
+ #ifndef NK_INT32
+ #if defined(_MSC_VER)
+ #define NK_INT32 __int32
+ #else
+ #define NK_INT32 signed int
+ #endif
+ #endif
+ #ifndef NK_UINT32
+ #if defined(_MSC_VER)
+ #define NK_UINT32 unsigned __int32
+ #else
+ #define NK_UINT32 unsigned int
+ #endif
+ #endif
+ #ifndef NK_SIZE_TYPE
+ #if defined(_WIN64) && defined(_MSC_VER)
+ #define NK_SIZE_TYPE unsigned __int64
+ #elif (defined(_WIN32) || defined(WIN32)) && defined(_MSC_VER)
+ #define NK_SIZE_TYPE unsigned __int32
+ #elif defined(__GNUC__) || defined(__clang__)
+ #if defined(__x86_64__) || defined(__ppc64__)
+ #define NK_SIZE_TYPE unsigned long
+ #else
+ #define NK_SIZE_TYPE unsigned int
+ #endif
+ #else
+ #define NK_SIZE_TYPE unsigned long
+ #endif
+ #endif
+ #ifndef NK_POINTER_TYPE
+ #if defined(_WIN64) && defined(_MSC_VER)
+ #define NK_POINTER_TYPE unsigned __int64
+ #elif (defined(_WIN32) || defined(WIN32)) && defined(_MSC_VER)
+ #define NK_POINTER_TYPE unsigned __int32
+ #elif defined(__GNUC__) || defined(__clang__)
+ #if defined(__x86_64__) || defined(__ppc64__)
+ #define NK_POINTER_TYPE unsigned long
+ #else
+ #define NK_POINTER_TYPE unsigned int
+ #endif
+ #else
+ #define NK_POINTER_TYPE unsigned long
+ #endif
+ #endif
+#endif
+
+typedef NK_INT8 nk_char;
+typedef NK_UINT8 nk_uchar;
+typedef NK_UINT8 nk_byte;
+typedef NK_INT16 nk_short;
+typedef NK_UINT16 nk_ushort;
+typedef NK_INT32 nk_int;
+typedef NK_UINT32 nk_uint;
+typedef NK_SIZE_TYPE nk_size;
+typedef NK_POINTER_TYPE nk_ptr;
+
+typedef nk_uint nk_hash;
+typedef nk_uint nk_flags;
+typedef nk_uint nk_rune;
+
+/* Make sure correct type size:
+ * This will fire with a negative subscript error if the type sizes
+ * are set incorrectly by the compiler, and compile out if not */
+NK_STATIC_ASSERT(sizeof(nk_short) == 2);
+NK_STATIC_ASSERT(sizeof(nk_ushort) == 2);
+NK_STATIC_ASSERT(sizeof(nk_uint) == 4);
+NK_STATIC_ASSERT(sizeof(nk_int) == 4);
+NK_STATIC_ASSERT(sizeof(nk_byte) == 1);
+NK_STATIC_ASSERT(sizeof(nk_flags) >= 4);
+NK_STATIC_ASSERT(sizeof(nk_rune) >= 4);
+NK_STATIC_ASSERT(sizeof(nk_size) >= sizeof(void*));
+NK_STATIC_ASSERT(sizeof(nk_ptr) >= sizeof(void*));
+
+/* ============================================================================
+ *
+ * API
+ *
+ * =========================================================================== */
+struct nk_buffer;
+struct nk_allocator;
+struct nk_command_buffer;
+struct nk_draw_command;
+struct nk_convert_config;
+struct nk_style_item;
+struct nk_text_edit;
+struct nk_draw_list;
+struct nk_user_font;
+struct nk_panel;
+struct nk_context;
+struct nk_draw_vertex_layout_element;
+struct nk_style_button;
+struct nk_style_toggle;
+struct nk_style_selectable;
+struct nk_style_slide;
+struct nk_style_progress;
+struct nk_style_scrollbar;
+struct nk_style_edit;
+struct nk_style_property;
+struct nk_style_chart;
+struct nk_style_combo;
+struct nk_style_tab;
+struct nk_style_window_header;
+struct nk_style_window;
+
+enum {nk_false, nk_true};
+struct nk_color {nk_byte r,g,b,a;};
+struct nk_colorf {float r,g,b,a;};
+struct nk_vec2 {float x,y;};
+struct nk_vec2i {short x, y;};
+struct nk_rect {float x,y,w,h;};
+struct nk_recti {short x,y,w,h;};
+typedef char nk_glyph[NK_UTF_SIZE];
+typedef union {void *ptr; int id;} nk_handle;
+struct nk_image {nk_handle handle;unsigned short w,h;unsigned short region[4];};
+struct nk_cursor {struct nk_image img; struct nk_vec2 size, offset;};
+struct nk_scroll {nk_uint x, y;};
+
+enum nk_heading {NK_UP, NK_RIGHT, NK_DOWN, NK_LEFT};
+enum nk_button_behavior {NK_BUTTON_DEFAULT, NK_BUTTON_REPEATER};
+enum nk_modify {NK_FIXED = nk_false, NK_MODIFIABLE = nk_true};
+enum nk_orientation {NK_VERTICAL, NK_HORIZONTAL};
+enum nk_collapse_states {NK_MINIMIZED = nk_false, NK_MAXIMIZED = nk_true};
+enum nk_show_states {NK_HIDDEN = nk_false, NK_SHOWN = nk_true};
+enum nk_chart_type {NK_CHART_LINES, NK_CHART_COLUMN, NK_CHART_MAX};
+enum nk_chart_event {NK_CHART_HOVERING = 0x01, NK_CHART_CLICKED = 0x02};
+enum nk_color_format {NK_RGB, NK_RGBA};
+enum nk_popup_type {NK_POPUP_STATIC, NK_POPUP_DYNAMIC};
+enum nk_layout_format {NK_DYNAMIC, NK_STATIC};
+enum nk_tree_type {NK_TREE_NODE, NK_TREE_TAB};
+
+typedef void*(*nk_plugin_alloc)(nk_handle, void *old, nk_size);
+typedef void (*nk_plugin_free)(nk_handle, void *old);
+typedef int(*nk_plugin_filter)(const struct nk_text_edit*, nk_rune unicode);
+typedef void(*nk_plugin_paste)(nk_handle, struct nk_text_edit*);
+typedef void(*nk_plugin_copy)(nk_handle, const char*, int len);
+
+struct nk_allocator {
+ nk_handle userdata;
+ nk_plugin_alloc alloc;
+ nk_plugin_free free;
+};
+enum nk_symbol_type {
+ NK_SYMBOL_NONE,
+ NK_SYMBOL_X,
+ NK_SYMBOL_UNDERSCORE,
+ NK_SYMBOL_CIRCLE_SOLID,
+ NK_SYMBOL_CIRCLE_OUTLINE,
+ NK_SYMBOL_RECT_SOLID,
+ NK_SYMBOL_RECT_OUTLINE,
+ NK_SYMBOL_TRIANGLE_UP,
+ NK_SYMBOL_TRIANGLE_DOWN,
+ NK_SYMBOL_TRIANGLE_LEFT,
+ NK_SYMBOL_TRIANGLE_RIGHT,
+ NK_SYMBOL_PLUS,
+ NK_SYMBOL_MINUS,
+ NK_SYMBOL_MAX
+};
+/* =============================================================================
+ *
+ * CONTEXT
+ *
+ * =============================================================================*/
+/*/// ### Context
+/// Contexts are the main entry point and the maestro of nuklear and contain all required state.
+/// They are used for window, memory, input, style, stack, commands and time management and need
+/// to be passed into all nuklear GUI specific functions.
+///
+/// #### Usage
+/// To use a context it first has to be initialized which can be achieved by calling
+/// one of either `nk_init_default`, `nk_init_fixed`, `nk_init`, `nk_init_custom`.
+/// Each takes in a font handle and a specific way of handling memory. Memory control
+/// hereby ranges from using the standard library allocator to just specifying a fixed-size
+/// block of memory which nuklear has to manage itself.
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// struct nk_context ctx;
+/// nk_init_xxx(&ctx, ...);
+/// while (1) {
+/// // [...]
+/// nk_clear(&ctx);
+/// }
+/// nk_free(&ctx);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// #### Reference
+/// Function | Description
+/// --------------------|-------------------------------------------------------
+/// __nk_init_default__ | Initializes context with standard library memory allocation (malloc,free)
+/// __nk_init_fixed__ | Initializes context from single fixed size memory block
+/// __nk_init__ | Initializes context with memory allocator callbacks for alloc and free
+/// __nk_init_custom__ | Initializes context from two buffers. One for draw commands the other for window/panel/table allocations
+/// __nk_clear__ | Called at the end of the frame to reset and prepare the context for the next frame
+/// __nk_free__ | Shutdown and free all memory allocated inside the context
+/// __nk_set_user_data__| Utility function to pass user data to draw command
+ */
+#ifdef NK_INCLUDE_DEFAULT_ALLOCATOR
+/*/// #### nk_init_default
+/// Initializes a `nk_context` struct with a default standard library allocator.
+/// Should be used if you don't want to be bothered with memory management in nuklear.
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// int nk_init_default(struct nk_context *ctx, const struct nk_user_font *font);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|---------------------------------------------------------------
+/// __ctx__ | Must point to an either stack or heap allocated `nk_context` struct
+/// __font__ | Must point to a previously initialized font handle for more info look at font documentation
+///
+/// Returns either `false(0)` on failure or `true(1)` on success.
+///
+*/
+NK_API int nk_init_default(struct nk_context*, const struct nk_user_font*);
+#endif
+/*/// #### nk_init_fixed
+/// Initializes a `nk_context` struct from a single fixed-size memory block.
+/// Should be used if you want complete control over nuklear's memory management.
+/// Especially recommended for systems with little memory or systems with virtual memory.
+/// For the latter case you can just allocate, for example, 16MB of virtual memory
+/// and only the required amount of memory will actually be committed.
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// int nk_init_fixed(struct nk_context *ctx, void *memory, nk_size size, const struct nk_user_font *font);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// !!! Warning
+/// make sure the passed memory block is aligned correctly for `nk_draw_commands`.
+///
+/// Parameter | Description
+/// ------------|--------------------------------------------------------------
+/// __ctx__ | Must point to an either stack or heap allocated `nk_context` struct
+/// __memory__ | Must point to a previously allocated memory block
+/// __size__ | Must contain the total size of __memory__
+/// __font__ | Must point to a previously initialized font handle for more info look at font documentation
+///
+/// Returns either `false(0)` on failure or `true(1)` on success.
+*/
+NK_API int nk_init_fixed(struct nk_context*, void *memory, nk_size size, const struct nk_user_font*);
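+/*/// A minimal sketch of calling `nk_init_fixed` with one heap-allocated block;
+/// `my_font` stands in for a previously initialized `struct nk_user_font`:
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// struct nk_context ctx;
+/// void *memory = calloc(1, 64 * 1024); // requires <stdlib.h>
+/// if (!nk_init_fixed(&ctx, memory, 64 * 1024, &my_font)) {
+///     // initialization failed, e.g. the block was too small
+/// }
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+*/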
+/*/// #### nk_init
+/// Initializes a `nk_context` struct with memory allocation callbacks for nuklear to allocate
+/// memory from. Used internally for `nk_init_default` and provides a kitchen sink allocation
+/// interface to nuklear. Can be useful for cases like monitoring memory consumption.
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// int nk_init(struct nk_context *ctx, struct nk_allocator *alloc, const struct nk_user_font *font);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|---------------------------------------------------------------
+/// __ctx__ | Must point to an either stack or heap allocated `nk_context` struct
+/// __alloc__ | Must point to a previously allocated memory allocator
+/// __font__ | Must point to a previously initialized font handle for more info look at font documentation
+///
+/// Returns either `false(0)` on failure or `true(1)` on success.
+*/
+NK_API int nk_init(struct nk_context*, struct nk_allocator*, const struct nk_user_font*);
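+/*/// A sketch of an allocator pair that tracks the total number of bytes
+/// requested, as one way to monitor memory consumption; `ctx`, `my_font` and
+/// the callback names are placeholders:
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// // malloc/free require <stdlib.h>
+/// static nk_size total_requested = 0;
+/// static void *counting_alloc(nk_handle unused, void *old, nk_size size)
+/// {(void)unused; (void)old; total_requested += size; return malloc(size);}
+/// static void counting_free(nk_handle unused, void *ptr)
+/// {(void)unused; free(ptr);}
+///
+/// struct nk_allocator alloc;
+/// alloc.userdata.ptr = 0;
+/// alloc.alloc = counting_alloc;
+/// alloc.free = counting_free;
+/// nk_init(&ctx, &alloc, &my_font);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+*/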
+/*/// #### nk_init_custom
+/// Initializes a `nk_context` struct from two different either fixed or growing
+/// buffers. The first buffer is for allocating draw commands while the second buffer is
+/// used for allocating windows, panels and state tables.
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// int nk_init_custom(struct nk_context *ctx, struct nk_buffer *cmds, struct nk_buffer *pool, const struct nk_user_font *font);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|---------------------------------------------------------------
+/// __ctx__ | Must point to an either stack or heap allocated `nk_context` struct
+/// __cmds__ | Must point to a previously initialized memory buffer either fixed or dynamic to store draw commands into
+/// __pool__ | Must point to a previously initialized memory buffer either fixed or dynamic to store windows, panels and tables
+/// __font__ | Must point to a previously initialized font handle for more info look at font documentation
+///
+/// Returns either `false(0)` on failure or `true(1)` on success.
+*/
+NK_API int nk_init_custom(struct nk_context*, struct nk_buffer *cmds, struct nk_buffer *pool, const struct nk_user_font*);
+/*/// #### nk_clear
+/// Resets the context state at the end of the frame. This includes mostly
+/// garbage-collector tasks like removing windows or tables that were not called
+/// anymore and are therefore no longer used.
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// void nk_clear(struct nk_context *ctx);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to a previously initialized `nk_context` struct
+*/
+NK_API void nk_clear(struct nk_context*);
+/*/// #### nk_free
+/// Frees all memory allocated by nuklear. Not needed if context was
+/// initialized with `nk_init_fixed`.
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// void nk_free(struct nk_context *ctx);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to a previously initialized `nk_context` struct
+*/
+NK_API void nk_free(struct nk_context*);
+#ifdef NK_INCLUDE_COMMAND_USERDATA
+/*/// #### nk_set_user_data
+/// Sets the userdata that is passed down into each draw command.
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// void nk_set_user_data(struct nk_context *ctx, nk_handle data);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|--------------------------------------------------------------
+/// __ctx__ | Must point to a previously initialized `nk_context` struct
+/// __data__ | Handle with either pointer or index to be passed into every draw command
+*/
+NK_API void nk_set_user_data(struct nk_context*, nk_handle handle);
+#endif
+/* =============================================================================
+ *
+ * INPUT
+ *
+ * =============================================================================*/
+/*/// ### Input
+/// The input API is responsible for holding the current input state composed of
+/// mouse, key and text input states.
+/// It is worth noting that no direct OS or window handling is done in nuklear.
+/// Instead all input state has to be provided by platform specific code. This on one hand
+/// expects more work from the user and complicates usage but on the other hand
+/// provides simple abstraction over a big number of platforms, libraries and other
+/// already provided functionality.
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// nk_input_begin(&ctx);
+/// while (GetEvent(&evt)) {
+/// if (evt.type == MOUSE_MOVE)
+/// nk_input_motion(&ctx, evt.motion.x, evt.motion.y);
+/// else if (evt.type == [...]) {
+/// // [...]
+/// }
+/// } nk_input_end(&ctx);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// #### Usage
+/// Input state needs to be provided to nuklear by first calling `nk_input_begin`
+/// which resets internal state like delta mouse position and button transitions.
+/// After `nk_input_begin` all current input state needs to be provided. This includes
+/// mouse motion, button and key pressed and released, text input and scrolling.
+/// Both event- and state-based input handling are supported by this API
+/// and should work without problems. Finally, after all input state has been
+/// mirrored, `nk_input_end` needs to be called to finish the input process.
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// struct nk_context ctx;
+/// nk_init_xxx(&ctx, ...);
+/// while (1) {
+/// Event evt;
+/// nk_input_begin(&ctx);
+/// while (GetEvent(&evt)) {
+/// if (evt.type == MOUSE_MOVE)
+/// nk_input_motion(&ctx, evt.motion.x, evt.motion.y);
+/// else if (evt.type == [...]) {
+/// // [...]
+/// }
+/// }
+/// nk_input_end(&ctx);
+/// // [...]
+/// nk_clear(&ctx);
+/// } nk_free(&ctx);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// #### Reference
+/// Function | Description
+/// --------------------|-------------------------------------------------------
+/// __nk_input_begin__ | Begins the input mirroring process. Needs to be called before all other `nk_input_xxx` calls
+/// __nk_input_motion__ | Mirrors mouse cursor position
+/// __nk_input_key__ | Mirrors key state with either pressed or released
+/// __nk_input_button__ | Mirrors mouse button state with either pressed or released
+/// __nk_input_scroll__ | Mirrors mouse scroll values
+/// __nk_input_char__ | Adds a single ASCII text character into an internal text buffer
+/// __nk_input_glyph__ | Adds a single multi-byte UTF-8 character into an internal text buffer
+/// __nk_input_unicode__| Adds a single unicode rune into an internal text buffer
+/// __nk_input_end__ | Ends the input mirroring process by calculating state changes. Don't call any `nk_input_xxx` function referenced above after this call
+*/
+enum nk_keys {
+ NK_KEY_NONE,
+ NK_KEY_SHIFT,
+ NK_KEY_CTRL,
+ NK_KEY_DEL,
+ NK_KEY_ENTER,
+ NK_KEY_TAB,
+ NK_KEY_BACKSPACE,
+ NK_KEY_COPY,
+ NK_KEY_CUT,
+ NK_KEY_PASTE,
+ NK_KEY_UP,
+ NK_KEY_DOWN,
+ NK_KEY_LEFT,
+ NK_KEY_RIGHT,
+ /* Shortcuts: text field */
+ NK_KEY_TEXT_INSERT_MODE,
+ NK_KEY_TEXT_REPLACE_MODE,
+ NK_KEY_TEXT_RESET_MODE,
+ NK_KEY_TEXT_LINE_START,
+ NK_KEY_TEXT_LINE_END,
+ NK_KEY_TEXT_START,
+ NK_KEY_TEXT_END,
+ NK_KEY_TEXT_UNDO,
+ NK_KEY_TEXT_REDO,
+ NK_KEY_TEXT_SELECT_ALL,
+ NK_KEY_TEXT_WORD_LEFT,
+ NK_KEY_TEXT_WORD_RIGHT,
+ /* Shortcuts: scrollbar */
+ NK_KEY_SCROLL_START,
+ NK_KEY_SCROLL_END,
+ NK_KEY_SCROLL_DOWN,
+ NK_KEY_SCROLL_UP,
+ NK_KEY_MAX
+};
+enum nk_buttons {
+ NK_BUTTON_LEFT,
+ NK_BUTTON_MIDDLE,
+ NK_BUTTON_RIGHT,
+ NK_BUTTON_DOUBLE,
+ NK_BUTTON_MAX
+};
+/*/// #### nk_input_begin
+/// Begins the input mirroring process by resetting text, scroll,
+/// mouse, previous mouse position and movement as well as key state transitions.
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// void nk_input_begin(struct nk_context*);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to a previously initialized `nk_context` struct
+*/
+NK_API void nk_input_begin(struct nk_context*);
+/*/// #### nk_input_motion
+/// Mirrors current mouse position to nuklear
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// void nk_input_motion(struct nk_context *ctx, int x, int y);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to a previously initialized `nk_context` struct
+/// __x__ | Must hold an integer describing the current mouse cursor x-position
+/// __y__ | Must hold an integer describing the current mouse cursor y-position
+*/
+NK_API void nk_input_motion(struct nk_context*, int x, int y);
+/*/// #### nk_input_key
+/// Mirrors the state of a specific key to nuklear
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// void nk_input_key(struct nk_context*, enum nk_keys key, int down);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to a previously initialized `nk_context` struct
+/// __key__ | Must be any value specified in enum `nk_keys` that needs to be mirrored
+/// __down__ | Must be 0 if the key is up and 1 if the key is down
+*/
+NK_API void nk_input_key(struct nk_context*, enum nk_keys, int down);
+/*/// #### nk_input_button
+/// Mirrors the state of a specific mouse button to nuklear
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// void nk_input_button(struct nk_context *ctx, enum nk_buttons btn, int x, int y, int down);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to a previously initialized `nk_context` struct
+/// __btn__ | Must be any value specified in enum `nk_buttons` that needs to be mirrored
+/// __x__ | Must contain an integer describing mouse cursor x-position on click up/down
+/// __y__ | Must contain an integer describing mouse cursor y-position on click up/down
+/// __down__ | Must be 0 if the key is up and 1 if the key is down
+*/
+NK_API void nk_input_button(struct nk_context*, enum nk_buttons, int x, int y, int down);
+/*/// #### nk_input_scroll
+/// Copies the last mouse scroll value to nuklear. This is a generic
+/// scroll value, so it does not have to come from a mouse and could
+/// also originate from another input source.
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// void nk_input_scroll(struct nk_context *ctx, struct nk_vec2 val);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to a previously initialized `nk_context` struct
+/// __val__ | vector with both X- as well as Y-scroll value
+*/
+NK_API void nk_input_scroll(struct nk_context*, struct nk_vec2 val);
+/*/// #### nk_input_char
+/// Copies a single ASCII character into an internal text buffer.
+/// This is basically a helper function to quickly push ASCII characters into
+/// nuklear.
+///
+/// !!! Note
+/// Stores up to NK_INPUT_MAX bytes between `nk_input_begin` and `nk_input_end`.
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// void nk_input_char(struct nk_context *ctx, char c);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to a previously initialized `nk_context` struct
+/// __c__ | Must be a single ASCII character, preferably one that can be printed
+*/
+NK_API void nk_input_char(struct nk_context*, char);
+/*/// #### nk_input_glyph
+/// Converts an encoded unicode rune into UTF-8 and copies the result into an
+/// internal text buffer.
+///
+/// !!! Note
+/// Stores up to NK_INPUT_MAX bytes between `nk_input_begin` and `nk_input_end`.
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// void nk_input_glyph(struct nk_context *ctx, const nk_glyph g);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to a previously initialized `nk_context` struct
+/// __g__ | UTF-32 unicode codepoint
+*/
+NK_API void nk_input_glyph(struct nk_context*, const nk_glyph);
+/*/// #### nk_input_unicode
+/// Converts a unicode rune into UTF-8 and copies the result
+/// into an internal text buffer.
+/// !!! Note
+/// Stores up to NK_INPUT_MAX bytes between `nk_input_begin` and `nk_input_end`.
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// void nk_input_unicode(struct nk_context*, nk_rune rune);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to a previously initialized `nk_context` struct
+/// __rune__ | UTF-32 unicode codepoint
+*/
+NK_API void nk_input_unicode(struct nk_context*, nk_rune);
+/*/// #### nk_input_end
+/// Ends the input mirroring process by resetting mouse grabbing
+/// state to ensure the mouse cursor is not grabbed indefinitely.
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// void nk_input_end(struct nk_context *ctx);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to a previously initialized `nk_context` struct
+*/
+NK_API void nk_input_end(struct nk_context*);
+/* =============================================================================
+ *
+ * DRAWING
+ *
+ * =============================================================================*/
+/*/// ### Drawing
+/// This library was designed to be render backend agnostic so it does
+/// not draw anything to screen directly. Instead all drawn shapes that widgets
+/// are made of are buffered into memory and make up a command queue.
+/// Each frame therefore fills the command buffer with draw commands
+/// that then need to be executed by the user and their own render backend.
+/// After that the command buffer needs to be cleared and a new frame can be
+/// started. It is probably important to note that the command buffer is the main
+/// drawing API and the optional vertex buffer API only takes this format and
+/// converts it into a hardware accessible format.
+///
+/// #### Usage
+/// To draw all draw commands accumulated over a frame you need your own render
+/// backend able to draw a number of 2D primitives. This includes at least
+/// filled and stroked rectangles, circles, text, lines, triangles and scissors.
+/// As soon as this criterion is met you can iterate over each draw command
+/// and execute each one in an interpreter-like fashion:
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// const struct nk_command *cmd = 0;
+/// nk_foreach(cmd, &ctx) {
+/// switch (cmd->type) {
+/// case NK_COMMAND_LINE:
+/// your_draw_line_function(...)
+/// break;
+/// case NK_COMMAND_RECT:
+/// your_draw_rect_function(...)
+/// break;
+/// case //...:
+/// //[...]
+/// }
+/// }
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// In terms of program flow, draw commands need to be executed after input has been
+/// gathered and the complete UI with windows and their contained widgets has
+/// been built, and before calling `nk_clear`, which frees all previously
+/// allocated draw commands.
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// struct nk_context ctx;
+/// nk_init_xxx(&ctx, ...);
+/// while (1) {
+/// Event evt;
+/// nk_input_begin(&ctx);
+/// while (GetEvent(&evt)) {
+/// if (evt.type == MOUSE_MOVE)
+/// nk_input_motion(&ctx, evt.motion.x, evt.motion.y);
+/// else if (evt.type == [...]) {
+/// [...]
+/// }
+/// }
+/// nk_input_end(&ctx);
+/// //
+/// // [...]
+/// //
+/// const struct nk_command *cmd = 0;
+/// nk_foreach(cmd, &ctx) {
+/// switch (cmd->type) {
+/// case NK_COMMAND_LINE:
+/// your_draw_line_function(...)
+/// break;
+/// case NK_COMMAND_RECT:
+/// your_draw_rect_function(...)
+/// break;
+/// case ...:
+/// // [...]
+/// }
+/// nk_clear(&ctx);
+/// }
+/// nk_free(&ctx);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// You probably noticed that you have to draw all of the UI each frame, which is
+/// quite wasteful. While the actual UI updating loop is quite fast, rendering
+/// when it is not actually needed is not. So there are multiple things you could do.
+///
+/// The first is to only update on input. This of course is only an option if your
+/// application only depends on the UI and does not require any outside calculations.
+/// If you actually only update on input make sure to update the UI two times each
+/// frame and call `nk_clear` directly after the first pass and only draw in
+/// the second pass. In addition it is recommended to also add additional timers
+/// to make sure the UI is not drawn more than a fixed number of frames per second.
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// struct nk_context ctx;
+/// nk_init_xxx(&ctx, ...);
+/// while (1) {
+/// // [...wait for input ]
+/// // [...do two UI passes ...]
+/// do_ui(...)
+/// nk_clear(&ctx);
+/// do_ui(...)
+/// //
+/// // draw
+/// const struct nk_command *cmd = 0;
+/// nk_foreach(cmd, &ctx) {
+/// switch (cmd->type) {
+/// case NK_COMMAND_LINE:
+/// your_draw_line_function(...)
+/// break;
+/// case NK_COMMAND_RECT:
+/// your_draw_rect_function(...)
+/// break;
+/// case ...:
+/// //[...]
+/// }
+/// nk_clear(&ctx);
+/// }
+/// nk_free(&ctx);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// The second probably more applicable trick is to only draw if anything changed.
+/// It is not really useful for applications with a continuous draw loop but
+/// quite useful for desktop applications. To actually get nuklear to only
+/// draw on changes you first have to define `NK_ZERO_COMMAND_MEMORY` and
+/// allocate a memory buffer that will store each unique drawing output.
+/// After each frame you compare the draw command memory inside the library
+/// with your allocated buffer by memcmp. If memcmp detects differences
+/// you have to copy the command buffer into the allocated buffer
+/// and then draw like usual (this example uses fixed memory but you could
+/// use dynamically allocated memory).
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// //[... other defines ...]
+/// #define NK_ZERO_COMMAND_MEMORY
+/// #include "nuklear.h"
+/// //
+/// // setup context
+/// struct nk_context ctx;
+/// void *last = calloc(1,64*1024);
+/// void *buf = calloc(1,64*1024);
+/// nk_init_fixed(&ctx, buf, 64*1024);
+/// //
+/// // loop
+/// while (1) {
+/// // [...input...]
+/// // [...ui...]
+/// void *cmds = nk_buffer_memory(&ctx.memory);
+/// if (memcmp(cmds, last, ctx.memory.allocated)) {
+/// memcpy(last,cmds,ctx.memory.allocated);
+/// const struct nk_command *cmd = 0;
+/// nk_foreach(cmd, &ctx) {
+/// switch (cmd->type) {
+/// case NK_COMMAND_LINE:
+/// your_draw_line_function(...)
+/// break;
+/// case NK_COMMAND_RECT:
+/// your_draw_rect_function(...)
+/// break;
+/// case ...:
+/// // [...]
+/// }
+/// }
+/// }
+/// nk_clear(&ctx);
+/// }
+/// nk_free(&ctx);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Finally, while using draw commands makes sense for higher-level abstracted platforms like
+/// X11 and Win32 or drawing libraries, it is often desirable to use the graphics
+/// hardware directly. Therefore it is possible to just define
+/// `NK_INCLUDE_VERTEX_BUFFER_OUTPUT` which includes optional vertex output.
+/// To access the vertex output you first have to convert all draw commands into
+/// vertices by calling `nk_convert` which takes in your preferred vertex format.
+/// After successfully converting all draw commands just iterate over and execute all
+/// vertex draw commands:
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// // fill configuration
+/// struct your_vertex
+/// {
+/// float pos[2]; // important to keep it to 2 floats
+/// float uv[2];
+/// unsigned char col[4];
+/// };
+/// struct nk_convert_config cfg = {};
+/// static const struct nk_draw_vertex_layout_element vertex_layout[] = {
+/// {NK_VERTEX_POSITION, NK_FORMAT_FLOAT, NK_OFFSETOF(struct your_vertex, pos)},
+/// {NK_VERTEX_TEXCOORD, NK_FORMAT_FLOAT, NK_OFFSETOF(struct your_vertex, uv)},
+/// {NK_VERTEX_COLOR, NK_FORMAT_R8G8B8A8, NK_OFFSETOF(struct your_vertex, col)},
+/// {NK_VERTEX_LAYOUT_END}
+/// };
+/// cfg.shape_AA = NK_ANTI_ALIASING_ON;
+/// cfg.line_AA = NK_ANTI_ALIASING_ON;
+/// cfg.vertex_layout = vertex_layout;
+/// cfg.vertex_size = sizeof(struct your_vertex);
+/// cfg.vertex_alignment = NK_ALIGNOF(struct your_vertex);
+/// cfg.circle_segment_count = 22;
+/// cfg.curve_segment_count = 22;
+/// cfg.arc_segment_count = 22;
+/// cfg.global_alpha = 1.0f;
+/// cfg.null = dev->null;
+/// //
+/// // setup buffers and convert
+/// struct nk_buffer cmds, verts, idx;
+/// nk_buffer_init_default(&cmds);
+/// nk_buffer_init_default(&verts);
+/// nk_buffer_init_default(&idx);
+/// nk_convert(&ctx, &cmds, &verts, &idx, &cfg);
+/// //
+/// // draw
+/// nk_draw_foreach(cmd, &ctx, &cmds) {
+/// if (!cmd->elem_count) continue;
+/// //[...]
+/// }
+/// nk_buffer_free(&cmds);
+/// nk_buffer_free(&verts);
+/// nk_buffer_free(&idx);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// #### Reference
+/// Function | Description
+/// --------------------|-------------------------------------------------------
+/// __nk__begin__ | Returns the first draw command in the context draw command list to be drawn
+/// __nk__next__ | Increments the draw command iterator to the next command inside the context draw command list
+/// __nk_foreach__ | Iterates over each draw command inside the context draw command list
+/// __nk_convert__ | Converts from the abstract draw commands list into a hardware accessible vertex format
+/// __nk_draw_begin__ | Returns the first vertex command in the context vertex draw list to be executed
+/// __nk__draw_next__ | Increments the vertex command iterator to the next command inside the context vertex command list
+/// __nk__draw_end__ | Returns the end of the vertex draw list
+/// __nk_draw_foreach__ | Iterates over each vertex draw command inside the vertex draw list
+*/
+enum nk_anti_aliasing {NK_ANTI_ALIASING_OFF, NK_ANTI_ALIASING_ON};
+enum nk_convert_result {
+ NK_CONVERT_SUCCESS = 0,
+ NK_CONVERT_INVALID_PARAM = 1,
+ NK_CONVERT_COMMAND_BUFFER_FULL = NK_FLAG(1),
+ NK_CONVERT_VERTEX_BUFFER_FULL = NK_FLAG(2),
+ NK_CONVERT_ELEMENT_BUFFER_FULL = NK_FLAG(3)
+};
+struct nk_draw_null_texture {
+ nk_handle texture; /* texture handle to a texture with a white pixel */
+ struct nk_vec2 uv; /* coordinates to a white pixel in the texture */
+};
+struct nk_convert_config {
+ float global_alpha; /* global alpha value */
+ enum nk_anti_aliasing line_AA; /* line anti-aliasing flag can be turned off if you are tight on memory */
+ enum nk_anti_aliasing shape_AA; /* shape anti-aliasing flag can be turned off if you are tight on memory */
+ unsigned circle_segment_count; /* number of segments used for circles: default to 22 */
+ unsigned arc_segment_count; /* number of segments used for arcs: default to 22 */
+ unsigned curve_segment_count; /* number of segments used for curves: default to 22 */
+ struct nk_draw_null_texture null; /* handle to texture with a white pixel for shape drawing */
+ const struct nk_draw_vertex_layout_element *vertex_layout; /* describes the vertex output format and packing */
+ nk_size vertex_size; /* sizeof one vertex for vertex packing */
+ nk_size vertex_alignment; /* vertex alignment: Can be obtained by NK_ALIGNOF */
+};
+/*/// #### nk__begin
+/// Returns a draw command list iterator to iterate all draw
+/// commands accumulated over one frame.
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// const struct nk_command* nk__begin(struct nk_context*);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to a previously initialized `nk_context` struct at the end of a frame
+///
+/// Returns draw command pointer pointing to the first command inside the draw command list
+*/
+NK_API const struct nk_command* nk__begin(struct nk_context*);
+/*/// #### nk__next
+/// Returns draw command pointer pointing to the next command inside the draw command list
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// const struct nk_command* nk__next(struct nk_context*, const struct nk_command*);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to a previously initialized `nk_context` struct at the end of a frame
+/// __cmd__ | Must point to a draw command previously returned by either `nk__begin` or `nk__next`
+///
+/// Returns draw command pointer pointing to the next command inside the draw command list
+*/
+NK_API const struct nk_command* nk__next(struct nk_context*, const struct nk_command*);
+/*/// #### nk_foreach
+/// Iterates over each draw command inside the context draw command list
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// #define nk_foreach(c, ctx)
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to a previously initialized `nk_context` struct at the end of a frame
+/// __cmd__ | Command pointer initialized to NULL
+///
+/// Iterates over each draw command inside the context draw command list
+*/
+#define nk_foreach(c, ctx) for((c) = nk__begin(ctx); (c) != 0; (c) = nk__next(ctx,c))
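+/*/// A minimal dispatch sketch for the loop above, assuming the stock nuklear command
+/// structs (`nk_command_line`, `nk_command_rect_filled`) and hypothetical
+/// `draw_line`/`draw_rect` backend functions supplied by your renderer:
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// const struct nk_command *cmd = 0;
+/// nk_foreach(cmd, &ctx) {
+/// switch (cmd->type) {
+/// case NK_COMMAND_LINE: {
+/// // cast the generic command to its concrete type before reading its fields
+/// const struct nk_command_line *l = (const struct nk_command_line*)cmd;
+/// draw_line(l->begin.x, l->begin.y, l->end.x, l->end.y, l->color);
+/// } break;
+/// case NK_COMMAND_RECT_FILLED: {
+/// const struct nk_command_rect_filled *r = (const struct nk_command_rect_filled*)cmd;
+/// draw_rect(r->x, r->y, r->w, r->h, r->color);
+/// } break;
+/// default: break; // remaining command types are handled analogously
+/// }
+/// }
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+*/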
+#ifdef NK_INCLUDE_VERTEX_BUFFER_OUTPUT
+/*/// #### nk_convert
+/// Converts all internal draw commands into vertex draw commands and fills
+/// three buffers with vertices, vertex draw commands and vertex indices. The vertex format
+/// as well as some other configuration values have to be configured by filling out a
+/// `nk_convert_config` struct.
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// nk_flags nk_convert(struct nk_context *ctx, struct nk_buffer *cmds,
+/// struct nk_buffer *vertices, struct nk_buffer *elements, const struct nk_convert_config*);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to a previously initialized `nk_context` struct at the end of a frame
+/// __cmds__ | Must point to a previously initialized buffer to hold converted vertex draw commands
+/// __vertices__| Must point to a previously initialized buffer to hold all produced vertices
+/// __elements__| Must point to a previously initialized buffer to hold all produced vertex indices
+/// __config__ | Must point to a filled out `nk_config` struct to configure the conversion process
+///
+/// Returns one of enum nk_convert_result error codes
+///
+/// Parameter | Description
+/// --------------------------------|-----------------------------------------------------------
+/// NK_CONVERT_SUCCESS | Signals a successful draw command to vertex buffer conversion
+/// NK_CONVERT_INVALID_PARAM | An invalid argument was passed in the function call
+/// NK_CONVERT_COMMAND_BUFFER_FULL | The provided buffer for storing draw commands is full or failed to allocate more memory
+/// NK_CONVERT_VERTEX_BUFFER_FULL | The provided buffer for storing vertices is full or failed to allocate more memory
+/// NK_CONVERT_ELEMENT_BUFFER_FULL | The provided buffer for storing indices is full or failed to allocate more memory
+*/
+NK_API nk_flags nk_convert(struct nk_context*, struct nk_buffer *cmds, struct nk_buffer *vertices, struct nk_buffer *elements, const struct nk_convert_config*);
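+/*/// A short error-handling sketch: `NK_CONVERT_SUCCESS` is zero and the failure codes
+/// behave like flags, so any non-zero return signals that conversion did not complete.
+/// The `ctx`, `cmds`, `verts`, `idx` and `cfg` objects are assumed to be set up as in
+/// the conversion example above.
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// nk_flags res = nk_convert(&ctx, &cmds, &verts, &idx, &cfg);
+/// if (res != NK_CONVERT_SUCCESS) {
+/// if (res & NK_CONVERT_COMMAND_BUFFER_FULL) { /* grow or flush the command buffer */ }
+/// if (res & NK_CONVERT_VERTEX_BUFFER_FULL) { /* grow or flush the vertex buffer */ }
+/// if (res & NK_CONVERT_ELEMENT_BUFFER_FULL) { /* grow or flush the element buffer */ }
+/// }
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+*/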
+/*/// #### nk__draw_begin
+/// Returns a draw vertex command buffer iterator to iterate over the vertex draw command buffer
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// const struct nk_draw_command* nk__draw_begin(const struct nk_context*, const struct nk_buffer*);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to a previously initialized `nk_context` struct at the end of a frame
+/// __buf__ | Must point to a vertex draw command buffer previously filled out by `nk_convert`
+///
+/// Returns vertex draw command pointer pointing to the first command inside the vertex draw command buffer
+*/
+NK_API const struct nk_draw_command* nk__draw_begin(const struct nk_context*, const struct nk_buffer*);
+/*/// #### nk__draw_end
+/// Returns the vertex draw command at the end of the vertex draw command buffer
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// const struct nk_draw_command* nk__draw_end(const struct nk_context *ctx, const struct nk_buffer *buf);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to a previously initialized `nk_context` struct at the end of a frame
+/// __buf__ | Must point to a vertex draw command buffer previously filled out by `nk_convert`
+///
+/// Returns vertex draw command pointer pointing to the end of the last vertex draw command inside the vertex draw command buffer
+*/
+NK_API const struct nk_draw_command* nk__draw_end(const struct nk_context*, const struct nk_buffer*);
+/*/// #### nk__draw_next
+/// Increments the vertex draw command buffer iterator
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// const struct nk_draw_command* nk__draw_next(const struct nk_draw_command*, const struct nk_buffer*, const struct nk_context*);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __cmd__ | Must point to a vertex draw command previously returned by `nk__draw_begin` or `nk__draw_next`
+/// __buf__ | Must point to a vertex draw command buffer previously filled out by `nk_convert`
+/// __ctx__ | Must point to a previously initialized `nk_context` struct at the end of a frame
+///
+/// Returns a vertex draw command pointer pointing to the next command inside the vertex draw command buffer
+*/
+NK_API const struct nk_draw_command* nk__draw_next(const struct nk_draw_command*, const struct nk_buffer*, const struct nk_context*);
+/*/// #### nk_draw_foreach
+/// Iterates over each vertex draw command inside a vertex draw command buffer
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// #define nk_draw_foreach(cmd,ctx, b)
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __cmd__ | `nk_draw_command` iterator set to NULL
+/// __buf__ | Must point to a vertex draw command buffer previously filled out by `nk_convert`
+/// __ctx__ | Must point to a previously initialized `nk_context` struct at the end of a frame
+*/
+#define nk_draw_foreach(cmd,ctx, b) for((cmd)=nk__draw_begin(ctx, b); (cmd)!=0; (cmd)=nk__draw_next(cmd, b, ctx))
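+/*/// A rendering-loop sketch for an OpenGL-style backend; the GL calls stand in for
+/// whatever your renderer provides, `cmds` is the command buffer filled by `nk_convert`,
+/// `offset` walks the element buffer it produced, and `height` is an assumed framebuffer
+/// height used to flip the scissor rectangle into GL's bottom-left origin.
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// const struct nk_draw_command *cmd;
+/// const nk_draw_index *offset = 0;
+/// nk_draw_foreach(cmd, &ctx, &cmds) {
+/// if (!cmd->elem_count) continue;
+/// // bind the texture and clip rectangle recorded for this batch, then draw it
+/// glBindTexture(GL_TEXTURE_2D, (GLuint)cmd->texture.id);
+/// glScissor((GLint)cmd->clip_rect.x,
+/// (GLint)(height - (cmd->clip_rect.y + cmd->clip_rect.h)),
+/// (GLint)cmd->clip_rect.w, (GLint)cmd->clip_rect.h);
+/// glDrawElements(GL_TRIANGLES, (GLsizei)cmd->elem_count, GL_UNSIGNED_SHORT, offset);
+/// offset += cmd->elem_count;
+/// }
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+*/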
+#endif
+/* =============================================================================
+ *
+ * WINDOW
+ *
+ * =============================================================================
+/// ### Window
+/// Windows are the main persistent state used inside nuklear and their lifetime
+/// is controlled by simply "retouching" (i.e. calling) each window each frame.
+/// All widgets inside nuklear can only be added inside the function pair `nk_begin_xxx`
+/// and `nk_end`. Calling any widgets outside these two functions will result in an
+/// assert in debug or no state change in release mode.<br /><br />
+///
+/// Each window holds frame persistent state like position, size, flags, state tables,
+/// and some garbage collected internal persistent widget state. Each window
+/// is linked into a window stack list which determines the drawing and overlapping
+/// order. The topmost window thereby is the currently active window.<br /><br />
+///
+/// Changing a window's position inside the stack happens either automatically, by
+/// the user clicking on the window, or programmatically by calling `nk_window_set_focus`.
+/// Windows are visible by default unless they were explicitly defined with the flag
+/// `NK_WINDOW_HIDDEN`, the user clicked the close button on a window with the flag
+/// `NK_WINDOW_CLOSABLE`, or the window was explicitly hidden by calling
+/// `nk_window_show`. To explicitly close and destroy a window call `nk_window_close`.<br /><br />
+///
+/// #### Usage
+/// To create and keep a window you have to call one of the two `nk_begin_xxx`
+/// functions to start window declarations and `nk_end` at the end. Furthermore it
+/// is recommended to check the return value of `nk_begin_xxx` and only process
+/// widgets inside the window if the value is not 0. Either way you have to call
+/// `nk_end` at the end of window declarations. Also, do not attempt to
+/// nest `nk_begin_xxx` calls; doing so will result in an assert in debug mode or,
+/// failing that, in a segmentation fault.
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// if (nk_begin_xxx(...)) {
+/// // [... widgets ...]
+/// }
+/// nk_end(ctx);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// In the overall program flow, window and widget declarations need to occur after input
+/// handling and before drawing to screen. Not doing so can result in higher
+/// latency or, at worst, invalid behavior. Furthermore make sure that `nk_clear`
+/// is called at the end of the frame. While nuklear's default platform backends
+/// already call `nk_clear` for you, if you write your own backend not calling
+/// `nk_clear` can cause asserts or, even worse, undefined behavior.
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// struct nk_context ctx;
+/// nk_init_xxx(&ctx, ...);
+/// while (1) {
+/// Event evt;
+/// nk_input_begin(&ctx);
+/// while (GetEvent(&evt)) {
+/// if (evt.type == MOUSE_MOVE)
+/// nk_input_motion(&ctx, evt.motion.x, evt.motion.y);
+/// else if (evt.type == [...]) {
+/// nk_input_xxx(...);
+/// }
+/// }
+/// nk_input_end(&ctx);
+///
+/// if (nk_begin_xxx(...)) {
+/// //[...]
+/// }
+/// nk_end(ctx);
+///
+/// const struct nk_command *cmd = 0;
+/// nk_foreach(cmd, &ctx) {
+/// switch (cmd->type) {
+/// case NK_COMMAND_LINE:
+/// your_draw_line_function(...)
+/// break;
+/// case NK_COMMAND_RECT:
+/// your_draw_rect_function(...)
+/// break;
+/// case //...:
+/// //[...]
+/// }
+/// }
+/// nk_clear(&ctx);
+/// }
+/// nk_free(&ctx);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// #### Reference
+/// Function | Description
+/// ------------------------------------|----------------------------------------
+/// nk_begin | Starts a new window; needs to be called every frame for every window (unless hidden) or otherwise the window gets removed
+/// nk_begin_titled | Extended window start with separated title and identifier to allow multiple windows with same title but not name
+/// nk_end | Needs to be called at the end of the window building process to process scaling, scrollbars and general cleanup
+//
+/// nk_window_find | Finds and returns the window with give name
+/// nk_window_get_bounds | Returns a rectangle with screen position and size of the currently processed window.
+/// nk_window_get_position | Returns the position of the currently processed window
+/// nk_window_get_size | Returns the size with width and height of the currently processed window
+/// nk_window_get_width | Returns the width of the currently processed window
+/// nk_window_get_height | Returns the height of the currently processed window
+/// nk_window_get_panel | Returns the underlying panel which contains all processing state of the current window
+/// nk_window_get_content_region | Returns the position and size of the currently visible and non-clipped space inside the currently processed window
+/// nk_window_get_content_region_min | Returns the upper left position of the currently visible and non-clipped space inside the currently processed window
+/// nk_window_get_content_region_max | Returns the lower right position of the currently visible and non-clipped space inside the currently processed window
+/// nk_window_get_content_region_size | Returns the size of the currently visible and non-clipped space inside the currently processed window
+/// nk_window_get_canvas | Returns the draw command buffer. Can be used to draw custom widgets
+/// nk_window_get_scroll | Gets the scroll offset of the current window
+/// nk_window_has_focus | Returns if the currently processed window is currently active
+/// nk_window_is_collapsed | Returns if the window with given name is currently minimized/collapsed
+/// nk_window_is_closed | Returns if the currently processed window was closed
+/// nk_window_is_hidden | Returns if the currently processed window was hidden
+/// nk_window_is_active | Same as nk_window_has_focus for some reason
+/// nk_window_is_hovered | Returns if the currently processed window is currently being hovered by mouse
+/// nk_window_is_any_hovered | Returns if any window is currently hovered
+/// nk_item_is_any_active | Returns if any window or widget is currently hovered or active
+//
+/// nk_window_set_bounds | Updates position and size of the window with the given name
+/// nk_window_set_position | Updates the position of the window with the given name
+/// nk_window_set_size | Updates the size of the window with the given name
+/// nk_window_set_focus | Set the currently processed window as active window
+/// nk_window_set_scroll | Sets the scroll offset of the current window
+//
+/// nk_window_close | Closes the window with given window name which deletes the window at the end of the frame
+/// nk_window_collapse | Collapses the window with given window name
+/// nk_window_collapse_if | Collapses the window with given window name if the given condition was met
+/// nk_window_show | Hides a visible or reshows a hidden window
+/// nk_window_show_if | Hides/shows a window depending on condition
+*/
+/*
+/// #### nk_panel_flags
+/// Flag | Description
+/// ----------------------------|----------------------------------------
+/// NK_WINDOW_BORDER | Draws a border around the window to visually separate window from the background
+/// NK_WINDOW_MOVABLE | The movable flag indicates that a window can be moved by user input or by dragging the window header
+/// NK_WINDOW_SCALABLE | The scalable flag indicates that a window can be scaled by user input by dragging a scaler icon at the bottom of the window
+/// NK_WINDOW_CLOSABLE | Adds a closable icon into the header
+/// NK_WINDOW_MINIMIZABLE | Adds a minimize icon into the header
+/// NK_WINDOW_NO_SCROLLBAR | Removes the scrollbar from the window
+/// NK_WINDOW_TITLE | Forces a header at the top of the window showing the title
+/// NK_WINDOW_SCROLL_AUTO_HIDE | Automatically hides the window scrollbar if no user interaction: also requires delta time in `nk_context` to be set each frame
+/// NK_WINDOW_BACKGROUND | Always keep window in the background
+/// NK_WINDOW_SCALE_LEFT | Puts the window scaler in the bottom-left corner instead of the bottom-right
+/// NK_WINDOW_NO_INPUT | Prevents the window from scaling, moving or getting focus
+///
+/// #### nk_collapse_states
+/// State | Description
+/// ----------------|-----------------------------------------------------------
+/// __NK_MINIMIZED__| UI section is collapsed and not visible until maximized
+/// __NK_MAXIMIZED__| UI section is extended and visible until minimized
+/// <br /><br />
+*/
+enum nk_panel_flags {
+ NK_WINDOW_BORDER = NK_FLAG(0),
+ NK_WINDOW_MOVABLE = NK_FLAG(1),
+ NK_WINDOW_SCALABLE = NK_FLAG(2),
+ NK_WINDOW_CLOSABLE = NK_FLAG(3),
+ NK_WINDOW_MINIMIZABLE = NK_FLAG(4),
+ NK_WINDOW_NO_SCROLLBAR = NK_FLAG(5),
+ NK_WINDOW_TITLE = NK_FLAG(6),
+ NK_WINDOW_SCROLL_AUTO_HIDE = NK_FLAG(7),
+ NK_WINDOW_BACKGROUND = NK_FLAG(8),
+ NK_WINDOW_SCALE_LEFT = NK_FLAG(9),
+ NK_WINDOW_NO_INPUT = NK_FLAG(10)
+};
+/*/// #### nk_begin
+/// Starts a new window; needs to be called every frame for every
+/// window (unless hidden) or otherwise the window gets removed
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// int nk_begin(struct nk_context *ctx, const char *title, struct nk_rect bounds, nk_flags flags);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to a previously initialized `nk_context` struct
+/// __title__ | Window title and identifier. Needs to be persistent over frames to identify the window
+/// __bounds__ | Initial position and window size. However if you do not define `NK_WINDOW_SCALABLE` or `NK_WINDOW_MOVABLE` you can set window position and size every frame
+/// __flags__ | Window flags defined in the nk_panel_flags section with a number of different window behaviors
+///
+/// Returns `true(1)` if the window can be filled up with widgets from this point
+/// until `nk_end` or `false(0)` otherwise for example if minimized
+*/
+NK_API int nk_begin(struct nk_context *ctx, const char *title, struct nk_rect bounds, nk_flags flags);
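+/*/// A minimal usage sketch combining several `nk_panel_flags` from the table above;
+/// `nk_layout_row_dynamic`, `nk_label` and `nk_button_label` are standard nuklear
+/// calls used here purely for illustration.
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// if (nk_begin(ctx, "Demo", nk_rect(50, 50, 220, 220),
+/// NK_WINDOW_BORDER|NK_WINDOW_MOVABLE|NK_WINDOW_TITLE|NK_WINDOW_CLOSABLE)) {
+/// nk_layout_row_dynamic(ctx, 30, 1);
+/// nk_label(ctx, "Hello", NK_TEXT_LEFT);
+/// if (nk_button_label(ctx, "Quit"))
+/// nk_window_close(ctx, "Demo"); // request removal at the end of the frame
+/// }
+/// nk_end(ctx);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+*/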
+/*/// #### nk_begin_titled
+/// Extended window start with separated title and identifier to allow multiple
+/// windows with same title but not name
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// int nk_begin_titled(struct nk_context *ctx, const char *name, const char *title, struct nk_rect bounds, nk_flags flags);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to a previously initialized `nk_context` struct
+/// __name__ | Window identifier. Needs to be persistent over frames to identify the window
+/// __title__ | Window title displayed inside header if flag `NK_WINDOW_TITLE` or either `NK_WINDOW_CLOSABLE` or `NK_WINDOW_MINIMIZED` was set
+/// __bounds__ | Initial position and window size. However if you do not define `NK_WINDOW_SCALABLE` or `NK_WINDOW_MOVABLE` you can set window position and size every frame
+/// __flags__ | Window flags defined in the nk_panel_flags section with a number of different window behaviors
+///
+/// Returns `true(1)` if the window can be filled up with widgets from this point
+/// until `nk_end` or `false(0)` otherwise for example if minimized
+*/
+NK_API int nk_begin_titled(struct nk_context *ctx, const char *name, const char *title, struct nk_rect bounds, nk_flags flags);
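+/*/// A short sketch of two windows that share the displayed title but stay distinct
+/// through their identifiers, which is the case `nk_begin_titled` exists for;
+/// the names, bounds and flags are illustrative.
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// if (nk_begin_titled(ctx, "settings_audio", "Settings", nk_rect(40, 40, 200, 200), NK_WINDOW_TITLE)) {
+/// // [... audio widgets ...]
+/// }
+/// nk_end(ctx);
+/// if (nk_begin_titled(ctx, "settings_video", "Settings", nk_rect(260, 40, 200, 200), NK_WINDOW_TITLE)) {
+/// // [... video widgets ...]
+/// }
+/// nk_end(ctx);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+*/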
+/*/// #### nk_end
+/// Needs to be called at the end of the window building process to process scaling, scrollbars and general cleanup.
+/// All widget calls after this function will result in asserts or no state changes
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// void nk_end(struct nk_context *ctx);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to a previously initialized `nk_context` struct
+*/
+NK_API void nk_end(struct nk_context *ctx);
+/*/// #### nk_window_find
+/// Finds and returns a window from passed name
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// struct nk_window *nk_window_find(struct nk_context *ctx, const char *name);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to a previously initialized `nk_context` struct
+/// __name__ | Window identifier
+///
+/// Returns a `nk_window` struct pointing to the identified window or NULL if
+/// no window with the given name was found
+*/
+NK_API struct nk_window *nk_window_find(struct nk_context *ctx, const char *name);
+/*/// #### nk_window_get_bounds
+/// Returns a rectangle with screen position and size of the currently processed window
+///
+/// !!! WARNING
+/// Only call this function between calls `nk_begin_xxx` and `nk_end`
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// struct nk_rect nk_window_get_bounds(const struct nk_context *ctx);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to a previously initialized `nk_context` struct
+///
+/// Returns a `nk_rect` struct with the window's upper left position and size
+*/
+NK_API struct nk_rect nk_window_get_bounds(const struct nk_context *ctx);
+/*/// #### nk_window_get_position
+/// Returns the position of the currently processed window.
+///
+/// !!! WARNING
+/// Only call this function between calls `nk_begin_xxx` and `nk_end`
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// struct nk_vec2 nk_window_get_position(const struct nk_context *ctx);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to a previously initialized `nk_context` struct
+///
+/// Returns a `nk_vec2` struct with window upper left position
+*/
+NK_API struct nk_vec2 nk_window_get_position(const struct nk_context *ctx);
+/*/// #### nk_window_get_size
+/// Returns the size with width and height of the currently processed window.
+///
+/// !!! WARNING
+/// Only call this function between calls `nk_begin_xxx` and `nk_end`
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// struct nk_vec2 nk_window_get_size(const struct nk_context *ctx);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to a previously initialized `nk_context` struct
+///
+/// Returns a `nk_vec2` struct with window width and height
+*/
+NK_API struct nk_vec2 nk_window_get_size(const struct nk_context*);
+/*/// #### nk_window_get_width
+/// Returns the width of the currently processed window.
+///
+/// !!! WARNING
+/// Only call this function between calls `nk_begin_xxx` and `nk_end`
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// float nk_window_get_width(const struct nk_context *ctx);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to a previously initialized `nk_context` struct
+///
+/// Returns the current window width
+*/
+NK_API float nk_window_get_width(const struct nk_context*);
+/*/// #### nk_window_get_height
+/// Returns the height of the currently processed window.
+///
+/// !!! WARNING
+/// Only call this function between calls `nk_begin_xxx` and `nk_end`
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// float nk_window_get_height(const struct nk_context *ctx);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to a previously initialized `nk_context` struct
+///
+/// Returns the current window height
+*/
+NK_API float nk_window_get_height(const struct nk_context*);
+/*/// #### nk_window_get_panel
+/// Returns the underlying panel which contains all processing state of the current window.
+///
+/// !!! WARNING
+/// Only call this function between calls `nk_begin_xxx` and `nk_end`
+/// !!! WARNING
+/// Do not keep the returned panel pointer around, it is only valid until `nk_end`
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// struct nk_panel* nk_window_get_panel(struct nk_context *ctx);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to a previously initialized `nk_context` struct
+///
+/// Returns a pointer to window internal `nk_panel` state.
+*/
+NK_API struct nk_panel* nk_window_get_panel(struct nk_context*);
+/*/// #### nk_window_get_content_region
+/// Returns the position and size of the currently visible and non-clipped space
+/// inside the currently processed window.
+///
+/// !!! WARNING
+/// Only call this function between calls `nk_begin_xxx` and `nk_end`
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// struct nk_rect nk_window_get_content_region(struct nk_context *ctx);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to a previously initialized `nk_context` struct
+///
+/// Returns `nk_rect` struct with screen position and size (no scrollbar offset)
+/// of the visible space inside the current window
+*/
+NK_API struct nk_rect nk_window_get_content_region(struct nk_context*);
+/*/// #### nk_window_get_content_region_min
+/// Returns the upper left position of the currently visible and non-clipped
+/// space inside the currently processed window.
+///
+/// !!! WARNING
+/// Only call this function between calls `nk_begin_xxx` and `nk_end`
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// struct nk_vec2 nk_window_get_content_region_min(struct nk_context *ctx);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to a previously initialized `nk_context` struct
+///
+/// returns `nk_vec2` struct with upper left screen position (no scrollbar offset)
+/// of the visible space inside the current window
+*/
+NK_API struct nk_vec2 nk_window_get_content_region_min(struct nk_context*);
+/*/// #### nk_window_get_content_region_max
+/// Returns the lower right screen position of the currently visible and
+/// non-clipped space inside the currently processed window.
+///
+/// !!! WARNING
+/// Only call this function between calls `nk_begin_xxx` and `nk_end`
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// struct nk_vec2 nk_window_get_content_region_max(struct nk_context *ctx);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to a previously initialized `nk_context` struct
+///
+/// Returns `nk_vec2` struct with lower right screen position (no scrollbar offset)
+/// of the visible space inside the current window
+*/
+NK_API struct nk_vec2 nk_window_get_content_region_max(struct nk_context*);
+/*/// #### nk_window_get_content_region_size
+/// Returns the size of the currently visible and non-clipped space inside the
+/// currently processed window
+///
+/// !!! WARNING
+/// Only call this function between calls `nk_begin_xxx` and `nk_end`
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// struct nk_vec2 nk_window_get_content_region_size(struct nk_context *ctx);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to a previously initialized `nk_context` struct
+///
+/// Returns `nk_vec2` struct with the size of the visible space inside the current window
+*/
+NK_API struct nk_vec2 nk_window_get_content_region_size(struct nk_context*);
+/*/// #### nk_window_get_canvas
+/// Returns the draw command buffer. Can be used to draw custom widgets
+/// !!! WARNING
+/// Only call this function between calls `nk_begin_xxx` and `nk_end`
+/// !!! WARNING
+/// Do not keep the returned command buffer pointer around; it is only valid until `nk_end`
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// struct nk_command_buffer* nk_window_get_canvas(struct nk_context *ctx);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to a previously initialized `nk_context` struct
+///
+/// Returns a pointer to window internal `nk_command_buffer` struct used as
+/// drawing canvas. Can be used to do custom drawing.
+*/
+NK_API struct nk_command_buffer* nk_window_get_canvas(struct nk_context*);
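+/*/// A custom-drawing sketch that pairs the canvas with the current window bounds;
+/// `nk_fill_rect` and `nk_rgb` are stock nuklear helpers and the coordinates and
+/// color are illustrative values.
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// struct nk_command_buffer *canvas = nk_window_get_canvas(ctx);
+/// struct nk_rect bounds = nk_window_get_bounds(ctx);
+/// // draw a small red rectangle relative to the window's top-left corner
+/// nk_fill_rect(canvas, nk_rect(bounds.x + 10, bounds.y + 40, 60, 20), 0, nk_rgb(255, 0, 0));
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+*/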
+/*/// #### nk_window_get_scroll
+/// Gets the scroll offset for the current window
+/// !!! WARNING
+/// Only call this function between calls `nk_begin_xxx` and `nk_end`
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// void nk_window_get_scroll(struct nk_context *ctx, nk_uint *offset_x, nk_uint *offset_y);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// -------------|-----------------------------------------------------------
+/// __ctx__ | Must point to a previously initialized `nk_context` struct
+/// __offset_x__ | A pointer to the x offset output (or NULL to ignore)
+/// __offset_y__ | A pointer to the y offset output (or NULL to ignore)
+*/
+NK_API void nk_window_get_scroll(struct nk_context*, nk_uint *offset_x, nk_uint *offset_y);
+/*/// #### nk_window_has_focus
+/// Returns if the currently processed window is currently active
+/// !!! WARNING
+/// Only call this function between calls `nk_begin_xxx` and `nk_end`
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// int nk_window_has_focus(const struct nk_context *ctx);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to a previously initialized `nk_context` struct
+///
+/// Returns `false(0)` if current window is not active or `true(1)` if it is
+*/
+NK_API int nk_window_has_focus(const struct nk_context*);
+/*/// #### nk_window_is_hovered
+/// Returns if the current window is being hovered
+/// !!! WARNING
+/// Only call this function between calls `nk_begin_xxx` and `nk_end`
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// int nk_window_is_hovered(struct nk_context *ctx);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to a previously initialized `nk_context` struct
+///
+/// Returns `true(1)` if current window is hovered or `false(0)` otherwise
+*/
+NK_API int nk_window_is_hovered(struct nk_context*);
+/*/// #### nk_window_is_collapsed
+/// Returns if the window with given name is currently minimized/collapsed
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// int nk_window_is_collapsed(struct nk_context *ctx, const char *name);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to a previously initialized `nk_context` struct
+/// __name__ | Identifier of window you want to check if it is collapsed
+///
+/// Returns `true(1)` if current window is minimized and `false(0)` if window not
+/// found or is not minimized
+*/
+NK_API int nk_window_is_collapsed(struct nk_context *ctx, const char *name);
+/*/// #### nk_window_is_closed
+/// Returns if the window with given name was closed by calling `nk_window_close`
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// int nk_window_is_closed(struct nk_context *ctx, const char *name);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to a previously initialized `nk_context` struct
+/// __name__ | Identifier of window you want to check if it is closed
+///
+/// Returns `true(1)` if current window was closed or `false(0)` window not found or not closed
+*/
+NK_API int nk_window_is_closed(struct nk_context*, const char*);
+/*/// #### nk_window_is_hidden
+/// Returns if the window with given name is hidden
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// int nk_window_is_hidden(struct nk_context *ctx, const char *name);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to a previously initialized `nk_context` struct
+/// __name__ | Identifier of window you want to check if it is hidden
+///
+/// Returns `true(1)` if current window is hidden or `false(0)` window not found or visible
+*/
+NK_API int nk_window_is_hidden(struct nk_context*, const char*);
+/*/// #### nk_window_is_active
+/// Same as nk_window_has_focus for some reason
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// int nk_window_is_active(struct nk_context *ctx, const char *name);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to a previously initialized `nk_context` struct
+/// __name__ | Identifier of window you want to check if it is active
+///
+/// Returns `true(1)` if current window is active or `false(0)` window not found or not active
+*/
+NK_API int nk_window_is_active(struct nk_context*, const char*);
+/*/// #### nk_window_is_any_hovered
+/// Returns if any window is being hovered
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// int nk_window_is_any_hovered(struct nk_context*);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to a previously initialized `nk_context` struct
+///
+/// Returns `true(1)` if any window is hovered or `false(0)` otherwise
+*/
+NK_API int nk_window_is_any_hovered(struct nk_context*);
+/*/// #### nk_item_is_any_active
+/// Returns if any window is being hovered or any widget is currently active.
+/// Can be used to decide if input should be processed by the UI or by your own input handling,
+/// for example when combining the UI with a 3D camera moving inside a 3D space (see the sketch below).
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// int nk_item_is_any_active(struct nk_context*);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to a previously initialized `nk_context` struct
+///
+/// Returns `true(1)` if any window is hovered or any item is active or `false(0)` otherwise
+*/
+NK_API int nk_item_is_any_active(struct nk_context*);
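+/*/// A sketch of the input-routing pattern described above; `update_camera`, `camera`
+/// and the mouse deltas are hypothetical application code, only `nk_item_is_any_active`
+/// is part of nuklear.
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// if (!nk_item_is_any_active(&ctx)) {
+/// // no window is hovered and no widget is active: let the 3D camera consume the input
+/// update_camera(&camera, mouse_dx, mouse_dy);
+/// }
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+*/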
+/*/// #### nk_window_set_bounds
+/// Updates position and size of window with passed in name
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// void nk_window_set_bounds(struct nk_context*, const char *name, struct nk_rect bounds);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to a previously initialized `nk_context` struct
+/// __name__ | Identifier of the window to modify both position and size
+/// __bounds__ | A `nk_rect` struct with the new position and size
+*/
+NK_API void nk_window_set_bounds(struct nk_context*, const char *name, struct nk_rect bounds);
+/*/// #### nk_window_set_position
+/// Updates position of window with passed name
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// void nk_window_set_position(struct nk_context*, const char *name, struct nk_vec2 pos);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to a previously initialized `nk_context` struct
+/// __name__ | Identifier of the window to modify the position of
+/// __pos__ | A `nk_vec2` struct with the new position
+*/
+NK_API void nk_window_set_position(struct nk_context*, const char *name, struct nk_vec2 pos);
+/*/// #### nk_window_set_size
+/// Updates size of window with passed in name
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// void nk_window_set_size(struct nk_context*, const char *name, struct nk_vec2);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to a previously initialized `nk_context` struct
+/// __name__ | Identifier of the window to modify the size of
+/// __size__ | A `nk_vec2` struct with the new window size
+*/
+NK_API void nk_window_set_size(struct nk_context*, const char *name, struct nk_vec2);
+/*/// #### nk_window_set_focus
+/// Sets the window with given name as active
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// void nk_window_set_focus(struct nk_context*, const char *name);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to a previously initialized `nk_context` struct
+/// __name__ | Identifier of the window to set focus on
+*/
+NK_API void nk_window_set_focus(struct nk_context*, const char *name);
+/*/// #### nk_window_set_scroll
+/// Sets the scroll offset for the current window
+/// !!! WARNING
+/// Only call this function between calls `nk_begin_xxx` and `nk_end`
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// void nk_window_set_scroll(struct nk_context *ctx, nk_uint offset_x, nk_uint offset_y);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// -------------|-----------------------------------------------------------
+/// __ctx__ | Must point to a previously initialized `nk_context` struct
+/// __offset_x__ | The x offset to scroll to
+/// __offset_y__ | The y offset to scroll to
+*/
+NK_API void nk_window_set_scroll(struct nk_context*, nk_uint offset_x, nk_uint offset_y);
+/*/// #### nk_window_close
+/// Closes a window and marks it for being freed at the end of the frame
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// void nk_window_close(struct nk_context *ctx, const char *name);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to a previously initialized `nk_context` struct
+/// __name__ | Identifier of the window to close
+*/
+NK_API void nk_window_close(struct nk_context *ctx, const char *name);
+/*/// #### nk_window_collapse
+/// Updates collapse state of a window with given name
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// void nk_window_collapse(struct nk_context*, const char *name, enum nk_collapse_states state);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to a previously initialized `nk_context` struct
+/// __name__ | Identifier of the window to collapse
+/// __state__ | Value out of the nk_collapse_states section
+*/
+NK_API void nk_window_collapse(struct nk_context*, const char *name, enum nk_collapse_states state);
+/*/// #### nk_window_collapse_if
+/// Updates collapse state of a window with given name if given condition is met
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// void nk_window_collapse_if(struct nk_context*, const char *name, enum nk_collapse_states, int cond);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to a previously initialized `nk_context` struct
+/// __name__ | Identifier of the window to either collapse or maximize
+/// __state__ | value out of nk_collapse_states section the window should be put into
+/// __cond__ | condition that has to be met to actually commit the collapse state change
+*/
+NK_API void nk_window_collapse_if(struct nk_context*, const char *name, enum nk_collapse_states, int cond);
+/*/// #### nk_window_show
+/// Updates visibility state of a window with given name
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// void nk_window_show(struct nk_context*, const char *name, enum nk_show_states);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to a previously initialized `nk_context` struct
+/// __name__ | Identifier of the window to either hide or show
+/// __state__ | State with either visible or hidden to modify the window with
+*/
+NK_API void nk_window_show(struct nk_context*, const char *name, enum nk_show_states);
+/*/// #### nk_window_show_if
+/// Updates visibility state of a window with given name if a given condition is met
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// void nk_window_show_if(struct nk_context*, const char *name, enum nk_show_states, int cond);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to a previously initialized `nk_context` struct
+/// __name__ | Identifier of the window to either hide or show
+/// __state__ | State with either visible or hidden to modify the window with
+/// __cond__ | Condition that has to be met to actually commit the visibility state change
+*/
+NK_API void nk_window_show_if(struct nk_context*, const char *name, enum nk_show_states, int cond);
+/* =============================================================================
+ *
+ * LAYOUT
+ *
+ * =============================================================================
+/// ### Layouting
+/// Layouting in general describes placing widgets inside a window with position and size.
+/// This particular implementation provides several different APIs for layouting,
+/// each with different trade-offs between control and ease of use. <br /><br />
+///
+/// All layouting methods in this library are based around the concept of a row.
+/// A row has a height the window content grows by and a number of columns, and each
+/// layouting method specifies how each widget is placed inside the row.
+/// After a row has been allocated by calling a layouting function, filling it
+/// with widgets advances an internal pointer over the allocated row. <br /><br />
+///
+/// To actually define a layout you just call the appropriate layouting function
+/// and each subsequent widget call will place the widget as specified. Important
+/// here is that if you define more widgets than columns declared inside the layout
+/// function, it will allocate the next row without you having to make another
+/// layouting call. <br /><br />
+///
+/// The biggest limitation of all these APIs outside the `nk_layout_space_xxx` API
+/// is that you have to define the row height for each row. However the row height
+/// often depends on the height of the font. <br /><br />
+///
+/// To address that, nuklear internally uses a minimum row height that is set to the
+/// height plus padding of the currently active font and overwrites the row height
+/// value if it is zero. <br /><br />
+///
+/// If you manually want to change the minimum row height then
+/// use nk_layout_set_min_row_height, and use nk_layout_reset_min_row_height to
+/// reset it back to be derived from font height. <br /><br />
+///
+/// Also, if you change the font in nuklear it will automatically change the minimum
+/// row height for you. This means if you change the font but still want
+/// a minimum row height smaller than the font you have to repush your value, as shown
+/// in the sketch below. <br /><br />
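+///
+/// A brief sketch of overriding and later resetting the minimum row height with the
+/// two functions named above (the values are illustrative):
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// nk_layout_set_min_row_height(ctx, 8); // allow rows smaller than the font height
+/// nk_layout_row_dynamic(ctx, 8, 1);
+/// nk_widget(...);
+/// nk_layout_reset_min_row_height(ctx); // back to the font-derived minimum
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~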
+///
+/// For more advanced UIs I would even recommend using the `nk_layout_space_xxx`
+/// layouting method in combination with a cassowary constraint solver (there are
+/// some versions on github with a permissive license) to take full control over widget
+/// layouting yourself. However for quick and dirty layouting using all the other layouting
+/// functions should be fine.
+///
+/// #### Usage
+/// 1. __nk_layout_row_dynamic__<br /><br />
+/// The easiest layouting function is `nk_layout_row_dynamic`. It provides each
+/// widget with the same horizontal space inside the row and dynamically grows
+/// if the owning window grows in width. So the number of columns dictates
+/// the size of each widget dynamically by the formula:
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// widget_width = (window_width - padding - spacing) * (1/column_count)
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Just like all other layouting APIs, if you define more widgets than columns this
+/// library will allocate a new row and keep all layouting parameters previously
+/// defined.
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// if (nk_begin_xxx(...)) {
+/// // first row with height: 30 composed of two widgets
+/// nk_layout_row_dynamic(&ctx, 30, 2);
+/// nk_widget(...);
+/// nk_widget(...);
+/// //
+/// // second row with same parameter as defined above
+/// nk_widget(...);
+/// nk_widget(...);
+/// //
+/// // third row uses 0 for height which will use auto layouting
+/// nk_layout_row_dynamic(&ctx, 0, 2);
+/// nk_widget(...);
+/// nk_widget(...);
+/// }
+/// nk_end(...);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// 2. __nk_layout_row_static__<br /><br />
+/// Another easy layouting function is `nk_layout_row_static`. It provides each
+/// widget with the same horizontal pixel width inside the row and does not grow
+/// if the owning window scales smaller or bigger.
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// if (nk_begin_xxx(...)) {
+/// // first row with height: 30 composed of two widgets with width: 80
+/// nk_layout_row_static(&ctx, 30, 80, 2);
+/// nk_widget(...);
+/// nk_widget(...);
+/// //
+/// // second row with same parameter as defined above
+/// nk_widget(...);
+/// nk_widget(...);
+/// //
+/// // third row uses 0 for height which will use auto layouting
+/// nk_layout_row_static(&ctx, 0, 80, 2);
+/// nk_widget(...);
+/// nk_widget(...);
+/// }
+/// nk_end(...);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// 3. __nk_layout_row_xxx__<br /><br />
+/// A slightly more advanced layouting API consists of the functions `nk_layout_row_begin`,
+/// `nk_layout_row_push` and `nk_layout_row_end`. They allow you to directly
+/// specify each column's pixel width or window ratio in a row. It supports either
+/// directly setting a per column pixel width or a widget window ratio but not
+/// both. Furthermore it is an immediate mode API, so each value is directly
+/// pushed before calling a widget. Therefore the layout is not automatically
+/// repeating like the last two layouting functions.
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// if (nk_begin_xxx(...)) {
+/// // first row with height: 25 composed of two widgets with width 60 and 40
+/// nk_layout_row_begin(ctx, NK_STATIC, 25, 2);
+/// nk_layout_row_push(ctx, 60);
+/// nk_widget(...);
+/// nk_layout_row_push(ctx, 40);
+/// nk_widget(...);
+/// nk_layout_row_end(ctx);
+/// //
+/// // second row with height: 25 composed of two widgets with window ratio 0.25 and 0.75
+/// nk_layout_row_begin(ctx, NK_DYNAMIC, 25, 2);
+/// nk_layout_row_push(ctx, 0.25f);
+/// nk_widget(...);
+/// nk_layout_row_push(ctx, 0.75f);
+/// nk_widget(...);
+/// nk_layout_row_end(ctx);
+/// //
+/// // third row with auto generated height: composed of two widgets with window ratio 0.25 and 0.75
+/// nk_layout_row_begin(ctx, NK_DYNAMIC, 0, 2);
+/// nk_layout_row_push(ctx, 0.25f);
+/// nk_widget(...);
+/// nk_layout_row_push(ctx, 0.75f);
+/// nk_widget(...);
+/// nk_layout_row_end(ctx);
+/// }
+/// nk_end(...);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// 4. __nk_layout_row__<br /><br />
+/// The array counterpart to the nk_layout_row_xxx API is the single nk_layout_row
+/// function. Instead of pushing either a pixel width or a window ratio for every widget
+/// it allows you to define them in an array. The trade-off for less control is that
+/// `nk_layout_row` is automatically repeating. Otherwise the behavior is the
+/// same.
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// if (nk_begin_xxx(...)) {
+/// // two rows with height: 30 composed of two widgets with width 60 and 40
+/// const float size[] = {60,40};
+/// nk_layout_row(ctx, NK_STATIC, 30, 2, size);
+/// nk_widget(...);
+/// nk_widget(...);
+/// nk_widget(...);
+/// nk_widget(...);
+/// //
+/// // two rows with height: 30 composed of two widgets with window ratio 0.25 and 0.75
+/// const float ratio[] = {0.25, 0.75};
+/// nk_layout_row(ctx, NK_DYNAMIC, 30, 2, ratio);
+/// nk_widget(...);
+/// nk_widget(...);
+/// nk_widget(...);
+/// nk_widget(...);
+/// //
+/// // two rows with auto generated height composed of two widgets with window ratio 0.25 and 0.75
+/// nk_layout_row(ctx, NK_DYNAMIC, 0, 2, ratio);
+/// nk_widget(...);
+/// nk_widget(...);
+/// nk_widget(...);
+/// nk_widget(...);
+/// }
+/// nk_end(...);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// 5. __nk_layout_row_template_xxx__<br /><br />
+/// The most complex and second most flexible API is a simplified flexbox version without
+/// line wrapping and weights for dynamic widgets. It is an immediate mode API but
+/// unlike `nk_layout_row_xxx` it has auto repeat behavior and needs to be called
+/// before calling the templated widgets.
+/// The row template layout has three different per-widget size specifiers. The first
+/// one is `nk_layout_row_template_push_static` with a fixed widget pixel width.
+/// Static widgets do not grow if the row grows and always keep the same width.
+/// The second size specifier is `nk_layout_row_template_push_variable`,
+/// which defines a minimum widget width but can also grow if more space is
+/// available that is not taken by other widgets.
+/// Finally there are dynamic widgets with `nk_layout_row_template_push_dynamic`,
+/// which are completely flexible and, unlike variable widgets, can even shrink
+/// to zero if not enough space is provided.
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// if (nk_begin_xxx(...)) {
+/// // two rows with height: 30 composed of three widgets
+/// nk_layout_row_template_begin(ctx, 30);
+/// nk_layout_row_template_push_dynamic(ctx);
+/// nk_layout_row_template_push_variable(ctx, 80);
+/// nk_layout_row_template_push_static(ctx, 80);
+/// nk_layout_row_template_end(ctx);
+/// //
+/// // first row
+/// nk_widget(...); // dynamic widget can go to zero if not enough space
+/// nk_widget(...); // variable widget with min 80 pixel but can grow bigger if enough space
+/// nk_widget(...); // static widget with fixed 80 pixel width
+/// //
+/// // second row same layout
+/// nk_widget(...);
+/// nk_widget(...);
+/// nk_widget(...);
+/// }
+/// nk_end(...);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// 6. __nk_layout_space_xxx__<br /><br />
+/// Finally, the most flexible API lets you place widgets directly inside the
+/// window. The space layout API is an immediate mode API which does not support
+/// row auto repeat and sets the position and size of a widget directly. Position
+/// and size can be specified either as a ratio of the allocated space or as a
+/// local position and pixel size inside the allocated space. Since this API is
+/// quite powerful there are a number of utility functions to get the available
+/// space and to convert between the locally allocated space and screen space.
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// if (nk_begin_xxx(...)) {
+/// // static row with height: 500 (you can set column count to INT_MAX if you don't want to be bothered)
+/// nk_layout_space_begin(ctx, NK_STATIC, 500, INT_MAX);
+/// nk_layout_space_push(ctx, nk_rect(0,0,150,200));
+/// nk_widget(...);
+/// nk_layout_space_push(ctx, nk_rect(200,200,100,200));
+/// nk_widget(...);
+/// nk_layout_space_end(ctx);
+/// //
+/// // dynamic row with height: 500 (you can set column count to INT_MAX if you don't want to be bothered)
+/// nk_layout_space_begin(ctx, NK_DYNAMIC, 500, INT_MAX);
+/// nk_layout_space_push(ctx, nk_rect(0.5,0.5,0.1,0.1));
+/// nk_widget(...);
+/// nk_layout_space_push(ctx, nk_rect(0.7,0.6,0.1,0.1));
+/// nk_widget(...);
+/// }
+/// nk_end(...);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// #### Reference
+/// Function | Description
+/// ----------------------------------------|------------------------------------
+/// nk_layout_set_min_row_height | Set the currently used minimum row height to a specified value
+/// nk_layout_reset_min_row_height | Resets the currently used minimum row height to font height
+/// nk_layout_widget_bounds | Calculates current width a static layout row can fit inside a window
+/// nk_layout_ratio_from_pixel | Utility functions to calculate window ratio from pixel size
+//
+/// nk_layout_row_dynamic | Current layout is divided into n same sized growing columns
+/// nk_layout_row_static | Current layout is divided into n same fixed sized columns
+/// nk_layout_row_begin | Starts a new row with given height and number of columns
+/// nk_layout_row_push | Pushes another column with given size or window ratio
+/// nk_layout_row_end | Finished previously started row
+/// nk_layout_row | Specifies row columns in array as either window ratio or size
+//
+/// nk_layout_row_template_begin | Begins the row template declaration
+/// nk_layout_row_template_push_dynamic | Adds a dynamic column that dynamically grows and can go to zero if not enough space
+/// nk_layout_row_template_push_variable | Adds a variable column that dynamically grows but does not shrink below specified pixel width
+/// nk_layout_row_template_push_static | Adds a static column that does not grow and will always have the same size
+/// nk_layout_row_template_end | Marks the end of the row template
+//
+/// nk_layout_space_begin | Begins a new layouting space that allows to specify each widgets position and size
+/// nk_layout_space_push | Pushes position and size of the next widget in own coordinate space either as pixel or ratio
+/// nk_layout_space_end | Marks the end of the layouting space
+//
+/// nk_layout_space_bounds | Callable after nk_layout_space_begin and returns total space allocated
+/// nk_layout_space_to_screen | Converts vector from nk_layout_space coordinate space into screen space
+/// nk_layout_space_to_local | Converts vector from screen space into nk_layout_space coordinates
+/// nk_layout_space_rect_to_screen | Converts rectangle from nk_layout_space coordinate space into screen space
+/// nk_layout_space_rect_to_local | Converts rectangle from screen space into nk_layout_space coordinates
+*/
+/*/// #### nk_layout_set_min_row_height
+/// Sets the currently used minimum row height.
+/// !!! WARNING
+/// The passed height needs to include both your preferred row height
+/// as well as padding. No internal padding is added.
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// void nk_layout_set_min_row_height(struct nk_context*, float height);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to an previously initialized `nk_context` struct after call `nk_begin_xxx`
+/// __height__ | New minimum row height to be used for auto generating the row height
+*/
+NK_API void nk_layout_set_min_row_height(struct nk_context*, float height);
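+/*/// A minimal usage sketch, assuming a previously initialized `nk_context` named
+/// `ctx` inside a window begun with `nk_begin_xxx`; the 40 pixel value is illustrative:
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// // enforce a larger minimum height for the following auto sized rows
+/// nk_layout_set_min_row_height(ctx, 40);
+/// nk_layout_row_dynamic(ctx, 0, 2); // height 0 -> auto layouting, clamped to the new minimum
+/// nk_widget(...);
+/// nk_widget(...);
+/// // restore the default font based minimum row height
+/// nk_layout_reset_min_row_height(ctx);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+*/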
+/*/// #### nk_layout_reset_min_row_height
+/// Reset the currently used minimum row height back to `font_height + text_padding + padding`
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// void nk_layout_reset_min_row_height(struct nk_context*);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to an previously initialized `nk_context` struct after call `nk_begin_xxx`
+*/
+NK_API void nk_layout_reset_min_row_height(struct nk_context*);
+/*/// #### nk_layout_widget_bounds
+/// Returns the bounds of the next row allocated by one of the layouting functions
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// struct nk_rect nk_layout_widget_bounds(struct nk_context*);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to an previously initialized `nk_context` struct after call `nk_begin_xxx`
+///
+/// Returns `nk_rect` with both position and size of the next row
+*/
+NK_API struct nk_rect nk_layout_widget_bounds(struct nk_context*);
+/*/// #### nk_layout_ratio_from_pixel
+/// Utility functions to calculate window ratio from pixel size
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// float nk_layout_ratio_from_pixel(struct nk_context*, float pixel_width);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to an previously initialized `nk_context` struct after call `nk_begin_xxx`
+/// __pixel__ | Pixel width to convert to a window ratio
+///
+/// Returns the window ratio for the given pixel width as `float`
+*/
+NK_API float nk_layout_ratio_from_pixel(struct nk_context*, float pixel_width);
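+/*/// A minimal usage sketch, assuming a previously initialized `nk_context` named
+/// `ctx`; the 200 pixel width is illustrative:
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// // reserve roughly 200 pixels for the first column of a dynamic row
+/// float ratio = nk_layout_ratio_from_pixel(ctx, 200);
+/// nk_layout_row_begin(ctx, NK_DYNAMIC, 30, 2);
+/// nk_layout_row_push(ctx, ratio);
+/// nk_widget(...);
+/// nk_layout_row_push(ctx, 1.0f - ratio);
+/// nk_widget(...);
+/// nk_layout_row_end(ctx);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+*/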
+/*/// #### nk_layout_row_dynamic
+/// Sets current row layout to share horizontal space
+/// between @cols number of widgets evenly. Once called all subsequent widget
+/// calls greater than @cols will allocate a new row with same layout.
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// void nk_layout_row_dynamic(struct nk_context *ctx, float height, int cols);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to an previously initialized `nk_context` struct after call `nk_begin_xxx`
+/// __height__ | Holds height of each widget in row or zero for auto layouting
+/// __columns__ | Number of widget inside row
+*/
+NK_API void nk_layout_row_dynamic(struct nk_context *ctx, float height, int cols);
+/*/// #### nk_layout_row_static
+/// Sets current row layout to fill @cols number of widgets
+/// in row with same @item_width horizontal size. Once called all subsequent widget
+/// calls greater than @cols will allocate a new row with same layout.
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// void nk_layout_row_static(struct nk_context *ctx, float height, int item_width, int cols);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to an previously initialized `nk_context` struct after call `nk_begin_xxx`
+/// __height__ | Holds height of each widget in row or zero for auto layouting
+/// __width__ | Holds pixel width of each widget in the row
+/// __columns__ | Number of widget inside row
+*/
+NK_API void nk_layout_row_static(struct nk_context *ctx, float height, int item_width, int cols);
+/*/// #### nk_layout_row_begin
+/// Starts a new dynamic or fixed row with given height and columns.
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// void nk_layout_row_begin(struct nk_context *ctx, enum nk_layout_format fmt, float row_height, int cols);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to an previously initialized `nk_context` struct after call `nk_begin_xxx`
+/// __fmt__ | either `NK_DYNAMIC` for window ratio or `NK_STATIC` for fixed size columns
+/// __height__ | holds height of each widget in row or zero for auto layouting
+/// __columns__ | Number of widget inside row
+*/
+NK_API void nk_layout_row_begin(struct nk_context *ctx, enum nk_layout_format fmt, float row_height, int cols);
+/*/// #### nk_layout_row_push
+/// Specifies either window ratio or width of a single column
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// void nk_layout_row_push(struct nk_context*, float value);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to an previously initialized `nk_context` struct after call `nk_begin_xxx`
+/// __value__ | either a window ratio or fixed width depending on @fmt in previous `nk_layout_row_begin` call
+*/
+NK_API void nk_layout_row_push(struct nk_context*, float value);
+/*/// #### nk_layout_row_end
+/// Finished previously started row
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// void nk_layout_row_end(struct nk_context*);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to an previously initialized `nk_context` struct after call `nk_begin_xxx`
+*/
+NK_API void nk_layout_row_end(struct nk_context*);
+/*/// #### nk_layout_row
+/// Specifies row columns in array as either window ratio or size
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// void nk_layout_row(struct nk_context*, enum nk_layout_format, float height, int cols, const float *ratio);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to an previously initialized `nk_context` struct after call `nk_begin_xxx`
+/// __fmt__ | Either `NK_DYNAMIC` for window ratio or `NK_STATIC` for fixed size columns
+/// __height__ | Holds height of each widget in row or zero for auto layouting
+/// __columns__ | Number of widgets inside row
+/// __ratio__   | Array holding either a window ratio or pixel width per column, depending on __fmt__
+*/
+NK_API void nk_layout_row(struct nk_context*, enum nk_layout_format, float height, int cols, const float *ratio);
+/*/// #### nk_layout_row_template_begin
+/// Begins the row template declaration
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// void nk_layout_row_template_begin(struct nk_context*, float row_height);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to an previously initialized `nk_context` struct after call `nk_begin_xxx`
+/// __height__ | Holds height of each widget in row or zero for auto layouting
+*/
+NK_API void nk_layout_row_template_begin(struct nk_context*, float row_height);
+/*/// #### nk_layout_row_template_push_dynamic
+/// Adds a dynamic column that dynamically grows and can go to zero if not enough space
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// void nk_layout_row_template_push_dynamic(struct nk_context*);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__     | Must point to a previously initialized `nk_context` struct after calling `nk_begin_xxx`
+*/
+NK_API void nk_layout_row_template_push_dynamic(struct nk_context*);
+/*/// #### nk_layout_row_template_push_variable
+/// Adds a variable column that dynamically grows but does not shrink below specified pixel width
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// void nk_layout_row_template_push_variable(struct nk_context*, float min_width);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to an previously initialized `nk_context` struct after call `nk_begin_xxx`
+/// __width__ | Holds the minimum pixel width the next column must always be
+*/
+NK_API void nk_layout_row_template_push_variable(struct nk_context*, float min_width);
+/*/// #### nk_layout_row_template_push_static
+/// Adds a static column that does not grow and will always have the same size
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// void nk_layout_row_template_push_static(struct nk_context*, float width);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to an previously initialized `nk_context` struct after call `nk_begin_xxx`
+/// __width__ | Holds the absolute pixel width value the next column must be
+*/
+NK_API void nk_layout_row_template_push_static(struct nk_context*, float width);
+/*/// #### nk_layout_row_template_end
+/// Marks the end of the row template
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// void nk_layout_row_template_end(struct nk_context*);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to an previously initialized `nk_context` struct after call `nk_begin_xxx`
+*/
+NK_API void nk_layout_row_template_end(struct nk_context*);
+/*/// #### nk_layout_space_begin
+/// Begins a new layouting space that allows you to specify each widget's position and size.
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// void nk_layout_space_begin(struct nk_context*, enum nk_layout_format, float height, int widget_count);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to an previously initialized `nk_context` struct after call `nk_begin_xxx`
+/// __fmt__ | Either `NK_DYNAMIC` for window ratio or `NK_STATIC` for fixed size columns
+/// __height__ | Holds height of each widget in row or zero for auto layouting
+/// __columns__ | Number of widgets inside row
+*/
+NK_API void nk_layout_space_begin(struct nk_context*, enum nk_layout_format, float height, int widget_count);
+/*/// #### nk_layout_space_push
+/// Pushes position and size of the next widget in own coordinate space either as pixel or ratio
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// void nk_layout_space_push(struct nk_context *ctx, struct nk_rect bounds);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to an previously initialized `nk_context` struct after call `nk_layout_space_begin`
+/// __bounds__ | Position and size in layout space local coordinates
+*/
+NK_API void nk_layout_space_push(struct nk_context*, struct nk_rect bounds);
+/*/// #### nk_layout_space_end
+/// Marks the end of the layout space
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// void nk_layout_space_end(struct nk_context*);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to an previously initialized `nk_context` struct after call `nk_layout_space_begin`
+*/
+NK_API void nk_layout_space_end(struct nk_context*);
+/*/// #### nk_layout_space_bounds
+/// Utility function to calculate total space allocated for `nk_layout_space`
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// struct nk_rect nk_layout_space_bounds(struct nk_context*);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to an previously initialized `nk_context` struct after call `nk_layout_space_begin`
+///
+/// Returns `nk_rect` holding the total space allocated
+*/
+NK_API struct nk_rect nk_layout_space_bounds(struct nk_context*);
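+/*/// A minimal usage sketch, assuming a previously initialized `nk_context` named
+/// `ctx`; it centers a 100x100 pixel widget inside the allocated space:
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// nk_layout_space_begin(ctx, NK_STATIC, 300, 1);
+/// struct nk_rect total = nk_layout_space_bounds(ctx);
+/// nk_layout_space_push(ctx, nk_rect((total.w - 100) / 2, (total.h - 100) / 2, 100, 100));
+/// nk_widget(...);
+/// nk_layout_space_end(ctx);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+*/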
+/*/// #### nk_layout_space_to_screen
+/// Converts vector from nk_layout_space coordinate space into screen space
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// struct nk_vec2 nk_layout_space_to_screen(struct nk_context*, struct nk_vec2);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to an previously initialized `nk_context` struct after call `nk_layout_space_begin`
+/// __vec__ | Position to convert from layout space into screen coordinate space
+///
+/// Returns transformed `nk_vec2` in screen space coordinates
+*/
+NK_API struct nk_vec2 nk_layout_space_to_screen(struct nk_context*, struct nk_vec2);
+/*/// #### nk_layout_space_to_local
+/// Converts vector from screen space into layout space
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// struct nk_vec2 nk_layout_space_to_local(struct nk_context*, struct nk_vec2);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to an previously initialized `nk_context` struct after call `nk_layout_space_begin`
+/// __vec__ | Position to convert from screen space into layout coordinate space
+///
+/// Returns transformed `nk_vec2` in layout space coordinates
+*/
+NK_API struct nk_vec2 nk_layout_space_to_local(struct nk_context*, struct nk_vec2);
+/*/// #### nk_layout_space_rect_to_screen
+/// Converts rectangle from layout space into screen space
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// struct nk_rect nk_layout_space_rect_to_screen(struct nk_context*, struct nk_rect);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to an previously initialized `nk_context` struct after call `nk_layout_space_begin`
+/// __bounds__ | Rectangle to convert from layout space into screen space
+///
+/// Returns transformed `nk_rect` in screen space coordinates
+*/
+NK_API struct nk_rect nk_layout_space_rect_to_screen(struct nk_context*, struct nk_rect);
+/*/// #### nk_layout_space_rect_to_local
+/// Converts rectangle from screen space into layout space
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// struct nk_rect nk_layout_space_rect_to_local(struct nk_context*, struct nk_rect);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to an previously initialized `nk_context` struct after call `nk_layout_space_begin`
+/// __bounds__ | Rectangle to convert from screen space into layout space
+///
+/// Returns transformed `nk_rect` in layout space coordinates
+*/
+NK_API struct nk_rect nk_layout_space_rect_to_local(struct nk_context*, struct nk_rect);
+/* =============================================================================
+ *
+ * GROUP
+ *
+ * =============================================================================
+/// ### Groups
+/// Groups are basically windows inside windows. They allow you to subdivide space
+/// in a window to lay out widgets as a group. Almost all more complex widget
+/// layouting requirements can be solved using groups and basic layouting
+/// functionality. Groups, just like windows, are identified by a unique name and
+/// by default keep track of scrollbar offsets internally. However, additional
+/// versions are provided to manage the scrollbar directly.
+///
+/// #### Usage
+/// To create a group you have to call one of the three `nk_group_begin_xxx`
+/// functions to start group declarations and `nk_group_end` at the end. Furthermore it
+/// is required to check the return value of `nk_group_begin_xxx` and only process
+/// widgets inside the window if the value is not 0.
+/// Nesting groups is possible and even encouraged since many layouting schemes
+/// can only be achieved by nesting. Unlike windows, groups require `nk_group_end`
+/// to be called only if the corresponding `nk_group_begin_xxx` call does not return 0:
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// if (nk_group_begin_xxx(ctx, ...)) {
+/// // [... widgets ...]
+/// nk_group_end(ctx);
+/// }
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// In the overall structure, groups can be opened after starting a window
+/// with `nk_begin_xxx` and before calling `nk_end`:
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// struct nk_context ctx;
+/// nk_init_xxx(&ctx, ...);
+/// while (1) {
+/// // Input
+/// Event evt;
+/// nk_input_begin(&ctx);
+/// while (GetEvent(&evt)) {
+/// if (evt.type == MOUSE_MOVE)
+/// nk_input_motion(&ctx, evt.motion.x, evt.motion.y);
+/// else if (evt.type == [...]) {
+/// nk_input_xxx(...);
+/// }
+/// }
+/// nk_input_end(&ctx);
+/// //
+/// // Window
+/// if (nk_begin_xxx(...)) {
+/// // [...widgets...]
+/// nk_layout_row_dynamic(...);
+/// if (nk_group_begin_xxx(ctx, ...)) {
+/// //[... widgets ...]
+/// nk_group_end(ctx);
+/// }
+/// }
+/// nk_end(ctx);
+/// //
+/// // Draw
+/// const struct nk_command *cmd = 0;
+/// nk_foreach(cmd, &ctx) {
+/// switch (cmd->type) {
+/// case NK_COMMAND_LINE:
+/// your_draw_line_function(...);
+/// break;
+/// case NK_COMMAND_RECT:
+/// your_draw_rect_function(...);
+/// break;
+/// case ...:
+/// // [...]
+/// }
+/// }
+/// nk_clear(&ctx);
+/// }
+/// nk_free(&ctx);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+/// #### Reference
+/// Function | Description
+/// --------------------------------|-------------------------------------------
+/// nk_group_begin | Start a new group with internal scrollbar handling
+/// nk_group_begin_titled | Start a new group with separated name and title and internal scrollbar handling
+/// nk_group_end | Ends a group. Should only be called if nk_group_begin returned non-zero
+/// nk_group_scrolled_offset_begin | Start a new group with manual separated handling of scrollbar x- and y-offset
+/// nk_group_scrolled_begin | Start a new group with manual scrollbar handling
+/// nk_group_scrolled_end | Ends a group with manual scrollbar handling. Should only be called if nk_group_scrolled_xxx_begin returned non-zero
+/// nk_group_get_scroll | Gets the scroll offset for the given group
+/// nk_group_set_scroll | Sets the scroll offset for the given group
+*/
+/*/// #### nk_group_begin
+/// Starts a new widget group. Requires a previous layouting function to specify a pos/size.
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// int nk_group_begin(struct nk_context*, const char *title, nk_flags);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to an previously initialized `nk_context` struct
+/// __title__ | Must be an unique identifier for this group that is also used for the group header
+/// __flags__ | Window flags defined in the nk_panel_flags section with a number of different group behaviors
+///
+/// Returns `true(1)` if visible and fillable with widgets or `false(0)` otherwise
+*/
+NK_API int nk_group_begin(struct nk_context*, const char *title, nk_flags);
+/*/// #### nk_group_begin_titled
+/// Starts a new widget group. Requires a previous layouting function to specify a pos/size.
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// int nk_group_begin_titled(struct nk_context*, const char *name, const char *title, nk_flags);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to an previously initialized `nk_context` struct
+/// __id__ | Must be an unique identifier for this group
+/// __title__ | Group header title
+/// __flags__ | Window flags defined in the nk_panel_flags section with a number of different group behaviors
+///
+/// Returns `true(1)` if visible and fillable with widgets or `false(0)` otherwise
+*/
+NK_API int nk_group_begin_titled(struct nk_context*, const char *name, const char *title, nk_flags);
+/*/// #### nk_group_end
+/// Ends a widget group
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// void nk_group_end(struct nk_context*);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to an previously initialized `nk_context` struct
+*/
+NK_API void nk_group_end(struct nk_context*);
+/*/// #### nk_group_scrolled_offset_begin
+/// Starts a new widget group. Requires a previous layouting function to specify
+/// a size. Does not keep track of the scrollbar.
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// int nk_group_scrolled_offset_begin(struct nk_context*, nk_uint *x_offset, nk_uint *y_offset, const char *title, nk_flags flags);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to an previously initialized `nk_context` struct
+/// __x_offset__| Scrollbar x-offset to offset all widgets inside the group horizontally.
+/// __y_offset__| Scrollbar y-offset to offset all widgets inside the group vertically
+/// __title__ | Window unique group title used to both identify and display in the group header
+/// __flags__ | Window flags from the nk_panel_flags section
+///
+/// Returns `true(1)` if visible and fillable with widgets or `false(0)` otherwise
+*/
+NK_API int nk_group_scrolled_offset_begin(struct nk_context*, nk_uint *x_offset, nk_uint *y_offset, const char *title, nk_flags flags);
+/*/// #### nk_group_scrolled_begin
+/// Starts a new widget group. Requires a previous
+/// layouting function to specify a size. Does not keep track of the scrollbar.
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// int nk_group_scrolled_begin(struct nk_context*, struct nk_scroll *off, const char *title, nk_flags);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to an previously initialized `nk_context` struct
+/// __off__ | Both x- and y- scroll offset. Allows for manual scrollbar control
+/// __title__ | Window unique group title used to both identify and display in the group header
+/// __flags__ | Window flags from nk_panel_flags section
+///
+/// Returns `true(1)` if visible and fillable with widgets or `false(0)` otherwise
+*/
+NK_API int nk_group_scrolled_begin(struct nk_context*, struct nk_scroll *off, const char *title, nk_flags);
+/*/// #### nk_group_scrolled_end
+/// Ends a widget group after calling nk_group_scrolled_offset_begin or nk_group_scrolled_begin.
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// void nk_group_scrolled_end(struct nk_context*);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to an previously initialized `nk_context` struct
+*/
+NK_API void nk_group_scrolled_end(struct nk_context*);
+/*/// #### nk_group_get_scroll
+/// Gets the scroll position of the given group.
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// void nk_group_get_scroll(struct nk_context*, const char *id, nk_uint *x_offset, nk_uint *y_offset);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// -------------|-----------------------------------------------------------
+/// __ctx__ | Must point to an previously initialized `nk_context` struct
+/// __id__ | The id of the group to get the scroll position of
+/// __x_offset__ | A pointer to the x offset output (or NULL to ignore)
+/// __y_offset__ | A pointer to the y offset output (or NULL to ignore)
+*/
+NK_API void nk_group_get_scroll(struct nk_context*, const char *id, nk_uint *x_offset, nk_uint *y_offset);
+/*/// #### nk_group_set_scroll
+/// Sets the scroll position of the given group.
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// void nk_group_set_scroll(struct nk_context*, const char *id, nk_uint x_offset, nk_uint y_offset);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// -------------|-----------------------------------------------------------
+/// __ctx__ | Must point to an previously initialized `nk_context` struct
+/// __id__ | The id of the group to scroll
+/// __x_offset__ | The x offset to scroll to
+/// __y_offset__ | The y offset to scroll to
+*/
+NK_API void nk_group_set_scroll(struct nk_context*, const char *id, nk_uint x_offset, nk_uint y_offset);
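+/*/// A minimal usage sketch, assuming a previously initialized `nk_context` named
+/// `ctx`; the group name "MyGroup" and the `scroll_to_top_requested` flag are illustrative:
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// nk_uint x_off, y_off;
+/// nk_group_get_scroll(ctx, "MyGroup", &x_off, &y_off);
+/// if (scroll_to_top_requested)
+/// nk_group_set_scroll(ctx, "MyGroup", x_off, 0);
+/// nk_layout_row_dynamic(ctx, 200, 1);
+/// if (nk_group_begin(ctx, "MyGroup", 0)) {
+/// // [... widgets ...]
+/// nk_group_end(ctx);
+/// }
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+*/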
+/* =============================================================================
+ *
+ * TREE
+ *
+ * =============================================================================
+/// ### Tree
+/// Trees represent two different concepts. The first is a collapsable
+/// UI section that can be either in a hidden or visible state. It allows the UI
+/// user to selectively minimize the currently visible set of UI to keep it comprehensible.
+/// The second concept is tree widgets for the visual UI representation of trees.<br /><br />
+///
+/// Trees can thereby be nested for tree representations and for multiple nested
+/// collapsable UI sections. All trees are started by calling one of the
+/// `nk_tree_xxx_push_xxx` functions and ended by calling one of the
+/// `nk_tree_xxx_pop_xxx()` functions. Each starting function takes a title label,
+/// optionally an image to be displayed, and the initial collapse state from
+/// the nk_collapse_states section.<br /><br />
+///
+/// The runtime state of the tree is either stored outside the library by the caller
+/// or inside the library, which requires a unique ID. The unique ID can either be
+/// generated automatically from `__FILE__` and `__LINE__` with the function `nk_tree_push`,
+/// from `__FILE__` and a user provided ID (for example a loop index) with the
+/// function `nk_tree_push_id`, or provided completely from outside by the user with
+/// the function `nk_tree_push_hashed`.
+///
+/// #### Usage
+/// To create a tree you have to call one of the seven `nk_tree_xxx_push_xxx`
+/// functions to start a collapsable UI section and `nk_tree_xxx_pop` to mark the
+/// end.
+/// Each starting function will either return `false(0)` if the tree is collapsed
+/// or hidden and therefore does not need to be filled with content or `true(1)`
+/// if visible and required to be filled.
+///
+/// !!! Note
+/// The tree header does not require any layouting function and instead
+/// calculates an auto height based on the currently used font size
+///
+/// The tree ending functions only need to be called if the tree content is
+/// actually visible. So make sure the tree push function is guarded by `if`
+/// and the pop call is only taken if the tree is visible.
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// if (nk_tree_push(ctx, NK_TREE_TAB, "Tree", NK_MINIMIZED)) {
+/// nk_layout_row_dynamic(...);
+/// nk_widget(...);
+/// nk_tree_pop(ctx);
+/// }
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// #### Reference
+/// Function | Description
+/// ----------------------------|-------------------------------------------
+/// nk_tree_push | Start a collapsable UI section with internal state management
+/// nk_tree_push_id | Start a collapsable UI section with internal state management callable in a loop
+/// nk_tree_push_hashed | Start a collapsable UI section with internal state management with full control over internal unique ID use to store state
+/// nk_tree_image_push | Start a collapsable UI section with image and label header
+/// nk_tree_image_push_id | Start a collapsable UI section with image and label header and internal state management callable in a loop
+/// nk_tree_image_push_hashed | Start a collapsable UI section with image and label header and internal state management with full control over internal unique ID use to store state
+/// nk_tree_pop | Ends a collapsable UI section
+//
+/// nk_tree_state_push | Start a collapsable UI section with external state management
+/// nk_tree_state_image_push | Start a collapsable UI section with image and label header and external state management
+/// nk_tree_state_pop | Ends a collapsable UI section
+///
+/// #### nk_tree_type
+/// Flag | Description
+/// ----------------|----------------------------------------
+/// NK_TREE_NODE | Highlighted tree header to mark a collapsable UI section
+/// NK_TREE_TAB | Non-highlighted tree header closer to tree representations
+*/
+/*/// #### nk_tree_push
+/// Starts a collapsable UI section with internal state management
+/// !!! WARNING
+/// To keep track of the runtime tree collapsable state this function uses
+/// defines `__FILE__` and `__LINE__` to generate a unique ID. If you want
+/// to call this function in a loop please use `nk_tree_push_id` or
+/// `nk_tree_push_hashed` instead.
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// #define nk_tree_push(ctx, type, title, state)
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to an previously initialized `nk_context` struct
+/// __type__ | Value from the nk_tree_type section to visually mark a tree node header as either a collapseable UI section or tree node
+/// __title__ | Label printed in the tree header
+/// __state__ | Initial tree state value out of nk_collapse_states
+///
+/// Returns `true(1)` if visible and fillable with widgets or `false(0)` otherwise
+*/
+#define nk_tree_push(ctx, type, title, state) nk_tree_push_hashed(ctx, type, title, state, NK_FILE_LINE,nk_strlen(NK_FILE_LINE),__LINE__)
+/*/// #### nk_tree_push_id
+/// Starts a collapsable UI section with internal state management callable in a loop
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// #define nk_tree_push_id(ctx, type, title, state, id)
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to an previously initialized `nk_context` struct
+/// __type__ | Value from the nk_tree_type section to visually mark a tree node header as either a collapseable UI section or tree node
+/// __title__ | Label printed in the tree header
+/// __state__ | Initial tree state value out of nk_collapse_states
+/// __id__ | Loop counter index if this function is called in a loop
+///
+/// Returns `true(1)` if visible and fillable with widgets or `false(0)` otherwise
+*/
+#define nk_tree_push_id(ctx, type, title, state, id) nk_tree_push_hashed(ctx, type, title, state, NK_FILE_LINE,nk_strlen(NK_FILE_LINE),id)
+/*/// #### nk_tree_push_hashed
+/// Start a collapsable UI section with internal state management with full
+/// control over internal unique ID used to store state
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// int nk_tree_push_hashed(struct nk_context*, enum nk_tree_type, const char *title, enum nk_collapse_states initial_state, const char *hash, int len,int seed);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to an previously initialized `nk_context` struct
+/// __type__ | Value from the nk_tree_type section to visually mark a tree node header as either a collapseable UI section or tree node
+/// __title__ | Label printed in the tree header
+/// __state__ | Initial tree state value out of nk_collapse_states
+/// __hash__ | Memory block or string to generate the ID from
+/// __len__ | Size of passed memory block or string in __hash__
+/// __seed__ | Seeding value if this function is called in a loop or default to `0`
+///
+/// Returns `true(1)` if visible and fillable with widgets or `false(0)` otherwise
+*/
+NK_API int nk_tree_push_hashed(struct nk_context*, enum nk_tree_type, const char *title, enum nk_collapse_states initial_state, const char *hash, int len,int seed);
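+/*/// A minimal usage sketch, assuming a previously initialized `nk_context` named
+/// `ctx` and an illustrative `names`/`count` pair; the loop index is used as seed
+/// so every iteration gets its own persistent collapse state:
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// int i;
+/// for (i = 0; i < count; ++i) {
+/// if (nk_tree_push_hashed(ctx, NK_TREE_NODE, names[i], NK_MINIMIZED, names[i], nk_strlen(names[i]), i)) {
+/// nk_layout_row_dynamic(ctx, 0, 1);
+/// nk_widget(...);
+/// nk_tree_pop(ctx);
+/// }
+/// }
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+*/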
+/*/// #### nk_tree_image_push
+/// Start a collapsable UI section with image and label header
+/// !!! WARNING
+/// To keep track of the runtime tree collapsable state this function uses
+/// defines `__FILE__` and `__LINE__` to generate a unique ID. If you want
+/// to call this function in a loop please use `nk_tree_image_push_id` or
+/// `nk_tree_image_push_hashed` instead.
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// #define nk_tree_image_push(ctx, type, img, title, state)
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+//
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to an previously initialized `nk_context` struct
+/// __type__ | Value from the nk_tree_type section to visually mark a tree node header as either a collapseable UI section or tree node
+/// __img__ | Image to display inside the header on the left of the label
+/// __title__ | Label printed in the tree header
+/// __state__ | Initial tree state value out of nk_collapse_states
+///
+/// Returns `true(1)` if visible and fillable with widgets or `false(0)` otherwise
+*/
+#define nk_tree_image_push(ctx, type, img, title, state) nk_tree_image_push_hashed(ctx, type, img, title, state, NK_FILE_LINE,nk_strlen(NK_FILE_LINE),__LINE__)
+/*/// #### nk_tree_image_push_id
+/// Start a collapsable UI section with image and label header and internal state
+/// management callable in a loop
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// #define nk_tree_image_push_id(ctx, type, img, title, state, id)
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to an previously initialized `nk_context` struct
+/// __type__ | Value from the nk_tree_type section to visually mark a tree node header as either a collapseable UI section or tree node
+/// __img__ | Image to display inside the header on the left of the label
+/// __title__ | Label printed in the tree header
+/// __state__ | Initial tree state value out of nk_collapse_states
+/// __id__ | Loop counter index if this function is called in a loop
+///
+/// Returns `true(1)` if visible and fillable with widgets or `false(0)` otherwise
+*/
+#define nk_tree_image_push_id(ctx, type, img, title, state, id) nk_tree_image_push_hashed(ctx, type, img, title, state, NK_FILE_LINE,nk_strlen(NK_FILE_LINE),id)
+/*/// #### nk_tree_image_push_hashed
+/// Start a collapsable UI section with image and label header and internal state
+/// management with full control over the internal unique ID used to store state
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// int nk_tree_image_push_hashed(struct nk_context*, enum nk_tree_type, struct nk_image, const char *title, enum nk_collapse_states initial_state, const char *hash, int len,int seed);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to an previously initialized `nk_context` struct
+/// __type__ | Value from the nk_tree_type section to visually mark a tree node header as either a collapseable UI section or tree node
+/// __img__ | Image to display inside the header on the left of the label
+/// __title__ | Label printed in the tree header
+/// __state__ | Initial tree state value out of nk_collapse_states
+/// __hash__ | Memory block or string to generate the ID from
+/// __len__ | Size of passed memory block or string in __hash__
+/// __seed__ | Seeding value if this function is called in a loop or default to `0`
+///
+/// Returns `true(1)` if visible and fillable with widgets or `false(0)` otherwise
+*/
+NK_API int nk_tree_image_push_hashed(struct nk_context*, enum nk_tree_type, struct nk_image, const char *title, enum nk_collapse_states initial_state, const char *hash, int len,int seed);
+/*/// #### nk_tree_pop
+/// Ends a collapsable UI section
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// void nk_tree_pop(struct nk_context*);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to an previously initialized `nk_context` struct after calling `nk_tree_xxx_push_xxx`
+*/
+NK_API void nk_tree_pop(struct nk_context*);
+/*/// #### nk_tree_state_push
+/// Start a collapsable UI section with external state management
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// int nk_tree_state_push(struct nk_context*, enum nk_tree_type, const char *title, enum nk_collapse_states *state);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to an previously initialized `nk_context` struct after calling `nk_tree_xxx_push_xxx`
+/// __type__ | Value from the nk_tree_type section to visually mark a tree node header as either a collapseable UI section or tree node
+/// __title__ | Label printed in the tree header
+/// __state__ | Persistent state to update
+///
+/// Returns `true(1)` if visible and fillable with widgets or `false(0)` otherwise
+*/
+NK_API int nk_tree_state_push(struct nk_context*, enum nk_tree_type, const char *title, enum nk_collapse_states *state);
+/*/// #### nk_tree_state_image_push
+/// Start a collapsable UI section with image and label header and external state management
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// int nk_tree_state_image_push(struct nk_context*, enum nk_tree_type, struct nk_image, const char *title, enum nk_collapse_states *state);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to an previously initialized `nk_context` struct after calling `nk_tree_xxx_push_xxx`
+/// __img__ | Image to display inside the header on the left of the label
+/// __type__ | Value from the nk_tree_type section to visually mark a tree node header as either a collapseable UI section or tree node
+/// __title__ | Label printed in the tree header
+/// __state__ | Persistent state to update
+///
+/// Returns `true(1)` if visible and fillable with widgets or `false(0)` otherwise
+*/
+NK_API int nk_tree_state_image_push(struct nk_context*, enum nk_tree_type, struct nk_image, const char *title, enum nk_collapse_states *state);
+/*/// #### nk_tree_state_pop
+/// Ends a collapsable UI section
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// void nk_tree_state_pop(struct nk_context*);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// ------------|-----------------------------------------------------------
+/// __ctx__ | Must point to an previously initialized `nk_context` struct after calling `nk_tree_xxx_push_xxx`
+*/
+NK_API void nk_tree_state_pop(struct nk_context*);
+
+#define nk_tree_element_push(ctx, type, title, state, sel) nk_tree_element_push_hashed(ctx, type, title, state, sel, NK_FILE_LINE,nk_strlen(NK_FILE_LINE),__LINE__)
+#define nk_tree_element_push_id(ctx, type, title, state, sel, id) nk_tree_element_push_hashed(ctx, type, title, state, sel, NK_FILE_LINE,nk_strlen(NK_FILE_LINE),id)
+NK_API int nk_tree_element_push_hashed(struct nk_context*, enum nk_tree_type, const char *title, enum nk_collapse_states initial_state, int *selected, const char *hash, int len, int seed);
+NK_API int nk_tree_element_image_push_hashed(struct nk_context*, enum nk_tree_type, struct nk_image, const char *title, enum nk_collapse_states initial_state, int *selected, const char *hash, int len,int seed);
+NK_API void nk_tree_element_pop(struct nk_context*);
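+/*/// The `nk_tree_element_xxx` calls mirror the `nk_tree_xxx` API above but additionally
+/// take an `int *selected` flag, so the tree node label itself acts as a selectable item.
+/// A minimal usage sketch, assuming a previously initialized `nk_context` named `ctx`:
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// static int selected = 0;
+/// if (nk_tree_element_push(ctx, NK_TREE_NODE, "Node", NK_MINIMIZED, &selected)) {
+/// nk_layout_row_dynamic(ctx, 0, 1);
+/// nk_widget(...);
+/// nk_tree_element_pop(ctx);
+/// }
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+*/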
+
+/* =============================================================================
+ *
+ * LIST VIEW
+ *
+ * ============================================================================= */
+struct nk_list_view {
+/* public: */
+ int begin, end, count;
+/* private: */
+ int total_height;
+ struct nk_context *ctx;
+ nk_uint *scroll_pointer;
+ nk_uint scroll_value;
+};
+NK_API int nk_list_view_begin(struct nk_context*, struct nk_list_view *out, const char *id, nk_flags, int row_height, int row_count);
+NK_API void nk_list_view_end(struct nk_list_view*);
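+/*/// A minimal usage sketch, assuming a previously initialized `nk_context` named `ctx`
+/// and an illustrative `items`/`item_count` string array; only the visible rows are emitted:
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// struct nk_list_view view;
+/// nk_layout_row_dynamic(ctx, 200, 1);
+/// if (nk_list_view_begin(ctx, &view, "item_list", 0, 25, item_count)) {
+/// int i;
+/// nk_layout_row_dynamic(ctx, 25, 1);
+/// for (i = 0; i < view.count; ++i)
+/// nk_label(ctx, items[view.begin + i], NK_TEXT_LEFT);
+/// nk_list_view_end(&view);
+/// }
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+*/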
+/* =============================================================================
+ *
+ * WIDGET
+ *
+ * ============================================================================= */
+enum nk_widget_layout_states {
+ NK_WIDGET_INVALID, /* The widget cannot be seen and is completely out of view */
+ NK_WIDGET_VALID, /* The widget is completely inside the window and can be updated and drawn */
+ NK_WIDGET_ROM /* The widget is partially visible and cannot be updated */
+};
+enum nk_widget_states {
+ NK_WIDGET_STATE_MODIFIED = NK_FLAG(1),
+ NK_WIDGET_STATE_INACTIVE = NK_FLAG(2), /* widget is neither active nor hovered */
+ NK_WIDGET_STATE_ENTERED = NK_FLAG(3), /* widget has been hovered on the current frame */
+ NK_WIDGET_STATE_HOVER = NK_FLAG(4), /* widget is being hovered */
+ NK_WIDGET_STATE_ACTIVED = NK_FLAG(5),/* widget is currently activated */
+ NK_WIDGET_STATE_LEFT = NK_FLAG(6), /* widget is from this frame on not hovered anymore */
+ NK_WIDGET_STATE_HOVERED = NK_WIDGET_STATE_HOVER|NK_WIDGET_STATE_MODIFIED, /* widget is being hovered */
+ NK_WIDGET_STATE_ACTIVE = NK_WIDGET_STATE_ACTIVED|NK_WIDGET_STATE_MODIFIED /* widget is currently activated */
+};
+NK_API enum nk_widget_layout_states nk_widget(struct nk_rect*, const struct nk_context*);
+NK_API enum nk_widget_layout_states nk_widget_fitting(struct nk_rect*, struct nk_context*, struct nk_vec2);
+NK_API struct nk_rect nk_widget_bounds(struct nk_context*);
+NK_API struct nk_vec2 nk_widget_position(struct nk_context*);
+NK_API struct nk_vec2 nk_widget_size(struct nk_context*);
+NK_API float nk_widget_width(struct nk_context*);
+NK_API float nk_widget_height(struct nk_context*);
+NK_API int nk_widget_is_hovered(struct nk_context*);
+NK_API int nk_widget_is_mouse_clicked(struct nk_context*, enum nk_buttons);
+NK_API int nk_widget_has_mouse_click_down(struct nk_context*, enum nk_buttons, int down);
+NK_API void nk_spacing(struct nk_context*, int cols);
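+/*/// A minimal usage sketch, assuming a previously initialized `nk_context` named `ctx`;
+/// the query functions refer to the next widget in the current row, so they are called
+/// right before that widget is emitted:
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// nk_layout_row_dynamic(ctx, 30, 2);
+/// struct nk_rect bounds = nk_widget_bounds(ctx);
+/// int hovered = nk_widget_is_hovered(ctx);
+/// if (nk_button_label(ctx, hovered ? "Hovered" : "Button")) {
+/// // clicked; `bounds` could be used e.g. to anchor a popup at the button
+/// }
+/// nk_spacing(ctx, 1); // leave the second column empty
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+*/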
+/* =============================================================================
+ *
+ * TEXT
+ *
+ * ============================================================================= */
+enum nk_text_align {
+ NK_TEXT_ALIGN_LEFT = 0x01,
+ NK_TEXT_ALIGN_CENTERED = 0x02,
+ NK_TEXT_ALIGN_RIGHT = 0x04,
+ NK_TEXT_ALIGN_TOP = 0x08,
+ NK_TEXT_ALIGN_MIDDLE = 0x10,
+ NK_TEXT_ALIGN_BOTTOM = 0x20
+};
+enum nk_text_alignment {
+ NK_TEXT_LEFT = NK_TEXT_ALIGN_MIDDLE|NK_TEXT_ALIGN_LEFT,
+ NK_TEXT_CENTERED = NK_TEXT_ALIGN_MIDDLE|NK_TEXT_ALIGN_CENTERED,
+ NK_TEXT_RIGHT = NK_TEXT_ALIGN_MIDDLE|NK_TEXT_ALIGN_RIGHT
+};
+NK_API void nk_text(struct nk_context*, const char*, int, nk_flags);
+NK_API void nk_text_colored(struct nk_context*, const char*, int, nk_flags, struct nk_color);
+NK_API void nk_text_wrap(struct nk_context*, const char*, int);
+NK_API void nk_text_wrap_colored(struct nk_context*, const char*, int, struct nk_color);
+NK_API void nk_label(struct nk_context*, const char*, nk_flags align);
+NK_API void nk_label_colored(struct nk_context*, const char*, nk_flags align, struct nk_color);
+NK_API void nk_label_wrap(struct nk_context*, const char*);
+NK_API void nk_label_colored_wrap(struct nk_context*, const char*, struct nk_color);
+NK_API void nk_image(struct nk_context*, struct nk_image);
+NK_API void nk_image_color(struct nk_context*, struct nk_image, struct nk_color);
+#ifdef NK_INCLUDE_STANDARD_VARARGS
+NK_API void nk_labelf(struct nk_context*, nk_flags, NK_PRINTF_FORMAT_STRING const char*, ...) NK_PRINTF_VARARG_FUNC(3);
+NK_API void nk_labelf_colored(struct nk_context*, nk_flags, struct nk_color, NK_PRINTF_FORMAT_STRING const char*,...) NK_PRINTF_VARARG_FUNC(4);
+NK_API void nk_labelf_wrap(struct nk_context*, NK_PRINTF_FORMAT_STRING const char*,...) NK_PRINTF_VARARG_FUNC(2);
+NK_API void nk_labelf_colored_wrap(struct nk_context*, struct nk_color, NK_PRINTF_FORMAT_STRING const char*,...) NK_PRINTF_VARARG_FUNC(3);
+NK_API void nk_labelfv(struct nk_context*, nk_flags, NK_PRINTF_FORMAT_STRING const char*, va_list) NK_PRINTF_VALIST_FUNC(3);
+NK_API void nk_labelfv_colored(struct nk_context*, nk_flags, struct nk_color, NK_PRINTF_FORMAT_STRING const char*, va_list) NK_PRINTF_VALIST_FUNC(4);
+NK_API void nk_labelfv_wrap(struct nk_context*, NK_PRINTF_FORMAT_STRING const char*, va_list) NK_PRINTF_VALIST_FUNC(2);
+NK_API void nk_labelfv_colored_wrap(struct nk_context*, struct nk_color, NK_PRINTF_FORMAT_STRING const char*, va_list) NK_PRINTF_VALIST_FUNC(3);
+NK_API void nk_value_bool(struct nk_context*, const char *prefix, int);
+NK_API void nk_value_int(struct nk_context*, const char *prefix, int);
+NK_API void nk_value_uint(struct nk_context*, const char *prefix, unsigned int);
+NK_API void nk_value_float(struct nk_context*, const char *prefix, float);
+NK_API void nk_value_color_byte(struct nk_context*, const char *prefix, struct nk_color);
+NK_API void nk_value_color_float(struct nk_context*, const char *prefix, struct nk_color);
+NK_API void nk_value_color_hex(struct nk_context*, const char *prefix, struct nk_color);
+#endif
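+/*/// A minimal usage sketch, assuming a previously initialized `nk_context` named `ctx`
+/// and an illustrative `frame_count` variable (the formatted variants require
+/// `NK_INCLUDE_STANDARD_VARARGS`):
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// nk_layout_row_dynamic(ctx, 20, 1);
+/// nk_label(ctx, "Static label", NK_TEXT_LEFT);
+/// nk_label_wrap(ctx, "A longer text that is wrapped over multiple rows if needed.");
+/// #ifdef NK_INCLUDE_STANDARD_VARARGS
+/// nk_labelf(ctx, NK_TEXT_LEFT, "frame: %d", frame_count);
+/// nk_value_int(ctx, "frame", frame_count);
+/// #endif
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+*/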
+/* =============================================================================
+ *
+ * BUTTON
+ *
+ * ============================================================================= */
+NK_API int nk_button_text(struct nk_context*, const char *title, int len);
+NK_API int nk_button_label(struct nk_context*, const char *title);
+NK_API int nk_button_color(struct nk_context*, struct nk_color);
+NK_API int nk_button_symbol(struct nk_context*, enum nk_symbol_type);
+NK_API int nk_button_image(struct nk_context*, struct nk_image img);
+NK_API int nk_button_symbol_label(struct nk_context*, enum nk_symbol_type, const char*, nk_flags text_alignment);
+NK_API int nk_button_symbol_text(struct nk_context*, enum nk_symbol_type, const char*, int, nk_flags alignment);
+NK_API int nk_button_image_label(struct nk_context*, struct nk_image img, const char*, nk_flags text_alignment);
+NK_API int nk_button_image_text(struct nk_context*, struct nk_image img, const char*, int, nk_flags alignment);
+NK_API int nk_button_text_styled(struct nk_context*, const struct nk_style_button*, const char *title, int len);
+NK_API int nk_button_label_styled(struct nk_context*, const struct nk_style_button*, const char *title);
+NK_API int nk_button_symbol_styled(struct nk_context*, const struct nk_style_button*, enum nk_symbol_type);
+NK_API int nk_button_image_styled(struct nk_context*, const struct nk_style_button*, struct nk_image img);
+NK_API int nk_button_symbol_text_styled(struct nk_context*,const struct nk_style_button*, enum nk_symbol_type, const char*, int, nk_flags alignment);
+NK_API int nk_button_symbol_label_styled(struct nk_context *ctx, const struct nk_style_button *style, enum nk_symbol_type symbol, const char *title, nk_flags align);
+NK_API int nk_button_image_label_styled(struct nk_context*,const struct nk_style_button*, struct nk_image img, const char*, nk_flags text_alignment);
+NK_API int nk_button_image_text_styled(struct nk_context*,const struct nk_style_button*, struct nk_image img, const char*, int, nk_flags alignment);
+NK_API void nk_button_set_behavior(struct nk_context*, enum nk_button_behavior);
+NK_API int nk_button_push_behavior(struct nk_context*, enum nk_button_behavior);
+NK_API int nk_button_pop_behavior(struct nk_context*);
+/* =============================================================================
+ *
+ * CHECKBOX
+ *
+ * ============================================================================= */
+NK_API int nk_check_label(struct nk_context*, const char*, int active);
+NK_API int nk_check_text(struct nk_context*, const char*, int,int active);
+NK_API unsigned nk_check_flags_label(struct nk_context*, const char*, unsigned int flags, unsigned int value);
+NK_API unsigned nk_check_flags_text(struct nk_context*, const char*, int, unsigned int flags, unsigned int value);
+NK_API int nk_checkbox_label(struct nk_context*, const char*, int *active);
+NK_API int nk_checkbox_text(struct nk_context*, const char*, int, int *active);
+NK_API int nk_checkbox_flags_label(struct nk_context*, const char*, unsigned int *flags, unsigned int value);
+NK_API int nk_checkbox_flags_text(struct nk_context*, const char*, int, unsigned int *flags, unsigned int value);
+/* =============================================================================
+ *
+ * RADIO BUTTON
+ *
+ * ============================================================================= */
+NK_API int nk_radio_label(struct nk_context*, const char*, int *active);
+NK_API int nk_radio_text(struct nk_context*, const char*, int, int *active);
+NK_API int nk_option_label(struct nk_context*, const char*, int active);
+NK_API int nk_option_text(struct nk_context*, const char*, int, int active);
+/* =============================================================================
+ *
+ * SELECTABLE
+ *
+ * ============================================================================= */
+NK_API int nk_selectable_label(struct nk_context*, const char*, nk_flags align, int *value);
+NK_API int nk_selectable_text(struct nk_context*, const char*, int, nk_flags align, int *value);
+NK_API int nk_selectable_image_label(struct nk_context*,struct nk_image, const char*, nk_flags align, int *value);
+NK_API int nk_selectable_image_text(struct nk_context*,struct nk_image, const char*, int, nk_flags align, int *value);
+NK_API int nk_selectable_symbol_label(struct nk_context*,enum nk_symbol_type, const char*, nk_flags align, int *value);
+NK_API int nk_selectable_symbol_text(struct nk_context*,enum nk_symbol_type, const char*, int, nk_flags align, int *value);
+
+NK_API int nk_select_label(struct nk_context*, const char*, nk_flags align, int value);
+NK_API int nk_select_text(struct nk_context*, const char*, int, nk_flags align, int value);
+NK_API int nk_select_image_label(struct nk_context*, struct nk_image,const char*, nk_flags align, int value);
+NK_API int nk_select_image_text(struct nk_context*, struct nk_image,const char*, int, nk_flags align, int value);
+NK_API int nk_select_symbol_label(struct nk_context*,enum nk_symbol_type, const char*, nk_flags align, int value);
+NK_API int nk_select_symbol_text(struct nk_context*,enum nk_symbol_type, const char*, int, nk_flags align, int value);
+
+/* =============================================================================
+ *
+ * SLIDER
+ *
+ * ============================================================================= */
+NK_API float nk_slide_float(struct nk_context*, float min, float val, float max, float step);
+NK_API int nk_slide_int(struct nk_context*, int min, int val, int max, int step);
+NK_API int nk_slider_float(struct nk_context*, float min, float *val, float max, float step);
+NK_API int nk_slider_int(struct nk_context*, int min, int *val, int max, int step);
+/* =============================================================================
+ *
+ * PROGRESSBAR
+ *
+ * ============================================================================= */
+NK_API int nk_progress(struct nk_context*, nk_size *cur, nk_size max, int modifyable);
+NK_API nk_size nk_prog(struct nk_context*, nk_size cur, nk_size max, int modifyable);
+
+/* =============================================================================
+ *
+ * COLOR PICKER
+ *
+ * ============================================================================= */
+NK_API struct nk_colorf nk_color_picker(struct nk_context*, struct nk_colorf, enum nk_color_format);
+NK_API int nk_color_pick(struct nk_context*, struct nk_colorf*, enum nk_color_format);
+/* =============================================================================
+ *
+ * PROPERTIES
+ *
+ * =============================================================================
+/// ### Properties
+/// Properties are the main value modification widgets in Nuklear. Changing a value
+/// can be achieved by dragging, adding/removing incremental steps on button click
+/// or by directly typing a number.
+///
+/// #### Usage
+/// Each property requires a unique name for identification that is also used for
+/// displaying a label. If you want to use the same name multiple times make sure
+/// to add a '#' before your name. The '#' will not be shown but will generate a
+/// unique ID. Each property also takes in a minimum and maximum value. If you want
+/// to make use of the complete number range of a type just use the provided
+/// type limits from `limits.h`, for example `INT_MIN` and `INT_MAX` for
+/// `nk_property_int` and `nk_propertyi`. In addition each property takes in
+/// an increment value that will be added or subtracted if either the increment
+/// or decrement button is clicked. Finally there is an increment-per-pixel value
+/// that is added to or subtracted from the value while dragging.
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// int value = 0;
+/// struct nk_context ctx;
+/// nk_init_xxx(&ctx, ...);
+/// while (1) {
+///     // Input
+///     Event evt;
+///     nk_input_begin(&ctx);
+///     while (GetEvent(&evt)) {
+///         if (evt.type == MOUSE_MOVE)
+///             nk_input_motion(&ctx, evt.motion.x, evt.motion.y);
+///         else if (evt.type == [...]) {
+///             nk_input_xxx(...);
+///         }
+///     }
+///     nk_input_end(&ctx);
+///     //
+///     // Window
+///     if (nk_begin_xxx(...)) {
+///         // Property
+///         nk_layout_row_dynamic(...);
+///         nk_property_int(&ctx, "ID", INT_MIN, &value, INT_MAX, 1, 1);
+///     }
+///     nk_end(&ctx);
+///     //
+///     // Draw
+///     const struct nk_command *cmd = 0;
+///     nk_foreach(cmd, &ctx) {
+///         switch (cmd->type) {
+///         case NK_COMMAND_LINE:
+///             your_draw_line_function(...);
+///             break;
+///         case NK_COMMAND_RECT:
+///             your_draw_rect_function(...);
+///             break;
+///         case ...:
+///             // [...]
+///             break;
+///         }
+///     }
+///     nk_clear(&ctx);
+/// }
+/// nk_free(&ctx);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// #### Reference
+/// Function | Description
+/// --------------------|-------------------------------------------
+/// nk_property_int | Integer property directly modifying a passed in value
+/// nk_property_float | Float property directly modifying a passed in value
+/// nk_property_double | Double property directly modifying a passed in value
+/// nk_propertyi | Integer property returning the modified int value
+/// nk_propertyf | Float property returning the modified float value
+/// nk_propertyd | Double property returning the modified double value
+///
+*/
+/*/// #### nk_property_int
+/// Integer property directly modifying a passed in value
+/// !!! WARNING
+/// To generate a unique property ID using the same label make sure to insert
+/// a `#` at the beginning. It will not be shown but guarantees correct behavior.
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// void nk_property_int(struct nk_context *ctx, const char *name, int min, int *val, int max, int step, float inc_per_pixel);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// --------------------|-----------------------------------------------------------
+/// __ctx__ | Must point to a previously initialized `nk_context` struct after calling a layouting function
+/// __name__ | String used both as a label as well as a unique identifier
+/// __min__ | Minimum value not allowed to be underflown
+/// __val__ | Integer pointer to be modified
+/// __max__ | Maximum value not allowed to be overflown
+/// __step__ | Increment added and subtracted on increment and decrement button
+/// __inc_per_pixel__ | Value per pixel added or subtracted on dragging
+*/
+NK_API void nk_property_int(struct nk_context*, const char *name, int min, int *val, int max, int step, float inc_per_pixel);
+/*/// #### nk_property_float
+/// Float property directly modifying a passed in value
+/// !!! WARNING
+/// To generate a unique property ID using the same label make sure to insert
+/// a `#` at the beginning. It will not be shown but guarantees correct behavior.
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// void nk_property_float(struct nk_context *ctx, const char *name, float min, float *val, float max, float step, float inc_per_pixel);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// --------------------|-----------------------------------------------------------
+/// __ctx__ | Must point to a previously initialized `nk_context` struct after calling a layouting function
+/// __name__ | String used both as a label as well as a unique identifier
+/// __min__ | Minimum value not allowed to be underflown
+/// __val__ | Float pointer to be modified
+/// __max__ | Maximum value not allowed to be overflown
+/// __step__ | Increment added and subtracted on increment and decrement button
+/// __inc_per_pixel__ | Value per pixel added or subtracted on dragging
+*/
+NK_API void nk_property_float(struct nk_context*, const char *name, float min, float *val, float max, float step, float inc_per_pixel);
+/*/// #### nk_property_double
+/// Double property directly modifying a passed in value
+/// !!! WARNING
+/// To generate a unique property ID using the same label make sure to insert
+/// a `#` at the beginning. It will not be shown but guarantees correct behavior.
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// void nk_property_double(struct nk_context *ctx, const char *name, double min, double *val, double max, double step, float inc_per_pixel);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// --------------------|-----------------------------------------------------------
+/// __ctx__ | Must point to a previously initialized `nk_context` struct after calling a layouting function
+/// __name__ | String used both as a label as well as a unique identifier
+/// __min__ | Minimum value not allowed to be underflown
+/// __val__ | Double pointer to be modified
+/// __max__ | Maximum value not allowed to be overflown
+/// __step__ | Increment added and subtracted on increment and decrement button
+/// __inc_per_pixel__ | Value per pixel added or subtracted on dragging
+*/
+NK_API void nk_property_double(struct nk_context*, const char *name, double min, double *val, double max, double step, float inc_per_pixel);
+/*/// #### nk_propertyi
+/// Integer property modifying a passed in value and returning the new value
+/// !!! WARNING
+/// To generate a unique property ID using the same label make sure to insert
+/// a `#` at the beginning. It will not be shown but guarantees correct behavior.
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// int nk_propertyi(struct nk_context *ctx, const char *name, int min, int val, int max, int step, float inc_per_pixel);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// --------------------|-----------------------------------------------------------
+/// __ctx__ | Must point to a previously initialized `nk_context` struct after calling a layouting function
+/// __name__ | String used both as a label as well as a unique identifier
+/// __min__ | Minimum value not allowed to be underflown
+/// __val__ | Current integer value to be modified and returned
+/// __max__ | Maximum value not allowed to be overflown
+/// __step__ | Increment added and subtracted on increment and decrement button
+/// __inc_per_pixel__ | Value per pixel added or subtracted on dragging
+///
+/// Returns the new modified integer value
+*/
+NK_API int nk_propertyi(struct nk_context*, const char *name, int min, int val, int max, int step, float inc_per_pixel);
+/*/// #### nk_propertyf
+/// Float property modifying a passed in value and returning the new value
+/// !!! WARNING
+/// To generate a unique property ID using the same label make sure to insert
+/// a `#` at the beginning. It will not be shown but guarantees correct behavior.
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// float nk_propertyf(struct nk_context *ctx, const char *name, float min, float val, float max, float step, float inc_per_pixel);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// --------------------|-----------------------------------------------------------
+/// __ctx__ | Must point to a previously initialized `nk_context` struct after calling a layouting function
+/// __name__ | String used both as a label as well as a unique identifier
+/// __min__ | Minimum value not allowed to be underflown
+/// __val__ | Current float value to be modified and returned
+/// __max__ | Maximum value not allowed to be overflown
+/// __step__ | Increment added and subtracted on increment and decrement button
+/// __inc_per_pixel__ | Value per pixel added or subtracted on dragging
+///
+/// Returns the new modified float value
+*/
+NK_API float nk_propertyf(struct nk_context*, const char *name, float min, float val, float max, float step, float inc_per_pixel);
+/*/// #### nk_propertyd
+/// Double property modifying a passed in value and returning the new value
+/// !!! WARNING
+/// To generate a unique property ID using the same label make sure to insert
+/// a `#` at the beginning. It will not be shown but guarantees correct behavior.
+///
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~c
+/// double nk_propertyd(struct nk_context *ctx, const char *name, double min, double val, double max, double step, float inc_per_pixel);
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///
+/// Parameter | Description
+/// --------------------|-----------------------------------------------------------
+/// __ctx__ | Must point to a previously initialized `nk_context` struct after calling a layouting function
+/// __name__ | String used both as a label as well as a unique identifier
+/// __min__ | Minimum value not allowed to be underflown
+/// __val__ | Current double value to be modified and returned
+/// __max__ | Maximum value not allowed to be overflown
+/// __step__ | Increment added and subtracted on increment and decrement button
+/// __inc_per_pixel__ | Value per pixel added or subtracted on dragging
+///
+/// Returns the new modified double value
+*/
+NK_API double nk_propertyd(struct nk_context*, const char *name, double min, double val, double max, double step, float inc_per_pixel);
+/* =============================================================================
+ *
+ * TEXT EDIT
+ *
+ * ============================================================================= */
+enum nk_edit_flags {
+ NK_EDIT_DEFAULT = 0,
+ NK_EDIT_READ_ONLY = NK_FLAG(0),
+ NK_EDIT_AUTO_SELECT = NK_FLAG(1),
+ NK_EDIT_SIG_ENTER = NK_FLAG(2),
+ NK_EDIT_ALLOW_TAB = NK_FLAG(3),
+ NK_EDIT_NO_CURSOR = NK_FLAG(4),
+ NK_EDIT_SELECTABLE = NK_FLAG(5),
+ NK_EDIT_CLIPBOARD = NK_FLAG(6),
+ NK_EDIT_CTRL_ENTER_NEWLINE = NK_FLAG(7),
+ NK_EDIT_NO_HORIZONTAL_SCROLL = NK_FLAG(8),
+ NK_EDIT_ALWAYS_INSERT_MODE = NK_FLAG(9),
+ NK_EDIT_MULTILINE = NK_FLAG(10),
+ NK_EDIT_GOTO_END_ON_ACTIVATE = NK_FLAG(11)
+};
+enum nk_edit_types {
+ NK_EDIT_SIMPLE = NK_EDIT_ALWAYS_INSERT_MODE,
+ NK_EDIT_FIELD = NK_EDIT_SIMPLE|NK_EDIT_SELECTABLE|NK_EDIT_CLIPBOARD,
+ NK_EDIT_BOX = NK_EDIT_ALWAYS_INSERT_MODE| NK_EDIT_SELECTABLE| NK_EDIT_MULTILINE|NK_EDIT_ALLOW_TAB|NK_EDIT_CLIPBOARD,
+ NK_EDIT_EDITOR = NK_EDIT_SELECTABLE|NK_EDIT_MULTILINE|NK_EDIT_ALLOW_TAB| NK_EDIT_CLIPBOARD
+};
+enum nk_edit_events {
+ NK_EDIT_ACTIVE = NK_FLAG(0), /* edit widget is currently being modified */
+ NK_EDIT_INACTIVE = NK_FLAG(1), /* edit widget is not active and is not being modified */
+ NK_EDIT_ACTIVATED = NK_FLAG(2), /* edit widget went from state inactive to state active */
+ NK_EDIT_DEACTIVATED = NK_FLAG(3), /* edit widget went from state active to state inactive */
+ NK_EDIT_COMMITED = NK_FLAG(4) /* edit widget has received an enter and lost focus */
+};
+NK_API nk_flags nk_edit_string(struct nk_context*, nk_flags, char *buffer, int *len, int max, nk_plugin_filter);
+NK_API nk_flags nk_edit_string_zero_terminated(struct nk_context*, nk_flags, char *buffer, int max, nk_plugin_filter);
+NK_API nk_flags nk_edit_buffer(struct nk_context*, nk_flags, struct nk_text_edit*, nk_plugin_filter);
+NK_API void nk_edit_focus(struct nk_context*, nk_flags flags);
+NK_API void nk_edit_unfocus(struct nk_context*);
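+/* A minimal usage sketch for the edit flags and events declared above (for
+ * illustration only; `ctx` and the 256-byte `buf` are assumptions of this
+ * sketch, not requirements of the library):
+ *
+ *     static char buf[256];
+ *     nk_flags ev = nk_edit_string_zero_terminated(ctx, NK_EDIT_FIELD|NK_EDIT_SIG_ENTER,
+ *                                                  buf, (int)sizeof(buf), nk_filter_default);
+ *     if (ev & NK_EDIT_COMMITED)
+ *         ...; // enter was pressed while the field was focused: consume buf here
+ */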
+/* =============================================================================
+ *
+ * CHART
+ *
+ * ============================================================================= */
+NK_API int nk_chart_begin(struct nk_context*, enum nk_chart_type, int num, float min, float max);
+NK_API int nk_chart_begin_colored(struct nk_context*, enum nk_chart_type, struct nk_color, struct nk_color active, int num, float min, float max);
+NK_API void nk_chart_add_slot(struct nk_context *ctx, const enum nk_chart_type, int count, float min_value, float max_value);
+NK_API void nk_chart_add_slot_colored(struct nk_context *ctx, const enum nk_chart_type, struct nk_color, struct nk_color active, int count, float min_value, float max_value);
+NK_API nk_flags nk_chart_push(struct nk_context*, float);
+NK_API nk_flags nk_chart_push_slot(struct nk_context*, float, int);
+NK_API void nk_chart_end(struct nk_context*);
+NK_API void nk_plot(struct nk_context*, enum nk_chart_type, const float *values, int count, int offset);
+NK_API void nk_plot_function(struct nk_context*, enum nk_chart_type, void *userdata, float(*value_getter)(void* user, int index), int count, int offset);
+/* =============================================================================
+ *
+ * POPUP
+ *
+ * ============================================================================= */
+NK_API int nk_popup_begin(struct nk_context*, enum nk_popup_type, const char*, nk_flags, struct nk_rect bounds);
+NK_API void nk_popup_close(struct nk_context*);
+NK_API void nk_popup_end(struct nk_context*);
+NK_API void nk_popup_get_scroll(struct nk_context*, nk_uint *offset_x, nk_uint *offset_y);
+NK_API void nk_popup_set_scroll(struct nk_context*, nk_uint offset_x, nk_uint offset_y);
+/* =============================================================================
+ *
+ * COMBOBOX
+ *
+ * ============================================================================= */
+NK_API int nk_combo(struct nk_context*, const char **items, int count, int selected, int item_height, struct nk_vec2 size);
+NK_API int nk_combo_separator(struct nk_context*, const char *items_separated_by_separator, int separator, int selected, int count, int item_height, struct nk_vec2 size);
+NK_API int nk_combo_string(struct nk_context*, const char *items_separated_by_zeros, int selected, int count, int item_height, struct nk_vec2 size);
+NK_API int nk_combo_callback(struct nk_context*, void(*item_getter)(void*, int, const char**), void *userdata, int selected, int count, int item_height, struct nk_vec2 size);
+NK_API void nk_combobox(struct nk_context*, const char **items, int count, int *selected, int item_height, struct nk_vec2 size);
+NK_API void nk_combobox_string(struct nk_context*, const char *items_separated_by_zeros, int *selected, int count, int item_height, struct nk_vec2 size);
+NK_API void nk_combobox_separator(struct nk_context*, const char *items_separated_by_separator, int separator,int *selected, int count, int item_height, struct nk_vec2 size);
+NK_API void nk_combobox_callback(struct nk_context*, void(*item_getter)(void*, int, const char**), void*, int *selected, int count, int item_height, struct nk_vec2 size);
+/* =============================================================================
+ *
+ * ABSTRACT COMBOBOX
+ *
+ * ============================================================================= */
+NK_API int nk_combo_begin_text(struct nk_context*, const char *selected, int, struct nk_vec2 size);
+NK_API int nk_combo_begin_label(struct nk_context*, const char *selected, struct nk_vec2 size);
+NK_API int nk_combo_begin_color(struct nk_context*, struct nk_color color, struct nk_vec2 size);
+NK_API int nk_combo_begin_symbol(struct nk_context*, enum nk_symbol_type, struct nk_vec2 size);
+NK_API int nk_combo_begin_symbol_label(struct nk_context*, const char *selected, enum nk_symbol_type, struct nk_vec2 size);
+NK_API int nk_combo_begin_symbol_text(struct nk_context*, const char *selected, int, enum nk_symbol_type, struct nk_vec2 size);
+NK_API int nk_combo_begin_image(struct nk_context*, struct nk_image img, struct nk_vec2 size);
+NK_API int nk_combo_begin_image_label(struct nk_context*, const char *selected, struct nk_image, struct nk_vec2 size);
+NK_API int nk_combo_begin_image_text(struct nk_context*, const char *selected, int, struct nk_image, struct nk_vec2 size);
+NK_API int nk_combo_item_label(struct nk_context*, const char*, nk_flags alignment);
+NK_API int nk_combo_item_text(struct nk_context*, const char*,int, nk_flags alignment);
+NK_API int nk_combo_item_image_label(struct nk_context*, struct nk_image, const char*, nk_flags alignment);
+NK_API int nk_combo_item_image_text(struct nk_context*, struct nk_image, const char*, int,nk_flags alignment);
+NK_API int nk_combo_item_symbol_label(struct nk_context*, enum nk_symbol_type, const char*, nk_flags alignment);
+NK_API int nk_combo_item_symbol_text(struct nk_context*, enum nk_symbol_type, const char*, int, nk_flags alignment);
+NK_API void nk_combo_close(struct nk_context*);
+NK_API void nk_combo_end(struct nk_context*);
+/* =============================================================================
+ *
+ * CONTEXTUAL
+ *
+ * ============================================================================= */
+NK_API int nk_contextual_begin(struct nk_context*, nk_flags, struct nk_vec2, struct nk_rect trigger_bounds);
+NK_API int nk_contextual_item_text(struct nk_context*, const char*, int,nk_flags align);
+NK_API int nk_contextual_item_label(struct nk_context*, const char*, nk_flags align);
+NK_API int nk_contextual_item_image_label(struct nk_context*, struct nk_image, const char*, nk_flags alignment);
+NK_API int nk_contextual_item_image_text(struct nk_context*, struct nk_image, const char*, int len, nk_flags alignment);
+NK_API int nk_contextual_item_symbol_label(struct nk_context*, enum nk_symbol_type, const char*, nk_flags alignment);
+NK_API int nk_contextual_item_symbol_text(struct nk_context*, enum nk_symbol_type, const char*, int, nk_flags alignment);
+NK_API void nk_contextual_close(struct nk_context*);
+NK_API void nk_contextual_end(struct nk_context*);
+/* =============================================================================
+ *
+ * TOOLTIP
+ *
+ * ============================================================================= */
+NK_API void nk_tooltip(struct nk_context*, const char*);
+#ifdef NK_INCLUDE_STANDARD_VARARGS
+NK_API void nk_tooltipf(struct nk_context*, NK_PRINTF_FORMAT_STRING const char*, ...) NK_PRINTF_VARARG_FUNC(2);
+NK_API void nk_tooltipfv(struct nk_context*, NK_PRINTF_FORMAT_STRING const char*, va_list) NK_PRINTF_VALIST_FUNC(2);
+#endif
+NK_API int nk_tooltip_begin(struct nk_context*, float width);
+NK_API void nk_tooltip_end(struct nk_context*);
+/* =============================================================================
+ *
+ * MENU
+ *
+ * ============================================================================= */
+NK_API void nk_menubar_begin(struct nk_context*);
+NK_API void nk_menubar_end(struct nk_context*);
+NK_API int nk_menu_begin_text(struct nk_context*, const char* title, int title_len, nk_flags align, struct nk_vec2 size);
+NK_API int nk_menu_begin_label(struct nk_context*, const char*, nk_flags align, struct nk_vec2 size);
+NK_API int nk_menu_begin_image(struct nk_context*, const char*, struct nk_image, struct nk_vec2 size);
+NK_API int nk_menu_begin_image_text(struct nk_context*, const char*, int,nk_flags align,struct nk_image, struct nk_vec2 size);
+NK_API int nk_menu_begin_image_label(struct nk_context*, const char*, nk_flags align,struct nk_image, struct nk_vec2 size);
+NK_API int nk_menu_begin_symbol(struct nk_context*, const char*, enum nk_symbol_type, struct nk_vec2 size);
+NK_API int nk_menu_begin_symbol_text(struct nk_context*, const char*, int,nk_flags align,enum nk_symbol_type, struct nk_vec2 size);
+NK_API int nk_menu_begin_symbol_label(struct nk_context*, const char*, nk_flags align,enum nk_symbol_type, struct nk_vec2 size);
+NK_API int nk_menu_item_text(struct nk_context*, const char*, int,nk_flags align);
+NK_API int nk_menu_item_label(struct nk_context*, const char*, nk_flags alignment);
+NK_API int nk_menu_item_image_label(struct nk_context*, struct nk_image, const char*, nk_flags alignment);
+NK_API int nk_menu_item_image_text(struct nk_context*, struct nk_image, const char*, int len, nk_flags alignment);
+NK_API int nk_menu_item_symbol_text(struct nk_context*, enum nk_symbol_type, const char*, int, nk_flags alignment);
+NK_API int nk_menu_item_symbol_label(struct nk_context*, enum nk_symbol_type, const char*, nk_flags alignment);
+NK_API void nk_menu_close(struct nk_context*);
+NK_API void nk_menu_end(struct nk_context*);
+/* =============================================================================
+ *
+ * STYLE
+ *
+ * ============================================================================= */
+enum nk_style_colors {
+ NK_COLOR_TEXT,
+ NK_COLOR_WINDOW,
+ NK_COLOR_HEADER,
+ NK_COLOR_BORDER,
+ NK_COLOR_BUTTON,
+ NK_COLOR_BUTTON_HOVER,
+ NK_COLOR_BUTTON_ACTIVE,
+ NK_COLOR_TOGGLE,
+ NK_COLOR_TOGGLE_HOVER,
+ NK_COLOR_TOGGLE_CURSOR,
+ NK_COLOR_SELECT,
+ NK_COLOR_SELECT_ACTIVE,
+ NK_COLOR_SLIDER,
+ NK_COLOR_SLIDER_CURSOR,
+ NK_COLOR_SLIDER_CURSOR_HOVER,
+ NK_COLOR_SLIDER_CURSOR_ACTIVE,
+ NK_COLOR_PROPERTY,
+ NK_COLOR_EDIT,
+ NK_COLOR_EDIT_CURSOR,
+ NK_COLOR_COMBO,
+ NK_COLOR_CHART,
+ NK_COLOR_CHART_COLOR,
+ NK_COLOR_CHART_COLOR_HIGHLIGHT,
+ NK_COLOR_SCROLLBAR,
+ NK_COLOR_SCROLLBAR_CURSOR,
+ NK_COLOR_SCROLLBAR_CURSOR_HOVER,
+ NK_COLOR_SCROLLBAR_CURSOR_ACTIVE,
+ NK_COLOR_TAB_HEADER,
+ NK_COLOR_COUNT
+};
+enum nk_style_cursor {
+ NK_CURSOR_ARROW,
+ NK_CURSOR_TEXT,
+ NK_CURSOR_MOVE,
+ NK_CURSOR_RESIZE_VERTICAL,
+ NK_CURSOR_RESIZE_HORIZONTAL,
+ NK_CURSOR_RESIZE_TOP_LEFT_DOWN_RIGHT,
+ NK_CURSOR_RESIZE_TOP_RIGHT_DOWN_LEFT,
+ NK_CURSOR_COUNT
+};
+NK_API void nk_style_default(struct nk_context*);
+NK_API void nk_style_from_table(struct nk_context*, const struct nk_color*);
+NK_API void nk_style_load_cursor(struct nk_context*, enum nk_style_cursor, const struct nk_cursor*);
+NK_API void nk_style_load_all_cursors(struct nk_context*, struct nk_cursor*);
+NK_API const char* nk_style_get_color_by_name(enum nk_style_colors);
+NK_API void nk_style_set_font(struct nk_context*, const struct nk_user_font*);
+NK_API int nk_style_set_cursor(struct nk_context*, enum nk_style_cursor);
+NK_API void nk_style_show_cursor(struct nk_context*);
+NK_API void nk_style_hide_cursor(struct nk_context*);
+
+NK_API int nk_style_push_font(struct nk_context*, const struct nk_user_font*);
+NK_API int nk_style_push_float(struct nk_context*, float*, float);
+NK_API int nk_style_push_vec2(struct nk_context*, struct nk_vec2*, struct nk_vec2);
+NK_API int nk_style_push_style_item(struct nk_context*, struct nk_style_item*, struct nk_style_item);
+NK_API int nk_style_push_flags(struct nk_context*, nk_flags*, nk_flags);
+NK_API int nk_style_push_color(struct nk_context*, struct nk_color*, struct nk_color);
+
+NK_API int nk_style_pop_font(struct nk_context*);
+NK_API int nk_style_pop_float(struct nk_context*);
+NK_API int nk_style_pop_vec2(struct nk_context*);
+NK_API int nk_style_pop_style_item(struct nk_context*);
+NK_API int nk_style_pop_flags(struct nk_context*);
+NK_API int nk_style_pop_color(struct nk_context*);
+/* =============================================================================
+ *
+ * COLOR
+ *
+ * ============================================================================= */
+NK_API struct nk_color nk_rgb(int r, int g, int b);
+NK_API struct nk_color nk_rgb_iv(const int *rgb);
+NK_API struct nk_color nk_rgb_bv(const nk_byte* rgb);
+NK_API struct nk_color nk_rgb_f(float r, float g, float b);
+NK_API struct nk_color nk_rgb_fv(const float *rgb);
+NK_API struct nk_color nk_rgb_cf(struct nk_colorf c);
+NK_API struct nk_color nk_rgb_hex(const char *rgb);
+
+NK_API struct nk_color nk_rgba(int r, int g, int b, int a);
+NK_API struct nk_color nk_rgba_u32(nk_uint);
+NK_API struct nk_color nk_rgba_iv(const int *rgba);
+NK_API struct nk_color nk_rgba_bv(const nk_byte *rgba);
+NK_API struct nk_color nk_rgba_f(float r, float g, float b, float a);
+NK_API struct nk_color nk_rgba_fv(const float *rgba);
+NK_API struct nk_color nk_rgba_cf(struct nk_colorf c);
+NK_API struct nk_color nk_rgba_hex(const char *rgb);
+
+NK_API struct nk_colorf nk_hsva_colorf(float h, float s, float v, float a);
+NK_API struct nk_colorf nk_hsva_colorfv(float *c);
+NK_API void nk_colorf_hsva_f(float *out_h, float *out_s, float *out_v, float *out_a, struct nk_colorf in);
+NK_API void nk_colorf_hsva_fv(float *hsva, struct nk_colorf in);
+
+NK_API struct nk_color nk_hsv(int h, int s, int v);
+NK_API struct nk_color nk_hsv_iv(const int *hsv);
+NK_API struct nk_color nk_hsv_bv(const nk_byte *hsv);
+NK_API struct nk_color nk_hsv_f(float h, float s, float v);
+NK_API struct nk_color nk_hsv_fv(const float *hsv);
+
+NK_API struct nk_color nk_hsva(int h, int s, int v, int a);
+NK_API struct nk_color nk_hsva_iv(const int *hsva);
+NK_API struct nk_color nk_hsva_bv(const nk_byte *hsva);
+NK_API struct nk_color nk_hsva_f(float h, float s, float v, float a);
+NK_API struct nk_color nk_hsva_fv(const float *hsva);
+
+/* color (conversion nuklear --> user) */
+NK_API void nk_color_f(float *r, float *g, float *b, float *a, struct nk_color);
+NK_API void nk_color_fv(float *rgba_out, struct nk_color);
+NK_API struct nk_colorf nk_color_cf(struct nk_color);
+NK_API void nk_color_d(double *r, double *g, double *b, double *a, struct nk_color);
+NK_API void nk_color_dv(double *rgba_out, struct nk_color);
+
+NK_API nk_uint nk_color_u32(struct nk_color);
+NK_API void nk_color_hex_rgba(char *output, struct nk_color);
+NK_API void nk_color_hex_rgb(char *output, struct nk_color);
+
+NK_API void nk_color_hsv_i(int *out_h, int *out_s, int *out_v, struct nk_color);
+NK_API void nk_color_hsv_b(nk_byte *out_h, nk_byte *out_s, nk_byte *out_v, struct nk_color);
+NK_API void nk_color_hsv_iv(int *hsv_out, struct nk_color);
+NK_API void nk_color_hsv_bv(nk_byte *hsv_out, struct nk_color);
+NK_API void nk_color_hsv_f(float *out_h, float *out_s, float *out_v, struct nk_color);
+NK_API void nk_color_hsv_fv(float *hsv_out, struct nk_color);
+
+NK_API void nk_color_hsva_i(int *h, int *s, int *v, int *a, struct nk_color);
+NK_API void nk_color_hsva_b(nk_byte *h, nk_byte *s, nk_byte *v, nk_byte *a, struct nk_color);
+NK_API void nk_color_hsva_iv(int *hsva_out, struct nk_color);
+NK_API void nk_color_hsva_bv(nk_byte *hsva_out, struct nk_color);
+NK_API void nk_color_hsva_f(float *out_h, float *out_s, float *out_v, float *out_a, struct nk_color);
+NK_API void nk_color_hsva_fv(float *hsva_out, struct nk_color);
+/* =============================================================================
+ *
+ * IMAGE
+ *
+ * ============================================================================= */
+NK_API nk_handle nk_handle_ptr(void*);
+NK_API nk_handle nk_handle_id(int);
+NK_API struct nk_image nk_image_handle(nk_handle);
+NK_API struct nk_image nk_image_ptr(void*);
+NK_API struct nk_image nk_image_id(int);
+NK_API int nk_image_is_subimage(const struct nk_image* img);
+NK_API struct nk_image nk_subimage_ptr(void*, unsigned short w, unsigned short h, struct nk_rect sub_region);
+NK_API struct nk_image nk_subimage_id(int, unsigned short w, unsigned short h, struct nk_rect sub_region);
+NK_API struct nk_image nk_subimage_handle(nk_handle, unsigned short w, unsigned short h, struct nk_rect sub_region);
+/* =============================================================================
+ *
+ * MATH
+ *
+ * ============================================================================= */
+NK_API nk_hash nk_murmur_hash(const void *key, int len, nk_hash seed);
+NK_API void nk_triangle_from_direction(struct nk_vec2 *result, struct nk_rect r, float pad_x, float pad_y, enum nk_heading);
+
+NK_API struct nk_vec2 nk_vec2(float x, float y);
+NK_API struct nk_vec2 nk_vec2i(int x, int y);
+NK_API struct nk_vec2 nk_vec2v(const float *xy);
+NK_API struct nk_vec2 nk_vec2iv(const int *xy);
+
+NK_API struct nk_rect nk_get_null_rect(void);
+NK_API struct nk_rect nk_rect(float x, float y, float w, float h);
+NK_API struct nk_rect nk_recti(int x, int y, int w, int h);
+NK_API struct nk_rect nk_recta(struct nk_vec2 pos, struct nk_vec2 size);
+NK_API struct nk_rect nk_rectv(const float *xywh);
+NK_API struct nk_rect nk_rectiv(const int *xywh);
+NK_API struct nk_vec2 nk_rect_pos(struct nk_rect);
+NK_API struct nk_vec2 nk_rect_size(struct nk_rect);
+/* =============================================================================
+ *
+ * STRING
+ *
+ * ============================================================================= */
+NK_API int nk_strlen(const char *str);
+NK_API int nk_stricmp(const char *s1, const char *s2);
+NK_API int nk_stricmpn(const char *s1, const char *s2, int n);
+NK_API int nk_strtoi(const char *str, const char **endptr);
+NK_API float nk_strtof(const char *str, const char **endptr);
+NK_API double nk_strtod(const char *str, const char **endptr);
+NK_API int nk_strfilter(const char *text, const char *regexp);
+NK_API int nk_strmatch_fuzzy_string(char const *str, char const *pattern, int *out_score);
+NK_API int nk_strmatch_fuzzy_text(const char *txt, int txt_len, const char *pattern, int *out_score);
+/* =============================================================================
+ *
+ * UTF-8
+ *
+ * ============================================================================= */
+NK_API int nk_utf_decode(const char*, nk_rune*, int);
+NK_API int nk_utf_encode(nk_rune, char*, int);
+NK_API int nk_utf_len(const char*, int byte_len);
+NK_API const char* nk_utf_at(const char *buffer, int length, int index, nk_rune *unicode, int *len);
+/* ===============================================================
+ *
+ * FONT
+ *
+ * ===============================================================*/
+/* Font handling in this library was designed to be quite customizable and lets
+ you decide what you want to use and what you want to provide. There are three
+ different ways to use the font atlas. The first two use your own font
+ handling scheme and only require essential data to run nuklear. The next,
+ slightly more advanced, option is font handling with vertex buffer output.
+ Finally, the most complex option API-wise is using nuklear's font baking API.
+
+ 1.) Using your own implementation without vertex buffer output
+ --------------------------------------------------------------
+ The easiest way to do font handling is by just providing a
+ `nk_user_font` struct which only requires the height in pixels of the used
+ font and a callback to calculate the width of a string. This way of handling
+ fonts is best suited to the normal draw shape command API where you
+ do all the text drawing yourself and the library does not require any kind
+ of deeper knowledge about which font handling mechanism you use.
+ IMPORTANT: the `nk_user_font` pointer provided to nuklear has to persist
+ over the complete lifetime! I know this sucks but it is currently the only
+ way to switch between fonts.
+
+ float your_text_width_calculation(nk_handle handle, float height, const char *text, int len)
+ {
+ your_font_type *type = handle.ptr;
+ float text_width = ...;
+ return text_width;
+ }
+
+ struct nk_user_font font;
+ font.userdata.ptr = &your_font_class_or_struct;
+ font.height = your_font_height;
+ font.width = your_text_width_calculation;
+
+ struct nk_context ctx;
+ nk_init_default(&ctx, &font);
+
+ 2.) Using your own implementation with vertex buffer output
+ --------------------------------------------------------------
+ While the first approach works fine if you don't want to use the optional
+ vertex buffer output, it is not enough if you do. To get font handling working
+ for these cases you have to provide two additional parameters inside the
+ `nk_user_font`: first, a texture atlas handle used to draw text as subimages
+ of a bigger font atlas texture, and second, a callback to query a character's glyph
+ information (offset, size, ...). So it is still possible to provide your own
+ font and use the vertex buffer output.
+
+ float your_text_width_calculation(nk_handle handle, float height, const char *text, int len)
+ {
+ your_font_type *type = handle.ptr;
+ float text_width = ...;
+ return text_width;
+ }
+ void query_your_font_glyph(nk_handle handle, float font_height, struct nk_user_font_glyph *glyph, nk_rune codepoint, nk_rune next_codepoint)
+ {
+ your_font_type *type = handle.ptr;
+ glyph->width = ...;
+ glyph->height = ...;
+ glyph->xadvance = ...;
+ glyph->uv[0].x = ...;
+ glyph->uv[0].y = ...;
+ glyph->uv[1].x = ...;
+ glyph->uv[1].y = ...;
+ glyph->offset.x = ...;
+ glyph->offset.y = ...;
+ }
+
+ struct nk_user_font font;
+ font.userdata.ptr = &your_font_class_or_struct;
+ font.height = your_font_height;
+ font.width = your_text_width_calculation;
+ font.query = query_your_font_glyph;
+ font.texture.id = your_font_texture;
+
+ struct nk_context ctx;
+ nk_init_default(&ctx, &font);
+
+ 3.) Nuklear font baker
+ ------------------------------------
+ The final approach, if you do not have font handling functionality of your own
+ or don't want to use it with this library, is the optional font baker.
+ The font baker APIs can be used to create a font plus a font atlas texture
+ and can be used with or without the vertex buffer output.
+
+ It still uses the `nk_user_font` struct and the two different approaches
+ previously stated still work. The font baker is not located inside
+ `nk_context` like all other systems since it can be understood as more of
+ an extension to nuklear and does not really depend on any `nk_context` state.
+
+ The font baker needs to be initialized first by one of the nk_font_atlas_init_xxx
+ functions. If you don't care about memory just call the default version
+ `nk_font_atlas_init_default`, which will allocate all memory from the standard library.
+ If you want to control memory allocation, but don't care whether the allocated
+ memory is temporary (and can therefore be freed directly after the baking process
+ is over) or permanent, you can call `nk_font_atlas_init`.
+
+ After successfully initializing the font baker you can add TrueType (.ttf) fonts from
+ different sources like memory or file by calling one of the `nk_font_atlas_add_xxx`
+ functions. Adding a font will permanently store each font, font config and ttf memory block(!)
+ inside the font atlas and allows the font atlas to be reused. If you don't want to reuse
+ the font baker, for example by adding additional fonts later, you can call
+ `nk_font_atlas_cleanup` after the baking process is over (after calling nk_font_atlas_end).
+
+ As soon as you have added all the fonts you want you can start the baking process
+ of turning every selected glyph into an image by calling `nk_font_atlas_bake`.
+ The baking process returns image memory, width and height, which can be used to
+ either create your own image object or upload it to any graphics library.
+ Either way, you finally have to call `nk_font_atlas_end`, which
+ will free all temporary memory including the font atlas image, so make sure
+ you created your texture beforehand. `nk_font_atlas_end` requires a handle
+ to your font texture or object and optionally fills a `struct nk_draw_null_texture`
+ which can be used for the optional vertex output. If you don't want it just
+ set the argument to `NULL`.
+
+ At this point you are done, and if you don't want to reuse the font atlas you
+ can call `nk_font_atlas_cleanup` to free all TrueType blobs and configuration
+ memory. Finally, if you no longer use the font atlas or any of its fonts,
+ you need to call `nk_font_atlas_clear` to free all memory still being used.
+
+ struct nk_font_atlas atlas;
+ nk_font_atlas_init_default(&atlas);
+ nk_font_atlas_begin(&atlas);
+ struct nk_font *font = nk_font_atlas_add_from_file(&atlas, "Path/To/Your/TTF_Font.ttf", 13, 0);
+ struct nk_font *font2 = nk_font_atlas_add_from_file(&atlas, "Path/To/Your/TTF_Font2.ttf", 16, 0);
+ const void* img = nk_font_atlas_bake(&atlas, &img_width, &img_height, NK_FONT_ATLAS_RGBA32);
+ nk_font_atlas_end(&atlas, nk_handle_id(texture), 0);
+
+ struct nk_context ctx;
+ nk_init_default(&ctx, &font->handle);
+ while (1) {
+
+ }
+ nk_font_atlas_clear(&atlas);
+
+ The font baker API is probably the most complex API inside this library and
+ I would suggest reading some of my examples in `example/` to get a grip on how
+ to use the font atlas. There are a number of details I left out. For example
+ how to merge fonts, how to configure a font with `nk_font_config` to use other languages,
+ how to use another texture coordinate format and a lot more:
+
+ struct nk_font_config cfg = nk_font_config(font_pixel_height);
+ cfg.merge_mode = nk_false or nk_true;
+ cfg.range = nk_font_korean_glyph_ranges();
+ cfg.coord_type = NK_COORD_PIXEL;
+ struct nk_font *font = nk_font_atlas_add_from_file(&atlas, "Path/To/Your/TTF_Font.ttf", 13, &cfg);
+
+*/
+struct nk_user_font_glyph;
+typedef float(*nk_text_width_f)(nk_handle, float h, const char*, int len);
+typedef void(*nk_query_font_glyph_f)(nk_handle handle, float font_height,
+ struct nk_user_font_glyph *glyph,
+ nk_rune codepoint, nk_rune next_codepoint);
+
+#if defined(NK_INCLUDE_VERTEX_BUFFER_OUTPUT) || defined(NK_INCLUDE_SOFTWARE_FONT)
+struct nk_user_font_glyph {
+ struct nk_vec2 uv[2];
+ /* texture coordinates */
+ struct nk_vec2 offset;
+ /* offset between top left and glyph */
+ float width, height;
+ /* size of the glyph */
+ float xadvance;
+ /* offset to the next glyph */
+};
+#endif
+
+struct nk_user_font {
+ nk_handle userdata;
+ /* user provided font handle */
+ float height;
+ /* max height of the font */
+ nk_text_width_f width;
+ /* font string width in pixel callback */
+#ifdef NK_INCLUDE_VERTEX_BUFFER_OUTPUT
+ nk_query_font_glyph_f query;
+ /* font glyph callback to query drawing info */
+ nk_handle texture;
+ /* texture handle to the used font atlas or texture */
+#endif
+};
+
+#ifdef NK_INCLUDE_FONT_BAKING
+enum nk_font_coord_type {
+ NK_COORD_UV, /* texture coordinates inside font glyphs are clamped between 0-1 */
+ NK_COORD_PIXEL /* texture coordinates inside font glyphs are in absolute pixel */
+};
+
+struct nk_font;
+struct nk_baked_font {
+ float height;
+ /* height of the font */
+ float ascent, descent;
+ /* font glyphs ascent and descent */
+ nk_rune glyph_offset;
+ /* glyph array offset inside the font glyph baking output array */
+ nk_rune glyph_count;
+ /* number of glyphs of this font inside the glyph baking array output */
+ const nk_rune *ranges;
+ /* font codepoint ranges as pairs of (from/to) and 0 as last element */
+};
+
+struct nk_font_config {
+ struct nk_font_config *next;
+ /* NOTE: only used internally */
+ void *ttf_blob;
+ /* pointer to loaded TTF file memory block.
+ * NOTE: not needed for nk_font_atlas_add_from_memory and nk_font_atlas_add_from_file. */
+ nk_size ttf_size;
+ /* size of the loaded TTF file memory block
+ * NOTE: not needed for nk_font_atlas_add_from_memory and nk_font_atlas_add_from_file. */
+
+ unsigned char ttf_data_owned_by_atlas;
+ /* used inside font atlas; defaults to 0 */
+ unsigned char merge_mode;
+ /* merges this font into the last font */
+ unsigned char pixel_snap;
+ /* align every character to pixel boundary (if true set oversample (1,1)) */
+ unsigned char oversample_v, oversample_h;
+ /* rasterize at high quality for sub-pixel positioning */
+ unsigned char padding[3];
+
+ float size;
+ /* baked pixel height of the font */
+ enum nk_font_coord_type coord_type;
+ /* texture coordinate format with either pixel or UV coordinates */
+ struct nk_vec2 spacing;
+ /* extra pixel spacing between glyphs */
+ const nk_rune *range;
+ /* list of unicode ranges (2 values per range, zero terminated) */
+ struct nk_baked_font *font;
+ /* font to setup in the baking process: NOTE: not needed for font atlas */
+ nk_rune fallback_glyph;
+ /* fallback glyph to use if a given rune is not found */
+ struct nk_font_config *n;
+ struct nk_font_config *p;
+};
+
+struct nk_font_glyph {
+ nk_rune codepoint;
+ float xadvance;
+ float x0, y0, x1, y1, w, h;
+ float u0, v0, u1, v1;
+};
+
+struct nk_font {
+ struct nk_font *next;
+ struct nk_user_font handle;
+ struct nk_baked_font info;
+ float scale;
+ struct nk_font_glyph *glyphs;
+ const struct nk_font_glyph *fallback;
+ nk_rune fallback_codepoint;
+ nk_handle texture;
+ struct nk_font_config *config;
+};
+
+enum nk_font_atlas_format {
+ NK_FONT_ATLAS_ALPHA8,
+ NK_FONT_ATLAS_RGBA32
+};
+
+struct nk_font_atlas {
+ void *pixel;
+ int tex_width;
+ int tex_height;
+
+ struct nk_allocator permanent;
+ struct nk_allocator temporary;
+
+ struct nk_recti custom;
+ struct nk_cursor cursors[NK_CURSOR_COUNT];
+
+ int glyph_count;
+ struct nk_font_glyph *glyphs;
+ struct nk_font *default_font;
+ struct nk_font *fonts;
+ struct nk_font_config *config;
+ int font_num;
+};
+
+/* some language glyph codepoint ranges */
+NK_API const nk_rune *nk_font_default_glyph_ranges(void);
+NK_API const nk_rune *nk_font_chinese_glyph_ranges(void);
+NK_API const nk_rune *nk_font_cyrillic_glyph_ranges(void);
+NK_API const nk_rune *nk_font_korean_glyph_ranges(void);
+
+#ifdef NK_INCLUDE_DEFAULT_ALLOCATOR
+NK_API void nk_font_atlas_init_default(struct nk_font_atlas*);
+#endif
+NK_API void nk_font_atlas_init(struct nk_font_atlas*, struct nk_allocator*);
+NK_API void nk_font_atlas_init_custom(struct nk_font_atlas*, struct nk_allocator *persistent, struct nk_allocator *transient);
+NK_API void nk_font_atlas_begin(struct nk_font_atlas*);
+NK_API struct nk_font_config nk_font_config(float pixel_height);
+NK_API struct nk_font *nk_font_atlas_add(struct nk_font_atlas*, const struct nk_font_config*);
+#ifdef NK_INCLUDE_DEFAULT_FONT
+NK_API struct nk_font* nk_font_atlas_add_default(struct nk_font_atlas*, float height, const struct nk_font_config*);
+#endif
+NK_API struct nk_font* nk_font_atlas_add_from_memory(struct nk_font_atlas *atlas, void *memory, nk_size size, float height, const struct nk_font_config *config);
+#ifdef NK_INCLUDE_STANDARD_IO
+NK_API struct nk_font* nk_font_atlas_add_from_file(struct nk_font_atlas *atlas, const char *file_path, float height, const struct nk_font_config*);
+#endif
+NK_API struct nk_font *nk_font_atlas_add_compressed(struct nk_font_atlas*, void *memory, nk_size size, float height, const struct nk_font_config*);
+NK_API struct nk_font* nk_font_atlas_add_compressed_base85(struct nk_font_atlas*, const char *data, float height, const struct nk_font_config *config);
+NK_API const void* nk_font_atlas_bake(struct nk_font_atlas*, int *width, int *height, enum nk_font_atlas_format);
+NK_API void nk_font_atlas_end(struct nk_font_atlas*, nk_handle tex, struct nk_draw_null_texture*);
+NK_API const struct nk_font_glyph* nk_font_find_glyph(struct nk_font*, nk_rune unicode);
+NK_API void nk_font_atlas_cleanup(struct nk_font_atlas *atlas);
+NK_API void nk_font_atlas_clear(struct nk_font_atlas*);
+
+#endif
+
+/* ==============================================================
+ *
+ * MEMORY BUFFER
+ *
+ * ===============================================================*/
+/* A basic (double)-buffer with linear allocation and resetting as the only
+ freeing policy. The buffer's main purpose is to control all memory management
+ inside the GUI toolkit and still leave memory control as much as possible in
+ the hands of the user, while also making sure the library is easy to use if
+ not as much control is needed.
+ In general all memory inside this library can be provided by the user in
+ three different ways.
+
+ The first way, and the one providing the most control, is by just passing a fixed
+ size memory block. In this case all control lies in the hands of the user
+ since you can control exactly where the memory comes from and how much memory
+ the library should consume. Of course using the fixed size API removes the
+ ability to automatically resize a buffer if not enough memory is provided, so
+ you have to take over the resizing. While a fixed size buffer sounds
+ quite limiting, it is very effective in this library since the actual memory
+ consumption is quite stable and has a fixed upper bound for a lot of cases.
+
+ If you don't want to think about how much memory the library should allocate
+ at all times, or have a very dynamic UI with unpredictable memory consumption
+ habits, but still want control over memory allocation, you can use the dynamic
+ allocator based API. The allocator consists of two callbacks for allocating
+ and freeing memory and optional userdata so you can plug in your own allocator.
+
+ The final and easiest way is to define
+ NK_INCLUDE_DEFAULT_ALLOCATOR, which uses the standard library memory
+ allocation functions malloc and free and takes over complete control over
+ memory in this library.
+*/
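+/* A rough sketch of the three ways described above, shown as mutually exclusive
+ * alternatives (the sizes and the `my_alloc`/`my_free` callbacks are assumptions
+ * of this sketch, not requirements of the library):
+ *
+ *     struct nk_buffer cmds;
+ *     // 1.) fixed size memory block provided and sized by the caller
+ *     static char memory[64 * 1024];
+ *     nk_buffer_init_fixed(&cmds, memory, sizeof(memory));
+ *     // 2.) or: dynamic buffer backed by a user allocator (two callbacks plus userdata)
+ *     struct nk_allocator alloc;
+ *     alloc.userdata.ptr = 0; alloc.alloc = my_alloc; alloc.free = my_free;
+ *     nk_buffer_init(&cmds, &alloc, 4 * 1024);
+ *     // 3.) or: with NK_INCLUDE_DEFAULT_ALLOCATOR defined, malloc/free are used
+ *     nk_buffer_init_default(&cmds);
+ */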
+struct nk_memory_status {
+ void *memory;
+ unsigned int type;
+ nk_size size;
+ nk_size allocated;
+ nk_size needed;
+ nk_size calls;
+};
+
+enum nk_allocation_type {
+ NK_BUFFER_FIXED,
+ NK_BUFFER_DYNAMIC
+};
+
+enum nk_buffer_allocation_type {
+ NK_BUFFER_FRONT,
+ NK_BUFFER_BACK,
+ NK_BUFFER_MAX
+};
+
+struct nk_buffer_marker {
+ int active;
+ nk_size offset;
+};
+
+struct nk_memory {void *ptr;nk_size size;};
+struct nk_buffer {
+ struct nk_buffer_marker marker[NK_BUFFER_MAX];
+ /* buffer marker to free a buffer to a certain offset */
+ struct nk_allocator pool;
+ /* allocator callback for dynamic buffers */
+ enum nk_allocation_type type;
+ /* memory management type */
+ struct nk_memory memory;
+ /* memory and size of the current memory block */
+ float grow_factor;
+ /* growing factor for dynamic memory management */
+ nk_size allocated;
+ /* total amount of memory allocated */
+ nk_size needed;
+ /* total memory that would be consumed given enough memory is present */
+ nk_size calls;
+ /* number of allocation calls */
+ nk_size size;
+ /* current size of the buffer */
+};
+
+#ifdef NK_INCLUDE_DEFAULT_ALLOCATOR
+NK_API void nk_buffer_init_default(struct nk_buffer*);
+#endif
+NK_API void nk_buffer_init(struct nk_buffer*, const struct nk_allocator*, nk_size size);
+NK_API void nk_buffer_init_fixed(struct nk_buffer*, void *memory, nk_size size);
+NK_API void nk_buffer_info(struct nk_memory_status*, struct nk_buffer*);
+NK_API void nk_buffer_push(struct nk_buffer*, enum nk_buffer_allocation_type type, const void *memory, nk_size size, nk_size align);
+NK_API void nk_buffer_mark(struct nk_buffer*, enum nk_buffer_allocation_type type);
+NK_API void nk_buffer_reset(struct nk_buffer*, enum nk_buffer_allocation_type type);
+NK_API void nk_buffer_clear(struct nk_buffer*);
+NK_API void nk_buffer_free(struct nk_buffer*);
+NK_API void *nk_buffer_memory(struct nk_buffer*);
+NK_API const void *nk_buffer_memory_const(const struct nk_buffer*);
+NK_API nk_size nk_buffer_total(struct nk_buffer*);
+
+/* ==============================================================
+ *
+ * STRING
+ *
+ * ===============================================================*/
+/* Basic string buffer which is only used in conjunction with the text editor
+ * to manage and manipulate dynamic or fixed size string content. This is _NOT_
+ * the default string handling method. The only instance in which you should have
+ * any contact with this API is if you interact with an `nk_text_edit` object inside
+ * one of the copy and paste functions, and even there only for more advanced cases. */
+struct nk_str {
+ struct nk_buffer buffer;
+ int len; /* in codepoints/runes/glyphs */
+};
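+/* For illustration only (assuming `te` is an application-owned, initialized
+ * `nk_text_edit`): reading the editor's current string contents through this API.
+ *
+ *     const char *text = nk_str_get_const(&te.string);
+ *     int bytes = nk_str_len_char(&te.string);
+ */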
+
+#ifdef NK_INCLUDE_DEFAULT_ALLOCATOR
+NK_API void nk_str_init_default(struct nk_str*);
+#endif
+NK_API void nk_str_init(struct nk_str*, const struct nk_allocator*, nk_size size);
+NK_API void nk_str_init_fixed(struct nk_str*, void *memory, nk_size size);
+NK_API void nk_str_clear(struct nk_str*);
+NK_API void nk_str_free(struct nk_str*);
+
+NK_API int nk_str_append_text_char(struct nk_str*, const char*, int);
+NK_API int nk_str_append_str_char(struct nk_str*, const char*);
+NK_API int nk_str_append_text_utf8(struct nk_str*, const char*, int);
+NK_API int nk_str_append_str_utf8(struct nk_str*, const char*);
+NK_API int nk_str_append_text_runes(struct nk_str*, const nk_rune*, int);
+NK_API int nk_str_append_str_runes(struct nk_str*, const nk_rune*);
+
+NK_API int nk_str_insert_at_char(struct nk_str*, int pos, const char*, int);
+NK_API int nk_str_insert_at_rune(struct nk_str*, int pos, const char*, int);
+
+NK_API int nk_str_insert_text_char(struct nk_str*, int pos, const char*, int);
+NK_API int nk_str_insert_str_char(struct nk_str*, int pos, const char*);
+NK_API int nk_str_insert_text_utf8(struct nk_str*, int pos, const char*, int);
+NK_API int nk_str_insert_str_utf8(struct nk_str*, int pos, const char*);
+NK_API int nk_str_insert_text_runes(struct nk_str*, int pos, const nk_rune*, int);
+NK_API int nk_str_insert_str_runes(struct nk_str*, int pos, const nk_rune*);
+
+NK_API void nk_str_remove_chars(struct nk_str*, int len);
+NK_API void nk_str_remove_runes(struct nk_str *str, int len);
+NK_API void nk_str_delete_chars(struct nk_str*, int pos, int len);
+NK_API void nk_str_delete_runes(struct nk_str*, int pos, int len);
+
+NK_API char *nk_str_at_char(struct nk_str*, int pos);
+NK_API char *nk_str_at_rune(struct nk_str*, int pos, nk_rune *unicode, int *len);
+NK_API nk_rune nk_str_rune_at(const struct nk_str*, int pos);
+NK_API const char *nk_str_at_char_const(const struct nk_str*, int pos);
+NK_API const char *nk_str_at_const(const struct nk_str*, int pos, nk_rune *unicode, int *len);
+
+NK_API char *nk_str_get(struct nk_str*);
+NK_API const char *nk_str_get_const(const struct nk_str*);
+NK_API int nk_str_len(struct nk_str*);
+NK_API int nk_str_len_char(struct nk_str*);
+
+/*===============================================================
+ *
+ * TEXT EDITOR
+ *
+ * ===============================================================*/
+/* Editing text in this library is handled by either `nk_edit_string` or
+ * `nk_edit_buffer`. But like almost everything in this library there are multiple
+ * ways of doing it, balancing control and ease of use both for memory
+ * management and for functionality, which is controlled by flags.
+ *
+ * This library generally allows three different levels of memory control:
+ * The first is the most basic way of just providing a simple char array with
+ * a string length. This method is probably the easiest way of handling simple
+ * user text input. The main upside is complete control over memory, while the biggest
+ * downside in comparison with the other two approaches is missing undo/redo.
+ *
+ * For UIs that require undo/redo the second way was created. It is based on
+ * a fixed size nk_text_edit struct, which has an internal undo/redo stack.
+ * This is mainly useful if you want something more like a text editor but don't want
+ * to have a dynamically growing buffer.
+ *
+ * The final way is using a dynamically growing nk_text_edit struct, which
+ * has both a default version if you don't care where memory comes from and an
+ * allocator version if you do. While the text editor is quite powerful for its
+ * complexity I would not recommend editing gigabytes of data with it.
+ * It is rather designed for uses cases which make sense for a GUI library not for
+ * an full blown text editor.
+ */
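+
+/* A minimal sketch of the first (plain char array) approach, assuming the
+ * `nk_edit_string` declaration and the NK_EDIT_SIMPLE flag found elsewhere in
+ * this header, and a valid `struct nk_context *ctx` inside an
+ * nk_begin/nk_end block:
+ *
+ *      static char field[64];
+ *      static int field_len;
+ *      nk_layout_row_dynamic(ctx, 30, 1);
+ *      nk_edit_string(ctx, NK_EDIT_SIMPLE, field, &field_len,
+ *          (int)sizeof(field), nk_filter_default);
+ *
+ * Undo/redo is only available through the `nk_text_edit` based paths, i.e.
+ * `nk_textedit_init_fixed` or the allocator based `nk_textedit_init` used
+ * together with `nk_edit_buffer`. */
+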
+#ifndef NK_TEXTEDIT_UNDOSTATECOUNT
+#define NK_TEXTEDIT_UNDOSTATECOUNT 99
+#endif
+
+#ifndef NK_TEXTEDIT_UNDOCHARCOUNT
+#define NK_TEXTEDIT_UNDOCHARCOUNT 999
+#endif
+
+struct nk_text_edit;
+struct nk_clipboard {
+ nk_handle userdata;
+ nk_plugin_paste paste;
+ nk_plugin_copy copy;
+};
+
+struct nk_text_undo_record {
+ int where;
+ short insert_length;
+ short delete_length;
+ short char_storage;
+};
+
+struct nk_text_undo_state {
+ struct nk_text_undo_record undo_rec[NK_TEXTEDIT_UNDOSTATECOUNT];
+ nk_rune undo_char[NK_TEXTEDIT_UNDOCHARCOUNT];
+ short undo_point;
+ short redo_point;
+ short undo_char_point;
+ short redo_char_point;
+};
+
+enum nk_text_edit_type {
+ NK_TEXT_EDIT_SINGLE_LINE,
+ NK_TEXT_EDIT_MULTI_LINE
+};
+
+enum nk_text_edit_mode {
+ NK_TEXT_EDIT_MODE_VIEW,
+ NK_TEXT_EDIT_MODE_INSERT,
+ NK_TEXT_EDIT_MODE_REPLACE
+};
+
+struct nk_text_edit {
+ struct nk_clipboard clip;
+ struct nk_str string;
+ nk_plugin_filter filter;
+ struct nk_vec2 scrollbar;
+
+ int cursor;
+ int select_start;
+ int select_end;
+ unsigned char mode;
+ unsigned char cursor_at_end_of_line;
+ unsigned char initialized;
+ unsigned char has_preferred_x;
+ unsigned char single_line;
+ unsigned char active;
+ unsigned char padding1;
+ float preferred_x;
+ struct nk_text_undo_state undo;
+};
+
+/* filter function */
+NK_API int nk_filter_default(const struct nk_text_edit*, nk_rune unicode);
+NK_API int nk_filter_ascii(const struct nk_text_edit*, nk_rune unicode);
+NK_API int nk_filter_float(const struct nk_text_edit*, nk_rune unicode);
+NK_API int nk_filter_decimal(const struct nk_text_edit*, nk_rune unicode);
+NK_API int nk_filter_hex(const struct nk_text_edit*, nk_rune unicode);
+NK_API int nk_filter_oct(const struct nk_text_edit*, nk_rune unicode);
+NK_API int nk_filter_binary(const struct nk_text_edit*, nk_rune unicode);
+
+/* text editor */
+#ifdef NK_INCLUDE_DEFAULT_ALLOCATOR
+NK_API void nk_textedit_init_default(struct nk_text_edit*);
+#endif
+NK_API void nk_textedit_init(struct nk_text_edit*, struct nk_allocator*, nk_size size);
+NK_API void nk_textedit_init_fixed(struct nk_text_edit*, void *memory, nk_size size);
+NK_API void nk_textedit_free(struct nk_text_edit*);
+NK_API void nk_textedit_text(struct nk_text_edit*, const char*, int total_len);
+NK_API void nk_textedit_delete(struct nk_text_edit*, int where, int len);
+NK_API void nk_textedit_delete_selection(struct nk_text_edit*);
+NK_API void nk_textedit_select_all(struct nk_text_edit*);
+NK_API int nk_textedit_cut(struct nk_text_edit*);
+NK_API int nk_textedit_paste(struct nk_text_edit*, char const*, int len);
+NK_API void nk_textedit_undo(struct nk_text_edit*);
+NK_API void nk_textedit_redo(struct nk_text_edit*);
+
+/* ===============================================================
+ *
+ * DRAWING
+ *
+ * ===============================================================*/
+/* This library was designed to be render backend agnostic, so it does
+ not draw anything to the screen itself. Instead all the shapes that
+ widgets are made of are buffered into memory and make up a command queue.
+ Each frame therefore fills the command buffer with draw commands
+ that then need to be executed by the user with their own render backend.
+ After that the command buffer needs to be cleared so a new frame can be
+ started. It is important to note that the command buffer is the main
+ drawing API and that the optional vertex buffer API only takes this format
+ and converts it into a hardware accessible format.
+
+ To use the command queue to draw your own widgets you can access the
+ command buffer of each window by calling `nk_window_get_canvas` after
+ having previously called `nk_begin`:
+
+ void draw_red_rectangle_widget(struct nk_context *ctx)
+ {
+ struct nk_command_buffer *canvas;
+ struct nk_input *input = &ctx->input;
+ canvas = nk_window_get_canvas(ctx);
+
+ struct nk_rect space;
+ enum nk_widget_layout_states state;
+ state = nk_widget(&space, ctx);
+ if (!state) return;
+
+ if (state != NK_WIDGET_ROM)
+ update_your_widget_by_user_input(...);
+ nk_fill_rect(canvas, space, 0, nk_rgb(255,0,0));
+ }
+
+ if (nk_begin(...)) {
+ nk_layout_row_dynamic(ctx, 25, 1);
+ draw_red_rectangle_widget(ctx);
+ }
+ nk_end(..)
+
+ If you want to create your own widgets the important call to know is
+ `nk_widget`. It allocates space on the panel reserved for this widget,
+ but it also returns the state of the widget space. If your widget is not
+ visible and does not have to be updated the state is '0' and you can just
+ return. If it only has to be drawn the state will be `NK_WIDGET_ROM`,
+ otherwise you can both update and draw your widget. The reason for this
+ separation is to only draw and update what is actually necessary, which is
+ crucial for performance. (A sketch of executing the resulting command queue
+ follows below.)
+*/
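+
+/* A minimal sketch of executing the command queue each frame with your own
+ * backend. It assumes the `nk_foreach` and `nk_clear` declarations found
+ * elsewhere in this header; the `my_draw_*` functions are hypothetical
+ * backend calls and not part of this library:
+ *
+ *      const struct nk_command *cmd;
+ *      nk_foreach(cmd, ctx) {
+ *          switch (cmd->type) {
+ *          case NK_COMMAND_LINE: {
+ *              const struct nk_command_line *l = (const struct nk_command_line*)cmd;
+ *              my_draw_line(l->begin, l->end, l->line_thickness, l->color);
+ *          } break;
+ *          case NK_COMMAND_RECT_FILLED: {
+ *              const struct nk_command_rect_filled *r = (const struct nk_command_rect_filled*)cmd;
+ *              my_draw_rect_filled(r->x, r->y, r->w, r->h, r->rounding, r->color);
+ *          } break;
+ *          default: break;
+ *          }
+ *      }
+ *      nk_clear(ctx);
+ */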
+enum nk_command_type {
+ NK_COMMAND_NOP,
+ NK_COMMAND_SCISSOR,
+ NK_COMMAND_LINE,
+ NK_COMMAND_CURVE,
+ NK_COMMAND_RECT,
+ NK_COMMAND_RECT_FILLED,
+ NK_COMMAND_RECT_MULTI_COLOR,
+ NK_COMMAND_CIRCLE,
+ NK_COMMAND_CIRCLE_FILLED,
+ NK_COMMAND_ARC,
+ NK_COMMAND_ARC_FILLED,
+ NK_COMMAND_TRIANGLE,
+ NK_COMMAND_TRIANGLE_FILLED,
+ NK_COMMAND_POLYGON,
+ NK_COMMAND_POLYGON_FILLED,
+ NK_COMMAND_POLYLINE,
+ NK_COMMAND_TEXT,
+ NK_COMMAND_IMAGE,
+ NK_COMMAND_CUSTOM
+};
+
+/* command base and header of every command inside the buffer */
+struct nk_command {
+ enum nk_command_type type;
+ nk_size next;
+#ifdef NK_INCLUDE_COMMAND_USERDATA
+ nk_handle userdata;
+#endif
+};
+
+struct nk_command_scissor {
+ struct nk_command header;
+ short x, y;
+ unsigned short w, h;
+};
+
+struct nk_command_line {
+ struct nk_command header;
+ unsigned short line_thickness;
+ struct nk_vec2i begin;
+ struct nk_vec2i end;
+ struct nk_color color;
+};
+
+struct nk_command_curve {
+ struct nk_command header;
+ unsigned short line_thickness;
+ struct nk_vec2i begin;
+ struct nk_vec2i end;
+ struct nk_vec2i ctrl[2];
+ struct nk_color color;
+};
+
+struct nk_command_rect {
+ struct nk_command header;
+ unsigned short rounding;
+ unsigned short line_thickness;
+ short x, y;
+ unsigned short w, h;
+ struct nk_color color;
+};
+
+struct nk_command_rect_filled {
+ struct nk_command header;
+ unsigned short rounding;
+ short x, y;
+ unsigned short w, h;
+ struct nk_color color;
+};
+
+struct nk_command_rect_multi_color {
+ struct nk_command header;
+ short x, y;
+ unsigned short w, h;
+ struct nk_color left;
+ struct nk_color top;
+ struct nk_color bottom;
+ struct nk_color right;
+};
+
+struct nk_command_triangle {
+ struct nk_command header;
+ unsigned short line_thickness;
+ struct nk_vec2i a;
+ struct nk_vec2i b;
+ struct nk_vec2i c;
+ struct nk_color color;
+};
+
+struct nk_command_triangle_filled {
+ struct nk_command header;
+ struct nk_vec2i a;
+ struct nk_vec2i b;
+ struct nk_vec2i c;
+ struct nk_color color;
+};
+
+struct nk_command_circle {
+ struct nk_command header;
+ short x, y;
+ unsigned short line_thickness;
+ unsigned short w, h;
+ struct nk_color color;
+};
+
+struct nk_command_circle_filled {
+ struct nk_command header;
+ short x, y;
+ unsigned short w, h;
+ struct nk_color color;
+};
+
+struct nk_command_arc {
+ struct nk_command header;
+ short cx, cy;
+ unsigned short r;
+ unsigned short line_thickness;
+ float a[2];
+ struct nk_color color;
+};
+
+struct nk_command_arc_filled {
+ struct nk_command header;
+ short cx, cy;
+ unsigned short r;
+ float a[2];
+ struct nk_color color;
+};
+
+struct nk_command_polygon {
+ struct nk_command header;
+ struct nk_color color;
+ unsigned short line_thickness;
+ unsigned short point_count;
+ struct nk_vec2i points[1];
+};
+
+struct nk_command_polygon_filled {
+ struct nk_command header;
+ struct nk_color color;
+ unsigned short point_count;
+ struct nk_vec2i points[1];
+};
+
+struct nk_command_polyline {
+ struct nk_command header;
+ struct nk_color color;
+ unsigned short line_thickness;
+ unsigned short point_count;
+ struct nk_vec2i points[1];
+};
+
+struct nk_command_image {
+ struct nk_command header;
+ short x, y;
+ unsigned short w, h;
+ struct nk_image img;
+ struct nk_color col;
+};
+
+typedef void (*nk_command_custom_callback)(void *canvas, short x,short y,
+ unsigned short w, unsigned short h, nk_handle callback_data);
+struct nk_command_custom {
+ struct nk_command header;
+ short x, y;
+ unsigned short w, h;
+ nk_handle callback_data;
+ nk_command_custom_callback callback;
+};
+
+struct nk_command_text {
+ struct nk_command header;
+ const struct nk_user_font *font;
+ struct nk_color background;
+ struct nk_color foreground;
+ short x, y;
+ unsigned short w, h;
+ float height;
+ int length;
+ char string[1];
+};
+
+enum nk_command_clipping {
+ NK_CLIPPING_OFF = nk_false,
+ NK_CLIPPING_ON = nk_true
+};
+
+struct nk_command_buffer {
+ struct nk_buffer *base;
+ struct nk_rect clip;
+ int use_clipping;
+ nk_handle userdata;
+ nk_size begin, end, last;
+};
+
+/* shape outlines */
+NK_API void nk_stroke_line(struct nk_command_buffer *b, float x0, float y0, float x1, float y1, float line_thickness, struct nk_color);
+NK_API void nk_stroke_curve(struct nk_command_buffer*, float, float, float, float, float, float, float, float, float line_thickness, struct nk_color);
+NK_API void nk_stroke_rect(struct nk_command_buffer*, struct nk_rect, float rounding, float line_thickness, struct nk_color);
+NK_API void nk_stroke_circle(struct nk_command_buffer*, struct nk_rect, float line_thickness, struct nk_color);
+NK_API void nk_stroke_arc(struct nk_command_buffer*, float cx, float cy, float radius, float a_min, float a_max, float line_thickness, struct nk_color);
+NK_API void nk_stroke_triangle(struct nk_command_buffer*, float, float, float, float, float, float, float line_thickness, struct nk_color);
+NK_API void nk_stroke_polyline(struct nk_command_buffer*, float *points, int point_count, float line_thickness, struct nk_color col);
+NK_API void nk_stroke_polygon(struct nk_command_buffer*, float*, int point_count, float line_thickness, struct nk_color);
+
+/* filled shapes */
+NK_API void nk_fill_rect(struct nk_command_buffer*, struct nk_rect, float rounding, struct nk_color);
+NK_API void nk_fill_rect_multi_color(struct nk_command_buffer*, struct nk_rect, struct nk_color left, struct nk_color top, struct nk_color right, struct nk_color bottom);
+NK_API void nk_fill_circle(struct nk_command_buffer*, struct nk_rect, struct nk_color);
+NK_API void nk_fill_arc(struct nk_command_buffer*, float cx, float cy, float radius, float a_min, float a_max, struct nk_color);
+NK_API void nk_fill_triangle(struct nk_command_buffer*, float x0, float y0, float x1, float y1, float x2, float y2, struct nk_color);
+NK_API void nk_fill_polygon(struct nk_command_buffer*, float*, int point_count, struct nk_color);
+
+/* misc */
+NK_API void nk_draw_image(struct nk_command_buffer*, struct nk_rect, const struct nk_image*, struct nk_color);
+NK_API void nk_draw_text(struct nk_command_buffer*, struct nk_rect, const char *text, int len, const struct nk_user_font*, struct nk_color, struct nk_color);
+NK_API void nk_push_scissor(struct nk_command_buffer*, struct nk_rect);
+NK_API void nk_push_custom(struct nk_command_buffer*, struct nk_rect, nk_command_custom_callback, nk_handle usr);
+
+/* ===============================================================
+ *
+ * INPUT
+ *
+ * ===============================================================*/
+struct nk_mouse_button {
+ int down;
+ unsigned int clicked;
+ struct nk_vec2 clicked_pos;
+};
+struct nk_mouse {
+ struct nk_mouse_button buttons[NK_BUTTON_MAX];
+ struct nk_vec2 pos;
+ struct nk_vec2 prev;
+ struct nk_vec2 delta;
+ struct nk_vec2 scroll_delta;
+ unsigned char grab;
+ unsigned char grabbed;
+ unsigned char ungrab;
+};
+
+struct nk_key {
+ int down;
+ unsigned int clicked;
+};
+struct nk_keyboard {
+ struct nk_key keys[NK_KEY_MAX];
+ char text[NK_INPUT_MAX];
+ int text_len;
+};
+
+struct nk_input {
+ struct nk_keyboard keyboard;
+ struct nk_mouse mouse;
+};
+
+NK_API int nk_input_has_mouse_click(const struct nk_input*, enum nk_buttons);
+NK_API int nk_input_has_mouse_click_in_rect(const struct nk_input*, enum nk_buttons, struct nk_rect);
+NK_API int nk_input_has_mouse_click_down_in_rect(const struct nk_input*, enum nk_buttons, struct nk_rect, int down);
+NK_API int nk_input_is_mouse_click_in_rect(const struct nk_input*, enum nk_buttons, struct nk_rect);
+NK_API int nk_input_is_mouse_click_down_in_rect(const struct nk_input *i, enum nk_buttons id, struct nk_rect b, int down);
+NK_API int nk_input_any_mouse_click_in_rect(const struct nk_input*, struct nk_rect);
+NK_API int nk_input_is_mouse_prev_hovering_rect(const struct nk_input*, struct nk_rect);
+NK_API int nk_input_is_mouse_hovering_rect(const struct nk_input*, struct nk_rect);
+NK_API int nk_input_mouse_clicked(const struct nk_input*, enum nk_buttons, struct nk_rect);
+NK_API int nk_input_is_mouse_down(const struct nk_input*, enum nk_buttons);
+NK_API int nk_input_is_mouse_pressed(const struct nk_input*, enum nk_buttons);
+NK_API int nk_input_is_mouse_released(const struct nk_input*, enum nk_buttons);
+NK_API int nk_input_is_key_pressed(const struct nk_input*, enum nk_keys);
+NK_API int nk_input_is_key_released(const struct nk_input*, enum nk_keys);
+NK_API int nk_input_is_key_down(const struct nk_input*, enum nk_keys);
+
+/* ===============================================================
+ *
+ * DRAW LIST
+ *
+ * ===============================================================*/
+#ifdef NK_INCLUDE_VERTEX_BUFFER_OUTPUT
+/* The optional vertex buffer draw list provides a 2D drawing context
+ with antialiasing functionality which takes basic filled or outlined shapes
+ or a path and outputs vertices, elements and draw commands.
+ You are not required to use the draw list API directly while using this
+ library, since converting the default draw command output is done by
+ just calling `nk_convert`, but it is still exposed here because it can
+ be useful.
+
+ The draw list is based on a path buffering and polygon and polyline
+ rendering API which offers many ways to draw 2D content to screen.
+ In fact it is probably more powerful than needed, but it allows even
+ crazier things than this library provides by default.
+*/
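+
+/* A minimal sketch of describing a backend vertex format for `nk_convert`
+ * (declared elsewhere in this header) using the layout enums defined below;
+ * `struct my_vertex` is a hypothetical backend type, not part of this
+ * library:
+ *
+ *      struct my_vertex { float pos[2]; float uv[2]; nk_byte col[4]; };
+ *      static const struct nk_draw_vertex_layout_element vertex_layout[] = {
+ *          {NK_VERTEX_POSITION, NK_FORMAT_FLOAT,    NK_OFFSETOF(struct my_vertex, pos)},
+ *          {NK_VERTEX_TEXCOORD, NK_FORMAT_FLOAT,    NK_OFFSETOF(struct my_vertex, uv)},
+ *          {NK_VERTEX_COLOR,    NK_FORMAT_R8G8B8A8, NK_OFFSETOF(struct my_vertex, col)},
+ *          {NK_VERTEX_LAYOUT_END}
+ *      };
+ *
+ * This array is then referenced from `struct nk_convert_config` when calling
+ * `nk_convert`. */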
+#ifdef NK_UINT_DRAW_INDEX
+typedef nk_uint nk_draw_index;
+#else
+typedef nk_ushort nk_draw_index;
+#endif
+enum nk_draw_list_stroke {
+ NK_STROKE_OPEN = nk_false,
+ /* the path is left open, with no connection back to the beginning */
+ NK_STROKE_CLOSED = nk_true
+ /* the path is closed, with a connection back to the beginning */
+};
+
+enum nk_draw_vertex_layout_attribute {
+ NK_VERTEX_POSITION,
+ NK_VERTEX_COLOR,
+ NK_VERTEX_TEXCOORD,
+ NK_VERTEX_ATTRIBUTE_COUNT
+};
+
+enum nk_draw_vertex_layout_format {
+ NK_FORMAT_SCHAR,
+ NK_FORMAT_SSHORT,
+ NK_FORMAT_SINT,
+ NK_FORMAT_UCHAR,
+ NK_FORMAT_USHORT,
+ NK_FORMAT_UINT,
+ NK_FORMAT_FLOAT,
+ NK_FORMAT_DOUBLE,
+
+ NK_FORMAT_COLOR_BEGIN,
+ NK_FORMAT_R8G8B8 = NK_FORMAT_COLOR_BEGIN,
+ NK_FORMAT_R16G15B16,
+ NK_FORMAT_R32G32B32,
+
+ NK_FORMAT_R8G8B8A8,
+ NK_FORMAT_B8G8R8A8,
+ NK_FORMAT_R16G15B16A16,
+ NK_FORMAT_R32G32B32A32,
+ NK_FORMAT_R32G32B32A32_FLOAT,
+ NK_FORMAT_R32G32B32A32_DOUBLE,
+
+ NK_FORMAT_RGB32,
+ NK_FORMAT_RGBA32,
+ NK_FORMAT_COLOR_END = NK_FORMAT_RGBA32,
+ NK_FORMAT_COUNT
+};
+
+#define NK_VERTEX_LAYOUT_END NK_VERTEX_ATTRIBUTE_COUNT,NK_FORMAT_COUNT,0
+struct nk_draw_vertex_layout_element {
+ enum nk_draw_vertex_layout_attribute attribute;
+ enum nk_draw_vertex_layout_format format;
+ nk_size offset;
+};
+
+struct nk_draw_command {
+ unsigned int elem_count;
+ /* number of elements in the current draw batch */
+ struct nk_rect clip_rect;
+ /* current screen clipping rectangle */
+ nk_handle texture;
+ /* current texture to set */
+#ifdef NK_INCLUDE_COMMAND_USERDATA
+ nk_handle userdata;
+#endif
+};
+
+struct nk_draw_list {
+ struct nk_rect clip_rect;
+ struct nk_vec2 circle_vtx[12];
+ struct nk_convert_config config;
+
+ struct nk_buffer *buffer;
+ struct nk_buffer *vertices;
+ struct nk_buffer *elements;
+
+ unsigned int element_count;
+ unsigned int vertex_count;
+ unsigned int cmd_count;
+ nk_size cmd_offset;
+
+ unsigned int path_count;
+ unsigned int path_offset;
+
+ enum nk_anti_aliasing line_AA;
+ enum nk_anti_aliasing shape_AA;
+
+#ifdef NK_INCLUDE_COMMAND_USERDATA
+ nk_handle userdata;
+#endif
+};
+
+/* draw list */
+NK_API void nk_draw_list_init(struct nk_draw_list*);
+NK_API void nk_draw_list_setup(struct nk_draw_list*, const struct nk_convert_config*, struct nk_buffer *cmds, struct nk_buffer *vertices, struct nk_buffer *elements, enum nk_anti_aliasing line_aa,enum nk_anti_aliasing shape_aa);
+
+/* drawing */
+#define nk_draw_list_foreach(cmd, can, b) for((cmd)=nk__draw_list_begin(can, b); (cmd)!=0; (cmd)=nk__draw_list_next(cmd, b, can))
+NK_API const struct nk_draw_command* nk__draw_list_begin(const struct nk_draw_list*, const struct nk_buffer*);
+NK_API const struct nk_draw_command* nk__draw_list_next(const struct nk_draw_command*, const struct nk_buffer*, const struct nk_draw_list*);
+NK_API const struct nk_draw_command* nk__draw_list_end(const struct nk_draw_list*, const struct nk_buffer*);
+
+/* path */
+NK_API void nk_draw_list_path_clear(struct nk_draw_list*);
+NK_API void nk_draw_list_path_line_to(struct nk_draw_list*, struct nk_vec2 pos);
+NK_API void nk_draw_list_path_arc_to_fast(struct nk_draw_list*, struct nk_vec2 center, float radius, int a_min, int a_max);
+NK_API void nk_draw_list_path_arc_to(struct nk_draw_list*, struct nk_vec2 center, float radius, float a_min, float a_max, unsigned int segments);
+NK_API void nk_draw_list_path_rect_to(struct nk_draw_list*, struct nk_vec2 a, struct nk_vec2 b, float rounding);
+NK_API void nk_draw_list_path_curve_to(struct nk_draw_list*, struct nk_vec2 p2, struct nk_vec2 p3, struct nk_vec2 p4, unsigned int num_segments);
+NK_API void nk_draw_list_path_fill(struct nk_draw_list*, struct nk_color);
+NK_API void nk_draw_list_path_stroke(struct nk_draw_list*, struct nk_color, enum nk_draw_list_stroke closed, float thickness);
+
+/* stroke */
+NK_API void nk_draw_list_stroke_line(struct nk_draw_list*, struct nk_vec2 a, struct nk_vec2 b, struct nk_color, float thickness);
+NK_API void nk_draw_list_stroke_rect(struct nk_draw_list*, struct nk_rect rect, struct nk_color, float rounding, float thickness);
+NK_API void nk_draw_list_stroke_triangle(struct nk_draw_list*, struct nk_vec2 a, struct nk_vec2 b, struct nk_vec2 c, struct nk_color, float thickness);
+NK_API void nk_draw_list_stroke_circle(struct nk_draw_list*, struct nk_vec2 center, float radius, struct nk_color, unsigned int segs, float thickness);
+NK_API void nk_draw_list_stroke_curve(struct nk_draw_list*, struct nk_vec2 p0, struct nk_vec2 cp0, struct nk_vec2 cp1, struct nk_vec2 p1, struct nk_color, unsigned int segments, float thickness);
+NK_API void nk_draw_list_stroke_poly_line(struct nk_draw_list*, const struct nk_vec2 *pnts, const unsigned int cnt, struct nk_color, enum nk_draw_list_stroke, float thickness, enum nk_anti_aliasing);
+
+/* fill */
+NK_API void nk_draw_list_fill_rect(struct nk_draw_list*, struct nk_rect rect, struct nk_color, float rounding);
+NK_API void nk_draw_list_fill_rect_multi_color(struct nk_draw_list*, struct nk_rect rect, struct nk_color left, struct nk_color top, struct nk_color right, struct nk_color bottom);
+NK_API void nk_draw_list_fill_triangle(struct nk_draw_list*, struct nk_vec2 a, struct nk_vec2 b, struct nk_vec2 c, struct nk_color);
+NK_API void nk_draw_list_fill_circle(struct nk_draw_list*, struct nk_vec2 center, float radius, struct nk_color col, unsigned int segs);
+NK_API void nk_draw_list_fill_poly_convex(struct nk_draw_list*, const struct nk_vec2 *points, const unsigned int count, struct nk_color, enum nk_anti_aliasing);
+
+/* misc */
+NK_API void nk_draw_list_add_image(struct nk_draw_list*, struct nk_image texture, struct nk_rect rect, struct nk_color);
+NK_API void nk_draw_list_add_text(struct nk_draw_list*, const struct nk_user_font*, struct nk_rect, const char *text, int len, float font_height, struct nk_color);
+#ifdef NK_INCLUDE_COMMAND_USERDATA
+NK_API void nk_draw_list_push_userdata(struct nk_draw_list*, nk_handle userdata);
+#endif
+
+#endif
+
+/* ===============================================================
+ *
+ * GUI
+ *
+ * ===============================================================*/
+enum nk_style_item_type {
+ NK_STYLE_ITEM_COLOR,
+ NK_STYLE_ITEM_IMAGE
+};
+
+union nk_style_item_data {
+ struct nk_image image;
+ struct nk_color color;
+};
+
+struct nk_style_item {
+ enum nk_style_item_type type;
+ union nk_style_item_data data;
+};
+
+struct nk_style_text {
+ struct nk_color color;
+ struct nk_vec2 padding;
+};
+
+struct nk_style_button {
+ /* background */
+ struct nk_style_item normal;
+ struct nk_style_item hover;
+ struct nk_style_item active;
+ struct nk_color border_color;
+
+ /* text */
+ struct nk_color text_background;
+ struct nk_color text_normal;
+ struct nk_color text_hover;
+ struct nk_color text_active;
+ nk_flags text_alignment;
+
+ /* properties */
+ float border;
+ float rounding;
+ struct nk_vec2 padding;
+ struct nk_vec2 image_padding;
+ struct nk_vec2 touch_padding;
+
+ /* optional user callbacks */
+ nk_handle userdata;
+ void(*draw_begin)(struct nk_command_buffer*, nk_handle userdata);
+ void(*draw_end)(struct nk_command_buffer*, nk_handle userdata);
+};
+
+struct nk_style_toggle {
+ /* background */
+ struct nk_style_item normal;
+ struct nk_style_item hover;
+ struct nk_style_item active;
+ struct nk_color border_color;
+
+ /* cursor */
+ struct nk_style_item cursor_normal;
+ struct nk_style_item cursor_hover;
+
+ /* text */
+ struct nk_color text_normal;
+ struct nk_color text_hover;
+ struct nk_color text_active;
+ struct nk_color text_background;
+ nk_flags text_alignment;
+
+ /* properties */
+ struct nk_vec2 padding;
+ struct nk_vec2 touch_padding;
+ float spacing;
+ float border;
+
+ /* optional user callbacks */
+ nk_handle userdata;
+ void(*draw_begin)(struct nk_command_buffer*, nk_handle);
+ void(*draw_end)(struct nk_command_buffer*, nk_handle);
+};
+
+struct nk_style_selectable {
+ /* background (inactive) */
+ struct nk_style_item normal;
+ struct nk_style_item hover;
+ struct nk_style_item pressed;
+
+ /* background (active) */
+ struct nk_style_item normal_active;
+ struct nk_style_item hover_active;
+ struct nk_style_item pressed_active;
+
+ /* text color (inactive) */
+ struct nk_color text_normal;
+ struct nk_color text_hover;
+ struct nk_color text_pressed;
+
+ /* text color (active) */
+ struct nk_color text_normal_active;
+ struct nk_color text_hover_active;
+ struct nk_color text_pressed_active;
+ struct nk_color text_background;
+ nk_flags text_alignment;
+
+ /* properties */
+ float rounding;
+ struct nk_vec2 padding;
+ struct nk_vec2 touch_padding;
+ struct nk_vec2 image_padding;
+
+ /* optional user callbacks */
+ nk_handle userdata;
+ void(*draw_begin)(struct nk_command_buffer*, nk_handle);
+ void(*draw_end)(struct nk_command_buffer*, nk_handle);
+};
+
+struct nk_style_slider {
+ /* background */
+ struct nk_style_item normal;
+ struct nk_style_item hover;
+ struct nk_style_item active;
+ struct nk_color border_color;
+
+ /* background bar */
+ struct nk_color bar_normal;
+ struct nk_color bar_hover;
+ struct nk_color bar_active;
+ struct nk_color bar_filled;
+
+ /* cursor */
+ struct nk_style_item cursor_normal;
+ struct nk_style_item cursor_hover;
+ struct nk_style_item cursor_active;
+
+ /* properties */
+ float border;
+ float rounding;
+ float bar_height;
+ struct nk_vec2 padding;
+ struct nk_vec2 spacing;
+ struct nk_vec2 cursor_size;
+
+ /* optional buttons */
+ int show_buttons;
+ struct nk_style_button inc_button;
+ struct nk_style_button dec_button;
+ enum nk_symbol_type inc_symbol;
+ enum nk_symbol_type dec_symbol;
+
+ /* optional user callbacks */
+ nk_handle userdata;
+ void(*draw_begin)(struct nk_command_buffer*, nk_handle);
+ void(*draw_end)(struct nk_command_buffer*, nk_handle);
+};
+
+struct nk_style_progress {
+ /* background */
+ struct nk_style_item normal;
+ struct nk_style_item hover;
+ struct nk_style_item active;
+ struct nk_color border_color;
+
+ /* cursor */
+ struct nk_style_item cursor_normal;
+ struct nk_style_item cursor_hover;
+ struct nk_style_item cursor_active;
+ struct nk_color cursor_border_color;
+
+ /* properties */
+ float rounding;
+ float border;
+ float cursor_border;
+ float cursor_rounding;
+ struct nk_vec2 padding;
+
+ /* optional user callbacks */
+ nk_handle userdata;
+ void(*draw_begin)(struct nk_command_buffer*, nk_handle);
+ void(*draw_end)(struct nk_command_buffer*, nk_handle);
+};
+
+struct nk_style_scrollbar {
+ /* background */
+ struct nk_style_item normal;
+ struct nk_style_item hover;
+ struct nk_style_item active;
+ struct nk_color border_color;
+
+ /* cursor */
+ struct nk_style_item cursor_normal;
+ struct nk_style_item cursor_hover;
+ struct nk_style_item cursor_active;
+ struct nk_color cursor_border_color;
+
+ /* properties */
+ float border;
+ float rounding;
+ float border_cursor;
+ float rounding_cursor;
+ struct nk_vec2 padding;
+
+ /* optional buttons */
+ int show_buttons;
+ struct nk_style_button inc_button;
+ struct nk_style_button dec_button;
+ enum nk_symbol_type inc_symbol;
+ enum nk_symbol_type dec_symbol;
+
+ /* optional user callbacks */
+ nk_handle userdata;
+ void(*draw_begin)(struct nk_command_buffer*, nk_handle);
+ void(*draw_end)(struct nk_command_buffer*, nk_handle);
+};
+
+struct nk_style_edit {
+ /* background */
+ struct nk_style_item normal;
+ struct nk_style_item hover;
+ struct nk_style_item active;
+ struct nk_color border_color;
+ struct nk_style_scrollbar scrollbar;
+
+ /* cursor */
+ struct nk_color cursor_normal;
+ struct nk_color cursor_hover;
+ struct nk_color cursor_text_normal;
+ struct nk_color cursor_text_hover;
+
+ /* text (unselected) */
+ struct nk_color text_normal;
+ struct nk_color text_hover;
+ struct nk_color text_active;
+
+ /* text (selected) */
+ struct nk_color selected_normal;
+ struct nk_color selected_hover;
+ struct nk_color selected_text_normal;
+ struct nk_color selected_text_hover;
+
+ /* properties */
+ float border;
+ float rounding;
+ float cursor_size;
+ struct nk_vec2 scrollbar_size;
+ struct nk_vec2 padding;
+ float row_padding;
+};
+
+struct nk_style_property {
+ /* background */
+ struct nk_style_item normal;
+ struct nk_style_item hover;
+ struct nk_style_item active;
+ struct nk_color border_color;
+
+ /* text */
+ struct nk_color label_normal;
+ struct nk_color label_hover;
+ struct nk_color label_active;
+
+ /* symbols */
+ enum nk_symbol_type sym_left;
+ enum nk_symbol_type sym_right;
+
+ /* properties */
+ float border;
+ float rounding;
+ struct nk_vec2 padding;
+
+ struct nk_style_edit edit;
+ struct nk_style_button inc_button;
+ struct nk_style_button dec_button;
+
+ /* optional user callbacks */
+ nk_handle userdata;
+ void(*draw_begin)(struct nk_command_buffer*, nk_handle);
+ void(*draw_end)(struct nk_command_buffer*, nk_handle);
+};
+
+struct nk_style_chart {
+ /* colors */
+ struct nk_style_item background;
+ struct nk_color border_color;
+ struct nk_color selected_color;
+ struct nk_color color;
+
+ /* properties */
+ float border;
+ float rounding;
+ struct nk_vec2 padding;
+};
+
+struct nk_style_combo {
+ /* background */
+ struct nk_style_item normal;
+ struct nk_style_item hover;
+ struct nk_style_item active;
+ struct nk_color border_color;
+
+ /* label */
+ struct nk_color label_normal;
+ struct nk_color label_hover;
+ struct nk_color label_active;
+
+ /* symbol */
+ struct nk_color symbol_normal;
+ struct nk_color symbol_hover;
+ struct nk_color symbol_active;
+
+ /* button */
+ struct nk_style_button button;
+ enum nk_symbol_type sym_normal;
+ enum nk_symbol_type sym_hover;
+ enum nk_symbol_type sym_active;
+
+ /* properties */
+ float border;
+ float rounding;
+ struct nk_vec2 content_padding;
+ struct nk_vec2 button_padding;
+ struct nk_vec2 spacing;
+};
+
+struct nk_style_tab {
+ /* background */
+ struct nk_style_item background;
+ struct nk_color border_color;
+ struct nk_color text;
+
+ /* button */
+ struct nk_style_button tab_maximize_button;
+ struct nk_style_button tab_minimize_button;
+ struct nk_style_button node_maximize_button;
+ struct nk_style_button node_minimize_button;
+ enum nk_symbol_type sym_minimize;
+ enum nk_symbol_type sym_maximize;
+
+ /* properties */
+ float border;
+ float rounding;
+ float indent;
+ struct nk_vec2 padding;
+ struct nk_vec2 spacing;
+};
+
+enum nk_style_header_align {
+ NK_HEADER_LEFT,
+ NK_HEADER_RIGHT
+};
+struct nk_style_window_header {
+ /* background */
+ struct nk_style_item normal;
+ struct nk_style_item hover;
+ struct nk_style_item active;
+
+ /* button */
+ struct nk_style_button close_button;
+ struct nk_style_button minimize_button;
+ enum nk_symbol_type close_symbol;
+ enum nk_symbol_type minimize_symbol;
+ enum nk_symbol_type maximize_symbol;
+
+ /* title */
+ struct nk_color label_normal;
+ struct nk_color label_hover;
+ struct nk_color label_active;
+
+ /* properties */
+ enum nk_style_header_align align;
+ struct nk_vec2 padding;
+ struct nk_vec2 label_padding;
+ struct nk_vec2 spacing;
+};
+
+struct nk_style_window {
+ struct nk_style_window_header header;
+ struct nk_style_item fixed_background;
+ struct nk_color background;
+
+ struct nk_color border_color;
+ struct nk_color popup_border_color;
+ struct nk_color combo_border_color;
+ struct nk_color contextual_border_color;
+ struct nk_color menu_border_color;
+ struct nk_color group_border_color;
+ struct nk_color tooltip_border_color;
+ struct nk_style_item scaler;
+
+ float border;
+ float combo_border;
+ float contextual_border;
+ float menu_border;
+ float group_border;
+ float tooltip_border;
+ float popup_border;
+ float min_row_height_padding;
+
+ float rounding;
+ struct nk_vec2 spacing;
+ struct nk_vec2 scrollbar_size;
+ struct nk_vec2 min_size;
+
+ struct nk_vec2 padding;
+ struct nk_vec2 group_padding;
+ struct nk_vec2 popup_padding;
+ struct nk_vec2 combo_padding;
+ struct nk_vec2 contextual_padding;
+ struct nk_vec2 menu_padding;
+ struct nk_vec2 tooltip_padding;
+};
+
+struct nk_style {
+ const struct nk_user_font *font;
+ const struct nk_cursor *cursors[NK_CURSOR_COUNT];
+ const struct nk_cursor *cursor_active;
+ struct nk_cursor *cursor_last;
+ int cursor_visible;
+
+ struct nk_style_text text;
+ struct nk_style_button button;
+ struct nk_style_button contextual_button;
+ struct nk_style_button menu_button;
+ struct nk_style_toggle option;
+ struct nk_style_toggle checkbox;
+ struct nk_style_selectable selectable;
+ struct nk_style_slider slider;
+ struct nk_style_progress progress;
+ struct nk_style_property property;
+ struct nk_style_edit edit;
+ struct nk_style_chart chart;
+ struct nk_style_scrollbar scrollh;
+ struct nk_style_scrollbar scrollv;
+ struct nk_style_tab tab;
+ struct nk_style_combo combo;
+ struct nk_style_window window;
+};
+
+NK_API struct nk_style_item nk_style_item_image(struct nk_image img);
+NK_API struct nk_style_item nk_style_item_color(struct nk_color);
+NK_API struct nk_style_item nk_style_item_hide(void);
+
+/*==============================================================
+ * PANEL
+ * =============================================================*/
+#ifndef NK_MAX_LAYOUT_ROW_TEMPLATE_COLUMNS
+#define NK_MAX_LAYOUT_ROW_TEMPLATE_COLUMNS 16
+#endif
+#ifndef NK_CHART_MAX_SLOT
+#define NK_CHART_MAX_SLOT 4
+#endif
+
+enum nk_panel_type {
+ NK_PANEL_NONE = 0,
+ NK_PANEL_WINDOW = NK_FLAG(0),
+ NK_PANEL_GROUP = NK_FLAG(1),
+ NK_PANEL_POPUP = NK_FLAG(2),
+ NK_PANEL_CONTEXTUAL = NK_FLAG(4),
+ NK_PANEL_COMBO = NK_FLAG(5),
+ NK_PANEL_MENU = NK_FLAG(6),
+ NK_PANEL_TOOLTIP = NK_FLAG(7)
+};
+enum nk_panel_set {
+ NK_PANEL_SET_NONBLOCK = NK_PANEL_CONTEXTUAL|NK_PANEL_COMBO|NK_PANEL_MENU|NK_PANEL_TOOLTIP,
+ NK_PANEL_SET_POPUP = NK_PANEL_SET_NONBLOCK|NK_PANEL_POPUP,
+ NK_PANEL_SET_SUB = NK_PANEL_SET_POPUP|NK_PANEL_GROUP
+};
+
+struct nk_chart_slot {
+ enum nk_chart_type type;
+ struct nk_color color;
+ struct nk_color highlight;
+ float min, max, range;
+ int count;
+ struct nk_vec2 last;
+ int index;
+};
+
+struct nk_chart {
+ int slot;
+ float x, y, w, h;
+ struct nk_chart_slot slots[NK_CHART_MAX_SLOT];
+};
+
+enum nk_panel_row_layout_type {
+ NK_LAYOUT_DYNAMIC_FIXED = 0,
+ NK_LAYOUT_DYNAMIC_ROW,
+ NK_LAYOUT_DYNAMIC_FREE,
+ NK_LAYOUT_DYNAMIC,
+ NK_LAYOUT_STATIC_FIXED,
+ NK_LAYOUT_STATIC_ROW,
+ NK_LAYOUT_STATIC_FREE,
+ NK_LAYOUT_STATIC,
+ NK_LAYOUT_TEMPLATE,
+ NK_LAYOUT_COUNT
+};
+struct nk_row_layout {
+ enum nk_panel_row_layout_type type;
+ int index;
+ float height;
+ float min_height;
+ int columns;
+ const float *ratio;
+ float item_width;
+ float item_height;
+ float item_offset;
+ float filled;
+ struct nk_rect item;
+ int tree_depth;
+ float templates[NK_MAX_LAYOUT_ROW_TEMPLATE_COLUMNS];
+};
+
+struct nk_popup_buffer {
+ nk_size begin;
+ nk_size parent;
+ nk_size last;
+ nk_size end;
+ int active;
+};
+
+struct nk_menu_state {
+ float x, y, w, h;
+ struct nk_scroll offset;
+};
+
+struct nk_panel {
+ enum nk_panel_type type;
+ nk_flags flags;
+ struct nk_rect bounds;
+ nk_uint *offset_x;
+ nk_uint *offset_y;
+ float at_x, at_y, max_x;
+ float footer_height;
+ float header_height;
+ float border;
+ unsigned int has_scrolling;
+ struct nk_rect clip;
+ struct nk_menu_state menu;
+ struct nk_row_layout row;
+ struct nk_chart chart;
+ struct nk_command_buffer *buffer;
+ struct nk_panel *parent;
+};
+
+/*==============================================================
+ * WINDOW
+ * =============================================================*/
+#ifndef NK_WINDOW_MAX_NAME
+#define NK_WINDOW_MAX_NAME 64
+#endif
+
+struct nk_table;
+enum nk_window_flags {
+ NK_WINDOW_PRIVATE = NK_FLAG(11),
+ NK_WINDOW_DYNAMIC = NK_WINDOW_PRIVATE,
+ /* special window type that grows in height while being filled, up to a certain maximum height */
+ NK_WINDOW_ROM = NK_FLAG(12),
+ /* sets window widgets into a read only mode and does not allow input changes */
+ NK_WINDOW_NOT_INTERACTIVE = NK_WINDOW_ROM|NK_WINDOW_NO_INPUT,
+ /* prevents all interaction caused by input to either window or widgets inside */
+ NK_WINDOW_HIDDEN = NK_FLAG(13),
+ /* Hides window and stops any window interaction and drawing */
+ NK_WINDOW_CLOSED = NK_FLAG(14),
+ /* Directly closes and frees the window at the end of the frame */
+ NK_WINDOW_MINIMIZED = NK_FLAG(15),
+ /* marks the window as minimized */
+ NK_WINDOW_REMOVE_ROM = NK_FLAG(16)
+ /* Removes read only mode at the end of the window */
+};
+
+struct nk_popup_state {
+ struct nk_window *win;
+ enum nk_panel_type type;
+ struct nk_popup_buffer buf;
+ nk_hash name;
+ int active;
+ unsigned combo_count;
+ unsigned con_count, con_old;
+ unsigned active_con;
+ struct nk_rect header;
+};
+
+struct nk_edit_state {
+ nk_hash name;
+ unsigned int seq;
+ unsigned int old;
+ int active, prev;
+ int cursor;
+ int sel_start;
+ int sel_end;
+ struct nk_scroll scrollbar;
+ unsigned char mode;
+ unsigned char single_line;
+};
+
+struct nk_property_state {
+ int active, prev;
+ char buffer[NK_MAX_NUMBER_BUFFER];
+ int length;
+ int cursor;
+ int select_start;
+ int select_end;
+ nk_hash name;
+ unsigned int seq;
+ unsigned int old;
+ int state;
+};
+
+struct nk_window {
+ unsigned int seq;
+ nk_hash name;
+ char name_string[NK_WINDOW_MAX_NAME];
+ nk_flags flags;
+
+ struct nk_rect bounds;
+ struct nk_scroll scrollbar;
+ struct nk_command_buffer buffer;
+ struct nk_panel *layout;
+ float scrollbar_hiding_timer;
+
+ /* persistent widget state */
+ struct nk_property_state property;
+ struct nk_popup_state popup;
+ struct nk_edit_state edit;
+ unsigned int scrolled;
+
+ struct nk_table *tables;
+ unsigned int table_count;
+
+ /* window list hooks */
+ struct nk_window *next;
+ struct nk_window *prev;
+ struct nk_window *parent;
+};
+
+/*==============================================================
+ * STACK
+ * =============================================================*/
+/* The style modifier stack can be used to temporarily change a
+ * property inside `nk_style`. For example, if you want a special
+ * red button you can temporarily push the old button color onto a stack,
+ * draw the button with a red color, and then pop the old color
+ * back from the stack:
+ *
+ * nk_style_push_style_item(ctx, &ctx->style.button.normal, nk_style_item_color(nk_rgb(255,0,0)));
+ * nk_style_push_style_item(ctx, &ctx->style.button.hover, nk_style_item_color(nk_rgb(255,0,0)));
+ * nk_style_push_style_item(ctx, &ctx->style.button.active, nk_style_item_color(nk_rgb(255,0,0)));
+ * nk_style_push_vec2(ctx, &cx->style.button.padding, nk_vec2(2,2));
+ *
+ * nk_button(...);
+ *
+ * nk_style_pop_style_item(ctx);
+ * nk_style_pop_style_item(ctx);
+ * nk_style_pop_style_item(ctx);
+ * nk_style_pop_vec2(ctx);
+ *
+ * Nuklear has a stack for style_items, float properties, vector properties,
+ * flags, colors, fonts and button_behavior. Each has its own fixed size
+ * stack whose size can be changed at compile time.
+ */
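+
+/* The same pattern works for the other stacks, e.g. float properties
+ * (a sketch assuming the `nk_style_push_float`/`nk_style_pop_float` and
+ * `nk_button_label` declarations found elsewhere in this header):
+ *
+ *      nk_style_push_float(ctx, &ctx->style.button.rounding, 0.0f);
+ *      nk_button_label(ctx, "sharp corners");
+ *      nk_style_pop_float(ctx);
+ */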
+#ifndef NK_BUTTON_BEHAVIOR_STACK_SIZE
+#define NK_BUTTON_BEHAVIOR_STACK_SIZE 8
+#endif
+
+#ifndef NK_FONT_STACK_SIZE
+#define NK_FONT_STACK_SIZE 8
+#endif
+
+#ifndef NK_STYLE_ITEM_STACK_SIZE
+#define NK_STYLE_ITEM_STACK_SIZE 16
+#endif
+
+#ifndef NK_FLOAT_STACK_SIZE
+#define NK_FLOAT_STACK_SIZE 32
+#endif
+
+#ifndef NK_VECTOR_STACK_SIZE
+#define NK_VECTOR_STACK_SIZE 16
+#endif
+
+#ifndef NK_FLAGS_STACK_SIZE
+#define NK_FLAGS_STACK_SIZE 32
+#endif
+
+#ifndef NK_COLOR_STACK_SIZE
+#define NK_COLOR_STACK_SIZE 32
+#endif
+
+#define NK_CONFIGURATION_STACK_TYPE(prefix, name, type)\
+ struct nk_config_stack_##name##_element {\
+ prefix##_##type *address;\
+ prefix##_##type old_value;\
+ }
+#define NK_CONFIG_STACK(type,size)\
+ struct nk_config_stack_##type {\
+ int head;\
+ struct nk_config_stack_##type##_element elements[size];\
+ }
+
+#define nk_float float
+NK_CONFIGURATION_STACK_TYPE(struct nk, style_item, style_item);
+NK_CONFIGURATION_STACK_TYPE(nk ,float, float);
+NK_CONFIGURATION_STACK_TYPE(struct nk, vec2, vec2);
+NK_CONFIGURATION_STACK_TYPE(nk ,flags, flags);
+NK_CONFIGURATION_STACK_TYPE(struct nk, color, color);
+NK_CONFIGURATION_STACK_TYPE(const struct nk, user_font, user_font*);
+NK_CONFIGURATION_STACK_TYPE(enum nk, button_behavior, button_behavior);
+
+NK_CONFIG_STACK(style_item, NK_STYLE_ITEM_STACK_SIZE);
+NK_CONFIG_STACK(float, NK_FLOAT_STACK_SIZE);
+NK_CONFIG_STACK(vec2, NK_VECTOR_STACK_SIZE);
+NK_CONFIG_STACK(flags, NK_FLAGS_STACK_SIZE);
+NK_CONFIG_STACK(color, NK_COLOR_STACK_SIZE);
+NK_CONFIG_STACK(user_font, NK_FONT_STACK_SIZE);
+NK_CONFIG_STACK(button_behavior, NK_BUTTON_BEHAVIOR_STACK_SIZE);
+
+struct nk_configuration_stacks {
+ struct nk_config_stack_style_item style_items;
+ struct nk_config_stack_float floats;
+ struct nk_config_stack_vec2 vectors;
+ struct nk_config_stack_flags flags;
+ struct nk_config_stack_color colors;
+ struct nk_config_stack_user_font fonts;
+ struct nk_config_stack_button_behavior button_behaviors;
+};
+
+/*==============================================================
+ * CONTEXT
+ * =============================================================*/
+#define NK_VALUE_PAGE_CAPACITY \
+ (((NK_MAX(sizeof(struct nk_window),sizeof(struct nk_panel)) / sizeof(nk_uint))) / 2)
+
+struct nk_table {
+ unsigned int seq;
+ unsigned int size;
+ nk_hash keys[NK_VALUE_PAGE_CAPACITY];
+ nk_uint values[NK_VALUE_PAGE_CAPACITY];
+ struct nk_table *next, *prev;
+};
+
+union nk_page_data {
+ struct nk_table tbl;
+ struct nk_panel pan;
+ struct nk_window win;
+};
+
+struct nk_page_element {
+ union nk_page_data data;
+ struct nk_page_element *next;
+ struct nk_page_element *prev;
+};
+
+struct nk_page {
+ unsigned int size;
+ struct nk_page *next;
+ struct nk_page_element win[1];
+};
+
+struct nk_pool {
+ struct nk_allocator alloc;
+ enum nk_allocation_type type;
+ unsigned int page_count;
+ struct nk_page *pages;
+ struct nk_page_element *freelist;
+ unsigned capacity;
+ nk_size size;
+ nk_size cap;
+};
+
+struct nk_context {
+/* public: can be accessed freely */
+ struct nk_input input;
+ struct nk_style style;
+ struct nk_buffer memory;
+ struct nk_clipboard clip;
+ nk_flags last_widget_state;
+ enum nk_button_behavior button_behavior;
+ struct nk_configuration_stacks stacks;
+ float delta_time_seconds;
+
+/* private:
+ should only be accessed if you
+ know what you are doing */
+#ifdef NK_INCLUDE_VERTEX_BUFFER_OUTPUT
+ struct nk_draw_list draw_list;
+#endif
+#ifdef NK_INCLUDE_COMMAND_USERDATA
+ nk_handle userdata;
+#endif
+ /* text editor objects are quite big because of an internal
+ * undo/redo stack. Therefore it does not make sense to have one for
+ * each window for temporary use cases, so I only provide *one* instance
+ * for all windows. This works because the content is cleared anyway */
+ struct nk_text_edit text_edit;
+ /* draw buffer used for overlay drawing operation like cursor */
+ struct nk_command_buffer overlay;
+
+ /* windows */
+ int build;
+ int use_pool;
+ struct nk_pool pool;
+ struct nk_window *begin;
+ struct nk_window *end;
+ struct nk_window *active;
+ struct nk_window *current;
+ struct nk_page_element *freelist;
+ unsigned int count;
+ unsigned int seq;
+};
+
+/* ==============================================================
+ * MATH
+ * =============================================================== */
+#define NK_PI 3.141592654f
+#define NK_UTF_INVALID 0xFFFD
+#define NK_MAX_FLOAT_PRECISION 2
+
+#define NK_UNUSED(x) ((void)(x))
+#define NK_SATURATE(x) (NK_MAX(0, NK_MIN(1.0f, x)))
+#define NK_LEN(a) (sizeof(a)/sizeof(a)[0])
+#define NK_ABS(a) (((a) < 0) ? -(a) : (a))
+#define NK_BETWEEN(x, a, b) ((a) <= (x) && (x) < (b))
+#define NK_INBOX(px, py, x, y, w, h)\
+ (NK_BETWEEN(px,x,x+w) && NK_BETWEEN(py,y,y+h))
+#define NK_INTERSECT(x0, y0, w0, h0, x1, y1, w1, h1) \
+ (!(((x1 > (x0 + w0)) || ((x1 + w1) < x0) || (y1 > (y0 + h0)) || (y1 + h1) < y0)))
+#define NK_CONTAINS(x, y, w, h, bx, by, bw, bh)\
+ (NK_INBOX(x,y, bx, by, bw, bh) && NK_INBOX(x+w,y+h, bx, by, bw, bh))
+
+#define nk_vec2_sub(a, b) nk_vec2((a).x - (b).x, (a).y - (b).y)
+#define nk_vec2_add(a, b) nk_vec2((a).x + (b).x, (a).y + (b).y)
+#define nk_vec2_len_sqr(a) ((a).x*(a).x+(a).y*(a).y)
+#define nk_vec2_muls(a, t) nk_vec2((a).x * (t), (a).y * (t))
+
+#define nk_ptr_add(t, p, i) ((t*)((void*)((nk_byte*)(p) + (i))))
+#define nk_ptr_add_const(t, p, i) ((const t*)((const void*)((const nk_byte*)(p) + (i))))
+#define nk_zero_struct(s) nk_zero(&s, sizeof(s))
+
+/* ==============================================================
+ * ALIGNMENT
+ * =============================================================== */
+/* Pointer to Integer type conversion for pointer alignment */
+#if defined(__PTRDIFF_TYPE__) /* This case should work for GCC*/
+# define NK_UINT_TO_PTR(x) ((void*)(__PTRDIFF_TYPE__)(x))
+# define NK_PTR_TO_UINT(x) ((nk_size)(__PTRDIFF_TYPE__)(x))
+#elif !defined(__GNUC__) /* works for compilers other than LLVM */
+# define NK_UINT_TO_PTR(x) ((void*)&((char*)0)[x])
+# define NK_PTR_TO_UINT(x) ((nk_size)(((char*)x)-(char*)0))
+#elif defined(NK_USE_FIXED_TYPES) /* used if we have <stdint.h> */
+# define NK_UINT_TO_PTR(x) ((void*)(uintptr_t)(x))
+# define NK_PTR_TO_UINT(x) ((uintptr_t)(x))
+#else /* generates warning but works */
+# define NK_UINT_TO_PTR(x) ((void*)(x))
+# define NK_PTR_TO_UINT(x) ((nk_size)(x))
+#endif
+
+#define NK_ALIGN_PTR(x, mask)\
+ (NK_UINT_TO_PTR((NK_PTR_TO_UINT((nk_byte*)(x) + (mask-1)) & ~(mask-1))))
+#define NK_ALIGN_PTR_BACK(x, mask)\
+ (NK_UINT_TO_PTR((NK_PTR_TO_UINT((nk_byte*)(x)) & ~(mask-1))))
+
+#define NK_OFFSETOF(st,m) ((nk_ptr)&(((st*)0)->m))
+#define NK_CONTAINER_OF(ptr,type,member)\
+ (type*)((void*)((char*)(1 ? (ptr): &((type*)0)->member) - NK_OFFSETOF(type, member)))
+
+#ifdef __cplusplus
+}
+#endif
+
+#ifdef __cplusplus
+template<typename T> struct nk_alignof;
+template<typename T, int size_diff> struct nk_helper{enum {value = size_diff};};
+template<typename T> struct nk_helper<T,0>{enum {value = nk_alignof<T>::value};};
+template<typename T> struct nk_alignof{struct Big {T x; char c;}; enum {
+ diff = sizeof(Big) - sizeof(T), value = nk_helper<Big, diff>::value};};
+#define NK_ALIGNOF(t) (nk_alignof<t>::value)
+#elif defined(_MSC_VER)
+#define NK_ALIGNOF(t) (__alignof(t))
+#else
+#define NK_ALIGNOF(t) ((char*)(&((struct {char c; t _h;}*)0)->_h) - (char*)0)
+#endif
+
+#endif /* NK_NUKLEAR_H_ */
+
+#ifdef NK_IMPLEMENTATION
+
+#ifndef NK_INTERNAL_H
+#define NK_INTERNAL_H
+
+#ifndef NK_POOL_DEFAULT_CAPACITY
+#define NK_POOL_DEFAULT_CAPACITY 16
+#endif
+
+#ifndef NK_DEFAULT_COMMAND_BUFFER_SIZE
+#define NK_DEFAULT_COMMAND_BUFFER_SIZE (4*1024)
+#endif
+
+#ifndef NK_BUFFER_DEFAULT_INITIAL_SIZE
+#define NK_BUFFER_DEFAULT_INITIAL_SIZE (4*1024)
+#endif
+
+/* standard library headers */
+#ifdef NK_INCLUDE_DEFAULT_ALLOCATOR
+#include <stdlib.h> /* malloc, free */
+#endif
+#ifdef NK_INCLUDE_STANDARD_IO
+#include <stdio.h> /* fopen, fclose,... */
+#endif
+#ifndef NK_ASSERT
+#include <assert.h>
+#define NK_ASSERT(expr) assert(expr)
+#endif
+
+#ifndef NK_MEMSET
+#define NK_MEMSET nk_memset
+#endif
+#ifndef NK_MEMCPY
+#define NK_MEMCPY nk_memcopy
+#endif
+#ifndef NK_SQRT
+#define NK_SQRT nk_sqrt
+#endif
+#ifndef NK_SIN
+#define NK_SIN nk_sin
+#endif
+#ifndef NK_COS
+#define NK_COS nk_cos
+#endif
+#ifndef NK_STRTOD
+#define NK_STRTOD nk_strtod
+#endif
+#ifndef NK_DTOA
+#define NK_DTOA nk_dtoa
+#endif
+
+#define NK_DEFAULT (-1)
+
+#ifndef NK_VSNPRINTF
+/* If your compiler supports `vsnprintf` I would highly recommend
+ * defining this to vsnprintf instead, since `vsprintf` is basically
+ * unbelievably unsafe and should *NEVER* be used. But I have to support
+ * it since C89 only provides this unsafe version. */
+ #if (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)) ||\
+ (defined(__cplusplus) && (__cplusplus >= 201103L)) || \
+ (defined(_POSIX_C_SOURCE) && (_POSIX_C_SOURCE >= 200112L)) ||\
+ (defined(_XOPEN_SOURCE) && (_XOPEN_SOURCE >= 500)) ||\
+ defined(_ISOC99_SOURCE) || defined(_BSD_SOURCE)
+ #define NK_VSNPRINTF(s,n,f,a) vsnprintf(s,n,f,a)
+ #else
+ #define NK_VSNPRINTF(s,n,f,a) vsprintf(s,f,a)
+ #endif
+#endif
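+
+/* For example, a build that knows `vsnprintf` is available (even under an
+ * otherwise C89 compiler) can select it explicitly before including this
+ * header with NK_IMPLEMENTATION defined. This is hypothetical user code,
+ * not part of the header itself, and the include path is just illustrative:
+ *
+ *      #define NK_VSNPRINTF(s,n,f,a) vsnprintf(s,n,f,a)
+ *      #define NK_IMPLEMENTATION
+ *      #include "nuklear.h"
+ */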
+
+#define NK_SCHAR_MIN (-127)
+#define NK_SCHAR_MAX 127
+#define NK_UCHAR_MIN 0
+#define NK_UCHAR_MAX 255
+#define NK_SSHORT_MIN (-32767)
+#define NK_SSHORT_MAX 32767
+#define NK_USHORT_MIN 0
+#define NK_USHORT_MAX 65535
+#define NK_SINT_MIN (-2147483647)
+#define NK_SINT_MAX 2147483647
+#define NK_UINT_MIN 0
+#define NK_UINT_MAX 4294967295u
+
+/* Make sure the types have the correct size:
+ * these asserts will fire with a negative subscript error if the type sizes
+ * are set incorrectly by the compiler, and compile out to nothing if not */
+NK_STATIC_ASSERT(sizeof(nk_size) >= sizeof(void*));
+NK_STATIC_ASSERT(sizeof(nk_ptr) == sizeof(void*));
+NK_STATIC_ASSERT(sizeof(nk_flags) >= 4);
+NK_STATIC_ASSERT(sizeof(nk_rune) >= 4);
+NK_STATIC_ASSERT(sizeof(nk_ushort) == 2);
+NK_STATIC_ASSERT(sizeof(nk_short) == 2);
+NK_STATIC_ASSERT(sizeof(nk_uint) == 4);
+NK_STATIC_ASSERT(sizeof(nk_int) == 4);
+NK_STATIC_ASSERT(sizeof(nk_byte) == 1);
+
+NK_GLOBAL const struct nk_rect nk_null_rect = {-8192.0f, -8192.0f, 16384, 16384};
+#define NK_FLOAT_PRECISION 0.00000000000001
+
+NK_GLOBAL const struct nk_color nk_red = {255,0,0,255};
+NK_GLOBAL const struct nk_color nk_green = {0,255,0,255};
+NK_GLOBAL const struct nk_color nk_blue = {0,0,255,255};
+NK_GLOBAL const struct nk_color nk_white = {255,255,255,255};
+NK_GLOBAL const struct nk_color nk_black = {0,0,0,255};
+NK_GLOBAL const struct nk_color nk_yellow = {255,255,0,255};
+
+/* widget */
+#define nk_widget_state_reset(s)\
+ if ((*(s)) & NK_WIDGET_STATE_MODIFIED)\
+ (*(s)) = NK_WIDGET_STATE_INACTIVE|NK_WIDGET_STATE_MODIFIED;\
+ else (*(s)) = NK_WIDGET_STATE_INACTIVE;
+
+/* math */
+NK_LIB float nk_inv_sqrt(float n);
+NK_LIB float nk_sqrt(float x);
+NK_LIB float nk_sin(float x);
+NK_LIB float nk_cos(float x);
+NK_LIB nk_uint nk_round_up_pow2(nk_uint v);
+NK_LIB struct nk_rect nk_shrink_rect(struct nk_rect r, float amount);
+NK_LIB struct nk_rect nk_pad_rect(struct nk_rect r, struct nk_vec2 pad);
+NK_LIB void nk_unify(struct nk_rect *clip, const struct nk_rect *a, float x0, float y0, float x1, float y1);
+NK_LIB double nk_pow(double x, int n);
+NK_LIB int nk_ifloord(double x);
+NK_LIB int nk_ifloorf(float x);
+NK_LIB int nk_iceilf(float x);
+NK_LIB int nk_log10(double n);
+
+/* util */
+enum {NK_DO_NOT_STOP_ON_NEW_LINE, NK_STOP_ON_NEW_LINE};
+NK_LIB int nk_is_lower(int c);
+NK_LIB int nk_is_upper(int c);
+NK_LIB int nk_to_upper(int c);
+NK_LIB int nk_to_lower(int c);
+NK_LIB void* nk_memcopy(void *dst, const void *src, nk_size n);
+NK_LIB void nk_memset(void *ptr, int c0, nk_size size);
+NK_LIB void nk_zero(void *ptr, nk_size size);
+NK_LIB char *nk_itoa(char *s, long n);
+NK_LIB int nk_string_float_limit(char *string, int prec);
+NK_LIB char *nk_dtoa(char *s, double n);
+NK_LIB int nk_text_clamp(const struct nk_user_font *font, const char *text, int text_len, float space, int *glyphs, float *text_width, nk_rune *sep_list, int sep_count);
+NK_LIB struct nk_vec2 nk_text_calculate_text_bounds(const struct nk_user_font *font, const char *begin, int byte_len, float row_height, const char **remaining, struct nk_vec2 *out_offset, int *glyphs, int op);
+#ifdef NK_INCLUDE_STANDARD_VARARGS
+NK_LIB int nk_strfmt(char *buf, int buf_size, const char *fmt, va_list args);
+#endif
+#ifdef NK_INCLUDE_STANDARD_IO
+NK_LIB char *nk_file_load(const char* path, nk_size* siz, struct nk_allocator *alloc);
+#endif
+
+/* buffer */
+#ifdef NK_INCLUDE_DEFAULT_ALLOCATOR
+NK_LIB void* nk_malloc(nk_handle unused, void *old,nk_size size);
+NK_LIB void nk_mfree(nk_handle unused, void *ptr);
+#endif
+NK_LIB void* nk_buffer_align(void *unaligned, nk_size align, nk_size *alignment, enum nk_buffer_allocation_type type);
+NK_LIB void* nk_buffer_alloc(struct nk_buffer *b, enum nk_buffer_allocation_type type, nk_size size, nk_size align);
+NK_LIB void* nk_buffer_realloc(struct nk_buffer *b, nk_size capacity, nk_size *size);
+
+/* draw */
+NK_LIB void nk_command_buffer_init(struct nk_command_buffer *cb, struct nk_buffer *b, enum nk_command_clipping clip);
+NK_LIB void nk_command_buffer_reset(struct nk_command_buffer *b);
+NK_LIB void* nk_command_buffer_push(struct nk_command_buffer* b, enum nk_command_type t, nk_size size);
+NK_LIB void nk_draw_symbol(struct nk_command_buffer *out, enum nk_symbol_type type, struct nk_rect content, struct nk_color background, struct nk_color foreground, float border_width, const struct nk_user_font *font);
+
+/* buffering */
+NK_LIB void nk_start_buffer(struct nk_context *ctx, struct nk_command_buffer *b);
+NK_LIB void nk_start(struct nk_context *ctx, struct nk_window *win);
+NK_LIB void nk_start_popup(struct nk_context *ctx, struct nk_window *win);
+NK_LIB void nk_finish_popup(struct nk_context *ctx, struct nk_window*);
+NK_LIB void nk_finish_buffer(struct nk_context *ctx, struct nk_command_buffer *b);
+NK_LIB void nk_finish(struct nk_context *ctx, struct nk_window *w);
+NK_LIB void nk_build(struct nk_context *ctx);
+
+/* text editor */
+NK_LIB void nk_textedit_clear_state(struct nk_text_edit *state, enum nk_text_edit_type type, nk_plugin_filter filter);
+NK_LIB void nk_textedit_click(struct nk_text_edit *state, float x, float y, const struct nk_user_font *font, float row_height);
+NK_LIB void nk_textedit_drag(struct nk_text_edit *state, float x, float y, const struct nk_user_font *font, float row_height);
+NK_LIB void nk_textedit_key(struct nk_text_edit *state, enum nk_keys key, int shift_mod, const struct nk_user_font *font, float row_height);
+
+/* window */
+enum nk_window_insert_location {
+ NK_INSERT_BACK, /* inserts window into the back of list (front of screen) */
+ NK_INSERT_FRONT /* inserts window into the front of list (back of screen) */
+};
+NK_LIB void *nk_create_window(struct nk_context *ctx);
+NK_LIB void nk_remove_window(struct nk_context*, struct nk_window*);
+NK_LIB void nk_free_window(struct nk_context *ctx, struct nk_window *win);
+NK_LIB struct nk_window *nk_find_window(struct nk_context *ctx, nk_hash hash, const char *name);
+NK_LIB void nk_insert_window(struct nk_context *ctx, struct nk_window *win, enum nk_window_insert_location loc);
+
+/* pool */
+NK_LIB void nk_pool_init(struct nk_pool *pool, struct nk_allocator *alloc, unsigned int capacity);
+NK_LIB void nk_pool_free(struct nk_pool *pool);
+NK_LIB void nk_pool_init_fixed(struct nk_pool *pool, void *memory, nk_size size);
+NK_LIB struct nk_page_element *nk_pool_alloc(struct nk_pool *pool);
+
+/* page-element */
+NK_LIB struct nk_page_element* nk_create_page_element(struct nk_context *ctx);
+NK_LIB void nk_link_page_element_into_freelist(struct nk_context *ctx, struct nk_page_element *elem);
+NK_LIB void nk_free_page_element(struct nk_context *ctx, struct nk_page_element *elem);
+
+/* table */
+NK_LIB struct nk_table* nk_create_table(struct nk_context *ctx);
+NK_LIB void nk_remove_table(struct nk_window *win, struct nk_table *tbl);
+NK_LIB void nk_free_table(struct nk_context *ctx, struct nk_table *tbl);
+NK_LIB void nk_push_table(struct nk_window *win, struct nk_table *tbl);
+NK_LIB nk_uint *nk_add_value(struct nk_context *ctx, struct nk_window *win, nk_hash name, nk_uint value);
+NK_LIB nk_uint *nk_find_value(struct nk_window *win, nk_hash name);
+
+/* panel */
+NK_LIB void *nk_create_panel(struct nk_context *ctx);
+NK_LIB void nk_free_panel(struct nk_context*, struct nk_panel *pan);
+NK_LIB int nk_panel_has_header(nk_flags flags, const char *title);
+NK_LIB struct nk_vec2 nk_panel_get_padding(const struct nk_style *style, enum nk_panel_type type);
+NK_LIB float nk_panel_get_border(const struct nk_style *style, nk_flags flags, enum nk_panel_type type);
+NK_LIB struct nk_color nk_panel_get_border_color(const struct nk_style *style, enum nk_panel_type type);
+NK_LIB int nk_panel_is_sub(enum nk_panel_type type);
+NK_LIB int nk_panel_is_nonblock(enum nk_panel_type type);
+NK_LIB int nk_panel_begin(struct nk_context *ctx, const char *title, enum nk_panel_type panel_type);
+NK_LIB void nk_panel_end(struct nk_context *ctx);
+
+/* layout */
+NK_LIB float nk_layout_row_calculate_usable_space(const struct nk_style *style, enum nk_panel_type type, float total_space, int columns);
+NK_LIB void nk_panel_layout(const struct nk_context *ctx, struct nk_window *win, float height, int cols);
+NK_LIB void nk_row_layout(struct nk_context *ctx, enum nk_layout_format fmt, float height, int cols, int width);
+NK_LIB void nk_panel_alloc_row(const struct nk_context *ctx, struct nk_window *win);
+NK_LIB void nk_layout_widget_space(struct nk_rect *bounds, const struct nk_context *ctx, struct nk_window *win, int modify);
+NK_LIB void nk_panel_alloc_space(struct nk_rect *bounds, const struct nk_context *ctx);
+NK_LIB void nk_layout_peek(struct nk_rect *bounds, struct nk_context *ctx);
+
+/* popup */
+NK_LIB int nk_nonblock_begin(struct nk_context *ctx, nk_flags flags, struct nk_rect body, struct nk_rect header, enum nk_panel_type panel_type);
+
+/* text */
+struct nk_text {
+ struct nk_vec2 padding;
+ struct nk_color background;
+ struct nk_color text;
+};
+NK_LIB void nk_widget_text(struct nk_command_buffer *o, struct nk_rect b, const char *string, int len, const struct nk_text *t, nk_flags a, const struct nk_user_font *f);
+NK_LIB void nk_widget_text_wrap(struct nk_command_buffer *o, struct nk_rect b, const char *string, int len, const struct nk_text *t, const struct nk_user_font *f);
+
+/* button */
+NK_LIB int nk_button_behavior(nk_flags *state, struct nk_rect r, const struct nk_input *i, enum nk_button_behavior behavior);
+NK_LIB const struct nk_style_item* nk_draw_button(struct nk_command_buffer *out, const struct nk_rect *bounds, nk_flags state, const struct nk_style_button *style);
+NK_LIB int nk_do_button(nk_flags *state, struct nk_command_buffer *out, struct nk_rect r, const struct nk_style_button *style, const struct nk_input *in, enum nk_button_behavior behavior, struct nk_rect *content);
+NK_LIB void nk_draw_button_text(struct nk_command_buffer *out, const struct nk_rect *bounds, const struct nk_rect *content, nk_flags state, const struct nk_style_button *style, const char *txt, int len, nk_flags text_alignment, const struct nk_user_font *font);
+NK_LIB int nk_do_button_text(nk_flags *state, struct nk_command_buffer *out, struct nk_rect bounds, const char *string, int len, nk_flags align, enum nk_button_behavior behavior, const struct nk_style_button *style, const struct nk_input *in, const struct nk_user_font *font);
+NK_LIB void nk_draw_button_symbol(struct nk_command_buffer *out, const struct nk_rect *bounds, const struct nk_rect *content, nk_flags state, const struct nk_style_button *style, enum nk_symbol_type type, const struct nk_user_font *font);
+NK_LIB int nk_do_button_symbol(nk_flags *state, struct nk_command_buffer *out, struct nk_rect bounds, enum nk_symbol_type symbol, enum nk_button_behavior behavior, const struct nk_style_button *style, const struct nk_input *in, const struct nk_user_font *font);
+NK_LIB void nk_draw_button_image(struct nk_command_buffer *out, const struct nk_rect *bounds, const struct nk_rect *content, nk_flags state, const struct nk_style_button *style, const struct nk_image *img);
+NK_LIB int nk_do_button_image(nk_flags *state, struct nk_command_buffer *out, struct nk_rect bounds, struct nk_image img, enum nk_button_behavior b, const struct nk_style_button *style, const struct nk_input *in);
+NK_LIB void nk_draw_button_text_symbol(struct nk_command_buffer *out, const struct nk_rect *bounds, const struct nk_rect *label, const struct nk_rect *symbol, nk_flags state, const struct nk_style_button *style, const char *str, int len, enum nk_symbol_type type, const struct nk_user_font *font);
+NK_LIB int nk_do_button_text_symbol(nk_flags *state, struct nk_command_buffer *out, struct nk_rect bounds, enum nk_symbol_type symbol, const char *str, int len, nk_flags align, enum nk_button_behavior behavior, const struct nk_style_button *style, const struct nk_user_font *font, const struct nk_input *in);
+NK_LIB void nk_draw_button_text_image(struct nk_command_buffer *out, const struct nk_rect *bounds, const struct nk_rect *label, const struct nk_rect *image, nk_flags state, const struct nk_style_button *style, const char *str, int len, const struct nk_user_font *font, const struct nk_image *img);
+NK_LIB int nk_do_button_text_image(nk_flags *state, struct nk_command_buffer *out, struct nk_rect bounds, struct nk_image img, const char* str, int len, nk_flags align, enum nk_button_behavior behavior, const struct nk_style_button *style, const struct nk_user_font *font, const struct nk_input *in);
+
+/* toggle */
+enum nk_toggle_type {
+ NK_TOGGLE_CHECK,
+ NK_TOGGLE_OPTION
+};
+NK_LIB int nk_toggle_behavior(const struct nk_input *in, struct nk_rect select, nk_flags *state, int active);
+NK_LIB void nk_draw_checkbox(struct nk_command_buffer *out, nk_flags state, const struct nk_style_toggle *style, int active, const struct nk_rect *label, const struct nk_rect *selector, const struct nk_rect *cursors, const char *string, int len, const struct nk_user_font *font);
+NK_LIB void nk_draw_option(struct nk_command_buffer *out, nk_flags state, const struct nk_style_toggle *style, int active, const struct nk_rect *label, const struct nk_rect *selector, const struct nk_rect *cursors, const char *string, int len, const struct nk_user_font *font);
+NK_LIB int nk_do_toggle(nk_flags *state, struct nk_command_buffer *out, struct nk_rect r, int *active, const char *str, int len, enum nk_toggle_type type, const struct nk_style_toggle *style, const struct nk_input *in, const struct nk_user_font *font);
+
+/* progress */
+NK_LIB nk_size nk_progress_behavior(nk_flags *state, struct nk_input *in, struct nk_rect r, struct nk_rect cursor, nk_size max, nk_size value, int modifiable);
+NK_LIB void nk_draw_progress(struct nk_command_buffer *out, nk_flags state, const struct nk_style_progress *style, const struct nk_rect *bounds, const struct nk_rect *scursor, nk_size value, nk_size max);
+NK_LIB nk_size nk_do_progress(nk_flags *state, struct nk_command_buffer *out, struct nk_rect bounds, nk_size value, nk_size max, int modifiable, const struct nk_style_progress *style, struct nk_input *in);
+
+/* slider */
+NK_LIB float nk_slider_behavior(nk_flags *state, struct nk_rect *logical_cursor, struct nk_rect *visual_cursor, struct nk_input *in, struct nk_rect bounds, float slider_min, float slider_max, float slider_value, float slider_step, float slider_steps);
+NK_LIB void nk_draw_slider(struct nk_command_buffer *out, nk_flags state, const struct nk_style_slider *style, const struct nk_rect *bounds, const struct nk_rect *visual_cursor, float min, float value, float max);
+NK_LIB float nk_do_slider(nk_flags *state, struct nk_command_buffer *out, struct nk_rect bounds, float min, float val, float max, float step, const struct nk_style_slider *style, struct nk_input *in, const struct nk_user_font *font);
+
+/* scrollbar */
+NK_LIB float nk_scrollbar_behavior(nk_flags *state, struct nk_input *in, int has_scrolling, const struct nk_rect *scroll, const struct nk_rect *cursor, const struct nk_rect *empty0, const struct nk_rect *empty1, float scroll_offset, float target, float scroll_step, enum nk_orientation o);
+NK_LIB void nk_draw_scrollbar(struct nk_command_buffer *out, nk_flags state, const struct nk_style_scrollbar *style, const struct nk_rect *bounds, const struct nk_rect *scroll);
+NK_LIB float nk_do_scrollbarv(nk_flags *state, struct nk_command_buffer *out, struct nk_rect scroll, int has_scrolling, float offset, float target, float step, float button_pixel_inc, const struct nk_style_scrollbar *style, struct nk_input *in, const struct nk_user_font *font);
+NK_LIB float nk_do_scrollbarh(nk_flags *state, struct nk_command_buffer *out, struct nk_rect scroll, int has_scrolling, float offset, float target, float step, float button_pixel_inc, const struct nk_style_scrollbar *style, struct nk_input *in, const struct nk_user_font *font);
+
+/* selectable */
+NK_LIB void nk_draw_selectable(struct nk_command_buffer *out, nk_flags state, const struct nk_style_selectable *style, int active, const struct nk_rect *bounds, const struct nk_rect *icon, const struct nk_image *img, enum nk_symbol_type sym, const char *string, int len, nk_flags align, const struct nk_user_font *font);
+NK_LIB int nk_do_selectable(nk_flags *state, struct nk_command_buffer *out, struct nk_rect bounds, const char *str, int len, nk_flags align, int *value, const struct nk_style_selectable *style, const struct nk_input *in, const struct nk_user_font *font);
+NK_LIB int nk_do_selectable_image(nk_flags *state, struct nk_command_buffer *out, struct nk_rect bounds, const char *str, int len, nk_flags align, int *value, const struct nk_image *img, const struct nk_style_selectable *style, const struct nk_input *in, const struct nk_user_font *font);
+
+/* edit */
+NK_LIB void nk_edit_draw_text(struct nk_command_buffer *out, const struct nk_style_edit *style, float pos_x, float pos_y, float x_offset, const char *text, int byte_len, float row_height, const struct nk_user_font *font, struct nk_color background, struct nk_color foreground, int is_selected);
+NK_LIB nk_flags nk_do_edit(nk_flags *state, struct nk_command_buffer *out, struct nk_rect bounds, nk_flags flags, nk_plugin_filter filter, struct nk_text_edit *edit, const struct nk_style_edit *style, struct nk_input *in, const struct nk_user_font *font);
+
+/* color-picker */
+NK_LIB int nk_color_picker_behavior(nk_flags *state, const struct nk_rect *bounds, const struct nk_rect *matrix, const struct nk_rect *hue_bar, const struct nk_rect *alpha_bar, struct nk_colorf *color, const struct nk_input *in);
+NK_LIB void nk_draw_color_picker(struct nk_command_buffer *o, const struct nk_rect *matrix, const struct nk_rect *hue_bar, const struct nk_rect *alpha_bar, struct nk_colorf col);
+NK_LIB int nk_do_color_picker(nk_flags *state, struct nk_command_buffer *out, struct nk_colorf *col, enum nk_color_format fmt, struct nk_rect bounds, struct nk_vec2 padding, const struct nk_input *in, const struct nk_user_font *font);
+
+/* property */
+enum nk_property_status {
+ NK_PROPERTY_DEFAULT,
+ NK_PROPERTY_EDIT,
+ NK_PROPERTY_DRAG
+};
+enum nk_property_filter {
+ NK_FILTER_INT,
+ NK_FILTER_FLOAT
+};
+enum nk_property_kind {
+ NK_PROPERTY_INT,
+ NK_PROPERTY_FLOAT,
+ NK_PROPERTY_DOUBLE
+};
+union nk_property {
+ int i;
+ float f;
+ double d;
+};
+struct nk_property_variant {
+ enum nk_property_kind kind;
+ union nk_property value;
+ union nk_property min_value;
+ union nk_property max_value;
+ union nk_property step;
+};
+NK_LIB struct nk_property_variant nk_property_variant_int(int value, int min_value, int max_value, int step);
+NK_LIB struct nk_property_variant nk_property_variant_float(float value, float min_value, float max_value, float step);
+NK_LIB struct nk_property_variant nk_property_variant_double(double value, double min_value, double max_value, double step);
+
+NK_LIB void nk_drag_behavior(nk_flags *state, const struct nk_input *in, struct nk_rect drag, struct nk_property_variant *variant, float inc_per_pixel);
+NK_LIB void nk_property_behavior(nk_flags *ws, const struct nk_input *in, struct nk_rect property, struct nk_rect label, struct nk_rect edit, struct nk_rect empty, int *state, struct nk_property_variant *variant, float inc_per_pixel);
+NK_LIB void nk_draw_property(struct nk_command_buffer *out, const struct nk_style_property *style, const struct nk_rect *bounds, const struct nk_rect *label, nk_flags state, const char *name, int len, const struct nk_user_font *font);
+NK_LIB void nk_do_property(nk_flags *ws, struct nk_command_buffer *out, struct nk_rect property, const char *name, struct nk_property_variant *variant, float inc_per_pixel, char *buffer, int *len, int *state, int *cursor, int *select_begin, int *select_end, const struct nk_style_property *style, enum nk_property_filter filter, struct nk_input *in, const struct nk_user_font *font, struct nk_text_edit *text_edit, enum nk_button_behavior behavior);
+NK_LIB void nk_property(struct nk_context *ctx, const char *name, struct nk_property_variant *variant, float inc_per_pixel, const enum nk_property_filter filter);
+
+#endif
+
+
+
+
+
+/* ===============================================================
+ *
+ * MATH
+ *
+ * ===============================================================*/
+/* Since nuklear is supposed to work on every system that provides floating
+   point math without any dependencies, I also had to implement my own math
+   functions for sqrt, sin and cos. Because the highly accurate standard
+   library implementations are quite complex and I do not need high precision
+   for my use cases, I use approximations instead.
+
+   Sqrt
+   ----
+   For the square root nuklear uses the famous fast inverse square root:
+   https://en.wikipedia.org/wiki/Fast_inverse_square_root with a
+   slightly tweaked magic constant. While it is probably not faster on
+   today's hardware, it is still fast and accurate enough for
+   nuklear's use cases. IMPORTANT: this requires the IEEE 754 float format.
+
+   Sine/Cosine
+   -----------
+   All constants inside both functions were generated with Remez minimax
+   approximations over the value range 0...2*PI. I chose to approximate
+   exactly that range because nuklear only needs sine and cosine to generate
+   circles, which requires only that range. In addition I used Remez instead
+   of Taylor for additional precision:
+   www.lolengine.net/blog/2011/12/21/better-function-approximations.
+
+   The tool used to generate the constants for both sine and cosine
+   (it can actually approximate many more functions) can be
+   found here: www.lolengine.net/wiki/oss/lolremez
+*/
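+/* Illustrative sketch (editorial addition, not part of the original nuklear
+   sources): the identity sqrt(x) = x * invsqrt(x) and the Remez-generated
+   sine/cosine below are only expected to be accurate on the ranges nuklear
+   itself uses (roughly 0...2*PI for circle tessellation). The hypothetical
+   helper example_circle_points shows how circle points could be built from
+   nk_sin/nk_cos; it is disabled by default. */
+#if 0
+static void
+example_circle_points(struct nk_vec2 *pts, int count,
+    float cx, float cy, float radius)
+{
+    /* walk the angle range 0...2*PI, the only range the approximations cover */
+    int i;
+    for (i = 0; i < count; ++i) {
+        float a = ((float)i / (float)count) * 6.2831853f;
+        pts[i] = nk_vec2(cx + radius * nk_cos(a), cy + radius * nk_sin(a));
+    }
+}
+#endif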
+NK_LIB float
+nk_inv_sqrt(float n)
+{
+ float x2;
+ const float threehalfs = 1.5f;
+ union {nk_uint i; float f;} conv = {0};
+ conv.f = n;
+ x2 = n * 0.5f;
+ conv.i = 0x5f375A84 - (conv.i >> 1);
+ conv.f = conv.f * (threehalfs - (x2 * conv.f * conv.f));
+ return conv.f;
+}
+NK_LIB float
+nk_sqrt(float x)
+{
+ return x * nk_inv_sqrt(x);
+}
+NK_LIB float
+nk_sin(float x)
+{
+ NK_STORAGE const float a0 = +1.91059300966915117e-31f;
+ NK_STORAGE const float a1 = +1.00086760103908896f;
+ NK_STORAGE const float a2 = -1.21276126894734565e-2f;
+ NK_STORAGE const float a3 = -1.38078780785773762e-1f;
+ NK_STORAGE const float a4 = -2.67353392911981221e-2f;
+ NK_STORAGE const float a5 = +2.08026600266304389e-2f;
+ NK_STORAGE const float a6 = -3.03996055049204407e-3f;
+ NK_STORAGE const float a7 = +1.38235642404333740e-4f;
+ return a0 + x*(a1 + x*(a2 + x*(a3 + x*(a4 + x*(a5 + x*(a6 + x*a7))))));
+}
+NK_LIB float
+nk_cos(float x)
+{
+ /* New implementation. Also generated using lolremez. */
+ /* Old version significantly deviated from expected results. */
+ NK_STORAGE const float a0 = 9.9995999154986614e-1f;
+ NK_STORAGE const float a1 = 1.2548995793001028e-3f;
+ NK_STORAGE const float a2 = -5.0648546280678015e-1f;
+ NK_STORAGE const float a3 = 1.2942246466519995e-2f;
+ NK_STORAGE const float a4 = 2.8668384702547972e-2f;
+ NK_STORAGE const float a5 = 7.3726485210586547e-3f;
+ NK_STORAGE const float a6 = -3.8510875386947414e-3f;
+ NK_STORAGE const float a7 = 4.7196604604366623e-4f;
+ NK_STORAGE const float a8 = -1.8776444013090451e-5f;
+ return a0 + x*(a1 + x*(a2 + x*(a3 + x*(a4 + x*(a5 + x*(a6 + x*(a7 + x*a8)))))));
+}
+NK_LIB nk_uint
+nk_round_up_pow2(nk_uint v)
+{
+ v--;
+ v |= v >> 1;
+ v |= v >> 2;
+ v |= v >> 4;
+ v |= v >> 8;
+ v |= v >> 16;
+ v++;
+ return v;
+}
+NK_LIB double
+nk_pow(double x, int n)
+{
+ /* check the sign of n */
+ double r = 1;
+ int plus = n >= 0;
+ n = (plus) ? n : -n;
+ while (n > 0) {
+ if ((n & 1) == 1)
+ r *= x;
+ n /= 2;
+ x *= x;
+ }
+ return plus ? r : 1.0 / r;
+}
+NK_LIB int
+nk_ifloord(double x)
+{
+ x = (double)((int)x - ((x < 0.0) ? 1 : 0));
+ return (int)x;
+}
+NK_LIB int
+nk_ifloorf(float x)
+{
+ x = (float)((int)x - ((x < 0.0f) ? 1 : 0));
+ return (int)x;
+}
+NK_LIB int
+nk_iceilf(float x)
+{
+ if (x >= 0) {
+ int i = (int)x;
+ return (x > i) ? i+1: i;
+ } else {
+ int t = (int)x;
+ float r = x - (float)t;
+ return (r > 0.0f) ? t+1: t;
+ }
+}
+NK_LIB int
+nk_log10(double n)
+{
+ int neg;
+ int ret;
+ int exp = 0;
+
+ neg = (n < 0) ? 1 : 0;
+ ret = (neg) ? (int)-n : (int)n;
+ while ((ret / 10) > 0) {
+ ret /= 10;
+ exp++;
+ }
+ if (neg) exp = -exp;
+ return exp;
+}
+NK_API struct nk_rect
+nk_get_null_rect(void)
+{
+ return nk_null_rect;
+}
+NK_API struct nk_rect
+nk_rect(float x, float y, float w, float h)
+{
+ struct nk_rect r;
+ r.x = x; r.y = y;
+ r.w = w; r.h = h;
+ return r;
+}
+NK_API struct nk_rect
+nk_recti(int x, int y, int w, int h)
+{
+ struct nk_rect r;
+ r.x = (float)x;
+ r.y = (float)y;
+ r.w = (float)w;
+ r.h = (float)h;
+ return r;
+}
+NK_API struct nk_rect
+nk_recta(struct nk_vec2 pos, struct nk_vec2 size)
+{
+ return nk_rect(pos.x, pos.y, size.x, size.y);
+}
+NK_API struct nk_rect
+nk_rectv(const float *r)
+{
+ return nk_rect(r[0], r[1], r[2], r[3]);
+}
+NK_API struct nk_rect
+nk_rectiv(const int *r)
+{
+ return nk_recti(r[0], r[1], r[2], r[3]);
+}
+NK_API struct nk_vec2
+nk_rect_pos(struct nk_rect r)
+{
+ struct nk_vec2 ret;
+ ret.x = r.x; ret.y = r.y;
+ return ret;
+}
+NK_API struct nk_vec2
+nk_rect_size(struct nk_rect r)
+{
+ struct nk_vec2 ret;
+ ret.x = r.w; ret.y = r.h;
+ return ret;
+}
+NK_LIB struct nk_rect
+nk_shrink_rect(struct nk_rect r, float amount)
+{
+ struct nk_rect res;
+ r.w = NK_MAX(r.w, 2 * amount);
+ r.h = NK_MAX(r.h, 2 * amount);
+ res.x = r.x + amount;
+ res.y = r.y + amount;
+ res.w = r.w - 2 * amount;
+ res.h = r.h - 2 * amount;
+ return res;
+}
+NK_LIB struct nk_rect
+nk_pad_rect(struct nk_rect r, struct nk_vec2 pad)
+{
+ r.w = NK_MAX(r.w, 2 * pad.x);
+ r.h = NK_MAX(r.h, 2 * pad.y);
+ r.x += pad.x; r.y += pad.y;
+ r.w -= 2 * pad.x;
+ r.h -= 2 * pad.y;
+ return r;
+}
+NK_API struct nk_vec2
+nk_vec2(float x, float y)
+{
+ struct nk_vec2 ret;
+ ret.x = x; ret.y = y;
+ return ret;
+}
+NK_API struct nk_vec2
+nk_vec2i(int x, int y)
+{
+ struct nk_vec2 ret;
+ ret.x = (float)x;
+ ret.y = (float)y;
+ return ret;
+}
+NK_API struct nk_vec2
+nk_vec2v(const float *v)
+{
+ return nk_vec2(v[0], v[1]);
+}
+NK_API struct nk_vec2
+nk_vec2iv(const int *v)
+{
+ return nk_vec2i(v[0], v[1]);
+}
+NK_LIB void
+nk_unify(struct nk_rect *clip, const struct nk_rect *a, float x0, float y0,
+ float x1, float y1)
+{
+ NK_ASSERT(a);
+ NK_ASSERT(clip);
+ clip->x = NK_MAX(a->x, x0);
+ clip->y = NK_MAX(a->y, y0);
+ clip->w = NK_MIN(a->x + a->w, x1) - clip->x;
+ clip->h = NK_MIN(a->y + a->h, y1) - clip->y;
+ clip->w = NK_MAX(0, clip->w);
+ clip->h = NK_MAX(0, clip->h);
+}
+
+NK_API void
+nk_triangle_from_direction(struct nk_vec2 *result, struct nk_rect r,
+ float pad_x, float pad_y, enum nk_heading direction)
+{
+ float w_half, h_half;
+ NK_ASSERT(result);
+
+ r.w = NK_MAX(2 * pad_x, r.w);
+ r.h = NK_MAX(2 * pad_y, r.h);
+ r.w = r.w - 2 * pad_x;
+ r.h = r.h - 2 * pad_y;
+
+ r.x = r.x + pad_x;
+ r.y = r.y + pad_y;
+
+ w_half = r.w / 2.0f;
+ h_half = r.h / 2.0f;
+
+ if (direction == NK_UP) {
+ result[0] = nk_vec2(r.x + w_half, r.y);
+ result[1] = nk_vec2(r.x + r.w, r.y + r.h);
+ result[2] = nk_vec2(r.x, r.y + r.h);
+ } else if (direction == NK_RIGHT) {
+ result[0] = nk_vec2(r.x, r.y);
+ result[1] = nk_vec2(r.x + r.w, r.y + h_half);
+ result[2] = nk_vec2(r.x, r.y + r.h);
+ } else if (direction == NK_DOWN) {
+ result[0] = nk_vec2(r.x, r.y);
+ result[1] = nk_vec2(r.x + r.w, r.y);
+ result[2] = nk_vec2(r.x + w_half, r.y + r.h);
+ } else {
+ result[0] = nk_vec2(r.x, r.y + h_half);
+ result[1] = nk_vec2(r.x + r.w, r.y);
+ result[2] = nk_vec2(r.x + r.w, r.y + r.h);
+ }
+}
+
+
+
+
+
+/* ===============================================================
+ *
+ * UTIL
+ *
+ * ===============================================================*/
+NK_INTERN int nk_str_match_here(const char *regexp, const char *text);
+NK_INTERN int nk_str_match_star(int c, const char *regexp, const char *text);
+NK_LIB int nk_is_lower(int c) {return (c >= 'a' && c <= 'z') || (c >= 0xE0 && c <= 0xFF);}
+NK_LIB int nk_is_upper(int c){return (c >= 'A' && c <= 'Z') || (c >= 0xC0 && c <= 0xDF);}
+NK_LIB int nk_to_upper(int c) {return (c >= 'a' && c <= 'z') ? (c - ('a' - 'A')) : c;}
+NK_LIB int nk_to_lower(int c) {return (c >= 'A' && c <= 'Z') ? (c + ('a' - 'A')) : c;}
+
+NK_LIB void*
+nk_memcopy(void *dst0, const void *src0, nk_size length)
+{
+ nk_ptr t;
+ char *dst = (char*)dst0;
+ const char *src = (const char*)src0;
+ if (length == 0 || dst == src)
+ goto done;
+
+ #define nk_word int
+ #define nk_wsize sizeof(nk_word)
+ #define nk_wmask (nk_wsize-1)
+ #define NK_TLOOP(s) if (t) NK_TLOOP1(s)
+ #define NK_TLOOP1(s) do { s; } while (--t)
+
+ if (dst < src) {
+ t = (nk_ptr)src; /* only need low bits */
+ if ((t | (nk_ptr)dst) & nk_wmask) {
+ if ((t ^ (nk_ptr)dst) & nk_wmask || length < nk_wsize)
+ t = length;
+ else
+ t = nk_wsize - (t & nk_wmask);
+ length -= t;
+ NK_TLOOP1(*dst++ = *src++);
+ }
+ t = length / nk_wsize;
+ NK_TLOOP(*(nk_word*)(void*)dst = *(const nk_word*)(const void*)src;
+ src += nk_wsize; dst += nk_wsize);
+ t = length & nk_wmask;
+ NK_TLOOP(*dst++ = *src++);
+ } else {
+ src += length;
+ dst += length;
+ t = (nk_ptr)src;
+ if ((t | (nk_ptr)dst) & nk_wmask) {
+ if ((t ^ (nk_ptr)dst) & nk_wmask || length <= nk_wsize)
+ t = length;
+ else
+ t &= nk_wmask;
+ length -= t;
+ NK_TLOOP1(*--dst = *--src);
+ }
+ t = length / nk_wsize;
+ NK_TLOOP(src -= nk_wsize; dst -= nk_wsize;
+ *(nk_word*)(void*)dst = *(const nk_word*)(const void*)src);
+ t = length & nk_wmask;
+ NK_TLOOP(*--dst = *--src);
+ }
+ #undef nk_word
+ #undef nk_wsize
+ #undef nk_wmask
+ #undef NK_TLOOP
+ #undef NK_TLOOP1
+done:
+ return (dst0);
+}
+NK_LIB void
+nk_memset(void *ptr, int c0, nk_size size)
+{
+ #define nk_word unsigned
+ #define nk_wsize sizeof(nk_word)
+ #define nk_wmask (nk_wsize - 1)
+ nk_byte *dst = (nk_byte*)ptr;
+ unsigned c = 0;
+ nk_size t = 0;
+
+ if ((c = (nk_byte)c0) != 0) {
+ c = (c << 8) | c; /* at least 16-bits */
+ if (sizeof(unsigned int) > 2)
+            c = (c << 16) | c; /* at least 32-bits */
+ }
+
+    /* size too small for word-wise fill: fill byte by byte */
+ dst = (nk_byte*)ptr;
+ if (size < 3 * nk_wsize) {
+ while (size--) *dst++ = (nk_byte)c0;
+ return;
+ }
+
+ /* align destination */
+ if ((t = NK_PTR_TO_UINT(dst) & nk_wmask) != 0) {
+ t = nk_wsize -t;
+ size -= t;
+ do {
+ *dst++ = (nk_byte)c0;
+ } while (--t != 0);
+ }
+
+ /* fill word */
+ t = size / nk_wsize;
+ do {
+ *(nk_word*)((void*)dst) = c;
+ dst += nk_wsize;
+ } while (--t != 0);
+
+ /* fill trailing bytes */
+ t = (size & nk_wmask);
+ if (t != 0) {
+ do {
+ *dst++ = (nk_byte)c0;
+ } while (--t != 0);
+ }
+
+ #undef nk_word
+ #undef nk_wsize
+ #undef nk_wmask
+}
+NK_LIB void
+nk_zero(void *ptr, nk_size size)
+{
+ NK_ASSERT(ptr);
+ NK_MEMSET(ptr, 0, size);
+}
+NK_API int
+nk_strlen(const char *str)
+{
+ int siz = 0;
+ NK_ASSERT(str);
+ while (str && *str++ != '\0') siz++;
+ return siz;
+}
+NK_API int
+nk_strtoi(const char *str, const char **endptr)
+{
+ int neg = 1;
+ const char *p = str;
+ int value = 0;
+
+ NK_ASSERT(str);
+ if (!str) return 0;
+
+ /* skip whitespace */
+ while (*p == ' ') p++;
+ if (*p == '-') {
+ neg = -1;
+ p++;
+ }
+ while (*p && *p >= '0' && *p <= '9') {
+ value = value * 10 + (int) (*p - '0');
+ p++;
+ }
+ if (endptr)
+ *endptr = p;
+ return neg*value;
+}
+NK_API double
+nk_strtod(const char *str, const char **endptr)
+{
+ double m;
+ double neg = 1.0;
+ const char *p = str;
+ double value = 0;
+ double number = 0;
+
+ NK_ASSERT(str);
+ if (!str) return 0;
+
+ /* skip whitespace */
+ while (*p == ' ') p++;
+ if (*p == '-') {
+ neg = -1.0;
+ p++;
+ }
+
+ while (*p && *p != '.' && *p != 'e') {
+ value = value * 10.0 + (double) (*p - '0');
+ p++;
+ }
+
+ if (*p == '.') {
+ p++;
+ for(m = 0.1; *p && *p != 'e'; p++ ) {
+ value = value + (double) (*p - '0') * m;
+ m *= 0.1;
+ }
+ }
+ if (*p == 'e') {
+ int i, pow, div;
+ p++;
+ if (*p == '-') {
+ div = nk_true;
+ p++;
+ } else if (*p == '+') {
+ div = nk_false;
+ p++;
+ } else div = nk_false;
+
+ for (pow = 0; *p; p++)
+ pow = pow * 10 + (int) (*p - '0');
+
+ for (m = 1.0, i = 0; i < pow; i++)
+ m *= 10.0;
+
+ if (div)
+ value /= m;
+ else value *= m;
+ }
+ number = value * neg;
+ if (endptr)
+ *endptr = p;
+ return number;
+}
+NK_API float
+nk_strtof(const char *str, const char **endptr)
+{
+ float float_value;
+ double double_value;
+ double_value = NK_STRTOD(str, endptr);
+ float_value = (float)double_value;
+ return float_value;
+}
+NK_API int
+nk_stricmp(const char *s1, const char *s2)
+{
+ nk_int c1,c2,d;
+ do {
+ c1 = *s1++;
+ c2 = *s2++;
+ d = c1 - c2;
+ while (d) {
+ if (c1 <= 'Z' && c1 >= 'A') {
+ d += ('a' - 'A');
+ if (!d) break;
+ }
+ if (c2 <= 'Z' && c2 >= 'A') {
+ d -= ('a' - 'A');
+ if (!d) break;
+ }
+ return ((d >= 0) << 1) - 1;
+ }
+ } while (c1);
+ return 0;
+}
+NK_API int
+nk_stricmpn(const char *s1, const char *s2, int n)
+{
+ int c1,c2,d;
+ NK_ASSERT(n >= 0);
+ do {
+ c1 = *s1++;
+ c2 = *s2++;
+ if (!n--) return 0;
+
+ d = c1 - c2;
+ while (d) {
+ if (c1 <= 'Z' && c1 >= 'A') {
+ d += ('a' - 'A');
+ if (!d) break;
+ }
+ if (c2 <= 'Z' && c2 >= 'A') {
+ d -= ('a' - 'A');
+ if (!d) break;
+ }
+ return ((d >= 0) << 1) - 1;
+ }
+ } while (c1);
+ return 0;
+}
+NK_INTERN int
+nk_str_match_here(const char *regexp, const char *text)
+{
+ if (regexp[0] == '\0')
+ return 1;
+ if (regexp[1] == '*')
+ return nk_str_match_star(regexp[0], regexp+2, text);
+ if (regexp[0] == '$' && regexp[1] == '\0')
+ return *text == '\0';
+ if (*text!='\0' && (regexp[0]=='.' || regexp[0]==*text))
+ return nk_str_match_here(regexp+1, text+1);
+ return 0;
+}
+NK_INTERN int
+nk_str_match_star(int c, const char *regexp, const char *text)
+{
+    do {/* a '*' matches zero or more instances */
+ if (nk_str_match_here(regexp, text))
+ return 1;
+ } while (*text != '\0' && (*text++ == c || c == '.'));
+ return 0;
+}
+NK_API int
+nk_strfilter(const char *text, const char *regexp)
+{
+ /*
+ c matches any literal character c
+ . matches any single character
+ ^ matches the beginning of the input string
+ $ matches the end of the input string
+ * matches zero or more occurrences of the previous character*/
+ if (regexp[0] == '^')
+ return nk_str_match_here(regexp+1, text);
+ do { /* must look even if string is empty */
+ if (nk_str_match_here(regexp, text))
+ return 1;
+ } while (*text++ != '\0');
+ return 0;
+}
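+/* Illustrative sketch (editorial addition, not part of the original nuklear
+   sources): a few example patterns for the tiny matcher above, following the
+   grammar documented in nk_strfilter. The helper name and inputs are made up
+   for illustration; the block is disabled by default. */
+#if 0
+static void
+example_strfilter_patterns(void)
+{
+    /* plain characters match a literal substring anywhere in the text */
+    int a = nk_strfilter("button_ok", "ok");      /* 1 */
+    /* '^' anchors at the beginning, '$' at the end */
+    int b = nk_strfilter("button_ok", "^button"); /* 1 */
+    int c = nk_strfilter("button_ok", "ok$");     /* 1 */
+    /* '.' matches any character, '*' zero or more of the previous one */
+    int d = nk_strfilter("button_ok", "^b.*k$");  /* 1 */
+    int e = nk_strfilter("button_ok", "^label");  /* 0 */
+    (void)a; (void)b; (void)c; (void)d; (void)e;
+}
+#endif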
+NK_API int
+nk_strmatch_fuzzy_text(const char *str, int str_len,
+ const char *pattern, int *out_score)
+{
+    /* Returns true if each character in pattern is found sequentially within str;
+     * if so, out_score is also set. The score value has no intrinsic meaning and
+     * its range varies with the pattern, so scores can only be compared for the
+     * same search pattern. (A short usage sketch follows this function.) */
+
+ /* bonus for adjacent matches */
+ #define NK_ADJACENCY_BONUS 5
+ /* bonus if match occurs after a separator */
+ #define NK_SEPARATOR_BONUS 10
+ /* bonus if match is uppercase and prev is lower */
+ #define NK_CAMEL_BONUS 10
+ /* penalty applied for every letter in str before the first match */
+ #define NK_LEADING_LETTER_PENALTY (-3)
+ /* maximum penalty for leading letters */
+ #define NK_MAX_LEADING_LETTER_PENALTY (-9)
+ /* penalty for every letter that doesn't matter */
+ #define NK_UNMATCHED_LETTER_PENALTY (-1)
+
+ /* loop variables */
+ int score = 0;
+ char const * pattern_iter = pattern;
+ int str_iter = 0;
+ int prev_matched = nk_false;
+ int prev_lower = nk_false;
+    /* starts true so that a match on the first letter gets the separator bonus */
+ int prev_separator = nk_true;
+
+ /* use "best" matched letter if multiple string letters match the pattern */
+ char const * best_letter = 0;
+ int best_letter_score = 0;
+
+ /* loop over strings */
+ NK_ASSERT(str);
+ NK_ASSERT(pattern);
+ if (!str || !str_len || !pattern) return 0;
+ while (str_iter < str_len)
+ {
+ const char pattern_letter = *pattern_iter;
+ const char str_letter = str[str_iter];
+
+ int next_match = *pattern_iter != '\0' &&
+ nk_to_lower(pattern_letter) == nk_to_lower(str_letter);
+ int rematch = best_letter && nk_to_upper(*best_letter) == nk_to_upper(str_letter);
+
+ int advanced = next_match && best_letter;
+ int pattern_repeat = best_letter && *pattern_iter != '\0';
+ pattern_repeat = pattern_repeat &&
+ nk_to_lower(*best_letter) == nk_to_lower(pattern_letter);
+
+ if (advanced || pattern_repeat) {
+ score += best_letter_score;
+ best_letter = 0;
+ best_letter_score = 0;
+ }
+
+ if (next_match || rematch)
+ {
+ int new_score = 0;
+ /* Apply penalty for each letter before the first pattern match */
+ if (pattern_iter == pattern) {
+ int count = (int)(&str[str_iter] - str);
+ int penalty = NK_LEADING_LETTER_PENALTY * count;
+ if (penalty < NK_MAX_LEADING_LETTER_PENALTY)
+ penalty = NK_MAX_LEADING_LETTER_PENALTY;
+
+ score += penalty;
+ }
+
+            /* apply bonus for consecutive matches */
+ if (prev_matched)
+ new_score += NK_ADJACENCY_BONUS;
+
+ /* apply bonus for matches after a separator */
+ if (prev_separator)
+ new_score += NK_SEPARATOR_BONUS;
+
+ /* apply bonus across camel case boundaries */
+ if (prev_lower && nk_is_upper(str_letter))
+ new_score += NK_CAMEL_BONUS;
+
+ /* update pattern iter IFF the next pattern letter was matched */
+ if (next_match)
+ ++pattern_iter;
+
+ /* update best letter in str which may be for a "next" letter or a rematch */
+ if (new_score >= best_letter_score) {
+                /* apply penalty for the previous best letter, which is now skipped */
+ if (best_letter != 0)
+ score += NK_UNMATCHED_LETTER_PENALTY;
+
+ best_letter = &str[str_iter];
+ best_letter_score = new_score;
+ }
+ prev_matched = nk_true;
+ } else {
+ score += NK_UNMATCHED_LETTER_PENALTY;
+ prev_matched = nk_false;
+ }
+
+        /* separator detection is deliberately simple: only '_' and ' ' count */
+ prev_lower = nk_is_lower(str_letter) != 0;
+ prev_separator = str_letter == '_' || str_letter == ' ';
+
+ ++str_iter;
+ }
+
+ /* apply score for last match */
+ if (best_letter)
+ score += best_letter_score;
+
+ /* did not match full pattern */
+ if (*pattern_iter != '\0')
+ return nk_false;
+
+ if (out_score)
+ *out_score = score;
+ return nk_true;
+}
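+/* Illustrative sketch (editorial addition, not part of the original nuklear
+   sources): since scores are only comparable for the same pattern, ranking
+   has to hold the pattern fixed. The hypothetical helper below picks the
+   better of two candidates for one search string; disabled by default. */
+#if 0
+static int
+example_pick_better_match(const char *candidate_a, const char *candidate_b,
+    const char *pattern)
+{
+    int score_a = 0, score_b = 0;
+    int hit_a = nk_strmatch_fuzzy_text(candidate_a, nk_strlen(candidate_a), pattern, &score_a);
+    int hit_b = nk_strmatch_fuzzy_text(candidate_b, nk_strlen(candidate_b), pattern, &score_b);
+    if (hit_a && hit_b) return (score_a >= score_b) ? 0 : 1; /* index of better candidate */
+    return hit_a ? 0 : (hit_b ? 1 : -1); /* -1: neither matched */
+}
+#endif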
+NK_API int
+nk_strmatch_fuzzy_string(char const *str, char const *pattern, int *out_score)
+{
+ return nk_strmatch_fuzzy_text(str, nk_strlen(str), pattern, out_score);
+}
+NK_LIB int
+nk_string_float_limit(char *string, int prec)
+{
+ int dot = 0;
+ char *c = string;
+ while (*c) {
+ if (*c == '.') {
+ dot = 1;
+ c++;
+ continue;
+ }
+ if (dot == (prec+1)) {
+ *c = 0;
+ break;
+ }
+ if (dot > 0) dot++;
+ c++;
+ }
+ return (int)(c - string);
+}
+NK_INTERN void
+nk_strrev_ascii(char *s)
+{
+ int len = nk_strlen(s);
+ int end = len / 2;
+ int i = 0;
+ char t;
+ for (; i < end; ++i) {
+ t = s[i];
+ s[i] = s[len - 1 - i];
+ s[len -1 - i] = t;
+ }
+}
+NK_LIB char*
+nk_itoa(char *s, long n)
+{
+ long i = 0;
+ if (n == 0) {
+ s[i++] = '0';
+ s[i] = 0;
+ return s;
+ }
+ if (n < 0) {
+ s[i++] = '-';
+ n = -n;
+ }
+ while (n > 0) {
+ s[i++] = (char)('0' + (n % 10));
+ n /= 10;
+ }
+ s[i] = 0;
+ if (s[0] == '-')
+ ++s;
+
+ nk_strrev_ascii(s);
+ return s;
+}
+NK_LIB char*
+nk_dtoa(char *s, double n)
+{
+ int useExp = 0;
+ int digit = 0, m = 0, m1 = 0;
+ char *c = s;
+ int neg = 0;
+
+ NK_ASSERT(s);
+ if (!s) return 0;
+
+ if (n == 0.0) {
+ s[0] = '0'; s[1] = '\0';
+ return s;
+ }
+
+ neg = (n < 0);
+ if (neg) n = -n;
+
+ /* calculate magnitude */
+ m = nk_log10(n);
+ useExp = (m >= 14 || (neg && m >= 9) || m <= -9);
+ if (neg) *(c++) = '-';
+
+ /* set up for scientific notation */
+ if (useExp) {
+ if (m < 0)
+ m -= 1;
+ n = n / (double)nk_pow(10.0, m);
+ m1 = m;
+ m = 0;
+ }
+ if (m < 1.0) {
+ m = 0;
+ }
+
+ /* convert the number */
+ while (n > NK_FLOAT_PRECISION || m >= 0) {
+ double weight = nk_pow(10.0, m);
+ if (weight > 0) {
+ double t = (double)n / weight;
+ digit = nk_ifloord(t);
+ n -= ((double)digit * weight);
+ *(c++) = (char)('0' + (char)digit);
+ }
+ if (m == 0 && n > 0)
+ *(c++) = '.';
+ m--;
+ }
+
+ if (useExp) {
+ /* convert the exponent */
+ int i, j;
+ *(c++) = 'e';
+ if (m1 > 0) {
+ *(c++) = '+';
+ } else {
+ *(c++) = '-';
+ m1 = -m1;
+ }
+ m = 0;
+ while (m1 > 0) {
+ *(c++) = (char)('0' + (char)(m1 % 10));
+ m1 /= 10;
+ m++;
+ }
+ c -= m;
+ for (i = 0, j = m-1; i<j; i++, j--) {
+ /* swap without temporary */
+ c[i] ^= c[j];
+ c[j] ^= c[i];
+ c[i] ^= c[j];
+ }
+ c += m;
+ }
+ *(c) = '\0';
+ return s;
+}
+#ifdef NK_INCLUDE_STANDARD_VARARGS
+#ifndef NK_INCLUDE_STANDARD_IO
+NK_INTERN int
+nk_vsnprintf(char *buf, int buf_size, const char *fmt, va_list args)
+{
+ enum nk_arg_type {
+ NK_ARG_TYPE_CHAR,
+ NK_ARG_TYPE_SHORT,
+ NK_ARG_TYPE_DEFAULT,
+ NK_ARG_TYPE_LONG
+ };
+ enum nk_arg_flags {
+ NK_ARG_FLAG_LEFT = 0x01,
+ NK_ARG_FLAG_PLUS = 0x02,
+ NK_ARG_FLAG_SPACE = 0x04,
+ NK_ARG_FLAG_NUM = 0x10,
+ NK_ARG_FLAG_ZERO = 0x20
+ };
+
+ char number_buffer[NK_MAX_NUMBER_BUFFER];
+ enum nk_arg_type arg_type = NK_ARG_TYPE_DEFAULT;
+ int precision = NK_DEFAULT;
+ int width = NK_DEFAULT;
+ nk_flags flag = 0;
+
+ int len = 0;
+ int result = -1;
+ const char *iter = fmt;
+
+ NK_ASSERT(buf);
+ NK_ASSERT(buf_size);
+ if (!buf || !buf_size || !fmt) return 0;
+ for (iter = fmt; *iter && len < buf_size; iter++) {
+ /* copy all non-format characters */
+ while (*iter && (*iter != '%') && (len < buf_size))
+ buf[len++] = *iter++;
+ if (!(*iter) || len >= buf_size) break;
+ iter++;
+
+ /* flag arguments */
+ while (*iter) {
+ if (*iter == '-') flag |= NK_ARG_FLAG_LEFT;
+ else if (*iter == '+') flag |= NK_ARG_FLAG_PLUS;
+ else if (*iter == ' ') flag |= NK_ARG_FLAG_SPACE;
+ else if (*iter == '#') flag |= NK_ARG_FLAG_NUM;
+ else if (*iter == '0') flag |= NK_ARG_FLAG_ZERO;
+ else break;
+ iter++;
+ }
+
+ /* width argument */
+ width = NK_DEFAULT;
+ if (*iter >= '1' && *iter <= '9') {
+ const char *end;
+ width = nk_strtoi(iter, &end);
+ if (end == iter)
+ width = -1;
+ else iter = end;
+ } else if (*iter == '*') {
+ width = va_arg(args, int);
+ iter++;
+ }
+
+ /* precision argument */
+ precision = NK_DEFAULT;
+ if (*iter == '.') {
+ iter++;
+ if (*iter == '*') {
+ precision = va_arg(args, int);
+ iter++;
+ } else {
+ const char *end;
+ precision = nk_strtoi(iter, &end);
+ if (end == iter)
+ precision = -1;
+ else iter = end;
+ }
+ }
+
+ /* length modifier */
+ if (*iter == 'h') {
+ if (*(iter+1) == 'h') {
+ arg_type = NK_ARG_TYPE_CHAR;
+ iter++;
+ } else arg_type = NK_ARG_TYPE_SHORT;
+ iter++;
+ } else if (*iter == 'l') {
+ arg_type = NK_ARG_TYPE_LONG;
+ iter++;
+ } else arg_type = NK_ARG_TYPE_DEFAULT;
+
+ /* specifier */
+ if (*iter == '%') {
+ NK_ASSERT(arg_type == NK_ARG_TYPE_DEFAULT);
+ NK_ASSERT(precision == NK_DEFAULT);
+ NK_ASSERT(width == NK_DEFAULT);
+ if (len < buf_size)
+ buf[len++] = '%';
+ } else if (*iter == 's') {
+ /* string */
+ const char *str = va_arg(args, const char*);
+ NK_ASSERT(str != buf && "buffer and argument are not allowed to overlap!");
+ NK_ASSERT(arg_type == NK_ARG_TYPE_DEFAULT);
+ NK_ASSERT(precision == NK_DEFAULT);
+ NK_ASSERT(width == NK_DEFAULT);
+ if (str == buf) return -1;
+ while (str && *str && len < buf_size)
+ buf[len++] = *str++;
+ } else if (*iter == 'n') {
+            /* %n: store the number of characters written so far */
+ signed int *n = va_arg(args, int*);
+ NK_ASSERT(arg_type == NK_ARG_TYPE_DEFAULT);
+ NK_ASSERT(precision == NK_DEFAULT);
+ NK_ASSERT(width == NK_DEFAULT);
+ if (n) *n = len;
+ } else if (*iter == 'c' || *iter == 'i' || *iter == 'd') {
+ /* signed integer */
+ long value = 0;
+ const char *num_iter;
+ int num_len, num_print, padding;
+ int cur_precision = NK_MAX(precision, 1);
+ int cur_width = NK_MAX(width, 0);
+
+ /* retrieve correct value type */
+ if (arg_type == NK_ARG_TYPE_CHAR)
+ value = (signed char)va_arg(args, int);
+ else if (arg_type == NK_ARG_TYPE_SHORT)
+ value = (signed short)va_arg(args, int);
+ else if (arg_type == NK_ARG_TYPE_LONG)
+ value = va_arg(args, signed long);
+ else if (*iter == 'c')
+ value = (unsigned char)va_arg(args, int);
+ else value = va_arg(args, signed int);
+
+ /* convert number to string */
+ nk_itoa(number_buffer, value);
+ num_len = nk_strlen(number_buffer);
+ padding = NK_MAX(cur_width - NK_MAX(cur_precision, num_len), 0);
+ if ((flag & NK_ARG_FLAG_PLUS) || (flag & NK_ARG_FLAG_SPACE))
+ padding = NK_MAX(padding-1, 0);
+
+ /* fill left padding up to a total of `width` characters */
+ if (!(flag & NK_ARG_FLAG_LEFT)) {
+ while (padding-- > 0 && (len < buf_size)) {
+ if ((flag & NK_ARG_FLAG_ZERO) && (precision == NK_DEFAULT))
+ buf[len++] = '0';
+ else buf[len++] = ' ';
+ }
+ }
+
+            /* write the sign/space prefix if requested */
+ if ((flag & NK_ARG_FLAG_PLUS) && value >= 0 && len < buf_size)
+ buf[len++] = '+';
+ else if ((flag & NK_ARG_FLAG_SPACE) && value >= 0 && len < buf_size)
+ buf[len++] = ' ';
+
+ /* fill up to precision number of digits with '0' */
+ num_print = NK_MAX(cur_precision, num_len);
+ while (precision && (num_print > num_len) && (len < buf_size)) {
+ buf[len++] = '0';
+ num_print--;
+ }
+
+ /* copy string value representation into buffer */
+ num_iter = number_buffer;
+ while (precision && *num_iter && len < buf_size)
+ buf[len++] = *num_iter++;
+
+ /* fill right padding up to width characters */
+ if (flag & NK_ARG_FLAG_LEFT) {
+ while ((padding-- > 0) && (len < buf_size))
+ buf[len++] = ' ';
+ }
+ } else if (*iter == 'o' || *iter == 'x' || *iter == 'X' || *iter == 'u') {
+ /* unsigned integer */
+ unsigned long value = 0;
+ int num_len = 0, num_print, padding = 0;
+ int cur_precision = NK_MAX(precision, 1);
+ int cur_width = NK_MAX(width, 0);
+ unsigned int base = (*iter == 'o') ? 8: (*iter == 'u')? 10: 16;
+
+ /* print oct/hex/dec value */
+ const char *upper_output_format = "0123456789ABCDEF";
+ const char *lower_output_format = "0123456789abcdef";
+ const char *output_format = (*iter == 'x') ?
+ lower_output_format: upper_output_format;
+
+ /* retrieve correct value type */
+ if (arg_type == NK_ARG_TYPE_CHAR)
+ value = (unsigned char)va_arg(args, int);
+ else if (arg_type == NK_ARG_TYPE_SHORT)
+ value = (unsigned short)va_arg(args, int);
+ else if (arg_type == NK_ARG_TYPE_LONG)
+ value = va_arg(args, unsigned long);
+ else value = va_arg(args, unsigned int);
+
+ do {
+                /* convert the value into digits of the requested base */
+ int digit = output_format[value % base];
+ if (num_len < NK_MAX_NUMBER_BUFFER)
+ number_buffer[num_len++] = (char)digit;
+ value /= base;
+ } while (value > 0);
+
+ num_print = NK_MAX(cur_precision, num_len);
+ padding = NK_MAX(cur_width - NK_MAX(cur_precision, num_len), 0);
+ if (flag & NK_ARG_FLAG_NUM)
+ padding = NK_MAX(padding-1, 0);
+
+ /* fill left padding up to a total of `width` characters */
+ if (!(flag & NK_ARG_FLAG_LEFT)) {
+ while ((padding-- > 0) && (len < buf_size)) {
+ if ((flag & NK_ARG_FLAG_ZERO) && (precision == NK_DEFAULT))
+ buf[len++] = '0';
+ else buf[len++] = ' ';
+ }
+ }
+
+ /* fill up to precision number of digits */
+ if (num_print && (flag & NK_ARG_FLAG_NUM)) {
+ if ((*iter == 'o') && (len < buf_size)) {
+ buf[len++] = '0';
+ } else if ((*iter == 'x') && ((len+1) < buf_size)) {
+ buf[len++] = '0';
+ buf[len++] = 'x';
+ } else if ((*iter == 'X') && ((len+1) < buf_size)) {
+ buf[len++] = '0';
+ buf[len++] = 'X';
+ }
+ }
+ while (precision && (num_print > num_len) && (len < buf_size)) {
+ buf[len++] = '0';
+ num_print--;
+ }
+
+ /* reverse number direction */
+ while (num_len > 0) {
+ if (precision && (len < buf_size))
+ buf[len++] = number_buffer[num_len-1];
+ num_len--;
+ }
+
+ /* fill right padding up to width characters */
+ if (flag & NK_ARG_FLAG_LEFT) {
+ while ((padding-- > 0) && (len < buf_size))
+ buf[len++] = ' ';
+ }
+ } else if (*iter == 'f') {
+ /* floating point */
+ const char *num_iter;
+ int cur_precision = (precision < 0) ? 6: precision;
+ int prefix, cur_width = NK_MAX(width, 0);
+ double value = va_arg(args, double);
+ int num_len = 0, frac_len = 0, dot = 0;
+ int padding = 0;
+
+ NK_ASSERT(arg_type == NK_ARG_TYPE_DEFAULT);
+ NK_DTOA(number_buffer, value);
+ num_len = nk_strlen(number_buffer);
+
+ /* calculate padding */
+ num_iter = number_buffer;
+ while (*num_iter && *num_iter != '.')
+ num_iter++;
+
+ prefix = (*num_iter == '.')?(int)(num_iter - number_buffer)+1:0;
+ padding = NK_MAX(cur_width - (prefix + NK_MIN(cur_precision, num_len - prefix)) , 0);
+ if ((flag & NK_ARG_FLAG_PLUS) || (flag & NK_ARG_FLAG_SPACE))
+ padding = NK_MAX(padding-1, 0);
+
+ /* fill left padding up to a total of `width` characters */
+ if (!(flag & NK_ARG_FLAG_LEFT)) {
+ while (padding-- > 0 && (len < buf_size)) {
+ if (flag & NK_ARG_FLAG_ZERO)
+ buf[len++] = '0';
+ else buf[len++] = ' ';
+ }
+ }
+
+ /* copy string value representation into buffer */
+ num_iter = number_buffer;
+ if ((flag & NK_ARG_FLAG_PLUS) && (value >= 0) && (len < buf_size))
+ buf[len++] = '+';
+ else if ((flag & NK_ARG_FLAG_SPACE) && (value >= 0) && (len < buf_size))
+ buf[len++] = ' ';
+ while (*num_iter) {
+ if (dot) frac_len++;
+ if (len < buf_size)
+ buf[len++] = *num_iter;
+ if (*num_iter == '.') dot = 1;
+ if (frac_len >= cur_precision) break;
+ num_iter++;
+ }
+
+ /* fill number up to precision */
+ while (frac_len < cur_precision) {
+ if (!dot && len < buf_size) {
+ buf[len++] = '.';
+ dot = 1;
+ }
+ if (len < buf_size)
+ buf[len++] = '0';
+ frac_len++;
+ }
+
+ /* fill right padding up to width characters */
+ if (flag & NK_ARG_FLAG_LEFT) {
+ while ((padding-- > 0) && (len < buf_size))
+ buf[len++] = ' ';
+ }
+ } else {
+ /* Specifier not supported: g,G,e,E,p,z */
+ NK_ASSERT(0 && "specifier is not supported!");
+ return result;
+ }
+ }
+ buf[(len >= buf_size)?(buf_size-1):len] = 0;
+ result = (len >= buf_size)?-1:len;
+ return result;
+}
+#endif
+NK_LIB int
+nk_strfmt(char *buf, int buf_size, const char *fmt, va_list args)
+{
+ int result = -1;
+ NK_ASSERT(buf);
+ NK_ASSERT(buf_size);
+ if (!buf || !buf_size || !fmt) return 0;
+#ifdef NK_INCLUDE_STANDARD_IO
+ result = NK_VSNPRINTF(buf, (nk_size)buf_size, fmt, args);
+ result = (result >= buf_size) ? -1: result;
+ buf[buf_size-1] = 0;
+#else
+ result = nk_vsnprintf(buf, buf_size, fmt, args);
+#endif
+ return result;
+}
+#endif
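+/* Illustrative sketch (editorial addition, not part of the original nuklear
+   sources): nk_strfmt takes a va_list, so callers typically wrap it in a
+   variadic helper like the hypothetical one below. Without
+   NK_INCLUDE_STANDARD_IO only the specifiers handled above are available
+   (%%, %s, %n, %c/%i/%d, %o/%u/%x/%X, %f); g, G, e, E, p and z are not.
+   Disabled by default. */
+#if 0
+#include <stdarg.h> /* harmless if already pulled in by NK_INCLUDE_STANDARD_VARARGS */
+static int
+example_format(char *buf, int buf_size, const char *fmt, ...)
+{
+    va_list args;
+    int len;
+    va_start(args, fmt);
+    len = nk_strfmt(buf, buf_size, fmt, args);
+    va_end(args);
+    return len; /* characters written, or -1 on truncation */
+}
+#endif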
+NK_API nk_hash
+nk_murmur_hash(const void * key, int len, nk_hash seed)
+{
+    /* 32-bit MurmurHash3: https://code.google.com/p/smhasher/wiki/MurmurHash3 */
+ #define NK_ROTL(x,r) ((x) << (r) | ((x) >> (32 - r)))
+
+ nk_uint h1 = seed;
+ nk_uint k1;
+ const nk_byte *data = (const nk_byte*)key;
+ const nk_byte *keyptr = data;
+ nk_byte *k1ptr;
+ const int bsize = sizeof(k1);
+ const int nblocks = len/4;
+
+ const nk_uint c1 = 0xcc9e2d51;
+ const nk_uint c2 = 0x1b873593;
+ const nk_byte *tail;
+ int i;
+
+ /* body */
+ if (!key) return 0;
+ for (i = 0; i < nblocks; ++i, keyptr += bsize) {
+ k1ptr = (nk_byte*)&k1;
+ k1ptr[0] = keyptr[0];
+ k1ptr[1] = keyptr[1];
+ k1ptr[2] = keyptr[2];
+ k1ptr[3] = keyptr[3];
+
+ k1 *= c1;
+ k1 = NK_ROTL(k1,15);
+ k1 *= c2;
+
+ h1 ^= k1;
+ h1 = NK_ROTL(h1,13);
+ h1 = h1*5+0xe6546b64;
+ }
+
+ /* tail */
+ tail = (const nk_byte*)(data + nblocks*4);
+ k1 = 0;
+ switch (len & 3) {
+ case 3: k1 ^= (nk_uint)(tail[2] << 16); /* fallthrough */
+ case 2: k1 ^= (nk_uint)(tail[1] << 8u); /* fallthrough */
+ case 1: k1 ^= tail[0];
+ k1 *= c1;
+ k1 = NK_ROTL(k1,15);
+ k1 *= c2;
+ h1 ^= k1;
+ break;
+ default: break;
+ }
+
+ /* finalization */
+ h1 ^= (nk_uint)len;
+ /* fmix32 */
+ h1 ^= h1 >> 16;
+ h1 *= 0x85ebca6b;
+ h1 ^= h1 >> 13;
+ h1 *= 0xc2b2ae35;
+ h1 ^= h1 >> 16;
+
+ #undef NK_ROTL
+ return h1;
+}
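+/* Illustrative sketch (editorial addition, not part of the original nuklear
+   sources): this hash is what turns a string name into an nk_hash key, e.g.
+   for the per-window value tables declared above (nk_add_value /
+   nk_find_value). The helper and the seed value are made up for
+   illustration; disabled by default. */
+#if 0
+static nk_hash
+example_name_to_hash(const char *name)
+{
+    /* the seed is arbitrary here; callers just need to use it consistently */
+    return nk_murmur_hash(name, nk_strlen(name), 42);
+}
+#endif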
+#ifdef NK_INCLUDE_STANDARD_IO
+NK_LIB char*
+nk_file_load(const char* path, nk_size* siz, struct nk_allocator *alloc)
+{
+ char *buf;
+ FILE *fd;
+ long ret;
+
+ NK_ASSERT(path);
+ NK_ASSERT(siz);
+ NK_ASSERT(alloc);
+ if (!path || !siz || !alloc)
+ return 0;
+
+ fd = fopen(path, "rb");
+ if (!fd) return 0;
+ fseek(fd, 0, SEEK_END);
+ ret = ftell(fd);
+ if (ret < 0) {
+ fclose(fd);
+ return 0;
+ }
+ *siz = (nk_size)ret;
+ fseek(fd, 0, SEEK_SET);
+ buf = (char*)alloc->alloc(alloc->userdata,0, *siz);
+ NK_ASSERT(buf);
+ if (!buf) {
+ fclose(fd);
+ return 0;
+ }
+ *siz = (nk_size)fread(buf, 1,*siz, fd);
+ fclose(fd);
+ return buf;
+}
+#endif
+NK_LIB int
+nk_text_clamp(const struct nk_user_font *font, const char *text,
+ int text_len, float space, int *glyphs, float *text_width,
+ nk_rune *sep_list, int sep_count)
+{
+ int i = 0;
+ int glyph_len = 0;
+ float last_width = 0;
+ nk_rune unicode = 0;
+ float width = 0;
+ int len = 0;
+ int g = 0;
+ float s;
+
+ int sep_len = 0;
+ int sep_g = 0;
+ float sep_width = 0;
+ sep_count = NK_MAX(sep_count,0);
+
+ glyph_len = nk_utf_decode(text, &unicode, text_len);
+ while (glyph_len && (width < space) && (len < text_len)) {
+ len += glyph_len;
+ s = font->width(font->userdata, font->height, text, len);
+ for (i = 0; i < sep_count; ++i) {
+ if (unicode != sep_list[i]) continue;
+ sep_width = last_width = width;
+ sep_g = g+1;
+ sep_len = len;
+ break;
+ }
+ if (i == sep_count){
+ last_width = sep_width = width;
+ sep_g = g+1;
+ }
+ width = s;
+ glyph_len = nk_utf_decode(&text[len], &unicode, text_len - len);
+ g++;
+ }
+ if (len >= text_len) {
+ *glyphs = g;
+ *text_width = last_width;
+ return len;
+ } else {
+ *glyphs = sep_g;
+ *text_width = sep_width;
+ return (!sep_len) ? len: sep_len;
+ }
+}
+NK_LIB struct nk_vec2
+nk_text_calculate_text_bounds(const struct nk_user_font *font,
+ const char *begin, int byte_len, float row_height, const char **remaining,
+ struct nk_vec2 *out_offset, int *glyphs, int op)
+{
+ float line_height = row_height;
+ struct nk_vec2 text_size = nk_vec2(0,0);
+ float line_width = 0.0f;
+
+ float glyph_width;
+ int glyph_len = 0;
+ nk_rune unicode = 0;
+ int text_len = 0;
+ if (!begin || byte_len <= 0 || !font)
+ return nk_vec2(0,row_height);
+
+ glyph_len = nk_utf_decode(begin, &unicode, byte_len);
+ if (!glyph_len) return text_size;
+ glyph_width = font->width(font->userdata, font->height, begin, glyph_len);
+
+ *glyphs = 0;
+ while ((text_len < byte_len) && glyph_len) {
+ if (unicode == '\n') {
+ text_size.x = NK_MAX(text_size.x, line_width);
+ text_size.y += line_height;
+ line_width = 0;
+ *glyphs+=1;
+ if (op == NK_STOP_ON_NEW_LINE)
+ break;
+
+ text_len++;
+ glyph_len = nk_utf_decode(begin + text_len, &unicode, byte_len-text_len);
+ continue;
+ }
+
+ if (unicode == '\r') {
+ text_len++;
+ *glyphs+=1;
+ glyph_len = nk_utf_decode(begin + text_len, &unicode, byte_len-text_len);
+ continue;
+ }
+
+ *glyphs = *glyphs + 1;
+ text_len += glyph_len;
+ line_width += (float)glyph_width;
+ glyph_len = nk_utf_decode(begin + text_len, &unicode, byte_len-text_len);
+ glyph_width = font->width(font->userdata, font->height, begin+text_len, glyph_len);
+ continue;
+ }
+
+ if (text_size.x < line_width)
+ text_size.x = line_width;
+ if (out_offset)
+ *out_offset = nk_vec2(line_width, text_size.y + line_height);
+ if (line_width > 0 || text_size.y == 0.0f)
+ text_size.y += line_height;
+ if (remaining)
+ *remaining = begin+text_len;
+ return text_size;
+}
+
+
+
+
+
+/* ==============================================================
+ *
+ * COLOR
+ *
+ * ===============================================================*/
+NK_INTERN int
+nk_parse_hex(const char *p, int length)
+{
+ int i = 0;
+ int len = 0;
+ while (len < length) {
+ i <<= 4;
+ if (p[len] >= 'a' && p[len] <= 'f')
+ i += ((p[len] - 'a') + 10);
+ else if (p[len] >= 'A' && p[len] <= 'F')
+ i += ((p[len] - 'A') + 10);
+ else i += (p[len] - '0');
+ len++;
+ }
+ return i;
+}
+NK_API struct nk_color
+nk_rgba(int r, int g, int b, int a)
+{
+ struct nk_color ret;
+ ret.r = (nk_byte)NK_CLAMP(0, r, 255);
+ ret.g = (nk_byte)NK_CLAMP(0, g, 255);
+ ret.b = (nk_byte)NK_CLAMP(0, b, 255);
+ ret.a = (nk_byte)NK_CLAMP(0, a, 255);
+ return ret;
+}
+NK_API struct nk_color
+nk_rgb_hex(const char *rgb)
+{
+ struct nk_color col;
+ const char *c = rgb;
+ if (*c == '#') c++;
+ col.r = (nk_byte)nk_parse_hex(c, 2);
+ col.g = (nk_byte)nk_parse_hex(c+2, 2);
+ col.b = (nk_byte)nk_parse_hex(c+4, 2);
+ col.a = 255;
+ return col;
+}
+NK_API struct nk_color
+nk_rgba_hex(const char *rgb)
+{
+ struct nk_color col;
+ const char *c = rgb;
+ if (*c == '#') c++;
+ col.r = (nk_byte)nk_parse_hex(c, 2);
+ col.g = (nk_byte)nk_parse_hex(c+2, 2);
+ col.b = (nk_byte)nk_parse_hex(c+4, 2);
+ col.a = (nk_byte)nk_parse_hex(c+6, 2);
+ return col;
+}
+NK_API void
+nk_color_hex_rgba(char *output, struct nk_color col)
+{
+ #define NK_TO_HEX(i) ((i) <= 9 ? '0' + (i): 'A' - 10 + (i))
+ output[0] = (char)NK_TO_HEX((col.r & 0xF0) >> 4);
+ output[1] = (char)NK_TO_HEX((col.r & 0x0F));
+ output[2] = (char)NK_TO_HEX((col.g & 0xF0) >> 4);
+ output[3] = (char)NK_TO_HEX((col.g & 0x0F));
+ output[4] = (char)NK_TO_HEX((col.b & 0xF0) >> 4);
+ output[5] = (char)NK_TO_HEX((col.b & 0x0F));
+ output[6] = (char)NK_TO_HEX((col.a & 0xF0) >> 4);
+ output[7] = (char)NK_TO_HEX((col.a & 0x0F));
+ output[8] = '\0';
+ #undef NK_TO_HEX
+}
+NK_API void
+nk_color_hex_rgb(char *output, struct nk_color col)
+{
+ #define NK_TO_HEX(i) ((i) <= 9 ? '0' + (i): 'A' - 10 + (i))
+ output[0] = (char)NK_TO_HEX((col.r & 0xF0) >> 4);
+ output[1] = (char)NK_TO_HEX((col.r & 0x0F));
+ output[2] = (char)NK_TO_HEX((col.g & 0xF0) >> 4);
+ output[3] = (char)NK_TO_HEX((col.g & 0x0F));
+ output[4] = (char)NK_TO_HEX((col.b & 0xF0) >> 4);
+ output[5] = (char)NK_TO_HEX((col.b & 0x0F));
+ output[6] = '\0';
+ #undef NK_TO_HEX
+}
+NK_API struct nk_color
+nk_rgba_iv(const int *c)
+{
+ return nk_rgba(c[0], c[1], c[2], c[3]);
+}
+NK_API struct nk_color
+nk_rgba_bv(const nk_byte *c)
+{
+ return nk_rgba(c[0], c[1], c[2], c[3]);
+}
+NK_API struct nk_color
+nk_rgb(int r, int g, int b)
+{
+ struct nk_color ret;
+ ret.r = (nk_byte)NK_CLAMP(0, r, 255);
+ ret.g = (nk_byte)NK_CLAMP(0, g, 255);
+ ret.b = (nk_byte)NK_CLAMP(0, b, 255);
+ ret.a = (nk_byte)255;
+ return ret;
+}
+NK_API struct nk_color
+nk_rgb_iv(const int *c)
+{
+ return nk_rgb(c[0], c[1], c[2]);
+}
+NK_API struct nk_color
+nk_rgb_bv(const nk_byte* c)
+{
+ return nk_rgb(c[0], c[1], c[2]);
+}
+NK_API struct nk_color
+nk_rgba_u32(nk_uint in)
+{
+ struct nk_color ret;
+ ret.r = (in & 0xFF);
+ ret.g = ((in >> 8) & 0xFF);
+ ret.b = ((in >> 16) & 0xFF);
+ ret.a = (nk_byte)((in >> 24) & 0xFF);
+ return ret;
+}
+NK_API struct nk_color
+nk_rgba_f(float r, float g, float b, float a)
+{
+ struct nk_color ret;
+ ret.r = (nk_byte)(NK_SATURATE(r) * 255.0f);
+ ret.g = (nk_byte)(NK_SATURATE(g) * 255.0f);
+ ret.b = (nk_byte)(NK_SATURATE(b) * 255.0f);
+ ret.a = (nk_byte)(NK_SATURATE(a) * 255.0f);
+ return ret;
+}
+NK_API struct nk_color
+nk_rgba_fv(const float *c)
+{
+ return nk_rgba_f(c[0], c[1], c[2], c[3]);
+}
+NK_API struct nk_color
+nk_rgba_cf(struct nk_colorf c)
+{
+ return nk_rgba_f(c.r, c.g, c.b, c.a);
+}
+NK_API struct nk_color
+nk_rgb_f(float r, float g, float b)
+{
+ struct nk_color ret;
+ ret.r = (nk_byte)(NK_SATURATE(r) * 255.0f);
+ ret.g = (nk_byte)(NK_SATURATE(g) * 255.0f);
+ ret.b = (nk_byte)(NK_SATURATE(b) * 255.0f);
+ ret.a = 255;
+ return ret;
+}
+NK_API struct nk_color
+nk_rgb_fv(const float *c)
+{
+ return nk_rgb_f(c[0], c[1], c[2]);
+}
+NK_API struct nk_color
+nk_rgb_cf(struct nk_colorf c)
+{
+ return nk_rgb_f(c.r, c.g, c.b);
+}
+NK_API struct nk_color
+nk_hsv(int h, int s, int v)
+{
+ return nk_hsva(h, s, v, 255);
+}
+NK_API struct nk_color
+nk_hsv_iv(const int *c)
+{
+ return nk_hsv(c[0], c[1], c[2]);
+}
+NK_API struct nk_color
+nk_hsv_bv(const nk_byte *c)
+{
+ return nk_hsv(c[0], c[1], c[2]);
+}
+NK_API struct nk_color
+nk_hsv_f(float h, float s, float v)
+{
+ return nk_hsva_f(h, s, v, 1.0f);
+}
+NK_API struct nk_color
+nk_hsv_fv(const float *c)
+{
+ return nk_hsv_f(c[0], c[1], c[2]);
+}
+NK_API struct nk_color
+nk_hsva(int h, int s, int v, int a)
+{
+ float hf = ((float)NK_CLAMP(0, h, 255)) / 255.0f;
+ float sf = ((float)NK_CLAMP(0, s, 255)) / 255.0f;
+ float vf = ((float)NK_CLAMP(0, v, 255)) / 255.0f;
+ float af = ((float)NK_CLAMP(0, a, 255)) / 255.0f;
+ return nk_hsva_f(hf, sf, vf, af);
+}
+NK_API struct nk_color
+nk_hsva_iv(const int *c)
+{
+ return nk_hsva(c[0], c[1], c[2], c[3]);
+}
+NK_API struct nk_color
+nk_hsva_bv(const nk_byte *c)
+{
+ return nk_hsva(c[0], c[1], c[2], c[3]);
+}
+NK_API struct nk_colorf
+nk_hsva_colorf(float h, float s, float v, float a)
+{
+ int i;
+ float p, q, t, f;
+ struct nk_colorf out = {0,0,0,0};
+ if (s <= 0.0f) {
+ out.r = v; out.g = v; out.b = v; out.a = a;
+ return out;
+ }
+ h = h / (60.0f/360.0f);
+ i = (int)h;
+ f = h - (float)i;
+ p = v * (1.0f - s);
+ q = v * (1.0f - (s * f));
+ t = v * (1.0f - s * (1.0f - f));
+
+ switch (i) {
+ case 0: default: out.r = v; out.g = t; out.b = p; break;
+ case 1: out.r = q; out.g = v; out.b = p; break;
+ case 2: out.r = p; out.g = v; out.b = t; break;
+ case 3: out.r = p; out.g = q; out.b = v; break;
+ case 4: out.r = t; out.g = p; out.b = v; break;
+ case 5: out.r = v; out.g = p; out.b = q; break;}
+ out.a = a;
+ return out;
+}
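+/* Illustrative sketch (editorial addition, not part of the original nuklear
+   sources): h, s, v and a are all normalized to 0...1 here (h = degrees/360).
+   Two quick checks of the conversion above; disabled by default. */
+#if 0
+static void
+example_hsv_to_rgb(void)
+{
+    struct nk_colorf red   = nk_hsva_colorf(0.0f, 1.0f, 1.0f, 1.0f);          /* {1,0,0,1} */
+    struct nk_colorf green = nk_hsva_colorf(120.0f/360.0f, 1.0f, 1.0f, 1.0f); /* {0,1,0,1} */
+    (void)red; (void)green;
+}
+#endif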
+NK_API struct nk_colorf
+nk_hsva_colorfv(float *c)
+{
+ return nk_hsva_colorf(c[0], c[1], c[2], c[3]);
+}
+NK_API struct nk_color
+nk_hsva_f(float h, float s, float v, float a)
+{
+ struct nk_colorf c = nk_hsva_colorf(h, s, v, a);
+ return nk_rgba_f(c.r, c.g, c.b, c.a);
+}
+NK_API struct nk_color
+nk_hsva_fv(const float *c)
+{
+ return nk_hsva_f(c[0], c[1], c[2], c[3]);
+}
+NK_API nk_uint
+nk_color_u32(struct nk_color in)
+{
+ nk_uint out = (nk_uint)in.r;
+ out |= ((nk_uint)in.g << 8);
+ out |= ((nk_uint)in.b << 16);
+ out |= ((nk_uint)in.a << 24);
+ return out;
+}
+NK_API void
+nk_color_f(float *r, float *g, float *b, float *a, struct nk_color in)
+{
+ NK_STORAGE const float s = 1.0f/255.0f;
+ *r = (float)in.r * s;
+ *g = (float)in.g * s;
+ *b = (float)in.b * s;
+ *a = (float)in.a * s;
+}
+NK_API void
+nk_color_fv(float *c, struct nk_color in)
+{
+ nk_color_f(&c[0], &c[1], &c[2], &c[3], in);
+}
+NK_API struct nk_colorf
+nk_color_cf(struct nk_color in)
+{
+ struct nk_colorf o;
+ nk_color_f(&o.r, &o.g, &o.b, &o.a, in);
+ return o;
+}
+NK_API void
+nk_color_d(double *r, double *g, double *b, double *a, struct nk_color in)
+{
+ NK_STORAGE const double s = 1.0/255.0;
+ *r = (double)in.r * s;
+ *g = (double)in.g * s;
+ *b = (double)in.b * s;
+ *a = (double)in.a * s;
+}
+NK_API void
+nk_color_dv(double *c, struct nk_color in)
+{
+ nk_color_d(&c[0], &c[1], &c[2], &c[3], in);
+}
+NK_API void
+nk_color_hsv_f(float *out_h, float *out_s, float *out_v, struct nk_color in)
+{
+ float a;
+ nk_color_hsva_f(out_h, out_s, out_v, &a, in);
+}
+NK_API void
+nk_color_hsv_fv(float *out, struct nk_color in)
+{
+ float a;
+ nk_color_hsva_f(&out[0], &out[1], &out[2], &a, in);
+}
+NK_API void
+nk_colorf_hsva_f(float *out_h, float *out_s,
+ float *out_v, float *out_a, struct nk_colorf in)
+{
+ float chroma;
+ float K = 0.0f;
+ if (in.g < in.b) {
+ const float t = in.g; in.g = in.b; in.b = t;
+ K = -1.f;
+ }
+ if (in.r < in.g) {
+ const float t = in.r; in.r = in.g; in.g = t;
+ K = -2.f/6.0f - K;
+ }
+ chroma = in.r - ((in.g < in.b) ? in.g: in.b);
+ *out_h = NK_ABS(K + (in.g - in.b)/(6.0f * chroma + 1e-20f));
+ *out_s = chroma / (in.r + 1e-20f);
+ *out_v = in.r;
+ *out_a = in.a;
+
+}
+NK_API void
+nk_colorf_hsva_fv(float *hsva, struct nk_colorf in)
+{
+ nk_colorf_hsva_f(&hsva[0], &hsva[1], &hsva[2], &hsva[3], in);
+}
+NK_API void
+nk_color_hsva_f(float *out_h, float *out_s,
+ float *out_v, float *out_a, struct nk_color in)
+{
+ struct nk_colorf col;
+ nk_color_f(&col.r,&col.g,&col.b,&col.a, in);
+ nk_colorf_hsva_f(out_h, out_s, out_v, out_a, col);
+}
+NK_API void
+nk_color_hsva_fv(float *out, struct nk_color in)
+{
+ nk_color_hsva_f(&out[0], &out[1], &out[2], &out[3], in);
+}
+NK_API void
+nk_color_hsva_i(int *out_h, int *out_s, int *out_v,
+ int *out_a, struct nk_color in)
+{
+ float h,s,v,a;
+ nk_color_hsva_f(&h, &s, &v, &a, in);
+ *out_h = (nk_byte)(h * 255.0f);
+ *out_s = (nk_byte)(s * 255.0f);
+ *out_v = (nk_byte)(v * 255.0f);
+ *out_a = (nk_byte)(a * 255.0f);
+}
+NK_API void
+nk_color_hsva_iv(int *out, struct nk_color in)
+{
+ nk_color_hsva_i(&out[0], &out[1], &out[2], &out[3], in);
+}
+NK_API void
+nk_color_hsva_bv(nk_byte *out, struct nk_color in)
+{
+ int tmp[4];
+ nk_color_hsva_i(&tmp[0], &tmp[1], &tmp[2], &tmp[3], in);
+ out[0] = (nk_byte)tmp[0];
+ out[1] = (nk_byte)tmp[1];
+ out[2] = (nk_byte)tmp[2];
+ out[3] = (nk_byte)tmp[3];
+}
+NK_API void
+nk_color_hsva_b(nk_byte *h, nk_byte *s, nk_byte *v, nk_byte *a, struct nk_color in)
+{
+ int tmp[4];
+ nk_color_hsva_i(&tmp[0], &tmp[1], &tmp[2], &tmp[3], in);
+ *h = (nk_byte)tmp[0];
+ *s = (nk_byte)tmp[1];
+ *v = (nk_byte)tmp[2];
+ *a = (nk_byte)tmp[3];
+}
+NK_API void
+nk_color_hsv_i(int *out_h, int *out_s, int *out_v, struct nk_color in)
+{
+ int a;
+ nk_color_hsva_i(out_h, out_s, out_v, &a, in);
+}
+NK_API void
+nk_color_hsv_b(nk_byte *out_h, nk_byte *out_s, nk_byte *out_v, struct nk_color in)
+{
+ int tmp[4];
+ nk_color_hsva_i(&tmp[0], &tmp[1], &tmp[2], &tmp[3], in);
+ *out_h = (nk_byte)tmp[0];
+ *out_s = (nk_byte)tmp[1];
+ *out_v = (nk_byte)tmp[2];
+}
+NK_API void
+nk_color_hsv_iv(int *out, struct nk_color in)
+{
+ nk_color_hsv_i(&out[0], &out[1], &out[2], in);
+}
+NK_API void
+nk_color_hsv_bv(nk_byte *out, struct nk_color in)
+{
+ int tmp[4];
+ nk_color_hsv_i(&tmp[0], &tmp[1], &tmp[2], in);
+ out[0] = (nk_byte)tmp[0];
+ out[1] = (nk_byte)tmp[1];
+ out[2] = (nk_byte)tmp[2];
+}
+
+
+
+
+
+/* ===============================================================
+ *
+ * UTF-8
+ *
+ * ===============================================================*/
+NK_GLOBAL const nk_byte nk_utfbyte[NK_UTF_SIZE+1] = {0x80, 0, 0xC0, 0xE0, 0xF0};
+NK_GLOBAL const nk_byte nk_utfmask[NK_UTF_SIZE+1] = {0xC0, 0x80, 0xE0, 0xF0, 0xF8};
+NK_GLOBAL const nk_uint nk_utfmin[NK_UTF_SIZE+1] = {0, 0, 0x80, 0x800, 0x10000};
+NK_GLOBAL const nk_uint nk_utfmax[NK_UTF_SIZE+1] = {0x10FFFF, 0x7F, 0x7FF, 0xFFFF, 0x10FFFF};
+
+NK_INTERN int
+nk_utf_validate(nk_rune *u, int i)
+{
+ NK_ASSERT(u);
+ if (!u) return 0;
+ if (!NK_BETWEEN(*u, nk_utfmin[i], nk_utfmax[i]) ||
+ NK_BETWEEN(*u, 0xD800, 0xDFFF))
+ *u = NK_UTF_INVALID;
+ for (i = 1; *u > nk_utfmax[i]; ++i);
+ return i;
+}
+NK_INTERN nk_rune
+nk_utf_decode_byte(char c, int *i)
+{
+ NK_ASSERT(i);
+ if (!i) return 0;
+ for(*i = 0; *i < (int)NK_LEN(nk_utfmask); ++(*i)) {
+ if (((nk_byte)c & nk_utfmask[*i]) == nk_utfbyte[*i])
+ return (nk_byte)(c & ~nk_utfmask[*i]);
+ }
+ return 0;
+}
+NK_API int
+nk_utf_decode(const char *c, nk_rune *u, int clen)
+{
+ int i, j, len, type=0;
+ nk_rune udecoded;
+
+ NK_ASSERT(c);
+ NK_ASSERT(u);
+
+ if (!c || !u) return 0;
+ if (!clen) return 0;
+ *u = NK_UTF_INVALID;
+
+ udecoded = nk_utf_decode_byte(c[0], &len);
+ if (!NK_BETWEEN(len, 1, NK_UTF_SIZE))
+ return 1;
+
+ for (i = 1, j = 1; i < clen && j < len; ++i, ++j) {
+ udecoded = (udecoded << 6) | nk_utf_decode_byte(c[i], &type);
+ if (type != 0)
+ return j;
+ }
+ if (j < len)
+ return 0;
+ *u = udecoded;
+ nk_utf_validate(u, len);
+ return len;
+}
+NK_INTERN char
+nk_utf_encode_byte(nk_rune u, int i)
+{
+ return (char)((nk_utfbyte[i]) | ((nk_byte)u & ~nk_utfmask[i]));
+}
+NK_API int
+nk_utf_encode(nk_rune u, char *c, int clen)
+{
+ int len, i;
+ len = nk_utf_validate(&u, 0);
+ if (clen < len || !len || len > NK_UTF_SIZE)
+ return 0;
+
+ for (i = len - 1; i != 0; --i) {
+ c[i] = nk_utf_encode_byte(u, 0);
+ u >>= 6;
+ }
+ c[0] = nk_utf_encode_byte(u, len);
+ return len;
+}
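+/* Illustrative sketch (editorial addition, not part of the original nuklear
+   sources): encode and decode round-trip any valid code point in at most
+   NK_UTF_SIZE bytes. The helper below is hypothetical and disabled by
+   default. */
+#if 0
+static int
+example_utf8_roundtrip(nk_rune codepoint)
+{
+    char buf[NK_UTF_SIZE];
+    nk_rune decoded = 0;
+    int enc_len = nk_utf_encode(codepoint, buf, NK_UTF_SIZE);
+    int dec_len = nk_utf_decode(buf, &decoded, enc_len);
+    return enc_len && enc_len == dec_len && decoded == codepoint;
+}
+#endif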
+NK_API int
+nk_utf_len(const char *str, int len)
+{
+ const char *text;
+ int glyphs = 0;
+ int text_len;
+ int glyph_len;
+ int src_len = 0;
+ nk_rune unicode;
+
+ NK_ASSERT(str);
+ if (!str || !len) return 0;
+
+ text = str;
+ text_len = len;
+ glyph_len = nk_utf_decode(text, &unicode, text_len);
+ while (glyph_len && src_len < len) {
+ glyphs++;
+ src_len = src_len + glyph_len;
+ glyph_len = nk_utf_decode(text + src_len, &unicode, text_len - src_len);
+ }
+ return glyphs;
+}
+NK_API const char*
+nk_utf_at(const char *buffer, int length, int index,
+ nk_rune *unicode, int *len)
+{
+ int i = 0;
+ int src_len = 0;
+ int glyph_len = 0;
+ const char *text;
+ int text_len;
+
+ NK_ASSERT(buffer);
+ NK_ASSERT(unicode);
+ NK_ASSERT(len);
+
+ if (!buffer || !unicode || !len) return 0;
+ if (index < 0) {
+ *unicode = NK_UTF_INVALID;
+ *len = 0;
+ return 0;
+ }
+
+ text = buffer;
+ text_len = length;
+ glyph_len = nk_utf_decode(text, unicode, text_len);
+ while (glyph_len) {
+ if (i == index) {
+ *len = glyph_len;
+ break;
+ }
+
+ i++;
+ src_len = src_len + glyph_len;
+ glyph_len = nk_utf_decode(text + src_len, unicode, text_len - src_len);
+ }
+ if (i != index) return 0;
+ return buffer + src_len;
+}
+
+
+
+
+
+/* ==============================================================
+ *
+ * BUFFER
+ *
+ * ===============================================================*/
+#ifdef NK_INCLUDE_DEFAULT_ALLOCATOR
+NK_LIB void*
+nk_malloc(nk_handle unused, void *old,nk_size size)
+{
+ NK_UNUSED(unused);
+ NK_UNUSED(old);
+ return malloc(size);
+}
+NK_LIB void
+nk_mfree(nk_handle unused, void *ptr)
+{
+ NK_UNUSED(unused);
+ free(ptr);
+}
+NK_API void
+nk_buffer_init_default(struct nk_buffer *buffer)
+{
+ struct nk_allocator alloc;
+ alloc.userdata.ptr = 0;
+ alloc.alloc = nk_malloc;
+ alloc.free = nk_mfree;
+ nk_buffer_init(buffer, &alloc, NK_BUFFER_DEFAULT_INITIAL_SIZE);
+}
+#endif
+
+NK_API void
+nk_buffer_init(struct nk_buffer *b, const struct nk_allocator *a,
+ nk_size initial_size)
+{
+ NK_ASSERT(b);
+ NK_ASSERT(a);
+ NK_ASSERT(initial_size);
+ if (!b || !a || !initial_size) return;
+
+ nk_zero(b, sizeof(*b));
+ b->type = NK_BUFFER_DYNAMIC;
+ b->memory.ptr = a->alloc(a->userdata,0, initial_size);
+ b->memory.size = initial_size;
+ b->size = initial_size;
+ b->grow_factor = 2.0f;
+ b->pool = *a;
+}
+NK_API void
+nk_buffer_init_fixed(struct nk_buffer *b, void *m, nk_size size)
+{
+ NK_ASSERT(b);
+ NK_ASSERT(m);
+ NK_ASSERT(size);
+ if (!b || !m || !size) return;
+
+ nk_zero(b, sizeof(*b));
+ b->type = NK_BUFFER_FIXED;
+ b->memory.ptr = m;
+ b->memory.size = size;
+ b->size = size;
+}
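+/* Minimal usage sketch (an assumption for illustration, not library code):
+ * a fixed buffer backed by caller-owned memory needs no allocator and never
+ * grows; allocations simply fail once the memory block is exhausted.
+ *
+ *     static char memory[64 * 1024];
+ *     struct nk_buffer cmds;
+ *     nk_buffer_init_fixed(&cmds, memory, sizeof(memory));
+ */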
+NK_LIB void*
+nk_buffer_align(void *unaligned,
+ nk_size align, nk_size *alignment,
+ enum nk_buffer_allocation_type type)
+{
+ void *memory = 0;
+ switch (type) {
+ default:
+ case NK_BUFFER_MAX:
+ case NK_BUFFER_FRONT:
+ if (align) {
+ memory = NK_ALIGN_PTR(unaligned, align);
+ *alignment = (nk_size)((nk_byte*)memory - (nk_byte*)unaligned);
+ } else {
+ memory = unaligned;
+ *alignment = 0;
+ }
+ break;
+ case NK_BUFFER_BACK:
+ if (align) {
+ memory = NK_ALIGN_PTR_BACK(unaligned, align);
+ *alignment = (nk_size)((nk_byte*)unaligned - (nk_byte*)memory);
+ } else {
+ memory = unaligned;
+ *alignment = 0;
+ }
+ break;
+ }
+ return memory;
+}
+NK_LIB void*
+nk_buffer_realloc(struct nk_buffer *b, nk_size capacity, nk_size *size)
+{
+ void *temp;
+ nk_size buffer_size;
+
+ NK_ASSERT(b);
+ NK_ASSERT(size);
+ if (!b || !size || !b->pool.alloc || !b->pool.free)
+ return 0;
+
+ buffer_size = b->memory.size;
+ temp = b->pool.alloc(b->pool.userdata, b->memory.ptr, capacity);
+ NK_ASSERT(temp);
+ if (!temp) return 0;
+
+ *size = capacity;
+ if (temp != b->memory.ptr) {
+ NK_MEMCPY(temp, b->memory.ptr, buffer_size);
+ b->pool.free(b->pool.userdata, b->memory.ptr);
+ }
+
+ if (b->size == buffer_size) {
+ /* no back buffer so just set correct size */
+ b->size = capacity;
+ return temp;
+ } else {
+ /* copy back buffer to the end of the new buffer */
+ void *dst, *src;
+ nk_size back_size;
+ back_size = buffer_size - b->size;
+ dst = nk_ptr_add(void, temp, capacity - back_size);
+ src = nk_ptr_add(void, temp, b->size);
+ NK_MEMCPY(dst, src, back_size);
+ b->size = capacity - back_size;
+ }
+ return temp;
+}
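+/* Note: nk_buffer_alloc below treats the buffer as a double-ended arena.
+ * NK_BUFFER_FRONT allocations grow upward from `allocated`, NK_BUFFER_BACK
+ * allocations grow downward from `size`; once the two regions would meet the
+ * buffer is full and, if dynamic, is grown via nk_buffer_realloc above. */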
+NK_LIB void*
+nk_buffer_alloc(struct nk_buffer *b, enum nk_buffer_allocation_type type,
+ nk_size size, nk_size align)
+{
+ int full;
+ nk_size alignment;
+ void *unaligned;
+ void *memory;
+
+ NK_ASSERT(b);
+ NK_ASSERT(size);
+ if (!b || !size) return 0;
+ b->needed += size;
+
+ /* calculate total size with needed alignment + size */
+ if (type == NK_BUFFER_FRONT)
+ unaligned = nk_ptr_add(void, b->memory.ptr, b->allocated);
+ else unaligned = nk_ptr_add(void, b->memory.ptr, b->size - size);
+ memory = nk_buffer_align(unaligned, align, &alignment, type);
+
+    /* check if buffer has enough memory */
+ if (type == NK_BUFFER_FRONT)
+ full = ((b->allocated + size + alignment) > b->size);
+ else full = ((b->size - NK_MIN(b->size,(size + alignment))) <= b->allocated);
+
+ if (full) {
+ nk_size capacity;
+ if (b->type != NK_BUFFER_DYNAMIC)
+ return 0;
+ NK_ASSERT(b->pool.alloc && b->pool.free);
+ if (b->type != NK_BUFFER_DYNAMIC || !b->pool.alloc || !b->pool.free)
+ return 0;
+
+ /* buffer is full so allocate bigger buffer if dynamic */
+ capacity = (nk_size)((float)b->memory.size * b->grow_factor);
+ capacity = NK_MAX(capacity, nk_round_up_pow2((nk_uint)(b->allocated + size)));
+ b->memory.ptr = nk_buffer_realloc(b, capacity, &b->memory.size);
+ if (!b->memory.ptr) return 0;
+
+ /* align newly allocated pointer */
+ if (type == NK_BUFFER_FRONT)
+ unaligned = nk_ptr_add(void, b->memory.ptr, b->allocated);
+ else unaligned = nk_ptr_add(void, b->memory.ptr, b->size - size);
+ memory = nk_buffer_align(unaligned, align, &alignment, type);
+ }
+ if (type == NK_BUFFER_FRONT)
+ b->allocated += size + alignment;
+ else b->size -= (size + alignment);
+ b->needed += alignment;
+ b->calls++;
+ return memory;
+}
+NK_API void
+nk_buffer_push(struct nk_buffer *b, enum nk_buffer_allocation_type type,
+ const void *memory, nk_size size, nk_size align)
+{
+ void *mem = nk_buffer_alloc(b, type, size, align);
+ if (!mem) return;
+ NK_MEMCPY(mem, memory, size);
+}
+NK_API void
+nk_buffer_mark(struct nk_buffer *buffer, enum nk_buffer_allocation_type type)
+{
+ NK_ASSERT(buffer);
+ if (!buffer) return;
+ buffer->marker[type].active = nk_true;
+ if (type == NK_BUFFER_BACK)
+ buffer->marker[type].offset = buffer->size;
+ else buffer->marker[type].offset = buffer->allocated;
+}
+NK_API void
+nk_buffer_reset(struct nk_buffer *buffer, enum nk_buffer_allocation_type type)
+{
+ NK_ASSERT(buffer);
+ if (!buffer) return;
+ if (type == NK_BUFFER_BACK) {
+ /* reset back buffer either back to marker or empty */
+ buffer->needed -= (buffer->memory.size - buffer->marker[type].offset);
+ if (buffer->marker[type].active)
+ buffer->size = buffer->marker[type].offset;
+ else buffer->size = buffer->memory.size;
+ buffer->marker[type].active = nk_false;
+ } else {
+        /* reset front buffer either back to marker or empty */
+ buffer->needed -= (buffer->allocated - buffer->marker[type].offset);
+ if (buffer->marker[type].active)
+ buffer->allocated = buffer->marker[type].offset;
+ else buffer->allocated = 0;
+ buffer->marker[type].active = nk_false;
+ }
+}
+NK_API void
+nk_buffer_clear(struct nk_buffer *b)
+{
+ NK_ASSERT(b);
+ if (!b) return;
+ b->allocated = 0;
+ b->size = b->memory.size;
+ b->calls = 0;
+ b->needed = 0;
+}
+NK_API void
+nk_buffer_free(struct nk_buffer *b)
+{
+ NK_ASSERT(b);
+ if (!b || !b->memory.ptr) return;
+ if (b->type == NK_BUFFER_FIXED) return;
+ if (!b->pool.free) return;
+ NK_ASSERT(b->pool.free);
+ b->pool.free(b->pool.userdata, b->memory.ptr);
+}
+NK_API void
+nk_buffer_info(struct nk_memory_status *s, struct nk_buffer *b)
+{
+ NK_ASSERT(b);
+ NK_ASSERT(s);
+ if (!s || !b) return;
+ s->allocated = b->allocated;
+ s->size = b->memory.size;
+ s->needed = b->needed;
+ s->memory = b->memory.ptr;
+ s->calls = b->calls;
+}
+NK_API void*
+nk_buffer_memory(struct nk_buffer *buffer)
+{
+ NK_ASSERT(buffer);
+ if (!buffer) return 0;
+ return buffer->memory.ptr;
+}
+NK_API const void*
+nk_buffer_memory_const(const struct nk_buffer *buffer)
+{
+ NK_ASSERT(buffer);
+ if (!buffer) return 0;
+ return buffer->memory.ptr;
+}
+NK_API nk_size
+nk_buffer_total(struct nk_buffer *buffer)
+{
+ NK_ASSERT(buffer);
+ if (!buffer) return 0;
+ return buffer->memory.size;
+}
+
+
+
+
+
+/* ===============================================================
+ *
+ * STRING
+ *
+ * ===============================================================*/
+#ifdef NK_INCLUDE_DEFAULT_ALLOCATOR
+NK_API void
+nk_str_init_default(struct nk_str *str)
+{
+ struct nk_allocator alloc;
+ alloc.userdata.ptr = 0;
+ alloc.alloc = nk_malloc;
+ alloc.free = nk_mfree;
+ nk_buffer_init(&str->buffer, &alloc, 32);
+ str->len = 0;
+}
+#endif
+
+NK_API void
+nk_str_init(struct nk_str *str, const struct nk_allocator *alloc, nk_size size)
+{
+ nk_buffer_init(&str->buffer, alloc, size);
+ str->len = 0;
+}
+NK_API void
+nk_str_init_fixed(struct nk_str *str, void *memory, nk_size size)
+{
+ nk_buffer_init_fixed(&str->buffer, memory, size);
+ str->len = 0;
+}
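+/* Minimal usage sketch (assumes NK_INCLUDE_DEFAULT_ALLOCATOR is defined; the
+ * string literal is only an example):
+ *
+ *     struct nk_str s;
+ *     nk_str_init_default(&s);
+ *     nk_str_append_str_char(&s, "hello");    (5 runes, 5 bytes appended)
+ *     nk_str_free(&s);
+ */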
+NK_API int
+nk_str_append_text_char(struct nk_str *s, const char *str, int len)
+{
+ char *mem;
+ NK_ASSERT(s);
+ NK_ASSERT(str);
+ if (!s || !str || !len) return 0;
+ mem = (char*)nk_buffer_alloc(&s->buffer, NK_BUFFER_FRONT, (nk_size)len * sizeof(char), 0);
+ if (!mem) return 0;
+ NK_MEMCPY(mem, str, (nk_size)len * sizeof(char));
+ s->len += nk_utf_len(str, len);
+ return len;
+}
+NK_API int
+nk_str_append_str_char(struct nk_str *s, const char *str)
+{
+ return nk_str_append_text_char(s, str, nk_strlen(str));
+}
+NK_API int
+nk_str_append_text_utf8(struct nk_str *str, const char *text, int len)
+{
+ int i = 0;
+ int byte_len = 0;
+ nk_rune unicode;
+ if (!str || !text || !len) return 0;
+ for (i = 0; i < len; ++i)
+ byte_len += nk_utf_decode(text+byte_len, &unicode, 4);
+ nk_str_append_text_char(str, text, byte_len);
+ return len;
+}
+NK_API int
+nk_str_append_str_utf8(struct nk_str *str, const char *text)
+{
+    int byte_len = 0;
+    int num_runes = 0;
+    int glyph_len = 0;
+    nk_rune unicode;
+    if (!str || !text) return 0;
+
+    glyph_len = byte_len = nk_utf_decode(text+byte_len, &unicode, 4);
+    while (unicode != '\0' && glyph_len) {
+        glyph_len = nk_utf_decode(text+byte_len, &unicode, 4);
+        byte_len += glyph_len;
+        num_runes++;
+    }
+    nk_str_append_text_char(str, text, byte_len);
+    return num_runes;
+}
+NK_API int
+nk_str_append_text_runes(struct nk_str *str, const nk_rune *text, int len)
+{
+ int i = 0;
+ int byte_len = 0;
+ nk_glyph glyph;
+
+ NK_ASSERT(str);
+ if (!str || !text || !len) return 0;
+ for (i = 0; i < len; ++i) {
+ byte_len = nk_utf_encode(text[i], glyph, NK_UTF_SIZE);
+ if (!byte_len) break;
+ nk_str_append_text_char(str, glyph, byte_len);
+ }
+ return len;
+}
+NK_API int
+nk_str_append_str_runes(struct nk_str *str, const nk_rune *runes)
+{
+ int i = 0;
+ nk_glyph glyph;
+ int byte_len;
+ NK_ASSERT(str);
+ if (!str || !runes) return 0;
+ while (runes[i] != '\0') {
+ byte_len = nk_utf_encode(runes[i], glyph, NK_UTF_SIZE);
+ nk_str_append_text_char(str, glyph, byte_len);
+ i++;
+ }
+ return i;
+}
+NK_API int
+nk_str_insert_at_char(struct nk_str *s, int pos, const char *str, int len)
+{
+ int i;
+ void *mem;
+ char *src;
+ char *dst;
+
+ int copylen;
+ NK_ASSERT(s);
+ NK_ASSERT(str);
+ NK_ASSERT(len >= 0);
+ if (!s || !str || !len || (nk_size)pos > s->buffer.allocated) return 0;
+ if ((s->buffer.allocated + (nk_size)len >= s->buffer.memory.size) &&
+ (s->buffer.type == NK_BUFFER_FIXED)) return 0;
+
+ copylen = (int)s->buffer.allocated - pos;
+ if (!copylen) {
+ nk_str_append_text_char(s, str, len);
+ return 1;
+ }
+ mem = nk_buffer_alloc(&s->buffer, NK_BUFFER_FRONT, (nk_size)len * sizeof(char), 0);
+ if (!mem) return 0;
+
+ /* memmove */
+ NK_ASSERT(((int)pos + (int)len + ((int)copylen - 1)) >= 0);
+ NK_ASSERT(((int)pos + ((int)copylen - 1)) >= 0);
+ dst = nk_ptr_add(char, s->buffer.memory.ptr, pos + len + (copylen - 1));
+ src = nk_ptr_add(char, s->buffer.memory.ptr, pos + (copylen-1));
+ for (i = 0; i < copylen; ++i) *dst-- = *src--;
+ mem = nk_ptr_add(void, s->buffer.memory.ptr, pos);
+ NK_MEMCPY(mem, str, (nk_size)len * sizeof(char));
+ s->len = nk_utf_len((char *)s->buffer.memory.ptr, (int)s->buffer.allocated);
+ return 1;
+}
+NK_API int
+nk_str_insert_at_rune(struct nk_str *str, int pos, const char *cstr, int len)
+{
+ int glyph_len;
+ nk_rune unicode;
+ const char *begin;
+ const char *buffer;
+
+ NK_ASSERT(str);
+ NK_ASSERT(cstr);
+ NK_ASSERT(len);
+ if (!str || !cstr || !len) return 0;
+ begin = nk_str_at_rune(str, pos, &unicode, &glyph_len);
+ if (!str->len)
+ return nk_str_append_text_char(str, cstr, len);
+ buffer = nk_str_get_const(str);
+ if (!begin) return 0;
+ return nk_str_insert_at_char(str, (int)(begin - buffer), cstr, len);
+}
+NK_API int
+nk_str_insert_text_char(struct nk_str *str, int pos, const char *text, int len)
+{
+ return nk_str_insert_text_utf8(str, pos, text, len);
+}
+NK_API int
+nk_str_insert_str_char(struct nk_str *str, int pos, const char *text)
+{
+ return nk_str_insert_text_utf8(str, pos, text, nk_strlen(text));
+}
+NK_API int
+nk_str_insert_text_utf8(struct nk_str *str, int pos, const char *text, int len)
+{
+ int i = 0;
+ int byte_len = 0;
+ nk_rune unicode;
+
+ NK_ASSERT(str);
+ NK_ASSERT(text);
+ if (!str || !text || !len) return 0;
+ for (i = 0; i < len; ++i)
+ byte_len += nk_utf_decode(text+byte_len, &unicode, 4);
+ nk_str_insert_at_rune(str, pos, text, byte_len);
+ return len;
+}
+NK_API int
+nk_str_insert_str_utf8(struct nk_str *str, int pos, const char *text)
+{
+    int byte_len = 0;
+    int num_runes = 0;
+    int glyph_len = 0;
+    nk_rune unicode;
+    if (!str || !text) return 0;
+
+    glyph_len = byte_len = nk_utf_decode(text+byte_len, &unicode, 4);
+    while (unicode != '\0' && glyph_len) {
+        glyph_len = nk_utf_decode(text+byte_len, &unicode, 4);
+        byte_len += glyph_len;
+        num_runes++;
+    }
+    nk_str_insert_at_rune(str, pos, text, byte_len);
+    return num_runes;
+}
+NK_API int
+nk_str_insert_text_runes(struct nk_str *str, int pos, const nk_rune *runes, int len)
+{
+ int i = 0;
+ int byte_len = 0;
+ nk_glyph glyph;
+
+ NK_ASSERT(str);
+ if (!str || !runes || !len) return 0;
+ for (i = 0; i < len; ++i) {
+ byte_len = nk_utf_encode(runes[i], glyph, NK_UTF_SIZE);
+ if (!byte_len) break;
+ nk_str_insert_at_rune(str, pos+i, glyph, byte_len);
+ }
+ return len;
+}
+NK_API int
+nk_str_insert_str_runes(struct nk_str *str, int pos, const nk_rune *runes)
+{
+ int i = 0;
+ nk_glyph glyph;
+ int byte_len;
+ NK_ASSERT(str);
+ if (!str || !runes) return 0;
+ while (runes[i] != '\0') {
+ byte_len = nk_utf_encode(runes[i], glyph, NK_UTF_SIZE);
+ nk_str_insert_at_rune(str, pos+i, glyph, byte_len);
+ i++;
+ }
+ return i;
+}
+NK_API void
+nk_str_remove_chars(struct nk_str *s, int len)
+{
+ NK_ASSERT(s);
+ NK_ASSERT(len >= 0);
+ if (!s || len < 0 || (nk_size)len > s->buffer.allocated) return;
+ NK_ASSERT(((int)s->buffer.allocated - (int)len) >= 0);
+ s->buffer.allocated -= (nk_size)len;
+ s->len = nk_utf_len((char *)s->buffer.memory.ptr, (int)s->buffer.allocated);
+}
+NK_API void
+nk_str_remove_runes(struct nk_str *str, int len)
+{
+ int index;
+ const char *begin;
+ const char *end;
+ nk_rune unicode;
+
+ NK_ASSERT(str);
+ NK_ASSERT(len >= 0);
+ if (!str || len < 0) return;
+ if (len >= str->len) {
+ str->len = 0;
+ return;
+ }
+
+ index = str->len - len;
+ begin = nk_str_at_rune(str, index, &unicode, &len);
+ end = (const char*)str->buffer.memory.ptr + str->buffer.allocated;
+ nk_str_remove_chars(str, (int)(end-begin)+1);
+}
+NK_API void
+nk_str_delete_chars(struct nk_str *s, int pos, int len)
+{
+ NK_ASSERT(s);
+ if (!s || !len || (nk_size)pos > s->buffer.allocated ||
+ (nk_size)(pos + len) > s->buffer.allocated) return;
+
+ if ((nk_size)(pos + len) < s->buffer.allocated) {
+ /* memmove */
+ char *dst = nk_ptr_add(char, s->buffer.memory.ptr, pos);
+ char *src = nk_ptr_add(char, s->buffer.memory.ptr, pos + len);
+ NK_MEMCPY(dst, src, s->buffer.allocated - (nk_size)(pos + len));
+ NK_ASSERT(((int)s->buffer.allocated - (int)len) >= 0);
+ s->buffer.allocated -= (nk_size)len;
+ } else nk_str_remove_chars(s, len);
+ s->len = nk_utf_len((char *)s->buffer.memory.ptr, (int)s->buffer.allocated);
+}
+NK_API void
+nk_str_delete_runes(struct nk_str *s, int pos, int len)
+{
+ char *temp;
+ nk_rune unicode;
+ char *begin;
+ char *end;
+ int unused;
+
+ NK_ASSERT(s);
+ NK_ASSERT(s->len >= pos + len);
+ if (s->len < pos + len)
+ len = NK_CLAMP(0, (s->len - pos), s->len);
+ if (!len) return;
+
+ temp = (char *)s->buffer.memory.ptr;
+ begin = nk_str_at_rune(s, pos, &unicode, &unused);
+ if (!begin) return;
+ s->buffer.memory.ptr = begin;
+ end = nk_str_at_rune(s, len, &unicode, &unused);
+ s->buffer.memory.ptr = temp;
+ if (!end) return;
+ nk_str_delete_chars(s, (int)(begin - temp), (int)(end - begin));
+}
+NK_API char*
+nk_str_at_char(struct nk_str *s, int pos)
+{
+ NK_ASSERT(s);
+ if (!s || pos > (int)s->buffer.allocated) return 0;
+ return nk_ptr_add(char, s->buffer.memory.ptr, pos);
+}
+NK_API char*
+nk_str_at_rune(struct nk_str *str, int pos, nk_rune *unicode, int *len)
+{
+ int i = 0;
+ int src_len = 0;
+ int glyph_len = 0;
+ char *text;
+ int text_len;
+
+ NK_ASSERT(str);
+ NK_ASSERT(unicode);
+ NK_ASSERT(len);
+
+ if (!str || !unicode || !len) return 0;
+ if (pos < 0) {
+ *unicode = 0;
+ *len = 0;
+ return 0;
+ }
+
+ text = (char*)str->buffer.memory.ptr;
+ text_len = (int)str->buffer.allocated;
+ glyph_len = nk_utf_decode(text, unicode, text_len);
+ while (glyph_len) {
+ if (i == pos) {
+ *len = glyph_len;
+ break;
+ }
+
+ i++;
+ src_len = src_len + glyph_len;
+ glyph_len = nk_utf_decode(text + src_len, unicode, text_len - src_len);
+ }
+ if (i != pos) return 0;
+ return text + src_len;
+}
+NK_API const char*
+nk_str_at_char_const(const struct nk_str *s, int pos)
+{
+ NK_ASSERT(s);
+ if (!s || pos > (int)s->buffer.allocated) return 0;
+ return nk_ptr_add(char, s->buffer.memory.ptr, pos);
+}
+NK_API const char*
+nk_str_at_const(const struct nk_str *str, int pos, nk_rune *unicode, int *len)
+{
+ int i = 0;
+ int src_len = 0;
+ int glyph_len = 0;
+ char *text;
+ int text_len;
+
+ NK_ASSERT(str);
+ NK_ASSERT(unicode);
+ NK_ASSERT(len);
+
+ if (!str || !unicode || !len) return 0;
+ if (pos < 0) {
+ *unicode = 0;
+ *len = 0;
+ return 0;
+ }
+
+ text = (char*)str->buffer.memory.ptr;
+ text_len = (int)str->buffer.allocated;
+ glyph_len = nk_utf_decode(text, unicode, text_len);
+ while (glyph_len) {
+ if (i == pos) {
+ *len = glyph_len;
+ break;
+ }
+
+ i++;
+ src_len = src_len + glyph_len;
+ glyph_len = nk_utf_decode(text + src_len, unicode, text_len - src_len);
+ }
+ if (i != pos) return 0;
+ return text + src_len;
+}
+NK_API nk_rune
+nk_str_rune_at(const struct nk_str *str, int pos)
+{
+ int len;
+ nk_rune unicode = 0;
+ nk_str_at_const(str, pos, &unicode, &len);
+ return unicode;
+}
+NK_API char*
+nk_str_get(struct nk_str *s)
+{
+ NK_ASSERT(s);
+ if (!s || !s->len || !s->buffer.allocated) return 0;
+ return (char*)s->buffer.memory.ptr;
+}
+NK_API const char*
+nk_str_get_const(const struct nk_str *s)
+{
+ NK_ASSERT(s);
+ if (!s || !s->len || !s->buffer.allocated) return 0;
+ return (const char*)s->buffer.memory.ptr;
+}
+NK_API int
+nk_str_len(struct nk_str *s)
+{
+ NK_ASSERT(s);
+ if (!s || !s->len || !s->buffer.allocated) return 0;
+ return s->len;
+}
+NK_API int
+nk_str_len_char(struct nk_str *s)
+{
+ NK_ASSERT(s);
+ if (!s || !s->len || !s->buffer.allocated) return 0;
+ return (int)s->buffer.allocated;
+}
+NK_API void
+nk_str_clear(struct nk_str *str)
+{
+ NK_ASSERT(str);
+ nk_buffer_clear(&str->buffer);
+ str->len = 0;
+}
+NK_API void
+nk_str_free(struct nk_str *str)
+{
+ NK_ASSERT(str);
+ nk_buffer_free(&str->buffer);
+ str->len = 0;
+}
+
+
+
+
+
+/* ==============================================================
+ *
+ * DRAW
+ *
+ * ===============================================================*/
+NK_LIB void
+nk_command_buffer_init(struct nk_command_buffer *cb,
+ struct nk_buffer *b, enum nk_command_clipping clip)
+{
+ NK_ASSERT(cb);
+ NK_ASSERT(b);
+ if (!cb || !b) return;
+ cb->base = b;
+ cb->use_clipping = (int)clip;
+ cb->begin = b->allocated;
+ cb->end = b->allocated;
+ cb->last = b->allocated;
+}
+NK_LIB void
+nk_command_buffer_reset(struct nk_command_buffer *b)
+{
+ NK_ASSERT(b);
+ if (!b) return;
+ b->begin = 0;
+ b->end = 0;
+ b->last = 0;
+ b->clip = nk_null_rect;
+#ifdef NK_INCLUDE_COMMAND_USERDATA
+ b->userdata.ptr = 0;
+#endif
+}
+NK_LIB void*
+nk_command_buffer_push(struct nk_command_buffer* b,
+ enum nk_command_type t, nk_size size)
+{
+ NK_STORAGE const nk_size align = NK_ALIGNOF(struct nk_command);
+ struct nk_command *cmd;
+ nk_size alignment;
+ void *unaligned;
+ void *memory;
+
+ NK_ASSERT(b);
+ NK_ASSERT(b->base);
+ if (!b) return 0;
+ cmd = (struct nk_command*)nk_buffer_alloc(b->base,NK_BUFFER_FRONT,size,align);
+ if (!cmd) return 0;
+
+ /* make sure the offset to the next command is aligned */
+ b->last = (nk_size)((nk_byte*)cmd - (nk_byte*)b->base->memory.ptr);
+ unaligned = (nk_byte*)cmd + size;
+ memory = NK_ALIGN_PTR(unaligned, align);
+ alignment = (nk_size)((nk_byte*)memory - (nk_byte*)unaligned);
+#ifdef NK_ZERO_COMMAND_MEMORY
+ NK_MEMSET(cmd, 0, size + alignment);
+#endif
+
+ cmd->type = t;
+ cmd->next = b->base->allocated + alignment;
+#ifdef NK_INCLUDE_COMMAND_USERDATA
+ cmd->userdata = b->userdata;
+#endif
+ b->end = cmd->next;
+ return cmd;
+}
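+/* Each pushed command stores, in `next`, the buffer offset at which the
+ * following command will start, so a backend can walk the canvas from
+ * `begin` to `end` without knowing individual command sizes. */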
+NK_API void
+nk_push_scissor(struct nk_command_buffer *b, struct nk_rect r)
+{
+ struct nk_command_scissor *cmd;
+ NK_ASSERT(b);
+ if (!b) return;
+
+ b->clip.x = r.x;
+ b->clip.y = r.y;
+ b->clip.w = r.w;
+ b->clip.h = r.h;
+ cmd = (struct nk_command_scissor*)
+ nk_command_buffer_push(b, NK_COMMAND_SCISSOR, sizeof(*cmd));
+
+ if (!cmd) return;
+ cmd->x = (short)r.x;
+ cmd->y = (short)r.y;
+ cmd->w = (unsigned short)NK_MAX(0, r.w);
+ cmd->h = (unsigned short)NK_MAX(0, r.h);
+}
+NK_API void
+nk_stroke_line(struct nk_command_buffer *b, float x0, float y0,
+ float x1, float y1, float line_thickness, struct nk_color c)
+{
+ struct nk_command_line *cmd;
+ NK_ASSERT(b);
+ if (!b || line_thickness <= 0) return;
+ cmd = (struct nk_command_line*)
+ nk_command_buffer_push(b, NK_COMMAND_LINE, sizeof(*cmd));
+ if (!cmd) return;
+ cmd->line_thickness = (unsigned short)line_thickness;
+ cmd->begin.x = (short)x0;
+ cmd->begin.y = (short)y0;
+ cmd->end.x = (short)x1;
+ cmd->end.y = (short)y1;
+ cmd->color = c;
+}
+NK_API void
+nk_stroke_curve(struct nk_command_buffer *b, float ax, float ay,
+ float ctrl0x, float ctrl0y, float ctrl1x, float ctrl1y,
+ float bx, float by, float line_thickness, struct nk_color col)
+{
+ struct nk_command_curve *cmd;
+ NK_ASSERT(b);
+ if (!b || col.a == 0 || line_thickness <= 0) return;
+
+ cmd = (struct nk_command_curve*)
+ nk_command_buffer_push(b, NK_COMMAND_CURVE, sizeof(*cmd));
+ if (!cmd) return;
+ cmd->line_thickness = (unsigned short)line_thickness;
+ cmd->begin.x = (short)ax;
+ cmd->begin.y = (short)ay;
+ cmd->ctrl[0].x = (short)ctrl0x;
+ cmd->ctrl[0].y = (short)ctrl0y;
+ cmd->ctrl[1].x = (short)ctrl1x;
+ cmd->ctrl[1].y = (short)ctrl1y;
+ cmd->end.x = (short)bx;
+ cmd->end.y = (short)by;
+ cmd->color = col;
+}
+NK_API void
+nk_stroke_rect(struct nk_command_buffer *b, struct nk_rect rect,
+ float rounding, float line_thickness, struct nk_color c)
+{
+ struct nk_command_rect *cmd;
+ NK_ASSERT(b);
+ if (!b || c.a == 0 || rect.w == 0 || rect.h == 0 || line_thickness <= 0) return;
+ if (b->use_clipping) {
+ const struct nk_rect *clip = &b->clip;
+ if (!NK_INTERSECT(rect.x, rect.y, rect.w, rect.h,
+ clip->x, clip->y, clip->w, clip->h)) return;
+ }
+ cmd = (struct nk_command_rect*)
+ nk_command_buffer_push(b, NK_COMMAND_RECT, sizeof(*cmd));
+ if (!cmd) return;
+ cmd->rounding = (unsigned short)rounding;
+ cmd->line_thickness = (unsigned short)line_thickness;
+ cmd->x = (short)rect.x;
+ cmd->y = (short)rect.y;
+ cmd->w = (unsigned short)NK_MAX(0, rect.w);
+ cmd->h = (unsigned short)NK_MAX(0, rect.h);
+ cmd->color = c;
+}
+NK_API void
+nk_fill_rect(struct nk_command_buffer *b, struct nk_rect rect,
+ float rounding, struct nk_color c)
+{
+ struct nk_command_rect_filled *cmd;
+ NK_ASSERT(b);
+ if (!b || c.a == 0 || rect.w == 0 || rect.h == 0) return;
+ if (b->use_clipping) {
+ const struct nk_rect *clip = &b->clip;
+ if (!NK_INTERSECT(rect.x, rect.y, rect.w, rect.h,
+ clip->x, clip->y, clip->w, clip->h)) return;
+ }
+
+ cmd = (struct nk_command_rect_filled*)
+ nk_command_buffer_push(b, NK_COMMAND_RECT_FILLED, sizeof(*cmd));
+ if (!cmd) return;
+ cmd->rounding = (unsigned short)rounding;
+ cmd->x = (short)rect.x;
+ cmd->y = (short)rect.y;
+ cmd->w = (unsigned short)NK_MAX(0, rect.w);
+ cmd->h = (unsigned short)NK_MAX(0, rect.h);
+ cmd->color = c;
+}
+NK_API void
+nk_fill_rect_multi_color(struct nk_command_buffer *b, struct nk_rect rect,
+ struct nk_color left, struct nk_color top, struct nk_color right,
+ struct nk_color bottom)
+{
+ struct nk_command_rect_multi_color *cmd;
+ NK_ASSERT(b);
+ if (!b || rect.w == 0 || rect.h == 0) return;
+ if (b->use_clipping) {
+ const struct nk_rect *clip = &b->clip;
+ if (!NK_INTERSECT(rect.x, rect.y, rect.w, rect.h,
+ clip->x, clip->y, clip->w, clip->h)) return;
+ }
+
+ cmd = (struct nk_command_rect_multi_color*)
+ nk_command_buffer_push(b, NK_COMMAND_RECT_MULTI_COLOR, sizeof(*cmd));
+ if (!cmd) return;
+ cmd->x = (short)rect.x;
+ cmd->y = (short)rect.y;
+ cmd->w = (unsigned short)NK_MAX(0, rect.w);
+ cmd->h = (unsigned short)NK_MAX(0, rect.h);
+ cmd->left = left;
+ cmd->top = top;
+ cmd->right = right;
+ cmd->bottom = bottom;
+}
+NK_API void
+nk_stroke_circle(struct nk_command_buffer *b, struct nk_rect r,
+ float line_thickness, struct nk_color c)
+{
+ struct nk_command_circle *cmd;
+ if (!b || r.w == 0 || r.h == 0 || line_thickness <= 0) return;
+ if (b->use_clipping) {
+ const struct nk_rect *clip = &b->clip;
+ if (!NK_INTERSECT(r.x, r.y, r.w, r.h, clip->x, clip->y, clip->w, clip->h))
+ return;
+ }
+
+ cmd = (struct nk_command_circle*)
+ nk_command_buffer_push(b, NK_COMMAND_CIRCLE, sizeof(*cmd));
+ if (!cmd) return;
+ cmd->line_thickness = (unsigned short)line_thickness;
+ cmd->x = (short)r.x;
+ cmd->y = (short)r.y;
+ cmd->w = (unsigned short)NK_MAX(r.w, 0);
+ cmd->h = (unsigned short)NK_MAX(r.h, 0);
+ cmd->color = c;
+}
+NK_API void
+nk_fill_circle(struct nk_command_buffer *b, struct nk_rect r, struct nk_color c)
+{
+ struct nk_command_circle_filled *cmd;
+ NK_ASSERT(b);
+ if (!b || c.a == 0 || r.w == 0 || r.h == 0) return;
+ if (b->use_clipping) {
+ const struct nk_rect *clip = &b->clip;
+ if (!NK_INTERSECT(r.x, r.y, r.w, r.h, clip->x, clip->y, clip->w, clip->h))
+ return;
+ }
+
+ cmd = (struct nk_command_circle_filled*)
+ nk_command_buffer_push(b, NK_COMMAND_CIRCLE_FILLED, sizeof(*cmd));
+ if (!cmd) return;
+ cmd->x = (short)r.x;
+ cmd->y = (short)r.y;
+ cmd->w = (unsigned short)NK_MAX(r.w, 0);
+ cmd->h = (unsigned short)NK_MAX(r.h, 0);
+ cmd->color = c;
+}
+NK_API void
+nk_stroke_arc(struct nk_command_buffer *b, float cx, float cy, float radius,
+ float a_min, float a_max, float line_thickness, struct nk_color c)
+{
+ struct nk_command_arc *cmd;
+ if (!b || c.a == 0 || line_thickness <= 0) return;
+ cmd = (struct nk_command_arc*)
+ nk_command_buffer_push(b, NK_COMMAND_ARC, sizeof(*cmd));
+ if (!cmd) return;
+ cmd->line_thickness = (unsigned short)line_thickness;
+ cmd->cx = (short)cx;
+ cmd->cy = (short)cy;
+ cmd->r = (unsigned short)radius;
+ cmd->a[0] = a_min;
+ cmd->a[1] = a_max;
+ cmd->color = c;
+}
+NK_API void
+nk_fill_arc(struct nk_command_buffer *b, float cx, float cy, float radius,
+ float a_min, float a_max, struct nk_color c)
+{
+ struct nk_command_arc_filled *cmd;
+ NK_ASSERT(b);
+ if (!b || c.a == 0) return;
+ cmd = (struct nk_command_arc_filled*)
+ nk_command_buffer_push(b, NK_COMMAND_ARC_FILLED, sizeof(*cmd));
+ if (!cmd) return;
+ cmd->cx = (short)cx;
+ cmd->cy = (short)cy;
+ cmd->r = (unsigned short)radius;
+ cmd->a[0] = a_min;
+ cmd->a[1] = a_max;
+ cmd->color = c;
+}
+NK_API void
+nk_stroke_triangle(struct nk_command_buffer *b, float x0, float y0, float x1,
+ float y1, float x2, float y2, float line_thickness, struct nk_color c)
+{
+ struct nk_command_triangle *cmd;
+ NK_ASSERT(b);
+ if (!b || c.a == 0 || line_thickness <= 0) return;
+ if (b->use_clipping) {
+ const struct nk_rect *clip = &b->clip;
+ if (!NK_INBOX(x0, y0, clip->x, clip->y, clip->w, clip->h) &&
+ !NK_INBOX(x1, y1, clip->x, clip->y, clip->w, clip->h) &&
+ !NK_INBOX(x2, y2, clip->x, clip->y, clip->w, clip->h))
+ return;
+ }
+
+ cmd = (struct nk_command_triangle*)
+ nk_command_buffer_push(b, NK_COMMAND_TRIANGLE, sizeof(*cmd));
+ if (!cmd) return;
+ cmd->line_thickness = (unsigned short)line_thickness;
+ cmd->a.x = (short)x0;
+ cmd->a.y = (short)y0;
+ cmd->b.x = (short)x1;
+ cmd->b.y = (short)y1;
+ cmd->c.x = (short)x2;
+ cmd->c.y = (short)y2;
+ cmd->color = c;
+}
+NK_API void
+nk_fill_triangle(struct nk_command_buffer *b, float x0, float y0, float x1,
+ float y1, float x2, float y2, struct nk_color c)
+{
+ struct nk_command_triangle_filled *cmd;
+ NK_ASSERT(b);
+    if (!b || c.a == 0) return;
+ if (b->use_clipping) {
+ const struct nk_rect *clip = &b->clip;
+ if (!NK_INBOX(x0, y0, clip->x, clip->y, clip->w, clip->h) &&
+ !NK_INBOX(x1, y1, clip->x, clip->y, clip->w, clip->h) &&
+ !NK_INBOX(x2, y2, clip->x, clip->y, clip->w, clip->h))
+ return;
+ }
+
+ cmd = (struct nk_command_triangle_filled*)
+ nk_command_buffer_push(b, NK_COMMAND_TRIANGLE_FILLED, sizeof(*cmd));
+ if (!cmd) return;
+ cmd->a.x = (short)x0;
+ cmd->a.y = (short)y0;
+ cmd->b.x = (short)x1;
+ cmd->b.y = (short)y1;
+ cmd->c.x = (short)x2;
+ cmd->c.y = (short)y2;
+ cmd->color = c;
+}
+NK_API void
+nk_stroke_polygon(struct nk_command_buffer *b, float *points, int point_count,
+ float line_thickness, struct nk_color col)
+{
+ int i;
+ nk_size size = 0;
+ struct nk_command_polygon *cmd;
+
+ NK_ASSERT(b);
+ if (!b || col.a == 0 || line_thickness <= 0) return;
+ size = sizeof(*cmd) + sizeof(short) * 2 * (nk_size)point_count;
+ cmd = (struct nk_command_polygon*) nk_command_buffer_push(b, NK_COMMAND_POLYGON, size);
+ if (!cmd) return;
+ cmd->color = col;
+ cmd->line_thickness = (unsigned short)line_thickness;
+ cmd->point_count = (unsigned short)point_count;
+ for (i = 0; i < point_count; ++i) {
+ cmd->points[i].x = (short)points[i*2];
+ cmd->points[i].y = (short)points[i*2+1];
+ }
+}
+NK_API void
+nk_fill_polygon(struct nk_command_buffer *b, float *points, int point_count,
+ struct nk_color col)
+{
+ int i;
+ nk_size size = 0;
+ struct nk_command_polygon_filled *cmd;
+
+ NK_ASSERT(b);
+ if (!b || col.a == 0) return;
+ size = sizeof(*cmd) + sizeof(short) * 2 * (nk_size)point_count;
+ cmd = (struct nk_command_polygon_filled*)
+ nk_command_buffer_push(b, NK_COMMAND_POLYGON_FILLED, size);
+ if (!cmd) return;
+ cmd->color = col;
+ cmd->point_count = (unsigned short)point_count;
+ for (i = 0; i < point_count; ++i) {
+ cmd->points[i].x = (short)points[i*2+0];
+ cmd->points[i].y = (short)points[i*2+1];
+ }
+}
+NK_API void
+nk_stroke_polyline(struct nk_command_buffer *b, float *points, int point_count,
+ float line_thickness, struct nk_color col)
+{
+ int i;
+ nk_size size = 0;
+ struct nk_command_polyline *cmd;
+
+ NK_ASSERT(b);
+ if (!b || col.a == 0 || line_thickness <= 0) return;
+ size = sizeof(*cmd) + sizeof(short) * 2 * (nk_size)point_count;
+ cmd = (struct nk_command_polyline*) nk_command_buffer_push(b, NK_COMMAND_POLYLINE, size);
+ if (!cmd) return;
+ cmd->color = col;
+ cmd->point_count = (unsigned short)point_count;
+ cmd->line_thickness = (unsigned short)line_thickness;
+ for (i = 0; i < point_count; ++i) {
+ cmd->points[i].x = (short)points[i*2];
+ cmd->points[i].y = (short)points[i*2+1];
+ }
+}
+NK_API void
+nk_draw_image(struct nk_command_buffer *b, struct nk_rect r,
+ const struct nk_image *img, struct nk_color col)
+{
+ struct nk_command_image *cmd;
+ NK_ASSERT(b);
+ if (!b) return;
+ if (b->use_clipping) {
+ const struct nk_rect *c = &b->clip;
+ if (c->w == 0 || c->h == 0 || !NK_INTERSECT(r.x, r.y, r.w, r.h, c->x, c->y, c->w, c->h))
+ return;
+ }
+
+ cmd = (struct nk_command_image*)
+ nk_command_buffer_push(b, NK_COMMAND_IMAGE, sizeof(*cmd));
+ if (!cmd) return;
+ cmd->x = (short)r.x;
+ cmd->y = (short)r.y;
+ cmd->w = (unsigned short)NK_MAX(0, r.w);
+ cmd->h = (unsigned short)NK_MAX(0, r.h);
+ cmd->img = *img;
+ cmd->col = col;
+}
+NK_API void
+nk_push_custom(struct nk_command_buffer *b, struct nk_rect r,
+ nk_command_custom_callback cb, nk_handle usr)
+{
+ struct nk_command_custom *cmd;
+ NK_ASSERT(b);
+ if (!b) return;
+ if (b->use_clipping) {
+ const struct nk_rect *c = &b->clip;
+ if (c->w == 0 || c->h == 0 || !NK_INTERSECT(r.x, r.y, r.w, r.h, c->x, c->y, c->w, c->h))
+ return;
+ }
+
+ cmd = (struct nk_command_custom*)
+ nk_command_buffer_push(b, NK_COMMAND_CUSTOM, sizeof(*cmd));
+ if (!cmd) return;
+ cmd->x = (short)r.x;
+ cmd->y = (short)r.y;
+ cmd->w = (unsigned short)NK_MAX(0, r.w);
+ cmd->h = (unsigned short)NK_MAX(0, r.h);
+ cmd->callback_data = usr;
+ cmd->callback = cb;
+}
+NK_API void
+nk_draw_text(struct nk_command_buffer *b, struct nk_rect r,
+ const char *string, int length, const struct nk_user_font *font,
+ struct nk_color bg, struct nk_color fg)
+{
+ float text_width = 0;
+ struct nk_command_text *cmd;
+
+ NK_ASSERT(b);
+ NK_ASSERT(font);
+ if (!b || !string || !length || (bg.a == 0 && fg.a == 0)) return;
+ if (b->use_clipping) {
+ const struct nk_rect *c = &b->clip;
+ if (c->w == 0 || c->h == 0 || !NK_INTERSECT(r.x, r.y, r.w, r.h, c->x, c->y, c->w, c->h))
+ return;
+ }
+
+ /* make sure text fits inside bounds */
+ text_width = font->width(font->userdata, font->height, string, length);
+ if (text_width > r.w){
+ int glyphs = 0;
+ float txt_width = (float)text_width;
+ length = nk_text_clamp(font, string, length, r.w, &glyphs, &txt_width, 0,0);
+ }
+
+ if (!length) return;
+ cmd = (struct nk_command_text*)
+ nk_command_buffer_push(b, NK_COMMAND_TEXT, sizeof(*cmd) + (nk_size)(length + 1));
+ if (!cmd) return;
+ cmd->x = (short)r.x;
+ cmd->y = (short)r.y;
+ cmd->w = (unsigned short)r.w;
+ cmd->h = (unsigned short)r.h;
+ cmd->background = bg;
+ cmd->foreground = fg;
+ cmd->font = font;
+ cmd->length = length;
+ cmd->height = font->height;
+ NK_MEMCPY(cmd->string, string, (nk_size)length);
+ cmd->string[length] = '\0';
+}
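+/* Illustrative sketch (assumption: `ctx` is an initialized nk_context and the
+ * calls happen between nk_begin()/nk_end() for the current window):
+ *
+ *     struct nk_command_buffer *canvas = nk_window_get_canvas(ctx);
+ *     nk_fill_rect(canvas, nk_rect(10, 10, 100, 40), 4.0f, nk_rgb(200, 30, 30));
+ *     nk_stroke_line(canvas, 10, 60, 110, 60, 2.0f, nk_rgb(255, 255, 255));
+ */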
+
+
+
+
+
+/* ===============================================================
+ *
+ * VERTEX
+ *
+ * ===============================================================*/
+#ifdef NK_INCLUDE_VERTEX_BUFFER_OUTPUT
+NK_API void
+nk_draw_list_init(struct nk_draw_list *list)
+{
+ nk_size i = 0;
+ NK_ASSERT(list);
+ if (!list) return;
+ nk_zero(list, sizeof(*list));
+ for (i = 0; i < NK_LEN(list->circle_vtx); ++i) {
+ const float a = ((float)i / (float)NK_LEN(list->circle_vtx)) * 2 * NK_PI;
+ list->circle_vtx[i].x = (float)NK_COS(a);
+ list->circle_vtx[i].y = (float)NK_SIN(a);
+ }
+}
+NK_API void
+nk_draw_list_setup(struct nk_draw_list *canvas, const struct nk_convert_config *config,
+ struct nk_buffer *cmds, struct nk_buffer *vertices, struct nk_buffer *elements,
+ enum nk_anti_aliasing line_aa, enum nk_anti_aliasing shape_aa)
+{
+ NK_ASSERT(canvas);
+ NK_ASSERT(config);
+ NK_ASSERT(cmds);
+ NK_ASSERT(vertices);
+ NK_ASSERT(elements);
+ if (!canvas || !config || !cmds || !vertices || !elements)
+ return;
+
+ canvas->buffer = cmds;
+ canvas->config = *config;
+ canvas->elements = elements;
+ canvas->vertices = vertices;
+ canvas->line_AA = line_aa;
+ canvas->shape_AA = shape_aa;
+ canvas->clip_rect = nk_null_rect;
+
+ canvas->cmd_offset = 0;
+ canvas->element_count = 0;
+ canvas->vertex_count = 0;
+ canvas->cmd_offset = 0;
+ canvas->cmd_count = 0;
+ canvas->path_count = 0;
+}
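+/* Note: applications normally do not drive the draw-list functions directly;
+ * nk_convert() uses them to translate the command buffer into the supplied
+ * vertex, element and draw-command buffers for a vertex-based backend. */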
+NK_API const struct nk_draw_command*
+nk__draw_list_begin(const struct nk_draw_list *canvas, const struct nk_buffer *buffer)
+{
+ nk_byte *memory;
+ nk_size offset;
+ const struct nk_draw_command *cmd;
+
+ NK_ASSERT(buffer);
+ if (!buffer || !buffer->size || !canvas->cmd_count)
+ return 0;
+
+ memory = (nk_byte*)buffer->memory.ptr;
+ offset = buffer->memory.size - canvas->cmd_offset;
+ cmd = nk_ptr_add(const struct nk_draw_command, memory, offset);
+ return cmd;
+}
+NK_API const struct nk_draw_command*
+nk__draw_list_end(const struct nk_draw_list *canvas, const struct nk_buffer *buffer)
+{
+ nk_size size;
+ nk_size offset;
+ nk_byte *memory;
+ const struct nk_draw_command *end;
+
+ NK_ASSERT(buffer);
+ NK_ASSERT(canvas);
+ if (!buffer || !canvas)
+ return 0;
+
+ memory = (nk_byte*)buffer->memory.ptr;
+ size = buffer->memory.size;
+ offset = size - canvas->cmd_offset;
+ end = nk_ptr_add(const struct nk_draw_command, memory, offset);
+ end -= (canvas->cmd_count-1);
+ return end;
+}
+NK_API const struct nk_draw_command*
+nk__draw_list_next(const struct nk_draw_command *cmd,
+ const struct nk_buffer *buffer, const struct nk_draw_list *canvas)
+{
+ const struct nk_draw_command *end;
+ NK_ASSERT(buffer);
+ NK_ASSERT(canvas);
+ if (!cmd || !buffer || !canvas)
+ return 0;
+
+ end = nk__draw_list_end(canvas, buffer);
+ if (cmd <= end) return 0;
+ return (cmd-1);
+}
+NK_INTERN struct nk_vec2*
+nk_draw_list_alloc_path(struct nk_draw_list *list, int count)
+{
+ struct nk_vec2 *points;
+ NK_STORAGE const nk_size point_align = NK_ALIGNOF(struct nk_vec2);
+ NK_STORAGE const nk_size point_size = sizeof(struct nk_vec2);
+ points = (struct nk_vec2*)
+ nk_buffer_alloc(list->buffer, NK_BUFFER_FRONT,
+ point_size * (nk_size)count, point_align);
+
+ if (!points) return 0;
+ if (!list->path_offset) {
+ void *memory = nk_buffer_memory(list->buffer);
+ list->path_offset = (unsigned int)((nk_byte*)points - (nk_byte*)memory);
+ }
+ list->path_count += (unsigned int)count;
+ return points;
+}
+NK_INTERN struct nk_vec2
+nk_draw_list_path_last(struct nk_draw_list *list)
+{
+ void *memory;
+ struct nk_vec2 *point;
+ NK_ASSERT(list->path_count);
+ memory = nk_buffer_memory(list->buffer);
+ point = nk_ptr_add(struct nk_vec2, memory, list->path_offset);
+ point += (list->path_count-1);
+ return *point;
+}
+NK_INTERN struct nk_draw_command*
+nk_draw_list_push_command(struct nk_draw_list *list, struct nk_rect clip,
+ nk_handle texture)
+{
+ NK_STORAGE const nk_size cmd_align = NK_ALIGNOF(struct nk_draw_command);
+ NK_STORAGE const nk_size cmd_size = sizeof(struct nk_draw_command);
+ struct nk_draw_command *cmd;
+
+ NK_ASSERT(list);
+ cmd = (struct nk_draw_command*)
+ nk_buffer_alloc(list->buffer, NK_BUFFER_BACK, cmd_size, cmd_align);
+
+ if (!cmd) return 0;
+ if (!list->cmd_count) {
+ nk_byte *memory = (nk_byte*)nk_buffer_memory(list->buffer);
+ nk_size total = nk_buffer_total(list->buffer);
+ memory = nk_ptr_add(nk_byte, memory, total);
+ list->cmd_offset = (nk_size)(memory - (nk_byte*)cmd);
+ }
+
+ cmd->elem_count = 0;
+ cmd->clip_rect = clip;
+ cmd->texture = texture;
+#ifdef NK_INCLUDE_COMMAND_USERDATA
+ cmd->userdata = list->userdata;
+#endif
+
+ list->cmd_count++;
+ list->clip_rect = clip;
+ return cmd;
+}
+NK_INTERN struct nk_draw_command*
+nk_draw_list_command_last(struct nk_draw_list *list)
+{
+ void *memory;
+ nk_size size;
+ struct nk_draw_command *cmd;
+ NK_ASSERT(list->cmd_count);
+
+ memory = nk_buffer_memory(list->buffer);
+ size = nk_buffer_total(list->buffer);
+ cmd = nk_ptr_add(struct nk_draw_command, memory, size - list->cmd_offset);
+ return (cmd - (list->cmd_count-1));
+}
+NK_INTERN void
+nk_draw_list_add_clip(struct nk_draw_list *list, struct nk_rect rect)
+{
+ NK_ASSERT(list);
+ if (!list) return;
+ if (!list->cmd_count) {
+ nk_draw_list_push_command(list, rect, list->config.null.texture);
+ } else {
+ struct nk_draw_command *prev = nk_draw_list_command_last(list);
+ if (prev->elem_count == 0)
+ prev->clip_rect = rect;
+ nk_draw_list_push_command(list, rect, prev->texture);
+ }
+}
+NK_INTERN void
+nk_draw_list_push_image(struct nk_draw_list *list, nk_handle texture)
+{
+ NK_ASSERT(list);
+ if (!list) return;
+ if (!list->cmd_count) {
+ nk_draw_list_push_command(list, nk_null_rect, texture);
+ } else {
+ struct nk_draw_command *prev = nk_draw_list_command_last(list);
+ if (prev->elem_count == 0) {
+ prev->texture = texture;
+ #ifdef NK_INCLUDE_COMMAND_USERDATA
+ prev->userdata = list->userdata;
+ #endif
+ } else if (prev->texture.id != texture.id
+ #ifdef NK_INCLUDE_COMMAND_USERDATA
+ || prev->userdata.id != list->userdata.id
+ #endif
+ ) nk_draw_list_push_command(list, prev->clip_rect, texture);
+ }
+}
+#ifdef NK_INCLUDE_COMMAND_USERDATA
+NK_API void
+nk_draw_list_push_userdata(struct nk_draw_list *list, nk_handle userdata)
+{
+ list->userdata = userdata;
+}
+#endif
+NK_INTERN void*
+nk_draw_list_alloc_vertices(struct nk_draw_list *list, nk_size count)
+{
+ void *vtx;
+ NK_ASSERT(list);
+ if (!list) return 0;
+ vtx = nk_buffer_alloc(list->vertices, NK_BUFFER_FRONT,
+ list->config.vertex_size*count, list->config.vertex_alignment);
+ if (!vtx) return 0;
+ list->vertex_count += (unsigned int)count;
+
+    /* This assert triggers because you are drawing a lot of stuff and nuklear
+     * defines `nk_draw_index` as `nk_ushort` to save space by default.
+     *
+     * It means you have reached the maximum number of indices, or rather vertices.
+     * To solve this issue please change the typedef `nk_draw_index` to `nk_uint`
+     * and don't forget to specify the new element size in your drawing
+     * backend (OpenGL, DirectX, ...). For example in OpenGL for `glDrawElements`
+     * you have to pass `GL_UNSIGNED_INT` instead of `GL_UNSIGNED_SHORT`.
+     * Sorry for the inconvenience. */
+    if(sizeof(nk_draw_index)==2) NK_ASSERT((list->vertex_count < NK_USHORT_MAX &&
+        "Too many vertices for 16-bit vertex indices. Please read the comment above on how to solve this problem"));
+ return vtx;
+}
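+/* Sketch of the change described in the comment above (the glDrawElements
+ * call is an example OpenGL backend line; `count` and `offset` are
+ * hypothetical names):
+ *
+ *     typedef nk_uint nk_draw_index;    (instead of nk_ushort)
+ *     glDrawElements(GL_TRIANGLES, (GLsizei)count, GL_UNSIGNED_INT, offset);
+ */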
+NK_INTERN nk_draw_index*
+nk_draw_list_alloc_elements(struct nk_draw_list *list, nk_size count)
+{
+ nk_draw_index *ids;
+ struct nk_draw_command *cmd;
+ NK_STORAGE const nk_size elem_align = NK_ALIGNOF(nk_draw_index);
+ NK_STORAGE const nk_size elem_size = sizeof(nk_draw_index);
+ NK_ASSERT(list);
+ if (!list) return 0;
+
+ ids = (nk_draw_index*)
+ nk_buffer_alloc(list->elements, NK_BUFFER_FRONT, elem_size*count, elem_align);
+ if (!ids) return 0;
+ cmd = nk_draw_list_command_last(list);
+ list->element_count += (unsigned int)count;
+ cmd->elem_count += (unsigned int)count;
+ return ids;
+}
+NK_INTERN int
+nk_draw_vertex_layout_element_is_end_of_layout(
+ const struct nk_draw_vertex_layout_element *element)
+{
+ return (element->attribute == NK_VERTEX_ATTRIBUTE_COUNT ||
+ element->format == NK_FORMAT_COUNT);
+}
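+/* Example of a layout terminated the way the check above expects (a sketch;
+ * `struct my_vertex` is hypothetical, NK_VERTEX_LAYOUT_END expands to the
+ * COUNT sentinels tested above):
+ *
+ *     struct my_vertex { float pos[2]; float uv[2]; nk_byte col[4]; };
+ *     static const struct nk_draw_vertex_layout_element layout[] = {
+ *         {NK_VERTEX_POSITION, NK_FORMAT_FLOAT,    NK_OFFSETOF(struct my_vertex, pos)},
+ *         {NK_VERTEX_TEXCOORD, NK_FORMAT_FLOAT,    NK_OFFSETOF(struct my_vertex, uv)},
+ *         {NK_VERTEX_COLOR,    NK_FORMAT_R8G8B8A8, NK_OFFSETOF(struct my_vertex, col)},
+ *         {NK_VERTEX_LAYOUT_END}
+ *     };
+ */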
+NK_INTERN void
+nk_draw_vertex_color(void *attr, const float *vals,
+ enum nk_draw_vertex_layout_format format)
+{
+ /* if this triggers you tried to provide a value format for a color */
+ float val[4];
+ NK_ASSERT(format >= NK_FORMAT_COLOR_BEGIN);
+ NK_ASSERT(format <= NK_FORMAT_COLOR_END);
+ if (format < NK_FORMAT_COLOR_BEGIN || format > NK_FORMAT_COLOR_END) return;
+
+ val[0] = NK_SATURATE(vals[0]);
+ val[1] = NK_SATURATE(vals[1]);
+ val[2] = NK_SATURATE(vals[2]);
+ val[3] = NK_SATURATE(vals[3]);
+
+ switch (format) {
+ default: NK_ASSERT(0 && "Invalid vertex layout color format"); break;
+ case NK_FORMAT_R8G8B8A8:
+ case NK_FORMAT_R8G8B8: {
+ struct nk_color col = nk_rgba_fv(val);
+ NK_MEMCPY(attr, &col.r, sizeof(col));
+ } break;
+ case NK_FORMAT_B8G8R8A8: {
+ struct nk_color col = nk_rgba_fv(val);
+ struct nk_color bgra = nk_rgba(col.b, col.g, col.r, col.a);
+ NK_MEMCPY(attr, &bgra, sizeof(bgra));
+ } break;
+ case NK_FORMAT_R16G15B16: {
+ nk_ushort col[3];
+ col[0] = (nk_ushort)(val[0]*(float)NK_USHORT_MAX);
+ col[1] = (nk_ushort)(val[1]*(float)NK_USHORT_MAX);
+ col[2] = (nk_ushort)(val[2]*(float)NK_USHORT_MAX);
+ NK_MEMCPY(attr, col, sizeof(col));
+ } break;
+ case NK_FORMAT_R16G15B16A16: {
+ nk_ushort col[4];
+ col[0] = (nk_ushort)(val[0]*(float)NK_USHORT_MAX);
+ col[1] = (nk_ushort)(val[1]*(float)NK_USHORT_MAX);
+ col[2] = (nk_ushort)(val[2]*(float)NK_USHORT_MAX);
+ col[3] = (nk_ushort)(val[3]*(float)NK_USHORT_MAX);
+ NK_MEMCPY(attr, col, sizeof(col));
+ } break;
+ case NK_FORMAT_R32G32B32: {
+ nk_uint col[3];
+ col[0] = (nk_uint)(val[0]*(float)NK_UINT_MAX);
+ col[1] = (nk_uint)(val[1]*(float)NK_UINT_MAX);
+ col[2] = (nk_uint)(val[2]*(float)NK_UINT_MAX);
+ NK_MEMCPY(attr, col, sizeof(col));
+ } break;
+ case NK_FORMAT_R32G32B32A32: {
+ nk_uint col[4];
+ col[0] = (nk_uint)(val[0]*(float)NK_UINT_MAX);
+ col[1] = (nk_uint)(val[1]*(float)NK_UINT_MAX);
+ col[2] = (nk_uint)(val[2]*(float)NK_UINT_MAX);
+ col[3] = (nk_uint)(val[3]*(float)NK_UINT_MAX);
+ NK_MEMCPY(attr, col, sizeof(col));
+ } break;
+ case NK_FORMAT_R32G32B32A32_FLOAT:
+ NK_MEMCPY(attr, val, sizeof(float)*4);
+ break;
+ case NK_FORMAT_R32G32B32A32_DOUBLE: {
+ double col[4];
+ col[0] = (double)val[0];
+ col[1] = (double)val[1];
+ col[2] = (double)val[2];
+ col[3] = (double)val[3];
+ NK_MEMCPY(attr, col, sizeof(col));
+ } break;
+ case NK_FORMAT_RGB32:
+ case NK_FORMAT_RGBA32: {
+ struct nk_color col = nk_rgba_fv(val);
+ nk_uint color = nk_color_u32(col);
+ NK_MEMCPY(attr, &color, sizeof(color));
+ } break; }
+}
+NK_INTERN void
+nk_draw_vertex_element(void *dst, const float *values, int value_count,
+ enum nk_draw_vertex_layout_format format)
+{
+ int value_index;
+ void *attribute = dst;
+ /* if this triggers you tried to provide a color format for a value */
+ NK_ASSERT(format < NK_FORMAT_COLOR_BEGIN);
+ if (format >= NK_FORMAT_COLOR_BEGIN && format <= NK_FORMAT_COLOR_END) return;
+ for (value_index = 0; value_index < value_count; ++value_index) {
+ switch (format) {
+ default: NK_ASSERT(0 && "invalid vertex layout format"); break;
+ case NK_FORMAT_SCHAR: {
+ char value = (char)NK_CLAMP((float)NK_SCHAR_MIN, values[value_index], (float)NK_SCHAR_MAX);
+ NK_MEMCPY(attribute, &value, sizeof(value));
+ attribute = (void*)((char*)attribute + sizeof(char));
+ } break;
+ case NK_FORMAT_SSHORT: {
+ nk_short value = (nk_short)NK_CLAMP((float)NK_SSHORT_MIN, values[value_index], (float)NK_SSHORT_MAX);
+ NK_MEMCPY(attribute, &value, sizeof(value));
+ attribute = (void*)((char*)attribute + sizeof(value));
+ } break;
+ case NK_FORMAT_SINT: {
+ nk_int value = (nk_int)NK_CLAMP((float)NK_SINT_MIN, values[value_index], (float)NK_SINT_MAX);
+ NK_MEMCPY(attribute, &value, sizeof(value));
+ attribute = (void*)((char*)attribute + sizeof(nk_int));
+ } break;
+ case NK_FORMAT_UCHAR: {
+ unsigned char value = (unsigned char)NK_CLAMP((float)NK_UCHAR_MIN, values[value_index], (float)NK_UCHAR_MAX);
+ NK_MEMCPY(attribute, &value, sizeof(value));
+ attribute = (void*)((char*)attribute + sizeof(unsigned char));
+ } break;
+ case NK_FORMAT_USHORT: {
+ nk_ushort value = (nk_ushort)NK_CLAMP((float)NK_USHORT_MIN, values[value_index], (float)NK_USHORT_MAX);
+ NK_MEMCPY(attribute, &value, sizeof(value));
+ attribute = (void*)((char*)attribute + sizeof(value));
+ } break;
+ case NK_FORMAT_UINT: {
+ nk_uint value = (nk_uint)NK_CLAMP((float)NK_UINT_MIN, values[value_index], (float)NK_UINT_MAX);
+ NK_MEMCPY(attribute, &value, sizeof(value));
+ attribute = (void*)((char*)attribute + sizeof(nk_uint));
+ } break;
+ case NK_FORMAT_FLOAT:
+ NK_MEMCPY(attribute, &values[value_index], sizeof(values[value_index]));
+ attribute = (void*)((char*)attribute + sizeof(float));
+ break;
+ case NK_FORMAT_DOUBLE: {
+ double value = (double)values[value_index];
+ NK_MEMCPY(attribute, &value, sizeof(value));
+ attribute = (void*)((char*)attribute + sizeof(double));
+ } break;
+ }
+ }
+}
+NK_INTERN void*
+nk_draw_vertex(void *dst, const struct nk_convert_config *config,
+ struct nk_vec2 pos, struct nk_vec2 uv, struct nk_colorf color)
+{
+ void *result = (void*)((char*)dst + config->vertex_size);
+ const struct nk_draw_vertex_layout_element *elem_iter = config->vertex_layout;
+ while (!nk_draw_vertex_layout_element_is_end_of_layout(elem_iter)) {
+ void *address = (void*)((char*)dst + elem_iter->offset);
+ switch (elem_iter->attribute) {
+ case NK_VERTEX_ATTRIBUTE_COUNT:
+ default: NK_ASSERT(0 && "wrong element attribute"); break;
+ case NK_VERTEX_POSITION: nk_draw_vertex_element(address, &pos.x, 2, elem_iter->format); break;
+ case NK_VERTEX_TEXCOORD: nk_draw_vertex_element(address, &uv.x, 2, elem_iter->format); break;
+ case NK_VERTEX_COLOR: nk_draw_vertex_color(address, &color.r, elem_iter->format); break;
+ }
+ elem_iter++;
+ }
+ return result;
+}
+NK_API void
+nk_draw_list_stroke_poly_line(struct nk_draw_list *list, const struct nk_vec2 *points,
+ const unsigned int points_count, struct nk_color color, enum nk_draw_list_stroke closed,
+ float thickness, enum nk_anti_aliasing aliasing)
+{
+ nk_size count;
+ int thick_line;
+ struct nk_colorf col;
+ struct nk_colorf col_trans;
+ NK_ASSERT(list);
+ if (!list || points_count < 2) return;
+
+ color.a = (nk_byte)((float)color.a * list->config.global_alpha);
+ count = points_count;
+ if (!closed) count = points_count-1;
+ thick_line = thickness > 1.0f;
+
+#ifdef NK_INCLUDE_COMMAND_USERDATA
+ nk_draw_list_push_userdata(list, list->userdata);
+#endif
+
+ color.a = (nk_byte)((float)color.a * list->config.global_alpha);
+ nk_color_fv(&col.r, color);
+ col_trans = col;
+ col_trans.a = 0;
+
+ if (aliasing == NK_ANTI_ALIASING_ON) {
+ /* ANTI-ALIASED STROKE */
+ const float AA_SIZE = 1.0f;
+ NK_STORAGE const nk_size pnt_align = NK_ALIGNOF(struct nk_vec2);
+ NK_STORAGE const nk_size pnt_size = sizeof(struct nk_vec2);
+
+ /* allocate vertices and elements */
+ nk_size i1 = 0;
+ nk_size vertex_offset;
+ nk_size index = list->vertex_count;
+
+ const nk_size idx_count = (thick_line) ? (count * 18) : (count * 12);
+ const nk_size vtx_count = (thick_line) ? (points_count * 4): (points_count *3);
+
+ void *vtx = nk_draw_list_alloc_vertices(list, vtx_count);
+ nk_draw_index *ids = nk_draw_list_alloc_elements(list, idx_count);
+
+ nk_size size;
+ struct nk_vec2 *normals, *temp;
+ if (!vtx || !ids) return;
+
+ /* temporary allocate normals + points */
+ vertex_offset = (nk_size)((nk_byte*)vtx - (nk_byte*)list->vertices->memory.ptr);
+ nk_buffer_mark(list->vertices, NK_BUFFER_FRONT);
+ size = pnt_size * ((thick_line) ? 5 : 3) * points_count;
+ normals = (struct nk_vec2*) nk_buffer_alloc(list->vertices, NK_BUFFER_FRONT, size, pnt_align);
+ if (!normals) return;
+ temp = normals + points_count;
+
+ /* make sure vertex pointer is still correct */
+ vtx = (void*)((nk_byte*)list->vertices->memory.ptr + vertex_offset);
+
+ /* calculate normals */
+ for (i1 = 0; i1 < count; ++i1) {
+ const nk_size i2 = ((i1 + 1) == points_count) ? 0 : (i1 + 1);
+ struct nk_vec2 diff = nk_vec2_sub(points[i2], points[i1]);
+ float len;
+
+ /* vec2 inverted length */
+ len = nk_vec2_len_sqr(diff);
+ if (len != 0.0f)
+ len = nk_inv_sqrt(len);
+ else len = 1.0f;
+
+ diff = nk_vec2_muls(diff, len);
+ normals[i1].x = diff.y;
+ normals[i1].y = -diff.x;
+ }
+
+ if (!closed)
+ normals[points_count-1] = normals[points_count-2];
+
+ if (!thick_line) {
+ nk_size idx1, i;
+ if (!closed) {
+ struct nk_vec2 d;
+ temp[0] = nk_vec2_add(points[0], nk_vec2_muls(normals[0], AA_SIZE));
+ temp[1] = nk_vec2_sub(points[0], nk_vec2_muls(normals[0], AA_SIZE));
+ d = nk_vec2_muls(normals[points_count-1], AA_SIZE);
+ temp[(points_count-1) * 2 + 0] = nk_vec2_add(points[points_count-1], d);
+ temp[(points_count-1) * 2 + 1] = nk_vec2_sub(points[points_count-1], d);
+ }
+
+ /* fill elements */
+ idx1 = index;
+ for (i1 = 0; i1 < count; i1++) {
+ struct nk_vec2 dm;
+ float dmr2;
+ nk_size i2 = ((i1 + 1) == points_count) ? 0 : (i1 + 1);
+ nk_size idx2 = ((i1+1) == points_count) ? index: (idx1 + 3);
+
+ /* average normals */
+ dm = nk_vec2_muls(nk_vec2_add(normals[i1], normals[i2]), 0.5f);
+ dmr2 = dm.x * dm.x + dm.y* dm.y;
+ if (dmr2 > 0.000001f) {
+ float scale = 1.0f/dmr2;
+ scale = NK_MIN(100.0f, scale);
+ dm = nk_vec2_muls(dm, scale);
+ }
+
+ dm = nk_vec2_muls(dm, AA_SIZE);
+ temp[i2*2+0] = nk_vec2_add(points[i2], dm);
+ temp[i2*2+1] = nk_vec2_sub(points[i2], dm);
+
+ ids[0] = (nk_draw_index)(idx2 + 0); ids[1] = (nk_draw_index)(idx1+0);
+ ids[2] = (nk_draw_index)(idx1 + 2); ids[3] = (nk_draw_index)(idx1+2);
+ ids[4] = (nk_draw_index)(idx2 + 2); ids[5] = (nk_draw_index)(idx2+0);
+ ids[6] = (nk_draw_index)(idx2 + 1); ids[7] = (nk_draw_index)(idx1+1);
+ ids[8] = (nk_draw_index)(idx1 + 0); ids[9] = (nk_draw_index)(idx1+0);
+ ids[10]= (nk_draw_index)(idx2 + 0); ids[11]= (nk_draw_index)(idx2+1);
+ ids += 12;
+ idx1 = idx2;
+ }
+
+ /* fill vertices */
+ for (i = 0; i < points_count; ++i) {
+ const struct nk_vec2 uv = list->config.null.uv;
+ vtx = nk_draw_vertex(vtx, &list->config, points[i], uv, col);
+ vtx = nk_draw_vertex(vtx, &list->config, temp[i*2+0], uv, col_trans);
+ vtx = nk_draw_vertex(vtx, &list->config, temp[i*2+1], uv, col_trans);
+ }
+ } else {
+ nk_size idx1, i;
+ const float half_inner_thickness = (thickness - AA_SIZE) * 0.5f;
+ if (!closed) {
+ struct nk_vec2 d1 = nk_vec2_muls(normals[0], half_inner_thickness + AA_SIZE);
+ struct nk_vec2 d2 = nk_vec2_muls(normals[0], half_inner_thickness);
+
+ temp[0] = nk_vec2_add(points[0], d1);
+ temp[1] = nk_vec2_add(points[0], d2);
+ temp[2] = nk_vec2_sub(points[0], d2);
+ temp[3] = nk_vec2_sub(points[0], d1);
+
+ d1 = nk_vec2_muls(normals[points_count-1], half_inner_thickness + AA_SIZE);
+ d2 = nk_vec2_muls(normals[points_count-1], half_inner_thickness);
+
+ temp[(points_count-1)*4+0] = nk_vec2_add(points[points_count-1], d1);
+ temp[(points_count-1)*4+1] = nk_vec2_add(points[points_count-1], d2);
+ temp[(points_count-1)*4+2] = nk_vec2_sub(points[points_count-1], d2);
+ temp[(points_count-1)*4+3] = nk_vec2_sub(points[points_count-1], d1);
+ }
+
+ /* add all elements */
+ idx1 = index;
+ for (i1 = 0; i1 < count; ++i1) {
+ struct nk_vec2 dm_out, dm_in;
+ const nk_size i2 = ((i1+1) == points_count) ? 0: (i1 + 1);
+ nk_size idx2 = ((i1+1) == points_count) ? index: (idx1 + 4);
+
+ /* average normals */
+ struct nk_vec2 dm = nk_vec2_muls(nk_vec2_add(normals[i1], normals[i2]), 0.5f);
+ float dmr2 = dm.x * dm.x + dm.y* dm.y;
+ if (dmr2 > 0.000001f) {
+ float scale = 1.0f/dmr2;
+ scale = NK_MIN(100.0f, scale);
+ dm = nk_vec2_muls(dm, scale);
+ }
+
+ dm_out = nk_vec2_muls(dm, ((half_inner_thickness) + AA_SIZE));
+ dm_in = nk_vec2_muls(dm, half_inner_thickness);
+ temp[i2*4+0] = nk_vec2_add(points[i2], dm_out);
+ temp[i2*4+1] = nk_vec2_add(points[i2], dm_in);
+ temp[i2*4+2] = nk_vec2_sub(points[i2], dm_in);
+ temp[i2*4+3] = nk_vec2_sub(points[i2], dm_out);
+
+ /* add indexes */
+ ids[0] = (nk_draw_index)(idx2 + 1); ids[1] = (nk_draw_index)(idx1+1);
+ ids[2] = (nk_draw_index)(idx1 + 2); ids[3] = (nk_draw_index)(idx1+2);
+ ids[4] = (nk_draw_index)(idx2 + 2); ids[5] = (nk_draw_index)(idx2+1);
+ ids[6] = (nk_draw_index)(idx2 + 1); ids[7] = (nk_draw_index)(idx1+1);
+ ids[8] = (nk_draw_index)(idx1 + 0); ids[9] = (nk_draw_index)(idx1+0);
+ ids[10]= (nk_draw_index)(idx2 + 0); ids[11] = (nk_draw_index)(idx2+1);
+ ids[12]= (nk_draw_index)(idx2 + 2); ids[13] = (nk_draw_index)(idx1+2);
+ ids[14]= (nk_draw_index)(idx1 + 3); ids[15] = (nk_draw_index)(idx1+3);
+ ids[16]= (nk_draw_index)(idx2 + 3); ids[17] = (nk_draw_index)(idx2+2);
+ ids += 18;
+ idx1 = idx2;
+ }
+
+ /* add vertices */
+ for (i = 0; i < points_count; ++i) {
+ const struct nk_vec2 uv = list->config.null.uv;
+ vtx = nk_draw_vertex(vtx, &list->config, temp[i*4+0], uv, col_trans);
+ vtx = nk_draw_vertex(vtx, &list->config, temp[i*4+1], uv, col);
+ vtx = nk_draw_vertex(vtx, &list->config, temp[i*4+2], uv, col);
+ vtx = nk_draw_vertex(vtx, &list->config, temp[i*4+3], uv, col_trans);
+ }
+ }
+ /* free temporary normals + points */
+ nk_buffer_reset(list->vertices, NK_BUFFER_FRONT);
+ } else {
+ /* NON ANTI-ALIASED STROKE */
+ nk_size i1 = 0;
+ nk_size idx = list->vertex_count;
+ const nk_size idx_count = count * 6;
+ const nk_size vtx_count = count * 4;
+ void *vtx = nk_draw_list_alloc_vertices(list, vtx_count);
+ nk_draw_index *ids = nk_draw_list_alloc_elements(list, idx_count);
+ if (!vtx || !ids) return;
+
+ for (i1 = 0; i1 < count; ++i1) {
+ float dx, dy;
+ const struct nk_vec2 uv = list->config.null.uv;
+ const nk_size i2 = ((i1+1) == points_count) ? 0 : i1 + 1;
+ const struct nk_vec2 p1 = points[i1];
+ const struct nk_vec2 p2 = points[i2];
+ struct nk_vec2 diff = nk_vec2_sub(p2, p1);
+ float len;
+
+ /* vec2 inverted length */
+ len = nk_vec2_len_sqr(diff);
+ if (len != 0.0f)
+ len = nk_inv_sqrt(len);
+ else len = 1.0f;
+ diff = nk_vec2_muls(diff, len);
+
+ /* add vertices */
+ dx = diff.x * (thickness * 0.5f);
+ dy = diff.y * (thickness * 0.5f);
+
+ vtx = nk_draw_vertex(vtx, &list->config, nk_vec2(p1.x + dy, p1.y - dx), uv, col);
+ vtx = nk_draw_vertex(vtx, &list->config, nk_vec2(p2.x + dy, p2.y - dx), uv, col);
+ vtx = nk_draw_vertex(vtx, &list->config, nk_vec2(p2.x - dy, p2.y + dx), uv, col);
+ vtx = nk_draw_vertex(vtx, &list->config, nk_vec2(p1.x - dy, p1.y + dx), uv, col);
+
+ ids[0] = (nk_draw_index)(idx+0); ids[1] = (nk_draw_index)(idx+1);
+ ids[2] = (nk_draw_index)(idx+2); ids[3] = (nk_draw_index)(idx+0);
+ ids[4] = (nk_draw_index)(idx+2); ids[5] = (nk_draw_index)(idx+3);
+
+ ids += 6;
+ idx += 4;
+ }
+ }
+}
+NK_API void
+nk_draw_list_fill_poly_convex(struct nk_draw_list *list,
+ const struct nk_vec2 *points, const unsigned int points_count,
+ struct nk_color color, enum nk_anti_aliasing aliasing)
+{
+ struct nk_colorf col;
+ struct nk_colorf col_trans;
+
+ NK_STORAGE const nk_size pnt_align = NK_ALIGNOF(struct nk_vec2);
+ NK_STORAGE const nk_size pnt_size = sizeof(struct nk_vec2);
+ NK_ASSERT(list);
+ if (!list || points_count < 3) return;
+
+#ifdef NK_INCLUDE_COMMAND_USERDATA
+ nk_draw_list_push_userdata(list, list->userdata);
+#endif
+
+ color.a = (nk_byte)((float)color.a * list->config.global_alpha);
+ nk_color_fv(&col.r, color);
+ col_trans = col;
+ col_trans.a = 0;
+
+ if (aliasing == NK_ANTI_ALIASING_ON) {
+ nk_size i = 0;
+ nk_size i0 = 0;
+ nk_size i1 = 0;
+
+ const float AA_SIZE = 1.0f;
+ nk_size vertex_offset = 0;
+ nk_size index = list->vertex_count;
+
+ const nk_size idx_count = (points_count-2)*3 + points_count*6;
+ const nk_size vtx_count = (points_count*2);
+
+ void *vtx = nk_draw_list_alloc_vertices(list, vtx_count);
+ nk_draw_index *ids = nk_draw_list_alloc_elements(list, idx_count);
+
+ nk_size size = 0;
+ struct nk_vec2 *normals = 0;
+ unsigned int vtx_inner_idx = (unsigned int)(index + 0);
+ unsigned int vtx_outer_idx = (unsigned int)(index + 1);
+ if (!vtx || !ids) return;
+
+ /* temporary allocate normals */
+ vertex_offset = (nk_size)((nk_byte*)vtx - (nk_byte*)list->vertices->memory.ptr);
+ nk_buffer_mark(list->vertices, NK_BUFFER_FRONT);
+ size = pnt_size * points_count;
+ normals = (struct nk_vec2*) nk_buffer_alloc(list->vertices, NK_BUFFER_FRONT, size, pnt_align);
+ if (!normals) return;
+ vtx = (void*)((nk_byte*)list->vertices->memory.ptr + vertex_offset);
+
+ /* add elements */
+ for (i = 2; i < points_count; i++) {
+ ids[0] = (nk_draw_index)(vtx_inner_idx);
+ ids[1] = (nk_draw_index)(vtx_inner_idx + ((i-1) << 1));
+ ids[2] = (nk_draw_index)(vtx_inner_idx + (i << 1));
+ ids += 3;
+ }
+
+ /* compute normals */
+ for (i0 = points_count-1, i1 = 0; i1 < points_count; i0 = i1++) {
+ struct nk_vec2 p0 = points[i0];
+ struct nk_vec2 p1 = points[i1];
+ struct nk_vec2 diff = nk_vec2_sub(p1, p0);
+
+ /* vec2 inverted length */
+ float len = nk_vec2_len_sqr(diff);
+ if (len != 0.0f)
+ len = nk_inv_sqrt(len);
+ else len = 1.0f;
+ diff = nk_vec2_muls(diff, len);
+
+ normals[i0].x = diff.y;
+ normals[i0].y = -diff.x;
+ }
+
+ /* add vertices + indexes */
+ for (i0 = points_count-1, i1 = 0; i1 < points_count; i0 = i1++) {
+ const struct nk_vec2 uv = list->config.null.uv;
+ struct nk_vec2 n0 = normals[i0];
+ struct nk_vec2 n1 = normals[i1];
+ struct nk_vec2 dm = nk_vec2_muls(nk_vec2_add(n0, n1), 0.5f);
+ float dmr2 = dm.x*dm.x + dm.y*dm.y;
+ if (dmr2 > 0.000001f) {
+ float scale = 1.0f / dmr2;
+ scale = NK_MIN(scale, 100.0f);
+ dm = nk_vec2_muls(dm, scale);
+ }
+ dm = nk_vec2_muls(dm, AA_SIZE * 0.5f);
+
+ /* add vertices */
+ vtx = nk_draw_vertex(vtx, &list->config, nk_vec2_sub(points[i1], dm), uv, col);
+ vtx = nk_draw_vertex(vtx, &list->config, nk_vec2_add(points[i1], dm), uv, col_trans);
+
+ /* add indexes */
+ ids[0] = (nk_draw_index)(vtx_inner_idx+(i1<<1));
+ ids[1] = (nk_draw_index)(vtx_inner_idx+(i0<<1));
+ ids[2] = (nk_draw_index)(vtx_outer_idx+(i0<<1));
+ ids[3] = (nk_draw_index)(vtx_outer_idx+(i0<<1));
+ ids[4] = (nk_draw_index)(vtx_outer_idx+(i1<<1));
+ ids[5] = (nk_draw_index)(vtx_inner_idx+(i1<<1));
+ ids += 6;
+ }
+ /* free temporary normals + points */
+ nk_buffer_reset(list->vertices, NK_BUFFER_FRONT);
+ } else {
+ nk_size i = 0;
+ nk_size index = list->vertex_count;
+ const nk_size idx_count = (points_count-2)*3;
+ const nk_size vtx_count = points_count;
+ void *vtx = nk_draw_list_alloc_vertices(list, vtx_count);
+ nk_draw_index *ids = nk_draw_list_alloc_elements(list, idx_count);
+
+ if (!vtx || !ids) return;
+ for (i = 0; i < vtx_count; ++i)
+ vtx = nk_draw_vertex(vtx, &list->config, points[i], list->config.null.uv, col);
+ for (i = 2; i < points_count; ++i) {
+ ids[0] = (nk_draw_index)index;
+ ids[1] = (nk_draw_index)(index+ i - 1);
+ ids[2] = (nk_draw_index)(index+i);
+ ids += 3;
+ }
+ }
+}
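+/* Note on the anti-aliased fill above: every path point is emitted twice, an
+ * inner vertex with the solid color col and an outer vertex offset along the
+ * averaged edge normal with the fully transparent col_trans; the two rings sit
+ * AA_SIZE (one pixel) apart, half inside and half outside the path. The
+ * triangle fan over the inner ring fills the polygon, while the strip between
+ * the rings forms a one pixel gradient fringe that blends into smooth edges. */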
+NK_API void
+nk_draw_list_path_clear(struct nk_draw_list *list)
+{
+ NK_ASSERT(list);
+ if (!list) return;
+ nk_buffer_reset(list->buffer, NK_BUFFER_FRONT);
+ list->path_count = 0;
+ list->path_offset = 0;
+}
+NK_API void
+nk_draw_list_path_line_to(struct nk_draw_list *list, struct nk_vec2 pos)
+{
+ struct nk_vec2 *points = 0;
+ struct nk_draw_command *cmd = 0;
+ NK_ASSERT(list);
+ if (!list) return;
+ if (!list->cmd_count)
+ nk_draw_list_add_clip(list, nk_null_rect);
+
+ cmd = nk_draw_list_command_last(list);
+ if (cmd && cmd->texture.ptr != list->config.null.texture.ptr)
+ nk_draw_list_push_image(list, list->config.null.texture);
+
+ points = nk_draw_list_alloc_path(list, 1);
+ if (!points) return;
+ points[0] = pos;
+}
+NK_API void
+nk_draw_list_path_arc_to_fast(struct nk_draw_list *list, struct nk_vec2 center,
+ float radius, int a_min, int a_max)
+{
+ int a = 0;
+ NK_ASSERT(list);
+ if (!list) return;
+ if (a_min <= a_max) {
+ for (a = a_min; a <= a_max; a++) {
+ const struct nk_vec2 c = list->circle_vtx[(nk_size)a % NK_LEN(list->circle_vtx)];
+ const float x = center.x + c.x * radius;
+ const float y = center.y + c.y * radius;
+ nk_draw_list_path_line_to(list, nk_vec2(x, y));
+ }
+ }
+}
+NK_API void
+nk_draw_list_path_arc_to(struct nk_draw_list *list, struct nk_vec2 center,
+ float radius, float a_min, float a_max, unsigned int segments)
+{
+ unsigned int i = 0;
+ NK_ASSERT(list);
+ if (!list) return;
+ if (radius == 0.0f) return;
+
+ /* This algorithm for arc drawing relies on these two trigonometric identities[1]:
+ sin(a + b) = sin(a) * cos(b) + cos(a) * sin(b)
+ cos(a + b) = cos(a) * cos(b) - sin(a) * sin(b)
+
+ Two coordinates (x, y) of a point on a circle centered on
+ the origin can be written in polar form as:
+ x = r * cos(a)
+ y = r * sin(a)
+ where r is the radius of the circle and
+ a is the angle of the point (x, y) measured from the positive x-axis.
+
+ This allows us to rotate the coordinates around the
+ origin by an angle b using the following transformation:
+ x' = r * cos(a + b) = x * cos(b) - y * sin(b)
+ y' = r * sin(a + b) = y * cos(b) + x * sin(b)
+
+ [1] https://en.wikipedia.org/wiki/List_of_trigonometric_identities#Angle_sum_and_difference_identities
+ */
+ {const float d_angle = (a_max - a_min) / (float)segments;
+ const float sin_d = (float)NK_SIN(d_angle);
+ const float cos_d = (float)NK_COS(d_angle);
+
+ float cx = (float)NK_COS(a_min) * radius;
+ float cy = (float)NK_SIN(a_min) * radius;
+ for(i = 0; i <= segments; ++i) {
+ float new_cx, new_cy;
+ const float x = center.x + cx;
+ const float y = center.y + cy;
+ nk_draw_list_path_line_to(list, nk_vec2(x, y));
+
+ new_cx = cx * cos_d - cy * sin_d;
+ new_cy = cy * cos_d + cx * sin_d;
+ cx = new_cx;
+ cy = new_cy;
+ }}
+}
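+/* Example (illustrative sketch only, not part of the nuklear API): the loop
+ * above is the classic "rotate by a constant angle" recurrence. Pulled out into
+ * a hypothetical standalone helper it would look like this, emitting
+ * segments + 1 arc points while evaluating NK_SIN/NK_COS only once:
+ *
+ *      void emit_arc(struct nk_vec2 center, float r, float a_min, float a_max,
+ *          unsigned int segments, void (*emit)(float x, float y))
+ *      {
+ *          const float d = (a_max - a_min) / (float)segments;
+ *          const float sin_d = (float)NK_SIN(d);
+ *          const float cos_d = (float)NK_COS(d);
+ *          float cx = (float)NK_COS(a_min) * r;
+ *          float cy = (float)NK_SIN(a_min) * r;
+ *          unsigned int i;
+ *          for (i = 0; i <= segments; ++i) {
+ *              const float nx = cx * cos_d - cy * sin_d;
+ *              emit(center.x + cx, center.y + cy);
+ *              cy = cy * cos_d + cx * sin_d;
+ *              cx = nx;
+ *          }
+ *      }
+ */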
+NK_API void
+nk_draw_list_path_rect_to(struct nk_draw_list *list, struct nk_vec2 a,
+ struct nk_vec2 b, float rounding)
+{
+ float r;
+ NK_ASSERT(list);
+ if (!list) return;
+ r = rounding;
+ r = NK_MIN(r, ((b.x-a.x) < 0) ? -(b.x-a.x): (b.x-a.x));
+ r = NK_MIN(r, ((b.y-a.y) < 0) ? -(b.y-a.y): (b.y-a.y));
+
+ if (r == 0.0f) {
+ nk_draw_list_path_line_to(list, a);
+ nk_draw_list_path_line_to(list, nk_vec2(b.x,a.y));
+ nk_draw_list_path_line_to(list, b);
+ nk_draw_list_path_line_to(list, nk_vec2(a.x,b.y));
+ } else {
+ nk_draw_list_path_arc_to_fast(list, nk_vec2(a.x + r, a.y + r), r, 6, 9);
+ nk_draw_list_path_arc_to_fast(list, nk_vec2(b.x - r, a.y + r), r, 9, 12);
+ nk_draw_list_path_arc_to_fast(list, nk_vec2(b.x - r, b.y - r), r, 0, 3);
+ nk_draw_list_path_arc_to_fast(list, nk_vec2(a.x + r, b.y - r), r, 3, 6);
+ }
+}
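+/* The index pairs passed to nk_draw_list_path_arc_to_fast above (6..9, 9..12,
+ * 0..3, 3..6) select quadrants of the precomputed list->circle_vtx lookup
+ * table, whose entries cover one full turn in equal angular steps (twelve 30
+ * degree steps with the default table). In screen coordinates (y growing
+ * downwards) 6..9 traces the top-left corner, 9..12 the top-right, 0..3 the
+ * bottom-right and 3..6 the bottom-left of the rounded rectangle. */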
+NK_API void
+nk_draw_list_path_curve_to(struct nk_draw_list *list, struct nk_vec2 p2,
+ struct nk_vec2 p3, struct nk_vec2 p4, unsigned int num_segments)
+{
+ float t_step;
+ unsigned int i_step;
+ struct nk_vec2 p1;
+
+ NK_ASSERT(list);
+ NK_ASSERT(list->path_count);
+ if (!list || !list->path_count) return;
+ num_segments = NK_MAX(num_segments, 1);
+
+ p1 = nk_draw_list_path_last(list);
+ t_step = 1.0f/(float)num_segments;
+ for (i_step = 1; i_step <= num_segments; ++i_step) {
+ float t = t_step * (float)i_step;
+ float u = 1.0f - t;
+ float w1 = u*u*u;
+ float w2 = 3*u*u*t;
+ float w3 = 3*u*t*t;
+ float w4 = t * t *t;
+ float x = w1 * p1.x + w2 * p2.x + w3 * p3.x + w4 * p4.x;
+ float y = w1 * p1.y + w2 * p2.y + w3 * p3.y + w4 * p4.y;
+ nk_draw_list_path_line_to(list, nk_vec2(x,y));
+ }
+}
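+/* The weights w1..w4 above are the cubic Bernstein basis, i.e. the emitted
+ * points lie on the standard cubic Bezier through p1..p4:
+ *
+ *      C(t) = (1-t)^3*p1 + 3*(1-t)^2*t*p2 + 3*(1-t)*t^2*p3 + t^3*p4,  t in (0,1]
+ *
+ * with p1 taken from the current last path point, so a call with
+ * num_segments = n appends n points approximating the curve as a polyline. */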
+NK_API void
+nk_draw_list_path_fill(struct nk_draw_list *list, struct nk_color color)
+{
+ struct nk_vec2 *points;
+ NK_ASSERT(list);
+ if (!list) return;
+ points = (struct nk_vec2*)nk_buffer_memory(list->buffer);
+ nk_draw_list_fill_poly_convex(list, points, list->path_count, color, list->config.shape_AA);
+ nk_draw_list_path_clear(list);
+}
+NK_API void
+nk_draw_list_path_stroke(struct nk_draw_list *list, struct nk_color color,
+ enum nk_draw_list_stroke closed, float thickness)
+{
+ struct nk_vec2 *points;
+ NK_ASSERT(list);
+ if (!list) return;
+ points = (struct nk_vec2*)nk_buffer_memory(list->buffer);
+ nk_draw_list_stroke_poly_line(list, points, list->path_count, color,
+ closed, thickness, list->config.line_AA);
+ nk_draw_list_path_clear(list);
+}
+NK_API void
+nk_draw_list_stroke_line(struct nk_draw_list *list, struct nk_vec2 a,
+ struct nk_vec2 b, struct nk_color col, float thickness)
+{
+ NK_ASSERT(list);
+ if (!list || !col.a) return;
+ if (list->line_AA == NK_ANTI_ALIASING_ON) {
+ nk_draw_list_path_line_to(list, a);
+ nk_draw_list_path_line_to(list, b);
+ } else {
+ nk_draw_list_path_line_to(list, nk_vec2_sub(a,nk_vec2(0.5f,0.5f)));
+ nk_draw_list_path_line_to(list, nk_vec2_sub(b,nk_vec2(0.5f,0.5f)));
+ }
+ nk_draw_list_path_stroke(list, col, NK_STROKE_OPEN, thickness);
+}
+NK_API void
+nk_draw_list_fill_rect(struct nk_draw_list *list, struct nk_rect rect,
+ struct nk_color col, float rounding)
+{
+ NK_ASSERT(list);
+ if (!list || !col.a) return;
+
+ if (list->line_AA == NK_ANTI_ALIASING_ON) {
+ nk_draw_list_path_rect_to(list, nk_vec2(rect.x, rect.y),
+ nk_vec2(rect.x + rect.w, rect.y + rect.h), rounding);
+ } else {
+ nk_draw_list_path_rect_to(list, nk_vec2(rect.x-0.5f, rect.y-0.5f),
+ nk_vec2(rect.x + rect.w, rect.y + rect.h), rounding);
+ }
+ nk_draw_list_path_fill(list, col);
+}
+NK_API void
+nk_draw_list_stroke_rect(struct nk_draw_list *list, struct nk_rect rect,
+ struct nk_color col, float rounding, float thickness)
+{
+ NK_ASSERT(list);
+ if (!list || !col.a) return;
+ if (list->line_AA == NK_ANTI_ALIASING_ON) {
+ nk_draw_list_path_rect_to(list, nk_vec2(rect.x, rect.y),
+ nk_vec2(rect.x + rect.w, rect.y + rect.h), rounding);
+ } else {
+ nk_draw_list_path_rect_to(list, nk_vec2(rect.x-0.5f, rect.y-0.5f),
+ nk_vec2(rect.x + rect.w, rect.y + rect.h), rounding);
+ }
+ nk_draw_list_path_stroke(list, col, NK_STROKE_CLOSED, thickness);
+}
+NK_API void
+nk_draw_list_fill_rect_multi_color(struct nk_draw_list *list, struct nk_rect rect,
+ struct nk_color left, struct nk_color top, struct nk_color right,
+ struct nk_color bottom)
+{
+ void *vtx;
+ struct nk_colorf col_left, col_top;
+ struct nk_colorf col_right, col_bottom;
+ nk_draw_index *idx;
+ nk_draw_index index;
+
+ nk_color_fv(&col_left.r, left);
+ nk_color_fv(&col_right.r, right);
+ nk_color_fv(&col_top.r, top);
+ nk_color_fv(&col_bottom.r, bottom);
+
+ NK_ASSERT(list);
+ if (!list) return;
+
+ nk_draw_list_push_image(list, list->config.null.texture);
+ index = (nk_draw_index)list->vertex_count;
+ vtx = nk_draw_list_alloc_vertices(list, 4);
+ idx = nk_draw_list_alloc_elements(list, 6);
+ if (!vtx || !idx) return;
+
+ idx[0] = (nk_draw_index)(index+0); idx[1] = (nk_draw_index)(index+1);
+ idx[2] = (nk_draw_index)(index+2); idx[3] = (nk_draw_index)(index+0);
+ idx[4] = (nk_draw_index)(index+2); idx[5] = (nk_draw_index)(index+3);
+
+ vtx = nk_draw_vertex(vtx, &list->config, nk_vec2(rect.x, rect.y), list->config.null.uv, col_left);
+ vtx = nk_draw_vertex(vtx, &list->config, nk_vec2(rect.x + rect.w, rect.y), list->config.null.uv, col_top);
+ vtx = nk_draw_vertex(vtx, &list->config, nk_vec2(rect.x + rect.w, rect.y + rect.h), list->config.null.uv, col_right);
+ vtx = nk_draw_vertex(vtx, &list->config, nk_vec2(rect.x, rect.y + rect.h), list->config.null.uv, col_bottom);
+}
+NK_API void
+nk_draw_list_fill_triangle(struct nk_draw_list *list, struct nk_vec2 a,
+ struct nk_vec2 b, struct nk_vec2 c, struct nk_color col)
+{
+ NK_ASSERT(list);
+ if (!list || !col.a) return;
+ nk_draw_list_path_line_to(list, a);
+ nk_draw_list_path_line_to(list, b);
+ nk_draw_list_path_line_to(list, c);
+ nk_draw_list_path_fill(list, col);
+}
+NK_API void
+nk_draw_list_stroke_triangle(struct nk_draw_list *list, struct nk_vec2 a,
+ struct nk_vec2 b, struct nk_vec2 c, struct nk_color col, float thickness)
+{
+ NK_ASSERT(list);
+ if (!list || !col.a) return;
+ nk_draw_list_path_line_to(list, a);
+ nk_draw_list_path_line_to(list, b);
+ nk_draw_list_path_line_to(list, c);
+ nk_draw_list_path_stroke(list, col, NK_STROKE_CLOSED, thickness);
+}
+NK_API void
+nk_draw_list_fill_circle(struct nk_draw_list *list, struct nk_vec2 center,
+ float radius, struct nk_color col, unsigned int segs)
+{
+ float a_max;
+ NK_ASSERT(list);
+ if (!list || !col.a) return;
+ a_max = NK_PI * 2.0f * ((float)segs - 1.0f) / (float)segs;
+ nk_draw_list_path_arc_to(list, center, radius, 0.0f, a_max, segs);
+ nk_draw_list_path_fill(list, col);
+}
+NK_API void
+nk_draw_list_stroke_circle(struct nk_draw_list *list, struct nk_vec2 center,
+ float radius, struct nk_color col, unsigned int segs, float thickness)
+{
+ float a_max;
+ NK_ASSERT(list);
+ if (!list || !col.a) return;
+ a_max = NK_PI * 2.0f * ((float)segs - 1.0f) / (float)segs;
+ nk_draw_list_path_arc_to(list, center, radius, 0.0f, a_max, segs);
+ nk_draw_list_path_stroke(list, col, NK_STROKE_CLOSED, thickness);
+}
+NK_API void
+nk_draw_list_stroke_curve(struct nk_draw_list *list, struct nk_vec2 p0,
+ struct nk_vec2 cp0, struct nk_vec2 cp1, struct nk_vec2 p1,
+ struct nk_color col, unsigned int segments, float thickness)
+{
+ NK_ASSERT(list);
+ if (!list || !col.a) return;
+ nk_draw_list_path_line_to(list, p0);
+ nk_draw_list_path_curve_to(list, cp0, cp1, p1, segments);
+ nk_draw_list_path_stroke(list, col, NK_STROKE_OPEN, thickness);
+}
+NK_INTERN void
+nk_draw_list_push_rect_uv(struct nk_draw_list *list, struct nk_vec2 a,
+ struct nk_vec2 c, struct nk_vec2 uva, struct nk_vec2 uvc,
+ struct nk_color color)
+{
+ void *vtx;
+ struct nk_vec2 uvb;
+ struct nk_vec2 uvd;
+ struct nk_vec2 b;
+ struct nk_vec2 d;
+
+ struct nk_colorf col;
+ nk_draw_index *idx;
+ nk_draw_index index;
+ NK_ASSERT(list);
+ if (!list) return;
+
+ nk_color_fv(&col.r, color);
+ uvb = nk_vec2(uvc.x, uva.y);
+ uvd = nk_vec2(uva.x, uvc.y);
+ b = nk_vec2(c.x, a.y);
+ d = nk_vec2(a.x, c.y);
+
+ index = (nk_draw_index)list->vertex_count;
+ vtx = nk_draw_list_alloc_vertices(list, 4);
+ idx = nk_draw_list_alloc_elements(list, 6);
+ if (!vtx || !idx) return;
+
+ idx[0] = (nk_draw_index)(index+0); idx[1] = (nk_draw_index)(index+1);
+ idx[2] = (nk_draw_index)(index+2); idx[3] = (nk_draw_index)(index+0);
+ idx[4] = (nk_draw_index)(index+2); idx[5] = (nk_draw_index)(index+3);
+
+ vtx = nk_draw_vertex(vtx, &list->config, a, uva, col);
+ vtx = nk_draw_vertex(vtx, &list->config, b, uvb, col);
+ vtx = nk_draw_vertex(vtx, &list->config, c, uvc, col);
+ vtx = nk_draw_vertex(vtx, &list->config, d, uvd, col);
+}
+NK_API void
+nk_draw_list_add_image(struct nk_draw_list *list, struct nk_image texture,
+ struct nk_rect rect, struct nk_color color)
+{
+ NK_ASSERT(list);
+ if (!list) return;
+ /* push new command with given texture */
+ nk_draw_list_push_image(list, texture.handle);
+ if (nk_image_is_subimage(&texture)) {
+ /* add region inside of the texture */
+ struct nk_vec2 uv[2];
+ uv[0].x = (float)texture.region[0]/(float)texture.w;
+ uv[0].y = (float)texture.region[1]/(float)texture.h;
+ uv[1].x = (float)(texture.region[0] + texture.region[2])/(float)texture.w;
+ uv[1].y = (float)(texture.region[1] + texture.region[3])/(float)texture.h;
+ nk_draw_list_push_rect_uv(list, nk_vec2(rect.x, rect.y),
+ nk_vec2(rect.x + rect.w, rect.y + rect.h), uv[0], uv[1], color);
+ } else nk_draw_list_push_rect_uv(list, nk_vec2(rect.x, rect.y),
+ nk_vec2(rect.x + rect.w, rect.y + rect.h),
+ nk_vec2(0.0f, 0.0f), nk_vec2(1.0f, 1.0f),color);
+}
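+/* Worked example for the sub-image branch above (hypothetical numbers): a 64x64
+ * region at offset (16,16) inside a 256x256 atlas gives
+ * uv[0] = (16/256, 16/256) = (0.0625, 0.0625) and
+ * uv[1] = ((16+64)/256, (16+64)/256) = (0.3125, 0.3125),
+ * i.e. the normalized texture coordinates of the region's top-left and
+ * bottom-right corners. */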
+NK_API void
+nk_draw_list_add_text(struct nk_draw_list *list, const struct nk_user_font *font,
+ struct nk_rect rect, const char *text, int len, float font_height,
+ struct nk_color fg)
+{
+ float x = 0;
+ int text_len = 0;
+ nk_rune unicode = 0;
+ nk_rune next = 0;
+ int glyph_len = 0;
+ int next_glyph_len = 0;
+ struct nk_user_font_glyph g;
+
+ NK_ASSERT(list);
+ if (!list || !len || !text) return;
+ if (!NK_INTERSECT(rect.x, rect.y, rect.w, rect.h,
+ list->clip_rect.x, list->clip_rect.y, list->clip_rect.w, list->clip_rect.h)) return;
+
+ nk_draw_list_push_image(list, font->texture);
+ x = rect.x;
+ glyph_len = nk_utf_decode(text, &unicode, len);
+ if (!glyph_len) return;
+
+ /* draw every glyph image */
+ fg.a = (nk_byte)((float)fg.a * list->config.global_alpha);
+ while (text_len < len && glyph_len) {
+ float gx, gy, gh, gw;
+ float char_width = 0;
+ if (unicode == NK_UTF_INVALID) break;
+
+ /* query currently drawn glyph information */
+ next_glyph_len = nk_utf_decode(text + text_len + glyph_len, &next, (int)len - text_len);
+ font->query(font->userdata, font_height, &g, unicode,
+ (next == NK_UTF_INVALID) ? '\0' : next);
+
+ /* calculate and draw glyph drawing rectangle and image */
+ gx = x + g.offset.x;
+ gy = rect.y + g.offset.y;
+ gw = g.width; gh = g.height;
+ char_width = g.xadvance;
+ nk_draw_list_push_rect_uv(list, nk_vec2(gx,gy), nk_vec2(gx + gw, gy+ gh),
+ g.uv[0], g.uv[1], fg);
+
+ /* offset next glyph */
+ text_len += glyph_len;
+ x += char_width;
+ glyph_len = next_glyph_len;
+ unicode = next;
+ }
+}
+NK_API nk_flags
+nk_convert(struct nk_context *ctx, struct nk_buffer *cmds,
+ struct nk_buffer *vertices, struct nk_buffer *elements,
+ const struct nk_convert_config *config)
+{
+ nk_flags res = NK_CONVERT_SUCCESS;
+ const struct nk_command *cmd;
+ NK_ASSERT(ctx);
+ NK_ASSERT(cmds);
+ NK_ASSERT(vertices);
+ NK_ASSERT(elements);
+ NK_ASSERT(config);
+ NK_ASSERT(config->vertex_layout);
+ NK_ASSERT(config->vertex_size);
+ if (!ctx || !cmds || !vertices || !elements || !config || !config->vertex_layout)
+ return NK_CONVERT_INVALID_PARAM;
+
+ nk_draw_list_setup(&ctx->draw_list, config, cmds, vertices, elements,
+ config->line_AA, config->shape_AA);
+ nk_foreach(cmd, ctx)
+ {
+#ifdef NK_INCLUDE_COMMAND_USERDATA
+ ctx->draw_list.userdata = cmd->userdata;
+#endif
+ switch (cmd->type) {
+ case NK_COMMAND_NOP: break;
+ case NK_COMMAND_SCISSOR: {
+ const struct nk_command_scissor *s = (const struct nk_command_scissor*)cmd;
+ nk_draw_list_add_clip(&ctx->draw_list, nk_rect(s->x, s->y, s->w, s->h));
+ } break;
+ case NK_COMMAND_LINE: {
+ const struct nk_command_line *l = (const struct nk_command_line*)cmd;
+ nk_draw_list_stroke_line(&ctx->draw_list, nk_vec2(l->begin.x, l->begin.y),
+ nk_vec2(l->end.x, l->end.y), l->color, l->line_thickness);
+ } break;
+ case NK_COMMAND_CURVE: {
+ const struct nk_command_curve *q = (const struct nk_command_curve*)cmd;
+ nk_draw_list_stroke_curve(&ctx->draw_list, nk_vec2(q->begin.x, q->begin.y),
+ nk_vec2(q->ctrl[0].x, q->ctrl[0].y), nk_vec2(q->ctrl[1].x,
+ q->ctrl[1].y), nk_vec2(q->end.x, q->end.y), q->color,
+ config->curve_segment_count, q->line_thickness);
+ } break;
+ case NK_COMMAND_RECT: {
+ const struct nk_command_rect *r = (const struct nk_command_rect*)cmd;
+ nk_draw_list_stroke_rect(&ctx->draw_list, nk_rect(r->x, r->y, r->w, r->h),
+ r->color, (float)r->rounding, r->line_thickness);
+ } break;
+ case NK_COMMAND_RECT_FILLED: {
+ const struct nk_command_rect_filled *r = (const struct nk_command_rect_filled*)cmd;
+ nk_draw_list_fill_rect(&ctx->draw_list, nk_rect(r->x, r->y, r->w, r->h),
+ r->color, (float)r->rounding);
+ } break;
+ case NK_COMMAND_RECT_MULTI_COLOR: {
+ const struct nk_command_rect_multi_color *r = (const struct nk_command_rect_multi_color*)cmd;
+ nk_draw_list_fill_rect_multi_color(&ctx->draw_list, nk_rect(r->x, r->y, r->w, r->h),
+ r->left, r->top, r->right, r->bottom);
+ } break;
+ case NK_COMMAND_CIRCLE: {
+ const struct nk_command_circle *c = (const struct nk_command_circle*)cmd;
+ nk_draw_list_stroke_circle(&ctx->draw_list, nk_vec2((float)c->x + (float)c->w/2,
+ (float)c->y + (float)c->h/2), (float)c->w/2, c->color,
+ config->circle_segment_count, c->line_thickness);
+ } break;
+ case NK_COMMAND_CIRCLE_FILLED: {
+ const struct nk_command_circle_filled *c = (const struct nk_command_circle_filled *)cmd;
+ nk_draw_list_fill_circle(&ctx->draw_list, nk_vec2((float)c->x + (float)c->w/2,
+ (float)c->y + (float)c->h/2), (float)c->w/2, c->color,
+ config->circle_segment_count);
+ } break;
+ case NK_COMMAND_ARC: {
+ const struct nk_command_arc *c = (const struct nk_command_arc*)cmd;
+ nk_draw_list_path_line_to(&ctx->draw_list, nk_vec2(c->cx, c->cy));
+ nk_draw_list_path_arc_to(&ctx->draw_list, nk_vec2(c->cx, c->cy), c->r,
+ c->a[0], c->a[1], config->arc_segment_count);
+ nk_draw_list_path_stroke(&ctx->draw_list, c->color, NK_STROKE_CLOSED, c->line_thickness);
+ } break;
+ case NK_COMMAND_ARC_FILLED: {
+ const struct nk_command_arc_filled *c = (const struct nk_command_arc_filled*)cmd;
+ nk_draw_list_path_line_to(&ctx->draw_list, nk_vec2(c->cx, c->cy));
+ nk_draw_list_path_arc_to(&ctx->draw_list, nk_vec2(c->cx, c->cy), c->r,
+ c->a[0], c->a[1], config->arc_segment_count);
+ nk_draw_list_path_fill(&ctx->draw_list, c->color);
+ } break;
+ case NK_COMMAND_TRIANGLE: {
+ const struct nk_command_triangle *t = (const struct nk_command_triangle*)cmd;
+ nk_draw_list_stroke_triangle(&ctx->draw_list, nk_vec2(t->a.x, t->a.y),
+ nk_vec2(t->b.x, t->b.y), nk_vec2(t->c.x, t->c.y), t->color,
+ t->line_thickness);
+ } break;
+ case NK_COMMAND_TRIANGLE_FILLED: {
+ const struct nk_command_triangle_filled *t = (const struct nk_command_triangle_filled*)cmd;
+ nk_draw_list_fill_triangle(&ctx->draw_list, nk_vec2(t->a.x, t->a.y),
+ nk_vec2(t->b.x, t->b.y), nk_vec2(t->c.x, t->c.y), t->color);
+ } break;
+ case NK_COMMAND_POLYGON: {
+ int i;
+ const struct nk_command_polygon*p = (const struct nk_command_polygon*)cmd;
+ for (i = 0; i < p->point_count; ++i) {
+ struct nk_vec2 pnt = nk_vec2((float)p->points[i].x, (float)p->points[i].y);
+ nk_draw_list_path_line_to(&ctx->draw_list, pnt);
+ }
+ nk_draw_list_path_stroke(&ctx->draw_list, p->color, NK_STROKE_CLOSED, p->line_thickness);
+ } break;
+ case NK_COMMAND_POLYGON_FILLED: {
+ int i;
+ const struct nk_command_polygon_filled *p = (const struct nk_command_polygon_filled*)cmd;
+ for (i = 0; i < p->point_count; ++i) {
+ struct nk_vec2 pnt = nk_vec2((float)p->points[i].x, (float)p->points[i].y);
+ nk_draw_list_path_line_to(&ctx->draw_list, pnt);
+ }
+ nk_draw_list_path_fill(&ctx->draw_list, p->color);
+ } break;
+ case NK_COMMAND_POLYLINE: {
+ int i;
+ const struct nk_command_polyline *p = (const struct nk_command_polyline*)cmd;
+ for (i = 0; i < p->point_count; ++i) {
+ struct nk_vec2 pnt = nk_vec2((float)p->points[i].x, (float)p->points[i].y);
+ nk_draw_list_path_line_to(&ctx->draw_list, pnt);
+ }
+ nk_draw_list_path_stroke(&ctx->draw_list, p->color, NK_STROKE_OPEN, p->line_thickness);
+ } break;
+ case NK_COMMAND_TEXT: {
+ const struct nk_command_text *t = (const struct nk_command_text*)cmd;
+ nk_draw_list_add_text(&ctx->draw_list, t->font, nk_rect(t->x, t->y, t->w, t->h),
+ t->string, t->length, t->height, t->foreground);
+ } break;
+ case NK_COMMAND_IMAGE: {
+ const struct nk_command_image *i = (const struct nk_command_image*)cmd;
+ nk_draw_list_add_image(&ctx->draw_list, i->img, nk_rect(i->x, i->y, i->w, i->h), i->col);
+ } break;
+ case NK_COMMAND_CUSTOM: {
+ const struct nk_command_custom *c = (const struct nk_command_custom*)cmd;
+ c->callback(&ctx->draw_list, c->x, c->y, c->w, c->h, c->callback_data);
+ } break;
+ default: break;
+ }
+ }
+ res |= (cmds->needed > cmds->allocated + (cmds->memory.size - cmds->size)) ? NK_CONVERT_COMMAND_BUFFER_FULL: 0;
+ res |= (vertices->needed > vertices->allocated) ? NK_CONVERT_VERTEX_BUFFER_FULL: 0;
+ res |= (elements->needed > elements->allocated) ? NK_CONVERT_ELEMENT_BUFFER_FULL: 0;
+ return res;
+}
+NK_API const struct nk_draw_command*
+nk__draw_begin(const struct nk_context *ctx,
+ const struct nk_buffer *buffer)
+{
+ return nk__draw_list_begin(&ctx->draw_list, buffer);
+}
+NK_API const struct nk_draw_command*
+nk__draw_end(const struct nk_context *ctx, const struct nk_buffer *buffer)
+{
+ return nk__draw_list_end(&ctx->draw_list, buffer);
+}
+NK_API const struct nk_draw_command*
+nk__draw_next(const struct nk_draw_command *cmd,
+ const struct nk_buffer *buffer, const struct nk_context *ctx)
+{
+ return nk__draw_list_next(cmd, buffer, &ctx->draw_list);
+}
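+/* Typical backend usage of the conversion and iteration API above (a minimal
+ * sketch; buffer setup and the vertex layout in cfg are backend specific and
+ * assumed to be prepared elsewhere):
+ *
+ *      struct nk_buffer cmds, verts, elems;
+ *      struct nk_convert_config cfg;
+ *      const struct nk_draw_command *cmd;
+ *      ... init the three buffers and fill cfg (vertex_layout, vertex_size,
+ *          segment counts, null texture, global_alpha, line_AA, shape_AA) ...
+ *      if (nk_convert(ctx, &cmds, &verts, &elems, &cfg) == NK_CONVERT_SUCCESS) {
+ *          nk_draw_foreach(cmd, ctx, &cmds) {
+ *              if (!cmd->elem_count) continue;
+ *              ... bind cmd->texture, set the scissor to cmd->clip_rect and
+ *                  draw cmd->elem_count indices from elems/verts ...
+ *          }
+ *      }
+ *      nk_clear(ctx);
+ */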
+#endif
+
+
+
+
+
+#ifdef NK_INCLUDE_FONT_BAKING
+/* -------------------------------------------------------------
+ *
+ * RECT PACK
+ *
+ * --------------------------------------------------------------*/
+/* stb_rect_pack.h - v0.05 - public domain - rectangle packing */
+/* Sean Barrett 2014 */
+#define NK_RP__MAXVAL 0xffff
+typedef unsigned short nk_rp_coord;
+
+struct nk_rp_rect {
+ /* reserved for your use: */
+ int id;
+ /* input: */
+ nk_rp_coord w, h;
+ /* output: */
+ nk_rp_coord x, y;
+ int was_packed;
+ /* non-zero if valid packing */
+}; /* 16 bytes, nominally */
+
+struct nk_rp_node {
+ nk_rp_coord x,y;
+ struct nk_rp_node *next;
+};
+
+struct nk_rp_context {
+ int width;
+ int height;
+ int align;
+ int init_mode;
+ int heuristic;
+ int num_nodes;
+ struct nk_rp_node *active_head;
+ struct nk_rp_node *free_head;
+ struct nk_rp_node extra[2];
+ /* we allocate two extra nodes so optimal user-node-count is 'width' not 'width+2' */
+};
+
+struct nk_rp__findresult {
+ int x,y;
+ struct nk_rp_node **prev_link;
+};
+
+enum NK_RP_HEURISTIC {
+ NK_RP_HEURISTIC_Skyline_default=0,
+ NK_RP_HEURISTIC_Skyline_BL_sortHeight = NK_RP_HEURISTIC_Skyline_default,
+ NK_RP_HEURISTIC_Skyline_BF_sortHeight
+};
+enum NK_RP_INIT_STATE{NK_RP__INIT_skyline = 1};
+
+NK_INTERN void
+nk_rp_setup_allow_out_of_mem(struct nk_rp_context *context, int allow_out_of_mem)
+{
+ if (allow_out_of_mem)
+ /* if it's ok to run out of memory, then don't bother aligning them; */
+ /* this gives better packing, but may fail due to OOM (even though */
+ /* the rectangles easily fit). @TODO a smarter approach would be to only */
+ /* quantize once we've hit OOM, then we could get rid of this parameter. */
+ context->align = 1;
+ else {
+ /* if it's not ok to run out of memory, then quantize the widths */
+ /* so that num_nodes is always enough nodes. */
+ /* */
+ /* I.e. num_nodes * align >= width */
+ /* align >= width / num_nodes */
+ /* align = ceil(width/num_nodes) */
+ context->align = (context->width + context->num_nodes-1) / context->num_nodes;
+ }
+}
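+/* Worked example for the quantization above (hypothetical sizes): packing into
+ * a 512 pixel wide target with only num_nodes = 100 nodes yields
+ * align = (512 + 100 - 1) / 100 = 6, so rect widths get rounded up to a
+ * multiple of 6 and 100 skyline nodes (6 * 100 >= 512) are always sufficient. */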
+NK_INTERN void
+nk_rp_init_target(struct nk_rp_context *context, int width, int height,
+ struct nk_rp_node *nodes, int num_nodes)
+{
+ int i;
+#ifndef STBRP_LARGE_RECTS
+ NK_ASSERT(width <= 0xffff && height <= 0xffff);
+#endif
+
+ for (i=0; i < num_nodes-1; ++i)
+ nodes[i].next = &nodes[i+1];
+ nodes[i].next = 0;
+ context->init_mode = NK_RP__INIT_skyline;
+ context->heuristic = NK_RP_HEURISTIC_Skyline_default;
+ context->free_head = &nodes[0];
+ context->active_head = &context->extra[0];
+ context->width = width;
+ context->height = height;
+ context->num_nodes = num_nodes;
+ nk_rp_setup_allow_out_of_mem(context, 0);
+
+ /* node 0 is the full width, node 1 is the sentinel (lets us not store width explicitly) */
+ context->extra[0].x = 0;
+ context->extra[0].y = 0;
+ context->extra[0].next = &context->extra[1];
+ context->extra[1].x = (nk_rp_coord) width;
+ context->extra[1].y = 65535;
+ context->extra[1].next = 0;
+}
+/* find minimum y position for a rect of the given width starting at x0 */
+NK_INTERN int
+nk_rp__skyline_find_min_y(struct nk_rp_context *c, struct nk_rp_node *first,
+ int x0, int width, int *pwaste)
+{
+ struct nk_rp_node *node = first;
+ int x1 = x0 + width;
+ int min_y, visited_width, waste_area;
+ NK_ASSERT(first->x <= x0);
+ NK_UNUSED(c);
+
+ NK_ASSERT(node->next->x > x0);
+ /* we ended up handling this in the caller for efficiency */
+ NK_ASSERT(node->x <= x0);
+
+ min_y = 0;
+ waste_area = 0;
+ visited_width = 0;
+ while (node->x < x1)
+ {
+ if (node->y > min_y) {
+ /* raise min_y higher. */
+ /* we've accounted for all waste up to min_y, */
+ /* but we'll now add more waste for everything we've visited */
+ waste_area += visited_width * (node->y - min_y);
+ min_y = node->y;
+ /* the first time through, visited_width might be reduced */
+ if (node->x < x0)
+ visited_width += node->next->x - x0;
+ else
+ visited_width += node->next->x - node->x;
+ } else {
+ /* add waste area */
+ int under_width = node->next->x - node->x;
+ if (under_width + visited_width > width)
+ under_width = width - visited_width;
+ waste_area += under_width * (min_y - node->y);
+ visited_width += under_width;
+ }
+ node = node->next;
+ }
+ *pwaste = waste_area;
+ return min_y;
+}
+NK_INTERN struct nk_rp__findresult
+nk_rp__skyline_find_best_pos(struct nk_rp_context *c, int width, int height)
+{
+ int best_waste = (1<<30), best_x, best_y = (1 << 30);
+ struct nk_rp__findresult fr;
+ struct nk_rp_node **prev, *node, *tail, **best = 0;
+
+ /* align to multiple of c->align */
+ width = (width + c->align - 1);
+ width -= width % c->align;
+ NK_ASSERT(width % c->align == 0);
+
+ node = c->active_head;
+ prev = &c->active_head;
+ while (node->x + width <= c->width) {
+ int y,waste;
+ y = nk_rp__skyline_find_min_y(c, node, node->x, width, &waste);
+ /* actually just want to test BL */
+ if (c->heuristic == NK_RP_HEURISTIC_Skyline_BL_sortHeight) {
+ /* bottom left */
+ if (y < best_y) {
+ best_y = y;
+ best = prev;
+ }
+ } else {
+ /* best-fit */
+ if (y + height <= c->height) {
+ /* can only use it if it fits vertically */
+ if (y < best_y || (y == best_y && waste < best_waste)) {
+ best_y = y;
+ best_waste = waste;
+ best = prev;
+ }
+ }
+ }
+ prev = &node->next;
+ node = node->next;
+ }
+ best_x = (best == 0) ? 0 : (*best)->x;
+
+ /* if doing best-fit (BF), we also have to try aligning right edge to each node position */
+ /* */
+ /* e.g., if fitting */
+ /* */
+ /* ____________________ */
+ /* |____________________| */
+ /* */
+ /* into */
+ /* */
+ /* | | */
+ /* | ____________| */
+ /* |____________| */
+ /* */
+ /* then right-aligned reduces waste, but bottom-left (BL) always chooses left-aligned */
+ /* */
+ /* This makes BF take about 2x the time */
+ if (c->heuristic == NK_RP_HEURISTIC_Skyline_BF_sortHeight)
+ {
+ tail = c->active_head;
+ node = c->active_head;
+ prev = &c->active_head;
+ /* find first node that's admissible */
+ while (tail->x < width)
+ tail = tail->next;
+ while (tail)
+ {
+ int xpos = tail->x - width;
+ int y,waste;
+ NK_ASSERT(xpos >= 0);
+ /* find the left position that matches this */
+ while (node->next->x <= xpos) {
+ prev = &node->next;
+ node = node->next;
+ }
+ NK_ASSERT(node->next->x > xpos && node->x <= xpos);
+ y = nk_rp__skyline_find_min_y(c, node, xpos, width, &waste);
+ if (y + height < c->height) {
+ if (y <= best_y) {
+ if (y < best_y || waste < best_waste || (waste==best_waste && xpos < best_x)) {
+ best_x = xpos;
+ NK_ASSERT(y <= best_y);
+ best_y = y;
+ best_waste = waste;
+ best = prev;
+ }
+ }
+ }
+ tail = tail->next;
+ }
+ }
+ fr.prev_link = best;
+ fr.x = best_x;
+ fr.y = best_y;
+ return fr;
+}
+NK_INTERN struct nk_rp__findresult
+nk_rp__skyline_pack_rectangle(struct nk_rp_context *context, int width, int height)
+{
+ /* find best position according to heuristic */
+ struct nk_rp__findresult res = nk_rp__skyline_find_best_pos(context, width, height);
+ struct nk_rp_node *node, *cur;
+
+ /* bail if: */
+ /* 1. it failed */
+ /* 2. the best node doesn't fit (we don't always check this) */
+ /* 3. we're out of memory */
+ if (res.prev_link == 0 || res.y + height > context->height || context->free_head == 0) {
+ res.prev_link = 0;
+ return res;
+ }
+
+ /* on success, create new node */
+ node = context->free_head;
+ node->x = (nk_rp_coord) res.x;
+ node->y = (nk_rp_coord) (res.y + height);
+
+ context->free_head = node->next;
+
+ /* insert the new node into the right starting point, and */
+ /* let 'cur' point to the remaining nodes needing to be */
+ /* stitched back in */
+ cur = *res.prev_link;
+ if (cur->x < res.x) {
+ /* preserve the existing one, so start testing with the next one */
+ struct nk_rp_node *next = cur->next;
+ cur->next = node;
+ cur = next;
+ } else {
+ *res.prev_link = node;
+ }
+
+ /* from here, traverse cur and free the nodes, until we get to one */
+ /* that shouldn't be freed */
+ while (cur->next && cur->next->x <= res.x + width) {
+ struct nk_rp_node *next = cur->next;
+ /* move the current node to the free list */
+ cur->next = context->free_head;
+ context->free_head = cur;
+ cur = next;
+ }
+ /* stitch the list back in */
+ node->next = cur;
+
+ if (cur->x < res.x + width)
+ cur->x = (nk_rp_coord) (res.x + width);
+ return res;
+}
+NK_INTERN int
+nk_rect_height_compare(const void *a, const void *b)
+{
+ const struct nk_rp_rect *p = (const struct nk_rp_rect *) a;
+ const struct nk_rp_rect *q = (const struct nk_rp_rect *) b;
+ if (p->h > q->h)
+ return -1;
+ if (p->h < q->h)
+ return 1;
+ return (p->w > q->w) ? -1 : (p->w < q->w);
+}
+NK_INTERN int
+nk_rect_original_order(const void *a, const void *b)
+{
+ const struct nk_rp_rect *p = (const struct nk_rp_rect *) a;
+ const struct nk_rp_rect *q = (const struct nk_rp_rect *) b;
+ return (p->was_packed < q->was_packed) ? -1 : (p->was_packed > q->was_packed);
+}
+NK_INTERN void
+nk_rp_qsort(struct nk_rp_rect *array, unsigned int len, int(*cmp)(const void*,const void*))
+{
+ /* iterative quick sort */
+ #define NK_MAX_SORT_STACK 64
+ unsigned right, left = 0, stack[NK_MAX_SORT_STACK], pos = 0;
+ unsigned seed = len/2 * 69069+1;
+ for (;;) {
+ for (; left+1 < len; len++) {
+ struct nk_rp_rect pivot, tmp;
+ if (pos == NK_MAX_SORT_STACK) len = stack[pos = 0];
+ pivot = array[left+seed%(len-left)];
+ seed = seed * 69069 + 1;
+ stack[pos++] = len;
+ for (right = left-1;;) {
+ while (cmp(&array[++right], &pivot) < 0);
+ while (cmp(&pivot, &array[--len]) < 0);
+ if (right >= len) break;
+ tmp = array[right];
+ array[right] = array[len];
+ array[len] = tmp;
+ }
+ }
+ if (pos == 0) break;
+ left = len;
+ len = stack[--pos];
+ }
+ #undef NK_MAX_SORT_STACK
+}
+NK_INTERN void
+nk_rp_pack_rects(struct nk_rp_context *context, struct nk_rp_rect *rects, int num_rects)
+{
+ int i;
+ /* we use the 'was_packed' field internally to allow sorting/unsorting */
+ for (i=0; i < num_rects; ++i) {
+ rects[i].was_packed = i;
+ }
+
+ /* sort according to heuristic */
+ nk_rp_qsort(rects, (unsigned)num_rects, nk_rect_height_compare);
+
+ for (i=0; i < num_rects; ++i) {
+ struct nk_rp__findresult fr = nk_rp__skyline_pack_rectangle(context, rects[i].w, rects[i].h);
+ if (fr.prev_link) {
+ rects[i].x = (nk_rp_coord) fr.x;
+ rects[i].y = (nk_rp_coord) fr.y;
+ } else {
+ rects[i].x = rects[i].y = NK_RP__MAXVAL;
+ }
+ }
+
+ /* unsort */
+ nk_rp_qsort(rects, (unsigned)num_rects, nk_rect_original_order);
+
+ /* set was_packed flags */
+ for (i=0; i < num_rects; ++i)
+ rects[i].was_packed = !(rects[i].x == NK_RP__MAXVAL && rects[i].y == NK_RP__MAXVAL);
+}
+
+/*
+ * ==============================================================
+ *
+ * TRUETYPE
+ *
+ * ===============================================================
+ */
+/* stb_truetype.h - v1.07 - public domain */
+#define NK_TT_MAX_OVERSAMPLE 8
+#define NK_TT__OVER_MASK (NK_TT_MAX_OVERSAMPLE-1)
+
+struct nk_tt_bakedchar {
+ unsigned short x0,y0,x1,y1;
+ /* coordinates of bbox in bitmap */
+ float xoff,yoff,xadvance;
+};
+
+struct nk_tt_aligned_quad{
+ float x0,y0,s0,t0; /* top-left */
+ float x1,y1,s1,t1; /* bottom-right */
+};
+
+struct nk_tt_packedchar {
+ unsigned short x0,y0,x1,y1;
+ /* coordinates of bbox in bitmap */
+ float xoff,yoff,xadvance;
+ float xoff2,yoff2;
+};
+
+struct nk_tt_pack_range {
+ float font_size;
+ int first_unicode_codepoint_in_range;
+ /* if non-zero, then the chars are continuous, and this is the first codepoint */
+ int *array_of_unicode_codepoints;
+ /* if non-zero, then this is an array of unicode codepoints */
+ int num_chars;
+ struct nk_tt_packedchar *chardata_for_range; /* output */
+ unsigned char h_oversample, v_oversample;
+ /* don't set these, they're used internally */
+};
+
+struct nk_tt_pack_context {
+ void *pack_info;
+ int width;
+ int height;
+ int stride_in_bytes;
+ int padding;
+ unsigned int h_oversample, v_oversample;
+ unsigned char *pixels;
+ void *nodes;
+};
+
+struct nk_tt_fontinfo {
+ const unsigned char* data; /* pointer to .ttf file */
+ int fontstart;/* offset of start of font */
+ int numGlyphs;/* number of glyphs, needed for range checking */
+ int loca,head,glyf,hhea,hmtx,kern; /* table locations as offset from start of .ttf */
+ int index_map; /* a cmap mapping for our chosen character encoding */
+ int indexToLocFormat; /* format needed to map from glyph index to glyph */
+};
+
+enum {
+ NK_TT_vmove=1,
+ NK_TT_vline,
+ NK_TT_vcurve
+};
+
+struct nk_tt_vertex {
+ short x,y,cx,cy;
+ unsigned char type,padding;
+};
+
+struct nk_tt__bitmap{
+ int w,h,stride;
+ unsigned char *pixels;
+};
+
+struct nk_tt__hheap_chunk {
+ struct nk_tt__hheap_chunk *next;
+};
+struct nk_tt__hheap {
+ struct nk_allocator alloc;
+ struct nk_tt__hheap_chunk *head;
+ void *first_free;
+ int num_remaining_in_head_chunk;
+};
+
+struct nk_tt__edge {
+ float x0,y0, x1,y1;
+ int invert;
+};
+
+struct nk_tt__active_edge {
+ struct nk_tt__active_edge *next;
+ float fx,fdx,fdy;
+ float direction;
+ float sy;
+ float ey;
+};
+struct nk_tt__point {float x,y;};
+
+#define NK_TT_MACSTYLE_DONTCARE 0
+#define NK_TT_MACSTYLE_BOLD 1
+#define NK_TT_MACSTYLE_ITALIC 2
+#define NK_TT_MACSTYLE_UNDERSCORE 4
+#define NK_TT_MACSTYLE_NONE 8
+/* <= not same as 0, this makes us check the bitfield is 0 */
+
+enum { /* platformID */
+ NK_TT_PLATFORM_ID_UNICODE =0,
+ NK_TT_PLATFORM_ID_MAC =1,
+ NK_TT_PLATFORM_ID_ISO =2,
+ NK_TT_PLATFORM_ID_MICROSOFT =3
+};
+
+enum { /* encodingID for NK_TT_PLATFORM_ID_UNICODE */
+ NK_TT_UNICODE_EID_UNICODE_1_0 =0,
+ NK_TT_UNICODE_EID_UNICODE_1_1 =1,
+ NK_TT_UNICODE_EID_ISO_10646 =2,
+ NK_TT_UNICODE_EID_UNICODE_2_0_BMP=3,
+ NK_TT_UNICODE_EID_UNICODE_2_0_FULL=4
+};
+
+enum { /* encodingID for NK_TT_PLATFORM_ID_MICROSOFT */
+ NK_TT_MS_EID_SYMBOL =0,
+ NK_TT_MS_EID_UNICODE_BMP =1,
+ NK_TT_MS_EID_SHIFTJIS =2,
+ NK_TT_MS_EID_UNICODE_FULL =10
+};
+
+enum { /* encodingID for NK_TT_PLATFORM_ID_MAC; same as Script Manager codes */
+ NK_TT_MAC_EID_ROMAN =0, NK_TT_MAC_EID_ARABIC =4,
+ NK_TT_MAC_EID_JAPANESE =1, NK_TT_MAC_EID_HEBREW =5,
+ NK_TT_MAC_EID_CHINESE_TRAD =2, NK_TT_MAC_EID_GREEK =6,
+ NK_TT_MAC_EID_KOREAN =3, NK_TT_MAC_EID_RUSSIAN =7
+};
+
+enum { /* languageID for NK_TT_PLATFORM_ID_MICROSOFT; same as LCID... */
+ /* problematic because there are e.g. 16 english LCIDs and 16 arabic LCIDs */
+ NK_TT_MS_LANG_ENGLISH =0x0409, NK_TT_MS_LANG_ITALIAN =0x0410,
+ NK_TT_MS_LANG_CHINESE =0x0804, NK_TT_MS_LANG_JAPANESE =0x0411,
+ NK_TT_MS_LANG_DUTCH =0x0413, NK_TT_MS_LANG_KOREAN =0x0412,
+ NK_TT_MS_LANG_FRENCH =0x040c, NK_TT_MS_LANG_RUSSIAN =0x0419,
+ NK_TT_MS_LANG_GERMAN =0x0407, NK_TT_MS_LANG_SPANISH =0x0409,
+ NK_TT_MS_LANG_HEBREW =0x040d, NK_TT_MS_LANG_SWEDISH =0x041D
+};
+
+enum { /* languageID for NK_TT_PLATFORM_ID_MAC */
+ NK_TT_MAC_LANG_ENGLISH =0 , NK_TT_MAC_LANG_JAPANESE =11,
+ NK_TT_MAC_LANG_ARABIC =12, NK_TT_MAC_LANG_KOREAN =23,
+ NK_TT_MAC_LANG_DUTCH =4 , NK_TT_MAC_LANG_RUSSIAN =32,
+ NK_TT_MAC_LANG_FRENCH =1 , NK_TT_MAC_LANG_SPANISH =6 ,
+ NK_TT_MAC_LANG_GERMAN =2 , NK_TT_MAC_LANG_SWEDISH =5 ,
+ NK_TT_MAC_LANG_HEBREW =10, NK_TT_MAC_LANG_CHINESE_SIMPLIFIED =33,
+ NK_TT_MAC_LANG_ITALIAN =3 , NK_TT_MAC_LANG_CHINESE_TRAD =19
+};
+
+#define nk_ttBYTE(p) (* (const nk_byte *) (p))
+#define nk_ttCHAR(p) (* (const char *) (p))
+
+#if defined(NK_BIGENDIAN) && !defined(NK_ALLOW_UNALIGNED_TRUETYPE)
+ #define nk_ttUSHORT(p) (* (nk_ushort *) (p))
+ #define nk_ttSHORT(p) (* (nk_short *) (p))
+ #define nk_ttULONG(p) (* (nk_uint *) (p))
+ #define nk_ttLONG(p) (* (nk_int *) (p))
+#else
+ static nk_ushort nk_ttUSHORT(const nk_byte *p) { return (nk_ushort)(p[0]*256 + p[1]); }
+ static nk_short nk_ttSHORT(const nk_byte *p) { return (nk_short)(p[0]*256 + p[1]); }
+ static nk_uint nk_ttULONG(const nk_byte *p) { return (nk_uint)((p[0]<<24) + (p[1]<<16) + (p[2]<<8) + p[3]); }
+#endif
+
+#define nk_tt_tag4(p,c0,c1,c2,c3)\
+ ((p)[0] == (c0) && (p)[1] == (c1) && (p)[2] == (c2) && (p)[3] == (c3))
+#define nk_tt_tag(p,str) nk_tt_tag4(p,str[0],str[1],str[2],str[3])
+
+NK_INTERN int nk_tt_GetGlyphShape(const struct nk_tt_fontinfo *info, struct nk_allocator *alloc,
+ int glyph_index, struct nk_tt_vertex **pvertices);
+
+NK_INTERN nk_uint
+nk_tt__find_table(const nk_byte *data, nk_uint fontstart, const char *tag)
+{
+ /* @OPTIMIZE: binary search */
+ nk_int num_tables = nk_ttUSHORT(data+fontstart+4);
+ nk_uint tabledir = fontstart + 12;
+ nk_int i;
+ for (i = 0; i < num_tables; ++i) {
+ nk_uint loc = tabledir + (nk_uint)(16*i);
+ if (nk_tt_tag(data+loc+0, tag))
+ return nk_ttULONG(data+loc+8);
+ }
+ return 0;
+}
+NK_INTERN int
+nk_tt_InitFont(struct nk_tt_fontinfo *info, const unsigned char *data2, int fontstart)
+{
+ nk_uint cmap, t;
+ nk_int i,numTables;
+ const nk_byte *data = (const nk_byte *) data2;
+
+ info->data = data;
+ info->fontstart = fontstart;
+
+ cmap = nk_tt__find_table(data, (nk_uint)fontstart, "cmap"); /* required */
+ info->loca = (int)nk_tt__find_table(data, (nk_uint)fontstart, "loca"); /* required */
+ info->head = (int)nk_tt__find_table(data, (nk_uint)fontstart, "head"); /* required */
+ info->glyf = (int)nk_tt__find_table(data, (nk_uint)fontstart, "glyf"); /* required */
+ info->hhea = (int)nk_tt__find_table(data, (nk_uint)fontstart, "hhea"); /* required */
+ info->hmtx = (int)nk_tt__find_table(data, (nk_uint)fontstart, "hmtx"); /* required */
+ info->kern = (int)nk_tt__find_table(data, (nk_uint)fontstart, "kern"); /* not required */
+ if (!cmap || !info->loca || !info->head || !info->glyf || !info->hhea || !info->hmtx)
+ return 0;
+
+ t = nk_tt__find_table(data, (nk_uint)fontstart, "maxp");
+ if (t) info->numGlyphs = nk_ttUSHORT(data+t+4);
+ else info->numGlyphs = 0xffff;
+
+ /* find a cmap encoding table we understand *now* to avoid searching */
+ /* later; the chosen mapping is the same regardless of glyph. */
+ /* (todo: could make this installable) */
+ numTables = nk_ttUSHORT(data + cmap + 2);
+ info->index_map = 0;
+ for (i=0; i < numTables; ++i)
+ {
+ nk_uint encoding_record = cmap + 4 + 8 * (nk_uint)i;
+ /* find an encoding we understand: */
+ switch(nk_ttUSHORT(data+encoding_record)) {
+ case NK_TT_PLATFORM_ID_MICROSOFT:
+ switch (nk_ttUSHORT(data+encoding_record+2)) {
+ case NK_TT_MS_EID_UNICODE_BMP:
+ case NK_TT_MS_EID_UNICODE_FULL:
+ /* MS/Unicode */
+ info->index_map = (int)(cmap + nk_ttULONG(data+encoding_record+4));
+ break;
+ default: break;
+ } break;
+ case NK_TT_PLATFORM_ID_UNICODE:
+ /* Mac/iOS has these */
+ /* all the encodingIDs are unicode, so we don't bother to check it */
+ info->index_map = (int)(cmap + nk_ttULONG(data+encoding_record+4));
+ break;
+ default: break;
+ }
+ }
+ if (info->index_map == 0)
+ return 0;
+ info->indexToLocFormat = nk_ttUSHORT(data+info->head + 50);
+ return 1;
+}
+NK_INTERN int
+nk_tt_FindGlyphIndex(const struct nk_tt_fontinfo *info, int unicode_codepoint)
+{
+ const nk_byte *data = info->data;
+ nk_uint index_map = (nk_uint)info->index_map;
+
+ nk_ushort format = nk_ttUSHORT(data + index_map + 0);
+ if (format == 0) { /* apple byte encoding */
+ nk_int bytes = nk_ttUSHORT(data + index_map + 2);
+ if (unicode_codepoint < bytes-6)
+ return nk_ttBYTE(data + index_map + 6 + unicode_codepoint);
+ return 0;
+ } else if (format == 6) {
+ nk_uint first = nk_ttUSHORT(data + index_map + 6);
+ nk_uint count = nk_ttUSHORT(data + index_map + 8);
+ if ((nk_uint) unicode_codepoint >= first && (nk_uint) unicode_codepoint < first+count)
+ return nk_ttUSHORT(data + index_map + 10 + (unicode_codepoint - (int)first)*2);
+ return 0;
+ } else if (format == 2) {
+ NK_ASSERT(0); /* @TODO: high-byte mapping for japanese/chinese/korean */
+ return 0;
+ } else if (format == 4) { /* standard mapping for windows fonts: binary search collection of ranges */
+ nk_ushort segcount = nk_ttUSHORT(data+index_map+6) >> 1;
+ nk_ushort searchRange = nk_ttUSHORT(data+index_map+8) >> 1;
+ nk_ushort entrySelector = nk_ttUSHORT(data+index_map+10);
+ nk_ushort rangeShift = nk_ttUSHORT(data+index_map+12) >> 1;
+
+ /* do a binary search of the segments */
+ nk_uint endCount = index_map + 14;
+ nk_uint search = endCount;
+
+ if (unicode_codepoint > 0xffff)
+ return 0;
+
+ /* they lie from endCount .. endCount + segCount */
+ /* but searchRange is the nearest power of two, so... */
+ if (unicode_codepoint >= nk_ttUSHORT(data + search + rangeShift*2))
+ search += (nk_uint)(rangeShift*2);
+
+ /* now decrement to bias correctly to find smallest */
+ search -= 2;
+ while (entrySelector) {
+ nk_ushort end;
+ searchRange >>= 1;
+ end = nk_ttUSHORT(data + search + searchRange*2);
+ if (unicode_codepoint > end)
+ search += (nk_uint)(searchRange*2);
+ --entrySelector;
+ }
+ search += 2;
+
+ {
+ nk_ushort offset, start;
+ nk_ushort item = (nk_ushort) ((search - endCount) >> 1);
+
+ NK_ASSERT(unicode_codepoint <= nk_ttUSHORT(data + endCount + 2*item));
+ start = nk_ttUSHORT(data + index_map + 14 + segcount*2 + 2 + 2*item);
+ if (unicode_codepoint < start)
+ return 0;
+
+ offset = nk_ttUSHORT(data + index_map + 14 + segcount*6 + 2 + 2*item);
+ if (offset == 0)
+ return (nk_ushort) (unicode_codepoint + nk_ttSHORT(data + index_map + 14 + segcount*4 + 2 + 2*item));
+
+ return nk_ttUSHORT(data + offset + (unicode_codepoint-start)*2 + index_map + 14 + segcount*6 + 2 + 2*item);
+ }
+ } else if (format == 12 || format == 13) {
+ nk_uint ngroups = nk_ttULONG(data+index_map+12);
+ nk_int low,high;
+ low = 0; high = (nk_int)ngroups;
+ /* Binary search the right group. */
+ while (low < high) {
+ nk_int mid = low + ((high-low) >> 1); /* rounds down, so low <= mid < high */
+ nk_uint start_char = nk_ttULONG(data+index_map+16+mid*12);
+ nk_uint end_char = nk_ttULONG(data+index_map+16+mid*12+4);
+ if ((nk_uint) unicode_codepoint < start_char)
+ high = mid;
+ else if ((nk_uint) unicode_codepoint > end_char)
+ low = mid+1;
+ else {
+ nk_uint start_glyph = nk_ttULONG(data+index_map+16+mid*12+8);
+ if (format == 12)
+ return (int)start_glyph + (int)unicode_codepoint - (int)start_char;
+ else /* format == 13 */
+ return (int)start_glyph;
+ }
+ }
+ return 0; /* not found */
+ }
+ /* @TODO */
+ NK_ASSERT(0);
+ return 0;
+}
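+/* Summary of the cmap subtable formats handled above: format 0 is a plain byte
+ * lookup table, format 6 a dense trimmed range (first/count), format 4 the
+ * segmented mapping used by most Windows fonts (binary search over segment end
+ * codes, then idRangeOffset/idDelta), and formats 12/13 are binary-searched
+ * groups of sequential (or, for 13, identical) glyph ids. Format 2, the legacy
+ * CJK high-byte mapping, is not supported. */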
+NK_INTERN void
+nk_tt_setvertex(struct nk_tt_vertex *v, nk_byte type, nk_int x, nk_int y, nk_int cx, nk_int cy)
+{
+ v->type = type;
+ v->x = (nk_short) x;
+ v->y = (nk_short) y;
+ v->cx = (nk_short) cx;
+ v->cy = (nk_short) cy;
+}
+NK_INTERN int
+nk_tt__GetGlyfOffset(const struct nk_tt_fontinfo *info, int glyph_index)
+{
+ int g1,g2;
+ if (glyph_index >= info->numGlyphs) return -1; /* glyph index out of range */
+ if (info->indexToLocFormat >= 2) return -1; /* unknown index->glyph map format */
+
+ if (info->indexToLocFormat == 0) {
+ g1 = info->glyf + nk_ttUSHORT(info->data + info->loca + glyph_index * 2) * 2;
+ g2 = info->glyf + nk_ttUSHORT(info->data + info->loca + glyph_index * 2 + 2) * 2;
+ } else {
+ g1 = info->glyf + (int)nk_ttULONG (info->data + info->loca + glyph_index * 4);
+ g2 = info->glyf + (int)nk_ttULONG (info->data + info->loca + glyph_index * 4 + 4);
+ }
+ return g1==g2 ? -1 : g1; /* if length is 0, return -1 */
+}
+NK_INTERN int
+nk_tt_GetGlyphBox(const struct nk_tt_fontinfo *info, int glyph_index,
+ int *x0, int *y0, int *x1, int *y1)
+{
+ int g = nk_tt__GetGlyfOffset(info, glyph_index);
+ if (g < 0) return 0;
+
+ if (x0) *x0 = nk_ttSHORT(info->data + g + 2);
+ if (y0) *y0 = nk_ttSHORT(info->data + g + 4);
+ if (x1) *x1 = nk_ttSHORT(info->data + g + 6);
+ if (y1) *y1 = nk_ttSHORT(info->data + g + 8);
+ return 1;
+}
+NK_INTERN int
+nk_tt__close_shape(struct nk_tt_vertex *vertices, int num_vertices, int was_off,
+ int start_off, nk_int sx, nk_int sy, nk_int scx, nk_int scy, nk_int cx, nk_int cy)
+{
+ if (start_off) {
+ if (was_off)
+ nk_tt_setvertex(&vertices[num_vertices++], NK_TT_vcurve, (cx+scx)>>1, (cy+scy)>>1, cx,cy);
+ nk_tt_setvertex(&vertices[num_vertices++], NK_TT_vcurve, sx,sy,scx,scy);
+ } else {
+ if (was_off)
+ nk_tt_setvertex(&vertices[num_vertices++], NK_TT_vcurve,sx,sy,cx,cy);
+ else
+ nk_tt_setvertex(&vertices[num_vertices++], NK_TT_vline,sx,sy,0,0);
+ }
+ return num_vertices;
+}
+NK_INTERN int
+nk_tt_GetGlyphShape(const struct nk_tt_fontinfo *info, struct nk_allocator *alloc,
+ int glyph_index, struct nk_tt_vertex **pvertices)
+{
+ nk_short numberOfContours;
+ const nk_byte *endPtsOfContours;
+ const nk_byte *data = info->data;
+ struct nk_tt_vertex *vertices=0;
+ int num_vertices=0;
+ int g = nk_tt__GetGlyfOffset(info, glyph_index);
+ *pvertices = 0;
+
+ if (g < 0) return 0;
+ numberOfContours = nk_ttSHORT(data + g);
+ if (numberOfContours > 0) {
+ nk_byte flags=0,flagcount;
+ nk_int ins, i,j=0,m,n, next_move, was_off=0, off, start_off=0;
+ nk_int x,y,cx,cy,sx,sy, scx,scy;
+ const nk_byte *points;
+ endPtsOfContours = (data + g + 10);
+ ins = nk_ttUSHORT(data + g + 10 + numberOfContours * 2);
+ points = data + g + 10 + numberOfContours * 2 + 2 + ins;
+
+ n = 1+nk_ttUSHORT(endPtsOfContours + numberOfContours*2-2);
+ m = n + 2*numberOfContours; /* a loose bound on how many vertices we might need */
+ vertices = (struct nk_tt_vertex *)alloc->alloc(alloc->userdata, 0, (nk_size)m * sizeof(vertices[0]));
+ if (vertices == 0)
+ return 0;
+
+ next_move = 0;
+ flagcount=0;
+
+ /* in first pass, we load uninterpreted data into the allocated array */
+ /* above, shifted to the end of the array so we won't overwrite it when */
+ /* we create our final data starting from the front */
+ off = m - n; /* starting offset for uninterpreted data, regardless of how m ends up being calculated */
+
+ /* first load flags */
+ for (i=0; i < n; ++i) {
+ if (flagcount == 0) {
+ flags = *points++;
+ if (flags & 8)
+ flagcount = *points++;
+ } else --flagcount;
+ vertices[off+i].type = flags;
+ }
+
+ /* now load x coordinates */
+ x=0;
+ for (i=0; i < n; ++i) {
+ flags = vertices[off+i].type;
+ if (flags & 2) {
+ nk_short dx = *points++;
+ x += (flags & 16) ? dx : -dx; /* flag bit 4 set means the one-byte delta is positive */
+ } else {
+ if (!(flags & 16)) {
+ x = x + (nk_short) (points[0]*256 + points[1]);
+ points += 2;
+ }
+ }
+ vertices[off+i].x = (nk_short) x;
+ }
+
+ /* now load y coordinates */
+ y=0;
+ for (i=0; i < n; ++i) {
+ flags = vertices[off+i].type;
+ if (flags & 4) {
+ nk_short dy = *points++;
+ y += (flags & 32) ? dy : -dy; /* flag bit 5 set means the one-byte delta is positive */
+ } else {
+ if (!(flags & 32)) {
+ y = y + (nk_short) (points[0]*256 + points[1]);
+ points += 2;
+ }
+ }
+ vertices[off+i].y = (nk_short) y;
+ }
+
+ /* now convert them to our format */
+ num_vertices=0;
+ sx = sy = cx = cy = scx = scy = 0;
+ for (i=0; i < n; ++i)
+ {
+ flags = vertices[off+i].type;
+ x = (nk_short) vertices[off+i].x;
+ y = (nk_short) vertices[off+i].y;
+
+ if (next_move == i) {
+ if (i != 0)
+ num_vertices = nk_tt__close_shape(vertices, num_vertices, was_off, start_off, sx,sy,scx,scy,cx,cy);
+
+ /* now start the new one */
+ start_off = !(flags & 1);
+ if (start_off) {
+ /* if we start off with an off-curve point, then we need to find a point on the curve */
+ /* where we can start, and we need to save some state for when we wrap around. */
+ scx = x;
+ scy = y;
+ if (!(vertices[off+i+1].type & 1)) {
+ /* next point is also an off-curve control point, so interpolate an on-curve start point */
+ sx = (x + (nk_int) vertices[off+i+1].x) >> 1;
+ sy = (y + (nk_int) vertices[off+i+1].y) >> 1;
+ } else {
+ /* otherwise just use the next point as our start point */
+ sx = (nk_int) vertices[off+i+1].x;
+ sy = (nk_int) vertices[off+i+1].y;
+ ++i; /* we're using point i+1 as the starting point, so skip it */
+ }
+ } else {
+ sx = x;
+ sy = y;
+ }
+ nk_tt_setvertex(&vertices[num_vertices++], NK_TT_vmove,sx,sy,0,0);
+ was_off = 0;
+ next_move = 1 + nk_ttUSHORT(endPtsOfContours+j*2);
+ ++j;
+ } else {
+ if (!(flags & 1))
+ { /* if it's a curve */
+ if (was_off) /* two off-curve control points in a row means interpolate an on-curve midpoint */
+ nk_tt_setvertex(&vertices[num_vertices++], NK_TT_vcurve, (cx+x)>>1, (cy+y)>>1, cx, cy);
+ cx = x;
+ cy = y;
+ was_off = 1;
+ } else {
+ if (was_off)
+ nk_tt_setvertex(&vertices[num_vertices++], NK_TT_vcurve, x,y, cx, cy);
+ else nk_tt_setvertex(&vertices[num_vertices++], NK_TT_vline, x,y,0,0);
+ was_off = 0;
+ }
+ }
+ }
+ num_vertices = nk_tt__close_shape(vertices, num_vertices, was_off, start_off, sx,sy,scx,scy,cx,cy);
+ } else if (numberOfContours == -1) {
+ /* Compound shapes. */
+ int more = 1;
+ const nk_byte *comp = data + g + 10;
+ num_vertices = 0;
+ vertices = 0;
+
+ while (more)
+ {
+ nk_ushort flags, gidx;
+ int comp_num_verts = 0, i;
+ struct nk_tt_vertex *comp_verts = 0, *tmp = 0;
+ float mtx[6] = {1,0,0,1,0,0}, m, n;
+
+ flags = (nk_ushort)nk_ttSHORT(comp); comp+=2;
+ gidx = (nk_ushort)nk_ttSHORT(comp); comp+=2;
+
+ if (flags & 2) { /* XY values */
+ if (flags & 1) { /* shorts */
+ mtx[4] = nk_ttSHORT(comp); comp+=2;
+ mtx[5] = nk_ttSHORT(comp); comp+=2;
+ } else {
+ mtx[4] = nk_ttCHAR(comp); comp+=1;
+ mtx[5] = nk_ttCHAR(comp); comp+=1;
+ }
+ } else {
+ /* @TODO handle matching point */
+ NK_ASSERT(0);
+ }
+ if (flags & (1<<3)) { /* WE_HAVE_A_SCALE */
+ mtx[0] = mtx[3] = nk_ttSHORT(comp)/16384.0f; comp+=2;
+ mtx[1] = mtx[2] = 0;
+ } else if (flags & (1<<6)) { /* WE_HAVE_AN_X_AND_YSCALE */
+ mtx[0] = nk_ttSHORT(comp)/16384.0f; comp+=2;
+ mtx[1] = mtx[2] = 0;
+ mtx[3] = nk_ttSHORT(comp)/16384.0f; comp+=2;
+ } else if (flags & (1<<7)) { /* WE_HAVE_A_TWO_BY_TWO */
+ mtx[0] = nk_ttSHORT(comp)/16384.0f; comp+=2;
+ mtx[1] = nk_ttSHORT(comp)/16384.0f; comp+=2;
+ mtx[2] = nk_ttSHORT(comp)/16384.0f; comp+=2;
+ mtx[3] = nk_ttSHORT(comp)/16384.0f; comp+=2;
+ }
+
+ /* Find transformation scales. */
+ m = (float) NK_SQRT(mtx[0]*mtx[0] + mtx[1]*mtx[1]);
+ n = (float) NK_SQRT(mtx[2]*mtx[2] + mtx[3]*mtx[3]);
+
+ /* Get indexed glyph. */
+ comp_num_verts = nk_tt_GetGlyphShape(info, alloc, gidx, &comp_verts);
+ if (comp_num_verts > 0)
+ {
+ /* Transform vertices. */
+ for (i = 0; i < comp_num_verts; ++i) {
+ struct nk_tt_vertex* v = &comp_verts[i];
+ short x,y;
+ x=v->x; y=v->y;
+ v->x = (short)(m * (mtx[0]*x + mtx[2]*y + mtx[4]));
+ v->y = (short)(n * (mtx[1]*x + mtx[3]*y + mtx[5]));
+ x=v->cx; y=v->cy;
+ v->cx = (short)(m * (mtx[0]*x + mtx[2]*y + mtx[4]));
+ v->cy = (short)(n * (mtx[1]*x + mtx[3]*y + mtx[5]));
+ }
+ /* Append vertices. */
+ tmp = (struct nk_tt_vertex*)alloc->alloc(alloc->userdata, 0,
+ (nk_size)(num_vertices+comp_num_verts)*sizeof(struct nk_tt_vertex));
+ if (!tmp) {
+ if (vertices) alloc->free(alloc->userdata, vertices);
+ if (comp_verts) alloc->free(alloc->userdata, comp_verts);
+ return 0;
+ }
+ if (num_vertices > 0) NK_MEMCPY(tmp, vertices, (nk_size)num_vertices*sizeof(struct nk_tt_vertex));
+ NK_MEMCPY(tmp+num_vertices, comp_verts, (nk_size)comp_num_verts*sizeof(struct nk_tt_vertex));
+ if (vertices) alloc->free(alloc->userdata,vertices);
+ vertices = tmp;
+ alloc->free(alloc->userdata,comp_verts);
+ num_vertices += comp_num_verts;
+ }
+ /* More components ? */
+ more = flags & (1<<5);
+ }
+ } else if (numberOfContours < 0) {
+ /* @TODO other compound variations? */
+ NK_ASSERT(0);
+ } else {
+ /* numberOfContours == 0, do nothing */
+ }
+ *pvertices = vertices;
+ return num_vertices;
+}
+NK_INTERN void
+nk_tt_GetGlyphHMetrics(const struct nk_tt_fontinfo *info, int glyph_index,
+ int *advanceWidth, int *leftSideBearing)
+{
+ nk_ushort numOfLongHorMetrics = nk_ttUSHORT(info->data+info->hhea + 34);
+ if (glyph_index < numOfLongHorMetrics) {
+ if (advanceWidth)
+ *advanceWidth = nk_ttSHORT(info->data + info->hmtx + 4*glyph_index);
+ if (leftSideBearing)
+ *leftSideBearing = nk_ttSHORT(info->data + info->hmtx + 4*glyph_index + 2);
+ } else {
+ if (advanceWidth)
+ *advanceWidth = nk_ttSHORT(info->data + info->hmtx + 4*(numOfLongHorMetrics-1));
+ if (leftSideBearing)
+ *leftSideBearing = nk_ttSHORT(info->data + info->hmtx + 4*numOfLongHorMetrics + 2*(glyph_index - numOfLongHorMetrics));
+ }
+}
+NK_INTERN void
+nk_tt_GetFontVMetrics(const struct nk_tt_fontinfo *info,
+ int *ascent, int *descent, int *lineGap)
+{
+ if (ascent ) *ascent = nk_ttSHORT(info->data+info->hhea + 4);
+ if (descent) *descent = nk_ttSHORT(info->data+info->hhea + 6);
+ if (lineGap) *lineGap = nk_ttSHORT(info->data+info->hhea + 8);
+}
+NK_INTERN float
+nk_tt_ScaleForPixelHeight(const struct nk_tt_fontinfo *info, float height)
+{
+ int fheight = nk_ttSHORT(info->data + info->hhea + 4) - nk_ttSHORT(info->data + info->hhea + 6);
+ return (float) height / (float)fheight;
+}
+NK_INTERN float
+nk_tt_ScaleForMappingEmToPixels(const struct nk_tt_fontinfo *info, float pixels)
+{
+ int unitsPerEm = nk_ttUSHORT(info->data + info->head + 18);
+ return pixels / (float)unitsPerEm;
+}
+
+/*-------------------------------------------------------------
+ * antialiasing software rasterizer
+ * --------------------------------------------------------------*/
+NK_INTERN void
+nk_tt_GetGlyphBitmapBoxSubpixel(const struct nk_tt_fontinfo *font,
+ int glyph, float scale_x, float scale_y,float shift_x, float shift_y,
+ int *ix0, int *iy0, int *ix1, int *iy1)
+{
+ int x0,y0,x1,y1;
+ if (!nk_tt_GetGlyphBox(font, glyph, &x0,&y0,&x1,&y1)) {
+ /* e.g. space character */
+ if (ix0) *ix0 = 0;
+ if (iy0) *iy0 = 0;
+ if (ix1) *ix1 = 0;
+ if (iy1) *iy1 = 0;
+ } else {
+ /* move to integral bboxes (treating pixels as little squares: which pixels get touched?) */
+ if (ix0) *ix0 = nk_ifloorf((float)x0 * scale_x + shift_x);
+ if (iy0) *iy0 = nk_ifloorf((float)-y1 * scale_y + shift_y);
+ if (ix1) *ix1 = nk_iceilf ((float)x1 * scale_x + shift_x);
+ if (iy1) *iy1 = nk_iceilf ((float)-y0 * scale_y + shift_y);
+ }
+}
+NK_INTERN void
+nk_tt_GetGlyphBitmapBox(const struct nk_tt_fontinfo *font, int glyph,
+ float scale_x, float scale_y, int *ix0, int *iy0, int *ix1, int *iy1)
+{
+ nk_tt_GetGlyphBitmapBoxSubpixel(font, glyph, scale_x, scale_y,0.0f,0.0f, ix0, iy0, ix1, iy1);
+}
+
+/*-------------------------------------------------------------
+ * Rasterizer
+ * --------------------------------------------------------------*/
+NK_INTERN void*
+nk_tt__hheap_alloc(struct nk_tt__hheap *hh, nk_size size)
+{
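+ /* chunked pool allocator: reuse a node from the free list if possible, otherwise */
+ /* carve from the head chunk, allocating a fresh chunk when it runs out; smaller */
+ /* request sizes get larger chunks (more items per allocation). */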
+ if (hh->first_free) {
+ void *p = hh->first_free;
+ hh->first_free = * (void **) p;
+ return p;
+ } else {
+ if (hh->num_remaining_in_head_chunk == 0) {
+ int count = (size < 32 ? 2000 : size < 128 ? 800 : 100);
+ struct nk_tt__hheap_chunk *c = (struct nk_tt__hheap_chunk *)
+ hh->alloc.alloc(hh->alloc.userdata, 0,
+ sizeof(struct nk_tt__hheap_chunk) + size * (nk_size)count);
+ if (c == 0) return 0;
+ c->next = hh->head;
+ hh->head = c;
+ hh->num_remaining_in_head_chunk = count;
+ }
+ --hh->num_remaining_in_head_chunk;
+ return (char *) (hh->head) + size * (nk_size)hh->num_remaining_in_head_chunk;
+ }
+}
+NK_INTERN void
+nk_tt__hheap_free(struct nk_tt__hheap *hh, void *p)
+{
+ *(void **) p = hh->first_free;
+ hh->first_free = p;
+}
+NK_INTERN void
+nk_tt__hheap_cleanup(struct nk_tt__hheap *hh)
+{
+ struct nk_tt__hheap_chunk *c = hh->head;
+ while (c) {
+ struct nk_tt__hheap_chunk *n = c->next;
+ hh->alloc.free(hh->alloc.userdata, c);
+ c = n;
+ }
+}
+NK_INTERN struct nk_tt__active_edge*
+nk_tt__new_active(struct nk_tt__hheap *hh, struct nk_tt__edge *e,
+ int off_x, float start_point)
+{
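+ /* turn an edge into an active-edge record: fdx is the x step per unit y, and fx */
+ /* is the edge's x position at start_point, shifted into bitmap space by off_x. */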
+ struct nk_tt__active_edge *z = (struct nk_tt__active_edge *)
+ nk_tt__hheap_alloc(hh, sizeof(*z));
+ float dxdy = (e->x1 - e->x0) / (e->y1 - e->y0);
+ /*STBTT_assert(e->y0 <= start_point); */
+ if (!z) return z;
+ z->fdx = dxdy;
+ z->fdy = (dxdy != 0) ? (1/dxdy): 0;
+ z->fx = e->x0 + dxdy * (start_point - e->y0);
+ z->fx -= (float)off_x;
+ z->direction = e->invert ? 1.0f : -1.0f;
+ z->sy = e->y0;
+ z->ey = e->y1;
+ z->next = 0;
+ return z;
+}
+NK_INTERN void
+nk_tt__handle_clipped_edge(float *scanline, int x, struct nk_tt__active_edge *e,
+ float x0, float y0, float x1, float y1)
+{
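+ /* accumulate the signed coverage of segment (x0,y0)-(x1,y1), clipped to the */
+ /* edge's own y range, into pixel x of the given scanline buffer. */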
+ if (y0 == y1) return;
+ NK_ASSERT(y0 < y1);
+ NK_ASSERT(e->sy <= e->ey);
+ if (y0 > e->ey) return;
+ if (y1 < e->sy) return;
+ if (y0 < e->sy) {
+ x0 += (x1-x0) * (e->sy - y0) / (y1-y0);
+ y0 = e->sy;
+ }
+ if (y1 > e->ey) {
+ x1 += (x1-x0) * (e->ey - y1) / (y1-y0);
+ y1 = e->ey;
+ }
+
+ if (x0 == x) NK_ASSERT(x1 <= x+1);
+ else if (x0 == x+1) NK_ASSERT(x1 >= x);
+ else if (x0 <= x) NK_ASSERT(x1 <= x);
+ else if (x0 >= x+1) NK_ASSERT(x1 >= x+1);
+ else NK_ASSERT(x1 >= x && x1 <= x+1);
+
+ if (x0 <= x && x1 <= x)
+ scanline[x] += e->direction * (y1-y0);
+ else if (x0 >= x+1 && x1 >= x+1); /* segment entirely right of this pixel: no coverage to add */
+ else {
+ NK_ASSERT(x0 >= x && x0 <= x+1 && x1 >= x && x1 <= x+1);
+ /* coverage = 1 - average x position */
+ scanline[x] += (float)e->direction * (float)(y1-y0) * (1.0f-((x0-(float)x)+(x1-(float)x))/2.0f);
+ }
+}
+NK_INTERN void
+nk_tt__fill_active_edges_new(float *scanline, float *scanline_fill, int len,
+ struct nk_tt__active_edge *e, float y_top)
+{
+ float y_bottom = y_top+1;
+ while (e)
+ {
+ /* brute force every pixel */
+ /* compute intersection points with top & bottom */
+ NK_ASSERT(e->ey >= y_top);
+ if (e->fdx == 0) {
+ float x0 = e->fx;
+ if (x0 < len) {
+ if (x0 >= 0) {
+ nk_tt__handle_clipped_edge(scanline,(int) x0,e, x0,y_top, x0,y_bottom);
+ nk_tt__handle_clipped_edge(scanline_fill-1,(int) x0+1,e, x0,y_top, x0,y_bottom);
+ } else {
+ nk_tt__handle_clipped_edge(scanline_fill-1,0,e, x0,y_top, x0,y_bottom);
+ }
+ }
+ } else {
+ float x0 = e->fx;
+ float dx = e->fdx;
+ float xb = x0 + dx;
+ float x_top, x_bottom;
+ float y0,y1;
+ float dy = e->fdy;
+ NK_ASSERT(e->sy <= y_bottom && e->ey >= y_top);
+
+ /* compute the endpoints of the line segment clipped to this scanline. x0 is */
+ /* the intersection of the line with y_top, but that point may lie off the */
+ /* actual segment, so clamp the endpoints to the segment's own y range. */
+ if (e->sy > y_top) {
+ x_top = x0 + dx * (e->sy - y_top);
+ y0 = e->sy;
+ } else {
+ x_top = x0;
+ y0 = y_top;
+ }
+
+ if (e->ey < y_bottom) {
+ x_bottom = x0 + dx * (e->ey - y_top);
+ y1 = e->ey;
+ } else {
+ x_bottom = xb;
+ y1 = y_bottom;
+ }
+
+ if (x_top >= 0 && x_bottom >= 0 && x_top < len && x_bottom < len)
+ {
+ /* from here on, we don't have to range check x values */
+ if ((int) x_top == (int) x_bottom) {
+ float height;
+ /* simple case, only spans one pixel */
+ int x = (int) x_top;
+ height = y1 - y0;
+ NK_ASSERT(x >= 0 && x < len);
+ scanline[x] += e->direction * (1.0f-(((float)x_top - (float)x) + ((float)x_bottom-(float)x))/2.0f) * (float)height;
+ scanline_fill[x] += e->direction * (float)height; /* everything right of this pixel is filled */
+ } else {
+ int x,x1,x2;
+ float y_crossing, step, sign, area;
+ /* covers 2+ pixels */
+ if (x_top > x_bottom)
+ {
+ /* flip scanline vertically; signed area is the same */
+ float t;
+ y0 = y_bottom - (y0 - y_top);
+ y1 = y_bottom - (y1 - y_top);
+ t = y0; y0 = y1; y1 = t;
+ t = x_bottom; x_bottom = x_top; x_top = t;
+ dx = -dx;
+ dy = -dy;
+ t = x0; x0 = xb; xb = t;
+ }
+
+ x1 = (int) x_top;
+ x2 = (int) x_bottom;
+ /* compute where the edge crosses the vertical line x = x1+1 */
+ y_crossing = ((float)x1+1 - (float)x0) * (float)dy + (float)y_top;
+
+ sign = e->direction;
+ /* area of the rectangle covered from y0..y_crossing */
+ area = sign * (y_crossing-y0);
+ /* area of the triangle (x_top,y0), (x1+1,y0), (x1+1,y_crossing) */
+ scanline[x1] += area * (1.0f-((float)((float)x_top - (float)x1)+(float)(x1+1-x1))/2.0f);
+
+ step = sign * dy;
+ for (x = x1+1; x < x2; ++x) {
+ scanline[x] += area + step/2;
+ area += step;
+ }
+ y_crossing += (float)dy * (float)(x2 - (x1+1));
+
+ scanline[x2] += area + sign * (1.0f-((float)(x2-x2)+((float)x_bottom-(float)x2))/2.0f) * (y1-y_crossing);
+ scanline_fill[x2] += sign * (y1-y0);
+ }
+ }
+ else
+ {
+ /* if edge goes outside of box we're drawing, we require */
+ /* clipping logic. since this does not match the intended use */
+ /* of this library, we use a different, very slow brute */
+ /* force implementation */
+ int x;
+ for (x=0; x < len; ++x)
+ {
+ /* cases: */
+ /* */
+ /* there can be up to two intersections with the pixel. any intersection */
+ /* with left or right edges can be handled by splitting into two (or three) */
+ /* regions. intersections with top & bottom do not necessitate case-wise logic. */
+ /* */
+ /* the old way of doing this found the intersections with the left & right edges, */
+ /* then used some simple logic to produce up to three segments in sorted order */
+ /* from top-to-bottom. however, this had a problem: if an x edge was epsilon */
+ /* across the x border, then the corresponding y position might not be distinct */
+ /* from the other y segment, and it might be ignored as an empty segment. to avoid */
+ /* that, we need to explicitly produce segments based on x positions. */
+
+ /* rename variables into clearly paired (x,y) endpoints */
+ float ya = y_top;
+ float x1 = (float) (x);
+ float x2 = (float) (x+1);
+ float x3 = xb;
+ float y3 = y_bottom;
+ float yb,y2;
+
+ yb = ((float)x - x0) / dx + y_top;
+ y2 = ((float)x+1 - x0) / dx + y_top;
+
+ if (x0 < x1 && x3 > x2) { /* three segments descending down-right */
+ nk_tt__handle_clipped_edge(scanline,x,e, x0,ya, x1,yb);
+ nk_tt__handle_clipped_edge(scanline,x,e, x1,yb, x2,y2);
+ nk_tt__handle_clipped_edge(scanline,x,e, x2,y2, x3,y3);
+ } else if (x3 < x1 && x0 > x2) { /* three segments descending down-left */
+ nk_tt__handle_clipped_edge(scanline,x,e, x0,ya, x2,y2);
+ nk_tt__handle_clipped_edge(scanline,x,e, x2,y2, x1,yb);
+ nk_tt__handle_clipped_edge(scanline,x,e, x1,yb, x3,y3);
+ } else if (x0 < x1 && x3 > x1) { /* two segments across x, down-right */
+ nk_tt__handle_clipped_edge(scanline,x,e, x0,ya, x1,yb);
+ nk_tt__handle_clipped_edge(scanline,x,e, x1,yb, x3,y3);
+ } else if (x3 < x1 && x0 > x1) { /* two segments across x, down-left */
+ nk_tt__handle_clipped_edge(scanline,x,e, x0,ya, x1,yb);
+ nk_tt__handle_clipped_edge(scanline,x,e, x1,yb, x3,y3);
+ } else if (x0 < x2 && x3 > x2) { /* two segments across x+1, down-right */
+ nk_tt__handle_clipped_edge(scanline,x,e, x0,ya, x2,y2);
+ nk_tt__handle_clipped_edge(scanline,x,e, x2,y2, x3,y3);
+ } else if (x3 < x2 && x0 > x2) { /* two segments across x+1, down-left */
+ nk_tt__handle_clipped_edge(scanline,x,e, x0,ya, x2,y2);
+ nk_tt__handle_clipped_edge(scanline,x,e, x2,y2, x3,y3);
+ } else { /* one segment */
+ nk_tt__handle_clipped_edge(scanline,x,e, x0,ya, x3,y3);
+ }
+ }
+ }
+ }
+ e = e->next;
+ }
+}
+NK_INTERN void
+nk_tt__rasterize_sorted_edges(struct nk_tt__bitmap *result, struct nk_tt__edge *e,
+ int n, int vsubsample, int off_x, int off_y, struct nk_allocator *alloc)
+{
+ /* directly AA rasterize edges w/o supersampling */
+ struct nk_tt__hheap hh;
+ struct nk_tt__active_edge *active = 0;
+ int y,j=0, i;
+ float scanline_data[129], *scanline, *scanline2;
+
+ NK_UNUSED(vsubsample);
+ nk_zero_struct(hh);
+ hh.alloc = *alloc;
+
+ if (result->w > 64)
+ scanline = (float *) alloc->alloc(alloc->userdata,0, (nk_size)(result->w*2+1) * sizeof(float));
+ else scanline = scanline_data;
+
+ scanline2 = scanline + result->w;
+ y = off_y;
+ e[n].y0 = (float) (off_y + result->h) + 1;
+
+ while (j < result->h)
+ {
+ /* find the top and bottom of the pixel row for this scanline */
+ float scan_y_top = (float)y + 0.0f;
+ float scan_y_bottom = (float)y + 1.0f;
+ struct nk_tt__active_edge **step = &active;
+
+ NK_MEMSET(scanline , 0, (nk_size)result->w*sizeof(scanline[0]));
+ NK_MEMSET(scanline2, 0, (nk_size)(result->w+1)*sizeof(scanline[0]));
+
+ /* update all active edges; */
+ /* remove all active edges that terminate before the top of this scanline */
+ while (*step) {
+ struct nk_tt__active_edge * z = *step;
+ if (z->ey <= scan_y_top) {
+ *step = z->next; /* delete from list */
+ NK_ASSERT(z->direction);
+ z->direction = 0;
+ nk_tt__hheap_free(&hh, z);
+ } else {
+ step = &((*step)->next); /* advance through list */
+ }
+ }
+
+ /* insert all edges that start before the bottom of this scanline */
+ while (e->y0 <= scan_y_bottom) {
+ if (e->y0 != e->y1) {
+ struct nk_tt__active_edge *z = nk_tt__new_active(&hh, e, off_x, scan_y_top);
+ if (z != 0) {
+ NK_ASSERT(z->ey >= scan_y_top);
+ /* insert at front */
+ z->next = active;
+ active = z;
+ }
+ }
+ ++e;
+ }
+
+ /* now process all active edges */
+ if (active)
+ nk_tt__fill_active_edges_new(scanline, scanline2+1, result->w, active, scan_y_top);
+
+ {
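+ /* resolve coverage: scanline2 holds per-pixel deltas of "filled to the right", */
+ /* so its prefix sum plus the partial coverage stored in scanline gives the */
+ /* winding; its absolute value is mapped to an 8-bit alpha value. */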
+ float sum = 0;
+ for (i=0; i < result->w; ++i) {
+ float k;
+ int m;
+ sum += scanline2[i];
+ k = scanline[i] + sum;
+ k = (float) NK_ABS(k) * 255.0f + 0.5f;
+ m = (int) k;
+ if (m > 255) m = 255;
+ result->pixels[j*result->stride + i] = (unsigned char) m;
+ }
+ }
+ /* advance all the edges */
+ step = &active;
+ while (*step) {
+ struct nk_tt__active_edge *z = *step;
+ z->fx += z->fdx; /* advance to position for current scanline */
+ step = &((*step)->next); /* advance through list */
+ }
+ ++y;
+ ++j;
+ }
+ nk_tt__hheap_cleanup(&hh);
+ if (scanline != scanline_data)
+ alloc->free(alloc->userdata, scanline);
+}
+NK_INTERN void
+nk_tt__sort_edges_ins_sort(struct nk_tt__edge *p, int n)
+{
+ int i,j;
+ #define NK_TT__COMPARE(a,b) ((a)->y0 < (b)->y0)
+ for (i=1; i < n; ++i) {
+ struct nk_tt__edge t = p[i], *a = &t;
+ j = i;
+ while (j > 0) {
+ struct nk_tt__edge *b = &p[j-1];
+ int c = NK_TT__COMPARE(a,b);
+ if (!c) break;
+ p[j] = p[j-1];
+ --j;
+ }
+ if (i != j)
+ p[j] = t;
+ }
+}
+NK_INTERN void
+nk_tt__sort_edges_quicksort(struct nk_tt__edge *p, int n)
+{
+ /* threshold for transitioning to insertion sort */
+ while (n > 12) {
+ struct nk_tt__edge t;
+ int c01,c12,c,m,i,j;
+
+ /* compute median of three */
+ m = n >> 1;
+ c01 = NK_TT__COMPARE(&p[0],&p[m]);
+ c12 = NK_TT__COMPARE(&p[m],&p[n-1]);
+
+ /* if 0 >= mid >= end, or 0 < mid < end, then use mid */
+ if (c01 != c12) {
+ /* otherwise, we'll need to swap something else to middle */
+ int z;
+ c = NK_TT__COMPARE(&p[0],&p[n-1]);
+ /* 0>mid && mid<n: 0>n => n; 0<n => 0 */
+ /* 0<mid && mid>n: 0>n => 0; 0<n => n */
+ z = (c == c12) ? 0 : n-1;
+ t = p[z];
+ p[z] = p[m];
+ p[m] = t;
+ }
+
+ /* now p[m] is the median-of-three */
+ /* swap it to the beginning so it won't move around */
+ t = p[0];
+ p[0] = p[m];
+ p[m] = t;
+
+ /* partition loop */
+ i=1;
+ j=n-1;
+ for(;;) {
+ /* handling of equality is crucial here */
+ /* for sentinels & efficiency with duplicates */
+ for (;;++i) {
+ if (!NK_TT__COMPARE(&p[i], &p[0])) break;
+ }
+ for (;;--j) {
+ if (!NK_TT__COMPARE(&p[0], &p[j])) break;
+ }
+
+ /* make sure we haven't crossed */
+ if (i >= j) break;
+ t = p[i];
+ p[i] = p[j];
+ p[j] = t;
+
+ ++i;
+ --j;
+
+ }
+
+ /* recurse on smaller side, iterate on larger */
+ if (j < (n-i)) {
+ nk_tt__sort_edges_quicksort(p,j);
+ p = p+i;
+ n = n-i;
+ } else {
+ nk_tt__sort_edges_quicksort(p+i, n-i);
+ n = j;
+ }
+ }
+}
+NK_INTERN void
+nk_tt__sort_edges(struct nk_tt__edge *p, int n)
+{
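+ /* quicksort partitions runs down to 12 or fewer edges, then a single */
+ /* insertion-sort pass finishes the nearly sorted array. */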
+ nk_tt__sort_edges_quicksort(p, n);
+ nk_tt__sort_edges_ins_sort(p, n);
+}
+NK_INTERN void
+nk_tt__rasterize(struct nk_tt__bitmap *result, struct nk_tt__point *pts,
+ int *wcount, int windings, float scale_x, float scale_y,
+ float shift_x, float shift_y, int off_x, int off_y, int invert,
+ struct nk_allocator *alloc)
+{
+ float y_scale_inv = invert ? -scale_y : scale_y;
+ struct nk_tt__edge *e;
+ int n,i,j,k,m;
+ int vsubsample = 1;
+ /* vsubsample should divide 255 evenly; otherwise we won't reach full opacity */
+
+ /* now we have to blow out the windings into explicit edge lists */
+ n = 0;
+ for (i=0; i < windings; ++i)
+ n += wcount[i];
+
+ e = (struct nk_tt__edge*)
+ alloc->alloc(alloc->userdata, 0,(sizeof(*e) * (nk_size)(n+1)));
+ if (e == 0) return;
+ n = 0;
+
+ m=0;
+ for (i=0; i < windings; ++i)
+ {
+ struct nk_tt__point *p = pts + m;
+ m += wcount[i];
+ j = wcount[i]-1;
+ for (k=0; k < wcount[i]; j=k++) {
+ int a=k,b=j;
+ /* skip the edge if horizontal */
+ if (p[j].y == p[k].y)
+ continue;
+
+ /* add edge from j to k to the list */
+ e[n].invert = 0;
+ if (invert ? p[j].y > p[k].y : p[j].y < p[k].y) {
+ e[n].invert = 1;
+ a=j,b=k;
+ }
+ e[n].x0 = p[a].x * scale_x + shift_x;
+ e[n].y0 = (p[a].y * y_scale_inv + shift_y) * (float)vsubsample;
+ e[n].x1 = p[b].x * scale_x + shift_x;
+ e[n].y1 = (p[b].y * y_scale_inv + shift_y) * (float)vsubsample;
+ ++n;
+ }
+ }
+
+ /* now sort the edges by their highest point (should snap to integer, and then by x) */
+ /*STBTT_sort(e, n, sizeof(e[0]), nk_tt__edge_compare); */
+ nk_tt__sort_edges(e, n);
+ /* now, traverse the scanlines and find the intersections on each scanline, use xor winding rule */
+ nk_tt__rasterize_sorted_edges(result, e, n, vsubsample, off_x, off_y, alloc);
+ alloc->free(alloc->userdata, e);
+}
+NK_INTERN void
+nk_tt__add_point(struct nk_tt__point *points, int n, float x, float y)
+{
+ if (!points) return; /* during first pass, it's unallocated */
+ points[n].x = x;
+ points[n].y = y;
+}
+NK_INTERN int
+nk_tt__tesselate_curve(struct nk_tt__point *points, int *num_points,
+ float x0, float y0, float x1, float y1, float x2, float y2,
+ float objspace_flatness_squared, int n)
+{
+ /* tesselate until threshold p is happy...
+ * @TODO warped to compensate for non-linear stretching */
+ /* midpoint */
+ float mx = (x0 + 2*x1 + x2)/4;
+ float my = (y0 + 2*y1 + y2)/4;
+ /* versus directly drawn line */
+ float dx = (x0+x2)/2 - mx;
+ float dy = (y0+y2)/2 - my;
+ if (n > 16) /* 65536 segments on one curve better be enough! */
+ return 1;
+
+ /* half-pixel error allowed... need to be smaller if AA */
+ if (dx*dx+dy*dy > objspace_flatness_squared) {
+ nk_tt__tesselate_curve(points, num_points, x0,y0,
+ (x0+x1)/2.0f,(y0+y1)/2.0f, mx,my, objspace_flatness_squared,n+1);
+ nk_tt__tesselate_curve(points, num_points, mx,my,
+ (x1+x2)/2.0f,(y1+y2)/2.0f, x2,y2, objspace_flatness_squared,n+1);
+ } else {
+ nk_tt__add_point(points, *num_points,x2,y2);
+ *num_points = *num_points+1;
+ }
+ return 1;
+}
+NK_INTERN struct nk_tt__point*
+nk_tt_FlattenCurves(struct nk_tt_vertex *vertices, int num_verts,
+ float objspace_flatness, int **contour_lengths, int *num_contours,
+ struct nk_allocator *alloc)
+{
+ /* returns the flattened point array; the contour count is written to *num_contours */
+ struct nk_tt__point *points=0;
+ int num_points=0;
+ float objspace_flatness_squared = objspace_flatness * objspace_flatness;
+ int i;
+ int n=0;
+ int start=0;
+ int pass;
+
+ /* count how many "moves" there are to get the contour count */
+ for (i=0; i < num_verts; ++i)
+ if (vertices[i].type == NK_TT_vmove) ++n;
+
+ *num_contours = n;
+ if (n == 0) return 0;
+
+ *contour_lengths = (int *)
+ alloc->alloc(alloc->userdata,0, (sizeof(**contour_lengths) * (nk_size)n));
+ if (*contour_lengths == 0) {
+ *num_contours = 0;
+ return 0;
+ }
+
+ /* make two passes through the points so we don't need to realloc */
+ for (pass=0; pass < 2; ++pass)
+ {
+ float x=0,y=0;
+ if (pass == 1) {
+ points = (struct nk_tt__point *)
+ alloc->alloc(alloc->userdata,0, (nk_size)num_points * sizeof(points[0]));
+ if (points == 0) goto error;
+ }
+ num_points = 0;
+ n= -1;
+
+ for (i=0; i < num_verts; ++i)
+ {
+ switch (vertices[i].type) {
+ case NK_TT_vmove:
+ /* start the next contour */
+ if (n >= 0)
+ (*contour_lengths)[n] = num_points - start;
+ ++n;
+ start = num_points;
+
+ x = vertices[i].x, y = vertices[i].y;
+ nk_tt__add_point(points, num_points++, x,y);
+ break;
+ case NK_TT_vline:
+ x = vertices[i].x, y = vertices[i].y;
+ nk_tt__add_point(points, num_points++, x, y);
+ break;
+ case NK_TT_vcurve:
+ nk_tt__tesselate_curve(points, &num_points, x,y,
+ vertices[i].cx, vertices[i].cy,
+ vertices[i].x, vertices[i].y,
+ objspace_flatness_squared, 0);
+ x = vertices[i].x, y = vertices[i].y;
+ break;
+ default: break;
+ }
+ }
+ (*contour_lengths)[n] = num_points - start;
+ }
+ return points;
+
+error:
+ alloc->free(alloc->userdata, points);
+ alloc->free(alloc->userdata, *contour_lengths);
+ *contour_lengths = 0;
+ *num_contours = 0;
+ return 0;
+}
+NK_INTERN void
+nk_tt_Rasterize(struct nk_tt__bitmap *result, float flatness_in_pixels,
+ struct nk_tt_vertex *vertices, int num_verts,
+ float scale_x, float scale_y, float shift_x, float shift_y,
+ int x_off, int y_off, int invert, struct nk_allocator *alloc)
+{
+ float scale = scale_x > scale_y ? scale_y : scale_x;
+ int winding_count, *winding_lengths;
+ struct nk_tt__point *windings = nk_tt_FlattenCurves(vertices, num_verts,
+ flatness_in_pixels / scale, &winding_lengths, &winding_count, alloc);
+
+ NK_ASSERT(alloc);
+ if (windings) {
+ nk_tt__rasterize(result, windings, winding_lengths, winding_count,
+ scale_x, scale_y, shift_x, shift_y, x_off, y_off, invert, alloc);
+ alloc->free(alloc->userdata, winding_lengths);
+ alloc->free(alloc->userdata, windings);
+ }
+}
+NK_INTERN void
+nk_tt_MakeGlyphBitmapSubpixel(const struct nk_tt_fontinfo *info, unsigned char *output,
+ int out_w, int out_h, int out_stride, float scale_x, float scale_y,
+ float shift_x, float shift_y, int glyph, struct nk_allocator *alloc)
+{
+ int ix0,iy0;
+ struct nk_tt_vertex *vertices;
+ int num_verts = nk_tt_GetGlyphShape(info, alloc, glyph, &vertices);
+ struct nk_tt__bitmap gbm;
+
+ nk_tt_GetGlyphBitmapBoxSubpixel(info, glyph, scale_x, scale_y, shift_x,
+ shift_y, &ix0,&iy0,0,0);
+ gbm.pixels = output;
+ gbm.w = out_w;
+ gbm.h = out_h;
+ gbm.stride = out_stride;
+
+ if (gbm.w && gbm.h)
+ nk_tt_Rasterize(&gbm, 0.35f, vertices, num_verts, scale_x, scale_y,
+ shift_x, shift_y, ix0,iy0, 1, alloc);
+ alloc->free(alloc->userdata, vertices);
+}
+
+/*-------------------------------------------------------------
+ * Bitmap baking
+ * --------------------------------------------------------------*/
+NK_INTERN int
+nk_tt_PackBegin(struct nk_tt_pack_context *spc, unsigned char *pixels,
+ int pw, int ph, int stride_in_bytes, int padding, struct nk_allocator *alloc)
+{
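+ /* allocate the rect-packer context plus one packer node per pixel of packable */
+ /* width (pw - padding), then initialize it over the (pw-padding) x (ph-padding) target. */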
+ int num_nodes = pw - padding;
+ struct nk_rp_context *context = (struct nk_rp_context *)
+ alloc->alloc(alloc->userdata,0, sizeof(*context));
+ struct nk_rp_node *nodes = (struct nk_rp_node*)
+ alloc->alloc(alloc->userdata,0, (sizeof(*nodes ) * (nk_size)num_nodes));
+
+ if (context == 0 || nodes == 0) {
+ if (context != 0) alloc->free(alloc->userdata, context);
+ if (nodes != 0) alloc->free(alloc->userdata, nodes);
+ return 0;
+ }
+
+ spc->width = pw;
+ spc->height = ph;
+ spc->pixels = pixels;
+ spc->pack_info = context;
+ spc->nodes = nodes;
+ spc->padding = padding;
+ spc->stride_in_bytes = (stride_in_bytes != 0) ? stride_in_bytes : pw;
+ spc->h_oversample = 1;
+ spc->v_oversample = 1;
+
+ nk_rp_init_target(context, pw-padding, ph-padding, nodes, num_nodes);
+ if (pixels)
+ NK_MEMSET(pixels, 0, (nk_size)(pw*ph)); /* background of 0 around pixels */
+ return 1;
+}
+NK_INTERN void
+nk_tt_PackEnd(struct nk_tt_pack_context *spc, struct nk_allocator *alloc)
+{
+ alloc->free(alloc->userdata, spc->nodes);
+ alloc->free(alloc->userdata, spc->pack_info);
+}
+NK_INTERN void
+nk_tt_PackSetOversampling(struct nk_tt_pack_context *spc,
+ unsigned int h_oversample, unsigned int v_oversample)
+{
+ NK_ASSERT(h_oversample <= NK_TT_MAX_OVERSAMPLE);
+ NK_ASSERT(v_oversample <= NK_TT_MAX_OVERSAMPLE);
+ if (h_oversample <= NK_TT_MAX_OVERSAMPLE)
+ spc->h_oversample = h_oversample;
+ if (v_oversample <= NK_TT_MAX_OVERSAMPLE)
+ spc->v_oversample = v_oversample;
+}
+NK_INTERN void
+nk_tt__h_prefilter(unsigned char *pixels, int w, int h, int stride_in_bytes,
+ int kernel_width)
+{
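+ /* horizontal box blur of width kernel_width over each row, using a small ring */
+ /* buffer to keep a running sum of the last kernel_width source pixels. */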
+ unsigned char buffer[NK_TT_MAX_OVERSAMPLE];
+ int safe_w = w - kernel_width;
+ int j;
+
+ for (j=0; j < h; ++j)
+ {
+ int i;
+ unsigned int total;
+ NK_MEMSET(buffer, 0, (nk_size)kernel_width);
+
+ total = 0;
+
+ /* make kernel_width a constant in common cases so compiler can optimize out the divide */
+ switch (kernel_width) {
+ case 2:
+ for (i=0; i <= safe_w; ++i) {
+ total += (unsigned int)(pixels[i] - buffer[i & NK_TT__OVER_MASK]);
+ buffer[(i+kernel_width) & NK_TT__OVER_MASK] = pixels[i];
+ pixels[i] = (unsigned char) (total / 2);
+ }
+ break;
+ case 3:
+ for (i=0; i <= safe_w; ++i) {
+ total += (unsigned int)(pixels[i] - buffer[i & NK_TT__OVER_MASK]);
+ buffer[(i+kernel_width) & NK_TT__OVER_MASK] = pixels[i];
+ pixels[i] = (unsigned char) (total / 3);
+ }
+ break;
+ case 4:
+ for (i=0; i <= safe_w; ++i) {
+ total += (unsigned int)pixels[i] - buffer[i & NK_TT__OVER_MASK];
+ buffer[(i+kernel_width) & NK_TT__OVER_MASK] = pixels[i];
+ pixels[i] = (unsigned char) (total / 4);
+ }
+ break;
+ case 5:
+ for (i=0; i <= safe_w; ++i) {
+ total += (unsigned int)(pixels[i] - buffer[i & NK_TT__OVER_MASK]);
+ buffer[(i+kernel_width) & NK_TT__OVER_MASK] = pixels[i];
+ pixels[i] = (unsigned char) (total / 5);
+ }
+ break;
+ default:
+ for (i=0; i <= safe_w; ++i) {
+ total += (unsigned int)(pixels[i] - buffer[i & NK_TT__OVER_MASK]);
+ buffer[(i+kernel_width) & NK_TT__OVER_MASK] = pixels[i];
+ pixels[i] = (unsigned char) (total / (unsigned int)kernel_width);
+ }
+ break;
+ }
+
+ for (; i < w; ++i) {
+ NK_ASSERT(pixels[i] == 0);
+ total -= (unsigned int)(buffer[i & NK_TT__OVER_MASK]);
+ pixels[i] = (unsigned char) (total / (unsigned int)kernel_width);
+ }
+ pixels += stride_in_bytes;
+ }
+}
+NK_INTERN void
+nk_tt__v_prefilter(unsigned char *pixels, int w, int h, int stride_in_bytes,
+ int kernel_width)
+{
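+ /* same box blur as nk_tt__h_prefilter, applied down each column (pixels one stride apart) */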
+ unsigned char buffer[NK_TT_MAX_OVERSAMPLE];
+ int safe_h = h - kernel_width;
+ int j;
+
+ for (j=0; j < w; ++j)
+ {
+ int i;
+ unsigned int total;
+ NK_MEMSET(buffer, 0, (nk_size)kernel_width);
+
+ total = 0;
+
+ /* make kernel_width a constant in common cases so compiler can optimize out the divide */
+ switch (kernel_width) {
+ case 2:
+ for (i=0; i <= safe_h; ++i) {
+ total += (unsigned int)(pixels[i*stride_in_bytes] - buffer[i & NK_TT__OVER_MASK]);
+ buffer[(i+kernel_width) & NK_TT__OVER_MASK] = pixels[i*stride_in_bytes];
+ pixels[i*stride_in_bytes] = (unsigned char) (total / 2);
+ }
+ break;
+ case 3:
+ for (i=0; i <= safe_h; ++i) {
+ total += (unsigned int)(pixels[i*stride_in_bytes] - buffer[i & NK_TT__OVER_MASK]);
+ buffer[(i+kernel_width) & NK_TT__OVER_MASK] = pixels[i*stride_in_bytes];
+ pixels[i*stride_in_bytes] = (unsigned char) (total / 3);
+ }
+ break;
+ case 4:
+ for (i=0; i <= safe_h; ++i) {
+ total += (unsigned int)(pixels[i*stride_in_bytes] - buffer[i & NK_TT__OVER_MASK]);
+ buffer[(i+kernel_width) & NK_TT__OVER_MASK] = pixels[i*stride_in_bytes];
+ pixels[i*stride_in_bytes] = (unsigned char) (total / 4);
+ }
+ break;
+ case 5:
+ for (i=0; i <= safe_h; ++i) {
+ total += (unsigned int)(pixels[i*stride_in_bytes] - buffer[i & NK_TT__OVER_MASK]);
+ buffer[(i+kernel_width) & NK_TT__OVER_MASK] = pixels[i*stride_in_bytes];
+ pixels[i*stride_in_bytes] = (unsigned char) (total / 5);
+ }
+ break;
+ default:
+ for (i=0; i <= safe_h; ++i) {
+ total += (unsigned int)(pixels[i*stride_in_bytes] - buffer[i & NK_TT__OVER_MASK]);
+ buffer[(i+kernel_width) & NK_TT__OVER_MASK] = pixels[i*stride_in_bytes];
+ pixels[i*stride_in_bytes] = (unsigned char) (total / (unsigned int)kernel_width);
+ }
+ break;
+ }
+
+ for (; i < h; ++i) {
+ NK_ASSERT(pixels[i*stride_in_bytes] == 0);
+ total -= (unsigned int)(buffer[i & NK_TT__OVER_MASK]);
+ pixels[i*stride_in_bytes] = (unsigned char) (total / (unsigned int)kernel_width);
+ }
+ pixels += 1;
+ }
+}
+NK_INTERN float
+nk_tt__oversample_shift(int oversample)
+{
+ if (!oversample)
+ return 0.0f;
+
+ /* The prefilter is a box filter of width "oversample", */
+ /* which shifts phase by (oversample - 1)/2 pixels in */
+ /* oversampled space. We want to shift in the opposite */
+ /* direction to counter this. */
+ return (float)-(oversample - 1) / (2.0f * (float)oversample);
+}
+NK_INTERN int
+nk_tt_PackFontRangesGatherRects(struct nk_tt_pack_context *spc,
+ struct nk_tt_fontinfo *info, struct nk_tt_pack_range *ranges,
+ int num_ranges, struct nk_rp_rect *rects)
+{
+ /* rects array must be big enough to accommodate all characters in the given ranges */
+ int i,j,k;
+ k = 0;
+
+ for (i=0; i < num_ranges; ++i) {
+ float fh = ranges[i].font_size;
+ float scale = (fh > 0) ? nk_tt_ScaleForPixelHeight(info, fh):
+ nk_tt_ScaleForMappingEmToPixels(info, -fh);
+ ranges[i].h_oversample = (unsigned char) spc->h_oversample;
+ ranges[i].v_oversample = (unsigned char) spc->v_oversample;
+ for (j=0; j < ranges[i].num_chars; ++j) {
+ int x0,y0,x1,y1;
+ int codepoint = ranges[i].first_unicode_codepoint_in_range ?
+ ranges[i].first_unicode_codepoint_in_range + j :
+ ranges[i].array_of_unicode_codepoints[j];
+
+ int glyph = nk_tt_FindGlyphIndex(info, codepoint);
+ nk_tt_GetGlyphBitmapBoxSubpixel(info,glyph, scale * (float)spc->h_oversample,
+ scale * (float)spc->v_oversample, 0,0, &x0,&y0,&x1,&y1);
+ rects[k].w = (nk_rp_coord) (x1-x0 + spc->padding + (int)spc->h_oversample-1);
+ rects[k].h = (nk_rp_coord) (y1-y0 + spc->padding + (int)spc->v_oversample-1);
+ ++k;
+ }
+ }
+ return k;
+}
+NK_INTERN int
+nk_tt_PackFontRangesRenderIntoRects(struct nk_tt_pack_context *spc,
+ struct nk_tt_fontinfo *info, struct nk_tt_pack_range *ranges,
+ int num_ranges, struct nk_rp_rect *rects, struct nk_allocator *alloc)
+{
+ int i,j,k, return_value = 1;
+ /* save current values */
+ int old_h_over = (int)spc->h_oversample;
+ int old_v_over = (int)spc->v_oversample;
+ /* rects array must be big enough to accommodate all characters in the given ranges */
+
+ k = 0;
+ for (i=0; i < num_ranges; ++i)
+ {
+ float fh = ranges[i].font_size;
+ float recip_h,recip_v,sub_x,sub_y;
+ float scale = fh > 0 ? nk_tt_ScaleForPixelHeight(info, fh):
+ nk_tt_ScaleForMappingEmToPixels(info, -fh);
+
+ spc->h_oversample = ranges[i].h_oversample;
+ spc->v_oversample = ranges[i].v_oversample;
+
+ recip_h = 1.0f / (float)spc->h_oversample;
+ recip_v = 1.0f / (float)spc->v_oversample;
+
+ sub_x = nk_tt__oversample_shift((int)spc->h_oversample);
+ sub_y = nk_tt__oversample_shift((int)spc->v_oversample);
+
+ for (j=0; j < ranges[i].num_chars; ++j)
+ {
+ struct nk_rp_rect *r = &rects[k];
+ if (r->was_packed)
+ {
+ struct nk_tt_packedchar *bc = &ranges[i].chardata_for_range[j];
+ int advance, lsb, x0,y0,x1,y1;
+ int codepoint = ranges[i].first_unicode_codepoint_in_range ?
+ ranges[i].first_unicode_codepoint_in_range + j :
+ ranges[i].array_of_unicode_codepoints[j];
+ int glyph = nk_tt_FindGlyphIndex(info, codepoint);
+ nk_rp_coord pad = (nk_rp_coord) spc->padding;
+
+ /* pad on left and top */
+ r->x = (nk_rp_coord)((int)r->x + (int)pad);
+ r->y = (nk_rp_coord)((int)r->y + (int)pad);
+ r->w = (nk_rp_coord)((int)r->w - (int)pad);
+ r->h = (nk_rp_coord)((int)r->h - (int)pad);
+
+ nk_tt_GetGlyphHMetrics(info, glyph, &advance, &lsb);
+ nk_tt_GetGlyphBitmapBox(info, glyph, scale * (float)spc->h_oversample,
+ (scale * (float)spc->v_oversample), &x0,&y0,&x1,&y1);
+ nk_tt_MakeGlyphBitmapSubpixel(info, spc->pixels + r->x + r->y*spc->stride_in_bytes,
+ (int)(r->w - spc->h_oversample+1), (int)(r->h - spc->v_oversample+1),
+ spc->stride_in_bytes, scale * (float)spc->h_oversample,
+ scale * (float)spc->v_oversample, 0,0, glyph, alloc);
+
+ if (spc->h_oversample > 1)
+ nk_tt__h_prefilter(spc->pixels + r->x + r->y*spc->stride_in_bytes,
+ r->w, r->h, spc->stride_in_bytes, (int)spc->h_oversample);
+
+ if (spc->v_oversample > 1)
+ nk_tt__v_prefilter(spc->pixels + r->x + r->y*spc->stride_in_bytes,
+ r->w, r->h, spc->stride_in_bytes, (int)spc->v_oversample);
+
+ bc->x0 = (nk_ushort) r->x;
+ bc->y0 = (nk_ushort) r->y;
+ bc->x1 = (nk_ushort) (r->x + r->w);
+ bc->y1 = (nk_ushort) (r->y + r->h);
+ bc->xadvance = scale * (float)advance;
+ bc->xoff = (float) x0 * recip_h + sub_x;
+ bc->yoff = (float) y0 * recip_v + sub_y;
+ bc->xoff2 = ((float)x0 + r->w) * recip_h + sub_x;
+ bc->yoff2 = ((float)y0 + r->h) * recip_v + sub_y;
+ } else {
+ return_value = 0; /* if any fail, report failure */
+ }
+ ++k;
+ }
+ }
+ /* restore original values */
+ spc->h_oversample = (unsigned int)old_h_over;
+ spc->v_oversample = (unsigned int)old_v_over;
+ return return_value;
+}
+NK_INTERN void
+nk_tt_GetPackedQuad(struct nk_tt_packedchar *chardata, int pw, int ph,
+ int char_index, float *xpos, float *ypos, struct nk_tt_aligned_quad *q,
+ int align_to_integer)
+{
+ float ipw = 1.0f / (float)pw, iph = 1.0f / (float)ph;
+ struct nk_tt_packedchar *b = (struct nk_tt_packedchar*)(chardata + char_index);
+ if (align_to_integer) {
+ int tx = nk_ifloorf((*xpos + b->xoff) + 0.5f);
+ int ty = nk_ifloorf((*ypos + b->yoff) + 0.5f);
+
+ float x = (float)tx;
+ float y = (float)ty;
+
+ q->x0 = x;
+ q->y0 = y;
+ q->x1 = x + b->xoff2 - b->xoff;
+ q->y1 = y + b->yoff2 - b->yoff;
+ } else {
+ q->x0 = *xpos + b->xoff;
+ q->y0 = *ypos + b->yoff;
+ q->x1 = *xpos + b->xoff2;
+ q->y1 = *ypos + b->yoff2;
+ }
+ q->s0 = b->x0 * ipw;
+ q->t0 = b->y0 * iph;
+ q->s1 = b->x1 * ipw;
+ q->t1 = b->y1 * iph;
+ *xpos += b->xadvance;
+}
+
+/* -------------------------------------------------------------
+ *
+ * FONT BAKING
+ *
+ * --------------------------------------------------------------*/
+struct nk_font_bake_data {
+ struct nk_tt_fontinfo info;
+ struct nk_rp_rect *rects;
+ struct nk_tt_pack_range *ranges;
+ nk_rune range_count;
+};
+
+struct nk_font_baker {
+ struct nk_allocator alloc;
+ struct nk_tt_pack_context spc;
+ struct nk_font_bake_data *build;
+ struct nk_tt_packedchar *packed_chars;
+ struct nk_rp_rect *rects;
+ struct nk_tt_pack_range *ranges;
+};
+
+NK_GLOBAL const nk_size nk_rect_align = NK_ALIGNOF(struct nk_rp_rect);
+NK_GLOBAL const nk_size nk_range_align = NK_ALIGNOF(struct nk_tt_pack_range);
+NK_GLOBAL const nk_size nk_char_align = NK_ALIGNOF(struct nk_tt_packedchar);
+NK_GLOBAL const nk_size nk_build_align = NK_ALIGNOF(struct nk_font_bake_data);
+NK_GLOBAL const nk_size nk_baker_align = NK_ALIGNOF(struct nk_font_baker);
+
+NK_INTERN int
+nk_range_count(const nk_rune *range)
+{
+ const nk_rune *iter = range;
+ NK_ASSERT(range);
+ if (!range) return 0;
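+ /* ranges are (first,last) codepoint pairs terminated by a single 0 */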
+ while (*(iter++) != 0);
+ return (iter == range) ? 0 : (int)((iter - range)/2);
+}
+NK_INTERN int
+nk_range_glyph_count(const nk_rune *range, int count)
+{
+ int i = 0;
+ int total_glyphs = 0;
+ for (i = 0; i < count; ++i) {
+ int diff;
+ nk_rune f = range[(i*2)+0];
+ nk_rune t = range[(i*2)+1];
+ NK_ASSERT(t >= f);
+ diff = (int)((t - f) + 1);
+ total_glyphs += diff;
+ }
+ return total_glyphs;
+}
+NK_API const nk_rune*
+nk_font_default_glyph_ranges(void)
+{
+ NK_STORAGE const nk_rune ranges[] = {0x0020, 0x00FF, 0};
+ return ranges;
+}
+NK_API const nk_rune*
+nk_font_chinese_glyph_ranges(void)
+{
+ NK_STORAGE const nk_rune ranges[] = {
+ 0x0020, 0x00FF,
+ 0x3000, 0x30FF,
+ 0x31F0, 0x31FF,
+ 0xFF00, 0xFFEF,
+ 0x4e00, 0x9FAF,
+ 0
+ };
+ return ranges;
+}
+NK_API const nk_rune*
+nk_font_cyrillic_glyph_ranges(void)
+{
+ NK_STORAGE const nk_rune ranges[] = {
+ 0x0020, 0x00FF,
+ 0x0400, 0x052F,
+ 0x2DE0, 0x2DFF,
+ 0xA640, 0xA69F,
+ 0
+ };
+ return ranges;
+}
+NK_API const nk_rune*
+nk_font_korean_glyph_ranges(void)
+{
+ NK_STORAGE const nk_rune ranges[] = {
+ 0x0020, 0x00FF,
+ 0x3131, 0x3163,
+ 0xAC00, 0xD79D,
+ 0
+ };
+ return ranges;
+}
+NK_INTERN void
+nk_font_baker_memory(nk_size *temp, int *glyph_count,
+ struct nk_font_config *config_list, int count)
+{
+ int range_count = 0;
+ int total_range_count = 0;
+ struct nk_font_config *iter, *i;
+
+ NK_ASSERT(config_list);
+ NK_ASSERT(glyph_count);
+ if (!config_list) {
+ *temp = 0;
+ *glyph_count = 0;
+ return;
+ }
+ *glyph_count = 0;
+ for (iter = config_list; iter; iter = iter->next) {
+ i = iter;
+ do {if (!i->range) iter->range = nk_font_default_glyph_ranges();
+ range_count = nk_range_count(i->range);
+ total_range_count += range_count;
+ *glyph_count += nk_range_glyph_count(i->range, range_count);
+ } while ((i = i->n) != iter);
+ }
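+ /* worst-case scratch size: one rect and one packed char per glyph, one pack range */
+ /* per range, one bake data per font, the baker itself, plus alignment slack. */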
+ *temp = (nk_size)*glyph_count * sizeof(struct nk_rp_rect);
+ *temp += (nk_size)total_range_count * sizeof(struct nk_tt_pack_range);
+ *temp += (nk_size)*glyph_count * sizeof(struct nk_tt_packedchar);
+ *temp += (nk_size)count * sizeof(struct nk_font_bake_data);
+ *temp += sizeof(struct nk_font_baker);
+ *temp += nk_rect_align + nk_range_align + nk_char_align;
+ *temp += nk_build_align + nk_baker_align;
+}
+NK_INTERN struct nk_font_baker*
+nk_font_baker(void *memory, int glyph_count, int count, struct nk_allocator *alloc)
+{
+ struct nk_font_baker *baker;
+ if (!memory) return 0;
+ /* setup baker inside a memory block */
+ baker = (struct nk_font_baker*)NK_ALIGN_PTR(memory, nk_baker_align);
+ baker->build = (struct nk_font_bake_data*)NK_ALIGN_PTR((baker + 1), nk_build_align);
+ baker->packed_chars = (struct nk_tt_packedchar*)NK_ALIGN_PTR((baker->build + count), nk_char_align);
+ baker->rects = (struct nk_rp_rect*)NK_ALIGN_PTR((baker->packed_chars + glyph_count), nk_rect_align);
+ baker->ranges = (struct nk_tt_pack_range*)NK_ALIGN_PTR((baker->rects + glyph_count), nk_range_align);
+ baker->alloc = *alloc;
+ return baker;
+}
+NK_INTERN int
+nk_font_bake_pack(struct nk_font_baker *baker,
+ nk_size *image_memory, int *width, int *height, struct nk_recti *custom,
+ const struct nk_font_config *config_list, int count,
+ struct nk_allocator *alloc)
+{
+ NK_STORAGE const nk_size max_height = 1024 * 32;
+ const struct nk_font_config *config_iter, *it;
+ int total_glyph_count = 0;
+ int total_range_count = 0;
+ int range_count = 0;
+ int i = 0;
+
+ NK_ASSERT(image_memory);
+ NK_ASSERT(width);
+ NK_ASSERT(height);
+ NK_ASSERT(config_list);
+ NK_ASSERT(count);
+ NK_ASSERT(alloc);
+
+ if (!image_memory || !width || !height || !config_list || !count) return nk_false;
+ for (config_iter = config_list; config_iter; config_iter = config_iter->next) {
+ it = config_iter;
+ do {range_count = nk_range_count(it->range);
+ total_range_count += range_count;
+ total_glyph_count += nk_range_glyph_count(it->range, range_count);
+ } while ((it = it->n) != config_iter);
+ }
+ /* initialize the truetype font info for each font inside the temporary baker memory */
+ for (config_iter = config_list; config_iter; config_iter = config_iter->next) {
+ it = config_iter;
+ do {if (!nk_tt_InitFont(&baker->build[i++].info, (const unsigned char*)it->ttf_blob, 0))
+ return nk_false;
+ } while ((it = it->n) != config_iter);
+ }
+ *height = 0;
+ *width = (total_glyph_count > 1000) ? 1024 : 512;
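+ /* heuristic: use a wider atlas for large glyph counts; the height grows while */
+ /* packing and is rounded up to a power of two once all rects are placed. */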
+ nk_tt_PackBegin(&baker->spc, 0, (int)*width, (int)max_height, 0, 1, alloc);
+ {
+ int input_i = 0;
+ int range_n = 0;
+ int rect_n = 0;
+ int char_n = 0;
+
+ if (custom) {
+ /* pack custom user data first so it will be in the upper left corner */
+ struct nk_rp_rect custom_space;
+ nk_zero(&custom_space, sizeof(custom_space));
+ custom_space.w = (nk_rp_coord)(custom->w);
+ custom_space.h = (nk_rp_coord)(custom->h);
+
+ nk_tt_PackSetOversampling(&baker->spc, 1, 1);
+ nk_rp_pack_rects((struct nk_rp_context*)baker->spc.pack_info, &custom_space, 1);
+ *height = NK_MAX(*height, (int)(custom_space.y + custom_space.h));
+
+ custom->x = (short)custom_space.x;
+ custom->y = (short)custom_space.y;
+ custom->w = (short)custom_space.w;
+ custom->h = (short)custom_space.h;
+ }
+
+ /* first font pass: pack all glyphs */
+ for (input_i = 0, config_iter = config_list; input_i < count && config_iter;
+ config_iter = config_iter->next) {
+ it = config_iter;
+ do {int n = 0;
+ int glyph_count;
+ const nk_rune *in_range;
+ const struct nk_font_config *cfg = it;
+ struct nk_font_bake_data *tmp = &baker->build[input_i++];
+
+ /* count glyphs + ranges in current font */
+ glyph_count = 0; range_count = 0;
+ for (in_range = cfg->range; in_range[0] && in_range[1]; in_range += 2) {
+ glyph_count += (int)(in_range[1] - in_range[0]) + 1;
+ range_count++;
+ }
+
+ /* setup ranges */
+ tmp->ranges = baker->ranges + range_n;
+ tmp->range_count = (nk_rune)range_count;
+ range_n += range_count;
+ for (i = 0; i < range_count; ++i) {
+ in_range = &cfg->range[i * 2];
+ tmp->ranges[i].font_size = cfg->size;
+ tmp->ranges[i].first_unicode_codepoint_in_range = (int)in_range[0];
+ tmp->ranges[i].num_chars = (int)(in_range[1]- in_range[0]) + 1;
+ tmp->ranges[i].chardata_for_range = baker->packed_chars + char_n;
+ char_n += tmp->ranges[i].num_chars;
+ }
+
+ /* pack */
+ tmp->rects = baker->rects + rect_n;
+ rect_n += glyph_count;
+ nk_tt_PackSetOversampling(&baker->spc, cfg->oversample_h, cfg->oversample_v);
+ n = nk_tt_PackFontRangesGatherRects(&baker->spc, &tmp->info,
+ tmp->ranges, (int)tmp->range_count, tmp->rects);
+ nk_rp_pack_rects((struct nk_rp_context*)baker->spc.pack_info, tmp->rects, (int)n);
+
+ /* texture height */
+ for (i = 0; i < n; ++i) {
+ if (tmp->rects[i].was_packed)
+ *height = NK_MAX(*height, tmp->rects[i].y + tmp->rects[i].h);
+ }
+ } while ((it = it->n) != config_iter);
+ }
+ NK_ASSERT(rect_n == total_glyph_count);
+ NK_ASSERT(char_n == total_glyph_count);
+ NK_ASSERT(range_n == total_range_count);
+ }
+ *height = (int)nk_round_up_pow2((nk_uint)*height);
+ *image_memory = (nk_size)(*width) * (nk_size)(*height);
+ return nk_true;
+}
+NK_INTERN void
+nk_font_bake(struct nk_font_baker *baker, void *image_memory, int width, int height,
+ struct nk_font_glyph *glyphs, int glyphs_count,
+ const struct nk_font_config *config_list, int font_count)
+{
+ int input_i = 0;
+ nk_rune glyph_n = 0;
+ const struct nk_font_config *config_iter;
+ const struct nk_font_config *it;
+
+ NK_ASSERT(image_memory);
+ NK_ASSERT(width);
+ NK_ASSERT(height);
+ NK_ASSERT(config_list);
+ NK_ASSERT(baker);
+ NK_ASSERT(font_count);
+ NK_ASSERT(glyphs_count);
+ if (!image_memory || !width || !height || !config_list ||
+ !font_count || !glyphs || !glyphs_count)
+ return;
+
+ /* second font pass: render glyphs */
+ nk_zero(image_memory, (nk_size)((nk_size)width * (nk_size)height));
+ baker->spc.pixels = (unsigned char*)image_memory;
+ baker->spc.height = (int)height;
+ for (input_i = 0, config_iter = config_list; input_i < font_count && config_iter;
+ config_iter = config_iter->next) {
+ it = config_iter;
+ do {const struct nk_font_config *cfg = it;
+ struct nk_font_bake_data *tmp = &baker->build[input_i++];
+ nk_tt_PackSetOversampling(&baker->spc, cfg->oversample_h, cfg->oversample_v);
+ nk_tt_PackFontRangesRenderIntoRects(&baker->spc, &tmp->info, tmp->ranges,
+ (int)tmp->range_count, tmp->rects, &baker->alloc);
+ } while ((it = it->n) != config_iter);
+ } nk_tt_PackEnd(&baker->spc, &baker->alloc);
+
+ /* third pass: setup font and glyphs */
+ for (input_i = 0, config_iter = config_list; input_i < font_count && config_iter;
+ config_iter = config_iter->next) {
+ it = config_iter;
+ do {nk_size i = 0;
+ int char_idx = 0;
+ nk_rune glyph_count = 0;
+ const struct nk_font_config *cfg = it;
+ struct nk_font_bake_data *tmp = &baker->build[input_i++];
+ struct nk_baked_font *dst_font = cfg->font;
+
+ float font_scale = nk_tt_ScaleForPixelHeight(&tmp->info, cfg->size);
+ int unscaled_ascent, unscaled_descent, unscaled_line_gap;
+ nk_tt_GetFontVMetrics(&tmp->info, &unscaled_ascent, &unscaled_descent,
+ &unscaled_line_gap);
+
+ /* fill baked font */
+ if (!cfg->merge_mode) {
+ dst_font->ranges = cfg->range;
+ dst_font->height = cfg->size;
+ dst_font->ascent = ((float)unscaled_ascent * font_scale);
+ dst_font->descent = ((float)unscaled_descent * font_scale);
+ dst_font->glyph_offset = glyph_n;
+ /* need to zero this, or it will carry over from a previous */
+ /* bake and cause a segfault when accessing glyphs[] */
+ dst_font->glyph_count = 0;
+ }
+
+ /* fill own baked font glyph array */
+ for (i = 0; i < tmp->range_count; ++i) {
+ struct nk_tt_pack_range *range = &tmp->ranges[i];
+ for (char_idx = 0; char_idx < range->num_chars; char_idx++)
+ {
+ nk_rune codepoint = 0;
+ float dummy_x = 0, dummy_y = 0;
+ struct nk_tt_aligned_quad q;
+ struct nk_font_glyph *glyph;
+
+ /* query glyph bounds from stb_truetype */
+ const struct nk_tt_packedchar *pc = &range->chardata_for_range[char_idx];
+ if (!pc->x0 && !pc->x1 && !pc->y0 && !pc->y1) continue;
+ codepoint = (nk_rune)(range->first_unicode_codepoint_in_range + char_idx);
+ nk_tt_GetPackedQuad(range->chardata_for_range, (int)width,
+ (int)height, char_idx, &dummy_x, &dummy_y, &q, 0);
+
+ /* fill own glyph type with data */
+ glyph = &glyphs[dst_font->glyph_offset + dst_font->glyph_count + (unsigned int)glyph_count];
+ glyph->codepoint = codepoint;
+ glyph->x0 = q.x0; glyph->y0 = q.y0;
+ glyph->x1 = q.x1; glyph->y1 = q.y1;
+ glyph->y0 += (dst_font->ascent + 0.5f);
+ glyph->y1 += (dst_font->ascent + 0.5f);
+ glyph->w = glyph->x1 - glyph->x0 + 0.5f;
+ glyph->h = glyph->y1 - glyph->y0;
+
+ if (cfg->coord_type == NK_COORD_PIXEL) {
+ glyph->u0 = q.s0 * (float)width;
+ glyph->v0 = q.t0 * (float)height;
+ glyph->u1 = q.s1 * (float)width;
+ glyph->v1 = q.t1 * (float)height;
+ } else {
+ glyph->u0 = q.s0;
+ glyph->v0 = q.t0;
+ glyph->u1 = q.s1;
+ glyph->v1 = q.t1;
+ }
+ glyph->xadvance = (pc->xadvance + cfg->spacing.x);
+ if (cfg->pixel_snap)
+ glyph->xadvance = (float)(int)(glyph->xadvance + 0.5f);
+ glyph_count++;
+ }
+ }
+ dst_font->glyph_count += glyph_count;
+ glyph_n += glyph_count;
+ } while ((it = it->n) != config_iter);
+ }
+}
+NK_INTERN void
+nk_font_bake_custom_data(void *img_memory, int img_width, int img_height,
+ struct nk_recti img_dst, const char *texture_data_mask, int tex_width,
+ int tex_height, char white, char black)
+{
+ nk_byte *pixels;
+ int y = 0;
+ int x = 0;
+ int n = 0;
+
+ NK_ASSERT(img_memory);
+ NK_ASSERT(img_width);
+ NK_ASSERT(img_height);
+ NK_ASSERT(texture_data_mask);
+ NK_UNUSED(tex_height);
+ if (!img_memory || !img_width || !img_height || !texture_data_mask)
+ return;
+
+ pixels = (nk_byte*)img_memory;
+ for (y = 0, n = 0; y < tex_height; ++y) {
+ for (x = 0; x < tex_width; ++x, ++n) {
+ const int off0 = ((img_dst.x + x) + (img_dst.y + y) * img_width);
+ const int off1 = off0 + 1 + tex_width;
+ pixels[off0] = (texture_data_mask[n] == white) ? 0xFF : 0x00;
+ pixels[off1] = (texture_data_mask[n] == black) ? 0xFF : 0x00;
+ }
+ }
+}
+NK_INTERN void
+nk_font_bake_convert(void *out_memory, int img_width, int img_height,
+ const void *in_memory)
+{
+ int n = 0;
+ nk_rune *dst;
+ const nk_byte *src;
+
+ NK_ASSERT(out_memory);
+ NK_ASSERT(in_memory);
+ NK_ASSERT(img_width);
+ NK_ASSERT(img_height);
+ if (!out_memory || !in_memory || !img_height || !img_width) return;
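+ /* expand the 8-bit alpha-only atlas into 32-bit RGBA: alpha goes into the top byte, RGB is forced to white */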
+
+ dst = (nk_rune*)out_memory;
+ src = (const nk_byte*)in_memory;
+ for (n = (int)(img_width * img_height); n > 0; n--)
+ *dst++ = ((nk_rune)(*src++) << 24) | 0x00FFFFFF;
+}
+
+/* -------------------------------------------------------------
+ *
+ * FONT
+ *
+ * --------------------------------------------------------------*/
+NK_INTERN float
+nk_font_text_width(nk_handle handle, float height, const char *text, int len)
+{
+ nk_rune unicode;
+ int text_len = 0;
+ float text_width = 0;
+ int glyph_len = 0;
+ float scale = 0;
+
+ struct nk_font *font = (struct nk_font*)handle.ptr;
+ NK_ASSERT(font);
+ NK_ASSERT(font->glyphs);
+ if (!font || !text || !len)
+ return 0;
+
+ scale = height/font->info.height;
+ glyph_len = text_len = nk_utf_decode(text, &unicode, (int)len);
+ if (!glyph_len) return 0;
+ while (text_len <= (int)len && glyph_len) {
+ const struct nk_font_glyph *g;
+ if (unicode == NK_UTF_INVALID) break;
+
+ /* query currently drawn glyph information */
+ g = nk_font_find_glyph(font, unicode);
+ text_width += g->xadvance * scale;
+
+ /* offset next glyph */
+ glyph_len = nk_utf_decode(text + text_len, &unicode, (int)len - text_len);
+ text_len += glyph_len;
+ }
+ return text_width;
+}
+#ifdef NK_INCLUDE_VERTEX_BUFFER_OUTPUT
+NK_INTERN void
+nk_font_query_font_glyph(nk_handle handle, float height,
+ struct nk_user_font_glyph *glyph, nk_rune codepoint, nk_rune next_codepoint)
+{
+ float scale;
+ const struct nk_font_glyph *g;
+ struct nk_font *font;
+
+ NK_ASSERT(glyph);
+ NK_UNUSED(next_codepoint);
+
+ font = (struct nk_font*)handle.ptr;
+ NK_ASSERT(font);
+ NK_ASSERT(font->glyphs);
+ if (!font || !glyph)
+ return;
+
+ scale = height/font->info.height;
+ g = nk_font_find_glyph(font, codepoint);
+ glyph->width = (g->x1 - g->x0) * scale;
+ glyph->height = (g->y1 - g->y0) * scale;
+ glyph->offset = nk_vec2(g->x0 * scale, g->y0 * scale);
+ glyph->xadvance = (g->xadvance * scale);
+ glyph->uv[0] = nk_vec2(g->u0, g->v0);
+ glyph->uv[1] = nk_vec2(g->u1, g->v1);
+}
+#endif
+NK_API const struct nk_font_glyph*
+nk_font_find_glyph(struct nk_font *font, nk_rune unicode)
+{
+ int i = 0;
+ int count;
+ int total_glyphs = 0;
+ const struct nk_font_glyph *glyph = 0;
+ const struct nk_font_config *iter = 0;
+
+ NK_ASSERT(font);
+ NK_ASSERT(font->glyphs);
+ NK_ASSERT(font->info.ranges);
+ if (!font || !font->glyphs) return 0;
+
+ glyph = font->fallback;
+ iter = font->config;
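+ /* glyphs are stored range after range, so the index of a codepoint is the running */
+ /* glyph total of all earlier ranges plus its offset inside the matching range. */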
+ do {count = nk_range_count(iter->range);
+ for (i = 0; i < count; ++i) {
+ nk_rune f = iter->range[(i*2)+0];
+ nk_rune t = iter->range[(i*2)+1];
+ int diff = (int)((t - f) + 1);
+ if (unicode >= f && unicode <= t)
+ return &font->glyphs[((nk_rune)total_glyphs + (unicode - f))];
+ total_glyphs += diff;
+ }
+ } while ((iter = iter->n) != font->config);
+ return glyph;
+}
+NK_INTERN void
+nk_font_init(struct nk_font *font, float pixel_height,
+ nk_rune fallback_codepoint, struct nk_font_glyph *glyphs,
+ const struct nk_baked_font *baked_font, nk_handle atlas)
+{
+ struct nk_baked_font baked;
+ NK_ASSERT(font);
+ NK_ASSERT(glyphs);
+ NK_ASSERT(baked_font);
+ if (!font || !glyphs || !baked_font)
+ return;
+
+ baked = *baked_font;
+ font->fallback = 0;
+ font->info = baked;
+ font->scale = (float)pixel_height / (float)font->info.height;
+ font->glyphs = &glyphs[baked_font->glyph_offset];
+ font->texture = atlas;
+ font->fallback_codepoint = fallback_codepoint;
+ font->fallback = nk_font_find_glyph(font, fallback_codepoint);
+
+ font->handle.height = font->info.height * font->scale;
+ font->handle.width = nk_font_text_width;
+ font->handle.userdata.ptr = font;
+#ifdef NK_INCLUDE_VERTEX_BUFFER_OUTPUT
+ font->handle.query = nk_font_query_font_glyph;
+ font->handle.texture = font->texture;
+#endif
+}
+
+/* ---------------------------------------------------------------------------
+ *
+ * DEFAULT FONT
+ *
+ * ProggyClean.ttf
+ * Copyright (c) 2004, 2005 Tristan Grimmer
+ * MIT license (see License.txt in http://www.upperbounds.net/download/ProggyClean.ttf.zip)
+ * Download and more information at http://upperbounds.net
+ *-----------------------------------------------------------------------------*/
+#ifdef __clang__
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Woverlength-strings"
+#elif defined(__GNUC__) || defined(__GNUG__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Woverlength-strings"
+#endif
+
+#ifdef NK_INCLUDE_DEFAULT_FONT
+
+NK_GLOBAL const char nk_proggy_clean_ttf_compressed_data_base85[11980+1] =
+ "7])#######hV0qs'/###[),##/l:$#Q6>##5[n42>c-TH`->>#/e>11NNV=Bv(*:.F?uu#(gRU.o0XGH`$vhLG1hxt9?W`#,5LsCp#-i>.r$<$6pD>Lb';9Crc6tgXmKVeU2cD4Eo3R/"
+ "2*>]b(MC;$jPfY.;h^`IWM9<Lh2TlS+f-s$o6Q<BWH`YiU.xfLq$N;$0iR/GX:U(jcW2p/W*q?-qmnUCI;jHSAiFWM.R*kU@C=GH?a9wp8f$e.-4^Qg1)Q-GL(lf(r/7GrRgwV%MS=C#"
+ "`8ND>Qo#t'X#(v#Y9w0#1D$CIf;W'#pWUPXOuxXuU(H9M(1<q-UE31#^-V'8IRUo7Qf./L>=Ke$$'5F%)]0^#0X@U.a<r:QLtFsLcL6##lOj)#.Y5<-R&KgLwqJfLgN&;Q?gI^#DY2uL"
+ "i@^rMl9t=cWq6##weg>$FBjVQTSDgEKnIS7EM9>ZY9w0#L;>>#Mx&4Mvt//L[MkA#W@lK.N'[0#7RL_&#w+F%HtG9M#XL`N&.,GM4Pg;-<nLENhvx>-VsM.M0rJfLH2eTM`*oJMHRC`N"
+ "kfimM2J,W-jXS:)r0wK#@Fge$U>`w'N7G#$#fB#$E^$#:9:hk+eOe--6x)F7*E%?76%^GMHePW-Z5l'&GiF#$956:rS?dA#fiK:)Yr+`&#0j@'DbG&#^$PG.Ll+DNa<XCMKEV*N)LN/N"
+ "*b=%Q6pia-Xg8I$<MR&,VdJe$<(7G;Ckl'&hF;;$<_=X(b.RS%%)###MPBuuE1V:v&cX&#2m#(&cV]`k9OhLMbn%s$G2,B$BfD3X*sp5#l,$R#]x_X1xKX%b5U*[r5iMfUo9U`N99hG)"
+ "tm+/Us9pG)XPu`<0s-)WTt(gCRxIg(%6sfh=ktMKn3j)<6<b5Sk_/0(^]AaN#(p/L>&VZ>1i%h1S9u5o@YaaW$e+b<TWFn/Z:Oh(Cx2$lNEoN^e)#CFY@@I;BOQ*sRwZtZxRcU7uW6CX"
+ "ow0i(?$Q[cjOd[P4d)]>ROPOpxTO7Stwi1::iB1q)C_=dV26J;2,]7op$]uQr@_V7$q^%lQwtuHY]=DX,n3L#0PHDO4f9>dC@O>HBuKPpP*E,N+b3L#lpR/MrTEH.IAQk.a>D[.e;mc."
+ "x]Ip.PH^'/aqUO/$1WxLoW0[iLA<QT;5HKD+@qQ'NQ(3_PLhE48R.qAPSwQ0/WK?Z,[x?-J;jQTWA0X@KJ(_Y8N-:/M74:/-ZpKrUss?d#dZq]DAbkU*JqkL+nwX@@47`5>w=4h(9.`G"
+ "CRUxHPeR`5Mjol(dUWxZa(>STrPkrJiWx`5U7F#.g*jrohGg`cg:lSTvEY/EV_7H4Q9[Z%cnv;JQYZ5q.l7Zeas:HOIZOB?G<Nald$qs]@]L<J7bR*>gv:[7MI2k).'2($5FNP&EQ(,)"
+ "U]W]+fh18.vsai00);D3@4ku5P?DP8aJt+;qUM]=+b'8@;mViBKx0DE[-auGl8:PJ&Dj+M6OC]O^((##]`0i)drT;-7X`=-H3[igUnPG-NZlo.#k@h#=Ork$m>a>$-?Tm$UV(?#P6YY#"
+ "'/###xe7q.73rI3*pP/$1>s9)W,JrM7SN]'/4C#v$U`0#V.[0>xQsH$fEmPMgY2u7Kh(G%siIfLSoS+MK2eTM$=5,M8p`A.;_R%#u[K#$x4AG8.kK/HSB==-'Ie/QTtG?-.*^N-4B/ZM"
+ "_3YlQC7(p7q)&](`6_c)$/*JL(L-^(]$wIM`dPtOdGA,U3:w2M-0<q-]L_?^)1vw'.,MRsqVr.L;aN&#/EgJ)PBc[-f>+WomX2u7lqM2iEumMTcsF?-aT=Z-97UEnXglEn1K-bnEO`gu"
+ "Ft(c%=;Am_Qs@jLooI&NX;]0#j4#F14;gl8-GQpgwhrq8'=l_f-b49'UOqkLu7-##oDY2L(te+Mch&gLYtJ,MEtJfLh'x'M=$CS-ZZ%P]8bZ>#S?YY#%Q&q'3^Fw&?D)UDNrocM3A76/"
+ "/oL?#h7gl85[qW/NDOk%16ij;+:1a'iNIdb-ou8.P*w,v5#EI$TWS>Pot-R*H'-SEpA:g)f+O$%%`kA#G=8RMmG1&O`>to8bC]T&$,n.LoO>29sp3dt-52U%VM#q7'DHpg+#Z9%H[K<L"
+ "%a2E-grWVM3@2=-k22tL]4$##6We'8UJCKE[d_=%wI;'6X-GsLX4j^SgJ$##R*w,vP3wK#iiW&#*h^D&R?jp7+/u&#(AP##XU8c$fSYW-J95_-Dp[g9wcO&#M-h1OcJlc-*vpw0xUX&#"
+ "OQFKNX@QI'IoPp7nb,QU//MQ&ZDkKP)X<WSVL(68uVl&#c'[0#(s1X&xm$Y%B7*K:eDA323j998GXbA#pwMs-jgD$9QISB-A_(aN4xoFM^@C58D0+Q+q3n0#3U1InDjF682-SjMXJK)("
+ "h$hxua_K]ul92%'BOU&#BRRh-slg8KDlr:%L71Ka:.A;%YULjDPmL<LYs8i#XwJOYaKPKc1h:'9Ke,g)b),78=I39B;xiY$bgGw-&.Zi9InXDuYa%G*f2Bq7mn9^#p1vv%#(Wi-;/Z5h"
+ "o;#2:;%d&#x9v68C5g?ntX0X)pT`;%pB3q7mgGN)3%(P8nTd5L7GeA-GL@+%J3u2:(Yf>et`e;)f#Km8&+DC$I46>#Kr]]u-[=99tts1.qb#q72g1WJO81q+eN'03'eM>&1XxY-caEnO"
+ "j%2n8)),?ILR5^.Ibn<-X-Mq7[a82Lq:F&#ce+S9wsCK*x`569E8ew'He]h:sI[2LM$[guka3ZRd6:t%IG:;$%YiJ:Nq=?eAw;/:nnDq0(CYcMpG)qLN4$##&J<j$UpK<Q4a1]MupW^-"
+ "sj_$%[HK%'F####QRZJ::Y3EGl4'@%FkiAOg#p[##O`gukTfBHagL<LHw%q&OV0##F=6/:chIm0@eCP8X]:kFI%hl8hgO@RcBhS-@Qb$%+m=hPDLg*%K8ln(wcf3/'DW-$.lR?n[nCH-"
+ "eXOONTJlh:.RYF%3'p6sq:UIMA945&^HFS87@$EP2iG<-lCO$%c`uKGD3rC$x0BL8aFn--`ke%#HMP'vh1/R&O_J9'um,.<tx[@%wsJk&bUT2`0uMv7gg#qp/ij.L56'hl;.s5CUrxjO"
+ "M7-##.l+Au'A&O:-T72L]P`&=;ctp'XScX*rU.>-XTt,%OVU4)S1+R-#dg0/Nn?Ku1^0f$B*P:Rowwm-`0PKjYDDM'3]d39VZHEl4,.j']Pk-M.h^&:0FACm$maq-&sgw0t7/6(^xtk%"
+ "LuH88Fj-ekm>GA#_>568x6(OFRl-IZp`&b,_P'$M<Jnq79VsJW/mWS*PUiq76;]/NM_>hLbxfc$mj`,O;&%W2m`Zh:/)Uetw:aJ%]K9h:TcF]u_-Sj9,VK3M.*'&0D[Ca]J9gp8,kAW]"
+ "%(?A%R$f<->Zts'^kn=-^@c4%-pY6qI%J%1IGxfLU9CP8cbPlXv);C=b),<2mOvP8up,UVf3839acAWAW-W?#ao/^#%KYo8fRULNd2.>%m]UK:n%r$'sw]J;5pAoO_#2mO3n,'=H5(et"
+ "Hg*`+RLgv>=4U8guD$I%D:W>-r5V*%j*W:Kvej.Lp$<M-SGZ':+Q_k+uvOSLiEo(<aD/K<CCc`'Lx>'?;++O'>()jLR-^u68PHm8ZFWe+ej8h:9r6L*0//c&iH&R8pRbA#Kjm%upV1g:"
+ "a_#Ur7FuA#(tRh#.Y5K+@?3<-8m0$PEn;J:rh6?I6uG<-`wMU'ircp0LaE_OtlMb&1#6T.#FDKu#1Lw%u%+GM+X'e?YLfjM[VO0MbuFp7;>Q&#WIo)0@F%q7c#4XAXN-U&VB<HFF*qL("
+ "$/V,;(kXZejWO`<[5?\?ewY(*9=%wDc;,u<'9t3W-(H1th3+G]ucQ]kLs7df($/*JL]@*t7Bu_G3_7mp7<iaQjO@.kLg;x3B0lqp7Hf,^Ze7-##@/c58Mo(3;knp0%)A7?-W+eI'o8)b<"
+ "nKnw'Ho8C=Y>pqB>0ie&jhZ[?iLR@@_AvA-iQC(=ksRZRVp7`.=+NpBC%rh&3]R:8XDmE5^V8O(x<<aG/1N$#FX$0V5Y6x'aErI3I$7x%E`v<-BY,)%-?Psf*l?%C3.mM(=/M0:JxG'?"
+ "7WhH%o'a<-80g0NBxoO(GH<dM]n.+%q@jH?f.UsJ2Ggs&4<-e47&Kl+f//9@`b+?.TeN_&B8Ss?v;^Trk;f#YvJkl&w$]>-+k?'(<S:68tq*WoDfZu';mM?8X[ma8W%*`-=;D.(nc7/;"
+ ")g:T1=^J$&BRV(-lTmNB6xqB[@0*o.erM*<SWF]u2=st-*(6v>^](H.aREZSi,#1:[IXaZFOm<-ui#qUq2$##Ri;u75OK#(RtaW-K-F`S+cF]uN`-KMQ%rP/Xri.LRcB##=YL3BgM/3M"
+ "D?@f&1'BW-)Ju<L25gl8uhVm1hL$##*8###'A3/LkKW+(^rWX?5W_8g)a(m&K8P>#bmmWCMkk&#TR`C,5d>g)F;t,4:@_l8G/5h4vUd%&%950:VXD'QdWoY-F$BtUwmfe$YqL'8(PWX("
+ "P?^@Po3$##`MSs?DWBZ/S>+4%>fX,VWv/w'KD`LP5IbH;rTV>n3cEK8U#bX]l-/V+^lj3;vlMb&[5YQ8#pekX9JP3XUC72L,,?+Ni&co7ApnO*5NK,((W-i:$,kp'UDAO(G0Sq7MVjJs"
+ "bIu)'Z,*[>br5fX^:FPAWr-m2KgL<LUN098kTF&#lvo58=/vjDo;.;)Ka*hLR#/k=rKbxuV`>Q_nN6'8uTG&#1T5g)uLv:873UpTLgH+#FgpH'_o1780Ph8KmxQJ8#H72L4@768@Tm&Q"
+ "h4CB/5OvmA&,Q&QbUoi$a_%3M01H)4x7I^&KQVgtFnV+;[Pc>[m4k//,]1?#`VY[Jr*3&&slRfLiVZJ:]?=K3Sw=[$=uRB?3xk48@aeg<Z'<$#4H)6,>e0jT6'N#(q%.O=?2S]u*(m<-"
+ "V8J'(1)G][68hW$5'q[GC&5j`TE?m'esFGNRM)j,ffZ?-qx8;->g4t*:CIP/[Qap7/9'#(1sao7w-.qNUdkJ)tCF&#B^;xGvn2r9FEPFFFcL@.iFNkTve$m%#QvQS8U@)2Z+3K:AKM5i"
+ "sZ88+dKQ)W6>J%CL<KE>`.d*(B`-n8D9oK<Up]c$X$(,)M8Zt7/[rdkqTgl-0cuGMv'?>-XV1q['-5k'cAZ69e;D_?$ZPP&s^+7])$*$#@QYi9,5P&#9r+$%CE=68>K8r0=dSC%%(@p7"
+ ".m7jilQ02'0-VWAg<a/''3u.=4L$Y)6k/K:_[3=&jvL<L0C/2'v:^;-DIBW,B4E68:kZ;%?8(Q8BH=kO65BW?xSG&#@uU,DS*,?.+(o(#1vCS8#CHF>TlGW'b)Tq7VT9q^*^$$.:&N@@"
+ "$&)WHtPm*5_rO0&e%K&#-30j(E4#'Zb.o/(Tpm$>K'f@[PvFl,hfINTNU6u'0pao7%XUp9]5.>%h`8_=VYbxuel.NTSsJfLacFu3B'lQSu/m6-Oqem8T+oE--$0a/k]uj9EwsG>%veR*"
+ "hv^BFpQj:K'#SJ,sB-'#](j.Lg92rTw-*n%@/;39rrJF,l#qV%OrtBeC6/,;qB3ebNW[?,Hqj2L.1NP&GjUR=1D8QaS3Up&@*9wP?+lo7b?@%'k4`p0Z$22%K3+iCZj?XJN4Nm&+YF]u"
+ "@-W$U%VEQ/,,>>#)D<h#`)h0:<Q6909ua+&VU%n2:cG3FJ-%@Bj-DgLr`Hw&HAKjKjseK</xKT*)B,N9X3]krc12t'pgTV(Lv-tL[xg_%=M_q7a^x?7Ubd>#%8cY#YZ?=,`Wdxu/ae&#"
+ "w6)R89tI#6@s'(6Bf7a&?S=^ZI_kS&ai`&=tE72L_D,;^R)7[$s<Eh#c&)q.MXI%#v9ROa5FZO%sF7q7Nwb&#ptUJ:aqJe$Sl68%.D###EC><?-aF&#RNQv>o8lKN%5/$(vdfq7+ebA#"
+ "u1p]ovUKW&Y%q]'>$1@-[xfn$7ZTp7mM,G,Ko7a&Gu%G[RMxJs[0MM%wci.LFDK)(<c`Q8N)jEIF*+?P2a8g%)$q]o2aH8C&<SibC/q,(e:v;-b#6[$NtDZ84Je2KNvB#$P5?tQ3nt(0"
+ "d=j.LQf./Ll33+(;q3L-w=8dX$#WF&uIJ@-bfI>%:_i2B5CsR8&9Z&#=mPEnm0f`<&c)QL5uJ#%u%lJj+D-r;BoF&#4DoS97h5g)E#o:&S4weDF,9^Hoe`h*L+_a*NrLW-1pG_&2UdB8"
+ "6e%B/:=>)N4xeW.*wft-;$'58-ESqr<b?UI(_%@[P46>#U`'6AQ]m&6/`Z>#S?YY#Vc;r7U2&326d=w&H####?TZ`*4?&.MK?LP8Vxg>$[QXc%QJv92.(Db*B)gb*BM9dM*hJMAo*c&#"
+ "b0v=Pjer]$gG&JXDf->'StvU7505l9$AFvgYRI^&<^b68?j#q9QX4SM'RO#&sL1IM.rJfLUAj221]d##DW=m83u5;'bYx,*Sl0hL(W;;$doB&O/TQ:(Z^xBdLjL<Lni;''X.`$#8+1GD"
+ ":k$YUWsbn8ogh6rxZ2Z9]%nd+>V#*8U_72Lh+2Q8Cj0i:6hp&$C/:p(HK>T8Y[gHQ4`4)'$Ab(Nof%V'8hL&#<NEdtg(n'=S1A(Q1/I&4([%dM`,Iu'1:_hL>SfD07&6D<fp8dHM7/g+"
+ "tlPN9J*rKaPct&?'uBCem^jn%9_K)<,C5K3s=5g&GmJb*[SYq7K;TRLGCsM-$$;S%:Y@r7AK0pprpL<Lrh,q7e/%KWK:50I^+m'vi`3?%Zp+<-d+$L-Sv:@.o19n$s0&39;kn;S%BSq*"
+ "$3WoJSCLweV[aZ'MQIjO<7;X-X;&+dMLvu#^UsGEC9WEc[X(wI7#2.(F0jV*eZf<-Qv3J-c+J5AlrB#$p(H68LvEA'q3n0#m,[`*8Ft)FcYgEud]CWfm68,(aLA$@EFTgLXoBq/UPlp7"
+ ":d[/;r_ix=:TF`S5H-b<LI&HY(K=h#)]Lk$K14lVfm:x$H<3^Ql<M`$OhapBnkup'D#L$Pb_`N*g]2e;X/Dtg,bsj&K#2[-:iYr'_wgH)NUIR8a1n#S?Yej'h8^58UbZd+^FKD*T@;6A"
+ "7aQC[K8d-(v6GI$x:T<&'Gp5Uf>@M.*J:;$-rv29'M]8qMv-tLp,'886iaC=Hb*YJoKJ,(j%K=H`K.v9HggqBIiZu'QvBT.#=)0ukruV&.)3=(^1`o*Pj4<-<aN((^7('#Z0wK#5GX@7"
+ "u][`*S^43933A4rl][`*O4CgLEl]v$1Q3AeF37dbXk,.)vj#x'd`;qgbQR%FW,2(?LO=s%Sc68%NP'##Aotl8x=BE#j1UD([3$M(]UI2LX3RpKN@;/#f'f/&_mt&F)XdF<9t4)Qa.*kT"
+ "LwQ'(TTB9.xH'>#MJ+gLq9-##@HuZPN0]u:h7.T..G:;$/Usj(T7`Q8tT72LnYl<-qx8;-HV7Q-&Xdx%1a,hC=0u+HlsV>nuIQL-5<N?)NBS)QN*_I,?&)2'IM%L3I)X((e/dl2&8'<M"
+ ":^#M*Q+[T.Xri.LYS3v%fF`68h;b-X[/En'CR.q7E)p'/kle2HM,u;^%OKC-N+Ll%F9CF<Nf'^#t2L,;27W:0O@6##U6W7:$rJfLWHj$#)woqBefIZ.PK<b*t7ed;p*_m;4ExK#h@&]>"
+ "_>@kXQtMacfD.m-VAb8;IReM3$wf0''hra*so568'Ip&vRs849'MRYSp%:t:h5qSgwpEr$B>Q,;s(C#$)`svQuF$##-D,##,g68@2[T;.XSdN9Qe)rpt._K-#5wF)sP'##p#C0c%-Gb%"
+ "hd+<-j'Ai*x&&HMkT]C'OSl##5RG[JXaHN;d'uA#x._U;.`PU@(Z3dt4r152@:v,'R.Sj'w#0<-;kPI)FfJ&#AYJ&#//)>-k=m=*XnK$>=)72L]0I%>.G690a:$##<,);?;72#?x9+d;"
+ "^V'9;jY@;)br#q^YQpx:X#Te$Z^'=-=bGhLf:D6&bNwZ9-ZD#n^9HhLMr5G;']d&6'wYmTFmL<LD)F^%[tC'8;+9E#C$g%#5Y>q9wI>P(9mI[>kC-ekLC/R&CH+s'B;K-M6$EB%is00:"
+ "+A4[7xks.LrNk0&E)wILYF@2L'0Nb$+pv<(2.768/FrY&h$^3i&@+G%JT'<-,v`3;_)I9M^AE]CN?Cl2AZg+%4iTpT3<n-&%H%b<FDj2M<hH=&Eh<2Len$b*aTX=-8QxN)k11IM1c^j%"
+ "9s<L<NFSo)B?+<-(GxsF,^-Eh@$4dXhN$+#rxK8'je'D7k`e;)2pYwPA'_p9&@^18ml1^[@g4t*[JOa*[=Qp7(qJ_oOL^('7fB&Hq-:sf,sNj8xq^>$U4O]GKx'm9)b@p7YsvK3w^YR-"
+ "CdQ*:Ir<($u&)#(&?L9Rg3H)4fiEp^iI9O8KnTj,]H?D*r7'M;PwZ9K0E^k&-cpI;.p/6_vwoFMV<->#%Xi.LxVnrU(4&8/P+:hLSKj$#U%]49t'I:rgMi'FL@a:0Y-uA[39',(vbma*"
+ "hU%<-SRF`Tt:542R_VV$p@[p8DV[A,?1839FWdF<TddF<9Ah-6&9tWoDlh]&1SpGMq>Ti1O*H&#(AL8[_P%.M>v^-))qOT*F5Cq0`Ye%+$B6i:7@0IX<N+T+0MlMBPQ*Vj>SsD<U4JHY"
+ "8kD2)2fU/M#$e.)T4,_=8hLim[&);?UkK'-x?'(:siIfL<$pFM`i<?%W(mGDHM%>iWP,##P`%/L<eXi:@Z9C.7o=@(pXdAO/NLQ8lPl+HPOQa8wD8=^GlPa8TKI1CjhsCTSLJM'/Wl>-"
+ "S(qw%sf/@%#B6;/U7K]uZbi^Oc^2n<bhPmUkMw>%t<)'mEVE''n`WnJra$^TKvX5B>;_aSEK',(hwa0:i4G?.Bci.(X[?b*($,=-n<.Q%`(X=?+@Am*Js0&=3bh8K]mL<LoNs'6,'85`"
+ "0?t/'_U59@]ddF<#LdF<eWdF<OuN/45rY<-L@&#+fm>69=Lb,OcZV/);TTm8VI;?%OtJ<(b4mq7M6:u?KRdF<gR@2L=FNU-<b[(9c/ML3m;Z[$oF3g)GAWqpARc=<ROu7cL5l;-[A]%/"
+ "+fsd;l#SafT/f*W]0=O'$(Tb<[)*@e775R-:Yob%g*>l*:xP?Yb.5)%w_I?7uk5JC+FS(m#i'k.'a0i)9<7b'fs'59hq$*5Uhv##pi^8+hIEBF`nvo`;'l0.^S1<-wUK2/Coh58KKhLj"
+ "M=SO*rfO`+qC`W-On.=AJ56>>i2@2LH6A:&5q`?9I3@@'04&p2/LVa*T-4<-i3;M9UvZd+N7>b*eIwg:CC)c<>nO&#<IGe;__.thjZl<%w(Wk2xmp4Q@I#I9,DF]u7-P=.-_:YJ]aS@V"
+ "?6*C()dOp7:WL,b&3Rg/.cmM9&r^>$(>.Z-I&J(Q0Hd5Q%7Co-b`-c<N(6r@ip+AurK<m86QIth*#v;-OBqi+L7wDE-Ir8K['m+DDSLwK&/.?-V%U_%3:qKNu$_b*B-kp7NaD'QdWQPK"
+ "Yq[@>P)hI;*_F]u`Rb[.j8_Q/<&>uu+VsH$sM9TA%?)(vmJ80),P7E>)tjD%2L=-t#fK[%`v=Q8<FfNkgg^oIbah*#8/Qt$F&:K*-(N/'+1vMB,u()-a.VUU*#[e%gAAO(S>WlA2);Sa"
+ ">gXm8YB`1d@K#n]76-a$U,mF<fX]idqd)<3,]J7JmW4`6]uks=4-72L(jEk+:bJ0M^q-8Dm_Z?0olP1C9Sa&H[d&c$ooQUj]Exd*3ZM@-WGW2%s',B-_M%>%Ul:#/'xoFM9QX-$.QN'>"
+ "[%$Z$uF6pA6Ki2O5:8w*vP1<-1`[G,)-m#>0`P&#eb#.3i)rtB61(o'$?X3B</R90;eZ]%Ncq;-Tl]#F>2Qft^ae_5tKL9MUe9b*sLEQ95C&`=G?@Mj=wh*'3E>=-<)Gt*Iw)'QG:`@I"
+ "wOf7&]1i'S01B+Ev/Nac#9S;=;YQpg_6U`*kVY39xK,[/6Aj7:'1Bm-_1EYfa1+o&o4hp7KN_Q(OlIo@S%;jVdn0'1<Vc52=u`3^o-n1'g4v58Hj&6_t7$##?M)c<$bgQ_'SY((-xkA#"
+ "Y(,p'H9rIVY-b,'%bCPF7.J<Up^,(dU1VY*5#WkTU>h19w,WQhLI)3S#f$2(eb,jr*b;3Vw]*7NH%$c4Vs,eD9>XW8?N]o+(*pgC%/72LV-u<Hp,3@e^9UB1J+ak9-TN/mhKPg+AJYd$"
+ "MlvAF_jCK*.O-^(63adMT->W%iewS8W6m2rtCpo'RS1R84=@paTKt)>=%&1[)*vp'u+x,VrwN;&]kuO9JDbg=pO$J*.jVe;u'm0dr9l,<*wMK*Oe=g8lV_KEBFkO'oU]^=[-792#ok,)"
+ "i]lR8qQ2oA8wcRCZ^7w/Njh;?.stX?Q1>S1q4Bn$)K1<-rGdO'$Wr.Lc.CG)$/*JL4tNR/,SVO3,aUw'DJN:)Ss;wGn9A32ijw%FL+Z0Fn.U9;reSq)bmI32U==5ALuG&#Vf1398/pVo"
+ "1*c-(aY168o<`JsSbk-,1N;$>0:OUas(3:8Z972LSfF8eb=c-;>SPw7.6hn3m`9^Xkn(r.qS[0;T%&Qc=+STRxX'q1BNk3&*eu2;&8q$&x>Q#Q7^Tf+6<(d%ZVmj2bDi%.3L2n+4W'$P"
+ "iDDG)g,r%+?,$@?uou5tSe2aN_AQU*<h`e-GI7)?OK2A.d7_c)?wQ5AS@DL3r#7fSkgl6-++D:'A,uq7SvlB$pcpH'q3n0#_%dY#xCpr-l<F0NR@-##FEV6NTF6##$l84N1w?AO>'IAO"
+ "URQ##V^Fv-XFbGM7Fl(N<3DhLGF%q.1rC$#:T__&Pi68%0xi_&[qFJ(77j_&JWoF.V735&T,[R*:xFR*K5>>#`bW-?4Ne_&6Ne_&6Ne_&n`kr-#GJcM6X;uM6X;uM(.a..^2TkL%oR(#"
+ ";u.T%fAr%4tJ8&><1=GHZ_+m9/#H1F^R#SC#*N=BA9(D?v[UiFY>>^8p,KKF.W]L29uLkLlu/+4T<XoIB&hx=T1PcDaB&;HH+-AFr?(m9HZV)FKS8JCw;SD=6[^/DZUL`EUDf]GGlG&>"
+ "w$)F./^n3+rlo+DB;5sIYGNk+i1t-69Jg--0pao7Sm#K)pdHW&;LuDNH@H>#/X-TI(;P>#,Gc>#0Su>#4`1?#8lC?#<xU?#@.i?#D:%@#HF7@#LRI@#P_[@#Tkn@#Xw*A#]-=A#a9OA#"
+ "d<F&#*;G##.GY##2Sl##6`($#:l:$#>xL$#B.`$#F:r$#JF.%#NR@%#R_R%#Vke%#Zww%#_-4&#3^Rh%Sflr-k'MS.o?.5/sWel/wpEM0%3'/1)K^f1-d>G21&v(35>V`39V7A4=onx4"
+ "A1OY5EI0;6Ibgr6M$HS7Q<)58C5w,;WoA*#[%T*#`1g*#d=#+#hI5+#lUG+#pbY+#tnl+#x$),#&1;,#*=M,#.I`,#2Ur,#6b.-#;w[H#iQtA#m^0B#qjBB#uvTB##-hB#'9$C#+E6C#"
+ "/QHC#3^ZC#7jmC#;v)D#?,<D#C8ND#GDaD#KPsD#O]/E#g1A5#KA*1#gC17#MGd;#8(02#L-d3#rWM4#Hga1#,<w0#T.j<#O#'2#CYN1#qa^:#_4m3#o@/=#eG8=#t8J5#`+78#4uI-#"
+ "m3B2#SB[8#Q0@8#i[*9#iOn8#1Nm;#^sN9#qh<9#:=x-#P;K2#$%X9#bC+.#Rg;<#mN=.#MTF.#RZO.#2?)4#Y#(/#[)1/#b;L/#dAU/#0Sv;#lY$0#n`-0#sf60#(F24#wrH0#%/e0#"
+ "TmD<#%JSMFove:CTBEXI:<eh2g)B,3h2^G3i;#d3jD>)4kMYD4lVu`4m`:&5niUA5@(A5BA1]PBB:xlBCC=2CDLXMCEUtiCf&0g2'tN?PGT4CPGT4CPGT4CPGT4CPGT4CPGT4CPGT4CP"
+ "GT4CPGT4CPGT4CPGT4CPGT4CPGT4CP-qekC`.9kEg^+F$kwViFJTB&5KTB&5KTB&5KTB&5KTB&5KTB&5KTB&5KTB&5KTB&5KTB&5KTB&5KTB&5KTB&5KTB&5KTB&5o,^<-28ZI'O?;xp"
+ "O?;xpO?;xpO?;xpO?;xpO?;xpO?;xpO?;xpO?;xpO?;xpO?;xpO?;xpO?;xpO?;xp;7q-#lLYI:xvD=#";
+
+#endif /* NK_INCLUDE_DEFAULT_FONT */
+
+#define NK_CURSOR_DATA_W 90
+#define NK_CURSOR_DATA_H 27
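+/* 90x27 ASCII bitmap describing the built-in mouse cursor shapes; it is copied
+ * into a spare corner of the font atlas by nk_font_bake_custom_data() during
+ * nk_font_atlas_bake(), with '.' and 'X' marking the two pixel classes. */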
+NK_GLOBAL const char nk_custom_cursor_data[NK_CURSOR_DATA_W * NK_CURSOR_DATA_H + 1] =
+{
+ "..- -XXXXXXX- X - X -XXXXXXX - XXXXXXX"
+ "..- -X.....X- X.X - X.X -X.....X - X.....X"
+ "--- -XXX.XXX- X...X - X...X -X....X - X....X"
+ "X - X.X - X.....X - X.....X -X...X - X...X"
+ "XX - X.X -X.......X- X.......X -X..X.X - X.X..X"
+ "X.X - X.X -XXXX.XXXX- XXXX.XXXX -X.X X.X - X.X X.X"
+ "X..X - X.X - X.X - X.X -XX X.X - X.X XX"
+ "X...X - X.X - X.X - XX X.X XX - X.X - X.X "
+ "X....X - X.X - X.X - X.X X.X X.X - X.X - X.X "
+ "X.....X - X.X - X.X - X..X X.X X..X - X.X - X.X "
+ "X......X - X.X - X.X - X...XXXXXX.XXXXXX...X - X.X XX-XX X.X "
+ "X.......X - X.X - X.X -X.....................X- X.X X.X-X.X X.X "
+ "X........X - X.X - X.X - X...XXXXXX.XXXXXX...X - X.X..X-X..X.X "
+ "X.........X -XXX.XXX- X.X - X..X X.X X..X - X...X-X...X "
+ "X..........X-X.....X- X.X - X.X X.X X.X - X....X-X....X "
+ "X......XXXXX-XXXXXXX- X.X - XX X.X XX - X.....X-X.....X "
+ "X...X..X --------- X.X - X.X - XXXXXXX-XXXXXXX "
+ "X..X X..X - -XXXX.XXXX- XXXX.XXXX ------------------------------------"
+ "X.X X..X - -X.......X- X.......X - XX XX - "
+ "XX X..X - - X.....X - X.....X - X.X X.X - "
+ " X..X - X...X - X...X - X..X X..X - "
+ " XX - X.X - X.X - X...XXXXXXXXXXXXX...X - "
+ "------------ - X - X -X.....................X- "
+ " ----------------------------------- X...XXXXXXXXXXXXX...X - "
+ " - X..X X..X - "
+ " - X.X X.X - "
+ " - XX XX - "
+};
+
+#ifdef __clang__
+#pragma clang diagnostic pop
+#elif defined(__GNUC__) || defined(__GNUG__)
+#pragma GCC diagnostic pop
+#endif
+
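+/* bounds guards for the decompressor below: nk__barrier marks the end of the
+ * output buffer, nk__barrier2/nk__barrier3 the input range, nk__barrier4 the
+ * start of the output buffer; nk__dout is the current write cursor. */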
+NK_GLOBAL unsigned char *nk__barrier;
+NK_GLOBAL unsigned char *nk__barrier2;
+NK_GLOBAL unsigned char *nk__barrier3;
+NK_GLOBAL unsigned char *nk__barrier4;
+NK_GLOBAL unsigned char *nk__dout;
+
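+/* the compressed stream begins with a 16-byte header; bytes 8..11 hold the
+ * decompressed size in big-endian order. */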
+NK_INTERN unsigned int
+nk_decompress_length(unsigned char *input)
+{
+ return (unsigned int)((input[8] << 24) + (input[9] << 16) + (input[10] << 8) + input[11]);
+}
+NK_INTERN void
+nk__match(unsigned char *data, unsigned int length)
+{
+ /* INVERSE of memmove... write each byte before copying the next...*/
+ NK_ASSERT (nk__dout + length <= nk__barrier);
+ if (nk__dout + length > nk__barrier) { nk__dout += length; return; }
+ if (data < nk__barrier4) { nk__dout = nk__barrier+1; return; }
+ while (length--) *nk__dout++ = *data++;
+}
+NK_INTERN void
+nk__lit(unsigned char *data, unsigned int length)
+{
+ NK_ASSERT (nk__dout + length <= nk__barrier);
+ if (nk__dout + length > nk__barrier) { nk__dout += length; return; }
+ if (data < nk__barrier2) { nk__dout = nk__barrier+1; return; }
+ NK_MEMCPY(nk__dout, data, length);
+ nk__dout += length;
+}
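+/* decodes one token of the stream: the first byte selects the case. Values
+ * >= 0x20 encode short back-references and literal runs with 1-3 byte
+ * headers; smaller values encode long matches and literal runs with wider
+ * length/offset fields. Unhandled low values leave the cursor unchanged,
+ * which the caller detects and treats as the end-of-stream marker. */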
+NK_INTERN unsigned char*
+nk_decompress_token(unsigned char *i)
+{
+ #define nk__in2(x) ((i[x] << 8) + i[(x)+1])
+ #define nk__in3(x) ((i[x] << 16) + nk__in2((x)+1))
+ #define nk__in4(x) ((i[x] << 24) + nk__in3((x)+1))
+
+ if (*i >= 0x20) { /* use fewer if's for cases that expand small */
+ if (*i >= 0x80) nk__match(nk__dout-i[1]-1, (unsigned int)i[0] - 0x80 + 1), i += 2;
+ else if (*i >= 0x40) nk__match(nk__dout-(nk__in2(0) - 0x4000 + 1), (unsigned int)i[2]+1), i += 3;
+ else /* *i >= 0x20 */ nk__lit(i+1, (unsigned int)i[0] - 0x20 + 1), i += 1 + (i[0] - 0x20 + 1);
+ } else { /* more ifs for cases that expand large, since overhead is amortized */
+ if (*i >= 0x18) nk__match(nk__dout-(unsigned int)(nk__in3(0) - 0x180000 + 1), (unsigned int)i[3]+1), i += 4;
+ else if (*i >= 0x10) nk__match(nk__dout-(unsigned int)(nk__in3(0) - 0x100000 + 1), (unsigned int)nk__in2(3)+1), i += 5;
+ else if (*i >= 0x08) nk__lit(i+2, (unsigned int)nk__in2(0) - 0x0800 + 1), i += 2 + (nk__in2(0) - 0x0800 + 1);
+ else if (*i == 0x07) nk__lit(i+3, (unsigned int)nk__in2(1) + 1), i += 3 + (nk__in2(1) + 1);
+ else if (*i == 0x06) nk__match(nk__dout-(unsigned int)(nk__in3(1)+1), i[4]+1u), i += 5;
+ else if (*i == 0x04) nk__match(nk__dout-(unsigned int)(nk__in3(1)+1), (unsigned int)nk__in2(4)+1u), i += 6;
+ }
+ return i;
+}
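+/* Adler-32 checksum, used to validate the decompressed font data (modulus
+ * 65521); the sums are accumulated in blocks of 5552 bytes so the 32-bit
+ * accumulators cannot overflow between the modulo reductions. */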
+NK_INTERN unsigned int
+nk_adler32(unsigned int adler32, unsigned char *buffer, unsigned int buflen)
+{
+ const unsigned long ADLER_MOD = 65521;
+ unsigned long s1 = adler32 & 0xffff, s2 = adler32 >> 16;
+ unsigned long blocklen, i;
+
+ blocklen = buflen % 5552;
+ while (buflen) {
+ for (i=0; i + 7 < blocklen; i += 8) {
+ s1 += buffer[0]; s2 += s1;
+ s1 += buffer[1]; s2 += s1;
+ s1 += buffer[2]; s2 += s1;
+ s1 += buffer[3]; s2 += s1;
+ s1 += buffer[4]; s2 += s1;
+ s1 += buffer[5]; s2 += s1;
+ s1 += buffer[6]; s2 += s1;
+ s1 += buffer[7]; s2 += s1;
+ buffer += 8;
+ }
+ for (; i < blocklen; ++i) {
+ s1 += *buffer++; s2 += s1;
+ }
+
+ s1 %= ADLER_MOD; s2 %= ADLER_MOD;
+ buflen -= (unsigned int)blocklen;
+ blocklen = 5552;
+ }
+ return (unsigned int)(s2 << 16) + (unsigned int)s1;
+}
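+/* validates the stream header (magic 0x57bC0000, upper 32 length bits zero),
+ * then replays tokens until the 0x05 0xfa end marker and verifies the
+ * Adler-32 checksum of the produced output. Returns the decompressed length
+ * on success and 0 on any error. */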
+NK_INTERN unsigned int
+nk_decompress(unsigned char *output, unsigned char *i, unsigned int length)
+{
+ unsigned int olen;
+ if (nk__in4(0) != 0x57bC0000) return 0;
+ if (nk__in4(4) != 0) return 0; /* error! stream is > 4GB */
+ olen = nk_decompress_length(i);
+ nk__barrier2 = i;
+ nk__barrier3 = i+length;
+ nk__barrier = output + olen;
+ nk__barrier4 = output;
+ i += 16;
+
+ nk__dout = output;
+ for (;;) {
+ unsigned char *old_i = i;
+ i = nk_decompress_token(i);
+ if (i == old_i) {
+ if (*i == 0x05 && i[1] == 0xfa) {
+ NK_ASSERT(nk__dout == output + olen);
+ if (nk__dout != output + olen) return 0;
+ if (nk_adler32(1, output, olen) != (unsigned int) nk__in4(2))
+ return 0;
+ return olen;
+ } else {
+ NK_ASSERT(0); /* NOTREACHED */
+ return 0;
+ }
+ }
+ NK_ASSERT(nk__dout <= output + olen);
+ if (nk__dout > output + olen)
+ return 0;
+ }
+}
+NK_INTERN unsigned int
+nk_decode_85_byte(char c)
+{
+ return (unsigned int)((c >= '\\') ? c-36 : c-35);
+}
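+/* unpacks the base85-encoded default font: every 5 input characters decode to
+ * one 32-bit value, which is written out as 4 bytes in a fixed byte order so
+ * the result does not depend on host endianness. */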
+NK_INTERN void
+nk_decode_85(unsigned char* dst, const unsigned char* src)
+{
+ while (*src)
+ {
+ unsigned int tmp =
+ nk_decode_85_byte((char)src[0]) +
+ 85 * (nk_decode_85_byte((char)src[1]) +
+ 85 * (nk_decode_85_byte((char)src[2]) +
+ 85 * (nk_decode_85_byte((char)src[3]) +
+ 85 * nk_decode_85_byte((char)src[4]))));
+
+ /* we can't assume little-endianness. */
+ dst[0] = (unsigned char)((tmp >> 0) & 0xFF);
+ dst[1] = (unsigned char)((tmp >> 8) & 0xFF);
+ dst[2] = (unsigned char)((tmp >> 16) & 0xFF);
+ dst[3] = (unsigned char)((tmp >> 24) & 0xFF);
+
+ src += 5;
+ dst += 4;
+ }
+}
+
+/* -------------------------------------------------------------
+ *
+ * FONT ATLAS
+ *
+ * --------------------------------------------------------------*/
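+/* The font atlas collects any number of font configurations and bakes them,
+ * together with the cursor bitmap above, into a single texture (alpha8 or
+ * RGBA32) that nk_font_atlas_bake() hands back to the caller. */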
+NK_API struct nk_font_config
+nk_font_config(float pixel_height)
+{
+ struct nk_font_config cfg;
+ nk_zero_struct(cfg);
+ cfg.ttf_blob = 0;
+ cfg.ttf_size = 0;
+ cfg.ttf_data_owned_by_atlas = 0;
+ cfg.size = pixel_height;
+ cfg.oversample_h = 3;
+ cfg.oversample_v = 1;
+ cfg.pixel_snap = 0;
+ cfg.coord_type = NK_COORD_UV;
+ cfg.spacing = nk_vec2(0,0);
+ cfg.range = nk_font_default_glyph_ranges();
+ cfg.merge_mode = 0;
+ cfg.fallback_glyph = '?';
+ cfg.font = 0;
+ cfg.n = 0;
+ return cfg;
+}
+#ifdef NK_INCLUDE_DEFAULT_ALLOCATOR
+NK_API void
+nk_font_atlas_init_default(struct nk_font_atlas *atlas)
+{
+ NK_ASSERT(atlas);
+ if (!atlas) return;
+ nk_zero_struct(*atlas);
+ atlas->temporary.userdata.ptr = 0;
+ atlas->temporary.alloc = nk_malloc;
+ atlas->temporary.free = nk_mfree;
+ atlas->permanent.userdata.ptr = 0;
+ atlas->permanent.alloc = nk_malloc;
+ atlas->permanent.free = nk_mfree;
+}
+#endif
+NK_API void
+nk_font_atlas_init(struct nk_font_atlas *atlas, struct nk_allocator *alloc)
+{
+ NK_ASSERT(atlas);
+ NK_ASSERT(alloc);
+ if (!atlas || !alloc) return;
+ nk_zero_struct(*atlas);
+ atlas->permanent = *alloc;
+ atlas->temporary = *alloc;
+}
+NK_API void
+nk_font_atlas_init_custom(struct nk_font_atlas *atlas,
+ struct nk_allocator *permanent, struct nk_allocator *temporary)
+{
+ NK_ASSERT(atlas);
+ NK_ASSERT(permanent);
+ NK_ASSERT(temporary);
+ if (!atlas || !permanent || !temporary) return;
+ nk_zero_struct(*atlas);
+ atlas->permanent = *permanent;
+ atlas->temporary = *temporary;
+}
+NK_API void
+nk_font_atlas_begin(struct nk_font_atlas *atlas)
+{
+ NK_ASSERT(atlas);
+ NK_ASSERT(atlas->temporary.alloc && atlas->temporary.free);
+ NK_ASSERT(atlas->permanent.alloc && atlas->permanent.free);
+ if (!atlas || !atlas->permanent.alloc || !atlas->permanent.free ||
+ !atlas->temporary.alloc || !atlas->temporary.free) return;
+ if (atlas->glyphs) {
+ atlas->permanent.free(atlas->permanent.userdata, atlas->glyphs);
+ atlas->glyphs = 0;
+ }
+ if (atlas->pixel) {
+ atlas->permanent.free(atlas->permanent.userdata, atlas->pixel);
+ atlas->pixel = 0;
+ }
+}
+NK_API struct nk_font*
+nk_font_atlas_add(struct nk_font_atlas *atlas, const struct nk_font_config *config)
+{
+ struct nk_font *font = 0;
+ struct nk_font_config *cfg;
+
+ NK_ASSERT(atlas);
+ NK_ASSERT(atlas->permanent.alloc);
+ NK_ASSERT(atlas->permanent.free);
+ NK_ASSERT(atlas->temporary.alloc);
+ NK_ASSERT(atlas->temporary.free);
+
+ NK_ASSERT(config);
+ NK_ASSERT(config->ttf_blob);
+ NK_ASSERT(config->ttf_size);
+ NK_ASSERT(config->size > 0.0f);
+
+ if (!atlas || !config || !config->ttf_blob || !config->ttf_size || config->size <= 0.0f||
+ !atlas->permanent.alloc || !atlas->permanent.free ||
+ !atlas->temporary.alloc || !atlas->temporary.free)
+ return 0;
+
+ /* allocate font config */
+ cfg = (struct nk_font_config*)
+ atlas->permanent.alloc(atlas->permanent.userdata,0, sizeof(struct nk_font_config));
+ NK_MEMCPY(cfg, config, sizeof(*config));
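+ /* each config lives on a circular list (n = next, p = prev); it starts out
+ self-linked, and merge-mode configs are spliced into the base font's list below */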
+ cfg->n = cfg;
+ cfg->p = cfg;
+
+ if (!config->merge_mode) {
+ /* insert font config into list */
+ if (!atlas->config) {
+ atlas->config = cfg;
+ cfg->next = 0;
+ } else {
+ struct nk_font_config *i = atlas->config;
+ while (i->next) i = i->next;
+ i->next = cfg;
+ cfg->next = 0;
+ }
+ /* allocate new font */
+ font = (struct nk_font*)
+ atlas->permanent.alloc(atlas->permanent.userdata,0, sizeof(struct nk_font));
+ NK_ASSERT(font);
+ nk_zero(font, sizeof(*font));
+ if (!font) return 0;
+ font->config = cfg;
+
+ /* insert font into list */
+ if (!atlas->fonts) {
+ atlas->fonts = font;
+ font->next = 0;
+ } else {
+ struct nk_font *i = atlas->fonts;
+ while (i->next) i = i->next;
+ i->next = font;
+ font->next = 0;
+ }
+ cfg->font = &font->info;
+ } else {
+ /* extend previously added font */
+ struct nk_font *f = 0;
+ struct nk_font_config *c = 0;
+ NK_ASSERT(atlas->font_num);
+ f = atlas->fonts;
+ c = f->config;
+ cfg->font = &f->info;
+
+ cfg->n = c;
+ cfg->p = c->p;
+ c->p->n = cfg;
+ c->p = cfg;
+ }
+ /* create own copy of .TTF font blob */
+ if (!config->ttf_data_owned_by_atlas) {
+ cfg->ttf_blob = atlas->permanent.alloc(atlas->permanent.userdata,0, cfg->ttf_size);
+ NK_ASSERT(cfg->ttf_blob);
+ if (!cfg->ttf_blob) {
+ atlas->font_num++;
+ return 0;
+ }
+ NK_MEMCPY(cfg->ttf_blob, config->ttf_blob, cfg->ttf_size);
+ cfg->ttf_data_owned_by_atlas = 1;
+ }
+ atlas->font_num++;
+ return font;
+}
+NK_API struct nk_font*
+nk_font_atlas_add_from_memory(struct nk_font_atlas *atlas, void *memory,
+ nk_size size, float height, const struct nk_font_config *config)
+{
+ struct nk_font_config cfg;
+ NK_ASSERT(memory);
+ NK_ASSERT(size);
+
+ NK_ASSERT(atlas);
+ NK_ASSERT(atlas->temporary.alloc);
+ NK_ASSERT(atlas->temporary.free);
+ NK_ASSERT(atlas->permanent.alloc);
+ NK_ASSERT(atlas->permanent.free);
+ if (!atlas || !atlas->temporary.alloc || !atlas->temporary.free || !memory || !size ||
+ !atlas->permanent.alloc || !atlas->permanent.free)
+ return 0;
+
+ cfg = (config) ? *config: nk_font_config(height);
+ cfg.ttf_blob = memory;
+ cfg.ttf_size = size;
+ cfg.size = height;
+ cfg.ttf_data_owned_by_atlas = 0;
+ return nk_font_atlas_add(atlas, &cfg);
+}
+#ifdef NK_INCLUDE_STANDARD_IO
+NK_API struct nk_font*
+nk_font_atlas_add_from_file(struct nk_font_atlas *atlas, const char *file_path,
+ float height, const struct nk_font_config *config)
+{
+ nk_size size;
+ char *memory;
+ struct nk_font_config cfg;
+
+ NK_ASSERT(atlas);
+ NK_ASSERT(atlas->temporary.alloc);
+ NK_ASSERT(atlas->temporary.free);
+ NK_ASSERT(atlas->permanent.alloc);
+ NK_ASSERT(atlas->permanent.free);
+
+ if (!atlas || !file_path) return 0;
+ memory = nk_file_load(file_path, &size, &atlas->permanent);
+ if (!memory) return 0;
+
+ cfg = (config) ? *config: nk_font_config(height);
+ cfg.ttf_blob = memory;
+ cfg.ttf_size = size;
+ cfg.size = height;
+ cfg.ttf_data_owned_by_atlas = 1;
+ return nk_font_atlas_add(atlas, &cfg);
+}
+#endif
+NK_API struct nk_font*
+nk_font_atlas_add_compressed(struct nk_font_atlas *atlas,
+ void *compressed_data, nk_size compressed_size, float height,
+ const struct nk_font_config *config)
+{
+ unsigned int decompressed_size;
+ void *decompressed_data;
+ struct nk_font_config cfg;
+
+ NK_ASSERT(atlas);
+ NK_ASSERT(atlas->temporary.alloc);
+ NK_ASSERT(atlas->temporary.free);
+ NK_ASSERT(atlas->permanent.alloc);
+ NK_ASSERT(atlas->permanent.free);
+
+ NK_ASSERT(compressed_data);
+ NK_ASSERT(compressed_size);
+ if (!atlas || !compressed_data || !atlas->temporary.alloc || !atlas->temporary.free ||
+ !atlas->permanent.alloc || !atlas->permanent.free)
+ return 0;
+
+ decompressed_size = nk_decompress_length((unsigned char*)compressed_data);
+ decompressed_data = atlas->permanent.alloc(atlas->permanent.userdata,0,decompressed_size);
+ NK_ASSERT(decompressed_data);
+ if (!decompressed_data) return 0;
+ nk_decompress((unsigned char*)decompressed_data, (unsigned char*)compressed_data,
+ (unsigned int)compressed_size);
+
+ cfg = (config) ? *config: nk_font_config(height);
+ cfg.ttf_blob = decompressed_data;
+ cfg.ttf_size = decompressed_size;
+ cfg.size = height;
+ cfg.ttf_data_owned_by_atlas = 1;
+ return nk_font_atlas_add(atlas, &cfg);
+}
+NK_API struct nk_font*
+nk_font_atlas_add_compressed_base85(struct nk_font_atlas *atlas,
+ const char *data_base85, float height, const struct nk_font_config *config)
+{
+ int compressed_size;
+ void *compressed_data;
+ struct nk_font *font;
+
+ NK_ASSERT(atlas);
+ NK_ASSERT(atlas->temporary.alloc);
+ NK_ASSERT(atlas->temporary.free);
+ NK_ASSERT(atlas->permanent.alloc);
+ NK_ASSERT(atlas->permanent.free);
+
+ NK_ASSERT(data_base85);
+ if (!atlas || !data_base85 || !atlas->temporary.alloc || !atlas->temporary.free ||
+ !atlas->permanent.alloc || !atlas->permanent.free)
+ return 0;
+
+ compressed_size = (((int)nk_strlen(data_base85) + 4) / 5) * 4;
+ compressed_data = atlas->temporary.alloc(atlas->temporary.userdata,0, (nk_size)compressed_size);
+ NK_ASSERT(compressed_data);
+ if (!compressed_data) return 0;
+ nk_decode_85((unsigned char*)compressed_data, (const unsigned char*)data_base85);
+ font = nk_font_atlas_add_compressed(atlas, compressed_data,
+ (nk_size)compressed_size, height, config);
+ atlas->temporary.free(atlas->temporary.userdata, compressed_data);
+ return font;
+}
+
+#ifdef NK_INCLUDE_DEFAULT_FONT
+NK_API struct nk_font*
+nk_font_atlas_add_default(struct nk_font_atlas *atlas,
+ float pixel_height, const struct nk_font_config *config)
+{
+ NK_ASSERT(atlas);
+ NK_ASSERT(atlas->temporary.alloc);
+ NK_ASSERT(atlas->temporary.free);
+ NK_ASSERT(atlas->permanent.alloc);
+ NK_ASSERT(atlas->permanent.free);
+ return nk_font_atlas_add_compressed_base85(atlas,
+ nk_proggy_clean_ttf_compressed_data_base85, pixel_height, config);
+}
+#endif
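+/* bakes every added font into one image: pack all glyphs (plus the cursor
+ * rectangle), render them into an alpha8 buffer, blit the custom cursor data,
+ * optionally expand to RGBA32, then initialize the per-font glyph tables and
+ * the cursor images. On failure all intermediate memory is released. */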
+NK_API const void*
+nk_font_atlas_bake(struct nk_font_atlas *atlas, int *width, int *height,
+ enum nk_font_atlas_format fmt)
+{
+ int i = 0;
+ void *tmp = 0;
+ nk_size tmp_size, img_size;
+ struct nk_font *font_iter;
+ struct nk_font_baker *baker;
+
+ NK_ASSERT(atlas);
+ NK_ASSERT(atlas->temporary.alloc);
+ NK_ASSERT(atlas->temporary.free);
+ NK_ASSERT(atlas->permanent.alloc);
+ NK_ASSERT(atlas->permanent.free);
+
+ NK_ASSERT(width);
+ NK_ASSERT(height);
+ if (!atlas || !width || !height ||
+ !atlas->temporary.alloc || !atlas->temporary.free ||
+ !atlas->permanent.alloc || !atlas->permanent.free)
+ return 0;
+
+#ifdef NK_INCLUDE_DEFAULT_FONT
+ /* no font was added, so fall back to the default font */
+ if (!atlas->font_num)
+ atlas->default_font = nk_font_atlas_add_default(atlas, 13.0f, 0);
+#endif
+ NK_ASSERT(atlas->font_num);
+ if (!atlas->font_num) return 0;
+
+ /* allocate temporary baker memory required for the baking process */
+ nk_font_baker_memory(&tmp_size, &atlas->glyph_count, atlas->config, atlas->font_num);
+ tmp = atlas->temporary.alloc(atlas->temporary.userdata,0, tmp_size);
+ NK_ASSERT(tmp);
+ if (!tmp) goto failed;
+
+ /* allocate glyph memory for all fonts */
+ baker = nk_font_baker(tmp, atlas->glyph_count, atlas->font_num, &atlas->temporary);
+ atlas->glyphs = (struct nk_font_glyph*)atlas->permanent.alloc(
+ atlas->permanent.userdata,0, sizeof(struct nk_font_glyph)*(nk_size)atlas->glyph_count);
+ NK_ASSERT(atlas->glyphs);
+ if (!atlas->glyphs)
+ goto failed;
+
+ /* pack all glyphs into a tight fit space */
+ atlas->custom.w = (NK_CURSOR_DATA_W*2)+1;
+ atlas->custom.h = NK_CURSOR_DATA_H + 1;
+ if (!nk_font_bake_pack(baker, &img_size, width, height, &atlas->custom,
+ atlas->config, atlas->font_num, &atlas->temporary))
+ goto failed;
+
+ /* allocate memory for the baked image font atlas */
+ atlas->pixel = atlas->temporary.alloc(atlas->temporary.userdata,0, img_size);
+ NK_ASSERT(atlas->pixel);
+ if (!atlas->pixel)
+ goto failed;
+
+ /* bake glyphs and custom white pixel into image */
+ nk_font_bake(baker, atlas->pixel, *width, *height,
+ atlas->glyphs, atlas->glyph_count, atlas->config, atlas->font_num);
+ nk_font_bake_custom_data(atlas->pixel, *width, *height, atlas->custom,
+ nk_custom_cursor_data, NK_CURSOR_DATA_W, NK_CURSOR_DATA_H, '.', 'X');
+
+ if (fmt == NK_FONT_ATLAS_RGBA32) {
+ /* convert alpha8 image into rgba32 image */
+ void *img_rgba = atlas->temporary.alloc(atlas->temporary.userdata,0,
+ (nk_size)(*width * *height * 4));
+ NK_ASSERT(img_rgba);
+ if (!img_rgba) goto failed;
+ nk_font_bake_convert(img_rgba, *width, *height, atlas->pixel);
+ atlas->temporary.free(atlas->temporary.userdata, atlas->pixel);
+ atlas->pixel = img_rgba;
+ }
+ atlas->tex_width = *width;
+ atlas->tex_height = *height;
+
+ /* initialize each font */
+ for (font_iter = atlas->fonts; font_iter; font_iter = font_iter->next) {
+ struct nk_font *font = font_iter;
+ struct nk_font_config *config = font->config;
+ nk_font_init(font, config->size, config->fallback_glyph, atlas->glyphs,
+ config->font, nk_handle_ptr(0));
+ }
+
+ /* initialize each cursor */
+ {NK_STORAGE const struct nk_vec2 nk_cursor_data[NK_CURSOR_COUNT][3] = {
+ /* Pos Size Offset */
+ {{ 0, 3}, {12,19}, { 0, 0}},
+ {{13, 0}, { 7,16}, { 4, 8}},
+ {{31, 0}, {23,23}, {11,11}},
+ {{21, 0}, { 9, 23}, { 5,11}},
+ {{55,18}, {23, 9}, {11, 5}},
+ {{73, 0}, {17,17}, { 9, 9}},
+ {{55, 0}, {17,17}, { 9, 9}}
+ };
+ for (i = 0; i < NK_CURSOR_COUNT; ++i) {
+ struct nk_cursor *cursor = &atlas->cursors[i];
+ cursor->img.w = (unsigned short)*width;
+ cursor->img.h = (unsigned short)*height;
+ cursor->img.region[0] = (unsigned short)(atlas->custom.x + nk_cursor_data[i][0].x);
+ cursor->img.region[1] = (unsigned short)(atlas->custom.y + nk_cursor_data[i][0].y);
+ cursor->img.region[2] = (unsigned short)nk_cursor_data[i][1].x;
+ cursor->img.region[3] = (unsigned short)nk_cursor_data[i][1].y;
+ cursor->size = nk_cursor_data[i][1];
+ cursor->offset = nk_cursor_data[i][2];
+ }}
+ /* free temporary memory */
+ atlas->temporary.free(atlas->temporary.userdata, tmp);
+ return atlas->pixel;
+
+failed:
+ /* error: release everything allocated so far */
+ if (tmp) atlas->temporary.free(atlas->temporary.userdata, tmp);
+ if (atlas->glyphs) {
+ atlas->permanent.free(atlas->permanent.userdata, atlas->glyphs);
+ atlas->glyphs = 0;
+ }
+ if (atlas->pixel) {
+ atlas->temporary.free(atlas->temporary.userdata, atlas->pixel);
+ atlas->pixel = 0;
+ }
+ return 0;
+}
+NK_API void
+nk_font_atlas_end(struct nk_font_atlas *atlas, nk_handle texture,
+ struct nk_draw_null_texture *null)
+{
+ int i = 0;
+ struct nk_font *font_iter;
+ NK_ASSERT(atlas);
+ if (!atlas) {
+ if (!null) return;
+ null->texture = texture;
+ null->uv = nk_vec2(0.5f,0.5f);
+ return;
+ }
+ if (null) {
+ null->texture = texture;
+ null->uv.x = (atlas->custom.x + 0.5f)/(float)atlas->tex_width;
+ null->uv.y = (atlas->custom.y + 0.5f)/(float)atlas->tex_height;
+ }
+ for (font_iter = atlas->fonts; font_iter; font_iter = font_iter->next) {
+ font_iter->texture = texture;
+#ifdef NK_INCLUDE_VERTEX_BUFFER_OUTPUT
+ font_iter->handle.texture = texture;
+#endif
+ }
+ for (i = 0; i < NK_CURSOR_COUNT; ++i)
+ atlas->cursors[i].img.handle = texture;
+
+ atlas->temporary.free(atlas->temporary.userdata, atlas->pixel);
+ atlas->pixel = 0;
+ atlas->tex_width = 0;
+ atlas->tex_height = 0;
+ atlas->custom.x = 0;
+ atlas->custom.y = 0;
+ atlas->custom.w = 0;
+ atlas->custom.h = 0;
+}
+NK_API void
+nk_font_atlas_cleanup(struct nk_font_atlas *atlas)
+{
+ NK_ASSERT(atlas);
+ NK_ASSERT(atlas->temporary.alloc);
+ NK_ASSERT(atlas->temporary.free);
+ NK_ASSERT(atlas->permanent.alloc);
+ NK_ASSERT(atlas->permanent.free);
+ if (!atlas || !atlas->permanent.alloc || !atlas->permanent.free) return;
+ if (atlas->config) {
+ struct nk_font_config *iter;
+ for (iter = atlas->config; iter; iter = iter->next) {
+ struct nk_font_config *i;
+ for (i = iter->n; i != iter; i = i->n) {
+ atlas->permanent.free(atlas->permanent.userdata, i->ttf_blob);
+ i->ttf_blob = 0;
+ }
+ atlas->permanent.free(atlas->permanent.userdata, iter->ttf_blob);
+ iter->ttf_blob = 0;
+ }
+ }
+}
+NK_API void
+nk_font_atlas_clear(struct nk_font_atlas *atlas)
+{
+ NK_ASSERT(atlas);
+ NK_ASSERT(atlas->temporary.alloc);
+ NK_ASSERT(atlas->temporary.free);
+ NK_ASSERT(atlas->permanent.alloc);
+ NK_ASSERT(atlas->permanent.free);
+ if (!atlas || !atlas->permanent.alloc || !atlas->permanent.free) return;
+
+ if (atlas->config) {
+ struct nk_font_config *iter, *next;
+ for (iter = atlas->config; iter; iter = next) {
+ struct nk_font_config *i, *n;
+ for (i = iter->n; i != iter; i = n) {
+ n = i->n;
+ if (i->ttf_blob)
+ atlas->permanent.free(atlas->permanent.userdata, i->ttf_blob);
+ atlas->permanent.free(atlas->permanent.userdata, i);
+ }
+ next = iter->next;
+ if (iter->ttf_blob)
+ atlas->permanent.free(atlas->permanent.userdata, iter->ttf_blob);
+ atlas->permanent.free(atlas->permanent.userdata, iter);
+ }
+ atlas->config = 0;
+ }
+ if (atlas->fonts) {
+ struct nk_font *iter, *next;
+ for (iter = atlas->fonts; iter; iter = next) {
+ next = iter->next;
+ atlas->permanent.free(atlas->permanent.userdata, iter);
+ }
+ atlas->fonts = 0;
+ }
+ if (atlas->glyphs)
+ atlas->permanent.free(atlas->permanent.userdata, atlas->glyphs);
+ nk_zero_struct(*atlas);
+}
+#endif
+
+
+
+
+
+/* ===============================================================
+ *
+ * INPUT
+ *
+ * ===============================================================*/
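+/* Input is collected once per frame: nk_input_begin() clears the per-frame
+ * click counts, text buffer and mouse deltas, the nk_input_* setters record
+ * events supplied by the backend, and nk_input_end() resolves pending mouse
+ * grab/ungrab requests. The query helpers below only read this recorded
+ * state. */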
+NK_API void
+nk_input_begin(struct nk_context *ctx)
+{
+ int i;
+ struct nk_input *in;
+ NK_ASSERT(ctx);
+ if (!ctx) return;
+ in = &ctx->input;
+ for (i = 0; i < NK_BUTTON_MAX; ++i)
+ in->mouse.buttons[i].clicked = 0;
+
+ in->keyboard.text_len = 0;
+ in->mouse.scroll_delta = nk_vec2(0,0);
+ in->mouse.prev.x = in->mouse.pos.x;
+ in->mouse.prev.y = in->mouse.pos.y;
+ in->mouse.delta.x = 0;
+ in->mouse.delta.y = 0;
+ for (i = 0; i < NK_KEY_MAX; i++)
+ in->keyboard.keys[i].clicked = 0;
+}
+NK_API void
+nk_input_end(struct nk_context *ctx)
+{
+ struct nk_input *in;
+ NK_ASSERT(ctx);
+ if (!ctx) return;
+ in = &ctx->input;
+ if (in->mouse.grab)
+ in->mouse.grab = 0;
+ if (in->mouse.ungrab) {
+ in->mouse.grabbed = 0;
+ in->mouse.ungrab = 0;
+ in->mouse.grab = 0;
+ }
+}
+NK_API void
+nk_input_motion(struct nk_context *ctx, int x, int y)
+{
+ struct nk_input *in;
+ NK_ASSERT(ctx);
+ if (!ctx) return;
+ in = &ctx->input;
+ in->mouse.pos.x = (float)x;
+ in->mouse.pos.y = (float)y;
+ in->mouse.delta.x = in->mouse.pos.x - in->mouse.prev.x;
+ in->mouse.delta.y = in->mouse.pos.y - in->mouse.prev.y;
+}
+NK_API void
+nk_input_key(struct nk_context *ctx, enum nk_keys key, int down)
+{
+ struct nk_input *in;
+ NK_ASSERT(ctx);
+ if (!ctx) return;
+ in = &ctx->input;
+#ifdef NK_KEYSTATE_BASED_INPUT
+ if (in->keyboard.keys[key].down != down)
+ in->keyboard.keys[key].clicked++;
+#else
+ in->keyboard.keys[key].clicked++;
+#endif
+ in->keyboard.keys[key].down = down;
+}
+NK_API void
+nk_input_button(struct nk_context *ctx, enum nk_buttons id, int x, int y, int down)
+{
+ struct nk_mouse_button *btn;
+ struct nk_input *in;
+ NK_ASSERT(ctx);
+ if (!ctx) return;
+ in = &ctx->input;
+ if (in->mouse.buttons[id].down == down) return;
+
+ btn = &in->mouse.buttons[id];
+ btn->clicked_pos.x = (float)x;
+ btn->clicked_pos.y = (float)y;
+ btn->down = down;
+ btn->clicked++;
+}
+NK_API void
+nk_input_scroll(struct nk_context *ctx, struct nk_vec2 val)
+{
+ NK_ASSERT(ctx);
+ if (!ctx) return;
+ ctx->input.mouse.scroll_delta.x += val.x;
+ ctx->input.mouse.scroll_delta.y += val.y;
+}
+NK_API void
+nk_input_glyph(struct nk_context *ctx, const nk_glyph glyph)
+{
+ int len = 0;
+ nk_rune unicode;
+ struct nk_input *in;
+
+ NK_ASSERT(ctx);
+ if (!ctx) return;
+ in = &ctx->input;
+
+ len = nk_utf_decode(glyph, &unicode, NK_UTF_SIZE);
+ if (len && ((in->keyboard.text_len + len) < NK_INPUT_MAX)) {
+ nk_utf_encode(unicode, &in->keyboard.text[in->keyboard.text_len],
+ NK_INPUT_MAX - in->keyboard.text_len);
+ in->keyboard.text_len += len;
+ }
+}
+NK_API void
+nk_input_char(struct nk_context *ctx, char c)
+{
+ nk_glyph glyph;
+ NK_ASSERT(ctx);
+ if (!ctx) return;
+ glyph[0] = c;
+ nk_input_glyph(ctx, glyph);
+}
+NK_API void
+nk_input_unicode(struct nk_context *ctx, nk_rune unicode)
+{
+ nk_glyph rune;
+ NK_ASSERT(ctx);
+ if (!ctx) return;
+ nk_utf_encode(unicode, rune, NK_UTF_SIZE);
+ nk_input_glyph(ctx, rune);
+}
+NK_API int
+nk_input_has_mouse_click(const struct nk_input *i, enum nk_buttons id)
+{
+ const struct nk_mouse_button *btn;
+ if (!i) return nk_false;
+ btn = &i->mouse.buttons[id];
+ return (btn->clicked && btn->down == nk_false) ? nk_true : nk_false;
+}
+NK_API int
+nk_input_has_mouse_click_in_rect(const struct nk_input *i, enum nk_buttons id,
+ struct nk_rect b)
+{
+ const struct nk_mouse_button *btn;
+ if (!i) return nk_false;
+ btn = &i->mouse.buttons[id];
+ if (!NK_INBOX(btn->clicked_pos.x,btn->clicked_pos.y,b.x,b.y,b.w,b.h))
+ return nk_false;
+ return nk_true;
+}
+NK_API int
+nk_input_has_mouse_click_down_in_rect(const struct nk_input *i, enum nk_buttons id,
+ struct nk_rect b, int down)
+{
+ const struct nk_mouse_button *btn;
+ if (!i) return nk_false;
+ btn = &i->mouse.buttons[id];
+ return nk_input_has_mouse_click_in_rect(i, id, b) && (btn->down == down);
+}
+NK_API int
+nk_input_is_mouse_click_in_rect(const struct nk_input *i, enum nk_buttons id,
+ struct nk_rect b)
+{
+ const struct nk_mouse_button *btn;
+ if (!i) return nk_false;
+ btn = &i->mouse.buttons[id];
+ return (nk_input_has_mouse_click_down_in_rect(i, id, b, nk_false) &&
+ btn->clicked) ? nk_true : nk_false;
+}
+NK_API int
+nk_input_is_mouse_click_down_in_rect(const struct nk_input *i, enum nk_buttons id,
+ struct nk_rect b, int down)
+{
+ const struct nk_mouse_button *btn;
+ if (!i) return nk_false;
+ btn = &i->mouse.buttons[id];
+ return (nk_input_has_mouse_click_down_in_rect(i, id, b, down) &&
+ btn->clicked) ? nk_true : nk_false;
+}
+NK_API int
+nk_input_any_mouse_click_in_rect(const struct nk_input *in, struct nk_rect b)
+{
+ int i, down = 0;
+ for (i = 0; i < NK_BUTTON_MAX; ++i)
+ down = down || nk_input_is_mouse_click_in_rect(in, (enum nk_buttons)i, b);
+ return down;
+}
+NK_API int
+nk_input_is_mouse_hovering_rect(const struct nk_input *i, struct nk_rect rect)
+{
+ if (!i) return nk_false;
+ return NK_INBOX(i->mouse.pos.x, i->mouse.pos.y, rect.x, rect.y, rect.w, rect.h);
+}
+NK_API int
+nk_input_is_mouse_prev_hovering_rect(const struct nk_input *i, struct nk_rect rect)
+{
+ if (!i) return nk_false;
+ return NK_INBOX(i->mouse.prev.x, i->mouse.prev.y, rect.x, rect.y, rect.w, rect.h);
+}
+NK_API int
+nk_input_mouse_clicked(const struct nk_input *i, enum nk_buttons id, struct nk_rect rect)
+{
+ if (!i) return nk_false;
+ if (!nk_input_is_mouse_hovering_rect(i, rect)) return nk_false;
+ return nk_input_is_mouse_click_in_rect(i, id, rect);
+}
+NK_API int
+nk_input_is_mouse_down(const struct nk_input *i, enum nk_buttons id)
+{
+ if (!i) return nk_false;
+ return i->mouse.buttons[id].down;
+}
+NK_API int
+nk_input_is_mouse_pressed(const struct nk_input *i, enum nk_buttons id)
+{
+ const struct nk_mouse_button *b;
+ if (!i) return nk_false;
+ b = &i->mouse.buttons[id];
+ if (b->down && b->clicked)
+ return nk_true;
+ return nk_false;
+}
+NK_API int
+nk_input_is_mouse_released(const struct nk_input *i, enum nk_buttons id)
+{
+ if (!i) return nk_false;
+ return (!i->mouse.buttons[id].down && i->mouse.buttons[id].clicked);
+}
+NK_API int
+nk_input_is_key_pressed(const struct nk_input *i, enum nk_keys key)
+{
+ const struct nk_key *k;
+ if (!i) return nk_false;
+ k = &i->keyboard.keys[key];
+ if ((k->down && k->clicked) || (!k->down && k->clicked >= 2))
+ return nk_true;
+ return nk_false;
+}
+NK_API int
+nk_input_is_key_released(const struct nk_input *i, enum nk_keys key)
+{
+ const struct nk_key *k;
+ if (!i) return nk_false;
+ k = &i->keyboard.keys[key];
+ if ((!k->down && k->clicked) || (k->down && k->clicked >= 2))
+ return nk_true;
+ return nk_false;
+}
+NK_API int
+nk_input_is_key_down(const struct nk_input *i, enum nk_keys key)
+{
+ const struct nk_key *k;
+ if (!i) return nk_false;
+ k = &i->keyboard.keys[key];
+ if (k->down) return nk_true;
+ return nk_false;
+}
+
+
+
+
+
+/* ===============================================================
+ *
+ * STYLE
+ *
+ * ===============================================================*/
+NK_API void nk_style_default(struct nk_context *ctx){nk_style_from_table(ctx, 0);}
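+/* X-macro listing the default color table: it is expanded once with
+ * {r,g,b,a} initializers to build nk_default_color_style and once with #name
+ * to build nk_color_names. */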
+#define NK_COLOR_MAP(NK_COLOR)\
+ NK_COLOR(NK_COLOR_TEXT, 175,175,175,255) \
+ NK_COLOR(NK_COLOR_WINDOW, 45, 45, 45, 255) \
+ NK_COLOR(NK_COLOR_HEADER, 40, 40, 40, 255) \
+ NK_COLOR(NK_COLOR_BORDER, 65, 65, 65, 255) \
+ NK_COLOR(NK_COLOR_BUTTON, 50, 50, 50, 255) \
+ NK_COLOR(NK_COLOR_BUTTON_HOVER, 40, 40, 40, 255) \
+ NK_COLOR(NK_COLOR_BUTTON_ACTIVE, 35, 35, 35, 255) \
+ NK_COLOR(NK_COLOR_TOGGLE, 100,100,100,255) \
+ NK_COLOR(NK_COLOR_TOGGLE_HOVER, 120,120,120,255) \
+ NK_COLOR(NK_COLOR_TOGGLE_CURSOR, 45, 45, 45, 255) \
+ NK_COLOR(NK_COLOR_SELECT, 45, 45, 45, 255) \
+ NK_COLOR(NK_COLOR_SELECT_ACTIVE, 35, 35, 35,255) \
+ NK_COLOR(NK_COLOR_SLIDER, 38, 38, 38, 255) \
+ NK_COLOR(NK_COLOR_SLIDER_CURSOR, 100,100,100,255) \
+ NK_COLOR(NK_COLOR_SLIDER_CURSOR_HOVER, 120,120,120,255) \
+ NK_COLOR(NK_COLOR_SLIDER_CURSOR_ACTIVE, 150,150,150,255) \
+ NK_COLOR(NK_COLOR_PROPERTY, 38, 38, 38, 255) \
+ NK_COLOR(NK_COLOR_EDIT, 38, 38, 38, 255) \
+ NK_COLOR(NK_COLOR_EDIT_CURSOR, 175,175,175,255) \
+ NK_COLOR(NK_COLOR_COMBO, 45, 45, 45, 255) \
+ NK_COLOR(NK_COLOR_CHART, 120,120,120,255) \
+ NK_COLOR(NK_COLOR_CHART_COLOR, 45, 45, 45, 255) \
+ NK_COLOR(NK_COLOR_CHART_COLOR_HIGHLIGHT, 255, 0, 0, 255) \
+ NK_COLOR(NK_COLOR_SCROLLBAR, 40, 40, 40, 255) \
+ NK_COLOR(NK_COLOR_SCROLLBAR_CURSOR, 100,100,100,255) \
+ NK_COLOR(NK_COLOR_SCROLLBAR_CURSOR_HOVER, 120,120,120,255) \
+ NK_COLOR(NK_COLOR_SCROLLBAR_CURSOR_ACTIVE, 150,150,150,255) \
+ NK_COLOR(NK_COLOR_TAB_HEADER, 40, 40, 40,255)
+
+NK_GLOBAL const struct nk_color
+nk_default_color_style[NK_COLOR_COUNT] = {
+#define NK_COLOR(a,b,c,d,e) {b,c,d,e},
+ NK_COLOR_MAP(NK_COLOR)
+#undef NK_COLOR
+};
+NK_GLOBAL const char *nk_color_names[NK_COLOR_COUNT] = {
+#define NK_COLOR(a,b,c,d,e) #a,
+ NK_COLOR_MAP(NK_COLOR)
+#undef NK_COLOR
+};
+
+NK_API const char*
+nk_style_get_color_by_name(enum nk_style_colors c)
+{
+ return nk_color_names[c];
+}
+NK_API struct nk_style_item
+nk_style_item_image(struct nk_image img)
+{
+ struct nk_style_item i;
+ i.type = NK_STYLE_ITEM_IMAGE;
+ i.data.image = img;
+ return i;
+}
+NK_API struct nk_style_item
+nk_style_item_color(struct nk_color col)
+{
+ struct nk_style_item i;
+ i.type = NK_STYLE_ITEM_COLOR;
+ i.data.color = col;
+ return i;
+}
+NK_API struct nk_style_item
+nk_style_item_hide(void)
+{
+ struct nk_style_item i;
+ i.type = NK_STYLE_ITEM_COLOR;
+ i.data.color = nk_rgba(0,0,0,0);
+ return i;
+}
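+/* fills the complete widget style from a caller-provided color table, or from
+ * nk_default_color_style when table is NULL (which is what nk_style_default()
+ * requests). */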
+NK_API void
+nk_style_from_table(struct nk_context *ctx, const struct nk_color *table)
+{
+ struct nk_style *style;
+ struct nk_style_text *text;
+ struct nk_style_button *button;
+ struct nk_style_toggle *toggle;
+ struct nk_style_selectable *select;
+ struct nk_style_slider *slider;
+ struct nk_style_progress *prog;
+ struct nk_style_scrollbar *scroll;
+ struct nk_style_edit *edit;
+ struct nk_style_property *property;
+ struct nk_style_combo *combo;
+ struct nk_style_chart *chart;
+ struct nk_style_tab *tab;
+ struct nk_style_window *win;
+
+ NK_ASSERT(ctx);
+ if (!ctx) return;
+ style = &ctx->style;
+ table = (!table) ? nk_default_color_style: table;
+
+ /* default text */
+ text = &style->text;
+ text->color = table[NK_COLOR_TEXT];
+ text->padding = nk_vec2(0,0);
+
+ /* default button */
+ button = &style->button;
+ nk_zero_struct(*button);
+ button->normal = nk_style_item_color(table[NK_COLOR_BUTTON]);
+ button->hover = nk_style_item_color(table[NK_COLOR_BUTTON_HOVER]);
+ button->active = nk_style_item_color(table[NK_COLOR_BUTTON_ACTIVE]);
+ button->border_color = table[NK_COLOR_BORDER];
+ button->text_background = table[NK_COLOR_BUTTON];
+ button->text_normal = table[NK_COLOR_TEXT];
+ button->text_hover = table[NK_COLOR_TEXT];
+ button->text_active = table[NK_COLOR_TEXT];
+ button->padding = nk_vec2(2.0f,2.0f);
+ button->image_padding = nk_vec2(0.0f,0.0f);
+ button->touch_padding = nk_vec2(0.0f, 0.0f);
+ button->userdata = nk_handle_ptr(0);
+ button->text_alignment = NK_TEXT_CENTERED;
+ button->border = 1.0f;
+ button->rounding = 4.0f;
+ button->draw_begin = 0;
+ button->draw_end = 0;
+
+ /* contextual button */
+ button = &style->contextual_button;
+ nk_zero_struct(*button);
+ button->normal = nk_style_item_color(table[NK_COLOR_WINDOW]);
+ button->hover = nk_style_item_color(table[NK_COLOR_BUTTON_HOVER]);
+ button->active = nk_style_item_color(table[NK_COLOR_BUTTON_ACTIVE]);
+ button->border_color = table[NK_COLOR_WINDOW];
+ button->text_background = table[NK_COLOR_WINDOW];
+ button->text_normal = table[NK_COLOR_TEXT];
+ button->text_hover = table[NK_COLOR_TEXT];
+ button->text_active = table[NK_COLOR_TEXT];
+ button->padding = nk_vec2(2.0f,2.0f);
+ button->touch_padding = nk_vec2(0.0f,0.0f);
+ button->userdata = nk_handle_ptr(0);
+ button->text_alignment = NK_TEXT_CENTERED;
+ button->border = 0.0f;
+ button->rounding = 0.0f;
+ button->draw_begin = 0;
+ button->draw_end = 0;
+
+ /* menu button */
+ button = &style->menu_button;
+ nk_zero_struct(*button);
+ button->normal = nk_style_item_color(table[NK_COLOR_WINDOW]);
+ button->hover = nk_style_item_color(table[NK_COLOR_WINDOW]);
+ button->active = nk_style_item_color(table[NK_COLOR_WINDOW]);
+ button->border_color = table[NK_COLOR_WINDOW];
+ button->text_background = table[NK_COLOR_WINDOW];
+ button->text_normal = table[NK_COLOR_TEXT];
+ button->text_hover = table[NK_COLOR_TEXT];
+ button->text_active = table[NK_COLOR_TEXT];
+ button->padding = nk_vec2(2.0f,2.0f);
+ button->touch_padding = nk_vec2(0.0f,0.0f);
+ button->userdata = nk_handle_ptr(0);
+ button->text_alignment = NK_TEXT_CENTERED;
+ button->border = 0.0f;
+ button->rounding = 1.0f;
+ button->draw_begin = 0;
+ button->draw_end = 0;
+
+ /* checkbox toggle */
+ toggle = &style->checkbox;
+ nk_zero_struct(*toggle);
+ toggle->normal = nk_style_item_color(table[NK_COLOR_TOGGLE]);
+ toggle->hover = nk_style_item_color(table[NK_COLOR_TOGGLE_HOVER]);
+ toggle->active = nk_style_item_color(table[NK_COLOR_TOGGLE_HOVER]);
+ toggle->cursor_normal = nk_style_item_color(table[NK_COLOR_TOGGLE_CURSOR]);
+ toggle->cursor_hover = nk_style_item_color(table[NK_COLOR_TOGGLE_CURSOR]);
+ toggle->userdata = nk_handle_ptr(0);
+ toggle->text_background = table[NK_COLOR_WINDOW];
+ toggle->text_normal = table[NK_COLOR_TEXT];
+ toggle->text_hover = table[NK_COLOR_TEXT];
+ toggle->text_active = table[NK_COLOR_TEXT];
+ toggle->padding = nk_vec2(2.0f, 2.0f);
+ toggle->touch_padding = nk_vec2(0,0);
+ toggle->border_color = nk_rgba(0,0,0,0);
+ toggle->border = 0.0f;
+ toggle->spacing = 4;
+
+ /* option toggle */
+ toggle = &style->option;
+ nk_zero_struct(*toggle);
+ toggle->normal = nk_style_item_color(table[NK_COLOR_TOGGLE]);
+ toggle->hover = nk_style_item_color(table[NK_COLOR_TOGGLE_HOVER]);
+ toggle->active = nk_style_item_color(table[NK_COLOR_TOGGLE_HOVER]);
+ toggle->cursor_normal = nk_style_item_color(table[NK_COLOR_TOGGLE_CURSOR]);
+ toggle->cursor_hover = nk_style_item_color(table[NK_COLOR_TOGGLE_CURSOR]);
+ toggle->userdata = nk_handle_ptr(0);
+ toggle->text_background = table[NK_COLOR_WINDOW];
+ toggle->text_normal = table[NK_COLOR_TEXT];
+ toggle->text_hover = table[NK_COLOR_TEXT];
+ toggle->text_active = table[NK_COLOR_TEXT];
+ toggle->padding = nk_vec2(3.0f, 3.0f);
+ toggle->touch_padding = nk_vec2(0,0);
+ toggle->border_color = nk_rgba(0,0,0,0);
+ toggle->border = 0.0f;
+ toggle->spacing = 4;
+
+ /* selectable */
+ select = &style->selectable;
+ nk_zero_struct(*select);
+ select->normal = nk_style_item_color(table[NK_COLOR_SELECT]);
+ select->hover = nk_style_item_color(table[NK_COLOR_SELECT]);
+ select->pressed = nk_style_item_color(table[NK_COLOR_SELECT]);
+ select->normal_active = nk_style_item_color(table[NK_COLOR_SELECT_ACTIVE]);
+ select->hover_active = nk_style_item_color(table[NK_COLOR_SELECT_ACTIVE]);
+ select->pressed_active = nk_style_item_color(table[NK_COLOR_SELECT_ACTIVE]);
+ select->text_normal = table[NK_COLOR_TEXT];
+ select->text_hover = table[NK_COLOR_TEXT];
+ select->text_pressed = table[NK_COLOR_TEXT];
+ select->text_normal_active = table[NK_COLOR_TEXT];
+ select->text_hover_active = table[NK_COLOR_TEXT];
+ select->text_pressed_active = table[NK_COLOR_TEXT];
+ select->padding = nk_vec2(2.0f,2.0f);
+ select->image_padding = nk_vec2(2.0f,2.0f);
+ select->touch_padding = nk_vec2(0,0);
+ select->userdata = nk_handle_ptr(0);
+ select->rounding = 0.0f;
+ select->draw_begin = 0;
+ select->draw_end = 0;
+
+ /* slider */
+ slider = &style->slider;
+ nk_zero_struct(*slider);
+ slider->normal = nk_style_item_hide();
+ slider->hover = nk_style_item_hide();
+ slider->active = nk_style_item_hide();
+ slider->bar_normal = table[NK_COLOR_SLIDER];
+ slider->bar_hover = table[NK_COLOR_SLIDER];
+ slider->bar_active = table[NK_COLOR_SLIDER];
+ slider->bar_filled = table[NK_COLOR_SLIDER_CURSOR];
+ slider->cursor_normal = nk_style_item_color(table[NK_COLOR_SLIDER_CURSOR]);
+ slider->cursor_hover = nk_style_item_color(table[NK_COLOR_SLIDER_CURSOR_HOVER]);
+ slider->cursor_active = nk_style_item_color(table[NK_COLOR_SLIDER_CURSOR_ACTIVE]);
+ slider->inc_symbol = NK_SYMBOL_TRIANGLE_RIGHT;
+ slider->dec_symbol = NK_SYMBOL_TRIANGLE_LEFT;
+ slider->cursor_size = nk_vec2(16,16);
+ slider->padding = nk_vec2(2,2);
+ slider->spacing = nk_vec2(2,2);
+ slider->userdata = nk_handle_ptr(0);
+ slider->show_buttons = nk_false;
+ slider->bar_height = 8;
+ slider->rounding = 0;
+ slider->draw_begin = 0;
+ slider->draw_end = 0;
+
+ /* slider buttons */
+ button = &style->slider.inc_button;
+ button->normal = nk_style_item_color(nk_rgb(40,40,40));
+ button->hover = nk_style_item_color(nk_rgb(42,42,42));
+ button->active = nk_style_item_color(nk_rgb(44,44,44));
+ button->border_color = nk_rgb(65,65,65);
+ button->text_background = nk_rgb(40,40,40);
+ button->text_normal = nk_rgb(175,175,175);
+ button->text_hover = nk_rgb(175,175,175);
+ button->text_active = nk_rgb(175,175,175);
+ button->padding = nk_vec2(8.0f,8.0f);
+ button->touch_padding = nk_vec2(0.0f,0.0f);
+ button->userdata = nk_handle_ptr(0);
+ button->text_alignment = NK_TEXT_CENTERED;
+ button->border = 1.0f;
+ button->rounding = 0.0f;
+ button->draw_begin = 0;
+ button->draw_end = 0;
+ style->slider.dec_button = style->slider.inc_button;
+
+ /* progressbar */
+ prog = &style->progress;
+ nk_zero_struct(*prog);
+ prog->normal = nk_style_item_color(table[NK_COLOR_SLIDER]);
+ prog->hover = nk_style_item_color(table[NK_COLOR_SLIDER]);
+ prog->active = nk_style_item_color(table[NK_COLOR_SLIDER]);
+ prog->cursor_normal = nk_style_item_color(table[NK_COLOR_SLIDER_CURSOR]);
+ prog->cursor_hover = nk_style_item_color(table[NK_COLOR_SLIDER_CURSOR_HOVER]);
+ prog->cursor_active = nk_style_item_color(table[NK_COLOR_SLIDER_CURSOR_ACTIVE]);
+ prog->border_color = nk_rgba(0,0,0,0);
+ prog->cursor_border_color = nk_rgba(0,0,0,0);
+ prog->userdata = nk_handle_ptr(0);
+ prog->padding = nk_vec2(4,4);
+ prog->rounding = 0;
+ prog->border = 0;
+ prog->cursor_rounding = 0;
+ prog->cursor_border = 0;
+ prog->draw_begin = 0;
+ prog->draw_end = 0;
+
+ /* scrollbars */
+ scroll = &style->scrollh;
+ nk_zero_struct(*scroll);
+ scroll->normal = nk_style_item_color(table[NK_COLOR_SCROLLBAR]);
+ scroll->hover = nk_style_item_color(table[NK_COLOR_SCROLLBAR]);
+ scroll->active = nk_style_item_color(table[NK_COLOR_SCROLLBAR]);
+ scroll->cursor_normal = nk_style_item_color(table[NK_COLOR_SCROLLBAR_CURSOR]);
+ scroll->cursor_hover = nk_style_item_color(table[NK_COLOR_SCROLLBAR_CURSOR_HOVER]);
+ scroll->cursor_active = nk_style_item_color(table[NK_COLOR_SCROLLBAR_CURSOR_ACTIVE]);
+ scroll->dec_symbol = NK_SYMBOL_CIRCLE_SOLID;
+ scroll->inc_symbol = NK_SYMBOL_CIRCLE_SOLID;
+ scroll->userdata = nk_handle_ptr(0);
+ scroll->border_color = table[NK_COLOR_SCROLLBAR];
+ scroll->cursor_border_color = table[NK_COLOR_SCROLLBAR];
+ scroll->padding = nk_vec2(0,0);
+ scroll->show_buttons = nk_false;
+ scroll->border = 0;
+ scroll->rounding = 0;
+ scroll->border_cursor = 0;
+ scroll->rounding_cursor = 0;
+ scroll->draw_begin = 0;
+ scroll->draw_end = 0;
+ style->scrollv = style->scrollh;
+
+ /* scrollbars buttons */
+ button = &style->scrollh.inc_button;
+ button->normal = nk_style_item_color(nk_rgb(40,40,40));
+ button->hover = nk_style_item_color(nk_rgb(42,42,42));
+ button->active = nk_style_item_color(nk_rgb(44,44,44));
+ button->border_color = nk_rgb(65,65,65);
+ button->text_background = nk_rgb(40,40,40);
+ button->text_normal = nk_rgb(175,175,175);
+ button->text_hover = nk_rgb(175,175,175);
+ button->text_active = nk_rgb(175,175,175);
+ button->padding = nk_vec2(4.0f,4.0f);
+ button->touch_padding = nk_vec2(0.0f,0.0f);
+ button->userdata = nk_handle_ptr(0);
+ button->text_alignment = NK_TEXT_CENTERED;
+ button->border = 1.0f;
+ button->rounding = 0.0f;
+ button->draw_begin = 0;
+ button->draw_end = 0;
+ style->scrollh.dec_button = style->scrollh.inc_button;
+ style->scrollv.inc_button = style->scrollh.inc_button;
+ style->scrollv.dec_button = style->scrollh.inc_button;
+
+ /* edit */
+ edit = &style->edit;
+ nk_zero_struct(*edit);
+ edit->normal = nk_style_item_color(table[NK_COLOR_EDIT]);
+ edit->hover = nk_style_item_color(table[NK_COLOR_EDIT]);
+ edit->active = nk_style_item_color(table[NK_COLOR_EDIT]);
+ edit->cursor_normal = table[NK_COLOR_TEXT];
+ edit->cursor_hover = table[NK_COLOR_TEXT];
+ edit->cursor_text_normal= table[NK_COLOR_EDIT];
+ edit->cursor_text_hover = table[NK_COLOR_EDIT];
+ edit->border_color = table[NK_COLOR_BORDER];
+ edit->text_normal = table[NK_COLOR_TEXT];
+ edit->text_hover = table[NK_COLOR_TEXT];
+ edit->text_active = table[NK_COLOR_TEXT];
+ edit->selected_normal = table[NK_COLOR_TEXT];
+ edit->selected_hover = table[NK_COLOR_TEXT];
+ edit->selected_text_normal = table[NK_COLOR_EDIT];
+ edit->selected_text_hover = table[NK_COLOR_EDIT];
+ edit->scrollbar_size = nk_vec2(10,10);
+ edit->scrollbar = style->scrollv;
+ edit->padding = nk_vec2(4,4);
+ edit->row_padding = 2;
+ edit->cursor_size = 4;
+ edit->border = 1;
+ edit->rounding = 0;
+
+ /* property */
+ property = &style->property;
+ nk_zero_struct(*property);
+ property->normal = nk_style_item_color(table[NK_COLOR_PROPERTY]);
+ property->hover = nk_style_item_color(table[NK_COLOR_PROPERTY]);
+ property->active = nk_style_item_color(table[NK_COLOR_PROPERTY]);
+ property->border_color = table[NK_COLOR_BORDER];
+ property->label_normal = table[NK_COLOR_TEXT];
+ property->label_hover = table[NK_COLOR_TEXT];
+ property->label_active = table[NK_COLOR_TEXT];
+ property->sym_left = NK_SYMBOL_TRIANGLE_LEFT;
+ property->sym_right = NK_SYMBOL_TRIANGLE_RIGHT;
+ property->userdata = nk_handle_ptr(0);
+ property->padding = nk_vec2(4,4);
+ property->border = 1;
+ property->rounding = 10;
+ property->draw_begin = 0;
+ property->draw_end = 0;
+
+ /* property buttons */
+ button = &style->property.dec_button;
+ nk_zero_struct(*button);
+ button->normal = nk_style_item_color(table[NK_COLOR_PROPERTY]);
+ button->hover = nk_style_item_color(table[NK_COLOR_PROPERTY]);
+ button->active = nk_style_item_color(table[NK_COLOR_PROPERTY]);
+ button->border_color = nk_rgba(0,0,0,0);
+ button->text_background = table[NK_COLOR_PROPERTY];
+ button->text_normal = table[NK_COLOR_TEXT];
+ button->text_hover = table[NK_COLOR_TEXT];
+ button->text_active = table[NK_COLOR_TEXT];
+ button->padding = nk_vec2(0.0f,0.0f);
+ button->touch_padding = nk_vec2(0.0f,0.0f);
+ button->userdata = nk_handle_ptr(0);
+ button->text_alignment = NK_TEXT_CENTERED;
+ button->border = 0.0f;
+ button->rounding = 0.0f;
+ button->draw_begin = 0;
+ button->draw_end = 0;
+ style->property.inc_button = style->property.dec_button;
+
+ /* property edit */
+ edit = &style->property.edit;
+ nk_zero_struct(*edit);
+ edit->normal = nk_style_item_color(table[NK_COLOR_PROPERTY]);
+ edit->hover = nk_style_item_color(table[NK_COLOR_PROPERTY]);
+ edit->active = nk_style_item_color(table[NK_COLOR_PROPERTY]);
+ edit->border_color = nk_rgba(0,0,0,0);
+ edit->cursor_normal = table[NK_COLOR_TEXT];
+ edit->cursor_hover = table[NK_COLOR_TEXT];
+ edit->cursor_text_normal= table[NK_COLOR_EDIT];
+ edit->cursor_text_hover = table[NK_COLOR_EDIT];
+ edit->text_normal = table[NK_COLOR_TEXT];
+ edit->text_hover = table[NK_COLOR_TEXT];
+ edit->text_active = table[NK_COLOR_TEXT];
+ edit->selected_normal = table[NK_COLOR_TEXT];
+ edit->selected_hover = table[NK_COLOR_TEXT];
+ edit->selected_text_normal = table[NK_COLOR_EDIT];
+ edit->selected_text_hover = table[NK_COLOR_EDIT];
+ edit->padding = nk_vec2(0,0);
+ edit->cursor_size = 8;
+ edit->border = 0;
+ edit->rounding = 0;
+
+ /* chart */
+ chart = &style->chart;
+ nk_zero_struct(*chart);
+ chart->background = nk_style_item_color(table[NK_COLOR_CHART]);
+ chart->border_color = table[NK_COLOR_BORDER];
+ chart->selected_color = table[NK_COLOR_CHART_COLOR_HIGHLIGHT];
+ chart->color = table[NK_COLOR_CHART_COLOR];
+ chart->padding = nk_vec2(4,4);
+ chart->border = 0;
+ chart->rounding = 0;
+
+ /* combo */
+ combo = &style->combo;
+ combo->normal = nk_style_item_color(table[NK_COLOR_COMBO]);
+ combo->hover = nk_style_item_color(table[NK_COLOR_COMBO]);
+ combo->active = nk_style_item_color(table[NK_COLOR_COMBO]);
+ combo->border_color = table[NK_COLOR_BORDER];
+ combo->label_normal = table[NK_COLOR_TEXT];
+ combo->label_hover = table[NK_COLOR_TEXT];
+ combo->label_active = table[NK_COLOR_TEXT];
+ combo->sym_normal = NK_SYMBOL_TRIANGLE_DOWN;
+ combo->sym_hover = NK_SYMBOL_TRIANGLE_DOWN;
+ combo->sym_active = NK_SYMBOL_TRIANGLE_DOWN;
+ combo->content_padding = nk_vec2(4,4);
+ combo->button_padding = nk_vec2(0,4);
+ combo->spacing = nk_vec2(4,0);
+ combo->border = 1;
+ combo->rounding = 0;
+
+ /* combo button */
+ button = &style->combo.button;
+ nk_zero_struct(*button);
+ button->normal = nk_style_item_color(table[NK_COLOR_COMBO]);
+ button->hover = nk_style_item_color(table[NK_COLOR_COMBO]);
+ button->active = nk_style_item_color(table[NK_COLOR_COMBO]);
+ button->border_color = nk_rgba(0,0,0,0);
+ button->text_background = table[NK_COLOR_COMBO];
+ button->text_normal = table[NK_COLOR_TEXT];
+ button->text_hover = table[NK_COLOR_TEXT];
+ button->text_active = table[NK_COLOR_TEXT];
+ button->padding = nk_vec2(2.0f,2.0f);
+ button->touch_padding = nk_vec2(0.0f,0.0f);
+ button->userdata = nk_handle_ptr(0);
+ button->text_alignment = NK_TEXT_CENTERED;
+ button->border = 0.0f;
+ button->rounding = 0.0f;
+ button->draw_begin = 0;
+ button->draw_end = 0;
+
+ /* tab */
+ tab = &style->tab;
+ tab->background = nk_style_item_color(table[NK_COLOR_TAB_HEADER]);
+ tab->border_color = table[NK_COLOR_BORDER];
+ tab->text = table[NK_COLOR_TEXT];
+ tab->sym_minimize = NK_SYMBOL_TRIANGLE_RIGHT;
+ tab->sym_maximize = NK_SYMBOL_TRIANGLE_DOWN;
+ tab->padding = nk_vec2(4,4);
+ tab->spacing = nk_vec2(4,4);
+ tab->indent = 10.0f;
+ tab->border = 1;
+ tab->rounding = 0;
+
+ /* tab button */
+ button = &style->tab.tab_minimize_button;
+ nk_zero_struct(*button);
+ button->normal = nk_style_item_color(table[NK_COLOR_TAB_HEADER]);
+ button->hover = nk_style_item_color(table[NK_COLOR_TAB_HEADER]);
+ button->active = nk_style_item_color(table[NK_COLOR_TAB_HEADER]);
+ button->border_color = nk_rgba(0,0,0,0);
+ button->text_background = table[NK_COLOR_TAB_HEADER];
+ button->text_normal = table[NK_COLOR_TEXT];
+ button->text_hover = table[NK_COLOR_TEXT];
+ button->text_active = table[NK_COLOR_TEXT];
+ button->padding = nk_vec2(2.0f,2.0f);
+ button->touch_padding = nk_vec2(0.0f,0.0f);
+ button->userdata = nk_handle_ptr(0);
+ button->text_alignment = NK_TEXT_CENTERED;
+ button->border = 0.0f;
+ button->rounding = 0.0f;
+ button->draw_begin = 0;
+ button->draw_end = 0;
+ style->tab.tab_maximize_button =*button;
+
+ /* node button */
+ button = &style->tab.node_minimize_button;
+ nk_zero_struct(*button);
+ button->normal = nk_style_item_color(table[NK_COLOR_WINDOW]);
+ button->hover = nk_style_item_color(table[NK_COLOR_WINDOW]);
+ button->active = nk_style_item_color(table[NK_COLOR_WINDOW]);
+ button->border_color = nk_rgba(0,0,0,0);
+ button->text_background = table[NK_COLOR_TAB_HEADER];
+ button->text_normal = table[NK_COLOR_TEXT];
+ button->text_hover = table[NK_COLOR_TEXT];
+ button->text_active = table[NK_COLOR_TEXT];
+ button->padding = nk_vec2(2.0f,2.0f);
+ button->touch_padding = nk_vec2(0.0f,0.0f);
+ button->userdata = nk_handle_ptr(0);
+ button->text_alignment = NK_TEXT_CENTERED;
+ button->border = 0.0f;
+ button->rounding = 0.0f;
+ button->draw_begin = 0;
+ button->draw_end = 0;
+ style->tab.node_maximize_button =*button;
+
+ /* window header */
+ win = &style->window;
+ win->header.align = NK_HEADER_RIGHT;
+ win->header.close_symbol = NK_SYMBOL_X;
+ win->header.minimize_symbol = NK_SYMBOL_MINUS;
+ win->header.maximize_symbol = NK_SYMBOL_PLUS;
+ win->header.normal = nk_style_item_color(table[NK_COLOR_HEADER]);
+ win->header.hover = nk_style_item_color(table[NK_COLOR_HEADER]);
+ win->header.active = nk_style_item_color(table[NK_COLOR_HEADER]);
+ win->header.label_normal = table[NK_COLOR_TEXT];
+ win->header.label_hover = table[NK_COLOR_TEXT];
+ win->header.label_active = table[NK_COLOR_TEXT];
+ win->header.label_padding = nk_vec2(4,4);
+ win->header.padding = nk_vec2(4,4);
+ win->header.spacing = nk_vec2(0,0);
+
+ /* window header close button */
+ button = &style->window.header.close_button;
+ nk_zero_struct(*button);
+ button->normal = nk_style_item_color(table[NK_COLOR_HEADER]);
+ button->hover = nk_style_item_color(table[NK_COLOR_HEADER]);
+ button->active = nk_style_item_color(table[NK_COLOR_HEADER]);
+ button->border_color = nk_rgba(0,0,0,0);
+ button->text_background = table[NK_COLOR_HEADER];
+ button->text_normal = table[NK_COLOR_TEXT];
+ button->text_hover = table[NK_COLOR_TEXT];
+ button->text_active = table[NK_COLOR_TEXT];
+ button->padding = nk_vec2(0.0f,0.0f);
+ button->touch_padding = nk_vec2(0.0f,0.0f);
+ button->userdata = nk_handle_ptr(0);
+ button->text_alignment = NK_TEXT_CENTERED;
+ button->border = 0.0f;
+ button->rounding = 0.0f;
+ button->draw_begin = 0;
+ button->draw_end = 0;
+
+ /* window header minimize button */
+ button = &style->window.header.minimize_button;
+ nk_zero_struct(*button);
+ button->normal = nk_style_item_color(table[NK_COLOR_HEADER]);
+ button->hover = nk_style_item_color(table[NK_COLOR_HEADER]);
+ button->active = nk_style_item_color(table[NK_COLOR_HEADER]);
+ button->border_color = nk_rgba(0,0,0,0);
+ button->text_background = table[NK_COLOR_HEADER];
+ button->text_normal = table[NK_COLOR_TEXT];
+ button->text_hover = table[NK_COLOR_TEXT];
+ button->text_active = table[NK_COLOR_TEXT];
+ button->padding = nk_vec2(0.0f,0.0f);
+ button->touch_padding = nk_vec2(0.0f,0.0f);
+ button->userdata = nk_handle_ptr(0);
+ button->text_alignment = NK_TEXT_CENTERED;
+ button->border = 0.0f;
+ button->rounding = 0.0f;
+ button->draw_begin = 0;
+ button->draw_end = 0;
+
+ /* window */
+ win->background = table[NK_COLOR_WINDOW];
+ win->fixed_background = nk_style_item_color(table[NK_COLOR_WINDOW]);
+ win->border_color = table[NK_COLOR_BORDER];
+ win->popup_border_color = table[NK_COLOR_BORDER];
+ win->combo_border_color = table[NK_COLOR_BORDER];
+ win->contextual_border_color = table[NK_COLOR_BORDER];
+ win->menu_border_color = table[NK_COLOR_BORDER];
+ win->group_border_color = table[NK_COLOR_BORDER];
+ win->tooltip_border_color = table[NK_COLOR_BORDER];
+ win->scaler = nk_style_item_color(table[NK_COLOR_TEXT]);
+
+ win->rounding = 0.0f;
+ win->spacing = nk_vec2(4,4);
+ win->scrollbar_size = nk_vec2(10,10);
+ win->min_size = nk_vec2(64,64);
+
+ win->combo_border = 1.0f;
+ win->contextual_border = 1.0f;
+ win->menu_border = 1.0f;
+ win->group_border = 1.0f;
+ win->tooltip_border = 1.0f;
+ win->popup_border = 1.0f;
+ win->border = 2.0f;
+ win->min_row_height_padding = 8;
+
+ win->padding = nk_vec2(4,4);
+ win->group_padding = nk_vec2(4,4);
+ win->popup_padding = nk_vec2(4,4);
+ win->combo_padding = nk_vec2(4,4);
+ win->contextual_padding = nk_vec2(4,4);
+ win->menu_padding = nk_vec2(4,4);
+ win->tooltip_padding = nk_vec2(4,4);
+}
+NK_API void
+nk_style_set_font(struct nk_context *ctx, const struct nk_user_font *font)
+{
+ struct nk_style *style;
+ NK_ASSERT(ctx);
+
+ if (!ctx) return;
+ style = &ctx->style;
+ style->font = font;
+ ctx->stacks.fonts.head = 0;
+ if (ctx->current)
+ nk_layout_reset_min_row_height(ctx);
+}
+NK_API int
+nk_style_push_font(struct nk_context *ctx, const struct nk_user_font *font)
+{
+ struct nk_config_stack_user_font *font_stack;
+ struct nk_config_stack_user_font_element *element;
+
+ NK_ASSERT(ctx);
+ if (!ctx) return 0;
+
+ font_stack = &ctx->stacks.fonts;
+ NK_ASSERT(font_stack->head < (int)NK_LEN(font_stack->elements));
+ if (font_stack->head >= (int)NK_LEN(font_stack->elements))
+ return 0;
+
+ element = &font_stack->elements[font_stack->head++];
+ element->address = &ctx->style.font;
+ element->old_value = ctx->style.font;
+ ctx->style.font = font;
+ return 1;
+}
+NK_API int
+nk_style_pop_font(struct nk_context *ctx)
+{
+ struct nk_config_stack_user_font *font_stack;
+ struct nk_config_stack_user_font_element *element;
+
+ NK_ASSERT(ctx);
+ if (!ctx) return 0;
+
+ font_stack = &ctx->stacks.fonts;
+ NK_ASSERT(font_stack->head > 0);
+ if (font_stack->head < 1)
+ return 0;
+
+ element = &font_stack->elements[--font_stack->head];
+ *element->address = element->old_value;
+ return 1;
+}
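+/* Usage sketch for the font stack above (a minimal example; `big_font` is an
+ * application-owned nk_user_font and not part of this API):
+ *
+ *      if (nk_style_push_font(ctx, &big_font)) {
+ *          nk_label(ctx, "large text", NK_TEXT_LEFT);
+ *          nk_style_pop_font(ctx);
+ *      }
+ *
+ * A push returns 0 once the fixed-size font stack is full, so the matching pop
+ * is only issued when the push succeeded. */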
+#define NK_STYLE_PUSH_IMPLEMENTATION(prefix, type, stack) \
+nk_style_push_##type(struct nk_context *ctx, prefix##_##type *address, prefix##_##type value)\
+{\
+ struct nk_config_stack_##type * type_stack;\
+ struct nk_config_stack_##type##_element *element;\
+ NK_ASSERT(ctx);\
+ if (!ctx) return 0;\
+ type_stack = &ctx->stacks.stack;\
+ NK_ASSERT(type_stack->head < (int)NK_LEN(type_stack->elements));\
+ if (type_stack->head >= (int)NK_LEN(type_stack->elements))\
+ return 0;\
+ element = &type_stack->elements[type_stack->head++];\
+ element->address = address;\
+ element->old_value = *address;\
+ *address = value;\
+ return 1;\
+}
+#define NK_STYLE_POP_IMPLEMENTATION(type, stack) \
+nk_style_pop_##type(struct nk_context *ctx)\
+{\
+ struct nk_config_stack_##type *type_stack;\
+ struct nk_config_stack_##type##_element *element;\
+ NK_ASSERT(ctx);\
+ if (!ctx) return 0;\
+ type_stack = &ctx->stacks.stack;\
+ NK_ASSERT(type_stack->head > 0);\
+ if (type_stack->head < 1)\
+ return 0;\
+ element = &type_stack->elements[--type_stack->head];\
+ *element->address = element->old_value;\
+ return 1;\
+}
+NK_API int NK_STYLE_PUSH_IMPLEMENTATION(struct nk, style_item, style_items)
+NK_API int NK_STYLE_PUSH_IMPLEMENTATION(nk, float, floats)
+NK_API int NK_STYLE_PUSH_IMPLEMENTATION(struct nk, vec2, vectors)
+NK_API int NK_STYLE_PUSH_IMPLEMENTATION(nk, flags, flags)
+NK_API int NK_STYLE_PUSH_IMPLEMENTATION(struct nk, color, colors)
+
+NK_API int NK_STYLE_POP_IMPLEMENTATION(style_item, style_items)
+NK_API int NK_STYLE_POP_IMPLEMENTATION(float, floats)
+NK_API int NK_STYLE_POP_IMPLEMENTATION(vec2, vectors)
+NK_API int NK_STYLE_POP_IMPLEMENTATION(flags, flags)
+NK_API int NK_STYLE_POP_IMPLEMENTATION(color, colors)
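+
+/* The macros above generate nk_style_push_style_item/float/vec2/flags/color and
+ * their matching pop functions. A minimal sketch (the chosen colors and style
+ * fields are illustrative):
+ *
+ *      nk_style_push_color(ctx, &ctx->style.window.background, nk_rgb(30,30,30));
+ *      nk_style_push_float(ctx, &ctx->style.window.rounding, 4.0f);
+ *      ...draw widgets with the temporary style...
+ *      nk_style_pop_float(ctx);
+ *      nk_style_pop_color(ctx);
+ *
+ * Each push stores the old value on a bounded stack and each pop restores it,
+ * so pushes and pops have to be balanced in LIFO order. */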
+
+NK_API int
+nk_style_set_cursor(struct nk_context *ctx, enum nk_style_cursor c)
+{
+ struct nk_style *style;
+ NK_ASSERT(ctx);
+ if (!ctx) return 0;
+ style = &ctx->style;
+ if (style->cursors[c]) {
+ style->cursor_active = style->cursors[c];
+ return 1;
+ }
+ return 0;
+}
+NK_API void
+nk_style_show_cursor(struct nk_context *ctx)
+{
+ ctx->style.cursor_visible = nk_true;
+}
+NK_API void
+nk_style_hide_cursor(struct nk_context *ctx)
+{
+ ctx->style.cursor_visible = nk_false;
+}
+NK_API void
+nk_style_load_cursor(struct nk_context *ctx, enum nk_style_cursor cursor,
+ const struct nk_cursor *c)
+{
+ struct nk_style *style;
+ NK_ASSERT(ctx);
+ if (!ctx) return;
+ style = &ctx->style;
+ style->cursors[cursor] = c;
+}
+NK_API void
+nk_style_load_all_cursors(struct nk_context *ctx, struct nk_cursor *cursors)
+{
+ int i = 0;
+ struct nk_style *style;
+ NK_ASSERT(ctx);
+ if (!ctx) return;
+ style = &ctx->style;
+ for (i = 0; i < NK_CURSOR_COUNT; ++i)
+ style->cursors[i] = &cursors[i];
+ style->cursor_visible = nk_true;
+}
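+/* Cursor usage sketch: cursors are typically loaded once from a baked font
+ * atlas and then selected per frame (the `atlas` variable and its cursor array
+ * are an assumption about the font-baking backend in use):
+ *
+ *      nk_style_load_all_cursors(ctx, atlas.cursors);
+ *      nk_style_hide_cursor(ctx);                  // keep using the OS cursor
+ *      ...
+ *      nk_style_set_cursor(ctx, NK_CURSOR_MOVE);   // returns 0 if not loaded
+ */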
+
+
+
+
+
+/* ==============================================================
+ *
+ * CONTEXT
+ *
+ * ===============================================================*/
+NK_INTERN void
+nk_setup(struct nk_context *ctx, const struct nk_user_font *font)
+{
+ NK_ASSERT(ctx);
+ if (!ctx) return;
+ nk_zero_struct(*ctx);
+ nk_style_default(ctx);
+ ctx->seq = 1;
+ if (font) ctx->style.font = font;
+#ifdef NK_INCLUDE_VERTEX_BUFFER_OUTPUT
+ nk_draw_list_init(&ctx->draw_list);
+#endif
+}
+#ifdef NK_INCLUDE_DEFAULT_ALLOCATOR
+NK_API int
+nk_init_default(struct nk_context *ctx, const struct nk_user_font *font)
+{
+ struct nk_allocator alloc;
+ alloc.userdata.ptr = 0;
+ alloc.alloc = nk_malloc;
+ alloc.free = nk_mfree;
+ return nk_init(ctx, &alloc, font);
+}
+#endif
+NK_API int
+nk_init_fixed(struct nk_context *ctx, void *memory, nk_size size,
+ const struct nk_user_font *font)
+{
+ NK_ASSERT(memory);
+ if (!memory) return 0;
+ nk_setup(ctx, font);
+ nk_buffer_init_fixed(&ctx->memory, memory, size);
+ ctx->use_pool = nk_false;
+ return 1;
+}
+NK_API int
+nk_init_custom(struct nk_context *ctx, struct nk_buffer *cmds,
+ struct nk_buffer *pool, const struct nk_user_font *font)
+{
+ NK_ASSERT(cmds);
+ NK_ASSERT(pool);
+ if (!cmds || !pool) return 0;
+
+ nk_setup(ctx, font);
+ ctx->memory = *cmds;
+ if (pool->type == NK_BUFFER_FIXED) {
+ /* take memory from buffer and alloc fixed pool */
+ nk_pool_init_fixed(&ctx->pool, pool->memory.ptr, pool->memory.size);
+ } else {
+ /* create dynamic pool from buffer allocator */
+ struct nk_allocator *alloc = &pool->pool;
+ nk_pool_init(&ctx->pool, alloc, NK_POOL_DEFAULT_CAPACITY);
+ }
+ ctx->use_pool = nk_true;
+ return 1;
+}
+NK_API int
+nk_init(struct nk_context *ctx, struct nk_allocator *alloc,
+ const struct nk_user_font *font)
+{
+ NK_ASSERT(alloc);
+ if (!alloc) return 0;
+ nk_setup(ctx, font);
+ nk_buffer_init(&ctx->memory, alloc, NK_DEFAULT_COMMAND_BUFFER_SIZE);
+ nk_pool_init(&ctx->pool, alloc, NK_POOL_DEFAULT_CAPACITY);
+ ctx->use_pool = nk_true;
+ return 1;
+}
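+/* Initialization sketch for the variants above (`font` is an application
+ * provided nk_user_font; the 64 KB buffer size is an arbitrary example):
+ *
+ *      struct nk_context ctx;
+ *      nk_init_default(&ctx, &font);    // needs NK_INCLUDE_DEFAULT_ALLOCATOR, grows on demand
+ *
+ *      static char memory[64 * 1024];
+ *      nk_init_fixed(&ctx, memory, sizeof(memory), &font);   // no heap allocation at all
+ *
+ * nk_init and nk_init_custom sit in between: the caller supplies an allocator
+ * or pre-built buffers and the context draws command and pool memory from them. */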
+#ifdef NK_INCLUDE_COMMAND_USERDATA
+NK_API void
+nk_set_user_data(struct nk_context *ctx, nk_handle handle)
+{
+ if (!ctx) return;
+ ctx->userdata = handle;
+ if (ctx->current)
+ ctx->current->buffer.userdata = handle;
+}
+#endif
+NK_API void
+nk_free(struct nk_context *ctx)
+{
+ NK_ASSERT(ctx);
+ if (!ctx) return;
+ nk_buffer_free(&ctx->memory);
+ if (ctx->use_pool)
+ nk_pool_free(&ctx->pool);
+
+ nk_zero(&ctx->input, sizeof(ctx->input));
+ nk_zero(&ctx->style, sizeof(ctx->style));
+ nk_zero(&ctx->memory, sizeof(ctx->memory));
+
+ ctx->seq = 0;
+ ctx->build = 0;
+ ctx->begin = 0;
+ ctx->end = 0;
+ ctx->active = 0;
+ ctx->current = 0;
+ ctx->freelist = 0;
+ ctx->count = 0;
+}
+NK_API void
+nk_clear(struct nk_context *ctx)
+{
+ struct nk_window *iter;
+ struct nk_window *next;
+ NK_ASSERT(ctx);
+
+ if (!ctx) return;
+ if (ctx->use_pool)
+ nk_buffer_clear(&ctx->memory);
+ else nk_buffer_reset(&ctx->memory, NK_BUFFER_FRONT);
+
+ ctx->build = 0;
+ ctx->memory.calls = 0;
+ ctx->last_widget_state = 0;
+ ctx->style.cursor_active = ctx->style.cursors[NK_CURSOR_ARROW];
+ NK_MEMSET(&ctx->overlay, 0, sizeof(ctx->overlay));
+
+ /* garbage collector */
+ iter = ctx->begin;
+ while (iter) {
+ /* make sure valid minimized windows do not get removed */
+ if ((iter->flags & NK_WINDOW_MINIMIZED) &&
+ !(iter->flags & NK_WINDOW_CLOSED) &&
+ iter->seq == ctx->seq) {
+ iter = iter->next;
+ continue;
+ }
+ /* remove hotness from hidden or closed windows */
+ if (((iter->flags & NK_WINDOW_HIDDEN) ||
+ (iter->flags & NK_WINDOW_CLOSED)) &&
+ iter == ctx->active) {
+ ctx->active = iter->prev;
+ ctx->end = iter->prev;
+ if (!ctx->end)
+ ctx->begin = 0;
+ if (ctx->active)
+ ctx->active->flags &= ~(unsigned)NK_WINDOW_ROM;
+ }
+ /* free unused popup windows */
+ if (iter->popup.win && iter->popup.win->seq != ctx->seq) {
+ nk_free_window(ctx, iter->popup.win);
+ iter->popup.win = 0;
+ }
+ /* remove unused window state tables */
+ {struct nk_table *n, *it = iter->tables;
+ while (it) {
+ n = it->next;
+ if (it->seq != ctx->seq) {
+ nk_remove_table(iter, it);
+ nk_zero(it, sizeof(union nk_page_data));
+ nk_free_table(ctx, it);
+ if (it == iter->tables)
+ iter->tables = n;
+ } it = n;
+ }}
+ /* window itself is not used anymore so free */
+ if (iter->seq != ctx->seq || iter->flags & NK_WINDOW_CLOSED) {
+ next = iter->next;
+ nk_remove_window(ctx, iter);
+ nk_free_window(ctx, iter);
+ iter = next;
+ } else iter = iter->next;
+ }
+ ctx->seq++;
+}
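+/* Per-frame sketch showing where nk_clear fits (input feeding and rendering are
+ * placeholders for whatever backend the application uses):
+ *
+ *      nk_input_begin(&ctx);
+ *      ...feed mouse/keyboard events...
+ *      nk_input_end(&ctx);
+ *
+ *      if (nk_begin(&ctx, "Demo", nk_rect(50,50,200,200),
+ *          NK_WINDOW_BORDER|NK_WINDOW_TITLE)) {
+ *          ...widgets...
+ *      }
+ *      nk_end(&ctx);
+ *
+ *      ...translate draw commands via nk__begin/nk__next and render...
+ *      nk_clear(&ctx);   // resets per-frame memory and garbage collects dead windows
+ */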
+NK_LIB void
+nk_start_buffer(struct nk_context *ctx, struct nk_command_buffer *buffer)
+{
+ NK_ASSERT(ctx);
+ NK_ASSERT(buffer);
+ if (!ctx || !buffer) return;
+ buffer->begin = ctx->memory.allocated;
+ buffer->end = buffer->begin;
+ buffer->last = buffer->begin;
+ buffer->clip = nk_null_rect;
+}
+NK_LIB void
+nk_start(struct nk_context *ctx, struct nk_window *win)
+{
+ NK_ASSERT(ctx);
+ NK_ASSERT(win);
+ nk_start_buffer(ctx, &win->buffer);
+}
+NK_LIB void
+nk_start_popup(struct nk_context *ctx, struct nk_window *win)
+{
+ struct nk_popup_buffer *buf;
+ NK_ASSERT(ctx);
+ NK_ASSERT(win);
+ if (!ctx || !win) return;
+
+ /* save buffer fill state for popup */
+ buf = &win->popup.buf;
+ buf->begin = win->buffer.end;
+ buf->end = win->buffer.end;
+ buf->parent = win->buffer.last;
+ buf->last = buf->begin;
+ buf->active = nk_true;
+}
+NK_LIB void
+nk_finish_popup(struct nk_context *ctx, struct nk_window *win)
+{
+ struct nk_popup_buffer *buf;
+ NK_ASSERT(ctx);
+ NK_ASSERT(win);
+ if (!ctx || !win) return;
+
+ buf = &win->popup.buf;
+ buf->last = win->buffer.last;
+ buf->end = win->buffer.end;
+}
+NK_LIB void
+nk_finish_buffer(struct nk_context *ctx, struct nk_command_buffer *buffer)
+{
+ NK_ASSERT(ctx);
+ NK_ASSERT(buffer);
+ if (!ctx || !buffer) return;
+ buffer->end = ctx->memory.allocated;
+}
+NK_LIB void
+nk_finish(struct nk_context *ctx, struct nk_window *win)
+{
+ struct nk_popup_buffer *buf;
+ struct nk_command *parent_last;
+ void *memory;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(win);
+ if (!ctx || !win) return;
+ nk_finish_buffer(ctx, &win->buffer);
+ if (!win->popup.buf.active) return;
+
+ buf = &win->popup.buf;
+ memory = ctx->memory.memory.ptr;
+ parent_last = nk_ptr_add(struct nk_command, memory, buf->parent);
+ parent_last->next = buf->end;
+}
+NK_LIB void
+nk_build(struct nk_context *ctx)
+{
+ struct nk_window *it = 0;
+ struct nk_command *cmd = 0;
+ nk_byte *buffer = 0;
+
+ /* draw cursor overlay */
+ if (!ctx->style.cursor_active)
+ ctx->style.cursor_active = ctx->style.cursors[NK_CURSOR_ARROW];
+ if (ctx->style.cursor_active && !ctx->input.mouse.grabbed && ctx->style.cursor_visible) {
+ struct nk_rect mouse_bounds;
+ const struct nk_cursor *cursor = ctx->style.cursor_active;
+ nk_command_buffer_init(&ctx->overlay, &ctx->memory, NK_CLIPPING_OFF);
+ nk_start_buffer(ctx, &ctx->overlay);
+
+ mouse_bounds.x = ctx->input.mouse.pos.x - cursor->offset.x;
+ mouse_bounds.y = ctx->input.mouse.pos.y - cursor->offset.y;
+ mouse_bounds.w = cursor->size.x;
+ mouse_bounds.h = cursor->size.y;
+
+ nk_draw_image(&ctx->overlay, mouse_bounds, &cursor->img, nk_white);
+ nk_finish_buffer(ctx, &ctx->overlay);
+ }
+ /* build one big draw command list out of all window buffers */
+ it = ctx->begin;
+ buffer = (nk_byte*)ctx->memory.memory.ptr;
+ while (it != 0) {
+ struct nk_window *next = it->next;
+ if (it->buffer.last == it->buffer.begin || (it->flags & NK_WINDOW_HIDDEN)||
+ it->seq != ctx->seq)
+ goto cont;
+
+ cmd = nk_ptr_add(struct nk_command, buffer, it->buffer.last);
+ while (next && ((next->buffer.last == next->buffer.begin) ||
+ (next->flags & NK_WINDOW_HIDDEN) || next->seq != ctx->seq))
+ next = next->next; /* skip empty command buffers */
+
+ if (next) cmd->next = next->buffer.begin;
+ cont: it = next;
+ }
+ /* append all popup draw commands into lists */
+ it = ctx->begin;
+ while (it != 0) {
+ struct nk_window *next = it->next;
+ struct nk_popup_buffer *buf;
+ if (!it->popup.buf.active)
+ goto skip;
+
+ buf = &it->popup.buf;
+ cmd->next = buf->begin;
+ cmd = nk_ptr_add(struct nk_command, buffer, buf->last);
+ buf->active = nk_false;
+ skip: it = next;
+ }
+ if (cmd) {
+ /* append overlay commands */
+ if (ctx->overlay.end != ctx->overlay.begin)
+ cmd->next = ctx->overlay.begin;
+ else cmd->next = ctx->memory.allocated;
+ }
+}
+NK_API const struct nk_command*
+nk__begin(struct nk_context *ctx)
+{
+ struct nk_window *iter;
+ nk_byte *buffer;
+ NK_ASSERT(ctx);
+ if (!ctx) return 0;
+ if (!ctx->count) return 0;
+
+ buffer = (nk_byte*)ctx->memory.memory.ptr;
+ if (!ctx->build) {
+ nk_build(ctx);
+ ctx->build = nk_true;
+ }
+ iter = ctx->begin;
+ while (iter && ((iter->buffer.begin == iter->buffer.end) ||
+ (iter->flags & NK_WINDOW_HIDDEN) || iter->seq != ctx->seq))
+ iter = iter->next;
+ if (!iter) return 0;
+ return nk_ptr_add_const(struct nk_command, buffer, iter->buffer.begin);
+}
+
+NK_API const struct nk_command*
+nk__next(struct nk_context *ctx, const struct nk_command *cmd)
+{
+ nk_byte *buffer;
+ const struct nk_command *next;
+ NK_ASSERT(ctx);
+ if (!ctx || !cmd || !ctx->count) return 0;
+ if (cmd->next >= ctx->memory.allocated) return 0;
+ buffer = (nk_byte*)ctx->memory.memory.ptr;
+ next = nk_ptr_add_const(struct nk_command, buffer, cmd->next);
+ return next;
+}
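+/* Draw-command iteration sketch built on the two functions above (the actual
+ * command handling is backend specific and only hinted at):
+ *
+ *      const struct nk_command *cmd;
+ *      for (cmd = nk__begin(&ctx); cmd != 0; cmd = nk__next(&ctx, cmd)) {
+ *          switch (cmd->type) {
+ *          case NK_COMMAND_RECT_FILLED: ...; break;
+ *          default: break;
+ *          }
+ *      }
+ *
+ * The public nk_foreach convenience macro expands to essentially this loop. */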
+
+
+
+
+
+
+/* ===============================================================
+ *
+ * POOL
+ *
+ * ===============================================================*/
+NK_LIB void
+nk_pool_init(struct nk_pool *pool, struct nk_allocator *alloc,
+ unsigned int capacity)
+{
+ nk_zero(pool, sizeof(*pool));
+ pool->alloc = *alloc;
+ pool->capacity = capacity;
+ pool->type = NK_BUFFER_DYNAMIC;
+ pool->pages = 0;
+}
+NK_LIB void
+nk_pool_free(struct nk_pool *pool)
+{
+ struct nk_page *iter;
+ if (!pool) return;
+ if (pool->type == NK_BUFFER_FIXED) return;
+ iter = pool->pages;
+ while (iter) {
+ struct nk_page *next = iter->next;
+ pool->alloc.free(pool->alloc.userdata, iter);
+ iter = next;
+ }
+}
+NK_LIB void
+nk_pool_init_fixed(struct nk_pool *pool, void *memory, nk_size size)
+{
+ nk_zero(pool, sizeof(*pool));
+ NK_ASSERT(size >= sizeof(struct nk_page));
+ if (size < sizeof(struct nk_page)) return;
+ pool->capacity = (unsigned)(size - sizeof(struct nk_page)) / sizeof(struct nk_page_element);
+ pool->pages = (struct nk_page*)memory;
+ pool->type = NK_BUFFER_FIXED;
+ pool->size = size;
+}
+NK_LIB struct nk_page_element*
+nk_pool_alloc(struct nk_pool *pool)
+{
+ if (!pool->pages || pool->pages->size >= pool->capacity) {
+ /* allocate new page */
+ struct nk_page *page;
+ if (pool->type == NK_BUFFER_FIXED) {
+ NK_ASSERT(pool->pages);
+ if (!pool->pages) return 0;
+ NK_ASSERT(pool->pages->size < pool->capacity);
+ return 0;
+ } else {
+ nk_size size = sizeof(struct nk_page);
+ size += NK_POOL_DEFAULT_CAPACITY * sizeof(union nk_page_data);
+ page = (struct nk_page*)pool->alloc.alloc(pool->alloc.userdata,0, size);
+ page->next = pool->pages;
+ pool->pages = page;
+ page->size = 0;
+ }
+ } return &pool->pages->win[pool->pages->size++];
+}
+
+
+
+
+
+/* ===============================================================
+ *
+ * PAGE ELEMENT
+ *
+ * ===============================================================*/
+NK_LIB struct nk_page_element*
+nk_create_page_element(struct nk_context *ctx)
+{
+ struct nk_page_element *elem;
+ if (ctx->freelist) {
+ /* unlink page element from free list */
+ elem = ctx->freelist;
+ ctx->freelist = elem->next;
+ } else if (ctx->use_pool) {
+ /* allocate page element from memory pool */
+ elem = nk_pool_alloc(&ctx->pool);
+ NK_ASSERT(elem);
+ if (!elem) return 0;
+ } else {
+ /* allocate new page element from back of fixed size memory buffer */
+ NK_STORAGE const nk_size size = sizeof(struct nk_page_element);
+ NK_STORAGE const nk_size align = NK_ALIGNOF(struct nk_page_element);
+ elem = (struct nk_page_element*)nk_buffer_alloc(&ctx->memory, NK_BUFFER_BACK, size, align);
+ NK_ASSERT(elem);
+ if (!elem) return 0;
+ }
+ nk_zero_struct(*elem);
+ elem->next = 0;
+ elem->prev = 0;
+ return elem;
+}
+NK_LIB void
+nk_link_page_element_into_freelist(struct nk_context *ctx,
+ struct nk_page_element *elem)
+{
+ /* link page element into freelist */
+ if (!ctx->freelist) {
+ ctx->freelist = elem;
+ } else {
+ elem->next = ctx->freelist;
+ ctx->freelist = elem;
+ }
+}
+NK_LIB void
+nk_free_page_element(struct nk_context *ctx, struct nk_page_element *elem)
+{
+ /* we have a pool so just add to free list */
+ if (ctx->use_pool) {
+ nk_link_page_element_into_freelist(ctx, elem);
+ return;
+ }
+ /* if possible remove last element from back of fixed memory buffer */
+ {void *elem_end = (void*)(elem + 1);
+ void *buffer_end = (nk_byte*)ctx->memory.memory.ptr + ctx->memory.size;
+ if (elem_end == buffer_end)
+ ctx->memory.size -= sizeof(struct nk_page_element);
+ else nk_link_page_element_into_freelist(ctx, elem);}
+}
+
+
+
+
+
+/* ===============================================================
+ *
+ * TABLE
+ *
+ * ===============================================================*/
+NK_LIB struct nk_table*
+nk_create_table(struct nk_context *ctx)
+{
+ struct nk_page_element *elem;
+ elem = nk_create_page_element(ctx);
+ if (!elem) return 0;
+ nk_zero_struct(*elem);
+ return &elem->data.tbl;
+}
+NK_LIB void
+nk_free_table(struct nk_context *ctx, struct nk_table *tbl)
+{
+ union nk_page_data *pd = NK_CONTAINER_OF(tbl, union nk_page_data, tbl);
+ struct nk_page_element *pe = NK_CONTAINER_OF(pd, struct nk_page_element, data);
+ nk_free_page_element(ctx, pe);
+}
+NK_LIB void
+nk_push_table(struct nk_window *win, struct nk_table *tbl)
+{
+ if (!win->tables) {
+ win->tables = tbl;
+ tbl->next = 0;
+ tbl->prev = 0;
+ tbl->size = 0;
+ win->table_count = 1;
+ return;
+ }
+ win->tables->prev = tbl;
+ tbl->next = win->tables;
+ tbl->prev = 0;
+ tbl->size = 0;
+ win->tables = tbl;
+ win->table_count++;
+}
+NK_LIB void
+nk_remove_table(struct nk_window *win, struct nk_table *tbl)
+{
+ if (win->tables == tbl)
+ win->tables = tbl->next;
+ if (tbl->next)
+ tbl->next->prev = tbl->prev;
+ if (tbl->prev)
+ tbl->prev->next = tbl->next;
+ tbl->next = 0;
+ tbl->prev = 0;
+}
+NK_LIB nk_uint*
+nk_add_value(struct nk_context *ctx, struct nk_window *win,
+ nk_hash name, nk_uint value)
+{
+ NK_ASSERT(ctx);
+ NK_ASSERT(win);
+ if (!win || !ctx) return 0;
+ if (!win->tables || win->tables->size >= NK_VALUE_PAGE_CAPACITY) {
+ struct nk_table *tbl = nk_create_table(ctx);
+ NK_ASSERT(tbl);
+ if (!tbl) return 0;
+ nk_push_table(win, tbl);
+ }
+ win->tables->seq = win->seq;
+ win->tables->keys[win->tables->size] = name;
+ win->tables->values[win->tables->size] = value;
+ return &win->tables->values[win->tables->size++];
+}
+NK_LIB nk_uint*
+nk_find_value(struct nk_window *win, nk_hash name)
+{
+ struct nk_table *iter = win->tables;
+ while (iter) {
+ unsigned int i = 0;
+ unsigned int size = iter->size;
+ for (i = 0; i < size; ++i) {
+ if (iter->keys[i] == name) {
+ iter->seq = win->seq;
+ return &iter->values[i];
+ }
+ } size = NK_VALUE_PAGE_CAPACITY;
+ iter = iter->next;
+ }
+ return 0;
+}
+
+
+
+
+
+/* ===============================================================
+ *
+ * PANEL
+ *
+ * ===============================================================*/
+NK_LIB void*
+nk_create_panel(struct nk_context *ctx)
+{
+ struct nk_page_element *elem;
+ elem = nk_create_page_element(ctx);
+ if (!elem) return 0;
+ nk_zero_struct(*elem);
+ return &elem->data.pan;
+}
+NK_LIB void
+nk_free_panel(struct nk_context *ctx, struct nk_panel *pan)
+{
+ union nk_page_data *pd = NK_CONTAINER_OF(pan, union nk_page_data, pan);
+ struct nk_page_element *pe = NK_CONTAINER_OF(pd, struct nk_page_element, data);
+ nk_free_page_element(ctx, pe);
+}
+NK_LIB int
+nk_panel_has_header(nk_flags flags, const char *title)
+{
+ int active = 0;
+ active = (flags & (NK_WINDOW_CLOSABLE|NK_WINDOW_MINIMIZABLE));
+ active = active || (flags & NK_WINDOW_TITLE);
+ active = active && !(flags & NK_WINDOW_HIDDEN) && title;
+ return active;
+}
+NK_LIB struct nk_vec2
+nk_panel_get_padding(const struct nk_style *style, enum nk_panel_type type)
+{
+ switch (type) {
+ default:
+ case NK_PANEL_WINDOW: return style->window.padding;
+ case NK_PANEL_GROUP: return style->window.group_padding;
+ case NK_PANEL_POPUP: return style->window.popup_padding;
+ case NK_PANEL_CONTEXTUAL: return style->window.contextual_padding;
+ case NK_PANEL_COMBO: return style->window.combo_padding;
+ case NK_PANEL_MENU: return style->window.menu_padding;
+ case NK_PANEL_TOOLTIP: return style->window.menu_padding;}
+}
+NK_LIB float
+nk_panel_get_border(const struct nk_style *style, nk_flags flags,
+ enum nk_panel_type type)
+{
+ if (flags & NK_WINDOW_BORDER) {
+ switch (type) {
+ default:
+ case NK_PANEL_WINDOW: return style->window.border;
+ case NK_PANEL_GROUP: return style->window.group_border;
+ case NK_PANEL_POPUP: return style->window.popup_border;
+ case NK_PANEL_CONTEXTUAL: return style->window.contextual_border;
+ case NK_PANEL_COMBO: return style->window.combo_border;
+ case NK_PANEL_MENU: return style->window.menu_border;
+ case NK_PANEL_TOOLTIP: return style->window.menu_border;
+ }} else return 0;
+}
+NK_LIB struct nk_color
+nk_panel_get_border_color(const struct nk_style *style, enum nk_panel_type type)
+{
+ switch (type) {
+ default:
+ case NK_PANEL_WINDOW: return style->window.border_color;
+ case NK_PANEL_GROUP: return style->window.group_border_color;
+ case NK_PANEL_POPUP: return style->window.popup_border_color;
+ case NK_PANEL_CONTEXTUAL: return style->window.contextual_border_color;
+ case NK_PANEL_COMBO: return style->window.combo_border_color;
+ case NK_PANEL_MENU: return style->window.menu_border_color;
+ case NK_PANEL_TOOLTIP: return style->window.menu_border_color;}
+}
+NK_LIB int
+nk_panel_is_sub(enum nk_panel_type type)
+{
+ return (type & NK_PANEL_SET_SUB)?1:0;
+}
+NK_LIB int
+nk_panel_is_nonblock(enum nk_panel_type type)
+{
+ return (type & NK_PANEL_SET_NONBLOCK)?1:0;
+}
+NK_LIB int
+nk_panel_begin(struct nk_context *ctx, const char *title, enum nk_panel_type panel_type)
+{
+ struct nk_input *in;
+ struct nk_window *win;
+ struct nk_panel *layout;
+ struct nk_command_buffer *out;
+ const struct nk_style *style;
+ const struct nk_user_font *font;
+
+ struct nk_vec2 scrollbar_size;
+ struct nk_vec2 panel_padding;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ if (!ctx || !ctx->current || !ctx->current->layout) return 0;
+ nk_zero(ctx->current->layout, sizeof(*ctx->current->layout));
+ if ((ctx->current->flags & NK_WINDOW_HIDDEN) || (ctx->current->flags & NK_WINDOW_CLOSED)) {
+ nk_zero(ctx->current->layout, sizeof(struct nk_panel));
+ ctx->current->layout->type = panel_type;
+ return 0;
+ }
+ /* pull state into local stack */
+ style = &ctx->style;
+ font = style->font;
+ win = ctx->current;
+ layout = win->layout;
+ out = &win->buffer;
+ in = (win->flags & NK_WINDOW_NO_INPUT) ? 0: &ctx->input;
+#ifdef NK_INCLUDE_COMMAND_USERDATA
+ win->buffer.userdata = ctx->userdata;
+#endif
+ /* pull style configuration into local stack */
+ scrollbar_size = style->window.scrollbar_size;
+ panel_padding = nk_panel_get_padding(style, panel_type);
+
+ /* window movement */
+ if ((win->flags & NK_WINDOW_MOVABLE) && !(win->flags & NK_WINDOW_ROM)) {
+ int left_mouse_down;
+ int left_mouse_clicked;
+ int left_mouse_click_in_cursor;
+
+ /* calculate draggable window space */
+ struct nk_rect header;
+ header.x = win->bounds.x;
+ header.y = win->bounds.y;
+ header.w = win->bounds.w;
+ if (nk_panel_has_header(win->flags, title)) {
+ header.h = font->height + 2.0f * style->window.header.padding.y;
+ header.h += 2.0f * style->window.header.label_padding.y;
+ } else header.h = panel_padding.y;
+
+ /* window movement by dragging */
+ left_mouse_down = in->mouse.buttons[NK_BUTTON_LEFT].down;
+ left_mouse_clicked = (int)in->mouse.buttons[NK_BUTTON_LEFT].clicked;
+ left_mouse_click_in_cursor = nk_input_has_mouse_click_down_in_rect(in,
+ NK_BUTTON_LEFT, header, nk_true);
+ if (left_mouse_down && left_mouse_click_in_cursor && !left_mouse_clicked) {
+ win->bounds.x = win->bounds.x + in->mouse.delta.x;
+ win->bounds.y = win->bounds.y + in->mouse.delta.y;
+ in->mouse.buttons[NK_BUTTON_LEFT].clicked_pos.x += in->mouse.delta.x;
+ in->mouse.buttons[NK_BUTTON_LEFT].clicked_pos.y += in->mouse.delta.y;
+ ctx->style.cursor_active = ctx->style.cursors[NK_CURSOR_MOVE];
+ }
+ }
+
+ /* setup panel */
+ layout->type = panel_type;
+ layout->flags = win->flags;
+ layout->bounds = win->bounds;
+ layout->bounds.x += panel_padding.x;
+ layout->bounds.w -= 2*panel_padding.x;
+ if (win->flags & NK_WINDOW_BORDER) {
+ layout->border = nk_panel_get_border(style, win->flags, panel_type);
+ layout->bounds = nk_shrink_rect(layout->bounds, layout->border);
+ } else layout->border = 0;
+ layout->at_y = layout->bounds.y;
+ layout->at_x = layout->bounds.x;
+ layout->max_x = 0;
+ layout->header_height = 0;
+ layout->footer_height = 0;
+ nk_layout_reset_min_row_height(ctx);
+ layout->row.index = 0;
+ layout->row.columns = 0;
+ layout->row.ratio = 0;
+ layout->row.item_width = 0;
+ layout->row.tree_depth = 0;
+ layout->row.height = panel_padding.y;
+ layout->has_scrolling = nk_true;
+ if (!(win->flags & NK_WINDOW_NO_SCROLLBAR))
+ layout->bounds.w -= scrollbar_size.x;
+ if (!nk_panel_is_nonblock(panel_type)) {
+ layout->footer_height = 0;
+ if (!(win->flags & NK_WINDOW_NO_SCROLLBAR) || win->flags & NK_WINDOW_SCALABLE)
+ layout->footer_height = scrollbar_size.y;
+ layout->bounds.h -= layout->footer_height;
+ }
+
+ /* panel header */
+ if (nk_panel_has_header(win->flags, title))
+ {
+ struct nk_text text;
+ struct nk_rect header;
+ const struct nk_style_item *background = 0;
+
+ /* calculate header bounds */
+ header.x = win->bounds.x;
+ header.y = win->bounds.y;
+ header.w = win->bounds.w;
+ header.h = font->height + 2.0f * style->window.header.padding.y;
+ header.h += (2.0f * style->window.header.label_padding.y);
+
+ /* shrink panel by header */
+ layout->header_height = header.h;
+ layout->bounds.y += header.h;
+ layout->bounds.h -= header.h;
+ layout->at_y += header.h;
+
+ /* select correct header background and text color */
+ if (ctx->active == win) {
+ background = &style->window.header.active;
+ text.text = style->window.header.label_active;
+ } else if (nk_input_is_mouse_hovering_rect(&ctx->input, header)) {
+ background = &style->window.header.hover;
+ text.text = style->window.header.label_hover;
+ } else {
+ background = &style->window.header.normal;
+ text.text = style->window.header.label_normal;
+ }
+
+ /* draw header background */
+ header.h += 1.0f;
+ if (background->type == NK_STYLE_ITEM_IMAGE) {
+ text.background = nk_rgba(0,0,0,0);
+ nk_draw_image(&win->buffer, header, &background->data.image, nk_white);
+ } else {
+ text.background = background->data.color;
+ nk_fill_rect(out, header, 0, background->data.color);
+ }
+
+ /* window close button */
+ {struct nk_rect button;
+ button.y = header.y + style->window.header.padding.y;
+ button.h = header.h - 2 * style->window.header.padding.y;
+ button.w = button.h;
+ if (win->flags & NK_WINDOW_CLOSABLE) {
+ nk_flags ws = 0;
+ if (style->window.header.align == NK_HEADER_RIGHT) {
+ button.x = (header.w + header.x) - (button.w + style->window.header.padding.x);
+ header.w -= button.w + style->window.header.spacing.x + style->window.header.padding.x;
+ } else {
+ button.x = header.x + style->window.header.padding.x;
+ header.x += button.w + style->window.header.spacing.x + style->window.header.padding.x;
+ }
+
+ if (nk_do_button_symbol(&ws, &win->buffer, button,
+ style->window.header.close_symbol, NK_BUTTON_DEFAULT,
+ &style->window.header.close_button, in, style->font) && !(win->flags & NK_WINDOW_ROM))
+ {
+ layout->flags |= NK_WINDOW_HIDDEN;
+ layout->flags &= (nk_flags)~NK_WINDOW_MINIMIZED;
+ }
+ }
+
+ /* window minimize button */
+ if (win->flags & NK_WINDOW_MINIMIZABLE) {
+ nk_flags ws = 0;
+ if (style->window.header.align == NK_HEADER_RIGHT) {
+ button.x = (header.w + header.x) - button.w;
+ if (!(win->flags & NK_WINDOW_CLOSABLE)) {
+ button.x -= style->window.header.padding.x;
+ header.w -= style->window.header.padding.x;
+ }
+ header.w -= button.w + style->window.header.spacing.x;
+ } else {
+ button.x = header.x;
+ header.x += button.w + style->window.header.spacing.x + style->window.header.padding.x;
+ }
+ if (nk_do_button_symbol(&ws, &win->buffer, button, (layout->flags & NK_WINDOW_MINIMIZED)?
+ style->window.header.maximize_symbol: style->window.header.minimize_symbol,
+ NK_BUTTON_DEFAULT, &style->window.header.minimize_button, in, style->font) && !(win->flags & NK_WINDOW_ROM))
+ layout->flags = (layout->flags & NK_WINDOW_MINIMIZED) ?
+ layout->flags & (nk_flags)~NK_WINDOW_MINIMIZED:
+ layout->flags | NK_WINDOW_MINIMIZED;
+ }}
+
+ {/* window header title */
+ int text_len = nk_strlen(title);
+ struct nk_rect label = {0,0,0,0};
+ float t = font->width(font->userdata, font->height, title, text_len);
+ text.padding = nk_vec2(0,0);
+
+ label.x = header.x + style->window.header.padding.x;
+ label.x += style->window.header.label_padding.x;
+ label.y = header.y + style->window.header.label_padding.y;
+ label.h = font->height + 2 * style->window.header.label_padding.y;
+ label.w = t + 2 * style->window.header.spacing.x;
+ label.w = NK_CLAMP(0, label.w, header.x + header.w - label.x);
+ nk_widget_text(out, label,(const char*)title, text_len, &text, NK_TEXT_LEFT, font);}
+ }
+
+ /* draw window background */
+ if (!(layout->flags & NK_WINDOW_MINIMIZED) && !(layout->flags & NK_WINDOW_DYNAMIC)) {
+ struct nk_rect body;
+ body.x = win->bounds.x;
+ body.w = win->bounds.w;
+ body.y = (win->bounds.y + layout->header_height);
+ body.h = (win->bounds.h - layout->header_height);
+ if (style->window.fixed_background.type == NK_STYLE_ITEM_IMAGE)
+ nk_draw_image(out, body, &style->window.fixed_background.data.image, nk_white);
+ else nk_fill_rect(out, body, 0, style->window.fixed_background.data.color);
+ }
+
+ /* set clipping rectangle */
+ {struct nk_rect clip;
+ layout->clip = layout->bounds;
+ nk_unify(&clip, &win->buffer.clip, layout->clip.x, layout->clip.y,
+ layout->clip.x + layout->clip.w, layout->clip.y + layout->clip.h);
+ nk_push_scissor(out, clip);
+ layout->clip = clip;}
+ return !(layout->flags & NK_WINDOW_HIDDEN) && !(layout->flags & NK_WINDOW_MINIMIZED);
+}
+NK_LIB void
+nk_panel_end(struct nk_context *ctx)
+{
+ struct nk_input *in;
+ struct nk_window *window;
+ struct nk_panel *layout;
+ const struct nk_style *style;
+ struct nk_command_buffer *out;
+
+ struct nk_vec2 scrollbar_size;
+ struct nk_vec2 panel_padding;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ if (!ctx || !ctx->current || !ctx->current->layout)
+ return;
+
+ window = ctx->current;
+ layout = window->layout;
+ style = &ctx->style;
+ out = &window->buffer;
+ in = (layout->flags & NK_WINDOW_ROM || layout->flags & NK_WINDOW_NO_INPUT) ? 0 :&ctx->input;
+ if (!nk_panel_is_sub(layout->type))
+ nk_push_scissor(out, nk_null_rect);
+
+ /* cache configuration data */
+ scrollbar_size = style->window.scrollbar_size;
+ panel_padding = nk_panel_get_padding(style, layout->type);
+
+ /* update the current cursor Y-position to point over the last added widget */
+ layout->at_y += layout->row.height;
+
+ /* dynamic panels */
+ if (layout->flags & NK_WINDOW_DYNAMIC && !(layout->flags & NK_WINDOW_MINIMIZED))
+ {
+ /* update panel height to fit dynamic growth */
+ struct nk_rect empty_space;
+ if (layout->at_y < (layout->bounds.y + layout->bounds.h))
+ layout->bounds.h = layout->at_y - layout->bounds.y;
+
+ /* fill top empty space */
+ empty_space.x = window->bounds.x;
+ empty_space.y = layout->bounds.y;
+ empty_space.h = panel_padding.y;
+ empty_space.w = window->bounds.w;
+ nk_fill_rect(out, empty_space, 0, style->window.background);
+
+ /* fill left empty space */
+ empty_space.x = window->bounds.x;
+ empty_space.y = layout->bounds.y;
+ empty_space.w = panel_padding.x + layout->border;
+ empty_space.h = layout->bounds.h;
+ nk_fill_rect(out, empty_space, 0, style->window.background);
+
+ /* fill right empty space */
+ empty_space.x = layout->bounds.x + layout->bounds.w;
+ empty_space.y = layout->bounds.y;
+ empty_space.w = panel_padding.x + layout->border;
+ empty_space.h = layout->bounds.h;
+ if (*layout->offset_y == 0 && !(layout->flags & NK_WINDOW_NO_SCROLLBAR))
+ empty_space.w += scrollbar_size.x;
+ nk_fill_rect(out, empty_space, 0, style->window.background);
+
+ /* fill bottom empty space */
+ if (layout->footer_height > 0) {
+ empty_space.x = window->bounds.x;
+ empty_space.y = layout->bounds.y + layout->bounds.h;
+ empty_space.w = window->bounds.w;
+ empty_space.h = layout->footer_height;
+ nk_fill_rect(out, empty_space, 0, style->window.background);
+ }
+ }
+
+ /* scrollbars */
+ if (!(layout->flags & NK_WINDOW_NO_SCROLLBAR) &&
+ !(layout->flags & NK_WINDOW_MINIMIZED) &&
+ window->scrollbar_hiding_timer < NK_SCROLLBAR_HIDING_TIMEOUT)
+ {
+ struct nk_rect scroll;
+ int scroll_has_scrolling;
+ float scroll_target;
+ float scroll_offset;
+ float scroll_step;
+ float scroll_inc;
+
+ /* mouse wheel scrolling */
+ if (nk_panel_is_sub(layout->type))
+ {
+ /* sub-window mouse wheel scrolling */
+ struct nk_window *root_window = window;
+ struct nk_panel *root_panel = window->layout;
+ while (root_panel->parent)
+ root_panel = root_panel->parent;
+ while (root_window->parent)
+ root_window = root_window->parent;
+
+ /* only allow scrolling if parent window is active */
+ scroll_has_scrolling = 0;
+ if ((root_window == ctx->active) && layout->has_scrolling) {
+ /* and panel is being hovered and inside clip rect */
+ if (nk_input_is_mouse_hovering_rect(in, layout->bounds) &&
+ NK_INTERSECT(layout->bounds.x, layout->bounds.y, layout->bounds.w, layout->bounds.h,
+ root_panel->clip.x, root_panel->clip.y, root_panel->clip.w, root_panel->clip.h))
+ {
+ /* deactivate all parent scrolling */
+ root_panel = window->layout;
+ while (root_panel->parent) {
+ root_panel->has_scrolling = nk_false;
+ root_panel = root_panel->parent;
+ }
+ root_panel->has_scrolling = nk_false;
+ scroll_has_scrolling = nk_true;
+ }
+ }
+ } else if (!nk_panel_is_sub(layout->type)) {
+ /* window mouse wheel scrolling */
+ scroll_has_scrolling = (window == ctx->active) && layout->has_scrolling;
+ if (in && (in->mouse.scroll_delta.y > 0 || in->mouse.scroll_delta.x > 0) && scroll_has_scrolling)
+ window->scrolled = nk_true;
+ else window->scrolled = nk_false;
+ } else scroll_has_scrolling = nk_false;
+
+ {
+ /* vertical scrollbar */
+ nk_flags state = 0;
+ scroll.x = layout->bounds.x + layout->bounds.w + panel_padding.x;
+ scroll.y = layout->bounds.y;
+ scroll.w = scrollbar_size.x;
+ scroll.h = layout->bounds.h;
+
+ scroll_offset = (float)*layout->offset_y;
+ scroll_step = scroll.h * 0.10f;
+ scroll_inc = scroll.h * 0.01f;
+ scroll_target = (float)(int)(layout->at_y - scroll.y);
+ scroll_offset = nk_do_scrollbarv(&state, out, scroll, scroll_has_scrolling,
+ scroll_offset, scroll_target, scroll_step, scroll_inc,
+ &ctx->style.scrollv, in, style->font);
+ *layout->offset_y = (nk_uint)scroll_offset;
+ if (in && scroll_has_scrolling)
+ in->mouse.scroll_delta.y = 0;
+ }
+ {
+ /* horizontal scrollbar */
+ nk_flags state = 0;
+ scroll.x = layout->bounds.x;
+ scroll.y = layout->bounds.y + layout->bounds.h;
+ scroll.w = layout->bounds.w;
+ scroll.h = scrollbar_size.y;
+
+ scroll_offset = (float)*layout->offset_x;
+ scroll_target = (float)(int)(layout->max_x - scroll.x);
+ scroll_step = layout->max_x * 0.05f;
+ scroll_inc = layout->max_x * 0.005f;
+ scroll_offset = nk_do_scrollbarh(&state, out, scroll, scroll_has_scrolling,
+ scroll_offset, scroll_target, scroll_step, scroll_inc,
+ &ctx->style.scrollh, in, style->font);
+ *layout->offset_x = (nk_uint)scroll_offset;
+ }
+ }
+
+ /* hide scroll if no user input */
+ if (window->flags & NK_WINDOW_SCROLL_AUTO_HIDE) {
+ int has_input = ctx->input.mouse.delta.x != 0 || ctx->input.mouse.delta.y != 0 || ctx->input.mouse.scroll_delta.y != 0;
+ int is_window_hovered = nk_window_is_hovered(ctx);
+ int any_item_active = (ctx->last_widget_state & NK_WIDGET_STATE_MODIFIED);
+ if ((!has_input && is_window_hovered) || (!is_window_hovered && !any_item_active))
+ window->scrollbar_hiding_timer += ctx->delta_time_seconds;
+ else window->scrollbar_hiding_timer = 0;
+ } else window->scrollbar_hiding_timer = 0;
+
+ /* window border */
+ if (layout->flags & NK_WINDOW_BORDER)
+ {
+ struct nk_color border_color = nk_panel_get_border_color(style, layout->type);
+ const float padding_y = (layout->flags & NK_WINDOW_MINIMIZED)
+ ? (style->window.border + window->bounds.y + layout->header_height)
+ : ((layout->flags & NK_WINDOW_DYNAMIC)
+ ? (layout->bounds.y + layout->bounds.h + layout->footer_height)
+ : (window->bounds.y + window->bounds.h));
+ struct nk_rect b = window->bounds;
+ b.h = padding_y - window->bounds.y;
+ nk_stroke_rect(out, b, 0, layout->border, border_color);
+ }
+
+ /* scaler */
+ if ((layout->flags & NK_WINDOW_SCALABLE) && in && !(layout->flags & NK_WINDOW_MINIMIZED))
+ {
+ /* calculate scaler bounds */
+ struct nk_rect scaler;
+ scaler.w = scrollbar_size.x;
+ scaler.h = scrollbar_size.y;
+ scaler.y = layout->bounds.y + layout->bounds.h;
+ if (layout->flags & NK_WINDOW_SCALE_LEFT)
+ scaler.x = layout->bounds.x - panel_padding.x * 0.5f;
+ else scaler.x = layout->bounds.x + layout->bounds.w + panel_padding.x;
+ if (layout->flags & NK_WINDOW_NO_SCROLLBAR)
+ scaler.x -= scaler.w;
+
+ /* draw scaler */
+ {const struct nk_style_item *item = &style->window.scaler;
+ if (item->type == NK_STYLE_ITEM_IMAGE)
+ nk_draw_image(out, scaler, &item->data.image, nk_white);
+ else {
+ if (layout->flags & NK_WINDOW_SCALE_LEFT) {
+ nk_fill_triangle(out, scaler.x, scaler.y, scaler.x,
+ scaler.y + scaler.h, scaler.x + scaler.w,
+ scaler.y + scaler.h, item->data.color);
+ } else {
+ nk_fill_triangle(out, scaler.x + scaler.w, scaler.y, scaler.x + scaler.w,
+ scaler.y + scaler.h, scaler.x, scaler.y + scaler.h, item->data.color);
+ }
+ }}
+
+ /* do window scaling */
+ if (!(window->flags & NK_WINDOW_ROM)) {
+ struct nk_vec2 window_size = style->window.min_size;
+ int left_mouse_down = in->mouse.buttons[NK_BUTTON_LEFT].down;
+ int left_mouse_click_in_scaler = nk_input_has_mouse_click_down_in_rect(in,
+ NK_BUTTON_LEFT, scaler, nk_true);
+
+ if (left_mouse_down && left_mouse_click_in_scaler) {
+ float delta_x = in->mouse.delta.x;
+ if (layout->flags & NK_WINDOW_SCALE_LEFT) {
+ delta_x = -delta_x;
+ window->bounds.x += in->mouse.delta.x;
+ }
+ /* dragging in x-direction */
+ if (window->bounds.w + delta_x >= window_size.x) {
+ if ((delta_x < 0) || (delta_x > 0 && in->mouse.pos.x >= scaler.x)) {
+ window->bounds.w = window->bounds.w + delta_x;
+ scaler.x += in->mouse.delta.x;
+ }
+ }
+ /* dragging in y-direction (only possible if static window) */
+ if (!(layout->flags & NK_WINDOW_DYNAMIC)) {
+ if (window_size.y < window->bounds.h + in->mouse.delta.y) {
+ if ((in->mouse.delta.y < 0) || (in->mouse.delta.y > 0 && in->mouse.pos.y >= scaler.y)) {
+ window->bounds.h = window->bounds.h + in->mouse.delta.y;
+ scaler.y += in->mouse.delta.y;
+ }
+ }
+ }
+ ctx->style.cursor_active = ctx->style.cursors[NK_CURSOR_RESIZE_TOP_RIGHT_DOWN_LEFT];
+ in->mouse.buttons[NK_BUTTON_LEFT].clicked_pos.x = scaler.x + scaler.w/2.0f;
+ in->mouse.buttons[NK_BUTTON_LEFT].clicked_pos.y = scaler.y + scaler.h/2.0f;
+ }
+ }
+ }
+ if (!nk_panel_is_sub(layout->type)) {
+ /* window is hidden so clear command buffer */
+ if (layout->flags & NK_WINDOW_HIDDEN)
+ nk_command_buffer_reset(&window->buffer);
+ /* window is visible and not tab */
+ else nk_finish(ctx, window);
+ }
+
+ /* NK_WINDOW_REMOVE_ROM flag was set so remove NK_WINDOW_ROM */
+ if (layout->flags & NK_WINDOW_REMOVE_ROM) {
+ layout->flags &= ~(nk_flags)NK_WINDOW_ROM;
+ layout->flags &= ~(nk_flags)NK_WINDOW_REMOVE_ROM;
+ }
+ window->flags = layout->flags;
+
+ /* property garbage collector */
+ if (window->property.active && window->property.old != window->property.seq &&
+ window->property.active == window->property.prev) {
+ nk_zero(&window->property, sizeof(window->property));
+ } else {
+ window->property.old = window->property.seq;
+ window->property.prev = window->property.active;
+ window->property.seq = 0;
+ }
+ /* edit garbage collector */
+ if (window->edit.active && window->edit.old != window->edit.seq &&
+ window->edit.active == window->edit.prev) {
+ nk_zero(&window->edit, sizeof(window->edit));
+ } else {
+ window->edit.old = window->edit.seq;
+ window->edit.prev = window->edit.active;
+ window->edit.seq = 0;
+ }
+ /* contextual garbage collector */
+ if (window->popup.active_con && window->popup.con_old != window->popup.con_count) {
+ window->popup.con_count = 0;
+ window->popup.con_old = 0;
+ window->popup.active_con = 0;
+ } else {
+ window->popup.con_old = window->popup.con_count;
+ window->popup.con_count = 0;
+ }
+ window->popup.combo_count = 0;
+ /* helper to make sure you have a 'nk_tree_push' for every 'nk_tree_pop' */
+ NK_ASSERT(!layout->row.tree_depth);
+}
+
+
+
+
+
+/* ===============================================================
+ *
+ * WINDOW
+ *
+ * ===============================================================*/
+NK_LIB void*
+nk_create_window(struct nk_context *ctx)
+{
+ struct nk_page_element *elem;
+ elem = nk_create_page_element(ctx);
+ if (!elem) return 0;
+ elem->data.win.seq = ctx->seq;
+ return &elem->data.win;
+}
+NK_LIB void
+nk_free_window(struct nk_context *ctx, struct nk_window *win)
+{
+ /* unlink windows from list */
+ struct nk_table *it = win->tables;
+ if (win->popup.win) {
+ nk_free_window(ctx, win->popup.win);
+ win->popup.win = 0;
+ }
+ win->next = 0;
+ win->prev = 0;
+
+ while (it) {
+ /* free window state tables */
+ struct nk_table *n = it->next;
+ nk_remove_table(win, it);
+ nk_free_table(ctx, it);
+ if (it == win->tables)
+ win->tables = n;
+ it = n;
+ }
+
+ /* link windows into freelist */
+ {union nk_page_data *pd = NK_CONTAINER_OF(win, union nk_page_data, win);
+ struct nk_page_element *pe = NK_CONTAINER_OF(pd, struct nk_page_element, data);
+ nk_free_page_element(ctx, pe);}
+}
+NK_LIB struct nk_window*
+nk_find_window(struct nk_context *ctx, nk_hash hash, const char *name)
+{
+ struct nk_window *iter;
+ iter = ctx->begin;
+ while (iter) {
+ NK_ASSERT(iter != iter->next);
+ if (iter->name == hash) {
+ int max_len = nk_strlen(iter->name_string);
+ if (!nk_stricmpn(iter->name_string, name, max_len))
+ return iter;
+ }
+ iter = iter->next;
+ }
+ return 0;
+}
+NK_LIB void
+nk_insert_window(struct nk_context *ctx, struct nk_window *win,
+ enum nk_window_insert_location loc)
+{
+ const struct nk_window *iter;
+ NK_ASSERT(ctx);
+ NK_ASSERT(win);
+ if (!win || !ctx) return;
+
+ iter = ctx->begin;
+ while (iter) {
+ NK_ASSERT(iter != iter->next);
+ NK_ASSERT(iter != win);
+ if (iter == win) return;
+ iter = iter->next;
+ }
+
+ if (!ctx->begin) {
+ win->next = 0;
+ win->prev = 0;
+ ctx->begin = win;
+ ctx->end = win;
+ ctx->count = 1;
+ return;
+ }
+ if (loc == NK_INSERT_BACK) {
+ struct nk_window *end;
+ end = ctx->end;
+ end->flags |= NK_WINDOW_ROM;
+ end->next = win;
+ win->prev = ctx->end;
+ win->next = 0;
+ ctx->end = win;
+ ctx->active = ctx->end;
+ ctx->end->flags &= ~(nk_flags)NK_WINDOW_ROM;
+ } else {
+ /*ctx->end->flags |= NK_WINDOW_ROM;*/
+ ctx->begin->prev = win;
+ win->next = ctx->begin;
+ win->prev = 0;
+ ctx->begin = win;
+ ctx->begin->flags &= ~(nk_flags)NK_WINDOW_ROM;
+ }
+ ctx->count++;
+}
+NK_LIB void
+nk_remove_window(struct nk_context *ctx, struct nk_window *win)
+{
+ if (win == ctx->begin || win == ctx->end) {
+ if (win == ctx->begin) {
+ ctx->begin = win->next;
+ if (win->next)
+ win->next->prev = 0;
+ }
+ if (win == ctx->end) {
+ ctx->end = win->prev;
+ if (win->prev)
+ win->prev->next = 0;
+ }
+ } else {
+ if (win->next)
+ win->next->prev = win->prev;
+ if (win->prev)
+ win->prev->next = win->next;
+ }
+ if (win == ctx->active || !ctx->active) {
+ ctx->active = ctx->end;
+ if (ctx->end)
+ ctx->end->flags &= ~(nk_flags)NK_WINDOW_ROM;
+ }
+ win->next = 0;
+ win->prev = 0;
+ ctx->count--;
+}
+NK_API int
+nk_begin(struct nk_context *ctx, const char *title,
+ struct nk_rect bounds, nk_flags flags)
+{
+ return nk_begin_titled(ctx, title, title, bounds, flags);
+}
+NK_API int
+nk_begin_titled(struct nk_context *ctx, const char *name, const char *title,
+ struct nk_rect bounds, nk_flags flags)
+{
+ struct nk_window *win;
+ struct nk_style *style;
+ nk_hash name_hash;
+ int name_len;
+ int ret = 0;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(name);
+ NK_ASSERT(title);
+ NK_ASSERT(ctx->style.font && ctx->style.font->width && "if this triggers you forgot to add a font");
+ NK_ASSERT(!ctx->current && "if this triggers you missed a `nk_end` call");
+ if (!ctx || ctx->current || !title || !name)
+ return 0;
+
+ /* find or create window */
+ style = &ctx->style;
+ name_len = (int)nk_strlen(name);
+ name_hash = nk_murmur_hash(name, (int)name_len, NK_WINDOW_TITLE);
+ win = nk_find_window(ctx, name_hash, name);
+ if (!win) {
+ /* create new window */
+ nk_size name_length = (nk_size)name_len;
+ win = (struct nk_window*)nk_create_window(ctx);
+ NK_ASSERT(win);
+ if (!win) return 0;
+
+ if (flags & NK_WINDOW_BACKGROUND)
+ nk_insert_window(ctx, win, NK_INSERT_FRONT);
+ else nk_insert_window(ctx, win, NK_INSERT_BACK);
+ nk_command_buffer_init(&win->buffer, &ctx->memory, NK_CLIPPING_ON);
+
+ win->flags = flags;
+ win->bounds = bounds;
+ win->name = name_hash;
+ name_length = NK_MIN(name_length, NK_WINDOW_MAX_NAME-1);
+ NK_MEMCPY(win->name_string, name, name_length);
+ win->name_string[name_length] = 0;
+ win->popup.win = 0;
+ if (!ctx->active)
+ ctx->active = win;
+ } else {
+ /* update window */
+ win->flags &= ~(nk_flags)(NK_WINDOW_PRIVATE-1);
+ win->flags |= flags;
+ if (!(win->flags & (NK_WINDOW_MOVABLE | NK_WINDOW_SCALABLE)))
+ win->bounds = bounds;
+ /* If this assert triggers you either:
+ *
+ * I.) Have more than one window with the same name or
+ * II.) You forgot to actually draw the window.
+ * More specifically, you did not call `nk_clear` (nk_clear will be
+ * automatically called for you if you are using one of the
+ * provided demo backends). */
+ NK_ASSERT(win->seq != ctx->seq);
+ win->seq = ctx->seq;
+ if (!ctx->active && !(win->flags & NK_WINDOW_HIDDEN)) {
+ ctx->active = win;
+ ctx->end = win;
+ }
+ }
+ if (win->flags & NK_WINDOW_HIDDEN) {
+ ctx->current = win;
+ win->layout = 0;
+ return 0;
+ } else nk_start(ctx, win);
+
+ /* window overlapping */
+ if (!(win->flags & NK_WINDOW_HIDDEN) && !(win->flags & NK_WINDOW_NO_INPUT))
+ {
+ int inpanel, ishovered;
+ struct nk_window *iter = win;
+ float h = ctx->style.font->height + 2.0f * style->window.header.padding.y +
+ (2.0f * style->window.header.label_padding.y);
+ struct nk_rect win_bounds = (!(win->flags & NK_WINDOW_MINIMIZED))?
+ win->bounds: nk_rect(win->bounds.x, win->bounds.y, win->bounds.w, h);
+
+ /* activate window if hovered and no other window is overlapping this window */
+ inpanel = nk_input_has_mouse_click_down_in_rect(&ctx->input, NK_BUTTON_LEFT, win_bounds, nk_true);
+ inpanel = inpanel && ctx->input.mouse.buttons[NK_BUTTON_LEFT].clicked;
+ ishovered = nk_input_is_mouse_hovering_rect(&ctx->input, win_bounds);
+ if ((win != ctx->active) && ishovered && !ctx->input.mouse.buttons[NK_BUTTON_LEFT].down) {
+ iter = win->next;
+ while (iter) {
+ struct nk_rect iter_bounds = (!(iter->flags & NK_WINDOW_MINIMIZED))?
+ iter->bounds: nk_rect(iter->bounds.x, iter->bounds.y, iter->bounds.w, h);
+ if (NK_INTERSECT(win_bounds.x, win_bounds.y, win_bounds.w, win_bounds.h,
+ iter_bounds.x, iter_bounds.y, iter_bounds.w, iter_bounds.h) &&
+ (!(iter->flags & NK_WINDOW_HIDDEN)))
+ break;
+
+ if (iter->popup.win && iter->popup.active && !(iter->flags & NK_WINDOW_HIDDEN) &&
+ NK_INTERSECT(win->bounds.x, win_bounds.y, win_bounds.w, win_bounds.h,
+ iter->popup.win->bounds.x, iter->popup.win->bounds.y,
+ iter->popup.win->bounds.w, iter->popup.win->bounds.h))
+ break;
+ iter = iter->next;
+ }
+ }
+
+ /* activate window if clicked */
+ if (iter && inpanel && (win != ctx->end)) {
+ iter = win->next;
+ while (iter) {
+ /* try to find a panel with higher priority in the same position */
+ struct nk_rect iter_bounds = (!(iter->flags & NK_WINDOW_MINIMIZED))?
+ iter->bounds: nk_rect(iter->bounds.x, iter->bounds.y, iter->bounds.w, h);
+ if (NK_INBOX(ctx->input.mouse.pos.x, ctx->input.mouse.pos.y,
+ iter_bounds.x, iter_bounds.y, iter_bounds.w, iter_bounds.h) &&
+ !(iter->flags & NK_WINDOW_HIDDEN))
+ break;
+ if (iter->popup.win && iter->popup.active && !(iter->flags & NK_WINDOW_HIDDEN) &&
+ NK_INTERSECT(win_bounds.x, win_bounds.y, win_bounds.w, win_bounds.h,
+ iter->popup.win->bounds.x, iter->popup.win->bounds.y,
+ iter->popup.win->bounds.w, iter->popup.win->bounds.h))
+ break;
+ iter = iter->next;
+ }
+ }
+ if (iter && !(win->flags & NK_WINDOW_ROM) && (win->flags & NK_WINDOW_BACKGROUND)) {
+ win->flags |= (nk_flags)NK_WINDOW_ROM;
+ iter->flags &= ~(nk_flags)NK_WINDOW_ROM;
+ ctx->active = iter;
+ if (!(iter->flags & NK_WINDOW_BACKGROUND)) {
+ /* current window is active in that position so transfer to top
+ * at the highest priority in stack */
+ nk_remove_window(ctx, iter);
+ nk_insert_window(ctx, iter, NK_INSERT_BACK);
+ }
+ } else {
+ if (!iter && ctx->end != win) {
+ if (!(win->flags & NK_WINDOW_BACKGROUND)) {
+ /* current window is active in that position so transfer to top
+ * at the highest priority in stack */
+ nk_remove_window(ctx, win);
+ nk_insert_window(ctx, win, NK_INSERT_BACK);
+ }
+ win->flags &= ~(nk_flags)NK_WINDOW_ROM;
+ ctx->active = win;
+ }
+ if (ctx->end != win && !(win->flags & NK_WINDOW_BACKGROUND))
+ win->flags |= NK_WINDOW_ROM;
+ }
+ }
+ win->layout = (struct nk_panel*)nk_create_panel(ctx);
+ ctx->current = win;
+ ret = nk_panel_begin(ctx, title, NK_PANEL_WINDOW);
+ win->layout->offset_x = &win->scrollbar.x;
+ win->layout->offset_y = &win->scrollbar.y;
+ return ret;
+}
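+/* nk_begin_titled sketch: `name` identifies the window across frames while
+ * `title` is only what gets drawn, so the title can change freely (the counter
+ * and geometry below are illustrative):
+ *
+ *      char title[64];
+ *      sprintf(title, "Frame %d", frame_counter);
+ *      if (nk_begin_titled(ctx, "stats_window", title,
+ *          nk_rect(10,10,220,160), NK_WINDOW_TITLE)) {
+ *          ...widgets...
+ *      }
+ *      nk_end(ctx);
+ */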
+NK_API void
+nk_end(struct nk_context *ctx)
+{
+ struct nk_panel *layout;
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current && "if this triggers you forgot to call `nk_begin`");
+ if (!ctx || !ctx->current)
+ return;
+
+ layout = ctx->current->layout;
+ if (!layout || (layout->type == NK_PANEL_WINDOW && (ctx->current->flags & NK_WINDOW_HIDDEN))) {
+ ctx->current = 0;
+ return;
+ }
+ nk_panel_end(ctx);
+ nk_free_panel(ctx, ctx->current->layout);
+ ctx->current = 0;
+}
+NK_API struct nk_rect
+nk_window_get_bounds(const struct nk_context *ctx)
+{
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ if (!ctx || !ctx->current) return nk_rect(0,0,0,0);
+ return ctx->current->bounds;
+}
+NK_API struct nk_vec2
+nk_window_get_position(const struct nk_context *ctx)
+{
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ if (!ctx || !ctx->current) return nk_vec2(0,0);
+ return nk_vec2(ctx->current->bounds.x, ctx->current->bounds.y);
+}
+NK_API struct nk_vec2
+nk_window_get_size(const struct nk_context *ctx)
+{
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ if (!ctx || !ctx->current) return nk_vec2(0,0);
+ return nk_vec2(ctx->current->bounds.w, ctx->current->bounds.h);
+}
+NK_API float
+nk_window_get_width(const struct nk_context *ctx)
+{
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ if (!ctx || !ctx->current) return 0;
+ return ctx->current->bounds.w;
+}
+NK_API float
+nk_window_get_height(const struct nk_context *ctx)
+{
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ if (!ctx || !ctx->current) return 0;
+ return ctx->current->bounds.h;
+}
+NK_API struct nk_rect
+nk_window_get_content_region(struct nk_context *ctx)
+{
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ if (!ctx || !ctx->current) return nk_rect(0,0,0,0);
+ return ctx->current->layout->clip;
+}
+NK_API struct nk_vec2
+nk_window_get_content_region_min(struct nk_context *ctx)
+{
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ if (!ctx || !ctx->current) return nk_vec2(0,0);
+ return nk_vec2(ctx->current->layout->clip.x, ctx->current->layout->clip.y);
+}
+NK_API struct nk_vec2
+nk_window_get_content_region_max(struct nk_context *ctx)
+{
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ if (!ctx || !ctx->current) return nk_vec2(0,0);
+ return nk_vec2(ctx->current->layout->clip.x + ctx->current->layout->clip.w,
+ ctx->current->layout->clip.y + ctx->current->layout->clip.h);
+}
+NK_API struct nk_vec2
+nk_window_get_content_region_size(struct nk_context *ctx)
+{
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ if (!ctx || !ctx->current) return nk_vec2(0,0);
+ return nk_vec2(ctx->current->layout->clip.w, ctx->current->layout->clip.h);
+}
+NK_API struct nk_command_buffer*
+nk_window_get_canvas(struct nk_context *ctx)
+{
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ if (!ctx || !ctx->current) return 0;
+ return &ctx->current->buffer;
+}
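+/* Canvas sketch: the returned command buffer allows custom drawing inside the
+ * current window between nk_begin and nk_end (color and usage are illustrative):
+ *
+ *      struct nk_command_buffer *canvas = nk_window_get_canvas(ctx);
+ *      struct nk_rect area = nk_window_get_content_region(ctx);
+ *      nk_fill_rect(canvas, area, 0, nk_rgb(40,40,40));
+ */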
+NK_API struct nk_panel*
+nk_window_get_panel(struct nk_context *ctx)
+{
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ if (!ctx || !ctx->current) return 0;
+ return ctx->current->layout;
+}
+NK_API void
+nk_window_get_scroll(struct nk_context *ctx, nk_uint *offset_x, nk_uint *offset_y)
+{
+ struct nk_window *win;
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ if (!ctx || !ctx->current)
+ return;
+ win = ctx->current;
+ if (offset_x)
+ *offset_x = win->scrollbar.x;
+ if (offset_y)
+ *offset_y = win->scrollbar.y;
+}
+NK_API int
+nk_window_has_focus(const struct nk_context *ctx)
+{
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ if (!ctx || !ctx->current) return 0;
+ return ctx->current == ctx->active;
+}
+NK_API int
+nk_window_is_hovered(struct nk_context *ctx)
+{
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ if (!ctx || !ctx->current) return 0;
+ if(ctx->current->flags & NK_WINDOW_HIDDEN)
+ return 0;
+ return nk_input_is_mouse_hovering_rect(&ctx->input, ctx->current->bounds);
+}
+NK_API int
+nk_window_is_any_hovered(struct nk_context *ctx)
+{
+ struct nk_window *iter;
+ NK_ASSERT(ctx);
+ if (!ctx) return 0;
+ iter = ctx->begin;
+ while (iter) {
+ /* check if window is being hovered */
+ if(!(iter->flags & NK_WINDOW_HIDDEN)) {
+ /* check if window popup is being hovered */
+ if (iter->popup.active && iter->popup.win && nk_input_is_mouse_hovering_rect(&ctx->input, iter->popup.win->bounds))
+ return 1;
+
+ if (iter->flags & NK_WINDOW_MINIMIZED) {
+ struct nk_rect header = iter->bounds;
+ header.h = ctx->style.font->height + 2 * ctx->style.window.header.padding.y;
+ if (nk_input_is_mouse_hovering_rect(&ctx->input, header))
+ return 1;
+ } else if (nk_input_is_mouse_hovering_rect(&ctx->input, iter->bounds)) {
+ return 1;
+ }
+ }
+ iter = iter->next;
+ }
+ return 0;
+}
+NK_API int
+nk_item_is_any_active(struct nk_context *ctx)
+{
+ int any_hovered = nk_window_is_any_hovered(ctx);
+ int any_active = (ctx->last_widget_state & NK_WIDGET_STATE_MODIFIED);
+ return any_hovered || any_active;
+}
+NK_API int
+nk_window_is_collapsed(struct nk_context *ctx, const char *name)
+{
+ int title_len;
+ nk_hash title_hash;
+ struct nk_window *win;
+ NK_ASSERT(ctx);
+ if (!ctx) return 0;
+
+ title_len = (int)nk_strlen(name);
+ title_hash = nk_murmur_hash(name, (int)title_len, NK_WINDOW_TITLE);
+ win = nk_find_window(ctx, title_hash, name);
+ if (!win) return 0;
+ return win->flags & NK_WINDOW_MINIMIZED;
+}
+NK_API int
+nk_window_is_closed(struct nk_context *ctx, const char *name)
+{
+ int title_len;
+ nk_hash title_hash;
+ struct nk_window *win;
+ NK_ASSERT(ctx);
+ if (!ctx) return 1;
+
+ title_len = (int)nk_strlen(name);
+ title_hash = nk_murmur_hash(name, (int)title_len, NK_WINDOW_TITLE);
+ win = nk_find_window(ctx, title_hash, name);
+ if (!win) return 1;
+ return (win->flags & NK_WINDOW_CLOSED);
+}
+NK_API int
+nk_window_is_hidden(struct nk_context *ctx, const char *name)
+{
+ int title_len;
+ nk_hash title_hash;
+ struct nk_window *win;
+ NK_ASSERT(ctx);
+ if (!ctx) return 1;
+
+ title_len = (int)nk_strlen(name);
+ title_hash = nk_murmur_hash(name, (int)title_len, NK_WINDOW_TITLE);
+ win = nk_find_window(ctx, title_hash, name);
+ if (!win) return 1;
+ return (win->flags & NK_WINDOW_HIDDEN);
+}
+NK_API int
+nk_window_is_active(struct nk_context *ctx, const char *name)
+{
+ int title_len;
+ nk_hash title_hash;
+ struct nk_window *win;
+ NK_ASSERT(ctx);
+ if (!ctx) return 0;
+
+ title_len = (int)nk_strlen(name);
+ title_hash = nk_murmur_hash(name, (int)title_len, NK_WINDOW_TITLE);
+ win = nk_find_window(ctx, title_hash, name);
+ if (!win) return 0;
+ return win == ctx->active;
+}
+NK_API struct nk_window*
+nk_window_find(struct nk_context *ctx, const char *name)
+{
+ int title_len;
+ nk_hash title_hash;
+ title_len = (int)nk_strlen(name);
+ title_hash = nk_murmur_hash(name, (int)title_len, NK_WINDOW_TITLE);
+ return nk_find_window(ctx, title_hash, name);
+}
+NK_API void
+nk_window_close(struct nk_context *ctx, const char *name)
+{
+ struct nk_window *win;
+ NK_ASSERT(ctx);
+ if (!ctx) return;
+ win = nk_window_find(ctx, name);
+ if (!win) return;
+ NK_ASSERT(ctx->current != win && "You cannot close a currently active window");
+ if (ctx->current == win) return;
+ win->flags |= NK_WINDOW_HIDDEN;
+ win->flags |= NK_WINDOW_CLOSED;
+}
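+/* Usage sketch (illustrative only, not part of the library): as the assert above states,
+   `nk_window_close` must not target the window that is currently being built between its
+   own `nk_begin`/`nk_end` pair. A typical pattern is to close another window by name, for
+   example from a button in a different window ("Settings" is a hypothetical window title):
+
+       if (nk_button_label(ctx, "Close settings"))
+           nk_window_close(ctx, "Settings");
+*/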
+NK_API void
+nk_window_set_bounds(struct nk_context *ctx,
+ const char *name, struct nk_rect bounds)
+{
+ struct nk_window *win;
+ NK_ASSERT(ctx);
+ if (!ctx) return;
+ win = nk_window_find(ctx, name);
+ if (!win) return;
+    NK_ASSERT(ctx->current != win && "You cannot update a window that is currently being processed");
+ win->bounds = bounds;
+}
+NK_API void
+nk_window_set_position(struct nk_context *ctx,
+ const char *name, struct nk_vec2 pos)
+{
+ struct nk_window *win = nk_window_find(ctx, name);
+ if (!win) return;
+ win->bounds.x = pos.x;
+ win->bounds.y = pos.y;
+}
+NK_API void
+nk_window_set_size(struct nk_context *ctx,
+ const char *name, struct nk_vec2 size)
+{
+ struct nk_window *win = nk_window_find(ctx, name);
+ if (!win) return;
+ win->bounds.w = size.x;
+ win->bounds.h = size.y;
+}
+NK_API void
+nk_window_set_scroll(struct nk_context *ctx, nk_uint offset_x, nk_uint offset_y)
+{
+ struct nk_window *win;
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ if (!ctx || !ctx->current)
+ return;
+ win = ctx->current;
+ win->scrollbar.x = offset_x;
+ win->scrollbar.y = offset_y;
+}
+NK_API void
+nk_window_collapse(struct nk_context *ctx, const char *name,
+ enum nk_collapse_states c)
+{
+ int title_len;
+ nk_hash title_hash;
+ struct nk_window *win;
+ NK_ASSERT(ctx);
+ if (!ctx) return;
+
+ title_len = (int)nk_strlen(name);
+ title_hash = nk_murmur_hash(name, (int)title_len, NK_WINDOW_TITLE);
+ win = nk_find_window(ctx, title_hash, name);
+ if (!win) return;
+ if (c == NK_MINIMIZED)
+ win->flags |= NK_WINDOW_MINIMIZED;
+ else win->flags &= ~(nk_flags)NK_WINDOW_MINIMIZED;
+}
+NK_API void
+nk_window_collapse_if(struct nk_context *ctx, const char *name,
+ enum nk_collapse_states c, int cond)
+{
+ NK_ASSERT(ctx);
+ if (!ctx || !cond) return;
+ nk_window_collapse(ctx, name, c);
+}
+NK_API void
+nk_window_show(struct nk_context *ctx, const char *name, enum nk_show_states s)
+{
+ int title_len;
+ nk_hash title_hash;
+ struct nk_window *win;
+ NK_ASSERT(ctx);
+ if (!ctx) return;
+
+ title_len = (int)nk_strlen(name);
+ title_hash = nk_murmur_hash(name, (int)title_len, NK_WINDOW_TITLE);
+ win = nk_find_window(ctx, title_hash, name);
+ if (!win) return;
+ if (s == NK_HIDDEN) {
+ win->flags |= NK_WINDOW_HIDDEN;
+ } else win->flags &= ~(nk_flags)NK_WINDOW_HIDDEN;
+}
+NK_API void
+nk_window_show_if(struct nk_context *ctx, const char *name,
+ enum nk_show_states s, int cond)
+{
+ NK_ASSERT(ctx);
+ if (!ctx || !cond) return;
+ nk_window_show(ctx, name, s);
+}
+
+NK_API void
+nk_window_set_focus(struct nk_context *ctx, const char *name)
+{
+ int title_len;
+ nk_hash title_hash;
+ struct nk_window *win;
+ NK_ASSERT(ctx);
+ if (!ctx) return;
+
+ title_len = (int)nk_strlen(name);
+ title_hash = nk_murmur_hash(name, (int)title_len, NK_WINDOW_TITLE);
+ win = nk_find_window(ctx, title_hash, name);
+ if (win && ctx->end != win) {
+ nk_remove_window(ctx, win);
+ nk_insert_window(ctx, win, NK_INSERT_BACK);
+ }
+ ctx->active = win;
+}
+
+
+
+
+/* ===============================================================
+ *
+ * POPUP
+ *
+ * ===============================================================*/
+NK_API int
+nk_popup_begin(struct nk_context *ctx, enum nk_popup_type type,
+ const char *title, nk_flags flags, struct nk_rect rect)
+{
+ struct nk_window *popup;
+ struct nk_window *win;
+ struct nk_panel *panel;
+
+ int title_len;
+ nk_hash title_hash;
+ nk_size allocated;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(title);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ if (!ctx || !ctx->current || !ctx->current->layout)
+ return 0;
+
+ win = ctx->current;
+ panel = win->layout;
+ NK_ASSERT(!(panel->type & NK_PANEL_SET_POPUP) && "popups are not allowed to have popups");
+ (void)panel;
+ title_len = (int)nk_strlen(title);
+ title_hash = nk_murmur_hash(title, (int)title_len, NK_PANEL_POPUP);
+
+ popup = win->popup.win;
+ if (!popup) {
+ popup = (struct nk_window*)nk_create_window(ctx);
+ popup->parent = win;
+ win->popup.win = popup;
+ win->popup.active = 0;
+ win->popup.type = NK_PANEL_POPUP;
+ }
+
+ /* make sure we have correct popup */
+ if (win->popup.name != title_hash) {
+ if (!win->popup.active) {
+ nk_zero(popup, sizeof(*popup));
+ win->popup.name = title_hash;
+ win->popup.active = 1;
+ win->popup.type = NK_PANEL_POPUP;
+ } else return 0;
+ }
+
+ /* popup position is local to window */
+ ctx->current = popup;
+ rect.x += win->layout->clip.x;
+ rect.y += win->layout->clip.y;
+
+ /* setup popup data */
+ popup->parent = win;
+ popup->bounds = rect;
+ popup->seq = ctx->seq;
+ popup->layout = (struct nk_panel*)nk_create_panel(ctx);
+ popup->flags = flags;
+ popup->flags |= NK_WINDOW_BORDER;
+ if (type == NK_POPUP_DYNAMIC)
+ popup->flags |= NK_WINDOW_DYNAMIC;
+
+ popup->buffer = win->buffer;
+ nk_start_popup(ctx, win);
+ allocated = ctx->memory.allocated;
+ nk_push_scissor(&popup->buffer, nk_null_rect);
+
+ if (nk_panel_begin(ctx, title, NK_PANEL_POPUP)) {
+ /* popup is running therefore invalidate parent panels */
+ struct nk_panel *root;
+ root = win->layout;
+ while (root) {
+ root->flags |= NK_WINDOW_ROM;
+ root->flags &= ~(nk_flags)NK_WINDOW_REMOVE_ROM;
+ root = root->parent;
+ }
+ win->popup.active = 1;
+ popup->layout->offset_x = &popup->scrollbar.x;
+ popup->layout->offset_y = &popup->scrollbar.y;
+ popup->layout->parent = win->layout;
+ return 1;
+ } else {
+ /* popup was closed/is invalid so cleanup */
+ struct nk_panel *root;
+ root = win->layout;
+ while (root) {
+ root->flags |= NK_WINDOW_REMOVE_ROM;
+ root = root->parent;
+ }
+ win->popup.buf.active = 0;
+ win->popup.active = 0;
+ ctx->memory.allocated = allocated;
+ ctx->current = win;
+ nk_free_panel(ctx, popup->layout);
+ popup->layout = 0;
+ return 0;
+ }
+}
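+/* Usage sketch (illustrative only, not part of the library): a popup is opened from inside
+   its parent window's `nk_begin`/`nk_end` block, and its rectangle is given in coordinates
+   local to the parent window's content region. The window title, popup size and the
+   `show_error` application-side flag are hypothetical:
+
+       if (nk_begin(ctx, "Demo", nk_rect(50, 50, 220, 220), NK_WINDOW_BORDER|NK_WINDOW_TITLE)) {
+           if (show_error) {
+               if (nk_popup_begin(ctx, NK_POPUP_STATIC, "Error", 0, nk_rect(10, 40, 180, 90))) {
+                   nk_layout_row_dynamic(ctx, 25, 1);
+                   nk_label(ctx, "Something went wrong", NK_TEXT_LEFT);
+                   if (nk_button_label(ctx, "OK")) {
+                       show_error = nk_false;
+                       nk_popup_close(ctx);
+                   }
+                   nk_popup_end(ctx);
+               } else show_error = nk_false;
+           }
+       }
+       nk_end(ctx);
+*/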
+NK_LIB int
+nk_nonblock_begin(struct nk_context *ctx,
+ nk_flags flags, struct nk_rect body, struct nk_rect header,
+ enum nk_panel_type panel_type)
+{
+ struct nk_window *popup;
+ struct nk_window *win;
+ struct nk_panel *panel;
+ int is_active = nk_true;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ if (!ctx || !ctx->current || !ctx->current->layout)
+ return 0;
+
+ /* popups cannot have popups */
+ win = ctx->current;
+ panel = win->layout;
+ NK_ASSERT(!(panel->type & NK_PANEL_SET_POPUP));
+ (void)panel;
+ popup = win->popup.win;
+ if (!popup) {
+ /* create window for nonblocking popup */
+ popup = (struct nk_window*)nk_create_window(ctx);
+ popup->parent = win;
+ win->popup.win = popup;
+ win->popup.type = panel_type;
+ nk_command_buffer_init(&popup->buffer, &ctx->memory, NK_CLIPPING_ON);
+ } else {
+ /* close the popup if user pressed outside or in the header */
+ int pressed, in_body, in_header;
+#ifdef NK_BUTTON_TRIGGER_ON_RELEASE
+ pressed = nk_input_is_mouse_released(&ctx->input, NK_BUTTON_LEFT);
+#else
+ pressed = nk_input_is_mouse_pressed(&ctx->input, NK_BUTTON_LEFT);
+#endif
+ in_body = nk_input_is_mouse_hovering_rect(&ctx->input, body);
+ in_header = nk_input_is_mouse_hovering_rect(&ctx->input, header);
+ if (pressed && (!in_body || in_header))
+ is_active = nk_false;
+ }
+ win->popup.header = header;
+
+ if (!is_active) {
+ /* remove read only mode from all parent panels */
+ struct nk_panel *root = win->layout;
+ while (root) {
+ root->flags |= NK_WINDOW_REMOVE_ROM;
+ root = root->parent;
+ }
+ return is_active;
+ }
+ popup->bounds = body;
+ popup->parent = win;
+ popup->layout = (struct nk_panel*)nk_create_panel(ctx);
+ popup->flags = flags;
+ popup->flags |= NK_WINDOW_BORDER;
+ popup->flags |= NK_WINDOW_DYNAMIC;
+ popup->seq = ctx->seq;
+ win->popup.active = 1;
+ NK_ASSERT(popup->layout);
+
+ nk_start_popup(ctx, win);
+ popup->buffer = win->buffer;
+ nk_push_scissor(&popup->buffer, nk_null_rect);
+ ctx->current = popup;
+
+ nk_panel_begin(ctx, 0, panel_type);
+ win->buffer = popup->buffer;
+ popup->layout->parent = win->layout;
+ popup->layout->offset_x = &popup->scrollbar.x;
+ popup->layout->offset_y = &popup->scrollbar.y;
+
+ /* set read only mode to all parent panels */
+ {struct nk_panel *root;
+ root = win->layout;
+ while (root) {
+ root->flags |= NK_WINDOW_ROM;
+ root = root->parent;
+ }}
+ return is_active;
+}
+NK_API void
+nk_popup_close(struct nk_context *ctx)
+{
+ struct nk_window *popup;
+ NK_ASSERT(ctx);
+ if (!ctx || !ctx->current) return;
+
+ popup = ctx->current;
+ NK_ASSERT(popup->parent);
+ NK_ASSERT(popup->layout->type & NK_PANEL_SET_POPUP);
+ popup->flags |= NK_WINDOW_HIDDEN;
+}
+NK_API void
+nk_popup_end(struct nk_context *ctx)
+{
+ struct nk_window *win;
+ struct nk_window *popup;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ if (!ctx || !ctx->current || !ctx->current->layout)
+ return;
+
+ popup = ctx->current;
+ if (!popup->parent) return;
+ win = popup->parent;
+ if (popup->flags & NK_WINDOW_HIDDEN) {
+ struct nk_panel *root;
+ root = win->layout;
+ while (root) {
+ root->flags |= NK_WINDOW_REMOVE_ROM;
+ root = root->parent;
+ }
+ win->popup.active = 0;
+ }
+ nk_push_scissor(&popup->buffer, nk_null_rect);
+ nk_end(ctx);
+
+ win->buffer = popup->buffer;
+ nk_finish_popup(ctx, win);
+ ctx->current = win;
+ nk_push_scissor(&win->buffer, win->layout->clip);
+}
+NK_API void
+nk_popup_get_scroll(struct nk_context *ctx, nk_uint *offset_x, nk_uint *offset_y)
+{
+ struct nk_window *popup;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ if (!ctx || !ctx->current || !ctx->current->layout)
+ return;
+
+ popup = ctx->current;
+ if (offset_x)
+ *offset_x = popup->scrollbar.x;
+ if (offset_y)
+ *offset_y = popup->scrollbar.y;
+}
+NK_API void
+nk_popup_set_scroll(struct nk_context *ctx, nk_uint offset_x, nk_uint offset_y)
+{
+ struct nk_window *popup;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ if (!ctx || !ctx->current || !ctx->current->layout)
+ return;
+
+ popup = ctx->current;
+ popup->scrollbar.x = offset_x;
+ popup->scrollbar.y = offset_y;
+}
+
+
+
+
+/* ==============================================================
+ *
+ * CONTEXTUAL
+ *
+ * ===============================================================*/
+NK_API int
+nk_contextual_begin(struct nk_context *ctx, nk_flags flags, struct nk_vec2 size,
+ struct nk_rect trigger_bounds)
+{
+ struct nk_window *win;
+ struct nk_window *popup;
+ struct nk_rect body;
+
+ NK_STORAGE const struct nk_rect null_rect = {-1,-1,0,0};
+ int is_clicked = 0;
+ int is_open = 0;
+ int ret = 0;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ if (!ctx || !ctx->current || !ctx->current->layout)
+ return 0;
+
+ win = ctx->current;
+ ++win->popup.con_count;
+ if (ctx->current != ctx->active)
+ return 0;
+
+    /* check if a contextual popup is already open for this window */
+ popup = win->popup.win;
+ is_open = (popup && win->popup.type == NK_PANEL_CONTEXTUAL);
+ is_clicked = nk_input_mouse_clicked(&ctx->input, NK_BUTTON_RIGHT, trigger_bounds);
+ if (win->popup.active_con && win->popup.con_count != win->popup.active_con)
+ return 0;
+ if (!is_open && win->popup.active_con)
+ win->popup.active_con = 0;
+ if ((!is_open && !is_clicked))
+ return 0;
+
+ /* calculate contextual position on click */
+ win->popup.active_con = win->popup.con_count;
+ if (is_clicked) {
+ body.x = ctx->input.mouse.pos.x;
+ body.y = ctx->input.mouse.pos.y;
+ } else {
+ body.x = popup->bounds.x;
+ body.y = popup->bounds.y;
+ }
+ body.w = size.x;
+ body.h = size.y;
+
+ /* start nonblocking contextual popup */
+ ret = nk_nonblock_begin(ctx, flags|NK_WINDOW_NO_SCROLLBAR, body,
+ null_rect, NK_PANEL_CONTEXTUAL);
+ if (ret) win->popup.type = NK_PANEL_CONTEXTUAL;
+ else {
+ win->popup.active_con = 0;
+ win->popup.type = NK_PANEL_NONE;
+ if (win->popup.win)
+ win->popup.win->flags = 0;
+ }
+ return ret;
+}
+NK_API int
+nk_contextual_item_text(struct nk_context *ctx, const char *text, int len,
+ nk_flags alignment)
+{
+ struct nk_window *win;
+ const struct nk_input *in;
+ const struct nk_style *style;
+
+ struct nk_rect bounds;
+ enum nk_widget_layout_states state;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ if (!ctx || !ctx->current || !ctx->current->layout)
+ return 0;
+
+ win = ctx->current;
+ style = &ctx->style;
+ state = nk_widget_fitting(&bounds, ctx, style->contextual_button.padding);
+ if (!state) return nk_false;
+
+ in = (state == NK_WIDGET_ROM || win->layout->flags & NK_WINDOW_ROM) ? 0 : &ctx->input;
+ if (nk_do_button_text(&ctx->last_widget_state, &win->buffer, bounds,
+ text, len, alignment, NK_BUTTON_DEFAULT, &style->contextual_button, in, style->font)) {
+ nk_contextual_close(ctx);
+ return nk_true;
+ }
+ return nk_false;
+}
+NK_API int
+nk_contextual_item_label(struct nk_context *ctx, const char *label, nk_flags align)
+{
+ return nk_contextual_item_text(ctx, label, nk_strlen(label), align);
+}
+NK_API int
+nk_contextual_item_image_text(struct nk_context *ctx, struct nk_image img,
+ const char *text, int len, nk_flags align)
+{
+ struct nk_window *win;
+ const struct nk_input *in;
+ const struct nk_style *style;
+
+ struct nk_rect bounds;
+ enum nk_widget_layout_states state;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ if (!ctx || !ctx->current || !ctx->current->layout)
+ return 0;
+
+ win = ctx->current;
+ style = &ctx->style;
+ state = nk_widget_fitting(&bounds, ctx, style->contextual_button.padding);
+ if (!state) return nk_false;
+
+ in = (state == NK_WIDGET_ROM || win->layout->flags & NK_WINDOW_ROM) ? 0 : &ctx->input;
+ if (nk_do_button_text_image(&ctx->last_widget_state, &win->buffer, bounds,
+ img, text, len, align, NK_BUTTON_DEFAULT, &style->contextual_button, style->font, in)){
+ nk_contextual_close(ctx);
+ return nk_true;
+ }
+ return nk_false;
+}
+NK_API int
+nk_contextual_item_image_label(struct nk_context *ctx, struct nk_image img,
+ const char *label, nk_flags align)
+{
+ return nk_contextual_item_image_text(ctx, img, label, nk_strlen(label), align);
+}
+NK_API int
+nk_contextual_item_symbol_text(struct nk_context *ctx, enum nk_symbol_type symbol,
+ const char *text, int len, nk_flags align)
+{
+ struct nk_window *win;
+ const struct nk_input *in;
+ const struct nk_style *style;
+
+ struct nk_rect bounds;
+ enum nk_widget_layout_states state;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ if (!ctx || !ctx->current || !ctx->current->layout)
+ return 0;
+
+ win = ctx->current;
+ style = &ctx->style;
+ state = nk_widget_fitting(&bounds, ctx, style->contextual_button.padding);
+ if (!state) return nk_false;
+
+ in = (state == NK_WIDGET_ROM || win->layout->flags & NK_WINDOW_ROM) ? 0 : &ctx->input;
+ if (nk_do_button_text_symbol(&ctx->last_widget_state, &win->buffer, bounds,
+ symbol, text, len, align, NK_BUTTON_DEFAULT, &style->contextual_button, style->font, in)) {
+ nk_contextual_close(ctx);
+ return nk_true;
+ }
+ return nk_false;
+}
+NK_API int
+nk_contextual_item_symbol_label(struct nk_context *ctx, enum nk_symbol_type symbol,
+ const char *text, nk_flags align)
+{
+ return nk_contextual_item_symbol_text(ctx, symbol, text, nk_strlen(text), align);
+}
+NK_API void
+nk_contextual_close(struct nk_context *ctx)
+{
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ if (!ctx || !ctx->current || !ctx->current->layout) return;
+ nk_popup_close(ctx);
+}
+NK_API void
+nk_contextual_end(struct nk_context *ctx)
+{
+ struct nk_window *popup;
+ struct nk_panel *panel;
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ if (!ctx || !ctx->current) return;
+
+ popup = ctx->current;
+ panel = popup->layout;
+ NK_ASSERT(popup->parent);
+ NK_ASSERT(panel->type & NK_PANEL_SET_POPUP);
+ if (panel->flags & NK_WINDOW_DYNAMIC) {
+        /* Close behavior
+        This is a bit of a hack, because we do not know how big the popup will be
+        until we end it. We therefore cannot tell within the same frame whether a
+        click landed outside the non-blocking popup; instead the popup is closed
+        in the next frame. */
+ struct nk_rect body = {0,0,0,0};
+ if (panel->at_y < (panel->bounds.y + panel->bounds.h)) {
+ struct nk_vec2 padding = nk_panel_get_padding(&ctx->style, panel->type);
+ body = panel->bounds;
+ body.y = (panel->at_y + panel->footer_height + panel->border + padding.y + panel->row.height);
+ body.h = (panel->bounds.y + panel->bounds.h) - body.y;
+ }
+ {int pressed = nk_input_is_mouse_pressed(&ctx->input, NK_BUTTON_LEFT);
+ int in_body = nk_input_is_mouse_hovering_rect(&ctx->input, body);
+ if (pressed && in_body)
+ popup->flags |= NK_WINDOW_HIDDEN;
+ }
+ }
+ if (popup->flags & NK_WINDOW_HIDDEN)
+ popup->seq = 0;
+ nk_popup_end(ctx);
+ return;
+}
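+/* Usage sketch (illustrative only, not part of the library): a contextual menu is attached
+   to a trigger rectangle and opened by a right click inside it; as noted above, a click
+   outside a dynamic contextual popup only closes it in the following frame. The label text,
+   menu size and item name are hypothetical:
+
+       struct nk_rect bounds = nk_widget_bounds(ctx);
+       nk_label(ctx, "Right-click me", NK_TEXT_LEFT);
+       if (nk_contextual_begin(ctx, 0, nk_vec2(120, 200), bounds)) {
+           nk_layout_row_dynamic(ctx, 25, 1);
+           if (nk_contextual_item_label(ctx, "Delete", NK_TEXT_LEFT))
+               ; /* application reaction goes here */
+           nk_contextual_end(ctx);
+       }
+*/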
+
+
+
+
+
+/* ===============================================================
+ *
+ * MENU
+ *
+ * ===============================================================*/
+NK_API void
+nk_menubar_begin(struct nk_context *ctx)
+{
+ struct nk_panel *layout;
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ if (!ctx || !ctx->current || !ctx->current->layout)
+ return;
+
+ layout = ctx->current->layout;
+ NK_ASSERT(layout->at_y == layout->bounds.y);
+    /* If this assert triggers, you allocated widget space between `nk_begin` and
+    `nk_menubar_begin`. If you want a menubar, the first nuklear call after `nk_begin`
+    has to be `nk_menubar_begin`. Inside the menubar you then allocate space for
+    widgets as usual (multiple rows are supported). A fuller usage sketch follows
+    this function.
+    Example:
+        if (nk_begin(...)) {
+            nk_menubar_begin(...);
+            nk_layout_xxxx(...);
+            nk_button(...);
+            nk_layout_xxxx(...);
+            nk_button(...);
+            nk_menubar_end(...);
+        }
+        nk_end(...);
+    */
+ if (layout->flags & NK_WINDOW_HIDDEN || layout->flags & NK_WINDOW_MINIMIZED)
+ return;
+
+ layout->menu.x = layout->at_x;
+ layout->menu.y = layout->at_y + layout->row.height;
+ layout->menu.w = layout->bounds.w;
+ layout->menu.offset.x = *layout->offset_x;
+ layout->menu.offset.y = *layout->offset_y;
+ *layout->offset_y = 0;
+}
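+/* Usage sketch (illustrative only, not part of the library) expanding the example above:
+   the menubar is the first thing after `nk_begin`, rows are allocated inside it as usual,
+   and each menu is a `nk_menu_begin_*`/`nk_menu_end` pair. The menu names, item names and
+   sizes are hypothetical:
+
+       nk_menubar_begin(ctx);
+       nk_layout_row_begin(ctx, NK_STATIC, 25, 2);
+       nk_layout_row_push(ctx, 45);
+       if (nk_menu_begin_label(ctx, "FILE", NK_TEXT_LEFT, nk_vec2(120, 200))) {
+           nk_layout_row_dynamic(ctx, 25, 1);
+           if (nk_menu_item_label(ctx, "Open", NK_TEXT_LEFT))
+               ; /* application reaction goes here */
+           nk_menu_end(ctx);
+       }
+       nk_layout_row_push(ctx, 45);
+       if (nk_menu_begin_label(ctx, "EDIT", NK_TEXT_LEFT, nk_vec2(120, 200))) {
+           nk_layout_row_dynamic(ctx, 25, 1);
+           if (nk_menu_item_label(ctx, "Copy", NK_TEXT_LEFT))
+               ; /* application reaction goes here */
+           nk_menu_end(ctx);
+       }
+       nk_layout_row_end(ctx);
+       nk_menubar_end(ctx);
+*/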
+NK_API void
+nk_menubar_end(struct nk_context *ctx)
+{
+ struct nk_window *win;
+ struct nk_panel *layout;
+ struct nk_command_buffer *out;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ if (!ctx || !ctx->current || !ctx->current->layout)
+ return;
+
+ win = ctx->current;
+ out = &win->buffer;
+ layout = win->layout;
+ if (layout->flags & NK_WINDOW_HIDDEN || layout->flags & NK_WINDOW_MINIMIZED)
+ return;
+
+ layout->menu.h = layout->at_y - layout->menu.y;
+ layout->bounds.y += layout->menu.h + ctx->style.window.spacing.y + layout->row.height;
+ layout->bounds.h -= layout->menu.h + ctx->style.window.spacing.y + layout->row.height;
+
+ *layout->offset_x = layout->menu.offset.x;
+ *layout->offset_y = layout->menu.offset.y;
+ layout->at_y = layout->bounds.y - layout->row.height;
+
+ layout->clip.y = layout->bounds.y;
+ layout->clip.h = layout->bounds.h;
+ nk_push_scissor(out, layout->clip);
+}
+NK_INTERN int
+nk_menu_begin(struct nk_context *ctx, struct nk_window *win,
+ const char *id, int is_clicked, struct nk_rect header, struct nk_vec2 size)
+{
+ int is_open = 0;
+ int is_active = 0;
+ struct nk_rect body;
+ struct nk_window *popup;
+ nk_hash hash = nk_murmur_hash(id, (int)nk_strlen(id), NK_PANEL_MENU);
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ if (!ctx || !ctx->current || !ctx->current->layout)
+ return 0;
+
+ body.x = header.x;
+ body.w = size.x;
+ body.y = header.y + header.h;
+ body.h = size.y;
+
+ popup = win->popup.win;
+ is_open = popup ? nk_true : nk_false;
+ is_active = (popup && (win->popup.name == hash) && win->popup.type == NK_PANEL_MENU);
+ if ((is_clicked && is_open && !is_active) || (is_open && !is_active) ||
+ (!is_open && !is_active && !is_clicked)) return 0;
+ if (!nk_nonblock_begin(ctx, NK_WINDOW_NO_SCROLLBAR, body, header, NK_PANEL_MENU))
+ return 0;
+
+ win->popup.type = NK_PANEL_MENU;
+ win->popup.name = hash;
+ return 1;
+}
+NK_API int
+nk_menu_begin_text(struct nk_context *ctx, const char *title, int len,
+ nk_flags align, struct nk_vec2 size)
+{
+ struct nk_window *win;
+ const struct nk_input *in;
+ struct nk_rect header;
+ int is_clicked = nk_false;
+ nk_flags state;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ if (!ctx || !ctx->current || !ctx->current->layout)
+ return 0;
+
+ win = ctx->current;
+ state = nk_widget(&header, ctx);
+ if (!state) return 0;
+ in = (state == NK_WIDGET_ROM || win->flags & NK_WINDOW_ROM) ? 0 : &ctx->input;
+ if (nk_do_button_text(&ctx->last_widget_state, &win->buffer, header,
+ title, len, align, NK_BUTTON_DEFAULT, &ctx->style.menu_button, in, ctx->style.font))
+ is_clicked = nk_true;
+ return nk_menu_begin(ctx, win, title, is_clicked, header, size);
+}
+NK_API int nk_menu_begin_label(struct nk_context *ctx,
+ const char *text, nk_flags align, struct nk_vec2 size)
+{
+ return nk_menu_begin_text(ctx, text, nk_strlen(text), align, size);
+}
+NK_API int
+nk_menu_begin_image(struct nk_context *ctx, const char *id, struct nk_image img,
+ struct nk_vec2 size)
+{
+ struct nk_window *win;
+ struct nk_rect header;
+ const struct nk_input *in;
+ int is_clicked = nk_false;
+ nk_flags state;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ if (!ctx || !ctx->current || !ctx->current->layout)
+ return 0;
+
+ win = ctx->current;
+ state = nk_widget(&header, ctx);
+ if (!state) return 0;
+ in = (state == NK_WIDGET_ROM || win->layout->flags & NK_WINDOW_ROM) ? 0 : &ctx->input;
+ if (nk_do_button_image(&ctx->last_widget_state, &win->buffer, header,
+ img, NK_BUTTON_DEFAULT, &ctx->style.menu_button, in))
+ is_clicked = nk_true;
+ return nk_menu_begin(ctx, win, id, is_clicked, header, size);
+}
+NK_API int
+nk_menu_begin_symbol(struct nk_context *ctx, const char *id,
+ enum nk_symbol_type sym, struct nk_vec2 size)
+{
+ struct nk_window *win;
+ const struct nk_input *in;
+ struct nk_rect header;
+ int is_clicked = nk_false;
+ nk_flags state;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ if (!ctx || !ctx->current || !ctx->current->layout)
+ return 0;
+
+ win = ctx->current;
+ state = nk_widget(&header, ctx);
+ if (!state) return 0;
+ in = (state == NK_WIDGET_ROM || win->layout->flags & NK_WINDOW_ROM) ? 0 : &ctx->input;
+ if (nk_do_button_symbol(&ctx->last_widget_state, &win->buffer, header,
+ sym, NK_BUTTON_DEFAULT, &ctx->style.menu_button, in, ctx->style.font))
+ is_clicked = nk_true;
+ return nk_menu_begin(ctx, win, id, is_clicked, header, size);
+}
+NK_API int
+nk_menu_begin_image_text(struct nk_context *ctx, const char *title, int len,
+ nk_flags align, struct nk_image img, struct nk_vec2 size)
+{
+ struct nk_window *win;
+ struct nk_rect header;
+ const struct nk_input *in;
+ int is_clicked = nk_false;
+ nk_flags state;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ if (!ctx || !ctx->current || !ctx->current->layout)
+ return 0;
+
+ win = ctx->current;
+ state = nk_widget(&header, ctx);
+ if (!state) return 0;
+ in = (state == NK_WIDGET_ROM || win->layout->flags & NK_WINDOW_ROM) ? 0 : &ctx->input;
+ if (nk_do_button_text_image(&ctx->last_widget_state, &win->buffer,
+ header, img, title, len, align, NK_BUTTON_DEFAULT, &ctx->style.menu_button,
+ ctx->style.font, in))
+ is_clicked = nk_true;
+ return nk_menu_begin(ctx, win, title, is_clicked, header, size);
+}
+NK_API int
+nk_menu_begin_image_label(struct nk_context *ctx,
+ const char *title, nk_flags align, struct nk_image img, struct nk_vec2 size)
+{
+ return nk_menu_begin_image_text(ctx, title, nk_strlen(title), align, img, size);
+}
+NK_API int
+nk_menu_begin_symbol_text(struct nk_context *ctx, const char *title, int len,
+ nk_flags align, enum nk_symbol_type sym, struct nk_vec2 size)
+{
+ struct nk_window *win;
+ struct nk_rect header;
+ const struct nk_input *in;
+ int is_clicked = nk_false;
+ nk_flags state;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ if (!ctx || !ctx->current || !ctx->current->layout)
+ return 0;
+
+ win = ctx->current;
+ state = nk_widget(&header, ctx);
+ if (!state) return 0;
+
+ in = (state == NK_WIDGET_ROM || win->layout->flags & NK_WINDOW_ROM) ? 0 : &ctx->input;
+ if (nk_do_button_text_symbol(&ctx->last_widget_state, &win->buffer,
+ header, sym, title, len, align, NK_BUTTON_DEFAULT, &ctx->style.menu_button,
+ ctx->style.font, in)) is_clicked = nk_true;
+ return nk_menu_begin(ctx, win, title, is_clicked, header, size);
+}
+NK_API int
+nk_menu_begin_symbol_label(struct nk_context *ctx,
+ const char *title, nk_flags align, enum nk_symbol_type sym, struct nk_vec2 size )
+{
+ return nk_menu_begin_symbol_text(ctx, title, nk_strlen(title), align,sym,size);
+}
+NK_API int
+nk_menu_item_text(struct nk_context *ctx, const char *title, int len, nk_flags align)
+{
+ return nk_contextual_item_text(ctx, title, len, align);
+}
+NK_API int
+nk_menu_item_label(struct nk_context *ctx, const char *label, nk_flags align)
+{
+ return nk_contextual_item_label(ctx, label, align);
+}
+NK_API int
+nk_menu_item_image_label(struct nk_context *ctx, struct nk_image img,
+ const char *label, nk_flags align)
+{
+ return nk_contextual_item_image_label(ctx, img, label, align);
+}
+NK_API int
+nk_menu_item_image_text(struct nk_context *ctx, struct nk_image img,
+ const char *text, int len, nk_flags align)
+{
+ return nk_contextual_item_image_text(ctx, img, text, len, align);
+}
+NK_API int nk_menu_item_symbol_text(struct nk_context *ctx, enum nk_symbol_type sym,
+ const char *text, int len, nk_flags align)
+{
+ return nk_contextual_item_symbol_text(ctx, sym, text, len, align);
+}
+NK_API int nk_menu_item_symbol_label(struct nk_context *ctx, enum nk_symbol_type sym,
+ const char *label, nk_flags align)
+{
+ return nk_contextual_item_symbol_label(ctx, sym, label, align);
+}
+NK_API void nk_menu_close(struct nk_context *ctx)
+{
+ nk_contextual_close(ctx);
+}
+NK_API void
+nk_menu_end(struct nk_context *ctx)
+{
+ nk_contextual_end(ctx);
+}
+
+
+
+
+
+/* ===============================================================
+ *
+ * LAYOUT
+ *
+ * ===============================================================*/
+NK_API void
+nk_layout_set_min_row_height(struct nk_context *ctx, float height)
+{
+ struct nk_window *win;
+ struct nk_panel *layout;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ if (!ctx || !ctx->current || !ctx->current->layout)
+ return;
+
+ win = ctx->current;
+ layout = win->layout;
+ layout->row.min_height = height;
+}
+NK_API void
+nk_layout_reset_min_row_height(struct nk_context *ctx)
+{
+ struct nk_window *win;
+ struct nk_panel *layout;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ if (!ctx || !ctx->current || !ctx->current->layout)
+ return;
+
+ win = ctx->current;
+ layout = win->layout;
+ layout->row.min_height = ctx->style.font->height;
+ layout->row.min_height += ctx->style.text.padding.y*2;
+ layout->row.min_height += ctx->style.window.min_row_height_padding*2;
+}
+NK_LIB float
+nk_layout_row_calculate_usable_space(const struct nk_style *style, enum nk_panel_type type,
+ float total_space, int columns)
+{
+ float panel_padding;
+ float panel_spacing;
+ float panel_space;
+
+ struct nk_vec2 spacing;
+ struct nk_vec2 padding;
+
+ spacing = style->window.spacing;
+ padding = nk_panel_get_padding(style, type);
+
+ /* calculate the usable panel space */
+ panel_padding = 2 * padding.x;
+ panel_spacing = (float)NK_MAX(columns - 1, 0) * spacing.x;
+ panel_space = total_space - panel_padding - panel_spacing;
+ return panel_space;
+}
+NK_LIB void
+nk_panel_layout(const struct nk_context *ctx, struct nk_window *win,
+ float height, int cols)
+{
+ struct nk_panel *layout;
+ const struct nk_style *style;
+ struct nk_command_buffer *out;
+
+ struct nk_vec2 item_spacing;
+ struct nk_color color;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ if (!ctx || !ctx->current || !ctx->current->layout)
+ return;
+
+ /* prefetch some configuration data */
+ layout = win->layout;
+ style = &ctx->style;
+ out = &win->buffer;
+ color = style->window.background;
+ item_spacing = style->window.spacing;
+
+    /* If one of these asserts triggers, you forgot to wrap a window, group, popup,
+    combobox or contextual menu `begin`/`end` pair in an `if` condition.
+    Example:
+        if (nk_begin(...)) {...} nk_end(...); or
+        if (nk_group_begin(...)) {... nk_group_end(...);} */
+ NK_ASSERT(!(layout->flags & NK_WINDOW_MINIMIZED));
+ NK_ASSERT(!(layout->flags & NK_WINDOW_HIDDEN));
+ NK_ASSERT(!(layout->flags & NK_WINDOW_CLOSED));
+
+ /* update the current row and set the current row layout */
+ layout->row.index = 0;
+ layout->at_y += layout->row.height;
+ layout->row.columns = cols;
+ if (height == 0.0f)
+ layout->row.height = NK_MAX(height, layout->row.min_height) + item_spacing.y;
+ else layout->row.height = height + item_spacing.y;
+
+ layout->row.item_offset = 0;
+ if (layout->flags & NK_WINDOW_DYNAMIC) {
+ /* draw background for dynamic panels */
+ struct nk_rect background;
+ background.x = win->bounds.x;
+ background.w = win->bounds.w;
+ background.y = layout->at_y - 1.0f;
+ background.h = layout->row.height + 1.0f;
+ nk_fill_rect(out, background, 0, color);
+ }
+}
+NK_LIB void
+nk_row_layout(struct nk_context *ctx, enum nk_layout_format fmt,
+ float height, int cols, int width)
+{
+ /* update the current row and set the current row layout */
+ struct nk_window *win;
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ if (!ctx || !ctx->current || !ctx->current->layout)
+ return;
+
+ win = ctx->current;
+ nk_panel_layout(ctx, win, height, cols);
+ if (fmt == NK_DYNAMIC)
+ win->layout->row.type = NK_LAYOUT_DYNAMIC_FIXED;
+ else win->layout->row.type = NK_LAYOUT_STATIC_FIXED;
+
+ win->layout->row.ratio = 0;
+ win->layout->row.filled = 0;
+ win->layout->row.item_offset = 0;
+ win->layout->row.item_width = (float)width;
+}
+NK_API float
+nk_layout_ratio_from_pixel(struct nk_context *ctx, float pixel_width)
+{
+ struct nk_window *win;
+ NK_ASSERT(ctx);
+ NK_ASSERT(pixel_width);
+ if (!ctx || !ctx->current || !ctx->current->layout) return 0;
+ win = ctx->current;
+    return NK_CLAMP(0.0f, pixel_width/win->bounds.w, 1.0f); /* ratio of the window width */
+}
+NK_API void
+nk_layout_row_dynamic(struct nk_context *ctx, float height, int cols)
+{
+ nk_row_layout(ctx, NK_DYNAMIC, height, cols, 0);
+}
+NK_API void
+nk_layout_row_static(struct nk_context *ctx, float height, int item_width, int cols)
+{
+ nk_row_layout(ctx, NK_STATIC, height, cols, item_width);
+}
+NK_API void
+nk_layout_row_begin(struct nk_context *ctx, enum nk_layout_format fmt,
+ float row_height, int cols)
+{
+ struct nk_window *win;
+ struct nk_panel *layout;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ if (!ctx || !ctx->current || !ctx->current->layout)
+ return;
+
+ win = ctx->current;
+ layout = win->layout;
+ nk_panel_layout(ctx, win, row_height, cols);
+ if (fmt == NK_DYNAMIC)
+ layout->row.type = NK_LAYOUT_DYNAMIC_ROW;
+ else layout->row.type = NK_LAYOUT_STATIC_ROW;
+
+ layout->row.ratio = 0;
+ layout->row.filled = 0;
+ layout->row.item_width = 0;
+ layout->row.item_offset = 0;
+ layout->row.columns = cols;
+}
+NK_API void
+nk_layout_row_push(struct nk_context *ctx, float ratio_or_width)
+{
+ struct nk_window *win;
+ struct nk_panel *layout;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ if (!ctx || !ctx->current || !ctx->current->layout)
+ return;
+
+ win = ctx->current;
+ layout = win->layout;
+ NK_ASSERT(layout->row.type == NK_LAYOUT_STATIC_ROW || layout->row.type == NK_LAYOUT_DYNAMIC_ROW);
+ if (layout->row.type != NK_LAYOUT_STATIC_ROW && layout->row.type != NK_LAYOUT_DYNAMIC_ROW)
+ return;
+
+ if (layout->row.type == NK_LAYOUT_DYNAMIC_ROW) {
+ float ratio = ratio_or_width;
+ if ((ratio + layout->row.filled) > 1.0f) return;
+ if (ratio > 0.0f)
+ layout->row.item_width = NK_SATURATE(ratio);
+ else layout->row.item_width = 1.0f - layout->row.filled;
+ } else layout->row.item_width = ratio_or_width;
+}
+NK_API void
+nk_layout_row_end(struct nk_context *ctx)
+{
+ struct nk_window *win;
+ struct nk_panel *layout;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ if (!ctx || !ctx->current || !ctx->current->layout)
+ return;
+
+ win = ctx->current;
+ layout = win->layout;
+ NK_ASSERT(layout->row.type == NK_LAYOUT_STATIC_ROW || layout->row.type == NK_LAYOUT_DYNAMIC_ROW);
+ if (layout->row.type != NK_LAYOUT_STATIC_ROW && layout->row.type != NK_LAYOUT_DYNAMIC_ROW)
+ return;
+ layout->row.item_width = 0;
+ layout->row.item_offset = 0;
+}
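+/* Usage sketch (illustrative only, not part of the library): the begin/push/end row API
+   sets each column width individually, as a ratio of the panel width for NK_DYNAMIC or in
+   pixels for NK_STATIC. The widget labels are hypothetical:
+
+       nk_layout_row_begin(ctx, NK_DYNAMIC, 25, 2);
+       nk_layout_row_push(ctx, 0.25f);
+       nk_button_label(ctx, "25%");
+       nk_layout_row_push(ctx, 0.75f);
+       nk_button_label(ctx, "75%");
+       nk_layout_row_end(ctx);
+
+   For the common cases, nk_layout_row_dynamic(ctx, 25, 2) gives every column an equal share
+   of the panel width, while nk_layout_row_static(ctx, 25, 80, 2) gives every column a fixed
+   80 pixel width.
+*/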
+NK_API void
+nk_layout_row(struct nk_context *ctx, enum nk_layout_format fmt,
+ float height, int cols, const float *ratio)
+{
+ int i;
+ int n_undef = 0;
+ struct nk_window *win;
+ struct nk_panel *layout;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ if (!ctx || !ctx->current || !ctx->current->layout)
+ return;
+
+ win = ctx->current;
+ layout = win->layout;
+ nk_panel_layout(ctx, win, height, cols);
+ if (fmt == NK_DYNAMIC) {
+ /* calculate width of undefined widget ratios */
+ float r = 0;
+ layout->row.ratio = ratio;
+ for (i = 0; i < cols; ++i) {
+ if (ratio[i] < 0.0f)
+ n_undef++;
+ else r += ratio[i];
+ }
+ r = NK_SATURATE(1.0f - r);
+ layout->row.type = NK_LAYOUT_DYNAMIC;
+ layout->row.item_width = (r > 0 && n_undef > 0) ? (r / (float)n_undef):0;
+ } else {
+ layout->row.ratio = ratio;
+ layout->row.type = NK_LAYOUT_STATIC;
+ layout->row.item_width = 0;
+ layout->row.item_offset = 0;
+ }
+ layout->row.item_offset = 0;
+ layout->row.filled = 0;
+}
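+/* Usage sketch (illustrative only, not part of the library): `nk_layout_row` takes an array
+   with one entry per column, either ratios of the panel width (NK_DYNAMIC) or pixel widths
+   (NK_STATIC); a negative ratio marks a column that shares the remaining space. The array
+   pointer is stored and read per widget, so it must stay valid for the whole row:
+
+       static const float ratio[] = {0.25f, 0.50f, 0.25f};
+       nk_layout_row(ctx, NK_DYNAMIC, 30, 3, ratio);
+       nk_button_label(ctx, "a");
+       nk_button_label(ctx, "b");
+       nk_button_label(ctx, "c");
+*/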
+NK_API void
+nk_layout_row_template_begin(struct nk_context *ctx, float height)
+{
+ struct nk_window *win;
+ struct nk_panel *layout;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ if (!ctx || !ctx->current || !ctx->current->layout)
+ return;
+
+ win = ctx->current;
+ layout = win->layout;
+ nk_panel_layout(ctx, win, height, 1);
+ layout->row.type = NK_LAYOUT_TEMPLATE;
+ layout->row.columns = 0;
+ layout->row.ratio = 0;
+ layout->row.item_width = 0;
+ layout->row.item_height = 0;
+ layout->row.item_offset = 0;
+ layout->row.filled = 0;
+ layout->row.item.x = 0;
+ layout->row.item.y = 0;
+ layout->row.item.w = 0;
+ layout->row.item.h = 0;
+}
+NK_API void
+nk_layout_row_template_push_dynamic(struct nk_context *ctx)
+{
+ struct nk_window *win;
+ struct nk_panel *layout;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ if (!ctx || !ctx->current || !ctx->current->layout)
+ return;
+
+ win = ctx->current;
+ layout = win->layout;
+ NK_ASSERT(layout->row.type == NK_LAYOUT_TEMPLATE);
+ NK_ASSERT(layout->row.columns < NK_MAX_LAYOUT_ROW_TEMPLATE_COLUMNS);
+ if (layout->row.type != NK_LAYOUT_TEMPLATE) return;
+ if (layout->row.columns >= NK_MAX_LAYOUT_ROW_TEMPLATE_COLUMNS) return;
+ layout->row.templates[layout->row.columns++] = -1.0f;
+}
+NK_API void
+nk_layout_row_template_push_variable(struct nk_context *ctx, float min_width)
+{
+ struct nk_window *win;
+ struct nk_panel *layout;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ if (!ctx || !ctx->current || !ctx->current->layout)
+ return;
+
+ win = ctx->current;
+ layout = win->layout;
+ NK_ASSERT(layout->row.type == NK_LAYOUT_TEMPLATE);
+ NK_ASSERT(layout->row.columns < NK_MAX_LAYOUT_ROW_TEMPLATE_COLUMNS);
+ if (layout->row.type != NK_LAYOUT_TEMPLATE) return;
+ if (layout->row.columns >= NK_MAX_LAYOUT_ROW_TEMPLATE_COLUMNS) return;
+ layout->row.templates[layout->row.columns++] = -min_width;
+}
+NK_API void
+nk_layout_row_template_push_static(struct nk_context *ctx, float width)
+{
+ struct nk_window *win;
+ struct nk_panel *layout;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ if (!ctx || !ctx->current || !ctx->current->layout)
+ return;
+
+ win = ctx->current;
+ layout = win->layout;
+ NK_ASSERT(layout->row.type == NK_LAYOUT_TEMPLATE);
+ NK_ASSERT(layout->row.columns < NK_MAX_LAYOUT_ROW_TEMPLATE_COLUMNS);
+ if (layout->row.type != NK_LAYOUT_TEMPLATE) return;
+ if (layout->row.columns >= NK_MAX_LAYOUT_ROW_TEMPLATE_COLUMNS) return;
+ layout->row.templates[layout->row.columns++] = width;
+}
+NK_API void
+nk_layout_row_template_end(struct nk_context *ctx)
+{
+ struct nk_window *win;
+ struct nk_panel *layout;
+
+ int i = 0;
+ int variable_count = 0;
+ int min_variable_count = 0;
+ float min_fixed_width = 0.0f;
+ float total_fixed_width = 0.0f;
+ float max_variable_width = 0.0f;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ if (!ctx || !ctx->current || !ctx->current->layout)
+ return;
+
+ win = ctx->current;
+ layout = win->layout;
+ NK_ASSERT(layout->row.type == NK_LAYOUT_TEMPLATE);
+ if (layout->row.type != NK_LAYOUT_TEMPLATE) return;
+ for (i = 0; i < layout->row.columns; ++i) {
+ float width = layout->row.templates[i];
+ if (width >= 0.0f) {
+ total_fixed_width += width;
+ min_fixed_width += width;
+ } else if (width < -1.0f) {
+ width = -width;
+ total_fixed_width += width;
+ max_variable_width = NK_MAX(max_variable_width, width);
+ variable_count++;
+ } else {
+ min_variable_count++;
+ variable_count++;
+ }
+ }
+ if (variable_count) {
+ float space = nk_layout_row_calculate_usable_space(&ctx->style, layout->type,
+ layout->bounds.w, layout->row.columns);
+ float var_width = (NK_MAX(space-min_fixed_width,0.0f)) / (float)variable_count;
+ int enough_space = var_width >= max_variable_width;
+ if (!enough_space)
+ var_width = (NK_MAX(space-total_fixed_width,0)) / (float)min_variable_count;
+ for (i = 0; i < layout->row.columns; ++i) {
+ float *width = &layout->row.templates[i];
+ *width = (*width >= 0.0f)? *width: (*width < -1.0f && !enough_space)? -(*width): var_width;
+ }
+ }
+}
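+/* Usage sketch (illustrative only, not part of the library): a row template mixes column
+   kinds. A static column has a fixed pixel width, a variable column is at least `min_width`
+   pixels wide and grows when space is left over, and a dynamic column takes a share of the
+   remaining space and may shrink to zero. The widget labels are hypothetical:
+
+       nk_layout_row_template_begin(ctx, 30);
+       nk_layout_row_template_push_static(ctx, 80);
+       nk_layout_row_template_push_variable(ctx, 110);
+       nk_layout_row_template_push_dynamic(ctx);
+       nk_layout_row_template_end(ctx);
+       nk_button_label(ctx, "static 80px");
+       nk_button_label(ctx, "variable >= 110px");
+       nk_button_label(ctx, "dynamic");
+*/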
+NK_API void
+nk_layout_space_begin(struct nk_context *ctx, enum nk_layout_format fmt,
+ float height, int widget_count)
+{
+ struct nk_window *win;
+ struct nk_panel *layout;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ if (!ctx || !ctx->current || !ctx->current->layout)
+ return;
+
+ win = ctx->current;
+ layout = win->layout;
+ nk_panel_layout(ctx, win, height, widget_count);
+ if (fmt == NK_STATIC)
+ layout->row.type = NK_LAYOUT_STATIC_FREE;
+ else layout->row.type = NK_LAYOUT_DYNAMIC_FREE;
+
+ layout->row.ratio = 0;
+ layout->row.filled = 0;
+ layout->row.item_width = 0;
+ layout->row.item_offset = 0;
+}
+NK_API void
+nk_layout_space_end(struct nk_context *ctx)
+{
+ struct nk_window *win;
+ struct nk_panel *layout;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ if (!ctx || !ctx->current || !ctx->current->layout)
+ return;
+
+ win = ctx->current;
+ layout = win->layout;
+ layout->row.item_width = 0;
+ layout->row.item_height = 0;
+ layout->row.item_offset = 0;
+ nk_zero(&layout->row.item, sizeof(layout->row.item));
+}
+NK_API void
+nk_layout_space_push(struct nk_context *ctx, struct nk_rect rect)
+{
+ struct nk_window *win;
+ struct nk_panel *layout;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ if (!ctx || !ctx->current || !ctx->current->layout)
+ return;
+
+ win = ctx->current;
+ layout = win->layout;
+ layout->row.item = rect;
+}
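+/* Usage sketch (illustrative only, not part of the library): `nk_layout_space_*` places
+   every widget at an explicit rectangle inside the space, given in pixels for NK_STATIC or
+   as ratios of the space for NK_DYNAMIC. The space height and widget rectangles here are
+   hypothetical:
+
+       nk_layout_space_begin(ctx, NK_STATIC, 200, 2);
+       nk_layout_space_push(ctx, nk_rect(0, 0, 150, 30));
+       nk_button_label(ctx, "A");
+       nk_layout_space_push(ctx, nk_rect(160, 50, 150, 30));
+       nk_button_label(ctx, "B");
+       nk_layout_space_end(ctx);
+*/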
+NK_API struct nk_rect
+nk_layout_space_bounds(struct nk_context *ctx)
+{
+ struct nk_rect ret;
+ struct nk_window *win;
+ struct nk_panel *layout;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ win = ctx->current;
+ layout = win->layout;
+
+ ret.x = layout->clip.x;
+ ret.y = layout->clip.y;
+ ret.w = layout->clip.w;
+ ret.h = layout->row.height;
+ return ret;
+}
+NK_API struct nk_rect
+nk_layout_widget_bounds(struct nk_context *ctx)
+{
+ struct nk_rect ret;
+ struct nk_window *win;
+ struct nk_panel *layout;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ win = ctx->current;
+ layout = win->layout;
+
+ ret.x = layout->at_x;
+ ret.y = layout->at_y;
+ ret.w = layout->bounds.w - NK_MAX(layout->at_x - layout->bounds.x,0);
+ ret.h = layout->row.height;
+ return ret;
+}
+NK_API struct nk_vec2
+nk_layout_space_to_screen(struct nk_context *ctx, struct nk_vec2 ret)
+{
+ struct nk_window *win;
+ struct nk_panel *layout;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ win = ctx->current;
+ layout = win->layout;
+
+ ret.x += layout->at_x - (float)*layout->offset_x;
+ ret.y += layout->at_y - (float)*layout->offset_y;
+ return ret;
+}
+NK_API struct nk_vec2
+nk_layout_space_to_local(struct nk_context *ctx, struct nk_vec2 ret)
+{
+ struct nk_window *win;
+ struct nk_panel *layout;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ win = ctx->current;
+ layout = win->layout;
+
+ ret.x += -layout->at_x + (float)*layout->offset_x;
+ ret.y += -layout->at_y + (float)*layout->offset_y;
+ return ret;
+}
+NK_API struct nk_rect
+nk_layout_space_rect_to_screen(struct nk_context *ctx, struct nk_rect ret)
+{
+ struct nk_window *win;
+ struct nk_panel *layout;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ win = ctx->current;
+ layout = win->layout;
+
+ ret.x += layout->at_x - (float)*layout->offset_x;
+ ret.y += layout->at_y - (float)*layout->offset_y;
+ return ret;
+}
+NK_API struct nk_rect
+nk_layout_space_rect_to_local(struct nk_context *ctx, struct nk_rect ret)
+{
+ struct nk_window *win;
+ struct nk_panel *layout;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ win = ctx->current;
+ layout = win->layout;
+
+ ret.x += -layout->at_x + (float)*layout->offset_x;
+ ret.y += -layout->at_y + (float)*layout->offset_y;
+ return ret;
+}
+NK_LIB void
+nk_panel_alloc_row(const struct nk_context *ctx, struct nk_window *win)
+{
+ struct nk_panel *layout = win->layout;
+ struct nk_vec2 spacing = ctx->style.window.spacing;
+ const float row_height = layout->row.height - spacing.y;
+ nk_panel_layout(ctx, win, row_height, layout->row.columns);
+}
+NK_LIB void
+nk_layout_widget_space(struct nk_rect *bounds, const struct nk_context *ctx,
+ struct nk_window *win, int modify)
+{
+ struct nk_panel *layout;
+ const struct nk_style *style;
+
+ struct nk_vec2 spacing;
+ struct nk_vec2 padding;
+
+ float item_offset = 0;
+ float item_width = 0;
+ float item_spacing = 0;
+ float panel_space = 0;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ if (!ctx || !ctx->current || !ctx->current->layout)
+ return;
+
+ win = ctx->current;
+ layout = win->layout;
+ style = &ctx->style;
+ NK_ASSERT(bounds);
+
+ spacing = style->window.spacing;
+ padding = nk_panel_get_padding(style, layout->type);
+ panel_space = nk_layout_row_calculate_usable_space(&ctx->style, layout->type,
+ layout->bounds.w, layout->row.columns);
+
+    #define NK_FRAC(x) (x - (int)x) /* fractional part, used to remove rounding gaps between widgets */
+ /* calculate the width of one item inside the current layout space */
+ switch (layout->row.type) {
+ case NK_LAYOUT_DYNAMIC_FIXED: {
+ /* scaling fixed size widgets item width */
+ float w = NK_MAX(1.0f,panel_space) / (float)layout->row.columns;
+ item_offset = (float)layout->row.index * w;
+ item_width = w + NK_FRAC(item_offset);
+ item_spacing = (float)layout->row.index * spacing.x;
+ } break;
+ case NK_LAYOUT_DYNAMIC_ROW: {
+ /* scaling single ratio widget width */
+ float w = layout->row.item_width * panel_space;
+ item_offset = layout->row.item_offset;
+ item_width = w + NK_FRAC(item_offset);
+ item_spacing = 0;
+
+ if (modify) {
+ layout->row.item_offset += w + spacing.x;
+ layout->row.filled += layout->row.item_width;
+ layout->row.index = 0;
+ }
+ } break;
+ case NK_LAYOUT_DYNAMIC_FREE: {
+        /* free widget placing relative to the panel width */
+ bounds->x = layout->at_x + (layout->bounds.w * layout->row.item.x);
+ bounds->x -= (float)*layout->offset_x;
+ bounds->y = layout->at_y + (layout->row.height * layout->row.item.y);
+ bounds->y -= (float)*layout->offset_y;
+ bounds->w = layout->bounds.w * layout->row.item.w + NK_FRAC(bounds->x);
+ bounds->h = layout->row.height * layout->row.item.h + NK_FRAC(bounds->y);
+ return;
+ }
+ case NK_LAYOUT_DYNAMIC: {
+ /* scaling arrays of panel width ratios for every widget */
+ float ratio, w;
+ NK_ASSERT(layout->row.ratio);
+ ratio = (layout->row.ratio[layout->row.index] < 0) ?
+ layout->row.item_width : layout->row.ratio[layout->row.index];
+
+ w = (ratio * panel_space);
+ item_spacing = (float)layout->row.index * spacing.x;
+ item_offset = layout->row.item_offset;
+ item_width = w + NK_FRAC(item_offset);
+
+ if (modify) {
+ layout->row.item_offset += w;
+ layout->row.filled += ratio;
+ }
+ } break;
+ case NK_LAYOUT_STATIC_FIXED: {
+ /* non-scaling fixed widgets item width */
+ item_width = layout->row.item_width;
+ item_offset = (float)layout->row.index * item_width;
+ item_spacing = (float)layout->row.index * spacing.x;
+ } break;
+ case NK_LAYOUT_STATIC_ROW: {
+        /* non-scaling single pixel-width widget */
+ item_width = layout->row.item_width;
+ item_offset = layout->row.item_offset;
+ item_spacing = (float)layout->row.index * spacing.x;
+ if (modify) layout->row.item_offset += item_width;
+ } break;
+ case NK_LAYOUT_STATIC_FREE: {
+ /* free widget placing */
+ bounds->x = layout->at_x + layout->row.item.x;
+ bounds->w = layout->row.item.w;
+ if (((bounds->x + bounds->w) > layout->max_x) && modify)
+ layout->max_x = (bounds->x + bounds->w);
+ bounds->x -= (float)*layout->offset_x;
+ bounds->y = layout->at_y + layout->row.item.y;
+ bounds->y -= (float)*layout->offset_y;
+ bounds->h = layout->row.item.h;
+ return;
+ }
+ case NK_LAYOUT_STATIC: {
+        /* non-scaling array of pixel widths, one for every widget */
+ item_spacing = (float)layout->row.index * spacing.x;
+ item_width = layout->row.ratio[layout->row.index];
+ item_offset = layout->row.item_offset;
+ if (modify) layout->row.item_offset += item_width;
+ } break;
+ case NK_LAYOUT_TEMPLATE: {
+        /* stretchy row layout with combined dynamic/static widget width */
+ float w;
+ NK_ASSERT(layout->row.index < layout->row.columns);
+ NK_ASSERT(layout->row.index < NK_MAX_LAYOUT_ROW_TEMPLATE_COLUMNS);
+ w = layout->row.templates[layout->row.index];
+ item_offset = layout->row.item_offset;
+ item_width = w + NK_FRAC(item_offset);
+ item_spacing = (float)layout->row.index * spacing.x;
+ if (modify) layout->row.item_offset += w;
+ } break;
+ #undef NK_FRAC
+ default: NK_ASSERT(0); break;
+    }
+
+ /* set the bounds of the newly allocated widget */
+ bounds->w = item_width;
+ bounds->h = layout->row.height - spacing.y;
+ bounds->y = layout->at_y - (float)*layout->offset_y;
+ bounds->x = layout->at_x + item_offset + item_spacing + padding.x;
+ if (((bounds->x + bounds->w) > layout->max_x) && modify)
+ layout->max_x = bounds->x + bounds->w;
+ bounds->x -= (float)*layout->offset_x;
+}
+NK_LIB void
+nk_panel_alloc_space(struct nk_rect *bounds, const struct nk_context *ctx)
+{
+ struct nk_window *win;
+ struct nk_panel *layout;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ if (!ctx || !ctx->current || !ctx->current->layout)
+ return;
+
+ /* check if the end of the row has been hit and begin new row if so */
+ win = ctx->current;
+ layout = win->layout;
+ if (layout->row.index >= layout->row.columns)
+ nk_panel_alloc_row(ctx, win);
+
+ /* calculate widget position and size */
+ nk_layout_widget_space(bounds, ctx, win, nk_true);
+ layout->row.index++;
+}
+NK_LIB void
+nk_layout_peek(struct nk_rect *bounds, struct nk_context *ctx)
+{
+ float y;
+ int index;
+ struct nk_window *win;
+ struct nk_panel *layout;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ if (!ctx || !ctx->current || !ctx->current->layout)
+ return;
+
+ win = ctx->current;
+ layout = win->layout;
+ y = layout->at_y;
+ index = layout->row.index;
+ if (layout->row.index >= layout->row.columns) {
+ layout->at_y += layout->row.height;
+ layout->row.index = 0;
+ }
+ nk_layout_widget_space(bounds, ctx, win, nk_false);
+ if (!layout->row.index) {
+ bounds->x -= layout->row.item_offset;
+ }
+ layout->at_y = y;
+ layout->row.index = index;
+}
+
+
+
+
+
+/* ===============================================================
+ *
+ * TREE
+ *
+ * ===============================================================*/
+NK_INTERN int
+nk_tree_state_base(struct nk_context *ctx, enum nk_tree_type type,
+ struct nk_image *img, const char *title, enum nk_collapse_states *state)
+{
+ struct nk_window *win;
+ struct nk_panel *layout;
+ const struct nk_style *style;
+ struct nk_command_buffer *out;
+ const struct nk_input *in;
+ const struct nk_style_button *button;
+ enum nk_symbol_type symbol;
+ float row_height;
+
+ struct nk_vec2 item_spacing;
+ struct nk_rect header = {0,0,0,0};
+ struct nk_rect sym = {0,0,0,0};
+ struct nk_text text;
+
+ nk_flags ws = 0;
+ enum nk_widget_layout_states widget_state;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ if (!ctx || !ctx->current || !ctx->current->layout)
+ return 0;
+
+ /* cache some data */
+ win = ctx->current;
+ layout = win->layout;
+ out = &win->buffer;
+ style = &ctx->style;
+ item_spacing = style->window.spacing;
+
+ /* calculate header bounds and draw background */
+ row_height = style->font->height + 2 * style->tab.padding.y;
+ nk_layout_set_min_row_height(ctx, row_height);
+ nk_layout_row_dynamic(ctx, row_height, 1);
+ nk_layout_reset_min_row_height(ctx);
+
+ widget_state = nk_widget(&header, ctx);
+ if (type == NK_TREE_TAB) {
+ const struct nk_style_item *background = &style->tab.background;
+ if (background->type == NK_STYLE_ITEM_IMAGE) {
+ nk_draw_image(out, header, &background->data.image, nk_white);
+ text.background = nk_rgba(0,0,0,0);
+ } else {
+ text.background = background->data.color;
+ nk_fill_rect(out, header, 0, style->tab.border_color);
+ nk_fill_rect(out, nk_shrink_rect(header, style->tab.border),
+ style->tab.rounding, background->data.color);
+ }
+ } else text.background = style->window.background;
+
+ /* update node state */
+ in = (!(layout->flags & NK_WINDOW_ROM)) ? &ctx->input: 0;
+ in = (in && widget_state == NK_WIDGET_VALID) ? &ctx->input : 0;
+ if (nk_button_behavior(&ws, header, in, NK_BUTTON_DEFAULT))
+ *state = (*state == NK_MAXIMIZED) ? NK_MINIMIZED : NK_MAXIMIZED;
+
+ /* select correct button style */
+ if (*state == NK_MAXIMIZED) {
+ symbol = style->tab.sym_maximize;
+ if (type == NK_TREE_TAB)
+ button = &style->tab.tab_maximize_button;
+ else button = &style->tab.node_maximize_button;
+ } else {
+ symbol = style->tab.sym_minimize;
+ if (type == NK_TREE_TAB)
+ button = &style->tab.tab_minimize_button;
+ else button = &style->tab.node_minimize_button;
+ }
+
+ {/* draw triangle button */
+ sym.w = sym.h = style->font->height;
+ sym.y = header.y + style->tab.padding.y;
+ sym.x = header.x + style->tab.padding.x;
+ nk_do_button_symbol(&ws, &win->buffer, sym, symbol, NK_BUTTON_DEFAULT,
+ button, 0, style->font);
+
+ if (img) {
+ /* draw optional image icon */
+ sym.x = sym.x + sym.w + 4 * item_spacing.x;
+ nk_draw_image(&win->buffer, sym, img, nk_white);
+ sym.w = style->font->height + style->tab.spacing.x;}
+ }
+
+ {/* draw label */
+ struct nk_rect label;
+ header.w = NK_MAX(header.w, sym.w + item_spacing.x);
+ label.x = sym.x + sym.w + item_spacing.x;
+ label.y = sym.y;
+ label.w = header.w - (sym.w + item_spacing.y + style->tab.indent);
+ label.h = style->font->height;
+ text.text = style->tab.text;
+ text.padding = nk_vec2(0,0);
+ nk_widget_text(out, label, title, nk_strlen(title), &text,
+ NK_TEXT_LEFT, style->font);}
+
+    /* advance the panel's x-axis widget position cursor */
+ if (*state == NK_MAXIMIZED) {
+ layout->at_x = header.x + (float)*layout->offset_x + style->tab.indent;
+ layout->bounds.w = NK_MAX(layout->bounds.w, style->tab.indent);
+ layout->bounds.w -= (style->tab.indent + style->window.padding.x);
+ layout->row.tree_depth++;
+ return nk_true;
+ } else return nk_false;
+}
+NK_INTERN int
+nk_tree_base(struct nk_context *ctx, enum nk_tree_type type,
+ struct nk_image *img, const char *title, enum nk_collapse_states initial_state,
+ const char *hash, int len, int line)
+{
+ struct nk_window *win = ctx->current;
+ int title_len = 0;
+ nk_hash tree_hash = 0;
+ nk_uint *state = 0;
+
+ /* retrieve tree state from internal widget state tables */
+ if (!hash) {
+ title_len = (int)nk_strlen(title);
+ tree_hash = nk_murmur_hash(title, (int)title_len, (nk_hash)line);
+ } else tree_hash = nk_murmur_hash(hash, len, (nk_hash)line);
+ state = nk_find_value(win, tree_hash);
+ if (!state) {
+ state = nk_add_value(ctx, win, tree_hash, 0);
+ *state = initial_state;
+ }
+ return nk_tree_state_base(ctx, type, img, title, (enum nk_collapse_states*)state);
+}
+NK_API int
+nk_tree_state_push(struct nk_context *ctx, enum nk_tree_type type,
+ const char *title, enum nk_collapse_states *state)
+{
+ return nk_tree_state_base(ctx, type, 0, title, state);
+}
+NK_API int
+nk_tree_state_image_push(struct nk_context *ctx, enum nk_tree_type type,
+ struct nk_image img, const char *title, enum nk_collapse_states *state)
+{
+ return nk_tree_state_base(ctx, type, &img, title, state);
+}
+NK_API void
+nk_tree_state_pop(struct nk_context *ctx)
+{
+ struct nk_window *win = 0;
+ struct nk_panel *layout = 0;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ if (!ctx || !ctx->current || !ctx->current->layout)
+ return;
+
+ win = ctx->current;
+ layout = win->layout;
+ layout->at_x -= ctx->style.tab.indent + ctx->style.window.padding.x;
+ layout->bounds.w += ctx->style.tab.indent + ctx->style.window.padding.x;
+ NK_ASSERT(layout->row.tree_depth);
+ layout->row.tree_depth--;
+}
+NK_API int
+nk_tree_push_hashed(struct nk_context *ctx, enum nk_tree_type type,
+ const char *title, enum nk_collapse_states initial_state,
+ const char *hash, int len, int line)
+{
+ return nk_tree_base(ctx, type, 0, title, initial_state, hash, len, line);
+}
+NK_API int
+nk_tree_image_push_hashed(struct nk_context *ctx, enum nk_tree_type type,
+ struct nk_image img, const char *title, enum nk_collapse_states initial_state,
+ const char *hash, int len,int seed)
+{
+ return nk_tree_base(ctx, type, &img, title, initial_state, hash, len, seed);
+}
+NK_API void
+nk_tree_pop(struct nk_context *ctx)
+{
+ nk_tree_state_pop(ctx);
+}
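+/* Usage sketch (illustrative only, not part of the library): the `_hashed` variants take an
+   explicit id string plus a seed so that trees created in a loop or from shared code get
+   distinct persistent state; the convenience `nk_tree_push` macro supplies these from
+   __FILE__/__LINE__. The tree title and id string are hypothetical:
+
+       if (nk_tree_push_hashed(ctx, NK_TREE_TAB, "Settings", NK_MINIMIZED,
+               "settings_tree", nk_strlen("settings_tree"), 0)) {
+           nk_layout_row_dynamic(ctx, 25, 1);
+           nk_label(ctx, "child widget", NK_TEXT_LEFT);
+           nk_tree_pop(ctx);
+       }
+*/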
+NK_INTERN int
+nk_tree_element_image_push_hashed_base(struct nk_context *ctx, enum nk_tree_type type,
+ struct nk_image *img, const char *title, int title_len,
+ enum nk_collapse_states *state, int *selected)
+{
+ struct nk_window *win;
+ struct nk_panel *layout;
+ const struct nk_style *style;
+ struct nk_command_buffer *out;
+ const struct nk_input *in;
+ const struct nk_style_button *button;
+ enum nk_symbol_type symbol;
+ float row_height;
+ struct nk_vec2 padding;
+
+ int text_len;
+ float text_width;
+
+ struct nk_vec2 item_spacing;
+ struct nk_rect header = {0,0,0,0};
+ struct nk_rect sym = {0,0,0,0};
+ struct nk_text text;
+
+ nk_flags ws = 0;
+ enum nk_widget_layout_states widget_state;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ if (!ctx || !ctx->current || !ctx->current->layout)
+ return 0;
+
+ /* cache some data */
+ win = ctx->current;
+ layout = win->layout;
+ out = &win->buffer;
+ style = &ctx->style;
+ item_spacing = style->window.spacing;
+ padding = style->selectable.padding;
+
+ /* calculate header bounds and draw background */
+ row_height = style->font->height + 2 * style->tab.padding.y;
+ nk_layout_set_min_row_height(ctx, row_height);
+ nk_layout_row_dynamic(ctx, row_height, 1);
+ nk_layout_reset_min_row_height(ctx);
+
+ widget_state = nk_widget(&header, ctx);
+ if (type == NK_TREE_TAB) {
+ const struct nk_style_item *background = &style->tab.background;
+ if (background->type == NK_STYLE_ITEM_IMAGE) {
+ nk_draw_image(out, header, &background->data.image, nk_white);
+ text.background = nk_rgba(0,0,0,0);
+ } else {
+ text.background = background->data.color;
+ nk_fill_rect(out, header, 0, style->tab.border_color);
+ nk_fill_rect(out, nk_shrink_rect(header, style->tab.border),
+ style->tab.rounding, background->data.color);
+ }
+ } else text.background = style->window.background;
+
+ in = (!(layout->flags & NK_WINDOW_ROM)) ? &ctx->input: 0;
+ in = (in && widget_state == NK_WIDGET_VALID) ? &ctx->input : 0;
+
+ /* select correct button style */
+ if (*state == NK_MAXIMIZED) {
+ symbol = style->tab.sym_maximize;
+ if (type == NK_TREE_TAB)
+ button = &style->tab.tab_maximize_button;
+ else button = &style->tab.node_maximize_button;
+ } else {
+ symbol = style->tab.sym_minimize;
+ if (type == NK_TREE_TAB)
+ button = &style->tab.tab_minimize_button;
+ else button = &style->tab.node_minimize_button;
+ }
+ {/* draw triangle button */
+ sym.w = sym.h = style->font->height;
+ sym.y = header.y + style->tab.padding.y;
+ sym.x = header.x + style->tab.padding.x;
+ if (nk_do_button_symbol(&ws, &win->buffer, sym, symbol, NK_BUTTON_DEFAULT, button, in, style->font))
+ *state = (*state == NK_MAXIMIZED) ? NK_MINIMIZED : NK_MAXIMIZED;}
+
+ /* draw label */
+ {nk_flags dummy = 0;
+ struct nk_rect label;
+ /* calculate the size of the text label */
+ text_len = nk_strlen(title);
+ text_width = style->font->width(style->font->userdata, style->font->height, title, text_len);
+ text_width += (4 * padding.x);
+
+ header.w = NK_MAX(header.w, sym.w + item_spacing.x);
+ label.x = sym.x + sym.w + item_spacing.x;
+ label.y = sym.y;
+ label.w = NK_MIN(header.w - (sym.w + item_spacing.y + style->tab.indent), text_width);
+ label.h = style->font->height;
+
+ if (img) {
+ nk_do_selectable_image(&dummy, &win->buffer, label, title, title_len, NK_TEXT_LEFT,
+ selected, img, &style->selectable, in, style->font);
+ } else nk_do_selectable(&dummy, &win->buffer, label, title, title_len, NK_TEXT_LEFT,
+ selected, &style->selectable, in, style->font);
+ }
+ /* advance the x-axis layout cursor so the tree's child rows are indented */
+ if (*state == NK_MAXIMIZED) {
+ layout->at_x = header.x + (float)*layout->offset_x + style->tab.indent;
+ layout->bounds.w = NK_MAX(layout->bounds.w, style->tab.indent);
+ layout->bounds.w -= (style->tab.indent + style->window.padding.x);
+ layout->row.tree_depth++;
+ return nk_true;
+ } else return nk_false;
+}
+NK_INTERN int
+nk_tree_element_base(struct nk_context *ctx, enum nk_tree_type type,
+ struct nk_image *img, const char *title, enum nk_collapse_states initial_state,
+ int *selected, const char *hash, int len, int line)
+{
+ struct nk_window *win = ctx->current;
+ int title_len = 0;
+ nk_hash tree_hash = 0;
+ nk_uint *state = 0;
+
+ /* retrieve tree state from internal widget state tables */
+ if (!hash) {
+ title_len = (int)nk_strlen(title);
+ tree_hash = nk_murmur_hash(title, (int)title_len, (nk_hash)line);
+ } else tree_hash = nk_murmur_hash(hash, len, (nk_hash)line);
+ state = nk_find_value(win, tree_hash);
+ if (!state) {
+ state = nk_add_value(ctx, win, tree_hash, 0);
+ *state = initial_state;
+ }
+ return nk_tree_element_image_push_hashed_base(ctx, type, img, title,
+ nk_strlen(title), (enum nk_collapse_states*)state, selected);
+}
+NK_API int
+nk_tree_element_push_hashed(struct nk_context *ctx, enum nk_tree_type type,
+ const char *title, enum nk_collapse_states initial_state,
+ int *selected, const char *hash, int len, int seed)
+{
+ return nk_tree_element_base(ctx, type, 0, title, initial_state, selected, hash, len, seed);
+}
+NK_API int
+nk_tree_element_image_push_hashed(struct nk_context *ctx, enum nk_tree_type type,
+ struct nk_image img, const char *title, enum nk_collapse_states initial_state,
+ int *selected, const char *hash, int len,int seed)
+{
+ return nk_tree_element_base(ctx, type, &img, title, initial_state, selected, hash, len, seed);
+}
+NK_API void
+nk_tree_element_pop(struct nk_context *ctx)
+{
+ nk_tree_state_pop(ctx);
+}
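+/* Usage sketch (illustrative): collapsible trees with library-managed state.
+ * The nk_tree_push() convenience macro declared earlier in this header
+ * forwards to nk_tree_push_hashed() with a file/line based hash so each call
+ * site gets its own persistent entry. Assumes a valid `ctx`.
+ *
+ *     if (nk_tree_push(ctx, NK_TREE_TAB, "Settings", NK_MINIMIZED)) {
+ *         if (nk_tree_push(ctx, NK_TREE_NODE, "Video", NK_MINIMIZED)) {
+ *             nk_label(ctx, "resolution", NK_TEXT_LEFT);
+ *             nk_tree_pop(ctx);
+ *         }
+ *         nk_tree_pop(ctx);
+ *     }
+ */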
+
+
+
+
+
+/* ===============================================================
+ *
+ * GROUP
+ *
+ * ===============================================================*/
+NK_API int
+nk_group_scrolled_offset_begin(struct nk_context *ctx,
+ nk_uint *x_offset, nk_uint *y_offset, const char *title, nk_flags flags)
+{
+ struct nk_rect bounds;
+ struct nk_window panel;
+ struct nk_window *win;
+
+ win = ctx->current;
+ nk_panel_alloc_space(&bounds, ctx);
+ {const struct nk_rect *c = &win->layout->clip;
+ if (!NK_INTERSECT(c->x, c->y, c->w, c->h, bounds.x, bounds.y, bounds.w, bounds.h) &&
+ !(flags & NK_WINDOW_MOVABLE)) {
+ return 0;
+ }}
+ if (win->flags & NK_WINDOW_ROM)
+ flags |= NK_WINDOW_ROM;
+
+ /* initialize a fake window to create the panel from */
+ nk_zero(&panel, sizeof(panel));
+ panel.bounds = bounds;
+ panel.flags = flags;
+ panel.scrollbar.x = *x_offset;
+ panel.scrollbar.y = *y_offset;
+ panel.buffer = win->buffer;
+ panel.layout = (struct nk_panel*)nk_create_panel(ctx);
+ ctx->current = &panel;
+ nk_panel_begin(ctx, (flags & NK_WINDOW_TITLE) ? title: 0, NK_PANEL_GROUP);
+
+ win->buffer = panel.buffer;
+ win->buffer.clip = panel.layout->clip;
+ panel.layout->offset_x = x_offset;
+ panel.layout->offset_y = y_offset;
+ panel.layout->parent = win->layout;
+ win->layout = panel.layout;
+
+ ctx->current = win;
+ if ((panel.layout->flags & NK_WINDOW_CLOSED) ||
+ (panel.layout->flags & NK_WINDOW_MINIMIZED))
+ {
+ nk_flags f = panel.layout->flags;
+ nk_group_scrolled_end(ctx);
+ if (f & NK_WINDOW_CLOSED)
+ return NK_WINDOW_CLOSED;
+ if (f & NK_WINDOW_MINIMIZED)
+ return NK_WINDOW_MINIMIZED;
+ }
+ return 1;
+}
+NK_API void
+nk_group_scrolled_end(struct nk_context *ctx)
+{
+ struct nk_window *win;
+ struct nk_panel *parent;
+ struct nk_panel *g;
+
+ struct nk_rect clip;
+ struct nk_window pan;
+ struct nk_vec2 panel_padding;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ if (!ctx || !ctx->current)
+ return;
+
+ /* make sure nk_group_begin was called correctly */
+ NK_ASSERT(ctx->current);
+ win = ctx->current;
+ NK_ASSERT(win->layout);
+ g = win->layout;
+ NK_ASSERT(g->parent);
+ parent = g->parent;
+
+ /* dummy window */
+ nk_zero_struct(pan);
+ panel_padding = nk_panel_get_padding(&ctx->style, NK_PANEL_GROUP);
+ pan.bounds.y = g->bounds.y - (g->header_height + g->menu.h);
+ pan.bounds.x = g->bounds.x - panel_padding.x;
+ pan.bounds.w = g->bounds.w + 2 * panel_padding.x;
+ pan.bounds.h = g->bounds.h + g->header_height + g->menu.h;
+ if (g->flags & NK_WINDOW_BORDER) {
+ pan.bounds.x -= g->border;
+ pan.bounds.y -= g->border;
+ pan.bounds.w += 2*g->border;
+ pan.bounds.h += 2*g->border;
+ }
+ if (!(g->flags & NK_WINDOW_NO_SCROLLBAR)) {
+ pan.bounds.w += ctx->style.window.scrollbar_size.x;
+ pan.bounds.h += ctx->style.window.scrollbar_size.y;
+ }
+ pan.scrollbar.x = *g->offset_x;
+ pan.scrollbar.y = *g->offset_y;
+ pan.flags = g->flags;
+ pan.buffer = win->buffer;
+ pan.layout = g;
+ pan.parent = win;
+ ctx->current = &pan;
+
+ /* make sure group has correct clipping rectangle */
+ nk_unify(&clip, &parent->clip, pan.bounds.x, pan.bounds.y,
+ pan.bounds.x + pan.bounds.w, pan.bounds.y + pan.bounds.h + panel_padding.x);
+ nk_push_scissor(&pan.buffer, clip);
+ nk_end(ctx);
+
+ win->buffer = pan.buffer;
+ nk_push_scissor(&win->buffer, parent->clip);
+ ctx->current = win;
+ win->layout = parent;
+ g->bounds = pan.bounds;
+ return;
+}
+NK_API int
+nk_group_scrolled_begin(struct nk_context *ctx,
+ struct nk_scroll *scroll, const char *title, nk_flags flags)
+{
+ return nk_group_scrolled_offset_begin(ctx, &scroll->x, &scroll->y, title, flags);
+}
+NK_API int
+nk_group_begin_titled(struct nk_context *ctx, const char *id,
+ const char *title, nk_flags flags)
+{
+ int id_len;
+ nk_hash id_hash;
+ struct nk_window *win;
+ nk_uint *x_offset;
+ nk_uint *y_offset;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(id);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ if (!ctx || !ctx->current || !ctx->current->layout || !id)
+ return 0;
+
+ /* find persistent group scrollbar value */
+ win = ctx->current;
+ id_len = (int)nk_strlen(id);
+ id_hash = nk_murmur_hash(id, (int)id_len, NK_PANEL_GROUP);
+ x_offset = nk_find_value(win, id_hash);
+ if (!x_offset) {
+ x_offset = nk_add_value(ctx, win, id_hash, 0);
+ y_offset = nk_add_value(ctx, win, id_hash+1, 0);
+
+ NK_ASSERT(x_offset);
+ NK_ASSERT(y_offset);
+ if (!x_offset || !y_offset) return 0;
+ *x_offset = *y_offset = 0;
+ } else y_offset = nk_find_value(win, id_hash+1);
+ return nk_group_scrolled_offset_begin(ctx, x_offset, y_offset, title, flags);
+}
+NK_API int
+nk_group_begin(struct nk_context *ctx, const char *title, nk_flags flags)
+{
+ return nk_group_begin_titled(ctx, title, title, flags);
+}
+NK_API void
+nk_group_end(struct nk_context *ctx)
+{
+ nk_group_scrolled_end(ctx);
+}
+NK_API void
+nk_group_get_scroll(struct nk_context *ctx, const char *id, nk_uint *x_offset, nk_uint *y_offset)
+{
+ int id_len;
+ nk_hash id_hash;
+ struct nk_window *win;
+ nk_uint *x_offset_ptr;
+ nk_uint *y_offset_ptr;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(id);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ if (!ctx || !ctx->current || !ctx->current->layout || !id)
+ return;
+
+ /* find persistent group scrollbar value */
+ win = ctx->current;
+ id_len = (int)nk_strlen(id);
+ id_hash = nk_murmur_hash(id, (int)id_len, NK_PANEL_GROUP);
+ x_offset_ptr = nk_find_value(win, id_hash);
+ if (!x_offset_ptr) {
+ x_offset_ptr = nk_add_value(ctx, win, id_hash, 0);
+ y_offset_ptr = nk_add_value(ctx, win, id_hash+1, 0);
+
+ NK_ASSERT(x_offset_ptr);
+ NK_ASSERT(y_offset_ptr);
+ if (!x_offset_ptr || !y_offset_ptr) return;
+ *x_offset_ptr = *y_offset_ptr = 0;
+ } else y_offset_ptr = nk_find_value(win, id_hash+1);
+ if (x_offset)
+ *x_offset = *x_offset_ptr;
+ if (y_offset)
+ *y_offset = *y_offset_ptr;
+}
+NK_API void
+nk_group_set_scroll(struct nk_context *ctx, const char *id, nk_uint x_offset, nk_uint y_offset)
+{
+ int id_len;
+ nk_hash id_hash;
+ struct nk_window *win;
+ nk_uint *x_offset_ptr;
+ nk_uint *y_offset_ptr;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(id);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ if (!ctx || !ctx->current || !ctx->current->layout || !id)
+ return;
+
+ /* find persistent group scrollbar value */
+ win = ctx->current;
+ id_len = (int)nk_strlen(id);
+ id_hash = nk_murmur_hash(id, (int)id_len, NK_PANEL_GROUP);
+ x_offset_ptr = nk_find_value(win, id_hash);
+ if (!x_offset_ptr) {
+ x_offset_ptr = nk_add_value(ctx, win, id_hash, 0);
+ y_offset_ptr = nk_add_value(ctx, win, id_hash+1, 0);
+
+ NK_ASSERT(x_offset_ptr);
+ NK_ASSERT(y_offset_ptr);
+ if (!x_offset_ptr || !y_offset_ptr) return;
+ *x_offset_ptr = *y_offset_ptr = 0;
+ } else y_offset_ptr = nk_find_value(win, id_hash+1);
+ *x_offset_ptr = x_offset;
+ *y_offset_ptr = y_offset;
+}
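+/* Usage sketch (illustrative): a bordered, scrollable group. A row layout has
+ * to reserve the group's height before nk_group_begin() is called, and
+ * nk_group_end() is only called when the begin call returned non-zero;
+ * nk_group_set_scroll() can reset the persistent offset stored under the same
+ * id. Assumes a valid `ctx`; `scroll_to_top` is a hypothetical caller-side flag.
+ *
+ *     nk_layout_row_dynamic(ctx, 200, 1);
+ *     if (nk_group_begin(ctx, "log", NK_WINDOW_BORDER|NK_WINDOW_TITLE)) {
+ *         nk_layout_row_dynamic(ctx, 18, 1);
+ *         nk_label(ctx, "first line", NK_TEXT_LEFT);
+ *         nk_label(ctx, "second line", NK_TEXT_LEFT);
+ *         nk_group_end(ctx);
+ *     }
+ *     if (scroll_to_top)
+ *         nk_group_set_scroll(ctx, "log", 0, 0);
+ */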
+
+
+
+
+/* ===============================================================
+ *
+ * LIST VIEW
+ *
+ * ===============================================================*/
+NK_API int
+nk_list_view_begin(struct nk_context *ctx, struct nk_list_view *view,
+ const char *title, nk_flags flags, int row_height, int row_count)
+{
+ int title_len;
+ nk_hash title_hash;
+ nk_uint *x_offset;
+ nk_uint *y_offset;
+
+ int result;
+ struct nk_window *win;
+ struct nk_panel *layout;
+ const struct nk_style *style;
+ struct nk_vec2 item_spacing;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(view);
+ NK_ASSERT(title);
+ if (!ctx || !view || !title) return 0;
+
+ win = ctx->current;
+ style = &ctx->style;
+ item_spacing = style->window.spacing;
+ row_height += NK_MAX(0, (int)item_spacing.y);
+
+ /* find persistent list view scrollbar offset */
+ title_len = (int)nk_strlen(title);
+ title_hash = nk_murmur_hash(title, (int)title_len, NK_PANEL_GROUP);
+ x_offset = nk_find_value(win, title_hash);
+ if (!x_offset) {
+ x_offset = nk_add_value(ctx, win, title_hash, 0);
+ y_offset = nk_add_value(ctx, win, title_hash+1, 0);
+
+ NK_ASSERT(x_offset);
+ NK_ASSERT(y_offset);
+ if (!x_offset || !y_offset) return 0;
+ *x_offset = *y_offset = 0;
+ } else y_offset = nk_find_value(win, title_hash+1);
+ view->scroll_value = *y_offset;
+ view->scroll_pointer = y_offset;
+
+ *y_offset = 0;
+ result = nk_group_scrolled_offset_begin(ctx, x_offset, y_offset, title, flags);
+ win = ctx->current;
+ layout = win->layout;
+
+ view->total_height = row_height * NK_MAX(row_count,1);
+ view->begin = (int)NK_MAX(((float)view->scroll_value / (float)row_height), 0.0f);
+ view->count = (int)NK_MAX(nk_iceilf((layout->clip.h)/(float)row_height),0);
+ view->count = NK_MIN(view->count, row_count - view->begin);
+ view->end = view->begin + view->count;
+ view->ctx = ctx;
+ return result;
+}
+NK_API void
+nk_list_view_end(struct nk_list_view *view)
+{
+ struct nk_context *ctx;
+ struct nk_window *win;
+ struct nk_panel *layout;
+
+ NK_ASSERT(view);
+ NK_ASSERT(view->ctx);
+ NK_ASSERT(view->scroll_pointer);
+ if (!view || !view->ctx) return;
+
+ ctx = view->ctx;
+ win = ctx->current;
+ layout = win->layout;
+ layout->at_y = layout->bounds.y + (float)view->total_height;
+ *view->scroll_pointer = *view->scroll_pointer + view->scroll_value;
+ nk_group_end(view->ctx);
+}
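+/* Usage sketch (illustrative): the list view lays out only the rows that are
+ * currently visible and fakes the total height for the scrollbar. Assumes a
+ * valid `ctx`, NK_INCLUDE_STANDARD_VARARGS for nk_labelf(), and a
+ * hypothetical caller-side row count `total`.
+ *
+ *     struct nk_list_view view;
+ *     nk_layout_row_dynamic(ctx, 300, 1);
+ *     if (nk_list_view_begin(ctx, &view, "rows", NK_WINDOW_BORDER, 25, total)) {
+ *         int i;
+ *         nk_layout_row_dynamic(ctx, 25, 1);
+ *         for (i = 0; i < view.count; ++i)
+ *             nk_labelf(ctx, NK_TEXT_LEFT, "row %d", view.begin + i);
+ *         nk_list_view_end(&view);
+ *     }
+ */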
+
+
+
+
+
+/* ===============================================================
+ *
+ * WIDGET
+ *
+ * ===============================================================*/
+NK_API struct nk_rect
+nk_widget_bounds(struct nk_context *ctx)
+{
+ struct nk_rect bounds;
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ if (!ctx || !ctx->current)
+ return nk_rect(0,0,0,0);
+ nk_layout_peek(&bounds, ctx);
+ return bounds;
+}
+NK_API struct nk_vec2
+nk_widget_position(struct nk_context *ctx)
+{
+ struct nk_rect bounds;
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ if (!ctx || !ctx->current)
+ return nk_vec2(0,0);
+
+ nk_layout_peek(&bounds, ctx);
+ return nk_vec2(bounds.x, bounds.y);
+}
+NK_API struct nk_vec2
+nk_widget_size(struct nk_context *ctx)
+{
+ struct nk_rect bounds;
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ if (!ctx || !ctx->current)
+ return nk_vec2(0,0);
+
+ nk_layout_peek(&bounds, ctx);
+ return nk_vec2(bounds.w, bounds.h);
+}
+NK_API float
+nk_widget_width(struct nk_context *ctx)
+{
+ struct nk_rect bounds;
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ if (!ctx || !ctx->current)
+ return 0;
+
+ nk_layout_peek(&bounds, ctx);
+ return bounds.w;
+}
+NK_API float
+nk_widget_height(struct nk_context *ctx)
+{
+ struct nk_rect bounds;
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ if (!ctx || !ctx->current)
+ return 0;
+
+ nk_layout_peek(&bounds, ctx);
+ return bounds.h;
+}
+NK_API int
+nk_widget_is_hovered(struct nk_context *ctx)
+{
+ struct nk_rect c, v;
+ struct nk_rect bounds;
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ if (!ctx || !ctx->current || ctx->active != ctx->current)
+ return 0;
+
+ c = ctx->current->layout->clip;
+ c.x = (float)((int)c.x);
+ c.y = (float)((int)c.y);
+ c.w = (float)((int)c.w);
+ c.h = (float)((int)c.h);
+
+ nk_layout_peek(&bounds, ctx);
+ nk_unify(&v, &c, bounds.x, bounds.y, bounds.x + bounds.w, bounds.y + bounds.h);
+ if (!NK_INTERSECT(c.x, c.y, c.w, c.h, bounds.x, bounds.y, bounds.w, bounds.h))
+ return 0;
+ return nk_input_is_mouse_hovering_rect(&ctx->input, bounds);
+}
+NK_API int
+nk_widget_is_mouse_clicked(struct nk_context *ctx, enum nk_buttons btn)
+{
+ struct nk_rect c, v;
+ struct nk_rect bounds;
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ if (!ctx || !ctx->current || ctx->active != ctx->current)
+ return 0;
+
+ c = ctx->current->layout->clip;
+ c.x = (float)((int)c.x);
+ c.y = (float)((int)c.y);
+ c.w = (float)((int)c.w);
+ c.h = (float)((int)c.h);
+
+ nk_layout_peek(&bounds, ctx);
+ nk_unify(&v, &c, bounds.x, bounds.y, bounds.x + bounds.w, bounds.y + bounds.h);
+ if (!NK_INTERSECT(c.x, c.y, c.w, c.h, bounds.x, bounds.y, bounds.w, bounds.h))
+ return 0;
+ return nk_input_mouse_clicked(&ctx->input, btn, bounds);
+}
+NK_API int
+nk_widget_has_mouse_click_down(struct nk_context *ctx, enum nk_buttons btn, int down)
+{
+ struct nk_rect c, v;
+ struct nk_rect bounds;
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ if (!ctx || !ctx->current || ctx->active != ctx->current)
+ return 0;
+
+ c = ctx->current->layout->clip;
+ c.x = (float)((int)c.x);
+ c.y = (float)((int)c.y);
+ c.w = (float)((int)c.w);
+ c.h = (float)((int)c.h);
+
+ nk_layout_peek(&bounds, ctx);
+ nk_unify(&v, &c, bounds.x, bounds.y, bounds.x + bounds.w, bounds.y + bounds.h);
+ if (!NK_INTERSECT(c.x, c.y, c.w, c.h, bounds.x, bounds.y, bounds.w, bounds.h))
+ return 0;
+ return nk_input_has_mouse_click_down_in_rect(&ctx->input, btn, bounds, down);
+}
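+/* Usage sketch (illustrative): the query helpers above peek at the space the
+ * next widget will occupy, so they are called before that widget is emitted.
+ * Assumes a valid `ctx` and the tooltip API declared elsewhere in this header.
+ *
+ *     struct nk_rect bounds = nk_widget_bounds(ctx);
+ *     nk_label(ctx, "hover me", NK_TEXT_LEFT);
+ *     if (nk_input_is_mouse_hovering_rect(&ctx->input, bounds))
+ *         nk_tooltip(ctx, "shown while the label above is hovered");
+ */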
+NK_API enum nk_widget_layout_states
+nk_widget(struct nk_rect *bounds, const struct nk_context *ctx)
+{
+ struct nk_rect c, v;
+ struct nk_window *win;
+ struct nk_panel *layout;
+ const struct nk_input *in;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ if (!ctx || !ctx->current || !ctx->current->layout)
+ return NK_WIDGET_INVALID;
+
+ /* allocate space and check if the widget needs to be updated and drawn */
+ nk_panel_alloc_space(bounds, ctx);
+ win = ctx->current;
+ layout = win->layout;
+ in = &ctx->input;
+ c = layout->clip;
+
+ /* if one of these assertions triggers you forgot to wrap a window, group,
+ popup, combobox or contextual menu `begin`/`end` pair in an `if` condition.
+ Example:
+ if (nk_begin(...)) {...} nk_end(ctx); or
+ if (nk_group_begin(...)) {... nk_group_end(ctx);} */
+ NK_ASSERT(!(layout->flags & NK_WINDOW_MINIMIZED));
+ NK_ASSERT(!(layout->flags & NK_WINDOW_HIDDEN));
+ NK_ASSERT(!(layout->flags & NK_WINDOW_CLOSED));
+
+ /* truncate to whole pixels here to avoid floating point precision artifacts */
+ bounds->x = (float)((int)bounds->x);
+ bounds->y = (float)((int)bounds->y);
+ bounds->w = (float)((int)bounds->w);
+ bounds->h = (float)((int)bounds->h);
+
+ c.x = (float)((int)c.x);
+ c.y = (float)((int)c.y);
+ c.w = (float)((int)c.w);
+ c.h = (float)((int)c.h);
+
+ nk_unify(&v, &c, bounds->x, bounds->y, bounds->x + bounds->w, bounds->y + bounds->h);
+ if (!NK_INTERSECT(c.x, c.y, c.w, c.h, bounds->x, bounds->y, bounds->w, bounds->h))
+ return NK_WIDGET_INVALID;
+ if (!NK_INBOX(in->mouse.pos.x, in->mouse.pos.y, v.x, v.y, v.w, v.h))
+ return NK_WIDGET_ROM;
+ return NK_WIDGET_VALID;
+}
+NK_API enum nk_widget_layout_states
+nk_widget_fitting(struct nk_rect *bounds, struct nk_context *ctx,
+ struct nk_vec2 item_padding)
+{
+ /* extend the bounds so the widget spans the row without item padding */
+ struct nk_window *win;
+ struct nk_style *style;
+ struct nk_panel *layout;
+ enum nk_widget_layout_states state;
+ struct nk_vec2 panel_padding;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ if (!ctx || !ctx->current || !ctx->current->layout)
+ return NK_WIDGET_INVALID;
+
+ win = ctx->current;
+ style = &ctx->style;
+ layout = win->layout;
+ state = nk_widget(bounds, ctx);
+
+ panel_padding = nk_panel_get_padding(style, layout->type);
+ if (layout->row.index == 1) {
+ bounds->w += panel_padding.x;
+ bounds->x -= panel_padding.x;
+ } else bounds->x -= item_padding.x;
+
+ if (layout->row.index == layout->row.columns)
+ bounds->w += panel_padding.x;
+ else bounds->w += item_padding.x;
+ return state;
+}
+NK_API void
+nk_spacing(struct nk_context *ctx, int cols)
+{
+ struct nk_window *win;
+ struct nk_panel *layout;
+ struct nk_rect none;
+ int i, index, rows;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ if (!ctx || !ctx->current || !ctx->current->layout)
+ return;
+
+ /* allocate spacing, handling wrap-around over row boundaries */
+ win = ctx->current;
+ layout = win->layout;
+ index = (layout->row.index + cols) % layout->row.columns;
+ rows = (layout->row.index + cols) / layout->row.columns;
+ if (rows) {
+ for (i = 0; i < rows; ++i)
+ nk_panel_alloc_row(ctx, win);
+ cols = index;
+ }
+ /* non-table layouts need to allocate space */
+ if (layout->row.type != NK_LAYOUT_DYNAMIC_FIXED &&
+ layout->row.type != NK_LAYOUT_STATIC_FIXED) {
+ for (i = 0; i < cols; ++i)
+ nk_panel_alloc_space(&none, ctx);
+ }
+ layout->row.index = index;
+}
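+/* Usage sketch (illustrative): nk_spacing() skips columns in the current row
+ * layout, which is handy for right-aligning a widget. Assumes a valid `ctx`.
+ *
+ *     nk_layout_row_dynamic(ctx, 30, 3);
+ *     nk_label(ctx, "left", NK_TEXT_LEFT);
+ *     nk_spacing(ctx, 1);
+ *     nk_label(ctx, "right", NK_TEXT_RIGHT);
+ */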
+
+
+
+
+
+/* ===============================================================
+ *
+ * TEXT
+ *
+ * ===============================================================*/
+NK_LIB void
+nk_widget_text(struct nk_command_buffer *o, struct nk_rect b,
+ const char *string, int len, const struct nk_text *t,
+ nk_flags a, const struct nk_user_font *f)
+{
+ struct nk_rect label;
+ float text_width;
+
+ NK_ASSERT(o);
+ NK_ASSERT(t);
+ if (!o || !t) return;
+
+ b.h = NK_MAX(b.h, 2 * t->padding.y);
+ label.x = 0; label.w = 0;
+ label.y = b.y + t->padding.y;
+ label.h = NK_MIN(f->height, b.h - 2 * t->padding.y);
+
+ text_width = f->width(f->userdata, f->height, (const char*)string, len);
+ text_width += (2.0f * t->padding.x);
+
+ /* align in x-axis */
+ if (a & NK_TEXT_ALIGN_LEFT) {
+ label.x = b.x + t->padding.x;
+ label.w = NK_MAX(0, b.w - 2 * t->padding.x);
+ } else if (a & NK_TEXT_ALIGN_CENTERED) {
+ label.w = NK_MAX(1, 2 * t->padding.x + (float)text_width);
+ label.x = (b.x + t->padding.x + ((b.w - 2 * t->padding.x) - label.w) / 2);
+ label.x = NK_MAX(b.x + t->padding.x, label.x);
+ label.w = NK_MIN(b.x + b.w, label.x + label.w);
+ if (label.w >= label.x) label.w -= label.x;
+ } else if (a & NK_TEXT_ALIGN_RIGHT) {
+ label.x = NK_MAX(b.x + t->padding.x, (b.x + b.w) - (2 * t->padding.x + (float)text_width));
+ label.w = (float)text_width + 2 * t->padding.x;
+ } else return;
+
+ /* align in y-axis */
+ if (a & NK_TEXT_ALIGN_MIDDLE) {
+ label.y = b.y + b.h/2.0f - (float)f->height/2.0f;
+ label.h = NK_MAX(b.h/2.0f, b.h - (b.h/2.0f + f->height/2.0f));
+ } else if (a & NK_TEXT_ALIGN_BOTTOM) {
+ label.y = b.y + b.h - f->height;
+ label.h = f->height;
+ }
+ nk_draw_text(o, label, (const char*)string, len, f, t->background, t->text);
+}
+NK_LIB void
+nk_widget_text_wrap(struct nk_command_buffer *o, struct nk_rect b,
+ const char *string, int len, const struct nk_text *t,
+ const struct nk_user_font *f)
+{
+ float width;
+ int glyphs = 0;
+ int fitting = 0;
+ int done = 0;
+ struct nk_rect line;
+ struct nk_text text;
+ NK_INTERN nk_rune separator[] = {' '};
+
+ NK_ASSERT(o);
+ NK_ASSERT(t);
+ if (!o || !t) return;
+
+ text.padding = nk_vec2(0,0);
+ text.background = t->background;
+ text.text = t->text;
+
+ b.w = NK_MAX(b.w, 2 * t->padding.x);
+ b.h = NK_MAX(b.h, 2 * t->padding.y);
+ b.h = b.h - 2 * t->padding.y;
+
+ line.x = b.x + t->padding.x;
+ line.y = b.y + t->padding.y;
+ line.w = b.w - 2 * t->padding.x;
+ line.h = 2 * t->padding.y + f->height;
+
+ fitting = nk_text_clamp(f, string, len, line.w, &glyphs, &width, separator, NK_LEN(separator));
+ while (done < len) {
+ if (!fitting || line.y + line.h >= (b.y + b.h)) break;
+ nk_widget_text(o, line, &string[done], fitting, &text, NK_TEXT_LEFT, f);
+ done += fitting;
+ line.y += f->height + 2 * t->padding.y;
+ fitting = nk_text_clamp(f, &string[done], len - done, line.w, &glyphs, &width, separator, NK_LEN(separator));
+ }
+}
+NK_API void
+nk_text_colored(struct nk_context *ctx, const char *str, int len,
+ nk_flags alignment, struct nk_color color)
+{
+ struct nk_window *win;
+ const struct nk_style *style;
+
+ struct nk_vec2 item_padding;
+ struct nk_rect bounds;
+ struct nk_text text;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ if (!ctx || !ctx->current || !ctx->current->layout) return;
+
+ win = ctx->current;
+ style = &ctx->style;
+ nk_panel_alloc_space(&bounds, ctx);
+ item_padding = style->text.padding;
+
+ text.padding.x = item_padding.x;
+ text.padding.y = item_padding.y;
+ text.background = style->window.background;
+ text.text = color;
+ nk_widget_text(&win->buffer, bounds, str, len, &text, alignment, style->font);
+}
+NK_API void
+nk_text_wrap_colored(struct nk_context *ctx, const char *str,
+ int len, struct nk_color color)
+{
+ struct nk_window *win;
+ const struct nk_style *style;
+
+ struct nk_vec2 item_padding;
+ struct nk_rect bounds;
+ struct nk_text text;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ if (!ctx || !ctx->current || !ctx->current->layout) return;
+
+ win = ctx->current;
+ style = &ctx->style;
+ nk_panel_alloc_space(&bounds, ctx);
+ item_padding = style->text.padding;
+
+ text.padding.x = item_padding.x;
+ text.padding.y = item_padding.y;
+ text.background = style->window.background;
+ text.text = color;
+ nk_widget_text_wrap(&win->buffer, bounds, str, len, &text, style->font);
+}
+#ifdef NK_INCLUDE_STANDARD_VARARGS
+NK_API void
+nk_labelf_colored(struct nk_context *ctx, nk_flags flags,
+ struct nk_color color, const char *fmt, ...)
+{
+ va_list args;
+ va_start(args, fmt);
+ nk_labelfv_colored(ctx, flags, color, fmt, args);
+ va_end(args);
+}
+NK_API void
+nk_labelf_colored_wrap(struct nk_context *ctx, struct nk_color color,
+ const char *fmt, ...)
+{
+ va_list args;
+ va_start(args, fmt);
+ nk_labelfv_colored_wrap(ctx, color, fmt, args);
+ va_end(args);
+}
+NK_API void
+nk_labelf(struct nk_context *ctx, nk_flags flags, const char *fmt, ...)
+{
+ va_list args;
+ va_start(args, fmt);
+ nk_labelfv(ctx, flags, fmt, args);
+ va_end(args);
+}
+NK_API void
+nk_labelf_wrap(struct nk_context *ctx, const char *fmt,...)
+{
+ va_list args;
+ va_start(args, fmt);
+ nk_labelfv_wrap(ctx, fmt, args);
+ va_end(args);
+}
+NK_API void
+nk_labelfv_colored(struct nk_context *ctx, nk_flags flags,
+ struct nk_color color, const char *fmt, va_list args)
+{
+ char buf[256];
+ nk_strfmt(buf, NK_LEN(buf), fmt, args);
+ nk_label_colored(ctx, buf, flags, color);
+}
+
+NK_API void
+nk_labelfv_colored_wrap(struct nk_context *ctx, struct nk_color color,
+ const char *fmt, va_list args)
+{
+ char buf[256];
+ nk_strfmt(buf, NK_LEN(buf), fmt, args);
+ nk_label_colored_wrap(ctx, buf, color);
+}
+
+NK_API void
+nk_labelfv(struct nk_context *ctx, nk_flags flags, const char *fmt, va_list args)
+{
+ char buf[256];
+ nk_strfmt(buf, NK_LEN(buf), fmt, args);
+ nk_label(ctx, buf, flags);
+}
+
+NK_API void
+nk_labelfv_wrap(struct nk_context *ctx, const char *fmt, va_list args)
+{
+ char buf[256];
+ nk_strfmt(buf, NK_LEN(buf), fmt, args);
+ nk_label_wrap(ctx, buf);
+}
+
+NK_API void
+nk_value_bool(struct nk_context *ctx, const char *prefix, int value)
+{
+ nk_labelf(ctx, NK_TEXT_LEFT, "%s: %s", prefix, ((value) ? "true": "false"));
+}
+NK_API void
+nk_value_int(struct nk_context *ctx, const char *prefix, int value)
+{
+ nk_labelf(ctx, NK_TEXT_LEFT, "%s: %d", prefix, value);
+}
+NK_API void
+nk_value_uint(struct nk_context *ctx, const char *prefix, unsigned int value)
+{
+ nk_labelf(ctx, NK_TEXT_LEFT, "%s: %u", prefix, value);
+}
+NK_API void
+nk_value_float(struct nk_context *ctx, const char *prefix, float value)
+{
+ double double_value = (double)value;
+ nk_labelf(ctx, NK_TEXT_LEFT, "%s: %.3f", prefix, double_value);
+}
+NK_API void
+nk_value_color_byte(struct nk_context *ctx, const char *p, struct nk_color c)
+{
+ nk_labelf(ctx, NK_TEXT_LEFT, "%s: (%d, %d, %d, %d)", p, c.r, c.g, c.b, c.a);
+}
+NK_API void
+nk_value_color_float(struct nk_context *ctx, const char *p, struct nk_color color)
+{
+ double c[4]; nk_color_dv(c, color);
+ nk_labelf(ctx, NK_TEXT_LEFT, "%s: (%.2f, %.2f, %.2f, %.2f)",
+ p, c[0], c[1], c[2], c[3]);
+}
+NK_API void
+nk_value_color_hex(struct nk_context *ctx, const char *prefix, struct nk_color color)
+{
+ char hex[16];
+ nk_color_hex_rgba(hex, color);
+ nk_labelf(ctx, NK_TEXT_LEFT, "%s: %s", prefix, hex);
+}
+#endif
+NK_API void
+nk_text(struct nk_context *ctx, const char *str, int len, nk_flags alignment)
+{
+ NK_ASSERT(ctx);
+ if (!ctx) return;
+ nk_text_colored(ctx, str, len, alignment, ctx->style.text.color);
+}
+NK_API void
+nk_text_wrap(struct nk_context *ctx, const char *str, int len)
+{
+ NK_ASSERT(ctx);
+ if (!ctx) return;
+ nk_text_wrap_colored(ctx, str, len, ctx->style.text.color);
+}
+NK_API void
+nk_label(struct nk_context *ctx, const char *str, nk_flags alignment)
+{
+ nk_text(ctx, str, nk_strlen(str), alignment);
+}
+NK_API void
+nk_label_colored(struct nk_context *ctx, const char *str, nk_flags align,
+ struct nk_color color)
+{
+ nk_text_colored(ctx, str, nk_strlen(str), align, color);
+}
+NK_API void
+nk_label_wrap(struct nk_context *ctx, const char *str)
+{
+ nk_text_wrap(ctx, str, nk_strlen(str));
+}
+NK_API void
+nk_label_colored_wrap(struct nk_context *ctx, const char *str, struct nk_color color)
+{
+ nk_text_wrap_colored(ctx, str, nk_strlen(str), color);
+}
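+/* Usage sketch (illustrative): plain, colored and formatted labels. The
+ * nk_labelf() and nk_value_*() helpers are only compiled when
+ * NK_INCLUDE_STANDARD_VARARGS is defined. Assumes a valid `ctx` and an
+ * existing row layout.
+ *
+ *     nk_label(ctx, "static text", NK_TEXT_LEFT);
+ *     nk_label_colored(ctx, "warning", NK_TEXT_CENTERED, nk_rgb(255, 128, 0));
+ *     nk_labelf(ctx, NK_TEXT_RIGHT, "fps: %.1f", 60.0);
+ *     nk_value_int(ctx, "items", 42);
+ */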
+
+
+
+
+
+/* ===============================================================
+ *
+ * IMAGE
+ *
+ * ===============================================================*/
+NK_API nk_handle
+nk_handle_ptr(void *ptr)
+{
+ nk_handle handle = {0};
+ handle.ptr = ptr;
+ return handle;
+}
+NK_API nk_handle
+nk_handle_id(int id)
+{
+ nk_handle handle;
+ nk_zero_struct(handle);
+ handle.id = id;
+ return handle;
+}
+NK_API struct nk_image
+nk_subimage_ptr(void *ptr, unsigned short w, unsigned short h, struct nk_rect r)
+{
+ struct nk_image s;
+ nk_zero(&s, sizeof(s));
+ s.handle.ptr = ptr;
+ s.w = w; s.h = h;
+ s.region[0] = (unsigned short)r.x;
+ s.region[1] = (unsigned short)r.y;
+ s.region[2] = (unsigned short)r.w;
+ s.region[3] = (unsigned short)r.h;
+ return s;
+}
+NK_API struct nk_image
+nk_subimage_id(int id, unsigned short w, unsigned short h, struct nk_rect r)
+{
+ struct nk_image s;
+ nk_zero(&s, sizeof(s));
+ s.handle.id = id;
+ s.w = w; s.h = h;
+ s.region[0] = (unsigned short)r.x;
+ s.region[1] = (unsigned short)r.y;
+ s.region[2] = (unsigned short)r.w;
+ s.region[3] = (unsigned short)r.h;
+ return s;
+}
+NK_API struct nk_image
+nk_subimage_handle(nk_handle handle, unsigned short w, unsigned short h,
+ struct nk_rect r)
+{
+ struct nk_image s;
+ nk_zero(&s, sizeof(s));
+ s.handle = handle;
+ s.w = w; s.h = h;
+ s.region[0] = (unsigned short)r.x;
+ s.region[1] = (unsigned short)r.y;
+ s.region[2] = (unsigned short)r.w;
+ s.region[3] = (unsigned short)r.h;
+ return s;
+}
+NK_API struct nk_image
+nk_image_handle(nk_handle handle)
+{
+ struct nk_image s;
+ nk_zero(&s, sizeof(s));
+ s.handle = handle;
+ s.w = 0; s.h = 0;
+ s.region[0] = 0;
+ s.region[1] = 0;
+ s.region[2] = 0;
+ s.region[3] = 0;
+ return s;
+}
+NK_API struct nk_image
+nk_image_ptr(void *ptr)
+{
+ struct nk_image s;
+ nk_zero(&s, sizeof(s));
+ NK_ASSERT(ptr);
+ s.handle.ptr = ptr;
+ s.w = 0; s.h = 0;
+ s.region[0] = 0;
+ s.region[1] = 0;
+ s.region[2] = 0;
+ s.region[3] = 0;
+ return s;
+}
+NK_API struct nk_image
+nk_image_id(int id)
+{
+ struct nk_image s;
+ nk_zero(&s, sizeof(s));
+ s.handle.id = id;
+ s.w = 0; s.h = 0;
+ s.region[0] = 0;
+ s.region[1] = 0;
+ s.region[2] = 0;
+ s.region[3] = 0;
+ return s;
+}
+NK_API int
+nk_image_is_subimage(const struct nk_image* img)
+{
+ NK_ASSERT(img);
+ return !(img->w == 0 && img->h == 0);
+}
+NK_API void
+nk_image(struct nk_context *ctx, struct nk_image img)
+{
+ struct nk_window *win;
+ struct nk_rect bounds;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ if (!ctx || !ctx->current || !ctx->current->layout) return;
+
+ win = ctx->current;
+ if (!nk_widget(&bounds, ctx)) return;
+ nk_draw_image(&win->buffer, bounds, &img, nk_white);
+}
+NK_API void
+nk_image_color(struct nk_context *ctx, struct nk_image img, struct nk_color col)
+{
+ struct nk_window *win;
+ struct nk_rect bounds;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ if (!ctx || !ctx->current || !ctx->current->layout) return;
+
+ win = ctx->current;
+ if (!nk_widget(&bounds, ctx)) return;
+ nk_draw_image(&win->buffer, bounds, &img, col);
+}
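+/* Usage sketch (illustrative): an nk_image only stores a handle plus an
+ * optional sub-rectangle; the rendering backend decides what the handle
+ * refers to. Assumes a valid `ctx`; `tex_id` is a hypothetical texture id
+ * supplied by that backend.
+ *
+ *     struct nk_image icon = nk_image_id(tex_id);
+ *     nk_layout_row_static(ctx, 64, 64, 1);
+ *     nk_image(ctx, icon);
+ *     nk_image_color(ctx, icon, nk_rgba(255, 255, 255, 128));
+ */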
+
+
+
+
+
+/* ==============================================================
+ *
+ * BUTTON
+ *
+ * ===============================================================*/
+NK_LIB void
+nk_draw_symbol(struct nk_command_buffer *out, enum nk_symbol_type type,
+ struct nk_rect content, struct nk_color background, struct nk_color foreground,
+ float border_width, const struct nk_user_font *font)
+{
+ switch (type) {
+ case NK_SYMBOL_X:
+ case NK_SYMBOL_UNDERSCORE:
+ case NK_SYMBOL_PLUS:
+ case NK_SYMBOL_MINUS: {
+ /* single character text symbol */
+ const char *X = (type == NK_SYMBOL_X) ? "x":
+ (type == NK_SYMBOL_UNDERSCORE) ? "_":
+ (type == NK_SYMBOL_PLUS) ? "+": "-";
+ struct nk_text text;
+ text.padding = nk_vec2(0,0);
+ text.background = background;
+ text.text = foreground;
+ nk_widget_text(out, content, X, 1, &text, NK_TEXT_CENTERED, font);
+ } break;
+ case NK_SYMBOL_CIRCLE_SOLID:
+ case NK_SYMBOL_CIRCLE_OUTLINE:
+ case NK_SYMBOL_RECT_SOLID:
+ case NK_SYMBOL_RECT_OUTLINE: {
+ /* simple empty/filled shapes */
+ if (type == NK_SYMBOL_RECT_SOLID || type == NK_SYMBOL_RECT_OUTLINE) {
+ nk_fill_rect(out, content, 0, foreground);
+ if (type == NK_SYMBOL_RECT_OUTLINE)
+ nk_fill_rect(out, nk_shrink_rect(content, border_width), 0, background);
+ } else {
+ nk_fill_circle(out, content, foreground);
+ if (type == NK_SYMBOL_CIRCLE_OUTLINE)
+ nk_fill_circle(out, nk_shrink_rect(content, 1), background);
+ }
+ } break;
+ case NK_SYMBOL_TRIANGLE_UP:
+ case NK_SYMBOL_TRIANGLE_DOWN:
+ case NK_SYMBOL_TRIANGLE_LEFT:
+ case NK_SYMBOL_TRIANGLE_RIGHT: {
+ enum nk_heading heading;
+ struct nk_vec2 points[3];
+ heading = (type == NK_SYMBOL_TRIANGLE_RIGHT) ? NK_RIGHT :
+ (type == NK_SYMBOL_TRIANGLE_LEFT) ? NK_LEFT:
+ (type == NK_SYMBOL_TRIANGLE_UP) ? NK_UP: NK_DOWN;
+ nk_triangle_from_direction(points, content, 0, 0, heading);
+ nk_fill_triangle(out, points[0].x, points[0].y, points[1].x, points[1].y,
+ points[2].x, points[2].y, foreground);
+ } break;
+ default:
+ case NK_SYMBOL_NONE:
+ case NK_SYMBOL_MAX: break;
+ }
+}
+NK_LIB int
+nk_button_behavior(nk_flags *state, struct nk_rect r,
+ const struct nk_input *i, enum nk_button_behavior behavior)
+{
+ int ret = 0;
+ nk_widget_state_reset(state);
+ if (!i) return 0;
+ if (nk_input_is_mouse_hovering_rect(i, r)) {
+ *state = NK_WIDGET_STATE_HOVERED;
+ if (nk_input_is_mouse_down(i, NK_BUTTON_LEFT))
+ *state = NK_WIDGET_STATE_ACTIVE;
+ if (nk_input_has_mouse_click_in_rect(i, NK_BUTTON_LEFT, r)) {
+ ret = (behavior != NK_BUTTON_DEFAULT) ?
+ nk_input_is_mouse_down(i, NK_BUTTON_LEFT):
+#ifdef NK_BUTTON_TRIGGER_ON_RELEASE
+ nk_input_is_mouse_released(i, NK_BUTTON_LEFT);
+#else
+ nk_input_is_mouse_pressed(i, NK_BUTTON_LEFT);
+#endif
+ }
+ }
+ if (*state & NK_WIDGET_STATE_HOVER && !nk_input_is_mouse_prev_hovering_rect(i, r))
+ *state |= NK_WIDGET_STATE_ENTERED;
+ else if (nk_input_is_mouse_prev_hovering_rect(i, r))
+ *state |= NK_WIDGET_STATE_LEFT;
+ return ret;
+}
+NK_LIB const struct nk_style_item*
+nk_draw_button(struct nk_command_buffer *out,
+ const struct nk_rect *bounds, nk_flags state,
+ const struct nk_style_button *style)
+{
+ const struct nk_style_item *background;
+ if (state & NK_WIDGET_STATE_HOVER)
+ background = &style->hover;
+ else if (state & NK_WIDGET_STATE_ACTIVED)
+ background = &style->active;
+ else background = &style->normal;
+
+ if (background->type == NK_STYLE_ITEM_IMAGE) {
+ nk_draw_image(out, *bounds, &background->data.image, nk_white);
+ } else {
+ nk_fill_rect(out, *bounds, style->rounding, background->data.color);
+ nk_stroke_rect(out, *bounds, style->rounding, style->border, style->border_color);
+ }
+ return background;
+}
+NK_LIB int
+nk_do_button(nk_flags *state, struct nk_command_buffer *out, struct nk_rect r,
+ const struct nk_style_button *style, const struct nk_input *in,
+ enum nk_button_behavior behavior, struct nk_rect *content)
+{
+ struct nk_rect bounds;
+ NK_ASSERT(style);
+ NK_ASSERT(state);
+ NK_ASSERT(out);
+ if (!out || !style)
+ return nk_false;
+
+ /* calculate button content space */
+ content->x = r.x + style->padding.x + style->border + style->rounding;
+ content->y = r.y + style->padding.y + style->border + style->rounding;
+ content->w = r.w - (2 * style->padding.x + style->border + style->rounding*2);
+ content->h = r.h - (2 * style->padding.y + style->border + style->rounding*2);
+
+ /* execute button behavior */
+ bounds.x = r.x - style->touch_padding.x;
+ bounds.y = r.y - style->touch_padding.y;
+ bounds.w = r.w + 2 * style->touch_padding.x;
+ bounds.h = r.h + 2 * style->touch_padding.y;
+ return nk_button_behavior(state, bounds, in, behavior);
+}
+NK_LIB void
+nk_draw_button_text(struct nk_command_buffer *out,
+ const struct nk_rect *bounds, const struct nk_rect *content, nk_flags state,
+ const struct nk_style_button *style, const char *txt, int len,
+ nk_flags text_alignment, const struct nk_user_font *font)
+{
+ struct nk_text text;
+ const struct nk_style_item *background;
+ background = nk_draw_button(out, bounds, state, style);
+
+ /* select correct colors/images */
+ if (background->type == NK_STYLE_ITEM_COLOR)
+ text.background = background->data.color;
+ else text.background = style->text_background;
+ if (state & NK_WIDGET_STATE_HOVER)
+ text.text = style->text_hover;
+ else if (state & NK_WIDGET_STATE_ACTIVED)
+ text.text = style->text_active;
+ else text.text = style->text_normal;
+
+ text.padding = nk_vec2(0,0);
+ nk_widget_text(out, *content, txt, len, &text, text_alignment, font);
+}
+NK_LIB int
+nk_do_button_text(nk_flags *state,
+ struct nk_command_buffer *out, struct nk_rect bounds,
+ const char *string, int len, nk_flags align, enum nk_button_behavior behavior,
+ const struct nk_style_button *style, const struct nk_input *in,
+ const struct nk_user_font *font)
+{
+ struct nk_rect content;
+ int ret = nk_false;
+
+ NK_ASSERT(state);
+ NK_ASSERT(style);
+ NK_ASSERT(out);
+ NK_ASSERT(string);
+ NK_ASSERT(font);
+ if (!out || !style || !font || !string)
+ return nk_false;
+
+ ret = nk_do_button(state, out, bounds, style, in, behavior, &content);
+ if (style->draw_begin) style->draw_begin(out, style->userdata);
+ nk_draw_button_text(out, &bounds, &content, *state, style, string, len, align, font);
+ if (style->draw_end) style->draw_end(out, style->userdata);
+ return ret;
+}
+NK_LIB void
+nk_draw_button_symbol(struct nk_command_buffer *out,
+ const struct nk_rect *bounds, const struct nk_rect *content,
+ nk_flags state, const struct nk_style_button *style,
+ enum nk_symbol_type type, const struct nk_user_font *font)
+{
+ struct nk_color sym, bg;
+ const struct nk_style_item *background;
+
+ /* select correct colors/images */
+ background = nk_draw_button(out, bounds, state, style);
+ if (background->type == NK_STYLE_ITEM_COLOR)
+ bg = background->data.color;
+ else bg = style->text_background;
+
+ if (state & NK_WIDGET_STATE_HOVER)
+ sym = style->text_hover;
+ else if (state & NK_WIDGET_STATE_ACTIVED)
+ sym = style->text_active;
+ else sym = style->text_normal;
+ nk_draw_symbol(out, type, *content, bg, sym, 1, font);
+}
+NK_LIB int
+nk_do_button_symbol(nk_flags *state,
+ struct nk_command_buffer *out, struct nk_rect bounds,
+ enum nk_symbol_type symbol, enum nk_button_behavior behavior,
+ const struct nk_style_button *style, const struct nk_input *in,
+ const struct nk_user_font *font)
+{
+ int ret;
+ struct nk_rect content;
+
+ NK_ASSERT(state);
+ NK_ASSERT(style);
+ NK_ASSERT(font);
+ NK_ASSERT(out);
+ if (!out || !style || !font || !state)
+ return nk_false;
+
+ ret = nk_do_button(state, out, bounds, style, in, behavior, &content);
+ if (style->draw_begin) style->draw_begin(out, style->userdata);
+ nk_draw_button_symbol(out, &bounds, &content, *state, style, symbol, font);
+ if (style->draw_end) style->draw_end(out, style->userdata);
+ return ret;
+}
+NK_LIB void
+nk_draw_button_image(struct nk_command_buffer *out,
+ const struct nk_rect *bounds, const struct nk_rect *content,
+ nk_flags state, const struct nk_style_button *style, const struct nk_image *img)
+{
+ nk_draw_button(out, bounds, state, style);
+ nk_draw_image(out, *content, img, nk_white);
+}
+NK_LIB int
+nk_do_button_image(nk_flags *state,
+ struct nk_command_buffer *out, struct nk_rect bounds,
+ struct nk_image img, enum nk_button_behavior b,
+ const struct nk_style_button *style, const struct nk_input *in)
+{
+ int ret;
+ struct nk_rect content;
+
+ NK_ASSERT(state);
+ NK_ASSERT(style);
+ NK_ASSERT(out);
+ if (!out || !style || !state)
+ return nk_false;
+
+ ret = nk_do_button(state, out, bounds, style, in, b, &content);
+ content.x += style->image_padding.x;
+ content.y += style->image_padding.y;
+ content.w -= 2 * style->image_padding.x;
+ content.h -= 2 * style->image_padding.y;
+
+ if (style->draw_begin) style->draw_begin(out, style->userdata);
+ nk_draw_button_image(out, &bounds, &content, *state, style, &img);
+ if (style->draw_end) style->draw_end(out, style->userdata);
+ return ret;
+}
+NK_LIB void
+nk_draw_button_text_symbol(struct nk_command_buffer *out,
+ const struct nk_rect *bounds, const struct nk_rect *label,
+ const struct nk_rect *symbol, nk_flags state, const struct nk_style_button *style,
+ const char *str, int len, enum nk_symbol_type type,
+ const struct nk_user_font *font)
+{
+ struct nk_color sym;
+ struct nk_text text;
+ const struct nk_style_item *background;
+
+ /* select correct background colors/images */
+ background = nk_draw_button(out, bounds, state, style);
+ if (background->type == NK_STYLE_ITEM_COLOR)
+ text.background = background->data.color;
+ else text.background = style->text_background;
+
+ /* select correct text colors */
+ if (state & NK_WIDGET_STATE_HOVER) {
+ sym = style->text_hover;
+ text.text = style->text_hover;
+ } else if (state & NK_WIDGET_STATE_ACTIVED) {
+ sym = style->text_active;
+ text.text = style->text_active;
+ } else {
+ sym = style->text_normal;
+ text.text = style->text_normal;
+ }
+
+ text.padding = nk_vec2(0,0);
+ nk_draw_symbol(out, type, *symbol, style->text_background, sym, 0, font);
+ nk_widget_text(out, *label, str, len, &text, NK_TEXT_CENTERED, font);
+}
+NK_LIB int
+nk_do_button_text_symbol(nk_flags *state,
+ struct nk_command_buffer *out, struct nk_rect bounds,
+ enum nk_symbol_type symbol, const char *str, int len, nk_flags align,
+ enum nk_button_behavior behavior, const struct nk_style_button *style,
+ const struct nk_user_font *font, const struct nk_input *in)
+{
+ int ret;
+ struct nk_rect tri = {0,0,0,0};
+ struct nk_rect content;
+
+ NK_ASSERT(style);
+ NK_ASSERT(out);
+ NK_ASSERT(font);
+ if (!out || !style || !font)
+ return nk_false;
+
+ ret = nk_do_button(state, out, bounds, style, in, behavior, &content);
+ tri.y = content.y + (content.h/2) - font->height/2;
+ tri.w = font->height; tri.h = font->height;
+ if (align & NK_TEXT_ALIGN_LEFT) {
+ tri.x = (content.x + content.w) - (2 * style->padding.x + tri.w);
+ tri.x = NK_MAX(tri.x, 0);
+ } else tri.x = content.x + 2 * style->padding.x;
+
+ /* draw button */
+ if (style->draw_begin) style->draw_begin(out, style->userdata);
+ nk_draw_button_text_symbol(out, &bounds, &content, &tri,
+ *state, style, str, len, symbol, font);
+ if (style->draw_end) style->draw_end(out, style->userdata);
+ return ret;
+}
+NK_LIB void
+nk_draw_button_text_image(struct nk_command_buffer *out,
+ const struct nk_rect *bounds, const struct nk_rect *label,
+ const struct nk_rect *image, nk_flags state, const struct nk_style_button *style,
+ const char *str, int len, const struct nk_user_font *font,
+ const struct nk_image *img)
+{
+ struct nk_text text;
+ const struct nk_style_item *background;
+ background = nk_draw_button(out, bounds, state, style);
+
+ /* select correct colors */
+ if (background->type == NK_STYLE_ITEM_COLOR)
+ text.background = background->data.color;
+ else text.background = style->text_background;
+ if (state & NK_WIDGET_STATE_HOVER)
+ text.text = style->text_hover;
+ else if (state & NK_WIDGET_STATE_ACTIVED)
+ text.text = style->text_active;
+ else text.text = style->text_normal;
+
+ text.padding = nk_vec2(0,0);
+ nk_widget_text(out, *label, str, len, &text, NK_TEXT_CENTERED, font);
+ nk_draw_image(out, *image, img, nk_white);
+}
+NK_LIB int
+nk_do_button_text_image(nk_flags *state,
+ struct nk_command_buffer *out, struct nk_rect bounds,
+ struct nk_image img, const char* str, int len, nk_flags align,
+ enum nk_button_behavior behavior, const struct nk_style_button *style,
+ const struct nk_user_font *font, const struct nk_input *in)
+{
+ int ret;
+ struct nk_rect icon;
+ struct nk_rect content;
+
+ NK_ASSERT(style);
+ NK_ASSERT(state);
+ NK_ASSERT(font);
+ NK_ASSERT(out);
+ if (!out || !font || !style || !str)
+ return nk_false;
+
+ ret = nk_do_button(state, out, bounds, style, in, behavior, &content);
+ icon.y = bounds.y + style->padding.y;
+ icon.w = icon.h = bounds.h - 2 * style->padding.y;
+ if (align & NK_TEXT_ALIGN_LEFT) {
+ icon.x = (bounds.x + bounds.w) - (2 * style->padding.x + icon.w);
+ icon.x = NK_MAX(icon.x, 0);
+ } else icon.x = bounds.x + 2 * style->padding.x;
+
+ icon.x += style->image_padding.x;
+ icon.y += style->image_padding.y;
+ icon.w -= 2 * style->image_padding.x;
+ icon.h -= 2 * style->image_padding.y;
+
+ if (style->draw_begin) style->draw_begin(out, style->userdata);
+ nk_draw_button_text_image(out, &bounds, &content, &icon, *state, style, str, len, font, &img);
+ if (style->draw_end) style->draw_end(out, style->userdata);
+ return ret;
+}
+NK_API void
+nk_button_set_behavior(struct nk_context *ctx, enum nk_button_behavior behavior)
+{
+ NK_ASSERT(ctx);
+ if (!ctx) return;
+ ctx->button_behavior = behavior;
+}
+NK_API int
+nk_button_push_behavior(struct nk_context *ctx, enum nk_button_behavior behavior)
+{
+ struct nk_config_stack_button_behavior *button_stack;
+ struct nk_config_stack_button_behavior_element *element;
+
+ NK_ASSERT(ctx);
+ if (!ctx) return 0;
+
+ button_stack = &ctx->stacks.button_behaviors;
+ NK_ASSERT(button_stack->head < (int)NK_LEN(button_stack->elements));
+ if (button_stack->head >= (int)NK_LEN(button_stack->elements))
+ return 0;
+
+ element = &button_stack->elements[button_stack->head++];
+ element->address = &ctx->button_behavior;
+ element->old_value = ctx->button_behavior;
+ ctx->button_behavior = behavior;
+ return 1;
+}
+NK_API int
+nk_button_pop_behavior(struct nk_context *ctx)
+{
+ struct nk_config_stack_button_behavior *button_stack;
+ struct nk_config_stack_button_behavior_element *element;
+
+ NK_ASSERT(ctx);
+ if (!ctx) return 0;
+
+ button_stack = &ctx->stacks.button_behaviors;
+ NK_ASSERT(button_stack->head > 0);
+ if (button_stack->head < 1)
+ return 0;
+
+ element = &button_stack->elements[--button_stack->head];
+ *element->address = element->old_value;
+ return 1;
+}
+NK_API int
+nk_button_text_styled(struct nk_context *ctx,
+ const struct nk_style_button *style, const char *title, int len)
+{
+ struct nk_window *win;
+ struct nk_panel *layout;
+ const struct nk_input *in;
+
+ struct nk_rect bounds;
+ enum nk_widget_layout_states state;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(style);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ if (!style || !ctx || !ctx->current || !ctx->current->layout) return 0;
+
+ win = ctx->current;
+ layout = win->layout;
+ state = nk_widget(&bounds, ctx);
+
+ if (!state) return 0;
+ in = (state == NK_WIDGET_ROM || layout->flags & NK_WINDOW_ROM) ? 0 : &ctx->input;
+ return nk_do_button_text(&ctx->last_widget_state, &win->buffer, bounds,
+ title, len, style->text_alignment, ctx->button_behavior,
+ style, in, ctx->style.font);
+}
+NK_API int
+nk_button_text(struct nk_context *ctx, const char *title, int len)
+{
+ NK_ASSERT(ctx);
+ if (!ctx) return 0;
+ return nk_button_text_styled(ctx, &ctx->style.button, title, len);
+}
+NK_API int nk_button_label_styled(struct nk_context *ctx,
+ const struct nk_style_button *style, const char *title)
+{
+ return nk_button_text_styled(ctx, style, title, nk_strlen(title));
+}
+NK_API int nk_button_label(struct nk_context *ctx, const char *title)
+{
+ return nk_button_text(ctx, title, nk_strlen(title));
+}
+NK_API int
+nk_button_color(struct nk_context *ctx, struct nk_color color)
+{
+ struct nk_window *win;
+ struct nk_panel *layout;
+ const struct nk_input *in;
+ struct nk_style_button button;
+
+ int ret = 0;
+ struct nk_rect bounds;
+ struct nk_rect content;
+ enum nk_widget_layout_states state;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ if (!ctx || !ctx->current || !ctx->current->layout)
+ return 0;
+
+ win = ctx->current;
+ layout = win->layout;
+
+ state = nk_widget(&bounds, ctx);
+ if (!state) return 0;
+ in = (state == NK_WIDGET_ROM || layout->flags & NK_WINDOW_ROM) ? 0 : &ctx->input;
+
+ button = ctx->style.button;
+ button.normal = nk_style_item_color(color);
+ button.hover = nk_style_item_color(color);
+ button.active = nk_style_item_color(color);
+ ret = nk_do_button(&ctx->last_widget_state, &win->buffer, bounds,
+ &button, in, ctx->button_behavior, &content);
+ nk_draw_button(&win->buffer, &bounds, ctx->last_widget_state, &button);
+ return ret;
+}
+NK_API int
+nk_button_symbol_styled(struct nk_context *ctx,
+ const struct nk_style_button *style, enum nk_symbol_type symbol)
+{
+ struct nk_window *win;
+ struct nk_panel *layout;
+ const struct nk_input *in;
+
+ struct nk_rect bounds;
+ enum nk_widget_layout_states state;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ if (!ctx || !ctx->current || !ctx->current->layout)
+ return 0;
+
+ win = ctx->current;
+ layout = win->layout;
+ state = nk_widget(&bounds, ctx);
+ if (!state) return 0;
+ in = (state == NK_WIDGET_ROM || layout->flags & NK_WINDOW_ROM) ? 0 : &ctx->input;
+ return nk_do_button_symbol(&ctx->last_widget_state, &win->buffer, bounds,
+ symbol, ctx->button_behavior, style, in, ctx->style.font);
+}
+NK_API int
+nk_button_symbol(struct nk_context *ctx, enum nk_symbol_type symbol)
+{
+ NK_ASSERT(ctx);
+ if (!ctx) return 0;
+ return nk_button_symbol_styled(ctx, &ctx->style.button, symbol);
+}
+NK_API int
+nk_button_image_styled(struct nk_context *ctx, const struct nk_style_button *style,
+ struct nk_image img)
+{
+ struct nk_window *win;
+ struct nk_panel *layout;
+ const struct nk_input *in;
+
+ struct nk_rect bounds;
+ enum nk_widget_layout_states state;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ if (!ctx || !ctx->current || !ctx->current->layout)
+ return 0;
+
+ win = ctx->current;
+ layout = win->layout;
+
+ state = nk_widget(&bounds, ctx);
+ if (!state) return 0;
+ in = (state == NK_WIDGET_ROM || layout->flags & NK_WINDOW_ROM) ? 0 : &ctx->input;
+ return nk_do_button_image(&ctx->last_widget_state, &win->buffer, bounds,
+ img, ctx->button_behavior, style, in);
+}
+NK_API int
+nk_button_image(struct nk_context *ctx, struct nk_image img)
+{
+ NK_ASSERT(ctx);
+ if (!ctx) return 0;
+ return nk_button_image_styled(ctx, &ctx->style.button, img);
+}
+NK_API int
+nk_button_symbol_text_styled(struct nk_context *ctx,
+ const struct nk_style_button *style, enum nk_symbol_type symbol,
+ const char *text, int len, nk_flags align)
+{
+ struct nk_window *win;
+ struct nk_panel *layout;
+ const struct nk_input *in;
+
+ struct nk_rect bounds;
+ enum nk_widget_layout_states state;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ if (!ctx || !ctx->current || !ctx->current->layout)
+ return 0;
+
+ win = ctx->current;
+ layout = win->layout;
+
+ state = nk_widget(&bounds, ctx);
+ if (!state) return 0;
+ in = (state == NK_WIDGET_ROM || layout->flags & NK_WINDOW_ROM) ? 0 : &ctx->input;
+ return nk_do_button_text_symbol(&ctx->last_widget_state, &win->buffer, bounds,
+ symbol, text, len, align, ctx->button_behavior,
+ style, ctx->style.font, in);
+}
+NK_API int
+nk_button_symbol_text(struct nk_context *ctx, enum nk_symbol_type symbol,
+ const char* text, int len, nk_flags align)
+{
+ NK_ASSERT(ctx);
+ if (!ctx) return 0;
+ return nk_button_symbol_text_styled(ctx, &ctx->style.button, symbol, text, len, align);
+}
+NK_API int nk_button_symbol_label(struct nk_context *ctx, enum nk_symbol_type symbol,
+ const char *label, nk_flags align)
+{
+ return nk_button_symbol_text(ctx, symbol, label, nk_strlen(label), align);
+}
+NK_API int nk_button_symbol_label_styled(struct nk_context *ctx,
+ const struct nk_style_button *style, enum nk_symbol_type symbol,
+ const char *title, nk_flags align)
+{
+ return nk_button_symbol_text_styled(ctx, style, symbol, title, nk_strlen(title), align);
+}
+NK_API int
+nk_button_image_text_styled(struct nk_context *ctx,
+ const struct nk_style_button *style, struct nk_image img, const char *text,
+ int len, nk_flags align)
+{
+ struct nk_window *win;
+ struct nk_panel *layout;
+ const struct nk_input *in;
+
+ struct nk_rect bounds;
+ enum nk_widget_layout_states state;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ if (!ctx || !ctx->current || !ctx->current->layout)
+ return 0;
+
+ win = ctx->current;
+ layout = win->layout;
+
+ state = nk_widget(&bounds, ctx);
+ if (!state) return 0;
+ in = (state == NK_WIDGET_ROM || layout->flags & NK_WINDOW_ROM) ? 0 : &ctx->input;
+ return nk_do_button_text_image(&ctx->last_widget_state, &win->buffer,
+ bounds, img, text, len, align, ctx->button_behavior,
+ style, ctx->style.font, in);
+}
+NK_API int
+nk_button_image_text(struct nk_context *ctx, struct nk_image img,
+ const char *text, int len, nk_flags align)
+{
+ return nk_button_image_text_styled(ctx, &ctx->style.button,img, text, len, align);
+}
+NK_API int nk_button_image_label(struct nk_context *ctx, struct nk_image img,
+ const char *label, nk_flags align)
+{
+ return nk_button_image_text(ctx, img, label, nk_strlen(label), align);
+}
+NK_API int nk_button_image_label_styled(struct nk_context *ctx,
+ const struct nk_style_button *style, struct nk_image img,
+ const char *label, nk_flags text_alignment)
+{
+ return nk_button_image_text_styled(ctx, style, img, label, nk_strlen(label), text_alignment);
+}
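+/* Usage sketch (illustrative): buttons report a click through their return
+ * value, and nk_button_push_behavior() temporarily switches to repeater mode
+ * (fires every frame while held) until the matching nk_button_pop_behavior().
+ * Assumes a valid `ctx` and an existing row layout; `changed`, `playing` and
+ * `offset` are hypothetical caller-side variables.
+ *
+ *     if (nk_button_label(ctx, "apply"))
+ *         changed = nk_true;
+ *     if (nk_button_symbol_label(ctx, NK_SYMBOL_TRIANGLE_RIGHT, "play", NK_TEXT_RIGHT))
+ *         playing = nk_true;
+ *     nk_button_push_behavior(ctx, NK_BUTTON_REPEATER);
+ *     if (nk_button_label(ctx, "scroll down"))
+ *         offset += 1;
+ *     nk_button_pop_behavior(ctx);
+ */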
+
+
+
+
+
+/* ===============================================================
+ *
+ * TOGGLE
+ *
+ * ===============================================================*/
+NK_LIB int
+nk_toggle_behavior(const struct nk_input *in, struct nk_rect select,
+ nk_flags *state, int active)
+{
+ nk_widget_state_reset(state);
+ if (nk_button_behavior(state, select, in, NK_BUTTON_DEFAULT)) {
+ *state = NK_WIDGET_STATE_ACTIVE;
+ active = !active;
+ }
+ if (*state & NK_WIDGET_STATE_HOVER && !nk_input_is_mouse_prev_hovering_rect(in, select))
+ *state |= NK_WIDGET_STATE_ENTERED;
+ else if (nk_input_is_mouse_prev_hovering_rect(in, select))
+ *state |= NK_WIDGET_STATE_LEFT;
+ return active;
+}
+NK_LIB void
+nk_draw_checkbox(struct nk_command_buffer *out,
+ nk_flags state, const struct nk_style_toggle *style, int active,
+ const struct nk_rect *label, const struct nk_rect *selector,
+ const struct nk_rect *cursors, const char *string, int len,
+ const struct nk_user_font *font)
+{
+ const struct nk_style_item *background;
+ const struct nk_style_item *cursor;
+ struct nk_text text;
+
+ /* select correct colors/images */
+ if (state & NK_WIDGET_STATE_HOVER) {
+ background = &style->hover;
+ cursor = &style->cursor_hover;
+ text.text = style->text_hover;
+ } else if (state & NK_WIDGET_STATE_ACTIVED) {
+ background = &style->hover;
+ cursor = &style->cursor_hover;
+ text.text = style->text_active;
+ } else {
+ background = &style->normal;
+ cursor = &style->cursor_normal;
+ text.text = style->text_normal;
+ }
+
+ /* draw background and cursor */
+ if (background->type == NK_STYLE_ITEM_COLOR) {
+ nk_fill_rect(out, *selector, 0, style->border_color);
+ nk_fill_rect(out, nk_shrink_rect(*selector, style->border), 0, background->data.color);
+ } else nk_draw_image(out, *selector, &background->data.image, nk_white);
+ if (active) {
+ if (cursor->type == NK_STYLE_ITEM_IMAGE)
+ nk_draw_image(out, *cursors, &cursor->data.image, nk_white);
+ else nk_fill_rect(out, *cursors, 0, cursor->data.color);
+ }
+
+ text.padding.x = 0;
+ text.padding.y = 0;
+ text.background = style->text_background;
+ nk_widget_text(out, *label, string, len, &text, NK_TEXT_LEFT, font);
+}
+NK_LIB void
+nk_draw_option(struct nk_command_buffer *out,
+ nk_flags state, const struct nk_style_toggle *style, int active,
+ const struct nk_rect *label, const struct nk_rect *selector,
+ const struct nk_rect *cursors, const char *string, int len,
+ const struct nk_user_font *font)
+{
+ const struct nk_style_item *background;
+ const struct nk_style_item *cursor;
+ struct nk_text text;
+
+ /* select correct colors/images */
+ if (state & NK_WIDGET_STATE_HOVER) {
+ background = &style->hover;
+ cursor = &style->cursor_hover;
+ text.text = style->text_hover;
+ } else if (state & NK_WIDGET_STATE_ACTIVED) {
+ background = &style->hover;
+ cursor = &style->cursor_hover;
+ text.text = style->text_active;
+ } else {
+ background = &style->normal;
+ cursor = &style->cursor_normal;
+ text.text = style->text_normal;
+ }
+
+ /* draw background and cursor */
+ if (background->type == NK_STYLE_ITEM_COLOR) {
+ nk_fill_circle(out, *selector, style->border_color);
+ nk_fill_circle(out, nk_shrink_rect(*selector, style->border), background->data.color);
+ } else nk_draw_image(out, *selector, &background->data.image, nk_white);
+ if (active) {
+ if (cursor->type == NK_STYLE_ITEM_IMAGE)
+ nk_draw_image(out, *cursors, &cursor->data.image, nk_white);
+ else nk_fill_circle(out, *cursors, cursor->data.color);
+ }
+
+ text.padding.x = 0;
+ text.padding.y = 0;
+ text.background = style->text_background;
+ nk_widget_text(out, *label, string, len, &text, NK_TEXT_LEFT, font);
+}
+NK_LIB int
+nk_do_toggle(nk_flags *state,
+ struct nk_command_buffer *out, struct nk_rect r,
+ int *active, const char *str, int len, enum nk_toggle_type type,
+ const struct nk_style_toggle *style, const struct nk_input *in,
+ const struct nk_user_font *font)
+{
+ int was_active;
+ struct nk_rect bounds;
+ struct nk_rect select;
+ struct nk_rect cursor;
+ struct nk_rect label;
+
+ NK_ASSERT(style);
+ NK_ASSERT(out);
+ NK_ASSERT(font);
+ if (!out || !style || !font || !active)
+ return 0;
+
+ r.w = NK_MAX(r.w, font->height + 2 * style->padding.x);
+ r.h = NK_MAX(r.h, font->height + 2 * style->padding.y);
+
+ /* add additional touch padding for touch screen devices */
+ bounds.x = r.x - style->touch_padding.x;
+ bounds.y = r.y - style->touch_padding.y;
+ bounds.w = r.w + 2 * style->touch_padding.x;
+ bounds.h = r.h + 2 * style->touch_padding.y;
+
+ /* calculate the selector space */
+ select.w = font->height;
+ select.h = select.w;
+ select.y = r.y + r.h/2.0f - select.h/2.0f;
+ select.x = r.x;
+
+ /* calculate the bounds of the cursor inside the selector */
+ cursor.x = select.x + style->padding.x + style->border;
+ cursor.y = select.y + style->padding.y + style->border;
+ cursor.w = select.w - (2 * style->padding.x + 2 * style->border);
+ cursor.h = select.h - (2 * style->padding.y + 2 * style->border);
+
+ /* label placed to the right of the selector */
+ label.x = select.x + select.w + style->spacing;
+ label.y = select.y;
+ label.w = NK_MAX(r.x + r.w, label.x) - label.x;
+ label.h = select.w;
+
+ /* update selector */
+ was_active = *active;
+ *active = nk_toggle_behavior(in, bounds, state, *active);
+
+ /* draw selector */
+ if (style->draw_begin)
+ style->draw_begin(out, style->userdata);
+ if (type == NK_TOGGLE_CHECK) {
+ nk_draw_checkbox(out, *state, style, *active, &label, &select, &cursor, str, len, font);
+ } else {
+ nk_draw_option(out, *state, style, *active, &label, &select, &cursor, str, len, font);
+ }
+ if (style->draw_end)
+ style->draw_end(out, style->userdata);
+ return (was_active != *active);
+}
+/*----------------------------------------------------------------
+ *
+ * CHECKBOX
+ *
+ * --------------------------------------------------------------*/
+NK_API int
+nk_check_text(struct nk_context *ctx, const char *text, int len, int active)
+{
+ struct nk_window *win;
+ struct nk_panel *layout;
+ const struct nk_input *in;
+ const struct nk_style *style;
+
+ struct nk_rect bounds;
+ enum nk_widget_layout_states state;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ if (!ctx || !ctx->current || !ctx->current->layout)
+ return active;
+
+ win = ctx->current;
+ style = &ctx->style;
+ layout = win->layout;
+
+ state = nk_widget(&bounds, ctx);
+ if (!state) return active;
+ in = (state == NK_WIDGET_ROM || layout->flags & NK_WINDOW_ROM) ? 0 : &ctx->input;
+ nk_do_toggle(&ctx->last_widget_state, &win->buffer, bounds, &active,
+ text, len, NK_TOGGLE_CHECK, &style->checkbox, in, style->font);
+ return active;
+}
+NK_API unsigned int
+nk_check_flags_text(struct nk_context *ctx, const char *text, int len,
+ unsigned int flags, unsigned int value)
+{
+ int old_active;
+ NK_ASSERT(ctx);
+ NK_ASSERT(text);
+ if (!ctx || !text) return flags;
+ old_active = (int)((flags & value) & value);
+ if (nk_check_text(ctx, text, len, old_active))
+ flags |= value;
+ else flags &= ~value;
+ return flags;
+}
+NK_API int
+nk_checkbox_text(struct nk_context *ctx, const char *text, int len, int *active)
+{
+ int old_val;
+ NK_ASSERT(ctx);
+ NK_ASSERT(text);
+ NK_ASSERT(active);
+ if (!ctx || !text || !active) return 0;
+ old_val = *active;
+ *active = nk_check_text(ctx, text, len, *active);
+ return old_val != *active;
+}
+NK_API int
+nk_checkbox_flags_text(struct nk_context *ctx, const char *text, int len,
+ unsigned int *flags, unsigned int value)
+{
+ int active;
+ NK_ASSERT(ctx);
+ NK_ASSERT(text);
+ NK_ASSERT(flags);
+ if (!ctx || !text || !flags) return 0;
+
+ active = (int)((*flags & value) & value);
+ if (nk_checkbox_text(ctx, text, len, &active)) {
+ if (active) *flags |= value;
+ else *flags &= ~value;
+ return 1;
+ }
+ return 0;
+}
+NK_API int nk_check_label(struct nk_context *ctx, const char *label, int active)
+{
+ return nk_check_text(ctx, label, nk_strlen(label), active);
+}
+NK_API unsigned int nk_check_flags_label(struct nk_context *ctx, const char *label,
+ unsigned int flags, unsigned int value)
+{
+ return nk_check_flags_text(ctx, label, nk_strlen(label), flags, value);
+}
+NK_API int nk_checkbox_label(struct nk_context *ctx, const char *label, int *active)
+{
+ return nk_checkbox_text(ctx, label, nk_strlen(label), active);
+}
+NK_API int nk_checkbox_flags_label(struct nk_context *ctx, const char *label,
+ unsigned int *flags, unsigned int value)
+{
+ return nk_checkbox_flags_text(ctx, label, nk_strlen(label), flags, value);
+}
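+/* Illustrative usage sketch (editorial note, not part of the library): the
+ * nk_checkbox_* variants write the toggle state back through a pointer and
+ * return nonzero when it changed, while nk_check_* return the new state
+ * directly. Assumes `ctx` is an initialized nk_context inside an active
+ * window/row; SHOW_GRID_FLAG and on_toggle() are application-side placeholders.
+ *
+ * static int show_grid = 1;
+ * static unsigned int opts = 0;
+ * if (nk_checkbox_label(ctx, "Show grid", &show_grid))
+ *     on_toggle(show_grid);
+ * nk_checkbox_flags_label(ctx, "Grid flag", &opts, SHOW_GRID_FLAG);
+ */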
+/*----------------------------------------------------------------
+ *
+ * OPTION
+ *
+ * --------------------------------------------------------------*/
+NK_API int
+nk_option_text(struct nk_context *ctx, const char *text, int len, int is_active)
+{
+ struct nk_window *win;
+ struct nk_panel *layout;
+ const struct nk_input *in;
+ const struct nk_style *style;
+
+ struct nk_rect bounds;
+ enum nk_widget_layout_states state;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ if (!ctx || !ctx->current || !ctx->current->layout)
+ return is_active;
+
+ win = ctx->current;
+ style = &ctx->style;
+ layout = win->layout;
+
+ state = nk_widget(&bounds, ctx);
+ if (!state) return (int)state;
+ in = (state == NK_WIDGET_ROM || layout->flags & NK_WINDOW_ROM) ? 0 : &ctx->input;
+ nk_do_toggle(&ctx->last_widget_state, &win->buffer, bounds, &is_active,
+ text, len, NK_TOGGLE_OPTION, &style->option, in, style->font);
+ return is_active;
+}
+NK_API int
+nk_radio_text(struct nk_context *ctx, const char *text, int len, int *active)
+{
+ int old_value;
+ NK_ASSERT(ctx);
+ NK_ASSERT(text);
+ NK_ASSERT(active);
+ if (!ctx || !text || !active) return 0;
+ old_value = *active;
+ *active = nk_option_text(ctx, text, len, old_value);
+ return old_value != *active;
+}
+NK_API int
+nk_option_label(struct nk_context *ctx, const char *label, int active)
+{
+ return nk_option_text(ctx, label, nk_strlen(label), active);
+}
+NK_API int
+nk_radio_label(struct nk_context *ctx, const char *label, int *active)
+{
+ return nk_radio_text(ctx, label, nk_strlen(label), active);
+}
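+/* Illustrative usage sketch (editorial note, not part of the library): a radio
+ * group is expressed as one value compared against each option; the option
+ * whose call returns nonzero becomes the new selection. Assumes `ctx` is an
+ * initialized nk_context inside an active window/row.
+ *
+ * enum {MODE_FILL, MODE_WIRE};
+ * static int mode = MODE_FILL;
+ * if (nk_option_label(ctx, "Fill", mode == MODE_FILL)) mode = MODE_FILL;
+ * if (nk_option_label(ctx, "Wireframe", mode == MODE_WIRE)) mode = MODE_WIRE;
+ */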
+
+
+
+
+
+/* ===============================================================
+ *
+ * SELECTABLE
+ *
+ * ===============================================================*/
+NK_LIB void
+nk_draw_selectable(struct nk_command_buffer *out,
+ nk_flags state, const struct nk_style_selectable *style, int active,
+ const struct nk_rect *bounds,
+ const struct nk_rect *icon, const struct nk_image *img, enum nk_symbol_type sym,
+ const char *string, int len, nk_flags align, const struct nk_user_font *font)
+{
+ const struct nk_style_item *background;
+ struct nk_text text;
+ text.padding = style->padding;
+
+ /* select correct colors/images */
+ if (!active) {
+ if (state & NK_WIDGET_STATE_ACTIVED) {
+ background = &style->pressed;
+ text.text = style->text_pressed;
+ } else if (state & NK_WIDGET_STATE_HOVER) {
+ background = &style->hover;
+ text.text = style->text_hover;
+ } else {
+ background = &style->normal;
+ text.text = style->text_normal;
+ }
+ } else {
+ if (state & NK_WIDGET_STATE_ACTIVED) {
+ background = &style->pressed_active;
+ text.text = style->text_pressed_active;
+ } else if (state & NK_WIDGET_STATE_HOVER) {
+ background = &style->hover_active;
+ text.text = style->text_hover_active;
+ } else {
+ background = &style->normal_active;
+ text.text = style->text_normal_active;
+ }
+ }
+ /* draw selectable background and text */
+ if (background->type == NK_STYLE_ITEM_IMAGE) {
+ nk_draw_image(out, *bounds, &background->data.image, nk_white);
+ text.background = nk_rgba(0,0,0,0);
+ } else {
+ nk_fill_rect(out, *bounds, style->rounding, background->data.color);
+ text.background = background->data.color;
+ }
+ if (icon) {
+ if (img) nk_draw_image(out, *icon, img, nk_white);
+ else nk_draw_symbol(out, sym, *icon, text.background, text.text, 1, font);
+ }
+ nk_widget_text(out, *bounds, string, len, &text, align, font);
+}
+NK_LIB int
+nk_do_selectable(nk_flags *state, struct nk_command_buffer *out,
+ struct nk_rect bounds, const char *str, int len, nk_flags align, int *value,
+ const struct nk_style_selectable *style, const struct nk_input *in,
+ const struct nk_user_font *font)
+{
+ int old_value;
+ struct nk_rect touch;
+
+ NK_ASSERT(state);
+ NK_ASSERT(out);
+ NK_ASSERT(str);
+ NK_ASSERT(len);
+ NK_ASSERT(value);
+ NK_ASSERT(style);
+ NK_ASSERT(font);
+
+ if (!state || !out || !str || !len || !value || !style || !font) return 0;
+ old_value = *value;
+
+ /* expand bounds by touch padding to get the clickable area */
+ touch.x = bounds.x - style->touch_padding.x;
+ touch.y = bounds.y - style->touch_padding.y;
+ touch.w = bounds.w + style->touch_padding.x * 2;
+ touch.h = bounds.h + style->touch_padding.y * 2;
+
+ /* update button */
+ if (nk_button_behavior(state, touch, in, NK_BUTTON_DEFAULT))
+ *value = !(*value);
+
+ /* draw selectable */
+ if (style->draw_begin) style->draw_begin(out, style->userdata);
+ nk_draw_selectable(out, *state, style, *value, &bounds, 0,0,NK_SYMBOL_NONE, str, len, align, font);
+ if (style->draw_end) style->draw_end(out, style->userdata);
+ return old_value != *value;
+}
+NK_LIB int
+nk_do_selectable_image(nk_flags *state, struct nk_command_buffer *out,
+ struct nk_rect bounds, const char *str, int len, nk_flags align, int *value,
+ const struct nk_image *img, const struct nk_style_selectable *style,
+ const struct nk_input *in, const struct nk_user_font *font)
+{
+ int old_value;
+ struct nk_rect touch;
+ struct nk_rect icon;
+
+ NK_ASSERT(state);
+ NK_ASSERT(out);
+ NK_ASSERT(str);
+ NK_ASSERT(len);
+ NK_ASSERT(value);
+ NK_ASSERT(style);
+ NK_ASSERT(font);
+
+ if (!state || !out || !str || !len || !value || !style || !font) return 0;
+ old_value = *value;
+
+ /* toggle behavior */
+ touch.x = bounds.x - style->touch_padding.x;
+ touch.y = bounds.y - style->touch_padding.y;
+ touch.w = bounds.w + style->touch_padding.x * 2;
+ touch.h = bounds.h + style->touch_padding.y * 2;
+ if (nk_button_behavior(state, touch, in, NK_BUTTON_DEFAULT))
+ *value = !(*value);
+
+ icon.y = bounds.y + style->padding.y;
+ icon.w = icon.h = bounds.h - 2 * style->padding.y;
+ if (align & NK_TEXT_ALIGN_LEFT) {
+ icon.x = (bounds.x + bounds.w) - (2 * style->padding.x + icon.w);
+ icon.x = NK_MAX(icon.x, 0);
+ } else icon.x = bounds.x + 2 * style->padding.x;
+
+ icon.x += style->image_padding.x;
+ icon.y += style->image_padding.y;
+ icon.w -= 2 * style->image_padding.x;
+ icon.h -= 2 * style->image_padding.y;
+
+ /* draw selectable */
+ if (style->draw_begin) style->draw_begin(out, style->userdata);
+ nk_draw_selectable(out, *state, style, *value, &bounds, &icon, img, NK_SYMBOL_NONE, str, len, align, font);
+ if (style->draw_end) style->draw_end(out, style->userdata);
+ return old_value != *value;
+}
+NK_LIB int
+nk_do_selectable_symbol(nk_flags *state, struct nk_command_buffer *out,
+ struct nk_rect bounds, const char *str, int len, nk_flags align, int *value,
+ enum nk_symbol_type sym, const struct nk_style_selectable *style,
+ const struct nk_input *in, const struct nk_user_font *font)
+{
+ int old_value;
+ struct nk_rect touch;
+ struct nk_rect icon;
+
+ NK_ASSERT(state);
+ NK_ASSERT(out);
+ NK_ASSERT(str);
+ NK_ASSERT(len);
+ NK_ASSERT(value);
+ NK_ASSERT(style);
+ NK_ASSERT(font);
+
+ if (!state || !out || !str || !len || !value || !style || !font) return 0;
+ old_value = *value;
+
+ /* toggle behavior */
+ touch.x = bounds.x - style->touch_padding.x;
+ touch.y = bounds.y - style->touch_padding.y;
+ touch.w = bounds.w + style->touch_padding.x * 2;
+ touch.h = bounds.h + style->touch_padding.y * 2;
+ if (nk_button_behavior(state, touch, in, NK_BUTTON_DEFAULT))
+ *value = !(*value);
+
+ icon.y = bounds.y + style->padding.y;
+ icon.w = icon.h = bounds.h - 2 * style->padding.y;
+ if (align & NK_TEXT_ALIGN_LEFT) {
+ icon.x = (bounds.x + bounds.w) - (2 * style->padding.x + icon.w);
+ icon.x = NK_MAX(icon.x, 0);
+ } else icon.x = bounds.x + 2 * style->padding.x;
+
+ icon.x += style->image_padding.x;
+ icon.y += style->image_padding.y;
+ icon.w -= 2 * style->image_padding.x;
+ icon.h -= 2 * style->image_padding.y;
+
+ /* draw selectable */
+ if (style->draw_begin) style->draw_begin(out, style->userdata);
+ nk_draw_selectable(out, *state, style, *value, &bounds, &icon, 0, sym, str, len, align, font);
+ if (style->draw_end) style->draw_end(out, style->userdata);
+ return old_value != *value;
+}
+
+NK_API int
+nk_selectable_text(struct nk_context *ctx, const char *str, int len,
+ nk_flags align, int *value)
+{
+ struct nk_window *win;
+ struct nk_panel *layout;
+ const struct nk_input *in;
+ const struct nk_style *style;
+
+ enum nk_widget_layout_states state;
+ struct nk_rect bounds;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(value);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ if (!ctx || !ctx->current || !ctx->current->layout || !value)
+ return 0;
+
+ win = ctx->current;
+ layout = win->layout;
+ style = &ctx->style;
+
+ state = nk_widget(&bounds, ctx);
+ if (!state) return 0;
+ in = (state == NK_WIDGET_ROM || layout->flags & NK_WINDOW_ROM) ? 0 : &ctx->input;
+ return nk_do_selectable(&ctx->last_widget_state, &win->buffer, bounds,
+ str, len, align, value, &style->selectable, in, style->font);
+}
+NK_API int
+nk_selectable_image_text(struct nk_context *ctx, struct nk_image img,
+ const char *str, int len, nk_flags align, int *value)
+{
+ struct nk_window *win;
+ struct nk_panel *layout;
+ const struct nk_input *in;
+ const struct nk_style *style;
+
+ enum nk_widget_layout_states state;
+ struct nk_rect bounds;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(value);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ if (!ctx || !ctx->current || !ctx->current->layout || !value)
+ return 0;
+
+ win = ctx->current;
+ layout = win->layout;
+ style = &ctx->style;
+
+ state = nk_widget(&bounds, ctx);
+ if (!state) return 0;
+ in = (state == NK_WIDGET_ROM || layout->flags & NK_WINDOW_ROM) ? 0 : &ctx->input;
+ return nk_do_selectable_image(&ctx->last_widget_state, &win->buffer, bounds,
+ str, len, align, value, &img, &style->selectable, in, style->font);
+}
+NK_API int
+nk_selectable_symbol_text(struct nk_context *ctx, enum nk_symbol_type sym,
+ const char *str, int len, nk_flags align, int *value)
+{
+ struct nk_window *win;
+ struct nk_panel *layout;
+ const struct nk_input *in;
+ const struct nk_style *style;
+
+ enum nk_widget_layout_states state;
+ struct nk_rect bounds;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(value);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ if (!ctx || !ctx->current || !ctx->current->layout || !value)
+ return 0;
+
+ win = ctx->current;
+ layout = win->layout;
+ style = &ctx->style;
+
+ state = nk_widget(&bounds, ctx);
+ if (!state) return 0;
+ in = (state == NK_WIDGET_ROM || layout->flags & NK_WINDOW_ROM) ? 0 : &ctx->input;
+ return nk_do_selectable_symbol(&ctx->last_widget_state, &win->buffer, bounds,
+ str, len, align, value, sym, &style->selectable, in, style->font);
+}
+NK_API int
+nk_selectable_symbol_label(struct nk_context *ctx, enum nk_symbol_type sym,
+ const char *title, nk_flags align, int *value)
+{
+ return nk_selectable_symbol_text(ctx, sym, title, nk_strlen(title), align, value);
+}
+NK_API int nk_select_text(struct nk_context *ctx, const char *str, int len,
+ nk_flags align, int value)
+{
+ nk_selectable_text(ctx, str, len, align, &value);
+ return value;
+}
+NK_API int nk_selectable_label(struct nk_context *ctx, const char *str, nk_flags align, int *value)
+{
+ return nk_selectable_text(ctx, str, nk_strlen(str), align, value);
+}
+NK_API int nk_selectable_image_label(struct nk_context *ctx,struct nk_image img,
+ const char *str, nk_flags align, int *value)
+{
+ return nk_selectable_image_text(ctx, img, str, nk_strlen(str), align, value);
+}
+NK_API int nk_select_label(struct nk_context *ctx, const char *str, nk_flags align, int value)
+{
+ nk_selectable_text(ctx, str, nk_strlen(str), align, &value);
+ return value;
+}
+NK_API int nk_select_image_label(struct nk_context *ctx, struct nk_image img,
+ const char *str, nk_flags align, int value)
+{
+ nk_selectable_image_text(ctx, img, str, nk_strlen(str), align, &value);
+ return value;
+}
+NK_API int nk_select_image_text(struct nk_context *ctx, struct nk_image img,
+ const char *str, int len, nk_flags align, int value)
+{
+ nk_selectable_image_text(ctx, img, str, len, align, &value);
+ return value;
+}
+NK_API int
+nk_select_symbol_text(struct nk_context *ctx, enum nk_symbol_type sym,
+ const char *title, int title_len, nk_flags align, int value)
+{
+ nk_selectable_symbol_text(ctx, sym, title, title_len, align, &value);
+ return value;
+}
+NK_API int
+nk_select_symbol_label(struct nk_context *ctx, enum nk_symbol_type sym,
+ const char *title, nk_flags align, int value)
+{
+ return nk_select_symbol_text(ctx, sym, title, nk_strlen(title), align, value);
+}
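+/* Illustrative usage sketch (editorial note, not part of the library): the
+ * nk_selectable_* variants toggle an int through a pointer and return nonzero
+ * on change, while the nk_select_* variants take and return the value. Assumes
+ * `ctx` is an initialized nk_context inside an active window/row and `names`
+ * is an application-side string array.
+ *
+ * static int selected[3];
+ * int i;
+ * for (i = 0; i < 3; ++i)
+ *     nk_selectable_label(ctx, names[i], NK_TEXT_LEFT, &selected[i]);
+ */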
+
+
+
+
+
+/* ===============================================================
+ *
+ * SLIDER
+ *
+ * ===============================================================*/
+NK_LIB float
+nk_slider_behavior(nk_flags *state, struct nk_rect *logical_cursor,
+ struct nk_rect *visual_cursor, struct nk_input *in,
+ struct nk_rect bounds, float slider_min, float slider_max, float slider_value,
+ float slider_step, float slider_steps)
+{
+ int left_mouse_down;
+ int left_mouse_click_in_cursor;
+
+ /* check if visual cursor is being dragged */
+ nk_widget_state_reset(state);
+ left_mouse_down = in && in->mouse.buttons[NK_BUTTON_LEFT].down;
+ left_mouse_click_in_cursor = in && nk_input_has_mouse_click_down_in_rect(in,
+ NK_BUTTON_LEFT, *visual_cursor, nk_true);
+
+ if (left_mouse_down && left_mouse_click_in_cursor) {
+ float ratio = 0;
+ const float d = in->mouse.pos.x - (visual_cursor->x+visual_cursor->w*0.5f);
+ const float pxstep = bounds.w / slider_steps;
+
+ /* only update value if the next slider step is reached */
+ *state = NK_WIDGET_STATE_ACTIVE;
+ if (NK_ABS(d) >= pxstep) {
+ const float steps = (float)((int)(NK_ABS(d) / pxstep));
+ slider_value += (d > 0) ? (slider_step*steps) : -(slider_step*steps);
+ slider_value = NK_CLAMP(slider_min, slider_value, slider_max);
+ ratio = (slider_value - slider_min)/slider_step;
+ logical_cursor->x = bounds.x + (logical_cursor->w * ratio);
+ in->mouse.buttons[NK_BUTTON_LEFT].clicked_pos.x = logical_cursor->x;
+ }
+ }
+
+ /* slider widget state */
+ if (nk_input_is_mouse_hovering_rect(in, bounds))
+ *state = NK_WIDGET_STATE_HOVERED;
+ if (*state & NK_WIDGET_STATE_HOVER &&
+ !nk_input_is_mouse_prev_hovering_rect(in, bounds))
+ *state |= NK_WIDGET_STATE_ENTERED;
+ else if (nk_input_is_mouse_prev_hovering_rect(in, bounds))
+ *state |= NK_WIDGET_STATE_LEFT;
+ return slider_value;
+}
+NK_LIB void
+nk_draw_slider(struct nk_command_buffer *out, nk_flags state,
+ const struct nk_style_slider *style, const struct nk_rect *bounds,
+ const struct nk_rect *visual_cursor, float min, float value, float max)
+{
+ struct nk_rect fill;
+ struct nk_rect bar;
+ const struct nk_style_item *background;
+
+ /* select correct slider images/colors */
+ struct nk_color bar_color;
+ const struct nk_style_item *cursor;
+
+ NK_UNUSED(min);
+ NK_UNUSED(max);
+ NK_UNUSED(value);
+
+ if (state & NK_WIDGET_STATE_ACTIVED) {
+ background = &style->active;
+ bar_color = style->bar_active;
+ cursor = &style->cursor_active;
+ } else if (state & NK_WIDGET_STATE_HOVER) {
+ background = &style->hover;
+ bar_color = style->bar_hover;
+ cursor = &style->cursor_hover;
+ } else {
+ background = &style->normal;
+ bar_color = style->bar_normal;
+ cursor = &style->cursor_normal;
+ }
+ /* calculate slider background bar */
+ bar.x = bounds->x;
+ bar.y = (visual_cursor->y + visual_cursor->h/2) - bounds->h/12;
+ bar.w = bounds->w;
+ bar.h = bounds->h/6;
+
+ /* filled background bar style */
+ fill.w = (visual_cursor->x + (visual_cursor->w/2.0f)) - bar.x;
+ fill.x = bar.x;
+ fill.y = bar.y;
+ fill.h = bar.h;
+
+ /* draw background */
+ if (background->type == NK_STYLE_ITEM_IMAGE) {
+ nk_draw_image(out, *bounds, &background->data.image, nk_white);
+ } else {
+ nk_fill_rect(out, *bounds, style->rounding, background->data.color);
+ nk_stroke_rect(out, *bounds, style->rounding, style->border, style->border_color);
+ }
+
+ /* draw slider bar */
+ nk_fill_rect(out, bar, style->rounding, bar_color);
+ nk_fill_rect(out, fill, style->rounding, style->bar_filled);
+
+ /* draw cursor */
+ if (cursor->type == NK_STYLE_ITEM_IMAGE)
+ nk_draw_image(out, *visual_cursor, &cursor->data.image, nk_white);
+ else nk_fill_circle(out, *visual_cursor, cursor->data.color);
+}
+NK_LIB float
+nk_do_slider(nk_flags *state,
+ struct nk_command_buffer *out, struct nk_rect bounds,
+ float min, float val, float max, float step,
+ const struct nk_style_slider *style, struct nk_input *in,
+ const struct nk_user_font *font)
+{
+ float slider_range;
+ float slider_min;
+ float slider_max;
+ float slider_value;
+ float slider_steps;
+ float cursor_offset;
+
+ struct nk_rect visual_cursor;
+ struct nk_rect logical_cursor;
+
+ NK_ASSERT(style);
+ NK_ASSERT(out);
+ if (!out || !style)
+ return 0;
+
+ /* remove padding from slider bounds */
+ bounds.x = bounds.x + style->padding.x;
+ bounds.y = bounds.y + style->padding.y;
+ bounds.h = NK_MAX(bounds.h, 2*style->padding.y);
+ bounds.w = NK_MAX(bounds.w, 2*style->padding.x + style->cursor_size.x);
+ bounds.w -= 2 * style->padding.x;
+ bounds.h -= 2 * style->padding.y;
+
+ /* optional buttons */
+ if (style->show_buttons) {
+ nk_flags ws;
+ struct nk_rect button;
+ button.y = bounds.y;
+ button.w = bounds.h;
+ button.h = bounds.h;
+
+ /* decrement button */
+ button.x = bounds.x;
+ if (nk_do_button_symbol(&ws, out, button, style->dec_symbol, NK_BUTTON_DEFAULT,
+ &style->dec_button, in, font))
+ val -= step;
+
+ /* increment button */
+ button.x = (bounds.x + bounds.w) - button.w;
+ if (nk_do_button_symbol(&ws, out, button, style->inc_symbol, NK_BUTTON_DEFAULT,
+ &style->inc_button, in, font))
+ val += step;
+
+ bounds.x = bounds.x + button.w + style->spacing.x;
+ bounds.w = bounds.w - (2*button.w + 2*style->spacing.x);
+ }
+
+ /* remove one cursor size to support visual cursor */
+ bounds.x += style->cursor_size.x*0.5f;
+ bounds.w -= style->cursor_size.x;
+
+ /* make sure the provided values are correct */
+ slider_max = NK_MAX(min, max);
+ slider_min = NK_MIN(min, max);
+ slider_value = NK_CLAMP(slider_min, val, slider_max);
+ slider_range = slider_max - slider_min;
+ slider_steps = slider_range / step;
+ cursor_offset = (slider_value - slider_min) / step;
+
+ /* calculate cursor
+ There are two cursors: a logical one used to compute the slider value and
+ a visual one used for drawing and mouse interaction. */
+ logical_cursor.h = bounds.h;
+ logical_cursor.w = bounds.w / slider_steps;
+ logical_cursor.x = bounds.x + (logical_cursor.w * cursor_offset);
+ logical_cursor.y = bounds.y;
+
+ visual_cursor.h = style->cursor_size.y;
+ visual_cursor.w = style->cursor_size.x;
+ visual_cursor.y = (bounds.y + bounds.h*0.5f) - visual_cursor.h*0.5f;
+ visual_cursor.x = logical_cursor.x - visual_cursor.w*0.5f;
+
+ slider_value = nk_slider_behavior(state, &logical_cursor, &visual_cursor,
+ in, bounds, slider_min, slider_max, slider_value, step, slider_steps);
+ visual_cursor.x = logical_cursor.x - visual_cursor.w*0.5f;
+
+ /* draw slider */
+ if (style->draw_begin) style->draw_begin(out, style->userdata);
+ nk_draw_slider(out, *state, style, &bounds, &visual_cursor, slider_min, slider_value, slider_max);
+ if (style->draw_end) style->draw_end(out, style->userdata);
+ return slider_value;
+}
+NK_API int
+nk_slider_float(struct nk_context *ctx, float min_value, float *value, float max_value,
+ float value_step)
+{
+ struct nk_window *win;
+ struct nk_panel *layout;
+ struct nk_input *in;
+ const struct nk_style *style;
+
+ int ret = 0;
+ float old_value;
+ struct nk_rect bounds;
+ enum nk_widget_layout_states state;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ NK_ASSERT(value);
+ if (!ctx || !ctx->current || !ctx->current->layout || !value)
+ return ret;
+
+ win = ctx->current;
+ style = &ctx->style;
+ layout = win->layout;
+
+ state = nk_widget(&bounds, ctx);
+ if (!state) return ret;
+ in = (/*state == NK_WIDGET_ROM || */ layout->flags & NK_WINDOW_ROM) ? 0 : &ctx->input;
+
+ old_value = *value;
+ *value = nk_do_slider(&ctx->last_widget_state, &win->buffer, bounds, min_value,
+ old_value, max_value, value_step, &style->slider, in, style->font);
+ return (old_value > *value || old_value < *value);
+}
+NK_API float
+nk_slide_float(struct nk_context *ctx, float min, float val, float max, float step)
+{
+ nk_slider_float(ctx, min, &val, max, step); return val;
+}
+NK_API int
+nk_slide_int(struct nk_context *ctx, int min, int val, int max, int step)
+{
+ float value = (float)val;
+ nk_slider_float(ctx, (float)min, &value, (float)max, (float)step);
+ return (int)value;
+}
+NK_API int
+nk_slider_int(struct nk_context *ctx, int min, int *val, int max, int step)
+{
+ int ret;
+ float value = (float)*val;
+ ret = nk_slider_float(ctx, (float)min, &value, (float)max, (float)step);
+ *val = (int)value;
+ return ret;
+}
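+/* Illustrative usage sketch (editorial note, not part of the library):
+ * nk_slider_float/int write the value back through the pointer and return
+ * nonzero when it changed; nk_slide_* return the new value instead. Assumes
+ * `ctx` is an initialized nk_context inside an active window/row; on_volume()
+ * is an application-side placeholder.
+ *
+ * static float volume = 0.5f;
+ * static int quality = 3;
+ * if (nk_slider_float(ctx, 0.0f, &volume, 1.0f, 0.05f))
+ *     on_volume(volume);
+ * nk_slider_int(ctx, 0, &quality, 10, 1);
+ */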
+
+
+
+
+
+/* ===============================================================
+ *
+ * PROGRESS
+ *
+ * ===============================================================*/
+NK_LIB nk_size
+nk_progress_behavior(nk_flags *state, struct nk_input *in,
+ struct nk_rect r, struct nk_rect cursor, nk_size max, nk_size value, int modifiable)
+{
+ int left_mouse_down = 0;
+ int left_mouse_click_in_cursor = 0;
+
+ nk_widget_state_reset(state);
+ if (!in || !modifiable) return value;
+ left_mouse_down = in && in->mouse.buttons[NK_BUTTON_LEFT].down;
+ left_mouse_click_in_cursor = in && nk_input_has_mouse_click_down_in_rect(in,
+ NK_BUTTON_LEFT, cursor, nk_true);
+ if (nk_input_is_mouse_hovering_rect(in, r))
+ *state = NK_WIDGET_STATE_HOVERED;
+
+ if (in && left_mouse_down && left_mouse_click_in_cursor) {
+ float ratio = NK_MAX(0, (float)(in->mouse.pos.x - cursor.x)) / (float)cursor.w;
+ value = (nk_size)NK_CLAMP(0, (float)max * ratio, (float)max);
+ in->mouse.buttons[NK_BUTTON_LEFT].clicked_pos.x = cursor.x + cursor.w/2.0f;
+ *state |= NK_WIDGET_STATE_ACTIVE;
+ }
+ /* set progressbar widget state */
+ if (*state & NK_WIDGET_STATE_HOVER && !nk_input_is_mouse_prev_hovering_rect(in, r))
+ *state |= NK_WIDGET_STATE_ENTERED;
+ else if (nk_input_is_mouse_prev_hovering_rect(in, r))
+ *state |= NK_WIDGET_STATE_LEFT;
+ return value;
+}
+NK_LIB void
+nk_draw_progress(struct nk_command_buffer *out, nk_flags state,
+ const struct nk_style_progress *style, const struct nk_rect *bounds,
+ const struct nk_rect *scursor, nk_size value, nk_size max)
+{
+ const struct nk_style_item *background;
+ const struct nk_style_item *cursor;
+
+ NK_UNUSED(max);
+ NK_UNUSED(value);
+
+ /* select correct colors/images to draw */
+ if (state & NK_WIDGET_STATE_ACTIVED) {
+ background = &style->active;
+ cursor = &style->cursor_active;
+ } else if (state & NK_WIDGET_STATE_HOVER){
+ background = &style->hover;
+ cursor = &style->cursor_hover;
+ } else {
+ background = &style->normal;
+ cursor = &style->cursor_normal;
+ }
+
+ /* draw background */
+ if (background->type == NK_STYLE_ITEM_COLOR) {
+ nk_fill_rect(out, *bounds, style->rounding, background->data.color);
+ nk_stroke_rect(out, *bounds, style->rounding, style->border, style->border_color);
+ } else nk_draw_image(out, *bounds, &background->data.image, nk_white);
+
+ /* draw cursor */
+ if (cursor->type == NK_STYLE_ITEM_COLOR) {
+ nk_fill_rect(out, *scursor, style->rounding, cursor->data.color);
+ nk_stroke_rect(out, *scursor, style->rounding, style->border, style->border_color);
+ } else nk_draw_image(out, *scursor, &cursor->data.image, nk_white);
+}
+NK_LIB nk_size
+nk_do_progress(nk_flags *state,
+ struct nk_command_buffer *out, struct nk_rect bounds,
+ nk_size value, nk_size max, int modifiable,
+ const struct nk_style_progress *style, struct nk_input *in)
+{
+ float prog_scale;
+ nk_size prog_value;
+ struct nk_rect cursor;
+
+ NK_ASSERT(style);
+ NK_ASSERT(out);
+ if (!out || !style) return 0;
+
+ /* calculate progressbar cursor */
+ cursor.w = NK_MAX(bounds.w, 2 * style->padding.x + 2 * style->border);
+ cursor.h = NK_MAX(bounds.h, 2 * style->padding.y + 2 * style->border);
+ cursor = nk_pad_rect(bounds, nk_vec2(style->padding.x + style->border, style->padding.y + style->border));
+ prog_scale = (float)value / (float)max;
+
+ /* update progressbar */
+ prog_value = NK_MIN(value, max);
+ prog_value = nk_progress_behavior(state, in, bounds, cursor, max, prog_value, modifiable);
+ cursor.w = cursor.w * prog_scale;
+
+ /* draw progressbar */
+ if (style->draw_begin) style->draw_begin(out, style->userdata);
+ nk_draw_progress(out, *state, style, &bounds, &cursor, value, max);
+ if (style->draw_end) style->draw_end(out, style->userdata);
+ return prog_value;
+}
+NK_API int
+nk_progress(struct nk_context *ctx, nk_size *cur, nk_size max, int is_modifyable)
+{
+ struct nk_window *win;
+ struct nk_panel *layout;
+ const struct nk_style *style;
+ struct nk_input *in;
+
+ struct nk_rect bounds;
+ enum nk_widget_layout_states state;
+ nk_size old_value;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(cur);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ if (!ctx || !ctx->current || !ctx->current->layout || !cur)
+ return 0;
+
+ win = ctx->current;
+ style = &ctx->style;
+ layout = win->layout;
+ state = nk_widget(&bounds, ctx);
+ if (!state) return 0;
+
+ in = (state == NK_WIDGET_ROM || layout->flags & NK_WINDOW_ROM) ? 0 : &ctx->input;
+ old_value = *cur;
+ *cur = nk_do_progress(&ctx->last_widget_state, &win->buffer, bounds,
+ *cur, max, is_modifyable, &style->progress, in);
+ return (*cur != old_value);
+}
+NK_API nk_size
+nk_prog(struct nk_context *ctx, nk_size cur, nk_size max, int modifyable)
+{
+ nk_progress(ctx, &cur, max, modifyable);
+ return cur;
+}
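+/* Illustrative usage sketch (editorial note, not part of the library):
+ * nk_progress writes the (possibly user-dragged) value back through `cur` and
+ * returns nonzero on change; pass 0 as the last argument for a read-only bar
+ * and nonzero to let the user drag the cursor. Assumes `ctx` is valid inside
+ * an active window/row; bytes_done/bytes_total are application-side placeholders.
+ *
+ * static nk_size seek = 0;
+ * nk_size current = (nk_size)bytes_done;
+ * nk_progress(ctx, &current, (nk_size)bytes_total, 0);
+ * nk_progress(ctx, &seek, 100, 1);
+ */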
+
+
+
+
+
+/* ===============================================================
+ *
+ * SCROLLBAR
+ *
+ * ===============================================================*/
+NK_LIB float
+nk_scrollbar_behavior(nk_flags *state, struct nk_input *in,
+ int has_scrolling, const struct nk_rect *scroll,
+ const struct nk_rect *cursor, const struct nk_rect *empty0,
+ const struct nk_rect *empty1, float scroll_offset,
+ float target, float scroll_step, enum nk_orientation o)
+{
+ nk_flags ws = 0;
+ int left_mouse_down;
+ int left_mouse_clicked;
+ int left_mouse_click_in_cursor;
+ float scroll_delta;
+
+ nk_widget_state_reset(state);
+ if (!in) return scroll_offset;
+
+ left_mouse_down = in->mouse.buttons[NK_BUTTON_LEFT].down;
+ left_mouse_clicked = in->mouse.buttons[NK_BUTTON_LEFT].clicked;
+ left_mouse_click_in_cursor = nk_input_has_mouse_click_down_in_rect(in,
+ NK_BUTTON_LEFT, *cursor, nk_true);
+ if (nk_input_is_mouse_hovering_rect(in, *scroll))
+ *state = NK_WIDGET_STATE_HOVERED;
+
+ scroll_delta = (o == NK_VERTICAL) ? in->mouse.scroll_delta.y: in->mouse.scroll_delta.x;
+ if (left_mouse_down && left_mouse_click_in_cursor && !left_mouse_clicked) {
+ /* update cursor by mouse dragging */
+ float pixel, delta;
+ *state = NK_WIDGET_STATE_ACTIVE;
+ if (o == NK_VERTICAL) {
+ float cursor_y;
+ pixel = in->mouse.delta.y;
+ delta = (pixel / scroll->h) * target;
+ scroll_offset = NK_CLAMP(0, scroll_offset + delta, target - scroll->h);
+ cursor_y = scroll->y + ((scroll_offset/target) * scroll->h);
+ in->mouse.buttons[NK_BUTTON_LEFT].clicked_pos.y = cursor_y + cursor->h/2.0f;
+ } else {
+ float cursor_x;
+ pixel = in->mouse.delta.x;
+ delta = (pixel / scroll->w) * target;
+ scroll_offset = NK_CLAMP(0, scroll_offset + delta, target - scroll->w);
+ cursor_x = scroll->x + ((scroll_offset/target) * scroll->w);
+ in->mouse.buttons[NK_BUTTON_LEFT].clicked_pos.x = cursor_x + cursor->w/2.0f;
+ }
+ } else if ((nk_input_is_key_pressed(in, NK_KEY_SCROLL_UP) && o == NK_VERTICAL && has_scrolling)||
+ nk_button_behavior(&ws, *empty0, in, NK_BUTTON_DEFAULT)) {
+ /* scroll page up by click on empty space or shortcut */
+ if (o == NK_VERTICAL)
+ scroll_offset = NK_MAX(0, scroll_offset - scroll->h);
+ else scroll_offset = NK_MAX(0, scroll_offset - scroll->w);
+ } else if ((nk_input_is_key_pressed(in, NK_KEY_SCROLL_DOWN) && o == NK_VERTICAL && has_scrolling) ||
+ nk_button_behavior(&ws, *empty1, in, NK_BUTTON_DEFAULT)) {
+ /* scroll page down by click on empty space or shortcut */
+ if (o == NK_VERTICAL)
+ scroll_offset = NK_MIN(scroll_offset + scroll->h, target - scroll->h);
+ else scroll_offset = NK_MIN(scroll_offset + scroll->w, target - scroll->w);
+ } else if (has_scrolling) {
+ if (scroll_delta < 0 || scroll_delta > 0) {
+ /* update cursor by mouse scrolling */
+ scroll_offset = scroll_offset + scroll_step * (-scroll_delta);
+ if (o == NK_VERTICAL)
+ scroll_offset = NK_CLAMP(0, scroll_offset, target - scroll->h);
+ else scroll_offset = NK_CLAMP(0, scroll_offset, target - scroll->w);
+ } else if (nk_input_is_key_pressed(in, NK_KEY_SCROLL_START)) {
+ /* update cursor to the beginning */
+ if (o == NK_VERTICAL) scroll_offset = 0;
+ } else if (nk_input_is_key_pressed(in, NK_KEY_SCROLL_END)) {
+ /* update cursor to the end */
+ if (o == NK_VERTICAL) scroll_offset = target - scroll->h;
+ }
+ }
+ if (*state & NK_WIDGET_STATE_HOVER && !nk_input_is_mouse_prev_hovering_rect(in, *scroll))
+ *state |= NK_WIDGET_STATE_ENTERED;
+ else if (nk_input_is_mouse_prev_hovering_rect(in, *scroll))
+ *state |= NK_WIDGET_STATE_LEFT;
+ return scroll_offset;
+}
+NK_LIB void
+nk_draw_scrollbar(struct nk_command_buffer *out, nk_flags state,
+ const struct nk_style_scrollbar *style, const struct nk_rect *bounds,
+ const struct nk_rect *scroll)
+{
+ const struct nk_style_item *background;
+ const struct nk_style_item *cursor;
+
+ /* select correct colors/images to draw */
+ if (state & NK_WIDGET_STATE_ACTIVED) {
+ background = &style->active;
+ cursor = &style->cursor_active;
+ } else if (state & NK_WIDGET_STATE_HOVER) {
+ background = &style->hover;
+ cursor = &style->cursor_hover;
+ } else {
+ background = &style->normal;
+ cursor = &style->cursor_normal;
+ }
+
+ /* draw background */
+ if (background->type == NK_STYLE_ITEM_COLOR) {
+ nk_fill_rect(out, *bounds, style->rounding, background->data.color);
+ nk_stroke_rect(out, *bounds, style->rounding, style->border, style->border_color);
+ } else {
+ nk_draw_image(out, *bounds, &background->data.image, nk_white);
+ }
+
+ /* draw cursor */
+ if (cursor->type == NK_STYLE_ITEM_COLOR) {
+ nk_fill_rect(out, *scroll, style->rounding_cursor, cursor->data.color);
+ nk_stroke_rect(out, *scroll, style->rounding_cursor, style->border_cursor, style->cursor_border_color);
+ } else nk_draw_image(out, *scroll, &cursor->data.image, nk_white);
+}
+NK_LIB float
+nk_do_scrollbarv(nk_flags *state,
+ struct nk_command_buffer *out, struct nk_rect scroll, int has_scrolling,
+ float offset, float target, float step, float button_pixel_inc,
+ const struct nk_style_scrollbar *style, struct nk_input *in,
+ const struct nk_user_font *font)
+{
+ struct nk_rect empty_north;
+ struct nk_rect empty_south;
+ struct nk_rect cursor;
+
+ float scroll_step;
+ float scroll_offset;
+ float scroll_off;
+ float scroll_ratio;
+
+ NK_ASSERT(out);
+ NK_ASSERT(style);
+ NK_ASSERT(state);
+ if (!out || !style) return 0;
+
+ scroll.w = NK_MAX(scroll.w, 1);
+ scroll.h = NK_MAX(scroll.h, 0);
+ if (target <= scroll.h) return 0;
+
+ /* optional scrollbar buttons */
+ if (style->show_buttons) {
+ nk_flags ws;
+ float scroll_h;
+ struct nk_rect button;
+
+ button.x = scroll.x;
+ button.w = scroll.w;
+ button.h = scroll.w;
+
+ scroll_h = NK_MAX(scroll.h - 2 * button.h,0);
+ scroll_step = NK_MIN(step, button_pixel_inc);
+
+ /* decrement button */
+ button.y = scroll.y;
+ if (nk_do_button_symbol(&ws, out, button, style->dec_symbol,
+ NK_BUTTON_REPEATER, &style->dec_button, in, font))
+ offset = offset - scroll_step;
+
+ /* increment button */
+ button.y = scroll.y + scroll.h - button.h;
+ if (nk_do_button_symbol(&ws, out, button, style->inc_symbol,
+ NK_BUTTON_REPEATER, &style->inc_button, in, font))
+ offset = offset + scroll_step;
+
+ scroll.y = scroll.y + button.h;
+ scroll.h = scroll_h;
+ }
+
+ /* calculate scrollbar constants */
+ scroll_step = NK_MIN(step, scroll.h);
+ scroll_offset = NK_CLAMP(0, offset, target - scroll.h);
+ scroll_ratio = scroll.h / target;
+ scroll_off = scroll_offset / target;
+
+ /* calculate scrollbar cursor bounds */
+ cursor.h = NK_MAX((scroll_ratio * scroll.h) - (2*style->border + 2*style->padding.y), 0);
+ cursor.y = scroll.y + (scroll_off * scroll.h) + style->border + style->padding.y;
+ cursor.w = scroll.w - (2 * style->border + 2 * style->padding.x);
+ cursor.x = scroll.x + style->border + style->padding.x;
+
+ /* calculate empty space around cursor */
+ empty_north.x = scroll.x;
+ empty_north.y = scroll.y;
+ empty_north.w = scroll.w;
+ empty_north.h = NK_MAX(cursor.y - scroll.y, 0);
+
+ empty_south.x = scroll.x;
+ empty_south.y = cursor.y + cursor.h;
+ empty_south.w = scroll.w;
+ empty_south.h = NK_MAX((scroll.y + scroll.h) - (cursor.y + cursor.h), 0);
+
+ /* update scrollbar */
+ scroll_offset = nk_scrollbar_behavior(state, in, has_scrolling, &scroll, &cursor,
+ &empty_north, &empty_south, scroll_offset, target, scroll_step, NK_VERTICAL);
+ scroll_off = scroll_offset / target;
+ cursor.y = scroll.y + (scroll_off * scroll.h) + style->border_cursor + style->padding.y;
+
+ /* draw scrollbar */
+ if (style->draw_begin) style->draw_begin(out, style->userdata);
+ nk_draw_scrollbar(out, *state, style, &scroll, &cursor);
+ if (style->draw_end) style->draw_end(out, style->userdata);
+ return scroll_offset;
+}
+NK_LIB float
+nk_do_scrollbarh(nk_flags *state,
+ struct nk_command_buffer *out, struct nk_rect scroll, int has_scrolling,
+ float offset, float target, float step, float button_pixel_inc,
+ const struct nk_style_scrollbar *style, struct nk_input *in,
+ const struct nk_user_font *font)
+{
+ struct nk_rect cursor;
+ struct nk_rect empty_west;
+ struct nk_rect empty_east;
+
+ float scroll_step;
+ float scroll_offset;
+ float scroll_off;
+ float scroll_ratio;
+
+ NK_ASSERT(out);
+ NK_ASSERT(style);
+ if (!out || !style) return 0;
+
+ /* scrollbar background */
+ scroll.h = NK_MAX(scroll.h, 1);
+ scroll.w = NK_MAX(scroll.w, 2 * scroll.h);
+ if (target <= scroll.w) return 0;
+
+ /* optional scrollbar buttons */
+ if (style->show_buttons) {
+ nk_flags ws;
+ float scroll_w;
+ struct nk_rect button;
+ button.y = scroll.y;
+ button.w = scroll.h;
+ button.h = scroll.h;
+
+ scroll_w = scroll.w - 2 * button.w;
+ scroll_step = NK_MIN(step, button_pixel_inc);
+
+ /* decrement button */
+ button.x = scroll.x;
+ if (nk_do_button_symbol(&ws, out, button, style->dec_symbol,
+ NK_BUTTON_REPEATER, &style->dec_button, in, font))
+ offset = offset - scroll_step;
+
+ /* increment button */
+ button.x = scroll.x + scroll.w - button.w;
+ if (nk_do_button_symbol(&ws, out, button, style->inc_symbol,
+ NK_BUTTON_REPEATER, &style->inc_button, in, font))
+ offset = offset + scroll_step;
+
+ scroll.x = scroll.x + button.w;
+ scroll.w = scroll_w;
+ }
+
+ /* calculate scrollbar constants */
+ scroll_step = NK_MIN(step, scroll.w);
+ scroll_offset = NK_CLAMP(0, offset, target - scroll.w);
+ scroll_ratio = scroll.w / target;
+ scroll_off = scroll_offset / target;
+
+ /* calculate cursor bounds */
+ cursor.w = (scroll_ratio * scroll.w) - (2*style->border + 2*style->padding.x);
+ cursor.x = scroll.x + (scroll_off * scroll.w) + style->border + style->padding.x;
+ cursor.h = scroll.h - (2 * style->border + 2 * style->padding.y);
+ cursor.y = scroll.y + style->border + style->padding.y;
+
+ /* calculate empty space around cursor */
+ empty_west.x = scroll.x;
+ empty_west.y = scroll.y;
+ empty_west.w = cursor.x - scroll.x;
+ empty_west.h = scroll.h;
+
+ empty_east.x = cursor.x + cursor.w;
+ empty_east.y = scroll.y;
+ empty_east.w = (scroll.x + scroll.w) - (cursor.x + cursor.w);
+ empty_east.h = scroll.h;
+
+ /* update scrollbar */
+ scroll_offset = nk_scrollbar_behavior(state, in, has_scrolling, &scroll, &cursor,
+ &empty_west, &empty_east, scroll_offset, target, scroll_step, NK_HORIZONTAL);
+ scroll_off = scroll_offset / target;
+ cursor.x = scroll.x + (scroll_off * scroll.w);
+
+ /* draw scrollbar */
+ if (style->draw_begin) style->draw_begin(out, style->userdata);
+ nk_draw_scrollbar(out, *state, style, &scroll, &cursor);
+ if (style->draw_end) style->draw_end(out, style->userdata);
+ return scroll_offset;
+}
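+/* Editorial note (not part of the library): nk_do_scrollbarv/h map the scroll
+ * offset into track space proportionally. With content size `target` and track
+ * length scroll.h (vertical case), the cursor is roughly
+ * (scroll.h / target) * scroll.h tall and sits at
+ * scroll.y + (scroll_offset / target) * scroll.h; dragging inverts this in
+ * nk_scrollbar_behavior via delta = (pixel / scroll.h) * target. */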
+
+
+
+
+
+/* ===============================================================
+ *
+ * TEXT EDITOR
+ *
+ * ===============================================================*/
+/* stb_textedit.h - v1.8 - public domain - Sean Barrett */
+struct nk_text_find {
+ float x,y; /* position of n'th character */
+ float height; /* height of line */
+ int first_char, length; /* first char of row, and length */
+ int prev_first; /* first char of previous row */
+};
+
+struct nk_text_edit_row {
+ float x0,x1;
+ /* starting x location, end x location (allows for align=right, etc) */
+ float baseline_y_delta;
+ /* position of baseline relative to previous row's baseline*/
+ float ymin,ymax;
+ /* height of row above and below baseline */
+ int num_chars;
+};
+
+/* forward declarations */
+NK_INTERN void nk_textedit_makeundo_delete(struct nk_text_edit*, int, int);
+NK_INTERN void nk_textedit_makeundo_insert(struct nk_text_edit*, int, int);
+NK_INTERN void nk_textedit_makeundo_replace(struct nk_text_edit*, int, int, int);
+#define NK_TEXT_HAS_SELECTION(s) ((s)->select_start != (s)->select_end)
+
+NK_INTERN float
+nk_textedit_get_width(const struct nk_text_edit *edit, int line_start, int char_id,
+ const struct nk_user_font *font)
+{
+ int len = 0;
+ nk_rune unicode = 0;
+ const char *str = nk_str_at_const(&edit->string, line_start + char_id, &unicode, &len);
+ return font->width(font->userdata, font->height, str, len);
+}
+NK_INTERN void
+nk_textedit_layout_row(struct nk_text_edit_row *r, struct nk_text_edit *edit,
+ int line_start_id, float row_height, const struct nk_user_font *font)
+{
+ int l;
+ int glyphs = 0;
+ nk_rune unicode;
+ const char *remaining;
+ int len = nk_str_len_char(&edit->string);
+ const char *end = nk_str_get_const(&edit->string) + len;
+ const char *text = nk_str_at_const(&edit->string, line_start_id, &unicode, &l);
+ const struct nk_vec2 size = nk_text_calculate_text_bounds(font,
+ text, (int)(end - text), row_height, &remaining, 0, &glyphs, NK_STOP_ON_NEW_LINE);
+
+ r->x0 = 0.0f;
+ r->x1 = size.x;
+ r->baseline_y_delta = size.y;
+ r->ymin = 0.0f;
+ r->ymax = size.y;
+ r->num_chars = glyphs;
+}
+NK_INTERN int
+nk_textedit_locate_coord(struct nk_text_edit *edit, float x, float y,
+ const struct nk_user_font *font, float row_height)
+{
+ struct nk_text_edit_row r;
+ int n = edit->string.len;
+ float base_y = 0, prev_x;
+ int i=0, k;
+
+ r.x0 = r.x1 = 0;
+ r.ymin = r.ymax = 0;
+ r.num_chars = 0;
+
+ /* search rows to find one that straddles 'y' */
+ while (i < n) {
+ nk_textedit_layout_row(&r, edit, i, row_height, font);
+ if (r.num_chars <= 0)
+ return n;
+
+ if (i==0 && y < base_y + r.ymin)
+ return 0;
+
+ if (y < base_y + r.ymax)
+ break;
+
+ i += r.num_chars;
+ base_y += r.baseline_y_delta;
+ }
+
+ /* below all text, return 'after' last character */
+ if (i >= n)
+ return n;
+
+ /* check if it's before the beginning of the line */
+ if (x < r.x0)
+ return i;
+
+ /* check if it's before the end of the line */
+ if (x < r.x1) {
+ /* search characters in row for one that straddles 'x' */
+ k = i;
+ prev_x = r.x0;
+ for (i=0; i < r.num_chars; ++i) {
+ float w = nk_textedit_get_width(edit, k, i, font);
+ if (x < prev_x+w) {
+ if (x < prev_x+w/2)
+ return k+i;
+ else return k+i+1;
+ }
+ prev_x += w;
+ }
+ /* shouldn't happen, but if it does, fall through to end-of-line case */
+ }
+
+ /* if the last character is a newline, return that.
+ * otherwise return 'after' the last character */
+ if (nk_str_rune_at(&edit->string, i+r.num_chars-1) == '\n')
+ return i+r.num_chars-1;
+ else return i+r.num_chars;
+}
+NK_LIB void
+nk_textedit_click(struct nk_text_edit *state, float x, float y,
+ const struct nk_user_font *font, float row_height)
+{
+ /* API click: on mouse down, move the cursor to the clicked location,
+ * and reset the selection */
+ state->cursor = nk_textedit_locate_coord(state, x, y, font, row_height);
+ state->select_start = state->cursor;
+ state->select_end = state->cursor;
+ state->has_preferred_x = 0;
+}
+NK_LIB void
+nk_textedit_drag(struct nk_text_edit *state, float x, float y,
+ const struct nk_user_font *font, float row_height)
+{
+ /* API drag: on mouse drag, move the cursor and selection endpoint
+ * to the clicked location */
+ int p = nk_textedit_locate_coord(state, x, y, font, row_height);
+ if (state->select_start == state->select_end)
+ state->select_start = state->cursor;
+ state->cursor = state->select_end = p;
+}
+NK_INTERN void
+nk_textedit_find_charpos(struct nk_text_find *find, struct nk_text_edit *state,
+ int n, int single_line, const struct nk_user_font *font, float row_height)
+{
+ /* find the x/y location of a character, and remember info about the previous
+ * row in case we get a move-up event (for page up, we'll have to rescan) */
+ struct nk_text_edit_row r;
+ int prev_start = 0;
+ int z = state->string.len;
+ int i=0, first;
+
+ nk_zero_struct(r);
+ if (n == z) {
+ /* if it's at the end, then find the last line -- simpler than trying to
+ explicitly handle this case in the regular code */
+ nk_textedit_layout_row(&r, state, 0, row_height, font);
+ if (single_line) {
+ find->first_char = 0;
+ find->length = z;
+ } else {
+ while (i < z) {
+ prev_start = i;
+ i += r.num_chars;
+ nk_textedit_layout_row(&r, state, i, row_height, font);
+ }
+
+ find->first_char = i;
+ find->length = r.num_chars;
+ }
+ find->x = r.x1;
+ find->y = r.ymin;
+ find->height = r.ymax - r.ymin;
+ find->prev_first = prev_start;
+ return;
+ }
+
+ /* search rows to find the one that straddles character n */
+ find->y = 0;
+
+ for(;;) {
+ nk_textedit_layout_row(&r, state, i, row_height, font);
+ if (n < i + r.num_chars) break;
+ prev_start = i;
+ i += r.num_chars;
+ find->y += r.baseline_y_delta;
+ }
+
+ find->first_char = first = i;
+ find->length = r.num_chars;
+ find->height = r.ymax - r.ymin;
+ find->prev_first = prev_start;
+
+ /* now scan to find xpos */
+ find->x = r.x0;
+ for (i=0; first+i < n; ++i)
+ find->x += nk_textedit_get_width(state, first, i, font);
+}
+NK_INTERN void
+nk_textedit_clamp(struct nk_text_edit *state)
+{
+ /* make the selection/cursor state valid if client altered the string */
+ int n = state->string.len;
+ if (NK_TEXT_HAS_SELECTION(state)) {
+ if (state->select_start > n) state->select_start = n;
+ if (state->select_end > n) state->select_end = n;
+ /* if clamping forced them to be equal, move the cursor to match */
+ if (state->select_start == state->select_end)
+ state->cursor = state->select_start;
+ }
+ if (state->cursor > n) state->cursor = n;
+}
+NK_API void
+nk_textedit_delete(struct nk_text_edit *state, int where, int len)
+{
+ /* delete characters while updating undo */
+ nk_textedit_makeundo_delete(state, where, len);
+ nk_str_delete_runes(&state->string, where, len);
+ state->has_preferred_x = 0;
+}
+NK_API void
+nk_textedit_delete_selection(struct nk_text_edit *state)
+{
+ /* delete the selection */
+ nk_textedit_clamp(state);
+ if (NK_TEXT_HAS_SELECTION(state)) {
+ if (state->select_start < state->select_end) {
+ nk_textedit_delete(state, state->select_start,
+ state->select_end - state->select_start);
+ state->select_end = state->cursor = state->select_start;
+ } else {
+ nk_textedit_delete(state, state->select_end,
+ state->select_start - state->select_end);
+ state->select_start = state->cursor = state->select_end;
+ }
+ state->has_preferred_x = 0;
+ }
+}
+NK_INTERN void
+nk_textedit_sortselection(struct nk_text_edit *state)
+{
+ /* canonicalize the selection so start <= end */
+ if (state->select_end < state->select_start) {
+ int temp = state->select_end;
+ state->select_end = state->select_start;
+ state->select_start = temp;
+ }
+}
+NK_INTERN void
+nk_textedit_move_to_first(struct nk_text_edit *state)
+{
+ /* move cursor to first character of selection */
+ if (NK_TEXT_HAS_SELECTION(state)) {
+ nk_textedit_sortselection(state);
+ state->cursor = state->select_start;
+ state->select_end = state->select_start;
+ state->has_preferred_x = 0;
+ }
+}
+NK_INTERN void
+nk_textedit_move_to_last(struct nk_text_edit *state)
+{
+ /* move cursor to last character of selection */
+ if (NK_TEXT_HAS_SELECTION(state)) {
+ nk_textedit_sortselection(state);
+ nk_textedit_clamp(state);
+ state->cursor = state->select_end;
+ state->select_start = state->select_end;
+ state->has_preferred_x = 0;
+ }
+}
+NK_INTERN int
+nk_is_word_boundary( struct nk_text_edit *state, int idx)
+{
+ int len;
+ nk_rune c;
+ if (idx <= 0) return 1;
+ if (!nk_str_at_rune(&state->string, idx, &c, &len)) return 1;
+ return (c == ' ' || c == '\t' ||c == 0x3000 || c == ',' || c == ';' ||
+ c == '(' || c == ')' || c == '{' || c == '}' || c == '[' || c == ']' ||
+ c == '|');
+}
+NK_INTERN int
+nk_textedit_move_to_word_previous(struct nk_text_edit *state)
+{
+ int c = state->cursor - 1;
+ while( c >= 0 && !nk_is_word_boundary(state, c))
+ --c;
+
+ if( c < 0 )
+ c = 0;
+
+ return c;
+}
+NK_INTERN int
+nk_textedit_move_to_word_next(struct nk_text_edit *state)
+{
+ const int len = state->string.len;
+ int c = state->cursor+1;
+ while( c < len && !nk_is_word_boundary(state, c))
+ ++c;
+
+ if( c > len )
+ c = len;
+
+ return c;
+}
+NK_INTERN void
+nk_textedit_prep_selection_at_cursor(struct nk_text_edit *state)
+{
+ /* update selection and cursor to match each other */
+ if (!NK_TEXT_HAS_SELECTION(state))
+ state->select_start = state->select_end = state->cursor;
+ else state->cursor = state->select_end;
+}
+NK_API int
+nk_textedit_cut(struct nk_text_edit *state)
+{
+ /* API cut: delete selection */
+ if (state->mode == NK_TEXT_EDIT_MODE_VIEW)
+ return 0;
+ if (NK_TEXT_HAS_SELECTION(state)) {
+ nk_textedit_delete_selection(state); /* implicitly clamps */
+ state->has_preferred_x = 0;
+ return 1;
+ }
+ return 0;
+}
+NK_API int
+nk_textedit_paste(struct nk_text_edit *state, char const *ctext, int len)
+{
+ /* API paste: replace existing selection with passed-in text */
+ int glyphs;
+ const char *text = (const char *) ctext;
+ if (state->mode == NK_TEXT_EDIT_MODE_VIEW) return 0;
+
+ /* if there's a selection, the paste should delete it */
+ nk_textedit_clamp(state);
+ nk_textedit_delete_selection(state);
+
+ /* try to insert the characters */
+ glyphs = nk_utf_len(ctext, len);
+ if (nk_str_insert_text_char(&state->string, state->cursor, text, len)) {
+ nk_textedit_makeundo_insert(state, state->cursor, glyphs);
+ state->cursor += len;
+ state->has_preferred_x = 0;
+ return 1;
+ }
+ /* remove the undo since we didn't actually insert the characters */
+ if (state->undo.undo_point)
+ --state->undo.undo_point;
+ return 0;
+}
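+/* Illustrative usage sketch (editorial note, not part of the library):
+ * nk_textedit_paste deletes any active selection before inserting, so
+ * select-all followed by paste replaces the whole buffer. Assumes `ed` points
+ * to an nk_text_edit that was initialized elsewhere (e.g. with the
+ * nk_textedit_init* helpers from the wider nuklear API) and is not in
+ * NK_TEXT_EDIT_MODE_VIEW.
+ *
+ * nk_textedit_select_all(ed);
+ * nk_textedit_paste(ed, "hello", 5);
+ */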
+NK_API void
+nk_textedit_text(struct nk_text_edit *state, const char *text, int total_len)
+{
+ nk_rune unicode;
+ int glyph_len;
+ int text_len = 0;
+
+ NK_ASSERT(state);
+ NK_ASSERT(text);
+ if (!text || !total_len || state->mode == NK_TEXT_EDIT_MODE_VIEW) return;
+
+ glyph_len = nk_utf_decode(text, &unicode, total_len);
+ while ((text_len < total_len) && glyph_len)
+ {
+ /* don't insert a backward delete, just process the event */
+ if (unicode == 127) goto next;
+ /* can't add newline in single-line mode */
+ if (unicode == '\n' && state->single_line) goto next;
+ /* filter incoming text */
+ if (state->filter && !state->filter(state, unicode)) goto next;
+
+ if (!NK_TEXT_HAS_SELECTION(state) &&
+ state->cursor < state->string.len)
+ {
+ if (state->mode == NK_TEXT_EDIT_MODE_REPLACE) {
+ nk_textedit_makeundo_replace(state, state->cursor, 1, 1);
+ nk_str_delete_runes(&state->string, state->cursor, 1);
+ }
+ if (nk_str_insert_text_utf8(&state->string, state->cursor,
+ text+text_len, 1))
+ {
+ ++state->cursor;
+ state->has_preferred_x = 0;
+ }
+ } else {
+ nk_textedit_delete_selection(state); /* implicitly clamps */
+ if (nk_str_insert_text_utf8(&state->string, state->cursor,
+ text+text_len, 1))
+ {
+ nk_textedit_makeundo_insert(state, state->cursor, 1);
+ ++state->cursor;
+ state->has_preferred_x = 0;
+ }
+ }
+ next:
+ text_len += glyph_len;
+ glyph_len = nk_utf_decode(text + text_len, &unicode, total_len-text_len);
+ }
+}
+NK_LIB void
+nk_textedit_key(struct nk_text_edit *state, enum nk_keys key, int shift_mod,
+ const struct nk_user_font *font, float row_height)
+{
+retry:
+ switch (key)
+ {
+ case NK_KEY_NONE:
+ case NK_KEY_CTRL:
+ case NK_KEY_ENTER:
+ case NK_KEY_SHIFT:
+ case NK_KEY_TAB:
+ case NK_KEY_COPY:
+ case NK_KEY_CUT:
+ case NK_KEY_PASTE:
+ case NK_KEY_MAX:
+ default: break;
+ case NK_KEY_TEXT_UNDO:
+ nk_textedit_undo(state);
+ state->has_preferred_x = 0;
+ break;
+
+ case NK_KEY_TEXT_REDO:
+ nk_textedit_redo(state);
+ state->has_preferred_x = 0;
+ break;
+
+ case NK_KEY_TEXT_SELECT_ALL:
+ nk_textedit_select_all(state);
+ state->has_preferred_x = 0;
+ break;
+
+ case NK_KEY_TEXT_INSERT_MODE:
+ if (state->mode == NK_TEXT_EDIT_MODE_VIEW)
+ state->mode = NK_TEXT_EDIT_MODE_INSERT;
+ break;
+ case NK_KEY_TEXT_REPLACE_MODE:
+ if (state->mode == NK_TEXT_EDIT_MODE_VIEW)
+ state->mode = NK_TEXT_EDIT_MODE_REPLACE;
+ break;
+ case NK_KEY_TEXT_RESET_MODE:
+ if (state->mode == NK_TEXT_EDIT_MODE_INSERT ||
+ state->mode == NK_TEXT_EDIT_MODE_REPLACE)
+ state->mode = NK_TEXT_EDIT_MODE_VIEW;
+ break;
+
+ case NK_KEY_LEFT:
+ if (shift_mod) {
+ nk_textedit_clamp(state);
+ nk_textedit_prep_selection_at_cursor(state);
+ /* move selection left */
+ if (state->select_end > 0)
+ --state->select_end;
+ state->cursor = state->select_end;
+ state->has_preferred_x = 0;
+ } else {
+ /* if currently there's a selection,
+ * move cursor to start of selection */
+ if (NK_TEXT_HAS_SELECTION(state))
+ nk_textedit_move_to_first(state);
+ else if (state->cursor > 0)
+ --state->cursor;
+ state->has_preferred_x = 0;
+ } break;
+
+ case NK_KEY_RIGHT:
+ if (shift_mod) {
+ nk_textedit_prep_selection_at_cursor(state);
+ /* move selection right */
+ ++state->select_end;
+ nk_textedit_clamp(state);
+ state->cursor = state->select_end;
+ state->has_preferred_x = 0;
+ } else {
+ /* if currently there's a selection,
+ * move cursor to end of selection */
+ if (NK_TEXT_HAS_SELECTION(state))
+ nk_textedit_move_to_last(state);
+ else ++state->cursor;
+ nk_textedit_clamp(state);
+ state->has_preferred_x = 0;
+ } break;
+
+ case NK_KEY_TEXT_WORD_LEFT:
+ if (shift_mod) {
+ if( !NK_TEXT_HAS_SELECTION( state ) )
+ nk_textedit_prep_selection_at_cursor(state);
+ state->cursor = nk_textedit_move_to_word_previous(state);
+ state->select_end = state->cursor;
+ nk_textedit_clamp(state );
+ } else {
+ if (NK_TEXT_HAS_SELECTION(state))
+ nk_textedit_move_to_first(state);
+ else {
+ state->cursor = nk_textedit_move_to_word_previous(state);
+ nk_textedit_clamp(state );
+ }
+ } break;
+
+ case NK_KEY_TEXT_WORD_RIGHT:
+ if (shift_mod) {
+ if( !NK_TEXT_HAS_SELECTION( state ) )
+ nk_textedit_prep_selection_at_cursor(state);
+ state->cursor = nk_textedit_move_to_word_next(state);
+ state->select_end = state->cursor;
+ nk_textedit_clamp(state);
+ } else {
+ if (NK_TEXT_HAS_SELECTION(state))
+ nk_textedit_move_to_last(state);
+ else {
+ state->cursor = nk_textedit_move_to_word_next(state);
+ nk_textedit_clamp(state );
+ }
+ } break;
+
+ case NK_KEY_DOWN: {
+ struct nk_text_find find;
+ struct nk_text_edit_row row;
+ int i, sel = shift_mod;
+
+ if (state->single_line) {
+ /* on windows, up&down in single-line behave like left&right */
+ key = NK_KEY_RIGHT;
+ goto retry;
+ }
+
+ if (sel)
+ nk_textedit_prep_selection_at_cursor(state);
+ else if (NK_TEXT_HAS_SELECTION(state))
+ nk_textedit_move_to_last(state);
+
+ /* compute current position of cursor point */
+ nk_textedit_clamp(state);
+ nk_textedit_find_charpos(&find, state, state->cursor, state->single_line,
+ font, row_height);
+
+ /* now find character position down a row */
+ if (find.length)
+ {
+ float x;
+ float goal_x = state->has_preferred_x ? state->preferred_x : find.x;
+ int start = find.first_char + find.length;
+
+ state->cursor = start;
+ nk_textedit_layout_row(&row, state, state->cursor, row_height, font);
+ x = row.x0;
+
+ for (i=0; i < row.num_chars && x < row.x1; ++i) {
+ float dx = nk_textedit_get_width(state, start, i, font);
+ x += dx;
+ if (x > goal_x)
+ break;
+ ++state->cursor;
+ }
+ nk_textedit_clamp(state);
+
+ state->has_preferred_x = 1;
+ state->preferred_x = goal_x;
+ if (sel)
+ state->select_end = state->cursor;
+ }
+ } break;
+
+ case NK_KEY_UP: {
+ struct nk_text_find find;
+ struct nk_text_edit_row row;
+ int i, sel = shift_mod;
+
+ if (state->single_line) {
+ /* on windows, up&down become left&right */
+ key = NK_KEY_LEFT;
+ goto retry;
+ }
+
+ if (sel)
+ nk_textedit_prep_selection_at_cursor(state);
+ else if (NK_TEXT_HAS_SELECTION(state))
+ nk_textedit_move_to_first(state);
+
+ /* compute current position of cursor point */
+ nk_textedit_clamp(state);
+ nk_textedit_find_charpos(&find, state, state->cursor, state->single_line,
+ font, row_height);
+
+ /* can only go up if there's a previous row */
+ if (find.prev_first != find.first_char) {
+ /* now find character position up a row */
+ float x;
+ float goal_x = state->has_preferred_x ? state->preferred_x : find.x;
+
+ state->cursor = find.prev_first;
+ nk_textedit_layout_row(&row, state, state->cursor, row_height, font);
+ x = row.x0;
+
+ for (i=0; i < row.num_chars && x < row.x1; ++i) {
+ float dx = nk_textedit_get_width(state, find.prev_first, i, font);
+ x += dx;
+ if (x > goal_x)
+ break;
+ ++state->cursor;
+ }
+ nk_textedit_clamp(state);
+
+ state->has_preferred_x = 1;
+ state->preferred_x = goal_x;
+ if (sel) state->select_end = state->cursor;
+ }
+ } break;
+
+ case NK_KEY_DEL:
+ if (state->mode == NK_TEXT_EDIT_MODE_VIEW)
+ break;
+ if (NK_TEXT_HAS_SELECTION(state))
+ nk_textedit_delete_selection(state);
+ else {
+ int n = state->string.len;
+ if (state->cursor < n)
+ nk_textedit_delete(state, state->cursor, 1);
+ }
+ state->has_preferred_x = 0;
+ break;
+
+ case NK_KEY_BACKSPACE:
+ if (state->mode == NK_TEXT_EDIT_MODE_VIEW)
+ break;
+ if (NK_TEXT_HAS_SELECTION(state))
+ nk_textedit_delete_selection(state);
+ else {
+ nk_textedit_clamp(state);
+ if (state->cursor > 0) {
+ nk_textedit_delete(state, state->cursor-1, 1);
+ --state->cursor;
+ }
+ }
+ state->has_preferred_x = 0;
+ break;
+
+ case NK_KEY_TEXT_START:
+ if (shift_mod) {
+ nk_textedit_prep_selection_at_cursor(state);
+ state->cursor = state->select_end = 0;
+ state->has_preferred_x = 0;
+ } else {
+ state->cursor = state->select_start = state->select_end = 0;
+ state->has_preferred_x = 0;
+ }
+ break;
+
+ case NK_KEY_TEXT_END:
+ if (shift_mod) {
+ nk_textedit_prep_selection_at_cursor(state);
+ state->cursor = state->select_end = state->string.len;
+ state->has_preferred_x = 0;
+ } else {
+ state->cursor = state->string.len;
+ state->select_start = state->select_end = 0;
+ state->has_preferred_x = 0;
+ }
+ break;
+
+ case NK_KEY_TEXT_LINE_START: {
+ if (shift_mod) {
+ struct nk_text_find find;
+ nk_textedit_clamp(state);
+ nk_textedit_prep_selection_at_cursor(state);
+ if (state->string.len && state->cursor == state->string.len)
+ --state->cursor;
+ nk_textedit_find_charpos(&find, state,state->cursor, state->single_line,
+ font, row_height);
+ state->cursor = state->select_end = find.first_char;
+ state->has_preferred_x = 0;
+ } else {
+ struct nk_text_find find;
+ if (state->string.len && state->cursor == state->string.len)
+ --state->cursor;
+ nk_textedit_clamp(state);
+ nk_textedit_move_to_first(state);
+ nk_textedit_find_charpos(&find, state, state->cursor, state->single_line,
+ font, row_height);
+ state->cursor = find.first_char;
+ state->has_preferred_x = 0;
+ }
+ } break;
+
+ case NK_KEY_TEXT_LINE_END: {
+ if (shift_mod) {
+ struct nk_text_find find;
+ nk_textedit_clamp(state);
+ nk_textedit_prep_selection_at_cursor(state);
+ nk_textedit_find_charpos(&find, state, state->cursor, state->single_line,
+ font, row_height);
+ state->has_preferred_x = 0;
+ state->cursor = find.first_char + find.length;
+ if (find.length > 0 && nk_str_rune_at(&state->string, state->cursor-1) == '\n')
+ --state->cursor;
+ state->select_end = state->cursor;
+ } else {
+ struct nk_text_find find;
+ nk_textedit_clamp(state);
+ nk_textedit_move_to_first(state);
+ nk_textedit_find_charpos(&find, state, state->cursor, state->single_line,
+ font, row_height);
+
+ state->has_preferred_x = 0;
+ state->cursor = find.first_char + find.length;
+ if (find.length > 0 && nk_str_rune_at(&state->string, state->cursor-1) == '\n')
+ --state->cursor;
+ }} break;
+ }
+}
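+/* Rough map of the undo/redo storage the helpers below maintain: undo records
+   occupy undo_rec[0 .. undo_point-1] with their saved characters in
+   undo_char[0 .. undo_char_point-1], while redo records grow down from the top
+   of the same arrays, occupying undo_rec[redo_point .. NK_TEXTEDIT_UNDOSTATECOUNT-1]
+   and undo_char[redo_char_point .. NK_TEXTEDIT_UNDOCHARCOUNT-1]. A redo_point
+   equal to NK_TEXTEDIT_UNDOSTATECOUNT means the redo stack is empty, which is
+   exactly what nk_textedit_flush_redo() restores. */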
+NK_INTERN void
+nk_textedit_flush_redo(struct nk_text_undo_state *state)
+{
+ state->redo_point = NK_TEXTEDIT_UNDOSTATECOUNT;
+ state->redo_char_point = NK_TEXTEDIT_UNDOCHARCOUNT;
+}
+NK_INTERN void
+nk_textedit_discard_undo(struct nk_text_undo_state *state)
+{
+ /* discard the oldest entry in the undo list */
+ if (state->undo_point > 0) {
+ /* if the 0th undo state has characters, clean those up */
+ if (state->undo_rec[0].char_storage >= 0) {
+ int n = state->undo_rec[0].insert_length, i;
+ /* delete n characters from all other records */
+ state->undo_char_point = (short)(state->undo_char_point - n);
+ NK_MEMCPY(state->undo_char, state->undo_char + n,
+ (nk_size)state->undo_char_point*sizeof(nk_rune));
+ for (i=0; i < state->undo_point; ++i) {
+ if (state->undo_rec[i].char_storage >= 0)
+ state->undo_rec[i].char_storage = (short)
+ (state->undo_rec[i].char_storage - n);
+ }
+ }
+ --state->undo_point;
+ NK_MEMCPY(state->undo_rec, state->undo_rec+1,
+ (nk_size)((nk_size)state->undo_point * sizeof(state->undo_rec[0])));
+ }
+}
+NK_INTERN void
+nk_textedit_discard_redo(struct nk_text_undo_state *state)
+{
+    /* discard the oldest entry in the redo list--it's bad if this
+       ever happens, but because undo & redo have to store the actual
+       characters in different cases, the redo character buffer can
+       fill up even though the undo buffer didn't */
+ nk_size num;
+ int k = NK_TEXTEDIT_UNDOSTATECOUNT-1;
+ if (state->redo_point <= k) {
+ /* if the k'th undo state has characters, clean those up */
+ if (state->undo_rec[k].char_storage >= 0) {
+ int n = state->undo_rec[k].insert_length, i;
+ /* delete n characters from all other records */
+ state->redo_char_point = (short)(state->redo_char_point + n);
+ num = (nk_size)(NK_TEXTEDIT_UNDOCHARCOUNT - state->redo_char_point);
+ NK_MEMCPY(state->undo_char + state->redo_char_point,
+ state->undo_char + state->redo_char_point-n, num * sizeof(char));
+ for (i = state->redo_point; i < k; ++i) {
+ if (state->undo_rec[i].char_storage >= 0) {
+ state->undo_rec[i].char_storage = (short)
+ (state->undo_rec[i].char_storage + n);
+ }
+ }
+ }
+ ++state->redo_point;
+ num = (nk_size)(NK_TEXTEDIT_UNDOSTATECOUNT - state->redo_point);
+ if (num) NK_MEMCPY(state->undo_rec + state->redo_point-1,
+ state->undo_rec + state->redo_point, num * sizeof(state->undo_rec[0]));
+ }
+}
+NK_INTERN struct nk_text_undo_record*
+nk_textedit_create_undo_record(struct nk_text_undo_state *state, int numchars)
+{
+    /* any time we create a new undo record, we discard redo */
+ nk_textedit_flush_redo(state);
+
+ /* if we have no free records, we have to make room,
+ * by sliding the existing records down */
+ if (state->undo_point == NK_TEXTEDIT_UNDOSTATECOUNT)
+ nk_textedit_discard_undo(state);
+
+ /* if the characters to store won't possibly fit in the buffer,
+ * we can't undo */
+ if (numchars > NK_TEXTEDIT_UNDOCHARCOUNT) {
+ state->undo_point = 0;
+ state->undo_char_point = 0;
+ return 0;
+ }
+
+ /* if we don't have enough free characters in the buffer,
+ * we have to make room */
+ while (state->undo_char_point + numchars > NK_TEXTEDIT_UNDOCHARCOUNT)
+ nk_textedit_discard_undo(state);
+ return &state->undo_rec[state->undo_point++];
+}
+NK_INTERN nk_rune*
+nk_textedit_createundo(struct nk_text_undo_state *state, int pos,
+ int insert_len, int delete_len)
+{
+ struct nk_text_undo_record *r = nk_textedit_create_undo_record(state, insert_len);
+ if (r == 0)
+ return 0;
+
+ r->where = pos;
+ r->insert_length = (short) insert_len;
+ r->delete_length = (short) delete_len;
+
+ if (insert_len == 0) {
+ r->char_storage = -1;
+ return 0;
+ } else {
+ r->char_storage = state->undo_char_point;
+ state->undo_char_point = (short)(state->undo_char_point + insert_len);
+ return &state->undo_char[r->char_storage];
+ }
+}
+NK_API void
+nk_textedit_undo(struct nk_text_edit *state)
+{
+ struct nk_text_undo_state *s = &state->undo;
+ struct nk_text_undo_record u, *r;
+ if (s->undo_point == 0)
+ return;
+
+ /* we need to do two things: apply the undo record, and create a redo record */
+ u = s->undo_rec[s->undo_point-1];
+ r = &s->undo_rec[s->redo_point-1];
+ r->char_storage = -1;
+
+ r->insert_length = u.delete_length;
+ r->delete_length = u.insert_length;
+ r->where = u.where;
+
+ if (u.delete_length)
+ {
+ /* if the undo record says to delete characters, then the redo record will
+ need to re-insert the characters that get deleted, so we need to store
+ them.
+ there are three cases:
+ - there's enough room to store the characters
+ - characters stored for *redoing* don't leave room for redo
+ - characters stored for *undoing* don't leave room for redo
+ if the last is true, we have to bail */
+ if (s->undo_char_point + u.delete_length >= NK_TEXTEDIT_UNDOCHARCOUNT) {
+ /* the undo records take up too much character space; there's no space
+ * to store the redo characters */
+ r->insert_length = 0;
+ } else {
+ int i;
+ /* there's definitely room to store the characters eventually */
+ while (s->undo_char_point + u.delete_length > s->redo_char_point) {
+ /* there's currently not enough room, so discard a redo record */
+ nk_textedit_discard_redo(s);
+ /* should never happen: */
+ if (s->redo_point == NK_TEXTEDIT_UNDOSTATECOUNT)
+ return;
+ }
+
+ r = &s->undo_rec[s->redo_point-1];
+ r->char_storage = (short)(s->redo_char_point - u.delete_length);
+ s->redo_char_point = (short)(s->redo_char_point - u.delete_length);
+
+ /* now save the characters */
+ for (i=0; i < u.delete_length; ++i)
+ s->undo_char[r->char_storage + i] =
+ nk_str_rune_at(&state->string, u.where + i);
+ }
+ /* now we can carry out the deletion */
+ nk_str_delete_runes(&state->string, u.where, u.delete_length);
+ }
+
+ /* check type of recorded action: */
+ if (u.insert_length) {
+ /* easy case: was a deletion, so we need to insert n characters */
+ nk_str_insert_text_runes(&state->string, u.where,
+ &s->undo_char[u.char_storage], u.insert_length);
+ s->undo_char_point = (short)(s->undo_char_point - u.insert_length);
+ }
+ state->cursor = (short)(u.where + u.insert_length);
+
+ s->undo_point--;
+ s->redo_point--;
+}
+NK_API void
+nk_textedit_redo(struct nk_text_edit *state)
+{
+ struct nk_text_undo_state *s = &state->undo;
+ struct nk_text_undo_record *u, r;
+ if (s->redo_point == NK_TEXTEDIT_UNDOSTATECOUNT)
+ return;
+
+ /* we need to do two things: apply the redo record, and create an undo record */
+ u = &s->undo_rec[s->undo_point];
+ r = s->undo_rec[s->redo_point];
+
+ /* we KNOW there must be room for the undo record, because the redo record
+ was derived from an undo record */
+ u->delete_length = r.insert_length;
+ u->insert_length = r.delete_length;
+ u->where = r.where;
+ u->char_storage = -1;
+
+ if (r.delete_length) {
+ /* the redo record requires us to delete characters, so the undo record
+ needs to store the characters */
+ if (s->undo_char_point + u->insert_length > s->redo_char_point) {
+ u->insert_length = 0;
+ u->delete_length = 0;
+ } else {
+ int i;
+ u->char_storage = s->undo_char_point;
+ s->undo_char_point = (short)(s->undo_char_point + u->insert_length);
+
+ /* now save the characters */
+ for (i=0; i < u->insert_length; ++i) {
+ s->undo_char[u->char_storage + i] =
+ nk_str_rune_at(&state->string, u->where + i);
+ }
+ }
+ nk_str_delete_runes(&state->string, r.where, r.delete_length);
+ }
+
+ if (r.insert_length) {
+ /* easy case: need to insert n characters */
+ nk_str_insert_text_runes(&state->string, r.where,
+ &s->undo_char[r.char_storage], r.insert_length);
+ }
+ state->cursor = r.where + r.insert_length;
+
+ s->undo_point++;
+ s->redo_point++;
+}
+NK_INTERN void
+nk_textedit_makeundo_insert(struct nk_text_edit *state, int where, int length)
+{
+ nk_textedit_createundo(&state->undo, where, 0, length);
+}
+NK_INTERN void
+nk_textedit_makeundo_delete(struct nk_text_edit *state, int where, int length)
+{
+ int i;
+ nk_rune *p = nk_textedit_createundo(&state->undo, where, length, 0);
+ if (p) {
+ for (i=0; i < length; ++i)
+ p[i] = nk_str_rune_at(&state->string, where+i);
+ }
+}
+NK_INTERN void
+nk_textedit_makeundo_replace(struct nk_text_edit *state, int where,
+ int old_length, int new_length)
+{
+ int i;
+ nk_rune *p = nk_textedit_createundo(&state->undo, where, old_length, new_length);
+ if (p) {
+ for (i=0; i < old_length; ++i)
+ p[i] = nk_str_rune_at(&state->string, where+i);
+ }
+}
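+/* Example of how the records created by the nk_textedit_makeundo_* helpers
+   above are replayed: replacing 3 runes at position 5 with 7 new ones stores
+   where=5, insert_length=3 (plus the 3 saved old runes) and delete_length=7,
+   so nk_textedit_undo() removes the 7 inserted runes and re-inserts the 3
+   saved ones, while nk_textedit_redo() performs the inverse. */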
+NK_LIB void
+nk_textedit_clear_state(struct nk_text_edit *state, enum nk_text_edit_type type,
+ nk_plugin_filter filter)
+{
+ /* reset the state to default */
+ state->undo.undo_point = 0;
+ state->undo.undo_char_point = 0;
+ state->undo.redo_point = NK_TEXTEDIT_UNDOSTATECOUNT;
+ state->undo.redo_char_point = NK_TEXTEDIT_UNDOCHARCOUNT;
+ state->select_end = state->select_start = 0;
+ state->cursor = 0;
+ state->has_preferred_x = 0;
+ state->preferred_x = 0;
+ state->cursor_at_end_of_line = 0;
+ state->initialized = 1;
+ state->single_line = (unsigned char)(type == NK_TEXT_EDIT_SINGLE_LINE);
+ state->mode = NK_TEXT_EDIT_MODE_VIEW;
+ state->filter = filter;
+ state->scrollbar = nk_vec2(0,0);
+}
+NK_API void
+nk_textedit_init_fixed(struct nk_text_edit *state, void *memory, nk_size size)
+{
+ NK_ASSERT(state);
+ NK_ASSERT(memory);
+ if (!state || !memory || !size) return;
+ NK_MEMSET(state, 0, sizeof(struct nk_text_edit));
+ nk_textedit_clear_state(state, NK_TEXT_EDIT_SINGLE_LINE, 0);
+ nk_str_init_fixed(&state->string, memory, size);
+}
+NK_API void
+nk_textedit_init(struct nk_text_edit *state, struct nk_allocator *alloc, nk_size size)
+{
+ NK_ASSERT(state);
+ NK_ASSERT(alloc);
+ if (!state || !alloc) return;
+ NK_MEMSET(state, 0, sizeof(struct nk_text_edit));
+ nk_textedit_clear_state(state, NK_TEXT_EDIT_SINGLE_LINE, 0);
+ nk_str_init(&state->string, alloc, size);
+}
+#ifdef NK_INCLUDE_DEFAULT_ALLOCATOR
+NK_API void
+nk_textedit_init_default(struct nk_text_edit *state)
+{
+ NK_ASSERT(state);
+ if (!state) return;
+ NK_MEMSET(state, 0, sizeof(struct nk_text_edit));
+ nk_textedit_clear_state(state, NK_TEXT_EDIT_SINGLE_LINE, 0);
+ nk_str_init_default(&state->string);
+}
+#endif
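+/* Minimal usage sketch for the initializers above (illustrative only;
+   nk_textedit_text(), declared elsewhere in this header, is used here to feed
+   characters in):
+
+       static char buf[256];
+       struct nk_text_edit te;
+       nk_textedit_init_fixed(&te, buf, sizeof(buf));
+       nk_textedit_text(&te, "hello", 5);
+
+   With nk_textedit_init_fixed() the memory stays caller-owned; the allocator
+   based variants above are instead paired with nk_textedit_free() below. */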
+NK_API void
+nk_textedit_select_all(struct nk_text_edit *state)
+{
+ NK_ASSERT(state);
+ state->select_start = 0;
+ state->select_end = state->string.len;
+}
+NK_API void
+nk_textedit_free(struct nk_text_edit *state)
+{
+ NK_ASSERT(state);
+ if (!state) return;
+ nk_str_free(&state->string);
+}
+
+
+
+
+
+/* ===============================================================
+ *
+ * FILTER
+ *
+ * ===============================================================*/
+NK_API int
+nk_filter_default(const struct nk_text_edit *box, nk_rune unicode)
+{
+ NK_UNUSED(unicode);
+ NK_UNUSED(box);
+ return nk_true;
+}
+NK_API int
+nk_filter_ascii(const struct nk_text_edit *box, nk_rune unicode)
+{
+ NK_UNUSED(box);
+ if (unicode > 128) return nk_false;
+ else return nk_true;
+}
+NK_API int
+nk_filter_float(const struct nk_text_edit *box, nk_rune unicode)
+{
+ NK_UNUSED(box);
+ if ((unicode < '0' || unicode > '9') && unicode != '.' && unicode != '-')
+ return nk_false;
+ else return nk_true;
+}
+NK_API int
+nk_filter_decimal(const struct nk_text_edit *box, nk_rune unicode)
+{
+ NK_UNUSED(box);
+ if ((unicode < '0' || unicode > '9') && unicode != '-')
+ return nk_false;
+ else return nk_true;
+}
+NK_API int
+nk_filter_hex(const struct nk_text_edit *box, nk_rune unicode)
+{
+ NK_UNUSED(box);
+ if ((unicode < '0' || unicode > '9') &&
+ (unicode < 'a' || unicode > 'f') &&
+ (unicode < 'A' || unicode > 'F'))
+ return nk_false;
+ else return nk_true;
+}
+NK_API int
+nk_filter_oct(const struct nk_text_edit *box, nk_rune unicode)
+{
+ NK_UNUSED(box);
+ if (unicode < '0' || unicode > '7')
+ return nk_false;
+ else return nk_true;
+}
+NK_API int
+nk_filter_binary(const struct nk_text_edit *box, nk_rune unicode)
+{
+ NK_UNUSED(box);
+ if (unicode != '0' && unicode != '1')
+ return nk_false;
+ else return nk_true;
+}
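+/* All filters above share the nk_plugin_filter signature and are meant to be
+   handed to the edit widgets below; a quick sketch (illustrative only, the
+   hex buffer and its length are the caller's):
+
+       static char hex[16]; static int hex_len;
+       nk_edit_string(ctx, NK_EDIT_FIELD, hex, &hex_len, sizeof(hex), nk_filter_hex);
+
+   nk_filter_default above accepts every rune and is the fallback used when no
+   filter is passed. */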
+
+/* ===============================================================
+ *
+ * EDIT
+ *
+ * ===============================================================*/
+NK_LIB void
+nk_edit_draw_text(struct nk_command_buffer *out,
+ const struct nk_style_edit *style, float pos_x, float pos_y,
+ float x_offset, const char *text, int byte_len, float row_height,
+ const struct nk_user_font *font, struct nk_color background,
+ struct nk_color foreground, int is_selected)
+{
+ NK_ASSERT(out);
+ NK_ASSERT(font);
+ NK_ASSERT(style);
+ if (!text || !byte_len || !out || !style) return;
+
+ {int glyph_len = 0;
+ nk_rune unicode = 0;
+ int text_len = 0;
+ float line_width = 0;
+ float glyph_width;
+ const char *line = text;
+ float line_offset = 0;
+ int line_count = 0;
+
+ struct nk_text txt;
+ txt.padding = nk_vec2(0,0);
+ txt.background = background;
+ txt.text = foreground;
+
+ glyph_len = nk_utf_decode(text+text_len, &unicode, byte_len-text_len);
+ if (!glyph_len) return;
+ while ((text_len < byte_len) && glyph_len)
+ {
+ if (unicode == '\n') {
+ /* new line separator so draw previous line */
+ struct nk_rect label;
+ label.y = pos_y + line_offset;
+ label.h = row_height;
+ label.w = line_width;
+ label.x = pos_x;
+ if (!line_count)
+ label.x += x_offset;
+
+ if (is_selected) /* selection needs to draw different background color */
+ nk_fill_rect(out, label, 0, background);
+ nk_widget_text(out, label, line, (int)((text + text_len) - line),
+ &txt, NK_TEXT_CENTERED, font);
+
+ text_len++;
+ line_count++;
+ line_width = 0;
+ line = text + text_len;
+ line_offset += row_height;
+ glyph_len = nk_utf_decode(text + text_len, &unicode, (int)(byte_len-text_len));
+ continue;
+ }
+ if (unicode == '\r') {
+ text_len++;
+ glyph_len = nk_utf_decode(text + text_len, &unicode, byte_len-text_len);
+ continue;
+ }
+ glyph_width = font->width(font->userdata, font->height, text+text_len, glyph_len);
+ line_width += (float)glyph_width;
+ text_len += glyph_len;
+ glyph_len = nk_utf_decode(text + text_len, &unicode, byte_len-text_len);
+ continue;
+ }
+ if (line_width > 0) {
+ /* draw last line */
+ struct nk_rect label;
+ label.y = pos_y + line_offset;
+ label.h = row_height;
+ label.w = line_width;
+ label.x = pos_x;
+ if (!line_count)
+ label.x += x_offset;
+
+ if (is_selected)
+ nk_fill_rect(out, label, 0, background);
+ nk_widget_text(out, label, line, (int)((text + text_len) - line),
+ &txt, NK_TEXT_LEFT, font);
+ }}
+}
+NK_LIB nk_flags
+nk_do_edit(nk_flags *state, struct nk_command_buffer *out,
+ struct nk_rect bounds, nk_flags flags, nk_plugin_filter filter,
+ struct nk_text_edit *edit, const struct nk_style_edit *style,
+ struct nk_input *in, const struct nk_user_font *font)
+{
+ struct nk_rect area;
+ nk_flags ret = 0;
+ float row_height;
+ char prev_state = 0;
+ char is_hovered = 0;
+ char select_all = 0;
+ char cursor_follow = 0;
+ struct nk_rect old_clip;
+ struct nk_rect clip;
+
+ NK_ASSERT(state);
+ NK_ASSERT(out);
+ NK_ASSERT(style);
+ if (!state || !out || !style)
+ return ret;
+
+ /* visible text area calculation */
+ area.x = bounds.x + style->padding.x + style->border;
+ area.y = bounds.y + style->padding.y + style->border;
+ area.w = bounds.w - (2.0f * style->padding.x + 2 * style->border);
+ area.h = bounds.h - (2.0f * style->padding.y + 2 * style->border);
+ if (flags & NK_EDIT_MULTILINE)
+ area.w = NK_MAX(0, area.w - style->scrollbar_size.x);
+ row_height = (flags & NK_EDIT_MULTILINE)? font->height + style->row_padding: area.h;
+
+ /* calculate clipping rectangle */
+ old_clip = out->clip;
+ nk_unify(&clip, &old_clip, area.x, area.y, area.x + area.w, area.y + area.h);
+
+ /* update edit state */
+ prev_state = (char)edit->active;
+ is_hovered = (char)nk_input_is_mouse_hovering_rect(in, bounds);
+ if (in && in->mouse.buttons[NK_BUTTON_LEFT].clicked && in->mouse.buttons[NK_BUTTON_LEFT].down) {
+ edit->active = NK_INBOX(in->mouse.pos.x, in->mouse.pos.y,
+ bounds.x, bounds.y, bounds.w, bounds.h);
+ }
+
+ /* (de)activate text editor */
+ if (!prev_state && edit->active) {
+ const enum nk_text_edit_type type = (flags & NK_EDIT_MULTILINE) ?
+ NK_TEXT_EDIT_MULTI_LINE: NK_TEXT_EDIT_SINGLE_LINE;
+ nk_textedit_clear_state(edit, type, filter);
+ if (flags & NK_EDIT_AUTO_SELECT)
+ select_all = nk_true;
+ if (flags & NK_EDIT_GOTO_END_ON_ACTIVATE) {
+ edit->cursor = edit->string.len;
+ in = 0;
+ }
+ } else if (!edit->active) edit->mode = NK_TEXT_EDIT_MODE_VIEW;
+ if (flags & NK_EDIT_READ_ONLY)
+ edit->mode = NK_TEXT_EDIT_MODE_VIEW;
+ else if (flags & NK_EDIT_ALWAYS_INSERT_MODE)
+ edit->mode = NK_TEXT_EDIT_MODE_INSERT;
+
+ ret = (edit->active) ? NK_EDIT_ACTIVE: NK_EDIT_INACTIVE;
+ if (prev_state != edit->active)
+ ret |= (edit->active) ? NK_EDIT_ACTIVATED: NK_EDIT_DEACTIVATED;
+
+ /* handle user input */
+ if (edit->active && in)
+ {
+ int shift_mod = in->keyboard.keys[NK_KEY_SHIFT].down;
+ const float mouse_x = (in->mouse.pos.x - area.x) + edit->scrollbar.x;
+ const float mouse_y = (in->mouse.pos.y - area.y) + edit->scrollbar.y;
+
+ /* mouse click handler */
+ is_hovered = (char)nk_input_is_mouse_hovering_rect(in, area);
+ if (select_all) {
+ nk_textedit_select_all(edit);
+ } else if (is_hovered && in->mouse.buttons[NK_BUTTON_LEFT].down &&
+ in->mouse.buttons[NK_BUTTON_LEFT].clicked) {
+ nk_textedit_click(edit, mouse_x, mouse_y, font, row_height);
+ } else if (is_hovered && in->mouse.buttons[NK_BUTTON_LEFT].down &&
+ (in->mouse.delta.x != 0.0f || in->mouse.delta.y != 0.0f)) {
+ nk_textedit_drag(edit, mouse_x, mouse_y, font, row_height);
+ cursor_follow = nk_true;
+ } else if (is_hovered && in->mouse.buttons[NK_BUTTON_RIGHT].clicked &&
+ in->mouse.buttons[NK_BUTTON_RIGHT].down) {
+ nk_textedit_key(edit, NK_KEY_TEXT_WORD_LEFT, nk_false, font, row_height);
+ nk_textedit_key(edit, NK_KEY_TEXT_WORD_RIGHT, nk_true, font, row_height);
+ cursor_follow = nk_true;
+ }
+
+ {int i; /* keyboard input */
+ int old_mode = edit->mode;
+ for (i = 0; i < NK_KEY_MAX; ++i) {
+ if (i == NK_KEY_ENTER || i == NK_KEY_TAB) continue; /* special case */
+ if (nk_input_is_key_pressed(in, (enum nk_keys)i)) {
+ nk_textedit_key(edit, (enum nk_keys)i, shift_mod, font, row_height);
+ cursor_follow = nk_true;
+ }
+ }
+ if (old_mode != edit->mode) {
+ in->keyboard.text_len = 0;
+ }}
+
+ /* text input */
+ edit->filter = filter;
+ if (in->keyboard.text_len) {
+ nk_textedit_text(edit, in->keyboard.text, in->keyboard.text_len);
+ cursor_follow = nk_true;
+ in->keyboard.text_len = 0;
+ }
+
+ /* enter key handler */
+ if (nk_input_is_key_pressed(in, NK_KEY_ENTER)) {
+ cursor_follow = nk_true;
+ if (flags & NK_EDIT_CTRL_ENTER_NEWLINE && shift_mod)
+ nk_textedit_text(edit, "\n", 1);
+ else if (flags & NK_EDIT_SIG_ENTER)
+ ret |= NK_EDIT_COMMITED;
+ else nk_textedit_text(edit, "\n", 1);
+ }
+
+ /* cut & copy handler */
+        {int copy = nk_input_is_key_pressed(in, NK_KEY_COPY);
+ int cut = nk_input_is_key_pressed(in, NK_KEY_CUT);
+ if ((copy || cut) && (flags & NK_EDIT_CLIPBOARD))
+ {
+ int glyph_len;
+ nk_rune unicode;
+ const char *text;
+ int b = edit->select_start;
+ int e = edit->select_end;
+
+ int begin = NK_MIN(b, e);
+ int end = NK_MAX(b, e);
+ text = nk_str_at_const(&edit->string, begin, &unicode, &glyph_len);
+ if (edit->clip.copy)
+ edit->clip.copy(edit->clip.userdata, text, end - begin);
+ if (cut && !(flags & NK_EDIT_READ_ONLY)){
+ nk_textedit_cut(edit);
+ cursor_follow = nk_true;
+ }
+ }}
+
+ /* paste handler */
+ {int paste = nk_input_is_key_pressed(in, NK_KEY_PASTE);
+ if (paste && (flags & NK_EDIT_CLIPBOARD) && edit->clip.paste) {
+ edit->clip.paste(edit->clip.userdata, edit);
+ cursor_follow = nk_true;
+ }}
+
+ /* tab handler */
+ {int tab = nk_input_is_key_pressed(in, NK_KEY_TAB);
+ if (tab && (flags & NK_EDIT_ALLOW_TAB)) {
+            nk_textedit_text(edit, "    ", 4);
+ cursor_follow = nk_true;
+ }}
+ }
+
+ /* set widget state */
+ if (edit->active)
+ *state = NK_WIDGET_STATE_ACTIVE;
+ else nk_widget_state_reset(state);
+
+ if (is_hovered)
+ *state |= NK_WIDGET_STATE_HOVERED;
+
+ /* DRAW EDIT */
+ {const char *text = nk_str_get_const(&edit->string);
+ int len = nk_str_len_char(&edit->string);
+
+ {/* select background colors/images */
+ const struct nk_style_item *background;
+ if (*state & NK_WIDGET_STATE_ACTIVED)
+ background = &style->active;
+ else if (*state & NK_WIDGET_STATE_HOVER)
+ background = &style->hover;
+ else background = &style->normal;
+
+ /* draw background frame */
+ if (background->type == NK_STYLE_ITEM_COLOR) {
+ nk_stroke_rect(out, bounds, style->rounding, style->border, style->border_color);
+ nk_fill_rect(out, bounds, style->rounding, background->data.color);
+ } else nk_draw_image(out, bounds, &background->data.image, nk_white);}
+
+ area.w = NK_MAX(0, area.w - style->cursor_size);
+ if (edit->active)
+ {
+ int total_lines = 1;
+ struct nk_vec2 text_size = nk_vec2(0,0);
+
+ /* text pointer positions */
+ const char *cursor_ptr = 0;
+ const char *select_begin_ptr = 0;
+ const char *select_end_ptr = 0;
+
+ /* 2D pixel positions */
+ struct nk_vec2 cursor_pos = nk_vec2(0,0);
+ struct nk_vec2 selection_offset_start = nk_vec2(0,0);
+ struct nk_vec2 selection_offset_end = nk_vec2(0,0);
+
+ int selection_begin = NK_MIN(edit->select_start, edit->select_end);
+ int selection_end = NK_MAX(edit->select_start, edit->select_end);
+
+ /* calculate total line count + total space + cursor/selection position */
+ float line_width = 0.0f;
+ if (text && len)
+ {
+ /* utf8 encoding */
+ float glyph_width;
+ int glyph_len = 0;
+ nk_rune unicode = 0;
+ int text_len = 0;
+ int glyphs = 0;
+ int row_begin = 0;
+
+ glyph_len = nk_utf_decode(text, &unicode, len);
+ glyph_width = font->width(font->userdata, font->height, text, glyph_len);
+ line_width = 0;
+
+ /* iterate all lines */
+ while ((text_len < len) && glyph_len)
+ {
+ /* set cursor 2D position and line */
+ if (!cursor_ptr && glyphs == edit->cursor)
+ {
+ int glyph_offset;
+ struct nk_vec2 out_offset;
+ struct nk_vec2 row_size;
+ const char *remaining;
+
+ /* calculate 2d position */
+ cursor_pos.y = (float)(total_lines-1) * row_height;
+ row_size = nk_text_calculate_text_bounds(font, text+row_begin,
+ text_len-row_begin, row_height, &remaining,
+ &out_offset, &glyph_offset, NK_STOP_ON_NEW_LINE);
+ cursor_pos.x = row_size.x;
+ cursor_ptr = text + text_len;
+ }
+
+ /* set start selection 2D position and line */
+ if (!select_begin_ptr && edit->select_start != edit->select_end &&
+ glyphs == selection_begin)
+ {
+ int glyph_offset;
+ struct nk_vec2 out_offset;
+ struct nk_vec2 row_size;
+ const char *remaining;
+
+ /* calculate 2d position */
+ selection_offset_start.y = (float)(NK_MAX(total_lines-1,0)) * row_height;
+ row_size = nk_text_calculate_text_bounds(font, text+row_begin,
+ text_len-row_begin, row_height, &remaining,
+ &out_offset, &glyph_offset, NK_STOP_ON_NEW_LINE);
+ selection_offset_start.x = row_size.x;
+ select_begin_ptr = text + text_len;
+ }
+
+ /* set end selection 2D position and line */
+ if (!select_end_ptr && edit->select_start != edit->select_end &&
+ glyphs == selection_end)
+ {
+ int glyph_offset;
+ struct nk_vec2 out_offset;
+ struct nk_vec2 row_size;
+ const char *remaining;
+
+ /* calculate 2d position */
+ selection_offset_end.y = (float)(total_lines-1) * row_height;
+ row_size = nk_text_calculate_text_bounds(font, text+row_begin,
+ text_len-row_begin, row_height, &remaining,
+ &out_offset, &glyph_offset, NK_STOP_ON_NEW_LINE);
+ selection_offset_end.x = row_size.x;
+ select_end_ptr = text + text_len;
+ }
+ if (unicode == '\n') {
+ text_size.x = NK_MAX(text_size.x, line_width);
+ total_lines++;
+ line_width = 0;
+ text_len++;
+ glyphs++;
+ row_begin = text_len;
+ glyph_len = nk_utf_decode(text + text_len, &unicode, len-text_len);
+ glyph_width = font->width(font->userdata, font->height, text+text_len, glyph_len);
+ continue;
+ }
+
+ glyphs++;
+ text_len += glyph_len;
+ line_width += (float)glyph_width;
+
+ glyph_len = nk_utf_decode(text + text_len, &unicode, len-text_len);
+ glyph_width = font->width(font->userdata, font->height,
+ text+text_len, glyph_len);
+ continue;
+ }
+ text_size.y = (float)total_lines * row_height;
+
+ /* handle case when cursor is at end of text buffer */
+ if (!cursor_ptr && edit->cursor == edit->string.len) {
+ cursor_pos.x = line_width;
+ cursor_pos.y = text_size.y - row_height;
+ }
+ }
+ {
+ /* scrollbar */
+ if (cursor_follow)
+ {
+ /* update scrollbar to follow cursor */
+ if (!(flags & NK_EDIT_NO_HORIZONTAL_SCROLL)) {
+ /* horizontal scroll */
+ const float scroll_increment = area.w * 0.25f;
+ if (cursor_pos.x < edit->scrollbar.x)
+ edit->scrollbar.x = (float)(int)NK_MAX(0.0f, cursor_pos.x - scroll_increment);
+ if (cursor_pos.x >= edit->scrollbar.x + area.w)
+ edit->scrollbar.x = (float)(int)NK_MAX(0.0f, edit->scrollbar.x + scroll_increment);
+ } else edit->scrollbar.x = 0;
+
+ if (flags & NK_EDIT_MULTILINE) {
+ /* vertical scroll */
+ if (cursor_pos.y < edit->scrollbar.y)
+ edit->scrollbar.y = NK_MAX(0.0f, cursor_pos.y - row_height);
+ if (cursor_pos.y >= edit->scrollbar.y + area.h)
+ edit->scrollbar.y = edit->scrollbar.y + row_height;
+ } else edit->scrollbar.y = 0;
+ }
+
+ /* scrollbar widget */
+ if (flags & NK_EDIT_MULTILINE)
+ {
+ nk_flags ws;
+ struct nk_rect scroll;
+ float scroll_target;
+ float scroll_offset;
+ float scroll_step;
+ float scroll_inc;
+
+ scroll = area;
+ scroll.x = (bounds.x + bounds.w - style->border) - style->scrollbar_size.x;
+ scroll.w = style->scrollbar_size.x;
+
+ scroll_offset = edit->scrollbar.y;
+ scroll_step = scroll.h * 0.10f;
+ scroll_inc = scroll.h * 0.01f;
+ scroll_target = text_size.y;
+ edit->scrollbar.y = nk_do_scrollbarv(&ws, out, scroll, 0,
+ scroll_offset, scroll_target, scroll_step, scroll_inc,
+ &style->scrollbar, in, font);
+ }
+ }
+
+ /* draw text */
+ {struct nk_color background_color;
+ struct nk_color text_color;
+ struct nk_color sel_background_color;
+ struct nk_color sel_text_color;
+ struct nk_color cursor_color;
+ struct nk_color cursor_text_color;
+ const struct nk_style_item *background;
+ nk_push_scissor(out, clip);
+
+ /* select correct colors to draw */
+ if (*state & NK_WIDGET_STATE_ACTIVED) {
+ background = &style->active;
+ text_color = style->text_active;
+ sel_text_color = style->selected_text_hover;
+ sel_background_color = style->selected_hover;
+ cursor_color = style->cursor_hover;
+ cursor_text_color = style->cursor_text_hover;
+ } else if (*state & NK_WIDGET_STATE_HOVER) {
+ background = &style->hover;
+ text_color = style->text_hover;
+ sel_text_color = style->selected_text_hover;
+ sel_background_color = style->selected_hover;
+ cursor_text_color = style->cursor_text_hover;
+ cursor_color = style->cursor_hover;
+ } else {
+ background = &style->normal;
+ text_color = style->text_normal;
+ sel_text_color = style->selected_text_normal;
+ sel_background_color = style->selected_normal;
+ cursor_color = style->cursor_normal;
+ cursor_text_color = style->cursor_text_normal;
+ }
+ if (background->type == NK_STYLE_ITEM_IMAGE)
+ background_color = nk_rgba(0,0,0,0);
+ else background_color = background->data.color;
+
+
+ if (edit->select_start == edit->select_end) {
+ /* no selection so just draw the complete text */
+ const char *begin = nk_str_get_const(&edit->string);
+ int l = nk_str_len_char(&edit->string);
+ nk_edit_draw_text(out, style, area.x - edit->scrollbar.x,
+ area.y - edit->scrollbar.y, 0, begin, l, row_height, font,
+ background_color, text_color, nk_false);
+ } else {
+ /* edit has selection so draw 1-3 text chunks */
+ if (edit->select_start != edit->select_end && selection_begin > 0){
+ /* draw unselected text before selection */
+ const char *begin = nk_str_get_const(&edit->string);
+ NK_ASSERT(select_begin_ptr);
+ nk_edit_draw_text(out, style, area.x - edit->scrollbar.x,
+ area.y - edit->scrollbar.y, 0, begin, (int)(select_begin_ptr - begin),
+ row_height, font, background_color, text_color, nk_false);
+ }
+ if (edit->select_start != edit->select_end) {
+ /* draw selected text */
+ NK_ASSERT(select_begin_ptr);
+ if (!select_end_ptr) {
+ const char *begin = nk_str_get_const(&edit->string);
+ select_end_ptr = begin + nk_str_len_char(&edit->string);
+ }
+ nk_edit_draw_text(out, style,
+ area.x - edit->scrollbar.x,
+ area.y + selection_offset_start.y - edit->scrollbar.y,
+ selection_offset_start.x,
+ select_begin_ptr, (int)(select_end_ptr - select_begin_ptr),
+ row_height, font, sel_background_color, sel_text_color, nk_true);
+ }
+ if ((edit->select_start != edit->select_end &&
+ selection_end < edit->string.len))
+ {
+ /* draw unselected text after selected text */
+ const char *begin = select_end_ptr;
+ const char *end = nk_str_get_const(&edit->string) +
+ nk_str_len_char(&edit->string);
+ NK_ASSERT(select_end_ptr);
+ nk_edit_draw_text(out, style,
+ area.x - edit->scrollbar.x,
+ area.y + selection_offset_end.y - edit->scrollbar.y,
+ selection_offset_end.x,
+ begin, (int)(end - begin), row_height, font,
+ background_color, text_color, nk_true);
+ }
+ }
+
+ /* cursor */
+ if (edit->select_start == edit->select_end)
+ {
+ if (edit->cursor >= nk_str_len(&edit->string) ||
+ (cursor_ptr && *cursor_ptr == '\n')) {
+ /* draw cursor at end of line */
+ struct nk_rect cursor;
+ cursor.w = style->cursor_size;
+ cursor.h = font->height;
+ cursor.x = area.x + cursor_pos.x - edit->scrollbar.x;
+ cursor.y = area.y + cursor_pos.y + row_height/2.0f - cursor.h/2.0f;
+ cursor.y -= edit->scrollbar.y;
+ nk_fill_rect(out, cursor, 0, cursor_color);
+ } else {
+ /* draw cursor inside text */
+ int glyph_len;
+ struct nk_rect label;
+ struct nk_text txt;
+
+ nk_rune unicode;
+ NK_ASSERT(cursor_ptr);
+ glyph_len = nk_utf_decode(cursor_ptr, &unicode, 4);
+
+ label.x = area.x + cursor_pos.x - edit->scrollbar.x;
+ label.y = area.y + cursor_pos.y - edit->scrollbar.y;
+ label.w = font->width(font->userdata, font->height, cursor_ptr, glyph_len);
+ label.h = row_height;
+
+ txt.padding = nk_vec2(0,0);
+                    txt.background = cursor_color;
+ txt.text = cursor_text_color;
+ nk_fill_rect(out, label, 0, cursor_color);
+ nk_widget_text(out, label, cursor_ptr, glyph_len, &txt, NK_TEXT_LEFT, font);
+ }
+ }}
+ } else {
+ /* not active so just draw text */
+ int l = nk_str_len_char(&edit->string);
+ const char *begin = nk_str_get_const(&edit->string);
+
+ const struct nk_style_item *background;
+ struct nk_color background_color;
+ struct nk_color text_color;
+ nk_push_scissor(out, clip);
+ if (*state & NK_WIDGET_STATE_ACTIVED) {
+ background = &style->active;
+ text_color = style->text_active;
+ } else if (*state & NK_WIDGET_STATE_HOVER) {
+ background = &style->hover;
+ text_color = style->text_hover;
+ } else {
+ background = &style->normal;
+ text_color = style->text_normal;
+ }
+ if (background->type == NK_STYLE_ITEM_IMAGE)
+ background_color = nk_rgba(0,0,0,0);
+ else background_color = background->data.color;
+ nk_edit_draw_text(out, style, area.x - edit->scrollbar.x,
+ area.y - edit->scrollbar.y, 0, begin, l, row_height, font,
+ background_color, text_color, nk_false);
+ }
+ nk_push_scissor(out, old_clip);}
+ return ret;
+}
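+/* Note on the flags returned by nk_do_edit(): the result always carries either
+   NK_EDIT_ACTIVE or NK_EDIT_INACTIVE, adds NK_EDIT_ACTIVATED/NK_EDIT_DEACTIVATED
+   on a focus change, and adds NK_EDIT_COMMITED when NK_EDIT_SIG_ENTER is set
+   and enter is pressed while the field is active, so callers test individual
+   bits, e.g. (ret & NK_EDIT_COMMITED), rather than comparing for equality. */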
+NK_API void
+nk_edit_focus(struct nk_context *ctx, nk_flags flags)
+{
+ nk_hash hash;
+ struct nk_window *win;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ if (!ctx || !ctx->current) return;
+
+ win = ctx->current;
+ hash = win->edit.seq;
+ win->edit.active = nk_true;
+ win->edit.name = hash;
+ if (flags & NK_EDIT_ALWAYS_INSERT_MODE)
+ win->edit.mode = NK_TEXT_EDIT_MODE_INSERT;
+}
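+/* nk_edit_focus() tags the next edit widget submitted in the current window as
+   active (it records the current edit sequence number as the name), so it is
+   called right before the nk_edit_* call that should receive keyboard focus.
+   A sketch, with `search` and `activate_search` being the caller's:
+
+       if (activate_search)
+           nk_edit_focus(ctx, NK_EDIT_ALWAYS_INSERT_MODE);
+       nk_edit_string_zero_terminated(ctx, NK_EDIT_FIELD, search,
+           sizeof(search), nk_filter_default);
+
+   nk_edit_unfocus() below clears the stored name again. */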
+NK_API void
+nk_edit_unfocus(struct nk_context *ctx)
+{
+ struct nk_window *win;
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ if (!ctx || !ctx->current) return;
+
+ win = ctx->current;
+ win->edit.active = nk_false;
+ win->edit.name = 0;
+}
+NK_API nk_flags
+nk_edit_string(struct nk_context *ctx, nk_flags flags,
+ char *memory, int *len, int max, nk_plugin_filter filter)
+{
+ nk_hash hash;
+ nk_flags state;
+ struct nk_text_edit *edit;
+ struct nk_window *win;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(memory);
+ NK_ASSERT(len);
+ if (!ctx || !memory || !len)
+ return 0;
+
+ filter = (!filter) ? nk_filter_default: filter;
+ win = ctx->current;
+ hash = win->edit.seq;
+ edit = &ctx->text_edit;
+ nk_textedit_clear_state(&ctx->text_edit, (flags & NK_EDIT_MULTILINE)?
+ NK_TEXT_EDIT_MULTI_LINE: NK_TEXT_EDIT_SINGLE_LINE, filter);
+
+ if (win->edit.active && hash == win->edit.name) {
+ if (flags & NK_EDIT_NO_CURSOR)
+ edit->cursor = nk_utf_len(memory, *len);
+ else edit->cursor = win->edit.cursor;
+ if (!(flags & NK_EDIT_SELECTABLE)) {
+ edit->select_start = win->edit.cursor;
+ edit->select_end = win->edit.cursor;
+ } else {
+ edit->select_start = win->edit.sel_start;
+ edit->select_end = win->edit.sel_end;
+ }
+ edit->mode = win->edit.mode;
+ edit->scrollbar.x = (float)win->edit.scrollbar.x;
+ edit->scrollbar.y = (float)win->edit.scrollbar.y;
+ edit->active = nk_true;
+ } else edit->active = nk_false;
+
+ max = NK_MAX(1, max);
+ *len = NK_MIN(*len, max-1);
+ nk_str_init_fixed(&edit->string, memory, (nk_size)max);
+ edit->string.buffer.allocated = (nk_size)*len;
+ edit->string.len = nk_utf_len(memory, *len);
+ state = nk_edit_buffer(ctx, flags, edit, filter);
+ *len = (int)edit->string.buffer.allocated;
+
+ if (edit->active) {
+ win->edit.cursor = edit->cursor;
+ win->edit.sel_start = edit->select_start;
+ win->edit.sel_end = edit->select_end;
+ win->edit.mode = edit->mode;
+ win->edit.scrollbar.x = (nk_uint)edit->scrollbar.x;
+ win->edit.scrollbar.y = (nk_uint)edit->scrollbar.y;
+ } return state;
+}
+NK_API nk_flags
+nk_edit_buffer(struct nk_context *ctx, nk_flags flags,
+ struct nk_text_edit *edit, nk_plugin_filter filter)
+{
+ struct nk_window *win;
+ struct nk_style *style;
+ struct nk_input *in;
+
+ enum nk_widget_layout_states state;
+ struct nk_rect bounds;
+
+ nk_flags ret_flags = 0;
+ unsigned char prev_state;
+ nk_hash hash;
+
+ /* make sure correct values */
+ NK_ASSERT(ctx);
+ NK_ASSERT(edit);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ if (!ctx || !ctx->current || !ctx->current->layout)
+ return 0;
+
+ win = ctx->current;
+ style = &ctx->style;
+ state = nk_widget(&bounds, ctx);
+ if (!state) return state;
+ in = (win->layout->flags & NK_WINDOW_ROM) ? 0 : &ctx->input;
+
+ /* check if edit is currently hot item */
+ hash = win->edit.seq++;
+ if (win->edit.active && hash == win->edit.name) {
+ if (flags & NK_EDIT_NO_CURSOR)
+ edit->cursor = edit->string.len;
+ if (!(flags & NK_EDIT_SELECTABLE)) {
+ edit->select_start = edit->cursor;
+ edit->select_end = edit->cursor;
+ }
+ if (flags & NK_EDIT_CLIPBOARD)
+ edit->clip = ctx->clip;
+ edit->active = (unsigned char)win->edit.active;
+ } else edit->active = nk_false;
+ edit->mode = win->edit.mode;
+
+ filter = (!filter) ? nk_filter_default: filter;
+ prev_state = (unsigned char)edit->active;
+ in = (flags & NK_EDIT_READ_ONLY) ? 0: in;
+ ret_flags = nk_do_edit(&ctx->last_widget_state, &win->buffer, bounds, flags,
+ filter, edit, &style->edit, in, style->font);
+
+ if (ctx->last_widget_state & NK_WIDGET_STATE_HOVER)
+ ctx->style.cursor_active = ctx->style.cursors[NK_CURSOR_TEXT];
+ if (edit->active && prev_state != edit->active) {
+ /* current edit is now hot */
+ win->edit.active = nk_true;
+ win->edit.name = hash;
+ } else if (prev_state && !edit->active) {
+ /* current edit is now cold */
+ win->edit.active = nk_false;
+ } return ret_flags;
+}
+NK_API nk_flags
+nk_edit_string_zero_terminated(struct nk_context *ctx, nk_flags flags,
+ char *buffer, int max, nk_plugin_filter filter)
+{
+ nk_flags result;
+ int len = nk_strlen(buffer);
+ result = nk_edit_string(ctx, flags, buffer, &len, max, filter);
+ buffer[NK_MIN(NK_MAX(max-1,0), len)] = '\0';
+ return result;
+}
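+/* Of the three public entry points above, nk_edit_buffer() works on a
+   caller-owned struct nk_text_edit that persists across frames,
+   nk_edit_string() edits a raw char buffer with an explicit in/out length, and
+   nk_edit_string_zero_terminated() is the convenience wrapper that derives the
+   length with nk_strlen() and re-terminates the buffer after every call. */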
+
+
+
+
+
+/* ===============================================================
+ *
+ * PROPERTY
+ *
+ * ===============================================================*/
+NK_LIB void
+nk_drag_behavior(nk_flags *state, const struct nk_input *in,
+ struct nk_rect drag, struct nk_property_variant *variant,
+ float inc_per_pixel)
+{
+ int left_mouse_down = in && in->mouse.buttons[NK_BUTTON_LEFT].down;
+ int left_mouse_click_in_cursor = in &&
+ nk_input_has_mouse_click_down_in_rect(in, NK_BUTTON_LEFT, drag, nk_true);
+
+ nk_widget_state_reset(state);
+ if (nk_input_is_mouse_hovering_rect(in, drag))
+ *state = NK_WIDGET_STATE_HOVERED;
+
+ if (left_mouse_down && left_mouse_click_in_cursor) {
+ float delta, pixels;
+ pixels = in->mouse.delta.x;
+ delta = pixels * inc_per_pixel;
+ switch (variant->kind) {
+ default: break;
+ case NK_PROPERTY_INT:
+ variant->value.i = variant->value.i + (int)delta;
+ variant->value.i = NK_CLAMP(variant->min_value.i, variant->value.i, variant->max_value.i);
+ break;
+ case NK_PROPERTY_FLOAT:
+ variant->value.f = variant->value.f + (float)delta;
+ variant->value.f = NK_CLAMP(variant->min_value.f, variant->value.f, variant->max_value.f);
+ break;
+ case NK_PROPERTY_DOUBLE:
+ variant->value.d = variant->value.d + (double)delta;
+ variant->value.d = NK_CLAMP(variant->min_value.d, variant->value.d, variant->max_value.d);
+ break;
+ }
+ *state = NK_WIDGET_STATE_ACTIVE;
+ }
+ if (*state & NK_WIDGET_STATE_HOVER && !nk_input_is_mouse_prev_hovering_rect(in, drag))
+ *state |= NK_WIDGET_STATE_ENTERED;
+ else if (nk_input_is_mouse_prev_hovering_rect(in, drag))
+ *state |= NK_WIDGET_STATE_LEFT;
+}
+NK_LIB void
+nk_property_behavior(nk_flags *ws, const struct nk_input *in,
+ struct nk_rect property, struct nk_rect label, struct nk_rect edit,
+ struct nk_rect empty, int *state, struct nk_property_variant *variant,
+ float inc_per_pixel)
+{
+ if (in && *state == NK_PROPERTY_DEFAULT) {
+ if (nk_button_behavior(ws, edit, in, NK_BUTTON_DEFAULT))
+ *state = NK_PROPERTY_EDIT;
+ else if (nk_input_is_mouse_click_down_in_rect(in, NK_BUTTON_LEFT, label, nk_true))
+ *state = NK_PROPERTY_DRAG;
+ else if (nk_input_is_mouse_click_down_in_rect(in, NK_BUTTON_LEFT, empty, nk_true))
+ *state = NK_PROPERTY_DRAG;
+ }
+ if (*state == NK_PROPERTY_DRAG) {
+ nk_drag_behavior(ws, in, property, variant, inc_per_pixel);
+ if (!(*ws & NK_WIDGET_STATE_ACTIVED)) *state = NK_PROPERTY_DEFAULT;
+ }
+}
+NK_LIB void
+nk_draw_property(struct nk_command_buffer *out, const struct nk_style_property *style,
+ const struct nk_rect *bounds, const struct nk_rect *label, nk_flags state,
+ const char *name, int len, const struct nk_user_font *font)
+{
+ struct nk_text text;
+ const struct nk_style_item *background;
+
+ /* select correct background and text color */
+ if (state & NK_WIDGET_STATE_ACTIVED) {
+ background = &style->active;
+ text.text = style->label_active;
+ } else if (state & NK_WIDGET_STATE_HOVER) {
+ background = &style->hover;
+ text.text = style->label_hover;
+ } else {
+ background = &style->normal;
+ text.text = style->label_normal;
+ }
+
+ /* draw background */
+ if (background->type == NK_STYLE_ITEM_IMAGE) {
+ nk_draw_image(out, *bounds, &background->data.image, nk_white);
+ text.background = nk_rgba(0,0,0,0);
+ } else {
+ text.background = background->data.color;
+ nk_fill_rect(out, *bounds, style->rounding, background->data.color);
+ nk_stroke_rect(out, *bounds, style->rounding, style->border, background->data.color);
+ }
+
+ /* draw label */
+ text.padding = nk_vec2(0,0);
+ nk_widget_text(out, *label, name, len, &text, NK_TEXT_CENTERED, font);
+}
+NK_LIB void
+nk_do_property(nk_flags *ws,
+ struct nk_command_buffer *out, struct nk_rect property,
+ const char *name, struct nk_property_variant *variant,
+ float inc_per_pixel, char *buffer, int *len,
+ int *state, int *cursor, int *select_begin, int *select_end,
+ const struct nk_style_property *style,
+ enum nk_property_filter filter, struct nk_input *in,
+ const struct nk_user_font *font, struct nk_text_edit *text_edit,
+ enum nk_button_behavior behavior)
+{
+ const nk_plugin_filter filters[] = {
+ nk_filter_decimal,
+ nk_filter_float
+ };
+ int active, old;
+ int num_len, name_len;
+ char string[NK_MAX_NUMBER_BUFFER];
+ float size;
+
+ char *dst = 0;
+ int *length;
+
+ struct nk_rect left;
+ struct nk_rect right;
+ struct nk_rect label;
+ struct nk_rect edit;
+ struct nk_rect empty;
+
+ /* left decrement button */
+ left.h = font->height/2;
+ left.w = left.h;
+ left.x = property.x + style->border + style->padding.x;
+ left.y = property.y + style->border + property.h/2.0f - left.h/2;
+
+ /* text label */
+ name_len = nk_strlen(name);
+ size = font->width(font->userdata, font->height, name, name_len);
+ label.x = left.x + left.w + style->padding.x;
+ label.w = (float)size + 2 * style->padding.x;
+ label.y = property.y + style->border + style->padding.y;
+ label.h = property.h - (2 * style->border + 2 * style->padding.y);
+
+ /* right increment button */
+ right.y = left.y;
+ right.w = left.w;
+ right.h = left.h;
+ right.x = property.x + property.w - (right.w + style->padding.x);
+
+ /* edit */
+ if (*state == NK_PROPERTY_EDIT) {
+ size = font->width(font->userdata, font->height, buffer, *len);
+ size += style->edit.cursor_size;
+ length = len;
+ dst = buffer;
+ } else {
+ switch (variant->kind) {
+ default: break;
+ case NK_PROPERTY_INT:
+ nk_itoa(string, variant->value.i);
+ num_len = nk_strlen(string);
+ break;
+ case NK_PROPERTY_FLOAT:
+ NK_DTOA(string, (double)variant->value.f);
+ num_len = nk_string_float_limit(string, NK_MAX_FLOAT_PRECISION);
+ break;
+ case NK_PROPERTY_DOUBLE:
+ NK_DTOA(string, variant->value.d);
+ num_len = nk_string_float_limit(string, NK_MAX_FLOAT_PRECISION);
+ break;
+ }
+ size = font->width(font->userdata, font->height, string, num_len);
+ dst = string;
+ length = &num_len;
+ }
+
+ edit.w = (float)size + 2 * style->padding.x;
+ edit.w = NK_MIN(edit.w, right.x - (label.x + label.w));
+ edit.x = right.x - (edit.w + style->padding.x);
+ edit.y = property.y + style->border;
+ edit.h = property.h - (2 * style->border);
+
+ /* empty left space activator */
+ empty.w = edit.x - (label.x + label.w);
+ empty.x = label.x + label.w;
+ empty.y = property.y;
+ empty.h = property.h;
+
+ /* update property */
+ old = (*state == NK_PROPERTY_EDIT);
+ nk_property_behavior(ws, in, property, label, edit, empty, state, variant, inc_per_pixel);
+
+ /* draw property */
+ if (style->draw_begin) style->draw_begin(out, style->userdata);
+ nk_draw_property(out, style, &property, &label, *ws, name, name_len, font);
+ if (style->draw_end) style->draw_end(out, style->userdata);
+
+    /* execute left decrement button */
+ if (nk_do_button_symbol(ws, out, left, style->sym_left, behavior, &style->dec_button, in, font)) {
+ switch (variant->kind) {
+ default: break;
+ case NK_PROPERTY_INT:
+ variant->value.i = NK_CLAMP(variant->min_value.i, variant->value.i - variant->step.i, variant->max_value.i); break;
+ case NK_PROPERTY_FLOAT:
+ variant->value.f = NK_CLAMP(variant->min_value.f, variant->value.f - variant->step.f, variant->max_value.f); break;
+ case NK_PROPERTY_DOUBLE:
+ variant->value.d = NK_CLAMP(variant->min_value.d, variant->value.d - variant->step.d, variant->max_value.d); break;
+ }
+ }
+    /* execute right increment button */
+ if (nk_do_button_symbol(ws, out, right, style->sym_right, behavior, &style->inc_button, in, font)) {
+ switch (variant->kind) {
+ default: break;
+ case NK_PROPERTY_INT:
+ variant->value.i = NK_CLAMP(variant->min_value.i, variant->value.i + variant->step.i, variant->max_value.i); break;
+ case NK_PROPERTY_FLOAT:
+ variant->value.f = NK_CLAMP(variant->min_value.f, variant->value.f + variant->step.f, variant->max_value.f); break;
+ case NK_PROPERTY_DOUBLE:
+ variant->value.d = NK_CLAMP(variant->min_value.d, variant->value.d + variant->step.d, variant->max_value.d); break;
+ }
+ }
+ if (old != NK_PROPERTY_EDIT && (*state == NK_PROPERTY_EDIT)) {
+ /* property has been activated so setup buffer */
+ NK_MEMCPY(buffer, dst, (nk_size)*length);
+ *cursor = nk_utf_len(buffer, *length);
+ *len = *length;
+ length = len;
+ dst = buffer;
+ active = 0;
+ } else active = (*state == NK_PROPERTY_EDIT);
+
+ /* execute and run text edit field */
+ nk_textedit_clear_state(text_edit, NK_TEXT_EDIT_SINGLE_LINE, filters[filter]);
+ text_edit->active = (unsigned char)active;
+ text_edit->string.len = *length;
+ text_edit->cursor = NK_CLAMP(0, *cursor, *length);
+ text_edit->select_start = NK_CLAMP(0,*select_begin, *length);
+ text_edit->select_end = NK_CLAMP(0,*select_end, *length);
+ text_edit->string.buffer.allocated = (nk_size)*length;
+ text_edit->string.buffer.memory.size = NK_MAX_NUMBER_BUFFER;
+ text_edit->string.buffer.memory.ptr = dst;
+ text_edit->string.buffer.size = NK_MAX_NUMBER_BUFFER;
+ text_edit->mode = NK_TEXT_EDIT_MODE_INSERT;
+ nk_do_edit(ws, out, edit, NK_EDIT_FIELD|NK_EDIT_AUTO_SELECT,
+ filters[filter], text_edit, &style->edit, (*state == NK_PROPERTY_EDIT) ? in: 0, font);
+
+ *length = text_edit->string.len;
+ *cursor = text_edit->cursor;
+ *select_begin = text_edit->select_start;
+ *select_end = text_edit->select_end;
+ if (text_edit->active && nk_input_is_key_pressed(in, NK_KEY_ENTER))
+ text_edit->active = nk_false;
+
+ if (active && !text_edit->active) {
+ /* property is now not active so convert edit text to value*/
+ *state = NK_PROPERTY_DEFAULT;
+ buffer[*len] = '\0';
+ switch (variant->kind) {
+ default: break;
+ case NK_PROPERTY_INT:
+ variant->value.i = nk_strtoi(buffer, 0);
+ variant->value.i = NK_CLAMP(variant->min_value.i, variant->value.i, variant->max_value.i);
+ break;
+ case NK_PROPERTY_FLOAT:
+ nk_string_float_limit(buffer, NK_MAX_FLOAT_PRECISION);
+ variant->value.f = nk_strtof(buffer, 0);
+ variant->value.f = NK_CLAMP(variant->min_value.f, variant->value.f, variant->max_value.f);
+ break;
+ case NK_PROPERTY_DOUBLE:
+ nk_string_float_limit(buffer, NK_MAX_FLOAT_PRECISION);
+ variant->value.d = nk_strtod(buffer, 0);
+ variant->value.d = NK_CLAMP(variant->min_value.d, variant->value.d, variant->max_value.d);
+ break;
+ }
+ }
+}
+NK_LIB struct nk_property_variant
+nk_property_variant_int(int value, int min_value, int max_value, int step)
+{
+ struct nk_property_variant result;
+ result.kind = NK_PROPERTY_INT;
+ result.value.i = value;
+ result.min_value.i = min_value;
+ result.max_value.i = max_value;
+ result.step.i = step;
+ return result;
+}
+NK_LIB struct nk_property_variant
+nk_property_variant_float(float value, float min_value, float max_value, float step)
+{
+ struct nk_property_variant result;
+ result.kind = NK_PROPERTY_FLOAT;
+ result.value.f = value;
+ result.min_value.f = min_value;
+ result.max_value.f = max_value;
+ result.step.f = step;
+ return result;
+}
+NK_LIB struct nk_property_variant
+nk_property_variant_double(double value, double min_value, double max_value,
+ double step)
+{
+ struct nk_property_variant result;
+ result.kind = NK_PROPERTY_DOUBLE;
+ result.value.d = value;
+ result.min_value.d = min_value;
+ result.max_value.d = max_value;
+ result.step.d = step;
+ return result;
+}
+NK_LIB void
+nk_property(struct nk_context *ctx, const char *name, struct nk_property_variant *variant,
+ float inc_per_pixel, const enum nk_property_filter filter)
+{
+ struct nk_window *win;
+ struct nk_panel *layout;
+ struct nk_input *in;
+ const struct nk_style *style;
+
+ struct nk_rect bounds;
+ enum nk_widget_layout_states s;
+
+ int *state = 0;
+ nk_hash hash = 0;
+ char *buffer = 0;
+ int *len = 0;
+ int *cursor = 0;
+ int *select_begin = 0;
+ int *select_end = 0;
+ int old_state;
+
+ char dummy_buffer[NK_MAX_NUMBER_BUFFER];
+ int dummy_state = NK_PROPERTY_DEFAULT;
+ int dummy_length = 0;
+ int dummy_cursor = 0;
+ int dummy_select_begin = 0;
+ int dummy_select_end = 0;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ if (!ctx || !ctx->current || !ctx->current->layout)
+ return;
+
+ win = ctx->current;
+ layout = win->layout;
+ style = &ctx->style;
+ s = nk_widget(&bounds, ctx);
+ if (!s) return;
+
+ /* calculate hash from name */
+ if (name[0] == '#') {
+ hash = nk_murmur_hash(name, (int)nk_strlen(name), win->property.seq++);
+ name++; /* special number hash */
+ } else hash = nk_murmur_hash(name, (int)nk_strlen(name), 42);
+
+ /* check if property is currently hot item */
+ if (win->property.active && hash == win->property.name) {
+ buffer = win->property.buffer;
+ len = &win->property.length;
+ cursor = &win->property.cursor;
+ state = &win->property.state;
+ select_begin = &win->property.select_start;
+ select_end = &win->property.select_end;
+ } else {
+ buffer = dummy_buffer;
+ len = &dummy_length;
+ cursor = &dummy_cursor;
+ state = &dummy_state;
+ select_begin = &dummy_select_begin;
+ select_end = &dummy_select_end;
+ }
+
+ /* execute property widget */
+ old_state = *state;
+ ctx->text_edit.clip = ctx->clip;
+ in = ((s == NK_WIDGET_ROM && !win->property.active) ||
+ layout->flags & NK_WINDOW_ROM) ? 0 : &ctx->input;
+ nk_do_property(&ctx->last_widget_state, &win->buffer, bounds, name,
+ variant, inc_per_pixel, buffer, len, state, cursor, select_begin,
+ select_end, &style->property, filter, in, style->font, &ctx->text_edit,
+ ctx->button_behavior);
+
+ if (in && *state != NK_PROPERTY_DEFAULT && !win->property.active) {
+ /* current property is now hot */
+ win->property.active = 1;
+ NK_MEMCPY(win->property.buffer, buffer, (nk_size)*len);
+ win->property.length = *len;
+ win->property.cursor = *cursor;
+ win->property.state = *state;
+ win->property.name = hash;
+ win->property.select_start = *select_begin;
+ win->property.select_end = *select_end;
+ if (*state == NK_PROPERTY_DRAG) {
+ ctx->input.mouse.grab = nk_true;
+ ctx->input.mouse.grabbed = nk_true;
+ }
+ }
+ /* check if previously active property is now inactive */
+ if (*state == NK_PROPERTY_DEFAULT && old_state != NK_PROPERTY_DEFAULT) {
+ if (old_state == NK_PROPERTY_DRAG) {
+ ctx->input.mouse.grab = nk_false;
+ ctx->input.mouse.grabbed = nk_false;
+ ctx->input.mouse.ungrab = nk_true;
+ }
+ win->property.select_start = 0;
+ win->property.select_end = 0;
+ win->property.active = 0;
+ }
+}
+NK_API void
+nk_property_int(struct nk_context *ctx, const char *name,
+ int min, int *val, int max, int step, float inc_per_pixel)
+{
+ struct nk_property_variant variant;
+ NK_ASSERT(ctx);
+ NK_ASSERT(name);
+ NK_ASSERT(val);
+
+ if (!ctx || !ctx->current || !name || !val) return;
+ variant = nk_property_variant_int(*val, min, max, step);
+ nk_property(ctx, name, &variant, inc_per_pixel, NK_FILTER_INT);
+ *val = variant.value.i;
+}
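+/* Usage sketch (illustrative; `value` is the caller's variable). As handled in
+   nk_property() above, a leading '#' is not drawn and makes the name hash use
+   the window's running property sequence number instead of a fixed seed:
+
+       static int value = 10;
+       nk_property_int(ctx, "#Value:", 0, &value, 100, 1, 0.5f);
+
+   The float and double variants below follow the same pattern. */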
+NK_API void
+nk_property_float(struct nk_context *ctx, const char *name,
+ float min, float *val, float max, float step, float inc_per_pixel)
+{
+ struct nk_property_variant variant;
+ NK_ASSERT(ctx);
+ NK_ASSERT(name);
+ NK_ASSERT(val);
+
+ if (!ctx || !ctx->current || !name || !val) return;
+ variant = nk_property_variant_float(*val, min, max, step);
+ nk_property(ctx, name, &variant, inc_per_pixel, NK_FILTER_FLOAT);
+ *val = variant.value.f;
+}
+NK_API void
+nk_property_double(struct nk_context *ctx, const char *name,
+ double min, double *val, double max, double step, float inc_per_pixel)
+{
+ struct nk_property_variant variant;
+ NK_ASSERT(ctx);
+ NK_ASSERT(name);
+ NK_ASSERT(val);
+
+ if (!ctx || !ctx->current || !name || !val) return;
+ variant = nk_property_variant_double(*val, min, max, step);
+ nk_property(ctx, name, &variant, inc_per_pixel, NK_FILTER_FLOAT);
+ *val = variant.value.d;
+}
+NK_API int
+nk_propertyi(struct nk_context *ctx, const char *name, int min, int val,
+ int max, int step, float inc_per_pixel)
+{
+ struct nk_property_variant variant;
+ NK_ASSERT(ctx);
+ NK_ASSERT(name);
+
+ if (!ctx || !ctx->current || !name) return val;
+ variant = nk_property_variant_int(val, min, max, step);
+ nk_property(ctx, name, &variant, inc_per_pixel, NK_FILTER_INT);
+ val = variant.value.i;
+ return val;
+}
+NK_API float
+nk_propertyf(struct nk_context *ctx, const char *name, float min,
+ float val, float max, float step, float inc_per_pixel)
+{
+ struct nk_property_variant variant;
+ NK_ASSERT(ctx);
+ NK_ASSERT(name);
+
+ if (!ctx || !ctx->current || !name) return val;
+ variant = nk_property_variant_float(val, min, max, step);
+ nk_property(ctx, name, &variant, inc_per_pixel, NK_FILTER_FLOAT);
+ val = variant.value.f;
+ return val;
+}
+NK_API double
+nk_propertyd(struct nk_context *ctx, const char *name, double min,
+ double val, double max, double step, float inc_per_pixel)
+{
+ struct nk_property_variant variant;
+ NK_ASSERT(ctx);
+ NK_ASSERT(name);
+
+ if (!ctx || !ctx->current || !name) return val;
+ variant = nk_property_variant_double(val, min, max, step);
+ nk_property(ctx, name, &variant, inc_per_pixel, NK_FILTER_FLOAT);
+ val = variant.value.d;
+ return val;
+}
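+/* Usage sketch (illustrative addition by the editor, not part of the original
+ * nuklear sources): a property widget combines dragging, increment/decrement
+ * buttons and an editable text field for one numeric value; the nk_property_*
+ * variants write through a pointer while nk_propertyi/f/d return the new value.
+ * Assuming `ctx` is an initialized nk_context inside an active window:
+ *
+ *     static int   count = 10;
+ *     static float scale = 0.5f;
+ *     nk_layout_row_dynamic(ctx, 25, 1);
+ *     nk_property_int(ctx, "#Count:", 0, &count, 100, 1, 1.0f);
+ *     nk_property_float(ctx, "#Scale:", 0.0f, &scale, 1.0f, 0.05f, 0.005f);
+ *     count = nk_propertyi(ctx, "#Copy:", 0, count, 100, 1, 1.0f);
+ */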
+
+
+
+
+
+/* ==============================================================
+ *
+ * CHART
+ *
+ * ===============================================================*/
+NK_API int
+nk_chart_begin_colored(struct nk_context *ctx, enum nk_chart_type type,
+ struct nk_color color, struct nk_color highlight,
+ int count, float min_value, float max_value)
+{
+ struct nk_window *win;
+ struct nk_chart *chart;
+ const struct nk_style *config;
+ const struct nk_style_chart *style;
+
+ const struct nk_style_item *background;
+ struct nk_rect bounds = {0, 0, 0, 0};
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+
+ if (!ctx || !ctx->current || !ctx->current->layout) return 0;
+ if (!nk_widget(&bounds, ctx)) {
+ chart = &ctx->current->layout->chart;
+ nk_zero(chart, sizeof(*chart));
+ return 0;
+ }
+
+ win = ctx->current;
+ config = &ctx->style;
+ chart = &win->layout->chart;
+ style = &config->chart;
+
+ /* setup basic generic chart */
+ nk_zero(chart, sizeof(*chart));
+ chart->x = bounds.x + style->padding.x;
+ chart->y = bounds.y + style->padding.y;
+ chart->w = bounds.w - 2 * style->padding.x;
+ chart->h = bounds.h - 2 * style->padding.y;
+ chart->w = NK_MAX(chart->w, 2 * style->padding.x);
+ chart->h = NK_MAX(chart->h, 2 * style->padding.y);
+
+ /* add first slot into chart */
+ {struct nk_chart_slot *slot = &chart->slots[chart->slot++];
+ slot->type = type;
+ slot->count = count;
+ slot->color = color;
+ slot->highlight = highlight;
+ slot->min = NK_MIN(min_value, max_value);
+ slot->max = NK_MAX(min_value, max_value);
+ slot->range = slot->max - slot->min;}
+
+ /* draw chart background */
+ background = &style->background;
+ if (background->type == NK_STYLE_ITEM_IMAGE) {
+ nk_draw_image(&win->buffer, bounds, &background->data.image, nk_white);
+ } else {
+ nk_fill_rect(&win->buffer, bounds, style->rounding, style->border_color);
+ nk_fill_rect(&win->buffer, nk_shrink_rect(bounds, style->border),
+ style->rounding, style->background.data.color);
+ }
+ return 1;
+}
+NK_API int
+nk_chart_begin(struct nk_context *ctx, const enum nk_chart_type type,
+ int count, float min_value, float max_value)
+{
+ return nk_chart_begin_colored(ctx, type, ctx->style.chart.color,
+ ctx->style.chart.selected_color, count, min_value, max_value);
+}
+NK_API void
+nk_chart_add_slot_colored(struct nk_context *ctx, const enum nk_chart_type type,
+ struct nk_color color, struct nk_color highlight,
+ int count, float min_value, float max_value)
+{
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ NK_ASSERT(ctx->current->layout->chart.slot < NK_CHART_MAX_SLOT);
+ if (!ctx || !ctx->current || !ctx->current->layout) return;
+ if (ctx->current->layout->chart.slot >= NK_CHART_MAX_SLOT) return;
+
+ /* add another slot into the graph */
+ {struct nk_chart *chart = &ctx->current->layout->chart;
+ struct nk_chart_slot *slot = &chart->slots[chart->slot++];
+ slot->type = type;
+ slot->count = count;
+ slot->color = color;
+ slot->highlight = highlight;
+ slot->min = NK_MIN(min_value, max_value);
+ slot->max = NK_MAX(min_value, max_value);
+ slot->range = slot->max - slot->min;}
+}
+NK_API void
+nk_chart_add_slot(struct nk_context *ctx, const enum nk_chart_type type,
+ int count, float min_value, float max_value)
+{
+ nk_chart_add_slot_colored(ctx, type, ctx->style.chart.color,
+ ctx->style.chart.selected_color, count, min_value, max_value);
+}
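+/* Usage sketch (illustrative addition by the editor, not part of the original
+ * nuklear sources): additional slots overlay several series in one chart and
+ * values are then pushed per slot. Assuming `ctx` is an initialized nk_context
+ * and `a`, `b` are application-provided series of 32 samples each:
+ *
+ *     nk_layout_row_dynamic(ctx, 120, 1);
+ *     if (nk_chart_begin(ctx, NK_CHART_LINES, 32, -1.0f, 1.0f)) {
+ *         int i;
+ *         nk_chart_add_slot(ctx, NK_CHART_COLUMN, 32, 0.0f, 1.0f);
+ *         for (i = 0; i < 32; ++i) {
+ *             nk_chart_push_slot(ctx, a[i], 0);  // slot 0: line series
+ *             nk_chart_push_slot(ctx, b[i], 1);  // slot 1: column series
+ *         }
+ *         nk_chart_end(ctx);
+ *     }
+ */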
+NK_INTERN nk_flags
+nk_chart_push_line(struct nk_context *ctx, struct nk_window *win,
+ struct nk_chart *g, float value, int slot)
+{
+ struct nk_panel *layout = win->layout;
+ const struct nk_input *i = &ctx->input;
+ struct nk_command_buffer *out = &win->buffer;
+
+ nk_flags ret = 0;
+ struct nk_vec2 cur;
+ struct nk_rect bounds;
+ struct nk_color color;
+ float step;
+ float range;
+ float ratio;
+
+ NK_ASSERT(slot >= 0 && slot < NK_CHART_MAX_SLOT);
+ step = g->w / (float)g->slots[slot].count;
+ range = g->slots[slot].max - g->slots[slot].min;
+ ratio = (value - g->slots[slot].min) / range;
+
+ if (g->slots[slot].index == 0) {
+ /* first data point does not have a connection */
+ g->slots[slot].last.x = g->x;
+ g->slots[slot].last.y = (g->y + g->h) - ratio * (float)g->h;
+
+ bounds.x = g->slots[slot].last.x - 2;
+ bounds.y = g->slots[slot].last.y - 2;
+ bounds.w = bounds.h = 4;
+
+ color = g->slots[slot].color;
+ if (!(layout->flags & NK_WINDOW_ROM) &&
+ NK_INBOX(i->mouse.pos.x,i->mouse.pos.y, g->slots[slot].last.x-3, g->slots[slot].last.y-3, 6, 6)){
+ ret = nk_input_is_mouse_hovering_rect(i, bounds) ? NK_CHART_HOVERING : 0;
+ ret |= (i->mouse.buttons[NK_BUTTON_LEFT].down &&
+ i->mouse.buttons[NK_BUTTON_LEFT].clicked) ? NK_CHART_CLICKED: 0;
+ color = g->slots[slot].highlight;
+ }
+ nk_fill_rect(out, bounds, 0, color);
+ g->slots[slot].index += 1;
+ return ret;
+ }
+
+ /* draw a line between the last data point and the new one */
+ color = g->slots[slot].color;
+ cur.x = g->x + (float)(step * (float)g->slots[slot].index);
+ cur.y = (g->y + g->h) - (ratio * (float)g->h);
+ nk_stroke_line(out, g->slots[slot].last.x, g->slots[slot].last.y, cur.x, cur.y, 1.0f, color);
+
+ bounds.x = cur.x - 3;
+ bounds.y = cur.y - 3;
+ bounds.w = bounds.h = 6;
+
+ /* user selection of current data point */
+ if (!(layout->flags & NK_WINDOW_ROM)) {
+ if (nk_input_is_mouse_hovering_rect(i, bounds)) {
+ ret = NK_CHART_HOVERING;
+ ret |= (!i->mouse.buttons[NK_BUTTON_LEFT].down &&
+ i->mouse.buttons[NK_BUTTON_LEFT].clicked) ? NK_CHART_CLICKED: 0;
+ color = g->slots[slot].highlight;
+ }
+ }
+ nk_fill_rect(out, nk_rect(cur.x - 2, cur.y - 2, 4, 4), 0, color);
+
+ /* save current data point position */
+ g->slots[slot].last.x = cur.x;
+ g->slots[slot].last.y = cur.y;
+ g->slots[slot].index += 1;
+ return ret;
+}
+NK_INTERN nk_flags
+nk_chart_push_column(const struct nk_context *ctx, struct nk_window *win,
+ struct nk_chart *chart, float value, int slot)
+{
+ struct nk_command_buffer *out = &win->buffer;
+ const struct nk_input *in = &ctx->input;
+ struct nk_panel *layout = win->layout;
+
+ float ratio;
+ nk_flags ret = 0;
+ struct nk_color color;
+ struct nk_rect item = {0,0,0,0};
+
+ NK_ASSERT(slot >= 0 && slot < NK_CHART_MAX_SLOT);
+ if (chart->slots[slot].index >= chart->slots[slot].count)
+ return nk_false;
+ if (chart->slots[slot].count) {
+ float padding = (float)(chart->slots[slot].count-1);
+ item.w = (chart->w - padding) / (float)(chart->slots[slot].count);
+ }
+
+ /* calculate bounds of current bar chart entry */
+    color = chart->slots[slot].color;
+ item.h = chart->h * NK_ABS((value/chart->slots[slot].range));
+ if (value >= 0) {
+ ratio = (value + NK_ABS(chart->slots[slot].min)) / NK_ABS(chart->slots[slot].range);
+ item.y = (chart->y + chart->h) - chart->h * ratio;
+ } else {
+ ratio = (value - chart->slots[slot].max) / chart->slots[slot].range;
+ item.y = chart->y + (chart->h * NK_ABS(ratio)) - item.h;
+ }
+ item.x = chart->x + ((float)chart->slots[slot].index * item.w);
+ item.x = item.x + ((float)chart->slots[slot].index);
+
+ /* user chart bar selection */
+ if (!(layout->flags & NK_WINDOW_ROM) &&
+ NK_INBOX(in->mouse.pos.x,in->mouse.pos.y,item.x,item.y,item.w,item.h)) {
+ ret = NK_CHART_HOVERING;
+ ret |= (!in->mouse.buttons[NK_BUTTON_LEFT].down &&
+ in->mouse.buttons[NK_BUTTON_LEFT].clicked) ? NK_CHART_CLICKED: 0;
+ color = chart->slots[slot].highlight;
+ }
+ nk_fill_rect(out, item, 0, color);
+ chart->slots[slot].index += 1;
+ return ret;
+}
+NK_API nk_flags
+nk_chart_push_slot(struct nk_context *ctx, float value, int slot)
+{
+ nk_flags flags;
+ struct nk_window *win;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(slot >= 0 && slot < NK_CHART_MAX_SLOT);
+ NK_ASSERT(slot < ctx->current->layout->chart.slot);
+ if (!ctx || !ctx->current || slot >= NK_CHART_MAX_SLOT) return nk_false;
+ if (slot >= ctx->current->layout->chart.slot) return nk_false;
+
+ win = ctx->current;
+ if (win->layout->chart.slot < slot) return nk_false;
+ switch (win->layout->chart.slots[slot].type) {
+ case NK_CHART_LINES:
+ flags = nk_chart_push_line(ctx, win, &win->layout->chart, value, slot); break;
+ case NK_CHART_COLUMN:
+ flags = nk_chart_push_column(ctx, win, &win->layout->chart, value, slot); break;
+ default:
+ case NK_CHART_MAX:
+ flags = 0;
+ }
+ return flags;
+}
+NK_API nk_flags
+nk_chart_push(struct nk_context *ctx, float value)
+{
+ return nk_chart_push_slot(ctx, value, 0);
+}
+NK_API void
+nk_chart_end(struct nk_context *ctx)
+{
+ struct nk_window *win;
+ struct nk_chart *chart;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ if (!ctx || !ctx->current)
+ return;
+
+ win = ctx->current;
+ chart = &win->layout->chart;
+ NK_MEMSET(chart, 0, sizeof(*chart));
+ return;
+}
+NK_API void
+nk_plot(struct nk_context *ctx, enum nk_chart_type type, const float *values,
+ int count, int offset)
+{
+ int i = 0;
+ float min_value;
+ float max_value;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(values);
+ if (!ctx || !values || !count) return;
+
+ min_value = values[offset];
+ max_value = values[offset];
+ for (i = 0; i < count; ++i) {
+ min_value = NK_MIN(values[i + offset], min_value);
+ max_value = NK_MAX(values[i + offset], max_value);
+ }
+
+ if (nk_chart_begin(ctx, type, count, min_value, max_value)) {
+ for (i = 0; i < count; ++i)
+ nk_chart_push(ctx, values[i + offset]);
+ nk_chart_end(ctx);
+ }
+}
+NK_API void
+nk_plot_function(struct nk_context *ctx, enum nk_chart_type type, void *userdata,
+ float(*value_getter)(void* user, int index), int count, int offset)
+{
+ int i = 0;
+ float min_value;
+ float max_value;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(value_getter);
+ if (!ctx || !value_getter || !count) return;
+
+ max_value = min_value = value_getter(userdata, offset);
+ for (i = 0; i < count; ++i) {
+ float value = value_getter(userdata, i + offset);
+ min_value = NK_MIN(value, min_value);
+ max_value = NK_MAX(value, max_value);
+ }
+
+ if (nk_chart_begin(ctx, type, count, min_value, max_value)) {
+ for (i = 0; i < count; ++i)
+ nk_chart_push(ctx, value_getter(userdata, i + offset));
+ nk_chart_end(ctx);
+ }
+}
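+/* Usage sketch (illustrative addition by the editor, not part of the original
+ * nuklear sources): nk_plot() and nk_plot_function() wrap the begin/push/end
+ * sequence for a single slot and derive min/max from the data themselves.
+ * Assuming `ctx` is an initialized nk_context and `samples` is application data:
+ *
+ *     static const float samples[] = {0.1f, 0.4f, 0.8f, 0.3f, 0.9f};
+ *     nk_layout_row_dynamic(ctx, 120, 1);
+ *     nk_plot(ctx, NK_CHART_LINES, samples, 5, 0);
+ *
+ *     // value_getter variant; `get_sample` and `user_data` are hypothetical
+ *     // application names, with get_sample: float (*)(void *user, int index)
+ *     nk_plot_function(ctx, NK_CHART_COLUMN, user_data, get_sample, 5, 0);
+ */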
+
+
+
+
+
+/* ==============================================================
+ *
+ * COLOR PICKER
+ *
+ * ===============================================================*/
+NK_LIB int
+nk_color_picker_behavior(nk_flags *state,
+ const struct nk_rect *bounds, const struct nk_rect *matrix,
+ const struct nk_rect *hue_bar, const struct nk_rect *alpha_bar,
+ struct nk_colorf *color, const struct nk_input *in)
+{
+ float hsva[4];
+ int value_changed = 0;
+ int hsv_changed = 0;
+
+ NK_ASSERT(state);
+ NK_ASSERT(matrix);
+ NK_ASSERT(hue_bar);
+ NK_ASSERT(color);
+
+ /* color matrix */
+ nk_colorf_hsva_fv(hsva, *color);
+ if (nk_button_behavior(state, *matrix, in, NK_BUTTON_REPEATER)) {
+ hsva[1] = NK_SATURATE((in->mouse.pos.x - matrix->x) / (matrix->w-1));
+ hsva[2] = 1.0f - NK_SATURATE((in->mouse.pos.y - matrix->y) / (matrix->h-1));
+ value_changed = hsv_changed = 1;
+ }
+ /* hue bar */
+ if (nk_button_behavior(state, *hue_bar, in, NK_BUTTON_REPEATER)) {
+ hsva[0] = NK_SATURATE((in->mouse.pos.y - hue_bar->y) / (hue_bar->h-1));
+ value_changed = hsv_changed = 1;
+ }
+ /* alpha bar */
+ if (alpha_bar) {
+ if (nk_button_behavior(state, *alpha_bar, in, NK_BUTTON_REPEATER)) {
+ hsva[3] = 1.0f - NK_SATURATE((in->mouse.pos.y - alpha_bar->y) / (alpha_bar->h-1));
+ value_changed = 1;
+ }
+ }
+ nk_widget_state_reset(state);
+ if (hsv_changed) {
+ *color = nk_hsva_colorfv(hsva);
+ *state = NK_WIDGET_STATE_ACTIVE;
+ }
+ if (value_changed) {
+ color->a = hsva[3];
+ *state = NK_WIDGET_STATE_ACTIVE;
+ }
+ /* set color picker widget state */
+ if (nk_input_is_mouse_hovering_rect(in, *bounds))
+ *state = NK_WIDGET_STATE_HOVERED;
+ if (*state & NK_WIDGET_STATE_HOVER && !nk_input_is_mouse_prev_hovering_rect(in, *bounds))
+ *state |= NK_WIDGET_STATE_ENTERED;
+ else if (nk_input_is_mouse_prev_hovering_rect(in, *bounds))
+ *state |= NK_WIDGET_STATE_LEFT;
+ return value_changed;
+}
+NK_LIB void
+nk_draw_color_picker(struct nk_command_buffer *o, const struct nk_rect *matrix,
+ const struct nk_rect *hue_bar, const struct nk_rect *alpha_bar,
+ struct nk_colorf col)
+{
+ NK_STORAGE const struct nk_color black = {0,0,0,255};
+ NK_STORAGE const struct nk_color white = {255, 255, 255, 255};
+ NK_STORAGE const struct nk_color black_trans = {0,0,0,0};
+
+ const float crosshair_size = 7.0f;
+ struct nk_color temp;
+ float hsva[4];
+ float line_y;
+ int i;
+
+ NK_ASSERT(o);
+ NK_ASSERT(matrix);
+ NK_ASSERT(hue_bar);
+
+ /* draw hue bar */
+ nk_colorf_hsva_fv(hsva, col);
+ for (i = 0; i < 6; ++i) {
+ NK_GLOBAL const struct nk_color hue_colors[] = {
+ {255, 0, 0, 255}, {255,255,0,255}, {0,255,0,255}, {0, 255,255,255},
+ {0,0,255,255}, {255, 0, 255, 255}, {255, 0, 0, 255}
+ };
+ nk_fill_rect_multi_color(o,
+ nk_rect(hue_bar->x, hue_bar->y + (float)i * (hue_bar->h/6.0f) + 0.5f,
+ hue_bar->w, (hue_bar->h/6.0f) + 0.5f), hue_colors[i], hue_colors[i],
+ hue_colors[i+1], hue_colors[i+1]);
+ }
+ line_y = (float)(int)(hue_bar->y + hsva[0] * matrix->h + 0.5f);
+ nk_stroke_line(o, hue_bar->x-1, line_y, hue_bar->x + hue_bar->w + 2,
+ line_y, 1, nk_rgb(255,255,255));
+
+ /* draw alpha bar */
+ if (alpha_bar) {
+ float alpha = NK_SATURATE(col.a);
+ line_y = (float)(int)(alpha_bar->y + (1.0f - alpha) * matrix->h + 0.5f);
+
+ nk_fill_rect_multi_color(o, *alpha_bar, white, white, black, black);
+ nk_stroke_line(o, alpha_bar->x-1, line_y, alpha_bar->x + alpha_bar->w + 2,
+ line_y, 1, nk_rgb(255,255,255));
+ }
+
+ /* draw color matrix */
+ temp = nk_hsv_f(hsva[0], 1.0f, 1.0f);
+ nk_fill_rect_multi_color(o, *matrix, white, temp, temp, white);
+ nk_fill_rect_multi_color(o, *matrix, black_trans, black_trans, black, black);
+
+ /* draw cross-hair */
+ {struct nk_vec2 p; float S = hsva[1]; float V = hsva[2];
+ p.x = (float)(int)(matrix->x + S * matrix->w);
+ p.y = (float)(int)(matrix->y + (1.0f - V) * matrix->h);
+ nk_stroke_line(o, p.x - crosshair_size, p.y, p.x-2, p.y, 1.0f, white);
+ nk_stroke_line(o, p.x + crosshair_size + 1, p.y, p.x+3, p.y, 1.0f, white);
+ nk_stroke_line(o, p.x, p.y + crosshair_size + 1, p.x, p.y+3, 1.0f, white);
+ nk_stroke_line(o, p.x, p.y - crosshair_size, p.x, p.y-2, 1.0f, white);}
+}
+NK_LIB int
+nk_do_color_picker(nk_flags *state,
+ struct nk_command_buffer *out, struct nk_colorf *col,
+ enum nk_color_format fmt, struct nk_rect bounds,
+ struct nk_vec2 padding, const struct nk_input *in,
+ const struct nk_user_font *font)
+{
+ int ret = 0;
+ struct nk_rect matrix;
+ struct nk_rect hue_bar;
+ struct nk_rect alpha_bar;
+ float bar_w;
+
+ NK_ASSERT(out);
+ NK_ASSERT(col);
+ NK_ASSERT(state);
+ NK_ASSERT(font);
+ if (!out || !col || !state || !font)
+ return ret;
+
+ bar_w = font->height;
+ bounds.x += padding.x;
+    bounds.y += padding.y;
+ bounds.w -= 2 * padding.x;
+ bounds.h -= 2 * padding.y;
+
+ matrix.x = bounds.x;
+ matrix.y = bounds.y;
+ matrix.h = bounds.h;
+ matrix.w = bounds.w - (3 * padding.x + 2 * bar_w);
+
+ hue_bar.w = bar_w;
+ hue_bar.y = bounds.y;
+ hue_bar.h = matrix.h;
+ hue_bar.x = matrix.x + matrix.w + padding.x;
+
+ alpha_bar.x = hue_bar.x + hue_bar.w + padding.x;
+ alpha_bar.y = bounds.y;
+ alpha_bar.w = bar_w;
+ alpha_bar.h = matrix.h;
+
+ ret = nk_color_picker_behavior(state, &bounds, &matrix, &hue_bar,
+ (fmt == NK_RGBA) ? &alpha_bar:0, col, in);
+ nk_draw_color_picker(out, &matrix, &hue_bar, (fmt == NK_RGBA) ? &alpha_bar:0, *col);
+ return ret;
+}
+NK_API int
+nk_color_pick(struct nk_context * ctx, struct nk_colorf *color,
+ enum nk_color_format fmt)
+{
+ struct nk_window *win;
+ struct nk_panel *layout;
+ const struct nk_style *config;
+ const struct nk_input *in;
+
+ enum nk_widget_layout_states state;
+ struct nk_rect bounds;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(color);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ if (!ctx || !ctx->current || !ctx->current->layout || !color)
+ return 0;
+
+ win = ctx->current;
+ config = &ctx->style;
+ layout = win->layout;
+ state = nk_widget(&bounds, ctx);
+ if (!state) return 0;
+ in = (state == NK_WIDGET_ROM || layout->flags & NK_WINDOW_ROM) ? 0 : &ctx->input;
+ return nk_do_color_picker(&ctx->last_widget_state, &win->buffer, color, fmt, bounds,
+ nk_vec2(0,0), in, config->font);
+}
+NK_API struct nk_colorf
+nk_color_picker(struct nk_context *ctx, struct nk_colorf color,
+ enum nk_color_format fmt)
+{
+ nk_color_pick(ctx, &color, fmt);
+ return color;
+}
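+/* Usage sketch (illustrative addition by the editor, not part of the original
+ * nuklear sources): nk_color_picker() returns the possibly modified color by
+ * value, while nk_color_pick() writes through the pointer and reports whether
+ * the color changed. Assuming `ctx` is an initialized nk_context:
+ *
+ *     static struct nk_colorf picked = {0.31f, 0.52f, 0.85f, 1.0f};
+ *     nk_layout_row_dynamic(ctx, 180, 1);
+ *     picked = nk_color_picker(ctx, picked, NK_RGBA);
+ *     // or, to react only on change:
+ *     // if (nk_color_pick(ctx, &picked, NK_RGBA)) { ... }
+ */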
+
+
+
+
+
+/* ==============================================================
+ *
+ * COMBO
+ *
+ * ===============================================================*/
+NK_INTERN int
+nk_combo_begin(struct nk_context *ctx, struct nk_window *win,
+ struct nk_vec2 size, int is_clicked, struct nk_rect header)
+{
+ struct nk_window *popup;
+ int is_open = 0;
+ int is_active = 0;
+ struct nk_rect body;
+ nk_hash hash;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ if (!ctx || !ctx->current || !ctx->current->layout)
+ return 0;
+
+ popup = win->popup.win;
+ body.x = header.x;
+ body.w = size.x;
+ body.y = header.y + header.h-ctx->style.window.combo_border;
+ body.h = size.y;
+
+ hash = win->popup.combo_count++;
+ is_open = (popup) ? nk_true:nk_false;
+ is_active = (popup && (win->popup.name == hash) && win->popup.type == NK_PANEL_COMBO);
+ if ((is_clicked && is_open && !is_active) || (is_open && !is_active) ||
+ (!is_open && !is_active && !is_clicked)) return 0;
+ if (!nk_nonblock_begin(ctx, 0, body,
+ (is_clicked && is_open)?nk_rect(0,0,0,0):header, NK_PANEL_COMBO)) return 0;
+
+ win->popup.type = NK_PANEL_COMBO;
+ win->popup.name = hash;
+ return 1;
+}
+NK_API int
+nk_combo_begin_text(struct nk_context *ctx, const char *selected, int len,
+ struct nk_vec2 size)
+{
+ const struct nk_input *in;
+ struct nk_window *win;
+ struct nk_style *style;
+
+ enum nk_widget_layout_states s;
+ int is_clicked = nk_false;
+ struct nk_rect header;
+ const struct nk_style_item *background;
+ struct nk_text text;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(selected);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ if (!ctx || !ctx->current || !ctx->current->layout || !selected)
+ return 0;
+
+ win = ctx->current;
+ style = &ctx->style;
+ s = nk_widget(&header, ctx);
+ if (s == NK_WIDGET_INVALID)
+ return 0;
+
+ in = (win->layout->flags & NK_WINDOW_ROM || s == NK_WIDGET_ROM)? 0: &ctx->input;
+ if (nk_button_behavior(&ctx->last_widget_state, header, in, NK_BUTTON_DEFAULT))
+ is_clicked = nk_true;
+
+ /* draw combo box header background and border */
+ if (ctx->last_widget_state & NK_WIDGET_STATE_ACTIVED) {
+ background = &style->combo.active;
+ text.text = style->combo.label_active;
+ } else if (ctx->last_widget_state & NK_WIDGET_STATE_HOVER) {
+ background = &style->combo.hover;
+ text.text = style->combo.label_hover;
+ } else {
+ background = &style->combo.normal;
+ text.text = style->combo.label_normal;
+ }
+ if (background->type == NK_STYLE_ITEM_IMAGE) {
+ text.background = nk_rgba(0,0,0,0);
+ nk_draw_image(&win->buffer, header, &background->data.image, nk_white);
+ } else {
+ text.background = background->data.color;
+ nk_fill_rect(&win->buffer, header, style->combo.rounding, background->data.color);
+ nk_stroke_rect(&win->buffer, header, style->combo.rounding, style->combo.border, style->combo.border_color);
+ }
+ {
+ /* print currently selected text item */
+ struct nk_rect label;
+ struct nk_rect button;
+ struct nk_rect content;
+
+ enum nk_symbol_type sym;
+ if (ctx->last_widget_state & NK_WIDGET_STATE_HOVER)
+ sym = style->combo.sym_hover;
+ else if (is_clicked)
+ sym = style->combo.sym_active;
+ else sym = style->combo.sym_normal;
+
+ /* calculate button */
+ button.w = header.h - 2 * style->combo.button_padding.y;
+ button.x = (header.x + header.w - header.h) - style->combo.button_padding.x;
+ button.y = header.y + style->combo.button_padding.y;
+ button.h = button.w;
+
+ content.x = button.x + style->combo.button.padding.x;
+ content.y = button.y + style->combo.button.padding.y;
+ content.w = button.w - 2 * style->combo.button.padding.x;
+ content.h = button.h - 2 * style->combo.button.padding.y;
+
+ /* draw selected label */
+ text.padding = nk_vec2(0,0);
+ label.x = header.x + style->combo.content_padding.x;
+ label.y = header.y + style->combo.content_padding.y;
+        label.w = button.x - (style->combo.content_padding.x + style->combo.spacing.x) - label.x;
+ label.h = header.h - 2 * style->combo.content_padding.y;
+ nk_widget_text(&win->buffer, label, selected, len, &text,
+ NK_TEXT_LEFT, ctx->style.font);
+
+ /* draw open/close button */
+ nk_draw_button_symbol(&win->buffer, &button, &content, ctx->last_widget_state,
+ &ctx->style.combo.button, sym, style->font);
+ }
+ return nk_combo_begin(ctx, win, size, is_clicked, header);
+}
+NK_API int
+nk_combo_begin_label(struct nk_context *ctx, const char *selected, struct nk_vec2 size)
+{
+ return nk_combo_begin_text(ctx, selected, nk_strlen(selected), size);
+}
+NK_API int
+nk_combo_begin_color(struct nk_context *ctx, struct nk_color color, struct nk_vec2 size)
+{
+ struct nk_window *win;
+ struct nk_style *style;
+ const struct nk_input *in;
+
+ struct nk_rect header;
+ int is_clicked = nk_false;
+ enum nk_widget_layout_states s;
+ const struct nk_style_item *background;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ if (!ctx || !ctx->current || !ctx->current->layout)
+ return 0;
+
+ win = ctx->current;
+ style = &ctx->style;
+ s = nk_widget(&header, ctx);
+ if (s == NK_WIDGET_INVALID)
+ return 0;
+
+ in = (win->layout->flags & NK_WINDOW_ROM || s == NK_WIDGET_ROM)? 0: &ctx->input;
+ if (nk_button_behavior(&ctx->last_widget_state, header, in, NK_BUTTON_DEFAULT))
+ is_clicked = nk_true;
+
+ /* draw combo box header background and border */
+ if (ctx->last_widget_state & NK_WIDGET_STATE_ACTIVED)
+ background = &style->combo.active;
+ else if (ctx->last_widget_state & NK_WIDGET_STATE_HOVER)
+ background = &style->combo.hover;
+ else background = &style->combo.normal;
+
+ if (background->type == NK_STYLE_ITEM_IMAGE) {
+ nk_draw_image(&win->buffer, header, &background->data.image,nk_white);
+ } else {
+ nk_fill_rect(&win->buffer, header, style->combo.rounding, background->data.color);
+ nk_stroke_rect(&win->buffer, header, style->combo.rounding, style->combo.border, style->combo.border_color);
+ }
+ {
+ struct nk_rect content;
+ struct nk_rect button;
+ struct nk_rect bounds;
+
+ enum nk_symbol_type sym;
+ if (ctx->last_widget_state & NK_WIDGET_STATE_HOVER)
+ sym = style->combo.sym_hover;
+ else if (is_clicked)
+ sym = style->combo.sym_active;
+ else sym = style->combo.sym_normal;
+
+ /* calculate button */
+ button.w = header.h - 2 * style->combo.button_padding.y;
+ button.x = (header.x + header.w - header.h) - style->combo.button_padding.x;
+ button.y = header.y + style->combo.button_padding.y;
+ button.h = button.w;
+
+ content.x = button.x + style->combo.button.padding.x;
+ content.y = button.y + style->combo.button.padding.y;
+ content.w = button.w - 2 * style->combo.button.padding.x;
+ content.h = button.h - 2 * style->combo.button.padding.y;
+
+ /* draw color */
+ bounds.h = header.h - 4 * style->combo.content_padding.y;
+ bounds.y = header.y + 2 * style->combo.content_padding.y;
+ bounds.x = header.x + 2 * style->combo.content_padding.x;
+ bounds.w = (button.x - (style->combo.content_padding.x + style->combo.spacing.x)) - bounds.x;
+ nk_fill_rect(&win->buffer, bounds, 0, color);
+
+ /* draw open/close button */
+ nk_draw_button_symbol(&win->buffer, &button, &content, ctx->last_widget_state,
+ &ctx->style.combo.button, sym, style->font);
+ }
+ return nk_combo_begin(ctx, win, size, is_clicked, header);
+}
+NK_API int
+nk_combo_begin_symbol(struct nk_context *ctx, enum nk_symbol_type symbol, struct nk_vec2 size)
+{
+ struct nk_window *win;
+ struct nk_style *style;
+ const struct nk_input *in;
+
+ struct nk_rect header;
+ int is_clicked = nk_false;
+ enum nk_widget_layout_states s;
+ const struct nk_style_item *background;
+ struct nk_color sym_background;
+ struct nk_color symbol_color;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ if (!ctx || !ctx->current || !ctx->current->layout)
+ return 0;
+
+ win = ctx->current;
+ style = &ctx->style;
+ s = nk_widget(&header, ctx);
+ if (s == NK_WIDGET_INVALID)
+ return 0;
+
+ in = (win->layout->flags & NK_WINDOW_ROM || s == NK_WIDGET_ROM)? 0: &ctx->input;
+ if (nk_button_behavior(&ctx->last_widget_state, header, in, NK_BUTTON_DEFAULT))
+ is_clicked = nk_true;
+
+ /* draw combo box header background and border */
+ if (ctx->last_widget_state & NK_WIDGET_STATE_ACTIVED) {
+ background = &style->combo.active;
+ symbol_color = style->combo.symbol_active;
+ } else if (ctx->last_widget_state & NK_WIDGET_STATE_HOVER) {
+ background = &style->combo.hover;
+ symbol_color = style->combo.symbol_hover;
+ } else {
+ background = &style->combo.normal;
+        symbol_color = style->combo.symbol_normal;
+ }
+
+ if (background->type == NK_STYLE_ITEM_IMAGE) {
+ sym_background = nk_rgba(0,0,0,0);
+ nk_draw_image(&win->buffer, header, &background->data.image, nk_white);
+ } else {
+ sym_background = background->data.color;
+ nk_fill_rect(&win->buffer, header, style->combo.rounding, background->data.color);
+ nk_stroke_rect(&win->buffer, header, style->combo.rounding, style->combo.border, style->combo.border_color);
+ }
+ {
+ struct nk_rect bounds = {0,0,0,0};
+ struct nk_rect content;
+ struct nk_rect button;
+
+ enum nk_symbol_type sym;
+ if (ctx->last_widget_state & NK_WIDGET_STATE_HOVER)
+ sym = style->combo.sym_hover;
+ else if (is_clicked)
+ sym = style->combo.sym_active;
+ else sym = style->combo.sym_normal;
+
+ /* calculate button */
+ button.w = header.h - 2 * style->combo.button_padding.y;
+        button.x = (header.x + header.w - header.h) - style->combo.button_padding.x;
+ button.y = header.y + style->combo.button_padding.y;
+ button.h = button.w;
+
+ content.x = button.x + style->combo.button.padding.x;
+ content.y = button.y + style->combo.button.padding.y;
+ content.w = button.w - 2 * style->combo.button.padding.x;
+ content.h = button.h - 2 * style->combo.button.padding.y;
+
+ /* draw symbol */
+ bounds.h = header.h - 2 * style->combo.content_padding.y;
+ bounds.y = header.y + style->combo.content_padding.y;
+ bounds.x = header.x + style->combo.content_padding.x;
+ bounds.w = (button.x - style->combo.content_padding.y) - bounds.x;
+ nk_draw_symbol(&win->buffer, symbol, bounds, sym_background, symbol_color,
+ 1.0f, style->font);
+
+ /* draw open/close button */
+ nk_draw_button_symbol(&win->buffer, &bounds, &content, ctx->last_widget_state,
+ &ctx->style.combo.button, sym, style->font);
+ }
+ return nk_combo_begin(ctx, win, size, is_clicked, header);
+}
+NK_API int
+nk_combo_begin_symbol_text(struct nk_context *ctx, const char *selected, int len,
+ enum nk_symbol_type symbol, struct nk_vec2 size)
+{
+ struct nk_window *win;
+ struct nk_style *style;
+ struct nk_input *in;
+
+ struct nk_rect header;
+ int is_clicked = nk_false;
+ enum nk_widget_layout_states s;
+ const struct nk_style_item *background;
+ struct nk_color symbol_color;
+ struct nk_text text;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ if (!ctx || !ctx->current || !ctx->current->layout)
+ return 0;
+
+ win = ctx->current;
+ style = &ctx->style;
+ s = nk_widget(&header, ctx);
+ if (!s) return 0;
+
+ in = (win->layout->flags & NK_WINDOW_ROM || s == NK_WIDGET_ROM)? 0: &ctx->input;
+ if (nk_button_behavior(&ctx->last_widget_state, header, in, NK_BUTTON_DEFAULT))
+ is_clicked = nk_true;
+
+ /* draw combo box header background and border */
+ if (ctx->last_widget_state & NK_WIDGET_STATE_ACTIVED) {
+ background = &style->combo.active;
+ symbol_color = style->combo.symbol_active;
+ text.text = style->combo.label_active;
+ } else if (ctx->last_widget_state & NK_WIDGET_STATE_HOVER) {
+ background = &style->combo.hover;
+ symbol_color = style->combo.symbol_hover;
+ text.text = style->combo.label_hover;
+ } else {
+ background = &style->combo.normal;
+ symbol_color = style->combo.symbol_normal;
+ text.text = style->combo.label_normal;
+ }
+ if (background->type == NK_STYLE_ITEM_IMAGE) {
+ text.background = nk_rgba(0,0,0,0);
+ nk_draw_image(&win->buffer, header, &background->data.image, nk_white);
+ } else {
+ text.background = background->data.color;
+ nk_fill_rect(&win->buffer, header, style->combo.rounding, background->data.color);
+ nk_stroke_rect(&win->buffer, header, style->combo.rounding, style->combo.border, style->combo.border_color);
+ }
+ {
+ struct nk_rect content;
+ struct nk_rect button;
+ struct nk_rect label;
+ struct nk_rect image;
+
+ enum nk_symbol_type sym;
+ if (ctx->last_widget_state & NK_WIDGET_STATE_HOVER)
+ sym = style->combo.sym_hover;
+ else if (is_clicked)
+ sym = style->combo.sym_active;
+ else sym = style->combo.sym_normal;
+
+ /* calculate button */
+ button.w = header.h - 2 * style->combo.button_padding.y;
+ button.x = (header.x + header.w - header.h) - style->combo.button_padding.x;
+ button.y = header.y + style->combo.button_padding.y;
+ button.h = button.w;
+
+ content.x = button.x + style->combo.button.padding.x;
+ content.y = button.y + style->combo.button.padding.y;
+ content.w = button.w - 2 * style->combo.button.padding.x;
+ content.h = button.h - 2 * style->combo.button.padding.y;
+ nk_draw_button_symbol(&win->buffer, &button, &content, ctx->last_widget_state,
+ &ctx->style.combo.button, sym, style->font);
+
+ /* draw symbol */
+ image.x = header.x + style->combo.content_padding.x;
+ image.y = header.y + style->combo.content_padding.y;
+ image.h = header.h - 2 * style->combo.content_padding.y;
+ image.w = image.h;
+ nk_draw_symbol(&win->buffer, symbol, image, text.background, symbol_color,
+ 1.0f, style->font);
+
+ /* draw label */
+ text.padding = nk_vec2(0,0);
+ label.x = image.x + image.w + style->combo.spacing.x + style->combo.content_padding.x;
+ label.y = header.y + style->combo.content_padding.y;
+ label.w = (button.x - style->combo.content_padding.x) - label.x;
+ label.h = header.h - 2 * style->combo.content_padding.y;
+ nk_widget_text(&win->buffer, label, selected, len, &text, NK_TEXT_LEFT, style->font);
+ }
+ return nk_combo_begin(ctx, win, size, is_clicked, header);
+}
+NK_API int
+nk_combo_begin_image(struct nk_context *ctx, struct nk_image img, struct nk_vec2 size)
+{
+ struct nk_window *win;
+ struct nk_style *style;
+ const struct nk_input *in;
+
+ struct nk_rect header;
+ int is_clicked = nk_false;
+ enum nk_widget_layout_states s;
+ const struct nk_style_item *background;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ if (!ctx || !ctx->current || !ctx->current->layout)
+ return 0;
+
+ win = ctx->current;
+ style = &ctx->style;
+ s = nk_widget(&header, ctx);
+ if (s == NK_WIDGET_INVALID)
+ return 0;
+
+ in = (win->layout->flags & NK_WINDOW_ROM || s == NK_WIDGET_ROM)? 0: &ctx->input;
+ if (nk_button_behavior(&ctx->last_widget_state, header, in, NK_BUTTON_DEFAULT))
+ is_clicked = nk_true;
+
+ /* draw combo box header background and border */
+ if (ctx->last_widget_state & NK_WIDGET_STATE_ACTIVED)
+ background = &style->combo.active;
+ else if (ctx->last_widget_state & NK_WIDGET_STATE_HOVER)
+ background = &style->combo.hover;
+ else background = &style->combo.normal;
+
+ if (background->type == NK_STYLE_ITEM_IMAGE) {
+ nk_draw_image(&win->buffer, header, &background->data.image, nk_white);
+ } else {
+ nk_fill_rect(&win->buffer, header, style->combo.rounding, background->data.color);
+ nk_stroke_rect(&win->buffer, header, style->combo.rounding, style->combo.border, style->combo.border_color);
+ }
+ {
+ struct nk_rect bounds = {0,0,0,0};
+ struct nk_rect content;
+ struct nk_rect button;
+
+ enum nk_symbol_type sym;
+ if (ctx->last_widget_state & NK_WIDGET_STATE_HOVER)
+ sym = style->combo.sym_hover;
+ else if (is_clicked)
+ sym = style->combo.sym_active;
+ else sym = style->combo.sym_normal;
+
+ /* calculate button */
+ button.w = header.h - 2 * style->combo.button_padding.y;
+        button.x = (header.x + header.w - header.h) - style->combo.button_padding.x;
+ button.y = header.y + style->combo.button_padding.y;
+ button.h = button.w;
+
+ content.x = button.x + style->combo.button.padding.x;
+ content.y = button.y + style->combo.button.padding.y;
+ content.w = button.w - 2 * style->combo.button.padding.x;
+ content.h = button.h - 2 * style->combo.button.padding.y;
+
+ /* draw image */
+ bounds.h = header.h - 2 * style->combo.content_padding.y;
+ bounds.y = header.y + style->combo.content_padding.y;
+ bounds.x = header.x + style->combo.content_padding.x;
+ bounds.w = (button.x - style->combo.content_padding.y) - bounds.x;
+ nk_draw_image(&win->buffer, bounds, &img, nk_white);
+
+ /* draw open/close button */
+ nk_draw_button_symbol(&win->buffer, &bounds, &content, ctx->last_widget_state,
+ &ctx->style.combo.button, sym, style->font);
+ }
+ return nk_combo_begin(ctx, win, size, is_clicked, header);
+}
+NK_API int
+nk_combo_begin_image_text(struct nk_context *ctx, const char *selected, int len,
+ struct nk_image img, struct nk_vec2 size)
+{
+ struct nk_window *win;
+ struct nk_style *style;
+ struct nk_input *in;
+
+ struct nk_rect header;
+ int is_clicked = nk_false;
+ enum nk_widget_layout_states s;
+ const struct nk_style_item *background;
+ struct nk_text text;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ if (!ctx || !ctx->current || !ctx->current->layout)
+ return 0;
+
+ win = ctx->current;
+ style = &ctx->style;
+ s = nk_widget(&header, ctx);
+ if (!s) return 0;
+
+ in = (win->layout->flags & NK_WINDOW_ROM || s == NK_WIDGET_ROM)? 0: &ctx->input;
+ if (nk_button_behavior(&ctx->last_widget_state, header, in, NK_BUTTON_DEFAULT))
+ is_clicked = nk_true;
+
+ /* draw combo box header background and border */
+ if (ctx->last_widget_state & NK_WIDGET_STATE_ACTIVED) {
+ background = &style->combo.active;
+ text.text = style->combo.label_active;
+ } else if (ctx->last_widget_state & NK_WIDGET_STATE_HOVER) {
+ background = &style->combo.hover;
+ text.text = style->combo.label_hover;
+ } else {
+ background = &style->combo.normal;
+ text.text = style->combo.label_normal;
+ }
+ if (background->type == NK_STYLE_ITEM_IMAGE) {
+ text.background = nk_rgba(0,0,0,0);
+ nk_draw_image(&win->buffer, header, &background->data.image, nk_white);
+ } else {
+ text.background = background->data.color;
+ nk_fill_rect(&win->buffer, header, style->combo.rounding, background->data.color);
+ nk_stroke_rect(&win->buffer, header, style->combo.rounding, style->combo.border, style->combo.border_color);
+ }
+ {
+ struct nk_rect content;
+ struct nk_rect button;
+ struct nk_rect label;
+ struct nk_rect image;
+
+ enum nk_symbol_type sym;
+ if (ctx->last_widget_state & NK_WIDGET_STATE_HOVER)
+ sym = style->combo.sym_hover;
+ else if (is_clicked)
+ sym = style->combo.sym_active;
+ else sym = style->combo.sym_normal;
+
+ /* calculate button */
+ button.w = header.h - 2 * style->combo.button_padding.y;
+ button.x = (header.x + header.w - header.h) - style->combo.button_padding.x;
+ button.y = header.y + style->combo.button_padding.y;
+ button.h = button.w;
+
+ content.x = button.x + style->combo.button.padding.x;
+ content.y = button.y + style->combo.button.padding.y;
+ content.w = button.w - 2 * style->combo.button.padding.x;
+ content.h = button.h - 2 * style->combo.button.padding.y;
+ nk_draw_button_symbol(&win->buffer, &button, &content, ctx->last_widget_state,
+ &ctx->style.combo.button, sym, style->font);
+
+ /* draw image */
+ image.x = header.x + style->combo.content_padding.x;
+ image.y = header.y + style->combo.content_padding.y;
+ image.h = header.h - 2 * style->combo.content_padding.y;
+ image.w = image.h;
+ nk_draw_image(&win->buffer, image, &img, nk_white);
+
+ /* draw label */
+ text.padding = nk_vec2(0,0);
+ label.x = image.x + image.w + style->combo.spacing.x + style->combo.content_padding.x;
+ label.y = header.y + style->combo.content_padding.y;
+ label.w = (button.x - style->combo.content_padding.x) - label.x;
+ label.h = header.h - 2 * style->combo.content_padding.y;
+ nk_widget_text(&win->buffer, label, selected, len, &text, NK_TEXT_LEFT, style->font);
+ }
+ return nk_combo_begin(ctx, win, size, is_clicked, header);
+}
+NK_API int
+nk_combo_begin_symbol_label(struct nk_context *ctx,
+ const char *selected, enum nk_symbol_type type, struct nk_vec2 size)
+{
+ return nk_combo_begin_symbol_text(ctx, selected, nk_strlen(selected), type, size);
+}
+NK_API int
+nk_combo_begin_image_label(struct nk_context *ctx,
+ const char *selected, struct nk_image img, struct nk_vec2 size)
+{
+ return nk_combo_begin_image_text(ctx, selected, nk_strlen(selected), img, size);
+}
+NK_API int
+nk_combo_item_text(struct nk_context *ctx, const char *text, int len,nk_flags align)
+{
+ return nk_contextual_item_text(ctx, text, len, align);
+}
+NK_API int
+nk_combo_item_label(struct nk_context *ctx, const char *label, nk_flags align)
+{
+ return nk_contextual_item_label(ctx, label, align);
+}
+NK_API int
+nk_combo_item_image_text(struct nk_context *ctx, struct nk_image img, const char *text,
+ int len, nk_flags alignment)
+{
+ return nk_contextual_item_image_text(ctx, img, text, len, alignment);
+}
+NK_API int
+nk_combo_item_image_label(struct nk_context *ctx, struct nk_image img,
+ const char *text, nk_flags alignment)
+{
+ return nk_contextual_item_image_label(ctx, img, text, alignment);
+}
+NK_API int
+nk_combo_item_symbol_text(struct nk_context *ctx, enum nk_symbol_type sym,
+ const char *text, int len, nk_flags alignment)
+{
+ return nk_contextual_item_symbol_text(ctx, sym, text, len, alignment);
+}
+NK_API int
+nk_combo_item_symbol_label(struct nk_context *ctx, enum nk_symbol_type sym,
+ const char *label, nk_flags alignment)
+{
+ return nk_contextual_item_symbol_label(ctx, sym, label, alignment);
+}
+NK_API void nk_combo_end(struct nk_context *ctx)
+{
+ nk_contextual_end(ctx);
+}
+NK_API void nk_combo_close(struct nk_context *ctx)
+{
+ nk_contextual_close(ctx);
+}
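+/* Usage sketch (illustrative addition by the editor, not part of the original
+ * nuklear sources): the abstract combo API opens a popup that the caller fills
+ * row by row, so arbitrary widgets can be placed inside the dropdown. Assuming
+ * `ctx` is an initialized nk_context and the statics are application state:
+ *
+ *     static const char *modes[] = {"Fit", "Fill", "Stretch"};
+ *     static int mode = 0;
+ *     nk_layout_row_dynamic(ctx, 25, 1);
+ *     if (nk_combo_begin_label(ctx, modes[mode], nk_vec2(200, 120))) {
+ *         int i;
+ *         nk_layout_row_dynamic(ctx, 25, 1);
+ *         for (i = 0; i < 3; ++i)
+ *             if (nk_combo_item_label(ctx, modes[i], NK_TEXT_LEFT))
+ *                 mode = i;
+ *         nk_combo_end(ctx);
+ *     }
+ */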
+NK_API int
+nk_combo(struct nk_context *ctx, const char **items, int count,
+ int selected, int item_height, struct nk_vec2 size)
+{
+ int i = 0;
+ int max_height;
+ struct nk_vec2 item_spacing;
+ struct nk_vec2 window_padding;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(items);
+ NK_ASSERT(ctx->current);
+ if (!ctx || !items ||!count)
+ return selected;
+
+ item_spacing = ctx->style.window.spacing;
+ window_padding = nk_panel_get_padding(&ctx->style, ctx->current->layout->type);
+ max_height = count * item_height + count * (int)item_spacing.y;
+ max_height += (int)item_spacing.y * 2 + (int)window_padding.y * 2;
+ size.y = NK_MIN(size.y, (float)max_height);
+ if (nk_combo_begin_label(ctx, items[selected], size)) {
+ nk_layout_row_dynamic(ctx, (float)item_height, 1);
+ for (i = 0; i < count; ++i) {
+ if (nk_combo_item_label(ctx, items[i], NK_TEXT_LEFT))
+ selected = i;
+ }
+ nk_combo_end(ctx);
+ }
+ return selected;
+}
+NK_API int
+nk_combo_separator(struct nk_context *ctx, const char *items_separated_by_separator,
+ int separator, int selected, int count, int item_height, struct nk_vec2 size)
+{
+ int i;
+ int max_height;
+ struct nk_vec2 item_spacing;
+ struct nk_vec2 window_padding;
+ const char *current_item;
+ const char *iter;
+ int length = 0;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(items_separated_by_separator);
+ if (!ctx || !items_separated_by_separator)
+ return selected;
+
+ /* calculate popup window */
+ item_spacing = ctx->style.window.spacing;
+ window_padding = nk_panel_get_padding(&ctx->style, ctx->current->layout->type);
+ max_height = count * item_height + count * (int)item_spacing.y;
+ max_height += (int)item_spacing.y * 2 + (int)window_padding.y * 2;
+ size.y = NK_MIN(size.y, (float)max_height);
+
+ /* find selected item */
+ current_item = items_separated_by_separator;
+ for (i = 0; i < count; ++i) {
+ iter = current_item;
+ while (*iter && *iter != separator) iter++;
+ length = (int)(iter - current_item);
+ if (i == selected) break;
+ current_item = iter + 1;
+ }
+
+ if (nk_combo_begin_text(ctx, current_item, length, size)) {
+ current_item = items_separated_by_separator;
+ nk_layout_row_dynamic(ctx, (float)item_height, 1);
+ for (i = 0; i < count; ++i) {
+ iter = current_item;
+ while (*iter && *iter != separator) iter++;
+ length = (int)(iter - current_item);
+ if (nk_combo_item_text(ctx, current_item, length, NK_TEXT_LEFT))
+ selected = i;
+ current_item = current_item + length + 1;
+ }
+ nk_combo_end(ctx);
+ }
+ return selected;
+}
+NK_API int
+nk_combo_string(struct nk_context *ctx, const char *items_separated_by_zeros,
+ int selected, int count, int item_height, struct nk_vec2 size)
+{
+ return nk_combo_separator(ctx, items_separated_by_zeros, '\0', selected, count, item_height, size);
+}
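+/* Usage sketch (illustrative addition by the editor, not part of the original
+ * nuklear sources): nk_combo_string() expects the items packed back to back
+ * and separated by '\0', with the last item also '\0'-terminated:
+ *
+ *     static int mode = 0;
+ *     nk_layout_row_dynamic(ctx, 25, 1);
+ *     mode = nk_combo_string(ctx, "Fit\0Fill\0Stretch\0",
+ *                            mode, 3, 25, nk_vec2(200, 200));
+ */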
+NK_API int
+nk_combo_callback(struct nk_context *ctx, void(*item_getter)(void*, int, const char**),
+ void *userdata, int selected, int count, int item_height, struct nk_vec2 size)
+{
+ int i;
+ int max_height;
+ struct nk_vec2 item_spacing;
+ struct nk_vec2 window_padding;
+ const char *item;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(item_getter);
+ if (!ctx || !item_getter)
+ return selected;
+
+ /* calculate popup window */
+ item_spacing = ctx->style.window.spacing;
+ window_padding = nk_panel_get_padding(&ctx->style, ctx->current->layout->type);
+ max_height = count * item_height + count * (int)item_spacing.y;
+ max_height += (int)item_spacing.y * 2 + (int)window_padding.y * 2;
+ size.y = NK_MIN(size.y, (float)max_height);
+
+ item_getter(userdata, selected, &item);
+ if (nk_combo_begin_label(ctx, item, size)) {
+ nk_layout_row_dynamic(ctx, (float)item_height, 1);
+ for (i = 0; i < count; ++i) {
+ item_getter(userdata, i, &item);
+ if (nk_combo_item_label(ctx, item, NK_TEXT_LEFT))
+ selected = i;
+ }
+ nk_combo_end(ctx);
+    }
+    return selected;
+}
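+/* Usage sketch (illustrative addition by the editor, not part of the original
+ * nuklear sources): nk_combo_callback() pulls item strings through a getter so
+ * no temporary string array has to be built. `name_getter`, `names` and
+ * `name_count` are hypothetical application names:
+ *
+ *     static void name_getter(void *data, int index, const char **out)
+ *     {
+ *         *out = ((const char**)data)[index];
+ *     }
+ *
+ *     selected = nk_combo_callback(ctx, name_getter, (void*)names, selected,
+ *                                  name_count, 25, nk_vec2(200, 200));
+ */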
+NK_API void
+nk_combobox(struct nk_context *ctx, const char **items, int count,
+ int *selected, int item_height, struct nk_vec2 size)
+{
+ *selected = nk_combo(ctx, items, count, *selected, item_height, size);
+}
+NK_API void
+nk_combobox_string(struct nk_context *ctx, const char *items_separated_by_zeros,
+ int *selected, int count, int item_height, struct nk_vec2 size)
+{
+ *selected = nk_combo_string(ctx, items_separated_by_zeros, *selected, count, item_height, size);
+}
+NK_API void
+nk_combobox_separator(struct nk_context *ctx, const char *items_separated_by_separator,
+ int separator,int *selected, int count, int item_height, struct nk_vec2 size)
+{
+ *selected = nk_combo_separator(ctx, items_separated_by_separator, separator,
+ *selected, count, item_height, size);
+}
+NK_API void
+nk_combobox_callback(struct nk_context *ctx,
+ void(*item_getter)(void* data, int id, const char **out_text),
+ void *userdata, int *selected, int count, int item_height, struct nk_vec2 size)
+{
+ *selected = nk_combo_callback(ctx, item_getter, userdata, *selected, count, item_height, size);
+}
+
+
+
+
+
+/* ===============================================================
+ *
+ * TOOLTIP
+ *
+ * ===============================================================*/
+NK_API int
+nk_tooltip_begin(struct nk_context *ctx, float width)
+{
+ int x,y,w,h;
+ struct nk_window *win;
+ const struct nk_input *in;
+ struct nk_rect bounds;
+ int ret;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ if (!ctx || !ctx->current || !ctx->current->layout)
+ return 0;
+
+ /* make sure that no nonblocking popup is currently active */
+ win = ctx->current;
+ in = &ctx->input;
+ if (win->popup.win && (win->popup.type & NK_PANEL_SET_NONBLOCK))
+ return 0;
+
+ w = nk_iceilf(width);
+ h = nk_iceilf(nk_null_rect.h);
+ x = nk_ifloorf(in->mouse.pos.x + 1) - (int)win->layout->clip.x;
+ y = nk_ifloorf(in->mouse.pos.y + 1) - (int)win->layout->clip.y;
+
+ bounds.x = (float)x;
+ bounds.y = (float)y;
+ bounds.w = (float)w;
+ bounds.h = (float)h;
+
+ ret = nk_popup_begin(ctx, NK_POPUP_DYNAMIC,
+ "__##Tooltip##__", NK_WINDOW_NO_SCROLLBAR|NK_WINDOW_BORDER, bounds);
+ if (ret) win->layout->flags &= ~(nk_flags)NK_WINDOW_ROM;
+ win->popup.type = NK_PANEL_TOOLTIP;
+ ctx->current->layout->type = NK_PANEL_TOOLTIP;
+ return ret;
+}
+
+NK_API void
+nk_tooltip_end(struct nk_context *ctx)
+{
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ if (!ctx || !ctx->current) return;
+ ctx->current->seq--;
+ nk_popup_close(ctx);
+ nk_popup_end(ctx);
+}
+NK_API void
+nk_tooltip(struct nk_context *ctx, const char *text)
+{
+ const struct nk_style *style;
+ struct nk_vec2 padding;
+
+ int text_len;
+ float text_width;
+ float text_height;
+
+ NK_ASSERT(ctx);
+ NK_ASSERT(ctx->current);
+ NK_ASSERT(ctx->current->layout);
+ NK_ASSERT(text);
+ if (!ctx || !ctx->current || !ctx->current->layout || !text)
+ return;
+
+ /* fetch configuration data */
+ style = &ctx->style;
+ padding = style->window.padding;
+
+ /* calculate size of the text and tooltip */
+ text_len = nk_strlen(text);
+ text_width = style->font->width(style->font->userdata,
+ style->font->height, text, text_len);
+ text_width += (4 * padding.x);
+ text_height = (style->font->height + 2 * padding.y);
+
+ /* execute tooltip and fill with text */
+ if (nk_tooltip_begin(ctx, (float)text_width)) {
+ nk_layout_row_dynamic(ctx, (float)text_height, 1);
+ nk_text(ctx, text, text_len, NK_TEXT_LEFT);
+ nk_tooltip_end(ctx);
+ }
+}
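+/* Usage sketch (illustrative addition by the editor, not part of the original
+ * nuklear sources): nk_tooltip() sizes a dynamic popup to the given text next
+ * to the mouse cursor, so it is usually guarded by a hover test on the widget
+ * that is about to be emitted:
+ *
+ *     struct nk_rect bounds;
+ *     nk_layout_row_dynamic(ctx, 25, 1);
+ *     bounds = nk_widget_bounds(ctx);   // bounds of the widget added next
+ *     nk_label(ctx, "Hover me", NK_TEXT_LEFT);
+ *     if (nk_input_is_mouse_hovering_rect(&ctx->input, bounds))
+ *         nk_tooltip(ctx, "This is a tooltip");
+ */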
+#ifdef NK_INCLUDE_STANDARD_VARARGS
+NK_API void
+nk_tooltipf(struct nk_context *ctx, const char *fmt, ...)
+{
+ va_list args;
+ va_start(args, fmt);
+ nk_tooltipfv(ctx, fmt, args);
+ va_end(args);
+}
+NK_API void
+nk_tooltipfv(struct nk_context *ctx, const char *fmt, va_list args)
+{
+ char buf[256];
+ nk_strfmt(buf, NK_LEN(buf), fmt, args);
+ nk_tooltip(ctx, buf);
+}
+#endif
+
+
+
+#endif /* NK_IMPLEMENTATION */
+
+/*
+/// ## License
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~none
+/// ------------------------------------------------------------------------------
+/// This software is available under 2 licenses -- choose whichever you prefer.
+/// ------------------------------------------------------------------------------
+/// ALTERNATIVE A - MIT License
+/// Copyright (c) 2016-2018 Micha Mettke
+/// Permission is hereby granted, free of charge, to any person obtaining a copy of
+/// this software and associated documentation files (the "Software"), to deal in
+/// the Software without restriction, including without limitation the rights to
+/// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+/// of the Software, and to permit persons to whom the Software is furnished to do
+/// so, subject to the following conditions:
+/// The above copyright notice and this permission notice shall be included in all
+/// copies or substantial portions of the Software.
+/// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+/// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+/// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+/// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+/// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+/// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+/// SOFTWARE.
+/// ------------------------------------------------------------------------------
+/// ALTERNATIVE B - Public Domain (www.unlicense.org)
+/// This is free and unencumbered software released into the public domain.
+/// Anyone is free to copy, modify, publish, use, compile, sell, or distribute this
+/// software, either in source code form or as a compiled binary, for any purpose,
+/// commercial or non-commercial, and by any means.
+/// In jurisdictions that recognize copyright laws, the author or authors of this
+/// software dedicate any and all copyright interest in the software to the public
+/// domain. We make this dedication for the benefit of the public at large and to
+/// the detriment of our heirs and successors. We intend this dedication to be an
+/// overt act of relinquishment in perpetuity of all present and future rights to
+/// this software under copyright law.
+/// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+/// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+/// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+/// AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+/// ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+/// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+/// ------------------------------------------------------------------------------
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+/// ## Changelog
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~none
+/// [date][x.yy.zz]-[description]
+/// -[date]: date on which the change has been pushed
+/// -[x.yy.zz]: Numerical version string representation. Each version number on the right
+/// resets back to zero if version on the left is incremented.
+/// - [x]: Major version with API and library breaking changes
+/// - [yy]: Minor version with non-breaking API and library changes
+/// - [zz]: Bug fix version with no direct changes to API
+///
+/// - 2019/09/20 (4.01.3) - Fixed a bug wherein combobox cannot be closed by clicking the header
+/// when NK_BUTTON_TRIGGER_ON_RELEASE is defined.
+/// - 2019/09/10 (4.01.2) - Fixed the nk_cos function, which deviated significantly.
+/// - 2019/09/08 (4.01.1) - Fixed a bug wherein re-baking of fonts caused a segmentation
+/// fault due to dst_font->glyph_count not being zeroed on subsequent
+/// bakes of the same set of fonts.
+/// - 2019/06/23 (4.01.0) - Added nk_***_get_scroll and nk_***_set_scroll for groups, windows, and popups.
+/// - 2019/06/12 (4.00.3) - Fix panel background drawing bug.
+/// - 2018/10/31 (4.00.2) - Added NK_KEYSTATE_BASED_INPUT to "fix" state based backends
+/// like GLFW without breaking key repeat behavior on event based.
+/// - 2018/04/01 (4.00.1) - Fixed calling `nk_convert` multiple times per single frame.
+/// - 2018/04/01 (4.00.0) - BREAKING CHANGE: nk_draw_list_clear no longer tries to
+/// clear provided buffers. So make sure to either free
+/// or clear each passed buffer after calling nk_convert.
+/// - 2018/02/23 (3.00.6) - Fixed slider dragging behavior.
+/// - 2018/01/31 (3.00.5) - Fixed overcalculation of cursor data in font baking process.
+/// - 2018/01/31 (3.00.4) - Removed name collision with stb_truetype.
+/// - 2018/01/28 (3.00.3) - Fixed panel window border drawing bug.
+/// - 2018/01/12 (3.00.2) - Added `nk_group_begin_titled` for separate group identifier and title.
+/// - 2018/01/07 (3.00.1) - Started to change documentation style.
+/// - 2018/01/05 (3.00.0) - BREAKING CHANGE: The previous color picker API was broken
+/// because of conversions between float and byte color representation.
+/// Color pickers now use floating point values to represent
+/// HSV values. To get back the old behavior I added some additional
+/// color conversion functions to cast between nk_color and
+/// nk_colorf.
+/// - 2017/12/23 (2.00.7) - Fixed small warning.
+/// - 2017/12/23 (2.00.7) - Fixed `nk_edit_buffer` behavior if activated to allow input.
+/// - 2017/12/23 (2.00.7) - Fixed modifiable progressbar dragging visuals and input behavior.
+/// - 2017/12/04 (2.00.6) - Added formatted string tooltip widget.
+/// - 2017/11/18 (2.00.5) - Fixed window becoming hidden with flag `NK_WINDOW_NO_INPUT`.
+/// - 2017/11/15 (2.00.4) - Fixed font merging.
+/// - 2017/11/07 (2.00.3) - Fixed window size and position modifier functions.
+/// - 2017/09/14 (2.00.2) - Fixed `nk_edit_buffer` and `nk_edit_focus` behavior.
+/// - 2017/09/14 (2.00.1) - Fixed window closing behavior.
+/// - 2017/09/14 (2.00.0) - BREAKING CHANGE: Modifying window position and size functions now
+/// require the name of the window and must happen outside the window
+/// building process (between function call nk_begin and nk_end).
+/// - 2017/09/11 (1.40.9) - Fixed window background flag if background window is declared last.
+/// - 2017/08/27 (1.40.8) - Fixed `nk_item_is_any_active` for hidden windows.
+/// - 2017/08/27 (1.40.7) - Fixed window background flag.
+/// - 2017/07/07 (1.40.6) - Fixed missing clipping rect check for hovering/clicked
+/// query for widgets.
+/// - 2017/07/07 (1.40.5) - Fixed drawing bug for vertex output for lines and stroked
+/// and filled rectangles.
+/// - 2017/07/07 (1.40.4) - Fixed bug in nk_convert trying to add windows that are in
+/// process of being destroyed.
+/// - 2017/07/07 (1.40.3) - Fixed table internal bug caused by storing table size in
+/// window instead of directly in table.
+/// - 2017/06/30 (1.40.2) - Removed unneeded semicolon in C++ NK_ALIGNOF macro.
+/// - 2017/06/30 (1.40.1) - Fixed drawing lines smaller or equal zero.
+/// - 2017/06/08 (1.40.0) - Removed the breaking part of last commit. Auto layout now only
+///                        comes into effect if you pass in zero as the row height argument.
+/// - 2017/06/08 (1.40.0) - BREAKING CHANGE: while not directly API breaking it will change
+///                        how layouting works. From now on there will be an internal minimum
+/// row height derived from font height. If you need a row smaller than
+/// that you can directly set it by `nk_layout_set_min_row_height` and
+///                        reset the value back by calling `nk_layout_reset_min_row_height`.
+/// - 2017/06/08 (1.39.1) - Fixed property text edit handling bug caused by past `nk_widget` fix.
+/// - 2017/06/08 (1.39.0) - Added function to retrieve window space without calling a `nk_layout_xxx` function.
+/// - 2017/06/06 (1.38.5) - Fixed `nk_convert` return flag for command buffer.
+/// - 2017/05/23 (1.38.4) - Fixed activation behavior for widgets partially clipped.
+/// - 2017/05/10 (1.38.3) - Fixed wrong min window size mouse scaling over boundaries.
+/// - 2017/05/09 (1.38.2) - Fixed vertical scrollbar drawing with not enough space.
+/// - 2017/05/09 (1.38.1) - Fixed scaler dragging behavior if window size hits minimum size.
+/// - 2017/05/06 (1.38.0) - Added platform double-click support.
+/// - 2017/04/20 (1.37.1) - Fixed key repeat found inside glfw demo backends.
+/// - 2017/04/20 (1.37.0) - Extended properties with selection and clipboard support.
+/// - 2017/04/20 (1.36.2) - Fixed #405 overlapping rows with zero padding and spacing.
+/// - 2017/04/09 (1.36.1) - Fixed #403 with another widget float error.
+/// - 2017/04/09 (1.36.0) - Added window `NK_WINDOW_NO_INPUT` and `NK_WINDOW_NOT_INTERACTIVE` flags.
+/// - 2017/04/09 (1.35.3) - Fixed buffer heap corruption.
+/// - 2017/03/25 (1.35.2) - Fixed popup overlapping for `NK_WINDOW_BACKGROUND` windows.
+/// - 2017/03/25 (1.35.1) - Fixed windows closing behavior.
+/// - 2017/03/18 (1.35.0) - Added horizontal scroll requested in #377.
+/// - 2017/03/18 (1.34.3) - Fixed long window header titles.
+/// - 2017/03/04 (1.34.2) - Fixed text edit filtering.
+/// - 2017/03/04 (1.34.1) - Fixed group closable flag.
+/// - 2017/02/25 (1.34.0) - Added custom draw command for better language binding support.
+/// - 2017/01/24 (1.33.0) - Added a programmatic way of removing edit focus.
+/// - 2017/01/24 (1.32.3) - Fixed wrong define for basic type definitions for windows.
+/// - 2017/01/21 (1.32.2) - Fixed input capture from hidden or closed windows.
+/// - 2017/01/21 (1.32.1) - Fixed slider behavior and drawing.
+/// - 2017/01/13 (1.32.0) - Added flag to put scaler into the bottom left corner.
+/// - 2017/01/13 (1.31.0) - Added additional row layouting method to combine both
+/// dynamic and static widgets.
+/// - 2016/12/31 (1.30.0) - Extended scrollbar offset from 16-bit to 32-bit.
+/// - 2016/12/31 (1.29.2) - Fixed closing window bug of minimized windows.
+/// - 2016/12/03 (1.29.1) - Fixed wrapped text with no separator and C89 error.
+/// - 2016/12/03 (1.29.0) - Changed text wrapping to process words not characters.
+/// - 2016/11/22 (1.28.6) - Fixed window minimized closing bug.
+/// - 2016/11/19 (1.28.5) - Fixed abstract combo box closing behavior.
+/// - 2016/11/19 (1.28.4) - Fixed tooltip flickering.
+/// - 2016/11/19 (1.28.3) - Fixed memory leak caused by popup repeated closing.
+/// - 2016/11/18 (1.28.2) - Fixed memory leak caused by popup panel allocation.
+/// - 2016/11/10 (1.28.1) - Fixed some warnings and C++ error.
+/// - 2016/11/10 (1.28.0) - Added additional `nk_button` versions which allow you to directly
+///                        pass in a style struct to change a button's visuals.
+/// - 2016/11/10 (1.27.0) - Added additional `nk_tree` versions to support external state
+///                        storage. Just like the last `nk_group` commit, the main
+///                        advantage is that you can optionally minimize nuklear's runtime
+/// memory consumption or handle hash collisions.
+/// - 2016/11/09 (1.26.0) - Added additional `nk_group` version to support external scrollbar
+/// offset storage. Main advantage is that you can externalize
+/// the memory management for the offset. It could also be helpful
+/// if you have a hash collision in `nk_group_begin` but really
+///                        want the name. In addition I added `nk_list_view`, which allows
+///                        you to draw big lists inside a group without actually having to
+///                        commit the whole list to nuklear (issue #269).
+/// - 2016/10/30 (1.25.1) - Fixed clipping rectangle bug inside `nk_draw_list`.
+/// - 2016/10/29 (1.25.0) - Pulled `nk_panel` memory management into nuklear and out of
+/// the hands of the user. From now on users don't have to care
+/// about panels unless they care about some information. If you
+/// still need the panel just call `nk_window_get_panel`.
+/// - 2016/10/21 (1.24.0) - Changed widget border drawing to stroked rectangle from filled
+/// rectangle for less overdraw and widget background transparency.
+/// - 2016/10/18 (1.23.0) - Added `nk_edit_focus` for manually edit widget focus control.
+/// - 2016/09/29 (1.22.7) - Fixed deduction of basic type in non `<stdint.h>` compilation.
+/// - 2016/09/29 (1.22.6) - Fixed edit widget UTF-8 text cursor drawing bug.
+/// - 2016/09/28 (1.22.5) - Fixed edit widget UTF-8 text appending/inserting/removing.
+/// - 2016/09/28 (1.22.4) - Fixed drawing bug inside edit widgets which offset all
+///                        text in every edit widget if one of them is scrolled.
+/// - 2016/09/28 (1.22.3) - Fixed small bug in edit widgets if not active. The wrong
+///                        text length was passed. It should have been in bytes but
+///                        was passed as glyphs.
+/// - 2016/09/20 (1.22.2) - Fixed color button size calculation.
+/// - 2016/09/20 (1.22.1) - Fixed some `nk_vsnprintf` behavior bugs and removed `<stdio.h>`
+/// again from `NK_INCLUDE_STANDARD_VARARGS`.
+/// - 2016/09/18 (1.22.0) - C89 does not support vsnprintf; only C99 and newer as well
+///                        as C++11 and newer do. In addition, to use vsnprintf you have
+///                        to include <stdio.h>, so just defining `NK_INCLUDE_STANDARD_VARARGS`
+///                        is not enough. That behavior is now fixed. By default, if
+///                        both varargs as well as stdio are selected I try to use
+///                        vsnprintf; if that is not possible I revert to vsprintf. If
+///                        varargs but not stdio was defined I use my own function.
+/// - 2016/09/15 (1.21.2) - Fixed panel `close` behavior for deeper panel levels.
+/// - 2016/09/15 (1.21.1) - Fixed C++ errors and wrong argument to `nk_panel_get_xxxx`.
+/// - 2016/09/13 (1.21.0) - !BREAKING! Fixed nonblocking popup behavior in menu, combo,
+/// and contextual which prevented closing in y-direction if
+/// popup did not reach max height.
+/// In addition the height parameter was changed into vec2
+/// for width and height to have more control over the popup size.
+/// - 2016/09/13 (1.20.3) - Cleaned up and extended type selection.
+/// - 2016/09/13 (1.20.2) - Fixed slider behavior hopefully for the last time. This time
+///                        all calculations are correct so no more hackery.
+/// - 2016/09/13 (1.20.1) - Internal change to divide window/panel flags into panel flags and types.
+///                        Surprisingly I spent years in C and still happened to confuse types
+///                        with flags. Probably something to take note of.
+/// - 2016/09/08 (1.20.0) - Added additional helper function to make it easier to just
+/// take the produced buffers from `nk_convert` and unplug the
+/// iteration process from `nk_context`. So now you can
+///                        just use the vertex, element and command buffers plus two pointers
+///                        inside the command buffer retrieved by calls to `nk__draw_begin`
+///                        and `nk__draw_end` and the macro `nk_draw_foreach_bounded`.
+/// - 2016/09/08 (1.19.0) - Added additional asserts to make sure every `nk_xxx_begin` call
+/// for windows, popups, combobox, menu and contextual is guarded by
+/// `if` condition and does not produce false drawing output.
+/// - 2016/09/08 (1.18.0) - Changed confusing name for `NK_SYMBOL_RECT_FILLED`, `NK_SYMBOL_RECT`
+/// to hopefully easier to understand `NK_SYMBOL_RECT_FILLED` and
+/// `NK_SYMBOL_RECT_OUTLINE`.
+/// - 2016/09/08 (1.17.0) - Changed confusing name for `NK_SYMBOL_CIRCLE_FILLED`, `NK_SYMBOL_CIRCLE`
+/// to hopefully easier to understand `NK_SYMBOL_CIRCLE_FILLED` and
+/// `NK_SYMBOL_CIRCLE_OUTLINE`.
+/// - 2016/09/08 (1.16.0) - Added additional checks to select correct types if `NK_INCLUDE_FIXED_TYPES`
+///                        is not defined by supporting the biggest compilers GCC, clang and MSVC.
+/// - 2016/09/07 (1.15.3) - Fixed `NK_INCLUDE_COMMAND_USERDATA` define to not cause an error.
+/// - 2016/09/04 (1.15.2) - Fixed wrong combobox height calculation.
+/// - 2016/09/03 (1.15.1) - Fixed gaps inside combo boxes in OpenGL.
+/// - 2016/09/02 (1.15.0) - Changed nuklear to not have any default vertex layout and
+/// instead made it user provided. The range of types to convert
+/// to is quite limited at the moment, but I would be more than
+/// happy to accept PRs to add additional.
+/// - 2016/08/30 (1.14.2) - Removed unused variables.
+/// - 2016/08/30 (1.14.1) - Fixed C++ build errors.
+/// - 2016/08/30 (1.14.0) - Removed mouse dragging from SDL demo since it does not work correctly.
+/// - 2016/08/30 (1.13.4) - Tweaked some default styling variables.
+/// - 2016/08/30 (1.13.3) - Hopefully fixed drawing bug in slider, in general I would
+/// refrain from using slider with a big number of steps.
+/// - 2016/08/30 (1.13.2) - Fixed close and minimize button which would fire even if the
+/// window was in Read Only Mode.
+/// - 2016/08/30 (1.13.1) - Fixed popup panel padding handling which was previously just
+/// a hack for combo box and menu.
+/// - 2016/08/30 (1.13.0) - Removed `NK_WINDOW_DYNAMIC` flag from public API since
+/// it is bugged and causes issues in window selection.
+/// - 2016/08/30 (1.12.0) - Removed scaler size. The size of the scaler is now
+/// determined by the scrollbar size.
+/// - 2016/08/30 (1.11.2) - Fixed some drawing bugs caused by changes from 1.11.0.
+/// - 2016/08/30 (1.11.1) - Fixed overlapping minimized window selection.
+/// - 2016/08/30 (1.11.0) - Removed some internal complexity and overly complex code
+/// handling panel padding and panel border.
+/// - 2016/08/29 (1.10.0) - Added additional height parameter to `nk_combobox_xxx`.
+/// - 2016/08/29 (1.10.0) - Fixed drawing bug in dynamic popups.
+/// - 2016/08/29 (1.10.0) - Added experimental mouse scrolling to popups, menus and comboboxes.
+/// - 2016/08/26 (1.10.0) - Added window name string representation to account for
+///                        hash collisions. Currently limited to `NK_WINDOW_MAX_NAME`
+///                        which in turn can be redefined if not big enough.
+/// - 2016/08/26 (1.10.0) - Added stacks for temporary style/UI changes in code.
+/// - 2016/08/25 (1.10.0) - Changed `nk_input_is_key_pressed` and `nk_input_is_key_released`
+/// to account for key press and release happening in one frame.
+/// - 2016/08/25 (1.10.0) - Added additional nk_edit flag to directly jump to the end on activate.
+/// - 2016/08/17 (1.09.6) - Removed invalid check for value zero in `nk_propertyx`.
+/// - 2016/08/16 (1.09.5) - Fixed ROM mode for deeper levels of popup windows parents.
+/// - 2016/08/15 (1.09.4) - Editboxes are now still active if enter was pressed with flag
+///                        `NK_EDIT_SIG_ENTER`. Main reasoning is to be able to keep
+///                        typing after committing.
+/// - 2016/08/15 (1.09.4) - Removed redundant code.
+/// - 2016/08/15 (1.09.4) - Fixed negative numbers in `nk_strtoi` and removed an unused variable.
+/// - 2016/08/15 (1.09.3) - Fixed `NK_WINDOW_BACKGROUND` flag behavior to select a background
+/// window only as selected by hovering and not by clicking.
+/// - 2016/08/14 (1.09.2) - Fixed a bug in font atlas which caused wrong loading
+///                        of glyphs for fonts with multiple ranges.
+/// - 2016/08/12 (1.09.1) - Added additional function to check if window is currently
+/// hidden and therefore not visible.
+/// - 2016/08/12 (1.09.1) - nk_window_is_closed now queries the correct flag `NK_WINDOW_CLOSED`
+/// instead of the old flag `NK_WINDOW_HIDDEN`.
+/// - 2016/08/09 (1.09.0) - Added additional double version to nk_property and changed
+/// the underlying implementation to not cast to float and instead
+/// work directly on the given values.
+/// - 2016/08/09 (1.08.0) - Added additional define to overwrite library internal
+///                        floating point number to string conversion for additional
+/// precision.
+/// - 2016/08/09 (1.08.0) - Added additional define to overwrite library internal
+/// string to floating point number conversion for additional
+/// precision.
+/// - 2016/08/08 (1.07.2) - Fixed compile error without the define `NK_INCLUDE_FIXED_TYPES`.
+/// - 2016/08/08 (1.07.1) - Fixed possible floating point error inside `nk_widget` leading
+///                        to wrong widget width calculation which resulted in widgets falsely
+///                        becoming tagged as not inside the window and therefore not accessible.
+/// - 2016/08/08 (1.07.0) - Nuklear now differentiates between hiding a window (NK_WINDOW_HIDDEN) and
+/// closing a window (NK_WINDOW_CLOSED). A window can be hidden/shown
+/// by using `nk_window_show` and closed by either clicking the close
+/// icon in a window or by calling `nk_window_close`. Only closed
+/// windows get removed at the end of the frame while hidden windows
+/// remain.
+/// - 2016/08/08 (1.06.0) - Added `nk_edit_string_zero_terminated` as a second option to
+/// `nk_edit_string` which takes, edits and outputs a '\0' terminated string.
+/// - 2016/08/08 (1.05.4) - Fixed scrollbar auto hiding behavior.
+/// - 2016/08/08 (1.05.3) - Fixed wrong panel padding selection in `nk_layout_widget_space`.
+/// - 2016/08/07 (1.05.2) - Fixed old bug in dynamic immediate mode layout API, calculating
+/// wrong item spacing and panel width.
+/// - 2016/08/07 (1.05.1) - Hopefully finally fixed combobox popup drawing bug.
+/// - 2016/08/07 (1.05.0) - Split varargs away from `NK_INCLUDE_STANDARD_IO` into own
+/// define `NK_INCLUDE_STANDARD_VARARGS` to allow more fine
+///                        grained control over library includes.
+/// - 2016/08/06 (1.04.5) - Changed memset calls to `NK_MEMSET`.
+/// - 2016/08/04 (1.04.4) - Fixed fast window scaling behavior.
+/// - 2016/08/04 (1.04.3) - Fixed window scaling, movement bug which appears if you
+/// move/scale a window and another window is behind it.
+/// If you are fast enough then the window behind gets activated
+/// and the operation is blocked. I now require activating
+/// by hovering only if mouse is not pressed.
+/// - 2016/08/04 (1.04.2) - Fixed changing fonts.
+/// - 2016/08/03 (1.04.1) - Fixed `NK_WINDOW_BACKGROUND` behavior.
+/// - 2016/08/03 (1.04.0) - Added color parameter to `nk_draw_image`.
+/// - 2016/08/03 (1.04.0) - Added additional window padding style attributes for
+/// sub windows (combo, menu, ...).
+/// - 2016/08/03 (1.04.0) - Added functions to show/hide software cursor.
+/// - 2016/08/03 (1.04.0) - Added `NK_WINDOW_BACKGROUND` flag to force a window
+/// to be always in the background of the screen.
+/// - 2016/08/03 (1.03.2) - Removed invalid assert macro for NK_RGB color picker.
+/// - 2016/08/01 (1.03.1) - Added helper macros into header include guard.
+/// - 2016/07/29 (1.03.0) - Moved the window/table pool into the header part to
+/// simplify memory management by removing the need to
+/// allocate the pool.
+/// - 2016/07/29 (1.02.0) - Added auto scrollbar hiding window flag which if enabled
+/// will hide the window scrollbar after NK_SCROLLBAR_HIDING_TIMEOUT
+/// seconds without window interaction. To make it work
+/// you have to also set a delta time inside the `nk_context`.
+/// - 2016/07/25 (1.01.1) - Fixed small panel and panel border drawing bugs.
+/// - 2016/07/15 (1.01.0) - Added software cursor to `nk_style` and `nk_context`.
+/// - 2016/07/15 (1.01.0) - Added const correctness to `nk_buffer_push` data argument.
+/// - 2016/07/15 (1.01.0) - Removed internal font baking API and simplified
+/// font atlas memory management by converting pointer
+/// arrays for fonts and font configurations to lists.
+/// - 2016/07/15 (1.00.0) - Changed button API to use context dependent button
+/// behavior instead of passing it for every function call.
+/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+/// ## Gallery
+/// ![Figure [blue]: Feature overview with blue color styling](https://cloud.githubusercontent.com/assets/8057201/13538240/acd96876-e249-11e5-9547-5ac0b19667a0.png)
+/// ![Figure [red]: Feature overview with red color styling](https://cloud.githubusercontent.com/assets/8057201/13538243/b04acd4c-e249-11e5-8fd2-ad7744a5b446.png)
+/// ![Figure [widgets]: Widget overview](https://cloud.githubusercontent.com/assets/8057201/11282359/3325e3c6-8eff-11e5-86cb-cf02b0596087.png)
+/// ![Figure [blackwhite]: Black and white](https://cloud.githubusercontent.com/assets/8057201/11033668/59ab5d04-86e5-11e5-8091-c56f16411565.png)
+/// ![Figure [filexp]: File explorer](https://cloud.githubusercontent.com/assets/8057201/10718115/02a9ba08-7b6b-11e5-950f-adacdd637739.png)
+/// ![Figure [opengl]: OpenGL Editor](https://cloud.githubusercontent.com/assets/8057201/12779619/2a20d72c-ca69-11e5-95fe-4edecf820d5c.png)
+/// ![Figure [nodedit]: Node Editor](https://cloud.githubusercontent.com/assets/8057201/9976995/e81ac04a-5ef7-11e5-872b-acd54fbeee03.gif)
+/// ![Figure [skinning]: Using skinning in Nuklear](https://cloud.githubusercontent.com/assets/8057201/15991632/76494854-30b8-11e6-9555-a69840d0d50b.png)
+/// ![Figure [bf]: Heavy modified version](https://cloud.githubusercontent.com/assets/8057201/14902576/339926a8-0d9c-11e6-9fee-a8b73af04473.png)
+///
+/// ## Credits
+/// Developed by Micha Mettke and every direct or indirect github contributor. <br /><br />
+///
+/// Embeds [stb_textedit](https://github.com/nothings/stb/blob/master/stb_textedit.h), [stb_truetype](https://github.com/nothings/stb/blob/master/stb_truetype.h) and [stb_rectpack](https://github.com/nothings/stb/blob/master/stb_rect_pack.h) by Sean Barrett (public domain) <br />
+/// Uses [stddoc.c](https://github.com/r-lyeh/stddoc.c) from r-lyeh@github.com for documentation generation <br /><br />
+/// Embeds ProggyClean.ttf font by Tristan Grimmer (MIT license). <br />
+///
+/// Big thank you to Omar Cornut (ocornut@github) for his [imgui library](https://github.com/ocornut/imgui) and
+/// giving me the inspiration for this library, Casey Muratori for handmade hero
+/// and his original immediate mode graphical user interface idea and Sean
+/// Barrett for his amazing single header libraries which restored my faith
+/// in libraries and brought me to create some of my own. Finally Apoorva Joshi
+/// for his single header file packer.
+*/
+
diff --git a/chromium/third_party/dawn/third_party/glfw/deps/nuklear_glfw_gl2.h b/chromium/third_party/dawn/third_party/glfw/deps/nuklear_glfw_gl2.h
new file mode 100644
index 00000000000..a959b14a5c2
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/deps/nuklear_glfw_gl2.h
@@ -0,0 +1,381 @@
+/*
+ * Nuklear - v1.32.0 - public domain
+ * no warranty implied; use at your own risk.
+ * authored from 2015-2017 by Micha Mettke
+ */
+/*
+ * ==============================================================
+ *
+ * API
+ *
+ * ===============================================================
+ */
+#ifndef NK_GLFW_GL2_H_
+#define NK_GLFW_GL2_H_
+
+#include <GLFW/glfw3.h>
+
+enum nk_glfw_init_state{
+ NK_GLFW3_DEFAULT = 0,
+ NK_GLFW3_INSTALL_CALLBACKS
+};
+NK_API struct nk_context* nk_glfw3_init(GLFWwindow *win, enum nk_glfw_init_state);
+NK_API void nk_glfw3_font_stash_begin(struct nk_font_atlas **atlas);
+NK_API void nk_glfw3_font_stash_end(void);
+
+NK_API void nk_glfw3_new_frame(void);
+NK_API void nk_glfw3_render(enum nk_anti_aliasing);
+NK_API void nk_glfw3_shutdown(void);
+
+NK_API void nk_glfw3_char_callback(GLFWwindow *win, unsigned int codepoint);
+NK_API void nk_gflw3_scroll_callback(GLFWwindow *win, double xoff, double yoff);
+
+#endif
+
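+/* A minimal usage sketch (illustrative, not part of the upstream backend):
+ * it shows the typical frame loop built from the functions declared above.
+ * Window creation (`win`) and the UI code built with nk_begin()/nk_end()
+ * are assumed to exist elsewhere.
+ *
+ *     struct nk_context *ctx = nk_glfw3_init(win, NK_GLFW3_INSTALL_CALLBACKS);
+ *     {struct nk_font_atlas *atlas;
+ *      nk_glfw3_font_stash_begin(&atlas);
+ *      nk_glfw3_font_stash_end();}
+ *     while (!glfwWindowShouldClose(win)) {
+ *         glfwPollEvents();
+ *         nk_glfw3_new_frame();
+ *         // ... build the UI here using ctx with nk_begin()/nk_end() ...
+ *         glClear(GL_COLOR_BUFFER_BIT);
+ *         nk_glfw3_render(NK_ANTI_ALIASING_ON);
+ *         glfwSwapBuffers(win);
+ *     }
+ *     nk_glfw3_shutdown();
+ */
+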
+/*
+ * ==============================================================
+ *
+ * IMPLEMENTATION
+ *
+ * ===============================================================
+ */
+#ifdef NK_GLFW_GL2_IMPLEMENTATION
+
+#ifndef NK_GLFW_TEXT_MAX
+#define NK_GLFW_TEXT_MAX 256
+#endif
+#ifndef NK_GLFW_DOUBLE_CLICK_LO
+#define NK_GLFW_DOUBLE_CLICK_LO 0.02
+#endif
+#ifndef NK_GLFW_DOUBLE_CLICK_HI
+#define NK_GLFW_DOUBLE_CLICK_HI 0.2
+#endif
+
+struct nk_glfw_device {
+ struct nk_buffer cmds;
+ struct nk_draw_null_texture null;
+ GLuint font_tex;
+};
+
+struct nk_glfw_vertex {
+ float position[2];
+ float uv[2];
+ nk_byte col[4];
+};
+
+static struct nk_glfw {
+ GLFWwindow *win;
+ int width, height;
+ int display_width, display_height;
+ struct nk_glfw_device ogl;
+ struct nk_context ctx;
+ struct nk_font_atlas atlas;
+ struct nk_vec2 fb_scale;
+ unsigned int text[NK_GLFW_TEXT_MAX];
+ int text_len;
+ struct nk_vec2 scroll;
+ double last_button_click;
+ int is_double_click_down;
+ struct nk_vec2 double_click_pos;
+} glfw;
+
+NK_INTERN void
+nk_glfw3_device_upload_atlas(const void *image, int width, int height)
+{
+ struct nk_glfw_device *dev = &glfw.ogl;
+ glGenTextures(1, &dev->font_tex);
+ glBindTexture(GL_TEXTURE_2D, dev->font_tex);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+ glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, (GLsizei)width, (GLsizei)height, 0,
+ GL_RGBA, GL_UNSIGNED_BYTE, image);
+}
+
+NK_API void
+nk_glfw3_render(enum nk_anti_aliasing AA)
+{
+ /* setup global state */
+ struct nk_glfw_device *dev = &glfw.ogl;
+ glPushAttrib(GL_ENABLE_BIT|GL_COLOR_BUFFER_BIT|GL_TRANSFORM_BIT);
+ glDisable(GL_CULL_FACE);
+ glDisable(GL_DEPTH_TEST);
+ glEnable(GL_SCISSOR_TEST);
+ glEnable(GL_BLEND);
+ glEnable(GL_TEXTURE_2D);
+ glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
+
+    /* setup viewport/projection */
+ glViewport(0,0,(GLsizei)glfw.display_width,(GLsizei)glfw.display_height);
+ glMatrixMode(GL_PROJECTION);
+ glPushMatrix();
+ glLoadIdentity();
+ glOrtho(0.0f, glfw.width, glfw.height, 0.0f, -1.0f, 1.0f);
+ glMatrixMode(GL_MODELVIEW);
+ glPushMatrix();
+ glLoadIdentity();
+
+ glEnableClientState(GL_VERTEX_ARRAY);
+ glEnableClientState(GL_TEXTURE_COORD_ARRAY);
+ glEnableClientState(GL_COLOR_ARRAY);
+ {
+ GLsizei vs = sizeof(struct nk_glfw_vertex);
+ size_t vp = offsetof(struct nk_glfw_vertex, position);
+ size_t vt = offsetof(struct nk_glfw_vertex, uv);
+ size_t vc = offsetof(struct nk_glfw_vertex, col);
+
+ /* convert from command queue into draw list and draw to screen */
+ const struct nk_draw_command *cmd;
+ const nk_draw_index *offset = NULL;
+ struct nk_buffer vbuf, ebuf;
+
+ /* fill convert configuration */
+ struct nk_convert_config config;
+ static const struct nk_draw_vertex_layout_element vertex_layout[] = {
+ {NK_VERTEX_POSITION, NK_FORMAT_FLOAT, NK_OFFSETOF(struct nk_glfw_vertex, position)},
+ {NK_VERTEX_TEXCOORD, NK_FORMAT_FLOAT, NK_OFFSETOF(struct nk_glfw_vertex, uv)},
+ {NK_VERTEX_COLOR, NK_FORMAT_R8G8B8A8, NK_OFFSETOF(struct nk_glfw_vertex, col)},
+ {NK_VERTEX_LAYOUT_END}
+ };
+ NK_MEMSET(&config, 0, sizeof(config));
+ config.vertex_layout = vertex_layout;
+ config.vertex_size = sizeof(struct nk_glfw_vertex);
+ config.vertex_alignment = NK_ALIGNOF(struct nk_glfw_vertex);
+ config.null = dev->null;
+ config.circle_segment_count = 22;
+ config.curve_segment_count = 22;
+ config.arc_segment_count = 22;
+ config.global_alpha = 1.0f;
+ config.shape_AA = AA;
+ config.line_AA = AA;
+
+ /* convert shapes into vertexes */
+ nk_buffer_init_default(&vbuf);
+ nk_buffer_init_default(&ebuf);
+ nk_convert(&glfw.ctx, &dev->cmds, &vbuf, &ebuf, &config);
+
+ /* setup vertex buffer pointer */
+ {const void *vertices = nk_buffer_memory_const(&vbuf);
+ glVertexPointer(2, GL_FLOAT, vs, (const void*)((const nk_byte*)vertices + vp));
+ glTexCoordPointer(2, GL_FLOAT, vs, (const void*)((const nk_byte*)vertices + vt));
+ glColorPointer(4, GL_UNSIGNED_BYTE, vs, (const void*)((const nk_byte*)vertices + vc));}
+
+ /* iterate over and execute each draw command */
+ offset = (const nk_draw_index*)nk_buffer_memory_const(&ebuf);
+ nk_draw_foreach(cmd, &glfw.ctx, &dev->cmds)
+ {
+ if (!cmd->elem_count) continue;
+ glBindTexture(GL_TEXTURE_2D, (GLuint)cmd->texture.id);
+ glScissor(
+ (GLint)(cmd->clip_rect.x * glfw.fb_scale.x),
+ (GLint)((glfw.height - (GLint)(cmd->clip_rect.y + cmd->clip_rect.h)) * glfw.fb_scale.y),
+ (GLint)(cmd->clip_rect.w * glfw.fb_scale.x),
+ (GLint)(cmd->clip_rect.h * glfw.fb_scale.y));
+ glDrawElements(GL_TRIANGLES, (GLsizei)cmd->elem_count, GL_UNSIGNED_SHORT, offset);
+ offset += cmd->elem_count;
+ }
+ nk_clear(&glfw.ctx);
+ nk_buffer_free(&vbuf);
+ nk_buffer_free(&ebuf);
+ }
+
+ /* default OpenGL state */
+ glDisableClientState(GL_VERTEX_ARRAY);
+ glDisableClientState(GL_TEXTURE_COORD_ARRAY);
+ glDisableClientState(GL_COLOR_ARRAY);
+
+ glDisable(GL_CULL_FACE);
+ glDisable(GL_DEPTH_TEST);
+ glDisable(GL_SCISSOR_TEST);
+ glDisable(GL_BLEND);
+ glDisable(GL_TEXTURE_2D);
+
+ glBindTexture(GL_TEXTURE_2D, 0);
+ glMatrixMode(GL_MODELVIEW);
+ glPopMatrix();
+ glMatrixMode(GL_PROJECTION);
+ glPopMatrix();
+ glPopAttrib();
+}
+
+NK_API void
+nk_glfw3_char_callback(GLFWwindow *win, unsigned int codepoint)
+{
+ (void)win;
+ if (glfw.text_len < NK_GLFW_TEXT_MAX)
+ glfw.text[glfw.text_len++] = codepoint;
+}
+
+NK_API void
+nk_gflw3_scroll_callback(GLFWwindow *win, double xoff, double yoff)
+{
+ (void)win; (void)xoff;
+ glfw.scroll.x += (float)xoff;
+ glfw.scroll.y += (float)yoff;
+}
+
+NK_API void
+nk_glfw3_mouse_button_callback(GLFWwindow* window, int button, int action, int mods)
+{
+ double x, y;
+ if (button != GLFW_MOUSE_BUTTON_LEFT) return;
+ glfwGetCursorPos(window, &x, &y);
+ if (action == GLFW_PRESS) {
+ double dt = glfwGetTime() - glfw.last_button_click;
+ if (dt > NK_GLFW_DOUBLE_CLICK_LO && dt < NK_GLFW_DOUBLE_CLICK_HI) {
+ glfw.is_double_click_down = nk_true;
+ glfw.double_click_pos = nk_vec2((float)x, (float)y);
+ }
+ glfw.last_button_click = glfwGetTime();
+ } else glfw.is_double_click_down = nk_false;
+}
+
+NK_INTERN void
+nk_glfw3_clipboard_paste(nk_handle usr, struct nk_text_edit *edit)
+{
+ const char *text = glfwGetClipboardString(glfw.win);
+ if (text) nk_textedit_paste(edit, text, nk_strlen(text));
+ (void)usr;
+}
+
+NK_INTERN void
+nk_glfw3_clipboard_copy(nk_handle usr, const char *text, int len)
+{
+ char *str = 0;
+ (void)usr;
+ if (!len) return;
+ str = (char*)malloc((size_t)len+1);
+ if (!str) return;
+ NK_MEMCPY(str, text, (size_t)len);
+ str[len] = '\0';
+ glfwSetClipboardString(glfw.win, str);
+ free(str);
+}
+
+NK_API struct nk_context*
+nk_glfw3_init(GLFWwindow *win, enum nk_glfw_init_state init_state)
+{
+ glfw.win = win;
+ if (init_state == NK_GLFW3_INSTALL_CALLBACKS) {
+ glfwSetScrollCallback(win, nk_gflw3_scroll_callback);
+ glfwSetCharCallback(win, nk_glfw3_char_callback);
+ glfwSetMouseButtonCallback(win, nk_glfw3_mouse_button_callback);
+ }
+ nk_init_default(&glfw.ctx, 0);
+ glfw.ctx.clip.copy = nk_glfw3_clipboard_copy;
+ glfw.ctx.clip.paste = nk_glfw3_clipboard_paste;
+ glfw.ctx.clip.userdata = nk_handle_ptr(0);
+ nk_buffer_init_default(&glfw.ogl.cmds);
+
+ glfw.is_double_click_down = nk_false;
+ glfw.double_click_pos = nk_vec2(0, 0);
+
+ return &glfw.ctx;
+}
+
+NK_API void
+nk_glfw3_font_stash_begin(struct nk_font_atlas **atlas)
+{
+ nk_font_atlas_init_default(&glfw.atlas);
+ nk_font_atlas_begin(&glfw.atlas);
+ *atlas = &glfw.atlas;
+}
+
+NK_API void
+nk_glfw3_font_stash_end(void)
+{
+ const void *image; int w, h;
+ image = nk_font_atlas_bake(&glfw.atlas, &w, &h, NK_FONT_ATLAS_RGBA32);
+ nk_glfw3_device_upload_atlas(image, w, h);
+ nk_font_atlas_end(&glfw.atlas, nk_handle_id((int)glfw.ogl.font_tex), &glfw.ogl.null);
+ if (glfw.atlas.default_font)
+ nk_style_set_font(&glfw.ctx, &glfw.atlas.default_font->handle);
+}
+
+NK_API void
+nk_glfw3_new_frame(void)
+{
+ int i;
+ double x, y;
+ struct nk_context *ctx = &glfw.ctx;
+ struct GLFWwindow *win = glfw.win;
+
+ glfwGetWindowSize(win, &glfw.width, &glfw.height);
+ glfwGetFramebufferSize(win, &glfw.display_width, &glfw.display_height);
+ glfw.fb_scale.x = (float)glfw.display_width/(float)glfw.width;
+ glfw.fb_scale.y = (float)glfw.display_height/(float)glfw.height;
+
+ nk_input_begin(ctx);
+ for (i = 0; i < glfw.text_len; ++i)
+ nk_input_unicode(ctx, glfw.text[i]);
+
+ /* optional grabbing behavior */
+ if (ctx->input.mouse.grab)
+ glfwSetInputMode(glfw.win, GLFW_CURSOR, GLFW_CURSOR_HIDDEN);
+ else if (ctx->input.mouse.ungrab)
+ glfwSetInputMode(glfw.win, GLFW_CURSOR, GLFW_CURSOR_NORMAL);
+
+ nk_input_key(ctx, NK_KEY_DEL, glfwGetKey(win, GLFW_KEY_DELETE) == GLFW_PRESS);
+ nk_input_key(ctx, NK_KEY_ENTER, glfwGetKey(win, GLFW_KEY_ENTER) == GLFW_PRESS);
+ nk_input_key(ctx, NK_KEY_TAB, glfwGetKey(win, GLFW_KEY_TAB) == GLFW_PRESS);
+ nk_input_key(ctx, NK_KEY_BACKSPACE, glfwGetKey(win, GLFW_KEY_BACKSPACE) == GLFW_PRESS);
+ nk_input_key(ctx, NK_KEY_UP, glfwGetKey(win, GLFW_KEY_UP) == GLFW_PRESS);
+ nk_input_key(ctx, NK_KEY_DOWN, glfwGetKey(win, GLFW_KEY_DOWN) == GLFW_PRESS);
+ nk_input_key(ctx, NK_KEY_TEXT_START, glfwGetKey(win, GLFW_KEY_HOME) == GLFW_PRESS);
+ nk_input_key(ctx, NK_KEY_TEXT_END, glfwGetKey(win, GLFW_KEY_END) == GLFW_PRESS);
+ nk_input_key(ctx, NK_KEY_SCROLL_START, glfwGetKey(win, GLFW_KEY_HOME) == GLFW_PRESS);
+ nk_input_key(ctx, NK_KEY_SCROLL_END, glfwGetKey(win, GLFW_KEY_END) == GLFW_PRESS);
+ nk_input_key(ctx, NK_KEY_SCROLL_DOWN, glfwGetKey(win, GLFW_KEY_PAGE_DOWN) == GLFW_PRESS);
+ nk_input_key(ctx, NK_KEY_SCROLL_UP, glfwGetKey(win, GLFW_KEY_PAGE_UP) == GLFW_PRESS);
+ nk_input_key(ctx, NK_KEY_SHIFT, glfwGetKey(win, GLFW_KEY_LEFT_SHIFT) == GLFW_PRESS||
+ glfwGetKey(win, GLFW_KEY_RIGHT_SHIFT) == GLFW_PRESS);
+
+ if (glfwGetKey(win, GLFW_KEY_LEFT_CONTROL) == GLFW_PRESS ||
+ glfwGetKey(win, GLFW_KEY_RIGHT_CONTROL) == GLFW_PRESS) {
+ nk_input_key(ctx, NK_KEY_COPY, glfwGetKey(win, GLFW_KEY_C) == GLFW_PRESS);
+ nk_input_key(ctx, NK_KEY_PASTE, glfwGetKey(win, GLFW_KEY_V) == GLFW_PRESS);
+ nk_input_key(ctx, NK_KEY_CUT, glfwGetKey(win, GLFW_KEY_X) == GLFW_PRESS);
+ nk_input_key(ctx, NK_KEY_TEXT_UNDO, glfwGetKey(win, GLFW_KEY_Z) == GLFW_PRESS);
+ nk_input_key(ctx, NK_KEY_TEXT_REDO, glfwGetKey(win, GLFW_KEY_R) == GLFW_PRESS);
+ nk_input_key(ctx, NK_KEY_TEXT_WORD_LEFT, glfwGetKey(win, GLFW_KEY_LEFT) == GLFW_PRESS);
+ nk_input_key(ctx, NK_KEY_TEXT_WORD_RIGHT, glfwGetKey(win, GLFW_KEY_RIGHT) == GLFW_PRESS);
+ nk_input_key(ctx, NK_KEY_TEXT_LINE_START, glfwGetKey(win, GLFW_KEY_B) == GLFW_PRESS);
+ nk_input_key(ctx, NK_KEY_TEXT_LINE_END, glfwGetKey(win, GLFW_KEY_E) == GLFW_PRESS);
+ } else {
+ nk_input_key(ctx, NK_KEY_LEFT, glfwGetKey(win, GLFW_KEY_LEFT) == GLFW_PRESS);
+ nk_input_key(ctx, NK_KEY_RIGHT, glfwGetKey(win, GLFW_KEY_RIGHT) == GLFW_PRESS);
+ nk_input_key(ctx, NK_KEY_COPY, 0);
+ nk_input_key(ctx, NK_KEY_PASTE, 0);
+ nk_input_key(ctx, NK_KEY_CUT, 0);
+ nk_input_key(ctx, NK_KEY_SHIFT, 0);
+ }
+
+ glfwGetCursorPos(win, &x, &y);
+ nk_input_motion(ctx, (int)x, (int)y);
+ if (ctx->input.mouse.grabbed) {
+ glfwSetCursorPos(glfw.win, (double)ctx->input.mouse.prev.x, (double)ctx->input.mouse.prev.y);
+ ctx->input.mouse.pos.x = ctx->input.mouse.prev.x;
+ ctx->input.mouse.pos.y = ctx->input.mouse.prev.y;
+ }
+
+ nk_input_button(ctx, NK_BUTTON_LEFT, (int)x, (int)y, glfwGetMouseButton(win, GLFW_MOUSE_BUTTON_LEFT) == GLFW_PRESS);
+ nk_input_button(ctx, NK_BUTTON_MIDDLE, (int)x, (int)y, glfwGetMouseButton(win, GLFW_MOUSE_BUTTON_MIDDLE) == GLFW_PRESS);
+ nk_input_button(ctx, NK_BUTTON_RIGHT, (int)x, (int)y, glfwGetMouseButton(win, GLFW_MOUSE_BUTTON_RIGHT) == GLFW_PRESS);
+ nk_input_button(ctx, NK_BUTTON_DOUBLE, (int)glfw.double_click_pos.x, (int)glfw.double_click_pos.y, glfw.is_double_click_down);
+ nk_input_scroll(ctx, glfw.scroll);
+ nk_input_end(&glfw.ctx);
+ glfw.text_len = 0;
+ glfw.scroll = nk_vec2(0,0);
+}
+
+NK_API
+void nk_glfw3_shutdown(void)
+{
+ struct nk_glfw_device *dev = &glfw.ogl;
+ nk_font_atlas_clear(&glfw.atlas);
+ nk_free(&glfw.ctx);
+ glDeleteTextures(1, &dev->font_tex);
+ nk_buffer_free(&dev->cmds);
+ NK_MEMSET(&glfw, 0, sizeof(glfw));
+}
+
+#endif
diff --git a/chromium/third_party/dawn/third_party/glfw/deps/stb_image_write.h b/chromium/third_party/dawn/third_party/glfw/deps/stb_image_write.h
new file mode 100644
index 00000000000..e4b32ed1bc3
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/deps/stb_image_write.h
@@ -0,0 +1,1724 @@
+/* stb_image_write - v1.16 - public domain - http://nothings.org/stb
+ writes out PNG/BMP/TGA/JPEG/HDR images to C stdio - Sean Barrett 2010-2015
+ no warranty implied; use at your own risk
+
+ Before #including,
+
+ #define STB_IMAGE_WRITE_IMPLEMENTATION
+
+ in the file that you want to have the implementation.
+
+ Will probably not work correctly with strict-aliasing optimizations.
+
+ABOUT:
+
+ This header file is a library for writing images to C stdio or a callback.
+
+ The PNG output is not optimal; it is 20-50% larger than the file
+ written by a decent optimizing implementation; though providing a custom
+ zlib compress function (see STBIW_ZLIB_COMPRESS) can mitigate that.
+ This library is designed for source code compactness and simplicity,
+ not optimal image file size or run-time performance.
+
+BUILDING:
+
+ You can #define STBIW_ASSERT(x) before the #include to avoid using assert.h.
+   You can #define STBIW_MALLOC(), STBIW_REALLOC(), and STBIW_FREE() to replace
+   malloc, realloc, free.
+   You can #define STBIW_MEMMOVE() to replace memmove().
+   You can #define STBIW_ZLIB_COMPRESS to use a custom zlib-style compress function
+   for PNG compression (instead of the builtin one); it must have the following signature:
+   unsigned char * my_compress(unsigned char *data, int data_len, int *out_len, int quality);
+   The returned data will be freed with STBIW_FREE() (free() by default),
+   so it must be heap allocated with STBIW_MALLOC() (malloc() by default).
+
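+   For example (an illustrative sketch; my_malloc/my_realloc/my_free are
+   placeholders for your own allocator), the overrides go before including
+   the implementation:
+
+      #define STBIW_MALLOC(sz)        my_malloc(sz)
+      #define STBIW_REALLOC(p,newsz)  my_realloc(p,newsz)
+      #define STBIW_FREE(p)           my_free(p)
+      #define STB_IMAGE_WRITE_IMPLEMENTATION
+      #include "stb_image_write.h"
+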
+UNICODE:
+
+ If compiling for Windows and you wish to use Unicode filenames, compile
+ with
+ #define STBIW_WINDOWS_UTF8
+ and pass utf8-encoded filenames. Call stbiw_convert_wchar_to_utf8 to convert
+ Windows wchar_t filenames to utf8.
+
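+   For example (an illustrative sketch, Windows only; w, h, comp and data are
+   assumed to hold a valid image):
+
+      #define STBIW_WINDOWS_UTF8
+      #define STB_IMAGE_WRITE_IMPLEMENTATION
+      #include "stb_image_write.h"
+
+      char path[1024];
+      /* convert a wchar_t filename to utf8 before passing it to the writers */
+      stbiw_convert_wchar_to_utf8(path, sizeof(path), L"image.png");
+      stbi_write_bmp(path, w, h, comp, data);
+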
+USAGE:
+
+ There are five functions, one for each image file format:
+
+ int stbi_write_png(char const *filename, int w, int h, int comp, const void *data, int stride_in_bytes);
+ int stbi_write_bmp(char const *filename, int w, int h, int comp, const void *data);
+ int stbi_write_tga(char const *filename, int w, int h, int comp, const void *data);
+ int stbi_write_jpg(char const *filename, int w, int h, int comp, const void *data, int quality);
+ int stbi_write_hdr(char const *filename, int w, int h, int comp, const float *data);
+
+ void stbi_flip_vertically_on_write(int flag); // flag is non-zero to flip data vertically
+
+ There are also five equivalent functions that use an arbitrary write function. You are
+ expected to open/close your file-equivalent before and after calling these:
+
+ int stbi_write_png_to_func(stbi_write_func *func, void *context, int w, int h, int comp, const void *data, int stride_in_bytes);
+ int stbi_write_bmp_to_func(stbi_write_func *func, void *context, int w, int h, int comp, const void *data);
+ int stbi_write_tga_to_func(stbi_write_func *func, void *context, int w, int h, int comp, const void *data);
+ int stbi_write_hdr_to_func(stbi_write_func *func, void *context, int w, int h, int comp, const float *data);
+ int stbi_write_jpg_to_func(stbi_write_func *func, void *context, int x, int y, int comp, const void *data, int quality);
+
+ where the callback is:
+ void stbi_write_func(void *context, void *data, int size);
+
+ You can configure it with these global variables:
+ int stbi_write_tga_with_rle; // defaults to true; set to 0 to disable RLE
+ int stbi_write_png_compression_level; // defaults to 8; set to higher for more compression
+ int stbi_write_force_png_filter; // defaults to -1; set to 0..5 to force a filter mode
+
+
+ You can define STBI_WRITE_NO_STDIO to disable the file variant of these
+ functions, so the library will not use stdio.h at all. However, this will
+ also disable HDR writing, because it requires stdio for formatted output.
+
+ Each function returns 0 on failure and non-0 on success.
+
+ The functions create an image file defined by the parameters. The image
+ is a rectangle of pixels stored from left-to-right, top-to-bottom.
+ Each pixel contains 'comp' channels of data stored interleaved with 8-bits
+ per channel, in the following order: 1=Y, 2=YA, 3=RGB, 4=RGBA. (Y is
+ monochrome color.) The rectangle is 'w' pixels wide and 'h' pixels tall.
+ The *data pointer points to the first byte of the top-left-most pixel.
+ For PNG, "stride_in_bytes" is the distance in bytes from the first byte of
+ a row of pixels to the first byte of the next row of pixels.
+
+ PNG creates output files with the same number of components as the input.
+ The BMP format expands Y to RGB in the file format and does not
+ output alpha.
+
+ PNG supports writing rectangles of data even when the bytes storing rows of
+ data are not consecutive in memory (e.g. sub-rectangles of a larger image),
+ by supplying the stride between the beginning of adjacent rows. The other
+ formats do not. (Thus you cannot write a native-format BMP through the BMP
+ writer, both because it is in BGR order and because it may have padding
+ at the end of the line.)
+
+ PNG allows you to set the deflate compression level by setting the global
+ variable 'stbi_write_png_compression_level' (it defaults to 8).
+
+ HDR expects linear float data. Since the format is always 32-bit rgb(e)
+ data, alpha (if provided) is discarded, and for monochrome data it is
+ replicated across all three channels.
+
+ TGA supports RLE or non-RLE compressed data. To use non-RLE-compressed
+ data, set the global variable 'stbi_write_tga_with_rle' to 0.
+
+   JPEG ignores alpha channels in input data; quality is between 1 and 100.
+   Higher quality looks better but results in a bigger image.
+   JPEG output is baseline only (no progressive JPEG).
+
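+   For example (an illustrative sketch; error handling omitted), writing a
+   16x16 RGB image to a PNG file:
+
+      #define STB_IMAGE_WRITE_IMPLEMENTATION
+      #include "stb_image_write.h"
+
+      unsigned char pixels[16*16*3];               /* RGB data, filled elsewhere */
+      /* width, height, 3 components, row stride in bytes */
+      stbi_write_png("out.png", 16, 16, 3, pixels, 16*3);
+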
+CREDITS:
+
+
+ Sean Barrett - PNG/BMP/TGA
+ Baldur Karlsson - HDR
+ Jean-Sebastien Guay - TGA monochrome
+ Tim Kelsey - misc enhancements
+ Alan Hickman - TGA RLE
+ Emmanuel Julien - initial file IO callback implementation
+ Jon Olick - original jo_jpeg.cpp code
+ Daniel Gibson - integrate JPEG, allow external zlib
+ Aarni Koskela - allow choosing PNG filter
+
+ bugfixes:
+ github:Chribba
+ Guillaume Chereau
+ github:jry2
+ github:romigrou
+ Sergio Gonzalez
+ Jonas Karlsson
+ Filip Wasil
+ Thatcher Ulrich
+ github:poppolopoppo
+ Patrick Boettcher
+ github:xeekworx
+ Cap Petschulat
+ Simon Rodriguez
+ Ivan Tikhonov
+ github:ignotion
+ Adam Schackart
+ Andrew Kensler
+
+LICENSE
+
+ See end of file for license information.
+
+*/
+
+#ifndef INCLUDE_STB_IMAGE_WRITE_H
+#define INCLUDE_STB_IMAGE_WRITE_H
+
+#include <stdlib.h>
+
+// if STB_IMAGE_WRITE_STATIC causes problems, try defining STBIWDEF to 'inline' or 'static inline'
+#ifndef STBIWDEF
+#ifdef STB_IMAGE_WRITE_STATIC
+#define STBIWDEF static
+#else
+#ifdef __cplusplus
+#define STBIWDEF extern "C"
+#else
+#define STBIWDEF extern
+#endif
+#endif
+#endif
+
+#ifndef STB_IMAGE_WRITE_STATIC // C++ forbids static forward declarations
+STBIWDEF int stbi_write_tga_with_rle;
+STBIWDEF int stbi_write_png_compression_level;
+STBIWDEF int stbi_write_force_png_filter;
+#endif
+
+#ifndef STBI_WRITE_NO_STDIO
+STBIWDEF int stbi_write_png(char const *filename, int w, int h, int comp, const void *data, int stride_in_bytes);
+STBIWDEF int stbi_write_bmp(char const *filename, int w, int h, int comp, const void *data);
+STBIWDEF int stbi_write_tga(char const *filename, int w, int h, int comp, const void *data);
+STBIWDEF int stbi_write_hdr(char const *filename, int w, int h, int comp, const float *data);
+STBIWDEF int stbi_write_jpg(char const *filename, int x, int y, int comp, const void *data, int quality);
+
+#ifdef STBIW_WINDOWS_UTF8
+STBIWDEF int stbiw_convert_wchar_to_utf8(char *buffer, size_t bufferlen, const wchar_t* input);
+#endif
+#endif
+
+typedef void stbi_write_func(void *context, void *data, int size);
+
+STBIWDEF int stbi_write_png_to_func(stbi_write_func *func, void *context, int w, int h, int comp, const void *data, int stride_in_bytes);
+STBIWDEF int stbi_write_bmp_to_func(stbi_write_func *func, void *context, int w, int h, int comp, const void *data);
+STBIWDEF int stbi_write_tga_to_func(stbi_write_func *func, void *context, int w, int h, int comp, const void *data);
+STBIWDEF int stbi_write_hdr_to_func(stbi_write_func *func, void *context, int w, int h, int comp, const float *data);
+STBIWDEF int stbi_write_jpg_to_func(stbi_write_func *func, void *context, int x, int y, int comp, const void *data, int quality);
+
+STBIWDEF void stbi_flip_vertically_on_write(int flip_boolean);
+
+#endif//INCLUDE_STB_IMAGE_WRITE_H
+
+#ifdef STB_IMAGE_WRITE_IMPLEMENTATION
+
+#ifdef _WIN32
+ #ifndef _CRT_SECURE_NO_WARNINGS
+ #define _CRT_SECURE_NO_WARNINGS
+ #endif
+ #ifndef _CRT_NONSTDC_NO_DEPRECATE
+ #define _CRT_NONSTDC_NO_DEPRECATE
+ #endif
+#endif
+
+#ifndef STBI_WRITE_NO_STDIO
+#include <stdio.h>
+#endif // STBI_WRITE_NO_STDIO
+
+#include <stdarg.h>
+#include <stdlib.h>
+#include <string.h>
+#include <math.h>
+
+#if defined(STBIW_MALLOC) && defined(STBIW_FREE) && (defined(STBIW_REALLOC) || defined(STBIW_REALLOC_SIZED))
+// ok
+#elif !defined(STBIW_MALLOC) && !defined(STBIW_FREE) && !defined(STBIW_REALLOC) && !defined(STBIW_REALLOC_SIZED)
+// ok
+#else
+#error "Must define all or none of STBIW_MALLOC, STBIW_FREE, and STBIW_REALLOC (or STBIW_REALLOC_SIZED)."
+#endif
+
+#ifndef STBIW_MALLOC
+#define STBIW_MALLOC(sz) malloc(sz)
+#define STBIW_REALLOC(p,newsz) realloc(p,newsz)
+#define STBIW_FREE(p) free(p)
+#endif
+
+#ifndef STBIW_REALLOC_SIZED
+#define STBIW_REALLOC_SIZED(p,oldsz,newsz) STBIW_REALLOC(p,newsz)
+#endif
+
+
+#ifndef STBIW_MEMMOVE
+#define STBIW_MEMMOVE(a,b,sz) memmove(a,b,sz)
+#endif
+
+
+#ifndef STBIW_ASSERT
+#include <assert.h>
+#define STBIW_ASSERT(x) assert(x)
+#endif
+
+#define STBIW_UCHAR(x) (unsigned char) ((x) & 0xff)
+
+#ifdef STB_IMAGE_WRITE_STATIC
+static int stbi_write_png_compression_level = 8;
+static int stbi_write_tga_with_rle = 1;
+static int stbi_write_force_png_filter = -1;
+#else
+int stbi_write_png_compression_level = 8;
+int stbi_write_tga_with_rle = 1;
+int stbi_write_force_png_filter = -1;
+#endif
+
+static int stbi__flip_vertically_on_write = 0;
+
+STBIWDEF void stbi_flip_vertically_on_write(int flag)
+{
+ stbi__flip_vertically_on_write = flag;
+}
+
+typedef struct
+{
+ stbi_write_func *func;
+ void *context;
+ unsigned char buffer[64];
+ int buf_used;
+} stbi__write_context;
+
+// initialize a callback-based context
+static void stbi__start_write_callbacks(stbi__write_context *s, stbi_write_func *c, void *context)
+{
+ s->func = c;
+ s->context = context;
+}
+
+#ifndef STBI_WRITE_NO_STDIO
+
+static void stbi__stdio_write(void *context, void *data, int size)
+{
+ fwrite(data,1,size,(FILE*) context);
+}
+
+#if defined(_WIN32) && defined(STBIW_WINDOWS_UTF8)
+#ifdef __cplusplus
+#define STBIW_EXTERN extern "C"
+#else
+#define STBIW_EXTERN extern
+#endif
+STBIW_EXTERN __declspec(dllimport) int __stdcall MultiByteToWideChar(unsigned int cp, unsigned long flags, const char *str, int cbmb, wchar_t *widestr, int cchwide);
+STBIW_EXTERN __declspec(dllimport) int __stdcall WideCharToMultiByte(unsigned int cp, unsigned long flags, const wchar_t *widestr, int cchwide, char *str, int cbmb, const char *defchar, int *used_default);
+
+STBIWDEF int stbiw_convert_wchar_to_utf8(char *buffer, size_t bufferlen, const wchar_t* input)
+{
+ return WideCharToMultiByte(65001 /* UTF8 */, 0, input, -1, buffer, (int) bufferlen, NULL, NULL);
+}
+#endif
+
+static FILE *stbiw__fopen(char const *filename, char const *mode)
+{
+ FILE *f;
+#if defined(_WIN32) && defined(STBIW_WINDOWS_UTF8)
+ wchar_t wMode[64];
+ wchar_t wFilename[1024];
+ if (0 == MultiByteToWideChar(65001 /* UTF8 */, 0, filename, -1, wFilename, sizeof(wFilename)/sizeof(*wFilename)))
+ return 0;
+
+ if (0 == MultiByteToWideChar(65001 /* UTF8 */, 0, mode, -1, wMode, sizeof(wMode)/sizeof(*wMode)))
+ return 0;
+
+#if defined(_MSC_VER) && _MSC_VER >= 1400
+ if (0 != _wfopen_s(&f, wFilename, wMode))
+ f = 0;
+#else
+ f = _wfopen(wFilename, wMode);
+#endif
+
+#elif defined(_MSC_VER) && _MSC_VER >= 1400
+ if (0 != fopen_s(&f, filename, mode))
+ f=0;
+#else
+ f = fopen(filename, mode);
+#endif
+ return f;
+}
+
+static int stbi__start_write_file(stbi__write_context *s, const char *filename)
+{
+ FILE *f = stbiw__fopen(filename, "wb");
+ stbi__start_write_callbacks(s, stbi__stdio_write, (void *) f);
+ return f != NULL;
+}
+
+static void stbi__end_write_file(stbi__write_context *s)
+{
+ fclose((FILE *)s->context);
+}
+
+#endif // !STBI_WRITE_NO_STDIO
+
+typedef unsigned int stbiw_uint32;
+typedef int stb_image_write_test[sizeof(stbiw_uint32)==4 ? 1 : -1];
+
+static void stbiw__writefv(stbi__write_context *s, const char *fmt, va_list v)
+{
+ while (*fmt) {
+ switch (*fmt++) {
+ case ' ': break;
+ case '1': { unsigned char x = STBIW_UCHAR(va_arg(v, int));
+ s->func(s->context,&x,1);
+ break; }
+ case '2': { int x = va_arg(v,int);
+ unsigned char b[2];
+ b[0] = STBIW_UCHAR(x);
+ b[1] = STBIW_UCHAR(x>>8);
+ s->func(s->context,b,2);
+ break; }
+ case '4': { stbiw_uint32 x = va_arg(v,int);
+ unsigned char b[4];
+ b[0]=STBIW_UCHAR(x);
+ b[1]=STBIW_UCHAR(x>>8);
+ b[2]=STBIW_UCHAR(x>>16);
+ b[3]=STBIW_UCHAR(x>>24);
+ s->func(s->context,b,4);
+ break; }
+ default:
+ STBIW_ASSERT(0);
+ return;
+ }
+ }
+}
+
+static void stbiw__writef(stbi__write_context *s, const char *fmt, ...)
+{
+ va_list v;
+ va_start(v, fmt);
+ stbiw__writefv(s, fmt, v);
+ va_end(v);
+}
+
+static void stbiw__write_flush(stbi__write_context *s)
+{
+ if (s->buf_used) {
+ s->func(s->context, &s->buffer, s->buf_used);
+ s->buf_used = 0;
+ }
+}
+
+static void stbiw__putc(stbi__write_context *s, unsigned char c)
+{
+ s->func(s->context, &c, 1);
+}
+
+static void stbiw__write1(stbi__write_context *s, unsigned char a)
+{
+ if ((size_t)s->buf_used + 1 > sizeof(s->buffer))
+ stbiw__write_flush(s);
+ s->buffer[s->buf_used++] = a;
+}
+
+static void stbiw__write3(stbi__write_context *s, unsigned char a, unsigned char b, unsigned char c)
+{
+ int n;
+ if ((size_t)s->buf_used + 3 > sizeof(s->buffer))
+ stbiw__write_flush(s);
+ n = s->buf_used;
+ s->buf_used = n+3;
+ s->buffer[n+0] = a;
+ s->buffer[n+1] = b;
+ s->buffer[n+2] = c;
+}
+
+static void stbiw__write_pixel(stbi__write_context *s, int rgb_dir, int comp, int write_alpha, int expand_mono, unsigned char *d)
+{
+ unsigned char bg[3] = { 255, 0, 255}, px[3];
+ int k;
+
+ if (write_alpha < 0)
+ stbiw__write1(s, d[comp - 1]);
+
+ switch (comp) {
+      case 2: // 2 components = mono + alpha, alpha is written separately, so same as 1-channel case
+ case 1:
+ if (expand_mono)
+ stbiw__write3(s, d[0], d[0], d[0]); // monochrome bmp
+ else
+ stbiw__write1(s, d[0]); // monochrome TGA
+ break;
+ case 4:
+ if (!write_alpha) {
+ // composite against pink background
+ for (k = 0; k < 3; ++k)
+ px[k] = bg[k] + ((d[k] - bg[k]) * d[3]) / 255;
+ stbiw__write3(s, px[1 - rgb_dir], px[1], px[1 + rgb_dir]);
+ break;
+ }
+ /* FALLTHROUGH */
+ case 3:
+ stbiw__write3(s, d[1 - rgb_dir], d[1], d[1 + rgb_dir]);
+ break;
+ }
+ if (write_alpha > 0)
+ stbiw__write1(s, d[comp - 1]);
+}
+
+static void stbiw__write_pixels(stbi__write_context *s, int rgb_dir, int vdir, int x, int y, int comp, void *data, int write_alpha, int scanline_pad, int expand_mono)
+{
+ stbiw_uint32 zero = 0;
+ int i,j, j_end;
+
+ if (y <= 0)
+ return;
+
+ if (stbi__flip_vertically_on_write)
+ vdir *= -1;
+
+ if (vdir < 0) {
+ j_end = -1; j = y-1;
+ } else {
+ j_end = y; j = 0;
+ }
+
+ for (; j != j_end; j += vdir) {
+ for (i=0; i < x; ++i) {
+ unsigned char *d = (unsigned char *) data + (j*x+i)*comp;
+ stbiw__write_pixel(s, rgb_dir, comp, write_alpha, expand_mono, d);
+ }
+ stbiw__write_flush(s);
+ s->func(s->context, &zero, scanline_pad);
+ }
+}
+
+static int stbiw__outfile(stbi__write_context *s, int rgb_dir, int vdir, int x, int y, int comp, int expand_mono, void *data, int alpha, int pad, const char *fmt, ...)
+{
+ if (y < 0 || x < 0) {
+ return 0;
+ } else {
+ va_list v;
+ va_start(v, fmt);
+ stbiw__writefv(s, fmt, v);
+ va_end(v);
+ stbiw__write_pixels(s,rgb_dir,vdir,x,y,comp,data,alpha,pad, expand_mono);
+ return 1;
+ }
+}
+
+static int stbi_write_bmp_core(stbi__write_context *s, int x, int y, int comp, const void *data)
+{
+ if (comp != 4) {
+ // write RGB bitmap
+ int pad = (-x*3) & 3;
+ return stbiw__outfile(s,-1,-1,x,y,comp,1,(void *) data,0,pad,
+ "11 4 22 4" "4 44 22 444444",
+ 'B', 'M', 14+40+(x*3+pad)*y, 0,0, 14+40, // file header
+ 40, x,y, 1,24, 0,0,0,0,0,0); // bitmap header
+ } else {
+ // RGBA bitmaps need a v4 header
+ // use BI_BITFIELDS mode with 32bpp and alpha mask
+ // (straight BI_RGB with alpha mask doesn't work in most readers)
+ return stbiw__outfile(s,-1,-1,x,y,comp,1,(void *)data,1,0,
+ "11 4 22 4" "4 44 22 444444 4444 4 444 444 444 444",
+ 'B', 'M', 14+108+x*y*4, 0, 0, 14+108, // file header
+ 108, x,y, 1,32, 3,0,0,0,0,0, 0xff0000,0xff00,0xff,0xff000000u, 0, 0,0,0, 0,0,0, 0,0,0, 0,0,0); // bitmap V4 header
+ }
+}
+
+STBIWDEF int stbi_write_bmp_to_func(stbi_write_func *func, void *context, int x, int y, int comp, const void *data)
+{
+ stbi__write_context s = { 0 };
+ stbi__start_write_callbacks(&s, func, context);
+ return stbi_write_bmp_core(&s, x, y, comp, data);
+}
+
+#ifndef STBI_WRITE_NO_STDIO
+STBIWDEF int stbi_write_bmp(char const *filename, int x, int y, int comp, const void *data)
+{
+ stbi__write_context s = { 0 };
+ if (stbi__start_write_file(&s,filename)) {
+ int r = stbi_write_bmp_core(&s, x, y, comp, data);
+ stbi__end_write_file(&s);
+ return r;
+ } else
+ return 0;
+}
+#endif //!STBI_WRITE_NO_STDIO
+
+static int stbi_write_tga_core(stbi__write_context *s, int x, int y, int comp, void *data)
+{
+ int has_alpha = (comp == 2 || comp == 4);
+ int colorbytes = has_alpha ? comp-1 : comp;
+ int format = colorbytes < 2 ? 3 : 2; // 3 color channels (RGB/RGBA) = 2, 1 color channel (Y/YA) = 3
+
+ if (y < 0 || x < 0)
+ return 0;
+
+ if (!stbi_write_tga_with_rle) {
+ return stbiw__outfile(s, -1, -1, x, y, comp, 0, (void *) data, has_alpha, 0,
+ "111 221 2222 11", 0, 0, format, 0, 0, 0, 0, 0, x, y, (colorbytes + has_alpha) * 8, has_alpha * 8);
+ } else {
+ int i,j,k;
+ int jend, jdir;
+
+ stbiw__writef(s, "111 221 2222 11", 0,0,format+8, 0,0,0, 0,0,x,y, (colorbytes + has_alpha) * 8, has_alpha * 8);
+
+ if (stbi__flip_vertically_on_write) {
+ j = 0;
+ jend = y;
+ jdir = 1;
+ } else {
+ j = y-1;
+ jend = -1;
+ jdir = -1;
+ }
+ for (; j != jend; j += jdir) {
+ unsigned char *row = (unsigned char *) data + j * x * comp;
+ int len;
+
+ for (i = 0; i < x; i += len) {
+ unsigned char *begin = row + i * comp;
+ int diff = 1;
+ len = 1;
+
+ if (i < x - 1) {
+ ++len;
+ diff = memcmp(begin, row + (i + 1) * comp, comp);
+ if (diff) {
+ const unsigned char *prev = begin;
+ for (k = i + 2; k < x && len < 128; ++k) {
+ if (memcmp(prev, row + k * comp, comp)) {
+ prev += comp;
+ ++len;
+ } else {
+ --len;
+ break;
+ }
+ }
+ } else {
+ for (k = i + 2; k < x && len < 128; ++k) {
+ if (!memcmp(begin, row + k * comp, comp)) {
+ ++len;
+ } else {
+ break;
+ }
+ }
+ }
+ }
+
+ if (diff) {
+ unsigned char header = STBIW_UCHAR(len - 1);
+ stbiw__write1(s, header);
+ for (k = 0; k < len; ++k) {
+ stbiw__write_pixel(s, -1, comp, has_alpha, 0, begin + k * comp);
+ }
+ } else {
+ unsigned char header = STBIW_UCHAR(len - 129);
+ stbiw__write1(s, header);
+ stbiw__write_pixel(s, -1, comp, has_alpha, 0, begin);
+ }
+ }
+ }
+ stbiw__write_flush(s);
+ }
+ return 1;
+}
+
+STBIWDEF int stbi_write_tga_to_func(stbi_write_func *func, void *context, int x, int y, int comp, const void *data)
+{
+ stbi__write_context s = { 0 };
+ stbi__start_write_callbacks(&s, func, context);
+ return stbi_write_tga_core(&s, x, y, comp, (void *) data);
+}
+
+#ifndef STBI_WRITE_NO_STDIO
+STBIWDEF int stbi_write_tga(char const *filename, int x, int y, int comp, const void *data)
+{
+ stbi__write_context s = { 0 };
+ if (stbi__start_write_file(&s,filename)) {
+ int r = stbi_write_tga_core(&s, x, y, comp, (void *) data);
+ stbi__end_write_file(&s);
+ return r;
+ } else
+ return 0;
+}
+#endif
+
+// *************************************************************************************************
+// Radiance RGBE HDR writer
+// by Baldur Karlsson
+
+#define stbiw__max(a, b) ((a) > (b) ? (a) : (b))
+
+#ifndef STBI_WRITE_NO_STDIO
+
+static void stbiw__linear_to_rgbe(unsigned char *rgbe, float *linear)
+{
+ int exponent;
+ float maxcomp = stbiw__max(linear[0], stbiw__max(linear[1], linear[2]));
+
+ if (maxcomp < 1e-32f) {
+ rgbe[0] = rgbe[1] = rgbe[2] = rgbe[3] = 0;
+ } else {
+ float normalize = (float) frexp(maxcomp, &exponent) * 256.0f/maxcomp;
+
+ rgbe[0] = (unsigned char)(linear[0] * normalize);
+ rgbe[1] = (unsigned char)(linear[1] * normalize);
+ rgbe[2] = (unsigned char)(linear[2] * normalize);
+ rgbe[3] = (unsigned char)(exponent + 128);
+ }
+}
+
+static void stbiw__write_run_data(stbi__write_context *s, int length, unsigned char databyte)
+{
+ unsigned char lengthbyte = STBIW_UCHAR(length+128);
+ STBIW_ASSERT(length+128 <= 255);
+ s->func(s->context, &lengthbyte, 1);
+ s->func(s->context, &databyte, 1);
+}
+
+static void stbiw__write_dump_data(stbi__write_context *s, int length, unsigned char *data)
+{
+ unsigned char lengthbyte = STBIW_UCHAR(length);
+ STBIW_ASSERT(length <= 128); // inconsistent with spec but consistent with official code
+ s->func(s->context, &lengthbyte, 1);
+ s->func(s->context, data, length);
+}
+
+static void stbiw__write_hdr_scanline(stbi__write_context *s, int width, int ncomp, unsigned char *scratch, float *scanline)
+{
+ unsigned char scanlineheader[4] = { 2, 2, 0, 0 };
+ unsigned char rgbe[4];
+ float linear[3];
+ int x;
+
+ scanlineheader[2] = (width&0xff00)>>8;
+ scanlineheader[3] = (width&0x00ff);
+
+ /* skip RLE for images too small or large */
+ if (width < 8 || width >= 32768) {
+ for (x=0; x < width; x++) {
+ switch (ncomp) {
+ case 4: /* fallthrough */
+ case 3: linear[2] = scanline[x*ncomp + 2];
+ linear[1] = scanline[x*ncomp + 1];
+ linear[0] = scanline[x*ncomp + 0];
+ break;
+ default:
+ linear[0] = linear[1] = linear[2] = scanline[x*ncomp + 0];
+ break;
+ }
+ stbiw__linear_to_rgbe(rgbe, linear);
+ s->func(s->context, rgbe, 4);
+ }
+ } else {
+ int c,r;
+ /* encode into scratch buffer */
+ for (x=0; x < width; x++) {
+ switch(ncomp) {
+ case 4: /* fallthrough */
+ case 3: linear[2] = scanline[x*ncomp + 2];
+ linear[1] = scanline[x*ncomp + 1];
+ linear[0] = scanline[x*ncomp + 0];
+ break;
+ default:
+ linear[0] = linear[1] = linear[2] = scanline[x*ncomp + 0];
+ break;
+ }
+ stbiw__linear_to_rgbe(rgbe, linear);
+ scratch[x + width*0] = rgbe[0];
+ scratch[x + width*1] = rgbe[1];
+ scratch[x + width*2] = rgbe[2];
+ scratch[x + width*3] = rgbe[3];
+ }
+
+ s->func(s->context, scanlineheader, 4);
+
+ /* RLE each component separately */
+ for (c=0; c < 4; c++) {
+ unsigned char *comp = &scratch[width*c];
+
+ x = 0;
+ while (x < width) {
+ // find first run
+ r = x;
+ while (r+2 < width) {
+ if (comp[r] == comp[r+1] && comp[r] == comp[r+2])
+ break;
+ ++r;
+ }
+ if (r+2 >= width)
+ r = width;
+ // dump up to first run
+ while (x < r) {
+ int len = r-x;
+ if (len > 128) len = 128;
+ stbiw__write_dump_data(s, len, &comp[x]);
+ x += len;
+ }
+ // if there's a run, output it
+ if (r+2 < width) { // same test as what we break out of in search loop, so only true if we break'd
+ // find next byte after run
+ while (r < width && comp[r] == comp[x])
+ ++r;
+ // output run up to r
+ while (x < r) {
+ int len = r-x;
+ if (len > 127) len = 127;
+ stbiw__write_run_data(s, len, comp[x]);
+ x += len;
+ }
+ }
+ }
+ }
+ }
+}
+
+static int stbi_write_hdr_core(stbi__write_context *s, int x, int y, int comp, float *data)
+{
+ if (y <= 0 || x <= 0 || data == NULL)
+ return 0;
+ else {
+ // Each component is stored separately. Allocate scratch space for full output scanline.
+ unsigned char *scratch = (unsigned char *) STBIW_MALLOC(x*4);
+ int i, len;
+ char buffer[128];
+ char header[] = "#?RADIANCE\n# Written by stb_image_write.h\nFORMAT=32-bit_rle_rgbe\n";
+ s->func(s->context, header, sizeof(header)-1);
+
+#ifdef __STDC_LIB_EXT1__
+ len = sprintf_s(buffer, sizeof(buffer), "EXPOSURE= 1.0000000000000\n\n-Y %d +X %d\n", y, x);
+#else
+ len = sprintf(buffer, "EXPOSURE= 1.0000000000000\n\n-Y %d +X %d\n", y, x);
+#endif
+ s->func(s->context, buffer, len);
+
+ for(i=0; i < y; i++)
+ stbiw__write_hdr_scanline(s, x, comp, scratch, data + comp*x*(stbi__flip_vertically_on_write ? y-1-i : i));
+ STBIW_FREE(scratch);
+ return 1;
+ }
+}
+
+STBIWDEF int stbi_write_hdr_to_func(stbi_write_func *func, void *context, int x, int y, int comp, const float *data)
+{
+ stbi__write_context s = { 0 };
+ stbi__start_write_callbacks(&s, func, context);
+ return stbi_write_hdr_core(&s, x, y, comp, (float *) data);
+}
+
+STBIWDEF int stbi_write_hdr(char const *filename, int x, int y, int comp, const float *data)
+{
+ stbi__write_context s = { 0 };
+ if (stbi__start_write_file(&s,filename)) {
+ int r = stbi_write_hdr_core(&s, x, y, comp, (float *) data);
+ stbi__end_write_file(&s);
+ return r;
+ } else
+ return 0;
+}
+#endif // STBI_WRITE_NO_STDIO
+
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// PNG writer
+//
+
+#ifndef STBIW_ZLIB_COMPRESS
+// stretchy buffer; stbiw__sbpush() == vector<>::push_back() -- stbiw__sbcount() == vector<>::size()
+#define stbiw__sbraw(a) ((int *) (void *) (a) - 2)
+#define stbiw__sbm(a) stbiw__sbraw(a)[0]
+#define stbiw__sbn(a) stbiw__sbraw(a)[1]
+
+#define stbiw__sbneedgrow(a,n) ((a)==0 || stbiw__sbn(a)+n >= stbiw__sbm(a))
+#define stbiw__sbmaybegrow(a,n) (stbiw__sbneedgrow(a,(n)) ? stbiw__sbgrow(a,n) : 0)
+#define stbiw__sbgrow(a,n) stbiw__sbgrowf((void **) &(a), (n), sizeof(*(a)))
+
+#define stbiw__sbpush(a, v) (stbiw__sbmaybegrow(a,1), (a)[stbiw__sbn(a)++] = (v))
+#define stbiw__sbcount(a) ((a) ? stbiw__sbn(a) : 0)
+#define stbiw__sbfree(a) ((a) ? STBIW_FREE(stbiw__sbraw(a)),0 : 0)
+
+static void *stbiw__sbgrowf(void **arr, int increment, int itemsize)
+{
+ int m = *arr ? 2*stbiw__sbm(*arr)+increment : increment+1;
+ void *p = STBIW_REALLOC_SIZED(*arr ? stbiw__sbraw(*arr) : 0, *arr ? (stbiw__sbm(*arr)*itemsize + sizeof(int)*2) : 0, itemsize * m + sizeof(int)*2);
+ STBIW_ASSERT(p);
+ if (p) {
+ if (!*arr) ((int *) p)[1] = 0;
+ *arr = (void *) ((int *) p + 2);
+ stbiw__sbm(*arr) = m;
+ }
+ return *arr;
+}
+
+static unsigned char *stbiw__zlib_flushf(unsigned char *data, unsigned int *bitbuffer, int *bitcount)
+{
+ while (*bitcount >= 8) {
+ stbiw__sbpush(data, STBIW_UCHAR(*bitbuffer));
+ *bitbuffer >>= 8;
+ *bitcount -= 8;
+ }
+ return data;
+}
+
+static int stbiw__zlib_bitrev(int code, int codebits)
+{
+ int res=0;
+ while (codebits--) {
+ res = (res << 1) | (code & 1);
+ code >>= 1;
+ }
+ return res;
+}
+
+static unsigned int stbiw__zlib_countm(unsigned char *a, unsigned char *b, int limit)
+{
+ int i;
+ for (i=0; i < limit && i < 258; ++i)
+ if (a[i] != b[i]) break;
+ return i;
+}
+
+static unsigned int stbiw__zhash(unsigned char *data)
+{
+ stbiw_uint32 hash = data[0] + (data[1] << 8) + (data[2] << 16);
+ hash ^= hash << 3;
+ hash += hash >> 5;
+ hash ^= hash << 4;
+ hash += hash >> 17;
+ hash ^= hash << 25;
+ hash += hash >> 6;
+ return hash;
+}
+
+#define stbiw__zlib_flush() (out = stbiw__zlib_flushf(out, &bitbuf, &bitcount))
+#define stbiw__zlib_add(code,codebits) \
+ (bitbuf |= (code) << bitcount, bitcount += (codebits), stbiw__zlib_flush())
+#define stbiw__zlib_huffa(b,c) stbiw__zlib_add(stbiw__zlib_bitrev(b,c),c)
+// default huffman tables
+#define stbiw__zlib_huff1(n) stbiw__zlib_huffa(0x30 + (n), 8)
+#define stbiw__zlib_huff2(n) stbiw__zlib_huffa(0x190 + (n)-144, 9)
+#define stbiw__zlib_huff3(n) stbiw__zlib_huffa(0 + (n)-256,7)
+#define stbiw__zlib_huff4(n) stbiw__zlib_huffa(0xc0 + (n)-280,8)
+#define stbiw__zlib_huff(n) ((n) <= 143 ? stbiw__zlib_huff1(n) : (n) <= 255 ? stbiw__zlib_huff2(n) : (n) <= 279 ? stbiw__zlib_huff3(n) : stbiw__zlib_huff4(n))
+#define stbiw__zlib_huffb(n) ((n) <= 143 ? stbiw__zlib_huff1(n) : stbiw__zlib_huff2(n))
+
+#define stbiw__ZHASH 16384
+
+#endif // STBIW_ZLIB_COMPRESS
+
+STBIWDEF unsigned char * stbi_zlib_compress(unsigned char *data, int data_len, int *out_len, int quality)
+{
+#ifdef STBIW_ZLIB_COMPRESS
+ // user provided a zlib compress implementation, use that
+ return STBIW_ZLIB_COMPRESS(data, data_len, out_len, quality);
+#else // use builtin
+ static unsigned short lengthc[] = { 3,4,5,6,7,8,9,10,11,13,15,17,19,23,27,31,35,43,51,59,67,83,99,115,131,163,195,227,258, 259 };
+ static unsigned char lengtheb[]= { 0,0,0,0,0,0,0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0 };
+ static unsigned short distc[] = { 1,2,3,4,5,7,9,13,17,25,33,49,65,97,129,193,257,385,513,769,1025,1537,2049,3073,4097,6145,8193,12289,16385,24577, 32768 };
+ static unsigned char disteb[] = { 0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13 };
+ unsigned int bitbuf=0;
+ int i,j, bitcount=0;
+ unsigned char *out = NULL;
+ unsigned char ***hash_table = (unsigned char***) STBIW_MALLOC(stbiw__ZHASH * sizeof(unsigned char**));
+ if (hash_table == NULL)
+ return NULL;
+ if (quality < 5) quality = 5;
+
+ stbiw__sbpush(out, 0x78); // DEFLATE 32K window
+ stbiw__sbpush(out, 0x5e); // FLEVEL = 1
+ stbiw__zlib_add(1,1); // BFINAL = 1
+ stbiw__zlib_add(1,2); // BTYPE = 1 -- fixed huffman
+
+ for (i=0; i < stbiw__ZHASH; ++i)
+ hash_table[i] = NULL;
+
+ i=0;
+ while (i < data_len-3) {
+ // hash next 3 bytes of data to be compressed
+ int h = stbiw__zhash(data+i)&(stbiw__ZHASH-1), best=3;
+ unsigned char *bestloc = 0;
+ unsigned char **hlist = hash_table[h];
+ int n = stbiw__sbcount(hlist);
+ for (j=0; j < n; ++j) {
+ if (hlist[j]-data > i-32768) { // if entry lies within window
+ int d = stbiw__zlib_countm(hlist[j], data+i, data_len-i);
+ if (d >= best) { best=d; bestloc=hlist[j]; }
+ }
+ }
+ // when hash table entry is too long, delete half the entries
+ if (hash_table[h] && stbiw__sbn(hash_table[h]) == 2*quality) {
+ STBIW_MEMMOVE(hash_table[h], hash_table[h]+quality, sizeof(hash_table[h][0])*quality);
+ stbiw__sbn(hash_table[h]) = quality;
+ }
+ stbiw__sbpush(hash_table[h],data+i);
+
+ if (bestloc) {
+ // "lazy matching" - check match at *next* byte, and if it's better, do cur byte as literal
+ h = stbiw__zhash(data+i+1)&(stbiw__ZHASH-1);
+ hlist = hash_table[h];
+ n = stbiw__sbcount(hlist);
+ for (j=0; j < n; ++j) {
+ if (hlist[j]-data > i-32767) {
+ int e = stbiw__zlib_countm(hlist[j], data+i+1, data_len-i-1);
+ if (e > best) { // if next match is better, bail on current match
+ bestloc = NULL;
+ break;
+ }
+ }
+ }
+ }
+
+ if (bestloc) {
+ int d = (int) (data+i - bestloc); // distance back
+ STBIW_ASSERT(d <= 32767 && best <= 258);
+ for (j=0; best > lengthc[j+1]-1; ++j);
+ stbiw__zlib_huff(j+257);
+ if (lengtheb[j]) stbiw__zlib_add(best - lengthc[j], lengtheb[j]);
+ for (j=0; d > distc[j+1]-1; ++j);
+ stbiw__zlib_add(stbiw__zlib_bitrev(j,5),5);
+ if (disteb[j]) stbiw__zlib_add(d - distc[j], disteb[j]);
+ i += best;
+ } else {
+ stbiw__zlib_huffb(data[i]);
+ ++i;
+ }
+ }
+ // write out final bytes
+ for (;i < data_len; ++i)
+ stbiw__zlib_huffb(data[i]);
+ stbiw__zlib_huff(256); // end of block
+ // pad with 0 bits to byte boundary
+ while (bitcount)
+ stbiw__zlib_add(0,1);
+
+ for (i=0; i < stbiw__ZHASH; ++i)
+ (void) stbiw__sbfree(hash_table[i]);
+ STBIW_FREE(hash_table);
+
+ // store uncompressed instead if compression was worse
+ if (stbiw__sbn(out) > data_len + 2 + ((data_len+32766)/32767)*5) {
+ stbiw__sbn(out) = 2; // truncate to DEFLATE 32K window and FLEVEL = 1
+ for (j = 0; j < data_len;) {
+ int blocklen = data_len - j;
+ if (blocklen > 32767) blocklen = 32767;
+ stbiw__sbpush(out, data_len - j == blocklen); // BFINAL = ?, BTYPE = 0 -- no compression
+ stbiw__sbpush(out, STBIW_UCHAR(blocklen)); // LEN
+ stbiw__sbpush(out, STBIW_UCHAR(blocklen >> 8));
+ stbiw__sbpush(out, STBIW_UCHAR(~blocklen)); // NLEN
+ stbiw__sbpush(out, STBIW_UCHAR(~blocklen >> 8));
+ memcpy(out+stbiw__sbn(out), data+j, blocklen);
+ stbiw__sbn(out) += blocklen;
+ j += blocklen;
+ }
+ }
+
+ {
+ // compute adler32 on input
+ unsigned int s1=1, s2=0;
+ int blocklen = (int) (data_len % 5552);
+ j=0;
+ while (j < data_len) {
+ for (i=0; i < blocklen; ++i) { s1 += data[j+i]; s2 += s1; }
+ s1 %= 65521; s2 %= 65521;
+ j += blocklen;
+ blocklen = 5552;
+ }
+ stbiw__sbpush(out, STBIW_UCHAR(s2 >> 8));
+ stbiw__sbpush(out, STBIW_UCHAR(s2));
+ stbiw__sbpush(out, STBIW_UCHAR(s1 >> 8));
+ stbiw__sbpush(out, STBIW_UCHAR(s1));
+ }
+ *out_len = stbiw__sbn(out);
+ // make returned pointer freeable
+ STBIW_MEMMOVE(stbiw__sbraw(out), out, *out_len);
+ return (unsigned char *) stbiw__sbraw(out);
+#endif // STBIW_ZLIB_COMPRESS
+}
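+
+/* Minimal caller-side sketch for stbi_zlib_compress() (the `raw` / `raw_len`
+   names are hypothetical): the returned buffer is a complete zlib stream --
+   2-byte header, DEFLATE data, adler32 trailer -- allocated with
+   STBIW_MALLOC/STBIW_REALLOC, so release it with STBIW_FREE. */
+#if 0
+static void stbiw__zlib_example(unsigned char *raw, int raw_len)
+{
+   int zlen = 0;
+   unsigned char *z = stbi_zlib_compress(raw, raw_len, &zlen, 8 /* quality */);
+   if (z) {
+      /* ... consume the zlen bytes at z ... */
+      STBIW_FREE(z);
+   }
+}
+#endif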
+
+static unsigned int stbiw__crc32(unsigned char *buffer, int len)
+{
+#ifdef STBIW_CRC32
+ return STBIW_CRC32(buffer, len);
+#else
+ static unsigned int crc_table[256] =
+ {
+ 0x00000000, 0x77073096, 0xEE0E612C, 0x990951BA, 0x076DC419, 0x706AF48F, 0xE963A535, 0x9E6495A3,
+ 0x0EDB8832, 0x79DCB8A4, 0xE0D5E91E, 0x97D2D988, 0x09B64C2B, 0x7EB17CBD, 0xE7B82D07, 0x90BF1D91,
+ 0x1DB71064, 0x6AB020F2, 0xF3B97148, 0x84BE41DE, 0x1ADAD47D, 0x6DDDE4EB, 0xF4D4B551, 0x83D385C7,
+ 0x136C9856, 0x646BA8C0, 0xFD62F97A, 0x8A65C9EC, 0x14015C4F, 0x63066CD9, 0xFA0F3D63, 0x8D080DF5,
+ 0x3B6E20C8, 0x4C69105E, 0xD56041E4, 0xA2677172, 0x3C03E4D1, 0x4B04D447, 0xD20D85FD, 0xA50AB56B,
+ 0x35B5A8FA, 0x42B2986C, 0xDBBBC9D6, 0xACBCF940, 0x32D86CE3, 0x45DF5C75, 0xDCD60DCF, 0xABD13D59,
+ 0x26D930AC, 0x51DE003A, 0xC8D75180, 0xBFD06116, 0x21B4F4B5, 0x56B3C423, 0xCFBA9599, 0xB8BDA50F,
+ 0x2802B89E, 0x5F058808, 0xC60CD9B2, 0xB10BE924, 0x2F6F7C87, 0x58684C11, 0xC1611DAB, 0xB6662D3D,
+ 0x76DC4190, 0x01DB7106, 0x98D220BC, 0xEFD5102A, 0x71B18589, 0x06B6B51F, 0x9FBFE4A5, 0xE8B8D433,
+ 0x7807C9A2, 0x0F00F934, 0x9609A88E, 0xE10E9818, 0x7F6A0DBB, 0x086D3D2D, 0x91646C97, 0xE6635C01,
+ 0x6B6B51F4, 0x1C6C6162, 0x856530D8, 0xF262004E, 0x6C0695ED, 0x1B01A57B, 0x8208F4C1, 0xF50FC457,
+ 0x65B0D9C6, 0x12B7E950, 0x8BBEB8EA, 0xFCB9887C, 0x62DD1DDF, 0x15DA2D49, 0x8CD37CF3, 0xFBD44C65,
+ 0x4DB26158, 0x3AB551CE, 0xA3BC0074, 0xD4BB30E2, 0x4ADFA541, 0x3DD895D7, 0xA4D1C46D, 0xD3D6F4FB,
+ 0x4369E96A, 0x346ED9FC, 0xAD678846, 0xDA60B8D0, 0x44042D73, 0x33031DE5, 0xAA0A4C5F, 0xDD0D7CC9,
+ 0x5005713C, 0x270241AA, 0xBE0B1010, 0xC90C2086, 0x5768B525, 0x206F85B3, 0xB966D409, 0xCE61E49F,
+ 0x5EDEF90E, 0x29D9C998, 0xB0D09822, 0xC7D7A8B4, 0x59B33D17, 0x2EB40D81, 0xB7BD5C3B, 0xC0BA6CAD,
+ 0xEDB88320, 0x9ABFB3B6, 0x03B6E20C, 0x74B1D29A, 0xEAD54739, 0x9DD277AF, 0x04DB2615, 0x73DC1683,
+ 0xE3630B12, 0x94643B84, 0x0D6D6A3E, 0x7A6A5AA8, 0xE40ECF0B, 0x9309FF9D, 0x0A00AE27, 0x7D079EB1,
+ 0xF00F9344, 0x8708A3D2, 0x1E01F268, 0x6906C2FE, 0xF762575D, 0x806567CB, 0x196C3671, 0x6E6B06E7,
+ 0xFED41B76, 0x89D32BE0, 0x10DA7A5A, 0x67DD4ACC, 0xF9B9DF6F, 0x8EBEEFF9, 0x17B7BE43, 0x60B08ED5,
+ 0xD6D6A3E8, 0xA1D1937E, 0x38D8C2C4, 0x4FDFF252, 0xD1BB67F1, 0xA6BC5767, 0x3FB506DD, 0x48B2364B,
+ 0xD80D2BDA, 0xAF0A1B4C, 0x36034AF6, 0x41047A60, 0xDF60EFC3, 0xA867DF55, 0x316E8EEF, 0x4669BE79,
+ 0xCB61B38C, 0xBC66831A, 0x256FD2A0, 0x5268E236, 0xCC0C7795, 0xBB0B4703, 0x220216B9, 0x5505262F,
+ 0xC5BA3BBE, 0xB2BD0B28, 0x2BB45A92, 0x5CB36A04, 0xC2D7FFA7, 0xB5D0CF31, 0x2CD99E8B, 0x5BDEAE1D,
+ 0x9B64C2B0, 0xEC63F226, 0x756AA39C, 0x026D930A, 0x9C0906A9, 0xEB0E363F, 0x72076785, 0x05005713,
+ 0x95BF4A82, 0xE2B87A14, 0x7BB12BAE, 0x0CB61B38, 0x92D28E9B, 0xE5D5BE0D, 0x7CDCEFB7, 0x0BDBDF21,
+ 0x86D3D2D4, 0xF1D4E242, 0x68DDB3F8, 0x1FDA836E, 0x81BE16CD, 0xF6B9265B, 0x6FB077E1, 0x18B74777,
+ 0x88085AE6, 0xFF0F6A70, 0x66063BCA, 0x11010B5C, 0x8F659EFF, 0xF862AE69, 0x616BFFD3, 0x166CCF45,
+ 0xA00AE278, 0xD70DD2EE, 0x4E048354, 0x3903B3C2, 0xA7672661, 0xD06016F7, 0x4969474D, 0x3E6E77DB,
+ 0xAED16A4A, 0xD9D65ADC, 0x40DF0B66, 0x37D83BF0, 0xA9BCAE53, 0xDEBB9EC5, 0x47B2CF7F, 0x30B5FFE9,
+ 0xBDBDF21C, 0xCABAC28A, 0x53B39330, 0x24B4A3A6, 0xBAD03605, 0xCDD70693, 0x54DE5729, 0x23D967BF,
+ 0xB3667A2E, 0xC4614AB8, 0x5D681B02, 0x2A6F2B94, 0xB40BBE37, 0xC30C8EA1, 0x5A05DF1B, 0x2D02EF8D
+ };
+
+ unsigned int crc = ~0u;
+ int i;
+ for (i=0; i < len; ++i)
+ crc = (crc >> 8) ^ crc_table[buffer[i] ^ (crc & 0xff)];
+ return ~crc;
+#endif
+}
+
+#define stbiw__wpng4(o,a,b,c,d) ((o)[0]=STBIW_UCHAR(a),(o)[1]=STBIW_UCHAR(b),(o)[2]=STBIW_UCHAR(c),(o)[3]=STBIW_UCHAR(d),(o)+=4)
+#define stbiw__wp32(data,v) stbiw__wpng4(data, (v)>>24,(v)>>16,(v)>>8,(v));
+#define stbiw__wptag(data,s) stbiw__wpng4(data, s[0],s[1],s[2],s[3])
+
+static void stbiw__wpcrc(unsigned char **data, int len)
+{
+ unsigned int crc = stbiw__crc32(*data - len - 4, len+4);
+ stbiw__wp32(*data, crc);
+}
+
+static unsigned char stbiw__paeth(int a, int b, int c)
+{
+ int p = a + b - c, pa = abs(p-a), pb = abs(p-b), pc = abs(p-c);
+ if (pa <= pb && pa <= pc) return STBIW_UCHAR(a);
+ if (pb <= pc) return STBIW_UCHAR(b);
+ return STBIW_UCHAR(c);
+}
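+/* Worked example for stbiw__paeth() above: with a=10 (left), b=20 (up) and
+   c=15 (up-left), p = a+b-c = 15, so the distances are pa=5, pb=5, pc=0 and
+   the function returns c (15) -- i.e. whichever neighbour is closest to p,
+   preferring a, then b, on ties. */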
+
+// @OPTIMIZE: provide an option that always forces left-predict or paeth predict
+static void stbiw__encode_png_line(unsigned char *pixels, int stride_bytes, int width, int height, int y, int n, int filter_type, signed char *line_buffer)
+{
+ static int mapping[] = { 0,1,2,3,4 };
+ static int firstmap[] = { 0,1,0,5,6 };
+ int *mymap = (y != 0) ? mapping : firstmap;
+ int i;
+ int type = mymap[filter_type];
+ unsigned char *z = pixels + stride_bytes * (stbi__flip_vertically_on_write ? height-1-y : y);
+ int signed_stride = stbi__flip_vertically_on_write ? -stride_bytes : stride_bytes;
+
+ if (type==0) {
+ memcpy(line_buffer, z, width*n);
+ return;
+ }
+
+ // first loop isn't optimized since it's just one pixel
+ for (i = 0; i < n; ++i) {
+ switch (type) {
+ case 1: line_buffer[i] = z[i]; break;
+ case 2: line_buffer[i] = z[i] - z[i-signed_stride]; break;
+ case 3: line_buffer[i] = z[i] - (z[i-signed_stride]>>1); break;
+ case 4: line_buffer[i] = (signed char) (z[i] - stbiw__paeth(0,z[i-signed_stride],0)); break;
+ case 5: line_buffer[i] = z[i]; break;
+ case 6: line_buffer[i] = z[i]; break;
+ }
+ }
+ switch (type) {
+ case 1: for (i=n; i < width*n; ++i) line_buffer[i] = z[i] - z[i-n]; break;
+ case 2: for (i=n; i < width*n; ++i) line_buffer[i] = z[i] - z[i-signed_stride]; break;
+ case 3: for (i=n; i < width*n; ++i) line_buffer[i] = z[i] - ((z[i-n] + z[i-signed_stride])>>1); break;
+ case 4: for (i=n; i < width*n; ++i) line_buffer[i] = z[i] - stbiw__paeth(z[i-n], z[i-signed_stride], z[i-signed_stride-n]); break;
+ case 5: for (i=n; i < width*n; ++i) line_buffer[i] = z[i] - (z[i-n]>>1); break;
+ case 6: for (i=n; i < width*n; ++i) line_buffer[i] = z[i] - stbiw__paeth(z[i-n], 0,0); break;
+ }
+}
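+/* Note on the filter mapping above: types 0-4 are the standard PNG filters
+   (None, Sub, Up, Average, Paeth). On the first scanline the previous row is
+   defined by PNG as all zeros, so Up degenerates to a raw copy and Average /
+   Paeth are computed via the internal types 5 and 6 with the "up" sample
+   taken as zero; the filter byte recorded in the output stream is still the
+   standard 0-4 value and decodes identically. */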
+
+STBIWDEF unsigned char *stbi_write_png_to_mem(const unsigned char *pixels, int stride_bytes, int x, int y, int n, int *out_len)
+{
+ int force_filter = stbi_write_force_png_filter;
+ int ctype[5] = { -1, 0, 4, 2, 6 };
+ unsigned char sig[8] = { 137,80,78,71,13,10,26,10 };
+ unsigned char *out,*o, *filt, *zlib;
+ signed char *line_buffer;
+ int j,zlen;
+
+ if (stride_bytes == 0)
+ stride_bytes = x * n;
+
+ if (force_filter >= 5) {
+ force_filter = -1;
+ }
+
+ filt = (unsigned char *) STBIW_MALLOC((x*n+1) * y); if (!filt) return 0;
+ line_buffer = (signed char *) STBIW_MALLOC(x * n); if (!line_buffer) { STBIW_FREE(filt); return 0; }
+ for (j=0; j < y; ++j) {
+ int filter_type;
+ if (force_filter > -1) {
+ filter_type = force_filter;
+ stbiw__encode_png_line((unsigned char*)(pixels), stride_bytes, x, y, j, n, force_filter, line_buffer);
+ } else { // Estimate the best filter by running through all of them:
+ int best_filter = 0, best_filter_val = 0x7fffffff, est, i;
+ for (filter_type = 0; filter_type < 5; filter_type++) {
+ stbiw__encode_png_line((unsigned char*)(pixels), stride_bytes, x, y, j, n, filter_type, line_buffer);
+
+ // Estimate the entropy of the line using this filter; the less, the better.
+ est = 0;
+ for (i = 0; i < x*n; ++i) {
+ est += abs((signed char) line_buffer[i]);
+ }
+ if (est < best_filter_val) {
+ best_filter_val = est;
+ best_filter = filter_type;
+ }
+ }
+ if (filter_type != best_filter) { // If the last iteration already got us the best filter, don't redo it
+ stbiw__encode_png_line((unsigned char*)(pixels), stride_bytes, x, y, j, n, best_filter, line_buffer);
+ filter_type = best_filter;
+ }
+ }
+ // when we get here, filter_type contains the filter type, and line_buffer contains the data
+ filt[j*(x*n+1)] = (unsigned char) filter_type;
+ STBIW_MEMMOVE(filt+j*(x*n+1)+1, line_buffer, x*n);
+ }
+ STBIW_FREE(line_buffer);
+ zlib = stbi_zlib_compress(filt, y*( x*n+1), &zlen, stbi_write_png_compression_level);
+ STBIW_FREE(filt);
+ if (!zlib) return 0;
+
+ // each tag requires 12 bytes of overhead
+ out = (unsigned char *) STBIW_MALLOC(8 + 12+13 + 12+zlen + 12);
+ if (!out) return 0;
+ *out_len = 8 + 12+13 + 12+zlen + 12;
+
+ o=out;
+ STBIW_MEMMOVE(o,sig,8); o+= 8;
+ stbiw__wp32(o, 13); // header length
+ stbiw__wptag(o, "IHDR");
+ stbiw__wp32(o, x);
+ stbiw__wp32(o, y);
+ *o++ = 8;
+ *o++ = STBIW_UCHAR(ctype[n]);
+ *o++ = 0;
+ *o++ = 0;
+ *o++ = 0;
+ stbiw__wpcrc(&o,13);
+
+ stbiw__wp32(o, zlen);
+ stbiw__wptag(o, "IDAT");
+ STBIW_MEMMOVE(o, zlib, zlen);
+ o += zlen;
+ STBIW_FREE(zlib);
+ stbiw__wpcrc(&o, zlen);
+
+ stbiw__wp32(o,0);
+ stbiw__wptag(o, "IEND");
+ stbiw__wpcrc(&o,0);
+
+ STBIW_ASSERT(o == out + *out_len);
+
+ return out;
+}
+
+#ifndef STBI_WRITE_NO_STDIO
+STBIWDEF int stbi_write_png(char const *filename, int x, int y, int comp, const void *data, int stride_bytes)
+{
+ FILE *f;
+ int len;
+ unsigned char *png = stbi_write_png_to_mem((const unsigned char *) data, stride_bytes, x, y, comp, &len);
+ if (png == NULL) return 0;
+
+ f = stbiw__fopen(filename, "wb");
+ if (!f) { STBIW_FREE(png); return 0; }
+ fwrite(png, 1, len, f);
+ fclose(f);
+ STBIW_FREE(png);
+ return 1;
+}
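+
+/* Minimal end-to-end sketch for stbi_write_png() (file name and pixel values
+   are illustrative): `comp` is the number of interleaved channels and
+   `stride_bytes` may be 0 for tightly packed rows (x * comp bytes). */
+#if 0
+static int stbiw__png_example(void)
+{
+   unsigned char rgba[2*2*4] = {
+      255,0,0,255,    0,255,0,255,      /* red,  green */
+      0,0,255,255,    255,255,255,255   /* blue, white */
+   };
+   return stbi_write_png("example.png", 2, 2, 4, rgba, 2*4); /* 1 on success */
+}
+#endif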
+#endif
+
+STBIWDEF int stbi_write_png_to_func(stbi_write_func *func, void *context, int x, int y, int comp, const void *data, int stride_bytes)
+{
+ int len;
+ unsigned char *png = stbi_write_png_to_mem((const unsigned char *) data, stride_bytes, x, y, comp, &len);
+ if (png == NULL) return 0;
+ func(context, png, len);
+ STBIW_FREE(png);
+ return 1;
+}
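+
+/* Sketch of the callback-based variant above (the `counting_write` helper is
+   hypothetical): stbi_write_png_to_func() hands the finished PNG bytes to a
+   user callback instead of a FILE*, which is also how STBI_WRITE_NO_STDIO
+   builds emit output. */
+#if 0
+static void counting_write(void *context, void *data, int size)
+{
+   (void) data;
+   *(int *) context += size;   /* just count the bytes that would be written */
+}
+
+static int stbiw__png_size_example(const unsigned char *rgba, int w, int h)
+{
+   int total = 0;
+   stbi_write_png_to_func(counting_write, &total, w, h, 4, rgba, w*4);
+   return total;
+}
+#endif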
+
+
+/* ***************************************************************************
+ *
+ * JPEG writer
+ *
+ * This is based on Jon Olick's jo_jpeg.cpp:
+ * public domain Simple, Minimalistic JPEG writer - http://www.jonolick.com/code.html
+ */
+
+static const unsigned char stbiw__jpg_ZigZag[] = { 0,1,5,6,14,15,27,28,2,4,7,13,16,26,29,42,3,8,12,17,25,30,41,43,9,11,18,
+ 24,31,40,44,53,10,19,23,32,39,45,52,54,20,22,33,38,46,51,55,60,21,34,37,47,50,56,59,61,35,36,48,49,57,58,62,63 };
+
+static void stbiw__jpg_writeBits(stbi__write_context *s, int *bitBufP, int *bitCntP, const unsigned short *bs) {
+ int bitBuf = *bitBufP, bitCnt = *bitCntP;
+ bitCnt += bs[1];
+ bitBuf |= bs[0] << (24 - bitCnt);
+ while(bitCnt >= 8) {
+ unsigned char c = (bitBuf >> 16) & 255;
+ stbiw__putc(s, c);
+ if(c == 255) {
+ stbiw__putc(s, 0);
+ }
+ bitBuf <<= 8;
+ bitCnt -= 8;
+ }
+ *bitBufP = bitBuf;
+ *bitCntP = bitCnt;
+}
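+/* Note on the bit packer above: codes are MSB-justified into a 24-bit window
+   (the next bit to emit is bit 23 of bitBuf) and the top byte is flushed
+   whenever 8 or more bits are pending. A flushed 0xFF byte is followed by a
+   stuffed 0x00 so the entropy-coded data can never be mistaken for a JPEG
+   marker. */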
+
+static void stbiw__jpg_DCT(float *d0p, float *d1p, float *d2p, float *d3p, float *d4p, float *d5p, float *d6p, float *d7p) {
+ float d0 = *d0p, d1 = *d1p, d2 = *d2p, d3 = *d3p, d4 = *d4p, d5 = *d5p, d6 = *d6p, d7 = *d7p;
+ float z1, z2, z3, z4, z5, z11, z13;
+
+ float tmp0 = d0 + d7;
+ float tmp7 = d0 - d7;
+ float tmp1 = d1 + d6;
+ float tmp6 = d1 - d6;
+ float tmp2 = d2 + d5;
+ float tmp5 = d2 - d5;
+ float tmp3 = d3 + d4;
+ float tmp4 = d3 - d4;
+
+ // Even part
+ float tmp10 = tmp0 + tmp3; // phase 2
+ float tmp13 = tmp0 - tmp3;
+ float tmp11 = tmp1 + tmp2;
+ float tmp12 = tmp1 - tmp2;
+
+ d0 = tmp10 + tmp11; // phase 3
+ d4 = tmp10 - tmp11;
+
+ z1 = (tmp12 + tmp13) * 0.707106781f; // c4
+ d2 = tmp13 + z1; // phase 5
+ d6 = tmp13 - z1;
+
+ // Odd part
+ tmp10 = tmp4 + tmp5; // phase 2
+ tmp11 = tmp5 + tmp6;
+ tmp12 = tmp6 + tmp7;
+
+ // The rotator is modified from fig 4-8 to avoid extra negations.
+ z5 = (tmp10 - tmp12) * 0.382683433f; // c6
+ z2 = tmp10 * 0.541196100f + z5; // c2-c6
+ z4 = tmp12 * 1.306562965f + z5; // c2+c6
+ z3 = tmp11 * 0.707106781f; // c4
+
+ z11 = tmp7 + z3; // phase 5
+ z13 = tmp7 - z3;
+
+ *d5p = z13 + z2; // phase 6
+ *d3p = z13 - z2;
+ *d1p = z11 + z4;
+ *d7p = z11 - z4;
+
+ *d0p = d0; *d2p = d2; *d4p = d4; *d6p = d6;
+}
+
+static void stbiw__jpg_calcBits(int val, unsigned short bits[2]) {
+ int tmp1 = val < 0 ? -val : val;
+ val = val < 0 ? val-1 : val;
+ bits[1] = 1;
+ while(tmp1 >>= 1) {
+ ++bits[1];
+ }
+ bits[0] = val & ((1<<bits[1])-1);
+}
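+/* Worked example for stbiw__jpg_calcBits() above (JPEG variable-length
+   integer coding): val = +3 gives bits[1] = 2 and bits[0] = 0b11, while
+   val = -3 is first decremented to -4 and gives bits[1] = 2, bits[0] = 0b00,
+   matching the category/amplitude pairs in the JPEG specification. */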
+
+static int stbiw__jpg_processDU(stbi__write_context *s, int *bitBuf, int *bitCnt, float *CDU, int du_stride, float *fdtbl, int DC, const unsigned short HTDC[256][2], const unsigned short HTAC[256][2]) {
+ const unsigned short EOB[2] = { HTAC[0x00][0], HTAC[0x00][1] };
+ const unsigned short M16zeroes[2] = { HTAC[0xF0][0], HTAC[0xF0][1] };
+ int dataOff, i, j, n, diff, end0pos, x, y;
+ int DU[64];
+
+ // DCT rows
+ for(dataOff=0, n=du_stride*8; dataOff<n; dataOff+=du_stride) {
+ stbiw__jpg_DCT(&CDU[dataOff], &CDU[dataOff+1], &CDU[dataOff+2], &CDU[dataOff+3], &CDU[dataOff+4], &CDU[dataOff+5], &CDU[dataOff+6], &CDU[dataOff+7]);
+ }
+ // DCT columns
+ for(dataOff=0; dataOff<8; ++dataOff) {
+ stbiw__jpg_DCT(&CDU[dataOff], &CDU[dataOff+du_stride], &CDU[dataOff+du_stride*2], &CDU[dataOff+du_stride*3], &CDU[dataOff+du_stride*4],
+ &CDU[dataOff+du_stride*5], &CDU[dataOff+du_stride*6], &CDU[dataOff+du_stride*7]);
+ }
+ // Quantize/descale/zigzag the coefficients
+ for(y = 0, j=0; y < 8; ++y) {
+ for(x = 0; x < 8; ++x,++j) {
+ float v;
+ i = y*du_stride+x;
+ v = CDU[i]*fdtbl[j];
+ // DU[stbiw__jpg_ZigZag[j]] = (int)(v < 0 ? ceilf(v - 0.5f) : floorf(v + 0.5f));
+ // ceilf() and floorf() are C99, not C89, but I /think/ they're not needed here anyway?
+ DU[stbiw__jpg_ZigZag[j]] = (int)(v < 0 ? v - 0.5f : v + 0.5f);
+ }
+ }
+
+ // Encode DC
+ diff = DU[0] - DC;
+ if (diff == 0) {
+ stbiw__jpg_writeBits(s, bitBuf, bitCnt, HTDC[0]);
+ } else {
+ unsigned short bits[2];
+ stbiw__jpg_calcBits(diff, bits);
+ stbiw__jpg_writeBits(s, bitBuf, bitCnt, HTDC[bits[1]]);
+ stbiw__jpg_writeBits(s, bitBuf, bitCnt, bits);
+ }
+ // Encode ACs
+ end0pos = 63;
+ for(; (end0pos>0)&&(DU[end0pos]==0); --end0pos) {
+ }
+ // end0pos = first element in reverse order !=0
+ if(end0pos == 0) {
+ stbiw__jpg_writeBits(s, bitBuf, bitCnt, EOB);
+ return DU[0];
+ }
+ for(i = 1; i <= end0pos; ++i) {
+ int startpos = i;
+ int nrzeroes;
+ unsigned short bits[2];
+ for (; DU[i]==0 && i<=end0pos; ++i) {
+ }
+ nrzeroes = i-startpos;
+ if ( nrzeroes >= 16 ) {
+ int lng = nrzeroes>>4;
+ int nrmarker;
+ for (nrmarker=1; nrmarker <= lng; ++nrmarker)
+ stbiw__jpg_writeBits(s, bitBuf, bitCnt, M16zeroes);
+ nrzeroes &= 15;
+ }
+ stbiw__jpg_calcBits(DU[i], bits);
+ stbiw__jpg_writeBits(s, bitBuf, bitCnt, HTAC[(nrzeroes<<4)+bits[1]]);
+ stbiw__jpg_writeBits(s, bitBuf, bitCnt, bits);
+ }
+ if(end0pos != 63) {
+ stbiw__jpg_writeBits(s, bitBuf, bitCnt, EOB);
+ }
+ return DU[0];
+}
+
+static int stbi_write_jpg_core(stbi__write_context *s, int width, int height, int comp, const void* data, int quality) {
+ // Constants that don't pollute global namespace
+ static const unsigned char std_dc_luminance_nrcodes[] = {0,0,1,5,1,1,1,1,1,1,0,0,0,0,0,0,0};
+ static const unsigned char std_dc_luminance_values[] = {0,1,2,3,4,5,6,7,8,9,10,11};
+ static const unsigned char std_ac_luminance_nrcodes[] = {0,0,2,1,3,3,2,4,3,5,5,4,4,0,0,1,0x7d};
+ static const unsigned char std_ac_luminance_values[] = {
+ 0x01,0x02,0x03,0x00,0x04,0x11,0x05,0x12,0x21,0x31,0x41,0x06,0x13,0x51,0x61,0x07,0x22,0x71,0x14,0x32,0x81,0x91,0xa1,0x08,
+ 0x23,0x42,0xb1,0xc1,0x15,0x52,0xd1,0xf0,0x24,0x33,0x62,0x72,0x82,0x09,0x0a,0x16,0x17,0x18,0x19,0x1a,0x25,0x26,0x27,0x28,
+ 0x29,0x2a,0x34,0x35,0x36,0x37,0x38,0x39,0x3a,0x43,0x44,0x45,0x46,0x47,0x48,0x49,0x4a,0x53,0x54,0x55,0x56,0x57,0x58,0x59,
+ 0x5a,0x63,0x64,0x65,0x66,0x67,0x68,0x69,0x6a,0x73,0x74,0x75,0x76,0x77,0x78,0x79,0x7a,0x83,0x84,0x85,0x86,0x87,0x88,0x89,
+ 0x8a,0x92,0x93,0x94,0x95,0x96,0x97,0x98,0x99,0x9a,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7,0xa8,0xa9,0xaa,0xb2,0xb3,0xb4,0xb5,0xb6,
+ 0xb7,0xb8,0xb9,0xba,0xc2,0xc3,0xc4,0xc5,0xc6,0xc7,0xc8,0xc9,0xca,0xd2,0xd3,0xd4,0xd5,0xd6,0xd7,0xd8,0xd9,0xda,0xe1,0xe2,
+ 0xe3,0xe4,0xe5,0xe6,0xe7,0xe8,0xe9,0xea,0xf1,0xf2,0xf3,0xf4,0xf5,0xf6,0xf7,0xf8,0xf9,0xfa
+ };
+ static const unsigned char std_dc_chrominance_nrcodes[] = {0,0,3,1,1,1,1,1,1,1,1,1,0,0,0,0,0};
+ static const unsigned char std_dc_chrominance_values[] = {0,1,2,3,4,5,6,7,8,9,10,11};
+ static const unsigned char std_ac_chrominance_nrcodes[] = {0,0,2,1,2,4,4,3,4,7,5,4,4,0,1,2,0x77};
+ static const unsigned char std_ac_chrominance_values[] = {
+ 0x00,0x01,0x02,0x03,0x11,0x04,0x05,0x21,0x31,0x06,0x12,0x41,0x51,0x07,0x61,0x71,0x13,0x22,0x32,0x81,0x08,0x14,0x42,0x91,
+ 0xa1,0xb1,0xc1,0x09,0x23,0x33,0x52,0xf0,0x15,0x62,0x72,0xd1,0x0a,0x16,0x24,0x34,0xe1,0x25,0xf1,0x17,0x18,0x19,0x1a,0x26,
+ 0x27,0x28,0x29,0x2a,0x35,0x36,0x37,0x38,0x39,0x3a,0x43,0x44,0x45,0x46,0x47,0x48,0x49,0x4a,0x53,0x54,0x55,0x56,0x57,0x58,
+ 0x59,0x5a,0x63,0x64,0x65,0x66,0x67,0x68,0x69,0x6a,0x73,0x74,0x75,0x76,0x77,0x78,0x79,0x7a,0x82,0x83,0x84,0x85,0x86,0x87,
+ 0x88,0x89,0x8a,0x92,0x93,0x94,0x95,0x96,0x97,0x98,0x99,0x9a,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7,0xa8,0xa9,0xaa,0xb2,0xb3,0xb4,
+ 0xb5,0xb6,0xb7,0xb8,0xb9,0xba,0xc2,0xc3,0xc4,0xc5,0xc6,0xc7,0xc8,0xc9,0xca,0xd2,0xd3,0xd4,0xd5,0xd6,0xd7,0xd8,0xd9,0xda,
+ 0xe2,0xe3,0xe4,0xe5,0xe6,0xe7,0xe8,0xe9,0xea,0xf2,0xf3,0xf4,0xf5,0xf6,0xf7,0xf8,0xf9,0xfa
+ };
+ // Huffman tables
+ static const unsigned short YDC_HT[256][2] = { {0,2},{2,3},{3,3},{4,3},{5,3},{6,3},{14,4},{30,5},{62,6},{126,7},{254,8},{510,9}};
+ static const unsigned short UVDC_HT[256][2] = { {0,2},{1,2},{2,2},{6,3},{14,4},{30,5},{62,6},{126,7},{254,8},{510,9},{1022,10},{2046,11}};
+ static const unsigned short YAC_HT[256][2] = {
+ {10,4},{0,2},{1,2},{4,3},{11,4},{26,5},{120,7},{248,8},{1014,10},{65410,16},{65411,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
+ {12,4},{27,5},{121,7},{502,9},{2038,11},{65412,16},{65413,16},{65414,16},{65415,16},{65416,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
+ {28,5},{249,8},{1015,10},{4084,12},{65417,16},{65418,16},{65419,16},{65420,16},{65421,16},{65422,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
+ {58,6},{503,9},{4085,12},{65423,16},{65424,16},{65425,16},{65426,16},{65427,16},{65428,16},{65429,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
+ {59,6},{1016,10},{65430,16},{65431,16},{65432,16},{65433,16},{65434,16},{65435,16},{65436,16},{65437,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
+ {122,7},{2039,11},{65438,16},{65439,16},{65440,16},{65441,16},{65442,16},{65443,16},{65444,16},{65445,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
+ {123,7},{4086,12},{65446,16},{65447,16},{65448,16},{65449,16},{65450,16},{65451,16},{65452,16},{65453,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
+ {250,8},{4087,12},{65454,16},{65455,16},{65456,16},{65457,16},{65458,16},{65459,16},{65460,16},{65461,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
+ {504,9},{32704,15},{65462,16},{65463,16},{65464,16},{65465,16},{65466,16},{65467,16},{65468,16},{65469,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
+ {505,9},{65470,16},{65471,16},{65472,16},{65473,16},{65474,16},{65475,16},{65476,16},{65477,16},{65478,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
+ {506,9},{65479,16},{65480,16},{65481,16},{65482,16},{65483,16},{65484,16},{65485,16},{65486,16},{65487,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
+ {1017,10},{65488,16},{65489,16},{65490,16},{65491,16},{65492,16},{65493,16},{65494,16},{65495,16},{65496,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
+ {1018,10},{65497,16},{65498,16},{65499,16},{65500,16},{65501,16},{65502,16},{65503,16},{65504,16},{65505,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
+ {2040,11},{65506,16},{65507,16},{65508,16},{65509,16},{65510,16},{65511,16},{65512,16},{65513,16},{65514,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
+ {65515,16},{65516,16},{65517,16},{65518,16},{65519,16},{65520,16},{65521,16},{65522,16},{65523,16},{65524,16},{0,0},{0,0},{0,0},{0,0},{0,0},
+ {2041,11},{65525,16},{65526,16},{65527,16},{65528,16},{65529,16},{65530,16},{65531,16},{65532,16},{65533,16},{65534,16},{0,0},{0,0},{0,0},{0,0},{0,0}
+ };
+ static const unsigned short UVAC_HT[256][2] = {
+ {0,2},{1,2},{4,3},{10,4},{24,5},{25,5},{56,6},{120,7},{500,9},{1014,10},{4084,12},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
+ {11,4},{57,6},{246,8},{501,9},{2038,11},{4085,12},{65416,16},{65417,16},{65418,16},{65419,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
+ {26,5},{247,8},{1015,10},{4086,12},{32706,15},{65420,16},{65421,16},{65422,16},{65423,16},{65424,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
+ {27,5},{248,8},{1016,10},{4087,12},{65425,16},{65426,16},{65427,16},{65428,16},{65429,16},{65430,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
+ {58,6},{502,9},{65431,16},{65432,16},{65433,16},{65434,16},{65435,16},{65436,16},{65437,16},{65438,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
+ {59,6},{1017,10},{65439,16},{65440,16},{65441,16},{65442,16},{65443,16},{65444,16},{65445,16},{65446,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
+ {121,7},{2039,11},{65447,16},{65448,16},{65449,16},{65450,16},{65451,16},{65452,16},{65453,16},{65454,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
+ {122,7},{2040,11},{65455,16},{65456,16},{65457,16},{65458,16},{65459,16},{65460,16},{65461,16},{65462,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
+ {249,8},{65463,16},{65464,16},{65465,16},{65466,16},{65467,16},{65468,16},{65469,16},{65470,16},{65471,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
+ {503,9},{65472,16},{65473,16},{65474,16},{65475,16},{65476,16},{65477,16},{65478,16},{65479,16},{65480,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
+ {504,9},{65481,16},{65482,16},{65483,16},{65484,16},{65485,16},{65486,16},{65487,16},{65488,16},{65489,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
+ {505,9},{65490,16},{65491,16},{65492,16},{65493,16},{65494,16},{65495,16},{65496,16},{65497,16},{65498,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
+ {506,9},{65499,16},{65500,16},{65501,16},{65502,16},{65503,16},{65504,16},{65505,16},{65506,16},{65507,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
+ {2041,11},{65508,16},{65509,16},{65510,16},{65511,16},{65512,16},{65513,16},{65514,16},{65515,16},{65516,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
+ {16352,14},{65517,16},{65518,16},{65519,16},{65520,16},{65521,16},{65522,16},{65523,16},{65524,16},{65525,16},{0,0},{0,0},{0,0},{0,0},{0,0},
+ {1018,10},{32707,15},{65526,16},{65527,16},{65528,16},{65529,16},{65530,16},{65531,16},{65532,16},{65533,16},{65534,16},{0,0},{0,0},{0,0},{0,0},{0,0}
+ };
+ static const int YQT[] = {16,11,10,16,24,40,51,61,12,12,14,19,26,58,60,55,14,13,16,24,40,57,69,56,14,17,22,29,51,87,80,62,18,22,
+ 37,56,68,109,103,77,24,35,55,64,81,104,113,92,49,64,78,87,103,121,120,101,72,92,95,98,112,100,103,99};
+ static const int UVQT[] = {17,18,24,47,99,99,99,99,18,21,26,66,99,99,99,99,24,26,56,99,99,99,99,99,47,66,99,99,99,99,99,99,
+ 99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99};
+ static const float aasf[] = { 1.0f * 2.828427125f, 1.387039845f * 2.828427125f, 1.306562965f * 2.828427125f, 1.175875602f * 2.828427125f,
+ 1.0f * 2.828427125f, 0.785694958f * 2.828427125f, 0.541196100f * 2.828427125f, 0.275899379f * 2.828427125f };
+
+ int row, col, i, k, subsample;
+ float fdtbl_Y[64], fdtbl_UV[64];
+ unsigned char YTable[64], UVTable[64];
+
+ if(!data || !width || !height || comp > 4 || comp < 1) {
+ return 0;
+ }
+
+ quality = quality ? quality : 90;
+ subsample = quality <= 90 ? 1 : 0;
+ quality = quality < 1 ? 1 : quality > 100 ? 100 : quality;
+ quality = quality < 50 ? 5000 / quality : 200 - quality * 2;
+
+ for(i = 0; i < 64; ++i) {
+ int uvti, yti = (YQT[i]*quality+50)/100;
+ YTable[stbiw__jpg_ZigZag[i]] = (unsigned char) (yti < 1 ? 1 : yti > 255 ? 255 : yti);
+ uvti = (UVQT[i]*quality+50)/100;
+ UVTable[stbiw__jpg_ZigZag[i]] = (unsigned char) (uvti < 1 ? 1 : uvti > 255 ? 255 : uvti);
+ }
+
+ for(row = 0, k = 0; row < 8; ++row) {
+ for(col = 0; col < 8; ++col, ++k) {
+ fdtbl_Y[k] = 1 / (YTable [stbiw__jpg_ZigZag[k]] * aasf[row] * aasf[col]);
+ fdtbl_UV[k] = 1 / (UVTable[stbiw__jpg_ZigZag[k]] * aasf[row] * aasf[col]);
+ }
+ }
+
+ // Write Headers
+ {
+ static const unsigned char head0[] = { 0xFF,0xD8,0xFF,0xE0,0,0x10,'J','F','I','F',0,1,1,0,0,1,0,1,0,0,0xFF,0xDB,0,0x84,0 };
+ static const unsigned char head2[] = { 0xFF,0xDA,0,0xC,3,1,0,2,0x11,3,0x11,0,0x3F,0 };
+ const unsigned char head1[] = { 0xFF,0xC0,0,0x11,8,(unsigned char)(height>>8),STBIW_UCHAR(height),(unsigned char)(width>>8),STBIW_UCHAR(width),
+ 3,1,(unsigned char)(subsample?0x22:0x11),0,2,0x11,1,3,0x11,1,0xFF,0xC4,0x01,0xA2,0 };
+ s->func(s->context, (void*)head0, sizeof(head0));
+ s->func(s->context, (void*)YTable, sizeof(YTable));
+ stbiw__putc(s, 1);
+ s->func(s->context, UVTable, sizeof(UVTable));
+ s->func(s->context, (void*)head1, sizeof(head1));
+ s->func(s->context, (void*)(std_dc_luminance_nrcodes+1), sizeof(std_dc_luminance_nrcodes)-1);
+ s->func(s->context, (void*)std_dc_luminance_values, sizeof(std_dc_luminance_values));
+ stbiw__putc(s, 0x10); // HTYACinfo
+ s->func(s->context, (void*)(std_ac_luminance_nrcodes+1), sizeof(std_ac_luminance_nrcodes)-1);
+ s->func(s->context, (void*)std_ac_luminance_values, sizeof(std_ac_luminance_values));
+ stbiw__putc(s, 1); // HTUDCinfo
+ s->func(s->context, (void*)(std_dc_chrominance_nrcodes+1), sizeof(std_dc_chrominance_nrcodes)-1);
+ s->func(s->context, (void*)std_dc_chrominance_values, sizeof(std_dc_chrominance_values));
+ stbiw__putc(s, 0x11); // HTUACinfo
+ s->func(s->context, (void*)(std_ac_chrominance_nrcodes+1), sizeof(std_ac_chrominance_nrcodes)-1);
+ s->func(s->context, (void*)std_ac_chrominance_values, sizeof(std_ac_chrominance_values));
+ s->func(s->context, (void*)head2, sizeof(head2));
+ }
+
+ // Encode 8x8 macroblocks
+ {
+ static const unsigned short fillBits[] = {0x7F, 7};
+ int DCY=0, DCU=0, DCV=0;
+ int bitBuf=0, bitCnt=0;
+ // comp == 2 is grey+alpha (alpha is ignored)
+ int ofsG = comp > 2 ? 1 : 0, ofsB = comp > 2 ? 2 : 0;
+ const unsigned char *dataR = (const unsigned char *)data;
+ const unsigned char *dataG = dataR + ofsG;
+ const unsigned char *dataB = dataR + ofsB;
+ int x, y, pos;
+ if(subsample) {
+ for(y = 0; y < height; y += 16) {
+ for(x = 0; x < width; x += 16) {
+ float Y[256], U[256], V[256];
+ for(row = y, pos = 0; row < y+16; ++row) {
+ // row >= height => use last input row
+ int clamped_row = (row < height) ? row : height - 1;
+ int base_p = (stbi__flip_vertically_on_write ? (height-1-clamped_row) : clamped_row)*width*comp;
+ for(col = x; col < x+16; ++col, ++pos) {
+ // if col >= width => use pixel from last input column
+ int p = base_p + ((col < width) ? col : (width-1))*comp;
+ float r = dataR[p], g = dataG[p], b = dataB[p];
+ Y[pos]= +0.29900f*r + 0.58700f*g + 0.11400f*b - 128;
+ U[pos]= -0.16874f*r - 0.33126f*g + 0.50000f*b;
+ V[pos]= +0.50000f*r - 0.41869f*g - 0.08131f*b;
+ }
+ }
+ DCY = stbiw__jpg_processDU(s, &bitBuf, &bitCnt, Y+0, 16, fdtbl_Y, DCY, YDC_HT, YAC_HT);
+ DCY = stbiw__jpg_processDU(s, &bitBuf, &bitCnt, Y+8, 16, fdtbl_Y, DCY, YDC_HT, YAC_HT);
+ DCY = stbiw__jpg_processDU(s, &bitBuf, &bitCnt, Y+128, 16, fdtbl_Y, DCY, YDC_HT, YAC_HT);
+ DCY = stbiw__jpg_processDU(s, &bitBuf, &bitCnt, Y+136, 16, fdtbl_Y, DCY, YDC_HT, YAC_HT);
+
+ // subsample U,V
+ {
+ float subU[64], subV[64];
+ int yy, xx;
+ for(yy = 0, pos = 0; yy < 8; ++yy) {
+ for(xx = 0; xx < 8; ++xx, ++pos) {
+ int j = yy*32+xx*2;
+ subU[pos] = (U[j+0] + U[j+1] + U[j+16] + U[j+17]) * 0.25f;
+ subV[pos] = (V[j+0] + V[j+1] + V[j+16] + V[j+17]) * 0.25f;
+ }
+ }
+ DCU = stbiw__jpg_processDU(s, &bitBuf, &bitCnt, subU, 8, fdtbl_UV, DCU, UVDC_HT, UVAC_HT);
+ DCV = stbiw__jpg_processDU(s, &bitBuf, &bitCnt, subV, 8, fdtbl_UV, DCV, UVDC_HT, UVAC_HT);
+ }
+ }
+ }
+ } else {
+ for(y = 0; y < height; y += 8) {
+ for(x = 0; x < width; x += 8) {
+ float Y[64], U[64], V[64];
+ for(row = y, pos = 0; row < y+8; ++row) {
+ // row >= height => use last input row
+ int clamped_row = (row < height) ? row : height - 1;
+ int base_p = (stbi__flip_vertically_on_write ? (height-1-clamped_row) : clamped_row)*width*comp;
+ for(col = x; col < x+8; ++col, ++pos) {
+ // if col >= width => use pixel from last input column
+ int p = base_p + ((col < width) ? col : (width-1))*comp;
+ float r = dataR[p], g = dataG[p], b = dataB[p];
+ Y[pos]= +0.29900f*r + 0.58700f*g + 0.11400f*b - 128;
+ U[pos]= -0.16874f*r - 0.33126f*g + 0.50000f*b;
+ V[pos]= +0.50000f*r - 0.41869f*g - 0.08131f*b;
+ }
+ }
+
+ DCY = stbiw__jpg_processDU(s, &bitBuf, &bitCnt, Y, 8, fdtbl_Y, DCY, YDC_HT, YAC_HT);
+ DCU = stbiw__jpg_processDU(s, &bitBuf, &bitCnt, U, 8, fdtbl_UV, DCU, UVDC_HT, UVAC_HT);
+ DCV = stbiw__jpg_processDU(s, &bitBuf, &bitCnt, V, 8, fdtbl_UV, DCV, UVDC_HT, UVAC_HT);
+ }
+ }
+ }
+
+ // Do the bit alignment of the EOI marker
+ stbiw__jpg_writeBits(s, &bitBuf, &bitCnt, fillBits);
+ }
+
+ // EOI
+ stbiw__putc(s, 0xFF);
+ stbiw__putc(s, 0xD9);
+
+ return 1;
+}
+
+STBIWDEF int stbi_write_jpg_to_func(stbi_write_func *func, void *context, int x, int y, int comp, const void *data, int quality)
+{
+ stbi__write_context s = { 0 };
+ stbi__start_write_callbacks(&s, func, context);
+ return stbi_write_jpg_core(&s, x, y, comp, (void *) data, quality);
+}
+
+
+#ifndef STBI_WRITE_NO_STDIO
+STBIWDEF int stbi_write_jpg(char const *filename, int x, int y, int comp, const void *data, int quality)
+{
+ stbi__write_context s = { 0 };
+ if (stbi__start_write_file(&s,filename)) {
+ int r = stbi_write_jpg_core(&s, x, y, comp, data, quality);
+ stbi__end_write_file(&s);
+ return r;
+ } else
+ return 0;
+}
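+
+/* Minimal caller sketch for stbi_write_jpg() (name and values illustrative):
+   quality 0 selects the default of 90, values are clamped to 1..100, and
+   quality <= 90 enables 4:2:0 chroma subsampling in stbi_write_jpg_core(). */
+#if 0
+static int stbiw__jpg_example(const unsigned char *rgb, int w, int h)
+{
+   return stbi_write_jpg("example.jpg", w, h, 3 /* RGB */, rgb, 85);
+}
+#endif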
+#endif
+
+#endif // STB_IMAGE_WRITE_IMPLEMENTATION
+
+/* Revision history
+ 1.16 (2021-07-11)
+ make Deflate code emit uncompressed blocks when it would otherwise expand
+ support writing BMPs with alpha channel
+ 1.15 (2020-07-13) unknown
+ 1.14 (2020-02-02) updated JPEG writer to downsample chroma channels
+ 1.13
+ 1.12
+ 1.11 (2019-08-11)
+
+ 1.10 (2019-02-07)
+ support utf8 filenames in Windows; fix warnings and platform ifdefs
+ 1.09 (2018-02-11)
+ fix typo in zlib quality API, improve STB_I_W_STATIC in C++
+ 1.08 (2018-01-29)
+ add stbi__flip_vertically_on_write, external zlib, zlib quality, choose PNG filter
+ 1.07 (2017-07-24)
+ doc fix
+ 1.06 (2017-07-23)
+ writing JPEG (using Jon Olick's code)
+ 1.05 ???
+ 1.04 (2017-03-03)
+ monochrome BMP expansion
+ 1.03 ???
+ 1.02 (2016-04-02)
+ avoid allocating large structures on the stack
+ 1.01 (2016-01-16)
+ STBIW_REALLOC_SIZED: support allocators with no realloc support
+ avoid race-condition in crc initialization
+ minor compile issues
+ 1.00 (2015-09-14)
+ installable file IO function
+ 0.99 (2015-09-13)
+ warning fixes; TGA rle support
+ 0.98 (2015-04-08)
+ added STBIW_MALLOC, STBIW_ASSERT etc
+ 0.97 (2015-01-18)
+ fixed HDR asserts, rewrote HDR rle logic
+ 0.96 (2015-01-17)
+ add HDR output
+ fix monochrome BMP
+ 0.95 (2014-08-17)
+ add monochrome TGA output
+ 0.94 (2014-05-31)
+ rename private functions to avoid conflicts with stb_image.h
+ 0.93 (2014-05-27)
+ warning fixes
+ 0.92 (2010-08-01)
+ casts to unsigned char to fix warnings
+ 0.91 (2010-07-17)
+ first public release
+ 0.90 first internal release
+*/
+
+/*
+------------------------------------------------------------------------------
+This software is available under 2 licenses -- choose whichever you prefer.
+------------------------------------------------------------------------------
+ALTERNATIVE A - MIT License
+Copyright (c) 2017 Sean Barrett
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+------------------------------------------------------------------------------
+ALTERNATIVE B - Public Domain (www.unlicense.org)
+This is free and unencumbered software released into the public domain.
+Anyone is free to copy, modify, publish, use, compile, sell, or distribute this
+software, either in source code form or as a compiled binary, for any purpose,
+commercial or non-commercial, and by any means.
+In jurisdictions that recognize copyright laws, the author or authors of this
+software dedicate any and all copyright interest in the software to the public
+domain. We make this dedication for the benefit of the public at large and to
+the detriment of our heirs and successors. We intend this dedication to be an
+overt act of relinquishment in perpetuity of all present and future rights to
+this software under copyright law.
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+------------------------------------------------------------------------------
+*/
diff --git a/chromium/third_party/dawn/third_party/glfw/deps/tinycthread.c b/chromium/third_party/dawn/third_party/glfw/deps/tinycthread.c
new file mode 100644
index 00000000000..f9cea2ed8c8
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/deps/tinycthread.c
@@ -0,0 +1,594 @@
+/* -*- mode: c; tab-width: 2; indent-tabs-mode: nil; -*-
+Copyright (c) 2012 Marcus Geelnard
+
+This software is provided 'as-is', without any express or implied
+warranty. In no event will the authors be held liable for any damages
+arising from the use of this software.
+
+Permission is granted to anyone to use this software for any purpose,
+including commercial applications, and to alter it and redistribute it
+freely, subject to the following restrictions:
+
+ 1. The origin of this software must not be misrepresented; you must not
+ claim that you wrote the original software. If you use this software
+ in a product, an acknowledgment in the product documentation would be
+ appreciated but is not required.
+
+ 2. Altered source versions must be plainly marked as such, and must not be
+ misrepresented as being the original software.
+
+ 3. This notice may not be removed or altered from any source
+ distribution.
+*/
+
+/* 2013-01-06 Camilla Löwy <elmindreda@glfw.org>
+ *
+ * Added casts from time_t to DWORD to avoid warnings on VC++.
+ * Fixed time retrieval on POSIX systems.
+ */
+
+#include "tinycthread.h"
+#include <stdlib.h>
+
+/* Platform specific includes */
+#if defined(_TTHREAD_POSIX_)
+ #include <signal.h>
+ #include <sched.h>
+ #include <unistd.h>
+ #include <sys/time.h>
+ #include <errno.h>
+#elif defined(_TTHREAD_WIN32_)
+ #include <process.h>
+ #include <sys/timeb.h>
+#endif
+
+/* Standard, good-to-have defines */
+#ifndef NULL
+ #define NULL (void*)0
+#endif
+#ifndef TRUE
+ #define TRUE 1
+#endif
+#ifndef FALSE
+ #define FALSE 0
+#endif
+
+int mtx_init(mtx_t *mtx, int type)
+{
+#if defined(_TTHREAD_WIN32_)
+ mtx->mAlreadyLocked = FALSE;
+ mtx->mRecursive = type & mtx_recursive;
+ InitializeCriticalSection(&mtx->mHandle);
+ return thrd_success;
+#else
+ int ret;
+ pthread_mutexattr_t attr;
+ pthread_mutexattr_init(&attr);
+ if (type & mtx_recursive)
+ {
+ pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
+ }
+ ret = pthread_mutex_init(mtx, &attr);
+ pthread_mutexattr_destroy(&attr);
+ return ret == 0 ? thrd_success : thrd_error;
+#endif
+}
+
+void mtx_destroy(mtx_t *mtx)
+{
+#if defined(_TTHREAD_WIN32_)
+ DeleteCriticalSection(&mtx->mHandle);
+#else
+ pthread_mutex_destroy(mtx);
+#endif
+}
+
+int mtx_lock(mtx_t *mtx)
+{
+#if defined(_TTHREAD_WIN32_)
+ EnterCriticalSection(&mtx->mHandle);
+ if (!mtx->mRecursive)
+ {
+ while(mtx->mAlreadyLocked) Sleep(1000); /* Simulate deadlock... */
+ mtx->mAlreadyLocked = TRUE;
+ }
+ return thrd_success;
+#else
+ return pthread_mutex_lock(mtx) == 0 ? thrd_success : thrd_error;
+#endif
+}
+
+int mtx_timedlock(mtx_t *mtx, const struct timespec *ts)
+{
+ /* FIXME! */
+ (void)mtx;
+ (void)ts;
+ return thrd_error;
+}
+
+int mtx_trylock(mtx_t *mtx)
+{
+#if defined(_TTHREAD_WIN32_)
+ int ret = TryEnterCriticalSection(&mtx->mHandle) ? thrd_success : thrd_busy;
+ if ((!mtx->mRecursive) && (ret == thrd_success) && mtx->mAlreadyLocked)
+ {
+ LeaveCriticalSection(&mtx->mHandle);
+ ret = thrd_busy;
+ }
+ return ret;
+#else
+ return (pthread_mutex_trylock(mtx) == 0) ? thrd_success : thrd_busy;
+#endif
+}
+
+int mtx_unlock(mtx_t *mtx)
+{
+#if defined(_TTHREAD_WIN32_)
+ mtx->mAlreadyLocked = FALSE;
+ LeaveCriticalSection(&mtx->mHandle);
+ return thrd_success;
+#else
+ return pthread_mutex_unlock(mtx) == 0 ? thrd_success : thrd_error;
+#endif
+}
+
+#if defined(_TTHREAD_WIN32_)
+#define _CONDITION_EVENT_ONE 0
+#define _CONDITION_EVENT_ALL 1
+#endif
+
+int cnd_init(cnd_t *cond)
+{
+#if defined(_TTHREAD_WIN32_)
+ cond->mWaitersCount = 0;
+
+ /* Init critical section */
+ InitializeCriticalSection(&cond->mWaitersCountLock);
+
+ /* Init events */
+ cond->mEvents[_CONDITION_EVENT_ONE] = CreateEvent(NULL, FALSE, FALSE, NULL);
+ if (cond->mEvents[_CONDITION_EVENT_ONE] == NULL)
+ {
+ cond->mEvents[_CONDITION_EVENT_ALL] = NULL;
+ return thrd_error;
+ }
+ cond->mEvents[_CONDITION_EVENT_ALL] = CreateEvent(NULL, TRUE, FALSE, NULL);
+ if (cond->mEvents[_CONDITION_EVENT_ALL] == NULL)
+ {
+ CloseHandle(cond->mEvents[_CONDITION_EVENT_ONE]);
+ cond->mEvents[_CONDITION_EVENT_ONE] = NULL;
+ return thrd_error;
+ }
+
+ return thrd_success;
+#else
+ return pthread_cond_init(cond, NULL) == 0 ? thrd_success : thrd_error;
+#endif
+}
+
+void cnd_destroy(cnd_t *cond)
+{
+#if defined(_TTHREAD_WIN32_)
+ if (cond->mEvents[_CONDITION_EVENT_ONE] != NULL)
+ {
+ CloseHandle(cond->mEvents[_CONDITION_EVENT_ONE]);
+ }
+ if (cond->mEvents[_CONDITION_EVENT_ALL] != NULL)
+ {
+ CloseHandle(cond->mEvents[_CONDITION_EVENT_ALL]);
+ }
+ DeleteCriticalSection(&cond->mWaitersCountLock);
+#else
+ pthread_cond_destroy(cond);
+#endif
+}
+
+int cnd_signal(cnd_t *cond)
+{
+#if defined(_TTHREAD_WIN32_)
+ int haveWaiters;
+
+ /* Are there any waiters? */
+ EnterCriticalSection(&cond->mWaitersCountLock);
+ haveWaiters = (cond->mWaitersCount > 0);
+ LeaveCriticalSection(&cond->mWaitersCountLock);
+
+ /* If we have any waiting threads, send them a signal */
+ if(haveWaiters)
+ {
+ if (SetEvent(cond->mEvents[_CONDITION_EVENT_ONE]) == 0)
+ {
+ return thrd_error;
+ }
+ }
+
+ return thrd_success;
+#else
+ return pthread_cond_signal(cond) == 0 ? thrd_success : thrd_error;
+#endif
+}
+
+int cnd_broadcast(cnd_t *cond)
+{
+#if defined(_TTHREAD_WIN32_)
+ int haveWaiters;
+
+ /* Are there any waiters? */
+ EnterCriticalSection(&cond->mWaitersCountLock);
+ haveWaiters = (cond->mWaitersCount > 0);
+ LeaveCriticalSection(&cond->mWaitersCountLock);
+
+ /* If we have any waiting threads, send them a signal */
+ if(haveWaiters)
+ {
+ if (SetEvent(cond->mEvents[_CONDITION_EVENT_ALL]) == 0)
+ {
+ return thrd_error;
+ }
+ }
+
+ return thrd_success;
+#else
+ return pthread_cond_broadcast(cond) == 0 ? thrd_success : thrd_error;
+#endif
+}
+
+#if defined(_TTHREAD_WIN32_)
+static int _cnd_timedwait_win32(cnd_t *cond, mtx_t *mtx, DWORD timeout)
+{
+ int result, lastWaiter;
+
+ /* Increment number of waiters */
+ EnterCriticalSection(&cond->mWaitersCountLock);
+ ++ cond->mWaitersCount;
+ LeaveCriticalSection(&cond->mWaitersCountLock);
+
+ /* Release the mutex while waiting for the condition (will decrease
+ the number of waiters when done)... */
+ mtx_unlock(mtx);
+
+ /* Wait for either event to become signaled due to cnd_signal() or
+ cnd_broadcast() being called */
+ result = WaitForMultipleObjects(2, cond->mEvents, FALSE, timeout);
+ if (result == WAIT_TIMEOUT)
+ {
+ return thrd_timeout;
+ }
+ else if (result == (int)WAIT_FAILED)
+ {
+ return thrd_error;
+ }
+
+ /* Check if we are the last waiter */
+ EnterCriticalSection(&cond->mWaitersCountLock);
+ -- cond->mWaitersCount;
+ lastWaiter = (result == (WAIT_OBJECT_0 + _CONDITION_EVENT_ALL)) &&
+ (cond->mWaitersCount == 0);
+ LeaveCriticalSection(&cond->mWaitersCountLock);
+
+ /* If we are the last waiter to be notified to stop waiting, reset the event */
+ if (lastWaiter)
+ {
+ if (ResetEvent(cond->mEvents[_CONDITION_EVENT_ALL]) == 0)
+ {
+ return thrd_error;
+ }
+ }
+
+ /* Re-acquire the mutex */
+ mtx_lock(mtx);
+
+ return thrd_success;
+}
+#endif
+
+int cnd_wait(cnd_t *cond, mtx_t *mtx)
+{
+#if defined(_TTHREAD_WIN32_)
+ return _cnd_timedwait_win32(cond, mtx, INFINITE);
+#else
+ return pthread_cond_wait(cond, mtx) == 0 ? thrd_success : thrd_error;
+#endif
+}
+
+int cnd_timedwait(cnd_t *cond, mtx_t *mtx, const struct timespec *ts)
+{
+#if defined(_TTHREAD_WIN32_)
+ struct timespec now;
+ if (clock_gettime(CLOCK_REALTIME, &now) == 0)
+ {
+ DWORD delta = (DWORD) ((ts->tv_sec - now.tv_sec) * 1000 +
+ (ts->tv_nsec - now.tv_nsec + 500000) / 1000000);
+ return _cnd_timedwait_win32(cond, mtx, delta);
+ }
+ else
+ return thrd_error;
+#else
+ int ret;
+ ret = pthread_cond_timedwait(cond, mtx, ts);
+ if (ret == ETIMEDOUT)
+ {
+ return thrd_timeout;
+ }
+ return ret == 0 ? thrd_success : thrd_error;
+#endif
+}
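+
+/* Sketch of the usual condition-variable pattern for the functions above
+   (the flag, mutex and condition objects are illustrative and assumed to be
+   initialized elsewhere with mtx_init(&g_lock, mtx_plain) and
+   cnd_init(&g_cond)): the waiter re-checks its predicate in a loop because
+   cnd_wait() may wake spuriously. */
+#if 0
+static mtx_t g_lock;
+static cnd_t g_cond;
+static int g_ready = 0;
+
+static void wait_until_ready(void)
+{
+  mtx_lock(&g_lock);
+  while (!g_ready)
+    cnd_wait(&g_cond, &g_lock);
+  mtx_unlock(&g_lock);
+}
+
+static void set_ready(void)
+{
+  mtx_lock(&g_lock);
+  g_ready = 1;
+  cnd_signal(&g_cond);
+  mtx_unlock(&g_lock);
+}
+#endif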
+
+
+/** Information to pass to the new thread (what to run). */
+typedef struct {
+ thrd_start_t mFunction; /**< Pointer to the function to be executed. */
+ void * mArg; /**< Function argument for the thread function. */
+} _thread_start_info;
+
+/* Thread wrapper function. */
+#if defined(_TTHREAD_WIN32_)
+static unsigned WINAPI _thrd_wrapper_function(void * aArg)
+#elif defined(_TTHREAD_POSIX_)
+static void * _thrd_wrapper_function(void * aArg)
+#endif
+{
+ thrd_start_t fun;
+ void *arg;
+ int res;
+#if defined(_TTHREAD_POSIX_)
+ void *pres;
+#endif
+
+ /* Get thread startup information */
+ _thread_start_info *ti = (_thread_start_info *) aArg;
+ fun = ti->mFunction;
+ arg = ti->mArg;
+
+ /* The thread is responsible for freeing the startup information */
+ free((void *)ti);
+
+ /* Call the actual client thread function */
+ res = fun(arg);
+
+#if defined(_TTHREAD_WIN32_)
+ return res;
+#else
+ pres = malloc(sizeof(int));
+ if (pres != NULL)
+ {
+ *(int*)pres = res;
+ }
+ return pres;
+#endif
+}
+
+int thrd_create(thrd_t *thr, thrd_start_t func, void *arg)
+{
+ /* Fill out the thread startup information (passed to the thread wrapper,
+ which will eventually free it) */
+ _thread_start_info* ti = (_thread_start_info*)malloc(sizeof(_thread_start_info));
+ if (ti == NULL)
+ {
+ return thrd_nomem;
+ }
+ ti->mFunction = func;
+ ti->mArg = arg;
+
+ /* Create the thread */
+#if defined(_TTHREAD_WIN32_)
+ *thr = (HANDLE)_beginthreadex(NULL, 0, _thrd_wrapper_function, (void *)ti, 0, NULL);
+#elif defined(_TTHREAD_POSIX_)
+ if(pthread_create(thr, NULL, _thrd_wrapper_function, (void *)ti) != 0)
+ {
+ *thr = 0;
+ }
+#endif
+
+ /* Did we fail to create the thread? */
+ if(!*thr)
+ {
+ free(ti);
+ return thrd_error;
+ }
+
+ return thrd_success;
+}
+
+thrd_t thrd_current(void)
+{
+#if defined(_TTHREAD_WIN32_)
+ return GetCurrentThread();
+#else
+ return pthread_self();
+#endif
+}
+
+int thrd_detach(thrd_t thr)
+{
+ /* FIXME! */
+ (void)thr;
+ return thrd_error;
+}
+
+int thrd_equal(thrd_t thr0, thrd_t thr1)
+{
+#if defined(_TTHREAD_WIN32_)
+ return thr0 == thr1;
+#else
+ return pthread_equal(thr0, thr1);
+#endif
+}
+
+void thrd_exit(int res)
+{
+#if defined(_TTHREAD_WIN32_)
+ ExitThread(res);
+#else
+ void *pres = malloc(sizeof(int));
+ if (pres != NULL)
+ {
+ *(int*)pres = res;
+ }
+ pthread_exit(pres);
+#endif
+}
+
+int thrd_join(thrd_t thr, int *res)
+{
+#if defined(_TTHREAD_WIN32_)
+ if (WaitForSingleObject(thr, INFINITE) == WAIT_FAILED)
+ {
+ return thrd_error;
+ }
+ if (res != NULL)
+ {
+ DWORD dwRes;
+ GetExitCodeThread(thr, &dwRes);
+ *res = dwRes;
+ }
+#elif defined(_TTHREAD_POSIX_)
+ void *pres;
+ int ires = 0;
+ if (pthread_join(thr, &pres) != 0)
+ {
+ return thrd_error;
+ }
+ if (pres != NULL)
+ {
+ ires = *(int*)pres;
+ free(pres);
+ }
+ if (res != NULL)
+ {
+ *res = ires;
+ }
+#endif
+ return thrd_success;
+}
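+
+/* Sketch of the usual create/join pattern for the functions above (the
+   `worker` function and its argument are illustrative): thrd_create() runs
+   `worker` on a new thread and thrd_join() retrieves its int return value. */
+#if 0
+static int worker(void *arg)
+{
+  return *(int*)arg * 2;
+}
+
+static int tct_thread_example(void)
+{
+  thrd_t t;
+  int arg = 21, res = 0;
+  if (thrd_create(&t, worker, &arg) != thrd_success)
+    return -1;
+  thrd_join(t, &res);   /* res == 42 */
+  return res;
+}
+#endif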
+
+int thrd_sleep(const struct timespec *time_point, struct timespec *remaining)
+{
+ struct timespec now;
+#if defined(_TTHREAD_WIN32_)
+ DWORD delta;
+#else
+ long delta;
+#endif
+
+ /* Get the current time */
+ if (clock_gettime(CLOCK_REALTIME, &now) != 0)
+ return -2; // FIXME: Some specific error code?
+
+#if defined(_TTHREAD_WIN32_)
+ /* Delta in milliseconds */
+ delta = (DWORD) ((time_point->tv_sec - now.tv_sec) * 1000 +
+ (time_point->tv_nsec - now.tv_nsec + 500000) / 1000000);
+ if (delta > 0)
+ {
+ Sleep(delta);
+ }
+#else
+ /* Delta in microseconds */
+ delta = (time_point->tv_sec - now.tv_sec) * 1000000L +
+ (time_point->tv_nsec - now.tv_nsec + 500L) / 1000L;
+
+ /* On some systems, the usleep argument must be < 1000000 */
+ while (delta > 999999L)
+ {
+ usleep(999999);
+ delta -= 999999L;
+ }
+ if (delta > 0L)
+ {
+ usleep((useconds_t)delta);
+ }
+#endif
+
+ /* We don't support waking up prematurely (yet) */
+ if (remaining)
+ {
+ remaining->tv_sec = 0;
+ remaining->tv_nsec = 0;
+ }
+ return 0;
+}
+
+void thrd_yield(void)
+{
+#if defined(_TTHREAD_WIN32_)
+ Sleep(0);
+#else
+ sched_yield();
+#endif
+}
+
+int tss_create(tss_t *key, tss_dtor_t dtor)
+{
+#if defined(_TTHREAD_WIN32_)
+ /* FIXME: The destructor function is not supported yet... */
+ if (dtor != NULL)
+ {
+ return thrd_error;
+ }
+ *key = TlsAlloc();
+ if (*key == TLS_OUT_OF_INDEXES)
+ {
+ return thrd_error;
+ }
+#else
+ if (pthread_key_create(key, dtor) != 0)
+ {
+ return thrd_error;
+ }
+#endif
+ return thrd_success;
+}
+
+void tss_delete(tss_t key)
+{
+#if defined(_TTHREAD_WIN32_)
+ TlsFree(key);
+#else
+ pthread_key_delete(key);
+#endif
+}
+
+void *tss_get(tss_t key)
+{
+#if defined(_TTHREAD_WIN32_)
+ return TlsGetValue(key);
+#else
+ return pthread_getspecific(key);
+#endif
+}
+
+int tss_set(tss_t key, void *val)
+{
+#if defined(_TTHREAD_WIN32_)
+ if (TlsSetValue(key, val) == 0)
+ {
+ return thrd_error;
+ }
+#else
+ if (pthread_setspecific(key, val) != 0)
+ {
+ return thrd_error;
+ }
+#endif
+ return thrd_success;
+}
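+
+/* Sketch of thread-specific storage usage for the functions above (key and
+   value names are illustrative; note that the Win32 path above does not
+   support destructors, so dtor must be NULL there). */
+#if 0
+static tss_t g_key;
+
+static void tct_tss_example(void)
+{
+  if (tss_create(&g_key, NULL) == thrd_success)
+  {
+    tss_set(g_key, (void*)"per-thread value");
+    /* tss_get(g_key) now returns that value, but only on this thread */
+    tss_delete(g_key);
+  }
+}
+#endif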
+
+#if defined(_TTHREAD_EMULATE_CLOCK_GETTIME_)
+int _tthread_clock_gettime(clockid_t clk_id, struct timespec *ts)
+{
+#if defined(_TTHREAD_WIN32_)
+ struct _timeb tb;
+ _ftime(&tb);
+ ts->tv_sec = (time_t)tb.time;
+ ts->tv_nsec = 1000000L * (long)tb.millitm;
+#else
+ struct timeval tv;
+ gettimeofday(&tv, NULL);
+ ts->tv_sec = (time_t)tv.tv_sec;
+ ts->tv_nsec = 1000L * (long)tv.tv_usec;
+#endif
+ return 0;
+}
+#endif // _TTHREAD_EMULATE_CLOCK_GETTIME_
+
diff --git a/chromium/third_party/dawn/third_party/glfw/deps/tinycthread.h b/chromium/third_party/dawn/third_party/glfw/deps/tinycthread.h
new file mode 100644
index 00000000000..42958c393e8
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/deps/tinycthread.h
@@ -0,0 +1,443 @@
+/* -*- mode: c; tab-width: 2; indent-tabs-mode: nil; -*-
+Copyright (c) 2012 Marcus Geelnard
+
+This software is provided 'as-is', without any express or implied
+warranty. In no event will the authors be held liable for any damages
+arising from the use of this software.
+
+Permission is granted to anyone to use this software for any purpose,
+including commercial applications, and to alter it and redistribute it
+freely, subject to the following restrictions:
+
+ 1. The origin of this software must not be misrepresented; you must not
+ claim that you wrote the original software. If you use this software
+ in a product, an acknowledgment in the product documentation would be
+ appreciated but is not required.
+
+ 2. Altered source versions must be plainly marked as such, and must not be
+ misrepresented as being the original software.
+
+ 3. This notice may not be removed or altered from any source
+ distribution.
+*/
+
+#ifndef _TINYCTHREAD_H_
+#define _TINYCTHREAD_H_
+
+/**
+* @file
+* @mainpage TinyCThread API Reference
+*
+* @section intro_sec Introduction
+* TinyCThread is a minimal, portable implementation of basic threading
+* classes for C.
+*
+* They closely mimic the functionality and naming of the C11 standard, and
+* should be easily replaceable with the corresponding standard variants.
+*
+* @section port_sec Portability
+* The Win32 variant uses the native Win32 API for implementing the thread
+* classes, while for other systems, the POSIX threads API (pthread) is used.
+*
+* @section misc_sec Miscellaneous
+* The following special keywords are available: #_Thread_local.
+*
+* For more detailed information, browse the different sections of this
+* documentation. A good place to start is:
+* tinycthread.h.
+*/
+
+/* Which platform are we on? */
+#if !defined(_TTHREAD_PLATFORM_DEFINED_)
+ #if defined(_WIN32) || defined(__WIN32__) || defined(__WINDOWS__)
+ #define _TTHREAD_WIN32_
+ #else
+ #define _TTHREAD_POSIX_
+ #endif
+ #define _TTHREAD_PLATFORM_DEFINED_
+#endif
+
+/* Activate some POSIX functionality (e.g. clock_gettime and recursive mutexes) */
+#if defined(_TTHREAD_POSIX_)
+ #undef _FEATURES_H
+ #if !defined(_GNU_SOURCE)
+ #define _GNU_SOURCE
+ #endif
+ #if !defined(_POSIX_C_SOURCE) || ((_POSIX_C_SOURCE - 0) < 199309L)
+ #undef _POSIX_C_SOURCE
+ #define _POSIX_C_SOURCE 199309L
+ #endif
+ #if !defined(_XOPEN_SOURCE) || ((_XOPEN_SOURCE - 0) < 500)
+ #undef _XOPEN_SOURCE
+ #define _XOPEN_SOURCE 500
+ #endif
+#endif
+
+/* Generic includes */
+#include <time.h>
+
+/* Platform specific includes */
+#if defined(_TTHREAD_POSIX_)
+ #include <sys/time.h>
+ #include <pthread.h>
+#elif defined(_TTHREAD_WIN32_)
+ #ifndef WIN32_LEAN_AND_MEAN
+ #define WIN32_LEAN_AND_MEAN
+ #define __UNDEF_LEAN_AND_MEAN
+ #endif
+ #include <windows.h>
+ #ifdef __UNDEF_LEAN_AND_MEAN
+ #undef WIN32_LEAN_AND_MEAN
+ #undef __UNDEF_LEAN_AND_MEAN
+ #endif
+#endif
+
+/* Workaround for missing TIME_UTC: If time.h doesn't provide TIME_UTC,
+ it's quite likely that libc does not support it either. Hence, fall back to
+ the only other supported time specifier: CLOCK_REALTIME (and if that fails,
+ we're probably emulating clock_gettime anyway, so anything goes). */
+#ifndef TIME_UTC
+ #ifdef CLOCK_REALTIME
+ #define TIME_UTC CLOCK_REALTIME
+ #else
+ #define TIME_UTC 0
+ #endif
+#endif
+
+/* Workaround for missing clock_gettime (most Windows compilers, afaik) */
+#if defined(_TTHREAD_WIN32_) || defined(__APPLE_CC__)
+#define _TTHREAD_EMULATE_CLOCK_GETTIME_
+/* Emulate struct timespec */
+#if defined(_TTHREAD_WIN32_)
+struct _ttherad_timespec {
+ time_t tv_sec;
+ long tv_nsec;
+};
+#define timespec _ttherad_timespec
+#endif
+
+/* Emulate clockid_t */
+typedef int _tthread_clockid_t;
+#define clockid_t _tthread_clockid_t
+
+/* Emulate clock_gettime */
+int _tthread_clock_gettime(clockid_t clk_id, struct timespec *ts);
+#define clock_gettime _tthread_clock_gettime
+#ifndef CLOCK_REALTIME
+ #define CLOCK_REALTIME 0
+#endif
+#endif
+
+
+/** TinyCThread version (major number). */
+#define TINYCTHREAD_VERSION_MAJOR 1
+/** TinyCThread version (minor number). */
+#define TINYCTHREAD_VERSION_MINOR 1
+/** TinyCThread version (full version). */
+#define TINYCTHREAD_VERSION (TINYCTHREAD_VERSION_MAJOR * 100 + TINYCTHREAD_VERSION_MINOR)
+
+/**
+* @def _Thread_local
+* Thread local storage keyword.
+* A variable that is declared with the @c _Thread_local keyword makes the
+* value of the variable local to each thread (known as thread-local storage,
+* or TLS). Example usage:
+* @code
+* // This variable is local to each thread.
+* _Thread_local int variable;
+* @endcode
+* @note The @c _Thread_local keyword is a macro that maps to the corresponding
+* compiler directive (e.g. @c __declspec(thread)).
+* @note This directive is currently not supported on Mac OS X (it will give
+* a compiler error), since compile-time TLS is not supported in the Mac OS X
+* executable format. Also, some older versions of MinGW (before GCC 4.x) do
+* not support this directive.
+* @hideinitializer
+*/
+
+/* FIXME: Check for a PROPER value of __STDC_VERSION__ to know if we have C11 */
+#if !(defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)) && !defined(_Thread_local)
+ #if defined(__GNUC__) || defined(__INTEL_COMPILER) || defined(__SUNPRO_CC) || defined(__IBMCPP__)
+ #define _Thread_local __thread
+ #else
+ #define _Thread_local __declspec(thread)
+ #endif
+#endif
+
+/* Macros */
+#define TSS_DTOR_ITERATIONS 0
+
+/* Function return values */
+#define thrd_error 0 /**< The requested operation failed */
+#define thrd_success 1 /**< The requested operation succeeded */
+#define thrd_timeout 2 /**< The time specified in the call was reached without acquiring the requested resource */
+#define thrd_busy 3 /**< The requested operation failed because a resource requested by a test and return function is already in use */
+#define thrd_nomem 4 /**< The requested operation failed because it was unable to allocate memory */
+
+/* Mutex types */
+#define mtx_plain 1
+#define mtx_timed 2
+#define mtx_try 4
+#define mtx_recursive 8
+
+/* Mutex */
+#if defined(_TTHREAD_WIN32_)
+typedef struct {
+ CRITICAL_SECTION mHandle; /* Critical section handle */
+ int mAlreadyLocked; /* TRUE if the mutex is already locked */
+ int mRecursive; /* TRUE if the mutex is recursive */
+} mtx_t;
+#else
+typedef pthread_mutex_t mtx_t;
+#endif
+
+/** Create a mutex object.
+* @param mtx A mutex object.
+* @param type Bit-mask that must have one of the following six values:
+* @li @c mtx_plain for a simple non-recursive mutex
+* @li @c mtx_timed for a non-recursive mutex that supports timeout
+* @li @c mtx_try for a non-recursive mutex that supports test and return
+* @li @c mtx_plain | @c mtx_recursive (same as @c mtx_plain, but recursive)
+* @li @c mtx_timed | @c mtx_recursive (same as @c mtx_timed, but recursive)
+* @li @c mtx_try | @c mtx_recursive (same as @c mtx_try, but recursive)
+* @return @ref thrd_success on success, or @ref thrd_error if the request could
+* not be honored.
+*/
+int mtx_init(mtx_t *mtx, int type);
+
+/** Release any resources used by the given mutex.
+* @param mtx A mutex object.
+*/
+void mtx_destroy(mtx_t *mtx);
+
+/** Lock the given mutex.
+* Blocks until the given mutex can be locked. If the mutex is non-recursive, and
+* the calling thread already has a lock on the mutex, this call will block
+* forever.
+* @param mtx A mutex object.
+* @return @ref thrd_success on success, or @ref thrd_error if the request could
+* not be honored.
+*/
+int mtx_lock(mtx_t *mtx);
+
+/** NOT YET IMPLEMENTED.
+*/
+int mtx_timedlock(mtx_t *mtx, const struct timespec *ts);
+
+/** Try to lock the given mutex.
+* The specified mutex shall support either test and return or timeout. If the
+* mutex is already locked, the function returns without blocking.
+* @param mtx A mutex object.
+* @return @ref thrd_success on success, or @ref thrd_busy if the resource
+* requested is already in use, or @ref thrd_error if the request could not be
+* honored.
+*/
+int mtx_trylock(mtx_t *mtx);
+
+/** Unlock the given mutex.
+* @param mtx A mutex object.
+* @return @ref thrd_success on success, or @ref thrd_error if the request could
+* not be honored.
+*/
+int mtx_unlock(mtx_t *mtx);
+
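+/** Usage sketch (editorial illustration; not part of the upstream TinyCThread
+* header): protecting a shared counter with a plain, non-recursive mutex. The
+* names below are illustrative only.
+* @code
+* static mtx_t counter_lock;
+* static int counter;
+*
+* void counter_setup(void)
+* {
+*   mtx_init(&counter_lock, mtx_plain);
+* }
+*
+* void counter_increment(void)
+* {
+*   mtx_lock(&counter_lock);
+*   ++counter;
+*   mtx_unlock(&counter_lock);
+* }
+* @endcode
+*/
+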
+/* Condition variable */
+#if defined(_TTHREAD_WIN32_)
+typedef struct {
+ HANDLE mEvents[2]; /* Signal and broadcast event HANDLEs. */
+ unsigned int mWaitersCount; /* Count of the number of waiters. */
+ CRITICAL_SECTION mWaitersCountLock; /* Serialize access to mWaitersCount. */
+} cnd_t;
+#else
+typedef pthread_cond_t cnd_t;
+#endif
+
+/** Create a condition variable object.
+* @param cond A condition variable object.
+* @return @ref thrd_success on success, or @ref thrd_error if the request could
+* not be honored.
+*/
+int cnd_init(cnd_t *cond);
+
+/** Release any resources used by the given condition variable.
+* @param cond A condition variable object.
+*/
+void cnd_destroy(cnd_t *cond);
+
+/** Signal a condition variable.
+* Unblocks one of the threads that are blocked on the given condition variable
+* at the time of the call. If no threads are blocked on the condition variable
+* at the time of the call, the function does nothing and returns success.
+* @param cond A condition variable object.
+* @return @ref thrd_success on success, or @ref thrd_error if the request could
+* not be honored.
+*/
+int cnd_signal(cnd_t *cond);
+
+/** Broadcast a condition variable.
+* Unblocks all of the threads that are blocked on the given condition variable
+* at the time of the call. If no threads are blocked on the condition variable
+* at the time of the call, the function does nothing and returns success.
+* @param cond A condition variable object.
+* @return @ref thrd_success on success, or @ref thrd_error if the request could
+* not be honored.
+*/
+int cnd_broadcast(cnd_t *cond);
+
+/** Wait for a condition variable to become signaled.
+* The function atomically unlocks the given mutex and endeavors to block until
+* the given condition variable is signaled by a call to cnd_signal or to
+* cnd_broadcast. When the calling thread becomes unblocked it locks the mutex
+* before it returns.
+* @param cond A condition variable object.
+* @param mtx A mutex object.
+* @return @ref thrd_success on success, or @ref thrd_error if the request could
+* not be honored.
+*/
+int cnd_wait(cnd_t *cond, mtx_t *mtx);
+
+/** Wait for a condition variable to become signaled.
+* The function atomically unlocks the given mutex and endeavors to block until
+* the given condition variable is signaled by a call to cnd_signal or to
+* cnd_broadcast, or until after the specified time. When the calling thread
+* becomes unblocked it locks the mutex before it returns.
+* @param cond A condition variable object.
+* @param mtx A mutex object.
+* @param xt A point in time at which the request will time out (absolute time).
+* @return @ref thrd_success upon success, or @ref thrd_timeout if the time
+* specified in the call was reached without acquiring the requested resource, or
+* @ref thrd_error if the request could not be honored.
+*/
+int cnd_timedwait(cnd_t *cond, mtx_t *mtx, const struct timespec *ts);
+
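+/** Usage sketch (editorial illustration; not part of the upstream TinyCThread
+* header): the usual wait-in-a-loop pattern around a shared flag. The mutex and
+* condition variable are assumed to have been set up with mtx_init and cnd_init.
+* @code
+* static mtx_t lock;
+* static cnd_t ready_changed;
+* static int ready;
+*
+* void wait_until_ready(void)
+* {
+*   mtx_lock(&lock);
+*   while (!ready)              // guard against spurious wakeups
+*     cnd_wait(&ready_changed, &lock);
+*   mtx_unlock(&lock);
+* }
+*
+* void mark_ready(void)
+* {
+*   mtx_lock(&lock);
+*   ready = 1;
+*   cnd_signal(&ready_changed);
+*   mtx_unlock(&lock);
+* }
+* @endcode
+*/
+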
+/* Thread */
+#if defined(_TTHREAD_WIN32_)
+typedef HANDLE thrd_t;
+#else
+typedef pthread_t thrd_t;
+#endif
+
+/** Thread start function.
+* Any thread that is started with the @ref thrd_create() function must be
+* started through a function of this type.
+* @param arg The thread argument (the @c arg argument of the corresponding
+* @ref thrd_create() call).
+* @return The thread return value, which can be obtained by another thread
+* by using the @ref thrd_join() function.
+*/
+typedef int (*thrd_start_t)(void *arg);
+
+/** Create a new thread.
+* @param thr Identifier of the newly created thread.
+* @param func A function pointer to the function that will be executed in
+* the new thread.
+* @param arg An argument to the thread function.
+* @return @ref thrd_success on success, or @ref thrd_nomem if no memory could
+* be allocated for the thread requested, or @ref thrd_error if the request
+* could not be honored.
+* @note A thread’s identifier may be reused for a different thread once the
+* original thread has exited and either been detached or joined to another
+* thread.
+*/
+int thrd_create(thrd_t *thr, thrd_start_t func, void *arg);
+
+/** Identify the calling thread.
+* @return The identifier of the calling thread.
+*/
+thrd_t thrd_current(void);
+
+/** NOT YET IMPLEMENTED.
+*/
+int thrd_detach(thrd_t thr);
+
+/** Compare two thread identifiers.
+* The function determines if two thread identifiers refer to the same thread.
+* @return Zero if the two thread identifiers refer to different threads.
+* Otherwise a nonzero value is returned.
+*/
+int thrd_equal(thrd_t thr0, thrd_t thr1);
+
+/** Terminate execution of the calling thread.
+* @param res Result code of the calling thread.
+*/
+void thrd_exit(int res);
+
+/** Wait for a thread to terminate.
+* The function joins the given thread with the current thread by blocking
+* until the other thread has terminated.
+* @param thr The thread to join with.
+* @param res If this pointer is not NULL, the function will store the result
+* code of the given thread in the integer pointed to by @c res.
+* @return @ref thrd_success on success, or @ref thrd_error if the request could
+* not be honored.
+*/
+int thrd_join(thrd_t thr, int *res);
+
+/** Put the calling thread to sleep.
+* Suspend execution of the calling thread.
+* @param time_point A point in time at which the thread will resume (absolute time).
+* @param remaining If non-NULL, this parameter will hold the remaining time until
+* time_point upon return. This will typically be zero, but if
+* the thread was woken up by a signal that is not ignored before
+* time_point was reached @c remaining will hold a positive
+* time.
+* @return 0 (zero) on successful sleep, or -1 if an interrupt occurred.
+*/
+int thrd_sleep(const struct timespec *time_point, struct timespec *remaining);
+
+/** Yield execution to another thread.
+* Permit other threads to run, even if the current thread would ordinarily
+* continue to run.
+*/
+void thrd_yield(void);
+
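+/** Usage sketch (editorial illustration; not part of the upstream TinyCThread
+* header): running a worker function on a new thread and collecting its result.
+* @code
+* static int worker(void *arg)
+* {
+*   int limit = *(int*)arg;
+*   int sum = 0, i;
+*   for (i = 1; i <= limit; i++)
+*     sum += i;
+*   return sum;                         // picked up by thrd_join below
+* }
+*
+* void run_worker(void)
+* {
+*   thrd_t thread;
+*   int limit = 10, result = 0;
+*   if (thrd_create(&thread, worker, &limit) == thrd_success)
+*     thrd_join(thread, &result);       // result is now 55
+* }
+* @endcode
+*/
+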
+/* Thread local storage */
+#if defined(_TTHREAD_WIN32_)
+typedef DWORD tss_t;
+#else
+typedef pthread_key_t tss_t;
+#endif
+
+/** Destructor function for a thread-specific storage.
+* @param val The value of the destructed thread-specific storage.
+*/
+typedef void (*tss_dtor_t)(void *val);
+
+/** Create a thread-specific storage.
+* @param key The unique key identifier that will be set if the function is
+* successful.
+* @param dtor Destructor function. This can be NULL.
+* @return @ref thrd_success on success, or @ref thrd_error if the request could
+* not be honored.
+* @note The destructor function is not supported under Windows. If @c dtor is
+* not NULL when calling this function under Windows, the function will fail
+* and return @ref thrd_error.
+*/
+int tss_create(tss_t *key, tss_dtor_t dtor);
+
+/** Delete a thread-specific storage.
+* The function releases any resources used by the given thread-specific
+* storage.
+* @param key The key that shall be deleted.
+*/
+void tss_delete(tss_t key);
+
+/** Get the value for a thread-specific storage.
+* @param key The thread-specific storage identifier.
+* @return The value for the current thread held in the given thread-specific
+* storage.
+*/
+void *tss_get(tss_t key);
+
+/** Set the value for a thread-specific storage.
+* @param key The thread-specific storage identifier.
+* @param val The value of the thread-specific storage to set for the current
+* thread.
+* @return @ref thrd_success on success, or @ref thrd_error if the request could
+* not be honored.
+*/
+int tss_set(tss_t key, void *val);
+
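+/** Usage sketch (editorial illustration; not part of the upstream TinyCThread
+* header): a per-thread label kept in thread-specific storage. The destructor is
+* NULL because, as noted above, destructors are not supported on Windows.
+* @code
+* static tss_t thread_label;
+*
+* void label_setup(void)                // call once, before threads use the key
+* {
+*   tss_create(&thread_label, NULL);
+* }
+*
+* void label_this_thread(char *name)
+* {
+*   tss_set(thread_label, name);
+* }
+*
+* char *label_of_this_thread(void)
+* {
+*   return (char*)tss_get(thread_label);
+* }
+* @endcode
+*/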
+
+#endif /* _TINYTHREAD_H_ */
+
diff --git a/chromium/third_party/dawn/third_party/glfw/deps/vs2008/stdint.h b/chromium/third_party/dawn/third_party/glfw/deps/vs2008/stdint.h
new file mode 100644
index 00000000000..d02608a5972
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/deps/vs2008/stdint.h
@@ -0,0 +1,247 @@
+// ISO C9x compliant stdint.h for Microsoft Visual Studio
+// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124
+//
+// Copyright (c) 2006-2008 Alexander Chemeris
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. The name of the author may be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef _MSC_VER // [
+#error "Use this header only with Microsoft Visual C++ compilers!"
+#endif // _MSC_VER ]
+
+#ifndef _MSC_STDINT_H_ // [
+#define _MSC_STDINT_H_
+
+#if _MSC_VER > 1000
+#pragma once
+#endif
+
+#include <limits.h>
+
+// For Visual Studio 6 in C++ mode and for many Visual Studio versions when
+// compiling for ARM we should wrap <wchar.h> include with 'extern "C++" {}'
+// or the compiler gives many errors like this:
+// error C2733: second C linkage of overloaded function 'wmemchr' not allowed
+#ifdef __cplusplus
+extern "C" {
+#endif
+# include <wchar.h>
+#ifdef __cplusplus
+}
+#endif
+
+// Define _W64 macros to mark types changing their size, like intptr_t.
+#ifndef _W64
+# if !defined(__midl) && (defined(_X86_) || defined(_M_IX86)) && _MSC_VER >= 1300
+# define _W64 __w64
+# else
+# define _W64
+# endif
+#endif
+
+
+// 7.18.1 Integer types
+
+// 7.18.1.1 Exact-width integer types
+
+// Visual Studio 6 and Embedded Visual C++ 4 don't
+// realize that, e.g. char has the same size as __int8
+// so we give up on __intX for them.
+#if (_MSC_VER < 1300)
+ typedef signed char int8_t;
+ typedef signed short int16_t;
+ typedef signed int int32_t;
+ typedef unsigned char uint8_t;
+ typedef unsigned short uint16_t;
+ typedef unsigned int uint32_t;
+#else
+ typedef signed __int8 int8_t;
+ typedef signed __int16 int16_t;
+ typedef signed __int32 int32_t;
+ typedef unsigned __int8 uint8_t;
+ typedef unsigned __int16 uint16_t;
+ typedef unsigned __int32 uint32_t;
+#endif
+typedef signed __int64 int64_t;
+typedef unsigned __int64 uint64_t;
+
+
+// 7.18.1.2 Minimum-width integer types
+typedef int8_t int_least8_t;
+typedef int16_t int_least16_t;
+typedef int32_t int_least32_t;
+typedef int64_t int_least64_t;
+typedef uint8_t uint_least8_t;
+typedef uint16_t uint_least16_t;
+typedef uint32_t uint_least32_t;
+typedef uint64_t uint_least64_t;
+
+// 7.18.1.3 Fastest minimum-width integer types
+typedef int8_t int_fast8_t;
+typedef int16_t int_fast16_t;
+typedef int32_t int_fast32_t;
+typedef int64_t int_fast64_t;
+typedef uint8_t uint_fast8_t;
+typedef uint16_t uint_fast16_t;
+typedef uint32_t uint_fast32_t;
+typedef uint64_t uint_fast64_t;
+
+// 7.18.1.4 Integer types capable of holding object pointers
+#ifdef _WIN64 // [
+ typedef signed __int64 intptr_t;
+ typedef unsigned __int64 uintptr_t;
+#else // _WIN64 ][
+ typedef _W64 signed int intptr_t;
+ typedef _W64 unsigned int uintptr_t;
+#endif // _WIN64 ]
+
+// 7.18.1.5 Greatest-width integer types
+typedef int64_t intmax_t;
+typedef uint64_t uintmax_t;
+
+
+// 7.18.2 Limits of specified-width integer types
+
+#if !defined(__cplusplus) || defined(__STDC_LIMIT_MACROS) // [ See footnote 220 at page 257 and footnote 221 at page 259
+
+// 7.18.2.1 Limits of exact-width integer types
+#define INT8_MIN ((int8_t)_I8_MIN)
+#define INT8_MAX _I8_MAX
+#define INT16_MIN ((int16_t)_I16_MIN)
+#define INT16_MAX _I16_MAX
+#define INT32_MIN ((int32_t)_I32_MIN)
+#define INT32_MAX _I32_MAX
+#define INT64_MIN ((int64_t)_I64_MIN)
+#define INT64_MAX _I64_MAX
+#define UINT8_MAX _UI8_MAX
+#define UINT16_MAX _UI16_MAX
+#define UINT32_MAX _UI32_MAX
+#define UINT64_MAX _UI64_MAX
+
+// 7.18.2.2 Limits of minimum-width integer types
+#define INT_LEAST8_MIN INT8_MIN
+#define INT_LEAST8_MAX INT8_MAX
+#define INT_LEAST16_MIN INT16_MIN
+#define INT_LEAST16_MAX INT16_MAX
+#define INT_LEAST32_MIN INT32_MIN
+#define INT_LEAST32_MAX INT32_MAX
+#define INT_LEAST64_MIN INT64_MIN
+#define INT_LEAST64_MAX INT64_MAX
+#define UINT_LEAST8_MAX UINT8_MAX
+#define UINT_LEAST16_MAX UINT16_MAX
+#define UINT_LEAST32_MAX UINT32_MAX
+#define UINT_LEAST64_MAX UINT64_MAX
+
+// 7.18.2.3 Limits of fastest minimum-width integer types
+#define INT_FAST8_MIN INT8_MIN
+#define INT_FAST8_MAX INT8_MAX
+#define INT_FAST16_MIN INT16_MIN
+#define INT_FAST16_MAX INT16_MAX
+#define INT_FAST32_MIN INT32_MIN
+#define INT_FAST32_MAX INT32_MAX
+#define INT_FAST64_MIN INT64_MIN
+#define INT_FAST64_MAX INT64_MAX
+#define UINT_FAST8_MAX UINT8_MAX
+#define UINT_FAST16_MAX UINT16_MAX
+#define UINT_FAST32_MAX UINT32_MAX
+#define UINT_FAST64_MAX UINT64_MAX
+
+// 7.18.2.4 Limits of integer types capable of holding object pointers
+#ifdef _WIN64 // [
+# define INTPTR_MIN INT64_MIN
+# define INTPTR_MAX INT64_MAX
+# define UINTPTR_MAX UINT64_MAX
+#else // _WIN64 ][
+# define INTPTR_MIN INT32_MIN
+# define INTPTR_MAX INT32_MAX
+# define UINTPTR_MAX UINT32_MAX
+#endif // _WIN64 ]
+
+// 7.18.2.5 Limits of greatest-width integer types
+#define INTMAX_MIN INT64_MIN
+#define INTMAX_MAX INT64_MAX
+#define UINTMAX_MAX UINT64_MAX
+
+// 7.18.3 Limits of other integer types
+
+#ifdef _WIN64 // [
+# define PTRDIFF_MIN _I64_MIN
+# define PTRDIFF_MAX _I64_MAX
+#else // _WIN64 ][
+# define PTRDIFF_MIN _I32_MIN
+# define PTRDIFF_MAX _I32_MAX
+#endif // _WIN64 ]
+
+#define SIG_ATOMIC_MIN INT_MIN
+#define SIG_ATOMIC_MAX INT_MAX
+
+#ifndef SIZE_MAX // [
+# ifdef _WIN64 // [
+# define SIZE_MAX _UI64_MAX
+# else // _WIN64 ][
+# define SIZE_MAX _UI32_MAX
+# endif // _WIN64 ]
+#endif // SIZE_MAX ]
+
+// WCHAR_MIN and WCHAR_MAX are also defined in <wchar.h>
+#ifndef WCHAR_MIN // [
+# define WCHAR_MIN 0
+#endif // WCHAR_MIN ]
+#ifndef WCHAR_MAX // [
+# define WCHAR_MAX _UI16_MAX
+#endif // WCHAR_MAX ]
+
+#define WINT_MIN 0
+#define WINT_MAX _UI16_MAX
+
+#endif // __STDC_LIMIT_MACROS ]
+
+
+// 7.18.4 Limits of other integer types
+
+#if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) // [ See footnote 224 at page 260
+
+// 7.18.4.1 Macros for minimum-width integer constants
+
+#define INT8_C(val) val##i8
+#define INT16_C(val) val##i16
+#define INT32_C(val) val##i32
+#define INT64_C(val) val##i64
+
+#define UINT8_C(val) val##ui8
+#define UINT16_C(val) val##ui16
+#define UINT32_C(val) val##ui32
+#define UINT64_C(val) val##ui64
+
+// 7.18.4.2 Macros for greatest-width integer constants
+#define INTMAX_C INT64_C
+#define UINTMAX_C UINT64_C
+
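+// Usage sketch (editorial illustration; not part of the original header): the
+// constant macros simply append the MSVC integer-literal suffixes defined above,
+// e.g.
+//
+//   uint64_t high_mask = UINT64_C(0xFFFFFFFF00000000);  // 0xFFFFFFFF00000000ui64
+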
+#endif // __STDC_CONSTANT_MACROS ]
+
+
+#endif // _MSC_STDINT_H_ ]
diff --git a/chromium/third_party/dawn/third_party/glfw/docs/CMakeLists.txt b/chromium/third_party/dawn/third_party/glfw/docs/CMakeLists.txt
new file mode 100644
index 00000000000..79cad5608b9
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/docs/CMakeLists.txt
@@ -0,0 +1,46 @@
+
+# NOTE: The order of this list determines the order of items in the Guides
+# (i.e. Pages) list in the generated documentation
+set(source_files
+ main.dox
+ news.dox
+ quick.dox
+ moving.dox
+ compile.dox
+ build.dox
+ intro.dox
+ context.dox
+ monitor.dox
+ window.dox
+ input.dox
+ vulkan.dox
+ compat.dox
+ internal.dox)
+
+set(extra_files DoxygenLayout.xml header.html footer.html extra.css spaces.svg)
+
+set(header_paths
+ "${GLFW_SOURCE_DIR}/include/GLFW/glfw3.h"
+ "${GLFW_SOURCE_DIR}/include/GLFW/glfw3native.h")
+
+# Format the source list into a Doxyfile INPUT value that Doxygen can parse
+foreach(path IN LISTS header_paths)
+ string(APPEND GLFW_DOXYGEN_INPUT " \\\n\"${path}\"")
+endforeach()
+foreach(file IN LISTS source_files)
+ string(APPEND GLFW_DOXYGEN_INPUT " \\\n\"${CMAKE_CURRENT_SOURCE_DIR}/${file}\"")
+endforeach()
+
+configure_file(Doxyfile.in Doxyfile @ONLY)
+
+add_custom_command(OUTPUT "html/index.html"
+ COMMAND "${DOXYGEN_EXECUTABLE}"
+ WORKING_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}"
+ MAIN_DEPENDENCY Doxyfile
+ DEPENDS ${header_paths} ${source_files} ${extra_files}
+ COMMENT "Generating HTML documentation"
+ VERBATIM)
+
+add_custom_target(docs ALL SOURCES "html/index.html")
+set_target_properties(docs PROPERTIES FOLDER "GLFW3")
+
diff --git a/chromium/third_party/dawn/third_party/glfw/docs/CONTRIBUTING.md b/chromium/third_party/dawn/third_party/glfw/docs/CONTRIBUTING.md
new file mode 100644
index 00000000000..11ddf09bb3e
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/docs/CONTRIBUTING.md
@@ -0,0 +1,391 @@
+# Contribution Guide
+
+## Contents
+
+- [Asking a question](#asking-a-question)
+- [Reporting a bug](#reporting-a-bug)
+ - [Reporting a compile or link bug](#reporting-a-compile-or-link-bug)
+ - [Reporting a segfault or other crash bug](#reporting-a-segfault-or-other-crash-bug)
+ - [Reporting a context creation bug](#reporting-a-context-creation-bug)
+ - [Reporting a monitor or video mode bug](#reporting-a-monitor-or-video-mode-bug)
+ - [Reporting a window, input or event bug](#reporting-a-window-input-or-event-bug)
+ - [Reporting some other library bug](#reporting-some-other-library-bug)
+ - [Reporting a documentation bug](#reporting-a-documentation-bug)
+ - [Reporting a website bug](#reporting-a-website-bug)
+- [Requesting a feature](#requesting-a-feature)
+- [Contributing a bug fix](#contributing-a-bug-fix)
+- [Contributing a feature](#contributing-a-feature)
+
+
+## Asking a question
+
+Questions about how to use GLFW should be asked either in the [support
+section](https://discourse.glfw.org/c/support) of the forum, under the [Stack
+Overflow tag](https://stackoverflow.com/questions/tagged/glfw) or [Game
+Development tag](https://gamedev.stackexchange.com/questions/tagged/glfw) on
+Stack Exchange or in the IRC channel `#glfw` on
+[Libera.Chat](https://libera.chat/).
+
+Questions about the design or implementation of GLFW or about future plans
+should be asked in the [dev section](https://discourse.glfw.org/c/dev) of the
+forum or in the IRC channel. Please don't open a GitHub issue to discuss design
+questions without first checking with a maintainer.
+
+
+## Reporting a bug
+
+If GLFW is behaving unexpectedly at run-time, start by setting an [error
+callback](https://www.glfw.org/docs/latest/intro_guide.html#error_handling).
+GLFW will often tell you the cause of an error via this callback. If it
+doesn't, that might be a separate bug.
+
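+As a minimal, illustrative sketch (not part of the original guide), an error
+callback can be installed before `glfwInit` and will then report most
+initialization and run-time problems:
+
+```c
+#include <stdio.h>
+#include <GLFW/glfw3.h>
+
+static void error_callback(int code, const char* description)
+{
+    fprintf(stderr, "GLFW error %d: %s\n", code, description);
+}
+
+int main(void)
+{
+    glfwSetErrorCallback(error_callback); // allowed before glfwInit
+    if (!glfwInit())
+        return 1;
+    // ... create a window, run the application ...
+    glfwTerminate();
+    return 0;
+}
+```
+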
+If GLFW is crashing or triggering asserts, make sure that all your object
+handles and other pointers are valid.
+
+For bugs where it makes sense, a short, self contained example is absolutely
+invaluable. Just put it inline in the body text. Note that if the bug is
+reproducible with one of the test programs that come with GLFW, just mention
+that instead.
+
+__Don't worry about adding too much information__. Unimportant information can
+be abbreviated or removed later, but missing information can stall bug fixing,
+especially when your schedule doesn't align with that of the maintainer.
+
+__Please provide text as text, not as images__. This includes code, error
+messages and any other text. Text in images cannot be found by other users
+searching for the same problem and may have to be re-typed by maintainers when
+debugging.
+
+You don't need to manually indent your code or other text to quote it with
+GitHub Markdown; just surround it with triple backticks:
+
+ ```
+ Some quoted text.
+ ```
+
+You can also add syntax highlighting by appending the common file extension:
+
+ ```c
+ int five(void)
+ {
+ return 5;
+ }
+ ```
+
+There are issue labels for both platforms and GPU manufacturers, so there is no
+need to mention these in the subject line. If you do, it will be removed when
+the issue is labeled.
+
+If your bug is already reported, please add any new information you have, or if
+it already has everything, give it a :+1:.
+
+
+### Reporting a compile or link bug
+
+__Note:__ GLFW needs many system APIs to do its job, which on some platforms
+means linking to many system libraries. If you are using GLFW as a static
+library, that means your application needs to link to these in addition to GLFW.
+
+__Note:__ Check the [Compiling
+GLFW](https://www.glfw.org/docs/latest/compile.html) guide and/or the [Building
+applications](https://www.glfw.org/docs/latest/build.html) guide before
+opening an issue of this kind. Most issues are caused by a missing package or
+linker flag.
+
+Always include the __operating system name and version__ (e.g. `Windows
+7 64-bit` or `Ubuntu 15.10`) and the __compiler name and version__ (e.g. `Visual
+C++ 2015 Update 2`). If you are using an official release of GLFW,
+include the __GLFW release version__ (e.g. `3.1.2`), otherwise include the
+__GLFW commit ID__ (e.g. `3795d78b14ef06008889cc422a1fb8d642597751`) from Git.
+
+Please also include the __complete build log__ from your compiler and linker,
+even if it's long. It can always be shortened later, if necessary.
+
+
+#### Quick template
+
+```
+OS and version:
+Compiler version:
+Release or commit:
+Build log:
+```
+
+
+### Reporting a segfault or other crash bug
+
+Always include the __operating system name and version__ (e.g. `Windows
+7 64-bit` or `Ubuntu 15.10`). If you are using an official release of GLFW,
+include the __GLFW release version__ (e.g. `3.1.2`), otherwise include the
+__GLFW commit ID__ (e.g. `3795d78b14ef06008889cc422a1fb8d642597751`) from Git.
+
+Please also include any __error messages__ provided to your application via the
+[error
+callback](https://www.glfw.org/docs/latest/intro_guide.html#error_handling) and
+the __full call stack__ of the crash, or if the crash does not occur in debug
+mode, mention that instead.
+
+
+#### Quick template
+
+```
+OS and version:
+Release or commit:
+Error messages:
+Call stack:
+```
+
+
+### Reporting a context creation bug
+
+__Note:__ Windows ships with graphics drivers that do not support OpenGL. If
+GLFW says that your machine lacks support for OpenGL, it very likely does.
+Install drivers from the computer manufacturer or graphics card manufacturer
+([Nvidia](https://www.geforce.com/drivers),
+[AMD](https://www.amd.com/en/support),
+[Intel](https://www-ssl.intel.com/content/www/us/en/support/detect.html)) to
+fix this.
+
+__Note:__ AMD only supports OpenGL ES on Windows via EGL. See the
+[GLFW\_CONTEXT\_CREATION\_API](https://www.glfw.org/docs/latest/window_guide.html#window_hints_ctx)
+hint for how to select EGL.
+
+Please verify that context creation also fails with the `glfwinfo` tool before
+reporting it as a bug. This tool is included in the GLFW source tree as
+`tests/glfwinfo.c` and is built along with the library. It has switches for all
+GLFW context and framebuffer hints. Run `glfwinfo -h` for a complete list.
+
+Always include the __operating system name and version__ (e.g. `Windows
+7 64-bit` or `Ubuntu 15.10`). If you are using an official release of GLFW,
+include the __GLFW release version__ (e.g. `3.1.2`), otherwise include the
+__GLFW commit ID__ (e.g. `3795d78b14ef06008889cc422a1fb8d642597751`) from Git.
+
+If you are running your program in a virtual machine, please mention this and
+include the __VM name and version__ (e.g. `VirtualBox 5.1`).
+
+Please also include the __GLFW version string__ (`3.2.0 X11 EGL clock_gettime
+/dev/js`), as described
+[here](https://www.glfw.org/docs/latest/intro.html#intro_version_string), the
+__GPU model and driver version__ (e.g. `GeForce GTX660 with 352.79`), and the
+__output of `glfwinfo`__ (with switches matching any hints you set in your
+code) when reporting this kind of bug. If this tool doesn't run on the machine,
+mention that instead.
+
+
+#### Quick template
+
+```
+OS and version:
+GPU and driver:
+Release or commit:
+Version string:
+glfwinfo output:
+```
+
+
+### Reporting a monitor or video mode bug
+
+__Note:__ On headless systems on some platforms, no monitors are reported. This
+causes glfwGetPrimaryMonitor to return `NULL`, which not all applications are
+prepared for.
+
+__Note:__ Some third-party tools report more video modes than are approved of
+by the OS. For safety and compatibility, GLFW only reports video modes the OS
+wants programs to use. This is not a bug.
+
+The `monitors` tool is included in the GLFW source tree as `tests/monitors.c`
+and is built along with the library. It lists all information GLFW provides
+about monitors it detects.
+
+Always include the __operating system name and version__ (e.g. `Windows
+7 64-bit` or `Ubuntu 15.10`). If you are using an official release of GLFW,
+include the __GLFW release version__ (e.g. `3.1.2`), otherwise include the
+__GLFW commit ID__ (e.g. `3795d78b14ef06008889cc422a1fb8d642597751`) from Git.
+
+If you are running your program in a virtual machine, please mention this and
+include the __VM name and version__ (e.g. `VirtualBox 5.1`).
+
+Please also include any __error messages__ provided to your application via the
+[error
+callback](https://www.glfw.org/docs/latest/intro_guide.html#error_handling) and
+the __output of `monitors`__ when reporting this kind of bug. If this tool
+doesn't run on the machine, mention this instead.
+
+
+#### Quick template
+
+```
+OS and version:
+Release or commit:
+Error messages:
+monitors output:
+```
+
+
+### Reporting a window, input or event bug
+
+__Note:__ The exact ordering of related window events will sometimes differ.
+
+__Note:__ Window moving and resizing (by the user) will block the main thread on
+some platforms. This is not a bug. Set a [refresh
+callback](https://www.glfw.org/docs/latest/window.html#window_refresh) if you
+want to keep the window contents updated during a move or size operation.
+
+The `events` tool is included in the GLFW source tree as `tests/events.c` and is
+built along with the library. It prints all information provided to every
+callback supported by GLFW as events occur. Each event is listed with the time
+and a unique number to make discussions about event logs easier. The tool has
+command-line options for creating multiple windows and full screen windows.
+
+Always include the __operating system name and version__ (e.g. `Windows
+7 64-bit` or `Ubuntu 15.10`). If you are using an official release of GLFW,
+include the __GLFW release version__ (e.g. `3.1.2`), otherwise include the
+__GLFW commit ID__ (e.g. `3795d78b14ef06008889cc422a1fb8d642597751`) from Git.
+
+If you are running your program in a virtual machine, please mention this and
+include the __VM name and version__ (e.g. `VirtualBox 5.1`).
+
+Please also include any __error messages__ provided to your application via the
+[error
+callback](https://www.glfw.org/docs/latest/intro_guide.html#error_handling) and
+if relevant, the __output of `events`__ when reporting this kind of bug. If
+this tool doesn't run on the machine, mention this instead.
+
+__X11:__ If possible, please include what desktop environment (e.g. GNOME,
+Unity, KDE) and/or window manager (e.g. Openbox, dwm, Window Maker) you are
+running. If the bug is related to keyboard input, please include any input
+method (e.g. ibus, SCIM) you are using.
+
+
+#### Quick template
+
+```
+OS and version:
+Release or commit:
+Error messages:
+events output:
+```
+
+
+### Reporting some other library bug
+
+Always include the __operating system name and version__ (e.g. `Windows
+7 64-bit` or `Ubuntu 15.10`). If you are using an official release of GLFW,
+include the __GLFW release version__ (e.g. `3.1.2`), otherwise include the
+__GLFW commit ID__ (e.g. `3795d78b14ef06008889cc422a1fb8d642597751`) from Git.
+
+Please also include any __error messages__ provided to your application via the
+[error
+callback](https://www.glfw.org/docs/latest/intro_guide.html#error_handling), if
+relevant.
+
+
+#### Quick template
+
+```
+OS and version:
+Release or commit:
+Error messages:
+```
+
+
+### Reporting a documentation bug
+
+If you found a bug in the documentation, including this file, then it's fine to
+just link to that web page or mention that source file. You don't need to match
+the source to the output or vice versa.
+
+
+### Reporting a website bug
+
+If the bug is in the documentation (anything under `/docs/`) then please see the
+section above. Bugs in the rest of the site are reported to the [website
+source repository](https://github.com/glfw/website/issues).
+
+
+## Requesting a feature
+
+Please explain why you need the feature and how you intend to use it. If you
+have a specific API design in mind, please add that as well. If you have or are
+planning to write code for the feature, see the section below.
+
+If there already is a request for the feature you need, add your specific use
+case unless it is already mentioned. If it is, give it a :+1:.
+
+
+## Contributing a bug fix
+
+__Note:__ You must have all necessary [intellectual
+property rights](https://en.wikipedia.org/wiki/Intellectual_property) to any
+code you contribute. If you did not write the code yourself, you must explain
+where it came from and under what license you received it. Even code using the
+same license as GLFW may not be copied without attribution.
+
+__There is no preferred patch size__. A one character fix is just as welcome as
+a thousand line one, if that is the appropriate size for the fix.
+
+In addition to the code, a complete bug fix includes:
+
+- Change log entry in `README.md`, describing the incorrect behavior
+- Credits entries for all authors of the bug fix
+
+Bug fixes will not be rejected because they don't include all the above parts,
+but please keep in mind that maintainer time is finite and that there are many
+other bugs and features to work on.
+
+If the patch fixes a bug introduced after the last release, it should not get
+a change log entry.
+
+If you haven't already, read the excellent article [How to Write a Git Commit
+Message](https://chris.beams.io/posts/git-commit/).
+
+
+## Contributing a feature
+
+__Note:__ You must have all necessary rights to any code you contribute. If you
+did not write the code yourself, you must explain where it came from and under
+what license. Even code using the same license as GLFW may not be copied
+without attribution.
+
+__Note:__ If you haven't already implemented the feature, check first if there
+already is an open issue for it and if it's already being developed in an
+[experimental branch](https://github.com/glfw/glfw/branches/all).
+
+__There is no preferred patch size__. A one character change is just as welcome
+as a change adding a thousand lines, if that is the appropriate size for the
+feature.
+
+In addition to the code, a complete feature includes:
+
+- Change log entry in `README.md`, listing all new symbols
+- News page entry, briefly describing the feature
+- Guide documentation, with minimal examples, in the relevant guide
+- Reference documentation, with all applicable tags
+- Cross-references and mentions in appropriate places
+- Credits entries for all authors of the feature
+
+If the feature requires platform-specific code, at minimum stubs must be added
+for the new platform function to all supported and experimental platforms.
+
+If it adds a new callback, support for it must be added to `tests/events.c`.
+
+If it adds a new monitor property, support for it must be added to
+`tests/monitors.c`.
+
+If it adds a new OpenGL, OpenGL ES or Vulkan option or extension, support
+for it must be added to `tests/glfwinfo.c` and the behavior of the library when
+the extension is missing documented in `docs/compat.dox`.
+
+If you haven't already, read the excellent article [How to Write a Git Commit
+Message](https://chris.beams.io/posts/git-commit/).
+
+Features will not be rejected because they don't include all the above parts,
+but please keep in mind that maintainer time is finite and that there are many
+other features and bugs to work on.
+
+Please also keep in mind that any part of the public API that has been included
+in a release cannot be changed until the next _major_ version. Features can be
+added and existing parts can sometimes be overloaded (in the general sense of
+doing more things, not in the C++ sense), but code written to the API of one
+minor release should both compile and run on subsequent minor releases.
+
diff --git a/chromium/third_party/dawn/third_party/glfw/docs/Doxyfile.in b/chromium/third_party/dawn/third_party/glfw/docs/Doxyfile.in
new file mode 100644
index 00000000000..812eec5d687
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/docs/Doxyfile.in
@@ -0,0 +1,2465 @@
+# Doxyfile 1.8.18
+
+# This file describes the settings to be used by the documentation system
+# doxygen (www.doxygen.org) for a project.
+#
+# All text after a double hash (##) is considered a comment and is placed in
+# front of the TAG it is preceding.
+#
+# All text after a single hash (#) is considered a comment and will be ignored.
+# The format is:
+# TAG = value [value, ...]
+# For lists, items can also be appended using:
+# TAG += value [value, ...]
+# Values that contain spaces should be placed between quotes (\" \").
+
+#---------------------------------------------------------------------------
+# Project related configuration options
+#---------------------------------------------------------------------------
+
+# This tag specifies the encoding used for all characters in the configuration
+# file that follow. The default is UTF-8 which is also the encoding used for all
+# text before the first occurrence of this tag. Doxygen uses libiconv (or the
+# iconv built into libc) for the transcoding. See
+# https://www.gnu.org/software/libiconv/ for the list of possible encodings.
+# The default value is: UTF-8.
+
+DOXYFILE_ENCODING = UTF-8
+
+# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by
+# double-quotes, unless you are using Doxywizard) that should identify the
+# project for which the documentation is generated. This name is used in the
+# title of most generated pages and in a few other places.
+# The default value is: My Project.
+
+PROJECT_NAME = "GLFW"
+
+# The PROJECT_NUMBER tag can be used to enter a project or revision number. This
+# could be handy for archiving the generated documentation or if some version
+# control system is used.
+
+PROJECT_NUMBER = @GLFW_VERSION@
+
+# Using the PROJECT_BRIEF tag one can provide an optional one line description
+# for a project that appears at the top of each page and should give viewer a
+# quick idea about the purpose of the project. Keep the description short.
+
+PROJECT_BRIEF = "A multi-platform library for OpenGL, window and input"
+
+# With the PROJECT_LOGO tag one can specify a logo or an icon that is included
+# in the documentation. The maximum height of the logo should not exceed 55
+# pixels and the maximum width should not exceed 200 pixels. Doxygen will copy
+# the logo to the output directory.
+
+PROJECT_LOGO =
+
+# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path
+# into which the generated documentation will be written. If a relative path is
+# entered, it will be relative to the location where doxygen was started. If
+# left blank the current directory will be used.
+
+OUTPUT_DIRECTORY = "@GLFW_BINARY_DIR@/docs"
+
+# If the CREATE_SUBDIRS tag is set to YES then doxygen will create 4096 sub-
+# directories (in 2 levels) under the output directory of each output format and
+# will distribute the generated files over these directories. Enabling this
+# option can be useful when feeding doxygen a huge amount of source files, where
+# putting all generated files in the same directory would otherwise cause
+# performance problems for the file system.
+# The default value is: NO.
+
+CREATE_SUBDIRS = NO
+
+# If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII
+# characters to appear in the names of generated files. If set to NO, non-ASCII
+# characters will be escaped, for example _xE3_x81_x84 will be used for Unicode
+# U+3044.
+# The default value is: NO.
+
+ALLOW_UNICODE_NAMES = NO
+
+# The OUTPUT_LANGUAGE tag is used to specify the language in which all
+# documentation generated by doxygen is written. Doxygen will use this
+# information to generate all constant output in the proper language.
+# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese,
+# Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States),
+# Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian,
+# Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages),
+# Korean, Korean-en (Korean with English messages), Latvian, Lithuanian,
+# Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian,
+# Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish,
+# Ukrainian and Vietnamese.
+# The default value is: English.
+
+OUTPUT_LANGUAGE = English
+
+# If the BRIEF_MEMBER_DESC tag is set to YES, doxygen will include brief member
+# descriptions after the members that are listed in the file and class
+# documentation (similar to Javadoc). Set to NO to disable this.
+# The default value is: YES.
+
+BRIEF_MEMBER_DESC = YES
+
+# If the REPEAT_BRIEF tag is set to YES, doxygen will prepend the brief
+# description of a member or function before the detailed description
+#
+# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
+# brief descriptions will be completely suppressed.
+# The default value is: YES.
+
+REPEAT_BRIEF = NO
+
+# This tag implements a quasi-intelligent brief description abbreviator that is
+# used to form the text in various listings. Each string in this list, if found
+# as the leading text of the brief description, will be stripped from the text
+# and the result, after processing the whole list, is used as the annotated
+# text. Otherwise, the brief description is used as-is. If left blank, the
+# following values are used ($name is automatically replaced with the name of
+# the entity):The $name class, The $name widget, The $name file, is, provides,
+# specifies, contains, represents, a, an and the.
+
+ABBREVIATE_BRIEF =
+
+# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
+# doxygen will generate a detailed section even if there is only a brief
+# description.
+# The default value is: NO.
+
+ALWAYS_DETAILED_SEC = YES
+
+# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
+# inherited members of a class in the documentation of that class as if those
+# members were ordinary class members. Constructors, destructors and assignment
+# operators of the base classes will not be shown.
+# The default value is: NO.
+
+INLINE_INHERITED_MEMB = NO
+
+# If the FULL_PATH_NAMES tag is set to YES, doxygen will prepend the full path
+# before files name in the file list and in the header files. If set to NO the
+# shortest path that makes the file name unique will be used
+# The default value is: YES.
+
+FULL_PATH_NAMES = NO
+
+# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path.
+# Stripping is only done if one of the specified strings matches the left-hand
+# part of the path. The tag can be used to show relative paths in the file list.
+# If left blank the directory from which doxygen is run is used as the path to
+# strip.
+#
+# Note that you can specify absolute paths here, but also relative paths, which
+# will be relative from the directory where doxygen is started.
+# This tag requires that the tag FULL_PATH_NAMES is set to YES.
+
+STRIP_FROM_PATH =
+
+# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the
+# path mentioned in the documentation of a class, which tells the reader which
+# header file to include in order to use a class. If left blank only the name of
+# the header file containing the class definition is used. Otherwise one should
+# specify the list of include paths that are normally passed to the compiler
+# using the -I flag.
+
+STRIP_FROM_INC_PATH =
+
+# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but
+# less readable) file names. This can be useful if your file system doesn't
+# support long names like on DOS, Mac, or CD-ROM.
+# The default value is: NO.
+
+SHORT_NAMES = NO
+
+# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the
+# first line (until the first dot) of a Javadoc-style comment as the brief
+# description. If set to NO, the Javadoc-style will behave just like regular Qt-
+# style comments (thus requiring an explicit @brief command for a brief
+# description.)
+# The default value is: NO.
+
+JAVADOC_AUTOBRIEF = NO
+
+# If the JAVADOC_BANNER tag is set to YES then doxygen will interpret a line
+# such as
+# /***************
+# as being the beginning of a Javadoc-style comment "banner". If set to NO, the
+# Javadoc-style will behave just like regular comments and it will not be
+# interpreted by doxygen.
+# The default value is: NO.
+
+JAVADOC_BANNER = NO
+
+# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first
+# line (until the first dot) of a Qt-style comment as the brief description. If
+# set to NO, the Qt-style will behave just like regular Qt-style comments (thus
+# requiring an explicit \brief command for a brief description.)
+# The default value is: NO.
+
+QT_AUTOBRIEF = NO
+
+# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a
+# multi-line C++ special comment block (i.e. a block of //! or /// comments) as
+# a brief description. This used to be the default behavior. The new default is
+# to treat a multi-line C++ comment block as a detailed description. Set this
+# tag to YES if you prefer the old behavior instead.
+#
+# Note that setting this tag to YES also means that rational rose comments are
+# not recognized any more.
+# The default value is: NO.
+
+MULTILINE_CPP_IS_BRIEF = NO
+
+# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the
+# documentation from any documented member that it re-implements.
+# The default value is: YES.
+
+INHERIT_DOCS = YES
+
+# If the SEPARATE_MEMBER_PAGES tag is set to YES then doxygen will produce a new
+# page for each member. If set to NO, the documentation of a member will be part
+# of the file/class/namespace that contains it.
+# The default value is: NO.
+
+SEPARATE_MEMBER_PAGES = NO
+
+# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen
+# uses this value to replace tabs by spaces in code fragments.
+# Minimum value: 1, maximum value: 16, default value: 4.
+
+TAB_SIZE = 8
+
+# This tag can be used to specify a number of aliases that act as commands in
+# the documentation. An alias has the form:
+# name=value
+# For example adding
+# "sideeffect=@par Side Effects:\n"
+# will allow you to put the command \sideeffect (or @sideeffect) in the
+# documentation, which will result in a user-defined paragraph with heading
+# "Side Effects:". You can put \n's in the value part of an alias to insert
+# newlines (in the resulting output). You can put ^^ in the value part of an
+# alias to insert a newline as if a physical newline was in the original file.
+# When you need a literal { or } or , in the value part of an alias you have to
+# escape them by means of a backslash (\), this can lead to conflicts with the
+# commands \{ and \} for these it is advised to use the version @{ and @} or use
+# a double escape (\\{ and \\})
+
+ALIASES = "thread_safety=@par Thread safety^^" \
+ "pointer_lifetime=@par Pointer lifetime^^" \
+ "analysis=@par Analysis^^" \
+ "reentrancy=@par Reentrancy^^" \
+ "errors=@par Errors^^" \
+ "callback_signature=@par Callback signature^^" \
+ "glfw3=__GLFW 3:__" \
+ "x11=__X11:__" \
+ "wayland=__Wayland:__" \
+ "win32=__Windows:__" \
+ "macos=__macOS:__" \
+ "linux=__Linux:__"
+
+# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources
+# only. Doxygen will then generate output that is more tailored for C. For
+# instance, some of the names that are used will be different. The list of all
+# members will be omitted, etc.
+# The default value is: NO.
+
+OPTIMIZE_OUTPUT_FOR_C = YES
+
+# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or
+# Python sources only. Doxygen will then generate output that is more tailored
+# for that language. For instance, namespaces will be presented as packages,
+# qualified scopes will look different, etc.
+# The default value is: NO.
+
+OPTIMIZE_OUTPUT_JAVA = NO
+
+# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran
+# sources. Doxygen will then generate output that is tailored for Fortran.
+# The default value is: NO.
+
+OPTIMIZE_FOR_FORTRAN = NO
+
+# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL
+# sources. Doxygen will then generate output that is tailored for VHDL.
+# The default value is: NO.
+
+OPTIMIZE_OUTPUT_VHDL = NO
+
+# Set the OPTIMIZE_OUTPUT_SLICE tag to YES if your project consists of Slice
+# sources only. Doxygen will then generate output that is more tailored for that
+# language. For instance, namespaces will be presented as modules, types will be
+# separated into more groups, etc.
+# The default value is: NO.
+
+OPTIMIZE_OUTPUT_SLICE = NO
+
+# Doxygen selects the parser to use depending on the extension of the files it
+# parses. With this tag you can assign which parser to use for a given
+# extension. Doxygen has a built-in mapping, but you can override or extend it
+# using this tag. The format is ext=language, where ext is a file extension, and
+# language is one of the parsers supported by doxygen: IDL, Java, JavaScript,
+# Csharp (C#), C, C++, D, PHP, md (Markdown), Objective-C, Python, Slice, VHDL,
+# Fortran (fixed format Fortran: FortranFixed, free formatted Fortran:
+# FortranFree, unknown formatted Fortran: Fortran. In the latter case the parser
+# tries to guess whether the code is fixed or free formatted code, this is the
+# default for Fortran type files). For instance to make doxygen treat .inc files
+# as Fortran files (default is PHP), and .f files as C (default is Fortran),
+# use: inc=Fortran f=C.
+#
+# Note: For files without extension you can use no_extension as a placeholder.
+#
+# Note that for custom extensions you also need to set FILE_PATTERNS otherwise
+# the files are not read by doxygen.
+
+EXTENSION_MAPPING =
+
+# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments
+# according to the Markdown format, which allows for more readable
+# documentation. See https://daringfireball.net/projects/markdown/ for details.
+# The output of markdown processing is further processed by doxygen, so you can
+# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in
+# case of backward compatibilities issues.
+# The default value is: YES.
+
+MARKDOWN_SUPPORT = YES
+
+# When the TOC_INCLUDE_HEADINGS tag is set to a non-zero value, all headings up
+# to that level are automatically included in the table of contents, even if
+# they do not have an id attribute.
+# Note: This feature currently applies only to Markdown headings.
+# Minimum value: 0, maximum value: 99, default value: 5.
+# This tag requires that the tag MARKDOWN_SUPPORT is set to YES.
+
+TOC_INCLUDE_HEADINGS = 5
+
+# When enabled doxygen tries to link words that correspond to documented
+# classes, or namespaces to their corresponding documentation. Such a link can
+# be prevented in individual cases by putting a % sign in front of the word or
+# globally by setting AUTOLINK_SUPPORT to NO.
+# The default value is: YES.
+
+AUTOLINK_SUPPORT = YES
+
+# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
+# to include (a tag file for) the STL sources as input, then you should set this
+# tag to YES in order to let doxygen match function declarations and
+# definitions whose arguments contain STL classes (e.g. func(std::string);
+# versus func(std::string) {}). This also makes the inheritance and collaboration
+# diagrams that involve STL classes more complete and accurate.
+# The default value is: NO.
+
+BUILTIN_STL_SUPPORT = NO
+
+# If you use Microsoft's C++/CLI language, you should set this option to YES to
+# enable parsing support.
+# The default value is: NO.
+
+CPP_CLI_SUPPORT = NO
+
+# Set the SIP_SUPPORT tag to YES if your project consists of sip (see:
+# https://www.riverbankcomputing.com/software/sip/intro) sources only. Doxygen
+# will parse them like normal C++ but will assume all classes use public instead
+# of private inheritance when no explicit protection keyword is present.
+# The default value is: NO.
+
+SIP_SUPPORT = NO
+
+# For Microsoft's IDL there are propget and propput attributes to indicate
+# getter and setter methods for a property. Setting this option to YES will make
+# doxygen to replace the get and set methods by a property in the documentation.
+# This will only work if the methods are indeed getting or setting a simple
+# type. If this is not the case, or you want to show the methods anyway, you
+# should set this option to NO.
+# The default value is: YES.
+
+IDL_PROPERTY_SUPPORT = NO
+
+# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
+# tag is set to YES then doxygen will reuse the documentation of the first
+# member in the group (if any) for the other members of the group. By default
+# all members of a group must be documented explicitly.
+# The default value is: NO.
+
+DISTRIBUTE_GROUP_DOC = NO
+
+# If one adds a struct or class to a group and this option is enabled, then also
+# any nested class or struct is added to the same group. By default this option
+# is disabled and one has to add nested compounds explicitly via \ingroup.
+# The default value is: NO.
+
+GROUP_NESTED_COMPOUNDS = NO
+
+# Set the SUBGROUPING tag to YES to allow class member groups of the same type
+# (for instance a group of public functions) to be put as a subgroup of that
+# type (e.g. under the Public Functions section). Set it to NO to prevent
+# subgrouping. Alternatively, this can be done per class using the
+# \nosubgrouping command.
+# The default value is: YES.
+
+SUBGROUPING = YES
+
+# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions
+# are shown inside the group in which they are included (e.g. using \ingroup)
+# instead of on a separate page (for HTML and Man pages) or section (for LaTeX
+# and RTF).
+#
+# Note that this feature does not work in combination with
+# SEPARATE_MEMBER_PAGES.
+# The default value is: NO.
+
+INLINE_GROUPED_CLASSES = NO
+
+# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions
+# with only public data fields or simple typedef fields will be shown inline in
+# the documentation of the scope in which they are defined (i.e. file,
+# namespace, or group documentation), provided this scope is documented. If set
+# to NO, structs, classes, and unions are shown on a separate page (for HTML and
+# Man pages) or section (for LaTeX and RTF).
+# The default value is: NO.
+
+INLINE_SIMPLE_STRUCTS = NO
+
+# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or
+# enum is documented as struct, union, or enum with the name of the typedef. So
+# typedef struct TypeS {} TypeT, will appear in the documentation as a struct
+# with name TypeT. When disabled the typedef will appear as a member of a file,
+# namespace, or class. And the struct will be named TypeS. This can typically be
+# useful for C code in case the coding convention dictates that all compound
+# types are typedef'ed and only the typedef is referenced, never the tag name.
+# The default value is: NO.
+
+TYPEDEF_HIDES_STRUCT = NO
+
+# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This
+# cache is used to resolve symbols given their name and scope. Since this can be
+# an expensive process and often the same symbol appears multiple times in the
+# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small
+# doxygen will become slower. If the cache is too large, memory is wasted. The
+# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range
+# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536
+# symbols. At the end of a run doxygen will report the cache usage and suggest
+# the optimal cache size from a speed point of view.
+# Minimum value: 0, maximum value: 9, default value: 0.
+
+LOOKUP_CACHE_SIZE = 0
+
+#---------------------------------------------------------------------------
+# Build related configuration options
+#---------------------------------------------------------------------------
+
+# If the EXTRACT_ALL tag is set to YES, doxygen will assume all entities in
+# documentation are documented, even if no documentation was available. Private
+# class members and static file members will be hidden unless the
+# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES.
+# Note: This will also disable the warnings about undocumented members that are
+# normally produced when WARNINGS is set to YES.
+# The default value is: NO.
+
+EXTRACT_ALL = YES
+
+# If the EXTRACT_PRIVATE tag is set to YES, all private members of a class will
+# be included in the documentation.
+# The default value is: NO.
+
+EXTRACT_PRIVATE = NO
+
+# If the EXTRACT_PRIV_VIRTUAL tag is set to YES, documented private virtual
+# methods of a class will be included in the documentation.
+# The default value is: NO.
+
+EXTRACT_PRIV_VIRTUAL = NO
+
+# If the EXTRACT_PACKAGE tag is set to YES, all members with package or internal
+# scope will be included in the documentation.
+# The default value is: NO.
+
+EXTRACT_PACKAGE = NO
+
+# If the EXTRACT_STATIC tag is set to YES, all static members of a file will be
+# included in the documentation.
+# The default value is: NO.
+
+EXTRACT_STATIC = NO
+
+# If the EXTRACT_LOCAL_CLASSES tag is set to YES, classes (and structs) defined
+# locally in source files will be included in the documentation. If set to NO,
+# only classes defined in header files are included. Does not have any effect
+# for Java sources.
+# The default value is: YES.
+
+EXTRACT_LOCAL_CLASSES = YES
+
+# This flag is only useful for Objective-C code. If set to YES, local methods,
+# which are defined in the implementation section but not in the interface are
+# included in the documentation. If set to NO, only methods in the interface are
+# included.
+# The default value is: NO.
+
+EXTRACT_LOCAL_METHODS = NO
+
+# If this flag is set to YES, the members of anonymous namespaces will be
+# extracted and appear in the documentation as a namespace called
+# 'anonymous_namespace{file}', where file will be replaced with the base name of
+# the file that contains the anonymous namespace. By default, anonymous
+# namespaces are hidden.
+# The default value is: NO.
+
+EXTRACT_ANON_NSPACES = NO
+
+# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all
+# undocumented members inside documented classes or files. If set to NO these
+# members will be included in the various overviews, but no documentation
+# section is generated. This option has no effect if EXTRACT_ALL is enabled.
+# The default value is: NO.
+
+HIDE_UNDOC_MEMBERS = NO
+
+# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all
+# undocumented classes that are normally visible in the class hierarchy. If set
+# to NO, these classes will be included in the various overviews. This option
+# has no effect if EXTRACT_ALL is enabled.
+# The default value is: NO.
+
+HIDE_UNDOC_CLASSES = NO
+
+# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend
+# declarations. If set to NO, these declarations will be included in the
+# documentation.
+# The default value is: NO.
+
+HIDE_FRIEND_COMPOUNDS = NO
+
+# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any
+# documentation blocks found inside the body of a function. If set to NO, these
+# blocks will be appended to the function's detailed documentation block.
+# The default value is: NO.
+
+HIDE_IN_BODY_DOCS = NO
+
+# The INTERNAL_DOCS tag determines if documentation that is typed after a
+# \internal command is included. If the tag is set to NO then the documentation
+# will be excluded. Set it to YES to include the internal documentation.
+# The default value is: NO.
+
+INTERNAL_DOCS = NO
+
+# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file
+# names in lower-case letters. If set to YES, upper-case letters are also
+# allowed. This is useful if you have classes or files whose names only differ
+# in case and if your file system supports case sensitive file names. Windows
+# (including Cygwin) and Mac users are advised to set this option to NO.
+# The default value is: system dependent.
+
+CASE_SENSE_NAMES = YES
+
+# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with
+# their full class and namespace scopes in the documentation. If set to YES, the
+# scope will be hidden.
+# The default value is: NO.
+
+HIDE_SCOPE_NAMES = NO
+
+# If the HIDE_COMPOUND_REFERENCE tag is set to NO (default) then doxygen will
+# append additional text to a page's title, such as Class Reference. If set to
+# YES the compound reference will be hidden.
+# The default value is: NO.
+
+HIDE_COMPOUND_REFERENCE= NO
+
+# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of
+# the files that are included by a file in the documentation of that file.
+# The default value is: YES.
+
+SHOW_INCLUDE_FILES = NO
+
+# If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each
+# grouped member an include statement to the documentation, telling the reader
+# which file to include in order to use the member.
+# The default value is: NO.
+
+SHOW_GROUPED_MEMB_INC = NO
+
+# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include
+# files with double quotes in the documentation rather than with sharp brackets.
+# The default value is: NO.
+
+FORCE_LOCAL_INCLUDES = NO
+
+# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the
+# documentation for inline members.
+# The default value is: YES.
+
+INLINE_INFO = YES
+
+# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the
+# (detailed) documentation of file and class members alphabetically by member
+# name. If set to NO, the members will appear in declaration order.
+# The default value is: YES.
+
+SORT_MEMBER_DOCS = NO
+
+# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief
+# descriptions of file, namespace and class members alphabetically by member
+# name. If set to NO, the members will appear in declaration order. Note that
+# this will also influence the order of the classes in the class list.
+# The default value is: NO.
+
+SORT_BRIEF_DOCS = NO
+
+# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the
+# (brief and detailed) documentation of class members so that constructors and
+# destructors are listed first. If set to NO the constructors will appear in the
+# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS.
+# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief
+# member documentation.
+# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting
+# detailed member documentation.
+# The default value is: NO.
+
+SORT_MEMBERS_CTORS_1ST = NO
+
+# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy
+# of group names into alphabetical order. If set to NO the group names will
+# appear in their defined order.
+# The default value is: NO.
+
+SORT_GROUP_NAMES = YES
+
+# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by
+# fully-qualified names, including namespaces. If set to NO, the class list will
+# be sorted only by class name, not including the namespace part.
+# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
+# Note: This option applies only to the class list, not to the alphabetical
+# list.
+# The default value is: NO.
+
+SORT_BY_SCOPE_NAME = NO
+
+# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper
+# type resolution of all parameters of a function it will reject a match between
+# the prototype and the implementation of a member function even if there is
+# only one candidate or it is obvious which candidate to choose by doing a
+# simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still
+# accept a match between prototype and implementation in such cases.
+# The default value is: NO.
+
+STRICT_PROTO_MATCHING = NO
+
+# The GENERATE_TODOLIST tag can be used to enable (YES) or disable (NO) the todo
+# list. This list is created by putting \todo commands in the documentation.
+# The default value is: YES.
+
+GENERATE_TODOLIST = YES
+
+# The GENERATE_TESTLIST tag can be used to enable (YES) or disable (NO) the test
+# list. This list is created by putting \test commands in the documentation.
+# The default value is: YES.
+
+GENERATE_TESTLIST = YES
+
+# The GENERATE_BUGLIST tag can be used to enable (YES) or disable (NO) the bug
+# list. This list is created by putting \bug commands in the documentation.
+# The default value is: YES.
+
+GENERATE_BUGLIST = YES
+
+# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or disable (NO)
+# the deprecated list. This list is created by putting \deprecated commands in
+# the documentation.
+# The default value is: YES.
+
+GENERATE_DEPRECATEDLIST= YES
+
+# The ENABLED_SECTIONS tag can be used to enable conditional documentation
+# sections, marked by \if <section_label> ... \endif and \cond <section_label>
+# ... \endcond blocks.
+
+ENABLED_SECTIONS =
+
+# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the
+# initial value of a variable or macro / define can have for it to appear in the
+# documentation. If the initializer consists of more lines than specified here
+# it will be hidden. Use a value of 0 to hide initializers completely. The
+# appearance of the value of individual variables and macros / defines can be
+# controlled using \showinitializer or \hideinitializer command in the
+# documentation regardless of this setting.
+# Minimum value: 0, maximum value: 10000, default value: 30.
+
+MAX_INITIALIZER_LINES = 30
+
+# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at
+# the bottom of the documentation of classes and structs. If set to YES, the
+# list will mention the files that were used to generate the documentation.
+# The default value is: YES.
+
+SHOW_USED_FILES = YES
+
+# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This
+# will remove the Files entry from the Quick Index and from the Folder Tree View
+# (if specified).
+# The default value is: YES.
+
+SHOW_FILES = YES
+
+# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces
+# page. This will remove the Namespaces entry from the Quick Index and from the
+# Folder Tree View (if specified).
+# The default value is: YES.
+
+SHOW_NAMESPACES = NO
+
+# The FILE_VERSION_FILTER tag can be used to specify a program or script that
+# doxygen should invoke to get the current version for each file (typically from
+# the version control system). Doxygen will invoke the program by executing (via
+# popen()) the command <command> <input-file>, where <command> is the value of
+# the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file
+# provided by doxygen. Whatever the program writes to standard output is used as
+# the file version. For an example see the documentation.
+
+FILE_VERSION_FILTER =
+
+# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
+# by doxygen. The layout file controls the global structure of the generated
+# output files in an output format independent way. To create the layout file
+# that represents doxygen's defaults, run doxygen with the -l option. You can
+# optionally specify a file name after the option, if omitted DoxygenLayout.xml
+# will be used as the name of the layout file.
+#
+# Note that if you run doxygen from a directory containing a file called
+# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE
+# tag is left empty.
+
+LAYOUT_FILE = "@GLFW_SOURCE_DIR@/docs/DoxygenLayout.xml"
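+
+# For reference, a layout file containing doxygen's defaults (a typical starting
+# point for a custom file such as the one above) can be generated with, for
+# example:
+#   doxygen -l DoxygenLayout.xml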
+
+# The CITE_BIB_FILES tag can be used to specify one or more bib files containing
+# the reference definitions. This must be a list of .bib files. The .bib
+# extension is automatically appended if omitted. This requires the bibtex tool
+# to be installed. See also https://en.wikipedia.org/wiki/BibTeX for more info.
+# For LaTeX the style of the bibliography can be controlled using
+# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the
+# search path. See also \cite for info how to create references.
+
+CITE_BIB_FILES =
+
+#---------------------------------------------------------------------------
+# Configuration options related to warning and progress messages
+#---------------------------------------------------------------------------
+
+# The QUIET tag can be used to turn on/off the messages that are generated to
+# standard output by doxygen. If QUIET is set to YES this implies that the
+# messages are off.
+# The default value is: NO.
+
+QUIET = YES
+
+# The WARNINGS tag can be used to turn on/off the warning messages that are
+# generated to standard error (stderr) by doxygen. If WARNINGS is set to YES
+# this implies that the warnings are on.
+#
+# Tip: Turn warnings on while writing the documentation.
+# The default value is: YES.
+
+WARNINGS = YES
+
+# If the WARN_IF_UNDOCUMENTED tag is set to YES then doxygen will generate
+# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag
+# will automatically be disabled.
+# The default value is: YES.
+
+WARN_IF_UNDOCUMENTED = YES
+
+# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for
+# potential errors in the documentation, such as not documenting some parameters
+# in a documented function, or documenting parameters that don't exist or using
+# markup commands wrongly.
+# The default value is: YES.
+
+WARN_IF_DOC_ERROR = YES
+
+# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that
+# are documented, but have no documentation for their parameters or return
+# value. If set to NO, doxygen will only warn about wrong or incomplete
+# parameter documentation, but not about the absence of documentation. If
+# EXTRACT_ALL is set to YES then this flag will automatically be disabled.
+# The default value is: NO.
+
+WARN_NO_PARAMDOC = YES
+
+# If the WARN_AS_ERROR tag is set to YES then doxygen will immediately stop when
+# a warning is encountered.
+# The default value is: NO.
+
+WARN_AS_ERROR = NO
+
+# The WARN_FORMAT tag determines the format of the warning messages that doxygen
+# can produce. The string should contain the $file, $line, and $text tags, which
+# will be replaced by the file and line number from which the warning originated
+# and the warning text. Optionally the format may contain $version, which will
+# be replaced by the version of the file (if it could be obtained via
+# FILE_VERSION_FILTER)
+# The default value is: $file:$line: $text.
+
+WARN_FORMAT = "$file:$line: $text"
+
+# The WARN_LOGFILE tag can be used to specify a file to which warning and error
+# messages should be written. If left blank the output is written to standard
+# error (stderr).
+
+WARN_LOGFILE = "@GLFW_BINARY_DIR@/docs/warnings.txt"
+
+#---------------------------------------------------------------------------
+# Configuration options related to the input files
+#---------------------------------------------------------------------------
+
+# The INPUT tag is used to specify the files and/or directories that contain
+# documented source files. You may enter file names like myfile.cpp or
+# directories like /usr/src/myproject. Separate the files or directories with
+# spaces. See also FILE_PATTERNS and EXTENSION_MAPPING
+# Note: If this tag is empty the current directory is searched.
+
+INPUT = @GLFW_DOXYGEN_INPUT@
+
+# This tag can be used to specify the character encoding of the source files
+# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses
+# libiconv (or the iconv built into libc) for the transcoding. See the libiconv
+# documentation (see: https://www.gnu.org/software/libiconv/) for the list of
+# possible encodings.
+# The default value is: UTF-8.
+
+INPUT_ENCODING = UTF-8
+
+# If the value of the INPUT tag contains directories, you can use the
+# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and
+# *.h) to filter out the source-files in the directories.
+#
+# Note that for custom extensions or not directly supported extensions you also
+# need to set EXTENSION_MAPPING for the extension otherwise the files are not
+# read by doxygen.
+#
+# If left blank the following patterns are tested: *.c, *.cc, *.cxx, *.cpp,
+# *.c++, *.java, *.ii, *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h,
+# *.hh, *.hxx, *.hpp, *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc,
+# *.m, *.markdown, *.md, *.mm, *.dox (to be provided as doxygen C comment),
+# *.doc (to be provided as doxygen C comment), *.txt (to be provided as doxygen
+# C comment), *.py, *.pyw, *.f90, *.f95, *.f03, *.f08, *.f18, *.f, *.for, *.vhd,
+# *.vhdl, *.ucf, *.qsf and *.ice.
+
+FILE_PATTERNS = *.h *.dox
+
+# The RECURSIVE tag can be used to specify whether or not subdirectories should
+# be searched for input files as well.
+# The default value is: NO.
+
+RECURSIVE = NO
+
+# The EXCLUDE tag can be used to specify files and/or directories that should be
+# excluded from the INPUT source files. This way you can easily exclude a
+# subdirectory from a directory tree whose root is specified with the INPUT tag.
+#
+# Note that relative paths are relative to the directory from which doxygen is
+# run.
+
+EXCLUDE =
+
+# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
+# directories that are symbolic links (a Unix file system feature) are excluded
+# from the input.
+# The default value is: NO.
+
+EXCLUDE_SYMLINKS = NO
+
+# If the value of the INPUT tag contains directories, you can use the
+# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
+# certain files from those directories.
+#
+# Note that the wildcards are matched against the file with absolute path, so to
+# exclude all test directories for example use the pattern */test/*
+
+EXCLUDE_PATTERNS =
+
+# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
+# (namespaces, classes, functions, etc.) that should be excluded from the
+# output. The symbol name can be a fully qualified name, a word, or if the
+# wildcard * is used, a substring. Examples: ANamespace, AClass,
+# AClass::ANamespace, ANamespace::*Test
+#
+# Note that the wildcards are matched against the file with absolute path, so to
+# exclude all test directories use the pattern */test/*
+
+EXCLUDE_SYMBOLS = APIENTRY GLFWAPI
+
+# The EXAMPLE_PATH tag can be used to specify one or more files or directories
+# that contain example code fragments that are included (see the \include
+# command).
+
+EXAMPLE_PATH = "@GLFW_SOURCE_DIR@/examples"
+
+# If the value of the EXAMPLE_PATH tag contains directories, you can use the
+# EXAMPLE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and
+# *.h) to filter out the source-files in the directories. If left blank all
+# files are included.
+
+EXAMPLE_PATTERNS =
+
+# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
+# searched for input files to be used with the \include or \dontinclude commands
+# irrespective of the value of the RECURSIVE tag.
+# The default value is: NO.
+
+EXAMPLE_RECURSIVE = NO
+
+# The IMAGE_PATH tag can be used to specify one or more files or directories
+# that contain images that are to be included in the documentation (see the
+# \image command).
+
+IMAGE_PATH =
+
+# The INPUT_FILTER tag can be used to specify a program that doxygen should
+# invoke to filter for each input file. Doxygen will invoke the filter program
+# by executing (via popen()) the command:
+#
+# <filter> <input-file>
+#
+# where <filter> is the value of the INPUT_FILTER tag, and <input-file> is the
+# name of an input file. Doxygen will then use the output that the filter
+# program writes to standard output. If FILTER_PATTERNS is specified, this tag
+# will be ignored.
+#
+# Note that the filter must not add or remove lines; it is applied before the
+# code is scanned, but not when the output code is generated. If lines are added
+# or removed, the anchors will not be placed correctly.
+#
+# Note that for custom extensions or not directly supported extensions you also
+# need to set EXTENSION_MAPPING for the extension otherwise the files are not
+# properly processed by doxygen.
+
+INPUT_FILTER =
+
+# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
+# basis. Doxygen will compare the file name with each pattern and apply the
+# filter if there is a match. The filters are a list of the form: pattern=filter
+# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how
+# filters are used. If the FILTER_PATTERNS tag is empty or if none of the
+# patterns match the file name, INPUT_FILTER is applied.
+#
+# Note that for custom extensions or not directly supported extensions you also
+# need to set EXTENSION_MAPPING for the extension otherwise the files are not
+# properly processed by doxygen.
+
+FILTER_PATTERNS =
+
+# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
+# INPUT_FILTER) will also be used to filter the input files that are used for
+# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES).
+# The default value is: NO.
+
+FILTER_SOURCE_FILES = NO
+
+# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file
+# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and
+# it is also possible to disable source filtering for a specific pattern using
+# *.ext= (so without naming a filter).
+# This tag requires that the tag FILTER_SOURCE_FILES is set to YES.
+
+FILTER_SOURCE_PATTERNS =
+
+# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that
+# is part of the input, its contents will be placed on the main page
+# (index.html). This can be useful if you have a project hosted on, for
+# instance, GitHub and want to reuse the introduction page also for the doxygen
+# output.
+
+USE_MDFILE_AS_MAINPAGE =
+
+#---------------------------------------------------------------------------
+# Configuration options related to source browsing
+#---------------------------------------------------------------------------
+
+# If the SOURCE_BROWSER tag is set to YES then a list of source files will be
+# generated. Documented entities will be cross-referenced with these sources.
+#
+# Note: To get rid of all source code in the generated output, make sure that
+# also VERBATIM_HEADERS is set to NO.
+# The default value is: NO.
+
+SOURCE_BROWSER = NO
+
+# Setting the INLINE_SOURCES tag to YES will include the body of functions,
+# classes and enums directly into the documentation.
+# The default value is: NO.
+
+INLINE_SOURCES = NO
+
+# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any
+# special comment blocks from generated source code fragments. Normal C, C++ and
+# Fortran comments will always remain visible.
+# The default value is: YES.
+
+STRIP_CODE_COMMENTS = YES
+
+# If the REFERENCED_BY_RELATION tag is set to YES then for each documented
+# entity all documented functions referencing it will be listed.
+# The default value is: NO.
+
+REFERENCED_BY_RELATION = NO
+
+# If the REFERENCES_RELATION tag is set to YES then for each documented function
+# all documented entities called/used by that function will be listed.
+# The default value is: NO.
+
+REFERENCES_RELATION = NO
+
+# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set
+# to YES then the hyperlinks from functions in REFERENCES_RELATION and
+# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will
+# link to the documentation.
+# The default value is: YES.
+
+REFERENCES_LINK_SOURCE = YES
+
+# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the
+# source code will show a tooltip with additional information such as prototype,
+# brief description and links to the definition and documentation. Since this
+# will make the HTML file larger and loading of large files a bit slower, you
+# can opt to disable this feature.
+# The default value is: YES.
+# This tag requires that the tag SOURCE_BROWSER is set to YES.
+
+SOURCE_TOOLTIPS = YES
+
+# If the USE_HTAGS tag is set to YES then the references to source code will
+# point to the HTML generated by the htags(1) tool instead of doxygen built-in
+# source browser. The htags tool is part of GNU's global source tagging system
+# (see https://www.gnu.org/software/global/global.html). You will need version
+# 4.8.6 or higher.
+#
+# To use it do the following:
+# - Install the latest version of global
+# - Enable SOURCE_BROWSER and USE_HTAGS in the configuration file
+# - Make sure the INPUT points to the root of the source tree
+# - Run doxygen as normal
+#
+# Doxygen will invoke htags (and that will in turn invoke gtags), so these
+# tools must be available from the command line (i.e. in the search path).
+#
+# The result: instead of the source browser generated by doxygen, the links to
+# source code will now point to the output of htags.
+# The default value is: NO.
+# This tag requires that the tag SOURCE_BROWSER is set to YES.
+
+USE_HTAGS = NO
+
+# If the VERBATIM_HEADERS tag is set to YES then doxygen will generate a
+# verbatim copy of the header file for each class for which an include is
+# specified. Set to NO to disable this.
+# See also: Section \class.
+# The default value is: YES.
+
+VERBATIM_HEADERS = YES
+
+#---------------------------------------------------------------------------
+# Configuration options related to the alphabetical class index
+#---------------------------------------------------------------------------
+
+# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all
+# compounds will be generated. Enable this if the project contains a lot of
+# classes, structs, unions or interfaces.
+# The default value is: YES.
+
+ALPHABETICAL_INDEX = YES
+
+# In case all classes in a project start with a common prefix, all classes will
+# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag
+# can be used to specify a prefix (or a list of prefixes) that should be ignored
+# while generating the index headers.
+# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
+
+IGNORE_PREFIX = glfw GLFW_
+
+#---------------------------------------------------------------------------
+# Configuration options related to the HTML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_HTML tag is set to YES, doxygen will generate HTML output
+# The default value is: YES.
+
+GENERATE_HTML = YES
+
+# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: html.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_OUTPUT = html
+
+# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each
+# generated HTML page (for example: .htm, .php, .asp).
+# The default value is: .html.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_FILE_EXTENSION = .html
+
+# The HTML_HEADER tag can be used to specify a user-defined HTML header file for
+# each generated HTML page. If the tag is left blank doxygen will generate a
+# standard header.
+#
+# For the HTML output to be valid, the header file must include any scripts and
+# style sheets that doxygen needs, which depend on the configuration options
+# used (e.g. the setting GENERATE_TREEVIEW). It is highly recommended to start
+# with a default header using
+# doxygen -w html new_header.html new_footer.html new_stylesheet.css
+# YourConfigFile
+# and then modify the file new_header.html. See also section "Doxygen usage"
+# for information on how to generate the default header that doxygen normally
+# uses.
+# Note: The header is subject to change so you typically have to regenerate the
+# default header when upgrading to a newer version of doxygen. For a description
+# of the possible markers and block names see the documentation.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_HEADER = "@GLFW_SOURCE_DIR@/docs/header.html"
+
+# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each
+# generated HTML page. If the tag is left blank doxygen will generate a standard
+# footer. See HTML_HEADER for more information on how to generate a default
+# footer and what special commands can be used inside the footer. See also
+# section "Doxygen usage" for information on how to generate the default footer
+# that doxygen normally uses.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_FOOTER = "@GLFW_SOURCE_DIR@/docs/footer.html"
+
+# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style
+# sheet that is used by each HTML page. It can be used to fine-tune the look of
+# the HTML output. If left blank doxygen will generate a default style sheet.
+# See also section "Doxygen usage" for information on how to generate the style
+# sheet that doxygen normally uses.
+# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as
+# it is more robust and this tag (HTML_STYLESHEET) will in the future become
+# obsolete.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_STYLESHEET =
+
+# The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined
+# cascading style sheets that are included after the standard style sheets
+# created by doxygen. Using this option one can overrule certain style aspects.
+# This is preferred over using HTML_STYLESHEET since it does not replace the
+# standard style sheet and is therefore more robust against future updates.
+# Doxygen will copy the style sheet files to the output directory.
+# Note: The order of the extra style sheet files is of importance (e.g. the last
+# style sheet in the list overrules the setting of the previous ones in the
+# list). For an example see the documentation.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_EXTRA_STYLESHEET = "@GLFW_SOURCE_DIR@/docs/extra.css"
+
+# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or
+# other source files which should be copied to the HTML output directory. Note
+# that these files will be copied to the base HTML output directory. Use the
+# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these
+# files. In the HTML_STYLESHEET file, use the file name only. Also note that the
+# files will be copied as-is; there are no commands or markers available.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_EXTRA_FILES = "@GLFW_SOURCE_DIR@/docs/spaces.svg"
+
+# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen
+# will adjust the colors in the style sheet and background images according to
+# this color. Hue is specified as an angle on a colorwheel, see
+# https://en.wikipedia.org/wiki/Hue for more information. For instance the value
+# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300
+# is purple, and 360 is red again.
+# Minimum value: 0, maximum value: 359, default value: 220.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_COLORSTYLE_HUE = 220
+
+# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors
+# in the HTML output. For a value of 0 the output will use grayscales only. A
+# value of 255 will produce the most vivid colors.
+# Minimum value: 0, maximum value: 255, default value: 100.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_COLORSTYLE_SAT = 100
+
+# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the
+# luminance component of the colors in the HTML output. Values below 100
+# gradually make the output lighter, whereas values above 100 make the output
+# darker. The value divided by 100 is the actual gamma applied, so 80 represents
+# a gamma of 0.8. The value 220 represents a gamma of 2.2, and 100 does not
+# change the gamma.
+# Minimum value: 40, maximum value: 240, default value: 80.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_COLORSTYLE_GAMMA = 80
+
+# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
+# page will contain the date and time when the page was generated. Setting this
+# to YES can help to show when doxygen was last run and thus if the
+# documentation is up to date.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_TIMESTAMP = YES
+
+# If the HTML_DYNAMIC_MENUS tag is set to YES then the generated HTML
+# documentation will contain a main index with vertical navigation menus that
+# are dynamically created via JavaScript. If disabled, the navigation index will
+# consist of multiple levels of tabs that are statically embedded in every HTML
+# page. Disable this option to support browsers that do not have JavaScript,
+# like the Qt help browser.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_DYNAMIC_MENUS = YES
+
+# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
+# documentation will contain sections that can be hidden and shown after the
+# page has loaded.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_DYNAMIC_SECTIONS = NO
+
+# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries
+# shown in the various tree structured indices initially; the user can expand
+# and collapse entries dynamically later on. Doxygen will expand the tree to
+# such a level that at most the specified number of entries are visible (unless
+# a fully collapsed tree already exceeds this amount). So setting the number of
+# entries to 1 will produce a fully collapsed tree by default. 0 is a special
+# value representing an infinite number of entries and will result in a fully
+# expanded tree by default.
+# Minimum value: 0, maximum value: 9999, default value: 100.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_INDEX_NUM_ENTRIES = 100
+
+# If the GENERATE_DOCSET tag is set to YES, additional index files will be
+# generated that can be used as input for Apple's Xcode 3 integrated development
+# environment (see: https://developer.apple.com/xcode/), introduced with OSX
+# 10.5 (Leopard). To create a documentation set, doxygen will generate a
+# Makefile in the HTML output directory. Running make will produce the docset in
+# that directory and running make install will install the docset in
+# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at
+# startup. See https://developer.apple.com/library/archive/featuredarticles/Doxy
+# genXcode/_index.html for more information.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_DOCSET = NO
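+
+# A minimal usage sketch of the workflow described above, assuming the docset
+# Makefile has been generated into the html output directory:
+#   cd html && make && make install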
+
+# This tag determines the name of the docset feed. A documentation feed provides
+# an umbrella under which multiple documentation sets from a single provider
+# (such as a company or product suite) can be grouped.
+# The default value is: Doxygen generated docs.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_FEEDNAME = "Doxygen generated docs"
+
+# This tag specifies a string that should uniquely identify the documentation
+# set bundle. This should be a reverse domain-name style string, e.g.
+# com.mycompany.MyDocSet. Doxygen will append .docset to the name.
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_BUNDLE_ID = org.doxygen.Project
+
+# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify
+# the documentation publisher. This should be a reverse domain-name style
+# string, e.g. com.mycompany.MyDocSet.documentation.
+# The default value is: org.doxygen.Publisher.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_PUBLISHER_ID = org.doxygen.Publisher
+
+# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher.
+# The default value is: Publisher.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_PUBLISHER_NAME = Publisher
+
+# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three
+# additional HTML index files: index.hhp, index.hhc, and index.hhk. The
+# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop
+# (see: https://www.microsoft.com/en-us/download/details.aspx?id=21138) on
+# Windows.
+#
+# The HTML Help Workshop contains a compiler that can convert all HTML output
+# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML
+# files are now used as the Windows 98 help format, and will replace the old
+# Windows help format (.hlp) on all Windows platforms in the future. Compressed
+# HTML files also contain an index, a table of contents, and you can search for
+# words in the documentation. The HTML workshop also contains a viewer for
+# compressed HTML files.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_HTMLHELP = NO
+
+# The CHM_FILE tag can be used to specify the file name of the resulting .chm
+# file. You can add a path in front of the file if the result should not be
+# written to the html output directory.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+CHM_FILE =
+
+# The HHC_LOCATION tag can be used to specify the location (absolute path
+# including file name) of the HTML help compiler (hhc.exe). If non-empty,
+# doxygen will try to run the HTML help compiler on the generated index.hhp.
+# The file has to be specified with full path.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+HHC_LOCATION =
+
+# The GENERATE_CHI flag controls whether a separate .chi index file is generated
+# (YES) or included in the master .chm file (NO).
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+GENERATE_CHI = NO
+
+# The CHM_INDEX_ENCODING is used to encode HtmlHelp index (hhk), content (hhc)
+# and project file content.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+CHM_INDEX_ENCODING =
+
+# The BINARY_TOC flag controls whether a binary table of contents is generated
+# (YES) or a normal table of contents (NO) in the .chm file. Furthermore it
+# enables the Previous and Next buttons.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+BINARY_TOC = NO
+
+# The TOC_EXPAND flag can be set to YES to add extra items for group members to
+# the table of contents of the HTML help documentation and to the tree view.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+TOC_EXPAND = NO
+
+# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
+# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that
+# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help
+# (.qch) of the generated HTML documentation.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_QHP = NO
+
+# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify
+# the file name of the resulting .qch file. The path specified is relative to
+# the HTML output folder.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QCH_FILE =
+
+# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help
+# Project output. For more information please see Qt Help Project / Namespace
+# (see: https://doc.qt.io/archives/qt-4.8/qthelpproject.html#namespace).
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_NAMESPACE = org.doxygen.Project
+
+# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt
+# Help Project output. For more information please see Qt Help Project / Virtual
+# Folders (see: https://doc.qt.io/archives/qt-4.8/qthelpproject.html#virtual-
+# folders).
+# The default value is: doc.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_VIRTUAL_FOLDER = doc
+
+# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom
+# filter to add. For more information please see Qt Help Project / Custom
+# Filters (see: https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom-
+# filters).
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_CUST_FILTER_NAME =
+
+# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the
+# custom filter to add. For more information please see Qt Help Project / Custom
+# Filters (see: https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom-
+# filters).
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_CUST_FILTER_ATTRS =
+
+# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
+# project's filter section matches. Qt Help Project / Filter Attributes (see:
+# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#filter-attributes).
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_SECT_FILTER_ATTRS =
+
+# The QHG_LOCATION tag can be used to specify the location of Qt's
+# qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the
+# generated .qhp file.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHG_LOCATION =
+
+# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be
+# generated that, together with the HTML files, form an Eclipse help plugin. To
+# install this plugin and make it available under the help contents menu in
+# Eclipse, the contents of the directory containing the HTML and XML files need
+# to be copied into the plugins directory of Eclipse. The name of the directory
+# within the plugins directory should be the same as the ECLIPSE_DOC_ID value.
+# After copying, Eclipse needs to be restarted before the help appears.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_ECLIPSEHELP = NO
+
+# A unique identifier for the Eclipse help plugin. When installing the plugin
+# the directory name containing the HTML and XML files should also have this
+# name. Each documentation set should have its own identifier.
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES.
+
+ECLIPSE_DOC_ID = org.doxygen.Project
+
+# If you want full control over the layout of the generated HTML pages it might
+# be necessary to disable the index and replace it with your own. The
+# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top
+# of each HTML page. A value of NO enables the index and the value YES disables
+# it. Since the tabs in the index contain the same information as the navigation
+# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+DISABLE_INDEX = NO
+
+# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
+# structure should be generated to display hierarchical information. If the tag
+# value is set to YES, a side panel will be generated containing a tree-like
+# index structure (just like the one that is generated for HTML Help). For this
+# to work a browser that supports JavaScript, DHTML, CSS and frames is required
+# (i.e. any modern browser). Windows users are probably better off using the
+# HTML help feature. Via custom style sheets (see HTML_EXTRA_STYLESHEET) one can
+# further fine-tune the look of the index. As an example, the default style
+# sheet generated by doxygen has an example that shows how to put an image at
+# the root of the tree instead of the PROJECT_NAME. Since the tree basically has
+# the same information as the tab index, you could consider setting
+# DISABLE_INDEX to YES when enabling this option.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_TREEVIEW = NO
+
+# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that
+# doxygen will group on one line in the generated HTML documentation.
+#
+# Note that a value of 0 will completely suppress the enum values from appearing
+# in the overview section.
+# Minimum value: 0, maximum value: 20, default value: 4.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+ENUM_VALUES_PER_LINE = 4
+
+# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used
+# to set the initial width (in pixels) of the frame in which the tree is shown.
+# Minimum value: 0, maximum value: 1500, default value: 250.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+TREEVIEW_WIDTH = 300
+
+# If the EXT_LINKS_IN_WINDOW option is set to YES, doxygen will open links to
+# external symbols imported via tag files in a separate window.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+EXT_LINKS_IN_WINDOW = NO
+
+# If the HTML_FORMULA_FORMAT option is set to svg, doxygen will use the pdf2svg
+# tool (see https://github.com/dawbarton/pdf2svg) or inkscape (see
+# https://inkscape.org) to generate formulas as SVG images instead of PNGs for
+# the HTML output. These images will generally look nicer at scaled resolutions.
+# Possible values are: png (the default) and svg (looks nicer but requires the
+# pdf2svg tool).
+# The default value is: png.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_FORMULA_FORMAT = png
+
+# Use this tag to change the font size of LaTeX formulas included as images in
+# the HTML documentation. When you change the font size after a successful
+# doxygen run you need to manually remove any form_*.png images from the HTML
+# output directory to force them to be regenerated.
+# Minimum value: 8, maximum value: 50, default value: 10.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+FORMULA_FONTSIZE = 10
+
+# Use the FORMULA_TRANSPARENT tag to determine whether or not the images
+# generated for formulas are transparent PNGs. Transparent PNGs are not
+# supported properly for IE 6.0, but are supported on all modern browsers.
+#
+# Note that when changing this option you need to delete any form_*.png files in
+# the HTML output directory before the changes take effect.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+FORMULA_TRANSPARENT = YES
+
+# The FORMULA_MACROFILE can contain LaTeX \newcommand and \renewcommand commands
+# to create new LaTeX commands to be used in formulas as building blocks. See
+# the section "Including formulas" for details.
+
+FORMULA_MACROFILE =
+
+# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see
+# https://www.mathjax.org) which uses client side JavaScript for the rendering
+# instead of using pre-rendered bitmaps. Use this if you do not have LaTeX
+# installed or if you want the formulas to look prettier in the HTML output. When
+# enabled you may also need to install MathJax separately and configure the path
+# to it using the MATHJAX_RELPATH option.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+USE_MATHJAX = NO
+
+# When MathJax is enabled you can set the default output format to be used for
+# the MathJax output. See the MathJax site (see:
+# http://docs.mathjax.org/en/latest/output.html) for more details.
+# Possible values are: HTML-CSS (which is slower, but has the best
+# compatibility), NativeMML (i.e. MathML) and SVG.
+# The default value is: HTML-CSS.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_FORMAT = HTML-CSS
+
+# When MathJax is enabled you need to specify the location relative to the HTML
+# output directory using the MATHJAX_RELPATH option. The destination directory
+# should contain the MathJax.js script. For instance, if the mathjax directory
+# is located at the same level as the HTML output directory, then
+# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax
+# Content Delivery Network so you can quickly see the result without installing
+# MathJax. However, it is strongly recommended to install a local copy of
+# MathJax from https://www.mathjax.org before deployment.
+# The default value is: https://cdn.jsdelivr.net/npm/mathjax@2.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_RELPATH = https://cdn.jsdelivr.net/npm/mathjax@2
+
+# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax
+# extension names that should be enabled during MathJax rendering. For example
+# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_EXTENSIONS =
+
+# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces
+# of code that will be used on startup of the MathJax code. See the MathJax site
+# (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an
+# example see the documentation.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_CODEFILE =
+
+# When the SEARCHENGINE tag is enabled doxygen will generate a search box for
+# the HTML output. The underlying search engine uses javascript and DHTML and
+# should work on any modern browser. Note that when using HTML help
+# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET)
+# there is already a search function so this one should typically be disabled.
+# For large projects the javascript based search engine can be slow; in that case
+# enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to
+# search using the keyboard; to jump to the search box use <access key> + S
+# (what the <access key> is depends on the OS and browser, but it is typically
+# <CTRL>, <ALT>/<option>, or both). Inside the search box use the <cursor down
+# key> to jump into the search results window, the results can be navigated
+# using the <cursor keys>. Press <Enter> to select an item or <escape> to cancel
+# the search. The filter options can be selected when the cursor is inside the
+# search box by pressing <Shift>+<cursor down>. Also here use the <cursor keys>
+# to select a filter and <Enter> or <escape> to activate or cancel the filter
+# option.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+SEARCHENGINE = YES
+
+# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
+# implemented using a web server instead of a web client using JavaScript. There
+# are two flavors of web server based searching depending on the EXTERNAL_SEARCH
+# setting. When disabled, doxygen will generate a PHP script for searching and
+# an index file used by the script. When EXTERNAL_SEARCH is enabled the indexing
+# and searching needs to be provided by external tools. See the section
+# "External Indexing and Searching" for details.
+# The default value is: NO.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+SERVER_BASED_SEARCH = NO
+
+# When EXTERNAL_SEARCH tag is enabled doxygen will no longer generate the PHP
+# script for searching. Instead the search results are written to an XML file
+# which needs to be processed by an external indexer. Doxygen will invoke an
+# external search engine pointed to by the SEARCHENGINE_URL option to obtain the
+# search results.
+#
+# Doxygen ships with an example indexer (doxyindexer) and search engine
+# (doxysearch.cgi) which are based on the open source search engine library
+# Xapian (see: https://xapian.org/).
+#
+# See the section "External Indexing and Searching" for details.
+# The default value is: NO.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+EXTERNAL_SEARCH = NO
+
+# The SEARCHENGINE_URL should point to a search engine hosted by a web server
+# which will return the search results when EXTERNAL_SEARCH is enabled.
+#
+# Doxygen ships with an example indexer (doxyindexer) and search engine
+# (doxysearch.cgi) which are based on the open source search engine library
+# Xapian (see: https://xapian.org/). See the section "External Indexing and
+# Searching" for details.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+SEARCHENGINE_URL =
+
+# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the unindexed
+# search data is written to a file for indexing by an external tool. With the
+# SEARCHDATA_FILE tag the name of this file can be specified.
+# The default file is: searchdata.xml.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+SEARCHDATA_FILE = searchdata.xml
+
+# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the
+# EXTERNAL_SEARCH_ID tag can be used as an identifier for the project. This is
+# useful in combination with EXTRA_SEARCH_MAPPINGS to search through multiple
+# projects and redirect the results back to the right project.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+EXTERNAL_SEARCH_ID =
+
+# The EXTRA_SEARCH_MAPPINGS tag can be used to enable searching through doxygen
+# projects other than the one defined by this configuration file, but that are
+# all added to the same external search index. Each project needs to have a
+# unique id set via EXTERNAL_SEARCH_ID. The search mapping then maps the id
+# to a relative location where the documentation can be found. The format is:
+# EXTRA_SEARCH_MAPPINGS = tagname1=loc1 tagname2=loc2 ...
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+EXTRA_SEARCH_MAPPINGS =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the LaTeX output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_LATEX tag is set to YES, doxygen will generate LaTeX output.
+# The default value is: YES.
+
+GENERATE_LATEX = NO
+
+# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: latex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_OUTPUT = latex
+
+# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
+# invoked.
+#
+# Note that when USE_PDFLATEX is not enabled the default is latex; when it is
+# enabled the default is pdflatex, and if latex is chosen in the latter case it
+# is overridden by pdflatex. For specific output languages the default may have
+# been set differently; this depends on the implementation of
+# the output language.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_CMD_NAME =
+
+# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to generate
+# index for LaTeX.
+# Note: This tag is used in the Makefile / make.bat.
+# See also: LATEX_MAKEINDEX_CMD for the part in the generated output file
+# (.tex).
+# The default file is: makeindex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+MAKEINDEX_CMD_NAME = makeindex
+
+# The LATEX_MAKEINDEX_CMD tag can be used to specify the command name to
+# generate the index for LaTeX. If there is no backslash (\) as the first
+# character, it will be automatically added in the LaTeX code.
+# Note: This tag is used in the generated output file (.tex).
+# See also: MAKEINDEX_CMD_NAME for the part in the Makefile / make.bat.
+# The default value is: makeindex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_MAKEINDEX_CMD = makeindex
+
+# If the COMPACT_LATEX tag is set to YES, doxygen generates more compact LaTeX
+# documents. This may be useful for small projects and may help to save some
+# trees in general.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+COMPACT_LATEX = NO
+
+# The PAPER_TYPE tag can be used to set the paper type that is used by the
+# printer.
+# Possible values are: a4 (210 x 297 mm), letter (8.5 x 11 inches), legal (8.5 x
+# 14 inches) and executive (7.25 x 10.5 inches).
+# The default value is: a4.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+PAPER_TYPE = a4
+
+# The EXTRA_PACKAGES tag can be used to specify one or more LaTeX package names
+# that should be included in the LaTeX output. The package can be specified just
+# by its name or with the correct syntax as to be used with the LaTeX
+# \usepackage command. To get the times font for instance you can specify:
+# EXTRA_PACKAGES=times or EXTRA_PACKAGES={times}
+# To use the option intlimits with the amsmath package you can specify:
+# EXTRA_PACKAGES=[intlimits]{amsmath}
+# If left blank no extra packages will be included.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+EXTRA_PACKAGES =
+
+# The LATEX_HEADER tag can be used to specify a personal LaTeX header for the
+# generated LaTeX document. The header should contain everything until the first
+# chapter. If it is left blank doxygen will generate a standard header. See
+# section "Doxygen usage" for information on how to let doxygen write the
+# default header to a separate file.
+#
+# Note: Only use a user-defined header if you know what you are doing! The
+# following commands have a special meaning inside the header: $title,
+# $datetime, $date, $doxygenversion, $projectname, $projectnumber,
+# $projectbrief, $projectlogo. Doxygen will replace $title with the empty
+# string; for the replacement values of the other commands the user is referred
+# to HTML_HEADER.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_HEADER =
+
+# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for the
+# generated LaTeX document. The footer should contain everything after the last
+# chapter. If it is left blank doxygen will generate a standard footer. See
+# LATEX_HEADER for more information on how to generate a default footer and what
+# special commands can be used inside the footer.
+#
+# Note: Only use a user-defined footer if you know what you are doing!
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_FOOTER =
+
+# The LATEX_EXTRA_STYLESHEET tag can be used to specify additional user-defined
+# LaTeX style sheets that are included after the standard style sheets created
+# by doxygen. Using this option one can overrule certain style aspects. Doxygen
+# will copy the style sheet files to the output directory.
+# Note: The order of the extra style sheet files is of importance (e.g. the last
+# style sheet in the list overrules the setting of the previous ones in the
+# list).
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_EXTRA_STYLESHEET =
+
+# The LATEX_EXTRA_FILES tag can be used to specify one or more extra images or
+# other source files which should be copied to the LATEX_OUTPUT output
+# directory. Note that the files will be copied as-is; there are no commands or
+# markers available.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_EXTRA_FILES =
+
+# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated is
+# prepared for conversion to PDF (using ps2pdf or pdflatex). The PDF file will
+# contain links (just like the HTML output) instead of page references. This
+# makes the output suitable for online browsing using a PDF viewer.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+PDF_HYPERLINKS = YES
+
+# If the USE_PDFLATEX tag is set to YES, doxygen will use pdflatex to generate
+# the PDF file directly from the LaTeX files. Set this option to YES, to get a
+# higher quality PDF documentation.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+USE_PDFLATEX = YES
+
+# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \batchmode
+# command to the generated LaTeX files. This will instruct LaTeX to keep running
+# if errors occur, instead of asking the user for help. This option is also used
+# when generating formulas in HTML.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_BATCHMODE = NO
+
+# If the LATEX_HIDE_INDICES tag is set to YES then doxygen will not include the
+# index chapters (such as File Index, Compound Index, etc.) in the output.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_HIDE_INDICES = NO
+
+# The LATEX_BIB_STYLE tag can be used to specify the style to use for the
+# bibliography, e.g. plainnat, or ieeetr. See
+# https://en.wikipedia.org/wiki/BibTeX and \cite for more info.
+# The default value is: plain.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_BIB_STYLE = plain
+
+# If the LATEX_TIMESTAMP tag is set to YES then the footer of each generated
+# page will contain the date and time when the page was generated. Setting this
+# to NO can help when comparing the output of multiple runs.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_TIMESTAMP = NO
+
+# The LATEX_EMOJI_DIRECTORY tag is used to specify the (relative or absolute)
+# path from which the emoji images will be read. If a relative path is entered,
+# it will be relative to the LATEX_OUTPUT directory. If left blank the
+# LATEX_OUTPUT directory will be used.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_EMOJI_DIRECTORY =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the RTF output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_RTF tag is set to YES, doxygen will generate RTF output. The
+# RTF output is optimized for Word 97 and may not look too pretty with other RTF
+# readers/editors.
+# The default value is: NO.
+
+GENERATE_RTF = NO
+
+# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: rtf.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_OUTPUT = rtf
+
+# If the COMPACT_RTF tag is set to YES, doxygen generates more compact RTF
+# documents. This may be useful for small projects and may help to save some
+# trees in general.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+COMPACT_RTF = NO
+
+# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated will
+# contain hyperlink fields. The RTF file will contain links (just like the HTML
+# output) instead of page references. This makes the output suitable for online
+# browsing using Word or some other Word compatible readers that support those
+# fields.
+#
+# Note: WordPad (write) and others do not support links.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_HYPERLINKS = NO
+
+# Load stylesheet definitions from file. Syntax is similar to doxygen's
+# configuration file, i.e. a series of assignments. You only have to provide
+# replacements; missing definitions are set to their default value.
+#
+# See also section "Doxygen usage" for information on how to generate the
+# default style sheet that doxygen normally uses.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_STYLESHEET_FILE =
+
+# Set optional variables used in the generation of an RTF document. Syntax is
+# similar to doxygen's configuration file. A template extensions file can be
+# generated using doxygen -e rtf extensionFile.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_EXTENSIONS_FILE =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the man page output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_MAN tag is set to YES, doxygen will generate man pages for
+# classes and files.
+# The default value is: NO.
+
+GENERATE_MAN = NO
+
+# The MAN_OUTPUT tag is used to specify where the man pages will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it. A directory man3 will be created inside the directory specified by
+# MAN_OUTPUT.
+# The default directory is: man.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
+MAN_OUTPUT = man
+
+# The MAN_EXTENSION tag determines the extension that is added to the generated
+# man pages. In case the manual section does not start with a number, the number
+# 3 is prepended. The dot (.) at the beginning of the MAN_EXTENSION tag is
+# optional.
+# The default value is: .3.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
+MAN_EXTENSION = .3
+
+# The MAN_SUBDIR tag determines the name of the directory created within
+# MAN_OUTPUT in which the man pages are placed. It defaults to man followed by
+# MAN_EXTENSION with the initial . removed.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
+MAN_SUBDIR =
+
+# If the MAN_LINKS tag is set to YES and doxygen generates man output, then it
+# will generate one additional man file for each entity documented in the real
+# man page(s). These additional files only source the real man page, but without
+# them the man command would be unable to find the correct page.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
+MAN_LINKS = NO
+
+#---------------------------------------------------------------------------
+# Configuration options related to the XML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_XML tag is set to YES, doxygen will generate an XML file that
+# captures the structure of the code including all documentation.
+# The default value is: NO.
+
+GENERATE_XML = NO
+
+# The XML_OUTPUT tag is used to specify where the XML pages will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: xml.
+# This tag requires that the tag GENERATE_XML is set to YES.
+
+XML_OUTPUT = xml
+
+# If the XML_PROGRAMLISTING tag is set to YES, doxygen will dump the program
+# listings (including syntax highlighting and cross-referencing information) to
+# the XML output. Note that enabling this will significantly increase the size
+# of the XML output.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_XML is set to YES.
+
+XML_PROGRAMLISTING = YES
+
+# If the XML_NS_MEMB_FILE_SCOPE tag is set to YES, doxygen will include
+# namespace members in file scope as well, matching the HTML output.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_XML is set to YES.
+
+XML_NS_MEMB_FILE_SCOPE = NO
+
+#---------------------------------------------------------------------------
+# Configuration options related to the DOCBOOK output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_DOCBOOK tag is set to YES, doxygen will generate Docbook files
+# that can be used to generate PDF.
+# The default value is: NO.
+
+GENERATE_DOCBOOK = NO
+
+# The DOCBOOK_OUTPUT tag is used to specify where the Docbook pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be put in
+# front of it.
+# The default directory is: docbook.
+# This tag requires that the tag GENERATE_DOCBOOK is set to YES.
+
+DOCBOOK_OUTPUT = docbook
+
+#---------------------------------------------------------------------------
+# Configuration options for the AutoGen Definitions output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_AUTOGEN_DEF tag is set to YES, doxygen will generate an
+# AutoGen Definitions (see http://autogen.sourceforge.net/) file that captures
+# the structure of the code including all documentation. Note that this feature
+# is still experimental and incomplete at the moment.
+# The default value is: NO.
+
+GENERATE_AUTOGEN_DEF = NO
+
+#---------------------------------------------------------------------------
+# Configuration options related to the Perl module output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_PERLMOD tag is set to YES, doxygen will generate a Perl module
+# file that captures the structure of the code including all documentation.
+#
+# Note that this feature is still experimental and incomplete at the moment.
+# The default value is: NO.
+
+GENERATE_PERLMOD = NO
+
+# If the PERLMOD_LATEX tag is set to YES, doxygen will generate the necessary
+# Makefile rules, Perl scripts and LaTeX code to be able to generate PDF and DVI
+# output from the Perl module output.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_PERLMOD is set to YES.
+
+PERLMOD_LATEX = NO
+
+# If the PERLMOD_PRETTY tag is set to YES, the Perl module output will be nicely
+# formatted so it can be parsed by a human reader. This is useful if you want to
+# understand what is going on. On the other hand, if this tag is set to NO, the
+# size of the Perl module output will be much smaller and Perl will parse it
+# just the same.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_PERLMOD is set to YES.
+
+PERLMOD_PRETTY = YES
+
+# The names of the make variables in the generated doxyrules.make file are
+# prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. This is useful
+# so different doxyrules.make files included by the same Makefile don't
+# overwrite each other's variables.
+# This tag requires that the tag GENERATE_PERLMOD is set to YES.
+
+PERLMOD_MAKEVAR_PREFIX =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the preprocessor
+#---------------------------------------------------------------------------
+
+# If the ENABLE_PREPROCESSING tag is set to YES, doxygen will evaluate all
+# C-preprocessor directives found in the sources and include files.
+# The default value is: YES.
+
+ENABLE_PREPROCESSING = YES
+
+# If the MACRO_EXPANSION tag is set to YES, doxygen will expand all macro names
+# in the source code. If set to NO, only conditional compilation will be
+# performed. Macro expansion can be done in a controlled way by setting
+# EXPAND_ONLY_PREDEF to YES.
+# The default value is: NO.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+MACRO_EXPANSION = YES
+
+# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES then
+# the macro expansion is limited to the macros specified with the PREDEFINED and
+# EXPAND_AS_DEFINED tags.
+# The default value is: NO.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+EXPAND_ONLY_PREDEF = YES
+
+# If the SEARCH_INCLUDES tag is set to YES, the include files in the
+# INCLUDE_PATH will be searched if a #include is found.
+# The default value is: YES.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+SEARCH_INCLUDES = YES
+
+# The INCLUDE_PATH tag can be used to specify one or more directories that
+# contain include files that are not input files but should be processed by the
+# preprocessor.
+# This tag requires that the tag SEARCH_INCLUDES is set to YES.
+
+INCLUDE_PATH =
+
+# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
+# patterns (like *.h and *.hpp) to filter out the header-files in the
+# directories. If left blank, the patterns specified with FILE_PATTERNS will be
+# used.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+INCLUDE_FILE_PATTERNS =
+
+# The PREDEFINED tag can be used to specify one or more macro names that are
+# defined before the preprocessor is started (similar to the -D option of e.g.
+# gcc). The argument of the tag is a list of macros of the form: name or
+# name=definition (no spaces). If the definition and the "=" are omitted, "=1"
+# is assumed. To prevent a macro definition from being undefined via #undef or
+# recursively expanded use the := operator instead of the = operator.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+PREDEFINED = GLFWAPI= \
+ GLFW_EXPOSE_NATIVE_WIN32 \
+ GLFW_EXPOSE_NATIVE_WGL \
+ GLFW_EXPOSE_NATIVE_X11 \
+ GLFW_EXPOSE_NATIVE_WAYLAND \
+ GLFW_EXPOSE_NATIVE_GLX \
+ GLFW_EXPOSE_NATIVE_COCOA \
+ GLFW_EXPOSE_NATIVE_NSGL \
+ GLFW_EXPOSE_NATIVE_EGL \
+ GLFW_EXPOSE_NATIVE_OSMESA \
+ VK_VERSION_1_0
+
+# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then this
+# tag can be used to specify a list of macro names that should be expanded. The
+# macro definition that is found in the sources will be used. Use the PREDEFINED
+# tag if you want to use a different macro definition that overrules the
+# definition found in the source code.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+EXPAND_AS_DEFINED =
+
+# If the SKIP_FUNCTION_MACROS tag is set to YES then doxygen's preprocessor will
+# remove all references to function-like macros that are alone on a line, have
+# an all uppercase name, and do not end with a semicolon. Such function macros
+# are typically used for boiler-plate code, and will confuse the parser if not
+# removed.
+# The default value is: YES.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+SKIP_FUNCTION_MACROS = YES
+
+#---------------------------------------------------------------------------
+# Configuration options related to external references
+#---------------------------------------------------------------------------
+
+# The TAGFILES tag can be used to specify one or more tag files. For each tag
+# file the location of the external documentation should be added. The format of
+# a tag file without this location is as follows:
+# TAGFILES = file1 file2 ...
+# Adding location for the tag files is done as follows:
+# TAGFILES = file1=loc1 "file2 = loc2" ...
+# where loc1 and loc2 can be relative or absolute paths or URLs. See the
+# section "Linking to external documentation" for more information about the use
+# of tag files.
+# Note: Each tag file must have a unique name (where the name does NOT include
+# the path). If a tag file is not located in the directory in which doxygen is
+# run, you must also specify the path to the tagfile here.
+
+TAGFILES =
+
+# When a file name is specified after GENERATE_TAGFILE, doxygen will create a
+# tag file that is based on the input files it reads. See section "Linking to
+# external documentation" for more information about the usage of tag files.
+
+GENERATE_TAGFILE =
+
+# If the ALLEXTERNALS tag is set to YES, all external class will be listed in
+# the class index. If set to NO, only the inherited external classes will be
+# listed.
+# The default value is: NO.
+
+ALLEXTERNALS = NO
+
+# If the EXTERNAL_GROUPS tag is set to YES, all external groups will be listed
+# in the modules index. If set to NO, only the current project's groups will be
+# listed.
+# The default value is: YES.
+
+EXTERNAL_GROUPS = YES
+
+# If the EXTERNAL_PAGES tag is set to YES, all external pages will be listed in
+# the related pages index. If set to NO, only the current project's pages will
+# be listed.
+# The default value is: YES.
+
+EXTERNAL_PAGES = YES
+
+#---------------------------------------------------------------------------
+# Configuration options related to the dot tool
+#---------------------------------------------------------------------------
+
+# You can include diagrams made with dia in doxygen documentation. Doxygen will
+# then run dia to produce the diagram and insert it in the documentation. The
+# DIA_PATH tag allows you to specify the directory where the dia binary resides.
+# If left empty dia is assumed to be found in the default search path.
+
+DIA_PATH =
+
+# If set to YES the inheritance and collaboration graphs will hide inheritance
+# and usage relations if the target is undocumented or is not a class.
+# The default value is: YES.
+
+HIDE_UNDOC_RELATIONS = YES
+
+# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
+# available from the path. This tool is part of Graphviz (see:
+# http://www.graphviz.org/), a graph visualization toolkit from AT&T and Lucent
+# Bell Labs. The other options in this section have no effect if this option is
+# set to NO.
+# The default value is: NO.
+
+HAVE_DOT = NO
+
+# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is allowed
+# to run in parallel. When set to 0 doxygen will base this on the number of
+# processors available in the system. You can set it explicitly to a value
+# larger than 0 to get control over the balance between CPU load and processing
+# speed.
+# Minimum value: 0, maximum value: 32, default value: 0.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_NUM_THREADS = 0
+
+# When you want a different-looking font in the dot files that doxygen
+# generates, you can specify the font name using DOT_FONTNAME. You need to make
+# sure dot is able to find the font, which can be done by putting it in a
+# standard location or by setting the DOTFONTPATH environment variable or by
+# setting DOT_FONTPATH to the directory containing the font.
+# The default value is: Helvetica.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_FONTNAME = Helvetica
+
+# The DOT_FONTSIZE tag can be used to set the size (in points) of the font of
+# dot graphs.
+# Minimum value: 4, maximum value: 24, default value: 10.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_FONTSIZE = 10
+
+# By default doxygen will tell dot to use the default font as specified with
+# DOT_FONTNAME. If you specify a different font using DOT_FONTNAME you can set
+# the path where dot can find it using this tag.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_FONTPATH =
+
+# If the CLASS_GRAPH tag is set to YES then doxygen will generate a graph for
+# each documented class showing the direct and indirect inheritance relations.
+# Setting this tag to YES will force the CLASS_DIAGRAMS tag to NO.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+CLASS_GRAPH = YES
+
+# If the COLLABORATION_GRAPH tag is set to YES then doxygen will generate a
+# graph for each documented class showing the direct and indirect implementation
+# dependencies (inheritance, containment, and class references variables) of the
+# class with other documented classes.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+COLLABORATION_GRAPH = YES
+
+# If the GROUP_GRAPHS tag is set to YES then doxygen will generate a graph for
+# groups, showing the direct groups dependencies.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+GROUP_GRAPHS = YES
+
+# If the UML_LOOK tag is set to YES, doxygen will generate inheritance and
+# collaboration diagrams in a style similar to the OMG's Unified Modeling
+# Language.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+UML_LOOK = NO
+
+# If the UML_LOOK tag is enabled, the fields and methods are shown inside the
+# class node. If there are many fields or methods and many nodes the graph may
+# become too big to be useful. The UML_LIMIT_NUM_FIELDS threshold limits the
+# number of items for each type to make the size more manageable. Set this to 0
+# for no limit. Note that the threshold may be exceeded by 50% before the limit
+# is enforced. So when you set the threshold to 10, up to 15 fields may appear,
+# but if the number exceeds 15, the total amount of fields shown is limited to
+# 10.
+# Minimum value: 0, maximum value: 100, default value: 10.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+UML_LIMIT_NUM_FIELDS = 10
+
+# If the TEMPLATE_RELATIONS tag is set to YES then the inheritance and
+# collaboration graphs will show the relations between templates and their
+# instances.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+TEMPLATE_RELATIONS = NO
+
+# If the INCLUDE_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are set to
+# YES then doxygen will generate a graph for each documented file showing the
+# direct and indirect include dependencies of the file with other documented
+# files.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+INCLUDE_GRAPH = YES
+
+# If the INCLUDED_BY_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are
+# set to YES then doxygen will generate a graph for each documented file showing
+# the direct and indirect include dependencies of the file with other documented
+# files.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+INCLUDED_BY_GRAPH = YES
+
+# If the CALL_GRAPH tag is set to YES then doxygen will generate a call
+# dependency graph for every global function or class method.
+#
+# Note that enabling this option will significantly increase the time of a run.
+# So in most cases it will be better to enable call graphs for selected
+# functions only using the \callgraph command. Disabling a call graph can be
+# accomplished by means of the command \hidecallgraph.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+CALL_GRAPH = NO
+
+# If the CALLER_GRAPH tag is set to YES then doxygen will generate a caller
+# dependency graph for every global function or class method.
+#
+# Note that enabling this option will significantly increase the time of a run.
+# So in most cases it will be better to enable caller graphs for selected
+# functions only using the \callergraph command. Disabling a caller graph can be
+# accomplished by means of the command \hidecallergraph.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+CALLER_GRAPH = NO
+
+# If the GRAPHICAL_HIERARCHY tag is set to YES then doxygen will show a
+# graphical hierarchy of all classes instead of a textual one.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+GRAPHICAL_HIERARCHY = YES
+
+# If the DIRECTORY_GRAPH tag is set to YES then doxygen will show the
+# dependencies a directory has on other directories in a graphical way. The
+# dependency relations are determined by the #include relations between the
+# files in the directories.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DIRECTORY_GRAPH = YES
+
+# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
+# generated by dot. For an explanation of the image formats see the section
+# output formats in the documentation of the dot tool (Graphviz (see:
+# http://www.graphviz.org/)).
+# Note: If you choose svg you need to set HTML_FILE_EXTENSION to xhtml in order
+# to make the SVG files visible in IE 9+ (other browsers do not have this
+# requirement).
+# Possible values are: png, jpg, gif, svg, png:gd, png:gd:gd, png:cairo,
+# png:cairo:gd, png:cairo:cairo, png:cairo:gdiplus, png:gdiplus and
+# png:gdiplus:gdiplus.
+# The default value is: png.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_IMAGE_FORMAT = png
+
+# If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to
+# enable generation of interactive SVG images that allow zooming and panning.
+#
+# Note that this requires a modern browser other than Internet Explorer. Tested
+# and working are Firefox, Chrome, Safari, and Opera.
+# Note: For IE 9+ you need to set HTML_FILE_EXTENSION to xhtml in order to make
+# the SVG files visible. Older versions of IE do not have SVG support.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+INTERACTIVE_SVG = NO
+
+# The DOT_PATH tag can be used to specify the path where the dot tool can be
+# found. If left blank, it is assumed the dot tool can be found in the path.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_PATH =
+
+# The DOTFILE_DIRS tag can be used to specify one or more directories that
+# contain dot files that are included in the documentation (see the \dotfile
+# command).
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOTFILE_DIRS =
+
+# The MSCFILE_DIRS tag can be used to specify one or more directories that
+# contain msc files that are included in the documentation (see the \mscfile
+# command).
+
+MSCFILE_DIRS =
+
+# The DIAFILE_DIRS tag can be used to specify one or more directories that
+# contain dia files that are included in the documentation (see the \diafile
+# command).
+
+DIAFILE_DIRS =
+
+# When using plantuml, the PLANTUML_JAR_PATH tag should be used to specify the
+# path where java can find the plantuml.jar file. If left blank, it is assumed
+# PlantUML is not used or called during a preprocessing step. Doxygen will
+# generate a warning when it encounters a \startuml command in this case and
+# will not generate output for the diagram.
+
+PLANTUML_JAR_PATH =
+
+# When using plantuml, the PLANTUML_CFG_FILE tag can be used to specify a
+# configuration file for plantuml.
+
+PLANTUML_CFG_FILE =
+
+# When using plantuml, the specified paths are searched for files specified by
+# the !include statement in a plantuml block.
+
+PLANTUML_INCLUDE_PATH =
+
+# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of nodes
+# that will be shown in the graph. If the number of nodes in a graph becomes
+# larger than this value, doxygen will truncate the graph, which is visualized
+# by representing a node as a red box. Note that if the number of direct
+# children of the root node in a graph is already larger than
+# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note that
+# the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.
+# Minimum value: 0, maximum value: 10000, default value: 50.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_GRAPH_MAX_NODES = 50
+
+# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the graphs
+# generated by dot. A depth value of 3 means that only nodes reachable from the
+# root by following a path via at most 3 edges will be shown. Nodes that lie
+# further from the root node will be omitted. Note that setting this option to 1
+# or 2 may greatly reduce the computation time needed for large code bases. Also
+# note that the size of a graph can be further restricted by
+# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction.
+# Minimum value: 0, maximum value: 1000, default value: 0.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+MAX_DOT_GRAPH_DEPTH = 0
+
+# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
+# background. This is disabled by default, because dot on Windows does not seem
+# to support this out of the box.
+#
+# Warning: Depending on the platform used, enabling this option may lead to
+# badly anti-aliased labels on the edges of a graph (i.e. they become hard to
+# read).
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_TRANSPARENT = NO
+
+# Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output
+# files in one run (i.e. multiple -o and -T options on the command line). This
+# makes dot run faster, but since only newer versions of dot (>1.8.10) support
+# this, this feature is disabled by default.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_MULTI_TARGETS = NO
+
+# If the GENERATE_LEGEND tag is set to YES doxygen will generate a legend page
+# explaining the meaning of the various boxes and arrows in the dot generated
+# graphs.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+GENERATE_LEGEND = YES
+
+# If the DOT_CLEANUP tag is set to YES, doxygen will remove the intermediate dot
+# files that are used to generate the various graphs.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_CLEANUP = YES
diff --git a/chromium/third_party/dawn/third_party/glfw/docs/DoxygenLayout.xml b/chromium/third_party/dawn/third_party/glfw/docs/DoxygenLayout.xml
new file mode 100644
index 00000000000..ab971721860
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/docs/DoxygenLayout.xml
@@ -0,0 +1,71 @@
+<doxygenlayout version="1.0">
+ <!-- Generated by doxygen 1.8.14 -->
+ <!-- Navigation index tabs for HTML output -->
+ <navindex>
+ <tab type="mainpage" visible="yes" title="Introduction"/>
+ <tab type="user" url="quick_guide.html" title="Tutorial"/>
+ <tab type="pages" visible="yes" title="Guides" intro=""/>
+ <tab type="modules" visible="yes" title="Reference" intro=""/>
+ <tab type="filelist" visible="yes" title="Files"/>
+ </navindex>
+
+ <!-- Layout definition for a file page -->
+ <file>
+ <detaileddescription title="Description"/>
+ <includes visible="$SHOW_INCLUDE_FILES"/>
+ <sourcelink visible="yes"/>
+ <memberdecl>
+ <constantgroups visible="yes" title=""/>
+ <defines title=""/>
+ <typedefs title=""/>
+ <enums title=""/>
+ <functions title=""/>
+ <variables title=""/>
+ <membergroups visible="yes"/>
+ </memberdecl>
+ <memberdef>
+ <defines title=""/>
+ <typedefs title=""/>
+ <enums title=""/>
+ <functions title=""/>
+ <variables title=""/>
+ </memberdef>
+ <authorsection/>
+ </file>
+
+ <!-- Layout definition for a group page -->
+ <group>
+ <detaileddescription title="Description"/>
+ <memberdecl>
+ <nestedgroups visible="yes" title=""/>
+ <dirs visible="yes" title=""/>
+ <files visible="yes" title=""/>
+ <defines title=""/>
+ <typedefs title=""/>
+ <enums title=""/>
+ <enumvalues title=""/>
+ <functions title=""/>
+ <variables title=""/>
+ </memberdecl>
+ <memberdef>
+ <pagedocs/>
+ <defines title=""/>
+ <typedefs title=""/>
+ <enums title=""/>
+ <enumvalues title=""/>
+ <functions title=""/>
+ <variables title=""/>
+ </memberdef>
+ <authorsection visible="yes"/>
+ </group>
+
+ <!-- Layout definition for a directory page -->
+ <directory>
+ <briefdescription visible="yes"/>
+ <memberdecl>
+ <dirs visible="yes"/>
+ <files visible="yes"/>
+ </memberdecl>
+ <detaileddescription title=""/>
+ </directory>
+</doxygenlayout>
diff --git a/chromium/third_party/dawn/third_party/glfw/docs/SUPPORT.md b/chromium/third_party/dawn/third_party/glfw/docs/SUPPORT.md
new file mode 100644
index 00000000000..79a45a8ff14
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/docs/SUPPORT.md
@@ -0,0 +1,14 @@
+# Support resources
+
+See the [latest documentation](https://www.glfw.org/docs/latest/) for tutorials,
+guides and the API reference.
+
+If you have questions about using GLFW, we have a
+[forum](https://discourse.glfw.org/), and the `#glfw` IRC channel on
+[Libera.Chat](https://libera.chat/).
+
+Bugs are reported to our [issue tracker](https://github.com/glfw/glfw/issues).
+Please check the [contribution
+guide](https://github.com/glfw/glfw/blob/master/docs/CONTRIBUTING.md) for
+information on what to include when reporting a bug.
+
diff --git a/chromium/third_party/dawn/third_party/glfw/docs/build.dox b/chromium/third_party/dawn/third_party/glfw/docs/build.dox
new file mode 100644
index 00000000000..aa06b0f48ef
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/docs/build.dox
@@ -0,0 +1,338 @@
+/*!
+
+@page build_guide Building applications
+
+@tableofcontents
+
+This is about compiling and linking applications that use GLFW. For information on
+how to write such applications, start with the
+[introductory tutorial](@ref quick_guide). For information on how to compile
+the GLFW library itself, see @ref compile_guide.
+
+This is not a tutorial on compilation or linking. It assumes basic
+understanding of how to compile and link a C program as well as how to use the
+specific compiler of your chosen development environment. The compilation
+and linking process should be explained in your C programming material and in
+the documentation for your development environment.
+
+
+@section build_include Including the GLFW header file
+
+You should include the GLFW header in the source files where you use OpenGL or
+GLFW.
+
+@code
+#include <GLFW/glfw3.h>
+@endcode
+
+This header defines all the constants and declares all the types and function
+prototypes of the GLFW API. By default it also includes the OpenGL header from
+your development environment. See [option macros](@ref build_macros) below for
+how to select OpenGL ES headers and more.
+
+The GLFW header also defines any platform-specific macros needed by your OpenGL
+header, so that it can be included without needing any window system headers.
+
+It does this only when needed, so if window system headers are included, the
+GLFW header does not try to redefine those symbols. The reverse is not true,
+i.e. `windows.h` cannot cope if any Win32 symbols have already been defined.
+
+In other words:
+
+ - Use the GLFW header to include OpenGL or OpenGL ES headers portably
+ - Do not include window system headers unless you will use those APIs directly
+ - If you do need such headers, include them before the GLFW header
+
+If you are using an OpenGL extension loading library such as
+[glad](https://github.com/Dav1dde/glad), the extension loader header should
+be included before the GLFW one. GLFW attempts to detect any OpenGL or OpenGL
+ES header or extension loader header included before it and will then disable
+the inclusion of the default OpenGL header. Most extension loaders also define
+macros that disable similar headers below it.
+
+@code
+#include <glad/gl.h>
+#include <GLFW/glfw3.h>
+@endcode
+
+Both of these mechanisms depend on the extension loader header defining a known
+macro. If yours doesn't or you don't know which one your users will pick, the
+@ref GLFW_INCLUDE_NONE macro will explicitly prevent the GLFW header from
+including the OpenGL header. This will also allow you to include the two
+headers in any order.
+
+@code
+#define GLFW_INCLUDE_NONE
+#include <GLFW/glfw3.h>
+#include <glad/gl.h>
+@endcode
+
+
+@subsection build_macros GLFW header option macros
+
+These macros may be defined before the inclusion of the GLFW header and affect
+its behavior.
+
+@anchor GLFW_DLL
+__GLFW_DLL__ is required on Windows when using the GLFW DLL, to tell the
+compiler that the GLFW functions are defined in a DLL.
+
+The following macros control which OpenGL or OpenGL ES API header is included.
+Only one of these may be defined at a time.
+
+@note GLFW does not provide any of the API headers mentioned below. They are
+provided by your development environment or your OpenGL, OpenGL ES or Vulkan
+SDK, and most of them can be downloaded from the
+[Khronos Registry](https://www.khronos.org/registry/).
+
+@anchor GLFW_INCLUDE_GLCOREARB
+__GLFW_INCLUDE_GLCOREARB__ makes the GLFW header include the modern
+`GL/glcorearb.h` header (`OpenGL/gl3.h` on macOS) instead of the regular OpenGL
+header.
+
+@anchor GLFW_INCLUDE_ES1
+__GLFW_INCLUDE_ES1__ makes the GLFW header include the OpenGL ES 1.x `GLES/gl.h`
+header instead of the regular OpenGL header.
+
+@anchor GLFW_INCLUDE_ES2
+__GLFW_INCLUDE_ES2__ makes the GLFW header include the OpenGL ES 2.0
+`GLES2/gl2.h` header instead of the regular OpenGL header.
+
+@anchor GLFW_INCLUDE_ES3
+__GLFW_INCLUDE_ES3__ makes the GLFW header include the OpenGL ES 3.0
+`GLES3/gl3.h` header instead of the regular OpenGL header.
+
+@anchor GLFW_INCLUDE_ES31
+__GLFW_INCLUDE_ES31__ makes the GLFW header include the OpenGL ES 3.1
+`GLES3/gl31.h` header instead of the regular OpenGL header.
+
+@anchor GLFW_INCLUDE_ES32
+__GLFW_INCLUDE_ES32__ makes the GLFW header include the OpenGL ES 3.2
+`GLES3/gl32.h` header instead of the regular OpenGL header.
+
+@anchor GLFW_INCLUDE_NONE
+__GLFW_INCLUDE_NONE__ makes the GLFW header not include any OpenGL or OpenGL ES
+API header. This is useful in combination with an extension loading library.
+
+If none of the above inclusion macros are defined, the standard OpenGL `GL/gl.h`
+header (`OpenGL/gl.h` on macOS) is included, unless GLFW detects the inclusion
+guards of any OpenGL, OpenGL ES or extension loader header it knows about.
+
+The following macros control the inclusion of additional API headers. Any
+number of these may be defined simultaneously, and/or together with one of the
+above macros.
+
+@anchor GLFW_INCLUDE_VULKAN
+__GLFW_INCLUDE_VULKAN__ makes the GLFW header include the Vulkan
+`vulkan/vulkan.h` header in addition to any selected OpenGL or OpenGL ES header.
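+
+For example, a minimal inclusion that makes both the Vulkan and GLFW
+declarations available might look like this:
+
+@code
+#define GLFW_INCLUDE_VULKAN
+#include <GLFW/glfw3.h>
+@endcode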
+
+@anchor GLFW_INCLUDE_GLEXT
+__GLFW_INCLUDE_GLEXT__ makes the GLFW header include the appropriate extension
+header for the OpenGL or OpenGL ES header selected above after and in addition
+to that header.
+
+@anchor GLFW_INCLUDE_GLU
+__GLFW_INCLUDE_GLU__ makes the header include the GLU header in addition to the
+header selected above. This should only be used with the standard OpenGL header
+and only for compatibility with legacy code. GLU has been deprecated and should
+not be used in new code.
+
+@note None of these macros may be defined during the compilation of GLFW itself.
+If your build includes GLFW and you define any of these in your build files,
+make
+sure they are not applied to the GLFW sources.
+
+
+@section build_link Link with the right libraries
+
+GLFW is essentially a wrapper of various platform-specific APIs and therefore
+needs to link against many different system libraries. If you are using GLFW as
+a shared library / dynamic library / DLL then it takes care of these links.
+However, if you are using GLFW as a static library then your executable will
+need to link against these libraries.
+
+On Windows and macOS, the list of system libraries is static and can be
+hard-coded into your build environment. See the section for your development
+environment below. On Linux and other Unix-like operating systems, the list
+varies but can be retrieved in various ways as described below.
+
+A good general introduction to linking is
+[Beginner's Guide to Linkers](https://www.lurklurk.org/linkers/linkers.html) by
+David Drysdale.
+
+
+@subsection build_link_win32 With MinGW or Visual C++ on Windows
+
+The static version of the GLFW library is named `glfw3`. When using this
+version, it is also necessary to link with some libraries that GLFW uses.
+
+When using MinGW to link an application with the static version of GLFW, you
+must also explicitly link with `gdi32`. Other toolchains including MinGW-w64
+include it in the set of default libraries along with other dependencies like
+`user32` and `kernel32`.
+
+The link library for the GLFW DLL is named `glfw3dll`. When compiling an
+application that uses the DLL version of GLFW, you need to define the @ref
+GLFW_DLL macro _before_ any inclusion of the GLFW header. This can be done
+either with a compiler switch or by defining it in your source code.
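+
+For example, a minimal sketch of defining it in the source code (a compiler
+switch that defines the same macro would work equally well) might look like
+this:
+
+@code
+#define GLFW_DLL
+#include <GLFW/glfw3.h>
+@endcode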
+
+
+@subsection build_link_cmake_source With CMake and GLFW source
+
+This section is about using CMake to compile and link GLFW along with your
+application. If you want to use an installed binary instead, see @ref
+build_link_cmake_package.
+
+With a few changes to your `CMakeLists.txt` you can have the GLFW source tree
+built along with your application.
+
+Add the root directory of the GLFW source tree to your project. This will add
+the `glfw` target to your project.
+
+@code{.cmake}
+add_subdirectory(path/to/glfw)
+@endcode
+
+Once GLFW has been added, link your application against the `glfw` target.
+This adds the GLFW library and its link-time dependencies as it is currently
+configured, the include directory for the GLFW header and, when applicable, the
+@ref GLFW_DLL macro.
+
+@code{.cmake}
+target_link_libraries(myapp glfw)
+@endcode
+
+Note that the `glfw` target does not depend on OpenGL, as GLFW loads any OpenGL,
+OpenGL ES or Vulkan libraries it needs at runtime. If your application calls
+OpenGL directly, instead of using a modern
+[extension loader library](@ref context_glext_auto), use the OpenGL CMake
+package.
+
+@code{.cmake}
+find_package(OpenGL REQUIRED)
+@endcode
+
+If OpenGL is found, the `OpenGL::GL` target is added to your project, containing
+library and include directory paths. Link against this like any other library.
+
+@code{.cmake}
+target_link_libraries(myapp OpenGL::GL)
+@endcode
+
+For a minimal example of a program and GLFW sources built with CMake, see the
+[GLFW CMake Starter](https://github.com/juliettef/GLFW-CMake-starter) on GitHub.
+
+
+@subsection build_link_cmake_package With CMake and installed GLFW binaries
+
+This section is about using CMake to link GLFW after it has been built and
+installed. If you want to build it along with your application instead, see
+@ref build_link_cmake_source.
+
+With a few changes to your `CMakeLists.txt` you can locate the package and
+target files generated when GLFW is installed.
+
+@code{.cmake}
+find_package(glfw3 3.4 REQUIRED)
+@endcode
+
+Once GLFW has been added to the project, link against it with the `glfw` target.
+This adds the GLFW library and its link-time dependencies, the include directory
+for the GLFW header and, when applicable, the @ref GLFW_DLL macro.
+
+@code{.cmake}
+target_link_libraries(myapp glfw)
+@endcode
+
+Note that the `glfw` target does not depend on OpenGL, as GLFW loads any OpenGL,
+OpenGL ES or Vulkan libraries it needs at runtime. If your application calls
+OpenGL directly, instead of using a modern
+[extension loader library](@ref context_glext_auto), use the OpenGL CMake
+package.
+
+@code{.cmake}
+find_package(OpenGL REQUIRED)
+@endcode
+
+If OpenGL is found, the `OpenGL::GL` target is added to your project, containing
+library and include directory paths. Link against this like any other library.
+
+@code{.cmake}
+target_link_libraries(myapp OpenGL::GL)
+@endcode
+
+
+@subsection build_link_pkgconfig With makefiles and pkg-config on Unix
+
+GLFW supports [pkg-config](https://www.freedesktop.org/wiki/Software/pkg-config/),
+and the `glfw3.pc` pkg-config file is generated when the GLFW library is built
+and is installed along with it. A pkg-config file describes all necessary
+compile-time and link-time flags and dependencies needed to use a library. When
+they are updated or if they differ between systems, you will get the correct
+ones automatically.
+
+A typical compile and link command-line when using the static version of the
+GLFW library may look like this:
+
+@code{.sh}
+cc $(pkg-config --cflags glfw3) -o myprog myprog.c $(pkg-config --static --libs glfw3)
+@endcode
+
+If you are using the shared version of the GLFW library, omit the `--static`
+flag.
+
+@code{.sh}
+cc $(pkg-config --cflags glfw3) -o myprog myprog.c $(pkg-config --libs glfw3)
+@endcode
+
+You can also use the `glfw3.pc` file without installing it first, by using the
+`PKG_CONFIG_PATH` environment variable.
+
+@code{.sh}
+env PKG_CONFIG_PATH=path/to/glfw/src cc $(pkg-config --cflags glfw3) -o myprog myprog.c $(pkg-config --libs glfw3)
+@endcode
+
+The dependencies do not include OpenGL, as GLFW loads any OpenGL, OpenGL ES or
+Vulkan libraries it needs at runtime. If your application calls OpenGL
+directly, instead of using a modern
+[extension loader library](@ref context_glext_auto), you should add the `gl`
+pkg-config package.
+
+@code{.sh}
+cc $(pkg-config --cflags glfw3 gl) -o myprog myprog.c $(pkg-config --libs glfw3 gl)
+@endcode
+
+
+@subsection build_link_xcode With Xcode on macOS
+
+If you are using the dynamic library version of GLFW, add it to the project
+dependencies.
+
+If you are using the static library version of GLFW, add it and the Cocoa,
+OpenGL and IOKit frameworks to the project as dependencies. They can all be
+found in `/System/Library/Frameworks`.
+
+
+@subsection build_link_osx With command-line on macOS
+
+It is recommended that you use [pkg-config](@ref build_link_pkgconfig) when
+building from the command line on macOS. That way you will get any new
+dependencies added automatically. If you still wish to build manually, you need
+to add the required frameworks and libraries to your command-line yourself using
+the `-l` and `-framework` switches.
+
+If you are using the dynamic GLFW library, which is named `libglfw.3.dylib`, do:
+
+@code{.sh}
+cc -o myprog myprog.c -lglfw -framework Cocoa -framework OpenGL -framework IOKit
+@endcode
+
+If you are using the static library, named `libglfw3.a`, substitute `-lglfw3`
+for `-lglfw`.
+
+Note that you do not add the `.framework` extension to a framework when linking
+against it from the command-line.
+
+@note Your machine may have a `libGL.*.dylib` style OpenGL library, but that is
+for the X Window System and will not work with the macOS native version of GLFW.
+
+*/
diff --git a/chromium/third_party/dawn/third_party/glfw/docs/compat.dox b/chromium/third_party/dawn/third_party/glfw/docs/compat.dox
new file mode 100644
index 00000000000..989c4c19bfc
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/docs/compat.dox
@@ -0,0 +1,284 @@
+/*!
+
+@page compat_guide Standards conformance
+
+@tableofcontents
+
+This guide describes the various API extensions used by this version of GLFW.
+It lists what are essentially implementation details, but which are nonetheless
+vital knowledge for developers intending to deploy their applications on a wide
+range of machines.
+
+The information in this guide is not a part of the GLFW API, but merely
+preconditions for some parts of the library to function on a given machine. Any
+part of this information may change in future versions of GLFW and that will not
+be considered a breaking API change.
+
+
+@section compat_x11 X11 extensions, protocols and IPC standards
+
+As GLFW uses Xlib directly, without any intervening toolkit
+library, it has sole responsibility for interacting well with the many and
+varied window managers in use on Unix-like systems. In order for applications
+and window managers to work well together, a number of standards and
+conventions have been developed that regulate behavior outside the scope of the
+X11 API; most importantly the
+[Inter-Client Communication Conventions Manual](https://www.tronche.com/gui/x/icccm/)
+(ICCCM) and
+[Extended Window Manager Hints](https://standards.freedesktop.org/wm-spec/wm-spec-latest.html)
+(EWMH) standards.
+
+GLFW uses the `_MOTIF_WM_HINTS` window property to support borderless windows.
+If the running window manager does not support this property, the
+`GLFW_DECORATED` hint will have no effect.
+
+GLFW uses the ICCCM `WM_DELETE_WINDOW` protocol to intercept the user
+attempting to close the GLFW window. If the running window manager does not
+support this protocol, the close callback will never be called.
+
+GLFW uses the EWMH `_NET_WM_PING` protocol, allowing the window manager to
+notify the user when the application has stopped responding, i.e. when it has
+ceased to process events. If the running window manager does not support this
+protocol, the user will not be notified if the application locks up.
+
+GLFW uses the EWMH `_NET_WM_STATE_FULLSCREEN` window state to tell the window
+manager to make the GLFW window full screen. If the running window manager does
+not support this state, full screen windows may not work properly. GLFW has
+a fallback code path in case this state is unavailable, but every window manager
+behaves slightly differently in this regard.
+
+GLFW uses the EWMH `_NET_WM_BYPASS_COMPOSITOR` window property to tell a
+compositing window manager to un-redirect full screen GLFW windows. If the
+running window manager uses compositing but does not support this property then
+additional copying may be performed for each buffer swap of full screen windows.
+
+GLFW uses the
+[clipboard manager protocol](https://www.freedesktop.org/wiki/ClipboardManager/)
+to push a clipboard string (i.e. selection) owned by a GLFW window about to be
+destroyed to the clipboard manager. If there is no running clipboard manager,
+the clipboard string will be unavailable once the window has been destroyed.
+
+GLFW uses the
+[X drag-and-drop protocol](https://www.freedesktop.org/wiki/Specifications/XDND/)
+to provide file drop events. If the application originating the drag does not
+support this protocol, drag and drop will not work.
+
+GLFW uses the XRandR 1.3 extension to provide multi-monitor support. If the
+running X server does not support this version of this extension, multi-monitor
+support will not function and only a single, desktop-spanning monitor will be
+reported.
+
+GLFW uses the XRandR 1.3 and Xf86vidmode extensions to provide gamma ramp
+support. If the running X server does not support either or both of these
+extensions, gamma ramp support will not function.
+
+GLFW uses the Xkb extension and detectable auto-repeat to provide keyboard
+input. If the running X server does not support this extension, a non-Xkb
+fallback path is used.
+
+GLFW uses the XInput2 extension to provide raw, non-accelerated mouse motion
+when the cursor is disabled. If the running X server does not support this
+extension, regular accelerated mouse motion will be used.
+
+GLFW uses both the XRender extension and the compositing manager to support
+transparent window framebuffers. If the running X server does not support this
+extension or there is no running compositing manager, the
+`GLFW_TRANSPARENT_FRAMEBUFFER` framebuffer hint will have no effect.
+
+GLFW uses both the Xcursor extension and the freedesktop cursor conventions to
+provide an expanded set of standard cursor shapes. If the running X server does
+not support this extension or the current cursor theme does not support the
+conventions, the `GLFW_RESIZE_NWSE_CURSOR`, `GLFW_RESIZE_NESW_CURSOR` and
+`GLFW_NOT_ALLOWED_CURSOR` shapes will not be available and other shapes may use
+legacy images.
+
+
+@section compat_wayland Wayland protocols and IPC standards
+
+As GLFW uses libwayland directly, without any intervening toolkit library, it
+has sole responsibility for interacting well with every compositor in use on
+Unix-like systems. Most of the features are provided by the core protocol,
+while cursor support is provided by the libwayland-cursor helper library, EGL
+integration by libwayland-egl, and keyboard handling by
+[libxkbcommon](https://xkbcommon.org/). In addition, GLFW uses some protocols
+from wayland-protocols to provide additional features if the compositor
+supports them.
+
+GLFW uses xkbcommon 0.5.0 to provide key and text input support. Earlier
+versions are not supported.
+
+GLFW uses the [xdg-shell
+protocol](https://cgit.freedesktop.org/wayland/wayland-protocols/tree/stable/xdg-shell/xdg-shell.xml)
+to provide better window management. This protocol is part of
+wayland-protocols 1.12, and is mandatory for GLFW to display a window.
+
+GLFW uses the [relative pointer
+protocol](https://cgit.freedesktop.org/wayland/wayland-protocols/tree/unstable/relative-pointer/relative-pointer-unstable-v1.xml)
+alongside the [pointer constraints
+protocol](https://cgit.freedesktop.org/wayland/wayland-protocols/tree/unstable/pointer-constraints/pointer-constraints-unstable-v1.xml)
+to implement disabled cursor. These two protocols are part of
+wayland-protocols 1.1, and mandatory at build time. If the running compositor
+does not support both of these protocols, disabling the cursor will have no
+effect.
+
+GLFW uses the [idle inhibit
+protocol](https://cgit.freedesktop.org/wayland/wayland-protocols/tree/unstable/idle-inhibit/idle-inhibit-unstable-v1.xml)
+to prohibit the screensaver from starting. This protocol is part of
+wayland-protocols 1.6, and mandatory at build time. If the running compositor
+does not support this protocol, the screensaver may start even for full screen
+windows.
+
+GLFW uses the [xdg-decoration
+protocol](https://cgit.freedesktop.org/wayland/wayland-protocols/tree/unstable/xdg-decoration/xdg-decoration-unstable-v1.xml)
+to request decorations to be drawn around its windows. This protocol is part
+of wayland-protocols 1.15, and mandatory at build time. If the running
+compositor does not support this protocol, a very simple frame will be drawn by
+GLFW itself, using the [viewporter
+protocol](https://cgit.freedesktop.org/wayland/wayland-protocols/tree/stable/viewporter/viewporter.xml)
+alongside
+[subsurfaces](https://cgit.freedesktop.org/wayland/wayland/tree/protocol/wayland.xml#n2598).
+This protocol is part of wayland-protocols 1.4, and mandatory at build time.
+If the running compositor does not support this protocol either, no decorations
+will be drawn around windows.
+
+
+@section compat_glx GLX extensions
+
+The GLX API is the default API used to create OpenGL contexts on Unix-like
+systems using the X Window System.
+
+GLFW uses the GLX 1.3 `GLXFBConfig` functions to enumerate and select framebuffer pixel
+formats. If GLX 1.3 is not supported, @ref glfwInit will fail.
+
+GLFW uses the `GLX_MESA_swap_control`, `GLX_EXT_swap_control` and
+`GLX_SGI_swap_control` extensions to provide vertical retrace synchronization
+(or _vsync_), in that order of preference. Where none of these extensions are
+available, calling @ref glfwSwapInterval will have no effect.
+
+GLFW uses the `GLX_ARB_multisample` extension to create contexts with
+multisampling anti-aliasing. Where this extension is unavailable, the
+`GLFW_SAMPLES` hint will have no effect.
+
+GLFW uses the `GLX_ARB_create_context` extension when available, even when
+creating OpenGL contexts of version 2.1 and below. Where this extension is
+unavailable, the `GLFW_CONTEXT_VERSION_MAJOR` and `GLFW_CONTEXT_VERSION_MINOR`
+hints will only be partially supported, the `GLFW_CONTEXT_DEBUG` hint will have
+no effect, and setting the `GLFW_OPENGL_PROFILE` or `GLFW_OPENGL_FORWARD_COMPAT`
+hints to `GLFW_TRUE` will cause @ref glfwCreateWindow to fail.
+
+GLFW uses the `GLX_ARB_create_context_profile` extension to provide support for
+context profiles. Where this extension is unavailable, setting the
+`GLFW_OPENGL_PROFILE` hint to anything but `GLFW_OPENGL_ANY_PROFILE`, or setting
+`GLFW_CLIENT_API` to anything but `GLFW_OPENGL_API` or `GLFW_NO_API` will cause
+@ref glfwCreateWindow to fail.
+
+GLFW uses the `GLX_ARB_context_flush_control` extension to provide control over
+whether a context is flushed when it is released (made non-current). Where this
+extension is unavailable, the `GLFW_CONTEXT_RELEASE_BEHAVIOR` hint will have no
+effect and the context will always be flushed when released.
+
+GLFW uses the `GLX_ARB_framebuffer_sRGB` and `GLX_EXT_framebuffer_sRGB`
+extensions to provide support for sRGB framebuffers. Where both of these
+extensions are unavailable, the `GLFW_SRGB_CAPABLE` hint will have no effect.
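+
+For example, an sRGB-capable default framebuffer is requested with the
+corresponding hint before window creation:
+
+@code
+glfwWindowHint(GLFW_SRGB_CAPABLE, GLFW_TRUE);
+@endcode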
+
+
+@section compat_wgl WGL extensions
+
+The WGL API is used to create OpenGL contexts on Microsoft Windows and other
+implementations of the Win32 API, such as Wine.
+
+GLFW uses either the `WGL_EXT_extension_string` or the
+`WGL_ARB_extension_string` extension to check for the presence of all other WGL
+extensions listed below. If both are available, the EXT one is preferred. If
+neither is available, no other extensions are used and many GLFW features
+related to context creation will have no effect or cause errors when used.
+
+GLFW uses the `WGL_EXT_swap_control` extension to provide vertical retrace
+synchronization (or _vsync_). Where this extension is unavailable, calling @ref
+glfwSwapInterval will have no effect.
+
+GLFW uses the `WGL_ARB_pixel_format` and `WGL_ARB_multisample` extensions to
+create contexts with multisampling anti-aliasing. Where these extensions are
+unavailable, the `GLFW_SAMPLES` hint will have no effect.
+
+GLFW uses the `WGL_ARB_create_context` extension when available, even when
+creating OpenGL contexts of version 2.1 and below. Where this extension is
+unavailable, the `GLFW_CONTEXT_VERSION_MAJOR` and `GLFW_CONTEXT_VERSION_MINOR`
+hints will only be partially supported, the `GLFW_CONTEXT_DEBUG` hint will have
+no effect, and setting the `GLFW_OPENGL_PROFILE` or `GLFW_OPENGL_FORWARD_COMPAT`
+hints to `GLFW_TRUE` will cause @ref glfwCreateWindow to fail.
+
+GLFW uses the `WGL_ARB_create_context_profile` extension to provide support for
+context profiles. Where this extension is unavailable, setting the
+`GLFW_OPENGL_PROFILE` hint to anything but `GLFW_OPENGL_ANY_PROFILE` will cause
+@ref glfwCreateWindow to fail.
+
+GLFW uses the `WGL_ARB_context_flush_control` extension to provide control over
+whether a context is flushed when it is released (made non-current). Where this
+extension is unavailable, the `GLFW_CONTEXT_RELEASE_BEHAVIOR` hint will have no
+effect and the context will always be flushed when released.
+
+GLFW uses the `WGL_ARB_framebuffer_sRGB` and `WGL_EXT_framebuffer_sRGB`
+extensions to provide support for sRGB framebuffers. Where both of these
+extensions are unavailable, the `GLFW_SRGB_CAPABLE` hint will have no effect.
+
+
+@section compat_osx OpenGL on macOS
+
+Support for OpenGL 3.2 and above was introduced with OS X 10.7 and even then
+only forward-compatible, core profile contexts are supported. Support for
+OpenGL 4.1 was introduced with OS X 10.9, also limited to forward-compatible,
+core profile contexts. There is also still no mechanism for requesting debug
+contexts or no-error contexts. Versions of Mac OS X earlier than 10.7 support
+at most OpenGL version 2.1.
+
+Because of this, on OS X 10.7 and later, the `GLFW_CONTEXT_VERSION_MAJOR` and
+`GLFW_CONTEXT_VERSION_MINOR` hints will cause @ref glfwCreateWindow to fail if
+given version 3.0 or 3.1. The `GLFW_OPENGL_PROFILE` hint must be set to
+`GLFW_OPENGL_CORE_PROFILE` when creating OpenGL 3.2 and later contexts. The
+`GLFW_CONTEXT_DEBUG` and `GLFW_CONTEXT_NO_ERROR` hints are ignored.
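+
+A minimal sketch of the hints needed to create an OpenGL 3.2 or later context
+on macOS, assuming a machine and OS version that support it:
+
+@code
+glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
+glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 2);
+glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GLFW_TRUE);
+glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
+@endcode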
+
+Also, on Mac OS X 10.6 and below, the `GLFW_CONTEXT_VERSION_MAJOR` and
+`GLFW_CONTEXT_VERSION_MINOR` hints will fail if given a version above 2.1,
+setting the `GLFW_OPENGL_PROFILE` or `GLFW_OPENGL_FORWARD_COMPAT` hints to
+a non-default value will cause @ref glfwCreateWindow to fail and the
+`GLFW_CONTEXT_DEBUG` hint is ignored.
+
+
+@section compat_vulkan Vulkan loader and API
+
+By default, GLFW uses the standard system-wide Vulkan loader to access the
+Vulkan API on all platforms except macOS. This is installed by both graphics
+drivers and Vulkan SDKs. If either the loader or at least one minimally
+functional ICD is missing, @ref glfwVulkanSupported will return `GLFW_FALSE` and
+all other Vulkan-related functions will fail with an @ref GLFW_API_UNAVAILABLE
+error.
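+
+A minimal sketch of checking for this at run time, before calling any other
+Vulkan related GLFW functions:
+
+@code
+if (!glfwVulkanSupported())
+{
+    // No Vulkan loader or no minimally functional ICD was found
+}
+@endcode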
+
+
+@section compat_wsi Vulkan WSI extensions
+
+The Vulkan WSI extensions are used to create Vulkan surfaces for GLFW windows on
+all supported platforms.
+
+GLFW uses the `VK_KHR_surface` and `VK_KHR_win32_surface` extensions to create
+surfaces on Microsoft Windows. If any of these extensions are not available,
+@ref glfwGetRequiredInstanceExtensions will return an empty list and window
+surface creation will fail.
+
+GLFW uses the `VK_KHR_surface` and either the `VK_MVK_macos_surface` or
+`VK_EXT_metal_surface` extensions to create surfaces on macOS. If any of these
+extensions are not available, @ref glfwGetRequiredInstanceExtensions will
+return an empty list and window surface creation will fail.
+
+GLFW uses the `VK_KHR_surface` and either the `VK_KHR_xlib_surface` or
+`VK_KHR_xcb_surface` extensions to create surfaces on X11. If `VK_KHR_surface`
+or both `VK_KHR_xlib_surface` and `VK_KHR_xcb_surface` are not available, @ref
+glfwGetRequiredInstanceExtensions will return an empty list and window surface
+creation will fail.
+
+GLFW uses the `VK_KHR_surface` and `VK_KHR_wayland_surface` extensions to create
+surfaces on Wayland. If any of these extensions are not available, @ref
+glfwGetRequiredInstanceExtensions will return an empty list and window surface
+creation will fail.
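+
+A minimal sketch of querying the instance extensions GLFW needs for surface
+creation, which are then passed on in your `VkInstanceCreateInfo`:
+
+@code
+uint32_t count;
+const char** extensions = glfwGetRequiredInstanceExtensions(&count);
+
+if (!extensions)
+{
+    // The required WSI extensions are not available
+}
+@endcode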
+
+*/
diff --git a/chromium/third_party/dawn/third_party/glfw/docs/compile.dox b/chromium/third_party/dawn/third_party/glfw/docs/compile.dox
new file mode 100644
index 00000000000..925ab1abb21
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/docs/compile.dox
@@ -0,0 +1,394 @@
+/*!
+
+@page compile_guide Compiling GLFW
+
+@tableofcontents
+
+This is about compiling the GLFW library itself. For information on how to
+build applications that use GLFW, see @ref build_guide.
+
+
+@section compile_cmake Using CMake
+
+GLFW behaves like most other libraries that use CMake so this guide mostly
+describes the standard configure, generate and compile sequence. If you are already
+familiar with this from other projects, you may want to focus on the @ref
+compile_deps and @ref compile_options sections for GLFW-specific information.
+
+GLFW uses [CMake](https://cmake.org/) to generate project files or makefiles
+for your chosen development environment. To compile GLFW, first generate these
+files with CMake and then use them to compile the GLFW library.
+
+If you are on Windows or macOS you can
+[download CMake](https://cmake.org/download/) from their site.
+
+If you are on a Unix-like system such as Linux, FreeBSD or Cygwin or have
+a package system like Fink, MacPorts or Homebrew, you can install its CMake
+package.
+
+CMake is a complex tool and this guide will only show a few of the possible ways
+to set up and compile GLFW. The CMake project has their own much more detailed
+[CMake user guide](https://cmake.org/cmake/help/latest/guide/user-interaction/)
+that includes everything in this guide not specific to GLFW. It may be a useful
+companion to this one.
+
+
+@subsection compile_deps Installing dependencies
+
+The C/C++ development environments in Visual Studio, Xcode and MinGW come with
+all necessary dependencies for compiling GLFW, but on Unix-like systems like
+Linux and FreeBSD you will need a few extra packages.
+
+
+@subsubsection compile_deps_x11 Dependencies for X11
+
+To compile GLFW for X11, you need to have the X11 development packages
+installed. They are not needed to build or run programs that use GLFW.
+
+On Debian and derivatives like Ubuntu and Linux Mint the `xorg-dev` meta-package
+pulls in the development packages for all of X11.
+
+@code{.sh}
+sudo apt install xorg-dev
+@endcode
+
+On Fedora and derivatives like Red Hat the X11 extension packages
+`libXcursor-devel`, `libXi-devel`, `libXinerama-devel` and `libXrandr-devel`
+required by GLFW pull in all its other dependencies.
+
+@code{.sh}
+sudo dnf install libXcursor-devel libXi-devel libXinerama-devel libXrandr-devel
+@endcode
+
+On FreeBSD the X11 headers are installed along with the end-user X11 packages, so if
+you have an X server running you should have the headers as well. If not,
+install the `xorgproto` package.
+
+@code{.sh}
+pkg install xorgproto
+@endcode
+
+On Cygwin the `libXcursor-devel`, `libXi-devel`, `libXinerama-devel`,
+`libXrandr-devel` and `libXrender-devel` packages in the Libs section of the GUI
+installer will install all the headers and other development related files GLFW
+requires for X11.
+
+Once you have the required dependencies, move on to @ref compile_generate.
+
+
+@subsubsection compile_deps_wayland Dependencies for Wayland and X11
+
+To compile GLFW for both Wayland and X11, you need to have the X11, Wayland and xkbcommon
+development packages installed. They are not needed to build or run programs that use
+GLFW. You will also need to set the @ref GLFW_BUILD_WAYLAND CMake option in the next
+step when generating build files.
+
+On Debian and derivatives like Ubuntu and Linux Mint you will need the `libwayland-dev`,
+`libxkbcommon-dev` and `wayland-protocols` packages and the `xorg-dev` meta-package.
+These will pull in all other dependencies.
+
+@code{.sh}
+sudo apt install libwayland-dev libxkbcommon-dev wayland-protocols xorg-dev
+@endcode
+
+On Fedora and derivatives like Red Hat you will need the `wayland-devel`,
+`libxkbcommon-devel`, `wayland-protocols-devel`, `libXcursor-devel`, `libXi-devel`,
+`libXinerama-devel` and `libXrandr-devel` packages. These will pull in all other
+dependencies.
+
+@code{.sh}
+sudo dnf install wayland-devel libxkbcommon-devel wayland-protocols-devel libXcursor-devel libXi-devel libXinerama-devel libXrandr-devel
+@endcode
+
+On FreeBSD you will need the `wayland`, `libxkbcommon` and `wayland-protocols` packages.
+The X11 headers are installed along with the end-user X11 packages, so if you have an X server
+running you should have the headers as well. If not, install the `xorgproto` package.
+
+@code{.sh}
+pkg install wayland libxkbcommon wayland-protocols xorgproto
+@endcode
+
+Once you have the required dependencies, move on to @ref compile_generate.
+
+
+@subsection compile_generate Generating build files with CMake
+
+Once you have all necessary dependencies it is time to generate the project
+files or makefiles for your development environment. CMake needs two paths for
+this:
+
+ - the path to the root directory of the GLFW source tree (not its `src`
+ subdirectory)
+ - the path to the directory where the generated build files and compiled
+ binaries will be placed
+
+If these are the same, it is called an in-tree build, otherwise it is called an
+out-of-tree build.
+
+Out-of-tree builds are recommended as they avoid cluttering up the source tree.
+They also allow you to have several build directories for different
+configurations all using the same source tree.
+
+A common pattern when building a single configuration is to have a build
+directory named `build` in the root of the source tree.
+
+
+@subsubsection compile_generate_gui Generating with the CMake GUI
+
+Start the CMake GUI and set the paths to the source and build directories
+described above. Then press _Configure_ and _Generate_.
+
+If you wish to change any CMake variables in the list, press _Configure_ and then
+_Generate_ to have the new values take effect. The variable list will be
+populated after the first configure step.
+
+By default GLFW will use X11 on Linux and other Unix-like systems other than macOS. To
+include support for Wayland as well, set the @ref GLFW_BUILD_WAYLAND option in the GLFW
+section of the variable list, then apply the new value as described above.
+
+Once you have generated the project files or makefiles for your chosen
+development environment, move on to @ref compile_compile.
+
+
+@subsubsection compile_generate_cli Generating with command-line CMake
+
+To make a build directory, pass the source and build directories to the `cmake`
+command. These can be relative or absolute paths. The build directory is
+created if it doesn't already exist.
+
+@code{.sh}
+cmake -S path/to/glfw -B path/to/build
+@endcode
+
+It is common to name the build directory `build` and place it in the root of the
+source tree when only planning to build a single configuration.
+
+@code{.sh}
+cd path/to/glfw
+cmake -S . -B build
+@endcode
+
+Without other flags these will generate Visual Studio project files on Windows
+and makefiles on other platforms. You can choose other targets using the `-G`
+flag.
+
+@code{.sh}
+cmake -S path/to/glfw -B path/to/build -G Xcode
+@endcode
+
+By default GLFW will use X11 on Linux and other Unix-like systems other
+than macOS. To also include support for Wayland, set the @ref GLFW_BUILD_WAYLAND CMake
+option.
+
+@code{.sh}
+cmake -S path/to/glfw -B path/to/build -D GLFW_BUILD_WAYLAND=1
+@endcode
+
+Once you have generated the project files or makefiles for your chosen
+development environment, move on to @ref compile_compile.
+
+
+@subsection compile_compile Compiling the library
+
+You should now have all required dependencies and the project files or makefiles
+necessary to compile GLFW. Go ahead and compile the actual GLFW library with
+these files as you would with any other project.
+
+With Visual Studio open `GLFW.sln` and use the Build menu. With Xcode open
+`GLFW.xcodeproj` and use the Project menu.
+
+With Linux, macOS and other forms of Unix, run `make`.
+
+@code{.sh}
+cd path/to/build
+make
+@endcode
+
+With MinGW, it is `mingw32-make`.
+
+@code{.sh}
+cd path/to/build
+mingw32-make
+@endcode
+
+Any CMake build directory can also be built with the `cmake` command and the
+`--build` flag.
+
+@code{.sh}
+cmake --build path/to/build
+@endcode
+
+This will run the platform-specific build tool the directory was generated for.
+
+Once the GLFW library is compiled you are ready to build your application,
+linking it to the GLFW library. See @ref build_guide for more information.
+
+
+@section compile_options CMake options
+
+The CMake files for GLFW provide a number of options, although not all are
+available on all supported platforms. Some of these are de facto standards
+among projects using CMake and so have no `GLFW_` prefix.
+
+If you are using the GUI version of CMake, these are listed and can be changed
+from there. If you are using the command-line version of CMake you can use the
+`ccmake` ncurses GUI to set options. Some distributions, like Ubuntu and
+others based on Debian GNU/Linux, ship this tool in a separate
+`cmake-curses-gui` package.
+
+Finally, if you don't want to use any GUI, you can set options from the `cmake`
+command-line with the `-D` flag.
+
+@code{.sh}
+cmake -S path/to/glfw -B path/to/build -D BUILD_SHARED_LIBS=ON
+@endcode
+
+
+@subsection compile_options_shared Shared CMake options
+
+@anchor BUILD_SHARED_LIBS
+__BUILD_SHARED_LIBS__ determines whether GLFW is built as a static library or as
+a DLL / shared library / dynamic library. This is disabled by default,
+producing a static GLFW library. This variable has no `GLFW_` prefix because it
+is defined by CMake. If you want to change the library only for GLFW when it is
+part of a larger project, see @ref GLFW_LIBRARY_TYPE.
+
+@anchor GLFW_LIBRARY_TYPE
+__GLFW_LIBRARY_TYPE__ allows you to override @ref BUILD_SHARED_LIBS only for
+GLFW, without affecting other libraries in a larger project. When set, the
+value of this option must be a valid CMake library type. Set it to `STATIC` to
+build GLFW as a static library, `SHARED` to build it as a shared library
+/ dynamic library / DLL, or `OBJECT` to make GLFW a CMake object library.
+
+@anchor GLFW_BUILD_EXAMPLES
+__GLFW_BUILD_EXAMPLES__ determines whether the GLFW examples are built
+along with the library. This is enabled by default unless GLFW is being built
+as a sub-project of a larger CMake project.
+
+@anchor GLFW_BUILD_TESTS
+__GLFW_BUILD_TESTS__ determines whether the GLFW test programs are
+built along with the library. This is enabled by default unless GLFW is being
+built as a sub-project of a larger CMake project.
+
+@anchor GLFW_BUILD_DOCS
+__GLFW_BUILD_DOCS__ determines whether the GLFW documentation is built along
+with the library. This is enabled by default if
+[Doxygen](https://www.doxygen.nl/) is found by CMake during configuration.
+
+
+@subsection compile_options_win32 Win32 specific CMake options
+
+@anchor GLFW_BUILD_WIN32
+__GLFW_BUILD_WIN32__ determines whether to include support for Win32 when compiling the
+library. This option is only available when compiling for Windows. This is enabled by
+default.
+
+@anchor USE_MSVC_RUNTIME_LIBRARY_DLL
+__USE_MSVC_RUNTIME_LIBRARY_DLL__ determines whether to use the DLL version or the
+static library version of the Visual C++ runtime library. When enabled, the
+DLL version of the Visual C++ library is used. This is enabled by default.
+
+On CMake 3.15 and later you can set the standard CMake
+[CMAKE_MSVC_RUNTIME_LIBRARY](https://cmake.org/cmake/help/latest/variable/CMAKE_MSVC_RUNTIME_LIBRARY.html)
+variable instead of this GLFW-specific option.
+
+@anchor GLFW_USE_HYBRID_HPG
+__GLFW_USE_HYBRID_HPG__ determines whether to export the `NvOptimusEnablement` and
+`AmdPowerXpressRequestHighPerformance` symbols, which force the use of the
+high-performance GPU on Nvidia Optimus and AMD PowerXpress systems. These symbols
+need to be exported by the EXE to be detected by the driver, so the override
+will not work if GLFW is built as a DLL. This is disabled by default, letting
+the operating system and driver decide.
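+
+If GLFW is built as a DLL, an application can still opt in by exporting the
+symbols from its own executable. A minimal sketch, assuming MSVC on Windows and
+that `<windows.h>` has been included for `DWORD`:
+
+@code
+__declspec(dllexport) DWORD NvOptimusEnablement = 1;
+__declspec(dllexport) int AmdPowerXpressRequestHighPerformance = 1;
+@endcode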
+
+
+@subsection compile_options_macos macOS specific CMake options
+
+@anchor GLFW_BUILD_COCOA
+__GLFW_BUILD_COCOA__ determines whether to include support for Cocoa when compiling the
+library. This option is only available when compiling for macOS. This is enabled by
+default.
+
+
+@subsection compile_options_unix Unix-like system specific CMake options
+
+@anchor GLFW_BUILD_WAYLAND
+__GLFW_BUILD_WAYLAND__ determines whether to include support for Wayland when compiling
+the library. This option is only available when compiling for Linux and other Unix-like
+systems other than macOS. This is disabled by default.
+
+@anchor GLFW_BUILD_X11
+__GLFW_BUILD_X11__ determines whether to include support for X11 when compiling the
+library. This option is only available when compiling for Linux and other Unix-like
+systems other than macOS. This is enabled by default.
+
+
+@section compile_mingw_cross Cross-compilation with CMake and MinGW
+
+Both Cygwin and many Linux distributions have MinGW or MinGW-w64 packages. For
+example, Cygwin has the `mingw64-i686-gcc` and `mingw64-x86_64-gcc` packages
+for 32- and 64-bit versions of MinGW-w64, while Debian GNU/Linux and derivatives
+like Ubuntu have the `mingw-w64` package for both.
+
+GLFW has CMake toolchain files in the `CMake` subdirectory that set up
+cross-compilation of Windows binaries. To use these files you set the
+`CMAKE_TOOLCHAIN_FILE` CMake variable with the `-D` flag when
+configuring and generating the build files.
+
+@code{.sh}
+cmake -S path/to/glfw -B path/to/build -D CMAKE_TOOLCHAIN_FILE=path/to/file
+@endcode
+
+The exact toolchain file to use depends on the prefix used by the MinGW or
+MinGW-w64 binaries on your system. You can usually see this in the /usr
+directory. For example, both the Ubuntu and Cygwin MinGW-w64 packages have
+`/usr/x86_64-w64-mingw32` for the 64-bit compilers, so the correct invocation
+would be:
+
+@code{.sh}
+cmake -S path/to/glfw -B path/to/build -D CMAKE_TOOLCHAIN_FILE=CMake/x86_64-w64-mingw32.cmake
+@endcode
+
+The path to the toolchain file is relative to the path to the GLFW source tree
+passed to the `-S` flag, not to the current directory.
+
+For more details see the
+[CMake toolchain guide](https://cmake.org/cmake/help/latest/manual/cmake-toolchains.7.html).
+
+
+@section compile_manual Compiling GLFW manually
+
+If you wish to compile GLFW without its CMake build environment then you will have to do
+at least some of the platform detection yourself. There are preprocessor macros for
+enabling support for the platforms (window systems) available. There are also optional,
+platform-specific macros for various features.
+
+When building, GLFW will expect the necessary configuration macros to be defined
+on the command-line. The GLFW CMake files set these as private compile
+definitions on the GLFW target but if you compile the GLFW sources manually you
+will need to define them yourself.
+
+The window system is used to create windows and to handle input, monitors, gamma ramps
+and the clipboard. The options are:
+
+ - @b _GLFW_COCOA to use the Cocoa frameworks
+ - @b _GLFW_WIN32 to use the Win32 API
+ - @b _GLFW_X11 to use the X Window System
+ - @b _GLFW_WAYLAND to use the Wayland API (incomplete)
+
+The @b _GLFW_WAYLAND and @b _GLFW_X11 macros may be combined and produce a library that
+attempts to detect the appropriate platform at initialization.
+
+If you are building GLFW as a shared library / dynamic library / DLL then you
+must also define @b _GLFW_BUILD_DLL. Otherwise, you must not define it.
+
+If you are using a custom name for the Vulkan, EGL, GLX, OSMesa, OpenGL, GLESv1
+or GLESv2 library, you can override the default names by defining those you need
+of @b _GLFW_VULKAN_LIBRARY, @b _GLFW_EGL_LIBRARY, @b _GLFW_GLX_LIBRARY, @b
+_GLFW_OSMESA_LIBRARY, @b _GLFW_OPENGL_LIBRARY, @b _GLFW_GLESV1_LIBRARY and @b
+_GLFW_GLESV2_LIBRARY. Otherwise, GLFW will use the built-in default names.
+
+@note None of the @ref build_macros may be defined during the compilation of
+GLFW. If you define any of these in your build files, make sure they are not
+applied to the GLFW sources.
+
+*/
diff --git a/chromium/third_party/dawn/third_party/glfw/docs/context.dox b/chromium/third_party/dawn/third_party/glfw/docs/context.dox
new file mode 100644
index 00000000000..c51e268cb3e
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/docs/context.dox
@@ -0,0 +1,342 @@
+/*!
+
+@page context_guide Context guide
+
+@tableofcontents
+
+This guide introduces the OpenGL and OpenGL ES context related functions of
+GLFW. For details on a specific function in this category, see the @ref
+context. There are also guides for the other areas of the GLFW API.
+
+ - @ref intro_guide
+ - @ref window_guide
+ - @ref vulkan_guide
+ - @ref monitor_guide
+ - @ref input_guide
+
+
+@section context_object Context objects
+
+A window object encapsulates both a top-level window and an OpenGL or OpenGL ES
+context. It is created with @ref glfwCreateWindow and destroyed with @ref
+glfwDestroyWindow or @ref glfwTerminate. See @ref window_creation for more
+information.
+
+As the window and context are inseparably linked, the window object also serves
+as the context handle.
+
+To test the creation of various kinds of contexts and see their properties, run
+the `glfwinfo` test program.
+
+@note Vulkan does not have a context and the Vulkan instance is created via the
+Vulkan API itself. If you will be using Vulkan to render to a window, disable
+context creation by setting the [GLFW_CLIENT_API](@ref GLFW_CLIENT_API_hint)
+hint to `GLFW_NO_API`. For more information, see the @ref vulkan_guide.
+
+
+@subsection context_hints Context creation hints
+
+There are a number of hints, specified using @ref glfwWindowHint, related to
+what kind of context is created. See
+[context related hints](@ref window_hints_ctx) in the window guide.
+
+
+@subsection context_sharing Context object sharing
+
+When creating a window and its OpenGL or OpenGL ES context with @ref
+glfwCreateWindow, you can specify another window whose context the new one
+should share its objects (textures, vertex and element buffers, etc.) with.
+
+@code
+GLFWwindow* second_window = glfwCreateWindow(640, 480, "Second Window", NULL, first_window);
+@endcode
+
+Object sharing is implemented by the operating system and graphics driver. On
+platforms where it is possible to choose which types of objects are shared, GLFW
+requests that all types are shared.
+
+See the relevant chapter of the [OpenGL](https://www.opengl.org/registry/) or
+[OpenGL ES](https://www.khronos.org/opengles/) reference documents for more
+information. The name and number of this chapter unfortunately varies between
+versions and APIs, but has at times been named _Shared Objects and Multiple
+Contexts_.
+
+GLFW comes with a barebones object sharing example program called `sharing`.
+
+
+@subsection context_offscreen Offscreen contexts
+
+GLFW doesn't support creating contexts without an associated window. However,
+contexts with hidden windows can be created with the
+[GLFW_VISIBLE](@ref GLFW_VISIBLE_hint) window hint.
+
+@code
+glfwWindowHint(GLFW_VISIBLE, GLFW_FALSE);
+
+GLFWwindow* offscreen_context = glfwCreateWindow(640, 480, "", NULL, NULL);
+@endcode
+
+The window never needs to be shown and its context can be used as a plain
+offscreen context. Depending on the window manager, the size of a hidden
+window's framebuffer may not be usable or modifiable, so framebuffer
+objects are recommended for rendering with such contexts.
+
+You should still [process events](@ref events) as long as you have at least one
+window, even if none of them are visible.
+
+
+@subsection context_less Windows without contexts
+
+You can disable context creation by setting the
+[GLFW_CLIENT_API](@ref GLFW_CLIENT_API_hint) hint to `GLFW_NO_API`. Windows
+without contexts must not be passed to @ref glfwMakeContextCurrent or @ref
+glfwSwapBuffers.
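+
+For example, a window intended for Vulkan or another non-OpenGL API is created
+without a context like this:
+
+@code
+glfwWindowHint(GLFW_CLIENT_API, GLFW_NO_API);
+
+GLFWwindow* window = glfwCreateWindow(640, 480, "My Window", NULL, NULL);
+@endcode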
+
+
+@section context_current Current context
+
+Before you can make OpenGL or OpenGL ES calls, you need to have a current
+context of the correct type. A context can only be current for a single thread
+at a time, and a thread can only have a single context current at a time.
+
+When moving a context between threads, you must make it non-current on the old
+thread before making it current on the new one.
+
+The context of a window is made current with @ref glfwMakeContextCurrent.
+
+@code
+glfwMakeContextCurrent(window);
+@endcode
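+
+When moving a context to another thread, a minimal sketch of the hand-over,
+assuming the two threads are otherwise synchronized, is to detach the context
+first by passing `NULL`:
+
+@code
+// On the thread that currently owns the context
+glfwMakeContextCurrent(NULL);
+
+// On the thread that takes over the context
+glfwMakeContextCurrent(window);
+@endcode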
+
+The window of the current context is returned by @ref glfwGetCurrentContext.
+
+@code
+GLFWwindow* window = glfwGetCurrentContext();
+@endcode
+
+The following GLFW functions require a context to be current. Calling any of these
+functions without a current context will generate a @ref GLFW_NO_CURRENT_CONTEXT
+error.
+
+ - @ref glfwSwapInterval
+ - @ref glfwExtensionSupported
+ - @ref glfwGetProcAddress
+
+
+@section context_swap Buffer swapping
+
+See @ref buffer_swap in the window guide.
+
+
+@section context_glext OpenGL and OpenGL ES extensions
+
+One of the benefits of OpenGL and OpenGL ES is their extensibility.
+Hardware vendors may include extensions in their implementations that extend the
+API before that functionality is included in a new version of the OpenGL or
+OpenGL ES specification, and some extensions are never included and remain
+as extensions until they become obsolete.
+
+An extension is defined by:
+
+- An extension name (e.g. `GL_ARB_gl_spirv`)
+- New OpenGL tokens (e.g. `GL_SPIR_V_BINARY_ARB`)
+- New OpenGL functions (e.g. `glSpecializeShaderARB`)
+
+Note the `ARB` affix, which stands for Architecture Review Board and is used
+for official extensions. The extension above was created by the ARB, but there
+are many different affixes, like `NV` for Nvidia and `AMD` for, well, AMD. Any
+group may also use the generic `EXT` affix. Lists of extensions, together with
+their specifications, can be found at the
+[OpenGL Registry](https://www.opengl.org/registry/) and
+[OpenGL ES Registry](https://www.khronos.org/registry/gles/).
+
+
+@subsection context_glext_auto Loading extension with a loader library
+
+An extension loader library is the easiest and best way to access both OpenGL and
+OpenGL ES extensions and modern versions of the core OpenGL or OpenGL ES APIs.
+They will take care of all the details of declaring and loading everything you
+need. One such library is [glad](https://github.com/Dav1dde/glad) and there are
+several others.
+
+The following example will use glad but all extension loader libraries work
+similarly.
+
+First you need to generate the source files using the glad Python script. This
+example generates a loader for any version of OpenGL, which is the default for
+both GLFW and glad, but loaders for OpenGL ES, as well as loaders for specific
+API versions and extension sets can be generated. The generated files are
+written to the `output` directory.
+
+@code{.sh}
+python main.py --generator c --no-loader --out-path output
+@endcode
+
+The `--no-loader` option is added because GLFW already provides a function for
+loading OpenGL and OpenGL ES function pointers, one that automatically uses the
+selected context creation API, and glad can call this instead of having to
+implement its own. There are several other command-line options as well. See
+the glad documentation for details.
+
+Add the generated `output/src/glad.c`, `output/include/glad/glad.h` and
+`output/include/KHR/khrplatform.h` files to your build. Then you need to
+include the glad header file, which will replace the OpenGL header of your
+development environment. Including the glad header before the GLFW header
+suppresses the development environment's OpenGL or OpenGL ES header.
+
+@code
+#include <glad/glad.h>
+#include <GLFW/glfw3.h>
+@endcode
+
+Finally you need to initialize glad once you have a suitable current context.
+
+@code
+window = glfwCreateWindow(640, 480, "My Window", NULL, NULL);
+if (!window)
+{
+ ...
+}
+
+glfwMakeContextCurrent(window);
+
+gladLoadGLLoader((GLADloadproc) glfwGetProcAddress);
+@endcode
+
+Once glad has been loaded, you have access to all OpenGL core and extension
+functions supported by both the context you created and the glad loader you
+generated and you are ready to start rendering.
+
+You can specify a minimum required OpenGL or OpenGL ES version with
+[context hints](@ref window_hints_ctx). If your needs are more complex, you can
+check the actual OpenGL or OpenGL ES version with
+[context attributes](@ref window_attribs_ctx), or you can check whether
+a specific version is supported by the current context with the
+`GLAD_GL_VERSION_x_x` booleans.
+
+@code
+if (GLAD_GL_VERSION_3_2)
+{
+ // Call OpenGL 3.2+ specific code
+}
+@endcode
+
+To check whether a specific extension is supported, use the `GLAD_GL_xxx`
+booleans.
+
+@code
+if (GLAD_GL_ARB_gl_spirv)
+{
+ // Use GL_ARB_gl_spirv
+}
+@endcode
+
+
+@subsection context_glext_manual Loading extensions manually
+
+__Do not use this technique__ unless it is absolutely necessary. An
+[extension loader library](@ref context_glext_auto) will save you a ton of
+tedious, repetitive, error prone work.
+
+To use a certain extension, you must first check whether the context supports
+that extension and then, if it introduces new functions, retrieve the pointers
+to those functions. GLFW provides @ref glfwExtensionSupported and @ref
+glfwGetProcAddress for manual loading of extensions and new API functions.
+
+This section will demonstrate manual loading of OpenGL extensions. The loading
+of OpenGL ES extensions is identical except for the name of the extension header.
+
+
+@subsubsection context_glext_header The glext.h header
+
+The `glext.h` extension header is a continually updated file that defines the
+interfaces for all OpenGL extensions. The latest version of this can always be
+found at the [OpenGL Registry](https://www.opengl.org/registry/). There are also
+extension headers for the various versions of OpenGL ES at the
+[OpenGL ES Registry](https://www.khronos.org/registry/gles/). It is strongly
+recommended that you use your own copy of the extension header, as the one
+included in your development environment may be several years out of date and
+may not include the extensions you wish to use.
+
+The header defines function pointer types for all functions of all extensions it
+supports. These have names like `PFNGLSPECIALIZESHADERARBPROC` (for
+`glSpecializeShaderARB`), i.e. the name is made uppercase and `PFN` (pointer
+to function) and `PROC` (procedure) are added to the ends.
+
+To include the extension header, define @ref GLFW_INCLUDE_GLEXT before including
+the GLFW header.
+
+@code
+#define GLFW_INCLUDE_GLEXT
+#include <GLFW/glfw3.h>
+@endcode
+
+
+@subsubsection context_glext_string Checking for extensions
+
+A given machine may not actually support the extension (it may have older
+drivers or a graphics card that lacks the necessary hardware features), so it
+is necessary to check at run-time whether the context supports the extension.
+This is done with @ref glfwExtensionSupported.
+
+@code
+if (glfwExtensionSupported("GL_ARB_gl_spirv"))
+{
+ // The extension is supported by the current context
+}
+@endcode
+
+The argument is a null terminated ASCII string with the extension name. If the
+extension is supported, @ref glfwExtensionSupported returns `GLFW_TRUE`,
+otherwise it returns `GLFW_FALSE`.
+
+
+@subsubsection context_glext_proc Fetching function pointers
+
+Many extensions, though not all, require the use of new OpenGL functions.
+These functions often do not have entry points in the client API libraries of
+your operating system, making it necessary to fetch them at run time. You can
+retrieve pointers to these functions with @ref glfwGetProcAddress.
+
+@code
+PFNGLSPECIALIZESHADERARBPROC pfnSpecializeShaderARB = (PFNGLSPECIALIZESHADERARBPROC) glfwGetProcAddress("glSpecializeShaderARB");
+@endcode
+
+In general, you should avoid giving the function pointer variables the (exact)
+same name as the function, as this may confuse your linker. Instead, you can
+use a different prefix, like above, or some other naming scheme.
+
+Now that all the pieces have been introduced, here is what they might look like
+when used together.
+
+@code
+#define GLFW_INCLUDE_GLEXT
+#include <GLFW/glfw3.h>
+
+#define glSpecializeShaderARB pfnSpecializeShaderARB
+PFNGLSPECIALIZESHADERARBPROC pfnSpecializeShaderARB;
+
+// Flag indicating whether the extension is supported
+int has_ARB_gl_spirv = 0;
+
+void load_extensions(void)
+{
+ if (glfwExtensionSupported("GL_ARB_gl_spirv"))
+ {
+ pfnSpecializeShaderARB = (PFNGLSPECIALIZESHADERARBPROC)
+ glfwGetProcAddress("glSpecializeShaderARB");
+ has_ARB_gl_spirv = 1;
+ }
+}
+
+void some_function(void)
+{
+ if (has_ARB_gl_spirv)
+ {
+ // Now the extension function can be called as usual
+ glSpecializeShaderARB(...);
+ }
+}
+@endcode
+
+*/
diff --git a/chromium/third_party/dawn/third_party/glfw/docs/extra.css b/chromium/third_party/dawn/third_party/glfw/docs/extra.css
new file mode 100644
index 00000000000..1a2873433f9
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/docs/extra.css
@@ -0,0 +1,2 @@
+.sm-dox,.sm-dox a,.sm-dox a:focus,.sm-dox a:active,.sm-dox a:hover,.sm-dox a.highlighted,.sm-dox ul a:hover{background:none;text-shadow:none}.sm-dox a span.sub-arrow{border-color:#f2f2f2 transparent transparent transparent}.sm-dox a span.sub-arrow:active,.sm-dox a span.sub-arrow:focus,.sm-dox a span.sub-arrow:hover,.sm-dox a:hover span.sub-arrow{border-color:#f60 transparent transparent transparent}.sm-dox ul a span.sub-arrow:active,.sm-dox ul a span.sub-arrow:focus,.sm-dox ul a span.sub-arrow:hover,.sm-dox ul a:hover span.sub-arrow{border-color:transparent transparent transparent #f60}.sm-dox ul a:hover{background:#666;text-shadow:none}.sm-dox ul.sm-nowrap a{color:#4d4d4d;text-shadow:none}#main-nav,#main-menu,#main-menu a,#main-menu a:visited,#main-menu a:hover,#main-menu li,.memdoc,dl.reflist dd,div.toc li,.ah,span.lineno,span.lineno a,span.lineno a:hover,.note code,.pre code,.post code,.invariant code,.warning code,.attention code,.deprecated code,.bug code,.todo code,.test code,.doxtable code,.markdownTable code{background:none}#titlearea,.footer,.contents,div.header,.memdoc,table.doxtable td,table.doxtable th,table.markdownTable td,table.markdownTable th,hr,.memSeparator{border:none}#main-menu a,#main-menu a:visited,#main-menu a:hover,#main-menu li,.reflist dt a.el,.levels span,.directory .levels span{text-shadow:none}.memdoc,dl.reflist dd{box-shadow:none}div.headertitle,.note code,.pre code,.post code,.invariant code,.warning code,.attention code,.deprecated code,.bug code,.todo code,.test code,table.doxtable code,table.markdownTable code{padding:0}#nav-path,.directory .levels,span.lineno{display:none}html,#titlearea,.footer,tr.even,.directory tr.even,.doxtable tr:nth-child(even),tr.markdownTableBody:nth-child(even),.mdescLeft,.mdescRight,.memItemLeft,.memItemRight,code,.markdownTableRowEven{background:#f2f2f2}body{color:#4d4d4d}div.title{font-size:170%;margin:1em 0 0.5em 0}h1,h2,h2.groupheader,h3,div.toc h3,h4,h5,h6,strong,em{color:#1a1a1a;border-bottom:none}h1{padding-top:0.5em;font-size:150%}h2{padding-top:0.5em;margin-bottom:0;font-size:130%}h3{padding-top:0.5em;margin-bottom:0;font-size:110%}.glfwheader{font-size:16px;min-height:64px;max-width:920px;padding:0 32px;margin:0 auto;display:flex;flex-direction:row;flex-wrap:wrap;justify-content:flex-start;align-items:center;align-content:stretch}#glfwhome{line-height:64px;padding-right:48px;color:#666;font-size:2.5em;background:url("https://www.glfw.org/css/arrow.png") no-repeat right}.glfwnavbar{list-style-type:none;margin:0 0 0 auto;float:right}#glfwhome,.glfwnavbar li{float:left}.glfwnavbar a,.glfwnavbar a:visited{line-height:64px;margin-left:2em;display:block;color:#666}.glfwnavbar{padding-left:0}#glfwhome,.glfwnavbar a,.glfwnavbar a:visited{transition:.35s ease}#titlearea,.footer{color:#666}address.footer{text-align:center;padding:2em;margin-top:3em}#top{background:#666}#main-nav{max-width:960px;margin:0 auto;font-size:13px}#main-menu{max-width:920px;margin:0 auto;font-size:13px}.memtitle{display:none}.memproto,.memname{font-weight:bold;text-shadow:none}#main-menu{min-height:36px;display:flex;flex-direction:row;flex-wrap:wrap;justify-content:flex-start;align-items:center;align-content:stretch}#main-menu a,#main-menu a:visited,#main-menu a:hover,#main-menu li{color:#f2f2f2}#main-menu li ul.sm-nowrap li a{color:#4d4d4d}#main-menu li ul.sm-nowrap li a:hover{color:#f60}#main-menu>li:last-child{margin:0 0 0 auto}.contents{min-height:590px}div.contents,div.header{max-width:920px;margin:0 auto;padding:0 32px;background:#fff 
none}table.doxtable th,table.markdownTable th,dl.reflist dt{background:linear-gradient(to bottom, #ffa733 0%, #f60 100%);box-shadow:inset 0 0 32px #f60;text-shadow:0 -1px 1px #b34700;text-align:left;color:#fff}dl.reflist dt a.el{color:#f60;padding:.2em;border-radius:4px;background-color:#ffe0cc}div.toc{float:right;width:35%}@media screen and (max-width: 600px){div.toc{float:none;width:inherit;margin:0}}div.toc h3{font-size:1.17em}div.toc ul{padding-left:1.5em}div.toc li{font-size:1em;padding-left:0;list-style-type:disc}div.toc li.level2,div.toc li.level3{margin-left:0.5em}div.toc,.memproto,div.qindex,div.ah{background:linear-gradient(to bottom, #f2f2f2 0%, #e6e6e6 100%);box-shadow:inset 0 0 32px #e6e6e6;text-shadow:0 1px 1px #fff;color:#1a1a1a;border:2px solid #e6e6e6;border-radius:4px}.paramname{color:#803300}dl.reflist dt{border:2px solid #f60;border-top-left-radius:4px;border-top-right-radius:4px;border-bottom:none}dl.reflist dd{border:2px solid #f60;border-bottom-right-radius:4px;border-bottom-left-radius:4px;border-top:none}table.doxtable,table.markdownTable{border-collapse:inherit;border-spacing:0;border:2px solid #f60;border-radius:4px}a,a:hover,a:visited,a:visited:hover,.contents a:visited,.el,a.el:visited,#glfwhome:hover,#main-menu a:hover,span.lineno a:hover{color:#f60;text-decoration:none}div.directory{border-collapse:inherit;border-spacing:0;border:2px solid #f60;border-radius:4px}hr,.memSeparator{height:2px;background:linear-gradient(to right, #f2f2f2 0%, #d9d9d9 50%, #f2f2f2 100%)}dl.note,dl.pre,dl.post,dl.invariant{background:linear-gradient(to bottom, #ddfad1 0%, #cbf7ba 100%);box-shadow:inset 0 0 32px #baf5a3;color:#1e5309;border:2px solid #afe699}dl.warning,dl.attention{background:linear-gradient(to bottom, #fae8d1 0%, #f7ddba 100%);box-shadow:inset 0 0 32px #f5d1a3;color:#533309;border:2px solid #e6c499}dl.deprecated,dl.bug{background:linear-gradient(to bottom, #fad1e3 0%, #f7bad6 100%);box-shadow:inset 0 0 32px #f5a3c8;color:#53092a;border:2px solid #e699bb}dl.todo,dl.test{background:linear-gradient(to bottom, #d1ecfa 0%, #bae3f7 100%);box-shadow:inset 0 0 32px #a3daf5;color:#093a53;border:2px solid #99cce6}dl.note,dl.pre,dl.post,dl.invariant,dl.warning,dl.attention,dl.deprecated,dl.bug,dl.todo,dl.test{border-radius:4px;padding:1em;text-shadow:0 1px 1px #fff;margin:1em 0}.note a,.pre a,.post a,.invariant a,.warning a,.attention a,.deprecated a,.bug a,.todo a,.test a,.note a:visited,.pre a:visited,.post a:visited,.invariant a:visited,.warning a:visited,.attention a:visited,.deprecated a:visited,.bug a:visited,.todo a:visited,.test a:visited{color:inherit}div.line{line-height:inherit}div.fragment,pre.fragment{background:#f2f2f2;border-radius:4px;border:none;padding:1em;overflow:auto;border-left:4px solid #ccc;margin:1em 0}.lineno a,.lineno a:visited,.line,pre.fragment{color:#4d4d4d}span.preprocessor,span.comment{color:#007899}a.code,a.code:visited{color:#e64500}span.keyword,span.keywordtype,span.keywordflow{color:#404040;font-weight:bold}span.stringliteral{color:#360099}code{padding:.1em;border-radius:4px}
+/*# sourceMappingURL=extra.css.map */
diff --git a/chromium/third_party/dawn/third_party/glfw/docs/extra.css.map b/chromium/third_party/dawn/third_party/glfw/docs/extra.css.map
new file mode 100644
index 00000000000..4d9333c2fec
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/docs/extra.css.map
@@ -0,0 +1,7 @@
+{
+"version": 3,
+"mappings": "AA8EA,2GAA4G,CAC3G,UAAU,CAAC,IAAI,CACf,WAAW,CAAC,IAAI,CAGjB,wBAAyB,CACxB,YAAY,CAAC,2CAAsD,CAGpE,4HAA6H,CAC5H,YAAY,CAAC,wCAAuD,CAGrE,wIAAyI,CACxI,YAAY,CAAC,wCAAuD,CAGrE,kBAAmB,CAClB,UAAU,CA9EgB,IAAa,CA+EvC,WAAW,CAAC,IAAI,CAGjB,sBAAuB,CACtB,KAAK,CAzFe,OAAa,CA0FjC,WAAW,CAAC,IAAI,CAGjB,4UAA6U,CAC5U,UAAU,CAAC,IAAI,CAGhB,kJAAmJ,CAClJ,MAAM,CAAC,IAAI,CAGZ,wHAAyH,CACxH,WAAW,CAAC,IAAI,CAGjB,qBAAsB,CACrB,UAAU,CAAC,IAAI,CAGhB,2LAA4L,CAC3L,OAAO,CAAC,CAAC,CAGV,wCAAyC,CACxC,OAAO,CAAC,IAAI,CAGb,iMAAkM,CACjM,UAAU,CApGW,OAA+B,CAuGrD,IAAK,CACJ,KAAK,CA1He,OAAa,CA6HlC,SAAU,CACN,SAAS,CAAE,IAAI,CACf,MAAM,CAAE,aAAa,CAGzB,qDAAsD,CACrD,KAAK,CApHU,OAAa,CAqH5B,aAAa,CAAC,IAAI,CAGnB,EAAG,CACF,WAAW,CAAC,KAAK,CACjB,SAAS,CAAC,IAAI,CAGf,EAAG,CACF,WAAW,CAAC,KAAK,CACjB,aAAa,CAAC,CAAC,CACf,SAAS,CAAC,IAAI,CAGf,EAAG,CACF,WAAW,CAAC,KAAK,CACjB,aAAa,CAAC,CAAC,CACf,SAAS,CAAC,IAAI,CAGf,WAAY,CACX,SAAS,CAAC,IAAI,CACd,UAAU,CAAC,IAAI,CACf,SAAS,CAAC,KAAK,CACf,OAAO,CAAC,MAAM,CACd,MAAM,CAAC,MAAM,CAEb,OAAO,CAAE,IAAI,CACb,cAAc,CAAE,GAAG,CACnB,SAAS,CAAE,IAAI,CACf,eAAe,CAAE,UAAU,CAC3B,WAAW,CAAE,MAAM,CACnB,aAAa,CAAE,OAAO,CAGvB,SAAU,CACT,WAAW,CAAC,IAAI,CAChB,aAAa,CAAC,IAAI,CAClB,KAAK,CApKqB,IAAa,CAqKvC,SAAS,CAAC,KAAK,CACf,UAAU,CAAC,yDAAyD,CAGrE,WAAY,CACX,eAAe,CAAC,IAAI,CACpB,MAAM,CAAC,UAAU,CACjB,KAAK,CAAC,KAAK,CAGZ,wBAAyB,CACxB,KAAK,CAAC,IAAI,CAGX,mCAAoC,CACnC,WAAW,CAAC,IAAI,CAChB,WAAW,CAAC,GAAG,CACf,OAAO,CAAC,KAAK,CACb,KAAK,CAvLqB,IAAa,CA0LxC,WAAY,CACX,YAAY,CAAE,CAAC,CAGhB,6CAA8C,CAC7C,UAAU,CAAC,SAAS,CAGrB,kBAAmB,CAClB,KAAK,CAnMqB,IAAa,CAsMxC,cAAe,CACd,UAAU,CAAC,MAAM,CACjB,OAAO,CAAC,GAAG,CACX,UAAU,CAAC,GAAG,CAGf,IAAK,CACJ,UAAU,CA7MgB,IAAa,CAgNxC,SAAU,CACT,SAAS,CAAC,KAAK,CACf,MAAM,CAAC,MAAM,CACb,SAAS,CAAC,IAAI,CAGf,UAAW,CACV,SAAS,CAAC,KAAK,CACf,MAAM,CAAC,MAAM,CACb,SAAS,CAAC,IAAI,CAGf,SAAU,CACT,OAAO,CAAC,IAAI,CAGb,kBAAmB,CAClB,WAAW,CAAC,IAAI,CAChB,WAAW,CAAC,IAAI,CAGjB,UAAW,CACV,UAAU,CAAC,IAAI,CACf,OAAO,CAAE,IAAI,CACb,cAAc,CAAE,GAAG,CACnB,SAAS,CAAE,IAAI,CACf,eAAe,CAAE,UAAU,CAC3B,WAAW,CAAE,MAAM,CACnB,aAAa,CAAE,OAAO,CAGvB,kEAAmE,CAClE,KAAK,CApOgB,OAA+B,CAuOrD,+BAAgC,CAC/B,KAAK,CA1Pe,OAAa,CA6PlC,qCAAsC,CACrC,KAAK,CA1NoB,IAAsB,CA6NhD,wBAA2B,CAC1B,MAAM,CAAE,UAAU,CAGnB,SAAU,CACT,UAAU,CAAC,KAAK,CAGjB,uBAAwB,CACvB,SAAS,CAAC,KAAK,CACf,MAAM,CAAC,MAAM,CACb,OAAO,CAAC,MAAM,CACd,UAAU,CAAC,SAA8B,CAG1C,sDAAuD,CACtD,UAAU,CAAC,iDAAoF,CAC/F,UAAU,CAAC,mBAAuC,CAClD,WAAW,CAAC,kBAAgD,CAC5D,UAAU,CAAC,IAAI,CACf,KAAK,CAlPa,IAAe,CAqPlC,kBAAmB,CAClB,KAAK,CArPoB,IAAsB,CAsP/C,OAAO,CAAC,IAAI,CACZ,aAAa,CAAC,GAAG,CACjB,gBAAgB,CAAC,OAAiC,CAGnD,OAAQ,CACP,KAAK,CAAC,KAAK,CACX,KAAK,CAAC,GAAG,CAGV,oCAAoC,CACnC,OAAQ,CACP,KAAK,CAAC,IAAI,CACV,KAAK,CAAC,OAAO,CACb,MAAM,CAAC,CAAC,EAIV,UAAW,CACV,SAAS,CAAC,MAAM,CAGjB,UAAW,CACV,YAAY,CAAC,KAAK,CAGnB,UAAW,CACV,SAAS,CAAC,GAAG,CACb,YAAY,CAAC,CAAC,CACd,eAAe,CAAC,IAAI,CAIjB,mCAAqB,CACjB,WAAW,CAAC,KAAK,CAIzB,mCAAoC,CACnC,UAAU,CAAC,oDAAgF,CAC3F,UAAU,CAAC,sBAAqC,CAChD,WAAW,CAAC,cAA8C,CAC1D,KAAK,CArTU,OAAa,CAsT5B,MAAM,CAAC,iBAAgC,CACvC,aAAa,CAAC,GAAG,CAGlB,UAAW,CACV,KAAK,CA9RkB,OAAgC,CAiSxD,aAAc,CACb,MAAM,CAAC,cAA+B,CACtC,sBAAsB,CAAC,GAAG,CAC1B,uBAAuB,CAAC,GAAG,CAC3B,aAAa,CAAC,IAAI,CAGnB,aAAc,CACb,MAAM,CAAC,cAA+B,CACtC,0BAA0B,CAAC,GAAG,CAC9B,yBAAyB,CAAC,GAAG,CAC7B,UAAU,CAAC,IAAI,CAGhB,kCAAmC,CAClC,eAAe,CAAC,OAAO,CACvB,cAAc,CAAC,CAAC,CAChB,MAAM,CAAC,cAA+B,CACtC,aAAa,CAAC,GAAG,CAGlB,+HAAgI,CAC/H,KAAK,CA/ToB,IAAsB,CAgU/C,eAAe,CAAC,IAAI,CAGrB,aAAc,CACb,eAAe,CAAC,OAAO,CACvB,cAAc,CAAC,CAAC,CAChB,MAAM,CAAC,cAA+B,CACtC,aAAa,CAAC,GAAG,CAGlB,gBAAiB,CAChB,MAAM,CAAC,GAAG,CACV,UAAU,CAAC,gEAAiH,CAG7H,mCAAoC,CAvTnC,UAAU,CAAC,oDAAuE,CAClF,UAAU,CAAC,sBAAsC,C
ACjD,KAAK,CAAC,OAAwB,CAC9B,MAAM,CAAC,iBAAmD,CAwT3D,uBAAwB,CA3TvB,UAAU,CAAC,oDAAuE,CAClF,UAAU,CAAC,sBAAsC,CACjD,KAAK,CAAC,OAAwB,CAC9B,MAAM,CAAC,iBAAmD,CA4T3D,oBAAqB,CA/TpB,UAAU,CAAC,oDAAuE,CAClF,UAAU,CAAC,sBAAsC,CACjD,KAAK,CAAC,OAAwB,CAC9B,MAAM,CAAC,iBAAmD,CAgU3D,eAAgB,CAnUf,UAAU,CAAC,oDAAuE,CAClF,UAAU,CAAC,sBAAsC,CACjD,KAAK,CAAC,OAAwB,CAC9B,MAAM,CAAC,iBAAmD,CAoU3D,gGAAiG,CAChG,aAAa,CAAC,GAAG,CACjB,OAAO,CAAC,GAAG,CACX,WAAW,CAAC,cAAwB,CACpC,MAAM,CAAC,KAAK,CAGb,iRAAkR,CACjR,KAAK,CAAC,OAAO,CAGd,QAAS,CACR,WAAW,CAAC,OAAO,CAGpB,yBAA0B,CACzB,UAAU,CAAC,OAAa,CACxB,aAAa,CAAC,GAAG,CACjB,MAAM,CAAC,IAAI,CACX,OAAO,CAAC,GAAG,CACX,QAAQ,CAAC,IAAI,CACb,WAAW,CAAC,cAAuB,CACnC,MAAM,CAAC,KAAK,CAGb,8CAA+C,CAC9C,KAAK,CA7Ze,OAAa,CAgalC,8BAA+B,CAC9B,KAAK,CAAC,OAAiB,CAGxB,qBAAsB,CACrB,KAAK,CAAC,OAAgB,CAGvB,8CAA+C,CAC9C,KAAK,CAAC,OAA+B,CACrC,WAAW,CAAC,IAAI,CAGjB,kBAAmB,CAClB,KAAK,CAAC,OAAiB,CAGxB,IAAK,CACJ,OAAO,CAAC,IAAI,CACZ,aAAa,CAAC,GAAG",
+"sources": ["extra.scss"],
+"names": [],
+"file": "extra.css"
+}
diff --git a/chromium/third_party/dawn/third_party/glfw/docs/extra.scss b/chromium/third_party/dawn/third_party/glfw/docs/extra.scss
new file mode 100644
index 00000000000..43fe98314ef
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/docs/extra.scss
@@ -0,0 +1,449 @@
+// NOTE: Please use this file to perform modifications on default style sheets.
+//
+// You need to install the official Sass CLI tool:
+// npm install -g sass
+//
+// Run this command to regenerate extra.css after you're finished with changes:
+// sass --style=compressed extra.scss extra.css
+//
+// Alternatively you can use online services to regenerate extra.css.
+
+
+// Default text color for page contents
+$default-text-color: hsl(0,0%,30%);
+
+// Page header, footer, table rows, inline codes and definition lists
+$header-footer-background-color: hsl(0,0%,95%);
+
+// Page header, footer links and navigation bar background
+$header-footer-link-color: hsl(0,0%,40%);
+
+// Doxygen navigation bar links
+$navbar-link-color: $header-footer-background-color;
+
+// Page content background color
+$content-background-color: hsl(0,0%,100%);
+
+// Bold, italic, h1, h2, ... and table of contents
+$heading-color: hsl(0,0%,10%);
+
+// Function, enum and macro definition separator
+$def-separator-color: $header-footer-background-color;
+
+// Base color hue
+$base-hue: 24;
+
+// Default color used for links
+$default-link-color: hsl($base-hue,100%,50%);
+
+// Doxygen navigation bar active tab
+$tab-text-color: hsl(0,0%,100%);
+$tab-background-color1: $default-link-color;
+$tab-background-color2: lighten(adjust-hue($tab-background-color1, 10), 10%);
+
+// Table borders
+$default-border-color: $default-link-color;
+
+// Table header
+$table-text-color: $tab-text-color;
+$table-background-color1: $tab-background-color1;
+$table-background-color2: $tab-background-color2;
+
+// Table of contents, data structure index and prototypes
+$toc-background-color1: hsl(0,0%,90%);
+$toc-background-color2: lighten($toc-background-color1, 5%);
+
+// Function prototype parameters color
+$prototype-param-color: darken($default-link-color, 25%);
+
+// Message box color: note, pre, post and invariant
+$box-note-color: hsl(103,80%,85%);
+
+// Message box color: warning and attention
+$box-warning-color: hsl(34,80%,85%);
+
+// Message box color: deprecated and bug
+$box-bug-color: hsl(333,80%,85%);
+
+// Message box color: todo and test
+$box-todo-color: hsl(200,80%,85%);
+
+// Message box helper function
+@mixin message-box($base-color){
+ background:linear-gradient(to bottom,lighten($base-color, 5%) 0%,$base-color 100%);
+ box-shadow:inset 0 0 32px darken($base-color, 5%);
+ color:darken($base-color, 67%);
+ border:2px solid desaturate(darken($base-color, 10%), 20%);
+}
+
+.sm-dox,.sm-dox a,.sm-dox a:focus,.sm-dox a:active,.sm-dox a:hover,.sm-dox a.highlighted,.sm-dox ul a:hover {
+ background:none;
+ text-shadow:none;
+}
+
+.sm-dox a span.sub-arrow {
+ border-color:$navbar-link-color transparent transparent transparent;
+}
+
+.sm-dox a span.sub-arrow:active,.sm-dox a span.sub-arrow:focus,.sm-dox a span.sub-arrow:hover,.sm-dox a:hover span.sub-arrow {
+ border-color:$default-link-color transparent transparent transparent;
+}
+
+.sm-dox ul a span.sub-arrow:active,.sm-dox ul a span.sub-arrow:focus,.sm-dox ul a span.sub-arrow:hover,.sm-dox ul a:hover span.sub-arrow {
+ border-color:transparent transparent transparent $default-link-color;
+}
+
+.sm-dox ul a:hover {
+ background:$header-footer-link-color;
+ text-shadow:none;
+}
+
+.sm-dox ul.sm-nowrap a {
+ color:$default-text-color;
+ text-shadow:none;
+}
+
+#main-nav,#main-menu,#main-menu a,#main-menu a:visited,#main-menu a:hover,#main-menu li,.memdoc,dl.reflist dd,div.toc li,.ah,span.lineno,span.lineno a,span.lineno a:hover,.note code,.pre code,.post code,.invariant code,.warning code,.attention code,.deprecated code,.bug code,.todo code,.test code,.doxtable code,.markdownTable code {
+ background:none;
+}
+
+#titlearea,.footer,.contents,div.header,.memdoc,table.doxtable td,table.doxtable th,table.markdownTable td,table.markdownTable th,hr,.memSeparator {
+ border:none;
+}
+
+#main-menu a,#main-menu a:visited,#main-menu a:hover,#main-menu li,.reflist dt a.el,.levels span,.directory .levels span {
+ text-shadow:none;
+}
+
+.memdoc,dl.reflist dd {
+ box-shadow:none;
+}
+
+div.headertitle,.note code,.pre code,.post code,.invariant code,.warning code,.attention code,.deprecated code,.bug code,.todo code,.test code,table.doxtable code,table.markdownTable code {
+ padding:0;
+}
+
+#nav-path,.directory .levels,span.lineno {
+ display:none;
+}
+
+html,#titlearea,.footer,tr.even,.directory tr.even,.doxtable tr:nth-child(even),tr.markdownTableBody:nth-child(even),.mdescLeft,.mdescRight,.memItemLeft,.memItemRight,code,.markdownTableRowEven {
+ background:$header-footer-background-color;
+}
+
+body {
+ color:$default-text-color;
+}
+
+div.title {
+ font-size: 170%;
+ margin: 1em 0 0.5em 0;
+}
+
+h1,h2,h2.groupheader,h3,div.toc h3,h4,h5,h6,strong,em {
+ color:$heading-color;
+ border-bottom:none;
+}
+
+h1 {
+ padding-top:0.5em;
+ font-size:150%;
+}
+
+h2 {
+ padding-top:0.5em;
+ margin-bottom:0;
+ font-size:130%;
+}
+
+h3 {
+ padding-top:0.5em;
+ margin-bottom:0;
+ font-size:110%;
+}
+
+.glfwheader {
+ font-size:16px;
+ min-height:64px;
+ max-width:920px;
+ padding:0 32px;
+ margin:0 auto;
+
+ display: flex;
+ flex-direction: row;
+ flex-wrap: wrap;
+ justify-content: flex-start;
+ align-items: center;
+ align-content: stretch;
+}
+
+#glfwhome {
+ line-height:64px;
+ padding-right:48px;
+ color:$header-footer-link-color;
+ font-size:2.5em;
+ background:url("https://www.glfw.org/css/arrow.png") no-repeat right;
+}
+
+.glfwnavbar {
+ list-style-type:none;
+ margin:0 0 0 auto;
+ float:right;
+}
+
+#glfwhome,.glfwnavbar li {
+ float:left;
+}
+
+.glfwnavbar a,.glfwnavbar a:visited {
+ line-height:64px;
+ margin-left:2em;
+ display:block;
+ color:$header-footer-link-color;
+}
+
+.glfwnavbar {
+ padding-left: 0;
+}
+
+#glfwhome,.glfwnavbar a,.glfwnavbar a:visited {
+ transition:.35s ease;
+}
+
+#titlearea,.footer {
+ color:$header-footer-link-color;
+}
+
+address.footer {
+ text-align:center;
+ padding:2em;
+ margin-top:3em;
+}
+
+#top {
+ background:$header-footer-link-color;
+}
+
+#main-nav {
+ max-width:960px;
+ margin:0 auto;
+ font-size:13px;
+}
+
+#main-menu {
+ max-width:920px;
+ margin:0 auto;
+ font-size:13px;
+}
+
+.memtitle {
+ display:none;
+}
+
+.memproto,.memname {
+ font-weight:bold;
+ text-shadow:none;
+}
+
+#main-menu {
+ min-height:36px;
+ display: flex;
+ flex-direction: row;
+ flex-wrap: wrap;
+ justify-content: flex-start;
+ align-items: center;
+ align-content: stretch;
+}
+
+#main-menu a,#main-menu a:visited,#main-menu a:hover,#main-menu li {
+ color:$navbar-link-color;
+}
+
+#main-menu li ul.sm-nowrap li a {
+ color:$default-text-color;
+}
+
+#main-menu li ul.sm-nowrap li a:hover {
+ color:$default-link-color;
+}
+
+#main-menu > li:last-child {
+ margin: 0 0 0 auto;
+}
+
+.contents {
+ min-height:590px;
+}
+
+div.contents,div.header {
+ max-width:920px;
+ margin:0 auto;
+ padding:0 32px;
+ background:$content-background-color none;
+}
+
+table.doxtable th,table.markdownTable th,dl.reflist dt {
+ background:linear-gradient(to bottom,$table-background-color2 0%,$table-background-color1 100%);
+ box-shadow:inset 0 0 32px $table-background-color1;
+ text-shadow:0 -1px 1px darken($table-background-color1, 15%);
+ text-align:left;
+ color:$table-text-color;
+}
+
+dl.reflist dt a.el {
+ color:$default-link-color;
+ padding:.2em;
+ border-radius:4px;
+ background-color:lighten($default-link-color, 40%);
+}
+
+div.toc {
+ float:right;
+ width:35%;
+}
+
+@media screen and (max-width:600px) {
+ div.toc {
+ float:none;
+ width:inherit;
+ margin:0;
+ }
+}
+
+div.toc h3 {
+ font-size:1.17em;
+}
+
+div.toc ul {
+ padding-left:1.5em;
+}
+
+div.toc li {
+ font-size:1em;
+ padding-left:0;
+ list-style-type:disc;
+}
+
+div.toc {
+ li.level2, li.level3 {
+ margin-left:0.5em;
+ }
+}
+
+div.toc,.memproto,div.qindex,div.ah {
+ background:linear-gradient(to bottom,$toc-background-color2 0%,$toc-background-color1 100%);
+ box-shadow:inset 0 0 32px $toc-background-color1;
+ text-shadow:0 1px 1px lighten($toc-background-color2, 10%);
+ color:$heading-color;
+ border:2px solid $toc-background-color1;
+ border-radius:4px;
+}
+
+.paramname {
+ color:$prototype-param-color;
+}
+
+dl.reflist dt {
+ border:2px solid $default-border-color;
+ border-top-left-radius:4px;
+ border-top-right-radius:4px;
+ border-bottom:none;
+}
+
+dl.reflist dd {
+ border:2px solid $default-border-color;
+ border-bottom-right-radius:4px;
+ border-bottom-left-radius:4px;
+ border-top:none;
+}
+
+table.doxtable,table.markdownTable {
+ border-collapse:inherit;
+ border-spacing:0;
+ border:2px solid $default-border-color;
+ border-radius:4px;
+}
+
+a,a:hover,a:visited,a:visited:hover,.contents a:visited,.el,a.el:visited,#glfwhome:hover,#main-menu a:hover,span.lineno a:hover {
+ color:$default-link-color;
+ text-decoration:none;
+}
+
+div.directory {
+ border-collapse:inherit;
+ border-spacing:0;
+ border:2px solid $default-border-color;
+ border-radius:4px;
+}
+
+hr,.memSeparator {
+ height:2px;
+ background:linear-gradient(to right,$def-separator-color 0%,darken($def-separator-color, 10%) 50%,$def-separator-color 100%);
+}
+
+dl.note,dl.pre,dl.post,dl.invariant {
+ @include message-box($box-note-color);
+}
+
+dl.warning,dl.attention {
+ @include message-box($box-warning-color);
+}
+
+dl.deprecated,dl.bug {
+ @include message-box($box-bug-color);
+}
+
+dl.todo,dl.test {
+ @include message-box($box-todo-color);
+}
+
+dl.note,dl.pre,dl.post,dl.invariant,dl.warning,dl.attention,dl.deprecated,dl.bug,dl.todo,dl.test {
+ border-radius:4px;
+ padding:1em;
+ text-shadow:0 1px 1px hsl(0,0%,100%);
+ margin:1em 0;
+}
+
+.note a,.pre a,.post a,.invariant a,.warning a,.attention a,.deprecated a,.bug a,.todo a,.test a,.note a:visited,.pre a:visited,.post a:visited,.invariant a:visited,.warning a:visited,.attention a:visited,.deprecated a:visited,.bug a:visited,.todo a:visited,.test a:visited {
+ color:inherit;
+}
+
+div.line {
+ line-height:inherit;
+}
+
+div.fragment,pre.fragment {
+ background:hsl(0,0%,95%);
+ border-radius:4px;
+ border:none;
+ padding:1em;
+ overflow:auto;
+ border-left:4px solid hsl(0,0%,80%);
+ margin:1em 0;
+}
+
+.lineno a,.lineno a:visited,.line,pre.fragment {
+ color:$default-text-color;
+}
+
+span.preprocessor,span.comment {
+ color:hsl(193,100%,30%);
+}
+
+a.code,a.code:visited {
+ color:hsl(18,100%,45%);
+}
+
+span.keyword,span.keywordtype,span.keywordflow {
+ color:darken($default-text-color, 5%);
+ font-weight:bold;
+}
+
+span.stringliteral {
+ color:hsl(261,100%,30%);
+}
+
+code {
+ padding:.1em;
+ border-radius:4px;
+}
diff --git a/chromium/third_party/dawn/third_party/glfw/docs/footer.html b/chromium/third_party/dawn/third_party/glfw/docs/footer.html
new file mode 100644
index 00000000000..b0434ca1839
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/docs/footer.html
@@ -0,0 +1,7 @@
+<address class="footer">
+<p>
+Last update on $date for $projectname $projectnumber
+</p>
+</address>
+</body>
+</html>
diff --git a/chromium/third_party/dawn/third_party/glfw/docs/header.html b/chromium/third_party/dawn/third_party/glfw/docs/header.html
new file mode 100644
index 00000000000..4cefa3d09a5
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/docs/header.html
@@ -0,0 +1,34 @@
+<!DOCTYPE html>
+<html lang="en">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta name="viewport" content="width=device-width, initial-scale=1.0">
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<meta name="generator" content="Doxygen $doxygenversion"/>
+<!--BEGIN PROJECT_NAME--><title>$projectname: $title</title><!--END PROJECT_NAME-->
+<!--BEGIN !PROJECT_NAME--><title>$title</title><!--END !PROJECT_NAME-->
+<link href="$relpath^tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="$relpath^jquery.js"></script>
+<script type="text/javascript" src="$relpath^dynsections.js"></script>
+$treeview
+$search
+$mathjax
+<link href="$relpath^$stylesheet" rel="stylesheet" type="text/css" />
+$extrastylesheet
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+
+<!--BEGIN TITLEAREA-->
+<div id="titlearea">
+ <div class="glfwheader">
+ <a href="https://www.glfw.org/" id="glfwhome">GLFW</a>
+ <ul class="glfwnavbar">
+ <li><a href="https://www.glfw.org/documentation.html">Documentation</a></li>
+ <li><a href="https://www.glfw.org/download.html">Download</a></li>
+ <li><a href="https://www.glfw.org/community.html">Community</a></li>
+ </ul>
+ </div>
+</div>
+<!--END TITLEAREA-->
+<!-- end header part -->
diff --git a/chromium/third_party/dawn/third_party/glfw/docs/input.dox b/chromium/third_party/dawn/third_party/glfw/docs/input.dox
new file mode 100644
index 00000000000..faa94cd4b58
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/docs/input.dox
@@ -0,0 +1,953 @@
+/*!
+
+@page input_guide Input guide
+
+@tableofcontents
+
+This guide introduces the input related functions of GLFW. For details on
+a specific function in this category, see the @ref input. There are also guides
+for the other areas of GLFW.
+
+ - @ref intro_guide
+ - @ref window_guide
+ - @ref context_guide
+ - @ref vulkan_guide
+ - @ref monitor_guide
+
+GLFW provides many kinds of input. While some can only be polled, like time, or
+only received via callbacks, like scrolling, many provide both callbacks and
+polling. Callbacks are more work to use than polling, but they are less CPU
+intensive and guarantee that you do not miss state changes.
+
+All input callbacks receive a window handle. By using the
+[window user pointer](@ref window_userptr), you can access non-global structures
+or objects from your callbacks.
+
+To get a better feel for how the various event callbacks behave, run the
+`events` test program. It registers every callback supported by GLFW and prints
+out all arguments provided for every event, along with time and sequence
+information.
+
+
+@section events Event processing
+
+GLFW needs to poll the window system for events both to provide input to the
+application and to prove to the window system that the application hasn't locked
+up. Event processing is normally done each frame after
+[buffer swapping](@ref buffer_swap). Even when you have no windows, event
+polling needs to be done in order to receive monitor and joystick connection
+events.
+
+There are three functions for processing pending events. @ref glfwPollEvents
+processes only those events that have already been received and then returns
+immediately.
+
+@code
+glfwPollEvents();
+@endcode
+
+This is the best choice when rendering continuously, like most games do.
+
+If you only need to update the contents of the window when you receive new
+input, @ref glfwWaitEvents is a better choice.
+
+@code
+glfwWaitEvents();
+@endcode
+
+It puts the thread to sleep until at least one event has been received and then
+processes all received events. This saves a great deal of CPU cycles and is
+useful for, for example, editing tools.
+
+If you want to wait for events but have UI elements or other tasks that need
+periodic updates, @ref glfwWaitEventsTimeout lets you specify a timeout.
+
+@code
+glfwWaitEventsTimeout(0.7);
+@endcode
+
+It puts the thread to sleep until at least one event has been received, or until
+the specified number of seconds have elapsed. It then processes any received
+events.
+
+If the main thread is sleeping in @ref glfwWaitEvents, you can wake it from
+another thread by posting an empty event to the event queue with @ref
+glfwPostEmptyEvent.
+
+@code
+glfwPostEmptyEvent();
+@endcode
+
+Do not assume that callbacks will _only_ be called in response to the above
+functions. While it is necessary to process events in one or more of the ways
+above, window systems that require GLFW to register callbacks of its own can
+pass events to GLFW in response to many window system function calls. GLFW will
+pass those events on to the application callbacks before returning.
+
+For example, on Windows the system function that @ref glfwSetWindowSize is
+implemented with will send window size events directly to the event callback
+that every window has and that GLFW implements for its windows. If you have set
+a [window size callback](@ref window_size) GLFW will call it in turn with the
+new size before everything returns back out of the @ref glfwSetWindowSize call.
+
+
+@section input_keyboard Keyboard input
+
+GLFW divides keyboard input into two categories: key events and character
+events. Key events relate to actual physical keyboard keys, whereas character
+events relate to the Unicode code points generated by pressing some of them.
+
+Keys and characters do not map 1:1. A single key press may produce several
+characters, and a single character may require several keys to produce. This
+may not be the case on your machine, but your users are likely not all using the
+same keyboard layout, input method or even operating system as you.
+
+
+@subsection input_key Key input
+
+If you wish to be notified when a physical key is pressed or released or when it
+repeats, set a key callback.
+
+@code
+glfwSetKeyCallback(window, key_callback);
+@endcode
+
+The callback function receives the [keyboard key](@ref keys), platform-specific
+scancode, key action and [modifier bits](@ref mods).
+
+@code
+void key_callback(GLFWwindow* window, int key, int scancode, int action, int mods)
+{
+ if (key == GLFW_KEY_E && action == GLFW_PRESS)
+ activate_airship();
+}
+@endcode
+
+The action is one of `GLFW_PRESS`, `GLFW_REPEAT` or `GLFW_RELEASE`. The key
+will be `GLFW_KEY_UNKNOWN` if GLFW lacks a key token for it, for example
+_E-mail_ and _Play_ keys.
+
+The scancode is unique for every key, regardless of whether it has a key token.
+Scancodes are platform-specific but consistent over time, so keys will have
+different scancodes depending on the platform but they are safe to save to disk.
+You can query the scancode for any [named key](@ref keys) on the current
+platform with @ref glfwGetKeyScancode.
+
+@code
+const int scancode = glfwGetKeyScancode(GLFW_KEY_X);
+set_key_mapping(scancode, swap_weapons);
+@endcode
+
+The last reported state for every [named key](@ref keys) is also saved in
+per-window state arrays that can be polled with @ref glfwGetKey.
+
+@code
+int state = glfwGetKey(window, GLFW_KEY_E);
+if (state == GLFW_PRESS)
+{
+ activate_airship();
+}
+@endcode
+
+The returned state is one of `GLFW_PRESS` or `GLFW_RELEASE`.
+
+This function only returns cached key event state. It does not poll the
+system for the current physical state of the key.
+
+@anchor GLFW_STICKY_KEYS
+Whenever you poll state, you risk missing the state change you are looking for.
+If a pressed key is released again before you poll its state, you will have
+missed the key press. The recommended solution for this is to use a
+key callback, but there is also the `GLFW_STICKY_KEYS` input mode.
+
+@code
+glfwSetInputMode(window, GLFW_STICKY_KEYS, GLFW_TRUE);
+@endcode
+
+When sticky keys mode is enabled, the pollable state of a key will remain
+`GLFW_PRESS` until the state of that key is polled with @ref glfwGetKey. Once
+it has been polled, if a key release event had been processed in the meantime,
+the state will reset to `GLFW_RELEASE`, otherwise it will remain `GLFW_PRESS`.
+
+@anchor GLFW_LOCK_KEY_MODS
+If you wish to know what the state of the Caps Lock and Num Lock keys was when
+input events were generated, set the `GLFW_LOCK_KEY_MODS` input mode.
+
+@code
+glfwSetInputMode(window, GLFW_LOCK_KEY_MODS, GLFW_TRUE);
+@endcode
+
+When this input mode is enabled, any callback that receives
+[modifier bits](@ref mods) will have the @ref GLFW_MOD_CAPS_LOCK bit set if Caps
+Lock was on when the event occurred and the @ref GLFW_MOD_NUM_LOCK bit set if
+Num Lock was on.
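+
+For example, a key callback could test the Caps Lock bit directly. This is
+a minimal illustrative sketch, not part of GLFW itself.
+
+@code
+void key_callback(GLFWwindow* window, int key, int scancode, int action, int mods)
+{
+    if (mods & GLFW_MOD_CAPS_LOCK)
+    {
+        // Caps Lock was on when this key event was generated
+    }
+}
+@endcode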
+
+The `GLFW_KEY_LAST` constant holds the highest value of any
+[named key](@ref keys).
+
+
+@subsection input_char Text input
+
+GLFW supports text input in the form of a stream of
+[Unicode code points](https://en.wikipedia.org/wiki/Unicode), as produced by the
+operating system text input system. Unlike key input, text input obeys keyboard
+layouts and modifier keys and supports composing characters using
+[dead keys](https://en.wikipedia.org/wiki/Dead_key). Once received, you can
+encode the code points into UTF-8 or any other encoding you prefer.
+
+Because an `unsigned int` is 32 bits long on all platforms supported by GLFW,
+you can treat the code point argument as native endian UTF-32.
+
+If you wish to offer regular text input, set a character callback.
+
+@code
+glfwSetCharCallback(window, character_callback);
+@endcode
+
+The callback function receives Unicode code points for key events that would
+have led to regular text input and generally behaves as a standard text field on
+that platform.
+
+@code
+void character_callback(GLFWwindow* window, unsigned int codepoint)
+{
+}
+@endcode
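+
+If the application needs UTF-8, the received code point can be encoded inside
+the callback. The helper below is a minimal sketch of UTF-8 encoding and is not
+part of GLFW; how the resulting bytes are stored is up to the application.
+
+@code
+// Encodes one code point as UTF-8 and returns the number of bytes written.
+static int encode_utf8(char* out, unsigned int codepoint)
+{
+    if (codepoint < 0x80)
+    {
+        out[0] = (char) codepoint;
+        return 1;
+    }
+    else if (codepoint < 0x800)
+    {
+        out[0] = (char) (0xc0 | (codepoint >> 6));
+        out[1] = (char) (0x80 | (codepoint & 0x3f));
+        return 2;
+    }
+    else if (codepoint < 0x10000)
+    {
+        out[0] = (char) (0xe0 | (codepoint >> 12));
+        out[1] = (char) (0x80 | ((codepoint >> 6) & 0x3f));
+        out[2] = (char) (0x80 | (codepoint & 0x3f));
+        return 3;
+    }
+    else
+    {
+        out[0] = (char) (0xf0 | (codepoint >> 18));
+        out[1] = (char) (0x80 | ((codepoint >> 12) & 0x3f));
+        out[2] = (char) (0x80 | ((codepoint >> 6) & 0x3f));
+        out[3] = (char) (0x80 | (codepoint & 0x3f));
+        return 4;
+    }
+}
+@endcode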
+
+
+@subsection input_key_name Key names
+
+If you wish to refer to keys by name, you can query the keyboard layout
+dependent name of printable keys with @ref glfwGetKeyName.
+
+@code
+const char* key_name = glfwGetKeyName(GLFW_KEY_W, 0);
+show_tutorial_hint("Press %s to move forward", key_name);
+@endcode
+
+This function can handle both [keys and scancodes](@ref input_key). If the
+specified key is `GLFW_KEY_UNKNOWN` then the scancode is used, otherwise it is
+ignored. This matches the behavior of the key callback, meaning the callback
+arguments can always be passed unmodified to this function.
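+
+For example, a key callback could pass its `key` and `scancode` arguments
+straight through. This is a minimal sketch; `log_key_event` is a hypothetical
+application function.
+
+@code
+void key_callback(GLFWwindow* window, int key, int scancode, int action, int mods)
+{
+    const char* key_name = glfwGetKeyName(key, scancode);
+    if (key_name)
+        log_key_event(key_name, action);
+}
+@endcode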
+
+
+@section input_mouse Mouse input
+
+Mouse input comes in many forms, including mouse motion, button presses and
+scrolling offsets. The cursor appearance can also be changed, either to
+a custom image or a standard cursor shape from the system theme.
+
+
+@subsection cursor_pos Cursor position
+
+If you wish to be notified when the cursor moves over the window, set a cursor
+position callback.
+
+@code
+glfwSetCursorPosCallback(window, cursor_position_callback);
+@endcode
+
+The callback function receives the cursor position, measured in screen
+coordinates but relative to the top-left corner of the window content area. On
+platforms that provide it, the full sub-pixel cursor position is passed on.
+
+@code
+static void cursor_position_callback(GLFWwindow* window, double xpos, double ypos)
+{
+}
+@endcode
+
+The cursor position is also saved per-window and can be polled with @ref
+glfwGetCursorPos.
+
+@code
+double xpos, ypos;
+glfwGetCursorPos(window, &xpos, &ypos);
+@endcode
+
+
+@subsection cursor_mode Cursor mode
+
+@anchor GLFW_CURSOR
+The `GLFW_CURSOR` input mode provides several cursor modes for special forms of
+mouse motion input. By default, the cursor mode is `GLFW_CURSOR_NORMAL`,
+meaning the regular arrow cursor (or another cursor set with @ref glfwSetCursor)
+is used and cursor motion is not limited.
+
+If you wish to implement mouse motion based camera controls or other input
+schemes that require unlimited mouse movement, set the cursor mode to
+`GLFW_CURSOR_DISABLED`.
+
+@code
+glfwSetInputMode(window, GLFW_CURSOR, GLFW_CURSOR_DISABLED);
+@endcode
+
+This will hide the cursor and lock it to the specified window. GLFW will then
+take care of all the details of cursor re-centering and offset calculation and
+providing the application with a virtual cursor position. This virtual position
+is provided in the normal way, via both the cursor position callback and polling.
+
+@note You should not implement your own version of this functionality using
+other features of GLFW. It is not supported and will not work as robustly as
+`GLFW_CURSOR_DISABLED`.
+
+If you only wish the cursor to become hidden when it is over a window but still
+want it to behave normally, set the cursor mode to `GLFW_CURSOR_HIDDEN`.
+
+@code
+glfwSetInputMode(window, GLFW_CURSOR, GLFW_CURSOR_HIDDEN);
+@endcode
+
+This mode puts no limit on the motion of the cursor.
+
+To exit out of either of these special modes, restore the `GLFW_CURSOR_NORMAL`
+cursor mode.
+
+@code
+glfwSetInputMode(window, GLFW_CURSOR, GLFW_CURSOR_NORMAL);
+@endcode
+
+
+@anchor GLFW_RAW_MOUSE_MOTION
+@subsection raw_mouse_motion Raw mouse motion
+
+When the cursor is disabled, raw (unscaled and unaccelerated) mouse motion can
+be enabled if available.
+
+Raw mouse motion is closer to the actual motion of the mouse across a surface.
+It is not affected by the scaling and acceleration applied to the motion of the
+desktop cursor. That processing is suitable for a cursor, while raw motion is
+better for controlling, for example, a 3D camera. Because of this, raw mouse
+motion is only provided when the cursor is disabled.
+
+Call @ref glfwRawMouseMotionSupported to check if the current machine provides
+raw motion and set the `GLFW_RAW_MOUSE_MOTION` input mode to enable it. It is
+disabled by default.
+
+@code
+if (glfwRawMouseMotionSupported())
+ glfwSetInputMode(window, GLFW_RAW_MOUSE_MOTION, GLFW_TRUE);
+@endcode
+
+If supported, raw mouse motion can be enabled or disabled per-window and at any
+time but it will only be provided when the cursor is disabled.
+
+
+@subsection cursor_object Cursor objects
+
+GLFW supports creating both custom and system theme cursor images, encapsulated
+as @ref GLFWcursor objects. They are created with @ref glfwCreateCursor or @ref
+glfwCreateStandardCursor and destroyed with @ref glfwDestroyCursor, or @ref
+glfwTerminate, if any remain.
+
+
+@subsubsection cursor_custom Custom cursor creation
+
+A custom cursor is created with @ref glfwCreateCursor, which returns a handle to
+the created cursor object. For example, this creates a 16x16 white square
+cursor with the hot-spot in the upper-left corner:
+
+@code
+unsigned char pixels[16 * 16 * 4];
+memset(pixels, 0xff, sizeof(pixels));
+
+GLFWimage image;
+image.width = 16;
+image.height = 16;
+image.pixels = pixels;
+
+GLFWcursor* cursor = glfwCreateCursor(&image, 0, 0);
+@endcode
+
+If cursor creation fails, `NULL` will be returned, so it is necessary to check
+the return value.
+
+The image data is 32-bit, little-endian, non-premultiplied RGBA, i.e. eight bits
+per channel with the red channel first. The pixels are arranged canonically as
+sequential rows, starting from the top-left corner.
+
+
+@subsubsection cursor_standard Standard cursor creation
+
+A cursor with a [standard shape](@ref shapes) from the current system cursor
+theme can be created with @ref glfwCreateStandardCursor.
+
+@code
+GLFWcursor* url_cursor = glfwCreateStandardCursor(GLFW_POINTING_HAND_CURSOR);
+@endcode
+
+These cursor objects behave in the exact same way as those created with @ref
+glfwCreateCursor except that the system cursor theme provides the actual image.
+
+A few of these shapes are not available everywhere. If a shape is unavailable,
+`NULL` is returned. See @ref glfwCreateStandardCursor for details.
+
+
+@subsubsection cursor_destruction Cursor destruction
+
+When a cursor is no longer needed, destroy it with @ref glfwDestroyCursor.
+
+@code
+glfwDestroyCursor(cursor);
+@endcode
+
+Cursor destruction always succeeds. If the cursor is current for any window,
+that window will revert to the default cursor. This does not affect the cursor
+mode. All remaining cursors are destroyed when @ref glfwTerminate is called.
+
+
+@subsubsection cursor_set Cursor setting
+
+A cursor can be set as current for a window with @ref glfwSetCursor.
+
+@code
+glfwSetCursor(window, cursor);
+@endcode
+
+Once set, the cursor image will be used as long as the system cursor is over the
+content area of the window and the [cursor mode](@ref cursor_mode) is set
+to `GLFW_CURSOR_NORMAL`.
+
+A single cursor may be set for any number of windows.
+
+To revert to the default cursor, set the cursor of that window to `NULL`.
+
+@code
+glfwSetCursor(window, NULL);
+@endcode
+
+When a cursor is destroyed, any window that has it set will revert to the
+default cursor. This does not affect the cursor mode.
+
+
+@subsection cursor_enter Cursor enter/leave events
+
+If you wish to be notified when the cursor enters or leaves the content area of
+a window, set a cursor enter/leave callback.
+
+@code
+glfwSetCursorEnterCallback(window, cursor_enter_callback);
+@endcode
+
+The callback function receives the new classification of the cursor.
+
+@code
+void cursor_enter_callback(GLFWwindow* window, int entered)
+{
+ if (entered)
+ {
+ // The cursor entered the content area of the window
+ }
+ else
+ {
+ // The cursor left the content area of the window
+ }
+}
+@endcode
+
+You can query whether the cursor is currently inside the content area of the
+window with the [GLFW_HOVERED](@ref GLFW_HOVERED_attrib) window attribute.
+
+@code
+if (glfwGetWindowAttrib(window, GLFW_HOVERED))
+{
+ highlight_interface();
+}
+@endcode
+
+
+@subsection input_mouse_button Mouse button input
+
+If you wish to be notified when a mouse button is pressed or released, set
+a mouse button callback.
+
+@code
+glfwSetMouseButtonCallback(window, mouse_button_callback);
+@endcode
+
+The callback function receives the [mouse button](@ref buttons), button action
+and [modifier bits](@ref mods).
+
+@code
+void mouse_button_callback(GLFWwindow* window, int button, int action, int mods)
+{
+ if (button == GLFW_MOUSE_BUTTON_RIGHT && action == GLFW_PRESS)
+ popup_menu();
+}
+@endcode
+
+The action is one of `GLFW_PRESS` or `GLFW_RELEASE`.
+
+Mouse button states for [named buttons](@ref buttons) are also saved in
+per-window state arrays that can be polled with @ref glfwGetMouseButton.
+
+@code
+int state = glfwGetMouseButton(window, GLFW_MOUSE_BUTTON_LEFT);
+if (state == GLFW_PRESS)
+{
+ upgrade_cow();
+}
+@endcode
+
+The returned state is one of `GLFW_PRESS` or `GLFW_RELEASE`.
+
+This function only returns cached mouse button event state. It does not poll
+the system for the current state of the mouse button.
+
+@anchor GLFW_STICKY_MOUSE_BUTTONS
+Whenever you poll state, you risk missing the state change you are looking for.
+If a pressed mouse button is released again before you poll its state, you will have
+missed the button press. The recommended solution for this is to use a
+mouse button callback, but there is also the `GLFW_STICKY_MOUSE_BUTTONS`
+input mode.
+
+@code
+glfwSetInputMode(window, GLFW_STICKY_MOUSE_BUTTONS, GLFW_TRUE);
+@endcode
+
+When sticky mouse buttons mode is enabled, the pollable state of a mouse button
+will remain `GLFW_PRESS` until the state of that button is polled with @ref
+glfwGetMouseButton. Once it has been polled, if a mouse button release event
+had been processed in the meantime, the state will reset to `GLFW_RELEASE`,
+otherwise it will remain `GLFW_PRESS`.
+
+The `GLFW_MOUSE_BUTTON_LAST` constant holds the highest value of any
+[named button](@ref buttons).
+
+
+@subsection scrolling Scroll input
+
+If you wish to be notified when the user scrolls, whether with a mouse wheel or
+touchpad gesture, set a scroll callback.
+
+@code
+glfwSetScrollCallback(window, scroll_callback);
+@endcode
+
+The callback function receives two-dimensional scroll offsets.
+
+@code
+void scroll_callback(GLFWwindow* window, double xoffset, double yoffset)
+{
+}
+@endcode
+
+A normal mouse wheel, being vertical, provides offsets along the Y-axis.
+
+
+@section joystick Joystick input
+
+The joystick functions expose connected joysticks and controllers, with both
+referred to as joysticks. GLFW supports up to sixteen joysticks, ranging from
+`GLFW_JOYSTICK_1`, `GLFW_JOYSTICK_2` up to and including `GLFW_JOYSTICK_16` or
+`GLFW_JOYSTICK_LAST`. You can test whether a [joystick](@ref joysticks) is
+present with @ref glfwJoystickPresent.
+
+@code
+int present = glfwJoystickPresent(GLFW_JOYSTICK_1);
+@endcode
+
+Each joystick has zero or more axes, zero or more buttons, zero or more hats,
+a human-readable name, a user pointer and an SDL compatible GUID.
+
+Detected joysticks are added to the beginning of the array. Once a joystick is
+detected, it keeps its assigned ID until it is disconnected or the library is
+terminated, so as joysticks are connected and disconnected, there may appear
+gaps in the IDs.
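+
+Because of these gaps, code that enumerates joysticks should test every ID
+rather than stopping at the first missing one. A minimal sketch:
+
+@code
+int jid;
+
+for (jid = GLFW_JOYSTICK_1; jid <= GLFW_JOYSTICK_LAST; jid++)
+{
+    if (glfwJoystickPresent(jid))
+    {
+        // This ID currently refers to a connected joystick
+    }
+}
+@endcode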
+
+Joystick axis, button and hat state is updated when polled and does not require
+a window to be created or events to be processed. However, if you want joystick
+connection and disconnection events reliably delivered to the
+[joystick callback](@ref joystick_event) then you must
+[process events](@ref events).
+
+To see all the properties of all connected joysticks in real-time, run the
+`joysticks` test program.
+
+
+@subsection joystick_axis Joystick axis states
+
+The positions of all axes of a joystick are returned by @ref
+glfwGetJoystickAxes. See the reference documentation for the lifetime of the
+returned array.
+
+@code
+int count;
+const float* axes = glfwGetJoystickAxes(GLFW_JOYSTICK_5, &count);
+@endcode
+
+Each element in the returned array is a value between -1.0 and 1.0.
+
+
+@subsection joystick_button Joystick button states
+
+The states of all buttons of a joystick are returned by @ref
+glfwGetJoystickButtons. See the reference documentation for the lifetime of the
+returned array.
+
+@code
+int count;
+const unsigned char* buttons = glfwGetJoystickButtons(GLFW_JOYSTICK_3, &count);
+@endcode
+
+Each element in the returned array is either `GLFW_PRESS` or `GLFW_RELEASE`.
+
+For backward compatibility with earlier versions that did not have @ref
+glfwGetJoystickHats, the button array by default also includes all hats. See
+the reference documentation for @ref glfwGetJoystickButtons for details.
+
+
+@subsection joystick_hat Joystick hat states
+
+The states of all hats are returned by @ref glfwGetJoystickHats. See the
+reference documentation for the lifetime of the returned array.
+
+@code
+int count;
+const unsigned char* hats = glfwGetJoystickHats(GLFW_JOYSTICK_7, &count);
+@endcode
+
+Each element in the returned array is one of the following:
+
+Name | Value
+---- | -----
+`GLFW_HAT_CENTERED` | 0
+`GLFW_HAT_UP` | 1
+`GLFW_HAT_RIGHT` | 2
+`GLFW_HAT_DOWN` | 4
+`GLFW_HAT_LEFT` | 8
+`GLFW_HAT_RIGHT_UP` | `GLFW_HAT_RIGHT` \| `GLFW_HAT_UP`
+`GLFW_HAT_RIGHT_DOWN` | `GLFW_HAT_RIGHT` \| `GLFW_HAT_DOWN`
+`GLFW_HAT_LEFT_UP` | `GLFW_HAT_LEFT` \| `GLFW_HAT_UP`
+`GLFW_HAT_LEFT_DOWN` | `GLFW_HAT_LEFT` \| `GLFW_HAT_DOWN`
+
+The diagonal directions are bitwise combinations of the primary (up, right, down
+and left) directions, and you can test for these individually by ANDing the hat
+state with the corresponding direction.
+
+@code
+if (hats[2] & GLFW_HAT_RIGHT)
+{
+ // State of hat 2 could be right-up, right or right-down
+}
+@endcode
+
+For backward compatibility with earlier versions that did not have @ref
+glfwGetJoystickHats, all hats are by default also included in the button array.
+See the reference documentation for @ref glfwGetJoystickButtons for details.
+
+
+@subsection joystick_name Joystick name
+
+The human-readable, UTF-8 encoded name of a joystick is returned by @ref
+glfwGetJoystickName. See the reference documentation for the lifetime of the
+returned string.
+
+@code
+const char* name = glfwGetJoystickName(GLFW_JOYSTICK_4);
+@endcode
+
+Joystick names are not guaranteed to be unique. Two joysticks of the same model
+and make may have the same name. Only the [joystick ID](@ref joysticks) is
+guaranteed to be unique, and only until that joystick is disconnected.
+
+
+@subsection joystick_userptr Joystick user pointer
+
+Each joystick has a user pointer that can be set with @ref
+glfwSetJoystickUserPointer and queried with @ref glfwGetJoystickUserPointer.
+This can be used for any purpose you need and will not be modified by GLFW. The
+value will be kept until the joystick is disconnected or until the library is
+terminated.
+
+The initial value of the pointer is `NULL`.
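+
+A minimal sketch of setting the pointer when a joystick is assigned to a player
+and retrieving it later; the `players` array and `struct player` type are
+hypothetical application data, not part of GLFW.
+
+@code
+glfwSetJoystickUserPointer(GLFW_JOYSTICK_1, &players[0]);
+@endcode
+
+Later, for example inside the [joystick callback](@ref joystick_event), the
+pointer can be read back for that joystick ID.
+
+@code
+struct player* p = glfwGetJoystickUserPointer(jid);
+@endcode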
+
+
+@subsection joystick_event Joystick configuration changes
+
+If you wish to be notified when a joystick is connected or disconnected, set
+a joystick callback.
+
+@code
+glfwSetJoystickCallback(joystick_callback);
+@endcode
+
+The callback function receives the ID of the joystick that has been connected
+and disconnected and the event that occurred.
+
+@code
+void joystick_callback(int jid, int event)
+{
+ if (event == GLFW_CONNECTED)
+ {
+ // The joystick was connected
+ }
+ else if (event == GLFW_DISCONNECTED)
+ {
+ // The joystick was disconnected
+ }
+}
+@endcode
+
+For joystick connection and disconnection events to be delivered on all
+platforms, you need to call one of the [event processing](@ref events)
+functions. Joystick disconnection may also be detected and the callback
+called by joystick functions. The function will then return whatever it
+returns for a disconnected joystick.
+
+Only @ref glfwGetJoystickName and @ref glfwGetJoystickUserPointer will return
+useful values for a disconnected joystick, and only before the joystick callback
+returns.
+
+
+@subsection gamepad Gamepad input
+
+The joystick functions provide unlabeled axes, buttons and hats, with no
+indication of where they are located on the device. Their order may also vary
+between platforms even with the same device.
+
+To solve this problem the SDL community crowdsourced the
+[SDL_GameControllerDB](https://github.com/gabomdq/SDL_GameControllerDB) project,
+a database of mappings from many different devices to an Xbox-like gamepad.
+
+GLFW supports this mapping format and contains a copy of the mappings
+available at the time of release. See @ref gamepad_mapping for how to update
+this at runtime. Mappings will be assigned to joysticks automatically any time
+a joystick is connected or the mappings are updated.
+
+You can check whether a joystick is both present and has a gamepad mapping with
+@ref glfwJoystickIsGamepad.
+
+@code
+if (glfwJoystickIsGamepad(GLFW_JOYSTICK_2))
+{
+ // Use as gamepad
+}
+@endcode
+
+If you are only interested in gamepad input you can use this function instead of
+@ref glfwJoystickPresent.
+
+You can query the human-readable name provided by the gamepad mapping with @ref
+glfwGetGamepadName. This may or may not be the same as the
+[joystick name](@ref joystick_name).
+
+@code
+const char* name = glfwGetGamepadName(GLFW_JOYSTICK_7);
+@endcode
+
+To retrieve the gamepad state of a joystick, call @ref glfwGetGamepadState.
+
+@code
+GLFWgamepadstate state;
+
+if (glfwGetGamepadState(GLFW_JOYSTICK_3, &state))
+{
+ if (state.buttons[GLFW_GAMEPAD_BUTTON_A])
+ {
+ input_jump();
+ }
+
+ input_speed(state.axes[GLFW_GAMEPAD_AXIS_RIGHT_TRIGGER]);
+}
+@endcode
+
+The @ref GLFWgamepadstate struct has two arrays: one for button states and one
+for axis states. The values for each button and axis are the same as for the
+@ref glfwGetJoystickButtons and @ref glfwGetJoystickAxes functions, i.e.
+`GLFW_PRESS` or `GLFW_RELEASE` for buttons and -1.0 to 1.0 inclusive for axes.
+
+The sizes of the arrays and the positions within each array are fixed.
+
+The [button indices](@ref gamepad_buttons) are `GLFW_GAMEPAD_BUTTON_A`,
+`GLFW_GAMEPAD_BUTTON_B`, `GLFW_GAMEPAD_BUTTON_X`, `GLFW_GAMEPAD_BUTTON_Y`,
+`GLFW_GAMEPAD_BUTTON_LEFT_BUMPER`, `GLFW_GAMEPAD_BUTTON_RIGHT_BUMPER`,
+`GLFW_GAMEPAD_BUTTON_BACK`, `GLFW_GAMEPAD_BUTTON_START`,
+`GLFW_GAMEPAD_BUTTON_GUIDE`, `GLFW_GAMEPAD_BUTTON_LEFT_THUMB`,
+`GLFW_GAMEPAD_BUTTON_RIGHT_THUMB`, `GLFW_GAMEPAD_BUTTON_DPAD_UP`,
+`GLFW_GAMEPAD_BUTTON_DPAD_RIGHT`, `GLFW_GAMEPAD_BUTTON_DPAD_DOWN` and
+`GLFW_GAMEPAD_BUTTON_DPAD_LEFT`.
+
+For those who prefer, there are also the `GLFW_GAMEPAD_BUTTON_CROSS`,
+`GLFW_GAMEPAD_BUTTON_CIRCLE`, `GLFW_GAMEPAD_BUTTON_SQUARE` and
+`GLFW_GAMEPAD_BUTTON_TRIANGLE` aliases for the A, B, X and Y button indices.
+
+The [axis indices](@ref gamepad_axes) are `GLFW_GAMEPAD_AXIS_LEFT_X`,
+`GLFW_GAMEPAD_AXIS_LEFT_Y`, `GLFW_GAMEPAD_AXIS_RIGHT_X`,
+`GLFW_GAMEPAD_AXIS_RIGHT_Y`, `GLFW_GAMEPAD_AXIS_LEFT_TRIGGER` and
+`GLFW_GAMEPAD_AXIS_RIGHT_TRIGGER`.
+
+The `GLFW_GAMEPAD_BUTTON_LAST` and `GLFW_GAMEPAD_AXIS_LAST` constants equal
+the largest available index for each array.
+
+
+@subsection gamepad_mapping Gamepad mappings
+
+GLFW contains a copy of the mappings available in
+[SDL_GameControllerDB](https://github.com/gabomdq/SDL_GameControllerDB) at the
+time of release. Newer ones can be added at runtime with @ref
+glfwUpdateGamepadMappings.
+
+@code
+const char* mappings = load_file_contents("game/data/gamecontrollerdb.txt");
+
+glfwUpdateGamepadMappings(mappings);
+@endcode
+
+This function supports everything from single lines up to and including the
+unmodified contents of the whole `gamecontrollerdb.txt` file.
+
+If you are compiling GLFW from source with CMake you can update the built-in mappings by
+building the _update_mappings_ target. This runs the `GenerateMappings.cmake` CMake
+script, which downloads `gamecontrollerdb.txt` and regenerates the `mappings.h` header
+file.
+
+Below is a description of the mapping format. Please keep in mind that __this
+description is not authoritative__. The format is defined by the SDL and
+SDL_GameControllerDB projects, and their documentation and code take precedence.
+
+Each mapping is a single line of comma-separated values describing the GUID,
+name and layout of the gamepad. Lines that do not begin with a hexadecimal
+digit are ignored.
+
+The first value is always the gamepad GUID, a 32 character long hexadecimal
+string that typically identifies its make, model, revision and the type of
+connection to the computer. When this information is not available, the GUID is
+generated using the gamepad name. GLFW uses the SDL 2.0.5+ GUID format but can
+convert from the older formats.
+
+The second value is always the human-readable name of the gamepad.
+
+All subsequent values are in the form `<field>:<value>` and describe the layout
+of the mapping. These fields may not all be present and may occur in any order.
+
+The button fields are `a`, `b`, `x`, `y`, `back`, `start`, `guide`, `dpup`,
+`dpright`, `dpdown`, `dpleft`, `leftshoulder`, `rightshoulder`, `leftstick` and
+`rightstick`.
+
+The axis fields are `leftx`, `lefty`, `rightx`, `righty`, `lefttrigger` and
+`righttrigger`.
+
+The value of an axis or button field can be a joystick button, a joystick axis,
+a hat bitmask or empty. Joystick buttons are specified as `bN`, for example
+`b2` for the third button. Joystick axes are specified as `aN`, for example
+`a7` for the eighth axis. Joystick hat bit masks are specified as `hN.N`, for
+example `h0.8` for left on the first hat. More than one bit may be set in the
+mask.
+
+Before an axis there may be a `+` or `-` range modifier, for example `+a3` for
+the positive half of the fourth axis. This restricts input to only the positive
+or negative halves of the joystick axis. After an axis or half-axis there may
+be the `~` inversion modifier, for example `a2~` or `-a7~`. This negates the
+values of the gamepad axis.
+
+The hat bit masks match the [hat states](@ref hat_state) in the joystick
+functions.
+
+There is also the special `platform` field that specifies which platform the
+mapping is valid for. Possible values are `Windows`, `Mac OS X` and `Linux`.
+
+Below is an example of what a gamepad mapping might look like. It is the
+one built into GLFW for Xbox controllers accessed via the XInput API on Windows.
+This example has been broken into several lines to fit on the page, but real
+gamepad mappings must be a single line.
+
+@code{.unparsed}
+78696e70757401000000000000000000,XInput Gamepad (GLFW),platform:Windows,a:b0,
+b:b1,x:b2,y:b3,leftshoulder:b4,rightshoulder:b5,back:b6,start:b7,leftstick:b8,
+rightstick:b9,leftx:a0,lefty:a1,rightx:a2,righty:a3,lefttrigger:a4,
+righttrigger:a5,dpup:h0.1,dpright:h0.2,dpdown:h0.4,dpleft:h0.8,
+@endcode
+
+@note GLFW does not yet support the output range and modifiers `+` and `-` that
+were recently added to SDL. The input modifiers `+`, `-` and `~` are supported
+and described above.
+
+
+@section time Time input
+
+GLFW provides high-resolution time input, in seconds, with @ref glfwGetTime.
+
+@code
+double seconds = glfwGetTime();
+@endcode
+
+It returns the number of seconds since the library was initialized with @ref
+glfwInit. The platform-specific time sources used typically have micro- or
+nanosecond resolution.
+
+You can modify the base time with @ref glfwSetTime.
+
+@code
+glfwSetTime(4.0);
+@endcode
+
+This sets the time to the specified time, in seconds, and it continues to count
+from there.
+
+You can also access the raw timer used to implement the functions above,
+with @ref glfwGetTimerValue.
+
+@code
+uint64_t value = glfwGetTimerValue();
+@endcode
+
+This value is in 1&nbsp;/&nbsp;frequency seconds. The frequency of the raw
+timer varies depending on the operating system and hardware. You can query the
+frequency, in Hz, with @ref glfwGetTimerFrequency.
+
+@code
+uint64_t frequency = glfwGetTimerFrequency();
+@endcode
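+
+For example, the elapsed time in seconds between two raw timer readings is the
+difference divided by the frequency. A minimal sketch, where `do_work` stands in
+for whatever is being measured:
+
+@code
+const uint64_t start = glfwGetTimerValue();
+
+do_work();
+
+const uint64_t end = glfwGetTimerValue();
+const double elapsed = (double) (end - start) / glfwGetTimerFrequency();
+@endcode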
+
+
+@section clipboard Clipboard input and output
+
+If the system clipboard contains a UTF-8 encoded string or if it can be
+converted to one, you can retrieve it with @ref glfwGetClipboardString. See the
+reference documentation for the lifetime of the returned string.
+
+@code
+const char* text = glfwGetClipboardString(NULL);
+if (text)
+{
+ insert_text(text);
+}
+@endcode
+
+If the clipboard is empty or if its contents could not be converted, `NULL` is
+returned.
+
+The contents of the system clipboard can be set to a UTF-8 encoded string with
+@ref glfwSetClipboardString.
+
+@code
+glfwSetClipboardString(NULL, "A string with words in it");
+@endcode
+
+
+@section path_drop Path drop input
+
+If you wish to receive the paths of files and/or directories dropped on
+a window, set a file drop callback.
+
+@code
+glfwSetDropCallback(window, drop_callback);
+@endcode
+
+The callback function receives an array of paths encoded as UTF-8.
+
+@code
+void drop_callback(GLFWwindow* window, int count, const char** paths)
+{
+ int i;
+ for (i = 0; i < count; i++)
+ handle_dropped_file(paths[i]);
+}
+@endcode
+
+The path array and its strings are only valid until the file drop callback
+returns, as they may have been generated specifically for that event. You need
+to make a deep copy of the array if you want to keep the paths.
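+
+A minimal sketch of such a deep copy, assuming POSIX `strdup` and the standard
+C allocator; `store_dropped_paths` is a hypothetical application function and
+error handling is omitted.
+
+@code
+void drop_callback(GLFWwindow* window, int count, const char** paths)
+{
+    char** copies = malloc(count * sizeof(char*));
+    int i;
+
+    for (i = 0;  i < count;  i++)
+        copies[i] = strdup(paths[i]);
+
+    // The copies remain valid after the callback returns and must be freed later
+    store_dropped_paths(copies, count);
+}
+@endcode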
+
+*/
diff --git a/chromium/third_party/dawn/third_party/glfw/docs/internal.dox b/chromium/third_party/dawn/third_party/glfw/docs/internal.dox
new file mode 100644
index 00000000000..69227568c19
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/docs/internal.dox
@@ -0,0 +1,123 @@
+/*!
+
+@page internals_guide Internal structure
+
+@tableofcontents
+
+There are several interfaces inside GLFW. Each interface has its own area of
+responsibility and its own naming conventions.
+
+
+@section internals_public Public interface
+
+The most well-known is the public interface, described in the glfw3.h header
+file. This is implemented in source files shared by all platforms and these
+files contain no platform-specific code. This code usually ends up calling the
+platform and internal interfaces to do the actual work.
+
+The public interface uses the OpenGL naming conventions except with GLFW and
+glfw instead of GL and gl. For struct members, where OpenGL sets no precedent,
+it uses headless camel case.
+
+Examples: `glfwCreateWindow`, `GLFWwindow`, `GLFW_RED_BITS`
+
+
+@section internals_native Native interface
+
+The [native interface](@ref native) is a small set of publicly available
+but platform-specific functions, described in the glfw3native.h header file and
+used to gain access to the underlying window, context and (on some platforms)
+display handles used by the platform interface.
+
+The function names of the native interface are similar to those of the public
+interface, but embed the name of the interface that the returned handle is
+from.
+
+Examples: `glfwGetX11Window`, `glfwGetWGLContext`
+
+
+@section internals_internal Internal interface
+
+The internal interface consists of utility functions used by all other
+interfaces. It is shared code implemented in the same shared source files as
+the public and event interfaces. The internal interface is described in the
+internal.h header file.
+
+The internal interface is in charge of GLFW's global data, which it stores in
+a `_GLFWlibrary` struct named `_glfw`.
+
+The internal interface uses the same style as the public interface, except all
+global names have a leading underscore.
+
+Examples: `_glfwIsValidContextConfig`, `_GLFWwindow`, `_glfw.monitorCount`
+
+
+@section internals_platform Platform interface
+
+The platform interface implements all platform-specific operations as a service
+to the public interface. This includes event processing. The platform
+interface is never directly called by application code and never directly calls
+application-provided callbacks. It is also prohibited from modifying the
+platform-independent part of the internal structs. Instead, it calls the event
+interface when events interesting to GLFW are received.
+
+The platform interface mostly mirrors those parts of the public interface that need to
+perform platform-specific operations on some or all platforms.
+
+The window system bits of the platform API are called through the `_GLFWplatform` struct of
+function pointers, to allow runtime selection of platform. This includes the window and
+context creation, input and event processing, monitor and Vulkan surface creation parts of
+GLFW. This is located in the global `_glfw` struct.
+
+Examples: `_glfw.platform.createWindow`
+
+The timer, threading and module loading bits of the platform API are plain functions with
+a `_glfwPlatform` prefix, as these things are independent of what window system is being
+used.
+
+Examples: `_glfwPlatformGetTimerValue`
+
+The platform interface also defines structs that contain platform-specific
+global and per-object state. Their names mirror those of the internal
+interface, except that an interface-specific suffix is added.
+
+Examples: `_GLFWwindowX11`, `_GLFWcontextWGL`
+
+These structs are incorporated as members into the internal interface structs
+using special macros that name them after the specific interface used. This
+prevents shared code from accidentally using these members.
+
+Examples: `window->win32.handle`, `_glfw.x11.display`
+
+
+@section internals_event Event interface
+
+The event interface is implemented in the same shared source files as the public
+interface and is responsible for delivering the events it receives to the
+application, either via callbacks, via window state changes or both.
+
+The function names of the event interface use a `_glfwInput` prefix and the
+ObjectEvent pattern.
+
+Examples: `_glfwInputWindowFocus`, `_glfwInputCursorPos`
+
+
+@section internals_static Static functions
+
+Static functions may be used by any interface and have no prefixes or suffixes.
+These use headless camel case.
+
+Examples: `isValidElementForJoystick`
+
+
+@section internals_config Configuration macros
+
+GLFW uses a number of configuration macros to select at compile time which
+interfaces and code paths to use. They are defined in the GLFW CMake target.
+
+Configuration macros use the same style as tokens in the public interface, except
+with a leading underscore.
+
+Examples: `_GLFW_WIN32`, `_GLFW_BUILD_DLL`
+
+*/
diff --git a/chromium/third_party/dawn/third_party/glfw/docs/intro.dox b/chromium/third_party/dawn/third_party/glfw/docs/intro.dox
new file mode 100644
index 00000000000..5cbd7eb0861
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/docs/intro.dox
@@ -0,0 +1,619 @@
+/*!
+
+@page intro_guide Introduction to the API
+
+@tableofcontents
+
+This guide introduces the basic concepts of GLFW and describes initialization,
+error handling and API guarantees and limitations. For a broad but shallow
+tutorial, see @ref quick_guide instead. For details on a specific function in
+this category, see the @ref init.
+
+There are also guides for the other areas of GLFW.
+
+ - @ref window_guide
+ - @ref context_guide
+ - @ref vulkan_guide
+ - @ref monitor_guide
+ - @ref input_guide
+
+
+@section intro_init Initialization and termination
+
+Before most GLFW functions may be called, the library must be initialized.
+This initialization checks what features are available on the machine,
+enumerates monitors, initializes the timer and performs any required
+platform-specific initialization.
+
+Only the following functions may be called before the library has been
+successfully initialized, and only from the main thread.
+
+ - @ref glfwGetVersion
+ - @ref glfwGetVersionString
+ - @ref glfwPlatformSupported
+ - @ref glfwGetError
+ - @ref glfwSetErrorCallback
+ - @ref glfwInitHint
+ - @ref glfwInitAllocator
+ - @ref glfwInitVulkanLoader
+ - @ref glfwInit
+ - @ref glfwTerminate
+
+Calling any other function before successful initialization will cause a @ref
+GLFW_NOT_INITIALIZED error.
+
+
+@subsection intro_init_init Initializing GLFW
+
+The library is initialized with @ref glfwInit, which returns `GLFW_FALSE` if an
+error occurred.
+
+@code
+if (!glfwInit())
+{
+ // Handle initialization failure
+}
+@endcode
+
+If any part of initialization fails, any parts that succeeded are terminated as
+if @ref glfwTerminate had been called. The library only needs to be initialized
+once and additional calls to an already initialized library will return
+`GLFW_TRUE` immediately.
+
+Once the library has been successfully initialized, it should be terminated
+before the application exits. Modern systems are very good at freeing resources
+allocated by programs that exit, but GLFW sometimes has to change global system
+settings and these might not be restored without termination.
+
+@macos When the library is initialized the main menu and dock icon are created.
+These are not desirable for a command-line only program. The creation of the
+main menu and dock icon can be disabled with the @ref GLFW_COCOA_MENUBAR init
+hint.
+
+
+@subsection init_hints Initialization hints
+
+Initialization hints are set before @ref glfwInit and affect how the library
+behaves until termination. Hints are set with @ref glfwInitHint.
+
+@code
+glfwInitHint(GLFW_JOYSTICK_HAT_BUTTONS, GLFW_FALSE);
+@endcode
+
+The values you set hints to are never reset by GLFW, but they only take effect
+during initialization. Once GLFW has been initialized, any values you set will
+be ignored until the library is terminated and initialized again.
+
+Some hints are platform specific. These may be set on any platform but they
+will only affect their specific platform. Other platforms will ignore them.
+Setting these hints requires no platform specific headers or functions.
+
+
+@subsubsection init_hints_shared Shared init hints
+
+@anchor GLFW_PLATFORM
+__GLFW_PLATFORM__ specifies the platform to use for windowing and input.
+Possible values are `GLFW_ANY_PLATFORM`, `GLFW_PLATFORM_WIN32`,
+`GLFW_PLATFORM_COCOA`, `GLFW_PLATFORM_X11`, `GLFW_PLATFORM_WAYLAND` and
+`GLFW_PLATFORM_NULL`. The default value is `GLFW_ANY_PLATFORM`, which will
+choose any platform the library includes support for except for the Null
+backend.
+
+
+@anchor GLFW_JOYSTICK_HAT_BUTTONS
+__GLFW_JOYSTICK_HAT_BUTTONS__ specifies whether to also expose joystick hats as
+buttons, for compatibility with earlier versions of GLFW that did not have @ref
+glfwGetJoystickHats. Possible values are `GLFW_TRUE` and `GLFW_FALSE`.
+
+@anchor GLFW_ANGLE_PLATFORM_TYPE_hint
+__GLFW_ANGLE_PLATFORM_TYPE__ specifies the platform type (rendering backend) to
+request when using OpenGL ES and EGL via
+[ANGLE](https://chromium.googlesource.com/angle/angle/). If the requested
+platform type is unavailable, ANGLE will use its default. Possible values are
+one of `GLFW_ANGLE_PLATFORM_TYPE_NONE`, `GLFW_ANGLE_PLATFORM_TYPE_OPENGL`,
+`GLFW_ANGLE_PLATFORM_TYPE_OPENGLES`, `GLFW_ANGLE_PLATFORM_TYPE_D3D9`,
+`GLFW_ANGLE_PLATFORM_TYPE_D3D11`, `GLFW_ANGLE_PLATFORM_TYPE_VULKAN` and
+`GLFW_ANGLE_PLATFORM_TYPE_METAL`.
+
+The ANGLE platform type is specified via the `EGL_ANGLE_platform_angle`
+extension. This extension is not used if this hint is
+`GLFW_ANGLE_PLATFORM_TYPE_NONE`, which is the default value.
+
+
+@subsubsection init_hints_osx macOS specific init hints
+
+@anchor GLFW_COCOA_CHDIR_RESOURCES_hint
+__GLFW_COCOA_CHDIR_RESOURCES__ specifies whether to set the current directory of
+the application to the `Contents/Resources` subdirectory of the application's
+bundle, if present. Possible values are `GLFW_TRUE` and `GLFW_FALSE`. This is
+ignored on other platforms.
+
+@anchor GLFW_COCOA_MENUBAR_hint
+__GLFW_COCOA_MENUBAR__ specifies whether to create the menu bar and dock icon
+when GLFW is initialized. This applies whether the menu bar is created from
+a nib or manually by GLFW. Possible values are `GLFW_TRUE` and `GLFW_FALSE`.
+This is ignored on other platforms.
+
+
+@subsubsection init_hints_x11 X11 specific init hints
+
+@anchor GLFW_X11_XCB_VULKAN_SURFACE_hint
+__GLFW_X11_XCB_VULKAN_SURFACE__ specifies whether to prefer the
+`VK_KHR_xcb_surface` extension for creating Vulkan surfaces, or whether to use
+the `VK_KHR_xlib_surface` extension. Possible values are `GLFW_TRUE` and
+`GLFW_FALSE`. This is ignored on other platforms.
+
+
+@subsubsection init_hints_values Supported and default values
+
+Initialization hint | Default value | Supported values
+-------------------------------- | ------------------------------- | ----------------
+@ref GLFW_PLATFORM | `GLFW_ANY_PLATFORM` | `GLFW_ANY_PLATFORM`, `GLFW_PLATFORM_WIN32`, `GLFW_PLATFORM_COCOA`, `GLFW_PLATFORM_X11`, `GLFW_PLATFORM_WAYLAND` or `GLFW_PLATFORM_NULL`
+@ref GLFW_JOYSTICK_HAT_BUTTONS | `GLFW_TRUE` | `GLFW_TRUE` or `GLFW_FALSE`
+@ref GLFW_ANGLE_PLATFORM_TYPE | `GLFW_ANGLE_PLATFORM_TYPE_NONE` | `GLFW_ANGLE_PLATFORM_TYPE_NONE`, `GLFW_ANGLE_PLATFORM_TYPE_OPENGL`, `GLFW_ANGLE_PLATFORM_TYPE_OPENGLES`, `GLFW_ANGLE_PLATFORM_TYPE_D3D9`, `GLFW_ANGLE_PLATFORM_TYPE_D3D11`, `GLFW_ANGLE_PLATFORM_TYPE_VULKAN` or `GLFW_ANGLE_PLATFORM_TYPE_METAL`
+@ref GLFW_COCOA_CHDIR_RESOURCES | `GLFW_TRUE` | `GLFW_TRUE` or `GLFW_FALSE`
+@ref GLFW_COCOA_MENUBAR | `GLFW_TRUE` | `GLFW_TRUE` or `GLFW_FALSE`
+@ref GLFW_X11_XCB_VULKAN_SURFACE | `GLFW_TRUE` | `GLFW_TRUE` or `GLFW_FALSE`
+
+
+@subsection platform Runtime platform selection
+
+GLFW can be compiled for more than one platform (window system) at once. This lets
+a single library binary support both X11 and Wayland on Linux and other Unix-like systems.
+
+You can control platform selection via the @ref GLFW_PLATFORM initialization hint. By
+default this is set to @ref GLFW_ANY_PLATFORM, which will look for supported window
+systems in order of priority and select the first one it finds. It can also be set to any
+specific platform to have GLFW only look for that one.
+
+@code
+glfwInitHint(GLFW_PLATFORM, GLFW_PLATFORM_X11);
+@endcode
+
+This mechanism also provides the Null platform, which is always supported but needs to be
+explicitly requested. This platform is effectively a stub, emulating a window system on
+a single 1080p monitor, but will not interact with any actual window system.
+
+@code
+glfwInitHint(GLFW_PLATFORM, GLFW_PLATFORM_NULL);
+@endcode
+
+You can test whether a library binary was compiled with support for a specific platform
+with @ref glfwPlatformSupported.
+
+@code
+if (glfwPlatformSupported(GLFW_PLATFORM_WAYLAND))
+ glfwInitHint(GLFW_PLATFORM, GLFW_PLATFORM_WAYLAND);
+@endcode
+
+Once GLFW has been initialized, you can query which platform was selected with @ref
+glfwGetPlatform.
+
+@code
+int platform = glfwGetPlatform();
+@endcode
+
+If you are using any [native access functions](@ref native), especially on Linux and other
+Unix-like systems, then you may need to check that you are calling the ones matching the
+selected platform.
+
+
+@subsection init_allocator Custom heap memory allocator
+
+The heap memory allocator can be customized before initialization with @ref
+glfwInitAllocator.
+
+@code
+GLFWallocator allocator;
+allocator.allocate = my_malloc;
+allocator.reallocate = my_realloc;
+allocator.deallocate = my_free;
+allocator.user = NULL;
+
+glfwInitAllocator(&allocator);
+@endcode
+
+The allocator will be picked up at the beginning of initialization and will be
+used until GLFW has been fully terminated. Any allocator set after
+initialization will be picked up only at the next initialization.
+
+The allocator will only be used for allocations that would have been made with
+the C standard library. Memory allocations that must be made with platform
+specific APIs will still use those.
+
+The allocation function must have a signature matching @ref GLFWallocatefun. It receives
+the desired size, in bytes, and the user pointer passed to @ref glfwInitAllocator and
+returns the address to the allocated memory block.
+
+@code
+void* my_malloc(size_t size, void* user)
+{
+ ...
+}
+@endcode
+
+The reallocation function must have a function signature matching @ref GLFWreallocatefun.
+It receives the memory block to be reallocated, the new desired size, in bytes, and the user
+pointer passed to @ref glfwInitAllocator and returns the address to the resized memory
+block.
+
+@code
+void* my_realloc(void* block, size_t size, void* user)
+{
+ ...
+}
+@endcode
+
+The deallocation function must have a function signature matching @ref GLFWdeallocatefun.
+It receives the memory block to be deallocated and the user pointer passed to @ref
+glfwInitAllocator.
+
+@code
+void my_free(void* block, void* user)
+{
+ ...
+}
+@endcode
+
+
+@subsection intro_init_terminate Terminating GLFW
+
+Before your application exits, you should terminate the GLFW library if it has
+been initialized. This is done with @ref glfwTerminate.
+
+@code
+glfwTerminate();
+@endcode
+
+This will destroy any remaining window, monitor and cursor objects, restore any
+modified gamma ramps, re-enable the screensaver if it had been disabled and free
+any other resources allocated by GLFW.
+
+Once the library is terminated, it is as if it had never been initialized and
+you will need to initialize it again before being able to use GLFW. If the
+library was not initialized or had already been terminated, it returns
+immediately.
+
+
+@section error_handling Error handling
+
+Some GLFW functions have return values that indicate an error, but this is often
+not very helpful when trying to figure out what happened or why it occurred.
+Other functions have no return value reserved for errors, so error notification
+needs a separate channel. Finally, many GLFW functions have no return value at
+all.
+
+The last [error code](@ref errors) for the calling thread can be queried at any
+time with @ref glfwGetError.
+
+@code
+int code = glfwGetError(NULL);
+
+if (code != GLFW_NO_ERROR)
+ handle_error(code);
+@endcode
+
+If no error has occurred since the last call, @ref GLFW_NO_ERROR (zero) is
+returned. The error is cleared before the function returns.
+
+The error code indicates the general category of the error. Some error codes,
+such as @ref GLFW_NOT_INITIALIZED, have only a single meaning, whereas others like
+@ref GLFW_PLATFORM_ERROR are used for many different errors.
+
+GLFW often has more information about an error than its general category. You
+can retrieve a UTF-8 encoded human-readable description along with the error
+code. If no error has occurred since the last call, the description is set to
+`NULL`.
+
+@code
+const char* description;
+int code = glfwGetError(&description);
+
+if (description)
+ display_error_message(code, description);
+@endcode
+
+The retrieved description string is only valid until the next error occurs.
+This means you must make a copy of it if you want to keep it.
+
+You can also set an error callback, which will be called each time an error
+occurs. It is set with @ref glfwSetErrorCallback.
+
+@code
+glfwSetErrorCallback(error_callback);
+@endcode
+
+The error callback receives the same error code and human-readable description
+returned by @ref glfwGetError.
+
+@code
+void error_callback(int code, const char* description)
+{
+ display_error_message(code, description);
+}
+@endcode
+
+The error callback is called after the error is stored, so calling @ref
+glfwGetError from within the error callback returns the same values as the
+callback argument.
+
+The description string passed to the callback is only valid until the error
+callback returns. This means you must make a copy of it if you want to keep it.
+
+__Reported errors are never fatal.__ As long as GLFW was successfully
+initialized, it will remain initialized and in a safe state until terminated
+regardless of how many errors occur. If an error occurs during initialization
+that causes @ref glfwInit to fail, any part of the library that was initialized
+will be safely terminated.
+
+Do not rely on a currently invalid call to generate a specific error, as in the
+future that same call may generate a different error or become valid.
+
+
+@section coordinate_systems Coordinate systems
+
+GLFW has two primary coordinate systems: the _virtual screen_ and the window
+_content area_. Both use the same unit: _virtual screen
+coordinates_, or just _screen coordinates_, which don't necessarily correspond
+to pixels.
+
+<img src="spaces.svg" width="90%" />
+
+Both the virtual screen and the content area coordinate systems have the X-axis
+pointing to the right and the Y-axis pointing down.
+
+Window and monitor positions are specified as the position of the upper-left
+corners of their content areas relative to the virtual screen, while cursor
+positions are specified relative to a window's content area.
+
+Because the origin of the window's content area coordinate system is also the
+point from which the window position is specified, you can translate content
+area coordinates to the virtual screen by adding the window position. The
+window frame, when present, extends out from the content area but does not
+affect the window position.
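+
+For example, the cursor position can be translated from content area
+coordinates to virtual screen coordinates by adding the window position.
+This is a minimal sketch of that translation.
+
+@code
+double cursor_x, cursor_y;
+int window_x, window_y;
+
+glfwGetCursorPos(window, &cursor_x, &cursor_y);
+glfwGetWindowPos(window, &window_x, &window_y);
+
+const double screen_x = cursor_x + window_x;
+const double screen_y = cursor_y + window_y;
+@endcode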
+
+Almost all positions and sizes in GLFW are measured in screen coordinates
+relative to one of the two origins above. This includes cursor positions,
+window positions and sizes, window frame sizes, monitor positions and video mode
+resolutions.
+
+Two exceptions are the [monitor physical size](@ref monitor_size), which is
+measured in millimetres, and [framebuffer size](@ref window_fbsize), which is
+measured in pixels.
+
+Pixels and screen coordinates may map 1:1 on your machine, but they won't on
+every other machine, for example on a Mac with a Retina display. The ratio
+between screen coordinates and pixels may also change at run-time depending on
+which monitor the window is currently considered to be on.
+
+
+@section guarantees_limitations Guarantees and limitations
+
+This section describes the conditions under which GLFW can be expected to
+function, barring bugs in the operating system or drivers. Use of GLFW outside
+of these limits may work on some platforms, or on some machines, or some of the
+time, or on some versions of GLFW, but it may break at any time and this will
+not be considered a bug.
+
+
+@subsection lifetime Pointer lifetimes
+
+GLFW will never free any pointer you provide to it and you must never free any
+pointer it provides to you.
+
+Many GLFW functions return pointers to dynamically allocated structures, strings
+or arrays, and some callbacks are provided with strings or arrays. These are
+always managed by GLFW and should never be freed by the application. The
+lifetime of these pointers is documented for each GLFW function and callback.
+If you need to keep this data, you must copy it before its lifetime expires.
+
+Many GLFW functions accept pointers to structures or strings allocated by the
+application. These are never freed by GLFW and are always the responsibility of
+the application. If GLFW needs to keep the data in these structures or strings,
+it is copied before the function returns.
+
+Pointer lifetimes are guaranteed not to be shortened in future minor or patch
+releases.
+
+
+@subsection reentrancy Reentrancy
+
+GLFW event processing and object destruction are not reentrant. This means that
+the following functions must not be called from any callback function:
+
+ - @ref glfwDestroyWindow
+ - @ref glfwDestroyCursor
+ - @ref glfwPollEvents
+ - @ref glfwWaitEvents
+ - @ref glfwWaitEventsTimeout
+ - @ref glfwTerminate
+
+These functions may be made reentrant in future minor or patch releases, but
+functions not on this list will not be made non-reentrant.
+
+
+@subsection thread_safety Thread safety
+
+Most GLFW functions must only be called from the main thread (the thread that
+calls main), but some may be called from any thread once the library has been
+initialized. Before initialization the whole library is thread-unsafe.
+
+The reference documentation for every GLFW function states whether it is limited
+to the main thread.
+
+Initialization, termination, event processing and the creation and
+destruction of windows, cursors and OpenGL and OpenGL ES contexts are all
+restricted to the main thread due to limitations of one or several platforms.
+
+Because event processing must be performed on the main thread, all callbacks
+except for the error callback will only be called on that thread. The error
+callback may be called on any thread, as any GLFW function may generate errors.
+
+The error code and description may be queried from any thread.
+
+ - @ref glfwGetError
+
+Empty events may be posted from any thread.
+
+ - @ref glfwPostEmptyEvent
+
+The window user pointer and close flag may be read and written from any thread,
+but this is not synchronized by GLFW.
+
+ - @ref glfwGetWindowUserPointer
+ - @ref glfwSetWindowUserPointer
+ - @ref glfwWindowShouldClose
+ - @ref glfwSetWindowShouldClose
+
+These functions for working with OpenGL and OpenGL ES contexts may be called
+from any thread, but the window object is not synchronized by GLFW.
+
+ - @ref glfwMakeContextCurrent
+ - @ref glfwGetCurrentContext
+ - @ref glfwSwapBuffers
+ - @ref glfwSwapInterval
+ - @ref glfwExtensionSupported
+ - @ref glfwGetProcAddress
+
+The raw timer functions may be called from any thread.
+
+ - @ref glfwGetTimerFrequency
+ - @ref glfwGetTimerValue
+
+The regular timer may be used from any thread, but reading and writing the timer
+offset is not synchronized by GLFW.
+
+ - @ref glfwGetTime
+ - @ref glfwSetTime
+
+Library version information may be queried from any thread.
+
+ - @ref glfwGetVersion
+ - @ref glfwGetVersionString
+
+Platform information may be queried from any thread.
+
+ - @ref glfwPlatformSupported
+ - @ref glfwGetPlatform
+
+All Vulkan related functions may be called from any thread.
+
+ - @ref glfwVulkanSupported
+ - @ref glfwGetRequiredInstanceExtensions
+ - @ref glfwGetInstanceProcAddress
+ - @ref glfwGetPhysicalDevicePresentationSupport
+ - @ref glfwCreateWindowSurface
+
+GLFW uses synchronization objects internally only to manage the per-thread
+context and error states. Additional synchronization is left to the
+application.
+
+Functions that may currently be called from any thread will always remain so,
+but functions that are currently limited to the main thread may be updated to
+allow calls from any thread in future releases.
+
+
+@subsection compatibility Version compatibility
+
+GLFW uses [Semantic Versioning](https://semver.org/). This guarantees source
+and binary backward compatibility with earlier minor versions of the API. This
+means that you can drop in a newer version of the library and existing programs
+will continue to compile and existing binaries will continue to run.
+
+Once a function or constant has been added, the signature of that function or
+value of that constant will remain unchanged until the next major version of
+GLFW. No compatibility of any kind is guaranteed between major versions.
+
+Undocumented behavior, i.e. behavior that is not described in the documentation,
+may change at any time until it is documented.
+
+If the reference documentation and the implementation differ, the reference
+documentation will almost always take precedence and the implementation will be
+fixed in the next release. The reference documentation will also take
+precedence over anything stated in a guide.
+
+
+@subsection event_order Event order
+
+The order of arrival of related events is not guaranteed to be consistent
+across platforms. The exception is synthetic key and mouse button release
+events, which are always delivered after the window defocus event.
+
+
+@section intro_version Version management
+
+GLFW provides mechanisms for identifying what version of GLFW your application
+was compiled against as well as what version it is currently running against.
+If you are loading GLFW dynamically (not just linking dynamically), you can use
+this to verify that the library binary is compatible with your application.
+
+
+@subsection intro_version_compile Compile-time version
+
+The compile-time version of GLFW is provided by the GLFW header with the
+`GLFW_VERSION_MAJOR`, `GLFW_VERSION_MINOR` and `GLFW_VERSION_REVISION` macros.
+
+@code
+printf("Compiled against GLFW %i.%i.%i\n",
+ GLFW_VERSION_MAJOR,
+ GLFW_VERSION_MINOR,
+ GLFW_VERSION_REVISION);
+@endcode
+
+
+@subsection intro_version_runtime Run-time version
+
+The run-time version can be retrieved with @ref glfwGetVersion, a function that
+may be called regardless of whether GLFW is initialized.
+
+@code
+int major, minor, revision;
+glfwGetVersion(&major, &minor, &revision);
+
+printf("Running against GLFW %i.%i.%i\n", major, minor, revision);
+@endcode
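+
+If you are loading the library dynamically, one possible compatibility check,
+assuming the semantic versioning rules described under @ref compatibility, is
+to compare this against the compile-time version macros:
+
+@code
+int major, minor, revision;
+glfwGetVersion(&major, &minor, &revision);
+
+if (major != GLFW_VERSION_MAJOR || minor < GLFW_VERSION_MINOR)
+{
+    // The loaded library binary is not compatible with the headers the
+    // application was compiled against
+}
+@endcode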
+
+
+@subsection intro_version_string Version string
+
+GLFW 3 also provides a compile-time generated version string that describes the
+version, platform, compiler and any platform-specific compile-time options.
+This is primarily intended for submitting bug reports, to allow developers to
+see which code paths are enabled in a binary.
+
+The version string is returned by @ref glfwGetVersionString, a function that may
+be called regardless of whether GLFW is initialized.
+
+__Do not use the version string__ to parse the GLFW library version. The @ref
+glfwGetVersion function already provides the version of the running library
+binary.
+
+__Do not use the version string__ to parse what platforms are supported. The @ref
+glfwPlatformSupported function lets you query platform support.
+
+__GLFW 3.4:__ The format of this string was changed to support the addition of
+[runtime platform selection](@ref platform).
+
+The format of the string is as follows:
+ - The version of GLFW
+ - For each supported platform:
+ - The name of the window system API
+ - The name of the window system specific context creation API, if applicable
+ - The names of the always supported context creation APIs EGL and OSMesa
+ - Any additional compile-time options, APIs and (on Windows) what compiler was used
+
+For example, GLFW 3.4 compiled as a DLL for Windows with MinGW may have a version string
+like this:
+
+@code
+3.4.0 Win32 WGL Null EGL OSMesa MinGW DLL
+@endcode
+
+While GLFW compiled as a static library for Linux with both Wayland and X11 enabled may
+have a version string like this:
+
+@code
+3.4.0 Wayland X11 GLX Null EGL OSMesa monotonic
+@endcode
+
+*/
diff --git a/chromium/third_party/dawn/third_party/glfw/docs/main.dox b/chromium/third_party/dawn/third_party/glfw/docs/main.dox
new file mode 100644
index 00000000000..995c2f56829
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/docs/main.dox
@@ -0,0 +1,46 @@
+/*!
+
+@mainpage notitle
+
+@section main_intro Introduction
+
+GLFW is a free, Open Source, multi-platform library for OpenGL, OpenGL ES and
+Vulkan application development. It provides a simple, platform-independent API
+for creating windows, contexts and surfaces, reading input, handling events, etc.
+
+@ref news_34 lists new features, caveats and deprecations.
+
+@ref quick_guide is a guide for users new to GLFW. It takes you through how to
+write a small but complete program.
+
+There are guides for each section of the API:
+
+ - @ref intro_guide – initialization, error handling and high-level design
+ - @ref window_guide – creating and working with windows and framebuffers
+ - @ref context_guide – working with OpenGL and OpenGL ES contexts
+ - @ref vulkan_guide - working with Vulkan objects and extensions
+ - @ref monitor_guide – enumerating and working with monitors and video modes
+ - @ref input_guide – receiving events, polling and processing input
+
+Once you have written a program, see @ref compile_guide and @ref build_guide.
+
+The [reference documentation](modules.html) provides more detailed information
+about specific functions.
+
+@ref moving_guide explains what has changed and how to update existing code to
+use the new API.
+
+There is a section on @ref guarantees_limitations for pointer lifetimes,
+reentrancy, thread safety, event order and backward and forward compatibility.
+
+The [FAQ](https://www.glfw.org/faq.html) answers many common questions about the
+design, implementation and use of GLFW.
+
+Finally, @ref compat_guide explains what APIs, standards and protocols GLFW uses
+and what happens when they are not present on a given machine.
+
+This documentation was generated with Doxygen. The sources for it are available
+in both the [source distribution](https://www.glfw.org/download.html) and
+[GitHub repository](https://github.com/glfw/glfw).
+
+*/
diff --git a/chromium/third_party/dawn/third_party/glfw/docs/monitor.dox b/chromium/third_party/dawn/third_party/glfw/docs/monitor.dox
new file mode 100644
index 00000000000..86eb4540b08
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/docs/monitor.dox
@@ -0,0 +1,268 @@
+/*!
+
+@page monitor_guide Monitor guide
+
+@tableofcontents
+
+This guide introduces the monitor related functions of GLFW. For details on
+a specific function in this category, see the @ref monitor. There are also
+guides for the other areas of GLFW.
+
+ - @ref intro_guide
+ - @ref window_guide
+ - @ref context_guide
+ - @ref vulkan_guide
+ - @ref input_guide
+
+
+@section monitor_object Monitor objects
+
+A monitor object represents a currently connected monitor and is represented as
+a pointer to the [opaque](https://en.wikipedia.org/wiki/Opaque_data_type) type
+@ref GLFWmonitor. Monitor objects cannot be created or destroyed by the
+application and retain their addresses until the monitors they represent are
+disconnected or until the library is [terminated](@ref intro_init_terminate).
+
+Each monitor has a current video mode, a list of supported video modes,
+a virtual position, a human-readable name, an estimated physical size and
+a gamma ramp. One of the monitors is the primary monitor.
+
+The virtual position of a monitor is in
+[screen coordinates](@ref coordinate_systems) and, together with the current
+video mode, describes the viewports that the connected monitors provide into the
+virtual desktop that spans them.
+
+To see how GLFW views your monitor setup and its available video modes, run the
+`monitors` test program.
+
+
+@subsection monitor_monitors Retrieving monitors
+
+The primary monitor is returned by @ref glfwGetPrimaryMonitor. It is the user's
+preferred monitor and is usually the one with global UI elements like task bar
+or menu bar.
+
+@code
+GLFWmonitor* primary = glfwGetPrimaryMonitor();
+@endcode
+
+You can retrieve all currently connected monitors with @ref glfwGetMonitors.
+See the reference documentation for the lifetime of the returned array.
+
+@code
+int count;
+GLFWmonitor** monitors = glfwGetMonitors(&count);
+@endcode
+
+The primary monitor is always the first monitor in the returned array, but other
+monitors may be moved to a different index when a monitor is connected or
+disconnected.
+
+
+@subsection monitor_event Monitor configuration changes
+
+If you wish to be notified when a monitor is connected or disconnected, set
+a monitor callback.
+
+@code
+glfwSetMonitorCallback(monitor_callback);
+@endcode
+
+The callback function receives the handle for the monitor that has been
+connected or disconnected and the event that occurred.
+
+@code
+void monitor_callback(GLFWmonitor* monitor, int event)
+{
+ if (event == GLFW_CONNECTED)
+ {
+ // The monitor was connected
+ }
+ else if (event == GLFW_DISCONNECTED)
+ {
+ // The monitor was disconnected
+ }
+}
+@endcode
+
+If a monitor is disconnected, all windows that are full screen on it will be
+switched to windowed mode before the callback is called. Only @ref
+glfwGetMonitorName and @ref glfwGetMonitorUserPointer will return useful values
+for a disconnected monitor and only before the monitor callback returns.
+
+
+@section monitor_properties Monitor properties
+
+Each monitor has a current video mode, a list of supported video modes,
+a virtual position, a content scale, a human-readable name, a user pointer, an
+estimated physical size and a gamma ramp.
+
+
+@subsection monitor_modes Video modes
+
+GLFW generally does a good job selecting a suitable video mode when you create
+a full screen window, change its video mode or make a windowed one full
+screen, but it is sometimes useful to know exactly which video modes are
+supported.
+
+Video modes are represented as @ref GLFWvidmode structures. You can get an
+array of the video modes supported by a monitor with @ref glfwGetVideoModes.
+See the reference documentation for the lifetime of the returned array.
+
+@code
+int count;
+GLFWvidmode* modes = glfwGetVideoModes(monitor, &count);
+@endcode
+
+To get the current video mode of a monitor call @ref glfwGetVideoMode. See the
+reference documentation for the lifetime of the returned pointer.
+
+@code
+const GLFWvidmode* mode = glfwGetVideoMode(monitor);
+@endcode
+
+The resolution of a video mode is specified in
+[screen coordinates](@ref coordinate_systems), not pixels.
+
+
+@subsection monitor_size Physical size
+
+The physical size of a monitor in millimetres, or an estimation of it, can be
+retrieved with @ref glfwGetMonitorPhysicalSize. This has no relation to its
+current _resolution_, i.e. the width and height of its current
+[video mode](@ref monitor_modes).
+
+@code
+int width_mm, height_mm;
+glfwGetMonitorPhysicalSize(monitor, &width_mm, &height_mm);
+@endcode
+
+While this can be used to calculate the raw DPI of a monitor, this is often not
+useful. Instead use the [monitor content scale](@ref monitor_scale) and
+[window content scale](@ref window_scale) to scale your content.
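+
+If you do need an estimate of the raw DPI, it can be derived from the physical
+size and the current video mode; a rough sketch (25.4 millimetres per inch):
+
+@code
+const GLFWvidmode* mode = glfwGetVideoMode(monitor);
+
+// Approximate horizontal DPI of the monitor
+float dpi = mode->width / (width_mm / 25.4f);
+@endcode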
+
+
+@subsection monitor_scale Content scale
+
+The content scale for a monitor can be retrieved with @ref
+glfwGetMonitorContentScale.
+
+@code
+float xscale, yscale;
+glfwGetMonitorContentScale(monitor, &xscale, &yscale);
+@endcode
+
+The content scale is the ratio between the current DPI and the platform's
+default DPI. This is especially important for text and any UI elements. If the
+pixel dimensions of your UI scaled by this look appropriate on your machine then
+it should appear at a reasonable size on other machines regardless of their DPI
+and scaling settings. This relies on the system DPI and scaling settings being
+somewhat correct.
+
+The content scale may depend on both the monitor resolution and pixel density
+and on user settings. It may be very different from the raw DPI calculated from
+the physical size and current resolution.
+
+
+@subsection monitor_pos Virtual position
+
+The position of the monitor on the virtual desktop, in
+[screen coordinates](@ref coordinate_systems), can be retrieved with @ref
+glfwGetMonitorPos.
+
+@code
+int xpos, ypos;
+glfwGetMonitorPos(monitor, &xpos, &ypos);
+@endcode
+
+
+@subsection monitor_workarea Work area
+
+The area of a monitor not occupied by global task bars or menu bars is the work
+area. This is specified in [screen coordinates](@ref coordinate_systems) and
+can be retrieved with @ref glfwGetMonitorWorkarea.
+
+@code
+int xpos, ypos, width, height;
+glfwGetMonitorWorkarea(monitor, &xpos, &ypos, &width, &height);
+@endcode
+
+
+@subsection monitor_name Human-readable name
+
+The human-readable, UTF-8 encoded name of a monitor is returned by @ref
+glfwGetMonitorName. See the reference documentation for the lifetime of the
+returned string.
+
+@code
+const char* name = glfwGetMonitorName(monitor);
+@endcode
+
+Monitor names are not guaranteed to be unique. Two monitors of the same model
+and make may have the same name. Only the monitor handle is guaranteed to be
+unique, and only until that monitor is disconnected.
+
+
+@subsection monitor_userptr User pointer
+
+Each monitor has a user pointer that can be set with @ref
+glfwSetMonitorUserPointer and queried with @ref glfwGetMonitorUserPointer. This
+can be used for any purpose you need and will not be modified by GLFW. The
+value will be kept until the monitor is disconnected or until the library is
+terminated.
+
+The initial value of the pointer is `NULL`.
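+
+For example, a hypothetical application-defined `MonitorData` struct could be
+attached to a monitor and retrieved later, such as from the monitor callback:
+
+@code
+// MonitorData is a hypothetical application-defined type
+struct MonitorData* data = calloc(1, sizeof(struct MonitorData));
+glfwSetMonitorUserPointer(monitor, data);
+
+// Later, for example in the monitor callback
+struct MonitorData* stored = glfwGetMonitorUserPointer(monitor);
+@endcode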
+
+
+@subsection monitor_gamma Gamma ramp
+
+The gamma ramp of a monitor can be set with @ref glfwSetGammaRamp, which accepts
+a monitor handle and a pointer to a @ref GLFWgammaramp structure.
+
+@code
+GLFWgammaramp ramp;
+unsigned short red[256], green[256], blue[256];
+
+ramp.size = 256;
+ramp.red = red;
+ramp.green = green;
+ramp.blue = blue;
+
+for (unsigned int i = 0; i < ramp.size; i++)
+{
+ // Fill out gamma ramp arrays as desired
+}
+
+glfwSetGammaRamp(monitor, &ramp);
+@endcode
+
+The gamma ramp data is copied before the function returns, so there is no need
+to keep it around once the ramp has been set.
+
+It is recommended that your gamma ramp have the same size as the current gamma
+ramp for that monitor.
+
+The current gamma ramp for a monitor is returned by @ref glfwGetGammaRamp. See
+the reference documentation for the lifetime of the returned structure.
+
+@code
+const GLFWgammaramp* ramp = glfwGetGammaRamp(monitor);
+@endcode
+
+If you wish to set a regular gamma ramp, you can have GLFW calculate it for you
+from the desired exponent with @ref glfwSetGamma, which in turn calls @ref
+glfwSetGammaRamp with the resulting ramp.
+
+@code
+glfwSetGamma(monitor, 1.0);
+@endcode
+
+To experiment with gamma correction via the @ref glfwSetGamma function, run the
+`gamma` test program.
+
+@note The software controlled gamma ramp is applied _in addition_ to the
+hardware gamma correction, which today is usually an approximation of sRGB
+gamma. This means that setting a perfectly linear ramp, or gamma 1.0, will
+produce the default (usually sRGB-like) behavior.
+
+*/
diff --git a/chromium/third_party/dawn/third_party/glfw/docs/moving.dox b/chromium/third_party/dawn/third_party/glfw/docs/moving.dox
new file mode 100644
index 00000000000..b80d84a2b10
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/docs/moving.dox
@@ -0,0 +1,513 @@
+/*!
+
+@page moving_guide Moving from GLFW 2 to 3
+
+@tableofcontents
+
+This is a transition guide for moving from GLFW 2 to 3. It describes what has
+changed or been removed, but does _not_ include
+[new features](@ref news) unless they are required when moving an existing code
+base onto the new API. For example, the new multi-monitor functions are
+required to create full screen windows with GLFW 3.
+
+
+@section moving_removed Changed and removed features
+
+@subsection moving_renamed_files Renamed library and header file
+
+The GLFW 3 header is named @ref glfw3.h and moved to the `GLFW` directory, to
+avoid collisions with the headers of other major versions. Similarly, the GLFW
+3 library is named `glfw3`, except when it's installed as a shared library on
+Unix-like systems, where it uses the
+[soname](https://en.wikipedia.org/wiki/soname) `libglfw.so.3`.
+
+@par Old syntax
+@code
+#include <GL/glfw.h>
+@endcode
+
+@par New syntax
+@code
+#include <GLFW/glfw3.h>
+@endcode
+
+
+@subsection moving_threads Removal of threading functions
+
+The threading functions have been removed, including the per-thread sleep
+function. They were fairly primitive, under-used, poorly integrated and took
+time away from the focus of GLFW (i.e. context, input and window). There are
+better threading libraries available and native threading support is available
+in both [C++11](https://en.cppreference.com/w/cpp/thread) and
+[C11](https://en.cppreference.com/w/c/thread), both of which are gaining
+traction.
+
+If you wish to use the C++11 or C11 facilities but your compiler doesn't yet
+support them, see the
+[TinyThread++](https://gitorious.org/tinythread/tinythreadpp) and
+[TinyCThread](https://github.com/tinycthread/tinycthread) projects created by
+the original author of GLFW. These libraries implement a usable subset of the
+threading APIs in C++11 and C11, and in fact some GLFW 3 test programs use
+TinyCThread.
+
+However, GLFW 3 has better support for _use from multiple threads_ than GLFW
+2 had. Contexts can be made current on any thread, although only a single
+thread at a time, and the documentation explicitly states which functions may be
+used from any thread and which must only be used from the main thread.
+
+@par Removed functions
+`glfwSleep`, `glfwCreateThread`, `glfwDestroyThread`, `glfwWaitThread`,
+`glfwGetThreadID`, `glfwCreateMutex`, `glfwDestroyMutex`, `glfwLockMutex`,
+`glfwUnlockMutex`, `glfwCreateCond`, `glfwDestroyCond`, `glfwWaitCond`,
+`glfwSignalCond`, `glfwBroadcastCond` and `glfwGetNumberOfProcessors`.
+
+@par Removed types
+`GLFWthreadfun`
+
+
+@subsection moving_image Removal of image and texture loading
+
+The image and texture loading functions have been removed. They only supported
+the Targa image format, making them mostly useful for beginner level examples.
+To become of sufficiently high quality to warrant keeping them in GLFW 3, they
+would need not only to support other formats, but also modern extensions to
+OpenGL texturing. This would either add a number of external
+dependencies (libjpeg, libpng, etc.), or force GLFW to ship with inline versions
+of these libraries.
+
+As there already are libraries doing this, it is unnecessary both to duplicate
+the work and to tie the duplicate to GLFW. The resulting library would also be
+platform-independent, as both OpenGL and stdio are available wherever GLFW is.
+
+@par Removed functions
+`glfwReadImage`, `glfwReadMemoryImage`, `glfwFreeImage`, `glfwLoadTexture2D`,
+`glfwLoadMemoryTexture2D` and `glfwLoadTextureImage2D`.
+
+
+@subsection moving_stdcall Removal of GLFWCALL macro
+
+The `GLFWCALL` macro, which made callback functions use
+[__stdcall](https://msdn.microsoft.com/en-us/library/zxk0tw93.aspx) on Windows,
+has been removed. GLFW is written in C, not Pascal. Removing this macro means
+there's one less thing for application programmers to remember, i.e. the
+requirement to mark all callback functions with `GLFWCALL`. It also simplifies
+the creation of DLLs and DLL link libraries, as there's no need to explicitly
+disable `@n` entry point suffixes.
+
+@par Old syntax
+@code
+void GLFWCALL callback_function(...);
+@endcode
+
+@par New syntax
+@code
+void callback_function(...);
+@endcode
+
+
+@subsection moving_window_handles Window handle parameters
+
+Because GLFW 3 supports multiple windows, window handle parameters have been
+added to all window-related GLFW functions and callbacks. The handle of
+a newly created window is returned by @ref glfwCreateWindow (formerly
+`glfwOpenWindow`). Window handles are pointers to the
+[opaque](https://en.wikipedia.org/wiki/Opaque_data_type) type @ref GLFWwindow.
+
+@par Old syntax
+@code
+glfwSetWindowTitle("New Window Title");
+@endcode
+
+@par New syntax
+@code
+glfwSetWindowTitle(window, "New Window Title");
+@endcode
+
+
+@subsection moving_monitor Explicit monitor selection
+
+GLFW 3 provides support for multiple monitors. To request a full screen mode window,
+instead of passing `GLFW_FULLSCREEN` you specify which monitor you wish the
+window to use. The @ref glfwGetPrimaryMonitor function returns the monitor that
+GLFW 2 would have selected, but there are many other
+[monitor functions](@ref monitor_guide). Monitor handles are pointers to the
+[opaque](https://en.wikipedia.org/wiki/Opaque_data_type) type @ref GLFWmonitor.
+
+@par Old basic full screen
+@code
+glfwOpenWindow(640, 480, 8, 8, 8, 0, 24, 0, GLFW_FULLSCREEN);
+@endcode
+
+@par New basic full screen
+@code
+window = glfwCreateWindow(640, 480, "My Window", glfwGetPrimaryMonitor(), NULL);
+@endcode
+
+@note The framebuffer bit depth parameters of `glfwOpenWindow` have been turned
+into [window hints](@ref window_hints), but as they have been given
+[sane defaults](@ref window_hints_values) you rarely need to set these hints.
+
+
+@subsection moving_autopoll Removal of automatic event polling
+
+GLFW 3 does not automatically poll for events in @ref glfwSwapBuffers, meaning
+you need to call @ref glfwPollEvents or @ref glfwWaitEvents yourself. Unlike
+buffer swap, which acts on a single window, the event processing functions act
+on all windows at once.
+
+@par Old basic main loop
+@code
+while (...)
+{
+ // Process input
+ // Render output
+ glfwSwapBuffers();
+}
+@endcode
+
+@par New basic main loop
+@code
+while (...)
+{
+ // Process input
+ // Render output
+ glfwSwapBuffers(window);
+ glfwPollEvents();
+}
+@endcode
+
+
+@subsection moving_context Explicit context management
+
+Each GLFW 3 window has its own OpenGL context and only you, the application
+programmer, can know which context should be current on which thread at any
+given time. Therefore, GLFW 3 leaves that decision to you.
+
+This means that you need to call @ref glfwMakeContextCurrent after creating
+a window before you can call any OpenGL functions.
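+
+A minimal sketch of the required order of calls:
+
+@code
+GLFWwindow* window = glfwCreateWindow(640, 480, "My Window", NULL, NULL);
+glfwMakeContextCurrent(window);
+
+// OpenGL functions may now be called on this thread
+@endcode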
+
+
+@subsection moving_hidpi Separation of window and framebuffer sizes
+
+Window positions and sizes now use screen coordinates, which may not be the same
+as pixels on machines with high-DPI monitors. This is important as OpenGL uses
+pixels, not screen coordinates. For example, the rectangle specified with
+`glViewport` needs to use pixels. Therefore, framebuffer size functions have
+been added. You can retrieve the size of the framebuffer of a window with @ref
+glfwGetFramebufferSize function. A framebuffer size callback has also been
+added, which can be set with @ref glfwSetFramebufferSizeCallback.
+
+@par Old basic viewport setup
+@code
+glfwGetWindowSize(&width, &height);
+glViewport(0, 0, width, height);
+@endcode
+
+@par New basic viewport setup
+@code
+glfwGetFramebufferSize(window, &width, &height);
+glViewport(0, 0, width, height);
+@endcode
+
+
+@subsection moving_window_close Window closing changes
+
+The `GLFW_OPENED` window parameter has been removed. As long as the window has
+not been destroyed, whether through @ref glfwDestroyWindow or @ref
+glfwTerminate, the window is "open".
+
+A user attempting to close a window is now just an event like any other. Unlike
+GLFW 2, windows and contexts created with GLFW 3 will never be destroyed unless
+you choose them to be. Each window now has a close flag that is set to
+`GLFW_TRUE` when the user attempts to close that window. By default, nothing else
+happens and the window stays visible. It is then up to you to either destroy
+the window, take some other action or ignore the request.
+
+You can query the close flag at any time with @ref glfwWindowShouldClose and set
+it at any time with @ref glfwSetWindowShouldClose.
+
+@par Old basic main loop
+@code
+while (glfwGetWindowParam(GLFW_OPENED))
+{
+ ...
+}
+@endcode
+
+@par New basic main loop
+@code
+while (!glfwWindowShouldClose(window))
+{
+ ...
+}
+@endcode
+
+The close callback no longer returns a value. Instead, it is called after the
+close flag has been set so it can override its value, if it chooses to, before
+event processing completes. You may however not call @ref glfwDestroyWindow
+from the close callback (or any other window related callback).
+
+@par Old syntax
+@code
+int GLFWCALL window_close_callback(void);
+@endcode
+
+@par New syntax
+@code
+void window_close_callback(GLFWwindow* window);
+@endcode
+
+@note GLFW never clears the close flag to `GLFW_FALSE`, meaning you can use it
+for other reasons to close the window as well, for example the user choosing
+Quit from an in-game menu.
+
+
+@subsection moving_hints Persistent window hints
+
+The `glfwOpenWindowHint` function has been renamed to @ref glfwWindowHint.
+
+Window hints are no longer reset to their default values on window creation, but
+instead retain their values until modified by @ref glfwWindowHint or @ref
+glfwDefaultWindowHints, or until the library is terminated and re-initialized.
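+
+If you create several windows with different settings, you can reset all hints
+to their defaults before setting only the ones you need, for example:
+
+@code
+glfwDefaultWindowHints();
+glfwWindowHint(GLFW_RESIZABLE, GLFW_FALSE);
+
+GLFWwindow* window = glfwCreateWindow(640, 480, "My Window", NULL, NULL);
+@endcode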
+
+
+@subsection moving_video_modes Video mode enumeration
+
+Video mode enumeration is now per-monitor. The @ref glfwGetVideoModes function
+now returns all available modes for a specific monitor instead of requiring you
+to guess how large an array you need. The `glfwGetDesktopMode` function, which
+had poorly defined behavior, has been replaced by @ref glfwGetVideoMode, which
+returns the current mode of a monitor.
+
+
+@subsection moving_char_up Removal of character actions
+
+The action parameter of the [character callback](@ref GLFWcharfun) has been
+removed. This was an artefact of the origin of GLFW, i.e. being developed in
+English by a Swede. However, many keyboard layouts require more than one key to
+produce characters with diacritical marks. Even the Swedish keyboard layout
+requires this for uncommon cases like ü.
+
+@par Old syntax
+@code
+void GLFWCALL character_callback(int character, int action);
+@endcode
+
+@par New syntax
+@code
+void character_callback(GLFWwindow* window, int character);
+@endcode
+
+
+@subsection moving_cursorpos Cursor position changes
+
+The `glfwGetMousePos` function has been renamed to @ref glfwGetCursorPos,
+`glfwSetMousePos` to @ref glfwSetCursorPos and `glfwSetMousePosCallback` to @ref
+glfwSetCursorPosCallback.
+
+The cursor position is now `double` instead of `int`, both for the direct
+functions and for the callback. Some platforms can provide sub-pixel cursor
+movement and this data is now passed on to the application where available. On
+platforms where this is not provided, the decimal part is zero.
+
+GLFW 3 only allows you to position the cursor within a window using @ref
+glfwSetCursorPos (formerly `glfwSetMousePos`) when that window is active.
+Unless the window is active, the function fails silently.
+
+
+@subsection moving_wheel Wheel position replaced by scroll offsets
+
+The `glfwGetMouseWheel` function has been removed. Scrolling is the input of
+offsets and has no absolute position. The mouse wheel callback has been
+replaced by a [scroll callback](@ref GLFWscrollfun) that receives
+two-dimensional floating point scroll offsets. This allows you to receive
+precise scroll data from, for example, modern touchpads.
+
+@par Old syntax
+@code
+void GLFWCALL mouse_wheel_callback(int position);
+@endcode
+
+@par New syntax
+@code
+void scroll_callback(GLFWwindow* window, double xoffset, double yoffset);
+@endcode
+
+@par Removed functions
+`glfwGetMouseWheel`
+
+
+@subsection moving_repeat Key repeat action
+
+The `GLFW_KEY_REPEAT` enable has been removed and key repeat is always enabled
+for both keys and characters. A new key action, `GLFW_REPEAT`, has been added
+to allow the [key callback](@ref GLFWkeyfun) to distinguish an initial key press
+from a repeat. Note that @ref glfwGetKey still returns only `GLFW_PRESS` or
+`GLFW_RELEASE`.
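+
+A sketch of a key callback that distinguishes the initial press from repeats:
+
+@code
+void key_callback(GLFWwindow* window, int key, int scancode, int action, int mods)
+{
+    if (action == GLFW_PRESS)
+    {
+        // Initial press of the key
+    }
+    else if (action == GLFW_REPEAT)
+    {
+        // Repeat generated while the key is held down
+    }
+}
+@endcode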
+
+
+@subsection moving_keys Physical key input
+
+GLFW 3 key tokens map to physical keys, unlike in GLFW 2 where they mapped to
+the values generated by the current keyboard layout. The tokens are named
+according to the values they would have using the standard US layout, but this
+is only a convenience, as most programmers are assumed to know that layout.
+This means that (for example) `GLFW_KEY_LEFT_BRACKET` is always a single key and
+is the same key in the same place regardless of what keyboard layout the users
+of your program have.
+
+The key input facility was never meant for text input, although using it that
+way worked slightly better in GLFW 2. If you were using it to input text, you
+should be using the character callback instead, on both GLFW 2 and 3. This will
+give you the characters being input, as opposed to the keys being pressed.
+
+GLFW 3 has key tokens for all keys on a standard 105 key keyboard, so instead of
+having to remember whether to check for `a` or `A`, you now check for
+@ref GLFW_KEY_A.
+
+
+@subsection moving_joystick Joystick function changes
+
+The `glfwGetJoystickPos` function has been renamed to @ref glfwGetJoystickAxes.
+
+The `glfwGetJoystickParam` function and the `GLFW_PRESENT`, `GLFW_AXES` and
+`GLFW_BUTTONS` tokens have been replaced by the @ref glfwJoystickPresent
+function as well as axis and button counts returned by the @ref
+glfwGetJoystickAxes and @ref glfwGetJoystickButtons functions.
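+
+For example, presence and the axis and button counts for the first joystick can
+be queried like this:
+
+@code
+if (glfwJoystickPresent(GLFW_JOYSTICK_1))
+{
+    int axis_count, button_count;
+    const float* axes = glfwGetJoystickAxes(GLFW_JOYSTICK_1, &axis_count);
+    const unsigned char* buttons = glfwGetJoystickButtons(GLFW_JOYSTICK_1, &button_count);
+}
+@endcode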
+
+
+@subsection moving_mbcs Win32 MBCS support
+
+The Win32 port of GLFW 3 will not compile in
+[MBCS mode](https://msdn.microsoft.com/en-us/library/5z097dxa.aspx).
+However, because the use of the Unicode version of the Win32 API doesn't affect
+the process as a whole, but only those windows created using it, it's perfectly
+possible to call MBCS functions from other parts of the same application.
+Therefore, even if an application using GLFW has MBCS mode code, there's no need
+for GLFW itself to support it.
+
+
+@subsection moving_windows Support for versions of Windows older than XP
+
+All explicit support for versions of Windows older than XP has been removed.
+There is no code that actively prevents GLFW 3 from running on these earlier
+versions, but it uses Win32 functions that those versions lack.
+
+Windows XP was released in 2001, and by now (January 2015) it has not only
+replaced almost all earlier versions of Windows, but is itself rapidly being
+replaced by Windows 7 and 8. The MSDN library doesn't even provide
+documentation for versions older than Windows 2000, making it difficult to
+maintain compatibility with these versions even if it was deemed worth the
+effort.
+
+The Win32 API has also not stood still, and GLFW 3 uses many functions only
+present on Windows XP or later. Even supporting an OS as new as XP (new
+from the perspective of GLFW 2, which still supports Windows 95) requires
+runtime checking for a number of functions that are present only on modern
+versions of Windows.
+
+
+@subsection moving_syskeys Capture of system-wide hotkeys
+
+The ability to disable and capture system-wide hotkeys like Alt+Tab has been
+removed. Modern applications, whether they're games, scientific visualisations
+or something else, are nowadays expected to be good desktop citizens and allow
+these hotkeys to function even when running in full screen mode.
+
+
+@subsection moving_terminate Automatic termination
+
+GLFW 3 does not register @ref glfwTerminate with `atexit` at initialization,
+because `exit` calls registered functions from the calling thread and while it
+is permitted to call `exit` from any thread, @ref glfwTerminate must only be
+called from the main thread.
+
+To release all resources allocated by GLFW, you should call @ref glfwTerminate
+yourself, from the main thread, before the program terminates. Note that this
+destroys all windows not already destroyed with @ref glfwDestroyWindow,
+invalidating any window handles you may still have.
+
+
+@subsection moving_glu GLU header inclusion
+
+GLFW 3 does not by default include the GLU header and GLU itself has been
+deprecated by [Khronos](https://en.wikipedia.org/wiki/Khronos_Group). __New
+projects should not use GLU__, but if you need it for legacy code that
+has been moved to GLFW 3, you can request that the GLFW header includes it by
+defining @ref GLFW_INCLUDE_GLU before the inclusion of the GLFW header.
+
+@par Old syntax
+@code
+#include <GL/glfw.h>
+@endcode
+
+@par New syntax
+@code
+#define GLFW_INCLUDE_GLU
+#include <GLFW/glfw3.h>
+@endcode
+
+There are many libraries that offer replacements for the functionality offered
+by GLU. For the matrix helper functions, see math libraries like
+[GLM](https://github.com/g-truc/glm) (for C++),
+[linmath.h](https://github.com/datenwolf/linmath.h) (for C) and others. For the
+tessellation functions, see for example
+[libtess2](https://github.com/memononen/libtess2).
+
+
+@section moving_tables Name change tables
+
+
+@subsection moving_renamed_functions Renamed functions
+
+| GLFW 2 | GLFW 3 | Notes |
+| --------------------------- | ----------------------------- | ----- |
+| `glfwOpenWindow` | @ref glfwCreateWindow | All channel bit depths are now hints |
+| `glfwCloseWindow` | @ref glfwDestroyWindow | |
+| `glfwOpenWindowHint` | @ref glfwWindowHint | Now accepts all `GLFW_*_BITS` tokens |
+| `glfwEnable` | @ref glfwSetInputMode | |
+| `glfwDisable` | @ref glfwSetInputMode | |
+| `glfwGetMousePos` | @ref glfwGetCursorPos | |
+| `glfwSetMousePos` | @ref glfwSetCursorPos | |
+| `glfwSetMousePosCallback` | @ref glfwSetCursorPosCallback | |
+| `glfwSetMouseWheelCallback` | @ref glfwSetScrollCallback | Accepts two-dimensional scroll offsets as doubles |
+| `glfwGetJoystickPos` | @ref glfwGetJoystickAxes | |
+| `glfwGetWindowParam` | @ref glfwGetWindowAttrib | |
+| `glfwGetGLVersion` | @ref glfwGetWindowAttrib | Use `GLFW_CONTEXT_VERSION_MAJOR`, `GLFW_CONTEXT_VERSION_MINOR` and `GLFW_CONTEXT_REVISION` |
+| `glfwGetDesktopMode` | @ref glfwGetVideoMode | Returns the current mode of a monitor |
+| `glfwGetJoystickParam` | @ref glfwJoystickPresent | The axis and button counts are provided by @ref glfwGetJoystickAxes and @ref glfwGetJoystickButtons |
+
+
+@subsection moving_renamed_types Renamed types
+
+| GLFW 2 | GLFW 3 | Notes |
+| ------------------- | --------------------- | ----- |
+| `GLFWmousewheelfun` | @ref GLFWscrollfun | |
+| `GLFWmouseposfun` | @ref GLFWcursorposfun | |
+
+
+@subsection moving_renamed_tokens Renamed tokens
+
+| GLFW 2 | GLFW 3 | Notes |
+| --------------------------- | ---------------------------- | ----- |
+| `GLFW_OPENGL_VERSION_MAJOR` | `GLFW_CONTEXT_VERSION_MAJOR` | Renamed as it applies to OpenGL ES as well |
+| `GLFW_OPENGL_VERSION_MINOR` | `GLFW_CONTEXT_VERSION_MINOR` | Renamed as it applies to OpenGL ES as well |
+| `GLFW_FSAA_SAMPLES` | `GLFW_SAMPLES` | Renamed to match the OpenGL API |
+| `GLFW_ACTIVE` | `GLFW_FOCUSED` | Renamed to match the window focus callback |
+| `GLFW_WINDOW_NO_RESIZE` | `GLFW_RESIZABLE` | The default has been inverted |
+| `GLFW_MOUSE_CURSOR` | `GLFW_CURSOR` | Used with @ref glfwSetInputMode |
+| `GLFW_KEY_ESC` | `GLFW_KEY_ESCAPE` | |
+| `GLFW_KEY_DEL` | `GLFW_KEY_DELETE` | |
+| `GLFW_KEY_PAGEUP` | `GLFW_KEY_PAGE_UP` | |
+| `GLFW_KEY_PAGEDOWN` | `GLFW_KEY_PAGE_DOWN` | |
+| `GLFW_KEY_KP_NUM_LOCK` | `GLFW_KEY_NUM_LOCK` | |
+| `GLFW_KEY_LCTRL` | `GLFW_KEY_LEFT_CONTROL` | |
+| `GLFW_KEY_LSHIFT` | `GLFW_KEY_LEFT_SHIFT` | |
+| `GLFW_KEY_LALT` | `GLFW_KEY_LEFT_ALT` | |
+| `GLFW_KEY_LSUPER` | `GLFW_KEY_LEFT_SUPER` | |
+| `GLFW_KEY_RCTRL` | `GLFW_KEY_RIGHT_CONTROL` | |
+| `GLFW_KEY_RSHIFT` | `GLFW_KEY_RIGHT_SHIFT` | |
+| `GLFW_KEY_RALT` | `GLFW_KEY_RIGHT_ALT` | |
+| `GLFW_KEY_RSUPER` | `GLFW_KEY_RIGHT_SUPER` | |
+
+*/
diff --git a/chromium/third_party/dawn/third_party/glfw/docs/news.dox b/chromium/third_party/dawn/third_party/glfw/docs/news.dox
new file mode 100644
index 00000000000..fbf603195c1
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/docs/news.dox
@@ -0,0 +1,240 @@
+/*!
+
+@page news Release notes
+
+@tableofcontents
+
+
+@section news_34 Release notes for version 3.4
+
+@subsection features_34 New features in version 3.4
+
+@subsubsection runtime_platform_34 Runtime platform selection
+
+GLFW now supports being compiled for multiple backends and selecting between
+them at runtime with the @ref GLFW_PLATFORM init hint. After initialization the
+selected platform can be queried with @ref glfwGetPlatform. You can check if
+support for a given platform is compiled in with @ref glfwPlatformSupported.
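+
+A minimal sketch of requesting a specific platform before initialization and
+verifying the result afterwards (using Wayland purely as an example):
+
+@code
+glfwInitHint(GLFW_PLATFORM, GLFW_PLATFORM_WAYLAND);
+
+if (!glfwInit())
+{
+    // Initialization failed, possibly because Wayland support is unavailable
+}
+
+if (glfwGetPlatform() == GLFW_PLATFORM_WAYLAND)
+{
+    // The library selected the Wayland backend
+}
+@endcode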
+
+
+@subsubsection standard_cursors_34 More standard cursors
+
+GLFW now provides the standard cursor shapes @ref GLFW_RESIZE_NWSE_CURSOR and
+@ref GLFW_RESIZE_NESW_CURSOR for diagonal resizing, @ref GLFW_RESIZE_ALL_CURSOR
+for omni-directional resizing and @ref GLFW_NOT_ALLOWED_CURSOR for showing an
+action is not allowed.
+
+Unlike the original set, these shapes may not be available everywhere and
+creation will then fail with the new @ref GLFW_CURSOR_UNAVAILABLE error.
+
+The cursors for horizontal and vertical resizing are now referred to as @ref
+GLFW_RESIZE_EW_CURSOR and @ref GLFW_RESIZE_NS_CURSOR, and the pointing hand
+cursor is now referred to as @ref GLFW_POINTING_HAND_CURSOR. The older names
+are still available.
+
+For more information see @ref cursor_standard.
+
+
+@subsubsection mouse_passthrough_34 Mouse event passthrough
+
+GLFW now provides the [GLFW_MOUSE_PASSTHROUGH](@ref GLFW_MOUSE_PASSTHROUGH_hint)
+window hint for making a window transparent to mouse input, letting events pass
+to whatever window is behind it. This can also be changed after window
+creation with the matching [window attribute](@ref GLFW_MOUSE_PASSTHROUGH_attrib).
+
+
+@subsubsection features_34_angle_backend Support for ANGLE rendering backend selection
+
+GLFW now provides the
+[GLFW_ANGLE_PLATFORM_TYPE](@ref GLFW_ANGLE_PLATFORM_TYPE_hint) init hint for
+requesting a specific rendering backend when using
+[ANGLE](https://chromium.googlesource.com/angle/angle/) to create OpenGL ES
+contexts.
+
+
+@subsubsection features_34_init_allocator Support for custom memory allocator
+
+GLFW now supports plugging a custom memory allocator at initialization with @ref
+glfwInitAllocator. The allocator is a struct of type @ref GLFWallocator with
+function pointers corresponding to the standard library functions `malloc`,
+`realloc` and `free`.
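+
+A sketch of plugging in thin wrappers around the standard library functions;
+the member names used here are assumptions based on the @ref GLFWallocatefun,
+@ref GLFWreallocatefun and @ref GLFWdeallocatefun callback types, so see the
+@ref GLFWallocator reference for the exact layout:
+
+@code
+void* my_allocate(size_t size, void* user)
+{
+    return malloc(size);
+}
+
+void* my_reallocate(void* block, size_t size, void* user)
+{
+    return realloc(block, size);
+}
+
+void my_deallocate(void* block, void* user)
+{
+    free(block);
+}
+
+GLFWallocator allocator;
+allocator.allocate = my_allocate;
+allocator.reallocate = my_reallocate;
+allocator.deallocate = my_deallocate;
+allocator.user = NULL;
+
+glfwInitAllocator(&allocator);
+@endcode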
+
+For more information see @ref init_allocator.
+
+
+@subsubsection features_34_win32_keymenu Support for keyboard access to Windows window menu
+
+GLFW now provides the
+[GLFW_WIN32_KEYBOARD_MENU](@ref GLFW_WIN32_KEYBOARD_MENU_hint) window hint for
+enabling keyboard access to the window menu via the Alt+Space and
+Alt-and-then-Space shortcuts. This may be useful for more GUI-oriented
+applications.
+
+
+@subsection caveats Caveats for version 3.4
+
+@subsubsection native_34 Multiple sets of native access functions
+
+Because GLFW now supports runtime selection of platform (window system), a library binary
+may export native access functions for multiple platforms. Starting with version 3.4 you
+must not assume that GLFW is running on a platform just because it exports native access
+functions for it. After initialization you can query the selected platform with @ref
+glfwGetPlatform.
+
+
+@subsubsection version_string_34 Version string format has been changed
+
+Because GLFW now supports runtime selection of platform (window system), the version
+string returned by @ref glfwGetVersionString has been expanded. It now contains the names
+of all APIs for all the platforms that the library binary supports.
+
+
+@subsubsection joysticks_34 Joystick support is initialized on demand
+
+The joystick part of GLFW is now initialized when first used, primarily to work
+around faulty Windows drivers that cause DirectInput to take up to several
+seconds to enumerate devices.
+
+This change will usually not be observable. However, if your application waits
+for events without having first called any joystick function or created any
+visible windows, the wait may never unblock as GLFW may not yet have subscribed
+to joystick related OS events.
+
+To work around this, call any joystick function before waiting for events, for
+example by setting a [joystick callback](@ref joystick_event).
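+
+For example, a joystick callback can be set right after initialization;
+a sketch:
+
+@code
+void joystick_callback(int jid, int event)
+{
+    if (event == GLFW_CONNECTED)
+    {
+        // A joystick was connected
+    }
+    else if (event == GLFW_DISCONNECTED)
+    {
+        // A joystick was disconnected
+    }
+}
+
+glfwSetJoystickCallback(joystick_callback);
+@endcode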
+
+
+@subsubsection standalone_34 Tests and examples are disabled when built as a sub-project
+
+GLFW now does not build the tests and examples when it is added as
+a subdirectory of another CMake project. To enable these, set the @ref
+GLFW_BUILD_TESTS and @ref GLFW_BUILD_EXAMPLES cache variables before adding the
+GLFW subdirectory.
+
+@code{.cmake}
+set(GLFW_BUILD_EXAMPLES ON CACHE BOOL "" FORCE)
+set(GLFW_BUILD_TESTS ON CACHE BOOL "" FORCE)
+add_subdirectory(path/to/glfw)
+@endcode
+
+
+@subsubsection initmenu_34 macOS main menu now created at initialization
+
+GLFW now creates the main menu and completes the initialization of NSApplication
+during initialization. Programs that do not want a main menu can disable it
+with the [GLFW_COCOA_MENUBAR](@ref GLFW_COCOA_MENUBAR_hint) init hint.
+
+
+@subsubsection corevideo_34 CoreVideo dependency has been removed
+
+GLFW no longer depends on the CoreVideo framework on macOS and it no longer
+needs to be specified during compilation or linking.
+
+
+@subsubsection caveat_fbtransparency_34 Framebuffer transparency requires DWM transparency
+
+GLFW no longer supports framebuffer transparency enabled via @ref
+GLFW_TRANSPARENT_FRAMEBUFFER on Windows 7 if DWM transparency is off
+(the Transparency setting under Personalization > Window Color).
+
+
+@subsubsection emptyevents_34 Empty events on X11 no longer roundtrip to server
+
+Events posted with @ref glfwPostEmptyEvent now use a separate unnamed pipe
+instead of sending an X11 client event to the helper window.
+
+
+@subsection deprecations_34 Deprecations in version 3.4
+
+@subsection removals_34 Removals in 3.4
+
+@subsubsection vulkan_static_34 GLFW_VULKAN_STATIC CMake option has been removed
+
+This option was used to compile GLFW directly linked with the Vulkan loader, instead of
+using dynamic loading to get hold of `vkGetInstanceProcAddr` at initialization. This is
+now done by calling the @ref glfwInitVulkanLoader function before initialization.
+
+If you need backward compatibility, this macro can still be defined for GLFW 3.4 and will
+have no effect. The call to @ref glfwInitVulkanLoader can be conditionally enabled in
+your code by checking the @ref GLFW_VERSION_MAJOR and @ref GLFW_VERSION_MINOR macros.
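+
+A sketch of such a conditional call, assuming the application links the Vulkan
+loader statically so that `vkGetInstanceProcAddr` is available directly:
+
+@code
+#if GLFW_VERSION_MAJOR > 3 || (GLFW_VERSION_MAJOR == 3 && GLFW_VERSION_MINOR >= 4)
+glfwInitVulkanLoader(vkGetInstanceProcAddr);
+#endif
+
+if (!glfwInit())
+{
+    // Initialization failed
+}
+@endcode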
+
+
+@subsubsection osmesa_option_34 GLFW_USE_OSMESA CMake option has been removed
+
+This option was used to compile GLFW for the Null platform. The Null platform is now
+always supported. To produce a library binary that only supports this platform, the way
+this CMake option used to do, you will instead need to disable the default platform for
+the target OS. This means setting the @ref GLFW_BUILD_WIN32, @ref GLFW_BUILD_COCOA or
+@ref GLFW_BUILD_X11 CMake option to false.
+
+You can set all of them to false and the ones that don't apply for the target OS will be
+ignored.
+
+
+@subsubsection wl_shell_34 Support for the wl_shell protocol has been removed
+
+Support for the wl_shell protocol has been removed and GLFW now only supports
+the XDG-Shell protocol. If your Wayland compositor does not support XDG-Shell
+then GLFW will fail to initialize.
+
+
+@subsection symbols_34 New symbols in version 3.4
+
+@subsubsection functions_34 New functions in version 3.4
+
+ - @ref glfwInitAllocator
+ - @ref glfwGetPlatform
+ - @ref glfwPlatformSupported
+ - @ref glfwInitVulkanLoader
+
+
+@subsubsection types_34 New types in version 3.4
+
+ - @ref GLFWallocator
+ - @ref GLFWallocatefun
+ - @ref GLFWreallocatefun
+ - @ref GLFWdeallocatefun
+
+
+@subsubsection constants_34 New constants in version 3.4
+
+ - @ref GLFW_PLATFORM
+ - @ref GLFW_ANY_PLATFORM
+ - @ref GLFW_PLATFORM_WIN32
+ - @ref GLFW_PLATFORM_COCOA
+ - @ref GLFW_PLATFORM_WAYLAND
+ - @ref GLFW_PLATFORM_X11
+ - @ref GLFW_PLATFORM_NULL
+ - @ref GLFW_PLATFORM_UNAVAILABLE
+ - @ref GLFW_POINTING_HAND_CURSOR
+ - @ref GLFW_RESIZE_EW_CURSOR
+ - @ref GLFW_RESIZE_NS_CURSOR
+ - @ref GLFW_RESIZE_NWSE_CURSOR
+ - @ref GLFW_RESIZE_NESW_CURSOR
+ - @ref GLFW_RESIZE_ALL_CURSOR
+ - @ref GLFW_MOUSE_PASSTHROUGH
+ - @ref GLFW_NOT_ALLOWED_CURSOR
+ - @ref GLFW_CURSOR_UNAVAILABLE
+ - @ref GLFW_WIN32_KEYBOARD_MENU
+ - @ref GLFW_CONTEXT_DEBUG
+ - @ref GLFW_FEATURE_UNAVAILABLE
+ - @ref GLFW_FEATURE_UNIMPLEMENTED
+ - @ref GLFW_ANGLE_PLATFORM_TYPE
+ - @ref GLFW_ANGLE_PLATFORM_TYPE_NONE
+ - @ref GLFW_ANGLE_PLATFORM_TYPE_OPENGL
+ - @ref GLFW_ANGLE_PLATFORM_TYPE_OPENGLES
+ - @ref GLFW_ANGLE_PLATFORM_TYPE_D3D9
+ - @ref GLFW_ANGLE_PLATFORM_TYPE_D3D11
+ - @ref GLFW_ANGLE_PLATFORM_TYPE_VULKAN
+ - @ref GLFW_ANGLE_PLATFORM_TYPE_METAL
+ - @ref GLFW_X11_XCB_VULKAN_SURFACE
+
+
+@section news_archive Release notes for earlier versions
+
+- [Release notes for 3.3](https://www.glfw.org/docs/3.3/news.html)
+- [Release notes for 3.2](https://www.glfw.org/docs/3.2/news.html)
+- [Release notes for 3.1](https://www.glfw.org/docs/3.1/news.html)
+- [Release notes for 3.0](https://www.glfw.org/docs/3.0/news.html)
+
+*/
diff --git a/chromium/third_party/dawn/third_party/glfw/docs/quick.dox b/chromium/third_party/dawn/third_party/glfw/docs/quick.dox
new file mode 100644
index 00000000000..c3f47aa199e
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/docs/quick.dox
@@ -0,0 +1,371 @@
+/*!
+
+@page quick_guide Getting started
+
+@tableofcontents
+
+This guide takes you through writing a small application using GLFW 3. The
+application will create a window and OpenGL context, render a rotating triangle
+and exit when the user closes the window or presses _Escape_. This guide will
+introduce a few of the most commonly used functions, but there are many more.
+
+This guide assumes no experience with earlier versions of GLFW. If you
+have used GLFW 2 in the past, read @ref moving_guide, as some functions
+behave differently in GLFW 3.
+
+
+@section quick_steps Step by step
+
+@subsection quick_include Including the GLFW header
+
+In the source files of your application where you use GLFW, you need to include
+its header file.
+
+@code
+#include <GLFW/glfw3.h>
+@endcode
+
+This header provides all the constants, types and function prototypes of the
+GLFW API.
+
+By default it also includes the OpenGL header from your development environment.
+On some platforms this header only supports older versions of OpenGL. The most
+extreme case is Windows, where it typically only supports OpenGL 1.2.
+
+Most programs will instead use an
+[extension loader library](@ref context_glext_auto) and include its header.
+This example uses files generated by [glad](https://gen.glad.sh/). The GLFW
+header can detect most such headers if they are included first and will then not
+include the one from your development environment.
+
+@code
+#include <glad/gl.h>
+#include <GLFW/glfw3.h>
+@endcode
+
+To make sure there will be no header conflicts, you can define @ref
+GLFW_INCLUDE_NONE before the GLFW header to explicitly disable inclusion of the
+development environment header. This also allows the two headers to be included
+in any order.
+
+@code
+#define GLFW_INCLUDE_NONE
+#include <GLFW/glfw3.h>
+#include <glad/gl.h>
+@endcode
+
+
+@subsection quick_init_term Initializing and terminating GLFW
+
+Before you can use most GLFW functions, the library must be initialized. On
+successful initialization, `GLFW_TRUE` is returned. If an error occurred,
+`GLFW_FALSE` is returned.
+
+@code
+if (!glfwInit())
+{
+ // Initialization failed
+}
+@endcode
+
+Note that `GLFW_TRUE` and `GLFW_FALSE` are and will always be one and zero.
+
+When you are done using GLFW, typically just before the application exits, you
+need to terminate GLFW.
+
+@code
+glfwTerminate();
+@endcode
+
+This destroys any remaining windows and releases any other resources allocated by
+GLFW. After this call, you must initialize GLFW again before using any GLFW
+functions that require it.
+
+
+@subsection quick_capture_error Setting an error callback
+
+Most events are reported through callbacks, whether it's a key being pressed,
+a GLFW window being moved, or an error occurring. Callbacks are C functions (or
+C++ static methods) that are called by GLFW with arguments describing the event.
+
+In case a GLFW function fails, an error is reported to the GLFW error callback.
+You can receive these reports with an error callback. This function must have
+the signature below but may do anything permitted in other callbacks.
+
+@code
+void error_callback(int error, const char* description)
+{
+ fprintf(stderr, "Error: %s\n", description);
+}
+@endcode
+
+Callback functions must be set, so GLFW knows to call them. The function to set
+the error callback is one of the few GLFW functions that may be called before
+initialization, which lets you be notified of errors both during and after
+initialization.
+
+@code
+glfwSetErrorCallback(error_callback);
+@endcode
+
+
+@subsection quick_create_window Creating a window and context
+
+The window and its OpenGL context are created with a single call to @ref
+glfwCreateWindow, which returns a handle to the created combined window and
+context object.
+
+@code
+GLFWwindow* window = glfwCreateWindow(640, 480, "My Title", NULL, NULL);
+if (!window)
+{
+ // Window or OpenGL context creation failed
+}
+@endcode
+
+This creates a 640 by 480 windowed mode window with an OpenGL context. If
+window or OpenGL context creation fails, `NULL` will be returned. You should
+always check the return value. While window creation rarely fails, context
+creation depends on properly installed drivers and may fail even on machines
+with the necessary hardware.
+
+By default, the OpenGL context GLFW creates may have any version. You can
+require a minimum OpenGL version by setting the `GLFW_CONTEXT_VERSION_MAJOR` and
+`GLFW_CONTEXT_VERSION_MINOR` hints _before_ creation. If the required minimum
+version is not supported on the machine, context (and window) creation fails.
+
+You can select the OpenGL profile by setting the `GLFW_OPENGL_PROFILE` hint.
+This program uses the core profile as that is the only profile macOS supports
+for OpenGL 3.x and 4.x.
+
+@code
+glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
+glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);
+glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
+GLFWwindow* window = glfwCreateWindow(640, 480, "My Title", NULL, NULL);
+if (!window)
+{
+ // Window or context creation failed
+}
+@endcode
+
+The window handle is passed to all window related functions and is provided
+along to all window related callbacks, so they can tell which window received
+the event.
+
+When a window and context is no longer needed, destroy it.
+
+@code
+glfwDestroyWindow(window);
+@endcode
+
+Once this function is called, no more events will be delivered for that window
+and its handle becomes invalid.
+
+
+@subsection quick_context_current Making the OpenGL context current
+
+Before you can use the OpenGL API, you must have a current OpenGL context.
+
+@code
+glfwMakeContextCurrent(window);
+@endcode
+
+The context will remain current until you make another context current or until
+the window owning the current context is destroyed.
+
+If you are using an [extension loader library](@ref context_glext_auto) to
+access modern OpenGL then this is when to initialize it, as the loader needs
+a current context to load from. This example uses
+[glad](https://github.com/Dav1dde/glad), but the same rule applies to all such
+libraries.
+
+@code
+gladLoadGL(glfwGetProcAddress);
+@endcode
+
+
+@subsection quick_window_close Checking the window close flag
+
+Each window has a flag indicating whether the window should be closed.
+
+When the user attempts to close the window, either by pressing the close widget
+in the title bar or using a key combination like Alt+F4, this flag is set to 1.
+Note that __the window isn't actually closed__, so you are expected to monitor
+this flag and either destroy the window or give some kind of feedback to the
+user.
+
+@code
+while (!glfwWindowShouldClose(window))
+{
+ // Keep running
+}
+@endcode
+
+You can be notified when the user is attempting to close the window by setting
+a close callback with @ref glfwSetWindowCloseCallback. The callback will be
+called immediately after the close flag has been set.
+
+You can also set it yourself with @ref glfwSetWindowShouldClose. This can be
+useful if you want to interpret other kinds of input as closing the window, like
+for example pressing the _Escape_ key.
+
+
+@subsection quick_key_input Receiving input events
+
+Each window has a large number of callbacks that can be set to receive all the
+various kinds of events. To receive key press and release events, create a key
+callback function.
+
+@code
+static void key_callback(GLFWwindow* window, int key, int scancode, int action, int mods)
+{
+ if (key == GLFW_KEY_ESCAPE && action == GLFW_PRESS)
+ glfwSetWindowShouldClose(window, GLFW_TRUE);
+}
+@endcode
+
+The key callback, like other window related callbacks, is set per-window.
+
+@code
+glfwSetKeyCallback(window, key_callback);
+@endcode
+
+In order for event callbacks to be called when events occur, you need to process
+events as described below.
+
+
+@subsection quick_render Rendering with OpenGL
+
+Once you have a current OpenGL context, you can use OpenGL normally. In this
+tutorial, a multi-colored rotating triangle will be rendered. The framebuffer
+size needs to be retrieved for `glViewport`.
+
+@code
+int width, height;
+glfwGetFramebufferSize(window, &width, &height);
+glViewport(0, 0, width, height);
+@endcode
+
+You can also set a framebuffer size callback using @ref
+glfwSetFramebufferSizeCallback and be notified when the size changes.
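+
+For example, a framebuffer size callback that keeps the viewport in sync with
+the framebuffer might look like this minimal sketch; the callback name is just
+an example.
+
+@code
+static void framebuffer_size_callback(GLFWwindow* window, int width, int height)
+{
+    // Match the viewport to the new framebuffer size
+    glViewport(0, 0, width, height);
+}
+
+glfwSetFramebufferSizeCallback(window, framebuffer_size_callback);
+@endcode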
+
+The details of how to render with OpenGL are outside the scope of this tutorial,
+but there are many excellent resources for learning modern OpenGL. Here are
+a few of them:
+
+ - [Anton's OpenGL 4 Tutorials](https://antongerdelan.net/opengl/)
+ - [Learn OpenGL](https://learnopengl.com/)
+ - [Open.GL](https://open.gl/)
+
+These all happen to use GLFW, but OpenGL itself works the same whatever API you
+use to create the window and context.
+
+
+@subsection quick_timer Reading the timer
+
+To create smooth animation, a time source is needed. GLFW provides a timer that
+returns the number of seconds since initialization. The time source used is the
+most accurate on each platform and generally has micro- or nanosecond
+resolution.
+
+@code
+double time = glfwGetTime();
+@endcode
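+
+For instance, a common use of the timer is to compute a per-frame delta time
+for animation; a minimal sketch of this might look as follows.
+
+@code
+double last_time = glfwGetTime();
+
+while (!glfwWindowShouldClose(window))
+{
+    double now = glfwGetTime();
+    double delta = now - last_time;
+    last_time = now;
+
+    // Advance the animation by delta seconds here
+}
+@endcode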
+
+
+@subsection quick_swap_buffers Swapping buffers
+
+GLFW windows by default use double buffering. That means that each window has
+two rendering buffers: a front buffer and a back buffer. The front buffer is
+the one being displayed and the back buffer is the one you render to.
+
+When the entire frame has been rendered, the buffers need to be swapped with one
+another, so the back buffer becomes the front buffer and vice versa.
+
+@code
+glfwSwapBuffers(window);
+@endcode
+
+The swap interval indicates how many frames to wait until swapping the buffers,
+commonly known as _vsync_. By default, the swap interval is zero, meaning
+buffer swapping will occur immediately. On fast machines, many of those frames
+will never be seen, as the screen is typically only updated 60-75 times per
+second, so this wastes a lot of CPU and GPU cycles.
+
+Also, the buffers may be swapped in the middle of a screen update, leading to
+[screen tearing](https://en.wikipedia.org/wiki/Screen_tearing).
+
+For these reasons, applications will typically want to set the swap interval to
+one. It can be set to higher values, but this is usually not recommended,
+because of the input latency it leads to.
+
+@code
+glfwSwapInterval(1);
+@endcode
+
+This function acts on the current context and will fail unless a context is
+current.
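+
+For instance, in a program like the one in this tutorial, one workable order is
+to make the context current, load the OpenGL functions and then set the swap
+interval.
+
+@code
+glfwMakeContextCurrent(window);
+gladLoadGL(glfwGetProcAddress);
+glfwSwapInterval(1);
+@endcode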
+
+
+@subsection quick_process_events Processing events
+
+GLFW needs to communicate regularly with the window system both in order to
+receive events and to show that the application hasn't locked up. Event
+processing must be done regularly while you have visible windows and is normally
+done each frame after buffer swapping.
+
+There are two methods for processing pending events: polling and waiting. This
+example will use event polling, which processes only those events that have
+already been received and then returns immediately.
+
+@code
+glfwPollEvents();
+@endcode
+
+This is the best choice when rendering continually, like most games do. If
+instead you only need to update your rendering once you have received new input,
+@ref glfwWaitEvents is a better choice. It waits until at least one event has
+been received, putting the thread to sleep in the meantime, and then processes
+all received events. This saves a great deal of CPU cycles and is useful for,
+for example, many kinds of editing tools.
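+
+As a rough sketch, an editor-style loop built around @ref glfwWaitEvents might
+look something like this; `render_scene` is a hypothetical function standing in
+for your own drawing code.
+
+@code
+while (!glfwWindowShouldClose(window))
+{
+    // render_scene is a placeholder for your own rendering code
+    render_scene(window);
+    glfwSwapBuffers(window);
+
+    // Sleep until at least one event arrives, then process all pending events
+    glfwWaitEvents();
+}
+@endcode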
+
+
+@section quick_example Putting it together
+
+Now that you know how to initialize GLFW, create a window and poll for
+keyboard input, it's possible to create a small program.
+
+This program creates a 640 by 480 windowed mode window and starts a loop that
+clears the screen, renders a triangle and processes events until the user either
+presses _Escape_ or closes the window.
+
+@snippet triangle-opengl.c code
+
+The program above can be found in the
+[source package](https://www.glfw.org/download.html) as
+`examples/triangle-opengl.c` and is compiled along with all other examples when
+you build GLFW. If you built GLFW from the source package then you already have
+this as `triangle-opengl.exe` on Windows, `triangle-opengl` on Linux or
+`triangle-opengl.app` on macOS.
+
+This tutorial used only a few of the many functions GLFW provides. There are
+guides for each of the areas covered by GLFW. Each guide will introduce all the
+functions for that category.
+
+ - @ref intro_guide
+ - @ref window_guide
+ - @ref context_guide
+ - @ref monitor_guide
+ - @ref input_guide
+
+You can access reference documentation for any GLFW function by clicking it,
+and the reference for each function links to related functions and guide
+sections.
+
+The tutorial ends here. Once you have written a program that uses GLFW, you
+will need to compile and link it. How to do that depends on the development
+environment you are using and is best explained by the documentation for that
+environment. To learn about the details that are specific to GLFW, see
+@ref build_guide.
+
+*/
diff --git a/chromium/third_party/dawn/third_party/glfw/docs/spaces.svg b/chromium/third_party/dawn/third_party/glfw/docs/spaces.svg
new file mode 100644
index 00000000000..5b326460924
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/docs/spaces.svg
@@ -0,0 +1,877 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<svg
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ width="688.48718"
+ height="327.98221"
+ id="svg2"
+ version="1.1"
+ inkscape:version="0.92.4 (5da689c313, 2019-01-14)"
+ sodipodi:docname="spaces.svg">
+ <defs
+ id="defs4">
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0.0"
+ refX="0.0"
+ id="Arrow2Lend"
+ style="overflow:visible;">
+ <path
+ id="path3888"
+ style="fill-rule:evenodd;stroke-width:0.62500000;stroke-linejoin:round;"
+ d="M 8.7185878,4.0337352 L -2.2072895,0.016013256 L 8.7185884,-4.0017078 C 6.9730900,-1.6296469 6.9831476,1.6157441 8.7185878,4.0337352 z "
+ transform="scale(1.1) rotate(180) translate(1,0)" />
+ </marker>
+ </defs>
+ <sodipodi:namedview
+ id="base"
+ pagecolor="#ffffff"
+ bordercolor="#666666"
+ borderopacity="1.0"
+ inkscape:pageopacity="0.0"
+ inkscape:pageshadow="2"
+ inkscape:zoom="1.8110012"
+ inkscape:cx="320.68941"
+ inkscape:cy="159.80509"
+ inkscape:document-units="px"
+ inkscape:current-layer="layer1"
+ showgrid="false"
+ inkscape:window-width="1920"
+ inkscape:window-height="1021"
+ inkscape:window-x="0"
+ inkscape:window-y="30"
+ inkscape:window-maximized="1"
+ fit-margin-top="0"
+ fit-margin-left="0"
+ fit-margin-right="0"
+ fit-margin-bottom="0"
+ units="px"
+ showborder="false"
+ inkscape:showpageshadow="false" />
+ <metadata
+ id="metadata7">
+ <rdf:RDF>
+ <cc:Work
+ rdf:about="">
+ <dc:format>image/svg+xml</dc:format>
+ <dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+ <dc:title />
+ </cc:Work>
+ </rdf:RDF>
+ </metadata>
+ <g
+ inkscape:label="Layer 1"
+ inkscape:groupmode="layer"
+ id="layer1"
+ transform="translate(-12.627039,-339.86462)">
+ <rect
+ style="fill:#ffffff;fill-opacity:1;stroke:#0000ff;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:3,3;stroke-dashoffset:0"
+ id="rect2985"
+ width="687.36469"
+ height="326.85971"
+ x="13.188287"
+ y="340.42587"
+ inkscape:export-filename="/home/elmindreda/projects/glfw/glfw/docs/spaces.png"
+ inkscape:export-xdpi="109.89113"
+ inkscape:export-ydpi="109.89113" />
+ <rect
+ style="fill:#f3fff3;fill-opacity:1;stroke:#00b800;stroke-width:1;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
+ id="rect3757"
+ width="318.05698"
+ height="277.04684"
+ x="38.315689"
+ y="366.05841"
+ inkscape:export-filename="/home/elmindreda/projects/glfw/glfw/docs/spaces.png"
+ inkscape:export-xdpi="109.89113"
+ inkscape:export-ydpi="109.89113" />
+ <rect
+ style="fill:#f3fff3;fill-opacity:1;stroke:#00b800;stroke-width:1;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
+ id="rect3767"
+ width="319.01456"
+ height="198.09369"
+ x="356.36722"
+ y="366.01291"
+ inkscape:export-filename="/home/elmindreda/projects/glfw/glfw/docs/spaces.png"
+ inkscape:export-xdpi="109.89113"
+ inkscape:export-ydpi="109.89113" />
+ <g
+ style="font-size:12px;font-style:normal;font-weight:normal;text-align:start;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ id="text3769">
+ <path
+ d="m 365.3732,374.63632 0,2.73926 1.24023,0 c 0.45898,0 0.8138,-0.11881 1.06446,-0.35645 0.25064,-0.23762 0.37597,-0.57616 0.37597,-1.01562 0,-0.43619 -0.12533,-0.77311 -0.37597,-1.01074 -0.25066,-0.23763 -0.60548,-0.35644 -1.06446,-0.35645 l -1.24023,0 m -0.98633,-0.81054 2.22656,0 c 0.81706,0 1.43392,0.18555 1.85059,0.55664 0.41992,0.36784 0.62988,0.9082 0.62988,1.62109 0,0.7194 -0.20996,1.26302 -0.62988,1.63086 -0.41667,0.36784 -1.03353,0.55176 -1.85059,0.55176 l -1.24023,0 0,2.92968 -0.98633,0 0,-7.29003"
+ style="font-size:10px"
+ id="path3281" />
+ <path
+ d="m 373.37613,376.48691 c -0.10092,-0.0586 -0.21159,-0.10091 -0.33203,-0.12696 -0.11719,-0.0293 -0.2474,-0.0439 -0.39063,-0.0439 -0.50781,0 -0.89844,0.16602 -1.17187,0.49805 -0.27019,0.32878 -0.40528,0.80241 -0.40528,1.42089 l 0,2.88086 -0.90332,0 0,-5.46875 0.90332,0 0,0.84961 c 0.1888,-0.33202 0.43457,-0.57779 0.73731,-0.7373 0.30273,-0.16276 0.67057,-0.24414 1.10351,-0.24414 0.0618,0 0.13021,0.005 0.20508,0.0147 0.0749,0.007 0.15788,0.0179 0.24903,0.0342 l 0.005,0.92285"
+ style="font-size:10px"
+ id="path3283" />
+ <path
+ d="m 374.32828,375.64706 0.89844,0 0,5.46875 -0.89844,0 0,-5.46875 m 0,-2.1289 0.89844,0 0,1.13769 -0.89844,0 0,-1.13769"
+ style="font-size:10px"
+ id="path3285" />
+ <path
+ d="m 381.35953,376.69687 c 0.2246,-0.40364 0.49316,-0.70149 0.80566,-0.89356 0.3125,-0.19205 0.68033,-0.28808 1.10352,-0.28808 0.56965,0 1.0091,0.2002 1.31836,0.60058 0.30923,0.39714 0.46385,0.96355 0.46386,1.69922 l 0,3.30078 -0.90332,0 0,-3.27148 c 0,-0.52408 -0.0928,-0.91308 -0.27832,-1.16699 -0.18555,-0.2539 -0.46875,-0.38086 -0.84961,-0.38086 -0.4655,0 -0.83334,0.15463 -1.10351,0.46387 -0.27019,0.30924 -0.40528,0.73079 -0.40528,1.26464 l 0,3.09082 -0.90332,0 0,-3.27148 c 0,-0.52734 -0.0928,-0.91634 -0.27832,-1.16699 -0.18555,-0.2539 -0.472,-0.38086 -0.85937,-0.38086 -0.45899,0 -0.82357,0.15625 -1.09375,0.46875 -0.27019,0.30925 -0.40528,0.72917 -0.40527,1.25976 l 0,3.09082 -0.90332,0 0,-5.46875 0.90332,0 0,0.84961 c 0.20507,-0.33528 0.45084,-0.58267 0.7373,-0.74218 0.28646,-0.1595 0.62662,-0.23926 1.02051,-0.23926 0.39713,0 0.73404,0.10092 1.01074,0.30273 0.27994,0.20183 0.48665,0.4948 0.62012,0.87891"
+ style="font-size:10px"
+ id="path3287" />
+ <path
+ d="m 389.33316,378.36679 c -0.72591,0 -1.22884,0.083 -1.50879,0.24902 -0.27995,0.16602 -0.41992,0.44923 -0.41992,0.84961 0,0.31902 0.10416,0.57292 0.3125,0.76172 0.21159,0.18555 0.49804,0.27832 0.85937,0.27832 0.49805,0 0.89681,-0.17578 1.19629,-0.52734 0.30273,-0.35482 0.4541,-0.82519 0.45411,-1.41113 l 0,-0.2002 -0.89356,0 m 1.79199,-0.37109 0,3.12011 -0.89843,0 0,-0.83007 c -0.20509,0.33203 -0.46062,0.5778 -0.76661,0.7373 -0.30599,0.15625 -0.68034,0.23438 -1.12304,0.23438 -0.5599,0 -1.00586,-0.15625 -1.33789,-0.46875 -0.32878,-0.31576 -0.49317,-0.73731 -0.49317,-1.26465 0,-0.61523 0.20508,-1.0791 0.61524,-1.3916 0.41341,-0.3125 1.02864,-0.46875 1.8457,-0.46875 l 1.25977,0 0,-0.0879 c -10e-6,-0.41341 -0.13673,-0.73242 -0.41016,-0.95704 -0.27019,-0.22786 -0.65105,-0.34179 -1.14258,-0.34179 -0.3125,0 -0.61686,0.0374 -0.91309,0.1123 -0.29622,0.0749 -0.58105,0.18718 -0.85449,0.33692 l 0,-0.83008 c 0.32878,-0.12695 0.64779,-0.22135 0.95703,-0.28321 0.30925,-0.0651 0.61035,-0.0977 0.90332,-0.0977 0.79102,0 1.38184,0.20508 1.77247,0.61523 0.39062,0.41016 0.58593,1.03191 0.58593,1.86524"
+ style="font-size:10px"
+ id="path3289" />
+ <path
+ d="m 396.14957,376.48691 c -0.10092,-0.0586 -0.2116,-0.10091 -0.33203,-0.12696 -0.1172,-0.0293 -0.2474,-0.0439 -0.39063,-0.0439 -0.50781,0 -0.89844,0.16602 -1.17187,0.49805 -0.27019,0.32878 -0.40528,0.80241 -0.40528,1.42089 l 0,2.88086 -0.90332,0 0,-5.46875 0.90332,0 0,0.84961 c 0.1888,-0.33202 0.43457,-0.57779 0.73731,-0.7373 0.30273,-0.16276 0.67057,-0.24414 1.10351,-0.24414 0.0618,0 0.13021,0.005 0.20508,0.0147 0.0749,0.007 0.15787,0.0179 0.24902,0.0342 l 0.005,0.92285"
+ style="font-size:10px"
+ id="path3291" />
+ <path
+ d="m 399.37711,381.62363 c -0.25391,0.65104 -0.50131,1.07584 -0.74219,1.27441 -0.24089,0.19857 -0.56315,0.29785 -0.9668,0.29785 l -0.71777,0 0,-0.75195 0.52734,0 c 0.2474,0 0.43945,-0.0586 0.57617,-0.17578 0.13672,-0.11719 0.28809,-0.39388 0.45411,-0.83008 l 0.16113,-0.41016 -2.21192,-5.38086 0.95215,0 1.70899,4.27735 1.70898,-4.27735 0.95215,0 -2.40234,5.97657"
+ style="font-size:10px"
+ id="path3293" />
+ <path
+ d="m 410.46109,376.69687 c 0.2246,-0.40364 0.49316,-0.70149 0.80566,-0.89356 0.3125,-0.19205 0.68034,-0.28808 1.10352,-0.28808 0.56965,0 1.00911,0.2002 1.31836,0.60058 0.30924,0.39714 0.46386,0.96355 0.46387,1.69922 l 0,3.30078 -0.90332,0 0,-3.27148 c -1e-5,-0.52408 -0.0928,-0.91308 -0.27832,-1.16699 -0.18556,-0.2539 -0.46876,-0.38086 -0.84961,-0.38086 -0.4655,0 -0.83334,0.15463 -1.10352,0.46387 -0.27019,0.30924 -0.40528,0.73079 -0.40527,1.26464 l 0,3.09082 -0.90332,0 0,-3.27148 c -10e-6,-0.52734 -0.0928,-0.91634 -0.27832,-1.16699 -0.18555,-0.2539 -0.47201,-0.38086 -0.85938,-0.38086 -0.45899,0 -0.82357,0.15625 -1.09375,0.46875 -0.27018,0.30925 -0.40527,0.72917 -0.40527,1.25976 l 0,3.09082 -0.90332,0 0,-5.46875 0.90332,0 0,0.84961 c 0.20507,-0.33528 0.45084,-0.58267 0.7373,-0.74218 0.28646,-0.1595 0.62663,-0.23926 1.02051,-0.23926 0.39713,0 0.73405,0.10092 1.01074,0.30273 0.27995,0.20183 0.48665,0.4948 0.62012,0.87891"
+ style="font-size:10px"
+ id="path3295" />
+ <path
+ d="m 418.06851,376.27695 c -0.48177,0 -0.86263,0.1888 -1.14258,0.5664 -0.27995,0.37436 -0.41992,0.88868 -0.41992,1.54297 0,0.6543 0.13835,1.17025 0.41504,1.54785 0.27995,0.37435 0.66243,0.56153 1.14746,0.56153 0.47851,0 0.85775,-0.1888 1.1377,-0.56641 0.27994,-0.3776 0.41991,-0.89192 0.41992,-1.54297 -10e-6,-0.64778 -0.13998,-1.16048 -0.41992,-1.53808 -0.27995,-0.38086 -0.65919,-0.57129 -1.1377,-0.57129 m 0,-0.76172 c 0.78125,0 1.39485,0.25391 1.84082,0.76172 0.44596,0.50781 0.66894,1.21094 0.66895,2.10937 -1e-5,0.89519 -0.22299,1.59831 -0.66895,2.10938 -0.44597,0.50781 -1.05957,0.76172 -1.84082,0.76172 -0.78451,0 -1.39974,-0.25391 -1.8457,-0.76172 -0.44271,-0.51107 -0.66406,-1.21419 -0.66406,-2.10938 0,-0.89843 0.22135,-1.60156 0.66406,-2.10937 0.44596,-0.50781 1.06119,-0.76172 1.8457,-0.76172"
+ style="font-size:10px"
+ id="path3297" />
+ <path
+ d="m 426.60855,377.81503 0,3.30078 -0.89844,0 0,-3.27148 c 0,-0.51757 -0.10091,-0.90494 -0.30273,-1.16211 -0.20183,-0.25716 -0.50456,-0.38574 -0.9082,-0.38574 -0.48503,0 -0.86752,0.15463 -1.14746,0.46387 -0.27995,0.30924 -0.41993,0.73079 -0.41993,1.26464 l 0,3.09082 -0.90332,0 0,-5.46875 0.90332,0 0,0.84961 c 0.21484,-0.32877 0.46712,-0.57454 0.75684,-0.7373 0.29297,-0.16276 0.62988,-0.24414 1.01074,-0.24414 0.62825,0 1.10351,0.19532 1.42578,0.58594 0.32226,0.38737 0.4834,0.95866 0.4834,1.71386"
+ style="font-size:10px"
+ id="path3299" />
+ <path
+ d="m 428.41031,375.64706 0.89844,0 0,5.46875 -0.89844,0 0,-5.46875 m 0,-2.1289 0.89844,0 0,1.13769 -0.89844,0 0,-1.13769"
+ style="font-size:10px"
+ id="path3301" />
+ <path
+ d="m 432.07242,374.09433 0,1.55273 1.85058,0 0,0.69825 -1.85058,0 0,2.96875 c 0,0.44596 0.0602,0.73242 0.18066,0.85937 0.1237,0.12696 0.37272,0.19043 0.74707,0.19043 l 0.92285,0 0,0.75195 -0.92285,0 c -0.69336,0 -1.17187,-0.12858 -1.43554,-0.38574 -0.26368,-0.26041 -0.39551,-0.73242 -0.39551,-1.41601 l 0,-2.96875 -0.65918,0 0,-0.69825 0.65918,0 0,-1.55273 0.90332,0"
+ style="font-size:10px"
+ id="path3303" />
+ <path
+ d="m 437.22867,376.27695 c -0.48178,0 -0.86263,0.1888 -1.14258,0.5664 -0.27995,0.37436 -0.41992,0.88868 -0.41992,1.54297 0,0.6543 0.13834,1.17025 0.41504,1.54785 0.27994,0.37435 0.66243,0.56153 1.14746,0.56153 0.47851,0 0.85774,-0.1888 1.13769,-0.56641 0.27995,-0.3776 0.41992,-0.89192 0.41993,-1.54297 -1e-5,-0.64778 -0.13998,-1.16048 -0.41993,-1.53808 -0.27995,-0.38086 -0.65918,-0.57129 -1.13769,-0.57129 m 0,-0.76172 c 0.78124,0 1.39485,0.25391 1.84082,0.76172 0.44596,0.50781 0.66894,1.21094 0.66894,2.10937 0,0.89519 -0.22298,1.59831 -0.66894,2.10938 -0.44597,0.50781 -1.05958,0.76172 -1.84082,0.76172 -0.78451,0 -1.39974,-0.25391 -1.8457,-0.76172 -0.44271,-0.51107 -0.66407,-1.21419 -0.66407,-2.10938 0,-0.89843 0.22136,-1.60156 0.66407,-2.10937 0.44596,-0.50781 1.06119,-0.76172 1.8457,-0.76172"
+ style="font-size:10px"
+ id="path3305" />
+ <path
+ d="m 444.39175,376.48691 c -0.10091,-0.0586 -0.21159,-0.10091 -0.33203,-0.12696 -0.11719,-0.0293 -0.2474,-0.0439 -0.39062,-0.0439 -0.50782,0 -0.89844,0.16602 -1.17188,0.49805 -0.27018,0.32878 -0.40527,0.80241 -0.40527,1.42089 l 0,2.88086 -0.90332,0 0,-5.46875 0.90332,0 0,0.84961 c 0.1888,-0.33202 0.43457,-0.57779 0.7373,-0.7373 0.30274,-0.16276 0.67057,-0.24414 1.10352,-0.24414 0.0618,0 0.1302,0.005 0.20508,0.0147 0.0749,0.007 0.15787,0.0179 0.24902,0.0342 l 0.005,0.92285"
+ style="font-size:10px"
+ id="path3307" />
+ <path
+ d="m 449.39664,380.2955 0,2.90039 -0.90332,0 0,-7.54883 0.90332,0 0,0.83008 c 0.1888,-0.32551 0.42643,-0.5664 0.71289,-0.72265 0.28971,-0.1595 0.63476,-0.23926 1.03515,-0.23926 0.66406,0 1.2028,0.26368 1.61621,0.79101 0.41667,0.52735 0.625,1.22071 0.625,2.08008 0,0.85938 -0.20833,1.55274 -0.625,2.08008 -0.41341,0.52734 -0.95215,0.79102 -1.61621,0.79102 -0.40039,0 -0.74544,-0.0781 -1.03515,-0.23438 -0.28646,-0.1595 -0.52409,-0.40202 -0.71289,-0.72754 m 3.05664,-1.90918 c -1e-5,-0.6608 -0.13673,-1.17838 -0.41016,-1.55273 -0.27019,-0.3776 -0.64291,-0.5664 -1.11816,-0.56641 -0.47527,1e-5 -0.84961,0.18881 -1.12305,0.56641 -0.27018,0.37435 -0.40527,0.89193 -0.40527,1.55273 0,0.66081 0.13509,1.18002 0.40527,1.55762 0.27344,0.37435 0.64778,0.56152 1.12305,0.56152 0.47525,0 0.84797,-0.18717 1.11816,-0.56152 0.27343,-0.3776 0.41015,-0.89681 0.41016,-1.55762"
+ style="font-size:10px"
+ id="path3309" />
+ <path
+ d="m 456.99429,376.27695 c -0.48177,0 -0.86263,0.1888 -1.14257,0.5664 -0.27995,0.37436 -0.41993,0.88868 -0.41993,1.54297 0,0.6543 0.13835,1.17025 0.41504,1.54785 0.27995,0.37435 0.66243,0.56153 1.14746,0.56153 0.47852,0 0.85775,-0.1888 1.1377,-0.56641 0.27994,-0.3776 0.41992,-0.89192 0.41992,-1.54297 0,-0.64778 -0.13998,-1.16048 -0.41992,-1.53808 -0.27995,-0.38086 -0.65918,-0.57129 -1.1377,-0.57129 m 0,-0.76172 c 0.78125,0 1.39486,0.25391 1.84082,0.76172 0.44596,0.50781 0.66894,1.21094 0.66895,2.10937 -10e-6,0.89519 -0.22299,1.59831 -0.66895,2.10938 -0.44596,0.50781 -1.05957,0.76172 -1.84082,0.76172 -0.7845,0 -1.39974,-0.25391 -1.8457,-0.76172 -0.44271,-0.51107 -0.66406,-1.21419 -0.66406,-2.10938 0,-0.89843 0.22135,-1.60156 0.66406,-2.10937 0.44596,-0.50781 1.0612,-0.76172 1.8457,-0.76172"
+ style="font-size:10px"
+ id="path3311" />
+ <path
+ d="m 464.47476,375.8082 0,0.84961 c -0.25391,-0.13021 -0.51758,-0.22786 -0.79101,-0.29297 -0.27344,-0.0651 -0.55665,-0.0976 -0.84961,-0.0977 -0.44597,1e-5 -0.78126,0.0684 -1.00586,0.20508 -0.22136,0.13672 -0.33204,0.3418 -0.33203,0.61523 -10e-6,0.20834 0.0798,0.37273 0.23925,0.49317 0.15951,0.11719 0.48015,0.22949 0.96192,0.33691 l 0.30762,0.0684 c 0.63801,0.13672 1.09049,0.33041 1.35742,0.58106 0.27017,0.24739 0.40527,0.59407 0.40527,1.04004 0,0.50781 -0.20183,0.90983 -0.60547,1.20605 -0.40039,0.29622 -0.95215,0.44434 -1.65527,0.44434 -0.29297,0 -0.59896,-0.0293 -0.91797,-0.0879 -0.31576,-0.0553 -0.64942,-0.13998 -1.00098,-0.25391 l 0,-0.92774 c 0.33203,0.17253 0.65918,0.30274 0.98145,0.39063 0.32226,0.0846 0.64127,0.12695 0.95703,0.12695 0.42317,0 0.74869,-0.0716 0.97656,-0.21484 0.22786,-0.14648 0.3418,-0.35156 0.3418,-0.61524 0,-0.24413 -0.083,-0.43131 -0.24902,-0.56152 -0.16277,-0.13021 -0.52247,-0.25553 -1.07911,-0.37598 l -0.3125,-0.0732 c -0.55664,-0.11718 -0.95866,-0.29622 -1.20605,-0.53711 -0.2474,-0.24413 -0.37109,-0.57779 -0.37109,-1.00097 0,-0.51432 0.18229,-0.91146 0.54687,-1.19141 0.36458,-0.27994 0.88216,-0.41992 1.55274,-0.41992 0.33202,0 0.64452,0.0244 0.9375,0.0732 0.29296,0.0488 0.56314,0.12208 0.81054,0.21973"
+ style="font-size:10px"
+ id="path3313" />
+ <path
+ d="m 466.20328,375.64706 0.89844,0 0,5.46875 -0.89844,0 0,-5.46875 m 0,-2.1289 0.89844,0 0,1.13769 -0.89844,0 0,-1.13769"
+ style="font-size:10px"
+ id="path3315" />
+ <path
+ d="m 469.86539,374.09433 0,1.55273 1.85058,0 0,0.69825 -1.85058,0 0,2.96875 c 0,0.44596 0.0602,0.73242 0.18066,0.85937 0.1237,0.12696 0.37272,0.19043 0.74707,0.19043 l 0.92285,0 0,0.75195 -0.92285,0 c -0.69336,0 -1.17188,-0.12858 -1.43555,-0.38574 -0.26367,-0.26041 -0.3955,-0.73242 -0.3955,-1.41601 l 0,-2.96875 -0.65918,0 0,-0.69825 0.65918,0 0,-1.55273 0.90332,0"
+ style="font-size:10px"
+ id="path3317" />
+ <path
+ d="m 472.9025,375.64706 0.89843,0 0,5.46875 -0.89843,0 0,-5.46875 m 0,-2.1289 0.89843,0 0,1.13769 -0.89843,0 0,-1.13769"
+ style="font-size:10px"
+ id="path3319" />
+ <path
+ d="m 477.79507,376.27695 c -0.48177,0 -0.86263,0.1888 -1.14257,0.5664 -0.27995,0.37436 -0.41993,0.88868 -0.41993,1.54297 0,0.6543 0.13835,1.17025 0.41504,1.54785 0.27995,0.37435 0.66244,0.56153 1.14746,0.56153 0.47852,0 0.85775,-0.1888 1.1377,-0.56641 0.27994,-0.3776 0.41992,-0.89192 0.41992,-1.54297 0,-0.64778 -0.13998,-1.16048 -0.41992,-1.53808 -0.27995,-0.38086 -0.65918,-0.57129 -1.1377,-0.57129 m 0,-0.76172 c 0.78125,0 1.39486,0.25391 1.84082,0.76172 0.44596,0.50781 0.66894,1.21094 0.66895,2.10937 -1e-5,0.89519 -0.22299,1.59831 -0.66895,2.10938 -0.44596,0.50781 -1.05957,0.76172 -1.84082,0.76172 -0.7845,0 -1.39974,-0.25391 -1.8457,-0.76172 -0.44271,-0.51107 -0.66406,-1.21419 -0.66406,-2.10938 0,-0.89843 0.22135,-1.60156 0.66406,-2.10937 0.44596,-0.50781 1.0612,-0.76172 1.8457,-0.76172"
+ style="font-size:10px"
+ id="path3321" />
+ <path
+ d="m 486.33511,377.81503 0,3.30078 -0.89843,0 0,-3.27148 c -1e-5,-0.51757 -0.10092,-0.90494 -0.30274,-1.16211 -0.20183,-0.25716 -0.50456,-0.38574 -0.9082,-0.38574 -0.48503,0 -0.86752,0.15463 -1.14746,0.46387 -0.27995,0.30924 -0.41993,0.73079 -0.41992,1.26464 l 0,3.09082 -0.90332,0 0,-5.46875 0.90332,0 0,0.84961 c 0.21484,-0.32877 0.46712,-0.57454 0.75683,-0.7373 0.29297,-0.16276 0.62988,-0.24414 1.01074,-0.24414 0.62825,0 1.10351,0.19532 1.42579,0.58594 0.32226,0.38737 0.48339,0.95866 0.48339,1.71386"
+ style="font-size:10px"
+ id="path3323" />
+ </g>
+ <g
+ style="font-size:12px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#00b800;fill-opacity:1;stroke:none;font-family:Sans"
+ id="text3773">
+ <path
+ d="m 242.85294,625.22699 0,1.1543 c -0.44923,-0.21484 -0.87306,-0.375 -1.27149,-0.48047 -0.39844,-0.10546 -0.78321,-0.1582 -1.1543,-0.15821 -0.64453,10e-6 -1.14258,0.12501 -1.49414,0.375 -0.34765,0.25001 -0.52148,0.60548 -0.52148,1.06641 0,0.38673 0.11523,0.67969 0.3457,0.87891 0.23438,0.19532 0.67578,0.35352 1.32422,0.47461 l 0.71485,0.14648 c 0.8828,0.16797 1.53319,0.46485 1.95117,0.89063 0.42187,0.42187 0.6328,0.98828 0.63281,1.69921 -1e-5,0.84766 -0.28516,1.49024 -0.85547,1.92774 -0.56641,0.4375 -1.39844,0.65625 -2.49609,0.65625 -0.41407,0 -0.85547,-0.0469 -1.32422,-0.14063 -0.46485,-0.0937 -0.94727,-0.23242 -1.44727,-0.41601 l 0,-1.21875 c 0.48047,0.26953 0.95117,0.47266 1.41211,0.60937 0.46094,0.13672 0.91406,0.20508 1.35938,0.20508 0.67577,0 1.19726,-0.13281 1.56445,-0.39844 0.36718,-0.26562 0.55078,-0.64453 0.55078,-1.13671 0,-0.42969 -0.13282,-0.76563 -0.39844,-1.00782 -0.26172,-0.24218 -0.69336,-0.42382 -1.29492,-0.54492 l -0.7207,-0.14062 c -0.88282,-0.17578 -1.52149,-0.45117 -1.91602,-0.82618 -0.39453,-0.37499 -0.59179,-0.89647 -0.59179,-1.56445 0,-0.77343 0.27148,-1.3828 0.81445,-1.82812 0.54687,-0.44531 1.29882,-0.66796 2.25586,-0.66797 0.41015,10e-6 0.82812,0.0371 1.25391,0.11133 0.42577,0.0742 0.86132,0.18555 1.30664,0.33398"
+ style=""
+ id="path3355" />
+ <path
+ d="m 250.79239,630.13715 0,0.52734 -4.95703,0 c 0.0469,0.74219 0.26953,1.3086 0.66797,1.69922 0.40234,0.38672 0.96093,0.58008 1.67578,0.58008 0.41406,0 0.81445,-0.0508 1.20117,-0.15235 0.39062,-0.10156 0.77734,-0.2539 1.16016,-0.45703 l 0,1.01953 c -0.38673,0.16407 -0.78321,0.28907 -1.18946,0.375 -0.40625,0.0859 -0.81836,0.12891 -1.23633,0.12891 -1.04687,0 -1.87695,-0.30469 -2.49023,-0.91406 -0.60938,-0.60938 -0.91406,-1.43359 -0.91406,-2.47266 0,-1.07421 0.28906,-1.92578 0.86719,-2.55469 0.58202,-0.6328 1.36523,-0.94921 2.3496,-0.94922 0.88281,10e-6 1.58008,0.28517 2.0918,0.85547 0.51562,0.56641 0.77343,1.3379 0.77344,2.31446 m -1.07813,-0.31641 c -0.008,-0.58984 -0.17383,-1.06054 -0.49804,-1.41211 -0.32032,-0.35156 -0.7461,-0.52734 -1.27735,-0.52734 -0.60156,0 -1.08398,0.16992 -1.44726,0.50976 -0.35938,0.33985 -0.56641,0.81837 -0.6211,1.43555 l 3.84375,-0.006"
+ style=""
+ id="path3357" />
+ <path
+ d="m 257.28458,627.37738 0,1.00781 c -0.3047,-0.16796 -0.61134,-0.29296 -0.91993,-0.375 -0.30469,-0.0859 -0.61328,-0.1289 -0.92578,-0.1289 -0.69922,0 -1.24219,0.22266 -1.6289,0.66797 -0.38672,0.44141 -0.58008,1.0625 -0.58008,1.86328 0,0.80078 0.19336,1.42383 0.58008,1.86914 0.38671,0.4414 0.92968,0.66211 1.6289,0.66211 0.3125,0 0.62109,-0.041 0.92578,-0.12305 0.30859,-0.0859 0.61523,-0.21289 0.91993,-0.38086 l 0,0.99609 c -0.30079,0.14063 -0.61329,0.2461 -0.9375,0.31641 -0.32032,0.0703 -0.66212,0.10547 -1.02539,0.10547 -0.98829,0 -1.77344,-0.31055 -2.35547,-0.93164 -0.58204,-0.62109 -0.87305,-1.45898 -0.87305,-2.51367 0,-1.07031 0.29297,-1.91211 0.87891,-2.52539 0.58984,-0.61328 1.39648,-0.91992 2.41992,-0.91993 0.33203,10e-6 0.65624,0.0352 0.97265,0.10547 0.31641,0.0664 0.62305,0.16798 0.91993,0.30469"
+ style=""
+ id="path3359" />
+ <path
+ d="m 261.71426,627.88129 c -0.57812,0 -1.03515,0.22656 -1.37109,0.67968 -0.33594,0.44923 -0.50391,1.06641 -0.50391,1.85157 0,0.78516 0.16602,1.4043 0.49805,1.85742 0.33594,0.44922 0.79492,0.67383 1.37695,0.67383 0.57422,0 1.0293,-0.22656 1.36524,-0.67969 0.33593,-0.45312 0.5039,-1.07031 0.5039,-1.85156 0,-0.77734 -0.16797,-1.39258 -0.5039,-1.84571 -0.33594,-0.45702 -0.79102,-0.68554 -1.36524,-0.68554 m 0,-0.91407 c 0.9375,10e-6 1.67383,0.3047 2.20899,0.91407 0.53515,0.60938 0.80273,1.45313 0.80273,2.53125 0,1.07422 -0.26758,1.91797 -0.80273,2.53125 -0.53516,0.60937 -1.27149,0.91406 -2.20899,0.91406 -0.94141,0 -1.67969,-0.30469 -2.21484,-0.91406 -0.53125,-0.61328 -0.79688,-1.45703 -0.79687,-2.53125 -10e-6,-1.07812 0.26562,-1.92187 0.79687,-2.53125 0.53515,-0.60937 1.27343,-0.91406 2.21484,-0.91407"
+ style=""
+ id="path3361" />
+ <path
+ d="m 271.96231,629.72699 0,3.96094 -1.07812,0 0,-3.92578 c -10e-6,-0.62109 -0.1211,-1.08594 -0.36329,-1.39454 -0.24219,-0.30858 -0.60547,-0.46288 -1.08984,-0.46289 -0.58203,10e-6 -1.04102,0.18556 -1.37695,0.55664 -0.33594,0.3711 -0.50391,0.87696 -0.50391,1.51758 l 0,3.70899 -1.08398,0 0,-6.5625 1.08398,0 0,1.01953 c 0.25781,-0.39453 0.56055,-0.68945 0.9082,-0.88477 0.35156,-0.1953 0.75586,-0.29296 1.2129,-0.29297 0.7539,10e-6 1.32421,0.23439 1.71093,0.70313 0.38672,0.46485 0.58007,1.15039 0.58008,2.05664"
+ style=""
+ id="path3363" />
+ <path
+ d="m 278.44278,628.12152 0,-3.55078 1.07812,0 0,9.11719 -1.07812,0 0,-0.98438 c -0.22657,0.39063 -0.51368,0.68164 -0.86133,0.87305 -0.34375,0.1875 -0.75781,0.28125 -1.24219,0.28125 -0.79297,0 -1.43945,-0.31641 -1.93945,-0.94922 -0.49609,-0.63281 -0.74414,-1.46484 -0.74414,-2.49609 0,-1.03125 0.24805,-1.86328 0.74414,-2.4961 0.5,-0.6328 1.14648,-0.94921 1.93945,-0.94922 0.48438,10e-6 0.89844,0.0957 1.24219,0.28711 0.34765,0.18751 0.63476,0.47657 0.86133,0.86719 m -3.67383,2.29102 c 0,0.79297 0.16211,1.41601 0.48633,1.86914 0.32812,0.44922 0.77734,0.67383 1.34766,0.67383 0.5703,0 1.01952,-0.22461 1.34765,-0.67383 0.32812,-0.45313 0.49218,-1.07617 0.49219,-1.86914 -1e-5,-0.79297 -0.16407,-1.41406 -0.49219,-1.86328 -0.32813,-0.45312 -0.77735,-0.67969 -1.34765,-0.67969 -0.57032,0 -1.01954,0.22657 -1.34766,0.67969 -0.32422,0.44922 -0.48633,1.07031 -0.48633,1.86328"
+ style=""
+ id="path3365" />
+ <path
+ d="m 284.72403,630.3891 c -0.8711,0 -1.47461,0.0996 -1.81055,0.29883 -0.33594,0.19922 -0.50391,0.53906 -0.5039,1.01953 -1e-5,0.38281 0.12499,0.6875 0.375,0.91406 0.2539,0.22266 0.59765,0.33399 1.03125,0.33399 0.59765,0 1.07616,-0.21094 1.43554,-0.63282 0.36328,-0.42578 0.54492,-0.99023 0.54493,-1.69336 l 0,-0.24023 -1.07227,0 m 2.15039,-0.44531 0,3.74414 -1.07812,0 0,-0.9961 c -0.2461,0.39844 -0.55274,0.69336 -0.91993,0.88477 -0.36719,0.1875 -0.81641,0.28125 -1.34765,0.28125 -0.67188,0 -1.20704,-0.1875 -1.60547,-0.5625 -0.39453,-0.37891 -0.5918,-0.88477 -0.5918,-1.51758 0,-0.73828 0.24609,-1.29492 0.73828,-1.66992 0.49609,-0.375 1.23437,-0.5625 2.21485,-0.5625 l 1.51172,0 0,-0.10547 c -10e-6,-0.49609 -0.16407,-0.8789 -0.49219,-1.14844 -0.32423,-0.27343 -0.78126,-0.41015 -1.3711,-0.41015 -0.375,0 -0.74023,0.0449 -1.0957,0.13476 -0.35547,0.0899 -0.69727,0.22462 -1.02539,0.4043 l 0,-0.99609 c 0.39453,-0.15234 0.77734,-0.26562 1.14844,-0.33985 0.37109,-0.0781 0.73242,-0.11718 1.08398,-0.11719 0.94922,10e-6 1.6582,0.2461 2.12696,0.73829 0.46874,0.49219 0.70311,1.23828 0.70312,2.23828"
+ style=""
+ id="path3367" />
+ <path
+ d="m 292.90372,628.13324 c -0.1211,-0.0703 -0.25391,-0.12109 -0.39844,-0.15234 -0.14063,-0.0352 -0.29688,-0.0527 -0.46875,-0.0527 -0.60938,10e-6 -1.07813,0.19923 -1.40625,0.59766 -0.32422,0.39453 -0.48633,0.96289 -0.48633,1.70508 l 0,3.45703 -1.08398,0 0,-6.5625 1.08398,0 0,1.01953 c 0.22656,-0.39843 0.52148,-0.69335 0.88477,-0.88477 0.36327,-0.1953 0.80468,-0.29296 1.32422,-0.29297 0.0742,10e-6 0.15624,0.006 0.24609,0.0176 0.0898,0.008 0.18945,0.0215 0.29883,0.041 l 0.006,1.10742"
+ style=""
+ id="path3369" />
+ <path
+ d="m 296.77676,634.2973 c -0.30469,0.78125 -0.60156,1.29102 -0.89062,1.5293 -0.28907,0.23828 -0.67578,0.35742 -1.16016,0.35742 l -0.86133,0 0,-0.90234 0.63282,0 c 0.29687,0 0.52734,-0.0703 0.6914,-0.21094 0.16406,-0.14063 0.3457,-0.47266 0.54493,-0.99609 l 0.19335,-0.49219 -2.65429,-6.45703 1.14258,0 2.05078,5.13281 2.05078,-5.13281 1.14258,0 -2.88282,7.17187"
+ style=""
+ id="path3371" />
+ <path
+ d="m 305.01505,624.93988 1.76367,0 2.23242,5.95313 2.24414,-5.95313 1.76367,0 0,8.74805 -1.1543,0 0,-7.68164 -2.25585,6 -1.18946,0 -2.25586,-6 0,7.68164 -1.14843,0 0,-8.74805"
+ style=""
+ id="path3373" />
+ <path
+ d="m 317.87051,627.88129 c -0.57812,0 -1.03515,0.22656 -1.37109,0.67968 -0.33594,0.44923 -0.50391,1.06641 -0.50391,1.85157 0,0.78516 0.16602,1.4043 0.49805,1.85742 0.33594,0.44922 0.79492,0.67383 1.37695,0.67383 0.57422,0 1.0293,-0.22656 1.36524,-0.67969 0.33593,-0.45312 0.5039,-1.07031 0.5039,-1.85156 0,-0.77734 -0.16797,-1.39258 -0.5039,-1.84571 -0.33594,-0.45702 -0.79102,-0.68554 -1.36524,-0.68554 m 0,-0.91407 c 0.9375,10e-6 1.67383,0.3047 2.20899,0.91407 0.53515,0.60938 0.80273,1.45313 0.80273,2.53125 0,1.07422 -0.26758,1.91797 -0.80273,2.53125 -0.53516,0.60937 -1.27149,0.91406 -2.20899,0.91406 -0.94141,0 -1.67969,-0.30469 -2.21484,-0.91406 -0.53125,-0.61328 -0.79688,-1.45703 -0.79687,-2.53125 -10e-6,-1.07812 0.26562,-1.92187 0.79687,-2.53125 0.53515,-0.60937 1.27343,-0.91406 2.21484,-0.91407"
+ style=""
+ id="path3375" />
+ <path
+ d="m 328.11856,629.72699 0,3.96094 -1.07812,0 0,-3.92578 c -10e-6,-0.62109 -0.1211,-1.08594 -0.36329,-1.39454 -0.24219,-0.30858 -0.60547,-0.46288 -1.08984,-0.46289 -0.58203,10e-6 -1.04102,0.18556 -1.37695,0.55664 -0.33594,0.3711 -0.50391,0.87696 -0.50391,1.51758 l 0,3.70899 -1.08398,0 0,-6.5625 1.08398,0 0,1.01953 c 0.25781,-0.39453 0.56055,-0.68945 0.9082,-0.88477 0.35156,-0.1953 0.75586,-0.29296 1.2129,-0.29297 0.7539,10e-6 1.32421,0.23439 1.71093,0.70313 0.38672,0.46485 0.58007,1.15039 0.58008,2.05664"
+ style=""
+ id="path3377" />
+ <path
+ d="m 330.28067,627.12543 1.07813,0 0,6.5625 -1.07813,0 0,-6.5625 m 0,-2.55469 1.07813,0 0,1.36523 -1.07813,0 0,-1.36523"
+ style=""
+ id="path3379" />
+ <path
+ d="m 334.6752,625.26215 0,1.86328 2.2207,0 0,0.83789 -2.2207,0 0,3.5625 c 0,0.53516 0.0723,0.87891 0.2168,1.03125 0.14843,0.15234 0.44726,0.22851 0.89648,0.22851 l 1.10742,0 0,0.90235 -1.10742,0 c -0.83203,0 -1.40625,-0.1543 -1.72265,-0.46289 -0.31641,-0.3125 -0.47461,-0.87891 -0.47461,-1.69922 l 0,-3.5625 -0.79102,0 0,-0.83789 0.79102,0 0,-1.86328 1.08398,0"
+ style=""
+ id="path3381" />
+ <path
+ d="m 340.8627,627.88129 c -0.57813,0 -1.03516,0.22656 -1.37109,0.67968 -0.33594,0.44923 -0.50391,1.06641 -0.50391,1.85157 0,0.78516 0.16602,1.4043 0.49805,1.85742 0.33593,0.44922 0.79492,0.67383 1.37695,0.67383 0.57422,0 1.02929,-0.22656 1.36524,-0.67969 0.33593,-0.45312 0.5039,-1.07031 0.5039,-1.85156 0,-0.77734 -0.16797,-1.39258 -0.5039,-1.84571 -0.33595,-0.45702 -0.79102,-0.68554 -1.36524,-0.68554 m 0,-0.91407 c 0.9375,10e-6 1.67382,0.3047 2.20899,0.91407 0.53515,0.60938 0.80272,1.45313 0.80273,2.53125 -10e-6,1.07422 -0.26758,1.91797 -0.80273,2.53125 -0.53517,0.60937 -1.27149,0.91406 -2.20899,0.91406 -0.94141,0 -1.67969,-0.30469 -2.21484,-0.91406 -0.53125,-0.61328 -0.79688,-1.45703 -0.79688,-2.53125 0,-1.07812 0.26563,-1.92187 0.79688,-2.53125 0.53515,-0.60937 1.27343,-0.91406 2.21484,-0.91407"
+ style=""
+ id="path3383" />
+ <path
+ d="m 349.4584,628.13324 c -0.12109,-0.0703 -0.25391,-0.12109 -0.39843,-0.15234 -0.14063,-0.0352 -0.29688,-0.0527 -0.46875,-0.0527 -0.60938,10e-6 -1.07813,0.19923 -1.40625,0.59766 -0.32422,0.39453 -0.48633,0.96289 -0.48633,1.70508 l 0,3.45703 -1.08399,0 0,-6.5625 1.08399,0 0,1.01953 c 0.22656,-0.39843 0.52148,-0.69335 0.88476,-0.88477 0.36328,-0.1953 0.80469,-0.29296 1.32422,-0.29297 0.0742,10e-6 0.15625,0.006 0.2461,0.0176 0.0898,0.008 0.18945,0.0215 0.29883,0.041 l 0.006,1.10742"
+ style=""
+ id="path3385" />
+ </g>
+ <g
+ style="font-size:12px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#00b800;fill-opacity:1;stroke:none;font-family:Sans"
+ id="text3777">
+ <path
+ d="m 575.27002,547.52673 0,3.28711 1.48828,0 c 0.55078,10e-6 0.97656,-0.14257 1.27734,-0.42773 0.30078,-0.28515 0.45117,-0.6914 0.45118,-1.21875 -10e-6,-0.52343 -0.1504,-0.92773 -0.45118,-1.21289 -0.30078,-0.28515 -0.72656,-0.42773 -1.27734,-0.42774 l -1.48828,0 m -1.18359,-0.97265 2.67187,0 c 0.98046,10e-6 1.7207,0.22266 2.2207,0.66797 0.5039,0.44141 0.75586,1.08985 0.75586,1.94531 0,0.86328 -0.25196,1.51563 -0.75586,1.95703 -0.5,0.44141 -1.24024,0.66211 -2.2207,0.66211 l -1.48828,0 0,3.51562 -1.18359,0 0,-8.74804"
+ style=""
+ id="path3326" />
+ <path
+ d="m 584.87354,549.74744 c -0.1211,-0.0703 -0.25392,-0.12109 -0.39844,-0.15235 -0.14063,-0.0352 -0.29688,-0.0527 -0.46875,-0.0527 -0.60938,0 -1.07813,0.19922 -1.40625,0.59765 -0.32422,0.39454 -0.48633,0.9629 -0.48633,1.70508 l 0,3.45703 -1.08398,0 0,-6.5625 1.08398,0 0,1.01954 c 0.22656,-0.39844 0.52148,-0.69336 0.88477,-0.88477 0.36327,-0.19531 0.80468,-0.29296 1.32421,-0.29297 0.0742,10e-6 0.15625,0.006 0.2461,0.0176 0.0898,0.008 0.18945,0.0215 0.29883,0.041 l 0.006,1.10743"
+ style=""
+ id="path3328" />
+ <path
+ d="m 586.01611,548.73962 1.07813,0 0,6.5625 -1.07813,0 0,-6.5625 m 0,-2.55468 1.07813,0 0,1.36523 -1.07813,0 0,-1.36523"
+ style=""
+ id="path3330" />
+ <path
+ d="m 594.45361,549.99939 c 0.26953,-0.48437 0.59179,-0.84179 0.9668,-1.07227 0.37499,-0.23046 0.8164,-0.34569 1.32422,-0.3457 0.68358,10e-6 1.21093,0.24024 1.58203,0.7207 0.37108,0.47657 0.55663,1.15626 0.55664,2.03907 l 0,3.96093 -1.08398,0 0,-3.92578 c -10e-6,-0.6289 -0.11134,-1.0957 -0.33399,-1.40039 -0.22266,-0.30468 -0.56251,-0.45702 -1.01953,-0.45703 -0.5586,10e-6 -1.00001,0.18555 -1.32422,0.55664 -0.32422,0.3711 -0.48633,0.87696 -0.48633,1.51758 l 0,3.70898 -1.08398,0 0,-3.92578 c -1e-5,-0.63281 -0.11133,-1.0996 -0.33398,-1.40039 -0.22267,-0.30468 -0.56642,-0.45702 -1.03125,-0.45703 -0.55079,10e-6 -0.98829,0.18751 -1.3125,0.5625 -0.32423,0.3711 -0.48634,0.87501 -0.48633,1.51172 l 0,3.70898 -1.08399,0 0,-6.5625 1.08399,0 0,1.01954 c 0.24609,-0.40234 0.54101,-0.69922 0.88476,-0.89063 0.34375,-0.1914 0.75195,-0.2871 1.22461,-0.28711 0.47656,10e-6 0.88086,0.1211 1.21289,0.36328 0.33593,0.2422 0.58398,0.59376 0.74414,1.05469"
+ style=""
+ id="path3332" />
+ <path
+ d="m 604.02197,552.0033 c -0.87109,0 -1.47461,0.0996 -1.81054,0.29882 -0.33594,0.19923 -0.50391,0.53907 -0.50391,1.01954 0,0.38281 0.125,0.6875 0.375,0.91406 0.2539,0.22265 0.59765,0.33398 1.03125,0.33398 0.59765,0 1.07617,-0.21093 1.43555,-0.63281 0.36327,-0.42578 0.54491,-0.99023 0.54492,-1.69336 l 0,-0.24023 -1.07227,0 m 2.15039,-0.44532 0,3.74414 -1.07812,0 0,-0.99609 c -0.2461,0.39844 -0.55274,0.69336 -0.91992,0.88477 -0.36719,0.1875 -0.81641,0.28125 -1.34766,0.28125 -0.67188,0 -1.20703,-0.1875 -1.60547,-0.5625 -0.39453,-0.37891 -0.5918,-0.88477 -0.5918,-1.51758 0,-0.73828 0.2461,-1.29492 0.73829,-1.66992 0.49609,-0.375 1.23437,-0.5625 2.21484,-0.5625 l 1.51172,0 0,-0.10547 c -10e-6,-0.49609 -0.16407,-0.8789 -0.49219,-1.14844 -0.32422,-0.27343 -0.78125,-0.41015 -1.37109,-0.41016 -0.37501,10e-6 -0.74024,0.0449 -1.09571,0.13477 -0.35547,0.0898 -0.69726,0.22461 -1.02539,0.4043 l 0,-0.9961 c 0.39453,-0.15234 0.77735,-0.26562 1.14844,-0.33984 0.37109,-0.0781 0.73242,-0.11718 1.08399,-0.11719 0.94921,10e-6 1.65819,0.2461 2.12695,0.73828 0.46874,0.4922 0.70312,1.23829 0.70312,2.23828"
+ style=""
+ id="path3334" />
+ <path
+ d="m 612.20166,549.74744 c -0.1211,-0.0703 -0.25391,-0.12109 -0.39844,-0.15235 -0.14063,-0.0352 -0.29688,-0.0527 -0.46875,-0.0527 -0.60938,0 -1.07813,0.19922 -1.40625,0.59765 -0.32422,0.39454 -0.48633,0.9629 -0.48633,1.70508 l 0,3.45703 -1.08398,0 0,-6.5625 1.08398,0 0,1.01954 c 0.22656,-0.39844 0.52149,-0.69336 0.88477,-0.88477 0.36328,-0.19531 0.80468,-0.29296 1.32422,-0.29297 0.0742,10e-6 0.15624,0.006 0.24609,0.0176 0.0898,0.008 0.18945,0.0215 0.29883,0.041 l 0.006,1.10743"
+ style=""
+ id="path3336" />
+ <path
+ d="m 616.07471,555.9115 c -0.30469,0.78125 -0.60157,1.29101 -0.89063,1.5293 -0.28906,0.23827 -0.67578,0.35742 -1.16015,0.35742 l -0.86133,0 0,-0.90235 0.63281,0 c 0.29687,0 0.52734,-0.0703 0.69141,-0.21093 0.16406,-0.14063 0.3457,-0.47266 0.54492,-0.9961 l 0.19336,-0.49218 -2.6543,-6.45704 1.14258,0 2.05078,5.13282 2.05078,-5.13282 1.14258,0 -2.88281,7.17188"
+ style=""
+ id="path3338" />
+ <path
+ d="m 624.31299,546.55408 1.76367,0 2.23242,5.95312 2.24414,-5.95312 1.76367,0 0,8.74804 -1.15429,0 0,-7.68164 -2.25586,6 -1.18945,0 -2.25586,-6 0,7.68164 -1.14844,0 0,-8.74804"
+ style=""
+ id="path3340" />
+ <path
+ d="m 637.16846,549.49548 c -0.57813,10e-6 -1.03516,0.22657 -1.3711,0.67969 -0.33594,0.44922 -0.5039,1.06641 -0.5039,1.85156 0,0.78516 0.16601,1.4043 0.49804,1.85743 0.33594,0.44921 0.79492,0.67382 1.37696,0.67382 0.57421,0 1.02929,-0.22656 1.36523,-0.67968 0.33593,-0.45313 0.5039,-1.07031 0.50391,-1.85157 -10e-6,-0.77734 -0.16798,-1.39257 -0.50391,-1.8457 -0.33594,-0.45703 -0.79102,-0.68554 -1.36523,-0.68555 m 0,-0.91406 c 0.93749,10e-6 1.67382,0.30469 2.20898,0.91406 0.53515,0.60938 0.80273,1.45313 0.80274,2.53125 -10e-6,1.07422 -0.26759,1.91797 -0.80274,2.53125 -0.53516,0.60938 -1.27149,0.91407 -2.20898,0.91407 -0.94141,0 -1.67969,-0.30469 -2.21485,-0.91407 -0.53125,-0.61328 -0.79687,-1.45703 -0.79687,-2.53125 0,-1.07812 0.26562,-1.92187 0.79687,-2.53125 0.53516,-0.60937 1.27344,-0.91405 2.21485,-0.91406"
+ style=""
+ id="path3342" />
+ <path
+ d="m 647.4165,551.34119 0,3.96093 -1.07812,0 0,-3.92578 c -10e-6,-0.62109 -0.1211,-1.08593 -0.36328,-1.39453 -0.24219,-0.30859 -0.60548,-0.46288 -1.08985,-0.46289 -0.58203,10e-6 -1.04101,0.18555 -1.37695,0.55664 -0.33594,0.3711 -0.50391,0.87696 -0.50391,1.51758 l 0,3.70898 -1.08398,0 0,-6.5625 1.08398,0 0,1.01954 c 0.25781,-0.39453 0.56055,-0.68945 0.90821,-0.88477 0.35156,-0.19531 0.75585,-0.29296 1.21289,-0.29297 0.7539,10e-6 1.32421,0.23438 1.71094,0.70313 0.38671,0.46485 0.58007,1.15039 0.58007,2.05664"
+ style=""
+ id="path3344" />
+ <path
+ d="m 649.57861,548.73962 1.07813,0 0,6.5625 -1.07813,0 0,-6.5625 m 0,-2.55468 1.07813,0 0,1.36523 -1.07813,0 0,-1.36523"
+ style=""
+ id="path3346" />
+ <path
+ d="m 653.97314,546.87634 0,1.86328 2.22071,0 0,0.83789 -2.22071,0 0,3.5625 c 0,0.53516 0.0723,0.87891 0.2168,1.03125 0.14844,0.15235 0.44726,0.22852 0.89649,0.22852 l 1.10742,0 0,0.90234 -1.10742,0 c -0.83204,0 -1.40626,-0.15429 -1.72266,-0.46289 -0.31641,-0.3125 -0.47461,-0.8789 -0.47461,-1.69922 l 0,-3.5625 -0.79102,0 0,-0.83789 0.79102,0 0,-1.86328 1.08398,0"
+ style=""
+ id="path3348" />
+ <path
+ d="m 660.16064,549.49548 c -0.57812,10e-6 -1.03515,0.22657 -1.37109,0.67969 -0.33594,0.44922 -0.50391,1.06641 -0.50391,1.85156 0,0.78516 0.16602,1.4043 0.49805,1.85743 0.33594,0.44921 0.79492,0.67382 1.37695,0.67382 0.57422,0 1.0293,-0.22656 1.36524,-0.67968 0.33593,-0.45313 0.5039,-1.07031 0.50391,-1.85157 -1e-5,-0.77734 -0.16798,-1.39257 -0.50391,-1.8457 -0.33594,-0.45703 -0.79102,-0.68554 -1.36524,-0.68555 m 0,-0.91406 c 0.9375,10e-6 1.67383,0.30469 2.20899,0.91406 0.53515,0.60938 0.80273,1.45313 0.80273,2.53125 0,1.07422 -0.26758,1.91797 -0.80273,2.53125 -0.53516,0.60938 -1.27149,0.91407 -2.20899,0.91407 -0.9414,0 -1.67968,-0.30469 -2.21484,-0.91407 -0.53125,-0.61328 -0.79687,-1.45703 -0.79687,-2.53125 0,-1.07812 0.26562,-1.92187 0.79687,-2.53125 0.53516,-0.60937 1.27344,-0.91405 2.21484,-0.91406"
+ style=""
+ id="path3350" />
+ <path
+ d="m 668.75635,549.74744 c -0.1211,-0.0703 -0.25391,-0.12109 -0.39844,-0.15235 -0.14063,-0.0352 -0.29688,-0.0527 -0.46875,-0.0527 -0.60938,0 -1.07813,0.19922 -1.40625,0.59765 -0.32422,0.39454 -0.48633,0.9629 -0.48633,1.70508 l 0,3.45703 -1.08398,0 0,-6.5625 1.08398,0 0,1.01954 c 0.22656,-0.39844 0.52148,-0.69336 0.88477,-0.88477 0.36328,-0.19531 0.80468,-0.29296 1.32422,-0.29297 0.0742,10e-6 0.15624,0.006 0.24609,0.0176 0.0898,0.008 0.18945,0.0215 0.29883,0.041 l 0.006,1.10743"
+ style=""
+ id="path3352" />
+ </g>
+ <g
+ style="font-size:12px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#0000ff;fill-opacity:1;stroke:none;font-family:Sans"
+ id="text3781">
+ <path
+ d="m 612.64136,657.77118 -3.33985,-8.74805 1.23633,0 2.77149,7.36524 2.77734,-7.36524 1.23047,0 -3.33399,8.74805 -1.34179,0"
+ style=""
+ id="path3401" />
+ <path
+ d="m 618.28394,651.20868 1.07812,0 0,6.5625 -1.07812,0 0,-6.5625 m 0,-2.55469 1.07812,0 0,1.36524 -1.07812,0 0,-1.36524"
+ style=""
+ id="path3403" />
+ <path
+ d="m 625.41479,652.21649 c -0.12109,-0.0703 -0.25391,-0.12109 -0.39843,-0.15234 -0.14063,-0.0352 -0.29688,-0.0527 -0.46875,-0.0527 -0.60938,1e-5 -1.07813,0.19923 -1.40625,0.59766 -0.32422,0.39454 -0.48633,0.96289 -0.48633,1.70508 l 0,3.45703 -1.08399,0 0,-6.5625 1.08399,0 0,1.01953 c 0.22656,-0.39843 0.52148,-0.69335 0.88476,-0.88477 0.36328,-0.1953 0.80469,-0.29296 1.32422,-0.29296 0.0742,0 0.15625,0.006 0.2461,0.0176 0.0898,0.008 0.18945,0.0215 0.29883,0.041 l 0.006,1.10742"
+ style=""
+ id="path3405" />
+ <path
+ d="m 627.62378,649.3454 0,1.86328 2.2207,0 0,0.83789 -2.2207,0 0,3.5625 c 0,0.53516 0.0723,0.87891 0.2168,1.03125 0.14843,0.15234 0.44726,0.22852 0.89648,0.22852 l 1.10742,0 0,0.90234 -1.10742,0 c -0.83203,0 -1.40625,-0.1543 -1.72266,-0.46289 -0.3164,-0.3125 -0.47461,-0.87891 -0.47461,-1.69922 l 0,-3.5625 -0.79101,0 0,-0.83789 0.79101,0 0,-1.86328 1.08399,0"
+ style=""
+ id="path3407" />
+ <path
+ d="m 631.15698,655.18134 0,-3.97266 1.07813,0 0,3.93164 c 0,0.6211 0.12109,1.08789 0.36328,1.40039 0.24218,0.3086 0.60546,0.46289 1.08984,0.46289 0.58203,0 1.04101,-0.18554 1.37696,-0.55664 0.33983,-0.37109 0.50976,-0.87695 0.50976,-1.51758 l 0,-3.7207 1.07813,0 0,6.5625 -1.07813,0 0,-1.00781 c -0.26172,0.39843 -0.56641,0.69531 -0.91406,0.89062 -0.34376,0.19141 -0.74415,0.28711 -1.20117,0.28711 -0.75391,0 -1.32618,-0.23437 -1.7168,-0.70312 -0.39063,-0.46875 -0.58594,-1.1543 -0.58594,-2.05664 m 2.71289,-4.13086 0,0"
+ style=""
+ id="path3409" />
+ <path
+ d="m 641.86792,654.47235 c -0.8711,0 -1.47461,0.0996 -1.81055,0.29883 -0.33594,0.19922 -0.50391,0.53906 -0.5039,1.01953 -10e-6,0.38281 0.12499,0.6875 0.375,0.91406 0.2539,0.22266 0.59765,0.33399 1.03125,0.33399 0.59765,0 1.07616,-0.21094 1.43554,-0.63282 0.36328,-0.42577 0.54492,-0.99023 0.54493,-1.69335 l 0,-0.24024 -1.07227,0 m 2.15039,-0.44531 0,3.74414 -1.07812,0 0,-0.99609 c -0.2461,0.39843 -0.55274,0.69336 -0.91993,0.88476 -0.36719,0.1875 -0.81641,0.28125 -1.34765,0.28125 -0.67188,0 -1.20704,-0.1875 -1.60547,-0.5625 -0.39453,-0.3789 -0.5918,-0.88476 -0.5918,-1.51758 0,-0.73828 0.24609,-1.29492 0.73828,-1.66992 0.49609,-0.37499 1.23438,-0.56249 2.21485,-0.5625 l 1.51172,0 0,-0.10547 c -10e-6,-0.49609 -0.16407,-0.8789 -0.49219,-1.14844 -0.32423,-0.27343 -0.78126,-0.41015 -1.3711,-0.41015 -0.375,0 -0.74023,0.0449 -1.0957,0.13476 -0.35547,0.0899 -0.69727,0.22462 -1.02539,0.4043 l 0,-0.99609 c 0.39453,-0.15234 0.77734,-0.26562 1.14844,-0.33985 0.37109,-0.0781 0.73242,-0.11718 1.08398,-0.11718 0.94922,0 1.6582,0.2461 2.12696,0.73828 0.46874,0.49219 0.70311,1.23828 0.70312,2.23828"
+ style=""
+ id="path3411" />
+ <path
+ d="m 646.24487,648.65399 1.07813,0 0,9.11719 -1.07813,0 0,-9.11719"
+ style=""
+ id="path3413" />
+ <path
+ d="m 658.68433,649.31024 0,1.1543 c -0.44923,-0.21484 -0.87306,-0.37499 -1.27149,-0.48047 -0.39844,-0.10546 -0.78321,-0.1582 -1.1543,-0.1582 -0.64453,0 -1.14258,0.125 -1.49414,0.375 -0.34765,0.25 -0.52148,0.60547 -0.52148,1.0664 0,0.38673 0.11523,0.6797 0.3457,0.87891 0.23438,0.19532 0.67578,0.35352 1.32422,0.47461 l 0.71485,0.14648 c 0.8828,0.16798 1.53319,0.46485 1.95117,0.89063 0.42187,0.42188 0.6328,0.98828 0.63281,1.69922 -10e-6,0.84765 -0.28516,1.49023 -0.85547,1.92773 -0.56641,0.4375 -1.39844,0.65625 -2.49609,0.65625 -0.41407,0 -0.85547,-0.0469 -1.32422,-0.14062 -0.46485,-0.0937 -0.94727,-0.23243 -1.44727,-0.41602 l 0,-1.21875 c 0.48047,0.26953 0.95117,0.47266 1.41211,0.60938 0.46094,0.13672 0.91406,0.20507 1.35938,0.20507 0.67577,0 1.19726,-0.13281 1.56445,-0.39843 0.36718,-0.26563 0.55078,-0.64453 0.55078,-1.13672 0,-0.42969 -0.13282,-0.76562 -0.39844,-1.00782 -0.26172,-0.24218 -0.69336,-0.42382 -1.29492,-0.54492 l -0.7207,-0.14062 c -0.88282,-0.17578 -1.52149,-0.45117 -1.91602,-0.82617 -0.39453,-0.375 -0.59179,-0.89648 -0.59179,-1.56446 0,-0.77343 0.27148,-1.3828 0.81445,-1.82812 0.54687,-0.44531 1.29883,-0.66796 2.25586,-0.66797 0.41015,1e-5 0.82812,0.0371 1.25391,0.11133 0.42577,0.0742 0.86132,0.18555 1.30664,0.33398"
+ style=""
+ id="path3415" />
+ <path
+ d="m 665.73315,651.46063 0,1.00781 c -0.30469,-0.16796 -0.61133,-0.29296 -0.91992,-0.375 -0.30469,-0.0859 -0.61328,-0.1289 -0.92578,-0.1289 -0.69922,0 -1.24219,0.22266 -1.62891,0.66797 -0.38672,0.44141 -0.58008,1.0625 -0.58007,1.86328 -10e-6,0.80078 0.19335,1.42383 0.58007,1.86914 0.38672,0.44141 0.92969,0.66211 1.62891,0.66211 0.3125,0 0.62109,-0.041 0.92578,-0.12305 0.30859,-0.0859 0.61523,-0.21289 0.91992,-0.38086 l 0,0.9961 c -0.30078,0.14062 -0.61328,0.24609 -0.9375,0.3164 -0.32031,0.0703 -0.66211,0.10547 -1.02539,0.10547 -0.98828,0 -1.77344,-0.31055 -2.35547,-0.93164 -0.58203,-0.62109 -0.87304,-1.45898 -0.87304,-2.51367 0,-1.07031 0.29297,-1.91211 0.8789,-2.52539 0.58985,-0.61328 1.39649,-0.91992 2.41993,-0.91992 0.33202,0 0.65624,0.0352 0.97265,0.10546 0.3164,0.0664 0.62304,0.16798 0.91992,0.30469"
+ style=""
+ id="path3417" />
+ <path
+ d="m 671.42261,652.21649 c -0.1211,-0.0703 -0.25391,-0.12109 -0.39844,-0.15234 -0.14063,-0.0352 -0.29688,-0.0527 -0.46875,-0.0527 -0.60938,1e-5 -1.07813,0.19923 -1.40625,0.59766 -0.32422,0.39454 -0.48633,0.96289 -0.48633,1.70508 l 0,3.45703 -1.08398,0 0,-6.5625 1.08398,0 0,1.01953 c 0.22656,-0.39843 0.52148,-0.69335 0.88477,-0.88477 0.36328,-0.1953 0.80468,-0.29296 1.32422,-0.29296 0.0742,0 0.15624,0.006 0.24609,0.0176 0.0898,0.008 0.18945,0.0215 0.29883,0.041 l 0.006,1.10742"
+ style=""
+ id="path3419" />
+ <path
+ d="m 677.92065,654.2204 0,0.52734 -4.95703,0 c 0.0469,0.74219 0.26953,1.3086 0.66797,1.69922 0.40234,0.38672 0.96094,0.58008 1.67578,0.58008 0.41406,0 0.81445,-0.0508 1.20117,-0.15235 0.39062,-0.10156 0.77734,-0.2539 1.16016,-0.45703 l 0,1.01953 c -0.38672,0.16407 -0.78321,0.28907 -1.18945,0.375 -0.40626,0.0859 -0.81837,0.12891 -1.23633,0.12891 -1.04688,0 -1.87696,-0.30469 -2.49023,-0.91406 -0.60938,-0.60938 -0.91407,-1.43359 -0.91407,-2.47266 0,-1.07421 0.28906,-1.92577 0.86719,-2.55469 0.58203,-0.6328 1.36523,-0.94921 2.34961,-0.94921 0.88281,0 1.58007,0.28516 2.0918,0.85546 0.51562,0.56642 0.77343,1.3379 0.77343,2.31446 m -1.07812,-0.31641 c -0.008,-0.58984 -0.17383,-1.06054 -0.49805,-1.41211 -0.32031,-0.35155 -0.7461,-0.52734 -1.27734,-0.52734 -0.60157,0 -1.08399,0.16993 -1.44727,0.50976 -0.35937,0.33985 -0.56641,0.81837 -0.62109,1.43555 l 3.84375,-0.006"
+ style=""
+ id="path3421" />
+ <path
+ d="m 685.30347,654.2204 0,0.52734 -4.95703,0 c 0.0469,0.74219 0.26952,1.3086 0.66796,1.69922 0.40235,0.38672 0.96094,0.58008 1.67579,0.58008 0.41405,0 0.81444,-0.0508 1.20117,-0.15235 0.39062,-0.10156 0.77734,-0.2539 1.16015,-0.45703 l 0,1.01953 c -0.38672,0.16407 -0.78321,0.28907 -1.18945,0.375 -0.40625,0.0859 -0.81836,0.12891 -1.23633,0.12891 -1.04688,0 -1.87695,-0.30469 -2.49023,-0.91406 -0.60938,-0.60938 -0.91407,-1.43359 -0.91406,-2.47266 -10e-6,-1.07421 0.28906,-1.92577 0.86718,-2.55469 0.58203,-0.6328 1.36523,-0.94921 2.34961,-0.94921 0.88281,0 1.58008,0.28516 2.0918,0.85546 0.51562,0.56642 0.77343,1.3379 0.77344,2.31446 m -1.07813,-0.31641 c -0.008,-0.58984 -0.17383,-1.06054 -0.49805,-1.41211 -0.32031,-0.35155 -0.74609,-0.52734 -1.27734,-0.52734 -0.60156,0 -1.08399,0.16993 -1.44726,0.50976 -0.35938,0.33985 -0.56641,0.81837 -0.6211,1.43555 l 3.84375,-0.006"
+ style=""
+ id="path3423" />
+ <path
+ d="m 692.52808,653.81024 0,3.96094 -1.07813,0 0,-3.92578 c 0,-0.62109 -0.1211,-1.08593 -0.36328,-1.39453 -0.24219,-0.30859 -0.60547,-0.46289 -1.08984,-0.46289 -0.58204,0 -1.04102,0.18555 -1.37696,0.55664 -0.33594,0.3711 -0.50391,0.87695 -0.5039,1.51757 l 0,3.70899 -1.08399,0 0,-6.5625 1.08399,0 0,1.01953 c 0.25781,-0.39452 0.56054,-0.68945 0.9082,-0.88477 0.35156,-0.1953 0.75586,-0.29296 1.21289,-0.29296 0.7539,0 1.32421,0.23438 1.71094,0.70312 0.38671,0.46485 0.58007,1.1504 0.58008,2.05664"
+ style=""
+ id="path3425" />
+ </g>
+ <rect
+ style="fill:#b8b8b8;fill-opacity:1;stroke:#b8b8b8;stroke-width:1;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0"
+ id="rect5577"
+ width="173.25098"
+ height="141.43118"
+ x="157.75581"
+ y="436.97159" />
+ <rect
+ style="fill:#ededed;fill-opacity:1;stroke:#ededed;stroke-width:1;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
+ id="rect3789"
+ width="168.99611"
+ height="136.87178"
+ x="159.87543"
+ y="439.39697"
+ inkscape:export-filename="/home/elmindreda/projects/glfw/glfw/docs/spaces.png"
+ inkscape:export-xdpi="109.89113"
+ inkscape:export-ydpi="109.89113" />
+ <g
+ style="font-size:12px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#454545;fill-opacity:1;stroke:none;font-family:Sans"
+ id="text3791">
+ <path
+ d="m 274.28683,558.98682 1.19532,0 1.83984,7.39453 1.83399,-7.39453 1.33007,0 1.83985,7.39453 1.83398,-7.39453 1.20117,0 -2.19726,8.74804 -1.48828,0 -1.84571,-7.59375 -1.86328,7.59375 -1.48828,0 -2.19141,-8.74804"
+ style=""
+ id="path3388" />
+ <path
+ d="m 286.62082,561.17236 1.07812,0 0,6.5625 -1.07812,0 0,-6.5625 m 0,-2.55468 1.07812,0 0,1.36523 -1.07812,0 0,-1.36523"
+ style=""
+ id="path3390" />
+ <path
+ d="m 295.40402,563.77393 0,3.96093 -1.07812,0 0,-3.92578 c -10e-6,-0.62109 -0.1211,-1.08593 -0.36328,-1.39453 -0.2422,-0.30859 -0.60548,-0.46288 -1.08985,-0.46289 -0.58203,1e-5 -1.04102,0.18555 -1.37695,0.55664 -0.33594,0.3711 -0.50391,0.87696 -0.50391,1.51758 l 0,3.70898 -1.08398,0 0,-6.5625 1.08398,0 0,1.01953 c 0.25781,-0.39452 0.56055,-0.68944 0.90821,-0.88476 0.35156,-0.19531 0.75585,-0.29296 1.21289,-0.29297 0.7539,1e-5 1.32421,0.23438 1.71093,0.70313 0.38672,0.46484 0.58008,1.15039 0.58008,2.05664"
+ style=""
+ id="path3392" />
+ <path
+ d="m 301.88449,562.16846 0,-3.55078 1.07813,0 0,9.11718 -1.07813,0 0,-0.98437 c -0.22657,0.39062 -0.51368,0.68164 -0.86133,0.87305 -0.34375,0.1875 -0.75781,0.28124 -1.24218,0.28125 -0.79298,-1e-5 -1.43946,-0.31641 -1.93946,-0.94922 -0.49609,-0.63281 -0.74414,-1.46485 -0.74414,-2.4961 0,-1.03124 0.24805,-1.86327 0.74414,-2.49609 0.5,-0.63281 1.14648,-0.94921 1.93946,-0.94922 0.48437,1e-5 0.89843,0.0957 1.24218,0.28711 0.34765,0.18751 0.63476,0.47657 0.86133,0.86719 m -3.67383,2.29101 c 0,0.79297 0.16211,1.41602 0.48633,1.86914 0.32812,0.44922 0.77734,0.67383 1.34766,0.67383 0.57031,0 1.01952,-0.22461 1.34765,-0.67383 0.32812,-0.45312 0.49219,-1.07617 0.49219,-1.86914 0,-0.79296 -0.16407,-1.41406 -0.49219,-1.86328 -0.32813,-0.45312 -0.77734,-0.67968 -1.34765,-0.67969 -0.57032,10e-6 -1.01954,0.22657 -1.34766,0.67969 -0.32422,0.44922 -0.48633,1.07032 -0.48633,1.86328"
+ style=""
+ id="path3394" />
+ <path
+ d="m 307.72629,561.92822 c -0.57813,10e-6 -1.03516,0.22657 -1.3711,0.67969 -0.33594,0.44922 -0.5039,1.06641 -0.5039,1.85156 0,0.78516 0.16601,1.4043 0.49804,1.85742 0.33594,0.44922 0.79492,0.67383 1.37696,0.67383 0.57421,0 1.02929,-0.22656 1.36523,-0.67968 0.33593,-0.45313 0.5039,-1.07031 0.50391,-1.85157 -1e-5,-0.77734 -0.16798,-1.39257 -0.50391,-1.8457 -0.33594,-0.45703 -0.79102,-0.68554 -1.36523,-0.68555 m 0,-0.91406 c 0.93749,1e-5 1.67382,0.30469 2.20898,0.91406 0.53515,0.60938 0.80273,1.45313 0.80274,2.53125 -10e-6,1.07422 -0.26759,1.91797 -0.80274,2.53125 -0.53516,0.60938 -1.27149,0.91406 -2.20898,0.91407 -0.94141,-1e-5 -1.67969,-0.30469 -2.21485,-0.91407 -0.53125,-0.61328 -0.79687,-1.45703 -0.79687,-2.53125 0,-1.07812 0.26562,-1.92187 0.79687,-2.53125 0.53516,-0.60937 1.27344,-0.91405 2.21485,-0.91406"
+ style=""
+ id="path3396" />
+ <path
+ d="m 311.8923,561.17236 1.07813,0 1.34765,5.1211 1.3418,-5.1211 1.27149,0 1.34765,5.1211 1.3418,-5.1211 1.07812,0 -1.71679,6.5625 -1.27149,0 -1.41211,-5.3789 -1.41797,5.3789 -1.27148,0 -1.7168,-6.5625"
+ style=""
+ id="path3398" />
+ </g>
+ <rect
+ y="439.39581"
+ x="159.87428"
+ height="8.8251209"
+ width="168.99841"
+ id="rect3795"
+ style="fill:#7b7bff;fill-opacity:1;stroke:#7b7bff;stroke-width:1;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
+ inkscape:export-filename="/home/elmindreda/projects/glfw/glfw/docs/spaces.png"
+ inkscape:export-xdpi="109.89113"
+ inkscape:export-ydpi="109.89113" />
+ <path
+ sodipodi:type="arc"
+ style="fill:#000000;fill-opacity:1;stroke:none"
+ id="path3797"
+ sodipodi:cx="352.54324"
+ sodipodi:cy="373.03461"
+ sodipodi:rx="2.5253813"
+ sodipodi:ry="2.5253813"
+ d="m 355.06862,373.03461 c 0,1.39473 -1.13065,2.52538 -2.52538,2.52538 -1.39473,0 -2.52538,-1.13065 -2.52538,-2.52538 0,-1.39473 1.13065,-2.52538 2.52538,-2.52538 1.39473,0 2.52538,1.13065 2.52538,2.52538 z"
+ transform="matrix(0.66107369,0,0,0.66107369,123.32145,119.41326)"
+ inkscape:export-filename="/home/elmindreda/projects/glfw/glfw/docs/spaces.png"
+ inkscape:export-xdpi="109.89113"
+ inkscape:export-ydpi="109.89113" />
+ <path
+ transform="matrix(0.66107369,0,0,0.66107369,-194.73594,119.44704)"
+ d="m 355.06862,373.03461 c 0,1.39473 -1.13065,2.52538 -2.52538,2.52538 -1.39473,0 -2.52538,-1.13065 -2.52538,-2.52538 0,-1.39473 1.13065,-2.52538 2.52538,-2.52538 1.39473,0 2.52538,1.13065 2.52538,2.52538 z"
+ sodipodi:ry="2.5253813"
+ sodipodi:rx="2.5253813"
+ sodipodi:cy="373.03461"
+ sodipodi:cx="352.54324"
+ id="path3799"
+ style="fill:#000000;fill-opacity:1;stroke:none"
+ sodipodi:type="arc"
+ inkscape:export-filename="/home/elmindreda/projects/glfw/glfw/docs/spaces.png"
+ inkscape:export-xdpi="109.89113"
+ inkscape:export-ydpi="109.89113" />
+ <path
+ sodipodi:type="arc"
+ style="fill:#000000;fill-opacity:1;stroke:none"
+ id="path3801"
+ sodipodi:cx="352.54324"
+ sodipodi:cy="373.03461"
+ sodipodi:rx="2.5253813"
+ sodipodi:ry="2.5253813"
+ d="m 355.06862,373.03461 c 0,1.39473 -1.13065,2.52538 -2.52538,2.52538 -1.39473,0 -2.52538,-1.13065 -2.52538,-2.52538 0,-1.39473 1.13065,-2.52538 2.52538,-2.52538 1.39473,0 2.52538,1.13065 2.52538,2.52538 z"
+ transform="matrix(0.66107369,0,0,0.66107369,-73.218648,201.61091)"
+ inkscape:export-filename="/home/elmindreda/projects/glfw/glfw/docs/spaces.png"
+ inkscape:export-xdpi="109.89113"
+ inkscape:export-ydpi="109.89113" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none"
+ x="21.213203"
+ y="340.20465"
+ id="text3803"
+ inkscape:export-filename="/home/elmindreda/projects/glfw/glfw/docs/spaces.png"
+ inkscape:export-xdpi="109.89113"
+ inkscape:export-ydpi="109.89113"><tspan
+ sodipodi:role="line"
+ id="tspan3805"
+ x="21.213203"
+ y="340.20465"
+ style="font-size:12px;line-height:1.25;font-family:sans-serif"> </tspan></text>
+ <g
+ style="font-size:12px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ id="text3807">
+ <path
+ d="m 71.179893,455.55557 0.996094,0 1.533203,6.16211 1.528321,-6.16211 1.108398,0 1.533203,6.16211 1.528321,-6.16211 1.000976,0 -1.831055,7.29004 -1.240234,0 -1.538086,-6.32812 -1.552734,6.32812 -1.240235,0 -1.826172,-7.29004"
+ style="font-size:10px"
+ id="path3164" />
+ <path
+ d="m 81.458214,457.37686 0.898437,0 0,5.46875 -0.898437,0 0,-5.46875 m 0,-2.1289 0.898437,0 0,1.13769 -0.898437,0 0,-1.13769"
+ style="font-size:10px"
+ id="path3166" />
+ <path
+ d="m 88.77755,459.54483 0,3.30078 -0.898438,0 0,-3.27148 c -4e-6,-0.51758 -0.100916,-0.90495 -0.302734,-1.16211 -0.201827,-0.25716 -0.504561,-0.38574 -0.908203,-0.38574 -0.485029,0 -0.867516,0.15462 -1.147461,0.46386 -0.27995,0.30925 -0.419924,0.7308 -0.419922,1.26465 l 0,3.09082 -0.90332,0 0,-5.46875 0.90332,0 0,0.84961 c 0.214842,-0.32877 0.46712,-0.57454 0.756836,-0.7373 0.292966,-0.16276 0.629879,-0.24414 1.010742,-0.24414 0.628251,0 1.103511,0.19531 1.425781,0.58593 0.322261,0.38738 0.483393,0.95867 0.483399,1.71387"
+ style="font-size:10px"
+ id="path3168" />
+ <path
+ d="m 94.17794,458.20694 0,-2.95898 0.898438,0 0,7.59765 -0.898438,0 0,-0.82031 c -0.188806,0.32552 -0.428064,0.56803 -0.717773,0.72754 -0.286462,0.15625 -0.631514,0.23437 -1.035156,0.23437 -0.66081,0 -1.199546,-0.26367 -1.616211,-0.79101 -0.413412,-0.52735 -0.620118,-1.2207 -0.620117,-2.08008 -10e-7,-0.85937 0.206705,-1.55273 0.620117,-2.08008 0.416665,-0.52734 0.955401,-0.79101 1.616211,-0.79101 0.403642,0 0.748694,0.0797 1.035156,0.23925 0.289709,0.15626 0.528967,0.39714 0.717773,0.72266 m -3.061523,1.90918 c -2e-6,0.66081 0.135089,1.18001 0.405273,1.55762 0.273436,0.37435 0.647784,0.56152 1.123047,0.56152 0.475257,0 0.849606,-0.18717 1.123047,-0.56152 0.273433,-0.37761 0.410152,-0.89681 0.410156,-1.55762 -4e-6,-0.6608 -0.136723,-1.17838 -0.410156,-1.55273 -0.273441,-0.3776 -0.64779,-0.56641 -1.123047,-0.56641 -0.475263,0 -0.849611,0.18881 -1.123047,0.56641 -0.270184,0.37435 -0.405275,0.89193 -0.405273,1.55273"
+ style="font-size:10px"
+ id="path3170" />
+ <path
+ d="m 99.046104,458.00674 c -0.481773,1e-5 -0.862632,0.18881 -1.142578,0.56641 -0.279949,0.37435 -0.419923,0.88868 -0.419922,1.54297 -10e-7,0.6543 0.138345,1.17025 0.415039,1.54785 0.279946,0.37435 0.662433,0.56153 1.147461,0.56152 0.478513,1e-5 0.857744,-0.1888 1.137696,-0.5664 0.27994,-0.3776 0.41992,-0.89193 0.41992,-1.54297 0,-0.64778 -0.13998,-1.16048 -0.41992,-1.53809 -0.279952,-0.38085 -0.659183,-0.57128 -1.137696,-0.57129 m 0,-0.76171 c 0.781247,0 1.394856,0.25391 1.840816,0.76171 0.44596,0.50782 0.66894,1.21095 0.66895,2.10938 -1e-5,0.89518 -0.22299,1.59831 -0.66895,2.10937 -0.44596,0.50782 -1.059569,0.76172 -1.840816,0.76172 -0.784507,0 -1.399741,-0.2539 -1.845703,-0.76172 -0.442709,-0.51106 -0.664063,-1.21419 -0.664062,-2.10937 -10e-7,-0.89843 0.221353,-1.60156 0.664062,-2.10938 0.445962,-0.5078 1.061196,-0.76171 1.845703,-0.76171"
+ style="font-size:10px"
+ id="path3172" />
+ <path
+ d="m 102.51778,457.37686 0.89844,0 1.12305,4.26758 1.11816,-4.26758 1.05957,0 1.12305,4.26758 1.11816,-4.26758 0.89844,0 -1.43066,5.46875 -1.05957,0 -1.17676,-4.48242 -1.18164,4.48242 -1.05957,0 -1.43067,-5.46875"
+ style="font-size:10px"
+ id="path3174" />
+ <path
+ d="m 115.27657,462.0253 0,2.90039 -0.90332,0 0,-7.54883 0.90332,0 0,0.83008 c 0.1888,-0.32552 0.42643,-0.5664 0.71289,-0.72266 0.28971,-0.1595 0.63477,-0.23925 1.03516,-0.23925 0.66406,0 1.20279,0.26367 1.61621,0.79101 0.41666,0.52735 0.625,1.22071 0.625,2.08008 0,0.85938 -0.20834,1.55273 -0.625,2.08008 -0.41342,0.52734 -0.95215,0.79101 -1.61621,0.79101 -0.40039,0 -0.74545,-0.0781 -1.03516,-0.23437 -0.28646,-0.15951 -0.52409,-0.40202 -0.71289,-0.72754 m 3.05664,-1.90918 c 0,-0.6608 -0.13672,-1.17838 -0.41015,-1.55273 -0.27019,-0.3776 -0.64291,-0.56641 -1.11817,-0.56641 -0.47526,0 -0.84961,0.18881 -1.12304,0.56641 -0.27019,0.37435 -0.40528,0.89193 -0.40528,1.55273 0,0.66081 0.13509,1.18001 0.40528,1.55762 0.27343,0.37435 0.64778,0.56152 1.12304,0.56152 0.47526,0 0.84798,-0.18717 1.11817,-0.56152 0.27343,-0.37761 0.41015,-0.89681 0.41015,-1.55762"
+ style="font-size:10px"
+ id="path3176" />
+ <path
+ d="m 122.87423,458.00674 c -0.48177,1e-5 -0.86263,0.18881 -1.14258,0.56641 -0.27995,0.37435 -0.41992,0.88868 -0.41992,1.54297 0,0.6543 0.13834,1.17025 0.41504,1.54785 0.27994,0.37435 0.66243,0.56153 1.14746,0.56152 0.47851,1e-5 0.85774,-0.1888 1.13769,-0.5664 0.27995,-0.3776 0.41992,-0.89193 0.41993,-1.54297 -1e-5,-0.64778 -0.13998,-1.16048 -0.41993,-1.53809 -0.27995,-0.38085 -0.65918,-0.57128 -1.13769,-0.57129 m 0,-0.76171 c 0.78125,0 1.39485,0.25391 1.84082,0.76171 0.44596,0.50782 0.66894,1.21095 0.66895,2.10938 -1e-5,0.89518 -0.22299,1.59831 -0.66895,2.10937 -0.44597,0.50782 -1.05957,0.76172 -1.84082,0.76172 -0.78451,0 -1.39974,-0.2539 -1.8457,-0.76172 -0.44271,-0.51106 -0.66407,-1.21419 -0.66407,-2.10937 0,-0.89843 0.22136,-1.60156 0.66407,-2.10938 0.44596,-0.5078 1.06119,-0.76171 1.8457,-0.76171"
+ style="font-size:10px"
+ id="path3178" />
+ <path
+ d="m 130.3547,457.53799 0,0.84961 c -0.25391,-0.1302 -0.51758,-0.22786 -0.79102,-0.29296 -0.27344,-0.0651 -0.55664,-0.0977 -0.84961,-0.0977 -0.44596,0 -0.78125,0.0684 -1.00586,0.20508 -0.22135,0.13672 -0.33203,0.3418 -0.33203,0.61523 0,0.20834 0.0797,0.37273 0.23926,0.49317 0.1595,0.11719 0.48014,0.22949 0.96191,0.33691 l 0.30762,0.0684 c 0.63802,0.13672 1.09049,0.33041 1.35742,0.58105 0.27018,0.2474 0.40527,0.59408 0.40528,1.04004 -1e-5,0.50782 -0.20183,0.90983 -0.60547,1.20606 -0.4004,0.29622 -0.95215,0.44433 -1.65528,0.44433 -0.29297,0 -0.59896,-0.0293 -0.91796,-0.0879 -0.31576,-0.0553 -0.64942,-0.13997 -1.00098,-0.2539 l 0,-0.92774 c 0.33203,0.17253 0.65918,0.30274 0.98144,0.39063 0.32227,0.0846 0.64128,0.12695 0.95704,0.12695 0.42317,0 0.74869,-0.0716 0.97656,-0.21484 0.22786,-0.14649 0.34179,-0.35157 0.3418,-0.61524 -1e-5,-0.24414 -0.083,-0.43131 -0.24903,-0.56152 -0.16276,-0.13021 -0.52246,-0.25553 -1.0791,-0.37598 l -0.3125,-0.0732 c -0.55664,-0.11719 -0.95866,-0.29622 -1.20605,-0.53711 -0.2474,-0.24414 -0.3711,-0.5778 -0.3711,-1.00098 0,-0.51431 0.18229,-0.91145 0.54688,-1.1914 0.36458,-0.27994 0.88216,-0.41992 1.55273,-0.41992 0.33203,0 0.64453,0.0244 0.9375,0.0732 0.29297,0.0488 0.56315,0.12207 0.81055,0.21972"
+ style="font-size:10px"
+ id="path3180" />
+ <path
+ d="m 132.08321,457.37686 0.89844,0 0,5.46875 -0.89844,0 0,-5.46875 m 0,-2.1289 0.89844,0 0,1.13769 -0.89844,0 0,-1.13769"
+ style="font-size:10px"
+ id="path3182" />
+ <path
+ d="m 135.74532,455.82413 0,1.55273 1.85059,0 0,0.69824 -1.85059,0 0,2.96875 c 0,0.44597 0.0602,0.73243 0.18067,0.85938 0.12369,0.12695 0.37272,0.19043 0.74707,0.19043 l 0.92285,0 0,0.75195 -0.92285,0 c -0.69336,0 -1.17188,-0.12858 -1.43555,-0.38574 -0.26367,-0.26042 -0.39551,-0.73242 -0.39551,-1.41602 l 0,-2.96875 -0.65918,0 0,-0.69824 0.65918,0 0,-1.55273 0.90332,0"
+ style="font-size:10px"
+ id="path3184" />
+ <path
+ d="m 138.78243,457.37686 0.89844,0 0,5.46875 -0.89844,0 0,-5.46875 m 0,-2.1289 0.89844,0 0,1.13769 -0.89844,0 0,-1.13769"
+ style="font-size:10px"
+ id="path3186" />
+ <path
+ d="m 143.67501,458.00674 c -0.48177,1e-5 -0.86263,0.18881 -1.14258,0.56641 -0.27995,0.37435 -0.41992,0.88868 -0.41992,1.54297 0,0.6543 0.13835,1.17025 0.41504,1.54785 0.27995,0.37435 0.66243,0.56153 1.14746,0.56152 0.47851,1e-5 0.85774,-0.1888 1.1377,-0.5664 0.27994,-0.3776 0.41991,-0.89193 0.41992,-1.54297 -1e-5,-0.64778 -0.13998,-1.16048 -0.41992,-1.53809 -0.27996,-0.38085 -0.65919,-0.57128 -1.1377,-0.57129 m 0,-0.76171 c 0.78125,0 1.39485,0.25391 1.84082,0.76171 0.44596,0.50782 0.66894,1.21095 0.66895,2.10938 -1e-5,0.89518 -0.22299,1.59831 -0.66895,2.10937 -0.44597,0.50782 -1.05957,0.76172 -1.84082,0.76172 -0.78451,0 -1.39974,-0.2539 -1.8457,-0.76172 -0.44271,-0.51106 -0.66407,-1.21419 -0.66406,-2.10937 -10e-6,-0.89843 0.22135,-1.60156 0.66406,-2.10938 0.44596,-0.5078 1.06119,-0.76171 1.8457,-0.76171"
+ style="font-size:10px"
+ id="path3188" />
+ <path
+ d="m 152.21505,459.54483 0,3.30078 -0.89844,0 0,-3.27148 c 0,-0.51758 -0.10091,-0.90495 -0.30273,-1.16211 -0.20183,-0.25716 -0.50456,-0.38574 -0.90821,-0.38574 -0.48502,0 -0.86751,0.15462 -1.14746,0.46386 -0.27995,0.30925 -0.41992,0.7308 -0.41992,1.26465 l 0,3.09082 -0.90332,0 0,-5.46875 0.90332,0 0,0.84961 c 0.21484,-0.32877 0.46712,-0.57454 0.75684,-0.7373 0.29296,-0.16276 0.62988,-0.24414 1.01074,-0.24414 0.62825,0 1.10351,0.19531 1.42578,0.58593 0.32226,0.38738 0.48339,0.95867 0.4834,1.71387"
+ style="font-size:10px"
+ id="path3190" />
+ </g>
+ <g
+ style="font-size:12px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ id="text3817">
+ <path
+ d="m 49.798271,374.06503 0,0.96192 c -0.374354,-0.17903 -0.727544,-0.3125 -1.05957,-0.40039 -0.332035,-0.0879 -0.652673,-0.13183 -0.961914,-0.13184 -0.537112,1e-5 -0.952151,0.10417 -1.245117,0.3125 -0.289716,0.20834 -0.434572,0.50456 -0.434571,0.88867 -10e-7,0.32227 0.09603,0.56641 0.288086,0.73242 0.195311,0.16277 0.563149,0.2946 1.103516,0.39551 l 0.595703,0.12207 c 0.735673,0.13998 1.277664,0.38738 1.625977,0.74219 0.351556,0.35157 0.527338,0.82357 0.527343,1.41602 -5e-6,0.70638 -0.237635,1.24186 -0.71289,1.60644 -0.47201,0.36458 -1.165369,0.54688 -2.080078,0.54688 -0.345055,0 -0.712893,-0.0391 -1.103516,-0.11719 -0.387371,-0.0781 -0.789389,-0.19369 -1.206055,-0.34668 l 0,-1.01563 c 0.40039,0.22461 0.792642,0.39388 1.176758,0.50782 0.384112,0.11393 0.761716,0.1709 1.132813,0.17089 0.563147,10e-6 0.997717,-0.11067 1.30371,-0.33203 0.305985,-0.22135 0.45898,-0.5371 0.458985,-0.94726 -5e-6,-0.35807 -0.110682,-0.63802 -0.332031,-0.83985 -0.218104,-0.20182 -0.577804,-0.35318 -1.079102,-0.4541 l -0.600586,-0.11719 c -0.735679,-0.14648 -1.267905,-0.37597 -1.59668,-0.68847 -0.328776,-0.3125 -0.493164,-0.74707 -0.493164,-1.30371 0,-0.64453 0.226236,-1.15234 0.678711,-1.52344 0.455728,-0.37109 1.082355,-0.55663 1.879883,-0.55664 0.341793,10e-6 0.6901,0.0309 1.044922,0.0928 0.354813,0.0619 0.717768,0.15463 1.088867,0.27832"
+ style="font-size:10px"
+ id="path3193" />
+ <path
+ d="m 56.414482,378.15683 0,0.43945 -4.130859,0 c 0.03906,0.61849 0.224607,1.0905 0.55664,1.41602 0.335284,0.32226 0.800779,0.4834 1.396485,0.4834 0.345048,0 0.678707,-0.0423 1.000976,-0.12696 0.325516,-0.0846 0.647782,-0.21158 0.966797,-0.38086 l 0,0.84961 c -0.322271,0.13672 -0.652674,0.24089 -0.991211,0.3125 -0.338546,0.0716 -0.68197,0.10743 -1.030273,0.10743 -0.872399,0 -1.56413,-0.25391 -2.075196,-0.76172 -0.507813,-0.50781 -0.761719,-1.19466 -0.761718,-2.06055 -10e-7,-0.89518 0.240884,-1.60481 0.722656,-2.12891 0.485024,-0.52733 1.137693,-0.79101 1.958008,-0.79101 0.735673,0 1.316727,0.23763 1.743164,0.71289 0.429682,0.47201 0.644525,1.11491 0.644531,1.92871 m -0.898437,-0.26367 c -0.0065,-0.49153 -0.144862,-0.88379 -0.415039,-1.17676 -0.266932,-0.29296 -0.621749,-0.43945 -1.064454,-0.43945 -0.501304,0 -0.903322,0.1416 -1.206054,0.4248 -0.299481,0.28321 -0.472007,0.68197 -0.517578,1.19629 l 3.203125,-0.005"
+ style="font-size:10px"
+ id="path3195" />
+ <path
+ d="m 61.824638,375.85703 0,0.83984 c -0.253911,-0.13997 -0.509444,-0.24414 -0.766601,-0.3125 -0.25391,-0.0716 -0.511072,-0.10742 -0.771485,-0.10742 -0.582685,0 -1.035158,0.18555 -1.357421,0.55664 -0.322268,0.36784 -0.4834,0.88542 -0.483399,1.55273 -10e-7,0.66732 0.161131,1.18653 0.483399,1.55762 0.322263,0.36784 0.774736,0.55176 1.357421,0.55176 0.260413,0 0.517575,-0.0342 0.771485,-0.10254 0.257157,-0.0716 0.51269,-0.17741 0.766601,-0.31738 l 0,0.83007 c -0.250655,0.11719 -0.511072,0.20508 -0.78125,0.26368 -0.266931,0.0586 -0.551761,0.0879 -0.854492,0.0879 -0.82357,0 -1.477866,-0.25879 -1.96289,-0.77637 -0.485027,-0.51758 -0.72754,-1.21582 -0.72754,-2.09473 0,-0.89192 0.24414,-1.59342 0.732422,-2.10449 0.491535,-0.51106 1.163735,-0.7666 2.016602,-0.7666 0.276689,0 0.546871,0.0293 0.810547,0.0879 0.263667,0.0553 0.519201,0.13998 0.766601,0.25391"
+ style="font-size:10px"
+ id="path3197" />
+ <path
+ d="m 65.516045,376.27695 c -0.481774,0 -0.862633,0.1888 -1.142579,0.5664 -0.279949,0.37436 -0.419923,0.88868 -0.419921,1.54297 -2e-6,0.6543 0.138344,1.17025 0.415039,1.54785 0.279945,0.37435 0.662432,0.56153 1.147461,0.56153 0.478512,0 0.857743,-0.1888 1.137695,-0.56641 0.279943,-0.3776 0.419917,-0.89192 0.419922,-1.54297 -5e-6,-0.64778 -0.139979,-1.16048 -0.419922,-1.53808 -0.279952,-0.38086 -0.659183,-0.57129 -1.137695,-0.57129 m 0,-0.76172 c 0.781246,0 1.394852,0.25391 1.84082,0.76172 0.445958,0.50781 0.66894,1.21094 0.668945,2.10937 -5e-6,0.89519 -0.222987,1.59831 -0.668945,2.10938 -0.445968,0.50781 -1.059574,0.76172 -1.84082,0.76172 -0.784508,0 -1.399742,-0.25391 -1.845704,-0.76172 -0.442709,-0.51107 -0.664063,-1.21419 -0.664062,-2.10938 -10e-7,-0.89843 0.221353,-1.60156 0.664062,-2.10937 0.445962,-0.50781 1.061196,-0.76172 1.845704,-0.76172"
+ style="font-size:10px"
+ id="path3199" />
+ <path
+ d="m 74.056084,377.81503 0,3.30078 -0.898438,0 0,-3.27148 c -4e-6,-0.51757 -0.100916,-0.90494 -0.302734,-1.16211 -0.201827,-0.25716 -0.504561,-0.38574 -0.908203,-0.38574 -0.485029,0 -0.867516,0.15463 -1.147461,0.46387 -0.27995,0.30924 -0.419924,0.73079 -0.419922,1.26464 l 0,3.09082 -0.90332,0 0,-5.46875 0.90332,0 0,0.84961 c 0.214842,-0.32877 0.46712,-0.57454 0.756836,-0.7373 0.292966,-0.16276 0.629879,-0.24414 1.010742,-0.24414 0.628251,0 1.103511,0.19532 1.425781,0.58594 0.322261,0.38737 0.483393,0.95866 0.483399,1.71386"
+ style="font-size:10px"
+ id="path3201" />
+ <path
+ d="m 79.456474,376.47714 0,-2.95898 0.898438,0 0,7.59765 -0.898438,0 0,-0.82031 c -0.188806,0.32552 -0.428064,0.56804 -0.717773,0.72754 -0.286462,0.15625 -0.631514,0.23438 -1.035156,0.23438 -0.66081,0 -1.199546,-0.26368 -1.616211,-0.79102 -0.413413,-0.52734 -0.620118,-1.2207 -0.620118,-2.08008 0,-0.85937 0.206705,-1.55273 0.620118,-2.08008 0.416665,-0.52733 0.955401,-0.79101 1.616211,-0.79101 0.403642,0 0.748694,0.0798 1.035156,0.23926 0.289709,0.15625 0.528967,0.39714 0.717773,0.72265 m -3.061523,1.90918 c -2e-6,0.66081 0.135089,1.18002 0.405273,1.55762 0.273436,0.37435 0.647784,0.56152 1.123047,0.56152 0.475257,0 0.849606,-0.18717 1.123047,-0.56152 0.273433,-0.3776 0.410152,-0.89681 0.410156,-1.55762 -4e-6,-0.6608 -0.136723,-1.17838 -0.410156,-1.55273 -0.273441,-0.3776 -0.64779,-0.5664 -1.123047,-0.56641 -0.475263,1e-5 -0.849611,0.18881 -1.123047,0.56641 -0.270184,0.37435 -0.405275,0.89193 -0.405273,1.55273"
+ style="font-size:10px"
+ id="path3203" />
+ <path
+ d="m 84.690849,378.36679 c -0.725914,0 -1.228843,0.083 -1.508789,0.24902 -0.279949,0.16602 -0.419923,0.44923 -0.419922,0.84961 -10e-7,0.31902 0.104165,0.57292 0.3125,0.76172 0.211587,0.18555 0.498045,0.27832 0.859375,0.27832 0.498044,0 0.896807,-0.17578 1.196289,-0.52734 0.302731,-0.35482 0.454098,-0.82519 0.454102,-1.41113 l 0,-0.2002 -0.893555,0 m 1.791992,-0.37109 0,3.12011 -0.898437,0 0,-0.83007 c -0.205082,0.33203 -0.460616,0.5778 -0.766602,0.7373 -0.305992,0.15625 -0.680341,0.23438 -1.123046,0.23438 -0.559898,0 -1.005861,-0.15625 -1.337891,-0.46875 -0.328777,-0.31576 -0.493165,-0.73731 -0.493164,-1.26465 -1e-6,-0.61523 0.205077,-1.0791 0.615234,-1.3916 0.41341,-0.3125 1.028644,-0.46875 1.845703,-0.46875 l 1.259766,0 0,-0.0879 c -4e-6,-0.41341 -0.136723,-0.73242 -0.410156,-0.95704 -0.270186,-0.22786 -0.651045,-0.34179 -1.142578,-0.34179 -0.312503,0 -0.616865,0.0374 -0.913086,0.1123 -0.296226,0.0749 -0.581056,0.18718 -0.854493,0.33692 l 0,-0.83008 c 0.328775,-0.12695 0.647785,-0.22135 0.957032,-0.28321 0.309242,-0.0651 0.610349,-0.0977 0.90332,-0.0977 0.791012,0 1.381832,0.20508 1.772461,0.61523 0.39062,0.41016 0.585932,1.03191 0.585937,1.86524"
+ style="font-size:10px"
+ id="path3205" />
+ <path
+ d="m 91.507256,376.48691 c -0.100916,-0.0586 -0.211593,-0.10091 -0.332032,-0.12696 -0.117191,-0.0293 -0.247399,-0.0439 -0.390625,-0.0439 -0.507815,0 -0.89844,0.16602 -1.171875,0.49805 -0.270184,0.32878 -0.405275,0.80241 -0.405273,1.42089 l 0,2.88086 -0.90332,0 0,-5.46875 0.90332,0 0,0.84961 c 0.1888,-0.33202 0.434568,-0.57779 0.737305,-0.7373 0.302731,-0.16276 0.670569,-0.24414 1.103515,-0.24414 0.06185,0 0.130205,0.005 0.205078,0.0147 0.07487,0.007 0.157874,0.0179 0.249024,0.0342 l 0.0049,0.92285"
+ style="font-size:10px"
+ id="path3207" />
+ <path
+ d="m 94.734795,381.62363 c -0.25391,0.65104 -0.501305,1.07584 -0.742188,1.27441 -0.240888,0.19857 -0.563153,0.29785 -0.966797,0.29785 l -0.717773,0 0,-0.75195 0.527344,0 c 0.247394,0 0.439451,-0.0586 0.576171,-0.17578 0.136717,-0.11719 0.288084,-0.39388 0.454102,-0.83008 l 0.161133,-0.41016 -2.211914,-5.38086 0.952148,0 1.708985,4.27735 1.708984,-4.27735 0.952148,0 -2.402343,5.97657"
+ style="font-size:10px"
+ id="path3209" />
+ <path
+ d="m 105.81878,376.69687 c 0.2246,-0.40364 0.49316,-0.70149 0.80566,-0.89356 0.3125,-0.19205 0.68033,-0.28808 1.10352,-0.28808 0.56965,0 1.00911,0.2002 1.31836,0.60058 0.30923,0.39714 0.46386,0.96355 0.46387,1.69922 l 0,3.30078 -0.90333,0 0,-3.27148 c 0,-0.52408 -0.0928,-0.91308 -0.27832,-1.16699 -0.18555,-0.2539 -0.46875,-0.38086 -0.8496,-0.38086 -0.46551,0 -0.83334,0.15463 -1.10352,0.46387 -0.27019,0.30924 -0.40528,0.73079 -0.40527,1.26464 l 0,3.09082 -0.90332,0 0,-3.27148 c -10e-6,-0.52734 -0.0928,-0.91634 -0.27832,-1.16699 -0.18556,-0.2539 -0.47201,-0.38086 -0.85938,-0.38086 -0.45899,0 -0.82357,0.15625 -1.09375,0.46875 -0.27018,0.30925 -0.40527,0.72917 -0.40527,1.25976 l 0,3.09082 -0.90332,0 0,-5.46875 0.90332,0 0,0.84961 c 0.20507,-0.33528 0.45084,-0.58267 0.7373,-0.74218 0.28646,-0.1595 0.62663,-0.23926 1.02051,-0.23926 0.39713,0 0.73404,0.10092 1.01074,0.30273 0.27994,0.20183 0.48665,0.4948 0.62012,0.87891"
+ style="font-size:10px"
+ id="path3211" />
+ <path
+ d="m 113.4262,376.27695 c -0.48177,0 -0.86263,0.1888 -1.14258,0.5664 -0.27995,0.37436 -0.41992,0.88868 -0.41992,1.54297 0,0.6543 0.13835,1.17025 0.41504,1.54785 0.27995,0.37435 0.66243,0.56153 1.14746,0.56153 0.47851,0 0.85774,-0.1888 1.1377,-0.56641 0.27994,-0.3776 0.41991,-0.89192 0.41992,-1.54297 -10e-6,-0.64778 -0.13998,-1.16048 -0.41992,-1.53808 -0.27996,-0.38086 -0.65919,-0.57129 -1.1377,-0.57129 m 0,-0.76172 c 0.78125,0 1.39485,0.25391 1.84082,0.76172 0.44596,0.50781 0.66894,1.21094 0.66895,2.10937 -1e-5,0.89519 -0.22299,1.59831 -0.66895,2.10938 -0.44597,0.50781 -1.05957,0.76172 -1.84082,0.76172 -0.78451,0 -1.39974,-0.25391 -1.8457,-0.76172 -0.44271,-0.51107 -0.66407,-1.21419 -0.66406,-2.10938 -10e-6,-0.89843 0.22135,-1.60156 0.66406,-2.10937 0.44596,-0.50781 1.06119,-0.76172 1.8457,-0.76172"
+ style="font-size:10px"
+ id="path3213" />
+ <path
+ d="m 121.96624,377.81503 0,3.30078 -0.89844,0 0,-3.27148 c 0,-0.51757 -0.10091,-0.90494 -0.30273,-1.16211 -0.20183,-0.25716 -0.50456,-0.38574 -0.90821,-0.38574 -0.48502,0 -0.86751,0.15463 -1.14746,0.46387 -0.27995,0.30924 -0.41992,0.73079 -0.41992,1.26464 l 0,3.09082 -0.90332,0 0,-5.46875 0.90332,0 0,0.84961 c 0.21484,-0.32877 0.46712,-0.57454 0.75684,-0.7373 0.29296,-0.16276 0.62988,-0.24414 1.01074,-0.24414 0.62825,0 1.10351,0.19532 1.42578,0.58594 0.32226,0.38737 0.48339,0.95866 0.4834,1.71386"
+ style="font-size:10px"
+ id="path3215" />
+ <path
+ d="m 123.768,375.64706 0.89844,0 0,5.46875 -0.89844,0 0,-5.46875 m 0,-2.1289 0.89844,0 0,1.13769 -0.89844,0 0,-1.13769"
+ style="font-size:10px"
+ id="path3217" />
+ <path
+ d="m 127.43011,374.09433 0,1.55273 1.85058,0 0,0.69825 -1.85058,0 0,2.96875 c 0,0.44596 0.0602,0.73242 0.18066,0.85937 0.1237,0.12696 0.37272,0.19043 0.74707,0.19043 l 0.92285,0 0,0.75195 -0.92285,0 c -0.69336,0 -1.17188,-0.12858 -1.43555,-0.38574 -0.26367,-0.26041 -0.3955,-0.73242 -0.3955,-1.41601 l 0,-2.96875 -0.65918,0 0,-0.69825 0.65918,0 0,-1.55273 0.90332,0"
+ style="font-size:10px"
+ id="path3219" />
+ <path
+ d="m 132.58636,376.27695 c -0.48178,0 -0.86264,0.1888 -1.14258,0.5664 -0.27995,0.37436 -0.41992,0.88868 -0.41992,1.54297 0,0.6543 0.13834,1.17025 0.41504,1.54785 0.27994,0.37435 0.66243,0.56153 1.14746,0.56153 0.47851,0 0.85774,-0.1888 1.13769,-0.56641 0.27995,-0.3776 0.41992,-0.89192 0.41992,-1.54297 0,-0.64778 -0.13997,-1.16048 -0.41992,-1.53808 -0.27995,-0.38086 -0.65918,-0.57129 -1.13769,-0.57129 m 0,-0.76172 c 0.78124,0 1.39485,0.25391 1.84082,0.76172 0.44596,0.50781 0.66894,1.21094 0.66894,2.10937 0,0.89519 -0.22298,1.59831 -0.66894,2.10938 -0.44597,0.50781 -1.05958,0.76172 -1.84082,0.76172 -0.78451,0 -1.39974,-0.25391 -1.84571,-0.76172 -0.44271,-0.51107 -0.66406,-1.21419 -0.66406,-2.10938 0,-0.89843 0.22135,-1.60156 0.66406,-2.10937 0.44597,-0.50781 1.0612,-0.76172 1.84571,-0.76172"
+ style="font-size:10px"
+ id="path3221" />
+ <path
+ d="m 139.74944,376.48691 c -0.10091,-0.0586 -0.21159,-0.10091 -0.33203,-0.12696 -0.11719,-0.0293 -0.2474,-0.0439 -0.39062,-0.0439 -0.50782,0 -0.89844,0.16602 -1.17188,0.49805 -0.27018,0.32878 -0.40527,0.80241 -0.40527,1.42089 l 0,2.88086 -0.90332,0 0,-5.46875 0.90332,0 0,0.84961 c 0.1888,-0.33202 0.43457,-0.57779 0.7373,-0.7373 0.30273,-0.16276 0.67057,-0.24414 1.10352,-0.24414 0.0618,0 0.1302,0.005 0.20508,0.0147 0.0749,0.007 0.15787,0.0179 0.24902,0.0342 l 0.005,0.92285"
+ style="font-size:10px"
+ id="path3223" />
+ <path
+ d="m 144.75433,380.2955 0,2.90039 -0.90332,0 0,-7.54883 0.90332,0 0,0.83008 c 0.1888,-0.32551 0.42643,-0.5664 0.71289,-0.72265 0.28971,-0.1595 0.63476,-0.23926 1.03515,-0.23926 0.66406,0 1.2028,0.26368 1.61621,0.79101 0.41666,0.52735 0.625,1.22071 0.625,2.08008 0,0.85938 -0.20834,1.55274 -0.625,2.08008 -0.41341,0.52734 -0.95215,0.79102 -1.61621,0.79102 -0.40039,0 -0.74544,-0.0781 -1.03515,-0.23438 -0.28646,-0.1595 -0.52409,-0.40202 -0.71289,-0.72754 m 3.05664,-1.90918 c -1e-5,-0.6608 -0.13673,-1.17838 -0.41016,-1.55273 -0.27019,-0.3776 -0.64291,-0.5664 -1.11816,-0.56641 -0.47527,1e-5 -0.84962,0.18881 -1.12305,0.56641 -0.27018,0.37435 -0.40528,0.89193 -0.40527,1.55273 -1e-5,0.66081 0.13509,1.18002 0.40527,1.55762 0.27343,0.37435 0.64778,0.56152 1.12305,0.56152 0.47525,0 0.84797,-0.18717 1.11816,-0.56152 0.27343,-0.3776 0.41015,-0.89681 0.41016,-1.55762"
+ style="font-size:10px"
+ id="path3225" />
+ <path
+ d="m 152.35198,376.27695 c -0.48177,0 -0.86263,0.1888 -1.14258,0.5664 -0.27995,0.37436 -0.41992,0.88868 -0.41992,1.54297 0,0.6543 0.13835,1.17025 0.41504,1.54785 0.27995,0.37435 0.66243,0.56153 1.14746,0.56153 0.47851,0 0.85775,-0.1888 1.1377,-0.56641 0.27994,-0.3776 0.41991,-0.89192 0.41992,-1.54297 -1e-5,-0.64778 -0.13998,-1.16048 -0.41992,-1.53808 -0.27995,-0.38086 -0.65919,-0.57129 -1.1377,-0.57129 m 0,-0.76172 c 0.78125,0 1.39485,0.25391 1.84082,0.76172 0.44596,0.50781 0.66894,1.21094 0.66895,2.10937 -1e-5,0.89519 -0.22299,1.59831 -0.66895,2.10938 -0.44597,0.50781 -1.05957,0.76172 -1.84082,0.76172 -0.78451,0 -1.39974,-0.25391 -1.8457,-0.76172 -0.44271,-0.51107 -0.66406,-1.21419 -0.66406,-2.10938 0,-0.89843 0.22135,-1.60156 0.66406,-2.10937 0.44596,-0.50781 1.06119,-0.76172 1.8457,-0.76172"
+ style="font-size:10px"
+ id="path3227" />
+ <path
+ d="m 159.83245,375.8082 0,0.84961 c -0.25391,-0.13021 -0.51758,-0.22786 -0.79101,-0.29297 -0.27345,-0.0651 -0.55665,-0.0976 -0.84961,-0.0977 -0.44597,1e-5 -0.78126,0.0684 -1.00586,0.20508 -0.22136,0.13672 -0.33204,0.3418 -0.33203,0.61523 -1e-5,0.20834 0.0798,0.37273 0.23925,0.49317 0.15951,0.11719 0.48014,0.22949 0.96192,0.33691 l 0.30761,0.0684 c 0.63802,0.13672 1.0905,0.33041 1.35743,0.58106 0.27017,0.24739 0.40526,0.59407 0.40527,1.04004 -1e-5,0.50781 -0.20183,0.90983 -0.60547,1.20605 -0.40039,0.29622 -0.95215,0.44434 -1.65527,0.44434 -0.29297,0 -0.59896,-0.0293 -0.91797,-0.0879 -0.31576,-0.0553 -0.64942,-0.13998 -1.00098,-0.25391 l 0,-0.92774 c 0.33203,0.17253 0.65918,0.30274 0.98145,0.39063 0.32226,0.0846 0.64127,0.12695 0.95703,0.12695 0.42317,0 0.74869,-0.0716 0.97656,-0.21484 0.22786,-0.14648 0.34179,-0.35156 0.3418,-0.61524 -1e-5,-0.24413 -0.083,-0.43131 -0.24903,-0.56152 -0.16276,-0.13021 -0.52246,-0.25553 -1.0791,-0.37598 l -0.3125,-0.0732 c -0.55664,-0.11718 -0.95866,-0.29622 -1.20605,-0.53711 -0.2474,-0.24413 -0.3711,-0.57779 -0.3711,-1.00097 0,-0.51432 0.1823,-0.91146 0.54688,-1.19141 0.36458,-0.27994 0.88216,-0.41992 1.55273,-0.41992 0.33203,0 0.64453,0.0244 0.9375,0.0732 0.29297,0.0488 0.56315,0.12208 0.81055,0.21973"
+ style="font-size:10px"
+ id="path3229" />
+ <path
+ d="m 161.56097,375.64706 0.89843,0 0,5.46875 -0.89843,0 0,-5.46875 m 0,-2.1289 0.89843,0 0,1.13769 -0.89843,0 0,-1.13769"
+ style="font-size:10px"
+ id="path3231" />
+ <path
+ d="m 165.22308,374.09433 0,1.55273 1.85058,0 0,0.69825 -1.85058,0 0,2.96875 c -1e-5,0.44596 0.0602,0.73242 0.18066,0.85937 0.1237,0.12696 0.37272,0.19043 0.74707,0.19043 l 0.92285,0 0,0.75195 -0.92285,0 c -0.69336,0 -1.17188,-0.12858 -1.43555,-0.38574 -0.26367,-0.26041 -0.39551,-0.73242 -0.3955,-1.41601 l 0,-2.96875 -0.65918,0 0,-0.69825 0.65918,0 0,-1.55273 0.90332,0"
+ style="font-size:10px"
+ id="path3233" />
+ <path
+ d="m 168.26019,375.64706 0.89843,0 0,5.46875 -0.89843,0 0,-5.46875 m 0,-2.1289 0.89843,0 0,1.13769 -0.89843,0 0,-1.13769"
+ style="font-size:10px"
+ id="path3235" />
+ <path
+ d="m 173.15276,376.27695 c -0.48177,0 -0.86263,0.1888 -1.14257,0.5664 -0.27995,0.37436 -0.41993,0.88868 -0.41993,1.54297 0,0.6543 0.13835,1.17025 0.41504,1.54785 0.27995,0.37435 0.66243,0.56153 1.14746,0.56153 0.47852,0 0.85775,-0.1888 1.1377,-0.56641 0.27994,-0.3776 0.41992,-0.89192 0.41992,-1.54297 0,-0.64778 -0.13998,-1.16048 -0.41992,-1.53808 -0.27995,-0.38086 -0.65918,-0.57129 -1.1377,-0.57129 m 0,-0.76172 c 0.78125,0 1.39486,0.25391 1.84082,0.76172 0.44596,0.50781 0.66894,1.21094 0.66895,2.10937 -1e-5,0.89519 -0.22299,1.59831 -0.66895,2.10938 -0.44596,0.50781 -1.05957,0.76172 -1.84082,0.76172 -0.7845,0 -1.39974,-0.25391 -1.8457,-0.76172 -0.44271,-0.51107 -0.66406,-1.21419 -0.66406,-2.10938 0,-0.89843 0.22135,-1.60156 0.66406,-2.10937 0.44596,-0.50781 1.0612,-0.76172 1.8457,-0.76172"
+ style="font-size:10px"
+ id="path3237" />
+ <path
+ d="m 181.6928,377.81503 0,3.30078 -0.89844,0 0,-3.27148 c 0,-0.51757 -0.10091,-0.90494 -0.30273,-1.16211 -0.20183,-0.25716 -0.50456,-0.38574 -0.9082,-0.38574 -0.48503,0 -0.86752,0.15463 -1.14746,0.46387 -0.27995,0.30924 -0.41993,0.73079 -0.41993,1.26464 l 0,3.09082 -0.90332,0 0,-5.46875 0.90332,0 0,0.84961 c 0.21485,-0.32877 0.46712,-0.57454 0.75684,-0.7373 0.29297,-0.16276 0.62988,-0.24414 1.01074,-0.24414 0.62825,0 1.10351,0.19532 1.42578,0.58594 0.32226,0.38737 0.4834,0.95866 0.4834,1.71386"
+ style="font-size:10px"
+ id="path3239" />
+ </g>
+ <g
+ style="font-size:12px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ id="text3017">
+ <path
+ d="m 367.17007,356.71783 -2.7832,-7.29003 1.03027,0 2.30958,6.13769 2.31445,-6.13769 1.02539,0 -2.77832,7.29003 -1.11817,0"
+ style="font-size:10px"
+ id="path3242" />
+ <path
+ d="m 371.87222,351.24908 0.89844,0 0,5.46875 -0.89844,0 0,-5.46875 m 0,-2.1289 0.89844,0 0,1.13769 -0.89844,0 0,-1.13769"
+ style="font-size:10px"
+ id="path3244" />
+ <path
+ d="m 377.81461,352.08893 c -0.10092,-0.0586 -0.2116,-0.10091 -0.33204,-0.12695 -0.11719,-0.0293 -0.24739,-0.0439 -0.39062,-0.0439 -0.50782,0 -0.89844,0.16602 -1.17188,0.49805 -0.27018,0.32878 -0.40527,0.80241 -0.40527,1.4209 l 0,2.88085 -0.90332,0 0,-5.46875 0.90332,0 0,0.84961 c 0.1888,-0.33202 0.43457,-0.57779 0.73731,-0.7373 0.30273,-0.16276 0.67057,-0.24414 1.10351,-0.24414 0.0618,0 0.13021,0.005 0.20508,0.0146 0.0749,0.007 0.15787,0.0179 0.24902,0.0342 l 0.005,0.92285"
+ style="font-size:10px"
+ id="path3246" />
+ <path
+ d="m 379.65543,349.69635 0,1.55273 1.85058,0 0,0.69825 -1.85058,0 0,2.96875 c -1e-5,0.44596 0.0602,0.73242 0.18066,0.85937 0.1237,0.12696 0.37272,0.19043 0.74707,0.19043 l 0.92285,0 0,0.75195 -0.92285,0 c -0.69336,0 -1.17188,-0.12858 -1.43555,-0.38574 -0.26367,-0.26041 -0.39551,-0.73242 -0.3955,-1.41601 l 0,-2.96875 -0.65918,0 0,-0.69825 0.65918,0 0,-1.55273 0.90332,0"
+ style="font-size:10px"
+ id="path3248" />
+ <path
+ d="m 382.59976,354.55963 0,-3.31055 0.89844,0 0,3.27637 c 0,0.51758 0.10091,0.90658 0.30273,1.16699 0.20182,0.25717 0.50456,0.38575 0.90821,0.38575 0.48502,0 0.86751,-0.15463 1.14746,-0.46387 0.2832,-0.30924 0.4248,-0.73079 0.4248,-1.26465 l 0,-3.10059 0.89844,0 0,5.46875 -0.89844,0 0,-0.83984 c -0.2181,0.33203 -0.47201,0.57943 -0.76172,0.74219 -0.28646,0.1595 -0.62012,0.23926 -1.00097,0.23926 -0.62826,0 -1.10515,-0.19532 -1.43067,-0.58594 -0.32552,-0.39063 -0.48828,-0.96191 -0.48828,-1.71387 m 2.26074,-3.44238 0,0"
+ style="font-size:10px"
+ id="path3250" />
+ <path
+ d="m 391.52554,353.96881 c -0.72591,0 -1.22884,0.083 -1.50879,0.24902 -0.27995,0.16602 -0.41992,0.44923 -0.41992,0.84961 0,0.31902 0.10417,0.57292 0.3125,0.76172 0.21159,0.18555 0.49805,0.27832 0.85938,0.27832 0.49804,0 0.8968,-0.17578 1.19629,-0.52734 0.30273,-0.35482 0.45409,-0.82519 0.4541,-1.41113 l 0,-0.2002 -0.89356,0 m 1.792,-0.37109 0,3.12011 -0.89844,0 0,-0.83007 c -0.20508,0.33203 -0.46062,0.5778 -0.7666,0.7373 -0.306,0.15625 -0.68035,0.23438 -1.12305,0.23438 -0.5599,0 -1.00586,-0.15625 -1.33789,-0.46875 -0.32878,-0.31576 -0.49317,-0.73731 -0.49317,-1.26465 0,-0.61523 0.20508,-1.0791 0.61524,-1.3916 0.41341,-0.3125 1.02864,-0.46875 1.8457,-0.46875 l 1.25977,0 0,-0.0879 c -1e-5,-0.41341 -0.13673,-0.73242 -0.41016,-0.95704 -0.27018,-0.22786 -0.65104,-0.34179 -1.14258,-0.34179 -0.3125,0 -0.61686,0.0374 -0.91308,0.1123 -0.29623,0.0749 -0.58106,0.18718 -0.85449,0.33692 l 0,-0.83008 c 0.32877,-0.12695 0.64778,-0.22135 0.95703,-0.28321 0.30924,-0.0651 0.61035,-0.0976 0.90332,-0.0976 0.79101,0 1.38183,0.20508 1.77246,0.61523 0.39062,0.41016 0.58593,1.03191 0.58594,1.86524"
+ style="font-size:10px"
+ id="path3252" />
+ <path
+ d="m 395.173,349.12018 0.89844,0 0,7.59765 -0.89844,0 0,-7.59765"
+ style="font-size:10px"
+ id="path3254" />
+ <path
+ d="m 404.61636,351.41022 0,0.84961 c -0.25391,-0.13021 -0.51758,-0.22786 -0.79101,-0.29297 -0.27344,-0.0651 -0.55665,-0.0976 -0.84961,-0.0977 -0.44597,1e-5 -0.78125,0.0684 -1.00586,0.20508 -0.22136,0.13672 -0.33203,0.3418 -0.33203,0.61523 0,0.20834 0.0797,0.37273 0.23926,0.49317 0.1595,0.11719 0.48014,0.22949 0.96191,0.33691 l 0.30762,0.0684 c 0.63801,0.13672 1.09049,0.33041 1.35742,0.58106 0.27018,0.24739 0.40527,0.59407 0.40527,1.04004 0,0.50781 -0.20183,0.90983 -0.60547,1.20605 -0.40039,0.29622 -0.95215,0.44434 -1.65527,0.44434 -0.29297,0 -0.59896,-0.0293 -0.91797,-0.0879 -0.31576,-0.0553 -0.64941,-0.13998 -1.00098,-0.25391 l 0,-0.92774 c 0.33204,0.17253 0.65918,0.30274 0.98145,0.39063 0.32226,0.0846 0.64127,0.12695 0.95703,0.12695 0.42318,0 0.7487,-0.0716 0.97656,-0.21484 0.22786,-0.14648 0.3418,-0.35156 0.3418,-0.61524 0,-0.24413 -0.083,-0.43131 -0.24902,-0.56152 -0.16277,-0.1302 -0.52247,-0.25553 -1.0791,-0.37598 l -0.3125,-0.0732 c -0.55665,-0.11718 -0.95866,-0.29622 -1.20606,-0.53711 -0.2474,-0.24413 -0.37109,-0.57779 -0.37109,-1.00097 0,-0.51432 0.18229,-0.91146 0.54687,-1.19141 0.36458,-0.27994 0.88216,-0.41992 1.55274,-0.41992 0.33202,0 0.64452,0.0244 0.9375,0.0732 0.29296,0.0488 0.56314,0.12208 0.81054,0.21973"
+ style="font-size:10px"
+ id="path3256" />
+ <path
+ d="m 410.28043,351.45905 0,0.83984 c -0.25391,-0.13997 -0.50945,-0.24414 -0.76661,-0.3125 -0.25391,-0.0716 -0.51107,-0.10742 -0.77148,-0.10742 -0.58268,0 -1.03516,0.18555 -1.35742,0.55664 -0.32227,0.36784 -0.4834,0.88542 -0.4834,1.55273 0,0.66732 0.16113,1.18653 0.4834,1.55762 0.32226,0.36784 0.77474,0.55176 1.35742,0.55176 0.26041,0 0.51757,-0.0342 0.77148,-0.10254 0.25716,-0.0716 0.5127,-0.17741 0.76661,-0.31738 l 0,0.83007 c -0.25066,0.11719 -0.51108,0.20508 -0.78125,0.26368 -0.26693,0.0586 -0.55177,0.0879 -0.8545,0.0879 -0.82357,0 -1.47786,-0.25879 -1.96289,-0.77637 -0.48502,-0.51758 -0.72754,-1.21582 -0.72754,-2.09473 0,-0.89192 0.24414,-1.59342 0.73243,-2.10449 0.49153,-0.51106 1.16373,-0.7666 2.0166,-0.7666 0.27669,0 0.54687,0.0293 0.81054,0.0879 0.26367,0.0553 0.51921,0.13998 0.76661,0.25391"
+ style="font-size:10px"
+ id="path3258" />
+ <path
+ d="m 415.02164,352.08893 c -0.10092,-0.0586 -0.2116,-0.10091 -0.33203,-0.12695 -0.1172,-0.0293 -0.2474,-0.0439 -0.39063,-0.0439 -0.50781,0 -0.89844,0.16602 -1.17187,0.49805 -0.27019,0.32878 -0.40528,0.80241 -0.40528,1.4209 l 0,2.88085 -0.90332,0 0,-5.46875 0.90332,0 0,0.84961 c 0.1888,-0.33202 0.43457,-0.57779 0.73731,-0.7373 0.30273,-0.16276 0.67057,-0.24414 1.10351,-0.24414 0.0618,0 0.13021,0.005 0.20508,0.0146 0.0749,0.007 0.15787,0.0179 0.24902,0.0342 l 0.005,0.92285"
+ style="font-size:10px"
+ id="path3260" />
+ <path
+ d="m 420.43668,353.75885 0,0.43945 -4.13086,0 c 0.0391,0.61849 0.2246,1.0905 0.55664,1.41602 0.33528,0.32227 0.80078,0.4834 1.39648,0.4834 0.34505,0 0.67871,-0.0423 1.00098,-0.12696 0.32551,-0.0846 0.64778,-0.21158 0.9668,-0.38086 l 0,0.84961 c -0.32228,0.13672 -0.65268,0.24089 -0.99122,0.3125 -0.33854,0.0716 -0.68197,0.10743 -1.03027,0.10743 -0.8724,0 -1.56413,-0.25391 -2.07519,-0.76172 -0.50782,-0.50781 -0.76172,-1.19466 -0.76172,-2.06055 0,-0.89518 0.24088,-1.60481 0.72265,-2.12891 0.48503,-0.52733 1.1377,-0.79101 1.95801,-0.79101 0.73567,0 1.31673,0.23763 1.74316,0.71289 0.42969,0.47201 0.64453,1.11491 0.64454,1.92871 m -0.89844,-0.26367 c -0.007,-0.49153 -0.14486,-0.88379 -0.41504,-1.17676 -0.26693,-0.29296 -0.62175,-0.43945 -1.06445,-0.43945 -0.50131,0 -0.90333,0.1416 -1.20606,0.4248 -0.29948,0.28321 -0.47201,0.68197 -0.51758,1.19629 l 3.20313,-0.005"
+ style="font-size:10px"
+ id="path3262" />
+ <path
+ d="m 426.58902,353.75885 0,0.43945 -4.13086,0 c 0.0391,0.61849 0.22461,1.0905 0.55664,1.41602 0.33529,0.32227 0.80078,0.4834 1.39649,0.4834 0.34504,0 0.6787,-0.0423 1.00097,-0.12696 0.32552,-0.0846 0.64778,-0.21158 0.9668,-0.38086 l 0,0.84961 c -0.32227,0.13672 -0.65268,0.24089 -0.99121,0.3125 -0.33855,0.0716 -0.68197,0.10743 -1.03028,0.10743 -0.87239,0 -1.56412,-0.25391 -2.07519,-0.76172 -0.50781,-0.50781 -0.76172,-1.19466 -0.76172,-2.06055 0,-0.89518 0.24089,-1.60481 0.72266,-2.12891 0.48502,-0.52733 1.13769,-0.79101 1.958,-0.79101 0.73568,0 1.31673,0.23763 1.74317,0.71289 0.42968,0.47201 0.64452,1.11491 0.64453,1.92871 m -0.89844,-0.26367 c -0.007,-0.49153 -0.14486,-0.88379 -0.41504,-1.17676 -0.26693,-0.29296 -0.62175,-0.43945 -1.06445,-0.43945 -0.5013,0 -0.90332,0.1416 -1.20605,0.4248 -0.29949,0.28321 -0.47201,0.68197 -0.51758,1.19629 l 3.20312,-0.005"
+ style="font-size:10px"
+ id="path3264" />
+ <path
+ d="m 432.60953,353.41705 0,3.30078 -0.89844,0 0,-3.27148 c 0,-0.51757 -0.10092,-0.90494 -0.30273,-1.16211 -0.20183,-0.25716 -0.50457,-0.38574 -0.90821,-0.38574 -0.48503,0 -0.86751,0.15463 -1.14746,0.46387 -0.27995,0.30924 -0.41992,0.73079 -0.41992,1.26464 l 0,3.09082 -0.90332,0 0,-5.46875 0.90332,0 0,0.84961 c 0.21484,-0.32877 0.46712,-0.57454 0.75684,-0.7373 0.29296,-0.16276 0.62988,-0.24414 1.01074,-0.24414 0.62825,0 1.10351,0.19532 1.42578,0.58594 0.32226,0.38737 0.48339,0.95866 0.4834,1.71386"
+ style="font-size:10px"
+ id="path3266" />
+ <path
+ d="m 439.71402,351.87897 c -0.48177,0 -0.86263,0.1888 -1.14258,0.5664 -0.27995,0.37436 -0.41992,0.88868 -0.41992,1.54297 0,0.6543 0.13834,1.17025 0.41504,1.54785 0.27994,0.37435 0.66243,0.56153 1.14746,0.56153 0.47851,0 0.85774,-0.1888 1.1377,-0.56641 0.27994,-0.3776 0.41991,-0.89192 0.41992,-1.54297 -10e-6,-0.64778 -0.13998,-1.16048 -0.41992,-1.53808 -0.27996,-0.38086 -0.65919,-0.57129 -1.1377,-0.57129 m 0,-0.76172 c 0.78125,0 1.39485,0.25391 1.84082,0.76172 0.44596,0.50781 0.66894,1.21094 0.66895,2.10937 -1e-5,0.89519 -0.22299,1.59831 -0.66895,2.10938 -0.44597,0.50781 -1.05957,0.76172 -1.84082,0.76172 -0.78451,0 -1.39974,-0.25391 -1.8457,-0.76172 -0.44271,-0.51107 -0.66407,-1.21419 -0.66407,-2.10938 0,-0.89843 0.22136,-1.60156 0.66407,-2.10937 0.44596,-0.50781 1.06119,-0.76172 1.8457,-0.76172"
+ style="font-size:10px"
+ id="path3268" />
+ <path
+ d="m 446.87711,352.08893 c -0.10092,-0.0586 -0.2116,-0.10091 -0.33204,-0.12695 -0.11719,-0.0293 -0.24739,-0.0439 -0.39062,-0.0439 -0.50782,0 -0.89844,0.16602 -1.17188,0.49805 -0.27018,0.32878 -0.40527,0.80241 -0.40527,1.4209 l 0,2.88085 -0.90332,0 0,-5.46875 0.90332,0 0,0.84961 c 0.1888,-0.33202 0.43457,-0.57779 0.73731,-0.7373 0.30273,-0.16276 0.67057,-0.24414 1.10351,-0.24414 0.0618,0 0.13021,0.005 0.20508,0.0146 0.0749,0.007 0.15787,0.0179 0.24902,0.0342 l 0.005,0.92285"
+ style="font-size:10px"
+ id="path3270" />
+ <path
+ d="m 447.82925,351.24908 0.89844,0 0,5.46875 -0.89844,0 0,-5.46875 m 0,-2.1289 0.89844,0 0,1.13769 -0.89844,0 0,-1.13769"
+ style="font-size:10px"
+ id="path3272" />
+ <path
+ d="m 454.20132,353.91998 c 0,-0.65104 -0.13509,-1.15559 -0.40527,-1.51367 -0.26693,-0.35807 -0.64291,-0.5371 -1.12793,-0.53711 -0.48177,1e-5 -0.85775,0.17904 -1.12793,0.53711 -0.26693,0.35808 -0.40039,0.86263 -0.40039,1.51367 0,0.64779 0.13346,1.15072 0.40039,1.50879 0.27018,0.35808 0.64616,0.53711 1.12793,0.53711 0.48502,0 0.861,-0.17903 1.12793,-0.53711 0.27018,-0.35807 0.40527,-0.861 0.40527,-1.50879 m 0.89844,2.11914 c 0,0.93099 -0.20671,1.62272 -0.62012,2.0752 -0.41341,0.45573 -1.04655,0.68359 -1.89941,0.68359 -0.31576,0 -0.61361,-0.0244 -0.89355,-0.0732 -0.27995,-0.0456 -0.55176,-0.11719 -0.81543,-0.21484 l 0,-0.87403 c 0.26367,0.14323 0.52408,0.24903 0.78125,0.31739 0.25716,0.0684 0.5192,0.10253 0.78613,0.10254 0.58919,-10e-6 1.03027,-0.15463 1.32324,-0.46387 0.29297,-0.30599 0.43945,-0.76986 0.43945,-1.3916 l 0,-0.44434 c -0.18555,0.32227 -0.42318,0.56315 -0.71289,0.72266 -0.28971,0.1595 -0.63639,0.23925 -1.04004,0.23925 -0.67057,0 -1.21093,-0.25553 -1.62109,-0.7666 -0.41016,-0.51106 -0.61523,-1.18815 -0.61523,-2.03125 0,-0.84635 0.20507,-1.52506 0.61523,-2.03613 0.41016,-0.51106 0.95052,-0.7666 1.62109,-0.7666 0.40365,0 0.75033,0.0798 1.04004,0.23926 0.28971,0.15951 0.52734,0.40039 0.71289,0.72265 l 0,-0.83008 0.89844,0 0,4.79004"
+ style="font-size:10px"
+ id="path3274" />
+ <path
+ d="m 456.95035,351.24908 0.89844,0 0,5.46875 -0.89844,0 0,-5.46875 m 0,-2.1289 0.89844,0 0,1.13769 -0.89844,0 0,-1.13769"
+ style="font-size:10px"
+ id="path3276" />
+ <path
+ d="m 464.26968,353.41705 0,3.30078 -0.89843,0 0,-3.27148 c -10e-6,-0.51757 -0.10092,-0.90494 -0.30274,-1.16211 -0.20183,-0.25716 -0.50456,-0.38574 -0.9082,-0.38574 -0.48503,0 -0.86752,0.15463 -1.14746,0.46387 -0.27995,0.30924 -0.41993,0.73079 -0.41992,1.26464 l 0,3.09082 -0.90332,0 0,-5.46875 0.90332,0 0,0.84961 c 0.21484,-0.32877 0.46712,-0.57454 0.75683,-0.7373 0.29297,-0.16276 0.62988,-0.24414 1.01074,-0.24414 0.62826,0 1.10352,0.19532 1.42579,0.58594 0.32226,0.38737 0.48339,0.95866 0.48339,1.71386"
+ style="font-size:10px"
+ id="path3278" />
+ </g>
+ <path
+ style="fill:none;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 343.73692,26.224389 0.01,294.941191"
+ id="path3861"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cc"
+ transform="translate(12.627039,339.86462)" />
+ <path
+ sodipodi:nodetypes="cc"
+ inkscape:connector-curvature="0"
+ id="path4307"
+ d="m 356.48533,366.00457 336.31202,-0.0196"
+ style="fill:none;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend);stroke-miterlimit:4;stroke-dasharray:none" />
+ <path
+ sodipodi:nodetypes="cc"
+ inkscape:connector-curvature="0"
+ id="path4309"
+ d="m 159.89916,447.6257 -0.0625,145.00422"
+ style="fill:none;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 160.03997,448.23877 184.95568,-0.0159"
+ id="path4493"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cc" />
+ <g
+ style="font-size:12px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ id="text4495">
+ <path
+ d="m 228.57881,442.02737 0.49805,0 0.7666,3.08106 0.76416,-3.08106 0.5542,0 0.7666,3.08106 0.76416,-3.08106 0.50049,0 -0.91553,3.64502 -0.62012,0 -0.76904,-3.16406 -0.77637,3.16406 -0.62011,0 -0.91309,-3.64502"
+ style="font-size:5px"
+ id="path3141" />
+ <path
+ d="m 233.71797,442.93802 0.44922,0 0,2.73437 -0.44922,0 0,-2.73437 m 0,-1.06445 0.44922,0 0,0.56884 -0.44922,0 0,-0.56884"
+ style="font-size:5px"
+ id="path3143" />
+ <path
+ d="m 237.37764,444.022 0,1.65039 -0.44922,0 0,-1.63574 c 0,-0.25879 -0.0505,-0.45247 -0.15137,-0.58105 -0.10091,-0.12858 -0.25228,-0.19287 -0.4541,-0.19287 -0.24251,0 -0.43376,0.0773 -0.57373,0.23193 -0.13997,0.15462 -0.20996,0.3654 -0.20996,0.63232 l 0,1.54541 -0.45166,0 0,-2.73437 0.45166,0 0,0.4248 c 0.10742,-0.16438 0.23356,-0.28727 0.37842,-0.36865 0.14648,-0.0814 0.31494,-0.12207 0.50537,-0.12207 0.31413,0 0.55176,0.0977 0.71289,0.29297 0.16113,0.19369 0.2417,0.47933 0.2417,0.85693"
+ style="font-size:5px"
+ id="path3145" />
+ <path
+ d="m 240.07784,443.35306 0,-1.47949 0.44921,0 0,3.79882 -0.44921,0 0,-0.41015 c -0.0944,0.16276 -0.21404,0.28401 -0.35889,0.36377 -0.14323,0.0781 -0.31576,0.11718 -0.51758,0.11718 -0.3304,0 -0.59977,-0.13183 -0.80811,-0.3955 -0.2067,-0.26367 -0.31005,-0.61035 -0.31005,-1.04004 0,-0.42969 0.10335,-0.77637 0.31005,-1.04004 0.20834,-0.26367 0.47771,-0.39551 0.80811,-0.39551 0.20182,0 0.37435,0.0399 0.51758,0.11963 0.14485,0.0781 0.26448,0.19857 0.35889,0.36133 m -1.53077,0.95459 c 0,0.3304 0.0676,0.59 0.20264,0.77881 0.13672,0.18717 0.32389,0.28076 0.56152,0.28076 0.23763,0 0.42481,-0.0936 0.56153,-0.28076 0.13671,-0.18881 0.20507,-0.44841 0.20508,-0.77881 -1e-5,-0.3304 -0.0684,-0.58919 -0.20508,-0.77637 -0.13672,-0.1888 -0.3239,-0.2832 -0.56153,-0.2832 -0.23763,0 -0.4248,0.0944 -0.56152,0.2832 -0.13509,0.18718 -0.20264,0.44597 -0.20264,0.77637"
+ style="font-size:5px"
+ id="path3147" />
+ <path
+ d="m 242.51192,443.25296 c -0.24089,0 -0.43132,0.0944 -0.57129,0.2832 -0.13998,0.18718 -0.20996,0.44434 -0.20996,0.77149 0,0.32715 0.0692,0.58512 0.20752,0.77392 0.13997,0.18718 0.33121,0.28077 0.57373,0.28077 0.23925,0 0.42887,-0.0944 0.56884,-0.28321 0.13998,-0.1888 0.20996,-0.44596 0.20997,-0.77148 -1e-5,-0.32389 -0.07,-0.58024 -0.20997,-0.76905 -0.13997,-0.19042 -0.32959,-0.28564 -0.56884,-0.28564 m 0,-0.38086 c 0.39062,0 0.69742,0.12696 0.92041,0.38086 0.22298,0.25391 0.33447,0.60547 0.33447,1.05469 0,0.44759 -0.11149,0.79915 -0.33447,1.05469 -0.22299,0.2539 -0.52979,0.38085 -0.92041,0.38085 -0.39226,0 -0.69987,-0.12695 -0.92285,-0.38085 -0.22136,-0.25554 -0.33204,-0.6071 -0.33204,-1.05469 0,-0.44922 0.11068,-0.80078 0.33204,-1.05469 0.22298,-0.2539 0.53059,-0.38086 0.92285,-0.38086"
+ style="font-size:5px"
+ id="path3149" />
+ <path
+ d="m 244.24776,442.93802 0.44922,0 0.56152,2.13379 0.55908,-2.13379 0.52979,0 0.56152,2.13379 0.55908,-2.13379 0.44922,0 -0.71533,2.73437 -0.52979,0 -0.58838,-2.24121 -0.59082,2.24121 -0.52978,0 -0.71533,-2.73437"
+ style="font-size:5px"
+ id="path3151" />
+ <path
+ d="m 249.70674,442.02737 3.0835,0 0,0.41504 -1.29395,0 0,3.22998 -0.4956,0 0,-3.22998 -1.29395,0 0,-0.41504"
+ style="font-size:5px"
+ id="path3153" />
+ <path
+ d="m 253.09297,442.93802 0.44922,0 0,2.73437 -0.44922,0 0,-2.73437 m 0,-1.06445 0.44922,0 0,0.56884 -0.44922,0 0,-0.56884"
+ style="font-size:5px"
+ id="path3155" />
+ <path
+ d="m 254.92403,442.16165 0,0.77637 0.92529,0 0,0.34912 -0.92529,0 0,1.48437 c 0,0.22299 0.0301,0.36622 0.0903,0.42969 0.0618,0.0635 0.18636,0.0952 0.37353,0.0952 l 0.46143,0 0,0.37597 -0.46143,0 c -0.34668,0 -0.58593,-0.0643 -0.71777,-0.19287 -0.13184,-0.13021 -0.19775,-0.36621 -0.19775,-0.70801 l 0,-1.48437 -0.32959,0 0,-0.34912 0.32959,0 0,-0.77637 0.45166,0"
+ style="font-size:5px"
+ id="path3157" />
+ <path
+ d="m 256.44258,441.87357 0.44922,0 0,3.79882 -0.44922,0 0,-3.79882"
+ style="font-size:5px"
+ id="path3159" />
+ <path
+ d="m 260.16817,444.1929 0,0.21973 -2.06543,0 c 0.0195,0.30924 0.1123,0.54525 0.27832,0.70801 0.16764,0.16113 0.40039,0.2417 0.69824,0.2417 0.17252,0 0.33935,-0.0212 0.50049,-0.0635 0.16276,-0.0423 0.32389,-0.1058 0.4834,-0.19043 l 0,0.4248 c -0.16114,0.0684 -0.32634,0.12045 -0.49561,0.15625 -0.16927,0.0358 -0.34098,0.0537 -0.51514,0.0537 -0.43619,0 -0.78206,-0.12695 -1.03759,-0.38085 -0.25391,-0.25391 -0.38086,-0.59733 -0.38086,-1.03028 0,-0.44759 0.12044,-0.8024 0.36133,-1.06445 0.24251,-0.26367 0.56884,-0.39551 0.979,-0.39551 0.36784,0 0.65836,0.11882 0.87158,0.35645 0.21484,0.236 0.32226,0.55745 0.32227,0.96435 m -0.44922,-0.13183 c -0.003,-0.24577 -0.0724,-0.4419 -0.20752,-0.58838 -0.13347,-0.14649 -0.31088,-0.21973 -0.53223,-0.21973 -0.25065,0 -0.45166,0.0708 -0.60303,0.2124 -0.14974,0.14161 -0.236,0.34099 -0.25878,0.59815 l 1.60156,-0.002"
+ style="font-size:5px"
+ id="path3161" />
+ </g>
+ <g
+ aria-label="Content area origin"
+ style="font-style:normal;font-weight:normal;font-size:40px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none"
+ id="text4089">
+ <path
+ d="m 172.21587,456.09125 v 1.04004 q -0.49805,-0.46387 -1.06446,-0.69336 -0.56152,-0.22949 -1.19629,-0.22949 -1.25,0 -1.91406,0.7666 -0.66406,0.76172 -0.66406,2.20703 0,1.44043 0.66406,2.20703 0.66406,0.76172 1.91406,0.76172 0.63477,0 1.19629,-0.22949 0.56641,-0.2295 1.06446,-0.69336 v 1.03027 q -0.51758,0.35156 -1.09864,0.52734 -0.57617,0.17578 -1.2207,0.17578 -1.65527,0 -2.60742,-1.01074 -0.95215,-1.01562 -0.95215,-2.76855 0,-1.75781 0.95215,-2.76856 0.95215,-1.01562 2.60742,-1.01562 0.6543,0 1.23047,0.17578 0.58105,0.1709 1.08887,0.51758 z"
+ style="font-size:10px"
+ id="path4091" />
+ <path
+ d="m 175.82915,457.9809 q -0.72266,0 -1.14258,0.5664 -0.41992,0.56153 -0.41992,1.54297 0,0.98145 0.41504,1.54785 0.41992,0.56153 1.14746,0.56153 0.71777,0 1.13769,-0.56641 0.41992,-0.56641 0.41992,-1.54297 0,-0.97168 -0.41992,-1.53808 -0.41992,-0.57129 -1.13769,-0.57129 z m 0,-0.76172 q 1.17187,0 1.84082,0.76172 0.66894,0.76171 0.66894,2.10937 0,1.34277 -0.66894,2.10938 -0.66895,0.76171 -1.84082,0.76171 -1.17676,0 -1.84571,-0.76171 -0.66406,-0.76661 -0.66406,-2.10938 0,-1.34766 0.66406,-2.10937 0.66895,-0.76172 1.84571,-0.76172 z"
+ style="font-size:10px"
+ id="path4093" />
+ <path
+ d="m 184.36919,459.51898 v 3.30078 h -0.89844 v -3.27148 q 0,-0.77637 -0.30274,-1.16211 -0.30273,-0.38574 -0.9082,-0.38574 -0.72754,0 -1.14746,0.46386 -0.41992,0.46387 -0.41992,1.26465 v 3.09082 h -0.90332 v -5.46875 h 0.90332 v 0.84961 q 0.32226,-0.49316 0.75683,-0.7373 0.43946,-0.24414 1.01075,-0.24414 0.94238,0 1.42578,0.58593 0.4834,0.58106 0.4834,1.71387 z"
+ style="font-size:10px"
+ id="path4095" />
+ <path
+ d="m 187.05962,455.79828 v 1.55273 h 1.85058 v 0.69825 h -1.85058 v 2.96875 q 0,0.66894 0.18066,0.85937 0.18555,0.19043 0.74707,0.19043 h 0.92285 v 0.75195 h -0.92285 q -1.04004,0 -1.43555,-0.38574 -0.3955,-0.39062 -0.3955,-1.41601 v -2.96875 h -0.65918 v -0.69825 h 0.65918 v -1.55273 z"
+ style="font-size:10px"
+ id="path4097" />
+ <path
+ d="m 194.77446,459.86078 v 0.43945 h -4.13086 q 0.0586,0.92774 0.55664,1.41602 0.50293,0.4834 1.39649,0.4834 0.51757,0 1.00097,-0.12696 0.48828,-0.12695 0.9668,-0.38086 v 0.84961 q -0.4834,0.20508 -0.99121,0.3125 -0.50781,0.10742 -1.03028,0.10742 -1.30859,0 -2.07519,-0.76171 -0.76172,-0.76172 -0.76172,-2.06055 0,-1.34277 0.72266,-2.12891 0.72754,-0.79101 1.958,-0.79101 1.10352,0 1.74317,0.71289 0.64453,0.70801 0.64453,1.92871 z m -0.89844,-0.26367 q -0.01,-0.73731 -0.41504,-1.17676 -0.40039,-0.43945 -1.06445,-0.43945 -0.75195,0 -1.20605,0.4248 -0.44922,0.42481 -0.51758,1.19629 z"
+ style="font-size:10px"
+ id="path4099" />
+ <path
+ d="m 200.79497,459.51898 v 3.30078 h -0.89844 v -3.27148 q 0,-0.77637 -0.30273,-1.16211 -0.30274,-0.38574 -0.90821,-0.38574 -0.72754,0 -1.14746,0.46386 -0.41992,0.46387 -0.41992,1.26465 v 3.09082 h -0.90332 v -5.46875 h 0.90332 v 0.84961 q 0.32227,-0.49316 0.75684,-0.7373 0.43945,-0.24414 1.01074,-0.24414 0.94238,0 1.42578,0.58593 0.4834,0.58106 0.4834,1.71387 z"
+ style="font-size:10px"
+ id="path4101" />
+ <path
+ d="m 203.4854,455.79828 v 1.55273 h 1.85058 v 0.69825 h -1.85058 v 2.96875 q 0,0.66894 0.18066,0.85937 0.18555,0.19043 0.74707,0.19043 h 0.92285 v 0.75195 h -0.92285 q -1.04004,0 -1.43555,-0.38574 -0.3955,-0.39062 -0.3955,-1.41601 v -2.96875 h -0.65918 v -0.69825 h 0.65918 v -1.55273 z"
+ style="font-size:10px"
+ id="path4103" />
+ <path
+ d="m 212.19145,460.07074 q -1.08887,0 -1.50879,0.24902 -0.41992,0.24903 -0.41992,0.84961 0,0.47852 0.3125,0.76172 0.31738,0.27832 0.85938,0.27832 0.74707,0 1.19629,-0.52734 0.4541,-0.53223 0.4541,-1.41113 v -0.2002 z m 1.79199,-0.37109 v 3.12011 h -0.89843 v -0.83007 q -0.30762,0.49804 -0.7666,0.7373 -0.45899,0.23437 -1.12305,0.23437 -0.83985,0 -1.33789,-0.46875 -0.49317,-0.47363 -0.49317,-1.26464 0,-0.92286 0.61524,-1.39161 0.62012,-0.46875 1.8457,-0.46875 h 1.25977 v -0.0879 q 0,-0.62011 -0.41016,-0.95703 -0.40527,-0.34179 -1.14258,-0.34179 -0.46875,0 -0.91308,0.1123 -0.44434,0.11231 -0.8545,0.33691 v -0.83007 q 0.49317,-0.19043 0.95704,-0.28321 0.46386,-0.0976 0.90332,-0.0976 1.18652,0 1.77246,0.61523 0.58593,0.61524 0.58593,1.86524 z"
+ style="font-size:10px"
+ id="path4105" />
+ <path
+ d="m 219.00786,458.19086 q -0.15137,-0.0879 -0.33203,-0.12696 -0.17578,-0.0439 -0.39063,-0.0439 -0.76172,0 -1.17187,0.49805 -0.40528,0.49316 -0.40528,1.42089 v 2.88086 h -0.90332 v -5.46875 h 0.90332 v 0.84961 q 0.28321,-0.49804 0.73731,-0.7373 0.4541,-0.24414 1.10351,-0.24414 0.0928,0 0.20508,0.0147 0.11231,0.01 0.24903,0.0342 z"
+ style="font-size:10px"
+ id="path4107" />
+ <path
+ d="m 224.4229,459.86078 v 0.43945 h -4.13086 q 0.0586,0.92774 0.55664,1.41602 0.50293,0.4834 1.39648,0.4834 0.51758,0 1.00098,-0.12696 0.48828,-0.12695 0.9668,-0.38086 v 0.84961 q -0.4834,0.20508 -0.99121,0.3125 -0.50782,0.10742 -1.03028,0.10742 -1.30859,0 -2.07519,-0.76171 -0.76172,-0.76172 -0.76172,-2.06055 0,-1.34277 0.72265,-2.12891 0.72754,-0.79101 1.95801,-0.79101 1.10352,0 1.74317,0.71289 0.64453,0.70801 0.64453,1.92871 z m -0.89844,-0.26367 q -0.01,-0.73731 -0.41504,-1.17676 -0.40039,-0.43945 -1.06445,-0.43945 -0.75196,0 -1.20606,0.4248 -0.44922,0.42481 -0.51758,1.19629 z"
+ style="font-size:10px"
+ id="path4109" />
+ <path
+ d="m 228.38286,460.07074 q -1.08887,0 -1.50879,0.24902 -0.41992,0.24903 -0.41992,0.84961 0,0.47852 0.3125,0.76172 0.31738,0.27832 0.85937,0.27832 0.74707,0 1.19629,-0.52734 0.4541,-0.53223 0.4541,-1.41113 v -0.2002 z m 1.79199,-0.37109 v 3.12011 h -0.89844 v -0.83007 q -0.30761,0.49804 -0.7666,0.7373 -0.45898,0.23437 -1.12305,0.23437 -0.83984,0 -1.33789,-0.46875 -0.49316,-0.47363 -0.49316,-1.26464 0,-0.92286 0.61523,-1.39161 0.62012,-0.46875 1.84571,-0.46875 h 1.25976 v -0.0879 q 0,-0.62011 -0.41015,-0.95703 -0.40528,-0.34179 -1.14258,-0.34179 -0.46875,0 -0.91309,0.1123 -0.44433,0.11231 -0.85449,0.33691 v -0.83007 q 0.49316,-0.19043 0.95703,-0.28321 0.46387,-0.0976 0.90332,-0.0976 1.18653,0 1.77246,0.61523 0.58594,0.61524 0.58594,1.86524 z"
+ style="font-size:10px"
+ id="path4111" />
+ <path
+ d="m 237.33305,457.9809 q -0.72265,0 -1.14257,0.5664 -0.41993,0.56153 -0.41993,1.54297 0,0.98145 0.41504,1.54785 0.41992,0.56153 1.14746,0.56153 0.71778,0 1.1377,-0.56641 0.41992,-0.56641 0.41992,-1.54297 0,-0.97168 -0.41992,-1.53808 -0.41992,-0.57129 -1.1377,-0.57129 z m 0,-0.76172 q 1.17188,0 1.84082,0.76172 0.66895,0.76171 0.66895,2.10937 0,1.34277 -0.66895,2.10938 -0.66894,0.76171 -1.84082,0.76171 -1.17675,0 -1.8457,-0.76171 -0.66406,-0.76661 -0.66406,-2.10938 0,-1.34766 0.66406,-2.10937 0.66895,-0.76172 1.8457,-0.76172 z"
+ style="font-size:10px"
+ id="path4113" />
+ <path
+ d="m 244.49614,458.19086 q -0.15137,-0.0879 -0.33203,-0.12696 -0.17578,-0.0439 -0.39063,-0.0439 -0.76172,0 -1.17187,0.49805 -0.40528,0.49316 -0.40528,1.42089 v 2.88086 h -0.90332 v -5.46875 h 0.90332 v 0.84961 q 0.28321,-0.49804 0.73731,-0.7373 0.4541,-0.24414 1.10352,-0.24414 0.0928,0 0.20507,0.0147 0.11231,0.01 0.24903,0.0342 z"
+ style="font-size:10px"
+ id="path4115" />
+ <path
+ d="m 245.44829,457.35101 h 0.89844 v 5.46875 h -0.89844 z m 0,-2.1289 h 0.89844 v 1.13769 h -0.89844 z"
+ style="font-size:10px"
+ id="path4117" />
+ <path
+ d="m 251.82036,460.02191 q 0,-0.97656 -0.40528,-1.51367 -0.40039,-0.53711 -1.12792,-0.53711 -0.72266,0 -1.12793,0.53711 -0.4004,0.53711 -0.4004,1.51367 0,0.97168 0.4004,1.50879 0.40527,0.53711 1.12793,0.53711 0.72753,0 1.12792,-0.53711 0.40528,-0.53711 0.40528,-1.50879 z m 0.89844,2.11914 q 0,1.39649 -0.62012,2.0752 -0.62012,0.68359 -1.89942,0.68359 -0.47363,0 -0.89355,-0.0732 -0.41992,-0.0684 -0.81543,-0.21484 v -0.87403 q 0.39551,0.21485 0.78125,0.31738 0.38574,0.10254 0.78613,0.10254 0.88379,0 1.32325,-0.46386 0.43945,-0.45899 0.43945,-1.3916 v -0.44434 q -0.27832,0.4834 -0.71289,0.72266 -0.43457,0.23925 -1.04004,0.23925 -1.00586,0 -1.6211,-0.7666 -0.61523,-0.7666 -0.61523,-2.03125 0,-1.26953 0.61523,-2.03613 0.61524,-0.7666 1.6211,-0.7666 0.60547,0 1.04004,0.23926 0.43457,0.23925 0.71289,0.72265 v -0.83008 h 0.89844 z"
+ style="font-size:10px"
+ id="path4119" />
+ <path
+ d="m 254.56938,457.35101 h 0.89844 v 5.46875 h -0.89844 z m 0,-2.1289 h 0.89844 v 1.13769 h -0.89844 z"
+ style="font-size:10px"
+ id="path4121" />
+ <path
+ d="m 261.88872,459.51898 v 3.30078 h -0.89844 v -3.27148 q 0,-0.77637 -0.30273,-1.16211 -0.30274,-0.38574 -0.90821,-0.38574 -0.72754,0 -1.14746,0.46386 -0.41992,0.46387 -0.41992,1.26465 v 3.09082 h -0.90332 v -5.46875 h 0.90332 v 0.84961 q 0.32227,-0.49316 0.75684,-0.7373 0.43945,-0.24414 1.01074,-0.24414 0.94238,0 1.42578,0.58593 0.4834,0.58106 0.4834,1.71387 z"
+ style="font-size:10px"
+ id="path4123" />
+ </g>
+ </g>
+</svg>
diff --git a/chromium/third_party/dawn/third_party/glfw/docs/vulkan.dox b/chromium/third_party/dawn/third_party/glfw/docs/vulkan.dox
new file mode 100644
index 00000000000..31891036b8e
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/docs/vulkan.dox
@@ -0,0 +1,246 @@
+/*!
+
+@page vulkan_guide Vulkan guide
+
+@tableofcontents
+
+This guide is intended to fill the gaps between the official [Vulkan
+resources](https://www.khronos.org/vulkan/) and the rest of the GLFW
+documentation and is not a replacement for either. It assumes some familiarity
+with Vulkan concepts like loaders, devices, queues and surfaces and leaves it to
+the Vulkan documentation to explain the details of Vulkan functions.
+
+To develop for Vulkan you should download the [LunarG Vulkan
+SDK](https://vulkan.lunarg.com/) for your platform. Apart from headers and link
+libraries, it also provides the validation layers necessary for development.
+
+The [Vulkan Tutorial](https://vulkan-tutorial.com/) has more information on how
+to use GLFW and Vulkan. The [Khronos Vulkan
+Samples](https://github.com/KhronosGroup/Vulkan-Samples) also use GLFW, although
+with a small framework in between.
+
+For details on a specific Vulkan support function, see the @ref vulkan. There
+are also guides for the other areas of the GLFW API.
+
+ - @ref intro_guide
+ - @ref window_guide
+ - @ref context_guide
+ - @ref monitor_guide
+ - @ref input_guide
+
+
+@section vulkan_loader Finding the Vulkan loader
+
+GLFW itself never needs to be linked against the Vulkan loader.
+
+By default, GLFW will load the Vulkan loader dynamically at runtime via its standard name:
+`vulkan-1.dll` on Windows, `libvulkan.so.1` on Linux and other Unix-like systems and
+`libvulkan.1.dylib` on macOS.
+
+@macos GLFW will also look up and search the executable subdirectory of your application
+bundle.
+
+If your code is using a Vulkan loader with a different name or in a non-standard location
+you will need to direct GLFW to it. Pass your version of `vkGetInstanceProcAddr` to @ref
+glfwInitVulkanLoader before initializing GLFW and it will use that function for all Vulkan
+entry point retrieval. This prevents GLFW from dynamically loading the Vulkan loader.
+
+@code
+glfwInitVulkanLoader(vkGetInstanceProcAddr);
+@endcode
+
+@macos To make your application redistributable you will need to set up the application
+bundle according to the LunarG SDK documentation. This is explained in more detail in the
+[SDK documentation for macOS](https://vulkan.lunarg.com/doc/sdk/latest/mac/getting_started.html).
+
+
+@section vulkan_include Including the Vulkan header file
+
+To have GLFW include the Vulkan header, define @ref GLFW_INCLUDE_VULKAN before including
+the GLFW header.
+
+@code
+#define GLFW_INCLUDE_VULKAN
+#include <GLFW/glfw3.h>
+@endcode
+
+If you instead want to include the Vulkan header from a custom location or use
+your own custom Vulkan header then do this before the GLFW header.
+
+@code
+#include <path/to/vulkan.h>
+#include <GLFW/glfw3.h>
+@endcode
+
+Unless a Vulkan header is included, either by the GLFW header or above it, the following
+GLFW functions will not be declared, as they depend on Vulkan types.
+
+ - @ref glfwInitVulkanLoader
+ - @ref glfwGetInstanceProcAddress
+ - @ref glfwGetPhysicalDevicePresentationSupport
+ - @ref glfwCreateWindowSurface
+
+The `VK_USE_PLATFORM_*_KHR` macros do not need to be defined for the Vulkan part
+of GLFW to work. Define them only if you are using these extensions directly.
+
+
+@section vulkan_support Querying for Vulkan support
+
+If you are linking directly against the Vulkan loader then you can skip this
+section. The canonical desktop loader library exports all Vulkan core and
+Khronos extension functions, allowing them to be called directly.
+
+If you are loading the Vulkan loader dynamically instead of linking directly
+against it, you can check for the availability of a loader and ICD with @ref
+glfwVulkanSupported.
+
+@code
+if (glfwVulkanSupported())
+{
+ // Vulkan is available, at least for compute
+}
+@endcode
+
+This function returns `GLFW_TRUE` if the Vulkan loader and any minimally
+functional ICD were found.
+
+If one or both were not found, calling any other Vulkan related GLFW function
+will generate a @ref GLFW_API_UNAVAILABLE error.
+
+
+@subsection vulkan_proc Querying Vulkan function pointers
+
+To load any Vulkan core or extension function from the found loader, call @ref
+glfwGetInstanceProcAddress. To load functions needed for instance creation,
+pass `NULL` as the instance.
+
+@code
+PFN_vkCreateInstance pfnCreateInstance = (PFN_vkCreateInstance)
+ glfwGetInstanceProcAddress(NULL, "vkCreateInstance");
+@endcode
+
+Once you have created an instance, you can load from it all other Vulkan core
+functions and functions from any instance extensions you enabled.
+
+@code
+PFN_vkCreateDevice pfnCreateDevice = (PFN_vkCreateDevice)
+ glfwGetInstanceProcAddress(instance, "vkCreateDevice");
+@endcode
+
+This function in turn calls `vkGetInstanceProcAddr`. If that fails, the
+function falls back to a platform-specific query of the Vulkan loader (i.e.
+`dlsym` or `GetProcAddress`). If that also fails, the function returns `NULL`.
+For more information about `vkGetInstanceProcAddr`, see the Vulkan
+documentation.
+
+Vulkan also provides `vkGetDeviceProcAddr` for loading device-specific versions
+of Vulkan functions. This function can be retrieved from an instance with @ref
+glfwGetInstanceProcAddress.
+
+@code
+PFN_vkGetDeviceProcAddr pfnGetDeviceProcAddr = (PFN_vkGetDeviceProcAddr)
+ glfwGetInstanceProcAddress(instance, "vkGetDeviceProcAddr");
+@endcode
+
+Device-specific functions may execute a little bit faster, due to not having to
+dispatch internally based on the device passed to them. For more information
+about `vkGetDeviceProcAddr`, see the Vulkan documentation.
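+
+As an illustration, once you have created a logical device with your own
+`vkCreateDevice` call, you could load a device-level function such as
+`vkQueueSubmit` through the retrieved function pointer (the `device` variable
+here is assumed to be yours):
+
+@code
+PFN_vkQueueSubmit pfnQueueSubmit = (PFN_vkQueueSubmit)
+    pfnGetDeviceProcAddr(device, "vkQueueSubmit");
+@endcode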
+
+
+@section vulkan_ext Querying required Vulkan extensions
+
+To do anything useful with Vulkan you need to create an instance. If you want
+to use Vulkan to render to a window, you must enable the instance extensions
+GLFW requires to create Vulkan surfaces.
+
+To query the instance extensions required, call @ref
+glfwGetRequiredInstanceExtensions.
+
+@code
+uint32_t count;
+const char** extensions = glfwGetRequiredInstanceExtensions(&count);
+@endcode
+
+These extensions must all be enabled when creating instances that are going to
+be passed to @ref glfwGetPhysicalDevicePresentationSupport and @ref
+glfwCreateWindowSurface. The set of extensions will vary depending on platform
+and may also vary depending on graphics drivers and other factors.
+
+If this function fails it will return `NULL` and GLFW will not be able to
+create Vulkan window surfaces. You can still use Vulkan for off-screen
+rendering and compute work.
+
+If successful the returned array will always include `VK_KHR_surface`, so if
+you don't require any additional extensions you can pass this list directly to
+the `VkInstanceCreateInfo` struct.
+
+@code
+VkInstanceCreateInfo ici;
+
+memset(&ici, 0, sizeof(ici));
+ici.enabledExtensionCount = count;
+ici.ppEnabledExtensionNames = extensions;
+...
+@endcode
+
+Additional extensions may be required by future versions of GLFW. You should
+check whether any extensions you wish to enable are already in the returned
+array, as it is an error to specify an extension more than once in the
+`VkInstanceCreateInfo` struct.
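+
+As a sketch, merging one additional extension of your own with the returned
+array could look like this, reusing `count`, `extensions` and `ici` from the
+snippets above (the `VK_EXT_debug_utils` extension and the fixed-size array are
+only examples):
+
+@code
+const char* merged[16];
+uint32_t merged_count = 0;
+int have_debug_utils = 0;
+
+for (uint32_t i = 0; i < count; i++)
+{
+    // Copy the required extensions and note whether ours is already there
+    merged[merged_count++] = extensions[i];
+    if (strcmp(extensions[i], VK_EXT_DEBUG_UTILS_EXTENSION_NAME) == 0)
+        have_debug_utils = 1;
+}
+
+if (!have_debug_utils)
+    merged[merged_count++] = VK_EXT_DEBUG_UTILS_EXTENSION_NAME;
+
+ici.enabledExtensionCount = merged_count;
+ici.ppEnabledExtensionNames = merged;
+@endcode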
+
+
+@section vulkan_present Querying for Vulkan presentation support
+
+Not every queue family of every Vulkan device can present images to surfaces.
+To check whether a specific queue family of a physical device supports image
+presentation without first having to create a window and surface, call @ref
+glfwGetPhysicalDevicePresentationSupport.
+
+@code
+if (glfwGetPhysicalDevicePresentationSupport(instance, physical_device, queue_family_index))
+{
+ // Queue family supports image presentation
+}
+@endcode
+
+The `VK_KHR_surface` extension additionally provides the
+`vkGetPhysicalDeviceSurfaceSupportKHR` function, which performs the same test on
+an existing Vulkan surface.
+
+
+@section vulkan_window Creating the window
+
+Unless you will be using OpenGL or OpenGL ES with the same window as Vulkan,
+there is no need to create a context. You can disable context creation with the
+[GLFW_CLIENT_API](@ref GLFW_CLIENT_API_hint) hint.
+
+@code
+glfwWindowHint(GLFW_CLIENT_API, GLFW_NO_API);
+GLFWwindow* window = glfwCreateWindow(640, 480, "Window Title", NULL, NULL);
+@endcode
+
+See @ref context_less for more information.
+
+
+@section vulkan_surface Creating a Vulkan window surface
+
+You can create a Vulkan surface (as defined by the `VK_KHR_surface` extension)
+for a GLFW window with @ref glfwCreateWindowSurface.
+
+@code
+VkSurfaceKHR surface;
+VkResult err = glfwCreateWindowSurface(instance, window, NULL, &surface);
+if (err)
+{
+ // Window surface creation failed
+}
+@endcode
+
+If an OpenGL or OpenGL ES context was created on the window, the context has
+ownership of the presentation on the window and a Vulkan surface cannot be
+created.
+
+It is your responsibility to destroy the surface. GLFW does not destroy it for
+you. Call the `vkDestroySurfaceKHR` function from the same extension to destroy
+it.
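+
+For example, during shutdown the surface is destroyed before the instance (no
+allocation callbacks are used here):
+
+@code
+vkDestroySurfaceKHR(instance, surface, NULL);
+@endcode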
+
+*/
diff --git a/chromium/third_party/dawn/third_party/glfw/docs/window.dox b/chromium/third_party/dawn/third_party/glfw/docs/window.dox
new file mode 100644
index 00000000000..32271e3a98a
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/docs/window.dox
@@ -0,0 +1,1457 @@
+/*!
+
+@page window_guide Window guide
+
+@tableofcontents
+
+This guide introduces the window related functions of GLFW. For details on
+a specific function in this category, see the @ref window. There are also
+guides for the other areas of GLFW.
+
+ - @ref intro_guide
+ - @ref context_guide
+ - @ref vulkan_guide
+ - @ref monitor_guide
+ - @ref input_guide
+
+
+@section window_object Window objects
+
+The @ref GLFWwindow object encapsulates both a window and a context. They are
+created with @ref glfwCreateWindow and destroyed with @ref glfwDestroyWindow, or
+@ref glfwTerminate, if any remain. As the window and context are inseparably
+linked, the object pointer is used as both a context and window handle.
+
+To see the event stream provided to the various window related callbacks, run
+the `events` test program.
+
+
+@subsection window_creation Window creation
+
+A window and its OpenGL or OpenGL ES context are created with @ref
+glfwCreateWindow, which returns a handle to the created window object. For
+example, this creates a 640 by 480 windowed mode window:
+
+@code
+GLFWwindow* window = glfwCreateWindow(640, 480, "My Title", NULL, NULL);
+@endcode
+
+If window creation fails, `NULL` will be returned, so it is necessary to check
+the return value.
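+
+A minimal check could look like this; how you report and handle the error is up
+to your application:
+
+@code
+GLFWwindow* window = glfwCreateWindow(640, 480, "My Title", NULL, NULL);
+if (!window)
+{
+    // Window or context creation failed; handle the error here
+}
+@endcode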
+
+The window handle is passed to all window related functions and is provided
+along with all input events, so event handlers can tell which window received
+the event.
+
+
+@subsubsection window_full_screen Full screen windows
+
+To create a full screen window, you need to specify which monitor the window
+should use. In most cases, the user's primary monitor is a good choice.
+For more information about retrieving monitors, see @ref monitor_monitors.
+
+@code
+GLFWwindow* window = glfwCreateWindow(640, 480, "My Title", glfwGetPrimaryMonitor(), NULL);
+@endcode
+
+Full screen windows cover the entire display area of a monitor and have no
+border or decorations.
+
+Windowed mode windows can be made full screen by setting a monitor with @ref
+glfwSetWindowMonitor, and full screen ones can be made windowed by unsetting it
+with the same function.
+
+Each field of the @ref GLFWvidmode structure corresponds to a function
+parameter or window hint, and together they form the _desired video mode_ for
+that window.
+The supported video mode most closely matching the desired video mode will be
+set for the chosen monitor as long as the window has input focus. For more
+information about retrieving video modes, see @ref monitor_modes.
+
+Video mode field | Corresponds to
+---------------- | --------------
+GLFWvidmode.width | `width` parameter of @ref glfwCreateWindow
+GLFWvidmode.height | `height` parameter of @ref glfwCreateWindow
+GLFWvidmode.redBits | @ref GLFW_RED_BITS hint
+GLFWvidmode.greenBits | @ref GLFW_GREEN_BITS hint
+GLFWvidmode.blueBits | @ref GLFW_BLUE_BITS hint
+GLFWvidmode.refreshRate | @ref GLFW_REFRESH_RATE hint
+
+Once you have a full screen window, you can change its resolution, refresh rate
+and monitor with @ref glfwSetWindowMonitor. If you only need to change its
+resolution you can also call @ref glfwSetWindowSize. In all cases, the new
+video mode will be selected the same way as the video mode chosen by @ref
+glfwCreateWindow. If the window has an OpenGL or OpenGL ES context, it will be
+unaffected.
+
+By default, the original video mode of the monitor will be restored and the
+window iconified if it loses input focus, to allow the user to switch back to
+the desktop. This behavior can be disabled with the
+[GLFW_AUTO_ICONIFY](@ref GLFW_AUTO_ICONIFY_hint) window hint, for example if you
+wish to simultaneously cover multiple monitors with full screen windows.
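+
+For example, to keep full screen windows up on several monitors at once, you
+could disable the hint before creating them:
+
+@code
+glfwWindowHint(GLFW_AUTO_ICONIFY, GLFW_FALSE);
+@endcode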
+
+If a monitor is disconnected, all windows that are full screen on that monitor
+will be switched to windowed mode. See @ref monitor_event for more information.
+
+
+@subsubsection window_windowed_full_screen "Windowed full screen" windows
+
+If the closest match for the desired video mode is the current one, the video
+mode will not be changed, making window creation faster and application
+switching much smoother. This is sometimes called _windowed full screen_ or
+_borderless full screen_ window and counts as a full screen window. To create
+such a window, request the current video mode.
+
+@code
+const GLFWvidmode* mode = glfwGetVideoMode(monitor);
+
+glfwWindowHint(GLFW_RED_BITS, mode->redBits);
+glfwWindowHint(GLFW_GREEN_BITS, mode->greenBits);
+glfwWindowHint(GLFW_BLUE_BITS, mode->blueBits);
+glfwWindowHint(GLFW_REFRESH_RATE, mode->refreshRate);
+
+GLFWwindow* window = glfwCreateWindow(mode->width, mode->height, "My Title", monitor, NULL);
+@endcode
+
+This also works for windowed mode windows that are made full screen.
+
+@code
+const GLFWvidmode* mode = glfwGetVideoMode(monitor);
+
+glfwSetWindowMonitor(window, monitor, 0, 0, mode->width, mode->height, mode->refreshRate);
+@endcode
+
+Note that @ref glfwGetVideoMode returns the _current_ video mode of a monitor,
+so if you already have a full screen window on that monitor that you want to
+make windowed full screen, you need to have saved the desktop resolution before.
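+
+One way to do this is to copy the video mode structure while the monitor still
+shows the desktop, for example:
+
+@code
+// Save the desktop video mode before any full screen window changes it
+GLFWvidmode desktop_mode = *glfwGetVideoMode(monitor);
+@endcode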
+
+
+@subsection window_destruction Window destruction
+
+When a window is no longer needed, destroy it with @ref glfwDestroyWindow.
+
+@code
+glfwDestroyWindow(window);
+@endcode
+
+Window destruction always succeeds. Before the actual destruction, all
+callbacks are removed so no further events will be delivered for the window.
+All windows remaining when @ref glfwTerminate is called are destroyed as well.
+
+When a full screen window is destroyed, the original video mode of its monitor
+is restored, but the gamma ramp is left untouched.
+
+
+@subsection window_hints Window creation hints
+
+There are a number of hints that can be set before the creation of a window and
+context. Some affect the window itself, others affect the framebuffer or
+context. These hints are set to their default values each time the library is
+initialized with @ref glfwInit. Integer value hints can be set individually
+with @ref glfwWindowHint and string value hints with @ref glfwWindowHintString.
+You can reset all at once to their defaults with @ref glfwDefaultWindowHints.
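+
+As an illustration, a typical setup might reset all hints to their defaults and
+then override only the ones it cares about (the values here are only examples):
+
+@code
+glfwDefaultWindowHints();
+glfwWindowHint(GLFW_RESIZABLE, GLFW_FALSE);
+glfwWindowHint(GLFW_SAMPLES, 4);
+@endcode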
+
+Some hints are platform specific. These are always valid to set on any
+platform but they will only affect their specific platform. Other platforms
+will ignore them. Setting these hints requires no platform specific headers or
+calls.
+
+@note Window hints need to be set before the creation of the window and context
+you wish to have the specified attributes. They function as additional
+arguments to @ref glfwCreateWindow.
+
+
+@subsubsection window_hints_hard Hard and soft constraints
+
+Some window hints are hard constraints. These must match the available
+capabilities _exactly_ for window and context creation to succeed. Hints
+that are not hard constraints are matched as closely as possible, but the
+resulting context and framebuffer may differ from what these hints requested.
+
+The following hints are always hard constraints:
+- @ref GLFW_STEREO
+- @ref GLFW_DOUBLEBUFFER
+- [GLFW_CLIENT_API](@ref GLFW_CLIENT_API_hint)
+- [GLFW_CONTEXT_CREATION_API](@ref GLFW_CONTEXT_CREATION_API_hint)
+
+The following additional hints are hard constraints when requesting an OpenGL
+context, but are ignored when requesting an OpenGL ES context:
+- [GLFW_OPENGL_FORWARD_COMPAT](@ref GLFW_OPENGL_FORWARD_COMPAT_hint)
+- [GLFW_OPENGL_PROFILE](@ref GLFW_OPENGL_PROFILE_hint)
+
+
+@subsubsection window_hints_wnd Window related hints
+
+@anchor GLFW_RESIZABLE_hint
+__GLFW_RESIZABLE__ specifies whether the windowed mode window will be resizable
+_by the user_. The window will still be resizable using the @ref
+glfwSetWindowSize function. Possible values are `GLFW_TRUE` and `GLFW_FALSE`.
+This hint is ignored for full screen and undecorated windows.
+
+@anchor GLFW_VISIBLE_hint
+__GLFW_VISIBLE__ specifies whether the windowed mode window will be initially
+visible. Possible values are `GLFW_TRUE` and `GLFW_FALSE`. This hint is
+ignored for full screen windows.
+
+@anchor GLFW_DECORATED_hint
+__GLFW_DECORATED__ specifies whether the windowed mode window will have window
+decorations such as a border, a close widget, etc. An undecorated window will
+not be resizable by the user but will still allow the user to generate close
+events on some platforms. Possible values are `GLFW_TRUE` and `GLFW_FALSE`.
+This hint is ignored for full screen windows.
+
+@anchor GLFW_FOCUSED_hint
+__GLFW_FOCUSED__ specifies whether the windowed mode window will be given input
+focus when created. Possible values are `GLFW_TRUE` and `GLFW_FALSE`. This
+hint is ignored for full screen and initially hidden windows.
+
+@anchor GLFW_AUTO_ICONIFY_hint
+__GLFW_AUTO_ICONIFY__ specifies whether the full screen window will
+automatically iconify and restore the previous video mode on input focus loss.
+Possible values are `GLFW_TRUE` and `GLFW_FALSE`. This hint is ignored for
+windowed mode windows.
+
+@anchor GLFW_FLOATING_hint
+__GLFW_FLOATING__ specifies whether the windowed mode window will be floating
+above other regular windows, also called topmost or always-on-top. This is
+intended primarily for debugging purposes and cannot be used to implement proper
+full screen windows. Possible values are `GLFW_TRUE` and `GLFW_FALSE`. This
+hint is ignored for full screen windows.
+
+@anchor GLFW_MAXIMIZED_hint
+__GLFW_MAXIMIZED__ specifies whether the windowed mode window will be maximized
+when created. Possible values are `GLFW_TRUE` and `GLFW_FALSE`. This hint is
+ignored for full screen windows.
+
+@anchor GLFW_CENTER_CURSOR_hint
+__GLFW_CENTER_CURSOR__ specifies whether the cursor should be centered over
+newly created full screen windows. Possible values are `GLFW_TRUE` and
+`GLFW_FALSE`. This hint is ignored for windowed mode windows.
+
+@anchor GLFW_TRANSPARENT_FRAMEBUFFER_hint
+__GLFW_TRANSPARENT_FRAMEBUFFER__ specifies whether the window framebuffer will
+be transparent. If enabled and supported by the system, the window framebuffer
+alpha channel will be used to combine the framebuffer with the background. This
+does not affect window decorations. Possible values are `GLFW_TRUE` and
+`GLFW_FALSE`.
+
+@anchor GLFW_FOCUS_ON_SHOW_hint
+__GLFW_FOCUS_ON_SHOW__ specifies whether the window will be given input
+focus when @ref glfwShowWindow is called. Possible values are `GLFW_TRUE` and
+`GLFW_FALSE`.
+
+@anchor GLFW_SCALE_TO_MONITOR
+__GLFW_SCALE_TO_MONITOR__ specifies whether the window content area should be
+resized based on the [monitor content scale](@ref monitor_scale) of any monitor
+it is placed on. This includes the initial placement when the window is
+created. Possible values are `GLFW_TRUE` and `GLFW_FALSE`.
+
+This hint only has an effect on platforms where screen coordinates and pixels
+always map 1:1 such as Windows and X11. On platforms like macOS the resolution
+of the framebuffer is changed independently of the window size.
+
+@anchor GLFW_MOUSE_PASSTHROUGH_hint
+__GLFW_MOUSE_PASSTHROUGH__ specifies whether the window is transparent to mouse
+input, letting any mouse events pass through to whatever window is behind it.
+This is only supported for undecorated windows. Decorated windows with this
+enabled will behave differently between platforms. Possible values are
+`GLFW_TRUE` and `GLFW_FALSE`.
+
+
+@subsubsection window_hints_fb Framebuffer related hints
+
+@anchor GLFW_RED_BITS
+@anchor GLFW_GREEN_BITS
+@anchor GLFW_BLUE_BITS
+@anchor GLFW_ALPHA_BITS
+@anchor GLFW_DEPTH_BITS
+@anchor GLFW_STENCIL_BITS
+__GLFW_RED_BITS__, __GLFW_GREEN_BITS__, __GLFW_BLUE_BITS__, __GLFW_ALPHA_BITS__,
+__GLFW_DEPTH_BITS__ and __GLFW_STENCIL_BITS__ specify the desired bit depths of
+the various components of the default framebuffer. A value of `GLFW_DONT_CARE`
+means the application has no preference.
+
+@anchor GLFW_ACCUM_RED_BITS
+@anchor GLFW_ACCUM_GREEN_BITS
+@anchor GLFW_ACCUM_BLUE_BITS
+@anchor GLFW_ACCUM_ALPHA_BITS
+__GLFW_ACCUM_RED_BITS__, __GLFW_ACCUM_GREEN_BITS__, __GLFW_ACCUM_BLUE_BITS__ and
+__GLFW_ACCUM_ALPHA_BITS__ specify the desired bit depths of the various
+components of the accumulation buffer. A value of `GLFW_DONT_CARE` means the
+application has no preference.
+
+Accumulation buffers are a legacy OpenGL feature and should not be used in new
+code.
+
+@anchor GLFW_AUX_BUFFERS
+__GLFW_AUX_BUFFERS__ specifies the desired number of auxiliary buffers. A value
+of `GLFW_DONT_CARE` means the application has no preference.
+
+Auxiliary buffers are a legacy OpenGL feature and should not be used in new
+code.
+
+@anchor GLFW_STEREO
+__GLFW_STEREO__ specifies whether to use OpenGL stereoscopic rendering.
+Possible values are `GLFW_TRUE` and `GLFW_FALSE`. This is a hard constraint.
+
+@anchor GLFW_SAMPLES
+__GLFW_SAMPLES__ specifies the desired number of samples to use for
+multisampling. Zero disables multisampling. A value of `GLFW_DONT_CARE` means
+the application has no preference.
+
+@anchor GLFW_SRGB_CAPABLE
+__GLFW_SRGB_CAPABLE__ specifies whether the framebuffer should be sRGB capable.
+Possible values are `GLFW_TRUE` and `GLFW_FALSE`.
+
+@note __OpenGL:__ If enabled and supported by the system, the
+`GL_FRAMEBUFFER_SRGB` enable will control sRGB rendering. By default, sRGB
+rendering will be disabled.
+
+@note __OpenGL ES:__ If enabled and supported by the system, the context will
+always have sRGB rendering enabled.
+
+@anchor GLFW_DOUBLEBUFFER
+@anchor GLFW_DOUBLEBUFFER_hint
+__GLFW_DOUBLEBUFFER__ specifies whether the framebuffer should be double
+buffered. You nearly always want to use double buffering. This is a hard
+constraint. Possible values are `GLFW_TRUE` and `GLFW_FALSE`.
+
+
+@subsubsection window_hints_mtr Monitor related hints
+
+@anchor GLFW_REFRESH_RATE
+__GLFW_REFRESH_RATE__ specifies the desired refresh rate for full screen
+windows. A value of `GLFW_DONT_CARE` means the highest available refresh rate
+will be used. This hint is ignored for windowed mode windows.
+
+
+@subsubsection window_hints_ctx Context related hints
+
+@anchor GLFW_CLIENT_API_hint
+__GLFW_CLIENT_API__ specifies which client API to create the context for.
+Possible values are `GLFW_OPENGL_API`, `GLFW_OPENGL_ES_API` and `GLFW_NO_API`.
+This is a hard constraint.
+
+@anchor GLFW_CONTEXT_CREATION_API_hint
+__GLFW_CONTEXT_CREATION_API__ specifies which context creation API to use to
+create the context. Possible values are `GLFW_NATIVE_CONTEXT_API`,
+`GLFW_EGL_CONTEXT_API` and `GLFW_OSMESA_CONTEXT_API`. This is a hard
+constraint. If no client API is requested, this hint is ignored.
+
+An [extension loader library](@ref context_glext_auto) that assumes it knows
+which API was used to create the current context may fail if you change this
+hint. This can be resolved by having it load functions via @ref
+glfwGetProcAddress.
+
+@note @wayland The EGL API _is_ the native context creation API, so this hint
+will have no effect.
+
+@note @x11 On some Linux systems, creating contexts via both the native and EGL
+APIs in a single process will cause the application to segfault. Stick to one
+API or the other on Linux for now.
+
+@note __OSMesa:__ As its name implies, an OpenGL context created with OSMesa
+does not update the window contents when its buffers are swapped. Use OpenGL
+functions or the OSMesa native access functions @ref glfwGetOSMesaColorBuffer
+and @ref glfwGetOSMesaDepthBuffer to retrieve the framebuffer contents.
+
+@anchor GLFW_CONTEXT_VERSION_MAJOR_hint
+@anchor GLFW_CONTEXT_VERSION_MINOR_hint
+__GLFW_CONTEXT_VERSION_MAJOR__ and __GLFW_CONTEXT_VERSION_MINOR__ specify the
+client API version that the created context must be compatible with. The exact
+behavior of these hints depends on the requested client API.
+
+While there is no way to ask the driver for a context of the highest supported
+version, GLFW will attempt to provide this when you ask for a version 1.0
+context, which is the default for these hints.
+
+Do not confuse these hints with @ref GLFW_VERSION_MAJOR and @ref
+GLFW_VERSION_MINOR, which provide the API version of the GLFW header.
+
+@note __OpenGL:__ These hints are not hard constraints, but creation will fail
+if the OpenGL version of the created context is less than the one requested. It
+is therefore perfectly safe to use the default of version 1.0 for legacy code
+and you will still get backwards-compatible contexts of version 3.0 and above
+when available.
+
+@note __OpenGL ES:__ These hints are not hard constraints, but creation will
+fail if the OpenGL ES version of the created context is less than the one
+requested. Additionally, OpenGL ES 1.x cannot be returned if 2.0 or later was
+requested, and vice versa. This is because OpenGL ES 3.x is backward compatible
+with 2.0, but OpenGL ES 2.0 is not backward compatible with 1.x.
+
+@note @macos The OS only supports core profile contexts for OpenGL versions 3.2
+and later. Before creating an OpenGL context of version 3.2 or later you must
+set the [GLFW_OPENGL_PROFILE](@ref GLFW_OPENGL_PROFILE_hint) hint accordingly.
+OpenGL 3.0 and 3.1 contexts are not supported at all on macOS.
+
+@anchor GLFW_OPENGL_FORWARD_COMPAT_hint
+__GLFW_OPENGL_FORWARD_COMPAT__ specifies whether the OpenGL context should be
+forward-compatible, i.e. one where all functionality deprecated in the requested
+version of OpenGL is removed. This must only be used if the requested OpenGL
+version is 3.0 or above. If OpenGL ES is requested, this hint is ignored.
+
+Forward-compatibility is described in detail in the
+[OpenGL Reference Manual](https://www.opengl.org/registry/).
+
+@anchor GLFW_CONTEXT_DEBUG_hint
+@anchor GLFW_OPENGL_DEBUG_CONTEXT_hint
+__GLFW_CONTEXT_DEBUG__ specifies whether the context should be created in debug
+mode, which may provide additional error and diagnostic reporting functionality.
+Possible values are `GLFW_TRUE` and `GLFW_FALSE`.
+
+Debug contexts for OpenGL and OpenGL ES are described in detail by the
+[GL_KHR_debug](https://www.khronos.org/registry/OpenGL/extensions/KHR/KHR_debug.txt)
+extension.
+
+@note `GLFW_CONTEXT_DEBUG` is the new name introduced in GLFW 3.4. The older
+`GLFW_OPENGL_DEBUG_CONTEXT` name is also available for compatibility.
+
+@anchor GLFW_OPENGL_PROFILE_hint
+__GLFW_OPENGL_PROFILE__ specifies which OpenGL profile to create the context
+for. Possible values are one of `GLFW_OPENGL_CORE_PROFILE` or
+`GLFW_OPENGL_COMPAT_PROFILE`, or `GLFW_OPENGL_ANY_PROFILE` to not request
+a specific profile. If requesting an OpenGL version below 3.2,
+`GLFW_OPENGL_ANY_PROFILE` must be used. If OpenGL ES is requested, this hint
+is ignored.
+
+OpenGL profiles are described in detail in the
+[OpenGL Reference Manual](https://www.opengl.org/registry/).
+
+@anchor GLFW_CONTEXT_ROBUSTNESS_hint
+__GLFW_CONTEXT_ROBUSTNESS__ specifies the robustness strategy to be used by the
+context. This can be one of `GLFW_NO_RESET_NOTIFICATION` or
+`GLFW_LOSE_CONTEXT_ON_RESET`, or `GLFW_NO_ROBUSTNESS` to not request
+a robustness strategy.
+
+@anchor GLFW_CONTEXT_RELEASE_BEHAVIOR_hint
+__GLFW_CONTEXT_RELEASE_BEHAVIOR__ specifies the release behavior to be
+used by the context. Possible values are one of `GLFW_ANY_RELEASE_BEHAVIOR`,
+`GLFW_RELEASE_BEHAVIOR_FLUSH` or `GLFW_RELEASE_BEHAVIOR_NONE`. If the
+behavior is `GLFW_ANY_RELEASE_BEHAVIOR`, the default behavior of the context
+creation API will be used. If the behavior is `GLFW_RELEASE_BEHAVIOR_FLUSH`,
+the pipeline will be flushed whenever the context is released from being the
+current one. If the behavior is `GLFW_RELEASE_BEHAVIOR_NONE`, the pipeline will
+not be flushed on release.
+
+Context release behaviors are described in detail by the
+[GL_KHR_context_flush_control](https://www.opengl.org/registry/specs/KHR/context_flush_control.txt)
+extension.
+
+@anchor GLFW_CONTEXT_NO_ERROR_hint
+__GLFW_CONTEXT_NO_ERROR__ specifies whether errors should be generated by the
+context. Possible values are `GLFW_TRUE` and `GLFW_FALSE`. If enabled,
+situations that would have generated errors instead cause undefined behavior.
+
+The no error mode for OpenGL and OpenGL ES is described in detail by the
+[GL_KHR_no_error](https://www.opengl.org/registry/specs/KHR/no_error.txt)
+extension.
+
+
+@subsubsection window_hints_win32 Win32 specific hints
+
+@anchor GLFW_WIN32_KEYBOARD_MENU_hint
+__GLFW_WIN32_KEYBOARD_MENU__ specifies whether to allow access to the window
+menu via the Alt+Space and Alt-and-then-Space keyboard shortcuts. This is
+ignored on other platforms.
+
+
+@subsubsection window_hints_osx macOS specific hints
+
+@anchor GLFW_COCOA_RETINA_FRAMEBUFFER_hint
+__GLFW_COCOA_RETINA_FRAMEBUFFER__ specifies whether to use full resolution
+framebuffers on Retina displays. Possible values are `GLFW_TRUE` and
+`GLFW_FALSE`. This is ignored on other platforms.
+
+@anchor GLFW_COCOA_FRAME_NAME_hint
+__GLFW_COCOA_FRAME_NAME__ specifies the UTF-8 encoded name to use for autosaving
+the window frame, or if empty disables frame autosaving for the window. This is
+ignored on other platforms. This is set with @ref glfwWindowHintString.
+
+@anchor GLFW_COCOA_GRAPHICS_SWITCHING_hint
+__GLFW_COCOA_GRAPHICS_SWITCHING__ specifies whether to participate in
+Automatic Graphics Switching, i.e. to allow the system to choose the
+integrated GPU for the OpenGL
+context and move it between GPUs if necessary or whether to force it to always
+run on the discrete GPU. This only affects systems with both integrated and
+discrete GPUs. Possible values are `GLFW_TRUE` and `GLFW_FALSE`. This is
+ignored on other platforms.
+
+Simpler programs and tools may want to enable this to save power, while games
+and other applications performing advanced rendering will want to leave it
+disabled.
+
+A bundled application that wishes to participate in Automatic Graphics Switching
+should also declare this in its `Info.plist` by setting the
+`NSSupportsAutomaticGraphicsSwitching` key to `true`.
+
+
+@subsubsection window_hints_x11 X11 specific window hints
+
+@anchor GLFW_X11_CLASS_NAME_hint
+@anchor GLFW_X11_INSTANCE_NAME_hint
+__GLFW_X11_CLASS_NAME__ and __GLFW_X11_INSTANCE_NAME__ specify the desired
+ASCII encoded class and instance parts of the ICCCM `WM_CLASS` window property.
+These are set with @ref glfwWindowHintString.
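+
+For example (the names shown are placeholders):
+
+@code
+glfwWindowHintString(GLFW_X11_CLASS_NAME, "MyApplication");
+glfwWindowHintString(GLFW_X11_INSTANCE_NAME, "my-application");
+@endcode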
+
+
+@subsubsection window_hints_values Supported and default values
+
+Window hint | Default value | Supported values
+----------------------------- | --------------------------- | ----------------
+GLFW_RESIZABLE | `GLFW_TRUE` | `GLFW_TRUE` or `GLFW_FALSE`
+GLFW_VISIBLE | `GLFW_TRUE` | `GLFW_TRUE` or `GLFW_FALSE`
+GLFW_DECORATED | `GLFW_TRUE` | `GLFW_TRUE` or `GLFW_FALSE`
+GLFW_FOCUSED | `GLFW_TRUE` | `GLFW_TRUE` or `GLFW_FALSE`
+GLFW_AUTO_ICONIFY | `GLFW_TRUE` | `GLFW_TRUE` or `GLFW_FALSE`
+GLFW_FLOATING | `GLFW_FALSE` | `GLFW_TRUE` or `GLFW_FALSE`
+GLFW_MAXIMIZED | `GLFW_FALSE` | `GLFW_TRUE` or `GLFW_FALSE`
+GLFW_CENTER_CURSOR | `GLFW_TRUE` | `GLFW_TRUE` or `GLFW_FALSE`
+GLFW_TRANSPARENT_FRAMEBUFFER | `GLFW_FALSE` | `GLFW_TRUE` or `GLFW_FALSE`
+GLFW_FOCUS_ON_SHOW | `GLFW_TRUE` | `GLFW_TRUE` or `GLFW_FALSE`
+GLFW_SCALE_TO_MONITOR | `GLFW_FALSE` | `GLFW_TRUE` or `GLFW_FALSE`
+GLFW_MOUSE_PASSTHROUGH | `GLFW_FALSE` | `GLFW_TRUE` or `GLFW_FALSE`
+GLFW_RED_BITS | 8 | 0 to `INT_MAX` or `GLFW_DONT_CARE`
+GLFW_GREEN_BITS | 8 | 0 to `INT_MAX` or `GLFW_DONT_CARE`
+GLFW_BLUE_BITS | 8 | 0 to `INT_MAX` or `GLFW_DONT_CARE`
+GLFW_ALPHA_BITS | 8 | 0 to `INT_MAX` or `GLFW_DONT_CARE`
+GLFW_DEPTH_BITS | 24 | 0 to `INT_MAX` or `GLFW_DONT_CARE`
+GLFW_STENCIL_BITS | 8 | 0 to `INT_MAX` or `GLFW_DONT_CARE`
+GLFW_ACCUM_RED_BITS | 0 | 0 to `INT_MAX` or `GLFW_DONT_CARE`
+GLFW_ACCUM_GREEN_BITS | 0 | 0 to `INT_MAX` or `GLFW_DONT_CARE`
+GLFW_ACCUM_BLUE_BITS | 0 | 0 to `INT_MAX` or `GLFW_DONT_CARE`
+GLFW_ACCUM_ALPHA_BITS | 0 | 0 to `INT_MAX` or `GLFW_DONT_CARE`
+GLFW_AUX_BUFFERS | 0 | 0 to `INT_MAX` or `GLFW_DONT_CARE`
+GLFW_SAMPLES | 0 | 0 to `INT_MAX` or `GLFW_DONT_CARE`
+GLFW_REFRESH_RATE | `GLFW_DONT_CARE` | 0 to `INT_MAX` or `GLFW_DONT_CARE`
+GLFW_STEREO | `GLFW_FALSE` | `GLFW_TRUE` or `GLFW_FALSE`
+GLFW_SRGB_CAPABLE | `GLFW_FALSE` | `GLFW_TRUE` or `GLFW_FALSE`
+GLFW_DOUBLEBUFFER | `GLFW_TRUE` | `GLFW_TRUE` or `GLFW_FALSE`
+GLFW_CLIENT_API | `GLFW_OPENGL_API` | `GLFW_OPENGL_API`, `GLFW_OPENGL_ES_API` or `GLFW_NO_API`
+GLFW_CONTEXT_CREATION_API | `GLFW_NATIVE_CONTEXT_API` | `GLFW_NATIVE_CONTEXT_API`, `GLFW_EGL_CONTEXT_API` or `GLFW_OSMESA_CONTEXT_API`
+GLFW_CONTEXT_VERSION_MAJOR | 1 | Any valid major version number of the chosen client API
+GLFW_CONTEXT_VERSION_MINOR | 0 | Any valid minor version number of the chosen client API
+GLFW_CONTEXT_ROBUSTNESS | `GLFW_NO_ROBUSTNESS` | `GLFW_NO_ROBUSTNESS`, `GLFW_NO_RESET_NOTIFICATION` or `GLFW_LOSE_CONTEXT_ON_RESET`
+GLFW_CONTEXT_RELEASE_BEHAVIOR | `GLFW_ANY_RELEASE_BEHAVIOR` | `GLFW_ANY_RELEASE_BEHAVIOR`, `GLFW_RELEASE_BEHAVIOR_FLUSH` or `GLFW_RELEASE_BEHAVIOR_NONE`
+GLFW_OPENGL_FORWARD_COMPAT | `GLFW_FALSE` | `GLFW_TRUE` or `GLFW_FALSE`
+GLFW_CONTEXT_DEBUG | `GLFW_FALSE` | `GLFW_TRUE` or `GLFW_FALSE`
+GLFW_OPENGL_PROFILE | `GLFW_OPENGL_ANY_PROFILE` | `GLFW_OPENGL_ANY_PROFILE`, `GLFW_OPENGL_COMPAT_PROFILE` or `GLFW_OPENGL_CORE_PROFILE`
+GLFW_WIN32_KEYBOARD_MENU | `GLFW_FALSE` | `GLFW_TRUE` or `GLFW_FALSE`
+GLFW_COCOA_RETINA_FRAMEBUFFER | `GLFW_TRUE` | `GLFW_TRUE` or `GLFW_FALSE`
+GLFW_COCOA_FRAME_NAME | `""` | A UTF-8 encoded frame autosave name
+GLFW_COCOA_GRAPHICS_SWITCHING | `GLFW_FALSE` | `GLFW_TRUE` or `GLFW_FALSE`
+GLFW_X11_CLASS_NAME | `""` | An ASCII encoded `WM_CLASS` class name
+GLFW_X11_INSTANCE_NAME | `""` | An ASCII encoded `WM_CLASS` instance name
+
+
+@section window_events Window event processing
+
+See @ref events.
+
+
+@section window_properties Window properties and events
+
+@subsection window_userptr User pointer
+
+Each window has a user pointer that can be set with @ref
+glfwSetWindowUserPointer and queried with @ref glfwGetWindowUserPointer. This
+can be used for any purpose you need and will not be modified by GLFW throughout
+the life-time of the window.
+
+The initial value of the pointer is `NULL`.
+
+
+@subsection window_close Window closing and close flag
+
+When the user attempts to close the window, for example by clicking the close
+widget or using a key chord like Alt+F4, the _close flag_ of the window is set.
+The window is however not actually destroyed and, unless you watch for this
+state change, nothing further happens.
+
+The current state of the close flag is returned by @ref glfwWindowShouldClose
+and can be set or cleared directly with @ref glfwSetWindowShouldClose. A common
+pattern is to use the close flag as a main loop condition.
+
+@code
+while (!glfwWindowShouldClose(window))
+{
+ render(window);
+
+ glfwSwapBuffers(window);
+ glfwPollEvents();
+}
+@endcode
+
+If you wish to be notified when the user attempts to close a window, set a close
+callback.
+
+@code
+glfwSetWindowCloseCallback(window, window_close_callback);
+@endcode
+
+The callback function is called directly _after_ the close flag has been set.
+It can be used for example to filter close requests and clear the close flag
+again unless certain conditions are met.
+
+@code
+void window_close_callback(GLFWwindow* window)
+{
+ if (!time_to_close)
+ glfwSetWindowShouldClose(window, GLFW_FALSE);
+}
+@endcode
+
+
+@subsection window_size Window size
+
+The size of a window can be changed with @ref glfwSetWindowSize. For windowed
+mode windows, this sets the size, in
+[screen coordinates](@ref coordinate_systems), of the _content area_ of the
+window. The window system may impose limits on window size.
+
+@code
+glfwSetWindowSize(window, 640, 480);
+@endcode
+
+For full screen windows, the specified size becomes the new resolution of the
+window's desired video mode. The video mode most closely matching the new
+desired video mode is set immediately. The window is resized to fit the
+resolution of the set video mode.
+
+If you wish to be notified when a window is resized, whether by the user, the
+system or your own code, set a size callback.
+
+@code
+glfwSetWindowSizeCallback(window, window_size_callback);
+@endcode
+
+The callback function receives the new size, in screen coordinates, of the
+content area of the window when the window is resized.
+
+@code
+void window_size_callback(GLFWwindow* window, int width, int height)
+{
+}
+@endcode
+
+There is also @ref glfwGetWindowSize for directly retrieving the current size of
+a window.
+
+@code
+int width, height;
+glfwGetWindowSize(window, &width, &height);
+@endcode
+
+@note Do not pass the window size to `glViewport` or other pixel-based OpenGL
+calls. The window size is in screen coordinates, not pixels. Use the
+[framebuffer size](@ref window_fbsize), which is in pixels, for pixel-based
+calls.
+
+The above functions work with the size of the content area, but decorated
+windows typically have title bars and window frames around this rectangle. You
+can retrieve the extents of these with @ref glfwGetWindowFrameSize.
+
+@code
+int left, top, right, bottom;
+glfwGetWindowFrameSize(window, &left, &top, &right, &bottom);
+@endcode
+
+The returned values are the distances, in screen coordinates, from the edges of
+the content area to the corresponding edges of the full window. As they are
+distances and not coordinates, they are always zero or positive.
+
+
+@subsection window_fbsize Framebuffer size
+
+While the size of a window is measured in screen coordinates, OpenGL works with
+pixels. The size you pass into `glViewport`, for example, should be in pixels.
+On some machines screen coordinates and pixels are the same, but on others they
+will not be. There is a second set of functions to retrieve the size, in
+pixels, of the framebuffer of a window.
+
+If you wish to be notified when the framebuffer of a window is resized, whether
+by the user or the system, set a size callback.
+
+@code
+glfwSetFramebufferSizeCallback(window, framebuffer_size_callback);
+@endcode
+
+The callback function receives the new size of the framebuffer when it is
+resized, which can for example be used to update the OpenGL viewport.
+
+@code
+void framebuffer_size_callback(GLFWwindow* window, int width, int height)
+{
+ glViewport(0, 0, width, height);
+}
+@endcode
+
+There is also @ref glfwGetFramebufferSize for directly retrieving the current
+size of the framebuffer of a window.
+
+@code
+int width, height;
+glfwGetFramebufferSize(window, &width, &height);
+glViewport(0, 0, width, height);
+@endcode
+
+The size of a framebuffer may change independently of the size of a window, for
+example if the window is dragged between a regular monitor and a high-DPI one.
+
+
+@subsection window_scale Window content scale
+
+The content scale for a window can be retrieved with @ref
+glfwGetWindowContentScale.
+
+@code
+float xscale, yscale;
+glfwGetWindowContentScale(window, &xscale, &yscale);
+@endcode
+
+The content scale is the ratio between the current DPI and the platform's
+default DPI. This is especially important for text and any UI elements. If the
+pixel dimensions of your UI scaled by this look appropriate on your machine then
+it should appear at a reasonable size on other machines regardless of their DPI
+and scaling settings. This relies on the system DPI and scaling settings being
+somewhat correct.
+
+On systems where each monitor can have its own content scale, the window
+content scale will depend on which monitor the system considers the window to be
+on.
+
+If you wish to be notified when the content scale of a window changes, whether
+because of a system setting change or because it was moved to a monitor with
+a different scale, set a content scale callback.
+
+@code
+glfwSetWindowContentScaleCallback(window, window_content_scale_callback);
+@endcode
+
+The callback function receives the new content scale of the window.
+
+@code
+void window_content_scale_callback(GLFWwindow* window, float xscale, float yscale)
+{
+ set_interface_scale(xscale, yscale);
+}
+@endcode
+
+On platforms where pixels and screen coordinates always map 1:1, the window
+will need to be resized to appear the same size when it is moved to a monitor
+with a different content scale. To have this done automatically both when the
+window is created and when its content scale later changes, set the @ref
+GLFW_SCALE_TO_MONITOR window hint.
+
+
+@subsection window_sizelimits Window size limits
+
+The minimum and maximum size of the content area of a windowed mode window can
+be enforced with @ref glfwSetWindowSizeLimits. The user may resize the window
+to any size and aspect ratio within the specified limits, unless the aspect
+ratio is also set.
+
+@code
+glfwSetWindowSizeLimits(window, 200, 200, 400, 400);
+@endcode
+
+To specify only a minimum size or only a maximum one, set the other pair to
+`GLFW_DONT_CARE`.
+
+@code
+glfwSetWindowSizeLimits(window, 640, 480, GLFW_DONT_CARE, GLFW_DONT_CARE);
+@endcode
+
+To disable size limits for a window, set them all to `GLFW_DONT_CARE`.
+
+The aspect ratio of the content area of a windowed mode window can be enforced
+with @ref glfwSetWindowAspectRatio. The user may resize the window freely
+unless size limits are also set, but the size will be constrained to maintain
+the aspect ratio.
+
+@code
+glfwSetWindowAspectRatio(window, 16, 9);
+@endcode
+
+The aspect ratio is specified as a numerator and denominator, corresponding to
+the width and height, respectively. If you want a window to maintain its
+current aspect ratio, use its current size as the ratio.
+
+@code
+int width, height;
+glfwGetWindowSize(window, &width, &height);
+glfwSetWindowAspectRatio(window, width, height);
+@endcode
+
+To disable the aspect ratio limit for a window, set both terms to
+`GLFW_DONT_CARE`.
+
+You can have both size limits and aspect ratio set for a window, but the results
+are undefined if they conflict.
+
+
+@subsection window_pos Window position
+
+The position of a windowed-mode window can be changed with @ref
+glfwSetWindowPos. This moves the window so that the upper-left corner of its
+content area has the specified [screen coordinates](@ref coordinate_systems).
+The window system may put limitations on window placement.
+
+@code
+glfwSetWindowPos(window, 100, 100);
+@endcode
+
+If you wish to be notified when a window is moved, whether by the user, the
+system or your own code, set a position callback.
+
+@code
+glfwSetWindowPosCallback(window, window_pos_callback);
+@endcode
+
+The callback function receives the new position, in screen coordinates, of the
+upper-left corner of the content area when the window is moved.
+
+@code
+void window_pos_callback(GLFWwindow* window, int xpos, int ypos)
+{
+}
+@endcode
+
+There is also @ref glfwGetWindowPos for directly retrieving the current position
+of the content area of the window.
+
+@code
+int xpos, ypos;
+glfwGetWindowPos(window, &xpos, &ypos);
+@endcode
+
+
+@subsection window_title Window title
+
+All GLFW windows have a title, although undecorated or full screen windows may
+not display it or only display it in a task bar or similar interface. You can
+set a UTF-8 encoded window title with @ref glfwSetWindowTitle.
+
+@code
+glfwSetWindowTitle(window, "My Window");
+@endcode
+
+The specified string is copied before the function returns, so there is no need
+to keep it around.
+
+As long as your source file is encoded as UTF-8, you can use any Unicode
+characters directly in the source.
+
+@code
+glfwSetWindowTitle(window, "ラストエグザイル");
+@endcode
+
+If you are using C++11 or C11, you can use a UTF-8 string literal.
+
+@code
+glfwSetWindowTitle(window, u8"This is always a UTF-8 string");
+@endcode
+
+
+@subsection window_icon Window icon
+
+Decorated windows have icons on some platforms. You can set this icon by
+specifying a list of candidate images with @ref glfwSetWindowIcon.
+
+@code
+GLFWimage images[2];
+images[0] = load_icon("my_icon.png");
+images[1] = load_icon("my_icon_small.png");
+
+glfwSetWindowIcon(window, 2, images);
+@endcode
+
+The image data is 32-bit, little-endian, non-premultiplied RGBA, i.e. eight bits
+per channel with the red channel first. The pixels are arranged canonically as
+sequential rows, starting from the top-left corner.
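+
+If you generate the pixels yourself instead of loading them from files, you can
+fill in a @ref GLFWimage directly, for example a 16x16 all-white icon:
+
+@code
+unsigned char pixels[16 * 16 * 4];
+memset(pixels, 0xff, sizeof(pixels));
+
+GLFWimage image;
+image.width = 16;
+image.height = 16;
+image.pixels = pixels;
+
+glfwSetWindowIcon(window, 1, &image);
+@endcode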
+
+To revert to the default window icon, pass in an empty image array.
+
+@code
+glfwSetWindowIcon(window, 0, NULL);
+@endcode
+
+
+@subsection window_monitor Window monitor
+
+Full screen windows are associated with a specific monitor. You can get the
+handle for this monitor with @ref glfwGetWindowMonitor.
+
+@code
+GLFWmonitor* monitor = glfwGetWindowMonitor(window);
+@endcode
+
+This monitor handle is one of those returned by @ref glfwGetMonitors.
+
+For windowed mode windows, this function returns `NULL`. This is how to tell
+full screen windows from windowed mode windows.
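+
+For example:
+
+@code
+if (glfwGetWindowMonitor(window))
+{
+    // The window is in full screen mode
+}
+else
+{
+    // The window is in windowed mode
+}
+@endcode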
+
+You can move windows between monitors or between full screen and windowed mode
+with @ref glfwSetWindowMonitor. When making a window full screen on the same or
+on a different monitor, specify the desired monitor, resolution and refresh
+rate. The position arguments are ignored.
+
+@code
+const GLFWvidmode* mode = glfwGetVideoMode(monitor);
+
+glfwSetWindowMonitor(window, monitor, 0, 0, mode->width, mode->height, mode->refreshRate);
+@endcode
+
+When making the window windowed, specify the desired position and size. The
+refresh rate argument is ignored.
+
+@code
+glfwSetWindowMonitor(window, NULL, xpos, ypos, width, height, 0);
+@endcode
+
+This restores any previous window settings such as whether it is decorated,
+floating, resizable, has size or aspect ratio limits, etc. To restore a window
+that was originally windowed to its original size and position, save these
+before making it full screen and then pass them in as above.
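+
+A sketch of this round trip, saving the windowed position and size before going
+full screen and restoring them afterwards, could look like this:
+
+@code
+int xpos, ypos, width, height;
+
+// Save the windowed position and size before switching to full screen
+glfwGetWindowPos(window, &xpos, &ypos);
+glfwGetWindowSize(window, &width, &height);
+
+const GLFWvidmode* mode = glfwGetVideoMode(monitor);
+glfwSetWindowMonitor(window, monitor, 0, 0, mode->width, mode->height, mode->refreshRate);
+
+// ... later, switch back to the original windowed mode window
+glfwSetWindowMonitor(window, NULL, xpos, ypos, width, height, 0);
+@endcode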
+
+
+@subsection window_iconify Window iconification
+
+Windows can be iconified (i.e. minimized) with @ref glfwIconifyWindow.
+
+@code
+glfwIconifyWindow(window);
+@endcode
+
+When a full screen window is iconified, the original video mode of its monitor
+is restored until the user or application restores the window.
+
+Iconified windows can be restored with @ref glfwRestoreWindow. This function
+also restores windows from maximization.
+
+@code
+glfwRestoreWindow(window);
+@endcode
+
+When a full screen window is restored, the desired video mode is restored to its
+monitor as well.
+
+If you wish to be notified when a window is iconified or restored, whether by
+the user, system or your own code, set an iconify callback.
+
+@code
+glfwSetWindowIconifyCallback(window, window_iconify_callback);
+@endcode
+
+The callback function receives changes in the iconification state of the window.
+
+@code
+void window_iconify_callback(GLFWwindow* window, int iconified)
+{
+ if (iconified)
+ {
+ // The window was iconified
+ }
+ else
+ {
+ // The window was restored
+ }
+}
+@endcode
+
+You can also get the current iconification state with @ref glfwGetWindowAttrib.
+
+@code
+int iconified = glfwGetWindowAttrib(window, GLFW_ICONIFIED);
+@endcode
+
+
+@subsection window_maximize Window maximization
+
+Windows can be maximized (i.e. zoomed) with @ref glfwMaximizeWindow.
+
+@code
+glfwMaximizeWindow(window);
+@endcode
+
+Full screen windows cannot be maximized and passing a full screen window to this
+function does nothing.
+
+Maximized windows can be restored with @ref glfwRestoreWindow. This function
+also restores windows from iconification.
+
+@code
+glfwRestoreWindow(window);
+@endcode
+
+If you wish to be notified when a window is maximized or restored, whether by
+the user, system or your own code, set a maximize callback.
+
+@code
+glfwSetWindowMaximizeCallback(window, window_maximize_callback);
+@endcode
+
+The callback function receives changes in the maximization state of the window.
+
+@code
+void window_maximize_callback(GLFWwindow* window, int maximized)
+{
+ if (maximized)
+ {
+ // The window was maximized
+ }
+ else
+ {
+ // The window was restored
+ }
+}
+@endcode
+
+You can also get the current maximization state with @ref glfwGetWindowAttrib.
+
+@code
+int maximized = glfwGetWindowAttrib(window, GLFW_MAXIMIZED);
+@endcode
+
+By default, newly created windows are not maximized. You can change this
+behavior by setting the [GLFW_MAXIMIZED](@ref GLFW_MAXIMIZED_hint) window hint
+before creating the window.
+
+@code
+glfwWindowHint(GLFW_MAXIMIZED, GLFW_TRUE);
+@endcode
+
+
+@subsection window_hide Window visibility
+
+Windowed mode windows can be hidden with @ref glfwHideWindow.
+
+@code
+glfwHideWindow(window);
+@endcode
+
+This makes the window completely invisible to the user, including removing it
+from the task bar, dock or window list. Full screen windows cannot be hidden
+and calling @ref glfwHideWindow on a full screen window does nothing.
+
+Hidden windows can be shown with @ref glfwShowWindow.
+
+@code
+glfwShowWindow(window);
+@endcode
+
+By default, this function will also set the input focus to that window. Set
+the [GLFW_FOCUS_ON_SHOW](@ref GLFW_FOCUS_ON_SHOW_hint) window hint to change
+this behavior for all newly created windows, or change the behavior for an
+existing window with @ref glfwSetWindowAttrib.
+
+You can also get the current visibility state with @ref glfwGetWindowAttrib.
+
+@code
+int visible = glfwGetWindowAttrib(window, GLFW_VISIBLE);
+@endcode
+
+By default, newly created windows are visible. You can change this behavior by
+setting the [GLFW_VISIBLE](@ref GLFW_VISIBLE_hint) window hint before creating
+the window.
+
+@code
+glfwWindowHint(GLFW_VISIBLE, GLFW_FALSE);
+@endcode
+
+Windows created hidden are completely invisible to the user until shown. This
+can be useful if you need to set up your window further before showing it, for
+example moving it to a specific location.
+
+
+@subsection window_focus Window input focus
+
+Windows can be given input focus and brought to the front with @ref
+glfwFocusWindow.
+
+@code
+glfwFocusWindow(window);
+@endcode
+
+Keep in mind that it can be very disruptive to the user when a window is forced
+to the top. For a less disruptive way of getting the user's attention, see
+[attention requests](@ref window_attention).
+
+If you wish to be notified when a window gains or loses input focus, whether by
+the user, system or your own code, set a focus callback.
+
+@code
+glfwSetWindowFocusCallback(window, window_focus_callback);
+@endcode
+
+The callback function receives changes in the input focus state of the window.
+
+@code
+void window_focus_callback(GLFWwindow* window, int focused)
+{
+ if (focused)
+ {
+ // The window gained input focus
+ }
+ else
+ {
+ // The window lost input focus
+ }
+}
+@endcode
+
+You can also get the current input focus state with @ref glfwGetWindowAttrib.
+
+@code
+int focused = glfwGetWindowAttrib(window, GLFW_FOCUSED);
+@endcode
+
+By default, newly created windows are given input focus. You can change this
+behavior by setting the [GLFW_FOCUSED](@ref GLFW_FOCUSED_hint) window hint
+before creating the window.
+
+@code
+glfwWindowHint(GLFW_FOCUSED, GLFW_FALSE);
+@endcode
+
+
+@subsection window_attention Window attention request
+
+If you wish to notify the user of an event without interrupting, you can request
+attention with @ref glfwRequestWindowAttention.
+
+@code
+glfwRequestWindowAttention(window);
+@endcode
+
+The system will highlight the specified window, or on platforms where this is
+not supported, the application as a whole. Once the user has given it
+attention, the system will automatically end the request.
+
+
+@subsection window_refresh Window damage and refresh
+
+If you wish to be notified when the contents of a window are damaged and need
+to be refreshed, set a window refresh callback.
+
+@code
+glfwSetWindowRefreshCallback(window, window_refresh_callback);
+@endcode
+
+The callback function is called when the contents of the window need to be
+refreshed.
+
+@code
+void window_refresh_callback(GLFWwindow* window)
+{
+ draw_editor_ui(window);
+ glfwSwapBuffers(window);
+}
+@endcode
+
+@note On compositing window systems such as Aero, Compiz or Aqua, where the
+window contents are saved off-screen, this callback might only be called when
+the window or framebuffer is resized.
+
+
+@subsection window_transparency Window transparency
+
+GLFW supports two kinds of transparency for windows: framebuffer transparency
+and whole window transparency. A single window may not use both methods. The
+results of doing this are undefined.
+
+Both methods require the platform to support it and not every version of every
+platform GLFW supports does this, so there are mechanisms to check whether the
+window really is transparent.
+
+Window framebuffers can be made transparent on a per-pixel per-frame basis with
+the [GLFW_TRANSPARENT_FRAMEBUFFER](@ref GLFW_TRANSPARENT_FRAMEBUFFER_hint)
+window hint.
+
+@code
+glfwWindowHint(GLFW_TRANSPARENT_FRAMEBUFFER, GLFW_TRUE);
+@endcode
+
+If supported by the system, the window content area will be composited with the
+background using the framebuffer per-pixel alpha channel. This requires desktop
+compositing to be enabled on the system. It does not affect window decorations.
+
+You can check whether the window framebuffer was successfully made transparent
+with the
+[GLFW_TRANSPARENT_FRAMEBUFFER](@ref GLFW_TRANSPARENT_FRAMEBUFFER_attrib)
+window attribute.
+
+@code
+if (glfwGetWindowAttrib(window, GLFW_TRANSPARENT_FRAMEBUFFER))
+{
+ // window framebuffer is currently transparent
+}
+@endcode
+
+GLFW comes with an example called `gears` that enables framebuffer transparency.
+
+The opacity of the whole window, including any decorations, can be set with @ref
+glfwSetWindowOpacity.
+
+@code
+glfwSetWindowOpacity(window, 0.5f);
+@endcode
+
+The opacity (or alpha) value is a positive finite number between zero and one,
+where 0 (zero) is fully transparent and 1 (one) is fully opaque. The initial
+opacity value for newly created windows is 1.
+
+The current opacity of a window can be queried with @ref glfwGetWindowOpacity.
+
+@code
+float opacity = glfwGetWindowOpacity(window);
+@endcode
+
+If the system does not support whole window transparency, this function always
+returns one.
+
+GLFW comes with a test program that lets you control whole window transparency
+at run-time called `window`.
+
+If you want to use either of these transparency methods to display a temporary
+overlay like for example a notification, the @ref GLFW_FLOATING and @ref
+GLFW_MOUSE_PASSTHROUGH window hints and attributes may be useful.
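+
+For example, a click-through notification overlay might combine these hints
+before creating its window; which of them you actually need depends on the
+overlay:
+
+@code
+glfwWindowHint(GLFW_TRANSPARENT_FRAMEBUFFER, GLFW_TRUE);
+glfwWindowHint(GLFW_DECORATED, GLFW_FALSE);
+glfwWindowHint(GLFW_FLOATING, GLFW_TRUE);
+glfwWindowHint(GLFW_MOUSE_PASSTHROUGH, GLFW_TRUE);
+@endcode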
+
+
+@subsection window_attribs Window attributes
+
+Windows have a number of attributes that can be returned using @ref
+glfwGetWindowAttrib. Some reflect state that may change as a result of user
+interaction (e.g. whether it has input focus), while others reflect inherent
+properties of the window (e.g. what kind of border it has). Some are related to
+the window and others to its OpenGL or OpenGL ES context.
+
+@code
+if (glfwGetWindowAttrib(window, GLFW_FOCUSED))
+{
+ // window has input focus
+}
+@endcode
+
+The [GLFW_DECORATED](@ref GLFW_DECORATED_attrib),
+[GLFW_RESIZABLE](@ref GLFW_RESIZABLE_attrib),
+[GLFW_FLOATING](@ref GLFW_FLOATING_attrib),
+[GLFW_AUTO_ICONIFY](@ref GLFW_AUTO_ICONIFY_attrib) and
+[GLFW_FOCUS_ON_SHOW](@ref GLFW_FOCUS_ON_SHOW_attrib) window attributes can be
+changed with @ref glfwSetWindowAttrib.
+
+@code
+glfwSetWindowAttrib(window, GLFW_RESIZABLE, GLFW_FALSE);
+@endcode
+
+
+
+@subsubsection window_attribs_wnd Window related attributes
+
+@anchor GLFW_FOCUSED_attrib
+__GLFW_FOCUSED__ indicates whether the specified window has input focus. See
+@ref window_focus for details.
+
+@anchor GLFW_ICONIFIED_attrib
+__GLFW_ICONIFIED__ indicates whether the specified window is iconified.
+See @ref window_iconify for details.
+
+@anchor GLFW_MAXIMIZED_attrib
+__GLFW_MAXIMIZED__ indicates whether the specified window is maximized. See
+@ref window_maximize for details.
+
+@anchor GLFW_HOVERED_attrib
+__GLFW_HOVERED__ indicates whether the cursor is currently directly over the
+content area of the window, with no other windows between. See @ref
+cursor_enter for details.
+
+@anchor GLFW_VISIBLE_attrib
+__GLFW_VISIBLE__ indicates whether the specified window is visible. See @ref
+window_hide for details.
+
+@anchor GLFW_RESIZABLE_attrib
+__GLFW_RESIZABLE__ indicates whether the specified window is resizable _by the
+user_. This can be set before creation with the
+[GLFW_RESIZABLE](@ref GLFW_RESIZABLE_hint) window hint or after with @ref
+glfwSetWindowAttrib.
+
+@anchor GLFW_DECORATED_attrib
+__GLFW_DECORATED__ indicates whether the specified window has decorations such
+as a border, a close widget, etc. This can be set before creation with the
+[GLFW_DECORATED](@ref GLFW_DECORATED_hint) window hint or after with @ref
+glfwSetWindowAttrib.
+
+@anchor GLFW_AUTO_ICONIFY_attrib
+__GLFW_AUTO_ICONIFY__ indicates whether the specified full screen window is
+iconified on focus loss. This can be set before creation
+with the [GLFW_AUTO_ICONIFY](@ref GLFW_AUTO_ICONIFY_hint) window hint or after
+with @ref glfwSetWindowAttrib.
+
+@anchor GLFW_FLOATING_attrib
+__GLFW_FLOATING__ indicates whether the specified window is floating, also
+called topmost or always-on-top. This can be set before creation with the
+[GLFW_FLOATING](@ref GLFW_FLOATING_hint) window hint or after with @ref
+glfwSetWindowAttrib.
+
+@anchor GLFW_TRANSPARENT_FRAMEBUFFER_attrib
+__GLFW_TRANSPARENT_FRAMEBUFFER__ indicates whether the specified window has
+a transparent framebuffer, i.e. the window contents are composited with the
+background using the window framebuffer alpha channel. See @ref
+window_transparency for details.
+
+@anchor GLFW_FOCUS_ON_SHOW_attrib
+__GLFW_FOCUS_ON_SHOW__ specifies whether the window will be given input
+focus when @ref glfwShowWindow is called. This can be set before creation
+with the [GLFW_FOCUS_ON_SHOW](@ref GLFW_FOCUS_ON_SHOW_hint) window hint or
+after with @ref glfwSetWindowAttrib.
+
+@anchor GLFW_MOUSE_PASSTHROUGH_attrib
+__GLFW_MOUSE_PASSTHROUGH__ specifies whether the window is transparent to mouse
+input, letting any mouse events pass through to whatever window is behind it.
+This can be set before creation with the
+[GLFW_MOUSE_PASSTHROUGH](@ref GLFW_MOUSE_PASSTHROUGH_hint) window hint or after
+with @ref glfwSetWindowAttrib. This is only supported for undecorated windows.
+Decorated windows with this enabled will behave differently between platforms.
+
+
+@subsubsection window_attribs_ctx Context related attributes
+
+@anchor GLFW_CLIENT_API_attrib
+__GLFW_CLIENT_API__ indicates the client API provided by the window's context;
+either `GLFW_OPENGL_API`, `GLFW_OPENGL_ES_API` or `GLFW_NO_API`.
+
+@anchor GLFW_CONTEXT_CREATION_API_attrib
+__GLFW_CONTEXT_CREATION_API__ indicates the context creation API used to create
+the window's context; either `GLFW_NATIVE_CONTEXT_API`, `GLFW_EGL_CONTEXT_API`
+or `GLFW_OSMESA_CONTEXT_API`.
+
+@anchor GLFW_CONTEXT_VERSION_MAJOR_attrib
+@anchor GLFW_CONTEXT_VERSION_MINOR_attrib
+@anchor GLFW_CONTEXT_REVISION_attrib
+__GLFW_CONTEXT_VERSION_MAJOR__, __GLFW_CONTEXT_VERSION_MINOR__ and
+__GLFW_CONTEXT_REVISION__ indicate the client API version of the window's
+context.
+
+@note Do not confuse these attributes with `GLFW_VERSION_MAJOR`,
+`GLFW_VERSION_MINOR` and `GLFW_VERSION_REVISION` which provide the API version
+of the GLFW header.
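+
+As a sketch, assuming `window` has a client API context, the version of the
+created context can be queried like any other attribute:
+
+@code
+int major = glfwGetWindowAttrib(window, GLFW_CONTEXT_VERSION_MAJOR);
+int minor = glfwGetWindowAttrib(window, GLFW_CONTEXT_VERSION_MINOR);
+int revision = glfwGetWindowAttrib(window, GLFW_CONTEXT_REVISION);
+@endcode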
+
+@anchor GLFW_OPENGL_FORWARD_COMPAT_attrib
+__GLFW_OPENGL_FORWARD_COMPAT__ is `GLFW_TRUE` if the window's context is an
+OpenGL forward-compatible one, or `GLFW_FALSE` otherwise.
+
+@anchor GLFW_CONTEXT_DEBUG_attrib
+@anchor GLFW_OPENGL_DEBUG_CONTEXT_attrib
+__GLFW_CONTEXT_DEBUG__ is `GLFW_TRUE` if the window's context is in debug
+mode, or `GLFW_FALSE` otherwise.
+
+@par
+This is the new name, introduced in GLFW 3.4. The older
+`GLFW_OPENGL_DEBUG_CONTEXT` name is also available for compatibility.
+
+@anchor GLFW_OPENGL_PROFILE_attrib
+__GLFW_OPENGL_PROFILE__ indicates the OpenGL profile used by the context. This
+is `GLFW_OPENGL_CORE_PROFILE` or `GLFW_OPENGL_COMPAT_PROFILE` if the context
+uses a known profile, or `GLFW_OPENGL_ANY_PROFILE` if the OpenGL profile is
+unknown or the context is an OpenGL ES context. Note that the returned profile
+may not match the profile bits of the context flags, as GLFW will try other
+means of detecting the profile when no bits are set.
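+
+For example, a minimal check for a core profile context:
+
+@code
+if (glfwGetWindowAttrib(window, GLFW_OPENGL_PROFILE) == GLFW_OPENGL_CORE_PROFILE)
+{
+    // the context uses the OpenGL core profile
+}
+@endcode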
+
+@anchor GLFW_CONTEXT_RELEASE_BEHAVIOR_attrib
+__GLFW_CONTEXT_RELEASE_BEHAVIOR__ indicates the release behavior used by the
+context. Possible values are `GLFW_ANY_RELEASE_BEHAVIOR`,
+`GLFW_RELEASE_BEHAVIOR_FLUSH` or `GLFW_RELEASE_BEHAVIOR_NONE`. If the
+behavior is `GLFW_ANY_RELEASE_BEHAVIOR`, the default behavior of the context
+creation API will be used. If the behavior is `GLFW_RELEASE_BEHAVIOR_FLUSH`,
+the pipeline will be flushed whenever the context is released from being the
+current one. If the behavior is `GLFW_RELEASE_BEHAVIOR_NONE`, the pipeline will
+not be flushed on release.
+
+@anchor GLFW_CONTEXT_NO_ERROR_attrib
+__GLFW_CONTEXT_NO_ERROR__ indicates whether errors are generated by the context.
+Possible values are `GLFW_TRUE` and `GLFW_FALSE`. If enabled, situations that
+would have generated errors instead cause undefined behavior.
+
+@anchor GLFW_CONTEXT_ROBUSTNESS_attrib
+__GLFW_CONTEXT_ROBUSTNESS__ indicates the robustness strategy used by the
+context. This is `GLFW_LOSE_CONTEXT_ON_RESET` or `GLFW_NO_RESET_NOTIFICATION`
+if the window's context supports robustness, or `GLFW_NO_ROBUSTNESS` otherwise.
+
+
+@subsubsection window_attribs_fb Framebuffer related attributes
+
+GLFW does not expose most attributes of the default framebuffer (i.e. the
+framebuffer attached to the window) as these can be queried directly with either
+OpenGL, OpenGL ES or Vulkan. The one exception is
+[GLFW_DOUBLEBUFFER](@ref GLFW_DOUBLEBUFFER_attrib), as this is not provided by
+OpenGL ES.
+
+If you are using version 3.0 or later of OpenGL or OpenGL ES, the
+`glGetFramebufferAttachmentParameteriv` function can be used to retrieve the
+number of bits for the red, green, blue, alpha, depth and stencil buffer
+channels. Otherwise, the `glGetIntegerv` function can be used.
+
+The number of MSAA samples is always retrieved with `glGetIntegerv`. For
+contexts supporting framebuffer objects, the number of samples of the currently
+bound framebuffer is returned.
+
+Attribute | glGetIntegerv | glGetFramebufferAttachmentParameteriv
+------------ | ----------------- | -------------------------------------
+Red bits | `GL_RED_BITS` | `GL_FRAMEBUFFER_ATTACHMENT_RED_SIZE`
+Green bits | `GL_GREEN_BITS` | `GL_FRAMEBUFFER_ATTACHMENT_GREEN_SIZE`
+Blue bits | `GL_BLUE_BITS` | `GL_FRAMEBUFFER_ATTACHMENT_BLUE_SIZE`
+Alpha bits | `GL_ALPHA_BITS` | `GL_FRAMEBUFFER_ATTACHMENT_ALPHA_SIZE`
+Depth bits | `GL_DEPTH_BITS` | `GL_FRAMEBUFFER_ATTACHMENT_DEPTH_SIZE`
+Stencil bits | `GL_STENCIL_BITS` | `GL_FRAMEBUFFER_ATTACHMENT_STENCIL_SIZE`
+MSAA samples | `GL_SAMPLES` | _Not provided by this function_
+
+When calling `glGetFramebufferAttachmentParameteriv`, the red, green, blue and
+alpha sizes are queried from the `GL_BACK_LEFT` attachment, while the depth
+and stencil sizes are queried from the `GL_DEPTH` and `GL_STENCIL`
+attachments, respectively.
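+
+A minimal sketch of such a query, assuming an OpenGL 3.0 or later context is
+current and the default framebuffer (with a depth buffer) is bound:
+
+@code
+GLint red_bits, depth_bits;
+glGetFramebufferAttachmentParameteriv(GL_DRAW_FRAMEBUFFER, GL_BACK_LEFT,
+                                      GL_FRAMEBUFFER_ATTACHMENT_RED_SIZE,
+                                      &red_bits);
+glGetFramebufferAttachmentParameteriv(GL_DRAW_FRAMEBUFFER, GL_DEPTH,
+                                      GL_FRAMEBUFFER_ATTACHMENT_DEPTH_SIZE,
+                                      &depth_bits);
+@endcode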
+
+@anchor GLFW_DOUBLEBUFFER_attrib
+__GLFW_DOUBLEBUFFER__ indicates whether the specified window is double-buffered
+when rendering with OpenGL or OpenGL ES. This can be set before creation with
+the [GLFW_DOUBLEBUFFER](@ref GLFW_DOUBLEBUFFER_hint) window hint.
+
+
+@section buffer_swap Buffer swapping
+
+GLFW windows are by default double buffered. That means that you have two
+rendering buffers: a front buffer and a back buffer. The front buffer is
+the one being displayed and the back buffer is the one you render to.
+
+When the entire frame has been rendered, it is time to swap the back and the
+front buffers in order to display what has been rendered and begin rendering
+a new frame. This is done with @ref glfwSwapBuffers.
+
+@code
+glfwSwapBuffers(window);
+@endcode
+
+Sometimes it can be useful to select when the buffer swap will occur. With the
+function @ref glfwSwapInterval it is possible to select the minimum number of
+monitor refreshes the driver should wait from the time @ref glfwSwapBuffers was
+called before swapping the buffers:
+
+@code
+glfwSwapInterval(1);
+@endcode
+
+If the interval is zero, the swap will take place immediately when @ref
+glfwSwapBuffers is called without waiting for a refresh. Otherwise at least
+interval retraces will pass between each buffer swap. Using a swap interval of
+zero can be useful for benchmarking purposes, when it is not desirable to
+measure the time it takes to wait for the vertical retrace. However, a swap
+interval of one lets you avoid tearing.
+
+Note that this may not work on all machines, as some drivers have
+user-controlled settings that override any swap interval the application
+requests.
+
+A context that supports either the `WGL_EXT_swap_control_tear` or the
+`GLX_EXT_swap_control_tear` extension also accepts _negative_ swap intervals,
+which allows the driver to swap immediately even if a frame arrives a little bit
+late. This trades the risk of visible tears for greater framerate stability.
+You can check for these extensions with @ref glfwExtensionSupported.
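+
+A minimal sketch of opting into this behavior only where it is available,
+assuming the window's context is current:
+
+@code
+if (glfwExtensionSupported("WGL_EXT_swap_control_tear") ||
+    glfwExtensionSupported("GLX_EXT_swap_control_tear"))
+    glfwSwapInterval(-1);
+else
+    glfwSwapInterval(1);
+@endcode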
+
+*/
diff --git a/chromium/third_party/dawn/third_party/glfw/examples/CMakeLists.txt b/chromium/third_party/dawn/third_party/glfw/examples/CMakeLists.txt
new file mode 100644
index 00000000000..e7a037976f5
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/examples/CMakeLists.txt
@@ -0,0 +1,83 @@
+
+link_libraries(glfw)
+
+include_directories("${GLFW_SOURCE_DIR}/deps")
+
+if (MATH_LIBRARY)
+ link_libraries("${MATH_LIBRARY}")
+endif()
+
+# Workaround for the MS CRT deprecating parts of the standard library
+if (MSVC OR CMAKE_C_SIMULATE_ID STREQUAL "MSVC")
+ add_definitions(-D_CRT_SECURE_NO_WARNINGS)
+endif()
+
+if (WIN32)
+ set(ICON glfw.rc)
+elseif (APPLE)
+ set(ICON glfw.icns)
+endif()
+
+set(GLAD_GL "${GLFW_SOURCE_DIR}/deps/glad/gl.h")
+set(GLAD_GLES2 "${GLFW_SOURCE_DIR}/deps/glad/gles2.h")
+set(GETOPT "${GLFW_SOURCE_DIR}/deps/getopt.h"
+ "${GLFW_SOURCE_DIR}/deps/getopt.c")
+set(TINYCTHREAD "${GLFW_SOURCE_DIR}/deps/tinycthread.h"
+ "${GLFW_SOURCE_DIR}/deps/tinycthread.c")
+
+add_executable(boing WIN32 MACOSX_BUNDLE boing.c ${ICON} ${GLAD_GL})
+add_executable(gears WIN32 MACOSX_BUNDLE gears.c ${ICON} ${GLAD_GL})
+add_executable(heightmap WIN32 MACOSX_BUNDLE heightmap.c ${ICON} ${GLAD_GL})
+add_executable(offscreen offscreen.c ${ICON} ${GLAD_GL})
+add_executable(particles WIN32 MACOSX_BUNDLE particles.c ${ICON} ${TINYCTHREAD} ${GETOPT} ${GLAD_GL})
+add_executable(sharing WIN32 MACOSX_BUNDLE sharing.c ${ICON} ${GLAD_GL})
+add_executable(splitview WIN32 MACOSX_BUNDLE splitview.c ${ICON} ${GLAD_GL})
+add_executable(triangle-opengl WIN32 MACOSX_BUNDLE triangle-opengl.c ${ICON} ${GLAD_GL})
+add_executable(triangle-opengles WIN32 MACOSX_BUNDLE triangle-opengles.c ${ICON} ${GLAD_GLES2})
+add_executable(wave WIN32 MACOSX_BUNDLE wave.c ${ICON} ${GLAD_GL})
+add_executable(windows WIN32 MACOSX_BUNDLE windows.c ${ICON} ${GLAD_GL})
+
+target_link_libraries(particles Threads::Threads)
+if (RT_LIBRARY)
+ target_link_libraries(particles "${RT_LIBRARY}")
+endif()
+
+set(GUI_ONLY_BINARIES boing gears heightmap particles sharing splitview
+ triangle-opengl triangle-opengles wave windows)
+set(CONSOLE_BINARIES offscreen)
+
+set_target_properties(${GUI_ONLY_BINARIES} ${CONSOLE_BINARIES} PROPERTIES
+ C_STANDARD 99
+ FOLDER "GLFW3/Examples")
+
+if (MSVC)
+ # Tell MSVC to use main instead of WinMain
+ set_target_properties(${GUI_ONLY_BINARIES} PROPERTIES
+ LINK_FLAGS "/ENTRY:mainCRTStartup")
+elseif (CMAKE_C_SIMULATE_ID STREQUAL "MSVC")
+ # Tell Clang using MS CRT to use main instead of WinMain
+ set_target_properties(${GUI_ONLY_BINARIES} PROPERTIES
+ LINK_FLAGS "-Wl,/entry:mainCRTStartup")
+endif()
+
+if (APPLE)
+ set_target_properties(boing PROPERTIES MACOSX_BUNDLE_BUNDLE_NAME "Boing")
+ set_target_properties(gears PROPERTIES MACOSX_BUNDLE_BUNDLE_NAME "Gears")
+ set_target_properties(heightmap PROPERTIES MACOSX_BUNDLE_BUNDLE_NAME "Heightmap")
+ set_target_properties(particles PROPERTIES MACOSX_BUNDLE_BUNDLE_NAME "Particles")
+ set_target_properties(sharing PROPERTIES MACOSX_BUNDLE_BUNDLE_NAME "Sharing")
+ set_target_properties(triangle-opengl PROPERTIES MACOSX_BUNDLE_BUNDLE_NAME "OpenGL Triangle")
+ set_target_properties(triangle-opengles PROPERTIES MACOSX_BUNDLE_BUNDLE_NAME "OpenGL ES Triangle")
+ set_target_properties(splitview PROPERTIES MACOSX_BUNDLE_BUNDLE_NAME "SplitView")
+ set_target_properties(wave PROPERTIES MACOSX_BUNDLE_BUNDLE_NAME "Wave")
+ set_target_properties(windows PROPERTIES MACOSX_BUNDLE_BUNDLE_NAME "Windows")
+
+ set_source_files_properties(glfw.icns PROPERTIES
+ MACOSX_PACKAGE_LOCATION "Resources")
+ set_target_properties(${GUI_ONLY_BINARIES} PROPERTIES
+ MACOSX_BUNDLE_SHORT_VERSION_STRING ${GLFW_VERSION}
+ MACOSX_BUNDLE_LONG_VERSION_STRING ${GLFW_VERSION}
+ MACOSX_BUNDLE_ICON_FILE glfw.icns
+ MACOSX_BUNDLE_INFO_PLIST "${GLFW_SOURCE_DIR}/CMake/Info.plist.in")
+endif()
+
diff --git a/chromium/third_party/dawn/third_party/glfw/examples/boing.c b/chromium/third_party/dawn/third_party/glfw/examples/boing.c
new file mode 100644
index 00000000000..ec118a3a82a
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/examples/boing.c
@@ -0,0 +1,680 @@
+/*****************************************************************************
+ * Title: GLBoing
+ * Desc: Tribute to Amiga Boing.
+ * Author: Jim Brooks <gfx@jimbrooks.org>
+ * Original Amiga authors were R.J. Mical and Dale Luck.
+ * GLFW conversion by Marcus Geelnard
+ * Notes: - 360' = 2*PI [radian]
+ *
+ * - Distances between objects are created by doing relative
+ * Z translations.
+ *
+ * - Although OpenGL enticingly supports alpha-blending,
+ * the shadow of the original Boing didn't affect the color
+ * of the grid.
+ *
+ * - [Marcus] Changed timing scheme from interval driven to frame-
+ * time based animation steps (which results in much smoother
+ * movement)
+ *
+ * History of Amiga Boing:
+ *
+ * Boing was demonstrated on the prototype Amiga (codenamed "Lorraine") in
+ * 1985. According to legend, it was written ad-hoc in one night by
+ * R. J. Mical and Dale Luck. Because the bouncing ball animation was so fast
+ * and smooth, attendees did not believe the Amiga prototype was really doing
+ * the rendering. Suspecting a trick, they began looking around the booth for
+ * a hidden computer or VCR.
+ *****************************************************************************/
+
+#if defined(_MSC_VER)
+ // Make MS math.h define M_PI
+ #define _USE_MATH_DEFINES
+#endif
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <math.h>
+
+#define GLAD_GL_IMPLEMENTATION
+#include <glad/gl.h>
+#define GLFW_INCLUDE_NONE
+#include <GLFW/glfw3.h>
+
+#include <linmath.h>
+
+
+/*****************************************************************************
+ * Various declarations and macros
+ *****************************************************************************/
+
+/* Prototypes */
+void init( void );
+void display( void );
+void reshape( GLFWwindow* window, int w, int h );
+void key_callback( GLFWwindow* window, int key, int scancode, int action, int mods );
+void mouse_button_callback( GLFWwindow* window, int button, int action, int mods );
+void cursor_position_callback( GLFWwindow* window, double x, double y );
+void DrawBoingBall( void );
+void BounceBall( double dt );
+void DrawBoingBallBand( GLfloat long_lo, GLfloat long_hi );
+void DrawGrid( void );
+
+#define RADIUS 70.f
+#define STEP_LONGITUDE 22.5f /* 22.5 makes 8 bands like original Boing */
+#define STEP_LATITUDE 22.5f
+
+#define DIST_BALL (RADIUS * 2.f + RADIUS * 0.1f)
+
+#define VIEW_SCENE_DIST (DIST_BALL * 3.f + 200.f)/* distance from viewer to middle of boing area */
+#define GRID_SIZE (RADIUS * 4.5f) /* length (width) of grid */
+#define BOUNCE_HEIGHT (RADIUS * 2.1f)
+#define BOUNCE_WIDTH (RADIUS * 2.1f)
+
+#define SHADOW_OFFSET_X -20.f
+#define SHADOW_OFFSET_Y 10.f
+#define SHADOW_OFFSET_Z 0.f
+
+#define WALL_L_OFFSET 0.f
+#define WALL_R_OFFSET 5.f
+
+/* Animation speed (50.0 mimics the original GLUT demo speed) */
+#define ANIMATION_SPEED 50.f
+
+/* Maximum allowed delta time per physics iteration */
+#define MAX_DELTA_T 0.02f
+
+/* Draw ball, or its shadow */
+typedef enum { DRAW_BALL, DRAW_BALL_SHADOW } DRAW_BALL_ENUM;
+
+/* Vertex type */
+typedef struct {float x; float y; float z;} vertex_t;
+
+/* Global vars */
+int windowed_xpos, windowed_ypos, windowed_width, windowed_height;
+int width, height;
+GLfloat deg_rot_y = 0.f;
+GLfloat deg_rot_y_inc = 2.f;
+int override_pos = GLFW_FALSE;
+GLfloat cursor_x = 0.f;
+GLfloat cursor_y = 0.f;
+GLfloat ball_x = -RADIUS;
+GLfloat ball_y = -RADIUS;
+GLfloat ball_x_inc = 1.f;
+GLfloat ball_y_inc = 2.f;
+DRAW_BALL_ENUM drawBallHow;
+double t;
+double t_old = 0.f;
+double dt;
+
+/* Random number generator */
+#ifndef RAND_MAX
+ #define RAND_MAX 4095
+#endif
+
+
+/*****************************************************************************
+ * Truncate a degree.
+ *****************************************************************************/
+GLfloat TruncateDeg( GLfloat deg )
+{
+ if ( deg >= 360.f )
+ return (deg - 360.f);
+ else
+ return deg;
+}
+
+/*****************************************************************************
+ * Convert a degree (360-based) into a radian.
+ * 360' = 2 * PI
+ *****************************************************************************/
+double deg2rad( double deg )
+{
+ return deg / 360 * (2 * M_PI);
+}
+
+/*****************************************************************************
+ * 360' sin().
+ *****************************************************************************/
+double sin_deg( double deg )
+{
+ return sin( deg2rad( deg ) );
+}
+
+/*****************************************************************************
+ * 360' cos().
+ *****************************************************************************/
+double cos_deg( double deg )
+{
+ return cos( deg2rad( deg ) );
+}
+
+/*****************************************************************************
+ * Compute a cross product (for a normal vector).
+ *
+ * c = a x b
+ *****************************************************************************/
+void CrossProduct( vertex_t a, vertex_t b, vertex_t c, vertex_t *n )
+{
+ GLfloat u1, u2, u3;
+ GLfloat v1, v2, v3;
+
+ u1 = b.x - a.x;
+ u2 = b.y - a.y;
+ u3 = b.z - a.z;
+
+ v1 = c.x - a.x;
+ v2 = c.y - a.y;
+ v3 = c.z - a.z;
+
+ n->x = u2 * v3 - v2 * u3;
+ n->y = u3 * v1 - v3 * u1;
+ n->z = u1 * v2 - v1 * u2;
+}
+
+
+#define BOING_DEBUG 0
+
+
+/*****************************************************************************
+ * init()
+ *****************************************************************************/
+void init( void )
+{
+ /*
+ * Clear background.
+ */
+ glClearColor( 0.55f, 0.55f, 0.55f, 0.f );
+
+ glShadeModel( GL_FLAT );
+}
+
+
+/*****************************************************************************
+ * display()
+ *****************************************************************************/
+void display(void)
+{
+ glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT );
+ glPushMatrix();
+
+ drawBallHow = DRAW_BALL_SHADOW;
+ DrawBoingBall();
+
+ DrawGrid();
+
+ drawBallHow = DRAW_BALL;
+ DrawBoingBall();
+
+ glPopMatrix();
+ glFlush();
+}
+
+
+/*****************************************************************************
+ * reshape()
+ *****************************************************************************/
+void reshape( GLFWwindow* window, int w, int h )
+{
+ mat4x4 projection, view;
+
+ glViewport( 0, 0, (GLsizei)w, (GLsizei)h );
+
+ glMatrixMode( GL_PROJECTION );
+ mat4x4_perspective( projection,
+ 2.f * (float) atan2( RADIUS, 200.f ),
+ (float)w / (float)h,
+ 1.f, VIEW_SCENE_DIST );
+ glLoadMatrixf((const GLfloat*) projection);
+
+ glMatrixMode( GL_MODELVIEW );
+ {
+ vec3 eye = { 0.f, 0.f, VIEW_SCENE_DIST };
+ vec3 center = { 0.f, 0.f, 0.f };
+ vec3 up = { 0.f, -1.f, 0.f };
+ mat4x4_look_at( view, eye, center, up );
+ }
+ glLoadMatrixf((const GLfloat*) view);
+}
+
+void key_callback( GLFWwindow* window, int key, int scancode, int action, int mods )
+{
+ if (action != GLFW_PRESS)
+ return;
+
+ if (key == GLFW_KEY_ESCAPE && mods == 0)
+ glfwSetWindowShouldClose(window, GLFW_TRUE);
+ if ((key == GLFW_KEY_ENTER && mods == GLFW_MOD_ALT) ||
+ (key == GLFW_KEY_F11 && mods == GLFW_MOD_ALT))
+ {
+ if (glfwGetWindowMonitor(window))
+ {
+ glfwSetWindowMonitor(window, NULL,
+ windowed_xpos, windowed_ypos,
+ windowed_width, windowed_height, 0);
+ }
+ else
+ {
+ GLFWmonitor* monitor = glfwGetPrimaryMonitor();
+ if (monitor)
+ {
+ const GLFWvidmode* mode = glfwGetVideoMode(monitor);
+ glfwGetWindowPos(window, &windowed_xpos, &windowed_ypos);
+ glfwGetWindowSize(window, &windowed_width, &windowed_height);
+ glfwSetWindowMonitor(window, monitor, 0, 0, mode->width, mode->height, mode->refreshRate);
+ }
+ }
+ }
+}
+
+static void set_ball_pos ( GLfloat x, GLfloat y )
+{
+ ball_x = (width / 2) - x;
+ ball_y = y - (height / 2);
+}
+
+void mouse_button_callback( GLFWwindow* window, int button, int action, int mods )
+{
+ if (button != GLFW_MOUSE_BUTTON_LEFT)
+ return;
+
+ if (action == GLFW_PRESS)
+ {
+ override_pos = GLFW_TRUE;
+ set_ball_pos(cursor_x, cursor_y);
+ }
+ else
+ {
+ override_pos = GLFW_FALSE;
+ }
+}
+
+void cursor_position_callback( GLFWwindow* window, double x, double y )
+{
+ cursor_x = (float) x;
+ cursor_y = (float) y;
+
+ if ( override_pos )
+ set_ball_pos(cursor_x, cursor_y);
+}
+
+/*****************************************************************************
+ * Draw the Boing ball.
+ *
+ * The Boing ball is a sphere in which each facet is a rectangle.
+ * Facet colors alternate between red and white.
+ * The ball is built by stacking latitudinal circles. Each circle is composed
+ * of a widely-separated set of points, so that each facet is noticeably large.
+ *****************************************************************************/
+void DrawBoingBall( void )
+{
+ GLfloat lon_deg; /* degree of longitude */
+ double dt_total, dt2;
+
+ glPushMatrix();
+ glMatrixMode( GL_MODELVIEW );
+
+ /*
+ * Another relative Z translation to separate objects.
+ */
+ glTranslatef( 0.0, 0.0, DIST_BALL );
+
+ /* Update ball position and rotation (iterate if necessary) */
+ dt_total = dt;
+ while( dt_total > 0.0 )
+ {
+ dt2 = dt_total > MAX_DELTA_T ? MAX_DELTA_T : dt_total;
+ dt_total -= dt2;
+ BounceBall( dt2 );
+ deg_rot_y = TruncateDeg( deg_rot_y + deg_rot_y_inc*((float)dt2*ANIMATION_SPEED) );
+ }
+
+ /* Set ball position */
+ glTranslatef( ball_x, ball_y, 0.0 );
+
+ /*
+ * Offset the shadow.
+ */
+ if ( drawBallHow == DRAW_BALL_SHADOW )
+ {
+ glTranslatef( SHADOW_OFFSET_X,
+ SHADOW_OFFSET_Y,
+ SHADOW_OFFSET_Z );
+ }
+
+ /*
+ * Tilt the ball.
+ */
+ glRotatef( -20.0, 0.0, 0.0, 1.0 );
+
+ /*
+ * Continually rotate ball around Y axis.
+ */
+ glRotatef( deg_rot_y, 0.0, 1.0, 0.0 );
+
+ /*
+ * Set OpenGL state for Boing ball.
+ */
+ glCullFace( GL_FRONT );
+ glEnable( GL_CULL_FACE );
+ glEnable( GL_NORMALIZE );
+
+ /*
+ * Build a faceted latitude slice of the Boing ball,
+ * stepping same-sized vertical bands of the sphere.
+ */
+ for ( lon_deg = 0;
+ lon_deg < 180;
+ lon_deg += STEP_LONGITUDE )
+ {
+ /*
+ * Draw a latitude circle at this longitude.
+ */
+ DrawBoingBallBand( lon_deg,
+ lon_deg + STEP_LONGITUDE );
+ }
+
+ glPopMatrix();
+
+ return;
+}
+
+
+/*****************************************************************************
+ * Bounce the ball.
+ *****************************************************************************/
+void BounceBall( double delta_t )
+{
+ GLfloat sign;
+ GLfloat deg;
+
+ if ( override_pos )
+ return;
+
+ /* Bounce on walls */
+ if ( ball_x > (BOUNCE_WIDTH/2 + WALL_R_OFFSET ) )
+ {
+ ball_x_inc = -0.5f - 0.75f * (GLfloat)rand() / (GLfloat)RAND_MAX;
+ deg_rot_y_inc = -deg_rot_y_inc;
+ }
+ if ( ball_x < -(BOUNCE_WIDTH/2 + WALL_L_OFFSET) )
+ {
+ ball_x_inc = 0.5f + 0.75f * (GLfloat)rand() / (GLfloat)RAND_MAX;
+ deg_rot_y_inc = -deg_rot_y_inc;
+ }
+
+ /* Bounce on floor / roof */
+ if ( ball_y > BOUNCE_HEIGHT/2 )
+ {
+ ball_y_inc = -0.75f - 1.f * (GLfloat)rand() / (GLfloat)RAND_MAX;
+ }
+ if ( ball_y < -BOUNCE_HEIGHT/2*0.85 )
+ {
+ ball_y_inc = 0.75f + 1.f * (GLfloat)rand() / (GLfloat)RAND_MAX;
+ }
+
+ /* Update ball position */
+ ball_x += ball_x_inc * ((float)delta_t*ANIMATION_SPEED);
+ ball_y += ball_y_inc * ((float)delta_t*ANIMATION_SPEED);
+
+ /*
+ * Simulate the effects of gravity on Y movement.
+ */
+ if ( ball_y_inc < 0 ) sign = -1.0; else sign = 1.0;
+
+ deg = (ball_y + BOUNCE_HEIGHT/2) * 90 / BOUNCE_HEIGHT;
+ if ( deg > 80 ) deg = 80;
+ if ( deg < 10 ) deg = 10;
+
+ ball_y_inc = sign * 4.f * (float) sin_deg( deg );
+}
+
+
+/*****************************************************************************
+ * Draw a faceted latitude band of the Boing ball.
+ *
+ * Parms: long_lo, long_hi
+ * Low and high longitudes of slice, resp.
+ *****************************************************************************/
+void DrawBoingBallBand( GLfloat long_lo,
+ GLfloat long_hi )
+{
+ vertex_t vert_ne; /* "ne" means north-east, and so on */
+ vertex_t vert_nw;
+ vertex_t vert_sw;
+ vertex_t vert_se;
+ vertex_t vert_norm;
+ GLfloat lat_deg;
+ static int colorToggle = 0;
+
+ /*
+ * Iterate through the points of a latitude circle.
+ * A latitude circle is a 2D set of X,Z points.
+ */
+ for ( lat_deg = 0;
+ lat_deg <= (360 - STEP_LATITUDE);
+ lat_deg += STEP_LATITUDE )
+ {
+ /*
+ * Color this polygon with red or white.
+ */
+ if ( colorToggle )
+ glColor3f( 0.8f, 0.1f, 0.1f );
+ else
+ glColor3f( 0.95f, 0.95f, 0.95f );
+#if 0
+ if ( lat_deg >= 180 )
+ if ( colorToggle )
+ glColor3f( 0.1f, 0.8f, 0.1f );
+ else
+ glColor3f( 0.5f, 0.5f, 0.95f );
+#endif
+ colorToggle = ! colorToggle;
+
+ /*
+ * Change color if drawing shadow.
+ */
+ if ( drawBallHow == DRAW_BALL_SHADOW )
+ glColor3f( 0.35f, 0.35f, 0.35f );
+
+ /*
+ * Assign each Y.
+ */
+ vert_ne.y = vert_nw.y = (float) cos_deg(long_hi) * RADIUS;
+ vert_sw.y = vert_se.y = (float) cos_deg(long_lo) * RADIUS;
+
+ /*
+ * Assign each X,Z using sin,cos values scaled by the latitude radius indexed by longitude.
+ * E.g. long=0 and long=180 are at the poles, so the scale sin(longitude) is zero there,
+ * while long=90 (sin(90)=1) is at the equator.
+ */
+ vert_ne.x = (float) cos_deg( lat_deg ) * (RADIUS * (float) sin_deg( long_lo + STEP_LONGITUDE ));
+ vert_se.x = (float) cos_deg( lat_deg ) * (RADIUS * (float) sin_deg( long_lo ));
+ vert_nw.x = (float) cos_deg( lat_deg + STEP_LATITUDE ) * (RADIUS * (float) sin_deg( long_lo + STEP_LONGITUDE ));
+ vert_sw.x = (float) cos_deg( lat_deg + STEP_LATITUDE ) * (RADIUS * (float) sin_deg( long_lo ));
+
+ vert_ne.z = (float) sin_deg( lat_deg ) * (RADIUS * (float) sin_deg( long_lo + STEP_LONGITUDE ));
+ vert_se.z = (float) sin_deg( lat_deg ) * (RADIUS * (float) sin_deg( long_lo ));
+ vert_nw.z = (float) sin_deg( lat_deg + STEP_LATITUDE ) * (RADIUS * (float) sin_deg( long_lo + STEP_LONGITUDE ));
+ vert_sw.z = (float) sin_deg( lat_deg + STEP_LATITUDE ) * (RADIUS * (float) sin_deg( long_lo ));
+
+ /*
+ * Draw the facet.
+ */
+ glBegin( GL_POLYGON );
+
+ CrossProduct( vert_ne, vert_nw, vert_sw, &vert_norm );
+ glNormal3f( vert_norm.x, vert_norm.y, vert_norm.z );
+
+ glVertex3f( vert_ne.x, vert_ne.y, vert_ne.z );
+ glVertex3f( vert_nw.x, vert_nw.y, vert_nw.z );
+ glVertex3f( vert_sw.x, vert_sw.y, vert_sw.z );
+ glVertex3f( vert_se.x, vert_se.y, vert_se.z );
+
+ glEnd();
+
+#if BOING_DEBUG
+ printf( "----------------------------------------------------------- \n" );
+ printf( "lat = %f long_lo = %f long_hi = %f \n", lat_deg, long_lo, long_hi );
+ printf( "vert_ne x = %.8f y = %.8f z = %.8f \n", vert_ne.x, vert_ne.y, vert_ne.z );
+ printf( "vert_nw x = %.8f y = %.8f z = %.8f \n", vert_nw.x, vert_nw.y, vert_nw.z );
+ printf( "vert_se x = %.8f y = %.8f z = %.8f \n", vert_se.x, vert_se.y, vert_se.z );
+ printf( "vert_sw x = %.8f y = %.8f z = %.8f \n", vert_sw.x, vert_sw.y, vert_sw.z );
+#endif
+
+ }
+
+ /*
+ * Toggle color so that the next band will use the opposite red/white colors to this one.
+ */
+ colorToggle = ! colorToggle;
+
+ /*
+ * This circular band is done.
+ */
+ return;
+}
+
+
+/*****************************************************************************
+ * Draw the purple grid of lines, behind the Boing ball.
+ * When the Workbench is dropped to the bottom, Boing shows 12 rows.
+ *****************************************************************************/
+void DrawGrid( void )
+{
+ int row, col;
+ const int rowTotal = 12; /* must be divisible by 2 */
+ const int colTotal = rowTotal; /* must be same as rowTotal */
+ const GLfloat widthLine = 2.0; /* should be divisible by 2 */
+ const GLfloat sizeCell = GRID_SIZE / rowTotal;
+ const GLfloat z_offset = -40.0;
+ GLfloat xl, xr;
+ GLfloat yt, yb;
+
+ glPushMatrix();
+ glDisable( GL_CULL_FACE );
+
+ /*
+ * Another relative Z translation to separate objects.
+ */
+ glTranslatef( 0.0, 0.0, DIST_BALL );
+
+ /*
+ * Draw vertical lines (as skinny 3D rectangles).
+ */
+ for ( col = 0; col <= colTotal; col++ )
+ {
+ /*
+ * Compute co-ords of line.
+ */
+ xl = -GRID_SIZE / 2 + col * sizeCell;
+ xr = xl + widthLine;
+
+ yt = GRID_SIZE / 2;
+ yb = -GRID_SIZE / 2 - widthLine;
+
+ glBegin( GL_POLYGON );
+
+ glColor3f( 0.6f, 0.1f, 0.6f ); /* purple */
+
+ glVertex3f( xr, yt, z_offset ); /* NE */
+ glVertex3f( xl, yt, z_offset ); /* NW */
+ glVertex3f( xl, yb, z_offset ); /* SW */
+ glVertex3f( xr, yb, z_offset ); /* SE */
+
+ glEnd();
+ }
+
+ /*
+ * Draw horizontal lines (as skinny 3D rectangles).
+ */
+ for ( row = 0; row <= rowTotal; row++ )
+ {
+ /*
+ * Compute co-ords of line.
+ */
+ yt = GRID_SIZE / 2 - row * sizeCell;
+ yb = yt - widthLine;
+
+ xl = -GRID_SIZE / 2;
+ xr = GRID_SIZE / 2 + widthLine;
+
+ glBegin( GL_POLYGON );
+
+ glColor3f( 0.6f, 0.1f, 0.6f ); /* purple */
+
+ glVertex3f( xr, yt, z_offset ); /* NE */
+ glVertex3f( xl, yt, z_offset ); /* NW */
+ glVertex3f( xl, yb, z_offset ); /* SW */
+ glVertex3f( xr, yb, z_offset ); /* SE */
+
+ glEnd();
+ }
+
+ glPopMatrix();
+
+ return;
+}
+
+
+/*======================================================================*
+ * main()
+ *======================================================================*/
+
+int main( void )
+{
+ GLFWwindow* window;
+
+ /* Init GLFW */
+ if( !glfwInit() )
+ exit( EXIT_FAILURE );
+
+ window = glfwCreateWindow( 400, 400, "Boing (classic Amiga demo)", NULL, NULL );
+ if (!window)
+ {
+ glfwTerminate();
+ exit( EXIT_FAILURE );
+ }
+
+ glfwSetWindowAspectRatio(window, 1, 1);
+
+ glfwSetFramebufferSizeCallback(window, reshape);
+ glfwSetKeyCallback(window, key_callback);
+ glfwSetMouseButtonCallback(window, mouse_button_callback);
+ glfwSetCursorPosCallback(window, cursor_position_callback);
+
+ glfwMakeContextCurrent(window);
+ gladLoadGL(glfwGetProcAddress);
+ glfwSwapInterval( 1 );
+
+ glfwGetFramebufferSize(window, &width, &height);
+ reshape(window, width, height);
+
+ glfwSetTime( 0.0 );
+
+ init();
+
+ /* Main loop */
+ for (;;)
+ {
+ /* Timing */
+ t = glfwGetTime();
+ dt = t - t_old;
+ t_old = t;
+
+ /* Draw one frame */
+ display();
+
+ /* Swap buffers */
+ glfwSwapBuffers(window);
+ glfwPollEvents();
+
+ /* Check if we are still running */
+ if (glfwWindowShouldClose(window))
+ break;
+ }
+
+ glfwTerminate();
+ exit( EXIT_SUCCESS );
+}
+
diff --git a/chromium/third_party/dawn/third_party/glfw/examples/gears.c b/chromium/third_party/dawn/third_party/glfw/examples/gears.c
new file mode 100644
index 00000000000..3d63013d087
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/examples/gears.c
@@ -0,0 +1,361 @@
+/*
+ * 3-D gear wheels. This program is in the public domain.
+ *
+ * Command line options:
+ * -info print GL implementation information
+ * -exit automatically exit after 30 seconds
+ *
+ *
+ * Brian Paul
+ *
+ *
+ * Marcus Geelnard:
+ * - Conversion to GLFW
+ * - Time based rendering (frame rate independent)
+ * - Slightly modified camera that should work better for stereo viewing
+ *
+ *
+ * Camilla Löwy:
+ * - Removed FPS counter (this is not a benchmark)
+ * - Added a few comments
+ * - Enabled vsync
+ */
+
+#if defined(_MSC_VER)
+ // Make MS math.h define M_PI
+ #define _USE_MATH_DEFINES
+#endif
+
+#include <math.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+
+#define GLAD_GL_IMPLEMENTATION
+#include <glad/gl.h>
+#define GLFW_INCLUDE_NONE
+#include <GLFW/glfw3.h>
+
+/**
+
+ Draw a gear wheel. You'll probably want to call this function when
+ building a display list since we do a lot of trig here.
+
+ Input: inner_radius - radius of hole at center
+ outer_radius - radius at center of teeth
+ width - width of gear
+ teeth - number of teeth
+ tooth_depth - depth of tooth
+
+ **/
+
+static void
+gear(GLfloat inner_radius, GLfloat outer_radius, GLfloat width,
+ GLint teeth, GLfloat tooth_depth)
+{
+ GLint i;
+ GLfloat r0, r1, r2;
+ GLfloat angle, da;
+ GLfloat u, v, len;
+
+ r0 = inner_radius;
+ r1 = outer_radius - tooth_depth / 2.f;
+ r2 = outer_radius + tooth_depth / 2.f;
+
+ da = 2.f * (float) M_PI / teeth / 4.f;
+
+ glShadeModel(GL_FLAT);
+
+ glNormal3f(0.f, 0.f, 1.f);
+
+ /* draw front face */
+ glBegin(GL_QUAD_STRIP);
+ for (i = 0; i <= teeth; i++) {
+ angle = i * 2.f * (float) M_PI / teeth;
+ glVertex3f(r0 * (float) cos(angle), r0 * (float) sin(angle), width * 0.5f);
+ glVertex3f(r1 * (float) cos(angle), r1 * (float) sin(angle), width * 0.5f);
+ if (i < teeth) {
+ glVertex3f(r0 * (float) cos(angle), r0 * (float) sin(angle), width * 0.5f);
+ glVertex3f(r1 * (float) cos(angle + 3 * da), r1 * (float) sin(angle + 3 * da), width * 0.5f);
+ }
+ }
+ glEnd();
+
+ /* draw front sides of teeth */
+ glBegin(GL_QUADS);
+ da = 2.f * (float) M_PI / teeth / 4.f;
+ for (i = 0; i < teeth; i++) {
+ angle = i * 2.f * (float) M_PI / teeth;
+
+ glVertex3f(r1 * (float) cos(angle), r1 * (float) sin(angle), width * 0.5f);
+ glVertex3f(r2 * (float) cos(angle + da), r2 * (float) sin(angle + da), width * 0.5f);
+ glVertex3f(r2 * (float) cos(angle + 2 * da), r2 * (float) sin(angle + 2 * da), width * 0.5f);
+ glVertex3f(r1 * (float) cos(angle + 3 * da), r1 * (float) sin(angle + 3 * da), width * 0.5f);
+ }
+ glEnd();
+
+ glNormal3f(0.0, 0.0, -1.0);
+
+ /* draw back face */
+ glBegin(GL_QUAD_STRIP);
+ for (i = 0; i <= teeth; i++) {
+ angle = i * 2.f * (float) M_PI / teeth;
+ glVertex3f(r1 * (float) cos(angle), r1 * (float) sin(angle), -width * 0.5f);
+ glVertex3f(r0 * (float) cos(angle), r0 * (float) sin(angle), -width * 0.5f);
+ if (i < teeth) {
+ glVertex3f(r1 * (float) cos(angle + 3 * da), r1 * (float) sin(angle + 3 * da), -width * 0.5f);
+ glVertex3f(r0 * (float) cos(angle), r0 * (float) sin(angle), -width * 0.5f);
+ }
+ }
+ glEnd();
+
+ /* draw back sides of teeth */
+ glBegin(GL_QUADS);
+ da = 2.f * (float) M_PI / teeth / 4.f;
+ for (i = 0; i < teeth; i++) {
+ angle = i * 2.f * (float) M_PI / teeth;
+
+ glVertex3f(r1 * (float) cos(angle + 3 * da), r1 * (float) sin(angle + 3 * da), -width * 0.5f);
+ glVertex3f(r2 * (float) cos(angle + 2 * da), r2 * (float) sin(angle + 2 * da), -width * 0.5f);
+ glVertex3f(r2 * (float) cos(angle + da), r2 * (float) sin(angle + da), -width * 0.5f);
+ glVertex3f(r1 * (float) cos(angle), r1 * (float) sin(angle), -width * 0.5f);
+ }
+ glEnd();
+
+ /* draw outward faces of teeth */
+ glBegin(GL_QUAD_STRIP);
+ for (i = 0; i < teeth; i++) {
+ angle = i * 2.f * (float) M_PI / teeth;
+
+ glVertex3f(r1 * (float) cos(angle), r1 * (float) sin(angle), width * 0.5f);
+ glVertex3f(r1 * (float) cos(angle), r1 * (float) sin(angle), -width * 0.5f);
+ u = r2 * (float) cos(angle + da) - r1 * (float) cos(angle);
+ v = r2 * (float) sin(angle + da) - r1 * (float) sin(angle);
+ len = (float) sqrt(u * u + v * v);
+ u /= len;
+ v /= len;
+ glNormal3f(v, -u, 0.0);
+ glVertex3f(r2 * (float) cos(angle + da), r2 * (float) sin(angle + da), width * 0.5f);
+ glVertex3f(r2 * (float) cos(angle + da), r2 * (float) sin(angle + da), -width * 0.5f);
+ glNormal3f((float) cos(angle), (float) sin(angle), 0.f);
+ glVertex3f(r2 * (float) cos(angle + 2 * da), r2 * (float) sin(angle + 2 * da), width * 0.5f);
+ glVertex3f(r2 * (float) cos(angle + 2 * da), r2 * (float) sin(angle + 2 * da), -width * 0.5f);
+ u = r1 * (float) cos(angle + 3 * da) - r2 * (float) cos(angle + 2 * da);
+ v = r1 * (float) sin(angle + 3 * da) - r2 * (float) sin(angle + 2 * da);
+ glNormal3f(v, -u, 0.f);
+ glVertex3f(r1 * (float) cos(angle + 3 * da), r1 * (float) sin(angle + 3 * da), width * 0.5f);
+ glVertex3f(r1 * (float) cos(angle + 3 * da), r1 * (float) sin(angle + 3 * da), -width * 0.5f);
+ glNormal3f((float) cos(angle), (float) sin(angle), 0.f);
+ }
+
+ glVertex3f(r1 * (float) cos(0), r1 * (float) sin(0), width * 0.5f);
+ glVertex3f(r1 * (float) cos(0), r1 * (float) sin(0), -width * 0.5f);
+
+ glEnd();
+
+ glShadeModel(GL_SMOOTH);
+
+ /* draw inside radius cylinder */
+ glBegin(GL_QUAD_STRIP);
+ for (i = 0; i <= teeth; i++) {
+ angle = i * 2.f * (float) M_PI / teeth;
+ glNormal3f(-(float) cos(angle), -(float) sin(angle), 0.f);
+ glVertex3f(r0 * (float) cos(angle), r0 * (float) sin(angle), -width * 0.5f);
+ glVertex3f(r0 * (float) cos(angle), r0 * (float) sin(angle), width * 0.5f);
+ }
+ glEnd();
+
+}
+
+
+static GLfloat view_rotx = 20.f, view_roty = 30.f, view_rotz = 0.f;
+static GLint gear1, gear2, gear3;
+static GLfloat angle = 0.f;
+
+/* OpenGL draw function & timing */
+static void draw(void)
+{
+ glClearColor(0.0, 0.0, 0.0, 0.0);
+ glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
+
+ glPushMatrix();
+ glRotatef(view_rotx, 1.0, 0.0, 0.0);
+ glRotatef(view_roty, 0.0, 1.0, 0.0);
+ glRotatef(view_rotz, 0.0, 0.0, 1.0);
+
+ glPushMatrix();
+ glTranslatef(-3.0, -2.0, 0.0);
+ glRotatef(angle, 0.0, 0.0, 1.0);
+ glCallList(gear1);
+ glPopMatrix();
+
+ glPushMatrix();
+ glTranslatef(3.1f, -2.f, 0.f);
+ glRotatef(-2.f * angle - 9.f, 0.f, 0.f, 1.f);
+ glCallList(gear2);
+ glPopMatrix();
+
+ glPushMatrix();
+ glTranslatef(-3.1f, 4.2f, 0.f);
+ glRotatef(-2.f * angle - 25.f, 0.f, 0.f, 1.f);
+ glCallList(gear3);
+ glPopMatrix();
+
+ glPopMatrix();
+}
+
+
+/* update animation parameters */
+static void animate(void)
+{
+ angle = 100.f * (float) glfwGetTime();
+}
+
+
+/* change view angle, exit upon ESC */
+void key( GLFWwindow* window, int k, int s, int action, int mods )
+{
+ if( action != GLFW_PRESS ) return;
+
+ switch (k) {
+ case GLFW_KEY_Z:
+ if( mods & GLFW_MOD_SHIFT )
+ view_rotz -= 5.0;
+ else
+ view_rotz += 5.0;
+ break;
+ case GLFW_KEY_ESCAPE:
+ glfwSetWindowShouldClose(window, GLFW_TRUE);
+ break;
+ case GLFW_KEY_UP:
+ view_rotx += 5.0;
+ break;
+ case GLFW_KEY_DOWN:
+ view_rotx -= 5.0;
+ break;
+ case GLFW_KEY_LEFT:
+ view_roty += 5.0;
+ break;
+ case GLFW_KEY_RIGHT:
+ view_roty -= 5.0;
+ break;
+ default:
+ return;
+ }
+}
+
+
+/* new window size */
+void reshape( GLFWwindow* window, int width, int height )
+{
+ GLfloat h = (GLfloat) height / (GLfloat) width;
+ GLfloat xmax, znear, zfar;
+
+ znear = 5.0f;
+ zfar = 30.0f;
+ xmax = znear * 0.5f;
+
+ glViewport( 0, 0, (GLint) width, (GLint) height );
+ glMatrixMode( GL_PROJECTION );
+ glLoadIdentity();
+ glFrustum( -xmax, xmax, -xmax*h, xmax*h, znear, zfar );
+ glMatrixMode( GL_MODELVIEW );
+ glLoadIdentity();
+ glTranslatef( 0.0, 0.0, -20.0 );
+}
+
+
+/* program & OpenGL initialization */
+static void init(void)
+{
+ static GLfloat pos[4] = {5.f, 5.f, 10.f, 0.f};
+ static GLfloat red[4] = {0.8f, 0.1f, 0.f, 1.f};
+ static GLfloat green[4] = {0.f, 0.8f, 0.2f, 1.f};
+ static GLfloat blue[4] = {0.2f, 0.2f, 1.f, 1.f};
+
+ glLightfv(GL_LIGHT0, GL_POSITION, pos);
+ glEnable(GL_CULL_FACE);
+ glEnable(GL_LIGHTING);
+ glEnable(GL_LIGHT0);
+ glEnable(GL_DEPTH_TEST);
+
+ /* make the gears */
+ gear1 = glGenLists(1);
+ glNewList(gear1, GL_COMPILE);
+ glMaterialfv(GL_FRONT, GL_AMBIENT_AND_DIFFUSE, red);
+ gear(1.f, 4.f, 1.f, 20, 0.7f);
+ glEndList();
+
+ gear2 = glGenLists(1);
+ glNewList(gear2, GL_COMPILE);
+ glMaterialfv(GL_FRONT, GL_AMBIENT_AND_DIFFUSE, green);
+ gear(0.5f, 2.f, 2.f, 10, 0.7f);
+ glEndList();
+
+ gear3 = glGenLists(1);
+ glNewList(gear3, GL_COMPILE);
+ glMaterialfv(GL_FRONT, GL_AMBIENT_AND_DIFFUSE, blue);
+ gear(1.3f, 2.f, 0.5f, 10, 0.7f);
+ glEndList();
+
+ glEnable(GL_NORMALIZE);
+}
+
+
+/* program entry */
+int main(int argc, char *argv[])
+{
+ GLFWwindow* window;
+ int width, height;
+
+ if( !glfwInit() )
+ {
+ fprintf( stderr, "Failed to initialize GLFW\n" );
+ exit( EXIT_FAILURE );
+ }
+
+ glfwWindowHint(GLFW_DEPTH_BITS, 16);
+ glfwWindowHint(GLFW_TRANSPARENT_FRAMEBUFFER, GLFW_TRUE);
+
+ window = glfwCreateWindow( 300, 300, "Gears", NULL, NULL );
+ if (!window)
+ {
+ fprintf( stderr, "Failed to open GLFW window\n" );
+ glfwTerminate();
+ exit( EXIT_FAILURE );
+ }
+
+ // Set callback functions
+ glfwSetFramebufferSizeCallback(window, reshape);
+ glfwSetKeyCallback(window, key);
+
+ glfwMakeContextCurrent(window);
+ gladLoadGL(glfwGetProcAddress);
+ glfwSwapInterval( 1 );
+
+ glfwGetFramebufferSize(window, &width, &height);
+ reshape(window, width, height);
+
+ // Initialize OpenGL state and create the gear display lists
+ init();
+
+ // Main loop
+ while( !glfwWindowShouldClose(window) )
+ {
+ // Draw gears
+ draw();
+
+ // Update animation
+ animate();
+
+ // Swap buffers
+ glfwSwapBuffers(window);
+ glfwPollEvents();
+ }
+
+ // Terminate GLFW
+ glfwTerminate();
+
+ // Exit program
+ exit( EXIT_SUCCESS );
+}
+
diff --git a/chromium/third_party/dawn/third_party/glfw/examples/glfw.icns b/chromium/third_party/dawn/third_party/glfw/examples/glfw.icns
new file mode 100644
index 00000000000..ad98f397521
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/examples/glfw.icns
Binary files differ
diff --git a/chromium/third_party/dawn/third_party/glfw/examples/glfw.ico b/chromium/third_party/dawn/third_party/glfw/examples/glfw.ico
new file mode 100644
index 00000000000..882a66051e3
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/examples/glfw.ico
Binary files differ
diff --git a/chromium/third_party/dawn/third_party/glfw/examples/glfw.rc b/chromium/third_party/dawn/third_party/glfw/examples/glfw.rc
new file mode 100644
index 00000000000..f2b62f6c40b
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/examples/glfw.rc
@@ -0,0 +1,3 @@
+
+GLFW_ICON ICON "glfw.ico"
+
diff --git a/chromium/third_party/dawn/third_party/glfw/examples/heightmap.c b/chromium/third_party/dawn/third_party/glfw/examples/heightmap.c
new file mode 100644
index 00000000000..ad5d47c141f
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/examples/heightmap.c
@@ -0,0 +1,513 @@
+//========================================================================
+// Heightmap example program using OpenGL 3 core profile
+// Copyright (c) 2010 Olivier Delannoy
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would
+// be appreciated but is not required.
+//
+// 2. Altered source versions must be plainly marked as such, and must not
+// be misrepresented as being the original software.
+//
+// 3. This notice may not be removed or altered from any source
+// distribution.
+//
+//========================================================================
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <math.h>
+#include <assert.h>
+#include <stddef.h>
+
+#define GLAD_GL_IMPLEMENTATION
+#include <glad/gl.h>
+#define GLFW_INCLUDE_NONE
+#include <GLFW/glfw3.h>
+
+/* Map height updates */
+#define MAX_CIRCLE_SIZE (5.0f)
+#define MAX_DISPLACEMENT (1.0f)
+#define DISPLACEMENT_SIGN_LIMIT (0.3f)
+#define MAX_ITER (200)
+#define NUM_ITER_AT_A_TIME (1)
+
+/* Map general information */
+#define MAP_SIZE (10.0f)
+#define MAP_NUM_VERTICES (80)
+#define MAP_NUM_TOTAL_VERTICES (MAP_NUM_VERTICES*MAP_NUM_VERTICES)
+#define MAP_NUM_LINES (3* (MAP_NUM_VERTICES - 1) * (MAP_NUM_VERTICES - 1) + \
+ 2 * (MAP_NUM_VERTICES - 1))
+
+
+/**********************************************************************
+ * Default shader programs
+ *********************************************************************/
+
+static const char* vertex_shader_text =
+"#version 150\n"
+"uniform mat4 project;\n"
+"uniform mat4 modelview;\n"
+"in float x;\n"
+"in float y;\n"
+"in float z;\n"
+"\n"
+"void main()\n"
+"{\n"
+" gl_Position = project * modelview * vec4(x, y, z, 1.0);\n"
+"}\n";
+
+static const char* fragment_shader_text =
+"#version 150\n"
+"out vec4 color;\n"
+"void main()\n"
+"{\n"
+" color = vec4(0.2, 1.0, 0.2, 1.0); \n"
+"}\n";
+
+/**********************************************************************
+ * Values for shader uniforms
+ *********************************************************************/
+
+/* Frustum configuration */
+static GLfloat view_angle = 45.0f;
+static GLfloat aspect_ratio = 4.0f/3.0f;
+static GLfloat z_near = 1.0f;
+static GLfloat z_far = 100.f;
+
+/* Projection matrix */
+static GLfloat projection_matrix[16] = {
+ 1.0f, 0.0f, 0.0f, 0.0f,
+ 0.0f, 1.0f, 0.0f, 0.0f,
+ 0.0f, 0.0f, 1.0f, 0.0f,
+ 0.0f, 0.0f, 0.0f, 1.0f
+};
+
+/* Model view matrix */
+static GLfloat modelview_matrix[16] = {
+ 1.0f, 0.0f, 0.0f, 0.0f,
+ 0.0f, 1.0f, 0.0f, 0.0f,
+ 0.0f, 0.0f, 1.0f, 0.0f,
+ 0.0f, 0.0f, 0.0f, 1.0f
+};
+
+/**********************************************************************
+ * Heightmap vertex and index data
+ *********************************************************************/
+
+static GLfloat map_vertices[3][MAP_NUM_TOTAL_VERTICES];
+static GLuint map_line_indices[2*MAP_NUM_LINES];
+
+/* VAO and VBO handles for the heightmap mesh.
+ * These values are set up when the mesh is created and should
+ * not be used before make_mesh() has been called.
+ */
+static GLuint mesh;
+static GLuint mesh_vbo[4];
+
+/**********************************************************************
+ * OpenGL helper functions
+ *********************************************************************/
+
+/* Creates a shader object of the specified type using the specified text
+ */
+static GLuint make_shader(GLenum type, const char* text)
+{
+ GLuint shader;
+ GLint shader_ok;
+ GLsizei log_length;
+ char info_log[8192];
+
+ shader = glCreateShader(type);
+ if (shader != 0)
+ {
+ glShaderSource(shader, 1, (const GLchar**)&text, NULL);
+ glCompileShader(shader);
+ glGetShaderiv(shader, GL_COMPILE_STATUS, &shader_ok);
+ if (shader_ok != GL_TRUE)
+ {
+ fprintf(stderr, "ERROR: Failed to compile %s shader\n", (type == GL_FRAGMENT_SHADER) ? "fragment" : "vertex" );
+ glGetShaderInfoLog(shader, 8192, &log_length,info_log);
+ fprintf(stderr, "ERROR: \n%s\n\n", info_log);
+ glDeleteShader(shader);
+ shader = 0;
+ }
+ }
+ return shader;
+}
+
+/* Creates a program object using the specified vertex and fragment text
+ */
+static GLuint make_shader_program(const char* vs_text, const char* fs_text)
+{
+ GLuint program = 0u;
+ GLint program_ok;
+ GLuint vertex_shader = 0u;
+ GLuint fragment_shader = 0u;
+ GLsizei log_length;
+ char info_log[8192];
+
+ vertex_shader = make_shader(GL_VERTEX_SHADER, vs_text);
+ if (vertex_shader != 0u)
+ {
+ fragment_shader = make_shader(GL_FRAGMENT_SHADER, fs_text);
+ if (fragment_shader != 0u)
+ {
+ /* make the program that connects the two shaders and link it */
+ program = glCreateProgram();
+ if (program != 0u)
+ {
+ /* attach both shaders and link */
+ glAttachShader(program, vertex_shader);
+ glAttachShader(program, fragment_shader);
+ glLinkProgram(program);
+ glGetProgramiv(program, GL_LINK_STATUS, &program_ok);
+
+ if (program_ok != GL_TRUE)
+ {
+ fprintf(stderr, "ERROR, failed to link shader program\n");
+ glGetProgramInfoLog(program, 8192, &log_length, info_log);
+ fprintf(stderr, "ERROR: \n%s\n\n", info_log);
+ glDeleteProgram(program);
+ glDeleteShader(fragment_shader);
+ glDeleteShader(vertex_shader);
+ program = 0u;
+ }
+ }
+ }
+ else
+ {
+ fprintf(stderr, "ERROR: Unable to load fragment shader\n");
+ glDeleteShader(vertex_shader);
+ }
+ }
+ else
+ {
+ fprintf(stderr, "ERROR: Unable to load vertex shader\n");
+ }
+ return program;
+}
+
+/**********************************************************************
+ * Geometry creation functions
+ *********************************************************************/
+
+/* Generate vertices and indices for the heightmap
+ */
+static void init_map(void)
+{
+ int i;
+ int j;
+ int k;
+ GLfloat step = MAP_SIZE / (MAP_NUM_VERTICES - 1);
+ GLfloat x = 0.0f;
+ GLfloat z = 0.0f;
+ /* Create a flat grid */
+ k = 0;
+ for (i = 0 ; i < MAP_NUM_VERTICES ; ++i)
+ {
+ for (j = 0 ; j < MAP_NUM_VERTICES ; ++j)
+ {
+ map_vertices[0][k] = x;
+ map_vertices[1][k] = 0.0f;
+ map_vertices[2][k] = z;
+ z += step;
+ ++k;
+ }
+ x += step;
+ z = 0.0f;
+ }
+#if DEBUG_ENABLED
+ for (i = 0 ; i < MAP_NUM_TOTAL_VERTICES ; ++i)
+ {
+ printf ("Vertice %d (%f, %f, %f)\n",
+ i, map_vertices[0][i], map_vertices[1][i], map_vertices[2][i]);
+
+ }
+#endif
+ /* create indices */
+ /* line fan based on i
+ * i+1
+ * | / i + n + 1
+ * | /
+ * |/
+ * i --- i + n
+ */
+
+ /* close the top of the square */
+ k = 0;
+ for (i = 0 ; i < MAP_NUM_VERTICES -1 ; ++i)
+ {
+ map_line_indices[k++] = (i + 1) * MAP_NUM_VERTICES -1;
+ map_line_indices[k++] = (i + 2) * MAP_NUM_VERTICES -1;
+ }
+ /* close the right of the square */
+ for (i = 0 ; i < MAP_NUM_VERTICES -1 ; ++i)
+ {
+ map_line_indices[k++] = (MAP_NUM_VERTICES - 1) * MAP_NUM_VERTICES + i;
+ map_line_indices[k++] = (MAP_NUM_VERTICES - 1) * MAP_NUM_VERTICES + i + 1;
+ }
+
+ for (i = 0 ; i < (MAP_NUM_VERTICES - 1) ; ++i)
+ {
+ for (j = 0 ; j < (MAP_NUM_VERTICES - 1) ; ++j)
+ {
+ int ref = i * (MAP_NUM_VERTICES) + j;
+ map_line_indices[k++] = ref;
+ map_line_indices[k++] = ref + 1;
+
+ map_line_indices[k++] = ref;
+ map_line_indices[k++] = ref + MAP_NUM_VERTICES;
+
+ map_line_indices[k++] = ref;
+ map_line_indices[k++] = ref + MAP_NUM_VERTICES + 1;
+ }
+ }
+
+#ifdef DEBUG_ENABLED
+ for (k = 0 ; k < 2 * MAP_NUM_LINES ; k += 2)
+ {
+ int beg, end;
+ beg = map_line_indices[k];
+ end = map_line_indices[k+1];
+ printf ("Line %d: %d -> %d (%f, %f, %f) -> (%f, %f, %f)\n",
+ k / 2, beg, end,
+ map_vertices[0][beg], map_vertices[1][beg], map_vertices[2][beg],
+ map_vertices[0][end], map_vertices[1][end], map_vertices[2][end]);
+ }
+#endif
+}
+
+static void generate_heightmap__circle(float* center_x, float* center_y,
+ float* size, float* displacement)
+{
+ float sign;
+ /* random values for each element in the range [0, 1.0] */
+ *center_x = (MAP_SIZE * rand()) / (float) RAND_MAX;
+ *center_y = (MAP_SIZE * rand()) / (float) RAND_MAX;
+ *size = (MAX_CIRCLE_SIZE * rand()) / (float) RAND_MAX;
+ sign = (1.0f * rand()) / (float) RAND_MAX;
+ sign = (sign < DISPLACEMENT_SIGN_LIMIT) ? -1.0f : 1.0f;
+ *displacement = (sign * (MAX_DISPLACEMENT * rand())) / (float) RAND_MAX;
+}
+
+/* Run the specified number of iterations of the generation process for the
+ * heightmap
+ */
+static void update_map(int num_iter)
+{
+ assert(num_iter > 0);
+ while(num_iter)
+ {
+ /* center of the circle */
+ float center_x;
+ float center_z;
+ float circle_size;
+ float disp;
+ size_t ii;
+ generate_heightmap__circle(&center_x, &center_z, &circle_size, &disp);
+ disp = disp / 2.0f;
+ for (ii = 0u ; ii < MAP_NUM_TOTAL_VERTICES ; ++ii)
+ {
+ GLfloat dx = center_x - map_vertices[0][ii];
+ GLfloat dz = center_z - map_vertices[2][ii];
+ GLfloat pd = (2.0f * (float) sqrt((dx * dx) + (dz * dz))) / circle_size;
+ if (fabs(pd) <= 1.0f)
+ {
+ /* this vertex is within the circle */
+ GLfloat new_height = disp + (float) (cos(pd*3.14f)*disp);
+ map_vertices[1][ii] += new_height;
+ }
+ }
+ --num_iter;
+ }
+}
+
+/**********************************************************************
+ * OpenGL helper functions
+ *********************************************************************/
+
+/* Create VBO, IBO and VAO objects for the heightmap geometry and bind them to
+ * the specified program object
+ */
+static void make_mesh(GLuint program)
+{
+ GLuint attrloc;
+
+ glGenVertexArrays(1, &mesh);
+ glGenBuffers(4, mesh_vbo);
+ glBindVertexArray(mesh);
+ /* Prepare the index data for drawing through a buffer of indices */
+ glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, mesh_vbo[3]);
+ glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(GLuint)* MAP_NUM_LINES * 2, map_line_indices, GL_STATIC_DRAW);
+
+ /* Prepare the attributes for rendering */
+ attrloc = glGetAttribLocation(program, "x");
+ glBindBuffer(GL_ARRAY_BUFFER, mesh_vbo[0]);
+ glBufferData(GL_ARRAY_BUFFER, sizeof(GLfloat) * MAP_NUM_TOTAL_VERTICES, &map_vertices[0][0], GL_STATIC_DRAW);
+ glEnableVertexAttribArray(attrloc);
+ glVertexAttribPointer(attrloc, 1, GL_FLOAT, GL_FALSE, 0, 0);
+
+ attrloc = glGetAttribLocation(program, "z");
+ glBindBuffer(GL_ARRAY_BUFFER, mesh_vbo[2]);
+ glBufferData(GL_ARRAY_BUFFER, sizeof(GLfloat) * MAP_NUM_TOTAL_VERTICES, &map_vertices[2][0], GL_STATIC_DRAW);
+ glEnableVertexAttribArray(attrloc);
+ glVertexAttribPointer(attrloc, 1, GL_FLOAT, GL_FALSE, 0, 0);
+
+ attrloc = glGetAttribLocation(program, "y");
+ glBindBuffer(GL_ARRAY_BUFFER, mesh_vbo[1]);
+ glBufferData(GL_ARRAY_BUFFER, sizeof(GLfloat) * MAP_NUM_TOTAL_VERTICES, &map_vertices[1][0], GL_DYNAMIC_DRAW);
+ glEnableVertexAttribArray(attrloc);
+ glVertexAttribPointer(attrloc, 1, GL_FLOAT, GL_FALSE, 0, 0);
+}
+
+/* Update VBO vertices from source data
+ */
+static void update_mesh(void)
+{
+ glBufferSubData(GL_ARRAY_BUFFER, 0, sizeof(GLfloat) * MAP_NUM_TOTAL_VERTICES, &map_vertices[1][0]);
+}
+
+/**********************************************************************
+ * GLFW callback functions
+ *********************************************************************/
+
+static void key_callback(GLFWwindow* window, int key, int scancode, int action, int mods)
+{
+ switch(key)
+ {
+ case GLFW_KEY_ESCAPE:
+ /* Exit program on Escape */
+ glfwSetWindowShouldClose(window, GLFW_TRUE);
+ break;
+ }
+}
+
+static void error_callback(int error, const char* description)
+{
+ fprintf(stderr, "Error: %s\n", description);
+}
+
+int main(int argc, char** argv)
+{
+ GLFWwindow* window;
+ int iter;
+ double dt;
+ double last_update_time;
+ int frame;
+ float f;
+ GLint uloc_modelview;
+ GLint uloc_project;
+ int width, height;
+
+ GLuint shader_program;
+
+ glfwSetErrorCallback(error_callback);
+
+ if (!glfwInit())
+ exit(EXIT_FAILURE);
+
+ glfwWindowHint(GLFW_RESIZABLE, GLFW_FALSE);
+ glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
+ glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 2);
+ glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
+ glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GLFW_TRUE);
+
+ window = glfwCreateWindow(800, 600, "GLFW OpenGL3 Heightmap demo", NULL, NULL);
+ if (! window )
+ {
+ glfwTerminate();
+ exit(EXIT_FAILURE);
+ }
+
+ /* Register events callback */
+ glfwSetKeyCallback(window, key_callback);
+
+ glfwMakeContextCurrent(window);
+ gladLoadGL(glfwGetProcAddress);
+
+ /* Prepare opengl resources for rendering */
+ shader_program = make_shader_program(vertex_shader_text, fragment_shader_text);
+
+ if (shader_program == 0u)
+ {
+ glfwTerminate();
+ exit(EXIT_FAILURE);
+ }
+
+ glUseProgram(shader_program);
+ uloc_project = glGetUniformLocation(shader_program, "project");
+ uloc_modelview = glGetUniformLocation(shader_program, "modelview");
+
+ /* Compute the projection matrix */
+ f = 1.0f / tanf(view_angle / 2.0f);
+ projection_matrix[0] = f / aspect_ratio;
+ projection_matrix[5] = f;
+ projection_matrix[10] = (z_far + z_near)/ (z_near - z_far);
+ projection_matrix[11] = -1.0f;
+ projection_matrix[14] = 2.0f * (z_far * z_near) / (z_near - z_far);
+ glUniformMatrix4fv(uloc_project, 1, GL_FALSE, projection_matrix);
+
+ /* Set the camera position */
+ modelview_matrix[12] = -5.0f;
+ modelview_matrix[13] = -5.0f;
+ modelview_matrix[14] = -20.0f;
+ glUniformMatrix4fv(uloc_modelview, 1, GL_FALSE, modelview_matrix);
+
+ /* Create mesh data */
+ init_map();
+ make_mesh(shader_program);
+
+ /* Create vao + vbo to store the mesh */
+ /* Create the vbo to store all the information for the grid and the height */
+
+ /* setup the scene ready for rendering */
+ glfwGetFramebufferSize(window, &width, &height);
+ glViewport(0, 0, width, height);
+ glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
+
+ /* main loop */
+ frame = 0;
+ iter = 0;
+ last_update_time = glfwGetTime();
+
+ while (!glfwWindowShouldClose(window))
+ {
+ ++frame;
+ /* render the next frame */
+ glClear(GL_COLOR_BUFFER_BIT);
+ glDrawElements(GL_LINES, 2* MAP_NUM_LINES , GL_UNSIGNED_INT, 0);
+
+ /* display and process events through callbacks */
+ glfwSwapBuffers(window);
+ glfwPollEvents();
+ /* Check the frame rate and update the heightmap if needed */
+ dt = glfwGetTime();
+ if ((dt - last_update_time) > 0.2)
+ {
+ /* generate the next iteration of the heightmap */
+ if (iter < MAX_ITER)
+ {
+ update_map(NUM_ITER_AT_A_TIME);
+ update_mesh();
+ iter += NUM_ITER_AT_A_TIME;
+ }
+ last_update_time = dt;
+ frame = 0;
+ }
+ }
+
+ glfwTerminate();
+ exit(EXIT_SUCCESS);
+}
+
diff --git a/chromium/third_party/dawn/third_party/glfw/examples/offscreen.c b/chromium/third_party/dawn/third_party/glfw/examples/offscreen.c
new file mode 100644
index 00000000000..e28528608b0
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/examples/offscreen.c
@@ -0,0 +1,165 @@
+//========================================================================
+// Offscreen rendering example
+// Copyright (c) Camilla Löwy <elmindreda@glfw.org>
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would
+// be appreciated but is not required.
+//
+// 2. Altered source versions must be plainly marked as such, and must not
+// be misrepresented as being the original software.
+//
+// 3. This notice may not be removed or altered from any source
+// distribution.
+//
+//========================================================================
+
+#define GLAD_GL_IMPLEMENTATION
+#include <glad/gl.h>
+#define GLFW_INCLUDE_NONE
+#include <GLFW/glfw3.h>
+
+#include "linmath.h"
+
+#include <stdlib.h>
+#include <stdio.h>
+
+#define STB_IMAGE_WRITE_IMPLEMENTATION
+#include <stb_image_write.h>
+
+static const struct
+{
+ float x, y;
+ float r, g, b;
+} vertices[3] =
+{
+ { -0.6f, -0.4f, 1.f, 0.f, 0.f },
+ { 0.6f, -0.4f, 0.f, 1.f, 0.f },
+ { 0.f, 0.6f, 0.f, 0.f, 1.f }
+};
+
+static const char* vertex_shader_text =
+"#version 110\n"
+"uniform mat4 MVP;\n"
+"attribute vec3 vCol;\n"
+"attribute vec2 vPos;\n"
+"varying vec3 color;\n"
+"void main()\n"
+"{\n"
+" gl_Position = MVP * vec4(vPos, 0.0, 1.0);\n"
+" color = vCol;\n"
+"}\n";
+
+static const char* fragment_shader_text =
+"#version 110\n"
+"varying vec3 color;\n"
+"void main()\n"
+"{\n"
+" gl_FragColor = vec4(color, 1.0);\n"
+"}\n";
+
+static void error_callback(int error, const char* description)
+{
+ fprintf(stderr, "Error: %s\n", description);
+}
+
+int main(void)
+{
+ GLFWwindow* window;
+ GLuint vertex_buffer, vertex_shader, fragment_shader, program;
+ GLint mvp_location, vpos_location, vcol_location;
+ float ratio;
+ int width, height;
+ mat4x4 mvp;
+ char* buffer;
+
+ glfwSetErrorCallback(error_callback);
+
+ glfwInitHint(GLFW_COCOA_MENUBAR, GLFW_FALSE);
+
+ if (!glfwInit())
+ exit(EXIT_FAILURE);
+
+ glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 2);
+ glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 0);
+ glfwWindowHint(GLFW_VISIBLE, GLFW_FALSE);
+
+ window = glfwCreateWindow(640, 480, "Simple example", NULL, NULL);
+ if (!window)
+ {
+ glfwTerminate();
+ exit(EXIT_FAILURE);
+ }
+
+ glfwMakeContextCurrent(window);
+ gladLoadGL(glfwGetProcAddress);
+
+ // NOTE: OpenGL error checks have been omitted for brevity
+
+ glGenBuffers(1, &vertex_buffer);
+ glBindBuffer(GL_ARRAY_BUFFER, vertex_buffer);
+ glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW);
+
+ vertex_shader = glCreateShader(GL_VERTEX_SHADER);
+ glShaderSource(vertex_shader, 1, &vertex_shader_text, NULL);
+ glCompileShader(vertex_shader);
+
+ fragment_shader = glCreateShader(GL_FRAGMENT_SHADER);
+ glShaderSource(fragment_shader, 1, &fragment_shader_text, NULL);
+ glCompileShader(fragment_shader);
+
+ program = glCreateProgram();
+ glAttachShader(program, vertex_shader);
+ glAttachShader(program, fragment_shader);
+ glLinkProgram(program);
+
+ mvp_location = glGetUniformLocation(program, "MVP");
+ vpos_location = glGetAttribLocation(program, "vPos");
+ vcol_location = glGetAttribLocation(program, "vCol");
+
+ glEnableVertexAttribArray(vpos_location);
+ glVertexAttribPointer(vpos_location, 2, GL_FLOAT, GL_FALSE,
+ sizeof(vertices[0]), (void*) 0);
+ glEnableVertexAttribArray(vcol_location);
+ glVertexAttribPointer(vcol_location, 3, GL_FLOAT, GL_FALSE,
+ sizeof(vertices[0]), (void*) (sizeof(float) * 2));
+
+ glfwGetFramebufferSize(window, &width, &height);
+ ratio = width / (float) height;
+
+ glViewport(0, 0, width, height);
+ glClear(GL_COLOR_BUFFER_BIT);
+
+ mat4x4_ortho(mvp, -ratio, ratio, -1.f, 1.f, 1.f, -1.f);
+
+ glUseProgram(program);
+ glUniformMatrix4fv(mvp_location, 1, GL_FALSE, (const GLfloat*) mvp);
+ glDrawArrays(GL_TRIANGLES, 0, 3);
+ glFinish();
+
+ buffer = calloc(4, width * height);
+ glReadPixels(0, 0, width, height, GL_RGBA, GL_UNSIGNED_BYTE, buffer);
+
+ // Write image Y-flipped because OpenGL's framebuffer origin is at
+ // the lower left while PNG rows run top to bottom
+ stbi_write_png("offscreen.png",
+ width, height, 4,
+ buffer + (width * 4 * (height - 1)),
+ -width * 4);
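+ // (Passing a pointer to the last row together with a negative
+ // stride makes stb_image_write walk the rows from top to bottom,
+ // undoing the flip without an extra copy.)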
+
+ free(buffer);
+
+ glfwDestroyWindow(window);
+
+ glfwTerminate();
+ exit(EXIT_SUCCESS);
+}
+
diff --git a/chromium/third_party/dawn/third_party/glfw/examples/particles.c b/chromium/third_party/dawn/third_party/glfw/examples/particles.c
new file mode 100644
index 00000000000..baafe826aa6
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/examples/particles.c
@@ -0,0 +1,1074 @@
+//========================================================================
+// A simple particle engine with threaded physics
+// Copyright (c) Marcus Geelnard
+// Copyright (c) Camilla Löwy <elmindreda@glfw.org>
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would
+// be appreciated but is not required.
+//
+// 2. Altered source versions must be plainly marked as such, and must not
+// be misrepresented as being the original software.
+//
+// 3. This notice may not be removed or altered from any source
+// distribution.
+//
+//========================================================================
+
+#if defined(_MSC_VER)
+ // Make MS math.h define M_PI
+ #define _USE_MATH_DEFINES
+#endif
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <math.h>
+#include <time.h>
+
+#include <tinycthread.h>
+#include <getopt.h>
+#include <linmath.h>
+
+#define GLAD_GL_IMPLEMENTATION
+#include <glad/gl.h>
+#define GLFW_INCLUDE_NONE
+#include <GLFW/glfw3.h>
+
+// Define tokens for GL_EXT_separate_specular_color if not already defined
+#ifndef GL_EXT_separate_specular_color
+#define GL_LIGHT_MODEL_COLOR_CONTROL_EXT 0x81F8
+#define GL_SINGLE_COLOR_EXT 0x81F9
+#define GL_SEPARATE_SPECULAR_COLOR_EXT 0x81FA
+#endif // GL_EXT_separate_specular_color
+
+
+//========================================================================
+// Type definitions
+//========================================================================
+
+typedef struct
+{
+ float x, y, z;
+} Vec3;
+
+// This structure is used for interleaved vertex arrays (see the
+// draw_particles function)
+//
+// NOTE: This structure SHOULD be packed on most systems. It uses 32-bit fields
+// on 32-bit boundaries, and is a multiple of 64 bits in total (6x32=3x64). If
+// it does not work, try using pragmas or whatever to force the structure to be
+// packed.
+typedef struct
+{
+ GLfloat s, t; // Texture coordinates
+ GLuint rgba; // Color (four ubytes packed into an uint)
+ GLfloat x, y, z; // Vertex coordinates
+} Vertex;
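+
+// A compile-time version of the packing check in the NOTE above could
+// read as follows (C11 or later only, so it is shown here purely as an
+// illustrative sketch rather than added to the build):
+//
+// _Static_assert(sizeof(Vertex) == 6 * sizeof(GLfloat),
+// "Vertex must stay tightly packed for glInterleavedArrays");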
+
+
+//========================================================================
+// Program control global variables
+//========================================================================
+
+// Window dimensions
+float aspect_ratio;
+
+// "wireframe" flag (true if we use wireframe view)
+int wireframe;
+
+// Thread synchronization
+struct {
+ double t; // Time (s)
+ float dt; // Time since last frame (s)
+ int p_frame; // Particle physics frame number
+ int d_frame; // Particle draw frame number
+ cnd_t p_done; // Condition: particle physics done
+ cnd_t d_done; // Condition: particle draw done
+ mtx_t particles_lock; // Particles data sharing mutex
+} thread_sync;
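+
+// The two frame counters above implement a simple handshake between the
+// render thread and the physics thread: draw_particles waits until
+// p_frame > d_frame (a fresh physics step is available) and then bumps
+// d_frame, while physics_thread_main waits until p_frame <= d_frame
+// (the previous step has been consumed) before computing the next one
+// and bumping p_frame. The condition waits use a short timeout so that
+// neither thread can hang when the window is closed.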
+
+
+//========================================================================
+// Texture declarations (we hard-code them into the source code, since
+// they are so simple)
+//========================================================================
+
+#define P_TEX_WIDTH 8 // Particle texture dimensions
+#define P_TEX_HEIGHT 8
+#define F_TEX_WIDTH 16 // Floor texture dimensions
+#define F_TEX_HEIGHT 16
+
+// Texture object IDs
+GLuint particle_tex_id, floor_tex_id;
+
+// Particle texture (a simple spot)
+const unsigned char particle_texture[ P_TEX_WIDTH * P_TEX_HEIGHT ] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x11, 0x22, 0x22, 0x11, 0x00, 0x00,
+ 0x00, 0x11, 0x33, 0x88, 0x77, 0x33, 0x11, 0x00,
+ 0x00, 0x22, 0x88, 0xff, 0xee, 0x77, 0x22, 0x00,
+ 0x00, 0x22, 0x77, 0xee, 0xff, 0x88, 0x22, 0x00,
+ 0x00, 0x11, 0x33, 0x77, 0x88, 0x33, 0x11, 0x00,
+ 0x00, 0x00, 0x11, 0x33, 0x22, 0x11, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+// Floor texture (your basic checkered floor)
+const unsigned char floor_texture[ F_TEX_WIDTH * F_TEX_HEIGHT ] = {
+ 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0xff, 0xf0, 0xcc, 0xf0, 0xf0, 0xf0, 0xff, 0xf0, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0xf0, 0xcc, 0xee, 0xff, 0xf0, 0xf0, 0xf0, 0xf0, 0x30, 0x66, 0x30, 0x30, 0x30, 0x20, 0x30, 0x30,
+ 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xee, 0xf0, 0xf0, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0xf0, 0xf0, 0xf0, 0xf0, 0xcc, 0xf0, 0xf0, 0xf0, 0x30, 0x30, 0x55, 0x30, 0x30, 0x44, 0x30, 0x30,
+ 0xf0, 0xdd, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0x33, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xff, 0xf0, 0xf0, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x60, 0x30,
+ 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0x33, 0x33, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x33, 0x30, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x20, 0x30, 0x30, 0xf0, 0xff, 0xf0, 0xf0, 0xdd, 0xf0, 0xf0, 0xff,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x55, 0x33, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xff, 0xf0, 0xf0,
+ 0x30, 0x44, 0x66, 0x30, 0x30, 0x30, 0x30, 0x30, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0xf0, 0xf0, 0xf0, 0xaa, 0xf0, 0xf0, 0xcc, 0xf0,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0xff, 0xf0, 0xf0, 0xf0, 0xff, 0xf0, 0xdd, 0xf0,
+ 0x30, 0x30, 0x30, 0x77, 0x30, 0x30, 0x30, 0x30, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0,
+};
+
+
+//========================================================================
+// These are fixed constants that control the particle engine. In a
+// modular world, these values should be variables...
+//========================================================================
+
+// Maximum number of particles
+#define MAX_PARTICLES 3000
+
+// Life span of a particle (in seconds)
+#define LIFE_SPAN 8.f
+
+// A new particle is born every BIRTH_INTERVAL seconds
+#define BIRTH_INTERVAL (LIFE_SPAN/(float)MAX_PARTICLES)
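+// (With LIFE_SPAN = 8 s and MAX_PARTICLES = 3000 this is one birth
+// roughly every 2.7 ms, so at steady state nearly the whole particle
+// pool is alive at once: 8 s / (8 s / 3000) = 3000 live particles.)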
+
+// Particle size (meters)
+#define PARTICLE_SIZE 0.7f
+
+// Gravitational constant (m/s^2)
+#define GRAVITY 9.8f
+
+// Base initial velocity (m/s)
+#define VELOCITY 8.f
+
+// Bounce friction (1.0 = no friction, 0.0 = maximum friction)
+#define FRICTION 0.75f
+
+// "Fountain" height (m)
+#define FOUNTAIN_HEIGHT 3.f
+
+// Fountain radius (m)
+#define FOUNTAIN_RADIUS 1.6f
+
+// Minimum delta-time for particle physics (s)
+#define MIN_DELTA_T (BIRTH_INTERVAL * 0.5f)
+
+
+//========================================================================
+// Particle system global variables
+//========================================================================
+
+// This structure holds all state for a single particle
+typedef struct {
+ float x,y,z; // Position in space
+ float vx,vy,vz; // Velocity vector
+ float r,g,b; // Color of particle
+ float life; // Life of particle (1.0 = newborn, < 0.0 = dead)
+ int active; // Tells if this particle is active
+} PARTICLE;
+
+// Global array holding all particles
+static PARTICLE particles[MAX_PARTICLES];
+
+// Global variable holding the age of the youngest particle
+static float min_age;
+
+// Color of latest born particle (used for fountain lighting)
+static float glow_color[4];
+
+// Position of latest born particle (used for fountain lighting)
+static float glow_pos[4];
+
+
+//========================================================================
+// Object material and fog configuration constants
+//========================================================================
+
+const GLfloat fountain_diffuse[4] = { 0.7f, 1.f, 1.f, 1.f };
+const GLfloat fountain_specular[4] = { 1.f, 1.f, 1.f, 1.f };
+const GLfloat fountain_shininess = 12.f;
+const GLfloat floor_diffuse[4] = { 1.f, 0.6f, 0.6f, 1.f };
+const GLfloat floor_specular[4] = { 0.6f, 0.6f, 0.6f, 1.f };
+const GLfloat floor_shininess = 18.f;
+const GLfloat fog_color[4] = { 0.1f, 0.1f, 0.1f, 1.f };
+
+
+//========================================================================
+// Print usage information
+//========================================================================
+
+static void usage(void)
+{
+ printf("Usage: particles [-bfhs]\n");
+ printf("Options:\n");
+ printf(" -f Run in full screen\n");
+ printf(" -h Display this help\n");
+ printf(" -s Run program as single thread (default is to use two threads)\n");
+ printf("\n");
+ printf("Program runtime controls:\n");
+ printf(" W Toggle wireframe mode\n");
+ printf(" Esc Exit program\n");
+}
+
+
+//========================================================================
+// Initialize a new particle
+//========================================================================
+
+static void init_particle(PARTICLE *p, double t)
+{
+ float xy_angle, velocity;
+
+ // Start position of particle is at the fountain blow-out
+ p->x = 0.f;
+ p->y = 0.f;
+ p->z = FOUNTAIN_HEIGHT;
+
+ // Start velocity is up (Z)...
+ p->vz = 0.7f + (0.3f / 4096.f) * (float) (rand() & 4095);
+
+ // ...and a randomly chosen X/Y direction
+ xy_angle = (2.f * (float) M_PI / 4096.f) * (float) (rand() & 4095);
+ p->vx = 0.4f * (float) cos(xy_angle);
+ p->vy = 0.4f * (float) sin(xy_angle);
+
+ // Scale velocity vector according to a time-varying velocity
+ velocity = VELOCITY * (0.8f + 0.1f * (float) (sin(0.5 * t) + sin(1.31 * t)));
+ p->vx *= velocity;
+ p->vy *= velocity;
+ p->vz *= velocity;
+
+ // Color is time-varying
+ p->r = 0.7f + 0.3f * (float) sin(0.34 * t + 0.1);
+ p->g = 0.6f + 0.4f * (float) sin(0.63 * t + 1.1);
+ p->b = 0.6f + 0.4f * (float) sin(0.91 * t + 2.1);
+
+ // Store settings for fountain glow lighting
+ glow_pos[0] = 0.4f * (float) sin(1.34 * t);
+ glow_pos[1] = 0.4f * (float) sin(3.11 * t);
+ glow_pos[2] = FOUNTAIN_HEIGHT + 1.f;
+ glow_pos[3] = 1.f;
+ glow_color[0] = p->r;
+ glow_color[1] = p->g;
+ glow_color[2] = p->b;
+ glow_color[3] = 1.f;
+
+ // The particle is new-born and active
+ p->life = 1.f;
+ p->active = 1;
+}
+
+
+//========================================================================
+// Update a particle
+//========================================================================
+
+#define FOUNTAIN_R2 (FOUNTAIN_RADIUS+PARTICLE_SIZE/2)*(FOUNTAIN_RADIUS+PARTICLE_SIZE/2)
+
+static void update_particle(PARTICLE *p, float dt)
+{
+ // If the particle is not active, we need not do anything
+ if (!p->active)
+ return;
+
+ // The particle is getting older...
+ p->life -= dt * (1.f / LIFE_SPAN);
+
+ // Did the particle die?
+ if (p->life <= 0.f)
+ {
+ p->active = 0;
+ return;
+ }
+
+ // Apply gravity
+ p->vz = p->vz - GRAVITY * dt;
+
+ // Update particle position
+ p->x = p->x + p->vx * dt;
+ p->y = p->y + p->vy * dt;
+ p->z = p->z + p->vz * dt;
+
+ // Simple collision detection + response
+ if (p->vz < 0.f)
+ {
+ // Particles should bounce on the fountain (with friction)
+ if ((p->x * p->x + p->y * p->y) < FOUNTAIN_R2 &&
+ p->z < (FOUNTAIN_HEIGHT + PARTICLE_SIZE / 2))
+ {
+ p->vz = -FRICTION * p->vz;
+ p->z = FOUNTAIN_HEIGHT + PARTICLE_SIZE / 2 +
+ FRICTION * (FOUNTAIN_HEIGHT +
+ PARTICLE_SIZE / 2 - p->z);
+ }
+
+ // Particles should bounce on the floor (with friction)
+ else if (p->z < PARTICLE_SIZE / 2)
+ {
+ p->vz = -FRICTION * p->vz;
+ p->z = PARTICLE_SIZE / 2 +
+ FRICTION * (PARTICLE_SIZE / 2 - p->z);
+ }
+ }
+}
+
+
+//========================================================================
+// The main frame for the particle engine. Called once per frame.
+//========================================================================
+
+static void particle_engine(double t, float dt)
+{
+ int i;
+ float dt2;
+
+ // Update particles (iterated several times per frame if dt is too large)
+ while (dt > 0.f)
+ {
+ // Calculate delta time for this iteration
+ dt2 = dt < MIN_DELTA_T ? dt : MIN_DELTA_T;
+
+ for (i = 0; i < MAX_PARTICLES; i++)
+ update_particle(&particles[i], dt2);
+
+ min_age += dt2;
+
+ // Should we create any new particle(s)?
+ while (min_age >= BIRTH_INTERVAL)
+ {
+ min_age -= BIRTH_INTERVAL;
+
+ // Find a dead particle to replace with a new one
+ for (i = 0; i < MAX_PARTICLES; i++)
+ {
+ if (!particles[i].active)
+ {
+ init_particle(&particles[i], t + min_age);
+ update_particle(&particles[i], min_age);
+ break;
+ }
+ }
+ }
+
+ dt -= dt2;
+ }
+}
+
+
+//========================================================================
+// Draw all active particles. We use OpenGL 1.1 vertex
+// arrays for this in order to accelerate the drawing.
+//========================================================================
+
+#define BATCH_PARTICLES 70 // Number of particles to draw in each batch
+ // (70 corresponds to roughly 6.6 KB of
+ // vertex data = will not blow the L1 data
+ // cache on most CPUs)
+#define PARTICLE_VERTS 4 // Number of vertices per particle
+
+static void draw_particles(GLFWwindow* window, double t, float dt)
+{
+ int i, particle_count;
+ Vertex vertex_array[BATCH_PARTICLES * PARTICLE_VERTS];
+ Vertex* vptr;
+ float alpha;
+ GLuint rgba;
+ Vec3 quad_lower_left, quad_lower_right;
+ GLfloat mat[16];
+ PARTICLE* pptr;
+
+ // Here comes the real trick with flat single-primitive objects
+ // (so-called "billboards"): we must rotate the textured primitive so
+ // that it always faces the viewer (is coplanar with the view-plane).
+ // We:
+ // 1) Create the primitive around the origin (0,0,0)
+ // 2) Rotate it so that it is coplanar with the view plane
+ // 3) Translate it according to the particle position
+ // Note that 1) and 2) are the same for all particles (done only once).
+
+ // Get modelview matrix. We will only use the upper left 3x3 part of
+ // the matrix, which represents the rotation.
+ glGetFloatv(GL_MODELVIEW_MATRIX, mat);
+
+ // 1) & 2) We do it in one swift step:
+ // Although not obvious, the following six lines represent two matrix/
+ // vector multiplications. The matrix is the inverse of the 3x3 rotation
+ // matrix (i.e. its transpose, since a pure rotation matrix is
+ // orthogonal), and the two vectors represent the lower left corner of
+ // the quad, PARTICLE_SIZE/2 * (-1,-1,0), and the lower right corner,
+ // PARTICLE_SIZE/2 * (1,-1,0). The upper left/right corners of the quad
+ // are always the negatives of the opposite corners (regardless of
+ // rotation).
+ quad_lower_left.x = (-PARTICLE_SIZE / 2) * (mat[0] + mat[1]);
+ quad_lower_left.y = (-PARTICLE_SIZE / 2) * (mat[4] + mat[5]);
+ quad_lower_left.z = (-PARTICLE_SIZE / 2) * (mat[8] + mat[9]);
+ quad_lower_right.x = (PARTICLE_SIZE / 2) * (mat[0] - mat[1]);
+ quad_lower_right.y = (PARTICLE_SIZE / 2) * (mat[4] - mat[5]);
+ quad_lower_right.z = (PARTICLE_SIZE / 2) * (mat[8] - mat[9]);
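+
+ // In matrix form the six lines above are (with R the upper-left 3x3
+ // rotation block of the modelview matrix, assumed to contain no
+ // scaling):
+ // quad_lower_left = transpose(R) * (PARTICLE_SIZE/2 * (-1,-1,0))
+ // quad_lower_right = transpose(R) * (PARTICLE_SIZE/2 * ( 1,-1,0))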
+
+ // Don't update z-buffer, since all particles are transparent!
+ glDepthMask(GL_FALSE);
+
+ glEnable(GL_BLEND);
+ glBlendFunc(GL_SRC_ALPHA, GL_ONE);
+
+ // Select particle texture
+ if (!wireframe)
+ {
+ glEnable(GL_TEXTURE_2D);
+ glBindTexture(GL_TEXTURE_2D, particle_tex_id);
+ }
+
+ // Set up vertex arrays. We use interleaved arrays, which are easier to
+ // handle (in most situations) and give a linear memory access pattern
+ // (which may give better performance in some situations).
+ // GL_T2F_C4UB_V3F means: 2 floats for texture coords,
+ // 4 ubytes for color and 3 floats for vertex coord (in that order).
+ // Most OpenGL cards / drivers are optimized for this format.
+ glInterleavedArrays(GL_T2F_C4UB_V3F, 0, vertex_array);
+
+ // Wait for particle physics thread to be done
+ mtx_lock(&thread_sync.particles_lock);
+ while (!glfwWindowShouldClose(window) &&
+ thread_sync.p_frame <= thread_sync.d_frame)
+ {
+ struct timespec ts;
+ clock_gettime(CLOCK_REALTIME, &ts);
+ ts.tv_nsec += 100 * 1000 * 1000;
+ ts.tv_sec += ts.tv_nsec / (1000 * 1000 * 1000);
+ ts.tv_nsec %= 1000 * 1000 * 1000;
+ cnd_timedwait(&thread_sync.p_done, &thread_sync.particles_lock, &ts);
+ }
+
+ // Store the frame time and delta time for the physics thread
+ thread_sync.t = t;
+ thread_sync.dt = dt;
+
+ // Update frame counter
+ thread_sync.d_frame++;
+
+ // Loop through all particles and build vertex arrays.
+ particle_count = 0;
+ vptr = vertex_array;
+ pptr = particles;
+
+ for (i = 0; i < MAX_PARTICLES; i++)
+ {
+ if (pptr->active)
+ {
+ // Calculate particle intensity (we set it to max during 75%
+ // of its life, then it fades out)
+ alpha = 4.f * pptr->life;
+ if (alpha > 1.f)
+ alpha = 1.f;
+
+ // Convert color from float to 8-bit (store it in a 32-bit
+ // integer using endian independent type casting)
+ ((GLubyte*) &rgba)[0] = (GLubyte)(pptr->r * 255.f);
+ ((GLubyte*) &rgba)[1] = (GLubyte)(pptr->g * 255.f);
+ ((GLubyte*) &rgba)[2] = (GLubyte)(pptr->b * 255.f);
+ ((GLubyte*) &rgba)[3] = (GLubyte)(alpha * 255.f);
+
+ // 3) Translate the quad to the correct position in modelview
+ // space and store its parameters in vertex arrays (we also
+ // store texture coord and color information for each vertex).
+
+ // Lower left corner
+ vptr->s = 0.f;
+ vptr->t = 0.f;
+ vptr->rgba = rgba;
+ vptr->x = pptr->x + quad_lower_left.x;
+ vptr->y = pptr->y + quad_lower_left.y;
+ vptr->z = pptr->z + quad_lower_left.z;
+ vptr ++;
+
+ // Lower right corner
+ vptr->s = 1.f;
+ vptr->t = 0.f;
+ vptr->rgba = rgba;
+ vptr->x = pptr->x + quad_lower_right.x;
+ vptr->y = pptr->y + quad_lower_right.y;
+ vptr->z = pptr->z + quad_lower_right.z;
+ vptr ++;
+
+ // Upper right corner
+ vptr->s = 1.f;
+ vptr->t = 1.f;
+ vptr->rgba = rgba;
+ vptr->x = pptr->x - quad_lower_left.x;
+ vptr->y = pptr->y - quad_lower_left.y;
+ vptr->z = pptr->z - quad_lower_left.z;
+ vptr ++;
+
+ // Upper left corner
+ vptr->s = 0.f;
+ vptr->t = 1.f;
+ vptr->rgba = rgba;
+ vptr->x = pptr->x - quad_lower_right.x;
+ vptr->y = pptr->y - quad_lower_right.y;
+ vptr->z = pptr->z - quad_lower_right.z;
+ vptr ++;
+
+ // Increase count of drawable particles
+ particle_count ++;
+ }
+
+ // If we have filled up one batch of particles, draw it as a set
+ // of quads using glDrawArrays.
+ if (particle_count >= BATCH_PARTICLES)
+ {
+ // The first argument tells which primitive type we use (QUAD)
+ // The second argument tells the index of the first vertex (0)
+ // The last argument is the vertex count
+ glDrawArrays(GL_QUADS, 0, PARTICLE_VERTS * particle_count);
+ particle_count = 0;
+ vptr = vertex_array;
+ }
+
+ // Next particle
+ pptr++;
+ }
+
+ // We are done with the particle data
+ mtx_unlock(&thread_sync.particles_lock);
+ cnd_signal(&thread_sync.d_done);
+
+ // Draw final batch of particles (if any)
+ glDrawArrays(GL_QUADS, 0, PARTICLE_VERTS * particle_count);
+
+ // Disable vertex arrays (Note: glInterleavedArrays implicitly called
+ // glEnableClientState for vertex, texture coord and color arrays)
+ glDisableClientState(GL_VERTEX_ARRAY);
+ glDisableClientState(GL_TEXTURE_COORD_ARRAY);
+ glDisableClientState(GL_COLOR_ARRAY);
+
+ glDisable(GL_TEXTURE_2D);
+ glDisable(GL_BLEND);
+
+ glDepthMask(GL_TRUE);
+}
+
+
+//========================================================================
+// Fountain geometry specification
+//========================================================================
+
+#define FOUNTAIN_SIDE_POINTS 14
+#define FOUNTAIN_SWEEP_STEPS 32
+
+static const float fountain_side[FOUNTAIN_SIDE_POINTS * 2] =
+{
+ 1.2f, 0.f, 1.f, 0.2f, 0.41f, 0.3f, 0.4f, 0.35f,
+ 0.4f, 1.95f, 0.41f, 2.f, 0.8f, 2.2f, 1.2f, 2.4f,
+ 1.5f, 2.7f, 1.55f,2.95f, 1.6f, 3.f, 1.f, 3.f,
+ 0.5f, 3.f, 0.f, 3.f
+};
+
+static const float fountain_normal[FOUNTAIN_SIDE_POINTS * 2] =
+{
+ 1.0000f, 0.0000f, 0.6428f, 0.7660f, 0.3420f, 0.9397f, 1.0000f, 0.0000f,
+ 1.0000f, 0.0000f, 0.3420f,-0.9397f, 0.4226f,-0.9063f, 0.5000f,-0.8660f,
+ 0.7660f,-0.6428f, 0.9063f,-0.4226f, 0.0000f,1.00000f, 0.0000f,1.00000f,
+ 0.0000f,1.00000f, 0.0000f,1.00000f
+};
+
+
+//========================================================================
+// Draw a fountain
+//========================================================================
+
+static void draw_fountain(void)
+{
+ static GLuint fountain_list = 0;
+ double angle;
+ float x, y;
+ int m, n;
+
+ // The first time, we build the fountain display list
+ if (!fountain_list)
+ {
+ fountain_list = glGenLists(1);
+ glNewList(fountain_list, GL_COMPILE_AND_EXECUTE);
+
+ glMaterialfv(GL_FRONT, GL_DIFFUSE, fountain_diffuse);
+ glMaterialfv(GL_FRONT, GL_SPECULAR, fountain_specular);
+ glMaterialf(GL_FRONT, GL_SHININESS, fountain_shininess);
+
+ // Build fountain using triangle strips
+ for (n = 0; n < FOUNTAIN_SIDE_POINTS - 1; n++)
+ {
+ glBegin(GL_TRIANGLE_STRIP);
+ for (m = 0; m <= FOUNTAIN_SWEEP_STEPS; m++)
+ {
+ angle = (double) m * (2.0 * M_PI / (double) FOUNTAIN_SWEEP_STEPS);
+ x = (float) cos(angle);
+ y = (float) sin(angle);
+
+ // Draw triangle strip
+ glNormal3f(x * fountain_normal[n * 2 + 2],
+ y * fountain_normal[n * 2 + 2],
+ fountain_normal[n * 2 + 3]);
+ glVertex3f(x * fountain_side[n * 2 + 2],
+ y * fountain_side[n * 2 + 2],
+ fountain_side[n * 2 +3 ]);
+ glNormal3f(x * fountain_normal[n * 2],
+ y * fountain_normal[n * 2],
+ fountain_normal[n * 2 + 1]);
+ glVertex3f(x * fountain_side[n * 2],
+ y * fountain_side[n * 2],
+ fountain_side[n * 2 + 1]);
+ }
+
+ glEnd();
+ }
+
+ glEndList();
+ }
+ else
+ glCallList(fountain_list);
+}
+
+
+//========================================================================
+// Recursive function for building variable tessellated floor
+//========================================================================
+
+static void tessellate_floor(float x1, float y1, float x2, float y2, int depth)
+{
+ float delta, x, y;
+
+ // Last recursion?
+ if (depth >= 5)
+ delta = 999999.f;
+ else
+ {
+ x = (float) (fabs(x1) < fabs(x2) ? fabs(x1) : fabs(x2));
+ y = (float) (fabs(y1) < fabs(y2) ? fabs(y1) : fabs(y2));
+ delta = x*x + y*y;
+ }
+
+ // Recurse further?
+ if (delta < 0.1f)
+ {
+ x = (x1 + x2) * 0.5f;
+ y = (y1 + y2) * 0.5f;
+ tessellate_floor(x1, y1, x, y, depth + 1);
+ tessellate_floor(x, y1, x2, y, depth + 1);
+ tessellate_floor(x1, y, x, y2, depth + 1);
+ tessellate_floor(x, y, x2, y2, depth + 1);
+ }
+ else
+ {
+ glTexCoord2f(x1 * 30.f, y1 * 30.f);
+ glVertex3f( x1 * 80.f, y1 * 80.f, 0.f);
+ glTexCoord2f(x2 * 30.f, y1 * 30.f);
+ glVertex3f( x2 * 80.f, y1 * 80.f, 0.f);
+ glTexCoord2f(x2 * 30.f, y2 * 30.f);
+ glVertex3f( x2 * 80.f, y2 * 80.f, 0.f);
+ glTexCoord2f(x1 * 30.f, y2 * 30.f);
+ glVertex3f( x1 * 80.f, y2 * 80.f, 0.f);
+ }
+}
+
+
+//========================================================================
+// Draw floor. We build the floor recursively, keeping the tessellation high
+// in the center (near x,y=0,0) and low around the edges.
+//========================================================================
+
+static void draw_floor(void)
+{
+ static GLuint floor_list = 0;
+
+ if (!wireframe)
+ {
+ glEnable(GL_TEXTURE_2D);
+ glBindTexture(GL_TEXTURE_2D, floor_tex_id);
+ }
+
+ // The first time, we build the floor display list
+ if (!floor_list)
+ {
+ floor_list = glGenLists(1);
+ glNewList(floor_list, GL_COMPILE_AND_EXECUTE);
+
+ glMaterialfv(GL_FRONT, GL_DIFFUSE, floor_diffuse);
+ glMaterialfv(GL_FRONT, GL_SPECULAR, floor_specular);
+ glMaterialf(GL_FRONT, GL_SHININESS, floor_shininess);
+
+ // Draw floor as a bunch of quads (high tessellation
+ // improves lighting)
+ glNormal3f(0.f, 0.f, 1.f);
+ glBegin(GL_QUADS);
+ tessellate_floor(-1.f, -1.f, 0.f, 0.f, 0);
+ tessellate_floor( 0.f, -1.f, 1.f, 0.f, 0);
+ tessellate_floor( 0.f, 0.f, 1.f, 1.f, 0);
+ tessellate_floor(-1.f, 0.f, 0.f, 1.f, 0);
+ glEnd();
+
+ glEndList();
+ }
+ else
+ glCallList(floor_list);
+
+ glDisable(GL_TEXTURE_2D);
+
+}
+
+
+//========================================================================
+// Position and configure light sources
+//========================================================================
+
+static void setup_lights(void)
+{
+ float l1pos[4], l1amb[4], l1dif[4], l1spec[4];
+ float l2pos[4], l2amb[4], l2dif[4], l2spec[4];
+
+ // Set light source 1 parameters
+ l1pos[0] = 0.f; l1pos[1] = -9.f; l1pos[2] = 8.f; l1pos[3] = 1.f;
+ l1amb[0] = 0.2f; l1amb[1] = 0.2f; l1amb[2] = 0.2f; l1amb[3] = 1.f;
+ l1dif[0] = 0.8f; l1dif[1] = 0.4f; l1dif[2] = 0.2f; l1dif[3] = 1.f;
+ l1spec[0] = 1.f; l1spec[1] = 0.6f; l1spec[2] = 0.2f; l1spec[3] = 0.f;
+
+ // Set light source 2 parameters
+ l2pos[0] = -15.f; l2pos[1] = 12.f; l2pos[2] = 1.5f; l2pos[3] = 1.f;
+ l2amb[0] = 0.f; l2amb[1] = 0.f; l2amb[2] = 0.f; l2amb[3] = 1.f;
+ l2dif[0] = 0.2f; l2dif[1] = 0.4f; l2dif[2] = 0.8f; l2dif[3] = 1.f;
+ l2spec[0] = 0.2f; l2spec[1] = 0.6f; l2spec[2] = 1.f; l2spec[3] = 0.f;
+
+ glLightfv(GL_LIGHT1, GL_POSITION, l1pos);
+ glLightfv(GL_LIGHT1, GL_AMBIENT, l1amb);
+ glLightfv(GL_LIGHT1, GL_DIFFUSE, l1dif);
+ glLightfv(GL_LIGHT1, GL_SPECULAR, l1spec);
+ glLightfv(GL_LIGHT2, GL_POSITION, l2pos);
+ glLightfv(GL_LIGHT2, GL_AMBIENT, l2amb);
+ glLightfv(GL_LIGHT2, GL_DIFFUSE, l2dif);
+ glLightfv(GL_LIGHT2, GL_SPECULAR, l2spec);
+ glLightfv(GL_LIGHT3, GL_POSITION, glow_pos);
+ glLightfv(GL_LIGHT3, GL_DIFFUSE, glow_color);
+ glLightfv(GL_LIGHT3, GL_SPECULAR, glow_color);
+
+ glEnable(GL_LIGHT1);
+ glEnable(GL_LIGHT2);
+ glEnable(GL_LIGHT3);
+}
+
+
+//========================================================================
+// Main rendering function
+//========================================================================
+
+static void draw_scene(GLFWwindow* window, double t)
+{
+ double xpos, ypos, zpos, angle_x, angle_y, angle_z;
+ static double t_old = 0.0;
+ float dt;
+ mat4x4 projection;
+
+ // Calculate frame-to-frame delta time
+ dt = (float) (t - t_old);
+ t_old = t;
+
+ mat4x4_perspective(projection,
+ 65.f * (float) M_PI / 180.f,
+ aspect_ratio,
+ 1.0, 60.0);
+
+ glClearColor(0.1f, 0.1f, 0.1f, 1.f);
+ glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
+
+ glMatrixMode(GL_PROJECTION);
+ glLoadMatrixf((const GLfloat*) projection);
+
+ // Setup camera
+ glMatrixMode(GL_MODELVIEW);
+ glLoadIdentity();
+
+ // Rotate camera
+ angle_x = 90.0 - 10.0;
+ angle_y = 10.0 * sin(0.3 * t);
+ angle_z = 10.0 * t;
+ glRotated(-angle_x, 1.0, 0.0, 0.0);
+ glRotated(-angle_y, 0.0, 1.0, 0.0);
+ glRotated(-angle_z, 0.0, 0.0, 1.0);
+
+ // Translate camera
+ xpos = 15.0 * sin((M_PI / 180.0) * angle_z) +
+ 2.0 * sin((M_PI / 180.0) * 3.1 * t);
+ ypos = -15.0 * cos((M_PI / 180.0) * angle_z) +
+ 2.0 * cos((M_PI / 180.0) * 2.9 * t);
+ zpos = 4.0 + 2.0 * cos((M_PI / 180.0) * 4.9 * t);
+ glTranslated(-xpos, -ypos, -zpos);
+
+ glFrontFace(GL_CCW);
+ glCullFace(GL_BACK);
+ glEnable(GL_CULL_FACE);
+
+ setup_lights();
+ glEnable(GL_LIGHTING);
+
+ glEnable(GL_FOG);
+ glFogi(GL_FOG_MODE, GL_EXP);
+ glFogf(GL_FOG_DENSITY, 0.05f);
+ glFogfv(GL_FOG_COLOR, fog_color);
+
+ draw_floor();
+
+ glEnable(GL_DEPTH_TEST);
+ glDepthFunc(GL_LEQUAL);
+ glDepthMask(GL_TRUE);
+
+ draw_fountain();
+
+ glDisable(GL_LIGHTING);
+ glDisable(GL_FOG);
+
+ // Particles must be drawn after all solid objects have been drawn
+ draw_particles(window, t, dt);
+
+ // Z-buffer not needed anymore
+ glDisable(GL_DEPTH_TEST);
+}
+
+
+//========================================================================
+// Window resize callback function
+//========================================================================
+
+static void resize_callback(GLFWwindow* window, int width, int height)
+{
+ glViewport(0, 0, width, height);
+ aspect_ratio = height ? width / (float) height : 1.f;
+}
+
+
+//========================================================================
+// Key callback functions
+//========================================================================
+
+static void key_callback(GLFWwindow* window, int key, int scancode, int action, int mods)
+{
+ if (action == GLFW_PRESS)
+ {
+ switch (key)
+ {
+ case GLFW_KEY_ESCAPE:
+ glfwSetWindowShouldClose(window, GLFW_TRUE);
+ break;
+ case GLFW_KEY_W:
+ wireframe = !wireframe;
+ glPolygonMode(GL_FRONT_AND_BACK,
+ wireframe ? GL_LINE : GL_FILL);
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+
+//========================================================================
+// Thread for updating particle physics
+//========================================================================
+
+static int physics_thread_main(void* arg)
+{
+ GLFWwindow* window = arg;
+
+ for (;;)
+ {
+ mtx_lock(&thread_sync.particles_lock);
+
+ // Wait for particle drawing to be done
+ while (!glfwWindowShouldClose(window) &&
+ thread_sync.p_frame > thread_sync.d_frame)
+ {
+ struct timespec ts;
+ clock_gettime(CLOCK_REALTIME, &ts);
+ ts.tv_nsec += 100 * 1000 * 1000;
+ ts.tv_sec += ts.tv_nsec / (1000 * 1000 * 1000);
+ ts.tv_nsec %= 1000 * 1000 * 1000;
+ cnd_timedwait(&thread_sync.d_done, &thread_sync.particles_lock, &ts);
+ }
+
+ if (glfwWindowShouldClose(window))
+ break;
+
+ // Update particles
+ particle_engine(thread_sync.t, thread_sync.dt);
+
+ // Update frame counter
+ thread_sync.p_frame++;
+
+ // Unlock mutex and signal drawing thread
+ mtx_unlock(&thread_sync.particles_lock);
+ cnd_signal(&thread_sync.p_done);
+ }
+
+ return 0;
+}
+
+
+//========================================================================
+// main
+//========================================================================
+
+int main(int argc, char** argv)
+{
+ int ch, width, height;
+ thrd_t physics_thread = 0;
+ GLFWwindow* window;
+ GLFWmonitor* monitor = NULL;
+
+ if (!glfwInit())
+ {
+ fprintf(stderr, "Failed to initialize GLFW\n");
+ exit(EXIT_FAILURE);
+ }
+
+ while ((ch = getopt(argc, argv, "fh")) != -1)
+ {
+ switch (ch)
+ {
+ case 'f':
+ monitor = glfwGetPrimaryMonitor();
+ break;
+ case 'h':
+ usage();
+ exit(EXIT_SUCCESS);
+ }
+ }
+
+ if (monitor)
+ {
+ const GLFWvidmode* mode = glfwGetVideoMode(monitor);
+
+ glfwWindowHint(GLFW_RED_BITS, mode->redBits);
+ glfwWindowHint(GLFW_GREEN_BITS, mode->greenBits);
+ glfwWindowHint(GLFW_BLUE_BITS, mode->blueBits);
+ glfwWindowHint(GLFW_REFRESH_RATE, mode->refreshRate);
+
+ width = mode->width;
+ height = mode->height;
+ }
+ else
+ {
+ width = 640;
+ height = 480;
+ }
+
+ window = glfwCreateWindow(width, height, "Particle Engine", monitor, NULL);
+ if (!window)
+ {
+ fprintf(stderr, "Failed to create GLFW window\n");
+ glfwTerminate();
+ exit(EXIT_FAILURE);
+ }
+
+ if (monitor)
+ glfwSetInputMode(window, GLFW_CURSOR, GLFW_CURSOR_DISABLED);
+
+ glfwMakeContextCurrent(window);
+ gladLoadGL(glfwGetProcAddress);
+ glfwSwapInterval(1);
+
+ glfwSetFramebufferSizeCallback(window, resize_callback);
+ glfwSetKeyCallback(window, key_callback);
+
+ // Set initial aspect ratio
+ glfwGetFramebufferSize(window, &width, &height);
+ resize_callback(window, width, height);
+
+ // Upload particle texture
+ glGenTextures(1, &particle_tex_id);
+ glBindTexture(GL_TEXTURE_2D, particle_tex_id);
+ glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+ glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, P_TEX_WIDTH, P_TEX_HEIGHT,
+ 0, GL_LUMINANCE, GL_UNSIGNED_BYTE, particle_texture);
+
+ // Upload floor texture
+ glGenTextures(1, &floor_tex_id);
+ glBindTexture(GL_TEXTURE_2D, floor_tex_id);
+ glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+ glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, F_TEX_WIDTH, F_TEX_HEIGHT,
+ 0, GL_LUMINANCE, GL_UNSIGNED_BYTE, floor_texture);
+
+ if (glfwExtensionSupported("GL_EXT_separate_specular_color"))
+ {
+ glLightModeli(GL_LIGHT_MODEL_COLOR_CONTROL_EXT,
+ GL_SEPARATE_SPECULAR_COLOR_EXT);
+ }
+
+ // Set filled polygon mode as default (not wireframe)
+ glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
+ wireframe = 0;
+
+ // Set initial times
+ thread_sync.t = 0.0;
+ thread_sync.dt = 0.001f;
+ thread_sync.p_frame = 0;
+ thread_sync.d_frame = 0;
+
+ mtx_init(&thread_sync.particles_lock, mtx_timed);
+ cnd_init(&thread_sync.p_done);
+ cnd_init(&thread_sync.d_done);
+
+ if (thrd_create(&physics_thread, physics_thread_main, window) != thrd_success)
+ {
+ glfwTerminate();
+ exit(EXIT_FAILURE);
+ }
+
+ glfwSetTime(0.0);
+
+ while (!glfwWindowShouldClose(window))
+ {
+ draw_scene(window, glfwGetTime());
+
+ glfwSwapBuffers(window);
+ glfwPollEvents();
+ }
+
+ thrd_join(physics_thread, NULL);
+
+ glfwDestroyWindow(window);
+ glfwTerminate();
+
+ exit(EXIT_SUCCESS);
+}
+
diff --git a/chromium/third_party/dawn/third_party/glfw/examples/sharing.c b/chromium/third_party/dawn/third_party/glfw/examples/sharing.c
new file mode 100644
index 00000000000..d840c58c1c4
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/examples/sharing.c
@@ -0,0 +1,235 @@
+//========================================================================
+// Context sharing example
+// Copyright (c) Camilla Löwy <elmindreda@glfw.org>
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would
+// be appreciated but is not required.
+//
+// 2. Altered source versions must be plainly marked as such, and must not
+// be misrepresented as being the original software.
+//
+// 3. This notice may not be removed or altered from any source
+// distribution.
+//
+//========================================================================
+
+#define GLAD_GL_IMPLEMENTATION
+#include <glad/gl.h>
+#define GLFW_INCLUDE_NONE
+#include <GLFW/glfw3.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "getopt.h"
+#include "linmath.h"
+
+static const char* vertex_shader_text =
+"#version 110\n"
+"uniform mat4 MVP;\n"
+"attribute vec2 vPos;\n"
+"varying vec2 texcoord;\n"
+"void main()\n"
+"{\n"
+" gl_Position = MVP * vec4(vPos, 0.0, 1.0);\n"
+" texcoord = vPos;\n"
+"}\n";
+
+static const char* fragment_shader_text =
+"#version 110\n"
+"uniform sampler2D texture;\n"
+"uniform vec3 color;\n"
+"varying vec2 texcoord;\n"
+"void main()\n"
+"{\n"
+" gl_FragColor = vec4(color * texture2D(texture, texcoord).rgb, 1.0);\n"
+"}\n";
+
+static const vec2 vertices[4] =
+{
+ { 0.f, 0.f },
+ { 1.f, 0.f },
+ { 1.f, 1.f },
+ { 0.f, 1.f }
+};
+
+static void error_callback(int error, const char* description)
+{
+ fprintf(stderr, "Error: %s\n", description);
+}
+
+static void key_callback(GLFWwindow* window, int key, int scancode, int action, int mods)
+{
+ if (action == GLFW_PRESS && key == GLFW_KEY_ESCAPE)
+ glfwSetWindowShouldClose(window, GLFW_TRUE);
+}
+
+int main(int argc, char** argv)
+{
+ GLFWwindow* windows[2];
+ GLuint texture, program, vertex_buffer;
+ GLint mvp_location, vpos_location, color_location, texture_location;
+
+ glfwSetErrorCallback(error_callback);
+
+ if (!glfwInit())
+ exit(EXIT_FAILURE);
+
+ glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 2);
+ glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 0);
+
+ windows[0] = glfwCreateWindow(400, 400, "First", NULL, NULL);
+ if (!windows[0])
+ {
+ glfwTerminate();
+ exit(EXIT_FAILURE);
+ }
+
+ glfwSetKeyCallback(windows[0], key_callback);
+
+ glfwMakeContextCurrent(windows[0]);
+
+ // Only enable vsync for the first of the windows to be swapped,
+ // to avoid waiting out the interval once for each window
+ glfwSwapInterval(1);
+
+ // The contexts are created with the same APIs so the function
+ // pointers should be re-usable between them
+ gladLoadGL(glfwGetProcAddress);
+
+ // Create the OpenGL objects inside the first context, created above
+ // All objects will be shared with the second context, created below
+ {
+ int x, y;
+ char pixels[16 * 16];
+ GLuint vertex_shader, fragment_shader;
+
+ glGenTextures(1, &texture);
+ glBindTexture(GL_TEXTURE_2D, texture);
+
+ srand((unsigned int) glfwGetTimerValue());
+
+ for (y = 0; y < 16; y++)
+ {
+ for (x = 0; x < 16; x++)
+ pixels[y * 16 + x] = rand() % 256;
+ }
+
+ glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, 16, 16, 0, GL_LUMINANCE, GL_UNSIGNED_BYTE, pixels);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+
+ vertex_shader = glCreateShader(GL_VERTEX_SHADER);
+ glShaderSource(vertex_shader, 1, &vertex_shader_text, NULL);
+ glCompileShader(vertex_shader);
+
+ fragment_shader = glCreateShader(GL_FRAGMENT_SHADER);
+ glShaderSource(fragment_shader, 1, &fragment_shader_text, NULL);
+ glCompileShader(fragment_shader);
+
+ program = glCreateProgram();
+ glAttachShader(program, vertex_shader);
+ glAttachShader(program, fragment_shader);
+ glLinkProgram(program);
+
+ mvp_location = glGetUniformLocation(program, "MVP");
+ color_location = glGetUniformLocation(program, "color");
+ texture_location = glGetUniformLocation(program, "texture");
+ vpos_location = glGetAttribLocation(program, "vPos");
+
+ glGenBuffers(1, &vertex_buffer);
+ glBindBuffer(GL_ARRAY_BUFFER, vertex_buffer);
+ glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW);
+ }
+
+ glUseProgram(program);
+ glUniform1i(texture_location, 0);
+
+ glEnable(GL_TEXTURE_2D);
+ glBindTexture(GL_TEXTURE_2D, texture);
+
+ glBindBuffer(GL_ARRAY_BUFFER, vertex_buffer);
+ glEnableVertexAttribArray(vpos_location);
+ glVertexAttribPointer(vpos_location, 2, GL_FLOAT, GL_FALSE,
+ sizeof(vertices[0]), (void*) 0);
+
+ windows[1] = glfwCreateWindow(400, 400, "Second", NULL, windows[0]);
+ if (!windows[1])
+ {
+ glfwTerminate();
+ exit(EXIT_FAILURE);
+ }
+
+ // Place the second window to the right of the first
+ {
+ int xpos, ypos, left, right, width;
+
+ glfwGetWindowSize(windows[0], &width, NULL);
+ glfwGetWindowFrameSize(windows[0], &left, NULL, &right, NULL);
+ glfwGetWindowPos(windows[0], &xpos, &ypos);
+
+ glfwSetWindowPos(windows[1], xpos + width + left + right, ypos);
+ }
+
+ glfwSetKeyCallback(windows[1], key_callback);
+
+ glfwMakeContextCurrent(windows[1]);
+
+ // While objects are shared, the global context state is not and will
+ // need to be set up for each context
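+ //
+ // (Objects such as textures, buffers, shaders and programs are shared;
+ // per-context state such as the current program, enabled attribute
+ // arrays and texture bindings is not, hence the repeated setup below.)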
+
+ glUseProgram(program);
+
+ glEnable(GL_TEXTURE_2D);
+ glBindTexture(GL_TEXTURE_2D, texture);
+
+ glBindBuffer(GL_ARRAY_BUFFER, vertex_buffer);
+ glEnableVertexAttribArray(vpos_location);
+ glVertexAttribPointer(vpos_location, 2, GL_FLOAT, GL_FALSE,
+ sizeof(vertices[0]), (void*) 0);
+
+ while (!glfwWindowShouldClose(windows[0]) &&
+ !glfwWindowShouldClose(windows[1]))
+ {
+ int i;
+ const vec3 colors[2] =
+ {
+ { 0.8f, 0.4f, 1.f },
+ { 0.3f, 0.4f, 1.f }
+ };
+
+ for (i = 0; i < 2; i++)
+ {
+ int width, height;
+ mat4x4 mvp;
+
+ glfwGetFramebufferSize(windows[i], &width, &height);
+ glfwMakeContextCurrent(windows[i]);
+
+ glViewport(0, 0, width, height);
+
+ mat4x4_ortho(mvp, 0.f, 1.f, 0.f, 1.f, 0.f, 1.f);
+ glUniformMatrix4fv(mvp_location, 1, GL_FALSE, (const GLfloat*) mvp);
+ glUniform3fv(color_location, 1, colors[i]);
+ glDrawArrays(GL_TRIANGLE_FAN, 0, 4);
+
+ glfwSwapBuffers(windows[i]);
+ }
+
+ glfwWaitEvents();
+ }
+
+ glfwTerminate();
+ exit(EXIT_SUCCESS);
+}
+
diff --git a/chromium/third_party/dawn/third_party/glfw/examples/splitview.c b/chromium/third_party/dawn/third_party/glfw/examples/splitview.c
new file mode 100644
index 00000000000..990df12c21f
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/examples/splitview.c
@@ -0,0 +1,547 @@
+//========================================================================
+// This is an example program for the GLFW library
+//
+// The program uses a "split window" view, rendering four views of the
+// same scene in one window (e.g. useful for 3D modelling software). This
+// demo uses scissors to separate the four different rendering areas from
+// each other.
+//
+// (If the code seems a little bit strange here and there, it may be
+// because I am not a friend of orthogonal projections)
+//========================================================================
+
+#define GLAD_GL_IMPLEMENTATION
+#include <glad/gl.h>
+#define GLFW_INCLUDE_NONE
+#include <GLFW/glfw3.h>
+
+#if defined(_MSC_VER)
+ // Make MS math.h define M_PI
+ #define _USE_MATH_DEFINES
+#endif
+
+#include <math.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <linmath.h>
+
+
+//========================================================================
+// Global variables
+//========================================================================
+
+// Mouse position
+static double xpos = 0, ypos = 0;
+
+// Window size
+static int width, height;
+
+// Active view: 0 = none, 1 = upper left, 2 = upper right, 3 = lower left,
+// 4 = lower right
+static int active_view = 0;
+
+// Rotation around each axis
+static int rot_x = 0, rot_y = 0, rot_z = 0;
+
+// Do redraw?
+static int do_redraw = 1;
+
+
+//========================================================================
+// Draw a solid torus (use a display list for the model)
+//========================================================================
+
+#define TORUS_MAJOR 1.5
+#define TORUS_MINOR 0.5
+#define TORUS_MAJOR_RES 32
+#define TORUS_MINOR_RES 32
+
+static void drawTorus(void)
+{
+ static GLuint torus_list = 0;
+ int i, j, k;
+ double s, t, x, y, z, nx, ny, nz, scale, twopi;
+
+ if (!torus_list)
+ {
+ // Start recording displaylist
+ torus_list = glGenLists(1);
+ glNewList(torus_list, GL_COMPILE_AND_EXECUTE);
+
+ // Draw torus
+ twopi = 2.0 * M_PI;
+ for (i = 0; i < TORUS_MINOR_RES; i++)
+ {
+ glBegin(GL_QUAD_STRIP);
+ for (j = 0; j <= TORUS_MAJOR_RES; j++)
+ {
+ for (k = 1; k >= 0; k--)
+ {
+ s = (i + k) % TORUS_MINOR_RES + 0.5;
+ t = j % TORUS_MAJOR_RES;
+
+ // Calculate point on surface
+ x = (TORUS_MAJOR + TORUS_MINOR * cos(s * twopi / TORUS_MINOR_RES)) * cos(t * twopi / TORUS_MAJOR_RES);
+ y = TORUS_MINOR * sin(s * twopi / TORUS_MINOR_RES);
+ z = (TORUS_MAJOR + TORUS_MINOR * cos(s * twopi / TORUS_MINOR_RES)) * sin(t * twopi / TORUS_MAJOR_RES);
+
+ // Calculate surface normal
+ nx = x - TORUS_MAJOR * cos(t * twopi / TORUS_MAJOR_RES);
+ ny = y;
+ nz = z - TORUS_MAJOR * sin(t * twopi / TORUS_MAJOR_RES);
+ scale = 1.0 / sqrt(nx*nx + ny*ny + nz*nz);
+ nx *= scale;
+ ny *= scale;
+ nz *= scale;
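+
+ // (The normal is the normalised vector from the centre of the tube's
+ // cross-section circle - the point TORUS_MAJOR*(cos, 0, sin) of the
+ // major angle - to the surface point, which works because each
+ // cross-section of the torus is a circle centred on that point.)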
+
+ glNormal3f((float) nx, (float) ny, (float) nz);
+ glVertex3f((float) x, (float) y, (float) z);
+ }
+ }
+
+ glEnd();
+ }
+
+ // Stop recording displaylist
+ glEndList();
+ }
+ else
+ {
+ // Playback displaylist
+ glCallList(torus_list);
+ }
+}
+
+
+//========================================================================
+// Draw the scene (a rotating torus)
+//========================================================================
+
+static void drawScene(void)
+{
+ const GLfloat model_diffuse[4] = {1.0f, 0.8f, 0.8f, 1.0f};
+ const GLfloat model_specular[4] = {0.6f, 0.6f, 0.6f, 1.0f};
+ const GLfloat model_shininess = 20.0f;
+
+ glPushMatrix();
+
+ // Rotate the object
+ glRotatef((GLfloat) rot_x * 0.5f, 1.0f, 0.0f, 0.0f);
+ glRotatef((GLfloat) rot_y * 0.5f, 0.0f, 1.0f, 0.0f);
+ glRotatef((GLfloat) rot_z * 0.5f, 0.0f, 0.0f, 1.0f);
+
+ // Set model color (used for orthogonal views, lighting disabled)
+ glColor4fv(model_diffuse);
+
+ // Set model material (used for perspective view, lighting enabled)
+ glMaterialfv(GL_FRONT, GL_DIFFUSE, model_diffuse);
+ glMaterialfv(GL_FRONT, GL_SPECULAR, model_specular);
+ glMaterialf(GL_FRONT, GL_SHININESS, model_shininess);
+
+ // Draw torus
+ drawTorus();
+
+ glPopMatrix();
+}
+
+
+//========================================================================
+// Draw a 2D grid (used for orthogonal views)
+//========================================================================
+
+static void drawGrid(float scale, int steps)
+{
+ int i;
+ float x, y;
+ mat4x4 view;
+
+ glPushMatrix();
+
+ // Set background to some dark bluish grey
+ glClearColor(0.05f, 0.05f, 0.2f, 0.0f);
+ glClear(GL_COLOR_BUFFER_BIT);
+
+ // Setup modelview matrix (flat XY view)
+ {
+ vec3 eye = { 0.f, 0.f, 1.f };
+ vec3 center = { 0.f, 0.f, 0.f };
+ vec3 up = { 0.f, 1.f, 0.f };
+ mat4x4_look_at(view, eye, center, up);
+ }
+ glLoadMatrixf((const GLfloat*) view);
+
+ // We don't want to update the Z-buffer
+ glDepthMask(GL_FALSE);
+
+ // Set grid color
+ glColor3f(0.0f, 0.5f, 0.5f);
+
+ glBegin(GL_LINES);
+
+ // Horizontal lines
+ x = scale * 0.5f * (float) (steps - 1);
+ y = -scale * 0.5f * (float) (steps - 1);
+ for (i = 0; i < steps; i++)
+ {
+ glVertex3f(-x, y, 0.0f);
+ glVertex3f(x, y, 0.0f);
+ y += scale;
+ }
+
+ // Vertical lines
+ x = -scale * 0.5f * (float) (steps - 1);
+ y = scale * 0.5f * (float) (steps - 1);
+ for (i = 0; i < steps; i++)
+ {
+ glVertex3f(x, -y, 0.0f);
+ glVertex3f(x, y, 0.0f);
+ x += scale;
+ }
+
+ glEnd();
+
+ // Enable Z-buffer writing again
+ glDepthMask(GL_TRUE);
+
+ glPopMatrix();
+}
+
+
+//========================================================================
+// Draw all views
+//========================================================================
+
+static void drawAllViews(void)
+{
+ const GLfloat light_position[4] = {0.0f, 8.0f, 8.0f, 1.0f};
+ const GLfloat light_diffuse[4] = {1.0f, 1.0f, 1.0f, 1.0f};
+ const GLfloat light_specular[4] = {1.0f, 1.0f, 1.0f, 1.0f};
+ const GLfloat light_ambient[4] = {0.2f, 0.2f, 0.3f, 1.0f};
+ float aspect;
+ mat4x4 view, projection;
+
+ // Calculate aspect of window
+ if (height > 0)
+ aspect = (float) width / (float) height;
+ else
+ aspect = 1.f;
+
+ // Clear screen
+ glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
+ glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
+
+ // Enable scissor test
+ glEnable(GL_SCISSOR_TEST);
+
+ // Enable depth test
+ glEnable(GL_DEPTH_TEST);
+ glDepthFunc(GL_LEQUAL);
+
+ // ** ORTHOGONAL VIEWS **
+
+ // For orthogonal views, use wireframe rendering
+ glPolygonMode(GL_FRONT_AND_BACK, GL_LINE);
+
+ // Enable line anti-aliasing
+ glEnable(GL_LINE_SMOOTH);
+ glEnable(GL_BLEND);
+ glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
+
+ // Setup orthogonal projection matrix
+ glMatrixMode(GL_PROJECTION);
+ glLoadIdentity();
+ glOrtho(-3.0 * aspect, 3.0 * aspect, -3.0, 3.0, 1.0, 50.0);
+
+ // Upper left view (TOP VIEW)
+ glViewport(0, height / 2, width / 2, height / 2);
+ glScissor(0, height / 2, width / 2, height / 2);
+ glMatrixMode(GL_MODELVIEW);
+ {
+ vec3 eye = { 0.f, 10.f, 1e-3f };
+ vec3 center = { 0.f, 0.f, 0.f };
+ vec3 up = { 0.f, 1.f, 0.f };
+ mat4x4_look_at( view, eye, center, up );
+ }
+ glLoadMatrixf((const GLfloat*) view);
+ drawGrid(0.5, 12);
+ drawScene();
+
+ // Lower left view (FRONT VIEW)
+ glViewport(0, 0, width / 2, height / 2);
+ glScissor(0, 0, width / 2, height / 2);
+ glMatrixMode(GL_MODELVIEW);
+ {
+ vec3 eye = { 0.f, 0.f, 10.f };
+ vec3 center = { 0.f, 0.f, 0.f };
+ vec3 up = { 0.f, 1.f, 0.f };
+ mat4x4_look_at( view, eye, center, up );
+ }
+ glLoadMatrixf((const GLfloat*) view);
+ drawGrid(0.5, 12);
+ drawScene();
+
+ // Lower right view (SIDE VIEW)
+ glViewport(width / 2, 0, width / 2, height / 2);
+ glScissor(width / 2, 0, width / 2, height / 2);
+ glMatrixMode(GL_MODELVIEW);
+ {
+ vec3 eye = { 10.f, 0.f, 0.f };
+ vec3 center = { 0.f, 0.f, 0.f };
+ vec3 up = { 0.f, 1.f, 0.f };
+ mat4x4_look_at( view, eye, center, up );
+ }
+ glLoadMatrixf((const GLfloat*) view);
+ drawGrid(0.5, 12);
+ drawScene();
+
+ // Disable line anti-aliasing
+ glDisable(GL_LINE_SMOOTH);
+ glDisable(GL_BLEND);
+
+ // ** PERSPECTIVE VIEW **
+
+ // For perspective view, use solid rendering
+ glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
+
+ // Enable face culling (faster rendering)
+ glEnable(GL_CULL_FACE);
+ glCullFace(GL_BACK);
+ glFrontFace(GL_CW);
+
+ // Setup perspective projection matrix
+ glMatrixMode(GL_PROJECTION);
+ mat4x4_perspective(projection,
+ 65.f * (float) M_PI / 180.f,
+ aspect,
+ 1.f, 50.f);
+ glLoadMatrixf((const GLfloat*) projection);
+
+ // Upper right view (PERSPECTIVE VIEW)
+ glViewport(width / 2, height / 2, width / 2, height / 2);
+ glScissor(width / 2, height / 2, width / 2, height / 2);
+ glMatrixMode(GL_MODELVIEW);
+ {
+ vec3 eye = { 3.f, 1.5f, 3.f };
+ vec3 center = { 0.f, 0.f, 0.f };
+ vec3 up = { 0.f, 1.f, 0.f };
+ mat4x4_look_at( view, eye, center, up );
+ }
+ glLoadMatrixf((const GLfloat*) view);
+
+ // Configure and enable light source 1
+ glLightfv(GL_LIGHT1, GL_POSITION, light_position);
+ glLightfv(GL_LIGHT1, GL_AMBIENT, light_ambient);
+ glLightfv(GL_LIGHT1, GL_DIFFUSE, light_diffuse);
+ glLightfv(GL_LIGHT1, GL_SPECULAR, light_specular);
+ glEnable(GL_LIGHT1);
+ glEnable(GL_LIGHTING);
+
+ // Draw scene
+ drawScene();
+
+ // Disable lighting
+ glDisable(GL_LIGHTING);
+
+ // Disable face culling
+ glDisable(GL_CULL_FACE);
+
+ // Disable depth test
+ glDisable(GL_DEPTH_TEST);
+
+ // Disable scissor test
+ glDisable(GL_SCISSOR_TEST);
+
+ // Draw a border around the active view
+ if (active_view > 0 && active_view != 2)
+ {
+ glViewport(0, 0, width, height);
+
+ glMatrixMode(GL_PROJECTION);
+ glLoadIdentity();
+ glOrtho(0.0, 2.0, 0.0, 2.0, 0.0, 1.0);
+
+ glMatrixMode(GL_MODELVIEW);
+ glLoadIdentity();
+ glTranslatef((GLfloat) ((active_view - 1) & 1), (GLfloat) (1 - (active_view - 1) / 2), 0.0f);
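+ // (Views 1..4 map to grid cell ((active_view - 1) & 1,
+ // (active_view - 1) / 2) with row 0 at the top, so the unit square
+ // drawn below is translated into the matching quadrant of the 0..2
+ // ortho space set up above.)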
+
+ glColor3f(1.0f, 1.0f, 0.6f);
+
+ glBegin(GL_LINE_STRIP);
+ glVertex2i(0, 0);
+ glVertex2i(1, 0);
+ glVertex2i(1, 1);
+ glVertex2i(0, 1);
+ glVertex2i(0, 0);
+ glEnd();
+ }
+}
+
+
+//========================================================================
+// Framebuffer size callback function
+//========================================================================
+
+static void framebufferSizeFun(GLFWwindow* window, int w, int h)
+{
+ width = w;
+ height = h > 0 ? h : 1;
+ do_redraw = 1;
+}
+
+
+//========================================================================
+// Window refresh callback function
+//========================================================================
+
+static void windowRefreshFun(GLFWwindow* window)
+{
+ drawAllViews();
+ glfwSwapBuffers(window);
+ do_redraw = 0;
+}
+
+
+//========================================================================
+// Mouse position callback function
+//========================================================================
+
+static void cursorPosFun(GLFWwindow* window, double x, double y)
+{
+ int wnd_width, wnd_height, fb_width, fb_height;
+ double scale;
+
+ glfwGetWindowSize(window, &wnd_width, &wnd_height);
+ glfwGetFramebufferSize(window, &fb_width, &fb_height);
+
+ scale = (double) fb_width / (double) wnd_width;
+
+ x *= scale;
+ y *= scale;
+
+ // Depending on which view was selected, rotate around different axes
+ switch (active_view)
+ {
+ case 1:
+ rot_x += (int) (y - ypos);
+ rot_z += (int) (x - xpos);
+ do_redraw = 1;
+ break;
+ case 3:
+ rot_x += (int) (y - ypos);
+ rot_y += (int) (x - xpos);
+ do_redraw = 1;
+ break;
+ case 4:
+ rot_y += (int) (x - xpos);
+ rot_z += (int) (y - ypos);
+ do_redraw = 1;
+ break;
+ default:
+ // Do nothing for perspective view, or if no view is selected
+ break;
+ }
+
+ // Remember cursor position
+ xpos = x;
+ ypos = y;
+}
+
+
+//========================================================================
+// Mouse button callback function
+//========================================================================
+
+static void mouseButtonFun(GLFWwindow* window, int button, int action, int mods)
+{
+ if ((button == GLFW_MOUSE_BUTTON_LEFT) && action == GLFW_PRESS)
+ {
+ // Detect which of the four views was clicked
+ active_view = 1;
+ if (xpos >= width / 2)
+ active_view += 1;
+ if (ypos >= height / 2)
+ active_view += 2;
+ }
+ else if (button == GLFW_MOUSE_BUTTON_LEFT)
+ {
+ // Deselect any previously selected view
+ active_view = 0;
+ }
+
+ do_redraw = 1;
+}
+
+static void key_callback(GLFWwindow* window, int key, int scancode, int action, int mods)
+{
+ if (key == GLFW_KEY_ESCAPE && action == GLFW_PRESS)
+ glfwSetWindowShouldClose(window, GLFW_TRUE);
+}
+
+
+//========================================================================
+// main
+//========================================================================
+
+int main(void)
+{
+ GLFWwindow* window;
+
+ // Initialise GLFW
+ if (!glfwInit())
+ {
+ fprintf(stderr, "Failed to initialize GLFW\n");
+ exit(EXIT_FAILURE);
+ }
+
+ glfwWindowHint(GLFW_SAMPLES, 4);
+
+ // Open OpenGL window
+ window = glfwCreateWindow(500, 500, "Split view demo", NULL, NULL);
+ if (!window)
+ {
+ fprintf(stderr, "Failed to open GLFW window\n");
+
+ glfwTerminate();
+ exit(EXIT_FAILURE);
+ }
+
+ // Set callback functions
+ glfwSetFramebufferSizeCallback(window, framebufferSizeFun);
+ glfwSetWindowRefreshCallback(window, windowRefreshFun);
+ glfwSetCursorPosCallback(window, cursorPosFun);
+ glfwSetMouseButtonCallback(window, mouseButtonFun);
+ glfwSetKeyCallback(window, key_callback);
+
+ // Enable vsync
+ glfwMakeContextCurrent(window);
+ gladLoadGL(glfwGetProcAddress);
+ glfwSwapInterval(1);
+
+ if (GLAD_GL_ARB_multisample || GLAD_GL_VERSION_1_3)
+ glEnable(GL_MULTISAMPLE_ARB);
+
+ glfwGetFramebufferSize(window, &width, &height);
+ framebufferSizeFun(window, width, height);
+
+ // Main loop
+ for (;;)
+ {
+ // Only redraw if we need to
+ if (do_redraw)
+ windowRefreshFun(window);
+
+ // Wait for new events
+ glfwWaitEvents();
+
+ // Check if the window should be closed
+ if (glfwWindowShouldClose(window))
+ break;
+ }
+
+ // Close OpenGL window and terminate GLFW
+ glfwTerminate();
+
+ exit(EXIT_SUCCESS);
+}
+
diff --git a/chromium/third_party/dawn/third_party/glfw/examples/triangle-opengl.c b/chromium/third_party/dawn/third_party/glfw/examples/triangle-opengl.c
new file mode 100644
index 00000000000..ff9e7d3b51d
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/examples/triangle-opengl.c
@@ -0,0 +1,171 @@
+//========================================================================
+// OpenGL triangle example
+// Copyright (c) Camilla Löwy <elmindreda@glfw.org>
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would
+// be appreciated but is not required.
+//
+// 2. Altered source versions must be plainly marked as such, and must not
+// be misrepresented as being the original software.
+//
+// 3. This notice may not be removed or altered from any source
+// distribution.
+//
+//========================================================================
+//! [code]
+
+#define GLAD_GL_IMPLEMENTATION
+#include <glad/gl.h>
+#define GLFW_INCLUDE_NONE
+#include <GLFW/glfw3.h>
+
+#include "linmath.h"
+
+#include <stdlib.h>
+#include <stddef.h>
+#include <stdio.h>
+
+typedef struct Vertex
+{
+ vec2 pos;
+ vec3 col;
+} Vertex;
+
+static const Vertex vertices[3] =
+{
+ { { -0.6f, -0.4f }, { 1.f, 0.f, 0.f } },
+ { { 0.6f, -0.4f }, { 0.f, 1.f, 0.f } },
+ { { 0.f, 0.6f }, { 0.f, 0.f, 1.f } }
+};
+
+static const char* vertex_shader_text =
+"#version 330\n"
+"uniform mat4 MVP;\n"
+"in vec3 vCol;\n"
+"in vec2 vPos;\n"
+"out vec3 color;\n"
+"void main()\n"
+"{\n"
+" gl_Position = MVP * vec4(vPos, 0.0, 1.0);\n"
+" color = vCol;\n"
+"}\n";
+
+static const char* fragment_shader_text =
+"#version 330\n"
+"in vec3 color;\n"
+"out vec4 fragment;\n"
+"void main()\n"
+"{\n"
+" fragment = vec4(color, 1.0);\n"
+"}\n";
+
+static void error_callback(int error, const char* description)
+{
+ fprintf(stderr, "Error: %s\n", description);
+}
+
+static void key_callback(GLFWwindow* window, int key, int scancode, int action, int mods)
+{
+ if (key == GLFW_KEY_ESCAPE && action == GLFW_PRESS)
+ glfwSetWindowShouldClose(window, GLFW_TRUE);
+}
+
+int main(void)
+{
+ glfwSetErrorCallback(error_callback);
+
+ if (!glfwInit())
+ exit(EXIT_FAILURE);
+
+ glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
+ glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);
+ glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
+
+ GLFWwindow* window = glfwCreateWindow(640, 480, "OpenGL Triangle", NULL, NULL);
+ if (!window)
+ {
+ glfwTerminate();
+ exit(EXIT_FAILURE);
+ }
+
+ glfwSetKeyCallback(window, key_callback);
+
+ glfwMakeContextCurrent(window);
+ gladLoadGL(glfwGetProcAddress);
+ glfwSwapInterval(1);
+
+ // NOTE: OpenGL error checks have been omitted for brevity
+
+ GLuint vertex_buffer;
+ glGenBuffers(1, &vertex_buffer);
+ glBindBuffer(GL_ARRAY_BUFFER, vertex_buffer);
+ glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW);
+
+ const GLuint vertex_shader = glCreateShader(GL_VERTEX_SHADER);
+ glShaderSource(vertex_shader, 1, &vertex_shader_text, NULL);
+ glCompileShader(vertex_shader);
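+    // A minimal compile-status check, left out above for brevity, could look
+    // like this (the same pattern applies to the fragment shader below):
+    //
+    //     GLint status;
+    //     glGetShaderiv(vertex_shader, GL_COMPILE_STATUS, &status);
+    //     if (status == GL_FALSE)
+    //     {
+    //         char log[1024];
+    //         glGetShaderInfoLog(vertex_shader, sizeof(log), NULL, log);
+    //         fprintf(stderr, "Vertex shader: %s\n", log);
+    //     }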
+
+ const GLuint fragment_shader = glCreateShader(GL_FRAGMENT_SHADER);
+ glShaderSource(fragment_shader, 1, &fragment_shader_text, NULL);
+ glCompileShader(fragment_shader);
+
+ const GLuint program = glCreateProgram();
+ glAttachShader(program, vertex_shader);
+ glAttachShader(program, fragment_shader);
+ glLinkProgram(program);
+
+ const GLint mvp_location = glGetUniformLocation(program, "MVP");
+ const GLint vpos_location = glGetAttribLocation(program, "vPos");
+ const GLint vcol_location = glGetAttribLocation(program, "vCol");
+
+ GLuint vertex_array;
+ glGenVertexArrays(1, &vertex_array);
+ glBindVertexArray(vertex_array);
+ glEnableVertexAttribArray(vpos_location);
+ glVertexAttribPointer(vpos_location, 2, GL_FLOAT, GL_FALSE,
+ sizeof(Vertex), (void*) offsetof(Vertex, pos));
+ glEnableVertexAttribArray(vcol_location);
+ glVertexAttribPointer(vcol_location, 3, GL_FLOAT, GL_FALSE,
+ sizeof(Vertex), (void*) offsetof(Vertex, col));
+
+ while (!glfwWindowShouldClose(window))
+ {
+ int width, height;
+ glfwGetFramebufferSize(window, &width, &height);
+ const float ratio = width / (float) height;
+
+ glViewport(0, 0, width, height);
+ glClear(GL_COLOR_BUFFER_BIT);
+
+ mat4x4 m, p, mvp;
+ mat4x4_identity(m);
+ mat4x4_rotate_Z(m, m, (float) glfwGetTime());
+ mat4x4_ortho(p, -ratio, ratio, -1.f, 1.f, 1.f, -1.f);
+ mat4x4_mul(mvp, p, m);
+
+ glUseProgram(program);
+ glUniformMatrix4fv(mvp_location, 1, GL_FALSE, (const GLfloat*) &mvp);
+ glBindVertexArray(vertex_array);
+ glDrawArrays(GL_TRIANGLES, 0, 3);
+
+ glfwSwapBuffers(window);
+ glfwPollEvents();
+ }
+
+ glfwDestroyWindow(window);
+
+ glfwTerminate();
+ exit(EXIT_SUCCESS);
+}
+
+//! [code]
diff --git a/chromium/third_party/dawn/third_party/glfw/examples/triangle-opengles.c b/chromium/third_party/dawn/third_party/glfw/examples/triangle-opengles.c
new file mode 100644
index 00000000000..03eb026f3b3
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/examples/triangle-opengles.c
@@ -0,0 +1,170 @@
+//========================================================================
+// OpenGL ES 2.0 triangle example
+// Copyright (c) Camilla Löwy <elmindreda@glfw.org>
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would
+// be appreciated but is not required.
+//
+// 2. Altered source versions must be plainly marked as such, and must not
+// be misrepresented as being the original software.
+//
+// 3. This notice may not be removed or altered from any source
+// distribution.
+//
+//========================================================================
+
+#define GLAD_GLES2_IMPLEMENTATION
+#include <glad/gles2.h>
+#define GLFW_INCLUDE_NONE
+#include <GLFW/glfw3.h>
+
+#include "linmath.h"
+
+#include <stdlib.h>
+#include <stddef.h>
+#include <stdio.h>
+
+typedef struct Vertex
+{
+ vec2 pos;
+ vec3 col;
+} Vertex;
+
+static const Vertex vertices[3] =
+{
+ { { -0.6f, -0.4f }, { 1.f, 0.f, 0.f } },
+ { { 0.6f, -0.4f }, { 0.f, 1.f, 0.f } },
+ { { 0.f, 0.6f }, { 0.f, 0.f, 1.f } }
+};
+
+static const char* vertex_shader_text =
+"#version 100\n"
+"precision mediump float;\n"
+"uniform mat4 MVP;\n"
+"attribute vec3 vCol;\n"
+"attribute vec2 vPos;\n"
+"varying vec3 color;\n"
+"void main()\n"
+"{\n"
+" gl_Position = MVP * vec4(vPos, 0.0, 1.0);\n"
+" color = vCol;\n"
+"}\n";
+
+static const char* fragment_shader_text =
+"#version 100\n"
+"precision mediump float;\n"
+"varying vec3 color;\n"
+"void main()\n"
+"{\n"
+" gl_FragColor = vec4(color, 1.0);\n"
+"}\n";
+
+static void error_callback(int error, const char* description)
+{
+ fprintf(stderr, "GLFW Error: %s\n", description);
+}
+
+static void key_callback(GLFWwindow* window, int key, int scancode, int action, int mods)
+{
+ if (key == GLFW_KEY_ESCAPE && action == GLFW_PRESS)
+ glfwSetWindowShouldClose(window, GLFW_TRUE);
+}
+
+int main(void)
+{
+ glfwSetErrorCallback(error_callback);
+
+ if (!glfwInit())
+ exit(EXIT_FAILURE);
+
+ glfwWindowHint(GLFW_CLIENT_API, GLFW_OPENGL_ES_API);
+ glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 2);
+ glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 0);
+ glfwWindowHint(GLFW_CONTEXT_CREATION_API, GLFW_EGL_CONTEXT_API);
+
+ GLFWwindow* window = glfwCreateWindow(640, 480, "OpenGL ES 2.0 Triangle (EGL)", NULL, NULL);
+ if (!window)
+ {
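+        // EGL-based context creation failed; retry with the platform's
+        // native context creation API, which may also provide OpenGL ES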
+ glfwWindowHint(GLFW_CONTEXT_CREATION_API, GLFW_NATIVE_CONTEXT_API);
+ window = glfwCreateWindow(640, 480, "OpenGL ES 2.0 Triangle", NULL, NULL);
+ if (!window)
+ {
+ glfwTerminate();
+ exit(EXIT_FAILURE);
+ }
+ }
+
+ glfwSetKeyCallback(window, key_callback);
+
+ glfwMakeContextCurrent(window);
+ gladLoadGLES2(glfwGetProcAddress);
+ glfwSwapInterval(1);
+
+ GLuint vertex_buffer;
+ glGenBuffers(1, &vertex_buffer);
+ glBindBuffer(GL_ARRAY_BUFFER, vertex_buffer);
+ glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW);
+
+ const GLuint vertex_shader = glCreateShader(GL_VERTEX_SHADER);
+ glShaderSource(vertex_shader, 1, &vertex_shader_text, NULL);
+ glCompileShader(vertex_shader);
+
+ const GLuint fragment_shader = glCreateShader(GL_FRAGMENT_SHADER);
+ glShaderSource(fragment_shader, 1, &fragment_shader_text, NULL);
+ glCompileShader(fragment_shader);
+
+ const GLuint program = glCreateProgram();
+ glAttachShader(program, vertex_shader);
+ glAttachShader(program, fragment_shader);
+ glLinkProgram(program);
+
+ const GLint mvp_location = glGetUniformLocation(program, "MVP");
+ const GLint vpos_location = glGetAttribLocation(program, "vPos");
+ const GLint vcol_location = glGetAttribLocation(program, "vCol");
+
+ glEnableVertexAttribArray(vpos_location);
+ glEnableVertexAttribArray(vcol_location);
+ glVertexAttribPointer(vpos_location, 2, GL_FLOAT, GL_FALSE,
+ sizeof(Vertex), (void*) offsetof(Vertex, pos));
+ glVertexAttribPointer(vcol_location, 3, GL_FLOAT, GL_FALSE,
+ sizeof(Vertex), (void*) offsetof(Vertex, col));
+
+ while (!glfwWindowShouldClose(window))
+ {
+ int width, height;
+ glfwGetFramebufferSize(window, &width, &height);
+ const float ratio = width / (float) height;
+
+ glViewport(0, 0, width, height);
+ glClear(GL_COLOR_BUFFER_BIT);
+
+ mat4x4 m, p, mvp;
+ mat4x4_identity(m);
+ mat4x4_rotate_Z(m, m, (float) glfwGetTime());
+ mat4x4_ortho(p, -ratio, ratio, -1.f, 1.f, 1.f, -1.f);
+ mat4x4_mul(mvp, p, m);
+
+ glUseProgram(program);
+ glUniformMatrix4fv(mvp_location, 1, GL_FALSE, (const GLfloat*) &mvp);
+ glDrawArrays(GL_TRIANGLES, 0, 3);
+
+ glfwSwapBuffers(window);
+ glfwPollEvents();
+ }
+
+ glfwDestroyWindow(window);
+
+ glfwTerminate();
+ exit(EXIT_SUCCESS);
+}
+
diff --git a/chromium/third_party/dawn/third_party/glfw/examples/wave.c b/chromium/third_party/dawn/third_party/glfw/examples/wave.c
new file mode 100644
index 00000000000..d7ead49398b
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/examples/wave.c
@@ -0,0 +1,463 @@
+/*****************************************************************************
+ * Wave Simulation in OpenGL
+ * (C) 2002 Jakob Thomsen
+ * http://home.in.tum.de/~thomsen
+ * Modified for GLFW by Sylvain Hellegouarch - sh@programmationworld.com
+ * Modified for variable frame rate by Marcus Geelnard
+ * 2003-Jan-31: Minor cleanups and speedups / MG
+ * 2010-10-24: Formatting and cleanup - Camilla Löwy
+ *****************************************************************************/
+
+#if defined(_MSC_VER)
+ // Make MS math.h define M_PI
+ #define _USE_MATH_DEFINES
+#endif
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <math.h>
+
+#define GLAD_GL_IMPLEMENTATION
+#include <glad/gl.h>
+#define GLFW_INCLUDE_NONE
+#include <GLFW/glfw3.h>
+
+#include <linmath.h>
+
+// Maximum delta T to allow for differential calculations
+#define MAX_DELTA_T 0.01
+
+// Animation speed (10.0 looks good)
+#define ANIMATION_SPEED 10.0
+
+GLfloat alpha = 210.f, beta = -70.f;
+GLfloat zoom = 2.f;
+
+double cursorX;
+double cursorY;
+
+struct Vertex
+{
+ GLfloat x, y, z;
+ GLfloat r, g, b;
+};
+
+#define GRIDW 50
+#define GRIDH 50
+#define VERTEXNUM (GRIDW*GRIDH)
+
+#define QUADW (GRIDW - 1)
+#define QUADH (GRIDH - 1)
+#define QUADNUM (QUADW*QUADH)
+
+GLuint quad[4 * QUADNUM];
+struct Vertex vertex[VERTEXNUM];
+
+/* The grid will look like this:
+ *
+ * 3 4 5
+ * *---*---*
+ * | | |
+ * | 0 | 1 |
+ * | | |
+ * *---*---*
+ * 0 1 2
+ */
+
+//========================================================================
+// Initialize grid geometry
+//========================================================================
+
+void init_vertices(void)
+{
+ int x, y, p;
+
+ // Place the vertices in a grid
+ for (y = 0; y < GRIDH; y++)
+ {
+ for (x = 0; x < GRIDW; x++)
+ {
+ p = y * GRIDW + x;
+
+ vertex[p].x = (GLfloat) (x - GRIDW / 2) / (GLfloat) (GRIDW / 2);
+ vertex[p].y = (GLfloat) (y - GRIDH / 2) / (GLfloat) (GRIDH / 2);
+ vertex[p].z = 0;
+
+ if ((x % 4 < 2) ^ (y % 4 < 2))
+ vertex[p].r = 0.0;
+ else
+ vertex[p].r = 1.0;
+
+ vertex[p].g = (GLfloat) y / (GLfloat) GRIDH;
+ vertex[p].b = 1.f - ((GLfloat) x / (GLfloat) GRIDW + (GLfloat) y / (GLfloat) GRIDH) / 2.f;
+ }
+ }
+
+ for (y = 0; y < QUADH; y++)
+ {
+ for (x = 0; x < QUADW; x++)
+ {
+ p = 4 * (y * QUADW + x);
+
+ quad[p + 0] = y * GRIDW + x; // Some point
+ quad[p + 1] = y * GRIDW + x + 1; // Neighbor at the right side
+ quad[p + 2] = (y + 1) * GRIDW + x + 1; // Upper right neighbor
+ quad[p + 3] = (y + 1) * GRIDW + x; // Upper neighbor
+ }
+ }
+}
+
+double dt;
+double p[GRIDW][GRIDH];
+double vx[GRIDW][GRIDH], vy[GRIDW][GRIDH];
+double ax[GRIDW][GRIDH], ay[GRIDW][GRIDH];
+
+//========================================================================
+// Initialize grid
+//========================================================================
+
+void init_grid(void)
+{
+ int x, y;
+ double dx, dy, d;
+
+ for (y = 0; y < GRIDH; y++)
+ {
+ for (x = 0; x < GRIDW; x++)
+ {
+ dx = (double) (x - GRIDW / 2);
+ dy = (double) (y - GRIDH / 2);
+ d = sqrt(dx * dx + dy * dy);
+ if (d < 0.1 * (double) (GRIDW / 2))
+ {
+ d = d * 10.0;
+ p[x][y] = -cos(d * (M_PI / (double)(GRIDW * 4))) * 100.0;
+ }
+ else
+ p[x][y] = 0.0;
+
+ vx[x][y] = 0.0;
+ vy[x][y] = 0.0;
+ }
+ }
+}
+
+
+//========================================================================
+// Draw scene
+//========================================================================
+
+void draw_scene(GLFWwindow* window)
+{
+ // Clear the color and depth buffers
+ glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
+
+ // We don't want to modify the projection matrix
+ glMatrixMode(GL_MODELVIEW);
+ glLoadIdentity();
+
+ // Move back
+ glTranslatef(0.0, 0.0, -zoom);
+ // Rotate the view
+ glRotatef(beta, 1.0, 0.0, 0.0);
+ glRotatef(alpha, 0.0, 0.0, 1.0);
+
+ glDrawElements(GL_QUADS, 4 * QUADNUM, GL_UNSIGNED_INT, quad);
+
+ glfwSwapBuffers(window);
+}
+
+
+//========================================================================
+// Initialize Miscellaneous OpenGL state
+//========================================================================
+
+void init_opengl(void)
+{
+ // Use Gouraud (smooth) shading
+ glShadeModel(GL_SMOOTH);
+
+ // Switch on the z-buffer
+ glEnable(GL_DEPTH_TEST);
+
+ glEnableClientState(GL_VERTEX_ARRAY);
+ glEnableClientState(GL_COLOR_ARRAY);
+ glVertexPointer(3, GL_FLOAT, sizeof(struct Vertex), vertex);
+ glColorPointer(3, GL_FLOAT, sizeof(struct Vertex), &vertex[0].r); // Pointer to the first color
+
+ glPointSize(2.0);
+
+ // Background color is black
+ glClearColor(0, 0, 0, 0);
+}
+
+
+//========================================================================
+// Modify the height of each vertex according to the pressure
+//========================================================================
+
+void adjust_grid(void)
+{
+ int pos;
+ int x, y;
+
+ for (y = 0; y < GRIDH; y++)
+ {
+ for (x = 0; x < GRIDW; x++)
+ {
+ pos = y * GRIDW + x;
+ vertex[pos].z = (float) (p[x][y] * (1.0 / 50.0));
+ }
+ }
+}
+
+
+//========================================================================
+// Calculate wave propagation
+//========================================================================
+
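+// The simulation is a simple explicit finite-difference scheme: pressure
+// differences between neighbouring grid cells act as accelerations on the
+// velocity fields vx/vy, and the local velocity divergence in turn feeds
+// back into the pressure field p.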
+void calc_grid(void)
+{
+ int x, y, x2, y2;
+ double time_step = dt * ANIMATION_SPEED;
+
+ // Compute accelerations
+ for (x = 0; x < GRIDW; x++)
+ {
+ x2 = (x + 1) % GRIDW;
+ for(y = 0; y < GRIDH; y++)
+ ax[x][y] = p[x][y] - p[x2][y];
+ }
+
+ for (y = 0; y < GRIDH; y++)
+ {
+ y2 = (y + 1) % GRIDH;
+ for(x = 0; x < GRIDW; x++)
+ ay[x][y] = p[x][y] - p[x][y2];
+ }
+
+ // Compute speeds
+ for (x = 0; x < GRIDW; x++)
+ {
+ for (y = 0; y < GRIDH; y++)
+ {
+ vx[x][y] = vx[x][y] + ax[x][y] * time_step;
+ vy[x][y] = vy[x][y] + ay[x][y] * time_step;
+ }
+ }
+
+ // Compute pressure
+ for (x = 1; x < GRIDW; x++)
+ {
+ x2 = x - 1;
+ for (y = 1; y < GRIDH; y++)
+ {
+ y2 = y - 1;
+ p[x][y] = p[x][y] + (vx[x2][y] - vx[x][y] + vy[x][y2] - vy[x][y]) * time_step;
+ }
+ }
+}
+
+
+//========================================================================
+// Print errors
+//========================================================================
+
+static void error_callback(int error, const char* description)
+{
+ fprintf(stderr, "Error: %s\n", description);
+}
+
+
+//========================================================================
+// Handle key strokes
+//========================================================================
+
+void key_callback(GLFWwindow* window, int key, int scancode, int action, int mods)
+{
+ if (action != GLFW_PRESS)
+ return;
+
+ switch (key)
+ {
+ case GLFW_KEY_ESCAPE:
+ glfwSetWindowShouldClose(window, GLFW_TRUE);
+ break;
+ case GLFW_KEY_SPACE:
+ init_grid();
+ break;
+ case GLFW_KEY_LEFT:
+ alpha += 5;
+ break;
+ case GLFW_KEY_RIGHT:
+ alpha -= 5;
+ break;
+ case GLFW_KEY_UP:
+ beta -= 5;
+ break;
+ case GLFW_KEY_DOWN:
+ beta += 5;
+ break;
+ case GLFW_KEY_PAGE_UP:
+ zoom -= 0.25f;
+ if (zoom < 0.f)
+ zoom = 0.f;
+ break;
+ case GLFW_KEY_PAGE_DOWN:
+ zoom += 0.25f;
+ break;
+ default:
+ break;
+ }
+}
+
+
+//========================================================================
+// Callback function for mouse button events
+//========================================================================
+
+void mouse_button_callback(GLFWwindow* window, int button, int action, int mods)
+{
+ if (button != GLFW_MOUSE_BUTTON_LEFT)
+ return;
+
+ if (action == GLFW_PRESS)
+ {
+ glfwSetInputMode(window, GLFW_CURSOR, GLFW_CURSOR_DISABLED);
+ glfwGetCursorPos(window, &cursorX, &cursorY);
+ }
+ else
+ glfwSetInputMode(window, GLFW_CURSOR, GLFW_CURSOR_NORMAL);
+}
+
+
+//========================================================================
+// Callback function for cursor motion events
+//========================================================================
+
+void cursor_position_callback(GLFWwindow* window, double x, double y)
+{
+ if (glfwGetInputMode(window, GLFW_CURSOR) == GLFW_CURSOR_DISABLED)
+ {
+ alpha += (GLfloat) (x - cursorX) / 10.f;
+ beta += (GLfloat) (y - cursorY) / 10.f;
+
+ cursorX = x;
+ cursorY = y;
+ }
+}
+
+
+//========================================================================
+// Callback function for scroll events
+//========================================================================
+
+void scroll_callback(GLFWwindow* window, double x, double y)
+{
+ zoom += (float) y / 4.f;
+ if (zoom < 0)
+ zoom = 0;
+}
+
+
+//========================================================================
+// Callback function for framebuffer resize events
+//========================================================================
+
+void framebuffer_size_callback(GLFWwindow* window, int width, int height)
+{
+ float ratio = 1.f;
+ mat4x4 projection;
+
+ if (height > 0)
+ ratio = (float) width / (float) height;
+
+ // Setup viewport
+ glViewport(0, 0, width, height);
+
+ // Change to the projection matrix and set our viewing volume
+ glMatrixMode(GL_PROJECTION);
+ mat4x4_perspective(projection,
+ 60.f * (float) M_PI / 180.f,
+ ratio,
+ 1.f, 1024.f);
+ glLoadMatrixf((const GLfloat*) projection);
+}
+
+
+//========================================================================
+// main
+//========================================================================
+
+int main(int argc, char* argv[])
+{
+ GLFWwindow* window;
+ double t, dt_total, t_old;
+ int width, height;
+
+ glfwSetErrorCallback(error_callback);
+
+ if (!glfwInit())
+ exit(EXIT_FAILURE);
+
+ window = glfwCreateWindow(640, 480, "Wave Simulation", NULL, NULL);
+ if (!window)
+ {
+ glfwTerminate();
+ exit(EXIT_FAILURE);
+ }
+
+ glfwSetKeyCallback(window, key_callback);
+ glfwSetFramebufferSizeCallback(window, framebuffer_size_callback);
+ glfwSetMouseButtonCallback(window, mouse_button_callback);
+ glfwSetCursorPosCallback(window, cursor_position_callback);
+ glfwSetScrollCallback(window, scroll_callback);
+
+ glfwMakeContextCurrent(window);
+ gladLoadGL(glfwGetProcAddress);
+ glfwSwapInterval(1);
+
+ glfwGetFramebufferSize(window, &width, &height);
+ framebuffer_size_callback(window, width, height);
+
+ // Initialize OpenGL
+ init_opengl();
+
+ // Initialize simulation
+ init_vertices();
+ init_grid();
+ adjust_grid();
+
+ // Initialize timer
+ t_old = glfwGetTime() - 0.01;
+
+ while (!glfwWindowShouldClose(window))
+ {
+ t = glfwGetTime();
+ dt_total = t - t_old;
+ t_old = t;
+
+ // Safety - iterate if dt_total is too large
+ while (dt_total > 0.f)
+ {
+ // Select iteration time step
+ dt = dt_total > MAX_DELTA_T ? MAX_DELTA_T : dt_total;
+ dt_total -= dt;
+
+ // Calculate wave propagation
+ calc_grid();
+ }
+
+ // Compute height of each vertex
+ adjust_grid();
+
+ // Draw wave grid to OpenGL display
+ draw_scene(window);
+
+ glfwPollEvents();
+ }
+
+ glfwTerminate();
+ exit(EXIT_SUCCESS);
+}
+
diff --git a/chromium/third_party/dawn/third_party/glfw/examples/windows.c b/chromium/third_party/dawn/third_party/glfw/examples/windows.c
new file mode 100644
index 00000000000..598e521836c
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/examples/windows.c
@@ -0,0 +1,110 @@
+//========================================================================
+// Simple multi-window example
+// Copyright (c) Camilla Löwy <elmindreda@glfw.org>
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would
+// be appreciated but is not required.
+//
+// 2. Altered source versions must be plainly marked as such, and must not
+// be misrepresented as being the original software.
+//
+// 3. This notice may not be removed or altered from any source
+// distribution.
+//
+//========================================================================
+
+#define GLAD_GL_IMPLEMENTATION
+#include <glad/gl.h>
+#define GLFW_INCLUDE_NONE
+#include <GLFW/glfw3.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+
+int main(int argc, char** argv)
+{
+ int xpos, ypos, height;
+ const char* description;
+ GLFWwindow* windows[4];
+
+ if (!glfwInit())
+ {
+ glfwGetError(&description);
+ printf("Error: %s\n", description);
+ exit(EXIT_FAILURE);
+ }
+
+ glfwWindowHint(GLFW_VISIBLE, GLFW_FALSE);
+ glfwWindowHint(GLFW_DECORATED, GLFW_FALSE);
+
+ glfwGetMonitorWorkarea(glfwGetPrimaryMonitor(), &xpos, &ypos, NULL, &height);
+
+ for (int i = 0; i < 4; i++)
+ {
+ const int size = height / 5;
+ const struct
+ {
+ float r, g, b;
+ } colors[] =
+ {
+ { 0.95f, 0.32f, 0.11f },
+ { 0.50f, 0.80f, 0.16f },
+ { 0.f, 0.68f, 0.94f },
+ { 0.98f, 0.74f, 0.04f }
+ };
+
+ if (i > 0)
+ glfwWindowHint(GLFW_FOCUS_ON_SHOW, GLFW_FALSE);
+
+ windows[i] = glfwCreateWindow(size, size, "Multi-Window Example", NULL, NULL);
+ if (!windows[i])
+ {
+ glfwGetError(&description);
+ printf("Error: %s\n", description);
+ glfwTerminate();
+ exit(EXIT_FAILURE);
+ }
+
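+        // Tile the four windows in a 2x2 grid within the monitor work area:
+        // (i & 1) selects the column and (i >> 1) selects the row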
+ glfwSetWindowPos(windows[i],
+ xpos + size * (1 + (i & 1)),
+ ypos + size * (1 + (i >> 1)));
+ glfwSetInputMode(windows[i], GLFW_STICKY_KEYS, GLFW_TRUE);
+
+ glfwMakeContextCurrent(windows[i]);
+ gladLoadGL(glfwGetProcAddress);
+ glClearColor(colors[i].r, colors[i].g, colors[i].b, 1.f);
+ }
+
+ for (int i = 0; i < 4; i++)
+ glfwShowWindow(windows[i]);
+
+ for (;;)
+ {
+ for (int i = 0; i < 4; i++)
+ {
+ glfwMakeContextCurrent(windows[i]);
+ glClear(GL_COLOR_BUFFER_BIT);
+ glfwSwapBuffers(windows[i]);
+
+ if (glfwWindowShouldClose(windows[i]) ||
+ glfwGetKey(windows[i], GLFW_KEY_ESCAPE))
+ {
+ glfwTerminate();
+ exit(EXIT_SUCCESS);
+ }
+ }
+
+ glfwWaitEvents();
+ }
+}
+
diff --git a/chromium/third_party/dawn/third_party/glfw/include/GLFW/glfw3.h b/chromium/third_party/dawn/third_party/glfw/include/GLFW/glfw3.h
new file mode 100644
index 00000000000..52225c743d9
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/include/GLFW/glfw3.h
@@ -0,0 +1,6397 @@
+/*************************************************************************
+ * GLFW 3.4 - www.glfw.org
+ * A library for OpenGL, window and input
+ *------------------------------------------------------------------------
+ * Copyright (c) 2002-2006 Marcus Geelnard
+ * Copyright (c) 2006-2019 Camilla Löwy <elmindreda@glfw.org>
+ *
+ * This software is provided 'as-is', without any express or implied
+ * warranty. In no event will the authors be held liable for any damages
+ * arising from the use of this software.
+ *
+ * Permission is granted to anyone to use this software for any purpose,
+ * including commercial applications, and to alter it and redistribute it
+ * freely, subject to the following restrictions:
+ *
+ * 1. The origin of this software must not be misrepresented; you must not
+ * claim that you wrote the original software. If you use this software
+ * in a product, an acknowledgment in the product documentation would
+ * be appreciated but is not required.
+ *
+ * 2. Altered source versions must be plainly marked as such, and must not
+ * be misrepresented as being the original software.
+ *
+ * 3. This notice may not be removed or altered from any source
+ * distribution.
+ *
+ *************************************************************************/
+
+#ifndef _glfw3_h_
+#define _glfw3_h_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/*************************************************************************
+ * Doxygen documentation
+ *************************************************************************/
+
+/*! @file glfw3.h
+ * @brief The header of the GLFW 3 API.
+ *
+ * This is the header file of the GLFW 3 API. It defines all its types and
+ * declares all its functions.
+ *
+ * For more information about how to use this file, see @ref build_include.
+ */
+/*! @defgroup context Context reference
+ * @brief Functions and types related to OpenGL and OpenGL ES contexts.
+ *
+ * This is the reference documentation for OpenGL and OpenGL ES context related
+ * functions. For more task-oriented information, see the @ref context_guide.
+ */
+/*! @defgroup vulkan Vulkan support reference
+ * @brief Functions and types related to Vulkan.
+ *
+ * This is the reference documentation for Vulkan related functions and types.
+ * For more task-oriented information, see the @ref vulkan_guide.
+ */
+/*! @defgroup init Initialization, version and error reference
+ * @brief Functions and types related to initialization and error handling.
+ *
+ * This is the reference documentation for initialization and termination of
+ * the library, version management and error handling. For more task-oriented
+ * information, see the @ref intro_guide.
+ */
+/*! @defgroup input Input reference
+ * @brief Functions and types related to input handling.
+ *
+ * This is the reference documentation for input related functions and types.
+ * For more task-oriented information, see the @ref input_guide.
+ */
+/*! @defgroup monitor Monitor reference
+ * @brief Functions and types related to monitors.
+ *
+ * This is the reference documentation for monitor related functions and types.
+ * For more task-oriented information, see the @ref monitor_guide.
+ */
+/*! @defgroup window Window reference
+ * @brief Functions and types related to windows.
+ *
+ * This is the reference documentation for window related functions and types,
+ * including creation, deletion and event polling. For more task-oriented
+ * information, see the @ref window_guide.
+ */
+
+
+/*************************************************************************
+ * Compiler- and platform-specific preprocessor work
+ *************************************************************************/
+
+/* If we are on Windows, we want a single define for it.
+ */
+#if !defined(_WIN32) && (defined(__WIN32__) || defined(WIN32) || defined(__MINGW32__))
+ #define _WIN32
+#endif /* _WIN32 */
+
+/* Include <stddef.h> because most Windows GLU headers need wchar_t and
+ * the macOS OpenGL header blocks the definition of ptrdiff_t by glext.h.
+ * Include it unconditionally to avoid surprising side-effects.
+ */
+#include <stddef.h>
+
+/* Include <stdint.h> because it is needed by Vulkan and related functions.
+ * Include it unconditionally to avoid surprising side-effects.
+ */
+#include <stdint.h>
+
+#if defined(GLFW_INCLUDE_VULKAN)
+ #include <vulkan/vulkan.h>
+#endif /* Vulkan header */
+
+/* The Vulkan header may have indirectly included windows.h (because of
+ * VK_USE_PLATFORM_WIN32_KHR) so we offer our replacement symbols after it.
+ */
+
+/* It is customary to use APIENTRY for OpenGL function pointer declarations on
+ * all platforms. Additionally, the Windows OpenGL header needs APIENTRY.
+ */
+#if !defined(APIENTRY)
+ #if defined(_WIN32)
+ #define APIENTRY __stdcall
+ #else
+ #define APIENTRY
+ #endif
+ #define GLFW_APIENTRY_DEFINED
+#endif /* APIENTRY */
+
+/* Some Windows OpenGL headers need this.
+ */
+#if !defined(WINGDIAPI) && defined(_WIN32)
+ #define WINGDIAPI __declspec(dllimport)
+ #define GLFW_WINGDIAPI_DEFINED
+#endif /* WINGDIAPI */
+
+/* Some Windows GLU headers need this.
+ */
+#if !defined(CALLBACK) && defined(_WIN32)
+ #define CALLBACK __stdcall
+ #define GLFW_CALLBACK_DEFINED
+#endif /* CALLBACK */
+
+/* Include the chosen OpenGL or OpenGL ES headers.
+ */
+#if defined(GLFW_INCLUDE_ES1)
+
+ #include <GLES/gl.h>
+ #if defined(GLFW_INCLUDE_GLEXT)
+ #include <GLES/glext.h>
+ #endif
+
+#elif defined(GLFW_INCLUDE_ES2)
+
+ #include <GLES2/gl2.h>
+ #if defined(GLFW_INCLUDE_GLEXT)
+ #include <GLES2/gl2ext.h>
+ #endif
+
+#elif defined(GLFW_INCLUDE_ES3)
+
+ #include <GLES3/gl3.h>
+ #if defined(GLFW_INCLUDE_GLEXT)
+ #include <GLES2/gl2ext.h>
+ #endif
+
+#elif defined(GLFW_INCLUDE_ES31)
+
+ #include <GLES3/gl31.h>
+ #if defined(GLFW_INCLUDE_GLEXT)
+ #include <GLES2/gl2ext.h>
+ #endif
+
+#elif defined(GLFW_INCLUDE_ES32)
+
+ #include <GLES3/gl32.h>
+ #if defined(GLFW_INCLUDE_GLEXT)
+ #include <GLES2/gl2ext.h>
+ #endif
+
+#elif defined(GLFW_INCLUDE_GLCOREARB)
+
+ #if defined(__APPLE__)
+
+ #include <OpenGL/gl3.h>
+ #if defined(GLFW_INCLUDE_GLEXT)
+ #include <OpenGL/gl3ext.h>
+ #endif /*GLFW_INCLUDE_GLEXT*/
+
+ #else /*__APPLE__*/
+
+ #include <GL/glcorearb.h>
+ #if defined(GLFW_INCLUDE_GLEXT)
+ #include <GL/glext.h>
+ #endif
+
+ #endif /*__APPLE__*/
+
+#elif defined(GLFW_INCLUDE_GLU)
+
+ #if defined(__APPLE__)
+
+ #if defined(GLFW_INCLUDE_GLU)
+ #include <OpenGL/glu.h>
+ #endif
+
+ #else /*__APPLE__*/
+
+ #if defined(GLFW_INCLUDE_GLU)
+ #include <GL/glu.h>
+ #endif
+
+ #endif /*__APPLE__*/
+
+#elif !defined(GLFW_INCLUDE_NONE) && \
+ !defined(__gl_h_) && \
+ !defined(__gles1_gl_h_) && \
+ !defined(__gles2_gl2_h_) && \
+ !defined(__gles2_gl3_h_) && \
+ !defined(__gles2_gl31_h_) && \
+ !defined(__gles2_gl32_h_) && \
+ !defined(__gl_glcorearb_h_) && \
+ !defined(__gl2_h_) /*legacy*/ && \
+ !defined(__gl3_h_) /*legacy*/ && \
+ !defined(__gl31_h_) /*legacy*/ && \
+ !defined(__gl32_h_) /*legacy*/ && \
+ !defined(__glcorearb_h_) /*legacy*/ && \
+ !defined(__GL_H__) /*non-standard*/ && \
+ !defined(__gltypes_h_) /*non-standard*/ && \
+ !defined(__glee_h_) /*non-standard*/
+
+ #if defined(__APPLE__)
+
+ #if !defined(GLFW_INCLUDE_GLEXT)
+ #define GL_GLEXT_LEGACY
+ #endif
+ #include <OpenGL/gl.h>
+
+ #else /*__APPLE__*/
+
+ #include <GL/gl.h>
+ #if defined(GLFW_INCLUDE_GLEXT)
+ #include <GL/glext.h>
+ #endif
+
+ #endif /*__APPLE__*/
+
+#endif /* OpenGL and OpenGL ES headers */
+
+#if defined(GLFW_DLL) && defined(_GLFW_BUILD_DLL)
+ /* GLFW_DLL must be defined by applications that are linking against the DLL
+ * version of the GLFW library. _GLFW_BUILD_DLL is defined by the GLFW
+ * configuration header when compiling the DLL version of the library.
+ */
+ #error "You must not have both GLFW_DLL and _GLFW_BUILD_DLL defined"
+#endif
+
+/* GLFWAPI is used to declare public API functions for export
+ * from the DLL / shared library / dynamic library.
+ */
+#if defined(_WIN32) && defined(_GLFW_BUILD_DLL)
+ /* We are building GLFW as a Win32 DLL */
+ #define GLFWAPI __declspec(dllexport)
+#elif defined(_WIN32) && defined(GLFW_DLL)
+ /* We are calling a GLFW Win32 DLL */
+ #define GLFWAPI __declspec(dllimport)
+#elif defined(__GNUC__) && defined(_GLFW_BUILD_DLL)
+ /* We are building GLFW as a Unix shared library */
+ #define GLFWAPI __attribute__((visibility("default")))
+#else
+ #define GLFWAPI
+#endif
+
+
+/*************************************************************************
+ * GLFW API tokens
+ *************************************************************************/
+
+/*! @name GLFW version macros
+ * @{ */
+/*! @brief The major version number of the GLFW header.
+ *
+ * The major version number of the GLFW header. This is incremented when the
+ * API is changed in non-compatible ways.
+ * @ingroup init
+ */
+#define GLFW_VERSION_MAJOR 3
+/*! @brief The minor version number of the GLFW header.
+ *
+ * The minor version number of the GLFW header. This is incremented when
+ * features are added to the API but it remains backward-compatible.
+ * @ingroup init
+ */
+#define GLFW_VERSION_MINOR 4
+/*! @brief The revision number of the GLFW header.
+ *
+ * The revision number of the GLFW header. This is incremented when a bug fix
+ * release is made that does not contain any API changes.
+ * @ingroup init
+ */
+#define GLFW_VERSION_REVISION 0
+/*! @} */
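+
+/* These macros describe the header the application compiles against; the
+ * version of the library actually loaded can be retrieved at run time with
+ * glfwGetVersion.  As an illustration, a compile-time sanity check might
+ * look like this:
+ *
+ *     #if GLFW_VERSION_MAJOR != 3
+ *     #error "This code requires the GLFW 3 header"
+ *     #endif
+ */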
+
+/*! @brief One.
+ *
+ * This is only semantic sugar for the number 1. You can instead use `1` or
+ * `true` or `_True` or `GL_TRUE` or `VK_TRUE` or anything else that is equal
+ * to one.
+ *
+ * @ingroup init
+ */
+#define GLFW_TRUE 1
+/*! @brief Zero.
+ *
+ * This is only semantic sugar for the number 0. You can instead use `0` or
+ * `false` or `_False` or `GL_FALSE` or `VK_FALSE` or anything else that is
+ * equal to zero.
+ *
+ * @ingroup init
+ */
+#define GLFW_FALSE 0
+
+/*! @name Key and button actions
+ * @{ */
+/*! @brief The key or mouse button was released.
+ *
+ * The key or mouse button was released.
+ *
+ * @ingroup input
+ */
+#define GLFW_RELEASE 0
+/*! @brief The key or mouse button was pressed.
+ *
+ * The key or mouse button was pressed.
+ *
+ * @ingroup input
+ */
+#define GLFW_PRESS 1
+/*! @brief The key was held down until it repeated.
+ *
+ * The key was held down until it repeated.
+ *
+ * @ingroup input
+ */
+#define GLFW_REPEAT 2
+/*! @} */
+
+/*! @defgroup hat_state Joystick hat states
+ * @brief Joystick hat states.
+ *
+ * See [joystick hat input](@ref joystick_hat) for how these are used.
+ *
+ * @ingroup input
+ * @{ */
+#define GLFW_HAT_CENTERED 0
+#define GLFW_HAT_UP 1
+#define GLFW_HAT_RIGHT 2
+#define GLFW_HAT_DOWN 4
+#define GLFW_HAT_LEFT 8
+#define GLFW_HAT_RIGHT_UP (GLFW_HAT_RIGHT | GLFW_HAT_UP)
+#define GLFW_HAT_RIGHT_DOWN (GLFW_HAT_RIGHT | GLFW_HAT_DOWN)
+#define GLFW_HAT_LEFT_UP (GLFW_HAT_LEFT | GLFW_HAT_UP)
+#define GLFW_HAT_LEFT_DOWN (GLFW_HAT_LEFT | GLFW_HAT_DOWN)
+/*! @} */
+
+/*! @defgroup keys Keyboard keys
+ * @brief Keyboard key IDs.
+ *
+ * See [key input](@ref input_key) for how these are used.
+ *
+ * These key codes are inspired by the _USB HID Usage Tables v1.12_ (p. 53-60),
+ * but re-arranged to map to 7-bit ASCII for printable keys (function keys are
+ * put in the 256+ range).
+ *
+ * The naming of the key codes follows these rules:
+ * - The US keyboard layout is used
+ * - Names of printable alphanumeric characters are used (e.g. "A", "R",
+ * "3", etc.)
+ * - For non-alphanumeric characters, Unicode-ish names are used (e.g.
+ * "COMMA", "LEFT_SQUARE_BRACKET", etc.). Note that some names do not
+ * correspond to the Unicode standard (usually for brevity)
+ * - Keys that lack a clear US mapping are named "WORLD_x"
+ * - For non-printable keys, custom names are used (e.g. "F4",
+ * "BACKSPACE", etc.)
+ *
+ * @ingroup input
+ * @{
+ */
+
+/* The unknown key */
+#define GLFW_KEY_UNKNOWN -1
+
+/* Printable keys */
+#define GLFW_KEY_SPACE 32
+#define GLFW_KEY_APOSTROPHE 39 /* ' */
+#define GLFW_KEY_COMMA 44 /* , */
+#define GLFW_KEY_MINUS 45 /* - */
+#define GLFW_KEY_PERIOD 46 /* . */
+#define GLFW_KEY_SLASH 47 /* / */
+#define GLFW_KEY_0 48
+#define GLFW_KEY_1 49
+#define GLFW_KEY_2 50
+#define GLFW_KEY_3 51
+#define GLFW_KEY_4 52
+#define GLFW_KEY_5 53
+#define GLFW_KEY_6 54
+#define GLFW_KEY_7 55
+#define GLFW_KEY_8 56
+#define GLFW_KEY_9 57
+#define GLFW_KEY_SEMICOLON 59 /* ; */
+#define GLFW_KEY_EQUAL 61 /* = */
+#define GLFW_KEY_A 65
+#define GLFW_KEY_B 66
+#define GLFW_KEY_C 67
+#define GLFW_KEY_D 68
+#define GLFW_KEY_E 69
+#define GLFW_KEY_F 70
+#define GLFW_KEY_G 71
+#define GLFW_KEY_H 72
+#define GLFW_KEY_I 73
+#define GLFW_KEY_J 74
+#define GLFW_KEY_K 75
+#define GLFW_KEY_L 76
+#define GLFW_KEY_M 77
+#define GLFW_KEY_N 78
+#define GLFW_KEY_O 79
+#define GLFW_KEY_P 80
+#define GLFW_KEY_Q 81
+#define GLFW_KEY_R 82
+#define GLFW_KEY_S 83
+#define GLFW_KEY_T 84
+#define GLFW_KEY_U 85
+#define GLFW_KEY_V 86
+#define GLFW_KEY_W 87
+#define GLFW_KEY_X 88
+#define GLFW_KEY_Y 89
+#define GLFW_KEY_Z 90
+#define GLFW_KEY_LEFT_BRACKET 91 /* [ */
+#define GLFW_KEY_BACKSLASH 92 /* \ */
+#define GLFW_KEY_RIGHT_BRACKET 93 /* ] */
+#define GLFW_KEY_GRAVE_ACCENT 96 /* ` */
+#define GLFW_KEY_WORLD_1 161 /* non-US #1 */
+#define GLFW_KEY_WORLD_2 162 /* non-US #2 */
+
+/* Function keys */
+#define GLFW_KEY_ESCAPE 256
+#define GLFW_KEY_ENTER 257
+#define GLFW_KEY_TAB 258
+#define GLFW_KEY_BACKSPACE 259
+#define GLFW_KEY_INSERT 260
+#define GLFW_KEY_DELETE 261
+#define GLFW_KEY_RIGHT 262
+#define GLFW_KEY_LEFT 263
+#define GLFW_KEY_DOWN 264
+#define GLFW_KEY_UP 265
+#define GLFW_KEY_PAGE_UP 266
+#define GLFW_KEY_PAGE_DOWN 267
+#define GLFW_KEY_HOME 268
+#define GLFW_KEY_END 269
+#define GLFW_KEY_CAPS_LOCK 280
+#define GLFW_KEY_SCROLL_LOCK 281
+#define GLFW_KEY_NUM_LOCK 282
+#define GLFW_KEY_PRINT_SCREEN 283
+#define GLFW_KEY_PAUSE 284
+#define GLFW_KEY_F1 290
+#define GLFW_KEY_F2 291
+#define GLFW_KEY_F3 292
+#define GLFW_KEY_F4 293
+#define GLFW_KEY_F5 294
+#define GLFW_KEY_F6 295
+#define GLFW_KEY_F7 296
+#define GLFW_KEY_F8 297
+#define GLFW_KEY_F9 298
+#define GLFW_KEY_F10 299
+#define GLFW_KEY_F11 300
+#define GLFW_KEY_F12 301
+#define GLFW_KEY_F13 302
+#define GLFW_KEY_F14 303
+#define GLFW_KEY_F15 304
+#define GLFW_KEY_F16 305
+#define GLFW_KEY_F17 306
+#define GLFW_KEY_F18 307
+#define GLFW_KEY_F19 308
+#define GLFW_KEY_F20 309
+#define GLFW_KEY_F21 310
+#define GLFW_KEY_F22 311
+#define GLFW_KEY_F23 312
+#define GLFW_KEY_F24 313
+#define GLFW_KEY_F25 314
+#define GLFW_KEY_KP_0 320
+#define GLFW_KEY_KP_1 321
+#define GLFW_KEY_KP_2 322
+#define GLFW_KEY_KP_3 323
+#define GLFW_KEY_KP_4 324
+#define GLFW_KEY_KP_5 325
+#define GLFW_KEY_KP_6 326
+#define GLFW_KEY_KP_7 327
+#define GLFW_KEY_KP_8 328
+#define GLFW_KEY_KP_9 329
+#define GLFW_KEY_KP_DECIMAL 330
+#define GLFW_KEY_KP_DIVIDE 331
+#define GLFW_KEY_KP_MULTIPLY 332
+#define GLFW_KEY_KP_SUBTRACT 333
+#define GLFW_KEY_KP_ADD 334
+#define GLFW_KEY_KP_ENTER 335
+#define GLFW_KEY_KP_EQUAL 336
+#define GLFW_KEY_LEFT_SHIFT 340
+#define GLFW_KEY_LEFT_CONTROL 341
+#define GLFW_KEY_LEFT_ALT 342
+#define GLFW_KEY_LEFT_SUPER 343
+#define GLFW_KEY_RIGHT_SHIFT 344
+#define GLFW_KEY_RIGHT_CONTROL 345
+#define GLFW_KEY_RIGHT_ALT 346
+#define GLFW_KEY_RIGHT_SUPER 347
+#define GLFW_KEY_MENU 348
+
+#define GLFW_KEY_LAST GLFW_KEY_MENU
+
+/*! @} */
+
+/*! @defgroup mods Modifier key flags
+ * @brief Modifier key flags.
+ *
+ * See [key input](@ref input_key) for how these are used.
+ *
+ * @ingroup input
+ * @{ */
+
+/*! @brief If this bit is set one or more Shift keys were held down.
+ *
+ * If this bit is set one or more Shift keys were held down.
+ */
+#define GLFW_MOD_SHIFT 0x0001
+/*! @brief If this bit is set one or more Control keys were held down.
+ *
+ * If this bit is set one or more Control keys were held down.
+ */
+#define GLFW_MOD_CONTROL 0x0002
+/*! @brief If this bit is set one or more Alt keys were held down.
+ *
+ * If this bit is set one or more Alt keys were held down.
+ */
+#define GLFW_MOD_ALT 0x0004
+/*! @brief If this bit is set one or more Super keys were held down.
+ *
+ * If this bit is set one or more Super keys were held down.
+ */
+#define GLFW_MOD_SUPER 0x0008
+/*! @brief If this bit is set the Caps Lock key is enabled.
+ *
+ * If this bit is set the Caps Lock key is enabled and the @ref
+ * GLFW_LOCK_KEY_MODS input mode is set.
+ */
+#define GLFW_MOD_CAPS_LOCK 0x0010
+/*! @brief If this bit is set the Num Lock key is enabled.
+ *
+ * If this bit is set the Num Lock key is enabled and the @ref
+ * GLFW_LOCK_KEY_MODS input mode is set.
+ */
+#define GLFW_MOD_NUM_LOCK 0x0020
+
+/*! @} */
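+
+/* The modifier flags form a bitfield, so individual modifiers are tested by
+ * masking the `mods` callback argument, for example:
+ *
+ *     if (mods & GLFW_MOD_CONTROL)
+ *         ; // at least one Control key was held down
+ */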
+
+/*! @defgroup buttons Mouse buttons
+ * @brief Mouse button IDs.
+ *
+ * See [mouse button input](@ref input_mouse_button) for how these are used.
+ *
+ * @ingroup input
+ * @{ */
+#define GLFW_MOUSE_BUTTON_1 0
+#define GLFW_MOUSE_BUTTON_2 1
+#define GLFW_MOUSE_BUTTON_3 2
+#define GLFW_MOUSE_BUTTON_4 3
+#define GLFW_MOUSE_BUTTON_5 4
+#define GLFW_MOUSE_BUTTON_6 5
+#define GLFW_MOUSE_BUTTON_7 6
+#define GLFW_MOUSE_BUTTON_8 7
+#define GLFW_MOUSE_BUTTON_LAST GLFW_MOUSE_BUTTON_8
+#define GLFW_MOUSE_BUTTON_LEFT GLFW_MOUSE_BUTTON_1
+#define GLFW_MOUSE_BUTTON_RIGHT GLFW_MOUSE_BUTTON_2
+#define GLFW_MOUSE_BUTTON_MIDDLE GLFW_MOUSE_BUTTON_3
+/*! @} */
+
+/*! @defgroup joysticks Joysticks
+ * @brief Joystick IDs.
+ *
+ * See [joystick input](@ref joystick) for how these are used.
+ *
+ * @ingroup input
+ * @{ */
+#define GLFW_JOYSTICK_1 0
+#define GLFW_JOYSTICK_2 1
+#define GLFW_JOYSTICK_3 2
+#define GLFW_JOYSTICK_4 3
+#define GLFW_JOYSTICK_5 4
+#define GLFW_JOYSTICK_6 5
+#define GLFW_JOYSTICK_7 6
+#define GLFW_JOYSTICK_8 7
+#define GLFW_JOYSTICK_9 8
+#define GLFW_JOYSTICK_10 9
+#define GLFW_JOYSTICK_11 10
+#define GLFW_JOYSTICK_12 11
+#define GLFW_JOYSTICK_13 12
+#define GLFW_JOYSTICK_14 13
+#define GLFW_JOYSTICK_15 14
+#define GLFW_JOYSTICK_16 15
+#define GLFW_JOYSTICK_LAST GLFW_JOYSTICK_16
+/*! @} */
+
+/*! @defgroup gamepad_buttons Gamepad buttons
+ * @brief Gamepad buttons.
+ *
+ * See @ref gamepad for how these are used.
+ *
+ * @ingroup input
+ * @{ */
+#define GLFW_GAMEPAD_BUTTON_A 0
+#define GLFW_GAMEPAD_BUTTON_B 1
+#define GLFW_GAMEPAD_BUTTON_X 2
+#define GLFW_GAMEPAD_BUTTON_Y 3
+#define GLFW_GAMEPAD_BUTTON_LEFT_BUMPER 4
+#define GLFW_GAMEPAD_BUTTON_RIGHT_BUMPER 5
+#define GLFW_GAMEPAD_BUTTON_BACK 6
+#define GLFW_GAMEPAD_BUTTON_START 7
+#define GLFW_GAMEPAD_BUTTON_GUIDE 8
+#define GLFW_GAMEPAD_BUTTON_LEFT_THUMB 9
+#define GLFW_GAMEPAD_BUTTON_RIGHT_THUMB 10
+#define GLFW_GAMEPAD_BUTTON_DPAD_UP 11
+#define GLFW_GAMEPAD_BUTTON_DPAD_RIGHT 12
+#define GLFW_GAMEPAD_BUTTON_DPAD_DOWN 13
+#define GLFW_GAMEPAD_BUTTON_DPAD_LEFT 14
+#define GLFW_GAMEPAD_BUTTON_LAST GLFW_GAMEPAD_BUTTON_DPAD_LEFT
+
+#define GLFW_GAMEPAD_BUTTON_CROSS GLFW_GAMEPAD_BUTTON_A
+#define GLFW_GAMEPAD_BUTTON_CIRCLE GLFW_GAMEPAD_BUTTON_B
+#define GLFW_GAMEPAD_BUTTON_SQUARE GLFW_GAMEPAD_BUTTON_X
+#define GLFW_GAMEPAD_BUTTON_TRIANGLE GLFW_GAMEPAD_BUTTON_Y
+/*! @} */
+
+/*! @defgroup gamepad_axes Gamepad axes
+ * @brief Gamepad axes.
+ *
+ * See @ref gamepad for how these are used.
+ *
+ * @ingroup input
+ * @{ */
+#define GLFW_GAMEPAD_AXIS_LEFT_X 0
+#define GLFW_GAMEPAD_AXIS_LEFT_Y 1
+#define GLFW_GAMEPAD_AXIS_RIGHT_X 2
+#define GLFW_GAMEPAD_AXIS_RIGHT_Y 3
+#define GLFW_GAMEPAD_AXIS_LEFT_TRIGGER 4
+#define GLFW_GAMEPAD_AXIS_RIGHT_TRIGGER 5
+#define GLFW_GAMEPAD_AXIS_LAST GLFW_GAMEPAD_AXIS_RIGHT_TRIGGER
+/*! @} */
+
+/*! @defgroup errors Error codes
+ * @brief Error codes.
+ *
+ * See [error handling](@ref error_handling) for how these are used.
+ *
+ * @ingroup init
+ * @{ */
+/*! @brief No error has occurred.
+ *
+ * No error has occurred.
+ *
+ * @analysis Yay.
+ */
+#define GLFW_NO_ERROR 0
+/*! @brief GLFW has not been initialized.
+ *
+ * This occurs if a GLFW function was called that must not be called unless the
+ * library is [initialized](@ref intro_init).
+ *
+ * @analysis Application programmer error. Initialize GLFW before calling any
+ * function that requires initialization.
+ */
+#define GLFW_NOT_INITIALIZED 0x00010001
+/*! @brief No context is current for this thread.
+ *
+ * This occurs if a GLFW function was called that needs and operates on the
+ * current OpenGL or OpenGL ES context but no context is current on the calling
+ * thread. One such function is @ref glfwSwapInterval.
+ *
+ * @analysis Application programmer error. Ensure a context is current before
+ * calling functions that require a current context.
+ */
+#define GLFW_NO_CURRENT_CONTEXT 0x00010002
+/*! @brief One of the arguments to the function was an invalid enum value.
+ *
+ * One of the arguments to the function was an invalid enum value, for example
+ * requesting @ref GLFW_RED_BITS with @ref glfwGetWindowAttrib.
+ *
+ * @analysis Application programmer error. Fix the offending call.
+ */
+#define GLFW_INVALID_ENUM 0x00010003
+/*! @brief One of the arguments to the function was an invalid value.
+ *
+ * One of the arguments to the function was an invalid value, for example
+ * requesting a non-existent OpenGL or OpenGL ES version like 2.7.
+ *
+ * Requesting a valid but unavailable OpenGL or OpenGL ES version will instead
+ * result in a @ref GLFW_VERSION_UNAVAILABLE error.
+ *
+ * @analysis Application programmer error. Fix the offending call.
+ */
+#define GLFW_INVALID_VALUE 0x00010004
+/*! @brief A memory allocation failed.
+ *
+ * A memory allocation failed.
+ *
+ * @analysis A bug in GLFW or the underlying operating system. Report the bug
+ * to our [issue tracker](https://github.com/glfw/glfw/issues).
+ */
+#define GLFW_OUT_OF_MEMORY 0x00010005
+/*! @brief GLFW could not find support for the requested API on the system.
+ *
+ * GLFW could not find support for the requested API on the system.
+ *
+ * @analysis The installed graphics driver does not support the requested
+ * API, or does not support it via the chosen context creation API.
+ * Below are a few examples.
+ *
+ * @par
+ * Some pre-installed Windows graphics drivers do not support OpenGL. AMD only
+ * supports OpenGL ES via EGL, while Nvidia and Intel only support it via
+ * a WGL or GLX extension. macOS does not provide OpenGL ES at all. The Mesa
+ * EGL, OpenGL and OpenGL ES libraries do not interface with the Nvidia binary
+ * driver. Older graphics drivers do not support Vulkan.
+ */
+#define GLFW_API_UNAVAILABLE 0x00010006
+/*! @brief The requested OpenGL or OpenGL ES version is not available.
+ *
+ * The requested OpenGL or OpenGL ES version (including any requested context
+ * or framebuffer hints) is not available on this machine.
+ *
+ * @analysis The machine does not support your requirements. If your
+ * application is sufficiently flexible, downgrade your requirements and try
+ * again. Otherwise, inform the user that their machine does not match your
+ * requirements.
+ *
+ * @par
+ * Future invalid OpenGL and OpenGL ES versions, for example OpenGL 4.8 if 5.0
+ * comes out before the 4.x series gets that far, also fail with this error and
+ * not @ref GLFW_INVALID_VALUE, because GLFW cannot know what future versions
+ * will exist.
+ */
+#define GLFW_VERSION_UNAVAILABLE 0x00010007
+/*! @brief A platform-specific error occurred that does not match any of the
+ * more specific categories.
+ *
+ * A platform-specific error occurred that does not match any of the more
+ * specific categories.
+ *
+ * @analysis A bug or configuration error in GLFW, the underlying operating
+ * system or its drivers, or a lack of required resources. Report the issue to
+ * our [issue tracker](https://github.com/glfw/glfw/issues).
+ */
+#define GLFW_PLATFORM_ERROR 0x00010008
+/*! @brief The requested format is not supported or available.
+ *
+ * If emitted during window creation, the requested pixel format is not
+ * supported.
+ *
+ * If emitted when querying the clipboard, the contents of the clipboard could
+ * not be converted to the requested format.
+ *
+ * @analysis If emitted during window creation, one or more
+ * [hard constraints](@ref window_hints_hard) did not match any of the
+ * available pixel formats. If your application is sufficiently flexible,
+ * downgrade your requirements and try again. Otherwise, inform the user that
+ * their machine does not match your requirements.
+ *
+ * @par
+ * If emitted when querying the clipboard, ignore the error or report it to
+ * the user, as appropriate.
+ */
+#define GLFW_FORMAT_UNAVAILABLE 0x00010009
+/*! @brief The specified window does not have an OpenGL or OpenGL ES context.
+ *
+ * A window that does not have an OpenGL or OpenGL ES context was passed to
+ * a function that requires it to have one.
+ *
+ * @analysis Application programmer error. Fix the offending call.
+ */
+#define GLFW_NO_WINDOW_CONTEXT 0x0001000A
+/*! @brief The specified cursor shape is not available.
+ *
+ * The specified standard cursor shape is not available, either because the
+ * current platform cursor theme does not provide it or because it is not
+ * available on the platform.
+ *
+ * @analysis Platform or system settings limitation. Pick another
+ * [standard cursor shape](@ref shapes) or create a
+ * [custom cursor](@ref cursor_custom).
+ */
+#define GLFW_CURSOR_UNAVAILABLE 0x0001000B
+/*! @brief The requested feature is not provided by the platform.
+ *
+ * The requested feature is not provided by the platform, so GLFW is unable to
+ * implement it. The documentation for each function notes if it could emit
+ * this error.
+ *
+ * @analysis Platform or platform version limitation. The error can be ignored
+ * unless the feature is critical to the application.
+ *
+ * @par
+ * A function call that emits this error has no effect other than the error and
+ * updating any existing out parameters.
+ */
+#define GLFW_FEATURE_UNAVAILABLE 0x0001000C
+/*! @brief The requested feature is not implemented for the platform.
+ *
+ * The requested feature has not yet been implemented in GLFW for this platform.
+ *
+ * @analysis An incomplete implementation of GLFW for this platform, hopefully
+ * fixed in a future release. The error can be ignored unless the feature is
+ * critical to the application.
+ *
+ * @par
+ * A function call that emits this error has no effect other than the error and
+ * updating any existing out parameters.
+ */
+#define GLFW_FEATURE_UNIMPLEMENTED 0x0001000D
+/*! @brief Platform unavailable or no matching platform was found.
+ *
+ * If emitted during initialization, no matching platform was found. If @ref
+ * GLFW_PLATFORM is set to `GLFW_ANY_PLATFORM`, GLFW could not detect any of the
+ * platforms supported by this library binary, except for the Null platform. If set to
+ * a specific platform, it is either not supported by this library binary or GLFW was not
+ * able to detect it.
+ *
+ * If emitted by a native access function, GLFW was initialized for a different platform
+ * than the function is for.
+ *
+ * @analysis Failure to detect any platform usually only happens on non-macOS Unix
+ * systems, either when no window system is running or the program was run from
+ * a terminal that does not have the necessary environment variables. Fall back to
+ * a different platform if possible or notify the user that no usable platform was
+ * detected.
+ *
+ * Failure to detect a specific platform may have the same cause as above or be because
+ * support for that platform was not compiled in. Call @ref glfwPlatformSupported to
+ * check whether a specific platform is supported by a library binary.
+ */
+#define GLFW_PLATFORM_UNAVAILABLE 0x0001000E
+/*! @} */
+
+/*! @addtogroup window
+ * @{ */
+/*! @brief Input focus window hint and attribute
+ *
+ * Input focus [window hint](@ref GLFW_FOCUSED_hint) or
+ * [window attribute](@ref GLFW_FOCUSED_attrib).
+ */
+#define GLFW_FOCUSED 0x00020001
+/*! @brief Window iconification window attribute
+ *
+ * Window iconification [window attribute](@ref GLFW_ICONIFIED_attrib).
+ */
+#define GLFW_ICONIFIED 0x00020002
+/*! @brief Window resize-ability window hint and attribute
+ *
+ * Window resize-ability [window hint](@ref GLFW_RESIZABLE_hint) and
+ * [window attribute](@ref GLFW_RESIZABLE_attrib).
+ */
+#define GLFW_RESIZABLE 0x00020003
+/*! @brief Window visibility window hint and attribute
+ *
+ * Window visibility [window hint](@ref GLFW_VISIBLE_hint) and
+ * [window attribute](@ref GLFW_VISIBLE_attrib).
+ */
+#define GLFW_VISIBLE 0x00020004
+/*! @brief Window decoration window hint and attribute
+ *
+ * Window decoration [window hint](@ref GLFW_DECORATED_hint) and
+ * [window attribute](@ref GLFW_DECORATED_attrib).
+ */
+#define GLFW_DECORATED 0x00020005
+/*! @brief Window auto-iconification window hint and attribute
+ *
+ * Window auto-iconification [window hint](@ref GLFW_AUTO_ICONIFY_hint) and
+ * [window attribute](@ref GLFW_AUTO_ICONIFY_attrib).
+ */
+#define GLFW_AUTO_ICONIFY 0x00020006
+/*! @brief Window floating window hint and attribute
+ *
+ * Window floating [window hint](@ref GLFW_FLOATING_hint) and
+ * [window attribute](@ref GLFW_FLOATING_attrib).
+ */
+#define GLFW_FLOATING 0x00020007
+/*! @brief Window maximization window hint and attribute
+ *
+ * Window maximization [window hint](@ref GLFW_MAXIMIZED_hint) and
+ * [window attribute](@ref GLFW_MAXIMIZED_attrib).
+ */
+#define GLFW_MAXIMIZED 0x00020008
+/*! @brief Cursor centering window hint
+ *
+ * Cursor centering [window hint](@ref GLFW_CENTER_CURSOR_hint).
+ */
+#define GLFW_CENTER_CURSOR 0x00020009
+/*! @brief Window framebuffer transparency hint and attribute
+ *
+ * Window framebuffer transparency
+ * [window hint](@ref GLFW_TRANSPARENT_FRAMEBUFFER_hint) and
+ * [window attribute](@ref GLFW_TRANSPARENT_FRAMEBUFFER_attrib).
+ */
+#define GLFW_TRANSPARENT_FRAMEBUFFER 0x0002000A
+/*! @brief Mouse cursor hover window attribute.
+ *
+ * Mouse cursor hover [window attribute](@ref GLFW_HOVERED_attrib).
+ */
+#define GLFW_HOVERED 0x0002000B
+/*! @brief Input focus on calling show window hint and attribute
+ *
+ * Input focus [window hint](@ref GLFW_FOCUS_ON_SHOW_hint) or
+ * [window attribute](@ref GLFW_FOCUS_ON_SHOW_attrib).
+ */
+#define GLFW_FOCUS_ON_SHOW 0x0002000C
+
+/*! @brief Mouse input transparency window hint and attribute
+ *
+ * Mouse input transparency [window hint](@ref GLFW_MOUSE_PASSTHROUGH_hint) or
+ * [window attribute](@ref GLFW_MOUSE_PASSTHROUGH_attrib).
+ */
+#define GLFW_MOUSE_PASSTHROUGH 0x0002000D
+
+/*! @brief Framebuffer bit depth hint.
+ *
+ * Framebuffer bit depth [hint](@ref GLFW_RED_BITS).
+ */
+#define GLFW_RED_BITS 0x00021001
+/*! @brief Framebuffer bit depth hint.
+ *
+ * Framebuffer bit depth [hint](@ref GLFW_GREEN_BITS).
+ */
+#define GLFW_GREEN_BITS 0x00021002
+/*! @brief Framebuffer bit depth hint.
+ *
+ * Framebuffer bit depth [hint](@ref GLFW_BLUE_BITS).
+ */
+#define GLFW_BLUE_BITS 0x00021003
+/*! @brief Framebuffer bit depth hint.
+ *
+ * Framebuffer bit depth [hint](@ref GLFW_ALPHA_BITS).
+ */
+#define GLFW_ALPHA_BITS 0x00021004
+/*! @brief Framebuffer bit depth hint.
+ *
+ * Framebuffer bit depth [hint](@ref GLFW_DEPTH_BITS).
+ */
+#define GLFW_DEPTH_BITS 0x00021005
+/*! @brief Framebuffer bit depth hint.
+ *
+ * Framebuffer bit depth [hint](@ref GLFW_STENCIL_BITS).
+ */
+#define GLFW_STENCIL_BITS 0x00021006
+/*! @brief Framebuffer bit depth hint.
+ *
+ * Framebuffer bit depth [hint](@ref GLFW_ACCUM_RED_BITS).
+ */
+#define GLFW_ACCUM_RED_BITS 0x00021007
+/*! @brief Framebuffer bit depth hint.
+ *
+ * Framebuffer bit depth [hint](@ref GLFW_ACCUM_GREEN_BITS).
+ */
+#define GLFW_ACCUM_GREEN_BITS 0x00021008
+/*! @brief Framebuffer bit depth hint.
+ *
+ * Framebuffer bit depth [hint](@ref GLFW_ACCUM_BLUE_BITS).
+ */
+#define GLFW_ACCUM_BLUE_BITS 0x00021009
+/*! @brief Framebuffer bit depth hint.
+ *
+ * Framebuffer bit depth [hint](@ref GLFW_ACCUM_ALPHA_BITS).
+ */
+#define GLFW_ACCUM_ALPHA_BITS 0x0002100A
+/*! @brief Framebuffer auxiliary buffer hint.
+ *
+ * Framebuffer auxiliary buffer [hint](@ref GLFW_AUX_BUFFERS).
+ */
+#define GLFW_AUX_BUFFERS 0x0002100B
+/*! @brief OpenGL stereoscopic rendering hint.
+ *
+ * OpenGL stereoscopic rendering [hint](@ref GLFW_STEREO).
+ */
+#define GLFW_STEREO 0x0002100C
+/*! @brief Framebuffer MSAA samples hint.
+ *
+ * Framebuffer MSAA samples [hint](@ref GLFW_SAMPLES).
+ */
+#define GLFW_SAMPLES 0x0002100D
+/*! @brief Framebuffer sRGB hint.
+ *
+ * Framebuffer sRGB [hint](@ref GLFW_SRGB_CAPABLE).
+ */
+#define GLFW_SRGB_CAPABLE 0x0002100E
+/*! @brief Monitor refresh rate hint.
+ *
+ * Monitor refresh rate [hint](@ref GLFW_REFRESH_RATE).
+ */
+#define GLFW_REFRESH_RATE 0x0002100F
+/*! @brief Framebuffer double buffering hint and attribute.
+ *
+ * Framebuffer double buffering [hint](@ref GLFW_DOUBLEBUFFER_hint) and
+ * [attribute](@ref GLFW_DOUBLEBUFFER_attrib).
+ */
+#define GLFW_DOUBLEBUFFER 0x00021010
+
+/*! @brief Context client API hint and attribute.
+ *
+ * Context client API [hint](@ref GLFW_CLIENT_API_hint) and
+ * [attribute](@ref GLFW_CLIENT_API_attrib).
+ */
+#define GLFW_CLIENT_API 0x00022001
+/*! @brief Context client API major version hint and attribute.
+ *
+ * Context client API major version [hint](@ref GLFW_CONTEXT_VERSION_MAJOR_hint)
+ * and [attribute](@ref GLFW_CONTEXT_VERSION_MAJOR_attrib).
+ */
+#define GLFW_CONTEXT_VERSION_MAJOR 0x00022002
+/*! @brief Context client API minor version hint and attribute.
+ *
+ * Context client API minor version [hint](@ref GLFW_CONTEXT_VERSION_MINOR_hint)
+ * and [attribute](@ref GLFW_CONTEXT_VERSION_MINOR_attrib).
+ */
+#define GLFW_CONTEXT_VERSION_MINOR 0x00022003
+/*! @brief Context client API revision number attribute.
+ *
+ * Context client API revision number
+ * [attribute](@ref GLFW_CONTEXT_REVISION_attrib).
+ */
+#define GLFW_CONTEXT_REVISION 0x00022004
+/*! @brief Context robustness hint and attribute.
+ *
+ * Context robustness [hint](@ref GLFW_CONTEXT_ROBUSTNESS_hint)
+ * and [attribute](@ref GLFW_CONTEXT_ROBUSTNESS_attrib).
+ */
+#define GLFW_CONTEXT_ROBUSTNESS 0x00022005
+/*! @brief OpenGL forward-compatibility hint and attribute.
+ *
+ * OpenGL forward-compatibility [hint](@ref GLFW_OPENGL_FORWARD_COMPAT_hint)
+ * and [attribute](@ref GLFW_OPENGL_FORWARD_COMPAT_attrib).
+ */
+#define GLFW_OPENGL_FORWARD_COMPAT 0x00022006
+/*! @brief Debug mode context hint and attribute.
+ *
+ * Debug mode context [hint](@ref GLFW_CONTEXT_DEBUG_hint) and
+ * [attribute](@ref GLFW_CONTEXT_DEBUG_attrib).
+ */
+#define GLFW_CONTEXT_DEBUG 0x00022007
+/*! @brief Legacy name for compatibility.
+ *
+ * This is an alias for compatibility with earlier versions.
+ */
+#define GLFW_OPENGL_DEBUG_CONTEXT GLFW_CONTEXT_DEBUG
+/*! @brief OpenGL profile hint and attribute.
+ *
+ * OpenGL profile [hint](@ref GLFW_OPENGL_PROFILE_hint) and
+ * [attribute](@ref GLFW_OPENGL_PROFILE_attrib).
+ */
+#define GLFW_OPENGL_PROFILE 0x00022008
+/*! @brief Context flush-on-release hint and attribute.
+ *
+ * Context flush-on-release [hint](@ref GLFW_CONTEXT_RELEASE_BEHAVIOR_hint) and
+ * [attribute](@ref GLFW_CONTEXT_RELEASE_BEHAVIOR_attrib).
+ */
+#define GLFW_CONTEXT_RELEASE_BEHAVIOR 0x00022009
+/*! @brief Context error suppression hint and attribute.
+ *
+ * Context error suppression [hint](@ref GLFW_CONTEXT_NO_ERROR_hint) and
+ * [attribute](@ref GLFW_CONTEXT_NO_ERROR_attrib).
+ */
+#define GLFW_CONTEXT_NO_ERROR 0x0002200A
+/*! @brief Context creation API hint and attribute.
+ *
+ * Context creation API [hint](@ref GLFW_CONTEXT_CREATION_API_hint) and
+ * [attribute](@ref GLFW_CONTEXT_CREATION_API_attrib).
+ */
+#define GLFW_CONTEXT_CREATION_API 0x0002200B
+/*! @brief Window content area scaling
+ * [window hint](@ref GLFW_SCALE_TO_MONITOR).
+ */
+#define GLFW_SCALE_TO_MONITOR 0x0002200C
+/*! @brief macOS specific
+ * [window hint](@ref GLFW_COCOA_RETINA_FRAMEBUFFER_hint).
+ */
+#define GLFW_COCOA_RETINA_FRAMEBUFFER 0x00023001
+/*! @brief macOS specific
+ * [window hint](@ref GLFW_COCOA_FRAME_NAME_hint).
+ */
+#define GLFW_COCOA_FRAME_NAME 0x00023002
+/*! @brief macOS specific
+ * [window hint](@ref GLFW_COCOA_GRAPHICS_SWITCHING_hint).
+ */
+#define GLFW_COCOA_GRAPHICS_SWITCHING 0x00023003
+/*! @brief X11 specific
+ * [window hint](@ref GLFW_X11_CLASS_NAME_hint).
+ */
+#define GLFW_X11_CLASS_NAME 0x00024001
+/*! @brief X11 specific
+ * [window hint](@ref GLFW_X11_CLASS_NAME_hint).
+ */
+#define GLFW_X11_INSTANCE_NAME 0x00024002
+/*! @brief Win32 specific
+ * [window hint](@ref GLFW_WIN32_KEYBOARD_MENU_hint).
+ */
+#define GLFW_WIN32_KEYBOARD_MENU 0x00025001
+/*! @} */
+
+#define GLFW_NO_API 0
+#define GLFW_OPENGL_API 0x00030001
+#define GLFW_OPENGL_ES_API 0x00030002
+
+#define GLFW_NO_ROBUSTNESS 0
+#define GLFW_NO_RESET_NOTIFICATION 0x00031001
+#define GLFW_LOSE_CONTEXT_ON_RESET 0x00031002
+
+#define GLFW_OPENGL_ANY_PROFILE 0
+#define GLFW_OPENGL_CORE_PROFILE 0x00032001
+#define GLFW_OPENGL_COMPAT_PROFILE 0x00032002
+
+#define GLFW_CURSOR 0x00033001
+#define GLFW_STICKY_KEYS 0x00033002
+#define GLFW_STICKY_MOUSE_BUTTONS 0x00033003
+#define GLFW_LOCK_KEY_MODS 0x00033004
+#define GLFW_RAW_MOUSE_MOTION 0x00033005
+
+#define GLFW_CURSOR_NORMAL 0x00034001
+#define GLFW_CURSOR_HIDDEN 0x00034002
+#define GLFW_CURSOR_DISABLED 0x00034003
+
+#define GLFW_ANY_RELEASE_BEHAVIOR 0
+#define GLFW_RELEASE_BEHAVIOR_FLUSH 0x00035001
+#define GLFW_RELEASE_BEHAVIOR_NONE 0x00035002
+
+#define GLFW_NATIVE_CONTEXT_API 0x00036001
+#define GLFW_EGL_CONTEXT_API 0x00036002
+#define GLFW_OSMESA_CONTEXT_API 0x00036003
+
+#define GLFW_ANGLE_PLATFORM_TYPE_NONE 0x00037001
+#define GLFW_ANGLE_PLATFORM_TYPE_OPENGL 0x00037002
+#define GLFW_ANGLE_PLATFORM_TYPE_OPENGLES 0x00037003
+#define GLFW_ANGLE_PLATFORM_TYPE_D3D9 0x00037004
+#define GLFW_ANGLE_PLATFORM_TYPE_D3D11 0x00037005
+#define GLFW_ANGLE_PLATFORM_TYPE_VULKAN 0x00037007
+#define GLFW_ANGLE_PLATFORM_TYPE_METAL 0x00037008
+
+/*! @defgroup shapes Standard cursor shapes
+ * @brief Standard system cursor shapes.
+ *
+ * These are the [standard cursor shapes](@ref cursor_standard) that can be
+ * requested from the platform (window system).
+ *
+ * @ingroup input
+ * @{ */
+
+/*! @brief The regular arrow cursor shape.
+ *
+ * The regular arrow cursor shape.
+ */
+#define GLFW_ARROW_CURSOR 0x00036001
+/*! @brief The text input I-beam cursor shape.
+ *
+ * The text input I-beam cursor shape.
+ */
+#define GLFW_IBEAM_CURSOR 0x00036002
+/*! @brief The crosshair cursor shape.
+ *
+ * The crosshair cursor shape.
+ */
+#define GLFW_CROSSHAIR_CURSOR 0x00036003
+/*! @brief The pointing hand cursor shape.
+ *
+ * The pointing hand cursor shape.
+ */
+#define GLFW_POINTING_HAND_CURSOR 0x00036004
+/*! @brief The horizontal resize/move arrow shape.
+ *
+ * The horizontal resize/move arrow shape. This is usually a horizontal
+ * double-headed arrow.
+ */
+#define GLFW_RESIZE_EW_CURSOR 0x00036005
+/*! @brief The vertical resize/move arrow shape.
+ *
+ * The vertical resize/move shape. This is usually a vertical double-headed
+ * arrow.
+ */
+#define GLFW_RESIZE_NS_CURSOR 0x00036006
+/*! @brief The top-left to bottom-right diagonal resize/move arrow shape.
+ *
+ * The top-left to bottom-right diagonal resize/move shape. This is usually
+ * a diagonal double-headed arrow.
+ *
+ * @note @macos This shape is provided by a private system API and may fail
+ * with @ref GLFW_CURSOR_UNAVAILABLE in the future.
+ *
+ * @note @x11 This shape is provided by a newer standard not supported by all
+ * cursor themes.
+ *
+ * @note @wayland This shape is provided by a newer standard not supported by
+ * all cursor themes.
+ */
+#define GLFW_RESIZE_NWSE_CURSOR 0x00036007
+/*! @brief The top-right to bottom-left diagonal resize/move arrow shape.
+ *
+ * The top-right to bottom-left diagonal resize/move shape. This is usually
+ * a diagonal double-headed arrow.
+ *
+ * @note @macos This shape is provided by a private system API and may fail
+ * with @ref GLFW_CURSOR_UNAVAILABLE in the future.
+ *
+ * @note @x11 This shape is provided by a newer standard not supported by all
+ * cursor themes.
+ *
+ * @note @wayland This shape is provided by a newer standard not supported by
+ * all cursor themes.
+ */
+#define GLFW_RESIZE_NESW_CURSOR 0x00036008
+/*! @brief The omni-directional resize/move cursor shape.
+ *
+ * The omni-directional resize/move cursor shape. This is usually either
+ * a combined horizontal and vertical double-headed arrow or a grabbing hand.
+ */
+#define GLFW_RESIZE_ALL_CURSOR 0x00036009
+/*! @brief The operation-not-allowed shape.
+ *
+ * The operation-not-allowed shape. This is usually a circle with a diagonal
+ * line through it.
+ *
+ * @note @x11 This shape is provided by a newer standard not supported by all
+ * cursor themes.
+ *
+ * @note @wayland This shape is provided by a newer standard not supported by
+ * all cursor themes.
+ */
+#define GLFW_NOT_ALLOWED_CURSOR 0x0003600A
+/*! @brief Legacy name for compatibility.
+ *
+ * This is an alias for compatibility with earlier versions.
+ */
+#define GLFW_HRESIZE_CURSOR GLFW_RESIZE_EW_CURSOR
+/*! @brief Legacy name for compatibility.
+ *
+ * This is an alias for compatibility with earlier versions.
+ */
+#define GLFW_VRESIZE_CURSOR GLFW_RESIZE_NS_CURSOR
+/*! @brief Legacy name for compatibility.
+ *
+ * This is an alias for compatibility with earlier versions.
+ */
+#define GLFW_HAND_CURSOR GLFW_POINTING_HAND_CURSOR
+/*! @} */
+
+#define GLFW_CONNECTED 0x00040001
+#define GLFW_DISCONNECTED 0x00040002
+
+/*! @addtogroup init
+ * @{ */
+/*! @brief Joystick hat buttons init hint.
+ *
+ * Joystick hat buttons [init hint](@ref GLFW_JOYSTICK_HAT_BUTTONS).
+ */
+#define GLFW_JOYSTICK_HAT_BUTTONS 0x00050001
+/*! @brief ANGLE rendering backend init hint.
+ *
+ * ANGLE rendering backend [init hint](@ref GLFW_ANGLE_PLATFORM_TYPE_hint).
+ */
+#define GLFW_ANGLE_PLATFORM_TYPE 0x00050002
+/*! @brief Platform selection init hint.
+ *
+ * Platform selection [init hint](@ref GLFW_PLATFORM).
+ */
+#define GLFW_PLATFORM 0x00050003
+/*! @brief macOS specific init hint.
+ *
+ * macOS specific [init hint](@ref GLFW_COCOA_CHDIR_RESOURCES_hint).
+ */
+#define GLFW_COCOA_CHDIR_RESOURCES 0x00051001
+/*! @brief macOS specific init hint.
+ *
+ * macOS specific [init hint](@ref GLFW_COCOA_MENUBAR_hint).
+ */
+#define GLFW_COCOA_MENUBAR 0x00051002
+/*! @brief X11 specific init hint.
+ *
+ * X11 specific [init hint](@ref GLFW_X11_XCB_VULKAN_SURFACE_hint).
+ */
+#define GLFW_X11_XCB_VULKAN_SURFACE 0x00052001
+/*! @} */
+
+/*! @addtogroup init
+ * @{ */
+/*! @brief Hint value that enables automatic platform selection.
+ *
+ * Hint value for @ref GLFW_PLATFORM that enables automatic platform selection.
+ */
+#define GLFW_ANY_PLATFORM 0x00060000
+#define GLFW_PLATFORM_WIN32 0x00060001
+#define GLFW_PLATFORM_COCOA 0x00060002
+#define GLFW_PLATFORM_WAYLAND 0x00060003
+#define GLFW_PLATFORM_X11 0x00060004
+#define GLFW_PLATFORM_NULL 0x00060005
+/*! @} */
+
+#define GLFW_DONT_CARE -1
+
+
+/*************************************************************************
+ * GLFW API types
+ *************************************************************************/
+
+/*! @brief Client API function pointer type.
+ *
+ * Generic function pointer used for returning client API function pointers
+ * without forcing a cast from a regular pointer.
+ *
+ * @sa @ref context_glext
+ * @sa @ref glfwGetProcAddress
+ *
+ * @since Added in version 3.0.
+ *
+ * @ingroup context
+ */
+typedef void (*GLFWglproc)(void);
+
+/*! @brief Vulkan API function pointer type.
+ *
+ * Generic function pointer used for returning Vulkan API function pointers
+ * without forcing a cast from a regular pointer.
+ *
+ * @sa @ref vulkan_proc
+ * @sa @ref glfwGetInstanceProcAddress
+ *
+ * @since Added in version 3.2.
+ *
+ * @ingroup vulkan
+ */
+typedef void (*GLFWvkproc)(void);
+
+/*! @brief Opaque monitor object.
+ *
+ * Opaque monitor object.
+ *
+ * @see @ref monitor_object
+ *
+ * @since Added in version 3.0.
+ *
+ * @ingroup monitor
+ */
+typedef struct GLFWmonitor GLFWmonitor;
+
+/*! @brief Opaque window object.
+ *
+ * Opaque window object.
+ *
+ * @see @ref window_object
+ *
+ * @since Added in version 3.0.
+ *
+ * @ingroup window
+ */
+typedef struct GLFWwindow GLFWwindow;
+
+/*! @brief Opaque cursor object.
+ *
+ * Opaque cursor object.
+ *
+ * @see @ref cursor_object
+ *
+ * @since Added in version 3.1.
+ *
+ * @ingroup input
+ */
+typedef struct GLFWcursor GLFWcursor;
+
+/*! @brief The function pointer type for memory allocation callbacks.
+ *
+ * This is the function pointer type for memory allocation callbacks. A memory
+ * allocation callback function has the following signature:
+ * @code
+ * void* function_name(size_t size, void* user)
+ * @endcode
+ *
+ * This function must return either a memory block at least `size` bytes long,
+ * or `NULL` if allocation failed. Note that not all parts of GLFW handle allocation
+ * failures gracefully yet.
+ *
+ * This function may be called during @ref glfwInit but before the library is
+ * flagged as initialized, as well as during @ref glfwTerminate after the
+ * library is no longer flagged as initialized.
+ *
+ * Any memory allocated by this function will be deallocated during library
+ * termination or earlier.
+ *
+ * The size will always be greater than zero. Allocations of size zero are filtered out
+ * before reaching the custom allocator.
+ *
+ * @param[in] size The minimum size, in bytes, of the memory block.
+ * @param[in] user The user-defined pointer from the allocator.
+ * @return The address of the newly allocated memory block, or `NULL` if an
+ * error occurred.
+ *
+ * @pointer_lifetime The returned memory block must be valid at least until it
+ * is deallocated.
+ *
+ * @reentrancy This function should not call any GLFW function.
+ *
+ * @thread_safety This function may be called from any thread that calls GLFW functions.
+ *
+ * @sa @ref init_allocator
+ * @sa @ref GLFWallocator
+ *
+ * @since Added in version 3.4.
+ *
+ * @ingroup init
+ */
+typedef void* (* GLFWallocatefun)(size_t size, void* user);
+
+/*! @brief The function pointer type for memory reallocation callbacks.
+ *
+ * This is the function pointer type for memory reallocation callbacks.
+ * A memory reallocation callback function has the following signature:
+ * @code
+ * void* function_name(void* block, size_t size, void* user)
+ * @endcode
+ *
+ * This function must return a memory block at least `size` bytes long, or
+ * `NULL` if allocation failed. Note that not all parts of GLFW handle allocation
+ * failures gracefully yet.
+ *
+ * This function may be called during @ref glfwInit but before the library is
+ * flagged as initialized, as well as during @ref glfwTerminate after the
+ * library is no longer flagged as initialized.
+ *
+ * Any memory allocated by this function will be deallocated during library
+ * termination or earlier.
+ *
+ * The block address will never be `NULL` and the size will always be greater than zero.
+ * Reallocations of a block to size zero are converted into deallocations. Reallocations
+ * of `NULL` to a non-zero size are converted into regular allocations.
+ *
+ * @param[in] block The address of the memory block to reallocate.
+ * @param[in] size The new minimum size, in bytes, of the memory block.
+ * @param[in] user The user-defined pointer from the allocator.
+ * @return The address of the newly allocated or resized memory block, or
+ * `NULL` if an error occurred.
+ *
+ * @pointer_lifetime The returned memory block must be valid at least until it
+ * is deallocated.
+ *
+ * @reentrancy This function should not call any GLFW function.
+ *
+ * @thread_safety This function may be called from any thread that calls GLFW functions.
+ *
+ * @sa @ref init_allocator
+ * @sa @ref GLFWallocator
+ *
+ * @since Added in version 3.4.
+ *
+ * @ingroup init
+ */
+typedef void* (* GLFWreallocatefun)(void* block, size_t size, void* user);
+
+/*! @brief The function pointer type for memory deallocation callbacks.
+ *
+ * This is the function pointer type for memory deallocation callbacks.
+ * A memory deallocation callback function has the following signature:
+ * @code
+ * void function_name(void* block, void* user)
+ * @endcode
+ *
+ * This function may deallocate the specified memory block. This memory block
+ * will have been allocated with the same allocator.
+ *
+ * This function may be called during @ref glfwInit but before the library is
+ * flagged as initialized, as well as during @ref glfwTerminate after the
+ * library is no longer flagged as initialized.
+ *
+ * The block address will never be `NULL`. Deallocations of `NULL` are filtered out
+ * before reaching the custom allocator.
+ *
+ * @param[in] block The address of the memory block to deallocate.
+ * @param[in] user The user-defined pointer from the allocator.
+ *
+ * @pointer_lifetime The specified memory block will not be accessed by GLFW
+ * after this function is called.
+ *
+ * @reentrancy This function should not call any GLFW function.
+ *
+ * @thread_safety This function may be called from any thread that calls GLFW functions.
+ *
+ * @sa @ref init_allocator
+ * @sa @ref GLFWallocator
+ *
+ * @since Added in version 3.4.
+ *
+ * @ingroup init
+ */
+typedef void (* GLFWdeallocatefun)(void* block, void* user);
+
+/*! @brief The function pointer type for error callbacks.
+ *
+ * This is the function pointer type for error callbacks. An error callback
+ * function has the following signature:
+ * @code
+ * void callback_name(int error_code, const char* description)
+ * @endcode
+ *
+ * @param[in] error_code An [error code](@ref errors). Future releases may add
+ * more error codes.
+ * @param[in] description A UTF-8 encoded string describing the error.
+ *
+ * @pointer_lifetime The error description string is valid until the callback
+ * function returns.
+ *
+ * @sa @ref error_handling
+ * @sa @ref glfwSetErrorCallback
+ *
+ * @since Added in version 3.0.
+ *
+ * @ingroup init
+ */
+typedef void (* GLFWerrorfun)(int error_code, const char* description);
+
+/*! @brief The function pointer type for window position callbacks.
+ *
+ * This is the function pointer type for window position callbacks. A window
+ * position callback function has the following signature:
+ * @code
+ * void callback_name(GLFWwindow* window, int xpos, int ypos)
+ * @endcode
+ *
+ * @param[in] window The window that was moved.
+ * @param[in] xpos The new x-coordinate, in screen coordinates, of the
+ * upper-left corner of the content area of the window.
+ * @param[in] ypos The new y-coordinate, in screen coordinates, of the
+ * upper-left corner of the content area of the window.
+ *
+ * @sa @ref window_pos
+ * @sa @ref glfwSetWindowPosCallback
+ *
+ * @since Added in version 3.0.
+ *
+ * @ingroup window
+ */
+typedef void (* GLFWwindowposfun)(GLFWwindow* window, int xpos, int ypos);
+
+/*! @brief The function pointer type for window size callbacks.
+ *
+ * This is the function pointer type for window size callbacks. A window size
+ * callback function has the following signature:
+ * @code
+ * void callback_name(GLFWwindow* window, int width, int height)
+ * @endcode
+ *
+ * @param[in] window The window that was resized.
+ * @param[in] width The new width, in screen coordinates, of the window.
+ * @param[in] height The new height, in screen coordinates, of the window.
+ *
+ * @sa @ref window_size
+ * @sa @ref glfwSetWindowSizeCallback
+ *
+ * @since Added in version 1.0.
+ * @glfw3 Added window handle parameter.
+ *
+ * @ingroup window
+ */
+typedef void (* GLFWwindowsizefun)(GLFWwindow* window, int width, int height);
+
+/*! @brief The function pointer type for window close callbacks.
+ *
+ * This is the function pointer type for window close callbacks. A window
+ * close callback function has the following signature:
+ * @code
+ * void function_name(GLFWwindow* window)
+ * @endcode
+ *
+ * @param[in] window The window that the user attempted to close.
+ *
+ * @sa @ref window_close
+ * @sa @ref glfwSetWindowCloseCallback
+ *
+ * @since Added in version 2.5.
+ * @glfw3 Added window handle parameter.
+ *
+ * @ingroup window
+ */
+typedef void (* GLFWwindowclosefun)(GLFWwindow* window);
+
+/*! @brief The function pointer type for window content refresh callbacks.
+ *
+ * This is the function pointer type for window content refresh callbacks.
+ * A window content refresh callback function has the following signature:
+ * @code
+ * void function_name(GLFWwindow* window);
+ * @endcode
+ *
+ * @param[in] window The window whose content needs to be refreshed.
+ *
+ * @sa @ref window_refresh
+ * @sa @ref glfwSetWindowRefreshCallback
+ *
+ * @since Added in version 2.5.
+ * @glfw3 Added window handle parameter.
+ *
+ * @ingroup window
+ */
+typedef void (* GLFWwindowrefreshfun)(GLFWwindow* window);
+
+/*! @brief The function pointer type for window focus callbacks.
+ *
+ * This is the function pointer type for window focus callbacks. A window
+ * focus callback function has the following signature:
+ * @code
+ * void function_name(GLFWwindow* window, int focused)
+ * @endcode
+ *
+ * @param[in] window The window that gained or lost input focus.
+ * @param[in] focused `GLFW_TRUE` if the window was given input focus, or
+ * `GLFW_FALSE` if it lost it.
+ *
+ * @sa @ref window_focus
+ * @sa @ref glfwSetWindowFocusCallback
+ *
+ * @since Added in version 3.0.
+ *
+ * @ingroup window
+ */
+typedef void (* GLFWwindowfocusfun)(GLFWwindow* window, int focused);
+
+/*! @brief The function pointer type for window iconify callbacks.
+ *
+ * This is the function pointer type for window iconify callbacks. A window
+ * iconify callback function has the following signature:
+ * @code
+ * void function_name(GLFWwindow* window, int iconified)
+ * @endcode
+ *
+ * @param[in] window The window that was iconified or restored.
+ * @param[in] iconified `GLFW_TRUE` if the window was iconified, or
+ * `GLFW_FALSE` if it was restored.
+ *
+ * @sa @ref window_iconify
+ * @sa @ref glfwSetWindowIconifyCallback
+ *
+ * @since Added in version 3.0.
+ *
+ * @ingroup window
+ */
+typedef void (* GLFWwindowiconifyfun)(GLFWwindow* window, int iconified);
+
+/*! @brief The function pointer type for window maximize callbacks.
+ *
+ * This is the function pointer type for window maximize callbacks. A window
+ * maximize callback function has the following signature:
+ * @code
+ * void function_name(GLFWwindow* window, int maximized)
+ * @endcode
+ *
+ * @param[in] window The window that was maximized or restored.
+ * @param[in] maximized `GLFW_TRUE` if the window was maximized, or
+ * `GLFW_FALSE` if it was restored.
+ *
+ * @sa @ref window_maximize
+ * @sa @ref glfwSetWindowMaximizeCallback
+ *
+ * @since Added in version 3.3.
+ *
+ * @ingroup window
+ */
+typedef void (* GLFWwindowmaximizefun)(GLFWwindow* window, int maximized);
+
+/*! @brief The function pointer type for framebuffer size callbacks.
+ *
+ * This is the function pointer type for framebuffer size callbacks.
+ * A framebuffer size callback function has the following signature:
+ * @code
+ * void function_name(GLFWwindow* window, int width, int height)
+ * @endcode
+ *
+ * @param[in] window The window whose framebuffer was resized.
+ * @param[in] width The new width, in pixels, of the framebuffer.
+ * @param[in] height The new height, in pixels, of the framebuffer.
+ *
+ * @sa @ref window_fbsize
+ * @sa @ref glfwSetFramebufferSizeCallback
+ *
+ * @since Added in version 3.0.
+ *
+ * @ingroup window
+ */
+typedef void (* GLFWframebuffersizefun)(GLFWwindow* window, int width, int height);
+
+/*! @brief The function pointer type for window content scale callbacks.
+ *
+ * This is the function pointer type for window content scale callbacks.
+ * A window content scale callback function has the following signature:
+ * @code
+ * void function_name(GLFWwindow* window, float xscale, float yscale)
+ * @endcode
+ *
+ * @param[in] window The window whose content scale changed.
+ * @param[in] xscale The new x-axis content scale of the window.
+ * @param[in] yscale The new y-axis content scale of the window.
+ *
+ * @sa @ref window_scale
+ * @sa @ref glfwSetWindowContentScaleCallback
+ *
+ * @since Added in version 3.3.
+ *
+ * @ingroup window
+ */
+typedef void (* GLFWwindowcontentscalefun)(GLFWwindow* window, float xscale, float yscale);
+
+/*! @brief The function pointer type for mouse button callbacks.
+ *
+ * This is the function pointer type for mouse button callback functions.
+ * A mouse button callback function has the following signature:
+ * @code
+ * void function_name(GLFWwindow* window, int button, int action, int mods)
+ * @endcode
+ *
+ * @param[in] window The window that received the event.
+ * @param[in] button The [mouse button](@ref buttons) that was pressed or
+ * released.
+ * @param[in] action One of `GLFW_PRESS` or `GLFW_RELEASE`. Future releases
+ * may add more actions.
+ * @param[in] mods Bit field describing which [modifier keys](@ref mods) were
+ * held down.
+ *
+ * @sa @ref input_mouse_button
+ * @sa @ref glfwSetMouseButtonCallback
+ *
+ * @since Added in version 1.0.
+ * @glfw3 Added window handle and modifier mask parameters.
+ *
+ * @ingroup input
+ */
+typedef void (* GLFWmousebuttonfun)(GLFWwindow* window, int button, int action, int mods);
+
+/*! @brief The function pointer type for cursor position callbacks.
+ *
+ * This is the function pointer type for cursor position callbacks. A cursor
+ * position callback function has the following signature:
+ * @code
+ * void function_name(GLFWwindow* window, double xpos, double ypos);
+ * @endcode
+ *
+ * @param[in] window The window that received the event.
+ * @param[in] xpos The new cursor x-coordinate, relative to the left edge of
+ * the content area.
+ * @param[in] ypos The new cursor y-coordinate, relative to the top edge of the
+ * content area.
+ *
+ * @sa @ref cursor_pos
+ * @sa @ref glfwSetCursorPosCallback
+ *
+ * @since Added in version 3.0. Replaces `GLFWmouseposfun`.
+ *
+ * @ingroup input
+ */
+typedef void (* GLFWcursorposfun)(GLFWwindow* window, double xpos, double ypos);
+
+/*! @brief The function pointer type for cursor enter/leave callbacks.
+ *
+ * This is the function pointer type for cursor enter/leave callbacks.
+ * A cursor enter/leave callback function has the following signature:
+ * @code
+ * void function_name(GLFWwindow* window, int entered)
+ * @endcode
+ *
+ * @param[in] window The window that received the event.
+ * @param[in] entered `GLFW_TRUE` if the cursor entered the window's content
+ * area, or `GLFW_FALSE` if it left it.
+ *
+ * @sa @ref cursor_enter
+ * @sa @ref glfwSetCursorEnterCallback
+ *
+ * @since Added in version 3.0.
+ *
+ * @ingroup input
+ */
+typedef void (* GLFWcursorenterfun)(GLFWwindow* window, int entered);
+
+/*! @brief The function pointer type for scroll callbacks.
+ *
+ * This is the function pointer type for scroll callbacks. A scroll callback
+ * function has the following signature:
+ * @code
+ * void function_name(GLFWwindow* window, double xoffset, double yoffset)
+ * @endcode
+ *
+ * @param[in] window The window that received the event.
+ * @param[in] xoffset The scroll offset along the x-axis.
+ * @param[in] yoffset The scroll offset along the y-axis.
+ *
+ * @sa @ref scrolling
+ * @sa @ref glfwSetScrollCallback
+ *
+ * @since Added in version 3.0. Replaces `GLFWmousewheelfun`.
+ *
+ * @ingroup input
+ */
+typedef void (* GLFWscrollfun)(GLFWwindow* window, double xoffset, double yoffset);
+
+/*! @brief The function pointer type for keyboard key callbacks.
+ *
+ * This is the function pointer type for keyboard key callbacks. A keyboard
+ * key callback function has the following signature:
+ * @code
+ * void function_name(GLFWwindow* window, int key, int scancode, int action, int mods)
+ * @endcode
+ *
+ * @param[in] window The window that received the event.
+ * @param[in] key The [keyboard key](@ref keys) that was pressed or released.
+ * @param[in] scancode The platform-specific scancode of the key.
+ * @param[in] action `GLFW_PRESS`, `GLFW_RELEASE` or `GLFW_REPEAT`. Future
+ * releases may add more actions.
+ * @param[in] mods Bit field describing which [modifier keys](@ref mods) were
+ * held down.
+ *
+ * @sa @ref input_key
+ * @sa @ref glfwSetKeyCallback
+ *
+ * @since Added in version 1.0.
+ * @glfw3 Added window handle, scancode and modifier mask parameters.
+ *
+ * @ingroup input
+ */
+typedef void (* GLFWkeyfun)(GLFWwindow* window, int key, int scancode, int action, int mods);
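+
+/* A minimal usage sketch: a key callback that requests window closure when
+ * Escape is pressed. It assumes a valid `window` handle from window creation
+ * and uses glfwSetKeyCallback, GLFW_KEY_ESCAPE, GLFW_PRESS, GLFW_TRUE and
+ * glfwSetWindowShouldClose, all declared elsewhere in this header.
+ *
+ * @code
+ * static void key_callback(GLFWwindow* window, int key, int scancode, int action, int mods)
+ * {
+ *     // Close the window when the user presses Escape.
+ *     if (key == GLFW_KEY_ESCAPE && action == GLFW_PRESS)
+ *         glfwSetWindowShouldClose(window, GLFW_TRUE);
+ * }
+ *
+ * // Register the callback once the window has been created.
+ * glfwSetKeyCallback(window, key_callback);
+ * @endcode
+ */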
+
+/*! @brief The function pointer type for Unicode character callbacks.
+ *
+ * This is the function pointer type for Unicode character callbacks.
+ * A Unicode character callback function has the following signature:
+ * @code
+ * void function_name(GLFWwindow* window, unsigned int codepoint)
+ * @endcode
+ *
+ * @param[in] window The window that received the event.
+ * @param[in] codepoint The Unicode code point of the character.
+ *
+ * @sa @ref input_char
+ * @sa @ref glfwSetCharCallback
+ *
+ * @since Added in version 2.4.
+ * @glfw3 Added window handle parameter.
+ *
+ * @ingroup input
+ */
+typedef void (* GLFWcharfun)(GLFWwindow* window, unsigned int codepoint);
+
+/*! @brief The function pointer type for Unicode character with modifiers
+ * callbacks.
+ *
+ * This is the function pointer type for Unicode character with modifiers
+ * callbacks. It is called for each input character, regardless of what
+ * modifier keys are held down. A Unicode character with modifiers callback
+ * function has the following signature:
+ * @code
+ * void function_name(GLFWwindow* window, unsigned int codepoint, int mods)
+ * @endcode
+ *
+ * @param[in] window The window that received the event.
+ * @param[in] codepoint The Unicode code point of the character.
+ * @param[in] mods Bit field describing which [modifier keys](@ref mods) were
+ * held down.
+ *
+ * @sa @ref input_char
+ * @sa @ref glfwSetCharModsCallback
+ *
+ * @deprecated Scheduled for removal in version 4.0.
+ *
+ * @since Added in version 3.1.
+ *
+ * @ingroup input
+ */
+typedef void (* GLFWcharmodsfun)(GLFWwindow* window, unsigned int codepoint, int mods);
+
+/*! @brief The function pointer type for path drop callbacks.
+ *
+ * This is the function pointer type for path drop callbacks. A path drop
+ * callback function has the following signature:
+ * @code
+ * void function_name(GLFWwindow* window, int path_count, const char* paths[])
+ * @endcode
+ *
+ * @param[in] window The window that received the event.
+ * @param[in] path_count The number of dropped paths.
+ * @param[in] paths The UTF-8 encoded file and/or directory path names.
+ *
+ * @pointer_lifetime The path array and its strings are valid until the
+ * callback function returns.
+ *
+ * @sa @ref path_drop
+ * @sa @ref glfwSetDropCallback
+ *
+ * @since Added in version 3.1.
+ *
+ * @ingroup input
+ */
+typedef void (* GLFWdropfun)(GLFWwindow* window, int path_count, const char* paths[]);
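+
+/* A minimal usage sketch: a path drop callback that prints each dropped path.
+ * It assumes <stdio.h>, a valid `window` handle and glfwSetDropCallback,
+ * which is declared later in this header.
+ *
+ * @code
+ * static void drop_callback(GLFWwindow* window, int path_count, const char* paths[])
+ * {
+ *     // The strings are only valid until the callback returns, so copy them
+ *     // if they are needed later; here they are just printed.
+ *     for (int i = 0; i < path_count; i++)
+ *         printf("dropped: %s\n", paths[i]);
+ * }
+ *
+ * glfwSetDropCallback(window, drop_callback);
+ * @endcode
+ */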
+
+/*! @brief The function pointer type for monitor configuration callbacks.
+ *
+ * This is the function pointer type for monitor configuration callbacks.
+ * A monitor callback function has the following signature:
+ * @code
+ * void function_name(GLFWmonitor* monitor, int event)
+ * @endcode
+ *
+ * @param[in] monitor The monitor that was connected or disconnected.
+ * @param[in] event One of `GLFW_CONNECTED` or `GLFW_DISCONNECTED`. Future
+ * releases may add more events.
+ *
+ * @sa @ref monitor_event
+ * @sa @ref glfwSetMonitorCallback
+ *
+ * @since Added in version 3.0.
+ *
+ * @ingroup monitor
+ */
+typedef void (* GLFWmonitorfun)(GLFWmonitor* monitor, int event);
+
+/*! @brief The function pointer type for joystick configuration callbacks.
+ *
+ * This is the function pointer type for joystick configuration callbacks.
+ * A joystick configuration callback function has the following signature:
+ * @code
+ * void function_name(int jid, int event)
+ * @endcode
+ *
+ * @param[in] jid The joystick that was connected or disconnected.
+ * @param[in] event One of `GLFW_CONNECTED` or `GLFW_DISCONNECTED`. Future
+ * releases may add more events.
+ *
+ * @sa @ref joystick_event
+ * @sa @ref glfwSetJoystickCallback
+ *
+ * @since Added in version 3.2.
+ *
+ * @ingroup input
+ */
+typedef void (* GLFWjoystickfun)(int jid, int event);
+
+/*! @brief Video mode type.
+ *
+ * This describes a single video mode.
+ *
+ * @sa @ref monitor_modes
+ * @sa @ref glfwGetVideoMode
+ * @sa @ref glfwGetVideoModes
+ *
+ * @since Added in version 1.0.
+ * @glfw3 Added refresh rate member.
+ *
+ * @ingroup monitor
+ */
+typedef struct GLFWvidmode
+{
+ /*! The width, in screen coordinates, of the video mode.
+ */
+ int width;
+ /*! The height, in screen coordinates, of the video mode.
+ */
+ int height;
+ /*! The bit depth of the red channel of the video mode.
+ */
+ int redBits;
+ /*! The bit depth of the green channel of the video mode.
+ */
+ int greenBits;
+ /*! The bit depth of the blue channel of the video mode.
+ */
+ int blueBits;
+ /*! The refresh rate, in Hz, of the video mode.
+ */
+ int refreshRate;
+} GLFWvidmode;
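+
+/* A minimal usage sketch: listing the video modes of a monitor. It assumes
+ * <stdio.h>, a valid `monitor` handle (for example from glfwGetPrimaryMonitor)
+ * and glfwGetVideoModes, declared later in this header.
+ *
+ * @code
+ * int count;
+ * const GLFWvidmode* modes = glfwGetVideoModes(monitor, &count);
+ *
+ * if (modes)
+ * {
+ *     for (int i = 0; i < count; i++)
+ *         printf("%dx%d @ %d Hz (R%dG%dB%d)\n",
+ *                modes[i].width, modes[i].height, modes[i].refreshRate,
+ *                modes[i].redBits, modes[i].greenBits, modes[i].blueBits);
+ * }
+ * @endcode
+ */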
+
+/*! @brief Gamma ramp.
+ *
+ * This describes the gamma ramp for a monitor.
+ *
+ * @sa @ref monitor_gamma
+ * @sa @ref glfwGetGammaRamp
+ * @sa @ref glfwSetGammaRamp
+ *
+ * @since Added in version 3.0.
+ *
+ * @ingroup monitor
+ */
+typedef struct GLFWgammaramp
+{
+ /*! An array of values describing the response of the red channel.
+ */
+ unsigned short* red;
+ /*! An array of values describing the response of the green channel.
+ */
+ unsigned short* green;
+ /*! An array of values describing the response of the blue channel.
+ */
+ unsigned short* blue;
+ /*! The number of elements in each array.
+ */
+ unsigned int size;
+} GLFWgammaramp;
+
+/*! @brief Image data.
+ *
+ * This describes a single 2D image. See the documentation for each related
+ * function for the expected pixel format.
+ *
+ * @sa @ref cursor_custom
+ * @sa @ref window_icon
+ *
+ * @since Added in version 2.1.
+ * @glfw3 Removed format and bytes-per-pixel members.
+ *
+ * @ingroup window
+ */
+typedef struct GLFWimage
+{
+ /*! The width, in pixels, of this image.
+ */
+ int width;
+ /*! The height, in pixels, of this image.
+ */
+ int height;
+ /*! The pixel data of this image, arranged left-to-right, top-to-bottom.
+ */
+ unsigned char* pixels;
+} GLFWimage;
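+
+/* A minimal usage sketch: wrapping existing 32-bit RGBA pixel data in a
+ * GLFWimage and creating a custom cursor with glfwCreateCursor, declared later
+ * in this header. `pixels` is assumed to point at width * height * 4 bytes of
+ * image data owned by the application.
+ *
+ * @code
+ * GLFWimage image;
+ * image.width = 16;
+ * image.height = 16;
+ * image.pixels = pixels;
+ *
+ * GLFWcursor* cursor = glfwCreateCursor(&image, 0, 0);
+ * @endcode
+ */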
+
+/*! @brief Gamepad input state
+ *
+ * This describes the input state of a gamepad.
+ *
+ * @sa @ref gamepad
+ * @sa @ref glfwGetGamepadState
+ *
+ * @since Added in version 3.3.
+ *
+ * @ingroup input
+ */
+typedef struct GLFWgamepadstate
+{
+ /*! The states of each [gamepad button](@ref gamepad_buttons), `GLFW_PRESS`
+ * or `GLFW_RELEASE`.
+ */
+ unsigned char buttons[15];
+ /*! The states of each [gamepad axis](@ref gamepad_axes), in the range -1.0
+ * to 1.0 inclusive.
+ */
+ float axes[6];
+} GLFWgamepadstate;
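+
+/* A minimal usage sketch: polling the first joystick as a gamepad. It uses
+ * glfwGetGamepadState together with the GLFW_JOYSTICK_1, GLFW_GAMEPAD_BUTTON_A
+ * and GLFW_GAMEPAD_AXIS_LEFT_X tokens, all declared elsewhere in this header;
+ * `jump` and `steer` stand in for application code.
+ *
+ * @code
+ * GLFWgamepadstate state;
+ *
+ * if (glfwGetGamepadState(GLFW_JOYSTICK_1, &state))
+ * {
+ *     if (state.buttons[GLFW_GAMEPAD_BUTTON_A] == GLFW_PRESS)
+ *         jump();
+ *
+ *     steer(state.axes[GLFW_GAMEPAD_AXIS_LEFT_X]);
+ * }
+ * @endcode
+ */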
+
+/*! @brief Custom heap memory allocator.
+ *
+ * This describes a custom heap memory allocator for GLFW. To set an
+ * allocator, pass it to @ref glfwInitAllocator before initializing the
+ * library.
+ *
+ * @sa @ref init_allocator
+ * @sa @ref glfwInitAllocator
+ *
+ * @since Added in version 3.4.
+ *
+ * @ingroup init
+ */
+typedef struct GLFWallocator
+{
+ /*! The memory allocation callback. See @ref GLFWallocatefun. */
+ GLFWallocatefun allocate;
+ /*! The memory reallocation callback. See @ref GLFWreallocatefun. */
+ GLFWreallocatefun reallocate;
+ /*! The memory deallocation callback. See @ref GLFWdeallocatefun. */
+ GLFWdeallocatefun deallocate;
+ /*! A user-defined pointer passed to the callbacks above. */
+ void* user;
+} GLFWallocator;
+
+
+/*************************************************************************
+ * GLFW API functions
+ *************************************************************************/
+
+/*! @brief Initializes the GLFW library.
+ *
+ * This function initializes the GLFW library. Before most GLFW functions can
+ * be used, GLFW must be initialized, and before an application terminates GLFW
+ * should be terminated in order to free any resources allocated during or
+ * after initialization.
+ *
+ * If this function fails, it calls @ref glfwTerminate before returning. If it
+ * succeeds, you should call @ref glfwTerminate before the application exits.
+ *
+ * Additional calls to this function after successful initialization but before
+ * termination will return `GLFW_TRUE` immediately.
+ *
+ * The @ref GLFW_PLATFORM init hint controls which platforms are considered during
+ * initialization. This also depends on which platforms the library was compiled to
+ * support.
+ *
+ * @return `GLFW_TRUE` if successful, or `GLFW_FALSE` if an
+ * [error](@ref error_handling) occurred.
+ *
+ * @errors Possible errors include @ref GLFW_PLATFORM_UNAVAILABLE and @ref
+ * GLFW_PLATFORM_ERROR.
+ *
+ * @remark @macos This function will change the current directory of the
+ * application to the `Contents/Resources` subdirectory of the application's
+ * bundle, if present. This can be disabled with the @ref
+ * GLFW_COCOA_CHDIR_RESOURCES init hint.
+ *
+ * @remark @macos This function will create the main menu and dock icon for the
+ * application. If GLFW finds a `MainMenu.nib` it is loaded and assumed to
+ * contain a menu bar. Otherwise a minimal menu bar is created manually with
+ * common commands like Hide, Quit and About. The About entry opens a minimal
+ * about dialog with information from the application's bundle. The menu bar
+ * and dock icon can be disabled entirely with the @ref GLFW_COCOA_MENUBAR init
+ * hint.
+ *
+ * @remark @x11 This function will set the `LC_CTYPE` category of the
+ * application locale according to the current environment if that category is
+ * still "C". This is because the "C" locale breaks Unicode text input.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref intro_init
+ * @sa @ref glfwInitHint
+ * @sa @ref glfwInitAllocator
+ * @sa @ref glfwTerminate
+ *
+ * @since Added in version 1.0.
+ *
+ * @ingroup init
+ */
+GLFWAPI int glfwInit(void);
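+
+/* A minimal usage sketch of the initialize/terminate pattern described above,
+ * with error checking. It assumes <stdio.h> and sits inside the application's
+ * startup code; glfwGetError is declared later in this header.
+ *
+ * @code
+ * if (!glfwInit())
+ * {
+ *     // Initialization failed; GLFW has already cleaned up after itself,
+ *     // so just report the error and bail out.
+ *     const char* description;
+ *     glfwGetError(&description);
+ *     fprintf(stderr, "GLFW init failed: %s\n", description ? description : "unknown");
+ *     return -1;
+ * }
+ *
+ * // ... create windows and run the application ...
+ *
+ * glfwTerminate();
+ * @endcode
+ */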
+
+/*! @brief Terminates the GLFW library.
+ *
+ * This function destroys all remaining windows and cursors, restores any
+ * modified gamma ramps and frees any other allocated resources. Once this
+ * function is called, you must again call @ref glfwInit successfully before
+ * you will be able to use most GLFW functions.
+ *
+ * If GLFW has been successfully initialized, this function should be called
+ * before the application exits. If initialization fails, there is no need to
+ * call this function, as it is called by @ref glfwInit before it returns
+ * failure.
+ *
+ * This function has no effect if GLFW is not initialized.
+ *
+ * @errors Possible errors include @ref GLFW_PLATFORM_ERROR.
+ *
+ * @remark This function may be called before @ref glfwInit.
+ *
+ * @warning The contexts of any remaining windows must not be current on any
+ * other thread when this function is called.
+ *
+ * @reentrancy This function must not be called from a callback.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref intro_init
+ * @sa @ref glfwInit
+ *
+ * @since Added in version 1.0.
+ *
+ * @ingroup init
+ */
+GLFWAPI void glfwTerminate(void);
+
+/*! @brief Sets the specified init hint to the desired value.
+ *
+ * This function sets hints for the next initialization of GLFW.
+ *
+ * The values you set hints to are never reset by GLFW, but they only take
+ * effect during initialization. Once GLFW has been initialized, any values
+ * you set will be ignored until the library is terminated and initialized
+ * again.
+ *
+ * Some hints are platform specific. These may be set on any platform but they
+ * will only affect their specific platform. Other platforms will ignore them.
+ * Setting these hints requires no platform specific headers or functions.
+ *
+ * @param[in] hint The [init hint](@ref init_hints) to set.
+ * @param[in] value The new value of the init hint.
+ *
+ * @errors Possible errors include @ref GLFW_INVALID_ENUM and @ref
+ * GLFW_INVALID_VALUE.
+ *
+ * @remark This function may be called before @ref glfwInit.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref init_hints
+ * @sa @ref glfwInit
+ *
+ * @since Added in version 3.3.
+ *
+ * @ingroup init
+ */
+GLFWAPI void glfwInitHint(int hint, int value);
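+
+/* A minimal usage sketch: setting an init hint before initialization.
+ * GLFW_JOYSTICK_HAT_BUTTONS and GLFW_FALSE are declared elsewhere in this
+ * header; `handle_initialization_failure` stands in for application error
+ * handling.
+ *
+ * @code
+ * // Expose joystick hats only as hats, not as extra buttons.
+ * glfwInitHint(GLFW_JOYSTICK_HAT_BUTTONS, GLFW_FALSE);
+ *
+ * if (!glfwInit())
+ *     handle_initialization_failure();
+ * @endcode
+ */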
+
+/*! @brief Sets the init allocator to the desired value.
+ *
+ * To use the default allocator, call this function with a `NULL` argument.
+ *
+ * If you specify an allocator struct, each of its callback members must be a
+ * valid function pointer. If any callback is `NULL`, this function emits @ref
+ * GLFW_INVALID_VALUE and the init allocator is unchanged.
+ *
+ * @param[in] allocator The allocator to use at the next initialization, or
+ * `NULL` to use the default one.
+ *
+ * @errors Possible errors include @ref GLFW_INVALID_VALUE.
+ *
+ * @pointer_lifetime The specified allocator is copied before this function
+ * returns.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref init_allocator
+ * @sa @ref glfwInit
+ *
+ * @since Added in version 3.4.
+ *
+ * @ingroup init
+ */
+GLFWAPI void glfwInitAllocator(const GLFWallocator* allocator);
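+
+/* A minimal usage sketch: forwarding all GLFW allocations to the C runtime
+ * heap. It assumes <stdlib.h>; a real allocator would more likely route to a
+ * custom heap or add instrumentation.
+ *
+ * @code
+ * static void* forward_allocate(size_t size, void* user)
+ * {
+ *     return malloc(size);
+ * }
+ *
+ * static void* forward_reallocate(void* block, size_t size, void* user)
+ * {
+ *     return realloc(block, size);
+ * }
+ *
+ * static void forward_deallocate(void* block, void* user)
+ * {
+ *     free(block);
+ * }
+ *
+ * GLFWallocator allocator = { forward_allocate, forward_reallocate, forward_deallocate, NULL };
+ * glfwInitAllocator(&allocator);
+ *
+ * // The allocator takes effect at the next call to glfwInit.
+ * glfwInit();
+ * @endcode
+ */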
+
+#if defined(VK_VERSION_1_0)
+
+/*! @brief Sets the desired Vulkan `vkGetInstanceProcAddr` function.
+ *
+ * This function sets the `vkGetInstanceProcAddr` function that GLFW will use for all
+ * Vulkan related entry point queries.
+ *
+ * This feature is mostly useful on macOS, if your copy of the Vulkan loader is in
+ * a location where GLFW cannot find it through dynamic loading, or if you are still
+ * using the static library version of the loader.
+ *
+ * If set to `NULL`, GLFW will try to load the Vulkan loader dynamically by its standard
+ * name and get this function from there. This is the default behavior.
+ *
+ * The standard name of the loader is `vulkan-1.dll` on Windows, `libvulkan.so.1` on
+ * Linux and other Unix-like systems and `libvulkan.1.dylib` on macOS. If your code is
+ * also loading it via these names then you probably don't need to use this function.
+ *
+ * The function address you set is never reset by GLFW, but it only takes effect during
+ * initialization. Once GLFW has been initialized, any updates will be ignored until the
+ * library is terminated and initialized again.
+ *
+ * @param[in] loader The address of the function to use, or `NULL`.
+ *
+ * @par Loader function signature
+ * @code
+ * PFN_vkVoidFunction vkGetInstanceProcAddr(VkInstance instance, const char* name)
+ * @endcode
+ * For more information about this function, see the
+ * [Vulkan Registry](https://www.khronos.org/registry/vulkan/).
+ *
+ * @errors None.
+ *
+ * @remark This function may be called before @ref glfwInit.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref vulkan_loader
+ * @sa @ref glfwInit
+ *
+ * @since Added in version 3.4.
+ *
+ * @ingroup init
+ */
+GLFWAPI void glfwInitVulkanLoader(PFN_vkGetInstanceProcAddr loader);
+
+#endif /*VK_VERSION_1_0*/
+
+/*! @brief Retrieves the version of the GLFW library.
+ *
+ * This function retrieves the major, minor and revision numbers of the GLFW
+ * library. It is intended for when you are using GLFW as a shared library and
+ * want to ensure that you are using the minimum required version.
+ *
+ * Any or all of the version arguments may be `NULL`.
+ *
+ * @param[out] major Where to store the major version number, or `NULL`.
+ * @param[out] minor Where to store the minor version number, or `NULL`.
+ * @param[out] rev Where to store the revision number, or `NULL`.
+ *
+ * @errors None.
+ *
+ * @remark This function may be called before @ref glfwInit.
+ *
+ * @thread_safety This function may be called from any thread.
+ *
+ * @sa @ref intro_version
+ * @sa @ref glfwGetVersionString
+ *
+ * @since Added in version 1.0.
+ *
+ * @ingroup init
+ */
+GLFWAPI void glfwGetVersion(int* major, int* minor, int* rev);
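+
+/* A minimal usage sketch: checking the runtime version of a shared GLFW
+ * library against the compile-time GLFW_VERSION_MAJOR and GLFW_VERSION_MINOR
+ * macros defined earlier in this header. It assumes <stdio.h>.
+ *
+ * @code
+ * int major, minor, rev;
+ * glfwGetVersion(&major, &minor, &rev);
+ *
+ * if (major != GLFW_VERSION_MAJOR || minor < GLFW_VERSION_MINOR)
+ * {
+ *     // The loaded library is older than the headers this code was built with.
+ *     fprintf(stderr, "GLFW %d.%d.%d is too old\n", major, minor, rev);
+ * }
+ * @endcode
+ */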
+
+/*! @brief Returns a string describing the compile-time configuration.
+ *
+ * This function returns the compile-time generated
+ * [version string](@ref intro_version_string) of the GLFW library binary. It describes
+ * the version, platforms, compiler and any platform or operating system specific
+ * compile-time options. It should not be confused with the OpenGL or OpenGL ES version
+ * string, queried with `glGetString`.
+ *
+ * __Do not use the version string__ to parse the GLFW library version. The
+ * @ref glfwGetVersion function provides the version of the running library
+ * binary in numerical format.
+ *
+ * __Do not use the version string__ to parse what platforms are supported. The @ref
+ * glfwPlatformSupported function lets you query platform support.
+ *
+ * @return The ASCII encoded GLFW version string.
+ *
+ * @errors None.
+ *
+ * @remark This function may be called before @ref glfwInit.
+ *
+ * @pointer_lifetime The returned string is static and compile-time generated.
+ *
+ * @thread_safety This function may be called from any thread.
+ *
+ * @sa @ref intro_version
+ * @sa @ref glfwGetVersion
+ *
+ * @since Added in version 3.0.
+ *
+ * @ingroup init
+ */
+GLFWAPI const char* glfwGetVersionString(void);
+
+/*! @brief Returns and clears the last error for the calling thread.
+ *
+ * This function returns and clears the [error code](@ref errors) of the last
+ * error that occurred on the calling thread, and optionally a UTF-8 encoded
+ * human-readable description of it. If no error has occurred since the last
+ * call, it returns @ref GLFW_NO_ERROR (zero) and the description pointer is
+ * set to `NULL`.
+ *
+ * @param[in] description Where to store the error description pointer, or `NULL`.
+ * @return The last error code for the calling thread, or @ref GLFW_NO_ERROR
+ * (zero).
+ *
+ * @errors None.
+ *
+ * @pointer_lifetime The returned string is allocated and freed by GLFW. You
+ * should not free it yourself. It is guaranteed to be valid only until the
+ * next error occurs or the library is terminated.
+ *
+ * @remark This function may be called before @ref glfwInit.
+ *
+ * @thread_safety This function may be called from any thread.
+ *
+ * @sa @ref error_handling
+ * @sa @ref glfwSetErrorCallback
+ *
+ * @since Added in version 3.3.
+ *
+ * @ingroup init
+ */
+GLFWAPI int glfwGetError(const char** description);
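+
+/* A minimal usage sketch: polling for the last error after a call that may
+ * fail. GLFW_NO_ERROR is declared earlier in this header; <stdio.h> is
+ * assumed.
+ *
+ * @code
+ * const char* description;
+ * int code = glfwGetError(&description);
+ *
+ * if (code != GLFW_NO_ERROR)
+ *     fprintf(stderr, "GLFW error %d: %s\n", code, description);
+ * @endcode
+ */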
+
+/*! @brief Sets the error callback.
+ *
+ * This function sets the error callback, which is called with an error code
+ * and a human-readable description each time a GLFW error occurs.
+ *
+ * The error code is set before the callback is called. Calling @ref
+ * glfwGetError from the error callback will return the same value as the error
+ * code argument.
+ *
+ * The error callback is called on the thread where the error occurred. If you
+ * are using GLFW from multiple threads, your error callback needs to be
+ * written accordingly.
+ *
+ * Because the description string may have been generated specifically for that
+ * error, it is not guaranteed to be valid after the callback has returned. If
+ * you wish to use it after the callback returns, you need to make a copy.
+ *
+ * Once set, the error callback remains set even after the library has been
+ * terminated.
+ *
+ * @param[in] callback The new callback, or `NULL` to remove the currently set
+ * callback.
+ * @return The previously set callback, or `NULL` if no callback was set.
+ *
+ * @callback_signature
+ * @code
+ * void callback_name(int error_code, const char* description)
+ * @endcode
+ * For more information about the callback parameters, see the
+ * [callback pointer type](@ref GLFWerrorfun).
+ *
+ * @errors None.
+ *
+ * @remark This function may be called before @ref glfwInit.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref error_handling
+ * @sa @ref glfwGetError
+ *
+ * @since Added in version 3.0.
+ *
+ * @ingroup init
+ */
+GLFWAPI GLFWerrorfun glfwSetErrorCallback(GLFWerrorfun callback);
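+
+/* A minimal usage sketch: installing an error callback before initialization
+ * so that init failures are reported as well. It assumes <stdio.h>.
+ *
+ * @code
+ * static void error_callback(int error_code, const char* description)
+ * {
+ *     // The description is only guaranteed to be valid during the callback,
+ *     // so print it (or copy it) immediately.
+ *     fprintf(stderr, "GLFW error %d: %s\n", error_code, description);
+ * }
+ *
+ * glfwSetErrorCallback(error_callback);
+ * glfwInit();
+ * @endcode
+ */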
+
+/*! @brief Returns the currently selected platform.
+ *
+ * This function returns the platform that was selected during initialization. The
+ * returned value will be one of `GLFW_PLATFORM_WIN32`, `GLFW_PLATFORM_COCOA`,
+ * `GLFW_PLATFORM_WAYLAND`, `GLFW_PLATFORM_X11` or `GLFW_PLATFORM_NULL`.
+ *
+ * @return The currently selected platform, or zero if an error occurred.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED.
+ *
+ * @thread_safety This function may be called from any thread.
+ *
+ * @sa @ref platform
+ * @sa @ref glfwPlatformSupported
+ *
+ * @since Added in version 3.4.
+ *
+ * @ingroup init
+ */
+GLFWAPI int glfwGetPlatform(void);
+
+/*! @brief Returns whether the library includes support for the specified platform.
+ *
+ * This function returns whether the library was compiled with support for the specified
+ * platform. The platform must be one of `GLFW_PLATFORM_WIN32`, `GLFW_PLATFORM_COCOA`,
+ * `GLFW_PLATFORM_WAYLAND`, `GLFW_PLATFORM_X11` or `GLFW_PLATFORM_NULL`.
+ *
+ * @param[in] platform The platform to query.
+ * @return `GLFW_TRUE` if the platform is supported, or `GLFW_FALSE` otherwise.
+ *
+ * @errors Possible errors include @ref GLFW_INVALID_ENUM.
+ *
+ * @remark This function may be called before @ref glfwInit.
+ *
+ * @thread_safety This function may be called from any thread.
+ *
+ * @sa @ref platform
+ * @sa @ref glfwGetPlatform
+ *
+ * @since Added in version 3.4.
+ *
+ * @ingroup init
+ */
+GLFWAPI int glfwPlatformSupported(int platform);
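+
+/* A minimal usage sketch: preferring Wayland when the library was built with
+ * Wayland support, and otherwise letting GLFW pick a platform, before calling
+ * glfwInit. All tokens used here are defined earlier in this header.
+ *
+ * @code
+ * if (glfwPlatformSupported(GLFW_PLATFORM_WAYLAND))
+ *     glfwInitHint(GLFW_PLATFORM, GLFW_PLATFORM_WAYLAND);
+ * else
+ *     glfwInitHint(GLFW_PLATFORM, GLFW_ANY_PLATFORM);
+ *
+ * glfwInit();
+ * @endcode
+ */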
+
+/*! @brief Returns the currently connected monitors.
+ *
+ * This function returns an array of handles for all currently connected
+ * monitors. The primary monitor is always first in the returned array. If no
+ * monitors were found, this function returns `NULL`.
+ *
+ * @param[out] count Where to store the number of monitors in the returned
+ * array. This is set to zero if an error occurred.
+ * @return An array of monitor handles, or `NULL` if no monitors were found or
+ * if an [error](@ref error_handling) occurred.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED.
+ *
+ * @pointer_lifetime The returned array is allocated and freed by GLFW. You
+ * should not free it yourself. It is guaranteed to be valid only until the
+ * monitor configuration changes or the library is terminated.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref monitor_monitors
+ * @sa @ref monitor_event
+ * @sa @ref glfwGetPrimaryMonitor
+ *
+ * @since Added in version 3.0.
+ *
+ * @ingroup monitor
+ */
+GLFWAPI GLFWmonitor** glfwGetMonitors(int* count);
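+
+/* A minimal usage sketch: listing the names of all connected monitors. It
+ * assumes <stdio.h> and uses glfwGetMonitorName, declared later in this
+ * header.
+ *
+ * @code
+ * int count;
+ * GLFWmonitor** monitors = glfwGetMonitors(&count);
+ *
+ * for (int i = 0; i < count; i++)
+ *     printf("monitor %d: %s\n", i, glfwGetMonitorName(monitors[i]));
+ * @endcode
+ */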
+
+/*! @brief Returns the primary monitor.
+ *
+ * This function returns the primary monitor. This is usually the monitor
+ * where elements like the task bar or global menu bar are located.
+ *
+ * @return The primary monitor, or `NULL` if no monitors were found or if an
+ * [error](@ref error_handling) occurred.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @remark The primary monitor is always first in the array returned by @ref
+ * glfwGetMonitors.
+ *
+ * @sa @ref monitor_monitors
+ * @sa @ref glfwGetMonitors
+ *
+ * @since Added in version 3.0.
+ *
+ * @ingroup monitor
+ */
+GLFWAPI GLFWmonitor* glfwGetPrimaryMonitor(void);
+
+/*! @brief Returns the position of the monitor's viewport on the virtual screen.
+ *
+ * This function returns the position, in screen coordinates, of the upper-left
+ * corner of the specified monitor.
+ *
+ * Any or all of the position arguments may be `NULL`. If an error occurs, all
+ * non-`NULL` position arguments will be set to zero.
+ *
+ * @param[in] monitor The monitor to query.
+ * @param[out] xpos Where to store the monitor x-coordinate, or `NULL`.
+ * @param[out] ypos Where to store the monitor y-coordinate, or `NULL`.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref
+ * GLFW_PLATFORM_ERROR.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref monitor_properties
+ *
+ * @since Added in version 3.0.
+ *
+ * @ingroup monitor
+ */
+GLFWAPI void glfwGetMonitorPos(GLFWmonitor* monitor, int* xpos, int* ypos);
+
+/*! @brief Retrieves the work area of the monitor.
+ *
+ * This function returns the position, in screen coordinates, of the upper-left
+ * corner of the work area of the specified monitor along with the work area
+ * size in screen coordinates. The work area is defined as the area of the
+ * monitor not occluded by the window system task bar where present. If no
+ * task bar exists then the work area is the monitor resolution in screen
+ * coordinates.
+ *
+ * Any or all of the position and size arguments may be `NULL`. If an error
+ * occurs, all non-`NULL` position and size arguments will be set to zero.
+ *
+ * @param[in] monitor The monitor to query.
+ * @param[out] xpos Where to store the monitor x-coordinate, or `NULL`.
+ * @param[out] ypos Where to store the monitor y-coordinate, or `NULL`.
+ * @param[out] width Where to store the monitor width, or `NULL`.
+ * @param[out] height Where to store the monitor height, or `NULL`.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref
+ * GLFW_PLATFORM_ERROR.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref monitor_workarea
+ *
+ * @since Added in version 3.3.
+ *
+ * @ingroup monitor
+ */
+GLFWAPI void glfwGetMonitorWorkarea(GLFWmonitor* monitor, int* xpos, int* ypos, int* width, int* height);
+
+/*! @brief Returns the physical size of the monitor.
+ *
+ * This function returns the size, in millimetres, of the display area of the
+ * specified monitor.
+ *
+ * Some platforms do not provide accurate monitor size information, either
+ * because the monitor
+ * [EDID](https://en.wikipedia.org/wiki/Extended_display_identification_data)
+ * data is incorrect or because the driver does not report it accurately.
+ *
+ * Any or all of the size arguments may be `NULL`. If an error occurs, all
+ * non-`NULL` size arguments will be set to zero.
+ *
+ * @param[in] monitor The monitor to query.
+ * @param[out] widthMM Where to store the width, in millimetres, of the
+ * monitor's display area, or `NULL`.
+ * @param[out] heightMM Where to store the height, in millimetres, of the
+ * monitor's display area, or `NULL`.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED.
+ *
+ * @remark @win32 On Windows 8 and earlier the physical size is calculated from
+ * the current resolution and system DPI instead of querying the monitor EDID data.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref monitor_properties
+ *
+ * @since Added in version 3.0.
+ *
+ * @ingroup monitor
+ */
+GLFWAPI void glfwGetMonitorPhysicalSize(GLFWmonitor* monitor, int* widthMM, int* heightMM);
+
+/*! @brief Retrieves the content scale for the specified monitor.
+ *
+ * This function retrieves the content scale for the specified monitor. The
+ * content scale is the ratio between the current DPI and the platform's
+ * default DPI. This is especially important for text and any UI elements. If
+ * the pixel dimensions of your UI scaled by this look appropriate on your
+ * machine then it should appear at a reasonable size on other machines
+ * regardless of their DPI and scaling settings. This relies on the system DPI
+ * and scaling settings being somewhat correct.
+ *
+ * The content scale may depend on both the monitor resolution and pixel
+ * density and on user settings. It may be very different from the raw DPI
+ * calculated from the physical size and current resolution.
+ *
+ * @param[in] monitor The monitor to query.
+ * @param[out] xscale Where to store the x-axis content scale, or `NULL`.
+ * @param[out] yscale Where to store the y-axis content scale, or `NULL`.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref
+ * GLFW_PLATFORM_ERROR.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref monitor_scale
+ * @sa @ref glfwGetWindowContentScale
+ *
+ * @since Added in version 3.3.
+ *
+ * @ingroup monitor
+ */
+GLFWAPI void glfwGetMonitorContentScale(GLFWmonitor* monitor, float* xscale, float* yscale);
+
+/*! @brief Returns the name of the specified monitor.
+ *
+ * This function returns a human-readable name, encoded as UTF-8, of the
+ * specified monitor. The name typically reflects the make and model of the
+ * monitor and is not guaranteed to be unique among the connected monitors.
+ *
+ * @param[in] monitor The monitor to query.
+ * @return The UTF-8 encoded name of the monitor, or `NULL` if an
+ * [error](@ref error_handling) occurred.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED.
+ *
+ * @pointer_lifetime The returned string is allocated and freed by GLFW. You
+ * should not free it yourself. It is valid until the specified monitor is
+ * disconnected or the library is terminated.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref monitor_properties
+ *
+ * @since Added in version 3.0.
+ *
+ * @ingroup monitor
+ */
+GLFWAPI const char* glfwGetMonitorName(GLFWmonitor* monitor);
+
+/*! @brief Sets the user pointer of the specified monitor.
+ *
+ * This function sets the user-defined pointer of the specified monitor. The
+ * current value is retained until the monitor is disconnected. The initial
+ * value is `NULL`.
+ *
+ * This function may be called from the monitor callback, even for a monitor
+ * that is being disconnected.
+ *
+ * @param[in] monitor The monitor whose pointer to set.
+ * @param[in] pointer The new value.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED.
+ *
+ * @thread_safety This function may be called from any thread. Access is not
+ * synchronized.
+ *
+ * @sa @ref monitor_userptr
+ * @sa @ref glfwGetMonitorUserPointer
+ *
+ * @since Added in version 3.3.
+ *
+ * @ingroup monitor
+ */
+GLFWAPI void glfwSetMonitorUserPointer(GLFWmonitor* monitor, void* pointer);
+
+/*! @brief Returns the user pointer of the specified monitor.
+ *
+ * This function returns the current value of the user-defined pointer of the
+ * specified monitor. The initial value is `NULL`.
+ *
+ * This function may be called from the monitor callback, even for a monitor
+ * that is being disconnected.
+ *
+ * @param[in] monitor The monitor whose pointer to return.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED.
+ *
+ * @thread_safety This function may be called from any thread. Access is not
+ * synchronized.
+ *
+ * @sa @ref monitor_userptr
+ * @sa @ref glfwSetMonitorUserPointer
+ *
+ * @since Added in version 3.3.
+ *
+ * @ingroup monitor
+ */
+GLFWAPI void* glfwGetMonitorUserPointer(GLFWmonitor* monitor);
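+
+/* A sketch of attaching application data to a monitor and reading it back
+ * later; MyMonitorState is a hypothetical application-side struct.
+ *
+ *     MyMonitorState* state = calloc(1, sizeof(MyMonitorState)); // <stdlib.h>
+ *     glfwSetMonitorUserPointer(monitor, state);
+ *     ...
+ *     MyMonitorState* again = glfwGetMonitorUserPointer(monitor);
+ */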
+
+/*! @brief Sets the monitor configuration callback.
+ *
+ * This function sets the monitor configuration callback, or removes the
+ * currently set callback. This is called when a monitor is connected to or
+ * disconnected from the system.
+ *
+ * @param[in] callback The new callback, or `NULL` to remove the currently set
+ * callback.
+ * @return The previously set callback, or `NULL` if no callback was set or the
+ * library had not been [initialized](@ref intro_init).
+ *
+ * @callback_signature
+ * @code
+ * void function_name(GLFWmonitor* monitor, int event)
+ * @endcode
+ * For more information about the callback parameters, see the
+ * [function pointer type](@ref GLFWmonitorfun).
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref monitor_event
+ *
+ * @since Added in version 3.0.
+ *
+ * @ingroup monitor
+ */
+GLFWAPI GLFWmonitorfun glfwSetMonitorCallback(GLFWmonitorfun callback);
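+
+/* A sketch of a monitor configuration callback; GLFW_CONNECTED and
+ * GLFW_DISCONNECTED are the event values defined elsewhere in this header.
+ *
+ *     void monitor_callback(GLFWmonitor* monitor, int event)
+ *     {
+ *         if (event == GLFW_CONNECTED)
+ *         {
+ *             // A monitor was connected; its handle is now valid.
+ *         }
+ *         else if (event == GLFW_DISCONNECTED)
+ *         {
+ *             // Clean up any per-monitor state tied to this handle.
+ *         }
+ *     }
+ *
+ *     glfwSetMonitorCallback(monitor_callback);
+ */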
+
+/*! @brief Returns the available video modes for the specified monitor.
+ *
+ * This function returns an array of all video modes supported by the specified
+ * monitor. The returned array is sorted in ascending order, first by color
+ * bit depth (the sum of all channel depths), then by resolution area (the
+ * product of width and height), then by resolution width and finally by
+ * refresh rate.
+ *
+ * @param[in] monitor The monitor to query.
+ * @param[out] count Where to store the number of video modes in the returned
+ * array. This is set to zero if an error occurred.
+ * @return An array of video modes, or `NULL` if an
+ * [error](@ref error_handling) occurred.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref
+ * GLFW_PLATFORM_ERROR.
+ *
+ * @pointer_lifetime The returned array is allocated and freed by GLFW. You
+ * should not free it yourself. It is valid until the specified monitor is
+ * disconnected, this function is called again for that monitor or the library
+ * is terminated.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref monitor_modes
+ * @sa @ref glfwGetVideoMode
+ *
+ * @since Added in version 1.0.
+ * @glfw3 Changed to return an array of modes for a specific monitor.
+ *
+ * @ingroup monitor
+ */
+GLFWAPI const GLFWvidmode* glfwGetVideoModes(GLFWmonitor* monitor, int* count);
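+
+/* A sketch of iterating the available modes of a monitor; count is set to
+ * zero and the array is `NULL` on error, so the loop is safely skipped.
+ *
+ *     int count;
+ *     const GLFWvidmode* modes = glfwGetVideoModes(monitor, &count);
+ *     for (int i = 0;  i < count;  i++)
+ *         printf("%dx%d @ %d Hz\n",                     // <stdio.h>
+ *                modes[i].width, modes[i].height, modes[i].refreshRate);
+ */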
+
+/*! @brief Returns the current mode of the specified monitor.
+ *
+ * This function returns the current video mode of the specified monitor. If
+ * you have created a full screen window for that monitor, the return value
+ * will depend on whether that window is iconified.
+ *
+ * @param[in] monitor The monitor to query.
+ * @return The current mode of the monitor, or `NULL` if an
+ * [error](@ref error_handling) occurred.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref
+ * GLFW_PLATFORM_ERROR.
+ *
+ * @pointer_lifetime The returned array is allocated and freed by GLFW. You
+ * should not free it yourself. It is valid until the specified monitor is
+ * disconnected or the library is terminated.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref monitor_modes
+ * @sa @ref glfwGetVideoModes
+ *
+ * @since Added in version 3.0. Replaces `glfwGetDesktopMode`.
+ *
+ * @ingroup monitor
+ */
+GLFWAPI const GLFWvidmode* glfwGetVideoMode(GLFWmonitor* monitor);
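+
+/* A sketch of creating a "windowed full screen" window by matching the
+ * monitor's current video mode (see @ref window_windowed_full_screen);
+ * assumes glfwInit() has succeeded and the primary monitor is valid.
+ *
+ *     GLFWmonitor* monitor = glfwGetPrimaryMonitor();
+ *     const GLFWvidmode* mode = glfwGetVideoMode(monitor);
+ *
+ *     glfwWindowHint(GLFW_RED_BITS, mode->redBits);
+ *     glfwWindowHint(GLFW_GREEN_BITS, mode->greenBits);
+ *     glfwWindowHint(GLFW_BLUE_BITS, mode->blueBits);
+ *     glfwWindowHint(GLFW_REFRESH_RATE, mode->refreshRate);
+ *
+ *     GLFWwindow* window = glfwCreateWindow(mode->width, mode->height,
+ *                                           "Title", monitor, NULL);
+ */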
+
+/*! @brief Generates a gamma ramp and sets it for the specified monitor.
+ *
+ * This function generates an appropriately sized gamma ramp from the specified
+ * exponent and then calls @ref glfwSetGammaRamp with it. The value must be
+ * a finite number greater than zero.
+ *
+ * The software controlled gamma ramp is applied _in addition_ to the hardware
+ * gamma correction, which today is usually an approximation of sRGB gamma.
+ * This means that setting a perfectly linear ramp, or gamma 1.0, will produce
+ * the default (usually sRGB-like) behavior.
+ *
+ * For gamma correct rendering with OpenGL or OpenGL ES, see the @ref
+ * GLFW_SRGB_CAPABLE hint.
+ *
+ * @param[in] monitor The monitor whose gamma ramp to set.
+ * @param[in] gamma The desired exponent.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref
+ * GLFW_INVALID_VALUE and @ref GLFW_PLATFORM_ERROR.
+ *
+ * @remark @wayland Gamma handling is a privileged protocol; this function
+ * will thus never be implemented and emits @ref GLFW_PLATFORM_ERROR.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref monitor_gamma
+ *
+ * @since Added in version 3.0.
+ *
+ * @ingroup monitor
+ */
+GLFWAPI void glfwSetGamma(GLFWmonitor* monitor, float gamma);
+
+/*! @brief Returns the current gamma ramp for the specified monitor.
+ *
+ * This function returns the current gamma ramp of the specified monitor.
+ *
+ * @param[in] monitor The monitor to query.
+ * @return The current gamma ramp, or `NULL` if an
+ * [error](@ref error_handling) occurred.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref
+ * GLFW_PLATFORM_ERROR.
+ *
+ * @remark @wayland Gamma handling is a privileged protocol; this function
+ * will thus never be implemented and emits @ref GLFW_PLATFORM_ERROR while
+ * returning `NULL`.
+ *
+ * @pointer_lifetime The returned structure and its arrays are allocated and
+ * freed by GLFW. You should not free them yourself. They are valid until the
+ * specified monitor is disconnected, this function is called again for that
+ * monitor or the library is terminated.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref monitor_gamma
+ *
+ * @since Added in version 3.0.
+ *
+ * @ingroup monitor
+ */
+GLFWAPI const GLFWgammaramp* glfwGetGammaRamp(GLFWmonitor* monitor);
+
+/*! @brief Sets the current gamma ramp for the specified monitor.
+ *
+ * This function sets the current gamma ramp for the specified monitor. The
+ * original gamma ramp for that monitor is saved by GLFW the first time this
+ * function is called and is restored by @ref glfwTerminate.
+ *
+ * The software controlled gamma ramp is applied _in addition_ to the hardware
+ * gamma correction, which today is usually an approximation of sRGB gamma.
+ * This means that setting a perfectly linear ramp, or gamma 1.0, will produce
+ * the default (usually sRGB-like) behavior.
+ *
+ * For gamma correct rendering with OpenGL or OpenGL ES, see the @ref
+ * GLFW_SRGB_CAPABLE hint.
+ *
+ * @param[in] monitor The monitor whose gamma ramp to set.
+ * @param[in] ramp The gamma ramp to use.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref
+ * GLFW_PLATFORM_ERROR.
+ *
+ * @remark The size of the specified gamma ramp should match the size of the
+ * current ramp for that monitor.
+ *
+ * @remark @win32 The gamma ramp size must be 256.
+ *
+ * @remark @wayland Gamma handling is a privileged protocol; this function
+ * will thus never be implemented and emits @ref GLFW_PLATFORM_ERROR.
+ *
+ * @pointer_lifetime The specified gamma ramp is copied before this function
+ * returns.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref monitor_gamma
+ *
+ * @since Added in version 3.0.
+ *
+ * @ingroup monitor
+ */
+GLFWAPI void glfwSetGammaRamp(GLFWmonitor* monitor, const GLFWgammaramp* ramp);
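+
+/* A sketch of setting a custom gamma ramp; it reuses the size of the current
+ * ramp, as recommended above, and assumes glfwGetGammaRamp and the
+ * allocations succeed.
+ *
+ *     const GLFWgammaramp* current = glfwGetGammaRamp(monitor);
+ *     GLFWgammaramp ramp;
+ *     ramp.size = current->size;
+ *     ramp.red = malloc(ramp.size * sizeof(unsigned short));   // <stdlib.h>
+ *     ramp.green = malloc(ramp.size * sizeof(unsigned short));
+ *     ramp.blue = malloc(ramp.size * sizeof(unsigned short));
+ *     // ... fill the three channel arrays ...
+ *     glfwSetGammaRamp(monitor, &ramp);          // the ramp is copied
+ *     free(ramp.red); free(ramp.green); free(ramp.blue);
+ */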
+
+/*! @brief Resets all window hints to their default values.
+ *
+ * This function resets all window hints to their
+ * [default values](@ref window_hints_values).
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref window_hints
+ * @sa @ref glfwWindowHint
+ * @sa @ref glfwWindowHintString
+ *
+ * @since Added in version 3.0.
+ *
+ * @ingroup window
+ */
+GLFWAPI void glfwDefaultWindowHints(void);
+
+/*! @brief Sets the specified window hint to the desired value.
+ *
+ * This function sets hints for the next call to @ref glfwCreateWindow. The
+ * hints, once set, retain their values until changed by a call to this
+ * function or @ref glfwDefaultWindowHints, or until the library is terminated.
+ *
+ * Only integer value hints can be set with this function. String value hints
+ * are set with @ref glfwWindowHintString.
+ *
+ * This function does not check whether the specified hint values are valid.
+ * If you set hints to invalid values this will instead be reported by the next
+ * call to @ref glfwCreateWindow.
+ *
+ * Some hints are platform specific. These may be set on any platform but they
+ * will only affect their specific platform. Other platforms will ignore them.
+ * Setting these hints requires no platform specific headers or functions.
+ *
+ * @param[in] hint The [window hint](@ref window_hints) to set.
+ * @param[in] value The new value of the window hint.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref
+ * GLFW_INVALID_ENUM.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref window_hints
+ * @sa @ref glfwWindowHintString
+ * @sa @ref glfwDefaultWindowHints
+ *
+ * @since Added in version 3.0. Replaces `glfwOpenWindowHint`.
+ *
+ * @ingroup window
+ */
+GLFWAPI void glfwWindowHint(int hint, int value);
+
+/*! @brief Sets the specified window hint to the desired value.
+ *
+ * This function sets hints for the next call to @ref glfwCreateWindow. The
+ * hints, once set, retain their values until changed by a call to this
+ * function or @ref glfwDefaultWindowHints, or until the library is terminated.
+ *
+ * Only string type hints can be set with this function. Integer value hints
+ * are set with @ref glfwWindowHint.
+ *
+ * This function does not check whether the specified hint values are valid.
+ * If you set hints to invalid values this will instead be reported by the next
+ * call to @ref glfwCreateWindow.
+ *
+ * Some hints are platform specific. These may be set on any platform but they
+ * will only affect their specific platform. Other platforms will ignore them.
+ * Setting these hints requires no platform specific headers or functions.
+ *
+ * @param[in] hint The [window hint](@ref window_hints) to set.
+ * @param[in] value The new value of the window hint.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref
+ * GLFW_INVALID_ENUM.
+ *
+ * @pointer_lifetime The specified string is copied before this function
+ * returns.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref window_hints
+ * @sa @ref glfwWindowHint
+ * @sa @ref glfwDefaultWindowHints
+ *
+ * @since Added in version 3.3.
+ *
+ * @ingroup window
+ */
+GLFWAPI void glfwWindowHintString(int hint, const char* value);
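+
+/* A sketch of combining the three hint functions before window creation; the
+ * hint values shown here are examples, not requirements.
+ *
+ *     glfwDefaultWindowHints();
+ *     glfwWindowHint(GLFW_RESIZABLE, GLFW_FALSE);
+ *     glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
+ *     glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);
+ *     glfwWindowHintString(GLFW_X11_CLASS_NAME, "MyApp");
+ *     GLFWwindow* window = glfwCreateWindow(640, 480, "Title", NULL, NULL);
+ */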
+
+/*! @brief Creates a window and its associated context.
+ *
+ * This function creates a window and its associated OpenGL or OpenGL ES
+ * context. Most of the options controlling how the window and its context
+ * should be created are specified with [window hints](@ref window_hints).
+ *
+ * Successful creation does not change which context is current. Before you
+ * can use the newly created context, you need to
+ * [make it current](@ref context_current). For information about the `share`
+ * parameter, see @ref context_sharing.
+ *
+ * The created window, framebuffer and context may differ from what you
+ * requested, as not all parameters and hints are
+ * [hard constraints](@ref window_hints_hard). This includes the size of the
+ * window, especially for full screen windows. To query the actual attributes
+ * of the created window, framebuffer and context, see @ref
+ * glfwGetWindowAttrib, @ref glfwGetWindowSize and @ref glfwGetFramebufferSize.
+ *
+ * To create a full screen window, you need to specify the monitor the window
+ * will cover. If no monitor is specified, the window will be created in
+ * windowed mode.
+ * Unless you have a way for the user to choose a specific monitor, it is
+ * recommended that you pick the primary monitor. For more information on how
+ * to query connected monitors, see @ref monitor_monitors.
+ *
+ * For full screen windows, the specified size becomes the resolution of the
+ * window's _desired video mode_. As long as a full screen window is not
+ * iconified, the supported video mode most closely matching the desired video
+ * mode is set for the specified monitor. For more information about full
+ * screen windows, including the creation of so called _windowed full screen_
+ * or _borderless full screen_ windows, see @ref window_windowed_full_screen.
+ *
+ * Once you have created the window, you can switch it between windowed and
+ * full screen mode with @ref glfwSetWindowMonitor. This will not affect its
+ * OpenGL or OpenGL ES context.
+ *
+ * By default, newly created windows use the placement recommended by the
+ * window system. To create the window at a specific position, make it
+ * initially invisible using the [GLFW_VISIBLE](@ref GLFW_VISIBLE_hint) window
+ * hint, set its [position](@ref window_pos) and then [show](@ref window_hide)
+ * it.
+ *
+ * As long as at least one full screen window is not iconified, the screensaver
+ * is prohibited from starting.
+ *
+ * Window systems put limits on window sizes. Very large or very small window
+ * dimensions may be overridden by the window system on creation. Check the
+ * actual [size](@ref window_size) after creation.
+ *
+ * The [swap interval](@ref buffer_swap) is not set during window creation and
+ * the initial value may vary depending on driver settings and defaults.
+ *
+ * @param[in] width The desired width, in screen coordinates, of the window.
+ * This must be greater than zero.
+ * @param[in] height The desired height, in screen coordinates, of the window.
+ * This must be greater than zero.
+ * @param[in] title The initial, UTF-8 encoded window title.
+ * @param[in] monitor The monitor to use for full screen mode, or `NULL` for
+ * windowed mode.
+ * @param[in] share The window whose context to share resources with, or `NULL`
+ * to not share resources.
+ * @return The handle of the created window, or `NULL` if an
+ * [error](@ref error_handling) occurred.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref
+ * GLFW_INVALID_ENUM, @ref GLFW_INVALID_VALUE, @ref GLFW_API_UNAVAILABLE, @ref
+ * GLFW_VERSION_UNAVAILABLE, @ref GLFW_FORMAT_UNAVAILABLE and @ref
+ * GLFW_PLATFORM_ERROR.
+ *
+ * @remark @win32 Window creation will fail if the Microsoft GDI software
+ * OpenGL implementation is the only one available.
+ *
+ * @remark @win32 If the executable has an icon resource named `GLFW_ICON`, it
+ * will be set as the initial icon for the window. If no such icon is present,
+ * the `IDI_APPLICATION` icon will be used instead. To set a different icon,
+ * see @ref glfwSetWindowIcon.
+ *
+ * @remark @win32 The context to share resources with must not be current on
+ * any other thread.
+ *
+ * @remark @macos The OS only supports core profile contexts for OpenGL
+ * versions 3.2 and later. Before creating an OpenGL context of version 3.2 or
+ * later you must set the [GLFW_OPENGL_PROFILE](@ref GLFW_OPENGL_PROFILE_hint)
+ * hint accordingly. OpenGL 3.0 and 3.1 contexts are not supported at all
+ * on macOS.
+ *
+ * @remark @macos The GLFW window has no icon, as it is not a document
+ * window, but the dock icon will be the same as the application bundle's icon.
+ * For more information on bundles, see the
+ * [Bundle Programming Guide](https://developer.apple.com/library/mac/documentation/CoreFoundation/Conceptual/CFBundles/)
+ * in the Mac Developer Library.
+ *
+ * @remark @macos On OS X 10.10 and later the window frame will not be rendered
+ * at full resolution on Retina displays unless the
+ * [GLFW_COCOA_RETINA_FRAMEBUFFER](@ref GLFW_COCOA_RETINA_FRAMEBUFFER_hint)
+ * hint is `GLFW_TRUE` and the `NSHighResolutionCapable` key is enabled in the
+ * application bundle's `Info.plist`. For more information, see
+ * [High Resolution Guidelines for OS X](https://developer.apple.com/library/mac/documentation/GraphicsAnimation/Conceptual/HighResolutionOSX/Explained/Explained.html)
+ * in the Mac Developer Library. The GLFW test and example programs use
+ * a custom `Info.plist` template for this, which can be found as
+ * `CMake/Info.plist.in` in the source tree.
+ *
+ * @remark @macos When activating frame autosaving with
+ * [GLFW_COCOA_FRAME_NAME](@ref GLFW_COCOA_FRAME_NAME_hint), the specified
+ * window size and position may be overridden by previously saved values.
+ *
+ * @remark @x11 Some window managers will not respect the placement of
+ * initially hidden windows.
+ *
+ * @remark @x11 Due to the asynchronous nature of X11, it may take a moment for
+ * a window to reach its requested state. This means you may not be able to
+ * query the final size, position or other attributes directly after window
+ * creation.
+ *
+ * @remark @x11 The class part of the `WM_CLASS` window property will by
+ * default be set to the window title passed to this function. The instance
+ * part will use the contents of the `RESOURCE_NAME` environment variable, if
+ * present and not empty, or fall back to the window title. Set the
+ * [GLFW_X11_CLASS_NAME](@ref GLFW_X11_CLASS_NAME_hint) and
+ * [GLFW_X11_INSTANCE_NAME](@ref GLFW_X11_INSTANCE_NAME_hint) window hints to
+ * override this.
+ *
+ * @remark @wayland Compositors should implement the xdg-decoration protocol
+ * for GLFW to decorate the window properly. If this protocol isn't
+ * supported, or if the compositor prefers client-side decorations, a very
+ * simple fallback frame will be drawn using the wp_viewporter protocol. A
+ * compositor can still emit close, maximize or fullscreen events, using for
+ * instance a keybind mechanism. If neither of these protocols is supported,
+ * the window won't be decorated.
+ *
+ * @remark @wayland A full screen window will not attempt to change the mode,
+ * no matter what the requested size or refresh rate.
+ *
+ * @remark @wayland Screensaver inhibition requires the idle-inhibit protocol
+ * to be implemented in the user's compositor.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref window_creation
+ * @sa @ref glfwDestroyWindow
+ *
+ * @since Added in version 3.0. Replaces `glfwOpenWindow`.
+ *
+ * @ingroup window
+ */
+GLFWAPI GLFWwindow* glfwCreateWindow(int width, int height, const char* title, GLFWmonitor* monitor, GLFWwindow* share);
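+
+/* A sketch of basic windowed-mode creation with error handling; assumes
+ * glfwInit() has already succeeded and that this runs inside main().
+ *
+ *     GLFWwindow* window = glfwCreateWindow(640, 480, "My Title", NULL, NULL);
+ *     if (!window)
+ *     {
+ *         // Window or context creation failed; consult the error callback
+ *         // or glfwGetError for details.
+ *         glfwTerminate();
+ *         return -1;
+ *     }
+ *     glfwMakeContextCurrent(window);
+ */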
+
+/*! @brief Destroys the specified window and its context.
+ *
+ * This function destroys the specified window and its context. On calling
+ * this function, no further callbacks will be called for that window.
+ *
+ * If the context of the specified window is current on the main thread, it is
+ * detached before being destroyed.
+ *
+ * @param[in] window The window to destroy.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref
+ * GLFW_PLATFORM_ERROR.
+ *
+ * @note The context of the specified window must not be current on any other
+ * thread when this function is called.
+ *
+ * @reentrancy This function must not be called from a callback.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref window_creation
+ * @sa @ref glfwCreateWindow
+ *
+ * @since Added in version 3.0. Replaces `glfwCloseWindow`.
+ *
+ * @ingroup window
+ */
+GLFWAPI void glfwDestroyWindow(GLFWwindow* window);
+
+/*! @brief Checks the close flag of the specified window.
+ *
+ * This function returns the value of the close flag of the specified window.
+ *
+ * @param[in] window The window to query.
+ * @return The value of the close flag.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED.
+ *
+ * @thread_safety This function may be called from any thread. Access is not
+ * synchronized.
+ *
+ * @sa @ref window_close
+ *
+ * @since Added in version 3.0.
+ *
+ * @ingroup window
+ */
+GLFWAPI int glfwWindowShouldClose(GLFWwindow* window);
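+
+/* A sketch of the typical event loop built on the close flag; assumes a
+ * valid window whose context is current on this thread.
+ *
+ *     while (!glfwWindowShouldClose(window))
+ *     {
+ *         // ... render ...
+ *         glfwSwapBuffers(window);
+ *         glfwPollEvents();
+ *     }
+ */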
+
+/*! @brief Sets the close flag of the specified window.
+ *
+ * This function sets the value of the close flag of the specified window.
+ * This can be used to override the user's attempt to close the window, or
+ * to signal that it should be closed.
+ *
+ * @param[in] window The window whose flag to change.
+ * @param[in] value The new value.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED.
+ *
+ * @thread_safety This function may be called from any thread. Access is not
+ * synchronized.
+ *
+ * @sa @ref window_close
+ *
+ * @since Added in version 3.0.
+ *
+ * @ingroup window
+ */
+GLFWAPI void glfwSetWindowShouldClose(GLFWwindow* window, int value);
+
+/*! @brief Sets the title of the specified window.
+ *
+ * This function sets the window title, encoded as UTF-8, of the specified
+ * window.
+ *
+ * @param[in] window The window whose title to change.
+ * @param[in] title The UTF-8 encoded window title.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref
+ * GLFW_PLATFORM_ERROR.
+ *
+ * @remark @macos The window title will not be updated until the next time you
+ * process events.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref window_title
+ *
+ * @since Added in version 1.0.
+ * @glfw3 Added window handle parameter.
+ *
+ * @ingroup window
+ */
+GLFWAPI void glfwSetWindowTitle(GLFWwindow* window, const char* title);
+
+/*! @brief Sets the icon for the specified window.
+ *
+ * This function sets the icon of the specified window. If passed an array of
+ * candidate images, those of or closest to the sizes desired by the system are
+ * selected. If no images are specified, the window reverts to its default
+ * icon.
+ *
+ * The pixels are 32-bit, little-endian, non-premultiplied RGBA, i.e. eight
+ * bits per channel with the red channel first. They are arranged canonically
+ * as packed sequential rows, starting from the top-left corner.
+ *
+ * The desired image sizes vary depending on platform and system settings.
+ * The selected images will be rescaled as needed. Good sizes include 16x16,
+ * 32x32 and 48x48.
+ *
+ * @param[in] window The window whose icon to set.
+ * @param[in] count The number of images in the specified array, or zero to
+ * revert to the default window icon.
+ * @param[in] images The images to create the icon from. This is ignored if
+ * count is zero.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref
+ * GLFW_INVALID_VALUE, @ref GLFW_PLATFORM_ERROR and @ref
+ * GLFW_FEATURE_UNAVAILABLE (see remarks).
+ *
+ * @pointer_lifetime The specified image data is copied before this function
+ * returns.
+ *
+ * @remark @macos Regular windows do not have icons on macOS. This function
+ * will emit @ref GLFW_FEATURE_UNAVAILABLE. The dock icon will be the same as
+ * the application bundle's icon. For more information on bundles, see the
+ * [Bundle Programming Guide](https://developer.apple.com/library/mac/documentation/CoreFoundation/Conceptual/CFBundles/)
+ * in the Mac Developer Library.
+ *
+ * @remark @wayland There is no existing protocol to change an icon; the
+ * window will thus inherit the one defined in the application's desktop file.
+ * This function will emit @ref GLFW_FEATURE_UNAVAILABLE.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref window_icon
+ *
+ * @since Added in version 3.2.
+ *
+ * @ingroup window
+ */
+GLFWAPI void glfwSetWindowIcon(GLFWwindow* window, int count, const GLFWimage* images);
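+
+/* A sketch of setting a single 16x16 icon from caller-provided RGBA pixels;
+ * my_rgba_pixels is a hypothetical application-side buffer.
+ *
+ *     GLFWimage image;
+ *     image.width = 16;
+ *     image.height = 16;
+ *     image.pixels = my_rgba_pixels;  // 16 x 16 x 4 bytes, top-left first
+ *     glfwSetWindowIcon(window, 1, &image);
+ */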
+
+/*! @brief Retrieves the position of the content area of the specified window.
+ *
+ * This function retrieves the position, in screen coordinates, of the
+ * upper-left corner of the content area of the specified window.
+ *
+ * Any or all of the position arguments may be `NULL`. If an error occurs, all
+ * non-`NULL` position arguments will be set to zero.
+ *
+ * @param[in] window The window to query.
+ * @param[out] xpos Where to store the x-coordinate of the upper-left corner of
+ * the content area, or `NULL`.
+ * @param[out] ypos Where to store the y-coordinate of the upper-left corner of
+ * the content area, or `NULL`.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref
+ * GLFW_PLATFORM_ERROR and @ref GLFW_FEATURE_UNAVAILABLE (see remarks).
+ *
+ * @remark @wayland There is no way for an application to retrieve the global
+ * position of its windows. This function will emit @ref
+ * GLFW_FEATURE_UNAVAILABLE.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref window_pos
+ * @sa @ref glfwSetWindowPos
+ *
+ * @since Added in version 3.0.
+ *
+ * @ingroup window
+ */
+GLFWAPI void glfwGetWindowPos(GLFWwindow* window, int* xpos, int* ypos);
+
+/*! @brief Sets the position of the content area of the specified window.
+ *
+ * This function sets the position, in screen coordinates, of the upper-left
+ * corner of the content area of the specified windowed mode window. If the
+ * window is a full screen window, this function does nothing.
+ *
+ * __Do not use this function__ to move an already visible window unless you
+ * have very good reasons for doing so, as it will confuse and annoy the user.
+ *
+ * The window manager may put limits on what positions are allowed. GLFW
+ * cannot and should not override these limits.
+ *
+ * @param[in] window The window to query.
+ * @param[in] xpos The x-coordinate of the upper-left corner of the content area.
+ * @param[in] ypos The y-coordinate of the upper-left corner of the content area.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref
+ * GLFW_PLATFORM_ERROR and @ref GLFW_FEATURE_UNAVAILABLE (see remarks).
+ *
+ * @remark @wayland There is no way for an application to set the global
+ * position of its windows. This function will emit @ref
+ * GLFW_FEATURE_UNAVAILABLE.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref window_pos
+ * @sa @ref glfwGetWindowPos
+ *
+ * @since Added in version 1.0.
+ * @glfw3 Added window handle parameter.
+ *
+ * @ingroup window
+ */
+GLFWAPI void glfwSetWindowPos(GLFWwindow* window, int xpos, int ypos);
+
+/*! @brief Retrieves the size of the content area of the specified window.
+ *
+ * This function retrieves the size, in screen coordinates, of the content area
+ * of the specified window. If you wish to retrieve the size of the
+ * framebuffer of the window in pixels, see @ref glfwGetFramebufferSize.
+ *
+ * Any or all of the size arguments may be `NULL`. If an error occurs, all
+ * non-`NULL` size arguments will be set to zero.
+ *
+ * @param[in] window The window whose size to retrieve.
+ * @param[out] width Where to store the width, in screen coordinates, of the
+ * content area, or `NULL`.
+ * @param[out] height Where to store the height, in screen coordinates, of the
+ * content area, or `NULL`.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref
+ * GLFW_PLATFORM_ERROR.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref window_size
+ * @sa @ref glfwSetWindowSize
+ *
+ * @since Added in version 1.0.
+ * @glfw3 Added window handle parameter.
+ *
+ * @ingroup window
+ */
+GLFWAPI void glfwGetWindowSize(GLFWwindow* window, int* width, int* height);
+
+/*! @brief Sets the size limits of the specified window.
+ *
+ * This function sets the size limits of the content area of the specified
+ * window. If the window is full screen, the size limits only take effect
+ * once it is made windowed. If the window is not resizable, this function
+ * does nothing.
+ *
+ * The size limits are applied immediately to a windowed mode window and may
+ * cause it to be resized.
+ *
+ * The maximum dimensions must be greater than or equal to the minimum
+ * dimensions and all must be greater than or equal to zero.
+ *
+ * @param[in] window The window to set limits for.
+ * @param[in] minwidth The minimum width, in screen coordinates, of the content
+ * area, or `GLFW_DONT_CARE`.
+ * @param[in] minheight The minimum height, in screen coordinates, of the
+ * content area, or `GLFW_DONT_CARE`.
+ * @param[in] maxwidth The maximum width, in screen coordinates, of the content
+ * area, or `GLFW_DONT_CARE`.
+ * @param[in] maxheight The maximum height, in screen coordinates, of the
+ * content area, or `GLFW_DONT_CARE`.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref
+ * GLFW_INVALID_VALUE and @ref GLFW_PLATFORM_ERROR.
+ *
+ * @remark If you set size limits and an aspect ratio that conflict, the
+ * results are undefined.
+ *
+ * @remark @wayland The size limits will not be applied until the window is
+ * actually resized, either by the user or by the compositor.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref window_sizelimits
+ * @sa @ref glfwSetWindowAspectRatio
+ *
+ * @since Added in version 3.2.
+ *
+ * @ingroup window
+ */
+GLFWAPI void glfwSetWindowSizeLimits(GLFWwindow* window, int minwidth, int minheight, int maxwidth, int maxheight);
+
+/*! @brief Sets the aspect ratio of the specified window.
+ *
+ * This function sets the required aspect ratio of the content area of the
+ * specified window. If the window is full screen, the aspect ratio only takes
+ * effect once it is made windowed. If the window is not resizable, this
+ * function does nothing.
+ *
+ * The aspect ratio is specified as a numerator and a denominator and both
+ * values must be greater than zero. For example, the common 16:9 aspect ratio
+ * is specified as 16 and 9, respectively.
+ *
+ * If the numerator and denominator are set to `GLFW_DONT_CARE` then the aspect
+ * ratio limit is disabled.
+ *
+ * The aspect ratio is applied immediately to a windowed mode window and may
+ * cause it to be resized.
+ *
+ * @param[in] window The window to set limits for.
+ * @param[in] numer The numerator of the desired aspect ratio, or
+ * `GLFW_DONT_CARE`.
+ * @param[in] denom The denominator of the desired aspect ratio, or
+ * `GLFW_DONT_CARE`.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref
+ * GLFW_INVALID_VALUE and @ref GLFW_PLATFORM_ERROR.
+ *
+ * @remark If you set size limits and an aspect ratio that conflict, the
+ * results are undefined.
+ *
+ * @remark @wayland The aspect ratio will not be applied until the window is
+ * actually resized, either by the user or by the compositor.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref window_sizelimits
+ * @sa @ref glfwSetWindowSizeLimits
+ *
+ * @since Added in version 3.2.
+ *
+ * @ingroup window
+ */
+GLFWAPI void glfwSetWindowAspectRatio(GLFWwindow* window, int numer, int denom);
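+
+/* A sketch of combining size limits with an aspect ratio; `GLFW_DONT_CARE`
+ * leaves the maximum size unconstrained, and 320x180 is chosen to match the
+ * 16:9 ratio so the two constraints do not conflict.
+ *
+ *     glfwSetWindowSizeLimits(window, 320, 180, GLFW_DONT_CARE, GLFW_DONT_CARE);
+ *     glfwSetWindowAspectRatio(window, 16, 9);
+ */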
+
+/*! @brief Sets the size of the content area of the specified window.
+ *
+ * This function sets the size, in screen coordinates, of the content area of
+ * the specified window.
+ *
+ * For full screen windows, this function updates the resolution of its desired
+ * video mode and switches to the video mode closest to it, without affecting
+ * the window's context. As the context is unaffected, the bit depths of the
+ * framebuffer remain unchanged.
+ *
+ * If you wish to update the refresh rate of the desired video mode in addition
+ * to its resolution, see @ref glfwSetWindowMonitor.
+ *
+ * The window manager may put limits on what sizes are allowed. GLFW cannot
+ * and should not override these limits.
+ *
+ * @param[in] window The window to resize.
+ * @param[in] width The desired width, in screen coordinates, of the window
+ * content area.
+ * @param[in] height The desired height, in screen coordinates, of the window
+ * content area.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref
+ * GLFW_PLATFORM_ERROR.
+ *
+ * @remark @wayland A full screen window will not attempt to change the mode,
+ * no matter what the requested size.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref window_size
+ * @sa @ref glfwGetWindowSize
+ * @sa @ref glfwSetWindowMonitor
+ *
+ * @since Added in version 1.0.
+ * @glfw3 Added window handle parameter.
+ *
+ * @ingroup window
+ */
+GLFWAPI void glfwSetWindowSize(GLFWwindow* window, int width, int height);
+
+/*! @brief Retrieves the size of the framebuffer of the specified window.
+ *
+ * This function retrieves the size, in pixels, of the framebuffer of the
+ * specified window. If you wish to retrieve the size of the window in screen
+ * coordinates, see @ref glfwGetWindowSize.
+ *
+ * Any or all of the size arguments may be `NULL`. If an error occurs, all
+ * non-`NULL` size arguments will be set to zero.
+ *
+ * @param[in] window The window whose framebuffer to query.
+ * @param[out] width Where to store the width, in pixels, of the framebuffer,
+ * or `NULL`.
+ * @param[out] height Where to store the height, in pixels, of the framebuffer,
+ * or `NULL`.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref
+ * GLFW_PLATFORM_ERROR.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref window_fbsize
+ * @sa @ref glfwSetFramebufferSizeCallback
+ *
+ * @since Added in version 3.0.
+ *
+ * @ingroup window
+ */
+GLFWAPI void glfwGetFramebufferSize(GLFWwindow* window, int* width, int* height);
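+
+/* A sketch of sizing a rendering viewport from the framebuffer size rather
+ * than the window size; glViewport stands in for whatever call the renderer
+ * actually uses and is an assumption of this example.
+ *
+ *     int width, height;
+ *     glfwGetFramebufferSize(window, &width, &height);
+ *     glViewport(0, 0, width, height);
+ */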
+
+/*! @brief Retrieves the size of the frame of the window.
+ *
+ * This function retrieves the size, in screen coordinates, of each edge of the
+ * frame of the specified window. This size includes the title bar, if the
+ * window has one. The size of the frame may vary depending on the
+ * [window-related hints](@ref window_hints_wnd) used to create it.
+ *
+ * Because this function retrieves the size of each window frame edge and not
+ * the offset along a particular coordinate axis, the retrieved values will
+ * always be zero or positive.
+ *
+ * Any or all of the size arguments may be `NULL`. If an error occurs, all
+ * non-`NULL` size arguments will be set to zero.
+ *
+ * @param[in] window The window whose frame size to query.
+ * @param[out] left Where to store the size, in screen coordinates, of the left
+ * edge of the window frame, or `NULL`.
+ * @param[out] top Where to store the size, in screen coordinates, of the top
+ * edge of the window frame, or `NULL`.
+ * @param[out] right Where to store the size, in screen coordinates, of the
+ * right edge of the window frame, or `NULL`.
+ * @param[out] bottom Where to store the size, in screen coordinates, of the
+ * bottom edge of the window frame, or `NULL`.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref
+ * GLFW_PLATFORM_ERROR.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref window_size
+ *
+ * @since Added in version 3.1.
+ *
+ * @ingroup window
+ */
+GLFWAPI void glfwGetWindowFrameSize(GLFWwindow* window, int* left, int* top, int* right, int* bottom);
+
+/*! @brief Retrieves the content scale for the specified window.
+ *
+ * This function retrieves the content scale for the specified window. The
+ * content scale is the ratio between the current DPI and the platform's
+ * default DPI. This is especially important for text and any UI elements. If
+ * the pixel dimensions of your UI scaled by this look appropriate on your
+ * machine then it should appear at a reasonable size on other machines
+ * regardless of their DPI and scaling settings. This relies on the system DPI
+ * and scaling settings being somewhat correct.
+ *
+ * On platforms where each monitor can have its own content scale, the window
+ * content scale will depend on which monitor the system considers the window
+ * to be on.
+ *
+ * @param[in] window The window to query.
+ * @param[out] xscale Where to store the x-axis content scale, or `NULL`.
+ * @param[out] yscale Where to store the y-axis content scale, or `NULL`.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref
+ * GLFW_PLATFORM_ERROR.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref window_scale
+ * @sa @ref glfwSetWindowContentScaleCallback
+ * @sa @ref glfwGetMonitorContentScale
+ *
+ * @since Added in version 3.3.
+ *
+ * @ingroup window
+ */
+GLFWAPI void glfwGetWindowContentScale(GLFWwindow* window, float* xscale, float* yscale);
+
+/*! @brief Returns the opacity of the whole window.
+ *
+ * This function returns the opacity of the window, including any decorations.
+ *
+ * The opacity (or alpha) value is a positive finite number between zero and
+ * one, where zero is fully transparent and one is fully opaque. If the system
+ * does not support whole window transparency, this function always returns one.
+ *
+ * The initial opacity value for newly created windows is one.
+ *
+ * @param[in] window The window to query.
+ * @return The opacity value of the specified window.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref
+ * GLFW_PLATFORM_ERROR.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref window_transparency
+ * @sa @ref glfwSetWindowOpacity
+ *
+ * @since Added in version 3.3.
+ *
+ * @ingroup window
+ */
+GLFWAPI float glfwGetWindowOpacity(GLFWwindow* window);
+
+/*! @brief Sets the opacity of the whole window.
+ *
+ * This function sets the opacity of the window, including any decorations.
+ *
+ * The opacity (or alpha) value is a positive finite number between zero and
+ * one, where zero is fully transparent and one is fully opaque.
+ *
+ * The initial opacity value for newly created windows is one.
+ *
+ * A window created with framebuffer transparency may not use whole window
+ * transparency. The results of doing this are undefined.
+ *
+ * @param[in] window The window to set the opacity for.
+ * @param[in] opacity The desired opacity of the specified window.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref
+ * GLFW_PLATFORM_ERROR and @ref GLFW_FEATURE_UNAVAILABLE (see remarks).
+ *
+ * @remark @wayland There is no way to set an opacity factor for a window.
+ * This function will emit @ref GLFW_FEATURE_UNAVAILABLE.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref window_transparency
+ * @sa @ref glfwGetWindowOpacity
+ *
+ * @since Added in version 3.3.
+ *
+ * @ingroup window
+ */
+GLFWAPI void glfwSetWindowOpacity(GLFWwindow* window, float opacity);
+
+/*! @brief Iconifies the specified window.
+ *
+ * This function iconifies (minimizes) the specified window if it was
+ * previously restored. If the window is already iconified, this function does
+ * nothing.
+ *
+ * If the specified window is a full screen window, the original monitor
+ * resolution is restored until the window is restored.
+ *
+ * @param[in] window The window to iconify.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref
+ * GLFW_PLATFORM_ERROR.
+ *
+ * @remark @wayland Once a window is iconified, @ref glfwRestoreWindow won't
+ * be able to restore it. This is a design decision of the xdg-shell
+ * protocol.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref window_iconify
+ * @sa @ref glfwRestoreWindow
+ * @sa @ref glfwMaximizeWindow
+ *
+ * @since Added in version 2.1.
+ * @glfw3 Added window handle parameter.
+ *
+ * @ingroup window
+ */
+GLFWAPI void glfwIconifyWindow(GLFWwindow* window);
+
+/*! @brief Restores the specified window.
+ *
+ * This function restores the specified window if it was previously iconified
+ * (minimized) or maximized. If the window is already restored, this function
+ * does nothing.
+ *
+ * If the specified window is a full screen window, the resolution chosen for
+ * the window is restored on the selected monitor.
+ *
+ * @param[in] window The window to restore.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref
+ * GLFW_PLATFORM_ERROR.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref window_iconify
+ * @sa @ref glfwIconifyWindow
+ * @sa @ref glfwMaximizeWindow
+ *
+ * @since Added in version 2.1.
+ * @glfw3 Added window handle parameter.
+ *
+ * @ingroup window
+ */
+GLFWAPI void glfwRestoreWindow(GLFWwindow* window);
+
+/*! @brief Maximizes the specified window.
+ *
+ * This function maximizes the specified window if it was previously not
+ * maximized. If the window is already maximized, this function does nothing.
+ *
+ * If the specified window is a full screen window, this function does nothing.
+ *
+ * @param[in] window The window to maximize.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref
+ * GLFW_PLATFORM_ERROR.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref window_iconify
+ * @sa @ref glfwIconifyWindow
+ * @sa @ref glfwRestoreWindow
+ *
+ * @since Added in version 3.2.
+ *
+ * @ingroup window
+ */
+GLFWAPI void glfwMaximizeWindow(GLFWwindow* window);
+
+/*! @brief Makes the specified window visible.
+ *
+ * This function makes the specified window visible if it was previously
+ * hidden. If the window is already visible or is in full screen mode, this
+ * function does nothing.
+ *
+ * By default, windowed mode windows are focused when shown.
+ * Set the [GLFW_FOCUS_ON_SHOW](@ref GLFW_FOCUS_ON_SHOW_hint) window hint
+ * to change this behavior for all newly created windows, or change the
+ * behavior for an existing window with @ref glfwSetWindowAttrib.
+ *
+ * @param[in] window The window to make visible.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref
+ * GLFW_PLATFORM_ERROR.
+ *
+ * @remark @wayland Because Wayland wants every frame of the desktop to be
+ * complete, this function does not immediately make the window visible.
+ * Instead it will become visible the next time the window framebuffer is
+ * updated after this call.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref window_hide
+ * @sa @ref glfwHideWindow
+ *
+ * @since Added in version 3.0.
+ *
+ * @ingroup window
+ */
+GLFWAPI void glfwShowWindow(GLFWwindow* window);
+
+/*! @brief Hides the specified window.
+ *
+ * This function hides the specified window if it was previously visible. If
+ * the window is already hidden or is in full screen mode, this function does
+ * nothing.
+ *
+ * @param[in] window The window to hide.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref
+ * GLFW_PLATFORM_ERROR.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref window_hide
+ * @sa @ref glfwShowWindow
+ *
+ * @since Added in version 3.0.
+ *
+ * @ingroup window
+ */
+GLFWAPI void glfwHideWindow(GLFWwindow* window);
+
+/*! @brief Brings the specified window to front and sets input focus.
+ *
+ * This function brings the specified window to front and sets input focus.
+ * The window should already be visible and not iconified.
+ *
+ * By default, both windowed and full screen mode windows are focused when
+ * initially created. Set the [GLFW_FOCUSED](@ref GLFW_FOCUSED_hint) window
+ * hint to disable this behavior.
+ *
+ * Also by default, windowed mode windows are focused when shown
+ * with @ref glfwShowWindow. Set the
+ * [GLFW_FOCUS_ON_SHOW](@ref GLFW_FOCUS_ON_SHOW_hint) window hint to disable
+ * this behavior.
+ *
+ * __Do not use this function__ to steal focus from other applications unless
+ * you are certain that is what the user wants. Focus stealing can be
+ * extremely disruptive.
+ *
+ * For a less disruptive way of getting the user's attention, see
+ * [attention requests](@ref window_attention).
+ *
+ * @param[in] window The window to give input focus.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref
+ * GLFW_PLATFORM_ERROR and @ref GLFW_FEATURE_UNAVAILABLE (see remarks).
+ *
+ * @remark @wayland It is not possible for an application to set the input
+ * focus. This function will emit @ref GLFW_FEATURE_UNAVAILABLE.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref window_focus
+ * @sa @ref window_attention
+ *
+ * @since Added in version 3.2.
+ *
+ * @ingroup window
+ */
+GLFWAPI void glfwFocusWindow(GLFWwindow* window);
+
+/*! @brief Requests user attention to the specified window.
+ *
+ * This function requests user attention to the specified window. On
+ * platforms where this is not supported, attention is requested to the
+ * application as a whole.
+ *
+ * Once the user has given attention, usually by focusing the window or
+ * application, the system will end the request automatically.
+ *
+ * @param[in] window The window to request attention to.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref
+ * GLFW_PLATFORM_ERROR.
+ *
+ * @remark @macos Attention is requested to the application as a whole, not the
+ * specific window.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref window_attention
+ *
+ * @since Added in version 3.3.
+ *
+ * @ingroup window
+ */
+GLFWAPI void glfwRequestWindowAttention(GLFWwindow* window);
+
+/*! @brief Returns the monitor that the window uses for full screen mode.
+ *
+ * This function returns the handle of the monitor that the specified window is
+ * in full screen on.
+ *
+ * @param[in] window The window to query.
+ * @return The monitor, or `NULL` if the window is in windowed mode or an
+ * [error](@ref error_handling) occurred.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref window_monitor
+ * @sa @ref glfwSetWindowMonitor
+ *
+ * @since Added in version 3.0.
+ *
+ * @ingroup window
+ */
+GLFWAPI GLFWmonitor* glfwGetWindowMonitor(GLFWwindow* window);
+
+/*! @brief Sets the mode, monitor, video mode and placement of a window.
+ *
+ * This function sets the monitor that the window uses for full screen mode or,
+ * if the monitor is `NULL`, makes it windowed mode.
+ *
+ * When setting a monitor, this function updates the width, height and refresh
+ * rate of the desired video mode and switches to the video mode closest to it.
+ * The window position is ignored when setting a monitor.
+ *
+ * When the monitor is `NULL`, the position, width and height are used to
+ * place the window content area. The refresh rate is ignored when no monitor
+ * is specified.
+ *
+ * If you only wish to update the resolution of a full screen window or the
+ * size of a windowed mode window, see @ref glfwSetWindowSize.
+ *
+ * When a window transitions from full screen to windowed mode, this function
+ * restores any previous window settings such as whether it is decorated,
+ * floating, resizable, has size or aspect ratio limits, etc.
+ *
+ * @param[in] window The window whose monitor, size or video mode to set.
+ * @param[in] monitor The desired monitor, or `NULL` to set windowed mode.
+ * @param[in] xpos The desired x-coordinate of the upper-left corner of the
+ * content area.
+ * @param[in] ypos The desired y-coordinate of the upper-left corner of the
+ * content area.
+ * @param[in] width The desired width, in screen coordinates, of the content
+ * area or video mode.
+ * @param[in] height The desired height, in screen coordinates, of the content
+ * area or video mode.
+ * @param[in] refreshRate The desired refresh rate, in Hz, of the video mode,
+ * or `GLFW_DONT_CARE`.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref
+ * GLFW_PLATFORM_ERROR.
+ *
+ * @remark The OpenGL or OpenGL ES context will not be destroyed or otherwise
+ * affected by any resizing or mode switching, although you may need to update
+ * your viewport if the framebuffer size has changed.
+ *
+ * @remark @wayland The desired window position is ignored, as there is no way
+ * for an application to set this property.
+ *
+ * @remark @wayland Setting the window to full screen will not attempt to
+ * change the mode, no matter what the requested size or refresh rate.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref window_monitor
+ * @sa @ref window_full_screen
+ * @sa @ref glfwGetWindowMonitor
+ * @sa @ref glfwSetWindowSize
+ *
+ * @since Added in version 3.2.
+ *
+ * @ingroup window
+ */
+GLFWAPI void glfwSetWindowMonitor(GLFWwindow* window, GLFWmonitor* monitor, int xpos, int ypos, int width, int height, int refreshRate);
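+
+/* A sketch of toggling between windowed and full screen mode, restoring the
+ * previously saved windowed placement; the saved_* ints are hypothetical
+ * application state.
+ *
+ *     if (glfwGetWindowMonitor(window))
+ *     {
+ *         // Currently full screen: switch back to windowed mode.
+ *         glfwSetWindowMonitor(window, NULL, saved_xpos, saved_ypos,
+ *                              saved_width, saved_height, GLFW_DONT_CARE);
+ *     }
+ *     else
+ *     {
+ *         // Currently windowed: remember the placement, then go full screen.
+ *         glfwGetWindowPos(window, &saved_xpos, &saved_ypos);
+ *         glfwGetWindowSize(window, &saved_width, &saved_height);
+ *         GLFWmonitor* monitor = glfwGetPrimaryMonitor();
+ *         const GLFWvidmode* mode = glfwGetVideoMode(monitor);
+ *         glfwSetWindowMonitor(window, monitor, 0, 0,
+ *                              mode->width, mode->height, mode->refreshRate);
+ *     }
+ */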
+
+/*! @brief Returns an attribute of the specified window.
+ *
+ * This function returns the value of an attribute of the specified window or
+ * its OpenGL or OpenGL ES context.
+ *
+ * @param[in] window The window to query.
+ * @param[in] attrib The [window attribute](@ref window_attribs) whose value to
+ * return.
+ * @return The value of the attribute, or zero if an
+ * [error](@ref error_handling) occurred.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref
+ * GLFW_INVALID_ENUM and @ref GLFW_PLATFORM_ERROR.
+ *
+ * @remark Framebuffer related hints are not window attributes. See @ref
+ * window_attribs_fb for more information.
+ *
+ * @remark Zero is a valid value for many window and context related
+ * attributes so you cannot use a return value of zero as an indication of
+ * errors. However, this function should not fail as long as it is passed
+ * valid arguments and the library has been [initialized](@ref intro_init).
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref window_attribs
+ * @sa @ref glfwSetWindowAttrib
+ *
+ * @since Added in version 3.0. Replaces `glfwGetWindowParam` and
+ * `glfwGetGLVersion`.
+ *
+ * @ingroup window
+ */
+GLFWAPI int glfwGetWindowAttrib(GLFWwindow* window, int attrib);
+
+/*! @brief Sets an attribute of the specified window.
+ *
+ * This function sets the value of an attribute of the specified window.
+ *
+ * The supported attributes are [GLFW_DECORATED](@ref GLFW_DECORATED_attrib),
+ * [GLFW_RESIZABLE](@ref GLFW_RESIZABLE_attrib),
+ * [GLFW_FLOATING](@ref GLFW_FLOATING_attrib),
+ * [GLFW_AUTO_ICONIFY](@ref GLFW_AUTO_ICONIFY_attrib),
+ * [GLFW_FOCUS_ON_SHOW](@ref GLFW_FOCUS_ON_SHOW_attrib) and
+ * [GLFW_MOUSE_PASSTHROUGH](@ref GLFW_MOUSE_PASSTHROUGH_attrib).
+ *
+ * Some of these attributes are ignored for full screen windows. The new
+ * value will take effect if the window is later made windowed.
+ *
+ * Some of these attributes are ignored for windowed mode windows. The new
+ * value will take effect if the window is later made full screen.
+ *
+ * @param[in] window The window to set the attribute for.
+ * @param[in] attrib A supported window attribute.
+ * @param[in] value `GLFW_TRUE` or `GLFW_FALSE`.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref
+ * GLFW_INVALID_ENUM, @ref GLFW_INVALID_VALUE and @ref GLFW_PLATFORM_ERROR.
+ *
+ * @remark Calling @ref glfwGetWindowAttrib will always return the latest
+ * value, even if that value is ignored by the current mode of the window.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref window_attribs
+ * @sa @ref glfwGetWindowAttrib
+ *
+ * @since Added in version 3.3.
+ *
+ * @ingroup window
+ */
+GLFWAPI void glfwSetWindowAttrib(GLFWwindow* window, int attrib, int value);
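+
+/* A sketch of flipping a single supported attribute at runtime, here the
+ * window decorations, assuming a valid window handle.
+ *
+ *     int decorated = glfwGetWindowAttrib(window, GLFW_DECORATED);
+ *     glfwSetWindowAttrib(window, GLFW_DECORATED, !decorated);
+ */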
+
+/*! @brief Sets the user pointer of the specified window.
+ *
+ * This function sets the user-defined pointer of the specified window. The
+ * current value is retained until the window is destroyed. The initial value
+ * is `NULL`.
+ *
+ * @param[in] window The window whose pointer to set.
+ * @param[in] pointer The new value.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED.
+ *
+ * @thread_safety This function may be called from any thread. Access is not
+ * synchronized.
+ *
+ * @sa @ref window_userptr
+ * @sa @ref glfwGetWindowUserPointer
+ *
+ * @since Added in version 3.0.
+ *
+ * @ingroup window
+ */
+GLFWAPI void glfwSetWindowUserPointer(GLFWwindow* window, void* pointer);
+
+/*! @brief Returns the user pointer of the specified window.
+ *
+ * This function returns the current value of the user-defined pointer of the
+ * specified window. The initial value is `NULL`.
+ *
+ * @param[in] window The window whose pointer to return.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED.
+ *
+ * @thread_safety This function may be called from any thread. Access is not
+ * synchronized.
+ *
+ * @sa @ref window_userptr
+ * @sa @ref glfwSetWindowUserPointer
+ *
+ * @since Added in version 3.0.
+ *
+ * @ingroup window
+ */
+GLFWAPI void* glfwGetWindowUserPointer(GLFWwindow* window);
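+
+/* A sketch of attaching per-window application state via the user pointer;
+ * AppState is a hypothetical application-side struct.
+ *
+ *     AppState state = {0};
+ *     glfwSetWindowUserPointer(window, &state);
+ *     ...
+ *     AppState* s = glfwGetWindowUserPointer(window);
+ */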
+
+/*! @brief Sets the position callback for the specified window.
+ *
+ * This function sets the position callback of the specified window, which is
+ * called when the window is moved. The callback is provided with the
+ * position, in screen coordinates, of the upper-left corner of the content
+ * area of the window.
+ *
+ * @param[in] window The window whose callback to set.
+ * @param[in] callback The new callback, or `NULL` to remove the currently set
+ * callback.
+ * @return The previously set callback, or `NULL` if no callback was set or the
+ * library had not been [initialized](@ref intro_init).
+ *
+ * @callback_signature
+ * @code
+ * void function_name(GLFWwindow* window, int xpos, int ypos)
+ * @endcode
+ * For more information about the callback parameters, see the
+ * [function pointer type](@ref GLFWwindowposfun).
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED.
+ *
+ * @remark @wayland This callback will never be called, as there is no way for
+ * an application to know its global position.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref window_pos
+ *
+ * @since Added in version 3.0.
+ *
+ * @ingroup window
+ */
+GLFWAPI GLFWwindowposfun glfwSetWindowPosCallback(GLFWwindow* window, GLFWwindowposfun callback);
+
+/*! @brief Sets the size callback for the specified window.
+ *
+ * This function sets the size callback of the specified window, which is
+ * called when the window is resized. The callback is provided with the size,
+ * in screen coordinates, of the content area of the window.
+ *
+ * @param[in] window The window whose callback to set.
+ * @param[in] callback The new callback, or `NULL` to remove the currently set
+ * callback.
+ * @return The previously set callback, or `NULL` if no callback was set or the
+ * library had not been [initialized](@ref intro_init).
+ *
+ * @callback_signature
+ * @code
+ * void function_name(GLFWwindow* window, int width, int height)
+ * @endcode
+ * For more information about the callback parameters, see the
+ * [function pointer type](@ref GLFWwindowsizefun).
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref window_size
+ *
+ * @since Added in version 1.0.
+ * @glfw3 Added window handle parameter and return value.
+ *
+ * @ingroup window
+ */
+GLFWAPI GLFWwindowsizefun glfwSetWindowSizeCallback(GLFWwindow* window, GLFWwindowsizefun callback);
+
+/*! @brief Sets the close callback for the specified window.
+ *
+ * This function sets the close callback of the specified window, which is
+ * called when the user attempts to close the window, for example by clicking
+ * the close widget in the title bar.
+ *
+ * The close flag is set before this callback is called, but you can modify it
+ * at any time with @ref glfwSetWindowShouldClose.
+ *
+ * The close callback is not triggered by @ref glfwDestroyWindow.
+ *
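+ * As an illustration, a close callback can veto the request by clearing the
+ * close flag again (`ready_to_quit` is a hypothetical application check):
+ * @code
+ * void window_close_callback(GLFWwindow* window)
+ * {
+ *     if (!ready_to_quit())
+ *         glfwSetWindowShouldClose(window, GLFW_FALSE);
+ * }
+ * @endcode
+ *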
+ * @param[in] window The window whose callback to set.
+ * @param[in] callback The new callback, or `NULL` to remove the currently set
+ * callback.
+ * @return The previously set callback, or `NULL` if no callback was set or the
+ * library had not been [initialized](@ref intro_init).
+ *
+ * @callback_signature
+ * @code
+ * void function_name(GLFWwindow* window)
+ * @endcode
+ * For more information about the callback parameters, see the
+ * [function pointer type](@ref GLFWwindowclosefun).
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED.
+ *
+ * @remark @macos Selecting Quit from the application menu will trigger the
+ * close callback for all windows.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref window_close
+ *
+ * @since Added in version 2.5.
+ * @glfw3 Added window handle parameter and return value.
+ *
+ * @ingroup window
+ */
+GLFWAPI GLFWwindowclosefun glfwSetWindowCloseCallback(GLFWwindow* window, GLFWwindowclosefun callback);
+
+/*! @brief Sets the refresh callback for the specified window.
+ *
+ * This function sets the refresh callback of the specified window, which is
+ * called when the content area of the window needs to be redrawn, for example
+ * if the window has been exposed after having been covered by another window.
+ *
+ * On compositing window systems such as Aero, Compiz, Aqua or Wayland, where
+ * the window contents are saved off-screen, this callback may be called only
+ * very infrequently or never at all.
+ *
+ * @param[in] window The window whose callback to set.
+ * @param[in] callback The new callback, or `NULL` to remove the currently set
+ * callback.
+ * @return The previously set callback, or `NULL` if no callback was set or the
+ * library had not been [initialized](@ref intro_init).
+ *
+ * @callback_signature
+ * @code
+ * void function_name(GLFWwindow* window)
+ * @endcode
+ * For more information about the callback parameters, see the
+ * [function pointer type](@ref GLFWwindowrefreshfun).
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref window_refresh
+ *
+ * @since Added in version 2.5.
+ * @glfw3 Added window handle parameter and return value.
+ *
+ * @ingroup window
+ */
+GLFWAPI GLFWwindowrefreshfun glfwSetWindowRefreshCallback(GLFWwindow* window, GLFWwindowrefreshfun callback);
+
+/*! @brief Sets the focus callback for the specified window.
+ *
+ * This function sets the focus callback of the specified window, which is
+ * called when the window gains or loses input focus.
+ *
+ * After the focus callback is called for a window that lost input focus,
+ * synthetic key and mouse button release events will be generated for all
+ * keys and mouse buttons that had been pressed. For more information, see
+ * @ref glfwSetKeyCallback and @ref glfwSetMouseButtonCallback.
+ *
+ * @param[in] window The window whose callback to set.
+ * @param[in] callback The new callback, or `NULL` to remove the currently set
+ * callback.
+ * @return The previously set callback, or `NULL` if no callback was set or the
+ * library had not been [initialized](@ref intro_init).
+ *
+ * @callback_signature
+ * @code
+ * void function_name(GLFWwindow* window, int focused)
+ * @endcode
+ * For more information about the callback parameters, see the
+ * [function pointer type](@ref GLFWwindowfocusfun).
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref window_focus
+ *
+ * @since Added in version 3.0.
+ *
+ * @ingroup window
+ */
+GLFWAPI GLFWwindowfocusfun glfwSetWindowFocusCallback(GLFWwindow* window, GLFWwindowfocusfun callback);
+
+/*! @brief Sets the iconify callback for the specified window.
+ *
+ * This function sets the iconification callback of the specified window, which
+ * is called when the window is iconified or restored.
+ *
+ * @param[in] window The window whose callback to set.
+ * @param[in] callback The new callback, or `NULL` to remove the currently set
+ * callback.
+ * @return The previously set callback, or `NULL` if no callback was set or the
+ * library had not been [initialized](@ref intro_init).
+ *
+ * @callback_signature
+ * @code
+ * void function_name(GLFWwindow* window, int iconified)
+ * @endcode
+ * For more information about the callback parameters, see the
+ * [function pointer type](@ref GLFWwindowiconifyfun).
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref window_iconify
+ *
+ * @since Added in version 3.0.
+ *
+ * @ingroup window
+ */
+GLFWAPI GLFWwindowiconifyfun glfwSetWindowIconifyCallback(GLFWwindow* window, GLFWwindowiconifyfun callback);
+
+/*! @brief Sets the maximize callback for the specified window.
+ *
+ * This function sets the maximization callback of the specified window, which
+ * is called when the window is maximized or restored.
+ *
+ * @param[in] window The window whose callback to set.
+ * @param[in] callback The new callback, or `NULL` to remove the currently set
+ * callback.
+ * @return The previously set callback, or `NULL` if no callback was set or the
+ * library had not been [initialized](@ref intro_init).
+ *
+ * @callback_signature
+ * @code
+ * void function_name(GLFWwindow* window, int maximized)
+ * @endcode
+ * For more information about the callback parameters, see the
+ * [function pointer type](@ref GLFWwindowmaximizefun).
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref window_maximize
+ *
+ * @since Added in version 3.3.
+ *
+ * @ingroup window
+ */
+GLFWAPI GLFWwindowmaximizefun glfwSetWindowMaximizeCallback(GLFWwindow* window, GLFWwindowmaximizefun callback);
+
+/*! @brief Sets the framebuffer resize callback for the specified window.
+ *
+ * This function sets the framebuffer resize callback of the specified window,
+ * which is called when the framebuffer of the specified window is resized.
+ *
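+ * A typical sketch updates the rendering viewport from this callback; the
+ * OpenGL call below is only an example of what an application might do here:
+ * @code
+ * void framebuffer_size_callback(GLFWwindow* window, int width, int height)
+ * {
+ *     glViewport(0, 0, width, height);
+ * }
+ *
+ * glfwSetFramebufferSizeCallback(window, framebuffer_size_callback);
+ * @endcode
+ *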
+ * @param[in] window The window whose callback to set.
+ * @param[in] callback The new callback, or `NULL` to remove the currently set
+ * callback.
+ * @return The previously set callback, or `NULL` if no callback was set or the
+ * library had not been [initialized](@ref intro_init).
+ *
+ * @callback_signature
+ * @code
+ * void function_name(GLFWwindow* window, int width, int height)
+ * @endcode
+ * For more information about the callback parameters, see the
+ * [function pointer type](@ref GLFWframebuffersizefun).
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref window_fbsize
+ *
+ * @since Added in version 3.0.
+ *
+ * @ingroup window
+ */
+GLFWAPI GLFWframebuffersizefun glfwSetFramebufferSizeCallback(GLFWwindow* window, GLFWframebuffersizefun callback);
+
+/*! @brief Sets the window content scale callback for the specified window.
+ *
+ * This function sets the window content scale callback of the specified window,
+ * which is called when the content scale of the specified window changes.
+ *
+ * @param[in] window The window whose callback to set.
+ * @param[in] callback The new callback, or `NULL` to remove the currently set
+ * callback.
+ * @return The previously set callback, or `NULL` if no callback was set or the
+ * library had not been [initialized](@ref intro_init).
+ *
+ * @callback_signature
+ * @code
+ * void function_name(GLFWwindow* window, float xscale, float yscale)
+ * @endcode
+ * For more information about the callback parameters, see the
+ * [function pointer type](@ref GLFWwindowcontentscalefun).
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref window_scale
+ * @sa @ref glfwGetWindowContentScale
+ *
+ * @since Added in version 3.3.
+ *
+ * @ingroup window
+ */
+GLFWAPI GLFWwindowcontentscalefun glfwSetWindowContentScaleCallback(GLFWwindow* window, GLFWwindowcontentscalefun callback);
+
+/*! @brief Processes all pending events.
+ *
+ * This function processes only those events that are already in the event
+ * queue and then returns immediately. Processing events will cause the window
+ * and input callbacks associated with those events to be called.
+ *
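+ * This is the variant normally used in a continuously rendering main loop,
+ * sketched here with window and buffer swap functions declared elsewhere in
+ * this header:
+ * @code
+ * while (!glfwWindowShouldClose(window))
+ * {
+ *     // Render the frame here, then present it and process pending events.
+ *     glfwSwapBuffers(window);
+ *     glfwPollEvents();
+ * }
+ * @endcode
+ *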
+ * On some platforms, a window move, resize or menu operation will cause event
+ * processing to block. This is due to how event processing is designed on
+ * those platforms. You can use the
+ * [window refresh callback](@ref window_refresh) to redraw the contents of
+ * your window when necessary during such operations.
+ *
+ * Do not assume that callbacks you set will _only_ be called in response to
+ * event processing functions like this one. While it is necessary to poll for
+ * events, window systems that require GLFW to register callbacks of its own
+ * can pass events to GLFW in response to many window system function calls.
+ * GLFW will pass those events on to the application callbacks before
+ * returning.
+ *
+ * Event processing is not required for joystick input to work.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref
+ * GLFW_PLATFORM_ERROR.
+ *
+ * @reentrancy This function must not be called from a callback.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref events
+ * @sa @ref glfwWaitEvents
+ * @sa @ref glfwWaitEventsTimeout
+ *
+ * @since Added in version 1.0.
+ *
+ * @ingroup window
+ */
+GLFWAPI void glfwPollEvents(void);
+
+/*! @brief Waits until events are queued and processes them.
+ *
+ * This function puts the calling thread to sleep until at least one event is
+ * available in the event queue. Once one or more events are available,
+ * it behaves exactly like @ref glfwPollEvents, i.e. the events in the queue
+ * are processed and the function then returns immediately. Processing events
+ * will cause the window and input callbacks associated with those events to be
+ * called.
+ *
+ * Since not all events are associated with callbacks, this function may return
+ * without a callback having been called even if you are monitoring all
+ * callbacks.
+ *
+ * On some platforms, a window move, resize or menu operation will cause event
+ * processing to block. This is due to how event processing is designed on
+ * those platforms. You can use the
+ * [window refresh callback](@ref window_refresh) to redraw the contents of
+ * your window when necessary during such operations.
+ *
+ * Do not assume that callbacks you set will _only_ be called in response to
+ * event processing functions like this one. While it is necessary to poll for
+ * events, window systems that require GLFW to register callbacks of its own
+ * can pass events to GLFW in response to many window system function calls.
+ * GLFW will pass those events on to the application callbacks before
+ * returning.
+ *
+ * Event processing is not required for joystick input to work.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref
+ * GLFW_PLATFORM_ERROR.
+ *
+ * @reentrancy This function must not be called from a callback.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref events
+ * @sa @ref glfwPollEvents
+ * @sa @ref glfwWaitEventsTimeout
+ *
+ * @since Added in version 2.5.
+ *
+ * @ingroup window
+ */
+GLFWAPI void glfwWaitEvents(void);
+
+/*! @brief Waits with timeout until events are queued and processes them.
+ *
+ * This function puts the calling thread to sleep until at least one event is
+ * available in the event queue, or until the specified timeout is reached. If
+ * one or more events are available, it behaves exactly like @ref
+ * glfwPollEvents, i.e. the events in the queue are processed and the function
+ * then returns immediately. Processing events will cause the window and input
+ * callbacks associated with those events to be called.
+ *
+ * The timeout value must be a positive finite number.
+ *
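+ * For example, an application that only needs to redraw about once per
+ * second while idle could wait like this:
+ * @code
+ * glfwWaitEventsTimeout(1.0);
+ * @endcode
+ *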
+ * Since not all events are associated with callbacks, this function may return
+ * without a callback having been called even if you are monitoring all
+ * callbacks.
+ *
+ * On some platforms, a window move, resize or menu operation will cause event
+ * processing to block. This is due to how event processing is designed on
+ * those platforms. You can use the
+ * [window refresh callback](@ref window_refresh) to redraw the contents of
+ * your window when necessary during such operations.
+ *
+ * Do not assume that callbacks you set will _only_ be called in response to
+ * event processing functions like this one. While it is necessary to poll for
+ * events, window systems that require GLFW to register callbacks of its own
+ * can pass events to GLFW in response to many window system function calls.
+ * GLFW will pass those events on to the application callbacks before
+ * returning.
+ *
+ * Event processing is not required for joystick input to work.
+ *
+ * @param[in] timeout The maximum amount of time, in seconds, to wait.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref
+ * GLFW_INVALID_VALUE and @ref GLFW_PLATFORM_ERROR.
+ *
+ * @reentrancy This function must not be called from a callback.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref events
+ * @sa @ref glfwPollEvents
+ * @sa @ref glfwWaitEvents
+ *
+ * @since Added in version 3.2.
+ *
+ * @ingroup window
+ */
+GLFWAPI void glfwWaitEventsTimeout(double timeout);
+
+/*! @brief Posts an empty event to the event queue.
+ *
+ * This function posts an empty event from the current thread to the event
+ * queue, causing @ref glfwWaitEvents or @ref glfwWaitEventsTimeout to return.
+ *
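+ * A common pattern is to wake a main loop that is blocked in @ref
+ * glfwWaitEvents from a worker thread once new work is ready:
+ * @code
+ * // On the worker thread, after producing data for the main thread:
+ * glfwPostEmptyEvent();
+ * @endcode
+ *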
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref
+ * GLFW_PLATFORM_ERROR.
+ *
+ * @thread_safety This function may be called from any thread.
+ *
+ * @sa @ref events
+ * @sa @ref glfwWaitEvents
+ * @sa @ref glfwWaitEventsTimeout
+ *
+ * @since Added in version 3.1.
+ *
+ * @ingroup window
+ */
+GLFWAPI void glfwPostEmptyEvent(void);
+
+/*! @brief Returns the value of an input option for the specified window.
+ *
+ * This function returns the value of an input option for the specified window.
+ * The mode must be one of @ref GLFW_CURSOR, @ref GLFW_STICKY_KEYS,
+ * @ref GLFW_STICKY_MOUSE_BUTTONS, @ref GLFW_LOCK_KEY_MODS or
+ * @ref GLFW_RAW_MOUSE_MOTION.
+ *
+ * @param[in] window The window to query.
+ * @param[in] mode One of `GLFW_CURSOR`, `GLFW_STICKY_KEYS`,
+ * `GLFW_STICKY_MOUSE_BUTTONS`, `GLFW_LOCK_KEY_MODS` or
+ * `GLFW_RAW_MOUSE_MOTION`.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref
+ * GLFW_INVALID_ENUM.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref glfwSetInputMode
+ *
+ * @since Added in version 3.0.
+ *
+ * @ingroup input
+ */
+GLFWAPI int glfwGetInputMode(GLFWwindow* window, int mode);
+
+/*! @brief Sets an input option for the specified window.
+ *
+ * This function sets an input mode option for the specified window. The mode
+ * must be one of @ref GLFW_CURSOR, @ref GLFW_STICKY_KEYS,
+ * @ref GLFW_STICKY_MOUSE_BUTTONS, @ref GLFW_LOCK_KEY_MODS or
+ * @ref GLFW_RAW_MOUSE_MOTION.
+ *
+ * If the mode is `GLFW_CURSOR`, the value must be one of the following cursor
+ * modes:
+ * - `GLFW_CURSOR_NORMAL` makes the cursor visible and behave normally.
+ * - `GLFW_CURSOR_HIDDEN` makes the cursor invisible when it is over the
+ * content area of the window but does not restrict the cursor from leaving.
+ * - `GLFW_CURSOR_DISABLED` hides and grabs the cursor, providing virtual
+ * and unlimited cursor movement. This is useful for implementing, for
+ * example, 3D camera controls.
+ *
+ * If the mode is `GLFW_STICKY_KEYS`, the value must be either `GLFW_TRUE` to
+ * enable sticky keys, or `GLFW_FALSE` to disable it. If sticky keys are
+ * enabled, a key press will ensure that @ref glfwGetKey returns `GLFW_PRESS`
+ * the next time it is called even if the key had been released before the
+ * call. This is useful when you are only interested in whether keys have been
+ * pressed but not when or in which order.
+ *
+ * If the mode is `GLFW_STICKY_MOUSE_BUTTONS`, the value must be either
+ * `GLFW_TRUE` to enable sticky mouse buttons, or `GLFW_FALSE` to disable it.
+ * If sticky mouse buttons are enabled, a mouse button press will ensure that
+ * @ref glfwGetMouseButton returns `GLFW_PRESS` the next time it is called even
+ * if the mouse button had been released before the call. This is useful when
+ * you are only interested in whether mouse buttons have been pressed but not
+ * when or in which order.
+ *
+ * If the mode is `GLFW_LOCK_KEY_MODS`, the value must be either `GLFW_TRUE` to
+ * enable lock key modifier bits, or `GLFW_FALSE` to disable them. If enabled,
+ * callbacks that receive modifier bits will also have the @ref
+ * GLFW_MOD_CAPS_LOCK bit set when the event was generated with Caps Lock on,
+ * and the @ref GLFW_MOD_NUM_LOCK bit when Num Lock was on.
+ *
+ * If the mode is `GLFW_RAW_MOUSE_MOTION`, the value must be either `GLFW_TRUE`
+ * to enable raw (unscaled and unaccelerated) mouse motion when the cursor is
+ * disabled, or `GLFW_FALSE` to disable it. If raw motion is not supported,
+ * attempting to set this will emit @ref GLFW_FEATURE_UNAVAILABLE. Call @ref
+ * glfwRawMouseMotionSupported to check for support.
+ *
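+ * For example, a first-person style camera would typically disable the
+ * cursor and, where supported, request raw motion as well:
+ * @code
+ * glfwSetInputMode(window, GLFW_CURSOR, GLFW_CURSOR_DISABLED);
+ * if (glfwRawMouseMotionSupported())
+ *     glfwSetInputMode(window, GLFW_RAW_MOUSE_MOTION, GLFW_TRUE);
+ * @endcode
+ *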
+ * @param[in] window The window whose input mode to set.
+ * @param[in] mode One of `GLFW_CURSOR`, `GLFW_STICKY_KEYS`,
+ * `GLFW_STICKY_MOUSE_BUTTONS`, `GLFW_LOCK_KEY_MODS` or
+ * `GLFW_RAW_MOUSE_MOTION`.
+ * @param[in] value The new value of the specified input mode.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref
+ * GLFW_INVALID_ENUM, @ref GLFW_PLATFORM_ERROR and @ref
+ * GLFW_FEATURE_UNAVAILABLE (see above).
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref glfwGetInputMode
+ *
+ * @since Added in version 3.0. Replaces `glfwEnable` and `glfwDisable`.
+ *
+ * @ingroup input
+ */
+GLFWAPI void glfwSetInputMode(GLFWwindow* window, int mode, int value);
+
+/*! @brief Returns whether raw mouse motion is supported.
+ *
+ * This function returns whether raw mouse motion is supported on the current
+ * system. This status does not change after GLFW has been initialized so you
+ * only need to check this once. If you attempt to enable raw motion on
+ * a system that does not support it, @ref GLFW_PLATFORM_ERROR will be emitted.
+ *
+ * Raw mouse motion is closer to the actual motion of the mouse across
+ * a surface. It is not affected by the scaling and acceleration applied to
+ * the motion of the desktop cursor. That processing is suitable for a cursor
+ * while raw motion is better for controlling, for example, a 3D camera.
+ * Because of this, raw mouse motion is only provided when the cursor is
+ * disabled.
+ *
+ * @return `GLFW_TRUE` if raw mouse motion is supported on the current machine,
+ * or `GLFW_FALSE` otherwise.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref raw_mouse_motion
+ * @sa @ref glfwSetInputMode
+ *
+ * @since Added in version 3.3.
+ *
+ * @ingroup input
+ */
+GLFWAPI int glfwRawMouseMotionSupported(void);
+
+/*! @brief Returns the layout-specific name of the specified printable key.
+ *
+ * This function returns the name of the specified printable key, encoded as
+ * UTF-8. This is typically the character that key would produce without any
+ * modifier keys, intended for displaying key bindings to the user. For dead
+ * keys, it is typically the diacritic it would add to a character.
+ *
+ * __Do not use this function__ for [text input](@ref input_char). You will
+ * break text input for many languages even if it happens to work for yours.
+ *
+ * If the key is `GLFW_KEY_UNKNOWN`, the scancode is used to identify the key,
+ * otherwise the scancode is ignored. If you specify a non-printable key, or
+ * `GLFW_KEY_UNKNOWN` and a scancode that maps to a non-printable key, this
+ * function returns `NULL` but does not emit an error.
+ *
+ * This behavior allows you to always pass the arguments of the
+ * [key callback](@ref input_key) to this function without modification.
+ *
+ * The printable keys are:
+ * - `GLFW_KEY_APOSTROPHE`
+ * - `GLFW_KEY_COMMA`
+ * - `GLFW_KEY_MINUS`
+ * - `GLFW_KEY_PERIOD`
+ * - `GLFW_KEY_SLASH`
+ * - `GLFW_KEY_SEMICOLON`
+ * - `GLFW_KEY_EQUAL`
+ * - `GLFW_KEY_LEFT_BRACKET`
+ * - `GLFW_KEY_RIGHT_BRACKET`
+ * - `GLFW_KEY_BACKSLASH`
+ * - `GLFW_KEY_WORLD_1`
+ * - `GLFW_KEY_WORLD_2`
+ * - `GLFW_KEY_0` to `GLFW_KEY_9`
+ * - `GLFW_KEY_A` to `GLFW_KEY_Z`
+ * - `GLFW_KEY_KP_0` to `GLFW_KEY_KP_9`
+ * - `GLFW_KEY_KP_DECIMAL`
+ * - `GLFW_KEY_KP_DIVIDE`
+ * - `GLFW_KEY_KP_MULTIPLY`
+ * - `GLFW_KEY_KP_SUBTRACT`
+ * - `GLFW_KEY_KP_ADD`
+ * - `GLFW_KEY_KP_EQUAL`
+ *
+ * Names for printable keys depend on keyboard layout, while names for
+ * non-printable keys are the same across layouts but depend on the application
+ * language and should be localized along with other user interface text.
+ *
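+ * A sketch of using it from a key callback to label a key binding;
+ * `show_key_binding` is a hypothetical application helper:
+ * @code
+ * void key_callback(GLFWwindow* window, int key, int scancode, int action, int mods)
+ * {
+ *     const char* name = glfwGetKeyName(key, scancode);
+ *     if (name)
+ *         show_key_binding(name);
+ * }
+ * @endcode
+ *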
+ * @param[in] key The key to query, or `GLFW_KEY_UNKNOWN`.
+ * @param[in] scancode The scancode of the key to query.
+ * @return The UTF-8 encoded, layout-specific name of the key, or `NULL`.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref
+ * GLFW_PLATFORM_ERROR.
+ *
+ * @remark The contents of the returned string may change when a keyboard
+ * layout change event is received.
+ *
+ * @pointer_lifetime The returned string is allocated and freed by GLFW. You
+ * should not free it yourself. It is valid until the library is terminated.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref input_key_name
+ *
+ * @since Added in version 3.2.
+ *
+ * @ingroup input
+ */
+GLFWAPI const char* glfwGetKeyName(int key, int scancode);
+
+/*! @brief Returns the platform-specific scancode of the specified key.
+ *
+ * This function returns the platform-specific scancode of the specified key.
+ *
+ * If the key is `GLFW_KEY_UNKNOWN` or does not exist on the keyboard, this
+ * function will return `-1`.
+ *
+ * @param[in] key Any [named key](@ref keys).
+ * @return The platform-specific scancode for the key, or `-1` if an
+ * [error](@ref error_handling) occurred.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref
+ * GLFW_INVALID_ENUM and @ref GLFW_PLATFORM_ERROR.
+ *
+ * @thread_safety This function may be called from any thread.
+ *
+ * @sa @ref input_key
+ *
+ * @since Added in version 3.3.
+ *
+ * @ingroup input
+ */
+GLFWAPI int glfwGetKeyScancode(int key);
+
+/*! @brief Returns the last reported state of a keyboard key for the specified
+ * window.
+ *
+ * This function returns the last state reported for the specified key to the
+ * specified window. The returned state is one of `GLFW_PRESS` or
+ * `GLFW_RELEASE`. The higher-level action `GLFW_REPEAT` is only reported to
+ * the key callback.
+ *
+ * If the @ref GLFW_STICKY_KEYS input mode is enabled, this function returns
+ * `GLFW_PRESS` the first time you call it for a key that was pressed, even if
+ * that key has already been released.
+ *
+ * The key functions deal with physical keys, with [key tokens](@ref keys)
+ * named after their use on the standard US keyboard layout. If you want to
+ * input text, use the Unicode character callback instead.
+ *
+ * The [modifier key bit masks](@ref mods) are not key tokens and cannot be
+ * used with this function.
+ *
+ * __Do not use this function__ to implement [text input](@ref input_char).
+ *
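+ * For example, polling a movement key once per frame; `move_forward` is
+ * a hypothetical application action:
+ * @code
+ * if (glfwGetKey(window, GLFW_KEY_W) == GLFW_PRESS)
+ *     move_forward();
+ * @endcode
+ *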
+ * @param[in] window The desired window.
+ * @param[in] key The desired [keyboard key](@ref keys). `GLFW_KEY_UNKNOWN` is
+ * not a valid key for this function.
+ * @return One of `GLFW_PRESS` or `GLFW_RELEASE`.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref
+ * GLFW_INVALID_ENUM.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref input_key
+ *
+ * @since Added in version 1.0.
+ * @glfw3 Added window handle parameter.
+ *
+ * @ingroup input
+ */
+GLFWAPI int glfwGetKey(GLFWwindow* window, int key);
+
+/*! @brief Returns the last reported state of a mouse button for the specified
+ * window.
+ *
+ * This function returns the last state reported for the specified mouse button
+ * to the specified window. The returned state is one of `GLFW_PRESS` or
+ * `GLFW_RELEASE`.
+ *
+ * If the @ref GLFW_STICKY_MOUSE_BUTTONS input mode is enabled, this function
+ * returns `GLFW_PRESS` the first time you call it for a mouse button that was
+ * pressed, even if that mouse button has already been released.
+ *
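+ * For example, polling the left mouse button once per frame; `begin_drag`
+ * is a hypothetical application action:
+ * @code
+ * if (glfwGetMouseButton(window, GLFW_MOUSE_BUTTON_LEFT) == GLFW_PRESS)
+ *     begin_drag();
+ * @endcode
+ *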
+ * @param[in] window The desired window.
+ * @param[in] button The desired [mouse button](@ref buttons).
+ * @return One of `GLFW_PRESS` or `GLFW_RELEASE`.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref
+ * GLFW_INVALID_ENUM.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref input_mouse_button
+ *
+ * @since Added in version 1.0.
+ * @glfw3 Added window handle parameter.
+ *
+ * @ingroup input
+ */
+GLFWAPI int glfwGetMouseButton(GLFWwindow* window, int button);
+
+/*! @brief Retrieves the position of the cursor relative to the content area of
+ * the window.
+ *
+ * This function returns the position of the cursor, in screen coordinates,
+ * relative to the upper-left corner of the content area of the specified
+ * window.
+ *
+ * If the cursor is disabled (with `GLFW_CURSOR_DISABLED`) then the cursor
+ * position is unbounded and limited only by the minimum and maximum values of
+ * a `double`.
+ *
+ * The coordinates can be converted to their integer equivalents with the
+ * `floor` function. Casting directly to an integer type works for positive
+ * coordinates, but fails for negative ones.
+ *
+ * Any or all of the position arguments may be `NULL`. If an error occurs, all
+ * non-`NULL` position arguments will be set to zero.
+ *
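+ * A short sketch, including the `floor` conversion described above, which
+ * requires `<math.h>`:
+ * @code
+ * double xpos, ypos;
+ * glfwGetCursorPos(window, &xpos, &ypos);
+ *
+ * int ix = (int) floor(xpos);
+ * int iy = (int) floor(ypos);
+ * @endcode
+ *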
+ * @param[in] window The desired window.
+ * @param[out] xpos Where to store the cursor x-coordinate, relative to the
+ * left edge of the content area, or `NULL`.
+ * @param[out] ypos Where to store the cursor y-coordinate, relative to the
+ * top edge of the content area, or `NULL`.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref
+ * GLFW_PLATFORM_ERROR.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref cursor_pos
+ * @sa @ref glfwSetCursorPos
+ *
+ * @since Added in version 3.0. Replaces `glfwGetMousePos`.
+ *
+ * @ingroup input
+ */
+GLFWAPI void glfwGetCursorPos(GLFWwindow* window, double* xpos, double* ypos);
+
+/*! @brief Sets the position of the cursor, relative to the content area of the
+ * window.
+ *
+ * This function sets the position, in screen coordinates, of the cursor
+ * relative to the upper-left corner of the content area of the specified
+ * window. The window must have input focus. If the window does not have
+ * input focus when this function is called, it fails silently.
+ *
+ * __Do not use this function__ to implement things like camera controls. GLFW
+ * already provides the `GLFW_CURSOR_DISABLED` cursor mode that hides the
+ * cursor, transparently re-centers it and provides unconstrained cursor
+ * motion. See @ref glfwSetInputMode for more information.
+ *
+ * If the cursor mode is `GLFW_CURSOR_DISABLED` then the cursor position is
+ * unconstrained and limited only by the minimum and maximum values of
+ * a `double`.
+ *
+ * @param[in] window The desired window.
+ * @param[in] xpos The desired x-coordinate, relative to the left edge of the
+ * content area.
+ * @param[in] ypos The desired y-coordinate, relative to the top edge of the
+ * content area.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref
+ * GLFW_PLATFORM_ERROR.
+ *
+ * @remark @wayland This function will only work when the cursor mode is
+ * `GLFW_CURSOR_DISABLED`, otherwise it will do nothing.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref cursor_pos
+ * @sa @ref glfwGetCursorPos
+ *
+ * @since Added in version 3.0. Replaces `glfwSetMousePos`.
+ *
+ * @ingroup input
+ */
+GLFWAPI void glfwSetCursorPos(GLFWwindow* window, double xpos, double ypos);
+
+/*! @brief Creates a custom cursor.
+ *
+ * Creates a new custom cursor image that can be set for a window with @ref
+ * glfwSetCursor. The cursor can be destroyed with @ref glfwDestroyCursor.
+ * Any remaining cursors are destroyed by @ref glfwTerminate.
+ *
+ * The pixels are 32-bit, little-endian, non-premultiplied RGBA, i.e. eight
+ * bits per channel with the red channel first. They are arranged canonically
+ * as packed sequential rows, starting from the top-left corner.
+ *
+ * The cursor hotspot is specified in pixels, relative to the upper-left corner
+ * of the cursor image. Like all other coordinate systems in GLFW, the X-axis
+ * points to the right and the Y-axis points down.
+ *
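+ * A sketch that creates a 16x16 all-white cursor with its hotspot in the
+ * upper-left corner, using the `GLFWimage` struct declared in this header:
+ * @code
+ * unsigned char pixels[16 * 16 * 4];
+ * memset(pixels, 0xff, sizeof(pixels));
+ *
+ * GLFWimage image;
+ * image.width = 16;
+ * image.height = 16;
+ * image.pixels = pixels;
+ *
+ * GLFWcursor* cursor = glfwCreateCursor(&image, 0, 0);
+ * @endcode
+ *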
+ * @param[in] image The desired cursor image.
+ * @param[in] xhot The desired x-coordinate, in pixels, of the cursor hotspot.
+ * @param[in] yhot The desired y-coordinate, in pixels, of the cursor hotspot.
+ * @return The handle of the created cursor, or `NULL` if an
+ * [error](@ref error_handling) occurred.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref
+ * GLFW_INVALID_VALUE and @ref GLFW_PLATFORM_ERROR.
+ *
+ * @pointer_lifetime The specified image data is copied before this function
+ * returns.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref cursor_object
+ * @sa @ref glfwDestroyCursor
+ * @sa @ref glfwCreateStandardCursor
+ *
+ * @since Added in version 3.1.
+ *
+ * @ingroup input
+ */
+GLFWAPI GLFWcursor* glfwCreateCursor(const GLFWimage* image, int xhot, int yhot);
+
+/*! @brief Creates a cursor with a standard shape.
+ *
+ * Returns a cursor with a standard shape that can be set for a window with
+ * @ref glfwSetCursor. The images for these cursors come from the system
+ * cursor theme and their exact appearance will vary between platforms.
+ *
+ * Most of these shapes are guaranteed to exist on every supported platform but
+ * a few may not be present. See the table below for details.
+ *
+ * Cursor shape | Windows | macOS | X11 | Wayland
+ * ------------------------------ | ------- | ----- | ------ | -------
+ * @ref GLFW_ARROW_CURSOR | Yes | Yes | Yes | Yes
+ * @ref GLFW_IBEAM_CURSOR | Yes | Yes | Yes | Yes
+ * @ref GLFW_CROSSHAIR_CURSOR | Yes | Yes | Yes | Yes
+ * @ref GLFW_POINTING_HAND_CURSOR | Yes | Yes | Yes | Yes
+ * @ref GLFW_RESIZE_EW_CURSOR | Yes | Yes | Yes | Yes
+ * @ref GLFW_RESIZE_NS_CURSOR | Yes | Yes | Yes | Yes
+ * @ref GLFW_RESIZE_NWSE_CURSOR | Yes | Yes<sup>1</sup> | Maybe<sup>2</sup> | Maybe<sup>2</sup>
+ * @ref GLFW_RESIZE_NESW_CURSOR | Yes | Yes<sup>1</sup> | Maybe<sup>2</sup> | Maybe<sup>2</sup>
+ * @ref GLFW_RESIZE_ALL_CURSOR | Yes | Yes | Yes | Yes
+ * @ref GLFW_NOT_ALLOWED_CURSOR | Yes | Yes | Maybe<sup>2</sup> | Maybe<sup>2</sup>
+ *
+ * 1) This uses a private system API and may fail in the future.
+ *
+ * 2) This uses a newer standard that not all cursor themes support.
+ *
+ * If the requested shape is not available, this function emits a @ref
+ * GLFW_CURSOR_UNAVAILABLE error and returns `NULL`.
+ *
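+ * For example, requesting the pointing hand shape and applying it to
+ * a window:
+ * @code
+ * GLFWcursor* cursor = glfwCreateStandardCursor(GLFW_POINTING_HAND_CURSOR);
+ * if (cursor)
+ *     glfwSetCursor(window, cursor);
+ * @endcode
+ *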
+ * @param[in] shape One of the [standard shapes](@ref shapes).
+ * @return A new cursor ready to use or `NULL` if an
+ * [error](@ref error_handling) occurred.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref
+ * GLFW_INVALID_ENUM, @ref GLFW_CURSOR_UNAVAILABLE and @ref
+ * GLFW_PLATFORM_ERROR.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref cursor_standard
+ * @sa @ref glfwCreateCursor
+ *
+ * @since Added in version 3.1.
+ *
+ * @ingroup input
+ */
+GLFWAPI GLFWcursor* glfwCreateStandardCursor(int shape);
+
+/*! @brief Destroys a cursor.
+ *
+ * This function destroys a cursor previously created with @ref
+ * glfwCreateCursor. Any remaining cursors will be destroyed by @ref
+ * glfwTerminate.
+ *
+ * If the specified cursor is current for any window, that window will be
+ * reverted to the default cursor. This does not affect the cursor mode.
+ *
+ * @param[in] cursor The cursor object to destroy.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref
+ * GLFW_PLATFORM_ERROR.
+ *
+ * @reentrancy This function must not be called from a callback.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref cursor_object
+ * @sa @ref glfwCreateCursor
+ *
+ * @since Added in version 3.1.
+ *
+ * @ingroup input
+ */
+GLFWAPI void glfwDestroyCursor(GLFWcursor* cursor);
+
+/*! @brief Sets the cursor for the window.
+ *
+ * This function sets the cursor image to be used when the cursor is over the
+ * content area of the specified window. The set cursor will only be visible
+ * when the [cursor mode](@ref cursor_mode) of the window is
+ * `GLFW_CURSOR_NORMAL`.
+ *
+ * On some platforms, the set cursor may not be visible unless the window also
+ * has input focus.
+ *
+ * @param[in] window The window to set the cursor for.
+ * @param[in] cursor The cursor to set, or `NULL` to switch back to the default
+ * arrow cursor.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref
+ * GLFW_PLATFORM_ERROR.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref cursor_object
+ *
+ * @since Added in version 3.1.
+ *
+ * @ingroup input
+ */
+GLFWAPI void glfwSetCursor(GLFWwindow* window, GLFWcursor* cursor);
+
+/*! @brief Sets the key callback.
+ *
+ * This function sets the key callback of the specified window, which is called
+ * when a key is pressed, repeated or released.
+ *
+ * The key functions deal with physical keys, with layout independent
+ * [key tokens](@ref keys) named after their values in the standard US keyboard
+ * layout. If you want to input text, use the
+ * [character callback](@ref glfwSetCharCallback) instead.
+ *
+ * When a window loses input focus, it will generate synthetic key release
+ * events for all pressed keys. You can tell these events from user-generated
+ * events by the fact that the synthetic ones are generated after the focus
+ * loss event has been processed, i.e. after the
+ * [window focus callback](@ref glfwSetWindowFocusCallback) has been called.
+ *
+ * The scancode of a key is specific to that platform or sometimes even to that
+ * machine. Scancodes are intended to allow users to bind keys that don't have
+ * a GLFW key token. Such keys have `key` set to `GLFW_KEY_UNKNOWN`, their
+ * state is not saved and so it cannot be queried with @ref glfwGetKey.
+ *
+ * Sometimes GLFW needs to generate synthetic key events, in which case the
+ * scancode may be zero.
+ *
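+ * A minimal sketch that requests the window to close when Escape is
+ * pressed:
+ * @code
+ * void key_callback(GLFWwindow* window, int key, int scancode, int action, int mods)
+ * {
+ *     if (key == GLFW_KEY_ESCAPE && action == GLFW_PRESS)
+ *         glfwSetWindowShouldClose(window, GLFW_TRUE);
+ * }
+ *
+ * glfwSetKeyCallback(window, key_callback);
+ * @endcode
+ *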
+ * @param[in] window The window whose callback to set.
+ * @param[in] callback The new key callback, or `NULL` to remove the currently
+ * set callback.
+ * @return The previously set callback, or `NULL` if no callback was set or the
+ * library had not been [initialized](@ref intro_init).
+ *
+ * @callback_signature
+ * @code
+ * void function_name(GLFWwindow* window, int key, int scancode, int action, int mods)
+ * @endcode
+ * For more information about the callback parameters, see the
+ * [function pointer type](@ref GLFWkeyfun).
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref input_key
+ *
+ * @since Added in version 1.0.
+ * @glfw3 Added window handle parameter and return value.
+ *
+ * @ingroup input
+ */
+GLFWAPI GLFWkeyfun glfwSetKeyCallback(GLFWwindow* window, GLFWkeyfun callback);
+
+/*! @brief Sets the Unicode character callback.
+ *
+ * This function sets the character callback of the specified window, which is
+ * called when a Unicode character is input.
+ *
+ * The character callback is intended for Unicode text input. As it deals with
+ * characters, it is keyboard layout dependent, whereas the
+ * [key callback](@ref glfwSetKeyCallback) is not. Characters do not map 1:1
+ * to physical keys, as a key may produce zero, one or more characters. If you
+ * want to know whether a specific physical key was pressed or released, see
+ * the key callback instead.
+ *
+ * The character callback behaves as system text input normally does and will
+ * not be called if modifier keys are held down that would prevent normal text
+ * input on that platform, for example a Super (Command) key on macOS or Alt key
+ * on Windows.
+ *
+ * @param[in] window The window whose callback to set.
+ * @param[in] callback The new callback, or `NULL` to remove the currently set
+ * callback.
+ * @return The previously set callback, or `NULL` if no callback was set or the
+ * library had not been [initialized](@ref intro_init).
+ *
+ * @callback_signature
+ * @code
+ * void function_name(GLFWwindow* window, unsigned int codepoint)
+ * @endcode
+ * For more information about the callback parameters, see the
+ * [function pointer type](@ref GLFWcharfun).
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref input_char
+ *
+ * @since Added in version 2.4.
+ * @glfw3 Added window handle parameter and return value.
+ *
+ * @ingroup input
+ */
+GLFWAPI GLFWcharfun glfwSetCharCallback(GLFWwindow* window, GLFWcharfun callback);
+
+/*! @brief Sets the Unicode character with modifiers callback.
+ *
+ * This function sets the character with modifiers callback of the specified
+ * window, which is called when a Unicode character is input regardless of what
+ * modifier keys are used.
+ *
+ * The character with modifiers callback is intended for implementing custom
+ * Unicode character input. For regular Unicode text input, see the
+ * [character callback](@ref glfwSetCharCallback). Like the character
+ * callback, the character with modifiers callback deals with characters and is
+ * keyboard layout dependent. Characters do not map 1:1 to physical keys, as
+ * a key may produce zero, one or more characters. If you want to know whether
+ * a specific physical key was pressed or released, see the
+ * [key callback](@ref glfwSetKeyCallback) instead.
+ *
+ * @param[in] window The window whose callback to set.
+ * @param[in] callback The new callback, or `NULL` to remove the currently set
+ * callback.
+ * @return The previously set callback, or `NULL` if no callback was set or an
+ * [error](@ref error_handling) occurred.
+ *
+ * @callback_signature
+ * @code
+ * void function_name(GLFWwindow* window, unsigned int codepoint, int mods)
+ * @endcode
+ * For more information about the callback parameters, see the
+ * [function pointer type](@ref GLFWcharmodsfun).
+ *
+ * @deprecated Scheduled for removal in version 4.0.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref input_char
+ *
+ * @since Added in version 3.1.
+ *
+ * @ingroup input
+ */
+GLFWAPI GLFWcharmodsfun glfwSetCharModsCallback(GLFWwindow* window, GLFWcharmodsfun callback);
+
+/*! @brief Sets the mouse button callback.
+ *
+ * This function sets the mouse button callback of the specified window, which
+ * is called when a mouse button is pressed or released.
+ *
+ * When a window loses input focus, it will generate synthetic mouse button
+ * release events for all pressed mouse buttons. You can tell these events
+ * from user-generated events by the fact that the synthetic ones are generated
+ * after the focus loss event has been processed, i.e. after the
+ * [window focus callback](@ref glfwSetWindowFocusCallback) has been called.
+ *
+ * @param[in] window The window whose callback to set.
+ * @param[in] callback The new callback, or `NULL` to remove the currently set
+ * callback.
+ * @return The previously set callback, or `NULL` if no callback was set or the
+ * library had not been [initialized](@ref intro_init).
+ *
+ * @callback_signature
+ * @code
+ * void function_name(GLFWwindow* window, int button, int action, int mods)
+ * @endcode
+ * For more information about the callback parameters, see the
+ * [function pointer type](@ref GLFWmousebuttonfun).
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref input_mouse_button
+ *
+ * @since Added in version 1.0.
+ * @glfw3 Added window handle parameter and return value.
+ *
+ * @ingroup input
+ */
+GLFWAPI GLFWmousebuttonfun glfwSetMouseButtonCallback(GLFWwindow* window, GLFWmousebuttonfun callback);
+
+/*! @brief Sets the cursor position callback.
+ *
+ * This function sets the cursor position callback of the specified window,
+ * which is called when the cursor is moved. The callback is provided with the
+ * position, in screen coordinates, relative to the upper-left corner of the
+ * content area of the window.
+ *
+ * @param[in] window The window whose callback to set.
+ * @param[in] callback The new callback, or `NULL` to remove the currently set
+ * callback.
+ * @return The previously set callback, or `NULL` if no callback was set or the
+ * library had not been [initialized](@ref intro_init).
+ *
+ * @callback_signature
+ * @code
+ * void function_name(GLFWwindow* window, double xpos, double ypos)
+ * @endcode
+ * For more information about the callback parameters, see the
+ * [function pointer type](@ref GLFWcursorposfun).
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref cursor_pos
+ *
+ * @since Added in version 3.0. Replaces `glfwSetMousePosCallback`.
+ *
+ * @ingroup input
+ */
+GLFWAPI GLFWcursorposfun glfwSetCursorPosCallback(GLFWwindow* window, GLFWcursorposfun callback);
+
+/*! @brief Sets the cursor enter/leave callback.
+ *
+ * This function sets the cursor boundary crossing callback of the specified
+ * window, which is called when the cursor enters or leaves the content area of
+ * the window.
+ *
+ * @param[in] window The window whose callback to set.
+ * @param[in] callback The new callback, or `NULL` to remove the currently set
+ * callback.
+ * @return The previously set callback, or `NULL` if no callback was set or the
+ * library had not been [initialized](@ref intro_init).
+ *
+ * @callback_signature
+ * @code
+ * void function_name(GLFWwindow* window, int entered)
+ * @endcode
+ * For more information about the callback parameters, see the
+ * [function pointer type](@ref GLFWcursorenterfun).
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref cursor_enter
+ *
+ * @since Added in version 3.0.
+ *
+ * @ingroup input
+ */
+GLFWAPI GLFWcursorenterfun glfwSetCursorEnterCallback(GLFWwindow* window, GLFWcursorenterfun callback);
+
+/*! @brief Sets the scroll callback.
+ *
+ * This function sets the scroll callback of the specified window, which is
+ * called when a scrolling device is used, such as a mouse wheel or scrolling
+ * area of a touchpad.
+ *
+ * The scroll callback receives all scrolling input, like that from a mouse
+ * wheel or a touchpad scrolling area.
+ *
+ * @param[in] window The window whose callback to set.
+ * @param[in] callback The new scroll callback, or `NULL` to remove the
+ * currently set callback.
+ * @return The previously set callback, or `NULL` if no callback was set or the
+ * library had not been [initialized](@ref intro_init).
+ *
+ * @callback_signature
+ * @code
+ * void function_name(GLFWwindow* window, double xoffset, double yoffset)
+ * @endcode
+ * For more information about the callback parameters, see the
+ * [function pointer type](@ref GLFWscrollfun).
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref scrolling
+ *
+ * @since Added in version 3.0. Replaces `glfwSetMouseWheelCallback`.
+ *
+ * @ingroup input
+ */
+GLFWAPI GLFWscrollfun glfwSetScrollCallback(GLFWwindow* window, GLFWscrollfun callback);
+
+/*! @brief Sets the path drop callback.
+ *
+ * This function sets the path drop callback of the specified window, which is
+ * called when one or more dragged paths are dropped on the window.
+ *
+ * Because the path array and its strings may have been generated specifically
+ * for that event, they are not guaranteed to be valid after the callback has
+ * returned. If you wish to use them after the callback returns, you need to
+ * make a deep copy.
+ *
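+ * A sketch of such a deep copy, using the POSIX `strdup` for brevity;
+ * `add_dropped_path` is a hypothetical application function and the copies
+ * must later be freed by the application:
+ * @code
+ * void drop_callback(GLFWwindow* window, int path_count, const char* paths[])
+ * {
+ *     for (int i = 0; i < path_count; i++)
+ *         add_dropped_path(strdup(paths[i]));
+ * }
+ * @endcode
+ *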
+ * @param[in] window The window whose callback to set.
+ * @param[in] callback The new file drop callback, or `NULL` to remove the
+ * currently set callback.
+ * @return The previously set callback, or `NULL` if no callback was set or the
+ * library had not been [initialized](@ref intro_init).
+ *
+ * @callback_signature
+ * @code
+ * void function_name(GLFWwindow* window, int path_count, const char* paths[])
+ * @endcode
+ * For more information about the callback parameters, see the
+ * [function pointer type](@ref GLFWdropfun).
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED.
+ *
+ * @remark @wayland File drop is currently unimplemented.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref path_drop
+ *
+ * @since Added in version 3.1.
+ *
+ * @ingroup input
+ */
+GLFWAPI GLFWdropfun glfwSetDropCallback(GLFWwindow* window, GLFWdropfun callback);
+
+/*! @brief Returns whether the specified joystick is present.
+ *
+ * This function returns whether the specified joystick is present.
+ *
+ * There is no need to call this function before other functions that accept
+ * a joystick ID, as they all check for presence before performing any other
+ * work.
+ *
+ * @param[in] jid The [joystick](@ref joysticks) to query.
+ * @return `GLFW_TRUE` if the joystick is present, or `GLFW_FALSE` otherwise.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref
+ * GLFW_INVALID_ENUM and @ref GLFW_PLATFORM_ERROR.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref joystick
+ *
+ * @since Added in version 3.0. Replaces `glfwGetJoystickParam`.
+ *
+ * @ingroup input
+ */
+GLFWAPI int glfwJoystickPresent(int jid);
+
+/*! @brief Returns the values of all axes of the specified joystick.
+ *
+ * This function returns the values of all axes of the specified joystick.
+ * Each element in the array is a value between -1.0 and 1.0.
+ *
+ * If the specified joystick is not present this function will return `NULL`
+ * but will not generate an error. This can be used instead of first calling
+ * @ref glfwJoystickPresent.
+ *
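+ * For example, reading the axes of the first joystick, if it is present:
+ * @code
+ * int count;
+ * const float* axes = glfwGetJoystickAxes(GLFW_JOYSTICK_1, &count);
+ * if (axes)
+ * {
+ *     // axes[0] .. axes[count - 1] each hold a value between -1.0 and 1.0.
+ * }
+ * @endcode
+ *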
+ * @param[in] jid The [joystick](@ref joysticks) to query.
+ * @param[out] count Where to store the number of axis values in the returned
+ * array. This is set to zero if the joystick is not present or an error
+ * occurred.
+ * @return An array of axis values, or `NULL` if the joystick is not present or
+ * an [error](@ref error_handling) occurred.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref
+ * GLFW_INVALID_ENUM and @ref GLFW_PLATFORM_ERROR.
+ *
+ * @pointer_lifetime The returned array is allocated and freed by GLFW. You
+ * should not free it yourself. It is valid until the specified joystick is
+ * disconnected or the library is terminated.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref joystick_axis
+ *
+ * @since Added in version 3.0. Replaces `glfwGetJoystickPos`.
+ *
+ * @ingroup input
+ */
+GLFWAPI const float* glfwGetJoystickAxes(int jid, int* count);
+
+/*! @brief Returns the state of all buttons of the specified joystick.
+ *
+ * This function returns the state of all buttons of the specified joystick.
+ * Each element in the array is either `GLFW_PRESS` or `GLFW_RELEASE`.
+ *
+ * For backward compatibility with earlier versions that did not have @ref
+ * glfwGetJoystickHats, the button array also includes all hats, each
+ * represented as four buttons. The hats are in the same order as returned by
+ * __glfwGetJoystickHats__ and are in the order _up_, _right_, _down_ and
+ * _left_. To disable these extra buttons, set the @ref
+ * GLFW_JOYSTICK_HAT_BUTTONS init hint before initialization.
+ *
+ * If the specified joystick is not present this function will return `NULL`
+ * but will not generate an error. This can be used instead of first calling
+ * @ref glfwJoystickPresent.
+ *
+ * @param[in] jid The [joystick](@ref joysticks) to query.
+ * @param[out] count Where to store the number of button states in the returned
+ * array. This is set to zero if the joystick is not present or an error
+ * occurred.
+ * @return An array of button states, or `NULL` if the joystick is not present
+ * or an [error](@ref error_handling) occurred.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref
+ * GLFW_INVALID_ENUM and @ref GLFW_PLATFORM_ERROR.
+ *
+ * @pointer_lifetime The returned array is allocated and freed by GLFW. You
+ * should not free it yourself. It is valid until the specified joystick is
+ * disconnected or the library is terminated.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref joystick_button
+ *
+ * @since Added in version 2.2.
+ * @glfw3 Changed to return a dynamic array.
+ *
+ * @ingroup input
+ */
+GLFWAPI const unsigned char* glfwGetJoystickButtons(int jid, int* count);
+
+/*! @brief Returns the state of all hats of the specified joystick.
+ *
+ * This function returns the state of all hats of the specified joystick.
+ * Each element in the array is one of the following values:
+ *
+ * Name | Value
+ * ---- | -----
+ * `GLFW_HAT_CENTERED` | 0
+ * `GLFW_HAT_UP` | 1
+ * `GLFW_HAT_RIGHT` | 2
+ * `GLFW_HAT_DOWN` | 4
+ * `GLFW_HAT_LEFT` | 8
+ * `GLFW_HAT_RIGHT_UP` | `GLFW_HAT_RIGHT` \| `GLFW_HAT_UP`
+ * `GLFW_HAT_RIGHT_DOWN` | `GLFW_HAT_RIGHT` \| `GLFW_HAT_DOWN`
+ * `GLFW_HAT_LEFT_UP` | `GLFW_HAT_LEFT` \| `GLFW_HAT_UP`
+ * `GLFW_HAT_LEFT_DOWN` | `GLFW_HAT_LEFT` \| `GLFW_HAT_DOWN`
+ *
+ * The diagonal directions are bitwise combinations of the primary (up, right,
+ * down and left) directions and you can test for these individually by ANDing
+ * it with the corresponding direction.
+ *
+ * @code
+ * if (hats[2] & GLFW_HAT_RIGHT)
+ * {
+ * // State of hat 2 could be right-up, right or right-down
+ * }
+ * @endcode
+ *
+ * If the specified joystick is not present this function will return `NULL`
+ * but will not generate an error. This can be used instead of first calling
+ * @ref glfwJoystickPresent.
+ *
+ * @param[in] jid The [joystick](@ref joysticks) to query.
+ * @param[out] count Where to store the number of hat states in the returned
+ * array. This is set to zero if the joystick is not present or an error
+ * occurred.
+ * @return An array of hat states, or `NULL` if the joystick is not present
+ * or an [error](@ref error_handling) occurred.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref
+ * GLFW_INVALID_ENUM and @ref GLFW_PLATFORM_ERROR.
+ *
+ * @pointer_lifetime The returned array is allocated and freed by GLFW. You
+ * should not free it yourself. It is valid until the specified joystick is
+ * disconnected, this function is called again for that joystick or the library
+ * is terminated.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref joystick_hat
+ *
+ * @since Added in version 3.3.
+ *
+ * @ingroup input
+ */
+GLFWAPI const unsigned char* glfwGetJoystickHats(int jid, int* count);
+
+/*! @brief Returns the name of the specified joystick.
+ *
+ * This function returns the name, encoded as UTF-8, of the specified joystick.
+ * The returned string is allocated and freed by GLFW. You should not free it
+ * yourself.
+ *
+ * If the specified joystick is not present this function will return `NULL`
+ * but will not generate an error. This can be used instead of first calling
+ * @ref glfwJoystickPresent.
+ *
+ * @param[in] jid The [joystick](@ref joysticks) to query.
+ * @return The UTF-8 encoded name of the joystick, or `NULL` if the joystick
+ * is not present or an [error](@ref error_handling) occurred.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref
+ * GLFW_INVALID_ENUM and @ref GLFW_PLATFORM_ERROR.
+ *
+ * @pointer_lifetime The returned string is allocated and freed by GLFW. You
+ * should not free it yourself. It is valid until the specified joystick is
+ * disconnected or the library is terminated.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref joystick_name
+ *
+ * @since Added in version 3.0.
+ *
+ * @ingroup input
+ */
+GLFWAPI const char* glfwGetJoystickName(int jid);
+
+/*! @brief Returns the SDL compatible GUID of the specified joystick.
+ *
+ * This function returns the SDL compatible GUID, as a UTF-8 encoded
+ * hexadecimal string, of the specified joystick. The returned string is
+ * allocated and freed by GLFW. You should not free it yourself.
+ *
+ * The GUID is what connects a joystick to a gamepad mapping. A connected
+ * joystick will always have a GUID even if there is no gamepad mapping
+ * assigned to it.
+ *
+ * If the specified joystick is not present this function will return `NULL`
+ * but will not generate an error. This can be used instead of first calling
+ * @ref glfwJoystickPresent.
+ *
+ * The GUID uses the format introduced in SDL 2.0.5. This GUID tries to
+ * uniquely identify the make and model of a joystick but does not identify
+ * a specific unit, e.g. all wired Xbox 360 controllers will have the same
+ * GUID on that platform. The GUID for a unit may vary between platforms
+ * depending on what hardware information the platform specific APIs provide.
+ *
+ * @param[in] jid The [joystick](@ref joysticks) to query.
+ * @return The UTF-8 encoded GUID of the joystick, or `NULL` if the joystick
+ * is not present or an [error](@ref error_handling) occurred.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref
+ * GLFW_INVALID_ENUM and @ref GLFW_PLATFORM_ERROR.
+ *
+ * @pointer_lifetime The returned string is allocated and freed by GLFW. You
+ * should not free it yourself. It is valid until the specified joystick is
+ * disconnected or the library is terminated.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref gamepad
+ *
+ * @since Added in version 3.3.
+ *
+ * @ingroup input
+ */
+GLFWAPI const char* glfwGetJoystickGUID(int jid);
+
+/*! @brief Sets the user pointer of the specified joystick.
+ *
+ * This function sets the user-defined pointer of the specified joystick. The
+ * current value is retained until the joystick is disconnected. The initial
+ * value is `NULL`.
+ *
+ * This function may be called from the joystick callback, even for a joystick
+ * that is being disconnected.
+ *
+ * @param[in] jid The joystick whose pointer to set.
+ * @param[in] pointer The new value.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED.
+ *
+ * @thread_safety This function may be called from any thread. Access is not
+ * synchronized.
+ *
+ * @sa @ref joystick_userptr
+ * @sa @ref glfwGetJoystickUserPointer
+ *
+ * @since Added in version 3.3.
+ *
+ * @ingroup input
+ */
+GLFWAPI void glfwSetJoystickUserPointer(int jid, void* pointer);
+
+/*! @brief Returns the user pointer of the specified joystick.
+ *
+ * This function returns the current value of the user-defined pointer of the
+ * specified joystick. The initial value is `NULL`.
+ *
+ * This function may be called from the joystick callback, even for a joystick
+ * that is being disconnected.
+ *
+ * @param[in] jid The joystick whose pointer to return.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED.
+ *
+ * @thread_safety This function may be called from any thread. Access is not
+ * synchronized.
+ *
+ * @sa @ref joystick_userptr
+ * @sa @ref glfwSetJoystickUserPointer
+ *
+ * @since Added in version 3.3.
+ *
+ * @ingroup input
+ */
+GLFWAPI void* glfwGetJoystickUserPointer(int jid);
+
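+/* A sketch of pairing the two user pointer functions above; JoystickState is
+ * a hypothetical application-defined type:
+ *
+ * @code
+ * typedef struct JoystickState { int rumble_level; } JoystickState;
+ *
+ * static JoystickState states[GLFW_JOYSTICK_LAST + 1];
+ *
+ * void attach_joystick_state(int jid)
+ * {
+ *     glfwSetJoystickUserPointer(jid, &states[jid]);
+ * }
+ *
+ * JoystickState* joystick_state(int jid)
+ * {
+ *     return (JoystickState*) glfwGetJoystickUserPointer(jid);
+ * }
+ * @endcode
+ */
+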
+/*! @brief Returns whether the specified joystick has a gamepad mapping.
+ *
+ * This function returns whether the specified joystick is both present and has
+ * a gamepad mapping.
+ *
+ * If the specified joystick is present but does not have a gamepad mapping
+ * this function will return `GLFW_FALSE` but will not generate an error. Call
+ * @ref glfwJoystickPresent to check if a joystick is present regardless of
+ * whether it has a mapping.
+ *
+ * @param[in] jid The [joystick](@ref joysticks) to query.
+ * @return `GLFW_TRUE` if a joystick is both present and has a gamepad mapping,
+ * or `GLFW_FALSE` otherwise.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref
+ * GLFW_INVALID_ENUM.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref gamepad
+ * @sa @ref glfwGetGamepadState
+ *
+ * @since Added in version 3.3.
+ *
+ * @ingroup input
+ */
+GLFWAPI int glfwJoystickIsGamepad(int jid);
+
+/*! @brief Sets the joystick configuration callback.
+ *
+ * This function sets the joystick configuration callback, or removes the
+ * currently set callback. This is called when a joystick is connected to or
+ * disconnected from the system.
+ *
+ * For joystick connection and disconnection events to be delivered on all
+ * platforms, you need to call one of the [event processing](@ref events)
+ * functions. Joystick disconnection may also be detected and the callback
+ * called by joystick functions. Such a function will then return whatever it
+ * returns for a joystick that is not present.
+ *
+ * @param[in] callback The new callback, or `NULL` to remove the currently set
+ * callback.
+ * @return The previously set callback, or `NULL` if no callback was set or the
+ * library had not been [initialized](@ref intro_init).
+ *
+ * @callback_signature
+ * @code
+ * void function_name(int jid, int event)
+ * @endcode
+ * For more information about the callback parameters, see the
+ * [function pointer type](@ref GLFWjoystickfun).
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref joystick_event
+ *
+ * @since Added in version 3.2.
+ *
+ * @ingroup input
+ */
+GLFWAPI GLFWjoystickfun glfwSetJoystickCallback(GLFWjoystickfun callback);
+
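+/* A connection callback sketch; the callback is assumed to be registered
+ * after initialization and <stdio.h> to be included:
+ *
+ * @code
+ * void joystick_callback(int jid, int event)
+ * {
+ *     if (event == GLFW_CONNECTED)
+ *         printf("Joystick %i connected\n", jid);
+ *     else if (event == GLFW_DISCONNECTED)
+ *         printf("Joystick %i disconnected\n", jid);
+ * }
+ *
+ * void install_joystick_callback(void)
+ * {
+ *     glfwSetJoystickCallback(joystick_callback);
+ * }
+ * @endcode
+ */
+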
+/*! @brief Adds the specified SDL_GameControllerDB gamepad mappings.
+ *
+ * This function parses the specified ASCII encoded string and updates the
+ * internal list with any gamepad mappings it finds. This string may
+ * contain either a single gamepad mapping or many mappings separated by
+ * newlines. The parser supports the full format of the `gamecontrollerdb.txt`
+ * source file including empty lines and comments.
+ *
+ * See @ref gamepad_mapping for a description of the format.
+ *
+ * If there is already a gamepad mapping for a given GUID in the internal list,
+ * it will be replaced by the one passed to this function. If the library is
+ * terminated and re-initialized the internal list will revert to the built-in
+ * default.
+ *
+ * @param[in] string The string containing the gamepad mappings.
+ * @return `GLFW_TRUE` if successful, or `GLFW_FALSE` if an
+ * [error](@ref error_handling) occurred.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref
+ * GLFW_INVALID_VALUE.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref gamepad
+ * @sa @ref glfwJoystickIsGamepad
+ * @sa @ref glfwGetGamepadName
+ *
+ * @since Added in version 3.3.
+ *
+ * @ingroup input
+ */
+GLFWAPI int glfwUpdateGamepadMappings(const char* string);
+
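+/* A sketch of adding a mapping at run time; the GUID and mapping fields below
+ * are schematic placeholders, not a real database entry, and <stdio.h> is
+ * assumed to be included:
+ *
+ * @code
+ * const char* mapping =
+ *     "03000000000000000000000000000000,Example Pad,"
+ *     "a:b0,b:b1,leftx:a0,lefty:a1,start:b9,";
+ *
+ * if (!glfwUpdateGamepadMappings(mapping))
+ *     fprintf(stderr, "Failed to update gamepad mappings\n");
+ * @endcode
+ */
+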
+/*! @brief Returns the human-readable gamepad name for the specified joystick.
+ *
+ * This function returns the human-readable name of the gamepad from the
+ * gamepad mapping assigned to the specified joystick.
+ *
+ * If the specified joystick is not present or does not have a gamepad mapping
+ * this function will return `NULL` but will not generate an error. Call
+ * @ref glfwJoystickPresent to check whether it is present regardless of
+ * whether it has a mapping.
+ *
+ * @param[in] jid The [joystick](@ref joysticks) to query.
+ * @return The UTF-8 encoded name of the gamepad, or `NULL` if the
+ * joystick is not present, does not have a mapping or an
+ * [error](@ref error_handling) occurred.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref
+ * GLFW_INVALID_ENUM.
+ *
+ * @pointer_lifetime The returned string is allocated and freed by GLFW. You
+ * should not free it yourself. It is valid until the specified joystick is
+ * disconnected, the gamepad mappings are updated or the library is terminated.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref gamepad
+ * @sa @ref glfwJoystickIsGamepad
+ *
+ * @since Added in version 3.3.
+ *
+ * @ingroup input
+ */
+GLFWAPI const char* glfwGetGamepadName(int jid);
+
+/*! @brief Retrieves the state of the specified joystick remapped as a gamepad.
+ *
+ * This function retrieves the state of the specified joystick remapped to
+ * an Xbox-like gamepad.
+ *
+ * If the specified joystick is not present or does not have a gamepad mapping
+ * this function will return `GLFW_FALSE` but will not generate an error. Call
+ * @ref glfwJoystickPresent to check whether it is present regardless of
+ * whether it has a mapping.
+ *
+ * The Guide button may not be available for input as it is often hooked by the
+ * system or the Steam client.
+ *
+ * Not all devices have all the buttons or axes provided by @ref
+ * GLFWgamepadstate. Unavailable buttons and axes will always report
+ * `GLFW_RELEASE` and 0.0 respectively.
+ *
+ * @param[in] jid The [joystick](@ref joysticks) to query.
+ * @param[out] state The gamepad input state of the joystick.
+ * @return `GLFW_TRUE` if successful, or `GLFW_FALSE` if no joystick is
+ * connected, it has no gamepad mapping or an [error](@ref error_handling)
+ * occurred.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref
+ * GLFW_INVALID_ENUM.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref gamepad
+ * @sa @ref glfwUpdateGamepadMappings
+ * @sa @ref glfwJoystickIsGamepad
+ *
+ * @since Added in version 3.3.
+ *
+ * @ingroup input
+ */
+GLFWAPI int glfwGetGamepadState(int jid, GLFWgamepadstate* state);
+
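+/* A per-frame polling sketch for the gamepad functions above, assuming the
+ * library has been initialized:
+ *
+ * @code
+ * GLFWgamepadstate state;
+ *
+ * if (glfwGetGamepadState(GLFW_JOYSTICK_1, &state))
+ * {
+ *     const int jump_pressed =
+ *         (state.buttons[GLFW_GAMEPAD_BUTTON_A] == GLFW_PRESS);
+ *     const float move_x = state.axes[GLFW_GAMEPAD_AXIS_LEFT_X];
+ * }
+ * @endcode
+ */
+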
+/*! @brief Sets the clipboard to the specified string.
+ *
+ * This function sets the system clipboard to the specified, UTF-8 encoded
+ * string.
+ *
+ * @param[in] window Deprecated. Any valid window or `NULL`.
+ * @param[in] string A UTF-8 encoded string.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref
+ * GLFW_PLATFORM_ERROR.
+ *
+ * @pointer_lifetime The specified string is copied before this function
+ * returns.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref clipboard
+ * @sa @ref glfwGetClipboardString
+ *
+ * @since Added in version 3.0.
+ *
+ * @ingroup input
+ */
+GLFWAPI void glfwSetClipboardString(GLFWwindow* window, const char* string);
+
+/*! @brief Returns the contents of the clipboard as a string.
+ *
+ * This function returns the contents of the system clipboard, if it contains
+ * or is convertible to a UTF-8 encoded string. If the clipboard is empty or
+ * if its contents cannot be converted, `NULL` is returned and a @ref
+ * GLFW_FORMAT_UNAVAILABLE error is generated.
+ *
+ * @param[in] window Deprecated. Any valid window or `NULL`.
+ * @return The contents of the clipboard as a UTF-8 encoded string, or `NULL`
+ * if an [error](@ref error_handling) occurred.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref
+ * GLFW_FORMAT_UNAVAILABLE and @ref GLFW_PLATFORM_ERROR.
+ *
+ * @pointer_lifetime The returned string is allocated and freed by GLFW. You
+ * should not free it yourself. It is valid until the next call to @ref
+ * glfwGetClipboardString or @ref glfwSetClipboardString, or until the library
+ * is terminated.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref clipboard
+ * @sa @ref glfwSetClipboardString
+ *
+ * @since Added in version 3.0.
+ *
+ * @ingroup input
+ */
+GLFWAPI const char* glfwGetClipboardString(GLFWwindow* window);
+
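+/* A round-trip sketch for the two clipboard functions above, run on the main
+ * thread with an initialized library and <stdio.h> included:
+ *
+ * @code
+ * glfwSetClipboardString(NULL, "A string to place on the clipboard");
+ *
+ * const char* text = glfwGetClipboardString(NULL);
+ * if (text)
+ *     printf("Clipboard contains \"%s\"\n", text);
+ * @endcode
+ */
+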
+/*! @brief Returns the GLFW time.
+ *
+ * This function returns the current GLFW time, in seconds. Unless the time
+ * has been set using @ref glfwSetTime it measures time elapsed since GLFW was
+ * initialized.
+ *
+ * This function and @ref glfwSetTime are helper functions on top of @ref
+ * glfwGetTimerFrequency and @ref glfwGetTimerValue.
+ *
+ * The resolution of the timer is system dependent, but is usually on the order
+ * of a few micro- or nanoseconds. It uses the highest-resolution monotonic
+ * time source on each operating system.
+ *
+ * @return The current time, in seconds, or zero if an
+ * [error](@ref error_handling) occurred.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED.
+ *
+ * @thread_safety This function may be called from any thread. Reading and
+ * writing of the internal base time is not atomic, so it needs to be
+ * externally synchronized with calls to @ref glfwSetTime.
+ *
+ * @sa @ref time
+ *
+ * @since Added in version 1.0.
+ *
+ * @ingroup input
+ */
+GLFWAPI double glfwGetTime(void);
+
+/*! @brief Sets the GLFW time.
+ *
+ * This function sets the current GLFW time, in seconds. The value must be
+ * a positive finite number less than or equal to 18446744073.0, which is
+ * approximately 584.5 years.
+ *
+ * This function and @ref glfwGetTime are helper functions on top of @ref
+ * glfwGetTimerFrequency and @ref glfwGetTimerValue.
+ *
+ * @param[in] time The new value, in seconds.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref
+ * GLFW_INVALID_VALUE.
+ *
+ * @remark The upper limit of GLFW time is calculated as
+ * floor((2<sup>64</sup> - 1) / 10<sup>9</sup>) and is due to implementations
+ * storing nanoseconds in 64 bits. The limit may be increased in the future.
+ *
+ * @thread_safety This function may be called from any thread. Reading and
+ * writing of the internal base time is not atomic, so it needs to be
+ * externally synchronized with calls to @ref glfwGetTime.
+ *
+ * @sa @ref time
+ *
+ * @since Added in version 2.2.
+ *
+ * @ingroup input
+ */
+GLFWAPI void glfwSetTime(double time);
+
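+/* A frame-delta sketch built on @ref glfwGetTime; running and update() are
+ * placeholders for application state and code:
+ *
+ * @code
+ * double last = glfwGetTime();
+ *
+ * while (running)
+ * {
+ *     const double now = glfwGetTime();
+ *     update(now - last);  // seconds elapsed since the previous frame
+ *     last = now;
+ * }
+ * @endcode
+ */
+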
+/*! @brief Returns the current value of the raw timer.
+ *
+ * This function returns the current value of the raw timer, measured in
+ * 1&nbsp;/&nbsp;frequency seconds. To get the frequency, call @ref
+ * glfwGetTimerFrequency.
+ *
+ * @return The value of the timer, or zero if an
+ * [error](@ref error_handling) occurred.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED.
+ *
+ * @thread_safety This function may be called from any thread.
+ *
+ * @sa @ref time
+ * @sa @ref glfwGetTimerFrequency
+ *
+ * @since Added in version 3.2.
+ *
+ * @ingroup input
+ */
+GLFWAPI uint64_t glfwGetTimerValue(void);
+
+/*! @brief Returns the frequency, in Hz, of the raw timer.
+ *
+ * This function returns the frequency, in Hz, of the raw timer.
+ *
+ * @return The frequency of the timer, in Hz, or zero if an
+ * [error](@ref error_handling) occurred.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED.
+ *
+ * @thread_safety This function may be called from any thread.
+ *
+ * @sa @ref time
+ * @sa @ref glfwGetTimerValue
+ *
+ * @since Added in version 3.2.
+ *
+ * @ingroup input
+ */
+GLFWAPI uint64_t glfwGetTimerFrequency(void);
+
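+/* A sketch of timing a block of work with the raw timer functions above;
+ * do_work() is a placeholder for application code:
+ *
+ * @code
+ * const uint64_t start = glfwGetTimerValue();
+ *
+ * do_work();
+ *
+ * const uint64_t ticks = glfwGetTimerValue() - start;
+ * const double seconds = (double) ticks / (double) glfwGetTimerFrequency();
+ * @endcode
+ */
+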
+/*! @brief Makes the context of the specified window current for the calling
+ * thread.
+ *
+ * This function makes the OpenGL or OpenGL ES context of the specified window
+ * current on the calling thread. A context must only be made current on
+ * a single thread at a time and each thread can have only a single current
+ * context at a time.
+ *
+ * When moving a context between threads, you must make it non-current on the
+ * old thread before making it current on the new one.
+ *
+ * By default, making a context non-current implicitly forces a pipeline flush.
+ * On machines that support `GL_KHR_context_flush_control`, you can control
+ * whether a context performs this flush by setting the
+ * [GLFW_CONTEXT_RELEASE_BEHAVIOR](@ref GLFW_CONTEXT_RELEASE_BEHAVIOR_hint)
+ * hint.
+ *
+ * The specified window must have an OpenGL or OpenGL ES context. Specifying
+ * a window without a context will generate a @ref GLFW_NO_WINDOW_CONTEXT
+ * error.
+ *
+ * @param[in] window The window whose context to make current, or `NULL` to
+ * detach the current context.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref
+ * GLFW_NO_WINDOW_CONTEXT and @ref GLFW_PLATFORM_ERROR.
+ *
+ * @thread_safety This function may be called from any thread.
+ *
+ * @sa @ref context_current
+ * @sa @ref glfwGetCurrentContext
+ *
+ * @since Added in version 3.0.
+ *
+ * @ingroup context
+ */
+GLFWAPI void glfwMakeContextCurrent(GLFWwindow* window);
+
+/*! @brief Returns the window whose context is current on the calling thread.
+ *
+ * This function returns the window whose OpenGL or OpenGL ES context is
+ * current on the calling thread.
+ *
+ * @return The window whose context is current, or `NULL` if no window's
+ * context is current.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED.
+ *
+ * @thread_safety This function may be called from any thread.
+ *
+ * @sa @ref context_current
+ * @sa @ref glfwMakeContextCurrent
+ *
+ * @since Added in version 3.0.
+ *
+ * @ingroup context
+ */
+GLFWAPI GLFWwindow* glfwGetCurrentContext(void);
+
+/*! @brief Swaps the front and back buffers of the specified window.
+ *
+ * This function swaps the front and back buffers of the specified window when
+ * rendering with OpenGL or OpenGL ES. If the swap interval is greater than
+ * zero, the GPU driver waits the specified number of screen updates before
+ * swapping the buffers.
+ *
+ * The specified window must have an OpenGL or OpenGL ES context. Specifying
+ * a window without a context will generate a @ref GLFW_NO_WINDOW_CONTEXT
+ * error.
+ *
+ * This function does not apply to Vulkan. If you are rendering with Vulkan,
+ * see `vkQueuePresentKHR` instead.
+ *
+ * @param[in] window The window whose buffers to swap.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref
+ * GLFW_NO_WINDOW_CONTEXT and @ref GLFW_PLATFORM_ERROR.
+ *
+ * @remark __EGL:__ The context of the specified window must be current on the
+ * calling thread.
+ *
+ * @thread_safety This function may be called from any thread.
+ *
+ * @sa @ref buffer_swap
+ * @sa @ref glfwSwapInterval
+ *
+ * @since Added in version 1.0.
+ * @glfw3 Added window handle parameter.
+ *
+ * @ingroup window
+ */
+GLFWAPI void glfwSwapBuffers(GLFWwindow* window);
+
+/*! @brief Sets the swap interval for the current context.
+ *
+ * This function sets the swap interval for the current OpenGL or OpenGL ES
+ * context, i.e. the number of screen updates to wait from the time @ref
+ * glfwSwapBuffers was called before swapping the buffers and returning. This
+ * is sometimes called _vertical synchronization_, _vertical retrace
+ * synchronization_ or just _vsync_.
+ *
+ * A context that supports either of the `WGL_EXT_swap_control_tear` and
+ * `GLX_EXT_swap_control_tear` extensions also accepts _negative_ swap
+ * intervals, which allows the driver to swap immediately even if a frame
+ * arrives a little bit late. You can check for these extensions with @ref
+ * glfwExtensionSupported.
+ *
+ * A context must be current on the calling thread. Calling this function
+ * without a current context will cause a @ref GLFW_NO_CURRENT_CONTEXT error.
+ *
+ * This function does not apply to Vulkan. If you are rendering with Vulkan,
+ * see the present mode of your swapchain instead.
+ *
+ * @param[in] interval The minimum number of screen updates to wait for
+ * until the buffers are swapped by @ref glfwSwapBuffers.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref
+ * GLFW_NO_CURRENT_CONTEXT and @ref GLFW_PLATFORM_ERROR.
+ *
+ * @remark This function is not called during context creation, leaving the
+ * swap interval set to whatever is the default for that API. This is done
+ * because some swap interval extensions used by GLFW do not allow the swap
+ * interval to be reset to zero once it has been set to a non-zero value.
+ *
+ * @remark Some GPU drivers do not honor the requested swap interval, either
+ * because of a user setting that overrides the application's request or due to
+ * bugs in the driver.
+ *
+ * @thread_safety This function may be called from any thread.
+ *
+ * @sa @ref buffer_swap
+ * @sa @ref glfwSwapBuffers
+ *
+ * @since Added in version 1.0.
+ *
+ * @ingroup context
+ */
+GLFWAPI void glfwSwapInterval(int interval);
+
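+/* A typical presentation loop tying together @ref glfwMakeContextCurrent,
+ * @ref glfwSwapInterval and @ref glfwSwapBuffers; window is an existing
+ * window with an OpenGL context and draw_scene() is a placeholder for
+ * application rendering:
+ *
+ * @code
+ * glfwMakeContextCurrent(window);
+ * glfwSwapInterval(1);
+ *
+ * while (!glfwWindowShouldClose(window))
+ * {
+ *     draw_scene();
+ *     glfwSwapBuffers(window);
+ *     glfwPollEvents();
+ * }
+ * @endcode
+ */
+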
+/*! @brief Returns whether the specified extension is available.
+ *
+ * This function returns whether the specified
+ * [API extension](@ref context_glext) is supported by the current OpenGL or
+ * OpenGL ES context. It searches both for client API extension and context
+ * creation API extensions.
+ *
+ * A context must be current on the calling thread. Calling this function
+ * without a current context will cause a @ref GLFW_NO_CURRENT_CONTEXT error.
+ *
+ * As this function retrieves and searches one or more extension strings each
+ * call, it is recommended that you cache its results if it is going to be used
+ * frequently. The extension strings will not change during the lifetime of
+ * a context, so there is no danger in doing this.
+ *
+ * This function does not apply to Vulkan. If you are using Vulkan, see @ref
+ * glfwGetRequiredInstanceExtensions, `vkEnumerateInstanceExtensionProperties`
+ * and `vkEnumerateDeviceExtensionProperties` instead.
+ *
+ * @param[in] extension The ASCII encoded name of the extension.
+ * @return `GLFW_TRUE` if the extension is available, or `GLFW_FALSE`
+ * otherwise.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref
+ * GLFW_NO_CURRENT_CONTEXT, @ref GLFW_INVALID_VALUE and @ref
+ * GLFW_PLATFORM_ERROR.
+ *
+ * @thread_safety This function may be called from any thread.
+ *
+ * @sa @ref context_glext
+ * @sa @ref glfwGetProcAddress
+ *
+ * @since Added in version 1.0.
+ *
+ * @ingroup context
+ */
+GLFWAPI int glfwExtensionSupported(const char* extension);
+
+/*! @brief Returns the address of the specified function for the current
+ * context.
+ *
+ * This function returns the address of the specified OpenGL or OpenGL ES
+ * [core or extension function](@ref context_glext), if it is supported
+ * by the current context.
+ *
+ * A context must be current on the calling thread. Calling this function
+ * without a current context will cause a @ref GLFW_NO_CURRENT_CONTEXT error.
+ *
+ * This function does not apply to Vulkan. If you are rendering with Vulkan,
+ * see @ref glfwGetInstanceProcAddress, `vkGetInstanceProcAddr` and
+ * `vkGetDeviceProcAddr` instead.
+ *
+ * @param[in] procname The ASCII encoded name of the function.
+ * @return The address of the function, or `NULL` if an
+ * [error](@ref error_handling) occurred.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref
+ * GLFW_NO_CURRENT_CONTEXT and @ref GLFW_PLATFORM_ERROR.
+ *
+ * @remark The address of a given function is not guaranteed to be the same
+ * between contexts.
+ *
+ * @remark This function may return a non-`NULL` address despite the
+ * associated version or extension not being available. Always check the
+ * context version or extension string first.
+ *
+ * @pointer_lifetime The returned function pointer is valid until the context
+ * is destroyed or the library is terminated.
+ *
+ * @thread_safety This function may be called from any thread.
+ *
+ * @sa @ref context_glext
+ * @sa @ref glfwExtensionSupported
+ *
+ * @since Added in version 1.0.
+ *
+ * @ingroup context
+ */
+GLFWAPI GLFWglproc glfwGetProcAddress(const char* procname);
+
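+/* A sketch of loading an extension entry point only when it is advertised;
+ * the PFNGLDEBUGMESSAGECALLBACKARBPROC typedef comes from the OpenGL
+ * extension header (glext.h), which is assumed to be included:
+ *
+ * @code
+ * if (glfwExtensionSupported("GL_ARB_debug_output"))
+ * {
+ *     PFNGLDEBUGMESSAGECALLBACKARBPROC pfnDebugMessageCallback =
+ *         (PFNGLDEBUGMESSAGECALLBACKARBPROC)
+ *             glfwGetProcAddress("glDebugMessageCallbackARB");
+ * }
+ * @endcode
+ */
+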
+/*! @brief Returns whether the Vulkan loader and an ICD have been found.
+ *
+ * This function returns whether the Vulkan loader and any minimally functional
+ * ICD have been found.
+ *
+ * The availability of a Vulkan loader and even an ICD does not by itself
+ * guarantee that surface creation or even instance creation is possible.
+ * Call @ref glfwGetRequiredInstanceExtensions to check whether the
+ * extensions necessary for Vulkan surface creation are available and @ref
+ * glfwGetPhysicalDevicePresentationSupport to check whether a queue family
+ * of a physical device supports image presentation.
+ *
+ * @return `GLFW_TRUE` if Vulkan is minimally available, or `GLFW_FALSE`
+ * otherwise.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED.
+ *
+ * @thread_safety This function may be called from any thread.
+ *
+ * @sa @ref vulkan_support
+ *
+ * @since Added in version 3.2.
+ *
+ * @ingroup vulkan
+ */
+GLFWAPI int glfwVulkanSupported(void);
+
+/*! @brief Returns the Vulkan instance extensions required by GLFW.
+ *
+ * This function returns an array of names of Vulkan instance extensions required
+ * by GLFW for creating Vulkan surfaces for GLFW windows. If successful, the
+ * list will always contain `VK_KHR_surface`, so if you don't require any
+ * additional extensions you can pass this list directly to the
+ * `VkInstanceCreateInfo` struct.
+ *
+ * If Vulkan is not available on the machine, this function returns `NULL` and
+ * generates a @ref GLFW_API_UNAVAILABLE error. Call @ref glfwVulkanSupported
+ * to check whether Vulkan is at least minimally available.
+ *
+ * If Vulkan is available but no set of extensions allowing window surface
+ * creation was found, this function returns `NULL`. You may still use Vulkan
+ * for off-screen rendering and compute work.
+ *
+ * @param[out] count Where to store the number of extensions in the returned
+ * array. This is set to zero if an error occurred.
+ * @return An array of ASCII encoded extension names, or `NULL` if an
+ * [error](@ref error_handling) occurred.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref
+ * GLFW_API_UNAVAILABLE.
+ *
+ * @remark Additional extensions may be required by future versions of GLFW.
+ * You should check if any extensions you wish to enable are already in the
+ * returned array, as it is an error to specify an extension more than once in
+ * the `VkInstanceCreateInfo` struct.
+ *
+ * @pointer_lifetime The returned array is allocated and freed by GLFW. You
+ * should not free it yourself. It is guaranteed to be valid only until the
+ * library is terminated.
+ *
+ * @thread_safety This function may be called from any thread.
+ *
+ * @sa @ref vulkan_ext
+ * @sa @ref glfwCreateWindowSurface
+ *
+ * @since Added in version 3.2.
+ *
+ * @ingroup vulkan
+ */
+GLFWAPI const char** glfwGetRequiredInstanceExtensions(uint32_t* count);
+
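+/* A sketch of passing the returned array straight to Vulkan instance
+ * creation, assuming the Vulkan headers are included and no additional
+ * extensions are required:
+ *
+ * @code
+ * uint32_t count = 0;
+ * const char** extensions = glfwGetRequiredInstanceExtensions(&count);
+ *
+ * if (extensions)
+ * {
+ *     VkInstanceCreateInfo ici = {0};
+ *     ici.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO;
+ *     ici.enabledExtensionCount = count;
+ *     ici.ppEnabledExtensionNames = extensions;
+ * }
+ * @endcode
+ */
+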
+#if defined(VK_VERSION_1_0)
+
+/*! @brief Returns the address of the specified Vulkan instance function.
+ *
+ * This function returns the address of the specified Vulkan core or extension
+ * function for the specified instance. If instance is set to `NULL` it can
+ * return any function exported from the Vulkan loader, including at least the
+ * following functions:
+ *
+ * - `vkEnumerateInstanceExtensionProperties`
+ * - `vkEnumerateInstanceLayerProperties`
+ * - `vkCreateInstance`
+ * - `vkGetInstanceProcAddr`
+ *
+ * If Vulkan is not available on the machine, this function returns `NULL` and
+ * generates a @ref GLFW_API_UNAVAILABLE error. Call @ref glfwVulkanSupported
+ * to check whether Vulkan is at least minimally available.
+ *
+ * This function is equivalent to calling `vkGetInstanceProcAddr` with
+ * a platform-specific query of the Vulkan loader as a fallback.
+ *
+ * @param[in] instance The Vulkan instance to query, or `NULL` to retrieve
+ * functions related to instance creation.
+ * @param[in] procname The ASCII encoded name of the function.
+ * @return The address of the function, or `NULL` if an
+ * [error](@ref error_handling) occurred.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref
+ * GLFW_API_UNAVAILABLE.
+ *
+ * @pointer_lifetime The returned function pointer is valid until the library
+ * is terminated.
+ *
+ * @thread_safety This function may be called from any thread.
+ *
+ * @sa @ref vulkan_proc
+ *
+ * @since Added in version 3.2.
+ *
+ * @ingroup vulkan
+ */
+GLFWAPI GLFWvkproc glfwGetInstanceProcAddress(VkInstance instance, const char* procname);
+
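+/* A sketch of fetching a loader-level entry point before any instance
+ * exists; PFN_vkCreateInstance is the standard Vulkan function pointer
+ * typedef:
+ *
+ * @code
+ * PFN_vkCreateInstance pfnCreateInstance = (PFN_vkCreateInstance)
+ *     glfwGetInstanceProcAddress(NULL, "vkCreateInstance");
+ * @endcode
+ */
+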
+/*! @brief Returns whether the specified queue family can present images.
+ *
+ * This function returns whether the specified queue family of the specified
+ * physical device supports presentation to the platform GLFW was built for.
+ *
+ * If Vulkan or the required window surface creation instance extensions are
+ * not available on the machine, or if the specified instance was not created
+ * with the required extensions, this function returns `GLFW_FALSE` and
+ * generates a @ref GLFW_API_UNAVAILABLE error. Call @ref glfwVulkanSupported
+ * to check whether Vulkan is at least minimally available and @ref
+ * glfwGetRequiredInstanceExtensions to check what instance extensions are
+ * required.
+ *
+ * @param[in] instance The instance that the physical device belongs to.
+ * @param[in] device The physical device that the queue family belongs to.
+ * @param[in] queuefamily The index of the queue family to query.
+ * @return `GLFW_TRUE` if the queue family supports presentation, or
+ * `GLFW_FALSE` otherwise.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref
+ * GLFW_API_UNAVAILABLE and @ref GLFW_PLATFORM_ERROR.
+ *
+ * @remark @macos This function currently always returns `GLFW_TRUE`, as the
+ * `VK_MVK_macos_surface` and `VK_EXT_metal_surface` extensions do not provide
+ * a `vkGetPhysicalDevice*PresentationSupport` type function.
+ *
+ * @thread_safety This function may be called from any thread. For
+ * synchronization details of Vulkan objects, see the Vulkan specification.
+ *
+ * @sa @ref vulkan_present
+ *
+ * @since Added in version 3.2.
+ *
+ * @ingroup vulkan
+ */
+GLFWAPI int glfwGetPhysicalDevicePresentationSupport(VkInstance instance, VkPhysicalDevice device, uint32_t queuefamily);
+
+/*! @brief Creates a Vulkan surface for the specified window.
+ *
+ * This function creates a Vulkan surface for the specified window.
+ *
+ * If the Vulkan loader or at least one minimally functional ICD were not found,
+ * this function returns `VK_ERROR_INITIALIZATION_FAILED` and generates a @ref
+ * GLFW_API_UNAVAILABLE error. Call @ref glfwVulkanSupported to check whether
+ * Vulkan is at least minimally available.
+ *
+ * If the required window surface creation instance extensions are not
+ * available or if the specified instance was not created with these extensions
+ * enabled, this function returns `VK_ERROR_EXTENSION_NOT_PRESENT` and
+ * generates a @ref GLFW_API_UNAVAILABLE error. Call @ref
+ * glfwGetRequiredInstanceExtensions to check what instance extensions are
+ * required.
+ *
+ * The window surface cannot be shared with another API, so the window must
+ * have been created with the [client api hint](@ref GLFW_CLIENT_API_attrib)
+ * set to `GLFW_NO_API`. Otherwise this function generates a @ref
+ * GLFW_INVALID_VALUE error and returns `VK_ERROR_NATIVE_WINDOW_IN_USE_KHR`.
+ *
+ * The window surface must be destroyed before the specified Vulkan instance.
+ * It is the responsibility of the caller to destroy the window surface. GLFW
+ * does not destroy it for you. Call `vkDestroySurfaceKHR` to destroy the
+ * surface.
+ *
+ * @param[in] instance The Vulkan instance to create the surface in.
+ * @param[in] window The window to create the surface for.
+ * @param[in] allocator The allocator to use, or `NULL` to use the default
+ * allocator.
+ * @param[out] surface Where to store the handle of the surface. This is set
+ * to `VK_NULL_HANDLE` if an error occurred.
+ * @return `VK_SUCCESS` if successful, or a Vulkan error code if an
+ * [error](@ref error_handling) occurred.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref
+ * GLFW_API_UNAVAILABLE, @ref GLFW_PLATFORM_ERROR and @ref GLFW_INVALID_VALUE.
+ *
+ * @remark If an error occurs before the creation call is made, GLFW returns
+ * the Vulkan error code most appropriate for the error. Appropriate use of
+ * @ref glfwVulkanSupported and @ref glfwGetRequiredInstanceExtensions should
+ * eliminate almost all occurrences of these errors.
+ *
+ * @remark @macos GLFW prefers the `VK_EXT_metal_surface` extension, with the
+ * `VK_MVK_macos_surface` extension as a fallback. The name of the selected
+ * extension, if any, is included in the array returned by @ref
+ * glfwGetRequiredInstanceExtensions.
+ *
+ * @remark @macos This function creates and sets a `CAMetalLayer` instance for
+ * the window content view, which is required for MoltenVK to function.
+ *
+ * @remark @x11 By default GLFW prefers the `VK_KHR_xcb_surface` extension,
+ * with the `VK_KHR_xlib_surface` extension as a fallback. You can make
+ * `VK_KHR_xlib_surface` the preferred extension by setting the
+ * [GLFW_X11_XCB_VULKAN_SURFACE](@ref GLFW_X11_XCB_VULKAN_SURFACE_hint) init
+ * hint. The name of the selected extension, if any, is included in the array
+ * returned by @ref glfwGetRequiredInstanceExtensions.
+ *
+ * @thread_safety This function may be called from any thread. For
+ * synchronization details of Vulkan objects, see the Vulkan specification.
+ *
+ * @sa @ref vulkan_surface
+ * @sa @ref glfwGetRequiredInstanceExtensions
+ *
+ * @since Added in version 3.2.
+ *
+ * @ingroup vulkan
+ */
+GLFWAPI VkResult glfwCreateWindowSurface(VkInstance instance, GLFWwindow* window, const VkAllocationCallbacks* allocator, VkSurfaceKHR* surface);
+
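+/* A minimal surface creation sketch; instance and window are assumed to have
+ * been created already, with the window using `GLFW_NO_API`:
+ *
+ * @code
+ * VkSurfaceKHR surface;
+ * VkResult err = glfwCreateWindowSurface(instance, window, NULL, &surface);
+ * if (err != VK_SUCCESS)
+ * {
+ *     // Surface creation failed; inspect err and the GLFW error callback
+ * }
+ * @endcode
+ */
+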
+#endif /*VK_VERSION_1_0*/
+
+
+/*************************************************************************
+ * Global definition cleanup
+ *************************************************************************/
+
+/* ------------------- BEGIN SYSTEM/COMPILER SPECIFIC -------------------- */
+
+#ifdef GLFW_WINGDIAPI_DEFINED
+ #undef WINGDIAPI
+ #undef GLFW_WINGDIAPI_DEFINED
+#endif
+
+#ifdef GLFW_CALLBACK_DEFINED
+ #undef CALLBACK
+ #undef GLFW_CALLBACK_DEFINED
+#endif
+
+/* Some OpenGL related headers need GLAPIENTRY, but it is unconditionally
+ * defined by some gl.h variants (OpenBSD), so define it afterwards if needed.
+ */
+#ifndef GLAPIENTRY
+ #define GLAPIENTRY APIENTRY
+ #define GLFW_GLAPIENTRY_DEFINED
+#endif
+
+/* -------------------- END SYSTEM/COMPILER SPECIFIC --------------------- */
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _glfw3_h_ */
+
diff --git a/chromium/third_party/dawn/third_party/glfw/include/GLFW/glfw3native.h b/chromium/third_party/dawn/third_party/glfw/include/GLFW/glfw3native.h
new file mode 100644
index 00000000000..6d090778cab
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/include/GLFW/glfw3native.h
@@ -0,0 +1,614 @@
+/*************************************************************************
+ * GLFW 3.4 - www.glfw.org
+ * A library for OpenGL, window and input
+ *------------------------------------------------------------------------
+ * Copyright (c) 2002-2006 Marcus Geelnard
+ * Copyright (c) 2006-2018 Camilla Löwy <elmindreda@glfw.org>
+ *
+ * This software is provided 'as-is', without any express or implied
+ * warranty. In no event will the authors be held liable for any damages
+ * arising from the use of this software.
+ *
+ * Permission is granted to anyone to use this software for any purpose,
+ * including commercial applications, and to alter it and redistribute it
+ * freely, subject to the following restrictions:
+ *
+ * 1. The origin of this software must not be misrepresented; you must not
+ * claim that you wrote the original software. If you use this software
+ * in a product, an acknowledgment in the product documentation would
+ * be appreciated but is not required.
+ *
+ * 2. Altered source versions must be plainly marked as such, and must not
+ * be misrepresented as being the original software.
+ *
+ * 3. This notice may not be removed or altered from any source
+ * distribution.
+ *
+ *************************************************************************/
+
+#ifndef _glfw3_native_h_
+#define _glfw3_native_h_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/*************************************************************************
+ * Doxygen documentation
+ *************************************************************************/
+
+/*! @file glfw3native.h
+ * @brief The header of the native access functions.
+ *
+ * This is the header file of the native access functions. See @ref native for
+ * more information.
+ */
+/*! @defgroup native Native access
+ * @brief Functions related to accessing native handles.
+ *
+ * **By using the native access functions you assert that you know what you're
+ * doing and how to fix problems caused by using them. If you don't, you
+ * shouldn't be using them.**
+ *
+ * Before the inclusion of @ref glfw3native.h, you may define zero or more
+ * window system API macros and zero or more context creation API macros.
+ *
+ * The chosen backends must match those the library was compiled for. Failure
+ * to do this will cause a link-time error.
+ *
+ * The available window API macros are:
+ * * `GLFW_EXPOSE_NATIVE_WIN32`
+ * * `GLFW_EXPOSE_NATIVE_COCOA`
+ * * `GLFW_EXPOSE_NATIVE_X11`
+ * * `GLFW_EXPOSE_NATIVE_WAYLAND`
+ *
+ * The available context API macros are:
+ * * `GLFW_EXPOSE_NATIVE_WGL`
+ * * `GLFW_EXPOSE_NATIVE_NSGL`
+ * * `GLFW_EXPOSE_NATIVE_GLX`
+ * * `GLFW_EXPOSE_NATIVE_EGL`
+ * * `GLFW_EXPOSE_NATIVE_OSMESA`
+ *
+ * These macros select which of the native access functions are declared and
+ * which platform-specific headers are included. It is then up to your (by
+ * definition platform-specific) code to handle which of these should be
+ * defined.
+ */
+
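+/* A sketch of exposing the Win32 handles, assuming GLFW was compiled with
+ * Win32 support, GLFW/glfw3.h has already been included and window is an
+ * existing GLFWwindow*:
+ *
+ * @code
+ * #define GLFW_EXPOSE_NATIVE_WIN32
+ * #include <GLFW/glfw3native.h>
+ *
+ * HWND hwnd = glfwGetWin32Window(window);
+ * @endcode
+ */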
+
+/*************************************************************************
+ * System headers and types
+ *************************************************************************/
+
+#if defined(GLFW_EXPOSE_NATIVE_WIN32) || defined(GLFW_EXPOSE_NATIVE_WGL)
+ /* This is a workaround for the fact that glfw3.h needs to export APIENTRY (for
+ * example to allow applications to correctly declare a GL_KHR_debug callback)
+ * but windows.h assumes no one will define APIENTRY before it does
+ */
+ #if defined(GLFW_APIENTRY_DEFINED)
+ #undef APIENTRY
+ #undef GLFW_APIENTRY_DEFINED
+ #endif
+ #include <windows.h>
+#elif defined(GLFW_EXPOSE_NATIVE_COCOA) || defined(GLFW_EXPOSE_NATIVE_NSGL)
+ #if defined(__OBJC__)
+ #import <Cocoa/Cocoa.h>
+ #else
+ #include <ApplicationServices/ApplicationServices.h>
+ typedef void* id;
+ #endif
+#elif defined(GLFW_EXPOSE_NATIVE_X11) || defined(GLFW_EXPOSE_NATIVE_GLX)
+ #include <X11/Xlib.h>
+ #include <X11/extensions/Xrandr.h>
+#elif defined(GLFW_EXPOSE_NATIVE_WAYLAND)
+ #include <wayland-client.h>
+#endif
+
+#if defined(GLFW_EXPOSE_NATIVE_WGL)
+ /* WGL is declared by windows.h */
+#endif
+#if defined(GLFW_EXPOSE_NATIVE_NSGL)
+ /* NSGL is declared by Cocoa.h */
+#endif
+#if defined(GLFW_EXPOSE_NATIVE_GLX)
+ /* This is a workaround for the fact that glfw3.h defines GLAPIENTRY because by
+ * default it also acts as an OpenGL header
+ * However, glx.h will include gl.h, which will define it unconditionally
+ */
+ #if defined(GLFW_GLAPIENTRY_DEFINED)
+ #undef GLAPIENTRY
+ #undef GLFW_GLAPIENTRY_DEFINED
+ #endif
+ #include <GL/glx.h>
+#endif
+#if defined(GLFW_EXPOSE_NATIVE_EGL)
+ #include <EGL/egl.h>
+#endif
+#if defined(GLFW_EXPOSE_NATIVE_OSMESA)
+ /* This is a workaround for the fact that glfw3.h defines GLAPIENTRY because by
+ * default it also acts as an OpenGL header
+ * However, osmesa.h will include gl.h, which will define it unconditionally
+ */
+ #if defined(GLFW_GLAPIENTRY_DEFINED)
+ #undef GLAPIENTRY
+ #undef GLFW_GLAPIENTRY_DEFINED
+ #endif
+ #include <GL/osmesa.h>
+#endif
+
+
+/*************************************************************************
+ * Functions
+ *************************************************************************/
+
+#if defined(GLFW_EXPOSE_NATIVE_WIN32)
+/*! @brief Returns the adapter device name of the specified monitor.
+ *
+ * @return The UTF-8 encoded adapter device name (for example `\\.\DISPLAY1`)
+ * of the specified monitor, or `NULL` if an [error](@ref error_handling)
+ * occurred.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED.
+ *
+ * @thread_safety This function may be called from any thread. Access is not
+ * synchronized.
+ *
+ * @since Added in version 3.1.
+ *
+ * @ingroup native
+ */
+GLFWAPI const char* glfwGetWin32Adapter(GLFWmonitor* monitor);
+
+/*! @brief Returns the display device name of the specified monitor.
+ *
+ * @return The UTF-8 encoded display device name (for example
+ * `\\.\DISPLAY1\Monitor0`) of the specified monitor, or `NULL` if an
+ * [error](@ref error_handling) occurred.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED.
+ *
+ * @thread_safety This function may be called from any thread. Access is not
+ * synchronized.
+ *
+ * @since Added in version 3.1.
+ *
+ * @ingroup native
+ */
+GLFWAPI const char* glfwGetWin32Monitor(GLFWmonitor* monitor);
+
+/*! @brief Returns the `HWND` of the specified window.
+ *
+ * @return The `HWND` of the specified window, or `NULL` if an
+ * [error](@ref error_handling) occurred.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED.
+ *
+ * @remark The `HDC` associated with the window can be queried with the
+ * [GetDC](https://docs.microsoft.com/en-us/windows/win32/api/winuser/nf-winuser-getdc)
+ * function.
+ * @code
+ * HDC dc = GetDC(glfwGetWin32Window(window));
+ * @endcode
+ * This DC is private and does not need to be released.
+ *
+ * @thread_safety This function may be called from any thread. Access is not
+ * synchronized.
+ *
+ * @since Added in version 3.0.
+ *
+ * @ingroup native
+ */
+GLFWAPI HWND glfwGetWin32Window(GLFWwindow* window);
+#endif
+
+#if defined(GLFW_EXPOSE_NATIVE_WGL)
+/*! @brief Returns the `HGLRC` of the specified window.
+ *
+ * @return The `HGLRC` of the specified window, or `NULL` if an
+ * [error](@ref error_handling) occurred.
+ *
+ * @errors Possible errors include @ref GLFW_NO_WINDOW_CONTEXT and @ref
+ * GLFW_NOT_INITIALIZED.
+ *
+ * @remark The `HDC` associated with the window can be queried with the
+ * [GetDC](https://docs.microsoft.com/en-us/windows/win32/api/winuser/nf-winuser-getdc)
+ * function.
+ * @code
+ * HDC dc = GetDC(glfwGetWin32Window(window));
+ * @endcode
+ * This DC is private and does not need to be released.
+ *
+ * @thread_safety This function may be called from any thread. Access is not
+ * synchronized.
+ *
+ * @since Added in version 3.0.
+ *
+ * @ingroup native
+ */
+GLFWAPI HGLRC glfwGetWGLContext(GLFWwindow* window);
+#endif
+
+#if defined(GLFW_EXPOSE_NATIVE_COCOA)
+/*! @brief Returns the `CGDirectDisplayID` of the specified monitor.
+ *
+ * @return The `CGDirectDisplayID` of the specified monitor, or
+ * `kCGNullDirectDisplay` if an [error](@ref error_handling) occurred.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED.
+ *
+ * @thread_safety This function may be called from any thread. Access is not
+ * synchronized.
+ *
+ * @since Added in version 3.1.
+ *
+ * @ingroup native
+ */
+GLFWAPI CGDirectDisplayID glfwGetCocoaMonitor(GLFWmonitor* monitor);
+
+/*! @brief Returns the `NSWindow` of the specified window.
+ *
+ * @return The `NSWindow` of the specified window, or `nil` if an
+ * [error](@ref error_handling) occurred.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED.
+ *
+ * @thread_safety This function may be called from any thread. Access is not
+ * synchronized.
+ *
+ * @since Added in version 3.0.
+ *
+ * @ingroup native
+ */
+GLFWAPI id glfwGetCocoaWindow(GLFWwindow* window);
+#endif
+
+#if defined(GLFW_EXPOSE_NATIVE_NSGL)
+/*! @brief Returns the `NSOpenGLContext` of the specified window.
+ *
+ * @return The `NSOpenGLContext` of the specified window, or `nil` if an
+ * [error](@ref error_handling) occurred.
+ *
+ * @errors Possible errors include @ref GLFW_NO_WINDOW_CONTEXT and @ref
+ * GLFW_NOT_INITIALIZED.
+ *
+ * @thread_safety This function may be called from any thread. Access is not
+ * synchronized.
+ *
+ * @since Added in version 3.0.
+ *
+ * @ingroup native
+ */
+GLFWAPI id glfwGetNSGLContext(GLFWwindow* window);
+#endif
+
+#if defined(GLFW_EXPOSE_NATIVE_X11)
+/*! @brief Returns the `Display` used by GLFW.
+ *
+ * @return The `Display` used by GLFW, or `NULL` if an
+ * [error](@ref error_handling) occurred.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED.
+ *
+ * @thread_safety This function may be called from any thread. Access is not
+ * synchronized.
+ *
+ * @since Added in version 3.0.
+ *
+ * @ingroup native
+ */
+GLFWAPI Display* glfwGetX11Display(void);
+
+/*! @brief Returns the `RRCrtc` of the specified monitor.
+ *
+ * @return The `RRCrtc` of the specified monitor, or `None` if an
+ * [error](@ref error_handling) occurred.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED.
+ *
+ * @thread_safety This function may be called from any thread. Access is not
+ * synchronized.
+ *
+ * @since Added in version 3.1.
+ *
+ * @ingroup native
+ */
+GLFWAPI RRCrtc glfwGetX11Adapter(GLFWmonitor* monitor);
+
+/*! @brief Returns the `RROutput` of the specified monitor.
+ *
+ * @return The `RROutput` of the specified monitor, or `None` if an
+ * [error](@ref error_handling) occurred.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED.
+ *
+ * @thread_safety This function may be called from any thread. Access is not
+ * synchronized.
+ *
+ * @since Added in version 3.1.
+ *
+ * @ingroup native
+ */
+GLFWAPI RROutput glfwGetX11Monitor(GLFWmonitor* monitor);
+
+/*! @brief Returns the `Window` of the specified window.
+ *
+ * @return The `Window` of the specified window, or `None` if an
+ * [error](@ref error_handling) occurred.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED.
+ *
+ * @thread_safety This function may be called from any thread. Access is not
+ * synchronized.
+ *
+ * @since Added in version 3.0.
+ *
+ * @ingroup native
+ */
+GLFWAPI Window glfwGetX11Window(GLFWwindow* window);
+
+/*! @brief Sets the current primary selection to the specified string.
+ *
+ * @param[in] string A UTF-8 encoded string.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref
+ * GLFW_PLATFORM_ERROR.
+ *
+ * @pointer_lifetime The specified string is copied before this function
+ * returns.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref clipboard
+ * @sa glfwGetX11SelectionString
+ * @sa glfwSetClipboardString
+ *
+ * @since Added in version 3.3.
+ *
+ * @ingroup native
+ */
+GLFWAPI void glfwSetX11SelectionString(const char* string);
+
+/*! @brief Returns the contents of the current primary selection as a string.
+ *
+ * If the selection is empty or if its contents cannot be converted, `NULL`
+ * is returned and a @ref GLFW_FORMAT_UNAVAILABLE error is generated.
+ *
+ * @return The contents of the selection as a UTF-8 encoded string, or `NULL`
+ * if an [error](@ref error_handling) occurred.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref
+ * GLFW_PLATFORM_ERROR.
+ *
+ * @pointer_lifetime The returned string is allocated and freed by GLFW. You
+ * should not free it yourself. It is valid until the next call to @ref
+ * glfwGetX11SelectionString or @ref glfwSetX11SelectionString, or until the
+ * library is terminated.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref clipboard
+ * @sa glfwSetX11SelectionString
+ * @sa glfwGetClipboardString
+ *
+ * @since Added in version 3.3.
+ *
+ * @ingroup native
+ */
+GLFWAPI const char* glfwGetX11SelectionString(void);
+#endif
+
+#if defined(GLFW_EXPOSE_NATIVE_GLX)
+/*! @brief Returns the `GLXContext` of the specified window.
+ *
+ * @return The `GLXContext` of the specified window, or `NULL` if an
+ * [error](@ref error_handling) occurred.
+ *
+ * @errors Possible errors include @ref GLFW_NO_WINDOW_CONTEXT and @ref
+ * GLFW_NOT_INITIALIZED.
+ *
+ * @thread_safety This function may be called from any thread. Access is not
+ * synchronized.
+ *
+ * @since Added in version 3.0.
+ *
+ * @ingroup native
+ */
+GLFWAPI GLXContext glfwGetGLXContext(GLFWwindow* window);
+
+/*! @brief Returns the `GLXWindow` of the specified window.
+ *
+ * @return The `GLXWindow` of the specified window, or `None` if an
+ * [error](@ref error_handling) occurred.
+ *
+ * @errors Possible errors include @ref GLFW_NO_WINDOW_CONTEXT and @ref
+ * GLFW_NOT_INITIALIZED.
+ *
+ * @thread_safety This function may be called from any thread. Access is not
+ * synchronized.
+ *
+ * @since Added in version 3.2.
+ *
+ * @ingroup native
+ */
+GLFWAPI GLXWindow glfwGetGLXWindow(GLFWwindow* window);
+#endif
+
+#if defined(GLFW_EXPOSE_NATIVE_WAYLAND)
+/*! @brief Returns the `struct wl_display*` used by GLFW.
+ *
+ * @return The `struct wl_display*` used by GLFW, or `NULL` if an
+ * [error](@ref error_handling) occurred.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED.
+ *
+ * @thread_safety This function may be called from any thread. Access is not
+ * synchronized.
+ *
+ * @since Added in version 3.2.
+ *
+ * @ingroup native
+ */
+GLFWAPI struct wl_display* glfwGetWaylandDisplay(void);
+
+/*! @brief Returns the `struct wl_output*` of the specified monitor.
+ *
+ * @return The `struct wl_output*` of the specified monitor, or `NULL` if an
+ * [error](@ref error_handling) occurred.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED.
+ *
+ * @thread_safety This function may be called from any thread. Access is not
+ * synchronized.
+ *
+ * @since Added in version 3.2.
+ *
+ * @ingroup native
+ */
+GLFWAPI struct wl_output* glfwGetWaylandMonitor(GLFWmonitor* monitor);
+
+/*! @brief Returns the main `struct wl_surface*` of the specified window.
+ *
+ * @return The main `struct wl_surface*` of the specified window, or `NULL` if
+ * an [error](@ref error_handling) occurred.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED.
+ *
+ * @thread_safety This function may be called from any thread. Access is not
+ * synchronized.
+ *
+ * @since Added in version 3.2.
+ *
+ * @ingroup native
+ */
+GLFWAPI struct wl_surface* glfwGetWaylandWindow(GLFWwindow* window);
+#endif
+
+#if defined(GLFW_EXPOSE_NATIVE_EGL)
+/*! @brief Returns the `EGLDisplay` used by GLFW.
+ *
+ * @return The `EGLDisplay` used by GLFW, or `EGL_NO_DISPLAY` if an
+ * [error](@ref error_handling) occurred.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED.
+ *
+ * @remark Because EGL is initialized on demand, this function will return
+ * `EGL_NO_DISPLAY` until the first context has been created via EGL.
+ *
+ * @thread_safety This function may be called from any thread. Access is not
+ * synchronized.
+ *
+ * @since Added in version 3.0.
+ *
+ * @ingroup native
+ */
+GLFWAPI EGLDisplay glfwGetEGLDisplay(void);
+
+/*! @brief Returns the `EGLContext` of the specified window.
+ *
+ * @return The `EGLContext` of the specified window, or `EGL_NO_CONTEXT` if an
+ * [error](@ref error_handling) occurred.
+ *
+ * @errors Possible errors include @ref GLFW_NO_WINDOW_CONTEXT and @ref
+ * GLFW_NOT_INITIALIZED.
+ *
+ * @thread_safety This function may be called from any thread. Access is not
+ * synchronized.
+ *
+ * @since Added in version 3.0.
+ *
+ * @ingroup native
+ */
+GLFWAPI EGLContext glfwGetEGLContext(GLFWwindow* window);
+
+/*! @brief Returns the `EGLSurface` of the specified window.
+ *
+ * @return The `EGLSurface` of the specified window, or `EGL_NO_SURFACE` if an
+ * [error](@ref error_handling) occurred.
+ *
+ * @errors Possible errors include @ref GLFW_NO_WINDOW_CONTEXT and @ref
+ * GLFW_NOT_INITIALIZED.
+ *
+ * @thread_safety This function may be called from any thread. Access is not
+ * synchronized.
+ *
+ * @since Added in version 3.0.
+ *
+ * @ingroup native
+ */
+GLFWAPI EGLSurface glfwGetEGLSurface(GLFWwindow* window);
+#endif
+
+#if defined(GLFW_EXPOSE_NATIVE_OSMESA)
+/*! @brief Retrieves the color buffer associated with the specified window.
+ *
+ * @param[in] window The window whose color buffer to retrieve.
+ * @param[out] width Where to store the width of the color buffer, or `NULL`.
+ * @param[out] height Where to store the height of the color buffer, or `NULL`.
+ * @param[out] format Where to store the OSMesa pixel format of the color
+ * buffer, or `NULL`.
+ * @param[out] buffer Where to store the address of the color buffer, or
+ * `NULL`.
+ * @return `GLFW_TRUE` if successful, or `GLFW_FALSE` if an
+ * [error](@ref error_handling) occurred.
+ *
+ * @errors Possible errors include @ref GLFW_NO_WINDOW_CONTEXT and @ref
+ * GLFW_NOT_INITIALIZED.
+ *
+ * @thread_safety This function may be called from any thread. Access is not
+ * synchronized.
+ *
+ * @since Added in version 3.3.
+ *
+ * @ingroup native
+ */
+GLFWAPI int glfwGetOSMesaColorBuffer(GLFWwindow* window, int* width, int* height, int* format, void** buffer);
+
+/*! @brief Retrieves the depth buffer associated with the specified window.
+ *
+ * @param[in] window The window whose depth buffer to retrieve.
+ * @param[out] width Where to store the width of the depth buffer, or `NULL`.
+ * @param[out] height Where to store the height of the depth buffer, or `NULL`.
+ * @param[out] bytesPerValue Where to store the number of bytes per depth
+ * buffer element, or `NULL`.
+ * @param[out] buffer Where to store the address of the depth buffer, or
+ * `NULL`.
+ * @return `GLFW_TRUE` if successful, or `GLFW_FALSE` if an
+ * [error](@ref error_handling) occurred.
+ *
+ * @errors Possible errors include @ref GLFW_NO_WINDOW_CONTEXT and @ref
+ * GLFW_NOT_INITIALIZED.
+ *
+ * @thread_safety This function may be called from any thread. Access is not
+ * synchronized.
+ *
+ * @since Added in version 3.3.
+ *
+ * @ingroup native
+ */
+GLFWAPI int glfwGetOSMesaDepthBuffer(GLFWwindow* window, int* width, int* height, int* bytesPerValue, void** buffer);
+
+/*! @brief Returns the `OSMesaContext` of the specified window.
+ *
+ * @return The `OSMesaContext` of the specified window, or `NULL` if an
+ * [error](@ref error_handling) occurred.
+ *
+ * @errors Possible errors include @ref GLFW_NO_WINDOW_CONTEXT and @ref
+ * GLFW_NOT_INITIALIZED.
+ *
+ * @thread_safety This function may be called from any thread. Access is not
+ * synchronized.
+ *
+ * @since Added in version 3.3.
+ *
+ * @ingroup native
+ */
+GLFWAPI OSMesaContext glfwGetOSMesaContext(GLFWwindow* window);
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _glfw3_native_h_ */
+
diff --git a/chromium/third_party/dawn/third_party/glfw/src/CMakeLists.txt b/chromium/third_party/dawn/third_party/glfw/src/CMakeLists.txt
new file mode 100644
index 00000000000..01f191c9752
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/src/CMakeLists.txt
@@ -0,0 +1,400 @@
+
+add_library(glfw ${GLFW_LIBRARY_TYPE}
+ "${GLFW_SOURCE_DIR}/include/GLFW/glfw3.h"
+ "${GLFW_SOURCE_DIR}/include/GLFW/glfw3native.h"
+ internal.h platform.h mappings.h
+ context.c init.c input.c monitor.c platform.c vulkan.c window.c
+ egl_context.c osmesa_context.c null_platform.h null_joystick.h
+ null_init.c null_monitor.c null_window.c null_joystick.c)
+
+# The time, thread and module code is shared between all backends on a given OS,
+# including the null backend, which still needs those bits to be functional
+if (APPLE)
+ target_sources(glfw PRIVATE cocoa_time.h cocoa_time.c posix_thread.h
+ posix_module.c posix_thread.c)
+elseif (WIN32)
+ target_sources(glfw PRIVATE win32_time.h win32_thread.h win32_module.c
+ win32_time.c win32_thread.c)
+else()
+ target_sources(glfw PRIVATE posix_time.h posix_thread.h posix_module.c
+ posix_time.c posix_thread.c)
+endif()
+
+add_custom_target(update_mappings
+ COMMAND "${CMAKE_COMMAND}" -P "${GLFW_SOURCE_DIR}/CMake/GenerateMappings.cmake" mappings.h.in mappings.h
+ WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}"
+ COMMENT "Updating gamepad mappings from upstream repository"
+ SOURCES mappings.h.in "${GLFW_SOURCE_DIR}/CMake/GenerateMappings.cmake"
+ VERBATIM)
+
+set_target_properties(update_mappings PROPERTIES FOLDER "GLFW3")
+
+if (GLFW_BUILD_COCOA)
+ target_compile_definitions(glfw PRIVATE _GLFW_COCOA)
+ target_sources(glfw PRIVATE cocoa_platform.h cocoa_joystick.h cocoa_init.m
+ cocoa_joystick.m cocoa_monitor.m cocoa_window.m
+ nsgl_context.m)
+endif()
+
+if (GLFW_BUILD_WIN32)
+ target_compile_definitions(glfw PRIVATE _GLFW_WIN32)
+ target_sources(glfw PRIVATE win32_platform.h win32_joystick.h win32_init.c
+ win32_joystick.c win32_monitor.c win32_window.c
+ wgl_context.c)
+endif()
+
+if (GLFW_BUILD_X11)
+ target_compile_definitions(glfw PRIVATE _GLFW_X11)
+ target_sources(glfw PRIVATE x11_platform.h xkb_unicode.h x11_init.c
+ x11_monitor.c x11_window.c xkb_unicode.c
+ glx_context.c)
+endif()
+
+if (GLFW_BUILD_WAYLAND)
+ target_compile_definitions(glfw PRIVATE _GLFW_WAYLAND)
+ target_sources(glfw PRIVATE wl_platform.h xkb_unicode.h wl_init.c
+ wl_monitor.c wl_window.c xkb_unicode.c)
+endif()
+
+if (GLFW_BUILD_X11 OR GLFW_BUILD_WAYLAND)
+ if (CMAKE_SYSTEM_NAME STREQUAL "Linux")
+ target_sources(glfw PRIVATE linux_joystick.h linux_joystick.c)
+ endif()
+ target_sources(glfw PRIVATE posix_poll.h posix_poll.c)
+endif()
+
+if (GLFW_BUILD_WAYLAND)
+ include(CheckIncludeFiles)
+ include(CheckFunctionExists)
+ check_function_exists(memfd_create HAVE_MEMFD_CREATE)
+ if (HAVE_MEMFD_CREATE)
+ target_compile_definitions(glfw PRIVATE HAVE_MEMFD_CREATE)
+ endif()
+
+ find_program(WAYLAND_SCANNER_EXECUTABLE NAMES wayland-scanner)
+
+ include(FindPkgConfig)
+ pkg_check_modules(WAYLAND_PROTOCOLS REQUIRED wayland-protocols>=1.15)
+ pkg_get_variable(WAYLAND_PROTOCOLS_BASE wayland-protocols pkgdatadir)
+ pkg_get_variable(WAYLAND_CLIENT_PKGDATADIR wayland-client pkgdatadir)
+
+ macro(wayland_generate protocol_file output_file)
+ add_custom_command(OUTPUT "${output_file}.h"
+ COMMAND "${WAYLAND_SCANNER_EXECUTABLE}" client-header "${protocol_file}" "${output_file}.h"
+ DEPENDS "${protocol_file}"
+ VERBATIM)
+
+ add_custom_command(OUTPUT "${output_file}-code.h"
+ COMMAND "${WAYLAND_SCANNER_EXECUTABLE}" private-code "${protocol_file}" "${output_file}-code.h"
+ DEPENDS "${protocol_file}"
+ VERBATIM)
+
+ target_sources(glfw PRIVATE "${output_file}.h" "${output_file}-code.h")
+ endmacro()
+
+ wayland_generate(
+ "${WAYLAND_CLIENT_PKGDATADIR}/wayland.xml"
+ "${GLFW_BINARY_DIR}/src/wayland-client-protocol")
+ wayland_generate(
+ "${WAYLAND_PROTOCOLS_BASE}/stable/xdg-shell/xdg-shell.xml"
+ "${GLFW_BINARY_DIR}/src/wayland-xdg-shell-client-protocol")
+ wayland_generate(
+ "${WAYLAND_PROTOCOLS_BASE}/unstable/xdg-decoration/xdg-decoration-unstable-v1.xml"
+ "${GLFW_BINARY_DIR}/src/wayland-xdg-decoration-client-protocol")
+ wayland_generate(
+ "${WAYLAND_PROTOCOLS_BASE}/stable/viewporter/viewporter.xml"
+ "${GLFW_BINARY_DIR}/src/wayland-viewporter-client-protocol")
+ wayland_generate(
+ "${WAYLAND_PROTOCOLS_BASE}/unstable/relative-pointer/relative-pointer-unstable-v1.xml"
+ "${GLFW_BINARY_DIR}/src/wayland-relative-pointer-unstable-v1-client-protocol")
+ wayland_generate(
+ "${WAYLAND_PROTOCOLS_BASE}/unstable/pointer-constraints/pointer-constraints-unstable-v1.xml"
+ "${GLFW_BINARY_DIR}/src/wayland-pointer-constraints-unstable-v1-client-protocol")
+ wayland_generate(
+ "${WAYLAND_PROTOCOLS_BASE}/unstable/idle-inhibit/idle-inhibit-unstable-v1.xml"
+ "${GLFW_BINARY_DIR}/src/wayland-idle-inhibit-unstable-v1-client-protocol")
+endif()
+
+if (WIN32 AND GLFW_BUILD_SHARED_LIBRARY)
+ configure_file(glfw.rc.in glfw.rc @ONLY)
+ target_sources(glfw PRIVATE "${CMAKE_CURRENT_BINARY_DIR}/glfw.rc")
+endif()
+
+if (UNIX AND GLFW_BUILD_SHARED_LIBRARY)
+ # On Unix-like systems, shared libraries can use the soname system.
+ set(GLFW_LIB_NAME glfw)
+else()
+ set(GLFW_LIB_NAME glfw3)
+endif()
+
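+# The library is versioned as MAJOR.MINOR with a soname of just MAJOR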
+set_target_properties(glfw PROPERTIES
+ OUTPUT_NAME ${GLFW_LIB_NAME}
+ VERSION ${GLFW_VERSION_MAJOR}.${GLFW_VERSION_MINOR}
+ SOVERSION ${GLFW_VERSION_MAJOR}
+ POSITION_INDEPENDENT_CODE ON
+ C_STANDARD 99
+ C_EXTENSIONS OFF
+ DEFINE_SYMBOL _GLFW_BUILD_DLL
+ FOLDER "GLFW3")
+
+target_include_directories(glfw PUBLIC
+ "$<BUILD_INTERFACE:${GLFW_SOURCE_DIR}/include>"
+ "$<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>")
+target_include_directories(glfw PRIVATE
+ "${GLFW_SOURCE_DIR}/src"
+ "${GLFW_BINARY_DIR}/src")
+target_link_libraries(glfw PRIVATE Threads::Threads)
+
+# Workaround for CMake not knowing about .m files before version 3.16
+if (CMAKE_VERSION VERSION_LESS "3.16" AND APPLE)
+ set_source_files_properties(cocoa_init.m cocoa_joystick.m cocoa_monitor.m
+ cocoa_window.m nsgl_context.m PROPERTIES
+ LANGUAGE C)
+endif()
+
+if (GLFW_BUILD_WIN32)
+ list(APPEND glfw_PKG_LIBS "-lgdi32")
+endif()
+
+if (GLFW_BUILD_COCOA)
+ target_link_libraries(glfw PRIVATE "-framework Cocoa"
+ "-framework IOKit"
+ "-framework CoreFoundation")
+
+ set(glfw_PKG_DEPS "")
+ set(glfw_PKG_LIBS "-framework Cocoa -framework IOKit -framework CoreFoundation")
+endif()
+
+if (GLFW_BUILD_WAYLAND)
+ pkg_check_modules(Wayland REQUIRED
+ wayland-client>=0.2.7
+ wayland-cursor>=0.2.7
+ wayland-egl>=0.2.7
+ xkbcommon>=0.5.0)
+
+ target_include_directories(glfw PRIVATE ${Wayland_INCLUDE_DIRS})
+
+ if (NOT CMAKE_SYSTEM_NAME STREQUAL "Linux")
+ find_package(EpollShim)
+ if (EPOLLSHIM_FOUND)
+ target_include_directories(glfw PRIVATE ${EPOLLSHIM_INCLUDE_DIRS})
+ target_link_libraries(glfw PRIVATE ${EPOLLSHIM_LIBRARIES})
+ endif()
+ endif()
+endif()
+
+if (GLFW_BUILD_X11)
+ find_package(X11 REQUIRED)
+ target_include_directories(glfw PRIVATE "${X11_X11_INCLUDE_PATH}")
+
+ # Check for XRandR (modern resolution switching and gamma control)
+ if (NOT X11_Xrandr_INCLUDE_PATH)
+ message(FATAL_ERROR "RandR headers not found; install libxrandr development package")
+ endif()
+ target_include_directories(glfw PRIVATE "${X11_Xrandr_INCLUDE_PATH}")
+
+ # Check for Xinerama (legacy multi-monitor support)
+ if (NOT X11_Xinerama_INCLUDE_PATH)
+ message(FATAL_ERROR "Xinerama headers not found; install libxinerama development package")
+ endif()
+ target_include_directories(glfw PRIVATE "${X11_Xinerama_INCLUDE_PATH}")
+
+ # Check for Xkb (X keyboard extension)
+ if (NOT X11_Xkb_INCLUDE_PATH)
+ message(FATAL_ERROR "XKB headers not found; install X11 development package")
+ endif()
+ target_include_directories(glfw PRIVATE "${X11_Xkb_INCLUDE_PATH}")
+
+ # Check for Xcursor (cursor creation from RGBA images)
+ if (NOT X11_Xcursor_INCLUDE_PATH)
+ message(FATAL_ERROR "Xcursor headers not found; install libxcursor development package")
+ endif()
+ target_include_directories(glfw PRIVATE "${X11_Xcursor_INCLUDE_PATH}")
+
+ # Check for XInput (modern HID input)
+ if (NOT X11_Xi_INCLUDE_PATH)
+ message(FATAL_ERROR "XInput headers not found; install libxi development package")
+ endif()
+ target_include_directories(glfw PRIVATE "${X11_Xi_INCLUDE_PATH}")
+
+ # Check for X Shape (custom window input shape)
+ if (NOT X11_Xshape_INCLUDE_PATH)
+ message(FATAL_ERROR "X Shape headers not found; install libxext development package")
+ endif()
+ target_include_directories(glfw PRIVATE "${X11_Xshape_INCLUDE_PATH}")
+endif()
+
+if (UNIX AND NOT APPLE)
+ find_library(RT_LIBRARY rt)
+ mark_as_advanced(RT_LIBRARY)
+ if (RT_LIBRARY)
+ target_link_libraries(glfw PRIVATE "${RT_LIBRARY}")
+ list(APPEND glfw_PKG_LIBS "-lrt")
+ endif()
+
+ find_library(MATH_LIBRARY m)
+ mark_as_advanced(MATH_LIBRARY)
+ if (MATH_LIBRARY)
+ target_link_libraries(glfw PRIVATE "${MATH_LIBRARY}")
+ list(APPEND glfw_PKG_LIBS "-lm")
+ endif()
+
+ if (CMAKE_DL_LIBS)
+ target_link_libraries(glfw PRIVATE "${CMAKE_DL_LIBS}")
+ list(APPEND glfw_PKG_LIBS "-l${CMAKE_DL_LIBS}")
+ endif()
+endif()
+
+# Make GCC warn about declarations that VS 2010 and 2012 won't accept for all
+# source files that VS will build (Clang ignores this because we set -std=c99)
+if (CMAKE_C_COMPILER_ID STREQUAL "GNU")
+ set_source_files_properties(context.c init.c input.c monitor.c platform.c vulkan.c
+ window.c null_init.c null_joystick.c null_monitor.c
+ null_window.c win32_init.c win32_joystick.c win32_module.c
+ win32_monitor.c win32_time.c win32_thread.c win32_window.c
+ wgl_context.c egl_context.c osmesa_context.c PROPERTIES
+ COMPILE_FLAGS -Wdeclaration-after-statement)
+endif()
+
+if (WIN32)
+ if (GLFW_USE_HYBRID_HPG)
+ target_compile_definitions(glfw PRIVATE _GLFW_USE_HYBRID_HPG)
+ endif()
+endif()
+
+# Enable a reasonable set of warnings
+# NOTE: The order matters here; Clang-CL matches both MSVC and Clang
+if (MSVC)
+ target_compile_options(glfw PRIVATE "/W3")
+elseif (CMAKE_C_COMPILER_ID STREQUAL "GNU" OR
+ CMAKE_C_COMPILER_ID STREQUAL "Clang" OR
+ CMAKE_C_COMPILER_ID STREQUAL "AppleClang")
+
+ target_compile_options(glfw PRIVATE "-Wall")
+endif()
+
+if (GLFW_BUILD_WIN32)
+ target_compile_definitions(glfw PRIVATE UNICODE _UNICODE)
+endif()
+
+# HACK: When building on MinGW, WINVER and UNICODE need to be defined before
+# the inclusion of stddef.h (by glfw3.h), which is itself included before
+# win32_platform.h. We define them here until a saner solution can be found
+# NOTE: MinGW-w64 and Visual C++ do /not/ need this hack.
+if (MINGW)
+ target_compile_definitions(glfw PRIVATE WINVER=0x0501)
+endif()
+
+# Workaround for legacy MinGW not providing XInput and DirectInput
+if (MINGW)
+ include(CheckIncludeFile)
+ check_include_file(dinput.h DINPUT_H_FOUND)
+ check_include_file(xinput.h XINPUT_H_FOUND)
+ if (NOT DINPUT_H_FOUND OR NOT XINPUT_H_FOUND)
+ target_include_directories(glfw PRIVATE "${GLFW_SOURCE_DIR}/deps/mingw")
+ endif()
+endif()
+
+# Workaround for the MS CRT deprecating parts of the standard library
+if (MSVC OR CMAKE_C_SIMULATE_ID STREQUAL "MSVC")
+ target_compile_definitions(glfw PRIVATE _CRT_SECURE_NO_WARNINGS)
+endif()
+
+# Workaround for VS 2008 not shipping with stdint.h
+if (MSVC90)
+ target_include_directories(glfw PUBLIC "${GLFW_SOURCE_DIR}/deps/vs2008")
+endif()
+
+# Check for the DirectX 9 SDK as it is not included with VS 2008
+if (MSVC90)
+ include(CheckIncludeFile)
+ check_include_file(dinput.h DINPUT_H_FOUND)
+ if (NOT DINPUT_H_FOUND)
+ message(FATAL_ERROR "DirectX 9 headers not found; install DirectX 9 SDK")
+ endif()
+endif()
+
+# Workaround for -std=c99 on Linux disabling _DEFAULT_SOURCE (POSIX 2008 and more)
+if (GLFW_BUILD_X11 OR GLFW_BUILD_WAYLAND)
+ if (CMAKE_SYSTEM_NAME STREQUAL "Linux")
+ target_compile_definitions(glfw PRIVATE _DEFAULT_SOURCE)
+ endif()
+endif()
+
+if (GLFW_BUILD_SHARED_LIBRARY)
+ if (WIN32)
+ if (MINGW)
+ # Remove the dependency on the shared version of libgcc
+ # NOTE: MinGW-w64 has the correct default but MinGW needs this
+ target_link_libraries(glfw PRIVATE "-static-libgcc")
+
+ # Remove the lib prefix on the DLL (but not the import library)
+ set_target_properties(glfw PROPERTIES PREFIX "")
+
+ # Add a suffix to the import library to avoid naming conflicts
+ set_target_properties(glfw PROPERTIES IMPORT_SUFFIX "dll.a")
+ else()
+ # Add a suffix to the import library to avoid naming conflicts
+ set_target_properties(glfw PROPERTIES IMPORT_SUFFIX "dll.lib")
+ endif()
+
+ target_compile_definitions(glfw INTERFACE GLFW_DLL)
+ endif()
+
+ if (MINGW)
+ # Enable link-time exploit mitigation features enabled by default on MSVC
+ include(CheckCCompilerFlag)
+
+ # Compatibility with data execution prevention (DEP)
+ set(CMAKE_REQUIRED_FLAGS "-Wl,--nxcompat")
+ check_c_compiler_flag("" _GLFW_HAS_DEP)
+ if (_GLFW_HAS_DEP)
+ target_link_libraries(glfw PRIVATE "-Wl,--nxcompat")
+ endif()
+
+ # Compatibility with address space layout randomization (ASLR)
+ set(CMAKE_REQUIRED_FLAGS "-Wl,--dynamicbase")
+ check_c_compiler_flag("" _GLFW_HAS_ASLR)
+ if (_GLFW_HAS_ASLR)
+ target_link_libraries(glfw PRIVATE "-Wl,--dynamicbase")
+ endif()
+
+ # Compatibility with 64-bit address space layout randomization (ASLR)
+ set(CMAKE_REQUIRED_FLAGS "-Wl,--high-entropy-va")
+ check_c_compiler_flag("" _GLFW_HAS_64ASLR)
+ if (_GLFW_HAS_64ASLR)
+ target_link_libraries(glfw PRIVATE "-Wl,--high-entropy-va")
+ endif()
+
+ # Clear flags again to avoid breaking later tests
+ set(CMAKE_REQUIRED_FLAGS)
+ endif()
+
+ if (UNIX)
+ # Hide symbols not explicitly tagged for export from the shared library
+ target_compile_options(glfw PRIVATE "-fvisibility=hidden")
+ endif()
+endif()
+
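+# Assemble the Requires.private and Libs.private strings used below when
+# configuring glfw3.pc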
+foreach(arg ${glfw_PKG_DEPS})
+ string(APPEND deps " ${arg}")
+endforeach()
+foreach(arg ${glfw_PKG_LIBS})
+ string(APPEND libs " ${arg}")
+endforeach()
+
+set(GLFW_PKG_CONFIG_REQUIRES_PRIVATE "${deps}" CACHE INTERNAL
+ "GLFW pkg-config Requires.private")
+set(GLFW_PKG_CONFIG_LIBS_PRIVATE "${libs}" CACHE INTERNAL
+ "GLFW pkg-config Libs.private")
+
+configure_file("${GLFW_SOURCE_DIR}/CMake/glfw3.pc.in" glfw3.pc @ONLY)
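+# For illustration only: once installed, a consumer can typically compile
+# against the library with `cc main.c $(pkg-config --cflags --libs glfw3)`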
+
+if (GLFW_INSTALL)
+ install(TARGETS glfw
+ EXPORT glfwTargets
+ RUNTIME DESTINATION "${CMAKE_INSTALL_BINDIR}"
+ ARCHIVE DESTINATION "${CMAKE_INSTALL_LIBDIR}"
+ LIBRARY DESTINATION "${CMAKE_INSTALL_LIBDIR}")
+endif()
+
diff --git a/chromium/third_party/dawn/third_party/glfw/src/cocoa_init.m b/chromium/third_party/dawn/third_party/glfw/src/cocoa_init.m
new file mode 100644
index 00000000000..6bc6496a801
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/src/cocoa_init.m
@@ -0,0 +1,684 @@
+//========================================================================
+// GLFW 3.4 macOS - www.glfw.org
+//------------------------------------------------------------------------
+// Copyright (c) 2009-2019 Camilla Löwy <elmindreda@glfw.org>
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would
+// be appreciated but is not required.
+//
+// 2. Altered source versions must be plainly marked as such, and must not
+// be misrepresented as being the original software.
+//
+// 3. This notice may not be removed or altered from any source
+// distribution.
+//
+//========================================================================
+// It is fine to use C99 in this file because it will not be built with VS
+//========================================================================
+
+#include "internal.h"
+#include <sys/param.h> // For MAXPATHLEN
+
+// Needed for _NSGetProgname
+#include <crt_externs.h>
+
+// Change to our application bundle's resources directory, if present
+//
+static void changeToResourcesDirectory(void)
+{
+ char resourcesPath[MAXPATHLEN];
+
+ CFBundleRef bundle = CFBundleGetMainBundle();
+ if (!bundle)
+ return;
+
+ CFURLRef resourcesURL = CFBundleCopyResourcesDirectoryURL(bundle);
+
+ CFStringRef last = CFURLCopyLastPathComponent(resourcesURL);
+ if (CFStringCompare(CFSTR("Resources"), last, 0) != kCFCompareEqualTo)
+ {
+ CFRelease(last);
+ CFRelease(resourcesURL);
+ return;
+ }
+
+ CFRelease(last);
+
+ if (!CFURLGetFileSystemRepresentation(resourcesURL,
+ true,
+ (UInt8*) resourcesPath,
+ MAXPATHLEN))
+ {
+ CFRelease(resourcesURL);
+ return;
+ }
+
+ CFRelease(resourcesURL);
+
+ chdir(resourcesPath);
+}
+
+// Set up the menu bar (manually)
+// This is nasty, nasty stuff -- calls to undocumented semi-private APIs that
+// could go away at any moment, lots of stuff that really should be
+// localize(d|able), etc. Add a nib to save us this horror.
+//
+static void createMenuBar(void)
+{
+ NSString* appName = nil;
+ NSDictionary* bundleInfo = [[NSBundle mainBundle] infoDictionary];
+ NSString* nameKeys[] =
+ {
+ @"CFBundleDisplayName",
+ @"CFBundleName",
+ @"CFBundleExecutable",
+ };
+
+ // Try to figure out what the calling application is called
+
+ for (size_t i = 0; i < sizeof(nameKeys) / sizeof(nameKeys[0]); i++)
+ {
+ id name = bundleInfo[nameKeys[i]];
+ if (name &&
+ [name isKindOfClass:[NSString class]] &&
+ ![name isEqualToString:@""])
+ {
+ appName = name;
+ break;
+ }
+ }
+
+ if (!appName)
+ {
+ char** progname = _NSGetProgname();
+ if (progname && *progname)
+ appName = @(*progname);
+ else
+ appName = @"GLFW Application";
+ }
+
+ NSMenu* bar = [[NSMenu alloc] init];
+ [NSApp setMainMenu:bar];
+
+ NSMenuItem* appMenuItem =
+ [bar addItemWithTitle:@"" action:NULL keyEquivalent:@""];
+ NSMenu* appMenu = [[NSMenu alloc] init];
+ [appMenuItem setSubmenu:appMenu];
+
+ [appMenu addItemWithTitle:[NSString stringWithFormat:@"About %@", appName]
+ action:@selector(orderFrontStandardAboutPanel:)
+ keyEquivalent:@""];
+ [appMenu addItem:[NSMenuItem separatorItem]];
+ NSMenu* servicesMenu = [[NSMenu alloc] init];
+ [NSApp setServicesMenu:servicesMenu];
+ [[appMenu addItemWithTitle:@"Services"
+ action:NULL
+ keyEquivalent:@""] setSubmenu:servicesMenu];
+ [servicesMenu release];
+ [appMenu addItem:[NSMenuItem separatorItem]];
+ [appMenu addItemWithTitle:[NSString stringWithFormat:@"Hide %@", appName]
+ action:@selector(hide:)
+ keyEquivalent:@"h"];
+ [[appMenu addItemWithTitle:@"Hide Others"
+ action:@selector(hideOtherApplications:)
+ keyEquivalent:@"h"]
+ setKeyEquivalentModifierMask:NSEventModifierFlagOption | NSEventModifierFlagCommand];
+ [appMenu addItemWithTitle:@"Show All"
+ action:@selector(unhideAllApplications:)
+ keyEquivalent:@""];
+ [appMenu addItem:[NSMenuItem separatorItem]];
+ [appMenu addItemWithTitle:[NSString stringWithFormat:@"Quit %@", appName]
+ action:@selector(terminate:)
+ keyEquivalent:@"q"];
+
+ NSMenuItem* windowMenuItem =
+ [bar addItemWithTitle:@"" action:NULL keyEquivalent:@""];
+ [bar release];
+ NSMenu* windowMenu = [[NSMenu alloc] initWithTitle:@"Window"];
+ [NSApp setWindowsMenu:windowMenu];
+ [windowMenuItem setSubmenu:windowMenu];
+
+ [windowMenu addItemWithTitle:@"Minimize"
+ action:@selector(performMiniaturize:)
+ keyEquivalent:@"m"];
+ [windowMenu addItemWithTitle:@"Zoom"
+ action:@selector(performZoom:)
+ keyEquivalent:@""];
+ [windowMenu addItem:[NSMenuItem separatorItem]];
+ [windowMenu addItemWithTitle:@"Bring All to Front"
+ action:@selector(arrangeInFront:)
+ keyEquivalent:@""];
+
+ // TODO: Make this appear at the bottom of the menu (for consistency)
+ [windowMenu addItem:[NSMenuItem separatorItem]];
+ [[windowMenu addItemWithTitle:@"Enter Full Screen"
+ action:@selector(toggleFullScreen:)
+ keyEquivalent:@"f"]
+ setKeyEquivalentModifierMask:NSEventModifierFlagControl | NSEventModifierFlagCommand];
+
+ // Prior to Snow Leopard, we need to use this oddly-named semi-private API
+ // to get the application menu working properly.
+ SEL setAppleMenuSelector = NSSelectorFromString(@"setAppleMenu:");
+ [NSApp performSelector:setAppleMenuSelector withObject:appMenu];
+}
+
+// Create key code translation tables
+//
+static void createKeyTables(void)
+{
+ memset(_glfw.ns.keycodes, -1, sizeof(_glfw.ns.keycodes));
+ memset(_glfw.ns.scancodes, -1, sizeof(_glfw.ns.scancodes));
+
+ _glfw.ns.keycodes[0x1D] = GLFW_KEY_0;
+ _glfw.ns.keycodes[0x12] = GLFW_KEY_1;
+ _glfw.ns.keycodes[0x13] = GLFW_KEY_2;
+ _glfw.ns.keycodes[0x14] = GLFW_KEY_3;
+ _glfw.ns.keycodes[0x15] = GLFW_KEY_4;
+ _glfw.ns.keycodes[0x17] = GLFW_KEY_5;
+ _glfw.ns.keycodes[0x16] = GLFW_KEY_6;
+ _glfw.ns.keycodes[0x1A] = GLFW_KEY_7;
+ _glfw.ns.keycodes[0x1C] = GLFW_KEY_8;
+ _glfw.ns.keycodes[0x19] = GLFW_KEY_9;
+ _glfw.ns.keycodes[0x00] = GLFW_KEY_A;
+ _glfw.ns.keycodes[0x0B] = GLFW_KEY_B;
+ _glfw.ns.keycodes[0x08] = GLFW_KEY_C;
+ _glfw.ns.keycodes[0x02] = GLFW_KEY_D;
+ _glfw.ns.keycodes[0x0E] = GLFW_KEY_E;
+ _glfw.ns.keycodes[0x03] = GLFW_KEY_F;
+ _glfw.ns.keycodes[0x05] = GLFW_KEY_G;
+ _glfw.ns.keycodes[0x04] = GLFW_KEY_H;
+ _glfw.ns.keycodes[0x22] = GLFW_KEY_I;
+ _glfw.ns.keycodes[0x26] = GLFW_KEY_J;
+ _glfw.ns.keycodes[0x28] = GLFW_KEY_K;
+ _glfw.ns.keycodes[0x25] = GLFW_KEY_L;
+ _glfw.ns.keycodes[0x2E] = GLFW_KEY_M;
+ _glfw.ns.keycodes[0x2D] = GLFW_KEY_N;
+ _glfw.ns.keycodes[0x1F] = GLFW_KEY_O;
+ _glfw.ns.keycodes[0x23] = GLFW_KEY_P;
+ _glfw.ns.keycodes[0x0C] = GLFW_KEY_Q;
+ _glfw.ns.keycodes[0x0F] = GLFW_KEY_R;
+ _glfw.ns.keycodes[0x01] = GLFW_KEY_S;
+ _glfw.ns.keycodes[0x11] = GLFW_KEY_T;
+ _glfw.ns.keycodes[0x20] = GLFW_KEY_U;
+ _glfw.ns.keycodes[0x09] = GLFW_KEY_V;
+ _glfw.ns.keycodes[0x0D] = GLFW_KEY_W;
+ _glfw.ns.keycodes[0x07] = GLFW_KEY_X;
+ _glfw.ns.keycodes[0x10] = GLFW_KEY_Y;
+ _glfw.ns.keycodes[0x06] = GLFW_KEY_Z;
+
+ _glfw.ns.keycodes[0x27] = GLFW_KEY_APOSTROPHE;
+ _glfw.ns.keycodes[0x2A] = GLFW_KEY_BACKSLASH;
+ _glfw.ns.keycodes[0x2B] = GLFW_KEY_COMMA;
+ _glfw.ns.keycodes[0x18] = GLFW_KEY_EQUAL;
+ _glfw.ns.keycodes[0x32] = GLFW_KEY_GRAVE_ACCENT;
+ _glfw.ns.keycodes[0x21] = GLFW_KEY_LEFT_BRACKET;
+ _glfw.ns.keycodes[0x1B] = GLFW_KEY_MINUS;
+ _glfw.ns.keycodes[0x2F] = GLFW_KEY_PERIOD;
+ _glfw.ns.keycodes[0x1E] = GLFW_KEY_RIGHT_BRACKET;
+ _glfw.ns.keycodes[0x29] = GLFW_KEY_SEMICOLON;
+ _glfw.ns.keycodes[0x2C] = GLFW_KEY_SLASH;
+ _glfw.ns.keycodes[0x0A] = GLFW_KEY_WORLD_1;
+
+ _glfw.ns.keycodes[0x33] = GLFW_KEY_BACKSPACE;
+ _glfw.ns.keycodes[0x39] = GLFW_KEY_CAPS_LOCK;
+ _glfw.ns.keycodes[0x75] = GLFW_KEY_DELETE;
+ _glfw.ns.keycodes[0x7D] = GLFW_KEY_DOWN;
+ _glfw.ns.keycodes[0x77] = GLFW_KEY_END;
+ _glfw.ns.keycodes[0x24] = GLFW_KEY_ENTER;
+ _glfw.ns.keycodes[0x35] = GLFW_KEY_ESCAPE;
+ _glfw.ns.keycodes[0x7A] = GLFW_KEY_F1;
+ _glfw.ns.keycodes[0x78] = GLFW_KEY_F2;
+ _glfw.ns.keycodes[0x63] = GLFW_KEY_F3;
+ _glfw.ns.keycodes[0x76] = GLFW_KEY_F4;
+ _glfw.ns.keycodes[0x60] = GLFW_KEY_F5;
+ _glfw.ns.keycodes[0x61] = GLFW_KEY_F6;
+ _glfw.ns.keycodes[0x62] = GLFW_KEY_F7;
+ _glfw.ns.keycodes[0x64] = GLFW_KEY_F8;
+ _glfw.ns.keycodes[0x65] = GLFW_KEY_F9;
+ _glfw.ns.keycodes[0x6D] = GLFW_KEY_F10;
+ _glfw.ns.keycodes[0x67] = GLFW_KEY_F11;
+ _glfw.ns.keycodes[0x6F] = GLFW_KEY_F12;
+ _glfw.ns.keycodes[0x69] = GLFW_KEY_PRINT_SCREEN;
+ _glfw.ns.keycodes[0x6B] = GLFW_KEY_F14;
+ _glfw.ns.keycodes[0x71] = GLFW_KEY_F15;
+ _glfw.ns.keycodes[0x6A] = GLFW_KEY_F16;
+ _glfw.ns.keycodes[0x40] = GLFW_KEY_F17;
+ _glfw.ns.keycodes[0x4F] = GLFW_KEY_F18;
+ _glfw.ns.keycodes[0x50] = GLFW_KEY_F19;
+ _glfw.ns.keycodes[0x5A] = GLFW_KEY_F20;
+ _glfw.ns.keycodes[0x73] = GLFW_KEY_HOME;
+ _glfw.ns.keycodes[0x72] = GLFW_KEY_INSERT;
+ _glfw.ns.keycodes[0x7B] = GLFW_KEY_LEFT;
+ _glfw.ns.keycodes[0x3A] = GLFW_KEY_LEFT_ALT;
+ _glfw.ns.keycodes[0x3B] = GLFW_KEY_LEFT_CONTROL;
+ _glfw.ns.keycodes[0x38] = GLFW_KEY_LEFT_SHIFT;
+ _glfw.ns.keycodes[0x37] = GLFW_KEY_LEFT_SUPER;
+ _glfw.ns.keycodes[0x6E] = GLFW_KEY_MENU;
+ _glfw.ns.keycodes[0x47] = GLFW_KEY_NUM_LOCK;
+ _glfw.ns.keycodes[0x79] = GLFW_KEY_PAGE_DOWN;
+ _glfw.ns.keycodes[0x74] = GLFW_KEY_PAGE_UP;
+ _glfw.ns.keycodes[0x7C] = GLFW_KEY_RIGHT;
+ _glfw.ns.keycodes[0x3D] = GLFW_KEY_RIGHT_ALT;
+ _glfw.ns.keycodes[0x3E] = GLFW_KEY_RIGHT_CONTROL;
+ _glfw.ns.keycodes[0x3C] = GLFW_KEY_RIGHT_SHIFT;
+ _glfw.ns.keycodes[0x36] = GLFW_KEY_RIGHT_SUPER;
+ _glfw.ns.keycodes[0x31] = GLFW_KEY_SPACE;
+ _glfw.ns.keycodes[0x30] = GLFW_KEY_TAB;
+ _glfw.ns.keycodes[0x7E] = GLFW_KEY_UP;
+
+ _glfw.ns.keycodes[0x52] = GLFW_KEY_KP_0;
+ _glfw.ns.keycodes[0x53] = GLFW_KEY_KP_1;
+ _glfw.ns.keycodes[0x54] = GLFW_KEY_KP_2;
+ _glfw.ns.keycodes[0x55] = GLFW_KEY_KP_3;
+ _glfw.ns.keycodes[0x56] = GLFW_KEY_KP_4;
+ _glfw.ns.keycodes[0x57] = GLFW_KEY_KP_5;
+ _glfw.ns.keycodes[0x58] = GLFW_KEY_KP_6;
+ _glfw.ns.keycodes[0x59] = GLFW_KEY_KP_7;
+ _glfw.ns.keycodes[0x5B] = GLFW_KEY_KP_8;
+ _glfw.ns.keycodes[0x5C] = GLFW_KEY_KP_9;
+ _glfw.ns.keycodes[0x45] = GLFW_KEY_KP_ADD;
+ _glfw.ns.keycodes[0x41] = GLFW_KEY_KP_DECIMAL;
+ _glfw.ns.keycodes[0x4B] = GLFW_KEY_KP_DIVIDE;
+ _glfw.ns.keycodes[0x4C] = GLFW_KEY_KP_ENTER;
+ _glfw.ns.keycodes[0x51] = GLFW_KEY_KP_EQUAL;
+ _glfw.ns.keycodes[0x43] = GLFW_KEY_KP_MULTIPLY;
+ _glfw.ns.keycodes[0x4E] = GLFW_KEY_KP_SUBTRACT;
+
+ for (int scancode = 0; scancode < 256; scancode++)
+ {
+ // Store the reverse translation for faster key name lookup
+ if (_glfw.ns.keycodes[scancode] >= 0)
+ _glfw.ns.scancodes[_glfw.ns.keycodes[scancode]] = scancode;
+ }
+}
+
+// Retrieve Unicode data for the current keyboard layout
+//
+static GLFWbool updateUnicodeData(void)
+{
+ if (_glfw.ns.inputSource)
+ {
+ CFRelease(_glfw.ns.inputSource);
+ _glfw.ns.inputSource = NULL;
+ _glfw.ns.unicodeData = nil;
+ }
+
+ _glfw.ns.inputSource = TISCopyCurrentKeyboardLayoutInputSource();
+ if (!_glfw.ns.inputSource)
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "Cocoa: Failed to retrieve keyboard layout input source");
+ return GLFW_FALSE;
+ }
+
+ _glfw.ns.unicodeData =
+ TISGetInputSourceProperty(_glfw.ns.inputSource,
+ kTISPropertyUnicodeKeyLayoutData);
+ if (!_glfw.ns.unicodeData)
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "Cocoa: Failed to retrieve keyboard layout Unicode data");
+ return GLFW_FALSE;
+ }
+
+ return GLFW_TRUE;
+}
+
+// Load HIToolbox.framework and the TIS symbols we need from it
+//
+static GLFWbool initializeTIS(void)
+{
+ // This works only because Cocoa has already loaded it properly
+ _glfw.ns.tis.bundle =
+ CFBundleGetBundleWithIdentifier(CFSTR("com.apple.HIToolbox"));
+ if (!_glfw.ns.tis.bundle)
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "Cocoa: Failed to load HIToolbox.framework");
+ return GLFW_FALSE;
+ }
+
+ CFStringRef* kPropertyUnicodeKeyLayoutData =
+ CFBundleGetDataPointerForName(_glfw.ns.tis.bundle,
+ CFSTR("kTISPropertyUnicodeKeyLayoutData"));
+ _glfw.ns.tis.CopyCurrentKeyboardLayoutInputSource =
+ CFBundleGetFunctionPointerForName(_glfw.ns.tis.bundle,
+ CFSTR("TISCopyCurrentKeyboardLayoutInputSource"));
+ _glfw.ns.tis.GetInputSourceProperty =
+ CFBundleGetFunctionPointerForName(_glfw.ns.tis.bundle,
+ CFSTR("TISGetInputSourceProperty"));
+ _glfw.ns.tis.GetKbdType =
+ CFBundleGetFunctionPointerForName(_glfw.ns.tis.bundle,
+ CFSTR("LMGetKbdType"));
+
+ if (!kPropertyUnicodeKeyLayoutData ||
+ !TISCopyCurrentKeyboardLayoutInputSource ||
+ !TISGetInputSourceProperty ||
+ !LMGetKbdType)
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "Cocoa: Failed to load TIS API symbols");
+ return GLFW_FALSE;
+ }
+
+ _glfw.ns.tis.kPropertyUnicodeKeyLayoutData =
+ *kPropertyUnicodeKeyLayoutData;
+
+ return updateUnicodeData();
+}
+
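+// Helper object that receives keyboard layout change notifications and hosts
+// the no-op selector used when detaching a thread during initialization
+//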
+@interface GLFWHelper : NSObject
+@end
+
+@implementation GLFWHelper
+
+- (void)selectedKeyboardInputSourceChanged:(NSObject*)object
+{
+ updateUnicodeData();
+}
+
+- (void)doNothing:(id)object
+{
+}
+
+@end // GLFWHelper
+
+@interface GLFWApplicationDelegate : NSObject <NSApplicationDelegate>
+@end
+
+@implementation GLFWApplicationDelegate
+
+- (NSApplicationTerminateReply)applicationShouldTerminate:(NSApplication *)sender
+{
+ for (_GLFWwindow* window = _glfw.windowListHead; window; window = window->next)
+ _glfwInputWindowCloseRequest(window);
+
+ return NSTerminateCancel;
+}
+
+- (void)applicationDidChangeScreenParameters:(NSNotification *) notification
+{
+ for (_GLFWwindow* window = _glfw.windowListHead; window; window = window->next)
+ {
+ if (window->context.client != GLFW_NO_API)
+ [window->context.nsgl.object update];
+ }
+
+ _glfwPollMonitorsCocoa();
+}
+
+- (void)applicationWillFinishLaunching:(NSNotification *)notification
+{
+ if (_glfw.hints.init.ns.menubar)
+ {
+ // Menu bar setup must go between sharedApplication and finishLaunching
+ // in order to properly emulate the behavior of NSApplicationMain
+
+ if ([[NSBundle mainBundle] pathForResource:@"MainMenu" ofType:@"nib"])
+ {
+ [[NSBundle mainBundle] loadNibNamed:@"MainMenu"
+ owner:NSApp
+ topLevelObjects:&_glfw.ns.nibObjects];
+ }
+ else
+ createMenuBar();
+ }
+}
+
+- (void)applicationDidFinishLaunching:(NSNotification *)notification
+{
+ _glfwPostEmptyEventCocoa();
+ [NSApp stop:nil];
+}
+
+- (void)applicationDidHide:(NSNotification *)notification
+{
+ for (int i = 0; i < _glfw.monitorCount; i++)
+ _glfwRestoreVideoModeCocoa(_glfw.monitors[i]);
+}
+
+@end // GLFWApplicationDelegate
+
+
+//////////////////////////////////////////////////////////////////////////
+////// GLFW internal API //////
+//////////////////////////////////////////////////////////////////////////
+
+void* _glfwLoadLocalVulkanLoaderCocoa(void)
+{
+ CFBundleRef bundle = CFBundleGetMainBundle();
+ if (!bundle)
+ return NULL;
+
+ CFURLRef url =
+ CFBundleCopyAuxiliaryExecutableURL(bundle, CFSTR("libvulkan.1.dylib"));
+ if (!url)
+ return NULL;
+
+ char path[PATH_MAX];
+ void* handle = NULL;
+
+ if (CFURLGetFileSystemRepresentation(url, true, (UInt8*) path, sizeof(path) - 1))
+ handle = _glfwPlatformLoadModule(path);
+
+ CFRelease(url);
+ return handle;
+}
+
+
+//////////////////////////////////////////////////////////////////////////
+////// GLFW platform API //////
+//////////////////////////////////////////////////////////////////////////
+
+GLFWbool _glfwConnectCocoa(int platformID, _GLFWplatform* platform)
+{
+ const _GLFWplatform cocoa =
+ {
+ GLFW_PLATFORM_COCOA,
+ _glfwInitCocoa,
+ _glfwTerminateCocoa,
+ _glfwGetCursorPosCocoa,
+ _glfwSetCursorPosCocoa,
+ _glfwSetCursorModeCocoa,
+ _glfwSetRawMouseMotionCocoa,
+ _glfwRawMouseMotionSupportedCocoa,
+ _glfwCreateCursorCocoa,
+ _glfwCreateStandardCursorCocoa,
+ _glfwDestroyCursorCocoa,
+ _glfwSetCursorCocoa,
+ _glfwGetScancodeNameCocoa,
+ _glfwGetKeyScancodeCocoa,
+ _glfwSetClipboardStringCocoa,
+ _glfwGetClipboardStringCocoa,
+ _glfwInitJoysticksCocoa,
+ _glfwTerminateJoysticksCocoa,
+ _glfwPollJoystickCocoa,
+ _glfwGetMappingNameCocoa,
+ _glfwUpdateGamepadGUIDCocoa,
+ _glfwFreeMonitorCocoa,
+ _glfwGetMonitorPosCocoa,
+ _glfwGetMonitorContentScaleCocoa,
+ _glfwGetMonitorWorkareaCocoa,
+ _glfwGetVideoModesCocoa,
+ _glfwGetVideoModeCocoa,
+ _glfwGetGammaRampCocoa,
+ _glfwSetGammaRampCocoa,
+ _glfwCreateWindowCocoa,
+ _glfwDestroyWindowCocoa,
+ _glfwSetWindowTitleCocoa,
+ _glfwSetWindowIconCocoa,
+ _glfwGetWindowPosCocoa,
+ _glfwSetWindowPosCocoa,
+ _glfwGetWindowSizeCocoa,
+ _glfwSetWindowSizeCocoa,
+ _glfwSetWindowSizeLimitsCocoa,
+ _glfwSetWindowAspectRatioCocoa,
+ _glfwGetFramebufferSizeCocoa,
+ _glfwGetWindowFrameSizeCocoa,
+ _glfwGetWindowContentScaleCocoa,
+ _glfwIconifyWindowCocoa,
+ _glfwRestoreWindowCocoa,
+ _glfwMaximizeWindowCocoa,
+ _glfwShowWindowCocoa,
+ _glfwHideWindowCocoa,
+ _glfwRequestWindowAttentionCocoa,
+ _glfwFocusWindowCocoa,
+ _glfwSetWindowMonitorCocoa,
+ _glfwWindowFocusedCocoa,
+ _glfwWindowIconifiedCocoa,
+ _glfwWindowVisibleCocoa,
+ _glfwWindowMaximizedCocoa,
+ _glfwWindowHoveredCocoa,
+ _glfwFramebufferTransparentCocoa,
+ _glfwGetWindowOpacityCocoa,
+ _glfwSetWindowResizableCocoa,
+ _glfwSetWindowDecoratedCocoa,
+ _glfwSetWindowFloatingCocoa,
+ _glfwSetWindowOpacityCocoa,
+ _glfwSetWindowMousePassthroughCocoa,
+ _glfwPollEventsCocoa,
+ _glfwWaitEventsCocoa,
+ _glfwWaitEventsTimeoutCocoa,
+ _glfwPostEmptyEventCocoa,
+ _glfwGetEGLPlatformCocoa,
+ _glfwGetEGLNativeDisplayCocoa,
+ _glfwGetEGLNativeWindowCocoa,
+ _glfwGetRequiredInstanceExtensionsCocoa,
+ _glfwGetPhysicalDevicePresentationSupportCocoa,
+ _glfwCreateWindowSurfaceCocoa,
+ };
+
+ *platform = cocoa;
+ return GLFW_TRUE;
+}
+
+int _glfwInitCocoa(void)
+{
+ @autoreleasepool {
+
+ _glfw.ns.helper = [[GLFWHelper alloc] init];
+
+ [NSThread detachNewThreadSelector:@selector(doNothing:)
+ toTarget:_glfw.ns.helper
+ withObject:nil];
+
+ [NSApplication sharedApplication];
+
+ _glfw.ns.delegate = [[GLFWApplicationDelegate alloc] init];
+ if (_glfw.ns.delegate == nil)
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "Cocoa: Failed to create application delegate");
+ return GLFW_FALSE;
+ }
+
+ [NSApp setDelegate:_glfw.ns.delegate];
+
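+    // Key-up events are not delivered to the key window while the Command key
+    // is held, so forward them there manually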
+ NSEvent* (^block)(NSEvent*) = ^ NSEvent* (NSEvent* event)
+ {
+ if ([event modifierFlags] & NSEventModifierFlagCommand)
+ [[NSApp keyWindow] sendEvent:event];
+
+ return event;
+ };
+
+ _glfw.ns.keyUpMonitor =
+ [NSEvent addLocalMonitorForEventsMatchingMask:NSEventMaskKeyUp
+ handler:block];
+
+ if (_glfw.hints.init.ns.chdir)
+ changeToResourcesDirectory();
+
+ // Press and Hold prevents some keys from emitting repeated characters
+ NSDictionary* defaults = @{@"ApplePressAndHoldEnabled":@NO};
+ [[NSUserDefaults standardUserDefaults] registerDefaults:defaults];
+
+ [[NSNotificationCenter defaultCenter]
+ addObserver:_glfw.ns.helper
+ selector:@selector(selectedKeyboardInputSourceChanged:)
+ name:NSTextInputContextKeyboardSelectionDidChangeNotification
+ object:nil];
+
+ createKeyTables();
+
+ _glfw.ns.eventSource = CGEventSourceCreate(kCGEventSourceStateHIDSystemState);
+ if (!_glfw.ns.eventSource)
+ return GLFW_FALSE;
+
+ CGEventSourceSetLocalEventsSuppressionInterval(_glfw.ns.eventSource, 0.0);
+
+ if (!initializeTIS())
+ return GLFW_FALSE;
+
+ _glfwPollMonitorsCocoa();
+
+ if (![[NSRunningApplication currentApplication] isFinishedLaunching])
+ [NSApp run];
+
+ // In case we are unbundled, make us a proper UI application
+ if (_glfw.hints.init.ns.menubar)
+ [NSApp setActivationPolicy:NSApplicationActivationPolicyRegular];
+
+ return GLFW_TRUE;
+
+ } // autoreleasepool
+}
+
+void _glfwTerminateCocoa(void)
+{
+ @autoreleasepool {
+
+ if (_glfw.ns.inputSource)
+ {
+ CFRelease(_glfw.ns.inputSource);
+ _glfw.ns.inputSource = NULL;
+ _glfw.ns.unicodeData = nil;
+ }
+
+ if (_glfw.ns.eventSource)
+ {
+ CFRelease(_glfw.ns.eventSource);
+ _glfw.ns.eventSource = NULL;
+ }
+
+ if (_glfw.ns.delegate)
+ {
+ [NSApp setDelegate:nil];
+ [_glfw.ns.delegate release];
+ _glfw.ns.delegate = nil;
+ }
+
+ if (_glfw.ns.helper)
+ {
+ [[NSNotificationCenter defaultCenter]
+ removeObserver:_glfw.ns.helper
+ name:NSTextInputContextKeyboardSelectionDidChangeNotification
+ object:nil];
+ [[NSNotificationCenter defaultCenter]
+ removeObserver:_glfw.ns.helper];
+ [_glfw.ns.helper release];
+ _glfw.ns.helper = nil;
+ }
+
+ if (_glfw.ns.keyUpMonitor)
+ [NSEvent removeMonitor:_glfw.ns.keyUpMonitor];
+
+ _glfw_free(_glfw.ns.clipboardString);
+
+ _glfwTerminateNSGL();
+ _glfwTerminateEGL();
+ _glfwTerminateOSMesa();
+
+ } // autoreleasepool
+}
+
diff --git a/chromium/third_party/dawn/third_party/glfw/src/cocoa_joystick.h b/chromium/third_party/dawn/third_party/glfw/src/cocoa_joystick.h
new file mode 100644
index 00000000000..756911a2740
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/src/cocoa_joystick.h
@@ -0,0 +1,51 @@
+//========================================================================
+// GLFW 3.4 Cocoa - www.glfw.org
+//------------------------------------------------------------------------
+// Copyright (c) 2006-2017 Camilla Löwy <elmindreda@glfw.org>
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would
+// be appreciated but is not required.
+//
+// 2. Altered source versions must be plainly marked as such, and must not
+// be misrepresented as being the original software.
+//
+// 3. This notice may not be removed or altered from any source
+// distribution.
+//
+//========================================================================
+
+#include <IOKit/IOKitLib.h>
+#include <IOKit/IOCFPlugIn.h>
+#include <IOKit/hid/IOHIDKeys.h>
+
+#define GLFW_COCOA_JOYSTICK_STATE _GLFWjoystickNS ns;
+#define GLFW_COCOA_LIBRARY_JOYSTICK_STATE
+
+#define GLFW_BUILD_COCOA_MAPPINGS
+
+// Cocoa-specific per-joystick data
+//
+typedef struct _GLFWjoystickNS
+{
+ IOHIDDeviceRef device;
+ CFMutableArrayRef axes;
+ CFMutableArrayRef buttons;
+ CFMutableArrayRef hats;
+} _GLFWjoystickNS;
+
+GLFWbool _glfwInitJoysticksCocoa(void);
+void _glfwTerminateJoysticksCocoa(void);
+int _glfwPollJoystickCocoa(_GLFWjoystick* js, int mode);
+const char* _glfwGetMappingNameCocoa(void);
+void _glfwUpdateGamepadGUIDCocoa(char* guid);
+
diff --git a/chromium/third_party/dawn/third_party/glfw/src/cocoa_joystick.m b/chromium/third_party/dawn/third_party/glfw/src/cocoa_joystick.m
new file mode 100644
index 00000000000..e09e1efae8c
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/src/cocoa_joystick.m
@@ -0,0 +1,477 @@
+//========================================================================
+// GLFW 3.4 Cocoa - www.glfw.org
+//------------------------------------------------------------------------
+// Copyright (c) 2009-2019 Camilla Löwy <elmindreda@glfw.org>
+// Copyright (c) 2012 Torsten Walluhn <tw@mad-cad.net>
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would
+// be appreciated but is not required.
+//
+// 2. Altered source versions must be plainly marked as such, and must not
+// be misrepresented as being the original software.
+//
+// 3. This notice may not be removed or altered from any source
+// distribution.
+//
+//========================================================================
+// It is fine to use C99 in this file because it will not be built with VS
+//========================================================================
+
+#include "internal.h"
+
+#include <unistd.h>
+#include <ctype.h>
+#include <string.h>
+
+#include <mach/mach.h>
+#include <mach/mach_error.h>
+
+#include <CoreFoundation/CoreFoundation.h>
+#include <Kernel/IOKit/hidsystem/IOHIDUsageTables.h>
+
+
+// Joystick element information
+//
+typedef struct _GLFWjoyelementNS
+{
+ IOHIDElementRef native;
+ uint32_t usage;
+ int index;
+ long minimum;
+ long maximum;
+
+} _GLFWjoyelementNS;
+
+
+// Returns the value of the specified element of the specified joystick
+//
+static long getElementValue(_GLFWjoystick* js, _GLFWjoyelementNS* element)
+{
+ IOHIDValueRef valueRef;
+ long value = 0;
+
+ if (js->ns.device)
+ {
+ if (IOHIDDeviceGetValue(js->ns.device,
+ element->native,
+ &valueRef) == kIOReturnSuccess)
+ {
+ value = IOHIDValueGetIntegerValue(valueRef);
+ }
+ }
+
+ return value;
+}
+
+// Comparison function for matching the SDL element order
+//
+static CFComparisonResult compareElements(const void* fp,
+ const void* sp,
+ void* user)
+{
+ const _GLFWjoyelementNS* fe = fp;
+ const _GLFWjoyelementNS* se = sp;
+ if (fe->usage < se->usage)
+ return kCFCompareLessThan;
+ if (fe->usage > se->usage)
+ return kCFCompareGreaterThan;
+ if (fe->index < se->index)
+ return kCFCompareLessThan;
+ if (fe->index > se->index)
+ return kCFCompareGreaterThan;
+ return kCFCompareEqualTo;
+}
+
+// Removes the specified joystick
+//
+static void closeJoystick(_GLFWjoystick* js)
+{
+ if (!js->present)
+ return;
+
+ for (int i = 0; i < CFArrayGetCount(js->ns.axes); i++)
+ _glfw_free((void*) CFArrayGetValueAtIndex(js->ns.axes, i));
+ CFRelease(js->ns.axes);
+
+ for (int i = 0; i < CFArrayGetCount(js->ns.buttons); i++)
+ _glfw_free((void*) CFArrayGetValueAtIndex(js->ns.buttons, i));
+ CFRelease(js->ns.buttons);
+
+ for (int i = 0; i < CFArrayGetCount(js->ns.hats); i++)
+ _glfw_free((void*) CFArrayGetValueAtIndex(js->ns.hats, i));
+ CFRelease(js->ns.hats);
+
+ _glfwFreeJoystick(js);
+ _glfwInputJoystick(js, GLFW_DISCONNECTED);
+}
+
+// Callback for user-initiated joystick addition
+//
+static void matchCallback(void* context,
+ IOReturn result,
+ void* sender,
+ IOHIDDeviceRef device)
+{
+ int jid;
+ char name[256];
+ char guid[33];
+ CFTypeRef property;
+ uint32_t vendor = 0, product = 0, version = 0;
+ _GLFWjoystick* js;
+ CFMutableArrayRef axes, buttons, hats;
+
+ for (jid = 0; jid <= GLFW_JOYSTICK_LAST; jid++)
+ {
+ if (_glfw.joysticks[jid].ns.device == device)
+ return;
+ }
+
+ axes = CFArrayCreateMutable(NULL, 0, NULL);
+ buttons = CFArrayCreateMutable(NULL, 0, NULL);
+ hats = CFArrayCreateMutable(NULL, 0, NULL);
+
+ property = IOHIDDeviceGetProperty(device, CFSTR(kIOHIDProductKey));
+ if (property)
+ {
+ CFStringGetCString(property,
+ name,
+ sizeof(name),
+ kCFStringEncodingUTF8);
+ }
+ else
+ strncpy(name, "Unknown", sizeof(name));
+
+ property = IOHIDDeviceGetProperty(device, CFSTR(kIOHIDVendorIDKey));
+ if (property)
+ CFNumberGetValue(property, kCFNumberSInt32Type, &vendor);
+
+ property = IOHIDDeviceGetProperty(device, CFSTR(kIOHIDProductIDKey));
+ if (property)
+ CFNumberGetValue(property, kCFNumberSInt32Type, &product);
+
+ property = IOHIDDeviceGetProperty(device, CFSTR(kIOHIDVersionNumberKey));
+ if (property)
+ CFNumberGetValue(property, kCFNumberSInt32Type, &version);
+
+ // Generate a joystick GUID that matches the SDL 2.0.5+ one
+ if (vendor && product)
+ {
+ sprintf(guid, "03000000%02x%02x0000%02x%02x0000%02x%02x0000",
+ (uint8_t) vendor, (uint8_t) (vendor >> 8),
+ (uint8_t) product, (uint8_t) (product >> 8),
+ (uint8_t) version, (uint8_t) (version >> 8));
+ }
+ else
+ {
+ sprintf(guid, "05000000%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x00",
+ name[0], name[1], name[2], name[3],
+ name[4], name[5], name[6], name[7],
+ name[8], name[9], name[10]);
+ }
+
+ CFArrayRef elements =
+ IOHIDDeviceCopyMatchingElements(device, NULL, kIOHIDOptionsTypeNone);
+
+ for (CFIndex i = 0; i < CFArrayGetCount(elements); i++)
+ {
+ IOHIDElementRef native = (IOHIDElementRef)
+ CFArrayGetValueAtIndex(elements, i);
+ if (CFGetTypeID(native) != IOHIDElementGetTypeID())
+ continue;
+
+ const IOHIDElementType type = IOHIDElementGetType(native);
+ if ((type != kIOHIDElementTypeInput_Axis) &&
+ (type != kIOHIDElementTypeInput_Button) &&
+ (type != kIOHIDElementTypeInput_Misc))
+ {
+ continue;
+ }
+
+ CFMutableArrayRef target = NULL;
+
+ const uint32_t usage = IOHIDElementGetUsage(native);
+ const uint32_t page = IOHIDElementGetUsagePage(native);
+ if (page == kHIDPage_GenericDesktop)
+ {
+ switch (usage)
+ {
+ case kHIDUsage_GD_X:
+ case kHIDUsage_GD_Y:
+ case kHIDUsage_GD_Z:
+ case kHIDUsage_GD_Rx:
+ case kHIDUsage_GD_Ry:
+ case kHIDUsage_GD_Rz:
+ case kHIDUsage_GD_Slider:
+ case kHIDUsage_GD_Dial:
+ case kHIDUsage_GD_Wheel:
+ target = axes;
+ break;
+ case kHIDUsage_GD_Hatswitch:
+ target = hats;
+ break;
+ case kHIDUsage_GD_DPadUp:
+ case kHIDUsage_GD_DPadRight:
+ case kHIDUsage_GD_DPadDown:
+ case kHIDUsage_GD_DPadLeft:
+ case kHIDUsage_GD_SystemMainMenu:
+ case kHIDUsage_GD_Select:
+ case kHIDUsage_GD_Start:
+ target = buttons;
+ break;
+ }
+ }
+ else if (page == kHIDPage_Simulation)
+ {
+ switch (usage)
+ {
+ case kHIDUsage_Sim_Accelerator:
+ case kHIDUsage_Sim_Brake:
+ case kHIDUsage_Sim_Throttle:
+ case kHIDUsage_Sim_Rudder:
+ case kHIDUsage_Sim_Steering:
+ target = axes;
+ break;
+ }
+ }
+ else if (page == kHIDPage_Button || page == kHIDPage_Consumer)
+ target = buttons;
+
+ if (target)
+ {
+ _GLFWjoyelementNS* element = _glfw_calloc(1, sizeof(_GLFWjoyelementNS));
+ element->native = native;
+ element->usage = usage;
+ element->index = (int) CFArrayGetCount(target);
+ element->minimum = IOHIDElementGetLogicalMin(native);
+ element->maximum = IOHIDElementGetLogicalMax(native);
+ CFArrayAppendValue(target, element);
+ }
+ }
+
+ CFRelease(elements);
+
+ CFArraySortValues(axes, CFRangeMake(0, CFArrayGetCount(axes)),
+ compareElements, NULL);
+ CFArraySortValues(buttons, CFRangeMake(0, CFArrayGetCount(buttons)),
+ compareElements, NULL);
+ CFArraySortValues(hats, CFRangeMake(0, CFArrayGetCount(hats)),
+ compareElements, NULL);
+
+ js = _glfwAllocJoystick(name, guid,
+ (int) CFArrayGetCount(axes),
+ (int) CFArrayGetCount(buttons),
+ (int) CFArrayGetCount(hats));
+
+ js->ns.device = device;
+ js->ns.axes = axes;
+ js->ns.buttons = buttons;
+ js->ns.hats = hats;
+
+ _glfwInputJoystick(js, GLFW_CONNECTED);
+}
+
+// Callback for user-initiated joystick removal
+//
+static void removeCallback(void* context,
+ IOReturn result,
+ void* sender,
+ IOHIDDeviceRef device)
+{
+ for (int jid = 0; jid <= GLFW_JOYSTICK_LAST; jid++)
+ {
+ if (_glfw.joysticks[jid].ns.device == device)
+ {
+ closeJoystick(_glfw.joysticks + jid);
+ break;
+ }
+ }
+}
+
+
+//////////////////////////////////////////////////////////////////////////
+////// GLFW platform API //////
+//////////////////////////////////////////////////////////////////////////
+
+GLFWbool _glfwInitJoysticksCocoa(void)
+{
+ CFMutableArrayRef matching;
+ const long usages[] =
+ {
+ kHIDUsage_GD_Joystick,
+ kHIDUsage_GD_GamePad,
+ kHIDUsage_GD_MultiAxisController
+ };
+
+ _glfw.ns.hidManager = IOHIDManagerCreate(kCFAllocatorDefault,
+ kIOHIDOptionsTypeNone);
+
+ matching = CFArrayCreateMutable(kCFAllocatorDefault,
+ 0,
+ &kCFTypeArrayCallBacks);
+ if (!matching)
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR, "Cocoa: Failed to create array");
+ return GLFW_FALSE;
+ }
+
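+    // Build one matching dictionary per HID usage so the manager reports
+    // joysticks, gamepads and multi-axis controllers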
+ for (size_t i = 0; i < sizeof(usages) / sizeof(long); i++)
+ {
+ const long page = kHIDPage_GenericDesktop;
+
+ CFMutableDictionaryRef dict =
+ CFDictionaryCreateMutable(kCFAllocatorDefault,
+ 0,
+ &kCFTypeDictionaryKeyCallBacks,
+ &kCFTypeDictionaryValueCallBacks);
+ if (!dict)
+ continue;
+
+ CFNumberRef pageRef = CFNumberCreate(kCFAllocatorDefault,
+ kCFNumberLongType,
+ &page);
+ CFNumberRef usageRef = CFNumberCreate(kCFAllocatorDefault,
+ kCFNumberLongType,
+ &usages[i]);
+ if (pageRef && usageRef)
+ {
+ CFDictionarySetValue(dict,
+ CFSTR(kIOHIDDeviceUsagePageKey),
+ pageRef);
+ CFDictionarySetValue(dict,
+ CFSTR(kIOHIDDeviceUsageKey),
+ usageRef);
+ CFArrayAppendValue(matching, dict);
+ }
+
+ if (pageRef)
+ CFRelease(pageRef);
+ if (usageRef)
+ CFRelease(usageRef);
+
+ CFRelease(dict);
+ }
+
+ IOHIDManagerSetDeviceMatchingMultiple(_glfw.ns.hidManager, matching);
+ CFRelease(matching);
+
+ IOHIDManagerRegisterDeviceMatchingCallback(_glfw.ns.hidManager,
+ &matchCallback, NULL);
+ IOHIDManagerRegisterDeviceRemovalCallback(_glfw.ns.hidManager,
+ &removeCallback, NULL);
+ IOHIDManagerScheduleWithRunLoop(_glfw.ns.hidManager,
+ CFRunLoopGetMain(),
+ kCFRunLoopDefaultMode);
+ IOHIDManagerOpen(_glfw.ns.hidManager, kIOHIDOptionsTypeNone);
+
+ // Execute the run loop once in order to register any initially-attached
+ // joysticks
+ CFRunLoopRunInMode(kCFRunLoopDefaultMode, 0, false);
+ return GLFW_TRUE;
+}
+
+void _glfwTerminateJoysticksCocoa(void)
+{
+ for (int jid = 0; jid <= GLFW_JOYSTICK_LAST; jid++)
+ closeJoystick(_glfw.joysticks + jid);
+
+ if (_glfw.ns.hidManager)
+ {
+ CFRelease(_glfw.ns.hidManager);
+ _glfw.ns.hidManager = NULL;
+ }
+}
+
+
+int _glfwPollJoystickCocoa(_GLFWjoystick* js, int mode)
+{
+ if (mode & _GLFW_POLL_AXES)
+ {
+ for (CFIndex i = 0; i < CFArrayGetCount(js->ns.axes); i++)
+ {
+ _GLFWjoyelementNS* axis = (_GLFWjoyelementNS*)
+ CFArrayGetValueAtIndex(js->ns.axes, i);
+
+ const long raw = getElementValue(js, axis);
+ // Perform auto calibration
+ if (raw < axis->minimum)
+ axis->minimum = raw;
+ if (raw > axis->maximum)
+ axis->maximum = raw;
+
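+            // Normalize the raw value to [-1, 1] over the observed range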
+ const long size = axis->maximum - axis->minimum;
+ if (size == 0)
+ _glfwInputJoystickAxis(js, (int) i, 0.f);
+ else
+ {
+ const float value = (2.f * (raw - axis->minimum) / size) - 1.f;
+ _glfwInputJoystickAxis(js, (int) i, value);
+ }
+ }
+ }
+
+ if (mode & _GLFW_POLL_BUTTONS)
+ {
+ for (CFIndex i = 0; i < CFArrayGetCount(js->ns.buttons); i++)
+ {
+ _GLFWjoyelementNS* button = (_GLFWjoyelementNS*)
+ CFArrayGetValueAtIndex(js->ns.buttons, i);
+ const char value = getElementValue(js, button) - button->minimum;
+ const int state = (value > 0) ? GLFW_PRESS : GLFW_RELEASE;
+ _glfwInputJoystickButton(js, (int) i, state);
+ }
+
+ for (CFIndex i = 0; i < CFArrayGetCount(js->ns.hats); i++)
+ {
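+            // HID hat switch values 0-7 are the eight directions clockwise
+            // from up; any other value is treated as centered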
+ const int states[9] =
+ {
+ GLFW_HAT_UP,
+ GLFW_HAT_RIGHT_UP,
+ GLFW_HAT_RIGHT,
+ GLFW_HAT_RIGHT_DOWN,
+ GLFW_HAT_DOWN,
+ GLFW_HAT_LEFT_DOWN,
+ GLFW_HAT_LEFT,
+ GLFW_HAT_LEFT_UP,
+ GLFW_HAT_CENTERED
+ };
+
+ _GLFWjoyelementNS* hat = (_GLFWjoyelementNS*)
+ CFArrayGetValueAtIndex(js->ns.hats, i);
+ long state = getElementValue(js, hat) - hat->minimum;
+ if (state < 0 || state > 8)
+ state = 8;
+
+ _glfwInputJoystickHat(js, (int) i, states[state]);
+ }
+ }
+
+ return js->present;
+}
+
+const char* _glfwGetMappingNameCocoa(void)
+{
+ return "Mac OS X";
+}
+
+void _glfwUpdateGamepadGUIDCocoa(char* guid)
+{
+ if ((strncmp(guid + 4, "000000000000", 12) == 0) &&
+ (strncmp(guid + 20, "000000000000", 12) == 0))
+ {
+ char original[33];
+ strncpy(original, guid, sizeof(original) - 1);
+ sprintf(guid, "03000000%.4s0000%.4s000000000000",
+ original, original + 16);
+ }
+}
+
diff --git a/chromium/third_party/dawn/third_party/glfw/src/cocoa_monitor.m b/chromium/third_party/dawn/third_party/glfw/src/cocoa_monitor.m
new file mode 100644
index 00000000000..64d9eb2c777
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/src/cocoa_monitor.m
@@ -0,0 +1,627 @@
+//========================================================================
+// GLFW 3.4 macOS - www.glfw.org
+//------------------------------------------------------------------------
+// Copyright (c) 2002-2006 Marcus Geelnard
+// Copyright (c) 2006-2019 Camilla Löwy <elmindreda@glfw.org>
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would
+// be appreciated but is not required.
+//
+// 2. Altered source versions must be plainly marked as such, and must not
+// be misrepresented as being the original software.
+//
+// 3. This notice may not be removed or altered from any source
+// distribution.
+//
+//========================================================================
+// It is fine to use C99 in this file because it will not be built with VS
+//========================================================================
+
+#include "internal.h"
+
+#include <stdlib.h>
+#include <limits.h>
+#include <math.h>
+
+#include <IOKit/graphics/IOGraphicsLib.h>
+#include <ApplicationServices/ApplicationServices.h>
+
+
+// Get the name of the specified display, or NULL
+//
+static char* getMonitorName(CGDirectDisplayID displayID, NSScreen* screen)
+{
+ // IOKit doesn't work on Apple Silicon anymore
+ // Luckily, 10.15 introduced -[NSScreen localizedName].
+ // Use it if available, and fall back to IOKit otherwise.
+ if (screen)
+ {
+ if ([screen respondsToSelector:@selector(localizedName)])
+ {
+ NSString* name = [screen valueForKey:@"localizedName"];
+ if (name)
+ return _glfw_strdup([name UTF8String]);
+ }
+ }
+
+ io_iterator_t it;
+ io_service_t service;
+ CFDictionaryRef info;
+
+ if (IOServiceGetMatchingServices(MACH_PORT_NULL,
+ IOServiceMatching("IODisplayConnect"),
+ &it) != 0)
+ {
+ // This may happen if a desktop Mac is running headless
+ return _glfw_strdup("Display");
+ }
+
+ while ((service = IOIteratorNext(it)) != 0)
+ {
+ info = IODisplayCreateInfoDictionary(service,
+ kIODisplayOnlyPreferredName);
+
+ CFNumberRef vendorIDRef =
+ CFDictionaryGetValue(info, CFSTR(kDisplayVendorID));
+ CFNumberRef productIDRef =
+ CFDictionaryGetValue(info, CFSTR(kDisplayProductID));
+ if (!vendorIDRef || !productIDRef)
+ {
+ CFRelease(info);
+ continue;
+ }
+
+ unsigned int vendorID, productID;
+ CFNumberGetValue(vendorIDRef, kCFNumberIntType, &vendorID);
+ CFNumberGetValue(productIDRef, kCFNumberIntType, &productID);
+
+ if (CGDisplayVendorNumber(displayID) == vendorID &&
+ CGDisplayModelNumber(displayID) == productID)
+ {
+ // Info dictionary is used and freed below
+ break;
+ }
+
+ CFRelease(info);
+ }
+
+ IOObjectRelease(it);
+
+ if (!service)
+ return _glfw_strdup("Display");
+
+ CFDictionaryRef names =
+ CFDictionaryGetValue(info, CFSTR(kDisplayProductName));
+
+ CFStringRef nameRef;
+
+ if (!names || !CFDictionaryGetValueIfPresent(names, CFSTR("en_US"),
+ (const void**) &nameRef))
+ {
+ // This may happen if a desktop Mac is running headless
+ CFRelease(info);
+ return _glfw_strdup("Display");
+ }
+
+ const CFIndex size =
+ CFStringGetMaximumSizeForEncoding(CFStringGetLength(nameRef),
+ kCFStringEncodingUTF8);
+ char* name = _glfw_calloc(size + 1, 1);
+ CFStringGetCString(nameRef, name, size, kCFStringEncodingUTF8);
+
+ CFRelease(info);
+ return name;
+}
+
+// Check whether the display mode should be included in enumeration
+//
+static GLFWbool modeIsGood(CGDisplayModeRef mode)
+{
+ uint32_t flags = CGDisplayModeGetIOFlags(mode);
+
+ if (!(flags & kDisplayModeValidFlag) || !(flags & kDisplayModeSafeFlag))
+ return GLFW_FALSE;
+ if (flags & kDisplayModeInterlacedFlag)
+ return GLFW_FALSE;
+ if (flags & kDisplayModeStretchedFlag)
+ return GLFW_FALSE;
+
+#if MAC_OS_X_VERSION_MAX_ALLOWED <= 101100
+ CFStringRef format = CGDisplayModeCopyPixelEncoding(mode);
+ if (CFStringCompare(format, CFSTR(IO16BitDirectPixels), 0) &&
+ CFStringCompare(format, CFSTR(IO32BitDirectPixels), 0))
+ {
+ CFRelease(format);
+ return GLFW_FALSE;
+ }
+
+ CFRelease(format);
+#endif /* MAC_OS_X_VERSION_MAX_ALLOWED */
+ return GLFW_TRUE;
+}
+
+// Convert Core Graphics display mode to GLFW video mode
+//
+static GLFWvidmode vidmodeFromCGDisplayMode(CGDisplayModeRef mode,
+ double fallbackRefreshRate)
+{
+ GLFWvidmode result;
+ result.width = (int) CGDisplayModeGetWidth(mode);
+ result.height = (int) CGDisplayModeGetHeight(mode);
+ result.refreshRate = (int) round(CGDisplayModeGetRefreshRate(mode));
+
+ if (result.refreshRate == 0)
+ result.refreshRate = (int) round(fallbackRefreshRate);
+
+#if MAC_OS_X_VERSION_MAX_ALLOWED <= 101100
+ CFStringRef format = CGDisplayModeCopyPixelEncoding(mode);
+ if (CFStringCompare(format, CFSTR(IO16BitDirectPixels), 0) == 0)
+ {
+ result.redBits = 5;
+ result.greenBits = 5;
+ result.blueBits = 5;
+ }
+ else
+#endif /* MAC_OS_X_VERSION_MAX_ALLOWED */
+ {
+ result.redBits = 8;
+ result.greenBits = 8;
+ result.blueBits = 8;
+ }
+
+#if MAC_OS_X_VERSION_MAX_ALLOWED <= 101100
+ CFRelease(format);
+#endif /* MAC_OS_X_VERSION_MAX_ALLOWED */
+ return result;
+}
+
+// Starts reservation for display fading
+//
+static CGDisplayFadeReservationToken beginFadeReservation(void)
+{
+ CGDisplayFadeReservationToken token = kCGDisplayFadeReservationInvalidToken;
+
+ if (CGAcquireDisplayFadeReservation(5, &token) == kCGErrorSuccess)
+ {
+ CGDisplayFade(token, 0.3,
+ kCGDisplayBlendNormal,
+ kCGDisplayBlendSolidColor,
+ 0.0, 0.0, 0.0,
+ TRUE);
+ }
+
+ return token;
+}
+
+// Ends reservation for display fading
+//
+static void endFadeReservation(CGDisplayFadeReservationToken token)
+{
+ if (token != kCGDisplayFadeReservationInvalidToken)
+ {
+ CGDisplayFade(token, 0.5,
+ kCGDisplayBlendSolidColor,
+ kCGDisplayBlendNormal,
+ 0.0, 0.0, 0.0,
+ FALSE);
+ CGReleaseDisplayFadeReservation(token);
+ }
+}
+
+// Returns the display refresh rate queried from the I/O registry
+//
+static double getFallbackRefreshRate(CGDirectDisplayID displayID)
+{
+ double refreshRate = 60.0;
+
+ io_iterator_t it;
+ io_service_t service;
+
+ if (IOServiceGetMatchingServices(MACH_PORT_NULL,
+ IOServiceMatching("IOFramebuffer"),
+ &it) != 0)
+ {
+ return refreshRate;
+ }
+
+ while ((service = IOIteratorNext(it)) != 0)
+ {
+ const CFNumberRef indexRef =
+ IORegistryEntryCreateCFProperty(service,
+ CFSTR("IOFramebufferOpenGLIndex"),
+ kCFAllocatorDefault,
+ kNilOptions);
+ if (!indexRef)
+ continue;
+
+ uint32_t index = 0;
+ CFNumberGetValue(indexRef, kCFNumberIntType, &index);
+ CFRelease(indexRef);
+
+ if (CGOpenGLDisplayMaskToDisplayID(1 << index) != displayID)
+ continue;
+
+ const CFNumberRef clockRef =
+ IORegistryEntryCreateCFProperty(service,
+ CFSTR("IOFBCurrentPixelClock"),
+ kCFAllocatorDefault,
+ kNilOptions);
+ const CFNumberRef countRef =
+ IORegistryEntryCreateCFProperty(service,
+ CFSTR("IOFBCurrentPixelCount"),
+ kCFAllocatorDefault,
+ kNilOptions);
+
+ uint32_t clock = 0, count = 0;
+
+ if (clockRef)
+ {
+ CFNumberGetValue(clockRef, kCFNumberIntType, &clock);
+ CFRelease(clockRef);
+ }
+
+ if (countRef)
+ {
+ CFNumberGetValue(countRef, kCFNumberIntType, &count);
+ CFRelease(countRef);
+ }
+
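+        // The refresh rate in Hz is the pixel clock divided by the number of
+        // pixels per frame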
+ if (clock > 0 && count > 0)
+ refreshRate = clock / (double) count;
+
+ break;
+ }
+
+ IOObjectRelease(it);
+ return refreshRate;
+}
+
+
+//////////////////////////////////////////////////////////////////////////
+////// GLFW internal API //////
+//////////////////////////////////////////////////////////////////////////
+
+// Poll for changes in the set of connected monitors
+//
+void _glfwPollMonitorsCocoa(void)
+{
+ uint32_t displayCount;
+ CGGetOnlineDisplayList(0, NULL, &displayCount);
+ CGDirectDisplayID* displays = _glfw_calloc(displayCount, sizeof(CGDirectDisplayID));
+ CGGetOnlineDisplayList(displayCount, displays, &displayCount);
+
+ for (int i = 0; i < _glfw.monitorCount; i++)
+ _glfw.monitors[i]->ns.screen = nil;
+
+ _GLFWmonitor** disconnected = NULL;
+ uint32_t disconnectedCount = _glfw.monitorCount;
+ if (disconnectedCount)
+ {
+ disconnected = _glfw_calloc(_glfw.monitorCount, sizeof(_GLFWmonitor*));
+ memcpy(disconnected,
+ _glfw.monitors,
+ _glfw.monitorCount * sizeof(_GLFWmonitor*));
+ }
+
+ for (uint32_t i = 0; i < displayCount; i++)
+ {
+ if (CGDisplayIsAsleep(displays[i]))
+ continue;
+
+ const uint32_t unitNumber = CGDisplayUnitNumber(displays[i]);
+ NSScreen* screen = nil;
+
+ for (screen in [NSScreen screens])
+ {
+ NSNumber* screenNumber = [screen deviceDescription][@"NSScreenNumber"];
+
+ // HACK: Compare unit numbers instead of display IDs to work around
+ // display replacement on machines with automatic graphics
+ // switching
+ if (CGDisplayUnitNumber([screenNumber unsignedIntValue]) == unitNumber)
+ break;
+ }
+
+ // HACK: Compare unit numbers instead of display IDs to work around
+ // display replacement on machines with automatic graphics
+ // switching
+ uint32_t j;
+ for (j = 0; j < disconnectedCount; j++)
+ {
+ if (disconnected[j] && disconnected[j]->ns.unitNumber == unitNumber)
+ {
+ disconnected[j]->ns.screen = screen;
+ disconnected[j] = NULL;
+ break;
+ }
+ }
+
+ if (j < disconnectedCount)
+ continue;
+
+ const CGSize size = CGDisplayScreenSize(displays[i]);
+ char* name = getMonitorName(displays[i], screen);
+ if (!name)
+ continue;
+
+ _GLFWmonitor* monitor = _glfwAllocMonitor(name, size.width, size.height);
+ monitor->ns.displayID = displays[i];
+ monitor->ns.unitNumber = unitNumber;
+ monitor->ns.screen = screen;
+
+ _glfw_free(name);
+
+ CGDisplayModeRef mode = CGDisplayCopyDisplayMode(displays[i]);
+ if (CGDisplayModeGetRefreshRate(mode) == 0.0)
+ monitor->ns.fallbackRefreshRate = getFallbackRefreshRate(displays[i]);
+ CGDisplayModeRelease(mode);
+
+ _glfwInputMonitor(monitor, GLFW_CONNECTED, _GLFW_INSERT_LAST);
+ }
+
+ for (uint32_t i = 0; i < disconnectedCount; i++)
+ {
+ if (disconnected[i])
+ _glfwInputMonitor(disconnected[i], GLFW_DISCONNECTED, 0);
+ }
+
+ _glfw_free(disconnected);
+ _glfw_free(displays);
+}
+
+// Change the current video mode
+//
+void _glfwSetVideoModeCocoa(_GLFWmonitor* monitor, const GLFWvidmode* desired)
+{
+ GLFWvidmode current;
+ _glfwGetVideoModeCocoa(monitor, &current);
+
+ const GLFWvidmode* best = _glfwChooseVideoMode(monitor, desired);
+ if (_glfwCompareVideoModes(&current, best) == 0)
+ return;
+
+ CFArrayRef modes = CGDisplayCopyAllDisplayModes(monitor->ns.displayID, NULL);
+ const CFIndex count = CFArrayGetCount(modes);
+ CGDisplayModeRef native = NULL;
+
+ for (CFIndex i = 0; i < count; i++)
+ {
+ CGDisplayModeRef dm = (CGDisplayModeRef) CFArrayGetValueAtIndex(modes, i);
+ if (!modeIsGood(dm))
+ continue;
+
+ const GLFWvidmode mode =
+ vidmodeFromCGDisplayMode(dm, monitor->ns.fallbackRefreshRate);
+ if (_glfwCompareVideoModes(best, &mode) == 0)
+ {
+ native = dm;
+ break;
+ }
+ }
+
+ if (native)
+ {
+ if (monitor->ns.previousMode == NULL)
+ monitor->ns.previousMode = CGDisplayCopyDisplayMode(monitor->ns.displayID);
+
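+        // Fade the display out around the mode switch to hide the transition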
+ CGDisplayFadeReservationToken token = beginFadeReservation();
+ CGDisplaySetDisplayMode(monitor->ns.displayID, native, NULL);
+ endFadeReservation(token);
+ }
+
+ CFRelease(modes);
+}
+
+// Restore the previously saved (original) video mode
+//
+void _glfwRestoreVideoModeCocoa(_GLFWmonitor* monitor)
+{
+ if (monitor->ns.previousMode)
+ {
+ CGDisplayFadeReservationToken token = beginFadeReservation();
+ CGDisplaySetDisplayMode(monitor->ns.displayID,
+ monitor->ns.previousMode, NULL);
+ endFadeReservation(token);
+
+ CGDisplayModeRelease(monitor->ns.previousMode);
+ monitor->ns.previousMode = NULL;
+ }
+}
+
+
+//////////////////////////////////////////////////////////////////////////
+////// GLFW platform API //////
+//////////////////////////////////////////////////////////////////////////
+
+void _glfwFreeMonitorCocoa(_GLFWmonitor* monitor)
+{
+}
+
+void _glfwGetMonitorPosCocoa(_GLFWmonitor* monitor, int* xpos, int* ypos)
+{
+ @autoreleasepool {
+
+ const CGRect bounds = CGDisplayBounds(monitor->ns.displayID);
+
+ if (xpos)
+ *xpos = (int) bounds.origin.x;
+ if (ypos)
+ *ypos = (int) bounds.origin.y;
+
+ } // autoreleasepool
+}
+
+void _glfwGetMonitorContentScaleCocoa(_GLFWmonitor* monitor,
+ float* xscale, float* yscale)
+{
+ @autoreleasepool {
+
+ if (!monitor->ns.screen)
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "Cocoa: Cannot query content scale without screen");
+ }
+
+ const NSRect points = [monitor->ns.screen frame];
+ const NSRect pixels = [monitor->ns.screen convertRectToBacking:points];
+
+ if (xscale)
+ *xscale = (float) (pixels.size.width / points.size.width);
+ if (yscale)
+ *yscale = (float) (pixels.size.height / points.size.height);
+
+ } // autoreleasepool
+}
+
+void _glfwGetMonitorWorkareaCocoa(_GLFWmonitor* monitor,
+ int* xpos, int* ypos,
+ int* width, int* height)
+{
+ @autoreleasepool {
+
+ if (!monitor->ns.screen)
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "Cocoa: Cannot query workarea without screen");
+ }
+
+ const NSRect frameRect = [monitor->ns.screen visibleFrame];
+
+ if (xpos)
+ *xpos = frameRect.origin.x;
+ if (ypos)
+ *ypos = _glfwTransformYCocoa(frameRect.origin.y + frameRect.size.height - 1);
+ if (width)
+ *width = frameRect.size.width;
+ if (height)
+ *height = frameRect.size.height;
+
+ } // autoreleasepool
+}
+
+GLFWvidmode* _glfwGetVideoModesCocoa(_GLFWmonitor* monitor, int* count)
+{
+ @autoreleasepool {
+
+ *count = 0;
+
+ CFArrayRef modes = CGDisplayCopyAllDisplayModes(monitor->ns.displayID, NULL);
+ const CFIndex found = CFArrayGetCount(modes);
+ GLFWvidmode* result = _glfw_calloc(found, sizeof(GLFWvidmode));
+
+ for (CFIndex i = 0; i < found; i++)
+ {
+ CGDisplayModeRef dm = (CGDisplayModeRef) CFArrayGetValueAtIndex(modes, i);
+ if (!modeIsGood(dm))
+ continue;
+
+ const GLFWvidmode mode =
+ vidmodeFromCGDisplayMode(dm, monitor->ns.fallbackRefreshRate);
+ CFIndex j;
+
+ for (j = 0; j < *count; j++)
+ {
+ if (_glfwCompareVideoModes(result + j, &mode) == 0)
+ break;
+ }
+
+ // Skip duplicate modes
+ if (j < *count)
+ continue;
+
+ (*count)++;
+ result[*count - 1] = mode;
+ }
+
+ CFRelease(modes);
+ return result;
+
+ } // autoreleasepool
+}
+
+void _glfwGetVideoModeCocoa(_GLFWmonitor* monitor, GLFWvidmode *mode)
+{
+ @autoreleasepool {
+
+ CGDisplayModeRef native = CGDisplayCopyDisplayMode(monitor->ns.displayID);
+ *mode = vidmodeFromCGDisplayMode(native, monitor->ns.fallbackRefreshRate);
+ CGDisplayModeRelease(native);
+
+ } // autoreleasepool
+}
+
+GLFWbool _glfwGetGammaRampCocoa(_GLFWmonitor* monitor, GLFWgammaramp* ramp)
+{
+ @autoreleasepool {
+
+ uint32_t size = CGDisplayGammaTableCapacity(monitor->ns.displayID);
+ CGGammaValue* values = _glfw_calloc(size * 3, sizeof(CGGammaValue));
+
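+    // The temporary table stores the red, green and blue channels back to back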
+ CGGetDisplayTransferByTable(monitor->ns.displayID,
+ size,
+ values,
+ values + size,
+ values + size * 2,
+ &size);
+
+ _glfwAllocGammaArrays(ramp, size);
+
+ for (uint32_t i = 0; i < size; i++)
+ {
+ ramp->red[i] = (unsigned short) (values[i] * 65535);
+ ramp->green[i] = (unsigned short) (values[i + size] * 65535);
+ ramp->blue[i] = (unsigned short) (values[i + size * 2] * 65535);
+ }
+
+ _glfw_free(values);
+ return GLFW_TRUE;
+
+ } // autoreleasepool
+}
+
+void _glfwSetGammaRampCocoa(_GLFWmonitor* monitor, const GLFWgammaramp* ramp)
+{
+ @autoreleasepool {
+
+ CGGammaValue* values = _glfw_calloc(ramp->size * 3, sizeof(CGGammaValue));
+
+ for (unsigned int i = 0; i < ramp->size; i++)
+ {
+ values[i] = ramp->red[i] / 65535.f;
+ values[i + ramp->size] = ramp->green[i] / 65535.f;
+ values[i + ramp->size * 2] = ramp->blue[i] / 65535.f;
+ }
+
+ CGSetDisplayTransferByTable(monitor->ns.displayID,
+ ramp->size,
+ values,
+ values + ramp->size,
+ values + ramp->size * 2);
+
+ _glfw_free(values);
+
+ } // autoreleasepool
+}
+
+
+//////////////////////////////////////////////////////////////////////////
+////// GLFW native API //////
+//////////////////////////////////////////////////////////////////////////
+
+GLFWAPI CGDirectDisplayID glfwGetCocoaMonitor(GLFWmonitor* handle)
+{
+ _GLFWmonitor* monitor = (_GLFWmonitor*) handle;
+ _GLFW_REQUIRE_INIT_OR_RETURN(kCGNullDirectDisplay);
+ return monitor->ns.displayID;
+}
+
diff --git a/chromium/third_party/dawn/third_party/glfw/src/cocoa_platform.h b/chromium/third_party/dawn/third_party/glfw/src/cocoa_platform.h
new file mode 100644
index 00000000000..9259b195e19
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/src/cocoa_platform.h
@@ -0,0 +1,302 @@
+//========================================================================
+// GLFW 3.4 macOS - www.glfw.org
+//------------------------------------------------------------------------
+// Copyright (c) 2009-2019 Camilla Löwy <elmindreda@glfw.org>
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would
+// be appreciated but is not required.
+//
+// 2. Altered source versions must be plainly marked as such, and must not
+// be misrepresented as being the original software.
+//
+// 3. This notice may not be removed or altered from any source
+// distribution.
+//
+//========================================================================
+
+#include <stdint.h>
+
+#include <Carbon/Carbon.h>
+#include <IOKit/hid/IOHIDLib.h>
+
+// NOTE: All of NSGL was deprecated in the 10.14 SDK
+// This disables the pointless warnings for every symbol we use
+#ifndef GL_SILENCE_DEPRECATION
+#define GL_SILENCE_DEPRECATION
+#endif
+
+#if defined(__OBJC__)
+#import <Cocoa/Cocoa.h>
+#else
+typedef void* id;
+#endif
+
+// NOTE: Many Cocoa enum values have been renamed and we need to build across
+// SDK versions where one is unavailable or deprecated.
+// We use the newer names in code and replace them with the older names if
+// the base SDK does not provide the newer names.
+
+#if MAC_OS_X_VERSION_MAX_ALLOWED < 101400
+ #define NSOpenGLContextParameterSwapInterval NSOpenGLCPSwapInterval
+ #define NSOpenGLContextParameterSurfaceOpacity NSOpenGLCPSurfaceOpacity
+#endif
+
+#if MAC_OS_X_VERSION_MAX_ALLOWED < 101200
+ #define NSBitmapFormatAlphaNonpremultiplied NSAlphaNonpremultipliedBitmapFormat
+ #define NSEventMaskAny NSAnyEventMask
+ #define NSEventMaskKeyUp NSKeyUpMask
+ #define NSEventModifierFlagCapsLock NSAlphaShiftKeyMask
+ #define NSEventModifierFlagCommand NSCommandKeyMask
+ #define NSEventModifierFlagControl NSControlKeyMask
+ #define NSEventModifierFlagDeviceIndependentFlagsMask NSDeviceIndependentModifierFlagsMask
+ #define NSEventModifierFlagOption NSAlternateKeyMask
+ #define NSEventModifierFlagShift NSShiftKeyMask
+ #define NSEventTypeApplicationDefined NSApplicationDefined
+ #define NSWindowStyleMaskBorderless NSBorderlessWindowMask
+ #define NSWindowStyleMaskClosable NSClosableWindowMask
+ #define NSWindowStyleMaskMiniaturizable NSMiniaturizableWindowMask
+ #define NSWindowStyleMaskResizable NSResizableWindowMask
+ #define NSWindowStyleMaskTitled NSTitledWindowMask
+#endif
+
+// NOTE: Many Cocoa dynamically linked constants have been renamed and we need
+// to build across SDK versions where one is unavailable or deprecated.
+// We use the newer names in code and replace them with the older names if
+// the deployment target is older than the newer names.
+
+#if MAC_OS_X_VERSION_MIN_REQUIRED < 101300
+ #define NSPasteboardTypeURL NSURLPboardType
+#endif
+
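+// Local declarations of the VK_MVK_macos_surface and VK_EXT_metal_surface
+// extension types, so the Vulkan extension headers are not required here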
+typedef VkFlags VkMacOSSurfaceCreateFlagsMVK;
+typedef VkFlags VkMetalSurfaceCreateFlagsEXT;
+
+typedef struct VkMacOSSurfaceCreateInfoMVK
+{
+ VkStructureType sType;
+ const void* pNext;
+ VkMacOSSurfaceCreateFlagsMVK flags;
+ const void* pView;
+} VkMacOSSurfaceCreateInfoMVK;
+
+typedef struct VkMetalSurfaceCreateInfoEXT
+{
+ VkStructureType sType;
+ const void* pNext;
+ VkMetalSurfaceCreateFlagsEXT flags;
+ const void* pLayer;
+} VkMetalSurfaceCreateInfoEXT;
+
+typedef VkResult (APIENTRY *PFN_vkCreateMacOSSurfaceMVK)(VkInstance,const VkMacOSSurfaceCreateInfoMVK*,const VkAllocationCallbacks*,VkSurfaceKHR*);
+typedef VkResult (APIENTRY *PFN_vkCreateMetalSurfaceEXT)(VkInstance,const VkMetalSurfaceCreateInfoEXT*,const VkAllocationCallbacks*,VkSurfaceKHR*);
+
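+// These macros embed the Cocoa- and NSGL-specific state in the shared
+// structs defined in internal.h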
+#define GLFW_COCOA_WINDOW_STATE _GLFWwindowNS ns;
+#define GLFW_COCOA_LIBRARY_WINDOW_STATE _GLFWlibraryNS ns;
+#define GLFW_COCOA_MONITOR_STATE _GLFWmonitorNS ns;
+#define GLFW_COCOA_CURSOR_STATE _GLFWcursorNS ns;
+
+#define GLFW_NSGL_CONTEXT_STATE _GLFWcontextNSGL nsgl;
+#define GLFW_NSGL_LIBRARY_CONTEXT_STATE _GLFWlibraryNSGL nsgl;
+
+// HIToolbox.framework pointer typedefs
+#define kTISPropertyUnicodeKeyLayoutData _glfw.ns.tis.kPropertyUnicodeKeyLayoutData
+typedef TISInputSourceRef (*PFN_TISCopyCurrentKeyboardLayoutInputSource)(void);
+#define TISCopyCurrentKeyboardLayoutInputSource _glfw.ns.tis.CopyCurrentKeyboardLayoutInputSource
+typedef void* (*PFN_TISGetInputSourceProperty)(TISInputSourceRef,CFStringRef);
+#define TISGetInputSourceProperty _glfw.ns.tis.GetInputSourceProperty
+typedef UInt8 (*PFN_LMGetKbdType)(void);
+#define LMGetKbdType _glfw.ns.tis.GetKbdType
+
+
+// NSGL-specific per-context data
+//
+typedef struct _GLFWcontextNSGL
+{
+ id pixelFormat;
+ id object;
+} _GLFWcontextNSGL;
+
+// NSGL-specific global data
+//
+typedef struct _GLFWlibraryNSGL
+{
+ // dlopen handle for OpenGL.framework (for glfwGetProcAddress)
+ CFBundleRef framework;
+} _GLFWlibraryNSGL;
+
+// Cocoa-specific per-window data
+//
+typedef struct _GLFWwindowNS
+{
+ id object;
+ id delegate;
+ id view;
+ id layer;
+
+ GLFWbool maximized;
+ GLFWbool occluded;
+ GLFWbool retina;
+
+ // Cached window properties to filter out duplicate events
+ int width, height;
+ int fbWidth, fbHeight;
+ float xscale, yscale;
+
+    // The running total of the distances the cursor has been warped
+    // since the last cursor motion event was processed
+    // This is kept to counteract Cocoa doing the same internally
+ double cursorWarpDeltaX, cursorWarpDeltaY;
+} _GLFWwindowNS;
+
+// Cocoa-specific global data
+//
+typedef struct _GLFWlibraryNS
+{
+ CGEventSourceRef eventSource;
+ id delegate;
+ GLFWbool cursorHidden;
+ TISInputSourceRef inputSource;
+ IOHIDManagerRef hidManager;
+ id unicodeData;
+ id helper;
+ id keyUpMonitor;
+ id nibObjects;
+
+ char keynames[GLFW_KEY_LAST + 1][17];
+ short int keycodes[256];
+ short int scancodes[GLFW_KEY_LAST + 1];
+ char* clipboardString;
+ CGPoint cascadePoint;
+ // Where to place the cursor when re-enabled
+ double restoreCursorPosX, restoreCursorPosY;
+ // The window whose disabled cursor mode is active
+ _GLFWwindow* disabledCursorWindow;
+
+ struct {
+ CFBundleRef bundle;
+ PFN_TISCopyCurrentKeyboardLayoutInputSource CopyCurrentKeyboardLayoutInputSource;
+ PFN_TISGetInputSourceProperty GetInputSourceProperty;
+ PFN_LMGetKbdType GetKbdType;
+ CFStringRef kPropertyUnicodeKeyLayoutData;
+ } tis;
+} _GLFWlibraryNS;
+
+// Cocoa-specific per-monitor data
+//
+typedef struct _GLFWmonitorNS
+{
+ CGDirectDisplayID displayID;
+ CGDisplayModeRef previousMode;
+ uint32_t unitNumber;
+ id screen;
+ double fallbackRefreshRate;
+} _GLFWmonitorNS;
+
+// Cocoa-specific per-cursor data
+//
+typedef struct _GLFWcursorNS
+{
+ id object;
+} _GLFWcursorNS;
+
+
+GLFWbool _glfwConnectCocoa(int platformID, _GLFWplatform* platform);
+int _glfwInitCocoa(void);
+void _glfwTerminateCocoa(void);
+
+int _glfwCreateWindowCocoa(_GLFWwindow* window, const _GLFWwndconfig* wndconfig, const _GLFWctxconfig* ctxconfig, const _GLFWfbconfig* fbconfig);
+void _glfwDestroyWindowCocoa(_GLFWwindow* window);
+void _glfwSetWindowTitleCocoa(_GLFWwindow* window, const char* title);
+void _glfwSetWindowIconCocoa(_GLFWwindow* window, int count, const GLFWimage* images);
+void _glfwGetWindowPosCocoa(_GLFWwindow* window, int* xpos, int* ypos);
+void _glfwSetWindowPosCocoa(_GLFWwindow* window, int xpos, int ypos);
+void _glfwGetWindowSizeCocoa(_GLFWwindow* window, int* width, int* height);
+void _glfwSetWindowSizeCocoa(_GLFWwindow* window, int width, int height);
+void _glfwSetWindowSizeLimitsCocoa(_GLFWwindow* window, int minwidth, int minheight, int maxwidth, int maxheight);
+void _glfwSetWindowAspectRatioCocoa(_GLFWwindow* window, int numer, int denom);
+void _glfwGetFramebufferSizeCocoa(_GLFWwindow* window, int* width, int* height);
+void _glfwGetWindowFrameSizeCocoa(_GLFWwindow* window, int* left, int* top, int* right, int* bottom);
+void _glfwGetWindowContentScaleCocoa(_GLFWwindow* window, float* xscale, float* yscale);
+void _glfwIconifyWindowCocoa(_GLFWwindow* window);
+void _glfwRestoreWindowCocoa(_GLFWwindow* window);
+void _glfwMaximizeWindowCocoa(_GLFWwindow* window);
+void _glfwShowWindowCocoa(_GLFWwindow* window);
+void _glfwHideWindowCocoa(_GLFWwindow* window);
+void _glfwRequestWindowAttentionCocoa(_GLFWwindow* window);
+void _glfwFocusWindowCocoa(_GLFWwindow* window);
+void _glfwSetWindowMonitorCocoa(_GLFWwindow* window, _GLFWmonitor* monitor, int xpos, int ypos, int width, int height, int refreshRate);
+int _glfwWindowFocusedCocoa(_GLFWwindow* window);
+int _glfwWindowIconifiedCocoa(_GLFWwindow* window);
+int _glfwWindowVisibleCocoa(_GLFWwindow* window);
+int _glfwWindowMaximizedCocoa(_GLFWwindow* window);
+int _glfwWindowHoveredCocoa(_GLFWwindow* window);
+int _glfwFramebufferTransparentCocoa(_GLFWwindow* window);
+void _glfwSetWindowResizableCocoa(_GLFWwindow* window, GLFWbool enabled);
+void _glfwSetWindowDecoratedCocoa(_GLFWwindow* window, GLFWbool enabled);
+void _glfwSetWindowFloatingCocoa(_GLFWwindow* window, GLFWbool enabled);
+float _glfwGetWindowOpacityCocoa(_GLFWwindow* window);
+void _glfwSetWindowOpacityCocoa(_GLFWwindow* window, float opacity);
+void _glfwSetWindowMousePassthroughCocoa(_GLFWwindow* window, GLFWbool enabled);
+
+void _glfwSetRawMouseMotionCocoa(_GLFWwindow *window, GLFWbool enabled);
+GLFWbool _glfwRawMouseMotionSupportedCocoa(void);
+
+void _glfwPollEventsCocoa(void);
+void _glfwWaitEventsCocoa(void);
+void _glfwWaitEventsTimeoutCocoa(double timeout);
+void _glfwPostEmptyEventCocoa(void);
+
+void _glfwGetCursorPosCocoa(_GLFWwindow* window, double* xpos, double* ypos);
+void _glfwSetCursorPosCocoa(_GLFWwindow* window, double xpos, double ypos);
+void _glfwSetCursorModeCocoa(_GLFWwindow* window, int mode);
+const char* _glfwGetScancodeNameCocoa(int scancode);
+int _glfwGetKeyScancodeCocoa(int key);
+int _glfwCreateCursorCocoa(_GLFWcursor* cursor, const GLFWimage* image, int xhot, int yhot);
+int _glfwCreateStandardCursorCocoa(_GLFWcursor* cursor, int shape);
+void _glfwDestroyCursorCocoa(_GLFWcursor* cursor);
+void _glfwSetCursorCocoa(_GLFWwindow* window, _GLFWcursor* cursor);
+void _glfwSetClipboardStringCocoa(const char* string);
+const char* _glfwGetClipboardStringCocoa(void);
+
+EGLenum _glfwGetEGLPlatformCocoa(EGLint** attribs);
+EGLNativeDisplayType _glfwGetEGLNativeDisplayCocoa(void);
+EGLNativeWindowType _glfwGetEGLNativeWindowCocoa(_GLFWwindow* window);
+
+void _glfwGetRequiredInstanceExtensionsCocoa(char** extensions);
+int _glfwGetPhysicalDevicePresentationSupportCocoa(VkInstance instance, VkPhysicalDevice device, uint32_t queuefamily);
+VkResult _glfwCreateWindowSurfaceCocoa(VkInstance instance, _GLFWwindow* window, const VkAllocationCallbacks* allocator, VkSurfaceKHR* surface);
+
+void _glfwFreeMonitorCocoa(_GLFWmonitor* monitor);
+void _glfwGetMonitorPosCocoa(_GLFWmonitor* monitor, int* xpos, int* ypos);
+void _glfwGetMonitorContentScaleCocoa(_GLFWmonitor* monitor, float* xscale, float* yscale);
+void _glfwGetMonitorWorkareaCocoa(_GLFWmonitor* monitor, int* xpos, int* ypos, int* width, int* height);
+GLFWvidmode* _glfwGetVideoModesCocoa(_GLFWmonitor* monitor, int* count);
+void _glfwGetVideoModeCocoa(_GLFWmonitor* monitor, GLFWvidmode* mode);
+GLFWbool _glfwGetGammaRampCocoa(_GLFWmonitor* monitor, GLFWgammaramp* ramp);
+void _glfwSetGammaRampCocoa(_GLFWmonitor* monitor, const GLFWgammaramp* ramp);
+
+void _glfwPollMonitorsCocoa(void);
+void _glfwSetVideoModeCocoa(_GLFWmonitor* monitor, const GLFWvidmode* desired);
+void _glfwRestoreVideoModeCocoa(_GLFWmonitor* monitor);
+
+float _glfwTransformYCocoa(float y);
+
+void* _glfwLoadLocalVulkanLoaderCocoa(void);
+
+GLFWbool _glfwInitNSGL(void);
+void _glfwTerminateNSGL(void);
+GLFWbool _glfwCreateContextNSGL(_GLFWwindow* window,
+ const _GLFWctxconfig* ctxconfig,
+ const _GLFWfbconfig* fbconfig);
+void _glfwDestroyContextNSGL(_GLFWwindow* window);
+
diff --git a/chromium/third_party/dawn/third_party/glfw/src/cocoa_time.c b/chromium/third_party/dawn/third_party/glfw/src/cocoa_time.c
new file mode 100644
index 00000000000..c2bf8edaaa6
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/src/cocoa_time.c
@@ -0,0 +1,55 @@
+//========================================================================
+// GLFW 3.4 macOS - www.glfw.org
+//------------------------------------------------------------------------
+// Copyright (c) 2009-2016 Camilla Löwy <elmindreda@glfw.org>
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would
+// be appreciated but is not required.
+//
+// 2. Altered source versions must be plainly marked as such, and must not
+// be misrepresented as being the original software.
+//
+// 3. This notice may not be removed or altered from any source
+// distribution.
+//
+//========================================================================
+// It is fine to use C99 in this file because it will not be built with VS
+//========================================================================
+
+#include "internal.h"
+
+#include <mach/mach_time.h>
+
+
+//////////////////////////////////////////////////////////////////////////
+////// GLFW platform API //////
+//////////////////////////////////////////////////////////////////////////
+
+void _glfwPlatformInitTimer(void)
+{
+ mach_timebase_info_data_t info;
+ mach_timebase_info(&info);
+
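+    // One mach_absolute_time tick lasts numer / denom nanoseconds, so the
+    // timer frequency in Hz is denom * 1e9 / numer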
+ _glfw.timer.ns.frequency = (info.denom * 1e9) / info.numer;
+}
+
+uint64_t _glfwPlatformGetTimerValue(void)
+{
+ return mach_absolute_time();
+}
+
+uint64_t _glfwPlatformGetTimerFrequency(void)
+{
+ return _glfw.timer.ns.frequency;
+}
+
diff --git a/chromium/third_party/dawn/third_party/glfw/src/cocoa_time.h b/chromium/third_party/dawn/third_party/glfw/src/cocoa_time.h
new file mode 100644
index 00000000000..3512e8b6e95
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/src/cocoa_time.h
@@ -0,0 +1,35 @@
+//========================================================================
+// GLFW 3.4 macOS - www.glfw.org
+//------------------------------------------------------------------------
+// Copyright (c) 2009-2021 Camilla Löwy <elmindreda@glfw.org>
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would
+// be appreciated but is not required.
+//
+// 2. Altered source versions must be plainly marked as such, and must not
+// be misrepresented as being the original software.
+//
+// 3. This notice may not be removed or altered from any source
+// distribution.
+//
+//========================================================================
+
+#define GLFW_COCOA_LIBRARY_TIMER_STATE _GLFWtimerNS ns;
+
+// Cocoa-specific global timer data
+//
+typedef struct _GLFWtimerNS
+{
+ uint64_t frequency;
+} _GLFWtimerNS;
+
diff --git a/chromium/third_party/dawn/third_party/glfw/src/cocoa_window.m b/chromium/third_party/dawn/third_party/glfw/src/cocoa_window.m
new file mode 100644
index 00000000000..444bd563e97
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/src/cocoa_window.m
@@ -0,0 +1,1952 @@
+//========================================================================
+// GLFW 3.4 macOS - www.glfw.org
+//------------------------------------------------------------------------
+// Copyright (c) 2009-2019 Camilla Löwy <elmindreda@glfw.org>
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would
+// be appreciated but is not required.
+//
+// 2. Altered source versions must be plainly marked as such, and must not
+// be misrepresented as being the original software.
+//
+// 3. This notice may not be removed or altered from any source
+// distribution.
+//
+//========================================================================
+// It is fine to use C99 in this file because it will not be built with VS
+//========================================================================
+
+#include "internal.h"
+
+#include <float.h>
+#include <string.h>
+
+// Returns the style mask corresponding to the window settings
+//
+static NSUInteger getStyleMask(_GLFWwindow* window)
+{
+ NSUInteger styleMask = NSWindowStyleMaskMiniaturizable;
+
+ if (window->monitor || !window->decorated)
+ styleMask |= NSWindowStyleMaskBorderless;
+ else
+ {
+ styleMask |= NSWindowStyleMaskTitled |
+ NSWindowStyleMaskClosable;
+
+ if (window->resizable)
+ styleMask |= NSWindowStyleMaskResizable;
+ }
+
+ return styleMask;
+}
+
+// Returns whether the cursor is in the content area of the specified window
+//
+static GLFWbool cursorInContentArea(_GLFWwindow* window)
+{
+ const NSPoint pos = [window->ns.object mouseLocationOutsideOfEventStream];
+ return [window->ns.view mouse:pos inRect:[window->ns.view frame]];
+}
+
+// Hides the cursor if not already hidden
+//
+static void hideCursor(_GLFWwindow* window)
+{
+ if (!_glfw.ns.cursorHidden)
+ {
+ [NSCursor hide];
+ _glfw.ns.cursorHidden = GLFW_TRUE;
+ }
+}
+
+// Shows the cursor if not already shown
+//
+static void showCursor(_GLFWwindow* window)
+{
+ if (_glfw.ns.cursorHidden)
+ {
+ [NSCursor unhide];
+ _glfw.ns.cursorHidden = GLFW_FALSE;
+ }
+}
+
+// Updates the cursor image according to its cursor mode
+//
+static void updateCursorImage(_GLFWwindow* window)
+{
+ if (window->cursorMode == GLFW_CURSOR_NORMAL)
+ {
+ showCursor(window);
+
+ if (window->cursor)
+ [(NSCursor*) window->cursor->ns.object set];
+ else
+ [[NSCursor arrowCursor] set];
+ }
+ else
+ hideCursor(window);
+}
+
+// Apply chosen cursor mode to a focused window
+//
+static void updateCursorMode(_GLFWwindow* window)
+{
+ if (window->cursorMode == GLFW_CURSOR_DISABLED)
+ {
+ _glfw.ns.disabledCursorWindow = window;
+ _glfwGetCursorPosCocoa(window,
+ &_glfw.ns.restoreCursorPosX,
+ &_glfw.ns.restoreCursorPosY);
+ _glfwCenterCursorInContentArea(window);
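+        // Decouple the cursor from mouse movement so motion deltas keep
+        // arriving while the hidden cursor stays in place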
+ CGAssociateMouseAndMouseCursorPosition(false);
+ }
+ else if (_glfw.ns.disabledCursorWindow == window)
+ {
+ _glfw.ns.disabledCursorWindow = NULL;
+ _glfwSetCursorPosCocoa(window,
+ _glfw.ns.restoreCursorPosX,
+ _glfw.ns.restoreCursorPosY);
+ // NOTE: The matching CGAssociateMouseAndMouseCursorPosition call is
+ // made in _glfwSetCursorPosCocoa as part of a workaround
+ }
+
+ if (cursorInContentArea(window))
+ updateCursorImage(window);
+}
+
+// Make the specified window and its video mode active on its monitor
+//
+static void acquireMonitor(_GLFWwindow* window)
+{
+ _glfwSetVideoModeCocoa(window->monitor, &window->videoMode);
+ const CGRect bounds = CGDisplayBounds(window->monitor->ns.displayID);
+ const NSRect frame = NSMakeRect(bounds.origin.x,
+ _glfwTransformYCocoa(bounds.origin.y + bounds.size.height - 1),
+ bounds.size.width,
+ bounds.size.height);
+
+ [window->ns.object setFrame:frame display:YES];
+
+ _glfwInputMonitorWindow(window->monitor, window);
+}
+
+// Remove the window and restore the original video mode
+//
+static void releaseMonitor(_GLFWwindow* window)
+{
+ if (window->monitor->window != window)
+ return;
+
+ _glfwInputMonitorWindow(window->monitor, NULL);
+ _glfwRestoreVideoModeCocoa(window->monitor);
+}
+
+// Translates macOS key modifiers into GLFW ones
+//
+static int translateFlags(NSUInteger flags)
+{
+ int mods = 0;
+
+ if (flags & NSEventModifierFlagShift)
+ mods |= GLFW_MOD_SHIFT;
+ if (flags & NSEventModifierFlagControl)
+ mods |= GLFW_MOD_CONTROL;
+ if (flags & NSEventModifierFlagOption)
+ mods |= GLFW_MOD_ALT;
+ if (flags & NSEventModifierFlagCommand)
+ mods |= GLFW_MOD_SUPER;
+ if (flags & NSEventModifierFlagCapsLock)
+ mods |= GLFW_MOD_CAPS_LOCK;
+
+ return mods;
+}
+
+// Translates a macOS keycode to a GLFW keycode
+//
+static int translateKey(unsigned int key)
+{
+ if (key >= sizeof(_glfw.ns.keycodes) / sizeof(_glfw.ns.keycodes[0]))
+ return GLFW_KEY_UNKNOWN;
+
+ return _glfw.ns.keycodes[key];
+}
+
+// Translate a GLFW keycode to a Cocoa modifier flag
+//
+static NSUInteger translateKeyToModifierFlag(int key)
+{
+ switch (key)
+ {
+ case GLFW_KEY_LEFT_SHIFT:
+ case GLFW_KEY_RIGHT_SHIFT:
+ return NSEventModifierFlagShift;
+ case GLFW_KEY_LEFT_CONTROL:
+ case GLFW_KEY_RIGHT_CONTROL:
+ return NSEventModifierFlagControl;
+ case GLFW_KEY_LEFT_ALT:
+ case GLFW_KEY_RIGHT_ALT:
+ return NSEventModifierFlagOption;
+ case GLFW_KEY_LEFT_SUPER:
+ case GLFW_KEY_RIGHT_SUPER:
+ return NSEventModifierFlagCommand;
+ case GLFW_KEY_CAPS_LOCK:
+ return NSEventModifierFlagCapsLock;
+ }
+
+ return 0;
+}
+
+// Defines a constant for empty ranges in NSTextInputClient
+//
+static const NSRange kEmptyRange = { NSNotFound, 0 };
+
+
+//------------------------------------------------------------------------
+// Delegate for window related notifications
+//------------------------------------------------------------------------
+
+@interface GLFWWindowDelegate : NSObject
+{
+ _GLFWwindow* window;
+}
+
+- (instancetype)initWithGlfwWindow:(_GLFWwindow *)initWindow;
+
+@end
+
+@implementation GLFWWindowDelegate
+
+- (instancetype)initWithGlfwWindow:(_GLFWwindow *)initWindow
+{
+ self = [super init];
+ if (self != nil)
+ window = initWindow;
+
+ return self;
+}
+
+- (BOOL)windowShouldClose:(id)sender
+{
+ _glfwInputWindowCloseRequest(window);
+ return NO;
+}
+
+- (void)windowDidResize:(NSNotification *)notification
+{
+ if (window->context.source == GLFW_NATIVE_CONTEXT_API)
+ [window->context.nsgl.object update];
+
+ if (_glfw.ns.disabledCursorWindow == window)
+ _glfwCenterCursorInContentArea(window);
+
+ const int maximized = [window->ns.object isZoomed];
+ if (window->ns.maximized != maximized)
+ {
+ window->ns.maximized = maximized;
+ _glfwInputWindowMaximize(window, maximized);
+ }
+
+ const NSRect contentRect = [window->ns.view frame];
+ const NSRect fbRect = [window->ns.view convertRectToBacking:contentRect];
+
+ if (fbRect.size.width != window->ns.fbWidth ||
+ fbRect.size.height != window->ns.fbHeight)
+ {
+ window->ns.fbWidth = fbRect.size.width;
+ window->ns.fbHeight = fbRect.size.height;
+ _glfwInputFramebufferSize(window, fbRect.size.width, fbRect.size.height);
+ }
+
+ if (contentRect.size.width != window->ns.width ||
+ contentRect.size.height != window->ns.height)
+ {
+ window->ns.width = contentRect.size.width;
+ window->ns.height = contentRect.size.height;
+ _glfwInputWindowSize(window, contentRect.size.width, contentRect.size.height);
+ }
+}
+
+- (void)windowDidMove:(NSNotification *)notification
+{
+ if (window->context.source == GLFW_NATIVE_CONTEXT_API)
+ [window->context.nsgl.object update];
+
+ if (_glfw.ns.disabledCursorWindow == window)
+ _glfwCenterCursorInContentArea(window);
+
+ int x, y;
+ _glfwGetWindowPosCocoa(window, &x, &y);
+ _glfwInputWindowPos(window, x, y);
+}
+
+- (void)windowDidMiniaturize:(NSNotification *)notification
+{
+ if (window->monitor)
+ releaseMonitor(window);
+
+ _glfwInputWindowIconify(window, GLFW_TRUE);
+}
+
+- (void)windowDidDeminiaturize:(NSNotification *)notification
+{
+ if (window->monitor)
+ acquireMonitor(window);
+
+ _glfwInputWindowIconify(window, GLFW_FALSE);
+}
+
+- (void)windowDidBecomeKey:(NSNotification *)notification
+{
+ if (_glfw.ns.disabledCursorWindow == window)
+ _glfwCenterCursorInContentArea(window);
+
+ _glfwInputWindowFocus(window, GLFW_TRUE);
+ updateCursorMode(window);
+}
+
+- (void)windowDidResignKey:(NSNotification *)notification
+{
+ if (window->monitor && window->autoIconify)
+ _glfwIconifyWindowCocoa(window);
+
+ _glfwInputWindowFocus(window, GLFW_FALSE);
+}
+
+- (void)windowDidChangeOcclusionState:(NSNotification* )notification
+{
+ if ([window->ns.object occlusionState] & NSWindowOcclusionStateVisible)
+ window->ns.occluded = GLFW_FALSE;
+ else
+ window->ns.occluded = GLFW_TRUE;
+}
+
+@end
+
+
+//------------------------------------------------------------------------
+// Content view class for the GLFW window
+//------------------------------------------------------------------------
+
+@interface GLFWContentView : NSView <NSTextInputClient>
+{
+ _GLFWwindow* window;
+ NSTrackingArea* trackingArea;
+ NSMutableAttributedString* markedText;
+}
+
+- (instancetype)initWithGlfwWindow:(_GLFWwindow *)initWindow;
+
+@end
+
+@implementation GLFWContentView
+
+- (instancetype)initWithGlfwWindow:(_GLFWwindow *)initWindow
+{
+ self = [super init];
+ if (self != nil)
+ {
+ window = initWindow;
+ trackingArea = nil;
+ markedText = [[NSMutableAttributedString alloc] init];
+
+ [self updateTrackingAreas];
+ [self registerForDraggedTypes:@[NSPasteboardTypeURL]];
+ }
+
+ return self;
+}
+
+- (void)dealloc
+{
+ [trackingArea release];
+ [markedText release];
+ [super dealloc];
+}
+
+- (BOOL)isOpaque
+{
+ return [window->ns.object isOpaque];
+}
+
+- (BOOL)canBecomeKeyView
+{
+ return YES;
+}
+
+- (BOOL)acceptsFirstResponder
+{
+ return YES;
+}
+
+- (BOOL)wantsUpdateLayer
+{
+ return YES;
+}
+
+- (void)updateLayer
+{
+ if (window->context.source == GLFW_NATIVE_CONTEXT_API)
+ [window->context.nsgl.object update];
+
+ _glfwInputWindowDamage(window);
+}
+
+- (void)cursorUpdate:(NSEvent *)event
+{
+ updateCursorImage(window);
+}
+
+- (BOOL)acceptsFirstMouse:(NSEvent *)event
+{
+ return YES;
+}
+
+- (void)mouseDown:(NSEvent *)event
+{
+ _glfwInputMouseClick(window,
+ GLFW_MOUSE_BUTTON_LEFT,
+ GLFW_PRESS,
+ translateFlags([event modifierFlags]));
+}
+
+- (void)mouseDragged:(NSEvent *)event
+{
+ [self mouseMoved:event];
+}
+
+- (void)mouseUp:(NSEvent *)event
+{
+ _glfwInputMouseClick(window,
+ GLFW_MOUSE_BUTTON_LEFT,
+ GLFW_RELEASE,
+ translateFlags([event modifierFlags]));
+}
+
+- (void)mouseMoved:(NSEvent *)event
+{
+ if (window->cursorMode == GLFW_CURSOR_DISABLED)
+ {
+ const double dx = [event deltaX] - window->ns.cursorWarpDeltaX;
+ const double dy = [event deltaY] - window->ns.cursorWarpDeltaY;
+
+ _glfwInputCursorPos(window,
+ window->virtualCursorPosX + dx,
+ window->virtualCursorPosY + dy);
+ }
+ else
+ {
+ const NSRect contentRect = [window->ns.view frame];
+ // NOTE: The returned location uses base 0,1 not 0,0
+ const NSPoint pos = [event locationInWindow];
+
+ _glfwInputCursorPos(window, pos.x, contentRect.size.height - pos.y);
+ }
+
+ window->ns.cursorWarpDeltaX = 0;
+ window->ns.cursorWarpDeltaY = 0;
+}
+
+- (void)rightMouseDown:(NSEvent *)event
+{
+ _glfwInputMouseClick(window,
+ GLFW_MOUSE_BUTTON_RIGHT,
+ GLFW_PRESS,
+ translateFlags([event modifierFlags]));
+}
+
+- (void)rightMouseDragged:(NSEvent *)event
+{
+ [self mouseMoved:event];
+}
+
+- (void)rightMouseUp:(NSEvent *)event
+{
+ _glfwInputMouseClick(window,
+ GLFW_MOUSE_BUTTON_RIGHT,
+ GLFW_RELEASE,
+ translateFlags([event modifierFlags]));
+}
+
+- (void)otherMouseDown:(NSEvent *)event
+{
+ _glfwInputMouseClick(window,
+ (int) [event buttonNumber],
+ GLFW_PRESS,
+ translateFlags([event modifierFlags]));
+}
+
+- (void)otherMouseDragged:(NSEvent *)event
+{
+ [self mouseMoved:event];
+}
+
+- (void)otherMouseUp:(NSEvent *)event
+{
+ _glfwInputMouseClick(window,
+ (int) [event buttonNumber],
+ GLFW_RELEASE,
+ translateFlags([event modifierFlags]));
+}
+
+- (void)mouseExited:(NSEvent *)event
+{
+ if (window->cursorMode == GLFW_CURSOR_HIDDEN)
+ showCursor(window);
+
+ _glfwInputCursorEnter(window, GLFW_FALSE);
+}
+
+- (void)mouseEntered:(NSEvent *)event
+{
+ if (window->cursorMode == GLFW_CURSOR_HIDDEN)
+ hideCursor(window);
+
+ _glfwInputCursorEnter(window, GLFW_TRUE);
+}
+
+- (void)viewDidChangeBackingProperties
+{
+ const NSRect contentRect = [window->ns.view frame];
+ const NSRect fbRect = [window->ns.view convertRectToBacking:contentRect];
+ const float xscale = fbRect.size.width / contentRect.size.width;
+ const float yscale = fbRect.size.height / contentRect.size.height;
+
+ if (xscale != window->ns.xscale || yscale != window->ns.yscale)
+ {
+ if (window->ns.retina && window->ns.layer)
+ [window->ns.layer setContentsScale:[window->ns.object backingScaleFactor]];
+
+ window->ns.xscale = xscale;
+ window->ns.yscale = yscale;
+ _glfwInputWindowContentScale(window, xscale, yscale);
+ }
+
+ if (fbRect.size.width != window->ns.fbWidth ||
+ fbRect.size.height != window->ns.fbHeight)
+ {
+ window->ns.fbWidth = fbRect.size.width;
+ window->ns.fbHeight = fbRect.size.height;
+ _glfwInputFramebufferSize(window, fbRect.size.width, fbRect.size.height);
+ }
+}
+
+- (void)drawRect:(NSRect)rect
+{
+ _glfwInputWindowDamage(window);
+}
+
+- (void)updateTrackingAreas
+{
+ if (trackingArea != nil)
+ {
+ [self removeTrackingArea:trackingArea];
+ [trackingArea release];
+ }
+
+ const NSTrackingAreaOptions options = NSTrackingMouseEnteredAndExited |
+ NSTrackingActiveInKeyWindow |
+ NSTrackingEnabledDuringMouseDrag |
+ NSTrackingCursorUpdate |
+ NSTrackingInVisibleRect |
+ NSTrackingAssumeInside;
+
+ trackingArea = [[NSTrackingArea alloc] initWithRect:[self bounds]
+ options:options
+ owner:self
+ userInfo:nil];
+
+ [self addTrackingArea:trackingArea];
+ [super updateTrackingAreas];
+}
+
+- (void)keyDown:(NSEvent *)event
+{
+ const int key = translateKey([event keyCode]);
+ const int mods = translateFlags([event modifierFlags]);
+
+ _glfwInputKey(window, key, [event keyCode], GLFW_PRESS, mods);
+
+ [self interpretKeyEvents:@[event]];
+}
+
+- (void)flagsChanged:(NSEvent *)event
+{
+ int action;
+ const unsigned int modifierFlags =
+ [event modifierFlags] & NSEventModifierFlagDeviceIndependentFlagsMask;
+ const int key = translateKey([event keyCode]);
+ const int mods = translateFlags(modifierFlags);
+ const NSUInteger keyFlag = translateKeyToModifierFlag(key);
+
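+    // flagsChanged does not report whether the modifier key was pressed or
+    // released, so infer it from the previously recorded key state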
+ if (keyFlag & modifierFlags)
+ {
+ if (window->keys[key] == GLFW_PRESS)
+ action = GLFW_RELEASE;
+ else
+ action = GLFW_PRESS;
+ }
+ else
+ action = GLFW_RELEASE;
+
+ _glfwInputKey(window, key, [event keyCode], action, mods);
+}
+
+- (void)keyUp:(NSEvent *)event
+{
+ const int key = translateKey([event keyCode]);
+ const int mods = translateFlags([event modifierFlags]);
+ _glfwInputKey(window, key, [event keyCode], GLFW_RELEASE, mods);
+}
+
+- (void)scrollWheel:(NSEvent *)event
+{
+ double deltaX = [event scrollingDeltaX];
+ double deltaY = [event scrollingDeltaY];
+
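+    // Precise (per-pixel) deltas are scaled down so trackpad scrolling is
+    // roughly comparable to line-based scroll wheel input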
+ if ([event hasPreciseScrollingDeltas])
+ {
+ deltaX *= 0.1;
+ deltaY *= 0.1;
+ }
+
+ if (fabs(deltaX) > 0.0 || fabs(deltaY) > 0.0)
+ _glfwInputScroll(window, deltaX, deltaY);
+}
+
+- (NSDragOperation)draggingEntered:(id <NSDraggingInfo>)sender
+{
+ // HACK: We don't know what to say here because we don't know what the
+ // application wants to do with the paths
+ return NSDragOperationGeneric;
+}
+
+- (BOOL)performDragOperation:(id <NSDraggingInfo>)sender
+{
+ const NSRect contentRect = [window->ns.view frame];
+ // NOTE: The returned location uses base 0,1 not 0,0
+ const NSPoint pos = [sender draggingLocation];
+ _glfwInputCursorPos(window, pos.x, contentRect.size.height - pos.y);
+
+ NSPasteboard* pasteboard = [sender draggingPasteboard];
+ NSDictionary* options = @{NSPasteboardURLReadingFileURLsOnlyKey:@YES};
+ NSArray* urls = [pasteboard readObjectsForClasses:@[[NSURL class]]
+ options:options];
+ const NSUInteger count = [urls count];
+ if (count)
+ {
+ char** paths = _glfw_calloc(count, sizeof(char*));
+
+ for (NSUInteger i = 0; i < count; i++)
+ paths[i] = _glfw_strdup([urls[i] fileSystemRepresentation]);
+
+ _glfwInputDrop(window, (int) count, (const char**) paths);
+
+ for (NSUInteger i = 0; i < count; i++)
+ _glfw_free(paths[i]);
+ _glfw_free(paths);
+ }
+
+ return YES;
+}
+
+- (BOOL)hasMarkedText
+{
+ return [markedText length] > 0;
+}
+
+- (NSRange)markedRange
+{
+ if ([markedText length] > 0)
+ return NSMakeRange(0, [markedText length] - 1);
+ else
+ return kEmptyRange;
+}
+
+- (NSRange)selectedRange
+{
+ return kEmptyRange;
+}
+
+- (void)setMarkedText:(id)string
+ selectedRange:(NSRange)selectedRange
+ replacementRange:(NSRange)replacementRange
+{
+ [markedText release];
+ if ([string isKindOfClass:[NSAttributedString class]])
+ markedText = [[NSMutableAttributedString alloc] initWithAttributedString:string];
+ else
+ markedText = [[NSMutableAttributedString alloc] initWithString:string];
+}
+
+- (void)unmarkText
+{
+ [[markedText mutableString] setString:@""];
+}
+
+- (NSArray*)validAttributesForMarkedText
+{
+ return [NSArray array];
+}
+
+- (NSAttributedString*)attributedSubstringForProposedRange:(NSRange)range
+ actualRange:(NSRangePointer)actualRange
+{
+ return nil;
+}
+
+- (NSUInteger)characterIndexForPoint:(NSPoint)point
+{
+ return 0;
+}
+
+- (NSRect)firstRectForCharacterRange:(NSRange)range
+ actualRange:(NSRangePointer)actualRange
+{
+ const NSRect frame = [window->ns.view frame];
+ return NSMakeRect(frame.origin.x, frame.origin.y, 0.0, 0.0);
+}
+
+- (void)insertText:(id)string replacementRange:(NSRange)replacementRange
+{
+ NSString* characters;
+ NSEvent* event = [NSApp currentEvent];
+ const int mods = translateFlags([event modifierFlags]);
+ const int plain = !(mods & GLFW_MOD_SUPER);
+
+ if ([string isKindOfClass:[NSAttributedString class]])
+ characters = [string string];
+ else
+ characters = (NSString*) string;
+
+ NSRange range = NSMakeRange(0, [characters length]);
+ while (range.length)
+ {
+ uint32_t codepoint = 0;
+
+ if ([characters getBytes:&codepoint
+ maxLength:sizeof(codepoint)
+ usedLength:NULL
+ encoding:NSUTF32StringEncoding
+ options:0
+ range:range
+ remainingRange:&range])
+ {
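+            // Codepoints in this private-use range are used by Cocoa for
+            // function keys and should not be reported as character input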
+ if (codepoint >= 0xf700 && codepoint <= 0xf7ff)
+ continue;
+
+ _glfwInputChar(window, codepoint, mods, plain);
+ }
+ }
+}
+
+- (void)doCommandBySelector:(SEL)selector
+{
+}
+
+@end
+
+
+//------------------------------------------------------------------------
+// GLFW window class
+//------------------------------------------------------------------------
+
+@interface GLFWWindow : NSWindow {}
+@end
+
+@implementation GLFWWindow
+
+- (BOOL)canBecomeKeyWindow
+{
+ // Required for NSWindowStyleMaskBorderless windows
+ return YES;
+}
+
+- (BOOL)canBecomeMainWindow
+{
+ return YES;
+}
+
+@end
+
+
+// Create the Cocoa window
+//
+static GLFWbool createNativeWindow(_GLFWwindow* window,
+ const _GLFWwndconfig* wndconfig,
+ const _GLFWfbconfig* fbconfig)
+{
+ window->ns.delegate = [[GLFWWindowDelegate alloc] initWithGlfwWindow:window];
+ if (window->ns.delegate == nil)
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "Cocoa: Failed to create window delegate");
+ return GLFW_FALSE;
+ }
+
+ NSRect contentRect;
+
+ if (window->monitor)
+ {
+ GLFWvidmode mode;
+ int xpos, ypos;
+
+ _glfwGetVideoModeCocoa(window->monitor, &mode);
+ _glfwGetMonitorPosCocoa(window->monitor, &xpos, &ypos);
+
+ contentRect = NSMakeRect(xpos, ypos, mode.width, mode.height);
+ }
+ else
+ contentRect = NSMakeRect(0, 0, wndconfig->width, wndconfig->height);
+
+ window->ns.object = [[GLFWWindow alloc]
+ initWithContentRect:contentRect
+ styleMask:getStyleMask(window)
+ backing:NSBackingStoreBuffered
+ defer:NO];
+
+ if (window->ns.object == nil)
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR, "Cocoa: Failed to create window");
+ return GLFW_FALSE;
+ }
+
+ if (window->monitor)
+ [window->ns.object setLevel:NSMainMenuWindowLevel + 1];
+ else
+ {
+ [(NSWindow*) window->ns.object center];
+ _glfw.ns.cascadePoint =
+ NSPointToCGPoint([window->ns.object cascadeTopLeftFromPoint:
+ NSPointFromCGPoint(_glfw.ns.cascadePoint)]);
+
+ if (wndconfig->resizable)
+ {
+ const NSWindowCollectionBehavior behavior =
+ NSWindowCollectionBehaviorFullScreenPrimary |
+ NSWindowCollectionBehaviorManaged;
+ [window->ns.object setCollectionBehavior:behavior];
+ }
+
+ if (wndconfig->floating)
+ [window->ns.object setLevel:NSFloatingWindowLevel];
+
+ if (wndconfig->maximized)
+ [window->ns.object zoom:nil];
+ }
+
+ if (strlen(wndconfig->ns.frameName))
+ [window->ns.object setFrameAutosaveName:@(wndconfig->ns.frameName)];
+
+ window->ns.view = [[GLFWContentView alloc] initWithGlfwWindow:window];
+ window->ns.retina = wndconfig->ns.retina;
+
+ if (fbconfig->transparent)
+ {
+ [window->ns.object setOpaque:NO];
+ [window->ns.object setHasShadow:NO];
+ [window->ns.object setBackgroundColor:[NSColor clearColor]];
+ }
+
+ [window->ns.object setContentView:window->ns.view];
+ [window->ns.object makeFirstResponder:window->ns.view];
+ [window->ns.object setTitle:@(wndconfig->title)];
+ [window->ns.object setDelegate:window->ns.delegate];
+ [window->ns.object setAcceptsMouseMovedEvents:YES];
+ [window->ns.object setRestorable:NO];
+
+#if MAC_OS_X_VERSION_MAX_ALLOWED >= 101200
+ if ([window->ns.object respondsToSelector:@selector(setTabbingMode:)])
+ [window->ns.object setTabbingMode:NSWindowTabbingModeDisallowed];
+#endif
+
+ _glfwGetWindowSizeCocoa(window, &window->ns.width, &window->ns.height);
+ _glfwGetFramebufferSizeCocoa(window, &window->ns.fbWidth, &window->ns.fbHeight);
+
+ return GLFW_TRUE;
+}
+
+
+//////////////////////////////////////////////////////////////////////////
+////// GLFW internal API //////
+//////////////////////////////////////////////////////////////////////////
+
+// Transforms a y-coordinate between the CG display and NS screen spaces
+//
+float _glfwTransformYCocoa(float y)
+{
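+    // CG display coordinates place the origin at the top left of the main
+    // display, while NS screen coordinates place it at the bottom left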
+ return CGDisplayBounds(CGMainDisplayID()).size.height - y - 1;
+}
+
+
+//////////////////////////////////////////////////////////////////////////
+////// GLFW platform API //////
+//////////////////////////////////////////////////////////////////////////
+
+int _glfwCreateWindowCocoa(_GLFWwindow* window,
+ const _GLFWwndconfig* wndconfig,
+ const _GLFWctxconfig* ctxconfig,
+ const _GLFWfbconfig* fbconfig)
+{
+ @autoreleasepool {
+
+ if (!createNativeWindow(window, wndconfig, fbconfig))
+ return GLFW_FALSE;
+
+ if (ctxconfig->client != GLFW_NO_API)
+ {
+ if (ctxconfig->source == GLFW_NATIVE_CONTEXT_API)
+ {
+ if (!_glfwInitNSGL())
+ return GLFW_FALSE;
+ if (!_glfwCreateContextNSGL(window, ctxconfig, fbconfig))
+ return GLFW_FALSE;
+ }
+ else if (ctxconfig->source == GLFW_EGL_CONTEXT_API)
+ {
+            // The EGL implementation on macOS uses a CALayer* as its
+            // EGLNativeWindowType, so the view's layer is needed for EGL
+            // window surface creation.
+ [window->ns.view setWantsLayer:YES];
+ window->ns.layer = [window->ns.view layer];
+
+ if (!_glfwInitEGL())
+ return GLFW_FALSE;
+ if (!_glfwCreateContextEGL(window, ctxconfig, fbconfig))
+ return GLFW_FALSE;
+ }
+ else if (ctxconfig->source == GLFW_OSMESA_CONTEXT_API)
+ {
+ if (!_glfwInitOSMesa())
+ return GLFW_FALSE;
+ if (!_glfwCreateContextOSMesa(window, ctxconfig, fbconfig))
+ return GLFW_FALSE;
+ }
+
+ if (!_glfwRefreshContextAttribs(window, ctxconfig))
+ return GLFW_FALSE;
+ }
+
+ if (wndconfig->mousePassthrough)
+ _glfwSetWindowMousePassthroughCocoa(window, GLFW_TRUE);
+
+ if (window->monitor)
+ {
+ _glfwShowWindowCocoa(window);
+ _glfwFocusWindowCocoa(window);
+ acquireMonitor(window);
+
+ if (wndconfig->centerCursor)
+ _glfwCenterCursorInContentArea(window);
+ }
+ else
+ {
+ if (wndconfig->visible)
+ {
+ _glfwShowWindowCocoa(window);
+ if (wndconfig->focused)
+ _glfwFocusWindowCocoa(window);
+ }
+ }
+
+ return GLFW_TRUE;
+
+ } // autoreleasepool
+}
+
+void _glfwDestroyWindowCocoa(_GLFWwindow* window)
+{
+ @autoreleasepool {
+
+ if (_glfw.ns.disabledCursorWindow == window)
+ _glfw.ns.disabledCursorWindow = NULL;
+
+ [window->ns.object orderOut:nil];
+
+ if (window->monitor)
+ releaseMonitor(window);
+
+ if (window->context.destroy)
+ window->context.destroy(window);
+
+ [window->ns.object setDelegate:nil];
+ [window->ns.delegate release];
+ window->ns.delegate = nil;
+
+ [window->ns.view release];
+ window->ns.view = nil;
+
+ [window->ns.object close];
+ window->ns.object = nil;
+
+ // HACK: Allow Cocoa to catch up before returning
+ _glfwPollEventsCocoa();
+
+ } // autoreleasepool
+}
+
+void _glfwSetWindowTitleCocoa(_GLFWwindow* window, const char* title)
+{
+ @autoreleasepool {
+ NSString* string = @(title);
+ [window->ns.object setTitle:string];
+ // HACK: Set the miniwindow title explicitly as setTitle: doesn't update it
+ // if the window lacks NSWindowStyleMaskTitled
+ [window->ns.object setMiniwindowTitle:string];
+ } // autoreleasepool
+}
+
+void _glfwSetWindowIconCocoa(_GLFWwindow* window,
+ int count, const GLFWimage* images)
+{
+ _glfwInputError(GLFW_FEATURE_UNAVAILABLE,
+ "Cocoa: Regular windows do not have icons on macOS");
+}
+
+void _glfwGetWindowPosCocoa(_GLFWwindow* window, int* xpos, int* ypos)
+{
+ @autoreleasepool {
+
+ const NSRect contentRect =
+ [window->ns.object contentRectForFrameRect:[window->ns.object frame]];
+
+ if (xpos)
+ *xpos = contentRect.origin.x;
+ if (ypos)
+ *ypos = _glfwTransformYCocoa(contentRect.origin.y + contentRect.size.height - 1);
+
+ } // autoreleasepool
+}
+
+void _glfwSetWindowPosCocoa(_GLFWwindow* window, int x, int y)
+{
+ @autoreleasepool {
+
+ const NSRect contentRect = [window->ns.view frame];
+ const NSRect dummyRect = NSMakeRect(x, _glfwTransformYCocoa(y + contentRect.size.height - 1), 0, 0);
+ const NSRect frameRect = [window->ns.object frameRectForContentRect:dummyRect];
+ [window->ns.object setFrameOrigin:frameRect.origin];
+
+ } // autoreleasepool
+}
+
+void _glfwGetWindowSizeCocoa(_GLFWwindow* window, int* width, int* height)
+{
+ @autoreleasepool {
+
+ const NSRect contentRect = [window->ns.view frame];
+
+ if (width)
+ *width = contentRect.size.width;
+ if (height)
+ *height = contentRect.size.height;
+
+ } // autoreleasepool
+}
+
+void _glfwSetWindowSizeCocoa(_GLFWwindow* window, int width, int height)
+{
+ @autoreleasepool {
+
+ if (window->monitor)
+ {
+ if (window->monitor->window == window)
+ acquireMonitor(window);
+ }
+ else
+ {
+ NSRect contentRect =
+ [window->ns.object contentRectForFrameRect:[window->ns.object frame]];
+ contentRect.origin.y += contentRect.size.height - height;
+ contentRect.size = NSMakeSize(width, height);
+ [window->ns.object setFrame:[window->ns.object frameRectForContentRect:contentRect]
+ display:YES];
+ }
+
+ } // autoreleasepool
+}
+
+void _glfwSetWindowSizeLimitsCocoa(_GLFWwindow* window,
+ int minwidth, int minheight,
+ int maxwidth, int maxheight)
+{
+ @autoreleasepool {
+
+ if (minwidth == GLFW_DONT_CARE || minheight == GLFW_DONT_CARE)
+ [window->ns.object setContentMinSize:NSMakeSize(0, 0)];
+ else
+ [window->ns.object setContentMinSize:NSMakeSize(minwidth, minheight)];
+
+ if (maxwidth == GLFW_DONT_CARE || maxheight == GLFW_DONT_CARE)
+ [window->ns.object setContentMaxSize:NSMakeSize(DBL_MAX, DBL_MAX)];
+ else
+ [window->ns.object setContentMaxSize:NSMakeSize(maxwidth, maxheight)];
+
+ } // autoreleasepool
+}
+
+void _glfwSetWindowAspectRatioCocoa(_GLFWwindow* window, int numer, int denom)
+{
+ @autoreleasepool {
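+    // Setting the resize increments back to 1x1 clears any previously set
+    // content aspect ratio, as the two constraints override each other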
+ if (numer == GLFW_DONT_CARE || denom == GLFW_DONT_CARE)
+ [window->ns.object setResizeIncrements:NSMakeSize(1.0, 1.0)];
+ else
+ [window->ns.object setContentAspectRatio:NSMakeSize(numer, denom)];
+ } // autoreleasepool
+}
+
+void _glfwGetFramebufferSizeCocoa(_GLFWwindow* window, int* width, int* height)
+{
+ @autoreleasepool {
+
+ const NSRect contentRect = [window->ns.view frame];
+ const NSRect fbRect = [window->ns.view convertRectToBacking:contentRect];
+
+ if (width)
+ *width = (int) fbRect.size.width;
+ if (height)
+ *height = (int) fbRect.size.height;
+
+ } // autoreleasepool
+}
+
+void _glfwGetWindowFrameSizeCocoa(_GLFWwindow* window,
+ int* left, int* top,
+ int* right, int* bottom)
+{
+ @autoreleasepool {
+
+ const NSRect contentRect = [window->ns.view frame];
+ const NSRect frameRect = [window->ns.object frameRectForContentRect:contentRect];
+
+ if (left)
+ *left = contentRect.origin.x - frameRect.origin.x;
+ if (top)
+ *top = frameRect.origin.y + frameRect.size.height -
+ contentRect.origin.y - contentRect.size.height;
+ if (right)
+ *right = frameRect.origin.x + frameRect.size.width -
+ contentRect.origin.x - contentRect.size.width;
+ if (bottom)
+ *bottom = contentRect.origin.y - frameRect.origin.y;
+
+ } // autoreleasepool
+}
+
+void _glfwGetWindowContentScaleCocoa(_GLFWwindow* window,
+ float* xscale, float* yscale)
+{
+ @autoreleasepool {
+
+ const NSRect points = [window->ns.view frame];
+ const NSRect pixels = [window->ns.view convertRectToBacking:points];
+
+ if (xscale)
+ *xscale = (float) (pixels.size.width / points.size.width);
+ if (yscale)
+ *yscale = (float) (pixels.size.height / points.size.height);
+
+ } // autoreleasepool
+}
+
+void _glfwIconifyWindowCocoa(_GLFWwindow* window)
+{
+ @autoreleasepool {
+ [window->ns.object miniaturize:nil];
+ } // autoreleasepool
+}
+
+void _glfwRestoreWindowCocoa(_GLFWwindow* window)
+{
+ @autoreleasepool {
+ if ([window->ns.object isMiniaturized])
+ [window->ns.object deminiaturize:nil];
+ else if ([window->ns.object isZoomed])
+ [window->ns.object zoom:nil];
+ } // autoreleasepool
+}
+
+void _glfwMaximizeWindowCocoa(_GLFWwindow* window)
+{
+ @autoreleasepool {
+ if (![window->ns.object isZoomed])
+ [window->ns.object zoom:nil];
+ } // autoreleasepool
+}
+
+void _glfwShowWindowCocoa(_GLFWwindow* window)
+{
+ @autoreleasepool {
+ [window->ns.object orderFront:nil];
+ } // autoreleasepool
+}
+
+void _glfwHideWindowCocoa(_GLFWwindow* window)
+{
+ @autoreleasepool {
+ [window->ns.object orderOut:nil];
+ } // autoreleasepool
+}
+
+void _glfwRequestWindowAttentionCocoa(_GLFWwindow* window)
+{
+ @autoreleasepool {
+ [NSApp requestUserAttention:NSInformationalRequest];
+ } // autoreleasepool
+}
+
+void _glfwFocusWindowCocoa(_GLFWwindow* window)
+{
+ @autoreleasepool {
+ // Make us the active application
+ // HACK: This is here to prevent applications using only hidden windows from
+ // being activated, but should probably not be done every time any
+ // window is shown
+ [NSApp activateIgnoringOtherApps:YES];
+ [window->ns.object makeKeyAndOrderFront:nil];
+ } // autoreleasepool
+}
+
+void _glfwSetWindowMonitorCocoa(_GLFWwindow* window,
+ _GLFWmonitor* monitor,
+ int xpos, int ypos,
+ int width, int height,
+ int refreshRate)
+{
+ @autoreleasepool {
+
+ if (window->monitor == monitor)
+ {
+ if (monitor)
+ {
+ if (monitor->window == window)
+ acquireMonitor(window);
+ }
+ else
+ {
+ const NSRect contentRect =
+ NSMakeRect(xpos, _glfwTransformYCocoa(ypos + height - 1), width, height);
+ const NSRect frameRect =
+ [window->ns.object frameRectForContentRect:contentRect
+ styleMask:getStyleMask(window)];
+
+ [window->ns.object setFrame:frameRect display:YES];
+ }
+
+ return;
+ }
+
+ if (window->monitor)
+ releaseMonitor(window);
+
+ _glfwInputWindowMonitor(window, monitor);
+
+ // HACK: Allow the state cached in Cocoa to catch up to reality
+ // TODO: Solve this in a less terrible way
+ _glfwPollEventsCocoa();
+
+ const NSUInteger styleMask = getStyleMask(window);
+ [window->ns.object setStyleMask:styleMask];
+ // HACK: Changing the style mask can cause the first responder to be cleared
+ [window->ns.object makeFirstResponder:window->ns.view];
+
+ if (window->monitor)
+ {
+ [window->ns.object setLevel:NSMainMenuWindowLevel + 1];
+ [window->ns.object setHasShadow:NO];
+
+ acquireMonitor(window);
+ }
+ else
+ {
+ NSRect contentRect = NSMakeRect(xpos, _glfwTransformYCocoa(ypos + height - 1),
+ width, height);
+ NSRect frameRect = [window->ns.object frameRectForContentRect:contentRect
+ styleMask:styleMask];
+ [window->ns.object setFrame:frameRect display:YES];
+
+ if (window->numer != GLFW_DONT_CARE &&
+ window->denom != GLFW_DONT_CARE)
+ {
+ [window->ns.object setContentAspectRatio:NSMakeSize(window->numer,
+ window->denom)];
+ }
+
+ if (window->minwidth != GLFW_DONT_CARE &&
+ window->minheight != GLFW_DONT_CARE)
+ {
+ [window->ns.object setContentMinSize:NSMakeSize(window->minwidth,
+ window->minheight)];
+ }
+
+ if (window->maxwidth != GLFW_DONT_CARE &&
+ window->maxheight != GLFW_DONT_CARE)
+ {
+ [window->ns.object setContentMaxSize:NSMakeSize(window->maxwidth,
+ window->maxheight)];
+ }
+
+ if (window->floating)
+ [window->ns.object setLevel:NSFloatingWindowLevel];
+ else
+ [window->ns.object setLevel:NSNormalWindowLevel];
+
+ [window->ns.object setHasShadow:YES];
+ // HACK: Clearing NSWindowStyleMaskTitled resets and disables the window
+ // title property but the miniwindow title property is unaffected
+ [window->ns.object setTitle:[window->ns.object miniwindowTitle]];
+ }
+
+ } // autoreleasepool
+}
+
+int _glfwWindowFocusedCocoa(_GLFWwindow* window)
+{
+ @autoreleasepool {
+ return [window->ns.object isKeyWindow];
+ } // autoreleasepool
+}
+
+int _glfwWindowIconifiedCocoa(_GLFWwindow* window)
+{
+ @autoreleasepool {
+ return [window->ns.object isMiniaturized];
+ } // autoreleasepool
+}
+
+int _glfwWindowVisibleCocoa(_GLFWwindow* window)
+{
+ @autoreleasepool {
+ return [window->ns.object isVisible];
+ } // autoreleasepool
+}
+
+int _glfwWindowMaximizedCocoa(_GLFWwindow* window)
+{
+ @autoreleasepool {
+ return [window->ns.object isZoomed];
+ } // autoreleasepool
+}
+
+int _glfwWindowHoveredCocoa(_GLFWwindow* window)
+{
+ @autoreleasepool {
+
+ const NSPoint point = [NSEvent mouseLocation];
+
+ if ([NSWindow windowNumberAtPoint:point belowWindowWithWindowNumber:0] !=
+ [window->ns.object windowNumber])
+ {
+ return GLFW_FALSE;
+ }
+
+ return NSMouseInRect(point,
+ [window->ns.object convertRectToScreen:[window->ns.view frame]], NO);
+
+ } // autoreleasepool
+}
+
+int _glfwFramebufferTransparentCocoa(_GLFWwindow* window)
+{
+ @autoreleasepool {
+ return ![window->ns.object isOpaque] && ![window->ns.view isOpaque];
+ } // autoreleasepool
+}
+
+void _glfwSetWindowResizableCocoa(_GLFWwindow* window, GLFWbool enabled)
+{
+ @autoreleasepool {
+ [window->ns.object setStyleMask:getStyleMask(window)];
+ } // autoreleasepool
+}
+
+void _glfwSetWindowDecoratedCocoa(_GLFWwindow* window, GLFWbool enabled)
+{
+ @autoreleasepool {
+ [window->ns.object setStyleMask:getStyleMask(window)];
+ [window->ns.object makeFirstResponder:window->ns.view];
+ } // autoreleasepool
+}
+
+void _glfwSetWindowFloatingCocoa(_GLFWwindow* window, GLFWbool enabled)
+{
+ @autoreleasepool {
+ if (enabled)
+ [window->ns.object setLevel:NSFloatingWindowLevel];
+ else
+ [window->ns.object setLevel:NSNormalWindowLevel];
+ } // autoreleasepool
+}
+
+void _glfwSetWindowMousePassthroughCocoa(_GLFWwindow* window, GLFWbool enabled)
+{
+ @autoreleasepool {
+ [window->ns.object setIgnoresMouseEvents:enabled];
+ }
+}
+
+float _glfwGetWindowOpacityCocoa(_GLFWwindow* window)
+{
+ @autoreleasepool {
+ return (float) [window->ns.object alphaValue];
+ } // autoreleasepool
+}
+
+void _glfwSetWindowOpacityCocoa(_GLFWwindow* window, float opacity)
+{
+ @autoreleasepool {
+ [window->ns.object setAlphaValue:opacity];
+ } // autoreleasepool
+}
+
+void _glfwSetRawMouseMotionCocoa(_GLFWwindow *window, GLFWbool enabled)
+{
+ _glfwInputError(GLFW_FEATURE_UNIMPLEMENTED,
+ "Cocoa: Raw mouse motion not yet implemented");
+}
+
+GLFWbool _glfwRawMouseMotionSupportedCocoa(void)
+{
+ return GLFW_FALSE;
+}
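
For reference, the public path into the two functions above is glfwRawMouseMotionSupported plus glfwSetInputMode with GLFW_RAW_MOUSE_MOTION. A minimal usage sketch (not part of this patch; the window is assumed to exist):

#include <GLFW/glfw3.h>

static void enable_raw_motion_if_available(GLFWwindow* window)
{
    // Raw motion is normally combined with a disabled (captured) cursor.
    glfwSetInputMode(window, GLFW_CURSOR, GLFW_CURSOR_DISABLED);

    // On the Cocoa backend above this returns GLFW_FALSE, so the setter
    // (and its GLFW_FEATURE_UNIMPLEMENTED error) is never reached.
    if (glfwRawMouseMotionSupported())
        glfwSetInputMode(window, GLFW_RAW_MOUSE_MOTION, GLFW_TRUE);
}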
+
+void _glfwPollEventsCocoa(void)
+{
+ @autoreleasepool {
+
+ for (;;)
+ {
+ NSEvent* event = [NSApp nextEventMatchingMask:NSEventMaskAny
+ untilDate:[NSDate distantPast]
+ inMode:NSDefaultRunLoopMode
+ dequeue:YES];
+ if (event == nil)
+ break;
+
+ [NSApp sendEvent:event];
+ }
+
+ } // autoreleasepool
+}
+
+void _glfwWaitEventsCocoa(void)
+{
+ @autoreleasepool {
+
+    // Ideally this would pass NO to dequeue: and rely on PollEvents to
+    // dequeue and send, but for reasons that remain unclear, passing NO
+    // to dequeue: causes this method to never return.
+ NSEvent *event = [NSApp nextEventMatchingMask:NSEventMaskAny
+ untilDate:[NSDate distantFuture]
+ inMode:NSDefaultRunLoopMode
+ dequeue:YES];
+ [NSApp sendEvent:event];
+
+ _glfwPollEventsCocoa();
+
+ } // autoreleasepool
+}
+
+void _glfwWaitEventsTimeoutCocoa(double timeout)
+{
+ @autoreleasepool {
+
+ NSDate* date = [NSDate dateWithTimeIntervalSinceNow:timeout];
+ NSEvent* event = [NSApp nextEventMatchingMask:NSEventMaskAny
+ untilDate:date
+ inMode:NSDefaultRunLoopMode
+ dequeue:YES];
+ if (event)
+ [NSApp sendEvent:event];
+
+ _glfwPollEventsCocoa();
+
+ } // autoreleasepool
+}
+
+void _glfwPostEmptyEventCocoa(void)
+{
+ @autoreleasepool {
+
+ NSEvent* event = [NSEvent otherEventWithType:NSEventTypeApplicationDefined
+ location:NSMakePoint(0, 0)
+ modifierFlags:0
+ timestamp:0
+ windowNumber:0
+ context:nil
+ subtype:0
+ data1:0
+ data2:0];
+ [NSApp postEvent:event atStart:YES];
+
+ } // autoreleasepool
+}
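
The four functions above back glfwPollEvents, glfwWaitEvents, glfwWaitEventsTimeout and glfwPostEmptyEvent. A minimal sketch of the intended pattern (not part of this patch; window creation is assumed elsewhere):

#include <GLFW/glfw3.h>

static void event_loop(GLFWwindow* window)
{
    while (!glfwWindowShouldClose(window))
    {
        // Sleeps until an event arrives or 100 ms pass; either way the
        // Cocoa implementation above drains the NSApp event queue.
        glfwWaitEventsTimeout(0.1);

        // ... update and render ...
    }
}

// A worker thread can call glfwPostEmptyEvent() to queue the
// NSEventTypeApplicationDefined event above and wake the loop early.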
+
+void _glfwGetCursorPosCocoa(_GLFWwindow* window, double* xpos, double* ypos)
+{
+ @autoreleasepool {
+
+ const NSRect contentRect = [window->ns.view frame];
+ // NOTE: The returned location uses base 0,1 not 0,0
+ const NSPoint pos = [window->ns.object mouseLocationOutsideOfEventStream];
+
+ if (xpos)
+ *xpos = pos.x;
+ if (ypos)
+ *ypos = contentRect.size.height - pos.y;
+
+ } // autoreleasepool
+}
+
+void _glfwSetCursorPosCocoa(_GLFWwindow* window, double x, double y)
+{
+ @autoreleasepool {
+
+ updateCursorImage(window);
+
+ const NSRect contentRect = [window->ns.view frame];
+ // NOTE: The returned location uses base 0,1 not 0,0
+ const NSPoint pos = [window->ns.object mouseLocationOutsideOfEventStream];
+
+ window->ns.cursorWarpDeltaX += x - pos.x;
+ window->ns.cursorWarpDeltaY += y - contentRect.size.height + pos.y;
+
+ if (window->monitor)
+ {
+ CGDisplayMoveCursorToPoint(window->monitor->ns.displayID,
+ CGPointMake(x, y));
+ }
+ else
+ {
+ const NSRect localRect = NSMakeRect(x, contentRect.size.height - y - 1, 0, 0);
+ const NSRect globalRect = [window->ns.object convertRectToScreen:localRect];
+ const NSPoint globalPoint = globalRect.origin;
+
+ CGWarpMouseCursorPosition(CGPointMake(globalPoint.x,
+ _glfwTransformYCocoa(globalPoint.y)));
+ }
+
+ // HACK: Calling this right after setting the cursor position prevents macOS
+ // from freezing the cursor for a fraction of a second afterwards
+ if (window->cursorMode != GLFW_CURSOR_DISABLED)
+ CGAssociateMouseAndMouseCursorPosition(true);
+
+ } // autoreleasepool
+}
+
+void _glfwSetCursorModeCocoa(_GLFWwindow* window, int mode)
+{
+ @autoreleasepool {
+ if (_glfwWindowFocusedCocoa(window))
+ updateCursorMode(window);
+ } // autoreleasepool
+}
+
+const char* _glfwGetScancodeNameCocoa(int scancode)
+{
+ @autoreleasepool {
+
+ if (scancode < 0 || scancode > 0xff ||
+ _glfw.ns.keycodes[scancode] == GLFW_KEY_UNKNOWN)
+ {
+ _glfwInputError(GLFW_INVALID_VALUE, "Invalid scancode %i", scancode);
+ return NULL;
+ }
+
+ const int key = _glfw.ns.keycodes[scancode];
+
+ UInt32 deadKeyState = 0;
+ UniChar characters[4];
+ UniCharCount characterCount = 0;
+
+ if (UCKeyTranslate([(NSData*) _glfw.ns.unicodeData bytes],
+ scancode,
+ kUCKeyActionDisplay,
+ 0,
+ LMGetKbdType(),
+ kUCKeyTranslateNoDeadKeysBit,
+ &deadKeyState,
+ sizeof(characters) / sizeof(characters[0]),
+ &characterCount,
+ characters) != noErr)
+ {
+ return NULL;
+ }
+
+ if (!characterCount)
+ return NULL;
+
+ CFStringRef string = CFStringCreateWithCharactersNoCopy(kCFAllocatorDefault,
+ characters,
+ characterCount,
+ kCFAllocatorNull);
+ CFStringGetCString(string,
+ _glfw.ns.keynames[key],
+ sizeof(_glfw.ns.keynames[key]),
+ kCFStringEncodingUTF8);
+ CFRelease(string);
+
+ return _glfw.ns.keynames[key];
+
+ } // autoreleasepool
+}
+
+int _glfwGetKeyScancodeCocoa(int key)
+{
+ return _glfw.ns.scancodes[key];
+}
+
+int _glfwCreateCursorCocoa(_GLFWcursor* cursor,
+ const GLFWimage* image,
+ int xhot, int yhot)
+{
+ @autoreleasepool {
+
+ NSImage* native;
+ NSBitmapImageRep* rep;
+
+ rep = [[NSBitmapImageRep alloc]
+ initWithBitmapDataPlanes:NULL
+ pixelsWide:image->width
+ pixelsHigh:image->height
+ bitsPerSample:8
+ samplesPerPixel:4
+ hasAlpha:YES
+ isPlanar:NO
+ colorSpaceName:NSCalibratedRGBColorSpace
+ bitmapFormat:NSBitmapFormatAlphaNonpremultiplied
+ bytesPerRow:image->width * 4
+ bitsPerPixel:32];
+
+ if (rep == nil)
+ return GLFW_FALSE;
+
+ memcpy([rep bitmapData], image->pixels, image->width * image->height * 4);
+
+ native = [[NSImage alloc] initWithSize:NSMakeSize(image->width, image->height)];
+ [native addRepresentation:rep];
+
+ cursor->ns.object = [[NSCursor alloc] initWithImage:native
+ hotSpot:NSMakePoint(xhot, yhot)];
+
+ [native release];
+ [rep release];
+
+ if (cursor->ns.object == nil)
+ return GLFW_FALSE;
+
+ return GLFW_TRUE;
+
+ } // autoreleasepool
+}
+
+int _glfwCreateStandardCursorCocoa(_GLFWcursor* cursor, int shape)
+{
+ @autoreleasepool {
+
+ SEL cursorSelector = NULL;
+
+ // HACK: Try to use a private message
+ switch (shape)
+ {
+ case GLFW_RESIZE_EW_CURSOR:
+ cursorSelector = NSSelectorFromString(@"_windowResizeEastWestCursor");
+ break;
+ case GLFW_RESIZE_NS_CURSOR:
+ cursorSelector = NSSelectorFromString(@"_windowResizeNorthSouthCursor");
+ break;
+ case GLFW_RESIZE_NWSE_CURSOR:
+ cursorSelector = NSSelectorFromString(@"_windowResizeNorthWestSouthEastCursor");
+ break;
+ case GLFW_RESIZE_NESW_CURSOR:
+ cursorSelector = NSSelectorFromString(@"_windowResizeNorthEastSouthWestCursor");
+ break;
+ }
+
+ if (cursorSelector && [NSCursor respondsToSelector:cursorSelector])
+ {
+ id object = [NSCursor performSelector:cursorSelector];
+ if ([object isKindOfClass:[NSCursor class]])
+ cursor->ns.object = object;
+ }
+
+ if (!cursor->ns.object)
+ {
+ switch (shape)
+ {
+ case GLFW_ARROW_CURSOR:
+ cursor->ns.object = [NSCursor arrowCursor];
+ break;
+ case GLFW_IBEAM_CURSOR:
+ cursor->ns.object = [NSCursor IBeamCursor];
+ break;
+ case GLFW_CROSSHAIR_CURSOR:
+ cursor->ns.object = [NSCursor crosshairCursor];
+ break;
+ case GLFW_POINTING_HAND_CURSOR:
+ cursor->ns.object = [NSCursor pointingHandCursor];
+ break;
+ case GLFW_RESIZE_EW_CURSOR:
+ cursor->ns.object = [NSCursor resizeLeftRightCursor];
+ break;
+ case GLFW_RESIZE_NS_CURSOR:
+ cursor->ns.object = [NSCursor resizeUpDownCursor];
+ break;
+ case GLFW_RESIZE_ALL_CURSOR:
+ cursor->ns.object = [NSCursor closedHandCursor];
+ break;
+ case GLFW_NOT_ALLOWED_CURSOR:
+ cursor->ns.object = [NSCursor operationNotAllowedCursor];
+ break;
+ }
+ }
+
+ if (!cursor->ns.object)
+ {
+ _glfwInputError(GLFW_CURSOR_UNAVAILABLE,
+ "Cocoa: Standard cursor shape unavailable");
+ return GLFW_FALSE;
+ }
+
+ [cursor->ns.object retain];
+ return GLFW_TRUE;
+
+ } // autoreleasepool
+}
+
+void _glfwDestroyCursorCocoa(_GLFWcursor* cursor)
+{
+ @autoreleasepool {
+ if (cursor->ns.object)
+ [(NSCursor*) cursor->ns.object release];
+ } // autoreleasepool
+}
+
+void _glfwSetCursorCocoa(_GLFWwindow* window, _GLFWcursor* cursor)
+{
+ @autoreleasepool {
+ if (cursorInContentArea(window))
+ updateCursorImage(window);
+ } // autoreleasepool
+}
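
The cursor code above is reached through glfwCreateCursor, glfwCreateStandardCursor and glfwSetCursor. A minimal sketch (not part of this patch), assuming a solid 8x8 RGBA image:

#include <GLFW/glfw3.h>
#include <string.h>

static void set_custom_cursor(GLFWwindow* window)
{
    // GLFWimage carries width, height and tightly packed 32-bit RGBA
    // pixels, matching the NSBitmapImageRep layout created above.
    unsigned char pixels[8 * 8 * 4];
    memset(pixels, 0xff, sizeof(pixels));

    GLFWimage image = { 8, 8, pixels };
    GLFWcursor* cursor = glfwCreateCursor(&image, 0, 0); // hotspot at (0, 0)

    if (!cursor) // fall back to a standard shape on failure
        cursor = glfwCreateStandardCursor(GLFW_IBEAM_CURSOR);

    glfwSetCursor(window, cursor);
}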
+
+void _glfwSetClipboardStringCocoa(const char* string)
+{
+ @autoreleasepool {
+ NSPasteboard* pasteboard = [NSPasteboard generalPasteboard];
+ [pasteboard declareTypes:@[NSPasteboardTypeString] owner:nil];
+ [pasteboard setString:@(string) forType:NSPasteboardTypeString];
+ } // autoreleasepool
+}
+
+const char* _glfwGetClipboardStringCocoa(void)
+{
+ @autoreleasepool {
+
+ NSPasteboard* pasteboard = [NSPasteboard generalPasteboard];
+
+ if (![[pasteboard types] containsObject:NSPasteboardTypeString])
+ {
+ _glfwInputError(GLFW_FORMAT_UNAVAILABLE,
+ "Cocoa: Failed to retrieve string from pasteboard");
+ return NULL;
+ }
+
+ NSString* object = [pasteboard stringForType:NSPasteboardTypeString];
+ if (!object)
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "Cocoa: Failed to retrieve object from pasteboard");
+ return NULL;
+ }
+
+ _glfw_free(_glfw.ns.clipboardString);
+ _glfw.ns.clipboardString = _glfw_strdup([object UTF8String]);
+
+ return _glfw.ns.clipboardString;
+
+ } // autoreleasepool
+}
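
A round trip through the public clipboard API lands in the two functions above; the window parameter is ignored by this backend and may be NULL. A sketch (not part of this patch):

#include <GLFW/glfw3.h>
#include <stdio.h>

static void clipboard_round_trip(void)
{
    glfwSetClipboardString(NULL, "Hello from GLFW");

    // Returns NULL (and reports GLFW_FORMAT_UNAVAILABLE) if the pasteboard
    // holds no string; the returned buffer is owned and cached by GLFW.
    const char* text = glfwGetClipboardString(NULL);
    if (text)
        printf("clipboard: %s\n", text);
}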
+
+EGLenum _glfwGetEGLPlatformCocoa(EGLint** attribs)
+{
+ if (_glfw.egl.ANGLE_platform_angle)
+ {
+ int type = 0;
+
+ if (_glfw.egl.ANGLE_platform_angle_opengl)
+ {
+ if (_glfw.hints.init.angleType == GLFW_ANGLE_PLATFORM_TYPE_OPENGL)
+ type = EGL_PLATFORM_ANGLE_TYPE_OPENGL_ANGLE;
+ }
+
+ if (_glfw.egl.ANGLE_platform_angle_metal)
+ {
+ if (_glfw.hints.init.angleType == GLFW_ANGLE_PLATFORM_TYPE_METAL)
+ type = EGL_PLATFORM_ANGLE_TYPE_METAL_ANGLE;
+ }
+
+ if (type)
+ {
+ *attribs = _glfw_calloc(3, sizeof(EGLint));
+ (*attribs)[0] = EGL_PLATFORM_ANGLE_TYPE_ANGLE;
+ (*attribs)[1] = type;
+ (*attribs)[2] = EGL_NONE;
+ return EGL_PLATFORM_ANGLE_ANGLE;
+ }
+ }
+
+ return 0;
+}
+
+EGLNativeDisplayType _glfwGetEGLNativeDisplayCocoa(void)
+{
+ return EGL_DEFAULT_DISPLAY;
+}
+
+EGLNativeWindowType _glfwGetEGLNativeWindowCocoa(_GLFWwindow* window)
+{
+ return window->ns.layer;
+}
+
+void _glfwGetRequiredInstanceExtensionsCocoa(char** extensions)
+{
+ if (_glfw.vk.KHR_surface && _glfw.vk.EXT_metal_surface)
+ {
+ extensions[0] = "VK_KHR_surface";
+ extensions[1] = "VK_EXT_metal_surface";
+ }
+ else if (_glfw.vk.KHR_surface && _glfw.vk.MVK_macos_surface)
+ {
+ extensions[0] = "VK_KHR_surface";
+ extensions[1] = "VK_MVK_macos_surface";
+ }
+}
+
+int _glfwGetPhysicalDevicePresentationSupportCocoa(VkInstance instance,
+ VkPhysicalDevice device,
+ uint32_t queuefamily)
+{
+ return GLFW_TRUE;
+}
+
+VkResult _glfwCreateWindowSurfaceCocoa(VkInstance instance,
+ _GLFWwindow* window,
+ const VkAllocationCallbacks* allocator,
+ VkSurfaceKHR* surface)
+{
+ @autoreleasepool {
+
+#if MAC_OS_X_VERSION_MAX_ALLOWED >= 101100
+ // HACK: Dynamically load Core Animation to avoid adding an extra
+ // dependency for the majority who don't use MoltenVK
+ NSBundle* bundle = [NSBundle bundleWithPath:@"/System/Library/Frameworks/QuartzCore.framework"];
+ if (!bundle)
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "Cocoa: Failed to find QuartzCore.framework");
+ return VK_ERROR_EXTENSION_NOT_PRESENT;
+ }
+
+ // NOTE: Create the layer here as makeBackingLayer should not return nil
+ window->ns.layer = [[bundle classNamed:@"CAMetalLayer"] layer];
+ if (!window->ns.layer)
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "Cocoa: Failed to create layer for view");
+ return VK_ERROR_EXTENSION_NOT_PRESENT;
+ }
+
+ if (window->ns.retina)
+ [window->ns.layer setContentsScale:[window->ns.object backingScaleFactor]];
+
+ [window->ns.view setLayer:window->ns.layer];
+ [window->ns.view setWantsLayer:YES];
+
+ VkResult err;
+
+ if (_glfw.vk.EXT_metal_surface)
+ {
+ VkMetalSurfaceCreateInfoEXT sci;
+
+ PFN_vkCreateMetalSurfaceEXT vkCreateMetalSurfaceEXT;
+ vkCreateMetalSurfaceEXT = (PFN_vkCreateMetalSurfaceEXT)
+ vkGetInstanceProcAddr(instance, "vkCreateMetalSurfaceEXT");
+ if (!vkCreateMetalSurfaceEXT)
+ {
+ _glfwInputError(GLFW_API_UNAVAILABLE,
+ "Cocoa: Vulkan instance missing VK_EXT_metal_surface extension");
+ return VK_ERROR_EXTENSION_NOT_PRESENT;
+ }
+
+ memset(&sci, 0, sizeof(sci));
+ sci.sType = VK_STRUCTURE_TYPE_METAL_SURFACE_CREATE_INFO_EXT;
+ sci.pLayer = window->ns.layer;
+
+ err = vkCreateMetalSurfaceEXT(instance, &sci, allocator, surface);
+ }
+ else
+ {
+ VkMacOSSurfaceCreateInfoMVK sci;
+
+ PFN_vkCreateMacOSSurfaceMVK vkCreateMacOSSurfaceMVK;
+ vkCreateMacOSSurfaceMVK = (PFN_vkCreateMacOSSurfaceMVK)
+ vkGetInstanceProcAddr(instance, "vkCreateMacOSSurfaceMVK");
+ if (!vkCreateMacOSSurfaceMVK)
+ {
+ _glfwInputError(GLFW_API_UNAVAILABLE,
+ "Cocoa: Vulkan instance missing VK_MVK_macos_surface extension");
+ return VK_ERROR_EXTENSION_NOT_PRESENT;
+ }
+
+ memset(&sci, 0, sizeof(sci));
+ sci.sType = VK_STRUCTURE_TYPE_MACOS_SURFACE_CREATE_INFO_MVK;
+ sci.pView = window->ns.view;
+
+ err = vkCreateMacOSSurfaceMVK(instance, &sci, allocator, surface);
+ }
+
+ if (err)
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "Cocoa: Failed to create Vulkan surface: %s",
+ _glfwGetVulkanResultString(err));
+ }
+
+ return err;
+#else
+ return VK_ERROR_EXTENSION_NOT_PRESENT;
+#endif
+
+ } // autoreleasepool
+}
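
The surface path above is reached from the public Vulkan helpers. A hedged sketch (not part of this patch; instance creation and error handling are assumed elsewhere, and the window should have been created with GLFW_CLIENT_API set to GLFW_NO_API):

#define GLFW_INCLUDE_VULKAN
#include <GLFW/glfw3.h>

static VkSurfaceKHR create_surface(VkInstance instance, GLFWwindow* window)
{
    VkSurfaceKHR surface = VK_NULL_HANDLE;

    if (!glfwVulkanSupported())
        return VK_NULL_HANDLE;

    // On macOS this yields VK_KHR_surface plus either VK_EXT_metal_surface
    // or VK_MVK_macos_surface, exactly as filled in above; these must be
    // passed to vkCreateInstance (assumed done by the caller).
    uint32_t count = 0;
    const char** extensions = glfwGetRequiredInstanceExtensions(&count);
    (void) extensions;

    if (glfwCreateWindowSurface(instance, window, NULL, &surface) != VK_SUCCESS)
        return VK_NULL_HANDLE;

    return surface;
}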
+
+
+//////////////////////////////////////////////////////////////////////////
+////// GLFW native API //////
+//////////////////////////////////////////////////////////////////////////
+
+GLFWAPI id glfwGetCocoaWindow(GLFWwindow* handle)
+{
+ _GLFWwindow* window = (_GLFWwindow*) handle;
+ _GLFW_REQUIRE_INIT_OR_RETURN(nil);
+
+ if (_glfw.platform.platformID != GLFW_PLATFORM_COCOA)
+ {
+ _glfwInputError(GLFW_PLATFORM_UNAVAILABLE,
+ "Cocoa: Platform not initialized");
+ return NULL;
+ }
+
+ return window->ns.object;
+}
+
diff --git a/chromium/third_party/dawn/third_party/glfw/src/context.c b/chromium/third_party/dawn/third_party/glfw/src/context.c
new file mode 100644
index 00000000000..7e0449b456c
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/src/context.c
@@ -0,0 +1,758 @@
+//========================================================================
+// GLFW 3.4 - www.glfw.org
+//------------------------------------------------------------------------
+// Copyright (c) 2002-2006 Marcus Geelnard
+// Copyright (c) 2006-2016 Camilla Löwy <elmindreda@glfw.org>
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would
+// be appreciated but is not required.
+//
+// 2. Altered source versions must be plainly marked as such, and must not
+// be misrepresented as being the original software.
+//
+// 3. This notice may not be removed or altered from any source
+// distribution.
+//
+//========================================================================
+// Please use C89 style variable declarations in this file because VS 2010
+// does not support C99
+//========================================================================
+
+#include "internal.h"
+
+#include <assert.h>
+#include <stdio.h>
+#include <string.h>
+#include <limits.h>
+
+
+//////////////////////////////////////////////////////////////////////////
+////// GLFW internal API //////
+//////////////////////////////////////////////////////////////////////////
+
+// Checks whether the desired context attributes are valid
+//
+// This function checks things like whether the specified client API version
+// exists and whether all relevant options have supported and non-conflicting
+// values
+//
+GLFWbool _glfwIsValidContextConfig(const _GLFWctxconfig* ctxconfig)
+{
+ if (ctxconfig->share)
+ {
+ if (ctxconfig->client == GLFW_NO_API ||
+ ctxconfig->share->context.client == GLFW_NO_API)
+ {
+ _glfwInputError(GLFW_NO_WINDOW_CONTEXT, NULL);
+ return GLFW_FALSE;
+ }
+ }
+
+ if (ctxconfig->source != GLFW_NATIVE_CONTEXT_API &&
+ ctxconfig->source != GLFW_EGL_CONTEXT_API &&
+ ctxconfig->source != GLFW_OSMESA_CONTEXT_API)
+ {
+ _glfwInputError(GLFW_INVALID_ENUM,
+ "Invalid context creation API 0x%08X",
+ ctxconfig->source);
+ return GLFW_FALSE;
+ }
+
+ if (ctxconfig->client != GLFW_NO_API &&
+ ctxconfig->client != GLFW_OPENGL_API &&
+ ctxconfig->client != GLFW_OPENGL_ES_API)
+ {
+ _glfwInputError(GLFW_INVALID_ENUM,
+ "Invalid client API 0x%08X",
+ ctxconfig->client);
+ return GLFW_FALSE;
+ }
+
+ if (ctxconfig->client == GLFW_OPENGL_API)
+ {
+ if ((ctxconfig->major < 1 || ctxconfig->minor < 0) ||
+ (ctxconfig->major == 1 && ctxconfig->minor > 5) ||
+ (ctxconfig->major == 2 && ctxconfig->minor > 1) ||
+ (ctxconfig->major == 3 && ctxconfig->minor > 3))
+ {
+ // OpenGL 1.0 is the smallest valid version
+ // OpenGL 1.x series ended with version 1.5
+ // OpenGL 2.x series ended with version 2.1
+ // OpenGL 3.x series ended with version 3.3
+ // For now, let everything else through
+
+ _glfwInputError(GLFW_INVALID_VALUE,
+ "Invalid OpenGL version %i.%i",
+ ctxconfig->major, ctxconfig->minor);
+ return GLFW_FALSE;
+ }
+
+ if (ctxconfig->profile)
+ {
+ if (ctxconfig->profile != GLFW_OPENGL_CORE_PROFILE &&
+ ctxconfig->profile != GLFW_OPENGL_COMPAT_PROFILE)
+ {
+ _glfwInputError(GLFW_INVALID_ENUM,
+ "Invalid OpenGL profile 0x%08X",
+ ctxconfig->profile);
+ return GLFW_FALSE;
+ }
+
+ if (ctxconfig->major <= 2 ||
+ (ctxconfig->major == 3 && ctxconfig->minor < 2))
+ {
+ // Desktop OpenGL context profiles are only defined for version 3.2
+ // and above
+
+ _glfwInputError(GLFW_INVALID_VALUE,
+ "Context profiles are only defined for OpenGL version 3.2 and above");
+ return GLFW_FALSE;
+ }
+ }
+
+ if (ctxconfig->forward && ctxconfig->major <= 2)
+ {
+ // Forward-compatible contexts are only defined for OpenGL version 3.0 and above
+ _glfwInputError(GLFW_INVALID_VALUE,
+ "Forward-compatibility is only defined for OpenGL version 3.0 and above");
+ return GLFW_FALSE;
+ }
+ }
+ else if (ctxconfig->client == GLFW_OPENGL_ES_API)
+ {
+ if (ctxconfig->major < 1 || ctxconfig->minor < 0 ||
+ (ctxconfig->major == 1 && ctxconfig->minor > 1) ||
+ (ctxconfig->major == 2 && ctxconfig->minor > 0))
+ {
+ // OpenGL ES 1.0 is the smallest valid version
+ // OpenGL ES 1.x series ended with version 1.1
+ // OpenGL ES 2.x series ended with version 2.0
+ // For now, let everything else through
+
+ _glfwInputError(GLFW_INVALID_VALUE,
+ "Invalid OpenGL ES version %i.%i",
+ ctxconfig->major, ctxconfig->minor);
+ return GLFW_FALSE;
+ }
+ }
+
+ if (ctxconfig->robustness)
+ {
+ if (ctxconfig->robustness != GLFW_NO_RESET_NOTIFICATION &&
+ ctxconfig->robustness != GLFW_LOSE_CONTEXT_ON_RESET)
+ {
+ _glfwInputError(GLFW_INVALID_ENUM,
+ "Invalid context robustness mode 0x%08X",
+ ctxconfig->robustness);
+ return GLFW_FALSE;
+ }
+ }
+
+ if (ctxconfig->release)
+ {
+ if (ctxconfig->release != GLFW_RELEASE_BEHAVIOR_NONE &&
+ ctxconfig->release != GLFW_RELEASE_BEHAVIOR_FLUSH)
+ {
+ _glfwInputError(GLFW_INVALID_ENUM,
+ "Invalid context release behavior 0x%08X",
+ ctxconfig->release);
+ return GLFW_FALSE;
+ }
+ }
+
+ return GLFW_TRUE;
+}
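
The ctxconfig fields checked above come from window hints. An illustrative hint set that passes validation, requesting a 3.3 core, forward-compatible desktop context (not part of this patch):

#include <GLFW/glfw3.h>

static GLFWwindow* open_core_profile_window(void)
{
    // client -> GLFW_CLIENT_API, major/minor -> GLFW_CONTEXT_VERSION_*,
    // profile -> GLFW_OPENGL_PROFILE, forward -> GLFW_OPENGL_FORWARD_COMPAT
    glfwWindowHint(GLFW_CLIENT_API, GLFW_OPENGL_API);
    glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
    glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);
    glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
    glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GLFW_TRUE);

    // Requesting a profile below 3.2, or forward-compatibility below 3.0,
    // would instead fail the checks above with GLFW_INVALID_VALUE.
    return glfwCreateWindow(640, 480, "Example", NULL, NULL);
}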
+
+// Chooses the framebuffer config that best matches the desired one
+//
+const _GLFWfbconfig* _glfwChooseFBConfig(const _GLFWfbconfig* desired,
+ const _GLFWfbconfig* alternatives,
+ unsigned int count)
+{
+ unsigned int i;
+ unsigned int missing, leastMissing = UINT_MAX;
+ unsigned int colorDiff, leastColorDiff = UINT_MAX;
+ unsigned int extraDiff, leastExtraDiff = UINT_MAX;
+ const _GLFWfbconfig* current;
+ const _GLFWfbconfig* closest = NULL;
+
+ for (i = 0; i < count; i++)
+ {
+ current = alternatives + i;
+
+ if (desired->stereo > 0 && current->stereo == 0)
+ {
+ // Stereo is a hard constraint
+ continue;
+ }
+
+ // Count number of missing buffers
+ {
+ missing = 0;
+
+ if (desired->alphaBits > 0 && current->alphaBits == 0)
+ missing++;
+
+ if (desired->depthBits > 0 && current->depthBits == 0)
+ missing++;
+
+ if (desired->stencilBits > 0 && current->stencilBits == 0)
+ missing++;
+
+ if (desired->auxBuffers > 0 &&
+ current->auxBuffers < desired->auxBuffers)
+ {
+ missing += desired->auxBuffers - current->auxBuffers;
+ }
+
+ if (desired->samples > 0 && current->samples == 0)
+ {
+ // Technically, several multisampling buffers could be
+ // involved, but that's a lower level implementation detail and
+ // not important to us here, so we count them as one
+ missing++;
+ }
+
+ if (desired->transparent != current->transparent)
+ missing++;
+ }
+
+ // These polynomials make many small channel size differences matter
+ // less than one large channel size difference
+
+ // Calculate color channel size difference value
+ {
+ colorDiff = 0;
+
+ if (desired->redBits != GLFW_DONT_CARE)
+ {
+ colorDiff += (desired->redBits - current->redBits) *
+ (desired->redBits - current->redBits);
+ }
+
+ if (desired->greenBits != GLFW_DONT_CARE)
+ {
+ colorDiff += (desired->greenBits - current->greenBits) *
+ (desired->greenBits - current->greenBits);
+ }
+
+ if (desired->blueBits != GLFW_DONT_CARE)
+ {
+ colorDiff += (desired->blueBits - current->blueBits) *
+ (desired->blueBits - current->blueBits);
+ }
+ }
+
+ // Calculate non-color channel size difference value
+ {
+ extraDiff = 0;
+
+ if (desired->alphaBits != GLFW_DONT_CARE)
+ {
+ extraDiff += (desired->alphaBits - current->alphaBits) *
+ (desired->alphaBits - current->alphaBits);
+ }
+
+ if (desired->depthBits != GLFW_DONT_CARE)
+ {
+ extraDiff += (desired->depthBits - current->depthBits) *
+ (desired->depthBits - current->depthBits);
+ }
+
+ if (desired->stencilBits != GLFW_DONT_CARE)
+ {
+ extraDiff += (desired->stencilBits - current->stencilBits) *
+ (desired->stencilBits - current->stencilBits);
+ }
+
+ if (desired->accumRedBits != GLFW_DONT_CARE)
+ {
+ extraDiff += (desired->accumRedBits - current->accumRedBits) *
+ (desired->accumRedBits - current->accumRedBits);
+ }
+
+ if (desired->accumGreenBits != GLFW_DONT_CARE)
+ {
+ extraDiff += (desired->accumGreenBits - current->accumGreenBits) *
+ (desired->accumGreenBits - current->accumGreenBits);
+ }
+
+ if (desired->accumBlueBits != GLFW_DONT_CARE)
+ {
+ extraDiff += (desired->accumBlueBits - current->accumBlueBits) *
+ (desired->accumBlueBits - current->accumBlueBits);
+ }
+
+ if (desired->accumAlphaBits != GLFW_DONT_CARE)
+ {
+ extraDiff += (desired->accumAlphaBits - current->accumAlphaBits) *
+ (desired->accumAlphaBits - current->accumAlphaBits);
+ }
+
+ if (desired->samples != GLFW_DONT_CARE)
+ {
+ extraDiff += (desired->samples - current->samples) *
+ (desired->samples - current->samples);
+ }
+
+ if (desired->sRGB && !current->sRGB)
+ extraDiff++;
+ }
+
+ // Figure out if the current one is better than the best one found so far
+ // Least number of missing buffers is the most important heuristic,
+ // then color buffer size match and lastly size match for other buffers
+
+ if (missing < leastMissing)
+ closest = current;
+ else if (missing == leastMissing)
+ {
+ if ((colorDiff < leastColorDiff) ||
+ (colorDiff == leastColorDiff && extraDiff < leastExtraDiff))
+ {
+ closest = current;
+ }
+ }
+
+ if (current == closest)
+ {
+ leastMissing = missing;
+ leastColorDiff = colorDiff;
+ leastExtraDiff = extraDiff;
+ }
+ }
+
+ return closest;
+}
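
A standalone illustration of the squared-difference scoring above (not part of this patch): against a desired 8/8/8 colour format, three small 2-bit deficits score 12 while a single 4-bit deficit scores 16, so several small mismatches are preferred to one large one, as the comment in the loop describes.

#include <stdio.h>

/* Paraphrase of the colorDiff computation above, for two sample configs. */
static unsigned int color_diff(int dr, int dg, int db, int cr, int cg, int cb)
{
    return (unsigned int) ((dr - cr) * (dr - cr) +
                           (dg - cg) * (dg - cg) +
                           (db - cb) * (db - cb));
}

int main(void)
{
    printf("6/6/6 vs 8/8/8 -> %u\n", color_diff(8, 8, 8, 6, 6, 6)); // 12
    printf("8/8/4 vs 8/8/8 -> %u\n", color_diff(8, 8, 8, 8, 8, 4)); // 16
    return 0;
}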
+
+// Retrieves the attributes of the current context
+//
+GLFWbool _glfwRefreshContextAttribs(_GLFWwindow* window,
+ const _GLFWctxconfig* ctxconfig)
+{
+ int i;
+ _GLFWwindow* previous;
+ const char* version;
+ const char* prefixes[] =
+ {
+ "OpenGL ES-CM ",
+ "OpenGL ES-CL ",
+ "OpenGL ES ",
+ NULL
+ };
+
+ window->context.source = ctxconfig->source;
+ window->context.client = GLFW_OPENGL_API;
+
+ previous = _glfwPlatformGetTls(&_glfw.contextSlot);
+ glfwMakeContextCurrent((GLFWwindow*) window);
+
+ window->context.GetIntegerv = (PFNGLGETINTEGERVPROC)
+ window->context.getProcAddress("glGetIntegerv");
+ window->context.GetString = (PFNGLGETSTRINGPROC)
+ window->context.getProcAddress("glGetString");
+ if (!window->context.GetIntegerv || !window->context.GetString)
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR, "Entry point retrieval is broken");
+ glfwMakeContextCurrent((GLFWwindow*) previous);
+ return GLFW_FALSE;
+ }
+
+ version = (const char*) window->context.GetString(GL_VERSION);
+ if (!version)
+ {
+ if (ctxconfig->client == GLFW_OPENGL_API)
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "OpenGL version string retrieval is broken");
+ }
+ else
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "OpenGL ES version string retrieval is broken");
+ }
+
+ glfwMakeContextCurrent((GLFWwindow*) previous);
+ return GLFW_FALSE;
+ }
+
+ for (i = 0; prefixes[i]; i++)
+ {
+ const size_t length = strlen(prefixes[i]);
+
+ if (strncmp(version, prefixes[i], length) == 0)
+ {
+ version += length;
+ window->context.client = GLFW_OPENGL_ES_API;
+ break;
+ }
+ }
+
+ if (!sscanf(version, "%d.%d.%d",
+ &window->context.major,
+ &window->context.minor,
+ &window->context.revision))
+ {
+ if (window->context.client == GLFW_OPENGL_API)
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "No version found in OpenGL version string");
+ }
+ else
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "No version found in OpenGL ES version string");
+ }
+
+ glfwMakeContextCurrent((GLFWwindow*) previous);
+ return GLFW_FALSE;
+ }
+
+ if (window->context.major < ctxconfig->major ||
+ (window->context.major == ctxconfig->major &&
+ window->context.minor < ctxconfig->minor))
+ {
+ // The desired OpenGL version is greater than the actual version
+ // This only happens if the machine lacks {GLX|WGL}_ARB_create_context
+ // /and/ the user has requested an OpenGL version greater than 1.0
+
+ // For API consistency, we emulate the behavior of the
+ // {GLX|WGL}_ARB_create_context extension and fail here
+
+ if (window->context.client == GLFW_OPENGL_API)
+ {
+ _glfwInputError(GLFW_VERSION_UNAVAILABLE,
+ "Requested OpenGL version %i.%i, got version %i.%i",
+ ctxconfig->major, ctxconfig->minor,
+ window->context.major, window->context.minor);
+ }
+ else
+ {
+ _glfwInputError(GLFW_VERSION_UNAVAILABLE,
+ "Requested OpenGL ES version %i.%i, got version %i.%i",
+ ctxconfig->major, ctxconfig->minor,
+ window->context.major, window->context.minor);
+ }
+
+ glfwMakeContextCurrent((GLFWwindow*) previous);
+ return GLFW_FALSE;
+ }
+
+ if (window->context.major >= 3)
+ {
+ // OpenGL 3.0+ uses a different function for extension string retrieval
+ // We cache it here instead of in glfwExtensionSupported mostly to alert
+ // users as early as possible that their build may be broken
+
+ window->context.GetStringi = (PFNGLGETSTRINGIPROC)
+ window->context.getProcAddress("glGetStringi");
+ if (!window->context.GetStringi)
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "Entry point retrieval is broken");
+ glfwMakeContextCurrent((GLFWwindow*) previous);
+ return GLFW_FALSE;
+ }
+ }
+
+ if (window->context.client == GLFW_OPENGL_API)
+ {
+ // Read back context flags (OpenGL 3.0 and above)
+ if (window->context.major >= 3)
+ {
+ GLint flags;
+ window->context.GetIntegerv(GL_CONTEXT_FLAGS, &flags);
+
+ if (flags & GL_CONTEXT_FLAG_FORWARD_COMPATIBLE_BIT)
+ window->context.forward = GLFW_TRUE;
+
+ if (flags & GL_CONTEXT_FLAG_DEBUG_BIT)
+ window->context.debug = GLFW_TRUE;
+ else if (glfwExtensionSupported("GL_ARB_debug_output") &&
+ ctxconfig->debug)
+ {
+ // HACK: This is a workaround for older drivers (pre KHR_debug)
+ // not setting the debug bit in the context flags for
+ // debug contexts
+ window->context.debug = GLFW_TRUE;
+ }
+
+ if (flags & GL_CONTEXT_FLAG_NO_ERROR_BIT_KHR)
+ window->context.noerror = GLFW_TRUE;
+ }
+
+ // Read back OpenGL context profile (OpenGL 3.2 and above)
+ if (window->context.major >= 4 ||
+ (window->context.major == 3 && window->context.minor >= 2))
+ {
+ GLint mask;
+ window->context.GetIntegerv(GL_CONTEXT_PROFILE_MASK, &mask);
+
+ if (mask & GL_CONTEXT_COMPATIBILITY_PROFILE_BIT)
+ window->context.profile = GLFW_OPENGL_COMPAT_PROFILE;
+ else if (mask & GL_CONTEXT_CORE_PROFILE_BIT)
+ window->context.profile = GLFW_OPENGL_CORE_PROFILE;
+ else if (glfwExtensionSupported("GL_ARB_compatibility"))
+ {
+ // HACK: This is a workaround for the compatibility profile bit
+ // not being set in the context flags if an OpenGL 3.2+
+ // context was created without having requested a specific
+ // version
+ window->context.profile = GLFW_OPENGL_COMPAT_PROFILE;
+ }
+ }
+
+ // Read back robustness strategy
+ if (glfwExtensionSupported("GL_ARB_robustness"))
+ {
+ // NOTE: We avoid using the context flags for detection, as they are
+ // only present from 3.0 while the extension applies from 1.1
+
+ GLint strategy;
+ window->context.GetIntegerv(GL_RESET_NOTIFICATION_STRATEGY_ARB,
+ &strategy);
+
+ if (strategy == GL_LOSE_CONTEXT_ON_RESET_ARB)
+ window->context.robustness = GLFW_LOSE_CONTEXT_ON_RESET;
+ else if (strategy == GL_NO_RESET_NOTIFICATION_ARB)
+ window->context.robustness = GLFW_NO_RESET_NOTIFICATION;
+ }
+ }
+ else
+ {
+ // Read back robustness strategy
+ if (glfwExtensionSupported("GL_EXT_robustness"))
+ {
+ // NOTE: The values of these constants match those of the OpenGL ARB
+ // one, so we can reuse them here
+
+ GLint strategy;
+ window->context.GetIntegerv(GL_RESET_NOTIFICATION_STRATEGY_ARB,
+ &strategy);
+
+ if (strategy == GL_LOSE_CONTEXT_ON_RESET_ARB)
+ window->context.robustness = GLFW_LOSE_CONTEXT_ON_RESET;
+ else if (strategy == GL_NO_RESET_NOTIFICATION_ARB)
+ window->context.robustness = GLFW_NO_RESET_NOTIFICATION;
+ }
+ }
+
+ if (glfwExtensionSupported("GL_KHR_context_flush_control"))
+ {
+ GLint behavior;
+ window->context.GetIntegerv(GL_CONTEXT_RELEASE_BEHAVIOR, &behavior);
+
+ if (behavior == GL_NONE)
+ window->context.release = GLFW_RELEASE_BEHAVIOR_NONE;
+ else if (behavior == GL_CONTEXT_RELEASE_BEHAVIOR_FLUSH)
+ window->context.release = GLFW_RELEASE_BEHAVIOR_FLUSH;
+ }
+
+ // Clearing the front buffer to black to avoid garbage pixels left over from
+ // previous uses of our bit of VRAM
+ {
+ PFNGLCLEARPROC glClear = (PFNGLCLEARPROC)
+ window->context.getProcAddress("glClear");
+ glClear(GL_COLOR_BUFFER_BIT);
+
+ if (window->doublebuffer)
+ window->context.swapBuffers(window);
+ }
+
+ glfwMakeContextCurrent((GLFWwindow*) previous);
+ return GLFW_TRUE;
+}
+
+// Searches an extension string for the specified extension
+//
+GLFWbool _glfwStringInExtensionString(const char* string, const char* extensions)
+{
+ const char* start = extensions;
+
+ for (;;)
+ {
+ const char* where;
+ const char* terminator;
+
+ where = strstr(start, string);
+ if (!where)
+ return GLFW_FALSE;
+
+ terminator = where + strlen(string);
+ if (where == start || *(where - 1) == ' ')
+ {
+ if (*terminator == ' ' || *terminator == '\0')
+ break;
+ }
+
+ start = terminator;
+ }
+
+ return GLFW_TRUE;
+}
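
A standalone paraphrase of the whole-token test above (hypothetical helper name, not GLFW API, not part of this patch), showing why a bare strstr is not enough: "GL_ARB_shadow" must not be reported present when the list only contains "GL_ARB_shadow_ambient".

#include <stdio.h>
#include <string.h>

static int has_token(const char* token, const char* list)
{
    const char* start = list;
    for (;;)
    {
        const char* where = strstr(start, token);
        if (!where)
            return 0;

        const char* terminator = where + strlen(token);
        if ((where == start || where[-1] == ' ') &&
            (*terminator == ' ' || *terminator == '\0'))
            return 1;

        start = terminator; // partial match; keep scanning after it
    }
}

int main(void)
{
    const char* exts = "GL_ARB_shadow_ambient GL_ARB_multitexture";
    printf("%d\n", has_token("GL_ARB_shadow", exts));       // 0: prefix-only match rejected
    printf("%d\n", has_token("GL_ARB_multitexture", exts)); // 1: whole token found
    return 0;
}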
+
+
+//////////////////////////////////////////////////////////////////////////
+////// GLFW public API //////
+//////////////////////////////////////////////////////////////////////////
+
+GLFWAPI void glfwMakeContextCurrent(GLFWwindow* handle)
+{
+ _GLFWwindow* window = (_GLFWwindow*) handle;
+ _GLFWwindow* previous;
+
+ _GLFW_REQUIRE_INIT();
+
+ previous = _glfwPlatformGetTls(&_glfw.contextSlot);
+
+ if (window && window->context.client == GLFW_NO_API)
+ {
+ _glfwInputError(GLFW_NO_WINDOW_CONTEXT,
+ "Cannot make current with a window that has no OpenGL or OpenGL ES context");
+ return;
+ }
+
+ if (previous)
+ {
+ if (!window || window->context.source != previous->context.source)
+ previous->context.makeCurrent(NULL);
+ }
+
+ if (window)
+ window->context.makeCurrent(window);
+}
+
+GLFWAPI GLFWwindow* glfwGetCurrentContext(void)
+{
+ _GLFW_REQUIRE_INIT_OR_RETURN(NULL);
+ return _glfwPlatformGetTls(&_glfw.contextSlot);
+}
+
+GLFWAPI void glfwSwapBuffers(GLFWwindow* handle)
+{
+ _GLFWwindow* window = (_GLFWwindow*) handle;
+ assert(window != NULL);
+
+ _GLFW_REQUIRE_INIT();
+
+ if (window->context.client == GLFW_NO_API)
+ {
+ _glfwInputError(GLFW_NO_WINDOW_CONTEXT,
+ "Cannot swap buffers of a window that has no OpenGL or OpenGL ES context");
+ return;
+ }
+
+ window->context.swapBuffers(window);
+}
+
+GLFWAPI void glfwSwapInterval(int interval)
+{
+ _GLFWwindow* window;
+
+ _GLFW_REQUIRE_INIT();
+
+ window = _glfwPlatformGetTls(&_glfw.contextSlot);
+ if (!window)
+ {
+ _glfwInputError(GLFW_NO_CURRENT_CONTEXT,
+ "Cannot set swap interval without a current OpenGL or OpenGL ES context");
+ return;
+ }
+
+ window->context.swapInterval(interval);
+}
+
+GLFWAPI int glfwExtensionSupported(const char* extension)
+{
+ _GLFWwindow* window;
+ assert(extension != NULL);
+
+ _GLFW_REQUIRE_INIT_OR_RETURN(GLFW_FALSE);
+
+ window = _glfwPlatformGetTls(&_glfw.contextSlot);
+ if (!window)
+ {
+ _glfwInputError(GLFW_NO_CURRENT_CONTEXT,
+ "Cannot query extension without a current OpenGL or OpenGL ES context");
+ return GLFW_FALSE;
+ }
+
+ if (*extension == '\0')
+ {
+ _glfwInputError(GLFW_INVALID_VALUE, "Extension name cannot be an empty string");
+ return GLFW_FALSE;
+ }
+
+ if (window->context.major >= 3)
+ {
+ int i;
+ GLint count;
+
+ // Check if extension is in the modern OpenGL extensions string list
+
+ window->context.GetIntegerv(GL_NUM_EXTENSIONS, &count);
+
+ for (i = 0; i < count; i++)
+ {
+ const char* en = (const char*)
+ window->context.GetStringi(GL_EXTENSIONS, i);
+ if (!en)
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "Extension string retrieval is broken");
+ return GLFW_FALSE;
+ }
+
+ if (strcmp(en, extension) == 0)
+ return GLFW_TRUE;
+ }
+ }
+ else
+ {
+ // Check if extension is in the old style OpenGL extensions string
+
+ const char* extensions = (const char*)
+ window->context.GetString(GL_EXTENSIONS);
+ if (!extensions)
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "Extension string retrieval is broken");
+ return GLFW_FALSE;
+ }
+
+ if (_glfwStringInExtensionString(extension, extensions))
+ return GLFW_TRUE;
+ }
+
+ // Check if extension is in the platform-specific string
+ return window->context.extensionSupported(extension);
+}
+
+GLFWAPI GLFWglproc glfwGetProcAddress(const char* procname)
+{
+ _GLFWwindow* window;
+ assert(procname != NULL);
+
+ _GLFW_REQUIRE_INIT_OR_RETURN(NULL);
+
+ window = _glfwPlatformGetTls(&_glfw.contextSlot);
+ if (!window)
+ {
+ _glfwInputError(GLFW_NO_CURRENT_CONTEXT,
+ "Cannot query entry point without a current OpenGL or OpenGL ES context");
+ return NULL;
+ }
+
+ return window->context.getProcAddress(procname);
+}
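
Taken together, the public functions above cover the usual context workflow. A minimal sketch (not part of this patch; window creation is assumed elsewhere):

#include <GLFW/glfw3.h>

static void render_loop(GLFWwindow* window)
{
    // A context must be current on this thread before the calls below.
    glfwMakeContextCurrent(window);
    glfwSwapInterval(1); // vsync; needs the current context set above

    // Extensions and entry points are resolved against the current context.
    if (glfwExtensionSupported("GL_KHR_debug"))
    {
        void (*proc)(void) = glfwGetProcAddress("glDebugMessageCallback");
        (void) proc; // cast to the real function pointer type before use
    }

    while (!glfwWindowShouldClose(window))
    {
        // ... draw ...
        glfwSwapBuffers(window); // errors with GLFW_NO_WINDOW_CONTEXT for GLFW_NO_API windows
        glfwPollEvents();
    }
}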
+
diff --git a/chromium/third_party/dawn/third_party/glfw/src/egl_context.c b/chromium/third_party/dawn/third_party/glfw/src/egl_context.c
new file mode 100644
index 00000000000..f8850fa2a5e
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/src/egl_context.c
@@ -0,0 +1,868 @@
+//========================================================================
+// GLFW 3.4 EGL - www.glfw.org
+//------------------------------------------------------------------------
+// Copyright (c) 2002-2006 Marcus Geelnard
+// Copyright (c) 2006-2019 Camilla Löwy <elmindreda@glfw.org>
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would
+// be appreciated but is not required.
+//
+// 2. Altered source versions must be plainly marked as such, and must not
+// be misrepresented as being the original software.
+//
+// 3. This notice may not be removed or altered from any source
+// distribution.
+//
+//========================================================================
+// Please use C89 style variable declarations in this file because VS 2010
+// does not support C99
+//========================================================================
+
+#include "internal.h"
+
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <assert.h>
+
+
+// Return a description of the specified EGL error
+//
+static const char* getEGLErrorString(EGLint error)
+{
+ switch (error)
+ {
+ case EGL_SUCCESS:
+ return "Success";
+ case EGL_NOT_INITIALIZED:
+ return "EGL is not or could not be initialized";
+ case EGL_BAD_ACCESS:
+ return "EGL cannot access a requested resource";
+ case EGL_BAD_ALLOC:
+ return "EGL failed to allocate resources for the requested operation";
+ case EGL_BAD_ATTRIBUTE:
+ return "An unrecognized attribute or attribute value was passed in the attribute list";
+ case EGL_BAD_CONTEXT:
+ return "An EGLContext argument does not name a valid EGL rendering context";
+ case EGL_BAD_CONFIG:
+ return "An EGLConfig argument does not name a valid EGL frame buffer configuration";
+ case EGL_BAD_CURRENT_SURFACE:
+ return "The current surface of the calling thread is a window, pixel buffer or pixmap that is no longer valid";
+ case EGL_BAD_DISPLAY:
+ return "An EGLDisplay argument does not name a valid EGL display connection";
+ case EGL_BAD_SURFACE:
+ return "An EGLSurface argument does not name a valid surface configured for GL rendering";
+ case EGL_BAD_MATCH:
+ return "Arguments are inconsistent";
+ case EGL_BAD_PARAMETER:
+ return "One or more argument values are invalid";
+ case EGL_BAD_NATIVE_PIXMAP:
+ return "A NativePixmapType argument does not refer to a valid native pixmap";
+ case EGL_BAD_NATIVE_WINDOW:
+ return "A NativeWindowType argument does not refer to a valid native window";
+ case EGL_CONTEXT_LOST:
+ return "The application must destroy all contexts and reinitialise";
+ default:
+ return "ERROR: UNKNOWN EGL ERROR";
+ }
+}
+
+// Returns the specified attribute of the specified EGLConfig
+//
+static int getEGLConfigAttrib(EGLConfig config, int attrib)
+{
+ int value;
+ eglGetConfigAttrib(_glfw.egl.display, config, attrib, &value);
+ return value;
+}
+
+// Return the EGLConfig most closely matching the specified hints
+//
+static GLFWbool chooseEGLConfig(const _GLFWctxconfig* ctxconfig,
+ const _GLFWfbconfig* desired,
+ EGLConfig* result)
+{
+ EGLConfig* nativeConfigs;
+ _GLFWfbconfig* usableConfigs;
+ const _GLFWfbconfig* closest;
+ int i, nativeCount, usableCount;
+
+ eglGetConfigs(_glfw.egl.display, NULL, 0, &nativeCount);
+ if (!nativeCount)
+ {
+ _glfwInputError(GLFW_API_UNAVAILABLE, "EGL: No EGLConfigs returned");
+ return GLFW_FALSE;
+ }
+
+ nativeConfigs = _glfw_calloc(nativeCount, sizeof(EGLConfig));
+ eglGetConfigs(_glfw.egl.display, nativeConfigs, nativeCount, &nativeCount);
+
+ usableConfigs = _glfw_calloc(nativeCount, sizeof(_GLFWfbconfig));
+ usableCount = 0;
+
+ for (i = 0; i < nativeCount; i++)
+ {
+ const EGLConfig n = nativeConfigs[i];
+ _GLFWfbconfig* u = usableConfigs + usableCount;
+
+ // Only consider RGB(A) EGLConfigs
+ if (getEGLConfigAttrib(n, EGL_COLOR_BUFFER_TYPE) != EGL_RGB_BUFFER)
+ continue;
+
+ // Only consider window EGLConfigs
+ if (!(getEGLConfigAttrib(n, EGL_SURFACE_TYPE) & EGL_WINDOW_BIT))
+ continue;
+
+#if defined(_GLFW_X11)
+ if (_glfw.platform.platformID == GLFW_PLATFORM_X11)
+ {
+ XVisualInfo vi = {0};
+
+ // Only consider EGLConfigs with associated Visuals
+ vi.visualid = getEGLConfigAttrib(n, EGL_NATIVE_VISUAL_ID);
+ if (!vi.visualid)
+ continue;
+
+ if (desired->transparent)
+ {
+ int count;
+ XVisualInfo* vis =
+ XGetVisualInfo(_glfw.x11.display, VisualIDMask, &vi, &count);
+ if (vis)
+ {
+ u->transparent = _glfwIsVisualTransparentX11(vis[0].visual);
+ XFree(vis);
+ }
+ }
+ }
+#endif // _GLFW_X11
+
+ if (ctxconfig->client == GLFW_OPENGL_ES_API)
+ {
+ if (ctxconfig->major == 1)
+ {
+ if (!(getEGLConfigAttrib(n, EGL_RENDERABLE_TYPE) & EGL_OPENGL_ES_BIT))
+ continue;
+ }
+ else
+ {
+ if (!(getEGLConfigAttrib(n, EGL_RENDERABLE_TYPE) & EGL_OPENGL_ES2_BIT))
+ continue;
+ }
+ }
+ else if (ctxconfig->client == GLFW_OPENGL_API)
+ {
+ if (!(getEGLConfigAttrib(n, EGL_RENDERABLE_TYPE) & EGL_OPENGL_BIT))
+ continue;
+ }
+
+ u->redBits = getEGLConfigAttrib(n, EGL_RED_SIZE);
+ u->greenBits = getEGLConfigAttrib(n, EGL_GREEN_SIZE);
+ u->blueBits = getEGLConfigAttrib(n, EGL_BLUE_SIZE);
+
+ u->alphaBits = getEGLConfigAttrib(n, EGL_ALPHA_SIZE);
+ u->depthBits = getEGLConfigAttrib(n, EGL_DEPTH_SIZE);
+ u->stencilBits = getEGLConfigAttrib(n, EGL_STENCIL_SIZE);
+
+ u->samples = getEGLConfigAttrib(n, EGL_SAMPLES);
+ u->doublebuffer = desired->doublebuffer;
+
+ u->handle = (uintptr_t) n;
+ usableCount++;
+ }
+
+ closest = _glfwChooseFBConfig(desired, usableConfigs, usableCount);
+ if (closest)
+ *result = (EGLConfig) closest->handle;
+
+ _glfw_free(nativeConfigs);
+ _glfw_free(usableConfigs);
+
+ return closest != NULL;
+}
+
+static void makeContextCurrentEGL(_GLFWwindow* window)
+{
+ if (window)
+ {
+ if (!eglMakeCurrent(_glfw.egl.display,
+ window->context.egl.surface,
+ window->context.egl.surface,
+ window->context.egl.handle))
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "EGL: Failed to make context current: %s",
+ getEGLErrorString(eglGetError()));
+ return;
+ }
+ }
+ else
+ {
+ if (!eglMakeCurrent(_glfw.egl.display,
+ EGL_NO_SURFACE,
+ EGL_NO_SURFACE,
+ EGL_NO_CONTEXT))
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "EGL: Failed to clear current context: %s",
+ getEGLErrorString(eglGetError()));
+ return;
+ }
+ }
+
+ _glfwPlatformSetTls(&_glfw.contextSlot, window);
+}
+
+static void swapBuffersEGL(_GLFWwindow* window)
+{
+ if (window != _glfwPlatformGetTls(&_glfw.contextSlot))
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "EGL: The context must be current on the calling thread when swapping buffers");
+ return;
+ }
+
+#if defined(_GLFW_WAYLAND)
+ if (_glfw.platform.platformID == GLFW_PLATFORM_WAYLAND)
+ {
+ // NOTE: Swapping buffers on a hidden window on Wayland makes it visible
+ if (!window->wl.visible)
+ return;
+ }
+#endif
+
+ eglSwapBuffers(_glfw.egl.display, window->context.egl.surface);
+}
+
+static void swapIntervalEGL(int interval)
+{
+ eglSwapInterval(_glfw.egl.display, interval);
+}
+
+static int extensionSupportedEGL(const char* extension)
+{
+ const char* extensions = eglQueryString(_glfw.egl.display, EGL_EXTENSIONS);
+ if (extensions)
+ {
+ if (_glfwStringInExtensionString(extension, extensions))
+ return GLFW_TRUE;
+ }
+
+ return GLFW_FALSE;
+}
+
+static GLFWglproc getProcAddressEGL(const char* procname)
+{
+ _GLFWwindow* window = _glfwPlatformGetTls(&_glfw.contextSlot);
+
+ if (window->context.egl.client)
+ {
+ GLFWglproc proc = (GLFWglproc)
+ _glfwPlatformGetModuleSymbol(window->context.egl.client, procname);
+ if (proc)
+ return proc;
+ }
+
+ return eglGetProcAddress(procname);
+}
+
+static void destroyContextEGL(_GLFWwindow* window)
+{
+ // NOTE: Do not unload libGL.so.1 while the X11 display is still open,
+ // as it will make XCloseDisplay segfault
+ if (_glfw.platform.platformID != GLFW_PLATFORM_X11 ||
+ window->context.client != GLFW_OPENGL_API)
+ {
+ if (window->context.egl.client)
+ {
+ _glfwPlatformFreeModule(window->context.egl.client);
+ window->context.egl.client = NULL;
+ }
+ }
+
+ if (window->context.egl.surface)
+ {
+ eglDestroySurface(_glfw.egl.display, window->context.egl.surface);
+ window->context.egl.surface = EGL_NO_SURFACE;
+ }
+
+ if (window->context.egl.handle)
+ {
+ eglDestroyContext(_glfw.egl.display, window->context.egl.handle);
+ window->context.egl.handle = EGL_NO_CONTEXT;
+ }
+}
+
+
+//////////////////////////////////////////////////////////////////////////
+////// GLFW internal API //////
+//////////////////////////////////////////////////////////////////////////
+
+// Initialize EGL
+//
+GLFWbool _glfwInitEGL(void)
+{
+ int i;
+ EGLint* attribs = NULL;
+ const char* extensions;
+ const char* sonames[] =
+ {
+#if defined(_GLFW_EGL_LIBRARY)
+ _GLFW_EGL_LIBRARY,
+#elif defined(_GLFW_WIN32)
+ "libEGL.dll",
+ "EGL.dll",
+#elif defined(_GLFW_COCOA)
+ "libEGL.dylib",
+#elif defined(__CYGWIN__)
+ "libEGL-1.so",
+#elif defined(__OpenBSD__) || defined(__NetBSD__)
+ "libEGL.so",
+#else
+ "libEGL.so.1",
+#endif
+ NULL
+ };
+
+ if (_glfw.egl.handle)
+ return GLFW_TRUE;
+
+ for (i = 0; sonames[i]; i++)
+ {
+ _glfw.egl.handle = _glfwPlatformLoadModule(sonames[i]);
+ if (_glfw.egl.handle)
+ break;
+ }
+
+ if (!_glfw.egl.handle)
+ {
+ _glfwInputError(GLFW_API_UNAVAILABLE, "EGL: Library not found");
+ return GLFW_FALSE;
+ }
+
+ _glfw.egl.prefix = (strncmp(sonames[i], "lib", 3) == 0);
+
+ _glfw.egl.GetConfigAttrib = (PFN_eglGetConfigAttrib)
+ _glfwPlatformGetModuleSymbol(_glfw.egl.handle, "eglGetConfigAttrib");
+ _glfw.egl.GetConfigs = (PFN_eglGetConfigs)
+ _glfwPlatformGetModuleSymbol(_glfw.egl.handle, "eglGetConfigs");
+ _glfw.egl.GetDisplay = (PFN_eglGetDisplay)
+ _glfwPlatformGetModuleSymbol(_glfw.egl.handle, "eglGetDisplay");
+ _glfw.egl.GetError = (PFN_eglGetError)
+ _glfwPlatformGetModuleSymbol(_glfw.egl.handle, "eglGetError");
+ _glfw.egl.Initialize = (PFN_eglInitialize)
+ _glfwPlatformGetModuleSymbol(_glfw.egl.handle, "eglInitialize");
+ _glfw.egl.Terminate = (PFN_eglTerminate)
+ _glfwPlatformGetModuleSymbol(_glfw.egl.handle, "eglTerminate");
+ _glfw.egl.BindAPI = (PFN_eglBindAPI)
+ _glfwPlatformGetModuleSymbol(_glfw.egl.handle, "eglBindAPI");
+ _glfw.egl.CreateContext = (PFN_eglCreateContext)
+ _glfwPlatformGetModuleSymbol(_glfw.egl.handle, "eglCreateContext");
+ _glfw.egl.DestroySurface = (PFN_eglDestroySurface)
+ _glfwPlatformGetModuleSymbol(_glfw.egl.handle, "eglDestroySurface");
+ _glfw.egl.DestroyContext = (PFN_eglDestroyContext)
+ _glfwPlatformGetModuleSymbol(_glfw.egl.handle, "eglDestroyContext");
+ _glfw.egl.CreateWindowSurface = (PFN_eglCreateWindowSurface)
+ _glfwPlatformGetModuleSymbol(_glfw.egl.handle, "eglCreateWindowSurface");
+ _glfw.egl.MakeCurrent = (PFN_eglMakeCurrent)
+ _glfwPlatformGetModuleSymbol(_glfw.egl.handle, "eglMakeCurrent");
+ _glfw.egl.SwapBuffers = (PFN_eglSwapBuffers)
+ _glfwPlatformGetModuleSymbol(_glfw.egl.handle, "eglSwapBuffers");
+ _glfw.egl.SwapInterval = (PFN_eglSwapInterval)
+ _glfwPlatformGetModuleSymbol(_glfw.egl.handle, "eglSwapInterval");
+ _glfw.egl.QueryString = (PFN_eglQueryString)
+ _glfwPlatformGetModuleSymbol(_glfw.egl.handle, "eglQueryString");
+ _glfw.egl.GetProcAddress = (PFN_eglGetProcAddress)
+ _glfwPlatformGetModuleSymbol(_glfw.egl.handle, "eglGetProcAddress");
+
+ if (!_glfw.egl.GetConfigAttrib ||
+ !_glfw.egl.GetConfigs ||
+ !_glfw.egl.GetDisplay ||
+ !_glfw.egl.GetError ||
+ !_glfw.egl.Initialize ||
+ !_glfw.egl.Terminate ||
+ !_glfw.egl.BindAPI ||
+ !_glfw.egl.CreateContext ||
+ !_glfw.egl.DestroySurface ||
+ !_glfw.egl.DestroyContext ||
+ !_glfw.egl.CreateWindowSurface ||
+ !_glfw.egl.MakeCurrent ||
+ !_glfw.egl.SwapBuffers ||
+ !_glfw.egl.SwapInterval ||
+ !_glfw.egl.QueryString ||
+ !_glfw.egl.GetProcAddress)
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "EGL: Failed to load required entry points");
+
+ _glfwTerminateEGL();
+ return GLFW_FALSE;
+ }
+
+ extensions = eglQueryString(EGL_NO_DISPLAY, EGL_EXTENSIONS);
+ if (extensions && eglGetError() == EGL_SUCCESS)
+ _glfw.egl.EXT_client_extensions = GLFW_TRUE;
+
+ if (_glfw.egl.EXT_client_extensions)
+ {
+ _glfw.egl.EXT_platform_base =
+ _glfwStringInExtensionString("EGL_EXT_platform_base", extensions);
+ _glfw.egl.EXT_platform_x11 =
+ _glfwStringInExtensionString("EGL_EXT_platform_x11", extensions);
+ _glfw.egl.EXT_platform_wayland =
+ _glfwStringInExtensionString("EGL_EXT_platform_wayland", extensions);
+ _glfw.egl.ANGLE_platform_angle =
+ _glfwStringInExtensionString("EGL_ANGLE_platform_angle", extensions);
+ _glfw.egl.ANGLE_platform_angle_opengl =
+ _glfwStringInExtensionString("EGL_ANGLE_platform_angle_opengl", extensions);
+ _glfw.egl.ANGLE_platform_angle_d3d =
+ _glfwStringInExtensionString("EGL_ANGLE_platform_angle_d3d", extensions);
+ _glfw.egl.ANGLE_platform_angle_vulkan =
+ _glfwStringInExtensionString("EGL_ANGLE_platform_angle_vulkan", extensions);
+ _glfw.egl.ANGLE_platform_angle_metal =
+ _glfwStringInExtensionString("EGL_ANGLE_platform_angle_metal", extensions);
+ }
+
+ if (_glfw.egl.EXT_platform_base)
+ {
+ _glfw.egl.GetPlatformDisplayEXT = (PFNEGLGETPLATFORMDISPLAYEXTPROC)
+ eglGetProcAddress("eglGetPlatformDisplayEXT");
+ _glfw.egl.CreatePlatformWindowSurfaceEXT = (PFNEGLCREATEPLATFORMWINDOWSURFACEEXTPROC)
+ eglGetProcAddress("eglCreatePlatformWindowSurfaceEXT");
+ }
+
+ _glfw.egl.platform = _glfw.platform.getEGLPlatform(&attribs);
+ if (_glfw.egl.platform)
+ {
+ _glfw.egl.display =
+ eglGetPlatformDisplayEXT(_glfw.egl.platform,
+ _glfw.platform.getEGLNativeDisplay(),
+ attribs);
+ }
+ else
+ _glfw.egl.display = eglGetDisplay(_glfw.platform.getEGLNativeDisplay());
+
+ _glfw_free(attribs);
+
+ if (_glfw.egl.display == EGL_NO_DISPLAY)
+ {
+ _glfwInputError(GLFW_API_UNAVAILABLE,
+ "EGL: Failed to get EGL display: %s",
+ getEGLErrorString(eglGetError()));
+
+ _glfwTerminateEGL();
+ return GLFW_FALSE;
+ }
+
+ if (!eglInitialize(_glfw.egl.display, &_glfw.egl.major, &_glfw.egl.minor))
+ {
+ _glfwInputError(GLFW_API_UNAVAILABLE,
+ "EGL: Failed to initialize EGL: %s",
+ getEGLErrorString(eglGetError()));
+
+ _glfwTerminateEGL();
+ return GLFW_FALSE;
+ }
+
+ _glfw.egl.KHR_create_context =
+ extensionSupportedEGL("EGL_KHR_create_context");
+ _glfw.egl.KHR_create_context_no_error =
+ extensionSupportedEGL("EGL_KHR_create_context_no_error");
+ _glfw.egl.KHR_gl_colorspace =
+ extensionSupportedEGL("EGL_KHR_gl_colorspace");
+ _glfw.egl.KHR_get_all_proc_addresses =
+ extensionSupportedEGL("EGL_KHR_get_all_proc_addresses");
+ _glfw.egl.KHR_context_flush_control =
+ extensionSupportedEGL("EGL_KHR_context_flush_control");
+ _glfw.egl.EXT_present_opaque =
+ extensionSupportedEGL("EGL_EXT_present_opaque");
+
+ return GLFW_TRUE;
+}
+
+// Terminate EGL
+//
+void _glfwTerminateEGL(void)
+{
+ if (_glfw.egl.display)
+ {
+ eglTerminate(_glfw.egl.display);
+ _glfw.egl.display = EGL_NO_DISPLAY;
+ }
+
+ if (_glfw.egl.handle)
+ {
+ _glfwPlatformFreeModule(_glfw.egl.handle);
+ _glfw.egl.handle = NULL;
+ }
+}
+
+#define SET_ATTRIB(a, v) \
+{ \
+ assert(((size_t) index + 1) < sizeof(attribs) / sizeof(attribs[0])); \
+ attribs[index++] = a; \
+ attribs[index++] = v; \
+}
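
For clarity (illustrative, not part of this patch): each SET_ATTRIB call appends one key/value pair and advances index, and the final SET_ATTRIB(EGL_NONE, EGL_NONE) in _glfwCreateContextEGL terminates the list, so an OpenGL ES 3.0 request with no extra flags builds an array equivalent to the following.

/* Equivalent attribute list for an ES 3.0 request under
   EGL_KHR_create_context; the assert in the macro guards the fixed
   40-element buffer declared in _glfwCreateContextEGL below. */
static const EGLint example_es30_attribs[] =
{
    EGL_CONTEXT_MAJOR_VERSION_KHR, 3,
    EGL_CONTEXT_MINOR_VERSION_KHR, 0,
    EGL_NONE, EGL_NONE
};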
+
+// Create the OpenGL or OpenGL ES context
+//
+GLFWbool _glfwCreateContextEGL(_GLFWwindow* window,
+ const _GLFWctxconfig* ctxconfig,
+ const _GLFWfbconfig* fbconfig)
+{
+ EGLint attribs[40];
+ EGLConfig config;
+ EGLContext share = NULL;
+ EGLNativeWindowType native;
+ int index = 0;
+
+ if (!_glfw.egl.display)
+ {
+ _glfwInputError(GLFW_API_UNAVAILABLE, "EGL: API not available");
+ return GLFW_FALSE;
+ }
+
+ if (ctxconfig->share)
+ share = ctxconfig->share->context.egl.handle;
+
+ if (!chooseEGLConfig(ctxconfig, fbconfig, &config))
+ {
+ _glfwInputError(GLFW_FORMAT_UNAVAILABLE,
+ "EGL: Failed to find a suitable EGLConfig");
+ return GLFW_FALSE;
+ }
+
+ if (ctxconfig->client == GLFW_OPENGL_ES_API)
+ {
+ if (!eglBindAPI(EGL_OPENGL_ES_API))
+ {
+ _glfwInputError(GLFW_API_UNAVAILABLE,
+ "EGL: Failed to bind OpenGL ES: %s",
+ getEGLErrorString(eglGetError()));
+ return GLFW_FALSE;
+ }
+ }
+ else
+ {
+ if (!eglBindAPI(EGL_OPENGL_API))
+ {
+ _glfwInputError(GLFW_API_UNAVAILABLE,
+ "EGL: Failed to bind OpenGL: %s",
+ getEGLErrorString(eglGetError()));
+ return GLFW_FALSE;
+ }
+ }
+
+ if (_glfw.egl.KHR_create_context)
+ {
+ int mask = 0, flags = 0;
+
+ if (ctxconfig->client == GLFW_OPENGL_API)
+ {
+ if (ctxconfig->forward)
+ flags |= EGL_CONTEXT_OPENGL_FORWARD_COMPATIBLE_BIT_KHR;
+
+ if (ctxconfig->profile == GLFW_OPENGL_CORE_PROFILE)
+ mask |= EGL_CONTEXT_OPENGL_CORE_PROFILE_BIT_KHR;
+ else if (ctxconfig->profile == GLFW_OPENGL_COMPAT_PROFILE)
+ mask |= EGL_CONTEXT_OPENGL_COMPATIBILITY_PROFILE_BIT_KHR;
+ }
+
+ if (ctxconfig->debug)
+ flags |= EGL_CONTEXT_OPENGL_DEBUG_BIT_KHR;
+
+ if (ctxconfig->robustness)
+ {
+ if (ctxconfig->robustness == GLFW_NO_RESET_NOTIFICATION)
+ {
+ SET_ATTRIB(EGL_CONTEXT_OPENGL_RESET_NOTIFICATION_STRATEGY_KHR,
+ EGL_NO_RESET_NOTIFICATION_KHR);
+ }
+ else if (ctxconfig->robustness == GLFW_LOSE_CONTEXT_ON_RESET)
+ {
+ SET_ATTRIB(EGL_CONTEXT_OPENGL_RESET_NOTIFICATION_STRATEGY_KHR,
+ EGL_LOSE_CONTEXT_ON_RESET_KHR);
+ }
+
+ flags |= EGL_CONTEXT_OPENGL_ROBUST_ACCESS_BIT_KHR;
+ }
+
+ if (ctxconfig->noerror)
+ {
+ if (_glfw.egl.KHR_create_context_no_error)
+ SET_ATTRIB(EGL_CONTEXT_OPENGL_NO_ERROR_KHR, GLFW_TRUE);
+ }
+
+ if (ctxconfig->major != 1 || ctxconfig->minor != 0)
+ {
+ SET_ATTRIB(EGL_CONTEXT_MAJOR_VERSION_KHR, ctxconfig->major);
+ SET_ATTRIB(EGL_CONTEXT_MINOR_VERSION_KHR, ctxconfig->minor);
+ }
+
+ if (mask)
+ SET_ATTRIB(EGL_CONTEXT_OPENGL_PROFILE_MASK_KHR, mask);
+
+ if (flags)
+ SET_ATTRIB(EGL_CONTEXT_FLAGS_KHR, flags);
+ }
+ else
+ {
+ if (ctxconfig->client == GLFW_OPENGL_ES_API)
+ SET_ATTRIB(EGL_CONTEXT_CLIENT_VERSION, ctxconfig->major);
+ }
+
+ if (_glfw.egl.KHR_context_flush_control)
+ {
+ if (ctxconfig->release == GLFW_RELEASE_BEHAVIOR_NONE)
+ {
+ SET_ATTRIB(EGL_CONTEXT_RELEASE_BEHAVIOR_KHR,
+ EGL_CONTEXT_RELEASE_BEHAVIOR_NONE_KHR);
+ }
+ else if (ctxconfig->release == GLFW_RELEASE_BEHAVIOR_FLUSH)
+ {
+ SET_ATTRIB(EGL_CONTEXT_RELEASE_BEHAVIOR_KHR,
+ EGL_CONTEXT_RELEASE_BEHAVIOR_FLUSH_KHR);
+ }
+ }
+
+ SET_ATTRIB(EGL_NONE, EGL_NONE);
+
+ window->context.egl.handle = eglCreateContext(_glfw.egl.display,
+ config, share, attribs);
+
+ if (window->context.egl.handle == EGL_NO_CONTEXT)
+ {
+ _glfwInputError(GLFW_VERSION_UNAVAILABLE,
+ "EGL: Failed to create context: %s",
+ getEGLErrorString(eglGetError()));
+ return GLFW_FALSE;
+ }
+
+ // Set up attributes for surface creation
+ index = 0;
+
+ if (fbconfig->sRGB)
+ {
+ if (_glfw.egl.KHR_gl_colorspace)
+ SET_ATTRIB(EGL_GL_COLORSPACE_KHR, EGL_GL_COLORSPACE_SRGB_KHR);
+ }
+
+ if (!fbconfig->doublebuffer)
+ SET_ATTRIB(EGL_RENDER_BUFFER, EGL_SINGLE_BUFFER);
+
+ if (_glfw.egl.EXT_present_opaque)
+ SET_ATTRIB(EGL_PRESENT_OPAQUE_EXT, !fbconfig->transparent);
+
+ SET_ATTRIB(EGL_NONE, EGL_NONE);
+
+ native = _glfw.platform.getEGLNativeWindow(window);
+ // HACK: ANGLE does not implement eglCreatePlatformWindowSurfaceEXT
+ // despite reporting EGL_EXT_platform_base
+ if (_glfw.egl.platform && _glfw.egl.platform != EGL_PLATFORM_ANGLE_ANGLE)
+ {
+ window->context.egl.surface =
+ eglCreatePlatformWindowSurfaceEXT(_glfw.egl.display, config, native, attribs);
+ }
+ else
+ {
+ window->context.egl.surface =
+ eglCreateWindowSurface(_glfw.egl.display, config, native, attribs);
+ }
+
+ if (window->context.egl.surface == EGL_NO_SURFACE)
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "EGL: Failed to create window surface: %s",
+ getEGLErrorString(eglGetError()));
+ return GLFW_FALSE;
+ }
+
+ window->context.egl.config = config;
+
+ // Load the appropriate client library
+ if (!_glfw.egl.KHR_get_all_proc_addresses)
+ {
+ int i;
+ const char** sonames;
+ const char* es1sonames[] =
+ {
+#if defined(_GLFW_GLESV1_LIBRARY)
+ _GLFW_GLESV1_LIBRARY,
+#elif defined(_GLFW_WIN32)
+ "GLESv1_CM.dll",
+ "libGLES_CM.dll",
+#elif defined(_GLFW_COCOA)
+ "libGLESv1_CM.dylib",
+#elif defined(__OpenBSD__) || defined(__NetBSD__)
+ "libGLESv1_CM.so",
+#else
+ "libGLESv1_CM.so.1",
+ "libGLES_CM.so.1",
+#endif
+ NULL
+ };
+ const char* es2sonames[] =
+ {
+#if defined(_GLFW_GLESV2_LIBRARY)
+ _GLFW_GLESV2_LIBRARY,
+#elif defined(_GLFW_WIN32)
+ "GLESv2.dll",
+ "libGLESv2.dll",
+#elif defined(_GLFW_COCOA)
+ "libGLESv2.dylib",
+#elif defined(__CYGWIN__)
+ "libGLESv2-2.so",
+#elif defined(__OpenBSD__) || defined(__NetBSD__)
+ "libGLESv2.so",
+#else
+ "libGLESv2.so.2",
+#endif
+ NULL
+ };
+ const char* glsonames[] =
+ {
+#if defined(_GLFW_OPENGL_LIBRARY)
+ _GLFW_OPENGL_LIBRARY,
+#elif defined(_GLFW_WIN32)
+#elif defined(_GLFW_COCOA)
+#elif defined(__OpenBSD__) || defined(__NetBSD__)
+ "libGL.so",
+#else
+ "libGL.so.1",
+#endif
+ NULL
+ };
+
+ if (ctxconfig->client == GLFW_OPENGL_ES_API)
+ {
+ if (ctxconfig->major == 1)
+ sonames = es1sonames;
+ else
+ sonames = es2sonames;
+ }
+ else
+ sonames = glsonames;
+
+ for (i = 0; sonames[i]; i++)
+ {
+ // HACK: Match presence of lib prefix to increase chance of finding
+ // a matching pair in the jungle that is Win32 EGL/GLES
+ if (_glfw.egl.prefix != (strncmp(sonames[i], "lib", 3) == 0))
+ continue;
+
+ window->context.egl.client = _glfwPlatformLoadModule(sonames[i]);
+ if (window->context.egl.client)
+ break;
+ }
+
+ if (!window->context.egl.client)
+ {
+ _glfwInputError(GLFW_API_UNAVAILABLE,
+ "EGL: Failed to load client library");
+ return GLFW_FALSE;
+ }
+ }
+
+ window->context.makeCurrent = makeContextCurrentEGL;
+ window->context.swapBuffers = swapBuffersEGL;
+ window->context.swapInterval = swapIntervalEGL;
+ window->context.extensionSupported = extensionSupportedEGL;
+ window->context.getProcAddress = getProcAddressEGL;
+ window->context.destroy = destroyContextEGL;
+
+ return GLFW_TRUE;
+}
+
+#undef SET_ATTRIB
+
+// Returns the Visual and depth of the chosen EGLConfig
+//
+#if defined(_GLFW_X11)
+GLFWbool _glfwChooseVisualEGL(const _GLFWwndconfig* wndconfig,
+ const _GLFWctxconfig* ctxconfig,
+ const _GLFWfbconfig* fbconfig,
+ Visual** visual, int* depth)
+{
+ XVisualInfo* result;
+ XVisualInfo desired;
+ EGLConfig native;
+ EGLint visualID = 0, count = 0;
+ const long vimask = VisualScreenMask | VisualIDMask;
+
+ if (!chooseEGLConfig(ctxconfig, fbconfig, &native))
+ {
+ _glfwInputError(GLFW_FORMAT_UNAVAILABLE,
+ "EGL: Failed to find a suitable EGLConfig");
+ return GLFW_FALSE;
+ }
+
+ eglGetConfigAttrib(_glfw.egl.display, native,
+ EGL_NATIVE_VISUAL_ID, &visualID);
+
+ desired.screen = _glfw.x11.screen;
+ desired.visualid = visualID;
+
+ result = XGetVisualInfo(_glfw.x11.display, vimask, &desired, &count);
+ if (!result)
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "EGL: Failed to retrieve Visual for EGLConfig");
+ return GLFW_FALSE;
+ }
+
+ *visual = result->visual;
+ *depth = result->depth;
+
+ XFree(result);
+ return GLFW_TRUE;
+}
+#endif // _GLFW_X11
+
+
+//////////////////////////////////////////////////////////////////////////
+////// GLFW native API //////
+//////////////////////////////////////////////////////////////////////////
+
+GLFWAPI EGLDisplay glfwGetEGLDisplay(void)
+{
+ _GLFW_REQUIRE_INIT_OR_RETURN(EGL_NO_DISPLAY);
+ return _glfw.egl.display;
+}
+
+GLFWAPI EGLContext glfwGetEGLContext(GLFWwindow* handle)
+{
+ _GLFWwindow* window = (_GLFWwindow*) handle;
+ _GLFW_REQUIRE_INIT_OR_RETURN(EGL_NO_CONTEXT);
+
+ if (window->context.source != GLFW_EGL_CONTEXT_API)
+ {
+ _glfwInputError(GLFW_NO_WINDOW_CONTEXT, NULL);
+ return EGL_NO_CONTEXT;
+ }
+
+ return window->context.egl.handle;
+}
+
+GLFWAPI EGLSurface glfwGetEGLSurface(GLFWwindow* handle)
+{
+ _GLFWwindow* window = (_GLFWwindow*) handle;
+ _GLFW_REQUIRE_INIT_OR_RETURN(EGL_NO_SURFACE);
+
+ if (window->context.source != GLFW_EGL_CONTEXT_API)
+ {
+ _glfwInputError(GLFW_NO_WINDOW_CONTEXT, NULL);
+ return EGL_NO_SURFACE;
+ }
+
+ return window->context.egl.surface;
+}
+
diff --git a/chromium/third_party/dawn/third_party/glfw/src/glfw.rc.in b/chromium/third_party/dawn/third_party/glfw/src/glfw.rc.in
new file mode 100644
index 00000000000..ac3460a7ca1
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/src/glfw.rc.in
@@ -0,0 +1,30 @@
+
+#include <winver.h>
+
+VS_VERSION_INFO VERSIONINFO
+FILEVERSION @GLFW_VERSION_MAJOR@,@GLFW_VERSION_MINOR@,@GLFW_VERSION_PATCH@,0
+PRODUCTVERSION @GLFW_VERSION_MAJOR@,@GLFW_VERSION_MINOR@,@GLFW_VERSION_PATCH@,0
+FILEFLAGSMASK VS_FFI_FILEFLAGSMASK
+FILEFLAGS 0
+FILEOS VOS_NT_WINDOWS32
+FILETYPE VFT_DLL
+FILESUBTYPE 0
+{
+ BLOCK "StringFileInfo"
+ {
+ BLOCK "040904B0"
+ {
+ VALUE "CompanyName", "GLFW"
+ VALUE "FileDescription", "GLFW @GLFW_VERSION@ DLL"
+ VALUE "FileVersion", "@GLFW_VERSION@"
+ VALUE "OriginalFilename", "glfw3.dll"
+ VALUE "ProductName", "GLFW"
+ VALUE "ProductVersion", "@GLFW_VERSION@"
+ }
+ }
+ BLOCK "VarFileInfo"
+ {
+ VALUE "Translation", 0x409, 1200
+ }
+}
+
diff --git a/chromium/third_party/dawn/third_party/glfw/src/glx_context.c b/chromium/third_party/dawn/third_party/glfw/src/glx_context.c
new file mode 100644
index 00000000000..872612d1022
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/src/glx_context.c
@@ -0,0 +1,712 @@
+//========================================================================
+// GLFW 3.4 GLX - www.glfw.org
+//------------------------------------------------------------------------
+// Copyright (c) 2002-2006 Marcus Geelnard
+// Copyright (c) 2006-2019 Camilla Löwy <elmindreda@glfw.org>
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would
+// be appreciated but is not required.
+//
+// 2. Altered source versions must be plainly marked as such, and must not
+// be misrepresented as being the original software.
+//
+// 3. This notice may not be removed or altered from any source
+// distribution.
+//
+//========================================================================
+// It is fine to use C99 in this file because it will not be built with VS
+//========================================================================
+
+#include "internal.h"
+
+#include <string.h>
+#include <stdlib.h>
+#include <assert.h>
+
+#ifndef GLXBadProfileARB
+ #define GLXBadProfileARB 13
+#endif
+
+
+// Returns the specified attribute of the specified GLXFBConfig
+//
+static int getGLXFBConfigAttrib(GLXFBConfig fbconfig, int attrib)
+{
+ int value;
+ glXGetFBConfigAttrib(_glfw.x11.display, fbconfig, attrib, &value);
+ return value;
+}
+
+// Return the GLXFBConfig most closely matching the specified hints
+//
+static GLFWbool chooseGLXFBConfig(const _GLFWfbconfig* desired,
+ GLXFBConfig* result)
+{
+ GLXFBConfig* nativeConfigs;
+ _GLFWfbconfig* usableConfigs;
+ const _GLFWfbconfig* closest;
+ int nativeCount, usableCount;
+ const char* vendor;
+ GLFWbool trustWindowBit = GLFW_TRUE;
+
+ // HACK: This is a (hopefully temporary) workaround for Chromium
+ // (VirtualBox GL) not setting the window bit on any GLXFBConfigs
+ vendor = glXGetClientString(_glfw.x11.display, GLX_VENDOR);
+ if (vendor && strcmp(vendor, "Chromium") == 0)
+ trustWindowBit = GLFW_FALSE;
+
+ nativeConfigs =
+ glXGetFBConfigs(_glfw.x11.display, _glfw.x11.screen, &nativeCount);
+ if (!nativeConfigs || !nativeCount)
+ {
+ _glfwInputError(GLFW_API_UNAVAILABLE, "GLX: No GLXFBConfigs returned");
+ return GLFW_FALSE;
+ }
+
+ usableConfigs = _glfw_calloc(nativeCount, sizeof(_GLFWfbconfig));
+ usableCount = 0;
+
+ for (int i = 0; i < nativeCount; i++)
+ {
+ const GLXFBConfig n = nativeConfigs[i];
+ _GLFWfbconfig* u = usableConfigs + usableCount;
+
+ // Only consider RGBA GLXFBConfigs
+ if (!(getGLXFBConfigAttrib(n, GLX_RENDER_TYPE) & GLX_RGBA_BIT))
+ continue;
+
+ // Only consider window GLXFBConfigs
+ if (!(getGLXFBConfigAttrib(n, GLX_DRAWABLE_TYPE) & GLX_WINDOW_BIT))
+ {
+ if (trustWindowBit)
+ continue;
+ }
+
+ if (getGLXFBConfigAttrib(n, GLX_DOUBLEBUFFER) != desired->doublebuffer)
+ continue;
+
+ if (desired->transparent)
+ {
+ XVisualInfo* vi = glXGetVisualFromFBConfig(_glfw.x11.display, n);
+ if (vi)
+ {
+ u->transparent = _glfwIsVisualTransparentX11(vi->visual);
+ XFree(vi);
+ }
+ }
+
+ u->redBits = getGLXFBConfigAttrib(n, GLX_RED_SIZE);
+ u->greenBits = getGLXFBConfigAttrib(n, GLX_GREEN_SIZE);
+ u->blueBits = getGLXFBConfigAttrib(n, GLX_BLUE_SIZE);
+
+ u->alphaBits = getGLXFBConfigAttrib(n, GLX_ALPHA_SIZE);
+ u->depthBits = getGLXFBConfigAttrib(n, GLX_DEPTH_SIZE);
+ u->stencilBits = getGLXFBConfigAttrib(n, GLX_STENCIL_SIZE);
+
+ u->accumRedBits = getGLXFBConfigAttrib(n, GLX_ACCUM_RED_SIZE);
+ u->accumGreenBits = getGLXFBConfigAttrib(n, GLX_ACCUM_GREEN_SIZE);
+ u->accumBlueBits = getGLXFBConfigAttrib(n, GLX_ACCUM_BLUE_SIZE);
+ u->accumAlphaBits = getGLXFBConfigAttrib(n, GLX_ACCUM_ALPHA_SIZE);
+
+ u->auxBuffers = getGLXFBConfigAttrib(n, GLX_AUX_BUFFERS);
+
+ if (getGLXFBConfigAttrib(n, GLX_STEREO))
+ u->stereo = GLFW_TRUE;
+
+ if (_glfw.glx.ARB_multisample)
+ u->samples = getGLXFBConfigAttrib(n, GLX_SAMPLES);
+
+ if (_glfw.glx.ARB_framebuffer_sRGB || _glfw.glx.EXT_framebuffer_sRGB)
+ u->sRGB = getGLXFBConfigAttrib(n, GLX_FRAMEBUFFER_SRGB_CAPABLE_ARB);
+
+ u->handle = (uintptr_t) n;
+ usableCount++;
+ }
+
+ closest = _glfwChooseFBConfig(desired, usableConfigs, usableCount);
+ if (closest)
+ *result = (GLXFBConfig) closest->handle;
+
+ XFree(nativeConfigs);
+ _glfw_free(usableConfigs);
+
+ return closest != NULL;
+}
+
+// Create the OpenGL context using legacy API
+//
+static GLXContext createLegacyContextGLX(_GLFWwindow* window,
+ GLXFBConfig fbconfig,
+ GLXContext share)
+{
+ return glXCreateNewContext(_glfw.x11.display,
+ fbconfig,
+ GLX_RGBA_TYPE,
+ share,
+ True);
+}
+
+static void makeContextCurrentGLX(_GLFWwindow* window)
+{
+ if (window)
+ {
+ if (!glXMakeCurrent(_glfw.x11.display,
+ window->context.glx.window,
+ window->context.glx.handle))
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "GLX: Failed to make context current");
+ return;
+ }
+ }
+ else
+ {
+ if (!glXMakeCurrent(_glfw.x11.display, None, NULL))
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "GLX: Failed to clear current context");
+ return;
+ }
+ }
+
+ _glfwPlatformSetTls(&_glfw.contextSlot, window);
+}
+
+static void swapBuffersGLX(_GLFWwindow* window)
+{
+ glXSwapBuffers(_glfw.x11.display, window->context.glx.window);
+}
+
+static void swapIntervalGLX(int interval)
+{
+ _GLFWwindow* window = _glfwPlatformGetTls(&_glfw.contextSlot);
+
+ if (_glfw.glx.EXT_swap_control)
+ {
+ _glfw.glx.SwapIntervalEXT(_glfw.x11.display,
+ window->context.glx.window,
+ interval);
+ }
+ else if (_glfw.glx.MESA_swap_control)
+ _glfw.glx.SwapIntervalMESA(interval);
+ else if (_glfw.glx.SGI_swap_control)
+ {
+ if (interval > 0)
+ _glfw.glx.SwapIntervalSGI(interval);
+ }
+}
+
+static int extensionSupportedGLX(const char* extension)
+{
+ const char* extensions =
+ glXQueryExtensionsString(_glfw.x11.display, _glfw.x11.screen);
+ if (extensions)
+ {
+ if (_glfwStringInExtensionString(extension, extensions))
+ return GLFW_TRUE;
+ }
+
+ return GLFW_FALSE;
+}
+
+static GLFWglproc getProcAddressGLX(const char* procname)
+{
+ if (_glfw.glx.GetProcAddress)
+ return _glfw.glx.GetProcAddress((const GLubyte*) procname);
+ else if (_glfw.glx.GetProcAddressARB)
+ return _glfw.glx.GetProcAddressARB((const GLubyte*) procname);
+ else
+ return _glfwPlatformGetModuleSymbol(_glfw.glx.handle, procname);
+}
+
+static void destroyContextGLX(_GLFWwindow* window)
+{
+ if (window->context.glx.window)
+ {
+ glXDestroyWindow(_glfw.x11.display, window->context.glx.window);
+ window->context.glx.window = None;
+ }
+
+ if (window->context.glx.handle)
+ {
+ glXDestroyContext(_glfw.x11.display, window->context.glx.handle);
+ window->context.glx.handle = NULL;
+ }
+}
+
+
+//////////////////////////////////////////////////////////////////////////
+////// GLFW internal API //////
+//////////////////////////////////////////////////////////////////////////
+
+// Initialize GLX
+//
+GLFWbool _glfwInitGLX(void)
+{
+ const char* sonames[] =
+ {
+#if defined(_GLFW_GLX_LIBRARY)
+ _GLFW_GLX_LIBRARY,
+#elif defined(__CYGWIN__)
+ "libGL-1.so",
+#elif defined(__OpenBSD__) || defined(__NetBSD__)
+ "libGL.so",
+#else
+ "libGL.so.1",
+ "libGL.so",
+#endif
+ NULL
+ };
+
+ if (_glfw.glx.handle)
+ return GLFW_TRUE;
+
+ for (int i = 0; sonames[i]; i++)
+ {
+ _glfw.glx.handle = _glfwPlatformLoadModule(sonames[i]);
+ if (_glfw.glx.handle)
+ break;
+ }
+
+ if (!_glfw.glx.handle)
+ {
+ _glfwInputError(GLFW_API_UNAVAILABLE, "GLX: Failed to load GLX");
+ return GLFW_FALSE;
+ }
+
+ _glfw.glx.GetFBConfigs = (PFNGLXGETFBCONFIGSPROC)
+ _glfwPlatformGetModuleSymbol(_glfw.glx.handle, "glXGetFBConfigs");
+ _glfw.glx.GetFBConfigAttrib = (PFNGLXGETFBCONFIGATTRIBPROC)
+ _glfwPlatformGetModuleSymbol(_glfw.glx.handle, "glXGetFBConfigAttrib");
+ _glfw.glx.GetClientString = (PFNGLXGETCLIENTSTRINGPROC)
+ _glfwPlatformGetModuleSymbol(_glfw.glx.handle, "glXGetClientString");
+ _glfw.glx.QueryExtension = (PFNGLXQUERYEXTENSIONPROC)
+ _glfwPlatformGetModuleSymbol(_glfw.glx.handle, "glXQueryExtension");
+ _glfw.glx.QueryVersion = (PFNGLXQUERYVERSIONPROC)
+ _glfwPlatformGetModuleSymbol(_glfw.glx.handle, "glXQueryVersion");
+ _glfw.glx.DestroyContext = (PFNGLXDESTROYCONTEXTPROC)
+ _glfwPlatformGetModuleSymbol(_glfw.glx.handle, "glXDestroyContext");
+ _glfw.glx.MakeCurrent = (PFNGLXMAKECURRENTPROC)
+ _glfwPlatformGetModuleSymbol(_glfw.glx.handle, "glXMakeCurrent");
+ _glfw.glx.SwapBuffers = (PFNGLXSWAPBUFFERSPROC)
+ _glfwPlatformGetModuleSymbol(_glfw.glx.handle, "glXSwapBuffers");
+ _glfw.glx.QueryExtensionsString = (PFNGLXQUERYEXTENSIONSSTRINGPROC)
+ _glfwPlatformGetModuleSymbol(_glfw.glx.handle, "glXQueryExtensionsString");
+ _glfw.glx.CreateNewContext = (PFNGLXCREATENEWCONTEXTPROC)
+ _glfwPlatformGetModuleSymbol(_glfw.glx.handle, "glXCreateNewContext");
+ _glfw.glx.CreateWindow = (PFNGLXCREATEWINDOWPROC)
+ _glfwPlatformGetModuleSymbol(_glfw.glx.handle, "glXCreateWindow");
+ _glfw.glx.DestroyWindow = (PFNGLXDESTROYWINDOWPROC)
+ _glfwPlatformGetModuleSymbol(_glfw.glx.handle, "glXDestroyWindow");
+ _glfw.glx.GetVisualFromFBConfig = (PFNGLXGETVISUALFROMFBCONFIGPROC)
+ _glfwPlatformGetModuleSymbol(_glfw.glx.handle, "glXGetVisualFromFBConfig");
+
+ if (!_glfw.glx.GetFBConfigs ||
+ !_glfw.glx.GetFBConfigAttrib ||
+ !_glfw.glx.GetClientString ||
+ !_glfw.glx.QueryExtension ||
+ !_glfw.glx.QueryVersion ||
+ !_glfw.glx.DestroyContext ||
+ !_glfw.glx.MakeCurrent ||
+ !_glfw.glx.SwapBuffers ||
+ !_glfw.glx.QueryExtensionsString ||
+ !_glfw.glx.CreateNewContext ||
+ !_glfw.glx.CreateWindow ||
+ !_glfw.glx.DestroyWindow ||
+ !_glfw.glx.GetVisualFromFBConfig)
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "GLX: Failed to load required entry points");
+ return GLFW_FALSE;
+ }
+
+ // NOTE: Unlike GLX 1.3 entry points these are not required to be present
+ _glfw.glx.GetProcAddress = (PFNGLXGETPROCADDRESSPROC)
+ _glfwPlatformGetModuleSymbol(_glfw.glx.handle, "glXGetProcAddress");
+ _glfw.glx.GetProcAddressARB = (PFNGLXGETPROCADDRESSPROC)
+ _glfwPlatformGetModuleSymbol(_glfw.glx.handle, "glXGetProcAddressARB");
+
+ if (!glXQueryExtension(_glfw.x11.display,
+ &_glfw.glx.errorBase,
+ &_glfw.glx.eventBase))
+ {
+ _glfwInputError(GLFW_API_UNAVAILABLE, "GLX: GLX extension not found");
+ return GLFW_FALSE;
+ }
+
+ if (!glXQueryVersion(_glfw.x11.display, &_glfw.glx.major, &_glfw.glx.minor))
+ {
+ _glfwInputError(GLFW_API_UNAVAILABLE,
+ "GLX: Failed to query GLX version");
+ return GLFW_FALSE;
+ }
+
+ if (_glfw.glx.major == 1 && _glfw.glx.minor < 3)
+ {
+ _glfwInputError(GLFW_API_UNAVAILABLE,
+ "GLX: GLX version 1.3 is required");
+ return GLFW_FALSE;
+ }
+
+ if (extensionSupportedGLX("GLX_EXT_swap_control"))
+ {
+ _glfw.glx.SwapIntervalEXT = (PFNGLXSWAPINTERVALEXTPROC)
+ getProcAddressGLX("glXSwapIntervalEXT");
+
+ if (_glfw.glx.SwapIntervalEXT)
+ _glfw.glx.EXT_swap_control = GLFW_TRUE;
+ }
+
+ if (extensionSupportedGLX("GLX_SGI_swap_control"))
+ {
+ _glfw.glx.SwapIntervalSGI = (PFNGLXSWAPINTERVALSGIPROC)
+ getProcAddressGLX("glXSwapIntervalSGI");
+
+ if (_glfw.glx.SwapIntervalSGI)
+ _glfw.glx.SGI_swap_control = GLFW_TRUE;
+ }
+
+ if (extensionSupportedGLX("GLX_MESA_swap_control"))
+ {
+ _glfw.glx.SwapIntervalMESA = (PFNGLXSWAPINTERVALMESAPROC)
+ getProcAddressGLX("glXSwapIntervalMESA");
+
+ if (_glfw.glx.SwapIntervalMESA)
+ _glfw.glx.MESA_swap_control = GLFW_TRUE;
+ }
+
+ if (extensionSupportedGLX("GLX_ARB_multisample"))
+ _glfw.glx.ARB_multisample = GLFW_TRUE;
+
+ if (extensionSupportedGLX("GLX_ARB_framebuffer_sRGB"))
+ _glfw.glx.ARB_framebuffer_sRGB = GLFW_TRUE;
+
+ if (extensionSupportedGLX("GLX_EXT_framebuffer_sRGB"))
+ _glfw.glx.EXT_framebuffer_sRGB = GLFW_TRUE;
+
+ if (extensionSupportedGLX("GLX_ARB_create_context"))
+ {
+ _glfw.glx.CreateContextAttribsARB = (PFNGLXCREATECONTEXTATTRIBSARBPROC)
+ getProcAddressGLX("glXCreateContextAttribsARB");
+
+ if (_glfw.glx.CreateContextAttribsARB)
+ _glfw.glx.ARB_create_context = GLFW_TRUE;
+ }
+
+ if (extensionSupportedGLX("GLX_ARB_create_context_robustness"))
+ _glfw.glx.ARB_create_context_robustness = GLFW_TRUE;
+
+ if (extensionSupportedGLX("GLX_ARB_create_context_profile"))
+ _glfw.glx.ARB_create_context_profile = GLFW_TRUE;
+
+ if (extensionSupportedGLX("GLX_EXT_create_context_es2_profile"))
+ _glfw.glx.EXT_create_context_es2_profile = GLFW_TRUE;
+
+ if (extensionSupportedGLX("GLX_ARB_create_context_no_error"))
+ _glfw.glx.ARB_create_context_no_error = GLFW_TRUE;
+
+ if (extensionSupportedGLX("GLX_ARB_context_flush_control"))
+ _glfw.glx.ARB_context_flush_control = GLFW_TRUE;
+
+ return GLFW_TRUE;
+}
+
+// Terminate GLX
+//
+void _glfwTerminateGLX(void)
+{
+ // NOTE: This function must not call any X11 functions, as it is called
+ // after XCloseDisplay (see _glfwTerminateX11 for details)
+
+ if (_glfw.glx.handle)
+ {
+ _glfwPlatformFreeModule(_glfw.glx.handle);
+ _glfw.glx.handle = NULL;
+ }
+}
+
+#define SET_ATTRIB(a, v) \
+{ \
+ assert(((size_t) index + 1) < sizeof(attribs) / sizeof(attribs[0])); \
+ attribs[index++] = a; \
+ attribs[index++] = v; \
+}
+
+// Create the OpenGL or OpenGL ES context
+//
+GLFWbool _glfwCreateContextGLX(_GLFWwindow* window,
+ const _GLFWctxconfig* ctxconfig,
+ const _GLFWfbconfig* fbconfig)
+{
+ int attribs[40];
+ GLXFBConfig native = NULL;
+ GLXContext share = NULL;
+
+ if (ctxconfig->share)
+ share = ctxconfig->share->context.glx.handle;
+
+ if (!chooseGLXFBConfig(fbconfig, &native))
+ {
+ _glfwInputError(GLFW_FORMAT_UNAVAILABLE,
+ "GLX: Failed to find a suitable GLXFBConfig");
+ return GLFW_FALSE;
+ }
+
+ if (ctxconfig->client == GLFW_OPENGL_ES_API)
+ {
+ if (!_glfw.glx.ARB_create_context ||
+ !_glfw.glx.ARB_create_context_profile ||
+ !_glfw.glx.EXT_create_context_es2_profile)
+ {
+ _glfwInputError(GLFW_API_UNAVAILABLE,
+ "GLX: OpenGL ES requested but GLX_EXT_create_context_es2_profile is unavailable");
+ return GLFW_FALSE;
+ }
+ }
+
+ if (ctxconfig->forward)
+ {
+ if (!_glfw.glx.ARB_create_context)
+ {
+ _glfwInputError(GLFW_VERSION_UNAVAILABLE,
+                            "GLX: Forward compatibility requested but GLX_ARB_create_context is unavailable");
+ return GLFW_FALSE;
+ }
+ }
+
+ if (ctxconfig->profile)
+ {
+ if (!_glfw.glx.ARB_create_context ||
+ !_glfw.glx.ARB_create_context_profile)
+ {
+ _glfwInputError(GLFW_VERSION_UNAVAILABLE,
+                            "GLX: An OpenGL profile was requested but GLX_ARB_create_context_profile is unavailable");
+ return GLFW_FALSE;
+ }
+ }
+
+ _glfwGrabErrorHandlerX11();
+
+ if (_glfw.glx.ARB_create_context)
+ {
+ int index = 0, mask = 0, flags = 0;
+
+ if (ctxconfig->client == GLFW_OPENGL_API)
+ {
+ if (ctxconfig->forward)
+ flags |= GLX_CONTEXT_FORWARD_COMPATIBLE_BIT_ARB;
+
+ if (ctxconfig->profile == GLFW_OPENGL_CORE_PROFILE)
+ mask |= GLX_CONTEXT_CORE_PROFILE_BIT_ARB;
+ else if (ctxconfig->profile == GLFW_OPENGL_COMPAT_PROFILE)
+ mask |= GLX_CONTEXT_COMPATIBILITY_PROFILE_BIT_ARB;
+ }
+ else
+ mask |= GLX_CONTEXT_ES2_PROFILE_BIT_EXT;
+
+ if (ctxconfig->debug)
+ flags |= GLX_CONTEXT_DEBUG_BIT_ARB;
+
+ if (ctxconfig->robustness)
+ {
+ if (_glfw.glx.ARB_create_context_robustness)
+ {
+ if (ctxconfig->robustness == GLFW_NO_RESET_NOTIFICATION)
+ {
+ SET_ATTRIB(GLX_CONTEXT_RESET_NOTIFICATION_STRATEGY_ARB,
+ GLX_NO_RESET_NOTIFICATION_ARB);
+ }
+ else if (ctxconfig->robustness == GLFW_LOSE_CONTEXT_ON_RESET)
+ {
+ SET_ATTRIB(GLX_CONTEXT_RESET_NOTIFICATION_STRATEGY_ARB,
+ GLX_LOSE_CONTEXT_ON_RESET_ARB);
+ }
+
+ flags |= GLX_CONTEXT_ROBUST_ACCESS_BIT_ARB;
+ }
+ }
+
+ if (ctxconfig->release)
+ {
+ if (_glfw.glx.ARB_context_flush_control)
+ {
+ if (ctxconfig->release == GLFW_RELEASE_BEHAVIOR_NONE)
+ {
+ SET_ATTRIB(GLX_CONTEXT_RELEASE_BEHAVIOR_ARB,
+ GLX_CONTEXT_RELEASE_BEHAVIOR_NONE_ARB);
+ }
+ else if (ctxconfig->release == GLFW_RELEASE_BEHAVIOR_FLUSH)
+ {
+ SET_ATTRIB(GLX_CONTEXT_RELEASE_BEHAVIOR_ARB,
+ GLX_CONTEXT_RELEASE_BEHAVIOR_FLUSH_ARB);
+ }
+ }
+ }
+
+ if (ctxconfig->noerror)
+ {
+ if (_glfw.glx.ARB_create_context_no_error)
+ SET_ATTRIB(GLX_CONTEXT_OPENGL_NO_ERROR_ARB, GLFW_TRUE);
+ }
+
+ // NOTE: Only request an explicitly versioned context when necessary, as
+ // explicitly requesting version 1.0 does not always return the
+ // highest version supported by the driver
+ if (ctxconfig->major != 1 || ctxconfig->minor != 0)
+ {
+ SET_ATTRIB(GLX_CONTEXT_MAJOR_VERSION_ARB, ctxconfig->major);
+ SET_ATTRIB(GLX_CONTEXT_MINOR_VERSION_ARB, ctxconfig->minor);
+ }
+
+ if (mask)
+ SET_ATTRIB(GLX_CONTEXT_PROFILE_MASK_ARB, mask);
+
+ if (flags)
+ SET_ATTRIB(GLX_CONTEXT_FLAGS_ARB, flags);
+
+ SET_ATTRIB(None, None);
+
+ window->context.glx.handle =
+ _glfw.glx.CreateContextAttribsARB(_glfw.x11.display,
+ native,
+ share,
+ True,
+ attribs);
+
+ // HACK: This is a fallback for broken versions of the Mesa
+ // implementation of GLX_ARB_create_context_profile that fail
+ // default 1.0 context creation with a GLXBadProfileARB error in
+ // violation of the extension spec
+ if (!window->context.glx.handle)
+ {
+ if (_glfw.x11.errorCode == _glfw.glx.errorBase + GLXBadProfileARB &&
+ ctxconfig->client == GLFW_OPENGL_API &&
+ ctxconfig->profile == GLFW_OPENGL_ANY_PROFILE &&
+ ctxconfig->forward == GLFW_FALSE)
+ {
+ window->context.glx.handle =
+ createLegacyContextGLX(window, native, share);
+ }
+ }
+ }
+ else
+ {
+ window->context.glx.handle =
+ createLegacyContextGLX(window, native, share);
+ }
+
+ _glfwReleaseErrorHandlerX11();
+
+ if (!window->context.glx.handle)
+ {
+ _glfwInputErrorX11(GLFW_VERSION_UNAVAILABLE, "GLX: Failed to create context");
+ return GLFW_FALSE;
+ }
+
+ window->context.glx.window =
+ glXCreateWindow(_glfw.x11.display, native, window->x11.handle, NULL);
+ if (!window->context.glx.window)
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR, "GLX: Failed to create window");
+ return GLFW_FALSE;
+ }
+
+ window->context.makeCurrent = makeContextCurrentGLX;
+ window->context.swapBuffers = swapBuffersGLX;
+ window->context.swapInterval = swapIntervalGLX;
+ window->context.extensionSupported = extensionSupportedGLX;
+ window->context.getProcAddress = getProcAddressGLX;
+ window->context.destroy = destroyContextGLX;
+
+ return GLFW_TRUE;
+}
+
+#undef SET_ATTRIB
+
+// Returns the Visual and depth of the chosen GLXFBConfig
+//
+GLFWbool _glfwChooseVisualGLX(const _GLFWwndconfig* wndconfig,
+ const _GLFWctxconfig* ctxconfig,
+ const _GLFWfbconfig* fbconfig,
+ Visual** visual, int* depth)
+{
+ GLXFBConfig native;
+ XVisualInfo* result;
+
+ if (!chooseGLXFBConfig(fbconfig, &native))
+ {
+ _glfwInputError(GLFW_FORMAT_UNAVAILABLE,
+ "GLX: Failed to find a suitable GLXFBConfig");
+ return GLFW_FALSE;
+ }
+
+ result = glXGetVisualFromFBConfig(_glfw.x11.display, native);
+ if (!result)
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "GLX: Failed to retrieve Visual for GLXFBConfig");
+ return GLFW_FALSE;
+ }
+
+ *visual = result->visual;
+ *depth = result->depth;
+
+ XFree(result);
+ return GLFW_TRUE;
+}
+
+
+//////////////////////////////////////////////////////////////////////////
+////// GLFW native API //////
+//////////////////////////////////////////////////////////////////////////
+
+GLFWAPI GLXContext glfwGetGLXContext(GLFWwindow* handle)
+{
+ _GLFWwindow* window = (_GLFWwindow*) handle;
+ _GLFW_REQUIRE_INIT_OR_RETURN(NULL);
+
+ if (_glfw.platform.platformID != GLFW_PLATFORM_X11)
+ {
+ _glfwInputError(GLFW_PLATFORM_UNAVAILABLE, "GLX: Platform not initialized");
+ return NULL;
+ }
+
+ if (window->context.source != GLFW_NATIVE_CONTEXT_API)
+ {
+ _glfwInputError(GLFW_NO_WINDOW_CONTEXT, NULL);
+ return NULL;
+ }
+
+ return window->context.glx.handle;
+}
+
+GLFWAPI GLXWindow glfwGetGLXWindow(GLFWwindow* handle)
+{
+ _GLFWwindow* window = (_GLFWwindow*) handle;
+ _GLFW_REQUIRE_INIT_OR_RETURN(None);
+
+ if (_glfw.platform.platformID != GLFW_PLATFORM_X11)
+ {
+ _glfwInputError(GLFW_PLATFORM_UNAVAILABLE, "GLX: Platform not initialized");
+ return None;
+ }
+
+ if (window->context.source != GLFW_NATIVE_CONTEXT_API)
+ {
+ _glfwInputError(GLFW_NO_WINDOW_CONTEXT, NULL);
+ return None;
+ }
+
+ return window->context.glx.window;
+}
+
diff --git a/chromium/third_party/dawn/third_party/glfw/src/init.c b/chromium/third_party/dawn/third_party/glfw/src/init.c
new file mode 100644
index 00000000000..d07a492ec66
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/src/init.c
@@ -0,0 +1,545 @@
+//========================================================================
+// GLFW 3.4 - www.glfw.org
+//------------------------------------------------------------------------
+// Copyright (c) 2002-2006 Marcus Geelnard
+// Copyright (c) 2006-2018 Camilla Löwy <elmindreda@glfw.org>
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would
+// be appreciated but is not required.
+//
+// 2. Altered source versions must be plainly marked as such, and must not
+// be misrepresented as being the original software.
+//
+// 3. This notice may not be removed or altered from any source
+// distribution.
+//
+//========================================================================
+// Please use C89 style variable declarations in this file because it is
+// built with VS 2010, which does not support C99 declarations
+//========================================================================
+
+#include "internal.h"
+
+#include <string.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdarg.h>
+#include <assert.h>
+
+
+// NOTE: The global variables below comprise all mutable global data in GLFW
+// Any other mutable global variable is a bug
+
+// This contains all mutable state shared between compilation units of GLFW
+//
+_GLFWlibrary _glfw = { GLFW_FALSE };
+
+// These are outside of _glfw so they can be used before initialization and
+// after termination without special handling when _glfw is cleared to zero
+//
+static _GLFWerror _glfwMainThreadError;
+static GLFWerrorfun _glfwErrorCallback;
+static GLFWallocator _glfwInitAllocator;
+static _GLFWinitconfig _glfwInitHints =
+{
+ GLFW_TRUE, // hat buttons
+ GLFW_ANGLE_PLATFORM_TYPE_NONE, // ANGLE backend
+ GLFW_ANY_PLATFORM, // preferred platform
+ NULL, // vkGetInstanceProcAddr function
+ {
+ GLFW_TRUE, // macOS menu bar
+ GLFW_TRUE // macOS bundle chdir
+ },
+ {
+ GLFW_TRUE, // X11 XCB Vulkan surface
+ },
+};
+
+// The allocation function used when no custom allocator is set
+//
+static void* defaultAllocate(size_t size, void* user)
+{
+ return malloc(size);
+}
+
+// The deallocation function used when no custom allocator is set
+//
+static void defaultDeallocate(void* block, void* user)
+{
+ free(block);
+}
+
+// The reallocation function used when no custom allocator is set
+//
+static void* defaultReallocate(void* block, size_t size, void* user)
+{
+ return realloc(block, size);
+}
+
+// Terminate the library
+//
+static void terminate(void)
+{
+ int i;
+
+ memset(&_glfw.callbacks, 0, sizeof(_glfw.callbacks));
+
+ while (_glfw.windowListHead)
+ glfwDestroyWindow((GLFWwindow*) _glfw.windowListHead);
+
+ while (_glfw.cursorListHead)
+ glfwDestroyCursor((GLFWcursor*) _glfw.cursorListHead);
+
+ for (i = 0; i < _glfw.monitorCount; i++)
+ {
+ _GLFWmonitor* monitor = _glfw.monitors[i];
+ if (monitor->originalRamp.size)
+ _glfw.platform.setGammaRamp(monitor, &monitor->originalRamp);
+ _glfwFreeMonitor(monitor);
+ }
+
+ _glfw_free(_glfw.monitors);
+ _glfw.monitors = NULL;
+ _glfw.monitorCount = 0;
+
+ _glfw_free(_glfw.mappings);
+ _glfw.mappings = NULL;
+ _glfw.mappingCount = 0;
+
+ _glfwTerminateVulkan();
+ _glfw.platform.terminateJoysticks();
+ _glfw.platform.terminate();
+
+ _glfw.initialized = GLFW_FALSE;
+
+ while (_glfw.errorListHead)
+ {
+ _GLFWerror* error = _glfw.errorListHead;
+ _glfw.errorListHead = error->next;
+ _glfw_free(error);
+ }
+
+ _glfwPlatformDestroyTls(&_glfw.contextSlot);
+ _glfwPlatformDestroyTls(&_glfw.errorSlot);
+ _glfwPlatformDestroyMutex(&_glfw.errorLock);
+
+ memset(&_glfw, 0, sizeof(_glfw));
+}
+
+
+//////////////////////////////////////////////////////////////////////////
+////// GLFW internal API //////
+//////////////////////////////////////////////////////////////////////////
+
+// Encode a Unicode code point to a UTF-8 stream
+// Based on cutef8 by Jeff Bezanson (Public Domain)
+//
+size_t _glfwEncodeUTF8(char* s, uint32_t codepoint)
+{
+ size_t count = 0;
+
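+    // Lead bytes 0xc0, 0xe0 and 0xf0 introduce two-, three- and four-byte
+    // sequences; each continuation byte carries six payload bits under 0x80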
+ if (codepoint < 0x80)
+ s[count++] = (char) codepoint;
+ else if (codepoint < 0x800)
+ {
+ s[count++] = (codepoint >> 6) | 0xc0;
+ s[count++] = (codepoint & 0x3f) | 0x80;
+ }
+ else if (codepoint < 0x10000)
+ {
+ s[count++] = (codepoint >> 12) | 0xe0;
+ s[count++] = ((codepoint >> 6) & 0x3f) | 0x80;
+ s[count++] = (codepoint & 0x3f) | 0x80;
+ }
+ else if (codepoint < 0x110000)
+ {
+ s[count++] = (codepoint >> 18) | 0xf0;
+ s[count++] = ((codepoint >> 12) & 0x3f) | 0x80;
+ s[count++] = ((codepoint >> 6) & 0x3f) | 0x80;
+ s[count++] = (codepoint & 0x3f) | 0x80;
+ }
+
+ return count;
+}
+
+// Splits and translates a text/uri-list into separate file paths
+// NOTE: This function destroys the provided string
+//
+char** _glfwParseUriList(char* text, int* count)
+{
+ const char* prefix = "file://";
+ char** paths = NULL;
+ char* line;
+
+ *count = 0;
+
+ while ((line = strtok(text, "\r\n")))
+ {
+ char* path;
+
+ text = NULL;
+
+ if (line[0] == '#')
+ continue;
+
+ if (strncmp(line, prefix, strlen(prefix)) == 0)
+ {
+ line += strlen(prefix);
+ // TODO: Validate hostname
+ while (*line != '/')
+ line++;
+ }
+
+ (*count)++;
+
+ path = _glfw_calloc(strlen(line) + 1, 1);
+ paths = _glfw_realloc(paths, *count * sizeof(char*));
+ paths[*count - 1] = path;
+
+ while (*line)
+ {
+ if (line[0] == '%' && line[1] && line[2])
+ {
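+                // Decode a %XX percent-encoded byte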
+ const char digits[3] = { line[1], line[2], '\0' };
+ *path = (char) strtol(digits, NULL, 16);
+ line += 2;
+ }
+ else
+ *path = *line;
+
+ path++;
+ line++;
+ }
+ }
+
+ return paths;
+}
+
+char* _glfw_strdup(const char* source)
+{
+ const size_t length = strlen(source);
+ char* result = _glfw_calloc(length + 1, 1);
+ strcpy(result, source);
+ return result;
+}
+
+int _glfw_min(int a, int b)
+{
+ return a < b ? a : b;
+}
+
+int _glfw_max(int a, int b)
+{
+ return a > b ? a : b;
+}
+
+float _glfw_fminf(float a, float b)
+{
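+    // a != a is true only when a is NaN, so a NaN operand yields the other value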
+ if (a != a)
+ return b;
+ else if (b != b)
+ return a;
+ else if (a < b)
+ return a;
+ else
+ return b;
+}
+
+float _glfw_fmaxf(float a, float b)
+{
+ if (a != a)
+ return b;
+ else if (b != b)
+ return a;
+ else if (a > b)
+ return a;
+ else
+ return b;
+}
+
+void* _glfw_calloc(size_t count, size_t size)
+{
+ if (count && size)
+ {
+ void* block;
+
+ if (count > SIZE_MAX / size)
+ {
+ _glfwInputError(GLFW_INVALID_VALUE, "Allocation size overflow");
+ return NULL;
+ }
+
+ block = _glfw.allocator.allocate(count * size, _glfw.allocator.user);
+ if (block)
+ return memset(block, 0, count * size);
+ else
+ {
+ _glfwInputError(GLFW_OUT_OF_MEMORY, NULL);
+ return NULL;
+ }
+ }
+ else
+ return NULL;
+}
+
+void* _glfw_realloc(void* block, size_t size)
+{
+ if (block && size)
+ {
+ void* resized = _glfw.allocator.reallocate(block, size, _glfw.allocator.user);
+ if (resized)
+ return resized;
+ else
+ {
+ _glfwInputError(GLFW_OUT_OF_MEMORY, NULL);
+ return NULL;
+ }
+ }
+ else if (block)
+ {
+ _glfw_free(block);
+ return NULL;
+ }
+ else
+ return _glfw_calloc(1, size);
+}
+
+void _glfw_free(void* block)
+{
+ if (block)
+ _glfw.allocator.deallocate(block, _glfw.allocator.user);
+}
+
+
+//////////////////////////////////////////////////////////////////////////
+////// GLFW event API //////
+//////////////////////////////////////////////////////////////////////////
+
+// Notifies shared code of an error
+//
+void _glfwInputError(int code, const char* format, ...)
+{
+ _GLFWerror* error;
+ char description[_GLFW_MESSAGE_SIZE];
+
+ if (format)
+ {
+ va_list vl;
+
+ va_start(vl, format);
+ vsnprintf(description, sizeof(description), format, vl);
+ va_end(vl);
+
+ description[sizeof(description) - 1] = '\0';
+ }
+ else
+ {
+ if (code == GLFW_NOT_INITIALIZED)
+ strcpy(description, "The GLFW library is not initialized");
+ else if (code == GLFW_NO_CURRENT_CONTEXT)
+ strcpy(description, "There is no current context");
+ else if (code == GLFW_INVALID_ENUM)
+ strcpy(description, "Invalid argument for enum parameter");
+ else if (code == GLFW_INVALID_VALUE)
+ strcpy(description, "Invalid value for parameter");
+ else if (code == GLFW_OUT_OF_MEMORY)
+ strcpy(description, "Out of memory");
+ else if (code == GLFW_API_UNAVAILABLE)
+ strcpy(description, "The requested API is unavailable");
+ else if (code == GLFW_VERSION_UNAVAILABLE)
+ strcpy(description, "The requested API version is unavailable");
+ else if (code == GLFW_PLATFORM_ERROR)
+ strcpy(description, "A platform-specific error occurred");
+ else if (code == GLFW_FORMAT_UNAVAILABLE)
+ strcpy(description, "The requested format is unavailable");
+ else if (code == GLFW_NO_WINDOW_CONTEXT)
+ strcpy(description, "The specified window has no context");
+ else if (code == GLFW_CURSOR_UNAVAILABLE)
+ strcpy(description, "The specified cursor shape is unavailable");
+ else if (code == GLFW_FEATURE_UNAVAILABLE)
+ strcpy(description, "The requested feature cannot be implemented for this platform");
+ else if (code == GLFW_FEATURE_UNIMPLEMENTED)
+ strcpy(description, "The requested feature has not yet been implemented for this platform");
+ else if (code == GLFW_PLATFORM_UNAVAILABLE)
+ strcpy(description, "The requested platform is unavailable");
+ else
+ strcpy(description, "ERROR: UNKNOWN GLFW ERROR");
+ }
+
+ if (_glfw.initialized)
+ {
+ error = _glfwPlatformGetTls(&_glfw.errorSlot);
+ if (!error)
+ {
+ error = _glfw_calloc(1, sizeof(_GLFWerror));
+ _glfwPlatformSetTls(&_glfw.errorSlot, error);
+ _glfwPlatformLockMutex(&_glfw.errorLock);
+ error->next = _glfw.errorListHead;
+ _glfw.errorListHead = error;
+ _glfwPlatformUnlockMutex(&_glfw.errorLock);
+ }
+ }
+ else
+ error = &_glfwMainThreadError;
+
+ error->code = code;
+ strcpy(error->description, description);
+
+ if (_glfwErrorCallback)
+ _glfwErrorCallback(code, description);
+}
+
+
+//////////////////////////////////////////////////////////////////////////
+////// GLFW public API //////
+//////////////////////////////////////////////////////////////////////////
+
+GLFWAPI int glfwInit(void)
+{
+ if (_glfw.initialized)
+ return GLFW_TRUE;
+
+ memset(&_glfw, 0, sizeof(_glfw));
+ _glfw.hints.init = _glfwInitHints;
+
+ _glfw.allocator = _glfwInitAllocator;
+ if (!_glfw.allocator.allocate)
+ {
+ _glfw.allocator.allocate = defaultAllocate;
+ _glfw.allocator.reallocate = defaultReallocate;
+ _glfw.allocator.deallocate = defaultDeallocate;
+ }
+
+ if (!_glfwSelectPlatform(_glfw.hints.init.platformID, &_glfw.platform))
+ return GLFW_FALSE;
+
+ if (!_glfw.platform.init())
+ {
+ terminate();
+ return GLFW_FALSE;
+ }
+
+ if (!_glfwPlatformCreateMutex(&_glfw.errorLock) ||
+ !_glfwPlatformCreateTls(&_glfw.errorSlot) ||
+ !_glfwPlatformCreateTls(&_glfw.contextSlot))
+ {
+ terminate();
+ return GLFW_FALSE;
+ }
+
+ _glfwPlatformSetTls(&_glfw.errorSlot, &_glfwMainThreadError);
+
+ _glfwInitGamepadMappings();
+
+ _glfwPlatformInitTimer();
+ _glfw.timer.offset = _glfwPlatformGetTimerValue();
+
+ _glfw.initialized = GLFW_TRUE;
+
+ glfwDefaultWindowHints();
+ return GLFW_TRUE;
+}
+
+GLFWAPI void glfwTerminate(void)
+{
+ if (!_glfw.initialized)
+ return;
+
+ terminate();
+}
+
+GLFWAPI void glfwInitHint(int hint, int value)
+{
+ switch (hint)
+ {
+ case GLFW_JOYSTICK_HAT_BUTTONS:
+ _glfwInitHints.hatButtons = value;
+ return;
+ case GLFW_ANGLE_PLATFORM_TYPE:
+ _glfwInitHints.angleType = value;
+ return;
+ case GLFW_PLATFORM:
+ _glfwInitHints.platformID = value;
+ return;
+ case GLFW_COCOA_CHDIR_RESOURCES:
+ _glfwInitHints.ns.chdir = value;
+ return;
+ case GLFW_COCOA_MENUBAR:
+ _glfwInitHints.ns.menubar = value;
+ return;
+ case GLFW_X11_XCB_VULKAN_SURFACE:
+ _glfwInitHints.x11.xcbVulkanSurface = value;
+ return;
+ }
+
+ _glfwInputError(GLFW_INVALID_ENUM,
+ "Invalid init hint 0x%08X", hint);
+}
+
+GLFWAPI void glfwInitAllocator(const GLFWallocator* allocator)
+{
+ if (allocator)
+ {
+ if (allocator->allocate && allocator->reallocate && allocator->deallocate)
+ _glfwInitAllocator = *allocator;
+ else
+ _glfwInputError(GLFW_INVALID_VALUE, "Missing function in allocator");
+ }
+ else
+ memset(&_glfwInitAllocator, 0, sizeof(GLFWallocator));
+}
+
+GLFWAPI void glfwInitVulkanLoader(PFN_vkGetInstanceProcAddr loader)
+{
+ _glfwInitHints.vulkanLoader = loader;
+}
+
+GLFWAPI void glfwGetVersion(int* major, int* minor, int* rev)
+{
+ if (major != NULL)
+ *major = GLFW_VERSION_MAJOR;
+ if (minor != NULL)
+ *minor = GLFW_VERSION_MINOR;
+ if (rev != NULL)
+ *rev = GLFW_VERSION_REVISION;
+}
+
+GLFWAPI int glfwGetError(const char** description)
+{
+ _GLFWerror* error;
+ int code = GLFW_NO_ERROR;
+
+ if (description)
+ *description = NULL;
+
+ if (_glfw.initialized)
+ error = _glfwPlatformGetTls(&_glfw.errorSlot);
+ else
+ error = &_glfwMainThreadError;
+
+ if (error)
+ {
+ code = error->code;
+ error->code = GLFW_NO_ERROR;
+ if (description && code)
+ *description = error->description;
+ }
+
+ return code;
+}
+
+GLFWAPI GLFWerrorfun glfwSetErrorCallback(GLFWerrorfun cbfun)
+{
+ _GLFW_SWAP(GLFWerrorfun, _glfwErrorCallback, cbfun);
+ return cbfun;
+}
+
diff --git a/chromium/third_party/dawn/third_party/glfw/src/input.c b/chromium/third_party/dawn/third_party/glfw/src/input.c
new file mode 100644
index 00000000000..e0a12ccea57
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/src/input.c
@@ -0,0 +1,1431 @@
+//========================================================================
+// GLFW 3.4 - www.glfw.org
+//------------------------------------------------------------------------
+// Copyright (c) 2002-2006 Marcus Geelnard
+// Copyright (c) 2006-2019 Camilla Löwy <elmindreda@glfw.org>
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would
+// be appreciated but is not required.
+//
+// 2. Altered source versions must be plainly marked as such, and must not
+// be misrepresented as being the original software.
+//
+// 3. This notice may not be removed or altered from any source
+// distribution.
+//
+//========================================================================
+// Please use C89 style variable declarations in this file because it is
+// built with VS 2010, which does not support C99 declarations
+//========================================================================
+
+#include "internal.h"
+#include "mappings.h"
+
+#include <assert.h>
+#include <float.h>
+#include <math.h>
+#include <stdlib.h>
+#include <string.h>
+
+// Internal key state used for sticky keys
+#define _GLFW_STICK 3
+
+// Internal constants for gamepad mapping source types
+#define _GLFW_JOYSTICK_AXIS 1
+#define _GLFW_JOYSTICK_BUTTON 2
+#define _GLFW_JOYSTICK_HATBIT 3
+
+// Initializes the platform joystick API if it has not already been initialized
+//
+static GLFWbool initJoysticks(void)
+{
+ if (!_glfw.joysticksInitialized)
+ {
+ if (!_glfw.platform.initJoysticks())
+ {
+ _glfw.platform.terminateJoysticks();
+ return GLFW_FALSE;
+ }
+ }
+
+ return _glfw.joysticksInitialized = GLFW_TRUE;
+}
+
+// Finds a mapping based on joystick GUID
+//
+static _GLFWmapping* findMapping(const char* guid)
+{
+ int i;
+
+ for (i = 0; i < _glfw.mappingCount; i++)
+ {
+ if (strcmp(_glfw.mappings[i].guid, guid) == 0)
+ return _glfw.mappings + i;
+ }
+
+ return NULL;
+}
+
+// Checks whether a gamepad mapping element is present in the hardware
+//
+static GLFWbool isValidElementForJoystick(const _GLFWmapelement* e,
+ const _GLFWjoystick* js)
+{
+ if (e->type == _GLFW_JOYSTICK_HATBIT && (e->index >> 4) >= js->hatCount)
+ return GLFW_FALSE;
+ else if (e->type == _GLFW_JOYSTICK_BUTTON && e->index >= js->buttonCount)
+ return GLFW_FALSE;
+ else if (e->type == _GLFW_JOYSTICK_AXIS && e->index >= js->axisCount)
+ return GLFW_FALSE;
+
+ return GLFW_TRUE;
+}
+
+// Finds a mapping based on joystick GUID and verifies element indices
+//
+static _GLFWmapping* findValidMapping(const _GLFWjoystick* js)
+{
+ _GLFWmapping* mapping = findMapping(js->guid);
+ if (mapping)
+ {
+ int i;
+
+ for (i = 0; i <= GLFW_GAMEPAD_BUTTON_LAST; i++)
+ {
+ if (!isValidElementForJoystick(mapping->buttons + i, js))
+ return NULL;
+ }
+
+ for (i = 0; i <= GLFW_GAMEPAD_AXIS_LAST; i++)
+ {
+ if (!isValidElementForJoystick(mapping->axes + i, js))
+ return NULL;
+ }
+ }
+
+ return mapping;
+}
+
+// Parses an SDL_GameControllerDB line and adds it to the mapping list
+//
+static GLFWbool parseMapping(_GLFWmapping* mapping, const char* string)
+{
+ const char* c = string;
+ size_t i, length;
+ struct
+ {
+ const char* name;
+ _GLFWmapelement* element;
+ } fields[] =
+ {
+ { "platform", NULL },
+ { "a", mapping->buttons + GLFW_GAMEPAD_BUTTON_A },
+ { "b", mapping->buttons + GLFW_GAMEPAD_BUTTON_B },
+ { "x", mapping->buttons + GLFW_GAMEPAD_BUTTON_X },
+ { "y", mapping->buttons + GLFW_GAMEPAD_BUTTON_Y },
+ { "back", mapping->buttons + GLFW_GAMEPAD_BUTTON_BACK },
+ { "start", mapping->buttons + GLFW_GAMEPAD_BUTTON_START },
+ { "guide", mapping->buttons + GLFW_GAMEPAD_BUTTON_GUIDE },
+ { "leftshoulder", mapping->buttons + GLFW_GAMEPAD_BUTTON_LEFT_BUMPER },
+ { "rightshoulder", mapping->buttons + GLFW_GAMEPAD_BUTTON_RIGHT_BUMPER },
+ { "leftstick", mapping->buttons + GLFW_GAMEPAD_BUTTON_LEFT_THUMB },
+ { "rightstick", mapping->buttons + GLFW_GAMEPAD_BUTTON_RIGHT_THUMB },
+ { "dpup", mapping->buttons + GLFW_GAMEPAD_BUTTON_DPAD_UP },
+ { "dpright", mapping->buttons + GLFW_GAMEPAD_BUTTON_DPAD_RIGHT },
+ { "dpdown", mapping->buttons + GLFW_GAMEPAD_BUTTON_DPAD_DOWN },
+ { "dpleft", mapping->buttons + GLFW_GAMEPAD_BUTTON_DPAD_LEFT },
+ { "lefttrigger", mapping->axes + GLFW_GAMEPAD_AXIS_LEFT_TRIGGER },
+ { "righttrigger", mapping->axes + GLFW_GAMEPAD_AXIS_RIGHT_TRIGGER },
+ { "leftx", mapping->axes + GLFW_GAMEPAD_AXIS_LEFT_X },
+ { "lefty", mapping->axes + GLFW_GAMEPAD_AXIS_LEFT_Y },
+ { "rightx", mapping->axes + GLFW_GAMEPAD_AXIS_RIGHT_X },
+ { "righty", mapping->axes + GLFW_GAMEPAD_AXIS_RIGHT_Y }
+ };
+
+ length = strcspn(c, ",");
+ if (length != 32 || c[length] != ',')
+ {
+ _glfwInputError(GLFW_INVALID_VALUE, NULL);
+ return GLFW_FALSE;
+ }
+
+ memcpy(mapping->guid, c, length);
+ c += length + 1;
+
+ length = strcspn(c, ",");
+ if (length >= sizeof(mapping->name) || c[length] != ',')
+ {
+ _glfwInputError(GLFW_INVALID_VALUE, NULL);
+ return GLFW_FALSE;
+ }
+
+ memcpy(mapping->name, c, length);
+ c += length + 1;
+
+ while (*c)
+ {
+ // TODO: Implement output modifiers
+ if (*c == '+' || *c == '-')
+ return GLFW_FALSE;
+
+ for (i = 0; i < sizeof(fields) / sizeof(fields[0]); i++)
+ {
+ length = strlen(fields[i].name);
+ if (strncmp(c, fields[i].name, length) != 0 || c[length] != ':')
+ continue;
+
+ c += length + 1;
+
+ if (fields[i].element)
+ {
+ _GLFWmapelement* e = fields[i].element;
+ int8_t minimum = -1;
+ int8_t maximum = 1;
+
+ if (*c == '+')
+ {
+ minimum = 0;
+ c += 1;
+ }
+ else if (*c == '-')
+ {
+ maximum = 0;
+ c += 1;
+ }
+
+ if (*c == 'a')
+ e->type = _GLFW_JOYSTICK_AXIS;
+ else if (*c == 'b')
+ e->type = _GLFW_JOYSTICK_BUTTON;
+ else if (*c == 'h')
+ e->type = _GLFW_JOYSTICK_HATBIT;
+ else
+ break;
+
+ if (e->type == _GLFW_JOYSTICK_HATBIT)
+ {
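+                    // Pack the hat index into the high nibble and the hat bit mask into the low nibble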
+ const unsigned long hat = strtoul(c + 1, (char**) &c, 10);
+ const unsigned long bit = strtoul(c + 1, (char**) &c, 10);
+ e->index = (uint8_t) ((hat << 4) | bit);
+ }
+ else
+ e->index = (uint8_t) strtoul(c + 1, (char**) &c, 10);
+
+ if (e->type == _GLFW_JOYSTICK_AXIS)
+ {
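+                    // Compute a scale and offset that map the bound range [minimum, maximum] onto [-1, 1]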
+ e->axisScale = 2 / (maximum - minimum);
+ e->axisOffset = -(maximum + minimum);
+
+ if (*c == '~')
+ {
+ e->axisScale = -e->axisScale;
+ e->axisOffset = -e->axisOffset;
+ }
+ }
+ }
+ else
+ {
+ const char* name = _glfw.platform.getMappingName();
+ length = strlen(name);
+ if (strncmp(c, name, length) != 0)
+ return GLFW_FALSE;
+ }
+
+ break;
+ }
+
+ c += strcspn(c, ",");
+ c += strspn(c, ",");
+ }
+
+ for (i = 0; i < 32; i++)
+ {
+ if (mapping->guid[i] >= 'A' && mapping->guid[i] <= 'F')
+ mapping->guid[i] += 'a' - 'A';
+ }
+
+ _glfw.platform.updateGamepadGUID(mapping->guid);
+ return GLFW_TRUE;
+}
+
+
+//////////////////////////////////////////////////////////////////////////
+////// GLFW event API //////
+//////////////////////////////////////////////////////////////////////////
+
+// Notifies shared code of a physical key event
+//
+void _glfwInputKey(_GLFWwindow* window, int key, int scancode, int action, int mods)
+{
+ if (key >= 0 && key <= GLFW_KEY_LAST)
+ {
+ GLFWbool repeated = GLFW_FALSE;
+
+ if (action == GLFW_RELEASE && window->keys[key] == GLFW_RELEASE)
+ return;
+
+ if (action == GLFW_PRESS && window->keys[key] == GLFW_PRESS)
+ repeated = GLFW_TRUE;
+
+ if (action == GLFW_RELEASE && window->stickyKeys)
+ window->keys[key] = _GLFW_STICK;
+ else
+ window->keys[key] = (char) action;
+
+ if (repeated)
+ action = GLFW_REPEAT;
+ }
+
+ if (!window->lockKeyMods)
+ mods &= ~(GLFW_MOD_CAPS_LOCK | GLFW_MOD_NUM_LOCK);
+
+ if (window->callbacks.key)
+ window->callbacks.key((GLFWwindow*) window, key, scancode, action, mods);
+}
+
+// Notifies shared code of a Unicode codepoint input event
+// The 'plain' parameter determines whether to emit a regular character event
+//
+void _glfwInputChar(_GLFWwindow* window, uint32_t codepoint, int mods, GLFWbool plain)
+{
+ if (codepoint < 32 || (codepoint > 126 && codepoint < 160))
+ return;
+
+ if (!window->lockKeyMods)
+ mods &= ~(GLFW_MOD_CAPS_LOCK | GLFW_MOD_NUM_LOCK);
+
+ if (window->callbacks.charmods)
+ window->callbacks.charmods((GLFWwindow*) window, codepoint, mods);
+
+ if (plain)
+ {
+ if (window->callbacks.character)
+ window->callbacks.character((GLFWwindow*) window, codepoint);
+ }
+}
+
+// Notifies shared code of a scroll event
+//
+void _glfwInputScroll(_GLFWwindow* window, double xoffset, double yoffset)
+{
+ if (window->callbacks.scroll)
+ window->callbacks.scroll((GLFWwindow*) window, xoffset, yoffset);
+}
+
+// Notifies shared code of a mouse button click event
+//
+void _glfwInputMouseClick(_GLFWwindow* window, int button, int action, int mods)
+{
+ if (button < 0 || button > GLFW_MOUSE_BUTTON_LAST)
+ return;
+
+ if (!window->lockKeyMods)
+ mods &= ~(GLFW_MOD_CAPS_LOCK | GLFW_MOD_NUM_LOCK);
+
+ if (action == GLFW_RELEASE && window->stickyMouseButtons)
+ window->mouseButtons[button] = _GLFW_STICK;
+ else
+ window->mouseButtons[button] = (char) action;
+
+ if (window->callbacks.mouseButton)
+ window->callbacks.mouseButton((GLFWwindow*) window, button, action, mods);
+}
+
+// Notifies shared code of a cursor motion event
+// The position is specified in content area relative screen coordinates
+//
+void _glfwInputCursorPos(_GLFWwindow* window, double xpos, double ypos)
+{
+ if (window->virtualCursorPosX == xpos && window->virtualCursorPosY == ypos)
+ return;
+
+ window->virtualCursorPosX = xpos;
+ window->virtualCursorPosY = ypos;
+
+ if (window->callbacks.cursorPos)
+ window->callbacks.cursorPos((GLFWwindow*) window, xpos, ypos);
+}
+
+// Notifies shared code of a cursor enter/leave event
+//
+void _glfwInputCursorEnter(_GLFWwindow* window, GLFWbool entered)
+{
+ if (window->callbacks.cursorEnter)
+ window->callbacks.cursorEnter((GLFWwindow*) window, entered);
+}
+
+// Notifies shared code of files or directories dropped on a window
+//
+void _glfwInputDrop(_GLFWwindow* window, int count, const char** paths)
+{
+ if (window->callbacks.drop)
+ window->callbacks.drop((GLFWwindow*) window, count, paths);
+}
+
+// Notifies shared code of a joystick connection or disconnection
+//
+void _glfwInputJoystick(_GLFWjoystick* js, int event)
+{
+ const int jid = (int) (js - _glfw.joysticks);
+
+ if (_glfw.callbacks.joystick)
+ _glfw.callbacks.joystick(jid, event);
+}
+
+// Notifies shared code of the new value of a joystick axis
+//
+void _glfwInputJoystickAxis(_GLFWjoystick* js, int axis, float value)
+{
+ js->axes[axis] = value;
+}
+
+// Notifies shared code of the new value of a joystick button
+//
+void _glfwInputJoystickButton(_GLFWjoystick* js, int button, char value)
+{
+ js->buttons[button] = value;
+}
+
+// Notifies shared code of the new value of a joystick hat
+//
+void _glfwInputJoystickHat(_GLFWjoystick* js, int hat, char value)
+{
+ const int base = js->buttonCount + hat * 4;
+
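+    // Expose the hat as four synthetic buttons (up, right, down, left) stored after the real buttons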
+ js->buttons[base + 0] = (value & 0x01) ? GLFW_PRESS : GLFW_RELEASE;
+ js->buttons[base + 1] = (value & 0x02) ? GLFW_PRESS : GLFW_RELEASE;
+ js->buttons[base + 2] = (value & 0x04) ? GLFW_PRESS : GLFW_RELEASE;
+ js->buttons[base + 3] = (value & 0x08) ? GLFW_PRESS : GLFW_RELEASE;
+
+ js->hats[hat] = value;
+}
+
+
+//////////////////////////////////////////////////////////////////////////
+////// GLFW internal API //////
+//////////////////////////////////////////////////////////////////////////
+
+// Adds the built-in set of gamepad mappings
+//
+void _glfwInitGamepadMappings(void)
+{
+ size_t i;
+ const size_t count = sizeof(_glfwDefaultMappings) / sizeof(char*);
+ _glfw.mappings = _glfw_calloc(count, sizeof(_GLFWmapping));
+
+ for (i = 0; i < count; i++)
+ {
+ if (parseMapping(&_glfw.mappings[_glfw.mappingCount], _glfwDefaultMappings[i]))
+ _glfw.mappingCount++;
+ }
+}
+
+// Returns an available joystick object with arrays and name allocated
+//
+_GLFWjoystick* _glfwAllocJoystick(const char* name,
+ const char* guid,
+ int axisCount,
+ int buttonCount,
+ int hatCount)
+{
+ int jid;
+ _GLFWjoystick* js;
+
+ for (jid = 0; jid <= GLFW_JOYSTICK_LAST; jid++)
+ {
+ if (!_glfw.joysticks[jid].present)
+ break;
+ }
+
+ if (jid > GLFW_JOYSTICK_LAST)
+ return NULL;
+
+ js = _glfw.joysticks + jid;
+ js->present = GLFW_TRUE;
+ js->axes = _glfw_calloc(axisCount, sizeof(float));
+ js->buttons = _glfw_calloc(buttonCount + (size_t) hatCount * 4, 1);
+ js->hats = _glfw_calloc(hatCount, 1);
+ js->axisCount = axisCount;
+ js->buttonCount = buttonCount;
+ js->hatCount = hatCount;
+
+ strncpy(js->name, name, sizeof(js->name) - 1);
+ strncpy(js->guid, guid, sizeof(js->guid) - 1);
+ js->mapping = findValidMapping(js);
+
+ return js;
+}
+
+// Frees arrays and name and flags the joystick object as unused
+//
+void _glfwFreeJoystick(_GLFWjoystick* js)
+{
+ _glfw_free(js->axes);
+ _glfw_free(js->buttons);
+ _glfw_free(js->hats);
+ memset(js, 0, sizeof(_GLFWjoystick));
+}
+
+// Center the cursor in the content area of the specified window
+//
+void _glfwCenterCursorInContentArea(_GLFWwindow* window)
+{
+ int width, height;
+
+ _glfw.platform.getWindowSize(window, &width, &height);
+ _glfw.platform.setCursorPos(window, width / 2.0, height / 2.0);
+}
+
+
+//////////////////////////////////////////////////////////////////////////
+////// GLFW public API //////
+//////////////////////////////////////////////////////////////////////////
+
+GLFWAPI int glfwGetInputMode(GLFWwindow* handle, int mode)
+{
+ _GLFWwindow* window = (_GLFWwindow*) handle;
+ assert(window != NULL);
+
+ _GLFW_REQUIRE_INIT_OR_RETURN(0);
+
+ switch (mode)
+ {
+ case GLFW_CURSOR:
+ return window->cursorMode;
+ case GLFW_STICKY_KEYS:
+ return window->stickyKeys;
+ case GLFW_STICKY_MOUSE_BUTTONS:
+ return window->stickyMouseButtons;
+ case GLFW_LOCK_KEY_MODS:
+ return window->lockKeyMods;
+ case GLFW_RAW_MOUSE_MOTION:
+ return window->rawMouseMotion;
+ }
+
+ _glfwInputError(GLFW_INVALID_ENUM, "Invalid input mode 0x%08X", mode);
+ return 0;
+}
+
+GLFWAPI void glfwSetInputMode(GLFWwindow* handle, int mode, int value)
+{
+ _GLFWwindow* window = (_GLFWwindow*) handle;
+ assert(window != NULL);
+
+ _GLFW_REQUIRE_INIT();
+
+ switch (mode)
+ {
+ case GLFW_CURSOR:
+ {
+ if (value != GLFW_CURSOR_NORMAL &&
+ value != GLFW_CURSOR_HIDDEN &&
+ value != GLFW_CURSOR_DISABLED)
+ {
+ _glfwInputError(GLFW_INVALID_ENUM,
+ "Invalid cursor mode 0x%08X",
+ value);
+ return;
+ }
+
+ if (window->cursorMode == value)
+ return;
+
+ window->cursorMode = value;
+
+ _glfw.platform.getCursorPos(window,
+ &window->virtualCursorPosX,
+ &window->virtualCursorPosY);
+ _glfw.platform.setCursorMode(window, value);
+ return;
+ }
+
+ case GLFW_STICKY_KEYS:
+ {
+ value = value ? GLFW_TRUE : GLFW_FALSE;
+ if (window->stickyKeys == value)
+ return;
+
+ if (!value)
+ {
+ int i;
+
+ // Release all sticky keys
+ for (i = 0; i <= GLFW_KEY_LAST; i++)
+ {
+ if (window->keys[i] == _GLFW_STICK)
+ window->keys[i] = GLFW_RELEASE;
+ }
+ }
+
+ window->stickyKeys = value;
+ return;
+ }
+
+ case GLFW_STICKY_MOUSE_BUTTONS:
+ {
+ value = value ? GLFW_TRUE : GLFW_FALSE;
+ if (window->stickyMouseButtons == value)
+ return;
+
+ if (!value)
+ {
+ int i;
+
+ // Release all sticky mouse buttons
+ for (i = 0; i <= GLFW_MOUSE_BUTTON_LAST; i++)
+ {
+ if (window->mouseButtons[i] == _GLFW_STICK)
+ window->mouseButtons[i] = GLFW_RELEASE;
+ }
+ }
+
+ window->stickyMouseButtons = value;
+ return;
+ }
+
+ case GLFW_LOCK_KEY_MODS:
+ {
+ window->lockKeyMods = value ? GLFW_TRUE : GLFW_FALSE;
+ return;
+ }
+
+ case GLFW_RAW_MOUSE_MOTION:
+ {
+ if (!_glfw.platform.rawMouseMotionSupported())
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "Raw mouse motion is not supported on this system");
+ return;
+ }
+
+ value = value ? GLFW_TRUE : GLFW_FALSE;
+ if (window->rawMouseMotion == value)
+ return;
+
+ window->rawMouseMotion = value;
+ _glfw.platform.setRawMouseMotion(window, value);
+ return;
+ }
+ }
+
+ _glfwInputError(GLFW_INVALID_ENUM, "Invalid input mode 0x%08X", mode);
+}
+
+GLFWAPI int glfwRawMouseMotionSupported(void)
+{
+ _GLFW_REQUIRE_INIT_OR_RETURN(GLFW_FALSE);
+ return _glfw.platform.rawMouseMotionSupported();
+}
+
+GLFWAPI const char* glfwGetKeyName(int key, int scancode)
+{
+ _GLFW_REQUIRE_INIT_OR_RETURN(NULL);
+
+ if (key != GLFW_KEY_UNKNOWN)
+ {
+ if (key != GLFW_KEY_KP_EQUAL &&
+ (key < GLFW_KEY_KP_0 || key > GLFW_KEY_KP_ADD) &&
+ (key < GLFW_KEY_APOSTROPHE || key > GLFW_KEY_WORLD_2))
+ {
+ return NULL;
+ }
+
+ scancode = _glfw.platform.getKeyScancode(key);
+ }
+
+ return _glfw.platform.getScancodeName(scancode);
+}
+
+GLFWAPI int glfwGetKeyScancode(int key)
+{
+ _GLFW_REQUIRE_INIT_OR_RETURN(-1);
+
+ if (key < GLFW_KEY_SPACE || key > GLFW_KEY_LAST)
+ {
+ _glfwInputError(GLFW_INVALID_ENUM, "Invalid key %i", key);
+        return -1;
+ }
+
+ return _glfw.platform.getKeyScancode(key);
+}
+
+GLFWAPI int glfwGetKey(GLFWwindow* handle, int key)
+{
+ _GLFWwindow* window = (_GLFWwindow*) handle;
+ assert(window != NULL);
+
+ _GLFW_REQUIRE_INIT_OR_RETURN(GLFW_RELEASE);
+
+ if (key < GLFW_KEY_SPACE || key > GLFW_KEY_LAST)
+ {
+ _glfwInputError(GLFW_INVALID_ENUM, "Invalid key %i", key);
+ return GLFW_RELEASE;
+ }
+
+ if (window->keys[key] == _GLFW_STICK)
+ {
+ // Sticky mode: release key now
+ window->keys[key] = GLFW_RELEASE;
+ return GLFW_PRESS;
+ }
+
+ return (int) window->keys[key];
+}
+
+GLFWAPI int glfwGetMouseButton(GLFWwindow* handle, int button)
+{
+ _GLFWwindow* window = (_GLFWwindow*) handle;
+ assert(window != NULL);
+
+ _GLFW_REQUIRE_INIT_OR_RETURN(GLFW_RELEASE);
+
+ if (button < GLFW_MOUSE_BUTTON_1 || button > GLFW_MOUSE_BUTTON_LAST)
+ {
+ _glfwInputError(GLFW_INVALID_ENUM, "Invalid mouse button %i", button);
+ return GLFW_RELEASE;
+ }
+
+ if (window->mouseButtons[button] == _GLFW_STICK)
+ {
+ // Sticky mode: release mouse button now
+ window->mouseButtons[button] = GLFW_RELEASE;
+ return GLFW_PRESS;
+ }
+
+ return (int) window->mouseButtons[button];
+}
+
+GLFWAPI void glfwGetCursorPos(GLFWwindow* handle, double* xpos, double* ypos)
+{
+ _GLFWwindow* window = (_GLFWwindow*) handle;
+ assert(window != NULL);
+
+ if (xpos)
+ *xpos = 0;
+ if (ypos)
+ *ypos = 0;
+
+ _GLFW_REQUIRE_INIT();
+
+ if (window->cursorMode == GLFW_CURSOR_DISABLED)
+ {
+ if (xpos)
+ *xpos = window->virtualCursorPosX;
+ if (ypos)
+ *ypos = window->virtualCursorPosY;
+ }
+ else
+ _glfw.platform.getCursorPos(window, xpos, ypos);
+}
+
+GLFWAPI void glfwSetCursorPos(GLFWwindow* handle, double xpos, double ypos)
+{
+ _GLFWwindow* window = (_GLFWwindow*) handle;
+ assert(window != NULL);
+
+ _GLFW_REQUIRE_INIT();
+
+ if (xpos != xpos || xpos < -DBL_MAX || xpos > DBL_MAX ||
+ ypos != ypos || ypos < -DBL_MAX || ypos > DBL_MAX)
+ {
+ _glfwInputError(GLFW_INVALID_VALUE,
+ "Invalid cursor position %f %f",
+ xpos, ypos);
+ return;
+ }
+
+ if (!_glfw.platform.windowFocused(window))
+ return;
+
+ if (window->cursorMode == GLFW_CURSOR_DISABLED)
+ {
+ // Only update the accumulated position if the cursor is disabled
+ window->virtualCursorPosX = xpos;
+ window->virtualCursorPosY = ypos;
+ }
+ else
+ {
+ // Update system cursor position
+ _glfw.platform.setCursorPos(window, xpos, ypos);
+ }
+}
+
+GLFWAPI GLFWcursor* glfwCreateCursor(const GLFWimage* image, int xhot, int yhot)
+{
+ _GLFWcursor* cursor;
+
+ assert(image != NULL);
+ assert(image->pixels != NULL);
+
+ _GLFW_REQUIRE_INIT_OR_RETURN(NULL);
+
+ if (image->width <= 0 || image->height <= 0)
+ {
+ _glfwInputError(GLFW_INVALID_VALUE, "Invalid image dimensions for cursor");
+ return NULL;
+ }
+
+ cursor = _glfw_calloc(1, sizeof(_GLFWcursor));
+ cursor->next = _glfw.cursorListHead;
+ _glfw.cursorListHead = cursor;
+
+ if (!_glfw.platform.createCursor(cursor, image, xhot, yhot))
+ {
+ glfwDestroyCursor((GLFWcursor*) cursor);
+ return NULL;
+ }
+
+ return (GLFWcursor*) cursor;
+}
+
+GLFWAPI GLFWcursor* glfwCreateStandardCursor(int shape)
+{
+ _GLFWcursor* cursor;
+
+ _GLFW_REQUIRE_INIT_OR_RETURN(NULL);
+
+ if (shape != GLFW_ARROW_CURSOR &&
+ shape != GLFW_IBEAM_CURSOR &&
+ shape != GLFW_CROSSHAIR_CURSOR &&
+ shape != GLFW_POINTING_HAND_CURSOR &&
+ shape != GLFW_RESIZE_EW_CURSOR &&
+ shape != GLFW_RESIZE_NS_CURSOR &&
+ shape != GLFW_RESIZE_NWSE_CURSOR &&
+ shape != GLFW_RESIZE_NESW_CURSOR &&
+ shape != GLFW_RESIZE_ALL_CURSOR &&
+ shape != GLFW_NOT_ALLOWED_CURSOR)
+ {
+ _glfwInputError(GLFW_INVALID_ENUM, "Invalid standard cursor 0x%08X", shape);
+ return NULL;
+ }
+
+ cursor = _glfw_calloc(1, sizeof(_GLFWcursor));
+ cursor->next = _glfw.cursorListHead;
+ _glfw.cursorListHead = cursor;
+
+ if (!_glfw.platform.createStandardCursor(cursor, shape))
+ {
+ glfwDestroyCursor((GLFWcursor*) cursor);
+ return NULL;
+ }
+
+ return (GLFWcursor*) cursor;
+}
+
+GLFWAPI void glfwDestroyCursor(GLFWcursor* handle)
+{
+ _GLFWcursor* cursor = (_GLFWcursor*) handle;
+
+ _GLFW_REQUIRE_INIT();
+
+ if (cursor == NULL)
+ return;
+
+ // Make sure the cursor is not being used by any window
+ {
+ _GLFWwindow* window;
+
+ for (window = _glfw.windowListHead; window; window = window->next)
+ {
+ if (window->cursor == cursor)
+ glfwSetCursor((GLFWwindow*) window, NULL);
+ }
+ }
+
+ _glfw.platform.destroyCursor(cursor);
+
+ // Unlink cursor from global linked list
+ {
+ _GLFWcursor** prev = &_glfw.cursorListHead;
+
+ while (*prev != cursor)
+ prev = &((*prev)->next);
+
+ *prev = cursor->next;
+ }
+
+ _glfw_free(cursor);
+}
+
+GLFWAPI void glfwSetCursor(GLFWwindow* windowHandle, GLFWcursor* cursorHandle)
+{
+ _GLFWwindow* window = (_GLFWwindow*) windowHandle;
+ _GLFWcursor* cursor = (_GLFWcursor*) cursorHandle;
+ assert(window != NULL);
+
+ _GLFW_REQUIRE_INIT();
+
+ window->cursor = cursor;
+
+ _glfw.platform.setCursor(window, cursor);
+}
+
+GLFWAPI GLFWkeyfun glfwSetKeyCallback(GLFWwindow* handle, GLFWkeyfun cbfun)
+{
+ _GLFWwindow* window = (_GLFWwindow*) handle;
+ assert(window != NULL);
+
+ _GLFW_REQUIRE_INIT_OR_RETURN(NULL);
+ _GLFW_SWAP(GLFWkeyfun, window->callbacks.key, cbfun);
+ return cbfun;
+}
+
+GLFWAPI GLFWcharfun glfwSetCharCallback(GLFWwindow* handle, GLFWcharfun cbfun)
+{
+ _GLFWwindow* window = (_GLFWwindow*) handle;
+ assert(window != NULL);
+
+ _GLFW_REQUIRE_INIT_OR_RETURN(NULL);
+ _GLFW_SWAP(GLFWcharfun, window->callbacks.character, cbfun);
+ return cbfun;
+}
+
+GLFWAPI GLFWcharmodsfun glfwSetCharModsCallback(GLFWwindow* handle, GLFWcharmodsfun cbfun)
+{
+ _GLFWwindow* window = (_GLFWwindow*) handle;
+ assert(window != NULL);
+
+ _GLFW_REQUIRE_INIT_OR_RETURN(NULL);
+ _GLFW_SWAP(GLFWcharmodsfun, window->callbacks.charmods, cbfun);
+ return cbfun;
+}
+
+GLFWAPI GLFWmousebuttonfun glfwSetMouseButtonCallback(GLFWwindow* handle,
+ GLFWmousebuttonfun cbfun)
+{
+ _GLFWwindow* window = (_GLFWwindow*) handle;
+ assert(window != NULL);
+
+ _GLFW_REQUIRE_INIT_OR_RETURN(NULL);
+ _GLFW_SWAP(GLFWmousebuttonfun, window->callbacks.mouseButton, cbfun);
+ return cbfun;
+}
+
+GLFWAPI GLFWcursorposfun glfwSetCursorPosCallback(GLFWwindow* handle,
+ GLFWcursorposfun cbfun)
+{
+ _GLFWwindow* window = (_GLFWwindow*) handle;
+ assert(window != NULL);
+
+ _GLFW_REQUIRE_INIT_OR_RETURN(NULL);
+ _GLFW_SWAP(GLFWcursorposfun, window->callbacks.cursorPos, cbfun);
+ return cbfun;
+}
+
+GLFWAPI GLFWcursorenterfun glfwSetCursorEnterCallback(GLFWwindow* handle,
+ GLFWcursorenterfun cbfun)
+{
+ _GLFWwindow* window = (_GLFWwindow*) handle;
+ assert(window != NULL);
+
+ _GLFW_REQUIRE_INIT_OR_RETURN(NULL);
+ _GLFW_SWAP(GLFWcursorenterfun, window->callbacks.cursorEnter, cbfun);
+ return cbfun;
+}
+
+GLFWAPI GLFWscrollfun glfwSetScrollCallback(GLFWwindow* handle,
+ GLFWscrollfun cbfun)
+{
+ _GLFWwindow* window = (_GLFWwindow*) handle;
+ assert(window != NULL);
+
+ _GLFW_REQUIRE_INIT_OR_RETURN(NULL);
+ _GLFW_SWAP(GLFWscrollfun, window->callbacks.scroll, cbfun);
+ return cbfun;
+}
+
+GLFWAPI GLFWdropfun glfwSetDropCallback(GLFWwindow* handle, GLFWdropfun cbfun)
+{
+ _GLFWwindow* window = (_GLFWwindow*) handle;
+ assert(window != NULL);
+
+ _GLFW_REQUIRE_INIT_OR_RETURN(NULL);
+ _GLFW_SWAP(GLFWdropfun, window->callbacks.drop, cbfun);
+ return cbfun;
+}
+
+GLFWAPI int glfwJoystickPresent(int jid)
+{
+ _GLFWjoystick* js;
+
+ assert(jid >= GLFW_JOYSTICK_1);
+ assert(jid <= GLFW_JOYSTICK_LAST);
+
+ _GLFW_REQUIRE_INIT_OR_RETURN(GLFW_FALSE);
+
+ if (jid < 0 || jid > GLFW_JOYSTICK_LAST)
+ {
+ _glfwInputError(GLFW_INVALID_ENUM, "Invalid joystick ID %i", jid);
+ return GLFW_FALSE;
+ }
+
+ if (!initJoysticks())
+ return GLFW_FALSE;
+
+ js = _glfw.joysticks + jid;
+ if (!js->present)
+ return GLFW_FALSE;
+
+ return _glfw.platform.pollJoystick(js, _GLFW_POLL_PRESENCE);
+}
+
+GLFWAPI const float* glfwGetJoystickAxes(int jid, int* count)
+{
+ _GLFWjoystick* js;
+
+ assert(jid >= GLFW_JOYSTICK_1);
+ assert(jid <= GLFW_JOYSTICK_LAST);
+ assert(count != NULL);
+
+ *count = 0;
+
+ _GLFW_REQUIRE_INIT_OR_RETURN(NULL);
+
+ if (jid < 0 || jid > GLFW_JOYSTICK_LAST)
+ {
+ _glfwInputError(GLFW_INVALID_ENUM, "Invalid joystick ID %i", jid);
+ return NULL;
+ }
+
+ if (!initJoysticks())
+ return NULL;
+
+ js = _glfw.joysticks + jid;
+ if (!js->present)
+ return NULL;
+
+ if (!_glfw.platform.pollJoystick(js, _GLFW_POLL_AXES))
+ return NULL;
+
+ *count = js->axisCount;
+ return js->axes;
+}
+
+GLFWAPI const unsigned char* glfwGetJoystickButtons(int jid, int* count)
+{
+ _GLFWjoystick* js;
+
+ assert(jid >= GLFW_JOYSTICK_1);
+ assert(jid <= GLFW_JOYSTICK_LAST);
+ assert(count != NULL);
+
+ *count = 0;
+
+ _GLFW_REQUIRE_INIT_OR_RETURN(NULL);
+
+ if (jid < 0 || jid > GLFW_JOYSTICK_LAST)
+ {
+ _glfwInputError(GLFW_INVALID_ENUM, "Invalid joystick ID %i", jid);
+ return NULL;
+ }
+
+ if (!initJoysticks())
+ return NULL;
+
+ js = _glfw.joysticks + jid;
+ if (!js->present)
+ return NULL;
+
+ if (!_glfw.platform.pollJoystick(js, _GLFW_POLL_BUTTONS))
+ return NULL;
+
+ if (_glfw.hints.init.hatButtons)
+ *count = js->buttonCount + js->hatCount * 4;
+ else
+ *count = js->buttonCount;
+
+ return js->buttons;
+}
+
+GLFWAPI const unsigned char* glfwGetJoystickHats(int jid, int* count)
+{
+ _GLFWjoystick* js;
+
+ assert(jid >= GLFW_JOYSTICK_1);
+ assert(jid <= GLFW_JOYSTICK_LAST);
+ assert(count != NULL);
+
+ *count = 0;
+
+ _GLFW_REQUIRE_INIT_OR_RETURN(NULL);
+
+ if (jid < 0 || jid > GLFW_JOYSTICK_LAST)
+ {
+ _glfwInputError(GLFW_INVALID_ENUM, "Invalid joystick ID %i", jid);
+ return NULL;
+ }
+
+ if (!initJoysticks())
+ return NULL;
+
+ js = _glfw.joysticks + jid;
+ if (!js->present)
+ return NULL;
+
+ if (!_glfw.platform.pollJoystick(js, _GLFW_POLL_BUTTONS))
+ return NULL;
+
+ *count = js->hatCount;
+ return js->hats;
+}
+
+GLFWAPI const char* glfwGetJoystickName(int jid)
+{
+ _GLFWjoystick* js;
+
+ assert(jid >= GLFW_JOYSTICK_1);
+ assert(jid <= GLFW_JOYSTICK_LAST);
+
+ _GLFW_REQUIRE_INIT_OR_RETURN(NULL);
+
+ if (jid < 0 || jid > GLFW_JOYSTICK_LAST)
+ {
+ _glfwInputError(GLFW_INVALID_ENUM, "Invalid joystick ID %i", jid);
+ return NULL;
+ }
+
+ if (!initJoysticks())
+ return NULL;
+
+ js = _glfw.joysticks + jid;
+ if (!js->present)
+ return NULL;
+
+ if (!_glfw.platform.pollJoystick(js, _GLFW_POLL_PRESENCE))
+ return NULL;
+
+ return js->name;
+}
+
+GLFWAPI const char* glfwGetJoystickGUID(int jid)
+{
+ _GLFWjoystick* js;
+
+ assert(jid >= GLFW_JOYSTICK_1);
+ assert(jid <= GLFW_JOYSTICK_LAST);
+
+ _GLFW_REQUIRE_INIT_OR_RETURN(NULL);
+
+ if (jid < 0 || jid > GLFW_JOYSTICK_LAST)
+ {
+ _glfwInputError(GLFW_INVALID_ENUM, "Invalid joystick ID %i", jid);
+ return NULL;
+ }
+
+ if (!initJoysticks())
+ return NULL;
+
+ js = _glfw.joysticks + jid;
+ if (!js->present)
+ return NULL;
+
+ if (!_glfw.platform.pollJoystick(js, _GLFW_POLL_PRESENCE))
+ return NULL;
+
+ return js->guid;
+}
+
+GLFWAPI void glfwSetJoystickUserPointer(int jid, void* pointer)
+{
+ _GLFWjoystick* js;
+
+ assert(jid >= GLFW_JOYSTICK_1);
+ assert(jid <= GLFW_JOYSTICK_LAST);
+
+ _GLFW_REQUIRE_INIT();
+
+ js = _glfw.joysticks + jid;
+ if (!js->present)
+ return;
+
+ js->userPointer = pointer;
+}
+
+GLFWAPI void* glfwGetJoystickUserPointer(int jid)
+{
+ _GLFWjoystick* js;
+
+ assert(jid >= GLFW_JOYSTICK_1);
+ assert(jid <= GLFW_JOYSTICK_LAST);
+
+ _GLFW_REQUIRE_INIT_OR_RETURN(NULL);
+
+ js = _glfw.joysticks + jid;
+ if (!js->present)
+ return NULL;
+
+ return js->userPointer;
+}
+
+GLFWAPI GLFWjoystickfun glfwSetJoystickCallback(GLFWjoystickfun cbfun)
+{
+ _GLFW_REQUIRE_INIT_OR_RETURN(NULL);
+
+ if (!initJoysticks())
+ return NULL;
+
+ _GLFW_SWAP(GLFWjoystickfun, _glfw.callbacks.joystick, cbfun);
+ return cbfun;
+}
+
+GLFWAPI int glfwUpdateGamepadMappings(const char* string)
+{
+ int jid;
+ const char* c = string;
+
+ assert(string != NULL);
+
+ _GLFW_REQUIRE_INIT_OR_RETURN(GLFW_FALSE);
+
+ while (*c)
+ {
+ if ((*c >= '0' && *c <= '9') ||
+ (*c >= 'a' && *c <= 'f') ||
+ (*c >= 'A' && *c <= 'F'))
+ {
+ char line[1024];
+
+ const size_t length = strcspn(c, "\r\n");
+ if (length < sizeof(line))
+ {
+ _GLFWmapping mapping = {{0}};
+
+ memcpy(line, c, length);
+ line[length] = '\0';
+
+ if (parseMapping(&mapping, line))
+ {
+ _GLFWmapping* previous = findMapping(mapping.guid);
+ if (previous)
+ *previous = mapping;
+ else
+ {
+ _glfw.mappingCount++;
+ _glfw.mappings =
+ _glfw_realloc(_glfw.mappings,
+ sizeof(_GLFWmapping) * _glfw.mappingCount);
+ _glfw.mappings[_glfw.mappingCount - 1] = mapping;
+ }
+ }
+ }
+
+ c += length;
+ }
+ else
+ {
+ c += strcspn(c, "\r\n");
+ c += strspn(c, "\r\n");
+ }
+ }
+
+ for (jid = 0; jid <= GLFW_JOYSTICK_LAST; jid++)
+ {
+ _GLFWjoystick* js = _glfw.joysticks + jid;
+ if (js->present)
+ js->mapping = findValidMapping(js);
+ }
+
+ return GLFW_TRUE;
+}
+
+GLFWAPI int glfwJoystickIsGamepad(int jid)
+{
+ _GLFWjoystick* js;
+
+ assert(jid >= GLFW_JOYSTICK_1);
+ assert(jid <= GLFW_JOYSTICK_LAST);
+
+ _GLFW_REQUIRE_INIT_OR_RETURN(GLFW_FALSE);
+
+ if (jid < 0 || jid > GLFW_JOYSTICK_LAST)
+ {
+ _glfwInputError(GLFW_INVALID_ENUM, "Invalid joystick ID %i", jid);
+ return GLFW_FALSE;
+ }
+
+ if (!initJoysticks())
+ return GLFW_FALSE;
+
+ js = _glfw.joysticks + jid;
+ if (!js->present)
+ return GLFW_FALSE;
+
+ if (!_glfw.platform.pollJoystick(js, _GLFW_POLL_PRESENCE))
+ return GLFW_FALSE;
+
+ return js->mapping != NULL;
+}
+
+GLFWAPI const char* glfwGetGamepadName(int jid)
+{
+ _GLFWjoystick* js;
+
+ assert(jid >= GLFW_JOYSTICK_1);
+ assert(jid <= GLFW_JOYSTICK_LAST);
+
+ _GLFW_REQUIRE_INIT_OR_RETURN(NULL);
+
+ if (jid < 0 || jid > GLFW_JOYSTICK_LAST)
+ {
+ _glfwInputError(GLFW_INVALID_ENUM, "Invalid joystick ID %i", jid);
+ return NULL;
+ }
+
+ if (!initJoysticks())
+ return NULL;
+
+ js = _glfw.joysticks + jid;
+ if (!js->present)
+ return NULL;
+
+ if (!_glfw.platform.pollJoystick(js, _GLFW_POLL_PRESENCE))
+ return NULL;
+
+ if (!js->mapping)
+ return NULL;
+
+ return js->mapping->name;
+}
+
+GLFWAPI int glfwGetGamepadState(int jid, GLFWgamepadstate* state)
+{
+ int i;
+ _GLFWjoystick* js;
+
+ assert(jid >= GLFW_JOYSTICK_1);
+ assert(jid <= GLFW_JOYSTICK_LAST);
+ assert(state != NULL);
+
+ memset(state, 0, sizeof(GLFWgamepadstate));
+
+ _GLFW_REQUIRE_INIT_OR_RETURN(GLFW_FALSE);
+
+ if (jid < 0 || jid > GLFW_JOYSTICK_LAST)
+ {
+ _glfwInputError(GLFW_INVALID_ENUM, "Invalid joystick ID %i", jid);
+ return GLFW_FALSE;
+ }
+
+ if (!initJoysticks())
+ return GLFW_FALSE;
+
+ js = _glfw.joysticks + jid;
+ if (!js->present)
+ return GLFW_FALSE;
+
+ if (!_glfw.platform.pollJoystick(js, _GLFW_POLL_ALL))
+ return GLFW_FALSE;
+
+ if (!js->mapping)
+ return GLFW_FALSE;
+
+ for (i = 0; i <= GLFW_GAMEPAD_BUTTON_LAST; i++)
+ {
+ const _GLFWmapelement* e = js->mapping->buttons + i;
+ if (e->type == _GLFW_JOYSTICK_AXIS)
+ {
+ const float value = js->axes[e->index] * e->axisScale + e->axisOffset;
+ // HACK: This should be baked into the value transform
+ // TODO: Bake into transform when implementing output modifiers
+ if (e->axisOffset < 0 || (e->axisOffset == 0 && e->axisScale > 0))
+ {
+ if (value >= 0.f)
+ state->buttons[i] = GLFW_PRESS;
+ }
+ else
+ {
+ if (value <= 0.f)
+ state->buttons[i] = GLFW_PRESS;
+ }
+ }
+ else if (e->type == _GLFW_JOYSTICK_HATBIT)
+ {
+ const unsigned int hat = e->index >> 4;
+ const unsigned int bit = e->index & 0xf;
+ if (js->hats[hat] & bit)
+ state->buttons[i] = GLFW_PRESS;
+ }
+ else if (e->type == _GLFW_JOYSTICK_BUTTON)
+ state->buttons[i] = js->buttons[e->index];
+ }
+
+ for (i = 0; i <= GLFW_GAMEPAD_AXIS_LAST; i++)
+ {
+ const _GLFWmapelement* e = js->mapping->axes + i;
+ if (e->type == _GLFW_JOYSTICK_AXIS)
+ {
+ const float value = js->axes[e->index] * e->axisScale + e->axisOffset;
+ state->axes[i] = _glfw_fminf(_glfw_fmaxf(value, -1.f), 1.f);
+ }
+ else if (e->type == _GLFW_JOYSTICK_HATBIT)
+ {
+ const unsigned int hat = e->index >> 4;
+ const unsigned int bit = e->index & 0xf;
+ if (js->hats[hat] & bit)
+ state->axes[i] = 1.f;
+ else
+ state->axes[i] = -1.f;
+ }
+ else if (e->type == _GLFW_JOYSTICK_BUTTON)
+ state->axes[i] = js->buttons[e->index] * 2.f - 1.f;
+ }
+
+ return GLFW_TRUE;
+}
+
+GLFWAPI void glfwSetClipboardString(GLFWwindow* handle, const char* string)
+{
+ assert(string != NULL);
+
+ _GLFW_REQUIRE_INIT();
+ _glfw.platform.setClipboardString(string);
+}
+
+GLFWAPI const char* glfwGetClipboardString(GLFWwindow* handle)
+{
+ _GLFW_REQUIRE_INIT_OR_RETURN(NULL);
+ return _glfw.platform.getClipboardString();
+}
+
+GLFWAPI double glfwGetTime(void)
+{
+ _GLFW_REQUIRE_INIT_OR_RETURN(0.0);
+ return (double) (_glfwPlatformGetTimerValue() - _glfw.timer.offset) /
+ _glfwPlatformGetTimerFrequency();
+}
+
+GLFWAPI void glfwSetTime(double time)
+{
+ _GLFW_REQUIRE_INIT();
+
+ if (time != time || time < 0.0 || time > 18446744073.0)
+ {
+ _glfwInputError(GLFW_INVALID_VALUE, "Invalid time %f", time);
+ return;
+ }
+
+ _glfw.timer.offset = _glfwPlatformGetTimerValue() -
+ (uint64_t) (time * _glfwPlatformGetTimerFrequency());
+}
+
+GLFWAPI uint64_t glfwGetTimerValue(void)
+{
+ _GLFW_REQUIRE_INIT_OR_RETURN(0);
+ return _glfwPlatformGetTimerValue();
+}
+
+GLFWAPI uint64_t glfwGetTimerFrequency(void)
+{
+ _GLFW_REQUIRE_INIT_OR_RETURN(0);
+ return _glfwPlatformGetTimerFrequency();
+}
+
diff --git a/chromium/third_party/dawn/third_party/glfw/src/internal.h b/chromium/third_party/dawn/third_party/glfw/src/internal.h
new file mode 100644
index 00000000000..7babe7e8359
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/src/internal.h
@@ -0,0 +1,1010 @@
+//========================================================================
+// GLFW 3.4 - www.glfw.org
+//------------------------------------------------------------------------
+// Copyright (c) 2002-2006 Marcus Geelnard
+// Copyright (c) 2006-2019 Camilla Löwy <elmindreda@glfw.org>
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would
+// be appreciated but is not required.
+//
+// 2. Altered source versions must be plainly marked as such, and must not
+// be misrepresented as being the original software.
+//
+// 3. This notice may not be removed or altered from any source
+// distribution.
+//
+//========================================================================
+
+#pragma once
+
+#if defined(_GLFW_USE_CONFIG_H)
+ #include "glfw_config.h"
+#endif
+
+#if defined(GLFW_INCLUDE_GLCOREARB) || \
+ defined(GLFW_INCLUDE_ES1) || \
+ defined(GLFW_INCLUDE_ES2) || \
+ defined(GLFW_INCLUDE_ES3) || \
+ defined(GLFW_INCLUDE_ES31) || \
+ defined(GLFW_INCLUDE_ES32) || \
+ defined(GLFW_INCLUDE_NONE) || \
+ defined(GLFW_INCLUDE_GLEXT) || \
+ defined(GLFW_INCLUDE_GLU) || \
+ defined(GLFW_INCLUDE_VULKAN) || \
+ defined(GLFW_DLL)
+ #error "You must not define any header option macros when compiling GLFW"
+#endif
+
+#define GLFW_INCLUDE_NONE
+#include "../include/GLFW/glfw3.h"
+
+#define _GLFW_INSERT_FIRST 0
+#define _GLFW_INSERT_LAST 1
+
+#define _GLFW_POLL_PRESENCE 0
+#define _GLFW_POLL_AXES 1
+#define _GLFW_POLL_BUTTONS 2
+#define _GLFW_POLL_ALL (_GLFW_POLL_AXES | _GLFW_POLL_BUTTONS)
+
+#define _GLFW_MESSAGE_SIZE 1024
+
+typedef int GLFWbool;
+typedef void (*GLFWproc)(void);
+
+typedef struct _GLFWerror _GLFWerror;
+typedef struct _GLFWinitconfig _GLFWinitconfig;
+typedef struct _GLFWwndconfig _GLFWwndconfig;
+typedef struct _GLFWctxconfig _GLFWctxconfig;
+typedef struct _GLFWfbconfig _GLFWfbconfig;
+typedef struct _GLFWcontext _GLFWcontext;
+typedef struct _GLFWwindow _GLFWwindow;
+typedef struct _GLFWplatform _GLFWplatform;
+typedef struct _GLFWlibrary _GLFWlibrary;
+typedef struct _GLFWmonitor _GLFWmonitor;
+typedef struct _GLFWcursor _GLFWcursor;
+typedef struct _GLFWmapelement _GLFWmapelement;
+typedef struct _GLFWmapping _GLFWmapping;
+typedef struct _GLFWjoystick _GLFWjoystick;
+typedef struct _GLFWtls _GLFWtls;
+typedef struct _GLFWmutex _GLFWmutex;
+
+#define GL_VERSION 0x1f02
+#define GL_NONE 0
+#define GL_COLOR_BUFFER_BIT 0x00004000
+#define GL_UNSIGNED_BYTE 0x1401
+#define GL_EXTENSIONS 0x1f03
+#define GL_NUM_EXTENSIONS 0x821d
+#define GL_CONTEXT_FLAGS 0x821e
+#define GL_CONTEXT_FLAG_FORWARD_COMPATIBLE_BIT 0x00000001
+#define GL_CONTEXT_FLAG_DEBUG_BIT 0x00000002
+#define GL_CONTEXT_PROFILE_MASK 0x9126
+#define GL_CONTEXT_COMPATIBILITY_PROFILE_BIT 0x00000002
+#define GL_CONTEXT_CORE_PROFILE_BIT 0x00000001
+#define GL_RESET_NOTIFICATION_STRATEGY_ARB 0x8256
+#define GL_LOSE_CONTEXT_ON_RESET_ARB 0x8252
+#define GL_NO_RESET_NOTIFICATION_ARB 0x8261
+#define GL_CONTEXT_RELEASE_BEHAVIOR 0x82fb
+#define GL_CONTEXT_RELEASE_BEHAVIOR_FLUSH 0x82fc
+#define GL_CONTEXT_FLAG_NO_ERROR_BIT_KHR 0x00000008
+
+typedef int GLint;
+typedef unsigned int GLuint;
+typedef unsigned int GLenum;
+typedef unsigned int GLbitfield;
+typedef unsigned char GLubyte;
+
+typedef void (APIENTRY * PFNGLCLEARPROC)(GLbitfield);
+typedef const GLubyte* (APIENTRY * PFNGLGETSTRINGPROC)(GLenum);
+typedef void (APIENTRY * PFNGLGETINTEGERVPROC)(GLenum,GLint*);
+typedef const GLubyte* (APIENTRY * PFNGLGETSTRINGIPROC)(GLenum,GLuint);
+
+#if defined(_GLFW_WIN32)
+ #define EGLAPIENTRY __stdcall
+#else
+ #define EGLAPIENTRY
+#endif
+
+#define EGL_SUCCESS 0x3000
+#define EGL_NOT_INITIALIZED 0x3001
+#define EGL_BAD_ACCESS 0x3002
+#define EGL_BAD_ALLOC 0x3003
+#define EGL_BAD_ATTRIBUTE 0x3004
+#define EGL_BAD_CONFIG 0x3005
+#define EGL_BAD_CONTEXT 0x3006
+#define EGL_BAD_CURRENT_SURFACE 0x3007
+#define EGL_BAD_DISPLAY 0x3008
+#define EGL_BAD_MATCH 0x3009
+#define EGL_BAD_NATIVE_PIXMAP 0x300a
+#define EGL_BAD_NATIVE_WINDOW 0x300b
+#define EGL_BAD_PARAMETER 0x300c
+#define EGL_BAD_SURFACE 0x300d
+#define EGL_CONTEXT_LOST 0x300e
+#define EGL_COLOR_BUFFER_TYPE 0x303f
+#define EGL_RGB_BUFFER 0x308e
+#define EGL_SURFACE_TYPE 0x3033
+#define EGL_WINDOW_BIT 0x0004
+#define EGL_RENDERABLE_TYPE 0x3040
+#define EGL_OPENGL_ES_BIT 0x0001
+#define EGL_OPENGL_ES2_BIT 0x0004
+#define EGL_OPENGL_BIT 0x0008
+#define EGL_ALPHA_SIZE 0x3021
+#define EGL_BLUE_SIZE 0x3022
+#define EGL_GREEN_SIZE 0x3023
+#define EGL_RED_SIZE 0x3024
+#define EGL_DEPTH_SIZE 0x3025
+#define EGL_STENCIL_SIZE 0x3026
+#define EGL_SAMPLES 0x3031
+#define EGL_OPENGL_ES_API 0x30a0
+#define EGL_OPENGL_API 0x30a2
+#define EGL_NONE 0x3038
+#define EGL_RENDER_BUFFER 0x3086
+#define EGL_SINGLE_BUFFER 0x3085
+#define EGL_EXTENSIONS 0x3055
+#define EGL_CONTEXT_CLIENT_VERSION 0x3098
+#define EGL_NATIVE_VISUAL_ID 0x302e
+#define EGL_NO_SURFACE ((EGLSurface) 0)
+#define EGL_NO_DISPLAY ((EGLDisplay) 0)
+#define EGL_NO_CONTEXT ((EGLContext) 0)
+#define EGL_DEFAULT_DISPLAY ((EGLNativeDisplayType) 0)
+
+#define EGL_CONTEXT_OPENGL_FORWARD_COMPATIBLE_BIT_KHR 0x00000002
+#define EGL_CONTEXT_OPENGL_CORE_PROFILE_BIT_KHR 0x00000001
+#define EGL_CONTEXT_OPENGL_COMPATIBILITY_PROFILE_BIT_KHR 0x00000002
+#define EGL_CONTEXT_OPENGL_DEBUG_BIT_KHR 0x00000001
+#define EGL_CONTEXT_OPENGL_RESET_NOTIFICATION_STRATEGY_KHR 0x31bd
+#define EGL_NO_RESET_NOTIFICATION_KHR 0x31be
+#define EGL_LOSE_CONTEXT_ON_RESET_KHR 0x31bf
+#define EGL_CONTEXT_OPENGL_ROBUST_ACCESS_BIT_KHR 0x00000004
+#define EGL_CONTEXT_MAJOR_VERSION_KHR 0x3098
+#define EGL_CONTEXT_MINOR_VERSION_KHR 0x30fb
+#define EGL_CONTEXT_OPENGL_PROFILE_MASK_KHR 0x30fd
+#define EGL_CONTEXT_FLAGS_KHR 0x30fc
+#define EGL_CONTEXT_OPENGL_NO_ERROR_KHR 0x31b3
+#define EGL_GL_COLORSPACE_KHR 0x309d
+#define EGL_GL_COLORSPACE_SRGB_KHR 0x3089
+#define EGL_CONTEXT_RELEASE_BEHAVIOR_KHR 0x2097
+#define EGL_CONTEXT_RELEASE_BEHAVIOR_NONE_KHR 0
+#define EGL_CONTEXT_RELEASE_BEHAVIOR_FLUSH_KHR 0x2098
+#define EGL_PLATFORM_X11_EXT 0x31d5
+#define EGL_PLATFORM_WAYLAND_EXT 0x31d8
+#define EGL_PRESENT_OPAQUE_EXT 0x31df
+#define EGL_PLATFORM_ANGLE_ANGLE 0x3202
+#define EGL_PLATFORM_ANGLE_TYPE_ANGLE 0x3203
+#define EGL_PLATFORM_ANGLE_TYPE_OPENGL_ANGLE 0x320d
+#define EGL_PLATFORM_ANGLE_TYPE_OPENGLES_ANGLE 0x320e
+#define EGL_PLATFORM_ANGLE_TYPE_D3D9_ANGLE 0x3207
+#define EGL_PLATFORM_ANGLE_TYPE_D3D11_ANGLE 0x3208
+#define EGL_PLATFORM_ANGLE_TYPE_VULKAN_ANGLE 0x3450
+#define EGL_PLATFORM_ANGLE_TYPE_METAL_ANGLE 0x3489
+#define EGL_PLATFORM_ANGLE_NATIVE_PLATFORM_TYPE_ANGLE 0x348f
+
+typedef int EGLint;
+typedef unsigned int EGLBoolean;
+typedef unsigned int EGLenum;
+typedef void* EGLConfig;
+typedef void* EGLContext;
+typedef void* EGLDisplay;
+typedef void* EGLSurface;
+
+typedef void* EGLNativeDisplayType;
+typedef void* EGLNativeWindowType;
+
+// EGL function pointer typedefs
+typedef EGLBoolean (EGLAPIENTRY * PFN_eglGetConfigAttrib)(EGLDisplay,EGLConfig,EGLint,EGLint*);
+typedef EGLBoolean (EGLAPIENTRY * PFN_eglGetConfigs)(EGLDisplay,EGLConfig*,EGLint,EGLint*);
+typedef EGLDisplay (EGLAPIENTRY * PFN_eglGetDisplay)(EGLNativeDisplayType);
+typedef EGLint (EGLAPIENTRY * PFN_eglGetError)(void);
+typedef EGLBoolean (EGLAPIENTRY * PFN_eglInitialize)(EGLDisplay,EGLint*,EGLint*);
+typedef EGLBoolean (EGLAPIENTRY * PFN_eglTerminate)(EGLDisplay);
+typedef EGLBoolean (EGLAPIENTRY * PFN_eglBindAPI)(EGLenum);
+typedef EGLContext (EGLAPIENTRY * PFN_eglCreateContext)(EGLDisplay,EGLConfig,EGLContext,const EGLint*);
+typedef EGLBoolean (EGLAPIENTRY * PFN_eglDestroySurface)(EGLDisplay,EGLSurface);
+typedef EGLBoolean (EGLAPIENTRY * PFN_eglDestroyContext)(EGLDisplay,EGLContext);
+typedef EGLSurface (EGLAPIENTRY * PFN_eglCreateWindowSurface)(EGLDisplay,EGLConfig,EGLNativeWindowType,const EGLint*);
+typedef EGLBoolean (EGLAPIENTRY * PFN_eglMakeCurrent)(EGLDisplay,EGLSurface,EGLSurface,EGLContext);
+typedef EGLBoolean (EGLAPIENTRY * PFN_eglSwapBuffers)(EGLDisplay,EGLSurface);
+typedef EGLBoolean (EGLAPIENTRY * PFN_eglSwapInterval)(EGLDisplay,EGLint);
+typedef const char* (EGLAPIENTRY * PFN_eglQueryString)(EGLDisplay,EGLint);
+typedef GLFWglproc (EGLAPIENTRY * PFN_eglGetProcAddress)(const char*);
+#define eglGetConfigAttrib _glfw.egl.GetConfigAttrib
+#define eglGetConfigs _glfw.egl.GetConfigs
+#define eglGetDisplay _glfw.egl.GetDisplay
+#define eglGetError _glfw.egl.GetError
+#define eglInitialize _glfw.egl.Initialize
+#define eglTerminate _glfw.egl.Terminate
+#define eglBindAPI _glfw.egl.BindAPI
+#define eglCreateContext _glfw.egl.CreateContext
+#define eglDestroySurface _glfw.egl.DestroySurface
+#define eglDestroyContext _glfw.egl.DestroyContext
+#define eglCreateWindowSurface _glfw.egl.CreateWindowSurface
+#define eglMakeCurrent _glfw.egl.MakeCurrent
+#define eglSwapBuffers _glfw.egl.SwapBuffers
+#define eglSwapInterval _glfw.egl.SwapInterval
+#define eglQueryString _glfw.egl.QueryString
+#define eglGetProcAddress _glfw.egl.GetProcAddress
+
+typedef EGLDisplay (EGLAPIENTRY * PFNEGLGETPLATFORMDISPLAYEXTPROC)(EGLenum,void*,const EGLint*);
+typedef EGLSurface (EGLAPIENTRY * PFNEGLCREATEPLATFORMWINDOWSURFACEEXTPROC)(EGLDisplay,EGLConfig,void*,const EGLint*);
+#define eglGetPlatformDisplayEXT _glfw.egl.GetPlatformDisplayEXT
+#define eglCreatePlatformWindowSurfaceEXT _glfw.egl.CreatePlatformWindowSurfaceEXT
+
+#define OSMESA_RGBA 0x1908
+#define OSMESA_FORMAT 0x22
+#define OSMESA_DEPTH_BITS 0x30
+#define OSMESA_STENCIL_BITS 0x31
+#define OSMESA_ACCUM_BITS 0x32
+#define OSMESA_PROFILE 0x33
+#define OSMESA_CORE_PROFILE 0x34
+#define OSMESA_COMPAT_PROFILE 0x35
+#define OSMESA_CONTEXT_MAJOR_VERSION 0x36
+#define OSMESA_CONTEXT_MINOR_VERSION 0x37
+
+typedef void* OSMesaContext;
+typedef void (*OSMESAproc)(void);
+
+typedef OSMesaContext (GLAPIENTRY * PFN_OSMesaCreateContextExt)(GLenum,GLint,GLint,GLint,OSMesaContext);
+typedef OSMesaContext (GLAPIENTRY * PFN_OSMesaCreateContextAttribs)(const int*,OSMesaContext);
+typedef void (GLAPIENTRY * PFN_OSMesaDestroyContext)(OSMesaContext);
+typedef int (GLAPIENTRY * PFN_OSMesaMakeCurrent)(OSMesaContext,void*,int,int,int);
+typedef int (GLAPIENTRY * PFN_OSMesaGetColorBuffer)(OSMesaContext,int*,int*,int*,void**);
+typedef int (GLAPIENTRY * PFN_OSMesaGetDepthBuffer)(OSMesaContext,int*,int*,int*,void**);
+typedef GLFWglproc (GLAPIENTRY * PFN_OSMesaGetProcAddress)(const char*);
+#define OSMesaCreateContextExt _glfw.osmesa.CreateContextExt
+#define OSMesaCreateContextAttribs _glfw.osmesa.CreateContextAttribs
+#define OSMesaDestroyContext _glfw.osmesa.DestroyContext
+#define OSMesaMakeCurrent _glfw.osmesa.MakeCurrent
+#define OSMesaGetColorBuffer _glfw.osmesa.GetColorBuffer
+#define OSMesaGetDepthBuffer _glfw.osmesa.GetDepthBuffer
+#define OSMesaGetProcAddress _glfw.osmesa.GetProcAddress
+
+#define VK_NULL_HANDLE 0
+
+typedef void* VkInstance;
+typedef void* VkPhysicalDevice;
+typedef uint64_t VkSurfaceKHR;
+typedef uint32_t VkFlags;
+typedef uint32_t VkBool32;
+
+typedef enum VkStructureType
+{
+ VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR = 1000004000,
+ VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR = 1000005000,
+ VK_STRUCTURE_TYPE_WAYLAND_SURFACE_CREATE_INFO_KHR = 1000006000,
+ VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR = 1000009000,
+ VK_STRUCTURE_TYPE_MACOS_SURFACE_CREATE_INFO_MVK = 1000123000,
+ VK_STRUCTURE_TYPE_METAL_SURFACE_CREATE_INFO_EXT = 1000217000,
+ VK_STRUCTURE_TYPE_MAX_ENUM = 0x7FFFFFFF
+} VkStructureType;
+
+typedef enum VkResult
+{
+ VK_SUCCESS = 0,
+ VK_NOT_READY = 1,
+ VK_TIMEOUT = 2,
+ VK_EVENT_SET = 3,
+ VK_EVENT_RESET = 4,
+ VK_INCOMPLETE = 5,
+ VK_ERROR_OUT_OF_HOST_MEMORY = -1,
+ VK_ERROR_OUT_OF_DEVICE_MEMORY = -2,
+ VK_ERROR_INITIALIZATION_FAILED = -3,
+ VK_ERROR_DEVICE_LOST = -4,
+ VK_ERROR_MEMORY_MAP_FAILED = -5,
+ VK_ERROR_LAYER_NOT_PRESENT = -6,
+ VK_ERROR_EXTENSION_NOT_PRESENT = -7,
+ VK_ERROR_FEATURE_NOT_PRESENT = -8,
+ VK_ERROR_INCOMPATIBLE_DRIVER = -9,
+ VK_ERROR_TOO_MANY_OBJECTS = -10,
+ VK_ERROR_FORMAT_NOT_SUPPORTED = -11,
+ VK_ERROR_SURFACE_LOST_KHR = -1000000000,
+ VK_SUBOPTIMAL_KHR = 1000001003,
+ VK_ERROR_OUT_OF_DATE_KHR = -1000001004,
+ VK_ERROR_INCOMPATIBLE_DISPLAY_KHR = -1000003001,
+ VK_ERROR_NATIVE_WINDOW_IN_USE_KHR = -1000000001,
+ VK_ERROR_VALIDATION_FAILED_EXT = -1000011001,
+ VK_RESULT_MAX_ENUM = 0x7FFFFFFF
+} VkResult;
+
+typedef struct VkAllocationCallbacks VkAllocationCallbacks;
+
+typedef struct VkExtensionProperties
+{
+ char extensionName[256];
+ uint32_t specVersion;
+} VkExtensionProperties;
+
+typedef void (APIENTRY * PFN_vkVoidFunction)(void);
+
+typedef PFN_vkVoidFunction (APIENTRY * PFN_vkGetInstanceProcAddr)(VkInstance,const char*);
+typedef VkResult (APIENTRY * PFN_vkEnumerateInstanceExtensionProperties)(const char*,uint32_t*,VkExtensionProperties*);
+#define vkGetInstanceProcAddr _glfw.vk.GetInstanceProcAddr
+
+#include "platform.h"
+
+// Constructs a version number string from the public header macros
+#define _GLFW_CONCAT_VERSION(m, n, r) #m "." #n "." #r
+#define _GLFW_MAKE_VERSION(m, n, r) _GLFW_CONCAT_VERSION(m, n, r)
+#define _GLFW_VERSION_NUMBER _GLFW_MAKE_VERSION(GLFW_VERSION_MAJOR, \
+ GLFW_VERSION_MINOR, \
+ GLFW_VERSION_REVISION)
+
+// Checks whether the library has been initialized
+#define _GLFW_REQUIRE_INIT() \
+ if (!_glfw.initialized) \
+ { \
+ _glfwInputError(GLFW_NOT_INITIALIZED, NULL); \
+ return; \
+ }
+#define _GLFW_REQUIRE_INIT_OR_RETURN(x) \
+ if (!_glfw.initialized) \
+ { \
+ _glfwInputError(GLFW_NOT_INITIALIZED, NULL); \
+ return x; \
+ }
+
+// Swaps the provided pointers
+#define _GLFW_SWAP(type, x, y) \
+ { \
+ type t; \
+ t = x; \
+ x = y; \
+ y = t; \
+ }
+
+// Per-thread error structure
+//
+struct _GLFWerror
+{
+ _GLFWerror* next;
+ int code;
+ char description[_GLFW_MESSAGE_SIZE];
+};
+
+// Initialization configuration
+//
+// Parameters relating to the initialization of the library
+//
+struct _GLFWinitconfig
+{
+ GLFWbool hatButtons;
+ int angleType;
+ int platformID;
+ PFN_vkGetInstanceProcAddr vulkanLoader;
+ struct {
+ GLFWbool menubar;
+ GLFWbool chdir;
+ } ns;
+ struct {
+ GLFWbool xcbVulkanSurface;
+ } x11;
+};
+
+// Window configuration
+//
+// Parameters relating to the creation of the window but not directly related
+// to the framebuffer. This is used to pass window creation parameters from
+// shared code to the platform API.
+//
+struct _GLFWwndconfig
+{
+ int width;
+ int height;
+ const char* title;
+ GLFWbool resizable;
+ GLFWbool visible;
+ GLFWbool decorated;
+ GLFWbool focused;
+ GLFWbool autoIconify;
+ GLFWbool floating;
+ GLFWbool maximized;
+ GLFWbool centerCursor;
+ GLFWbool focusOnShow;
+ GLFWbool mousePassthrough;
+ GLFWbool scaleToMonitor;
+ struct {
+ GLFWbool retina;
+ char frameName[256];
+ } ns;
+ struct {
+ char className[256];
+ char instanceName[256];
+ } x11;
+ struct {
+ GLFWbool keymenu;
+ } win32;
+};
+
+// Context configuration
+//
+// Parameters relating to the creation of the context but not directly related
+// to the framebuffer. This is used to pass context creation parameters from
+// shared code to the platform API.
+//
+struct _GLFWctxconfig
+{
+ int client;
+ int source;
+ int major;
+ int minor;
+ GLFWbool forward;
+ GLFWbool debug;
+ GLFWbool noerror;
+ int profile;
+ int robustness;
+ int release;
+ _GLFWwindow* share;
+ struct {
+ GLFWbool offline;
+ } nsgl;
+};
+
+// Framebuffer configuration
+//
+// This describes buffers and their sizes. It also contains
+// a platform-specific ID used to map back to the backend API object.
+//
+// It is used to pass framebuffer parameters from shared code to the platform
+// API and also to enumerate and select available framebuffer configs.
+//
+struct _GLFWfbconfig
+{
+ int redBits;
+ int greenBits;
+ int blueBits;
+ int alphaBits;
+ int depthBits;
+ int stencilBits;
+ int accumRedBits;
+ int accumGreenBits;
+ int accumBlueBits;
+ int accumAlphaBits;
+ int auxBuffers;
+ GLFWbool stereo;
+ int samples;
+ GLFWbool sRGB;
+ GLFWbool doublebuffer;
+ GLFWbool transparent;
+ uintptr_t handle;
+};
+
+// Context structure
+//
+struct _GLFWcontext
+{
+ int client;
+ int source;
+ int major, minor, revision;
+ GLFWbool forward, debug, noerror;
+ int profile;
+ int robustness;
+ int release;
+
+ PFNGLGETSTRINGIPROC GetStringi;
+ PFNGLGETINTEGERVPROC GetIntegerv;
+ PFNGLGETSTRINGPROC GetString;
+
+ void (*makeCurrent)(_GLFWwindow*);
+ void (*swapBuffers)(_GLFWwindow*);
+ void (*swapInterval)(int);
+ int (*extensionSupported)(const char*);
+ GLFWglproc (*getProcAddress)(const char*);
+ void (*destroy)(_GLFWwindow*);
+
+ struct {
+ EGLConfig config;
+ EGLContext handle;
+ EGLSurface surface;
+ void* client;
+ } egl;
+
+ struct {
+ OSMesaContext handle;
+ int width;
+ int height;
+ void* buffer;
+ } osmesa;
+
+ // This is defined in platform.h
+ GLFW_PLATFORM_CONTEXT_STATE
+};
+
+// Window and context structure
+//
+struct _GLFWwindow
+{
+ struct _GLFWwindow* next;
+
+ // Window settings and state
+ GLFWbool resizable;
+ GLFWbool decorated;
+ GLFWbool autoIconify;
+ GLFWbool floating;
+ GLFWbool focusOnShow;
+ GLFWbool mousePassthrough;
+ GLFWbool shouldClose;
+ void* userPointer;
+ GLFWbool doublebuffer;
+ GLFWvidmode videoMode;
+ _GLFWmonitor* monitor;
+ _GLFWcursor* cursor;
+
+ int minwidth, minheight;
+ int maxwidth, maxheight;
+ int numer, denom;
+
+ GLFWbool stickyKeys;
+ GLFWbool stickyMouseButtons;
+ GLFWbool lockKeyMods;
+ int cursorMode;
+ char mouseButtons[GLFW_MOUSE_BUTTON_LAST + 1];
+ char keys[GLFW_KEY_LAST + 1];
+ // Virtual cursor position when cursor is disabled
+ double virtualCursorPosX, virtualCursorPosY;
+ GLFWbool rawMouseMotion;
+
+ _GLFWcontext context;
+
+ struct {
+ GLFWwindowposfun pos;
+ GLFWwindowsizefun size;
+ GLFWwindowclosefun close;
+ GLFWwindowrefreshfun refresh;
+ GLFWwindowfocusfun focus;
+ GLFWwindowiconifyfun iconify;
+ GLFWwindowmaximizefun maximize;
+ GLFWframebuffersizefun fbsize;
+ GLFWwindowcontentscalefun scale;
+ GLFWmousebuttonfun mouseButton;
+ GLFWcursorposfun cursorPos;
+ GLFWcursorenterfun cursorEnter;
+ GLFWscrollfun scroll;
+ GLFWkeyfun key;
+ GLFWcharfun character;
+ GLFWcharmodsfun charmods;
+ GLFWdropfun drop;
+ } callbacks;
+
+ // This is defined in platform.h
+ GLFW_PLATFORM_WINDOW_STATE
+};
+
+// Monitor structure
+//
+struct _GLFWmonitor
+{
+ char name[128];
+ void* userPointer;
+
+ // Physical dimensions in millimeters.
+ int widthMM, heightMM;
+
+ // The window whose video mode is current on this monitor
+ _GLFWwindow* window;
+
+ GLFWvidmode* modes;
+ int modeCount;
+ GLFWvidmode currentMode;
+
+ GLFWgammaramp originalRamp;
+ GLFWgammaramp currentRamp;
+
+ // This is defined in platform.h
+ GLFW_PLATFORM_MONITOR_STATE
+};
+
+// Cursor structure
+//
+struct _GLFWcursor
+{
+ _GLFWcursor* next;
+ // This is defined in platform.h
+ GLFW_PLATFORM_CURSOR_STATE
+};
+
+// Gamepad mapping element structure
+//
+struct _GLFWmapelement
+{
+ uint8_t type;
+ uint8_t index;
+ int8_t axisScale;
+ int8_t axisOffset;
+};
+
+// Gamepad mapping structure
+//
+struct _GLFWmapping
+{
+ char name[128];
+ char guid[33];
+ _GLFWmapelement buttons[15];
+ _GLFWmapelement axes[6];
+};
+
+// Joystick structure
+//
+struct _GLFWjoystick
+{
+ GLFWbool present;
+ float* axes;
+ int axisCount;
+ unsigned char* buttons;
+ int buttonCount;
+ unsigned char* hats;
+ int hatCount;
+ char name[128];
+ void* userPointer;
+ char guid[33];
+ _GLFWmapping* mapping;
+
+ // This is defined in platform.h
+ GLFW_PLATFORM_JOYSTICK_STATE
+};
+
+// Thread local storage structure
+//
+struct _GLFWtls
+{
+ // This is defined in platform.h
+ GLFW_PLATFORM_TLS_STATE
+};
+
+// Mutex structure
+//
+struct _GLFWmutex
+{
+ // This is defined in platform.h
+ GLFW_PLATFORM_MUTEX_STATE
+};
+
+// Platform API structure
+//
+struct _GLFWplatform
+{
+ int platformID;
+ // init
+ GLFWbool (*init)(void);
+ void (*terminate)(void);
+ // input
+ void (*getCursorPos)(_GLFWwindow*,double*,double*);
+ void (*setCursorPos)(_GLFWwindow*,double,double);
+ void (*setCursorMode)(_GLFWwindow*,int);
+ void (*setRawMouseMotion)(_GLFWwindow*,GLFWbool);
+ GLFWbool (*rawMouseMotionSupported)(void);
+ int (*createCursor)(_GLFWcursor*,const GLFWimage*,int,int);
+ int (*createStandardCursor)(_GLFWcursor*,int);
+ void (*destroyCursor)(_GLFWcursor*);
+ void (*setCursor)(_GLFWwindow*,_GLFWcursor*);
+ const char* (*getScancodeName)(int);
+ int (*getKeyScancode)(int);
+ void (*setClipboardString)(const char*);
+ const char* (*getClipboardString)(void);
+ GLFWbool (*initJoysticks)(void);
+ void (*terminateJoysticks)(void);
+ int (*pollJoystick)(_GLFWjoystick*,int);
+ const char* (*getMappingName)(void);
+ void (*updateGamepadGUID)(char*);
+ // monitor
+ void (*freeMonitor)(_GLFWmonitor*);
+ void (*getMonitorPos)(_GLFWmonitor*,int*,int*);
+ void (*getMonitorContentScale)(_GLFWmonitor*,float*,float*);
+ void (*getMonitorWorkarea)(_GLFWmonitor*,int*,int*,int*,int*);
+ GLFWvidmode* (*getVideoModes)(_GLFWmonitor*,int*);
+ void (*getVideoMode)(_GLFWmonitor*,GLFWvidmode*);
+ GLFWbool (*getGammaRamp)(_GLFWmonitor*,GLFWgammaramp*);
+ void (*setGammaRamp)(_GLFWmonitor*,const GLFWgammaramp*);
+ // window
+ int (*createWindow)(_GLFWwindow*,const _GLFWwndconfig*,const _GLFWctxconfig*,const _GLFWfbconfig*);
+ void (*destroyWindow)(_GLFWwindow*);
+ void (*setWindowTitle)(_GLFWwindow*,const char*);
+ void (*setWindowIcon)(_GLFWwindow*,int,const GLFWimage*);
+ void (*getWindowPos)(_GLFWwindow*,int*,int*);
+ void (*setWindowPos)(_GLFWwindow*,int,int);
+ void (*getWindowSize)(_GLFWwindow*,int*,int*);
+ void (*setWindowSize)(_GLFWwindow*,int,int);
+ void (*setWindowSizeLimits)(_GLFWwindow*,int,int,int,int);
+ void (*setWindowAspectRatio)(_GLFWwindow*,int,int);
+ void (*getFramebufferSize)(_GLFWwindow*,int*,int*);
+ void (*getWindowFrameSize)(_GLFWwindow*,int*,int*,int*,int*);
+ void (*getWindowContentScale)(_GLFWwindow*,float*,float*);
+ void (*iconifyWindow)(_GLFWwindow*);
+ void (*restoreWindow)(_GLFWwindow*);
+ void (*maximizeWindow)(_GLFWwindow*);
+ void (*showWindow)(_GLFWwindow*);
+ void (*hideWindow)(_GLFWwindow*);
+ void (*requestWindowAttention)(_GLFWwindow*);
+ void (*focusWindow)(_GLFWwindow*);
+ void (*setWindowMonitor)(_GLFWwindow*,_GLFWmonitor*,int,int,int,int,int);
+ int (*windowFocused)(_GLFWwindow*);
+ int (*windowIconified)(_GLFWwindow*);
+ int (*windowVisible)(_GLFWwindow*);
+ int (*windowMaximized)(_GLFWwindow*);
+ int (*windowHovered)(_GLFWwindow*);
+ int (*framebufferTransparent)(_GLFWwindow*);
+ float (*getWindowOpacity)(_GLFWwindow*);
+ void (*setWindowResizable)(_GLFWwindow*,GLFWbool);
+ void (*setWindowDecorated)(_GLFWwindow*,GLFWbool);
+ void (*setWindowFloating)(_GLFWwindow*,GLFWbool);
+ void (*setWindowOpacity)(_GLFWwindow*,float);
+ void (*setWindowMousePassthrough)(_GLFWwindow*,GLFWbool);
+ void (*pollEvents)(void);
+ void (*waitEvents)(void);
+ void (*waitEventsTimeout)(double);
+ void (*postEmptyEvent)(void);
+ // EGL
+ EGLenum (*getEGLPlatform)(EGLint**);
+ EGLNativeDisplayType (*getEGLNativeDisplay)(void);
+ EGLNativeWindowType (*getEGLNativeWindow)(_GLFWwindow*);
+ // vulkan
+ void (*getRequiredInstanceExtensions)(char**);
+ int (*getPhysicalDevicePresentationSupport)(VkInstance,VkPhysicalDevice,uint32_t);
+ VkResult (*createWindowSurface)(VkInstance,_GLFWwindow*,const VkAllocationCallbacks*,VkSurfaceKHR*);
+};
+
+// Library global data
+//
+struct _GLFWlibrary
+{
+ GLFWbool initialized;
+ GLFWallocator allocator;
+
+ _GLFWplatform platform;
+
+ struct {
+ _GLFWinitconfig init;
+ _GLFWfbconfig framebuffer;
+ _GLFWwndconfig window;
+ _GLFWctxconfig context;
+ int refreshRate;
+ } hints;
+
+ _GLFWerror* errorListHead;
+ _GLFWcursor* cursorListHead;
+ _GLFWwindow* windowListHead;
+
+ _GLFWmonitor** monitors;
+ int monitorCount;
+
+ GLFWbool joysticksInitialized;
+ _GLFWjoystick joysticks[GLFW_JOYSTICK_LAST + 1];
+ _GLFWmapping* mappings;
+ int mappingCount;
+
+ _GLFWtls errorSlot;
+ _GLFWtls contextSlot;
+ _GLFWmutex errorLock;
+
+ struct {
+ uint64_t offset;
+ // This is defined in platform.h
+ GLFW_PLATFORM_LIBRARY_TIMER_STATE
+ } timer;
+
+ struct {
+ EGLenum platform;
+ EGLDisplay display;
+ EGLint major, minor;
+ GLFWbool prefix;
+
+ GLFWbool KHR_create_context;
+ GLFWbool KHR_create_context_no_error;
+ GLFWbool KHR_gl_colorspace;
+ GLFWbool KHR_get_all_proc_addresses;
+ GLFWbool KHR_context_flush_control;
+ GLFWbool EXT_client_extensions;
+ GLFWbool EXT_platform_base;
+ GLFWbool EXT_platform_x11;
+ GLFWbool EXT_platform_wayland;
+ GLFWbool EXT_present_opaque;
+ GLFWbool ANGLE_platform_angle;
+ GLFWbool ANGLE_platform_angle_opengl;
+ GLFWbool ANGLE_platform_angle_d3d;
+ GLFWbool ANGLE_platform_angle_vulkan;
+ GLFWbool ANGLE_platform_angle_metal;
+
+ void* handle;
+
+ PFN_eglGetConfigAttrib GetConfigAttrib;
+ PFN_eglGetConfigs GetConfigs;
+ PFN_eglGetDisplay GetDisplay;
+ PFN_eglGetError GetError;
+ PFN_eglInitialize Initialize;
+ PFN_eglTerminate Terminate;
+ PFN_eglBindAPI BindAPI;
+ PFN_eglCreateContext CreateContext;
+ PFN_eglDestroySurface DestroySurface;
+ PFN_eglDestroyContext DestroyContext;
+ PFN_eglCreateWindowSurface CreateWindowSurface;
+ PFN_eglMakeCurrent MakeCurrent;
+ PFN_eglSwapBuffers SwapBuffers;
+ PFN_eglSwapInterval SwapInterval;
+ PFN_eglQueryString QueryString;
+ PFN_eglGetProcAddress GetProcAddress;
+
+ PFNEGLGETPLATFORMDISPLAYEXTPROC GetPlatformDisplayEXT;
+ PFNEGLCREATEPLATFORMWINDOWSURFACEEXTPROC CreatePlatformWindowSurfaceEXT;
+ } egl;
+
+ struct {
+ void* handle;
+
+ PFN_OSMesaCreateContextExt CreateContextExt;
+ PFN_OSMesaCreateContextAttribs CreateContextAttribs;
+ PFN_OSMesaDestroyContext DestroyContext;
+ PFN_OSMesaMakeCurrent MakeCurrent;
+ PFN_OSMesaGetColorBuffer GetColorBuffer;
+ PFN_OSMesaGetDepthBuffer GetDepthBuffer;
+ PFN_OSMesaGetProcAddress GetProcAddress;
+
+ } osmesa;
+
+ struct {
+ GLFWbool available;
+ void* handle;
+ char* extensions[2];
+ PFN_vkGetInstanceProcAddr GetInstanceProcAddr;
+ GLFWbool KHR_surface;
+ GLFWbool KHR_win32_surface;
+ GLFWbool MVK_macos_surface;
+ GLFWbool EXT_metal_surface;
+ GLFWbool KHR_xlib_surface;
+ GLFWbool KHR_xcb_surface;
+ GLFWbool KHR_wayland_surface;
+ } vk;
+
+ struct {
+ GLFWmonitorfun monitor;
+ GLFWjoystickfun joystick;
+ } callbacks;
+
+ // These are defined in platform.h
+ GLFW_PLATFORM_LIBRARY_WINDOW_STATE
+ GLFW_PLATFORM_LIBRARY_CONTEXT_STATE
+ GLFW_PLATFORM_LIBRARY_JOYSTICK_STATE
+};
+
+// Global state shared between compilation units of GLFW
+//
+extern _GLFWlibrary _glfw;
+
+
+//////////////////////////////////////////////////////////////////////////
+////// GLFW platform API //////
+//////////////////////////////////////////////////////////////////////////
+
+void _glfwPlatformInitTimer(void);
+uint64_t _glfwPlatformGetTimerValue(void);
+uint64_t _glfwPlatformGetTimerFrequency(void);
+
+GLFWbool _glfwPlatformCreateTls(_GLFWtls* tls);
+void _glfwPlatformDestroyTls(_GLFWtls* tls);
+void* _glfwPlatformGetTls(_GLFWtls* tls);
+void _glfwPlatformSetTls(_GLFWtls* tls, void* value);
+
+GLFWbool _glfwPlatformCreateMutex(_GLFWmutex* mutex);
+void _glfwPlatformDestroyMutex(_GLFWmutex* mutex);
+void _glfwPlatformLockMutex(_GLFWmutex* mutex);
+void _glfwPlatformUnlockMutex(_GLFWmutex* mutex);
+
+void* _glfwPlatformLoadModule(const char* path);
+void _glfwPlatformFreeModule(void* module);
+GLFWproc _glfwPlatformGetModuleSymbol(void* module, const char* name);
+
+
+//////////////////////////////////////////////////////////////////////////
+////// GLFW event API //////
+//////////////////////////////////////////////////////////////////////////
+
+void _glfwInputWindowFocus(_GLFWwindow* window, GLFWbool focused);
+void _glfwInputWindowPos(_GLFWwindow* window, int xpos, int ypos);
+void _glfwInputWindowSize(_GLFWwindow* window, int width, int height);
+void _glfwInputFramebufferSize(_GLFWwindow* window, int width, int height);
+void _glfwInputWindowContentScale(_GLFWwindow* window,
+ float xscale, float yscale);
+void _glfwInputWindowIconify(_GLFWwindow* window, GLFWbool iconified);
+void _glfwInputWindowMaximize(_GLFWwindow* window, GLFWbool maximized);
+void _glfwInputWindowDamage(_GLFWwindow* window);
+void _glfwInputWindowCloseRequest(_GLFWwindow* window);
+void _glfwInputWindowMonitor(_GLFWwindow* window, _GLFWmonitor* monitor);
+
+void _glfwInputKey(_GLFWwindow* window,
+ int key, int scancode, int action, int mods);
+void _glfwInputChar(_GLFWwindow* window,
+ uint32_t codepoint, int mods, GLFWbool plain);
+void _glfwInputScroll(_GLFWwindow* window, double xoffset, double yoffset);
+void _glfwInputMouseClick(_GLFWwindow* window, int button, int action, int mods);
+void _glfwInputCursorPos(_GLFWwindow* window, double xpos, double ypos);
+void _glfwInputCursorEnter(_GLFWwindow* window, GLFWbool entered);
+void _glfwInputDrop(_GLFWwindow* window, int count, const char** names);
+void _glfwInputJoystick(_GLFWjoystick* js, int event);
+void _glfwInputJoystickAxis(_GLFWjoystick* js, int axis, float value);
+void _glfwInputJoystickButton(_GLFWjoystick* js, int button, char value);
+void _glfwInputJoystickHat(_GLFWjoystick* js, int hat, char value);
+
+void _glfwInputMonitor(_GLFWmonitor* monitor, int action, int placement);
+void _glfwInputMonitorWindow(_GLFWmonitor* monitor, _GLFWwindow* window);
+
+#if defined(__GNUC__)
+void _glfwInputError(int code, const char* format, ...)
+ __attribute__((format(printf, 2, 3)));
+#else
+void _glfwInputError(int code, const char* format, ...);
+#endif
+
+
+//////////////////////////////////////////////////////////////////////////
+////// GLFW internal API //////
+//////////////////////////////////////////////////////////////////////////
+
+GLFWbool _glfwSelectPlatform(int platformID, _GLFWplatform* platform);
+
+GLFWbool _glfwStringInExtensionString(const char* string, const char* extensions);
+const _GLFWfbconfig* _glfwChooseFBConfig(const _GLFWfbconfig* desired,
+ const _GLFWfbconfig* alternatives,
+ unsigned int count);
+GLFWbool _glfwRefreshContextAttribs(_GLFWwindow* window,
+ const _GLFWctxconfig* ctxconfig);
+GLFWbool _glfwIsValidContextConfig(const _GLFWctxconfig* ctxconfig);
+
+const GLFWvidmode* _glfwChooseVideoMode(_GLFWmonitor* monitor,
+ const GLFWvidmode* desired);
+int _glfwCompareVideoModes(const GLFWvidmode* first, const GLFWvidmode* second);
+_GLFWmonitor* _glfwAllocMonitor(const char* name, int widthMM, int heightMM);
+void _glfwFreeMonitor(_GLFWmonitor* monitor);
+void _glfwAllocGammaArrays(GLFWgammaramp* ramp, unsigned int size);
+void _glfwFreeGammaArrays(GLFWgammaramp* ramp);
+void _glfwSplitBPP(int bpp, int* red, int* green, int* blue);
+
+void _glfwInitGamepadMappings(void);
+_GLFWjoystick* _glfwAllocJoystick(const char* name,
+ const char* guid,
+ int axisCount,
+ int buttonCount,
+ int hatCount);
+void _glfwFreeJoystick(_GLFWjoystick* js);
+void _glfwCenterCursorInContentArea(_GLFWwindow* window);
+
+GLFWbool _glfwInitEGL(void);
+void _glfwTerminateEGL(void);
+GLFWbool _glfwCreateContextEGL(_GLFWwindow* window,
+ const _GLFWctxconfig* ctxconfig,
+ const _GLFWfbconfig* fbconfig);
+#if defined(_GLFW_X11)
+GLFWbool _glfwChooseVisualEGL(const _GLFWwndconfig* wndconfig,
+ const _GLFWctxconfig* ctxconfig,
+ const _GLFWfbconfig* fbconfig,
+ Visual** visual, int* depth);
+#endif /*_GLFW_X11*/
+
+GLFWbool _glfwInitOSMesa(void);
+void _glfwTerminateOSMesa(void);
+GLFWbool _glfwCreateContextOSMesa(_GLFWwindow* window,
+ const _GLFWctxconfig* ctxconfig,
+ const _GLFWfbconfig* fbconfig);
+
+GLFWbool _glfwInitVulkan(int mode);
+void _glfwTerminateVulkan(void);
+const char* _glfwGetVulkanResultString(VkResult result);
+
+size_t _glfwEncodeUTF8(char* s, uint32_t codepoint);
+char** _glfwParseUriList(char* text, int* count);
+
+char* _glfw_strdup(const char* source);
+int _glfw_min(int a, int b);
+int _glfw_max(int a, int b);
+float _glfw_fminf(float a, float b);
+float _glfw_fmaxf(float a, float b);
+
+void* _glfw_calloc(size_t count, size_t size);
+void* _glfw_realloc(void* pointer, size_t size);
+void _glfw_free(void* pointer);
+
diff --git a/chromium/third_party/dawn/third_party/glfw/src/linux_joystick.c b/chromium/third_party/dawn/third_party/glfw/src/linux_joystick.c
new file mode 100644
index 00000000000..da04e9c3165
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/src/linux_joystick.c
@@ -0,0 +1,431 @@
+//========================================================================
+// GLFW 3.4 Linux - www.glfw.org
+//------------------------------------------------------------------------
+// Copyright (c) 2002-2006 Marcus Geelnard
+// Copyright (c) 2006-2017 Camilla Löwy <elmindreda@glfw.org>
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would
+// be appreciated but is not required.
+//
+// 2. Altered source versions must be plainly marked as such, and must not
+// be misrepresented as being the original software.
+//
+// 3. This notice may not be removed or altered from any source
+// distribution.
+//
+//========================================================================
+// It is fine to use C99 in this file because it will not be built with VS
+//========================================================================
+
+#include "internal.h"
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/inotify.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <dirent.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#ifndef SYN_DROPPED // < v2.6.39 kernel headers
+// Workaround for CentOS-6, which is supported until 2020-11-30 but still ships v2.6.32 kernel headers
+#define SYN_DROPPED 3
+#endif
+
+// Apply an EV_KEY event to the specified joystick
+//
+static void handleKeyEvent(_GLFWjoystick* js, int code, int value)
+{
+ _glfwInputJoystickButton(js,
+ js->linjs.keyMap[code - BTN_MISC],
+ value ? GLFW_PRESS : GLFW_RELEASE);
+}
+
+// Apply an EV_ABS event to the specified joystick
+//
+static void handleAbsEvent(_GLFWjoystick* js, int code, int value)
+{
+ const int index = js->linjs.absMap[code];
+
+ if (code >= ABS_HAT0X && code <= ABS_HAT3Y)
+ {
+ static const char stateMap[3][3] =
+ {
+ { GLFW_HAT_CENTERED, GLFW_HAT_UP, GLFW_HAT_DOWN },
+ { GLFW_HAT_LEFT, GLFW_HAT_LEFT_UP, GLFW_HAT_LEFT_DOWN },
+ { GLFW_HAT_RIGHT, GLFW_HAT_RIGHT_UP, GLFW_HAT_RIGHT_DOWN },
+ };
+
+ const int hat = (code - ABS_HAT0X) / 2;
+ const int axis = (code - ABS_HAT0X) % 2;
+ int* state = js->linjs.hats[hat];
+
+ // NOTE: Looking at several input drivers, it seems all hat events use
+ // -1 for left / up, 0 for centered and 1 for right / down
+ if (value == 0)
+ state[axis] = 0;
+ else if (value < 0)
+ state[axis] = 1;
+ else if (value > 0)
+ state[axis] = 2;
+
+ _glfwInputJoystickHat(js, index, stateMap[state[0]][state[1]]);
+ }
+ else
+ {
+ const struct input_absinfo* info = &js->linjs.absInfo[code];
+ float normalized = value;
+
+ const int range = info->maximum - info->minimum;
+ if (range)
+ {
+ // Normalize to 0.0 -> 1.0
+ normalized = (normalized - info->minimum) / range;
+ // Normalize to -1.0 -> 1.0
+ normalized = normalized * 2.0f - 1.0f;
+ }
+
+ _glfwInputJoystickAxis(js, index, normalized);
+ }
+}
+
+// Poll state of absolute axes
+//
+static void pollAbsState(_GLFWjoystick* js)
+{
+ for (int code = 0; code < ABS_CNT; code++)
+ {
+ if (js->linjs.absMap[code] < 0)
+ continue;
+
+ struct input_absinfo* info = &js->linjs.absInfo[code];
+
+ if (ioctl(js->linjs.fd, EVIOCGABS(code), info) < 0)
+ continue;
+
+ handleAbsEvent(js, code, info->value);
+ }
+}
+
+#define isBitSet(bit, arr) (arr[(bit) / 8] & (1 << ((bit) % 8)))
+
+// Attempt to open the specified joystick device
+//
+static GLFWbool openJoystickDevice(const char* path)
+{
+ for (int jid = 0; jid <= GLFW_JOYSTICK_LAST; jid++)
+ {
+ if (!_glfw.joysticks[jid].present)
+ continue;
+ if (strcmp(_glfw.joysticks[jid].linjs.path, path) == 0)
+ return GLFW_FALSE;
+ }
+
+ _GLFWjoystickLinux linjs = {0};
+ linjs.fd = open(path, O_RDONLY | O_NONBLOCK);
+ if (linjs.fd == -1)
+ return GLFW_FALSE;
+
+ char evBits[(EV_CNT + 7) / 8] = {0};
+ char keyBits[(KEY_CNT + 7) / 8] = {0};
+ char absBits[(ABS_CNT + 7) / 8] = {0};
+ struct input_id id;
+
+ if (ioctl(linjs.fd, EVIOCGBIT(0, sizeof(evBits)), evBits) < 0 ||
+ ioctl(linjs.fd, EVIOCGBIT(EV_KEY, sizeof(keyBits)), keyBits) < 0 ||
+ ioctl(linjs.fd, EVIOCGBIT(EV_ABS, sizeof(absBits)), absBits) < 0 ||
+ ioctl(linjs.fd, EVIOCGID, &id) < 0)
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "Linux: Failed to query input device: %s",
+ strerror(errno));
+ close(linjs.fd);
+ return GLFW_FALSE;
+ }
+
+ // Ensure this device supports the events expected of a joystick
+ if (!isBitSet(EV_KEY, evBits) || !isBitSet(EV_ABS, evBits))
+ {
+ close(linjs.fd);
+ return GLFW_FALSE;
+ }
+
+ char name[256] = "";
+
+ if (ioctl(linjs.fd, EVIOCGNAME(sizeof(name)), name) < 0)
+ strncpy(name, "Unknown", sizeof(name));
+
+ char guid[33] = "";
+
+ // Generate a joystick GUID that matches the SDL 2.0.5+ one
+ if (id.vendor && id.product && id.version)
+ {
+ sprintf(guid, "%02x%02x0000%02x%02x0000%02x%02x0000%02x%02x0000",
+ id.bustype & 0xff, id.bustype >> 8,
+ id.vendor & 0xff, id.vendor >> 8,
+ id.product & 0xff, id.product >> 8,
+ id.version & 0xff, id.version >> 8);
+ }
+ else
+ {
+ sprintf(guid, "%02x%02x0000%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x00",
+ id.bustype & 0xff, id.bustype >> 8,
+ name[0], name[1], name[2], name[3],
+ name[4], name[5], name[6], name[7],
+ name[8], name[9], name[10]);
+ }
+
+ int axisCount = 0, buttonCount = 0, hatCount = 0;
+
+ for (int code = BTN_MISC; code < KEY_CNT; code++)
+ {
+ if (!isBitSet(code, keyBits))
+ continue;
+
+ linjs.keyMap[code - BTN_MISC] = buttonCount;
+ buttonCount++;
+ }
+
+ for (int code = 0; code < ABS_CNT; code++)
+ {
+ linjs.absMap[code] = -1;
+ if (!isBitSet(code, absBits))
+ continue;
+
+ if (code >= ABS_HAT0X && code <= ABS_HAT3Y)
+ {
+ linjs.absMap[code] = hatCount;
+ hatCount++;
+ // Skip the Y axis
+ code++;
+ }
+ else
+ {
+ if (ioctl(linjs.fd, EVIOCGABS(code), &linjs.absInfo[code]) < 0)
+ continue;
+
+ linjs.absMap[code] = axisCount;
+ axisCount++;
+ }
+ }
+
+ _GLFWjoystick* js =
+ _glfwAllocJoystick(name, guid, axisCount, buttonCount, hatCount);
+ if (!js)
+ {
+ close(linjs.fd);
+ return GLFW_FALSE;
+ }
+
+ strncpy(linjs.path, path, sizeof(linjs.path) - 1);
+ memcpy(&js->linjs, &linjs, sizeof(linjs));
+
+ pollAbsState(js);
+
+ _glfwInputJoystick(js, GLFW_CONNECTED);
+ return GLFW_TRUE;
+}
+
+#undef isBitSet
+
+// Frees all resources associated with the specified joystick
+//
+static void closeJoystick(_GLFWjoystick* js)
+{
+ close(js->linjs.fd);
+ _glfwFreeJoystick(js);
+ _glfwInputJoystick(js, GLFW_DISCONNECTED);
+}
+
+// Lexically compare joysticks by device path; used by qsort
+//
+static int compareJoysticks(const void* fp, const void* sp)
+{
+ const _GLFWjoystick* fj = fp;
+ const _GLFWjoystick* sj = sp;
+ return strcmp(fj->linjs.path, sj->linjs.path);
+}
+
+
+//////////////////////////////////////////////////////////////////////////
+////// GLFW internal API //////
+//////////////////////////////////////////////////////////////////////////
+
+void _glfwDetectJoystickConnectionLinux(void)
+{
+ if (_glfw.linjs.inotify <= 0)
+ return;
+
+ ssize_t offset = 0;
+ char buffer[16384];
+ const ssize_t size = read(_glfw.linjs.inotify, buffer, sizeof(buffer));
+
+ while (size > offset)
+ {
+ regmatch_t match;
+ const struct inotify_event* e = (struct inotify_event*) (buffer + offset);
+
+ offset += sizeof(struct inotify_event) + e->len;
+
+ if (regexec(&_glfw.linjs.regex, e->name, 1, &match, 0) != 0)
+ continue;
+
+ char path[PATH_MAX];
+ snprintf(path, sizeof(path), "/dev/input/%s", e->name);
+
+ if (e->mask & (IN_CREATE | IN_ATTRIB))
+ openJoystickDevice(path);
+ else if (e->mask & IN_DELETE)
+ {
+ for (int jid = 0; jid <= GLFW_JOYSTICK_LAST; jid++)
+ {
+ if (strcmp(_glfw.joysticks[jid].linjs.path, path) == 0)
+ {
+ closeJoystick(_glfw.joysticks + jid);
+ break;
+ }
+ }
+ }
+ }
+}
+
+
+//////////////////////////////////////////////////////////////////////////
+////// GLFW platform API //////
+//////////////////////////////////////////////////////////////////////////
+
+GLFWbool _glfwInitJoysticksLinux(void)
+{
+ const char* dirname = "/dev/input";
+
+ _glfw.linjs.inotify = inotify_init1(IN_NONBLOCK | IN_CLOEXEC);
+ if (_glfw.linjs.inotify > 0)
+ {
+ // HACK: Register for IN_ATTRIB to get notified when udev is done
+        //       This works well in practice, but the proper solution is libudev
+
+ _glfw.linjs.watch = inotify_add_watch(_glfw.linjs.inotify,
+ dirname,
+ IN_CREATE | IN_ATTRIB | IN_DELETE);
+ }
+
+ // Continue without device connection notifications if inotify fails
+
+ if (regcomp(&_glfw.linjs.regex, "^event[0-9]\\+$", 0) != 0)
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR, "Linux: Failed to compile regex");
+ return GLFW_FALSE;
+ }
+
+ int count = 0;
+
+ DIR* dir = opendir(dirname);
+ if (dir)
+ {
+ struct dirent* entry;
+
+ while ((entry = readdir(dir)))
+ {
+ regmatch_t match;
+
+ if (regexec(&_glfw.linjs.regex, entry->d_name, 1, &match, 0) != 0)
+ continue;
+
+ char path[PATH_MAX];
+
+ snprintf(path, sizeof(path), "%s/%s", dirname, entry->d_name);
+
+ if (openJoystickDevice(path))
+ count++;
+ }
+
+ closedir(dir);
+ }
+
+ // Continue with no joysticks if enumeration fails
+
+ qsort(_glfw.joysticks, count, sizeof(_GLFWjoystick), compareJoysticks);
+ return GLFW_TRUE;
+}
+
+void _glfwTerminateJoysticksLinux(void)
+{
+ for (int jid = 0; jid <= GLFW_JOYSTICK_LAST; jid++)
+ {
+ _GLFWjoystick* js = _glfw.joysticks + jid;
+ if (js->present)
+ closeJoystick(js);
+ }
+
+ if (_glfw.linjs.inotify > 0)
+ {
+ if (_glfw.linjs.watch > 0)
+ inotify_rm_watch(_glfw.linjs.inotify, _glfw.linjs.watch);
+
+ close(_glfw.linjs.inotify);
+ regfree(&_glfw.linjs.regex);
+ }
+}
+
+int _glfwPollJoystickLinux(_GLFWjoystick* js, int mode)
+{
+ // Read all queued events (non-blocking)
+ for (;;)
+ {
+ struct input_event e;
+
+ errno = 0;
+ if (read(js->linjs.fd, &e, sizeof(e)) < 0)
+ {
+ // Reset the joystick slot if the device was disconnected
+ if (errno == ENODEV)
+ closeJoystick(js);
+
+ break;
+ }
+
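+        // SYN_DROPPED means the kernel dropped events from its buffer; ignore
+        // everything until the next SYN_REPORT and then re-read the absolute
+        // axis state directly to resynchronize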
+ if (e.type == EV_SYN)
+ {
+ if (e.code == SYN_DROPPED)
+ _glfw.linjs.dropped = GLFW_TRUE;
+ else if (e.code == SYN_REPORT)
+ {
+ _glfw.linjs.dropped = GLFW_FALSE;
+ pollAbsState(js);
+ }
+ }
+
+ if (_glfw.linjs.dropped)
+ continue;
+
+ if (e.type == EV_KEY)
+ handleKeyEvent(js, e.code, e.value);
+ else if (e.type == EV_ABS)
+ handleAbsEvent(js, e.code, e.value);
+ }
+
+ return js->present;
+}
+
+const char* _glfwGetMappingNameLinux(void)
+{
+ return "Linux";
+}
+
+void _glfwUpdateGamepadGUIDLinux(char* guid)
+{
+}
+
diff --git a/chromium/third_party/dawn/third_party/glfw/src/linux_joystick.h b/chromium/third_party/dawn/third_party/glfw/src/linux_joystick.h
new file mode 100644
index 00000000000..37e5d1ae0f4
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/src/linux_joystick.h
@@ -0,0 +1,65 @@
+//========================================================================
+// GLFW 3.4 Linux - www.glfw.org
+//------------------------------------------------------------------------
+// Copyright (c) 2014 Jonas Ådahl <jadahl@gmail.com>
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would
+// be appreciated but is not required.
+//
+// 2. Altered source versions must be plainly marked as such, and must not
+// be misrepresented as being the original software.
+//
+// 3. This notice may not be removed or altered from any source
+// distribution.
+//
+//========================================================================
+
+#include <linux/input.h>
+#include <linux/limits.h>
+#include <regex.h>
+
+#define GLFW_LINUX_JOYSTICK_STATE _GLFWjoystickLinux linjs;
+#define GLFW_LINUX_LIBRARY_JOYSTICK_STATE _GLFWlibraryLinux linjs;
+
+#define GLFW_BUILD_LINUX_MAPPINGS
+
+// Linux-specific joystick data
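+// (keyMap and absMap translate evdev key and absolute axis codes into the
+//  GLFW button, axis and hat indices allocated for this joystick)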
+//
+typedef struct _GLFWjoystickLinux
+{
+ int fd;
+ char path[PATH_MAX];
+ int keyMap[KEY_CNT - BTN_MISC];
+ int absMap[ABS_CNT];
+ struct input_absinfo absInfo[ABS_CNT];
+ int hats[4][2];
+} _GLFWjoystickLinux;
+
+// Linux-specific joystick API data
+//
+typedef struct _GLFWlibraryLinux
+{
+ int inotify;
+ int watch;
+ regex_t regex;
+ GLFWbool dropped;
+} _GLFWlibraryLinux;
+
+void _glfwDetectJoystickConnectionLinux(void);
+
+GLFWbool _glfwInitJoysticksLinux(void);
+void _glfwTerminateJoysticksLinux(void);
+int _glfwPollJoystickLinux(_GLFWjoystick* js, int mode);
+const char* _glfwGetMappingNameLinux(void);
+void _glfwUpdateGamepadGUIDLinux(char* guid);
+
diff --git a/chromium/third_party/dawn/third_party/glfw/src/mappings.h b/chromium/third_party/dawn/third_party/glfw/src/mappings.h
new file mode 100644
index 00000000000..553fe2a28dc
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/src/mappings.h
@@ -0,0 +1,1001 @@
+//========================================================================
+// GLFW 3.4 - www.glfw.org
+//------------------------------------------------------------------------
+// Copyright (c) 2006-2018 Camilla Löwy <elmindreda@glfw.org>
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would
+// be appreciated but is not required.
+//
+// 2. Altered source versions must be plainly marked as such, and must not
+// be misrepresented as being the original software.
+//
+// 3. This notice may not be removed or altered from any source
+// distribution.
+//
+//========================================================================
+// As mappings.h.in, this file is used by CMake to produce the mappings.h
+// header file. If you are adding a GLFW specific gamepad mapping, this is
+// where to put it.
+//========================================================================
+// As mappings.h, this provides all pre-defined gamepad mappings, including
+// all available in SDL_GameControllerDB. Do not edit this file. Any gamepad
+// mappings not specific to GLFW should be submitted to SDL_GameControllerDB.
+// This file can be re-generated from mappings.h.in and the upstream
+// gamecontrollerdb.txt with the 'update_mappings' CMake target.
+//========================================================================
+
+// All gamepad mappings not labeled GLFW are copied from the
+// SDL_GameControllerDB project under the following license:
+//
+// Simple DirectMedia Layer
+// Copyright (C) 1997-2013 Sam Lantinga <slouken@libsdl.org>
+//
+// This software is provided 'as-is', without any express or implied warranty.
+// In no event will the authors be held liable for any damages arising from the
+// use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would
+// be appreciated but is not required.
+//
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+//
+// 3. This notice may not be removed or altered from any source distribution.
+
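+// Each entry below is an SDL_GameControllerDB mapping string: a device GUID, a
+// human-readable name and a list of element:binding pairs, where a binding is a
+// button (bN), an axis (aN, optionally prefixed with +/- for a half-axis or
+// suffixed with ~ for an inverted axis) or a hat bit (hK.M), followed by a
+// platform field.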
+const char* _glfwDefaultMappings[] =
+{
+#if defined(GLFW_BUILD_WIN32_MAPPINGS)
+"03000000fa2d00000100000000000000,3DRUDDER,leftx:a0,lefty:a1,rightx:a5,righty:a2,platform:Windows,",
+"03000000c82d00002038000000000000,8bitdo,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b2,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a3,righty:a4,start:b11,x:b4,y:b3,platform:Windows,",
+"03000000c82d00000951000000000000,8BitDo Dogbone Modkit,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,start:b11,platform:Windows,",
+"03000000c82d000011ab000000000000,8BitDo F30,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b2,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a3,righty:a4,start:b11,x:b4,y:b3,platform:Windows,",
+"03000000c82d00001038000000000000,8BitDo F30 Pro,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b2,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a2,righty:a5,start:b11,x:b4,y:b3,platform:Windows,",
+"03000000c82d00000090000000000000,8BitDo FC30 Pro,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b2,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a3,righty:a4,start:b11,x:b4,y:b3,platform:Windows,",
+"03000000c82d00000650000000000000,8BitDo M30,a:b0,b:b1,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:a4,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b6,righttrigger:b7,start:b11,x:b3,y:b4,platform:Windows,",
+"03000000c82d00005106000000000000,8BitDo M30 Gamepad,a:b1,b:b0,back:b10,guide:b2,leftshoulder:b6,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,righttrigger:b9,start:b11,x:b4,y:b3,platform:Windows,",
+"03000000c82d00000151000000000000,8BitDo M30 ModKit,a:b0,b:b1,back:b10,dpdown:+a2,dpleft:-a0,dpright:+a0,dpup:-a2,rightshoulder:b6,righttrigger:b7,start:b11,x:b3,y:b4,platform:Windows,",
+"03000000c82d00000310000000000000,8BitDo N30,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftx:a0,lefty:a1,rightshoulder:b7,start:b11,x:b3,y:b4,platform:Windows,",
+"03000000c82d00002028000000000000,8BitDo N30,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b2,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a2,righty:a5,start:b11,x:b4,y:b3,platform:Windows,",
+"03000000c82d00008010000000000000,8BitDo N30,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftx:a0,lefty:a1,rightshoulder:b7,start:b11,x:b3,y:b4,platform:Windows,",
+"03000000c82d00000451000000000000,8BitDo N30 Modkit,a:b1,b:b0,back:b10,dpdown:+a2,dpleft:-a0,dpright:+a0,dpup:-a2,start:b11,platform:Windows,",
+"03000000c82d00000190000000000000,8BitDo N30 Pro,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b2,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a3,righty:a4,start:b11,x:b4,y:b3,platform:Windows,",
+"03000000c82d00001590000000000000,8BitDo N30 Pro 2,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b2,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a2,righty:a5,start:b11,x:b4,y:b3,platform:Windows,",
+"03000000c82d00006528000000000000,8BitDo N30 Pro 2,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b2,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a3,righty:a4,start:b11,x:b4,y:b3,platform:Windows,",
+"03000000022000000090000000000000,8Bitdo NES30 Pro,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a3,righty:a4,start:b11,x:b4,y:b3,platform:Windows,",
+"03000000203800000900000000000000,8Bitdo NES30 Pro,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a3,righty:a4,start:b11,x:b4,y:b3,platform:Windows,",
+"03000000c82d00000360000000000000,8BitDo Pro 2,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a3,righty:a4,start:b11,x:b4,y:b3,platform:Windows,",
+"03000000c82d00002867000000000000,8BitDo S30 Modkit,a:b0,b:b1,dpdown:+a2,dpleft:-a0,dpright:+a0,dpup:-a2,leftshoulder:b8,lefttrigger:b9,rightshoulder:b6,righttrigger:b7,start:b11,x:b3,y:b4,platform:Windows,",
+"03000000c82d00000130000000000000,8BitDo SF30,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b2,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a2,righty:a5,start:b11,x:b4,y:b3,platform:Windows,",
+"03000000c82d00000060000000000000,8Bitdo SF30 Pro,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a2,righty:a3,start:b11,x:b4,y:b3,platform:Windows,",
+"03000000c82d00000061000000000000,8Bitdo SF30 Pro,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b2,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a2,righty:a3,start:b11,x:b4,y:b3,platform:Windows,",
+"03000000c82d000021ab000000000000,8BitDo SFC30,a:b1,b:b0,back:b10,leftshoulder:b6,leftx:a0,lefty:a1,rightshoulder:b7,start:b11,x:b4,y:b3,platform:Windows,",
+"03000000102800000900000000000000,8Bitdo SFC30 GamePad,a:b1,b:b0,back:b10,leftshoulder:b6,leftx:a0,lefty:a1,rightshoulder:b7,start:b11,x:b4,y:b3,platform:Windows,",
+"03000000c82d00003028000000000000,8Bitdo SFC30 GamePad,a:b1,b:b0,back:b10,leftshoulder:b6,leftx:a0,lefty:a1,rightshoulder:b7,start:b11,x:b4,y:b3,platform:Windows,",
+"03000000c82d00000030000000000000,8BitDo SN30,a:b1,b:b0,back:b10,leftshoulder:b6,leftx:a0,lefty:a1,rightshoulder:b7,start:b11,x:b4,y:b3,platform:Windows,",
+"03000000c82d00001290000000000000,8BitDo SN30,a:b1,b:b0,back:b10,dpdown:+a1,dpleft:-a0,dpright:+a0,dpup:-a1,leftshoulder:b6,rightshoulder:b7,start:b11,x:b4,y:b3,platform:Windows,",
+"03000000c82d000020ab000000000000,8BitDo SN30,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b2,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a2,righty:a5,start:b11,x:b4,y:b3,platform:Windows,",
+"03000000c82d00004028000000000000,8BitDo SN30,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b2,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a2,righty:a5,start:b11,x:b4,y:b3,platform:Windows,",
+"03000000c82d00006228000000000000,8BitDo SN30,a:b1,b:b0,back:b10,dpdown:+a1,dpleft:-a0,dpright:+a0,dpup:-a1,leftshoulder:b6,rightshoulder:b7,start:b11,x:b4,y:b3,platform:Windows,",
+"03000000c82d00000351000000000000,8BitDo SN30 Modkit,a:b1,b:b0,back:b10,dpdown:+a2,dpleft:-a0,dpright:+a0,dpup:-a2,leftshoulder:b6,rightshoulder:b7,start:b11,x:b4,y:b3,platform:Windows,",
+"03000000c82d00000160000000000000,8BitDo SN30 Pro,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b2,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a2,righty:a3,start:b11,x:b4,y:b3,platform:Windows,",
+"03000000c82d00000161000000000000,8BitDo SN30 Pro,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b2,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a2,righty:a3,start:b11,x:b4,y:b3,platform:Windows,",
+"03000000c82d00000121000000000000,8BitDo SN30 Pro for Android,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a3,righty:a4,start:b11,x:b3,y:b4,platform:Windows,",
+"03000000c82d00000260000000000000,8BitDo SN30 Pro+,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b2,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a2,righty:a3,start:b11,x:b4,y:b3,platform:Windows,",
+"03000000c82d00000261000000000000,8BitDo SN30 Pro+,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b2,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a2,righty:a3,start:b11,x:b4,y:b3,platform:Windows,",
+"03000000c82d00000031000000000000,8BitDo Wireless Adapter,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a3,righty:a4,start:b11,x:b3,y:b4,platform:Windows,",
+"03000000c82d00001890000000000000,8BitDo Zero 2,a:b1,b:b0,back:b10,leftshoulder:b6,leftx:a0,lefty:a1,rightshoulder:b7,start:b11,x:b4,y:b3,platform:Windows,",
+"03000000c82d00003032000000000000,8BitDo Zero 2,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftx:a0,lefty:a1,rightshoulder:b7,rightx:a2,righty:a3,start:b11,x:b4,y:b3,platform:Windows,",
+"03000000a00500003232000000000000,8Bitdo Zero GamePad,a:b0,b:b1,back:b10,dpdown:+a2,dpleft:-a0,dpright:+a0,dpup:-a2,leftshoulder:b6,rightshoulder:b7,start:b11,x:b3,y:b4,platform:Windows,",
+"03000000a30c00002700000000000000,Astro City Mini,a:b2,b:b1,back:b8,leftx:a3,lefty:a4,rightshoulder:b4,righttrigger:b5,start:b9,x:b3,y:b0,platform:Windows,",
+"03000000a30c00002800000000000000,Astro City Mini,a:b2,b:b1,back:b8,leftx:a3,lefty:a4,rightshoulder:b4,righttrigger:b5,start:b9,x:b3,y:b0,platform:Windows,",
+"030000008f0e00001200000000000000,Acme GA-02,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b5,leftx:a0,lefty:a1,rightshoulder:b6,rightstick:b11,righttrigger:b7,rightx:a3,righty:a2,start:b9,x:b2,y:b3,platform:Windows,",
+"03000000c01100000355000011010000,ACRUX USB GAME PAD,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Windows,",
+"03000000fa190000f0ff000000000000,Acteck AGJ-3200,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,platform:Windows,",
+"030000006f0e00001413000000000000,Afterglow,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Windows,",
+"03000000341a00003608000000000000,Afterglow PS3 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Windows,",
+"030000006f0e00000263000000000000,Afterglow PS3 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Windows,",
+"030000006f0e00001101000000000000,Afterglow PS3 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Windows,",
+"030000006f0e00001401000000000000,Afterglow PS3 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Windows,",
+"030000006f0e00001402000000000000,Afterglow PS3 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Windows,",
+"030000006f0e00001901000000000000,Afterglow PS3 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Windows,",
+"030000006f0e00001a01000000000000,Afterglow PS3 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Windows,",
+"03000000d62000001d57000000000000,Airflo PS3 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Windows,",
+"03000000491900001904000000000000,Amazon Luna Controller,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,misc1:b9,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b7,x:b2,y:b3,platform:Windows,",
+"03000000710100001904000000000000,Amazon Luna Controller,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b11,leftshoulder:b5,leftstick:b8,leftx:a0,lefty:a1,misc1:b9,rightshoulder:b4,rightstick:b7,rightx:a3,righty:a4,start:b6,x:b3,y:b2,platform:Windows,",
+"03000000ef0500000300000000000000,AxisPad,a:b2,b:b3,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b8,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b9,righttrigger:b7,rightx:a3,righty:a2,start:b11,x:b0,y:b1,platform:Windows,",
+"03000000d6200000e557000000000000,Batarang,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Windows,",
+"03000000c01100001352000000000000,Battalife Joystick,a:b6,b:b7,back:b2,leftshoulder:b0,leftx:a0,lefty:a1,rightshoulder:b1,start:b3,x:b4,y:b5,platform:Windows,",
+"030000006f0e00003201000000000000,Battlefield 4 PS3 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Windows,",
+"03000000d62000002a79000000000000,BDA PS4 Fightpad,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Windows,",
+"03000000bc2000006012000000000000,Betop 2126F,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,platform:Windows,",
+"03000000bc2000000055000000000000,Betop BFM Gamepad,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a3,righty:a4,start:b11,x:b3,y:b4,platform:Windows,",
+"03000000bc2000006312000000000000,Betop Controller,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,platform:Windows,",
+"03000000bc2000006321000000000000,BETOP CONTROLLER,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,platform:Windows,",
+"03000000bc2000006412000000000000,Betop Controller,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,platform:Windows,",
+"03000000c01100000555000000000000,Betop Controller,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,platform:Windows,",
+"03000000c01100000655000000000000,Betop Controller,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,platform:Windows,",
+"03000000790000000700000000000000,Betop Gamepad,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a4,start:b9,x:b3,y:b0,platform:Windows,",
+"03000000808300000300000000000000,Betop Gamepad,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a4,start:b9,x:b3,y:b0,platform:Windows,",
+"030000006b1400000055000000000000,Bigben PS3 Controller,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b2,y:b3,platform:Windows,",
+"030000006b1400000103000000000000,Bigben PS3 Controller,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b2,platform:Windows,",
+"03000000120c0000210e000000000000,Brook Mars,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Windows,",
+"0300000066f700000500000000000000,BrutalLegendTest,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a3,righty:a2,start:b9,x:b0,y:b3,platform:Windows,",
+"03000000d81d00000b00000000000000,BUFFALO BSGP1601 Series ,a:b5,b:b3,back:b12,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b8,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b9,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b13,x:b4,y:b2,platform:Windows,",
+"03000000e82000006058000000000000,Cideko AK08b,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,platform:Windows,",
+"03000000457500000401000000000000,Cobra,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Windows,",
+"030000005e0400008e02000000000000,Controller (XBOX 360 For Windows),a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b8,lefttrigger:+a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b9,righttrigger:-a2,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Windows,",
+"030000005e040000a102000000000000,Controller (Xbox 360 Wireless Receiver for Windows),a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b8,lefttrigger:+a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b9,righttrigger:-a2,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Windows,",
+"030000005e040000ff02000000000000,Controller (Xbox One For Windows) - Wired,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b8,lefttrigger:+a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b9,righttrigger:-a2,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Windows,",
+"030000005e040000ea02000000000000,Controller (Xbox One For Windows) - Wireless,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b8,lefttrigger:+a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b9,righttrigger:-a2,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Windows,",
+"03000000260900008888000000000000,Cyber Gadget GameCube Controller,a:b0,b:b1,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b6,righttrigger:a4,rightx:a2,righty:a3~,start:b7,x:b2,y:b3,platform:Windows,",
+"03000000a306000022f6000000000000,Cyborg V.3 Rumble Pad,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:+a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:-a3,rightx:a2,righty:a4,start:b9,x:b0,y:b3,platform:Windows,",
+"03000000451300000830000000000000,Defender Game Racer X7,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b2,y:b3,platform:Windows,",
+"030000007d0400000840000000000000,Destroyer Tiltpad,+leftx:h0.2,+lefty:h0.4,-leftx:h0.8,-lefty:h0.1,a:b1,b:b2,dpdown:+a1,dpleft:-a0,dpright:+a0,dpup:-a1,leftshoulder:b4,rightshoulder:b5,x:b0,y:b3,platform:Windows,",
+"03000000791d00000103000000000000,Dual Box WII,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b10,leftshoulder:b6,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b7,righttrigger:b5,rightx:a2,righty:a3,start:b9,x:b3,y:b0,platform:Windows,",
+"03000000bd12000002e0000000000000,Dual USB Vibration Joystick,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b9,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b10,righttrigger:b5,rightx:a3,righty:a2,start:b11,x:b3,y:b0,platform:Windows,",
+"030000008f0e00000910000000000000,DualShock 2,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b9,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b10,righttrigger:b5,rightx:a3,righty:a2,start:b11,x:b3,y:b0,platform:Windows,",
+"030000006f0e00003001000000000000,EA SPORTS PS3 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Windows,",
+"03000000b80500000410000000000000,Elecom Gamepad,a:b2,b:b3,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b1,platform:Windows,",
+"03000000b80500000610000000000000,Elecom Gamepad,a:b2,b:b3,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b1,platform:Windows,",
+"03000000120c0000f61c000000000000,Elite,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Windows,",
+"030000008f0e00000f31000000000000,EXEQ,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b2,platform:Windows,",
+"03000000341a00000108000000000000,EXEQ RF USB Gamepad 8206,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b8,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b7,rightx:a2,righty:a3,start:b9,x:b2,y:b3,platform:Windows,",
+"030000006f0e00008401000000000000,Faceoff Deluxe+ Audio Wired Controller for Nintendo Switch,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Windows,",
+"030000006f0e00008001000000000000,Faceoff Wired Pro Controller for Nintendo Switch,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Windows,",
+"03000000852100000201000000000000,FF-GP1,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Windows,",
+"030000000d0f00008500000000000000,Fighting Commander 2016 PS3,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Windows,",
+"030000000d0f00008400000000000000,Fighting Commander 5,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Windows,",
+"030000000d0f00008700000000000000,Fighting Stick mini 4,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,lefttrigger:b6,rightshoulder:b5,righttrigger:b7,start:b9,x:b0,y:b3,platform:Windows,",
+"030000000d0f00008800000000000000,Fighting Stick mini 4,a:b1,b:b2,back:b9,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,lefttrigger:b6,rightshoulder:b5,righttrigger:b7,start:b8,x:b0,y:b3,platform:Windows,",
+"030000000d0f00002700000000000000,FIGHTING STICK V3,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,lefttrigger:b6,rightshoulder:b5,righttrigger:b7,start:b9,x:b0,y:b3,platform:Windows,",
+"78696e70757403000000000000000000,Fightstick TES,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b10,leftshoulder:b4,lefttrigger:a2,rightshoulder:b5,righttrigger:a5,start:b7,x:b2,y:b3,platform:Windows,",
+"03000000790000002201000000000000,Game Controller for PC,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,platform:Windows,",
+"0300000066f700000100000000000000,Game VIB Joystick,a:b2,b:b3,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b8,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b9,righttrigger:b7,rightx:a3,righty:a2,start:b11,x:b0,y:b1,platform:Windows,",
+"03000000260900002625000000000000,Gamecube Controller,a:b0,b:b1,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b6,lefttrigger:a4,leftx:a0,lefty:a1,righttrigger:a5,rightx:a2,righty:a3,start:b7,x:b2,y:b3,platform:Windows,",
+"03000000790000004618000000000000,GameCube Controller Adapter,a:b1,b:b2,dpdown:b14,dpleft:b15,dpright:b13,dpup:b12,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b7,righttrigger:b5,rightx:a5,righty:a2,start:b9,x:b0,y:b3,platform:Windows,",
+"030000008f0e00000d31000000000000,GAMEPAD 3 TURBO,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Windows,",
+"03000000280400000140000000000000,GamePad Pro USB,a:b1,b:b2,back:b8,leftshoulder:b4,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,righttrigger:b7,start:b9,x:b0,y:b3,platform:Windows,",
+"03000000ac0500003d03000000000000,GameSir,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a3,righty:a4,start:b11,x:b3,y:b4,platform:Windows,",
+"03000000ac0500004d04000000000000,GameSir,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a3,righty:a4,start:b11,x:b3,y:b4,platform:Windows,",
+"03000000ffff00000000000000000000,GameStop Gamepad,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b2,y:b3,platform:Windows,",
+"03000000c01100000140000000000000,GameStop PS4 Fun Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Windows,",
+"030000009b2800003200000000000000,GC/N64 to USB v3.4,a:b0,b:b7,dpdown:b11,dpleft:b12,dpright:b13,dpup:b10,lefttrigger:+a5,leftx:a0,lefty:a1,rightshoulder:b2,righttrigger:+a2,rightx:a3,righty:a4,start:b3,x:b1,y:b8,platform:Windows,",
+"030000009b2800006000000000000000,GC/N64 to USB v3.6,a:b0,b:b7,dpdown:b11,dpleft:b12,dpright:b13,dpup:b10,lefttrigger:+a5,leftx:a0,lefty:a1,rightshoulder:b2,righttrigger:+a2,rightx:a3,righty:a4,start:b3,x:b1,y:b8,platform:Windows,",
+"030000008305000009a0000000000000,Genius,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b2,y:b3,platform:Windows,",
+"030000008305000031b0000000000000,Genius Maxfire Blaze 3,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b2,y:b3,platform:Windows,",
+"03000000451300000010000000000000,Genius Maxfire Grandias 12,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b2,y:b3,platform:Windows,",
+"030000005c1a00003330000000000000,Genius MaxFire Grandias 12V,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b10,lefttrigger:b7,leftx:a0,lefty:a1,rightshoulder:b4,rightstick:b11,righttrigger:b5,rightx:a3,righty:a2,start:b9,x:b2,y:b3,platform:Windows,",
+"03000000300f00000b01000000000000,GGE909 Recoil Pad,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a3,righty:a2,start:b9,x:b3,y:b0,platform:Windows,",
+"03000000f0250000c283000000000000,Gioteck,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,platform:Windows,",
+"03000000f025000021c1000000000000,Gioteck PS3 Controller,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,platform:Windows,",
+"03000000f0250000c383000000000000,Gioteck VX2 Controller,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,platform:Windows,",
+"03000000f0250000c483000000000000,Gioteck VX2 Controller,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,platform:Windows,",
+"030000007d0400000540000000000000,Gravis Eliminator GamePad Pro,a:b1,b:b2,back:b8,leftshoulder:b4,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,righttrigger:b7,start:b9,x:b0,y:b3,platform:Windows,",
+"03000000341a00000302000000000000,Hama Scorpad,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Windows,",
+"030000000d0f00004900000000000000,Hatsune Miku Sho Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Windows,",
+"030000001008000001e1000000000000,Havit HV-G60,a:b2,b:b1,back:b8,dpdown:+a1,dpleft:-a0,dpright:+a0,dpup:-a1,leftshoulder:b4,lefttrigger:b6,rightshoulder:b5,righttrigger:b7,start:b9,x:b3,y:b0,platform:Windows,",
+"03000000d81400000862000000000000,HitBox Edition Cthulhu+,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b5,lefttrigger:b4,rightshoulder:b7,righttrigger:b6,start:b9,x:b0,y:b3,platform:Windows,",
+"03000000632500002605000000000000,HJD-X,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a3,righty:a4,start:b11,x:b3,y:b4,platform:Windows,",
+"030000000d0f00002d00000000000000,Hori Fighting Commander 3 Pro,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Windows,",
+"030000000d0f00005f00000000000000,Hori Fighting Commander 4 (PS3),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Windows,",
+"030000000d0f00005e00000000000000,Hori Fighting Commander 4 (PS4),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Windows,",
+"030000000d0f00004000000000000000,Hori Fighting Stick Mini 3,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b5,lefttrigger:b4,rightshoulder:b7,righttrigger:b6,start:b9,x:b0,y:b3,platform:Windows,",
+"030000000d0f00005400000000000000,Hori Pad 3,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Windows,",
+"030000000d0f00000900000000000000,Hori Pad 3 Turbo,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Windows,",
+"030000000d0f00004d00000000000000,Hori Pad A,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Windows,",
+"030000000d0f00009200000000000000,Hori Pokken Tournament DX Pro Pad,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,lefttrigger:b6,rightshoulder:b5,righttrigger:b7,start:b9,x:b0,y:b3,platform:Windows,",
+"030000000d0f00001600000000007803,HORI Real Arcade Pro EX-SE (Xbox 360),a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b10,leftshoulder:b4,lefttrigger:a2,rightshoulder:b5,righttrigger:a5,start:b7,x:b2,y:b3,platform:Windows,",
+"030000000d0f00009c00000000000000,Hori TAC Pro,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Windows,",
+"030000000d0f0000c100000000000000,Horipad,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Windows,",
+"030000000d0f00006e00000000000000,HORIPAD 4 (PS3),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Windows,",
+"030000000d0f00006600000000000000,HORIPAD 4 (PS4),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Windows,",
+"030000000d0f00005500000000000000,Horipad 4 FPS,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Windows,",
+"030000000d0f0000ee00000000000000,HORIPAD mini4,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Windows,",
+"03000000250900000017000000000000,HRAP2 on PS/SS/N64 Joypad to USB BOX,a:b2,b:b1,back:b9,leftshoulder:b5,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b7,righttrigger:b6,start:b8,x:b3,y:b0,platform:Windows,",
+"030000008f0e00001330000000000000,HuiJia SNES Controller,a:b2,b:b1,back:b8,dpdown:+a1,dpleft:-a0,dpright:+a0,dpup:-a1,leftshoulder:b6,rightshoulder:b7,start:b9,x:b3,y:b0,platform:Windows,",
+"03000000d81d00000f00000000000000,iBUFFALO BSGP1204 Series,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b10,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b11,righttrigger:b5,rightx:a2,righty:a3,start:b9,x:b3,y:b0,platform:Windows,",
+"03000000d81d00001000000000000000,iBUFFALO BSGP1204P Series,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b10,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b11,righttrigger:b5,rightx:a2,righty:a3,start:b9,x:b3,y:b0,platform:Windows,",
+"03000000830500006020000000000000,iBuffalo SNES Controller,a:b1,b:b0,back:b6,dpdown:+a1,dpleft:-a0,dpright:+a0,dpup:-a1,leftshoulder:b4,rightshoulder:b5,start:b7,x:b3,y:b2,platform:Windows,",
+"03000000b50700001403000000000000,Impact Black,a:b2,b:b3,back:b8,leftshoulder:b4,leftstick:b10,lefttrigger:b5,leftx:a0,lefty:a1,rightshoulder:b6,rightstick:b11,righttrigger:b7,rightx:a3,righty:a2,start:b9,x:b0,y:b1,platform:Windows,",
+"030000006f0e00002401000000000000,INJUSTICE FightStick PS3 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,lefttrigger:b6,rightshoulder:b5,righttrigger:b7,start:b9,x:b0,y:b3,platform:Windows,",
+"03000000ac0500002c02000000000000,IPEGA,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b8,leftstick:b13,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b9,rightstick:b14,righttrigger:b7,rightx:a3,righty:a4,start:b11,x:b3,y:b4,platform:Windows,",
+"03000000491900000204000000000000,Ipega PG-9023,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a3,righty:a4,start:b11,x:b3,y:b4,platform:Windows,",
+"03000000491900000304000000000000,Ipega PG-9087 - Bluetooth Gamepad,+righty:+a5,-righty:-a4,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a3,start:b11,x:b3,y:b4,platform:Windows,",
+"030000006e0500000a20000000000000,JC-DUX60 ELECOM MMO Gamepad,a:b2,b:b3,back:b17,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b8,leftstick:b14,lefttrigger:b12,leftx:a0,lefty:a1,rightshoulder:b11,rightstick:b15,righttrigger:b13,rightx:a3,righty:a4,start:b20,x:b0,y:b1,platform:Windows,",
+"030000006e0500000520000000000000,JC-P301U,a:b2,b:b3,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b8,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b9,righttrigger:b7,rightx:a2,righty:a3,start:b11,x:b0,y:b1,platform:Windows,",
+"030000006e0500000320000000000000,JC-U3613M (DInput),a:b2,b:b3,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b8,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b9,righttrigger:b7,rightx:a2,righty:a3,start:b11,x:b0,y:b1,platform:Windows,",
+"030000006e0500000720000000000000,JC-W01U,a:b2,b:b3,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b1,platform:Windows,",
+"030000007e0500000620000000000000,Joy-Con (L),+leftx:h0.2,+lefty:h0.4,-leftx:h0.8,-lefty:h0.1,a:b0,b:b1,back:b13,leftshoulder:b4,leftstick:b10,rightshoulder:b5,start:b8,x:b2,y:b3,platform:Windows,",
+"030000007e0500000620000001000000,Joy-Con (L),+leftx:h0.2,+lefty:h0.4,-leftx:h0.8,-lefty:h0.1,a:b0,b:b1,back:b13,leftshoulder:b4,leftstick:b10,rightshoulder:b5,start:b8,x:b2,y:b3,platform:Windows,",
+"030000007e0500000720000000000000,Joy-Con (R),+leftx:h0.2,+lefty:h0.4,-leftx:h0.8,-lefty:h0.1,a:b0,b:b1,back:b12,leftshoulder:b4,leftstick:b11,rightshoulder:b5,start:b9,x:b2,y:b3,platform:Windows,",
+"030000007e0500000720000001000000,Joy-Con (R),+leftx:h0.2,+lefty:h0.4,-leftx:h0.8,-lefty:h0.1,a:b0,b:b1,back:b12,leftshoulder:b4,leftstick:b11,rightshoulder:b5,start:b9,x:b2,y:b3,platform:Windows,",
+"03000000bd12000003c0000010010000,Joypad Alpha Shock,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Windows,",
+"03000000bd12000003c0000000000000,JY-P70UR,a:b1,b:b0,back:b5,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b10,lefttrigger:b7,leftx:a0,lefty:a1,rightshoulder:b8,rightstick:b11,righttrigger:b9,rightx:a3,righty:a2,start:b4,x:b3,y:b2,platform:Windows,",
+"03000000242f00002d00000000000000,JYS Wireless Adapter,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,platform:Windows,",
+"03000000242f00008a00000000000000,JYS Wireless Adapter,a:b1,b:b4,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b6,leftstick:b13,lefttrigger:b8,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a3,righty:a4,start:b11,x:b0,y:b3,platform:Windows,",
+"03000000790000000200000000000000,King PS3 Controller,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a4,start:b9,x:b3,y:b0,platform:Windows,",
+"030000006d040000d1ca000000000000,Logitech ChillStream,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Windows,",
+"030000006d040000d2ca000000000000,Logitech Cordless Precision,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Windows,",
+"030000006d04000011c2000000000000,Logitech Cordless Wingman,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b9,leftstick:b5,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b10,rightstick:b2,righttrigger:b7,rightx:a3,righty:a4,x:b4,platform:Windows,",
+"030000006d04000016c2000000000000,Logitech Dual Action,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Windows,",
+"030000006d04000018c2000000000000,Logitech F510 Gamepad,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Windows,",
+"030000006d04000019c2000000000000,Logitech F710 Gamepad,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Windows,",
+"030000006d0400001ac2000000000000,Logitech Precision Gamepad,a:b1,b:b2,back:b8,dpdown:+a1,dpleft:-a0,dpright:+a0,dpup:-a1,leftshoulder:b4,lefttrigger:b6,rightshoulder:b5,righttrigger:b7,start:b9,x:b0,y:b3,platform:Windows,",
+"030000006d0400000ac2000000000000,Logitech WingMan RumblePad,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,lefttrigger:b7,leftx:a0,lefty:a1,rightshoulder:b5,righttrigger:b2,rightx:a3,righty:a4,x:b3,y:b4,platform:Windows,",
+"03000000380700006652000000000000,Mad Catz C.T.R.L.R,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a3,righty:a4,start:b9,x:b0,y:b3,platform:Windows,",
+"03000000380700005032000000000000,Mad Catz FightPad PRO (PS3),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Windows,",
+"03000000380700005082000000000000,Mad Catz FightPad PRO (PS4),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Windows,",
+"03000000380700008433000000000000,Mad Catz FightStick TE S+ (PS3),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Windows,",
+"03000000380700008483000000000000,Mad Catz FightStick TE S+ (PS4),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Windows,",
+"03000000380700008134000000000000,Mad Catz FightStick TE2+ PS3,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b7,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b4,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Windows,",
+"03000000380700008184000000000000,Mad Catz FightStick TE2+ PS4,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b5,leftstick:b10,lefttrigger:a4,leftx:a0,lefty:a1,rightshoulder:b4,rightstick:b11,righttrigger:b7,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Windows,",
+"03000000380700006252000000000000,Mad Catz Micro C.T.R.L.R,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a3,righty:a4,start:b9,x:b0,y:b3,platform:Windows,",
+"03000000380700008034000000000000,Mad Catz TE2 PS3 Fightstick,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Windows,",
+"03000000380700008084000000000000,Mad Catz TE2 PS4 Fightstick,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Windows,",
+"03000000380700008532000000000000,Madcatz Arcade Fightstick TE S PS3,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Windows,",
+"03000000380700003888000000000000,Madcatz Arcade Fightstick TE S+ PS3,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Windows,",
+"03000000380700001888000000000000,MadCatz SFIV FightStick PS3,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b5,lefttrigger:b7,leftx:a0,lefty:a1,rightshoulder:b4,righttrigger:b6,rightx:a2,righty:a3,start:b9,x:b2,y:b3,platform:Windows,",
+"03000000380700008081000000000000,MADCATZ SFV Arcade FightStick Alpha PS4,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,righttrigger:b7,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Windows,",
+"030000002a0600001024000000000000,Matricom,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a3,righty:a4,start:b9,x:b2,y:b3,platform:Windows,",
+"030000009f000000adbb000000000000,MaxJoypad Virtual Controller,a:b1,b:b2,back:b9,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,righttrigger:b7,rightx:a2,righty:a3,start:b8,x:b3,y:b0,platform:Windows,",
+"03000000250900000128000000000000,Mayflash Arcade Stick,a:b1,b:b2,back:b8,leftshoulder:b0,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b3,righttrigger:b7,start:b9,x:b5,y:b6,platform:Windows,",
+"03000000790000004418000000000000,Mayflash GameCube Controller,a:b1,b:b2,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b7,righttrigger:a4,rightx:a5,righty:a2,start:b9,x:b0,y:b3,platform:Windows,",
+"03000000790000004318000000000000,Mayflash GameCube Controller Adapter,a:b1,b:b2,back:b0,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b0,leftshoulder:b4,leftstick:b0,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b0,righttrigger:a4,rightx:a5,righty:a2,start:b9,x:b0,y:b3,platform:Windows,",
+"03000000242f00007300000000000000,Mayflash Magic NS,a:b1,b:b4,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b6,leftstick:b13,lefttrigger:b8,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a3,righty:a4,start:b11,x:b0,y:b3,platform:Windows,",
+"0300000079000000d218000000000000,Mayflash Magic NS,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,platform:Windows,",
+"03000000d620000010a7000000000000,Mayflash Magic NS,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Windows,",
+"030000008f0e00001030000000000000,Mayflash USB Adapter for original Sega Saturn controller,a:b0,b:b1,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,lefttrigger:b5,rightshoulder:b2,righttrigger:b7,start:b9,x:b3,y:b4,platform:Windows,",
+"0300000025090000e803000000000000,Mayflash Wii Classic Controller,a:b1,b:b0,back:b8,dpdown:b13,dpleft:b12,dpright:b14,dpup:b11,guide:b10,leftshoulder:b4,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b2,platform:Windows,",
+"03000000790000000018000000000000,Mayflash WiiU Pro Game Controller Adapter (DInput),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Windows,",
+"03000000790000002418000000000000,Mega Drive,a:b0,b:b1,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,rightshoulder:b2,start:b9,x:b3,y:b4,platform:Windows,",
+"03000000380700006382000000000000,MLG GamePad PS3 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Windows,",
+"03000000c62400002a89000000000000,MOGA XP5-A Plus,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b15,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a3,righty:a4,start:b11,x:b3,y:b4,platform:Windows,",
+"03000000c62400002b89000000000000,MOGA XP5-A Plus,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a3,righty:a4,start:b11,x:b3,y:b4,platform:Windows,",
+"03000000c62400001a89000000000000,MOGA XP5-X Plus,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a3,righty:a4,start:b11,x:b3,y:b4,platform:Windows,",
+"03000000c62400001b89000000000000,MOGA XP5-X Plus,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a3,righty:a4,start:b11,x:b3,y:b4,platform:Windows,",
+"03000000efbe0000edfe000000000000,Monect Virtual Controller,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a5,rightx:a3,righty:a4,start:b9,x:b3,y:b0,platform:Windows,",
+"03000000250900006688000000000000,MP-8866 Super Dual Box,a:b2,b:b1,back:b9,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b10,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b11,righttrigger:b5,rightx:a2,righty:a3,start:b8,x:b3,y:b0,platform:Windows,",
+"030000006b140000010c000000000000,NACON GC-400ES,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b2,y:b3,platform:Windows,",
+"03000000921200004b46000000000000,NES 2-port Adapter,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,start:b11,platform:Windows,",
+"03000000790000004518000000000000,NEXILUX GAMECUBE Controller Adapter,platform:Windows,a:b1,b:b0,x:b2,y:b3,start:b9,rightshoulder:b7,dpup:h0.1,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,leftx:a0,lefty:a1,rightx:a5,righty:a2,lefttrigger:a3,righttrigger:a4,",
+"030000001008000001e5000000000000,NEXT SNES Controller,a:b2,b:b1,back:b8,dpdown:+a1,dpleft:-a0,dpright:+a0,dpup:-a1,leftshoulder:b4,rightshoulder:b5,righttrigger:b6,start:b9,x:b3,y:b0,platform:Windows,",
+"03000000152000000182000000000000,NGDS,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a3,righty:a4,start:b9,x:b3,y:b0,platform:Windows,",
+"03000000bd12000015d0000000000000,Nintendo Retrolink USB Super SNES Classic Controller,a:b2,b:b1,back:b8,leftshoulder:b4,leftx:a0,lefty:a1,rightshoulder:b5,start:b9,x:b3,y:b0,platform:Windows,",
+"030000007e0500000920000000000000,Nintendo Switch Pro Controller,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b2,y:b3,platform:Windows,",
+"030000000d0500000308000000000000,Nostromo N45,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b9,leftshoulder:b4,leftstick:b12,lefttrigger:b5,leftx:a0,lefty:a1,rightshoulder:b6,rightstick:b11,righttrigger:b7,rightx:a3,righty:a2,start:b10,x:b2,y:b3,platform:Windows,",
+"03000000550900001472000000000000,NVIDIA Controller v01.04,a:b11,b:b10,back:b13,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b7,leftstick:b5,lefttrigger:a4,leftx:a0,lefty:a1,rightshoulder:b6,rightstick:b4,righttrigger:a5,rightx:a3,righty:a6,start:b3,x:b9,y:b8,platform:Windows,",
+"030000004b120000014d000000000000,NYKO AIRFLO,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b10,leftshoulder:a3,leftstick:a0,lefttrigger:b6,rightshoulder:b5,rightstick:a2,righttrigger:b7,start:b9,x:b2,y:b3,platform:Windows,",
+"03000000d620000013a7000000000000,NSW wired controller,platform:Windows,a:b1,b:b2,x:b0,y:b3,back:b8,guide:b12,start:b9,leftstick:b10,rightstick:b11,leftshoulder:b4,rightshoulder:b5,dpup:h0.1,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,leftx:a0,lefty:a1,rightx:a2,righty:a3,lefttrigger:b6,righttrigger:b7,",
+"03000000782300000a10000000000000,Onlive Wireless Controller,a:b15,b:b14,back:b7,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b5,leftshoulder:b11,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b10,rightstick:b8,righttrigger:a5,rightx:a3,righty:a4,start:b6,x:b13,y:b12,platform:Windows,",
+"03000000d62000006d57000000000000,OPP PS3 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Windows,",
+"030000006b14000001a1000000000000,Orange Controller,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b10,leftshoulder:b4,leftstick:b6,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b7,righttrigger:a4,rightx:a5,righty:a2,start:b9,x:b2,y:b3,platform:Windows,",
+"03000000362800000100000000000000,OUYA Game Controller,a:b0,b:b3,dpdown:b9,dpleft:b10,dpright:b11,dpup:b8,guide:b14,leftshoulder:b4,leftstick:b6,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b7,righttrigger:b13,rightx:a3,righty:a4,x:b1,y:b2,platform:Windows,",
+"03000000120c0000f60e000000000000,P4 Wired Gamepad,a:b1,b:b2,back:b12,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b5,lefttrigger:b7,rightshoulder:b4,righttrigger:b6,start:b9,x:b0,y:b3,platform:Windows,",
+"030000006f0e00000901000000000000,PDP Versus Fighting Pad,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,lefttrigger:b6,rightshoulder:b5,righttrigger:b7,start:b9,x:b0,y:b3,platform:Windows,",
+"030000008f0e00000300000000000000,Piranha xtreme,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b10,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b11,righttrigger:b5,rightx:a3,righty:a2,start:b9,x:b3,y:b0,platform:Windows,",
+"030000004c050000da0c000000000000,PlayStation Classic Controller,a:b2,b:b1,back:b8,dpdown:+a1,dpleft:-a0,dpright:+a0,dpup:-a1,leftshoulder:b6,lefttrigger:b4,rightshoulder:b7,righttrigger:b5,start:b9,x:b3,y:b0,platform:Windows,",
+"030000004c0500003713000000000000,PlayStation Vita,a:b1,b:b2,back:b8,dpdown:b13,dpleft:b15,dpright:b14,dpup:b12,leftshoulder:b4,leftx:a0,lefty:a1,rightshoulder:b5,rightx:a3,righty:a4,start:b9,x:b0,y:b3,platform:Windows,",
+"03000000d62000006dca000000000000,PowerA Pro Ex,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Windows,",
+"03000000d62000009557000000000000,Pro Elite PS3 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Windows,",
+"03000000d62000009f31000000000000,Pro Ex mini PS3 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Windows,",
+"03000000d6200000c757000000000000,Pro Ex mini PS3 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Windows,",
+"03000000632500002306000000000000,PS Controller,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a2,righty:a3,start:b11,x:b3,y:b4,platform:Windows,",
+"03000000e30500009605000000000000,PS to USB convert cable,a:b2,b:b1,back:b9,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b10,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b11,righttrigger:b5,rightx:a2,righty:a3,start:b8,x:b3,y:b0,platform:Windows,",
+"03000000100800000100000000000000,PS1 Controller,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b10,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b11,righttrigger:b5,rightx:a3,righty:a2,start:b9,x:b3,y:b0,platform:Windows,",
+"030000008f0e00007530000000000000,PS1 Controller,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b1,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Windows,",
+"03000000100800000300000000000000,PS2 Controller,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b10,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b11,righttrigger:b5,rightx:a4,righty:a2,start:b9,x:b3,y:b0,platform:Windows,",
+"03000000250900008888000000000000,PS2 Controller,a:b2,b:b1,back:b9,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b10,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b11,righttrigger:b5,rightx:a2,righty:a3,start:b8,x:b3,y:b0,platform:Windows,",
+"03000000666600006706000000000000,PS2 Controller,a:b2,b:b1,back:b8,dpdown:b14,dpleft:b15,dpright:b13,dpup:b12,leftshoulder:b6,leftstick:b9,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b10,righttrigger:b5,rightx:a2,righty:a3,start:b11,x:b3,y:b0,platform:Windows,",
+"030000006b1400000303000000000000,PS2 Controller,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b2,y:b3,platform:Windows,",
+"030000009d0d00001330000000000000,PS2 Controller,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b5,leftx:a0,lefty:a1,rightshoulder:b6,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b2,y:b3,platform:Windows,",
+"03000000250900000500000000000000,PS3 Controller,a:b2,b:b1,back:b9,dpdown:h0.8,dpleft:h0.4,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b10,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b11,righttrigger:b5,rightx:a2,righty:a3,start:b8,x:b0,y:b3,platform:Windows,",
+"030000004c0500006802000000000000,PS3 Controller,a:b2,b:b1,back:b9,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b6,leftstick:b10,lefttrigger:a3~,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b11,righttrigger:a4~,rightx:a2,righty:a5,start:b8,x:b3,y:b0,platform:Windows,",
+"03000000632500007505000000000000,PS3 Controller,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,platform:Windows,",
+"03000000888800000803000000000000,PS3 Controller,a:b2,b:b1,back:b8,dpdown:h0.8,dpleft:h0.4,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b9,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:b7,rightx:a3,righty:a4,start:b11,x:b0,y:b3,platform:Windows,",
+"030000008f0e00001431000000000000,PS3 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Windows,",
+"030000003807000056a8000000000000,PS3 RF pad,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Windows,",
+"03000000100000008200000000000000,PS360+ v1.66,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,lefttrigger:b6,leftx:h0.4,rightshoulder:b5,righttrigger:b7,start:b9,x:b0,y:b3,platform:Windows,",
+"030000004c050000a00b000000000000,PS4 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Windows,",
+"030000004c050000c405000000000000,PS4 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Windows,",
+"030000004c050000cc09000000000000,PS4 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Windows,",
+"030000004c050000e60c000000000000,PS5 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,misc1:b13,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Windows,",
+"03000000ff000000cb01000000000000,PSP,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftx:a0,lefty:a1,rightshoulder:b5,start:b7,x:b2,y:b3,platform:Windows,",
+"03000000300f00000011000000000000,QanBa Arcade JoyStick 1008,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,righttrigger:b7,start:b10,x:b0,y:b3,platform:Windows,",
+"03000000300f00001611000000000000,QanBa Arcade JoyStick 4018,a:b1,b:b2,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b9,leftshoulder:b4,lefttrigger:b6,rightshoulder:b5,righttrigger:b7,start:b8,x:b0,y:b3,platform:Windows,",
+"03000000222c00000020000000000000,QANBA DRONE ARCADE JOYSTICK,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,lefttrigger:a3,rightshoulder:b5,righttrigger:a4,start:b9,x:b0,y:b3,platform:Windows,",
+"03000000300f00001210000000000000,QanBa Joystick Plus,a:b0,b:b1,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,rightshoulder:b5,start:b9,x:b2,y:b3,platform:Windows,",
+"03000000341a00000104000000000000,QanBa Joystick Q4RAF,a:b5,b:b6,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b10,leftshoulder:b0,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b3,righttrigger:b7,start:b9,x:b1,y:b2,platform:Windows,",
+"03000000222c00000223000000000000,Qanba Obsidian Arcade Joystick PS3 Mode,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Windows,",
+"03000000222c00000023000000000000,Qanba Obsidian Arcade Joystick PS4 Mode,a:b1,b:b2,back:b13,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Windows,",
+"03000000321500000003000000000000,Razer Hydra,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b8,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b9,righttrigger:a2,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Windows,",
+"03000000321500000204000000000000,Razer Panthera (PS3),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Windows,",
+"03000000321500000104000000000000,Razer Panthera (PS4),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Windows,",
+"03000000321500000507000000000000,Razer Raiju Mobile,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a3,righty:a4,start:b11,x:b3,y:b4,platform:Windows,",
+"03000000321500000707000000000000,Razer Raiju Mobile,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a3,righty:a4,start:b11,x:b3,y:b4,platform:Windows,",
+"03000000321500000011000000000000,Razer Raion Fightpad for PS4,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Windows,",
+"03000000321500000009000000000000,Razer Serval,+lefty:+a2,-lefty:-a1,a:b0,b:b1,back:b12,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b10,leftshoulder:b4,leftstick:b8,leftx:a0,rightshoulder:b5,rightstick:b9,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Windows,",
+"030000000d0f00001100000000000000,REAL ARCADE PRO.3,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,rightshoulder:b5,rightstick:b11,righttrigger:b7,start:b9,x:b0,y:b3,platform:Windows,",
+"030000000d0f00006a00000000000000,Real Arcade Pro.4,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Windows,",
+"030000000d0f00006b00000000000000,Real Arcade Pro.4,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Windows,",
+"030000000d0f00008a00000000000000,Real Arcade Pro.4,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Windows,",
+"030000000d0f00008b00000000000000,Real Arcade Pro.4,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Windows,",
+"030000000d0f00007000000000000000,REAL ARCADE PRO.4 VLX,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,rightshoulder:b5,rightstick:b11,righttrigger:b7,start:b9,x:b0,y:b3,platform:Windows,",
+"030000000d0f00002200000000000000,REAL ARCADE Pro.V3,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Windows,",
+"030000000d0f00005b00000000000000,Real Arcade Pro.V4,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Windows,",
+"030000000d0f00005c00000000000000,Real Arcade Pro.V4,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Windows,",
+"03000000790000001100000000000000,Retrolink SNES Controller,a:b2,b:b1,back:b8,dpdown:+a4,dpleft:-a3,dpright:+a3,dpup:-a4,leftshoulder:b4,rightshoulder:b5,start:b9,x:b3,y:b0,platform:Windows,",
+"03000000bd12000013d0000000000000,Retrolink USB SEGA Saturn Classic,a:b0,b:b1,dpdown:+a1,dpleft:-a0,dpright:+a0,dpup:-a1,leftshoulder:b5,lefttrigger:b6,rightshoulder:b2,righttrigger:b7,start:b8,x:b3,y:b4,platform:Windows,",
+"0300000000f000000300000000000000,RetroUSB.com RetroPad,a:b1,b:b5,back:b2,leftshoulder:b6,leftx:a0,lefty:a1,rightshoulder:b7,start:b3,x:b0,y:b4,platform:Windows,",
+"0300000000f00000f100000000000000,RetroUSB.com Super RetroPort,a:b1,b:b5,back:b2,leftshoulder:b6,leftx:a0,lefty:a1,rightshoulder:b7,start:b3,x:b0,y:b4,platform:Windows,",
+"030000006b140000010d000000000000,Revolution Pro Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Windows,",
+"030000006b140000020d000000000000,Revolution Pro Controller 2(1/2),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Windows,",
+"030000006b140000130d000000000000,Revolution Pro Controller 3,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Windows,",
+"030000006f0e00001e01000000000000,Rock Candy PS3 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Windows,",
+"030000006f0e00002801000000000000,Rock Candy PS3 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Windows,",
+"030000006f0e00002f01000000000000,Rock Candy PS3 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Windows,",
+"030000004f04000003d0000000000000,run'n'drive,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b7,leftshoulder:a3,leftstick:b10,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:a4,rightstick:b11,righttrigger:b5,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Windows,",
+"03000000a30600001af5000000000000,Saitek Cyborg,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a3,righty:a4,start:b9,x:b0,y:b3,platform:Windows,",
+"03000000a306000023f6000000000000,Saitek Cyborg V.1 Game pad,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a4,start:b9,x:b0,y:b3,platform:Windows,",
+"03000000300f00001201000000000000,Saitek Dual Analog Pad,a:b2,b:b3,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b5,leftx:a0,lefty:a1,rightshoulder:b6,rightstick:b11,righttrigger:b7,rightx:a3,righty:a2,start:b9,x:b0,y:b1,platform:Windows,",
+"03000000a30600000701000000000000,Saitek P220,a:b2,b:b3,dpdown:+a1,dpleft:-a0,dpright:+a0,dpup:-a1,leftshoulder:b6,lefttrigger:b7,rightshoulder:b4,righttrigger:b5,x:b0,y:b1,platform:Windows,",
+"03000000a30600000cff000000000000,Saitek P2500 Force Rumble Pad,a:b2,b:b3,back:b11,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b8,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b9,righttrigger:b7,rightx:a2,righty:a3,start:b10,x:b0,y:b1,platform:Windows,",
+"03000000a30600000c04000000000000,Saitek P2900,a:b1,b:b2,back:b12,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a3,righty:a2,start:b9,x:b0,y:b3,platform:Windows,",
+"03000000300f00001001000000000000,Saitek P480 Rumble Pad,a:b2,b:b3,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b5,leftx:a0,lefty:a1,rightshoulder:b6,rightstick:b11,righttrigger:b7,rightx:a3,righty:a2,start:b9,x:b0,y:b1,platform:Windows,",
+"03000000a30600000b04000000000000,Saitek P990,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a3,righty:a2,start:b9,x:b0,y:b3,platform:Windows,",
+"03000000a30600000b04000000010000,Saitek P990 Dual Analog Pad,a:b1,b:b2,back:b9,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a3,righty:a2,start:b8,x:b0,y:b3,platform:Windows,",
+"03000000a30600002106000000000000,Saitek PS1000,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a4,start:b9,x:b0,y:b3,platform:Windows,",
+"03000000a306000020f6000000000000,Saitek PS2700,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a4,start:b9,x:b0,y:b3,platform:Windows,",
+"03000000300f00001101000000000000,Saitek Rumble Pad,a:b2,b:b3,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b10,lefttrigger:b5,leftx:a0,lefty:a1,rightshoulder:b6,rightstick:b11,righttrigger:b7,rightx:a3,righty:a2,start:b9,x:b0,y:b1,platform:Windows,",
+"03000000730700000401000000000000,Sanwa PlayOnline Mobile,a:b0,b:b1,back:b2,leftx:a0,lefty:a1,start:b3,platform:Windows,",
+"0300000000050000289b000000000000,Saturn_Adapter_2.0,a:b1,b:b2,leftshoulder:b6,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b7,righttrigger:b5,start:b9,x:b0,y:b3,platform:Windows,",
+"030000009b2800000500000000000000,Saturn_Adapter_2.0,a:b1,b:b2,leftshoulder:b6,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b7,righttrigger:b5,start:b9,x:b0,y:b3,platform:Windows,",
+"03000000a30c00002500000000000000,Sega Genesis Mini 3B controller,a:b2,b:b1,dpdown:+a4,dpleft:-a3,dpright:+a3,dpup:-a4,righttrigger:b5,start:b9,platform:Windows,",
+"03000000a30c00002400000000000000,Sega Mega Drive Mini 6B controller,a:b2,b:b1,dpdown:+a4,dpleft:-a3,dpright:+a3,dpup:-a4,rightshoulder:b4,righttrigger:b5,start:b9,x:b3,y:b0,platform:Windows,",
+"03000000341a00000208000000000000,SL-6555-SBK,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b8,lefttrigger:-a4,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b9,righttrigger:a4,rightx:a3,righty:a2,start:b7,x:b2,y:b3,platform:Windows,",
+"03000000341a00000908000000000000,SL-6566,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b2,y:b3,platform:Windows,",
+"030000008f0e00000800000000000000,SpeedLink Strike FX,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,platform:Windows,",
+"03000000c01100000591000000000000,Speedlink Torid,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,platform:Windows,",
+"03000000d11800000094000000000000,Stadia Controller,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b10,leftshoulder:b4,leftstick:b6,lefttrigger:b12,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b7,righttrigger:b11,rightx:a3,righty:a4,start:b9,x:b2,y:b3,platform:Windows,",
+"03000000110100001914000000000000,SteelSeries,a:b0,b:b1,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftstick:b13,lefttrigger:b6,leftx:a0,lefty:a1,rightstick:b14,righttrigger:b7,rightx:a3,righty:a4,start:b11,x:b3,y:b4,platform:Windows,",
+"03000000381000001214000000000000,SteelSeries Free,a:b0,b:b1,back:b12,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftx:a0,lefty:a1,rightshoulder:b7,rightx:a2,righty:a3,start:b11,x:b3,y:b4,platform:Windows,",
+"03000000110100003114000000000000,SteelSeries Stratus Duo,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a3,righty:a4,start:b11,x:b3,y:b4,platform:Windows,",
+"03000000381000001814000000000000,SteelSeries Stratus XL,a:b0,b:b1,back:b18,dpdown:b13,dpleft:b14,dpright:b15,dpup:b12,guide:b19,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b2,y:b3,platform:Windows,",
+"03000000790000001c18000000000000,STK-7024X,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a3,righty:a4,start:b11,x:b3,y:b4,platform:Windows,",
+"03000000ff1100003133000000000000,SVEN X-PAD,a:b2,b:b3,back:b4,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,righttrigger:b9,rightx:a2,righty:a4,start:b5,x:b0,y:b1,platform:Windows,",
+"03000000d620000011a7000000000000,Switch,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Windows,",
+"03000000457500002211000000000000,SZMY-POWER PC Gamepad,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Windows,",
+"030000004f04000007d0000000000000,T Mini Wireless,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Windows,",
+"030000004f0400000ab1000000000000,T.16000M,a:b0,b:b1,back:b12,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b11,leftshoulder:b4,lefttrigger:b9,leftx:a0,lefty:a1,rightshoulder:b6,righttrigger:b7,start:b10,x:b2,y:b3,platform:Windows,",
+"03000000fa1900000706000000000000,Team 5,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,platform:Windows,",
+"03000000b50700001203000000000000,Techmobility X6-38V,a:b2,b:b3,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b5,leftx:a0,lefty:a1,rightshoulder:b6,rightstick:b11,righttrigger:b7,rightx:a3,righty:a2,start:b9,x:b0,y:b1,platform:Windows,",
+"030000004f04000015b3000000000000,Thrustmaster Dual Analog 4,a:b0,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b5,leftx:a0,lefty:a1,rightshoulder:b6,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b1,y:b3,platform:Windows,",
+"030000004f04000023b3000000000000,Thrustmaster Dual Trigger 3-in-1,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Windows,",
+"030000004f0400000ed0000000000000,ThrustMaster eSwap PRO Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Windows,",
+"030000004f04000000b3000000000000,Thrustmaster Firestorm Dual Power,a:b0,b:b2,back:b9,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b11,lefttrigger:b5,leftx:a0,lefty:a1,rightshoulder:b6,rightstick:b12,righttrigger:b7,rightx:a2,righty:a3,start:b10,x:b1,y:b3,platform:Windows,",
+"030000004f04000004b3000000000000,Thrustmaster Firestorm Dual Power 3,a:b0,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b5,leftx:a0,lefty:a1,rightshoulder:b6,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b1,y:b3,platform:Windows,",
+"03000000666600000488000000000000,TigerGame PS/PS2 Game Controller Adapter,a:b2,b:b1,back:b9,dpdown:b14,dpleft:b15,dpright:b13,dpup:b12,leftshoulder:b6,leftstick:b10,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b11,righttrigger:b5,rightx:a2,righty:a3,start:b8,x:b3,y:b0,platform:Windows,",
+"03000000d62000006000000000000000,Tournament PS3 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Windows,",
+"030000005f140000c501000000000000,Trust Gamepad,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,platform:Windows,",
+"03000000b80500000210000000000000,Trust Gamepad,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,platform:Windows,",
+"030000004f04000087b6000000000000,TWCS Throttle,dpdown:b8,dpleft:b9,dpright:b7,dpup:b6,leftstick:b5,lefttrigger:-a5,leftx:a0,lefty:a1,righttrigger:+a5,platform:Windows,",
+"03000000d90400000200000000000000,TwinShock PS2,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b10,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b11,righttrigger:b5,rightx:a3,righty:a2,start:b9,x:b3,y:b0,platform:Windows,",
+"030000006e0500001320000000000000,U4113,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Windows,",
+"03000000101c0000171c000000000000,uRage Gamepad,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,platform:Windows,",
+"03000000300f00000701000000000000,USB 4-Axis 12-Button Gamepad,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a3,righty:a2,start:b9,x:b3,y:b0,platform:Windows,",
+"03000000341a00002308000000000000,USB gamepad,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b2,y:b3,platform:Windows,",
+"030000005509000000b4000000000000,USB gamepad,a:b10,b:b11,back:b5,dpdown:b1,dpleft:b2,dpright:b3,dpup:b0,guide:b14,leftshoulder:b8,leftstick:b6,lefttrigger:a4,leftx:a0,lefty:a1,rightshoulder:b9,rightstick:b7,righttrigger:a5,rightx:a2,righty:a3,start:b4,x:b12,y:b13,platform:Windows,",
+"030000006b1400000203000000000000,USB gamepad,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b2,y:b3,platform:Windows,",
+"03000000790000000a00000000000000,USB gamepad,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a4,start:b9,x:b3,y:b0,platform:Windows,",
+"03000000f0250000c183000000000000,USB gamepad,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Windows,",
+"03000000ff1100004133000000000000,USB gamepad,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b10,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b11,righttrigger:b5,rightx:a4,righty:a2,start:b9,x:b3,y:b0,platform:Windows,",
+"03000000632500002305000000000000,USB Vibration Joystick (BM),a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,platform:Windows,",
+"03000000790000001a18000000000000,Venom,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Windows,",
+"03000000790000001b18000000000000,Venom Arcade Joystick,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,lefttrigger:b6,rightshoulder:b5,righttrigger:b7,start:b9,x:b0,y:b3,platform:Windows,",
+"030000006f0e00000302000000000000,Victrix Pro Fight Stick for PS4,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,lefttrigger:b6,rightshoulder:b5,righttrigger:b7,start:b9,x:b0,y:b3,platform:Windows,",
+"030000006f0e00000702000000000000,Victrix Pro Fight Stick for PS4,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,rightshoulder:b5,righttrigger:b7,start:b9,x:b0,y:b3,platform:Windows,",
+"0300000034120000adbe000000000000,vJoy Device,a:b0,b:b1,back:b15,dpdown:b6,dpleft:b7,dpright:b8,dpup:b5,guide:b16,leftshoulder:b9,leftstick:b13,lefttrigger:b11,leftx:a0,lefty:a1,rightshoulder:b10,rightstick:b14,righttrigger:b12,rightx:a3,righty:a4,start:b4,x:b2,y:b3,platform:Windows,",
+"030000005e0400000a0b000000000000,Xbox Adaptive Controller,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b8,lefttrigger:+a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b9,righttrigger:-a2,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Windows,",
+"030000005e040000130b000000000000,Xbox Series Controller,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b10,leftshoulder:b4,leftstick:b8,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b9,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Windows,",
+"03000000341a00000608000000000000,Xeox,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b2,y:b3,platform:Windows,",
+"03000000450c00002043000000000000,XEOX Gamepad SL-6556-BK,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b2,y:b3,platform:Windows,",
+"03000000ac0500005b05000000000000,Xiaoji Gamesir-G3w,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,platform:Windows,",
+"03000000172700004431000000000000,XiaoMi Game Controller,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b20,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a7,rightx:a2,righty:a5,start:b11,x:b3,y:b4,platform:Windows,",
+"03000000786901006e70000000000000,XInput Controller,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b10,leftshoulder:b4,leftstick:b8,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b9,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Windows,",
+"03000000790000004f18000000000000,ZD-T Android,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a3,righty:a4,start:b11,x:b3,y:b4,platform:Windows,",
+"03000000120c0000101e000000000000,ZEROPLUS P4 Wired Gamepad,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Windows,",
+"78696e70757401000000000000000000,XInput Gamepad (GLFW),platform:Windows,a:b0,b:b1,x:b2,y:b3,leftshoulder:b4,rightshoulder:b5,back:b6,start:b7,leftstick:b8,rightstick:b9,leftx:a0,lefty:a1,rightx:a2,righty:a3,lefttrigger:a4,righttrigger:a5,dpup:h0.1,dpright:h0.2,dpdown:h0.4,dpleft:h0.8,",
+"78696e70757402000000000000000000,XInput Wheel (GLFW),platform:Windows,a:b0,b:b1,x:b2,y:b3,leftshoulder:b4,rightshoulder:b5,back:b6,start:b7,leftstick:b8,rightstick:b9,leftx:a0,lefty:a1,rightx:a2,righty:a3,lefttrigger:a4,righttrigger:a5,dpup:h0.1,dpright:h0.2,dpdown:h0.4,dpleft:h0.8,",
+"78696e70757403000000000000000000,XInput Arcade Stick (GLFW),platform:Windows,a:b0,b:b1,x:b2,y:b3,leftshoulder:b4,rightshoulder:b5,back:b6,start:b7,leftstick:b8,rightstick:b9,leftx:a0,lefty:a1,rightx:a2,righty:a3,lefttrigger:a4,righttrigger:a5,dpup:h0.1,dpright:h0.2,dpdown:h0.4,dpleft:h0.8,",
+"78696e70757404000000000000000000,XInput Flight Stick (GLFW),platform:Windows,a:b0,b:b1,x:b2,y:b3,leftshoulder:b4,rightshoulder:b5,back:b6,start:b7,leftstick:b8,rightstick:b9,leftx:a0,lefty:a1,rightx:a2,righty:a3,lefttrigger:a4,righttrigger:a5,dpup:h0.1,dpright:h0.2,dpdown:h0.4,dpleft:h0.8,",
+"78696e70757405000000000000000000,XInput Dance Pad (GLFW),platform:Windows,a:b0,b:b1,x:b2,y:b3,leftshoulder:b4,rightshoulder:b5,back:b6,start:b7,leftstick:b8,rightstick:b9,leftx:a0,lefty:a1,rightx:a2,righty:a3,lefttrigger:a4,righttrigger:a5,dpup:h0.1,dpright:h0.2,dpdown:h0.4,dpleft:h0.8,",
+"78696e70757406000000000000000000,XInput Guitar (GLFW),platform:Windows,a:b0,b:b1,x:b2,y:b3,leftshoulder:b4,rightshoulder:b5,back:b6,start:b7,leftstick:b8,rightstick:b9,leftx:a0,lefty:a1,rightx:a2,righty:a3,lefttrigger:a4,righttrigger:a5,dpup:h0.1,dpright:h0.2,dpdown:h0.4,dpleft:h0.8,",
+"78696e70757408000000000000000000,XInput Drum Kit (GLFW),platform:Windows,a:b0,b:b1,x:b2,y:b3,leftshoulder:b4,rightshoulder:b5,back:b6,start:b7,leftstick:b8,rightstick:b9,leftx:a0,lefty:a1,rightx:a2,righty:a3,lefttrigger:a4,righttrigger:a5,dpup:h0.1,dpright:h0.2,dpdown:h0.4,dpleft:h0.8,",
+#endif // GLFW_BUILD_WIN32_MAPPINGS
+
+#if defined(GLFW_BUILD_COCOA_MAPPINGS)
+"030000008f0e00000300000009010000,2In1 USB Joystick,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,platform:Mac OS X,",
+"03000000c82d00000090000001000000,8BitDo FC30 Pro,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:a4,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a5,rightx:a2,righty:a3,start:b11,x:b4,y:b3,platform:Mac OS X,",
+"03000000c82d00001038000000010000,8BitDo FC30 Pro,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a4,rightx:a2,righty:a3,start:b11,x:b4,y:b3,platform:Mac OS X,",
+"03000000c82d00000650000001000000,8BitDo M30,a:b0,b:b1,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b8,lefttrigger:b9,leftx:a0,lefty:a1,rightshoulder:b6,righttrigger:b7,start:b11,x:b3,y:b4,platform:Mac OS X,",
+"03000000c82d00005106000000010000,8BitDo M30 Gamepad,a:b1,b:b0,back:b10,guide:b2,leftshoulder:b6,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b7,righttrigger:a4,start:b11,x:b4,y:b3,platform:Mac OS X,",
+"03000000c82d00001590000001000000,8BitDo N30 Pro 2,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b2,leftshoulder:b6,leftstick:b13,lefttrigger:a4,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a5,rightx:a2,righty:a3,start:b11,x:b4,y:b3,platform:Mac OS X,",
+"03000000c82d00006528000000010000,8BitDo N30 Pro 2,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b2,leftshoulder:b6,leftstick:b13,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a4,rightx:a2,righty:a3,start:b11,x:b4,y:b3,platform:Mac OS X,",
+"030000003512000012ab000001000000,8BitDo NES30 Gamepad,a:b1,b:b0,back:b10,dpdown:+a1,dpleft:-a0,dpright:+a0,dpup:-a1,leftshoulder:b6,rightshoulder:b7,start:b11,x:b4,y:b3,platform:Mac OS X,",
+"03000000022000000090000001000000,8Bitdo NES30 Pro,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a2,righty:a3,start:b11,x:b4,y:b3,platform:Mac OS X,",
+"03000000203800000900000000010000,8Bitdo NES30 Pro,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a2,righty:a3,start:b11,x:b4,y:b3,platform:Mac OS X,",
+"03000000c82d00000190000001000000,8Bitdo NES30 Pro,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a2,righty:a3,start:b11,x:b4,y:b3,platform:Mac OS X,",
+"03000000102800000900000000000000,8Bitdo SFC30 GamePad Joystick,a:b1,b:b0,back:b10,leftshoulder:b6,leftx:a0,lefty:a1,rightshoulder:b7,start:b11,x:b4,y:b3,platform:Mac OS X,",
+"03000000c82d00001290000001000000,8BitDo SN30 Gamepad,a:b1,b:b0,back:b10,leftshoulder:b6,leftx:a0,lefty:a1,rightshoulder:b7,start:b11,x:b4,y:b3,platform:Mac OS X,",
+"03000000c82d00004028000000010000,8Bitdo SN30 GamePad,a:b1,b:b0,x:b4,y:b3,back:b10,start:b11,leftshoulder:b6,rightshoulder:b7,dpup:-a1,dpdown:+a1,dpleft:-a0,dpright:+a0,platform:Mac OS X,",
+"03000000c82d00000160000001000000,8BitDo SN30 Pro,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:a4,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a5,rightx:a2,righty:a3,start:b11,x:b4,y:b3,platform:Mac OS X,",
+"03000000c82d00000161000000010000,8BitDo SN30 Pro,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b2,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a2,righty:a5,start:b11,x:b4,y:b3,platform:Mac OS X,",
+"03000000c82d00000260000001000000,8BitDo SN30 Pro+,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b2,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a2,righty:a3,start:b11,x:b4,y:b3,platform:Mac OS X,",
+"03000000c82d00000261000000010000,8BitDo SN30 Pro+,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b2,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a2,righty:a3,start:b11,x:b4,y:b3,platform:Mac OS X,",
+"03000000c82d00000031000001000000,8BitDo Wireless Adapter,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a2,righty:a3,start:b11,x:b3,y:b4,platform:Mac OS X,",
+"03000000c82d00001890000001000000,8BitDo Zero 2,a:b1,b:b0,back:b10,leftshoulder:b6,leftx:a0,lefty:a1,rightshoulder:b7,start:b11,x:b4,y:b3,platform:Mac OS X,",
+"03000000c82d00003032000000010000,8BitDo Zero 2,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftx:a0,lefty:a1,rightshoulder:b7,rightx:a2,righty:a31,start:b11,x:b4,y:b3,platform:Mac OS X,",
+"03000000a00500003232000008010000,8Bitdo Zero GamePad,a:b0,b:b1,back:b10,dpdown:+a1,dpleft:-a0,dpright:+a0,dpup:-a1,leftshoulder:b6,rightshoulder:b7,start:b11,x:b3,y:b4,platform:Mac OS X,",
+"03000000a00500003232000009010000,8Bitdo Zero GamePad,a:b0,b:b1,back:b10,dpdown:+a1,dpleft:-a0,dpright:+a0,dpup:-a1,leftshoulder:b6,rightshoulder:b7,start:b11,x:b3,y:b4,platform:Mac OS X,",
+"03000000a30c00002700000003030000,Astro City Mini,a:b2,b:b1,back:b8,leftx:a3,lefty:a4,rightshoulder:b4,righttrigger:b5,start:b9,x:b3,y:b0,platform:Mac OS X,",
+"03000000a30c00002800000003030000,Astro City Mini,a:b2,b:b1,back:b8,leftx:a3,lefty:a4,rightshoulder:b4,righttrigger:b5,start:b9,x:b3,y:b0,platform:Mac OS X,",
+"03000000050b00000045000031000000,ASUS Gamepad,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b7,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b8,righttrigger:a4,rightx:a2,righty:a3,start:b9,x:b2,y:b3,platform:Mac OS X,",
+"03000000ef0500000300000000020000,AxisPad,a:b2,b:b3,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b8,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b9,righttrigger:b7,rightx:a3,righty:a2,start:b11,x:b0,y:b1,platform:Mac OS X,",
+"03000000491900001904000001010000,Amazon Luna Controller,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,misc1:b9,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b7,x:b2,y:b3,platform:Mac OS X,",
+"03000000710100001904000000010000,Amazon Luna Controller,a:b0,b:b1,back:b11,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b10,leftshoulder:b4,leftstick:b7,lefttrigger:a5,leftx:a0,lefty:a1,misc1:b9,rightshoulder:b5,rightstick:b8,righttrigger:a4,rightx:a2,righty:a3,start:b6,x:b2,y:b3,platform:Mac OS X,",
+"03000000c62400001a89000000010000,BDA MOGA XP5-X Plus,a:b0,b:b1,back:b12,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b14,leftshoulder:b6,leftstick:b15,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b16,righttrigger:a4,rightx:a2,righty:a3,start:b13,x:b3,y:b4,platform:Mac OS X,",
+"03000000c62400001b89000000010000,BDA MOGA XP5-X Plus,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b6,leftstick:b13,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a4,rightx:a2,righty:a3,start:b11,x:b3,y:b4,platform:Mac OS X,",
+"03000000d62000002a79000000010000,BDA PS4 Fightpad,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Mac OS X,",
+"03000000120c0000200e000000010000,Brook Mars,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Mac OS X,",
+"03000000120c0000210e000000010000,Brook Mars,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Mac OS X,",
+"030000008305000031b0000000000000,Cideko AK08b,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Mac OS X,",
+"03000000260900008888000088020000,Cyber Gadget GameCube Controller,a:b0,b:b1,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,lefttrigger:a4,leftx:a0,lefty:a1,rightshoulder:b6,righttrigger:a5,rightx:a2,righty:a3~,start:b7,x:b2,y:b3,platform:Mac OS X,",
+"03000000a306000022f6000001030000,Cyborg V.3 Rumble Pad,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:+a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:-a3,rightx:a2,righty:a4,start:b9,x:b0,y:b3,platform:Mac OS X,",
+"03000000790000004618000000010000,GameCube Controller Adapter,a:b4,b:b0,dpdown:b56,dpleft:b60,dpright:b52,dpup:b48,lefttrigger:a12,leftx:a0,lefty:a4,rightshoulder:b28,righttrigger:a16,rightx:a20,righty:a8,start:b36,x:b8,y:b12,platform:Mac OS X,",
+"03000000ad1b000001f9000000000000,Gamestop BB-070 X360 Controller,a:b0,b:b1,back:b9,dpdown:b12,dpleft:b13,dpright:b14,dpup:b11,guide:b10,leftshoulder:b4,leftstick:b6,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b7,righttrigger:a5,rightx:a3,righty:a4,start:b8,x:b2,y:b3,platform:Mac OS X,",
+"0500000047532047616d657061640000,GameStop Gamepad,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b2,y:b3,platform:Mac OS X,",
+"03000000c01100000140000000010000,GameStop PS4 Fun Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Mac OS X,",
+"030000006f0e00000102000000000000,GameStop Xbox 360 Wired Controller,a:b0,b:b1,back:b9,dpdown:b12,dpleft:b13,dpright:b14,dpup:b11,guide:b10,leftshoulder:b4,leftstick:b6,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b7,righttrigger:a5,rightx:a3,righty:a4,start:b8,x:b2,y:b3,platform:Mac OS X,",
+"030000007d0400000540000001010000,Gravis Eliminator GamePad Pro,a:b1,b:b2,back:b8,leftshoulder:b4,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,righttrigger:b7,start:b9,x:b0,y:b3,platform:Mac OS X,",
+"03000000280400000140000000020000,Gravis Gamepad Pro,a:b1,b:b2,back:b8,dpdown:+a1,dpleft:-a0,dpright:+a0,dpup:-a1,leftshoulder:b4,lefttrigger:b6,rightshoulder:b5,righttrigger:b7,start:b9,x:b0,y:b3,platform:Mac OS X,",
+"030000008f0e00000300000007010000,GreenAsia Inc. USB Joystick,a:b2,b:b3,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,lefttrigger:b5,leftx:a0,lefty:a1,rightshoulder:b6,righttrigger:b7,rightx:a3,righty:a2,start:b9,x:b0,y:b1,platform:Mac OS X,",
+"030000000d0f00002d00000000100000,Hori Fighting Commander 3 Pro,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Mac OS X,",
+"030000000d0f00005f00000000010000,Hori Fighting Commander 4 (PS3),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Mac OS X,",
+"030000000d0f00005e00000000010000,Hori Fighting Commander 4 (PS4),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Mac OS X,",
+"030000000d0f00005f00000000000000,HORI Fighting Commander 4 PS3,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Mac OS X,",
+"030000000d0f00005e00000000000000,HORI Fighting Commander 4 PS4,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Mac OS X,",
+"030000000d0f00004d00000000000000,HORI Gem Pad 3,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Mac OS X,",
+"030000000d0f00009200000000010000,Hori Pokken Tournament DX Pro Pad,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,lefttrigger:b6,rightshoulder:b5,righttrigger:b7,start:b9,x:b0,y:b3,platform:Mac OS X,",
+"030000000d0f00006e00000000010000,HORIPAD 4 (PS3),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Mac OS X,",
+"030000000d0f00006600000000010000,HORIPAD 4 (PS4),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Mac OS X,",
+"030000000d0f00006600000000000000,HORIPAD FPS PLUS 4,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Mac OS X,",
+"030000000d0f0000ee00000000010000,HORIPAD mini4,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Mac OS X,",
+"030000008f0e00001330000011010000,HuiJia SNES Controller,a:b4,b:b2,back:b16,dpdown:+a2,dpleft:-a0,dpright:+a0,dpup:-a2,leftshoulder:b12,rightshoulder:b14,start:b18,x:b6,y:b0,platform:Mac OS X,",
+"03000000830500006020000000010000,iBuffalo SNES Controller,a:b1,b:b0,back:b6,dpdown:+a1,dpleft:-a0,dpright:+a0,dpup:-a1,leftshoulder:b4,rightshoulder:b5,start:b7,x:b3,y:b2,platform:Mac OS X,",
+"03000000830500006020000000000000,iBuffalo USB 2-axis 8-button Gamepad,a:b1,b:b0,back:b6,leftshoulder:b4,leftx:a0,lefty:a1,rightshoulder:b5,start:b7,x:b3,y:b2,platform:Mac OS X,",
+"030000007e0500000620000001000000,Joy-Con (L),+leftx:h0.2,+lefty:h0.4,-leftx:h0.8,-lefty:h0.1,a:b0,b:b1,back:b13,leftshoulder:b4,leftstick:b10,rightshoulder:b5,start:b8,x:b2,y:b3,platform:Mac OS X,",
+"030000007e0500000720000001000000,Joy-Con (R),+leftx:h0.2,+lefty:h0.4,-leftx:h0.8,-lefty:h0.1,a:b0,b:b1,back:b12,leftshoulder:b4,leftstick:b11,rightshoulder:b5,start:b9,x:b2,y:b3,platform:Mac OS X,",
+"03000000242f00002d00000007010000,JYS Wireless Adapter,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,platform:Mac OS X,",
+"030000006d04000016c2000000020000,Logitech Dual Action,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Mac OS X,",
+"030000006d04000016c2000000030000,Logitech Dual Action,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Mac OS X,",
+"030000006d04000016c2000014040000,Logitech Dual Action,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Mac OS X,",
+"030000006d04000016c2000000000000,Logitech F310 Gamepad (DInput),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Mac OS X,",
+"030000006d04000018c2000000000000,Logitech F510 Gamepad (DInput),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Mac OS X,",
+"030000006d04000019c2000005030000,Logitech F710,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Mac OS X,",
+"030000006d0400001fc2000000000000,Logitech F710 Gamepad (XInput),a:b0,b:b1,back:b9,dpdown:b12,dpleft:b13,dpright:b14,dpup:b11,guide:b10,leftshoulder:b4,leftstick:b6,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b7,righttrigger:a5,rightx:a3,righty:a4,start:b8,x:b2,y:b3,platform:Mac OS X,",
+"030000006d04000018c2000000010000,Logitech RumblePad 2 USB,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1~,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3~,start:b9,x:b0,y:b3,platform:Mac OS X,",
+"030000006d04000019c2000000000000,Logitech Wireless Gamepad (DInput),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Mac OS X,",
+"03000000380700005032000000010000,Mad Catz FightPad PRO (PS3),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Mac OS X,",
+"03000000380700005082000000010000,Mad Catz FightPad PRO (PS4),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Mac OS X,",
+"03000000380700008433000000010000,Mad Catz FightStick TE S+ (PS3),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Mac OS X,",
+"03000000380700008483000000010000,Mad Catz FightStick TE S+ (PS4),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Mac OS X,",
+"03000000790000000600000007010000,Marvo GT-004,a:b2,b:b1,x:b3,y:b0,back:b8,start:b9,leftstick:b10,rightstick:b11,leftshoulder:b4,rightshoulder:b5,dpup:h0.1,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,leftx:a0,lefty:a1,rightx:a2,righty:a3,lefttrigger:b6,righttrigger:b7,platform:Mac OS X,",
+"03000000790000004418000000010000,Mayflash GameCube Controller,a:b1,b:b2,dpdown:b14,dpleft:b15,dpright:b13,dpup:b12,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b7,righttrigger:a4,rightx:a5,righty:a2,start:b9,x:b0,y:b3,platform:Mac OS X,",
+"03000000242f00007300000000020000,Mayflash Magic NS,a:b1,b:b4,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b6,leftstick:b13,lefttrigger:b8,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a2,righty:a3,start:b11,x:b0,y:b3,platform:Mac OS X,",
+"0300000079000000d218000026010000,Mayflash Magic NS,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,platform:Mac OS X,",
+"03000000d620000010a7000003010000,Mayflash Magic NS,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Mac OS X,",
+"0300000025090000e803000000000000,Mayflash Wii Classic Controller,a:b1,b:b0,back:b8,dpdown:b13,dpleft:b12,dpright:b14,dpup:b11,guide:b10,leftshoulder:b4,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b2,platform:Mac OS X,",
+"03000000790000000018000000010000,Mayflash Wii U Pro Controller Adapter,a:b4,b:b8,back:b32,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b16,leftstick:b40,lefttrigger:b24,leftx:a0,lefty:a4,rightshoulder:b20,rightstick:b44,righttrigger:b28,rightx:a8,righty:a12,start:b36,x:b0,y:b12,platform:Mac OS X,",
+"03000000790000000018000000000000,Mayflash WiiU Pro Game Controller Adapter (DInput),a:b4,b:b8,back:b32,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b16,leftstick:b40,lefttrigger:b24,leftx:a0,lefty:a4,rightshoulder:b20,rightstick:b44,righttrigger:b28,rightx:a8,righty:a12,start:b36,x:b0,y:b12,platform:Mac OS X,",
+"03000000d8140000cecf000000000000,MC Cthulhu,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,lefttrigger:b6,rightshoulder:b5,righttrigger:b7,start:b9,x:b0,y:b3,platform:Mac OS X,",
+"030000005e0400002700000001010000,Microsoft SideWinder Plug & Play Game Pad,a:b0,b:b1,dpdown:+a1,dpleft:-a0,dpright:+a0,dpup:-a1,lefttrigger:b4,leftx:a0,lefty:a1,righttrigger:b5,x:b2,y:b3,platform:Mac OS X,",
+"03000000d62000007162000001000000,Moga Pro 2 HID,a:b0,b:b1,back:b9,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b7,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b8,righttrigger:a4,rightx:a2,righty:a3,start:b6,x:b2,y:b3,platform:Mac OS X,",
+"03000000c62400002a89000000010000,MOGA XP5-A Plus,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b21,leftshoulder:b6,leftstick:b13,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a4,rightx:a2,righty:a3,start:b11,x:b3,y:b4,platform:Mac OS X,",
+"03000000c62400002b89000000010000,MOGA XP5-A Plus,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b6,leftstick:b13,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a4,rightx:a2,righty:a3,start:b11,x:b3,y:b4,platform:Mac OS X,",
+"03000000632500007505000000020000,NEOGEO mini PAD Controller,a:b1,b:b0,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,start:b9,x:b2,y:b3,platform:Mac OS X,",
+"03000000921200004b46000003020000,NES 2-port Adapter,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,start:b11,platform:Mac OS X,",
+"030000001008000001e5000006010000,NEXT SNES Controller,a:b2,b:b1,back:b8,dpdown:+a1,dpleft:-a0,dpright:+a0,dpup:-a1,leftshoulder:b4,rightshoulder:b5,righttrigger:b6,start:b9,x:b3,y:b0,platform:Mac OS X,",
+"03000000d620000011a7000000020000,Nintendo Switch Core (Plus) Wired Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Mac OS X,",
+"03000000d620000011a7000010050000,Nintendo Switch PowerA Wired Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Mac OS X,",
+"030000007e0500000920000000000000,Nintendo Switch Pro Controller,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b2,y:b3,platform:Mac OS X,",
+"030000007e0500000920000001000000,Nintendo Switch Pro Controller,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b2,y:b3,platform:Mac OS X,",
+"03000000550900001472000025050000,NVIDIA Controller v01.04,a:b0,b:b1,back:b17,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b15,leftshoulder:b4,leftstick:b7,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b8,righttrigger:a4,rightx:a2,righty:a5,start:b6,x:b2,y:b3,platform:Mac OS X,",
+"030000006f0e00000901000002010000,PDP Versus Fighting Pad,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,lefttrigger:b6,rightshoulder:b5,righttrigger:b7,start:b9,x:b0,y:b3,platform:Mac OS X,",
+"030000008f0e00000300000000000000,Piranha xtreme,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b10,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b11,righttrigger:b5,rightx:a3,righty:a2,start:b9,x:b3,y:b0,platform:Mac OS X,",
+"030000004c050000da0c000000010000,Playstation Classic Controller,a:b2,b:b1,back:b8,leftshoulder:b6,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b7,righttrigger:b5,start:b9,x:b3,y:b0,platform:Mac OS X,",
+"030000004c0500003713000000010000,PlayStation Vita,a:b1,b:b2,back:b8,dpdown:b13,dpleft:b15,dpright:b14,dpup:b12,leftshoulder:b4,leftx:a0,lefty:a1,rightshoulder:b5,rightx:a3,righty:a4,start:b9,x:b0,y:b3,platform:Mac OS X,",
+"03000000d62000006dca000000010000,PowerA Pro Ex,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Mac OS X,",
+"03000000100800000300000006010000,PS2 Adapter,a:b2,b:b1,back:b8,leftshoulder:b6,leftstick:b10,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b11,righttrigger:b5,rightx:a4,righty:a3,start:b9,x:b3,y:b0,platform:Mac OS X,",
+"030000004c0500006802000000000000,PS3 Controller,a:b14,b:b13,back:b0,dpdown:b6,dpleft:b7,dpright:b5,dpup:b4,guide:b16,leftshoulder:b10,leftstick:b1,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b11,rightstick:b2,righttrigger:b9,rightx:a2,righty:a3,start:b3,x:b15,y:b12,platform:Mac OS X,",
+"030000004c0500006802000000010000,PS3 Controller,a:b14,b:b13,back:b0,dpdown:b6,dpleft:b7,dpright:b5,dpup:b4,guide:b16,leftshoulder:b10,leftstick:b1,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b11,rightstick:b2,righttrigger:b9,rightx:a2,righty:a3,start:b3,x:b15,y:b12,platform:Mac OS X,",
+"030000004c050000a00b000000010000,PS4 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Mac OS X,",
+"030000004c050000c405000000000000,PS4 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Mac OS X,",
+"030000004c050000c405000000010000,PS4 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Mac OS X,",
+"030000004c050000cc09000000010000,PS4 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Mac OS X,",
+"050000004c050000e60c000000010000,PS5 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,misc1:b13,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Mac OS X,",
+"030000008916000000fd000000000000,Razer Onza TE,a:b0,b:b1,back:b9,dpdown:b12,dpleft:b13,dpright:b14,dpup:b11,guide:b10,leftshoulder:b4,leftstick:b6,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b7,righttrigger:a5,rightx:a3,righty:a4,start:b8,x:b2,y:b3,platform:Mac OS X,",
+"03000000321500000204000000010000,Razer Panthera (PS3),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Mac OS X,",
+"03000000321500000104000000010000,Razer Panthera (PS4),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Mac OS X,",
+"03000000321500000010000000010000,Razer RAIJU,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Mac OS X,",
+"03000000321500000507000001010000,Razer Raiju Mobile,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b21,leftshoulder:b6,leftstick:b13,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a4,rightx:a2,righty:a3,start:b11,x:b3,y:b4,platform:Mac OS X,",
+"03000000321500000011000000010000,Razer Raion Fightpad for PS4,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Mac OS X,",
+"03000000321500000009000000020000,Razer Serval,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a4,rightx:a2,righty:a3,start:b7,x:b2,y:b3,platform:Mac OS X,",
+"030000003215000000090000163a0000,Razer Serval,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a4,rightx:a2,righty:a3,start:b7,x:b2,y:b3,platform:Mac OS X,",
+"0300000032150000030a000000000000,Razer Wildcat,a:b0,b:b1,back:b9,dpdown:b12,dpleft:b13,dpright:b14,dpup:b11,guide:b10,leftshoulder:b4,leftstick:b6,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b7,righttrigger:a5,rightx:a3,righty:a4,start:b8,x:b2,y:b3,platform:Mac OS X,",
+"03000000790000001100000000000000,Retrolink Classic Controller,a:b2,b:b1,back:b8,leftshoulder:b4,leftx:a3,lefty:a4,rightshoulder:b5,start:b9,x:b3,y:b0,platform:Mac OS X,",
+"03000000790000001100000006010000,Retrolink SNES Controller,a:b2,b:b1,back:b8,dpdown:+a4,dpleft:-a3,dpright:+a3,dpup:-a4,leftshoulder:b4,rightshoulder:b5,start:b9,x:b3,y:b0,platform:Mac OS X,",
+"030000006b140000010d000000010000,Revolution Pro Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Mac OS X,",
+"030000006b140000130d000000010000,Revolution Pro Controller 3,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Mac OS X,",
+"03000000c6240000fefa000000000000,Rock Candy Gamepad for PS3,a:b0,b:b1,back:b9,dpdown:b12,dpleft:b13,dpright:b14,dpup:b11,guide:b10,leftshoulder:b4,leftstick:b6,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b7,righttrigger:a5,rightx:a3,righty:a4,start:b8,x:b2,y:b3,platform:Mac OS X,",
+"03000000730700000401000000010000,Sanwa PlayOnline Mobile,a:b0,b:b1,back:b2,leftx:a0,lefty:a1,start:b3,platform:Mac OS X,",
+"03000000811700007e05000000000000,Sega Saturn,a:b2,b:b4,dpdown:b16,dpleft:b15,dpright:b14,dpup:b17,leftshoulder:b8,lefttrigger:a5,leftx:a0,lefty:a2,rightshoulder:b9,righttrigger:a4,start:b13,x:b0,y:b6,platform:Mac OS X,",
+"03000000b40400000a01000000000000,Sega Saturn USB Gamepad,a:b0,b:b1,back:b5,guide:b2,leftshoulder:b6,leftx:a0,lefty:a1,rightshoulder:b7,start:b8,x:b3,y:b4,platform:Mac OS X,",
+"030000003512000021ab000000000000,SFC30 Joystick,a:b1,b:b0,back:b10,leftshoulder:b6,leftx:a0,lefty:a1,rightshoulder:b7,start:b11,x:b4,y:b3,platform:Mac OS X,",
+"0300000000f00000f100000000000000,SNES RetroPort,a:b2,b:b3,back:b4,dpdown:+a1,dpleft:-a0,dpright:+a0,dpup:-a1,leftshoulder:b5,rightshoulder:b7,start:b6,x:b0,y:b1,platform:Mac OS X,",
+"030000004c050000e60c000000010000,Sony DualSense,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Mac OS X,",
+"030000004c050000cc09000000000000,Sony DualShock 4 V2,a:b1,b:b2,back:b13,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Mac OS X,",
+"030000004c050000a00b000000000000,Sony DualShock 4 Wireless Adaptor,a:b1,b:b2,back:b13,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Mac OS X,",
+"03000000d11800000094000000010000,Stadia Controller,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a4,rightx:a2,righty:a3,start:b7,x:b2,y:b3,platform:Mac OS X,",
+"030000005e0400008e02000001000000,Steam Virtual Gamepad,a:b0,b:b1,back:b9,dpdown:b12,dpleft:b13,dpright:b14,dpup:b11,leftshoulder:b4,leftstick:b6,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b7,righttrigger:a5,rightx:a3,righty:a4,start:b8,x:b2,y:b3,platform:Mac OS X,",
+"03000000110100002014000000000000,SteelSeries Nimbus,a:b0,b:b1,dpdown:b9,dpleft:b11,dpright:b10,dpup:b8,leftshoulder:b4,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,righttrigger:b7,rightx:a2,righty:a3,start:b12,x:b2,y:b3,platform:Mac OS X,",
+"03000000110100002014000001000000,SteelSeries Nimbus,a:b0,b:b1,dpdown:b9,dpleft:b11,dpright:b10,dpup:b8,guide:b12,leftshoulder:b4,lefttrigger:b6,leftx:a0,lefty:a1~,rightshoulder:b5,righttrigger:b7,rightx:a2,righty:a3~,x:b2,y:b3,platform:Mac OS X,",
+"03000000381000002014000001000000,SteelSeries Nimbus,a:b0,b:b1,dpdown:b9,dpleft:b11,dpright:b10,dpup:b8,guide:b12,leftshoulder:b4,lefttrigger:b6,leftx:a0,lefty:a1~,rightshoulder:b5,righttrigger:b7,rightx:a2,righty:a3~,x:b2,y:b3,platform:Mac OS X,",
+"050000004e696d6275732b0000000000,SteelSeries Nimbus Plus,a:b0,b:b1,back:b15,dpdown:b11,dpleft:b13,dpright:b12,dpup:b10,guide:b16,leftshoulder:b4,leftstick:b8,lefttrigger:b6,leftx:a0,lefty:a1~,rightshoulder:b5,rightstick:b9,righttrigger:b7,rightx:a2,righty:a3~,start:b14,x:b2,y:b3,platform:Mac OS X,",
+"03000000110100001714000000000000,SteelSeries Stratus XL,a:b0,b:b1,dpdown:b9,dpleft:b11,dpright:b10,dpup:b8,leftshoulder:b4,lefttrigger:b6,leftx:a0,lefty:a1~,rightshoulder:b5,righttrigger:b7,rightx:a2,righty:a3~,start:b12,x:b2,y:b3,platform:Mac OS X,",
+"03000000110100001714000020010000,SteelSeries Stratus XL,a:b0,b:b1,dpdown:b9,dpleft:b11,dpright:b10,dpup:b8,leftshoulder:b4,lefttrigger:b6,leftx:a0,lefty:a1~,rightshoulder:b5,righttrigger:b7,rightx:a2,righty:a3~,start:b12,x:b2,y:b3,platform:Mac OS X,",
+"03000000457500002211000000010000,SZMY-POWER PC Gamepad,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Mac OS X,",
+"030000004f04000015b3000000000000,Thrustmaster Dual Analog 3.2,a:b0,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b5,leftx:a0,lefty:a1,rightshoulder:b6,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b1,y:b3,platform:Mac OS X,",
+"030000004f0400000ed0000000020000,ThrustMaster eSwap PRO Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Mac OS X,",
+"030000004f04000000b3000000000000,Thrustmaster Firestorm Dual Power,a:b0,b:b2,back:b9,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b11,lefttrigger:b5,leftx:a0,lefty:a1,rightshoulder:b6,righttrigger:b7,rightx:a2,righty:a3,start:b10,x:b1,y:b3,platform:Mac OS X,",
+"03000000bd12000015d0000000000000,Tomee SNES USB Controller,a:b2,b:b1,back:b8,leftshoulder:b4,leftx:a0,lefty:a1,rightshoulder:b5,start:b9,x:b3,y:b0,platform:Mac OS X,",
+"03000000bd12000015d0000000010000,Tomee SNES USB Controller,a:b2,b:b1,back:b8,dpdown:+a1,dpleft:-a0,dpright:+a0,dpup:-a1,leftshoulder:b4,rightshoulder:b5,start:b9,x:b3,y:b0,platform:Mac OS X,",
+"03000000100800000100000000000000,Twin USB Joystick,a:b4,b:b2,back:b16,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b12,leftstick:b20,lefttrigger:b8,leftx:a0,lefty:a2,rightshoulder:b14,rightstick:b22,righttrigger:b10,rightx:a6,righty:a4,start:b18,x:b6,y:b0,platform:Mac OS X,",
+"030000006f0e00000302000025040000,Victrix Pro Fight Stick for PS4,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,lefttrigger:b6,rightshoulder:b5,righttrigger:b7,start:b9,x:b0,y:b3,platform:Mac OS X,",
+"030000006f0e00000702000003060000,Victrix Pro Fight Stick for PS4,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,rightshoulder:b5,righttrigger:b7,start:b9,x:b0,y:b3,platform:Mac OS X,",
+"03000000791d00000103000009010000,Wii Classic Controller,a:b2,b:b1,back:b8,dpdown:b14,dpleft:b15,dpright:b13,dpup:b12,guide:b10,leftshoulder:b6,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b7,righttrigger:b5,rightx:a2,righty:a3,start:b9,x:b3,y:b0,platform:Mac OS X,",
+"050000005769696d6f74652028303000,Wii Remote,a:b4,b:b5,back:b7,dpdown:b3,dpleft:b0,dpright:b1,dpup:b2,guide:b8,leftshoulder:b11,lefttrigger:b12,leftx:a0,lefty:a1,start:b6,x:b10,y:b9,platform:Mac OS X,",
+"050000005769696d6f74652028313800,Wii U Pro Controller,a:b16,b:b15,back:b7,dpdown:b12,dpleft:b13,dpright:b14,dpup:b11,guide:b8,leftshoulder:b19,leftstick:b23,lefttrigger:b21,leftx:a0,lefty:a1,rightshoulder:b20,rightstick:b24,righttrigger:b22,rightx:a2,righty:a3,start:b6,x:b18,y:b17,platform:Mac OS X,",
+"030000005e0400008e02000000000000,X360 Controller,a:b0,b:b1,back:b9,dpdown:b12,dpleft:b13,dpright:b14,dpup:b11,guide:b10,leftshoulder:b4,leftstick:b6,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b7,righttrigger:a5,rightx:a3,righty:a4,start:b8,x:b2,y:b3,platform:Mac OS X,",
+"030000006f0e00000104000000000000,Xbox 360 Wired Controller,a:b0,b:b1,back:b9,dpdown:b12,dpleft:b13,dpright:b14,dpup:b11,guide:b10,leftshoulder:b4,leftstick:b6,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b7,righttrigger:a5,rightx:a3,righty:a4,start:b8,x:b2,y:b3,platform:Mac OS X,",
+"03000000c6240000045d000000000000,Xbox 360 Wired Controller,a:b0,b:b1,back:b9,dpdown:b12,dpleft:b13,dpright:b14,dpup:b11,guide:b10,leftshoulder:b4,leftstick:b6,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b7,righttrigger:a5,rightx:a3,righty:a4,start:b8,x:b2,y:b3,platform:Mac OS X,",
+"030000005e0400000a0b000000000000,Xbox Adaptive Controller,a:b0,b:b1,back:b9,dpdown:b12,dpleft:b13,dpright:b14,dpup:b11,guide:b10,leftshoulder:b4,leftstick:b6,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b7,righttrigger:a5,rightx:a3,righty:a4,start:b8,x:b2,y:b3,platform:Mac OS X,",
+"030000005e040000050b000003090000,Xbox Elite Wireless Controller Series 2,a:b0,b:b1,back:b31,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b53,leftshoulder:b6,leftstick:b13,lefttrigger:a6,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a5,rightx:a2,righty:a3,start:b11,x:b3,y:b4,platform:Mac OS X,",
+"03000000c62400003a54000000000000,Xbox One PowerA Wired Controller,a:b0,b:b1,back:b9,dpdown:b12,dpleft:b13,dpright:b14,dpup:b11,guide:b10,leftshoulder:b4,leftstick:b6,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b7,righttrigger:a5,rightx:a3,righty:a4,start:b8,x:b2,y:b3,platform:Mac OS X,",
+"030000005e040000d102000000000000,Xbox One Wired Controller,a:b0,b:b1,back:b9,dpdown:b12,dpleft:b13,dpright:b14,dpup:b11,guide:b10,leftshoulder:b4,leftstick:b6,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b7,righttrigger:a5,rightx:a3,righty:a4,start:b8,x:b2,y:b3,platform:Mac OS X,",
+"030000005e040000dd02000000000000,Xbox One Wired Controller,a:b0,b:b1,back:b9,dpdown:b12,dpleft:b13,dpright:b14,dpup:b11,guide:b10,leftshoulder:b4,leftstick:b6,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b7,righttrigger:a5,rightx:a3,righty:a4,start:b8,x:b2,y:b3,platform:Mac OS X,",
+"030000005e040000e302000000000000,Xbox One Wired Controller,a:b0,b:b1,back:b9,dpdown:b12,dpleft:b13,dpright:b14,dpup:b11,guide:b10,leftshoulder:b4,leftstick:b6,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b7,righttrigger:a5,rightx:a3,righty:a4,start:b8,x:b2,y:b3,platform:Mac OS X,",
+"030000005e040000130b000001050000,Xbox Series Controller,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b6,leftstick:b13,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a4,rightx:a2,righty:a3,start:b11,x:b3,y:b4,platform:Mac OS X,",
+"030000005e040000130b000005050000,Xbox Series Controller,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b6,leftstick:b13,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a4,rightx:a2,righty:a3,start:b11,x:b3,y:b4,platform:Mac OS X,",
+"030000005e040000e002000000000000,Xbox Wireless Controller,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b10,leftshoulder:b4,leftstick:b8,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b9,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Mac OS X,",
+"030000005e040000e002000003090000,Xbox Wireless Controller,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b10,leftshoulder:b4,leftstick:b8,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b9,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Mac OS X,",
+"030000005e040000ea02000000000000,Xbox Wireless Controller,a:b0,b:b1,back:b9,dpdown:b12,dpleft:b13,dpright:b14,dpup:b11,guide:b10,leftshoulder:b4,leftstick:b6,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b7,righttrigger:a5,rightx:a3,righty:a4,start:b8,x:b2,y:b3,platform:Mac OS X,",
+"030000005e040000fd02000003090000,Xbox Wireless Controller,a:b0,b:b1,back:b16,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b15,leftshoulder:b6,leftstick:b13,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a4,rightx:a2,righty:a3,start:b11,x:b3,y:b4,platform:Mac OS X,",
+"03000000172700004431000029010000,XiaoMi Game Controller,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b15,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a6,rightx:a2,righty:a5,start:b11,x:b3,y:b4,platform:Mac OS X,",
+"03000000120c0000100e000000010000,ZEROPLUS P4 Gamepad,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Mac OS X,",
+"03000000120c0000101e000000010000,ZEROPLUS P4 Wired Gamepad,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Mac OS X,",
+#endif // GLFW_BUILD_COCOA_MAPPINGS
+
+#if defined(GLFW_BUILD_LINUX_MAPPINGS)
+"03000000c82d00000090000011010000,8BitDo FC30 Pro,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:a4,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a5,rightx:a2,righty:a3,start:b11,x:b4,y:b3,platform:Linux,",
+"05000000c82d00001038000000010000,8Bitdo FC30 Pro,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a2,righty:a3,start:b11,x:b4,y:b3,platform:Linux,",
+"05000000c82d00005106000000010000,8BitDo M30,a:b0,b:b1,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b8,lefttrigger:b9,leftx:a0,lefty:a1,rightshoulder:b6,righttrigger:b7,start:b11,x:b3,y:b4,platform:Linux,",
+"03000000c82d00001590000011010000,8BitDo N30 Pro 2,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a2,righty:a3,start:b11,x:b4,y:b3,platform:Linux,",
+"05000000c82d00006528000000010000,8BitDo N30 Pro 2,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a2,righty:a3,start:b11,x:b4,y:b3,platform:Linux,",
+"03000000c82d00000310000011010000,8BitDo NES30,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b7,lefttrigger:b6,rightshoulder:b9,righttrigger:b8,start:b11,x:b3,y:b4,platform:Linux,",
+"05000000c82d00008010000000010000,8BitDo NES30,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b7,lefttrigger:b6,rightshoulder:b9,righttrigger:b8,start:b11,x:b3,y:b4,platform:Linux,",
+"03000000022000000090000011010000,8Bitdo NES30 Pro,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a2,righty:a3,start:b11,x:b4,y:b3,platform:Linux,",
+"05000000203800000900000000010000,8Bitdo NES30 Pro,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a2,righty:a3,start:b11,x:b4,y:b3,platform:Linux,",
+"05000000c82d00002038000000010000,8Bitdo NES30 Pro,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b2,leftshoulder:b6,leftstick:b13,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a4,rightx:a2,righty:a3,start:b11,x:b4,y:b3,platform:Linux,",
+"03000000c82d00000190000011010000,8Bitdo NES30 Pro 8Bitdo NES30 Pro,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:a4,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a5,rightx:a2,righty:a3,start:b11,x:b4,y:b3,platform:Linux,",
+"05000000c82d00000060000000010000,8BitDo SF30 Pro,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a2,righty:a3,start:b11,x:b4,y:b3,platform:Linux,",
+"05000000c82d00000061000000010000,8Bitdo SF30 Pro,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b2,leftshoulder:b6,leftstick:b13,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a4,rightx:a2,righty:a3,start:b11,x:b4,y:b3,platform:Linux,",
+"03000000c82d000021ab000010010000,8BitDo SFC30,a:b1,b:b0,back:b10,leftshoulder:b6,leftx:a0,lefty:a1,rightshoulder:b7,start:b11,x:b4,y:b3,platform:Linux,",
+"030000003512000012ab000010010000,8Bitdo SFC30 GamePad,a:b2,b:b1,back:b6,dpdown:+a1,dpleft:-a0,dpright:+a0,dpup:-a1,leftshoulder:b4,rightshoulder:b5,start:b7,x:b3,y:b0,platform:Linux,",
+"05000000102800000900000000010000,8Bitdo SFC30 GamePad,a:b1,b:b0,back:b10,leftshoulder:b6,leftx:a0,lefty:a1,rightshoulder:b7,start:b11,x:b4,y:b3,platform:Linux,",
+"05000000c82d00003028000000010000,8Bitdo SFC30 GamePad,a:b1,b:b0,back:b10,leftshoulder:b6,leftx:a0,lefty:a1,rightshoulder:b7,start:b11,x:b4,y:b3,platform:Linux,",
+"03000000c82d00000160000000000000,8BitDo SN30 Pro,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a3,righty:a4,start:b11,x:b4,y:b3,platform:Linux,",
+"03000000c82d00000160000011010000,8BitDo SN30 Pro,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a2,righty:a3,start:b11,x:b4,y:b3,platform:Linux,",
+"03000000c82d00000161000000000000,8BitDo SN30 Pro,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a3,righty:a4,start:b11,x:b4,y:b3,platform:Linux,",
+"03000000c82d00001290000011010000,8BitDo SN30 Pro,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a3,righty:a4,start:b11,x:b4,y:b3,platform:Linux,",
+"05000000c82d00000161000000010000,8BitDo SN30 Pro,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b2,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a2,righty:a3,start:b11,x:b4,y:b3,platform:Linux,",
+"05000000c82d00006228000000010000,8BitDo SN30 Pro,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a2,righty:a3,start:b11,x:b4,y:b3,platform:Linux,",
+"03000000c82d00000260000011010000,8BitDo SN30 Pro+,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a2,righty:a3,start:b11,x:b4,y:b3,platform:Linux,",
+"05000000c82d00000261000000010000,8BitDo SN30 Pro+,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a2,righty:a3,start:b11,x:b4,y:b3,platform:Linux,",
+"05000000202800000900000000010000,8BitDo SNES30 Gamepad,a:b1,b:b0,back:b10,dpdown:b122,dpleft:b119,dpright:b120,dpup:b117,leftshoulder:b6,rightshoulder:b7,start:b11,x:b4,y:b3,platform:Linux,",
+"03000000c82d00000031000011010000,8BitDo Wireless Adapter (DInput),a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b2,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a2,righty:a3,start:b11,x:b3,y:b4,platform:Linux,",
+"030000005e0400008e02000020010000,8BitDo Wireless Adapter (XInput),a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,",
+"03000000c82d00001890000011010000,8BitDo Zero 2,a:b1,b:b0,back:b10,leftshoulder:b6,leftx:a0,lefty:a1,rightshoulder:b7,start:b11,x:b4,y:b3,platform:Linux,",
+"05000000c82d00003032000000010000,8BitDo Zero 2,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftx:a0,lefty:a1,rightshoulder:b7,rightx:a2,righty:a3,start:b11,x:b4,y:b3,platform:Linux,",
+"050000005e040000e002000030110000,8BitDo Zero 2 (XInput),a:b0,b:b1,back:b6,dpdown:+a1,dpleft:-a0,dpright:+a0,dpup:-a1,leftshoulder:b4,rightshoulder:b5,start:b7,x:b2,y:b3,platform:Linux,",
+"05000000a00500003232000001000000,8Bitdo Zero GamePad,a:b0,b:b1,back:b10,leftshoulder:b6,leftx:a0,lefty:a1,rightshoulder:b7,start:b11,x:b3,y:b4,platform:Linux,",
+"05000000a00500003232000008010000,8Bitdo Zero GamePad,a:b0,b:b1,back:b10,dpdown:+a1,dpleft:-a0,dpright:+a0,dpup:-a1,leftshoulder:b6,rightshoulder:b7,start:b11,x:b3,y:b4,platform:Linux,",
+"03000000c01100000355000011010000,ACRUX USB GAME PAD,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Linux,",
+"030000006f0e00001302000000010000,Afterglow,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,",
+"030000006f0e00003901000020060000,Afterglow Controller for Xbox One,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,",
+"030000006f0e00003901000000430000,Afterglow Prismatic Controller,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,",
+"030000006f0e00003901000013020000,Afterglow Prismatic Wired Controller 048-007-NA,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,",
+"03000000100000008200000011010000,Akishop Customs PS360+ v1.66,a:b1,b:b2,back:b12,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,lefttrigger:b6,rightshoulder:b5,righttrigger:b7,start:b9,x:b0,y:b3,platform:Linux,",
+"030000007c1800000006000010010000,Alienware Dual Compatible Game Pad,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a3,righty:a2,start:b9,x:b0,y:b3,platform:Linux,",
+"05000000491900000204000021000000,Amazon Fire Game Controller,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b17,leftshoulder:b6,leftstick:b13,lefttrigger:a5,leftx:a0,lefty:a1,misc1:b12,rightshoulder:b7,rightstick:b14,righttrigger:a4,rightx:a2,righty:a3,start:b11,x:b3,y:b4,platform:Linux,",
+"03000000491900001904000011010000,Amazon Luna Controller,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,misc1:b9,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b7,x:b2,y:b3,platform:Linux,",
+"05000000710100001904000000010000,Amazon Luna Controller,a:b0,b:b1,back:b9,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b10,leftshoulder:b4,leftstick:b7,lefttrigger:a5,leftx:a0,lefty:a1,misc1:b11,rightshoulder:b5,rightstick:b8,righttrigger:a4,rightx:a2,righty:a3,start:b6,x:b2,y:b3,platform:Linux,",
+"03000000790000003018000011010000,Arcade Fightstick F300,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,righttrigger:b7,start:b9,x:b0,y:b3,platform:Linux,",
+"03000000a30c00002700000011010000,Astro City Mini,a:b2,b:b1,back:b8,leftx:a0,lefty:a1,rightshoulder:b4,righttrigger:b5,start:b9,x:b3,y:b0,platform:Linux,",
+"03000000a30c00002800000011010000,Astro City Mini,a:b2,b:b1,back:b8,leftx:a0,lefty:a1,rightshoulder:b4,righttrigger:b5,start:b9,x:b3,y:b0,platform:Linux,",
+"05000000050b00000045000031000000,ASUS Gamepad,a:b0,b:b1,back:b9,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b6,leftshoulder:b4,leftstick:b7,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b8,righttrigger:a4,rightx:a2,righty:a3,start:b10,x:b2,y:b3,platform:Linux,",
+"05000000050b00000045000040000000,ASUS Gamepad,a:b0,b:b1,back:b9,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b6,leftshoulder:b4,leftstick:b7,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b8,righttrigger:a4,rightx:a2,righty:a3,start:b10,x:b2,y:b3,platform:Linux,",
+"03000000503200000110000000000000,Atari Classic Controller,a:b0,back:b2,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b4,start:b3,x:b1,platform:Linux,",
+"05000000503200000110000000000000,Atari Classic Controller,a:b0,back:b2,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b4,start:b3,x:b1,platform:Linux,",
+"03000000503200000210000000000000,Atari Game Controller,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b3,y:b2,platform:Linux,",
+"05000000503200000210000000000000,Atari Game Controller,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b3,y:b2,platform:Linux,",
+"03000000120c00000500000010010000,AxisPad,a:b2,b:b3,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b8,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b9,righttrigger:b7,rightx:a3,righty:a2,start:b11,x:b0,y:b1,platform:Linux,",
+"03000000ef0500000300000000010000,AxisPad,a:b2,b:b3,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b8,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b9,righttrigger:b7,rightx:a3,righty:a2,start:b11,x:b0,y:b1,platform:Linux,",
+"03000000c62400001b89000011010000,BDA MOGA XP5-X Plus,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b6,leftstick:b13,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a4,rightx:a2,righty:a3,start:b11,x:b3,y:b4,platform:Linux,",
+"03000000d62000002a79000011010000,BDA PS4 Fightpad,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Linux,",
+"03000000c21100000791000011010000,Be1 GC101 Controller 1.03 mode,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,platform:Linux,",
+"03000000c31100000791000011010000,Be1 GC101 GAMEPAD 1.03 mode,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b6,leftstick:b13,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a4,rightx:a2,righty:a3,start:b11,x:b3,y:b4,platform:Linux,",
+"030000005e0400008e02000003030000,Be1 GC101 Xbox 360 Controller mode,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,",
+"05000000bc2000000055000001000000,BETOP AX1 BFM,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a2,righty:a3,start:b11,x:b3,y:b4,platform:Linux,",
+"03000000666600006706000000010000,boom PSX to PC Converter,a:b2,b:b1,back:b8,dpdown:b14,dpleft:b15,dpright:b13,dpup:b12,leftshoulder:b6,leftstick:b9,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b10,righttrigger:b5,rightx:a2,righty:a3,start:b11,x:b3,y:b0,platform:Linux,",
+"03000000120c0000200e000011010000,Brook Mars,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Linux,",
+"03000000120c0000210e000011010000,Brook Mars,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Linux,",
+"03000000120c0000f70e000011010000,Brook Universal Fighting Board,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,rightshoulder:b5,rightstick:b11,righttrigger:b7,start:b9,x:b0,y:b3,platform:Linux,",
+"03000000ffff0000ffff000000010000,Chinese-made Xbox Controller,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b5,leftstick:b8,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b2,rightstick:b9,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b3,y:b4,platform:Linux,",
+"03000000e82000006058000001010000,Cideko AK08b,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,platform:Linux,",
+"030000000b0400003365000000010000,Competition Pro,a:b0,b:b1,back:b2,leftx:a0,lefty:a1,start:b3,platform:Linux,",
+"03000000260900008888000000010000,Cyber Gadget GameCube Controller,a:b0,b:b1,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,lefttrigger:a4,leftx:a0,lefty:a1,rightshoulder:b6,righttrigger:a5,rightx:a2,righty:a3~,start:b7,x:b2,y:b3,platform:Linux,",
+"03000000a306000022f6000011010000,Cyborg V.3 Rumble Pad,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:+a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:-a3,rightx:a2,righty:a4,start:b9,x:b0,y:b3,platform:Linux,",
+"03000000b40400000a01000000010000,CYPRESS USB Gamepad,a:b0,b:b1,back:b5,guide:b2,leftshoulder:b6,leftx:a0,lefty:a1,rightshoulder:b7,start:b8,x:b3,y:b4,platform:Linux,",
+"03000000790000000600000010010000,DragonRise Inc. Generic USB Joystick,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a3,righty:a4,start:b9,x:b3,y:b0,platform:Linux,",
+"030000004f04000004b3000010010000,Dual Power 2,a:b0,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b5,leftx:a0,lefty:a1,rightshoulder:b6,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b1,y:b3,platform:Linux,",
+"030000006f0e00003001000001010000,EA Sports PS3 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Linux,",
+"03000000341a000005f7000010010000,GameCube {HuiJia USB box},a:b1,b:b2,dpdown:b14,dpleft:b15,dpright:b13,dpup:b12,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b7,righttrigger:a4,rightx:a5,righty:a2,start:b9,x:b0,y:b3,platform:Linux,",
+"03000000bc2000000055000011010000,GameSir G3w,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a4,rightx:a2,righty:a3,start:b11,x:b3,y:b4,platform:Linux,",
+"0500000047532047616d657061640000,GameStop Gamepad,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b2,y:b3,platform:Linux,",
+"030000006f0e00000104000000010000,Gamestop Logic3 Controller,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,",
+"030000008f0e00000800000010010000,Gasia Co. Ltd PS(R) Gamepad,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,platform:Linux,",
+"030000006f0e00001304000000010000,Generic X-Box pad,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,",
+"03000000451300000010000010010000,Genius Maxfire Grandias 12,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b2,y:b3,platform:Linux,",
+"03000000f0250000c183000010010000,Goodbetterbest Ltd USB Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Linux,",
+"0300000079000000d418000000010000,GPD Win 2 Controller,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,",
+"030000007d0400000540000000010000,Gravis Eliminator GamePad Pro,a:b1,b:b2,back:b8,leftshoulder:b4,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,righttrigger:b7,start:b9,x:b0,y:b3,platform:Linux,",
+"03000000280400000140000000010000,Gravis GamePad Pro USB ,a:b1,b:b2,back:b8,leftshoulder:b4,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,righttrigger:b7,start:b9,x:b0,y:b3,platform:Linux,",
+"030000008f0e00000610000000010000,GreenAsia Electronics 4Axes 12Keys GamePad ,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b9,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b10,righttrigger:b5,rightx:a3,righty:a2,start:b11,x:b3,y:b0,platform:Linux,",
+"030000008f0e00001200000010010000,GreenAsia Inc. USB Joystick,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b5,leftx:a0,lefty:a1,rightshoulder:b6,rightstick:b11,righttrigger:b7,rightx:a3,righty:a2,start:b9,x:b2,y:b3,platform:Linux,",
+"0500000047532067616d657061640000,GS gamepad,a:b0,b:b1,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b2,y:b3,platform:Linux,",
+"03000000f0250000c383000010010000,GT VX2,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,platform:Linux,",
+"06000000adde0000efbe000002010000,Hidromancer Game Controller,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,",
+"03000000d81400000862000011010000,HitBox (PS3/PC) Analog Mode,a:b1,b:b2,back:b8,guide:b9,leftshoulder:b4,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,righttrigger:b7,start:b12,x:b0,y:b3,platform:Linux,",
+"03000000c9110000f055000011010000,HJC Game GAMEPAD,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b2,y:b3,platform:Linux,",
+"03000000632500002605000010010000,HJD-X,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b6,leftstick:b13,lefttrigger:a4,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a5,rightx:a2,righty:a3,start:b11,x:b3,y:b4,platform:Linux,",
+"030000000d0f00000d00000000010000,hori,a:b0,b:b6,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b10,leftshoulder:b3,leftx:b4,lefty:b5,rightshoulder:b7,start:b9,x:b1,y:b2,platform:Linux,",
+"030000000d0f00001000000011010000,HORI CO. LTD. FIGHTING STICK 3,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,lefttrigger:b6,rightshoulder:b5,righttrigger:b7,start:b9,x:b0,y:b3,platform:Linux,",
+"030000000d0f0000c100000011010000,HORI CO. LTD. HORIPAD S,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b13,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Linux,",
+"030000000d0f00006a00000011010000,HORI CO. LTD. Real Arcade Pro.4,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Linux,",
+"030000000d0f00006b00000011010000,HORI CO. LTD. Real Arcade Pro.4,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Linux,",
+"030000000d0f00002200000011010000,HORI CO. LTD. REAL ARCADE Pro.V3,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,lefttrigger:b6,rightshoulder:b5,righttrigger:b7,start:b9,x:b0,y:b3,platform:Linux,",
+"030000000d0f00008500000010010000,HORI Fighting Commander,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Linux,",
+"030000000d0f00008600000002010000,Hori Fighting Commander,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b10,leftshoulder:b4,leftstick:b11,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b12,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b2,y:b3,platform:Linux,",
+"030000000d0f00005f00000011010000,Hori Fighting Commander 4 (PS3),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Linux,",
+"030000000d0f00005e00000011010000,Hori Fighting Commander 4 (PS4),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Linux,",
+"03000000ad1b000001f5000033050000,Hori Pad EX Turbo 2,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,",
+"030000000d0f00009200000011010000,Hori Pokken Tournament DX Pro Pad,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,lefttrigger:b6,rightshoulder:b5,righttrigger:b7,start:b9,x:b0,y:b3,platform:Linux,",
+"030000000d0f0000aa00000011010000,HORI Real Arcade Pro,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,platform:Linux,",
+"030000000d0f0000d800000072056800,HORI Real Arcade Pro S,a:b0,b:b1,back:b4,dpdown:b12,dpleft:b13,dpright:b14,dpup:b11,guide:b5,leftshoulder:b9,leftstick:b7,lefttrigger:a4,leftx:a0,lefty:a1,rightshoulder:b10,rightstick:b8,righttrigger:a5,rightx:a2,righty:a3,start:b6,x:b2,y:b3,platform:Linux,",
+"030000000d0f00001600000000010000,Hori Real Arcade Pro.EX-SE (Xbox 360),a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b10,leftshoulder:b4,lefttrigger:b6,rightshoulder:b5,righttrigger:b7,start:b9,x:b2,y:b3,platform:Linux,",
+"030000000d0f00006e00000011010000,HORIPAD 4 (PS3),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Linux,",
+"030000000d0f00006600000011010000,HORIPAD 4 (PS4),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Linux,",
+"030000000d0f0000ee00000011010000,HORIPAD mini4,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Linux,",
+"030000000d0f00006700000001010000,HORIPAD ONE,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,",
+"030000008f0e00001330000010010000,HuiJia SNES Controller,a:b2,b:b1,back:b8,dpdown:+a1,dpleft:-a0,dpright:+a0,dpup:-a1,leftshoulder:b6,rightshoulder:b7,start:b9,x:b3,y:b0,platform:Linux,",
+"03000000242e00008816000001010000,Hyperkin X91,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,",
+"03000000830500006020000010010000,iBuffalo SNES Controller,a:b1,b:b0,back:b6,dpdown:+a1,dpleft:-a0,dpright:+a0,dpup:-a1,leftshoulder:b4,rightshoulder:b5,start:b7,x:b3,y:b2,platform:Linux,",
+"050000006964726f69643a636f6e0000,idroid:con,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Linux,",
+"03000000b50700001503000010010000,impact,a:b2,b:b3,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b5,leftx:a0,lefty:a1,rightshoulder:b6,rightstick:b11,righttrigger:b7,rightx:a3,righty:a2,start:b9,x:b0,y:b1,platform:Linux,",
+"03000000d80400008200000003000000,IMS PCU#0 Gamepad Interface,a:b1,b:b0,back:b4,dpdown:+a1,dpleft:-a0,dpright:+a0,dpup:-a1,start:b5,x:b3,y:b2,platform:Linux,",
+"03000000fd0500000030000000010000,InterAct GoPad I-73000 (Fighting Game Layout),a:b3,b:b4,back:b6,leftx:a0,lefty:a1,rightshoulder:b2,righttrigger:b5,start:b7,x:b0,y:b1,platform:Linux,",
+"0500000049190000020400001b010000,Ipega PG-9069 - Bluetooth Gamepad,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b161,leftshoulder:b6,leftstick:b13,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a4,rightx:a2,righty:a3,start:b11,x:b3,y:b4,platform:Linux,",
+"03000000632500007505000011010000,Ipega PG-9099 - Bluetooth Gamepad,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,platform:Linux,",
+"030000006e0500000320000010010000,JC-U3613M - DirectInput Mode,a:b2,b:b3,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b8,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b9,righttrigger:b7,rightx:a2,righty:a3,start:b11,x:b0,y:b1,platform:Linux,",
+"03000000300f00001001000010010000,Jess Tech Dual Analog Rumble Pad,a:b2,b:b3,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b5,leftx:a0,lefty:a1,rightshoulder:b6,rightstick:b11,righttrigger:b7,rightx:a3,righty:a2,start:b9,x:b0,y:b1,platform:Linux,",
+"03000000300f00000b01000010010000,Jess Tech GGE909 PC Recoil Pad,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a3,righty:a2,start:b9,x:b3,y:b0,platform:Linux,",
+"03000000ba2200002010000001010000,Jess Technology USB Game Controller,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,righttrigger:b7,rightx:a3,righty:a2,start:b9,x:b3,y:b0,platform:Linux,",
+"030000007e0500000620000001000000,Joy-Con (L),+leftx:h0.2,+lefty:h0.4,-leftx:h0.8,-lefty:h0.1,a:b0,b:b1,back:b13,leftshoulder:b4,leftstick:b10,rightshoulder:b5,start:b8,x:b2,y:b3,platform:Linux,",
+"050000007e0500000620000001000000,Joy-Con (L),+leftx:h0.2,+lefty:h0.4,-leftx:h0.8,-lefty:h0.1,a:b0,b:b1,back:b13,leftshoulder:b4,leftstick:b10,rightshoulder:b5,start:b8,x:b2,y:b3,platform:Linux,",
+"030000007e0500000720000001000000,Joy-Con (R),+leftx:h0.2,+lefty:h0.4,-leftx:h0.8,-lefty:h0.1,a:b0,b:b1,back:b12,leftshoulder:b4,leftstick:b11,rightshoulder:b5,start:b9,x:b2,y:b3,platform:Linux,",
+"050000007e0500000720000001000000,Joy-Con (R),+leftx:h0.2,+lefty:h0.4,-leftx:h0.8,-lefty:h0.1,a:b0,b:b1,back:b12,leftshoulder:b4,leftstick:b11,rightshoulder:b5,start:b9,x:b2,y:b3,platform:Linux,",
+"03000000bd12000003c0000010010000,Joypad Alpha Shock,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Linux,",
+"03000000242f00002d00000011010000,JYS Wireless Adapter,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,platform:Linux,",
+"03000000242f00008a00000011010000,JYS Wireless Adapter,a:b1,b:b4,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b6,leftstick:b13,lefttrigger:b8,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a2,righty:a3,start:b11,x:b0,y:b3,platform:Linux,",
+"030000006f0e00000103000000020000,Logic3 Controller,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,",
+"030000006d040000d1ca000000000000,Logitech ChillStream,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Linux,",
+"030000006d04000019c2000010010000,Logitech Cordless RumblePad 2,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Linux,",
+"030000006d04000016c2000010010000,Logitech Dual Action,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Linux,",
+"030000006d04000016c2000011010000,Logitech Dual Action,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Linux,",
+"030000006d0400001dc2000014400000,Logitech F310 Gamepad (XInput),a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,",
+"030000006d0400001ec2000019200000,Logitech F510 Gamepad (XInput),a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,",
+"030000006d0400001ec2000020200000,Logitech F510 Gamepad (XInput),a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,",
+"030000006d04000019c2000011010000,Logitech F710 Gamepad (DInput),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Linux,",
+"030000006d0400001fc2000005030000,Logitech F710 Gamepad (XInput),a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,",
+"030000006d0400000ac2000010010000,Logitech Inc. WingMan RumblePad,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,lefttrigger:b7,leftx:a0,lefty:a1,rightshoulder:b5,righttrigger:b2,rightx:a3,righty:a4,x:b3,y:b4,platform:Linux,",
+"030000006d04000018c2000010010000,Logitech RumblePad 2,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Linux,",
+"030000006d04000011c2000010010000,Logitech WingMan Cordless RumblePad,a:b0,b:b1,back:b2,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b5,leftshoulder:b6,lefttrigger:b9,leftx:a0,lefty:a1,rightshoulder:b7,righttrigger:b10,rightx:a3,righty:a4,start:b8,x:b3,y:b4,platform:Linux,",
+"050000004d4f435554452d3035305800,M54-PC,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a4,rightx:a2,righty:a3,start:b11,x:b3,y:b4,platform:Linux,",
+"05000000380700006652000025010000,Mad Catz C.T.R.L.R ,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Linux,",
+"03000000380700005032000011010000,Mad Catz FightPad PRO (PS3),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Linux,",
+"03000000380700005082000011010000,Mad Catz FightPad PRO (PS4),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Linux,",
+"03000000ad1b00002ef0000090040000,Mad Catz Fightpad SFxT,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,lefttrigger:a2,rightshoulder:b5,righttrigger:a5,start:b7,x:b2,y:b3,platform:Linux,",
+"03000000380700008034000011010000,Mad Catz fightstick (PS3),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Linux,",
+"03000000380700008084000011010000,Mad Catz fightstick (PS4),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Linux,",
+"03000000380700008433000011010000,Mad Catz FightStick TE S+ (PS3),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Linux,",
+"03000000380700008483000011010000,Mad Catz FightStick TE S+ (PS4),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Linux,",
+"03000000380700001647000010040000,Mad Catz Wired Xbox 360 Controller,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,",
+"03000000380700003847000090040000,Mad Catz Wired Xbox 360 Controller (SFIV),a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b10,leftshoulder:b4,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b2,y:b3,platform:Linux,",
+"03000000ad1b000016f0000090040000,Mad Catz Xbox 360 Controller,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,",
+"03000000380700001888000010010000,MadCatz PC USB Wired Stick 8818,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Linux,",
+"03000000380700003888000010010000,MadCatz PC USB Wired Stick 8838,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:a0,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Linux,",
+"03000000242f0000f700000001010000,Magic-S Pro,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,",
+"03000000120c00000500000000010000,Manta Dualshock 2,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a3,righty:a2,start:b9,x:b2,y:b3,platform:Linux,",
+"03000000790000004418000010010000,Mayflash GameCube Controller,a:b1,b:b0,dpdown:b14,dpleft:b15,dpright:b13,dpup:b12,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b7,righttrigger:a4,rightx:a5,righty:a2,start:b9,x:b2,y:b3,platform:Linux,",
+"03000000790000004318000010010000,Mayflash GameCube Controller Adapter,a:b1,b:b2,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b7,righttrigger:a4,rightx:a5,righty:a2,start:b9,x:b0,y:b3,platform:Linux,",
+"03000000242f00007300000011010000,Mayflash Magic NS,a:b1,b:b4,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b6,leftstick:b13,lefttrigger:b8,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a2,righty:a3,start:b11,x:b0,y:b3,platform:Linux,",
+"0300000079000000d218000011010000,Mayflash Magic NS,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Linux,",
+"03000000d620000010a7000011010000,Mayflash Magic NS,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Linux,",
+"0300000025090000e803000001010000,Mayflash Wii Classic Controller,a:b1,b:b0,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b10,leftshoulder:a4,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:a5,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b2,platform:Linux,",
+"03000000780000000600000010010000,Microntek USB Joystick,a:b2,b:b1,back:b8,leftshoulder:b6,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b7,righttrigger:b5,start:b9,x:b3,y:b0,platform:Linux,",
+"030000005e0400000e00000000010000,Microsoft SideWinder,a:b0,b:b1,back:b9,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,rightshoulder:b7,start:b8,x:b3,y:b4,platform:Linux,",
+"030000005e0400008e02000004010000,Microsoft X-Box 360 pad,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,",
+"030000005e0400008e02000062230000,Microsoft X-Box 360 pad,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,",
+"050000005e040000050b000003090000,Microsoft X-Box One Elite 2 pad,a:b0,b:b1,back:b17,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b6,leftstick:b13,lefttrigger:a6,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a5,rightx:a2,righty:a3,start:b11,x:b3,y:b4,platform:Linux,",
+"030000005e040000e302000003020000,Microsoft X-Box One Elite pad,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,",
+"030000005e040000d102000001010000,Microsoft X-Box One pad,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,",
+"030000005e040000dd02000003020000,Microsoft X-Box One pad (Firmware 2015),a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,",
+"030000005e040000d102000003020000,Microsoft X-Box One pad v2,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,",
+"030000005e0400008502000000010000,Microsoft X-Box pad (Japan),a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b5,leftstick:b8,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b2,rightstick:b9,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b3,y:b4,platform:Linux,",
+"030000005e0400008902000021010000,Microsoft X-Box pad v2 (US),a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b5,leftstick:b8,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b2,rightstick:b9,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b3,y:b4,platform:Linux,",
+"030000005e040000000b000008040000,Microsoft Xbox One Elite 2 pad - Wired,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,",
+"030000005e040000ea02000008040000,Microsoft Xbox One S pad - Wired,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,",
+"03000000c62400001a53000000010000,Mini PE,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,",
+"03000000030000000300000002000000,Miroof,a:b1,b:b0,back:b6,leftshoulder:b4,leftx:a0,lefty:a1,rightshoulder:b5,start:b7,x:b3,y:b2,platform:Linux,",
+"05000000d6200000e589000001000000,Moga 2 HID,a:b0,b:b1,back:b9,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b7,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b8,righttrigger:a4,rightx:a2,righty:a3,start:b6,x:b2,y:b3,platform:Linux,",
+"05000000d6200000ad0d000001000000,Moga Pro,a:b0,b:b1,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b7,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b8,righttrigger:a4,rightx:a2,righty:a3,start:b6,x:b2,y:b3,platform:Linux,",
+"05000000d62000007162000001000000,Moga Pro 2 HID,a:b0,b:b1,back:b9,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b7,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b8,righttrigger:a4,rightx:a2,righty:a3,start:b6,x:b2,y:b3,platform:Linux,",
+"03000000c62400002b89000011010000,MOGA XP5-A Plus,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b6,leftstick:b13,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a4,rightx:a2,righty:a3,start:b11,x:b3,y:b4,platform:Linux,",
+"05000000c62400002a89000000010000,MOGA XP5-A Plus,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b22,leftshoulder:b6,leftstick:b13,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a4,rightx:a2,righty:a3,start:b11,x:b3,y:b4,platform:Linux,",
+"05000000c62400001a89000000010000,MOGA XP5-X Plus,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b6,leftstick:b13,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a4,rightx:a2,righty:a3,start:b11,x:b3,y:b4,platform:Linux,",
+"03000000250900006688000000010000,MP-8866 Super Dual Box,a:b2,b:b1,back:b9,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b10,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b11,righttrigger:b5,rightx:a2,righty:a3,start:b8,x:b3,y:b0,platform:Linux,",
+"030000006b140000010c000010010000,NACON GC-400ES,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b2,y:b3,platform:Linux,",
+"030000000d0f00000900000010010000,Natec Genesis P44,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Linux,",
+"03000000790000004518000010010000,NEXILUX GAMECUBE Controller Adapter,a:b1,b:b0,x:b2,y:b3,start:b9,rightshoulder:b7,dpup:h0.1,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,leftx:a0,lefty:a1,rightx:a5,righty:a2,lefttrigger:a3,righttrigger:a4,platform:Linux,",
+"030000001008000001e5000010010000,NEXT SNES Controller,a:b2,b:b1,back:b8,dpdown:+a1,dpleft:-a0,dpright:+a0,dpup:-a1,leftshoulder:b4,rightshoulder:b5,righttrigger:b6,start:b9,x:b3,y:b0,platform:Linux,",
+"060000007e0500003713000000000000,Nintendo 3DS,a:b0,b:b1,back:b8,dpdown:b11,dpleft:b12,dpright:b13,dpup:b10,leftshoulder:b4,leftx:a0,lefty:a1,rightshoulder:b5,rightx:a2,righty:a3,start:b9,x:b3,y:b2,platform:Linux,",
+"060000007e0500000820000000000000,Nintendo Combined Joy-Cons (joycond),a:b0,b:b1,back:b9,dpdown:b15,dpleft:b16,dpright:b17,dpup:b14,guide:b11,leftshoulder:b5,leftstick:b12,lefttrigger:b7,leftx:a0,lefty:a1,rightshoulder:b6,rightstick:b13,righttrigger:b8,rightx:a2,righty:a3,start:b10,x:b3,y:b2,platform:Linux,",
+"030000007e0500003703000000016800,Nintendo GameCube Controller,a:b0,b:b2,dpdown:b6,dpleft:b4,dpright:b5,dpup:b7,lefttrigger:a4,leftx:a0,lefty:a1~,rightshoulder:b9,righttrigger:a5,rightx:a2,righty:a3~,start:b8,x:b1,y:b3,platform:Linux,",
+"03000000790000004618000010010000,Nintendo GameCube Controller Adapter,a:b1,b:b0,dpdown:b14,dpleft:b15,dpright:b13,dpup:b12,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b7,righttrigger:b5,rightx:a5~,righty:a2~,start:b9,x:b2,y:b3,platform:Linux,",
+"050000007e0500000620000001800000,Nintendo Switch Left Joy-Con,a:b9,b:b8,back:b5,leftshoulder:b2,leftstick:b6,leftx:a1,lefty:a0~,rightshoulder:b4,start:b0,x:b7,y:b10,platform:Linux,",
+"030000007e0500000920000011810000,Nintendo Switch Pro Controller,a:b0,b:b1,back:b9,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b11,leftshoulder:b5,leftstick:b12,lefttrigger:b7,leftx:a0,lefty:a1,rightshoulder:b6,rightstick:b13,righttrigger:b8,rightx:a2,righty:a3,start:b10,x:b3,y:b2,platform:Linux,",
+"050000007e0500000920000001000000,Nintendo Switch Pro Controller,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b2,y:b3,platform:Linux,",
+"050000007e0500000920000001800000,Nintendo Switch Pro Controller,a:b0,b:b1,back:b9,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b11,leftshoulder:b5,leftstick:b12,lefttrigger:b7,leftx:a0,lefty:a1,rightshoulder:b6,rightstick:b13,righttrigger:b8,rightx:a2,righty:a3,start:b10,x:b3,y:b2,platform:Linux,",
+"050000007e0500000720000001800000,Nintendo Switch Right Joy-Con,a:b1,b:b2,back:b9,leftshoulder:b4,leftstick:b10,leftx:a1~,lefty:a0~,rightshoulder:b6,start:b8,x:b0,y:b3,platform:Linux,",
+"050000007e0500001720000001000000,Nintendo Switch SNES Controller,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,rightshoulder:b5,start:b9,x:b2,y:b3,platform:Linux,",
+"050000007e0500003003000001000000,Nintendo Wii Remote Pro Controller,a:b0,b:b1,back:b8,dpdown:b14,dpleft:b15,dpright:b16,dpup:b13,guide:b10,leftshoulder:b4,leftstick:b11,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b12,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b2,platform:Linux,",
+"05000000010000000100000003000000,Nintendo Wiimote,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b10,leftshoulder:b4,leftstick:b11,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b12,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b2,y:b3,platform:Linux,",
+"030000000d0500000308000010010000,Nostromo n45 Dual Analog Gamepad,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b9,leftshoulder:b4,leftstick:b12,lefttrigger:b5,leftx:a0,lefty:a1,rightshoulder:b6,rightstick:b11,righttrigger:b7,rightx:a3,righty:a2,start:b10,x:b2,y:b3,platform:Linux,",
+"03000000550900001072000011010000,NVIDIA Controller,a:b0,b:b1,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b13,leftshoulder:b4,leftstick:b8,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b9,righttrigger:a4,rightx:a2,righty:a3,start:b7,x:b2,y:b3,platform:Linux,",
+"03000000550900001472000011010000,NVIDIA Controller v01.04,a:b0,b:b1,back:b14,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b16,leftshoulder:b4,leftstick:b7,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b8,righttrigger:a4,rightx:a2,righty:a5,start:b6,x:b2,y:b3,platform:Linux,",
+"05000000550900001472000001000000,NVIDIA Controller v01.04,a:b0,b:b1,back:b14,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b16,leftshoulder:b4,leftstick:b7,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b8,righttrigger:a4,rightx:a2,righty:a5,start:b6,x:b2,y:b3,platform:Linux,",
+"03000000451300000830000010010000,NYKO CORE,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Linux,",
+"19000000010000000100000001010000,odroidgo2_joypad,a:b1,b:b0,dpdown:b7,dpleft:b8,dpright:b9,dpup:b6,guide:b10,leftshoulder:b4,leftstick:b12,lefttrigger:b11,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b13,righttrigger:b14,start:b15,x:b2,y:b3,platform:Linux,",
+"19000000010000000200000011000000,odroidgo2_joypad_v11,a:b1,b:b0,dpdown:b9,dpleft:b10,dpright:b11,dpup:b8,guide:b12,leftshoulder:b4,leftstick:b14,lefttrigger:b13,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b15,righttrigger:b16,start:b17,x:b2,y:b3,platform:Linux,",
+"030000005e0400000202000000010000,Old Xbox pad,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b5,leftstick:b8,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b2,rightstick:b9,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b3,y:b4,platform:Linux,",
+"03000000c0160000dc27000001010000,OnyxSoft Dual JoyDivision,a:b0,b:b1,dpdown:+a1,dpleft:-a0,dpright:+a0,dpup:-a1,leftshoulder:b4,rightshoulder:b5,start:b6,x:b2,y:b3,platform:Linux,",
+"05000000362800000100000002010000,OUYA Game Controller,a:b0,b:b3,dpdown:b9,dpleft:b10,dpright:b11,dpup:b8,guide:b14,leftshoulder:b4,leftstick:b6,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b7,righttrigger:a5,rightx:a3,righty:a4,x:b1,y:b2,platform:Linux,",
+"05000000362800000100000003010000,OUYA Game Controller,a:b0,b:b3,dpdown:b9,dpleft:b10,dpright:b11,dpup:b8,guide:b14,leftshoulder:b4,leftstick:b6,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b7,righttrigger:a5,rightx:a3,righty:a4,x:b1,y:b2,platform:Linux,",
+"03000000830500005020000010010000,Padix Co. Ltd. Rockfire PSX/USB Bridge,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b8,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b9,righttrigger:b7,rightx:a2,righty:a3,start:b11,x:b2,y:b3,platform:Linux,",
+"03000000790000001c18000011010000,PC Game Controller,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,platform:Linux,",
+"03000000ff1100003133000010010000,PC Game Controller,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,platform:Linux,",
+"030000006f0e0000b802000001010000,PDP AFTERGLOW Wired Xbox One Controller,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,",
+"030000006f0e0000b802000013020000,PDP AFTERGLOW Wired Xbox One Controller,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,",
+"030000006f0e00006401000001010000,PDP Battlefield One,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,",
+"030000006f0e00008001000011010000,PDP CO. LTD. Faceoff Wired Pro Controller for Nintendo Switch,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Linux,",
+"030000006f0e00003101000000010000,PDP EA Sports Controller,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,",
+"030000006f0e0000c802000012010000,PDP Kingdom Hearts Controller,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,",
+"030000006f0e00008701000011010000,PDP Rock Candy Wired Controller for Nintendo Switch,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b13,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,platform:Linux,",
+"030000006f0e00000901000011010000,PDP Versus Fighting Pad,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,lefttrigger:b6,rightshoulder:b5,righttrigger:b7,start:b9,x:b0,y:b3,platform:Linux,",
+"030000006f0e0000a802000023020000,PDP Wired Controller for Xbox One,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b10,leftshoulder:b4,leftstick:b11,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b12,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b2,y:b3,platform:Linux,",
+"030000006f0e00008501000011010000,PDP Wired Fight Pad Pro for Nintendo Switch,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,platform:Linux,",
+"0500000049190000030400001b010000,PG-9099,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a2,righty:a3,start:b11,x:b3,y:b4,platform:Linux,",
+"05000000491900000204000000000000,PG-9118,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a4,rightx:a2,righty:a3,start:b11,x:b3,y:b4,platform:Linux,",
+"030000004c050000da0c000011010000,Playstation Controller,a:b2,b:b1,back:b8,leftshoulder:b6,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b7,righttrigger:b5,start:b9,x:b3,y:b0,platform:Linux,",
+"030000004c0500003713000011010000,PlayStation Vita,a:b1,b:b2,back:b8,dpdown:b13,dpleft:b15,dpright:b14,dpup:b12,leftshoulder:b4,leftx:a0,lefty:a1,rightshoulder:b5,rightx:a3,righty:a4,start:b9,x:b0,y:b3,platform:Linux,",
+"03000000c62400000053000000010000,PowerA,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,",
+"03000000c62400003a54000001010000,PowerA 1428124-01,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,",
+"03000000d62000006dca000011010000,PowerA Pro Ex,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Linux,",
+"03000000d62000000228000001010000,PowerA Wired Controller for Xbox One,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,",
+"03000000c62400001a58000001010000,PowerA Xbox One Cabled,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,",
+"03000000c62400001a54000001010000,PowerA Xbox One Mini Wired Controller,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,",
+"030000006d040000d2ca000011010000,Precision Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Linux,",
+"03000000ff1100004133000010010000,PS2 Controller,a:b2,b:b1,back:b8,leftshoulder:b6,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b7,righttrigger:b5,start:b9,x:b3,y:b0,platform:Linux,",
+"03000000341a00003608000011010000,PS3 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Linux,",
+"030000004c0500006802000010010000,PS3 Controller,a:b14,b:b13,back:b0,dpdown:b6,dpleft:b7,dpright:b5,dpup:b4,guide:b16,leftshoulder:b10,leftstick:b1,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b11,rightstick:b2,righttrigger:b9,rightx:a2,righty:a3,start:b3,x:b15,y:b12,platform:Linux,",
+"030000004c0500006802000010810000,PS3 Controller,a:b0,b:b1,back:b8,dpdown:b14,dpleft:b15,dpright:b16,dpup:b13,guide:b10,leftshoulder:b4,leftstick:b11,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b12,righttrigger:a5,rightx:a3,righty:a4,start:b9,x:b3,y:b2,platform:Linux,",
+"030000004c0500006802000011010000,PS3 Controller,a:b14,b:b13,back:b0,dpdown:b6,dpleft:b7,dpright:b5,dpup:b4,guide:b16,leftshoulder:b10,leftstick:b1,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b11,rightstick:b2,righttrigger:b9,rightx:a2,righty:a3,start:b3,x:b15,y:b12,platform:Linux,",
+"030000004c0500006802000011810000,PS3 Controller,a:b0,b:b1,back:b8,dpdown:b14,dpleft:b15,dpright:b16,dpup:b13,guide:b10,leftshoulder:b4,leftstick:b11,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b12,righttrigger:a5,rightx:a3,righty:a4,start:b9,x:b3,y:b2,platform:Linux,",
+"030000006f0e00001402000011010000,PS3 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Linux,",
+"030000008f0e00000300000010010000,PS3 Controller,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,platform:Linux,",
+"050000004c0500006802000000000000,PS3 Controller,a:b14,b:b13,back:b0,dpdown:b6,dpleft:b7,dpright:b5,dpup:b4,guide:b16,leftshoulder:b10,leftstick:b1,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b11,rightstick:b2,righttrigger:b9,rightx:a2,righty:a3,start:b3,x:b15,y:b12,platform:Linux,",
+"050000004c0500006802000000010000,PS3 Controller,a:b14,b:b13,back:b0,dpdown:b6,dpleft:b7,dpright:b5,dpup:b4,guide:b16,leftshoulder:b10,leftstick:b1,lefttrigger:a12,leftx:a0,lefty:a1,rightshoulder:b11,rightstick:b2,righttrigger:a13,rightx:a2,righty:a3,start:b3,x:b15,y:b12,platform:Linux,",
+"050000004c0500006802000000800000,PS3 Controller,a:b0,b:b1,back:b8,dpdown:b14,dpleft:b15,dpright:b16,dpup:b13,guide:b10,leftshoulder:b4,leftstick:b11,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b12,righttrigger:a5,rightx:a3,righty:a4,start:b9,x:b3,y:b2,platform:Linux,",
+"050000004c0500006802000000810000,PS3 Controller,a:b0,b:b1,back:b8,dpdown:b14,dpleft:b15,dpright:b16,dpup:b13,guide:b10,leftshoulder:b4,leftstick:b11,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b12,righttrigger:a5,rightx:a3,righty:a4,start:b9,x:b3,y:b2,platform:Linux,",
+"05000000504c415953544154494f4e00,PS3 Controller,a:b14,b:b13,back:b0,dpdown:b6,dpleft:b7,dpright:b5,dpup:b4,guide:b16,leftshoulder:b10,leftstick:b1,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b11,rightstick:b2,righttrigger:b9,rightx:a2,righty:a3,start:b3,x:b15,y:b12,platform:Linux,",
+"060000004c0500006802000000010000,PS3 Controller,a:b14,b:b13,back:b0,dpdown:b6,dpleft:b7,dpright:b5,dpup:b4,guide:b16,leftshoulder:b10,leftstick:b1,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b11,rightstick:b2,righttrigger:b9,rightx:a2,righty:a3,start:b3,x:b15,y:b12,platform:Linux,",
+"030000004c050000a00b000011010000,PS4 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Linux,",
+"030000004c050000a00b000011810000,PS4 Controller,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b10,leftshoulder:b4,leftstick:b11,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b12,righttrigger:a5,rightx:a3,righty:a4,start:b9,x:b3,y:b2,platform:Linux,",
+"030000004c050000c405000011010000,PS4 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Linux,",
+"030000004c050000c405000011810000,PS4 Controller,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b10,leftshoulder:b4,leftstick:b11,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b12,righttrigger:a5,rightx:a3,righty:a4,start:b9,x:b3,y:b2,platform:Linux,",
+"030000004c050000cc09000000010000,PS4 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Linux,",
+"030000004c050000cc09000011010000,PS4 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Linux,",
+"030000004c050000cc09000011810000,PS4 Controller,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b10,leftshoulder:b4,leftstick:b11,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b12,righttrigger:a5,rightx:a3,righty:a4,start:b9,x:b3,y:b2,platform:Linux,",
+"03000000c01100000140000011010000,PS4 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Linux,",
+"050000004c050000c405000000010000,PS4 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Linux,",
+"050000004c050000c405000000810000,PS4 Controller,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b10,leftshoulder:b4,leftstick:b11,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b12,righttrigger:a5,rightx:a3,righty:a4,start:b9,x:b3,y:b2,platform:Linux,",
+"050000004c050000c405000001800000,PS4 Controller,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b10,leftshoulder:b4,leftstick:b11,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b12,righttrigger:a5,rightx:a3,righty:a4,start:b9,x:b3,y:b2,platform:Linux,",
+"050000004c050000cc09000000010000,PS4 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Linux,",
+"050000004c050000cc09000000810000,PS4 Controller,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b10,leftshoulder:b4,leftstick:b11,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b12,righttrigger:a5,rightx:a3,righty:a4,start:b9,x:b3,y:b2,platform:Linux,",
+"050000004c050000cc09000001800000,PS4 Controller,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b10,leftshoulder:b4,leftstick:b11,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b12,righttrigger:a5,rightx:a3,righty:a4,start:b9,x:b3,y:b2,platform:Linux,",
+"030000004c050000e60c000011010000,PS5 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,misc1:b13,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Linux,",
+"050000004c050000e60c000000010000,PS5 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,misc1:b13,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Linux,",
+"03000000ff000000cb01000010010000,PSP,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftx:a0,lefty:a1,rightshoulder:b5,start:b7,x:b2,y:b3,platform:Linux,",
+"03000000300f00001211000011010000,QanBa Arcade JoyStick,a:b2,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b5,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b7,righttrigger:b6,start:b9,x:b1,y:b3,platform:Linux,",
+"030000009b2800004200000001010000,Raphnet Technologies Dual NES to USB v2.0,a:b0,b:b1,back:b2,dpdown:+a1,dpleft:-a0,dpright:+a0,dpup:-a1,start:b3,platform:Linux,",
+"030000009b2800003200000001010000,Raphnet Technologies GC/N64 to USB v3.4,a:b0,b:b7,dpdown:b11,dpleft:b12,dpright:b13,dpup:b10,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b2,righttrigger:b5,rightx:a3,righty:a4,start:b3,x:b1,y:b8,platform:Linux,",
+"030000009b2800006000000001010000,Raphnet Technologies GC/N64 to USB v3.6,a:b0,b:b7,dpdown:b11,dpleft:b12,dpright:b13,dpup:b10,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b2,righttrigger:b5,rightx:a3,righty:a4,start:b3,x:b1,y:b8,platform:Linux,",
+"030000009b2800000300000001010000,raphnet.net 4nes4snes v1.5,a:b0,b:b4,back:b2,leftshoulder:b6,leftx:a0,lefty:a1,rightshoulder:b7,start:b3,x:b1,y:b5,platform:Linux,",
+"030000008916000001fd000024010000,Razer Onza Classic Edition,a:b0,b:b1,back:b6,dpdown:b14,dpleft:b11,dpright:b12,dpup:b13,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,",
+"030000008916000000fd000024010000,Razer Onza Tournament Edition,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,",
+"03000000321500000204000011010000,Razer Panthera (PS3),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Linux,",
+"03000000321500000104000011010000,Razer Panthera (PS4),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Linux,",
+"03000000321500000810000011010000,Razer Panthera Evo Arcade Stick for PS4,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b13,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Linux,",
+"03000000321500000010000011010000,Razer RAIJU,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Linux,",
+"03000000321500000507000000010000,Razer Raiju Mobile,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b21,leftshoulder:b6,leftstick:b13,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a4,rightx:a2,righty:a3,start:b11,x:b3,y:b4,platform:Linux,",
+"03000000321500000011000011010000,Razer Raion Fightpad for PS4,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Linux,",
+"030000008916000000fe000024010000,Razer Sabertooth,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,",
+"03000000c6240000045d000024010000,Razer Sabertooth,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,",
+"03000000c6240000045d000025010000,Razer Sabertooth,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,",
+"03000000321500000009000011010000,Razer Serval,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a4,rightx:a2,righty:a3,start:b7,x:b2,y:b3,platform:Linux,",
+"050000003215000000090000163a0000,Razer Serval,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a4,rightx:a2,righty:a3,start:b7,x:b2,y:b3,platform:Linux,",
+"0300000032150000030a000001010000,Razer Wildcat,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,",
+"03000000790000001100000010010000,Retrolink SNES Controller,a:b2,b:b1,back:b8,dpdown:+a1,dpleft:-a0,dpright:+a0,dpup:-a1,leftshoulder:b4,rightshoulder:b5,start:b9,x:b3,y:b0,platform:Linux,",
+"0300000081170000990a000001010000,Retronic Adapter,a:b0,leftx:a0,lefty:a1,platform:Linux,",
+"0300000000f000000300000000010000,RetroPad,a:b1,b:b5,back:b2,leftshoulder:b6,leftx:a0,lefty:a1,rightshoulder:b7,start:b3,x:b0,y:b4,platform:Linux,",
+"030000006b140000010d000011010000,Revolution Pro Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Linux,",
+"030000006b140000130d000011010000,Revolution Pro Controller 3,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Linux,",
+"030000006f0e00001f01000000010000,Rock Candy,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,",
+"030000006f0e00001e01000011010000,Rock Candy PS3 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Linux,",
+"030000006f0e00004601000001010000,Rock Candy Xbox One Controller,a:b0,b:b1,back:b6,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,",
+"03000000a306000023f6000011010000,Saitek Cyborg V.1 Game Pad,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a4,start:b9,x:b0,y:b3,platform:Linux,",
+"03000000a30600001005000000010000,Saitek P150,a:b0,b:b1,dpdown:+a1,dpleft:-a0,dpright:+a0,dpup:-a1,leftshoulder:b7,lefttrigger:b6,rightshoulder:b2,righttrigger:b5,x:b3,y:b4,platform:Linux,",
+"03000000a30600000701000000010000,Saitek P220,a:b2,b:b3,dpdown:+a1,dpleft:-a0,dpright:+a0,dpup:-a1,leftshoulder:b6,lefttrigger:b7,rightshoulder:b4,righttrigger:b5,x:b0,y:b1,platform:Linux,",
+"03000000a30600000cff000010010000,Saitek P2500 Force Rumble Pad,a:b2,b:b3,back:b11,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b8,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b9,righttrigger:b7,rightx:a3,righty:a2,start:b10,x:b0,y:b1,platform:Linux,",
+"03000000a30600000c04000011010000,Saitek P2900 Wireless Pad,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b9,leftshoulder:b6,leftstick:b10,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b11,righttrigger:b5,rightx:a3,righty:a2,start:b12,x:b0,y:b3,platform:Linux,",
+"03000000300f00001201000010010000,Saitek P380,a:b2,b:b3,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b5,leftx:a0,lefty:a1,rightshoulder:b6,rightstick:b11,righttrigger:b7,rightx:a3,righty:a2,start:b9,x:b0,y:b1,platform:Linux,",
+"03000000a30600000901000000010000,Saitek P880,a:b2,b:b3,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b8,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b9,righttrigger:b7,rightx:a3,righty:a2,x:b0,y:b1,platform:Linux,",
+"03000000a30600000b04000000010000,Saitek P990 Dual Analog Pad,a:b1,b:b2,back:b9,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a3,righty:a2,start:b8,x:b0,y:b3,platform:Linux,",
+"03000000a306000018f5000010010000,Saitek PLC Saitek P3200 Rumble Pad,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a3,righty:a4,start:b9,x:b0,y:b3,platform:Linux,",
+"03000000a306000020f6000011010000,Saitek PS2700 Rumble Pad,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a4,start:b9,x:b0,y:b3,platform:Linux,",
+"03000000d81d00000e00000010010000,Savior,a:b0,b:b1,back:b8,leftshoulder:b6,leftstick:b10,lefttrigger:b7,leftx:a0,lefty:a1,rightshoulder:b2,rightstick:b11,righttrigger:b3,start:b9,x:b4,y:b5,platform:Linux,",
+"03000000f025000021c1000010010000,ShanWan Gioteck PS3 Wired Controller,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,platform:Linux,",
+"03000000632500007505000010010000,SHANWAN PS3/PC Gamepad,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,platform:Linux,",
+"03000000bc2000000055000010010000,ShanWan PS3/PC Wired GamePad,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a4,rightx:a2,righty:a3,start:b11,x:b3,y:b4,platform:Linux,",
+"030000005f140000c501000010010000,SHANWAN Trust Gamepad,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,platform:Linux,",
+"03000000632500002305000010010000,ShanWan USB Gamepad,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,platform:Linux,",
+"03000000341a00000908000010010000,SL-6566,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b2,y:b3,platform:Linux,",
+"030000004c050000e60c000011810000,Sony DualSense,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b10,leftshoulder:b4,leftstick:b11,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b12,righttrigger:a5,rightx:a3,righty:a4,start:b9,x:b3,y:b2,platform:Linux,",
+"050000004c050000e60c000000810000,Sony DualSense ,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b10,leftshoulder:b4,leftstick:b11,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b12,righttrigger:a5,rightx:a3,righty:a4,start:b9,x:b3,y:b2,platform:Linux,",
+"03000000250900000500000000010000,Sony PS2 pad with SmartJoy adapter,a:b2,b:b1,back:b9,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b10,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b11,righttrigger:b5,rightx:a2,righty:a3,start:b8,x:b3,y:b0,platform:Linux,",
+"030000005e0400008e02000073050000,Speedlink TORID Wireless Gamepad,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,",
+"030000005e0400008e02000020200000,SpeedLink XEOX Pro Analog Gamepad pad,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,",
+"03000000d11800000094000011010000,Stadia Controller,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a4,rightx:a2,righty:a3,start:b7,x:b2,y:b3,platform:Linux,",
+"03000000de2800000112000001000000,Steam Controller,a:b0,b:b1,back:b6,dpdown:b14,dpleft:b15,dpright:b13,dpup:b12,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,paddle1:b11,paddle2:b10,rightshoulder:b5,righttrigger:a3,start:b7,x:b2,y:b3,platform:Linux,",
+"03000000de2800000211000001000000,Steam Controller,a:b0,b:b1,back:b6,dpdown:b14,dpleft:b15,dpright:b13,dpup:b12,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,paddle1:b11,paddle2:b10,rightshoulder:b5,righttrigger:a3,start:b7,x:b2,y:b3,platform:Linux,",
+"03000000de2800000211000011010000,Steam Controller,a:b2,b:b3,back:b10,dpdown:b18,dpleft:b19,dpright:b20,dpup:b17,guide:b12,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,paddle1:b15,paddle2:b16,rightshoulder:b7,righttrigger:b9,rightx:a2,righty:a3,start:b11,x:b4,y:b5,platform:Linux,",
+"03000000de2800004211000001000000,Steam Controller,a:b0,b:b1,back:b6,dpdown:b14,dpleft:b15,dpright:b13,dpup:b12,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,paddle1:b11,paddle2:b10,rightshoulder:b5,righttrigger:a3,start:b7,x:b2,y:b3,platform:Linux,",
+"03000000de2800004211000011010000,Steam Controller,a:b2,b:b3,back:b10,dpdown:b18,dpleft:b19,dpright:b20,dpup:b17,guide:b12,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,paddle1:b15,paddle2:b16,rightshoulder:b7,righttrigger:b9,rightx:a2,righty:a3,start:b11,x:b4,y:b5,platform:Linux,",
+"03000000de280000fc11000001000000,Steam Controller,a:b0,b:b1,back:b6,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,",
+"05000000de2800000212000001000000,Steam Controller,a:b0,b:b1,back:b6,dpdown:b14,dpleft:b15,dpright:b13,dpup:b12,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,paddle1:b11,paddle2:b10,rightshoulder:b5,righttrigger:a3,start:b7,x:b2,y:b3,platform:Linux,",
+"05000000de2800000511000001000000,Steam Controller,a:b0,b:b1,back:b6,dpdown:b14,dpleft:b15,dpright:b13,dpup:b12,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,paddle1:b11,paddle2:b10,rightshoulder:b5,righttrigger:a3,start:b7,x:b2,y:b3,platform:Linux,",
+"05000000de2800000611000001000000,Steam Controller,a:b0,b:b1,back:b6,dpdown:b14,dpleft:b15,dpright:b13,dpup:b12,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,paddle1:b11,paddle2:b10,rightshoulder:b5,righttrigger:a3,start:b7,x:b2,y:b3,platform:Linux,",
+"03000000de280000ff11000001000000,Steam Virtual Gamepad,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,",
+"03000000381000003014000075010000,SteelSeries Stratus Duo,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,",
+"03000000381000003114000075010000,SteelSeries Stratus Duo,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,",
+"0500000011010000311400001b010000,SteelSeries Stratus Duo,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b32,leftshoulder:b6,leftstick:b13,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a4,rightx:a2,righty:a3,start:b11,x:b3,y:b4,platform:Linux,",
+"05000000110100001914000009010000,SteelSeries Stratus XL,a:b0,b:b1,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a4,rightx:a2,righty:a3,start:b11,x:b3,y:b4,platform:Linux,",
+"03000000ad1b000038f0000090040000,Street Fighter IV FightStick TE,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,",
+"030000003b07000004a1000000010000,Suncom SFX Plus for USB,a:b0,b:b2,back:b7,leftshoulder:b6,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b9,righttrigger:b5,start:b8,x:b1,y:b3,platform:Linux,",
+"03000000666600000488000000010000,Super Joy Box 5 Pro,a:b2,b:b1,back:b9,dpdown:b14,dpleft:b15,dpright:b13,dpup:b12,leftshoulder:b6,leftstick:b10,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b11,righttrigger:b5,rightx:a2,righty:a3,start:b8,x:b3,y:b0,platform:Linux,",
+"0300000000f00000f100000000010000,Super RetroPort,a:b1,b:b5,back:b2,leftshoulder:b6,leftx:a0,lefty:a1,rightshoulder:b7,start:b3,x:b0,y:b4,platform:Linux,",
+"03000000457500002211000010010000,SZMY-POWER CO. LTD. GAMEPAD,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,platform:Linux,",
+"030000008f0e00000d31000010010000,SZMY-POWER CO. LTD. GAMEPAD 3 TURBO,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Linux,",
+"030000008f0e00001431000010010000,SZMY-POWER CO. LTD. PS3 gamepad,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Linux,",
+"030000004f04000020b3000010010000,Thrustmaster 2 in 1 DT,a:b0,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b5,leftx:a0,lefty:a1,rightshoulder:b6,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b1,y:b3,platform:Linux,",
+"030000004f04000015b3000010010000,Thrustmaster Dual Analog 4,a:b0,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b5,leftx:a0,lefty:a1,rightshoulder:b6,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b1,y:b3,platform:Linux,",
+"030000004f04000023b3000000010000,Thrustmaster Dual Trigger 3-in-1,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Linux,",
+"030000004f0400000ed0000011010000,ThrustMaster eSwap PRO Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Linux,",
+"03000000b50700000399000000010000,Thrustmaster Firestorm Digital 2,a:b2,b:b4,back:b11,leftshoulder:b6,leftstick:b10,lefttrigger:b7,leftx:a0,lefty:a1,rightshoulder:b8,rightstick:b0,righttrigger:b9,start:b1,x:b3,y:b5,platform:Linux,",
+"030000004f04000003b3000010010000,Thrustmaster Firestorm Dual Analog 2,a:b0,b:b2,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b6,rightstick:b11,righttrigger:b9,rightx:a2,righty:a3,x:b1,y:b3,platform:Linux,",
+"030000004f04000000b3000010010000,Thrustmaster Firestorm Dual Power,a:b0,b:b2,back:b9,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b11,lefttrigger:b5,leftx:a0,lefty:a1,rightshoulder:b6,rightstick:b12,righttrigger:b7,rightx:a2,righty:a3,start:b10,x:b1,y:b3,platform:Linux,",
+"030000004f04000026b3000002040000,Thrustmaster Gamepad GP XID,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,",
+"03000000c6240000025b000002020000,Thrustmaster GPX Gamepad,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,",
+"030000004f04000008d0000000010000,Thrustmaster Run N Drive Wireless,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Linux,",
+"030000004f04000009d0000000010000,Thrustmaster Run N Drive Wireless PS3,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Linux,",
+"030000004f04000007d0000000010000,Thrustmaster T Mini Wireless,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Linux,",
+"030000004f04000012b3000010010000,Thrustmaster vibrating gamepad,a:b0,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b5,leftx:a0,lefty:a1,rightshoulder:b6,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b1,y:b3,platform:Linux,",
+"03000000bd12000015d0000010010000,Tomee SNES USB Controller,a:b2,b:b1,back:b8,dpdown:+a1,dpleft:-a0,dpright:+a0,dpup:-a1,leftshoulder:b4,rightshoulder:b5,start:b9,x:b3,y:b0,platform:Linux,",
+"03000000d814000007cd000011010000,Toodles 2008 Chimp PC/PS3,a:b0,b:b1,back:b8,leftshoulder:b4,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,righttrigger:b7,start:b9,x:b3,y:b2,platform:Linux,",
+"030000005e0400008e02000070050000,Torid,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,",
+"03000000c01100000591000011010000,Torid,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,platform:Linux,",
+"03000000100800000100000010010000,Twin USB PS2 Adapter,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b10,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b11,righttrigger:b5,rightx:a3,righty:a2,start:b9,x:b3,y:b0,platform:Linux,",
+"03000000100800000300000010010000,USB Gamepad,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b10,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b11,righttrigger:b5,rightx:a3,righty:a2,start:b9,x:b3,y:b0,platform:Linux,",
+"03000000790000000600000007010000,USB gamepad,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a3,righty:a4,start:b9,x:b3,y:b0,platform:Linux,",
+"03000000790000001100000000010000,USB Gamepad1,a:b2,b:b1,back:b8,dpdown:a0,dpleft:a1,dpright:a2,dpup:a4,start:b9,platform:Linux,",
+"030000006f0e00000302000011010000,Victrix Pro Fight Stick for PS4,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,lefttrigger:b6,rightshoulder:b5,righttrigger:b7,start:b9,x:b0,y:b3,platform:Linux,",
+"030000006f0e00000702000011010000,Victrix Pro Fight Stick for PS4,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,rightshoulder:b5,righttrigger:b7,start:b9,x:b0,y:b3,platform:Linux,",
+"05000000ac0500003232000001000000,VR-BOX,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b10,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b11,righttrigger:b5,rightx:a3,righty:a2,start:b9,x:b2,y:b3,platform:Linux,",
+"03000000791d00000103000010010000,Wii Classic Controller,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b10,leftshoulder:b6,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b7,righttrigger:b5,rightx:a2,righty:a3,start:b9,x:b3,y:b0,platform:Linux,",
+"050000000d0f0000f600000001000000,Wireless HORIPAD Switch Pro Controller,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b2,y:b3,platform:Linux,",
+"030000005e0400008e02000010010000,X360 Controller,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,",
+"030000005e0400008e02000014010000,X360 Controller,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,",
+"030000005e0400001907000000010000,X360 Wireless Controller,a:b0,b:b1,back:b6,dpdown:b14,dpleft:b11,dpright:b12,dpup:b13,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,",
+"030000005e0400009102000007010000,X360 Wireless Controller,a:b0,b:b1,back:b6,dpdown:b14,dpleft:b11,dpright:b12,dpup:b13,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,",
+"030000005e040000a102000000010000,X360 Wireless Controller,a:b0,b:b1,back:b6,dpdown:b14,dpleft:b11,dpright:b12,dpup:b13,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,",
+"030000005e040000a102000007010000,X360 Wireless Controller,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,",
+"0000000058626f782033363020576900,Xbox 360 Wireless Controller,a:b0,b:b1,back:b14,dpdown:b11,dpleft:b12,dpright:b13,dpup:b10,guide:b7,leftshoulder:b4,leftstick:b8,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b9,righttrigger:a5,rightx:a3,righty:a4,start:b6,x:b2,y:b3,platform:Linux,",
+"030000005e040000a102000014010000,Xbox 360 Wireless Receiver (XBOX),a:b0,b:b1,back:b6,dpdown:b14,dpleft:b11,dpright:b12,dpup:b13,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,",
+"0000000058626f782047616d65706100,Xbox Gamepad (userspace driver),a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a4,rightx:a2,righty:a3,start:b7,x:b2,y:b3,platform:Linux,",
+"030000005e040000d102000002010000,Xbox One Controller,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,",
+"050000005e040000fd02000030110000,Xbox One Controller,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,",
+"050000005e040000050b000002090000,Xbox One Elite Series 2,a:b0,b:b1,back:b136,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:a6,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a5,rightx:a2,righty:a3,start:b11,x:b3,y:b4,platform:Linux,",
+"030000005e040000ea02000000000000,Xbox One Wireless Controller,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,",
+"050000005e040000e002000003090000,Xbox One Wireless Controller,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b10,leftshoulder:b4,leftstick:b8,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b9,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,",
+"050000005e040000fd02000003090000,Xbox One Wireless Controller,a:b0,b:b1,back:b15,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b16,leftshoulder:b6,leftstick:b13,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a4,rightx:a2,righty:a3,start:b11,x:b3,y:b4,platform:Linux,",
+"030000005e040000ea02000001030000,Xbox One Wireless Controller (Model 1708),a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,",
+"030000005e040000120b000001050000,Xbox Series Controller,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,",
+"030000005e040000130b000005050000,Xbox Series Controller,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b6,leftstick:b13,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a4,rightx:a2,righty:a3,start:b11,x:b3,y:b4,platform:Linux,",
+"050000005e040000130b000001050000,Xbox Series Controller,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b6,leftstick:b13,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a4,rightx:a2,righty:a3,start:b11,x:b3,y:b4,platform:Linux,",
+"050000005e040000130b000005050000,Xbox Series Controller,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b6,leftstick:b13,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a4,rightx:a2,righty:a3,start:b11,x:b3,y:b4,platform:Linux,",
+"030000005e040000120b000005050000,XBox Series pad,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,",
+"030000005e0400008e02000000010000,xbox360 Wireless EasySMX,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,",
+"03000000450c00002043000010010000,XEOX Gamepad SL-6556-BK,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b2,y:b3,platform:Linux,",
+"03000000ac0500005b05000010010000,Xiaoji Gamesir-G3w,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,platform:Linux,",
+"05000000172700004431000029010000,XiaoMi Game Controller,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b20,leftshoulder:b6,leftstick:b13,lefttrigger:a7,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a6,rightx:a2,righty:a5,start:b11,x:b3,y:b4,platform:Linux,",
+"03000000c0160000e105000001010000,Xin-Mo Xin-Mo Dual Arcade,a:b4,b:b3,back:b6,dpdown:b12,dpleft:b13,dpright:b14,dpup:b11,guide:b9,leftshoulder:b2,leftx:a0,lefty:a1,rightshoulder:b5,start:b7,x:b1,y:b0,platform:Linux,",
+"03000000120c0000100e000011010000,ZEROPLUS P4 Gamepad,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Linux,",
+"03000000120c0000101e000011010000,ZEROPLUS P4 Wired Gamepad,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Linux,",
+#endif // GLFW_BUILD_LINUX_MAPPINGS
+};
+
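// A minimal usage sketch (illustration only, not taken from the patch above),
// assuming the public GLFW 3.3+ gamepad API: the SDL_GameControllerDB-style
// strings listed here are what let glfwGetGamepadState() translate raw
// joystick buttons, axes and hats into the standardized a/b/x/y layout.
#include <GLFW/glfw3.h>
#include <stdio.h>

int main(void)
{
    if (!glfwInit())
        return 1;

    // Additional mappings in the same format can be supplied at run time:
    //   glfwUpdateGamepadMappings("03000000...,My Pad,a:b0,b:b1,...");

    if (glfwJoystickIsGamepad(GLFW_JOYSTICK_1))
    {
        GLFWgamepadstate state;
        if (glfwGetGamepadState(GLFW_JOYSTICK_1, &state))
        {
            printf("%s: A %s, left stick (%.2f, %.2f)\n",
                   glfwGetGamepadName(GLFW_JOYSTICK_1),
                   state.buttons[GLFW_GAMEPAD_BUTTON_A] == GLFW_PRESS ? "down" : "up",
                   state.axes[GLFW_GAMEPAD_AXIS_LEFT_X],
                   state.axes[GLFW_GAMEPAD_AXIS_LEFT_Y]);
        }
    }

    glfwTerminate();
    return 0;
}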
diff --git a/chromium/third_party/dawn/third_party/glfw/src/mappings.h.in b/chromium/third_party/dawn/third_party/glfw/src/mappings.h.in
new file mode 100644
index 00000000000..f2604390a43
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/src/mappings.h.in
@@ -0,0 +1,82 @@
+//========================================================================
+// GLFW 3.4 - www.glfw.org
+//------------------------------------------------------------------------
+// Copyright (c) 2006-2018 Camilla Löwy <elmindreda@glfw.org>
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would
+// be appreciated but is not required.
+//
+// 2. Altered source versions must be plainly marked as such, and must not
+// be misrepresented as being the original software.
+//
+// 3. This notice may not be removed or altered from any source
+// distribution.
+//
+//========================================================================
+// As mappings.h.in, this file is used by CMake to produce the mappings.h
+// header file. If you are adding a GLFW specific gamepad mapping, this is
+// where to put it.
+//========================================================================
+// As mappings.h, this provides all pre-defined gamepad mappings, including
+// all available in SDL_GameControllerDB. Do not edit this file. Any gamepad
+// mappings not specific to GLFW should be submitted to SDL_GameControllerDB.
+// This file can be re-generated from mappings.h.in and the upstream
+// gamecontrollerdb.txt with the 'update_mappings' CMake target.
+//========================================================================
+
+// All gamepad mappings not labeled GLFW are copied from the
+// SDL_GameControllerDB project under the following license:
+//
+// Simple DirectMedia Layer
+// Copyright (C) 1997-2013 Sam Lantinga <slouken@libsdl.org>
+//
+// This software is provided 'as-is', without any express or implied warranty.
+// In no event will the authors be held liable for any damages arising from the
+// use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would
+// be appreciated but is not required.
+//
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+//
+// 3. This notice may not be removed or altered from any source distribution.
+
+const char* _glfwDefaultMappings[] =
+{
+#if defined(GLFW_BUILD_WIN32_MAPPINGS)
+@GLFW_WIN32_MAPPINGS@
+"78696e70757401000000000000000000,XInput Gamepad (GLFW),platform:Windows,a:b0,b:b1,x:b2,y:b3,leftshoulder:b4,rightshoulder:b5,back:b6,start:b7,leftstick:b8,rightstick:b9,leftx:a0,lefty:a1,rightx:a2,righty:a3,lefttrigger:a4,righttrigger:a5,dpup:h0.1,dpright:h0.2,dpdown:h0.4,dpleft:h0.8,",
+"78696e70757402000000000000000000,XInput Wheel (GLFW),platform:Windows,a:b0,b:b1,x:b2,y:b3,leftshoulder:b4,rightshoulder:b5,back:b6,start:b7,leftstick:b8,rightstick:b9,leftx:a0,lefty:a1,rightx:a2,righty:a3,lefttrigger:a4,righttrigger:a5,dpup:h0.1,dpright:h0.2,dpdown:h0.4,dpleft:h0.8,",
+"78696e70757403000000000000000000,XInput Arcade Stick (GLFW),platform:Windows,a:b0,b:b1,x:b2,y:b3,leftshoulder:b4,rightshoulder:b5,back:b6,start:b7,leftstick:b8,rightstick:b9,leftx:a0,lefty:a1,rightx:a2,righty:a3,lefttrigger:a4,righttrigger:a5,dpup:h0.1,dpright:h0.2,dpdown:h0.4,dpleft:h0.8,",
+"78696e70757404000000000000000000,XInput Flight Stick (GLFW),platform:Windows,a:b0,b:b1,x:b2,y:b3,leftshoulder:b4,rightshoulder:b5,back:b6,start:b7,leftstick:b8,rightstick:b9,leftx:a0,lefty:a1,rightx:a2,righty:a3,lefttrigger:a4,righttrigger:a5,dpup:h0.1,dpright:h0.2,dpdown:h0.4,dpleft:h0.8,",
+"78696e70757405000000000000000000,XInput Dance Pad (GLFW),platform:Windows,a:b0,b:b1,x:b2,y:b3,leftshoulder:b4,rightshoulder:b5,back:b6,start:b7,leftstick:b8,rightstick:b9,leftx:a0,lefty:a1,rightx:a2,righty:a3,lefttrigger:a4,righttrigger:a5,dpup:h0.1,dpright:h0.2,dpdown:h0.4,dpleft:h0.8,",
+"78696e70757406000000000000000000,XInput Guitar (GLFW),platform:Windows,a:b0,b:b1,x:b2,y:b3,leftshoulder:b4,rightshoulder:b5,back:b6,start:b7,leftstick:b8,rightstick:b9,leftx:a0,lefty:a1,rightx:a2,righty:a3,lefttrigger:a4,righttrigger:a5,dpup:h0.1,dpright:h0.2,dpdown:h0.4,dpleft:h0.8,",
+"78696e70757408000000000000000000,XInput Drum Kit (GLFW),platform:Windows,a:b0,b:b1,x:b2,y:b3,leftshoulder:b4,rightshoulder:b5,back:b6,start:b7,leftstick:b8,rightstick:b9,leftx:a0,lefty:a1,rightx:a2,righty:a3,lefttrigger:a4,righttrigger:a5,dpup:h0.1,dpright:h0.2,dpdown:h0.4,dpleft:h0.8,",
+#endif // GLFW_BUILD_WIN32_MAPPINGS
+
+#if defined(GLFW_BUILD_COCOA_MAPPINGS)
+@GLFW_COCOA_MAPPINGS@
+#endif // GLFW_BUILD_COCOA_MAPPINGS
+
+#if defined(GLFW_BUILD_LINUX_MAPPINGS)
+@GLFW_LINUX_MAPPINGS@
+#endif // GLFW_BUILD_LINUX_MAPPINGS
+};
+
diff --git a/chromium/third_party/dawn/third_party/glfw/src/monitor.c b/chromium/third_party/dawn/third_party/glfw/src/monitor.c
new file mode 100644
index 00000000000..082140ad903
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/src/monitor.c
@@ -0,0 +1,543 @@
+//========================================================================
+// GLFW 3.4 - www.glfw.org
+//------------------------------------------------------------------------
+// Copyright (c) 2002-2006 Marcus Geelnard
+// Copyright (c) 2006-2019 Camilla Löwy <elmindreda@glfw.org>
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would
+// be appreciated but is not required.
+//
+// 2. Altered source versions must be plainly marked as such, and must not
+// be misrepresented as being the original software.
+//
+// 3. This notice may not be removed or altered from any source
+// distribution.
+//
+//========================================================================
+// Please use C89 style variable declarations in this file because VS 2010 does not support C99
+//========================================================================
+
+#include "internal.h"
+
+#include <assert.h>
+#include <math.h>
+#include <float.h>
+#include <string.h>
+#include <stdlib.h>
+#include <limits.h>
+
+
+// Lexically compare video modes, used by qsort
+//
+static int compareVideoModes(const void* fp, const void* sp)
+{
+ const GLFWvidmode* fm = fp;
+ const GLFWvidmode* sm = sp;
+ const int fbpp = fm->redBits + fm->greenBits + fm->blueBits;
+ const int sbpp = sm->redBits + sm->greenBits + sm->blueBits;
+ const int farea = fm->width * fm->height;
+ const int sarea = sm->width * sm->height;
+
+ // First sort on color bits per pixel
+ if (fbpp != sbpp)
+ return fbpp - sbpp;
+
+ // Then sort on screen area
+ if (farea != sarea)
+ return farea - sarea;
+
+ // Then sort on width
+ if (fm->width != sm->width)
+ return fm->width - sm->width;
+
+ // Lastly sort on refresh rate
+ return fm->refreshRate - sm->refreshRate;
+}
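// Worked example of the ordering above (ascending, as consumed by qsort in
// refreshVideoModes below): 640x480 @60Hz 16bpp sorts before 640x480 @60Hz
// 24bpp (fewer color bits), which sorts before 800x600 @60Hz 24bpp (smaller
// area), which in turn sorts before 800x600 @75Hz 24bpp (lower refresh rate).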
+
+// Retrieves the available modes for the specified monitor
+//
+static GLFWbool refreshVideoModes(_GLFWmonitor* monitor)
+{
+ int modeCount;
+ GLFWvidmode* modes;
+
+ if (monitor->modes)
+ return GLFW_TRUE;
+
+ modes = _glfw.platform.getVideoModes(monitor, &modeCount);
+ if (!modes)
+ return GLFW_FALSE;
+
+ qsort(modes, modeCount, sizeof(GLFWvidmode), compareVideoModes);
+
+ _glfw_free(monitor->modes);
+ monitor->modes = modes;
+ monitor->modeCount = modeCount;
+
+ return GLFW_TRUE;
+}
+
+
+//////////////////////////////////////////////////////////////////////////
+////// GLFW event API //////
+//////////////////////////////////////////////////////////////////////////
+
+// Notifies shared code of a monitor connection or disconnection
+//
+void _glfwInputMonitor(_GLFWmonitor* monitor, int action, int placement)
+{
+ if (action == GLFW_CONNECTED)
+ {
+ _glfw.monitorCount++;
+ _glfw.monitors =
+ _glfw_realloc(_glfw.monitors,
+ sizeof(_GLFWmonitor*) * _glfw.monitorCount);
+
+ if (placement == _GLFW_INSERT_FIRST)
+ {
+ memmove(_glfw.monitors + 1,
+ _glfw.monitors,
+ ((size_t) _glfw.monitorCount - 1) * sizeof(_GLFWmonitor*));
+ _glfw.monitors[0] = monitor;
+ }
+ else
+ _glfw.monitors[_glfw.monitorCount - 1] = monitor;
+ }
+ else if (action == GLFW_DISCONNECTED)
+ {
+ int i;
+ _GLFWwindow* window;
+
+ for (window = _glfw.windowListHead; window; window = window->next)
+ {
+ if (window->monitor == monitor)
+ {
+ int width, height, xoff, yoff;
+ _glfw.platform.getWindowSize(window, &width, &height);
+ _glfw.platform.setWindowMonitor(window, NULL, 0, 0, width, height, 0);
+ _glfw.platform.getWindowFrameSize(window, &xoff, &yoff, NULL, NULL);
+ _glfw.platform.setWindowPos(window, xoff, yoff);
+ }
+ }
+
+ for (i = 0; i < _glfw.monitorCount; i++)
+ {
+ if (_glfw.monitors[i] == monitor)
+ {
+ _glfw.monitorCount--;
+ memmove(_glfw.monitors + i,
+ _glfw.monitors + i + 1,
+ ((size_t) _glfw.monitorCount - i) * sizeof(_GLFWmonitor*));
+ break;
+ }
+ }
+ }
+
+ if (_glfw.callbacks.monitor)
+ _glfw.callbacks.monitor((GLFWmonitor*) monitor, action);
+
+ if (action == GLFW_DISCONNECTED)
+ _glfwFreeMonitor(monitor);
+}
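// A minimal sketch of how the notification above reaches applications,
// assuming only the public monitor API (glfwSetMonitorCallback and the
// GLFW_CONNECTED / GLFW_DISCONNECTED events it delivers); note that the
// callback runs before _glfwFreeMonitor, so a disconnected handle is valid
// inside the callback but must not be stored.
#include <GLFW/glfw3.h>
#include <stdio.h>

static void onMonitor(GLFWmonitor* monitor, int event)
{
    if (event == GLFW_CONNECTED)
        printf("Monitor connected: %s\n", glfwGetMonitorName(monitor));
    else
        printf("Monitor disconnected\n");
}

int main(void)
{
    if (!glfwInit())
        return 1;

    glfwSetMonitorCallback(onMonitor);
    // ... create a window and poll events; monitor callbacks are delivered
    // during event processing ...
    glfwTerminate();
    return 0;
}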
+
+// Notifies shared code that a full screen window has acquired or released
+// a monitor
+//
+void _glfwInputMonitorWindow(_GLFWmonitor* monitor, _GLFWwindow* window)
+{
+ monitor->window = window;
+}
+
+
+//////////////////////////////////////////////////////////////////////////
+////// GLFW internal API //////
+//////////////////////////////////////////////////////////////////////////
+
+// Allocates and returns a monitor object with the specified name and dimensions
+//
+_GLFWmonitor* _glfwAllocMonitor(const char* name, int widthMM, int heightMM)
+{
+ _GLFWmonitor* monitor = _glfw_calloc(1, sizeof(_GLFWmonitor));
+ monitor->widthMM = widthMM;
+ monitor->heightMM = heightMM;
+
+ strncpy(monitor->name, name, sizeof(monitor->name) - 1);
+
+ return monitor;
+}
+
+// Frees a monitor object and any data associated with it
+//
+void _glfwFreeMonitor(_GLFWmonitor* monitor)
+{
+ if (monitor == NULL)
+ return;
+
+ _glfw.platform.freeMonitor(monitor);
+
+ _glfwFreeGammaArrays(&monitor->originalRamp);
+ _glfwFreeGammaArrays(&monitor->currentRamp);
+
+ _glfw_free(monitor->modes);
+ _glfw_free(monitor);
+}
+
+// Allocates red, green and blue value arrays of the specified size
+//
+void _glfwAllocGammaArrays(GLFWgammaramp* ramp, unsigned int size)
+{
+ ramp->red = _glfw_calloc(size, sizeof(unsigned short));
+ ramp->green = _glfw_calloc(size, sizeof(unsigned short));
+ ramp->blue = _glfw_calloc(size, sizeof(unsigned short));
+ ramp->size = size;
+}
+
+// Frees the red, green and blue value arrays and clears the struct
+//
+void _glfwFreeGammaArrays(GLFWgammaramp* ramp)
+{
+ _glfw_free(ramp->red);
+ _glfw_free(ramp->green);
+ _glfw_free(ramp->blue);
+
+ memset(ramp, 0, sizeof(GLFWgammaramp));
+}
+
+// Chooses the video mode most closely matching the desired one
+//
+const GLFWvidmode* _glfwChooseVideoMode(_GLFWmonitor* monitor,
+ const GLFWvidmode* desired)
+{
+ int i;
+ unsigned int sizeDiff, leastSizeDiff = UINT_MAX;
+ unsigned int rateDiff, leastRateDiff = UINT_MAX;
+ unsigned int colorDiff, leastColorDiff = UINT_MAX;
+ const GLFWvidmode* current;
+ const GLFWvidmode* closest = NULL;
+
+ if (!refreshVideoModes(monitor))
+ return NULL;
+
+ for (i = 0; i < monitor->modeCount; i++)
+ {
+ current = monitor->modes + i;
+
+ colorDiff = 0;
+
+ if (desired->redBits != GLFW_DONT_CARE)
+ colorDiff += abs(current->redBits - desired->redBits);
+ if (desired->greenBits != GLFW_DONT_CARE)
+ colorDiff += abs(current->greenBits - desired->greenBits);
+ if (desired->blueBits != GLFW_DONT_CARE)
+ colorDiff += abs(current->blueBits - desired->blueBits);
+
+ sizeDiff = abs((current->width - desired->width) *
+ (current->width - desired->width) +
+ (current->height - desired->height) *
+ (current->height - desired->height));
+
+ if (desired->refreshRate != GLFW_DONT_CARE)
+ rateDiff = abs(current->refreshRate - desired->refreshRate);
+ else
+ rateDiff = UINT_MAX - current->refreshRate;
+
+ if ((colorDiff < leastColorDiff) ||
+ (colorDiff == leastColorDiff && sizeDiff < leastSizeDiff) ||
+ (colorDiff == leastColorDiff && sizeDiff == leastSizeDiff && rateDiff < leastRateDiff))
+ {
+ closest = current;
+ leastSizeDiff = sizeDiff;
+ leastRateDiff = rateDiff;
+ leastColorDiff = colorDiff;
+ }
+ }
+
+ return closest;
+}
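// Worked example of the priority above: for a desired 1920x1080 @60Hz 24bpp
// mode, a 1920x1080 @75Hz 24bpp candidate (colorDiff 0, sizeDiff 0,
// rateDiff 15) beats a 1680x1050 @60Hz 24bpp candidate (colorDiff 0,
// sizeDiff 240*240 + 30*30 = 58500), because size is compared before
// refresh rate once color depth ties.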
+
+// Performs lexical comparison between two @ref GLFWvidmode structures
+//
+int _glfwCompareVideoModes(const GLFWvidmode* fm, const GLFWvidmode* sm)
+{
+ return compareVideoModes(fm, sm);
+}
+
+// Splits a color depth into red, green and blue bit depths
+//
+void _glfwSplitBPP(int bpp, int* red, int* green, int* blue)
+{
+ int delta;
+
+ // We assume that by 32 the user really meant 24
+ if (bpp == 32)
+ bpp = 24;
+
+ // Convert "bits per pixel" to red, green & blue sizes
+
+ *red = *green = *blue = bpp / 3;
+ delta = bpp - (*red * 3);
+ if (delta >= 1)
+ *green = *green + 1;
+
+ if (delta == 2)
+ *red = *red + 1;
+}
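// Worked examples of the split above: 16 bpp -> 5/6/5 (the single leftover
// bit goes to green), 24 bpp -> 8/8/8, and 32 bpp is first treated as 24,
// so it also yields 8/8/8.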
+
+
+//////////////////////////////////////////////////////////////////////////
+////// GLFW public API //////
+//////////////////////////////////////////////////////////////////////////
+
+GLFWAPI GLFWmonitor** glfwGetMonitors(int* count)
+{
+ assert(count != NULL);
+
+ *count = 0;
+
+ _GLFW_REQUIRE_INIT_OR_RETURN(NULL);
+
+ *count = _glfw.monitorCount;
+ return (GLFWmonitor**) _glfw.monitors;
+}
+
+GLFWAPI GLFWmonitor* glfwGetPrimaryMonitor(void)
+{
+ _GLFW_REQUIRE_INIT_OR_RETURN(NULL);
+
+ if (!_glfw.monitorCount)
+ return NULL;
+
+ return (GLFWmonitor*) _glfw.monitors[0];
+}
+
+GLFWAPI void glfwGetMonitorPos(GLFWmonitor* handle, int* xpos, int* ypos)
+{
+ _GLFWmonitor* monitor = (_GLFWmonitor*) handle;
+ assert(monitor != NULL);
+
+ if (xpos)
+ *xpos = 0;
+ if (ypos)
+ *ypos = 0;
+
+ _GLFW_REQUIRE_INIT();
+
+ _glfw.platform.getMonitorPos(monitor, xpos, ypos);
+}
+
+GLFWAPI void glfwGetMonitorWorkarea(GLFWmonitor* handle,
+ int* xpos, int* ypos,
+ int* width, int* height)
+{
+ _GLFWmonitor* monitor = (_GLFWmonitor*) handle;
+ assert(monitor != NULL);
+
+ if (xpos)
+ *xpos = 0;
+ if (ypos)
+ *ypos = 0;
+ if (width)
+ *width = 0;
+ if (height)
+ *height = 0;
+
+ _GLFW_REQUIRE_INIT();
+
+ _glfw.platform.getMonitorWorkarea(monitor, xpos, ypos, width, height);
+}
+
+GLFWAPI void glfwGetMonitorPhysicalSize(GLFWmonitor* handle, int* widthMM, int* heightMM)
+{
+ _GLFWmonitor* monitor = (_GLFWmonitor*) handle;
+ assert(monitor != NULL);
+
+ if (widthMM)
+ *widthMM = 0;
+ if (heightMM)
+ *heightMM = 0;
+
+ _GLFW_REQUIRE_INIT();
+
+ if (widthMM)
+ *widthMM = monitor->widthMM;
+ if (heightMM)
+ *heightMM = monitor->heightMM;
+}
+
+GLFWAPI void glfwGetMonitorContentScale(GLFWmonitor* handle,
+ float* xscale, float* yscale)
+{
+ _GLFWmonitor* monitor = (_GLFWmonitor*) handle;
+ assert(monitor != NULL);
+
+ if (xscale)
+ *xscale = 0.f;
+ if (yscale)
+ *yscale = 0.f;
+
+ _GLFW_REQUIRE_INIT();
+ _glfw.platform.getMonitorContentScale(monitor, xscale, yscale);
+}
+
+GLFWAPI const char* glfwGetMonitorName(GLFWmonitor* handle)
+{
+ _GLFWmonitor* monitor = (_GLFWmonitor*) handle;
+ assert(monitor != NULL);
+
+ _GLFW_REQUIRE_INIT_OR_RETURN(NULL);
+ return monitor->name;
+}
+
+GLFWAPI void glfwSetMonitorUserPointer(GLFWmonitor* handle, void* pointer)
+{
+ _GLFWmonitor* monitor = (_GLFWmonitor*) handle;
+ assert(monitor != NULL);
+
+ _GLFW_REQUIRE_INIT();
+ monitor->userPointer = pointer;
+}
+
+GLFWAPI void* glfwGetMonitorUserPointer(GLFWmonitor* handle)
+{
+ _GLFWmonitor* monitor = (_GLFWmonitor*) handle;
+ assert(monitor != NULL);
+
+ _GLFW_REQUIRE_INIT_OR_RETURN(NULL);
+ return monitor->userPointer;
+}
+
+GLFWAPI GLFWmonitorfun glfwSetMonitorCallback(GLFWmonitorfun cbfun)
+{
+ _GLFW_REQUIRE_INIT_OR_RETURN(NULL);
+ _GLFW_SWAP(GLFWmonitorfun, _glfw.callbacks.monitor, cbfun);
+ return cbfun;
+}
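// Note: _GLFW_SWAP exchanges the stored callback with cbfun before the
// return, so the value returned above is the previously installed callback.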
+
+GLFWAPI const GLFWvidmode* glfwGetVideoModes(GLFWmonitor* handle, int* count)
+{
+ _GLFWmonitor* monitor = (_GLFWmonitor*) handle;
+ assert(monitor != NULL);
+ assert(count != NULL);
+
+ *count = 0;
+
+ _GLFW_REQUIRE_INIT_OR_RETURN(NULL);
+
+ if (!refreshVideoModes(monitor))
+ return NULL;
+
+ *count = monitor->modeCount;
+ return monitor->modes;
+}
+
+GLFWAPI const GLFWvidmode* glfwGetVideoMode(GLFWmonitor* handle)
+{
+ _GLFWmonitor* monitor = (_GLFWmonitor*) handle;
+ assert(monitor != NULL);
+
+ _GLFW_REQUIRE_INIT_OR_RETURN(NULL);
+
+ _glfw.platform.getVideoMode(monitor, &monitor->currentMode);
+ return &monitor->currentMode;
+}
+
+GLFWAPI void glfwSetGamma(GLFWmonitor* handle, float gamma)
+{
+ unsigned int i;
+ unsigned short* values;
+ GLFWgammaramp ramp;
+ const GLFWgammaramp* original;
+ assert(handle != NULL);
+ assert(gamma > 0.f);
+ assert(gamma <= FLT_MAX);
+
+ _GLFW_REQUIRE_INIT();
+
+ if (gamma != gamma || gamma <= 0.f || gamma > FLT_MAX)
+ {
+ _glfwInputError(GLFW_INVALID_VALUE, "Invalid gamma value %f", gamma);
+ return;
+ }
+
+ original = glfwGetGammaRamp(handle);
+ if (!original)
+ return;
+
+ values = _glfw_calloc(original->size, sizeof(unsigned short));
+
+ for (i = 0; i < original->size; i++)
+ {
+ float value;
+
+ // Calculate intensity
+ value = i / (float) (original->size - 1);
+ // Apply gamma curve
+ value = powf(value, 1.f / gamma) * 65535.f + 0.5f;
+ // Clamp to value range
+ value = _glfw_fminf(value, 65535.f);
+
+ values[i] = (unsigned short) value;
+ }
+
+ ramp.red = values;
+ ramp.green = values;
+ ramp.blue = values;
+ ramp.size = original->size;
+
+ glfwSetGammaRamp(handle, &ramp);
+ _glfw_free(values);
+}
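// Worked example of the ramp formula above: with gamma = 1.0 the curve is a
// linear ramp (value ~= i / (size - 1) * 65535), while with gamma = 2.2 the
// mid-scale entry i = (size - 1) / 2 maps to 0.5^(1/2.2) * 65535 + 0.5,
// roughly 0.73 of full scale (about 47800 out of 65535).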
+
+GLFWAPI const GLFWgammaramp* glfwGetGammaRamp(GLFWmonitor* handle)
+{
+ _GLFWmonitor* monitor = (_GLFWmonitor*) handle;
+ assert(monitor != NULL);
+
+ _GLFW_REQUIRE_INIT_OR_RETURN(NULL);
+
+ _glfwFreeGammaArrays(&monitor->currentRamp);
+ if (!_glfw.platform.getGammaRamp(monitor, &monitor->currentRamp))
+ return NULL;
+
+ return &monitor->currentRamp;
+}
+
+GLFWAPI void glfwSetGammaRamp(GLFWmonitor* handle, const GLFWgammaramp* ramp)
+{
+ _GLFWmonitor* monitor = (_GLFWmonitor*) handle;
+ assert(monitor != NULL);
+ assert(ramp != NULL);
+ assert(ramp->size > 0);
+ assert(ramp->red != NULL);
+ assert(ramp->green != NULL);
+ assert(ramp->blue != NULL);
+
+ _GLFW_REQUIRE_INIT();
+
+ if (ramp->size <= 0)
+ {
+ _glfwInputError(GLFW_INVALID_VALUE,
+ "Invalid gamma ramp size %i",
+ ramp->size);
+ return;
+ }
+
+ if (!monitor->originalRamp.size)
+ {
+ if (!_glfw.platform.getGammaRamp(monitor, &monitor->originalRamp))
+ return;
+ }
+
+ _glfw.platform.setGammaRamp(monitor, ramp);
+}
+
diff --git a/chromium/third_party/dawn/third_party/glfw/src/nsgl_context.m b/chromium/third_party/dawn/third_party/glfw/src/nsgl_context.m
new file mode 100644
index 00000000000..fc1f752108c
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/src/nsgl_context.m
@@ -0,0 +1,376 @@
+//========================================================================
+// GLFW 3.4 macOS - www.glfw.org
+//------------------------------------------------------------------------
+// Copyright (c) 2009-2019 Camilla Löwy <elmindreda@glfw.org>
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would
+// be appreciated but is not required.
+//
+// 2. Altered source versions must be plainly marked as such, and must not
+// be misrepresented as being the original software.
+//
+// 3. This notice may not be removed or altered from any source
+// distribution.
+//
+//========================================================================
+// It is fine to use C99 in this file because it will not be built with VS
+//========================================================================
+
+#include "internal.h"
+
+#include <unistd.h>
+#include <math.h>
+
+static void makeContextCurrentNSGL(_GLFWwindow* window)
+{
+ @autoreleasepool {
+
+ if (window)
+ [window->context.nsgl.object makeCurrentContext];
+ else
+ [NSOpenGLContext clearCurrentContext];
+
+ _glfwPlatformSetTls(&_glfw.contextSlot, window);
+
+ } // autoreleasepool
+}
+
+static void swapBuffersNSGL(_GLFWwindow* window)
+{
+ @autoreleasepool {
+
+ // HACK: Simulate vsync with usleep as NSGL swap interval does not apply to
+ // windows with a non-visible occlusion state
+ if (window->ns.occluded)
+ {
+ int interval = 0;
+ [window->context.nsgl.object getValues:&interval
+ forParameter:NSOpenGLContextParameterSwapInterval];
+
+ if (interval > 0)
+ {
+ const double framerate = 60.0;
+ const uint64_t frequency = _glfwPlatformGetTimerFrequency();
+ const uint64_t value = _glfwPlatformGetTimerValue();
+
+ const double elapsed = value / (double) frequency;
+ const double period = 1.0 / framerate;
+ const double delay = period - fmod(elapsed, period);
+
+ usleep(floorl(delay * 1e6));
+ }
+ }
+
+ [window->context.nsgl.object flushBuffer];
+
+ } // autoreleasepool
+}
+
+static void swapIntervalNSGL(int interval)
+{
+ @autoreleasepool {
+
+ _GLFWwindow* window = _glfwPlatformGetTls(&_glfw.contextSlot);
+ if (window)
+ {
+ [window->context.nsgl.object setValues:&interval
+ forParameter:NSOpenGLContextParameterSwapInterval];
+ }
+
+ } // autoreleasepool
+}
+
+static int extensionSupportedNSGL(const char* extension)
+{
+ // There are no NSGL extensions
+ return GLFW_FALSE;
+}
+
+static GLFWglproc getProcAddressNSGL(const char* procname)
+{
+ CFStringRef symbolName = CFStringCreateWithCString(kCFAllocatorDefault,
+ procname,
+ kCFStringEncodingASCII);
+
+ GLFWglproc symbol = CFBundleGetFunctionPointerForName(_glfw.nsgl.framework,
+ symbolName);
+
+ CFRelease(symbolName);
+
+ return symbol;
+}
+
+static void destroyContextNSGL(_GLFWwindow* window)
+{
+ @autoreleasepool {
+
+ [window->context.nsgl.pixelFormat release];
+ window->context.nsgl.pixelFormat = nil;
+
+ [window->context.nsgl.object release];
+ window->context.nsgl.object = nil;
+
+ } // autoreleasepool
+}
+
+
+//////////////////////////////////////////////////////////////////////////
+////// GLFW internal API //////
+//////////////////////////////////////////////////////////////////////////
+
+// Initialize OpenGL support
+//
+GLFWbool _glfwInitNSGL(void)
+{
+ if (_glfw.nsgl.framework)
+ return GLFW_TRUE;
+
+ _glfw.nsgl.framework =
+ CFBundleGetBundleWithIdentifier(CFSTR("com.apple.opengl"));
+ if (_glfw.nsgl.framework == NULL)
+ {
+ _glfwInputError(GLFW_API_UNAVAILABLE,
+ "NSGL: Failed to locate OpenGL framework");
+ return GLFW_FALSE;
+ }
+
+ return GLFW_TRUE;
+}
+
+// Terminate OpenGL support
+//
+void _glfwTerminateNSGL(void)
+{
+}
+
+// Create the OpenGL context
+//
+GLFWbool _glfwCreateContextNSGL(_GLFWwindow* window,
+ const _GLFWctxconfig* ctxconfig,
+ const _GLFWfbconfig* fbconfig)
+{
+ if (ctxconfig->client == GLFW_OPENGL_ES_API)
+ {
+ _glfwInputError(GLFW_API_UNAVAILABLE,
+ "NSGL: OpenGL ES is not available on macOS");
+ return GLFW_FALSE;
+ }
+
+ if (ctxconfig->major > 2)
+ {
+ if (ctxconfig->major == 3 && ctxconfig->minor < 2)
+ {
+ _glfwInputError(GLFW_VERSION_UNAVAILABLE,
+ "NSGL: The targeted version of macOS does not support OpenGL 3.0 or 3.1 but may support 3.2 and above");
+ return GLFW_FALSE;
+ }
+ }
+
+ // Context robustness modes (GL_KHR_robustness) are not yet supported by
+ // macOS but are not a hard constraint, so ignore and continue
+
+ // Context release behaviors (GL_KHR_context_flush_control) are not yet
+ // supported by macOS but are not a hard constraint, so ignore and continue
+
+ // Debug contexts (GL_KHR_debug) are not yet supported by macOS but are not
+ // a hard constraint, so ignore and continue
+
+ // No-error contexts (GL_KHR_no_error) are not yet supported by macOS but
+ // are not a hard constraint, so ignore and continue
+
+#define ADD_ATTRIB(a) \
+{ \
+ assert((size_t) index < sizeof(attribs) / sizeof(attribs[0])); \
+ attribs[index++] = a; \
+}
+#define SET_ATTRIB(a, v) { ADD_ATTRIB(a); ADD_ATTRIB(v); }
+
+ NSOpenGLPixelFormatAttribute attribs[40];
+ int index = 0;
+
+ ADD_ATTRIB(NSOpenGLPFAAccelerated);
+ ADD_ATTRIB(NSOpenGLPFAClosestPolicy);
+
+ if (ctxconfig->nsgl.offline)
+ {
+ ADD_ATTRIB(NSOpenGLPFAAllowOfflineRenderers);
+ // NOTE: This replaces the NSSupportsAutomaticGraphicsSwitching key in
+ // Info.plist for unbundled applications
+ // HACK: This assumes that NSOpenGLPixelFormat will remain
+ // a straightforward wrapper of its CGL counterpart
+ ADD_ATTRIB(kCGLPFASupportsAutomaticGraphicsSwitching);
+ }
+
+#if MAC_OS_X_VERSION_MAX_ALLOWED >= 101000
+ if (ctxconfig->major >= 4)
+ {
+ SET_ATTRIB(NSOpenGLPFAOpenGLProfile, NSOpenGLProfileVersion4_1Core);
+ }
+ else
+#endif /*MAC_OS_X_VERSION_MAX_ALLOWED*/
+ if (ctxconfig->major >= 3)
+ {
+ SET_ATTRIB(NSOpenGLPFAOpenGLProfile, NSOpenGLProfileVersion3_2Core);
+ }
+
+ if (ctxconfig->major <= 2)
+ {
+ if (fbconfig->auxBuffers != GLFW_DONT_CARE)
+ SET_ATTRIB(NSOpenGLPFAAuxBuffers, fbconfig->auxBuffers);
+
+ if (fbconfig->accumRedBits != GLFW_DONT_CARE &&
+ fbconfig->accumGreenBits != GLFW_DONT_CARE &&
+ fbconfig->accumBlueBits != GLFW_DONT_CARE &&
+ fbconfig->accumAlphaBits != GLFW_DONT_CARE)
+ {
+ const int accumBits = fbconfig->accumRedBits +
+ fbconfig->accumGreenBits +
+ fbconfig->accumBlueBits +
+ fbconfig->accumAlphaBits;
+
+ SET_ATTRIB(NSOpenGLPFAAccumSize, accumBits);
+ }
+ }
+
+ if (fbconfig->redBits != GLFW_DONT_CARE &&
+ fbconfig->greenBits != GLFW_DONT_CARE &&
+ fbconfig->blueBits != GLFW_DONT_CARE)
+ {
+ int colorBits = fbconfig->redBits +
+ fbconfig->greenBits +
+ fbconfig->blueBits;
+
+ // macOS needs non-zero color size, so set reasonable values
+ if (colorBits == 0)
+ colorBits = 24;
+ else if (colorBits < 15)
+ colorBits = 15;
+
+ SET_ATTRIB(NSOpenGLPFAColorSize, colorBits);
+ }
+
+ if (fbconfig->alphaBits != GLFW_DONT_CARE)
+ SET_ATTRIB(NSOpenGLPFAAlphaSize, fbconfig->alphaBits);
+
+ if (fbconfig->depthBits != GLFW_DONT_CARE)
+ SET_ATTRIB(NSOpenGLPFADepthSize, fbconfig->depthBits);
+
+ if (fbconfig->stencilBits != GLFW_DONT_CARE)
+ SET_ATTRIB(NSOpenGLPFAStencilSize, fbconfig->stencilBits);
+
+ if (fbconfig->stereo)
+ {
+#if MAC_OS_X_VERSION_MAX_ALLOWED >= 101200
+ _glfwInputError(GLFW_FORMAT_UNAVAILABLE,
+ "NSGL: Stereo rendering is deprecated");
+ return GLFW_FALSE;
+#else
+ ADD_ATTRIB(NSOpenGLPFAStereo);
+#endif
+ }
+
+ if (fbconfig->doublebuffer)
+ ADD_ATTRIB(NSOpenGLPFADoubleBuffer);
+
+ if (fbconfig->samples != GLFW_DONT_CARE)
+ {
+ if (fbconfig->samples == 0)
+ {
+ SET_ATTRIB(NSOpenGLPFASampleBuffers, 0);
+ }
+ else
+ {
+ SET_ATTRIB(NSOpenGLPFASampleBuffers, 1);
+ SET_ATTRIB(NSOpenGLPFASamples, fbconfig->samples);
+ }
+ }
+
+ // NOTE: All NSOpenGLPixelFormats on the relevant cards support sRGB
+ // framebuffer, so there's no need (and no way) to request it
+
+ ADD_ATTRIB(0);
+
+#undef ADD_ATTRIB
+#undef SET_ATTRIB
+
+ window->context.nsgl.pixelFormat =
+ [[NSOpenGLPixelFormat alloc] initWithAttributes:attribs];
+ if (window->context.nsgl.pixelFormat == nil)
+ {
+ _glfwInputError(GLFW_FORMAT_UNAVAILABLE,
+ "NSGL: Failed to find a suitable pixel format");
+ return GLFW_FALSE;
+ }
+
+ NSOpenGLContext* share = nil;
+
+ if (ctxconfig->share)
+ share = ctxconfig->share->context.nsgl.object;
+
+ window->context.nsgl.object =
+ [[NSOpenGLContext alloc] initWithFormat:window->context.nsgl.pixelFormat
+ shareContext:share];
+ if (window->context.nsgl.object == nil)
+ {
+ _glfwInputError(GLFW_VERSION_UNAVAILABLE,
+ "NSGL: Failed to create OpenGL context");
+ return GLFW_FALSE;
+ }
+
+ if (fbconfig->transparent)
+ {
+ GLint opaque = 0;
+ [window->context.nsgl.object setValues:&opaque
+ forParameter:NSOpenGLContextParameterSurfaceOpacity];
+ }
+
+ [window->ns.view setWantsBestResolutionOpenGLSurface:window->ns.retina];
+
+ [window->context.nsgl.object setView:window->ns.view];
+
+ window->context.makeCurrent = makeContextCurrentNSGL;
+ window->context.swapBuffers = swapBuffersNSGL;
+ window->context.swapInterval = swapIntervalNSGL;
+ window->context.extensionSupported = extensionSupportedNSGL;
+ window->context.getProcAddress = getProcAddressNSGL;
+ window->context.destroy = destroyContextNSGL;
+
+ return GLFW_TRUE;
+}
+
+
+//////////////////////////////////////////////////////////////////////////
+////// GLFW native API //////
+//////////////////////////////////////////////////////////////////////////
+
+GLFWAPI id glfwGetNSGLContext(GLFWwindow* handle)
+{
+ _GLFWwindow* window = (_GLFWwindow*) handle;
+ _GLFW_REQUIRE_INIT_OR_RETURN(nil);
+
+ if (_glfw.platform.platformID != GLFW_PLATFORM_COCOA)
+ {
+ _glfwInputError(GLFW_PLATFORM_UNAVAILABLE,
+ "NSGL: Platform not initialized");
+ return nil;
+ }
+
+ if (window->context.source != GLFW_NATIVE_CONTEXT_API)
+ {
+ _glfwInputError(GLFW_NO_WINDOW_CONTEXT, NULL);
+ return nil;
+ }
+
+ return window->context.nsgl.object;
+}
+
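Only the context version, profile and framebuffer hints influence the NSGL attribute list built in _glfwCreateContextNSGL. A hedged sketch of the hints an application sets to reach the NSOpenGLProfileVersion3_2Core path (all names are standard GLFW 3 API; window setup details elided):

    #include <GLFW/glfw3.h>

    // Request a 3.2 core-profile context; on macOS this selects the
    // NSOpenGLPFAOpenGLProfile attribute seen in _glfwCreateContextNSGL.
    GLFWwindow* create_macos_core_context(void)
    {
        glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
        glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 2);
        glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
        // macOS only offers forward-compatible core contexts for GL 3.2+.
        glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GLFW_TRUE);

        return glfwCreateWindow(640, 480, "NSGL core profile", NULL, NULL);
    }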
diff --git a/chromium/third_party/dawn/third_party/glfw/src/null_init.c b/chromium/third_party/dawn/third_party/glfw/src/null_init.c
new file mode 100644
index 00000000000..de4b28f350c
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/src/null_init.c
@@ -0,0 +1,133 @@
+//========================================================================
+// GLFW 3.4 - www.glfw.org
+//------------------------------------------------------------------------
+// Copyright (c) 2016 Google Inc.
+// Copyright (c) 2016-2017 Camilla Löwy <elmindreda@glfw.org>
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would
+// be appreciated but is not required.
+//
+// 2. Altered source versions must be plainly marked as such, and must not
+// be misrepresented as being the original software.
+//
+// 3. This notice may not be removed or altered from any source
+// distribution.
+//
+//========================================================================
+// It is fine to use C99 in this file because it will not be built with VS
+//========================================================================
+
+#include "internal.h"
+
+#include <stdlib.h>
+
+
+//////////////////////////////////////////////////////////////////////////
+////// GLFW platform API //////
+//////////////////////////////////////////////////////////////////////////
+
+GLFWbool _glfwConnectNull(int platformID, _GLFWplatform* platform)
+{
+ const _GLFWplatform null =
+ {
+ GLFW_PLATFORM_NULL,
+ _glfwInitNull,
+ _glfwTerminateNull,
+ _glfwGetCursorPosNull,
+ _glfwSetCursorPosNull,
+ _glfwSetCursorModeNull,
+ _glfwSetRawMouseMotionNull,
+ _glfwRawMouseMotionSupportedNull,
+ _glfwCreateCursorNull,
+ _glfwCreateStandardCursorNull,
+ _glfwDestroyCursorNull,
+ _glfwSetCursorNull,
+ _glfwGetScancodeNameNull,
+ _glfwGetKeyScancodeNull,
+ _glfwSetClipboardStringNull,
+ _glfwGetClipboardStringNull,
+ _glfwInitJoysticksNull,
+ _glfwTerminateJoysticksNull,
+ _glfwPollJoystickNull,
+ _glfwGetMappingNameNull,
+ _glfwUpdateGamepadGUIDNull,
+ _glfwFreeMonitorNull,
+ _glfwGetMonitorPosNull,
+ _glfwGetMonitorContentScaleNull,
+ _glfwGetMonitorWorkareaNull,
+ _glfwGetVideoModesNull,
+ _glfwGetVideoModeNull,
+ _glfwGetGammaRampNull,
+ _glfwSetGammaRampNull,
+ _glfwCreateWindowNull,
+ _glfwDestroyWindowNull,
+ _glfwSetWindowTitleNull,
+ _glfwSetWindowIconNull,
+ _glfwGetWindowPosNull,
+ _glfwSetWindowPosNull,
+ _glfwGetWindowSizeNull,
+ _glfwSetWindowSizeNull,
+ _glfwSetWindowSizeLimitsNull,
+ _glfwSetWindowAspectRatioNull,
+ _glfwGetFramebufferSizeNull,
+ _glfwGetWindowFrameSizeNull,
+ _glfwGetWindowContentScaleNull,
+ _glfwIconifyWindowNull,
+ _glfwRestoreWindowNull,
+ _glfwMaximizeWindowNull,
+ _glfwShowWindowNull,
+ _glfwHideWindowNull,
+ _glfwRequestWindowAttentionNull,
+ _glfwFocusWindowNull,
+ _glfwSetWindowMonitorNull,
+ _glfwWindowFocusedNull,
+ _glfwWindowIconifiedNull,
+ _glfwWindowVisibleNull,
+ _glfwWindowMaximizedNull,
+ _glfwWindowHoveredNull,
+ _glfwFramebufferTransparentNull,
+ _glfwGetWindowOpacityNull,
+ _glfwSetWindowResizableNull,
+ _glfwSetWindowDecoratedNull,
+ _glfwSetWindowFloatingNull,
+ _glfwSetWindowOpacityNull,
+ _glfwSetWindowMousePassthroughNull,
+ _glfwPollEventsNull,
+ _glfwWaitEventsNull,
+ _glfwWaitEventsTimeoutNull,
+ _glfwPostEmptyEventNull,
+ _glfwGetEGLPlatformNull,
+ _glfwGetEGLNativeDisplayNull,
+ _glfwGetEGLNativeWindowNull,
+ _glfwGetRequiredInstanceExtensionsNull,
+ _glfwGetPhysicalDevicePresentationSupportNull,
+ _glfwCreateWindowSurfaceNull,
+ };
+
+ *platform = null;
+ return GLFW_TRUE;
+}
+
+int _glfwInitNull(void)
+{
+ _glfwPollMonitorsNull();
+ return GLFW_TRUE;
+}
+
+void _glfwTerminateNull(void)
+{
+ free(_glfw.null.clipboardString);
+ _glfwTerminateOSMesa();
+ _glfwTerminateEGL();
+}
+
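_glfwConnectNull is only reached when the Null platform is requested explicitly (see _glfwSelectPlatform in platform.c later in this patch). A minimal sketch of a headless test opting in through the GLFW 3.4 init hint:

    #include <stdio.h>
    #include <GLFW/glfw3.h>

    int main(void)
    {
        // The Null backend is never auto-selected; it must be asked for.
        glfwInitHint(GLFW_PLATFORM, GLFW_PLATFORM_NULL);

        if (!glfwInit())
            return 1;

        // Prints the GLFW_PLATFORM_NULL token reported by glfwGetPlatform().
        printf("platform: 0x%08X\n", glfwGetPlatform());

        glfwTerminate();
        return 0;
    }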
diff --git a/chromium/third_party/dawn/third_party/glfw/src/null_joystick.c b/chromium/third_party/dawn/third_party/glfw/src/null_joystick.c
new file mode 100644
index 00000000000..e2770dd809c
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/src/null_joystick.c
@@ -0,0 +1,58 @@
+//========================================================================
+// GLFW 3.4 - www.glfw.org
+//------------------------------------------------------------------------
+// Copyright (c) 2016-2017 Camilla Löwy <elmindreda@glfw.org>
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would
+// be appreciated but is not required.
+//
+// 2. Altered source versions must be plainly marked as such, and must not
+// be misrepresented as being the original software.
+//
+// 3. This notice may not be removed or altered from any source
+// distribution.
+//
+//========================================================================
+// It is fine to use C99 in this file because it will not be built with VS
+//========================================================================
+
+#include "internal.h"
+
+
+//////////////////////////////////////////////////////////////////////////
+////// GLFW platform API //////
+//////////////////////////////////////////////////////////////////////////
+
+GLFWbool _glfwInitJoysticksNull(void)
+{
+ return GLFW_TRUE;
+}
+
+void _glfwTerminateJoysticksNull(void)
+{
+}
+
+int _glfwPollJoystickNull(_GLFWjoystick* js, int mode)
+{
+ return GLFW_FALSE;
+}
+
+const char* _glfwGetMappingNameNull(void)
+{
+ return "";
+}
+
+void _glfwUpdateGamepadGUIDNull(char* guid)
+{
+}
+
diff --git a/chromium/third_party/dawn/third_party/glfw/src/null_joystick.h b/chromium/third_party/dawn/third_party/glfw/src/null_joystick.h
new file mode 100644
index 00000000000..ec223ecd2c5
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/src/null_joystick.h
@@ -0,0 +1,32 @@
+//========================================================================
+// GLFW 3.4 - www.glfw.org
+//------------------------------------------------------------------------
+// Copyright (c) 2006-2017 Camilla Löwy <elmindreda@glfw.org>
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would
+// be appreciated but is not required.
+//
+// 2. Altered source versions must be plainly marked as such, and must not
+// be misrepresented as being the original software.
+//
+// 3. This notice may not be removed or altered from any source
+// distribution.
+//
+//========================================================================
+
+GLFWbool _glfwInitJoysticksNull(void);
+void _glfwTerminateJoysticksNull(void);
+int _glfwPollJoystickNull(_GLFWjoystick* js, int mode);
+const char* _glfwGetMappingNameNull(void);
+void _glfwUpdateGamepadGUIDNull(char* guid);
+
diff --git a/chromium/third_party/dawn/third_party/glfw/src/null_monitor.c b/chromium/third_party/dawn/third_party/glfw/src/null_monitor.c
new file mode 100644
index 00000000000..63a1cd20580
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/src/null_monitor.c
@@ -0,0 +1,161 @@
+//========================================================================
+// GLFW 3.4 - www.glfw.org
+//------------------------------------------------------------------------
+// Copyright (c) 2016 Google Inc.
+// Copyright (c) 2016-2019 Camilla Löwy <elmindreda@glfw.org>
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would
+// be appreciated but is not required.
+//
+// 2. Altered source versions must be plainly marked as such, and must not
+// be misrepresented as being the original software.
+//
+// 3. This notice may not be removed or altered from any source
+// distribution.
+//
+//========================================================================
+// It is fine to use C99 in this file because it will not be built with VS
+//========================================================================
+
+#include "internal.h"
+
+#include <stdlib.h>
+#include <string.h>
+#include <math.h>
+
+// The sole (fake) video mode of our (sole) fake monitor
+//
+static GLFWvidmode getVideoMode(void)
+{
+ GLFWvidmode mode;
+ mode.width = 1920;
+ mode.height = 1080;
+ mode.redBits = 8;
+ mode.greenBits = 8;
+ mode.blueBits = 8;
+ mode.refreshRate = 60;
+ return mode;
+}
+
+//////////////////////////////////////////////////////////////////////////
+////// GLFW internal API //////
+//////////////////////////////////////////////////////////////////////////
+
+void _glfwPollMonitorsNull(void)
+{
+ const float dpi = 141.f;
+ const GLFWvidmode mode = getVideoMode();
+ _GLFWmonitor* monitor = _glfwAllocMonitor("Null SuperNoop 0",
+ (int) (mode.width * 25.4f / dpi),
+ (int) (mode.height * 25.4f / dpi));
+ _glfwInputMonitor(monitor, GLFW_CONNECTED, _GLFW_INSERT_FIRST);
+}
+
+//////////////////////////////////////////////////////////////////////////
+////// GLFW platform API //////
+//////////////////////////////////////////////////////////////////////////
+
+void _glfwFreeMonitorNull(_GLFWmonitor* monitor)
+{
+ _glfwFreeGammaArrays(&monitor->null.ramp);
+}
+
+void _glfwGetMonitorPosNull(_GLFWmonitor* monitor, int* xpos, int* ypos)
+{
+ if (xpos)
+ *xpos = 0;
+ if (ypos)
+ *ypos = 0;
+}
+
+void _glfwGetMonitorContentScaleNull(_GLFWmonitor* monitor,
+ float* xscale, float* yscale)
+{
+ if (xscale)
+ *xscale = 1.f;
+ if (yscale)
+ *yscale = 1.f;
+}
+
+void _glfwGetMonitorWorkareaNull(_GLFWmonitor* monitor,
+ int* xpos, int* ypos,
+ int* width, int* height)
+{
+ const GLFWvidmode mode = getVideoMode();
+
+ if (xpos)
+ *xpos = 0;
+ if (ypos)
+ *ypos = 10;
+ if (width)
+ *width = mode.width;
+ if (height)
+ *height = mode.height - 10;
+}
+
+GLFWvidmode* _glfwGetVideoModesNull(_GLFWmonitor* monitor, int* found)
+{
+ GLFWvidmode* mode = _glfw_calloc(1, sizeof(GLFWvidmode));
+ *mode = getVideoMode();
+ *found = 1;
+ return mode;
+}
+
+void _glfwGetVideoModeNull(_GLFWmonitor* monitor, GLFWvidmode* mode)
+{
+ *mode = getVideoMode();
+}
+
+GLFWbool _glfwGetGammaRampNull(_GLFWmonitor* monitor, GLFWgammaramp* ramp)
+{
+ if (!monitor->null.ramp.size)
+ {
+ unsigned int i;
+
+ _glfwAllocGammaArrays(&monitor->null.ramp, 256);
+
+ for (i = 0; i < monitor->null.ramp.size; i++)
+ {
+ const float gamma = 2.2f;
+ float value;
+ value = i / (float) (monitor->null.ramp.size - 1);
+ value = powf(value, 1.f / gamma) * 65535.f + 0.5f;
+ value = _glfw_fminf(value, 65535.f);
+
+ monitor->null.ramp.red[i] = (unsigned short) value;
+ monitor->null.ramp.green[i] = (unsigned short) value;
+ monitor->null.ramp.blue[i] = (unsigned short) value;
+ }
+ }
+
+ _glfwAllocGammaArrays(ramp, monitor->null.ramp.size);
+ memcpy(ramp->red, monitor->null.ramp.red, sizeof(short) * ramp->size);
+ memcpy(ramp->green, monitor->null.ramp.green, sizeof(short) * ramp->size);
+ memcpy(ramp->blue, monitor->null.ramp.blue, sizeof(short) * ramp->size);
+ return GLFW_TRUE;
+}
+
+void _glfwSetGammaRampNull(_GLFWmonitor* monitor, const GLFWgammaramp* ramp)
+{
+ if (monitor->null.ramp.size != ramp->size)
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "Null: Gamma ramp size must match current ramp size");
+ return;
+ }
+
+ memcpy(monitor->null.ramp.red, ramp->red, sizeof(short) * ramp->size);
+ memcpy(monitor->null.ramp.green, ramp->green, sizeof(short) * ramp->size);
+ memcpy(monitor->null.ramp.blue, ramp->blue, sizeof(short) * ramp->size);
+}
+
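The ramp lazily built in _glfwGetGammaRampNull encodes a plain power-law response: value = (i / (size - 1))^(1/2.2) scaled to 16 bits. A standalone restatement of that computation, for reference:

    #include <math.h>

    // Fill a 256-entry 16-bit ramp with the same 2.2 power-law curve the
    // Null monitor backend produces above.
    static void fill_gamma_ramp(unsigned short ramp[256])
    {
        unsigned int i;
        const float gamma = 2.2f;

        for (i = 0; i < 256; i++)
        {
            float value = i / 255.f;
            value = powf(value, 1.f / gamma) * 65535.f + 0.5f;
            if (value > 65535.f)
                value = 65535.f;
            ramp[i] = (unsigned short) value;
        }
    }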
diff --git a/chromium/third_party/dawn/third_party/glfw/src/null_platform.h b/chromium/third_party/dawn/third_party/glfw/src/null_platform.h
new file mode 100644
index 00000000000..fca7c11f782
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/src/null_platform.h
@@ -0,0 +1,149 @@
+//========================================================================
+// GLFW 3.4 - www.glfw.org
+//------------------------------------------------------------------------
+// Copyright (c) 2016 Google Inc.
+// Copyright (c) 2016-2017 Camilla Löwy <elmindreda@glfw.org>
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would
+// be appreciated but is not required.
+//
+// 2. Altered source versions must be plainly marked as such, and must not
+// be misrepresented as being the original software.
+//
+// 3. This notice may not be removed or altered from any source
+// distribution.
+//
+//========================================================================
+
+#define GLFW_NULL_WINDOW_STATE _GLFWwindowNull null;
+#define GLFW_NULL_LIBRARY_WINDOW_STATE _GLFWlibraryNull null;
+#define GLFW_NULL_MONITOR_STATE _GLFWmonitorNull null;
+
+#define GLFW_NULL_CONTEXT_STATE
+#define GLFW_NULL_CURSOR_STATE
+#define GLFW_NULL_LIBRARY_CONTEXT_STATE
+
+
+// Null-specific per-window data
+//
+typedef struct _GLFWwindowNull
+{
+ int xpos;
+ int ypos;
+ int width;
+ int height;
+ char* title;
+ GLFWbool visible;
+ GLFWbool iconified;
+ GLFWbool maximized;
+ GLFWbool resizable;
+ GLFWbool decorated;
+ GLFWbool floating;
+ GLFWbool transparent;
+ float opacity;
+} _GLFWwindowNull;
+
+// Null-specific per-monitor data
+//
+typedef struct _GLFWmonitorNull
+{
+ GLFWgammaramp ramp;
+} _GLFWmonitorNull;
+
+// Null-specific global data
+//
+typedef struct _GLFWlibraryNull
+{
+ int xcursor;
+ int ycursor;
+ char* clipboardString;
+ _GLFWwindow* focusedWindow;
+} _GLFWlibraryNull;
+
+void _glfwPollMonitorsNull(void);
+
+GLFWbool _glfwConnectNull(int platformID, _GLFWplatform* platform);
+int _glfwInitNull(void);
+void _glfwTerminateNull(void);
+
+void _glfwFreeMonitorNull(_GLFWmonitor* monitor);
+void _glfwGetMonitorPosNull(_GLFWmonitor* monitor, int* xpos, int* ypos);
+void _glfwGetMonitorContentScaleNull(_GLFWmonitor* monitor, float* xscale, float* yscale);
+void _glfwGetMonitorWorkareaNull(_GLFWmonitor* monitor, int* xpos, int* ypos, int* width, int* height);
+GLFWvidmode* _glfwGetVideoModesNull(_GLFWmonitor* monitor, int* found);
+void _glfwGetVideoModeNull(_GLFWmonitor* monitor, GLFWvidmode* mode);
+GLFWbool _glfwGetGammaRampNull(_GLFWmonitor* monitor, GLFWgammaramp* ramp);
+void _glfwSetGammaRampNull(_GLFWmonitor* monitor, const GLFWgammaramp* ramp);
+
+int _glfwCreateWindowNull(_GLFWwindow* window, const _GLFWwndconfig* wndconfig, const _GLFWctxconfig* ctxconfig, const _GLFWfbconfig* fbconfig);
+void _glfwDestroyWindowNull(_GLFWwindow* window);
+void _glfwSetWindowTitleNull(_GLFWwindow* window, const char* title);
+void _glfwSetWindowIconNull(_GLFWwindow* window, int count, const GLFWimage* images);
+void _glfwSetWindowMonitorNull(_GLFWwindow* window, _GLFWmonitor* monitor, int xpos, int ypos, int width, int height, int refreshRate);
+void _glfwGetWindowPosNull(_GLFWwindow* window, int* xpos, int* ypos);
+void _glfwSetWindowPosNull(_GLFWwindow* window, int xpos, int ypos);
+void _glfwGetWindowSizeNull(_GLFWwindow* window, int* width, int* height);
+void _glfwSetWindowSizeNull(_GLFWwindow* window, int width, int height);
+void _glfwSetWindowSizeLimitsNull(_GLFWwindow* window, int minwidth, int minheight, int maxwidth, int maxheight);
+void _glfwSetWindowAspectRatioNull(_GLFWwindow* window, int n, int d);
+void _glfwGetFramebufferSizeNull(_GLFWwindow* window, int* width, int* height);
+void _glfwGetWindowFrameSizeNull(_GLFWwindow* window, int* left, int* top, int* right, int* bottom);
+void _glfwGetWindowContentScaleNull(_GLFWwindow* window, float* xscale, float* yscale);
+void _glfwIconifyWindowNull(_GLFWwindow* window);
+void _glfwRestoreWindowNull(_GLFWwindow* window);
+void _glfwMaximizeWindowNull(_GLFWwindow* window);
+int _glfwWindowMaximizedNull(_GLFWwindow* window);
+int _glfwWindowHoveredNull(_GLFWwindow* window);
+int _glfwFramebufferTransparentNull(_GLFWwindow* window);
+void _glfwSetWindowResizableNull(_GLFWwindow* window, GLFWbool enabled);
+void _glfwSetWindowDecoratedNull(_GLFWwindow* window, GLFWbool enabled);
+void _glfwSetWindowFloatingNull(_GLFWwindow* window, GLFWbool enabled);
+void _glfwSetWindowMousePassthroughNull(_GLFWwindow* window, GLFWbool enabled);
+float _glfwGetWindowOpacityNull(_GLFWwindow* window);
+void _glfwSetWindowOpacityNull(_GLFWwindow* window, float opacity);
+void _glfwSetRawMouseMotionNull(_GLFWwindow *window, GLFWbool enabled);
+GLFWbool _glfwRawMouseMotionSupportedNull(void);
+void _glfwShowWindowNull(_GLFWwindow* window);
+void _glfwRequestWindowAttentionNull(_GLFWwindow* window);
+void _glfwHideWindowNull(_GLFWwindow* window);
+void _glfwFocusWindowNull(_GLFWwindow* window);
+int _glfwWindowFocusedNull(_GLFWwindow* window);
+int _glfwWindowIconifiedNull(_GLFWwindow* window);
+int _glfwWindowVisibleNull(_GLFWwindow* window);
+void _glfwPollEventsNull(void);
+void _glfwWaitEventsNull(void);
+void _glfwWaitEventsTimeoutNull(double timeout);
+void _glfwPostEmptyEventNull(void);
+void _glfwGetCursorPosNull(_GLFWwindow* window, double* xpos, double* ypos);
+void _glfwSetCursorPosNull(_GLFWwindow* window, double x, double y);
+void _glfwSetCursorModeNull(_GLFWwindow* window, int mode);
+int _glfwCreateCursorNull(_GLFWcursor* cursor, const GLFWimage* image, int xhot, int yhot);
+int _glfwCreateStandardCursorNull(_GLFWcursor* cursor, int shape);
+void _glfwDestroyCursorNull(_GLFWcursor* cursor);
+void _glfwSetCursorNull(_GLFWwindow* window, _GLFWcursor* cursor);
+void _glfwSetClipboardStringNull(const char* string);
+const char* _glfwGetClipboardStringNull(void);
+const char* _glfwGetScancodeNameNull(int scancode);
+int _glfwGetKeyScancodeNull(int key);
+
+EGLenum _glfwGetEGLPlatformNull(EGLint** attribs);
+EGLNativeDisplayType _glfwGetEGLNativeDisplayNull(void);
+EGLNativeWindowType _glfwGetEGLNativeWindowNull(_GLFWwindow* window);
+
+void _glfwGetRequiredInstanceExtensionsNull(char** extensions);
+int _glfwGetPhysicalDevicePresentationSupportNull(VkInstance instance, VkPhysicalDevice device, uint32_t queuefamily);
+VkResult _glfwCreateWindowSurfaceNull(VkInstance instance, _GLFWwindow* window, const VkAllocationCallbacks* allocator, VkSurfaceKHR* surface);
+
+
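null_platform.h declares every entry point that _glfwConnectNull wires into the shared _GLFWplatform dispatch table. A deliberately cut-down, hypothetical illustration of that pattern (the real struct has far more members, in the order used by the initializer in null_init.c):

    // Hypothetical miniature of GLFW's backend dispatch: a struct of function
    // pointers populated by a per-platform "connect" function.
    typedef struct MiniPlatform
    {
        int  (*init)(void);
        void (*terminate)(void);
        void (*pollEvents)(void);
    } MiniPlatform;

    static int  miniInitNull(void)       { return 1; }
    static void miniTerminateNull(void)  { }
    static void miniPollEventsNull(void) { }

    static int miniConnectNull(MiniPlatform* platform)
    {
        const MiniPlatform null = { miniInitNull, miniTerminateNull, miniPollEventsNull };
        *platform = null;
        return 1;
    }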
diff --git a/chromium/third_party/dawn/third_party/glfw/src/null_window.c b/chromium/third_party/dawn/third_party/glfw/src/null_window.c
new file mode 100644
index 00000000000..b40110b8979
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/src/null_window.c
@@ -0,0 +1,711 @@
+//========================================================================
+// GLFW 3.4 - www.glfw.org
+//------------------------------------------------------------------------
+// Copyright (c) 2016 Google Inc.
+// Copyright (c) 2016-2019 Camilla Löwy <elmindreda@glfw.org>
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would
+// be appreciated but is not required.
+//
+// 2. Altered source versions must be plainly marked as such, and must not
+// be misrepresented as being the original software.
+//
+// 3. This notice may not be removed or altered from any source
+// distribution.
+//
+//========================================================================
+// It is fine to use C99 in this file because it will not be built with VS
+//========================================================================
+
+#include "internal.h"
+
+#include <stdlib.h>
+
+static void applySizeLimits(_GLFWwindow* window, int* width, int* height)
+{
+ if (window->numer != GLFW_DONT_CARE && window->denom != GLFW_DONT_CARE)
+ {
+ const float ratio = (float) window->numer / (float) window->denom;
+ *height = (int) (*width / ratio);
+ }
+
+ if (window->minwidth != GLFW_DONT_CARE)
+ *width = _glfw_max(*width, window->minwidth);
+ else if (window->maxwidth != GLFW_DONT_CARE)
+ *width = _glfw_min(*width, window->maxwidth);
+
+    if (window->minheight != GLFW_DONT_CARE)
+        *height = _glfw_max(*height, window->minheight);
+    else if (window->maxheight != GLFW_DONT_CARE)
+        *height = _glfw_min(*height, window->maxheight);
+}
+
+static void fitToMonitor(_GLFWwindow* window)
+{
+ GLFWvidmode mode;
+ _glfwGetVideoModeNull(window->monitor, &mode);
+ _glfwGetMonitorPosNull(window->monitor,
+ &window->null.xpos,
+ &window->null.ypos);
+ window->null.width = mode.width;
+ window->null.height = mode.height;
+}
+
+static void acquireMonitor(_GLFWwindow* window)
+{
+ _glfwInputMonitorWindow(window->monitor, window);
+}
+
+static void releaseMonitor(_GLFWwindow* window)
+{
+ if (window->monitor->window != window)
+ return;
+
+ _glfwInputMonitorWindow(window->monitor, NULL);
+}
+
+static int createNativeWindow(_GLFWwindow* window,
+ const _GLFWwndconfig* wndconfig,
+ const _GLFWfbconfig* fbconfig)
+{
+ if (window->monitor)
+ fitToMonitor(window);
+ else
+ {
+ window->null.xpos = 17;
+ window->null.ypos = 17;
+ window->null.width = wndconfig->width;
+ window->null.height = wndconfig->height;
+ }
+
+ window->null.visible = wndconfig->visible;
+ window->null.decorated = wndconfig->decorated;
+ window->null.maximized = wndconfig->maximized;
+ window->null.floating = wndconfig->floating;
+ window->null.transparent = fbconfig->transparent;
+ window->null.opacity = 1.f;
+
+ return GLFW_TRUE;
+}
+
+
+//////////////////////////////////////////////////////////////////////////
+////// GLFW platform API //////
+//////////////////////////////////////////////////////////////////////////
+
+int _glfwCreateWindowNull(_GLFWwindow* window,
+ const _GLFWwndconfig* wndconfig,
+ const _GLFWctxconfig* ctxconfig,
+ const _GLFWfbconfig* fbconfig)
+{
+ if (!createNativeWindow(window, wndconfig, fbconfig))
+ return GLFW_FALSE;
+
+ if (ctxconfig->client != GLFW_NO_API)
+ {
+ if (ctxconfig->source == GLFW_NATIVE_CONTEXT_API ||
+ ctxconfig->source == GLFW_OSMESA_CONTEXT_API)
+ {
+ if (!_glfwInitOSMesa())
+ return GLFW_FALSE;
+ if (!_glfwCreateContextOSMesa(window, ctxconfig, fbconfig))
+ return GLFW_FALSE;
+ }
+ else if (ctxconfig->source == GLFW_EGL_CONTEXT_API)
+ {
+ if (!_glfwInitEGL())
+ return GLFW_FALSE;
+ if (!_glfwCreateContextEGL(window, ctxconfig, fbconfig))
+ return GLFW_FALSE;
+ }
+
+ if (!_glfwRefreshContextAttribs(window, ctxconfig))
+ return GLFW_FALSE;
+ }
+
+ if (wndconfig->mousePassthrough)
+ _glfwSetWindowMousePassthroughNull(window, GLFW_TRUE);
+
+ if (window->monitor)
+ {
+ _glfwShowWindowNull(window);
+ _glfwFocusWindowNull(window);
+ acquireMonitor(window);
+
+ if (wndconfig->centerCursor)
+ _glfwCenterCursorInContentArea(window);
+ }
+ else
+ {
+ if (wndconfig->visible)
+ {
+ _glfwShowWindowNull(window);
+ if (wndconfig->focused)
+ _glfwFocusWindowNull(window);
+ }
+ }
+
+ return GLFW_TRUE;
+}
+
+void _glfwDestroyWindowNull(_GLFWwindow* window)
+{
+ if (window->monitor)
+ releaseMonitor(window);
+
+ if (_glfw.null.focusedWindow == window)
+ _glfw.null.focusedWindow = NULL;
+
+ if (window->context.destroy)
+ window->context.destroy(window);
+}
+
+void _glfwSetWindowTitleNull(_GLFWwindow* window, const char* title)
+{
+}
+
+void _glfwSetWindowIconNull(_GLFWwindow* window, int count, const GLFWimage* images)
+{
+}
+
+void _glfwSetWindowMonitorNull(_GLFWwindow* window,
+ _GLFWmonitor* monitor,
+ int xpos, int ypos,
+ int width, int height,
+ int refreshRate)
+{
+ if (window->monitor == monitor)
+ {
+ if (!monitor)
+ {
+ _glfwSetWindowPosNull(window, xpos, ypos);
+ _glfwSetWindowSizeNull(window, width, height);
+ }
+
+ return;
+ }
+
+ if (window->monitor)
+ releaseMonitor(window);
+
+ _glfwInputWindowMonitor(window, monitor);
+
+ if (window->monitor)
+ {
+ window->null.visible = GLFW_TRUE;
+ acquireMonitor(window);
+ fitToMonitor(window);
+ }
+ else
+ {
+ _glfwSetWindowPosNull(window, xpos, ypos);
+ _glfwSetWindowSizeNull(window, width, height);
+ }
+}
+
+void _glfwGetWindowPosNull(_GLFWwindow* window, int* xpos, int* ypos)
+{
+ if (xpos)
+ *xpos = window->null.xpos;
+ if (ypos)
+ *ypos = window->null.ypos;
+}
+
+void _glfwSetWindowPosNull(_GLFWwindow* window, int xpos, int ypos)
+{
+ if (window->monitor)
+ return;
+
+ if (window->null.xpos != xpos || window->null.ypos != ypos)
+ {
+ window->null.xpos = xpos;
+ window->null.ypos = ypos;
+ _glfwInputWindowPos(window, xpos, ypos);
+ }
+}
+
+void _glfwGetWindowSizeNull(_GLFWwindow* window, int* width, int* height)
+{
+ if (width)
+ *width = window->null.width;
+ if (height)
+ *height = window->null.height;
+}
+
+void _glfwSetWindowSizeNull(_GLFWwindow* window, int width, int height)
+{
+ if (window->monitor)
+ return;
+
+ if (window->null.width != width || window->null.height != height)
+ {
+ window->null.width = width;
+ window->null.height = height;
+ _glfwInputWindowSize(window, width, height);
+ _glfwInputFramebufferSize(window, width, height);
+ }
+}
+
+void _glfwSetWindowSizeLimitsNull(_GLFWwindow* window,
+ int minwidth, int minheight,
+ int maxwidth, int maxheight)
+{
+ int width = window->null.width;
+ int height = window->null.height;
+ applySizeLimits(window, &width, &height);
+ _glfwSetWindowSizeNull(window, width, height);
+}
+
+void _glfwSetWindowAspectRatioNull(_GLFWwindow* window, int n, int d)
+{
+ int width = window->null.width;
+ int height = window->null.height;
+ applySizeLimits(window, &width, &height);
+ _glfwSetWindowSizeNull(window, width, height);
+}
+
+void _glfwGetFramebufferSizeNull(_GLFWwindow* window, int* width, int* height)
+{
+ if (width)
+ *width = window->null.width;
+ if (height)
+ *height = window->null.height;
+}
+
+void _glfwGetWindowFrameSizeNull(_GLFWwindow* window,
+ int* left, int* top,
+ int* right, int* bottom)
+{
+ if (window->null.decorated && !window->monitor)
+ {
+ if (left)
+ *left = 1;
+ if (top)
+ *top = 10;
+ if (right)
+ *right = 1;
+ if (bottom)
+ *bottom = 1;
+ }
+ else
+ {
+ if (left)
+ *left = 0;
+ if (top)
+ *top = 0;
+ if (right)
+ *right = 0;
+ if (bottom)
+ *bottom = 0;
+ }
+}
+
+void _glfwGetWindowContentScaleNull(_GLFWwindow* window, float* xscale, float* yscale)
+{
+ if (xscale)
+ *xscale = 1.f;
+ if (yscale)
+ *yscale = 1.f;
+}
+
+void _glfwIconifyWindowNull(_GLFWwindow* window)
+{
+ if (_glfw.null.focusedWindow == window)
+ {
+ _glfw.null.focusedWindow = NULL;
+ _glfwInputWindowFocus(window, GLFW_FALSE);
+ }
+
+ if (!window->null.iconified)
+ {
+ window->null.iconified = GLFW_TRUE;
+ _glfwInputWindowIconify(window, GLFW_TRUE);
+
+ if (window->monitor)
+ releaseMonitor(window);
+ }
+}
+
+void _glfwRestoreWindowNull(_GLFWwindow* window)
+{
+ if (window->null.iconified)
+ {
+ window->null.iconified = GLFW_FALSE;
+ _glfwInputWindowIconify(window, GLFW_FALSE);
+
+ if (window->monitor)
+ acquireMonitor(window);
+ }
+ else if (window->null.maximized)
+ {
+ window->null.maximized = GLFW_FALSE;
+ _glfwInputWindowMaximize(window, GLFW_FALSE);
+ }
+}
+
+void _glfwMaximizeWindowNull(_GLFWwindow* window)
+{
+ if (!window->null.maximized)
+ {
+ window->null.maximized = GLFW_TRUE;
+ _glfwInputWindowMaximize(window, GLFW_TRUE);
+ }
+}
+
+int _glfwWindowMaximizedNull(_GLFWwindow* window)
+{
+ return window->null.maximized;
+}
+
+int _glfwWindowHoveredNull(_GLFWwindow* window)
+{
+ return _glfw.null.xcursor >= window->null.xpos &&
+ _glfw.null.ycursor >= window->null.ypos &&
+ _glfw.null.xcursor <= window->null.xpos + window->null.width - 1 &&
+ _glfw.null.ycursor <= window->null.ypos + window->null.height - 1;
+}
+
+int _glfwFramebufferTransparentNull(_GLFWwindow* window)
+{
+ return window->null.transparent;
+}
+
+void _glfwSetWindowResizableNull(_GLFWwindow* window, GLFWbool enabled)
+{
+ window->null.resizable = enabled;
+}
+
+void _glfwSetWindowDecoratedNull(_GLFWwindow* window, GLFWbool enabled)
+{
+ window->null.decorated = enabled;
+}
+
+void _glfwSetWindowFloatingNull(_GLFWwindow* window, GLFWbool enabled)
+{
+ window->null.floating = enabled;
+}
+
+void _glfwSetWindowMousePassthroughNull(_GLFWwindow* window, GLFWbool enabled)
+{
+}
+
+float _glfwGetWindowOpacityNull(_GLFWwindow* window)
+{
+ return window->null.opacity;
+}
+
+void _glfwSetWindowOpacityNull(_GLFWwindow* window, float opacity)
+{
+ window->null.opacity = opacity;
+}
+
+void _glfwSetRawMouseMotionNull(_GLFWwindow *window, GLFWbool enabled)
+{
+}
+
+GLFWbool _glfwRawMouseMotionSupportedNull(void)
+{
+ return GLFW_TRUE;
+}
+
+void _glfwShowWindowNull(_GLFWwindow* window)
+{
+ window->null.visible = GLFW_TRUE;
+}
+
+void _glfwRequestWindowAttentionNull(_GLFWwindow* window)
+{
+}
+
+void _glfwHideWindowNull(_GLFWwindow* window)
+{
+ if (_glfw.null.focusedWindow == window)
+ {
+ _glfw.null.focusedWindow = NULL;
+ _glfwInputWindowFocus(window, GLFW_FALSE);
+ }
+
+ window->null.visible = GLFW_FALSE;
+}
+
+void _glfwFocusWindowNull(_GLFWwindow* window)
+{
+ _GLFWwindow* previous;
+
+ if (_glfw.null.focusedWindow == window)
+ return;
+
+ if (!window->null.visible)
+ return;
+
+ previous = _glfw.null.focusedWindow;
+ _glfw.null.focusedWindow = window;
+
+ if (previous)
+ {
+ _glfwInputWindowFocus(previous, GLFW_FALSE);
+ if (previous->monitor && previous->autoIconify)
+ _glfwIconifyWindowNull(previous);
+ }
+
+ _glfwInputWindowFocus(window, GLFW_TRUE);
+}
+
+int _glfwWindowFocusedNull(_GLFWwindow* window)
+{
+ return _glfw.null.focusedWindow == window;
+}
+
+int _glfwWindowIconifiedNull(_GLFWwindow* window)
+{
+ return window->null.iconified;
+}
+
+int _glfwWindowVisibleNull(_GLFWwindow* window)
+{
+ return window->null.visible;
+}
+
+void _glfwPollEventsNull(void)
+{
+}
+
+void _glfwWaitEventsNull(void)
+{
+}
+
+void _glfwWaitEventsTimeoutNull(double timeout)
+{
+}
+
+void _glfwPostEmptyEventNull(void)
+{
+}
+
+void _glfwGetCursorPosNull(_GLFWwindow* window, double* xpos, double* ypos)
+{
+ if (xpos)
+ *xpos = _glfw.null.xcursor - window->null.xpos;
+ if (ypos)
+ *ypos = _glfw.null.ycursor - window->null.ypos;
+}
+
+void _glfwSetCursorPosNull(_GLFWwindow* window, double x, double y)
+{
+ _glfw.null.xcursor = window->null.xpos + (int) x;
+ _glfw.null.ycursor = window->null.ypos + (int) y;
+}
+
+void _glfwSetCursorModeNull(_GLFWwindow* window, int mode)
+{
+}
+
+int _glfwCreateCursorNull(_GLFWcursor* cursor,
+ const GLFWimage* image,
+ int xhot, int yhot)
+{
+ return GLFW_TRUE;
+}
+
+int _glfwCreateStandardCursorNull(_GLFWcursor* cursor, int shape)
+{
+ return GLFW_TRUE;
+}
+
+void _glfwDestroyCursorNull(_GLFWcursor* cursor)
+{
+}
+
+void _glfwSetCursorNull(_GLFWwindow* window, _GLFWcursor* cursor)
+{
+}
+
+void _glfwSetClipboardStringNull(const char* string)
+{
+ char* copy = _glfw_strdup(string);
+ _glfw_free(_glfw.null.clipboardString);
+ _glfw.null.clipboardString = copy;
+}
+
+const char* _glfwGetClipboardStringNull(void)
+{
+ return _glfw.null.clipboardString;
+}
+
+EGLenum _glfwGetEGLPlatformNull(EGLint** attribs)
+{
+ return 0;
+}
+
+EGLNativeDisplayType _glfwGetEGLNativeDisplayNull(void)
+{
+ return 0;
+}
+
+EGLNativeWindowType _glfwGetEGLNativeWindowNull(_GLFWwindow* window)
+{
+ return 0;
+}
+
+const char* _glfwGetScancodeNameNull(int scancode)
+{
+ if (scancode < GLFW_KEY_SPACE || scancode > GLFW_KEY_LAST)
+ {
+ _glfwInputError(GLFW_INVALID_VALUE, "Invalid scancode %i", scancode);
+ return NULL;
+ }
+
+ switch (scancode)
+ {
+ case GLFW_KEY_APOSTROPHE:
+ return "'";
+ case GLFW_KEY_COMMA:
+ return ",";
+ case GLFW_KEY_MINUS:
+ case GLFW_KEY_KP_SUBTRACT:
+ return "-";
+ case GLFW_KEY_PERIOD:
+ case GLFW_KEY_KP_DECIMAL:
+ return ".";
+ case GLFW_KEY_SLASH:
+ case GLFW_KEY_KP_DIVIDE:
+ return "/";
+ case GLFW_KEY_SEMICOLON:
+ return ";";
+ case GLFW_KEY_EQUAL:
+ case GLFW_KEY_KP_EQUAL:
+ return "=";
+ case GLFW_KEY_LEFT_BRACKET:
+ return "[";
+ case GLFW_KEY_RIGHT_BRACKET:
+ return "]";
+ case GLFW_KEY_KP_MULTIPLY:
+ return "*";
+ case GLFW_KEY_KP_ADD:
+ return "+";
+ case GLFW_KEY_BACKSLASH:
+ case GLFW_KEY_WORLD_1:
+ case GLFW_KEY_WORLD_2:
+ return "\\";
+ case GLFW_KEY_0:
+ case GLFW_KEY_KP_0:
+ return "0";
+ case GLFW_KEY_1:
+ case GLFW_KEY_KP_1:
+ return "1";
+ case GLFW_KEY_2:
+ case GLFW_KEY_KP_2:
+ return "2";
+ case GLFW_KEY_3:
+ case GLFW_KEY_KP_3:
+ return "3";
+ case GLFW_KEY_4:
+ case GLFW_KEY_KP_4:
+ return "4";
+ case GLFW_KEY_5:
+ case GLFW_KEY_KP_5:
+ return "5";
+ case GLFW_KEY_6:
+ case GLFW_KEY_KP_6:
+ return "6";
+ case GLFW_KEY_7:
+ case GLFW_KEY_KP_7:
+ return "7";
+ case GLFW_KEY_8:
+ case GLFW_KEY_KP_8:
+ return "8";
+ case GLFW_KEY_9:
+ case GLFW_KEY_KP_9:
+ return "9";
+ case GLFW_KEY_A:
+ return "a";
+ case GLFW_KEY_B:
+ return "b";
+ case GLFW_KEY_C:
+ return "c";
+ case GLFW_KEY_D:
+ return "d";
+ case GLFW_KEY_E:
+ return "e";
+ case GLFW_KEY_F:
+ return "f";
+ case GLFW_KEY_G:
+ return "g";
+ case GLFW_KEY_H:
+ return "h";
+ case GLFW_KEY_I:
+ return "i";
+ case GLFW_KEY_J:
+ return "j";
+ case GLFW_KEY_K:
+ return "k";
+ case GLFW_KEY_L:
+ return "l";
+ case GLFW_KEY_M:
+ return "m";
+ case GLFW_KEY_N:
+ return "n";
+ case GLFW_KEY_O:
+ return "o";
+ case GLFW_KEY_P:
+ return "p";
+ case GLFW_KEY_Q:
+ return "q";
+ case GLFW_KEY_R:
+ return "r";
+ case GLFW_KEY_S:
+ return "s";
+ case GLFW_KEY_T:
+ return "t";
+ case GLFW_KEY_U:
+ return "u";
+ case GLFW_KEY_V:
+ return "v";
+ case GLFW_KEY_W:
+ return "w";
+ case GLFW_KEY_X:
+ return "x";
+ case GLFW_KEY_Y:
+ return "y";
+ case GLFW_KEY_Z:
+ return "z";
+ }
+
+ return NULL;
+}
+
+int _glfwGetKeyScancodeNull(int key)
+{
+ return key;
+}
+
+void _glfwGetRequiredInstanceExtensionsNull(char** extensions)
+{
+}
+
+int _glfwGetPhysicalDevicePresentationSupportNull(VkInstance instance,
+ VkPhysicalDevice device,
+ uint32_t queuefamily)
+{
+ return GLFW_FALSE;
+}
+
+VkResult _glfwCreateWindowSurfaceNull(VkInstance instance,
+ _GLFWwindow* window,
+ const VkAllocationCallbacks* allocator,
+ VkSurfaceKHR* surface)
+{
+ // This seems like the most appropriate error to return here
+ return VK_ERROR_EXTENSION_NOT_PRESENT;
+}
+
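Although the Null backend never draws anything, state changes still flow through the shared event plumbing (_glfwInputWindowSize, _glfwInputWindowFocus, and friends), so callback-driven tests behave as usual. A minimal sketch under that assumption, using GLFW_NO_API so no OSMesa or EGL context is needed:

    #include <stdio.h>
    #include <GLFW/glfw3.h>

    static void on_resize(GLFWwindow* window, int width, int height)
    {
        printf("resized to %dx%d\n", width, height);
    }

    void null_window_event_example(void)
    {
        glfwWindowHint(GLFW_CLIENT_API, GLFW_NO_API);

        GLFWwindow* window = glfwCreateWindow(640, 480, "headless", NULL, NULL);
        if (!window)
            return;

        glfwSetWindowSizeCallback(window, on_resize);

        // _glfwSetWindowSizeNull() calls _glfwInputWindowSize(), which in turn
        // invokes the callback registered above.
        glfwSetWindowSize(window, 800, 600);

        glfwDestroyWindow(window);
    }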
diff --git a/chromium/third_party/dawn/third_party/glfw/src/osmesa_context.c b/chromium/third_party/dawn/third_party/glfw/src/osmesa_context.c
new file mode 100644
index 00000000000..38adabbc0ac
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/src/osmesa_context.c
@@ -0,0 +1,386 @@
+//========================================================================
+// GLFW 3.4 OSMesa - www.glfw.org
+//------------------------------------------------------------------------
+// Copyright (c) 2016 Google Inc.
+// Copyright (c) 2016-2017 Camilla Löwy <elmindreda@glfw.org>
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would
+// be appreciated but is not required.
+//
+// 2. Altered source versions must be plainly marked as such, and must not
+// be misrepresented as being the original software.
+//
+// 3. This notice may not be removed or altered from any source
+// distribution.
+//
+//========================================================================
+// Please use C89 style variable declarations in this file because it may be
+// built with VS 2010, which does not support C99
+//========================================================================
+
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+
+#include "internal.h"
+
+
+static void makeContextCurrentOSMesa(_GLFWwindow* window)
+{
+ if (window)
+ {
+ int width, height;
+ _glfw.platform.getFramebufferSize(window, &width, &height);
+
+ // Check to see if we need to allocate a new buffer
+ if ((window->context.osmesa.buffer == NULL) ||
+ (width != window->context.osmesa.width) ||
+ (height != window->context.osmesa.height))
+ {
+ _glfw_free(window->context.osmesa.buffer);
+
+ // Allocate the new buffer (width * height * 8-bit RGBA)
+ window->context.osmesa.buffer = _glfw_calloc(4, (size_t) width * height);
+ window->context.osmesa.width = width;
+ window->context.osmesa.height = height;
+ }
+
+ if (!OSMesaMakeCurrent(window->context.osmesa.handle,
+ window->context.osmesa.buffer,
+ GL_UNSIGNED_BYTE,
+ width, height))
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "OSMesa: Failed to make context current");
+ return;
+ }
+ }
+
+ _glfwPlatformSetTls(&_glfw.contextSlot, window);
+}
+
+static GLFWglproc getProcAddressOSMesa(const char* procname)
+{
+ return (GLFWglproc) OSMesaGetProcAddress(procname);
+}
+
+static void destroyContextOSMesa(_GLFWwindow* window)
+{
+ if (window->context.osmesa.handle)
+ {
+ OSMesaDestroyContext(window->context.osmesa.handle);
+ window->context.osmesa.handle = NULL;
+ }
+
+ if (window->context.osmesa.buffer)
+ {
+ _glfw_free(window->context.osmesa.buffer);
+ window->context.osmesa.width = 0;
+ window->context.osmesa.height = 0;
+ }
+}
+
+static void swapBuffersOSMesa(_GLFWwindow* window)
+{
+ // No double buffering on OSMesa
+}
+
+static void swapIntervalOSMesa(int interval)
+{
+ // No swap interval on OSMesa
+}
+
+static int extensionSupportedOSMesa(const char* extension)
+{
+ // OSMesa does not have extensions
+ return GLFW_FALSE;
+}
+
+
+//////////////////////////////////////////////////////////////////////////
+////// GLFW internal API //////
+//////////////////////////////////////////////////////////////////////////
+
+GLFWbool _glfwInitOSMesa(void)
+{
+ int i;
+ const char* sonames[] =
+ {
+#if defined(_GLFW_OSMESA_LIBRARY)
+ _GLFW_OSMESA_LIBRARY,
+#elif defined(_WIN32)
+ "libOSMesa.dll",
+ "OSMesa.dll",
+#elif defined(__APPLE__)
+ "libOSMesa.8.dylib",
+#elif defined(__CYGWIN__)
+ "libOSMesa-8.so",
+#elif defined(__OpenBSD__) || defined(__NetBSD__)
+ "libOSMesa.so",
+#else
+ "libOSMesa.so.8",
+ "libOSMesa.so.6",
+#endif
+ NULL
+ };
+
+ if (_glfw.osmesa.handle)
+ return GLFW_TRUE;
+
+ for (i = 0; sonames[i]; i++)
+ {
+ _glfw.osmesa.handle = _glfwPlatformLoadModule(sonames[i]);
+ if (_glfw.osmesa.handle)
+ break;
+ }
+
+ if (!_glfw.osmesa.handle)
+ {
+ _glfwInputError(GLFW_API_UNAVAILABLE, "OSMesa: Library not found");
+ return GLFW_FALSE;
+ }
+
+ _glfw.osmesa.CreateContextExt = (PFN_OSMesaCreateContextExt)
+ _glfwPlatformGetModuleSymbol(_glfw.osmesa.handle, "OSMesaCreateContextExt");
+ _glfw.osmesa.CreateContextAttribs = (PFN_OSMesaCreateContextAttribs)
+ _glfwPlatformGetModuleSymbol(_glfw.osmesa.handle, "OSMesaCreateContextAttribs");
+ _glfw.osmesa.DestroyContext = (PFN_OSMesaDestroyContext)
+ _glfwPlatformGetModuleSymbol(_glfw.osmesa.handle, "OSMesaDestroyContext");
+ _glfw.osmesa.MakeCurrent = (PFN_OSMesaMakeCurrent)
+ _glfwPlatformGetModuleSymbol(_glfw.osmesa.handle, "OSMesaMakeCurrent");
+ _glfw.osmesa.GetColorBuffer = (PFN_OSMesaGetColorBuffer)
+ _glfwPlatformGetModuleSymbol(_glfw.osmesa.handle, "OSMesaGetColorBuffer");
+ _glfw.osmesa.GetDepthBuffer = (PFN_OSMesaGetDepthBuffer)
+ _glfwPlatformGetModuleSymbol(_glfw.osmesa.handle, "OSMesaGetDepthBuffer");
+ _glfw.osmesa.GetProcAddress = (PFN_OSMesaGetProcAddress)
+ _glfwPlatformGetModuleSymbol(_glfw.osmesa.handle, "OSMesaGetProcAddress");
+
+ if (!_glfw.osmesa.CreateContextExt ||
+ !_glfw.osmesa.DestroyContext ||
+ !_glfw.osmesa.MakeCurrent ||
+ !_glfw.osmesa.GetColorBuffer ||
+ !_glfw.osmesa.GetDepthBuffer ||
+ !_glfw.osmesa.GetProcAddress)
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "OSMesa: Failed to load required entry points");
+
+ _glfwTerminateOSMesa();
+ return GLFW_FALSE;
+ }
+
+ return GLFW_TRUE;
+}
+
+void _glfwTerminateOSMesa(void)
+{
+ if (_glfw.osmesa.handle)
+ {
+ _glfwPlatformFreeModule(_glfw.osmesa.handle);
+ _glfw.osmesa.handle = NULL;
+ }
+}
+
+#define SET_ATTRIB(a, v) \
+{ \
+ assert(((size_t) index + 1) < sizeof(attribs) / sizeof(attribs[0])); \
+ attribs[index++] = a; \
+ attribs[index++] = v; \
+}
+
+GLFWbool _glfwCreateContextOSMesa(_GLFWwindow* window,
+ const _GLFWctxconfig* ctxconfig,
+ const _GLFWfbconfig* fbconfig)
+{
+ OSMesaContext share = NULL;
+ const int accumBits = fbconfig->accumRedBits +
+ fbconfig->accumGreenBits +
+ fbconfig->accumBlueBits +
+ fbconfig->accumAlphaBits;
+
+ if (ctxconfig->client == GLFW_OPENGL_ES_API)
+ {
+ _glfwInputError(GLFW_API_UNAVAILABLE,
+ "OSMesa: OpenGL ES is not available on OSMesa");
+ return GLFW_FALSE;
+ }
+
+ if (ctxconfig->share)
+ share = ctxconfig->share->context.osmesa.handle;
+
+ if (OSMesaCreateContextAttribs)
+ {
+ int index = 0, attribs[40];
+
+ SET_ATTRIB(OSMESA_FORMAT, OSMESA_RGBA);
+ SET_ATTRIB(OSMESA_DEPTH_BITS, fbconfig->depthBits);
+ SET_ATTRIB(OSMESA_STENCIL_BITS, fbconfig->stencilBits);
+ SET_ATTRIB(OSMESA_ACCUM_BITS, accumBits);
+
+ if (ctxconfig->profile == GLFW_OPENGL_CORE_PROFILE)
+ {
+ SET_ATTRIB(OSMESA_PROFILE, OSMESA_CORE_PROFILE);
+ }
+ else if (ctxconfig->profile == GLFW_OPENGL_COMPAT_PROFILE)
+ {
+ SET_ATTRIB(OSMESA_PROFILE, OSMESA_COMPAT_PROFILE);
+ }
+
+ if (ctxconfig->major != 1 || ctxconfig->minor != 0)
+ {
+ SET_ATTRIB(OSMESA_CONTEXT_MAJOR_VERSION, ctxconfig->major);
+ SET_ATTRIB(OSMESA_CONTEXT_MINOR_VERSION, ctxconfig->minor);
+ }
+
+ if (ctxconfig->forward)
+ {
+ _glfwInputError(GLFW_VERSION_UNAVAILABLE,
+ "OSMesa: Forward-compatible contexts not supported");
+ return GLFW_FALSE;
+ }
+
+ SET_ATTRIB(0, 0);
+
+ window->context.osmesa.handle =
+ OSMesaCreateContextAttribs(attribs, share);
+ }
+ else
+ {
+ if (ctxconfig->profile)
+ {
+ _glfwInputError(GLFW_VERSION_UNAVAILABLE,
+ "OSMesa: OpenGL profiles unavailable");
+ return GLFW_FALSE;
+ }
+
+ window->context.osmesa.handle =
+ OSMesaCreateContextExt(OSMESA_RGBA,
+ fbconfig->depthBits,
+ fbconfig->stencilBits,
+ accumBits,
+ share);
+ }
+
+ if (window->context.osmesa.handle == NULL)
+ {
+ _glfwInputError(GLFW_VERSION_UNAVAILABLE,
+ "OSMesa: Failed to create context");
+ return GLFW_FALSE;
+ }
+
+ window->context.makeCurrent = makeContextCurrentOSMesa;
+ window->context.swapBuffers = swapBuffersOSMesa;
+ window->context.swapInterval = swapIntervalOSMesa;
+ window->context.extensionSupported = extensionSupportedOSMesa;
+ window->context.getProcAddress = getProcAddressOSMesa;
+ window->context.destroy = destroyContextOSMesa;
+
+ return GLFW_TRUE;
+}
+
+#undef SET_ATTRIB
+
+
+//////////////////////////////////////////////////////////////////////////
+////// GLFW native API //////
+//////////////////////////////////////////////////////////////////////////
+
+GLFWAPI int glfwGetOSMesaColorBuffer(GLFWwindow* handle, int* width,
+ int* height, int* format, void** buffer)
+{
+ void* mesaBuffer;
+ GLint mesaWidth, mesaHeight, mesaFormat;
+ _GLFWwindow* window = (_GLFWwindow*) handle;
+ assert(window != NULL);
+
+ _GLFW_REQUIRE_INIT_OR_RETURN(GLFW_FALSE);
+
+ if (window->context.source != GLFW_OSMESA_CONTEXT_API)
+ {
+ _glfwInputError(GLFW_NO_WINDOW_CONTEXT, NULL);
+ return GLFW_FALSE;
+ }
+
+ if (!OSMesaGetColorBuffer(window->context.osmesa.handle,
+ &mesaWidth, &mesaHeight,
+ &mesaFormat, &mesaBuffer))
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "OSMesa: Failed to retrieve color buffer");
+ return GLFW_FALSE;
+ }
+
+ if (width)
+ *width = mesaWidth;
+ if (height)
+ *height = mesaHeight;
+ if (format)
+ *format = mesaFormat;
+ if (buffer)
+ *buffer = mesaBuffer;
+
+ return GLFW_TRUE;
+}
+
+GLFWAPI int glfwGetOSMesaDepthBuffer(GLFWwindow* handle,
+ int* width, int* height,
+ int* bytesPerValue,
+ void** buffer)
+{
+ void* mesaBuffer;
+ GLint mesaWidth, mesaHeight, mesaBytes;
+ _GLFWwindow* window = (_GLFWwindow*) handle;
+ assert(window != NULL);
+
+ _GLFW_REQUIRE_INIT_OR_RETURN(GLFW_FALSE);
+
+ if (window->context.source != GLFW_OSMESA_CONTEXT_API)
+ {
+ _glfwInputError(GLFW_NO_WINDOW_CONTEXT, NULL);
+ return GLFW_FALSE;
+ }
+
+ if (!OSMesaGetDepthBuffer(window->context.osmesa.handle,
+ &mesaWidth, &mesaHeight,
+ &mesaBytes, &mesaBuffer))
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "OSMesa: Failed to retrieve depth buffer");
+ return GLFW_FALSE;
+ }
+
+ if (width)
+ *width = mesaWidth;
+ if (height)
+ *height = mesaHeight;
+ if (bytesPerValue)
+ *bytesPerValue = mesaBytes;
+ if (buffer)
+ *buffer = mesaBuffer;
+
+ return GLFW_TRUE;
+}
+
+GLFWAPI OSMesaContext glfwGetOSMesaContext(GLFWwindow* handle)
+{
+ _GLFWwindow* window = (_GLFWwindow*) handle;
+ _GLFW_REQUIRE_INIT_OR_RETURN(NULL);
+
+ if (window->context.source != GLFW_OSMESA_CONTEXT_API)
+ {
+ _glfwInputError(GLFW_NO_WINDOW_CONTEXT, NULL);
+ return NULL;
+ }
+
+ return window->context.osmesa.handle;
+}
+
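glfwGetOSMesaColorBuffer and glfwGetOSMesaDepthBuffer are reached through the native-access header. A hedged sketch of offscreen rendering that pairs the OSMesa context hint with those accessors (GLFW_EXPOSE_NATIVE_OSMESA and the hint values are standard GLFW 3 names; the GL drawing itself is elided):

    #define GLFW_EXPOSE_NATIVE_OSMESA
    #include <stdio.h>
    #include <GLFW/glfw3.h>
    #include <GLFW/glfw3native.h>

    // Create a hidden window backed by an OSMesa context, then read back the
    // software framebuffer allocated in makeContextCurrentOSMesa() above.
    void osmesa_offscreen_example(void)
    {
        glfwWindowHint(GLFW_CONTEXT_CREATION_API, GLFW_OSMESA_CONTEXT_API);
        glfwWindowHint(GLFW_VISIBLE, GLFW_FALSE);

        GLFWwindow* window = glfwCreateWindow(256, 256, "offscreen", NULL, NULL);
        if (!window)
            return;

        glfwMakeContextCurrent(window);
        // ... issue GL commands here ...

        int width, height, format;
        void* pixels;
        if (glfwGetOSMesaColorBuffer(window, &width, &height, &format, &pixels))
            printf("color buffer: %dx%d, format 0x%X\n", width, height, format);

        glfwDestroyWindow(window);
    }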
diff --git a/chromium/third_party/dawn/third_party/glfw/src/platform.c b/chromium/third_party/dawn/third_party/glfw/src/platform.c
new file mode 100644
index 00000000000..d0bbd06df54
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/src/platform.c
@@ -0,0 +1,189 @@
+//========================================================================
+// GLFW 3.4 - www.glfw.org
+//------------------------------------------------------------------------
+// Copyright (c) 2002-2006 Marcus Geelnard
+// Copyright (c) 2006-2018 Camilla Löwy <elmindreda@glfw.org>
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would
+// be appreciated but is not required.
+//
+// 2. Altered source versions must be plainly marked as such, and must not
+// be misrepresented as being the original software.
+//
+// 3. This notice may not be removed or altered from any source
+// distribution.
+//
+//========================================================================
+// Please use C89 style variable declarations in this file because it may be
+// built with VS 2010, which does not support C99
+//========================================================================
+
+#include "internal.h"
+
+//////////////////////////////////////////////////////////////////////////
+////// GLFW internal API //////
+//////////////////////////////////////////////////////////////////////////
+
+static const struct
+{
+ int ID;
+ GLFWbool (*connect)(int,_GLFWplatform*);
+} supportedPlatforms[] =
+{
+#if defined(_GLFW_WIN32)
+ { GLFW_PLATFORM_WIN32, _glfwConnectWin32 },
+#endif
+#if defined(_GLFW_COCOA)
+ { GLFW_PLATFORM_COCOA, _glfwConnectCocoa },
+#endif
+#if defined(_GLFW_X11)
+ { GLFW_PLATFORM_X11, _glfwConnectX11 },
+#endif
+#if defined(_GLFW_WAYLAND)
+ { GLFW_PLATFORM_WAYLAND, _glfwConnectWayland },
+#endif
+};
+
+GLFWbool _glfwSelectPlatform(int desiredID, _GLFWplatform* platform)
+{
+ const size_t count = sizeof(supportedPlatforms) / sizeof(supportedPlatforms[0]);
+ size_t i;
+
+ if (desiredID != GLFW_ANY_PLATFORM &&
+ desiredID != GLFW_PLATFORM_WIN32 &&
+ desiredID != GLFW_PLATFORM_COCOA &&
+ desiredID != GLFW_PLATFORM_WAYLAND &&
+ desiredID != GLFW_PLATFORM_X11 &&
+ desiredID != GLFW_PLATFORM_NULL)
+ {
+ _glfwInputError(GLFW_INVALID_ENUM, "Invalid platform ID 0x%08X", desiredID);
+ return GLFW_FALSE;
+ }
+
+ // Only allow the Null platform if specifically requested
+ if (desiredID == GLFW_PLATFORM_NULL)
+ return _glfwConnectNull(desiredID, platform);
+ else if (count == 0)
+ {
+ _glfwInputError(GLFW_PLATFORM_UNAVAILABLE, "This binary only supports the Null platform");
+ return GLFW_FALSE;
+ }
+
+ if (desiredID == GLFW_ANY_PLATFORM)
+ {
+ // If there is exactly one platform available for auto-selection, let it emit the
+ // error on failure as the platform-specific error description may be more helpful
+ if (count == 1)
+ return supportedPlatforms[0].connect(supportedPlatforms[0].ID, platform);
+
+ for (i = 0; i < count; i++)
+ {
+ if (supportedPlatforms[i].connect(desiredID, platform))
+ return GLFW_TRUE;
+ }
+
+ _glfwInputError(GLFW_PLATFORM_UNAVAILABLE, "Failed to detect any supported platform");
+ }
+ else
+ {
+ for (i = 0; i < count; i++)
+ {
+ if (supportedPlatforms[i].ID == desiredID)
+ return supportedPlatforms[i].connect(desiredID, platform);
+ }
+
+ _glfwInputError(GLFW_PLATFORM_UNAVAILABLE, "The requested platform is not supported");
+ }
+
+ return GLFW_FALSE;
+}
+
+//////////////////////////////////////////////////////////////////////////
+////// GLFW public API //////
+//////////////////////////////////////////////////////////////////////////
+
+GLFWAPI int glfwGetPlatform(void)
+{
+ _GLFW_REQUIRE_INIT_OR_RETURN(0);
+ return _glfw.platform.platformID;
+}
+
+GLFWAPI int glfwPlatformSupported(int platformID)
+{
+ const size_t count = sizeof(supportedPlatforms) / sizeof(supportedPlatforms[0]);
+ size_t i;
+
+ if (platformID != GLFW_PLATFORM_WIN32 &&
+ platformID != GLFW_PLATFORM_COCOA &&
+ platformID != GLFW_PLATFORM_WAYLAND &&
+ platformID != GLFW_PLATFORM_X11 &&
+ platformID != GLFW_PLATFORM_NULL)
+ {
+ _glfwInputError(GLFW_INVALID_ENUM, "Invalid platform ID 0x%08X", platformID);
+ return GLFW_FALSE;
+ }
+
+ if (platformID == GLFW_PLATFORM_NULL)
+ return GLFW_TRUE;
+
+ for (i = 0; i < count; i++)
+ {
+ if (platformID == supportedPlatforms[i].ID)
+ return GLFW_TRUE;
+ }
+
+ return GLFW_FALSE;
+}
+
+GLFWAPI const char* glfwGetVersionString(void)
+{
+ return _GLFW_VERSION_NUMBER
+#if defined(_GLFW_WIN32)
+ " Win32 WGL"
+#endif
+#if defined(_GLFW_COCOA)
+ " Cocoa NSGL"
+#endif
+#if defined(_GLFW_WAYLAND)
+ " Wayland"
+#endif
+#if defined(_GLFW_X11)
+ " X11 GLX"
+#endif
+ " Null"
+ " EGL"
+ " OSMesa"
+#if defined(__MINGW64_VERSION_MAJOR)
+ " MinGW-w64"
+#elif defined(__MINGW32__)
+ " MinGW"
+#elif defined(_MSC_VER)
+ " VisualC"
+#endif
+#if defined(_GLFW_USE_HYBRID_HPG) || defined(_GLFW_USE_OPTIMUS_HPG)
+ " hybrid-GPU"
+#endif
+#if defined(_POSIX_MONOTONIC_CLOCK)
+ " monotonic"
+#endif
+#if defined(_GLFW_BUILD_DLL)
+#if defined(_WIN32)
+ " DLL"
+#elif defined(__APPLE__)
+ " dynamic"
+#else
+ " shared"
+#endif
+#endif
+ ;
+}
+
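
A short usage sketch for the runtime platform selection API implemented above, using GLFW 3.4's init-hint flow; it is illustrative only.

    /* Prefer Wayland when this binary was built with it, otherwise let GLFW
       auto-select among the compiled-in backends (the loop in _glfwSelectPlatform). */
    #include <GLFW/glfw3.h>
    #include <stdio.h>

    int main(void)
    {
        if (glfwPlatformSupported(GLFW_PLATFORM_WAYLAND))
            glfwInitHint(GLFW_PLATFORM, GLFW_PLATFORM_WAYLAND);
        else
            glfwInitHint(GLFW_PLATFORM, GLFW_ANY_PLATFORM);

        if (!glfwInit())
            return 1;

        printf("platform: 0x%08X\nversion: %s\n",
               glfwGetPlatform(), glfwGetVersionString());

        glfwTerminate();
        return 0;
    }
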
diff --git a/chromium/third_party/dawn/third_party/glfw/src/platform.h b/chromium/third_party/dawn/third_party/glfw/src/platform.h
new file mode 100644
index 00000000000..0c593676c4f
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/src/platform.h
@@ -0,0 +1,163 @@
+//========================================================================
+// GLFW 3.4 - www.glfw.org
+//------------------------------------------------------------------------
+// Copyright (c) 2002-2006 Marcus Geelnard
+// Copyright (c) 2006-2018 Camilla Löwy <elmindreda@glfw.org>
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would
+// be appreciated but is not required.
+//
+// 2. Altered source versions must be plainly marked as such, and must not
+// be misrepresented as being the original software.
+//
+// 3. This notice may not be removed or altered from any source
+// distribution.
+//
+//========================================================================
+
+#include "null_platform.h"
+
+#if defined(_GLFW_WIN32)
+ #include "win32_platform.h"
+#else
+ #define GLFW_WIN32_WINDOW_STATE
+ #define GLFW_WIN32_MONITOR_STATE
+ #define GLFW_WIN32_CURSOR_STATE
+ #define GLFW_WIN32_LIBRARY_WINDOW_STATE
+ #define GLFW_WGL_CONTEXT_STATE
+ #define GLFW_WGL_LIBRARY_CONTEXT_STATE
+#endif
+
+#if defined(_GLFW_COCOA)
+ #include "cocoa_platform.h"
+#else
+ #define GLFW_COCOA_WINDOW_STATE
+ #define GLFW_COCOA_MONITOR_STATE
+ #define GLFW_COCOA_CURSOR_STATE
+ #define GLFW_COCOA_LIBRARY_WINDOW_STATE
+ #define GLFW_NSGL_CONTEXT_STATE
+ #define GLFW_NSGL_LIBRARY_CONTEXT_STATE
+#endif
+
+#if defined(_GLFW_WAYLAND)
+ #include "wl_platform.h"
+#else
+ #define GLFW_WAYLAND_WINDOW_STATE
+ #define GLFW_WAYLAND_MONITOR_STATE
+ #define GLFW_WAYLAND_CURSOR_STATE
+ #define GLFW_WAYLAND_LIBRARY_WINDOW_STATE
+#endif
+
+#if defined(_GLFW_X11)
+ #include "x11_platform.h"
+#else
+ #define GLFW_X11_WINDOW_STATE
+ #define GLFW_X11_MONITOR_STATE
+ #define GLFW_X11_CURSOR_STATE
+ #define GLFW_X11_LIBRARY_WINDOW_STATE
+ #define GLFW_GLX_CONTEXT_STATE
+ #define GLFW_GLX_LIBRARY_CONTEXT_STATE
+#endif
+
+#include "null_joystick.h"
+
+#if defined(_GLFW_WIN32)
+ #include "win32_joystick.h"
+#else
+ #define GLFW_WIN32_JOYSTICK_STATE
+ #define GLFW_WIN32_LIBRARY_JOYSTICK_STATE
+#endif
+
+#if defined(_GLFW_COCOA)
+ #include "cocoa_joystick.h"
+#else
+ #define GLFW_COCOA_JOYSTICK_STATE
+ #define GLFW_COCOA_LIBRARY_JOYSTICK_STATE
+#endif
+
+#if (defined(_GLFW_X11) || defined(_GLFW_WAYLAND)) && defined(__linux__)
+ #include "linux_joystick.h"
+#else
+ #define GLFW_LINUX_JOYSTICK_STATE
+ #define GLFW_LINUX_LIBRARY_JOYSTICK_STATE
+#endif
+
+#define GLFW_PLATFORM_WINDOW_STATE \
+ GLFW_WIN32_WINDOW_STATE \
+ GLFW_COCOA_WINDOW_STATE \
+ GLFW_WAYLAND_WINDOW_STATE \
+ GLFW_X11_WINDOW_STATE \
+ GLFW_NULL_WINDOW_STATE \
+
+#define GLFW_PLATFORM_MONITOR_STATE \
+ GLFW_WIN32_MONITOR_STATE \
+ GLFW_COCOA_MONITOR_STATE \
+ GLFW_WAYLAND_MONITOR_STATE \
+ GLFW_X11_MONITOR_STATE \
+ GLFW_NULL_MONITOR_STATE \
+
+#define GLFW_PLATFORM_CURSOR_STATE \
+ GLFW_WIN32_CURSOR_STATE \
+ GLFW_COCOA_CURSOR_STATE \
+ GLFW_WAYLAND_CURSOR_STATE \
+ GLFW_X11_CURSOR_STATE \
+ GLFW_NULL_CURSOR_STATE \
+
+#define GLFW_PLATFORM_JOYSTICK_STATE \
+ GLFW_WIN32_JOYSTICK_STATE \
+ GLFW_COCOA_JOYSTICK_STATE \
+ GLFW_LINUX_JOYSTICK_STATE
+
+#define GLFW_PLATFORM_LIBRARY_WINDOW_STATE \
+ GLFW_WIN32_LIBRARY_WINDOW_STATE \
+ GLFW_COCOA_LIBRARY_WINDOW_STATE \
+ GLFW_WAYLAND_LIBRARY_WINDOW_STATE \
+ GLFW_X11_LIBRARY_WINDOW_STATE \
+ GLFW_NULL_LIBRARY_WINDOW_STATE \
+
+#define GLFW_PLATFORM_LIBRARY_JOYSTICK_STATE \
+ GLFW_WIN32_LIBRARY_JOYSTICK_STATE \
+ GLFW_COCOA_LIBRARY_JOYSTICK_STATE \
+ GLFW_LINUX_LIBRARY_JOYSTICK_STATE
+
+#define GLFW_PLATFORM_CONTEXT_STATE \
+ GLFW_WGL_CONTEXT_STATE \
+ GLFW_NSGL_CONTEXT_STATE \
+ GLFW_GLX_CONTEXT_STATE
+
+#define GLFW_PLATFORM_LIBRARY_CONTEXT_STATE \
+ GLFW_WGL_LIBRARY_CONTEXT_STATE \
+ GLFW_NSGL_LIBRARY_CONTEXT_STATE \
+ GLFW_GLX_LIBRARY_CONTEXT_STATE
+
+#if defined(_WIN32)
+ #include "win32_thread.h"
+ #define GLFW_PLATFORM_TLS_STATE GLFW_WIN32_TLS_STATE
+ #define GLFW_PLATFORM_MUTEX_STATE GLFW_WIN32_MUTEX_STATE
+#else
+ #include "posix_thread.h"
+ #define GLFW_PLATFORM_TLS_STATE GLFW_POSIX_TLS_STATE
+ #define GLFW_PLATFORM_MUTEX_STATE GLFW_POSIX_MUTEX_STATE
+#endif
+
+#if defined(_WIN32)
+ #include "win32_time.h"
+ #define GLFW_PLATFORM_LIBRARY_TIMER_STATE GLFW_WIN32_LIBRARY_TIMER_STATE
+#elif defined(__APPLE__)
+ #include "cocoa_time.h"
+ #define GLFW_PLATFORM_LIBRARY_TIMER_STATE GLFW_COCOA_LIBRARY_TIMER_STATE
+#else
+ #include "posix_time.h"
+ #define GLFW_PLATFORM_LIBRARY_TIMER_STATE GLFW_POSIX_LIBRARY_TIMER_STATE
+#endif
+
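
For orientation: the *_STATE macros defined above are pasted into the internal structs in internal.h, so a single binary carries state for every compiled-in backend while absent backends expand to nothing. A simplified, hypothetical illustration of that pattern (the real structs live in internal.h):

    /* Hypothetical illustration only; not the actual internal.h definition. */
    struct example_window_state
    {
        int width, height;          /* shared, platform-independent fields */

        GLFW_PLATFORM_WINDOW_STATE  /* expands to the win32/cocoa/wayland/x11/null
                                       window state selected at compile time */
    };
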
diff --git a/chromium/third_party/dawn/third_party/glfw/src/posix_module.c b/chromium/third_party/dawn/third_party/glfw/src/posix_module.c
new file mode 100644
index 00000000000..7079e5b45d5
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/src/posix_module.c
@@ -0,0 +1,51 @@
+//========================================================================
+// GLFW 3.4 POSIX - www.glfw.org
+//------------------------------------------------------------------------
+// Copyright (c) 2021 Camilla Löwy <elmindreda@glfw.org>
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would
+// be appreciated but is not required.
+//
+// 2. Altered source versions must be plainly marked as such, and must not
+// be misrepresented as being the original software.
+//
+// 3. This notice may not be removed or altered from any source
+// distribution.
+//
+//========================================================================
+// It is fine to use C99 in this file because it will not be built with VS
+//========================================================================
+
+#include "internal.h"
+
+#include <dlfcn.h>
+
+//////////////////////////////////////////////////////////////////////////
+////// GLFW platform API //////
+//////////////////////////////////////////////////////////////////////////
+
+void* _glfwPlatformLoadModule(const char* path)
+{
+ return dlopen(path, RTLD_LAZY | RTLD_LOCAL);
+}
+
+void _glfwPlatformFreeModule(void* module)
+{
+ dlclose(module);
+}
+
+GLFWproc _glfwPlatformGetModuleSymbol(void* module, const char* name)
+{
+ return dlsym(module, name);
+}
+
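
A brief sketch of the load/resolve/free pattern these wrappers provide; it mirrors how _glfwInitVulkan later in this patch loads the Vulkan loader.

    /* Sketch: dynamically resolve one symbol from a shared library. */
    void* module = _glfwPlatformLoadModule("libvulkan.so.1");
    if (module)
    {
        GLFWproc sym = _glfwPlatformGetModuleSymbol(module, "vkGetInstanceProcAddr");
        /* ... cast 'sym' to the proper function pointer type and use it ... */
        _glfwPlatformFreeModule(module);
    }
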
diff --git a/chromium/third_party/dawn/third_party/glfw/src/posix_poll.c b/chromium/third_party/dawn/third_party/glfw/src/posix_poll.c
new file mode 100644
index 00000000000..676a8a51006
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/src/posix_poll.c
@@ -0,0 +1,81 @@
+//========================================================================
+// GLFW 3.4 POSIX - www.glfw.org
+//------------------------------------------------------------------------
+// Copyright (c) 2022 Camilla Löwy <elmindreda@glfw.org>
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would
+// be appreciated but is not required.
+//
+// 2. Altered source versions must be plainly marked as such, and must not
+// be misrepresented as being the original software.
+//
+// 3. This notice may not be removed or altered from any source
+// distribution.
+//
+//========================================================================
+// It is fine to use C99 in this file because it will not be built with VS
+//========================================================================
+
+#define _GNU_SOURCE
+
+#include "internal.h"
+
+#include <signal.h>
+#include <time.h>
+#include <errno.h>
+
+GLFWbool _glfwPollPOSIX(struct pollfd* fds, nfds_t count, double* timeout)
+{
+ for (;;)
+ {
+ if (timeout)
+ {
+ const uint64_t base = _glfwPlatformGetTimerValue();
+
+#if defined(__linux__) || defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__CYGWIN__)
+ const time_t seconds = (time_t) *timeout;
+ const long nanoseconds = (long) ((*timeout - seconds) * 1e9);
+ const struct timespec ts = { seconds, nanoseconds };
+ const int result = ppoll(fds, count, &ts, NULL);
+#elif defined(__NetBSD__)
+ const time_t seconds = (time_t) *timeout;
+ const long nanoseconds = (long) ((*timeout - seconds) * 1e9);
+ const struct timespec ts = { seconds, nanoseconds };
+ const int result = pollts(fds, count, &ts, NULL);
+#else
+ const int milliseconds = (int) (*timeout * 1e3);
+ const int result = poll(fds, count, milliseconds);
+#endif
+ const int error = errno; // clock_gettime may overwrite our error
+
+ *timeout -= (_glfwPlatformGetTimerValue() - base) /
+ (double) _glfwPlatformGetTimerFrequency();
+
+ if (result > 0)
+ return GLFW_TRUE;
+ else if (result == -1 && error != EINTR && error != EAGAIN)
+ return GLFW_FALSE;
+ else if (*timeout <= 0.0)
+ return GLFW_FALSE;
+ }
+ else
+ {
+ const int result = poll(fds, count, -1);
+ if (result > 0)
+ return GLFW_TRUE;
+ else if (result == -1 && errno != EINTR && errno != EAGAIN)
+ return GLFW_FALSE;
+ }
+ }
+}
+
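
A usage sketch for the helper above: the timeout is expressed in seconds and is decremented in place by the time actually spent waiting, so a caller can keep re-invoking it after handling an event. The descriptor used here (stdin) is just an example.

    /* Wait up to 100 ms for input on a single descriptor. */
    struct pollfd pfd = { 0 /* stdin */, POLLIN, 0 };
    double budget = 0.1;  /* seconds */

    if (_glfwPollPOSIX(&pfd, 1, &budget))
    {
        /* pfd.revents is set; 'budget' now holds the remaining time. */
    }
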
diff --git a/chromium/third_party/dawn/third_party/glfw/src/posix_poll.h b/chromium/third_party/dawn/third_party/glfw/src/posix_poll.h
new file mode 100644
index 00000000000..1effd1cd39d
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/src/posix_poll.h
@@ -0,0 +1,32 @@
+//========================================================================
+// GLFW 3.4 POSIX - www.glfw.org
+//------------------------------------------------------------------------
+// Copyright (c) 2022 Camilla Löwy <elmindreda@glfw.org>
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would
+// be appreciated but is not required.
+//
+// 2. Altered source versions must be plainly marked as such, and must not
+// be misrepresented as being the original software.
+//
+// 3. This notice may not be removed or altered from any source
+// distribution.
+//
+//========================================================================
+// It is fine to use C99 in this file because it will not be built with VS
+//========================================================================
+
+#include <poll.h>
+
+GLFWbool _glfwPollPOSIX(struct pollfd* fds, nfds_t count, double* timeout);
+
diff --git a/chromium/third_party/dawn/third_party/glfw/src/posix_thread.c b/chromium/third_party/dawn/third_party/glfw/src/posix_thread.c
new file mode 100644
index 00000000000..0236145781d
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/src/posix_thread.c
@@ -0,0 +1,105 @@
+//========================================================================
+// GLFW 3.4 POSIX - www.glfw.org
+//------------------------------------------------------------------------
+// Copyright (c) 2002-2006 Marcus Geelnard
+// Copyright (c) 2006-2017 Camilla Löwy <elmindreda@glfw.org>
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would
+// be appreciated but is not required.
+//
+// 2. Altered source versions must be plainly marked as such, and must not
+// be misrepresented as being the original software.
+//
+// 3. This notice may not be removed or altered from any source
+// distribution.
+//
+//========================================================================
+// It is fine to use C99 in this file because it will not be built with VS
+//========================================================================
+
+#include "internal.h"
+
+#include <assert.h>
+#include <string.h>
+
+
+//////////////////////////////////////////////////////////////////////////
+////// GLFW platform API //////
+//////////////////////////////////////////////////////////////////////////
+
+GLFWbool _glfwPlatformCreateTls(_GLFWtls* tls)
+{
+ assert(tls->posix.allocated == GLFW_FALSE);
+
+ if (pthread_key_create(&tls->posix.key, NULL) != 0)
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "POSIX: Failed to create context TLS");
+ return GLFW_FALSE;
+ }
+
+ tls->posix.allocated = GLFW_TRUE;
+ return GLFW_TRUE;
+}
+
+void _glfwPlatformDestroyTls(_GLFWtls* tls)
+{
+ if (tls->posix.allocated)
+ pthread_key_delete(tls->posix.key);
+ memset(tls, 0, sizeof(_GLFWtls));
+}
+
+void* _glfwPlatformGetTls(_GLFWtls* tls)
+{
+ assert(tls->posix.allocated == GLFW_TRUE);
+ return pthread_getspecific(tls->posix.key);
+}
+
+void _glfwPlatformSetTls(_GLFWtls* tls, void* value)
+{
+ assert(tls->posix.allocated == GLFW_TRUE);
+ pthread_setspecific(tls->posix.key, value);
+}
+
+GLFWbool _glfwPlatformCreateMutex(_GLFWmutex* mutex)
+{
+ assert(mutex->posix.allocated == GLFW_FALSE);
+
+ if (pthread_mutex_init(&mutex->posix.handle, NULL) != 0)
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR, "POSIX: Failed to create mutex");
+ return GLFW_FALSE;
+ }
+
+ return mutex->posix.allocated = GLFW_TRUE;
+}
+
+void _glfwPlatformDestroyMutex(_GLFWmutex* mutex)
+{
+ if (mutex->posix.allocated)
+ pthread_mutex_destroy(&mutex->posix.handle);
+ memset(mutex, 0, sizeof(_GLFWmutex));
+}
+
+void _glfwPlatformLockMutex(_GLFWmutex* mutex)
+{
+ assert(mutex->posix.allocated == GLFW_TRUE);
+ pthread_mutex_lock(&mutex->posix.handle);
+}
+
+void _glfwPlatformUnlockMutex(_GLFWmutex* mutex)
+{
+ assert(mutex->posix.allocated == GLFW_TRUE);
+ pthread_mutex_unlock(&mutex->posix.handle);
+}
+
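
A minimal sketch of the lifecycle these TLS wrappers expect, mirroring how GLFW stores the current context per thread; the stored pointer is illustrative.

    _GLFWtls slot;
    memset(&slot, 0, sizeof(slot));   /* CreateTls asserts the struct starts zeroed */

    if (_glfwPlatformCreateTls(&slot))
    {
        _glfwPlatformSetTls(&slot, &slot);         /* store any per-thread pointer */
        void* value = _glfwPlatformGetTls(&slot);  /* == &slot on this thread */
        (void) value;
        _glfwPlatformDestroyTls(&slot);
    }
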
diff --git a/chromium/third_party/dawn/third_party/glfw/src/posix_thread.h b/chromium/third_party/dawn/third_party/glfw/src/posix_thread.h
new file mode 100644
index 00000000000..5a5d7b7c380
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/src/posix_thread.h
@@ -0,0 +1,49 @@
+//========================================================================
+// GLFW 3.4 POSIX - www.glfw.org
+//------------------------------------------------------------------------
+// Copyright (c) 2002-2006 Marcus Geelnard
+// Copyright (c) 2006-2017 Camilla Löwy <elmindreda@glfw.org>
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would
+// be appreciated but is not required.
+//
+// 2. Altered source versions must be plainly marked as such, and must not
+// be misrepresented as being the original software.
+//
+// 3. This notice may not be removed or altered from any source
+// distribution.
+//
+//========================================================================
+
+#include <pthread.h>
+
+#define GLFW_POSIX_TLS_STATE _GLFWtlsPOSIX posix;
+#define GLFW_POSIX_MUTEX_STATE _GLFWmutexPOSIX posix;
+
+
+// POSIX-specific thread local storage data
+//
+typedef struct _GLFWtlsPOSIX
+{
+ GLFWbool allocated;
+ pthread_key_t key;
+} _GLFWtlsPOSIX;
+
+// POSIX-specific mutex data
+//
+typedef struct _GLFWmutexPOSIX
+{
+ GLFWbool allocated;
+ pthread_mutex_t handle;
+} _GLFWmutexPOSIX;
+
diff --git a/chromium/third_party/dawn/third_party/glfw/src/posix_time.c b/chromium/third_party/dawn/third_party/glfw/src/posix_time.c
new file mode 100644
index 00000000000..f134be47eb4
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/src/posix_time.c
@@ -0,0 +1,63 @@
+//========================================================================
+// GLFW 3.4 POSIX - www.glfw.org
+//------------------------------------------------------------------------
+// Copyright (c) 2002-2006 Marcus Geelnard
+// Copyright (c) 2006-2017 Camilla Löwy <elmindreda@glfw.org>
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would
+// be appreciated but is not required.
+//
+// 2. Altered source versions must be plainly marked as such, and must not
+// be misrepresented as being the original software.
+//
+// 3. This notice may not be removed or altered from any source
+// distribution.
+//
+//========================================================================
+// It is fine to use C99 in this file because it will not be built with VS
+//========================================================================
+
+#include "internal.h"
+
+#include <unistd.h>
+#include <sys/time.h>
+
+
+//////////////////////////////////////////////////////////////////////////
+////// GLFW platform API //////
+//////////////////////////////////////////////////////////////////////////
+
+void _glfwPlatformInitTimer(void)
+{
+ _glfw.timer.posix.clock = CLOCK_REALTIME;
+ _glfw.timer.posix.frequency = 1000000000;
+
+#if defined(_POSIX_MONOTONIC_CLOCK)
+ struct timespec ts;
+ if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0)
+ _glfw.timer.posix.clock = CLOCK_MONOTONIC;
+#endif
+}
+
+uint64_t _glfwPlatformGetTimerValue(void)
+{
+ struct timespec ts;
+ clock_gettime(_glfw.timer.posix.clock, &ts);
+ return (uint64_t) ts.tv_sec * _glfw.timer.posix.frequency + (uint64_t) ts.tv_nsec;
+}
+
+uint64_t _glfwPlatformGetTimerFrequency(void)
+{
+ return _glfw.timer.posix.frequency;
+}
+
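
A small sketch: raw timer values are in ticks of 1/frequency seconds (nanoseconds here, since the frequency is fixed at 1e9), so elapsed time is a tick delta divided by the frequency, exactly as posix_poll.c above does.

    const uint64_t t0 = _glfwPlatformGetTimerValue();
    /* ... do some work ... */
    const double elapsed = (_glfwPlatformGetTimerValue() - t0) /
                           (double) _glfwPlatformGetTimerFrequency();
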
diff --git a/chromium/third_party/dawn/third_party/glfw/src/posix_time.h b/chromium/third_party/dawn/third_party/glfw/src/posix_time.h
new file mode 100644
index 00000000000..94374adb8ab
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/src/posix_time.h
@@ -0,0 +1,41 @@
+//========================================================================
+// GLFW 3.4 POSIX - www.glfw.org
+//------------------------------------------------------------------------
+// Copyright (c) 2002-2006 Marcus Geelnard
+// Copyright (c) 2006-2017 Camilla Löwy <elmindreda@glfw.org>
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would
+// be appreciated but is not required.
+//
+// 2. Altered source versions must be plainly marked as such, and must not
+// be misrepresented as being the original software.
+//
+// 3. This notice may not be removed or altered from any source
+// distribution.
+//
+//========================================================================
+
+#define GLFW_POSIX_LIBRARY_TIMER_STATE _GLFWtimerPOSIX posix;
+
+#include <stdint.h>
+#include <time.h>
+
+
+// POSIX-specific global timer data
+//
+typedef struct _GLFWtimerPOSIX
+{
+ clockid_t clock;
+ uint64_t frequency;
+} _GLFWtimerPOSIX;
+
diff --git a/chromium/third_party/dawn/third_party/glfw/src/vulkan.c b/chromium/third_party/dawn/third_party/glfw/src/vulkan.c
new file mode 100644
index 00000000000..64a4650fc98
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/src/vulkan.c
@@ -0,0 +1,330 @@
+//========================================================================
+// GLFW 3.4 - www.glfw.org
+//------------------------------------------------------------------------
+// Copyright (c) 2002-2006 Marcus Geelnard
+// Copyright (c) 2006-2018 Camilla Löwy <elmindreda@glfw.org>
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would
+// be appreciated but is not required.
+//
+// 2. Altered source versions must be plainly marked as such, and must not
+// be misrepresented as being the original software.
+//
+// 3. This notice may not be removed or altered from any source
+// distribution.
+//
+//========================================================================
+// Please use C89 style variable declarations in this file because VS 2010 does not support C99
+//========================================================================
+
+#include "internal.h"
+
+#include <assert.h>
+#include <string.h>
+#include <stdlib.h>
+
+#define _GLFW_FIND_LOADER 1
+#define _GLFW_REQUIRE_LOADER 2
+
+
+//////////////////////////////////////////////////////////////////////////
+////// GLFW internal API //////
+//////////////////////////////////////////////////////////////////////////
+
+GLFWbool _glfwInitVulkan(int mode)
+{
+ VkResult err;
+ VkExtensionProperties* ep;
+ PFN_vkEnumerateInstanceExtensionProperties vkEnumerateInstanceExtensionProperties;
+ uint32_t i, count;
+
+ if (_glfw.vk.available)
+ return GLFW_TRUE;
+
+ if (_glfw.hints.init.vulkanLoader)
+ _glfw.vk.GetInstanceProcAddr = _glfw.hints.init.vulkanLoader;
+ else
+ {
+#if defined(_GLFW_VULKAN_LIBRARY)
+ _glfw.vk.handle = _glfwPlatformLoadModule(_GLFW_VULKAN_LIBRARY);
+#elif defined(_GLFW_WIN32)
+ _glfw.vk.handle = _glfwPlatformLoadModule("vulkan-1.dll");
+#elif defined(_GLFW_COCOA)
+ _glfw.vk.handle = _glfwPlatformLoadModule("libvulkan.1.dylib");
+ if (!_glfw.vk.handle)
+ _glfw.vk.handle = _glfwLoadLocalVulkanLoaderCocoa();
+#elif defined(__OpenBSD__) || defined(__NetBSD__)
+ _glfw.vk.handle = _glfwPlatformLoadModule("libvulkan.so");
+#else
+ _glfw.vk.handle = _glfwPlatformLoadModule("libvulkan.so.1");
+#endif
+ if (!_glfw.vk.handle)
+ {
+ if (mode == _GLFW_REQUIRE_LOADER)
+ _glfwInputError(GLFW_API_UNAVAILABLE, "Vulkan: Loader not found");
+
+ return GLFW_FALSE;
+ }
+
+ _glfw.vk.GetInstanceProcAddr = (PFN_vkGetInstanceProcAddr)
+ _glfwPlatformGetModuleSymbol(_glfw.vk.handle, "vkGetInstanceProcAddr");
+ if (!_glfw.vk.GetInstanceProcAddr)
+ {
+ _glfwInputError(GLFW_API_UNAVAILABLE,
+ "Vulkan: Loader does not export vkGetInstanceProcAddr");
+
+ _glfwTerminateVulkan();
+ return GLFW_FALSE;
+ }
+ }
+
+ vkEnumerateInstanceExtensionProperties = (PFN_vkEnumerateInstanceExtensionProperties)
+ vkGetInstanceProcAddr(NULL, "vkEnumerateInstanceExtensionProperties");
+ if (!vkEnumerateInstanceExtensionProperties)
+ {
+ _glfwInputError(GLFW_API_UNAVAILABLE,
+ "Vulkan: Failed to retrieve vkEnumerateInstanceExtensionProperties");
+
+ _glfwTerminateVulkan();
+ return GLFW_FALSE;
+ }
+
+ err = vkEnumerateInstanceExtensionProperties(NULL, &count, NULL);
+ if (err)
+ {
+ // NOTE: This happens on systems with a loader but without any Vulkan ICD
+ if (mode == _GLFW_REQUIRE_LOADER)
+ {
+ _glfwInputError(GLFW_API_UNAVAILABLE,
+ "Vulkan: Failed to query instance extension count: %s",
+ _glfwGetVulkanResultString(err));
+ }
+
+ _glfwTerminateVulkan();
+ return GLFW_FALSE;
+ }
+
+ ep = _glfw_calloc(count, sizeof(VkExtensionProperties));
+
+ err = vkEnumerateInstanceExtensionProperties(NULL, &count, ep);
+ if (err)
+ {
+ _glfwInputError(GLFW_API_UNAVAILABLE,
+ "Vulkan: Failed to query instance extensions: %s",
+ _glfwGetVulkanResultString(err));
+
+ _glfw_free(ep);
+ _glfwTerminateVulkan();
+ return GLFW_FALSE;
+ }
+
+ for (i = 0; i < count; i++)
+ {
+ if (strcmp(ep[i].extensionName, "VK_KHR_surface") == 0)
+ _glfw.vk.KHR_surface = GLFW_TRUE;
+ else if (strcmp(ep[i].extensionName, "VK_KHR_win32_surface") == 0)
+ _glfw.vk.KHR_win32_surface = GLFW_TRUE;
+ else if (strcmp(ep[i].extensionName, "VK_MVK_macos_surface") == 0)
+ _glfw.vk.MVK_macos_surface = GLFW_TRUE;
+ else if (strcmp(ep[i].extensionName, "VK_EXT_metal_surface") == 0)
+ _glfw.vk.EXT_metal_surface = GLFW_TRUE;
+ else if (strcmp(ep[i].extensionName, "VK_KHR_xlib_surface") == 0)
+ _glfw.vk.KHR_xlib_surface = GLFW_TRUE;
+ else if (strcmp(ep[i].extensionName, "VK_KHR_xcb_surface") == 0)
+ _glfw.vk.KHR_xcb_surface = GLFW_TRUE;
+ else if (strcmp(ep[i].extensionName, "VK_KHR_wayland_surface") == 0)
+ _glfw.vk.KHR_wayland_surface = GLFW_TRUE;
+ }
+
+ _glfw_free(ep);
+
+ _glfw.vk.available = GLFW_TRUE;
+
+ _glfw.platform.getRequiredInstanceExtensions(_glfw.vk.extensions);
+
+ return GLFW_TRUE;
+}
+
+void _glfwTerminateVulkan(void)
+{
+ if (_glfw.vk.handle)
+ _glfwPlatformFreeModule(_glfw.vk.handle);
+}
+
+const char* _glfwGetVulkanResultString(VkResult result)
+{
+ switch (result)
+ {
+ case VK_SUCCESS:
+ return "Success";
+ case VK_NOT_READY:
+ return "A fence or query has not yet completed";
+ case VK_TIMEOUT:
+ return "A wait operation has not completed in the specified time";
+ case VK_EVENT_SET:
+ return "An event is signaled";
+ case VK_EVENT_RESET:
+ return "An event is unsignaled";
+ case VK_INCOMPLETE:
+ return "A return array was too small for the result";
+ case VK_ERROR_OUT_OF_HOST_MEMORY:
+ return "A host memory allocation has failed";
+ case VK_ERROR_OUT_OF_DEVICE_MEMORY:
+ return "A device memory allocation has failed";
+ case VK_ERROR_INITIALIZATION_FAILED:
+ return "Initialization of an object could not be completed for implementation-specific reasons";
+ case VK_ERROR_DEVICE_LOST:
+ return "The logical or physical device has been lost";
+ case VK_ERROR_MEMORY_MAP_FAILED:
+ return "Mapping of a memory object has failed";
+ case VK_ERROR_LAYER_NOT_PRESENT:
+ return "A requested layer is not present or could not be loaded";
+ case VK_ERROR_EXTENSION_NOT_PRESENT:
+ return "A requested extension is not supported";
+ case VK_ERROR_FEATURE_NOT_PRESENT:
+ return "A requested feature is not supported";
+ case VK_ERROR_INCOMPATIBLE_DRIVER:
+ return "The requested version of Vulkan is not supported by the driver or is otherwise incompatible";
+ case VK_ERROR_TOO_MANY_OBJECTS:
+ return "Too many objects of the type have already been created";
+ case VK_ERROR_FORMAT_NOT_SUPPORTED:
+ return "A requested format is not supported on this device";
+ case VK_ERROR_SURFACE_LOST_KHR:
+ return "A surface is no longer available";
+ case VK_SUBOPTIMAL_KHR:
+ return "A swapchain no longer matches the surface properties exactly, but can still be used";
+ case VK_ERROR_OUT_OF_DATE_KHR:
+ return "A surface has changed in such a way that it is no longer compatible with the swapchain";
+ case VK_ERROR_INCOMPATIBLE_DISPLAY_KHR:
+ return "The display used by a swapchain does not use the same presentable image layout";
+ case VK_ERROR_NATIVE_WINDOW_IN_USE_KHR:
+ return "The requested window is already connected to a VkSurfaceKHR, or to some other non-Vulkan API";
+ case VK_ERROR_VALIDATION_FAILED_EXT:
+ return "A validation layer found an error";
+ default:
+ return "ERROR: UNKNOWN VULKAN ERROR";
+ }
+}
+
+
+//////////////////////////////////////////////////////////////////////////
+////// GLFW public API //////
+//////////////////////////////////////////////////////////////////////////
+
+GLFWAPI int glfwVulkanSupported(void)
+{
+ _GLFW_REQUIRE_INIT_OR_RETURN(GLFW_FALSE);
+ return _glfwInitVulkan(_GLFW_FIND_LOADER);
+}
+
+GLFWAPI const char** glfwGetRequiredInstanceExtensions(uint32_t* count)
+{
+ assert(count != NULL);
+
+ *count = 0;
+
+ _GLFW_REQUIRE_INIT_OR_RETURN(NULL);
+
+ if (!_glfwInitVulkan(_GLFW_REQUIRE_LOADER))
+ return NULL;
+
+ if (!_glfw.vk.extensions[0])
+ return NULL;
+
+ *count = 2;
+ return (const char**) _glfw.vk.extensions;
+}
+
+GLFWAPI GLFWvkproc glfwGetInstanceProcAddress(VkInstance instance,
+ const char* procname)
+{
+ GLFWvkproc proc;
+ assert(procname != NULL);
+
+ _GLFW_REQUIRE_INIT_OR_RETURN(NULL);
+
+ if (!_glfwInitVulkan(_GLFW_REQUIRE_LOADER))
+ return NULL;
+
+ // NOTE: Vulkan 1.0 and 1.1 vkGetInstanceProcAddr cannot return itself
+ if (strcmp(procname, "vkGetInstanceProcAddr") == 0)
+ return (GLFWvkproc) vkGetInstanceProcAddr;
+
+ proc = (GLFWvkproc) vkGetInstanceProcAddr(instance, procname);
+ if (!proc)
+ {
+ if (_glfw.vk.handle)
+ proc = (GLFWvkproc) _glfwPlatformGetModuleSymbol(_glfw.vk.handle, procname);
+ }
+
+ return proc;
+}
+
+GLFWAPI int glfwGetPhysicalDevicePresentationSupport(VkInstance instance,
+ VkPhysicalDevice device,
+ uint32_t queuefamily)
+{
+ assert(instance != VK_NULL_HANDLE);
+ assert(device != VK_NULL_HANDLE);
+
+ _GLFW_REQUIRE_INIT_OR_RETURN(GLFW_FALSE);
+
+ if (!_glfwInitVulkan(_GLFW_REQUIRE_LOADER))
+ return GLFW_FALSE;
+
+ if (!_glfw.vk.extensions[0])
+ {
+ _glfwInputError(GLFW_API_UNAVAILABLE,
+ "Vulkan: Window surface creation extensions not found");
+ return GLFW_FALSE;
+ }
+
+ return _glfw.platform.getPhysicalDevicePresentationSupport(instance,
+ device,
+ queuefamily);
+}
+
+GLFWAPI VkResult glfwCreateWindowSurface(VkInstance instance,
+ GLFWwindow* handle,
+ const VkAllocationCallbacks* allocator,
+ VkSurfaceKHR* surface)
+{
+ _GLFWwindow* window = (_GLFWwindow*) handle;
+ assert(instance != VK_NULL_HANDLE);
+ assert(window != NULL);
+ assert(surface != NULL);
+
+ *surface = VK_NULL_HANDLE;
+
+ _GLFW_REQUIRE_INIT_OR_RETURN(VK_ERROR_INITIALIZATION_FAILED);
+
+ if (!_glfwInitVulkan(_GLFW_REQUIRE_LOADER))
+ return VK_ERROR_INITIALIZATION_FAILED;
+
+ if (!_glfw.vk.extensions[0])
+ {
+ _glfwInputError(GLFW_API_UNAVAILABLE,
+ "Vulkan: Window surface creation extensions not found");
+ return VK_ERROR_EXTENSION_NOT_PRESENT;
+ }
+
+ if (window->context.client != GLFW_NO_API)
+ {
+ _glfwInputError(GLFW_INVALID_VALUE,
+ "Vulkan: Window surface creation requires the window to have the client API set to GLFW_NO_API");
+ return VK_ERROR_NATIVE_WINDOW_IN_USE_KHR;
+ }
+
+ return _glfw.platform.createWindowSurface(instance, window, allocator, surface);
+}
+
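
An illustrative end-to-end sketch of the public entry points above: check for a loader, enable the instance extensions GLFW needs, then create a surface for a window opened with GLFW_CLIENT_API set to GLFW_NO_API. Error handling is trimmed and the function name is hypothetical.

    #define GLFW_INCLUDE_VULKAN
    #include <GLFW/glfw3.h>

    VkSurfaceKHR create_surface_for(VkInstance instance, GLFWwindow* window)
    {
        VkSurfaceKHR surface = VK_NULL_HANDLE;

        if (!glfwVulkanSupported())
            return VK_NULL_HANDLE;

        /* When building the VkInstance, enable the extensions reported by
           glfwGetRequiredInstanceExtensions(&count). */

        if (glfwCreateWindowSurface(instance, window, NULL, &surface) != VK_SUCCESS)
            return VK_NULL_HANDLE;

        return surface;
    }
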
diff --git a/chromium/third_party/dawn/third_party/glfw/src/wgl_context.c b/chromium/third_party/dawn/third_party/glfw/src/wgl_context.c
new file mode 100644
index 00000000000..a82c736b7ba
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/src/wgl_context.c
@@ -0,0 +1,790 @@
+//========================================================================
+// GLFW 3.4 WGL - www.glfw.org
+//------------------------------------------------------------------------
+// Copyright (c) 2002-2006 Marcus Geelnard
+// Copyright (c) 2006-2019 Camilla Löwy <elmindreda@glfw.org>
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would
+// be appreciated but is not required.
+//
+// 2. Altered source versions must be plainly marked as such, and must not
+// be misrepresented as being the original software.
+//
+// 3. This notice may not be removed or altered from any source
+// distribution.
+//
+//========================================================================
+// Please use C89 style variable declarations in this file because VS 2010 does not support C99
+//========================================================================
+
+#include "internal.h"
+
+#include <stdlib.h>
+#include <assert.h>
+
+// Return the value corresponding to the specified attribute
+//
+static int findPixelFormatAttribValueWGL(const int* attribs,
+ int attribCount,
+ const int* values,
+ int attrib)
+{
+ int i;
+
+ for (i = 0; i < attribCount; i++)
+ {
+ if (attribs[i] == attrib)
+ return values[i];
+ }
+
+ _glfwInputErrorWin32(GLFW_PLATFORM_ERROR,
+ "WGL: Unknown pixel format attribute requested");
+ return 0;
+}
+
+#define ADD_ATTRIB(a) \
+{ \
+ assert((size_t) attribCount < sizeof(attribs) / sizeof(attribs[0])); \
+ attribs[attribCount++] = a; \
+}
+#define FIND_ATTRIB_VALUE(a) \
+ findPixelFormatAttribValueWGL(attribs, attribCount, values, a)
+
+// Return the pixel format most closely matching the desired framebuffer config, or zero on failure
+//
+static int choosePixelFormatWGL(_GLFWwindow* window,
+ const _GLFWctxconfig* ctxconfig,
+ const _GLFWfbconfig* fbconfig)
+{
+ _GLFWfbconfig* usableConfigs;
+ const _GLFWfbconfig* closest;
+ int i, pixelFormat, nativeCount, usableCount = 0, attribCount = 0;
+ int attribs[40];
+ int values[sizeof(attribs) / sizeof(attribs[0])];
+
+ if (_glfw.wgl.ARB_pixel_format)
+ {
+ const int attrib = WGL_NUMBER_PIXEL_FORMATS_ARB;
+
+ if (!wglGetPixelFormatAttribivARB(window->context.wgl.dc,
+ 1, 0, 1, &attrib, &nativeCount))
+ {
+ _glfwInputErrorWin32(GLFW_PLATFORM_ERROR,
+ "WGL: Failed to retrieve pixel format attribute");
+ return 0;
+ }
+
+ ADD_ATTRIB(WGL_SUPPORT_OPENGL_ARB);
+ ADD_ATTRIB(WGL_DRAW_TO_WINDOW_ARB);
+ ADD_ATTRIB(WGL_PIXEL_TYPE_ARB);
+ ADD_ATTRIB(WGL_ACCELERATION_ARB);
+ ADD_ATTRIB(WGL_RED_BITS_ARB);
+ ADD_ATTRIB(WGL_RED_SHIFT_ARB);
+ ADD_ATTRIB(WGL_GREEN_BITS_ARB);
+ ADD_ATTRIB(WGL_GREEN_SHIFT_ARB);
+ ADD_ATTRIB(WGL_BLUE_BITS_ARB);
+ ADD_ATTRIB(WGL_BLUE_SHIFT_ARB);
+ ADD_ATTRIB(WGL_ALPHA_BITS_ARB);
+ ADD_ATTRIB(WGL_ALPHA_SHIFT_ARB);
+ ADD_ATTRIB(WGL_DEPTH_BITS_ARB);
+ ADD_ATTRIB(WGL_STENCIL_BITS_ARB);
+ ADD_ATTRIB(WGL_ACCUM_BITS_ARB);
+ ADD_ATTRIB(WGL_ACCUM_RED_BITS_ARB);
+ ADD_ATTRIB(WGL_ACCUM_GREEN_BITS_ARB);
+ ADD_ATTRIB(WGL_ACCUM_BLUE_BITS_ARB);
+ ADD_ATTRIB(WGL_ACCUM_ALPHA_BITS_ARB);
+ ADD_ATTRIB(WGL_AUX_BUFFERS_ARB);
+ ADD_ATTRIB(WGL_STEREO_ARB);
+ ADD_ATTRIB(WGL_DOUBLE_BUFFER_ARB);
+
+ if (_glfw.wgl.ARB_multisample)
+ ADD_ATTRIB(WGL_SAMPLES_ARB);
+
+ if (ctxconfig->client == GLFW_OPENGL_API)
+ {
+ if (_glfw.wgl.ARB_framebuffer_sRGB || _glfw.wgl.EXT_framebuffer_sRGB)
+ ADD_ATTRIB(WGL_FRAMEBUFFER_SRGB_CAPABLE_ARB);
+ }
+ else
+ {
+ if (_glfw.wgl.EXT_colorspace)
+ ADD_ATTRIB(WGL_COLORSPACE_EXT);
+ }
+ }
+ else
+ {
+ nativeCount = DescribePixelFormat(window->context.wgl.dc,
+ 1,
+ sizeof(PIXELFORMATDESCRIPTOR),
+ NULL);
+ }
+
+ usableConfigs = _glfw_calloc(nativeCount, sizeof(_GLFWfbconfig));
+
+ for (i = 0; i < nativeCount; i++)
+ {
+ _GLFWfbconfig* u = usableConfigs + usableCount;
+ pixelFormat = i + 1;
+
+ if (_glfw.wgl.ARB_pixel_format)
+ {
+ // Get pixel format attributes through "modern" extension
+
+ if (!wglGetPixelFormatAttribivARB(window->context.wgl.dc,
+ pixelFormat, 0,
+ attribCount,
+ attribs, values))
+ {
+ _glfwInputErrorWin32(GLFW_PLATFORM_ERROR,
+ "WGL: Failed to retrieve pixel format attributes");
+
+ _glfw_free(usableConfigs);
+ return 0;
+ }
+
+ if (!FIND_ATTRIB_VALUE(WGL_SUPPORT_OPENGL_ARB) ||
+ !FIND_ATTRIB_VALUE(WGL_DRAW_TO_WINDOW_ARB))
+ {
+ continue;
+ }
+
+ if (FIND_ATTRIB_VALUE(WGL_PIXEL_TYPE_ARB) != WGL_TYPE_RGBA_ARB)
+ continue;
+
+ if (FIND_ATTRIB_VALUE(WGL_ACCELERATION_ARB) == WGL_NO_ACCELERATION_ARB)
+ continue;
+
+ if (FIND_ATTRIB_VALUE(WGL_DOUBLE_BUFFER_ARB) != fbconfig->doublebuffer)
+ continue;
+
+ u->redBits = FIND_ATTRIB_VALUE(WGL_RED_BITS_ARB);
+ u->greenBits = FIND_ATTRIB_VALUE(WGL_GREEN_BITS_ARB);
+ u->blueBits = FIND_ATTRIB_VALUE(WGL_BLUE_BITS_ARB);
+ u->alphaBits = FIND_ATTRIB_VALUE(WGL_ALPHA_BITS_ARB);
+
+ u->depthBits = FIND_ATTRIB_VALUE(WGL_DEPTH_BITS_ARB);
+ u->stencilBits = FIND_ATTRIB_VALUE(WGL_STENCIL_BITS_ARB);
+
+ u->accumRedBits = FIND_ATTRIB_VALUE(WGL_ACCUM_RED_BITS_ARB);
+ u->accumGreenBits = FIND_ATTRIB_VALUE(WGL_ACCUM_GREEN_BITS_ARB);
+ u->accumBlueBits = FIND_ATTRIB_VALUE(WGL_ACCUM_BLUE_BITS_ARB);
+ u->accumAlphaBits = FIND_ATTRIB_VALUE(WGL_ACCUM_ALPHA_BITS_ARB);
+
+ u->auxBuffers = FIND_ATTRIB_VALUE(WGL_AUX_BUFFERS_ARB);
+
+ if (FIND_ATTRIB_VALUE(WGL_STEREO_ARB))
+ u->stereo = GLFW_TRUE;
+
+ if (_glfw.wgl.ARB_multisample)
+ u->samples = FIND_ATTRIB_VALUE(WGL_SAMPLES_ARB);
+
+ if (ctxconfig->client == GLFW_OPENGL_API)
+ {
+ if (_glfw.wgl.ARB_framebuffer_sRGB ||
+ _glfw.wgl.EXT_framebuffer_sRGB)
+ {
+ if (FIND_ATTRIB_VALUE(WGL_FRAMEBUFFER_SRGB_CAPABLE_ARB))
+ u->sRGB = GLFW_TRUE;
+ }
+ }
+ else
+ {
+ if (_glfw.wgl.EXT_colorspace)
+ {
+ if (FIND_ATTRIB_VALUE(WGL_COLORSPACE_EXT) == WGL_COLORSPACE_SRGB_EXT)
+ u->sRGB = GLFW_TRUE;
+ }
+ }
+ }
+ else
+ {
+ // Get pixel format attributes through legacy PFDs
+
+ PIXELFORMATDESCRIPTOR pfd;
+
+ if (!DescribePixelFormat(window->context.wgl.dc,
+ pixelFormat,
+ sizeof(PIXELFORMATDESCRIPTOR),
+ &pfd))
+ {
+ _glfwInputErrorWin32(GLFW_PLATFORM_ERROR,
+ "WGL: Failed to describe pixel format");
+
+ _glfw_free(usableConfigs);
+ return 0;
+ }
+
+ if (!(pfd.dwFlags & PFD_DRAW_TO_WINDOW) ||
+ !(pfd.dwFlags & PFD_SUPPORT_OPENGL))
+ {
+ continue;
+ }
+
+ if (!(pfd.dwFlags & PFD_GENERIC_ACCELERATED) &&
+ (pfd.dwFlags & PFD_GENERIC_FORMAT))
+ {
+ continue;
+ }
+
+ if (pfd.iPixelType != PFD_TYPE_RGBA)
+ continue;
+
+ if (!!(pfd.dwFlags & PFD_DOUBLEBUFFER) != fbconfig->doublebuffer)
+ continue;
+
+ u->redBits = pfd.cRedBits;
+ u->greenBits = pfd.cGreenBits;
+ u->blueBits = pfd.cBlueBits;
+ u->alphaBits = pfd.cAlphaBits;
+
+ u->depthBits = pfd.cDepthBits;
+ u->stencilBits = pfd.cStencilBits;
+
+ u->accumRedBits = pfd.cAccumRedBits;
+ u->accumGreenBits = pfd.cAccumGreenBits;
+ u->accumBlueBits = pfd.cAccumBlueBits;
+ u->accumAlphaBits = pfd.cAccumAlphaBits;
+
+ u->auxBuffers = pfd.cAuxBuffers;
+
+ if (pfd.dwFlags & PFD_STEREO)
+ u->stereo = GLFW_TRUE;
+ }
+
+ u->handle = pixelFormat;
+ usableCount++;
+ }
+
+ if (!usableCount)
+ {
+ _glfwInputError(GLFW_API_UNAVAILABLE,
+ "WGL: The driver does not appear to support OpenGL");
+
+ _glfw_free(usableConfigs);
+ return 0;
+ }
+
+ closest = _glfwChooseFBConfig(fbconfig, usableConfigs, usableCount);
+ if (!closest)
+ {
+ _glfwInputError(GLFW_FORMAT_UNAVAILABLE,
+ "WGL: Failed to find a suitable pixel format");
+
+ _glfw_free(usableConfigs);
+ return 0;
+ }
+
+ pixelFormat = (int) closest->handle;
+ _glfw_free(usableConfigs);
+
+ return pixelFormat;
+}
+
+#undef ADD_ATTRIB
+#undef FIND_ATTRIB_VALUE
+
+static void makeContextCurrentWGL(_GLFWwindow* window)
+{
+ if (window)
+ {
+ if (wglMakeCurrent(window->context.wgl.dc, window->context.wgl.handle))
+ _glfwPlatformSetTls(&_glfw.contextSlot, window);
+ else
+ {
+ _glfwInputErrorWin32(GLFW_PLATFORM_ERROR,
+ "WGL: Failed to make context current");
+ _glfwPlatformSetTls(&_glfw.contextSlot, NULL);
+ }
+ }
+ else
+ {
+ if (!wglMakeCurrent(NULL, NULL))
+ {
+ _glfwInputErrorWin32(GLFW_PLATFORM_ERROR,
+ "WGL: Failed to clear current context");
+ }
+
+ _glfwPlatformSetTls(&_glfw.contextSlot, NULL);
+ }
+}
+
+static void swapBuffersWGL(_GLFWwindow* window)
+{
+ if (!window->monitor)
+ {
+ // HACK: Use DwmFlush when desktop composition is enabled on Windows Vista and 7
+ if (!IsWindows8OrGreater() && IsWindowsVistaOrGreater())
+ {
+ BOOL enabled = FALSE;
+
+ if (SUCCEEDED(DwmIsCompositionEnabled(&enabled)) && enabled)
+ {
+ int count = abs(window->context.wgl.interval);
+ while (count--)
+ DwmFlush();
+ }
+ }
+ }
+
+ SwapBuffers(window->context.wgl.dc);
+}
+
+static void swapIntervalWGL(int interval)
+{
+ _GLFWwindow* window = _glfwPlatformGetTls(&_glfw.contextSlot);
+
+ window->context.wgl.interval = interval;
+
+ if (!window->monitor)
+ {
+ // HACK: Disable WGL swap interval when desktop composition is enabled on Windows
+ // Vista and 7 to avoid interfering with DWM vsync
+ if (!IsWindows8OrGreater() && IsWindowsVistaOrGreater())
+ {
+ BOOL enabled = FALSE;
+
+ if (SUCCEEDED(DwmIsCompositionEnabled(&enabled)) && enabled)
+ interval = 0;
+ }
+ }
+
+ if (_glfw.wgl.EXT_swap_control)
+ wglSwapIntervalEXT(interval);
+}
+
+static int extensionSupportedWGL(const char* extension)
+{
+ const char* extensions = NULL;
+
+ if (_glfw.wgl.GetExtensionsStringARB)
+ extensions = wglGetExtensionsStringARB(wglGetCurrentDC());
+ else if (_glfw.wgl.GetExtensionsStringEXT)
+ extensions = wglGetExtensionsStringEXT();
+
+ if (!extensions)
+ return GLFW_FALSE;
+
+ return _glfwStringInExtensionString(extension, extensions);
+}
+
+static GLFWglproc getProcAddressWGL(const char* procname)
+{
+ const GLFWglproc proc = (GLFWglproc) wglGetProcAddress(procname);
+ if (proc)
+ return proc;
+
+ return (GLFWglproc) _glfwPlatformGetModuleSymbol(_glfw.wgl.instance, procname);
+}
+
+static void destroyContextWGL(_GLFWwindow* window)
+{
+ if (window->context.wgl.handle)
+ {
+ wglDeleteContext(window->context.wgl.handle);
+ window->context.wgl.handle = NULL;
+ }
+}
+
+// Initialize WGL
+//
+GLFWbool _glfwInitWGL(void)
+{
+ PIXELFORMATDESCRIPTOR pfd;
+ HGLRC prc, rc;
+ HDC pdc, dc;
+
+ if (_glfw.wgl.instance)
+ return GLFW_TRUE;
+
+ _glfw.wgl.instance = _glfwPlatformLoadModule("opengl32.dll");
+ if (!_glfw.wgl.instance)
+ {
+ _glfwInputErrorWin32(GLFW_PLATFORM_ERROR,
+ "WGL: Failed to load opengl32.dll");
+ return GLFW_FALSE;
+ }
+
+ _glfw.wgl.CreateContext = (PFN_wglCreateContext)
+ _glfwPlatformGetModuleSymbol(_glfw.wgl.instance, "wglCreateContext");
+ _glfw.wgl.DeleteContext = (PFN_wglDeleteContext)
+ _glfwPlatformGetModuleSymbol(_glfw.wgl.instance, "wglDeleteContext");
+ _glfw.wgl.GetProcAddress = (PFN_wglGetProcAddress)
+ _glfwPlatformGetModuleSymbol(_glfw.wgl.instance, "wglGetProcAddress");
+ _glfw.wgl.GetCurrentDC = (PFN_wglGetCurrentDC)
+ _glfwPlatformGetModuleSymbol(_glfw.wgl.instance, "wglGetCurrentDC");
+ _glfw.wgl.GetCurrentContext = (PFN_wglGetCurrentContext)
+ _glfwPlatformGetModuleSymbol(_glfw.wgl.instance, "wglGetCurrentContext");
+ _glfw.wgl.MakeCurrent = (PFN_wglMakeCurrent)
+ _glfwPlatformGetModuleSymbol(_glfw.wgl.instance, "wglMakeCurrent");
+ _glfw.wgl.ShareLists = (PFN_wglShareLists)
+ _glfwPlatformGetModuleSymbol(_glfw.wgl.instance, "wglShareLists");
+
+ // NOTE: A dummy context has to be created for opengl32.dll to load the
+ // OpenGL ICD, from which we can then query WGL extensions
+ // NOTE: This code will accept the Microsoft GDI ICD; accelerated context
+ // creation failure occurs during manual pixel format enumeration
+
+ dc = GetDC(_glfw.win32.helperWindowHandle);
+
+ ZeroMemory(&pfd, sizeof(pfd));
+ pfd.nSize = sizeof(pfd);
+ pfd.nVersion = 1;
+ pfd.dwFlags = PFD_DRAW_TO_WINDOW | PFD_SUPPORT_OPENGL | PFD_DOUBLEBUFFER;
+ pfd.iPixelType = PFD_TYPE_RGBA;
+ pfd.cColorBits = 24;
+
+ if (!SetPixelFormat(dc, ChoosePixelFormat(dc, &pfd), &pfd))
+ {
+ _glfwInputErrorWin32(GLFW_PLATFORM_ERROR,
+ "WGL: Failed to set pixel format for dummy context");
+ return GLFW_FALSE;
+ }
+
+ rc = wglCreateContext(dc);
+ if (!rc)
+ {
+ _glfwInputErrorWin32(GLFW_PLATFORM_ERROR,
+ "WGL: Failed to create dummy context");
+ return GLFW_FALSE;
+ }
+
+ pdc = wglGetCurrentDC();
+ prc = wglGetCurrentContext();
+
+ if (!wglMakeCurrent(dc, rc))
+ {
+ _glfwInputErrorWin32(GLFW_PLATFORM_ERROR,
+ "WGL: Failed to make dummy context current");
+ wglMakeCurrent(pdc, prc);
+ wglDeleteContext(rc);
+ return GLFW_FALSE;
+ }
+
+ // NOTE: Functions must be loaded first as they're needed to retrieve the
+ // extension string that tells us whether the functions are supported
+ _glfw.wgl.GetExtensionsStringEXT = (PFNWGLGETEXTENSIONSSTRINGEXTPROC)
+ wglGetProcAddress("wglGetExtensionsStringEXT");
+ _glfw.wgl.GetExtensionsStringARB = (PFNWGLGETEXTENSIONSSTRINGARBPROC)
+ wglGetProcAddress("wglGetExtensionsStringARB");
+ _glfw.wgl.CreateContextAttribsARB = (PFNWGLCREATECONTEXTATTRIBSARBPROC)
+ wglGetProcAddress("wglCreateContextAttribsARB");
+ _glfw.wgl.SwapIntervalEXT = (PFNWGLSWAPINTERVALEXTPROC)
+ wglGetProcAddress("wglSwapIntervalEXT");
+ _glfw.wgl.GetPixelFormatAttribivARB = (PFNWGLGETPIXELFORMATATTRIBIVARBPROC)
+ wglGetProcAddress("wglGetPixelFormatAttribivARB");
+
+ // NOTE: WGL_ARB_extensions_string and WGL_EXT_extensions_string are not
+ // checked below as we are already using them
+ _glfw.wgl.ARB_multisample =
+ extensionSupportedWGL("WGL_ARB_multisample");
+ _glfw.wgl.ARB_framebuffer_sRGB =
+ extensionSupportedWGL("WGL_ARB_framebuffer_sRGB");
+ _glfw.wgl.EXT_framebuffer_sRGB =
+ extensionSupportedWGL("WGL_EXT_framebuffer_sRGB");
+ _glfw.wgl.ARB_create_context =
+ extensionSupportedWGL("WGL_ARB_create_context");
+ _glfw.wgl.ARB_create_context_profile =
+ extensionSupportedWGL("WGL_ARB_create_context_profile");
+ _glfw.wgl.EXT_create_context_es2_profile =
+ extensionSupportedWGL("WGL_EXT_create_context_es2_profile");
+ _glfw.wgl.ARB_create_context_robustness =
+ extensionSupportedWGL("WGL_ARB_create_context_robustness");
+ _glfw.wgl.ARB_create_context_no_error =
+ extensionSupportedWGL("WGL_ARB_create_context_no_error");
+ _glfw.wgl.EXT_swap_control =
+ extensionSupportedWGL("WGL_EXT_swap_control");
+ _glfw.wgl.EXT_colorspace =
+ extensionSupportedWGL("WGL_EXT_colorspace");
+ _glfw.wgl.ARB_pixel_format =
+ extensionSupportedWGL("WGL_ARB_pixel_format");
+ _glfw.wgl.ARB_context_flush_control =
+ extensionSupportedWGL("WGL_ARB_context_flush_control");
+
+ wglMakeCurrent(pdc, prc);
+ wglDeleteContext(rc);
+ return GLFW_TRUE;
+}
+
+// Terminate WGL
+//
+void _glfwTerminateWGL(void)
+{
+ if (_glfw.wgl.instance)
+ _glfwPlatformFreeModule(_glfw.wgl.instance);
+}
+
+#define SET_ATTRIB(a, v) \
+{ \
+ assert(((size_t) index + 1) < sizeof(attribs) / sizeof(attribs[0])); \
+ attribs[index++] = a; \
+ attribs[index++] = v; \
+}
+
+// Create the OpenGL or OpenGL ES context
+//
+GLFWbool _glfwCreateContextWGL(_GLFWwindow* window,
+ const _GLFWctxconfig* ctxconfig,
+ const _GLFWfbconfig* fbconfig)
+{
+ int attribs[40];
+ int pixelFormat;
+ PIXELFORMATDESCRIPTOR pfd;
+ HGLRC share = NULL;
+
+ if (ctxconfig->share)
+ share = ctxconfig->share->context.wgl.handle;
+
+ window->context.wgl.dc = GetDC(window->win32.handle);
+ if (!window->context.wgl.dc)
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "WGL: Failed to retrieve DC for window");
+ return GLFW_FALSE;
+ }
+
+ pixelFormat = choosePixelFormatWGL(window, ctxconfig, fbconfig);
+ if (!pixelFormat)
+ return GLFW_FALSE;
+
+ if (!DescribePixelFormat(window->context.wgl.dc,
+ pixelFormat, sizeof(pfd), &pfd))
+ {
+ _glfwInputErrorWin32(GLFW_PLATFORM_ERROR,
+ "WGL: Failed to retrieve PFD for selected pixel format");
+ return GLFW_FALSE;
+ }
+
+ if (!SetPixelFormat(window->context.wgl.dc, pixelFormat, &pfd))
+ {
+ _glfwInputErrorWin32(GLFW_PLATFORM_ERROR,
+ "WGL: Failed to set selected pixel format");
+ return GLFW_FALSE;
+ }
+
+ if (ctxconfig->client == GLFW_OPENGL_API)
+ {
+ if (ctxconfig->forward)
+ {
+ if (!_glfw.wgl.ARB_create_context)
+ {
+ _glfwInputError(GLFW_VERSION_UNAVAILABLE,
+ "WGL: A forward compatible OpenGL context requested but WGL_ARB_create_context is unavailable");
+ return GLFW_FALSE;
+ }
+ }
+
+ if (ctxconfig->profile)
+ {
+ if (!_glfw.wgl.ARB_create_context_profile)
+ {
+ _glfwInputError(GLFW_VERSION_UNAVAILABLE,
+ "WGL: OpenGL profile requested but WGL_ARB_create_context_profile is unavailable");
+ return GLFW_FALSE;
+ }
+ }
+ }
+ else
+ {
+ if (!_glfw.wgl.ARB_create_context ||
+ !_glfw.wgl.ARB_create_context_profile ||
+ !_glfw.wgl.EXT_create_context_es2_profile)
+ {
+            _glfwInputError(GLFW_API_UNAVAILABLE,
+                            "WGL: OpenGL ES requested but WGL_EXT_create_context_es2_profile is unavailable");
+ return GLFW_FALSE;
+ }
+ }
+
+ if (_glfw.wgl.ARB_create_context)
+ {
+ int index = 0, mask = 0, flags = 0;
+
+ if (ctxconfig->client == GLFW_OPENGL_API)
+ {
+ if (ctxconfig->forward)
+ flags |= WGL_CONTEXT_FORWARD_COMPATIBLE_BIT_ARB;
+
+ if (ctxconfig->profile == GLFW_OPENGL_CORE_PROFILE)
+ mask |= WGL_CONTEXT_CORE_PROFILE_BIT_ARB;
+ else if (ctxconfig->profile == GLFW_OPENGL_COMPAT_PROFILE)
+ mask |= WGL_CONTEXT_COMPATIBILITY_PROFILE_BIT_ARB;
+ }
+ else
+ mask |= WGL_CONTEXT_ES2_PROFILE_BIT_EXT;
+
+ if (ctxconfig->debug)
+ flags |= WGL_CONTEXT_DEBUG_BIT_ARB;
+
+ if (ctxconfig->robustness)
+ {
+ if (_glfw.wgl.ARB_create_context_robustness)
+ {
+ if (ctxconfig->robustness == GLFW_NO_RESET_NOTIFICATION)
+ {
+ SET_ATTRIB(WGL_CONTEXT_RESET_NOTIFICATION_STRATEGY_ARB,
+ WGL_NO_RESET_NOTIFICATION_ARB);
+ }
+ else if (ctxconfig->robustness == GLFW_LOSE_CONTEXT_ON_RESET)
+ {
+ SET_ATTRIB(WGL_CONTEXT_RESET_NOTIFICATION_STRATEGY_ARB,
+ WGL_LOSE_CONTEXT_ON_RESET_ARB);
+ }
+
+ flags |= WGL_CONTEXT_ROBUST_ACCESS_BIT_ARB;
+ }
+ }
+
+ if (ctxconfig->release)
+ {
+ if (_glfw.wgl.ARB_context_flush_control)
+ {
+ if (ctxconfig->release == GLFW_RELEASE_BEHAVIOR_NONE)
+ {
+ SET_ATTRIB(WGL_CONTEXT_RELEASE_BEHAVIOR_ARB,
+ WGL_CONTEXT_RELEASE_BEHAVIOR_NONE_ARB);
+ }
+ else if (ctxconfig->release == GLFW_RELEASE_BEHAVIOR_FLUSH)
+ {
+ SET_ATTRIB(WGL_CONTEXT_RELEASE_BEHAVIOR_ARB,
+ WGL_CONTEXT_RELEASE_BEHAVIOR_FLUSH_ARB);
+ }
+ }
+ }
+
+ if (ctxconfig->noerror)
+ {
+ if (_glfw.wgl.ARB_create_context_no_error)
+ SET_ATTRIB(WGL_CONTEXT_OPENGL_NO_ERROR_ARB, GLFW_TRUE);
+ }
+
+ // NOTE: Only request an explicitly versioned context when necessary, as
+ // explicitly requesting version 1.0 does not always return the
+ // highest version supported by the driver
+ if (ctxconfig->major != 1 || ctxconfig->minor != 0)
+ {
+ SET_ATTRIB(WGL_CONTEXT_MAJOR_VERSION_ARB, ctxconfig->major);
+ SET_ATTRIB(WGL_CONTEXT_MINOR_VERSION_ARB, ctxconfig->minor);
+ }
+
+ if (flags)
+ SET_ATTRIB(WGL_CONTEXT_FLAGS_ARB, flags);
+
+ if (mask)
+ SET_ATTRIB(WGL_CONTEXT_PROFILE_MASK_ARB, mask);
+
+ SET_ATTRIB(0, 0);
+
+ window->context.wgl.handle =
+ wglCreateContextAttribsARB(window->context.wgl.dc, share, attribs);
+ if (!window->context.wgl.handle)
+ {
+ const DWORD error = GetLastError();
+
+ if (error == (0xc0070000 | ERROR_INVALID_VERSION_ARB))
+ {
+ if (ctxconfig->client == GLFW_OPENGL_API)
+ {
+ _glfwInputError(GLFW_VERSION_UNAVAILABLE,
+ "WGL: Driver does not support OpenGL version %i.%i",
+ ctxconfig->major,
+ ctxconfig->minor);
+ }
+ else
+ {
+ _glfwInputError(GLFW_VERSION_UNAVAILABLE,
+ "WGL: Driver does not support OpenGL ES version %i.%i",
+ ctxconfig->major,
+ ctxconfig->minor);
+ }
+ }
+ else if (error == (0xc0070000 | ERROR_INVALID_PROFILE_ARB))
+ {
+ _glfwInputError(GLFW_VERSION_UNAVAILABLE,
+ "WGL: Driver does not support the requested OpenGL profile");
+ }
+ else if (error == (0xc0070000 | ERROR_INCOMPATIBLE_DEVICE_CONTEXTS_ARB))
+ {
+ _glfwInputError(GLFW_INVALID_VALUE,
+ "WGL: The share context is not compatible with the requested context");
+ }
+ else
+ {
+ if (ctxconfig->client == GLFW_OPENGL_API)
+ {
+ _glfwInputError(GLFW_VERSION_UNAVAILABLE,
+ "WGL: Failed to create OpenGL context");
+ }
+ else
+ {
+ _glfwInputError(GLFW_VERSION_UNAVAILABLE,
+ "WGL: Failed to create OpenGL ES context");
+ }
+ }
+
+ return GLFW_FALSE;
+ }
+ }
+ else
+ {
+ window->context.wgl.handle = wglCreateContext(window->context.wgl.dc);
+ if (!window->context.wgl.handle)
+ {
+ _glfwInputErrorWin32(GLFW_VERSION_UNAVAILABLE,
+ "WGL: Failed to create OpenGL context");
+ return GLFW_FALSE;
+ }
+
+ if (share)
+ {
+ if (!wglShareLists(share, window->context.wgl.handle))
+ {
+ _glfwInputErrorWin32(GLFW_PLATFORM_ERROR,
+ "WGL: Failed to enable sharing with specified OpenGL context");
+ return GLFW_FALSE;
+ }
+ }
+ }
+
+ window->context.makeCurrent = makeContextCurrentWGL;
+ window->context.swapBuffers = swapBuffersWGL;
+ window->context.swapInterval = swapIntervalWGL;
+ window->context.extensionSupported = extensionSupportedWGL;
+ window->context.getProcAddress = getProcAddressWGL;
+ window->context.destroy = destroyContextWGL;
+
+ return GLFW_TRUE;
+}
+
+#undef SET_ATTRIB
+
+GLFWAPI HGLRC glfwGetWGLContext(GLFWwindow* handle)
+{
+ _GLFWwindow* window = (_GLFWwindow*) handle;
+ _GLFW_REQUIRE_INIT_OR_RETURN(NULL);
+
+ if (_glfw.platform.platformID != GLFW_PLATFORM_WIN32)
+ {
+ _glfwInputError(GLFW_PLATFORM_UNAVAILABLE,
+ "WGL: Platform not initialized");
+ return NULL;
+ }
+
+ if (window->context.source != GLFW_NATIVE_CONTEXT_API)
+ {
+ _glfwInputError(GLFW_NO_WINDOW_CONTEXT, NULL);
+ return NULL;
+ }
+
+ return window->context.wgl.handle;
+}
+
diff --git a/chromium/third_party/dawn/third_party/glfw/src/win32_init.c b/chromium/third_party/dawn/third_party/glfw/src/win32_init.c
new file mode 100644
index 00000000000..1cdc5b98d63
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/src/win32_init.c
@@ -0,0 +1,679 @@
+//========================================================================
+// GLFW 3.4 Win32 - www.glfw.org
+//------------------------------------------------------------------------
+// Copyright (c) 2002-2006 Marcus Geelnard
+// Copyright (c) 2006-2019 Camilla Löwy <elmindreda@glfw.org>
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would
+// be appreciated but is not required.
+//
+// 2. Altered source versions must be plainly marked as such, and must not
+// be misrepresented as being the original software.
+//
+// 3. This notice may not be removed or altered from any source
+// distribution.
+//
+//========================================================================
+// Please use C89 style variable declarations in this file because VS 2010
+//========================================================================
+
+#include "internal.h"
+
+#include <stdlib.h>
+
+static const GUID _glfw_GUID_DEVINTERFACE_HID =
+ {0x4d1e55b2,0xf16f,0x11cf,{0x88,0xcb,0x00,0x11,0x11,0x00,0x00,0x30}};
+
+#define GUID_DEVINTERFACE_HID _glfw_GUID_DEVINTERFACE_HID
+
+#if defined(_GLFW_USE_HYBRID_HPG) || defined(_GLFW_USE_OPTIMUS_HPG)
+
+#if defined(_GLFW_BUILD_DLL)
+ #pragma message("These symbols must be exported by the executable and have no effect in a DLL")
+#endif
+
+// Executables (but not DLLs) exporting this symbol with this value will be
+// automatically directed to the high-performance GPU on Nvidia Optimus systems
+// with up-to-date drivers
+//
+__declspec(dllexport) DWORD NvOptimusEnablement = 1;
+
+// Executables (but not DLLs) exporting this symbol with this value will be
+// automatically directed to the high-performance GPU on AMD PowerXpress systems
+// with up-to-date drivers
+//
+__declspec(dllexport) int AmdPowerXpressRequestHighPerformance = 1;
+
+#endif // _GLFW_USE_HYBRID_HPG
+
+#if defined(_GLFW_BUILD_DLL)
+
+// GLFW DLL entry point
+//
+BOOL WINAPI DllMain(HINSTANCE instance, DWORD reason, LPVOID reserved)
+{
+ return TRUE;
+}
+
+#endif // _GLFW_BUILD_DLL
+
+// Load necessary libraries (DLLs)
+//
+static GLFWbool loadLibraries(void)
+{
+ if (!GetModuleHandleExW(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS |
+ GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT,
+ (const WCHAR*) &_glfw,
+ (HMODULE*) &_glfw.win32.instance))
+ {
+ _glfwInputErrorWin32(GLFW_PLATFORM_ERROR,
+ "Win32: Failed to retrieve own module handle");
+ return GLFW_FALSE;
+ }
+
+ _glfw.win32.user32.instance = _glfwPlatformLoadModule("user32.dll");
+ if (!_glfw.win32.user32.instance)
+ {
+ _glfwInputErrorWin32(GLFW_PLATFORM_ERROR,
+ "Win32: Failed to load user32.dll");
+ return GLFW_FALSE;
+ }
+
+ _glfw.win32.user32.SetProcessDPIAware_ = (PFN_SetProcessDPIAware)
+ _glfwPlatformGetModuleSymbol(_glfw.win32.user32.instance, "SetProcessDPIAware");
+ _glfw.win32.user32.ChangeWindowMessageFilterEx_ = (PFN_ChangeWindowMessageFilterEx)
+ _glfwPlatformGetModuleSymbol(_glfw.win32.user32.instance, "ChangeWindowMessageFilterEx");
+ _glfw.win32.user32.EnableNonClientDpiScaling_ = (PFN_EnableNonClientDpiScaling)
+ _glfwPlatformGetModuleSymbol(_glfw.win32.user32.instance, "EnableNonClientDpiScaling");
+ _glfw.win32.user32.SetProcessDpiAwarenessContext_ = (PFN_SetProcessDpiAwarenessContext)
+ _glfwPlatformGetModuleSymbol(_glfw.win32.user32.instance, "SetProcessDpiAwarenessContext");
+ _glfw.win32.user32.GetDpiForWindow_ = (PFN_GetDpiForWindow)
+ _glfwPlatformGetModuleSymbol(_glfw.win32.user32.instance, "GetDpiForWindow");
+ _glfw.win32.user32.AdjustWindowRectExForDpi_ = (PFN_AdjustWindowRectExForDpi)
+ _glfwPlatformGetModuleSymbol(_glfw.win32.user32.instance, "AdjustWindowRectExForDpi");
+ _glfw.win32.user32.GetSystemMetricsForDpi_ = (PFN_GetSystemMetricsForDpi)
+ _glfwPlatformGetModuleSymbol(_glfw.win32.user32.instance, "GetSystemMetricsForDpi");
+
+ _glfw.win32.dinput8.instance = _glfwPlatformLoadModule("dinput8.dll");
+ if (_glfw.win32.dinput8.instance)
+ {
+ _glfw.win32.dinput8.Create = (PFN_DirectInput8Create)
+ _glfwPlatformGetModuleSymbol(_glfw.win32.dinput8.instance, "DirectInput8Create");
+ }
+
+ {
+ int i;
+ const char* names[] =
+ {
+ "xinput1_4.dll",
+ "xinput1_3.dll",
+ "xinput9_1_0.dll",
+ "xinput1_2.dll",
+ "xinput1_1.dll",
+ NULL
+ };
+
+ for (i = 0; names[i]; i++)
+ {
+ _glfw.win32.xinput.instance = _glfwPlatformLoadModule(names[i]);
+ if (_glfw.win32.xinput.instance)
+ {
+ _glfw.win32.xinput.GetCapabilities = (PFN_XInputGetCapabilities)
+ _glfwPlatformGetModuleSymbol(_glfw.win32.xinput.instance, "XInputGetCapabilities");
+ _glfw.win32.xinput.GetState = (PFN_XInputGetState)
+ _glfwPlatformGetModuleSymbol(_glfw.win32.xinput.instance, "XInputGetState");
+
+ break;
+ }
+ }
+ }
+
+ _glfw.win32.dwmapi.instance = _glfwPlatformLoadModule("dwmapi.dll");
+ if (_glfw.win32.dwmapi.instance)
+ {
+ _glfw.win32.dwmapi.IsCompositionEnabled = (PFN_DwmIsCompositionEnabled)
+ _glfwPlatformGetModuleSymbol(_glfw.win32.dwmapi.instance, "DwmIsCompositionEnabled");
+ _glfw.win32.dwmapi.Flush = (PFN_DwmFlush)
+ _glfwPlatformGetModuleSymbol(_glfw.win32.dwmapi.instance, "DwmFlush");
+ _glfw.win32.dwmapi.EnableBlurBehindWindow = (PFN_DwmEnableBlurBehindWindow)
+ _glfwPlatformGetModuleSymbol(_glfw.win32.dwmapi.instance, "DwmEnableBlurBehindWindow");
+ _glfw.win32.dwmapi.GetColorizationColor = (PFN_DwmGetColorizationColor)
+ _glfwPlatformGetModuleSymbol(_glfw.win32.dwmapi.instance, "DwmGetColorizationColor");
+ }
+
+ _glfw.win32.shcore.instance = _glfwPlatformLoadModule("shcore.dll");
+ if (_glfw.win32.shcore.instance)
+ {
+ _glfw.win32.shcore.SetProcessDpiAwareness_ = (PFN_SetProcessDpiAwareness)
+ _glfwPlatformGetModuleSymbol(_glfw.win32.shcore.instance, "SetProcessDpiAwareness");
+ _glfw.win32.shcore.GetDpiForMonitor_ = (PFN_GetDpiForMonitor)
+ _glfwPlatformGetModuleSymbol(_glfw.win32.shcore.instance, "GetDpiForMonitor");
+ }
+
+ _glfw.win32.ntdll.instance = _glfwPlatformLoadModule("ntdll.dll");
+ if (_glfw.win32.ntdll.instance)
+ {
+ _glfw.win32.ntdll.RtlVerifyVersionInfo_ = (PFN_RtlVerifyVersionInfo)
+ _glfwPlatformGetModuleSymbol(_glfw.win32.ntdll.instance, "RtlVerifyVersionInfo");
+ }
+
+ return GLFW_TRUE;
+}
+
+// Unload used libraries (DLLs)
+//
+static void freeLibraries(void)
+{
+ if (_glfw.win32.xinput.instance)
+ _glfwPlatformFreeModule(_glfw.win32.xinput.instance);
+
+ if (_glfw.win32.dinput8.instance)
+ _glfwPlatformFreeModule(_glfw.win32.dinput8.instance);
+
+ if (_glfw.win32.user32.instance)
+ _glfwPlatformFreeModule(_glfw.win32.user32.instance);
+
+ if (_glfw.win32.dwmapi.instance)
+ _glfwPlatformFreeModule(_glfw.win32.dwmapi.instance);
+
+ if (_glfw.win32.shcore.instance)
+ _glfwPlatformFreeModule(_glfw.win32.shcore.instance);
+
+ if (_glfw.win32.ntdll.instance)
+ _glfwPlatformFreeModule(_glfw.win32.ntdll.instance);
+}
+
+// Create key code translation tables
+//
+static void createKeyTables(void)
+{
+ int scancode;
+
+ memset(_glfw.win32.keycodes, -1, sizeof(_glfw.win32.keycodes));
+ memset(_glfw.win32.scancodes, -1, sizeof(_glfw.win32.scancodes));
+
+ _glfw.win32.keycodes[0x00B] = GLFW_KEY_0;
+ _glfw.win32.keycodes[0x002] = GLFW_KEY_1;
+ _glfw.win32.keycodes[0x003] = GLFW_KEY_2;
+ _glfw.win32.keycodes[0x004] = GLFW_KEY_3;
+ _glfw.win32.keycodes[0x005] = GLFW_KEY_4;
+ _glfw.win32.keycodes[0x006] = GLFW_KEY_5;
+ _glfw.win32.keycodes[0x007] = GLFW_KEY_6;
+ _glfw.win32.keycodes[0x008] = GLFW_KEY_7;
+ _glfw.win32.keycodes[0x009] = GLFW_KEY_8;
+ _glfw.win32.keycodes[0x00A] = GLFW_KEY_9;
+ _glfw.win32.keycodes[0x01E] = GLFW_KEY_A;
+ _glfw.win32.keycodes[0x030] = GLFW_KEY_B;
+ _glfw.win32.keycodes[0x02E] = GLFW_KEY_C;
+ _glfw.win32.keycodes[0x020] = GLFW_KEY_D;
+ _glfw.win32.keycodes[0x012] = GLFW_KEY_E;
+ _glfw.win32.keycodes[0x021] = GLFW_KEY_F;
+ _glfw.win32.keycodes[0x022] = GLFW_KEY_G;
+ _glfw.win32.keycodes[0x023] = GLFW_KEY_H;
+ _glfw.win32.keycodes[0x017] = GLFW_KEY_I;
+ _glfw.win32.keycodes[0x024] = GLFW_KEY_J;
+ _glfw.win32.keycodes[0x025] = GLFW_KEY_K;
+ _glfw.win32.keycodes[0x026] = GLFW_KEY_L;
+ _glfw.win32.keycodes[0x032] = GLFW_KEY_M;
+ _glfw.win32.keycodes[0x031] = GLFW_KEY_N;
+ _glfw.win32.keycodes[0x018] = GLFW_KEY_O;
+ _glfw.win32.keycodes[0x019] = GLFW_KEY_P;
+ _glfw.win32.keycodes[0x010] = GLFW_KEY_Q;
+ _glfw.win32.keycodes[0x013] = GLFW_KEY_R;
+ _glfw.win32.keycodes[0x01F] = GLFW_KEY_S;
+ _glfw.win32.keycodes[0x014] = GLFW_KEY_T;
+ _glfw.win32.keycodes[0x016] = GLFW_KEY_U;
+ _glfw.win32.keycodes[0x02F] = GLFW_KEY_V;
+ _glfw.win32.keycodes[0x011] = GLFW_KEY_W;
+ _glfw.win32.keycodes[0x02D] = GLFW_KEY_X;
+ _glfw.win32.keycodes[0x015] = GLFW_KEY_Y;
+ _glfw.win32.keycodes[0x02C] = GLFW_KEY_Z;
+
+ _glfw.win32.keycodes[0x028] = GLFW_KEY_APOSTROPHE;
+ _glfw.win32.keycodes[0x02B] = GLFW_KEY_BACKSLASH;
+ _glfw.win32.keycodes[0x033] = GLFW_KEY_COMMA;
+ _glfw.win32.keycodes[0x00D] = GLFW_KEY_EQUAL;
+ _glfw.win32.keycodes[0x029] = GLFW_KEY_GRAVE_ACCENT;
+ _glfw.win32.keycodes[0x01A] = GLFW_KEY_LEFT_BRACKET;
+ _glfw.win32.keycodes[0x00C] = GLFW_KEY_MINUS;
+ _glfw.win32.keycodes[0x034] = GLFW_KEY_PERIOD;
+ _glfw.win32.keycodes[0x01B] = GLFW_KEY_RIGHT_BRACKET;
+ _glfw.win32.keycodes[0x027] = GLFW_KEY_SEMICOLON;
+ _glfw.win32.keycodes[0x035] = GLFW_KEY_SLASH;
+ _glfw.win32.keycodes[0x056] = GLFW_KEY_WORLD_2;
+
+ _glfw.win32.keycodes[0x00E] = GLFW_KEY_BACKSPACE;
+ _glfw.win32.keycodes[0x153] = GLFW_KEY_DELETE;
+ _glfw.win32.keycodes[0x14F] = GLFW_KEY_END;
+ _glfw.win32.keycodes[0x01C] = GLFW_KEY_ENTER;
+ _glfw.win32.keycodes[0x001] = GLFW_KEY_ESCAPE;
+ _glfw.win32.keycodes[0x147] = GLFW_KEY_HOME;
+ _glfw.win32.keycodes[0x152] = GLFW_KEY_INSERT;
+ _glfw.win32.keycodes[0x15D] = GLFW_KEY_MENU;
+ _glfw.win32.keycodes[0x151] = GLFW_KEY_PAGE_DOWN;
+ _glfw.win32.keycodes[0x149] = GLFW_KEY_PAGE_UP;
+ _glfw.win32.keycodes[0x045] = GLFW_KEY_PAUSE;
+ _glfw.win32.keycodes[0x039] = GLFW_KEY_SPACE;
+ _glfw.win32.keycodes[0x00F] = GLFW_KEY_TAB;
+ _glfw.win32.keycodes[0x03A] = GLFW_KEY_CAPS_LOCK;
+ _glfw.win32.keycodes[0x145] = GLFW_KEY_NUM_LOCK;
+ _glfw.win32.keycodes[0x046] = GLFW_KEY_SCROLL_LOCK;
+ _glfw.win32.keycodes[0x03B] = GLFW_KEY_F1;
+ _glfw.win32.keycodes[0x03C] = GLFW_KEY_F2;
+ _glfw.win32.keycodes[0x03D] = GLFW_KEY_F3;
+ _glfw.win32.keycodes[0x03E] = GLFW_KEY_F4;
+ _glfw.win32.keycodes[0x03F] = GLFW_KEY_F5;
+ _glfw.win32.keycodes[0x040] = GLFW_KEY_F6;
+ _glfw.win32.keycodes[0x041] = GLFW_KEY_F7;
+ _glfw.win32.keycodes[0x042] = GLFW_KEY_F8;
+ _glfw.win32.keycodes[0x043] = GLFW_KEY_F9;
+ _glfw.win32.keycodes[0x044] = GLFW_KEY_F10;
+ _glfw.win32.keycodes[0x057] = GLFW_KEY_F11;
+ _glfw.win32.keycodes[0x058] = GLFW_KEY_F12;
+ _glfw.win32.keycodes[0x064] = GLFW_KEY_F13;
+ _glfw.win32.keycodes[0x065] = GLFW_KEY_F14;
+ _glfw.win32.keycodes[0x066] = GLFW_KEY_F15;
+ _glfw.win32.keycodes[0x067] = GLFW_KEY_F16;
+ _glfw.win32.keycodes[0x068] = GLFW_KEY_F17;
+ _glfw.win32.keycodes[0x069] = GLFW_KEY_F18;
+ _glfw.win32.keycodes[0x06A] = GLFW_KEY_F19;
+ _glfw.win32.keycodes[0x06B] = GLFW_KEY_F20;
+ _glfw.win32.keycodes[0x06C] = GLFW_KEY_F21;
+ _glfw.win32.keycodes[0x06D] = GLFW_KEY_F22;
+ _glfw.win32.keycodes[0x06E] = GLFW_KEY_F23;
+ _glfw.win32.keycodes[0x076] = GLFW_KEY_F24;
+ _glfw.win32.keycodes[0x038] = GLFW_KEY_LEFT_ALT;
+ _glfw.win32.keycodes[0x01D] = GLFW_KEY_LEFT_CONTROL;
+ _glfw.win32.keycodes[0x02A] = GLFW_KEY_LEFT_SHIFT;
+ _glfw.win32.keycodes[0x15B] = GLFW_KEY_LEFT_SUPER;
+ _glfw.win32.keycodes[0x137] = GLFW_KEY_PRINT_SCREEN;
+ _glfw.win32.keycodes[0x138] = GLFW_KEY_RIGHT_ALT;
+ _glfw.win32.keycodes[0x11D] = GLFW_KEY_RIGHT_CONTROL;
+ _glfw.win32.keycodes[0x036] = GLFW_KEY_RIGHT_SHIFT;
+ _glfw.win32.keycodes[0x15C] = GLFW_KEY_RIGHT_SUPER;
+ _glfw.win32.keycodes[0x150] = GLFW_KEY_DOWN;
+ _glfw.win32.keycodes[0x14B] = GLFW_KEY_LEFT;
+ _glfw.win32.keycodes[0x14D] = GLFW_KEY_RIGHT;
+ _glfw.win32.keycodes[0x148] = GLFW_KEY_UP;
+
+ _glfw.win32.keycodes[0x052] = GLFW_KEY_KP_0;
+ _glfw.win32.keycodes[0x04F] = GLFW_KEY_KP_1;
+ _glfw.win32.keycodes[0x050] = GLFW_KEY_KP_2;
+ _glfw.win32.keycodes[0x051] = GLFW_KEY_KP_3;
+ _glfw.win32.keycodes[0x04B] = GLFW_KEY_KP_4;
+ _glfw.win32.keycodes[0x04C] = GLFW_KEY_KP_5;
+ _glfw.win32.keycodes[0x04D] = GLFW_KEY_KP_6;
+ _glfw.win32.keycodes[0x047] = GLFW_KEY_KP_7;
+ _glfw.win32.keycodes[0x048] = GLFW_KEY_KP_8;
+ _glfw.win32.keycodes[0x049] = GLFW_KEY_KP_9;
+ _glfw.win32.keycodes[0x04E] = GLFW_KEY_KP_ADD;
+ _glfw.win32.keycodes[0x053] = GLFW_KEY_KP_DECIMAL;
+ _glfw.win32.keycodes[0x135] = GLFW_KEY_KP_DIVIDE;
+ _glfw.win32.keycodes[0x11C] = GLFW_KEY_KP_ENTER;
+ _glfw.win32.keycodes[0x059] = GLFW_KEY_KP_EQUAL;
+ _glfw.win32.keycodes[0x037] = GLFW_KEY_KP_MULTIPLY;
+ _glfw.win32.keycodes[0x04A] = GLFW_KEY_KP_SUBTRACT;
+
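+    // Build the reverse table mapping GLFW key tokens back to scancodes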
+ for (scancode = 0; scancode < 512; scancode++)
+ {
+ if (_glfw.win32.keycodes[scancode] > 0)
+ _glfw.win32.scancodes[_glfw.win32.keycodes[scancode]] = scancode;
+ }
+}
+
+// Creates a dummy window for behind-the-scenes work
+//
+static GLFWbool createHelperWindow(void)
+{
+ MSG msg;
+
+ _glfw.win32.helperWindowHandle =
+ CreateWindowExW(WS_EX_OVERLAPPEDWINDOW,
+ _GLFW_WNDCLASSNAME,
+ L"GLFW message window",
+ WS_CLIPSIBLINGS | WS_CLIPCHILDREN,
+ 0, 0, 1, 1,
+ NULL, NULL,
+ _glfw.win32.instance,
+ NULL);
+
+ if (!_glfw.win32.helperWindowHandle)
+ {
+ _glfwInputErrorWin32(GLFW_PLATFORM_ERROR,
+ "Win32: Failed to create helper window");
+ return GLFW_FALSE;
+ }
+
+ // HACK: The command to the first ShowWindow call is ignored if the parent
+ // process passed along a STARTUPINFO, so clear that with a no-op call
+ ShowWindow(_glfw.win32.helperWindowHandle, SW_HIDE);
+
+ // Register for HID device notifications
+ {
+ DEV_BROADCAST_DEVICEINTERFACE_W dbi;
+ ZeroMemory(&dbi, sizeof(dbi));
+ dbi.dbcc_size = sizeof(dbi);
+ dbi.dbcc_devicetype = DBT_DEVTYP_DEVICEINTERFACE;
+ dbi.dbcc_classguid = GUID_DEVINTERFACE_HID;
+
+ _glfw.win32.deviceNotificationHandle =
+ RegisterDeviceNotificationW(_glfw.win32.helperWindowHandle,
+ (DEV_BROADCAST_HDR*) &dbi,
+ DEVICE_NOTIFY_WINDOW_HANDLE);
+ }
+
+ while (PeekMessageW(&msg, _glfw.win32.helperWindowHandle, 0, 0, PM_REMOVE))
+ {
+ TranslateMessage(&msg);
+ DispatchMessageW(&msg);
+ }
+
+ return GLFW_TRUE;
+}
+
+
+//////////////////////////////////////////////////////////////////////////
+////// GLFW internal API //////
+//////////////////////////////////////////////////////////////////////////
+
+// Returns a wide string version of the specified UTF-8 string
+//
+WCHAR* _glfwCreateWideStringFromUTF8Win32(const char* source)
+{
+ WCHAR* target;
+ int count;
+
+ count = MultiByteToWideChar(CP_UTF8, 0, source, -1, NULL, 0);
+ if (!count)
+ {
+ _glfwInputErrorWin32(GLFW_PLATFORM_ERROR,
+ "Win32: Failed to convert string from UTF-8");
+ return NULL;
+ }
+
+ target = _glfw_calloc(count, sizeof(WCHAR));
+
+ if (!MultiByteToWideChar(CP_UTF8, 0, source, -1, target, count))
+ {
+ _glfwInputErrorWin32(GLFW_PLATFORM_ERROR,
+ "Win32: Failed to convert string from UTF-8");
+ _glfw_free(target);
+ return NULL;
+ }
+
+ return target;
+}
+
+// Returns a UTF-8 string version of the specified wide string
+//
+char* _glfwCreateUTF8FromWideStringWin32(const WCHAR* source)
+{
+ char* target;
+ int size;
+
+ size = WideCharToMultiByte(CP_UTF8, 0, source, -1, NULL, 0, NULL, NULL);
+ if (!size)
+ {
+ _glfwInputErrorWin32(GLFW_PLATFORM_ERROR,
+ "Win32: Failed to convert string to UTF-8");
+ return NULL;
+ }
+
+ target = _glfw_calloc(size, 1);
+
+ if (!WideCharToMultiByte(CP_UTF8, 0, source, -1, target, size, NULL, NULL))
+ {
+ _glfwInputErrorWin32(GLFW_PLATFORM_ERROR,
+ "Win32: Failed to convert string to UTF-8");
+ _glfw_free(target);
+ return NULL;
+ }
+
+ return target;
+}
+
+// Reports the specified error, appending information about the last Win32 error
+//
+void _glfwInputErrorWin32(int error, const char* description)
+{
+ WCHAR buffer[_GLFW_MESSAGE_SIZE] = L"";
+ char message[_GLFW_MESSAGE_SIZE] = "";
+
+ FormatMessageW(FORMAT_MESSAGE_FROM_SYSTEM |
+ FORMAT_MESSAGE_IGNORE_INSERTS |
+ FORMAT_MESSAGE_MAX_WIDTH_MASK,
+ NULL,
+ GetLastError() & 0xffff,
+ MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
+ buffer,
+ sizeof(buffer) / sizeof(WCHAR),
+ NULL);
+ WideCharToMultiByte(CP_UTF8, 0, buffer, -1, message, sizeof(message), NULL, NULL);
+
+ _glfwInputError(error, "%s: %s", description, message);
+}
+
+// Updates key names according to the current keyboard layout
+//
+void _glfwUpdateKeyNamesWin32(void)
+{
+ int key;
+ BYTE state[256] = {0};
+
+ memset(_glfw.win32.keynames, 0, sizeof(_glfw.win32.keynames));
+
+ for (key = GLFW_KEY_SPACE; key <= GLFW_KEY_LAST; key++)
+ {
+ UINT vk;
+ int scancode, length;
+ WCHAR chars[16];
+
+ scancode = _glfw.win32.scancodes[key];
+ if (scancode == -1)
+ continue;
+
+ if (key >= GLFW_KEY_KP_0 && key <= GLFW_KEY_KP_ADD)
+ {
+ const UINT vks[] = {
+ VK_NUMPAD0, VK_NUMPAD1, VK_NUMPAD2, VK_NUMPAD3,
+ VK_NUMPAD4, VK_NUMPAD5, VK_NUMPAD6, VK_NUMPAD7,
+ VK_NUMPAD8, VK_NUMPAD9, VK_DECIMAL, VK_DIVIDE,
+ VK_MULTIPLY, VK_SUBTRACT, VK_ADD
+ };
+
+ vk = vks[key - GLFW_KEY_KP_0];
+ }
+ else
+ vk = MapVirtualKeyW(scancode, MAPVK_VSC_TO_VK);
+
+ length = ToUnicode(vk, scancode, state,
+ chars, sizeof(chars) / sizeof(WCHAR),
+ 0);
+
+ if (length == -1)
+ {
+ // This is a dead key, so we need a second simulated key press
+ // to make it output its own character (usually a diacritic)
+ length = ToUnicode(vk, scancode, state,
+ chars, sizeof(chars) / sizeof(WCHAR),
+ 0);
+ }
+
+ if (length < 1)
+ continue;
+
+ WideCharToMultiByte(CP_UTF8, 0, chars, 1,
+ _glfw.win32.keynames[key],
+ sizeof(_glfw.win32.keynames[key]),
+ NULL, NULL);
+ }
+}
+
+// Replacement for IsWindowsVersionOrGreater, as we cannot rely on the
+// application having a correct embedded manifest
+//
+BOOL _glfwIsWindowsVersionOrGreaterWin32(WORD major, WORD minor, WORD sp)
+{
+ OSVERSIONINFOEXW osvi = { sizeof(osvi), major, minor, 0, 0, {0}, sp };
+ DWORD mask = VER_MAJORVERSION | VER_MINORVERSION | VER_SERVICEPACKMAJOR;
+ ULONGLONG cond = VerSetConditionMask(0, VER_MAJORVERSION, VER_GREATER_EQUAL);
+ cond = VerSetConditionMask(cond, VER_MINORVERSION, VER_GREATER_EQUAL);
+ cond = VerSetConditionMask(cond, VER_SERVICEPACKMAJOR, VER_GREATER_EQUAL);
+ // HACK: Use RtlVerifyVersionInfo instead of VerifyVersionInfoW as the
+ // latter lies unless the user knew to embed a non-default manifest
+ // announcing support for Windows 10 via supportedOS GUID
+ return RtlVerifyVersionInfo(&osvi, mask, cond) == 0;
+}
+
+// Checks whether we are on at least the specified build of Windows 10
+//
+BOOL _glfwIsWindows10BuildOrGreaterWin32(WORD build)
+{
+ OSVERSIONINFOEXW osvi = { sizeof(osvi), 10, 0, build };
+ DWORD mask = VER_MAJORVERSION | VER_MINORVERSION | VER_BUILDNUMBER;
+ ULONGLONG cond = VerSetConditionMask(0, VER_MAJORVERSION, VER_GREATER_EQUAL);
+ cond = VerSetConditionMask(cond, VER_MINORVERSION, VER_GREATER_EQUAL);
+ cond = VerSetConditionMask(cond, VER_BUILDNUMBER, VER_GREATER_EQUAL);
+ // HACK: Use RtlVerifyVersionInfo instead of VerifyVersionInfoW as the
+ // latter lies unless the user knew to embed a non-default manifest
+ // announcing support for Windows 10 via supportedOS GUID
+ return RtlVerifyVersionInfo(&osvi, mask, cond) == 0;
+}
+
+GLFWbool _glfwConnectWin32(int platformID, _GLFWplatform* platform)
+{
+ const _GLFWplatform win32 =
+ {
+ GLFW_PLATFORM_WIN32,
+ _glfwInitWin32,
+ _glfwTerminateWin32,
+ _glfwGetCursorPosWin32,
+ _glfwSetCursorPosWin32,
+ _glfwSetCursorModeWin32,
+ _glfwSetRawMouseMotionWin32,
+ _glfwRawMouseMotionSupportedWin32,
+ _glfwCreateCursorWin32,
+ _glfwCreateStandardCursorWin32,
+ _glfwDestroyCursorWin32,
+ _glfwSetCursorWin32,
+ _glfwGetScancodeNameWin32,
+ _glfwGetKeyScancodeWin32,
+ _glfwSetClipboardStringWin32,
+ _glfwGetClipboardStringWin32,
+ _glfwInitJoysticksWin32,
+ _glfwTerminateJoysticksWin32,
+ _glfwPollJoystickWin32,
+ _glfwGetMappingNameWin32,
+ _glfwUpdateGamepadGUIDWin32,
+ _glfwFreeMonitorWin32,
+ _glfwGetMonitorPosWin32,
+ _glfwGetMonitorContentScaleWin32,
+ _glfwGetMonitorWorkareaWin32,
+ _glfwGetVideoModesWin32,
+ _glfwGetVideoModeWin32,
+ _glfwGetGammaRampWin32,
+ _glfwSetGammaRampWin32,
+ _glfwCreateWindowWin32,
+ _glfwDestroyWindowWin32,
+ _glfwSetWindowTitleWin32,
+ _glfwSetWindowIconWin32,
+ _glfwGetWindowPosWin32,
+ _glfwSetWindowPosWin32,
+ _glfwGetWindowSizeWin32,
+ _glfwSetWindowSizeWin32,
+ _glfwSetWindowSizeLimitsWin32,
+ _glfwSetWindowAspectRatioWin32,
+ _glfwGetFramebufferSizeWin32,
+ _glfwGetWindowFrameSizeWin32,
+ _glfwGetWindowContentScaleWin32,
+ _glfwIconifyWindowWin32,
+ _glfwRestoreWindowWin32,
+ _glfwMaximizeWindowWin32,
+ _glfwShowWindowWin32,
+ _glfwHideWindowWin32,
+ _glfwRequestWindowAttentionWin32,
+ _glfwFocusWindowWin32,
+ _glfwSetWindowMonitorWin32,
+ _glfwWindowFocusedWin32,
+ _glfwWindowIconifiedWin32,
+ _glfwWindowVisibleWin32,
+ _glfwWindowMaximizedWin32,
+ _glfwWindowHoveredWin32,
+ _glfwFramebufferTransparentWin32,
+ _glfwGetWindowOpacityWin32,
+ _glfwSetWindowResizableWin32,
+ _glfwSetWindowDecoratedWin32,
+ _glfwSetWindowFloatingWin32,
+ _glfwSetWindowOpacityWin32,
+ _glfwSetWindowMousePassthroughWin32,
+ _glfwPollEventsWin32,
+ _glfwWaitEventsWin32,
+ _glfwWaitEventsTimeoutWin32,
+ _glfwPostEmptyEventWin32,
+ _glfwGetEGLPlatformWin32,
+ _glfwGetEGLNativeDisplayWin32,
+ _glfwGetEGLNativeWindowWin32,
+ _glfwGetRequiredInstanceExtensionsWin32,
+ _glfwGetPhysicalDevicePresentationSupportWin32,
+ _glfwCreateWindowSurfaceWin32,
+ };
+
+ *platform = win32;
+ return GLFW_TRUE;
+}
+
+int _glfwInitWin32(void)
+{
+ if (!loadLibraries())
+ return GLFW_FALSE;
+
+ createKeyTables();
+ _glfwUpdateKeyNamesWin32();
+
+ if (_glfwIsWindows10Version1703OrGreaterWin32())
+ SetProcessDpiAwarenessContext(DPI_AWARENESS_CONTEXT_PER_MONITOR_AWARE_V2);
+ else if (IsWindows8Point1OrGreater())
+ SetProcessDpiAwareness(PROCESS_PER_MONITOR_DPI_AWARE);
+ else if (IsWindowsVistaOrGreater())
+ SetProcessDPIAware();
+
+ if (!_glfwRegisterWindowClassWin32())
+ return GLFW_FALSE;
+
+ if (!createHelperWindow())
+ return GLFW_FALSE;
+
+ _glfwPollMonitorsWin32();
+ return GLFW_TRUE;
+}
+
+void _glfwTerminateWin32(void)
+{
+ if (_glfw.win32.deviceNotificationHandle)
+ UnregisterDeviceNotification(_glfw.win32.deviceNotificationHandle);
+
+ if (_glfw.win32.helperWindowHandle)
+ DestroyWindow(_glfw.win32.helperWindowHandle);
+
+ _glfwUnregisterWindowClassWin32();
+
+ _glfw_free(_glfw.win32.clipboardString);
+ _glfw_free(_glfw.win32.rawInput);
+
+ _glfwTerminateWGL();
+ _glfwTerminateEGL();
+ _glfwTerminateOSMesa();
+
+ freeLibraries();
+}
+
diff --git a/chromium/third_party/dawn/third_party/glfw/src/win32_joystick.c b/chromium/third_party/dawn/third_party/glfw/src/win32_joystick.c
new file mode 100644
index 00000000000..7eb9b203589
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/src/win32_joystick.c
@@ -0,0 +1,758 @@
+//========================================================================
+// GLFW 3.4 Win32 - www.glfw.org
+//------------------------------------------------------------------------
+// Copyright (c) 2002-2006 Marcus Geelnard
+// Copyright (c) 2006-2019 Camilla Löwy <elmindreda@glfw.org>
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would
+// be appreciated but is not required.
+//
+// 2. Altered source versions must be plainly marked as such, and must not
+// be misrepresented as being the original software.
+//
+// 3. This notice may not be removed or altered from any source
+// distribution.
+//
+//========================================================================
+// Please use C89 style variable declarations in this file because VS 2010
+//========================================================================
+
+#include "internal.h"
+
+#include <stdio.h>
+#include <math.h>
+
+#define _GLFW_TYPE_AXIS 0
+#define _GLFW_TYPE_SLIDER 1
+#define _GLFW_TYPE_BUTTON 2
+#define _GLFW_TYPE_POV 3
+
+// Data produced with DirectInput device object enumeration
+//
+typedef struct _GLFWobjenumWin32
+{
+ IDirectInputDevice8W* device;
+ _GLFWjoyobjectWin32* objects;
+ int objectCount;
+ int axisCount;
+ int sliderCount;
+ int buttonCount;
+ int povCount;
+} _GLFWobjenumWin32;
+
+// Define local copies of the necessary GUIDs
+//
+static const GUID _glfw_IID_IDirectInput8W =
+ {0xbf798031,0x483a,0x4da2,{0xaa,0x99,0x5d,0x64,0xed,0x36,0x97,0x00}};
+static const GUID _glfw_GUID_XAxis =
+ {0xa36d02e0,0xc9f3,0x11cf,{0xbf,0xc7,0x44,0x45,0x53,0x54,0x00,0x00}};
+static const GUID _glfw_GUID_YAxis =
+ {0xa36d02e1,0xc9f3,0x11cf,{0xbf,0xc7,0x44,0x45,0x53,0x54,0x00,0x00}};
+static const GUID _glfw_GUID_ZAxis =
+ {0xa36d02e2,0xc9f3,0x11cf,{0xbf,0xc7,0x44,0x45,0x53,0x54,0x00,0x00}};
+static const GUID _glfw_GUID_RxAxis =
+ {0xa36d02f4,0xc9f3,0x11cf,{0xbf,0xc7,0x44,0x45,0x53,0x54,0x00,0x00}};
+static const GUID _glfw_GUID_RyAxis =
+ {0xa36d02f5,0xc9f3,0x11cf,{0xbf,0xc7,0x44,0x45,0x53,0x54,0x00,0x00}};
+static const GUID _glfw_GUID_RzAxis =
+ {0xa36d02e3,0xc9f3,0x11cf,{0xbf,0xc7,0x44,0x45,0x53,0x54,0x00,0x00}};
+static const GUID _glfw_GUID_Slider =
+ {0xa36d02e4,0xc9f3,0x11cf,{0xbf,0xc7,0x44,0x45,0x53,0x54,0x00,0x00}};
+static const GUID _glfw_GUID_POV =
+ {0xa36d02f2,0xc9f3,0x11cf,{0xbf,0xc7,0x44,0x45,0x53,0x54,0x00,0x00}};
+
+#define IID_IDirectInput8W _glfw_IID_IDirectInput8W
+#define GUID_XAxis _glfw_GUID_XAxis
+#define GUID_YAxis _glfw_GUID_YAxis
+#define GUID_ZAxis _glfw_GUID_ZAxis
+#define GUID_RxAxis _glfw_GUID_RxAxis
+#define GUID_RyAxis _glfw_GUID_RyAxis
+#define GUID_RzAxis _glfw_GUID_RzAxis
+#define GUID_Slider _glfw_GUID_Slider
+#define GUID_POV _glfw_GUID_POV
+
+// Object data array for our clone of c_dfDIJoystick
+// Generated with https://github.com/elmindreda/c_dfDIJoystick2
+//
+static DIOBJECTDATAFORMAT _glfwObjectDataFormats[] =
+{
+ { &GUID_XAxis,DIJOFS_X,DIDFT_AXIS|DIDFT_OPTIONAL|DIDFT_ANYINSTANCE,DIDOI_ASPECTPOSITION },
+ { &GUID_YAxis,DIJOFS_Y,DIDFT_AXIS|DIDFT_OPTIONAL|DIDFT_ANYINSTANCE,DIDOI_ASPECTPOSITION },
+ { &GUID_ZAxis,DIJOFS_Z,DIDFT_AXIS|DIDFT_OPTIONAL|DIDFT_ANYINSTANCE,DIDOI_ASPECTPOSITION },
+ { &GUID_RxAxis,DIJOFS_RX,DIDFT_AXIS|DIDFT_OPTIONAL|DIDFT_ANYINSTANCE,DIDOI_ASPECTPOSITION },
+ { &GUID_RyAxis,DIJOFS_RY,DIDFT_AXIS|DIDFT_OPTIONAL|DIDFT_ANYINSTANCE,DIDOI_ASPECTPOSITION },
+ { &GUID_RzAxis,DIJOFS_RZ,DIDFT_AXIS|DIDFT_OPTIONAL|DIDFT_ANYINSTANCE,DIDOI_ASPECTPOSITION },
+ { &GUID_Slider,DIJOFS_SLIDER(0),DIDFT_AXIS|DIDFT_OPTIONAL|DIDFT_ANYINSTANCE,DIDOI_ASPECTPOSITION },
+ { &GUID_Slider,DIJOFS_SLIDER(1),DIDFT_AXIS|DIDFT_OPTIONAL|DIDFT_ANYINSTANCE,DIDOI_ASPECTPOSITION },
+ { &GUID_POV,DIJOFS_POV(0),DIDFT_POV|DIDFT_OPTIONAL|DIDFT_ANYINSTANCE,0 },
+ { &GUID_POV,DIJOFS_POV(1),DIDFT_POV|DIDFT_OPTIONAL|DIDFT_ANYINSTANCE,0 },
+ { &GUID_POV,DIJOFS_POV(2),DIDFT_POV|DIDFT_OPTIONAL|DIDFT_ANYINSTANCE,0 },
+ { &GUID_POV,DIJOFS_POV(3),DIDFT_POV|DIDFT_OPTIONAL|DIDFT_ANYINSTANCE,0 },
+ { NULL,DIJOFS_BUTTON(0),DIDFT_BUTTON|DIDFT_OPTIONAL|DIDFT_ANYINSTANCE,0 },
+ { NULL,DIJOFS_BUTTON(1),DIDFT_BUTTON|DIDFT_OPTIONAL|DIDFT_ANYINSTANCE,0 },
+ { NULL,DIJOFS_BUTTON(2),DIDFT_BUTTON|DIDFT_OPTIONAL|DIDFT_ANYINSTANCE,0 },
+ { NULL,DIJOFS_BUTTON(3),DIDFT_BUTTON|DIDFT_OPTIONAL|DIDFT_ANYINSTANCE,0 },
+ { NULL,DIJOFS_BUTTON(4),DIDFT_BUTTON|DIDFT_OPTIONAL|DIDFT_ANYINSTANCE,0 },
+ { NULL,DIJOFS_BUTTON(5),DIDFT_BUTTON|DIDFT_OPTIONAL|DIDFT_ANYINSTANCE,0 },
+ { NULL,DIJOFS_BUTTON(6),DIDFT_BUTTON|DIDFT_OPTIONAL|DIDFT_ANYINSTANCE,0 },
+ { NULL,DIJOFS_BUTTON(7),DIDFT_BUTTON|DIDFT_OPTIONAL|DIDFT_ANYINSTANCE,0 },
+ { NULL,DIJOFS_BUTTON(8),DIDFT_BUTTON|DIDFT_OPTIONAL|DIDFT_ANYINSTANCE,0 },
+ { NULL,DIJOFS_BUTTON(9),DIDFT_BUTTON|DIDFT_OPTIONAL|DIDFT_ANYINSTANCE,0 },
+ { NULL,DIJOFS_BUTTON(10),DIDFT_BUTTON|DIDFT_OPTIONAL|DIDFT_ANYINSTANCE,0 },
+ { NULL,DIJOFS_BUTTON(11),DIDFT_BUTTON|DIDFT_OPTIONAL|DIDFT_ANYINSTANCE,0 },
+ { NULL,DIJOFS_BUTTON(12),DIDFT_BUTTON|DIDFT_OPTIONAL|DIDFT_ANYINSTANCE,0 },
+ { NULL,DIJOFS_BUTTON(13),DIDFT_BUTTON|DIDFT_OPTIONAL|DIDFT_ANYINSTANCE,0 },
+ { NULL,DIJOFS_BUTTON(14),DIDFT_BUTTON|DIDFT_OPTIONAL|DIDFT_ANYINSTANCE,0 },
+ { NULL,DIJOFS_BUTTON(15),DIDFT_BUTTON|DIDFT_OPTIONAL|DIDFT_ANYINSTANCE,0 },
+ { NULL,DIJOFS_BUTTON(16),DIDFT_BUTTON|DIDFT_OPTIONAL|DIDFT_ANYINSTANCE,0 },
+ { NULL,DIJOFS_BUTTON(17),DIDFT_BUTTON|DIDFT_OPTIONAL|DIDFT_ANYINSTANCE,0 },
+ { NULL,DIJOFS_BUTTON(18),DIDFT_BUTTON|DIDFT_OPTIONAL|DIDFT_ANYINSTANCE,0 },
+ { NULL,DIJOFS_BUTTON(19),DIDFT_BUTTON|DIDFT_OPTIONAL|DIDFT_ANYINSTANCE,0 },
+ { NULL,DIJOFS_BUTTON(20),DIDFT_BUTTON|DIDFT_OPTIONAL|DIDFT_ANYINSTANCE,0 },
+ { NULL,DIJOFS_BUTTON(21),DIDFT_BUTTON|DIDFT_OPTIONAL|DIDFT_ANYINSTANCE,0 },
+ { NULL,DIJOFS_BUTTON(22),DIDFT_BUTTON|DIDFT_OPTIONAL|DIDFT_ANYINSTANCE,0 },
+ { NULL,DIJOFS_BUTTON(23),DIDFT_BUTTON|DIDFT_OPTIONAL|DIDFT_ANYINSTANCE,0 },
+ { NULL,DIJOFS_BUTTON(24),DIDFT_BUTTON|DIDFT_OPTIONAL|DIDFT_ANYINSTANCE,0 },
+ { NULL,DIJOFS_BUTTON(25),DIDFT_BUTTON|DIDFT_OPTIONAL|DIDFT_ANYINSTANCE,0 },
+ { NULL,DIJOFS_BUTTON(26),DIDFT_BUTTON|DIDFT_OPTIONAL|DIDFT_ANYINSTANCE,0 },
+ { NULL,DIJOFS_BUTTON(27),DIDFT_BUTTON|DIDFT_OPTIONAL|DIDFT_ANYINSTANCE,0 },
+ { NULL,DIJOFS_BUTTON(28),DIDFT_BUTTON|DIDFT_OPTIONAL|DIDFT_ANYINSTANCE,0 },
+ { NULL,DIJOFS_BUTTON(29),DIDFT_BUTTON|DIDFT_OPTIONAL|DIDFT_ANYINSTANCE,0 },
+ { NULL,DIJOFS_BUTTON(30),DIDFT_BUTTON|DIDFT_OPTIONAL|DIDFT_ANYINSTANCE,0 },
+ { NULL,DIJOFS_BUTTON(31),DIDFT_BUTTON|DIDFT_OPTIONAL|DIDFT_ANYINSTANCE,0 },
+};
+
+// Our clone of c_dfDIJoystick
+//
+static const DIDATAFORMAT _glfwDataFormat =
+{
+ sizeof(DIDATAFORMAT),
+ sizeof(DIOBJECTDATAFORMAT),
+ DIDFT_ABSAXIS,
+ sizeof(DIJOYSTATE),
+ sizeof(_glfwObjectDataFormats) / sizeof(DIOBJECTDATAFORMAT),
+ _glfwObjectDataFormats
+};
+
+// Returns a description fitting the specified XInput capabilities
+//
+static const char* getDeviceDescription(const XINPUT_CAPABILITIES* xic)
+{
+ switch (xic->SubType)
+ {
+ case XINPUT_DEVSUBTYPE_WHEEL:
+ return "XInput Wheel";
+ case XINPUT_DEVSUBTYPE_ARCADE_STICK:
+ return "XInput Arcade Stick";
+ case XINPUT_DEVSUBTYPE_FLIGHT_STICK:
+ return "XInput Flight Stick";
+ case XINPUT_DEVSUBTYPE_DANCE_PAD:
+ return "XInput Dance Pad";
+ case XINPUT_DEVSUBTYPE_GUITAR:
+ return "XInput Guitar";
+ case XINPUT_DEVSUBTYPE_DRUM_KIT:
+ return "XInput Drum Kit";
+ case XINPUT_DEVSUBTYPE_GAMEPAD:
+ {
+ if (xic->Flags & XINPUT_CAPS_WIRELESS)
+ return "Wireless Xbox Controller";
+ else
+ return "Xbox Controller";
+ }
+ }
+
+ return "Unknown XInput Device";
+}
+
+// Lexically compare device objects
+//
+static int compareJoystickObjects(const void* first, const void* second)
+{
+ const _GLFWjoyobjectWin32* fo = first;
+ const _GLFWjoyobjectWin32* so = second;
+
+ if (fo->type != so->type)
+ return fo->type - so->type;
+
+ return fo->offset - so->offset;
+}
+
+// Checks whether the specified device supports XInput
+// Technique from FDInputJoystickManager::IsXInputDeviceFast in ZDoom
+//
+static GLFWbool supportsXInput(const GUID* guid)
+{
+ UINT i, count = 0;
+ RAWINPUTDEVICELIST* ridl;
+ GLFWbool result = GLFW_FALSE;
+
+ if (GetRawInputDeviceList(NULL, &count, sizeof(RAWINPUTDEVICELIST)) != 0)
+ return GLFW_FALSE;
+
+ ridl = _glfw_calloc(count, sizeof(RAWINPUTDEVICELIST));
+
+ if (GetRawInputDeviceList(ridl, &count, sizeof(RAWINPUTDEVICELIST)) == (UINT) -1)
+ {
+ _glfw_free(ridl);
+ return GLFW_FALSE;
+ }
+
+ for (i = 0; i < count; i++)
+ {
+ RID_DEVICE_INFO rdi;
+ char name[256];
+ UINT size;
+
+ if (ridl[i].dwType != RIM_TYPEHID)
+ continue;
+
+ ZeroMemory(&rdi, sizeof(rdi));
+ rdi.cbSize = sizeof(rdi);
+ size = sizeof(rdi);
+
+ if ((INT) GetRawInputDeviceInfoA(ridl[i].hDevice,
+ RIDI_DEVICEINFO,
+ &rdi, &size) == -1)
+ {
+ continue;
+ }
+
+ if (MAKELONG(rdi.hid.dwVendorId, rdi.hid.dwProductId) != (LONG) guid->Data1)
+ continue;
+
+ memset(name, 0, sizeof(name));
+ size = sizeof(name);
+
+ if ((INT) GetRawInputDeviceInfoA(ridl[i].hDevice,
+ RIDI_DEVICENAME,
+ name, &size) == -1)
+ {
+ break;
+ }
+
+ name[sizeof(name) - 1] = '\0';
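+        // XInput-capable HID devices include "IG_" in their device interface name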
+ if (strstr(name, "IG_"))
+ {
+ result = GLFW_TRUE;
+ break;
+ }
+ }
+
+ _glfw_free(ridl);
+ return result;
+}
+
+// Frees all resources associated with the specified joystick
+//
+static void closeJoystick(_GLFWjoystick* js)
+{
+ if (js->win32.device)
+ {
+ IDirectInputDevice8_Unacquire(js->win32.device);
+ IDirectInputDevice8_Release(js->win32.device);
+ }
+
+ _glfw_free(js->win32.objects);
+
+ _glfwFreeJoystick(js);
+ _glfwInputJoystick(js, GLFW_DISCONNECTED);
+}
+
+// DirectInput device object enumeration callback
+// Insights gleaned from SDL
+//
+static BOOL CALLBACK deviceObjectCallback(const DIDEVICEOBJECTINSTANCEW* doi,
+ void* user)
+{
+ _GLFWobjenumWin32* data = user;
+ _GLFWjoyobjectWin32* object = data->objects + data->objectCount;
+
+ if (DIDFT_GETTYPE(doi->dwType) & DIDFT_AXIS)
+ {
+ DIPROPRANGE dipr;
+
+ if (memcmp(&doi->guidType, &GUID_Slider, sizeof(GUID)) == 0)
+ object->offset = DIJOFS_SLIDER(data->sliderCount);
+ else if (memcmp(&doi->guidType, &GUID_XAxis, sizeof(GUID)) == 0)
+ object->offset = DIJOFS_X;
+ else if (memcmp(&doi->guidType, &GUID_YAxis, sizeof(GUID)) == 0)
+ object->offset = DIJOFS_Y;
+ else if (memcmp(&doi->guidType, &GUID_ZAxis, sizeof(GUID)) == 0)
+ object->offset = DIJOFS_Z;
+ else if (memcmp(&doi->guidType, &GUID_RxAxis, sizeof(GUID)) == 0)
+ object->offset = DIJOFS_RX;
+ else if (memcmp(&doi->guidType, &GUID_RyAxis, sizeof(GUID)) == 0)
+ object->offset = DIJOFS_RY;
+ else if (memcmp(&doi->guidType, &GUID_RzAxis, sizeof(GUID)) == 0)
+ object->offset = DIJOFS_RZ;
+ else
+ return DIENUM_CONTINUE;
+
+ ZeroMemory(&dipr, sizeof(dipr));
+ dipr.diph.dwSize = sizeof(dipr);
+ dipr.diph.dwHeaderSize = sizeof(dipr.diph);
+ dipr.diph.dwObj = doi->dwType;
+ dipr.diph.dwHow = DIPH_BYID;
+ dipr.lMin = -32768;
+ dipr.lMax = 32767;
+
+ if (FAILED(IDirectInputDevice8_SetProperty(data->device,
+ DIPROP_RANGE,
+ &dipr.diph)))
+ {
+ return DIENUM_CONTINUE;
+ }
+
+ if (memcmp(&doi->guidType, &GUID_Slider, sizeof(GUID)) == 0)
+ {
+ object->type = _GLFW_TYPE_SLIDER;
+ data->sliderCount++;
+ }
+ else
+ {
+ object->type = _GLFW_TYPE_AXIS;
+ data->axisCount++;
+ }
+ }
+ else if (DIDFT_GETTYPE(doi->dwType) & DIDFT_BUTTON)
+ {
+ object->offset = DIJOFS_BUTTON(data->buttonCount);
+ object->type = _GLFW_TYPE_BUTTON;
+ data->buttonCount++;
+ }
+ else if (DIDFT_GETTYPE(doi->dwType) & DIDFT_POV)
+ {
+ object->offset = DIJOFS_POV(data->povCount);
+ object->type = _GLFW_TYPE_POV;
+ data->povCount++;
+ }
+
+ data->objectCount++;
+ return DIENUM_CONTINUE;
+}
+
+// DirectInput device enumeration callback
+//
+static BOOL CALLBACK deviceCallback(const DIDEVICEINSTANCE* di, void* user)
+{
+ int jid = 0;
+ DIDEVCAPS dc;
+ DIPROPDWORD dipd;
+ IDirectInputDevice8* device;
+ _GLFWobjenumWin32 data;
+ _GLFWjoystick* js;
+ char guid[33];
+ char name[256];
+
+ for (jid = 0; jid <= GLFW_JOYSTICK_LAST; jid++)
+ {
+ js = _glfw.joysticks + jid;
+ if (js->present)
+ {
+ if (memcmp(&js->win32.guid, &di->guidInstance, sizeof(GUID)) == 0)
+ return DIENUM_CONTINUE;
+ }
+ }
+
+ if (supportsXInput(&di->guidProduct))
+ return DIENUM_CONTINUE;
+
+ if (FAILED(IDirectInput8_CreateDevice(_glfw.win32.dinput8.api,
+ &di->guidInstance,
+ &device,
+ NULL)))
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR, "Win32: Failed to create device");
+ return DIENUM_CONTINUE;
+ }
+
+ if (FAILED(IDirectInputDevice8_SetDataFormat(device, &_glfwDataFormat)))
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "Win32: Failed to set device data format");
+
+ IDirectInputDevice8_Release(device);
+ return DIENUM_CONTINUE;
+ }
+
+ ZeroMemory(&dc, sizeof(dc));
+ dc.dwSize = sizeof(dc);
+
+ if (FAILED(IDirectInputDevice8_GetCapabilities(device, &dc)))
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "Win32: Failed to query device capabilities");
+
+ IDirectInputDevice8_Release(device);
+ return DIENUM_CONTINUE;
+ }
+
+ ZeroMemory(&dipd, sizeof(dipd));
+ dipd.diph.dwSize = sizeof(dipd);
+ dipd.diph.dwHeaderSize = sizeof(dipd.diph);
+ dipd.diph.dwHow = DIPH_DEVICE;
+ dipd.dwData = DIPROPAXISMODE_ABS;
+
+ if (FAILED(IDirectInputDevice8_SetProperty(device,
+ DIPROP_AXISMODE,
+ &dipd.diph)))
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "Win32: Failed to set device axis mode");
+
+ IDirectInputDevice8_Release(device);
+ return DIENUM_CONTINUE;
+ }
+
+ memset(&data, 0, sizeof(data));
+ data.device = device;
+ data.objects = _glfw_calloc(dc.dwAxes + (size_t) dc.dwButtons + dc.dwPOVs,
+ sizeof(_GLFWjoyobjectWin32));
+
+ if (FAILED(IDirectInputDevice8_EnumObjects(device,
+ deviceObjectCallback,
+ &data,
+ DIDFT_AXIS | DIDFT_BUTTON | DIDFT_POV)))
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "Win32: Failed to enumerate device objects");
+
+ IDirectInputDevice8_Release(device);
+ _glfw_free(data.objects);
+ return DIENUM_CONTINUE;
+ }
+
+ qsort(data.objects, data.objectCount,
+ sizeof(_GLFWjoyobjectWin32),
+ compareJoystickObjects);
+
+ if (!WideCharToMultiByte(CP_UTF8, 0,
+ di->tszInstanceName, -1,
+ name, sizeof(name),
+ NULL, NULL))
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "Win32: Failed to convert joystick name to UTF-8");
+
+ IDirectInputDevice8_Release(device);
+ _glfw_free(data.objects);
+ return DIENUM_STOP;
+ }
+
+ // Generate a joystick GUID that matches the SDL 2.0.5+ one
+ if (memcmp(&di->guidProduct.Data4[2], "PIDVID", 6) == 0)
+ {
+ sprintf(guid, "03000000%02x%02x0000%02x%02x000000000000",
+ (uint8_t) di->guidProduct.Data1,
+ (uint8_t) (di->guidProduct.Data1 >> 8),
+ (uint8_t) (di->guidProduct.Data1 >> 16),
+ (uint8_t) (di->guidProduct.Data1 >> 24));
+ }
+ else
+ {
+ sprintf(guid, "05000000%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x00",
+ name[0], name[1], name[2], name[3],
+ name[4], name[5], name[6], name[7],
+ name[8], name[9], name[10]);
+ }
+
+ js = _glfwAllocJoystick(name, guid,
+ data.axisCount + data.sliderCount,
+ data.buttonCount,
+ data.povCount);
+ if (!js)
+ {
+ IDirectInputDevice8_Release(device);
+ _glfw_free(data.objects);
+ return DIENUM_STOP;
+ }
+
+ js->win32.device = device;
+ js->win32.guid = di->guidInstance;
+ js->win32.objects = data.objects;
+ js->win32.objectCount = data.objectCount;
+
+ _glfwInputJoystick(js, GLFW_CONNECTED);
+ return DIENUM_CONTINUE;
+}
+
+
+//////////////////////////////////////////////////////////////////////////
+////// GLFW internal API //////
+//////////////////////////////////////////////////////////////////////////
+
+// Checks for new joysticks after DBT_DEVICEARRIVAL
+//
+void _glfwDetectJoystickConnectionWin32(void)
+{
+ if (_glfw.win32.xinput.instance)
+ {
+ DWORD index;
+
+ for (index = 0; index < XUSER_MAX_COUNT; index++)
+ {
+ int jid;
+ char guid[33];
+ XINPUT_CAPABILITIES xic;
+ _GLFWjoystick* js;
+
+ for (jid = 0; jid <= GLFW_JOYSTICK_LAST; jid++)
+ {
+ if (_glfw.joysticks[jid].present &&
+ _glfw.joysticks[jid].win32.device == NULL &&
+ _glfw.joysticks[jid].win32.index == index)
+ {
+ break;
+ }
+ }
+
+ if (jid <= GLFW_JOYSTICK_LAST)
+ continue;
+
+ if (XInputGetCapabilities(index, 0, &xic) != ERROR_SUCCESS)
+ continue;
+
+ // Generate a joystick GUID that matches the SDL 2.0.5+ one
+ sprintf(guid, "78696e707574%02x000000000000000000",
+ xic.SubType & 0xff);
+
+ js = _glfwAllocJoystick(getDeviceDescription(&xic), guid, 6, 10, 1);
+ if (!js)
+ continue;
+
+ js->win32.index = index;
+
+ _glfwInputJoystick(js, GLFW_CONNECTED);
+ }
+ }
+
+ if (_glfw.win32.dinput8.api)
+ {
+ if (FAILED(IDirectInput8_EnumDevices(_glfw.win32.dinput8.api,
+ DI8DEVCLASS_GAMECTRL,
+ deviceCallback,
+ NULL,
+ DIEDFL_ALLDEVICES)))
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "Failed to enumerate DirectInput8 devices");
+ return;
+ }
+ }
+}
+
+// Checks for joystick disconnection after DBT_DEVICEREMOVECOMPLETE
+//
+void _glfwDetectJoystickDisconnectionWin32(void)
+{
+ int jid;
+
+ for (jid = 0; jid <= GLFW_JOYSTICK_LAST; jid++)
+ {
+ _GLFWjoystick* js = _glfw.joysticks + jid;
+ if (js->present)
+ _glfwPollJoystickWin32(js, _GLFW_POLL_PRESENCE);
+ }
+}
+
+
+//////////////////////////////////////////////////////////////////////////
+////// GLFW platform API //////
+//////////////////////////////////////////////////////////////////////////
+
+GLFWbool _glfwInitJoysticksWin32(void)
+{
+ if (_glfw.win32.dinput8.instance)
+ {
+ if (FAILED(DirectInput8Create(_glfw.win32.instance,
+ DIRECTINPUT_VERSION,
+ &IID_IDirectInput8W,
+ (void**) &_glfw.win32.dinput8.api,
+ NULL)))
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "Win32: Failed to create interface");
+ return GLFW_FALSE;
+ }
+ }
+
+ _glfwDetectJoystickConnectionWin32();
+ return GLFW_TRUE;
+}
+
+void _glfwTerminateJoysticksWin32(void)
+{
+ int jid;
+
+ for (jid = GLFW_JOYSTICK_1; jid <= GLFW_JOYSTICK_LAST; jid++)
+ closeJoystick(_glfw.joysticks + jid);
+
+ if (_glfw.win32.dinput8.api)
+ IDirectInput8_Release(_glfw.win32.dinput8.api);
+}
+
+int _glfwPollJoystickWin32(_GLFWjoystick* js, int mode)
+{
+ if (js->win32.device)
+ {
+ int i, ai = 0, bi = 0, pi = 0;
+ HRESULT result;
+ DIJOYSTATE state = {0};
+
+ IDirectInputDevice8_Poll(js->win32.device);
+ result = IDirectInputDevice8_GetDeviceState(js->win32.device,
+ sizeof(state),
+ &state);
+ if (result == DIERR_NOTACQUIRED || result == DIERR_INPUTLOST)
+ {
+ IDirectInputDevice8_Acquire(js->win32.device);
+ IDirectInputDevice8_Poll(js->win32.device);
+ result = IDirectInputDevice8_GetDeviceState(js->win32.device,
+ sizeof(state),
+ &state);
+ }
+
+ if (FAILED(result))
+ {
+ closeJoystick(js);
+ return GLFW_FALSE;
+ }
+
+ if (mode == _GLFW_POLL_PRESENCE)
+ return GLFW_TRUE;
+
+ for (i = 0; i < js->win32.objectCount; i++)
+ {
+ const void* data = (char*) &state + js->win32.objects[i].offset;
+
+ switch (js->win32.objects[i].type)
+ {
+ case _GLFW_TYPE_AXIS:
+ case _GLFW_TYPE_SLIDER:
+ {
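+                    // Normalize the configured range [-32768, 32767] to [-1.0, 1.0]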
+ const float value = (*((LONG*) data) + 0.5f) / 32767.5f;
+ _glfwInputJoystickAxis(js, ai, value);
+ ai++;
+ break;
+ }
+
+ case _GLFW_TYPE_BUTTON:
+ {
+ const char value = (*((BYTE*) data) & 0x80) != 0;
+ _glfwInputJoystickButton(js, bi, value);
+ bi++;
+ break;
+ }
+
+ case _GLFW_TYPE_POV:
+ {
+ const int states[9] =
+ {
+ GLFW_HAT_UP,
+ GLFW_HAT_RIGHT_UP,
+ GLFW_HAT_RIGHT,
+ GLFW_HAT_RIGHT_DOWN,
+ GLFW_HAT_DOWN,
+ GLFW_HAT_LEFT_DOWN,
+ GLFW_HAT_LEFT,
+ GLFW_HAT_LEFT_UP,
+ GLFW_HAT_CENTERED
+ };
+
+ // Screams of horror are appropriate at this point
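+                    // The POV angle is reported in hundredths of a degree; each 45 degree sector maps to one hat state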
+ int stateIndex = LOWORD(*(DWORD*) data) / (45 * DI_DEGREES);
+ if (stateIndex < 0 || stateIndex > 8)
+ stateIndex = 8;
+
+ _glfwInputJoystickHat(js, pi, states[stateIndex]);
+ pi++;
+ break;
+ }
+ }
+ }
+ }
+ else
+ {
+ int i, dpad = 0;
+ DWORD result;
+ XINPUT_STATE xis;
+ const WORD buttons[10] =
+ {
+ XINPUT_GAMEPAD_A,
+ XINPUT_GAMEPAD_B,
+ XINPUT_GAMEPAD_X,
+ XINPUT_GAMEPAD_Y,
+ XINPUT_GAMEPAD_LEFT_SHOULDER,
+ XINPUT_GAMEPAD_RIGHT_SHOULDER,
+ XINPUT_GAMEPAD_BACK,
+ XINPUT_GAMEPAD_START,
+ XINPUT_GAMEPAD_LEFT_THUMB,
+ XINPUT_GAMEPAD_RIGHT_THUMB
+ };
+
+ result = XInputGetState(js->win32.index, &xis);
+ if (result != ERROR_SUCCESS)
+ {
+ if (result == ERROR_DEVICE_NOT_CONNECTED)
+ closeJoystick(js);
+
+ return GLFW_FALSE;
+ }
+
+ if (mode == _GLFW_POLL_PRESENCE)
+ return GLFW_TRUE;
+
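+        // Normalize thumbstick values from [-32768, 32767] and triggers from [0, 255] to [-1.0, 1.0]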
+ _glfwInputJoystickAxis(js, 0, (xis.Gamepad.sThumbLX + 0.5f) / 32767.5f);
+ _glfwInputJoystickAxis(js, 1, -(xis.Gamepad.sThumbLY + 0.5f) / 32767.5f);
+ _glfwInputJoystickAxis(js, 2, (xis.Gamepad.sThumbRX + 0.5f) / 32767.5f);
+ _glfwInputJoystickAxis(js, 3, -(xis.Gamepad.sThumbRY + 0.5f) / 32767.5f);
+ _glfwInputJoystickAxis(js, 4, xis.Gamepad.bLeftTrigger / 127.5f - 1.f);
+ _glfwInputJoystickAxis(js, 5, xis.Gamepad.bRightTrigger / 127.5f - 1.f);
+
+ for (i = 0; i < 10; i++)
+ {
+ const char value = (xis.Gamepad.wButtons & buttons[i]) ? 1 : 0;
+ _glfwInputJoystickButton(js, i, value);
+ }
+
+ if (xis.Gamepad.wButtons & XINPUT_GAMEPAD_DPAD_UP)
+ dpad |= GLFW_HAT_UP;
+ if (xis.Gamepad.wButtons & XINPUT_GAMEPAD_DPAD_RIGHT)
+ dpad |= GLFW_HAT_RIGHT;
+ if (xis.Gamepad.wButtons & XINPUT_GAMEPAD_DPAD_DOWN)
+ dpad |= GLFW_HAT_DOWN;
+ if (xis.Gamepad.wButtons & XINPUT_GAMEPAD_DPAD_LEFT)
+ dpad |= GLFW_HAT_LEFT;
+
+ _glfwInputJoystickHat(js, 0, dpad);
+ }
+
+ return GLFW_TRUE;
+}
+
+const char* _glfwGetMappingNameWin32(void)
+{
+ return "Windows";
+}
+
+void _glfwUpdateGamepadGUIDWin32(char* guid)
+{
+ if (strcmp(guid + 20, "504944564944") == 0)
+ {
+ char original[33];
+ strncpy(original, guid, sizeof(original) - 1);
+ sprintf(guid, "03000000%.4s0000%.4s000000000000",
+ original, original + 4);
+ }
+}
+
diff --git a/chromium/third_party/dawn/third_party/glfw/src/win32_joystick.h b/chromium/third_party/dawn/third_party/glfw/src/win32_joystick.h
new file mode 100644
index 00000000000..d7c2bb6f7e4
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/src/win32_joystick.h
@@ -0,0 +1,53 @@
+//========================================================================
+// GLFW 3.4 Win32 - www.glfw.org
+//------------------------------------------------------------------------
+// Copyright (c) 2006-2017 Camilla Löwy <elmindreda@glfw.org>
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would
+// be appreciated but is not required.
+//
+// 2. Altered source versions must be plainly marked as such, and must not
+// be misrepresented as being the original software.
+//
+// 3. This notice may not be removed or altered from any source
+// distribution.
+//
+//========================================================================
+
+#define GLFW_WIN32_JOYSTICK_STATE _GLFWjoystickWin32 win32;
+#define GLFW_WIN32_LIBRARY_JOYSTICK_STATE
+
+#define GLFW_BUILD_WIN32_MAPPINGS
+
+// Joystick element (axis, button or slider)
+//
+typedef struct _GLFWjoyobjectWin32
+{
+ int offset;
+ int type;
+} _GLFWjoyobjectWin32;
+
+// Win32-specific per-joystick data
+//
+typedef struct _GLFWjoystickWin32
+{
+ _GLFWjoyobjectWin32* objects;
+ int objectCount;
+ IDirectInputDevice8W* device;
+ DWORD index;
+ GUID guid;
+} _GLFWjoystickWin32;
+
+void _glfwDetectJoystickConnectionWin32(void);
+void _glfwDetectJoystickDisconnectionWin32(void);
+
diff --git a/chromium/third_party/dawn/third_party/glfw/src/win32_module.c b/chromium/third_party/dawn/third_party/glfw/src/win32_module.c
new file mode 100644
index 00000000000..35bdd71d57b
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/src/win32_module.c
@@ -0,0 +1,49 @@
+//========================================================================
+// GLFW 3.4 Win32 - www.glfw.org
+//------------------------------------------------------------------------
+// Copyright (c) 2021 Camilla Löwy <elmindreda@glfw.org>
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would
+// be appreciated but is not required.
+//
+// 2. Altered source versions must be plainly marked as such, and must not
+// be misrepresented as being the original software.
+//
+// 3. This notice may not be removed or altered from any source
+// distribution.
+//
+//========================================================================
+// Please use C89 style variable declarations in this file because VS 2010
+//========================================================================
+
+#include "internal.h"
+
+//////////////////////////////////////////////////////////////////////////
+////// GLFW platform API //////
+//////////////////////////////////////////////////////////////////////////
+
+void* _glfwPlatformLoadModule(const char* path)
+{
+ return LoadLibraryA(path);
+}
+
+void _glfwPlatformFreeModule(void* module)
+{
+ FreeLibrary((HMODULE) module);
+}
+
+GLFWproc _glfwPlatformGetModuleSymbol(void* module, const char* name)
+{
+ return (GLFWproc) GetProcAddress((HMODULE) module, name);
+}
+
diff --git a/chromium/third_party/dawn/third_party/glfw/src/win32_monitor.c b/chromium/third_party/dawn/third_party/glfw/src/win32_monitor.c
new file mode 100644
index 00000000000..57b44af3e18
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/src/win32_monitor.c
@@ -0,0 +1,547 @@
+//========================================================================
+// GLFW 3.4 Win32 - www.glfw.org
+//------------------------------------------------------------------------
+// Copyright (c) 2002-2006 Marcus Geelnard
+// Copyright (c) 2006-2019 Camilla Löwy <elmindreda@glfw.org>
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would
+// be appreciated but is not required.
+//
+// 2. Altered source versions must be plainly marked as such, and must not
+// be misrepresented as being the original software.
+//
+// 3. This notice may not be removed or altered from any source
+// distribution.
+//
+//========================================================================
+// Please use C89 style variable declarations in this file because VS 2010
+//========================================================================
+
+#include "internal.h"
+
+#include <stdlib.h>
+#include <string.h>
+#include <limits.h>
+#include <wchar.h>
+
+
+// Callback for EnumDisplayMonitors in createMonitor
+//
+static BOOL CALLBACK monitorCallback(HMONITOR handle,
+ HDC dc,
+ RECT* rect,
+ LPARAM data)
+{
+ MONITORINFOEXW mi;
+ ZeroMemory(&mi, sizeof(mi));
+ mi.cbSize = sizeof(mi);
+
+ if (GetMonitorInfoW(handle, (MONITORINFO*) &mi))
+ {
+ _GLFWmonitor* monitor = (_GLFWmonitor*) data;
+ if (wcscmp(mi.szDevice, monitor->win32.adapterName) == 0)
+ monitor->win32.handle = handle;
+ }
+
+ return TRUE;
+}
+
+// Create monitor from an adapter and (optionally) a display
+//
+static _GLFWmonitor* createMonitor(DISPLAY_DEVICEW* adapter,
+ DISPLAY_DEVICEW* display)
+{
+ _GLFWmonitor* monitor;
+ int widthMM, heightMM;
+ char* name;
+ HDC dc;
+ DEVMODEW dm;
+ RECT rect;
+
+ if (display)
+ name = _glfwCreateUTF8FromWideStringWin32(display->DeviceString);
+ else
+ name = _glfwCreateUTF8FromWideStringWin32(adapter->DeviceString);
+ if (!name)
+ return NULL;
+
+ ZeroMemory(&dm, sizeof(dm));
+ dm.dmSize = sizeof(dm);
+ EnumDisplaySettingsW(adapter->DeviceName, ENUM_CURRENT_SETTINGS, &dm);
+
+ dc = CreateDCW(L"DISPLAY", adapter->DeviceName, NULL, NULL);
+
+ if (IsWindows8Point1OrGreater())
+ {
+ widthMM = GetDeviceCaps(dc, HORZSIZE);
+ heightMM = GetDeviceCaps(dc, VERTSIZE);
+ }
+ else
+ {
+ widthMM = (int) (dm.dmPelsWidth * 25.4f / GetDeviceCaps(dc, LOGPIXELSX));
+ heightMM = (int) (dm.dmPelsHeight * 25.4f / GetDeviceCaps(dc, LOGPIXELSY));
+ }
+
+ DeleteDC(dc);
+
+ monitor = _glfwAllocMonitor(name, widthMM, heightMM);
+ _glfw_free(name);
+
+ if (adapter->StateFlags & DISPLAY_DEVICE_MODESPRUNED)
+ monitor->win32.modesPruned = GLFW_TRUE;
+
+ wcscpy(monitor->win32.adapterName, adapter->DeviceName);
+ WideCharToMultiByte(CP_UTF8, 0,
+ adapter->DeviceName, -1,
+ monitor->win32.publicAdapterName,
+ sizeof(monitor->win32.publicAdapterName),
+ NULL, NULL);
+
+ if (display)
+ {
+ wcscpy(monitor->win32.displayName, display->DeviceName);
+ WideCharToMultiByte(CP_UTF8, 0,
+ display->DeviceName, -1,
+ monitor->win32.publicDisplayName,
+ sizeof(monitor->win32.publicDisplayName),
+ NULL, NULL);
+ }
+
+ rect.left = dm.dmPosition.x;
+ rect.top = dm.dmPosition.y;
+ rect.right = dm.dmPosition.x + dm.dmPelsWidth;
+ rect.bottom = dm.dmPosition.y + dm.dmPelsHeight;
+
+ EnumDisplayMonitors(NULL, &rect, monitorCallback, (LPARAM) monitor);
+ return monitor;
+}
+
+
+//////////////////////////////////////////////////////////////////////////
+////// GLFW internal API //////
+//////////////////////////////////////////////////////////////////////////
+
+// Poll for changes in the set of connected monitors
+//
+void _glfwPollMonitorsWin32(void)
+{
+ int i, disconnectedCount;
+ _GLFWmonitor** disconnected = NULL;
+ DWORD adapterIndex, displayIndex;
+ DISPLAY_DEVICEW adapter, display;
+ _GLFWmonitor* monitor;
+
+ disconnectedCount = _glfw.monitorCount;
+ if (disconnectedCount)
+ {
+ disconnected = _glfw_calloc(_glfw.monitorCount, sizeof(_GLFWmonitor*));
+ memcpy(disconnected,
+ _glfw.monitors,
+ _glfw.monitorCount * sizeof(_GLFWmonitor*));
+ }
+
+ for (adapterIndex = 0; ; adapterIndex++)
+ {
+ int type = _GLFW_INSERT_LAST;
+
+ ZeroMemory(&adapter, sizeof(adapter));
+ adapter.cb = sizeof(adapter);
+
+ if (!EnumDisplayDevicesW(NULL, adapterIndex, &adapter, 0))
+ break;
+
+ if (!(adapter.StateFlags & DISPLAY_DEVICE_ACTIVE))
+ continue;
+
+ if (adapter.StateFlags & DISPLAY_DEVICE_PRIMARY_DEVICE)
+ type = _GLFW_INSERT_FIRST;
+
+ for (displayIndex = 0; ; displayIndex++)
+ {
+ ZeroMemory(&display, sizeof(display));
+ display.cb = sizeof(display);
+
+ if (!EnumDisplayDevicesW(adapter.DeviceName, displayIndex, &display, 0))
+ break;
+
+ if (!(display.StateFlags & DISPLAY_DEVICE_ACTIVE))
+ continue;
+
+ for (i = 0; i < disconnectedCount; i++)
+ {
+ if (disconnected[i] &&
+ wcscmp(disconnected[i]->win32.displayName,
+ display.DeviceName) == 0)
+ {
+ disconnected[i] = NULL;
+                    // The monitor handle may have changed, so update it
+ EnumDisplayMonitors(NULL, NULL, monitorCallback, (LPARAM) _glfw.monitors[i]);
+ break;
+ }
+ }
+
+ if (i < disconnectedCount)
+ continue;
+
+ monitor = createMonitor(&adapter, &display);
+ if (!monitor)
+ {
+ _glfw_free(disconnected);
+ return;
+ }
+
+ _glfwInputMonitor(monitor, GLFW_CONNECTED, type);
+
+ type = _GLFW_INSERT_LAST;
+ }
+
+ // HACK: If an active adapter does not have any display devices
+ // (as sometimes happens), add it directly as a monitor
+ if (displayIndex == 0)
+ {
+ for (i = 0; i < disconnectedCount; i++)
+ {
+ if (disconnected[i] &&
+ wcscmp(disconnected[i]->win32.adapterName,
+ adapter.DeviceName) == 0)
+ {
+ disconnected[i] = NULL;
+ break;
+ }
+ }
+
+ if (i < disconnectedCount)
+ continue;
+
+ monitor = createMonitor(&adapter, NULL);
+ if (!monitor)
+ {
+ _glfw_free(disconnected);
+ return;
+ }
+
+ _glfwInputMonitor(monitor, GLFW_CONNECTED, type);
+ }
+ }
+
+ for (i = 0; i < disconnectedCount; i++)
+ {
+ if (disconnected[i])
+ _glfwInputMonitor(disconnected[i], GLFW_DISCONNECTED, 0);
+ }
+
+ _glfw_free(disconnected);
+}
+
+// Change the current video mode
+//
+void _glfwSetVideoModeWin32(_GLFWmonitor* monitor, const GLFWvidmode* desired)
+{
+ GLFWvidmode current;
+ const GLFWvidmode* best;
+ DEVMODEW dm;
+ LONG result;
+
+ best = _glfwChooseVideoMode(monitor, desired);
+ _glfwGetVideoModeWin32(monitor, &current);
+ if (_glfwCompareVideoModes(&current, best) == 0)
+ return;
+
+ ZeroMemory(&dm, sizeof(dm));
+ dm.dmSize = sizeof(dm);
+ dm.dmFields = DM_PELSWIDTH | DM_PELSHEIGHT | DM_BITSPERPEL |
+ DM_DISPLAYFREQUENCY;
+ dm.dmPelsWidth = best->width;
+ dm.dmPelsHeight = best->height;
+ dm.dmBitsPerPel = best->redBits + best->greenBits + best->blueBits;
+ dm.dmDisplayFrequency = best->refreshRate;
+
+ if (dm.dmBitsPerPel < 15 || dm.dmBitsPerPel >= 24)
+ dm.dmBitsPerPel = 32;
+
+ result = ChangeDisplaySettingsExW(monitor->win32.adapterName,
+ &dm,
+ NULL,
+ CDS_FULLSCREEN,
+ NULL);
+ if (result == DISP_CHANGE_SUCCESSFUL)
+ monitor->win32.modeChanged = GLFW_TRUE;
+ else
+ {
+ const char* description = "Unknown error";
+
+ if (result == DISP_CHANGE_BADDUALVIEW)
+ description = "The system uses DualView";
+ else if (result == DISP_CHANGE_BADFLAGS)
+ description = "Invalid flags";
+ else if (result == DISP_CHANGE_BADMODE)
+ description = "Graphics mode not supported";
+ else if (result == DISP_CHANGE_BADPARAM)
+ description = "Invalid parameter";
+ else if (result == DISP_CHANGE_FAILED)
+ description = "Graphics mode failed";
+ else if (result == DISP_CHANGE_NOTUPDATED)
+ description = "Failed to write to registry";
+ else if (result == DISP_CHANGE_RESTART)
+ description = "Computer restart required";
+
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "Win32: Failed to set video mode: %s",
+ description);
+ }
+}
+
+// Restore the previously saved (original) video mode
+//
+void _glfwRestoreVideoModeWin32(_GLFWmonitor* monitor)
+{
+ if (monitor->win32.modeChanged)
+ {
+ ChangeDisplaySettingsExW(monitor->win32.adapterName,
+ NULL, NULL, CDS_FULLSCREEN, NULL);
+ monitor->win32.modeChanged = GLFW_FALSE;
+ }
+}
+
+void _glfwGetHMONITORContentScaleWin32(HMONITOR handle, float* xscale, float* yscale)
+{
+ UINT xdpi, ydpi;
+
+ if (xscale)
+ *xscale = 0.f;
+ if (yscale)
+ *yscale = 0.f;
+
+ if (IsWindows8Point1OrGreater())
+ {
+ if (GetDpiForMonitor(handle, MDT_EFFECTIVE_DPI, &xdpi, &ydpi) != S_OK)
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR, "Win32: Failed to query monitor DPI");
+ return;
+ }
+ }
+ else
+ {
+ const HDC dc = GetDC(NULL);
+ xdpi = GetDeviceCaps(dc, LOGPIXELSX);
+ ydpi = GetDeviceCaps(dc, LOGPIXELSY);
+ ReleaseDC(NULL, dc);
+ }
+
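+    // The content scale is the ratio of the monitor DPI to the 96 DPI default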
+ if (xscale)
+ *xscale = xdpi / (float) USER_DEFAULT_SCREEN_DPI;
+ if (yscale)
+ *yscale = ydpi / (float) USER_DEFAULT_SCREEN_DPI;
+}
+
+
+//////////////////////////////////////////////////////////////////////////
+////// GLFW platform API //////
+//////////////////////////////////////////////////////////////////////////
+
+void _glfwFreeMonitorWin32(_GLFWmonitor* monitor)
+{
+}
+
+void _glfwGetMonitorPosWin32(_GLFWmonitor* monitor, int* xpos, int* ypos)
+{
+ DEVMODEW dm;
+ ZeroMemory(&dm, sizeof(dm));
+ dm.dmSize = sizeof(dm);
+
+ EnumDisplaySettingsExW(monitor->win32.adapterName,
+ ENUM_CURRENT_SETTINGS,
+ &dm,
+ EDS_ROTATEDMODE);
+
+ if (xpos)
+ *xpos = dm.dmPosition.x;
+ if (ypos)
+ *ypos = dm.dmPosition.y;
+}
+
+void _glfwGetMonitorContentScaleWin32(_GLFWmonitor* monitor,
+ float* xscale, float* yscale)
+{
+ _glfwGetHMONITORContentScaleWin32(monitor->win32.handle, xscale, yscale);
+}
+
+void _glfwGetMonitorWorkareaWin32(_GLFWmonitor* monitor,
+ int* xpos, int* ypos,
+ int* width, int* height)
+{
+ MONITORINFO mi = { sizeof(mi) };
+ GetMonitorInfoW(monitor->win32.handle, &mi);
+
+ if (xpos)
+ *xpos = mi.rcWork.left;
+ if (ypos)
+ *ypos = mi.rcWork.top;
+ if (width)
+ *width = mi.rcWork.right - mi.rcWork.left;
+ if (height)
+ *height = mi.rcWork.bottom - mi.rcWork.top;
+}
+
+GLFWvidmode* _glfwGetVideoModesWin32(_GLFWmonitor* monitor, int* count)
+{
+ int modeIndex = 0, size = 0;
+ GLFWvidmode* result = NULL;
+
+ *count = 0;
+
+ for (;;)
+ {
+ int i;
+ GLFWvidmode mode;
+ DEVMODEW dm;
+
+ ZeroMemory(&dm, sizeof(dm));
+ dm.dmSize = sizeof(dm);
+
+ if (!EnumDisplaySettingsW(monitor->win32.adapterName, modeIndex, &dm))
+ break;
+
+ modeIndex++;
+
+ // Skip modes with less than 15 BPP
+ if (dm.dmBitsPerPel < 15)
+ continue;
+
+ mode.width = dm.dmPelsWidth;
+ mode.height = dm.dmPelsHeight;
+ mode.refreshRate = dm.dmDisplayFrequency;
+ _glfwSplitBPP(dm.dmBitsPerPel,
+ &mode.redBits,
+ &mode.greenBits,
+ &mode.blueBits);
+
+ for (i = 0; i < *count; i++)
+ {
+ if (_glfwCompareVideoModes(result + i, &mode) == 0)
+ break;
+ }
+
+ // Skip duplicate modes
+ if (i < *count)
+ continue;
+
+ if (monitor->win32.modesPruned)
+ {
+ // Skip modes not supported by the connected displays
+ if (ChangeDisplaySettingsExW(monitor->win32.adapterName,
+ &dm,
+ NULL,
+ CDS_TEST,
+ NULL) != DISP_CHANGE_SUCCESSFUL)
+ {
+ continue;
+ }
+ }
+
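+        // Grow the result array in chunks of 128 modes to limit reallocations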
+ if (*count == size)
+ {
+ size += 128;
+ result = (GLFWvidmode*) _glfw_realloc(result, size * sizeof(GLFWvidmode));
+ }
+
+ (*count)++;
+ result[*count - 1] = mode;
+ }
+
+ if (!*count)
+ {
+ // HACK: Report the current mode if no valid modes were found
+ result = _glfw_calloc(1, sizeof(GLFWvidmode));
+ _glfwGetVideoModeWin32(monitor, result);
+ *count = 1;
+ }
+
+ return result;
+}
+
+void _glfwGetVideoModeWin32(_GLFWmonitor* monitor, GLFWvidmode* mode)
+{
+ DEVMODEW dm;
+ ZeroMemory(&dm, sizeof(dm));
+ dm.dmSize = sizeof(dm);
+
+ EnumDisplaySettingsW(monitor->win32.adapterName, ENUM_CURRENT_SETTINGS, &dm);
+
+ mode->width = dm.dmPelsWidth;
+ mode->height = dm.dmPelsHeight;
+ mode->refreshRate = dm.dmDisplayFrequency;
+ _glfwSplitBPP(dm.dmBitsPerPel,
+ &mode->redBits,
+ &mode->greenBits,
+ &mode->blueBits);
+}
+
+GLFWbool _glfwGetGammaRampWin32(_GLFWmonitor* monitor, GLFWgammaramp* ramp)
+{
+ HDC dc;
+ WORD values[3][256];
+
+ dc = CreateDCW(L"DISPLAY", monitor->win32.adapterName, NULL, NULL);
+ GetDeviceGammaRamp(dc, values);
+ DeleteDC(dc);
+
+ _glfwAllocGammaArrays(ramp, 256);
+
+ memcpy(ramp->red, values[0], sizeof(values[0]));
+ memcpy(ramp->green, values[1], sizeof(values[1]));
+ memcpy(ramp->blue, values[2], sizeof(values[2]));
+
+ return GLFW_TRUE;
+}
+
+void _glfwSetGammaRampWin32(_GLFWmonitor* monitor, const GLFWgammaramp* ramp)
+{
+ HDC dc;
+ WORD values[3][256];
+
+ if (ramp->size != 256)
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "Win32: Gamma ramp size must be 256");
+ return;
+ }
+
+ memcpy(values[0], ramp->red, sizeof(values[0]));
+ memcpy(values[1], ramp->green, sizeof(values[1]));
+ memcpy(values[2], ramp->blue, sizeof(values[2]));
+
+ dc = CreateDCW(L"DISPLAY", monitor->win32.adapterName, NULL, NULL);
+ SetDeviceGammaRamp(dc, values);
+ DeleteDC(dc);
+}
+
+
+//////////////////////////////////////////////////////////////////////////
+////// GLFW native API //////
+//////////////////////////////////////////////////////////////////////////
+
+GLFWAPI const char* glfwGetWin32Adapter(GLFWmonitor* handle)
+{
+ _GLFWmonitor* monitor = (_GLFWmonitor*) handle;
+ _GLFW_REQUIRE_INIT_OR_RETURN(NULL);
+ return monitor->win32.publicAdapterName;
+}
+
+GLFWAPI const char* glfwGetWin32Monitor(GLFWmonitor* handle)
+{
+ _GLFWmonitor* monitor = (_GLFWmonitor*) handle;
+ _GLFW_REQUIRE_INIT_OR_RETURN(NULL);
+ return monitor->win32.publicDisplayName;
+}
+
diff --git a/chromium/third_party/dawn/third_party/glfw/src/win32_platform.h b/chromium/third_party/dawn/third_party/glfw/src/win32_platform.h
new file mode 100644
index 00000000000..c3dc262f320
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/src/win32_platform.h
@@ -0,0 +1,627 @@
+//========================================================================
+// GLFW 3.4 Win32 - www.glfw.org
+//------------------------------------------------------------------------
+// Copyright (c) 2002-2006 Marcus Geelnard
+// Copyright (c) 2006-2019 Camilla Löwy <elmindreda@glfw.org>
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would
+// be appreciated but is not required.
+//
+// 2. Altered source versions must be plainly marked as such, and must not
+// be misrepresented as being the original software.
+//
+// 3. This notice may not be removed or altered from any source
+// distribution.
+//
+//========================================================================
+
+// We don't need all the fancy stuff
+#ifndef NOMINMAX
+ #define NOMINMAX
+#endif
+
+#ifndef VC_EXTRALEAN
+ #define VC_EXTRALEAN
+#endif
+
+#ifndef WIN32_LEAN_AND_MEAN
+ #define WIN32_LEAN_AND_MEAN
+#endif
+
+// This is a workaround for the fact that glfw3.h needs to export APIENTRY (for
+// example to allow applications to correctly declare a GL_KHR_debug callback)
+// but windows.h assumes no one will define APIENTRY before it does
+#undef APIENTRY
+
+// GLFW on Windows is Unicode only and does not work in MBCS mode
+#ifndef UNICODE
+ #define UNICODE
+#endif
+
+// GLFW requires Windows XP or later
+#if WINVER < 0x0501
+ #undef WINVER
+ #define WINVER 0x0501
+#endif
+#if _WIN32_WINNT < 0x0501
+ #undef _WIN32_WINNT
+ #define _WIN32_WINNT 0x0501
+#endif
+
+// GLFW uses DirectInput8 interfaces
+#define DIRECTINPUT_VERSION 0x0800
+
+// GLFW uses OEM cursor resources
+#define OEMRESOURCE
+
+#include <wctype.h>
+#include <windows.h>
+#include <dinput.h>
+#include <xinput.h>
+#include <dbt.h>
+
+// HACK: Define macros that some windows.h variants don't
+#ifndef WM_MOUSEHWHEEL
+ #define WM_MOUSEHWHEEL 0x020E
+#endif
+#ifndef WM_DWMCOMPOSITIONCHANGED
+ #define WM_DWMCOMPOSITIONCHANGED 0x031E
+#endif
+#ifndef WM_DWMCOLORIZATIONCOLORCHANGED
+ #define WM_DWMCOLORIZATIONCOLORCHANGED 0x0320
+#endif
+#ifndef WM_COPYGLOBALDATA
+ #define WM_COPYGLOBALDATA 0x0049
+#endif
+#ifndef WM_UNICHAR
+ #define WM_UNICHAR 0x0109
+#endif
+#ifndef UNICODE_NOCHAR
+ #define UNICODE_NOCHAR 0xFFFF
+#endif
+#ifndef WM_DPICHANGED
+ #define WM_DPICHANGED 0x02E0
+#endif
+#ifndef GET_XBUTTON_WPARAM
+ #define GET_XBUTTON_WPARAM(w) (HIWORD(w))
+#endif
+#ifndef EDS_ROTATEDMODE
+ #define EDS_ROTATEDMODE 0x00000004
+#endif
+#ifndef DISPLAY_DEVICE_ACTIVE
+ #define DISPLAY_DEVICE_ACTIVE 0x00000001
+#endif
+#ifndef _WIN32_WINNT_WINBLUE
+ #define _WIN32_WINNT_WINBLUE 0x0603
+#endif
+#ifndef _WIN32_WINNT_WIN8
+ #define _WIN32_WINNT_WIN8 0x0602
+#endif
+#ifndef WM_GETDPISCALEDSIZE
+ #define WM_GETDPISCALEDSIZE 0x02e4
+#endif
+#ifndef USER_DEFAULT_SCREEN_DPI
+ #define USER_DEFAULT_SCREEN_DPI 96
+#endif
+#ifndef OCR_HAND
+ #define OCR_HAND 32649
+#endif
+
+#if WINVER < 0x0601
+typedef struct
+{
+ DWORD cbSize;
+ DWORD ExtStatus;
+} CHANGEFILTERSTRUCT;
+#ifndef MSGFLT_ALLOW
+ #define MSGFLT_ALLOW 1
+#endif
+#endif /*Windows 7*/
+
+#if WINVER < 0x0600
+#define DWM_BB_ENABLE 0x00000001
+#define DWM_BB_BLURREGION 0x00000002
+typedef struct
+{
+ DWORD dwFlags;
+ BOOL fEnable;
+ HRGN hRgnBlur;
+ BOOL fTransitionOnMaximized;
+} DWM_BLURBEHIND;
+#else
+ #include <dwmapi.h>
+#endif /*Windows Vista*/
+
+#ifndef DPI_ENUMS_DECLARED
+typedef enum
+{
+ PROCESS_DPI_UNAWARE = 0,
+ PROCESS_SYSTEM_DPI_AWARE = 1,
+ PROCESS_PER_MONITOR_DPI_AWARE = 2
+} PROCESS_DPI_AWARENESS;
+typedef enum
+{
+ MDT_EFFECTIVE_DPI = 0,
+ MDT_ANGULAR_DPI = 1,
+ MDT_RAW_DPI = 2,
+ MDT_DEFAULT = MDT_EFFECTIVE_DPI
+} MONITOR_DPI_TYPE;
+#endif /*DPI_ENUMS_DECLARED*/
+
+#ifndef DPI_AWARENESS_CONTEXT_PER_MONITOR_AWARE_V2
+#define DPI_AWARENESS_CONTEXT_PER_MONITOR_AWARE_V2 ((HANDLE) -4)
+#endif /*DPI_AWARENESS_CONTEXT_PER_MONITOR_AWARE_V2*/
+
+// Replacement for versionhelpers.h macros, as we cannot rely on the
+// application having a correct embedded manifest
+//
+#define IsWindowsVistaOrGreater() \
+ _glfwIsWindowsVersionOrGreaterWin32(HIBYTE(_WIN32_WINNT_VISTA), \
+ LOBYTE(_WIN32_WINNT_VISTA), 0)
+#define IsWindows7OrGreater() \
+ _glfwIsWindowsVersionOrGreaterWin32(HIBYTE(_WIN32_WINNT_WIN7), \
+ LOBYTE(_WIN32_WINNT_WIN7), 0)
+#define IsWindows8OrGreater() \
+ _glfwIsWindowsVersionOrGreaterWin32(HIBYTE(_WIN32_WINNT_WIN8), \
+ LOBYTE(_WIN32_WINNT_WIN8), 0)
+#define IsWindows8Point1OrGreater() \
+ _glfwIsWindowsVersionOrGreaterWin32(HIBYTE(_WIN32_WINNT_WINBLUE), \
+ LOBYTE(_WIN32_WINNT_WINBLUE), 0)
+
+// Windows 10 Anniversary Update
+#define _glfwIsWindows10Version1607OrGreaterWin32() \
+ _glfwIsWindows10BuildOrGreaterWin32(14393)
+// Windows 10 Creators Update
+#define _glfwIsWindows10Version1703OrGreaterWin32() \
+ _glfwIsWindows10BuildOrGreaterWin32(15063)
+
+// HACK: Define macros that some xinput.h variants don't
+#ifndef XINPUT_CAPS_WIRELESS
+ #define XINPUT_CAPS_WIRELESS 0x0002
+#endif
+#ifndef XINPUT_DEVSUBTYPE_WHEEL
+ #define XINPUT_DEVSUBTYPE_WHEEL 0x02
+#endif
+#ifndef XINPUT_DEVSUBTYPE_ARCADE_STICK
+ #define XINPUT_DEVSUBTYPE_ARCADE_STICK 0x03
+#endif
+#ifndef XINPUT_DEVSUBTYPE_FLIGHT_STICK
+ #define XINPUT_DEVSUBTYPE_FLIGHT_STICK 0x04
+#endif
+#ifndef XINPUT_DEVSUBTYPE_DANCE_PAD
+ #define XINPUT_DEVSUBTYPE_DANCE_PAD 0x05
+#endif
+#ifndef XINPUT_DEVSUBTYPE_GUITAR
+ #define XINPUT_DEVSUBTYPE_GUITAR 0x06
+#endif
+#ifndef XINPUT_DEVSUBTYPE_DRUM_KIT
+ #define XINPUT_DEVSUBTYPE_DRUM_KIT 0x08
+#endif
+#ifndef XINPUT_DEVSUBTYPE_ARCADE_PAD
+ #define XINPUT_DEVSUBTYPE_ARCADE_PAD 0x13
+#endif
+#ifndef XUSER_MAX_COUNT
+ #define XUSER_MAX_COUNT 4
+#endif
+
+// HACK: Define macros that some dinput.h variants don't
+#ifndef DIDFT_OPTIONAL
+ #define DIDFT_OPTIONAL 0x80000000
+#endif
+
+#define WGL_NUMBER_PIXEL_FORMATS_ARB 0x2000
+#define WGL_SUPPORT_OPENGL_ARB 0x2010
+#define WGL_DRAW_TO_WINDOW_ARB 0x2001
+#define WGL_PIXEL_TYPE_ARB 0x2013
+#define WGL_TYPE_RGBA_ARB 0x202b
+#define WGL_ACCELERATION_ARB 0x2003
+#define WGL_NO_ACCELERATION_ARB 0x2025
+#define WGL_RED_BITS_ARB 0x2015
+#define WGL_RED_SHIFT_ARB 0x2016
+#define WGL_GREEN_BITS_ARB 0x2017
+#define WGL_GREEN_SHIFT_ARB 0x2018
+#define WGL_BLUE_BITS_ARB 0x2019
+#define WGL_BLUE_SHIFT_ARB 0x201a
+#define WGL_ALPHA_BITS_ARB 0x201b
+#define WGL_ALPHA_SHIFT_ARB 0x201c
+#define WGL_ACCUM_BITS_ARB 0x201d
+#define WGL_ACCUM_RED_BITS_ARB 0x201e
+#define WGL_ACCUM_GREEN_BITS_ARB 0x201f
+#define WGL_ACCUM_BLUE_BITS_ARB 0x2020
+#define WGL_ACCUM_ALPHA_BITS_ARB 0x2021
+#define WGL_DEPTH_BITS_ARB 0x2022
+#define WGL_STENCIL_BITS_ARB 0x2023
+#define WGL_AUX_BUFFERS_ARB 0x2024
+#define WGL_STEREO_ARB 0x2012
+#define WGL_DOUBLE_BUFFER_ARB 0x2011
+#define WGL_SAMPLES_ARB 0x2042
+#define WGL_FRAMEBUFFER_SRGB_CAPABLE_ARB 0x20a9
+#define WGL_CONTEXT_DEBUG_BIT_ARB 0x00000001
+#define WGL_CONTEXT_FORWARD_COMPATIBLE_BIT_ARB 0x00000002
+#define WGL_CONTEXT_PROFILE_MASK_ARB 0x9126
+#define WGL_CONTEXT_CORE_PROFILE_BIT_ARB 0x00000001
+#define WGL_CONTEXT_COMPATIBILITY_PROFILE_BIT_ARB 0x00000002
+#define WGL_CONTEXT_MAJOR_VERSION_ARB 0x2091
+#define WGL_CONTEXT_MINOR_VERSION_ARB 0x2092
+#define WGL_CONTEXT_FLAGS_ARB 0x2094
+#define WGL_CONTEXT_ES2_PROFILE_BIT_EXT 0x00000004
+#define WGL_CONTEXT_ROBUST_ACCESS_BIT_ARB 0x00000004
+#define WGL_LOSE_CONTEXT_ON_RESET_ARB 0x8252
+#define WGL_CONTEXT_RESET_NOTIFICATION_STRATEGY_ARB 0x8256
+#define WGL_NO_RESET_NOTIFICATION_ARB 0x8261
+#define WGL_CONTEXT_RELEASE_BEHAVIOR_ARB 0x2097
+#define WGL_CONTEXT_RELEASE_BEHAVIOR_NONE_ARB 0
+#define WGL_CONTEXT_RELEASE_BEHAVIOR_FLUSH_ARB 0x2098
+#define WGL_CONTEXT_OPENGL_NO_ERROR_ARB 0x31b3
+#define WGL_COLORSPACE_EXT 0x309d
+#define WGL_COLORSPACE_SRGB_EXT 0x3089
+
+#define ERROR_INVALID_VERSION_ARB 0x2095
+#define ERROR_INVALID_PROFILE_ARB 0x2096
+#define ERROR_INCOMPATIBLE_DEVICE_CONTEXTS_ARB 0x2054
+
+// xinput.dll function pointer typedefs
+typedef DWORD (WINAPI * PFN_XInputGetCapabilities)(DWORD,DWORD,XINPUT_CAPABILITIES*);
+typedef DWORD (WINAPI * PFN_XInputGetState)(DWORD,XINPUT_STATE*);
+#define XInputGetCapabilities _glfw.win32.xinput.GetCapabilities
+#define XInputGetState _glfw.win32.xinput.GetState
+
+// dinput8.dll function pointer typedefs
+typedef HRESULT (WINAPI * PFN_DirectInput8Create)(HINSTANCE,DWORD,REFIID,LPVOID*,LPUNKNOWN);
+#define DirectInput8Create _glfw.win32.dinput8.Create
+
+// user32.dll function pointer typedefs
+typedef BOOL (WINAPI * PFN_SetProcessDPIAware)(void);
+typedef BOOL (WINAPI * PFN_ChangeWindowMessageFilterEx)(HWND,UINT,DWORD,CHANGEFILTERSTRUCT*);
+typedef BOOL (WINAPI * PFN_EnableNonClientDpiScaling)(HWND);
+typedef BOOL (WINAPI * PFN_SetProcessDpiAwarenessContext)(HANDLE);
+typedef UINT (WINAPI * PFN_GetDpiForWindow)(HWND);
+typedef BOOL (WINAPI * PFN_AdjustWindowRectExForDpi)(LPRECT,DWORD,BOOL,DWORD,UINT);
+typedef int (WINAPI * PFN_GetSystemMetricsForDpi)(int,UINT);
+#define SetProcessDPIAware _glfw.win32.user32.SetProcessDPIAware_
+#define ChangeWindowMessageFilterEx _glfw.win32.user32.ChangeWindowMessageFilterEx_
+#define EnableNonClientDpiScaling _glfw.win32.user32.EnableNonClientDpiScaling_
+#define SetProcessDpiAwarenessContext _glfw.win32.user32.SetProcessDpiAwarenessContext_
+#define GetDpiForWindow _glfw.win32.user32.GetDpiForWindow_
+#define AdjustWindowRectExForDpi _glfw.win32.user32.AdjustWindowRectExForDpi_
+#define GetSystemMetricsForDpi _glfw.win32.user32.GetSystemMetricsForDpi_
+
+// dwmapi.dll function pointer typedefs
+typedef HRESULT (WINAPI * PFN_DwmIsCompositionEnabled)(BOOL*);
+typedef HRESULT (WINAPI * PFN_DwmFlush)(VOID);
+typedef HRESULT(WINAPI * PFN_DwmEnableBlurBehindWindow)(HWND,const DWM_BLURBEHIND*);
+typedef HRESULT (WINAPI * PFN_DwmGetColorizationColor)(DWORD*,BOOL*);
+#define DwmIsCompositionEnabled _glfw.win32.dwmapi.IsCompositionEnabled
+#define DwmFlush _glfw.win32.dwmapi.Flush
+#define DwmEnableBlurBehindWindow _glfw.win32.dwmapi.EnableBlurBehindWindow
+#define DwmGetColorizationColor _glfw.win32.dwmapi.GetColorizationColor
+
+// shcore.dll function pointer typedefs
+typedef HRESULT (WINAPI * PFN_SetProcessDpiAwareness)(PROCESS_DPI_AWARENESS);
+typedef HRESULT (WINAPI * PFN_GetDpiForMonitor)(HMONITOR,MONITOR_DPI_TYPE,UINT*,UINT*);
+#define SetProcessDpiAwareness _glfw.win32.shcore.SetProcessDpiAwareness_
+#define GetDpiForMonitor _glfw.win32.shcore.GetDpiForMonitor_
+
+// ntdll.dll function pointer typedefs
+typedef LONG (WINAPI * PFN_RtlVerifyVersionInfo)(OSVERSIONINFOEXW*,ULONG,ULONGLONG);
+#define RtlVerifyVersionInfo _glfw.win32.ntdll.RtlVerifyVersionInfo_
+
+// WGL extension pointer typedefs
+typedef BOOL (WINAPI * PFNWGLSWAPINTERVALEXTPROC)(int);
+typedef BOOL (WINAPI * PFNWGLGETPIXELFORMATATTRIBIVARBPROC)(HDC,int,int,UINT,const int*,int*);
+typedef const char* (WINAPI * PFNWGLGETEXTENSIONSSTRINGEXTPROC)(void);
+typedef const char* (WINAPI * PFNWGLGETEXTENSIONSSTRINGARBPROC)(HDC);
+typedef HGLRC (WINAPI * PFNWGLCREATECONTEXTATTRIBSARBPROC)(HDC,HGLRC,const int*);
+#define wglSwapIntervalEXT _glfw.wgl.SwapIntervalEXT
+#define wglGetPixelFormatAttribivARB _glfw.wgl.GetPixelFormatAttribivARB
+#define wglGetExtensionsStringEXT _glfw.wgl.GetExtensionsStringEXT
+#define wglGetExtensionsStringARB _glfw.wgl.GetExtensionsStringARB
+#define wglCreateContextAttribsARB _glfw.wgl.CreateContextAttribsARB
+
+// opengl32.dll function pointer typedefs
+typedef HGLRC (WINAPI * PFN_wglCreateContext)(HDC);
+typedef BOOL (WINAPI * PFN_wglDeleteContext)(HGLRC);
+typedef PROC (WINAPI * PFN_wglGetProcAddress)(LPCSTR);
+typedef HDC (WINAPI * PFN_wglGetCurrentDC)(void);
+typedef HGLRC (WINAPI * PFN_wglGetCurrentContext)(void);
+typedef BOOL (WINAPI * PFN_wglMakeCurrent)(HDC,HGLRC);
+typedef BOOL (WINAPI * PFN_wglShareLists)(HGLRC,HGLRC);
+#define wglCreateContext _glfw.wgl.CreateContext
+#define wglDeleteContext _glfw.wgl.DeleteContext
+#define wglGetProcAddress _glfw.wgl.GetProcAddress
+#define wglGetCurrentDC _glfw.wgl.GetCurrentDC
+#define wglGetCurrentContext _glfw.wgl.GetCurrentContext
+#define wglMakeCurrent _glfw.wgl.MakeCurrent
+#define wglShareLists _glfw.wgl.ShareLists
+
+typedef VkFlags VkWin32SurfaceCreateFlagsKHR;
+
+typedef struct VkWin32SurfaceCreateInfoKHR
+{
+ VkStructureType sType;
+ const void* pNext;
+ VkWin32SurfaceCreateFlagsKHR flags;
+ HINSTANCE hinstance;
+ HWND hwnd;
+} VkWin32SurfaceCreateInfoKHR;
+
+typedef VkResult (APIENTRY *PFN_vkCreateWin32SurfaceKHR)(VkInstance,const VkWin32SurfaceCreateInfoKHR*,const VkAllocationCallbacks*,VkSurfaceKHR*);
+typedef VkBool32 (APIENTRY *PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR)(VkPhysicalDevice,uint32_t);
+
+#if !defined(_GLFW_WNDCLASSNAME)
+ #define _GLFW_WNDCLASSNAME L"GLFW30"
+#endif
+
+#define GLFW_WIN32_WINDOW_STATE _GLFWwindowWin32 win32;
+#define GLFW_WIN32_LIBRARY_WINDOW_STATE _GLFWlibraryWin32 win32;
+#define GLFW_WIN32_MONITOR_STATE _GLFWmonitorWin32 win32;
+#define GLFW_WIN32_CURSOR_STATE _GLFWcursorWin32 win32;
+
+#define GLFW_WGL_CONTEXT_STATE _GLFWcontextWGL wgl;
+#define GLFW_WGL_LIBRARY_CONTEXT_STATE _GLFWlibraryWGL wgl;
+
+
+// WGL-specific per-context data
+//
+typedef struct _GLFWcontextWGL
+{
+ HDC dc;
+ HGLRC handle;
+ int interval;
+} _GLFWcontextWGL;
+
+// WGL-specific global data
+//
+typedef struct _GLFWlibraryWGL
+{
+ HINSTANCE instance;
+ PFN_wglCreateContext CreateContext;
+ PFN_wglDeleteContext DeleteContext;
+ PFN_wglGetProcAddress GetProcAddress;
+ PFN_wglGetCurrentDC GetCurrentDC;
+ PFN_wglGetCurrentContext GetCurrentContext;
+ PFN_wglMakeCurrent MakeCurrent;
+ PFN_wglShareLists ShareLists;
+
+ PFNWGLSWAPINTERVALEXTPROC SwapIntervalEXT;
+ PFNWGLGETPIXELFORMATATTRIBIVARBPROC GetPixelFormatAttribivARB;
+ PFNWGLGETEXTENSIONSSTRINGEXTPROC GetExtensionsStringEXT;
+ PFNWGLGETEXTENSIONSSTRINGARBPROC GetExtensionsStringARB;
+ PFNWGLCREATECONTEXTATTRIBSARBPROC CreateContextAttribsARB;
+ GLFWbool EXT_swap_control;
+ GLFWbool EXT_colorspace;
+ GLFWbool ARB_multisample;
+ GLFWbool ARB_framebuffer_sRGB;
+ GLFWbool EXT_framebuffer_sRGB;
+ GLFWbool ARB_pixel_format;
+ GLFWbool ARB_create_context;
+ GLFWbool ARB_create_context_profile;
+ GLFWbool EXT_create_context_es2_profile;
+ GLFWbool ARB_create_context_robustness;
+ GLFWbool ARB_create_context_no_error;
+ GLFWbool ARB_context_flush_control;
+} _GLFWlibraryWGL;
+
+// Win32-specific per-window data
+//
+typedef struct _GLFWwindowWin32
+{
+ HWND handle;
+ HICON bigIcon;
+ HICON smallIcon;
+
+ GLFWbool cursorTracked;
+ GLFWbool frameAction;
+ GLFWbool iconified;
+ GLFWbool maximized;
+ // Whether to enable framebuffer transparency on DWM
+ GLFWbool transparent;
+ GLFWbool scaleToMonitor;
+ GLFWbool keymenu;
+
+ // Cached size used to filter out duplicate events
+ int width, height;
+
+ // The last received cursor position, regardless of source
+ int lastCursorPosX, lastCursorPosY;
+ // The last received high surrogate when decoding pairs of UTF-16 messages
+ WCHAR highSurrogate;
+} _GLFWwindowWin32;
+
+// Win32-specific global data
+//
+typedef struct _GLFWlibraryWin32
+{
+ HINSTANCE instance;
+ HWND helperWindowHandle;
+ HDEVNOTIFY deviceNotificationHandle;
+ int acquiredMonitorCount;
+ char* clipboardString;
+ short int keycodes[512];
+ short int scancodes[GLFW_KEY_LAST + 1];
+ char keynames[GLFW_KEY_LAST + 1][5];
+ // Where to place the cursor when re-enabled
+ double restoreCursorPosX, restoreCursorPosY;
+ // The window whose disabled cursor mode is active
+ _GLFWwindow* disabledCursorWindow;
+ RAWINPUT* rawInput;
+ int rawInputSize;
+ UINT mouseTrailSize;
+
+ struct {
+ HINSTANCE instance;
+ PFN_DirectInput8Create Create;
+ IDirectInput8W* api;
+ } dinput8;
+
+ struct {
+ HINSTANCE instance;
+ PFN_XInputGetCapabilities GetCapabilities;
+ PFN_XInputGetState GetState;
+ } xinput;
+
+ struct {
+ HINSTANCE instance;
+ PFN_SetProcessDPIAware SetProcessDPIAware_;
+ PFN_ChangeWindowMessageFilterEx ChangeWindowMessageFilterEx_;
+ PFN_EnableNonClientDpiScaling EnableNonClientDpiScaling_;
+ PFN_SetProcessDpiAwarenessContext SetProcessDpiAwarenessContext_;
+ PFN_GetDpiForWindow GetDpiForWindow_;
+ PFN_AdjustWindowRectExForDpi AdjustWindowRectExForDpi_;
+ PFN_GetSystemMetricsForDpi GetSystemMetricsForDpi_;
+ } user32;
+
+ struct {
+ HINSTANCE instance;
+ PFN_DwmIsCompositionEnabled IsCompositionEnabled;
+ PFN_DwmFlush Flush;
+ PFN_DwmEnableBlurBehindWindow EnableBlurBehindWindow;
+ PFN_DwmGetColorizationColor GetColorizationColor;
+ } dwmapi;
+
+ struct {
+ HINSTANCE instance;
+ PFN_SetProcessDpiAwareness SetProcessDpiAwareness_;
+ PFN_GetDpiForMonitor GetDpiForMonitor_;
+ } shcore;
+
+ struct {
+ HINSTANCE instance;
+ PFN_RtlVerifyVersionInfo RtlVerifyVersionInfo_;
+ } ntdll;
+} _GLFWlibraryWin32;
+
+// Win32-specific per-monitor data
+//
+typedef struct _GLFWmonitorWin32
+{
+ HMONITOR handle;
+ // This size matches the static size of DISPLAY_DEVICE.DeviceName
+ WCHAR adapterName[32];
+ WCHAR displayName[32];
+ char publicAdapterName[32];
+ char publicDisplayName[32];
+ GLFWbool modesPruned;
+ GLFWbool modeChanged;
+} _GLFWmonitorWin32;
+
+// Win32-specific per-cursor data
+//
+typedef struct _GLFWcursorWin32
+{
+ HCURSOR handle;
+} _GLFWcursorWin32;
+
+
+GLFWbool _glfwConnectWin32(int platformID, _GLFWplatform* platform);
+int _glfwInitWin32(void);
+void _glfwTerminateWin32(void);
+
+GLFWbool _glfwRegisterWindowClassWin32(void);
+void _glfwUnregisterWindowClassWin32(void);
+
+WCHAR* _glfwCreateWideStringFromUTF8Win32(const char* source);
+char* _glfwCreateUTF8FromWideStringWin32(const WCHAR* source);
+BOOL _glfwIsWindowsVersionOrGreaterWin32(WORD major, WORD minor, WORD sp);
+BOOL _glfwIsWindows10BuildOrGreaterWin32(WORD build);
+void _glfwInputErrorWin32(int error, const char* description);
+void _glfwUpdateKeyNamesWin32(void);
+
+void _glfwPollMonitorsWin32(void);
+void _glfwSetVideoModeWin32(_GLFWmonitor* monitor, const GLFWvidmode* desired);
+void _glfwRestoreVideoModeWin32(_GLFWmonitor* monitor);
+void _glfwGetHMONITORContentScaleWin32(HMONITOR handle, float* xscale, float* yscale);
+
+int _glfwCreateWindowWin32(_GLFWwindow* window, const _GLFWwndconfig* wndconfig, const _GLFWctxconfig* ctxconfig, const _GLFWfbconfig* fbconfig);
+void _glfwDestroyWindowWin32(_GLFWwindow* window);
+void _glfwSetWindowTitleWin32(_GLFWwindow* window, const char* title);
+void _glfwSetWindowIconWin32(_GLFWwindow* window, int count, const GLFWimage* images);
+void _glfwGetWindowPosWin32(_GLFWwindow* window, int* xpos, int* ypos);
+void _glfwSetWindowPosWin32(_GLFWwindow* window, int xpos, int ypos);
+void _glfwGetWindowSizeWin32(_GLFWwindow* window, int* width, int* height);
+void _glfwSetWindowSizeWin32(_GLFWwindow* window, int width, int height);
+void _glfwSetWindowSizeLimitsWin32(_GLFWwindow* window, int minwidth, int minheight, int maxwidth, int maxheight);
+void _glfwSetWindowAspectRatioWin32(_GLFWwindow* window, int numer, int denom);
+void _glfwGetFramebufferSizeWin32(_GLFWwindow* window, int* width, int* height);
+void _glfwGetWindowFrameSizeWin32(_GLFWwindow* window, int* left, int* top, int* right, int* bottom);
+void _glfwGetWindowContentScaleWin32(_GLFWwindow* window, float* xscale, float* yscale);
+void _glfwIconifyWindowWin32(_GLFWwindow* window);
+void _glfwRestoreWindowWin32(_GLFWwindow* window);
+void _glfwMaximizeWindowWin32(_GLFWwindow* window);
+void _glfwShowWindowWin32(_GLFWwindow* window);
+void _glfwHideWindowWin32(_GLFWwindow* window);
+void _glfwRequestWindowAttentionWin32(_GLFWwindow* window);
+void _glfwFocusWindowWin32(_GLFWwindow* window);
+void _glfwSetWindowMonitorWin32(_GLFWwindow* window, _GLFWmonitor* monitor, int xpos, int ypos, int width, int height, int refreshRate);
+int _glfwWindowFocusedWin32(_GLFWwindow* window);
+int _glfwWindowIconifiedWin32(_GLFWwindow* window);
+int _glfwWindowVisibleWin32(_GLFWwindow* window);
+int _glfwWindowMaximizedWin32(_GLFWwindow* window);
+int _glfwWindowHoveredWin32(_GLFWwindow* window);
+int _glfwFramebufferTransparentWin32(_GLFWwindow* window);
+void _glfwSetWindowResizableWin32(_GLFWwindow* window, GLFWbool enabled);
+void _glfwSetWindowDecoratedWin32(_GLFWwindow* window, GLFWbool enabled);
+void _glfwSetWindowFloatingWin32(_GLFWwindow* window, GLFWbool enabled);
+void _glfwSetWindowMousePassthroughWin32(_GLFWwindow* window, GLFWbool enabled);
+float _glfwGetWindowOpacityWin32(_GLFWwindow* window);
+void _glfwSetWindowOpacityWin32(_GLFWwindow* window, float opacity);
+
+void _glfwSetRawMouseMotionWin32(_GLFWwindow *window, GLFWbool enabled);
+GLFWbool _glfwRawMouseMotionSupportedWin32(void);
+
+void _glfwPollEventsWin32(void);
+void _glfwWaitEventsWin32(void);
+void _glfwWaitEventsTimeoutWin32(double timeout);
+void _glfwPostEmptyEventWin32(void);
+
+void _glfwGetCursorPosWin32(_GLFWwindow* window, double* xpos, double* ypos);
+void _glfwSetCursorPosWin32(_GLFWwindow* window, double xpos, double ypos);
+void _glfwSetCursorModeWin32(_GLFWwindow* window, int mode);
+const char* _glfwGetScancodeNameWin32(int scancode);
+int _glfwGetKeyScancodeWin32(int key);
+int _glfwCreateCursorWin32(_GLFWcursor* cursor, const GLFWimage* image, int xhot, int yhot);
+int _glfwCreateStandardCursorWin32(_GLFWcursor* cursor, int shape);
+void _glfwDestroyCursorWin32(_GLFWcursor* cursor);
+void _glfwSetCursorWin32(_GLFWwindow* window, _GLFWcursor* cursor);
+void _glfwSetClipboardStringWin32(const char* string);
+const char* _glfwGetClipboardStringWin32(void);
+
+EGLenum _glfwGetEGLPlatformWin32(EGLint** attribs);
+EGLNativeDisplayType _glfwGetEGLNativeDisplayWin32(void);
+EGLNativeWindowType _glfwGetEGLNativeWindowWin32(_GLFWwindow* window);
+
+void _glfwGetRequiredInstanceExtensionsWin32(char** extensions);
+int _glfwGetPhysicalDevicePresentationSupportWin32(VkInstance instance, VkPhysicalDevice device, uint32_t queuefamily);
+VkResult _glfwCreateWindowSurfaceWin32(VkInstance instance, _GLFWwindow* window, const VkAllocationCallbacks* allocator, VkSurfaceKHR* surface);
+
+void _glfwFreeMonitorWin32(_GLFWmonitor* monitor);
+void _glfwGetMonitorPosWin32(_GLFWmonitor* monitor, int* xpos, int* ypos);
+void _glfwGetMonitorContentScaleWin32(_GLFWmonitor* monitor, float* xscale, float* yscale);
+void _glfwGetMonitorWorkareaWin32(_GLFWmonitor* monitor, int* xpos, int* ypos, int* width, int* height);
+GLFWvidmode* _glfwGetVideoModesWin32(_GLFWmonitor* monitor, int* count);
+void _glfwGetVideoModeWin32(_GLFWmonitor* monitor, GLFWvidmode* mode);
+GLFWbool _glfwGetGammaRampWin32(_GLFWmonitor* monitor, GLFWgammaramp* ramp);
+void _glfwSetGammaRampWin32(_GLFWmonitor* monitor, const GLFWgammaramp* ramp);
+
+GLFWbool _glfwInitJoysticksWin32(void);
+void _glfwTerminateJoysticksWin32(void);
+int _glfwPollJoystickWin32(_GLFWjoystick* js, int mode);
+const char* _glfwGetMappingNameWin32(void);
+void _glfwUpdateGamepadGUIDWin32(char* guid);
+
+GLFWbool _glfwInitWGL(void);
+void _glfwTerminateWGL(void);
+GLFWbool _glfwCreateContextWGL(_GLFWwindow* window,
+ const _GLFWctxconfig* ctxconfig,
+ const _GLFWfbconfig* fbconfig);
+
diff --git a/chromium/third_party/dawn/third_party/glfw/src/win32_thread.c b/chromium/third_party/dawn/third_party/glfw/src/win32_thread.c
new file mode 100644
index 00000000000..35b8f99ebe4
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/src/win32_thread.c
@@ -0,0 +1,98 @@
+//========================================================================
+// GLFW 3.4 Win32 - www.glfw.org
+//------------------------------------------------------------------------
+// Copyright (c) 2002-2006 Marcus Geelnard
+// Copyright (c) 2006-2017 Camilla Löwy <elmindreda@glfw.org>
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would
+// be appreciated but is not required.
+//
+// 2. Altered source versions must be plainly marked as such, and must not
+// be misrepresented as being the original software.
+//
+// 3. This notice may not be removed or altered from any source
+// distribution.
+//
+//========================================================================
+// Please use C89 style variable declarations in this file because VS 2010
+// does not support C99 style declarations
+//========================================================================
+
+#include "internal.h"
+
+#include <assert.h>
+
+
+//////////////////////////////////////////////////////////////////////////
+////// GLFW platform API //////
+//////////////////////////////////////////////////////////////////////////
+
+GLFWbool _glfwPlatformCreateTls(_GLFWtls* tls)
+{
+ assert(tls->win32.allocated == GLFW_FALSE);
+
+ tls->win32.index = TlsAlloc();
+ if (tls->win32.index == TLS_OUT_OF_INDEXES)
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR, "Win32: Failed to allocate TLS index");
+ return GLFW_FALSE;
+ }
+
+ tls->win32.allocated = GLFW_TRUE;
+ return GLFW_TRUE;
+}
+
+void _glfwPlatformDestroyTls(_GLFWtls* tls)
+{
+ if (tls->win32.allocated)
+ TlsFree(tls->win32.index);
+ memset(tls, 0, sizeof(_GLFWtls));
+}
+
+void* _glfwPlatformGetTls(_GLFWtls* tls)
+{
+ assert(tls->win32.allocated == GLFW_TRUE);
+ return TlsGetValue(tls->win32.index);
+}
+
+void _glfwPlatformSetTls(_GLFWtls* tls, void* value)
+{
+ assert(tls->win32.allocated == GLFW_TRUE);
+ TlsSetValue(tls->win32.index, value);
+}
+
+GLFWbool _glfwPlatformCreateMutex(_GLFWmutex* mutex)
+{
+ assert(mutex->win32.allocated == GLFW_FALSE);
+ InitializeCriticalSection(&mutex->win32.section);
+ return mutex->win32.allocated = GLFW_TRUE;
+}
+
+void _glfwPlatformDestroyMutex(_GLFWmutex* mutex)
+{
+ if (mutex->win32.allocated)
+ DeleteCriticalSection(&mutex->win32.section);
+ memset(mutex, 0, sizeof(_GLFWmutex));
+}
+
+void _glfwPlatformLockMutex(_GLFWmutex* mutex)
+{
+ assert(mutex->win32.allocated == GLFW_TRUE);
+ EnterCriticalSection(&mutex->win32.section);
+}
+
+void _glfwPlatformUnlockMutex(_GLFWmutex* mutex)
+{
+ assert(mutex->win32.allocated == GLFW_TRUE);
+ LeaveCriticalSection(&mutex->win32.section);
+}
+
diff --git a/chromium/third_party/dawn/third_party/glfw/src/win32_thread.h b/chromium/third_party/dawn/third_party/glfw/src/win32_thread.h
new file mode 100644
index 00000000000..4b5a696f891
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/src/win32_thread.h
@@ -0,0 +1,48 @@
+//========================================================================
+// GLFW 3.4 Win32 - www.glfw.org
+//------------------------------------------------------------------------
+// Copyright (c) 2002-2006 Marcus Geelnard
+// Copyright (c) 2006-2017 Camilla Löwy <elmindreda@glfw.org>
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would
+// be appreciated but is not required.
+//
+// 2. Altered source versions must be plainly marked as such, and must not
+// be misrepresented as being the original software.
+//
+// 3. This notice may not be removed or altered from any source
+// distribution.
+//
+//========================================================================
+
+#include <windows.h>
+
+#define GLFW_WIN32_TLS_STATE _GLFWtlsWin32 win32;
+#define GLFW_WIN32_MUTEX_STATE _GLFWmutexWin32 win32;
+
+// Win32-specific thread local storage data
+//
+typedef struct _GLFWtlsWin32
+{
+ GLFWbool allocated;
+ DWORD index;
+} _GLFWtlsWin32;
+
+// Win32-specific mutex data
+//
+typedef struct _GLFWmutexWin32
+{
+ GLFWbool allocated;
+ CRITICAL_SECTION section;
+} _GLFWmutexWin32;
+
diff --git a/chromium/third_party/dawn/third_party/glfw/src/win32_time.c b/chromium/third_party/dawn/third_party/glfw/src/win32_time.c
new file mode 100644
index 00000000000..a1c64141924
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/src/win32_time.c
@@ -0,0 +1,53 @@
+//========================================================================
+// GLFW 3.4 Win32 - www.glfw.org
+//------------------------------------------------------------------------
+// Copyright (c) 2002-2006 Marcus Geelnard
+// Copyright (c) 2006-2017 Camilla Löwy <elmindreda@glfw.org>
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would
+// be appreciated but is not required.
+//
+// 2. Altered source versions must be plainly marked as such, and must not
+// be misrepresented as being the original software.
+//
+// 3. This notice may not be removed or altered from any source
+// distribution.
+//
+//========================================================================
+// Please use C89 style variable declarations in this file because VS 2010
+// does not support C99 style declarations
+//========================================================================
+
+#include "internal.h"
+
+
+//////////////////////////////////////////////////////////////////////////
+////// GLFW platform API //////
+//////////////////////////////////////////////////////////////////////////
+
+void _glfwPlatformInitTimer(void)
+{
+ QueryPerformanceFrequency((LARGE_INTEGER*) &_glfw.timer.win32.frequency);
+}
+
+uint64_t _glfwPlatformGetTimerValue(void)
+{
+ uint64_t value;
+ QueryPerformanceCounter((LARGE_INTEGER*) &value);
+ return value;
+}
+
+uint64_t _glfwPlatformGetTimerFrequency(void)
+{
+ return _glfw.timer.win32.frequency;
+}
+
diff --git a/chromium/third_party/dawn/third_party/glfw/src/win32_time.h b/chromium/third_party/dawn/third_party/glfw/src/win32_time.h
new file mode 100644
index 00000000000..da5afa41e40
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/src/win32_time.h
@@ -0,0 +1,38 @@
+//========================================================================
+// GLFW 3.4 Win32 - www.glfw.org
+//------------------------------------------------------------------------
+// Copyright (c) 2002-2006 Marcus Geelnard
+// Copyright (c) 2006-2017 Camilla Löwy <elmindreda@glfw.org>
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would
+// be appreciated but is not required.
+//
+// 2. Altered source versions must be plainly marked as such, and must not
+// be misrepresented as being the original software.
+//
+// 3. This notice may not be removed or altered from any source
+// distribution.
+//
+//========================================================================
+
+#include <windows.h>
+
+#define GLFW_WIN32_LIBRARY_TIMER_STATE _GLFWtimerWin32 win32;
+
+// Win32-specific global timer data
+//
+typedef struct _GLFWtimerWin32
+{
+ uint64_t frequency;
+} _GLFWtimerWin32;
+
diff --git a/chromium/third_party/dawn/third_party/glfw/src/win32_window.c b/chromium/third_party/dawn/third_party/glfw/src/win32_window.c
new file mode 100644
index 00000000000..333cef1b99c
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/src/win32_window.c
@@ -0,0 +1,2497 @@
+//========================================================================
+// GLFW 3.4 Win32 - www.glfw.org
+//------------------------------------------------------------------------
+// Copyright (c) 2002-2006 Marcus Geelnard
+// Copyright (c) 2006-2019 Camilla Löwy <elmindreda@glfw.org>
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would
+// be appreciated but is not required.
+//
+// 2. Altered source versions must be plainly marked as such, and must not
+// be misrepresented as being the original software.
+//
+// 3. This notice may not be removed or altered from any source
+// distribution.
+//
+//========================================================================
+// Please use C89 style variable declarations in this file because VS 2010
+// does not support C99 style declarations
+//========================================================================
+
+#include "internal.h"
+
+#include <limits.h>
+#include <stdlib.h>
+#include <string.h>
+#include <windowsx.h>
+#include <shellapi.h>
+
+// Returns the window style for the specified window
+//
+static DWORD getWindowStyle(const _GLFWwindow* window)
+{
+ DWORD style = WS_CLIPSIBLINGS | WS_CLIPCHILDREN;
+
+ if (window->monitor)
+ style |= WS_POPUP;
+ else
+ {
+ style |= WS_SYSMENU | WS_MINIMIZEBOX;
+
+ if (window->decorated)
+ {
+ style |= WS_CAPTION;
+
+ if (window->resizable)
+ style |= WS_MAXIMIZEBOX | WS_THICKFRAME;
+ }
+ else
+ style |= WS_POPUP;
+ }
+
+ return style;
+}
+
+// Returns the extended window style for the specified window
+//
+static DWORD getWindowExStyle(const _GLFWwindow* window)
+{
+ DWORD style = WS_EX_APPWINDOW;
+
+ if (window->monitor || window->floating)
+ style |= WS_EX_TOPMOST;
+
+ return style;
+}
+
+// Returns the image whose area most closely matches the desired one
+//
+static const GLFWimage* chooseImage(int count, const GLFWimage* images,
+ int width, int height)
+{
+ int i, leastDiff = INT_MAX;
+ const GLFWimage* closest = NULL;
+
+ for (i = 0; i < count; i++)
+ {
+ const int currDiff = abs(images[i].width * images[i].height -
+ width * height);
+ if (currDiff < leastDiff)
+ {
+ closest = images + i;
+ leastDiff = currDiff;
+ }
+ }
+
+ return closest;
+}
+
+// Creates an RGBA icon or cursor
+//
+static HICON createIcon(const GLFWimage* image, int xhot, int yhot, GLFWbool icon)
+{
+ int i;
+ HDC dc;
+ HICON handle;
+ HBITMAP color, mask;
+ BITMAPV5HEADER bi;
+ ICONINFO ii;
+ unsigned char* target = NULL;
+ unsigned char* source = image->pixels;
+
+ ZeroMemory(&bi, sizeof(bi));
+ bi.bV5Size = sizeof(bi);
+ bi.bV5Width = image->width;
+ bi.bV5Height = -image->height;
+ bi.bV5Planes = 1;
+ bi.bV5BitCount = 32;
+ bi.bV5Compression = BI_BITFIELDS;
+ bi.bV5RedMask = 0x00ff0000;
+ bi.bV5GreenMask = 0x0000ff00;
+ bi.bV5BlueMask = 0x000000ff;
+ bi.bV5AlphaMask = 0xff000000;
+
+ dc = GetDC(NULL);
+ color = CreateDIBSection(dc,
+ (BITMAPINFO*) &bi,
+ DIB_RGB_COLORS,
+ (void**) &target,
+ NULL,
+ (DWORD) 0);
+ ReleaseDC(NULL, dc);
+
+ if (!color)
+ {
+ _glfwInputErrorWin32(GLFW_PLATFORM_ERROR,
+ "Win32: Failed to create RGBA bitmap");
+ return NULL;
+ }
+
+ mask = CreateBitmap(image->width, image->height, 1, 1, NULL);
+ if (!mask)
+ {
+ _glfwInputErrorWin32(GLFW_PLATFORM_ERROR,
+ "Win32: Failed to create mask bitmap");
+ DeleteObject(color);
+ return NULL;
+ }
+
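+    // Copy the RGBA source pixels into the BGRA layout of the DIB section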
+ for (i = 0; i < image->width * image->height; i++)
+ {
+ target[0] = source[2];
+ target[1] = source[1];
+ target[2] = source[0];
+ target[3] = source[3];
+ target += 4;
+ source += 4;
+ }
+
+ ZeroMemory(&ii, sizeof(ii));
+ ii.fIcon = icon;
+ ii.xHotspot = xhot;
+ ii.yHotspot = yhot;
+ ii.hbmMask = mask;
+ ii.hbmColor = color;
+
+ handle = CreateIconIndirect(&ii);
+
+ DeleteObject(color);
+ DeleteObject(mask);
+
+ if (!handle)
+ {
+ if (icon)
+ {
+ _glfwInputErrorWin32(GLFW_PLATFORM_ERROR,
+ "Win32: Failed to create icon");
+ }
+ else
+ {
+ _glfwInputErrorWin32(GLFW_PLATFORM_ERROR,
+ "Win32: Failed to create cursor");
+ }
+ }
+
+ return handle;
+}
+
+// Translate content area size to full window size according to styles and DPI
+//
+static void getFullWindowSize(DWORD style, DWORD exStyle,
+ int contentWidth, int contentHeight,
+ int* fullWidth, int* fullHeight,
+ UINT dpi)
+{
+ RECT rect = { 0, 0, contentWidth, contentHeight };
+
+ if (_glfwIsWindows10Version1607OrGreaterWin32())
+ AdjustWindowRectExForDpi(&rect, style, FALSE, exStyle, dpi);
+ else
+ AdjustWindowRectEx(&rect, style, FALSE, exStyle);
+
+ *fullWidth = rect.right - rect.left;
+ *fullHeight = rect.bottom - rect.top;
+}
+
+// Enforce the content area aspect ratio based on which edge is being dragged
+//
+static void applyAspectRatio(_GLFWwindow* window, int edge, RECT* area)
+{
+ int xoff, yoff;
+ UINT dpi = USER_DEFAULT_SCREEN_DPI;
+ const float ratio = (float) window->numer / (float) window->denom;
+
+ if (_glfwIsWindows10Version1607OrGreaterWin32())
+ dpi = GetDpiForWindow(window->win32.handle);
+
+ getFullWindowSize(getWindowStyle(window), getWindowExStyle(window),
+ 0, 0, &xoff, &yoff, dpi);
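+    // xoff and yoff are the extra width and height added by the window frame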
+
+ if (edge == WMSZ_LEFT || edge == WMSZ_BOTTOMLEFT ||
+ edge == WMSZ_RIGHT || edge == WMSZ_BOTTOMRIGHT)
+ {
+ area->bottom = area->top + yoff +
+ (int) ((area->right - area->left - xoff) / ratio);
+ }
+ else if (edge == WMSZ_TOPLEFT || edge == WMSZ_TOPRIGHT)
+ {
+ area->top = area->bottom - yoff -
+ (int) ((area->right - area->left - xoff) / ratio);
+ }
+ else if (edge == WMSZ_TOP || edge == WMSZ_BOTTOM)
+ {
+ area->right = area->left + xoff +
+ (int) ((area->bottom - area->top - yoff) * ratio);
+ }
+}
+
+// Updates the cursor image according to its cursor mode
+//
+static void updateCursorImage(_GLFWwindow* window)
+{
+ if (window->cursorMode == GLFW_CURSOR_NORMAL)
+ {
+ if (window->cursor)
+ SetCursor(window->cursor->win32.handle);
+ else
+ SetCursor(LoadCursorW(NULL, IDC_ARROW));
+ }
+ else
+ SetCursor(NULL);
+}
+
+// Updates the cursor clip rect
+//
+static void updateClipRect(_GLFWwindow* window)
+{
+ if (window)
+ {
+ RECT clipRect;
+ GetClientRect(window->win32.handle, &clipRect);
+ ClientToScreen(window->win32.handle, (POINT*) &clipRect.left);
+ ClientToScreen(window->win32.handle, (POINT*) &clipRect.right);
+ ClipCursor(&clipRect);
+ }
+ else
+ ClipCursor(NULL);
+}
+
+// Enables WM_INPUT messages for the mouse for the specified window
+//
+static void enableRawMouseMotion(_GLFWwindow* window)
+{
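+    // HID usage page 0x01 (generic desktop) and usage 0x02 (mouse)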
+ const RAWINPUTDEVICE rid = { 0x01, 0x02, 0, window->win32.handle };
+
+ if (!RegisterRawInputDevices(&rid, 1, sizeof(rid)))
+ {
+ _glfwInputErrorWin32(GLFW_PLATFORM_ERROR,
+ "Win32: Failed to register raw input device");
+ }
+}
+
+// Disables WM_INPUT messages for the mouse
+//
+static void disableRawMouseMotion(_GLFWwindow* window)
+{
+ const RAWINPUTDEVICE rid = { 0x01, 0x02, RIDEV_REMOVE, NULL };
+
+ if (!RegisterRawInputDevices(&rid, 1, sizeof(rid)))
+ {
+ _glfwInputErrorWin32(GLFW_PLATFORM_ERROR,
+ "Win32: Failed to remove raw input device");
+ }
+}
+
+// Apply disabled cursor mode to a focused window
+//
+static void disableCursor(_GLFWwindow* window)
+{
+ _glfw.win32.disabledCursorWindow = window;
+ _glfwGetCursorPosWin32(window,
+ &_glfw.win32.restoreCursorPosX,
+ &_glfw.win32.restoreCursorPosY);
+ updateCursorImage(window);
+ _glfwCenterCursorInContentArea(window);
+ updateClipRect(window);
+
+ if (window->rawMouseMotion)
+ enableRawMouseMotion(window);
+}
+
+// Exit disabled cursor mode for the specified window
+//
+static void enableCursor(_GLFWwindow* window)
+{
+ if (window->rawMouseMotion)
+ disableRawMouseMotion(window);
+
+ _glfw.win32.disabledCursorWindow = NULL;
+ updateClipRect(NULL);
+ _glfwSetCursorPosWin32(window,
+ _glfw.win32.restoreCursorPosX,
+ _glfw.win32.restoreCursorPosY);
+ updateCursorImage(window);
+}
+
+// Returns whether the cursor is in the content area of the specified window
+//
+static GLFWbool cursorInContentArea(_GLFWwindow* window)
+{
+ RECT area;
+ POINT pos;
+
+ if (!GetCursorPos(&pos))
+ return GLFW_FALSE;
+
+ if (WindowFromPoint(pos) != window->win32.handle)
+ return GLFW_FALSE;
+
+ GetClientRect(window->win32.handle, &area);
+ ClientToScreen(window->win32.handle, (POINT*) &area.left);
+ ClientToScreen(window->win32.handle, (POINT*) &area.right);
+
+ return PtInRect(&area, pos);
+}
+
+// Update native window styles to match attributes
+//
+static void updateWindowStyles(const _GLFWwindow* window)
+{
+ RECT rect;
+ DWORD style = GetWindowLongW(window->win32.handle, GWL_STYLE);
+ style &= ~(WS_OVERLAPPEDWINDOW | WS_POPUP);
+ style |= getWindowStyle(window);
+
+ GetClientRect(window->win32.handle, &rect);
+
+ if (_glfwIsWindows10Version1607OrGreaterWin32())
+ {
+ AdjustWindowRectExForDpi(&rect, style, FALSE,
+ getWindowExStyle(window),
+ GetDpiForWindow(window->win32.handle));
+ }
+ else
+ AdjustWindowRectEx(&rect, style, FALSE, getWindowExStyle(window));
+
+ ClientToScreen(window->win32.handle, (POINT*) &rect.left);
+ ClientToScreen(window->win32.handle, (POINT*) &rect.right);
+ SetWindowLongW(window->win32.handle, GWL_STYLE, style);
+ SetWindowPos(window->win32.handle, HWND_TOP,
+ rect.left, rect.top,
+ rect.right - rect.left, rect.bottom - rect.top,
+ SWP_FRAMECHANGED | SWP_NOACTIVATE | SWP_NOZORDER);
+}
+
+// Update window framebuffer transparency
+//
+static void updateFramebufferTransparency(const _GLFWwindow* window)
+{
+ BOOL composition, opaque;
+ DWORD color;
+
+ if (!IsWindowsVistaOrGreater())
+ return;
+
+ if (FAILED(DwmIsCompositionEnabled(&composition)) || !composition)
+ return;
+
+ if (IsWindows8OrGreater() ||
+ (SUCCEEDED(DwmGetColorizationColor(&color, &opaque)) && !opaque))
+ {
+ HRGN region = CreateRectRgn(0, 0, -1, -1);
+ DWM_BLURBEHIND bb = {0};
+ bb.dwFlags = DWM_BB_ENABLE | DWM_BB_BLURREGION;
+ bb.hRgnBlur = region;
+ bb.fEnable = TRUE;
+
+ DwmEnableBlurBehindWindow(window->win32.handle, &bb);
+ DeleteObject(region);
+ }
+ else
+ {
+ // HACK: Disable framebuffer transparency on Windows 7 when the
+ // colorization color is opaque, because otherwise the window
+        //       contents are blended additively with the previous frame instead
+ // of replacing it
+ DWM_BLURBEHIND bb = {0};
+ bb.dwFlags = DWM_BB_ENABLE;
+ DwmEnableBlurBehindWindow(window->win32.handle, &bb);
+ }
+}
+
+// Retrieves and translates modifier keys
+//
+static int getKeyMods(void)
+{
+ int mods = 0;
+
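+    // GetKeyState sets the high bit while a key is held and the low bit
+    // while a lock key is toggled on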
+ if (GetKeyState(VK_SHIFT) & 0x8000)
+ mods |= GLFW_MOD_SHIFT;
+ if (GetKeyState(VK_CONTROL) & 0x8000)
+ mods |= GLFW_MOD_CONTROL;
+ if (GetKeyState(VK_MENU) & 0x8000)
+ mods |= GLFW_MOD_ALT;
+ if ((GetKeyState(VK_LWIN) | GetKeyState(VK_RWIN)) & 0x8000)
+ mods |= GLFW_MOD_SUPER;
+ if (GetKeyState(VK_CAPITAL) & 1)
+ mods |= GLFW_MOD_CAPS_LOCK;
+ if (GetKeyState(VK_NUMLOCK) & 1)
+ mods |= GLFW_MOD_NUM_LOCK;
+
+ return mods;
+}
+
+static void fitToMonitor(_GLFWwindow* window)
+{
+ MONITORINFO mi = { sizeof(mi) };
+ GetMonitorInfoW(window->monitor->win32.handle, &mi);
+ SetWindowPos(window->win32.handle, HWND_TOPMOST,
+ mi.rcMonitor.left,
+ mi.rcMonitor.top,
+ mi.rcMonitor.right - mi.rcMonitor.left,
+ mi.rcMonitor.bottom - mi.rcMonitor.top,
+ SWP_NOZORDER | SWP_NOACTIVATE | SWP_NOCOPYBITS);
+}
+
+// Make the specified window and its video mode active on its monitor
+//
+static void acquireMonitor(_GLFWwindow* window)
+{
+ if (!_glfw.win32.acquiredMonitorCount)
+ {
+ SetThreadExecutionState(ES_CONTINUOUS | ES_DISPLAY_REQUIRED);
+
+ // HACK: When mouse trails are enabled the cursor becomes invisible when
+ // the OpenGL ICD switches to page flipping
+ SystemParametersInfoW(SPI_GETMOUSETRAILS, 0, &_glfw.win32.mouseTrailSize, 0);
+ SystemParametersInfoW(SPI_SETMOUSETRAILS, 0, 0, 0);
+ }
+
+ if (!window->monitor->window)
+ _glfw.win32.acquiredMonitorCount++;
+
+ _glfwSetVideoModeWin32(window->monitor, &window->videoMode);
+ _glfwInputMonitorWindow(window->monitor, window);
+}
+
+// Remove the window and restore the original video mode
+//
+static void releaseMonitor(_GLFWwindow* window)
+{
+ if (window->monitor->window != window)
+ return;
+
+ _glfw.win32.acquiredMonitorCount--;
+ if (!_glfw.win32.acquiredMonitorCount)
+ {
+ SetThreadExecutionState(ES_CONTINUOUS);
+
+ // HACK: Restore mouse trail length saved in acquireMonitor
+ SystemParametersInfoW(SPI_SETMOUSETRAILS, _glfw.win32.mouseTrailSize, 0, 0);
+ }
+
+ _glfwInputMonitorWindow(window->monitor, NULL);
+ _glfwRestoreVideoModeWin32(window->monitor);
+}
+
+// Manually maximize the window, for when SW_MAXIMIZE cannot be used
+//
+static void maximizeWindowManually(_GLFWwindow* window)
+{
+ RECT rect;
+ DWORD style;
+ MONITORINFO mi = { sizeof(mi) };
+
+ GetMonitorInfoW(MonitorFromWindow(window->win32.handle,
+ MONITOR_DEFAULTTONEAREST), &mi);
+
+ rect = mi.rcWork;
+
+ if (window->maxwidth != GLFW_DONT_CARE && window->maxheight != GLFW_DONT_CARE)
+ {
+ rect.right = _glfw_min(rect.right, rect.left + window->maxwidth);
+ rect.bottom = _glfw_min(rect.bottom, rect.top + window->maxheight);
+ }
+
+ style = GetWindowLongW(window->win32.handle, GWL_STYLE);
+ style |= WS_MAXIMIZE;
+ SetWindowLongW(window->win32.handle, GWL_STYLE, style);
+
+ if (window->decorated)
+ {
+ const DWORD exStyle = GetWindowLongW(window->win32.handle, GWL_EXSTYLE);
+
+ if (_glfwIsWindows10Version1607OrGreaterWin32())
+ {
+ const UINT dpi = GetDpiForWindow(window->win32.handle);
+ AdjustWindowRectExForDpi(&rect, style, FALSE, exStyle, dpi);
+ OffsetRect(&rect, 0, GetSystemMetricsForDpi(SM_CYCAPTION, dpi));
+ }
+ else
+ {
+ AdjustWindowRectEx(&rect, style, FALSE, exStyle);
+ OffsetRect(&rect, 0, GetSystemMetrics(SM_CYCAPTION));
+ }
+
+ rect.bottom = _glfw_min(rect.bottom, mi.rcWork.bottom);
+ }
+
+ SetWindowPos(window->win32.handle, HWND_TOP,
+ rect.left,
+ rect.top,
+ rect.right - rect.left,
+ rect.bottom - rect.top,
+ SWP_NOACTIVATE | SWP_NOZORDER | SWP_FRAMECHANGED);
+}
+
+// Window callback function (handles window messages)
+//
+static LRESULT CALLBACK windowProc(HWND hWnd, UINT uMsg, WPARAM wParam, LPARAM lParam)
+{
+ _GLFWwindow* window = GetPropW(hWnd, L"GLFW");
+ if (!window)
+ {
+ // This is the message handling for the hidden helper window
+ // and for a regular window during its initial creation
+
+ switch (uMsg)
+ {
+ case WM_NCCREATE:
+ {
+ if (_glfwIsWindows10Version1607OrGreaterWin32())
+ {
+ const CREATESTRUCTW* cs = (const CREATESTRUCTW*) lParam;
+ const _GLFWwndconfig* wndconfig = cs->lpCreateParams;
+
+ // On per-monitor DPI aware V1 systems, only enable
+ // non-client scaling for windows that scale the client area
+ // We need WM_GETDPISCALEDSIZE from V2 to keep the client
+ // area static when the non-client area is scaled
+ if (wndconfig && wndconfig->scaleToMonitor)
+ EnableNonClientDpiScaling(hWnd);
+ }
+
+ break;
+ }
+
+ case WM_DISPLAYCHANGE:
+ _glfwPollMonitorsWin32();
+ break;
+
+ case WM_DEVICECHANGE:
+ {
+ if (!_glfw.joysticksInitialized)
+ break;
+
+ if (wParam == DBT_DEVICEARRIVAL)
+ {
+ DEV_BROADCAST_HDR* dbh = (DEV_BROADCAST_HDR*) lParam;
+ if (dbh && dbh->dbch_devicetype == DBT_DEVTYP_DEVICEINTERFACE)
+ _glfwDetectJoystickConnectionWin32();
+ }
+ else if (wParam == DBT_DEVICEREMOVECOMPLETE)
+ {
+ DEV_BROADCAST_HDR* dbh = (DEV_BROADCAST_HDR*) lParam;
+ if (dbh && dbh->dbch_devicetype == DBT_DEVTYP_DEVICEINTERFACE)
+ _glfwDetectJoystickDisconnectionWin32();
+ }
+
+ break;
+ }
+ }
+
+ return DefWindowProcW(hWnd, uMsg, wParam, lParam);
+ }
+
+ switch (uMsg)
+ {
+ case WM_MOUSEACTIVATE:
+ {
+ // HACK: Postpone cursor disabling when the window was activated by
+ // clicking a caption button
+ if (HIWORD(lParam) == WM_LBUTTONDOWN)
+ {
+ if (LOWORD(lParam) != HTCLIENT)
+ window->win32.frameAction = GLFW_TRUE;
+ }
+
+ break;
+ }
+
+ case WM_CAPTURECHANGED:
+ {
+ // HACK: Disable the cursor once the caption button action has been
+ // completed or cancelled
+ if (lParam == 0 && window->win32.frameAction)
+ {
+ if (window->cursorMode == GLFW_CURSOR_DISABLED)
+ disableCursor(window);
+
+ window->win32.frameAction = GLFW_FALSE;
+ }
+
+ break;
+ }
+
+ case WM_SETFOCUS:
+ {
+ _glfwInputWindowFocus(window, GLFW_TRUE);
+
+ // HACK: Do not disable cursor while the user is interacting with
+ // a caption button
+ if (window->win32.frameAction)
+ break;
+
+ if (window->cursorMode == GLFW_CURSOR_DISABLED)
+ disableCursor(window);
+
+ return 0;
+ }
+
+ case WM_KILLFOCUS:
+ {
+ if (window->cursorMode == GLFW_CURSOR_DISABLED)
+ enableCursor(window);
+
+ if (window->monitor && window->autoIconify)
+ _glfwIconifyWindowWin32(window);
+
+ _glfwInputWindowFocus(window, GLFW_FALSE);
+ return 0;
+ }
+
+ case WM_SYSCOMMAND:
+ {
+ switch (wParam & 0xfff0)
+ {
+ case SC_SCREENSAVE:
+ case SC_MONITORPOWER:
+ {
+ if (window->monitor)
+ {
+ // We are running in full screen mode, so disallow
+ // screen saver and screen blanking
+ return 0;
+ }
+ else
+ break;
+ }
+
+ // User trying to access application menu using ALT?
+ case SC_KEYMENU:
+ {
+ if (!window->win32.keymenu)
+ return 0;
+
+ break;
+ }
+ }
+ break;
+ }
+
+ case WM_CLOSE:
+ {
+ _glfwInputWindowCloseRequest(window);
+ return 0;
+ }
+
+ case WM_INPUTLANGCHANGE:
+ {
+ _glfwUpdateKeyNamesWin32();
+ break;
+ }
+
+ case WM_CHAR:
+ case WM_SYSCHAR:
+ {
+ if (wParam >= 0xd800 && wParam <= 0xdbff)
+ window->win32.highSurrogate = (WCHAR) wParam;
+ else
+ {
+ uint32_t codepoint = 0;
+
+ if (wParam >= 0xdc00 && wParam <= 0xdfff)
+ {
+ if (window->win32.highSurrogate)
+ {
+ codepoint += (window->win32.highSurrogate - 0xd800) << 10;
+ codepoint += (WCHAR) wParam - 0xdc00;
+ codepoint += 0x10000;
+ }
+ }
+ else
+ codepoint = (WCHAR) wParam;
+
+ window->win32.highSurrogate = 0;
+ _glfwInputChar(window, codepoint, getKeyMods(), uMsg != WM_SYSCHAR);
+ }
+
+ if (uMsg == WM_SYSCHAR && window->win32.keymenu)
+ break;
+
+ return 0;
+ }
+
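+        // NOTE (editorial sketch, not upstream code): a worked example of the
+        // surrogate-pair arithmetic in the WM_CHAR handler above. For U+1F600,
+        // Windows delivers the high surrogate 0xD83D followed by the low
+        // surrogate 0xDE00, and the decode is
+        //     ((0xD83D - 0xD800) << 10) + (0xDE00 - 0xDC00) + 0x10000
+        //   = 0xF400 + 0x200 + 0x10000
+        //   = 0x1F600
+        // which is the code point passed to _glfwInputChar.
+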
+ case WM_UNICHAR:
+ {
+ if (wParam == UNICODE_NOCHAR)
+ {
+ // WM_UNICHAR is not sent by Windows, but is sent by some
+ // third-party input method engine
+ // Returning TRUE here announces support for this message
+ return TRUE;
+ }
+
+ _glfwInputChar(window, (uint32_t) wParam, getKeyMods(), GLFW_TRUE);
+ return 0;
+ }
+
+ case WM_KEYDOWN:
+ case WM_SYSKEYDOWN:
+ case WM_KEYUP:
+ case WM_SYSKEYUP:
+ {
+ int key, scancode;
+ const int action = (HIWORD(lParam) & KF_UP) ? GLFW_RELEASE : GLFW_PRESS;
+ const int mods = getKeyMods();
+
+ scancode = (HIWORD(lParam) & (KF_EXTENDED | 0xff));
+ if (!scancode)
+ {
+ // NOTE: Some synthetic key messages have a scancode of zero
+ // HACK: Map the virtual key back to a usable scancode
+ scancode = MapVirtualKeyW((UINT) wParam, MAPVK_VK_TO_VSC);
+ }
+
+ // HACK: Alt+PrtSc has a different scancode than just PrtSc
+ if (scancode == 0x54)
+ scancode = 0x137;
+
+ // HACK: Ctrl+Pause has a different scancode than just Pause
+ if (scancode == 0x146)
+ scancode = 0x45;
+
+ key = _glfw.win32.keycodes[scancode];
+
+ // The Ctrl keys require special handling
+ if (wParam == VK_CONTROL)
+ {
+ if (HIWORD(lParam) & KF_EXTENDED)
+ {
+ // Right side keys have the extended key bit set
+ key = GLFW_KEY_RIGHT_CONTROL;
+ }
+ else
+ {
+ // NOTE: Alt Gr sends Left Ctrl followed by Right Alt
+ // HACK: We only want one event for Alt Gr, so if we detect
+ // this sequence we discard this Left Ctrl message now
+ // and later report Right Alt normally
+ MSG next;
+ const DWORD time = GetMessageTime();
+
+ if (PeekMessageW(&next, NULL, 0, 0, PM_NOREMOVE))
+ {
+ if (next.message == WM_KEYDOWN ||
+ next.message == WM_SYSKEYDOWN ||
+ next.message == WM_KEYUP ||
+ next.message == WM_SYSKEYUP)
+ {
+ if (next.wParam == VK_MENU &&
+ (HIWORD(next.lParam) & KF_EXTENDED) &&
+ next.time == time)
+ {
+ // Next message is Right Alt down so discard this
+ break;
+ }
+ }
+ }
+
+ // This is a regular Left Ctrl message
+ key = GLFW_KEY_LEFT_CONTROL;
+ }
+ }
+ else if (wParam == VK_PROCESSKEY)
+ {
+ // IME notifies that keys have been filtered by setting the
+ // virtual key-code to VK_PROCESSKEY
+ break;
+ }
+
+ if (action == GLFW_RELEASE && wParam == VK_SHIFT)
+ {
+ // HACK: Release both Shift keys on Shift up event, as when both
+ // are pressed the first release does not emit any event
+ // NOTE: The other half of this is in _glfwPollEventsWin32
+ _glfwInputKey(window, GLFW_KEY_LEFT_SHIFT, scancode, action, mods);
+ _glfwInputKey(window, GLFW_KEY_RIGHT_SHIFT, scancode, action, mods);
+ }
+ else if (wParam == VK_SNAPSHOT)
+ {
+ // HACK: Key down is not reported for the Print Screen key
+ _glfwInputKey(window, key, scancode, GLFW_PRESS, mods);
+ _glfwInputKey(window, key, scancode, GLFW_RELEASE, mods);
+ }
+ else
+ _glfwInputKey(window, key, scancode, action, mods);
+
+ break;
+ }
+
+ case WM_LBUTTONDOWN:
+ case WM_RBUTTONDOWN:
+ case WM_MBUTTONDOWN:
+ case WM_XBUTTONDOWN:
+ case WM_LBUTTONUP:
+ case WM_RBUTTONUP:
+ case WM_MBUTTONUP:
+ case WM_XBUTTONUP:
+ {
+ int i, button, action;
+
+ if (uMsg == WM_LBUTTONDOWN || uMsg == WM_LBUTTONUP)
+ button = GLFW_MOUSE_BUTTON_LEFT;
+ else if (uMsg == WM_RBUTTONDOWN || uMsg == WM_RBUTTONUP)
+ button = GLFW_MOUSE_BUTTON_RIGHT;
+ else if (uMsg == WM_MBUTTONDOWN || uMsg == WM_MBUTTONUP)
+ button = GLFW_MOUSE_BUTTON_MIDDLE;
+ else if (GET_XBUTTON_WPARAM(wParam) == XBUTTON1)
+ button = GLFW_MOUSE_BUTTON_4;
+ else
+ button = GLFW_MOUSE_BUTTON_5;
+
+ if (uMsg == WM_LBUTTONDOWN || uMsg == WM_RBUTTONDOWN ||
+ uMsg == WM_MBUTTONDOWN || uMsg == WM_XBUTTONDOWN)
+ {
+ action = GLFW_PRESS;
+ }
+ else
+ action = GLFW_RELEASE;
+
+ for (i = 0; i <= GLFW_MOUSE_BUTTON_LAST; i++)
+ {
+ if (window->mouseButtons[i] == GLFW_PRESS)
+ break;
+ }
+
+ if (i > GLFW_MOUSE_BUTTON_LAST)
+ SetCapture(hWnd);
+
+ _glfwInputMouseClick(window, button, action, getKeyMods());
+
+ for (i = 0; i <= GLFW_MOUSE_BUTTON_LAST; i++)
+ {
+ if (window->mouseButtons[i] == GLFW_PRESS)
+ break;
+ }
+
+ if (i > GLFW_MOUSE_BUTTON_LAST)
+ ReleaseCapture();
+
+ if (uMsg == WM_XBUTTONDOWN || uMsg == WM_XBUTTONUP)
+ return TRUE;
+
+ return 0;
+ }
+
+ case WM_MOUSEMOVE:
+ {
+ const int x = GET_X_LPARAM(lParam);
+ const int y = GET_Y_LPARAM(lParam);
+
+ if (!window->win32.cursorTracked)
+ {
+ TRACKMOUSEEVENT tme;
+ ZeroMemory(&tme, sizeof(tme));
+ tme.cbSize = sizeof(tme);
+ tme.dwFlags = TME_LEAVE;
+ tme.hwndTrack = window->win32.handle;
+ TrackMouseEvent(&tme);
+
+ window->win32.cursorTracked = GLFW_TRUE;
+ _glfwInputCursorEnter(window, GLFW_TRUE);
+ }
+
+ if (window->cursorMode == GLFW_CURSOR_DISABLED)
+ {
+ const int dx = x - window->win32.lastCursorPosX;
+ const int dy = y - window->win32.lastCursorPosY;
+
+ if (_glfw.win32.disabledCursorWindow != window)
+ break;
+ if (window->rawMouseMotion)
+ break;
+
+ _glfwInputCursorPos(window,
+ window->virtualCursorPosX + dx,
+ window->virtualCursorPosY + dy);
+ }
+ else
+ _glfwInputCursorPos(window, x, y);
+
+ window->win32.lastCursorPosX = x;
+ window->win32.lastCursorPosY = y;
+
+ return 0;
+ }
+
+ case WM_INPUT:
+ {
+ UINT size = 0;
+ HRAWINPUT ri = (HRAWINPUT) lParam;
+ RAWINPUT* data = NULL;
+ int dx, dy;
+
+ if (_glfw.win32.disabledCursorWindow != window)
+ break;
+ if (!window->rawMouseMotion)
+ break;
+
+ GetRawInputData(ri, RID_INPUT, NULL, &size, sizeof(RAWINPUTHEADER));
+ if (size > (UINT) _glfw.win32.rawInputSize)
+ {
+ _glfw_free(_glfw.win32.rawInput);
+ _glfw.win32.rawInput = _glfw_calloc(size, 1);
+ _glfw.win32.rawInputSize = size;
+ }
+
+ size = _glfw.win32.rawInputSize;
+ if (GetRawInputData(ri, RID_INPUT,
+ _glfw.win32.rawInput, &size,
+ sizeof(RAWINPUTHEADER)) == (UINT) -1)
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "Win32: Failed to retrieve raw input data");
+ break;
+ }
+
+ data = _glfw.win32.rawInput;
+ if (data->data.mouse.usFlags & MOUSE_MOVE_ABSOLUTE)
+ {
+ dx = data->data.mouse.lLastX - window->win32.lastCursorPosX;
+ dy = data->data.mouse.lLastY - window->win32.lastCursorPosY;
+ }
+ else
+ {
+ dx = data->data.mouse.lLastX;
+ dy = data->data.mouse.lLastY;
+ }
+
+ _glfwInputCursorPos(window,
+ window->virtualCursorPosX + dx,
+ window->virtualCursorPosY + dy);
+
+ window->win32.lastCursorPosX += dx;
+ window->win32.lastCursorPosY += dy;
+ break;
+ }
+
+ case WM_MOUSELEAVE:
+ {
+ window->win32.cursorTracked = GLFW_FALSE;
+ _glfwInputCursorEnter(window, GLFW_FALSE);
+ return 0;
+ }
+
+ case WM_MOUSEWHEEL:
+ {
+ _glfwInputScroll(window, 0.0, (SHORT) HIWORD(wParam) / (double) WHEEL_DELTA);
+ return 0;
+ }
+
+ case WM_MOUSEHWHEEL:
+ {
+ // This message is only sent on Windows Vista and later
+ // NOTE: The X-axis is inverted for consistency with macOS and X11
+ _glfwInputScroll(window, -((SHORT) HIWORD(wParam) / (double) WHEEL_DELTA), 0.0);
+ return 0;
+ }
+
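+        // NOTE (editorial): one detent of a standard mouse wheel reports
+        // HIWORD(wParam) as +/-WHEEL_DELTA (120), so the divisions above yield
+        // scroll offsets of +/-1.0 per notch; finer-grained wheels report
+        // fractions of WHEEL_DELTA and produce fractional offsets.
+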
+ case WM_ENTERSIZEMOVE:
+ case WM_ENTERMENULOOP:
+ {
+ if (window->win32.frameAction)
+ break;
+
+ // HACK: Enable the cursor while the user is moving or
+ // resizing the window or using the window menu
+ if (window->cursorMode == GLFW_CURSOR_DISABLED)
+ enableCursor(window);
+
+ break;
+ }
+
+ case WM_EXITSIZEMOVE:
+ case WM_EXITMENULOOP:
+ {
+ if (window->win32.frameAction)
+ break;
+
+ // HACK: Disable the cursor once the user is done moving or
+ // resizing the window or using the menu
+ if (window->cursorMode == GLFW_CURSOR_DISABLED)
+ disableCursor(window);
+
+ break;
+ }
+
+ case WM_SIZE:
+ {
+ const int width = LOWORD(lParam);
+ const int height = HIWORD(lParam);
+ const GLFWbool iconified = wParam == SIZE_MINIMIZED;
+ const GLFWbool maximized = wParam == SIZE_MAXIMIZED ||
+ (window->win32.maximized &&
+ wParam != SIZE_RESTORED);
+
+ if (_glfw.win32.disabledCursorWindow == window)
+ updateClipRect(window);
+
+ if (window->win32.iconified != iconified)
+ _glfwInputWindowIconify(window, iconified);
+
+ if (window->win32.maximized != maximized)
+ _glfwInputWindowMaximize(window, maximized);
+
+ if (width != window->win32.width || height != window->win32.height)
+ {
+ window->win32.width = width;
+ window->win32.height = height;
+
+ _glfwInputFramebufferSize(window, width, height);
+ _glfwInputWindowSize(window, width, height);
+ }
+
+ if (window->monitor && window->win32.iconified != iconified)
+ {
+ if (iconified)
+ releaseMonitor(window);
+ else
+ {
+ acquireMonitor(window);
+ fitToMonitor(window);
+ }
+ }
+
+ window->win32.iconified = iconified;
+ window->win32.maximized = maximized;
+ return 0;
+ }
+
+ case WM_MOVE:
+ {
+ if (_glfw.win32.disabledCursorWindow == window)
+ updateClipRect(window);
+
+        // NOTE: This cannot use the LOWORD/HIWORD macros recommended by MSDN,
+        //       as those do not handle negative window positions correctly
+ _glfwInputWindowPos(window,
+ GET_X_LPARAM(lParam),
+ GET_Y_LPARAM(lParam));
+ return 0;
+ }
+
+ case WM_SIZING:
+ {
+ if (window->numer == GLFW_DONT_CARE ||
+ window->denom == GLFW_DONT_CARE)
+ {
+ break;
+ }
+
+ applyAspectRatio(window, (int) wParam, (RECT*) lParam);
+ return TRUE;
+ }
+
+ case WM_GETMINMAXINFO:
+ {
+ int xoff, yoff;
+ UINT dpi = USER_DEFAULT_SCREEN_DPI;
+ MINMAXINFO* mmi = (MINMAXINFO*) lParam;
+
+ if (window->monitor)
+ break;
+
+ if (_glfwIsWindows10Version1607OrGreaterWin32())
+ dpi = GetDpiForWindow(window->win32.handle);
+
+ getFullWindowSize(getWindowStyle(window), getWindowExStyle(window),
+ 0, 0, &xoff, &yoff, dpi);
+
+ if (window->minwidth != GLFW_DONT_CARE &&
+ window->minheight != GLFW_DONT_CARE)
+ {
+ mmi->ptMinTrackSize.x = window->minwidth + xoff;
+ mmi->ptMinTrackSize.y = window->minheight + yoff;
+ }
+
+ if (window->maxwidth != GLFW_DONT_CARE &&
+ window->maxheight != GLFW_DONT_CARE)
+ {
+ mmi->ptMaxTrackSize.x = window->maxwidth + xoff;
+ mmi->ptMaxTrackSize.y = window->maxheight + yoff;
+ }
+
+ if (!window->decorated)
+ {
+ MONITORINFO mi;
+ const HMONITOR mh = MonitorFromWindow(window->win32.handle,
+ MONITOR_DEFAULTTONEAREST);
+
+ ZeroMemory(&mi, sizeof(mi));
+ mi.cbSize = sizeof(mi);
+ GetMonitorInfoW(mh, &mi);
+
+ mmi->ptMaxPosition.x = mi.rcWork.left - mi.rcMonitor.left;
+ mmi->ptMaxPosition.y = mi.rcWork.top - mi.rcMonitor.top;
+ mmi->ptMaxSize.x = mi.rcWork.right - mi.rcWork.left;
+ mmi->ptMaxSize.y = mi.rcWork.bottom - mi.rcWork.top;
+ }
+
+ return 0;
+ }
+
+ case WM_PAINT:
+ {
+ _glfwInputWindowDamage(window);
+ break;
+ }
+
+ case WM_ERASEBKGND:
+ {
+ return TRUE;
+ }
+
+ case WM_NCACTIVATE:
+ case WM_NCPAINT:
+ {
+ // Prevent title bar from being drawn after restoring a minimized
+ // undecorated window
+ if (!window->decorated)
+ return TRUE;
+
+ break;
+ }
+
+ case WM_DWMCOMPOSITIONCHANGED:
+ case WM_DWMCOLORIZATIONCOLORCHANGED:
+ {
+ if (window->win32.transparent)
+ updateFramebufferTransparency(window);
+ return 0;
+ }
+
+ case WM_GETDPISCALEDSIZE:
+ {
+ if (window->win32.scaleToMonitor)
+ break;
+
+ // Adjust the window size to keep the content area size constant
+ if (_glfwIsWindows10Version1703OrGreaterWin32())
+ {
+ RECT source = {0}, target = {0};
+ SIZE* size = (SIZE*) lParam;
+
+ AdjustWindowRectExForDpi(&source, getWindowStyle(window),
+ FALSE, getWindowExStyle(window),
+ GetDpiForWindow(window->win32.handle));
+ AdjustWindowRectExForDpi(&target, getWindowStyle(window),
+ FALSE, getWindowExStyle(window),
+ LOWORD(wParam));
+
+ size->cx += (target.right - target.left) -
+ (source.right - source.left);
+ size->cy += (target.bottom - target.top) -
+ (source.bottom - source.top);
+ return TRUE;
+ }
+
+ break;
+ }
+
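+        // NOTE (editorial sketch, hypothetical numbers): if the frame of this
+        // window measures 16x39 pixels at the current 96 DPI but 24x58 pixels
+        // at the suggested 144 DPI, the handler above adds the difference
+        // (8, 19) to the suggested size, so the content area keeps its size
+        // and only the non-client area grows with the DPI.
+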
+ case WM_DPICHANGED:
+ {
+ const float xscale = HIWORD(wParam) / (float) USER_DEFAULT_SCREEN_DPI;
+ const float yscale = LOWORD(wParam) / (float) USER_DEFAULT_SCREEN_DPI;
+
+ // Resize windowed mode windows that either permit rescaling or that
+ // need it to compensate for non-client area scaling
+ if (!window->monitor &&
+ (window->win32.scaleToMonitor ||
+ _glfwIsWindows10Version1703OrGreaterWin32()))
+ {
+ RECT* suggested = (RECT*) lParam;
+ SetWindowPos(window->win32.handle, HWND_TOP,
+ suggested->left,
+ suggested->top,
+ suggested->right - suggested->left,
+ suggested->bottom - suggested->top,
+ SWP_NOACTIVATE | SWP_NOZORDER);
+ }
+
+ _glfwInputWindowContentScale(window, xscale, yscale);
+ break;
+ }
+
+ case WM_SETCURSOR:
+ {
+ if (LOWORD(lParam) == HTCLIENT)
+ {
+ updateCursorImage(window);
+ return TRUE;
+ }
+
+ break;
+ }
+
+ case WM_DROPFILES:
+ {
+ HDROP drop = (HDROP) wParam;
+ POINT pt;
+ int i;
+
+ const int count = DragQueryFileW(drop, 0xffffffff, NULL, 0);
+ char** paths = _glfw_calloc(count, sizeof(char*));
+
+ // Move the mouse to the position of the drop
+ DragQueryPoint(drop, &pt);
+ _glfwInputCursorPos(window, pt.x, pt.y);
+
+ for (i = 0; i < count; i++)
+ {
+ const UINT length = DragQueryFileW(drop, i, NULL, 0);
+ WCHAR* buffer = _glfw_calloc((size_t) length + 1, sizeof(WCHAR));
+
+ DragQueryFileW(drop, i, buffer, length + 1);
+ paths[i] = _glfwCreateUTF8FromWideStringWin32(buffer);
+
+ _glfw_free(buffer);
+ }
+
+ _glfwInputDrop(window, count, (const char**) paths);
+
+ for (i = 0; i < count; i++)
+ _glfw_free(paths[i]);
+ _glfw_free(paths);
+
+ DragFinish(drop);
+ return 0;
+ }
+ }
+
+ return DefWindowProcW(hWnd, uMsg, wParam, lParam);
+}
+
+// Creates the GLFW window
+//
+static int createNativeWindow(_GLFWwindow* window,
+ const _GLFWwndconfig* wndconfig,
+ const _GLFWfbconfig* fbconfig)
+{
+ int xpos, ypos, fullWidth, fullHeight;
+ WCHAR* wideTitle;
+ DWORD style = getWindowStyle(window);
+ DWORD exStyle = getWindowExStyle(window);
+
+ if (window->monitor)
+ {
+ MONITORINFO mi = { sizeof(mi) };
+ GetMonitorInfoW(window->monitor->win32.handle, &mi);
+
+ // NOTE: This window placement is temporary and approximate, as the
+ // correct position and size cannot be known until the monitor
+ // video mode has been picked in _glfwSetVideoModeWin32
+ xpos = mi.rcMonitor.left;
+ ypos = mi.rcMonitor.top;
+ fullWidth = mi.rcMonitor.right - mi.rcMonitor.left;
+ fullHeight = mi.rcMonitor.bottom - mi.rcMonitor.top;
+ }
+ else
+ {
+ xpos = CW_USEDEFAULT;
+ ypos = CW_USEDEFAULT;
+
+ window->win32.maximized = wndconfig->maximized;
+ if (wndconfig->maximized)
+ style |= WS_MAXIMIZE;
+
+ getFullWindowSize(style, exStyle,
+ wndconfig->width, wndconfig->height,
+ &fullWidth, &fullHeight,
+ USER_DEFAULT_SCREEN_DPI);
+ }
+
+ wideTitle = _glfwCreateWideStringFromUTF8Win32(wndconfig->title);
+ if (!wideTitle)
+ return GLFW_FALSE;
+
+ window->win32.handle = CreateWindowExW(exStyle,
+ _GLFW_WNDCLASSNAME,
+ wideTitle,
+ style,
+ xpos, ypos,
+ fullWidth, fullHeight,
+ NULL, // No parent window
+ NULL, // No window menu
+ _glfw.win32.instance,
+ (LPVOID) wndconfig);
+
+ _glfw_free(wideTitle);
+
+ if (!window->win32.handle)
+ {
+ _glfwInputErrorWin32(GLFW_PLATFORM_ERROR,
+ "Win32: Failed to create window");
+ return GLFW_FALSE;
+ }
+
+ SetPropW(window->win32.handle, L"GLFW", window);
+
+ if (IsWindows7OrGreater())
+ {
+ ChangeWindowMessageFilterEx(window->win32.handle,
+ WM_DROPFILES, MSGFLT_ALLOW, NULL);
+ ChangeWindowMessageFilterEx(window->win32.handle,
+ WM_COPYDATA, MSGFLT_ALLOW, NULL);
+ ChangeWindowMessageFilterEx(window->win32.handle,
+ WM_COPYGLOBALDATA, MSGFLT_ALLOW, NULL);
+ }
+
+ window->win32.scaleToMonitor = wndconfig->scaleToMonitor;
+ window->win32.keymenu = wndconfig->win32.keymenu;
+
+ if (!window->monitor)
+ {
+ RECT rect = { 0, 0, wndconfig->width, wndconfig->height };
+ WINDOWPLACEMENT wp = { sizeof(wp) };
+ const HMONITOR mh = MonitorFromWindow(window->win32.handle,
+ MONITOR_DEFAULTTONEAREST);
+
+ // Adjust window rect to account for DPI scaling of the window frame and
+ // (if enabled) DPI scaling of the content area
+ // This cannot be done until we know what monitor the window was placed on
+ // Only update the restored window rect as the window may be maximized
+
+ if (wndconfig->scaleToMonitor)
+ {
+ float xscale, yscale;
+ _glfwGetHMONITORContentScaleWin32(mh, &xscale, &yscale);
+
+ if (xscale > 0.f && yscale > 0.f)
+ {
+ rect.right = (int) (rect.right * xscale);
+ rect.bottom = (int) (rect.bottom * yscale);
+ }
+ }
+
+ if (_glfwIsWindows10Version1607OrGreaterWin32())
+ {
+ AdjustWindowRectExForDpi(&rect, style, FALSE, exStyle,
+ GetDpiForWindow(window->win32.handle));
+ }
+ else
+ AdjustWindowRectEx(&rect, style, FALSE, exStyle);
+
+ GetWindowPlacement(window->win32.handle, &wp);
+ OffsetRect(&rect,
+ wp.rcNormalPosition.left - rect.left,
+ wp.rcNormalPosition.top - rect.top);
+
+ wp.rcNormalPosition = rect;
+ wp.showCmd = SW_HIDE;
+ SetWindowPlacement(window->win32.handle, &wp);
+
+        // Adjust the rect of a maximized undecorated window, because by default
+        // Windows will make such a window cover the whole monitor instead of
+        // just its work area
+
+ if (wndconfig->maximized && !wndconfig->decorated)
+ {
+ MONITORINFO mi = { sizeof(mi) };
+ GetMonitorInfoW(mh, &mi);
+
+ SetWindowPos(window->win32.handle, HWND_TOP,
+ mi.rcWork.left,
+ mi.rcWork.top,
+ mi.rcWork.right - mi.rcWork.left,
+ mi.rcWork.bottom - mi.rcWork.top,
+ SWP_NOACTIVATE | SWP_NOZORDER);
+ }
+ }
+
+ DragAcceptFiles(window->win32.handle, TRUE);
+
+ if (fbconfig->transparent)
+ {
+ updateFramebufferTransparency(window);
+ window->win32.transparent = GLFW_TRUE;
+ }
+
+ _glfwGetWindowSizeWin32(window, &window->win32.width, &window->win32.height);
+
+ return GLFW_TRUE;
+}
+
+// Registers the GLFW window class
+//
+GLFWbool _glfwRegisterWindowClassWin32(void)
+{
+ WNDCLASSEXW wc;
+
+ ZeroMemory(&wc, sizeof(wc));
+ wc.cbSize = sizeof(wc);
+ wc.style = CS_HREDRAW | CS_VREDRAW | CS_OWNDC;
+ wc.lpfnWndProc = windowProc;
+ wc.hInstance = _glfw.win32.instance;
+ wc.hCursor = LoadCursorW(NULL, IDC_ARROW);
+ wc.lpszClassName = _GLFW_WNDCLASSNAME;
+
+ // Load user-provided icon if available
+ wc.hIcon = LoadImageW(GetModuleHandleW(NULL),
+ L"GLFW_ICON", IMAGE_ICON,
+ 0, 0, LR_DEFAULTSIZE | LR_SHARED);
+ if (!wc.hIcon)
+ {
+ // No user-provided icon found, load default icon
+ wc.hIcon = LoadImageW(NULL,
+ IDI_APPLICATION, IMAGE_ICON,
+ 0, 0, LR_DEFAULTSIZE | LR_SHARED);
+ }
+
+ if (!RegisterClassExW(&wc))
+ {
+ _glfwInputErrorWin32(GLFW_PLATFORM_ERROR,
+ "Win32: Failed to register window class");
+ return GLFW_FALSE;
+ }
+
+ return GLFW_TRUE;
+}
+
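+// NOTE (editorial): the "GLFW_ICON" lookup above means an application can ship
+// its own window icon by adding a resource-script entry such as
+//
+//     GLFW_ICON ICON "my_icon.ico"
+//
+// (file name hypothetical) to the executable; otherwise the stock application
+// icon is used.
+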
+// Unregisters the GLFW window class
+//
+void _glfwUnregisterWindowClassWin32(void)
+{
+ UnregisterClassW(_GLFW_WNDCLASSNAME, _glfw.win32.instance);
+}
+
+int _glfwCreateWindowWin32(_GLFWwindow* window,
+ const _GLFWwndconfig* wndconfig,
+ const _GLFWctxconfig* ctxconfig,
+ const _GLFWfbconfig* fbconfig)
+{
+ if (!createNativeWindow(window, wndconfig, fbconfig))
+ return GLFW_FALSE;
+
+ if (ctxconfig->client != GLFW_NO_API)
+ {
+ if (ctxconfig->source == GLFW_NATIVE_CONTEXT_API)
+ {
+ if (!_glfwInitWGL())
+ return GLFW_FALSE;
+ if (!_glfwCreateContextWGL(window, ctxconfig, fbconfig))
+ return GLFW_FALSE;
+ }
+ else if (ctxconfig->source == GLFW_EGL_CONTEXT_API)
+ {
+ if (!_glfwInitEGL())
+ return GLFW_FALSE;
+ if (!_glfwCreateContextEGL(window, ctxconfig, fbconfig))
+ return GLFW_FALSE;
+ }
+ else if (ctxconfig->source == GLFW_OSMESA_CONTEXT_API)
+ {
+ if (!_glfwInitOSMesa())
+ return GLFW_FALSE;
+ if (!_glfwCreateContextOSMesa(window, ctxconfig, fbconfig))
+ return GLFW_FALSE;
+ }
+
+ if (!_glfwRefreshContextAttribs(window, ctxconfig))
+ return GLFW_FALSE;
+ }
+
+ if (wndconfig->mousePassthrough)
+ _glfwSetWindowMousePassthroughWin32(window, GLFW_TRUE);
+
+ if (window->monitor)
+ {
+ _glfwShowWindowWin32(window);
+ _glfwFocusWindowWin32(window);
+ acquireMonitor(window);
+ fitToMonitor(window);
+
+ if (wndconfig->centerCursor)
+ _glfwCenterCursorInContentArea(window);
+ }
+ else
+ {
+ if (wndconfig->visible)
+ {
+ _glfwShowWindowWin32(window);
+ if (wndconfig->focused)
+ _glfwFocusWindowWin32(window);
+ }
+ }
+
+ return GLFW_TRUE;
+}
+
+void _glfwDestroyWindowWin32(_GLFWwindow* window)
+{
+ if (window->monitor)
+ releaseMonitor(window);
+
+ if (window->context.destroy)
+ window->context.destroy(window);
+
+ if (_glfw.win32.disabledCursorWindow == window)
+ _glfw.win32.disabledCursorWindow = NULL;
+
+ if (window->win32.handle)
+ {
+ RemovePropW(window->win32.handle, L"GLFW");
+ DestroyWindow(window->win32.handle);
+ window->win32.handle = NULL;
+ }
+
+ if (window->win32.bigIcon)
+ DestroyIcon(window->win32.bigIcon);
+
+ if (window->win32.smallIcon)
+ DestroyIcon(window->win32.smallIcon);
+}
+
+void _glfwSetWindowTitleWin32(_GLFWwindow* window, const char* title)
+{
+ WCHAR* wideTitle = _glfwCreateWideStringFromUTF8Win32(title);
+ if (!wideTitle)
+ return;
+
+ SetWindowTextW(window->win32.handle, wideTitle);
+ _glfw_free(wideTitle);
+}
+
+void _glfwSetWindowIconWin32(_GLFWwindow* window, int count, const GLFWimage* images)
+{
+ HICON bigIcon = NULL, smallIcon = NULL;
+
+ if (count)
+ {
+ const GLFWimage* bigImage = chooseImage(count, images,
+ GetSystemMetrics(SM_CXICON),
+ GetSystemMetrics(SM_CYICON));
+ const GLFWimage* smallImage = chooseImage(count, images,
+ GetSystemMetrics(SM_CXSMICON),
+ GetSystemMetrics(SM_CYSMICON));
+
+ bigIcon = createIcon(bigImage, 0, 0, GLFW_TRUE);
+ smallIcon = createIcon(smallImage, 0, 0, GLFW_TRUE);
+ }
+ else
+ {
+ bigIcon = (HICON) GetClassLongPtrW(window->win32.handle, GCLP_HICON);
+ smallIcon = (HICON) GetClassLongPtrW(window->win32.handle, GCLP_HICONSM);
+ }
+
+ SendMessageW(window->win32.handle, WM_SETICON, ICON_BIG, (LPARAM) bigIcon);
+ SendMessageW(window->win32.handle, WM_SETICON, ICON_SMALL, (LPARAM) smallIcon);
+
+ if (window->win32.bigIcon)
+ DestroyIcon(window->win32.bigIcon);
+
+ if (window->win32.smallIcon)
+ DestroyIcon(window->win32.smallIcon);
+
+ if (count)
+ {
+ window->win32.bigIcon = bigIcon;
+ window->win32.smallIcon = smallIcon;
+ }
+}
+
+void _glfwGetWindowPosWin32(_GLFWwindow* window, int* xpos, int* ypos)
+{
+ POINT pos = { 0, 0 };
+ ClientToScreen(window->win32.handle, &pos);
+
+ if (xpos)
+ *xpos = pos.x;
+ if (ypos)
+ *ypos = pos.y;
+}
+
+void _glfwSetWindowPosWin32(_GLFWwindow* window, int xpos, int ypos)
+{
+ RECT rect = { xpos, ypos, xpos, ypos };
+
+ if (_glfwIsWindows10Version1607OrGreaterWin32())
+ {
+ AdjustWindowRectExForDpi(&rect, getWindowStyle(window),
+ FALSE, getWindowExStyle(window),
+ GetDpiForWindow(window->win32.handle));
+ }
+ else
+ {
+ AdjustWindowRectEx(&rect, getWindowStyle(window),
+ FALSE, getWindowExStyle(window));
+ }
+
+ SetWindowPos(window->win32.handle, NULL, rect.left, rect.top, 0, 0,
+ SWP_NOACTIVATE | SWP_NOZORDER | SWP_NOSIZE);
+}
+
+void _glfwGetWindowSizeWin32(_GLFWwindow* window, int* width, int* height)
+{
+ RECT area;
+ GetClientRect(window->win32.handle, &area);
+
+ if (width)
+ *width = area.right;
+ if (height)
+ *height = area.bottom;
+}
+
+void _glfwSetWindowSizeWin32(_GLFWwindow* window, int width, int height)
+{
+ if (window->monitor)
+ {
+ if (window->monitor->window == window)
+ {
+ acquireMonitor(window);
+ fitToMonitor(window);
+ }
+ }
+ else
+ {
+ RECT rect = { 0, 0, width, height };
+
+ if (_glfwIsWindows10Version1607OrGreaterWin32())
+ {
+ AdjustWindowRectExForDpi(&rect, getWindowStyle(window),
+ FALSE, getWindowExStyle(window),
+ GetDpiForWindow(window->win32.handle));
+ }
+ else
+ {
+ AdjustWindowRectEx(&rect, getWindowStyle(window),
+ FALSE, getWindowExStyle(window));
+ }
+
+ SetWindowPos(window->win32.handle, HWND_TOP,
+ 0, 0, rect.right - rect.left, rect.bottom - rect.top,
+ SWP_NOACTIVATE | SWP_NOOWNERZORDER | SWP_NOMOVE | SWP_NOZORDER);
+ }
+}
+
+void _glfwSetWindowSizeLimitsWin32(_GLFWwindow* window,
+ int minwidth, int minheight,
+ int maxwidth, int maxheight)
+{
+ RECT area;
+
+ if ((minwidth == GLFW_DONT_CARE || minheight == GLFW_DONT_CARE) &&
+ (maxwidth == GLFW_DONT_CARE || maxheight == GLFW_DONT_CARE))
+ {
+ return;
+ }
+
+ GetWindowRect(window->win32.handle, &area);
+ MoveWindow(window->win32.handle,
+ area.left, area.top,
+ area.right - area.left,
+ area.bottom - area.top, TRUE);
+}
+
+void _glfwSetWindowAspectRatioWin32(_GLFWwindow* window, int numer, int denom)
+{
+ RECT area;
+
+ if (numer == GLFW_DONT_CARE || denom == GLFW_DONT_CARE)
+ return;
+
+ GetWindowRect(window->win32.handle, &area);
+ applyAspectRatio(window, WMSZ_BOTTOMRIGHT, &area);
+ MoveWindow(window->win32.handle,
+ area.left, area.top,
+ area.right - area.left,
+ area.bottom - area.top, TRUE);
+}
+
+void _glfwGetFramebufferSizeWin32(_GLFWwindow* window, int* width, int* height)
+{
+ _glfwGetWindowSizeWin32(window, width, height);
+}
+
+void _glfwGetWindowFrameSizeWin32(_GLFWwindow* window,
+ int* left, int* top,
+ int* right, int* bottom)
+{
+ RECT rect;
+ int width, height;
+
+ _glfwGetWindowSizeWin32(window, &width, &height);
+ SetRect(&rect, 0, 0, width, height);
+
+ if (_glfwIsWindows10Version1607OrGreaterWin32())
+ {
+ AdjustWindowRectExForDpi(&rect, getWindowStyle(window),
+ FALSE, getWindowExStyle(window),
+ GetDpiForWindow(window->win32.handle));
+ }
+ else
+ {
+ AdjustWindowRectEx(&rect, getWindowStyle(window),
+ FALSE, getWindowExStyle(window));
+ }
+
+ if (left)
+ *left = -rect.left;
+ if (top)
+ *top = -rect.top;
+ if (right)
+ *right = rect.right - width;
+ if (bottom)
+ *bottom = rect.bottom - height;
+}
+
+void _glfwGetWindowContentScaleWin32(_GLFWwindow* window, float* xscale, float* yscale)
+{
+ const HANDLE handle = MonitorFromWindow(window->win32.handle,
+ MONITOR_DEFAULTTONEAREST);
+ _glfwGetHMONITORContentScaleWin32(handle, xscale, yscale);
+}
+
+void _glfwIconifyWindowWin32(_GLFWwindow* window)
+{
+ ShowWindow(window->win32.handle, SW_MINIMIZE);
+}
+
+void _glfwRestoreWindowWin32(_GLFWwindow* window)
+{
+ ShowWindow(window->win32.handle, SW_RESTORE);
+}
+
+void _glfwMaximizeWindowWin32(_GLFWwindow* window)
+{
+ if (IsWindowVisible(window->win32.handle))
+ ShowWindow(window->win32.handle, SW_MAXIMIZE);
+ else
+ maximizeWindowManually(window);
+}
+
+void _glfwShowWindowWin32(_GLFWwindow* window)
+{
+ ShowWindow(window->win32.handle, SW_SHOWNA);
+}
+
+void _glfwHideWindowWin32(_GLFWwindow* window)
+{
+ ShowWindow(window->win32.handle, SW_HIDE);
+}
+
+void _glfwRequestWindowAttentionWin32(_GLFWwindow* window)
+{
+ FlashWindow(window->win32.handle, TRUE);
+}
+
+void _glfwFocusWindowWin32(_GLFWwindow* window)
+{
+ BringWindowToTop(window->win32.handle);
+ SetForegroundWindow(window->win32.handle);
+ SetFocus(window->win32.handle);
+}
+
+void _glfwSetWindowMonitorWin32(_GLFWwindow* window,
+ _GLFWmonitor* monitor,
+ int xpos, int ypos,
+ int width, int height,
+ int refreshRate)
+{
+ if (window->monitor == monitor)
+ {
+ if (monitor)
+ {
+ if (monitor->window == window)
+ {
+ acquireMonitor(window);
+ fitToMonitor(window);
+ }
+ }
+ else
+ {
+ RECT rect = { xpos, ypos, xpos + width, ypos + height };
+
+ if (_glfwIsWindows10Version1607OrGreaterWin32())
+ {
+ AdjustWindowRectExForDpi(&rect, getWindowStyle(window),
+ FALSE, getWindowExStyle(window),
+ GetDpiForWindow(window->win32.handle));
+ }
+ else
+ {
+ AdjustWindowRectEx(&rect, getWindowStyle(window),
+ FALSE, getWindowExStyle(window));
+ }
+
+ SetWindowPos(window->win32.handle, HWND_TOP,
+ rect.left, rect.top,
+ rect.right - rect.left, rect.bottom - rect.top,
+ SWP_NOCOPYBITS | SWP_NOACTIVATE | SWP_NOZORDER);
+ }
+
+ return;
+ }
+
+ if (window->monitor)
+ releaseMonitor(window);
+
+ _glfwInputWindowMonitor(window, monitor);
+
+ if (window->monitor)
+ {
+ MONITORINFO mi = { sizeof(mi) };
+ UINT flags = SWP_SHOWWINDOW | SWP_NOACTIVATE | SWP_NOCOPYBITS;
+
+ if (window->decorated)
+ {
+ DWORD style = GetWindowLongW(window->win32.handle, GWL_STYLE);
+ style &= ~WS_OVERLAPPEDWINDOW;
+ style |= getWindowStyle(window);
+ SetWindowLongW(window->win32.handle, GWL_STYLE, style);
+ flags |= SWP_FRAMECHANGED;
+ }
+
+ acquireMonitor(window);
+
+ GetMonitorInfoW(window->monitor->win32.handle, &mi);
+ SetWindowPos(window->win32.handle, HWND_TOPMOST,
+ mi.rcMonitor.left,
+ mi.rcMonitor.top,
+ mi.rcMonitor.right - mi.rcMonitor.left,
+ mi.rcMonitor.bottom - mi.rcMonitor.top,
+ flags);
+ }
+ else
+ {
+ HWND after;
+ RECT rect = { xpos, ypos, xpos + width, ypos + height };
+ DWORD style = GetWindowLongW(window->win32.handle, GWL_STYLE);
+ UINT flags = SWP_NOACTIVATE | SWP_NOCOPYBITS;
+
+ if (window->decorated)
+ {
+ style &= ~WS_POPUP;
+ style |= getWindowStyle(window);
+ SetWindowLongW(window->win32.handle, GWL_STYLE, style);
+
+ flags |= SWP_FRAMECHANGED;
+ }
+
+ if (window->floating)
+ after = HWND_TOPMOST;
+ else
+ after = HWND_NOTOPMOST;
+
+ if (_glfwIsWindows10Version1607OrGreaterWin32())
+ {
+ AdjustWindowRectExForDpi(&rect, getWindowStyle(window),
+ FALSE, getWindowExStyle(window),
+ GetDpiForWindow(window->win32.handle));
+ }
+ else
+ {
+ AdjustWindowRectEx(&rect, getWindowStyle(window),
+ FALSE, getWindowExStyle(window));
+ }
+
+ SetWindowPos(window->win32.handle, after,
+ rect.left, rect.top,
+ rect.right - rect.left, rect.bottom - rect.top,
+ flags);
+ }
+}
+
+int _glfwWindowFocusedWin32(_GLFWwindow* window)
+{
+ return window->win32.handle == GetActiveWindow();
+}
+
+int _glfwWindowIconifiedWin32(_GLFWwindow* window)
+{
+ return IsIconic(window->win32.handle);
+}
+
+int _glfwWindowVisibleWin32(_GLFWwindow* window)
+{
+ return IsWindowVisible(window->win32.handle);
+}
+
+int _glfwWindowMaximizedWin32(_GLFWwindow* window)
+{
+ return IsZoomed(window->win32.handle);
+}
+
+int _glfwWindowHoveredWin32(_GLFWwindow* window)
+{
+ return cursorInContentArea(window);
+}
+
+int _glfwFramebufferTransparentWin32(_GLFWwindow* window)
+{
+ BOOL composition, opaque;
+ DWORD color;
+
+ if (!window->win32.transparent)
+ return GLFW_FALSE;
+
+ if (!IsWindowsVistaOrGreater())
+ return GLFW_FALSE;
+
+ if (FAILED(DwmIsCompositionEnabled(&composition)) || !composition)
+ return GLFW_FALSE;
+
+ if (!IsWindows8OrGreater())
+ {
+        // HACK: Disable framebuffer transparency on Windows 7 when the
+        //       colorization color is opaque, because otherwise the window
+        //       contents are blended additively with the previous frame
+        //       instead of replacing it
+ if (FAILED(DwmGetColorizationColor(&color, &opaque)) || opaque)
+ return GLFW_FALSE;
+ }
+
+ return GLFW_TRUE;
+}
+
+void _glfwSetWindowResizableWin32(_GLFWwindow* window, GLFWbool enabled)
+{
+ updateWindowStyles(window);
+}
+
+void _glfwSetWindowDecoratedWin32(_GLFWwindow* window, GLFWbool enabled)
+{
+ updateWindowStyles(window);
+}
+
+void _glfwSetWindowFloatingWin32(_GLFWwindow* window, GLFWbool enabled)
+{
+ const HWND after = enabled ? HWND_TOPMOST : HWND_NOTOPMOST;
+ SetWindowPos(window->win32.handle, after, 0, 0, 0, 0,
+ SWP_NOACTIVATE | SWP_NOMOVE | SWP_NOSIZE);
+}
+
+void _glfwSetWindowMousePassthroughWin32(_GLFWwindow* window, GLFWbool enabled)
+{
+ COLORREF key = 0;
+ BYTE alpha = 0;
+ DWORD flags = 0;
+ DWORD exStyle = GetWindowLongW(window->win32.handle, GWL_EXSTYLE);
+
+ if (exStyle & WS_EX_LAYERED)
+ GetLayeredWindowAttributes(window->win32.handle, &key, &alpha, &flags);
+
+ if (enabled)
+ exStyle |= (WS_EX_TRANSPARENT | WS_EX_LAYERED);
+ else
+ {
+ exStyle &= ~WS_EX_TRANSPARENT;
+ // NOTE: Window opacity also needs the layered window style so do not
+ // remove it if the window is alpha blended
+ if (exStyle & WS_EX_LAYERED)
+ {
+ if (!(flags & LWA_ALPHA))
+ exStyle &= ~WS_EX_LAYERED;
+ }
+ }
+
+ SetWindowLongW(window->win32.handle, GWL_EXSTYLE, exStyle);
+
+ if (enabled)
+ SetLayeredWindowAttributes(window->win32.handle, key, alpha, flags);
+}
+
+float _glfwGetWindowOpacityWin32(_GLFWwindow* window)
+{
+ BYTE alpha;
+ DWORD flags;
+
+ if ((GetWindowLongW(window->win32.handle, GWL_EXSTYLE) & WS_EX_LAYERED) &&
+ GetLayeredWindowAttributes(window->win32.handle, NULL, &alpha, &flags))
+ {
+ if (flags & LWA_ALPHA)
+ return alpha / 255.f;
+ }
+
+ return 1.f;
+}
+
+void _glfwSetWindowOpacityWin32(_GLFWwindow* window, float opacity)
+{
+ LONG exStyle = GetWindowLongW(window->win32.handle, GWL_EXSTYLE);
+ if (opacity < 1.f || (exStyle & WS_EX_TRANSPARENT))
+ {
+ const BYTE alpha = (BYTE) (255 * opacity);
+ exStyle |= WS_EX_LAYERED;
+ SetWindowLongW(window->win32.handle, GWL_EXSTYLE, exStyle);
+ SetLayeredWindowAttributes(window->win32.handle, 0, alpha, LWA_ALPHA);
+ }
+ else if (exStyle & WS_EX_TRANSPARENT)
+ {
+ SetLayeredWindowAttributes(window->win32.handle, 0, 0, 0);
+ }
+ else
+ {
+ exStyle &= ~WS_EX_LAYERED;
+ SetWindowLongW(window->win32.handle, GWL_EXSTYLE, exStyle);
+ }
+}
+
+void _glfwSetRawMouseMotionWin32(_GLFWwindow* window, GLFWbool enabled)
+{
+ if (_glfw.win32.disabledCursorWindow != window)
+ return;
+
+ if (enabled)
+ enableRawMouseMotion(window);
+ else
+ disableRawMouseMotion(window);
+}
+
+GLFWbool _glfwRawMouseMotionSupportedWin32(void)
+{
+ return GLFW_TRUE;
+}
+
+void _glfwPollEventsWin32(void)
+{
+ MSG msg;
+ HWND handle;
+ _GLFWwindow* window;
+
+ while (PeekMessageW(&msg, NULL, 0, 0, PM_REMOVE))
+ {
+ if (msg.message == WM_QUIT)
+ {
+ // NOTE: While GLFW does not itself post WM_QUIT, other processes
+ // may post it to this one, for example Task Manager
+ // HACK: Treat WM_QUIT as a close on all windows
+
+ window = _glfw.windowListHead;
+ while (window)
+ {
+ _glfwInputWindowCloseRequest(window);
+ window = window->next;
+ }
+ }
+ else
+ {
+ TranslateMessage(&msg);
+ DispatchMessageW(&msg);
+ }
+ }
+
+ // HACK: Release modifier keys that the system did not emit KEYUP for
+ // NOTE: Shift keys on Windows tend to "stick" when both are pressed as
+ // no key up message is generated by the first key release
+ // NOTE: Windows key is not reported as released by the Win+V hotkey
+ // Other Win hotkeys are handled implicitly by _glfwInputWindowFocus
+ // because they change the input focus
+ // NOTE: The other half of this is in the WM_*KEY* handler in windowProc
+ handle = GetActiveWindow();
+ if (handle)
+ {
+ window = GetPropW(handle, L"GLFW");
+ if (window)
+ {
+ int i;
+ const int keys[4][2] =
+ {
+ { VK_LSHIFT, GLFW_KEY_LEFT_SHIFT },
+ { VK_RSHIFT, GLFW_KEY_RIGHT_SHIFT },
+ { VK_LWIN, GLFW_KEY_LEFT_SUPER },
+ { VK_RWIN, GLFW_KEY_RIGHT_SUPER }
+ };
+
+ for (i = 0; i < 4; i++)
+ {
+ const int vk = keys[i][0];
+ const int key = keys[i][1];
+ const int scancode = _glfw.win32.scancodes[key];
+
+ if ((GetKeyState(vk) & 0x8000))
+ continue;
+ if (window->keys[key] != GLFW_PRESS)
+ continue;
+
+ _glfwInputKey(window, key, scancode, GLFW_RELEASE, getKeyMods());
+ }
+ }
+ }
+
+ window = _glfw.win32.disabledCursorWindow;
+ if (window)
+ {
+ int width, height;
+ _glfwGetWindowSizeWin32(window, &width, &height);
+
+ // NOTE: Re-center the cursor only if it has moved since the last call,
+ // to avoid breaking glfwWaitEvents with WM_MOUSEMOVE
+ if (window->win32.lastCursorPosX != width / 2 ||
+ window->win32.lastCursorPosY != height / 2)
+ {
+ _glfwSetCursorPosWin32(window, width / 2, height / 2);
+ }
+ }
+}
+
+void _glfwWaitEventsWin32(void)
+{
+ WaitMessage();
+
+ _glfwPollEventsWin32();
+}
+
+void _glfwWaitEventsTimeoutWin32(double timeout)
+{
+ MsgWaitForMultipleObjects(0, NULL, FALSE, (DWORD) (timeout * 1e3), QS_ALLEVENTS);
+
+ _glfwPollEventsWin32();
+}
+
+void _glfwPostEmptyEventWin32(void)
+{
+ PostMessageW(_glfw.win32.helperWindowHandle, WM_NULL, 0, 0);
+}
+
+void _glfwGetCursorPosWin32(_GLFWwindow* window, double* xpos, double* ypos)
+{
+ POINT pos;
+
+ if (GetCursorPos(&pos))
+ {
+ ScreenToClient(window->win32.handle, &pos);
+
+ if (xpos)
+ *xpos = pos.x;
+ if (ypos)
+ *ypos = pos.y;
+ }
+}
+
+void _glfwSetCursorPosWin32(_GLFWwindow* window, double xpos, double ypos)
+{
+ POINT pos = { (int) xpos, (int) ypos };
+
+ // Store the new position so it can be recognized later
+ window->win32.lastCursorPosX = pos.x;
+ window->win32.lastCursorPosY = pos.y;
+
+ ClientToScreen(window->win32.handle, &pos);
+ SetCursorPos(pos.x, pos.y);
+}
+
+void _glfwSetCursorModeWin32(_GLFWwindow* window, int mode)
+{
+ if (mode == GLFW_CURSOR_DISABLED)
+ {
+ if (_glfwWindowFocusedWin32(window))
+ disableCursor(window);
+ }
+ else if (_glfw.win32.disabledCursorWindow == window)
+ enableCursor(window);
+ else if (cursorInContentArea(window))
+ updateCursorImage(window);
+}
+
+const char* _glfwGetScancodeNameWin32(int scancode)
+{
+ if (scancode < 0 || scancode > (KF_EXTENDED | 0xff) ||
+ _glfw.win32.keycodes[scancode] == GLFW_KEY_UNKNOWN)
+ {
+ _glfwInputError(GLFW_INVALID_VALUE, "Invalid scancode %i", scancode);
+ return NULL;
+ }
+
+ return _glfw.win32.keynames[_glfw.win32.keycodes[scancode]];
+}
+
+int _glfwGetKeyScancodeWin32(int key)
+{
+ return _glfw.win32.scancodes[key];
+}
+
+int _glfwCreateCursorWin32(_GLFWcursor* cursor,
+ const GLFWimage* image,
+ int xhot, int yhot)
+{
+ cursor->win32.handle = (HCURSOR) createIcon(image, xhot, yhot, GLFW_FALSE);
+ if (!cursor->win32.handle)
+ return GLFW_FALSE;
+
+ return GLFW_TRUE;
+}
+
+int _glfwCreateStandardCursorWin32(_GLFWcursor* cursor, int shape)
+{
+ int id = 0;
+
+ switch (shape)
+ {
+ case GLFW_ARROW_CURSOR:
+ id = OCR_NORMAL;
+ break;
+ case GLFW_IBEAM_CURSOR:
+ id = OCR_IBEAM;
+ break;
+ case GLFW_CROSSHAIR_CURSOR:
+ id = OCR_CROSS;
+ break;
+ case GLFW_POINTING_HAND_CURSOR:
+ id = OCR_HAND;
+ break;
+ case GLFW_RESIZE_EW_CURSOR:
+ id = OCR_SIZEWE;
+ break;
+ case GLFW_RESIZE_NS_CURSOR:
+ id = OCR_SIZENS;
+ break;
+ case GLFW_RESIZE_NWSE_CURSOR:
+ id = OCR_SIZENWSE;
+ break;
+ case GLFW_RESIZE_NESW_CURSOR:
+ id = OCR_SIZENESW;
+ break;
+ case GLFW_RESIZE_ALL_CURSOR:
+ id = OCR_SIZEALL;
+ break;
+ case GLFW_NOT_ALLOWED_CURSOR:
+ id = OCR_NO;
+ break;
+ default:
+ _glfwInputError(GLFW_PLATFORM_ERROR, "Win32: Unknown standard cursor");
+ return GLFW_FALSE;
+ }
+
+ cursor->win32.handle = LoadImageW(NULL,
+ MAKEINTRESOURCEW(id), IMAGE_CURSOR, 0, 0,
+ LR_DEFAULTSIZE | LR_SHARED);
+ if (!cursor->win32.handle)
+ {
+ _glfwInputErrorWin32(GLFW_PLATFORM_ERROR,
+ "Win32: Failed to create standard cursor");
+ return GLFW_FALSE;
+ }
+
+ return GLFW_TRUE;
+}
+
+void _glfwDestroyCursorWin32(_GLFWcursor* cursor)
+{
+ if (cursor->win32.handle)
+ DestroyIcon((HICON) cursor->win32.handle);
+}
+
+void _glfwSetCursorWin32(_GLFWwindow* window, _GLFWcursor* cursor)
+{
+ if (cursorInContentArea(window))
+ updateCursorImage(window);
+}
+
+void _glfwSetClipboardStringWin32(const char* string)
+{
+ int characterCount;
+ HANDLE object;
+ WCHAR* buffer;
+
+ characterCount = MultiByteToWideChar(CP_UTF8, 0, string, -1, NULL, 0);
+ if (!characterCount)
+ return;
+
+ object = GlobalAlloc(GMEM_MOVEABLE, characterCount * sizeof(WCHAR));
+ if (!object)
+ {
+ _glfwInputErrorWin32(GLFW_PLATFORM_ERROR,
+ "Win32: Failed to allocate global handle for clipboard");
+ return;
+ }
+
+ buffer = GlobalLock(object);
+ if (!buffer)
+ {
+ _glfwInputErrorWin32(GLFW_PLATFORM_ERROR,
+ "Win32: Failed to lock global handle");
+ GlobalFree(object);
+ return;
+ }
+
+ MultiByteToWideChar(CP_UTF8, 0, string, -1, buffer, characterCount);
+ GlobalUnlock(object);
+
+ if (!OpenClipboard(_glfw.win32.helperWindowHandle))
+ {
+ _glfwInputErrorWin32(GLFW_PLATFORM_ERROR,
+ "Win32: Failed to open clipboard");
+ GlobalFree(object);
+ return;
+ }
+
+ EmptyClipboard();
+ SetClipboardData(CF_UNICODETEXT, object);
+ CloseClipboard();
+}
+
+const char* _glfwGetClipboardStringWin32(void)
+{
+ HANDLE object;
+ WCHAR* buffer;
+
+ if (!OpenClipboard(_glfw.win32.helperWindowHandle))
+ {
+ _glfwInputErrorWin32(GLFW_PLATFORM_ERROR,
+ "Win32: Failed to open clipboard");
+ return NULL;
+ }
+
+ object = GetClipboardData(CF_UNICODETEXT);
+ if (!object)
+ {
+ _glfwInputErrorWin32(GLFW_FORMAT_UNAVAILABLE,
+ "Win32: Failed to convert clipboard to string");
+ CloseClipboard();
+ return NULL;
+ }
+
+ buffer = GlobalLock(object);
+ if (!buffer)
+ {
+ _glfwInputErrorWin32(GLFW_PLATFORM_ERROR,
+ "Win32: Failed to lock global handle");
+ CloseClipboard();
+ return NULL;
+ }
+
+ _glfw_free(_glfw.win32.clipboardString);
+ _glfw.win32.clipboardString = _glfwCreateUTF8FromWideStringWin32(buffer);
+
+ GlobalUnlock(object);
+ CloseClipboard();
+
+ return _glfw.win32.clipboardString;
+}
+
+EGLenum _glfwGetEGLPlatformWin32(EGLint** attribs)
+{
+ if (_glfw.egl.ANGLE_platform_angle)
+ {
+ int type = 0;
+
+ if (_glfw.egl.ANGLE_platform_angle_opengl)
+ {
+ if (_glfw.hints.init.angleType == GLFW_ANGLE_PLATFORM_TYPE_OPENGL)
+ type = EGL_PLATFORM_ANGLE_TYPE_OPENGL_ANGLE;
+ else if (_glfw.hints.init.angleType == GLFW_ANGLE_PLATFORM_TYPE_OPENGLES)
+ type = EGL_PLATFORM_ANGLE_TYPE_OPENGLES_ANGLE;
+ }
+
+ if (_glfw.egl.ANGLE_platform_angle_d3d)
+ {
+ if (_glfw.hints.init.angleType == GLFW_ANGLE_PLATFORM_TYPE_D3D9)
+ type = EGL_PLATFORM_ANGLE_TYPE_D3D9_ANGLE;
+ else if (_glfw.hints.init.angleType == GLFW_ANGLE_PLATFORM_TYPE_D3D11)
+ type = EGL_PLATFORM_ANGLE_TYPE_D3D11_ANGLE;
+ }
+
+ if (_glfw.egl.ANGLE_platform_angle_vulkan)
+ {
+ if (_glfw.hints.init.angleType == GLFW_ANGLE_PLATFORM_TYPE_VULKAN)
+ type = EGL_PLATFORM_ANGLE_TYPE_VULKAN_ANGLE;
+ }
+
+ if (type)
+ {
+ *attribs = _glfw_calloc(3, sizeof(EGLint));
+ (*attribs)[0] = EGL_PLATFORM_ANGLE_TYPE_ANGLE;
+ (*attribs)[1] = type;
+ (*attribs)[2] = EGL_NONE;
+ return EGL_PLATFORM_ANGLE_ANGLE;
+ }
+ }
+
+ return 0;
+}
+
+EGLNativeDisplayType _glfwGetEGLNativeDisplayWin32(void)
+{
+ return GetDC(_glfw.win32.helperWindowHandle);
+}
+
+EGLNativeWindowType _glfwGetEGLNativeWindowWin32(_GLFWwindow* window)
+{
+ return window->win32.handle;
+}
+
+void _glfwGetRequiredInstanceExtensionsWin32(char** extensions)
+{
+ if (!_glfw.vk.KHR_surface || !_glfw.vk.KHR_win32_surface)
+ return;
+
+ extensions[0] = "VK_KHR_surface";
+ extensions[1] = "VK_KHR_win32_surface";
+}
+
+int _glfwGetPhysicalDevicePresentationSupportWin32(VkInstance instance,
+ VkPhysicalDevice device,
+ uint32_t queuefamily)
+{
+ PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR
+ vkGetPhysicalDeviceWin32PresentationSupportKHR =
+ (PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR)
+ vkGetInstanceProcAddr(instance, "vkGetPhysicalDeviceWin32PresentationSupportKHR");
+ if (!vkGetPhysicalDeviceWin32PresentationSupportKHR)
+ {
+ _glfwInputError(GLFW_API_UNAVAILABLE,
+ "Win32: Vulkan instance missing VK_KHR_win32_surface extension");
+ return GLFW_FALSE;
+ }
+
+ return vkGetPhysicalDeviceWin32PresentationSupportKHR(device, queuefamily);
+}
+
+VkResult _glfwCreateWindowSurfaceWin32(VkInstance instance,
+ _GLFWwindow* window,
+ const VkAllocationCallbacks* allocator,
+ VkSurfaceKHR* surface)
+{
+ VkResult err;
+ VkWin32SurfaceCreateInfoKHR sci;
+ PFN_vkCreateWin32SurfaceKHR vkCreateWin32SurfaceKHR;
+
+ vkCreateWin32SurfaceKHR = (PFN_vkCreateWin32SurfaceKHR)
+ vkGetInstanceProcAddr(instance, "vkCreateWin32SurfaceKHR");
+ if (!vkCreateWin32SurfaceKHR)
+ {
+ _glfwInputError(GLFW_API_UNAVAILABLE,
+ "Win32: Vulkan instance missing VK_KHR_win32_surface extension");
+ return VK_ERROR_EXTENSION_NOT_PRESENT;
+ }
+
+ memset(&sci, 0, sizeof(sci));
+ sci.sType = VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR;
+ sci.hinstance = _glfw.win32.instance;
+ sci.hwnd = window->win32.handle;
+
+ err = vkCreateWin32SurfaceKHR(instance, &sci, allocator, surface);
+ if (err)
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "Win32: Failed to create Vulkan surface: %s",
+ _glfwGetVulkanResultString(err));
+ }
+
+ return err;
+}
+
+GLFWAPI HWND glfwGetWin32Window(GLFWwindow* handle)
+{
+ _GLFWwindow* window = (_GLFWwindow*) handle;
+ _GLFW_REQUIRE_INIT_OR_RETURN(NULL);
+
+ if (_glfw.platform.platformID != GLFW_PLATFORM_WIN32)
+ {
+ _glfwInputError(GLFW_PLATFORM_UNAVAILABLE,
+ "Win32: Platform not initialized");
+ return NULL;
+ }
+
+ return window->win32.handle;
+}
+
diff --git a/chromium/third_party/dawn/third_party/glfw/src/window.c b/chromium/third_party/dawn/third_party/glfw/src/window.c
new file mode 100644
index 00000000000..621e2e64ad6
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/src/window.c
@@ -0,0 +1,1113 @@
+//========================================================================
+// GLFW 3.4 - www.glfw.org
+//------------------------------------------------------------------------
+// Copyright (c) 2002-2006 Marcus Geelnard
+// Copyright (c) 2006-2019 Camilla Löwy <elmindreda@glfw.org>
+// Copyright (c) 2012 Torsten Walluhn <tw@mad-cad.net>
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would
+// be appreciated but is not required.
+//
+// 2. Altered source versions must be plainly marked as such, and must not
+// be misrepresented as being the original software.
+//
+// 3. This notice may not be removed or altered from any source
+// distribution.
+//
+//========================================================================
+// Please use C89 style variable declarations in this file, because VS 2010
+// does not support C99-style declarations
+//========================================================================
+
+#include "internal.h"
+
+#include <assert.h>
+#include <string.h>
+#include <stdlib.h>
+#include <float.h>
+
+
+//////////////////////////////////////////////////////////////////////////
+////// GLFW event API //////
+//////////////////////////////////////////////////////////////////////////
+
+// Notifies shared code that a window has lost or received input focus
+//
+void _glfwInputWindowFocus(_GLFWwindow* window, GLFWbool focused)
+{
+ if (window->callbacks.focus)
+ window->callbacks.focus((GLFWwindow*) window, focused);
+
+ if (!focused)
+ {
+ int key, button;
+
+ for (key = 0; key <= GLFW_KEY_LAST; key++)
+ {
+ if (window->keys[key] == GLFW_PRESS)
+ {
+ const int scancode = _glfw.platform.getKeyScancode(key);
+ _glfwInputKey(window, key, scancode, GLFW_RELEASE, 0);
+ }
+ }
+
+ for (button = 0; button <= GLFW_MOUSE_BUTTON_LAST; button++)
+ {
+ if (window->mouseButtons[button] == GLFW_PRESS)
+ _glfwInputMouseClick(window, button, GLFW_RELEASE, 0);
+ }
+ }
+}
+
+// Notifies shared code that a window has moved
+// The position is specified in content area relative screen coordinates
+//
+void _glfwInputWindowPos(_GLFWwindow* window, int x, int y)
+{
+ if (window->callbacks.pos)
+ window->callbacks.pos((GLFWwindow*) window, x, y);
+}
+
+// Notifies shared code that a window has been resized
+// The size is specified in screen coordinates
+//
+void _glfwInputWindowSize(_GLFWwindow* window, int width, int height)
+{
+ if (window->callbacks.size)
+ window->callbacks.size((GLFWwindow*) window, width, height);
+}
+
+// Notifies shared code that a window has been iconified or restored
+//
+void _glfwInputWindowIconify(_GLFWwindow* window, GLFWbool iconified)
+{
+ if (window->callbacks.iconify)
+ window->callbacks.iconify((GLFWwindow*) window, iconified);
+}
+
+// Notifies shared code that a window has been maximized or restored
+//
+void _glfwInputWindowMaximize(_GLFWwindow* window, GLFWbool maximized)
+{
+ if (window->callbacks.maximize)
+ window->callbacks.maximize((GLFWwindow*) window, maximized);
+}
+
+// Notifies shared code that a window framebuffer has been resized
+// The size is specified in pixels
+//
+void _glfwInputFramebufferSize(_GLFWwindow* window, int width, int height)
+{
+ if (window->callbacks.fbsize)
+ window->callbacks.fbsize((GLFWwindow*) window, width, height);
+}
+
+// Notifies shared code that a window content scale has changed
+// The scale is specified as the ratio between the current and default DPI
+//
+void _glfwInputWindowContentScale(_GLFWwindow* window, float xscale, float yscale)
+{
+ if (window->callbacks.scale)
+ window->callbacks.scale((GLFWwindow*) window, xscale, yscale);
+}
+
+// Notifies shared code that the window contents need updating
+//
+void _glfwInputWindowDamage(_GLFWwindow* window)
+{
+ if (window->callbacks.refresh)
+ window->callbacks.refresh((GLFWwindow*) window);
+}
+
+// Notifies shared code that the user wishes to close a window
+//
+void _glfwInputWindowCloseRequest(_GLFWwindow* window)
+{
+ window->shouldClose = GLFW_TRUE;
+
+ if (window->callbacks.close)
+ window->callbacks.close((GLFWwindow*) window);
+}
+
+// Notifies shared code that a window has changed its desired monitor
+//
+void _glfwInputWindowMonitor(_GLFWwindow* window, _GLFWmonitor* monitor)
+{
+ window->monitor = monitor;
+}
+
+//////////////////////////////////////////////////////////////////////////
+////// GLFW public API //////
+//////////////////////////////////////////////////////////////////////////
+
+GLFWAPI GLFWwindow* glfwCreateWindow(int width, int height,
+ const char* title,
+ GLFWmonitor* monitor,
+ GLFWwindow* share)
+{
+ _GLFWfbconfig fbconfig;
+ _GLFWctxconfig ctxconfig;
+ _GLFWwndconfig wndconfig;
+ _GLFWwindow* window;
+
+ assert(title != NULL);
+ assert(width >= 0);
+ assert(height >= 0);
+
+ _GLFW_REQUIRE_INIT_OR_RETURN(NULL);
+
+ if (width <= 0 || height <= 0)
+ {
+ _glfwInputError(GLFW_INVALID_VALUE,
+ "Invalid window size %ix%i",
+ width, height);
+
+ return NULL;
+ }
+
+ fbconfig = _glfw.hints.framebuffer;
+ ctxconfig = _glfw.hints.context;
+ wndconfig = _glfw.hints.window;
+
+ wndconfig.width = width;
+ wndconfig.height = height;
+ wndconfig.title = title;
+ ctxconfig.share = (_GLFWwindow*) share;
+
+ if (!_glfwIsValidContextConfig(&ctxconfig))
+ return NULL;
+
+ window = _glfw_calloc(1, sizeof(_GLFWwindow));
+ window->next = _glfw.windowListHead;
+ _glfw.windowListHead = window;
+
+ window->videoMode.width = width;
+ window->videoMode.height = height;
+ window->videoMode.redBits = fbconfig.redBits;
+ window->videoMode.greenBits = fbconfig.greenBits;
+ window->videoMode.blueBits = fbconfig.blueBits;
+ window->videoMode.refreshRate = _glfw.hints.refreshRate;
+
+ window->monitor = (_GLFWmonitor*) monitor;
+ window->resizable = wndconfig.resizable;
+ window->decorated = wndconfig.decorated;
+ window->autoIconify = wndconfig.autoIconify;
+ window->floating = wndconfig.floating;
+ window->focusOnShow = wndconfig.focusOnShow;
+ window->mousePassthrough = wndconfig.mousePassthrough;
+ window->cursorMode = GLFW_CURSOR_NORMAL;
+
+ window->doublebuffer = fbconfig.doublebuffer;
+
+ window->minwidth = GLFW_DONT_CARE;
+ window->minheight = GLFW_DONT_CARE;
+ window->maxwidth = GLFW_DONT_CARE;
+ window->maxheight = GLFW_DONT_CARE;
+ window->numer = GLFW_DONT_CARE;
+ window->denom = GLFW_DONT_CARE;
+
+ if (!_glfw.platform.createWindow(window, &wndconfig, &ctxconfig, &fbconfig))
+ {
+ glfwDestroyWindow((GLFWwindow*) window);
+ return NULL;
+ }
+
+ return (GLFWwindow*) window;
+}
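+
+/* NOTE (editorial sketch, not part of the upstream file): a minimal C89-style
+ * client of the API implemented above, assuming a platform backend such as the
+ * Win32 one in this patch is available.
+ *
+ *   #include <GLFW/glfw3.h>
+ *
+ *   int main(void)
+ *   {
+ *       GLFWwindow* window;
+ *
+ *       if (!glfwInit())
+ *           return 1;
+ *
+ *       window = glfwCreateWindow(640, 480, "Example", NULL, NULL);
+ *       if (!window)
+ *       {
+ *           glfwTerminate();
+ *           return 1;
+ *       }
+ *
+ *       glfwMakeContextCurrent(window);
+ *
+ *       while (!glfwWindowShouldClose(window))
+ *       {
+ *           glfwSwapBuffers(window);
+ *           glfwPollEvents();
+ *       }
+ *
+ *       glfwDestroyWindow(window);
+ *       glfwTerminate();
+ *       return 0;
+ *   }
+ */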
+
+GLFWAPI void glfwDefaultWindowHints(void)
+{
+ _GLFW_REQUIRE_INIT();
+
+ // The default is OpenGL with minimum version 1.0
+ memset(&_glfw.hints.context, 0, sizeof(_glfw.hints.context));
+ _glfw.hints.context.client = GLFW_OPENGL_API;
+ _glfw.hints.context.source = GLFW_NATIVE_CONTEXT_API;
+ _glfw.hints.context.major = 1;
+ _glfw.hints.context.minor = 0;
+
+ // The default is a focused, visible, resizable window with decorations
+ memset(&_glfw.hints.window, 0, sizeof(_glfw.hints.window));
+ _glfw.hints.window.resizable = GLFW_TRUE;
+ _glfw.hints.window.visible = GLFW_TRUE;
+ _glfw.hints.window.decorated = GLFW_TRUE;
+ _glfw.hints.window.focused = GLFW_TRUE;
+ _glfw.hints.window.autoIconify = GLFW_TRUE;
+ _glfw.hints.window.centerCursor = GLFW_TRUE;
+ _glfw.hints.window.focusOnShow = GLFW_TRUE;
+
+ // The default is 24 bits of color, 24 bits of depth and 8 bits of stencil,
+ // double buffered
+ memset(&_glfw.hints.framebuffer, 0, sizeof(_glfw.hints.framebuffer));
+ _glfw.hints.framebuffer.redBits = 8;
+ _glfw.hints.framebuffer.greenBits = 8;
+ _glfw.hints.framebuffer.blueBits = 8;
+ _glfw.hints.framebuffer.alphaBits = 8;
+ _glfw.hints.framebuffer.depthBits = 24;
+ _glfw.hints.framebuffer.stencilBits = 8;
+ _glfw.hints.framebuffer.doublebuffer = GLFW_TRUE;
+
+ // The default is to select the highest available refresh rate
+ _glfw.hints.refreshRate = GLFW_DONT_CARE;
+
+ // The default is to use full Retina resolution framebuffers
+ _glfw.hints.window.ns.retina = GLFW_TRUE;
+}
+
+GLFWAPI void glfwWindowHint(int hint, int value)
+{
+ _GLFW_REQUIRE_INIT();
+
+ switch (hint)
+ {
+ case GLFW_RED_BITS:
+ _glfw.hints.framebuffer.redBits = value;
+ return;
+ case GLFW_GREEN_BITS:
+ _glfw.hints.framebuffer.greenBits = value;
+ return;
+ case GLFW_BLUE_BITS:
+ _glfw.hints.framebuffer.blueBits = value;
+ return;
+ case GLFW_ALPHA_BITS:
+ _glfw.hints.framebuffer.alphaBits = value;
+ return;
+ case GLFW_DEPTH_BITS:
+ _glfw.hints.framebuffer.depthBits = value;
+ return;
+ case GLFW_STENCIL_BITS:
+ _glfw.hints.framebuffer.stencilBits = value;
+ return;
+ case GLFW_ACCUM_RED_BITS:
+ _glfw.hints.framebuffer.accumRedBits = value;
+ return;
+ case GLFW_ACCUM_GREEN_BITS:
+ _glfw.hints.framebuffer.accumGreenBits = value;
+ return;
+ case GLFW_ACCUM_BLUE_BITS:
+ _glfw.hints.framebuffer.accumBlueBits = value;
+ return;
+ case GLFW_ACCUM_ALPHA_BITS:
+ _glfw.hints.framebuffer.accumAlphaBits = value;
+ return;
+ case GLFW_AUX_BUFFERS:
+ _glfw.hints.framebuffer.auxBuffers = value;
+ return;
+ case GLFW_STEREO:
+ _glfw.hints.framebuffer.stereo = value ? GLFW_TRUE : GLFW_FALSE;
+ return;
+ case GLFW_DOUBLEBUFFER:
+ _glfw.hints.framebuffer.doublebuffer = value ? GLFW_TRUE : GLFW_FALSE;
+ return;
+ case GLFW_TRANSPARENT_FRAMEBUFFER:
+ _glfw.hints.framebuffer.transparent = value ? GLFW_TRUE : GLFW_FALSE;
+ return;
+ case GLFW_SAMPLES:
+ _glfw.hints.framebuffer.samples = value;
+ return;
+ case GLFW_SRGB_CAPABLE:
+ _glfw.hints.framebuffer.sRGB = value ? GLFW_TRUE : GLFW_FALSE;
+ return;
+ case GLFW_RESIZABLE:
+ _glfw.hints.window.resizable = value ? GLFW_TRUE : GLFW_FALSE;
+ return;
+ case GLFW_DECORATED:
+ _glfw.hints.window.decorated = value ? GLFW_TRUE : GLFW_FALSE;
+ return;
+ case GLFW_FOCUSED:
+ _glfw.hints.window.focused = value ? GLFW_TRUE : GLFW_FALSE;
+ return;
+ case GLFW_AUTO_ICONIFY:
+ _glfw.hints.window.autoIconify = value ? GLFW_TRUE : GLFW_FALSE;
+ return;
+ case GLFW_FLOATING:
+ _glfw.hints.window.floating = value ? GLFW_TRUE : GLFW_FALSE;
+ return;
+ case GLFW_MAXIMIZED:
+ _glfw.hints.window.maximized = value ? GLFW_TRUE : GLFW_FALSE;
+ return;
+ case GLFW_VISIBLE:
+ _glfw.hints.window.visible = value ? GLFW_TRUE : GLFW_FALSE;
+ return;
+ case GLFW_COCOA_RETINA_FRAMEBUFFER:
+ _glfw.hints.window.ns.retina = value ? GLFW_TRUE : GLFW_FALSE;
+ return;
+ case GLFW_WIN32_KEYBOARD_MENU:
+ _glfw.hints.window.win32.keymenu = value ? GLFW_TRUE : GLFW_FALSE;
+ return;
+ case GLFW_COCOA_GRAPHICS_SWITCHING:
+ _glfw.hints.context.nsgl.offline = value ? GLFW_TRUE : GLFW_FALSE;
+ return;
+ case GLFW_SCALE_TO_MONITOR:
+ _glfw.hints.window.scaleToMonitor = value ? GLFW_TRUE : GLFW_FALSE;
+ return;
+ case GLFW_CENTER_CURSOR:
+ _glfw.hints.window.centerCursor = value ? GLFW_TRUE : GLFW_FALSE;
+ return;
+ case GLFW_FOCUS_ON_SHOW:
+ _glfw.hints.window.focusOnShow = value ? GLFW_TRUE : GLFW_FALSE;
+ return;
+ case GLFW_MOUSE_PASSTHROUGH:
+ _glfw.hints.window.mousePassthrough = value ? GLFW_TRUE : GLFW_FALSE;
+ return;
+ case GLFW_CLIENT_API:
+ _glfw.hints.context.client = value;
+ return;
+ case GLFW_CONTEXT_CREATION_API:
+ _glfw.hints.context.source = value;
+ return;
+ case GLFW_CONTEXT_VERSION_MAJOR:
+ _glfw.hints.context.major = value;
+ return;
+ case GLFW_CONTEXT_VERSION_MINOR:
+ _glfw.hints.context.minor = value;
+ return;
+ case GLFW_CONTEXT_ROBUSTNESS:
+ _glfw.hints.context.robustness = value;
+ return;
+ case GLFW_OPENGL_FORWARD_COMPAT:
+ _glfw.hints.context.forward = value ? GLFW_TRUE : GLFW_FALSE;
+ return;
+ case GLFW_CONTEXT_DEBUG:
+ _glfw.hints.context.debug = value ? GLFW_TRUE : GLFW_FALSE;
+ return;
+ case GLFW_CONTEXT_NO_ERROR:
+ _glfw.hints.context.noerror = value ? GLFW_TRUE : GLFW_FALSE;
+ return;
+ case GLFW_OPENGL_PROFILE:
+ _glfw.hints.context.profile = value;
+ return;
+ case GLFW_CONTEXT_RELEASE_BEHAVIOR:
+ _glfw.hints.context.release = value;
+ return;
+ case GLFW_REFRESH_RATE:
+ _glfw.hints.refreshRate = value;
+ return;
+ }
+
+ _glfwInputError(GLFW_INVALID_ENUM, "Invalid window hint 0x%08X", hint);
+}
+
+GLFWAPI void glfwWindowHintString(int hint, const char* value)
+{
+ assert(value != NULL);
+
+ _GLFW_REQUIRE_INIT();
+
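+ // Assumed behavior: strncpy copies at most sizeof - 1 bytes, so the final
+ // byte, zeroed when the hints were reset, remains as the terminator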
+ switch (hint)
+ {
+ case GLFW_COCOA_FRAME_NAME:
+ strncpy(_glfw.hints.window.ns.frameName, value,
+ sizeof(_glfw.hints.window.ns.frameName) - 1);
+ return;
+ case GLFW_X11_CLASS_NAME:
+ strncpy(_glfw.hints.window.x11.className, value,
+ sizeof(_glfw.hints.window.x11.className) - 1);
+ return;
+ case GLFW_X11_INSTANCE_NAME:
+ strncpy(_glfw.hints.window.x11.instanceName, value,
+ sizeof(_glfw.hints.window.x11.instanceName) - 1);
+ return;
+ }
+
+ _glfwInputError(GLFW_INVALID_ENUM, "Invalid window hint string 0x%08X", hint);
+}
+
+GLFWAPI void glfwDestroyWindow(GLFWwindow* handle)
+{
+ _GLFWwindow* window = (_GLFWwindow*) handle;
+
+ _GLFW_REQUIRE_INIT();
+
+ // Allow closing of NULL (to match the behavior of free)
+ if (window == NULL)
+ return;
+
+ // Clear all callbacks to avoid exposing a half torn-down window object
+ memset(&window->callbacks, 0, sizeof(window->callbacks));
+
+ // The window's context must not be current on another thread when the
+ // window is destroyed
+ if (window == _glfwPlatformGetTls(&_glfw.contextSlot))
+ glfwMakeContextCurrent(NULL);
+
+ _glfw.platform.destroyWindow(window);
+
+ // Unlink window from global linked list
+ {
+ _GLFWwindow** prev = &_glfw.windowListHead;
+
+ while (*prev != window)
+ prev = &((*prev)->next);
+
+ *prev = window->next;
+ }
+
+ _glfw_free(window);
+}
+
+GLFWAPI int glfwWindowShouldClose(GLFWwindow* handle)
+{
+ _GLFWwindow* window = (_GLFWwindow*) handle;
+ assert(window != NULL);
+
+ _GLFW_REQUIRE_INIT_OR_RETURN(0);
+ return window->shouldClose;
+}
+
+GLFWAPI void glfwSetWindowShouldClose(GLFWwindow* handle, int value)
+{
+ _GLFWwindow* window = (_GLFWwindow*) handle;
+ assert(window != NULL);
+
+ _GLFW_REQUIRE_INIT();
+ window->shouldClose = value;
+}
+
+GLFWAPI void glfwSetWindowTitle(GLFWwindow* handle, const char* title)
+{
+ _GLFWwindow* window = (_GLFWwindow*) handle;
+ assert(window != NULL);
+ assert(title != NULL);
+
+ _GLFW_REQUIRE_INIT();
+ _glfw.platform.setWindowTitle(window, title);
+}
+
+GLFWAPI void glfwSetWindowIcon(GLFWwindow* handle,
+ int count, const GLFWimage* images)
+{
+ int i;
+ _GLFWwindow* window = (_GLFWwindow*) handle;
+
+ assert(window != NULL);
+ assert(count >= 0);
+ assert(count == 0 || images != NULL);
+
+ _GLFW_REQUIRE_INIT();
+
+ if (count < 0)
+ {
+ _glfwInputError(GLFW_INVALID_VALUE, "Invalid image count for window icon");
+ return;
+ }
+
+ for (i = 0; i < count; i++)
+ {
+ assert(images[i].pixels != NULL);
+
+ if (images[i].width <= 0 || images[i].height <= 0)
+ {
+ _glfwInputError(GLFW_INVALID_VALUE,
+ "Invalid image dimensions for window icon");
+ return;
+ }
+ }
+
+ _glfw.platform.setWindowIcon(window, count, images);
+}
+
+GLFWAPI void glfwGetWindowPos(GLFWwindow* handle, int* xpos, int* ypos)
+{
+ _GLFWwindow* window = (_GLFWwindow*) handle;
+ assert(window != NULL);
+
+ if (xpos)
+ *xpos = 0;
+ if (ypos)
+ *ypos = 0;
+
+ _GLFW_REQUIRE_INIT();
+ _glfw.platform.getWindowPos(window, xpos, ypos);
+}
+
+GLFWAPI void glfwSetWindowPos(GLFWwindow* handle, int xpos, int ypos)
+{
+ _GLFWwindow* window = (_GLFWwindow*) handle;
+ assert(window != NULL);
+
+ _GLFW_REQUIRE_INIT();
+
+ if (window->monitor)
+ return;
+
+ _glfw.platform.setWindowPos(window, xpos, ypos);
+}
+
+GLFWAPI void glfwGetWindowSize(GLFWwindow* handle, int* width, int* height)
+{
+ _GLFWwindow* window = (_GLFWwindow*) handle;
+ assert(window != NULL);
+
+ if (width)
+ *width = 0;
+ if (height)
+ *height = 0;
+
+ _GLFW_REQUIRE_INIT();
+ _glfw.platform.getWindowSize(window, width, height);
+}
+
+GLFWAPI void glfwSetWindowSize(GLFWwindow* handle, int width, int height)
+{
+ _GLFWwindow* window = (_GLFWwindow*) handle;
+ assert(window != NULL);
+ assert(width >= 0);
+ assert(height >= 0);
+
+ _GLFW_REQUIRE_INIT();
+
+ window->videoMode.width = width;
+ window->videoMode.height = height;
+
+ _glfw.platform.setWindowSize(window, width, height);
+}
+
+GLFWAPI void glfwSetWindowSizeLimits(GLFWwindow* handle,
+ int minwidth, int minheight,
+ int maxwidth, int maxheight)
+{
+ _GLFWwindow* window = (_GLFWwindow*) handle;
+ assert(window != NULL);
+
+ _GLFW_REQUIRE_INIT();
+
+ if (minwidth != GLFW_DONT_CARE && minheight != GLFW_DONT_CARE)
+ {
+ if (minwidth < 0 || minheight < 0)
+ {
+ _glfwInputError(GLFW_INVALID_VALUE,
+ "Invalid window minimum size %ix%i",
+ minwidth, minheight);
+ return;
+ }
+ }
+
+ if (maxwidth != GLFW_DONT_CARE && maxheight != GLFW_DONT_CARE)
+ {
+ if (maxwidth < 0 || maxheight < 0 ||
+ maxwidth < minwidth || maxheight < minheight)
+ {
+ _glfwInputError(GLFW_INVALID_VALUE,
+ "Invalid window maximum size %ix%i",
+ maxwidth, maxheight);
+ return;
+ }
+ }
+
+ window->minwidth = minwidth;
+ window->minheight = minheight;
+ window->maxwidth = maxwidth;
+ window->maxheight = maxheight;
+
+ if (window->monitor || !window->resizable)
+ return;
+
+ _glfw.platform.setWindowSizeLimits(window,
+ minwidth, minheight,
+ maxwidth, maxheight);
+}
+
+GLFWAPI void glfwSetWindowAspectRatio(GLFWwindow* handle, int numer, int denom)
+{
+ _GLFWwindow* window = (_GLFWwindow*) handle;
+ assert(window != NULL);
+ assert(numer != 0);
+ assert(denom != 0);
+
+ _GLFW_REQUIRE_INIT();
+
+ if (numer != GLFW_DONT_CARE && denom != GLFW_DONT_CARE)
+ {
+ if (numer <= 0 || denom <= 0)
+ {
+ _glfwInputError(GLFW_INVALID_VALUE,
+ "Invalid window aspect ratio %i:%i",
+ numer, denom);
+ return;
+ }
+ }
+
+ window->numer = numer;
+ window->denom = denom;
+
+ if (window->monitor || !window->resizable)
+ return;
+
+ _glfw.platform.setWindowAspectRatio(window, numer, denom);
+}
+
+GLFWAPI void glfwGetFramebufferSize(GLFWwindow* handle, int* width, int* height)
+{
+ _GLFWwindow* window = (_GLFWwindow*) handle;
+ assert(window != NULL);
+
+ if (width)
+ *width = 0;
+ if (height)
+ *height = 0;
+
+ _GLFW_REQUIRE_INIT();
+ _glfw.platform.getFramebufferSize(window, width, height);
+}
+
+GLFWAPI void glfwGetWindowFrameSize(GLFWwindow* handle,
+ int* left, int* top,
+ int* right, int* bottom)
+{
+ _GLFWwindow* window = (_GLFWwindow*) handle;
+ assert(window != NULL);
+
+ if (left)
+ *left = 0;
+ if (top)
+ *top = 0;
+ if (right)
+ *right = 0;
+ if (bottom)
+ *bottom = 0;
+
+ _GLFW_REQUIRE_INIT();
+ _glfw.platform.getWindowFrameSize(window, left, top, right, bottom);
+}
+
+GLFWAPI void glfwGetWindowContentScale(GLFWwindow* handle,
+ float* xscale, float* yscale)
+{
+ _GLFWwindow* window = (_GLFWwindow*) handle;
+ assert(window != NULL);
+
+ if (xscale)
+ *xscale = 0.f;
+ if (yscale)
+ *yscale = 0.f;
+
+ _GLFW_REQUIRE_INIT();
+ _glfw.platform.getWindowContentScale(window, xscale, yscale);
+}
+
+GLFWAPI float glfwGetWindowOpacity(GLFWwindow* handle)
+{
+ _GLFWwindow* window = (_GLFWwindow*) handle;
+ assert(window != NULL);
+
+ _GLFW_REQUIRE_INIT_OR_RETURN(1.f);
+ return _glfw.platform.getWindowOpacity(window);
+}
+
+GLFWAPI void glfwSetWindowOpacity(GLFWwindow* handle, float opacity)
+{
+ _GLFWwindow* window = (_GLFWwindow*) handle;
+ assert(window != NULL);
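+ // 'opacity == opacity' is false only when opacity is NaN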
+ assert(opacity == opacity);
+ assert(opacity >= 0.f);
+ assert(opacity <= 1.f);
+
+ _GLFW_REQUIRE_INIT();
+
+ if (opacity != opacity || opacity < 0.f || opacity > 1.f)
+ {
+ _glfwInputError(GLFW_INVALID_VALUE, "Invalid window opacity %f", opacity);
+ return;
+ }
+
+ _glfw.platform.setWindowOpacity(window, opacity);
+}
+
+GLFWAPI void glfwIconifyWindow(GLFWwindow* handle)
+{
+ _GLFWwindow* window = (_GLFWwindow*) handle;
+ assert(window != NULL);
+
+ _GLFW_REQUIRE_INIT();
+ _glfw.platform.iconifyWindow(window);
+}
+
+GLFWAPI void glfwRestoreWindow(GLFWwindow* handle)
+{
+ _GLFWwindow* window = (_GLFWwindow*) handle;
+ assert(window != NULL);
+
+ _GLFW_REQUIRE_INIT();
+ _glfw.platform.restoreWindow(window);
+}
+
+GLFWAPI void glfwMaximizeWindow(GLFWwindow* handle)
+{
+ _GLFWwindow* window = (_GLFWwindow*) handle;
+ assert(window != NULL);
+
+ _GLFW_REQUIRE_INIT();
+
+ if (window->monitor)
+ return;
+
+ _glfw.platform.maximizeWindow(window);
+}
+
+GLFWAPI void glfwShowWindow(GLFWwindow* handle)
+{
+ _GLFWwindow* window = (_GLFWwindow*) handle;
+ assert(window != NULL);
+
+ _GLFW_REQUIRE_INIT();
+
+ if (window->monitor)
+ return;
+
+ _glfw.platform.showWindow(window);
+
+ if (window->focusOnShow)
+ _glfw.platform.focusWindow(window);
+}
+
+GLFWAPI void glfwRequestWindowAttention(GLFWwindow* handle)
+{
+ _GLFWwindow* window = (_GLFWwindow*) handle;
+ assert(window != NULL);
+
+ _GLFW_REQUIRE_INIT();
+
+ _glfw.platform.requestWindowAttention(window);
+}
+
+GLFWAPI void glfwHideWindow(GLFWwindow* handle)
+{
+ _GLFWwindow* window = (_GLFWwindow*) handle;
+ assert(window != NULL);
+
+ _GLFW_REQUIRE_INIT();
+
+ if (window->monitor)
+ return;
+
+ _glfw.platform.hideWindow(window);
+}
+
+GLFWAPI void glfwFocusWindow(GLFWwindow* handle)
+{
+ _GLFWwindow* window = (_GLFWwindow*) handle;
+ assert(window != NULL);
+
+ _GLFW_REQUIRE_INIT();
+
+ _glfw.platform.focusWindow(window);
+}
+
+GLFWAPI int glfwGetWindowAttrib(GLFWwindow* handle, int attrib)
+{
+ _GLFWwindow* window = (_GLFWwindow*) handle;
+ assert(window != NULL);
+
+ _GLFW_REQUIRE_INIT_OR_RETURN(0);
+
+ switch (attrib)
+ {
+ case GLFW_FOCUSED:
+ return _glfw.platform.windowFocused(window);
+ case GLFW_ICONIFIED:
+ return _glfw.platform.windowIconified(window);
+ case GLFW_VISIBLE:
+ return _glfw.platform.windowVisible(window);
+ case GLFW_MAXIMIZED:
+ return _glfw.platform.windowMaximized(window);
+ case GLFW_HOVERED:
+ return _glfw.platform.windowHovered(window);
+ case GLFW_FOCUS_ON_SHOW:
+ return window->focusOnShow;
+ case GLFW_MOUSE_PASSTHROUGH:
+ return window->mousePassthrough;
+ case GLFW_TRANSPARENT_FRAMEBUFFER:
+ return _glfw.platform.framebufferTransparent(window);
+ case GLFW_RESIZABLE:
+ return window->resizable;
+ case GLFW_DECORATED:
+ return window->decorated;
+ case GLFW_FLOATING:
+ return window->floating;
+ case GLFW_AUTO_ICONIFY:
+ return window->autoIconify;
+ case GLFW_DOUBLEBUFFER:
+ return window->doublebuffer;
+ case GLFW_CLIENT_API:
+ return window->context.client;
+ case GLFW_CONTEXT_CREATION_API:
+ return window->context.source;
+ case GLFW_CONTEXT_VERSION_MAJOR:
+ return window->context.major;
+ case GLFW_CONTEXT_VERSION_MINOR:
+ return window->context.minor;
+ case GLFW_CONTEXT_REVISION:
+ return window->context.revision;
+ case GLFW_CONTEXT_ROBUSTNESS:
+ return window->context.robustness;
+ case GLFW_OPENGL_FORWARD_COMPAT:
+ return window->context.forward;
+ case GLFW_CONTEXT_DEBUG:
+ return window->context.debug;
+ case GLFW_OPENGL_PROFILE:
+ return window->context.profile;
+ case GLFW_CONTEXT_RELEASE_BEHAVIOR:
+ return window->context.release;
+ case GLFW_CONTEXT_NO_ERROR:
+ return window->context.noerror;
+ }
+
+ _glfwInputError(GLFW_INVALID_ENUM, "Invalid window attribute 0x%08X", attrib);
+ return 0;
+}
+
+GLFWAPI void glfwSetWindowAttrib(GLFWwindow* handle, int attrib, int value)
+{
+ _GLFWwindow* window = (_GLFWwindow*) handle;
+ assert(window != NULL);
+
+ _GLFW_REQUIRE_INIT();
+
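+ // Every attribute settable here is a boolean, so normalize the value up front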
+ value = value ? GLFW_TRUE : GLFW_FALSE;
+
+ switch (attrib)
+ {
+ case GLFW_AUTO_ICONIFY:
+ window->autoIconify = value;
+ return;
+
+ case GLFW_RESIZABLE:
+ window->resizable = value;
+ if (!window->monitor)
+ _glfw.platform.setWindowResizable(window, value);
+ return;
+
+ case GLFW_DECORATED:
+ window->decorated = value;
+ if (!window->monitor)
+ _glfw.platform.setWindowDecorated(window, value);
+ return;
+
+ case GLFW_FLOATING:
+ window->floating = value;
+ if (!window->monitor)
+ _glfw.platform.setWindowFloating(window, value);
+ return;
+
+ case GLFW_FOCUS_ON_SHOW:
+ window->focusOnShow = value;
+ return;
+
+ case GLFW_MOUSE_PASSTHROUGH:
+ window->mousePassthrough = value;
+ _glfw.platform.setWindowMousePassthrough(window, value);
+ return;
+ }
+
+ _glfwInputError(GLFW_INVALID_ENUM, "Invalid window attribute 0x%08X", attrib);
+}
+
+GLFWAPI GLFWmonitor* glfwGetWindowMonitor(GLFWwindow* handle)
+{
+ _GLFWwindow* window = (_GLFWwindow*) handle;
+ assert(window != NULL);
+
+ _GLFW_REQUIRE_INIT_OR_RETURN(NULL);
+ return (GLFWmonitor*) window->monitor;
+}
+
+GLFWAPI void glfwSetWindowMonitor(GLFWwindow* wh,
+ GLFWmonitor* mh,
+ int xpos, int ypos,
+ int width, int height,
+ int refreshRate)
+{
+ _GLFWwindow* window = (_GLFWwindow*) wh;
+ _GLFWmonitor* monitor = (_GLFWmonitor*) mh;
+ assert(window != NULL);
+ assert(width >= 0);
+ assert(height >= 0);
+
+ _GLFW_REQUIRE_INIT();
+
+ if (width <= 0 || height <= 0)
+ {
+ _glfwInputError(GLFW_INVALID_VALUE,
+ "Invalid window size %ix%i",
+ width, height);
+ return;
+ }
+
+ if (refreshRate < 0 && refreshRate != GLFW_DONT_CARE)
+ {
+ _glfwInputError(GLFW_INVALID_VALUE,
+ "Invalid refresh rate %i",
+ refreshRate);
+ return;
+ }
+
+ window->videoMode.width = width;
+ window->videoMode.height = height;
+ window->videoMode.refreshRate = refreshRate;
+
+ _glfw.platform.setWindowMonitor(window, monitor,
+ xpos, ypos, width, height,
+ refreshRate);
+}
+
+GLFWAPI void glfwSetWindowUserPointer(GLFWwindow* handle, void* pointer)
+{
+ _GLFWwindow* window = (_GLFWwindow*) handle;
+ assert(window != NULL);
+
+ _GLFW_REQUIRE_INIT();
+ window->userPointer = pointer;
+}
+
+GLFWAPI void* glfwGetWindowUserPointer(GLFWwindow* handle)
+{
+ _GLFWwindow* window = (_GLFWwindow*) handle;
+ assert(window != NULL);
+
+ _GLFW_REQUIRE_INIT_OR_RETURN(NULL);
+ return window->userPointer;
+}
+
+GLFWAPI GLFWwindowposfun glfwSetWindowPosCallback(GLFWwindow* handle,
+ GLFWwindowposfun cbfun)
+{
+ _GLFWwindow* window = (_GLFWwindow*) handle;
+ assert(window != NULL);
+
+ _GLFW_REQUIRE_INIT_OR_RETURN(NULL);
+ _GLFW_SWAP(GLFWwindowposfun, window->callbacks.pos, cbfun);
+ return cbfun;
+}
+
+GLFWAPI GLFWwindowsizefun glfwSetWindowSizeCallback(GLFWwindow* handle,
+ GLFWwindowsizefun cbfun)
+{
+ _GLFWwindow* window = (_GLFWwindow*) handle;
+ assert(window != NULL);
+
+ _GLFW_REQUIRE_INIT_OR_RETURN(NULL);
+ _GLFW_SWAP(GLFWwindowsizefun, window->callbacks.size, cbfun);
+ return cbfun;
+}
+
+GLFWAPI GLFWwindowclosefun glfwSetWindowCloseCallback(GLFWwindow* handle,
+ GLFWwindowclosefun cbfun)
+{
+ _GLFWwindow* window = (_GLFWwindow*) handle;
+ assert(window != NULL);
+
+ _GLFW_REQUIRE_INIT_OR_RETURN(NULL);
+ _GLFW_SWAP(GLFWwindowclosefun, window->callbacks.close, cbfun);
+ return cbfun;
+}
+
+GLFWAPI GLFWwindowrefreshfun glfwSetWindowRefreshCallback(GLFWwindow* handle,
+ GLFWwindowrefreshfun cbfun)
+{
+ _GLFWwindow* window = (_GLFWwindow*) handle;
+ assert(window != NULL);
+
+ _GLFW_REQUIRE_INIT_OR_RETURN(NULL);
+ _GLFW_SWAP(GLFWwindowrefreshfun, window->callbacks.refresh, cbfun);
+ return cbfun;
+}
+
+GLFWAPI GLFWwindowfocusfun glfwSetWindowFocusCallback(GLFWwindow* handle,
+ GLFWwindowfocusfun cbfun)
+{
+ _GLFWwindow* window = (_GLFWwindow*) handle;
+ assert(window != NULL);
+
+ _GLFW_REQUIRE_INIT_OR_RETURN(NULL);
+ _GLFW_SWAP(GLFWwindowfocusfun, window->callbacks.focus, cbfun);
+ return cbfun;
+}
+
+GLFWAPI GLFWwindowiconifyfun glfwSetWindowIconifyCallback(GLFWwindow* handle,
+ GLFWwindowiconifyfun cbfun)
+{
+ _GLFWwindow* window = (_GLFWwindow*) handle;
+ assert(window != NULL);
+
+ _GLFW_REQUIRE_INIT_OR_RETURN(NULL);
+ _GLFW_SWAP(GLFWwindowiconifyfun, window->callbacks.iconify, cbfun);
+ return cbfun;
+}
+
+GLFWAPI GLFWwindowmaximizefun glfwSetWindowMaximizeCallback(GLFWwindow* handle,
+ GLFWwindowmaximizefun cbfun)
+{
+ _GLFWwindow* window = (_GLFWwindow*) handle;
+ assert(window != NULL);
+
+ _GLFW_REQUIRE_INIT_OR_RETURN(NULL);
+ _GLFW_SWAP(GLFWwindowmaximizefun, window->callbacks.maximize, cbfun);
+ return cbfun;
+}
+
+GLFWAPI GLFWframebuffersizefun glfwSetFramebufferSizeCallback(GLFWwindow* handle,
+ GLFWframebuffersizefun cbfun)
+{
+ _GLFWwindow* window = (_GLFWwindow*) handle;
+ assert(window != NULL);
+
+ _GLFW_REQUIRE_INIT_OR_RETURN(NULL);
+ _GLFW_SWAP(GLFWframebuffersizefun, window->callbacks.fbsize, cbfun);
+ return cbfun;
+}
+
+GLFWAPI GLFWwindowcontentscalefun glfwSetWindowContentScaleCallback(GLFWwindow* handle,
+ GLFWwindowcontentscalefun cbfun)
+{
+ _GLFWwindow* window = (_GLFWwindow*) handle;
+ assert(window != NULL);
+
+ _GLFW_REQUIRE_INIT_OR_RETURN(NULL);
+ _GLFW_SWAP(GLFWwindowcontentscalefun, window->callbacks.scale, cbfun);
+ return cbfun;
+}
+
+GLFWAPI void glfwPollEvents(void)
+{
+ _GLFW_REQUIRE_INIT();
+ _glfw.platform.pollEvents();
+}
+
+GLFWAPI void glfwWaitEvents(void)
+{
+ _GLFW_REQUIRE_INIT();
+ _glfw.platform.waitEvents();
+}
+
+GLFWAPI void glfwWaitEventsTimeout(double timeout)
+{
+ _GLFW_REQUIRE_INIT();
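+ // 'timeout == timeout' rejects NaN; the range checks reject negative or non-finite values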
+ assert(timeout == timeout);
+ assert(timeout >= 0.0);
+ assert(timeout <= DBL_MAX);
+
+ if (timeout != timeout || timeout < 0.0 || timeout > DBL_MAX)
+ {
+ _glfwInputError(GLFW_INVALID_VALUE, "Invalid time %f", timeout);
+ return;
+ }
+
+ _glfw.platform.waitEventsTimeout(timeout);
+}
+
+GLFWAPI void glfwPostEmptyEvent(void)
+{
+ _GLFW_REQUIRE_INIT();
+ _glfw.platform.postEmptyEvent();
+}
+
diff --git a/chromium/third_party/dawn/third_party/glfw/src/wl_init.c b/chromium/third_party/dawn/third_party/glfw/src/wl_init.c
new file mode 100644
index 00000000000..c232ce79720
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/src/wl_init.c
@@ -0,0 +1,779 @@
+//========================================================================
+// GLFW 3.4 Wayland - www.glfw.org
+//------------------------------------------------------------------------
+// Copyright (c) 2014 Jonas Ådahl <jadahl@gmail.com>
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would
+// be appreciated but is not required.
+//
+// 2. Altered source versions must be plainly marked as such, and must not
+// be misrepresented as being the original software.
+//
+// 3. This notice may not be removed or altered from any source
+// distribution.
+//
+//========================================================================
+// It is fine to use C99 in this file because it will not be built with VS
+//========================================================================
+
+#include "internal.h"
+
+#include <errno.h>
+#include <limits.h>
+#include <linux/input.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/timerfd.h>
+#include <unistd.h>
+#include <time.h>
+
+#include "wayland-client-protocol.h"
+#include "wayland-xdg-shell-client-protocol.h"
+#include "wayland-xdg-decoration-client-protocol.h"
+#include "wayland-viewporter-client-protocol.h"
+#include "wayland-relative-pointer-unstable-v1-client-protocol.h"
+#include "wayland-pointer-constraints-unstable-v1-client-protocol.h"
+#include "wayland-idle-inhibit-unstable-v1-client-protocol.h"
+
+// NOTE: Versions of wayland-scanner prior to 1.17.91 named every global array of
+// wl_interface pointers 'types', making it impossible to combine several unmodified
+// private-code files into a single compilation unit
+// HACK: We override this name with a macro for each file, allowing them to coexist
+
+#define types _glfw_wayland_types
+#include "wayland-client-protocol-code.h"
+#undef types
+
+#define types _glfw_xdg_shell_types
+#include "wayland-xdg-shell-client-protocol-code.h"
+#undef types
+
+#define types _glfw_xdg_decoration_types
+#include "wayland-xdg-decoration-client-protocol-code.h"
+#undef types
+
+#define types _glfw_viewporter_types
+#include "wayland-viewporter-client-protocol-code.h"
+#undef types
+
+#define types _glfw_relative_pointer_types
+#include "wayland-relative-pointer-unstable-v1-client-protocol-code.h"
+#undef types
+
+#define types _glfw_pointer_constraints_types
+#include "wayland-pointer-constraints-unstable-v1-client-protocol-code.h"
+#undef types
+
+#define types _glfw_idle_inhibit_types
+#include "wayland-idle-inhibit-unstable-v1-client-protocol-code.h"
+#undef types
+
+static void wmBaseHandlePing(void* userData,
+ struct xdg_wm_base* wmBase,
+ uint32_t serial)
+{
+ xdg_wm_base_pong(wmBase, serial);
+}
+
+static const struct xdg_wm_base_listener wmBaseListener =
+{
+ wmBaseHandlePing
+};
+
+static void registryHandleGlobal(void* userData,
+ struct wl_registry* registry,
+ uint32_t name,
+ const char* interface,
+ uint32_t version)
+{
+ if (strcmp(interface, "wl_compositor") == 0)
+ {
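+ // Cap at version 3, which this backend relies on for wl_surface.set_buffer_scale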
+ _glfw.wl.compositorVersion = _glfw_min(3, version);
+ _glfw.wl.compositor =
+ wl_registry_bind(registry, name, &wl_compositor_interface,
+ _glfw.wl.compositorVersion);
+ }
+ else if (strcmp(interface, "wl_subcompositor") == 0)
+ {
+ _glfw.wl.subcompositor =
+ wl_registry_bind(registry, name, &wl_subcompositor_interface, 1);
+ }
+ else if (strcmp(interface, "wl_shm") == 0)
+ {
+ _glfw.wl.shm =
+ wl_registry_bind(registry, name, &wl_shm_interface, 1);
+ }
+ else if (strcmp(interface, "wl_output") == 0)
+ {
+ _glfwAddOutputWayland(name, version);
+ }
+ else if (strcmp(interface, "wl_seat") == 0)
+ {
+ if (!_glfw.wl.seat)
+ {
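+ // Cap at version 4, which adds the wl_keyboard.repeat_info event used for key repeat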
+ _glfw.wl.seatVersion = _glfw_min(4, version);
+ _glfw.wl.seat =
+ wl_registry_bind(registry, name, &wl_seat_interface,
+ _glfw.wl.seatVersion);
+ _glfwAddSeatListenerWayland(_glfw.wl.seat);
+ }
+ }
+ else if (strcmp(interface, "wl_data_device_manager") == 0)
+ {
+ if (!_glfw.wl.dataDeviceManager)
+ {
+ _glfw.wl.dataDeviceManager =
+ wl_registry_bind(registry, name,
+ &wl_data_device_manager_interface, 1);
+ }
+ }
+ else if (strcmp(interface, "xdg_wm_base") == 0)
+ {
+ _glfw.wl.wmBase =
+ wl_registry_bind(registry, name, &xdg_wm_base_interface, 1);
+ xdg_wm_base_add_listener(_glfw.wl.wmBase, &wmBaseListener, NULL);
+ }
+ else if (strcmp(interface, "zxdg_decoration_manager_v1") == 0)
+ {
+ _glfw.wl.decorationManager =
+ wl_registry_bind(registry, name,
+ &zxdg_decoration_manager_v1_interface,
+ 1);
+ }
+ else if (strcmp(interface, "wp_viewporter") == 0)
+ {
+ _glfw.wl.viewporter =
+ wl_registry_bind(registry, name, &wp_viewporter_interface, 1);
+ }
+ else if (strcmp(interface, "zwp_relative_pointer_manager_v1") == 0)
+ {
+ _glfw.wl.relativePointerManager =
+ wl_registry_bind(registry, name,
+ &zwp_relative_pointer_manager_v1_interface,
+ 1);
+ }
+ else if (strcmp(interface, "zwp_pointer_constraints_v1") == 0)
+ {
+ _glfw.wl.pointerConstraints =
+ wl_registry_bind(registry, name,
+ &zwp_pointer_constraints_v1_interface,
+ 1);
+ }
+ else if (strcmp(interface, "zwp_idle_inhibit_manager_v1") == 0)
+ {
+ _glfw.wl.idleInhibitManager =
+ wl_registry_bind(registry, name,
+ &zwp_idle_inhibit_manager_v1_interface,
+ 1);
+ }
+}
+
+static void registryHandleGlobalRemove(void* userData,
+ struct wl_registry* registry,
+ uint32_t name)
+{
+ _GLFWmonitor* monitor;
+
+ for (int i = 0; i < _glfw.monitorCount; ++i)
+ {
+ monitor = _glfw.monitors[i];
+ if (monitor->wl.name == name)
+ {
+ _glfwInputMonitor(monitor, GLFW_DISCONNECTED, 0);
+ return;
+ }
+ }
+}
+
+
+static const struct wl_registry_listener registryListener =
+{
+ registryHandleGlobal,
+ registryHandleGlobalRemove
+};
+
+// Create key code translation tables
+//
+static void createKeyTables(void)
+{
+ memset(_glfw.wl.keycodes, -1, sizeof(_glfw.wl.keycodes));
+ memset(_glfw.wl.scancodes, -1, sizeof(_glfw.wl.scancodes));
+
+ _glfw.wl.keycodes[KEY_GRAVE] = GLFW_KEY_GRAVE_ACCENT;
+ _glfw.wl.keycodes[KEY_1] = GLFW_KEY_1;
+ _glfw.wl.keycodes[KEY_2] = GLFW_KEY_2;
+ _glfw.wl.keycodes[KEY_3] = GLFW_KEY_3;
+ _glfw.wl.keycodes[KEY_4] = GLFW_KEY_4;
+ _glfw.wl.keycodes[KEY_5] = GLFW_KEY_5;
+ _glfw.wl.keycodes[KEY_6] = GLFW_KEY_6;
+ _glfw.wl.keycodes[KEY_7] = GLFW_KEY_7;
+ _glfw.wl.keycodes[KEY_8] = GLFW_KEY_8;
+ _glfw.wl.keycodes[KEY_9] = GLFW_KEY_9;
+ _glfw.wl.keycodes[KEY_0] = GLFW_KEY_0;
+ _glfw.wl.keycodes[KEY_SPACE] = GLFW_KEY_SPACE;
+ _glfw.wl.keycodes[KEY_MINUS] = GLFW_KEY_MINUS;
+ _glfw.wl.keycodes[KEY_EQUAL] = GLFW_KEY_EQUAL;
+ _glfw.wl.keycodes[KEY_Q] = GLFW_KEY_Q;
+ _glfw.wl.keycodes[KEY_W] = GLFW_KEY_W;
+ _glfw.wl.keycodes[KEY_E] = GLFW_KEY_E;
+ _glfw.wl.keycodes[KEY_R] = GLFW_KEY_R;
+ _glfw.wl.keycodes[KEY_T] = GLFW_KEY_T;
+ _glfw.wl.keycodes[KEY_Y] = GLFW_KEY_Y;
+ _glfw.wl.keycodes[KEY_U] = GLFW_KEY_U;
+ _glfw.wl.keycodes[KEY_I] = GLFW_KEY_I;
+ _glfw.wl.keycodes[KEY_O] = GLFW_KEY_O;
+ _glfw.wl.keycodes[KEY_P] = GLFW_KEY_P;
+ _glfw.wl.keycodes[KEY_LEFTBRACE] = GLFW_KEY_LEFT_BRACKET;
+ _glfw.wl.keycodes[KEY_RIGHTBRACE] = GLFW_KEY_RIGHT_BRACKET;
+ _glfw.wl.keycodes[KEY_A] = GLFW_KEY_A;
+ _glfw.wl.keycodes[KEY_S] = GLFW_KEY_S;
+ _glfw.wl.keycodes[KEY_D] = GLFW_KEY_D;
+ _glfw.wl.keycodes[KEY_F] = GLFW_KEY_F;
+ _glfw.wl.keycodes[KEY_G] = GLFW_KEY_G;
+ _glfw.wl.keycodes[KEY_H] = GLFW_KEY_H;
+ _glfw.wl.keycodes[KEY_J] = GLFW_KEY_J;
+ _glfw.wl.keycodes[KEY_K] = GLFW_KEY_K;
+ _glfw.wl.keycodes[KEY_L] = GLFW_KEY_L;
+ _glfw.wl.keycodes[KEY_SEMICOLON] = GLFW_KEY_SEMICOLON;
+ _glfw.wl.keycodes[KEY_APOSTROPHE] = GLFW_KEY_APOSTROPHE;
+ _glfw.wl.keycodes[KEY_Z] = GLFW_KEY_Z;
+ _glfw.wl.keycodes[KEY_X] = GLFW_KEY_X;
+ _glfw.wl.keycodes[KEY_C] = GLFW_KEY_C;
+ _glfw.wl.keycodes[KEY_V] = GLFW_KEY_V;
+ _glfw.wl.keycodes[KEY_B] = GLFW_KEY_B;
+ _glfw.wl.keycodes[KEY_N] = GLFW_KEY_N;
+ _glfw.wl.keycodes[KEY_M] = GLFW_KEY_M;
+ _glfw.wl.keycodes[KEY_COMMA] = GLFW_KEY_COMMA;
+ _glfw.wl.keycodes[KEY_DOT] = GLFW_KEY_PERIOD;
+ _glfw.wl.keycodes[KEY_SLASH] = GLFW_KEY_SLASH;
+ _glfw.wl.keycodes[KEY_BACKSLASH] = GLFW_KEY_BACKSLASH;
+ _glfw.wl.keycodes[KEY_ESC] = GLFW_KEY_ESCAPE;
+ _glfw.wl.keycodes[KEY_TAB] = GLFW_KEY_TAB;
+ _glfw.wl.keycodes[KEY_LEFTSHIFT] = GLFW_KEY_LEFT_SHIFT;
+ _glfw.wl.keycodes[KEY_RIGHTSHIFT] = GLFW_KEY_RIGHT_SHIFT;
+ _glfw.wl.keycodes[KEY_LEFTCTRL] = GLFW_KEY_LEFT_CONTROL;
+ _glfw.wl.keycodes[KEY_RIGHTCTRL] = GLFW_KEY_RIGHT_CONTROL;
+ _glfw.wl.keycodes[KEY_LEFTALT] = GLFW_KEY_LEFT_ALT;
+ _glfw.wl.keycodes[KEY_RIGHTALT] = GLFW_KEY_RIGHT_ALT;
+ _glfw.wl.keycodes[KEY_LEFTMETA] = GLFW_KEY_LEFT_SUPER;
+ _glfw.wl.keycodes[KEY_RIGHTMETA] = GLFW_KEY_RIGHT_SUPER;
+ _glfw.wl.keycodes[KEY_COMPOSE] = GLFW_KEY_MENU;
+ _glfw.wl.keycodes[KEY_NUMLOCK] = GLFW_KEY_NUM_LOCK;
+ _glfw.wl.keycodes[KEY_CAPSLOCK] = GLFW_KEY_CAPS_LOCK;
+ _glfw.wl.keycodes[KEY_PRINT] = GLFW_KEY_PRINT_SCREEN;
+ _glfw.wl.keycodes[KEY_SCROLLLOCK] = GLFW_KEY_SCROLL_LOCK;
+ _glfw.wl.keycodes[KEY_PAUSE] = GLFW_KEY_PAUSE;
+ _glfw.wl.keycodes[KEY_DELETE] = GLFW_KEY_DELETE;
+ _glfw.wl.keycodes[KEY_BACKSPACE] = GLFW_KEY_BACKSPACE;
+ _glfw.wl.keycodes[KEY_ENTER] = GLFW_KEY_ENTER;
+ _glfw.wl.keycodes[KEY_HOME] = GLFW_KEY_HOME;
+ _glfw.wl.keycodes[KEY_END] = GLFW_KEY_END;
+ _glfw.wl.keycodes[KEY_PAGEUP] = GLFW_KEY_PAGE_UP;
+ _glfw.wl.keycodes[KEY_PAGEDOWN] = GLFW_KEY_PAGE_DOWN;
+ _glfw.wl.keycodes[KEY_INSERT] = GLFW_KEY_INSERT;
+ _glfw.wl.keycodes[KEY_LEFT] = GLFW_KEY_LEFT;
+ _glfw.wl.keycodes[KEY_RIGHT] = GLFW_KEY_RIGHT;
+ _glfw.wl.keycodes[KEY_DOWN] = GLFW_KEY_DOWN;
+ _glfw.wl.keycodes[KEY_UP] = GLFW_KEY_UP;
+ _glfw.wl.keycodes[KEY_F1] = GLFW_KEY_F1;
+ _glfw.wl.keycodes[KEY_F2] = GLFW_KEY_F2;
+ _glfw.wl.keycodes[KEY_F3] = GLFW_KEY_F3;
+ _glfw.wl.keycodes[KEY_F4] = GLFW_KEY_F4;
+ _glfw.wl.keycodes[KEY_F5] = GLFW_KEY_F5;
+ _glfw.wl.keycodes[KEY_F6] = GLFW_KEY_F6;
+ _glfw.wl.keycodes[KEY_F7] = GLFW_KEY_F7;
+ _glfw.wl.keycodes[KEY_F8] = GLFW_KEY_F8;
+ _glfw.wl.keycodes[KEY_F9] = GLFW_KEY_F9;
+ _glfw.wl.keycodes[KEY_F10] = GLFW_KEY_F10;
+ _glfw.wl.keycodes[KEY_F11] = GLFW_KEY_F11;
+ _glfw.wl.keycodes[KEY_F12] = GLFW_KEY_F12;
+ _glfw.wl.keycodes[KEY_F13] = GLFW_KEY_F13;
+ _glfw.wl.keycodes[KEY_F14] = GLFW_KEY_F14;
+ _glfw.wl.keycodes[KEY_F15] = GLFW_KEY_F15;
+ _glfw.wl.keycodes[KEY_F16] = GLFW_KEY_F16;
+ _glfw.wl.keycodes[KEY_F17] = GLFW_KEY_F17;
+ _glfw.wl.keycodes[KEY_F18] = GLFW_KEY_F18;
+ _glfw.wl.keycodes[KEY_F19] = GLFW_KEY_F19;
+ _glfw.wl.keycodes[KEY_F20] = GLFW_KEY_F20;
+ _glfw.wl.keycodes[KEY_F21] = GLFW_KEY_F21;
+ _glfw.wl.keycodes[KEY_F22] = GLFW_KEY_F22;
+ _glfw.wl.keycodes[KEY_F23] = GLFW_KEY_F23;
+ _glfw.wl.keycodes[KEY_F24] = GLFW_KEY_F24;
+ _glfw.wl.keycodes[KEY_KPSLASH] = GLFW_KEY_KP_DIVIDE;
+ _glfw.wl.keycodes[KEY_KPASTERISK] = GLFW_KEY_KP_MULTIPLY;
+ _glfw.wl.keycodes[KEY_KPMINUS] = GLFW_KEY_KP_SUBTRACT;
+ _glfw.wl.keycodes[KEY_KPPLUS] = GLFW_KEY_KP_ADD;
+ _glfw.wl.keycodes[KEY_KP0] = GLFW_KEY_KP_0;
+ _glfw.wl.keycodes[KEY_KP1] = GLFW_KEY_KP_1;
+ _glfw.wl.keycodes[KEY_KP2] = GLFW_KEY_KP_2;
+ _glfw.wl.keycodes[KEY_KP3] = GLFW_KEY_KP_3;
+ _glfw.wl.keycodes[KEY_KP4] = GLFW_KEY_KP_4;
+ _glfw.wl.keycodes[KEY_KP5] = GLFW_KEY_KP_5;
+ _glfw.wl.keycodes[KEY_KP6] = GLFW_KEY_KP_6;
+ _glfw.wl.keycodes[KEY_KP7] = GLFW_KEY_KP_7;
+ _glfw.wl.keycodes[KEY_KP8] = GLFW_KEY_KP_8;
+ _glfw.wl.keycodes[KEY_KP9] = GLFW_KEY_KP_9;
+ _glfw.wl.keycodes[KEY_KPDOT] = GLFW_KEY_KP_DECIMAL;
+ _glfw.wl.keycodes[KEY_KPEQUAL] = GLFW_KEY_KP_EQUAL;
+ _glfw.wl.keycodes[KEY_KPENTER] = GLFW_KEY_KP_ENTER;
+ _glfw.wl.keycodes[KEY_102ND] = GLFW_KEY_WORLD_2;
+
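+ // Build the reverse table mapping GLFW keys back to evdev scancodes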
+ for (int scancode = 0; scancode < 256; scancode++)
+ {
+ if (_glfw.wl.keycodes[scancode] > 0)
+ _glfw.wl.scancodes[_glfw.wl.keycodes[scancode]] = scancode;
+ }
+}
+
+
+//////////////////////////////////////////////////////////////////////////
+////// GLFW platform API //////
+//////////////////////////////////////////////////////////////////////////
+
+GLFWbool _glfwConnectWayland(int platformID, _GLFWplatform* platform)
+{
+ const _GLFWplatform wayland =
+ {
+ GLFW_PLATFORM_WAYLAND,
+ _glfwInitWayland,
+ _glfwTerminateWayland,
+ _glfwGetCursorPosWayland,
+ _glfwSetCursorPosWayland,
+ _glfwSetCursorModeWayland,
+ _glfwSetRawMouseMotionWayland,
+ _glfwRawMouseMotionSupportedWayland,
+ _glfwCreateCursorWayland,
+ _glfwCreateStandardCursorWayland,
+ _glfwDestroyCursorWayland,
+ _glfwSetCursorWayland,
+ _glfwGetScancodeNameWayland,
+ _glfwGetKeyScancodeWayland,
+ _glfwSetClipboardStringWayland,
+ _glfwGetClipboardStringWayland,
+#if defined(__linux__)
+ _glfwInitJoysticksLinux,
+ _glfwTerminateJoysticksLinux,
+ _glfwPollJoystickLinux,
+ _glfwGetMappingNameLinux,
+ _glfwUpdateGamepadGUIDLinux,
+#else
+ _glfwInitJoysticksNull,
+ _glfwTerminateJoysticksNull,
+ _glfwPollJoystickNull,
+ _glfwGetMappingNameNull,
+ _glfwUpdateGamepadGUIDNull,
+#endif
+ _glfwFreeMonitorWayland,
+ _glfwGetMonitorPosWayland,
+ _glfwGetMonitorContentScaleWayland,
+ _glfwGetMonitorWorkareaWayland,
+ _glfwGetVideoModesWayland,
+ _glfwGetVideoModeWayland,
+ _glfwGetGammaRampWayland,
+ _glfwSetGammaRampWayland,
+ _glfwCreateWindowWayland,
+ _glfwDestroyWindowWayland,
+ _glfwSetWindowTitleWayland,
+ _glfwSetWindowIconWayland,
+ _glfwGetWindowPosWayland,
+ _glfwSetWindowPosWayland,
+ _glfwGetWindowSizeWayland,
+ _glfwSetWindowSizeWayland,
+ _glfwSetWindowSizeLimitsWayland,
+ _glfwSetWindowAspectRatioWayland,
+ _glfwGetFramebufferSizeWayland,
+ _glfwGetWindowFrameSizeWayland,
+ _glfwGetWindowContentScaleWayland,
+ _glfwIconifyWindowWayland,
+ _glfwRestoreWindowWayland,
+ _glfwMaximizeWindowWayland,
+ _glfwShowWindowWayland,
+ _glfwHideWindowWayland,
+ _glfwRequestWindowAttentionWayland,
+ _glfwFocusWindowWayland,
+ _glfwSetWindowMonitorWayland,
+ _glfwWindowFocusedWayland,
+ _glfwWindowIconifiedWayland,
+ _glfwWindowVisibleWayland,
+ _glfwWindowMaximizedWayland,
+ _glfwWindowHoveredWayland,
+ _glfwFramebufferTransparentWayland,
+ _glfwGetWindowOpacityWayland,
+ _glfwSetWindowResizableWayland,
+ _glfwSetWindowDecoratedWayland,
+ _glfwSetWindowFloatingWayland,
+ _glfwSetWindowOpacityWayland,
+ _glfwSetWindowMousePassthroughWayland,
+ _glfwPollEventsWayland,
+ _glfwWaitEventsWayland,
+ _glfwWaitEventsTimeoutWayland,
+ _glfwPostEmptyEventWayland,
+ _glfwGetEGLPlatformWayland,
+ _glfwGetEGLNativeDisplayWayland,
+ _glfwGetEGLNativeWindowWayland,
+ _glfwGetRequiredInstanceExtensionsWayland,
+ _glfwGetPhysicalDevicePresentationSupportWayland,
+ _glfwCreateWindowSurfaceWayland,
+ };
+
+ void* module = _glfwPlatformLoadModule("libwayland-client.so.0");
+ if (!module)
+ {
+ if (platformID == GLFW_PLATFORM_WAYLAND)
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "Wayland: Failed to load libwayland-client");
+ }
+
+ return GLFW_FALSE;
+ }
+
+ PFN_wl_display_connect wl_display_connect = (PFN_wl_display_connect)
+ _glfwPlatformGetModuleSymbol(module, "wl_display_connect");
+ if (!wl_display_connect)
+ {
+ if (platformID == GLFW_PLATFORM_WAYLAND)
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "Wayland: Failed to load libwayland-client entry point");
+ }
+
+ _glfwPlatformFreeModule(module);
+ return GLFW_FALSE;
+ }
+
+ struct wl_display* display = wl_display_connect(NULL);
+ if (!display)
+ {
+ if (platformID == GLFW_PLATFORM_WAYLAND)
+ _glfwInputError(GLFW_PLATFORM_ERROR, "Wayland: Failed to connect to display");
+
+ _glfwPlatformFreeModule(module);
+ return GLFW_FALSE;
+ }
+
+ _glfw.wl.display = display;
+ _glfw.wl.client.handle = module;
+
+ *platform = wayland;
+ return GLFW_TRUE;
+}
+
+int _glfwInitWayland(void)
+{
+ const char* cursorTheme;
+ const char* cursorSizeStr;
+ char* cursorSizeEnd;
+ long cursorSizeLong;
+ int cursorSize;
+
+ _glfw.wl.client.display_flush = (PFN_wl_display_flush)
+ _glfwPlatformGetModuleSymbol(_glfw.wl.client.handle, "wl_display_flush");
+ _glfw.wl.client.display_cancel_read = (PFN_wl_display_cancel_read)
+ _glfwPlatformGetModuleSymbol(_glfw.wl.client.handle, "wl_display_cancel_read");
+ _glfw.wl.client.display_dispatch_pending = (PFN_wl_display_dispatch_pending)
+ _glfwPlatformGetModuleSymbol(_glfw.wl.client.handle, "wl_display_dispatch_pending");
+ _glfw.wl.client.display_read_events = (PFN_wl_display_read_events)
+ _glfwPlatformGetModuleSymbol(_glfw.wl.client.handle, "wl_display_read_events");
+ _glfw.wl.client.display_disconnect = (PFN_wl_display_disconnect)
+ _glfwPlatformGetModuleSymbol(_glfw.wl.client.handle, "wl_display_disconnect");
+ _glfw.wl.client.display_roundtrip = (PFN_wl_display_roundtrip)
+ _glfwPlatformGetModuleSymbol(_glfw.wl.client.handle, "wl_display_roundtrip");
+ _glfw.wl.client.display_get_fd = (PFN_wl_display_get_fd)
+ _glfwPlatformGetModuleSymbol(_glfw.wl.client.handle, "wl_display_get_fd");
+ _glfw.wl.client.display_prepare_read = (PFN_wl_display_prepare_read)
+ _glfwPlatformGetModuleSymbol(_glfw.wl.client.handle, "wl_display_prepare_read");
+ _glfw.wl.client.proxy_marshal = (PFN_wl_proxy_marshal)
+ _glfwPlatformGetModuleSymbol(_glfw.wl.client.handle, "wl_proxy_marshal");
+ _glfw.wl.client.proxy_add_listener = (PFN_wl_proxy_add_listener)
+ _glfwPlatformGetModuleSymbol(_glfw.wl.client.handle, "wl_proxy_add_listener");
+ _glfw.wl.client.proxy_destroy = (PFN_wl_proxy_destroy)
+ _glfwPlatformGetModuleSymbol(_glfw.wl.client.handle, "wl_proxy_destroy");
+ _glfw.wl.client.proxy_marshal_constructor = (PFN_wl_proxy_marshal_constructor)
+ _glfwPlatformGetModuleSymbol(_glfw.wl.client.handle, "wl_proxy_marshal_constructor");
+ _glfw.wl.client.proxy_marshal_constructor_versioned = (PFN_wl_proxy_marshal_constructor_versioned)
+ _glfwPlatformGetModuleSymbol(_glfw.wl.client.handle, "wl_proxy_marshal_constructor_versioned");
+ _glfw.wl.client.proxy_get_user_data = (PFN_wl_proxy_get_user_data)
+ _glfwPlatformGetModuleSymbol(_glfw.wl.client.handle, "wl_proxy_get_user_data");
+ _glfw.wl.client.proxy_set_user_data = (PFN_wl_proxy_set_user_data)
+ _glfwPlatformGetModuleSymbol(_glfw.wl.client.handle, "wl_proxy_set_user_data");
+ _glfw.wl.client.proxy_get_version = (PFN_wl_proxy_get_version)
+ _glfwPlatformGetModuleSymbol(_glfw.wl.client.handle, "wl_proxy_get_version");
+ _glfw.wl.client.proxy_marshal_flags = (PFN_wl_proxy_marshal_flags)
+ _glfwPlatformGetModuleSymbol(_glfw.wl.client.handle, "wl_proxy_marshal_flags");
+
+ if (!_glfw.wl.client.display_flush ||
+ !_glfw.wl.client.display_cancel_read ||
+ !_glfw.wl.client.display_dispatch_pending ||
+ !_glfw.wl.client.display_read_events ||
+ !_glfw.wl.client.display_disconnect ||
+ !_glfw.wl.client.display_roundtrip ||
+ !_glfw.wl.client.display_get_fd ||
+ !_glfw.wl.client.display_prepare_read ||
+ !_glfw.wl.client.proxy_marshal ||
+ !_glfw.wl.client.proxy_add_listener ||
+ !_glfw.wl.client.proxy_destroy ||
+ !_glfw.wl.client.proxy_marshal_constructor ||
+ !_glfw.wl.client.proxy_marshal_constructor_versioned ||
+ !_glfw.wl.client.proxy_get_user_data ||
+ !_glfw.wl.client.proxy_set_user_data)
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "Wayland: Failed to load libwayland-client entry point");
+ return GLFW_FALSE;
+ }
+
+ _glfw.wl.cursor.handle = _glfwPlatformLoadModule("libwayland-cursor.so.0");
+ if (!_glfw.wl.cursor.handle)
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "Wayland: Failed to load libwayland-cursor");
+ return GLFW_FALSE;
+ }
+
+ _glfw.wl.cursor.theme_load = (PFN_wl_cursor_theme_load)
+ _glfwPlatformGetModuleSymbol(_glfw.wl.cursor.handle, "wl_cursor_theme_load");
+ _glfw.wl.cursor.theme_destroy = (PFN_wl_cursor_theme_destroy)
+ _glfwPlatformGetModuleSymbol(_glfw.wl.cursor.handle, "wl_cursor_theme_destroy");
+ _glfw.wl.cursor.theme_get_cursor = (PFN_wl_cursor_theme_get_cursor)
+ _glfwPlatformGetModuleSymbol(_glfw.wl.cursor.handle, "wl_cursor_theme_get_cursor");
+ _glfw.wl.cursor.image_get_buffer = (PFN_wl_cursor_image_get_buffer)
+ _glfwPlatformGetModuleSymbol(_glfw.wl.cursor.handle, "wl_cursor_image_get_buffer");
+
+ _glfw.wl.egl.handle = _glfwPlatformLoadModule("libwayland-egl.so.1");
+ if (!_glfw.wl.egl.handle)
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "Wayland: Failed to load libwayland-egl");
+ return GLFW_FALSE;
+ }
+
+ _glfw.wl.egl.window_create = (PFN_wl_egl_window_create)
+ _glfwPlatformGetModuleSymbol(_glfw.wl.egl.handle, "wl_egl_window_create");
+ _glfw.wl.egl.window_destroy = (PFN_wl_egl_window_destroy)
+ _glfwPlatformGetModuleSymbol(_glfw.wl.egl.handle, "wl_egl_window_destroy");
+ _glfw.wl.egl.window_resize = (PFN_wl_egl_window_resize)
+ _glfwPlatformGetModuleSymbol(_glfw.wl.egl.handle, "wl_egl_window_resize");
+
+ _glfw.wl.xkb.handle = _glfwPlatformLoadModule("libxkbcommon.so.0");
+ if (!_glfw.wl.xkb.handle)
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "Wayland: Failed to load libxkbcommon");
+ return GLFW_FALSE;
+ }
+
+ _glfw.wl.xkb.context_new = (PFN_xkb_context_new)
+ _glfwPlatformGetModuleSymbol(_glfw.wl.xkb.handle, "xkb_context_new");
+ _glfw.wl.xkb.context_unref = (PFN_xkb_context_unref)
+ _glfwPlatformGetModuleSymbol(_glfw.wl.xkb.handle, "xkb_context_unref");
+ _glfw.wl.xkb.keymap_new_from_string = (PFN_xkb_keymap_new_from_string)
+ _glfwPlatformGetModuleSymbol(_glfw.wl.xkb.handle, "xkb_keymap_new_from_string");
+ _glfw.wl.xkb.keymap_unref = (PFN_xkb_keymap_unref)
+ _glfwPlatformGetModuleSymbol(_glfw.wl.xkb.handle, "xkb_keymap_unref");
+ _glfw.wl.xkb.keymap_mod_get_index = (PFN_xkb_keymap_mod_get_index)
+ _glfwPlatformGetModuleSymbol(_glfw.wl.xkb.handle, "xkb_keymap_mod_get_index");
+ _glfw.wl.xkb.keymap_key_repeats = (PFN_xkb_keymap_key_repeats)
+ _glfwPlatformGetModuleSymbol(_glfw.wl.xkb.handle, "xkb_keymap_key_repeats");
+ _glfw.wl.xkb.keymap_key_get_syms_by_level = (PFN_xkb_keymap_key_get_syms_by_level)
+ _glfwPlatformGetModuleSymbol(_glfw.wl.xkb.handle, "xkb_keymap_key_get_syms_by_level");
+ _glfw.wl.xkb.state_new = (PFN_xkb_state_new)
+ _glfwPlatformGetModuleSymbol(_glfw.wl.xkb.handle, "xkb_state_new");
+ _glfw.wl.xkb.state_unref = (PFN_xkb_state_unref)
+ _glfwPlatformGetModuleSymbol(_glfw.wl.xkb.handle, "xkb_state_unref");
+ _glfw.wl.xkb.state_key_get_syms = (PFN_xkb_state_key_get_syms)
+ _glfwPlatformGetModuleSymbol(_glfw.wl.xkb.handle, "xkb_state_key_get_syms");
+ _glfw.wl.xkb.state_update_mask = (PFN_xkb_state_update_mask)
+ _glfwPlatformGetModuleSymbol(_glfw.wl.xkb.handle, "xkb_state_update_mask");
+ _glfw.wl.xkb.state_serialize_mods = (PFN_xkb_state_serialize_mods)
+ _glfwPlatformGetModuleSymbol(_glfw.wl.xkb.handle, "xkb_state_serialize_mods");
+ _glfw.wl.xkb.state_key_get_layout = (PFN_xkb_state_key_get_layout)
+ _glfwPlatformGetModuleSymbol(_glfw.wl.xkb.handle, "xkb_state_key_get_layout");
+ _glfw.wl.xkb.compose_table_new_from_locale = (PFN_xkb_compose_table_new_from_locale)
+ _glfwPlatformGetModuleSymbol(_glfw.wl.xkb.handle, "xkb_compose_table_new_from_locale");
+ _glfw.wl.xkb.compose_table_unref = (PFN_xkb_compose_table_unref)
+ _glfwPlatformGetModuleSymbol(_glfw.wl.xkb.handle, "xkb_compose_table_unref");
+ _glfw.wl.xkb.compose_state_new = (PFN_xkb_compose_state_new)
+ _glfwPlatformGetModuleSymbol(_glfw.wl.xkb.handle, "xkb_compose_state_new");
+ _glfw.wl.xkb.compose_state_unref = (PFN_xkb_compose_state_unref)
+ _glfwPlatformGetModuleSymbol(_glfw.wl.xkb.handle, "xkb_compose_state_unref");
+ _glfw.wl.xkb.compose_state_feed = (PFN_xkb_compose_state_feed)
+ _glfwPlatformGetModuleSymbol(_glfw.wl.xkb.handle, "xkb_compose_state_feed");
+ _glfw.wl.xkb.compose_state_get_status = (PFN_xkb_compose_state_get_status)
+ _glfwPlatformGetModuleSymbol(_glfw.wl.xkb.handle, "xkb_compose_state_get_status");
+ _glfw.wl.xkb.compose_state_get_one_sym = (PFN_xkb_compose_state_get_one_sym)
+ _glfwPlatformGetModuleSymbol(_glfw.wl.xkb.handle, "xkb_compose_state_get_one_sym");
+
+ _glfw.wl.registry = wl_display_get_registry(_glfw.wl.display);
+ wl_registry_add_listener(_glfw.wl.registry, &registryListener, NULL);
+
+ createKeyTables();
+
+ _glfw.wl.xkb.context = xkb_context_new(0);
+ if (!_glfw.wl.xkb.context)
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "Wayland: Failed to initialize xkb context");
+ return GLFW_FALSE;
+ }
+
+ // Sync so that all registry globals have been received
+ wl_display_roundtrip(_glfw.wl.display);
+
+ // Sync again so that all initial output events have been received
+ wl_display_roundtrip(_glfw.wl.display);
+
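+ // Client-side key repeat is driven by a timerfd; wl_keyboard.repeat_info requires seat version 4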
+ _glfw.wl.timerfd = -1;
+ if (_glfw.wl.seatVersion >= 4)
+ _glfw.wl.timerfd = timerfd_create(CLOCK_MONOTONIC, TFD_CLOEXEC | TFD_NONBLOCK);
+
+ if (!_glfw.wl.wmBase)
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "Wayland: Failed to find xdg-shell in your compositor");
+ return GLFW_FALSE;
+ }
+
+ if (_glfw.wl.pointer && _glfw.wl.shm)
+ {
+ cursorTheme = getenv("XCURSOR_THEME");
+ cursorSizeStr = getenv("XCURSOR_SIZE");
+ cursorSize = 32;
+ if (cursorSizeStr)
+ {
+ errno = 0;
+ cursorSizeLong = strtol(cursorSizeStr, &cursorSizeEnd, 10);
+ if (!*cursorSizeEnd && !errno && cursorSizeLong > 0 && cursorSizeLong <= INT_MAX)
+ cursorSize = (int)cursorSizeLong;
+ }
+ _glfw.wl.cursorTheme =
+ wl_cursor_theme_load(cursorTheme, cursorSize, _glfw.wl.shm);
+ if (!_glfw.wl.cursorTheme)
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "Wayland: Failed to load default cursor theme");
+ return GLFW_FALSE;
+ }
+ // If this happens to be NULL, we just fall back to the scale=1 version.
+ _glfw.wl.cursorThemeHiDPI =
+ wl_cursor_theme_load(cursorTheme, 2 * cursorSize, _glfw.wl.shm);
+ _glfw.wl.cursorSurface =
+ wl_compositor_create_surface(_glfw.wl.compositor);
+ _glfw.wl.cursorTimerfd = timerfd_create(CLOCK_MONOTONIC, TFD_CLOEXEC | TFD_NONBLOCK);
+ }
+
+ if (_glfw.wl.seat && _glfw.wl.dataDeviceManager)
+ {
+ _glfw.wl.dataDevice =
+ wl_data_device_manager_get_data_device(_glfw.wl.dataDeviceManager,
+ _glfw.wl.seat);
+ _glfwAddDataDeviceListenerWayland(_glfw.wl.dataDevice);
+ }
+
+ return GLFW_TRUE;
+}
+
+void _glfwTerminateWayland(void)
+{
+ _glfwTerminateEGL();
+ _glfwTerminateOSMesa();
+
+ if (_glfw.wl.egl.handle)
+ {
+ _glfwPlatformFreeModule(_glfw.wl.egl.handle);
+ _glfw.wl.egl.handle = NULL;
+ }
+
+ if (_glfw.wl.xkb.composeState)
+ xkb_compose_state_unref(_glfw.wl.xkb.composeState);
+ if (_glfw.wl.xkb.keymap)
+ xkb_keymap_unref(_glfw.wl.xkb.keymap);
+ if (_glfw.wl.xkb.state)
+ xkb_state_unref(_glfw.wl.xkb.state);
+ if (_glfw.wl.xkb.context)
+ xkb_context_unref(_glfw.wl.xkb.context);
+ if (_glfw.wl.xkb.handle)
+ {
+ _glfwPlatformFreeModule(_glfw.wl.xkb.handle);
+ _glfw.wl.xkb.handle = NULL;
+ }
+
+ if (_glfw.wl.cursorTheme)
+ wl_cursor_theme_destroy(_glfw.wl.cursorTheme);
+ if (_glfw.wl.cursorThemeHiDPI)
+ wl_cursor_theme_destroy(_glfw.wl.cursorThemeHiDPI);
+ if (_glfw.wl.cursor.handle)
+ {
+ _glfwPlatformFreeModule(_glfw.wl.cursor.handle);
+ _glfw.wl.cursor.handle = NULL;
+ }
+
+ for (unsigned int i = 0; i < _glfw.wl.offerCount; i++)
+ wl_data_offer_destroy(_glfw.wl.offers[i].offer);
+
+ _glfw_free(_glfw.wl.offers);
+
+ if (_glfw.wl.cursorSurface)
+ wl_surface_destroy(_glfw.wl.cursorSurface);
+ if (_glfw.wl.subcompositor)
+ wl_subcompositor_destroy(_glfw.wl.subcompositor);
+ if (_glfw.wl.compositor)
+ wl_compositor_destroy(_glfw.wl.compositor);
+ if (_glfw.wl.shm)
+ wl_shm_destroy(_glfw.wl.shm);
+ if (_glfw.wl.viewporter)
+ wp_viewporter_destroy(_glfw.wl.viewporter);
+ if (_glfw.wl.decorationManager)
+ zxdg_decoration_manager_v1_destroy(_glfw.wl.decorationManager);
+ if (_glfw.wl.wmBase)
+ xdg_wm_base_destroy(_glfw.wl.wmBase);
+ if (_glfw.wl.selectionOffer)
+ wl_data_offer_destroy(_glfw.wl.selectionOffer);
+ if (_glfw.wl.dragOffer)
+ wl_data_offer_destroy(_glfw.wl.dragOffer);
+ if (_glfw.wl.selectionSource)
+ wl_data_source_destroy(_glfw.wl.selectionSource);
+ if (_glfw.wl.dataDevice)
+ wl_data_device_destroy(_glfw.wl.dataDevice);
+ if (_glfw.wl.dataDeviceManager)
+ wl_data_device_manager_destroy(_glfw.wl.dataDeviceManager);
+ if (_glfw.wl.pointer)
+ wl_pointer_destroy(_glfw.wl.pointer);
+ if (_glfw.wl.keyboard)
+ wl_keyboard_destroy(_glfw.wl.keyboard);
+ if (_glfw.wl.seat)
+ wl_seat_destroy(_glfw.wl.seat);
+ if (_glfw.wl.relativePointerManager)
+ zwp_relative_pointer_manager_v1_destroy(_glfw.wl.relativePointerManager);
+ if (_glfw.wl.pointerConstraints)
+ zwp_pointer_constraints_v1_destroy(_glfw.wl.pointerConstraints);
+ if (_glfw.wl.idleInhibitManager)
+ zwp_idle_inhibit_manager_v1_destroy(_glfw.wl.idleInhibitManager);
+ if (_glfw.wl.registry)
+ wl_registry_destroy(_glfw.wl.registry);
+ if (_glfw.wl.display)
+ {
+ wl_display_flush(_glfw.wl.display);
+ wl_display_disconnect(_glfw.wl.display);
+ }
+
+ if (_glfw.wl.timerfd >= 0)
+ close(_glfw.wl.timerfd);
+ if (_glfw.wl.cursorTimerfd >= 0)
+ close(_glfw.wl.cursorTimerfd);
+
+ _glfw_free(_glfw.wl.clipboardString);
+}
+
diff --git a/chromium/third_party/dawn/third_party/glfw/src/wl_monitor.c b/chromium/third_party/dawn/third_party/glfw/src/wl_monitor.c
new file mode 100644
index 00000000000..568bdc5fe5e
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/src/wl_monitor.c
@@ -0,0 +1,233 @@
+//========================================================================
+// GLFW 3.4 Wayland - www.glfw.org
+//------------------------------------------------------------------------
+// Copyright (c) 2014 Jonas Ådahl <jadahl@gmail.com>
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would
+// be appreciated but is not required.
+//
+// 2. Altered source versions must be plainly marked as such, and must not
+// be misrepresented as being the original software.
+//
+// 3. This notice may not be removed or altered from any source
+// distribution.
+//
+//========================================================================
+// It is fine to use C99 in this file because it will not be built with VS
+//========================================================================
+
+#include "internal.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <math.h>
+
+#include "wayland-client-protocol.h"
+
+
+static void outputHandleGeometry(void* userData,
+ struct wl_output* output,
+ int32_t x,
+ int32_t y,
+ int32_t physicalWidth,
+ int32_t physicalHeight,
+ int32_t subpixel,
+ const char* make,
+ const char* model,
+ int32_t transform)
+{
+ struct _GLFWmonitor* monitor = userData;
+
+ monitor->wl.x = x;
+ monitor->wl.y = y;
+ monitor->widthMM = physicalWidth;
+ monitor->heightMM = physicalHeight;
+
+ snprintf(monitor->name, sizeof(monitor->name), "%s %s", make, model);
+}
+
+static void outputHandleMode(void* userData,
+ struct wl_output* output,
+ uint32_t flags,
+ int32_t width,
+ int32_t height,
+ int32_t refresh)
+{
+ struct _GLFWmonitor* monitor = userData;
+ GLFWvidmode mode;
+
+ mode.width = width;
+ mode.height = height;
+ mode.redBits = 8;
+ mode.greenBits = 8;
+ mode.blueBits = 8;
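+ // wl_output reports the refresh rate in mHz; convert it to Hz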
+ mode.refreshRate = (int) round(refresh / 1000.0);
+
+ monitor->modeCount++;
+ monitor->modes =
+ _glfw_realloc(monitor->modes, monitor->modeCount * sizeof(GLFWvidmode));
+ monitor->modes[monitor->modeCount - 1] = mode;
+
+ if (flags & WL_OUTPUT_MODE_CURRENT)
+ monitor->wl.currentMode = monitor->modeCount - 1;
+}
+
+static void outputHandleDone(void* userData, struct wl_output* output)
+{
+ struct _GLFWmonitor* monitor = userData;
+
+ if (monitor->widthMM <= 0 || monitor->heightMM <= 0)
+ {
+ // If Wayland does not provide a physical size, assume the default 96 DPI
+ const GLFWvidmode* mode = &monitor->modes[monitor->wl.currentMode];
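+ // mm = pixels * (25.4 mm per inch) / (96 pixels per inch)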
+ monitor->widthMM = (int) (mode->width * 25.4f / 96.f);
+ monitor->heightMM = (int) (mode->height * 25.4f / 96.f);
+ }
+
+ _glfwInputMonitor(monitor, GLFW_CONNECTED, _GLFW_INSERT_LAST);
+}
+
+static void outputHandleScale(void* userData,
+ struct wl_output* output,
+ int32_t factor)
+{
+ struct _GLFWmonitor* monitor = userData;
+
+ monitor->wl.scale = factor;
+}
+
+static const struct wl_output_listener outputListener =
+{
+ outputHandleGeometry,
+ outputHandleMode,
+ outputHandleDone,
+ outputHandleScale,
+};
+
+
+//////////////////////////////////////////////////////////////////////////
+////// GLFW internal API //////
+//////////////////////////////////////////////////////////////////////////
+
+void _glfwAddOutputWayland(uint32_t name, uint32_t version)
+{
+ _GLFWmonitor* monitor;
+ struct wl_output* output;
+
+ if (version < 2)
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "Wayland: Unsupported output interface version");
+ return;
+ }
+
+ // The actual name of this output will be set in the geometry handler.
+ monitor = _glfwAllocMonitor("", 0, 0);
+
+ output = wl_registry_bind(_glfw.wl.registry,
+ name,
+ &wl_output_interface,
+ 2);
+ if (!output)
+ {
+ _glfwFreeMonitor(monitor);
+ return;
+ }
+
+ monitor->wl.scale = 1;
+ monitor->wl.output = output;
+ monitor->wl.name = name;
+
+ wl_output_add_listener(output, &outputListener, monitor);
+}
+
+
+//////////////////////////////////////////////////////////////////////////
+////// GLFW platform API //////
+//////////////////////////////////////////////////////////////////////////
+
+void _glfwFreeMonitorWayland(_GLFWmonitor* monitor)
+{
+ if (monitor->wl.output)
+ wl_output_destroy(monitor->wl.output);
+}
+
+void _glfwGetMonitorPosWayland(_GLFWmonitor* monitor, int* xpos, int* ypos)
+{
+ if (xpos)
+ *xpos = monitor->wl.x;
+ if (ypos)
+ *ypos = monitor->wl.y;
+}
+
+void _glfwGetMonitorContentScaleWayland(_GLFWmonitor* monitor,
+ float* xscale, float* yscale)
+{
+ if (xscale)
+ *xscale = (float) monitor->wl.scale;
+ if (yscale)
+ *yscale = (float) monitor->wl.scale;
+}
+
+void _glfwGetMonitorWorkareaWayland(_GLFWmonitor* monitor,
+ int* xpos, int* ypos,
+ int* width, int* height)
+{
+ if (xpos)
+ *xpos = monitor->wl.x;
+ if (ypos)
+ *ypos = monitor->wl.y;
+ if (width)
+ *width = monitor->modes[monitor->wl.currentMode].width;
+ if (height)
+ *height = monitor->modes[monitor->wl.currentMode].height;
+}
+
+GLFWvidmode* _glfwGetVideoModesWayland(_GLFWmonitor* monitor, int* found)
+{
+ *found = monitor->modeCount;
+ return monitor->modes;
+}
+
+void _glfwGetVideoModeWayland(_GLFWmonitor* monitor, GLFWvidmode* mode)
+{
+ *mode = monitor->modes[monitor->wl.currentMode];
+}
+
+GLFWbool _glfwGetGammaRampWayland(_GLFWmonitor* monitor, GLFWgammaramp* ramp)
+{
+ _glfwInputError(GLFW_FEATURE_UNAVAILABLE,
+ "Wayland: Gamma ramp access is not available");
+ return GLFW_FALSE;
+}
+
+void _glfwSetGammaRampWayland(_GLFWmonitor* monitor, const GLFWgammaramp* ramp)
+{
+ _glfwInputError(GLFW_FEATURE_UNAVAILABLE,
+ "Wayland: Gamma ramp access is not available");
+}
+
+
+//////////////////////////////////////////////////////////////////////////
+////// GLFW native API //////
+//////////////////////////////////////////////////////////////////////////
+
+GLFWAPI struct wl_output* glfwGetWaylandMonitor(GLFWmonitor* handle)
+{
+ _GLFWmonitor* monitor = (_GLFWmonitor*) handle;
+ _GLFW_REQUIRE_INIT_OR_RETURN(NULL);
+ return monitor->wl.output;
+}
+
diff --git a/chromium/third_party/dawn/third_party/glfw/src/wl_platform.h b/chromium/third_party/dawn/third_party/glfw/src/wl_platform.h
new file mode 100644
index 00000000000..d6c8c4dabfd
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/src/wl_platform.h
@@ -0,0 +1,515 @@
+//========================================================================
+// GLFW 3.4 Wayland - www.glfw.org
+//------------------------------------------------------------------------
+// Copyright (c) 2014 Jonas Ådahl <jadahl@gmail.com>
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would
+// be appreciated but is not required.
+//
+// 2. Altered source versions must be plainly marked as such, and must not
+// be misrepresented as being the original software.
+//
+// 3. This notice may not be removed or altered from any source
+// distribution.
+//
+//========================================================================
+
+#include <wayland-client-core.h>
+#include <xkbcommon/xkbcommon.h>
+#include <xkbcommon/xkbcommon-compose.h>
+
+typedef VkFlags VkWaylandSurfaceCreateFlagsKHR;
+
+typedef struct VkWaylandSurfaceCreateInfoKHR
+{
+ VkStructureType sType;
+ const void* pNext;
+ VkWaylandSurfaceCreateFlagsKHR flags;
+ struct wl_display* display;
+ struct wl_surface* surface;
+} VkWaylandSurfaceCreateInfoKHR;
+
+typedef VkResult (APIENTRY *PFN_vkCreateWaylandSurfaceKHR)(VkInstance,const VkWaylandSurfaceCreateInfoKHR*,const VkAllocationCallbacks*,VkSurfaceKHR*);
+typedef VkBool32 (APIENTRY *PFN_vkGetPhysicalDeviceWaylandPresentationSupportKHR)(VkPhysicalDevice,uint32_t,struct wl_display*);
+
+#include "xkb_unicode.h"
+#include "posix_poll.h"
+
+typedef int (* PFN_wl_display_flush)(struct wl_display* display);
+typedef void (* PFN_wl_display_cancel_read)(struct wl_display* display);
+typedef int (* PFN_wl_display_dispatch_pending)(struct wl_display* display);
+typedef int (* PFN_wl_display_read_events)(struct wl_display* display);
+typedef struct wl_display* (* PFN_wl_display_connect)(const char*);
+typedef void (* PFN_wl_display_disconnect)(struct wl_display*);
+typedef int (* PFN_wl_display_roundtrip)(struct wl_display*);
+typedef int (* PFN_wl_display_get_fd)(struct wl_display*);
+typedef int (* PFN_wl_display_prepare_read)(struct wl_display*);
+typedef void (* PFN_wl_proxy_marshal)(struct wl_proxy*,uint32_t,...);
+typedef int (* PFN_wl_proxy_add_listener)(struct wl_proxy*,void(**)(void),void*);
+typedef void (* PFN_wl_proxy_destroy)(struct wl_proxy*);
+typedef struct wl_proxy* (* PFN_wl_proxy_marshal_constructor)(struct wl_proxy*,uint32_t,const struct wl_interface*,...);
+typedef struct wl_proxy* (* PFN_wl_proxy_marshal_constructor_versioned)(struct wl_proxy*,uint32_t,const struct wl_interface*,uint32_t,...);
+typedef void* (* PFN_wl_proxy_get_user_data)(struct wl_proxy*);
+typedef void (* PFN_wl_proxy_set_user_data)(struct wl_proxy*,void*);
+typedef uint32_t (* PFN_wl_proxy_get_version)(struct wl_proxy*);
+typedef struct wl_proxy* (* PFN_wl_proxy_marshal_flags)(struct wl_proxy*,uint32_t,const struct wl_interface*,uint32_t,uint32_t,...);
+#define wl_display_flush _glfw.wl.client.display_flush
+#define wl_display_cancel_read _glfw.wl.client.display_cancel_read
+#define wl_display_dispatch_pending _glfw.wl.client.display_dispatch_pending
+#define wl_display_read_events _glfw.wl.client.display_read_events
+#define wl_display_disconnect _glfw.wl.client.display_disconnect
+#define wl_display_roundtrip _glfw.wl.client.display_roundtrip
+#define wl_display_get_fd _glfw.wl.client.display_get_fd
+#define wl_display_prepare_read _glfw.wl.client.display_prepare_read
+#define wl_proxy_marshal _glfw.wl.client.proxy_marshal
+#define wl_proxy_add_listener _glfw.wl.client.proxy_add_listener
+#define wl_proxy_destroy _glfw.wl.client.proxy_destroy
+#define wl_proxy_marshal_constructor _glfw.wl.client.proxy_marshal_constructor
+#define wl_proxy_marshal_constructor_versioned _glfw.wl.client.proxy_marshal_constructor_versioned
+#define wl_proxy_get_user_data _glfw.wl.client.proxy_get_user_data
+#define wl_proxy_set_user_data _glfw.wl.client.proxy_set_user_data
+#define wl_proxy_get_version _glfw.wl.client.proxy_get_version
+#define wl_proxy_marshal_flags _glfw.wl.client.proxy_marshal_flags
+
+struct wl_shm;
+
+#define wl_display_interface _glfw_wl_display_interface
+#define wl_subcompositor_interface _glfw_wl_subcompositor_interface
+#define wl_compositor_interface _glfw_wl_compositor_interface
+#define wl_shm_interface _glfw_wl_shm_interface
+#define wl_data_device_manager_interface _glfw_wl_data_device_manager_interface
+#define wl_shell_interface _glfw_wl_shell_interface
+#define wl_buffer_interface _glfw_wl_buffer_interface
+#define wl_callback_interface _glfw_wl_callback_interface
+#define wl_data_device_interface _glfw_wl_data_device_interface
+#define wl_data_offer_interface _glfw_wl_data_offer_interface
+#define wl_data_source_interface _glfw_wl_data_source_interface
+#define wl_keyboard_interface _glfw_wl_keyboard_interface
+#define wl_output_interface _glfw_wl_output_interface
+#define wl_pointer_interface _glfw_wl_pointer_interface
+#define wl_region_interface _glfw_wl_region_interface
+#define wl_registry_interface _glfw_wl_registry_interface
+#define wl_seat_interface _glfw_wl_seat_interface
+#define wl_shell_surface_interface _glfw_wl_shell_surface_interface
+#define wl_shm_pool_interface _glfw_wl_shm_pool_interface
+#define wl_subsurface_interface _glfw_wl_subsurface_interface
+#define wl_surface_interface _glfw_wl_surface_interface
+#define wl_touch_interface _glfw_wl_touch_interface
+#define zwp_idle_inhibitor_v1_interface _glfw_zwp_idle_inhibitor_v1_interface
+#define zwp_idle_inhibit_manager_v1_interface _glfw_zwp_idle_inhibit_manager_v1_interface
+#define zwp_confined_pointer_v1_interface _glfw_zwp_confined_pointer_v1_interface
+#define zwp_locked_pointer_v1_interface _glfw_zwp_locked_pointer_v1_interface
+#define zwp_pointer_constraints_v1_interface _glfw_zwp_pointer_constraints_v1_interface
+#define zwp_relative_pointer_v1_interface _glfw_zwp_relative_pointer_v1_interface
+#define zwp_relative_pointer_manager_v1_interface _glfw_zwp_relative_pointer_manager_v1_interface
+#define wp_viewport_interface _glfw_wp_viewport_interface
+#define wp_viewporter_interface _glfw_wp_viewporter_interface
+#define xdg_toplevel_interface _glfw_xdg_toplevel_interface
+#define zxdg_toplevel_decoration_v1_interface _glfw_zxdg_toplevel_decoration_v1_interface
+#define zxdg_decoration_manager_v1_interface _glfw_zxdg_decoration_manager_v1_interface
+#define xdg_popup_interface _glfw_xdg_popup_interface
+#define xdg_positioner_interface _glfw_xdg_positioner_interface
+#define xdg_surface_interface _glfw_xdg_surface_interface
+#define xdg_toplevel_interface _glfw_xdg_toplevel_interface
+#define xdg_wm_base_interface _glfw_xdg_wm_base_interface
+
+#define GLFW_WAYLAND_WINDOW_STATE _GLFWwindowWayland wl;
+#define GLFW_WAYLAND_LIBRARY_WINDOW_STATE _GLFWlibraryWayland wl;
+#define GLFW_WAYLAND_MONITOR_STATE _GLFWmonitorWayland wl;
+#define GLFW_WAYLAND_CURSOR_STATE _GLFWcursorWayland wl;
+
+struct wl_cursor_image {
+ uint32_t width;
+ uint32_t height;
+ uint32_t hotspot_x;
+ uint32_t hotspot_y;
+ uint32_t delay;
+};
+struct wl_cursor {
+ unsigned int image_count;
+ struct wl_cursor_image** images;
+ char* name;
+};
+typedef struct wl_cursor_theme* (* PFN_wl_cursor_theme_load)(const char*, int, struct wl_shm*);
+typedef void (* PFN_wl_cursor_theme_destroy)(struct wl_cursor_theme*);
+typedef struct wl_cursor* (* PFN_wl_cursor_theme_get_cursor)(struct wl_cursor_theme*, const char*);
+typedef struct wl_buffer* (* PFN_wl_cursor_image_get_buffer)(struct wl_cursor_image*);
+#define wl_cursor_theme_load _glfw.wl.cursor.theme_load
+#define wl_cursor_theme_destroy _glfw.wl.cursor.theme_destroy
+#define wl_cursor_theme_get_cursor _glfw.wl.cursor.theme_get_cursor
+#define wl_cursor_image_get_buffer _glfw.wl.cursor.image_get_buffer
+
+typedef struct wl_egl_window* (* PFN_wl_egl_window_create)(struct wl_surface*, int, int);
+typedef void (* PFN_wl_egl_window_destroy)(struct wl_egl_window*);
+typedef void (* PFN_wl_egl_window_resize)(struct wl_egl_window*, int, int, int, int);
+#define wl_egl_window_create _glfw.wl.egl.window_create
+#define wl_egl_window_destroy _glfw.wl.egl.window_destroy
+#define wl_egl_window_resize _glfw.wl.egl.window_resize
+
+typedef struct xkb_context* (* PFN_xkb_context_new)(enum xkb_context_flags);
+typedef void (* PFN_xkb_context_unref)(struct xkb_context*);
+typedef struct xkb_keymap* (* PFN_xkb_keymap_new_from_string)(struct xkb_context*, const char*, enum xkb_keymap_format, enum xkb_keymap_compile_flags);
+typedef void (* PFN_xkb_keymap_unref)(struct xkb_keymap*);
+typedef xkb_mod_index_t (* PFN_xkb_keymap_mod_get_index)(struct xkb_keymap*, const char*);
+typedef int (* PFN_xkb_keymap_key_repeats)(struct xkb_keymap*, xkb_keycode_t);
+typedef int (* PFN_xkb_keymap_key_get_syms_by_level)(struct xkb_keymap*,xkb_keycode_t,xkb_layout_index_t,xkb_level_index_t,const xkb_keysym_t**);
+typedef struct xkb_state* (* PFN_xkb_state_new)(struct xkb_keymap*);
+typedef void (* PFN_xkb_state_unref)(struct xkb_state*);
+typedef int (* PFN_xkb_state_key_get_syms)(struct xkb_state*, xkb_keycode_t, const xkb_keysym_t**);
+typedef enum xkb_state_component (* PFN_xkb_state_update_mask)(struct xkb_state*, xkb_mod_mask_t, xkb_mod_mask_t, xkb_mod_mask_t, xkb_layout_index_t, xkb_layout_index_t, xkb_layout_index_t);
+typedef xkb_mod_mask_t (* PFN_xkb_state_serialize_mods)(struct xkb_state*, enum xkb_state_component);
+typedef xkb_layout_index_t (* PFN_xkb_state_key_get_layout)(struct xkb_state*,xkb_keycode_t);
+#define xkb_context_new _glfw.wl.xkb.context_new
+#define xkb_context_unref _glfw.wl.xkb.context_unref
+#define xkb_keymap_new_from_string _glfw.wl.xkb.keymap_new_from_string
+#define xkb_keymap_unref _glfw.wl.xkb.keymap_unref
+#define xkb_keymap_mod_get_index _glfw.wl.xkb.keymap_mod_get_index
+#define xkb_keymap_key_repeats _glfw.wl.xkb.keymap_key_repeats
+#define xkb_keymap_key_get_syms_by_level _glfw.wl.xkb.keymap_key_get_syms_by_level
+#define xkb_state_new _glfw.wl.xkb.state_new
+#define xkb_state_unref _glfw.wl.xkb.state_unref
+#define xkb_state_key_get_syms _glfw.wl.xkb.state_key_get_syms
+#define xkb_state_update_mask _glfw.wl.xkb.state_update_mask
+#define xkb_state_serialize_mods _glfw.wl.xkb.state_serialize_mods
+#define xkb_state_key_get_layout _glfw.wl.xkb.state_key_get_layout
+
+typedef struct xkb_compose_table* (* PFN_xkb_compose_table_new_from_locale)(struct xkb_context*, const char*, enum xkb_compose_compile_flags);
+typedef void (* PFN_xkb_compose_table_unref)(struct xkb_compose_table*);
+typedef struct xkb_compose_state* (* PFN_xkb_compose_state_new)(struct xkb_compose_table*, enum xkb_compose_state_flags);
+typedef void (* PFN_xkb_compose_state_unref)(struct xkb_compose_state*);
+typedef enum xkb_compose_feed_result (* PFN_xkb_compose_state_feed)(struct xkb_compose_state*, xkb_keysym_t);
+typedef enum xkb_compose_status (* PFN_xkb_compose_state_get_status)(struct xkb_compose_state*);
+typedef xkb_keysym_t (* PFN_xkb_compose_state_get_one_sym)(struct xkb_compose_state*);
+#define xkb_compose_table_new_from_locale _glfw.wl.xkb.compose_table_new_from_locale
+#define xkb_compose_table_unref _glfw.wl.xkb.compose_table_unref
+#define xkb_compose_state_new _glfw.wl.xkb.compose_state_new
+#define xkb_compose_state_unref _glfw.wl.xkb.compose_state_unref
+#define xkb_compose_state_feed _glfw.wl.xkb.compose_state_feed
+#define xkb_compose_state_get_status _glfw.wl.xkb.compose_state_get_status
+#define xkb_compose_state_get_one_sym _glfw.wl.xkb.compose_state_get_one_sym
+
+#define _GLFW_DECORATION_WIDTH 4
+#define _GLFW_DECORATION_TOP 24
+#define _GLFW_DECORATION_VERTICAL (_GLFW_DECORATION_TOP + _GLFW_DECORATION_WIDTH)
+#define _GLFW_DECORATION_HORIZONTAL (2 * _GLFW_DECORATION_WIDTH)
+
+typedef enum _GLFWdecorationSideWayland
+{
+ mainWindow,
+ topDecoration,
+ leftDecoration,
+ rightDecoration,
+ bottomDecoration,
+} _GLFWdecorationSideWayland;
+
+typedef struct _GLFWdecorationWayland
+{
+ struct wl_surface* surface;
+ struct wl_subsurface* subsurface;
+ struct wp_viewport* viewport;
+} _GLFWdecorationWayland;
+
+typedef struct _GLFWofferWayland
+{
+ struct wl_data_offer* offer;
+ GLFWbool text_plain_utf8;
+ GLFWbool text_uri_list;
+} _GLFWofferWayland;
+
+// Wayland-specific per-window data
+//
+typedef struct _GLFWwindowWayland
+{
+ int width, height;
+ GLFWbool visible;
+ GLFWbool maximized;
+ GLFWbool hovered;
+ GLFWbool transparent;
+ struct wl_surface* surface;
+ struct wl_egl_window* native;
+ struct wl_callback* callback;
+
+ struct {
+ struct xdg_surface* surface;
+ struct xdg_toplevel* toplevel;
+ struct zxdg_toplevel_decoration_v1* decoration;
+ } xdg;
+
+ _GLFWcursor* currentCursor;
+ double cursorPosX, cursorPosY;
+
+ char* title;
+
+    // We need to track the monitors the window is shown on to calculate the
+    // optimal scaling factor.
+ int scale;
+ _GLFWmonitor** monitors;
+ int monitorsCount;
+ int monitorsSize;
+
+ struct {
+ struct zwp_relative_pointer_v1* relativePointer;
+ struct zwp_locked_pointer_v1* lockedPointer;
+ } pointerLock;
+
+ struct zwp_idle_inhibitor_v1* idleInhibitor;
+
+ GLFWbool wasFullscreen;
+
+ struct {
+ GLFWbool serverSide;
+ struct wl_buffer* buffer;
+ _GLFWdecorationWayland top, left, right, bottom;
+ int focus;
+ } decorations;
+} _GLFWwindowWayland;
+
+// Wayland-specific global data
+//
+typedef struct _GLFWlibraryWayland
+{
+ struct wl_display* display;
+ struct wl_registry* registry;
+ struct wl_compositor* compositor;
+ struct wl_subcompositor* subcompositor;
+ struct wl_shm* shm;
+ struct wl_seat* seat;
+ struct wl_pointer* pointer;
+ struct wl_keyboard* keyboard;
+ struct wl_data_device_manager* dataDeviceManager;
+ struct wl_data_device* dataDevice;
+ struct xdg_wm_base* wmBase;
+ struct zxdg_decoration_manager_v1* decorationManager;
+ struct wp_viewporter* viewporter;
+ struct zwp_relative_pointer_manager_v1* relativePointerManager;
+ struct zwp_pointer_constraints_v1* pointerConstraints;
+ struct zwp_idle_inhibit_manager_v1* idleInhibitManager;
+
+ _GLFWofferWayland* offers;
+ unsigned int offerCount;
+
+ struct wl_data_offer* selectionOffer;
+ struct wl_data_source* selectionSource;
+
+ struct wl_data_offer* dragOffer;
+ _GLFWwindow* dragFocus;
+ uint32_t dragSerial;
+
+ int compositorVersion;
+ int seatVersion;
+
+ struct wl_cursor_theme* cursorTheme;
+ struct wl_cursor_theme* cursorThemeHiDPI;
+ struct wl_surface* cursorSurface;
+ const char* cursorPreviousName;
+ int cursorTimerfd;
+ uint32_t serial;
+ uint32_t pointerEnterSerial;
+
+ int32_t keyboardRepeatRate;
+ int32_t keyboardRepeatDelay;
+ int keyboardLastKey;
+ int keyboardLastScancode;
+ char* clipboardString;
+ int timerfd;
+ short int keycodes[256];
+ short int scancodes[GLFW_KEY_LAST + 1];
+ char keynames[GLFW_KEY_LAST + 1][5];
+
+ struct {
+ void* handle;
+ struct xkb_context* context;
+ struct xkb_keymap* keymap;
+ struct xkb_state* state;
+
+ struct xkb_compose_state* composeState;
+
+ xkb_mod_mask_t controlMask;
+ xkb_mod_mask_t altMask;
+ xkb_mod_mask_t shiftMask;
+ xkb_mod_mask_t superMask;
+ xkb_mod_mask_t capsLockMask;
+ xkb_mod_mask_t numLockMask;
+ unsigned int modifiers;
+
+ PFN_xkb_context_new context_new;
+ PFN_xkb_context_unref context_unref;
+ PFN_xkb_keymap_new_from_string keymap_new_from_string;
+ PFN_xkb_keymap_unref keymap_unref;
+ PFN_xkb_keymap_mod_get_index keymap_mod_get_index;
+ PFN_xkb_keymap_key_repeats keymap_key_repeats;
+ PFN_xkb_keymap_key_get_syms_by_level keymap_key_get_syms_by_level;
+ PFN_xkb_state_new state_new;
+ PFN_xkb_state_unref state_unref;
+ PFN_xkb_state_key_get_syms state_key_get_syms;
+ PFN_xkb_state_update_mask state_update_mask;
+ PFN_xkb_state_serialize_mods state_serialize_mods;
+ PFN_xkb_state_key_get_layout state_key_get_layout;
+
+ PFN_xkb_compose_table_new_from_locale compose_table_new_from_locale;
+ PFN_xkb_compose_table_unref compose_table_unref;
+ PFN_xkb_compose_state_new compose_state_new;
+ PFN_xkb_compose_state_unref compose_state_unref;
+ PFN_xkb_compose_state_feed compose_state_feed;
+ PFN_xkb_compose_state_get_status compose_state_get_status;
+ PFN_xkb_compose_state_get_one_sym compose_state_get_one_sym;
+ } xkb;
+
+ _GLFWwindow* pointerFocus;
+ _GLFWwindow* keyboardFocus;
+
+ struct {
+ void* handle;
+ PFN_wl_display_flush display_flush;
+ PFN_wl_display_cancel_read display_cancel_read;
+ PFN_wl_display_dispatch_pending display_dispatch_pending;
+ PFN_wl_display_read_events display_read_events;
+ PFN_wl_display_disconnect display_disconnect;
+ PFN_wl_display_roundtrip display_roundtrip;
+ PFN_wl_display_get_fd display_get_fd;
+ PFN_wl_display_prepare_read display_prepare_read;
+ PFN_wl_proxy_marshal proxy_marshal;
+ PFN_wl_proxy_add_listener proxy_add_listener;
+ PFN_wl_proxy_destroy proxy_destroy;
+ PFN_wl_proxy_marshal_constructor proxy_marshal_constructor;
+ PFN_wl_proxy_marshal_constructor_versioned proxy_marshal_constructor_versioned;
+ PFN_wl_proxy_get_user_data proxy_get_user_data;
+ PFN_wl_proxy_set_user_data proxy_set_user_data;
+ PFN_wl_proxy_get_version proxy_get_version;
+ PFN_wl_proxy_marshal_flags proxy_marshal_flags;
+ } client;
+
+ struct {
+ void* handle;
+
+ PFN_wl_cursor_theme_load theme_load;
+ PFN_wl_cursor_theme_destroy theme_destroy;
+ PFN_wl_cursor_theme_get_cursor theme_get_cursor;
+ PFN_wl_cursor_image_get_buffer image_get_buffer;
+ } cursor;
+
+ struct {
+ void* handle;
+
+ PFN_wl_egl_window_create window_create;
+ PFN_wl_egl_window_destroy window_destroy;
+ PFN_wl_egl_window_resize window_resize;
+ } egl;
+} _GLFWlibraryWayland;
+
+// Wayland-specific per-monitor data
+//
+typedef struct _GLFWmonitorWayland
+{
+ struct wl_output* output;
+ uint32_t name;
+ int currentMode;
+
+ int x;
+ int y;
+ int scale;
+} _GLFWmonitorWayland;
+
+// Wayland-specific per-cursor data
+//
+typedef struct _GLFWcursorWayland
+{
+ struct wl_cursor* cursor;
+ struct wl_cursor* cursorHiDPI;
+ struct wl_buffer* buffer;
+ int width, height;
+ int xhot, yhot;
+ int currentImage;
+} _GLFWcursorWayland;
+
+GLFWbool _glfwConnectWayland(int platformID, _GLFWplatform* platform);
+int _glfwInitWayland(void);
+void _glfwTerminateWayland(void);
+
+int _glfwCreateWindowWayland(_GLFWwindow* window, const _GLFWwndconfig* wndconfig, const _GLFWctxconfig* ctxconfig, const _GLFWfbconfig* fbconfig);
+void _glfwDestroyWindowWayland(_GLFWwindow* window);
+void _glfwSetWindowTitleWayland(_GLFWwindow* window, const char* title);
+void _glfwSetWindowIconWayland(_GLFWwindow* window, int count, const GLFWimage* images);
+void _glfwGetWindowPosWayland(_GLFWwindow* window, int* xpos, int* ypos);
+void _glfwSetWindowPosWayland(_GLFWwindow* window, int xpos, int ypos);
+void _glfwGetWindowSizeWayland(_GLFWwindow* window, int* width, int* height);
+void _glfwSetWindowSizeWayland(_GLFWwindow* window, int width, int height);
+void _glfwSetWindowSizeLimitsWayland(_GLFWwindow* window, int minwidth, int minheight, int maxwidth, int maxheight);
+void _glfwSetWindowAspectRatioWayland(_GLFWwindow* window, int numer, int denom);
+void _glfwGetFramebufferSizeWayland(_GLFWwindow* window, int* width, int* height);
+void _glfwGetWindowFrameSizeWayland(_GLFWwindow* window, int* left, int* top, int* right, int* bottom);
+void _glfwGetWindowContentScaleWayland(_GLFWwindow* window, float* xscale, float* yscale);
+void _glfwIconifyWindowWayland(_GLFWwindow* window);
+void _glfwRestoreWindowWayland(_GLFWwindow* window);
+void _glfwMaximizeWindowWayland(_GLFWwindow* window);
+void _glfwShowWindowWayland(_GLFWwindow* window);
+void _glfwHideWindowWayland(_GLFWwindow* window);
+void _glfwRequestWindowAttentionWayland(_GLFWwindow* window);
+void _glfwFocusWindowWayland(_GLFWwindow* window);
+void _glfwSetWindowMonitorWayland(_GLFWwindow* window, _GLFWmonitor* monitor, int xpos, int ypos, int width, int height, int refreshRate);
+int _glfwWindowFocusedWayland(_GLFWwindow* window);
+int _glfwWindowIconifiedWayland(_GLFWwindow* window);
+int _glfwWindowVisibleWayland(_GLFWwindow* window);
+int _glfwWindowMaximizedWayland(_GLFWwindow* window);
+int _glfwWindowHoveredWayland(_GLFWwindow* window);
+int _glfwFramebufferTransparentWayland(_GLFWwindow* window);
+void _glfwSetWindowResizableWayland(_GLFWwindow* window, GLFWbool enabled);
+void _glfwSetWindowDecoratedWayland(_GLFWwindow* window, GLFWbool enabled);
+void _glfwSetWindowFloatingWayland(_GLFWwindow* window, GLFWbool enabled);
+float _glfwGetWindowOpacityWayland(_GLFWwindow* window);
+void _glfwSetWindowOpacityWayland(_GLFWwindow* window, float opacity);
+void _glfwSetWindowMousePassthroughWayland(_GLFWwindow* window, GLFWbool enabled);
+
+void _glfwSetRawMouseMotionWayland(_GLFWwindow* window, GLFWbool enabled);
+GLFWbool _glfwRawMouseMotionSupportedWayland(void);
+
+void _glfwPollEventsWayland(void);
+void _glfwWaitEventsWayland(void);
+void _glfwWaitEventsTimeoutWayland(double timeout);
+void _glfwPostEmptyEventWayland(void);
+
+void _glfwGetCursorPosWayland(_GLFWwindow* window, double* xpos, double* ypos);
+void _glfwSetCursorPosWayland(_GLFWwindow* window, double xpos, double ypos);
+void _glfwSetCursorModeWayland(_GLFWwindow* window, int mode);
+const char* _glfwGetScancodeNameWayland(int scancode);
+int _glfwGetKeyScancodeWayland(int key);
+int _glfwCreateCursorWayland(_GLFWcursor* cursor, const GLFWimage* image, int xhot, int yhot);
+int _glfwCreateStandardCursorWayland(_GLFWcursor* cursor, int shape);
+void _glfwDestroyCursorWayland(_GLFWcursor* cursor);
+void _glfwSetCursorWayland(_GLFWwindow* window, _GLFWcursor* cursor);
+void _glfwSetClipboardStringWayland(const char* string);
+const char* _glfwGetClipboardStringWayland(void);
+
+EGLenum _glfwGetEGLPlatformWayland(EGLint** attribs);
+EGLNativeDisplayType _glfwGetEGLNativeDisplayWayland(void);
+EGLNativeWindowType _glfwGetEGLNativeWindowWayland(_GLFWwindow* window);
+
+void _glfwGetRequiredInstanceExtensionsWayland(char** extensions);
+int _glfwGetPhysicalDevicePresentationSupportWayland(VkInstance instance, VkPhysicalDevice device, uint32_t queuefamily);
+VkResult _glfwCreateWindowSurfaceWayland(VkInstance instance, _GLFWwindow* window, const VkAllocationCallbacks* allocator, VkSurfaceKHR* surface);
+
+void _glfwFreeMonitorWayland(_GLFWmonitor* monitor);
+void _glfwGetMonitorPosWayland(_GLFWmonitor* monitor, int* xpos, int* ypos);
+void _glfwGetMonitorContentScaleWayland(_GLFWmonitor* monitor, float* xscale, float* yscale);
+void _glfwGetMonitorWorkareaWayland(_GLFWmonitor* monitor, int* xpos, int* ypos, int* width, int* height);
+GLFWvidmode* _glfwGetVideoModesWayland(_GLFWmonitor* monitor, int* count);
+void _glfwGetVideoModeWayland(_GLFWmonitor* monitor, GLFWvidmode* mode);
+GLFWbool _glfwGetGammaRampWayland(_GLFWmonitor* monitor, GLFWgammaramp* ramp);
+void _glfwSetGammaRampWayland(_GLFWmonitor* monitor, const GLFWgammaramp* ramp);
+
+void _glfwAddOutputWayland(uint32_t name, uint32_t version);
+GLFWbool _glfwInputTextWayland(_GLFWwindow* window, uint32_t scancode);
+
+void _glfwAddSeatListenerWayland(struct wl_seat* seat);
+void _glfwAddDataDeviceListenerWayland(struct wl_data_device* device);
+
diff --git a/chromium/third_party/dawn/third_party/glfw/src/wl_window.c b/chromium/third_party/dawn/third_party/glfw/src/wl_window.c
new file mode 100644
index 00000000000..a1f931851db
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/src/wl_window.c
@@ -0,0 +1,2709 @@
+//========================================================================
+// GLFW 3.4 Wayland - www.glfw.org
+//------------------------------------------------------------------------
+// Copyright (c) 2014 Jonas Ådahl <jadahl@gmail.com>
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would
+// be appreciated but is not required.
+//
+// 2. Altered source versions must be plainly marked as such, and must not
+// be misrepresented as being the original software.
+//
+// 3. This notice may not be removed or altered from any source
+// distribution.
+//
+//========================================================================
+// It is fine to use C99 in this file because it will not be built with VS
+//========================================================================
+
+#define _GNU_SOURCE
+
+#include "internal.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <assert.h>
+#include <unistd.h>
+#include <string.h>
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <sys/timerfd.h>
+#include <poll.h>
+
+#include "wayland-client-protocol.h"
+#include "wayland-xdg-shell-client-protocol.h"
+#include "wayland-xdg-decoration-client-protocol.h"
+#include "wayland-viewporter-client-protocol.h"
+#include "wayland-relative-pointer-unstable-v1-client-protocol.h"
+#include "wayland-pointer-constraints-unstable-v1-client-protocol.h"
+#include "wayland-idle-inhibit-unstable-v1-client-protocol.h"
+
+
+static int createTmpfileCloexec(char* tmpname)
+{
+ int fd;
+
+ fd = mkostemp(tmpname, O_CLOEXEC);
+ if (fd >= 0)
+ unlink(tmpname);
+
+ return fd;
+}
+
+/*
+ * Create a new, unique, anonymous file of the given size, and
+ * return the file descriptor for it. The file descriptor is set
+ * CLOEXEC. The file is immediately suitable for mmap()'ing
+ * the given size at offset zero.
+ *
+ * The file should not have a permanent backing store like a disk,
+ * but may have one if XDG_RUNTIME_DIR is not properly implemented in the OS.
+ *
+ * The file name is deleted from the file system.
+ *
+ * The file is suitable for buffer sharing between processes by
+ * transmitting the file descriptor over Unix sockets using the
+ * SCM_RIGHTS mechanism.
+ *
+ * posix_fallocate() is used to guarantee that disk space is available
+ * for the file at the given size. If disk space is insufficient, errno
+ * is set to ENOSPC. If posix_fallocate() is not supported, the program may
+ * receive SIGBUS when accessing the mmap()'ed file contents instead.
+ */
+static int createAnonymousFile(off_t size)
+{
+ static const char template[] = "/glfw-shared-XXXXXX";
+ const char* path;
+ char* name;
+ int fd;
+ int ret;
+
+#ifdef HAVE_MEMFD_CREATE
+ fd = memfd_create("glfw-shared", MFD_CLOEXEC | MFD_ALLOW_SEALING);
+ if (fd >= 0)
+ {
+ // We can add this seal before calling posix_fallocate(), as the file
+ // is currently zero-sized anyway.
+ //
+        // There is also no need to check the return value; we couldn’t do
+        // anything with it anyway.
+ fcntl(fd, F_ADD_SEALS, F_SEAL_SHRINK | F_SEAL_SEAL);
+ }
+ else
+#elif defined(SHM_ANON)
+ fd = shm_open(SHM_ANON, O_RDWR | O_CLOEXEC, 0600);
+ if (fd < 0)
+#endif
+ {
+ path = getenv("XDG_RUNTIME_DIR");
+ if (!path)
+ {
+ errno = ENOENT;
+ return -1;
+ }
+
+ name = _glfw_calloc(strlen(path) + sizeof(template), 1);
+ strcpy(name, path);
+ strcat(name, template);
+
+ fd = createTmpfileCloexec(name);
+ _glfw_free(name);
+ if (fd < 0)
+ return -1;
+ }
+
+#if defined(SHM_ANON)
+ // posix_fallocate does not work on SHM descriptors
+ ret = ftruncate(fd, size);
+#else
+ ret = posix_fallocate(fd, 0, size);
+#endif
+ if (ret != 0)
+ {
+ close(fd);
+ errno = ret;
+ return -1;
+ }
+ return fd;
+}
+
+static struct wl_buffer* createShmBuffer(const GLFWimage* image)
+{
+ struct wl_shm_pool* pool;
+ struct wl_buffer* buffer;
+ int stride = image->width * 4;
+ int length = image->width * image->height * 4;
+ void* data;
+
+ const int fd = createAnonymousFile(length);
+ if (fd < 0)
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "Wayland: Failed to create buffer file of size %d: %s",
+ length, strerror(errno));
+ return NULL;
+ }
+
+ data = mmap(NULL, length, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+ if (data == MAP_FAILED)
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "Wayland: Failed to map file: %s", strerror(errno));
+ close(fd);
+ return NULL;
+ }
+
+ pool = wl_shm_create_pool(_glfw.wl.shm, fd, length);
+
+ close(fd);
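+    // Copy the image, converting non-premultiplied RGBA pixels to the
+    // premultiplied, little-endian ARGB8888 layout (bytes written in
+    // B, G, R, A order) expected by wl_shm.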
+ unsigned char* source = (unsigned char*) image->pixels;
+ unsigned char* target = data;
+ for (int i = 0; i < image->width * image->height; i++, source += 4)
+ {
+ unsigned int alpha = source[3];
+
+ *target++ = (unsigned char) ((source[2] * alpha) / 255);
+ *target++ = (unsigned char) ((source[1] * alpha) / 255);
+ *target++ = (unsigned char) ((source[0] * alpha) / 255);
+ *target++ = (unsigned char) alpha;
+ }
+
+ buffer =
+ wl_shm_pool_create_buffer(pool, 0,
+ image->width,
+ image->height,
+ stride, WL_SHM_FORMAT_ARGB8888);
+ munmap(data, length);
+ wl_shm_pool_destroy(pool);
+
+ return buffer;
+}
+
+static void createDecoration(_GLFWdecorationWayland* decoration,
+ struct wl_surface* parent,
+ struct wl_buffer* buffer, GLFWbool opaque,
+ int x, int y,
+ int width, int height)
+{
+ struct wl_region* region;
+
+ decoration->surface = wl_compositor_create_surface(_glfw.wl.compositor);
+ decoration->subsurface =
+ wl_subcompositor_get_subsurface(_glfw.wl.subcompositor,
+ decoration->surface, parent);
+ wl_subsurface_set_position(decoration->subsurface, x, y);
+ decoration->viewport = wp_viewporter_get_viewport(_glfw.wl.viewporter,
+ decoration->surface);
+ wp_viewport_set_destination(decoration->viewport, width, height);
+ wl_surface_attach(decoration->surface, buffer, 0, 0);
+
+ if (opaque)
+ {
+ region = wl_compositor_create_region(_glfw.wl.compositor);
+ wl_region_add(region, 0, 0, width, height);
+ wl_surface_set_opaque_region(decoration->surface, region);
+ wl_surface_commit(decoration->surface);
+ wl_region_destroy(region);
+ }
+ else
+ wl_surface_commit(decoration->surface);
+}
+
+static void createDecorations(_GLFWwindow* window)
+{
+ unsigned char data[] = { 224, 224, 224, 255 };
+ const GLFWimage image = { 1, 1, data };
+ GLFWbool opaque = (data[3] == 255);
+
+ if (!_glfw.wl.viewporter || !window->decorated || window->wl.decorations.serverSide)
+ return;
+
+ if (!window->wl.decorations.buffer)
+ window->wl.decorations.buffer = createShmBuffer(&image);
+ if (!window->wl.decorations.buffer)
+ return;
+
+ createDecoration(&window->wl.decorations.top, window->wl.surface,
+ window->wl.decorations.buffer, opaque,
+ 0, -_GLFW_DECORATION_TOP,
+ window->wl.width, _GLFW_DECORATION_TOP);
+ createDecoration(&window->wl.decorations.left, window->wl.surface,
+ window->wl.decorations.buffer, opaque,
+ -_GLFW_DECORATION_WIDTH, -_GLFW_DECORATION_TOP,
+ _GLFW_DECORATION_WIDTH, window->wl.height + _GLFW_DECORATION_TOP);
+ createDecoration(&window->wl.decorations.right, window->wl.surface,
+ window->wl.decorations.buffer, opaque,
+ window->wl.width, -_GLFW_DECORATION_TOP,
+ _GLFW_DECORATION_WIDTH, window->wl.height + _GLFW_DECORATION_TOP);
+ createDecoration(&window->wl.decorations.bottom, window->wl.surface,
+ window->wl.decorations.buffer, opaque,
+ -_GLFW_DECORATION_WIDTH, window->wl.height,
+ window->wl.width + _GLFW_DECORATION_HORIZONTAL, _GLFW_DECORATION_WIDTH);
+}
+
+static void destroyDecoration(_GLFWdecorationWayland* decoration)
+{
+ if (decoration->subsurface)
+ wl_subsurface_destroy(decoration->subsurface);
+ if (decoration->surface)
+ wl_surface_destroy(decoration->surface);
+ if (decoration->viewport)
+ wp_viewport_destroy(decoration->viewport);
+ decoration->surface = NULL;
+ decoration->subsurface = NULL;
+ decoration->viewport = NULL;
+}
+
+static void destroyDecorations(_GLFWwindow* window)
+{
+ destroyDecoration(&window->wl.decorations.top);
+ destroyDecoration(&window->wl.decorations.left);
+ destroyDecoration(&window->wl.decorations.right);
+ destroyDecoration(&window->wl.decorations.bottom);
+}
+
+static void xdgDecorationHandleConfigure(void* userData,
+ struct zxdg_toplevel_decoration_v1* decoration,
+ uint32_t mode)
+{
+ _GLFWwindow* window = userData;
+
+ window->wl.decorations.serverSide = (mode == ZXDG_TOPLEVEL_DECORATION_V1_MODE_SERVER_SIDE);
+
+ if (!window->wl.decorations.serverSide)
+ createDecorations(window);
+}
+
+static const struct zxdg_toplevel_decoration_v1_listener xdgDecorationListener =
+{
+ xdgDecorationHandleConfigure,
+};
+
+// Makes the compositor treat the surface as XRGB instead of ARGB.
+static void setOpaqueRegion(_GLFWwindow* window)
+{
+ struct wl_region* region;
+
+ region = wl_compositor_create_region(_glfw.wl.compositor);
+ if (!region)
+ return;
+
+ wl_region_add(region, 0, 0, window->wl.width, window->wl.height);
+ wl_surface_set_opaque_region(window->wl.surface, region);
+ wl_surface_commit(window->wl.surface);
+ wl_region_destroy(region);
+}
+
+
+static void resizeWindow(_GLFWwindow* window)
+{
+ int scale = window->wl.scale;
+ int scaledWidth = window->wl.width * scale;
+ int scaledHeight = window->wl.height * scale;
+ wl_egl_window_resize(window->wl.native, scaledWidth, scaledHeight, 0, 0);
+ if (!window->wl.transparent)
+ setOpaqueRegion(window);
+ _glfwInputFramebufferSize(window, scaledWidth, scaledHeight);
+ _glfwInputWindowContentScale(window, scale, scale);
+
+ if (!window->wl.decorations.top.surface)
+ return;
+
+ // Top decoration.
+ wp_viewport_set_destination(window->wl.decorations.top.viewport,
+ window->wl.width, _GLFW_DECORATION_TOP);
+ wl_surface_commit(window->wl.decorations.top.surface);
+
+ // Left decoration.
+ wp_viewport_set_destination(window->wl.decorations.left.viewport,
+ _GLFW_DECORATION_WIDTH, window->wl.height + _GLFW_DECORATION_TOP);
+ wl_surface_commit(window->wl.decorations.left.surface);
+
+ // Right decoration.
+ wl_subsurface_set_position(window->wl.decorations.right.subsurface,
+ window->wl.width, -_GLFW_DECORATION_TOP);
+ wp_viewport_set_destination(window->wl.decorations.right.viewport,
+ _GLFW_DECORATION_WIDTH, window->wl.height + _GLFW_DECORATION_TOP);
+ wl_surface_commit(window->wl.decorations.right.surface);
+
+ // Bottom decoration.
+ wl_subsurface_set_position(window->wl.decorations.bottom.subsurface,
+ -_GLFW_DECORATION_WIDTH, window->wl.height);
+ wp_viewport_set_destination(window->wl.decorations.bottom.viewport,
+ window->wl.width + _GLFW_DECORATION_HORIZONTAL, _GLFW_DECORATION_WIDTH);
+ wl_surface_commit(window->wl.decorations.bottom.surface);
+}
+
+static void checkScaleChange(_GLFWwindow* window)
+{
+ // Check if we will be able to set the buffer scale or not.
+ if (_glfw.wl.compositorVersion < 3)
+ return;
+
+ // Get the scale factor from the highest scale monitor.
+ int maxScale = 1;
+
+ for (int i = 0; i < window->wl.monitorsCount; i++)
+ maxScale = _glfw_max(window->wl.monitors[i]->wl.scale, maxScale);
+
+ // Only change the framebuffer size if the scale changed.
+ if (window->wl.scale != maxScale)
+ {
+ window->wl.scale = maxScale;
+ wl_surface_set_buffer_scale(window->wl.surface, maxScale);
+ resizeWindow(window);
+ }
+}
+
+static void surfaceHandleEnter(void* userData,
+ struct wl_surface* surface,
+ struct wl_output* output)
+{
+ _GLFWwindow* window = userData;
+ _GLFWmonitor* monitor = wl_output_get_user_data(output);
+
+ if (window->wl.monitorsCount + 1 > window->wl.monitorsSize)
+ {
+ ++window->wl.monitorsSize;
+ window->wl.monitors =
+ _glfw_realloc(window->wl.monitors,
+ window->wl.monitorsSize * sizeof(_GLFWmonitor*));
+ }
+
+ window->wl.monitors[window->wl.monitorsCount++] = monitor;
+
+ checkScaleChange(window);
+}
+
+static void surfaceHandleLeave(void* userData,
+ struct wl_surface* surface,
+ struct wl_output* output)
+{
+ _GLFWwindow* window = userData;
+ _GLFWmonitor* monitor = wl_output_get_user_data(output);
+ GLFWbool found = GLFW_FALSE;
+
+ for (int i = 0; i < window->wl.monitorsCount - 1; ++i)
+ {
+ if (monitor == window->wl.monitors[i])
+ found = GLFW_TRUE;
+ if (found)
+ window->wl.monitors[i] = window->wl.monitors[i + 1];
+ }
+ window->wl.monitors[--window->wl.monitorsCount] = NULL;
+
+ checkScaleChange(window);
+}
+
+static const struct wl_surface_listener surfaceListener = {
+ surfaceHandleEnter,
+ surfaceHandleLeave
+};
+
+static void setIdleInhibitor(_GLFWwindow* window, GLFWbool enable)
+{
+ if (enable && !window->wl.idleInhibitor && _glfw.wl.idleInhibitManager)
+ {
+ window->wl.idleInhibitor =
+ zwp_idle_inhibit_manager_v1_create_inhibitor(
+ _glfw.wl.idleInhibitManager, window->wl.surface);
+ if (!window->wl.idleInhibitor)
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "Wayland: Failed to create idle inhibitor");
+ }
+ else if (!enable && window->wl.idleInhibitor)
+ {
+ zwp_idle_inhibitor_v1_destroy(window->wl.idleInhibitor);
+ window->wl.idleInhibitor = NULL;
+ }
+}
+
+static void setFullscreen(_GLFWwindow* window, _GLFWmonitor* monitor,
+ int refreshRate)
+{
+ if (window->wl.xdg.toplevel)
+ {
+ xdg_toplevel_set_fullscreen(
+ window->wl.xdg.toplevel,
+ monitor->wl.output);
+ }
+ setIdleInhibitor(window, GLFW_TRUE);
+ if (!window->wl.decorations.serverSide)
+ destroyDecorations(window);
+}
+
+static void xdgToplevelHandleConfigure(void* userData,
+ struct xdg_toplevel* toplevel,
+ int32_t width,
+ int32_t height,
+ struct wl_array* states)
+{
+ _GLFWwindow* window = userData;
+ float aspectRatio;
+ float targetRatio;
+ uint32_t* state;
+ GLFWbool maximized = GLFW_FALSE;
+ GLFWbool fullscreen = GLFW_FALSE;
+ GLFWbool activated = GLFW_FALSE;
+
+ wl_array_for_each(state, states)
+ {
+ switch (*state)
+ {
+ case XDG_TOPLEVEL_STATE_MAXIMIZED:
+ maximized = GLFW_TRUE;
+ break;
+ case XDG_TOPLEVEL_STATE_FULLSCREEN:
+ fullscreen = GLFW_TRUE;
+ break;
+ case XDG_TOPLEVEL_STATE_RESIZING:
+ break;
+ case XDG_TOPLEVEL_STATE_ACTIVATED:
+ activated = GLFW_TRUE;
+ break;
+ }
+ }
+
+ if (width != 0 && height != 0)
+ {
+ if (!maximized && !fullscreen)
+ {
+ if (window->numer != GLFW_DONT_CARE && window->denom != GLFW_DONT_CARE)
+ {
+ aspectRatio = (float)width / (float)height;
+ targetRatio = (float)window->numer / (float)window->denom;
+ if (aspectRatio < targetRatio)
+ height = width / targetRatio;
+ else if (aspectRatio > targetRatio)
+ width = height * targetRatio;
+ }
+ }
+
+ _glfwInputWindowSize(window, width, height);
+ _glfwSetWindowSizeWayland(window, width, height);
+ _glfwInputWindowDamage(window);
+ }
+
+ if (window->wl.wasFullscreen && window->autoIconify)
+ {
+ if (!activated || !fullscreen)
+ {
+ _glfwIconifyWindowWayland(window);
+ window->wl.wasFullscreen = GLFW_FALSE;
+ }
+ }
+ if (fullscreen && activated)
+ window->wl.wasFullscreen = GLFW_TRUE;
+}
+
+static void xdgToplevelHandleClose(void* userData,
+ struct xdg_toplevel* toplevel)
+{
+ _GLFWwindow* window = userData;
+ _glfwInputWindowCloseRequest(window);
+}
+
+static const struct xdg_toplevel_listener xdgToplevelListener =
+{
+ xdgToplevelHandleConfigure,
+ xdgToplevelHandleClose
+};
+
+static void xdgSurfaceHandleConfigure(void* userData,
+ struct xdg_surface* surface,
+ uint32_t serial)
+{
+ xdg_surface_ack_configure(surface, serial);
+}
+
+static const struct xdg_surface_listener xdgSurfaceListener = {
+ xdgSurfaceHandleConfigure
+};
+
+static void setXdgDecorations(_GLFWwindow* window)
+{
+ if (_glfw.wl.decorationManager)
+ {
+ window->wl.xdg.decoration =
+ zxdg_decoration_manager_v1_get_toplevel_decoration(
+ _glfw.wl.decorationManager, window->wl.xdg.toplevel);
+ zxdg_toplevel_decoration_v1_add_listener(window->wl.xdg.decoration,
+ &xdgDecorationListener,
+ window);
+ zxdg_toplevel_decoration_v1_set_mode(
+ window->wl.xdg.decoration,
+ ZXDG_TOPLEVEL_DECORATION_V1_MODE_SERVER_SIDE);
+ }
+ else
+ {
+ window->wl.decorations.serverSide = GLFW_FALSE;
+ createDecorations(window);
+ }
+}
+
+static GLFWbool createXdgSurface(_GLFWwindow* window)
+{
+ window->wl.xdg.surface = xdg_wm_base_get_xdg_surface(_glfw.wl.wmBase,
+ window->wl.surface);
+ if (!window->wl.xdg.surface)
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "Wayland: Failed to create xdg-surface for window");
+ return GLFW_FALSE;
+ }
+
+ xdg_surface_add_listener(window->wl.xdg.surface,
+ &xdgSurfaceListener,
+ window);
+
+ window->wl.xdg.toplevel = xdg_surface_get_toplevel(window->wl.xdg.surface);
+ if (!window->wl.xdg.toplevel)
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "Wayland: Failed to create xdg-toplevel for window");
+ return GLFW_FALSE;
+ }
+
+ xdg_toplevel_add_listener(window->wl.xdg.toplevel,
+ &xdgToplevelListener,
+ window);
+
+ if (window->wl.title)
+ xdg_toplevel_set_title(window->wl.xdg.toplevel, window->wl.title);
+
+ if (window->minwidth != GLFW_DONT_CARE && window->minheight != GLFW_DONT_CARE)
+ xdg_toplevel_set_min_size(window->wl.xdg.toplevel,
+ window->minwidth, window->minheight);
+ if (window->maxwidth != GLFW_DONT_CARE && window->maxheight != GLFW_DONT_CARE)
+ xdg_toplevel_set_max_size(window->wl.xdg.toplevel,
+ window->maxwidth, window->maxheight);
+
+ if (window->monitor)
+ {
+ xdg_toplevel_set_fullscreen(window->wl.xdg.toplevel,
+ window->monitor->wl.output);
+ setIdleInhibitor(window, GLFW_TRUE);
+ }
+ else if (window->wl.maximized)
+ {
+ xdg_toplevel_set_maximized(window->wl.xdg.toplevel);
+ setIdleInhibitor(window, GLFW_FALSE);
+ setXdgDecorations(window);
+ }
+ else
+ {
+ setIdleInhibitor(window, GLFW_FALSE);
+ setXdgDecorations(window);
+ }
+
+ wl_surface_commit(window->wl.surface);
+ wl_display_roundtrip(_glfw.wl.display);
+
+ return GLFW_TRUE;
+}
+
+static GLFWbool createSurface(_GLFWwindow* window,
+ const _GLFWwndconfig* wndconfig,
+ const _GLFWfbconfig* fbconfig)
+{
+ window->wl.surface = wl_compositor_create_surface(_glfw.wl.compositor);
+ if (!window->wl.surface)
+ return GLFW_FALSE;
+
+ wl_surface_add_listener(window->wl.surface,
+ &surfaceListener,
+ window);
+
+ wl_surface_set_user_data(window->wl.surface, window);
+
+ window->wl.native = wl_egl_window_create(window->wl.surface,
+ wndconfig->width,
+ wndconfig->height);
+ if (!window->wl.native)
+ return GLFW_FALSE;
+
+ window->wl.width = wndconfig->width;
+ window->wl.height = wndconfig->height;
+ window->wl.scale = 1;
+ window->wl.title = _glfw_strdup(wndconfig->title);
+
+ window->wl.transparent = fbconfig->transparent;
+ if (!window->wl.transparent)
+ setOpaqueRegion(window);
+
+ if (window->monitor || wndconfig->visible)
+ {
+ if (!createXdgSurface(window))
+ return GLFW_FALSE;
+
+ window->wl.visible = GLFW_TRUE;
+ }
+
+ return GLFW_TRUE;
+}
+
+static void setCursorImage(_GLFWwindow* window,
+ _GLFWcursorWayland* cursorWayland)
+{
+ struct itimerspec timer = {};
+ struct wl_cursor* wlCursor = cursorWayland->cursor;
+ struct wl_cursor_image* image;
+ struct wl_buffer* buffer;
+ struct wl_surface* surface = _glfw.wl.cursorSurface;
+ int scale = 1;
+
+ if (!wlCursor)
+ buffer = cursorWayland->buffer;
+ else
+ {
+ if (window->wl.scale > 1 && cursorWayland->cursorHiDPI)
+ {
+ wlCursor = cursorWayland->cursorHiDPI;
+ scale = 2;
+ }
+
+ image = wlCursor->images[cursorWayland->currentImage];
+ buffer = wl_cursor_image_get_buffer(image);
+ if (!buffer)
+ return;
+
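+        // Schedule the next animation frame after the cursor image delay
+        // (given in milliseconds).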
+ timer.it_value.tv_sec = image->delay / 1000;
+ timer.it_value.tv_nsec = (image->delay % 1000) * 1000000;
+ timerfd_settime(_glfw.wl.cursorTimerfd, 0, &timer, NULL);
+
+ cursorWayland->width = image->width;
+ cursorWayland->height = image->height;
+ cursorWayland->xhot = image->hotspot_x;
+ cursorWayland->yhot = image->hotspot_y;
+ }
+
+ wl_pointer_set_cursor(_glfw.wl.pointer, _glfw.wl.pointerEnterSerial,
+ surface,
+ cursorWayland->xhot / scale,
+ cursorWayland->yhot / scale);
+ wl_surface_set_buffer_scale(surface, scale);
+ wl_surface_attach(surface, buffer, 0, 0);
+ wl_surface_damage(surface, 0, 0,
+ cursorWayland->width, cursorWayland->height);
+ wl_surface_commit(surface);
+}
+
+static void incrementCursorImage(_GLFWwindow* window)
+{
+ _GLFWcursor* cursor;
+
+ if (!window || window->wl.decorations.focus != mainWindow)
+ return;
+
+ cursor = window->wl.currentCursor;
+ if (cursor && cursor->wl.cursor)
+ {
+ cursor->wl.currentImage += 1;
+ cursor->wl.currentImage %= cursor->wl.cursor->image_count;
+ setCursorImage(window, &cursor->wl);
+ }
+}
+
+static GLFWbool flushDisplay(void)
+{
+ while (wl_display_flush(_glfw.wl.display) == -1)
+ {
+ if (errno != EAGAIN)
+ return GLFW_FALSE;
+
+ struct pollfd fd = { wl_display_get_fd(_glfw.wl.display), POLLOUT };
+
+ while (poll(&fd, 1, -1) == -1)
+ {
+ if (errno != EINTR && errno != EAGAIN)
+ return GLFW_FALSE;
+ }
+ }
+
+ return GLFW_TRUE;
+}
+
+static void handleEvents(double* timeout)
+{
+ GLFWbool event = GLFW_FALSE;
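+    // Poll three descriptors: the Wayland display connection, the key repeat
+    // timerfd and the cursor animation timerfd.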
+ struct pollfd fds[] =
+ {
+ { wl_display_get_fd(_glfw.wl.display), POLLIN },
+ { _glfw.wl.timerfd, POLLIN },
+ { _glfw.wl.cursorTimerfd, POLLIN },
+ };
+
+ while (!event)
+ {
+ while (wl_display_prepare_read(_glfw.wl.display) != 0)
+ wl_display_dispatch_pending(_glfw.wl.display);
+
+ // If an error other than EAGAIN happens, we have likely been disconnected
+ // from the Wayland session; try to handle that the best we can.
+ if (!flushDisplay())
+ {
+ wl_display_cancel_read(_glfw.wl.display);
+
+ _GLFWwindow* window = _glfw.windowListHead;
+ while (window)
+ {
+ _glfwInputWindowCloseRequest(window);
+ window = window->next;
+ }
+
+ return;
+ }
+
+ if (!_glfwPollPOSIX(fds, 3, timeout))
+ {
+ wl_display_cancel_read(_glfw.wl.display);
+ return;
+ }
+
+ if (fds[0].revents & POLLIN)
+ {
+ wl_display_read_events(_glfw.wl.display);
+ if (wl_display_dispatch_pending(_glfw.wl.display) > 0)
+ event = GLFW_TRUE;
+ }
+ else
+ wl_display_cancel_read(_glfw.wl.display);
+
+ if (fds[1].revents & POLLIN)
+ {
+ uint64_t repeats;
+
+ if (read(_glfw.wl.timerfd, &repeats, sizeof(repeats)) == 8)
+ {
+ for (uint64_t i = 0; i < repeats; i++)
+ {
+ _glfwInputKey(_glfw.wl.keyboardFocus,
+ _glfw.wl.keyboardLastKey,
+ _glfw.wl.keyboardLastScancode,
+ GLFW_PRESS,
+ _glfw.wl.xkb.modifiers);
+ _glfwInputTextWayland(_glfw.wl.keyboardFocus,
+ _glfw.wl.keyboardLastScancode);
+ }
+
+ event = GLFW_TRUE;
+ }
+ }
+
+ if (fds[2].revents & POLLIN)
+ {
+ uint64_t repeats;
+
+ if (read(_glfw.wl.cursorTimerfd, &repeats, sizeof(repeats)) == 8)
+ {
+ incrementCursorImage(_glfw.wl.pointerFocus);
+ event = GLFW_TRUE;
+ }
+ }
+ }
+}
+
+// Reads the specified data offer as the specified MIME type
+//
+static char* readDataOfferAsString(struct wl_data_offer* offer, const char* mimeType)
+{
+ int fds[2];
+
+ if (pipe2(fds, O_CLOEXEC) == -1)
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "Wayland: Failed to create pipe for data offer: %s",
+ strerror(errno));
+ return NULL;
+ }
+
+ wl_data_offer_receive(offer, mimeType, fds[1]);
+ flushDisplay();
+ close(fds[1]);
+
+ char* string = NULL;
+ size_t size = 0;
+ size_t length = 0;
+
+ for (;;)
+ {
+ const size_t readSize = 4096;
+ const size_t requiredSize = length + readSize + 1;
+ if (requiredSize > size)
+ {
+ char* longer = _glfw_realloc(string, requiredSize);
+ if (!longer)
+ {
+ _glfwInputError(GLFW_OUT_OF_MEMORY, NULL);
+ close(fds[0]);
+ return NULL;
+ }
+
+ string = longer;
+ size = requiredSize;
+ }
+
+ const ssize_t result = read(fds[0], string + length, readSize);
+ if (result == 0)
+ break;
+ else if (result == -1)
+ {
+ if (errno == EINTR)
+ continue;
+
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "Wayland: Failed to read from data offer pipe: %s",
+ strerror(errno));
+ close(fds[0]);
+ return NULL;
+ }
+
+ length += result;
+ }
+
+ close(fds[0]);
+
+ string[length] = '\0';
+ return string;
+}
+
+static _GLFWwindow* findWindowFromDecorationSurface(struct wl_surface* surface,
+ int* which)
+{
+ int focus;
+ _GLFWwindow* window = _glfw.windowListHead;
+ if (!which)
+ which = &focus;
+ while (window)
+ {
+ if (surface == window->wl.decorations.top.surface)
+ {
+ *which = topDecoration;
+ break;
+ }
+ if (surface == window->wl.decorations.left.surface)
+ {
+ *which = leftDecoration;
+ break;
+ }
+ if (surface == window->wl.decorations.right.surface)
+ {
+ *which = rightDecoration;
+ break;
+ }
+ if (surface == window->wl.decorations.bottom.surface)
+ {
+ *which = bottomDecoration;
+ break;
+ }
+ window = window->next;
+ }
+ return window;
+}
+
+static void pointerHandleEnter(void* userData,
+ struct wl_pointer* pointer,
+ uint32_t serial,
+ struct wl_surface* surface,
+ wl_fixed_t sx,
+ wl_fixed_t sy)
+{
+    // This can happen if we just destroyed the surface.
+ if (!surface)
+ return;
+
+ int focus = 0;
+ _GLFWwindow* window = wl_surface_get_user_data(surface);
+ if (!window)
+ {
+ window = findWindowFromDecorationSurface(surface, &focus);
+ if (!window)
+ return;
+ }
+
+ window->wl.decorations.focus = focus;
+ _glfw.wl.serial = serial;
+ _glfw.wl.pointerEnterSerial = serial;
+ _glfw.wl.pointerFocus = window;
+
+ window->wl.hovered = GLFW_TRUE;
+
+ _glfwSetCursorWayland(window, window->wl.currentCursor);
+ _glfwInputCursorEnter(window, GLFW_TRUE);
+}
+
+static void pointerHandleLeave(void* userData,
+ struct wl_pointer* pointer,
+ uint32_t serial,
+ struct wl_surface* surface)
+{
+ _GLFWwindow* window = _glfw.wl.pointerFocus;
+
+ if (!window)
+ return;
+
+ window->wl.hovered = GLFW_FALSE;
+
+ _glfw.wl.serial = serial;
+ _glfw.wl.pointerFocus = NULL;
+ _glfwInputCursorEnter(window, GLFW_FALSE);
+ _glfw.wl.cursorPreviousName = NULL;
+}
+
+static void setCursor(_GLFWwindow* window, const char* name)
+{
+ struct wl_buffer* buffer;
+ struct wl_cursor* cursor;
+ struct wl_cursor_image* image;
+ struct wl_surface* surface = _glfw.wl.cursorSurface;
+ struct wl_cursor_theme* theme = _glfw.wl.cursorTheme;
+ int scale = 1;
+
+ if (window->wl.scale > 1 && _glfw.wl.cursorThemeHiDPI)
+ {
+ // We only support up to scale=2 for now, since libwayland-cursor
+ // requires us to load a different theme for each size.
+ scale = 2;
+ theme = _glfw.wl.cursorThemeHiDPI;
+ }
+
+ cursor = wl_cursor_theme_get_cursor(theme, name);
+ if (!cursor)
+ {
+ _glfwInputError(GLFW_CURSOR_UNAVAILABLE,
+ "Wayland: Standard cursor shape unavailable");
+ return;
+ }
+ // TODO: handle animated cursors too.
+ image = cursor->images[0];
+
+ if (!image)
+ return;
+
+ buffer = wl_cursor_image_get_buffer(image);
+ if (!buffer)
+ return;
+ wl_pointer_set_cursor(_glfw.wl.pointer, _glfw.wl.pointerEnterSerial,
+ surface,
+ image->hotspot_x / scale,
+ image->hotspot_y / scale);
+ wl_surface_set_buffer_scale(surface, scale);
+ wl_surface_attach(surface, buffer, 0, 0);
+ wl_surface_damage(surface, 0, 0,
+ image->width, image->height);
+ wl_surface_commit(surface);
+ _glfw.wl.cursorPreviousName = name;
+}
+
+static void pointerHandleMotion(void* userData,
+ struct wl_pointer* pointer,
+ uint32_t time,
+ wl_fixed_t sx,
+ wl_fixed_t sy)
+{
+ _GLFWwindow* window = _glfw.wl.pointerFocus;
+ const char* cursorName = NULL;
+ double x, y;
+
+ if (!window)
+ return;
+
+ if (window->cursorMode == GLFW_CURSOR_DISABLED)
+ return;
+ x = wl_fixed_to_double(sx);
+ y = wl_fixed_to_double(sy);
+ window->wl.cursorPosX = x;
+ window->wl.cursorPosY = y;
+
+ switch (window->wl.decorations.focus)
+ {
+ case mainWindow:
+ _glfwInputCursorPos(window, x, y);
+ _glfw.wl.cursorPreviousName = NULL;
+ return;
+ case topDecoration:
+ if (y < _GLFW_DECORATION_WIDTH)
+ cursorName = "n-resize";
+ else
+ cursorName = "left_ptr";
+ break;
+ case leftDecoration:
+ if (y < _GLFW_DECORATION_WIDTH)
+ cursorName = "nw-resize";
+ else
+ cursorName = "w-resize";
+ break;
+ case rightDecoration:
+ if (y < _GLFW_DECORATION_WIDTH)
+ cursorName = "ne-resize";
+ else
+ cursorName = "e-resize";
+ break;
+ case bottomDecoration:
+ if (x < _GLFW_DECORATION_WIDTH)
+ cursorName = "sw-resize";
+ else if (x > window->wl.width + _GLFW_DECORATION_WIDTH)
+ cursorName = "se-resize";
+ else
+ cursorName = "s-resize";
+ break;
+ default:
+ assert(0);
+ }
+ if (_glfw.wl.cursorPreviousName != cursorName)
+ setCursor(window, cursorName);
+}
+
+static void pointerHandleButton(void* userData,
+ struct wl_pointer* pointer,
+ uint32_t serial,
+ uint32_t time,
+ uint32_t button,
+ uint32_t state)
+{
+ _GLFWwindow* window = _glfw.wl.pointerFocus;
+ int glfwButton;
+ uint32_t edges = XDG_TOPLEVEL_RESIZE_EDGE_NONE;
+
+ if (!window)
+ return;
+ if (button == BTN_LEFT)
+ {
+ switch (window->wl.decorations.focus)
+ {
+ case mainWindow:
+ break;
+ case topDecoration:
+ if (window->wl.cursorPosY < _GLFW_DECORATION_WIDTH)
+ edges = XDG_TOPLEVEL_RESIZE_EDGE_TOP;
+ else
+ xdg_toplevel_move(window->wl.xdg.toplevel, _glfw.wl.seat, serial);
+ break;
+ case leftDecoration:
+ if (window->wl.cursorPosY < _GLFW_DECORATION_WIDTH)
+ edges = XDG_TOPLEVEL_RESIZE_EDGE_TOP_LEFT;
+ else
+ edges = XDG_TOPLEVEL_RESIZE_EDGE_LEFT;
+ break;
+ case rightDecoration:
+ if (window->wl.cursorPosY < _GLFW_DECORATION_WIDTH)
+ edges = XDG_TOPLEVEL_RESIZE_EDGE_TOP_RIGHT;
+ else
+ edges = XDG_TOPLEVEL_RESIZE_EDGE_RIGHT;
+ break;
+ case bottomDecoration:
+ if (window->wl.cursorPosX < _GLFW_DECORATION_WIDTH)
+ edges = XDG_TOPLEVEL_RESIZE_EDGE_BOTTOM_LEFT;
+ else if (window->wl.cursorPosX > window->wl.width + _GLFW_DECORATION_WIDTH)
+ edges = XDG_TOPLEVEL_RESIZE_EDGE_BOTTOM_RIGHT;
+ else
+ edges = XDG_TOPLEVEL_RESIZE_EDGE_BOTTOM;
+ break;
+ default:
+ assert(0);
+ }
+ if (edges != XDG_TOPLEVEL_RESIZE_EDGE_NONE)
+ {
+ xdg_toplevel_resize(window->wl.xdg.toplevel, _glfw.wl.seat,
+ serial, edges);
+ return;
+ }
+ }
+ else if (button == BTN_RIGHT)
+ {
+ if (window->wl.decorations.focus != mainWindow && window->wl.xdg.toplevel)
+ {
+ xdg_toplevel_show_window_menu(window->wl.xdg.toplevel,
+ _glfw.wl.seat, serial,
+ window->wl.cursorPosX,
+ window->wl.cursorPosY);
+ return;
+ }
+ }
+
+ // Don’t pass the button to the user if it was related to a decoration.
+ if (window->wl.decorations.focus != mainWindow)
+ return;
+
+ _glfw.wl.serial = serial;
+
+ /* Makes left, right and middle 0, 1 and 2. Overall order follows evdev
+ * codes. */
+ glfwButton = button - BTN_LEFT;
+
+ _glfwInputMouseClick(window,
+ glfwButton,
+ state == WL_POINTER_BUTTON_STATE_PRESSED
+ ? GLFW_PRESS
+ : GLFW_RELEASE,
+ _glfw.wl.xkb.modifiers);
+}
+
+static void pointerHandleAxis(void* userData,
+ struct wl_pointer* pointer,
+ uint32_t time,
+ uint32_t axis,
+ wl_fixed_t value)
+{
+ _GLFWwindow* window = _glfw.wl.pointerFocus;
+ double x = 0.0, y = 0.0;
+    // Wayland scroll events are in pointer motion coordinate space (think
+    // two-finger scroll). A factor of 10 is commonly used to convert them so
+    // that one "scroll step" corresponds to 1.0.
+ const double scrollFactor = 1.0 / 10.0;
+
+ if (!window)
+ return;
+
+ assert(axis == WL_POINTER_AXIS_HORIZONTAL_SCROLL ||
+ axis == WL_POINTER_AXIS_VERTICAL_SCROLL);
+
+ if (axis == WL_POINTER_AXIS_HORIZONTAL_SCROLL)
+ x = -wl_fixed_to_double(value) * scrollFactor;
+ else if (axis == WL_POINTER_AXIS_VERTICAL_SCROLL)
+ y = -wl_fixed_to_double(value) * scrollFactor;
+
+ _glfwInputScroll(window, x, y);
+}
+
+static const struct wl_pointer_listener pointerListener =
+{
+ pointerHandleEnter,
+ pointerHandleLeave,
+ pointerHandleMotion,
+ pointerHandleButton,
+ pointerHandleAxis,
+};
+
+static void keyboardHandleKeymap(void* userData,
+ struct wl_keyboard* keyboard,
+ uint32_t format,
+ int fd,
+ uint32_t size)
+{
+ struct xkb_keymap* keymap;
+ struct xkb_state* state;
+ struct xkb_compose_table* composeTable;
+ struct xkb_compose_state* composeState;
+
+ char* mapStr;
+ const char* locale;
+
+ if (format != WL_KEYBOARD_KEYMAP_FORMAT_XKB_V1)
+ {
+ close(fd);
+ return;
+ }
+
+ mapStr = mmap(NULL, size, PROT_READ, MAP_SHARED, fd, 0);
+ if (mapStr == MAP_FAILED) {
+ close(fd);
+ return;
+ }
+
+ keymap = xkb_keymap_new_from_string(_glfw.wl.xkb.context,
+ mapStr,
+ XKB_KEYMAP_FORMAT_TEXT_V1,
+ 0);
+ munmap(mapStr, size);
+ close(fd);
+
+ if (!keymap)
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "Wayland: Failed to compile keymap");
+ return;
+ }
+
+ state = xkb_state_new(keymap);
+ if (!state)
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "Wayland: Failed to create XKB state");
+ xkb_keymap_unref(keymap);
+ return;
+ }
+
+ // Look up the preferred locale, falling back to "C" as default.
+ locale = getenv("LC_ALL");
+ if (!locale)
+ locale = getenv("LC_CTYPE");
+ if (!locale)
+ locale = getenv("LANG");
+ if (!locale)
+ locale = "C";
+
+ composeTable =
+ xkb_compose_table_new_from_locale(_glfw.wl.xkb.context, locale,
+ XKB_COMPOSE_COMPILE_NO_FLAGS);
+ if (composeTable)
+ {
+ composeState =
+ xkb_compose_state_new(composeTable, XKB_COMPOSE_STATE_NO_FLAGS);
+ xkb_compose_table_unref(composeTable);
+ if (composeState)
+ _glfw.wl.xkb.composeState = composeState;
+ else
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "Wayland: Failed to create XKB compose state");
+ }
+ else
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "Wayland: Failed to create XKB compose table");
+ }
+
+ xkb_keymap_unref(_glfw.wl.xkb.keymap);
+ xkb_state_unref(_glfw.wl.xkb.state);
+ _glfw.wl.xkb.keymap = keymap;
+ _glfw.wl.xkb.state = state;
+
+ _glfw.wl.xkb.controlMask =
+ 1 << xkb_keymap_mod_get_index(_glfw.wl.xkb.keymap, "Control");
+ _glfw.wl.xkb.altMask =
+ 1 << xkb_keymap_mod_get_index(_glfw.wl.xkb.keymap, "Mod1");
+ _glfw.wl.xkb.shiftMask =
+ 1 << xkb_keymap_mod_get_index(_glfw.wl.xkb.keymap, "Shift");
+ _glfw.wl.xkb.superMask =
+ 1 << xkb_keymap_mod_get_index(_glfw.wl.xkb.keymap, "Mod4");
+ _glfw.wl.xkb.capsLockMask =
+ 1 << xkb_keymap_mod_get_index(_glfw.wl.xkb.keymap, "Lock");
+ _glfw.wl.xkb.numLockMask =
+ 1 << xkb_keymap_mod_get_index(_glfw.wl.xkb.keymap, "Mod2");
+}
+
+static void keyboardHandleEnter(void* userData,
+ struct wl_keyboard* keyboard,
+ uint32_t serial,
+ struct wl_surface* surface,
+ struct wl_array* keys)
+{
+    // This can happen if we just destroyed the surface.
+ if (!surface)
+ return;
+
+ _GLFWwindow* window = wl_surface_get_user_data(surface);
+ if (!window)
+ {
+ window = findWindowFromDecorationSurface(surface, NULL);
+ if (!window)
+ return;
+ }
+
+ _glfw.wl.serial = serial;
+ _glfw.wl.keyboardFocus = window;
+ _glfwInputWindowFocus(window, GLFW_TRUE);
+}
+
+static void keyboardHandleLeave(void* userData,
+ struct wl_keyboard* keyboard,
+ uint32_t serial,
+ struct wl_surface* surface)
+{
+ _GLFWwindow* window = _glfw.wl.keyboardFocus;
+
+ if (!window)
+ return;
+
+ struct itimerspec timer = {};
+ timerfd_settime(_glfw.wl.timerfd, 0, &timer, NULL);
+
+ _glfw.wl.serial = serial;
+ _glfw.wl.keyboardFocus = NULL;
+ _glfwInputWindowFocus(window, GLFW_FALSE);
+}
+
+static int translateKey(uint32_t scancode)
+{
+ if (scancode < sizeof(_glfw.wl.keycodes) / sizeof(_glfw.wl.keycodes[0]))
+ return _glfw.wl.keycodes[scancode];
+
+ return GLFW_KEY_UNKNOWN;
+}
+
+static xkb_keysym_t composeSymbol(xkb_keysym_t sym)
+{
+ if (sym == XKB_KEY_NoSymbol || !_glfw.wl.xkb.composeState)
+ return sym;
+ if (xkb_compose_state_feed(_glfw.wl.xkb.composeState, sym)
+ != XKB_COMPOSE_FEED_ACCEPTED)
+ return sym;
+ switch (xkb_compose_state_get_status(_glfw.wl.xkb.composeState))
+ {
+ case XKB_COMPOSE_COMPOSED:
+ return xkb_compose_state_get_one_sym(_glfw.wl.xkb.composeState);
+ case XKB_COMPOSE_COMPOSING:
+ case XKB_COMPOSE_CANCELLED:
+ return XKB_KEY_NoSymbol;
+ case XKB_COMPOSE_NOTHING:
+ default:
+ return sym;
+ }
+}
+
+GLFWbool _glfwInputTextWayland(_GLFWwindow* window, uint32_t scancode)
+{
+ const xkb_keysym_t* keysyms;
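+    // XKB keycodes are offset by 8 from Linux evdev scancodes.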
+ const xkb_keycode_t keycode = scancode + 8;
+
+ if (xkb_state_key_get_syms(_glfw.wl.xkb.state, keycode, &keysyms) == 1)
+ {
+ const xkb_keysym_t keysym = composeSymbol(keysyms[0]);
+ const uint32_t codepoint = _glfwKeySym2Unicode(keysym);
+ if (codepoint != GLFW_INVALID_CODEPOINT)
+ {
+ const int mods = _glfw.wl.xkb.modifiers;
+ const int plain = !(mods & (GLFW_MOD_CONTROL | GLFW_MOD_ALT));
+ _glfwInputChar(window, codepoint, mods, plain);
+ }
+ }
+
+ return xkb_keymap_key_repeats(_glfw.wl.xkb.keymap, keycode);
+}
+
+static void keyboardHandleKey(void* userData,
+ struct wl_keyboard* keyboard,
+ uint32_t serial,
+ uint32_t time,
+ uint32_t scancode,
+ uint32_t state)
+{
+ _GLFWwindow* window = _glfw.wl.keyboardFocus;
+ if (!window)
+ return;
+
+ const int key = translateKey(scancode);
+ const int action =
+ state == WL_KEYBOARD_KEY_STATE_PRESSED ? GLFW_PRESS : GLFW_RELEASE;
+
+ _glfw.wl.serial = serial;
+ _glfwInputKey(window, key, scancode, action, _glfw.wl.xkb.modifiers);
+
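+    // Key repeat is handled client-side on Wayland: arm the repeat timerfd
+    // for a repeatable press and disarm it (zeroed spec) otherwise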
+ struct itimerspec timer = {};
+
+ if (action == GLFW_PRESS)
+ {
+ const GLFWbool shouldRepeat = _glfwInputTextWayland(window, scancode);
+
+ if (shouldRepeat && _glfw.wl.keyboardRepeatRate > 0)
+ {
+ _glfw.wl.keyboardLastKey = key;
+ _glfw.wl.keyboardLastScancode = scancode;
+ if (_glfw.wl.keyboardRepeatRate > 1)
+ timer.it_interval.tv_nsec = 1000000000 / _glfw.wl.keyboardRepeatRate;
+ else
+ timer.it_interval.tv_sec = 1;
+
+ timer.it_value.tv_sec = _glfw.wl.keyboardRepeatDelay / 1000;
+ timer.it_value.tv_nsec = (_glfw.wl.keyboardRepeatDelay % 1000) * 1000000;
+ }
+ }
+
+ timerfd_settime(_glfw.wl.timerfd, 0, &timer, NULL);
+}
+
+static void keyboardHandleModifiers(void* userData,
+ struct wl_keyboard* keyboard,
+ uint32_t serial,
+ uint32_t modsDepressed,
+ uint32_t modsLatched,
+ uint32_t modsLocked,
+ uint32_t group)
+{
+ _glfw.wl.serial = serial;
+
+ if (!_glfw.wl.xkb.keymap)
+ return;
+
+ xkb_state_update_mask(_glfw.wl.xkb.state,
+ modsDepressed,
+ modsLatched,
+ modsLocked,
+ 0,
+ 0,
+ group);
+
+ const xkb_mod_mask_t mask =
+ xkb_state_serialize_mods(_glfw.wl.xkb.state,
+ XKB_STATE_MODS_DEPRESSED |
+ XKB_STATE_LAYOUT_DEPRESSED |
+ XKB_STATE_MODS_LATCHED |
+ XKB_STATE_LAYOUT_LATCHED);
+
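+    // Translate the effective XKB modifier mask into GLFW modifier bits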
+ unsigned int mods = 0;
+
+ if (mask & _glfw.wl.xkb.controlMask)
+ mods |= GLFW_MOD_CONTROL;
+ if (mask & _glfw.wl.xkb.altMask)
+ mods |= GLFW_MOD_ALT;
+ if (mask & _glfw.wl.xkb.shiftMask)
+ mods |= GLFW_MOD_SHIFT;
+ if (mask & _glfw.wl.xkb.superMask)
+ mods |= GLFW_MOD_SUPER;
+ if (mask & _glfw.wl.xkb.capsLockMask)
+ mods |= GLFW_MOD_CAPS_LOCK;
+ if (mask & _glfw.wl.xkb.numLockMask)
+ mods |= GLFW_MOD_NUM_LOCK;
+
+ _glfw.wl.xkb.modifiers = mods;
+}
+
+#ifdef WL_KEYBOARD_REPEAT_INFO_SINCE_VERSION
+static void keyboardHandleRepeatInfo(void* userData,
+ struct wl_keyboard* keyboard,
+ int32_t rate,
+ int32_t delay)
+{
+ if (keyboard != _glfw.wl.keyboard)
+ return;
+
+ _glfw.wl.keyboardRepeatRate = rate;
+ _glfw.wl.keyboardRepeatDelay = delay;
+}
+#endif
+
+static const struct wl_keyboard_listener keyboardListener =
+{
+ keyboardHandleKeymap,
+ keyboardHandleEnter,
+ keyboardHandleLeave,
+ keyboardHandleKey,
+ keyboardHandleModifiers,
+#ifdef WL_KEYBOARD_REPEAT_INFO_SINCE_VERSION
+ keyboardHandleRepeatInfo,
+#endif
+};
+
+static void seatHandleCapabilities(void* userData,
+ struct wl_seat* seat,
+ enum wl_seat_capability caps)
+{
+ if ((caps & WL_SEAT_CAPABILITY_POINTER) && !_glfw.wl.pointer)
+ {
+ _glfw.wl.pointer = wl_seat_get_pointer(seat);
+ wl_pointer_add_listener(_glfw.wl.pointer, &pointerListener, NULL);
+ }
+ else if (!(caps & WL_SEAT_CAPABILITY_POINTER) && _glfw.wl.pointer)
+ {
+ wl_pointer_destroy(_glfw.wl.pointer);
+ _glfw.wl.pointer = NULL;
+ }
+
+ if ((caps & WL_SEAT_CAPABILITY_KEYBOARD) && !_glfw.wl.keyboard)
+ {
+ _glfw.wl.keyboard = wl_seat_get_keyboard(seat);
+ wl_keyboard_add_listener(_glfw.wl.keyboard, &keyboardListener, NULL);
+ }
+ else if (!(caps & WL_SEAT_CAPABILITY_KEYBOARD) && _glfw.wl.keyboard)
+ {
+ wl_keyboard_destroy(_glfw.wl.keyboard);
+ _glfw.wl.keyboard = NULL;
+ }
+}
+
+static void seatHandleName(void* userData,
+ struct wl_seat* seat,
+ const char* name)
+{
+}
+
+static const struct wl_seat_listener seatListener =
+{
+ seatHandleCapabilities,
+ seatHandleName,
+};
+
+static void dataOfferHandleOffer(void* userData,
+ struct wl_data_offer* offer,
+ const char* mimeType)
+{
+ for (unsigned int i = 0; i < _glfw.wl.offerCount; i++)
+ {
+ if (_glfw.wl.offers[i].offer == offer)
+ {
+ if (strcmp(mimeType, "text/plain;charset=utf-8") == 0)
+ _glfw.wl.offers[i].text_plain_utf8 = GLFW_TRUE;
+ else if (strcmp(mimeType, "text/uri-list") == 0)
+ _glfw.wl.offers[i].text_uri_list = GLFW_TRUE;
+
+ break;
+ }
+ }
+}
+
+static const struct wl_data_offer_listener dataOfferListener =
+{
+ dataOfferHandleOffer
+};
+
+static void dataDeviceHandleDataOffer(void* userData,
+ struct wl_data_device* device,
+ struct wl_data_offer* offer)
+{
+    _GLFWofferWayland* offers =
+        _glfw_realloc(_glfw.wl.offers,
+                      sizeof(_GLFWofferWayland) * (_glfw.wl.offerCount + 1));
+ if (!offers)
+ {
+ _glfwInputError(GLFW_OUT_OF_MEMORY, NULL);
+ return;
+ }
+
+ _glfw.wl.offers = offers;
+ _glfw.wl.offerCount++;
+
+ _glfw.wl.offers[_glfw.wl.offerCount - 1] = (_GLFWofferWayland) { offer };
+ wl_data_offer_add_listener(offer, &dataOfferListener, NULL);
+}
+
+static void dataDeviceHandleEnter(void* userData,
+ struct wl_data_device* device,
+ uint32_t serial,
+ struct wl_surface* surface,
+ wl_fixed_t x,
+ wl_fixed_t y,
+ struct wl_data_offer* offer)
+{
+ if (_glfw.wl.dragOffer)
+ {
+ wl_data_offer_destroy(_glfw.wl.dragOffer);
+ _glfw.wl.dragOffer = NULL;
+ _glfw.wl.dragFocus = NULL;
+ }
+
+ for (unsigned int i = 0; i < _glfw.wl.offerCount; i++)
+ {
+ if (_glfw.wl.offers[i].offer == offer)
+ {
+ _GLFWwindow* window = NULL;
+
+ if (surface)
+ window = wl_surface_get_user_data(surface);
+
+ if (window && _glfw.wl.offers[i].text_uri_list)
+ {
+ _glfw.wl.dragOffer = offer;
+ _glfw.wl.dragFocus = window;
+ _glfw.wl.dragSerial = serial;
+ }
+
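+            // Remove the offer from the list by swapping in the last element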
+ _glfw.wl.offers[i] = _glfw.wl.offers[_glfw.wl.offerCount - 1];
+ _glfw.wl.offerCount--;
+ break;
+ }
+ }
+
+ if (_glfw.wl.dragOffer)
+ wl_data_offer_accept(offer, serial, "text/uri-list");
+ else
+ {
+ wl_data_offer_accept(offer, serial, NULL);
+ wl_data_offer_destroy(offer);
+ }
+}
+
+static void dataDeviceHandleLeave(void* userData,
+ struct wl_data_device* device)
+{
+ if (_glfw.wl.dragOffer)
+ {
+ wl_data_offer_destroy(_glfw.wl.dragOffer);
+ _glfw.wl.dragOffer = NULL;
+ _glfw.wl.dragFocus = NULL;
+ }
+}
+
+static void dataDeviceHandleMotion(void* userData,
+ struct wl_data_device* device,
+ uint32_t time,
+ wl_fixed_t x,
+ wl_fixed_t y)
+{
+}
+
+static void dataDeviceHandleDrop(void* userData,
+ struct wl_data_device* device)
+{
+ if (!_glfw.wl.dragOffer)
+ return;
+
+ char* string = readDataOfferAsString(_glfw.wl.dragOffer, "text/uri-list");
+ if (string)
+ {
+ int count;
+ char** paths = _glfwParseUriList(string, &count);
+ if (paths)
+ _glfwInputDrop(_glfw.wl.dragFocus, count, (const char**) paths);
+
+ for (int i = 0; i < count; i++)
+ _glfw_free(paths[i]);
+
+ _glfw_free(paths);
+ }
+
+ _glfw_free(string);
+}
+
+static void dataDeviceHandleSelection(void* userData,
+ struct wl_data_device* device,
+ struct wl_data_offer* offer)
+{
+ if (_glfw.wl.selectionOffer)
+ {
+ wl_data_offer_destroy(_glfw.wl.selectionOffer);
+ _glfw.wl.selectionOffer = NULL;
+ }
+
+ for (unsigned int i = 0; i < _glfw.wl.offerCount; i++)
+ {
+ if (_glfw.wl.offers[i].offer == offer)
+ {
+ if (_glfw.wl.offers[i].text_plain_utf8)
+ _glfw.wl.selectionOffer = offer;
+ else
+ wl_data_offer_destroy(offer);
+
+ _glfw.wl.offers[i] = _glfw.wl.offers[_glfw.wl.offerCount - 1];
+ _glfw.wl.offerCount--;
+ break;
+ }
+ }
+}
+
+const struct wl_data_device_listener dataDeviceListener =
+{
+ dataDeviceHandleDataOffer,
+ dataDeviceHandleEnter,
+ dataDeviceHandleLeave,
+ dataDeviceHandleMotion,
+ dataDeviceHandleDrop,
+ dataDeviceHandleSelection,
+};
+
+void _glfwAddSeatListenerWayland(struct wl_seat* seat)
+{
+ wl_seat_add_listener(seat, &seatListener, NULL);
+}
+
+void _glfwAddDataDeviceListenerWayland(struct wl_data_device* device)
+{
+ wl_data_device_add_listener(device, &dataDeviceListener, NULL);
+}
+
+
+//////////////////////////////////////////////////////////////////////////
+////// GLFW platform API //////
+//////////////////////////////////////////////////////////////////////////
+
+int _glfwCreateWindowWayland(_GLFWwindow* window,
+ const _GLFWwndconfig* wndconfig,
+ const _GLFWctxconfig* ctxconfig,
+ const _GLFWfbconfig* fbconfig)
+{
+ if (!createSurface(window, wndconfig, fbconfig))
+ return GLFW_FALSE;
+
+ if (ctxconfig->client != GLFW_NO_API)
+ {
+ if (ctxconfig->source == GLFW_EGL_CONTEXT_API ||
+ ctxconfig->source == GLFW_NATIVE_CONTEXT_API)
+ {
+ if (!_glfwInitEGL())
+ return GLFW_FALSE;
+ if (!_glfwCreateContextEGL(window, ctxconfig, fbconfig))
+ return GLFW_FALSE;
+ }
+ else if (ctxconfig->source == GLFW_OSMESA_CONTEXT_API)
+ {
+ if (!_glfwInitOSMesa())
+ return GLFW_FALSE;
+ if (!_glfwCreateContextOSMesa(window, ctxconfig, fbconfig))
+ return GLFW_FALSE;
+ }
+
+ if (!_glfwRefreshContextAttribs(window, ctxconfig))
+ return GLFW_FALSE;
+ }
+
+ if (wndconfig->mousePassthrough)
+ _glfwSetWindowMousePassthroughWayland(window, GLFW_TRUE);
+
+ return GLFW_TRUE;
+}
+
+void _glfwDestroyWindowWayland(_GLFWwindow* window)
+{
+ if (window == _glfw.wl.pointerFocus)
+ {
+ _glfw.wl.pointerFocus = NULL;
+ _glfwInputCursorEnter(window, GLFW_FALSE);
+ }
+ if (window == _glfw.wl.keyboardFocus)
+ {
+ _glfw.wl.keyboardFocus = NULL;
+ _glfwInputWindowFocus(window, GLFW_FALSE);
+ }
+
+ if (window->wl.idleInhibitor)
+ zwp_idle_inhibitor_v1_destroy(window->wl.idleInhibitor);
+
+ if (window->context.destroy)
+ window->context.destroy(window);
+
+ destroyDecorations(window);
+ if (window->wl.xdg.decoration)
+ zxdg_toplevel_decoration_v1_destroy(window->wl.xdg.decoration);
+
+ if (window->wl.decorations.buffer)
+ wl_buffer_destroy(window->wl.decorations.buffer);
+
+ if (window->wl.native)
+ wl_egl_window_destroy(window->wl.native);
+
+ if (window->wl.xdg.toplevel)
+ xdg_toplevel_destroy(window->wl.xdg.toplevel);
+
+ if (window->wl.xdg.surface)
+ xdg_surface_destroy(window->wl.xdg.surface);
+
+ if (window->wl.surface)
+ wl_surface_destroy(window->wl.surface);
+
+ _glfw_free(window->wl.title);
+ _glfw_free(window->wl.monitors);
+}
+
+void _glfwSetWindowTitleWayland(_GLFWwindow* window, const char* title)
+{
+ if (window->wl.title)
+ _glfw_free(window->wl.title);
+ window->wl.title = _glfw_strdup(title);
+ if (window->wl.xdg.toplevel)
+ xdg_toplevel_set_title(window->wl.xdg.toplevel, title);
+}
+
+void _glfwSetWindowIconWayland(_GLFWwindow* window,
+ int count, const GLFWimage* images)
+{
+ _glfwInputError(GLFW_FEATURE_UNAVAILABLE,
+ "Wayland: The platform does not support setting the window icon");
+}
+
+void _glfwGetWindowPosWayland(_GLFWwindow* window, int* xpos, int* ypos)
+{
+ // A Wayland client is not aware of its position, so just warn and leave it
+ // as (0, 0)
+
+ _glfwInputError(GLFW_FEATURE_UNAVAILABLE,
+ "Wayland: The platform does not provide the window position");
+}
+
+void _glfwSetWindowPosWayland(_GLFWwindow* window, int xpos, int ypos)
+{
+    // A Wayland client cannot set its position, so just warn
+
+ _glfwInputError(GLFW_FEATURE_UNAVAILABLE,
+ "Wayland: The platform does not support setting the window position");
+}
+
+void _glfwGetWindowSizeWayland(_GLFWwindow* window, int* width, int* height)
+{
+ if (width)
+ *width = window->wl.width;
+ if (height)
+ *height = window->wl.height;
+}
+
+void _glfwSetWindowSizeWayland(_GLFWwindow* window, int width, int height)
+{
+ window->wl.width = width;
+ window->wl.height = height;
+ resizeWindow(window);
+}
+
+void _glfwSetWindowSizeLimitsWayland(_GLFWwindow* window,
+ int minwidth, int minheight,
+ int maxwidth, int maxheight)
+{
+ if (window->wl.xdg.toplevel)
+ {
+ if (minwidth == GLFW_DONT_CARE || minheight == GLFW_DONT_CARE)
+ minwidth = minheight = 0;
+ if (maxwidth == GLFW_DONT_CARE || maxheight == GLFW_DONT_CARE)
+ maxwidth = maxheight = 0;
+ xdg_toplevel_set_min_size(window->wl.xdg.toplevel, minwidth, minheight);
+ xdg_toplevel_set_max_size(window->wl.xdg.toplevel, maxwidth, maxheight);
+ wl_surface_commit(window->wl.surface);
+ }
+}
+
+void _glfwSetWindowAspectRatioWayland(_GLFWwindow* window, int numer, int denom)
+{
+ // TODO: find out how to trigger a resize.
+ // The actual limits are checked in the xdg_toplevel::configure handler.
+ _glfwInputError(GLFW_FEATURE_UNIMPLEMENTED,
+ "Wayland: Window aspect ratio not yet implemented");
+}
+
+void _glfwGetFramebufferSizeWayland(_GLFWwindow* window, int* width, int* height)
+{
+ _glfwGetWindowSizeWayland(window, width, height);
+ if (width)
+ *width *= window->wl.scale;
+ if (height)
+ *height *= window->wl.scale;
+}
+
+void _glfwGetWindowFrameSizeWayland(_GLFWwindow* window,
+ int* left, int* top,
+ int* right, int* bottom)
+{
+ if (window->decorated && !window->monitor && !window->wl.decorations.serverSide)
+ {
+ if (top)
+ *top = _GLFW_DECORATION_TOP;
+ if (left)
+ *left = _GLFW_DECORATION_WIDTH;
+ if (right)
+ *right = _GLFW_DECORATION_WIDTH;
+ if (bottom)
+ *bottom = _GLFW_DECORATION_WIDTH;
+ }
+}
+
+void _glfwGetWindowContentScaleWayland(_GLFWwindow* window,
+ float* xscale, float* yscale)
+{
+ if (xscale)
+ *xscale = (float) window->wl.scale;
+ if (yscale)
+ *yscale = (float) window->wl.scale;
+}
+
+void _glfwIconifyWindowWayland(_GLFWwindow* window)
+{
+ if (window->wl.xdg.toplevel)
+ xdg_toplevel_set_minimized(window->wl.xdg.toplevel);
+}
+
+void _glfwRestoreWindowWayland(_GLFWwindow* window)
+{
+ if (window->wl.xdg.toplevel)
+ {
+ if (window->monitor)
+ xdg_toplevel_unset_fullscreen(window->wl.xdg.toplevel);
+ if (window->wl.maximized)
+ xdg_toplevel_unset_maximized(window->wl.xdg.toplevel);
+ // There is no way to unset minimized, or even to know if we are
+ // minimized, so there is nothing to do in this case.
+ }
+ _glfwInputWindowMonitor(window, NULL);
+ window->wl.maximized = GLFW_FALSE;
+}
+
+void _glfwMaximizeWindowWayland(_GLFWwindow* window)
+{
+ if (window->wl.xdg.toplevel)
+ {
+ xdg_toplevel_set_maximized(window->wl.xdg.toplevel);
+ }
+ window->wl.maximized = GLFW_TRUE;
+}
+
+void _glfwShowWindowWayland(_GLFWwindow* window)
+{
+ if (!window->wl.visible)
+ {
+        // NOTE: The XDG surface and role are created here so that command-line
+        // applications with off-screen windows do not appear in, for example,
+        // the Unity dock
+ if (!window->wl.xdg.toplevel)
+ createXdgSurface(window);
+
+ window->wl.visible = GLFW_TRUE;
+ _glfwInputWindowDamage(window);
+ }
+}
+
+void _glfwHideWindowWayland(_GLFWwindow* window)
+{
+ if (window->wl.visible)
+ {
+ window->wl.visible = GLFW_FALSE;
+ wl_surface_attach(window->wl.surface, NULL, 0, 0);
+ wl_surface_commit(window->wl.surface);
+ }
+}
+
+void _glfwRequestWindowAttentionWayland(_GLFWwindow* window)
+{
+ // TODO
+ _glfwInputError(GLFW_FEATURE_UNIMPLEMENTED,
+ "Wayland: Window attention request not implemented yet");
+}
+
+void _glfwFocusWindowWayland(_GLFWwindow* window)
+{
+ _glfwInputError(GLFW_FEATURE_UNAVAILABLE,
+ "Wayland: The platform does not support setting the input focus");
+}
+
+void _glfwSetWindowMonitorWayland(_GLFWwindow* window,
+ _GLFWmonitor* monitor,
+ int xpos, int ypos,
+ int width, int height,
+ int refreshRate)
+{
+ if (monitor)
+ {
+ setFullscreen(window, monitor, refreshRate);
+ }
+ else
+ {
+ if (window->wl.xdg.toplevel)
+ xdg_toplevel_unset_fullscreen(window->wl.xdg.toplevel);
+ setIdleInhibitor(window, GLFW_FALSE);
+ if (!_glfw.wl.decorationManager)
+ createDecorations(window);
+ }
+ _glfwInputWindowMonitor(window, monitor);
+}
+
+int _glfwWindowFocusedWayland(_GLFWwindow* window)
+{
+ return _glfw.wl.keyboardFocus == window;
+}
+
+int _glfwWindowIconifiedWayland(_GLFWwindow* window)
+{
+ // xdg-shell doesn’t give any way to request whether a surface is
+ // iconified.
+ return GLFW_FALSE;
+}
+
+int _glfwWindowVisibleWayland(_GLFWwindow* window)
+{
+ return window->wl.visible;
+}
+
+int _glfwWindowMaximizedWayland(_GLFWwindow* window)
+{
+ return window->wl.maximized;
+}
+
+int _glfwWindowHoveredWayland(_GLFWwindow* window)
+{
+ return window->wl.hovered;
+}
+
+int _glfwFramebufferTransparentWayland(_GLFWwindow* window)
+{
+ return window->wl.transparent;
+}
+
+void _glfwSetWindowResizableWayland(_GLFWwindow* window, GLFWbool enabled)
+{
+ // TODO
+ _glfwInputError(GLFW_FEATURE_UNIMPLEMENTED,
+ "Wayland: Window attribute setting not implemented yet");
+}
+
+void _glfwSetWindowDecoratedWayland(_GLFWwindow* window, GLFWbool enabled)
+{
+ if (!window->monitor)
+ {
+ if (enabled)
+ createDecorations(window);
+ else
+ destroyDecorations(window);
+ }
+}
+
+void _glfwSetWindowFloatingWayland(_GLFWwindow* window, GLFWbool enabled)
+{
+ // TODO
+ _glfwInputError(GLFW_FEATURE_UNIMPLEMENTED,
+ "Wayland: Window attribute setting not implemented yet");
+}
+
+void _glfwSetWindowMousePassthroughWayland(_GLFWwindow* window, GLFWbool enabled)
+{
+ if (enabled)
+ {
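+        // An empty input region makes the surface ignore all pointer input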
+ struct wl_region* region = wl_compositor_create_region(_glfw.wl.compositor);
+ wl_surface_set_input_region(window->wl.surface, region);
+ wl_region_destroy(region);
+ }
+ else
+ wl_surface_set_input_region(window->wl.surface, 0);
+ wl_surface_commit(window->wl.surface);
+}
+
+float _glfwGetWindowOpacityWayland(_GLFWwindow* window)
+{
+ return 1.f;
+}
+
+void _glfwSetWindowOpacityWayland(_GLFWwindow* window, float opacity)
+{
+ _glfwInputError(GLFW_FEATURE_UNAVAILABLE,
+ "Wayland: The platform does not support setting the window opacity");
+}
+
+void _glfwSetRawMouseMotionWayland(_GLFWwindow* window, GLFWbool enabled)
+{
+ // This is handled in relativePointerHandleRelativeMotion
+}
+
+GLFWbool _glfwRawMouseMotionSupportedWayland(void)
+{
+ return GLFW_TRUE;
+}
+
+void _glfwPollEventsWayland(void)
+{
+ double timeout = 0.0;
+ handleEvents(&timeout);
+}
+
+void _glfwWaitEventsWayland(void)
+{
+ handleEvents(NULL);
+}
+
+void _glfwWaitEventsTimeoutWayland(double timeout)
+{
+ handleEvents(&timeout);
+}
+
+void _glfwPostEmptyEventWayland(void)
+{
+ wl_display_sync(_glfw.wl.display);
+ flushDisplay();
+}
+
+void _glfwGetCursorPosWayland(_GLFWwindow* window, double* xpos, double* ypos)
+{
+ if (xpos)
+ *xpos = window->wl.cursorPosX;
+ if (ypos)
+ *ypos = window->wl.cursorPosY;
+}
+
+static GLFWbool isPointerLocked(_GLFWwindow* window);
+
+void _glfwSetCursorPosWayland(_GLFWwindow* window, double x, double y)
+{
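+    // The cursor position can only be provided as a hint to the compositor,
+    // and only while the pointer is locked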
+ if (isPointerLocked(window))
+ {
+ zwp_locked_pointer_v1_set_cursor_position_hint(
+ window->wl.pointerLock.lockedPointer,
+ wl_fixed_from_double(x), wl_fixed_from_double(y));
+ wl_surface_commit(window->wl.surface);
+ }
+}
+
+void _glfwSetCursorModeWayland(_GLFWwindow* window, int mode)
+{
+ _glfwSetCursorWayland(window, window->wl.currentCursor);
+}
+
+const char* _glfwGetScancodeNameWayland(int scancode)
+{
+ if (scancode < 0 || scancode > 255 ||
+ _glfw.wl.keycodes[scancode] == GLFW_KEY_UNKNOWN)
+ {
+ _glfwInputError(GLFW_INVALID_VALUE,
+ "Wayland: Invalid scancode %i",
+ scancode);
+ return NULL;
+ }
+
+ const int key = _glfw.wl.keycodes[scancode];
+ const xkb_keycode_t keycode = scancode + 8;
+ const xkb_layout_index_t layout =
+ xkb_state_key_get_layout(_glfw.wl.xkb.state, keycode);
+ if (layout == XKB_LAYOUT_INVALID)
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "Wayland: Failed to retrieve layout for key name");
+ return NULL;
+ }
+
+ const xkb_keysym_t* keysyms = NULL;
+ xkb_keymap_key_get_syms_by_level(_glfw.wl.xkb.keymap,
+ keycode,
+ layout,
+ 0,
+ &keysyms);
+ if (keysyms == NULL)
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "Wayland: Failed to retrieve keysym for key name");
+ return NULL;
+ }
+
+ const uint32_t codepoint = _glfwKeySym2Unicode(keysyms[0]);
+ if (codepoint == GLFW_INVALID_CODEPOINT)
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "Wayland: Failed to retrieve codepoint for key name");
+ return NULL;
+ }
+
+ const size_t count = _glfwEncodeUTF8(_glfw.wl.keynames[key], codepoint);
+ if (count == 0)
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "Wayland: Failed to encode codepoint for key name");
+ return NULL;
+ }
+
+ _glfw.wl.keynames[key][count] = '\0';
+ return _glfw.wl.keynames[key];
+}
+
+int _glfwGetKeyScancodeWayland(int key)
+{
+ return _glfw.wl.scancodes[key];
+}
+
+int _glfwCreateCursorWayland(_GLFWcursor* cursor,
+ const GLFWimage* image,
+ int xhot, int yhot)
+{
+ cursor->wl.buffer = createShmBuffer(image);
+ if (!cursor->wl.buffer)
+ return GLFW_FALSE;
+
+ cursor->wl.width = image->width;
+ cursor->wl.height = image->height;
+ cursor->wl.xhot = xhot;
+ cursor->wl.yhot = yhot;
+ return GLFW_TRUE;
+}
+
+int _glfwCreateStandardCursorWayland(_GLFWcursor* cursor, int shape)
+{
+ const char* name = NULL;
+
+ // Try the XDG names first
+ switch (shape)
+ {
+ case GLFW_ARROW_CURSOR:
+ name = "default";
+ break;
+ case GLFW_IBEAM_CURSOR:
+ name = "text";
+ break;
+ case GLFW_CROSSHAIR_CURSOR:
+ name = "crosshair";
+ break;
+ case GLFW_POINTING_HAND_CURSOR:
+ name = "pointer";
+ break;
+ case GLFW_RESIZE_EW_CURSOR:
+ name = "ew-resize";
+ break;
+ case GLFW_RESIZE_NS_CURSOR:
+ name = "ns-resize";
+ break;
+ case GLFW_RESIZE_NWSE_CURSOR:
+ name = "nwse-resize";
+ break;
+ case GLFW_RESIZE_NESW_CURSOR:
+ name = "nesw-resize";
+ break;
+ case GLFW_RESIZE_ALL_CURSOR:
+ name = "all-scroll";
+ break;
+ case GLFW_NOT_ALLOWED_CURSOR:
+ name = "not-allowed";
+ break;
+ }
+
+ cursor->wl.cursor = wl_cursor_theme_get_cursor(_glfw.wl.cursorTheme, name);
+
+ if (_glfw.wl.cursorThemeHiDPI)
+ {
+ cursor->wl.cursorHiDPI =
+ wl_cursor_theme_get_cursor(_glfw.wl.cursorThemeHiDPI, name);
+ }
+
+ if (!cursor->wl.cursor)
+ {
+ // Fall back to the core X11 names
+ switch (shape)
+ {
+ case GLFW_ARROW_CURSOR:
+ name = "left_ptr";
+ break;
+ case GLFW_IBEAM_CURSOR:
+ name = "xterm";
+ break;
+ case GLFW_CROSSHAIR_CURSOR:
+ name = "crosshair";
+ break;
+ case GLFW_POINTING_HAND_CURSOR:
+ name = "hand2";
+ break;
+ case GLFW_RESIZE_EW_CURSOR:
+ name = "sb_h_double_arrow";
+ break;
+ case GLFW_RESIZE_NS_CURSOR:
+ name = "sb_v_double_arrow";
+ break;
+ case GLFW_RESIZE_ALL_CURSOR:
+ name = "fleur";
+ break;
+ default:
+ _glfwInputError(GLFW_CURSOR_UNAVAILABLE,
+ "Wayland: Standard cursor shape unavailable");
+ return GLFW_FALSE;
+ }
+
+ cursor->wl.cursor = wl_cursor_theme_get_cursor(_glfw.wl.cursorTheme, name);
+ if (!cursor->wl.cursor)
+ {
+ _glfwInputError(GLFW_CURSOR_UNAVAILABLE,
+ "Wayland: Failed to create standard cursor \"%s\"",
+ name);
+ return GLFW_FALSE;
+ }
+
+ if (_glfw.wl.cursorThemeHiDPI)
+ {
+ if (!cursor->wl.cursorHiDPI)
+ {
+ cursor->wl.cursorHiDPI =
+ wl_cursor_theme_get_cursor(_glfw.wl.cursorThemeHiDPI, name);
+ }
+ }
+ }
+
+ return GLFW_TRUE;
+}
+
+void _glfwDestroyCursorWayland(_GLFWcursor* cursor)
+{
+ // If it's a standard cursor we don't need to do anything here
+ if (cursor->wl.cursor)
+ return;
+
+ if (cursor->wl.buffer)
+ wl_buffer_destroy(cursor->wl.buffer);
+}
+
+static void relativePointerHandleRelativeMotion(void* userData,
+ struct zwp_relative_pointer_v1* pointer,
+ uint32_t timeHi,
+ uint32_t timeLo,
+ wl_fixed_t dx,
+ wl_fixed_t dy,
+ wl_fixed_t dxUnaccel,
+ wl_fixed_t dyUnaccel)
+{
+ _GLFWwindow* window = userData;
+ double xpos = window->virtualCursorPosX;
+ double ypos = window->virtualCursorPosY;
+
+ if (window->cursorMode != GLFW_CURSOR_DISABLED)
+ return;
+
+ if (window->rawMouseMotion)
+ {
+ xpos += wl_fixed_to_double(dxUnaccel);
+ ypos += wl_fixed_to_double(dyUnaccel);
+ }
+ else
+ {
+ xpos += wl_fixed_to_double(dx);
+ ypos += wl_fixed_to_double(dy);
+ }
+
+ _glfwInputCursorPos(window, xpos, ypos);
+}
+
+static const struct zwp_relative_pointer_v1_listener relativePointerListener =
+{
+ relativePointerHandleRelativeMotion
+};
+
+static void lockedPointerHandleLocked(void* userData,
+ struct zwp_locked_pointer_v1* lockedPointer)
+{
+}
+
+static void unlockPointer(_GLFWwindow* window)
+{
+ struct zwp_relative_pointer_v1* relativePointer =
+ window->wl.pointerLock.relativePointer;
+ struct zwp_locked_pointer_v1* lockedPointer =
+ window->wl.pointerLock.lockedPointer;
+
+ zwp_relative_pointer_v1_destroy(relativePointer);
+ zwp_locked_pointer_v1_destroy(lockedPointer);
+
+ window->wl.pointerLock.relativePointer = NULL;
+ window->wl.pointerLock.lockedPointer = NULL;
+}
+
+static void lockPointer(_GLFWwindow* window);
+
+static void lockedPointerHandleUnlocked(void* userData,
+ struct zwp_locked_pointer_v1* lockedPointer)
+{
+}
+
+static const struct zwp_locked_pointer_v1_listener lockedPointerListener =
+{
+ lockedPointerHandleLocked,
+ lockedPointerHandleUnlocked
+};
+
+static void lockPointer(_GLFWwindow* window)
+{
+ struct zwp_relative_pointer_v1* relativePointer;
+ struct zwp_locked_pointer_v1* lockedPointer;
+
+ if (!_glfw.wl.relativePointerManager)
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "Wayland: no relative pointer manager");
+ return;
+ }
+
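+    // Disabled cursor mode is implemented with the relative-pointer and
+    // pointer-constraints protocols: motion is reported as deltas by the
+    // relative pointer while the locked pointer pins the cursor in place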
+ relativePointer =
+ zwp_relative_pointer_manager_v1_get_relative_pointer(
+ _glfw.wl.relativePointerManager,
+ _glfw.wl.pointer);
+ zwp_relative_pointer_v1_add_listener(relativePointer,
+ &relativePointerListener,
+ window);
+
+ lockedPointer =
+ zwp_pointer_constraints_v1_lock_pointer(
+ _glfw.wl.pointerConstraints,
+ window->wl.surface,
+ _glfw.wl.pointer,
+ NULL,
+ ZWP_POINTER_CONSTRAINTS_V1_LIFETIME_PERSISTENT);
+ zwp_locked_pointer_v1_add_listener(lockedPointer,
+ &lockedPointerListener,
+ window);
+
+ window->wl.pointerLock.relativePointer = relativePointer;
+ window->wl.pointerLock.lockedPointer = lockedPointer;
+
+ wl_pointer_set_cursor(_glfw.wl.pointer, _glfw.wl.pointerEnterSerial,
+ NULL, 0, 0);
+}
+
+static GLFWbool isPointerLocked(_GLFWwindow* window)
+{
+ return window->wl.pointerLock.lockedPointer != NULL;
+}
+
+void _glfwSetCursorWayland(_GLFWwindow* window, _GLFWcursor* cursor)
+{
+ struct wl_cursor* defaultCursor;
+ struct wl_cursor* defaultCursorHiDPI = NULL;
+
+ if (!_glfw.wl.pointer)
+ return;
+
+ window->wl.currentCursor = cursor;
+
+    // If we're not in the correct window, just save the cursor;
+    // the next time the pointer enters the window, the cursor will change
+ if (window != _glfw.wl.pointerFocus || window->wl.decorations.focus != mainWindow)
+ return;
+
+ // Unlock possible pointer lock if no longer disabled.
+ if (window->cursorMode != GLFW_CURSOR_DISABLED && isPointerLocked(window))
+ unlockPointer(window);
+
+ if (window->cursorMode == GLFW_CURSOR_NORMAL)
+ {
+ if (cursor)
+ setCursorImage(window, &cursor->wl);
+ else
+ {
+ defaultCursor = wl_cursor_theme_get_cursor(_glfw.wl.cursorTheme,
+ "left_ptr");
+ if (!defaultCursor)
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "Wayland: Standard cursor not found");
+ return;
+ }
+ if (_glfw.wl.cursorThemeHiDPI)
+ defaultCursorHiDPI =
+ wl_cursor_theme_get_cursor(_glfw.wl.cursorThemeHiDPI,
+ "left_ptr");
+ _GLFWcursorWayland cursorWayland = {
+ defaultCursor,
+ defaultCursorHiDPI,
+ NULL,
+ 0, 0,
+ 0, 0,
+ 0
+ };
+ setCursorImage(window, &cursorWayland);
+ }
+ }
+ else if (window->cursorMode == GLFW_CURSOR_DISABLED)
+ {
+ if (!isPointerLocked(window))
+ lockPointer(window);
+ }
+ else if (window->cursorMode == GLFW_CURSOR_HIDDEN)
+ {
+ wl_pointer_set_cursor(_glfw.wl.pointer, _glfw.wl.pointerEnterSerial, NULL, 0, 0);
+ }
+}
+
+static void dataSourceHandleTarget(void* userData,
+ struct wl_data_source* source,
+ const char* mimeType)
+{
+ if (_glfw.wl.selectionSource != source)
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "Wayland: Unknown clipboard data source");
+ return;
+ }
+}
+
+static void dataSourceHandleSend(void* userData,
+ struct wl_data_source* source,
+ const char* mimeType,
+ int fd)
+{
+ // Ignore it if this is an outdated or invalid request
+ if (_glfw.wl.selectionSource != source ||
+ strcmp(mimeType, "text/plain;charset=utf-8") != 0)
+ {
+ close(fd);
+ return;
+ }
+
+ char* string = _glfw.wl.clipboardString;
+ size_t length = strlen(string);
+
+ while (length > 0)
+ {
+ const ssize_t result = write(fd, string, length);
+ if (result == -1)
+ {
+ if (errno == EINTR)
+ continue;
+
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "Wayland: Error while writing the clipboard: %s",
+ strerror(errno));
+ break;
+ }
+
+ length -= result;
+ string += result;
+ }
+
+ close(fd);
+}
+
+static void dataSourceHandleCancelled(void* userData,
+ struct wl_data_source* source)
+{
+ wl_data_source_destroy(source);
+
+ if (_glfw.wl.selectionSource != source)
+ return;
+
+ _glfw.wl.selectionSource = NULL;
+}
+
+static const struct wl_data_source_listener dataSourceListener =
+{
+ dataSourceHandleTarget,
+ dataSourceHandleSend,
+ dataSourceHandleCancelled,
+};
+
+void _glfwSetClipboardStringWayland(const char* string)
+{
+ if (_glfw.wl.selectionSource)
+ {
+ wl_data_source_destroy(_glfw.wl.selectionSource);
+ _glfw.wl.selectionSource = NULL;
+ }
+
+ char* copy = _glfw_strdup(string);
+ if (!copy)
+ {
+ _glfwInputError(GLFW_OUT_OF_MEMORY, NULL);
+ return;
+ }
+
+ _glfw_free(_glfw.wl.clipboardString);
+ _glfw.wl.clipboardString = copy;
+
+ _glfw.wl.selectionSource =
+ wl_data_device_manager_create_data_source(_glfw.wl.dataDeviceManager);
+ if (!_glfw.wl.selectionSource)
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "Wayland: Failed to create clipboard data source");
+ return;
+ }
+ wl_data_source_add_listener(_glfw.wl.selectionSource,
+ &dataSourceListener,
+ NULL);
+ wl_data_source_offer(_glfw.wl.selectionSource, "text/plain;charset=utf-8");
+ wl_data_device_set_selection(_glfw.wl.dataDevice,
+ _glfw.wl.selectionSource,
+ _glfw.wl.serial);
+}
+
+const char* _glfwGetClipboardStringWayland(void)
+{
+ if (!_glfw.wl.selectionOffer)
+ {
+ _glfwInputError(GLFW_FORMAT_UNAVAILABLE,
+ "Wayland: No clipboard data available");
+ return NULL;
+ }
+
+ if (_glfw.wl.selectionSource)
+ return _glfw.wl.clipboardString;
+
+ _glfw_free(_glfw.wl.clipboardString);
+ _glfw.wl.clipboardString =
+ readDataOfferAsString(_glfw.wl.selectionOffer, "text/plain;charset=utf-8");
+ return _glfw.wl.clipboardString;
+}
+
+EGLenum _glfwGetEGLPlatformWayland(EGLint** attribs)
+{
+ if (_glfw.egl.EXT_platform_base && _glfw.egl.EXT_platform_wayland)
+ return EGL_PLATFORM_WAYLAND_EXT;
+ else
+ return 0;
+}
+
+EGLNativeDisplayType _glfwGetEGLNativeDisplayWayland(void)
+{
+ return _glfw.wl.display;
+}
+
+EGLNativeWindowType _glfwGetEGLNativeWindowWayland(_GLFWwindow* window)
+{
+ return window->wl.native;
+}
+
+void _glfwGetRequiredInstanceExtensionsWayland(char** extensions)
+{
+ if (!_glfw.vk.KHR_surface || !_glfw.vk.KHR_wayland_surface)
+ return;
+
+ extensions[0] = "VK_KHR_surface";
+ extensions[1] = "VK_KHR_wayland_surface";
+}
+
+int _glfwGetPhysicalDevicePresentationSupportWayland(VkInstance instance,
+ VkPhysicalDevice device,
+ uint32_t queuefamily)
+{
+ PFN_vkGetPhysicalDeviceWaylandPresentationSupportKHR
+ vkGetPhysicalDeviceWaylandPresentationSupportKHR =
+ (PFN_vkGetPhysicalDeviceWaylandPresentationSupportKHR)
+ vkGetInstanceProcAddr(instance, "vkGetPhysicalDeviceWaylandPresentationSupportKHR");
+ if (!vkGetPhysicalDeviceWaylandPresentationSupportKHR)
+ {
+ _glfwInputError(GLFW_API_UNAVAILABLE,
+ "Wayland: Vulkan instance missing VK_KHR_wayland_surface extension");
+ return VK_NULL_HANDLE;
+ }
+
+ return vkGetPhysicalDeviceWaylandPresentationSupportKHR(device,
+ queuefamily,
+ _glfw.wl.display);
+}
+
+VkResult _glfwCreateWindowSurfaceWayland(VkInstance instance,
+ _GLFWwindow* window,
+ const VkAllocationCallbacks* allocator,
+ VkSurfaceKHR* surface)
+{
+ VkResult err;
+ VkWaylandSurfaceCreateInfoKHR sci;
+ PFN_vkCreateWaylandSurfaceKHR vkCreateWaylandSurfaceKHR;
+
+ vkCreateWaylandSurfaceKHR = (PFN_vkCreateWaylandSurfaceKHR)
+ vkGetInstanceProcAddr(instance, "vkCreateWaylandSurfaceKHR");
+ if (!vkCreateWaylandSurfaceKHR)
+ {
+ _glfwInputError(GLFW_API_UNAVAILABLE,
+ "Wayland: Vulkan instance missing VK_KHR_wayland_surface extension");
+ return VK_ERROR_EXTENSION_NOT_PRESENT;
+ }
+
+ memset(&sci, 0, sizeof(sci));
+ sci.sType = VK_STRUCTURE_TYPE_WAYLAND_SURFACE_CREATE_INFO_KHR;
+ sci.display = _glfw.wl.display;
+ sci.surface = window->wl.surface;
+
+ err = vkCreateWaylandSurfaceKHR(instance, &sci, allocator, surface);
+ if (err)
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "Wayland: Failed to create Vulkan surface: %s",
+ _glfwGetVulkanResultString(err));
+ }
+
+ return err;
+}
+
+
+//////////////////////////////////////////////////////////////////////////
+////// GLFW native API //////
+//////////////////////////////////////////////////////////////////////////
+
+GLFWAPI struct wl_display* glfwGetWaylandDisplay(void)
+{
+ _GLFW_REQUIRE_INIT_OR_RETURN(NULL);
+
+ if (_glfw.platform.platformID != GLFW_PLATFORM_WAYLAND)
+ {
+ _glfwInputError(GLFW_PLATFORM_UNAVAILABLE,
+ "Wayland: Platform not initialized");
+ return NULL;
+ }
+
+ return _glfw.wl.display;
+}
+
+GLFWAPI struct wl_surface* glfwGetWaylandWindow(GLFWwindow* handle)
+{
+ _GLFWwindow* window = (_GLFWwindow*) handle;
+ _GLFW_REQUIRE_INIT_OR_RETURN(NULL);
+
+ if (_glfw.platform.platformID != GLFW_PLATFORM_WAYLAND)
+ {
+ _glfwInputError(GLFW_PLATFORM_UNAVAILABLE,
+ "Wayland: Platform not initialized");
+ return NULL;
+ }
+
+ return window->wl.surface;
+}
+
diff --git a/chromium/third_party/dawn/third_party/glfw/src/x11_init.c b/chromium/third_party/dawn/third_party/glfw/src/x11_init.c
new file mode 100644
index 00000000000..aefa82db3a7
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/src/x11_init.c
@@ -0,0 +1,1651 @@
+//========================================================================
+// GLFW 3.4 X11 - www.glfw.org
+//------------------------------------------------------------------------
+// Copyright (c) 2002-2006 Marcus Geelnard
+// Copyright (c) 2006-2019 Camilla Löwy <elmindreda@glfw.org>
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would
+// be appreciated but is not required.
+//
+// 2. Altered source versions must be plainly marked as such, and must not
+// be misrepresented as being the original software.
+//
+// 3. This notice may not be removed or altered from any source
+// distribution.
+//
+//========================================================================
+// It is fine to use C99 in this file because it will not be built with VS
+//========================================================================
+
+#include "internal.h"
+
+#include <stdlib.h>
+#include <string.h>
+#include <limits.h>
+#include <stdio.h>
+#include <locale.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <errno.h>
+
+
+// Translate the X11 KeySyms for a key to a GLFW key code
+// NOTE: This is only used as a fallback, in case the XKB method fails
+// It is layout-dependent and will fail partially on most non-US layouts
+//
+static int translateKeySyms(const KeySym* keysyms, int width)
+{
+ if (width > 1)
+ {
+ switch (keysyms[1])
+ {
+ case XK_KP_0: return GLFW_KEY_KP_0;
+ case XK_KP_1: return GLFW_KEY_KP_1;
+ case XK_KP_2: return GLFW_KEY_KP_2;
+ case XK_KP_3: return GLFW_KEY_KP_3;
+ case XK_KP_4: return GLFW_KEY_KP_4;
+ case XK_KP_5: return GLFW_KEY_KP_5;
+ case XK_KP_6: return GLFW_KEY_KP_6;
+ case XK_KP_7: return GLFW_KEY_KP_7;
+ case XK_KP_8: return GLFW_KEY_KP_8;
+ case XK_KP_9: return GLFW_KEY_KP_9;
+ case XK_KP_Separator:
+ case XK_KP_Decimal: return GLFW_KEY_KP_DECIMAL;
+ case XK_KP_Equal: return GLFW_KEY_KP_EQUAL;
+ case XK_KP_Enter: return GLFW_KEY_KP_ENTER;
+ default: break;
+ }
+ }
+
+ switch (keysyms[0])
+ {
+ case XK_Escape: return GLFW_KEY_ESCAPE;
+ case XK_Tab: return GLFW_KEY_TAB;
+ case XK_Shift_L: return GLFW_KEY_LEFT_SHIFT;
+ case XK_Shift_R: return GLFW_KEY_RIGHT_SHIFT;
+ case XK_Control_L: return GLFW_KEY_LEFT_CONTROL;
+ case XK_Control_R: return GLFW_KEY_RIGHT_CONTROL;
+ case XK_Meta_L:
+ case XK_Alt_L: return GLFW_KEY_LEFT_ALT;
+ case XK_Mode_switch: // Mapped to Alt_R on many keyboards
+ case XK_ISO_Level3_Shift: // AltGr on at least some machines
+ case XK_Meta_R:
+ case XK_Alt_R: return GLFW_KEY_RIGHT_ALT;
+ case XK_Super_L: return GLFW_KEY_LEFT_SUPER;
+ case XK_Super_R: return GLFW_KEY_RIGHT_SUPER;
+ case XK_Menu: return GLFW_KEY_MENU;
+ case XK_Num_Lock: return GLFW_KEY_NUM_LOCK;
+ case XK_Caps_Lock: return GLFW_KEY_CAPS_LOCK;
+ case XK_Print: return GLFW_KEY_PRINT_SCREEN;
+ case XK_Scroll_Lock: return GLFW_KEY_SCROLL_LOCK;
+ case XK_Pause: return GLFW_KEY_PAUSE;
+ case XK_Delete: return GLFW_KEY_DELETE;
+ case XK_BackSpace: return GLFW_KEY_BACKSPACE;
+ case XK_Return: return GLFW_KEY_ENTER;
+ case XK_Home: return GLFW_KEY_HOME;
+ case XK_End: return GLFW_KEY_END;
+ case XK_Page_Up: return GLFW_KEY_PAGE_UP;
+ case XK_Page_Down: return GLFW_KEY_PAGE_DOWN;
+ case XK_Insert: return GLFW_KEY_INSERT;
+ case XK_Left: return GLFW_KEY_LEFT;
+ case XK_Right: return GLFW_KEY_RIGHT;
+ case XK_Down: return GLFW_KEY_DOWN;
+ case XK_Up: return GLFW_KEY_UP;
+ case XK_F1: return GLFW_KEY_F1;
+ case XK_F2: return GLFW_KEY_F2;
+ case XK_F3: return GLFW_KEY_F3;
+ case XK_F4: return GLFW_KEY_F4;
+ case XK_F5: return GLFW_KEY_F5;
+ case XK_F6: return GLFW_KEY_F6;
+ case XK_F7: return GLFW_KEY_F7;
+ case XK_F8: return GLFW_KEY_F8;
+ case XK_F9: return GLFW_KEY_F9;
+ case XK_F10: return GLFW_KEY_F10;
+ case XK_F11: return GLFW_KEY_F11;
+ case XK_F12: return GLFW_KEY_F12;
+ case XK_F13: return GLFW_KEY_F13;
+ case XK_F14: return GLFW_KEY_F14;
+ case XK_F15: return GLFW_KEY_F15;
+ case XK_F16: return GLFW_KEY_F16;
+ case XK_F17: return GLFW_KEY_F17;
+ case XK_F18: return GLFW_KEY_F18;
+ case XK_F19: return GLFW_KEY_F19;
+ case XK_F20: return GLFW_KEY_F20;
+ case XK_F21: return GLFW_KEY_F21;
+ case XK_F22: return GLFW_KEY_F22;
+ case XK_F23: return GLFW_KEY_F23;
+ case XK_F24: return GLFW_KEY_F24;
+ case XK_F25: return GLFW_KEY_F25;
+
+ // Numeric keypad
+ case XK_KP_Divide: return GLFW_KEY_KP_DIVIDE;
+ case XK_KP_Multiply: return GLFW_KEY_KP_MULTIPLY;
+ case XK_KP_Subtract: return GLFW_KEY_KP_SUBTRACT;
+ case XK_KP_Add: return GLFW_KEY_KP_ADD;
+
+        // These should have been detected in the secondary keysym test above!
+ case XK_KP_Insert: return GLFW_KEY_KP_0;
+ case XK_KP_End: return GLFW_KEY_KP_1;
+ case XK_KP_Down: return GLFW_KEY_KP_2;
+ case XK_KP_Page_Down: return GLFW_KEY_KP_3;
+ case XK_KP_Left: return GLFW_KEY_KP_4;
+ case XK_KP_Right: return GLFW_KEY_KP_6;
+ case XK_KP_Home: return GLFW_KEY_KP_7;
+ case XK_KP_Up: return GLFW_KEY_KP_8;
+ case XK_KP_Page_Up: return GLFW_KEY_KP_9;
+ case XK_KP_Delete: return GLFW_KEY_KP_DECIMAL;
+ case XK_KP_Equal: return GLFW_KEY_KP_EQUAL;
+ case XK_KP_Enter: return GLFW_KEY_KP_ENTER;
+
+ // Last resort: Check for printable keys (should not happen if the XKB
+ // extension is available). This will give a layout dependent mapping
+ // (which is wrong, and we may miss some keys, especially on non-US
+ // keyboards), but it's better than nothing...
+ case XK_a: return GLFW_KEY_A;
+ case XK_b: return GLFW_KEY_B;
+ case XK_c: return GLFW_KEY_C;
+ case XK_d: return GLFW_KEY_D;
+ case XK_e: return GLFW_KEY_E;
+ case XK_f: return GLFW_KEY_F;
+ case XK_g: return GLFW_KEY_G;
+ case XK_h: return GLFW_KEY_H;
+ case XK_i: return GLFW_KEY_I;
+ case XK_j: return GLFW_KEY_J;
+ case XK_k: return GLFW_KEY_K;
+ case XK_l: return GLFW_KEY_L;
+ case XK_m: return GLFW_KEY_M;
+ case XK_n: return GLFW_KEY_N;
+ case XK_o: return GLFW_KEY_O;
+ case XK_p: return GLFW_KEY_P;
+ case XK_q: return GLFW_KEY_Q;
+ case XK_r: return GLFW_KEY_R;
+ case XK_s: return GLFW_KEY_S;
+ case XK_t: return GLFW_KEY_T;
+ case XK_u: return GLFW_KEY_U;
+ case XK_v: return GLFW_KEY_V;
+ case XK_w: return GLFW_KEY_W;
+ case XK_x: return GLFW_KEY_X;
+ case XK_y: return GLFW_KEY_Y;
+ case XK_z: return GLFW_KEY_Z;
+ case XK_1: return GLFW_KEY_1;
+ case XK_2: return GLFW_KEY_2;
+ case XK_3: return GLFW_KEY_3;
+ case XK_4: return GLFW_KEY_4;
+ case XK_5: return GLFW_KEY_5;
+ case XK_6: return GLFW_KEY_6;
+ case XK_7: return GLFW_KEY_7;
+ case XK_8: return GLFW_KEY_8;
+ case XK_9: return GLFW_KEY_9;
+ case XK_0: return GLFW_KEY_0;
+ case XK_space: return GLFW_KEY_SPACE;
+ case XK_minus: return GLFW_KEY_MINUS;
+ case XK_equal: return GLFW_KEY_EQUAL;
+ case XK_bracketleft: return GLFW_KEY_LEFT_BRACKET;
+ case XK_bracketright: return GLFW_KEY_RIGHT_BRACKET;
+ case XK_backslash: return GLFW_KEY_BACKSLASH;
+ case XK_semicolon: return GLFW_KEY_SEMICOLON;
+ case XK_apostrophe: return GLFW_KEY_APOSTROPHE;
+ case XK_grave: return GLFW_KEY_GRAVE_ACCENT;
+ case XK_comma: return GLFW_KEY_COMMA;
+ case XK_period: return GLFW_KEY_PERIOD;
+ case XK_slash: return GLFW_KEY_SLASH;
+ case XK_less: return GLFW_KEY_WORLD_1; // At least in some layouts...
+ default: break;
+ }
+
+ // No matching translation was found
+ return GLFW_KEY_UNKNOWN;
+}
+
+// Create key code translation tables
+//
+static void createKeyTables(void)
+{
+ int scancodeMin, scancodeMax;
+
+ memset(_glfw.x11.keycodes, -1, sizeof(_glfw.x11.keycodes));
+ memset(_glfw.x11.scancodes, -1, sizeof(_glfw.x11.scancodes));
+
+ if (_glfw.x11.xkb.available)
+ {
+ // Use XKB to determine physical key locations independently of the
+ // current keyboard layout
+
+ XkbDescPtr desc = XkbGetMap(_glfw.x11.display, 0, XkbUseCoreKbd);
+ XkbGetNames(_glfw.x11.display, XkbKeyNamesMask | XkbKeyAliasesMask, desc);
+
+ scancodeMin = desc->min_key_code;
+ scancodeMax = desc->max_key_code;
+
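+        // Mapping of XKB key names to GLFW key codes, based on where the keys
+        // are located on a US keyboard layout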
+ const struct
+ {
+ int key;
+ char* name;
+ } keymap[] =
+ {
+ { GLFW_KEY_GRAVE_ACCENT, "TLDE" },
+ { GLFW_KEY_1, "AE01" },
+ { GLFW_KEY_2, "AE02" },
+ { GLFW_KEY_3, "AE03" },
+ { GLFW_KEY_4, "AE04" },
+ { GLFW_KEY_5, "AE05" },
+ { GLFW_KEY_6, "AE06" },
+ { GLFW_KEY_7, "AE07" },
+ { GLFW_KEY_8, "AE08" },
+ { GLFW_KEY_9, "AE09" },
+ { GLFW_KEY_0, "AE10" },
+ { GLFW_KEY_MINUS, "AE11" },
+ { GLFW_KEY_EQUAL, "AE12" },
+ { GLFW_KEY_Q, "AD01" },
+ { GLFW_KEY_W, "AD02" },
+ { GLFW_KEY_E, "AD03" },
+ { GLFW_KEY_R, "AD04" },
+ { GLFW_KEY_T, "AD05" },
+ { GLFW_KEY_Y, "AD06" },
+ { GLFW_KEY_U, "AD07" },
+ { GLFW_KEY_I, "AD08" },
+ { GLFW_KEY_O, "AD09" },
+ { GLFW_KEY_P, "AD10" },
+ { GLFW_KEY_LEFT_BRACKET, "AD11" },
+ { GLFW_KEY_RIGHT_BRACKET, "AD12" },
+ { GLFW_KEY_A, "AC01" },
+ { GLFW_KEY_S, "AC02" },
+ { GLFW_KEY_D, "AC03" },
+ { GLFW_KEY_F, "AC04" },
+ { GLFW_KEY_G, "AC05" },
+ { GLFW_KEY_H, "AC06" },
+ { GLFW_KEY_J, "AC07" },
+ { GLFW_KEY_K, "AC08" },
+ { GLFW_KEY_L, "AC09" },
+ { GLFW_KEY_SEMICOLON, "AC10" },
+ { GLFW_KEY_APOSTROPHE, "AC11" },
+ { GLFW_KEY_Z, "AB01" },
+ { GLFW_KEY_X, "AB02" },
+ { GLFW_KEY_C, "AB03" },
+ { GLFW_KEY_V, "AB04" },
+ { GLFW_KEY_B, "AB05" },
+ { GLFW_KEY_N, "AB06" },
+ { GLFW_KEY_M, "AB07" },
+ { GLFW_KEY_COMMA, "AB08" },
+ { GLFW_KEY_PERIOD, "AB09" },
+ { GLFW_KEY_SLASH, "AB10" },
+ { GLFW_KEY_BACKSLASH, "BKSL" },
+ { GLFW_KEY_WORLD_1, "LSGT" },
+ { GLFW_KEY_SPACE, "SPCE" },
+ { GLFW_KEY_ESCAPE, "ESC" },
+ { GLFW_KEY_ENTER, "RTRN" },
+ { GLFW_KEY_TAB, "TAB" },
+ { GLFW_KEY_BACKSPACE, "BKSP" },
+ { GLFW_KEY_INSERT, "INS" },
+ { GLFW_KEY_DELETE, "DELE" },
+ { GLFW_KEY_RIGHT, "RGHT" },
+ { GLFW_KEY_LEFT, "LEFT" },
+ { GLFW_KEY_DOWN, "DOWN" },
+ { GLFW_KEY_UP, "UP" },
+ { GLFW_KEY_PAGE_UP, "PGUP" },
+ { GLFW_KEY_PAGE_DOWN, "PGDN" },
+ { GLFW_KEY_HOME, "HOME" },
+ { GLFW_KEY_END, "END" },
+ { GLFW_KEY_CAPS_LOCK, "CAPS" },
+ { GLFW_KEY_SCROLL_LOCK, "SCLK" },
+ { GLFW_KEY_NUM_LOCK, "NMLK" },
+ { GLFW_KEY_PRINT_SCREEN, "PRSC" },
+ { GLFW_KEY_PAUSE, "PAUS" },
+ { GLFW_KEY_F1, "FK01" },
+ { GLFW_KEY_F2, "FK02" },
+ { GLFW_KEY_F3, "FK03" },
+ { GLFW_KEY_F4, "FK04" },
+ { GLFW_KEY_F5, "FK05" },
+ { GLFW_KEY_F6, "FK06" },
+ { GLFW_KEY_F7, "FK07" },
+ { GLFW_KEY_F8, "FK08" },
+ { GLFW_KEY_F9, "FK09" },
+ { GLFW_KEY_F10, "FK10" },
+ { GLFW_KEY_F11, "FK11" },
+ { GLFW_KEY_F12, "FK12" },
+ { GLFW_KEY_F13, "FK13" },
+ { GLFW_KEY_F14, "FK14" },
+ { GLFW_KEY_F15, "FK15" },
+ { GLFW_KEY_F16, "FK16" },
+ { GLFW_KEY_F17, "FK17" },
+ { GLFW_KEY_F18, "FK18" },
+ { GLFW_KEY_F19, "FK19" },
+ { GLFW_KEY_F20, "FK20" },
+ { GLFW_KEY_F21, "FK21" },
+ { GLFW_KEY_F22, "FK22" },
+ { GLFW_KEY_F23, "FK23" },
+ { GLFW_KEY_F24, "FK24" },
+ { GLFW_KEY_F25, "FK25" },
+ { GLFW_KEY_KP_0, "KP0" },
+ { GLFW_KEY_KP_1, "KP1" },
+ { GLFW_KEY_KP_2, "KP2" },
+ { GLFW_KEY_KP_3, "KP3" },
+ { GLFW_KEY_KP_4, "KP4" },
+ { GLFW_KEY_KP_5, "KP5" },
+ { GLFW_KEY_KP_6, "KP6" },
+ { GLFW_KEY_KP_7, "KP7" },
+ { GLFW_KEY_KP_8, "KP8" },
+ { GLFW_KEY_KP_9, "KP9" },
+ { GLFW_KEY_KP_DECIMAL, "KPDL" },
+ { GLFW_KEY_KP_DIVIDE, "KPDV" },
+ { GLFW_KEY_KP_MULTIPLY, "KPMU" },
+ { GLFW_KEY_KP_SUBTRACT, "KPSU" },
+ { GLFW_KEY_KP_ADD, "KPAD" },
+ { GLFW_KEY_KP_ENTER, "KPEN" },
+ { GLFW_KEY_KP_EQUAL, "KPEQ" },
+ { GLFW_KEY_LEFT_SHIFT, "LFSH" },
+ { GLFW_KEY_LEFT_CONTROL, "LCTL" },
+ { GLFW_KEY_LEFT_ALT, "LALT" },
+ { GLFW_KEY_LEFT_SUPER, "LWIN" },
+ { GLFW_KEY_RIGHT_SHIFT, "RTSH" },
+ { GLFW_KEY_RIGHT_CONTROL, "RCTL" },
+ { GLFW_KEY_RIGHT_ALT, "RALT" },
+ { GLFW_KEY_RIGHT_ALT, "LVL3" },
+ { GLFW_KEY_RIGHT_ALT, "MDSW" },
+ { GLFW_KEY_RIGHT_SUPER, "RWIN" },
+ { GLFW_KEY_MENU, "MENU" }
+ };
+
+ // Find the X11 key code -> GLFW key code mapping
+ for (int scancode = scancodeMin; scancode <= scancodeMax; scancode++)
+ {
+ int key = GLFW_KEY_UNKNOWN;
+
+ // Map the key name to a GLFW key code. Note: We use the US
+ // keyboard layout. Because function keys aren't mapped correctly
+ // when using traditional KeySym translations, they are mapped
+ // here instead.
+ for (int i = 0; i < sizeof(keymap) / sizeof(keymap[0]); i++)
+ {
+ if (strncmp(desc->names->keys[scancode].name,
+ keymap[i].name,
+ XkbKeyNameLength) == 0)
+ {
+ key = keymap[i].key;
+ break;
+ }
+ }
+
+ // Fall back to key aliases in case the key name did not match
+ for (int i = 0; i < desc->names->num_key_aliases; i++)
+ {
+ if (key != GLFW_KEY_UNKNOWN)
+ break;
+
+ if (strncmp(desc->names->key_aliases[i].real,
+ desc->names->keys[scancode].name,
+ XkbKeyNameLength) != 0)
+ {
+ continue;
+ }
+
+ for (int j = 0; j < sizeof(keymap) / sizeof(keymap[0]); j++)
+ {
+ if (strncmp(desc->names->key_aliases[i].alias,
+ keymap[j].name,
+ XkbKeyNameLength) == 0)
+ {
+ key = keymap[j].key;
+ break;
+ }
+ }
+ }
+
+ _glfw.x11.keycodes[scancode] = key;
+ }
+
+ XkbFreeNames(desc, XkbKeyNamesMask, True);
+ XkbFreeKeyboard(desc, 0, True);
+ }
+ else
+ XDisplayKeycodes(_glfw.x11.display, &scancodeMin, &scancodeMax);
+
+ int width;
+ KeySym* keysyms = XGetKeyboardMapping(_glfw.x11.display,
+ scancodeMin,
+ scancodeMax - scancodeMin + 1,
+ &width);
+
+ for (int scancode = scancodeMin; scancode <= scancodeMax; scancode++)
+ {
+ // Translate the un-translated key codes using traditional X11 KeySym
+ // lookups
+ if (_glfw.x11.keycodes[scancode] < 0)
+ {
+ const size_t base = (scancode - scancodeMin) * width;
+ _glfw.x11.keycodes[scancode] = translateKeySyms(&keysyms[base], width);
+ }
+
+ // Store the reverse translation for faster key name lookup
+ if (_glfw.x11.keycodes[scancode] > 0)
+ _glfw.x11.scancodes[_glfw.x11.keycodes[scancode]] = scancode;
+ }
+
+ XFree(keysyms);
+}
+
+// Check whether the IM has a usable style
+//
+static GLFWbool hasUsableInputMethodStyle(void)
+{
+ GLFWbool found = GLFW_FALSE;
+ XIMStyles* styles = NULL;
+
+ if (XGetIMValues(_glfw.x11.im, XNQueryInputStyle, &styles, NULL) != NULL)
+ return GLFW_FALSE;
+
+ for (unsigned int i = 0; i < styles->count_styles; i++)
+ {
+ if (styles->supported_styles[i] == (XIMPreeditNothing | XIMStatusNothing))
+ {
+ found = GLFW_TRUE;
+ break;
+ }
+ }
+
+ XFree(styles);
+ return found;
+}
+
+static void inputMethodDestroyCallback(XIM im, XPointer clientData, XPointer callData)
+{
+ _glfw.x11.im = NULL;
+}
+
+static void inputMethodInstantiateCallback(Display* display,
+ XPointer clientData,
+ XPointer callData)
+{
+ if (_glfw.x11.im)
+ return;
+
+ _glfw.x11.im = XOpenIM(_glfw.x11.display, 0, NULL, NULL);
+ if (_glfw.x11.im)
+ {
+ if (!hasUsableInputMethodStyle())
+ {
+ XCloseIM(_glfw.x11.im);
+ _glfw.x11.im = NULL;
+ }
+ }
+
+ if (_glfw.x11.im)
+ {
+ XIMCallback callback;
+ callback.callback = (XIMProc) inputMethodDestroyCallback;
+ callback.client_data = NULL;
+ XSetIMValues(_glfw.x11.im, XNDestroyCallback, &callback, NULL);
+
+ for (_GLFWwindow* window = _glfw.windowListHead; window; window = window->next)
+ _glfwCreateInputContextX11(window);
+ }
+}
+
+// Return the atom ID only if it is listed in the specified array
+//
+static Atom getAtomIfSupported(Atom* supportedAtoms,
+ unsigned long atomCount,
+ const char* atomName)
+{
+ const Atom atom = XInternAtom(_glfw.x11.display, atomName, False);
+
+ for (unsigned long i = 0; i < atomCount; i++)
+ {
+ if (supportedAtoms[i] == atom)
+ return atom;
+ }
+
+ return None;
+}
+
+// Check whether the running window manager is EWMH-compliant
+//
+static void detectEWMH(void)
+{
+ // First we read the _NET_SUPPORTING_WM_CHECK property on the root window
+
+ Window* windowFromRoot = NULL;
+ if (!_glfwGetWindowPropertyX11(_glfw.x11.root,
+ _glfw.x11.NET_SUPPORTING_WM_CHECK,
+ XA_WINDOW,
+ (unsigned char**) &windowFromRoot))
+ {
+ return;
+ }
+
+ _glfwGrabErrorHandlerX11();
+
+ // If it exists, it should be the XID of a top-level window
+ // Then we look for the same property on that window
+
+ Window* windowFromChild = NULL;
+ if (!_glfwGetWindowPropertyX11(*windowFromRoot,
+ _glfw.x11.NET_SUPPORTING_WM_CHECK,
+ XA_WINDOW,
+ (unsigned char**) &windowFromChild))
+ {
+ XFree(windowFromRoot);
+ return;
+ }
+
+ _glfwReleaseErrorHandlerX11();
+
+ // If the property exists, it should contain the XID of the window
+
+ if (*windowFromRoot != *windowFromChild)
+ {
+ XFree(windowFromRoot);
+ XFree(windowFromChild);
+ return;
+ }
+
+ XFree(windowFromRoot);
+ XFree(windowFromChild);
+
+ // We are now fairly sure that an EWMH-compliant WM is currently running
+ // We can now start querying the WM about what features it supports by
+ // looking in the _NET_SUPPORTED property on the root window
+ // It should contain a list of supported EWMH protocol and state atoms
+
+ Atom* supportedAtoms = NULL;
+ const unsigned long atomCount =
+ _glfwGetWindowPropertyX11(_glfw.x11.root,
+ _glfw.x11.NET_SUPPORTED,
+ XA_ATOM,
+ (unsigned char**) &supportedAtoms);
+
+ // See which of the atoms we support that are supported by the WM
+
+ _glfw.x11.NET_WM_STATE =
+ getAtomIfSupported(supportedAtoms, atomCount, "_NET_WM_STATE");
+ _glfw.x11.NET_WM_STATE_ABOVE =
+ getAtomIfSupported(supportedAtoms, atomCount, "_NET_WM_STATE_ABOVE");
+ _glfw.x11.NET_WM_STATE_FULLSCREEN =
+ getAtomIfSupported(supportedAtoms, atomCount, "_NET_WM_STATE_FULLSCREEN");
+ _glfw.x11.NET_WM_STATE_MAXIMIZED_VERT =
+ getAtomIfSupported(supportedAtoms, atomCount, "_NET_WM_STATE_MAXIMIZED_VERT");
+ _glfw.x11.NET_WM_STATE_MAXIMIZED_HORZ =
+ getAtomIfSupported(supportedAtoms, atomCount, "_NET_WM_STATE_MAXIMIZED_HORZ");
+ _glfw.x11.NET_WM_STATE_DEMANDS_ATTENTION =
+ getAtomIfSupported(supportedAtoms, atomCount, "_NET_WM_STATE_DEMANDS_ATTENTION");
+ _glfw.x11.NET_WM_FULLSCREEN_MONITORS =
+ getAtomIfSupported(supportedAtoms, atomCount, "_NET_WM_FULLSCREEN_MONITORS");
+ _glfw.x11.NET_WM_WINDOW_TYPE =
+ getAtomIfSupported(supportedAtoms, atomCount, "_NET_WM_WINDOW_TYPE");
+ _glfw.x11.NET_WM_WINDOW_TYPE_NORMAL =
+ getAtomIfSupported(supportedAtoms, atomCount, "_NET_WM_WINDOW_TYPE_NORMAL");
+ _glfw.x11.NET_WORKAREA =
+ getAtomIfSupported(supportedAtoms, atomCount, "_NET_WORKAREA");
+ _glfw.x11.NET_CURRENT_DESKTOP =
+ getAtomIfSupported(supportedAtoms, atomCount, "_NET_CURRENT_DESKTOP");
+ _glfw.x11.NET_ACTIVE_WINDOW =
+ getAtomIfSupported(supportedAtoms, atomCount, "_NET_ACTIVE_WINDOW");
+ _glfw.x11.NET_FRAME_EXTENTS =
+ getAtomIfSupported(supportedAtoms, atomCount, "_NET_FRAME_EXTENTS");
+ _glfw.x11.NET_REQUEST_FRAME_EXTENTS =
+ getAtomIfSupported(supportedAtoms, atomCount, "_NET_REQUEST_FRAME_EXTENTS");
+
+ if (supportedAtoms)
+ XFree(supportedAtoms);
+}
+
+// Look for and initialize supported X11 extensions
+//
+static GLFWbool initExtensions(void)
+{
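+    // The X11 extension libraries are loaded dynamically at runtime so that
+    // GLFW still works on systems where some of them are missing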
+#if defined(__OpenBSD__) || defined(__NetBSD__)
+ _glfw.x11.vidmode.handle = _glfwPlatformLoadModule("libXxf86vm.so");
+#else
+ _glfw.x11.vidmode.handle = _glfwPlatformLoadModule("libXxf86vm.so.1");
+#endif
+ if (_glfw.x11.vidmode.handle)
+ {
+ _glfw.x11.vidmode.QueryExtension = (PFN_XF86VidModeQueryExtension)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.vidmode.handle, "XF86VidModeQueryExtension");
+ _glfw.x11.vidmode.GetGammaRamp = (PFN_XF86VidModeGetGammaRamp)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.vidmode.handle, "XF86VidModeGetGammaRamp");
+ _glfw.x11.vidmode.SetGammaRamp = (PFN_XF86VidModeSetGammaRamp)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.vidmode.handle, "XF86VidModeSetGammaRamp");
+ _glfw.x11.vidmode.GetGammaRampSize = (PFN_XF86VidModeGetGammaRampSize)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.vidmode.handle, "XF86VidModeGetGammaRampSize");
+
+ _glfw.x11.vidmode.available =
+ XF86VidModeQueryExtension(_glfw.x11.display,
+ &_glfw.x11.vidmode.eventBase,
+ &_glfw.x11.vidmode.errorBase);
+ }
+
+#if defined(__CYGWIN__)
+ _glfw.x11.xi.handle = _glfwPlatformLoadModule("libXi-6.so");
+#elif defined(__OpenBSD__) || defined(__NetBSD__)
+ _glfw.x11.xi.handle = _glfwPlatformLoadModule("libXi.so");
+#else
+ _glfw.x11.xi.handle = _glfwPlatformLoadModule("libXi.so.6");
+#endif
+ if (_glfw.x11.xi.handle)
+ {
+ _glfw.x11.xi.QueryVersion = (PFN_XIQueryVersion)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xi.handle, "XIQueryVersion");
+ _glfw.x11.xi.SelectEvents = (PFN_XISelectEvents)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xi.handle, "XISelectEvents");
+
+ if (XQueryExtension(_glfw.x11.display,
+ "XInputExtension",
+ &_glfw.x11.xi.majorOpcode,
+ &_glfw.x11.xi.eventBase,
+ &_glfw.x11.xi.errorBase))
+ {
+ _glfw.x11.xi.major = 2;
+ _glfw.x11.xi.minor = 0;
+
+ if (XIQueryVersion(_glfw.x11.display,
+ &_glfw.x11.xi.major,
+ &_glfw.x11.xi.minor) == Success)
+ {
+ _glfw.x11.xi.available = GLFW_TRUE;
+ }
+ }
+ }
+
+#if defined(__CYGWIN__)
+ _glfw.x11.randr.handle = _glfwPlatformLoadModule("libXrandr-2.so");
+#elif defined(__OpenBSD__) || defined(__NetBSD__)
+ _glfw.x11.randr.handle = _glfwPlatformLoadModule("libXrandr.so");
+#else
+ _glfw.x11.randr.handle = _glfwPlatformLoadModule("libXrandr.so.2");
+#endif
+ if (_glfw.x11.randr.handle)
+ {
+ _glfw.x11.randr.AllocGamma = (PFN_XRRAllocGamma)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.randr.handle, "XRRAllocGamma");
+ _glfw.x11.randr.FreeGamma = (PFN_XRRFreeGamma)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.randr.handle, "XRRFreeGamma");
+ _glfw.x11.randr.FreeCrtcInfo = (PFN_XRRFreeCrtcInfo)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.randr.handle, "XRRFreeCrtcInfo");
+ _glfw.x11.randr.FreeOutputInfo = (PFN_XRRFreeOutputInfo)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.randr.handle, "XRRFreeOutputInfo");
+ _glfw.x11.randr.FreeScreenResources = (PFN_XRRFreeScreenResources)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.randr.handle, "XRRFreeScreenResources");
+ _glfw.x11.randr.GetCrtcGamma = (PFN_XRRGetCrtcGamma)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.randr.handle, "XRRGetCrtcGamma");
+ _glfw.x11.randr.GetCrtcGammaSize = (PFN_XRRGetCrtcGammaSize)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.randr.handle, "XRRGetCrtcGammaSize");
+ _glfw.x11.randr.GetCrtcInfo = (PFN_XRRGetCrtcInfo)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.randr.handle, "XRRGetCrtcInfo");
+ _glfw.x11.randr.GetOutputInfo = (PFN_XRRGetOutputInfo)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.randr.handle, "XRRGetOutputInfo");
+ _glfw.x11.randr.GetOutputPrimary = (PFN_XRRGetOutputPrimary)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.randr.handle, "XRRGetOutputPrimary");
+ _glfw.x11.randr.GetScreenResourcesCurrent = (PFN_XRRGetScreenResourcesCurrent)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.randr.handle, "XRRGetScreenResourcesCurrent");
+ _glfw.x11.randr.QueryExtension = (PFN_XRRQueryExtension)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.randr.handle, "XRRQueryExtension");
+ _glfw.x11.randr.QueryVersion = (PFN_XRRQueryVersion)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.randr.handle, "XRRQueryVersion");
+ _glfw.x11.randr.SelectInput = (PFN_XRRSelectInput)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.randr.handle, "XRRSelectInput");
+ _glfw.x11.randr.SetCrtcConfig = (PFN_XRRSetCrtcConfig)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.randr.handle, "XRRSetCrtcConfig");
+ _glfw.x11.randr.SetCrtcGamma = (PFN_XRRSetCrtcGamma)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.randr.handle, "XRRSetCrtcGamma");
+ _glfw.x11.randr.UpdateConfiguration = (PFN_XRRUpdateConfiguration)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.randr.handle, "XRRUpdateConfiguration");
+
+ if (XRRQueryExtension(_glfw.x11.display,
+ &_glfw.x11.randr.eventBase,
+ &_glfw.x11.randr.errorBase))
+ {
+ if (XRRQueryVersion(_glfw.x11.display,
+ &_glfw.x11.randr.major,
+ &_glfw.x11.randr.minor))
+ {
+ // The GLFW RandR path requires at least version 1.3
+ if (_glfw.x11.randr.major > 1 || _glfw.x11.randr.minor >= 3)
+ _glfw.x11.randr.available = GLFW_TRUE;
+ }
+ else
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "X11: Failed to query RandR version");
+ }
+ }
+ }
+
+ if (_glfw.x11.randr.available)
+ {
+ XRRScreenResources* sr = XRRGetScreenResourcesCurrent(_glfw.x11.display,
+ _glfw.x11.root);
+
+ if (!sr->ncrtc || !XRRGetCrtcGammaSize(_glfw.x11.display, sr->crtcs[0]))
+ {
+ // This is likely an older Nvidia driver with broken gamma support
+ // Flag it as useless and fall back to xf86vm gamma, if available
+ _glfw.x11.randr.gammaBroken = GLFW_TRUE;
+ }
+
+ if (!sr->ncrtc)
+ {
+ // A system without CRTCs is likely a system with broken RandR
+ // Disable the RandR monitor path and fall back to core functions
+ _glfw.x11.randr.monitorBroken = GLFW_TRUE;
+ }
+
+ XRRFreeScreenResources(sr);
+ }
+
+ if (_glfw.x11.randr.available && !_glfw.x11.randr.monitorBroken)
+ {
+ XRRSelectInput(_glfw.x11.display, _glfw.x11.root,
+ RROutputChangeNotifyMask);
+ }
+
+#if defined(__CYGWIN__)
+ _glfw.x11.xcursor.handle = _glfwPlatformLoadModule("libXcursor-1.so");
+#elif defined(__OpenBSD__) || defined(__NetBSD__)
+ _glfw.x11.xcursor.handle = _glfwPlatformLoadModule("libXcursor.so");
+#else
+ _glfw.x11.xcursor.handle = _glfwPlatformLoadModule("libXcursor.so.1");
+#endif
+ if (_glfw.x11.xcursor.handle)
+ {
+ _glfw.x11.xcursor.ImageCreate = (PFN_XcursorImageCreate)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xcursor.handle, "XcursorImageCreate");
+ _glfw.x11.xcursor.ImageDestroy = (PFN_XcursorImageDestroy)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xcursor.handle, "XcursorImageDestroy");
+ _glfw.x11.xcursor.ImageLoadCursor = (PFN_XcursorImageLoadCursor)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xcursor.handle, "XcursorImageLoadCursor");
+ _glfw.x11.xcursor.GetTheme = (PFN_XcursorGetTheme)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xcursor.handle, "XcursorGetTheme");
+ _glfw.x11.xcursor.GetDefaultSize = (PFN_XcursorGetDefaultSize)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xcursor.handle, "XcursorGetDefaultSize");
+ _glfw.x11.xcursor.LibraryLoadImage = (PFN_XcursorLibraryLoadImage)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xcursor.handle, "XcursorLibraryLoadImage");
+ }
+
+#if defined(__CYGWIN__)
+ _glfw.x11.xinerama.handle = _glfwPlatformLoadModule("libXinerama-1.so");
+#elif defined(__OpenBSD__) || defined(__NetBSD__)
+ _glfw.x11.xinerama.handle = _glfwPlatformLoadModule("libXinerama.so");
+#else
+ _glfw.x11.xinerama.handle = _glfwPlatformLoadModule("libXinerama.so.1");
+#endif
+ if (_glfw.x11.xinerama.handle)
+ {
+ _glfw.x11.xinerama.IsActive = (PFN_XineramaIsActive)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xinerama.handle, "XineramaIsActive");
+ _glfw.x11.xinerama.QueryExtension = (PFN_XineramaQueryExtension)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xinerama.handle, "XineramaQueryExtension");
+ _glfw.x11.xinerama.QueryScreens = (PFN_XineramaQueryScreens)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xinerama.handle, "XineramaQueryScreens");
+
+ if (XineramaQueryExtension(_glfw.x11.display,
+ &_glfw.x11.xinerama.major,
+ &_glfw.x11.xinerama.minor))
+ {
+ if (XineramaIsActive(_glfw.x11.display))
+ _glfw.x11.xinerama.available = GLFW_TRUE;
+ }
+ }
+
+ _glfw.x11.xkb.major = 1;
+ _glfw.x11.xkb.minor = 0;
+ _glfw.x11.xkb.available =
+ XkbQueryExtension(_glfw.x11.display,
+ &_glfw.x11.xkb.majorOpcode,
+ &_glfw.x11.xkb.eventBase,
+ &_glfw.x11.xkb.errorBase,
+ &_glfw.x11.xkb.major,
+ &_glfw.x11.xkb.minor);
+
+ if (_glfw.x11.xkb.available)
+ {
+ Bool supported;
+
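+        // With detectable auto-repeat the server stops sending synthetic key
+        // release events while a key is held down, so a KeyRelease always
+        // means the key was actually released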
+ if (XkbSetDetectableAutoRepeat(_glfw.x11.display, True, &supported))
+ {
+ if (supported)
+ _glfw.x11.xkb.detectable = GLFW_TRUE;
+ }
+
+ XkbStateRec state;
+ if (XkbGetState(_glfw.x11.display, XkbUseCoreKbd, &state) == Success)
+ _glfw.x11.xkb.group = (unsigned int)state.group;
+
+ XkbSelectEventDetails(_glfw.x11.display, XkbUseCoreKbd, XkbStateNotify,
+ XkbGroupStateMask, XkbGroupStateMask);
+ }
+
+ if (_glfw.hints.init.x11.xcbVulkanSurface)
+ {
+#if defined(__CYGWIN__)
+ _glfw.x11.x11xcb.handle = _glfwPlatformLoadModule("libX11-xcb-1.so");
+#elif defined(__OpenBSD__) || defined(__NetBSD__)
+ _glfw.x11.x11xcb.handle = _glfwPlatformLoadModule("libX11-xcb.so");
+#else
+ _glfw.x11.x11xcb.handle = _glfwPlatformLoadModule("libX11-xcb.so.1");
+#endif
+ }
+
+ if (_glfw.x11.x11xcb.handle)
+ {
+ _glfw.x11.x11xcb.GetXCBConnection = (PFN_XGetXCBConnection)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.x11xcb.handle, "XGetXCBConnection");
+ }
+
+#if defined(__CYGWIN__)
+ _glfw.x11.xrender.handle = _glfwPlatformLoadModule("libXrender-1.so");
+#elif defined(__OpenBSD__) || defined(__NetBSD__)
+ _glfw.x11.xrender.handle = _glfwPlatformLoadModule("libXrender.so");
+#else
+ _glfw.x11.xrender.handle = _glfwPlatformLoadModule("libXrender.so.1");
+#endif
+ if (_glfw.x11.xrender.handle)
+ {
+ _glfw.x11.xrender.QueryExtension = (PFN_XRenderQueryExtension)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xrender.handle, "XRenderQueryExtension");
+ _glfw.x11.xrender.QueryVersion = (PFN_XRenderQueryVersion)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xrender.handle, "XRenderQueryVersion");
+ _glfw.x11.xrender.FindVisualFormat = (PFN_XRenderFindVisualFormat)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xrender.handle, "XRenderFindVisualFormat");
+
+ if (XRenderQueryExtension(_glfw.x11.display,
+ &_glfw.x11.xrender.errorBase,
+ &_glfw.x11.xrender.eventBase))
+ {
+ if (XRenderQueryVersion(_glfw.x11.display,
+ &_glfw.x11.xrender.major,
+ &_glfw.x11.xrender.minor))
+ {
+ _glfw.x11.xrender.available = GLFW_TRUE;
+ }
+ }
+ }
+
+#if defined(__CYGWIN__)
+ _glfw.x11.xshape.handle = _glfwPlatformLoadModule("libXext-6.so");
+#elif defined(__OpenBSD__) || defined(__NetBSD__)
+ _glfw.x11.xshape.handle = _glfwPlatformLoadModule("libXext.so");
+#else
+ _glfw.x11.xshape.handle = _glfwPlatformLoadModule("libXext.so.6");
+#endif
+ if (_glfw.x11.xshape.handle)
+ {
+ _glfw.x11.xshape.QueryExtension = (PFN_XShapeQueryExtension)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xshape.handle, "XShapeQueryExtension");
+ _glfw.x11.xshape.ShapeCombineRegion = (PFN_XShapeCombineRegion)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xshape.handle, "XShapeCombineRegion");
+ _glfw.x11.xshape.QueryVersion = (PFN_XShapeQueryVersion)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xshape.handle, "XShapeQueryVersion");
+ _glfw.x11.xshape.ShapeCombineMask = (PFN_XShapeCombineMask)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xshape.handle, "XShapeCombineMask");
+
+ if (XShapeQueryExtension(_glfw.x11.display,
+ &_glfw.x11.xshape.errorBase,
+ &_glfw.x11.xshape.eventBase))
+ {
+ if (XShapeQueryVersion(_glfw.x11.display,
+ &_glfw.x11.xshape.major,
+ &_glfw.x11.xshape.minor))
+ {
+ _glfw.x11.xshape.available = GLFW_TRUE;
+ }
+ }
+ }
+
+ // Update the key code LUT
+ // FIXME: We should listen to XkbMapNotify events to track changes to
+ // the keyboard mapping.
+ createKeyTables();
+
+ // String format atoms
+ _glfw.x11.NULL_ = XInternAtom(_glfw.x11.display, "NULL", False);
+ _glfw.x11.UTF8_STRING = XInternAtom(_glfw.x11.display, "UTF8_STRING", False);
+ _glfw.x11.ATOM_PAIR = XInternAtom(_glfw.x11.display, "ATOM_PAIR", False);
+
+ // Custom selection property atom
+ _glfw.x11.GLFW_SELECTION =
+ XInternAtom(_glfw.x11.display, "GLFW_SELECTION", False);
+
+ // ICCCM standard clipboard atoms
+ _glfw.x11.TARGETS = XInternAtom(_glfw.x11.display, "TARGETS", False);
+ _glfw.x11.MULTIPLE = XInternAtom(_glfw.x11.display, "MULTIPLE", False);
+ _glfw.x11.PRIMARY = XInternAtom(_glfw.x11.display, "PRIMARY", False);
+ _glfw.x11.INCR = XInternAtom(_glfw.x11.display, "INCR", False);
+ _glfw.x11.CLIPBOARD = XInternAtom(_glfw.x11.display, "CLIPBOARD", False);
+
+ // Clipboard manager atoms
+ _glfw.x11.CLIPBOARD_MANAGER =
+ XInternAtom(_glfw.x11.display, "CLIPBOARD_MANAGER", False);
+ _glfw.x11.SAVE_TARGETS =
+ XInternAtom(_glfw.x11.display, "SAVE_TARGETS", False);
+
+ // Xdnd (drag and drop) atoms
+ _glfw.x11.XdndAware = XInternAtom(_glfw.x11.display, "XdndAware", False);
+ _glfw.x11.XdndEnter = XInternAtom(_glfw.x11.display, "XdndEnter", False);
+ _glfw.x11.XdndPosition = XInternAtom(_glfw.x11.display, "XdndPosition", False);
+ _glfw.x11.XdndStatus = XInternAtom(_glfw.x11.display, "XdndStatus", False);
+ _glfw.x11.XdndActionCopy = XInternAtom(_glfw.x11.display, "XdndActionCopy", False);
+ _glfw.x11.XdndDrop = XInternAtom(_glfw.x11.display, "XdndDrop", False);
+ _glfw.x11.XdndFinished = XInternAtom(_glfw.x11.display, "XdndFinished", False);
+ _glfw.x11.XdndSelection = XInternAtom(_glfw.x11.display, "XdndSelection", False);
+ _glfw.x11.XdndTypeList = XInternAtom(_glfw.x11.display, "XdndTypeList", False);
+ _glfw.x11.text_uri_list = XInternAtom(_glfw.x11.display, "text/uri-list", False);
+
+ // ICCCM, EWMH and Motif window property atoms
+ // These can be set safely even without WM support
+ // The EWMH atoms that require WM support are handled in detectEWMH
+ _glfw.x11.WM_PROTOCOLS =
+ XInternAtom(_glfw.x11.display, "WM_PROTOCOLS", False);
+ _glfw.x11.WM_STATE =
+ XInternAtom(_glfw.x11.display, "WM_STATE", False);
+ _glfw.x11.WM_DELETE_WINDOW =
+ XInternAtom(_glfw.x11.display, "WM_DELETE_WINDOW", False);
+ _glfw.x11.NET_SUPPORTED =
+ XInternAtom(_glfw.x11.display, "_NET_SUPPORTED", False);
+ _glfw.x11.NET_SUPPORTING_WM_CHECK =
+ XInternAtom(_glfw.x11.display, "_NET_SUPPORTING_WM_CHECK", False);
+ _glfw.x11.NET_WM_ICON =
+ XInternAtom(_glfw.x11.display, "_NET_WM_ICON", False);
+ _glfw.x11.NET_WM_PING =
+ XInternAtom(_glfw.x11.display, "_NET_WM_PING", False);
+ _glfw.x11.NET_WM_PID =
+ XInternAtom(_glfw.x11.display, "_NET_WM_PID", False);
+ _glfw.x11.NET_WM_NAME =
+ XInternAtom(_glfw.x11.display, "_NET_WM_NAME", False);
+ _glfw.x11.NET_WM_ICON_NAME =
+ XInternAtom(_glfw.x11.display, "_NET_WM_ICON_NAME", False);
+ _glfw.x11.NET_WM_BYPASS_COMPOSITOR =
+ XInternAtom(_glfw.x11.display, "_NET_WM_BYPASS_COMPOSITOR", False);
+ _glfw.x11.NET_WM_WINDOW_OPACITY =
+ XInternAtom(_glfw.x11.display, "_NET_WM_WINDOW_OPACITY", False);
+ _glfw.x11.MOTIF_WM_HINTS =
+ XInternAtom(_glfw.x11.display, "_MOTIF_WM_HINTS", False);
+
+ // The compositing manager selection name contains the screen number
+ {
+ char name[32];
+ snprintf(name, sizeof(name), "_NET_WM_CM_S%u", _glfw.x11.screen);
+ _glfw.x11.NET_WM_CM_Sx = XInternAtom(_glfw.x11.display, name, False);
+ }
+
+ // Detect whether an EWMH-conformant window manager is running
+ detectEWMH();
+
+ return GLFW_TRUE;
+}
+
+// Retrieve system content scale via folklore heuristics
+//
+static void getSystemContentScale(float* xscale, float* yscale)
+{
+ // Start by assuming the default X11 DPI
+ // NOTE: Some desktop environments (KDE) may remove the Xft.dpi field when it
+ // would be set to 96, so assume that is the case if we cannot find it
+ float xdpi = 96.f, ydpi = 96.f;
+
+ // NOTE: Basing the scale on Xft.dpi where available should provide the most
+ // consistent user experience (matches Qt, Gtk, etc), although not
+ // always the most accurate one
+ char* rms = XResourceManagerString(_glfw.x11.display);
+ if (rms)
+ {
+ XrmDatabase db = XrmGetStringDatabase(rms);
+ if (db)
+ {
+ XrmValue value;
+ char* type = NULL;
+
+ if (XrmGetResource(db, "Xft.dpi", "Xft.Dpi", &type, &value))
+ {
+ if (type && strcmp(type, "String") == 0)
+ xdpi = ydpi = atof(value.addr);
+ }
+
+ XrmDestroyDatabase(db);
+ }
+ }
+
+ *xscale = xdpi / 96.f;
+ *yscale = ydpi / 96.f;
+}
+
+// Create a blank cursor for hidden and disabled cursor modes
+//
+static Cursor createHiddenCursor(void)
+{
+ unsigned char pixels[16 * 16 * 4] = { 0 };
+ GLFWimage image = { 16, 16, pixels };
+ return _glfwCreateNativeCursorX11(&image, 0, 0);
+}
+
+// Create a helper window for IPC
+//
+static Window createHelperWindow(void)
+{
+ XSetWindowAttributes wa;
+ wa.event_mask = PropertyChangeMask;
+
+ return XCreateWindow(_glfw.x11.display, _glfw.x11.root,
+ 0, 0, 1, 1, 0, 0,
+ InputOnly,
+ DefaultVisual(_glfw.x11.display, _glfw.x11.screen),
+ CWEventMask, &wa);
+}
+
+// Create the pipe for empty events without assuming the OS has pipe2(2)
+//
+static GLFWbool createEmptyEventPipe(void)
+{
+ if (pipe(_glfw.x11.emptyEventPipe) != 0)
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "X11: Failed to create empty event pipe: %s",
+ strerror(errno));
+ return GLFW_FALSE;
+ }
+
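+    // Make both ends of the pipe non-blocking and close-on-exec by hand,
+    // since pipe2(2) cannot be assumed to exist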
+ for (int i = 0; i < 2; i++)
+ {
+ const int sf = fcntl(_glfw.x11.emptyEventPipe[i], F_GETFL, 0);
+ const int df = fcntl(_glfw.x11.emptyEventPipe[i], F_GETFD, 0);
+
+ if (sf == -1 || df == -1 ||
+ fcntl(_glfw.x11.emptyEventPipe[i], F_SETFL, sf | O_NONBLOCK) == -1 ||
+ fcntl(_glfw.x11.emptyEventPipe[i], F_SETFD, df | FD_CLOEXEC) == -1)
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "X11: Failed to set flags for empty event pipe: %s",
+ strerror(errno));
+ return GLFW_FALSE;
+ }
+ }
+
+ return GLFW_TRUE;
+}
+
+// X error handler
+//
+static int errorHandler(Display *display, XErrorEvent* event)
+{
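+    // Only record errors for the connection opened by GLFW; the application
+    // may have Xlib connections of its own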
+ if (_glfw.x11.display != display)
+ return 0;
+
+ _glfw.x11.errorCode = event->error_code;
+ return 0;
+}
+
+
+//////////////////////////////////////////////////////////////////////////
+////// GLFW internal API //////
+//////////////////////////////////////////////////////////////////////////
+
+// Sets the X error handler callback
+//
+void _glfwGrabErrorHandlerX11(void)
+{
+ _glfw.x11.errorCode = Success;
+ XSetErrorHandler(errorHandler);
+}
+
+// Clears the X error handler callback
+//
+void _glfwReleaseErrorHandlerX11(void)
+{
+ // Synchronize to make sure all commands are processed
+ XSync(_glfw.x11.display, False);
+ XSetErrorHandler(NULL);
+}
+
+// Reports the specified error, appending information about the last X error
+//
+void _glfwInputErrorX11(int error, const char* message)
+{
+ char buffer[_GLFW_MESSAGE_SIZE];
+ XGetErrorText(_glfw.x11.display, _glfw.x11.errorCode,
+ buffer, sizeof(buffer));
+
+ _glfwInputError(error, "%s: %s", message, buffer);
+}
+
+// Creates a native cursor object from the specified image and hotspot
+//
+Cursor _glfwCreateNativeCursorX11(const GLFWimage* image, int xhot, int yhot)
+{
+ Cursor cursor;
+
+ if (!_glfw.x11.xcursor.handle)
+ return None;
+
+ XcursorImage* native = XcursorImageCreate(image->width, image->height);
+ if (native == NULL)
+ return None;
+
+ native->xhot = xhot;
+ native->yhot = yhot;
+
+ unsigned char* source = (unsigned char*) image->pixels;
+ XcursorPixel* target = native->pixels;
+
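+    // Convert the non-premultiplied RGBA pixels of the GLFWimage to the
+    // premultiplied ARGB format expected by Xcursor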
+ for (int i = 0; i < image->width * image->height; i++, target++, source += 4)
+ {
+ unsigned int alpha = source[3];
+
+ *target = (alpha << 24) |
+ ((unsigned char) ((source[0] * alpha) / 255) << 16) |
+ ((unsigned char) ((source[1] * alpha) / 255) << 8) |
+ ((unsigned char) ((source[2] * alpha) / 255) << 0);
+ }
+
+ cursor = XcursorImageLoadCursor(_glfw.x11.display, native);
+ XcursorImageDestroy(native);
+
+ return cursor;
+}
+
+
+//////////////////////////////////////////////////////////////////////////
+////// GLFW platform API //////
+//////////////////////////////////////////////////////////////////////////
+
+GLFWbool _glfwConnectX11(int platformID, _GLFWplatform* platform)
+{
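+    // The entries below are positional initializers and must be listed in
+    // the same order as the members of the _GLFWplatform struct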
+ const _GLFWplatform x11 =
+ {
+ GLFW_PLATFORM_X11,
+ _glfwInitX11,
+ _glfwTerminateX11,
+ _glfwGetCursorPosX11,
+ _glfwSetCursorPosX11,
+ _glfwSetCursorModeX11,
+ _glfwSetRawMouseMotionX11,
+ _glfwRawMouseMotionSupportedX11,
+ _glfwCreateCursorX11,
+ _glfwCreateStandardCursorX11,
+ _glfwDestroyCursorX11,
+ _glfwSetCursorX11,
+ _glfwGetScancodeNameX11,
+ _glfwGetKeyScancodeX11,
+ _glfwSetClipboardStringX11,
+ _glfwGetClipboardStringX11,
+#if defined(__linux__)
+ _glfwInitJoysticksLinux,
+ _glfwTerminateJoysticksLinux,
+ _glfwPollJoystickLinux,
+ _glfwGetMappingNameLinux,
+ _glfwUpdateGamepadGUIDLinux,
+#else
+ _glfwInitJoysticksNull,
+ _glfwTerminateJoysticksNull,
+ _glfwPollJoystickNull,
+ _glfwGetMappingNameNull,
+ _glfwUpdateGamepadGUIDNull,
+#endif
+ _glfwFreeMonitorX11,
+ _glfwGetMonitorPosX11,
+ _glfwGetMonitorContentScaleX11,
+ _glfwGetMonitorWorkareaX11,
+ _glfwGetVideoModesX11,
+ _glfwGetVideoModeX11,
+ _glfwGetGammaRampX11,
+ _glfwSetGammaRampX11,
+ _glfwCreateWindowX11,
+ _glfwDestroyWindowX11,
+ _glfwSetWindowTitleX11,
+ _glfwSetWindowIconX11,
+ _glfwGetWindowPosX11,
+ _glfwSetWindowPosX11,
+ _glfwGetWindowSizeX11,
+ _glfwSetWindowSizeX11,
+ _glfwSetWindowSizeLimitsX11,
+ _glfwSetWindowAspectRatioX11,
+ _glfwGetFramebufferSizeX11,
+ _glfwGetWindowFrameSizeX11,
+ _glfwGetWindowContentScaleX11,
+ _glfwIconifyWindowX11,
+ _glfwRestoreWindowX11,
+ _glfwMaximizeWindowX11,
+ _glfwShowWindowX11,
+ _glfwHideWindowX11,
+ _glfwRequestWindowAttentionX11,
+ _glfwFocusWindowX11,
+ _glfwSetWindowMonitorX11,
+ _glfwWindowFocusedX11,
+ _glfwWindowIconifiedX11,
+ _glfwWindowVisibleX11,
+ _glfwWindowMaximizedX11,
+ _glfwWindowHoveredX11,
+ _glfwFramebufferTransparentX11,
+ _glfwGetWindowOpacityX11,
+ _glfwSetWindowResizableX11,
+ _glfwSetWindowDecoratedX11,
+ _glfwSetWindowFloatingX11,
+ _glfwSetWindowOpacityX11,
+ _glfwSetWindowMousePassthroughX11,
+ _glfwPollEventsX11,
+ _glfwWaitEventsX11,
+ _glfwWaitEventsTimeoutX11,
+ _glfwPostEmptyEventX11,
+ _glfwGetEGLPlatformX11,
+ _glfwGetEGLNativeDisplayX11,
+ _glfwGetEGLNativeWindowX11,
+ _glfwGetRequiredInstanceExtensionsX11,
+ _glfwGetPhysicalDevicePresentationSupportX11,
+ _glfwCreateWindowSurfaceX11,
+ };
+
+ // HACK: If the application has left the locale as "C" then both wide
+ // character text input and explicit UTF-8 input via XIM will break
+ // This sets the CTYPE part of the current locale from the environment
+ // in the hope that it is set to something more sane than "C"
+ if (strcmp(setlocale(LC_CTYPE, NULL), "C") == 0)
+ setlocale(LC_CTYPE, "");
+
+#if defined(__CYGWIN__)
+ void* module = _glfwPlatformLoadModule("libX11-6.so");
+#elif defined(__OpenBSD__) || defined(__NetBSD__)
+ void* module = _glfwPlatformLoadModule("libX11.so");
+#else
+ void* module = _glfwPlatformLoadModule("libX11.so.6");
+#endif
+ if (!module)
+ {
+ if (platformID == GLFW_PLATFORM_X11)
+ _glfwInputError(GLFW_PLATFORM_ERROR, "X11: Failed to load Xlib");
+
+ return GLFW_FALSE;
+ }
+
+ PFN_XInitThreads XInitThreads = (PFN_XInitThreads)
+ _glfwPlatformGetModuleSymbol(module, "XInitThreads");
+ PFN_XrmInitialize XrmInitialize = (PFN_XrmInitialize)
+ _glfwPlatformGetModuleSymbol(module, "XrmInitialize");
+ PFN_XOpenDisplay XOpenDisplay = (PFN_XOpenDisplay)
+ _glfwPlatformGetModuleSymbol(module, "XOpenDisplay");
+ if (!XInitThreads || !XrmInitialize || !XOpenDisplay)
+ {
+ if (platformID == GLFW_PLATFORM_X11)
+ _glfwInputError(GLFW_PLATFORM_ERROR, "X11: Failed to load Xlib entry point");
+
+ _glfwPlatformFreeModule(module);
+ return GLFW_FALSE;
+ }
+
+ XInitThreads();
+ XrmInitialize();
+
+ Display* display = XOpenDisplay(NULL);
+ if (!display)
+ {
+ if (platformID == GLFW_PLATFORM_X11)
+ {
+ const char* name = getenv("DISPLAY");
+ if (name)
+ {
+ _glfwInputError(GLFW_PLATFORM_UNAVAILABLE,
+ "X11: Failed to open display %s", name);
+ }
+ else
+ {
+ _glfwInputError(GLFW_PLATFORM_UNAVAILABLE,
+ "X11: The DISPLAY environment variable is missing");
+ }
+ }
+
+ _glfwPlatformFreeModule(module);
+ return GLFW_FALSE;
+ }
+
+ _glfw.x11.display = display;
+ _glfw.x11.xlib.handle = module;
+
+ *platform = x11;
+ return GLFW_TRUE;
+}
+
+int _glfwInitX11(void)
+{
+ _glfw.x11.xlib.AllocClassHint = (PFN_XAllocClassHint)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XAllocClassHint");
+ _glfw.x11.xlib.AllocSizeHints = (PFN_XAllocSizeHints)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XAllocSizeHints");
+ _glfw.x11.xlib.AllocWMHints = (PFN_XAllocWMHints)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XAllocWMHints");
+ _glfw.x11.xlib.ChangeProperty = (PFN_XChangeProperty)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XChangeProperty");
+ _glfw.x11.xlib.ChangeWindowAttributes = (PFN_XChangeWindowAttributes)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XChangeWindowAttributes");
+ _glfw.x11.xlib.CheckIfEvent = (PFN_XCheckIfEvent)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XCheckIfEvent");
+ _glfw.x11.xlib.CheckTypedWindowEvent = (PFN_XCheckTypedWindowEvent)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XCheckTypedWindowEvent");
+ _glfw.x11.xlib.CloseDisplay = (PFN_XCloseDisplay)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XCloseDisplay");
+ _glfw.x11.xlib.CloseIM = (PFN_XCloseIM)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XCloseIM");
+ _glfw.x11.xlib.ConvertSelection = (PFN_XConvertSelection)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XConvertSelection");
+ _glfw.x11.xlib.CreateColormap = (PFN_XCreateColormap)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XCreateColormap");
+ _glfw.x11.xlib.CreateFontCursor = (PFN_XCreateFontCursor)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XCreateFontCursor");
+ _glfw.x11.xlib.CreateIC = (PFN_XCreateIC)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XCreateIC");
+ _glfw.x11.xlib.CreateRegion = (PFN_XCreateRegion)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XCreateRegion");
+ _glfw.x11.xlib.CreateWindow = (PFN_XCreateWindow)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XCreateWindow");
+ _glfw.x11.xlib.DefineCursor = (PFN_XDefineCursor)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XDefineCursor");
+ _glfw.x11.xlib.DeleteContext = (PFN_XDeleteContext)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XDeleteContext");
+ _glfw.x11.xlib.DeleteProperty = (PFN_XDeleteProperty)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XDeleteProperty");
+ _glfw.x11.xlib.DestroyIC = (PFN_XDestroyIC)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XDestroyIC");
+ _glfw.x11.xlib.DestroyRegion = (PFN_XDestroyRegion)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XDestroyRegion");
+ _glfw.x11.xlib.DestroyWindow = (PFN_XDestroyWindow)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XDestroyWindow");
+ _glfw.x11.xlib.DisplayKeycodes = (PFN_XDisplayKeycodes)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XDisplayKeycodes");
+ _glfw.x11.xlib.EventsQueued = (PFN_XEventsQueued)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XEventsQueued");
+ _glfw.x11.xlib.FilterEvent = (PFN_XFilterEvent)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XFilterEvent");
+ _glfw.x11.xlib.FindContext = (PFN_XFindContext)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XFindContext");
+ _glfw.x11.xlib.Flush = (PFN_XFlush)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XFlush");
+ _glfw.x11.xlib.Free = (PFN_XFree)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XFree");
+ _glfw.x11.xlib.FreeColormap = (PFN_XFreeColormap)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XFreeColormap");
+ _glfw.x11.xlib.FreeCursor = (PFN_XFreeCursor)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XFreeCursor");
+ _glfw.x11.xlib.FreeEventData = (PFN_XFreeEventData)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XFreeEventData");
+ _glfw.x11.xlib.GetErrorText = (PFN_XGetErrorText)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XGetErrorText");
+ _glfw.x11.xlib.GetEventData = (PFN_XGetEventData)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XGetEventData");
+ _glfw.x11.xlib.GetICValues = (PFN_XGetICValues)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XGetICValues");
+ _glfw.x11.xlib.GetIMValues = (PFN_XGetIMValues)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XGetIMValues");
+ _glfw.x11.xlib.GetInputFocus = (PFN_XGetInputFocus)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XGetInputFocus");
+ _glfw.x11.xlib.GetKeyboardMapping = (PFN_XGetKeyboardMapping)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XGetKeyboardMapping");
+ _glfw.x11.xlib.GetScreenSaver = (PFN_XGetScreenSaver)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XGetScreenSaver");
+ _glfw.x11.xlib.GetSelectionOwner = (PFN_XGetSelectionOwner)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XGetSelectionOwner");
+ _glfw.x11.xlib.GetVisualInfo = (PFN_XGetVisualInfo)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XGetVisualInfo");
+ _glfw.x11.xlib.GetWMNormalHints = (PFN_XGetWMNormalHints)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XGetWMNormalHints");
+ _glfw.x11.xlib.GetWindowAttributes = (PFN_XGetWindowAttributes)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XGetWindowAttributes");
+ _glfw.x11.xlib.GetWindowProperty = (PFN_XGetWindowProperty)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XGetWindowProperty");
+ _glfw.x11.xlib.GrabPointer = (PFN_XGrabPointer)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XGrabPointer");
+ _glfw.x11.xlib.IconifyWindow = (PFN_XIconifyWindow)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XIconifyWindow");
+ _glfw.x11.xlib.InternAtom = (PFN_XInternAtom)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XInternAtom");
+ _glfw.x11.xlib.LookupString = (PFN_XLookupString)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XLookupString");
+ _glfw.x11.xlib.MapRaised = (PFN_XMapRaised)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XMapRaised");
+ _glfw.x11.xlib.MapWindow = (PFN_XMapWindow)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XMapWindow");
+ _glfw.x11.xlib.MoveResizeWindow = (PFN_XMoveResizeWindow)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XMoveResizeWindow");
+ _glfw.x11.xlib.MoveWindow = (PFN_XMoveWindow)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XMoveWindow");
+ _glfw.x11.xlib.NextEvent = (PFN_XNextEvent)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XNextEvent");
+ _glfw.x11.xlib.OpenIM = (PFN_XOpenIM)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XOpenIM");
+ _glfw.x11.xlib.PeekEvent = (PFN_XPeekEvent)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XPeekEvent");
+ _glfw.x11.xlib.Pending = (PFN_XPending)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XPending");
+ _glfw.x11.xlib.QueryExtension = (PFN_XQueryExtension)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XQueryExtension");
+ _glfw.x11.xlib.QueryPointer = (PFN_XQueryPointer)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XQueryPointer");
+ _glfw.x11.xlib.RaiseWindow = (PFN_XRaiseWindow)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XRaiseWindow");
+ _glfw.x11.xlib.RegisterIMInstantiateCallback = (PFN_XRegisterIMInstantiateCallback)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XRegisterIMInstantiateCallback");
+ _glfw.x11.xlib.ResizeWindow = (PFN_XResizeWindow)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XResizeWindow");
+ _glfw.x11.xlib.ResourceManagerString = (PFN_XResourceManagerString)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XResourceManagerString");
+ _glfw.x11.xlib.SaveContext = (PFN_XSaveContext)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XSaveContext");
+ _glfw.x11.xlib.SelectInput = (PFN_XSelectInput)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XSelectInput");
+ _glfw.x11.xlib.SendEvent = (PFN_XSendEvent)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XSendEvent");
+ _glfw.x11.xlib.SetClassHint = (PFN_XSetClassHint)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XSetClassHint");
+ _glfw.x11.xlib.SetErrorHandler = (PFN_XSetErrorHandler)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XSetErrorHandler");
+ _glfw.x11.xlib.SetICFocus = (PFN_XSetICFocus)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XSetICFocus");
+ _glfw.x11.xlib.SetIMValues = (PFN_XSetIMValues)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XSetIMValues");
+ _glfw.x11.xlib.SetInputFocus = (PFN_XSetInputFocus)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XSetInputFocus");
+ _glfw.x11.xlib.SetLocaleModifiers = (PFN_XSetLocaleModifiers)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XSetLocaleModifiers");
+ _glfw.x11.xlib.SetScreenSaver = (PFN_XSetScreenSaver)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XSetScreenSaver");
+ _glfw.x11.xlib.SetSelectionOwner = (PFN_XSetSelectionOwner)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XSetSelectionOwner");
+ _glfw.x11.xlib.SetWMHints = (PFN_XSetWMHints)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XSetWMHints");
+ _glfw.x11.xlib.SetWMNormalHints = (PFN_XSetWMNormalHints)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XSetWMNormalHints");
+ _glfw.x11.xlib.SetWMProtocols = (PFN_XSetWMProtocols)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XSetWMProtocols");
+ _glfw.x11.xlib.SupportsLocale = (PFN_XSupportsLocale)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XSupportsLocale");
+ _glfw.x11.xlib.Sync = (PFN_XSync)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XSync");
+ _glfw.x11.xlib.TranslateCoordinates = (PFN_XTranslateCoordinates)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XTranslateCoordinates");
+ _glfw.x11.xlib.UndefineCursor = (PFN_XUndefineCursor)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XUndefineCursor");
+ _glfw.x11.xlib.UngrabPointer = (PFN_XUngrabPointer)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XUngrabPointer");
+ _glfw.x11.xlib.UnmapWindow = (PFN_XUnmapWindow)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XUnmapWindow");
+ _glfw.x11.xlib.UnsetICFocus = (PFN_XUnsetICFocus)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XUnsetICFocus");
+ _glfw.x11.xlib.VisualIDFromVisual = (PFN_XVisualIDFromVisual)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XVisualIDFromVisual");
+ _glfw.x11.xlib.WarpPointer = (PFN_XWarpPointer)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XWarpPointer");
+ _glfw.x11.xkb.FreeKeyboard = (PFN_XkbFreeKeyboard)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XkbFreeKeyboard");
+ _glfw.x11.xkb.FreeNames = (PFN_XkbFreeNames)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XkbFreeNames");
+ _glfw.x11.xkb.GetMap = (PFN_XkbGetMap)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XkbGetMap");
+ _glfw.x11.xkb.GetNames = (PFN_XkbGetNames)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XkbGetNames");
+ _glfw.x11.xkb.GetState = (PFN_XkbGetState)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XkbGetState");
+ _glfw.x11.xkb.KeycodeToKeysym = (PFN_XkbKeycodeToKeysym)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XkbKeycodeToKeysym");
+ _glfw.x11.xkb.QueryExtension = (PFN_XkbQueryExtension)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XkbQueryExtension");
+ _glfw.x11.xkb.SelectEventDetails = (PFN_XkbSelectEventDetails)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XkbSelectEventDetails");
+ _glfw.x11.xkb.SetDetectableAutoRepeat = (PFN_XkbSetDetectableAutoRepeat)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XkbSetDetectableAutoRepeat");
+ _glfw.x11.xrm.DestroyDatabase = (PFN_XrmDestroyDatabase)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XrmDestroyDatabase");
+ _glfw.x11.xrm.GetResource = (PFN_XrmGetResource)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XrmGetResource");
+ _glfw.x11.xrm.GetStringDatabase = (PFN_XrmGetStringDatabase)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XrmGetStringDatabase");
+ _glfw.x11.xrm.UniqueQuark = (PFN_XrmUniqueQuark)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XrmUniqueQuark");
+ _glfw.x11.xlib.UnregisterIMInstantiateCallback = (PFN_XUnregisterIMInstantiateCallback)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "XUnregisterIMInstantiateCallback");
+ _glfw.x11.xlib.utf8LookupString = (PFN_Xutf8LookupString)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "Xutf8LookupString");
+ _glfw.x11.xlib.utf8SetWMProperties = (PFN_Xutf8SetWMProperties)
+ _glfwPlatformGetModuleSymbol(_glfw.x11.xlib.handle, "Xutf8SetWMProperties");
+
+ if (_glfw.x11.xlib.utf8LookupString && _glfw.x11.xlib.utf8SetWMProperties)
+ _glfw.x11.xlib.utf8 = GLFW_TRUE;
+
+ _glfw.x11.screen = DefaultScreen(_glfw.x11.display);
+ _glfw.x11.root = RootWindow(_glfw.x11.display, _glfw.x11.screen);
+ _glfw.x11.context = XUniqueContext();
+
+ getSystemContentScale(&_glfw.x11.contentScaleX, &_glfw.x11.contentScaleY);
+
+ if (!createEmptyEventPipe())
+ return GLFW_FALSE;
+
+ if (!initExtensions())
+ return GLFW_FALSE;
+
+ _glfw.x11.helperWindowHandle = createHelperWindow();
+ _glfw.x11.hiddenCursorHandle = createHiddenCursor();
+
+ if (XSupportsLocale() && _glfw.x11.xlib.utf8)
+ {
+ XSetLocaleModifiers("");
+
+ // If an IM is already present our callback will be called right away
+ XRegisterIMInstantiateCallback(_glfw.x11.display,
+ NULL, NULL, NULL,
+ inputMethodInstantiateCallback,
+ NULL);
+ }
+
+ _glfwPollMonitorsX11();
+ return GLFW_TRUE;
+}
+
+void _glfwTerminateX11(void)
+{
+ if (_glfw.x11.helperWindowHandle)
+ {
+ if (XGetSelectionOwner(_glfw.x11.display, _glfw.x11.CLIPBOARD) ==
+ _glfw.x11.helperWindowHandle)
+ {
+ _glfwPushSelectionToManagerX11();
+ }
+
+ XDestroyWindow(_glfw.x11.display, _glfw.x11.helperWindowHandle);
+ _glfw.x11.helperWindowHandle = None;
+ }
+
+ if (_glfw.x11.hiddenCursorHandle)
+ {
+ XFreeCursor(_glfw.x11.display, _glfw.x11.hiddenCursorHandle);
+ _glfw.x11.hiddenCursorHandle = (Cursor) 0;
+ }
+
+ _glfw_free(_glfw.x11.primarySelectionString);
+ _glfw_free(_glfw.x11.clipboardString);
+
+ XUnregisterIMInstantiateCallback(_glfw.x11.display,
+ NULL, NULL, NULL,
+ inputMethodInstantiateCallback,
+ NULL);
+
+ if (_glfw.x11.im)
+ {
+ XCloseIM(_glfw.x11.im);
+ _glfw.x11.im = NULL;
+ }
+
+ if (_glfw.x11.display)
+ {
+ XCloseDisplay(_glfw.x11.display);
+ _glfw.x11.display = NULL;
+ }
+
+ if (_glfw.x11.x11xcb.handle)
+ {
+ _glfwPlatformFreeModule(_glfw.x11.x11xcb.handle);
+ _glfw.x11.x11xcb.handle = NULL;
+ }
+
+ if (_glfw.x11.xcursor.handle)
+ {
+ _glfwPlatformFreeModule(_glfw.x11.xcursor.handle);
+ _glfw.x11.xcursor.handle = NULL;
+ }
+
+ if (_glfw.x11.randr.handle)
+ {
+ _glfwPlatformFreeModule(_glfw.x11.randr.handle);
+ _glfw.x11.randr.handle = NULL;
+ }
+
+ if (_glfw.x11.xinerama.handle)
+ {
+ _glfwPlatformFreeModule(_glfw.x11.xinerama.handle);
+ _glfw.x11.xinerama.handle = NULL;
+ }
+
+ if (_glfw.x11.xrender.handle)
+ {
+ _glfwPlatformFreeModule(_glfw.x11.xrender.handle);
+ _glfw.x11.xrender.handle = NULL;
+ }
+
+ if (_glfw.x11.vidmode.handle)
+ {
+ _glfwPlatformFreeModule(_glfw.x11.vidmode.handle);
+ _glfw.x11.vidmode.handle = NULL;
+ }
+
+ if (_glfw.x11.xi.handle)
+ {
+ _glfwPlatformFreeModule(_glfw.x11.xi.handle);
+ _glfw.x11.xi.handle = NULL;
+ }
+
+ _glfwTerminateOSMesa();
+ // NOTE: These need to be unloaded after XCloseDisplay, as they register
+ // cleanup callbacks that get called by that function
+ _glfwTerminateEGL();
+ _glfwTerminateGLX();
+
+ if (_glfw.x11.xlib.handle)
+ {
+ _glfwPlatformFreeModule(_glfw.x11.xlib.handle);
+ _glfw.x11.xlib.handle = NULL;
+ }
+
+ if (_glfw.x11.emptyEventPipe[0] || _glfw.x11.emptyEventPipe[1])
+ {
+ close(_glfw.x11.emptyEventPipe[0]);
+ close(_glfw.x11.emptyEventPipe[1]);
+ }
+}
+
diff --git a/chromium/third_party/dawn/third_party/glfw/src/x11_monitor.c b/chromium/third_party/dawn/third_party/glfw/src/x11_monitor.c
new file mode 100644
index 00000000000..b031c83c0e4
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/src/x11_monitor.c
@@ -0,0 +1,616 @@
+//========================================================================
+// GLFW 3.4 X11 - www.glfw.org
+//------------------------------------------------------------------------
+// Copyright (c) 2002-2006 Marcus Geelnard
+// Copyright (c) 2006-2019 Camilla Löwy <elmindreda@glfw.org>
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would
+// be appreciated but is not required.
+//
+// 2. Altered source versions must be plainly marked as such, and must not
+// be misrepresented as being the original software.
+//
+// 3. This notice may not be removed or altered from any source
+// distribution.
+//
+//========================================================================
+// It is fine to use C99 in this file because it will not be built with VS
+//========================================================================
+
+#include "internal.h"
+
+#include <limits.h>
+#include <stdlib.h>
+#include <string.h>
+#include <math.h>
+
+
+// Check whether the display mode should be included in enumeration
+//
+static GLFWbool modeIsGood(const XRRModeInfo* mi)
+{
+ return (mi->modeFlags & RR_Interlace) == 0;
+}
+
+// Calculates the refresh rate, in Hz, from the specified RandR mode info
+//
+static int calculateRefreshRate(const XRRModeInfo* mi)
+{
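+    // The refresh rate is the pixel clock divided by the total number of
+    // pixels per frame, including blanking intervals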
+ if (mi->hTotal && mi->vTotal)
+ return (int) round((double) mi->dotClock / ((double) mi->hTotal * (double) mi->vTotal));
+ else
+ return 0;
+}
+
+// Returns the mode info for a RandR mode XID
+//
+static const XRRModeInfo* getModeInfo(const XRRScreenResources* sr, RRMode id)
+{
+ for (int i = 0; i < sr->nmode; i++)
+ {
+ if (sr->modes[i].id == id)
+ return sr->modes + i;
+ }
+
+ return NULL;
+}
+
+// Convert RandR mode info to GLFW video mode
+//
+static GLFWvidmode vidmodeFromModeInfo(const XRRModeInfo* mi,
+ const XRRCrtcInfo* ci)
+{
+ GLFWvidmode mode;
+
+ if (ci->rotation == RR_Rotate_90 || ci->rotation == RR_Rotate_270)
+ {
+ mode.width = mi->height;
+ mode.height = mi->width;
+ }
+ else
+ {
+ mode.width = mi->width;
+ mode.height = mi->height;
+ }
+
+ mode.refreshRate = calculateRefreshRate(mi);
+
+ _glfwSplitBPP(DefaultDepth(_glfw.x11.display, _glfw.x11.screen),
+ &mode.redBits, &mode.greenBits, &mode.blueBits);
+
+ return mode;
+}
+
+
+//////////////////////////////////////////////////////////////////////////
+////// GLFW internal API //////
+//////////////////////////////////////////////////////////////////////////
+
+// Poll for changes in the set of connected monitors
+//
+void _glfwPollMonitorsX11(void)
+{
+ if (_glfw.x11.randr.available && !_glfw.x11.randr.monitorBroken)
+ {
+ int disconnectedCount, screenCount = 0;
+ _GLFWmonitor** disconnected = NULL;
+ XineramaScreenInfo* screens = NULL;
+ XRRScreenResources* sr = XRRGetScreenResourcesCurrent(_glfw.x11.display,
+ _glfw.x11.root);
+ RROutput primary = XRRGetOutputPrimary(_glfw.x11.display,
+ _glfw.x11.root);
+
+ if (_glfw.x11.xinerama.available)
+ screens = XineramaQueryScreens(_glfw.x11.display, &screenCount);
+
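+        // Take a snapshot of the currently known monitors; entries that are
+        // not matched to a RandR output below are reported as disconnected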
+ disconnectedCount = _glfw.monitorCount;
+ if (disconnectedCount)
+ {
+ disconnected = _glfw_calloc(_glfw.monitorCount, sizeof(_GLFWmonitor*));
+ memcpy(disconnected,
+ _glfw.monitors,
+ _glfw.monitorCount * sizeof(_GLFWmonitor*));
+ }
+
+ for (int i = 0; i < sr->noutput; i++)
+ {
+ int j, type, widthMM, heightMM;
+
+ XRROutputInfo* oi = XRRGetOutputInfo(_glfw.x11.display, sr, sr->outputs[i]);
+ if (oi->connection != RR_Connected || oi->crtc == None)
+ {
+ XRRFreeOutputInfo(oi);
+ continue;
+ }
+
+ for (j = 0; j < disconnectedCount; j++)
+ {
+ if (disconnected[j] &&
+ disconnected[j]->x11.output == sr->outputs[i])
+ {
+ disconnected[j] = NULL;
+ break;
+ }
+ }
+
+ if (j < disconnectedCount)
+ {
+ XRRFreeOutputInfo(oi);
+ continue;
+ }
+
+ XRRCrtcInfo* ci = XRRGetCrtcInfo(_glfw.x11.display, sr, oi->crtc);
+ if (ci->rotation == RR_Rotate_90 || ci->rotation == RR_Rotate_270)
+ {
+ widthMM = oi->mm_height;
+ heightMM = oi->mm_width;
+ }
+ else
+ {
+ widthMM = oi->mm_width;
+ heightMM = oi->mm_height;
+ }
+
+ if (widthMM <= 0 || heightMM <= 0)
+ {
+ // HACK: If RandR does not provide a physical size, assume the
+ // X11 default 96 DPI and calculate from the CRTC viewport
+ // NOTE: These members are affected by rotation, unlike the mode
+ // info and output info members
+ widthMM = (int) (ci->width * 25.4f / 96.f);
+ heightMM = (int) (ci->height * 25.4f / 96.f);
+ }
+
+ _GLFWmonitor* monitor = _glfwAllocMonitor(oi->name, widthMM, heightMM);
+ monitor->x11.output = sr->outputs[i];
+ monitor->x11.crtc = oi->crtc;
+
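+            // Find the Xinerama screen whose geometry matches this CRTC and
+            // store its index with the monitor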
+ for (j = 0; j < screenCount; j++)
+ {
+ if (screens[j].x_org == ci->x &&
+ screens[j].y_org == ci->y &&
+ screens[j].width == ci->width &&
+ screens[j].height == ci->height)
+ {
+ monitor->x11.index = j;
+ break;
+ }
+ }
+
+ if (monitor->x11.output == primary)
+ type = _GLFW_INSERT_FIRST;
+ else
+ type = _GLFW_INSERT_LAST;
+
+ _glfwInputMonitor(monitor, GLFW_CONNECTED, type);
+
+ XRRFreeOutputInfo(oi);
+ XRRFreeCrtcInfo(ci);
+ }
+
+ XRRFreeScreenResources(sr);
+
+ if (screens)
+ XFree(screens);
+
+ for (int i = 0; i < disconnectedCount; i++)
+ {
+ if (disconnected[i])
+ _glfwInputMonitor(disconnected[i], GLFW_DISCONNECTED, 0);
+ }
+
+ _glfw_free(disconnected);
+ }
+ else
+ {
+ const int widthMM = DisplayWidthMM(_glfw.x11.display, _glfw.x11.screen);
+ const int heightMM = DisplayHeightMM(_glfw.x11.display, _glfw.x11.screen);
+
+ _glfwInputMonitor(_glfwAllocMonitor("Display", widthMM, heightMM),
+ GLFW_CONNECTED,
+ _GLFW_INSERT_FIRST);
+ }
+}
+
+// Set the current video mode for the specified monitor
+//
+void _glfwSetVideoModeX11(_GLFWmonitor* monitor, const GLFWvidmode* desired)
+{
+ if (_glfw.x11.randr.available && !_glfw.x11.randr.monitorBroken)
+ {
+ GLFWvidmode current;
+ RRMode native = None;
+
+ const GLFWvidmode* best = _glfwChooseVideoMode(monitor, desired);
+ _glfwGetVideoModeX11(monitor, &current);
+ if (_glfwCompareVideoModes(&current, best) == 0)
+ return;
+
+ XRRScreenResources* sr =
+ XRRGetScreenResourcesCurrent(_glfw.x11.display, _glfw.x11.root);
+ XRRCrtcInfo* ci = XRRGetCrtcInfo(_glfw.x11.display, sr, monitor->x11.crtc);
+ XRROutputInfo* oi = XRRGetOutputInfo(_glfw.x11.display, sr, monitor->x11.output);
+
+ for (int i = 0; i < oi->nmode; i++)
+ {
+ const XRRModeInfo* mi = getModeInfo(sr, oi->modes[i]);
+ if (!modeIsGood(mi))
+ continue;
+
+ const GLFWvidmode mode = vidmodeFromModeInfo(mi, ci);
+ if (_glfwCompareVideoModes(best, &mode) == 0)
+ {
+ native = mi->id;
+ break;
+ }
+ }
+
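+        // Only switch modes if a matching RandR mode was found, saving the
+        // original mode the first time so it can be restored later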
+ if (native)
+ {
+ if (monitor->x11.oldMode == None)
+ monitor->x11.oldMode = ci->mode;
+
+ XRRSetCrtcConfig(_glfw.x11.display,
+ sr, monitor->x11.crtc,
+ CurrentTime,
+ ci->x, ci->y,
+ native,
+ ci->rotation,
+ ci->outputs,
+ ci->noutput);
+ }
+
+ XRRFreeOutputInfo(oi);
+ XRRFreeCrtcInfo(ci);
+ XRRFreeScreenResources(sr);
+ }
+}
+
+// Restore the saved (original) video mode for the specified monitor
+//
+void _glfwRestoreVideoModeX11(_GLFWmonitor* monitor)
+{
+ if (_glfw.x11.randr.available && !_glfw.x11.randr.monitorBroken)
+ {
+ if (monitor->x11.oldMode == None)
+ return;
+
+ XRRScreenResources* sr =
+ XRRGetScreenResourcesCurrent(_glfw.x11.display, _glfw.x11.root);
+ XRRCrtcInfo* ci = XRRGetCrtcInfo(_glfw.x11.display, sr, monitor->x11.crtc);
+
+ XRRSetCrtcConfig(_glfw.x11.display,
+ sr, monitor->x11.crtc,
+ CurrentTime,
+ ci->x, ci->y,
+ monitor->x11.oldMode,
+ ci->rotation,
+ ci->outputs,
+ ci->noutput);
+
+ XRRFreeCrtcInfo(ci);
+ XRRFreeScreenResources(sr);
+
+ monitor->x11.oldMode = None;
+ }
+}
+
+
+//////////////////////////////////////////////////////////////////////////
+////// GLFW platform API //////
+//////////////////////////////////////////////////////////////////////////
+
+void _glfwFreeMonitorX11(_GLFWmonitor* monitor)
+{
+}
+
+void _glfwGetMonitorPosX11(_GLFWmonitor* monitor, int* xpos, int* ypos)
+{
+ if (_glfw.x11.randr.available && !_glfw.x11.randr.monitorBroken)
+ {
+ XRRScreenResources* sr =
+ XRRGetScreenResourcesCurrent(_glfw.x11.display, _glfw.x11.root);
+ XRRCrtcInfo* ci = XRRGetCrtcInfo(_glfw.x11.display, sr, monitor->x11.crtc);
+
+ if (ci)
+ {
+ if (xpos)
+ *xpos = ci->x;
+ if (ypos)
+ *ypos = ci->y;
+
+ XRRFreeCrtcInfo(ci);
+ }
+
+ XRRFreeScreenResources(sr);
+ }
+}
+
+void _glfwGetMonitorContentScaleX11(_GLFWmonitor* monitor,
+ float* xscale, float* yscale)
+{
+ if (xscale)
+ *xscale = _glfw.x11.contentScaleX;
+ if (yscale)
+ *yscale = _glfw.x11.contentScaleY;
+}
+
+void _glfwGetMonitorWorkareaX11(_GLFWmonitor* monitor,
+ int* xpos, int* ypos,
+ int* width, int* height)
+{
+ int areaX = 0, areaY = 0, areaWidth = 0, areaHeight = 0;
+
+ if (_glfw.x11.randr.available && !_glfw.x11.randr.monitorBroken)
+ {
+ XRRScreenResources* sr =
+ XRRGetScreenResourcesCurrent(_glfw.x11.display, _glfw.x11.root);
+ XRRCrtcInfo* ci = XRRGetCrtcInfo(_glfw.x11.display, sr, monitor->x11.crtc);
+
+ areaX = ci->x;
+ areaY = ci->y;
+
+ const XRRModeInfo* mi = getModeInfo(sr, ci->mode);
+
+ if (ci->rotation == RR_Rotate_90 || ci->rotation == RR_Rotate_270)
+ {
+ areaWidth = mi->height;
+ areaHeight = mi->width;
+ }
+ else
+ {
+ areaWidth = mi->width;
+ areaHeight = mi->height;
+ }
+
+ XRRFreeCrtcInfo(ci);
+ XRRFreeScreenResources(sr);
+ }
+ else
+ {
+ areaWidth = DisplayWidth(_glfw.x11.display, _glfw.x11.screen);
+ areaHeight = DisplayHeight(_glfw.x11.display, _glfw.x11.screen);
+ }
+
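+    // If the window manager provides EWMH work area properties, clip the
+    // monitor area against the work area of the current desktop, which
+    // excludes space occupied by docks and panels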
+ if (_glfw.x11.NET_WORKAREA && _glfw.x11.NET_CURRENT_DESKTOP)
+ {
+ Atom* extents = NULL;
+ Atom* desktop = NULL;
+ const unsigned long extentCount =
+ _glfwGetWindowPropertyX11(_glfw.x11.root,
+ _glfw.x11.NET_WORKAREA,
+ XA_CARDINAL,
+ (unsigned char**) &extents);
+
+ if (_glfwGetWindowPropertyX11(_glfw.x11.root,
+ _glfw.x11.NET_CURRENT_DESKTOP,
+ XA_CARDINAL,
+ (unsigned char**) &desktop) > 0)
+ {
+ if (extentCount >= 4 && *desktop < extentCount / 4)
+ {
+ const int globalX = extents[*desktop * 4 + 0];
+ const int globalY = extents[*desktop * 4 + 1];
+ const int globalWidth = extents[*desktop * 4 + 2];
+ const int globalHeight = extents[*desktop * 4 + 3];
+
+ if (areaX < globalX)
+ {
+ areaWidth -= globalX - areaX;
+ areaX = globalX;
+ }
+
+ if (areaY < globalY)
+ {
+ areaHeight -= globalY - areaY;
+ areaY = globalY;
+ }
+
+ if (areaX + areaWidth > globalX + globalWidth)
+ areaWidth = globalX - areaX + globalWidth;
+ if (areaY + areaHeight > globalY + globalHeight)
+ areaHeight = globalY - areaY + globalHeight;
+ }
+ }
+
+ if (extents)
+ XFree(extents);
+ if (desktop)
+ XFree(desktop);
+ }
+
+ if (xpos)
+ *xpos = areaX;
+ if (ypos)
+ *ypos = areaY;
+ if (width)
+ *width = areaWidth;
+ if (height)
+ *height = areaHeight;
+}
+
+GLFWvidmode* _glfwGetVideoModesX11(_GLFWmonitor* monitor, int* count)
+{
+ GLFWvidmode* result;
+
+ *count = 0;
+
+ if (_glfw.x11.randr.available && !_glfw.x11.randr.monitorBroken)
+ {
+ XRRScreenResources* sr =
+ XRRGetScreenResourcesCurrent(_glfw.x11.display, _glfw.x11.root);
+ XRRCrtcInfo* ci = XRRGetCrtcInfo(_glfw.x11.display, sr, monitor->x11.crtc);
+ XRROutputInfo* oi = XRRGetOutputInfo(_glfw.x11.display, sr, monitor->x11.output);
+
+ result = _glfw_calloc(oi->nmode, sizeof(GLFWvidmode));
+
+ for (int i = 0; i < oi->nmode; i++)
+ {
+ const XRRModeInfo* mi = getModeInfo(sr, oi->modes[i]);
+ if (!modeIsGood(mi))
+ continue;
+
+ const GLFWvidmode mode = vidmodeFromModeInfo(mi, ci);
+ int j;
+
+ for (j = 0; j < *count; j++)
+ {
+ if (_glfwCompareVideoModes(result + j, &mode) == 0)
+ break;
+ }
+
+ // Skip duplicate modes
+ if (j < *count)
+ continue;
+
+ (*count)++;
+ result[*count - 1] = mode;
+ }
+
+ XRRFreeOutputInfo(oi);
+ XRRFreeCrtcInfo(ci);
+ XRRFreeScreenResources(sr);
+ }
+ else
+ {
+ *count = 1;
+ result = _glfw_calloc(1, sizeof(GLFWvidmode));
+ _glfwGetVideoModeX11(monitor, result);
+ }
+
+ return result;
+}
+
+void _glfwGetVideoModeX11(_GLFWmonitor* monitor, GLFWvidmode* mode)
+{
+ if (_glfw.x11.randr.available && !_glfw.x11.randr.monitorBroken)
+ {
+ XRRScreenResources* sr =
+ XRRGetScreenResourcesCurrent(_glfw.x11.display, _glfw.x11.root);
+ XRRCrtcInfo* ci = XRRGetCrtcInfo(_glfw.x11.display, sr, monitor->x11.crtc);
+
+ if (ci)
+ {
+ const XRRModeInfo* mi = getModeInfo(sr, ci->mode);
+ if (mi) // mi can be NULL if the monitor has been disconnected
+ *mode = vidmodeFromModeInfo(mi, ci);
+
+ XRRFreeCrtcInfo(ci);
+ }
+
+ XRRFreeScreenResources(sr);
+ }
+ else
+ {
+ mode->width = DisplayWidth(_glfw.x11.display, _glfw.x11.screen);
+ mode->height = DisplayHeight(_glfw.x11.display, _glfw.x11.screen);
+ mode->refreshRate = 0;
+
+ _glfwSplitBPP(DefaultDepth(_glfw.x11.display, _glfw.x11.screen),
+ &mode->redBits, &mode->greenBits, &mode->blueBits);
+ }
+}
+
+GLFWbool _glfwGetGammaRampX11(_GLFWmonitor* monitor, GLFWgammaramp* ramp)
+{
+ if (_glfw.x11.randr.available && !_glfw.x11.randr.gammaBroken)
+ {
+ const size_t size = XRRGetCrtcGammaSize(_glfw.x11.display,
+ monitor->x11.crtc);
+ XRRCrtcGamma* gamma = XRRGetCrtcGamma(_glfw.x11.display,
+ monitor->x11.crtc);
+
+ _glfwAllocGammaArrays(ramp, size);
+
+ memcpy(ramp->red, gamma->red, size * sizeof(unsigned short));
+ memcpy(ramp->green, gamma->green, size * sizeof(unsigned short));
+ memcpy(ramp->blue, gamma->blue, size * sizeof(unsigned short));
+
+ XRRFreeGamma(gamma);
+ return GLFW_TRUE;
+ }
+ else if (_glfw.x11.vidmode.available)
+ {
+ int size;
+ XF86VidModeGetGammaRampSize(_glfw.x11.display, _glfw.x11.screen, &size);
+
+ _glfwAllocGammaArrays(ramp, size);
+
+ XF86VidModeGetGammaRamp(_glfw.x11.display,
+ _glfw.x11.screen,
+ ramp->size, ramp->red, ramp->green, ramp->blue);
+ return GLFW_TRUE;
+ }
+ else
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "X11: Gamma ramp access not supported by server");
+ return GLFW_FALSE;
+ }
+}
+
+void _glfwSetGammaRampX11(_GLFWmonitor* monitor, const GLFWgammaramp* ramp)
+{
+ if (_glfw.x11.randr.available && !_glfw.x11.randr.gammaBroken)
+ {
+ if (XRRGetCrtcGammaSize(_glfw.x11.display, monitor->x11.crtc) != ramp->size)
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "X11: Gamma ramp size must match current ramp size");
+ return;
+ }
+
+ XRRCrtcGamma* gamma = XRRAllocGamma(ramp->size);
+
+ memcpy(gamma->red, ramp->red, ramp->size * sizeof(unsigned short));
+ memcpy(gamma->green, ramp->green, ramp->size * sizeof(unsigned short));
+ memcpy(gamma->blue, ramp->blue, ramp->size * sizeof(unsigned short));
+
+ XRRSetCrtcGamma(_glfw.x11.display, monitor->x11.crtc, gamma);
+ XRRFreeGamma(gamma);
+ }
+ else if (_glfw.x11.vidmode.available)
+ {
+ XF86VidModeSetGammaRamp(_glfw.x11.display,
+ _glfw.x11.screen,
+ ramp->size,
+ (unsigned short*) ramp->red,
+ (unsigned short*) ramp->green,
+ (unsigned short*) ramp->blue);
+ }
+ else
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "X11: Gamma ramp access not supported by server");
+ }
+}
+
+
+//////////////////////////////////////////////////////////////////////////
+////// GLFW native API //////
+//////////////////////////////////////////////////////////////////////////
+
+GLFWAPI RRCrtc glfwGetX11Adapter(GLFWmonitor* handle)
+{
+ _GLFWmonitor* monitor = (_GLFWmonitor*) handle;
+ _GLFW_REQUIRE_INIT_OR_RETURN(None);
+ return monitor->x11.crtc;
+}
+
+GLFWAPI RROutput glfwGetX11Monitor(GLFWmonitor* handle)
+{
+ _GLFWmonitor* monitor = (_GLFWmonitor*) handle;
+ _GLFW_REQUIRE_INIT_OR_RETURN(None);
+ return monitor->x11.output;
+}
+
diff --git a/chromium/third_party/dawn/third_party/glfw/src/x11_platform.h b/chromium/third_party/dawn/third_party/glfw/src/x11_platform.h
new file mode 100644
index 00000000000..956104faf62
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/src/x11_platform.h
@@ -0,0 +1,1003 @@
+//========================================================================
+// GLFW 3.4 X11 - www.glfw.org
+//------------------------------------------------------------------------
+// Copyright (c) 2002-2006 Marcus Geelnard
+// Copyright (c) 2006-2019 Camilla Löwy <elmindreda@glfw.org>
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would
+// be appreciated but is not required.
+//
+// 2. Altered source versions must be plainly marked as such, and must not
+// be misrepresented as being the original software.
+//
+// 3. This notice may not be removed or altered from any source
+// distribution.
+//
+//========================================================================
+
+#include <unistd.h>
+#include <signal.h>
+#include <stdint.h>
+
+#include <X11/Xlib.h>
+#include <X11/keysym.h>
+#include <X11/Xatom.h>
+#include <X11/Xresource.h>
+#include <X11/Xcursor/Xcursor.h>
+
+// The XRandR extension provides mode setting and gamma control
+#include <X11/extensions/Xrandr.h>
+
+// The Xkb extension provides improved keyboard support
+#include <X11/XKBlib.h>
+
+// The Xinerama extension provides legacy monitor indices
+#include <X11/extensions/Xinerama.h>
+
+// The XInput extension provides raw mouse motion input
+#include <X11/extensions/XInput2.h>
+
+// The Shape extension provides custom window shapes
+#include <X11/extensions/shape.h>
+
+#define GLX_VENDOR 1
+#define GLX_RGBA_BIT 0x00000001
+#define GLX_WINDOW_BIT 0x00000001
+#define GLX_DRAWABLE_TYPE 0x8010
+#define GLX_RENDER_TYPE 0x8011
+#define GLX_RGBA_TYPE 0x8014
+#define GLX_DOUBLEBUFFER 5
+#define GLX_STEREO 6
+#define GLX_AUX_BUFFERS 7
+#define GLX_RED_SIZE 8
+#define GLX_GREEN_SIZE 9
+#define GLX_BLUE_SIZE 10
+#define GLX_ALPHA_SIZE 11
+#define GLX_DEPTH_SIZE 12
+#define GLX_STENCIL_SIZE 13
+#define GLX_ACCUM_RED_SIZE 14
+#define GLX_ACCUM_GREEN_SIZE 15
+#define GLX_ACCUM_BLUE_SIZE 16
+#define GLX_ACCUM_ALPHA_SIZE 17
+#define GLX_SAMPLES 0x186a1
+#define GLX_VISUAL_ID 0x800b
+
+#define GLX_FRAMEBUFFER_SRGB_CAPABLE_ARB 0x20b2
+#define GLX_CONTEXT_DEBUG_BIT_ARB 0x00000001
+#define GLX_CONTEXT_COMPATIBILITY_PROFILE_BIT_ARB 0x00000002
+#define GLX_CONTEXT_CORE_PROFILE_BIT_ARB 0x00000001
+#define GLX_CONTEXT_PROFILE_MASK_ARB 0x9126
+#define GLX_CONTEXT_FORWARD_COMPATIBLE_BIT_ARB 0x00000002
+#define GLX_CONTEXT_MAJOR_VERSION_ARB 0x2091
+#define GLX_CONTEXT_MINOR_VERSION_ARB 0x2092
+#define GLX_CONTEXT_FLAGS_ARB 0x2094
+#define GLX_CONTEXT_ES2_PROFILE_BIT_EXT 0x00000004
+#define GLX_CONTEXT_ROBUST_ACCESS_BIT_ARB 0x00000004
+#define GLX_LOSE_CONTEXT_ON_RESET_ARB 0x8252
+#define GLX_CONTEXT_RESET_NOTIFICATION_STRATEGY_ARB 0x8256
+#define GLX_NO_RESET_NOTIFICATION_ARB 0x8261
+#define GLX_CONTEXT_RELEASE_BEHAVIOR_ARB 0x2097
+#define GLX_CONTEXT_RELEASE_BEHAVIOR_NONE_ARB 0
+#define GLX_CONTEXT_RELEASE_BEHAVIOR_FLUSH_ARB 0x2098
+#define GLX_CONTEXT_OPENGL_NO_ERROR_ARB 0x31b3
+
+typedef XID GLXWindow;
+typedef XID GLXDrawable;
+typedef struct __GLXFBConfig* GLXFBConfig;
+typedef struct __GLXcontext* GLXContext;
+typedef void (*__GLXextproc)(void);
+
+typedef XClassHint* (* PFN_XAllocClassHint)(void);
+typedef XSizeHints* (* PFN_XAllocSizeHints)(void);
+typedef XWMHints* (* PFN_XAllocWMHints)(void);
+typedef int (* PFN_XChangeProperty)(Display*,Window,Atom,Atom,int,int,const unsigned char*,int);
+typedef int (* PFN_XChangeWindowAttributes)(Display*,Window,unsigned long,XSetWindowAttributes*);
+typedef Bool (* PFN_XCheckIfEvent)(Display*,XEvent*,Bool(*)(Display*,XEvent*,XPointer),XPointer);
+typedef Bool (* PFN_XCheckTypedWindowEvent)(Display*,Window,int,XEvent*);
+typedef int (* PFN_XCloseDisplay)(Display*);
+typedef Status (* PFN_XCloseIM)(XIM);
+typedef int (* PFN_XConvertSelection)(Display*,Atom,Atom,Atom,Window,Time);
+typedef Colormap (* PFN_XCreateColormap)(Display*,Window,Visual*,int);
+typedef Cursor (* PFN_XCreateFontCursor)(Display*,unsigned int);
+typedef XIC (* PFN_XCreateIC)(XIM,...);
+typedef Region (* PFN_XCreateRegion)(void);
+typedef Window (* PFN_XCreateWindow)(Display*,Window,int,int,unsigned int,unsigned int,unsigned int,int,unsigned int,Visual*,unsigned long,XSetWindowAttributes*);
+typedef int (* PFN_XDefineCursor)(Display*,Window,Cursor);
+typedef int (* PFN_XDeleteContext)(Display*,XID,XContext);
+typedef int (* PFN_XDeleteProperty)(Display*,Window,Atom);
+typedef void (* PFN_XDestroyIC)(XIC);
+typedef int (* PFN_XDestroyRegion)(Region);
+typedef int (* PFN_XDestroyWindow)(Display*,Window);
+typedef int (* PFN_XDisplayKeycodes)(Display*,int*,int*);
+typedef int (* PFN_XEventsQueued)(Display*,int);
+typedef Bool (* PFN_XFilterEvent)(XEvent*,Window);
+typedef int (* PFN_XFindContext)(Display*,XID,XContext,XPointer*);
+typedef int (* PFN_XFlush)(Display*);
+typedef int (* PFN_XFree)(void*);
+typedef int (* PFN_XFreeColormap)(Display*,Colormap);
+typedef int (* PFN_XFreeCursor)(Display*,Cursor);
+typedef void (* PFN_XFreeEventData)(Display*,XGenericEventCookie*);
+typedef int (* PFN_XGetErrorText)(Display*,int,char*,int);
+typedef Bool (* PFN_XGetEventData)(Display*,XGenericEventCookie*);
+typedef char* (* PFN_XGetICValues)(XIC,...);
+typedef char* (* PFN_XGetIMValues)(XIM,...);
+typedef int (* PFN_XGetInputFocus)(Display*,Window*,int*);
+typedef KeySym* (* PFN_XGetKeyboardMapping)(Display*,KeyCode,int,int*);
+typedef int (* PFN_XGetScreenSaver)(Display*,int*,int*,int*,int*);
+typedef Window (* PFN_XGetSelectionOwner)(Display*,Atom);
+typedef XVisualInfo* (* PFN_XGetVisualInfo)(Display*,long,XVisualInfo*,int*);
+typedef Status (* PFN_XGetWMNormalHints)(Display*,Window,XSizeHints*,long*);
+typedef Status (* PFN_XGetWindowAttributes)(Display*,Window,XWindowAttributes*);
+typedef int (* PFN_XGetWindowProperty)(Display*,Window,Atom,long,long,Bool,Atom,Atom*,int*,unsigned long*,unsigned long*,unsigned char**);
+typedef int (* PFN_XGrabPointer)(Display*,Window,Bool,unsigned int,int,int,Window,Cursor,Time);
+typedef Status (* PFN_XIconifyWindow)(Display*,Window,int);
+typedef Status (* PFN_XInitThreads)(void);
+typedef Atom (* PFN_XInternAtom)(Display*,const char*,Bool);
+typedef int (* PFN_XLookupString)(XKeyEvent*,char*,int,KeySym*,XComposeStatus*);
+typedef int (* PFN_XMapRaised)(Display*,Window);
+typedef int (* PFN_XMapWindow)(Display*,Window);
+typedef int (* PFN_XMoveResizeWindow)(Display*,Window,int,int,unsigned int,unsigned int);
+typedef int (* PFN_XMoveWindow)(Display*,Window,int,int);
+typedef int (* PFN_XNextEvent)(Display*,XEvent*);
+typedef Display* (* PFN_XOpenDisplay)(const char*);
+typedef XIM (* PFN_XOpenIM)(Display*,XrmDatabase*,char*,char*);
+typedef int (* PFN_XPeekEvent)(Display*,XEvent*);
+typedef int (* PFN_XPending)(Display*);
+typedef Bool (* PFN_XQueryExtension)(Display*,const char*,int*,int*,int*);
+typedef Bool (* PFN_XQueryPointer)(Display*,Window,Window*,Window*,int*,int*,int*,int*,unsigned int*);
+typedef int (* PFN_XRaiseWindow)(Display*,Window);
+typedef Bool (* PFN_XRegisterIMInstantiateCallback)(Display*,void*,char*,char*,XIDProc,XPointer);
+typedef int (* PFN_XResizeWindow)(Display*,Window,unsigned int,unsigned int);
+typedef char* (* PFN_XResourceManagerString)(Display*);
+typedef int (* PFN_XSaveContext)(Display*,XID,XContext,const char*);
+typedef int (* PFN_XSelectInput)(Display*,Window,long);
+typedef Status (* PFN_XSendEvent)(Display*,Window,Bool,long,XEvent*);
+typedef int (* PFN_XSetClassHint)(Display*,Window,XClassHint*);
+typedef XErrorHandler (* PFN_XSetErrorHandler)(XErrorHandler);
+typedef void (* PFN_XSetICFocus)(XIC);
+typedef char* (* PFN_XSetIMValues)(XIM,...);
+typedef int (* PFN_XSetInputFocus)(Display*,Window,int,Time);
+typedef char* (* PFN_XSetLocaleModifiers)(const char*);
+typedef int (* PFN_XSetScreenSaver)(Display*,int,int,int,int);
+typedef int (* PFN_XSetSelectionOwner)(Display*,Atom,Window,Time);
+typedef int (* PFN_XSetWMHints)(Display*,Window,XWMHints*);
+typedef void (* PFN_XSetWMNormalHints)(Display*,Window,XSizeHints*);
+typedef Status (* PFN_XSetWMProtocols)(Display*,Window,Atom*,int);
+typedef Bool (* PFN_XSupportsLocale)(void);
+typedef int (* PFN_XSync)(Display*,Bool);
+typedef Bool (* PFN_XTranslateCoordinates)(Display*,Window,Window,int,int,int*,int*,Window*);
+typedef int (* PFN_XUndefineCursor)(Display*,Window);
+typedef int (* PFN_XUngrabPointer)(Display*,Time);
+typedef int (* PFN_XUnmapWindow)(Display*,Window);
+typedef void (* PFN_XUnsetICFocus)(XIC);
+typedef VisualID (* PFN_XVisualIDFromVisual)(Visual*);
+typedef int (* PFN_XWarpPointer)(Display*,Window,Window,int,int,unsigned int,unsigned int,int,int);
+typedef void (* PFN_XkbFreeKeyboard)(XkbDescPtr,unsigned int,Bool);
+typedef void (* PFN_XkbFreeNames)(XkbDescPtr,unsigned int,Bool);
+typedef XkbDescPtr (* PFN_XkbGetMap)(Display*,unsigned int,unsigned int);
+typedef Status (* PFN_XkbGetNames)(Display*,unsigned int,XkbDescPtr);
+typedef Status (* PFN_XkbGetState)(Display*,unsigned int,XkbStatePtr);
+typedef KeySym (* PFN_XkbKeycodeToKeysym)(Display*,KeyCode,int,int);
+typedef Bool (* PFN_XkbQueryExtension)(Display*,int*,int*,int*,int*,int*);
+typedef Bool (* PFN_XkbSelectEventDetails)(Display*,unsigned int,unsigned int,unsigned long,unsigned long);
+typedef Bool (* PFN_XkbSetDetectableAutoRepeat)(Display*,Bool,Bool*);
+typedef void (* PFN_XrmDestroyDatabase)(XrmDatabase);
+typedef Bool (* PFN_XrmGetResource)(XrmDatabase,const char*,const char*,char**,XrmValue*);
+typedef XrmDatabase (* PFN_XrmGetStringDatabase)(const char*);
+typedef void (* PFN_XrmInitialize)(void);
+typedef XrmQuark (* PFN_XrmUniqueQuark)(void);
+typedef Bool (* PFN_XUnregisterIMInstantiateCallback)(Display*,void*,char*,char*,XIDProc,XPointer);
+typedef int (* PFN_Xutf8LookupString)(XIC,XKeyPressedEvent*,char*,int,KeySym*,Status*);
+typedef void (* PFN_Xutf8SetWMProperties)(Display*,Window,const char*,const char*,char**,int,XSizeHints*,XWMHints*,XClassHint*);
+#define XAllocClassHint _glfw.x11.xlib.AllocClassHint
+#define XAllocSizeHints _glfw.x11.xlib.AllocSizeHints
+#define XAllocWMHints _glfw.x11.xlib.AllocWMHints
+#define XChangeProperty _glfw.x11.xlib.ChangeProperty
+#define XChangeWindowAttributes _glfw.x11.xlib.ChangeWindowAttributes
+#define XCheckIfEvent _glfw.x11.xlib.CheckIfEvent
+#define XCheckTypedWindowEvent _glfw.x11.xlib.CheckTypedWindowEvent
+#define XCloseDisplay _glfw.x11.xlib.CloseDisplay
+#define XCloseIM _glfw.x11.xlib.CloseIM
+#define XConvertSelection _glfw.x11.xlib.ConvertSelection
+#define XCreateColormap _glfw.x11.xlib.CreateColormap
+#define XCreateFontCursor _glfw.x11.xlib.CreateFontCursor
+#define XCreateIC _glfw.x11.xlib.CreateIC
+#define XCreateRegion _glfw.x11.xlib.CreateRegion
+#define XCreateWindow _glfw.x11.xlib.CreateWindow
+#define XDefineCursor _glfw.x11.xlib.DefineCursor
+#define XDeleteContext _glfw.x11.xlib.DeleteContext
+#define XDeleteProperty _glfw.x11.xlib.DeleteProperty
+#define XDestroyIC _glfw.x11.xlib.DestroyIC
+#define XDestroyRegion _glfw.x11.xlib.DestroyRegion
+#define XDestroyWindow _glfw.x11.xlib.DestroyWindow
+#define XDisplayKeycodes _glfw.x11.xlib.DisplayKeycodes
+#define XEventsQueued _glfw.x11.xlib.EventsQueued
+#define XFilterEvent _glfw.x11.xlib.FilterEvent
+#define XFindContext _glfw.x11.xlib.FindContext
+#define XFlush _glfw.x11.xlib.Flush
+#define XFree _glfw.x11.xlib.Free
+#define XFreeColormap _glfw.x11.xlib.FreeColormap
+#define XFreeCursor _glfw.x11.xlib.FreeCursor
+#define XFreeEventData _glfw.x11.xlib.FreeEventData
+#define XGetErrorText _glfw.x11.xlib.GetErrorText
+#define XGetEventData _glfw.x11.xlib.GetEventData
+#define XGetICValues _glfw.x11.xlib.GetICValues
+#define XGetIMValues _glfw.x11.xlib.GetIMValues
+#define XGetInputFocus _glfw.x11.xlib.GetInputFocus
+#define XGetKeyboardMapping _glfw.x11.xlib.GetKeyboardMapping
+#define XGetScreenSaver _glfw.x11.xlib.GetScreenSaver
+#define XGetSelectionOwner _glfw.x11.xlib.GetSelectionOwner
+#define XGetVisualInfo _glfw.x11.xlib.GetVisualInfo
+#define XGetWMNormalHints _glfw.x11.xlib.GetWMNormalHints
+#define XGetWindowAttributes _glfw.x11.xlib.GetWindowAttributes
+#define XGetWindowProperty _glfw.x11.xlib.GetWindowProperty
+#define XGrabPointer _glfw.x11.xlib.GrabPointer
+#define XIconifyWindow _glfw.x11.xlib.IconifyWindow
+#define XInternAtom _glfw.x11.xlib.InternAtom
+#define XLookupString _glfw.x11.xlib.LookupString
+#define XMapRaised _glfw.x11.xlib.MapRaised
+#define XMapWindow _glfw.x11.xlib.MapWindow
+#define XMoveResizeWindow _glfw.x11.xlib.MoveResizeWindow
+#define XMoveWindow _glfw.x11.xlib.MoveWindow
+#define XNextEvent _glfw.x11.xlib.NextEvent
+#define XOpenIM _glfw.x11.xlib.OpenIM
+#define XPeekEvent _glfw.x11.xlib.PeekEvent
+#define XPending _glfw.x11.xlib.Pending
+#define XQueryExtension _glfw.x11.xlib.QueryExtension
+#define XQueryPointer _glfw.x11.xlib.QueryPointer
+#define XRaiseWindow _glfw.x11.xlib.RaiseWindow
+#define XRegisterIMInstantiateCallback _glfw.x11.xlib.RegisterIMInstantiateCallback
+#define XResizeWindow _glfw.x11.xlib.ResizeWindow
+#define XResourceManagerString _glfw.x11.xlib.ResourceManagerString
+#define XSaveContext _glfw.x11.xlib.SaveContext
+#define XSelectInput _glfw.x11.xlib.SelectInput
+#define XSendEvent _glfw.x11.xlib.SendEvent
+#define XSetClassHint _glfw.x11.xlib.SetClassHint
+#define XSetErrorHandler _glfw.x11.xlib.SetErrorHandler
+#define XSetICFocus _glfw.x11.xlib.SetICFocus
+#define XSetIMValues _glfw.x11.xlib.SetIMValues
+#define XSetInputFocus _glfw.x11.xlib.SetInputFocus
+#define XSetLocaleModifiers _glfw.x11.xlib.SetLocaleModifiers
+#define XSetScreenSaver _glfw.x11.xlib.SetScreenSaver
+#define XSetSelectionOwner _glfw.x11.xlib.SetSelectionOwner
+#define XSetWMHints _glfw.x11.xlib.SetWMHints
+#define XSetWMNormalHints _glfw.x11.xlib.SetWMNormalHints
+#define XSetWMProtocols _glfw.x11.xlib.SetWMProtocols
+#define XSupportsLocale _glfw.x11.xlib.SupportsLocale
+#define XSync _glfw.x11.xlib.Sync
+#define XTranslateCoordinates _glfw.x11.xlib.TranslateCoordinates
+#define XUndefineCursor _glfw.x11.xlib.UndefineCursor
+#define XUngrabPointer _glfw.x11.xlib.UngrabPointer
+#define XUnmapWindow _glfw.x11.xlib.UnmapWindow
+#define XUnsetICFocus _glfw.x11.xlib.UnsetICFocus
+#define XVisualIDFromVisual _glfw.x11.xlib.VisualIDFromVisual
+#define XWarpPointer _glfw.x11.xlib.WarpPointer
+#define XkbFreeKeyboard _glfw.x11.xkb.FreeKeyboard
+#define XkbFreeNames _glfw.x11.xkb.FreeNames
+#define XkbGetMap _glfw.x11.xkb.GetMap
+#define XkbGetNames _glfw.x11.xkb.GetNames
+#define XkbGetState _glfw.x11.xkb.GetState
+#define XkbKeycodeToKeysym _glfw.x11.xkb.KeycodeToKeysym
+#define XkbQueryExtension _glfw.x11.xkb.QueryExtension
+#define XkbSelectEventDetails _glfw.x11.xkb.SelectEventDetails
+#define XkbSetDetectableAutoRepeat _glfw.x11.xkb.SetDetectableAutoRepeat
+#define XrmDestroyDatabase _glfw.x11.xrm.DestroyDatabase
+#define XrmGetResource _glfw.x11.xrm.GetResource
+#define XrmGetStringDatabase _glfw.x11.xrm.GetStringDatabase
+#define XrmUniqueQuark _glfw.x11.xrm.UniqueQuark
+#define XUnregisterIMInstantiateCallback _glfw.x11.xlib.UnregisterIMInstantiateCallback
+#define Xutf8LookupString _glfw.x11.xlib.utf8LookupString
+#define Xutf8SetWMProperties _glfw.x11.xlib.utf8SetWMProperties
+
+typedef XRRCrtcGamma* (* PFN_XRRAllocGamma)(int);
+typedef void (* PFN_XRRFreeCrtcInfo)(XRRCrtcInfo*);
+typedef void (* PFN_XRRFreeGamma)(XRRCrtcGamma*);
+typedef void (* PFN_XRRFreeOutputInfo)(XRROutputInfo*);
+typedef void (* PFN_XRRFreeScreenResources)(XRRScreenResources*);
+typedef XRRCrtcGamma* (* PFN_XRRGetCrtcGamma)(Display*,RRCrtc);
+typedef int (* PFN_XRRGetCrtcGammaSize)(Display*,RRCrtc);
+typedef XRRCrtcInfo* (* PFN_XRRGetCrtcInfo) (Display*,XRRScreenResources*,RRCrtc);
+typedef XRROutputInfo* (* PFN_XRRGetOutputInfo)(Display*,XRRScreenResources*,RROutput);
+typedef RROutput (* PFN_XRRGetOutputPrimary)(Display*,Window);
+typedef XRRScreenResources* (* PFN_XRRGetScreenResourcesCurrent)(Display*,Window);
+typedef Bool (* PFN_XRRQueryExtension)(Display*,int*,int*);
+typedef Status (* PFN_XRRQueryVersion)(Display*,int*,int*);
+typedef void (* PFN_XRRSelectInput)(Display*,Window,int);
+typedef Status (* PFN_XRRSetCrtcConfig)(Display*,XRRScreenResources*,RRCrtc,Time,int,int,RRMode,Rotation,RROutput*,int);
+typedef void (* PFN_XRRSetCrtcGamma)(Display*,RRCrtc,XRRCrtcGamma*);
+typedef int (* PFN_XRRUpdateConfiguration)(XEvent*);
+#define XRRAllocGamma _glfw.x11.randr.AllocGamma
+#define XRRFreeCrtcInfo _glfw.x11.randr.FreeCrtcInfo
+#define XRRFreeGamma _glfw.x11.randr.FreeGamma
+#define XRRFreeOutputInfo _glfw.x11.randr.FreeOutputInfo
+#define XRRFreeScreenResources _glfw.x11.randr.FreeScreenResources
+#define XRRGetCrtcGamma _glfw.x11.randr.GetCrtcGamma
+#define XRRGetCrtcGammaSize _glfw.x11.randr.GetCrtcGammaSize
+#define XRRGetCrtcInfo _glfw.x11.randr.GetCrtcInfo
+#define XRRGetOutputInfo _glfw.x11.randr.GetOutputInfo
+#define XRRGetOutputPrimary _glfw.x11.randr.GetOutputPrimary
+#define XRRGetScreenResourcesCurrent _glfw.x11.randr.GetScreenResourcesCurrent
+#define XRRQueryExtension _glfw.x11.randr.QueryExtension
+#define XRRQueryVersion _glfw.x11.randr.QueryVersion
+#define XRRSelectInput _glfw.x11.randr.SelectInput
+#define XRRSetCrtcConfig _glfw.x11.randr.SetCrtcConfig
+#define XRRSetCrtcGamma _glfw.x11.randr.SetCrtcGamma
+#define XRRUpdateConfiguration _glfw.x11.randr.UpdateConfiguration
+
+typedef XcursorImage* (* PFN_XcursorImageCreate)(int,int);
+typedef void (* PFN_XcursorImageDestroy)(XcursorImage*);
+typedef Cursor (* PFN_XcursorImageLoadCursor)(Display*,const XcursorImage*);
+typedef char* (* PFN_XcursorGetTheme)(Display*);
+typedef int (* PFN_XcursorGetDefaultSize)(Display*);
+typedef XcursorImage* (* PFN_XcursorLibraryLoadImage)(const char*,const char*,int);
+#define XcursorImageCreate _glfw.x11.xcursor.ImageCreate
+#define XcursorImageDestroy _glfw.x11.xcursor.ImageDestroy
+#define XcursorImageLoadCursor _glfw.x11.xcursor.ImageLoadCursor
+#define XcursorGetTheme _glfw.x11.xcursor.GetTheme
+#define XcursorGetDefaultSize _glfw.x11.xcursor.GetDefaultSize
+#define XcursorLibraryLoadImage _glfw.x11.xcursor.LibraryLoadImage
+
+typedef Bool (* PFN_XineramaIsActive)(Display*);
+typedef Bool (* PFN_XineramaQueryExtension)(Display*,int*,int*);
+typedef XineramaScreenInfo* (* PFN_XineramaQueryScreens)(Display*,int*);
+#define XineramaIsActive _glfw.x11.xinerama.IsActive
+#define XineramaQueryExtension _glfw.x11.xinerama.QueryExtension
+#define XineramaQueryScreens _glfw.x11.xinerama.QueryScreens
+
+typedef XID xcb_window_t;
+typedef XID xcb_visualid_t;
+typedef struct xcb_connection_t xcb_connection_t;
+typedef xcb_connection_t* (* PFN_XGetXCBConnection)(Display*);
+#define XGetXCBConnection _glfw.x11.x11xcb.GetXCBConnection
+
+typedef Bool (* PFN_XF86VidModeQueryExtension)(Display*,int*,int*);
+typedef Bool (* PFN_XF86VidModeGetGammaRamp)(Display*,int,int,unsigned short*,unsigned short*,unsigned short*);
+typedef Bool (* PFN_XF86VidModeSetGammaRamp)(Display*,int,int,unsigned short*,unsigned short*,unsigned short*);
+typedef Bool (* PFN_XF86VidModeGetGammaRampSize)(Display*,int,int*);
+#define XF86VidModeQueryExtension _glfw.x11.vidmode.QueryExtension
+#define XF86VidModeGetGammaRamp _glfw.x11.vidmode.GetGammaRamp
+#define XF86VidModeSetGammaRamp _glfw.x11.vidmode.SetGammaRamp
+#define XF86VidModeGetGammaRampSize _glfw.x11.vidmode.GetGammaRampSize
+
+typedef Status (* PFN_XIQueryVersion)(Display*,int*,int*);
+typedef int (* PFN_XISelectEvents)(Display*,Window,XIEventMask*,int);
+#define XIQueryVersion _glfw.x11.xi.QueryVersion
+#define XISelectEvents _glfw.x11.xi.SelectEvents
+
+typedef Bool (* PFN_XRenderQueryExtension)(Display*,int*,int*);
+typedef Status (* PFN_XRenderQueryVersion)(Display*,int*,int*);
+typedef XRenderPictFormat* (* PFN_XRenderFindVisualFormat)(Display*,Visual const*);
+#define XRenderQueryExtension _glfw.x11.xrender.QueryExtension
+#define XRenderQueryVersion _glfw.x11.xrender.QueryVersion
+#define XRenderFindVisualFormat _glfw.x11.xrender.FindVisualFormat
+
+typedef Bool (* PFN_XShapeQueryExtension)(Display*,int*,int*);
+typedef Status (* PFN_XShapeQueryVersion)(Display*,int*,int*);
+typedef void (* PFN_XShapeCombineRegion)(Display*,Window,int,int,int,Region,int);
+typedef void (* PFN_XShapeCombineMask)(Display*,Window,int,int,int,Pixmap,int);
+
+#define XShapeQueryExtension _glfw.x11.xshape.QueryExtension
+#define XShapeQueryVersion _glfw.x11.xshape.QueryVersion
+#define XShapeCombineRegion _glfw.x11.xshape.ShapeCombineRegion
+#define XShapeCombineMask _glfw.x11.xshape.ShapeCombineMask
+
+typedef int (*PFNGLXGETFBCONFIGATTRIBPROC)(Display*,GLXFBConfig,int,int*);
+typedef const char* (*PFNGLXGETCLIENTSTRINGPROC)(Display*,int);
+typedef Bool (*PFNGLXQUERYEXTENSIONPROC)(Display*,int*,int*);
+typedef Bool (*PFNGLXQUERYVERSIONPROC)(Display*,int*,int*);
+typedef void (*PFNGLXDESTROYCONTEXTPROC)(Display*,GLXContext);
+typedef Bool (*PFNGLXMAKECURRENTPROC)(Display*,GLXDrawable,GLXContext);
+typedef void (*PFNGLXSWAPBUFFERSPROC)(Display*,GLXDrawable);
+typedef const char* (*PFNGLXQUERYEXTENSIONSSTRINGPROC)(Display*,int);
+typedef GLXFBConfig* (*PFNGLXGETFBCONFIGSPROC)(Display*,int,int*);
+typedef GLXContext (*PFNGLXCREATENEWCONTEXTPROC)(Display*,GLXFBConfig,int,GLXContext,Bool);
+typedef __GLXextproc (* PFNGLXGETPROCADDRESSPROC)(const GLubyte *procName);
+typedef void (*PFNGLXSWAPINTERVALEXTPROC)(Display*,GLXDrawable,int);
+typedef XVisualInfo* (*PFNGLXGETVISUALFROMFBCONFIGPROC)(Display*,GLXFBConfig);
+typedef GLXWindow (*PFNGLXCREATEWINDOWPROC)(Display*,GLXFBConfig,Window,const int*);
+typedef void (*PFNGLXDESTROYWINDOWPROC)(Display*,GLXWindow);
+
+typedef int (*PFNGLXSWAPINTERVALMESAPROC)(int);
+typedef int (*PFNGLXSWAPINTERVALSGIPROC)(int);
+typedef GLXContext (*PFNGLXCREATECONTEXTATTRIBSARBPROC)(Display*,GLXFBConfig,GLXContext,Bool,const int*);
+
+// Macros redirecting glX calls to the function pointers loaded from libGL.so
+#define glXGetFBConfigs _glfw.glx.GetFBConfigs
+#define glXGetFBConfigAttrib _glfw.glx.GetFBConfigAttrib
+#define glXGetClientString _glfw.glx.GetClientString
+#define glXQueryExtension _glfw.glx.QueryExtension
+#define glXQueryVersion _glfw.glx.QueryVersion
+#define glXDestroyContext _glfw.glx.DestroyContext
+#define glXMakeCurrent _glfw.glx.MakeCurrent
+#define glXSwapBuffers _glfw.glx.SwapBuffers
+#define glXQueryExtensionsString _glfw.glx.QueryExtensionsString
+#define glXCreateNewContext _glfw.glx.CreateNewContext
+#define glXGetVisualFromFBConfig _glfw.glx.GetVisualFromFBConfig
+#define glXCreateWindow _glfw.glx.CreateWindow
+#define glXDestroyWindow _glfw.glx.DestroyWindow
+
+typedef VkFlags VkXlibSurfaceCreateFlagsKHR;
+typedef VkFlags VkXcbSurfaceCreateFlagsKHR;
+
+typedef struct VkXlibSurfaceCreateInfoKHR
+{
+ VkStructureType sType;
+ const void* pNext;
+ VkXlibSurfaceCreateFlagsKHR flags;
+ Display* dpy;
+ Window window;
+} VkXlibSurfaceCreateInfoKHR;
+
+typedef struct VkXcbSurfaceCreateInfoKHR
+{
+ VkStructureType sType;
+ const void* pNext;
+ VkXcbSurfaceCreateFlagsKHR flags;
+ xcb_connection_t* connection;
+ xcb_window_t window;
+} VkXcbSurfaceCreateInfoKHR;
+
+typedef VkResult (APIENTRY *PFN_vkCreateXlibSurfaceKHR)(VkInstance,const VkXlibSurfaceCreateInfoKHR*,const VkAllocationCallbacks*,VkSurfaceKHR*);
+typedef VkBool32 (APIENTRY *PFN_vkGetPhysicalDeviceXlibPresentationSupportKHR)(VkPhysicalDevice,uint32_t,Display*,VisualID);
+typedef VkResult (APIENTRY *PFN_vkCreateXcbSurfaceKHR)(VkInstance,const VkXcbSurfaceCreateInfoKHR*,const VkAllocationCallbacks*,VkSurfaceKHR*);
+typedef VkBool32 (APIENTRY *PFN_vkGetPhysicalDeviceXcbPresentationSupportKHR)(VkPhysicalDevice,uint32_t,xcb_connection_t*,xcb_visualid_t);
+
+#include "xkb_unicode.h"
+#include "posix_poll.h"
+
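+// These macros embed the X11/GLX-specific state defined below into the
+// shared _GLFWwindow, _GLFWlibrary, _GLFWmonitor and _GLFWcursor structs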
+#define GLFW_X11_WINDOW_STATE _GLFWwindowX11 x11;
+#define GLFW_X11_LIBRARY_WINDOW_STATE _GLFWlibraryX11 x11;
+#define GLFW_X11_MONITOR_STATE _GLFWmonitorX11 x11;
+#define GLFW_X11_CURSOR_STATE _GLFWcursorX11 x11;
+
+#define GLFW_GLX_CONTEXT_STATE _GLFWcontextGLX glx;
+#define GLFW_GLX_LIBRARY_CONTEXT_STATE _GLFWlibraryGLX glx;
+
+
+// GLX-specific per-context data
+//
+typedef struct _GLFWcontextGLX
+{
+ GLXContext handle;
+ GLXWindow window;
+} _GLFWcontextGLX;
+
+// GLX-specific global data
+//
+typedef struct _GLFWlibraryGLX
+{
+ int major, minor;
+ int eventBase;
+ int errorBase;
+
+ // dlopen handle for libGL.so.1
+ void* handle;
+
+ // GLX 1.3 functions
+ PFNGLXGETFBCONFIGSPROC GetFBConfigs;
+ PFNGLXGETFBCONFIGATTRIBPROC GetFBConfigAttrib;
+ PFNGLXGETCLIENTSTRINGPROC GetClientString;
+ PFNGLXQUERYEXTENSIONPROC QueryExtension;
+ PFNGLXQUERYVERSIONPROC QueryVersion;
+ PFNGLXDESTROYCONTEXTPROC DestroyContext;
+ PFNGLXMAKECURRENTPROC MakeCurrent;
+ PFNGLXSWAPBUFFERSPROC SwapBuffers;
+ PFNGLXQUERYEXTENSIONSSTRINGPROC QueryExtensionsString;
+ PFNGLXCREATENEWCONTEXTPROC CreateNewContext;
+ PFNGLXGETVISUALFROMFBCONFIGPROC GetVisualFromFBConfig;
+ PFNGLXCREATEWINDOWPROC CreateWindow;
+ PFNGLXDESTROYWINDOWPROC DestroyWindow;
+
+ // GLX 1.4 and extension functions
+ PFNGLXGETPROCADDRESSPROC GetProcAddress;
+ PFNGLXGETPROCADDRESSPROC GetProcAddressARB;
+ PFNGLXSWAPINTERVALSGIPROC SwapIntervalSGI;
+ PFNGLXSWAPINTERVALEXTPROC SwapIntervalEXT;
+ PFNGLXSWAPINTERVALMESAPROC SwapIntervalMESA;
+ PFNGLXCREATECONTEXTATTRIBSARBPROC CreateContextAttribsARB;
+ GLFWbool SGI_swap_control;
+ GLFWbool EXT_swap_control;
+ GLFWbool MESA_swap_control;
+ GLFWbool ARB_multisample;
+ GLFWbool ARB_framebuffer_sRGB;
+ GLFWbool EXT_framebuffer_sRGB;
+ GLFWbool ARB_create_context;
+ GLFWbool ARB_create_context_profile;
+ GLFWbool ARB_create_context_robustness;
+ GLFWbool EXT_create_context_es2_profile;
+ GLFWbool ARB_create_context_no_error;
+ GLFWbool ARB_context_flush_control;
+} _GLFWlibraryGLX;
+
+// X11-specific per-window data
+//
+typedef struct _GLFWwindowX11
+{
+ Colormap colormap;
+ Window handle;
+ Window parent;
+ XIC ic;
+
+ GLFWbool overrideRedirect;
+ GLFWbool iconified;
+ GLFWbool maximized;
+
+ // Whether the visual supports framebuffer transparency
+ GLFWbool transparent;
+
+ // Cached position and size used to filter out duplicate events
+ int width, height;
+ int xpos, ypos;
+
+ // The last received cursor position, regardless of source
+ int lastCursorPosX, lastCursorPosY;
+ // The last position the cursor was warped to by GLFW
+ int warpCursorPosX, warpCursorPosY;
+
+ // The time of the last KeyPress event per keycode, for discarding
+ // duplicate key events generated for some keys by ibus
+ Time keyPressTimes[256];
+} _GLFWwindowX11;
+
+// X11-specific global data
+//
+typedef struct _GLFWlibraryX11
+{
+ Display* display;
+ int screen;
+ Window root;
+
+ // System content scale
+ float contentScaleX, contentScaleY;
+ // Helper window for IPC
+ Window helperWindowHandle;
+ // Invisible cursor for hidden cursor mode
+ Cursor hiddenCursorHandle;
+ // Context for mapping window XIDs to _GLFWwindow pointers
+ XContext context;
+ // XIM input method
+ XIM im;
+ // Most recent error code received by X error handler
+ int errorCode;
+ // Primary selection string (while the primary selection is owned)
+ char* primarySelectionString;
+ // Clipboard string (while the selection is owned)
+ char* clipboardString;
+ // Key name string
+ char keynames[GLFW_KEY_LAST + 1][5];
+ // X11 keycode to GLFW key LUT
+ short int keycodes[256];
+ // GLFW key to X11 keycode LUT
+ short int scancodes[GLFW_KEY_LAST + 1];
+ // Where to place the cursor when re-enabled
+ double restoreCursorPosX, restoreCursorPosY;
+ // The window whose disabled cursor mode is active
+ _GLFWwindow* disabledCursorWindow;
+ int emptyEventPipe[2];
+
+ // Window manager atoms
+ Atom NET_SUPPORTED;
+ Atom NET_SUPPORTING_WM_CHECK;
+ Atom WM_PROTOCOLS;
+ Atom WM_STATE;
+ Atom WM_DELETE_WINDOW;
+ Atom NET_WM_NAME;
+ Atom NET_WM_ICON_NAME;
+ Atom NET_WM_ICON;
+ Atom NET_WM_PID;
+ Atom NET_WM_PING;
+ Atom NET_WM_WINDOW_TYPE;
+ Atom NET_WM_WINDOW_TYPE_NORMAL;
+ Atom NET_WM_STATE;
+ Atom NET_WM_STATE_ABOVE;
+ Atom NET_WM_STATE_FULLSCREEN;
+ Atom NET_WM_STATE_MAXIMIZED_VERT;
+ Atom NET_WM_STATE_MAXIMIZED_HORZ;
+ Atom NET_WM_STATE_DEMANDS_ATTENTION;
+ Atom NET_WM_BYPASS_COMPOSITOR;
+ Atom NET_WM_FULLSCREEN_MONITORS;
+ Atom NET_WM_WINDOW_OPACITY;
+ Atom NET_WM_CM_Sx;
+ Atom NET_WORKAREA;
+ Atom NET_CURRENT_DESKTOP;
+ Atom NET_ACTIVE_WINDOW;
+ Atom NET_FRAME_EXTENTS;
+ Atom NET_REQUEST_FRAME_EXTENTS;
+ Atom MOTIF_WM_HINTS;
+
+ // Xdnd (drag and drop) atoms
+ Atom XdndAware;
+ Atom XdndEnter;
+ Atom XdndPosition;
+ Atom XdndStatus;
+ Atom XdndActionCopy;
+ Atom XdndDrop;
+ Atom XdndFinished;
+ Atom XdndSelection;
+ Atom XdndTypeList;
+ Atom text_uri_list;
+
+ // Selection (clipboard) atoms
+ Atom TARGETS;
+ Atom MULTIPLE;
+ Atom INCR;
+ Atom CLIPBOARD;
+ Atom PRIMARY;
+ Atom CLIPBOARD_MANAGER;
+ Atom SAVE_TARGETS;
+ Atom NULL_;
+ Atom UTF8_STRING;
+ Atom COMPOUND_STRING;
+ Atom ATOM_PAIR;
+ Atom GLFW_SELECTION;
+
+ struct {
+ void* handle;
+ GLFWbool utf8;
+ PFN_XAllocClassHint AllocClassHint;
+ PFN_XAllocSizeHints AllocSizeHints;
+ PFN_XAllocWMHints AllocWMHints;
+ PFN_XChangeProperty ChangeProperty;
+ PFN_XChangeWindowAttributes ChangeWindowAttributes;
+ PFN_XCheckIfEvent CheckIfEvent;
+ PFN_XCheckTypedWindowEvent CheckTypedWindowEvent;
+ PFN_XCloseDisplay CloseDisplay;
+ PFN_XCloseIM CloseIM;
+ PFN_XConvertSelection ConvertSelection;
+ PFN_XCreateColormap CreateColormap;
+ PFN_XCreateFontCursor CreateFontCursor;
+ PFN_XCreateIC CreateIC;
+ PFN_XCreateRegion CreateRegion;
+ PFN_XCreateWindow CreateWindow;
+ PFN_XDefineCursor DefineCursor;
+ PFN_XDeleteContext DeleteContext;
+ PFN_XDeleteProperty DeleteProperty;
+ PFN_XDestroyIC DestroyIC;
+ PFN_XDestroyRegion DestroyRegion;
+ PFN_XDestroyWindow DestroyWindow;
+ PFN_XDisplayKeycodes DisplayKeycodes;
+ PFN_XEventsQueued EventsQueued;
+ PFN_XFilterEvent FilterEvent;
+ PFN_XFindContext FindContext;
+ PFN_XFlush Flush;
+ PFN_XFree Free;
+ PFN_XFreeColormap FreeColormap;
+ PFN_XFreeCursor FreeCursor;
+ PFN_XFreeEventData FreeEventData;
+ PFN_XGetErrorText GetErrorText;
+ PFN_XGetEventData GetEventData;
+ PFN_XGetICValues GetICValues;
+ PFN_XGetIMValues GetIMValues;
+ PFN_XGetInputFocus GetInputFocus;
+ PFN_XGetKeyboardMapping GetKeyboardMapping;
+ PFN_XGetScreenSaver GetScreenSaver;
+ PFN_XGetSelectionOwner GetSelectionOwner;
+ PFN_XGetVisualInfo GetVisualInfo;
+ PFN_XGetWMNormalHints GetWMNormalHints;
+ PFN_XGetWindowAttributes GetWindowAttributes;
+ PFN_XGetWindowProperty GetWindowProperty;
+ PFN_XGrabPointer GrabPointer;
+ PFN_XIconifyWindow IconifyWindow;
+ PFN_XInternAtom InternAtom;
+ PFN_XLookupString LookupString;
+ PFN_XMapRaised MapRaised;
+ PFN_XMapWindow MapWindow;
+ PFN_XMoveResizeWindow MoveResizeWindow;
+ PFN_XMoveWindow MoveWindow;
+ PFN_XNextEvent NextEvent;
+ PFN_XOpenIM OpenIM;
+ PFN_XPeekEvent PeekEvent;
+ PFN_XPending Pending;
+ PFN_XQueryExtension QueryExtension;
+ PFN_XQueryPointer QueryPointer;
+ PFN_XRaiseWindow RaiseWindow;
+ PFN_XRegisterIMInstantiateCallback RegisterIMInstantiateCallback;
+ PFN_XResizeWindow ResizeWindow;
+ PFN_XResourceManagerString ResourceManagerString;
+ PFN_XSaveContext SaveContext;
+ PFN_XSelectInput SelectInput;
+ PFN_XSendEvent SendEvent;
+ PFN_XSetClassHint SetClassHint;
+ PFN_XSetErrorHandler SetErrorHandler;
+ PFN_XSetICFocus SetICFocus;
+ PFN_XSetIMValues SetIMValues;
+ PFN_XSetInputFocus SetInputFocus;
+ PFN_XSetLocaleModifiers SetLocaleModifiers;
+ PFN_XSetScreenSaver SetScreenSaver;
+ PFN_XSetSelectionOwner SetSelectionOwner;
+ PFN_XSetWMHints SetWMHints;
+ PFN_XSetWMNormalHints SetWMNormalHints;
+ PFN_XSetWMProtocols SetWMProtocols;
+ PFN_XSupportsLocale SupportsLocale;
+ PFN_XSync Sync;
+ PFN_XTranslateCoordinates TranslateCoordinates;
+ PFN_XUndefineCursor UndefineCursor;
+ PFN_XUngrabPointer UngrabPointer;
+ PFN_XUnmapWindow UnmapWindow;
+ PFN_XUnsetICFocus UnsetICFocus;
+ PFN_XVisualIDFromVisual VisualIDFromVisual;
+ PFN_XWarpPointer WarpPointer;
+ PFN_XUnregisterIMInstantiateCallback UnregisterIMInstantiateCallback;
+ PFN_Xutf8LookupString utf8LookupString;
+ PFN_Xutf8SetWMProperties utf8SetWMProperties;
+ } xlib;
+
+ struct {
+ PFN_XrmDestroyDatabase DestroyDatabase;
+ PFN_XrmGetResource GetResource;
+ PFN_XrmGetStringDatabase GetStringDatabase;
+ PFN_XrmUniqueQuark UniqueQuark;
+ } xrm;
+
+ struct {
+ GLFWbool available;
+ void* handle;
+ int eventBase;
+ int errorBase;
+ int major;
+ int minor;
+ GLFWbool gammaBroken;
+ GLFWbool monitorBroken;
+ PFN_XRRAllocGamma AllocGamma;
+ PFN_XRRFreeCrtcInfo FreeCrtcInfo;
+ PFN_XRRFreeGamma FreeGamma;
+ PFN_XRRFreeOutputInfo FreeOutputInfo;
+ PFN_XRRFreeScreenResources FreeScreenResources;
+ PFN_XRRGetCrtcGamma GetCrtcGamma;
+ PFN_XRRGetCrtcGammaSize GetCrtcGammaSize;
+ PFN_XRRGetCrtcInfo GetCrtcInfo;
+ PFN_XRRGetOutputInfo GetOutputInfo;
+ PFN_XRRGetOutputPrimary GetOutputPrimary;
+ PFN_XRRGetScreenResourcesCurrent GetScreenResourcesCurrent;
+ PFN_XRRQueryExtension QueryExtension;
+ PFN_XRRQueryVersion QueryVersion;
+ PFN_XRRSelectInput SelectInput;
+ PFN_XRRSetCrtcConfig SetCrtcConfig;
+ PFN_XRRSetCrtcGamma SetCrtcGamma;
+ PFN_XRRUpdateConfiguration UpdateConfiguration;
+ } randr;
+
+ struct {
+ GLFWbool available;
+ GLFWbool detectable;
+ int majorOpcode;
+ int eventBase;
+ int errorBase;
+ int major;
+ int minor;
+ unsigned int group;
+ PFN_XkbFreeKeyboard FreeKeyboard;
+ PFN_XkbFreeNames FreeNames;
+ PFN_XkbGetMap GetMap;
+ PFN_XkbGetNames GetNames;
+ PFN_XkbGetState GetState;
+ PFN_XkbKeycodeToKeysym KeycodeToKeysym;
+ PFN_XkbQueryExtension QueryExtension;
+ PFN_XkbSelectEventDetails SelectEventDetails;
+ PFN_XkbSetDetectableAutoRepeat SetDetectableAutoRepeat;
+ } xkb;
+
+ struct {
+ int count;
+ int timeout;
+ int interval;
+ int blanking;
+ int exposure;
+ } saver;
+
+ struct {
+ int version;
+ Window source;
+ Atom format;
+ } xdnd;
+
+ struct {
+ void* handle;
+ PFN_XcursorImageCreate ImageCreate;
+ PFN_XcursorImageDestroy ImageDestroy;
+ PFN_XcursorImageLoadCursor ImageLoadCursor;
+ PFN_XcursorGetTheme GetTheme;
+ PFN_XcursorGetDefaultSize GetDefaultSize;
+ PFN_XcursorLibraryLoadImage LibraryLoadImage;
+ } xcursor;
+
+ struct {
+ GLFWbool available;
+ void* handle;
+ int major;
+ int minor;
+ PFN_XineramaIsActive IsActive;
+ PFN_XineramaQueryExtension QueryExtension;
+ PFN_XineramaQueryScreens QueryScreens;
+ } xinerama;
+
+ struct {
+ void* handle;
+ PFN_XGetXCBConnection GetXCBConnection;
+ } x11xcb;
+
+ struct {
+ GLFWbool available;
+ void* handle;
+ int eventBase;
+ int errorBase;
+ PFN_XF86VidModeQueryExtension QueryExtension;
+ PFN_XF86VidModeGetGammaRamp GetGammaRamp;
+ PFN_XF86VidModeSetGammaRamp SetGammaRamp;
+ PFN_XF86VidModeGetGammaRampSize GetGammaRampSize;
+ } vidmode;
+
+ struct {
+ GLFWbool available;
+ void* handle;
+ int majorOpcode;
+ int eventBase;
+ int errorBase;
+ int major;
+ int minor;
+ PFN_XIQueryVersion QueryVersion;
+ PFN_XISelectEvents SelectEvents;
+ } xi;
+
+ struct {
+ GLFWbool available;
+ void* handle;
+ int major;
+ int minor;
+ int eventBase;
+ int errorBase;
+ PFN_XRenderQueryExtension QueryExtension;
+ PFN_XRenderQueryVersion QueryVersion;
+ PFN_XRenderFindVisualFormat FindVisualFormat;
+ } xrender;
+
+ struct {
+ GLFWbool available;
+ void* handle;
+ int major;
+ int minor;
+ int eventBase;
+ int errorBase;
+ PFN_XShapeQueryExtension QueryExtension;
+ PFN_XShapeCombineRegion ShapeCombineRegion;
+ PFN_XShapeQueryVersion QueryVersion;
+ PFN_XShapeCombineMask ShapeCombineMask;
+ } xshape;
+} _GLFWlibraryX11;
+
+// X11-specific per-monitor data
+//
+typedef struct _GLFWmonitorX11
+{
+ RROutput output;
+ RRCrtc crtc;
+ RRMode oldMode;
+
+ // Index of corresponding Xinerama screen,
+ // for EWMH full screen window placement
+ int index;
+} _GLFWmonitorX11;
+
+// X11-specific per-cursor data
+//
+typedef struct _GLFWcursorX11
+{
+ Cursor handle;
+} _GLFWcursorX11;
+
+
+GLFWbool _glfwConnectX11(int platformID, _GLFWplatform* platform);
+int _glfwInitX11(void);
+void _glfwTerminateX11(void);
+
+int _glfwCreateWindowX11(_GLFWwindow* window, const _GLFWwndconfig* wndconfig, const _GLFWctxconfig* ctxconfig, const _GLFWfbconfig* fbconfig);
+void _glfwDestroyWindowX11(_GLFWwindow* window);
+void _glfwSetWindowTitleX11(_GLFWwindow* window, const char* title);
+void _glfwSetWindowIconX11(_GLFWwindow* window, int count, const GLFWimage* images);
+void _glfwGetWindowPosX11(_GLFWwindow* window, int* xpos, int* ypos);
+void _glfwSetWindowPosX11(_GLFWwindow* window, int xpos, int ypos);
+void _glfwGetWindowSizeX11(_GLFWwindow* window, int* width, int* height);
+void _glfwSetWindowSizeX11(_GLFWwindow* window, int width, int height);
+void _glfwSetWindowSizeLimitsX11(_GLFWwindow* window, int minwidth, int minheight, int maxwidth, int maxheight);
+void _glfwSetWindowAspectRatioX11(_GLFWwindow* window, int numer, int denom);
+void _glfwGetFramebufferSizeX11(_GLFWwindow* window, int* width, int* height);
+void _glfwGetWindowFrameSizeX11(_GLFWwindow* window, int* left, int* top, int* right, int* bottom);
+void _glfwGetWindowContentScaleX11(_GLFWwindow* window, float* xscale, float* yscale);
+void _glfwIconifyWindowX11(_GLFWwindow* window);
+void _glfwRestoreWindowX11(_GLFWwindow* window);
+void _glfwMaximizeWindowX11(_GLFWwindow* window);
+void _glfwShowWindowX11(_GLFWwindow* window);
+void _glfwHideWindowX11(_GLFWwindow* window);
+void _glfwRequestWindowAttentionX11(_GLFWwindow* window);
+void _glfwFocusWindowX11(_GLFWwindow* window);
+void _glfwSetWindowMonitorX11(_GLFWwindow* window, _GLFWmonitor* monitor, int xpos, int ypos, int width, int height, int refreshRate);
+int _glfwWindowFocusedX11(_GLFWwindow* window);
+int _glfwWindowIconifiedX11(_GLFWwindow* window);
+int _glfwWindowVisibleX11(_GLFWwindow* window);
+int _glfwWindowMaximizedX11(_GLFWwindow* window);
+int _glfwWindowHoveredX11(_GLFWwindow* window);
+int _glfwFramebufferTransparentX11(_GLFWwindow* window);
+void _glfwSetWindowResizableX11(_GLFWwindow* window, GLFWbool enabled);
+void _glfwSetWindowDecoratedX11(_GLFWwindow* window, GLFWbool enabled);
+void _glfwSetWindowFloatingX11(_GLFWwindow* window, GLFWbool enabled);
+float _glfwGetWindowOpacityX11(_GLFWwindow* window);
+void _glfwSetWindowOpacityX11(_GLFWwindow* window, float opacity);
+void _glfwSetWindowMousePassthroughX11(_GLFWwindow* window, GLFWbool enabled);
+
+void _glfwSetRawMouseMotionX11(_GLFWwindow *window, GLFWbool enabled);
+GLFWbool _glfwRawMouseMotionSupportedX11(void);
+
+void _glfwPollEventsX11(void);
+void _glfwWaitEventsX11(void);
+void _glfwWaitEventsTimeoutX11(double timeout);
+void _glfwPostEmptyEventX11(void);
+
+void _glfwGetCursorPosX11(_GLFWwindow* window, double* xpos, double* ypos);
+void _glfwSetCursorPosX11(_GLFWwindow* window, double xpos, double ypos);
+void _glfwSetCursorModeX11(_GLFWwindow* window, int mode);
+const char* _glfwGetScancodeNameX11(int scancode);
+int _glfwGetKeyScancodeX11(int key);
+int _glfwCreateCursorX11(_GLFWcursor* cursor, const GLFWimage* image, int xhot, int yhot);
+int _glfwCreateStandardCursorX11(_GLFWcursor* cursor, int shape);
+void _glfwDestroyCursorX11(_GLFWcursor* cursor);
+void _glfwSetCursorX11(_GLFWwindow* window, _GLFWcursor* cursor);
+void _glfwSetClipboardStringX11(const char* string);
+const char* _glfwGetClipboardStringX11(void);
+
+EGLenum _glfwGetEGLPlatformX11(EGLint** attribs);
+EGLNativeDisplayType _glfwGetEGLNativeDisplayX11(void);
+EGLNativeWindowType _glfwGetEGLNativeWindowX11(_GLFWwindow* window);
+
+void _glfwGetRequiredInstanceExtensionsX11(char** extensions);
+int _glfwGetPhysicalDevicePresentationSupportX11(VkInstance instance, VkPhysicalDevice device, uint32_t queuefamily);
+VkResult _glfwCreateWindowSurfaceX11(VkInstance instance, _GLFWwindow* window, const VkAllocationCallbacks* allocator, VkSurfaceKHR* surface);
+
+void _glfwFreeMonitorX11(_GLFWmonitor* monitor);
+void _glfwGetMonitorPosX11(_GLFWmonitor* monitor, int* xpos, int* ypos);
+void _glfwGetMonitorContentScaleX11(_GLFWmonitor* monitor, float* xscale, float* yscale);
+void _glfwGetMonitorWorkareaX11(_GLFWmonitor* monitor, int* xpos, int* ypos, int* width, int* height);
+GLFWvidmode* _glfwGetVideoModesX11(_GLFWmonitor* monitor, int* count);
+void _glfwGetVideoModeX11(_GLFWmonitor* monitor, GLFWvidmode* mode);
+GLFWbool _glfwGetGammaRampX11(_GLFWmonitor* monitor, GLFWgammaramp* ramp);
+void _glfwSetGammaRampX11(_GLFWmonitor* monitor, const GLFWgammaramp* ramp);
+
+void _glfwPollMonitorsX11(void);
+void _glfwSetVideoModeX11(_GLFWmonitor* monitor, const GLFWvidmode* desired);
+void _glfwRestoreVideoModeX11(_GLFWmonitor* monitor);
+
+Cursor _glfwCreateNativeCursorX11(const GLFWimage* image, int xhot, int yhot);
+
+unsigned long _glfwGetWindowPropertyX11(Window window,
+ Atom property,
+ Atom type,
+ unsigned char** value);
+GLFWbool _glfwIsVisualTransparentX11(Visual* visual);
+
+void _glfwGrabErrorHandlerX11(void);
+void _glfwReleaseErrorHandlerX11(void);
+void _glfwInputErrorX11(int error, const char* message);
+
+void _glfwPushSelectionToManagerX11(void);
+void _glfwCreateInputContextX11(_GLFWwindow* window);
+
+GLFWbool _glfwInitGLX(void);
+void _glfwTerminateGLX(void);
+GLFWbool _glfwCreateContextGLX(_GLFWwindow* window,
+ const _GLFWctxconfig* ctxconfig,
+ const _GLFWfbconfig* fbconfig);
+void _glfwDestroyContextGLX(_GLFWwindow* window);
+GLFWbool _glfwChooseVisualGLX(const _GLFWwndconfig* wndconfig,
+ const _GLFWctxconfig* ctxconfig,
+ const _GLFWfbconfig* fbconfig,
+ Visual** visual, int* depth);
+
diff --git a/chromium/third_party/dawn/third_party/glfw/src/x11_window.c b/chromium/third_party/dawn/third_party/glfw/src/x11_window.c
new file mode 100644
index 00000000000..280dc986436
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/src/x11_window.c
@@ -0,0 +1,3267 @@
+//========================================================================
+// GLFW 3.4 X11 - www.glfw.org
+//------------------------------------------------------------------------
+// Copyright (c) 2002-2006 Marcus Geelnard
+// Copyright (c) 2006-2019 Camilla Löwy <elmindreda@glfw.org>
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would
+// be appreciated but is not required.
+//
+// 2. Altered source versions must be plainly marked as such, and must not
+// be misrepresented as being the original software.
+//
+// 3. This notice may not be removed or altered from any source
+// distribution.
+//
+//========================================================================
+// It is fine to use C99 in this file because it will not be built with VS
+//========================================================================
+
+#include "internal.h"
+
+#include <X11/cursorfont.h>
+#include <X11/Xmd.h>
+
+#include <poll.h>
+
+#include <string.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <limits.h>
+#include <errno.h>
+#include <assert.h>
+
+// Action for EWMH client messages
+#define _NET_WM_STATE_REMOVE 0
+#define _NET_WM_STATE_ADD 1
+#define _NET_WM_STATE_TOGGLE 2
+
+// Additional mouse button names for XButtonEvent
+#define Button6 6
+#define Button7 7
+
+// Motif WM hints flags
+#define MWM_HINTS_DECORATIONS 2
+#define MWM_DECOR_ALL 1
+
+#define _GLFW_XDND_VERSION 5
+
+// Wait for event data to arrive on the X11 display socket
+// This avoids blocking other threads via the per-display Xlib lock that also
+// covers GLX functions
+//
+static GLFWbool waitForX11Event(double* timeout)
+{
+ struct pollfd fd = { ConnectionNumber(_glfw.x11.display), POLLIN };
+
+ while (!XPending(_glfw.x11.display))
+ {
+ if (!_glfwPollPOSIX(&fd, 1, timeout))
+ return GLFW_FALSE;
+ }
+
+ return GLFW_TRUE;
+}
+
+// Wait for event data to arrive on any event file descriptor
+// This avoids blocking other threads via the per-display Xlib lock that also
+// covers GLX functions
+//
+static GLFWbool waitForAnyEvent(double* timeout)
+{
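+    // Poll the X11 connection, the empty event pipe and, on Linux, the
+    // joystick inotify descriptor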
+ nfds_t count = 2;
+ struct pollfd fds[3] =
+ {
+ { ConnectionNumber(_glfw.x11.display), POLLIN },
+ { _glfw.x11.emptyEventPipe[0], POLLIN }
+ };
+
+#if defined(__linux__)
+ if (_glfw.joysticksInitialized)
+ fds[count++] = (struct pollfd) { _glfw.linjs.inotify, POLLIN };
+#endif
+
+ while (!XPending(_glfw.x11.display))
+ {
+ if (!_glfwPollPOSIX(fds, count, timeout))
+ return GLFW_FALSE;
+
+ for (int i = 1; i < count; i++)
+ {
+ if (fds[i].revents & POLLIN)
+ return GLFW_TRUE;
+ }
+ }
+
+ return GLFW_TRUE;
+}
+
+// Writes a byte to the empty event pipe
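+// so that a thread blocked in waitForAnyEvent wakes up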
+//
+static void writeEmptyEvent(void)
+{
+ for (;;)
+ {
+ const char byte = 0;
+ const ssize_t result = write(_glfw.x11.emptyEventPipe[1], &byte, 1);
+ if (result == 1 || (result == -1 && errno != EINTR))
+ break;
+ }
+}
+
+// Drains available data from the empty event pipe
+//
+static void drainEmptyEvents(void)
+{
+ for (;;)
+ {
+ char dummy[64];
+ const ssize_t result = read(_glfw.x11.emptyEventPipe[0], dummy, sizeof(dummy));
+ if (result == -1 && errno != EINTR)
+ break;
+ }
+}
+
+// Waits until a VisibilityNotify event arrives for the specified window or the
+// timeout period elapses (ICCCM section 4.2.2)
+//
+static GLFWbool waitForVisibilityNotify(_GLFWwindow* window)
+{
+ XEvent dummy;
+ double timeout = 0.1;
+
+ while (!XCheckTypedWindowEvent(_glfw.x11.display,
+ window->x11.handle,
+ VisibilityNotify,
+ &dummy))
+ {
+ if (!waitForX11Event(&timeout))
+ return GLFW_FALSE;
+ }
+
+ return GLFW_TRUE;
+}
+
+// Returns the ICCCM WM_STATE value of the window (e.g. IconicState)
+//
+static int getWindowState(_GLFWwindow* window)
+{
+ int result = WithdrawnState;
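+    // Per ICCCM, the WM_STATE property holds the state value followed by the
+    // icon window XID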
+ struct {
+ CARD32 state;
+ Window icon;
+ } *state = NULL;
+
+ if (_glfwGetWindowPropertyX11(window->x11.handle,
+ _glfw.x11.WM_STATE,
+ _glfw.x11.WM_STATE,
+ (unsigned char**) &state) >= 2)
+ {
+ result = state->state;
+ }
+
+ if (state)
+ XFree(state);
+
+ return result;
+}
+
+// Returns whether the event is a selection event
+//
+static Bool isSelectionEvent(Display* display, XEvent* event, XPointer pointer)
+{
+ if (event->xany.window != _glfw.x11.helperWindowHandle)
+ return False;
+
+ return event->type == SelectionRequest ||
+ event->type == SelectionNotify ||
+ event->type == SelectionClear;
+}
+
+// Returns whether it is a _NET_FRAME_EXTENTS event for the specified window
+//
+static Bool isFrameExtentsEvent(Display* display, XEvent* event, XPointer pointer)
+{
+ _GLFWwindow* window = (_GLFWwindow*) pointer;
+ return event->type == PropertyNotify &&
+ event->xproperty.state == PropertyNewValue &&
+ event->xproperty.window == window->x11.handle &&
+ event->xproperty.atom == _glfw.x11.NET_FRAME_EXTENTS;
+}
+
+// Returns whether it is a property event for the specified selection transfer
+//
+static Bool isSelPropNewValueNotify(Display* display, XEvent* event, XPointer pointer)
+{
+ XEvent* notification = (XEvent*) pointer;
+ return event->type == PropertyNotify &&
+ event->xproperty.state == PropertyNewValue &&
+ event->xproperty.window == notification->xselection.requestor &&
+ event->xproperty.atom == notification->xselection.property;
+}
+
+// Translates an X event modifier state mask
+//
+static int translateState(int state)
+{
+ int mods = 0;
+
+ if (state & ShiftMask)
+ mods |= GLFW_MOD_SHIFT;
+ if (state & ControlMask)
+ mods |= GLFW_MOD_CONTROL;
+ if (state & Mod1Mask)
+ mods |= GLFW_MOD_ALT;
+ if (state & Mod4Mask)
+ mods |= GLFW_MOD_SUPER;
+ if (state & LockMask)
+ mods |= GLFW_MOD_CAPS_LOCK;
+ if (state & Mod2Mask)
+ mods |= GLFW_MOD_NUM_LOCK;
+
+ return mods;
+}
+
+// Translates an X11 key code to a GLFW key token
+//
+static int translateKey(int scancode)
+{
+ // Use the pre-filled LUT (see createKeyTables() in x11_init.c)
+ if (scancode < 0 || scancode > 255)
+ return GLFW_KEY_UNKNOWN;
+
+ return _glfw.x11.keycodes[scancode];
+}
+
+// Sends an EWMH or ICCCM event to the window manager
+//
+static void sendEventToWM(_GLFWwindow* window, Atom type,
+ long a, long b, long c, long d, long e)
+{
+ XEvent event = { ClientMessage };
+ event.xclient.window = window->x11.handle;
+ event.xclient.format = 32; // Data is 32-bit longs
+ event.xclient.message_type = type;
+ event.xclient.data.l[0] = a;
+ event.xclient.data.l[1] = b;
+ event.xclient.data.l[2] = c;
+ event.xclient.data.l[3] = d;
+ event.xclient.data.l[4] = e;
+
+ XSendEvent(_glfw.x11.display, _glfw.x11.root,
+ False,
+ SubstructureNotifyMask | SubstructureRedirectMask,
+ &event);
+}
+
+// Updates the normal hints according to the window settings
+//
+static void updateNormalHints(_GLFWwindow* window, int width, int height)
+{
+ XSizeHints* hints = XAllocSizeHints();
+
+ if (!window->monitor)
+ {
+ if (window->resizable)
+ {
+ if (window->minwidth != GLFW_DONT_CARE &&
+ window->minheight != GLFW_DONT_CARE)
+ {
+ hints->flags |= PMinSize;
+ hints->min_width = window->minwidth;
+ hints->min_height = window->minheight;
+ }
+
+ if (window->maxwidth != GLFW_DONT_CARE &&
+ window->maxheight != GLFW_DONT_CARE)
+ {
+ hints->flags |= PMaxSize;
+ hints->max_width = window->maxwidth;
+ hints->max_height = window->maxheight;
+ }
+
+ if (window->numer != GLFW_DONT_CARE &&
+ window->denom != GLFW_DONT_CARE)
+ {
+ hints->flags |= PAspect;
+ hints->min_aspect.x = hints->max_aspect.x = window->numer;
+ hints->min_aspect.y = hints->max_aspect.y = window->denom;
+ }
+ }
+ else
+ {
+ hints->flags |= (PMinSize | PMaxSize);
+ hints->min_width = hints->max_width = width;
+ hints->min_height = hints->max_height = height;
+ }
+ }
+
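+    // StaticGravity keeps the client area in place when the window manager
+    // reparents the window into a decorated frame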
+ hints->flags |= PWinGravity;
+ hints->win_gravity = StaticGravity;
+
+ XSetWMNormalHints(_glfw.x11.display, window->x11.handle, hints);
+ XFree(hints);
+}
+
+// Updates the full screen status of the window
+//
+static void updateWindowMode(_GLFWwindow* window)
+{
+ if (window->monitor)
+ {
+ if (_glfw.x11.xinerama.available &&
+ _glfw.x11.NET_WM_FULLSCREEN_MONITORS)
+ {
+ sendEventToWM(window,
+ _glfw.x11.NET_WM_FULLSCREEN_MONITORS,
+ window->monitor->x11.index,
+ window->monitor->x11.index,
+ window->monitor->x11.index,
+ window->monitor->x11.index,
+ 0);
+ }
+
+ if (_glfw.x11.NET_WM_STATE && _glfw.x11.NET_WM_STATE_FULLSCREEN)
+ {
+ sendEventToWM(window,
+ _glfw.x11.NET_WM_STATE,
+ _NET_WM_STATE_ADD,
+ _glfw.x11.NET_WM_STATE_FULLSCREEN,
+ 0, 1, 0);
+ }
+ else
+ {
+ // This is the butcher's way of removing window decorations
+ // Setting the override-redirect attribute on a window makes the
+ // window manager ignore the window completely (ICCCM, section 4)
+ // The good thing is that this makes undecorated full screen windows
+ // easy to do; the bad thing is that we have to do everything
+ // manually and some things (like iconify/restore) won't work at
+ // all, as those are tasks usually performed by the window manager
+
+ XSetWindowAttributes attributes;
+ attributes.override_redirect = True;
+ XChangeWindowAttributes(_glfw.x11.display,
+ window->x11.handle,
+ CWOverrideRedirect,
+ &attributes);
+
+ window->x11.overrideRedirect = GLFW_TRUE;
+ }
+
+ // Enable compositor bypass
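+        // (a property value of 1 asks the compositor to unredirect the window)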
+ if (!window->x11.transparent)
+ {
+ const unsigned long value = 1;
+
+ XChangeProperty(_glfw.x11.display, window->x11.handle,
+ _glfw.x11.NET_WM_BYPASS_COMPOSITOR, XA_CARDINAL, 32,
+ PropModeReplace, (unsigned char*) &value, 1);
+ }
+ }
+ else
+ {
+ if (_glfw.x11.xinerama.available &&
+ _glfw.x11.NET_WM_FULLSCREEN_MONITORS)
+ {
+ XDeleteProperty(_glfw.x11.display, window->x11.handle,
+ _glfw.x11.NET_WM_FULLSCREEN_MONITORS);
+ }
+
+ if (_glfw.x11.NET_WM_STATE && _glfw.x11.NET_WM_STATE_FULLSCREEN)
+ {
+ sendEventToWM(window,
+ _glfw.x11.NET_WM_STATE,
+ _NET_WM_STATE_REMOVE,
+ _glfw.x11.NET_WM_STATE_FULLSCREEN,
+ 0, 1, 0);
+ }
+ else
+ {
+ XSetWindowAttributes attributes;
+ attributes.override_redirect = False;
+ XChangeWindowAttributes(_glfw.x11.display,
+ window->x11.handle,
+ CWOverrideRedirect,
+ &attributes);
+
+ window->x11.overrideRedirect = GLFW_FALSE;
+ }
+
+ // Disable compositor bypass
+ if (!window->x11.transparent)
+ {
+ XDeleteProperty(_glfw.x11.display, window->x11.handle,
+ _glfw.x11.NET_WM_BYPASS_COMPOSITOR);
+ }
+ }
+}
+
+// Decode a Unicode code point from a UTF-8 stream
+// Based on cutef8 by Jeff Bezanson (Public Domain)
+//
+static uint32_t decodeUTF8(const char** s)
+{
+ uint32_t codepoint = 0, count = 0;
+ static const uint32_t offsets[] =
+ {
+ 0x00000000u, 0x00003080u, 0x000e2080u,
+ 0x03c82080u, 0xfa082080u, 0x82082080u
+ };
+
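+    // Accumulate six bits per byte, then subtract the lead and continuation
+    // byte tag bits for the decoded sequence length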
+ do
+ {
+ codepoint = (codepoint << 6) + (unsigned char) **s;
+ (*s)++;
+ count++;
+ } while ((**s & 0xc0) == 0x80);
+
+ assert(count <= 6);
+ return codepoint - offsets[count - 1];
+}
+
+// Convert the specified Latin-1 string to UTF-8
+//
+static char* convertLatin1toUTF8(const char* source)
+{
+ size_t size = 1;
+ const char* sp;
+
+ for (sp = source; *sp; sp++)
+ size += (*sp & 0x80) ? 2 : 1;
+
+ char* target = _glfw_calloc(size, 1);
+ char* tp = target;
+
+ for (sp = source; *sp; sp++)
+ tp += _glfwEncodeUTF8(tp, *sp);
+
+ return target;
+}
+
+// Updates the cursor image according to its cursor mode
+//
+static void updateCursorImage(_GLFWwindow* window)
+{
+ if (window->cursorMode == GLFW_CURSOR_NORMAL)
+ {
+ if (window->cursor)
+ {
+ XDefineCursor(_glfw.x11.display, window->x11.handle,
+ window->cursor->x11.handle);
+ }
+ else
+ XUndefineCursor(_glfw.x11.display, window->x11.handle);
+ }
+ else
+ {
+ XDefineCursor(_glfw.x11.display, window->x11.handle,
+ _glfw.x11.hiddenCursorHandle);
+ }
+}
+
+// Enable XI2 raw mouse motion events
+//
+static void enableRawMouseMotion(_GLFWwindow* window)
+{
+ XIEventMask em;
+ unsigned char mask[XIMaskLen(XI_RawMotion)] = { 0 };
+
+ em.deviceid = XIAllMasterDevices;
+ em.mask_len = sizeof(mask);
+ em.mask = mask;
+ XISetMask(mask, XI_RawMotion);
+
+ XISelectEvents(_glfw.x11.display, _glfw.x11.root, &em, 1);
+}
+
+// Disable XI2 raw mouse motion events
+//
+static void disableRawMouseMotion(_GLFWwindow* window)
+{
+ XIEventMask em;
+ unsigned char mask[] = { 0 };
+
+ em.deviceid = XIAllMasterDevices;
+ em.mask_len = sizeof(mask);
+ em.mask = mask;
+
+ XISelectEvents(_glfw.x11.display, _glfw.x11.root, &em, 1);
+}
+
+// Apply disabled cursor mode to a focused window
+//
+static void disableCursor(_GLFWwindow* window)
+{
+ if (window->rawMouseMotion)
+ enableRawMouseMotion(window);
+
+ _glfw.x11.disabledCursorWindow = window;
+ _glfwGetCursorPosX11(window,
+ &_glfw.x11.restoreCursorPosX,
+ &_glfw.x11.restoreCursorPosY);
+ updateCursorImage(window);
+ _glfwCenterCursorInContentArea(window);
+ XGrabPointer(_glfw.x11.display, window->x11.handle, True,
+ ButtonPressMask | ButtonReleaseMask | PointerMotionMask,
+ GrabModeAsync, GrabModeAsync,
+ window->x11.handle,
+ _glfw.x11.hiddenCursorHandle,
+ CurrentTime);
+}
+
+// Exit disabled cursor mode for the specified window
+//
+static void enableCursor(_GLFWwindow* window)
+{
+ if (window->rawMouseMotion)
+ disableRawMouseMotion(window);
+
+ _glfw.x11.disabledCursorWindow = NULL;
+ XUngrabPointer(_glfw.x11.display, CurrentTime);
+ _glfwSetCursorPosX11(window,
+ _glfw.x11.restoreCursorPosX,
+ _glfw.x11.restoreCursorPosY);
+ updateCursorImage(window);
+}
+
+// Clears the window's input context handle when the input context is destroyed
+//
+static void inputContextDestroyCallback(XIC ic, XPointer clientData, XPointer callData)
+{
+ _GLFWwindow* window = (_GLFWwindow*) clientData;
+ window->x11.ic = NULL;
+}
+
+// Create the X11 window (and its colormap)
+//
+static GLFWbool createNativeWindow(_GLFWwindow* window,
+ const _GLFWwndconfig* wndconfig,
+ Visual* visual, int depth)
+{
+ int width = wndconfig->width;
+ int height = wndconfig->height;
+
+ if (wndconfig->scaleToMonitor)
+ {
+ width *= _glfw.x11.contentScaleX;
+ height *= _glfw.x11.contentScaleY;
+ }
+
+ // Create a colormap based on the visual used by the current context
+ window->x11.colormap = XCreateColormap(_glfw.x11.display,
+ _glfw.x11.root,
+ visual,
+ AllocNone);
+
+ window->x11.transparent = _glfwIsVisualTransparentX11(visual);
+
+ XSetWindowAttributes wa = { 0 };
+ wa.colormap = window->x11.colormap;
+ wa.event_mask = StructureNotifyMask | KeyPressMask | KeyReleaseMask |
+ PointerMotionMask | ButtonPressMask | ButtonReleaseMask |
+ ExposureMask | FocusChangeMask | VisibilityChangeMask |
+ EnterWindowMask | LeaveWindowMask | PropertyChangeMask;
+
+ _glfwGrabErrorHandlerX11();
+
+ window->x11.parent = _glfw.x11.root;
+ window->x11.handle = XCreateWindow(_glfw.x11.display,
+ _glfw.x11.root,
+ 0, 0, // Position
+ width, height,
+ 0, // Border width
+ depth, // Color depth
+ InputOutput,
+ visual,
+ CWBorderPixel | CWColormap | CWEventMask,
+ &wa);
+
+ _glfwReleaseErrorHandlerX11();
+
+ if (!window->x11.handle)
+ {
+ _glfwInputErrorX11(GLFW_PLATFORM_ERROR,
+ "X11: Failed to create window");
+ return GLFW_FALSE;
+ }
+
+ XSaveContext(_glfw.x11.display,
+ window->x11.handle,
+ _glfw.x11.context,
+ (XPointer) window);
+
+ if (!wndconfig->decorated)
+ _glfwSetWindowDecoratedX11(window, GLFW_FALSE);
+
+ if (_glfw.x11.NET_WM_STATE && !window->monitor)
+ {
+ Atom states[3];
+ int count = 0;
+
+ if (wndconfig->floating)
+ {
+ if (_glfw.x11.NET_WM_STATE_ABOVE)
+ states[count++] = _glfw.x11.NET_WM_STATE_ABOVE;
+ }
+
+ if (wndconfig->maximized)
+ {
+ if (_glfw.x11.NET_WM_STATE_MAXIMIZED_VERT &&
+ _glfw.x11.NET_WM_STATE_MAXIMIZED_HORZ)
+ {
+ states[count++] = _glfw.x11.NET_WM_STATE_MAXIMIZED_VERT;
+ states[count++] = _glfw.x11.NET_WM_STATE_MAXIMIZED_HORZ;
+ window->x11.maximized = GLFW_TRUE;
+ }
+ }
+
+ if (count)
+ {
+ XChangeProperty(_glfw.x11.display, window->x11.handle,
+ _glfw.x11.NET_WM_STATE, XA_ATOM, 32,
+ PropModeReplace, (unsigned char*) states, count);
+ }
+ }
+
+ // Declare the WM protocols supported by GLFW
+ {
+ Atom protocols[] =
+ {
+ _glfw.x11.WM_DELETE_WINDOW,
+ _glfw.x11.NET_WM_PING
+ };
+
+ XSetWMProtocols(_glfw.x11.display, window->x11.handle,
+ protocols, sizeof(protocols) / sizeof(Atom));
+ }
+
+ // Declare our PID
+ {
+ const long pid = getpid();
+
+ XChangeProperty(_glfw.x11.display, window->x11.handle,
+ _glfw.x11.NET_WM_PID, XA_CARDINAL, 32,
+ PropModeReplace,
+ (unsigned char*) &pid, 1);
+ }
+
+ if (_glfw.x11.NET_WM_WINDOW_TYPE && _glfw.x11.NET_WM_WINDOW_TYPE_NORMAL)
+ {
+ Atom type = _glfw.x11.NET_WM_WINDOW_TYPE_NORMAL;
+ XChangeProperty(_glfw.x11.display, window->x11.handle,
+ _glfw.x11.NET_WM_WINDOW_TYPE, XA_ATOM, 32,
+ PropModeReplace, (unsigned char*) &type, 1);
+ }
+
+ // Set ICCCM WM_HINTS property
+ {
+ XWMHints* hints = XAllocWMHints();
+ if (!hints)
+ {
+ _glfwInputError(GLFW_OUT_OF_MEMORY,
+ "X11: Failed to allocate WM hints");
+ return GLFW_FALSE;
+ }
+
+ hints->flags = StateHint;
+ hints->initial_state = NormalState;
+
+ XSetWMHints(_glfw.x11.display, window->x11.handle, hints);
+ XFree(hints);
+ }
+
+ updateNormalHints(window, width, height);
+
+ // Set ICCCM WM_CLASS property
+ {
+ XClassHint* hint = XAllocClassHint();
+
+ if (strlen(wndconfig->x11.instanceName) &&
+ strlen(wndconfig->x11.className))
+ {
+ hint->res_name = (char*) wndconfig->x11.instanceName;
+ hint->res_class = (char*) wndconfig->x11.className;
+ }
+ else
+ {
+ const char* resourceName = getenv("RESOURCE_NAME");
+ if (resourceName && strlen(resourceName))
+ hint->res_name = (char*) resourceName;
+ else if (strlen(wndconfig->title))
+ hint->res_name = (char*) wndconfig->title;
+ else
+ hint->res_name = (char*) "glfw-application";
+
+ if (strlen(wndconfig->title))
+ hint->res_class = (char*) wndconfig->title;
+ else
+ hint->res_class = (char*) "GLFW-Application";
+ }
+
+ XSetClassHint(_glfw.x11.display, window->x11.handle, hint);
+ XFree(hint);
+ }
+
+ // Announce support for Xdnd (drag and drop)
+ {
+ const Atom version = _GLFW_XDND_VERSION;
+ XChangeProperty(_glfw.x11.display, window->x11.handle,
+ _glfw.x11.XdndAware, XA_ATOM, 32,
+ PropModeReplace, (unsigned char*) &version, 1);
+ }
+
+ if (_glfw.x11.im)
+ _glfwCreateInputContextX11(window);
+
+ _glfwSetWindowTitleX11(window, wndconfig->title);
+ _glfwGetWindowPosX11(window, &window->x11.xpos, &window->x11.ypos);
+ _glfwGetWindowSizeX11(window, &window->x11.width, &window->x11.height);
+
+ return GLFW_TRUE;
+}
+
+// Set the specified property to the selection converted to the requested target
+//
+static Atom writeTargetToProperty(const XSelectionRequestEvent* request)
+{
+ char* selectionString = NULL;
+ const Atom formats[] = { _glfw.x11.UTF8_STRING, XA_STRING };
+ const int formatCount = sizeof(formats) / sizeof(formats[0]);
+
+ if (request->selection == _glfw.x11.PRIMARY)
+ selectionString = _glfw.x11.primarySelectionString;
+ else
+ selectionString = _glfw.x11.clipboardString;
+
+ if (request->property == None)
+ {
+ // The requester is a legacy client (ICCCM section 2.2)
+ // We don't support legacy clients, so fail here
+ return None;
+ }
+
+ if (request->target == _glfw.x11.TARGETS)
+ {
+ // The list of supported targets was requested
+
+ const Atom targets[] = { _glfw.x11.TARGETS,
+ _glfw.x11.MULTIPLE,
+ _glfw.x11.UTF8_STRING,
+ XA_STRING };
+
+ XChangeProperty(_glfw.x11.display,
+ request->requestor,
+ request->property,
+ XA_ATOM,
+ 32,
+ PropModeReplace,
+ (unsigned char*) targets,
+ sizeof(targets) / sizeof(targets[0]));
+
+ return request->property;
+ }
+
+ if (request->target == _glfw.x11.MULTIPLE)
+ {
+ // Multiple conversions were requested
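+        // The requestor has stored (target, property) atom pairs of type
+        // ATOM_PAIR in the specified property, one pair per conversion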
+
+ Atom* targets;
+ const unsigned long count =
+ _glfwGetWindowPropertyX11(request->requestor,
+ request->property,
+ _glfw.x11.ATOM_PAIR,
+ (unsigned char**) &targets);
+
+ for (unsigned long i = 0; i < count; i += 2)
+ {
+ int j;
+
+ for (j = 0; j < formatCount; j++)
+ {
+ if (targets[i] == formats[j])
+ break;
+ }
+
+ if (j < formatCount)
+ {
+ XChangeProperty(_glfw.x11.display,
+ request->requestor,
+ targets[i + 1],
+ targets[i],
+ 8,
+ PropModeReplace,
+ (unsigned char *) selectionString,
+ strlen(selectionString));
+ }
+ else
+ targets[i + 1] = None;
+ }
+
+ XChangeProperty(_glfw.x11.display,
+ request->requestor,
+ request->property,
+ _glfw.x11.ATOM_PAIR,
+ 32,
+ PropModeReplace,
+ (unsigned char*) targets,
+ count);
+
+ XFree(targets);
+
+ return request->property;
+ }
+
+ if (request->target == _glfw.x11.SAVE_TARGETS)
+ {
+ // The request is a check whether we support SAVE_TARGETS
+ // It should be handled as a no-op side effect target
+
+ XChangeProperty(_glfw.x11.display,
+ request->requestor,
+ request->property,
+ _glfw.x11.NULL_,
+ 32,
+ PropModeReplace,
+ NULL,
+ 0);
+
+ return request->property;
+ }
+
+ // Conversion to a data target was requested
+
+ for (int i = 0; i < formatCount; i++)
+ {
+ if (request->target == formats[i])
+ {
+ // The requested target is one we support
+
+ XChangeProperty(_glfw.x11.display,
+ request->requestor,
+ request->property,
+ request->target,
+ 8,
+ PropModeReplace,
+ (unsigned char *) selectionString,
+ strlen(selectionString));
+
+ return request->property;
+ }
+ }
+
+ // The requested target is not supported
+
+ return None;
+}
+
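+// Respond to a selection request from another X client (ICCCM section 2.2)
+//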
+static void handleSelectionRequest(XEvent* event)
+{
+ const XSelectionRequestEvent* request = &event->xselectionrequest;
+
+ XEvent reply = { SelectionNotify };
+ reply.xselection.property = writeTargetToProperty(request);
+ reply.xselection.display = request->display;
+ reply.xselection.requestor = request->requestor;
+ reply.xselection.selection = request->selection;
+ reply.xselection.target = request->target;
+ reply.xselection.time = request->time;
+
+ XSendEvent(_glfw.x11.display, request->requestor, False, 0, &reply);
+}
+
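+// Retrieve the string of the specified selection, converting it to UTF-8
+// where necessary
+//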
+static const char* getSelectionString(Atom selection)
+{
+ char** selectionString = NULL;
+ const Atom targets[] = { _glfw.x11.UTF8_STRING, XA_STRING };
+ const size_t targetCount = sizeof(targets) / sizeof(targets[0]);
+
+ if (selection == _glfw.x11.PRIMARY)
+ selectionString = &_glfw.x11.primarySelectionString;
+ else
+ selectionString = &_glfw.x11.clipboardString;
+
+ if (XGetSelectionOwner(_glfw.x11.display, selection) ==
+ _glfw.x11.helperWindowHandle)
+ {
+        // Instead of doing a large number of X round-trips just to put this
+        // string into a window property and then read it back, return it directly
+ return *selectionString;
+ }
+
+ _glfw_free(*selectionString);
+ *selectionString = NULL;
+
+ for (size_t i = 0; i < targetCount; i++)
+ {
+ char* data;
+ Atom actualType;
+ int actualFormat;
+ unsigned long itemCount, bytesAfter;
+ XEvent notification, dummy;
+
+ XConvertSelection(_glfw.x11.display,
+ selection,
+ targets[i],
+ _glfw.x11.GLFW_SELECTION,
+ _glfw.x11.helperWindowHandle,
+ CurrentTime);
+
+ while (!XCheckTypedWindowEvent(_glfw.x11.display,
+ _glfw.x11.helperWindowHandle,
+ SelectionNotify,
+ &notification))
+ {
+ waitForX11Event(NULL);
+ }
+
+ if (notification.xselection.property == None)
+ continue;
+
+ XCheckIfEvent(_glfw.x11.display,
+ &dummy,
+ isSelPropNewValueNotify,
+ (XPointer) &notification);
+
+ XGetWindowProperty(_glfw.x11.display,
+ notification.xselection.requestor,
+ notification.xselection.property,
+ 0,
+ LONG_MAX,
+ True,
+ AnyPropertyType,
+ &actualType,
+ &actualFormat,
+ &itemCount,
+ &bytesAfter,
+ (unsigned char**) &data);
+
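+        // NOTE: The ICCCM INCR mechanism transfers large selections in chunks;
+        //       a zero-length property write from the owner marks the end of
+        //       the transfer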
+ if (actualType == _glfw.x11.INCR)
+ {
+ size_t size = 1;
+ char* string = NULL;
+
+ for (;;)
+ {
+ while (!XCheckIfEvent(_glfw.x11.display,
+ &dummy,
+ isSelPropNewValueNotify,
+ (XPointer) &notification))
+ {
+ waitForX11Event(NULL);
+ }
+
+ XFree(data);
+ XGetWindowProperty(_glfw.x11.display,
+ notification.xselection.requestor,
+ notification.xselection.property,
+ 0,
+ LONG_MAX,
+ True,
+ AnyPropertyType,
+ &actualType,
+ &actualFormat,
+ &itemCount,
+ &bytesAfter,
+ (unsigned char**) &data);
+
+ if (itemCount)
+ {
+ size += itemCount;
+ string = _glfw_realloc(string, size);
+ string[size - itemCount - 1] = '\0';
+ strcat(string, data);
+ }
+
+ if (!itemCount)
+ {
+ if (string)
+ {
+ if (targets[i] == XA_STRING)
+ {
+ *selectionString = convertLatin1toUTF8(string);
+ _glfw_free(string);
+ }
+ else
+ *selectionString = string;
+ }
+
+ break;
+ }
+ }
+ }
+ else if (actualType == targets[i])
+ {
+ if (targets[i] == XA_STRING)
+ *selectionString = convertLatin1toUTF8(data);
+ else
+ *selectionString = _glfw_strdup(data);
+ }
+
+ XFree(data);
+
+ if (*selectionString)
+ break;
+ }
+
+ if (!*selectionString)
+ {
+ _glfwInputError(GLFW_FORMAT_UNAVAILABLE,
+ "X11: Failed to convert selection to string");
+ }
+
+ return *selectionString;
+}
+
+// Make the specified window and its video mode active on its monitor
+//
+static void acquireMonitor(_GLFWwindow* window)
+{
+ if (_glfw.x11.saver.count == 0)
+ {
+ // Remember old screen saver settings
+ XGetScreenSaver(_glfw.x11.display,
+ &_glfw.x11.saver.timeout,
+ &_glfw.x11.saver.interval,
+ &_glfw.x11.saver.blanking,
+ &_glfw.x11.saver.exposure);
+
+ // Disable screen saver
+ XSetScreenSaver(_glfw.x11.display, 0, 0, DontPreferBlanking,
+ DefaultExposures);
+ }
+
+ if (!window->monitor->window)
+ _glfw.x11.saver.count++;
+
+ _glfwSetVideoModeX11(window->monitor, &window->videoMode);
+
+ if (window->x11.overrideRedirect)
+ {
+ int xpos, ypos;
+ GLFWvidmode mode;
+
+ // Manually position the window over its monitor
+ _glfwGetMonitorPosX11(window->monitor, &xpos, &ypos);
+ _glfwGetVideoModeX11(window->monitor, &mode);
+
+ XMoveResizeWindow(_glfw.x11.display, window->x11.handle,
+ xpos, ypos, mode.width, mode.height);
+ }
+
+ _glfwInputMonitorWindow(window->monitor, window);
+}
+
+// Remove the window and restore the original video mode
+//
+static void releaseMonitor(_GLFWwindow* window)
+{
+ if (window->monitor->window != window)
+ return;
+
+ _glfwInputMonitorWindow(window->monitor, NULL);
+ _glfwRestoreVideoModeX11(window->monitor);
+
+ _glfw.x11.saver.count--;
+
+ if (_glfw.x11.saver.count == 0)
+ {
+ // Restore old screen saver settings
+ XSetScreenSaver(_glfw.x11.display,
+ _glfw.x11.saver.timeout,
+ _glfw.x11.saver.interval,
+ _glfw.x11.saver.blanking,
+ _glfw.x11.saver.exposure);
+ }
+}
+
+// Process the specified X event
+//
+static void processEvent(XEvent *event)
+{
+ int keycode = 0;
+ Bool filtered = False;
+
+ // HACK: Save scancode as some IMs clear the field in XFilterEvent
+ if (event->type == KeyPress || event->type == KeyRelease)
+ keycode = event->xkey.keycode;
+
+ filtered = XFilterEvent(event, None);
+
+ if (_glfw.x11.randr.available)
+ {
+ if (event->type == _glfw.x11.randr.eventBase + RRNotify)
+ {
+ XRRUpdateConfiguration(event);
+ _glfwPollMonitorsX11();
+ return;
+ }
+ }
+
+ if (_glfw.x11.xkb.available)
+ {
+ if (event->type == _glfw.x11.xkb.eventBase + XkbEventCode)
+ {
+ if (((XkbEvent*) event)->any.xkb_type == XkbStateNotify &&
+ (((XkbEvent*) event)->state.changed & XkbGroupStateMask))
+ {
+ _glfw.x11.xkb.group = ((XkbEvent*) event)->state.group;
+ }
+
+ return;
+ }
+ }
+
+ if (event->type == GenericEvent)
+ {
+ if (_glfw.x11.xi.available)
+ {
+ _GLFWwindow* window = _glfw.x11.disabledCursorWindow;
+
+ if (window &&
+ window->rawMouseMotion &&
+ event->xcookie.extension == _glfw.x11.xi.majorOpcode &&
+ XGetEventData(_glfw.x11.display, &event->xcookie) &&
+ event->xcookie.evtype == XI_RawMotion)
+ {
+ XIRawEvent* re = event->xcookie.data;
+ if (re->valuators.mask_len)
+ {
+ const double* values = re->raw_values;
+ double xpos = window->virtualCursorPosX;
+ double ypos = window->virtualCursorPosY;
+
+ if (XIMaskIsSet(re->valuators.mask, 0))
+ {
+ xpos += *values;
+ values++;
+ }
+
+ if (XIMaskIsSet(re->valuators.mask, 1))
+ ypos += *values;
+
+ _glfwInputCursorPos(window, xpos, ypos);
+ }
+ }
+
+ XFreeEventData(_glfw.x11.display, &event->xcookie);
+ }
+
+ return;
+ }
+
+ if (event->type == SelectionRequest)
+ {
+ handleSelectionRequest(event);
+ return;
+ }
+
+ _GLFWwindow* window = NULL;
+ if (XFindContext(_glfw.x11.display,
+ event->xany.window,
+ _glfw.x11.context,
+ (XPointer*) &window) != 0)
+ {
+ // This is an event for a window that has already been destroyed
+ return;
+ }
+
+ switch (event->type)
+ {
+ case ReparentNotify:
+ {
+ window->x11.parent = event->xreparent.parent;
+ return;
+ }
+
+ case KeyPress:
+ {
+ const int key = translateKey(keycode);
+ const int mods = translateState(event->xkey.state);
+ const int plain = !(mods & (GLFW_MOD_CONTROL | GLFW_MOD_ALT));
+
+ if (window->x11.ic)
+ {
+ // HACK: Do not report the key press events duplicated by XIM
+ // Duplicate key releases are filtered out implicitly by
+ // the GLFW key repeat logic in _glfwInputKey
+ // A timestamp per key is used to handle simultaneous keys
+ // NOTE: Always allow the first event for each key through
+ // (the server never sends a timestamp of zero)
+ // NOTE: Timestamp difference is compared to handle wrap-around
+ Time diff = event->xkey.time - window->x11.keyPressTimes[keycode];
+ if (diff == event->xkey.time || (diff > 0 && diff < ((Time)1 << 31)))
+ {
+ if (keycode)
+ _glfwInputKey(window, key, keycode, GLFW_PRESS, mods);
+
+ window->x11.keyPressTimes[keycode] = event->xkey.time;
+ }
+
+ if (!filtered)
+ {
+ int count;
+ Status status;
+ char buffer[100];
+ char* chars = buffer;
+
+ count = Xutf8LookupString(window->x11.ic,
+ &event->xkey,
+ buffer, sizeof(buffer) - 1,
+ NULL, &status);
+
+ if (status == XBufferOverflow)
+ {
+ chars = _glfw_calloc(count + 1, 1);
+ count = Xutf8LookupString(window->x11.ic,
+ &event->xkey,
+ chars, count,
+ NULL, &status);
+ }
+
+ if (status == XLookupChars || status == XLookupBoth)
+ {
+ const char* c = chars;
+ chars[count] = '\0';
+ while (c - chars < count)
+ _glfwInputChar(window, decodeUTF8(&c), mods, plain);
+ }
+
+ if (chars != buffer)
+ _glfw_free(chars);
+ }
+ }
+ else
+ {
+ KeySym keysym;
+ XLookupString(&event->xkey, NULL, 0, &keysym, NULL);
+
+ _glfwInputKey(window, key, keycode, GLFW_PRESS, mods);
+
+ const uint32_t codepoint = _glfwKeySym2Unicode(keysym);
+ if (codepoint != GLFW_INVALID_CODEPOINT)
+ _glfwInputChar(window, codepoint, mods, plain);
+ }
+
+ return;
+ }
+
+ case KeyRelease:
+ {
+ const int key = translateKey(keycode);
+ const int mods = translateState(event->xkey.state);
+
+ if (!_glfw.x11.xkb.detectable)
+ {
+ // HACK: Key repeat events will arrive as KeyRelease/KeyPress
+ // pairs with similar or identical time stamps
+ // The key repeat logic in _glfwInputKey expects only key
+ // presses to repeat, so detect and discard release events
+ if (XEventsQueued(_glfw.x11.display, QueuedAfterReading))
+ {
+ XEvent next;
+ XPeekEvent(_glfw.x11.display, &next);
+
+ if (next.type == KeyPress &&
+ next.xkey.window == event->xkey.window &&
+ next.xkey.keycode == keycode)
+ {
+ // HACK: The time of repeat events sometimes doesn't
+ // match that of the press event, so add an
+ // epsilon
+ // Toshiyuki Takahashi can press a button
+ // 16 times per second so it's fairly safe to
+ // assume that no human is pressing the key 50
+ // times per second (value is ms)
+ if ((next.xkey.time - event->xkey.time) < 20)
+ {
+ // This is very likely a server-generated key repeat
+ // event, so ignore it
+ return;
+ }
+ }
+ }
+ }
+
+ _glfwInputKey(window, key, keycode, GLFW_RELEASE, mods);
+ return;
+ }
+
+ case ButtonPress:
+ {
+ const int mods = translateState(event->xbutton.state);
+
+ if (event->xbutton.button == Button1)
+ _glfwInputMouseClick(window, GLFW_MOUSE_BUTTON_LEFT, GLFW_PRESS, mods);
+ else if (event->xbutton.button == Button2)
+ _glfwInputMouseClick(window, GLFW_MOUSE_BUTTON_MIDDLE, GLFW_PRESS, mods);
+ else if (event->xbutton.button == Button3)
+ _glfwInputMouseClick(window, GLFW_MOUSE_BUTTON_RIGHT, GLFW_PRESS, mods);
+
+ // Modern X provides scroll events as mouse button presses
+ else if (event->xbutton.button == Button4)
+ _glfwInputScroll(window, 0.0, 1.0);
+ else if (event->xbutton.button == Button5)
+ _glfwInputScroll(window, 0.0, -1.0);
+ else if (event->xbutton.button == Button6)
+ _glfwInputScroll(window, 1.0, 0.0);
+ else if (event->xbutton.button == Button7)
+ _glfwInputScroll(window, -1.0, 0.0);
+
+ else
+ {
+ // Additional buttons after 7 are treated as regular buttons
+ // We subtract 4 to fill the gap left by scroll input above
+ _glfwInputMouseClick(window,
+ event->xbutton.button - Button1 - 4,
+ GLFW_PRESS,
+ mods);
+ }
+
+ return;
+ }
+
+ case ButtonRelease:
+ {
+ const int mods = translateState(event->xbutton.state);
+
+ if (event->xbutton.button == Button1)
+ {
+ _glfwInputMouseClick(window,
+ GLFW_MOUSE_BUTTON_LEFT,
+ GLFW_RELEASE,
+ mods);
+ }
+ else if (event->xbutton.button == Button2)
+ {
+ _glfwInputMouseClick(window,
+ GLFW_MOUSE_BUTTON_MIDDLE,
+ GLFW_RELEASE,
+ mods);
+ }
+ else if (event->xbutton.button == Button3)
+ {
+ _glfwInputMouseClick(window,
+ GLFW_MOUSE_BUTTON_RIGHT,
+ GLFW_RELEASE,
+ mods);
+ }
+ else if (event->xbutton.button > Button7)
+ {
+ // Additional buttons after 7 are treated as regular buttons
+ // We subtract 4 to fill the gap left by scroll input above
+ _glfwInputMouseClick(window,
+ event->xbutton.button - Button1 - 4,
+ GLFW_RELEASE,
+ mods);
+ }
+
+ return;
+ }
+
+ case EnterNotify:
+ {
+ // XEnterWindowEvent is XCrossingEvent
+ const int x = event->xcrossing.x;
+ const int y = event->xcrossing.y;
+
+ // HACK: This is a workaround for WMs (KWM, Fluxbox) that otherwise
+ // ignore the defined cursor for hidden cursor mode
+ if (window->cursorMode == GLFW_CURSOR_HIDDEN)
+ updateCursorImage(window);
+
+ _glfwInputCursorEnter(window, GLFW_TRUE);
+ _glfwInputCursorPos(window, x, y);
+
+ window->x11.lastCursorPosX = x;
+ window->x11.lastCursorPosY = y;
+ return;
+ }
+
+ case LeaveNotify:
+ {
+ _glfwInputCursorEnter(window, GLFW_FALSE);
+ return;
+ }
+
+ case MotionNotify:
+ {
+ const int x = event->xmotion.x;
+ const int y = event->xmotion.y;
+
+ if (x != window->x11.warpCursorPosX ||
+ y != window->x11.warpCursorPosY)
+ {
+ // The cursor was moved by something other than GLFW
+
+ if (window->cursorMode == GLFW_CURSOR_DISABLED)
+ {
+ if (_glfw.x11.disabledCursorWindow != window)
+ return;
+ if (window->rawMouseMotion)
+ return;
+
+ const int dx = x - window->x11.lastCursorPosX;
+ const int dy = y - window->x11.lastCursorPosY;
+
+ _glfwInputCursorPos(window,
+ window->virtualCursorPosX + dx,
+ window->virtualCursorPosY + dy);
+ }
+ else
+ _glfwInputCursorPos(window, x, y);
+ }
+
+ window->x11.lastCursorPosX = x;
+ window->x11.lastCursorPosY = y;
+ return;
+ }
+
+ case ConfigureNotify:
+ {
+ if (event->xconfigure.width != window->x11.width ||
+ event->xconfigure.height != window->x11.height)
+ {
+ _glfwInputFramebufferSize(window,
+ event->xconfigure.width,
+ event->xconfigure.height);
+
+ _glfwInputWindowSize(window,
+ event->xconfigure.width,
+ event->xconfigure.height);
+
+ window->x11.width = event->xconfigure.width;
+ window->x11.height = event->xconfigure.height;
+ }
+
+ int xpos = event->xconfigure.x;
+ int ypos = event->xconfigure.y;
+
+ // NOTE: ConfigureNotify events from the server are in local
+ // coordinates, so if we are reparented we need to translate
+ // the position into root (screen) coordinates
+ if (!event->xany.send_event && window->x11.parent != _glfw.x11.root)
+ {
+ _glfwGrabErrorHandlerX11();
+
+ Window dummy;
+ XTranslateCoordinates(_glfw.x11.display,
+ window->x11.parent,
+ _glfw.x11.root,
+ xpos, ypos,
+ &xpos, &ypos,
+ &dummy);
+
+ _glfwReleaseErrorHandlerX11();
+ if (_glfw.x11.errorCode == BadWindow)
+ return;
+ }
+
+ if (xpos != window->x11.xpos || ypos != window->x11.ypos)
+ {
+ _glfwInputWindowPos(window, xpos, ypos);
+ window->x11.xpos = xpos;
+ window->x11.ypos = ypos;
+ }
+
+ return;
+ }
+
+ case ClientMessage:
+ {
+ // Custom client message, probably from the window manager
+
+ if (filtered)
+ return;
+
+ if (event->xclient.message_type == None)
+ return;
+
+ if (event->xclient.message_type == _glfw.x11.WM_PROTOCOLS)
+ {
+ const Atom protocol = event->xclient.data.l[0];
+ if (protocol == None)
+ return;
+
+ if (protocol == _glfw.x11.WM_DELETE_WINDOW)
+ {
+ // The window manager was asked to close the window, for
+ // example by the user pressing a 'close' window decoration
+ // button
+ _glfwInputWindowCloseRequest(window);
+ }
+ else if (protocol == _glfw.x11.NET_WM_PING)
+ {
+ // The window manager is pinging the application to ensure
+ // it's still responding to events
+
+ XEvent reply = *event;
+ reply.xclient.window = _glfw.x11.root;
+
+ XSendEvent(_glfw.x11.display, _glfw.x11.root,
+ False,
+ SubstructureNotifyMask | SubstructureRedirectMask,
+ &reply);
+ }
+ }
+ else if (event->xclient.message_type == _glfw.x11.XdndEnter)
+ {
+ // A drag operation has entered the window
+ unsigned long count;
+ Atom* formats = NULL;
+ const GLFWbool list = event->xclient.data.l[1] & 1;
+
+ _glfw.x11.xdnd.source = event->xclient.data.l[0];
+ _glfw.x11.xdnd.version = event->xclient.data.l[1] >> 24;
+ _glfw.x11.xdnd.format = None;
+
+ if (_glfw.x11.xdnd.version > _GLFW_XDND_VERSION)
+ return;
+
+ if (list)
+ {
+ count = _glfwGetWindowPropertyX11(_glfw.x11.xdnd.source,
+ _glfw.x11.XdndTypeList,
+ XA_ATOM,
+ (unsigned char**) &formats);
+ }
+ else
+ {
+ count = 3;
+ formats = (Atom*) event->xclient.data.l + 2;
+ }
+
+ for (unsigned int i = 0; i < count; i++)
+ {
+ if (formats[i] == _glfw.x11.text_uri_list)
+ {
+ _glfw.x11.xdnd.format = _glfw.x11.text_uri_list;
+ break;
+ }
+ }
+
+ if (list && formats)
+ XFree(formats);
+ }
+ else if (event->xclient.message_type == _glfw.x11.XdndDrop)
+ {
+ // The drag operation has finished by dropping on the window
+ Time time = CurrentTime;
+
+ if (_glfw.x11.xdnd.version > _GLFW_XDND_VERSION)
+ return;
+
+ if (_glfw.x11.xdnd.format)
+ {
+ if (_glfw.x11.xdnd.version >= 1)
+ time = event->xclient.data.l[2];
+
+ // Request the chosen format from the source window
+ XConvertSelection(_glfw.x11.display,
+ _glfw.x11.XdndSelection,
+ _glfw.x11.xdnd.format,
+ _glfw.x11.XdndSelection,
+ window->x11.handle,
+ time);
+ }
+ else if (_glfw.x11.xdnd.version >= 2)
+ {
+ XEvent reply = { ClientMessage };
+ reply.xclient.window = _glfw.x11.xdnd.source;
+ reply.xclient.message_type = _glfw.x11.XdndFinished;
+ reply.xclient.format = 32;
+ reply.xclient.data.l[0] = window->x11.handle;
+ reply.xclient.data.l[1] = 0; // The drag was rejected
+ reply.xclient.data.l[2] = None;
+
+ XSendEvent(_glfw.x11.display, _glfw.x11.xdnd.source,
+ False, NoEventMask, &reply);
+ XFlush(_glfw.x11.display);
+ }
+ }
+ else if (event->xclient.message_type == _glfw.x11.XdndPosition)
+ {
+ // The drag operation has moved over the window
+ const int xabs = (event->xclient.data.l[2] >> 16) & 0xffff;
+ const int yabs = (event->xclient.data.l[2]) & 0xffff;
+ Window dummy;
+ int xpos, ypos;
+
+ if (_glfw.x11.xdnd.version > _GLFW_XDND_VERSION)
+ return;
+
+ XTranslateCoordinates(_glfw.x11.display,
+ _glfw.x11.root,
+ window->x11.handle,
+ xabs, yabs,
+ &xpos, &ypos,
+ &dummy);
+
+ _glfwInputCursorPos(window, xpos, ypos);
+
+ XEvent reply = { ClientMessage };
+ reply.xclient.window = _glfw.x11.xdnd.source;
+ reply.xclient.message_type = _glfw.x11.XdndStatus;
+ reply.xclient.format = 32;
+ reply.xclient.data.l[0] = window->x11.handle;
+ reply.xclient.data.l[2] = 0; // Specify an empty rectangle
+ reply.xclient.data.l[3] = 0;
+
+ if (_glfw.x11.xdnd.format)
+ {
+ // Reply that we are ready to copy the dragged data
+ reply.xclient.data.l[1] = 1; // Accept with no rectangle
+ if (_glfw.x11.xdnd.version >= 2)
+ reply.xclient.data.l[4] = _glfw.x11.XdndActionCopy;
+ }
+
+ XSendEvent(_glfw.x11.display, _glfw.x11.xdnd.source,
+ False, NoEventMask, &reply);
+ XFlush(_glfw.x11.display);
+ }
+
+ return;
+ }
+
+ case SelectionNotify:
+ {
+ if (event->xselection.property == _glfw.x11.XdndSelection)
+ {
+ // The converted data from the drag operation has arrived
+ char* data;
+ const unsigned long result =
+ _glfwGetWindowPropertyX11(event->xselection.requestor,
+ event->xselection.property,
+ event->xselection.target,
+ (unsigned char**) &data);
+
+ if (result)
+ {
+ int count;
+ char** paths = _glfwParseUriList(data, &count);
+
+ _glfwInputDrop(window, count, (const char**) paths);
+
+ for (int i = 0; i < count; i++)
+ _glfw_free(paths[i]);
+ _glfw_free(paths);
+ }
+
+ if (data)
+ XFree(data);
+
+ if (_glfw.x11.xdnd.version >= 2)
+ {
+ XEvent reply = { ClientMessage };
+ reply.xclient.window = _glfw.x11.xdnd.source;
+ reply.xclient.message_type = _glfw.x11.XdndFinished;
+ reply.xclient.format = 32;
+ reply.xclient.data.l[0] = window->x11.handle;
+ reply.xclient.data.l[1] = result;
+ reply.xclient.data.l[2] = _glfw.x11.XdndActionCopy;
+
+ XSendEvent(_glfw.x11.display, _glfw.x11.xdnd.source,
+ False, NoEventMask, &reply);
+ XFlush(_glfw.x11.display);
+ }
+ }
+
+ return;
+ }
+
+ case FocusIn:
+ {
+ if (event->xfocus.mode == NotifyGrab ||
+ event->xfocus.mode == NotifyUngrab)
+ {
+ // Ignore focus events from popup indicator windows, window menu
+ // key chords and window dragging
+ return;
+ }
+
+ if (window->cursorMode == GLFW_CURSOR_DISABLED)
+ disableCursor(window);
+
+ if (window->x11.ic)
+ XSetICFocus(window->x11.ic);
+
+ _glfwInputWindowFocus(window, GLFW_TRUE);
+ return;
+ }
+
+ case FocusOut:
+ {
+ if (event->xfocus.mode == NotifyGrab ||
+ event->xfocus.mode == NotifyUngrab)
+ {
+ // Ignore focus events from popup indicator windows, window menu
+ // key chords and window dragging
+ return;
+ }
+
+ if (window->cursorMode == GLFW_CURSOR_DISABLED)
+ enableCursor(window);
+
+ if (window->x11.ic)
+ XUnsetICFocus(window->x11.ic);
+
+ if (window->monitor && window->autoIconify)
+ _glfwIconifyWindowX11(window);
+
+ _glfwInputWindowFocus(window, GLFW_FALSE);
+ return;
+ }
+
+ case Expose:
+ {
+ _glfwInputWindowDamage(window);
+ return;
+ }
+
+ case PropertyNotify:
+ {
+ if (event->xproperty.state != PropertyNewValue)
+ return;
+
+ if (event->xproperty.atom == _glfw.x11.WM_STATE)
+ {
+ const int state = getWindowState(window);
+ if (state != IconicState && state != NormalState)
+ return;
+
+ const GLFWbool iconified = (state == IconicState);
+ if (window->x11.iconified != iconified)
+ {
+ if (window->monitor)
+ {
+ if (iconified)
+ releaseMonitor(window);
+ else
+ acquireMonitor(window);
+ }
+
+ window->x11.iconified = iconified;
+ _glfwInputWindowIconify(window, iconified);
+ }
+ }
+ else if (event->xproperty.atom == _glfw.x11.NET_WM_STATE)
+ {
+ const GLFWbool maximized = _glfwWindowMaximizedX11(window);
+ if (window->x11.maximized != maximized)
+ {
+ window->x11.maximized = maximized;
+ _glfwInputWindowMaximize(window, maximized);
+ }
+ }
+
+ return;
+ }
+
+ case DestroyNotify:
+ return;
+ }
+}
+
+
+//////////////////////////////////////////////////////////////////////////
+////// GLFW internal API //////
+//////////////////////////////////////////////////////////////////////////
+
+// Retrieve a single window property of the specified type
+// Inspired by fghGetWindowProperty from freeglut
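+// NOTE: Any data returned through value must be released by the caller with XFree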
+//
+unsigned long _glfwGetWindowPropertyX11(Window window,
+ Atom property,
+ Atom type,
+ unsigned char** value)
+{
+ Atom actualType;
+ int actualFormat;
+ unsigned long itemCount, bytesAfter;
+
+ XGetWindowProperty(_glfw.x11.display,
+ window,
+ property,
+ 0,
+ LONG_MAX,
+ False,
+ type,
+ &actualType,
+ &actualFormat,
+ &itemCount,
+ &bytesAfter,
+ value);
+
+ return itemCount;
+}
+
+GLFWbool _glfwIsVisualTransparentX11(Visual* visual)
+{
+ if (!_glfw.x11.xrender.available)
+ return GLFW_FALSE;
+
+ XRenderPictFormat* pf = XRenderFindVisualFormat(_glfw.x11.display, visual);
+ return pf && pf->direct.alphaMask;
+}
+
+// Push contents of our selection to clipboard manager
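+// so that the clipboard contents outlive this application (SAVE_TARGETS protocol)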
+//
+void _glfwPushSelectionToManagerX11(void)
+{
+ XConvertSelection(_glfw.x11.display,
+ _glfw.x11.CLIPBOARD_MANAGER,
+ _glfw.x11.SAVE_TARGETS,
+ None,
+ _glfw.x11.helperWindowHandle,
+ CurrentTime);
+
+ for (;;)
+ {
+ XEvent event;
+
+ while (XCheckIfEvent(_glfw.x11.display, &event, isSelectionEvent, NULL))
+ {
+ switch (event.type)
+ {
+ case SelectionRequest:
+ handleSelectionRequest(&event);
+ break;
+
+ case SelectionNotify:
+ {
+ if (event.xselection.target == _glfw.x11.SAVE_TARGETS)
+ {
+                        // This means one of two things: either the selection
+                        // was not owned, which means there is no clipboard
+                        // manager, or the transfer to the clipboard manager
+                        // has completed
+                        // In either case, we are done here
+ return;
+ }
+
+ break;
+ }
+ }
+ }
+
+ waitForX11Event(NULL);
+ }
+}
+
+void _glfwCreateInputContextX11(_GLFWwindow* window)
+{
+ XIMCallback callback;
+ callback.callback = (XIMProc) inputContextDestroyCallback;
+ callback.client_data = (XPointer) window;
+
+ window->x11.ic = XCreateIC(_glfw.x11.im,
+ XNInputStyle,
+ XIMPreeditNothing | XIMStatusNothing,
+ XNClientWindow,
+ window->x11.handle,
+ XNFocusWindow,
+ window->x11.handle,
+ XNDestroyCallback,
+ &callback,
+ NULL);
+
+ if (window->x11.ic)
+ {
+ XWindowAttributes attribs;
+ XGetWindowAttributes(_glfw.x11.display, window->x11.handle, &attribs);
+
+ unsigned long filter = 0;
+ if (XGetICValues(window->x11.ic, XNFilterEvents, &filter, NULL) == NULL)
+ {
+ XSelectInput(_glfw.x11.display,
+ window->x11.handle,
+ attribs.your_event_mask | filter);
+ }
+ }
+}
+
+
+//////////////////////////////////////////////////////////////////////////
+////// GLFW platform API //////
+//////////////////////////////////////////////////////////////////////////
+
+int _glfwCreateWindowX11(_GLFWwindow* window,
+ const _GLFWwndconfig* wndconfig,
+ const _GLFWctxconfig* ctxconfig,
+ const _GLFWfbconfig* fbconfig)
+{
+ Visual* visual = NULL;
+ int depth;
+
+ if (ctxconfig->client != GLFW_NO_API)
+ {
+ if (ctxconfig->source == GLFW_NATIVE_CONTEXT_API)
+ {
+ if (!_glfwInitGLX())
+ return GLFW_FALSE;
+ if (!_glfwChooseVisualGLX(wndconfig, ctxconfig, fbconfig, &visual, &depth))
+ return GLFW_FALSE;
+ }
+ else if (ctxconfig->source == GLFW_EGL_CONTEXT_API)
+ {
+ if (!_glfwInitEGL())
+ return GLFW_FALSE;
+ if (!_glfwChooseVisualEGL(wndconfig, ctxconfig, fbconfig, &visual, &depth))
+ return GLFW_FALSE;
+ }
+ else if (ctxconfig->source == GLFW_OSMESA_CONTEXT_API)
+ {
+ if (!_glfwInitOSMesa())
+ return GLFW_FALSE;
+ }
+ }
+
+ if (!visual)
+ {
+ visual = DefaultVisual(_glfw.x11.display, _glfw.x11.screen);
+ depth = DefaultDepth(_glfw.x11.display, _glfw.x11.screen);
+ }
+
+ if (!createNativeWindow(window, wndconfig, visual, depth))
+ return GLFW_FALSE;
+
+ if (ctxconfig->client != GLFW_NO_API)
+ {
+ if (ctxconfig->source == GLFW_NATIVE_CONTEXT_API)
+ {
+ if (!_glfwCreateContextGLX(window, ctxconfig, fbconfig))
+ return GLFW_FALSE;
+ }
+ else if (ctxconfig->source == GLFW_EGL_CONTEXT_API)
+ {
+ if (!_glfwCreateContextEGL(window, ctxconfig, fbconfig))
+ return GLFW_FALSE;
+ }
+ else if (ctxconfig->source == GLFW_OSMESA_CONTEXT_API)
+ {
+ if (!_glfwCreateContextOSMesa(window, ctxconfig, fbconfig))
+ return GLFW_FALSE;
+ }
+
+ if (!_glfwRefreshContextAttribs(window, ctxconfig))
+ return GLFW_FALSE;
+ }
+
+ if (wndconfig->mousePassthrough)
+ _glfwSetWindowMousePassthroughX11(window, GLFW_TRUE);
+
+ if (window->monitor)
+ {
+ _glfwShowWindowX11(window);
+ updateWindowMode(window);
+ acquireMonitor(window);
+
+ if (wndconfig->centerCursor)
+ _glfwCenterCursorInContentArea(window);
+ }
+ else
+ {
+ if (wndconfig->visible)
+ {
+ _glfwShowWindowX11(window);
+ if (wndconfig->focused)
+ _glfwFocusWindowX11(window);
+ }
+ }
+
+ XFlush(_glfw.x11.display);
+ return GLFW_TRUE;
+}
+
+void _glfwDestroyWindowX11(_GLFWwindow* window)
+{
+ if (_glfw.x11.disabledCursorWindow == window)
+ _glfw.x11.disabledCursorWindow = NULL;
+
+ if (window->monitor)
+ releaseMonitor(window);
+
+ if (window->x11.ic)
+ {
+ XDestroyIC(window->x11.ic);
+ window->x11.ic = NULL;
+ }
+
+ if (window->context.destroy)
+ window->context.destroy(window);
+
+ if (window->x11.handle)
+ {
+ XDeleteContext(_glfw.x11.display, window->x11.handle, _glfw.x11.context);
+ XUnmapWindow(_glfw.x11.display, window->x11.handle);
+ XDestroyWindow(_glfw.x11.display, window->x11.handle);
+ window->x11.handle = (Window) 0;
+ }
+
+ if (window->x11.colormap)
+ {
+ XFreeColormap(_glfw.x11.display, window->x11.colormap);
+ window->x11.colormap = (Colormap) 0;
+ }
+
+ XFlush(_glfw.x11.display);
+}
+
+void _glfwSetWindowTitleX11(_GLFWwindow* window, const char* title)
+{
+ if (_glfw.x11.xlib.utf8)
+ {
+ Xutf8SetWMProperties(_glfw.x11.display,
+ window->x11.handle,
+ title, title,
+ NULL, 0,
+ NULL, NULL, NULL);
+ }
+
+ XChangeProperty(_glfw.x11.display, window->x11.handle,
+ _glfw.x11.NET_WM_NAME, _glfw.x11.UTF8_STRING, 8,
+ PropModeReplace,
+ (unsigned char*) title, strlen(title));
+
+ XChangeProperty(_glfw.x11.display, window->x11.handle,
+ _glfw.x11.NET_WM_ICON_NAME, _glfw.x11.UTF8_STRING, 8,
+ PropModeReplace,
+ (unsigned char*) title, strlen(title));
+
+ XFlush(_glfw.x11.display);
+}
+
+void _glfwSetWindowIconX11(_GLFWwindow* window, int count, const GLFWimage* images)
+{
+ if (count)
+ {
+ int longCount = 0;
+
+ for (int i = 0; i < count; i++)
+ longCount += 2 + images[i].width * images[i].height;
+
+ unsigned long* icon = _glfw_calloc(longCount, sizeof(unsigned long));
+ unsigned long* target = icon;
+
+ for (int i = 0; i < count; i++)
+ {
+ *target++ = images[i].width;
+ *target++ = images[i].height;
+
+ for (int j = 0; j < images[i].width * images[i].height; j++)
+ {
+ *target++ = (((unsigned long) images[i].pixels[j * 4 + 0]) << 16) |
+ (((unsigned long) images[i].pixels[j * 4 + 1]) << 8) |
+ (((unsigned long) images[i].pixels[j * 4 + 2]) << 0) |
+ (((unsigned long) images[i].pixels[j * 4 + 3]) << 24);
+ }
+ }
+
+ // NOTE: XChangeProperty expects 32-bit values like the image data above to be
+ // placed in the 32 least significant bits of individual longs. This is
+ // true even if long is 64-bit and a WM protocol calls for "packed" data.
+ // This is because of a historical mistake that then became part of the Xlib
+ // ABI. Xlib will pack these values into a regular array of 32-bit values
+ // before sending it over the wire.
+ XChangeProperty(_glfw.x11.display, window->x11.handle,
+ _glfw.x11.NET_WM_ICON,
+ XA_CARDINAL, 32,
+ PropModeReplace,
+ (unsigned char*) icon,
+ longCount);
+
+ _glfw_free(icon);
+ }
+ else
+ {
+ XDeleteProperty(_glfw.x11.display, window->x11.handle,
+ _glfw.x11.NET_WM_ICON);
+ }
+
+ XFlush(_glfw.x11.display);
+}
+
+void _glfwGetWindowPosX11(_GLFWwindow* window, int* xpos, int* ypos)
+{
+ Window dummy;
+ int x, y;
+
+ XTranslateCoordinates(_glfw.x11.display, window->x11.handle, _glfw.x11.root,
+ 0, 0, &x, &y, &dummy);
+
+ if (xpos)
+ *xpos = x;
+ if (ypos)
+ *ypos = y;
+}
+
+void _glfwSetWindowPosX11(_GLFWwindow* window, int xpos, int ypos)
+{
+ // HACK: Explicitly setting PPosition to any value causes some WMs, notably
+ // Compiz and Metacity, to honor the position of unmapped windows
+ if (!_glfwWindowVisibleX11(window))
+ {
+ long supplied;
+ XSizeHints* hints = XAllocSizeHints();
+
+ if (XGetWMNormalHints(_glfw.x11.display, window->x11.handle, hints, &supplied))
+ {
+ hints->flags |= PPosition;
+ hints->x = hints->y = 0;
+
+ XSetWMNormalHints(_glfw.x11.display, window->x11.handle, hints);
+ }
+
+ XFree(hints);
+ }
+
+ XMoveWindow(_glfw.x11.display, window->x11.handle, xpos, ypos);
+ XFlush(_glfw.x11.display);
+}
+
+void _glfwGetWindowSizeX11(_GLFWwindow* window, int* width, int* height)
+{
+ XWindowAttributes attribs;
+ XGetWindowAttributes(_glfw.x11.display, window->x11.handle, &attribs);
+
+ if (width)
+ *width = attribs.width;
+ if (height)
+ *height = attribs.height;
+}
+
+void _glfwSetWindowSizeX11(_GLFWwindow* window, int width, int height)
+{
+ if (window->monitor)
+ {
+ if (window->monitor->window == window)
+ acquireMonitor(window);
+ }
+ else
+ {
+ if (!window->resizable)
+ updateNormalHints(window, width, height);
+
+ XResizeWindow(_glfw.x11.display, window->x11.handle, width, height);
+ }
+
+ XFlush(_glfw.x11.display);
+}
+
+void _glfwSetWindowSizeLimitsX11(_GLFWwindow* window,
+ int minwidth, int minheight,
+ int maxwidth, int maxheight)
+{
+ int width, height;
+ _glfwGetWindowSizeX11(window, &width, &height);
+ updateNormalHints(window, width, height);
+ XFlush(_glfw.x11.display);
+}
+
+void _glfwSetWindowAspectRatioX11(_GLFWwindow* window, int numer, int denom)
+{
+ int width, height;
+ _glfwGetWindowSizeX11(window, &width, &height);
+ updateNormalHints(window, width, height);
+ XFlush(_glfw.x11.display);
+}
+
+void _glfwGetFramebufferSizeX11(_GLFWwindow* window, int* width, int* height)
+{
+ _glfwGetWindowSizeX11(window, width, height);
+}
+
+void _glfwGetWindowFrameSizeX11(_GLFWwindow* window,
+ int* left, int* top,
+ int* right, int* bottom)
+{
+ long* extents = NULL;
+
+ if (window->monitor || !window->decorated)
+ return;
+
+ if (_glfw.x11.NET_FRAME_EXTENTS == None)
+ return;
+
+ if (!_glfwWindowVisibleX11(window) &&
+ _glfw.x11.NET_REQUEST_FRAME_EXTENTS)
+ {
+ XEvent event;
+ double timeout = 0.5;
+
+ // Ensure _NET_FRAME_EXTENTS is set, allowing glfwGetWindowFrameSize to
+ // function before the window is mapped
+ sendEventToWM(window, _glfw.x11.NET_REQUEST_FRAME_EXTENTS,
+ 0, 0, 0, 0, 0);
+
+ // HACK: Use a timeout because earlier versions of some window managers
+ // (at least Unity, Fluxbox and Xfwm) failed to send the reply
+ // They have been fixed but broken versions are still in the wild
+ // If you are affected by this and your window manager is NOT
+ // listed above, PLEASE report it to their and our issue trackers
+ while (!XCheckIfEvent(_glfw.x11.display,
+ &event,
+ isFrameExtentsEvent,
+ (XPointer) window))
+ {
+ if (!waitForX11Event(&timeout))
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "X11: The window manager has a broken _NET_REQUEST_FRAME_EXTENTS implementation; please report this issue");
+ return;
+ }
+ }
+ }
+
+ if (_glfwGetWindowPropertyX11(window->x11.handle,
+ _glfw.x11.NET_FRAME_EXTENTS,
+ XA_CARDINAL,
+ (unsigned char**) &extents) == 4)
+ {
+ if (left)
+ *left = extents[0];
+ if (top)
+ *top = extents[2];
+ if (right)
+ *right = extents[1];
+ if (bottom)
+ *bottom = extents[3];
+ }
+
+ if (extents)
+ XFree(extents);
+}
+
+void _glfwGetWindowContentScaleX11(_GLFWwindow* window, float* xscale, float* yscale)
+{
+ if (xscale)
+ *xscale = _glfw.x11.contentScaleX;
+ if (yscale)
+ *yscale = _glfw.x11.contentScaleY;
+}
+
+void _glfwIconifyWindowX11(_GLFWwindow* window)
+{
+ if (window->x11.overrideRedirect)
+ {
+ // Override-redirect windows cannot be iconified or restored, as those
+ // tasks are performed by the window manager
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "X11: Iconification of full screen windows requires a WM that supports EWMH full screen");
+ return;
+ }
+
+ XIconifyWindow(_glfw.x11.display, window->x11.handle, _glfw.x11.screen);
+ XFlush(_glfw.x11.display);
+}
+
+void _glfwRestoreWindowX11(_GLFWwindow* window)
+{
+ if (window->x11.overrideRedirect)
+ {
+ // Override-redirect windows cannot be iconified or restored, as those
+ // tasks are performed by the window manager
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "X11: Iconification of full screen windows requires a WM that supports EWMH full screen");
+ return;
+ }
+
+ if (_glfwWindowIconifiedX11(window))
+ {
+ XMapWindow(_glfw.x11.display, window->x11.handle);
+ waitForVisibilityNotify(window);
+ }
+ else if (_glfwWindowVisibleX11(window))
+ {
+ if (_glfw.x11.NET_WM_STATE &&
+ _glfw.x11.NET_WM_STATE_MAXIMIZED_VERT &&
+ _glfw.x11.NET_WM_STATE_MAXIMIZED_HORZ)
+ {
+ sendEventToWM(window,
+ _glfw.x11.NET_WM_STATE,
+ _NET_WM_STATE_REMOVE,
+ _glfw.x11.NET_WM_STATE_MAXIMIZED_VERT,
+ _glfw.x11.NET_WM_STATE_MAXIMIZED_HORZ,
+ 1, 0);
+ }
+ }
+
+ XFlush(_glfw.x11.display);
+}
+
+void _glfwMaximizeWindowX11(_GLFWwindow* window)
+{
+ if (!_glfw.x11.NET_WM_STATE ||
+ !_glfw.x11.NET_WM_STATE_MAXIMIZED_VERT ||
+ !_glfw.x11.NET_WM_STATE_MAXIMIZED_HORZ)
+ {
+ return;
+ }
+
+ if (_glfwWindowVisibleX11(window))
+ {
+ sendEventToWM(window,
+ _glfw.x11.NET_WM_STATE,
+ _NET_WM_STATE_ADD,
+ _glfw.x11.NET_WM_STATE_MAXIMIZED_VERT,
+ _glfw.x11.NET_WM_STATE_MAXIMIZED_HORZ,
+ 1, 0);
+ }
+ else
+ {
+ Atom* states = NULL;
+ unsigned long count =
+ _glfwGetWindowPropertyX11(window->x11.handle,
+ _glfw.x11.NET_WM_STATE,
+ XA_ATOM,
+ (unsigned char**) &states);
+
+ // NOTE: We don't check for failure as this property may not exist yet
+ // and that's fine (and we'll create it implicitly with append)
+
+ Atom missing[2] =
+ {
+ _glfw.x11.NET_WM_STATE_MAXIMIZED_VERT,
+ _glfw.x11.NET_WM_STATE_MAXIMIZED_HORZ
+ };
+ unsigned long missingCount = 2;
+
+ for (unsigned long i = 0; i < count; i++)
+ {
+ for (unsigned long j = 0; j < missingCount; j++)
+ {
+ if (states[i] == missing[j])
+ {
+ missing[j] = missing[missingCount - 1];
+ missingCount--;
+ }
+ }
+ }
+
+ if (states)
+ XFree(states);
+
+ if (!missingCount)
+ return;
+
+ XChangeProperty(_glfw.x11.display, window->x11.handle,
+ _glfw.x11.NET_WM_STATE, XA_ATOM, 32,
+ PropModeAppend,
+ (unsigned char*) missing,
+ missingCount);
+ }
+
+ XFlush(_glfw.x11.display);
+}
+
+void _glfwShowWindowX11(_GLFWwindow* window)
+{
+ if (_glfwWindowVisibleX11(window))
+ return;
+
+ XMapWindow(_glfw.x11.display, window->x11.handle);
+ waitForVisibilityNotify(window);
+}
+
+void _glfwHideWindowX11(_GLFWwindow* window)
+{
+ XUnmapWindow(_glfw.x11.display, window->x11.handle);
+ XFlush(_glfw.x11.display);
+}
+
+void _glfwRequestWindowAttentionX11(_GLFWwindow* window)
+{
+ if (!_glfw.x11.NET_WM_STATE || !_glfw.x11.NET_WM_STATE_DEMANDS_ATTENTION)
+ return;
+
+ sendEventToWM(window,
+ _glfw.x11.NET_WM_STATE,
+ _NET_WM_STATE_ADD,
+ _glfw.x11.NET_WM_STATE_DEMANDS_ATTENTION,
+ 0, 1, 0);
+}
+
+void _glfwFocusWindowX11(_GLFWwindow* window)
+{
+ if (_glfw.x11.NET_ACTIVE_WINDOW)
+ sendEventToWM(window, _glfw.x11.NET_ACTIVE_WINDOW, 1, 0, 0, 0, 0);
+ else if (_glfwWindowVisibleX11(window))
+ {
+ XRaiseWindow(_glfw.x11.display, window->x11.handle);
+ XSetInputFocus(_glfw.x11.display, window->x11.handle,
+ RevertToParent, CurrentTime);
+ }
+
+ XFlush(_glfw.x11.display);
+}
+
+void _glfwSetWindowMonitorX11(_GLFWwindow* window,
+ _GLFWmonitor* monitor,
+ int xpos, int ypos,
+ int width, int height,
+ int refreshRate)
+{
+ if (window->monitor == monitor)
+ {
+ if (monitor)
+ {
+ if (monitor->window == window)
+ acquireMonitor(window);
+ }
+ else
+ {
+ if (!window->resizable)
+ updateNormalHints(window, width, height);
+
+ XMoveResizeWindow(_glfw.x11.display, window->x11.handle,
+ xpos, ypos, width, height);
+ }
+
+ XFlush(_glfw.x11.display);
+ return;
+ }
+
+ if (window->monitor)
+ {
+ _glfwSetWindowDecoratedX11(window, window->decorated);
+ _glfwSetWindowFloatingX11(window, window->floating);
+ releaseMonitor(window);
+ }
+
+ _glfwInputWindowMonitor(window, monitor);
+ updateNormalHints(window, width, height);
+
+ if (window->monitor)
+ {
+ if (!_glfwWindowVisibleX11(window))
+ {
+ XMapRaised(_glfw.x11.display, window->x11.handle);
+ waitForVisibilityNotify(window);
+ }
+
+ updateWindowMode(window);
+ acquireMonitor(window);
+ }
+ else
+ {
+ updateWindowMode(window);
+ XMoveResizeWindow(_glfw.x11.display, window->x11.handle,
+ xpos, ypos, width, height);
+ }
+
+ XFlush(_glfw.x11.display);
+}
+
+int _glfwWindowFocusedX11(_GLFWwindow* window)
+{
+ Window focused;
+ int state;
+
+ XGetInputFocus(_glfw.x11.display, &focused, &state);
+ return window->x11.handle == focused;
+}
+
+int _glfwWindowIconifiedX11(_GLFWwindow* window)
+{
+ return getWindowState(window) == IconicState;
+}
+
+int _glfwWindowVisibleX11(_GLFWwindow* window)
+{
+ XWindowAttributes wa;
+ XGetWindowAttributes(_glfw.x11.display, window->x11.handle, &wa);
+ return wa.map_state == IsViewable;
+}
+
+int _glfwWindowMaximizedX11(_GLFWwindow* window)
+{
+ Atom* states;
+ GLFWbool maximized = GLFW_FALSE;
+
+ if (!_glfw.x11.NET_WM_STATE ||
+ !_glfw.x11.NET_WM_STATE_MAXIMIZED_VERT ||
+ !_glfw.x11.NET_WM_STATE_MAXIMIZED_HORZ)
+ {
+ return maximized;
+ }
+
+ const unsigned long count =
+ _glfwGetWindowPropertyX11(window->x11.handle,
+ _glfw.x11.NET_WM_STATE,
+ XA_ATOM,
+ (unsigned char**) &states);
+
+ for (unsigned long i = 0; i < count; i++)
+ {
+ if (states[i] == _glfw.x11.NET_WM_STATE_MAXIMIZED_VERT ||
+ states[i] == _glfw.x11.NET_WM_STATE_MAXIMIZED_HORZ)
+ {
+ maximized = GLFW_TRUE;
+ break;
+ }
+ }
+
+ if (states)
+ XFree(states);
+
+ return maximized;
+}
+
+int _glfwWindowHoveredX11(_GLFWwindow* window)
+{
+ Window w = _glfw.x11.root;
+ while (w)
+ {
+ Window root;
+ int rootX, rootY, childX, childY;
+ unsigned int mask;
+
+ _glfwGrabErrorHandlerX11();
+
+ const Bool result = XQueryPointer(_glfw.x11.display, w,
+ &root, &w, &rootX, &rootY,
+ &childX, &childY, &mask);
+
+ _glfwReleaseErrorHandlerX11();
+
+ if (_glfw.x11.errorCode == BadWindow)
+ w = _glfw.x11.root;
+ else if (!result)
+ return GLFW_FALSE;
+ else if (w == window->x11.handle)
+ return GLFW_TRUE;
+ }
+
+ return GLFW_FALSE;
+}
+
+int _glfwFramebufferTransparentX11(_GLFWwindow* window)
+{
+ if (!window->x11.transparent)
+ return GLFW_FALSE;
+
+ return XGetSelectionOwner(_glfw.x11.display, _glfw.x11.NET_WM_CM_Sx) != None;
+}
+
+void _glfwSetWindowResizableX11(_GLFWwindow* window, GLFWbool enabled)
+{
+ int width, height;
+ _glfwGetWindowSizeX11(window, &width, &height);
+ updateNormalHints(window, width, height);
+}
+
+void _glfwSetWindowDecoratedX11(_GLFWwindow* window, GLFWbool enabled)
+{
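+    // NOTE: Motif WM hints are a de facto standard honored by most window
+    //       managers for toggling window decorations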
+ struct
+ {
+ unsigned long flags;
+ unsigned long functions;
+ unsigned long decorations;
+ long input_mode;
+ unsigned long status;
+ } hints = {0};
+
+ hints.flags = MWM_HINTS_DECORATIONS;
+ hints.decorations = enabled ? MWM_DECOR_ALL : 0;
+
+ XChangeProperty(_glfw.x11.display, window->x11.handle,
+ _glfw.x11.MOTIF_WM_HINTS,
+ _glfw.x11.MOTIF_WM_HINTS, 32,
+ PropModeReplace,
+ (unsigned char*) &hints,
+ sizeof(hints) / sizeof(long));
+}
+
+void _glfwSetWindowFloatingX11(_GLFWwindow* window, GLFWbool enabled)
+{
+ if (!_glfw.x11.NET_WM_STATE || !_glfw.x11.NET_WM_STATE_ABOVE)
+ return;
+
+ if (_glfwWindowVisibleX11(window))
+ {
+ const long action = enabled ? _NET_WM_STATE_ADD : _NET_WM_STATE_REMOVE;
+ sendEventToWM(window,
+ _glfw.x11.NET_WM_STATE,
+ action,
+ _glfw.x11.NET_WM_STATE_ABOVE,
+ 0, 1, 0);
+ }
+ else
+ {
+ Atom* states = NULL;
+ const unsigned long count =
+ _glfwGetWindowPropertyX11(window->x11.handle,
+ _glfw.x11.NET_WM_STATE,
+ XA_ATOM,
+ (unsigned char**) &states);
+
+ // NOTE: We don't check for failure as this property may not exist yet
+ // and that's fine (and we'll create it implicitly with append)
+
+ if (enabled)
+ {
+ unsigned long i;
+
+ for (i = 0; i < count; i++)
+ {
+ if (states[i] == _glfw.x11.NET_WM_STATE_ABOVE)
+ break;
+ }
+
+ if (i == count)
+ {
+ XChangeProperty(_glfw.x11.display, window->x11.handle,
+ _glfw.x11.NET_WM_STATE, XA_ATOM, 32,
+ PropModeAppend,
+ (unsigned char*) &_glfw.x11.NET_WM_STATE_ABOVE,
+ 1);
+ }
+ }
+ else if (states)
+ {
+ for (unsigned long i = 0; i < count; i++)
+ {
+ if (states[i] == _glfw.x11.NET_WM_STATE_ABOVE)
+ {
+ states[i] = states[count - 1];
+ XChangeProperty(_glfw.x11.display, window->x11.handle,
+ _glfw.x11.NET_WM_STATE, XA_ATOM, 32,
+ PropModeReplace, (unsigned char*) states, count - 1);
+ break;
+ }
+ }
+ }
+
+ if (states)
+ XFree(states);
+ }
+
+ XFlush(_glfw.x11.display);
+}
+
+void _glfwSetWindowMousePassthroughX11(_GLFWwindow* window, GLFWbool enabled)
+{
+ if (!_glfw.x11.xshape.available)
+ return;
+
+ if (enabled)
+ {
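+        // Setting an empty input shape makes pointer events pass through the
+        // window to whatever lies beneath it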
+ Region region = XCreateRegion();
+ XShapeCombineRegion(_glfw.x11.display, window->x11.handle,
+ ShapeInput, 0, 0, region, ShapeSet);
+ XDestroyRegion(region);
+ }
+ else
+ {
+ XShapeCombineMask(_glfw.x11.display, window->x11.handle,
+ ShapeInput, 0, 0, None, ShapeSet);
+ }
+}
+
+float _glfwGetWindowOpacityX11(_GLFWwindow* window)
+{
+ float opacity = 1.f;
+
+ if (XGetSelectionOwner(_glfw.x11.display, _glfw.x11.NET_WM_CM_Sx))
+ {
+ CARD32* value = NULL;
+
+ if (_glfwGetWindowPropertyX11(window->x11.handle,
+ _glfw.x11.NET_WM_WINDOW_OPACITY,
+ XA_CARDINAL,
+ (unsigned char**) &value))
+ {
+ opacity = (float) (*value / (double) 0xffffffffu);
+ }
+
+ if (value)
+ XFree(value);
+ }
+
+ return opacity;
+}
+
+void _glfwSetWindowOpacityX11(_GLFWwindow* window, float opacity)
+{
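+    // NOTE: _NET_WM_WINDOW_OPACITY is a 32-bit value where 0xffffffff means
+    //       fully opaque, so scale the [0, 1] opacity accordingly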
+ const CARD32 value = (CARD32) (0xffffffffu * (double) opacity);
+ XChangeProperty(_glfw.x11.display, window->x11.handle,
+ _glfw.x11.NET_WM_WINDOW_OPACITY, XA_CARDINAL, 32,
+ PropModeReplace, (unsigned char*) &value, 1);
+}
+
+void _glfwSetRawMouseMotionX11(_GLFWwindow *window, GLFWbool enabled)
+{
+ if (!_glfw.x11.xi.available)
+ return;
+
+ if (_glfw.x11.disabledCursorWindow != window)
+ return;
+
+ if (enabled)
+ enableRawMouseMotion(window);
+ else
+ disableRawMouseMotion(window);
+}
+
+GLFWbool _glfwRawMouseMotionSupportedX11(void)
+{
+ return _glfw.x11.xi.available;
+}
+
+void _glfwPollEventsX11(void)
+{
+ drainEmptyEvents();
+
+#if defined(__linux__)
+ if (_glfw.joysticksInitialized)
+ _glfwDetectJoystickConnectionLinux();
+#endif
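+    // NOTE: XPending reads any incoming events into the queue, flushing the
+    //       output buffer if needed, so QLength below sees them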
+ XPending(_glfw.x11.display);
+
+ while (QLength(_glfw.x11.display))
+ {
+ XEvent event;
+ XNextEvent(_glfw.x11.display, &event);
+ processEvent(&event);
+ }
+
+ _GLFWwindow* window = _glfw.x11.disabledCursorWindow;
+ if (window)
+ {
+ int width, height;
+ _glfwGetWindowSizeX11(window, &width, &height);
+
+ // NOTE: Re-center the cursor only if it has moved since the last call,
+ // to avoid breaking glfwWaitEvents with MotionNotify
+ if (window->x11.lastCursorPosX != width / 2 ||
+ window->x11.lastCursorPosY != height / 2)
+ {
+ _glfwSetCursorPosX11(window, width / 2, height / 2);
+ }
+ }
+
+ XFlush(_glfw.x11.display);
+}
+
+void _glfwWaitEventsX11(void)
+{
+ waitForAnyEvent(NULL);
+ _glfwPollEventsX11();
+}
+
+void _glfwWaitEventsTimeoutX11(double timeout)
+{
+ waitForAnyEvent(&timeout);
+ _glfwPollEventsX11();
+}
+
+void _glfwPostEmptyEventX11(void)
+{
+ writeEmptyEvent();
+}
+
+void _glfwGetCursorPosX11(_GLFWwindow* window, double* xpos, double* ypos)
+{
+ Window root, child;
+ int rootX, rootY, childX, childY;
+ unsigned int mask;
+
+ XQueryPointer(_glfw.x11.display, window->x11.handle,
+ &root, &child,
+ &rootX, &rootY, &childX, &childY,
+ &mask);
+
+ if (xpos)
+ *xpos = childX;
+ if (ypos)
+ *ypos = childY;
+}
+
+void _glfwSetCursorPosX11(_GLFWwindow* window, double x, double y)
+{
+ // Store the new position so it can be recognized later
+ window->x11.warpCursorPosX = (int) x;
+ window->x11.warpCursorPosY = (int) y;
+
+ XWarpPointer(_glfw.x11.display, None, window->x11.handle,
+ 0,0,0,0, (int) x, (int) y);
+ XFlush(_glfw.x11.display);
+}
+
+void _glfwSetCursorModeX11(_GLFWwindow* window, int mode)
+{
+ if (mode == GLFW_CURSOR_DISABLED)
+ {
+ if (_glfwWindowFocusedX11(window))
+ disableCursor(window);
+ }
+ else if (_glfw.x11.disabledCursorWindow == window)
+ enableCursor(window);
+ else
+ updateCursorImage(window);
+
+ XFlush(_glfw.x11.display);
+}
+
+const char* _glfwGetScancodeNameX11(int scancode)
+{
+ if (!_glfw.x11.xkb.available)
+ return NULL;
+
+ if (scancode < 0 || scancode > 0xff ||
+ _glfw.x11.keycodes[scancode] == GLFW_KEY_UNKNOWN)
+ {
+ _glfwInputError(GLFW_INVALID_VALUE, "Invalid scancode %i", scancode);
+ return NULL;
+ }
+
+ const int key = _glfw.x11.keycodes[scancode];
+ const KeySym keysym = XkbKeycodeToKeysym(_glfw.x11.display,
+ scancode, _glfw.x11.xkb.group, 0);
+ if (keysym == NoSymbol)
+ return NULL;
+
+ const uint32_t codepoint = _glfwKeySym2Unicode(keysym);
+ if (codepoint == GLFW_INVALID_CODEPOINT)
+ return NULL;
+
+ const size_t count = _glfwEncodeUTF8(_glfw.x11.keynames[key], codepoint);
+ if (count == 0)
+ return NULL;
+
+ _glfw.x11.keynames[key][count] = '\0';
+ return _glfw.x11.keynames[key];
+}
+
+int _glfwGetKeyScancodeX11(int key)
+{
+ return _glfw.x11.scancodes[key];
+}
+
+int _glfwCreateCursorX11(_GLFWcursor* cursor,
+ const GLFWimage* image,
+ int xhot, int yhot)
+{
+ cursor->x11.handle = _glfwCreateNativeCursorX11(image, xhot, yhot);
+ if (!cursor->x11.handle)
+ return GLFW_FALSE;
+
+ return GLFW_TRUE;
+}
+
+int _glfwCreateStandardCursorX11(_GLFWcursor* cursor, int shape)
+{
+ if (_glfw.x11.xcursor.handle)
+ {
+ char* theme = XcursorGetTheme(_glfw.x11.display);
+ if (theme)
+ {
+ const int size = XcursorGetDefaultSize(_glfw.x11.display);
+ const char* name = NULL;
+
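+            // NOTE: These names largely follow the cursor naming used by the
+            //       CSS specification and modern freedesktop cursor themes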
+ switch (shape)
+ {
+ case GLFW_ARROW_CURSOR:
+ name = "default";
+ break;
+ case GLFW_IBEAM_CURSOR:
+ name = "text";
+ break;
+ case GLFW_CROSSHAIR_CURSOR:
+ name = "crosshair";
+ break;
+ case GLFW_POINTING_HAND_CURSOR:
+ name = "pointer";
+ break;
+ case GLFW_RESIZE_EW_CURSOR:
+ name = "ew-resize";
+ break;
+ case GLFW_RESIZE_NS_CURSOR:
+ name = "ns-resize";
+ break;
+ case GLFW_RESIZE_NWSE_CURSOR:
+ name = "nwse-resize";
+ break;
+ case GLFW_RESIZE_NESW_CURSOR:
+ name = "nesw-resize";
+ break;
+ case GLFW_RESIZE_ALL_CURSOR:
+ name = "all-scroll";
+ break;
+ case GLFW_NOT_ALLOWED_CURSOR:
+ name = "not-allowed";
+ break;
+ }
+
+ XcursorImage* image = XcursorLibraryLoadImage(name, theme, size);
+ if (image)
+ {
+ cursor->x11.handle = XcursorImageLoadCursor(_glfw.x11.display, image);
+ XcursorImageDestroy(image);
+ }
+ }
+ }
+
+ if (!cursor->x11.handle)
+ {
+ unsigned int native = 0;
+
+ switch (shape)
+ {
+ case GLFW_ARROW_CURSOR:
+ native = XC_left_ptr;
+ break;
+ case GLFW_IBEAM_CURSOR:
+ native = XC_xterm;
+ break;
+ case GLFW_CROSSHAIR_CURSOR:
+ native = XC_crosshair;
+ break;
+ case GLFW_POINTING_HAND_CURSOR:
+ native = XC_hand2;
+ break;
+ case GLFW_RESIZE_EW_CURSOR:
+ native = XC_sb_h_double_arrow;
+ break;
+ case GLFW_RESIZE_NS_CURSOR:
+ native = XC_sb_v_double_arrow;
+ break;
+ case GLFW_RESIZE_ALL_CURSOR:
+ native = XC_fleur;
+ break;
+ default:
+ _glfwInputError(GLFW_CURSOR_UNAVAILABLE,
+ "X11: Standard cursor shape unavailable");
+ return GLFW_FALSE;
+ }
+
+ cursor->x11.handle = XCreateFontCursor(_glfw.x11.display, native);
+ if (!cursor->x11.handle)
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "X11: Failed to create standard cursor");
+ return GLFW_FALSE;
+ }
+ }
+
+ return GLFW_TRUE;
+}
+
+void _glfwDestroyCursorX11(_GLFWcursor* cursor)
+{
+ if (cursor->x11.handle)
+ XFreeCursor(_glfw.x11.display, cursor->x11.handle);
+}
+
+void _glfwSetCursorX11(_GLFWwindow* window, _GLFWcursor* cursor)
+{
+ if (window->cursorMode == GLFW_CURSOR_NORMAL)
+ {
+ updateCursorImage(window);
+ XFlush(_glfw.x11.display);
+ }
+}
+
+void _glfwSetClipboardStringX11(const char* string)
+{
+ char* copy = _glfw_strdup(string);
+ _glfw_free(_glfw.x11.clipboardString);
+ _glfw.x11.clipboardString = copy;
+
+ XSetSelectionOwner(_glfw.x11.display,
+ _glfw.x11.CLIPBOARD,
+ _glfw.x11.helperWindowHandle,
+ CurrentTime);
+
+ if (XGetSelectionOwner(_glfw.x11.display, _glfw.x11.CLIPBOARD) !=
+ _glfw.x11.helperWindowHandle)
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "X11: Failed to become owner of clipboard selection");
+ }
+}
+
+const char* _glfwGetClipboardStringX11(void)
+{
+ return getSelectionString(_glfw.x11.CLIPBOARD);
+}
+
+EGLenum _glfwGetEGLPlatformX11(EGLint** attribs)
+{
+ if (_glfw.egl.ANGLE_platform_angle)
+ {
+ int type = 0;
+
+ if (_glfw.egl.ANGLE_platform_angle_opengl)
+ {
+ if (_glfw.hints.init.angleType == GLFW_ANGLE_PLATFORM_TYPE_OPENGL)
+ type = EGL_PLATFORM_ANGLE_TYPE_OPENGL_ANGLE;
+ }
+
+ if (_glfw.egl.ANGLE_platform_angle_vulkan)
+ {
+ if (_glfw.hints.init.angleType == GLFW_ANGLE_PLATFORM_TYPE_VULKAN)
+ type = EGL_PLATFORM_ANGLE_TYPE_VULKAN_ANGLE;
+ }
+
+ if (type)
+ {
+ *attribs = _glfw_calloc(5, sizeof(EGLint));
+ (*attribs)[0] = EGL_PLATFORM_ANGLE_TYPE_ANGLE;
+ (*attribs)[1] = type;
+ (*attribs)[2] = EGL_PLATFORM_ANGLE_NATIVE_PLATFORM_TYPE_ANGLE;
+ (*attribs)[3] = EGL_PLATFORM_X11_EXT;
+ (*attribs)[4] = EGL_NONE;
+ return EGL_PLATFORM_ANGLE_ANGLE;
+ }
+ }
+
+ if (_glfw.egl.EXT_platform_base && _glfw.egl.EXT_platform_x11)
+ return EGL_PLATFORM_X11_EXT;
+
+ return 0;
+}
+
+EGLNativeDisplayType _glfwGetEGLNativeDisplayX11(void)
+{
+ return _glfw.x11.display;
+}
+
+EGLNativeWindowType _glfwGetEGLNativeWindowX11(_GLFWwindow* window)
+{
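+    // NOTE: EGL platform extensions expect a pointer to the Window, while the
+    //       legacy eglCreateWindowSurface path takes the Window itself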
+ if (_glfw.egl.platform)
+ return &window->x11.handle;
+ else
+ return (EGLNativeWindowType) window->x11.handle;
+}
+
+void _glfwGetRequiredInstanceExtensionsX11(char** extensions)
+{
+ if (!_glfw.vk.KHR_surface)
+ return;
+
+ if (!_glfw.vk.KHR_xcb_surface || !_glfw.x11.x11xcb.handle)
+ {
+ if (!_glfw.vk.KHR_xlib_surface)
+ return;
+ }
+
+ extensions[0] = "VK_KHR_surface";
+
+ // NOTE: VK_KHR_xcb_surface is preferred due to some early ICDs exposing but
+ // not correctly implementing VK_KHR_xlib_surface
+ if (_glfw.vk.KHR_xcb_surface && _glfw.x11.x11xcb.handle)
+ extensions[1] = "VK_KHR_xcb_surface";
+ else
+ extensions[1] = "VK_KHR_xlib_surface";
+}
+
+int _glfwGetPhysicalDevicePresentationSupportX11(VkInstance instance,
+ VkPhysicalDevice device,
+ uint32_t queuefamily)
+{
+ VisualID visualID = XVisualIDFromVisual(DefaultVisual(_glfw.x11.display,
+ _glfw.x11.screen));
+
+ if (_glfw.vk.KHR_xcb_surface && _glfw.x11.x11xcb.handle)
+ {
+ PFN_vkGetPhysicalDeviceXcbPresentationSupportKHR
+ vkGetPhysicalDeviceXcbPresentationSupportKHR =
+ (PFN_vkGetPhysicalDeviceXcbPresentationSupportKHR)
+ vkGetInstanceProcAddr(instance, "vkGetPhysicalDeviceXcbPresentationSupportKHR");
+ if (!vkGetPhysicalDeviceXcbPresentationSupportKHR)
+ {
+ _glfwInputError(GLFW_API_UNAVAILABLE,
+ "X11: Vulkan instance missing VK_KHR_xcb_surface extension");
+ return GLFW_FALSE;
+ }
+
+ xcb_connection_t* connection = XGetXCBConnection(_glfw.x11.display);
+ if (!connection)
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "X11: Failed to retrieve XCB connection");
+ return GLFW_FALSE;
+ }
+
+ return vkGetPhysicalDeviceXcbPresentationSupportKHR(device,
+ queuefamily,
+ connection,
+ visualID);
+ }
+ else
+ {
+ PFN_vkGetPhysicalDeviceXlibPresentationSupportKHR
+ vkGetPhysicalDeviceXlibPresentationSupportKHR =
+ (PFN_vkGetPhysicalDeviceXlibPresentationSupportKHR)
+ vkGetInstanceProcAddr(instance, "vkGetPhysicalDeviceXlibPresentationSupportKHR");
+ if (!vkGetPhysicalDeviceXlibPresentationSupportKHR)
+ {
+ _glfwInputError(GLFW_API_UNAVAILABLE,
+ "X11: Vulkan instance missing VK_KHR_xlib_surface extension");
+ return GLFW_FALSE;
+ }
+
+ return vkGetPhysicalDeviceXlibPresentationSupportKHR(device,
+ queuefamily,
+ _glfw.x11.display,
+ visualID);
+ }
+}
+
+VkResult _glfwCreateWindowSurfaceX11(VkInstance instance,
+ _GLFWwindow* window,
+ const VkAllocationCallbacks* allocator,
+ VkSurfaceKHR* surface)
+{
+ if (_glfw.vk.KHR_xcb_surface && _glfw.x11.x11xcb.handle)
+ {
+ VkResult err;
+ VkXcbSurfaceCreateInfoKHR sci;
+ PFN_vkCreateXcbSurfaceKHR vkCreateXcbSurfaceKHR;
+
+ xcb_connection_t* connection = XGetXCBConnection(_glfw.x11.display);
+ if (!connection)
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "X11: Failed to retrieve XCB connection");
+ return VK_ERROR_EXTENSION_NOT_PRESENT;
+ }
+
+ vkCreateXcbSurfaceKHR = (PFN_vkCreateXcbSurfaceKHR)
+ vkGetInstanceProcAddr(instance, "vkCreateXcbSurfaceKHR");
+ if (!vkCreateXcbSurfaceKHR)
+ {
+ _glfwInputError(GLFW_API_UNAVAILABLE,
+ "X11: Vulkan instance missing VK_KHR_xcb_surface extension");
+ return VK_ERROR_EXTENSION_NOT_PRESENT;
+ }
+
+ memset(&sci, 0, sizeof(sci));
+ sci.sType = VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR;
+ sci.connection = connection;
+ sci.window = window->x11.handle;
+
+ err = vkCreateXcbSurfaceKHR(instance, &sci, allocator, surface);
+ if (err)
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "X11: Failed to create Vulkan XCB surface: %s",
+ _glfwGetVulkanResultString(err));
+ }
+
+ return err;
+ }
+ else
+ {
+ VkResult err;
+ VkXlibSurfaceCreateInfoKHR sci;
+ PFN_vkCreateXlibSurfaceKHR vkCreateXlibSurfaceKHR;
+
+ vkCreateXlibSurfaceKHR = (PFN_vkCreateXlibSurfaceKHR)
+ vkGetInstanceProcAddr(instance, "vkCreateXlibSurfaceKHR");
+ if (!vkCreateXlibSurfaceKHR)
+ {
+ _glfwInputError(GLFW_API_UNAVAILABLE,
+ "X11: Vulkan instance missing VK_KHR_xlib_surface extension");
+ return VK_ERROR_EXTENSION_NOT_PRESENT;
+ }
+
+ memset(&sci, 0, sizeof(sci));
+ sci.sType = VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR;
+ sci.dpy = _glfw.x11.display;
+ sci.window = window->x11.handle;
+
+ err = vkCreateXlibSurfaceKHR(instance, &sci, allocator, surface);
+ if (err)
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "X11: Failed to create Vulkan X11 surface: %s",
+ _glfwGetVulkanResultString(err));
+ }
+
+ return err;
+ }
+}
+
+
+//////////////////////////////////////////////////////////////////////////
+////// GLFW native API //////
+//////////////////////////////////////////////////////////////////////////
+
+GLFWAPI Display* glfwGetX11Display(void)
+{
+ _GLFW_REQUIRE_INIT_OR_RETURN(NULL);
+
+ if (_glfw.platform.platformID != GLFW_PLATFORM_X11)
+ {
+ _glfwInputError(GLFW_PLATFORM_UNAVAILABLE, "X11: Platform not initialized");
+ return NULL;
+ }
+
+ return _glfw.x11.display;
+}
+
+GLFWAPI Window glfwGetX11Window(GLFWwindow* handle)
+{
+ _GLFWwindow* window = (_GLFWwindow*) handle;
+ _GLFW_REQUIRE_INIT_OR_RETURN(None);
+
+ if (_glfw.platform.platformID != GLFW_PLATFORM_X11)
+ {
+ _glfwInputError(GLFW_PLATFORM_UNAVAILABLE, "X11: Platform not initialized");
+ return None;
+ }
+
+ return window->x11.handle;
+}
+
+GLFWAPI void glfwSetX11SelectionString(const char* string)
+{
+ _GLFW_REQUIRE_INIT();
+
+ if (_glfw.platform.platformID != GLFW_PLATFORM_X11)
+ {
+ _glfwInputError(GLFW_PLATFORM_UNAVAILABLE, "X11: Platform not initialized");
+ return;
+ }
+
+ _glfw_free(_glfw.x11.primarySelectionString);
+ _glfw.x11.primarySelectionString = _glfw_strdup(string);
+
+ XSetSelectionOwner(_glfw.x11.display,
+ _glfw.x11.PRIMARY,
+ _glfw.x11.helperWindowHandle,
+ CurrentTime);
+
+ if (XGetSelectionOwner(_glfw.x11.display, _glfw.x11.PRIMARY) !=
+ _glfw.x11.helperWindowHandle)
+ {
+ _glfwInputError(GLFW_PLATFORM_ERROR,
+ "X11: Failed to become owner of primary selection");
+ }
+}
+
+GLFWAPI const char* glfwGetX11SelectionString(void)
+{
+ _GLFW_REQUIRE_INIT_OR_RETURN(NULL);
+
+ if (_glfw.platform.platformID != GLFW_PLATFORM_X11)
+ {
+ _glfwInputError(GLFW_PLATFORM_UNAVAILABLE, "X11: Platform not initialized");
+ return NULL;
+ }
+
+ return getSelectionString(_glfw.x11.PRIMARY);
+}
+
diff --git a/chromium/third_party/dawn/third_party/glfw/src/xkb_unicode.c b/chromium/third_party/dawn/third_party/glfw/src/xkb_unicode.c
new file mode 100644
index 00000000000..1b2482cdf38
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/src/xkb_unicode.c
@@ -0,0 +1,942 @@
+//========================================================================
+// GLFW 3.4 X11 - www.glfw.org
+//------------------------------------------------------------------------
+// Copyright (c) 2002-2006 Marcus Geelnard
+// Copyright (c) 2006-2017 Camilla Löwy <elmindreda@glfw.org>
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would
+// be appreciated but is not required.
+//
+// 2. Altered source versions must be plainly marked as such, and must not
+// be misrepresented as being the original software.
+//
+// 3. This notice may not be removed or altered from any source
+// distribution.
+//
+//========================================================================
+// It is fine to use C99 in this file because it will not be built with VS
+//========================================================================
+
+#include "internal.h"
+
+
+/*
+ * Marcus: This code was originally written by Markus G. Kuhn.
+ * I have made some slight changes (trimmed it down a bit from >60 KB to
+ * 20 KB), but the functionality is the same.
+ */
+
+/*
+ * This module converts keysym values into the corresponding ISO 10646
+ * (UCS, Unicode) values.
+ *
+ * The array keysymtab[] contains pairs of X11 keysym values for graphical
+ * characters and the corresponding Unicode value. The function
+ * _glfwKeySym2Unicode() maps a keysym onto a Unicode value using a binary
+ * search; therefore keysymtab[] must remain SORTED by keysym value.
+ *
+ * Any UCS character in the range U-00000000 to U-00FFFFFF can be
+ * represented by a keysym value in the range 0x01000000 to 0x01ffffff.
+ * This admittedly does not cover the entire 31-bit space of UCS, but
+ * it does cover all of the characters up to U-10FFFF, which can be
+ * represented by UTF-16, and more, and it is very unlikely that higher
+ * UCS codes will ever be assigned by ISO. So to get the Unicode character
+ * U+ABCD you can directly use keysym 0x0100abcd.
+ *
+ * Original author: Markus G. Kuhn <mkuhn@acm.org>, University of
+ * Cambridge, April 2001
+ *
+ * Special thanks to Richard Verhoeven <river@win.tue.nl> for preparing
+ * an initial draft of the mapping table.
+ *
+ */
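+
+/*
+ * Usage sketch (illustrative only; these calls simply restate the three
+ * lookup rules above against _glfwKeySym2Unicode(), defined later in this
+ * file):
+ *
+ *   _glfwKeySym2Unicode(0x0041);     // Latin-1 passthrough -> U+0041 'A'
+ *   _glfwKeySym2Unicode(0x01a1);     // table lookup        -> U+0104
+ *   _glfwKeySym2Unicode(0x0100abcd); // direct encoding     -> U+ABCD
+ *   _glfwKeySym2Unicode(0x00ffffff); // no match            -> GLFW_INVALID_CODEPOINT
+ */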
+
+
+//************************************************************************
+//**** KeySym to Unicode mapping table ****
+//************************************************************************
+
+static const struct codepair {
+ unsigned short keysym;
+ unsigned short ucs;
+} keysymtab[] = {
+ { 0x01a1, 0x0104 },
+ { 0x01a2, 0x02d8 },
+ { 0x01a3, 0x0141 },
+ { 0x01a5, 0x013d },
+ { 0x01a6, 0x015a },
+ { 0x01a9, 0x0160 },
+ { 0x01aa, 0x015e },
+ { 0x01ab, 0x0164 },
+ { 0x01ac, 0x0179 },
+ { 0x01ae, 0x017d },
+ { 0x01af, 0x017b },
+ { 0x01b1, 0x0105 },
+ { 0x01b2, 0x02db },
+ { 0x01b3, 0x0142 },
+ { 0x01b5, 0x013e },
+ { 0x01b6, 0x015b },
+ { 0x01b7, 0x02c7 },
+ { 0x01b9, 0x0161 },
+ { 0x01ba, 0x015f },
+ { 0x01bb, 0x0165 },
+ { 0x01bc, 0x017a },
+ { 0x01bd, 0x02dd },
+ { 0x01be, 0x017e },
+ { 0x01bf, 0x017c },
+ { 0x01c0, 0x0154 },
+ { 0x01c3, 0x0102 },
+ { 0x01c5, 0x0139 },
+ { 0x01c6, 0x0106 },
+ { 0x01c8, 0x010c },
+ { 0x01ca, 0x0118 },
+ { 0x01cc, 0x011a },
+ { 0x01cf, 0x010e },
+ { 0x01d0, 0x0110 },
+ { 0x01d1, 0x0143 },
+ { 0x01d2, 0x0147 },
+ { 0x01d5, 0x0150 },
+ { 0x01d8, 0x0158 },
+ { 0x01d9, 0x016e },
+ { 0x01db, 0x0170 },
+ { 0x01de, 0x0162 },
+ { 0x01e0, 0x0155 },
+ { 0x01e3, 0x0103 },
+ { 0x01e5, 0x013a },
+ { 0x01e6, 0x0107 },
+ { 0x01e8, 0x010d },
+ { 0x01ea, 0x0119 },
+ { 0x01ec, 0x011b },
+ { 0x01ef, 0x010f },
+ { 0x01f0, 0x0111 },
+ { 0x01f1, 0x0144 },
+ { 0x01f2, 0x0148 },
+ { 0x01f5, 0x0151 },
+ { 0x01f8, 0x0159 },
+ { 0x01f9, 0x016f },
+ { 0x01fb, 0x0171 },
+ { 0x01fe, 0x0163 },
+ { 0x01ff, 0x02d9 },
+ { 0x02a1, 0x0126 },
+ { 0x02a6, 0x0124 },
+ { 0x02a9, 0x0130 },
+ { 0x02ab, 0x011e },
+ { 0x02ac, 0x0134 },
+ { 0x02b1, 0x0127 },
+ { 0x02b6, 0x0125 },
+ { 0x02b9, 0x0131 },
+ { 0x02bb, 0x011f },
+ { 0x02bc, 0x0135 },
+ { 0x02c5, 0x010a },
+ { 0x02c6, 0x0108 },
+ { 0x02d5, 0x0120 },
+ { 0x02d8, 0x011c },
+ { 0x02dd, 0x016c },
+ { 0x02de, 0x015c },
+ { 0x02e5, 0x010b },
+ { 0x02e6, 0x0109 },
+ { 0x02f5, 0x0121 },
+ { 0x02f8, 0x011d },
+ { 0x02fd, 0x016d },
+ { 0x02fe, 0x015d },
+ { 0x03a2, 0x0138 },
+ { 0x03a3, 0x0156 },
+ { 0x03a5, 0x0128 },
+ { 0x03a6, 0x013b },
+ { 0x03aa, 0x0112 },
+ { 0x03ab, 0x0122 },
+ { 0x03ac, 0x0166 },
+ { 0x03b3, 0x0157 },
+ { 0x03b5, 0x0129 },
+ { 0x03b6, 0x013c },
+ { 0x03ba, 0x0113 },
+ { 0x03bb, 0x0123 },
+ { 0x03bc, 0x0167 },
+ { 0x03bd, 0x014a },
+ { 0x03bf, 0x014b },
+ { 0x03c0, 0x0100 },
+ { 0x03c7, 0x012e },
+ { 0x03cc, 0x0116 },
+ { 0x03cf, 0x012a },
+ { 0x03d1, 0x0145 },
+ { 0x03d2, 0x014c },
+ { 0x03d3, 0x0136 },
+ { 0x03d9, 0x0172 },
+ { 0x03dd, 0x0168 },
+ { 0x03de, 0x016a },
+ { 0x03e0, 0x0101 },
+ { 0x03e7, 0x012f },
+ { 0x03ec, 0x0117 },
+ { 0x03ef, 0x012b },
+ { 0x03f1, 0x0146 },
+ { 0x03f2, 0x014d },
+ { 0x03f3, 0x0137 },
+ { 0x03f9, 0x0173 },
+ { 0x03fd, 0x0169 },
+ { 0x03fe, 0x016b },
+ { 0x047e, 0x203e },
+ { 0x04a1, 0x3002 },
+ { 0x04a2, 0x300c },
+ { 0x04a3, 0x300d },
+ { 0x04a4, 0x3001 },
+ { 0x04a5, 0x30fb },
+ { 0x04a6, 0x30f2 },
+ { 0x04a7, 0x30a1 },
+ { 0x04a8, 0x30a3 },
+ { 0x04a9, 0x30a5 },
+ { 0x04aa, 0x30a7 },
+ { 0x04ab, 0x30a9 },
+ { 0x04ac, 0x30e3 },
+ { 0x04ad, 0x30e5 },
+ { 0x04ae, 0x30e7 },
+ { 0x04af, 0x30c3 },
+ { 0x04b0, 0x30fc },
+ { 0x04b1, 0x30a2 },
+ { 0x04b2, 0x30a4 },
+ { 0x04b3, 0x30a6 },
+ { 0x04b4, 0x30a8 },
+ { 0x04b5, 0x30aa },
+ { 0x04b6, 0x30ab },
+ { 0x04b7, 0x30ad },
+ { 0x04b8, 0x30af },
+ { 0x04b9, 0x30b1 },
+ { 0x04ba, 0x30b3 },
+ { 0x04bb, 0x30b5 },
+ { 0x04bc, 0x30b7 },
+ { 0x04bd, 0x30b9 },
+ { 0x04be, 0x30bb },
+ { 0x04bf, 0x30bd },
+ { 0x04c0, 0x30bf },
+ { 0x04c1, 0x30c1 },
+ { 0x04c2, 0x30c4 },
+ { 0x04c3, 0x30c6 },
+ { 0x04c4, 0x30c8 },
+ { 0x04c5, 0x30ca },
+ { 0x04c6, 0x30cb },
+ { 0x04c7, 0x30cc },
+ { 0x04c8, 0x30cd },
+ { 0x04c9, 0x30ce },
+ { 0x04ca, 0x30cf },
+ { 0x04cb, 0x30d2 },
+ { 0x04cc, 0x30d5 },
+ { 0x04cd, 0x30d8 },
+ { 0x04ce, 0x30db },
+ { 0x04cf, 0x30de },
+ { 0x04d0, 0x30df },
+ { 0x04d1, 0x30e0 },
+ { 0x04d2, 0x30e1 },
+ { 0x04d3, 0x30e2 },
+ { 0x04d4, 0x30e4 },
+ { 0x04d5, 0x30e6 },
+ { 0x04d6, 0x30e8 },
+ { 0x04d7, 0x30e9 },
+ { 0x04d8, 0x30ea },
+ { 0x04d9, 0x30eb },
+ { 0x04da, 0x30ec },
+ { 0x04db, 0x30ed },
+ { 0x04dc, 0x30ef },
+ { 0x04dd, 0x30f3 },
+ { 0x04de, 0x309b },
+ { 0x04df, 0x309c },
+ { 0x05ac, 0x060c },
+ { 0x05bb, 0x061b },
+ { 0x05bf, 0x061f },
+ { 0x05c1, 0x0621 },
+ { 0x05c2, 0x0622 },
+ { 0x05c3, 0x0623 },
+ { 0x05c4, 0x0624 },
+ { 0x05c5, 0x0625 },
+ { 0x05c6, 0x0626 },
+ { 0x05c7, 0x0627 },
+ { 0x05c8, 0x0628 },
+ { 0x05c9, 0x0629 },
+ { 0x05ca, 0x062a },
+ { 0x05cb, 0x062b },
+ { 0x05cc, 0x062c },
+ { 0x05cd, 0x062d },
+ { 0x05ce, 0x062e },
+ { 0x05cf, 0x062f },
+ { 0x05d0, 0x0630 },
+ { 0x05d1, 0x0631 },
+ { 0x05d2, 0x0632 },
+ { 0x05d3, 0x0633 },
+ { 0x05d4, 0x0634 },
+ { 0x05d5, 0x0635 },
+ { 0x05d6, 0x0636 },
+ { 0x05d7, 0x0637 },
+ { 0x05d8, 0x0638 },
+ { 0x05d9, 0x0639 },
+ { 0x05da, 0x063a },
+ { 0x05e0, 0x0640 },
+ { 0x05e1, 0x0641 },
+ { 0x05e2, 0x0642 },
+ { 0x05e3, 0x0643 },
+ { 0x05e4, 0x0644 },
+ { 0x05e5, 0x0645 },
+ { 0x05e6, 0x0646 },
+ { 0x05e7, 0x0647 },
+ { 0x05e8, 0x0648 },
+ { 0x05e9, 0x0649 },
+ { 0x05ea, 0x064a },
+ { 0x05eb, 0x064b },
+ { 0x05ec, 0x064c },
+ { 0x05ed, 0x064d },
+ { 0x05ee, 0x064e },
+ { 0x05ef, 0x064f },
+ { 0x05f0, 0x0650 },
+ { 0x05f1, 0x0651 },
+ { 0x05f2, 0x0652 },
+ { 0x06a1, 0x0452 },
+ { 0x06a2, 0x0453 },
+ { 0x06a3, 0x0451 },
+ { 0x06a4, 0x0454 },
+ { 0x06a5, 0x0455 },
+ { 0x06a6, 0x0456 },
+ { 0x06a7, 0x0457 },
+ { 0x06a8, 0x0458 },
+ { 0x06a9, 0x0459 },
+ { 0x06aa, 0x045a },
+ { 0x06ab, 0x045b },
+ { 0x06ac, 0x045c },
+ { 0x06ae, 0x045e },
+ { 0x06af, 0x045f },
+ { 0x06b0, 0x2116 },
+ { 0x06b1, 0x0402 },
+ { 0x06b2, 0x0403 },
+ { 0x06b3, 0x0401 },
+ { 0x06b4, 0x0404 },
+ { 0x06b5, 0x0405 },
+ { 0x06b6, 0x0406 },
+ { 0x06b7, 0x0407 },
+ { 0x06b8, 0x0408 },
+ { 0x06b9, 0x0409 },
+ { 0x06ba, 0x040a },
+ { 0x06bb, 0x040b },
+ { 0x06bc, 0x040c },
+ { 0x06be, 0x040e },
+ { 0x06bf, 0x040f },
+ { 0x06c0, 0x044e },
+ { 0x06c1, 0x0430 },
+ { 0x06c2, 0x0431 },
+ { 0x06c3, 0x0446 },
+ { 0x06c4, 0x0434 },
+ { 0x06c5, 0x0435 },
+ { 0x06c6, 0x0444 },
+ { 0x06c7, 0x0433 },
+ { 0x06c8, 0x0445 },
+ { 0x06c9, 0x0438 },
+ { 0x06ca, 0x0439 },
+ { 0x06cb, 0x043a },
+ { 0x06cc, 0x043b },
+ { 0x06cd, 0x043c },
+ { 0x06ce, 0x043d },
+ { 0x06cf, 0x043e },
+ { 0x06d0, 0x043f },
+ { 0x06d1, 0x044f },
+ { 0x06d2, 0x0440 },
+ { 0x06d3, 0x0441 },
+ { 0x06d4, 0x0442 },
+ { 0x06d5, 0x0443 },
+ { 0x06d6, 0x0436 },
+ { 0x06d7, 0x0432 },
+ { 0x06d8, 0x044c },
+ { 0x06d9, 0x044b },
+ { 0x06da, 0x0437 },
+ { 0x06db, 0x0448 },
+ { 0x06dc, 0x044d },
+ { 0x06dd, 0x0449 },
+ { 0x06de, 0x0447 },
+ { 0x06df, 0x044a },
+ { 0x06e0, 0x042e },
+ { 0x06e1, 0x0410 },
+ { 0x06e2, 0x0411 },
+ { 0x06e3, 0x0426 },
+ { 0x06e4, 0x0414 },
+ { 0x06e5, 0x0415 },
+ { 0x06e6, 0x0424 },
+ { 0x06e7, 0x0413 },
+ { 0x06e8, 0x0425 },
+ { 0x06e9, 0x0418 },
+ { 0x06ea, 0x0419 },
+ { 0x06eb, 0x041a },
+ { 0x06ec, 0x041b },
+ { 0x06ed, 0x041c },
+ { 0x06ee, 0x041d },
+ { 0x06ef, 0x041e },
+ { 0x06f0, 0x041f },
+ { 0x06f1, 0x042f },
+ { 0x06f2, 0x0420 },
+ { 0x06f3, 0x0421 },
+ { 0x06f4, 0x0422 },
+ { 0x06f5, 0x0423 },
+ { 0x06f6, 0x0416 },
+ { 0x06f7, 0x0412 },
+ { 0x06f8, 0x042c },
+ { 0x06f9, 0x042b },
+ { 0x06fa, 0x0417 },
+ { 0x06fb, 0x0428 },
+ { 0x06fc, 0x042d },
+ { 0x06fd, 0x0429 },
+ { 0x06fe, 0x0427 },
+ { 0x06ff, 0x042a },
+ { 0x07a1, 0x0386 },
+ { 0x07a2, 0x0388 },
+ { 0x07a3, 0x0389 },
+ { 0x07a4, 0x038a },
+ { 0x07a5, 0x03aa },
+ { 0x07a7, 0x038c },
+ { 0x07a8, 0x038e },
+ { 0x07a9, 0x03ab },
+ { 0x07ab, 0x038f },
+ { 0x07ae, 0x0385 },
+ { 0x07af, 0x2015 },
+ { 0x07b1, 0x03ac },
+ { 0x07b2, 0x03ad },
+ { 0x07b3, 0x03ae },
+ { 0x07b4, 0x03af },
+ { 0x07b5, 0x03ca },
+ { 0x07b6, 0x0390 },
+ { 0x07b7, 0x03cc },
+ { 0x07b8, 0x03cd },
+ { 0x07b9, 0x03cb },
+ { 0x07ba, 0x03b0 },
+ { 0x07bb, 0x03ce },
+ { 0x07c1, 0x0391 },
+ { 0x07c2, 0x0392 },
+ { 0x07c3, 0x0393 },
+ { 0x07c4, 0x0394 },
+ { 0x07c5, 0x0395 },
+ { 0x07c6, 0x0396 },
+ { 0x07c7, 0x0397 },
+ { 0x07c8, 0x0398 },
+ { 0x07c9, 0x0399 },
+ { 0x07ca, 0x039a },
+ { 0x07cb, 0x039b },
+ { 0x07cc, 0x039c },
+ { 0x07cd, 0x039d },
+ { 0x07ce, 0x039e },
+ { 0x07cf, 0x039f },
+ { 0x07d0, 0x03a0 },
+ { 0x07d1, 0x03a1 },
+ { 0x07d2, 0x03a3 },
+ { 0x07d4, 0x03a4 },
+ { 0x07d5, 0x03a5 },
+ { 0x07d6, 0x03a6 },
+ { 0x07d7, 0x03a7 },
+ { 0x07d8, 0x03a8 },
+ { 0x07d9, 0x03a9 },
+ { 0x07e1, 0x03b1 },
+ { 0x07e2, 0x03b2 },
+ { 0x07e3, 0x03b3 },
+ { 0x07e4, 0x03b4 },
+ { 0x07e5, 0x03b5 },
+ { 0x07e6, 0x03b6 },
+ { 0x07e7, 0x03b7 },
+ { 0x07e8, 0x03b8 },
+ { 0x07e9, 0x03b9 },
+ { 0x07ea, 0x03ba },
+ { 0x07eb, 0x03bb },
+ { 0x07ec, 0x03bc },
+ { 0x07ed, 0x03bd },
+ { 0x07ee, 0x03be },
+ { 0x07ef, 0x03bf },
+ { 0x07f0, 0x03c0 },
+ { 0x07f1, 0x03c1 },
+ { 0x07f2, 0x03c3 },
+ { 0x07f3, 0x03c2 },
+ { 0x07f4, 0x03c4 },
+ { 0x07f5, 0x03c5 },
+ { 0x07f6, 0x03c6 },
+ { 0x07f7, 0x03c7 },
+ { 0x07f8, 0x03c8 },
+ { 0x07f9, 0x03c9 },
+ { 0x08a1, 0x23b7 },
+ { 0x08a2, 0x250c },
+ { 0x08a3, 0x2500 },
+ { 0x08a4, 0x2320 },
+ { 0x08a5, 0x2321 },
+ { 0x08a6, 0x2502 },
+ { 0x08a7, 0x23a1 },
+ { 0x08a8, 0x23a3 },
+ { 0x08a9, 0x23a4 },
+ { 0x08aa, 0x23a6 },
+ { 0x08ab, 0x239b },
+ { 0x08ac, 0x239d },
+ { 0x08ad, 0x239e },
+ { 0x08ae, 0x23a0 },
+ { 0x08af, 0x23a8 },
+ { 0x08b0, 0x23ac },
+ { 0x08bc, 0x2264 },
+ { 0x08bd, 0x2260 },
+ { 0x08be, 0x2265 },
+ { 0x08bf, 0x222b },
+ { 0x08c0, 0x2234 },
+ { 0x08c1, 0x221d },
+ { 0x08c2, 0x221e },
+ { 0x08c5, 0x2207 },
+ { 0x08c8, 0x223c },
+ { 0x08c9, 0x2243 },
+ { 0x08cd, 0x21d4 },
+ { 0x08ce, 0x21d2 },
+ { 0x08cf, 0x2261 },
+ { 0x08d6, 0x221a },
+ { 0x08da, 0x2282 },
+ { 0x08db, 0x2283 },
+ { 0x08dc, 0x2229 },
+ { 0x08dd, 0x222a },
+ { 0x08de, 0x2227 },
+ { 0x08df, 0x2228 },
+ { 0x08ef, 0x2202 },
+ { 0x08f6, 0x0192 },
+ { 0x08fb, 0x2190 },
+ { 0x08fc, 0x2191 },
+ { 0x08fd, 0x2192 },
+ { 0x08fe, 0x2193 },
+ { 0x09e0, 0x25c6 },
+ { 0x09e1, 0x2592 },
+ { 0x09e2, 0x2409 },
+ { 0x09e3, 0x240c },
+ { 0x09e4, 0x240d },
+ { 0x09e5, 0x240a },
+ { 0x09e8, 0x2424 },
+ { 0x09e9, 0x240b },
+ { 0x09ea, 0x2518 },
+ { 0x09eb, 0x2510 },
+ { 0x09ec, 0x250c },
+ { 0x09ed, 0x2514 },
+ { 0x09ee, 0x253c },
+ { 0x09ef, 0x23ba },
+ { 0x09f0, 0x23bb },
+ { 0x09f1, 0x2500 },
+ { 0x09f2, 0x23bc },
+ { 0x09f3, 0x23bd },
+ { 0x09f4, 0x251c },
+ { 0x09f5, 0x2524 },
+ { 0x09f6, 0x2534 },
+ { 0x09f7, 0x252c },
+ { 0x09f8, 0x2502 },
+ { 0x0aa1, 0x2003 },
+ { 0x0aa2, 0x2002 },
+ { 0x0aa3, 0x2004 },
+ { 0x0aa4, 0x2005 },
+ { 0x0aa5, 0x2007 },
+ { 0x0aa6, 0x2008 },
+ { 0x0aa7, 0x2009 },
+ { 0x0aa8, 0x200a },
+ { 0x0aa9, 0x2014 },
+ { 0x0aaa, 0x2013 },
+ { 0x0aae, 0x2026 },
+ { 0x0aaf, 0x2025 },
+ { 0x0ab0, 0x2153 },
+ { 0x0ab1, 0x2154 },
+ { 0x0ab2, 0x2155 },
+ { 0x0ab3, 0x2156 },
+ { 0x0ab4, 0x2157 },
+ { 0x0ab5, 0x2158 },
+ { 0x0ab6, 0x2159 },
+ { 0x0ab7, 0x215a },
+ { 0x0ab8, 0x2105 },
+ { 0x0abb, 0x2012 },
+ { 0x0abc, 0x2329 },
+ { 0x0abe, 0x232a },
+ { 0x0ac3, 0x215b },
+ { 0x0ac4, 0x215c },
+ { 0x0ac5, 0x215d },
+ { 0x0ac6, 0x215e },
+ { 0x0ac9, 0x2122 },
+ { 0x0aca, 0x2613 },
+ { 0x0acc, 0x25c1 },
+ { 0x0acd, 0x25b7 },
+ { 0x0ace, 0x25cb },
+ { 0x0acf, 0x25af },
+ { 0x0ad0, 0x2018 },
+ { 0x0ad1, 0x2019 },
+ { 0x0ad2, 0x201c },
+ { 0x0ad3, 0x201d },
+ { 0x0ad4, 0x211e },
+ { 0x0ad6, 0x2032 },
+ { 0x0ad7, 0x2033 },
+ { 0x0ad9, 0x271d },
+ { 0x0adb, 0x25ac },
+ { 0x0adc, 0x25c0 },
+ { 0x0add, 0x25b6 },
+ { 0x0ade, 0x25cf },
+ { 0x0adf, 0x25ae },
+ { 0x0ae0, 0x25e6 },
+ { 0x0ae1, 0x25ab },
+ { 0x0ae2, 0x25ad },
+ { 0x0ae3, 0x25b3 },
+ { 0x0ae4, 0x25bd },
+ { 0x0ae5, 0x2606 },
+ { 0x0ae6, 0x2022 },
+ { 0x0ae7, 0x25aa },
+ { 0x0ae8, 0x25b2 },
+ { 0x0ae9, 0x25bc },
+ { 0x0aea, 0x261c },
+ { 0x0aeb, 0x261e },
+ { 0x0aec, 0x2663 },
+ { 0x0aed, 0x2666 },
+ { 0x0aee, 0x2665 },
+ { 0x0af0, 0x2720 },
+ { 0x0af1, 0x2020 },
+ { 0x0af2, 0x2021 },
+ { 0x0af3, 0x2713 },
+ { 0x0af4, 0x2717 },
+ { 0x0af5, 0x266f },
+ { 0x0af6, 0x266d },
+ { 0x0af7, 0x2642 },
+ { 0x0af8, 0x2640 },
+ { 0x0af9, 0x260e },
+ { 0x0afa, 0x2315 },
+ { 0x0afb, 0x2117 },
+ { 0x0afc, 0x2038 },
+ { 0x0afd, 0x201a },
+ { 0x0afe, 0x201e },
+ { 0x0ba3, 0x003c },
+ { 0x0ba6, 0x003e },
+ { 0x0ba8, 0x2228 },
+ { 0x0ba9, 0x2227 },
+ { 0x0bc0, 0x00af },
+ { 0x0bc2, 0x22a5 },
+ { 0x0bc3, 0x2229 },
+ { 0x0bc4, 0x230a },
+ { 0x0bc6, 0x005f },
+ { 0x0bca, 0x2218 },
+ { 0x0bcc, 0x2395 },
+ { 0x0bce, 0x22a4 },
+ { 0x0bcf, 0x25cb },
+ { 0x0bd3, 0x2308 },
+ { 0x0bd6, 0x222a },
+ { 0x0bd8, 0x2283 },
+ { 0x0bda, 0x2282 },
+ { 0x0bdc, 0x22a2 },
+ { 0x0bfc, 0x22a3 },
+ { 0x0cdf, 0x2017 },
+ { 0x0ce0, 0x05d0 },
+ { 0x0ce1, 0x05d1 },
+ { 0x0ce2, 0x05d2 },
+ { 0x0ce3, 0x05d3 },
+ { 0x0ce4, 0x05d4 },
+ { 0x0ce5, 0x05d5 },
+ { 0x0ce6, 0x05d6 },
+ { 0x0ce7, 0x05d7 },
+ { 0x0ce8, 0x05d8 },
+ { 0x0ce9, 0x05d9 },
+ { 0x0cea, 0x05da },
+ { 0x0ceb, 0x05db },
+ { 0x0cec, 0x05dc },
+ { 0x0ced, 0x05dd },
+ { 0x0cee, 0x05de },
+ { 0x0cef, 0x05df },
+ { 0x0cf0, 0x05e0 },
+ { 0x0cf1, 0x05e1 },
+ { 0x0cf2, 0x05e2 },
+ { 0x0cf3, 0x05e3 },
+ { 0x0cf4, 0x05e4 },
+ { 0x0cf5, 0x05e5 },
+ { 0x0cf6, 0x05e6 },
+ { 0x0cf7, 0x05e7 },
+ { 0x0cf8, 0x05e8 },
+ { 0x0cf9, 0x05e9 },
+ { 0x0cfa, 0x05ea },
+ { 0x0da1, 0x0e01 },
+ { 0x0da2, 0x0e02 },
+ { 0x0da3, 0x0e03 },
+ { 0x0da4, 0x0e04 },
+ { 0x0da5, 0x0e05 },
+ { 0x0da6, 0x0e06 },
+ { 0x0da7, 0x0e07 },
+ { 0x0da8, 0x0e08 },
+ { 0x0da9, 0x0e09 },
+ { 0x0daa, 0x0e0a },
+ { 0x0dab, 0x0e0b },
+ { 0x0dac, 0x0e0c },
+ { 0x0dad, 0x0e0d },
+ { 0x0dae, 0x0e0e },
+ { 0x0daf, 0x0e0f },
+ { 0x0db0, 0x0e10 },
+ { 0x0db1, 0x0e11 },
+ { 0x0db2, 0x0e12 },
+ { 0x0db3, 0x0e13 },
+ { 0x0db4, 0x0e14 },
+ { 0x0db5, 0x0e15 },
+ { 0x0db6, 0x0e16 },
+ { 0x0db7, 0x0e17 },
+ { 0x0db8, 0x0e18 },
+ { 0x0db9, 0x0e19 },
+ { 0x0dba, 0x0e1a },
+ { 0x0dbb, 0x0e1b },
+ { 0x0dbc, 0x0e1c },
+ { 0x0dbd, 0x0e1d },
+ { 0x0dbe, 0x0e1e },
+ { 0x0dbf, 0x0e1f },
+ { 0x0dc0, 0x0e20 },
+ { 0x0dc1, 0x0e21 },
+ { 0x0dc2, 0x0e22 },
+ { 0x0dc3, 0x0e23 },
+ { 0x0dc4, 0x0e24 },
+ { 0x0dc5, 0x0e25 },
+ { 0x0dc6, 0x0e26 },
+ { 0x0dc7, 0x0e27 },
+ { 0x0dc8, 0x0e28 },
+ { 0x0dc9, 0x0e29 },
+ { 0x0dca, 0x0e2a },
+ { 0x0dcb, 0x0e2b },
+ { 0x0dcc, 0x0e2c },
+ { 0x0dcd, 0x0e2d },
+ { 0x0dce, 0x0e2e },
+ { 0x0dcf, 0x0e2f },
+ { 0x0dd0, 0x0e30 },
+ { 0x0dd1, 0x0e31 },
+ { 0x0dd2, 0x0e32 },
+ { 0x0dd3, 0x0e33 },
+ { 0x0dd4, 0x0e34 },
+ { 0x0dd5, 0x0e35 },
+ { 0x0dd6, 0x0e36 },
+ { 0x0dd7, 0x0e37 },
+ { 0x0dd8, 0x0e38 },
+ { 0x0dd9, 0x0e39 },
+ { 0x0dda, 0x0e3a },
+ { 0x0ddf, 0x0e3f },
+ { 0x0de0, 0x0e40 },
+ { 0x0de1, 0x0e41 },
+ { 0x0de2, 0x0e42 },
+ { 0x0de3, 0x0e43 },
+ { 0x0de4, 0x0e44 },
+ { 0x0de5, 0x0e45 },
+ { 0x0de6, 0x0e46 },
+ { 0x0de7, 0x0e47 },
+ { 0x0de8, 0x0e48 },
+ { 0x0de9, 0x0e49 },
+ { 0x0dea, 0x0e4a },
+ { 0x0deb, 0x0e4b },
+ { 0x0dec, 0x0e4c },
+ { 0x0ded, 0x0e4d },
+ { 0x0df0, 0x0e50 },
+ { 0x0df1, 0x0e51 },
+ { 0x0df2, 0x0e52 },
+ { 0x0df3, 0x0e53 },
+ { 0x0df4, 0x0e54 },
+ { 0x0df5, 0x0e55 },
+ { 0x0df6, 0x0e56 },
+ { 0x0df7, 0x0e57 },
+ { 0x0df8, 0x0e58 },
+ { 0x0df9, 0x0e59 },
+ { 0x0ea1, 0x3131 },
+ { 0x0ea2, 0x3132 },
+ { 0x0ea3, 0x3133 },
+ { 0x0ea4, 0x3134 },
+ { 0x0ea5, 0x3135 },
+ { 0x0ea6, 0x3136 },
+ { 0x0ea7, 0x3137 },
+ { 0x0ea8, 0x3138 },
+ { 0x0ea9, 0x3139 },
+ { 0x0eaa, 0x313a },
+ { 0x0eab, 0x313b },
+ { 0x0eac, 0x313c },
+ { 0x0ead, 0x313d },
+ { 0x0eae, 0x313e },
+ { 0x0eaf, 0x313f },
+ { 0x0eb0, 0x3140 },
+ { 0x0eb1, 0x3141 },
+ { 0x0eb2, 0x3142 },
+ { 0x0eb3, 0x3143 },
+ { 0x0eb4, 0x3144 },
+ { 0x0eb5, 0x3145 },
+ { 0x0eb6, 0x3146 },
+ { 0x0eb7, 0x3147 },
+ { 0x0eb8, 0x3148 },
+ { 0x0eb9, 0x3149 },
+ { 0x0eba, 0x314a },
+ { 0x0ebb, 0x314b },
+ { 0x0ebc, 0x314c },
+ { 0x0ebd, 0x314d },
+ { 0x0ebe, 0x314e },
+ { 0x0ebf, 0x314f },
+ { 0x0ec0, 0x3150 },
+ { 0x0ec1, 0x3151 },
+ { 0x0ec2, 0x3152 },
+ { 0x0ec3, 0x3153 },
+ { 0x0ec4, 0x3154 },
+ { 0x0ec5, 0x3155 },
+ { 0x0ec6, 0x3156 },
+ { 0x0ec7, 0x3157 },
+ { 0x0ec8, 0x3158 },
+ { 0x0ec9, 0x3159 },
+ { 0x0eca, 0x315a },
+ { 0x0ecb, 0x315b },
+ { 0x0ecc, 0x315c },
+ { 0x0ecd, 0x315d },
+ { 0x0ece, 0x315e },
+ { 0x0ecf, 0x315f },
+ { 0x0ed0, 0x3160 },
+ { 0x0ed1, 0x3161 },
+ { 0x0ed2, 0x3162 },
+ { 0x0ed3, 0x3163 },
+ { 0x0ed4, 0x11a8 },
+ { 0x0ed5, 0x11a9 },
+ { 0x0ed6, 0x11aa },
+ { 0x0ed7, 0x11ab },
+ { 0x0ed8, 0x11ac },
+ { 0x0ed9, 0x11ad },
+ { 0x0eda, 0x11ae },
+ { 0x0edb, 0x11af },
+ { 0x0edc, 0x11b0 },
+ { 0x0edd, 0x11b1 },
+ { 0x0ede, 0x11b2 },
+ { 0x0edf, 0x11b3 },
+ { 0x0ee0, 0x11b4 },
+ { 0x0ee1, 0x11b5 },
+ { 0x0ee2, 0x11b6 },
+ { 0x0ee3, 0x11b7 },
+ { 0x0ee4, 0x11b8 },
+ { 0x0ee5, 0x11b9 },
+ { 0x0ee6, 0x11ba },
+ { 0x0ee7, 0x11bb },
+ { 0x0ee8, 0x11bc },
+ { 0x0ee9, 0x11bd },
+ { 0x0eea, 0x11be },
+ { 0x0eeb, 0x11bf },
+ { 0x0eec, 0x11c0 },
+ { 0x0eed, 0x11c1 },
+ { 0x0eee, 0x11c2 },
+ { 0x0eef, 0x316d },
+ { 0x0ef0, 0x3171 },
+ { 0x0ef1, 0x3178 },
+ { 0x0ef2, 0x317f },
+ { 0x0ef3, 0x3181 },
+ { 0x0ef4, 0x3184 },
+ { 0x0ef5, 0x3186 },
+ { 0x0ef6, 0x318d },
+ { 0x0ef7, 0x318e },
+ { 0x0ef8, 0x11eb },
+ { 0x0ef9, 0x11f0 },
+ { 0x0efa, 0x11f9 },
+ { 0x0eff, 0x20a9 },
+ { 0x13a4, 0x20ac },
+ { 0x13bc, 0x0152 },
+ { 0x13bd, 0x0153 },
+ { 0x13be, 0x0178 },
+ { 0x20ac, 0x20ac },
+ { 0xfe50, '`' },
+ { 0xfe51, 0x00b4 },
+ { 0xfe52, '^' },
+ { 0xfe53, '~' },
+ { 0xfe54, 0x00af },
+ { 0xfe55, 0x02d8 },
+ { 0xfe56, 0x02d9 },
+ { 0xfe57, 0x00a8 },
+ { 0xfe58, 0x02da },
+ { 0xfe59, 0x02dd },
+ { 0xfe5a, 0x02c7 },
+ { 0xfe5b, 0x00b8 },
+ { 0xfe5c, 0x02db },
+ { 0xfe5d, 0x037a },
+ { 0xfe5e, 0x309b },
+ { 0xfe5f, 0x309c },
+ { 0xfe63, '/' },
+ { 0xfe64, 0x02bc },
+ { 0xfe65, 0x02bd },
+ { 0xfe66, 0x02f5 },
+ { 0xfe67, 0x02f3 },
+ { 0xfe68, 0x02cd },
+ { 0xfe69, 0xa788 },
+ { 0xfe6a, 0x02f7 },
+ { 0xfe6e, ',' },
+ { 0xfe6f, 0x00a4 },
+ { 0xfe80, 'a' }, // XK_dead_a
+ { 0xfe81, 'A' }, // XK_dead_A
+ { 0xfe82, 'e' }, // XK_dead_e
+ { 0xfe83, 'E' }, // XK_dead_E
+ { 0xfe84, 'i' }, // XK_dead_i
+ { 0xfe85, 'I' }, // XK_dead_I
+ { 0xfe86, 'o' }, // XK_dead_o
+ { 0xfe87, 'O' }, // XK_dead_O
+ { 0xfe88, 'u' }, // XK_dead_u
+ { 0xfe89, 'U' }, // XK_dead_U
+ { 0xfe8a, 0x0259 },
+ { 0xfe8b, 0x018f },
+ { 0xfe8c, 0x00b5 },
+ { 0xfe90, '_' },
+ { 0xfe91, 0x02c8 },
+ { 0xfe92, 0x02cc },
+ { 0xff80 /*XKB_KEY_KP_Space*/, ' ' },
+ { 0xff95 /*XKB_KEY_KP_7*/, 0x0037 },
+ { 0xff96 /*XKB_KEY_KP_4*/, 0x0034 },
+ { 0xff97 /*XKB_KEY_KP_8*/, 0x0038 },
+ { 0xff98 /*XKB_KEY_KP_6*/, 0x0036 },
+ { 0xff99 /*XKB_KEY_KP_2*/, 0x0032 },
+ { 0xff9a /*XKB_KEY_KP_9*/, 0x0039 },
+ { 0xff9b /*XKB_KEY_KP_3*/, 0x0033 },
+ { 0xff9c /*XKB_KEY_KP_1*/, 0x0031 },
+ { 0xff9d /*XKB_KEY_KP_5*/, 0x0035 },
+ { 0xff9e /*XKB_KEY_KP_0*/, 0x0030 },
+ { 0xffaa /*XKB_KEY_KP_Multiply*/, '*' },
+ { 0xffab /*XKB_KEY_KP_Add*/, '+' },
+ { 0xffac /*XKB_KEY_KP_Separator*/, ',' },
+ { 0xffad /*XKB_KEY_KP_Subtract*/, '-' },
+ { 0xffae /*XKB_KEY_KP_Decimal*/, '.' },
+ { 0xffaf /*XKB_KEY_KP_Divide*/, '/' },
+ { 0xffb0 /*XKB_KEY_KP_0*/, 0x0030 },
+ { 0xffb1 /*XKB_KEY_KP_1*/, 0x0031 },
+ { 0xffb2 /*XKB_KEY_KP_2*/, 0x0032 },
+ { 0xffb3 /*XKB_KEY_KP_3*/, 0x0033 },
+ { 0xffb4 /*XKB_KEY_KP_4*/, 0x0034 },
+ { 0xffb5 /*XKB_KEY_KP_5*/, 0x0035 },
+ { 0xffb6 /*XKB_KEY_KP_6*/, 0x0036 },
+ { 0xffb7 /*XKB_KEY_KP_7*/, 0x0037 },
+ { 0xffb8 /*XKB_KEY_KP_8*/, 0x0038 },
+ { 0xffb9 /*XKB_KEY_KP_9*/, 0x0039 },
+ { 0xffbd /*XKB_KEY_KP_Equal*/, '=' }
+};
+
+
+//////////////////////////////////////////////////////////////////////////
+////// GLFW internal API //////
+//////////////////////////////////////////////////////////////////////////
+
+// Convert XKB KeySym to Unicode
+//
+uint32_t _glfwKeySym2Unicode(unsigned int keysym)
+{
+ int min = 0;
+ int max = sizeof(keysymtab) / sizeof(struct codepair) - 1;
+ int mid;
+
+ // First check for Latin-1 characters (1:1 mapping)
+ if ((keysym >= 0x0020 && keysym <= 0x007e) ||
+ (keysym >= 0x00a0 && keysym <= 0x00ff))
+ {
+ return keysym;
+ }
+
+ // Also check for directly encoded 24-bit UCS characters
+ if ((keysym & 0xff000000) == 0x01000000)
+ return keysym & 0x00ffffff;
+
+ // Binary search in table
+ while (max >= min)
+ {
+ mid = (min + max) / 2;
+ if (keysymtab[mid].keysym < keysym)
+ min = mid + 1;
+ else if (keysymtab[mid].keysym > keysym)
+ max = mid - 1;
+ else
+ return keysymtab[mid].ucs;
+ }
+
+ // No matching Unicode value found
+ return GLFW_INVALID_CODEPOINT;
+}
+
diff --git a/chromium/third_party/dawn/third_party/glfw/src/xkb_unicode.h b/chromium/third_party/dawn/third_party/glfw/src/xkb_unicode.h
new file mode 100644
index 00000000000..b07408f617c
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/glfw/src/xkb_unicode.h
@@ -0,0 +1,30 @@
+//========================================================================
+// GLFW 3.4 Linux - www.glfw.org
+//------------------------------------------------------------------------
+// Copyright (c) 2014 Jonas Ådahl <jadahl@gmail.com>
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would
+// be appreciated but is not required.
+//
+// 2. Altered source versions must be plainly marked as such, and must not
+// be misrepresented as being the original software.
+//
+// 3. This notice may not be removed or altered from any source
+// distribution.
+//
+//========================================================================
+
+#define GLFW_INVALID_CODEPOINT 0xffffffffu
+
+uint32_t _glfwKeySym2Unicode(unsigned int keysym);
+
diff --git a/chromium/third_party/dawn/third_party/gn/glfw/BUILD.gn b/chromium/third_party/dawn/third_party/gn/glfw/BUILD.gn
index b592b4d2c42..1c00094c13c 100644
--- a/chromium/third_party/dawn/third_party/gn/glfw/BUILD.gn
+++ b/chromium/third_party/dawn/third_party/gn/glfw/BUILD.gn
@@ -25,23 +25,6 @@ glfw_dir = dawn_glfw_dir
config("glfw_public") {
include_dirs = [ "${glfw_dir}/include" ]
-
- if (is_win) {
- defines = [ "_GLFW_WIN32" ]
- }
-
- if (is_mac) {
- defines = [ "_GLFW_COCOA" ]
- }
-
- if (is_linux) {
- # ANGLE builds only libEGL.so, so tell GLFW to load that instead of
- # the default libEGL.so.1.
- defines = [
- "_GLFW_X11",
- "_GLFW_EGL_LIBRARY=\"libEGL.so\"",
- ]
- }
}
static_library("glfw") {
@@ -59,56 +42,79 @@ static_library("glfw") {
cflags_c = [
"-Wno-sign-compare",
"-Wno-missing-field-initializers",
+ "-Wno-macro-redefined",
]
}
+ defines = []
+ libs = []
+
+ # Common sources.
sources = [
"${glfw_dir}/include/GLFW/glfw3.h",
"${glfw_dir}/include/GLFW/glfw3native.h",
"${glfw_dir}/src/context.c",
"${glfw_dir}/src/egl_context.c",
- "${glfw_dir}/src/egl_context.h",
"${glfw_dir}/src/init.c",
"${glfw_dir}/src/input.c",
"${glfw_dir}/src/internal.h",
+ "${glfw_dir}/src/mappings.h",
"${glfw_dir}/src/monitor.c",
+ "${glfw_dir}/src/null_init.c",
+ "${glfw_dir}/src/null_joystick.c",
+ "${glfw_dir}/src/null_joystick.h",
+ "${glfw_dir}/src/null_monitor.c",
+ "${glfw_dir}/src/null_platform.h",
+ "${glfw_dir}/src/null_window.c",
"${glfw_dir}/src/osmesa_context.c",
- "${glfw_dir}/src/osmesa_context.h",
+ "${glfw_dir}/src/platform.c",
+ "${glfw_dir}/src/platform.h",
"${glfw_dir}/src/vulkan.c",
"${glfw_dir}/src/window.c",
]
- libs = []
- if (is_win) {
+ # Code shared by all backends on an OS.
+ if (is_linux) {
+ defines += [ "_GLFW_EGL_LIBRARY=\"libEGL.so\"" ]
sources += [
- "${glfw_dir}/src/wgl_context.c",
- "${glfw_dir}/src/wgl_context.h",
- "${glfw_dir}/src/win32_init.c",
- "${glfw_dir}/src/win32_joystick.c",
- "${glfw_dir}/src/win32_joystick.h",
- "${glfw_dir}/src/win32_monitor.c",
- "${glfw_dir}/src/win32_platform.h",
- "${glfw_dir}/src/win32_thread.c",
- "${glfw_dir}/src/win32_time.c",
- "${glfw_dir}/src/win32_window.c",
+ "${glfw_dir}/src/posix_module.c",
+ "${glfw_dir}/src/posix_thread.c",
+ "${glfw_dir}/src/posix_thread.h",
+ "${glfw_dir}/src/posix_time.c",
+ "${glfw_dir}/src/posix_time.h",
+ ]
+ libs += [
+ "m",
+ "rt",
+ "dl",
]
}
- if (is_linux || is_mac) {
+ if (is_mac) {
sources += [
+ "${glfw_dir}/src/cocoa_time.c",
+ "${glfw_dir}/src/cocoa_time.h",
+ "${glfw_dir}/src/posix_module.c",
"${glfw_dir}/src/posix_thread.c",
"${glfw_dir}/src/posix_thread.h",
]
}
- if (is_linux) {
+ if (is_win) {
+ sources += [
+ "${glfw_dir}/src/win32_module.c",
+ "${glfw_dir}/src/win32_thread.c",
+ "${glfw_dir}/src/win32_thread.h",
+ "${glfw_dir}/src/win32_time.c",
+ "${glfw_dir}/src/win32_time.h",
+ ]
+ }
+
+ # Per-backend code.
+ if (dawn_use_x11) {
+ defines += [ "_GLFW_X11" ]
sources += [
"${glfw_dir}/src/glx_context.c",
- "${glfw_dir}/src/glx_context.h",
- "${glfw_dir}/src/linux_joystick.c",
- "${glfw_dir}/src/linux_joystick.h",
- "${glfw_dir}/src/posix_time.c",
- "${glfw_dir}/src/posix_time.h",
"${glfw_dir}/src/x11_init.c",
"${glfw_dir}/src/x11_monitor.c",
"${glfw_dir}/src/x11_platform.h",
@@ -116,34 +122,44 @@ static_library("glfw") {
"${glfw_dir}/src/xkb_unicode.c",
"${glfw_dir}/src/xkb_unicode.h",
]
-
libs += [
- "rt",
- "dl",
"X11",
+ "Xi",
"Xcursor",
"Xinerama",
"Xrandr",
]
}
+ # No Wayland support for GLFW in GN builds.
+ if (dawn_use_x11) {
+ if (is_linux) {
+ sources += [
+ "${glfw_dir}/src/linux_joystick.c",
+ "${glfw_dir}/src/linux_joystick.h",
+ ]
+ }
+ sources += [
+ "${glfw_dir}/src/posix_poll.c",
+ "${glfw_dir}/src/posix_poll.h",
+ ]
+ }
if (is_mac) {
+ defines += [ "_GLFW_COCOA" ]
sources += [
"${glfw_dir}/src/cocoa_init.m",
"${glfw_dir}/src/cocoa_joystick.h",
"${glfw_dir}/src/cocoa_joystick.m",
"${glfw_dir}/src/cocoa_monitor.m",
"${glfw_dir}/src/cocoa_platform.h",
- "${glfw_dir}/src/cocoa_time.c",
"${glfw_dir}/src/cocoa_window.m",
- "${glfw_dir}/src/nsgl_context.h",
"${glfw_dir}/src/nsgl_context.m",
]
+
frameworks = [
"Cocoa.framework",
"IOKit.framework",
"CoreFoundation.framework",
- "CoreVideo.framework",
]
cflags_objc = [
"-Wno-sign-compare",
@@ -151,4 +167,17 @@ static_library("glfw") {
"-Wno-objc-multiple-method-names",
]
}
+
+ if (is_win) {
+ defines += [ "_GLFW_WIN32" ]
+ sources += [
+ "${glfw_dir}/src/wgl_context.c",
+ "${glfw_dir}/src/win32_init.c",
+ "${glfw_dir}/src/win32_joystick.c",
+ "${glfw_dir}/src/win32_joystick.h",
+ "${glfw_dir}/src/win32_monitor.c",
+ "${glfw_dir}/src/win32_platform.h",
+ "${glfw_dir}/src/win32_window.c",
+ ]
+ }
}
diff --git a/chromium/third_party/dawn/third_party/gn/webgpu-cts/BUILD.gn b/chromium/third_party/dawn/third_party/gn/webgpu-cts/BUILD.gn
index 70239413888..3582bc27e8a 100644
--- a/chromium/third_party/dawn/third_party/gn/webgpu-cts/BUILD.gn
+++ b/chromium/third_party/dawn/third_party/gn/webgpu-cts/BUILD.gn
@@ -88,7 +88,7 @@ copy("copy_resources") {
action("verify_gen_ts_dep_list") {
script = "${dawn_root}/webgpu-cts/scripts/gen_ts_dep_lists.py"
inputs = [
- # TODO(kainino): Make sure this gets retriggered when the CTS dep changes.
+ "../../../DEPS",
"resource_files.txt",
"ts_sources.txt",
]
diff --git a/chromium/third_party/dawn/third_party/gn/webgpu-cts/resource_files.txt b/chromium/third_party/dawn/third_party/gn/webgpu-cts/resource_files.txt
index 812c137c6de..f9b6c360b3e 100644
--- a/chromium/third_party/dawn/third_party/gn/webgpu-cts/resource_files.txt
+++ b/chromium/third_party/dawn/third_party/gn/webgpu-cts/resource_files.txt
@@ -1,6 +1,8 @@
Di-3d.png
README.md
+red-green.bt2020.vp9.webm
red-green.bt601.vp9.webm
+red-green.bt709.vp9.webm
red-green.mp4
red-green.theora.ogv
red-green.webmvp8.webm
diff --git a/chromium/third_party/dawn/third_party/gn/webgpu-cts/ts_sources.txt b/chromium/third_party/dawn/third_party/gn/webgpu-cts/ts_sources.txt
index 8033cb07bd2..703c4dfd8a0 100644
--- a/chromium/third_party/dawn/third_party/gn/webgpu-cts/ts_sources.txt
+++ b/chromium/third_party/dawn/third_party/gn/webgpu-cts/ts_sources.txt
@@ -27,6 +27,7 @@ src/common/internal/test_suite_listing.ts
src/common/internal/util.ts
src/common/internal/tree.ts
src/common/internal/file_loader.ts
+src/common/util/colors.ts
src/common/util/navigator_gpu.ts
src/common/runtime/helper/sys.ts
src/common/runtime/cmdline.ts
@@ -43,7 +44,6 @@ src/common/tools/gen_wpt_cts_html.ts
src/common/tools/presubmit.ts
src/common/tools/version.ts
src/common/util/collect_garbage.ts
-src/common/util/colors.ts
src/common/util/preprocessor.ts
src/unittests/unit_test.ts
src/demo/a.spec.ts
@@ -155,6 +155,8 @@ src/webgpu/api/operation/render_pass/storeop2.spec.ts
src/webgpu/api/operation/render_pipeline/alpha_to_coverage.spec.ts
src/webgpu/api/operation/render_pipeline/culling_tests.spec.ts
src/webgpu/api/operation/render_pipeline/entry_point_name.spec.ts
+src/webgpu/util/shader.ts
+src/webgpu/util/texture/texture_ok.ts
src/webgpu/api/operation/render_pipeline/pipeline_output_targets.spec.ts
src/webgpu/api/operation/render_pipeline/primitive_topology.spec.ts
src/webgpu/api/operation/render_pipeline/sample_mask.spec.ts
@@ -176,7 +178,6 @@ src/webgpu/api/operation/sampling/anisotropy.spec.ts
src/webgpu/api/operation/sampling/filter_mode.spec.ts
src/webgpu/api/operation/sampling/lod_clamp.spec.ts
src/webgpu/api/operation/shader_module/compilation_info.spec.ts
-src/webgpu/util/texture/texture_ok.ts
src/webgpu/api/operation/texture_view/format_reinterpretation.spec.ts
src/webgpu/api/operation/texture_view/read.spec.ts
src/webgpu/api/operation/texture_view/write.spec.ts
@@ -246,6 +247,7 @@ src/webgpu/api/validation/queue/copyToTexture/CopyExternalImageToTexture.spec.ts
src/webgpu/api/validation/queue/destroyed/query_set.spec.ts
src/webgpu/api/validation/render_pass/resolve.spec.ts
src/webgpu/api/validation/render_pass/storeOp.spec.ts
+src/webgpu/api/validation/resource_usages/buffer/in_pass_encoder.spec.ts
src/webgpu/api/validation/resource_usages/texture/in_pass_encoder.spec.ts
src/webgpu/api/validation/resource_usages/texture/in_render_common.spec.ts
src/webgpu/api/validation/resource_usages/texture/in_render_misc.spec.ts
@@ -268,34 +270,114 @@ src/webgpu/shader/execution/expression/binary/f32_arithmetic.spec.ts
src/webgpu/shader/execution/expression/binary/f32_logical.spec.ts
src/webgpu/shader/execution/expression/call/builtin/builtin.ts
src/webgpu/shader/execution/expression/call/builtin/abs.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/acos.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/acosh.spec.ts
src/webgpu/shader/execution/expression/call/builtin/all.spec.ts
src/webgpu/shader/execution/expression/call/builtin/any.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/arrayLength.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/asin.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/asinh.spec.ts
src/webgpu/shader/execution/expression/call/builtin/atan.spec.ts
src/webgpu/shader/execution/expression/call/builtin/atan2.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/atanh.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/atomicAdd.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/atomicAnd.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/atomicCompareExchangeWeak.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/atomicExchange.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/atomicLoad.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/atomicMax.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/atomicMin.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/atomicOr.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/atomicStore.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/atomicSub.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/atomicXor.spec.ts
src/webgpu/shader/execution/expression/call/builtin/ceil.spec.ts
src/webgpu/shader/execution/expression/call/builtin/clamp.spec.ts
src/webgpu/shader/execution/expression/call/builtin/cos.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/cosh.spec.ts
src/webgpu/shader/execution/expression/call/builtin/countLeadingZeros.spec.ts
src/webgpu/shader/execution/expression/call/builtin/countOneBits.spec.ts
src/webgpu/shader/execution/expression/call/builtin/countTrailingZeros.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/cross.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/degrees.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/determinant.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/distance.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/dot.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/dpdx.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/dpdxCoarse.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/dpdxFine.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/dpdy.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/dpdyCoarse.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/dpdyFine.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/exp.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/exp2.spec.ts
src/webgpu/shader/execution/expression/call/builtin/extractBits.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/faceForward.spec.ts
src/webgpu/shader/execution/expression/call/builtin/firstLeadingBit.spec.ts
src/webgpu/shader/execution/expression/call/builtin/firstTrailingBit.spec.ts
-src/webgpu/shader/execution/expression/call/builtin/float_built_functions.spec.ts
src/webgpu/shader/execution/expression/call/builtin/floor.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/fma.spec.ts
src/webgpu/shader/execution/expression/call/builtin/fract.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/frexp.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/fwidth.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/fwidthCoarse.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/fwidthFine.spec.ts
src/webgpu/shader/execution/expression/call/builtin/insertBits.spec.ts
src/webgpu/shader/execution/expression/call/builtin/inversesqrt.spec.ts
src/webgpu/shader/execution/expression/call/builtin/ldexp.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/length.spec.ts
src/webgpu/shader/execution/expression/call/builtin/log.spec.ts
src/webgpu/shader/execution/expression/call/builtin/log2.spec.ts
-src/webgpu/shader/execution/expression/call/builtin/logical_built_in_functions.spec.ts
src/webgpu/shader/execution/expression/call/builtin/max.spec.ts
src/webgpu/shader/execution/expression/call/builtin/min.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/mix.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/modf.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/normalize.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/pack2x16float.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/pack2x16snorm.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/pack2x16unorm.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/pack4x8snorm.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/pack4x8unorm.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/pow.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/quantizeToF16.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/radians.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/reflect.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/refract.spec.ts
src/webgpu/shader/execution/expression/call/builtin/reverseBits.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/round.spec.ts
src/webgpu/shader/execution/expression/call/builtin/select.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/sign.spec.ts
src/webgpu/shader/execution/expression/call/builtin/sin.spec.ts
-src/webgpu/shader/execution/expression/call/builtin/value_testing_built_in_functions.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/sinh.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/smoothstep.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/sqrt.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/step.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/storageBarrier.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/tan.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/tanh.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/textureDimension.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/utils.ts
+src/webgpu/shader/execution/expression/call/builtin/textureGather.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/textureGatherCompare.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/textureLoad.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/textureNumLayers.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/textureNumLevels.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/textureNumSamples.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/textureSample.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/textureSampleBias.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/textureSampleCompare.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/textureSampleCompareLevel.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/textureSampleGrad.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/textureSampleLevel.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/textureStore.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/transpose.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/trunc.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/unpack2x16float.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/unpack2x16snorm.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/unpack2x16unorm.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/unpack4x8snorm.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/unpack4x8unorm.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/workgroupBarrier.spec.ts
src/webgpu/shader/execution/expression/unary/unary.ts
src/webgpu/shader/execution/expression/unary/f32_arithmetic.spec.ts
src/webgpu/shader/execution/memory_model/memory_model_setup.ts
@@ -307,15 +389,19 @@ src/webgpu/shader/execution/sampling/gradients_in_varying_loop.spec.ts
src/webgpu/shader/execution/shader_io/compute_builtins.spec.ts
src/webgpu/shader/execution/shader_io/shared_structs.spec.ts
src/webgpu/shader/validation/shader_validation_test.ts
-src/webgpu/shader/validation/tokens.spec.ts
-src/webgpu/shader/validation/variable_and_const.spec.ts
+src/webgpu/shader/validation/parse/blankspace.spec.ts
+src/webgpu/shader/validation/parse/comments.spec.ts
+src/webgpu/shader/validation/parse/identifiers.spec.ts
+src/webgpu/shader/validation/parse/literal.spec.ts
+src/webgpu/shader/validation/parse/source.spec.ts
+src/webgpu/shader/validation/parse/var_and_let.spec.ts
src/webgpu/shader/validation/shader_io/util.ts
src/webgpu/shader/validation/shader_io/builtins.spec.ts
-src/webgpu/shader/validation/shader_io/generic.spec.ts
+src/webgpu/shader/validation/shader_io/entry_point.ts
src/webgpu/shader/validation/shader_io/interpolate.spec.ts
src/webgpu/shader/validation/shader_io/invariant.spec.ts
src/webgpu/shader/validation/shader_io/locations.spec.ts
-src/webgpu/shader/validation/wgsl/basic.spec.ts
+src/webgpu/shader/validation/shader_io/shareable_types.spec.ts
src/webgpu/util/color_space_conversion.ts
src/webgpu/util/copy_to_texture.ts
src/webgpu/util/texture/texel_data.spec.ts
diff --git a/chromium/third_party/dawn/third_party/go.mod b/chromium/third_party/dawn/third_party/go.mod
new file mode 100644
index 00000000000..fa8228e3af0
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/go.mod
@@ -0,0 +1 @@
+// Empty go.mod file used to tell go not to scan this directory
diff --git a/chromium/third_party/dawn/tools/format b/chromium/third_party/dawn/tools/format
index d0c9c64dd57..d6de1335f96 100755
--- a/chromium/third_party/dawn/tools/format
+++ b/chromium/third_party/dawn/tools/format
@@ -13,7 +13,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-find src -name "*.h" -exec clang-format -i {} \;
-find src -name "*.cc" -exec clang-format -i {} \;
-find src/tint/cmd -name "*.h" -exec clang-format -i {} \;
-find src/tint/cmd -name "*.cc" -exec clang-format -i {} \;
+find src -name "*.h" -exec clang_format.py -i {} \;
+find src -name "*.cc" -exec clang_format.py -i {} \;
+find src -name "*.cpp" -exec clang_format.py -i {} \;
+find src -name "*.m" -exec clang_format.py -i {} \;
+find src -name "*.mm" -exec clang_format.py -i {} \;
+find include -name "*.h" -exec clang_format.py -i {} \;
diff --git a/chromium/third_party/dawn/tools/setup-build b/chromium/third_party/dawn/tools/setup-build
index 69087f76848..76ec76c5b96 100755
--- a/chromium/third_party/dawn/tools/setup-build
+++ b/chromium/third_party/dawn/tools/setup-build
@@ -32,9 +32,10 @@ function show_usage() {
function generate() {
CMD=$1
pushd "$ROOT_DIR" > /dev/null
- ${CMD}
- rm -fr "out/active" || true
- ln -s "$BUILD_DIR" "out/active"
+ mkdir -p "out/$BUILD_DIR"
+ rm -fr "out/active" || true
+ ln -s "$BUILD_DIR" "out/active"
+ ${CMD}
popd > /dev/null
}
@@ -42,10 +43,10 @@ case $BUILD_SYSTEM in
"gn")
case $BUILD_TYPE in
"debug")
- generate "gn gen out/${BUILD_DIR} --args=is_debug=true"
+ generate "gn gen out/active --args=is_debug=true"
;;
"release")
- generate "gn gen out/${BUILD_DIR}"
+ generate "gn gen out/active --args=is_debug=false"
;;
*)
echo "invalid build type '${BUILD_TYPE}'"
@@ -54,12 +55,16 @@ case $BUILD_SYSTEM in
esac
;;
"cmake")
+ CMAKE_FLAGS=""
+ if [[ -x $(command -v ccache) ]]; then
+ CMAKE_FLAGS+="-DCMAKE_CXX_COMPILER_LAUNCHER=ccache"
+ fi
case $BUILD_TYPE in
"debug")
- generate "cmake -S . -B out/$BUILD_DIR -GNinja -DCMAKE_BUILD_TYPE=Debug"
+ generate "cmake -S . -B out/active -GNinja -DCMAKE_BUILD_TYPE=Debug ${CMAKE_FLAGS}"
;;
"release")
- generate "cmake -S . -B out/$BUILD_DIR -GNinja -DCMAKE_BUILD_TYPE=RelWithDebInfo"
+ generate "cmake -S . -B out/active -GNinja -DCMAKE_BUILD_TYPE=RelWithDebInfo ${CMAKE_FLAGS}"
;;
*)
echo "invalid build type '${BUILD_TYPE}'"
diff --git a/chromium/third_party/dawn/tools/src/cmd/builtin-gen/parser/parser_test.go b/chromium/third_party/dawn/tools/src/cmd/builtin-gen/parser/parser_test.go
deleted file mode 100644
index 10f355d08c0..00000000000
--- a/chromium/third_party/dawn/tools/src/cmd/builtin-gen/parser/parser_test.go
+++ /dev/null
@@ -1,210 +0,0 @@
-// Copyright 2021 The Tint Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package parser_test
-
-import (
- "testing"
-
- "dawn.googlesource.com/dawn/tools/src/cmd/builtin-gen/ast"
- "dawn.googlesource.com/dawn/tools/src/cmd/builtin-gen/parser"
-)
-
-func TestParser(t *testing.T) {
- type test struct {
- src string
- expect ast.AST
- }
-
- for _, test := range []test{
- {"enum E {}", ast.AST{
- Enums: []ast.EnumDecl{{Name: "E"}},
- }},
- {"enum E { A [[deco]] B C }", ast.AST{
- Enums: []ast.EnumDecl{{
- Name: "E",
- Entries: []ast.EnumEntry{
- {Name: "A"},
- {
- Decorations: ast.Decorations{{Name: "deco"}},
- Name: "B",
- },
- {Name: "C"},
- },
- }},
- }},
- {"type T", ast.AST{
- Types: []ast.TypeDecl{{Name: "T"}},
- }},
- {"type T<A, B, C>", ast.AST{
- Types: []ast.TypeDecl{{
- Name: "T",
- TemplateParams: ast.TemplateParams{
- {Name: "A"},
- {Name: "B"},
- {Name: "C"},
- },
- }},
- }},
- {"[[deco]] type T", ast.AST{
- Types: []ast.TypeDecl{{
- Decorations: ast.Decorations{
- {Name: "deco"},
- },
- Name: "T",
- }},
- }},
- {`[[deco("a", "b")]] type T`, ast.AST{
- Types: []ast.TypeDecl{{
- Decorations: ast.Decorations{
- {Name: "deco", Values: []string{"a", "b"}},
- },
- Name: "T",
- }},
- }},
- {"match M : A", ast.AST{
- Matchers: []ast.MatcherDecl{{
- Name: "M",
- Options: ast.MatcherOptions{
- ast.TemplatedName{Name: "A"},
- },
- }},
- }},
- {"match M : A | B", ast.AST{
- Matchers: []ast.MatcherDecl{{
- Name: "M",
- Options: ast.MatcherOptions{
- ast.TemplatedName{Name: "A"},
- ast.TemplatedName{Name: "B"},
- },
- }},
- }},
- {"fn F()", ast.AST{
- Functions: []ast.FunctionDecl{{
- Name: "F",
- }},
- }},
- {"[[deco]] fn F()", ast.AST{
- Functions: []ast.FunctionDecl{{
- Name: "F",
- Decorations: ast.Decorations{
- {Name: "deco"},
- },
- }},
- }},
- {"fn F(a)", ast.AST{
- Functions: []ast.FunctionDecl{{
- Name: "F",
- Parameters: ast.Parameters{
- {Type: ast.TemplatedName{Name: "a"}},
- },
- }},
- }},
- {"fn F(a: T)", ast.AST{
- Functions: []ast.FunctionDecl{{
- Name: "F",
- Parameters: ast.Parameters{
- {Name: "a", Type: ast.TemplatedName{Name: "T"}},
- },
- }},
- }},
- {"fn F(a, b)", ast.AST{
- Functions: []ast.FunctionDecl{{
- Name: "F",
- Parameters: ast.Parameters{
- {Type: ast.TemplatedName{Name: "a"}},
- {Type: ast.TemplatedName{Name: "b"}},
- },
- }},
- }},
- {"fn F<A : B<C>>()", ast.AST{
- Functions: []ast.FunctionDecl{{
- Name: "F",
- TemplateParams: ast.TemplateParams{
- {
- Name: "A", Type: ast.TemplatedName{
- Name: "B",
- TemplateArgs: ast.TemplatedNames{
- {Name: "C"},
- },
- },
- },
- },
- }},
- }},
- {"fn F<T>(a: X, b: Y<T>)", ast.AST{
- Functions: []ast.FunctionDecl{{
- Name: "F",
- TemplateParams: ast.TemplateParams{
- {Name: "T"},
- },
- Parameters: ast.Parameters{
- {Name: "a", Type: ast.TemplatedName{Name: "X"}},
- {Name: "b", Type: ast.TemplatedName{
- Name: "Y",
- TemplateArgs: []ast.TemplatedName{{Name: "T"}},
- }},
- },
- }},
- }},
- {"fn F() -> X", ast.AST{
- Functions: []ast.FunctionDecl{{
- Name: "F",
- ReturnType: &ast.TemplatedName{Name: "X"},
- }},
- }},
- {"fn F() -> X<T>", ast.AST{
- Functions: []ast.FunctionDecl{{
- Name: "F",
- ReturnType: &ast.TemplatedName{
- Name: "X",
- TemplateArgs: []ast.TemplatedName{{Name: "T"}},
- },
- }},
- }},
- } {
- got, err := parser.Parse(test.src, "file.txt")
- if err != nil {
- t.Errorf("While parsing:\n%s\nParse() returned error: %v", test.src, err)
- continue
- }
-
- gotStr, expectStr := got.String(), test.expect.String()
- if gotStr != expectStr {
- t.Errorf("While parsing:\n%s\nGot:\n%s\nExpected:\n%s", test.src, gotStr, expectStr)
- }
- }
-}
-
-func TestErrors(t *testing.T) {
- type test struct {
- src string
- expect string
- }
-
- for _, test := range []test{
- {"+", "test.txt:1:1: unexpected '+'"},
- {"123", "test.txt:1:1 unexpected token 'integer'"},
- {"[[123]]", "test.txt:1:3 expected 'ident' for decoration name, got 'integer'"},
- {"[[abc", "expected ']]' for decoration list, but reached end of file"},
- } {
- got, err := parser.Parse(test.src, "test.txt")
- if gotErr := err.Error(); test.expect != gotErr {
- t.Errorf(`Parse() returned error "%+v", expected error "%+v"`, gotErr, test.expect)
- }
- if got != nil {
- t.Errorf("Lex() returned non-nil for error")
- }
- }
-}
diff --git a/chromium/third_party/dawn/tools/src/cmd/check-spec-examples/main.go b/chromium/third_party/dawn/tools/src/cmd/check-spec-examples/main.go
index 0b891266d39..b641dc9e9e9 100644
--- a/chromium/third_party/dawn/tools/src/cmd/check-spec-examples/main.go
+++ b/chromium/third_party/dawn/tools/src/cmd/check-spec-examples/main.go
@@ -203,7 +203,7 @@ type example struct {
func tryCompile(compiler, wd string, e example) error {
code := e.code
if e.functionScope {
- code = "\n@stage(vertex) fn main() -> @builtin(position) vec4<f32> {\n" + code + " return vec4<f32>();}\n"
+ code = "\n@vertex fn main() -> @builtin(position) vec4<f32> {\n" + code + " return vec4<f32>();}\n"
}
addedStubFunction := false
@@ -214,7 +214,7 @@ func tryCompile(compiler, wd string, e example) error {
}
if !addedStubFunction {
- code += "\n@stage(vertex) fn main() {}\n"
+ code += "\n@vertex fn main() {}\n"
addedStubFunction = true
continue
}
diff --git a/chromium/third_party/dawn/tools/src/cmd/cts/common/build.go b/chromium/third_party/dawn/tools/src/cmd/cts/common/build.go
new file mode 100644
index 00000000000..99ce99d791a
--- /dev/null
+++ b/chromium/third_party/dawn/tools/src/cmd/cts/common/build.go
@@ -0,0 +1,168 @@
+// Copyright 2022 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package common
+
+import (
+ "context"
+ "fmt"
+ "log"
+ "sort"
+ "time"
+
+ "dawn.googlesource.com/dawn/tools/src/buildbucket"
+ "dawn.googlesource.com/dawn/tools/src/gerrit"
+)
+
+// BuildsByName is a map of builder name to build result
+type BuildsByName map[string]buildbucket.Build
+
+func (b BuildsByName) ids() []buildbucket.BuildID {
+ ids := make([]buildbucket.BuildID, 0, len(b))
+ for _, build := range b {
+ ids = append(ids, build.ID)
+ }
+ return ids
+}
+
+// GetBuilds returns the builds, as declared in the config file, for the given
+// patchset.
+func GetBuilds(
+ ctx context.Context,
+ cfg Config,
+ ps gerrit.Patchset,
+ bb *buildbucket.Buildbucket) (BuildsByName, error) {
+
+ builds := BuildsByName{}
+
+ err := bb.SearchBuilds(ctx, ps, func(build buildbucket.Build) error {
+ for name, builder := range cfg.Builders {
+ if build.Builder == builder {
+ builds[name] = build
+ break
+ }
+ }
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return builds, err
+}
+
+// WaitForBuildsToComplete waits until all the provided builds have finished.
+func WaitForBuildsToComplete(
+ ctx context.Context,
+ cfg Config,
+ ps gerrit.Patchset,
+ bb *buildbucket.Buildbucket,
+ builds BuildsByName) error {
+
+ buildsStillRunning := func() []string {
+ out := []string{}
+ for name, build := range builds {
+ if build.Status.Running() {
+ out = append(out, name)
+ }
+ }
+ sort.Strings(out)
+ return out
+ }
+
+ for {
+ // Refresh build status
+ for name, build := range builds {
+ build, err := bb.QueryBuild(ctx, build.ID)
+ if err != nil {
+ return fmt.Errorf("failed to query build for '%v': %w", name, err)
+ }
+ builds[name] = build
+ }
+ running := buildsStillRunning()
+ if len(running) == 0 {
+ break
+ }
+ log.Println("waiting for builds to complete: ", running)
+ time.Sleep(time.Minute * 2)
+ }
+
+ for name, build := range builds {
+ if build.Status == buildbucket.StatusInfraFailure ||
+ build.Status == buildbucket.StatusCanceled {
+ return fmt.Errorf("%v builder failed with %v", name, build.Status)
+ }
+ }
+
+ return nil
+}
+
+// GetOrStartBuildsAndWait starts the builds as declared in the config file,
+// for the given patchset, if they haven't already been started or if retest is
+// true. GetOrStartBuildsAndWait then waits for the builds to complete and
+// returns the results.
+func GetOrStartBuildsAndWait(
+ ctx context.Context,
+ cfg Config,
+ ps gerrit.Patchset,
+ bb *buildbucket.Buildbucket,
+ retest bool) (BuildsByName, error) {
+
+ builds := BuildsByName{}
+
+ if !retest {
+ // Find any existing builds for the patchset
+ err := bb.SearchBuilds(ctx, ps, func(build buildbucket.Build) error {
+ for name, builder := range cfg.Builders {
+ if build.Builder == builder {
+ builds[name] = build
+ break
+ }
+ }
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Returns true if the build should be re-kicked
+ shouldKick := func(build buildbucket.Build) bool {
+ switch build.Status {
+ case buildbucket.StatusUnknown,
+ buildbucket.StatusInfraFailure,
+ buildbucket.StatusCanceled:
+ return true
+ }
+ return false
+ }
+
+ // Kick any missing builds
+ for name, builder := range cfg.Builders {
+ if build, found := builds[name]; !found || shouldKick(build) {
+ build, err := bb.StartBuild(ctx, ps, builder, retest)
+ if err != nil {
+ return nil, err
+ }
+ log.Printf("started build: %+v", build)
+ builds[name] = build
+ }
+ }
+
+ if err := WaitForBuildsToComplete(ctx, cfg, ps, bb, builds); err != nil {
+ return nil, err
+ }
+
+ return builds, nil
+}
diff --git a/chromium/third_party/dawn/tools/src/cmd/cts/common/cmds.go b/chromium/third_party/dawn/tools/src/cmd/cts/common/cmds.go
new file mode 100644
index 00000000000..00a655ae24e
--- /dev/null
+++ b/chromium/third_party/dawn/tools/src/cmd/cts/common/cmds.go
@@ -0,0 +1,31 @@
+// Copyright 2022 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package common
+
+import (
+ "dawn.googlesource.com/dawn/tools/src/subcmd"
+)
+
+// The registered commands
+var commands []Command
+
+// Command is the type of a single cts command
+type Command = subcmd.Command[Config]
+
+// Register registers the command for use by the 'cts' tool
+func Register(c Command) { commands = append(commands, c) }
+
+// Commands returns all the commands registered
+func Commands() []Command { return commands }
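+
+// Example: each sub-command package registers itself from its init function
+// (see the export and format packages in this change), and the cts tool then
+// dispatches over Commands():
+//
+//	func init() {
+//		common.Register(&cmd{})
+//	}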
diff --git a/chromium/third_party/dawn/tools/src/cmd/cts/common/config.go b/chromium/third_party/dawn/tools/src/cmd/cts/common/config.go
new file mode 100644
index 00000000000..84393c5a16f
--- /dev/null
+++ b/chromium/third_party/dawn/tools/src/cmd/cts/common/config.go
@@ -0,0 +1,95 @@
+// Copyright 2022 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package common
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "time"
+
+ "dawn.googlesource.com/dawn/tools/src/buildbucket"
+ "github.com/tidwall/jsonc"
+)
+
+// Config holds the configuration data for the 'cts' command.
+// Config is loaded from the JSON file at tools/src/cmd/cts/config.json,
+// which sits alongside the tool's source.
+type Config struct {
+ // Test holds configuration data for test results.
+ Test struct {
+ // The ResultDB string prefix for CTS tests.
+ Prefix string
+ // The time threshold used to classify tests as slow.
+ SlowThreshold time.Duration
+ }
+ // Gerrit holds configuration for Dawn's Gerrit server.
+ Gerrit struct {
+ // The host URL
+ Host string
+ // The project name
+ Project string
+ }
+ // Git holds configuration data for the various Git repositories.
+ Git struct {
+ // The CTS git repository.
+ CTS GitProject
+ // The Dawn git repository.
+ Dawn GitProject
+ }
+ // Builders is a map of builder name (as displayed in the UI) to buildbucket
+ // builder information.
+ Builders map[string]buildbucket.Builder
+	// Tag holds configuration data for cleaning result tags before processing.
+ Tag struct {
+ // Remove holds tags that should be removed before processing.
+ // See crbug.com/dawn/1401 for more information.
+ Remove []string
+ }
+ // Sheets holds information about the Google Sheets document used for
+ // tracking CTS statistics.
+ Sheets struct {
+ ID string
+ }
+}
+
+// GitProject holds a git host URL and project.
+type GitProject struct {
+ Host string
+ Project string
+}
+
+// HttpsURL returns the https URL of the project
+func (g GitProject) HttpsURL() string {
+ return fmt.Sprintf("https://%v/%v", g.Host, g.Project)
+}
+
+// LoadConfig loads the JSON config file at the given path
+func LoadConfig(path string) (*Config, error) {
+ data, err := ioutil.ReadFile(path)
+ if err != nil {
+ return nil, fmt.Errorf("failed to open '%v': %w", path, err)
+ }
+
+ // Remove comments, trailing commas.
+ data = jsonc.ToJSONInPlace(data)
+
+ cfg := Config{}
+ if err := json.NewDecoder(bytes.NewReader(data)).Decode(&cfg); err != nil {
+ return nil, fmt.Errorf("failed to load config: %w", err)
+ }
+ return &cfg, nil
+}
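+
+// Example: a minimal sketch of loading and using the configuration; the
+// 'ctsDir' variable below is illustrative and stands for the directory
+// containing the cts tool (main.go uses utils.ThisDir()):
+//
+//	cfg, err := LoadConfig(filepath.Join(ctsDir, "config.json"))
+//	if err != nil {
+//		return err
+//	}
+//	fmt.Println("gerrit host:", cfg.Gerrit.Host)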
diff --git a/chromium/third_party/dawn/tools/src/cmd/cts/common/constants.go b/chromium/third_party/dawn/tools/src/cmd/cts/common/constants.go
new file mode 100644
index 00000000000..a210f0d77b0
--- /dev/null
+++ b/chromium/third_party/dawn/tools/src/cmd/cts/common/constants.go
@@ -0,0 +1,37 @@
+// Copyright 2022 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package common
+
+import (
+ "dawn.googlesource.com/dawn/tools/src/utils"
+ "go.chromium.org/luci/auth"
+ "go.chromium.org/luci/hardcoded/chromeinfra"
+)
+
+const (
+ // RollSubjectPrefix is the subject prefix for CTS roll changes
+ RollSubjectPrefix = "Roll third_party/webgpu-cts/ "
+
+ // DefaultCacheDir is the default directory for the results cache
+ DefaultCacheDir = "~/.cache/webgpu-cts-results"
+)
+
+// DefaultAuthOptions returns the default authentication options for use by
+// command line arguments.
+func DefaultAuthOptions() auth.Options {
+ def := chromeinfra.DefaultAuthOptions()
+ def.SecretsDir = utils.ExpandHome("~/.config/dawn-cts")
+ return def
+}
diff --git a/chromium/third_party/dawn/tools/src/cmd/cts/common/deps.go b/chromium/third_party/dawn/tools/src/cmd/cts/common/deps.go
new file mode 100644
index 00000000000..f82ddc36e13
--- /dev/null
+++ b/chromium/third_party/dawn/tools/src/cmd/cts/common/deps.go
@@ -0,0 +1,67 @@
+// Copyright 2022 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package common
+
+import (
+ "fmt"
+ "regexp"
+ "strings"
+
+ "dawn.googlesource.com/dawn/tools/src/container"
+)
+
+// The regular expression used to search for the CTS hash
+var reCTSHash = regexp.MustCompile(reEscape(ctsHashPrefix) + `[0-9a-fA-F]+`)
+
+const (
+	// The string prefix for the CTS hash in the DEPS file, used for identifying
+ // and updating the DEPS file.
+ ctsHashPrefix = `{chromium_git}/external/github.com/gpuweb/cts@`
+)
+
+func reEscape(s string) string {
+ return strings.ReplaceAll(strings.ReplaceAll(s, `/`, `\/`), `.`, `\.`)
+}
+
+// UpdateCTSHashInDeps replaces the CTS hashes in 'deps' with 'newCTSHash'.
+// Returns:
+// newDEPS - the new DEPS content
+// oldCTSHash - the old CTS hash in the 'deps'
+func UpdateCTSHashInDeps(deps, newCTSHash string) (newDEPS, oldCTSHash string, err error) {
+ // Collect old CTS hashes, and replace these with newCTSHash
+ b := strings.Builder{}
+ oldCTSHashes := []string{}
+ matches := reCTSHash.FindAllStringIndex(deps, -1)
+ if len(matches) == 0 {
+ return "", "", fmt.Errorf("failed to find a CTS hash in DEPS file")
+ }
+ end := 0
+ for _, match := range matches {
+ oldCTSHashes = append(oldCTSHashes, deps[match[0]+len(ctsHashPrefix):match[1]])
+ b.WriteString(deps[end:match[0]])
+ b.WriteString(ctsHashPrefix + newCTSHash)
+ end = match[1]
+ }
+ b.WriteString(deps[end:])
+
+ newDEPS = b.String()
+
+ if s := container.NewSet(oldCTSHashes...); len(s) > 1 {
+ fmt.Println("DEPS contained multiple hashes for CTS, using first for logs")
+ }
+ oldCTSHash = oldCTSHashes[0]
+
+ return newDEPS, oldCTSHash, nil
+}
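+
+// Example: a sketch of a DEPS rewrite, using the hashes from roll_test.go as
+// illustrative values:
+//
+//	deps := `... {chromium_git}/external/github.com/gpuweb/cts@d5e605a556408eaeeda64fb9d33c3f596fd90b70 ...`
+//	newDEPS, oldHash, err := UpdateCTSHashInDeps(deps, "29275672eefe76986bd4baa7c29ed17b66616b1b")
+//	// On success, newDEPS references the new hash and oldHash holds
+//	// "d5e605a556408eaeeda64fb9d33c3f596fd90b70".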
diff --git a/chromium/third_party/dawn/tools/src/cmd/cts/common/paths.go b/chromium/third_party/dawn/tools/src/cmd/cts/common/paths.go
new file mode 100644
index 00000000000..fda05ed20d6
--- /dev/null
+++ b/chromium/third_party/dawn/tools/src/cmd/cts/common/paths.go
@@ -0,0 +1,38 @@
+// Copyright 2022 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package common
+
+import (
+ "os"
+ "path/filepath"
+
+ "dawn.googlesource.com/dawn/tools/src/utils"
+)
+
+const (
+ // RelativeExpectationsPath is the dawn-root relative path to the
+ // expectations.txt file.
+ RelativeExpectationsPath = "webgpu-cts/expectations.txt"
+)
+
+// DefaultExpectationsPath returns the default path to the expectations.txt
+// file. Returns an empty string if the file cannot be found.
+func DefaultExpectationsPath() string {
+ path := filepath.Join(utils.DawnRoot(), RelativeExpectationsPath)
+ if _, err := os.Stat(path); err != nil {
+ return ""
+ }
+ return path
+}
diff --git a/chromium/third_party/dawn/tools/src/cmd/cts/common/results.go b/chromium/third_party/dawn/tools/src/cmd/cts/common/results.go
new file mode 100644
index 00000000000..934195ef8c8
--- /dev/null
+++ b/chromium/third_party/dawn/tools/src/cmd/cts/common/results.go
@@ -0,0 +1,355 @@
+// Copyright 2022 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package common
+
+import (
+ "context"
+ "flag"
+ "fmt"
+ "log"
+ "os"
+ "path/filepath"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ "dawn.googlesource.com/dawn/tools/src/buildbucket"
+ "dawn.googlesource.com/dawn/tools/src/cts/query"
+ "dawn.googlesource.com/dawn/tools/src/cts/result"
+ "dawn.googlesource.com/dawn/tools/src/gerrit"
+ "dawn.googlesource.com/dawn/tools/src/resultsdb"
+ "dawn.googlesource.com/dawn/tools/src/subcmd"
+ "dawn.googlesource.com/dawn/tools/src/utils"
+ "go.chromium.org/luci/auth"
+ rdbpb "go.chromium.org/luci/resultdb/proto/v1"
+)
+
+// ResultSource describes the source of CTS test results.
+// ResultSource is commonly populated from command line flags and specifies
+// where the results should be loaded or fetched from.
+// If neither File nor Patchset is specified, then results are fetched from
+// the last successful CTS roll.
+type ResultSource struct {
+ // The directory used to cache results fetched from ResultDB
+ CacheDir string
+	// If specified, results will be loaded from this file path.
+	// Must not be specified if Patchset is also specified.
+	File string
+	// If specified, results will be fetched from this gerrit patchset.
+	// Must not be specified if File is also specified.
+ Patchset gerrit.Patchset
+}
+
+// RegisterFlags registers the ResultSource fields as commandline flags for use
+// by command line tools.
+func (r *ResultSource) RegisterFlags(cfg Config) {
+ flag.StringVar(&r.CacheDir, "cache", DefaultCacheDir, "path to the results cache")
+ flag.StringVar(&r.File, "results", "", "local results.txt file (mutually exclusive with --cl)")
+ r.Patchset.RegisterFlags(cfg.Gerrit.Host, cfg.Gerrit.Project)
+}
+
+// GetResults loads or fetches the results, based on the values of r.
+// GetResults will update the ResultSource with the inferred patchset, if a file
+// and specific patchset was not specified.
+func (r *ResultSource) GetResults(ctx context.Context, cfg Config, auth auth.Options) (result.List, error) {
+ // Check that File and Patchset weren't both specified
+ ps := &r.Patchset
+ if r.File != "" && ps.Change != 0 {
+ fmt.Fprintln(flag.CommandLine.Output(), "only one of --results and --cl can be specified")
+ return nil, subcmd.ErrInvalidCLA
+ }
+
+ // If a file was specified, then load that.
+ if r.File != "" {
+ return result.Load(r.File)
+ }
+
+ // Initialize the buildbucket and resultdb clients
+ bb, err := buildbucket.New(ctx, auth)
+ if err != nil {
+ return nil, err
+ }
+ rdb, err := resultsdb.New(ctx, auth)
+ if err != nil {
+ return nil, err
+ }
+
+ // If no change was specified, then pull the results from the most recent
+ // CTS roll.
+ if ps.Change == 0 {
+ fmt.Println("no change specified, scanning gerrit for last CTS roll...")
+ gerrit, err := gerrit.New(cfg.Gerrit.Host, gerrit.Credentials{})
+ if err != nil {
+ return nil, err
+ }
+ latest, err := LatestCTSRoll(gerrit)
+ if err != nil {
+ return nil, err
+ }
+ fmt.Printf("scanning for latest patchset of %v...\n", latest.Number)
+ var results result.List
+ results, *ps, err = MostRecentResultsForChange(ctx, cfg, r.CacheDir, gerrit, bb, rdb, latest.Number)
+ if err != nil {
+ return nil, err
+ }
+ fmt.Printf("using results from cl %v ps %v...\n", ps.Change, ps.Patchset)
+ return results, nil
+ }
+
+ // If a change, but no patchset was specified, then query the most recent
+ // patchset.
+ if ps.Patchset == 0 {
+ gerrit, err := gerrit.New(cfg.Gerrit.Host, gerrit.Credentials{})
+ if err != nil {
+ return nil, err
+ }
+ *ps, err = gerrit.LatestPatchest(strconv.Itoa(ps.Change))
+ if err != nil {
+ err := fmt.Errorf("failed to find latest patchset of change %v: %w",
+ ps.Change, err)
+ return nil, err
+ }
+ }
+
+ // Obtain the patchset's results, kicking a build if there are no results
+ // already available.
+ log.Printf("fetching results from cl %v ps %v...", ps.Change, ps.Patchset)
+ builds, err := GetOrStartBuildsAndWait(ctx, cfg, *ps, bb, false)
+ if err != nil {
+ return nil, err
+ }
+
+ results, err := CacheResults(ctx, cfg, *ps, r.CacheDir, rdb, builds)
+ if err != nil {
+ return nil, err
+ }
+
+ return results, nil
+}
+
+// CacheResults looks in the cache at 'cacheDir' for the results for the given
+// patchset. If the cache contains the results, then these are loaded and
+// returned. If the cache does not contain the results, then they are fetched
+// using GetResults(), saved to the cache directory, and returned.
+func CacheResults(
+ ctx context.Context,
+ cfg Config,
+ ps gerrit.Patchset,
+ cacheDir string,
+ rdb *resultsdb.ResultsDB,
+ builds BuildsByName) (result.List, error) {
+
+ var cachePath string
+ if cacheDir != "" {
+ dir := utils.ExpandHome(cacheDir)
+ path := filepath.Join(dir, strconv.Itoa(ps.Change), fmt.Sprintf("ps-%v.txt", ps.Patchset))
+ if _, err := os.Stat(path); err == nil {
+ return result.Load(path)
+ }
+ cachePath = path
+ }
+
+ results, err := GetResults(ctx, cfg, rdb, builds)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := result.Save(cachePath, results); err != nil {
+		log.Printf("failed to save results to cache: %v", err)
+ }
+
+ return results, nil
+}
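+
+// Example: with the default cache directory, results for an (illustrative)
+// change 123456 at patchset 7 would be saved to, and later re-loaded from:
+//
+//	~/.cache/webgpu-cts-results/123456/ps-7.txt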
+
+// GetResults fetches the build results from ResultDB.
+// GetResults does not trigger new builds.
+func GetResults(
+ ctx context.Context,
+ cfg Config,
+ rdb *resultsdb.ResultsDB,
+ builds BuildsByName) (result.List, error) {
+
+ fmt.Printf("fetching results from resultdb...")
+
+ lastPrintedDot := time.Now()
+
+ toStatus := func(s rdbpb.TestStatus) result.Status {
+ switch s {
+ default:
+ return result.Unknown
+ case rdbpb.TestStatus_PASS:
+ return result.Pass
+ case rdbpb.TestStatus_FAIL:
+ return result.Failure
+ case rdbpb.TestStatus_CRASH:
+ return result.Crash
+ case rdbpb.TestStatus_ABORT:
+ return result.Abort
+ case rdbpb.TestStatus_SKIP:
+ return result.Skip
+ }
+ }
+
+ results := result.List{}
+ err := rdb.QueryTestResults(ctx, builds.ids(), cfg.Test.Prefix+".*", func(rpb *rdbpb.TestResult) error {
+ if time.Since(lastPrintedDot) > 5*time.Second {
+ lastPrintedDot = time.Now()
+ fmt.Printf(".")
+ }
+
+ if !strings.HasPrefix(rpb.GetTestId(), cfg.Test.Prefix) {
+ return nil
+ }
+
+ testName := rpb.GetTestId()[len(cfg.Test.Prefix):]
+ status := toStatus(rpb.Status)
+ tags := result.NewTags()
+
+ for _, sp := range rpb.Tags {
+ if sp.Key == "typ_tag" {
+ tags.Add(sp.Value)
+ }
+ }
+
+ duration := rpb.GetDuration().AsDuration()
+ if status == result.Pass && duration > cfg.Test.SlowThreshold {
+ status = result.Slow
+ }
+
+ results = append(results, result.Result{
+ Query: query.Parse(testName),
+ Status: status,
+ Tags: tags,
+ Duration: duration,
+ })
+
+ return nil
+ })
+
+ fmt.Println(" done")
+
+ if err != nil {
+ return nil, err
+ }
+
+ // Expand aliased tags, remove specific tags
+ CleanTags(cfg, &results)
+
+ results.Sort()
+ return results, err
+}
+
+// LatestCTSRoll returns the latest merged CTS roll that landed in the past
+// month. If no roll can be found, then an error is returned.
+func LatestCTSRoll(g *gerrit.Gerrit) (gerrit.ChangeInfo, error) {
+ changes, _, err := g.QueryChanges(
+ `status:merged`,
+ `-age:1month`,
+ fmt.Sprintf(`message:"%v"`, RollSubjectPrefix))
+ if err != nil {
+ return gerrit.ChangeInfo{}, err
+ }
+ if len(changes) == 0 {
+ return gerrit.ChangeInfo{}, fmt.Errorf("no change found")
+ }
+ sort.Slice(changes, func(i, j int) bool {
+ return changes[i].Submitted.Time.After(changes[j].Submitted.Time)
+ })
+ return changes[0], nil
+}
+
+// LatestPatchset returns the most recent patchset for the given change.
+func LatestPatchset(g *gerrit.Gerrit, change int) (gerrit.Patchset, error) {
+ ps, err := g.LatestPatchest(strconv.Itoa(change))
+ if err != nil {
+		err := fmt.Errorf("failed to find latest patchset of change %v: %w",
+			change, err)
+ return gerrit.Patchset{}, err
+ }
+ return ps, nil
+}
+
+// MostRecentResultsForChange returns the results from the most recent patchset
+// that has build results. If no results can be found for the entire change,
+// then an error is returned.
+func MostRecentResultsForChange(
+ ctx context.Context,
+ cfg Config,
+ cacheDir string,
+ g *gerrit.Gerrit,
+ bb *buildbucket.Buildbucket,
+ rdb *resultsdb.ResultsDB,
+ change int) (result.List, gerrit.Patchset, error) {
+
+ ps, err := LatestPatchset(g, change)
+ if err != nil {
+		return nil, gerrit.Patchset{}, err
+ }
+
+ for ps.Patchset > 0 {
+ builds, err := GetBuilds(ctx, cfg, ps, bb)
+ if err != nil {
+ return nil, gerrit.Patchset{}, err
+ }
+ if len(builds) > 0 {
+ if err := WaitForBuildsToComplete(ctx, cfg, ps, bb, builds); err != nil {
+ return nil, gerrit.Patchset{}, err
+ }
+
+ results, err := CacheResults(ctx, cfg, ps, cacheDir, rdb, builds)
+ if err != nil {
+ return nil, gerrit.Patchset{}, err
+ }
+
+ if len(results) > 0 {
+ return results, ps, nil
+ }
+ }
+ ps.Patchset--
+ }
+
+ return nil, gerrit.Patchset{}, fmt.Errorf("no builds found for change %v", change)
+}
+
+// CleanTags modifies each result so that tags in cfg.Tag.Remove are removed and
+// duplicate results are removed by erring towards Failure.
+// See: crbug.com/dawn/1387, crbug.com/dawn/1401
+func CleanTags(cfg Config, results *result.List) {
+ // Remove any tags found in cfg.Tag.Remove
+ remove := result.NewTags(cfg.Tag.Remove...)
+ for _, r := range *results {
+ r.Tags.RemoveAll(remove)
+ }
+ // Clean up duplicate results
+ *results = results.ReplaceDuplicates(func(s result.Statuses) result.Status {
+ // If all results have the same status, then use that.
+ if len(s) == 1 {
+ return s.One()
+ }
+ // Mixed statuses. Replace with something appropriate.
+ switch {
+ case s.Contains(result.Crash):
+ return result.Crash
+ case s.Contains(result.Abort):
+ return result.Abort
+ case s.Contains(result.Failure):
+ return result.Failure
+ case s.Contains(result.Slow):
+ return result.Slow
+ }
+ return result.Failure
+ })
+}
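+
+// Example: a sketch of how the replacement above resolves mixed statuses for
+// duplicate results that only differ by the removed tags:
+//
+//	{Pass, Failure} -> Failure
+//	{Pass, Crash}   -> Crash
+//	{Slow, Abort}   -> Abort
+//	{Pass}          -> Pass (single status, kept as-is)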
diff --git a/chromium/third_party/dawn/tools/src/cmd/cts/config.json b/chromium/third_party/dawn/tools/src/cmd/cts/config.json
new file mode 100644
index 00000000000..c2b16fd4a8a
--- /dev/null
+++ b/chromium/third_party/dawn/tools/src/cmd/cts/config.json
@@ -0,0 +1,56 @@
+{
+ "Test": {
+ "Prefix": "ninja://chrome/test:telemetry_gpu_integration_test/gpu_tests.webgpu_cts_integration_test.WebGpuCtsIntegrationTest.",
+ "SlowThreshold": 15000000000 // 15 seconds
+ },
+ "Gerrit": {
+ "Host": "https://dawn-review.googlesource.com",
+ "Project": "dawn"
+ },
+ "Git": {
+ "CTS": {
+ "Host": "chromium.googlesource.com",
+ "Project": "external/github.com/gpuweb/cts"
+ },
+ "Dawn": {
+ "Host": "dawn.googlesource.com",
+ "Project": "dawn"
+ }
+ },
+ "Builders": {
+ "Win": {
+ "Project": "chromium",
+ "Bucket": "try",
+ "Builder": "win-dawn-rel"
+ },
+ "Win10-x86": {
+ "Project": "chromium",
+ "Bucket": "try",
+ "Builder": "dawn-try-win10-x86-rel"
+ },
+ "Mac": {
+ "Project": "chromium",
+ "Bucket": "try",
+ "Builder": "mac-dawn-rel"
+ },
+ "Linux": {
+ "Project": "chromium",
+ "Bucket": "try",
+ "Builder": "linux-dawn-rel"
+ }
+ },
+ "Tag": {
+ "Remove": [
+ // crbug.com/dawn/1401
+ "release-x64",
+ // crbug.com/dawn/1387, crbug.com/dawn/1444
+ "intel-0x5912",
+ "intel-0x3e92"
+ ],
+ },
+ "Sheets": {
+ // Spreadsheet to export results data to
+ // https://docs.google.com/spreadsheets/d/1OFsh-r_njG5pKDwjL1HOvLJKDRC4FgO-LE9Kw7WPQcc
+ "ID": "1OFsh-r_njG5pKDwjL1HOvLJKDRC4FgO-LE9Kw7WPQcc"
+ }
+}
diff --git a/chromium/third_party/dawn/tools/src/cmd/cts/export/export.go b/chromium/third_party/dawn/tools/src/cmd/cts/export/export.go
new file mode 100644
index 00000000000..70792dbf2f0
--- /dev/null
+++ b/chromium/third_party/dawn/tools/src/cmd/cts/export/export.go
@@ -0,0 +1,317 @@
+// Copyright 2022 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package export
+
+import (
+ "context"
+ "flag"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "dawn.googlesource.com/dawn/tools/src/cmd/cts/common"
+ "dawn.googlesource.com/dawn/tools/src/cts/result"
+ "dawn.googlesource.com/dawn/tools/src/git"
+ "dawn.googlesource.com/dawn/tools/src/gitiles"
+ "dawn.googlesource.com/dawn/tools/src/utils"
+ "go.chromium.org/luci/auth/client/authcli"
+ "golang.org/x/oauth2"
+ "golang.org/x/oauth2/google"
+ "google.golang.org/api/sheets/v4"
+)
+
+func init() {
+ common.Register(&cmd{})
+}
+
+type cmd struct {
+ flags struct {
+ auth authcli.Flags
+ results common.ResultSource
+ }
+}
+
+func (cmd) Name() string {
+ return "export"
+}
+
+func (cmd) Desc() string {
+ return "exports the latest CTS results to Google sheets"
+}
+
+func (c *cmd) RegisterFlags(ctx context.Context, cfg common.Config) ([]string, error) {
+ c.flags.auth.Register(flag.CommandLine, common.DefaultAuthOptions())
+ c.flags.results.RegisterFlags(cfg)
+ return nil, nil
+}
+
+func (c *cmd) Run(ctx context.Context, cfg common.Config) error {
+ // Validate command line arguments
+ auth, err := c.flags.auth.Options()
+ if err != nil {
+ return fmt.Errorf("failed to obtain authentication options: %w", err)
+ }
+
+ // Load the credentials used for accessing the sheets document
+ authdir := utils.ExpandHome(os.ExpandEnv(auth.SecretsDir))
+ credentialsPath := filepath.Join(authdir, "credentials.json")
+ b, err := ioutil.ReadFile(credentialsPath)
+ if err != nil {
+ return fmt.Errorf("unable to read credentials file '%v'\n"+
+ "Obtain this file from: https://console.developers.google.com/apis/credentials\n%w",
+ credentialsPath, err)
+ }
+ credentials, err := google.CredentialsFromJSON(ctx, b, "https://www.googleapis.com/auth/spreadsheets")
+ if err != nil {
+ return fmt.Errorf("unable to parse client secret file to config: %w", err)
+ }
+
+ // Create the sheets service client
+ s, err := sheets.New(oauth2.NewClient(ctx, credentials.TokenSource))
+ if err != nil {
+ return fmt.Errorf("unable to create sheets client: %w", err)
+ }
+
+ // Get the CTS statistics spreadsheet
+ spreadsheet, err := s.Spreadsheets.Get(cfg.Sheets.ID).Do()
+ if err != nil {
+ return fmt.Errorf("failed to get spreadsheet: %w", err)
+ }
+
+ // Scan the sheets of the spreadsheet (tabs at the bottom) for the 'data'
+ // sheet.
+ var dataSheet *sheets.Sheet
+ for _, sheet := range spreadsheet.Sheets {
+ if strings.ToLower(sheet.Properties.Title) == "data" {
+ dataSheet = sheet
+ break
+ }
+ }
+ if dataSheet == nil {
+ return fmt.Errorf("failed to find data sheet")
+ }
+
+ // Fetch the table column names
+	columns, err := fetchRow[string](s, spreadsheet, dataSheet, 0)
+	if err != nil {
+		return fmt.Errorf("failed to fetch table column names: %w", err)
+	}
+
+ // Grab the results
+ results, err := c.flags.results.GetResults(ctx, cfg, auth)
+ if err != nil {
+ return err
+ }
+ if len(results) == 0 {
+ return fmt.Errorf("no results found")
+ }
+ ps := c.flags.results.Patchset
+
+ // Find the CTS revision
+ dawn, err := gitiles.New(ctx, cfg.Git.Dawn.Host, cfg.Git.Dawn.Project)
+ if err != nil {
+ return fmt.Errorf("failed to open dawn host: %w", err)
+ }
+ deps, err := dawn.DownloadFile(ctx, ps.RefsChanges(), "DEPS")
+ if err != nil {
+ return fmt.Errorf("failed to download DEPS from %v: %w", ps.RefsChanges(), err)
+ }
+ _, ctsHash, err := common.UpdateCTSHashInDeps(deps, "<unused>")
+ if err != nil {
+ return fmt.Errorf("failed to find CTS hash in deps: %w", err)
+ }
+
+ // Grab the CTS revision to count the number of unimplemented tests
+ numUnimplemented, err := countUnimplementedTests(cfg, ctsHash)
+ if err != nil {
+ return fmt.Errorf("failed to obtain number of unimplemented tests: %w", err)
+ }
+
+ // Generate a new set of counts of test by status
+ log.Printf("exporting results from cl %v ps %v...", ps.Change, ps.Patchset)
+ counts := map[result.Status]int{}
+ for _, r := range results {
+ counts[r.Status] = counts[r.Status] + 1
+ }
+
+ // Generate new cell data based on the table column names
+ data := []any{}
+ for _, column := range columns {
+ switch strings.ToLower(column) {
+ case "date":
+ data = append(data, time.Now().UTC().Format("2006-01-02"))
+ case "change":
+ data = append(data, ps.Change)
+ case "unimplemented":
+ data = append(data, numUnimplemented)
+ default:
+ count, ok := counts[result.Status(column)]
+ if !ok {
+ log.Println("no results with status", column)
+ }
+ data = append(data, count)
+ }
+ }
+
+ // Insert a blank row under the column header row
+ if err := insertBlankRows(s, spreadsheet, dataSheet, 1, 1); err != nil {
+ return err
+ }
+
+ // Add a new row to the spreadsheet
+ _, err = s.Spreadsheets.Values.BatchUpdate(spreadsheet.SpreadsheetId,
+ &sheets.BatchUpdateValuesRequest{
+ ValueInputOption: "RAW",
+ Data: []*sheets.ValueRange{{
+ Range: rowRange(1, dataSheet),
+ Values: [][]any{data},
+ }},
+ }).Do()
+ if err != nil {
+ return fmt.Errorf("failed to update spreadsheet: %v", err)
+ }
+
+ return nil
+}
+
+// rowRange returns a sheets range ("name!Ai:i") for the entire row with the
+// given index.
+func rowRange(index int, sheet *sheets.Sheet) string {
+ return fmt.Sprintf("%v!A%v:%v", sheet.Properties.Title, index+1, index+1)
+}
+
+// columnRange returns a sheets range ("name!i1:i") for the entire column with
+// the given index.
+func columnRange(index int, sheet *sheets.Sheet) string {
+ col := 'A' + index
+ if index > 25 {
+ panic("UNIMPLEMENTED")
+ }
+ return fmt.Sprintf("%v!%c1:%c", sheet.Properties.Title, col, col)
+}
+
+// fetchRow returns all the values in the given sheet's row.
+func fetchRow[T any](srv *sheets.Service, spreadsheet *sheets.Spreadsheet, sheet *sheets.Sheet, row int) ([]T, error) {
+ rng := rowRange(row, sheet)
+ data, err := srv.Spreadsheets.Values.Get(spreadsheet.SpreadsheetId, rng).Do()
+ if err != nil {
+		return nil, fmt.Errorf("couldn't fetch %v: %w", rng, err)
+ }
+ out := make([]T, len(data.Values[0]))
+ for column, v := range data.Values[0] {
+ val, ok := v.(T)
+ if !ok {
+ return nil, fmt.Errorf("cell at %v:%v was type %T, but expected type %T", row, column, v, val)
+ }
+ out[column] = val
+ }
+ return out, nil
+}
+
+// insertBlankRows inserts blank rows into the given sheet.
+func insertBlankRows(srv *sheets.Service, spreadsheet *sheets.Spreadsheet, sheet *sheets.Sheet, aboveRow, count int) error {
+ req := sheets.BatchUpdateSpreadsheetRequest{
+ Requests: []*sheets.Request{{
+ InsertRange: &sheets.InsertRangeRequest{
+ Range: &sheets.GridRange{
+ SheetId: sheet.Properties.SheetId,
+ StartRowIndex: int64(aboveRow),
+ EndRowIndex: int64(aboveRow + count),
+ },
+ ShiftDimension: "ROWS",
+ }},
+ },
+ }
+ if _, err := srv.Spreadsheets.BatchUpdate(spreadsheet.SpreadsheetId, &req).Do(); err != nil {
+ return fmt.Errorf("BatchUpdate failed: %v", err)
+ }
+ return nil
+}
+
+// countUnimplementedTests checks out the WebGPU CTS at ctsHash, builds the node
+// command line tool, and runs it with '--list-unimplemented webgpu:*' to count
+// the total number of unimplemented tests, which is returned.
+func countUnimplementedTests(cfg common.Config, ctsHash string) (int, error) {
+ tmpDir, err := os.MkdirTemp("", "dawn-cts-export")
+ if err != nil {
+ return 0, err
+ }
+ defer os.RemoveAll(tmpDir)
+
+ dir := filepath.Join(tmpDir, "cts")
+
+ gitExe, err := exec.LookPath("git")
+ if err != nil {
+ return 0, fmt.Errorf("failed to find git on PATH: %w", err)
+ }
+
+ git, err := git.New(gitExe)
+ if err != nil {
+ return 0, err
+ }
+
+ log.Printf("cloning cts to '%v'...", dir)
+ repo, err := git.Clone(dir, cfg.Git.CTS.HttpsURL(), nil)
+ if err != nil {
+ return 0, fmt.Errorf("failed to clone cts: %v", err)
+ }
+
+ log.Printf("checking out cts @ '%v'...", ctsHash)
+ if _, err := repo.Fetch(ctsHash, nil); err != nil {
+ return 0, fmt.Errorf("failed to fetch cts: %v", err)
+ }
+ if err := repo.Checkout(ctsHash, nil); err != nil {
+		return 0, fmt.Errorf("failed to checkout cts: %v", err)
+ }
+
+ {
+ npm, err := exec.LookPath("npm")
+ if err != nil {
+ return 0, fmt.Errorf("failed to find npm on PATH: %w", err)
+ }
+ cmd := exec.Command(npm, "ci")
+ cmd.Dir = dir
+ if out, err := cmd.CombinedOutput(); err != nil {
+ return 0, fmt.Errorf("failed to run npm ci: %w\n%v", err, string(out))
+ }
+ }
+ {
+ npx, err := exec.LookPath("npx")
+ if err != nil {
+ return 0, fmt.Errorf("failed to find npx on PATH: %w", err)
+ }
+ cmd := exec.Command(npx, "grunt", "run:build-out-node")
+ cmd.Dir = dir
+ if out, err := cmd.CombinedOutput(); err != nil {
+ return 0, fmt.Errorf("failed to build CTS typescript: %w\n%v", err, string(out))
+ }
+ }
+ {
+		node, err := exec.LookPath("node")
+		if err != nil {
+			return 0, fmt.Errorf("failed to find node on PATH: %w", err)
+		}
+		cmd := exec.Command(node, "./tools/run_node", "--list-unimplemented", "webgpu:*")
+ cmd.Dir = dir
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ return 0, fmt.Errorf("failed to gather unimplemented tests: %w", err)
+ }
+		lines := strings.Split(strings.TrimSpace(string(out)), "\n")
+ return len(lines), nil
+ }
+}
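+
+// Example: the rough shell equivalent of the steps performed above, run inside
+// the temporary CTS checkout:
+//
+//	npm ci
+//	npx grunt run:build-out-node
+//	node ./tools/run_node --list-unimplemented webgpu:*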
diff --git a/chromium/third_party/dawn/tools/src/cmd/cts/format/format.go b/chromium/third_party/dawn/tools/src/cmd/cts/format/format.go
new file mode 100644
index 00000000000..823e1bea1c3
--- /dev/null
+++ b/chromium/third_party/dawn/tools/src/cmd/cts/format/format.go
@@ -0,0 +1,55 @@
+// Copyright 2022 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package format
+
+import (
+ "context"
+ "flag"
+
+ "dawn.googlesource.com/dawn/tools/src/cmd/cts/common"
+ "dawn.googlesource.com/dawn/tools/src/cts/expectations"
+)
+
+func init() {
+ common.Register(&cmd{})
+}
+
+type cmd struct {
+ flags struct {
+ expectations string // expectations file path
+ }
+}
+
+func (cmd) Name() string {
+ return "format"
+}
+
+func (cmd) Desc() string {
+	return "formats a WebGPU expectations file"
+}
+
+func (c *cmd) RegisterFlags(ctx context.Context, cfg common.Config) ([]string, error) {
+ defaultExpectations := common.DefaultExpectationsPath()
+ flag.StringVar(&c.flags.expectations, "expectations", defaultExpectations, "path to CTS expectations file to update")
+ return nil, nil
+}
+
+func (c *cmd) Run(ctx context.Context, cfg common.Config) error {
+ ex, err := expectations.Load(c.flags.expectations)
+ if err != nil {
+ return err
+ }
+ return ex.Save(c.flags.expectations)
+}
diff --git a/chromium/third_party/dawn/tools/src/cmd/cts/main.go b/chromium/third_party/dawn/tools/src/cmd/cts/main.go
new file mode 100644
index 00000000000..4f90c2ac6be
--- /dev/null
+++ b/chromium/third_party/dawn/tools/src/cmd/cts/main.go
@@ -0,0 +1,55 @@
+// Copyright 2022 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// cts is a collection of sub-commands for operating on the WebGPU CTS.
+//
+// To view available commands run: '<dawn>/tools/run cts --help'
+package main
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "dawn.googlesource.com/dawn/tools/src/cmd/cts/common"
+ "dawn.googlesource.com/dawn/tools/src/subcmd"
+ "dawn.googlesource.com/dawn/tools/src/utils"
+
+ // Register sub-commands
+ _ "dawn.googlesource.com/dawn/tools/src/cmd/cts/export"
+ _ "dawn.googlesource.com/dawn/tools/src/cmd/cts/format"
+ _ "dawn.googlesource.com/dawn/tools/src/cmd/cts/merge"
+ _ "dawn.googlesource.com/dawn/tools/src/cmd/cts/results"
+ _ "dawn.googlesource.com/dawn/tools/src/cmd/cts/roll"
+ _ "dawn.googlesource.com/dawn/tools/src/cmd/cts/time"
+ _ "dawn.googlesource.com/dawn/tools/src/cmd/cts/update"
+)
+
+func main() {
+ ctx := context.Background()
+
+ cfg, err := common.LoadConfig(filepath.Join(utils.ThisDir(), "config.json"))
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(1)
+ }
+
+ if err := subcmd.Run(ctx, *cfg, common.Commands()...); err != nil {
+ if err != subcmd.ErrInvalidCLA {
+ fmt.Fprintln(os.Stderr, err)
+ }
+ os.Exit(1)
+ }
+}
diff --git a/chromium/third_party/dawn/tools/src/cmd/cts/merge/merge.go b/chromium/third_party/dawn/tools/src/cmd/cts/merge/merge.go
new file mode 100644
index 00000000000..b2b80e8cb0d
--- /dev/null
+++ b/chromium/third_party/dawn/tools/src/cmd/cts/merge/merge.go
@@ -0,0 +1,76 @@
+// Copyright 2022 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package merge
+
+import (
+ "context"
+ "flag"
+ "fmt"
+ "os"
+
+ "dawn.googlesource.com/dawn/tools/src/cmd/cts/common"
+ "dawn.googlesource.com/dawn/tools/src/cts/result"
+)
+
+func init() {
+ common.Register(&cmd{})
+}
+
+type cmd struct {
+ flags struct {
+ output string
+ }
+}
+
+func (cmd) Name() string { return "merge" }
+
+func (cmd) Desc() string { return "merges results files into one" }
+
+func (c *cmd) RegisterFlags(ctx context.Context, cfg common.Config) ([]string, error) {
+ flag.StringVar(&c.flags.output, "o", "results.txt", "output file. '-' writes to stdout")
+ return []string{"first-results.txt", "second-results.txt ..."}, nil
+}
+
+func (c *cmd) Run(ctx context.Context, cfg common.Config) error {
+ // Load each of the results files and merge together
+ var results result.List
+ for _, path := range flag.Args() {
+ // Load results
+ r, err := result.Load(path)
+ if err != nil {
+ return fmt.Errorf("while reading '%v': %w", path, err)
+ }
+ // Combine and merge
+ if len(results) > 0 {
+ results = result.Merge(results, r)
+ } else {
+ results = r
+ }
+ }
+
+ // Open output file
+ output := os.Stdout
+ if c.flags.output != "-" {
+ var err error
+ output, err = os.Create(c.flags.output)
+ if err != nil {
+ return fmt.Errorf("failed to open output file '%v': %w", c.flags.output, err)
+ }
+ defer output.Close()
+ }
+
+ // Write out
+ return result.Write(output, results)
+}
diff --git a/chromium/third_party/dawn/tools/src/cmd/cts/results/results.go b/chromium/third_party/dawn/tools/src/cmd/cts/results/results.go
new file mode 100644
index 00000000000..93eaf79973b
--- /dev/null
+++ b/chromium/third_party/dawn/tools/src/cmd/cts/results/results.go
@@ -0,0 +1,79 @@
+// Copyright 2022 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package results
+
+import (
+ "context"
+ "flag"
+ "fmt"
+ "os"
+
+ "dawn.googlesource.com/dawn/tools/src/cmd/cts/common"
+ "dawn.googlesource.com/dawn/tools/src/cts/result"
+ "go.chromium.org/luci/auth/client/authcli"
+)
+
+func init() {
+ common.Register(&cmd{})
+}
+
+type cmd struct {
+ flags struct {
+ output string
+ source common.ResultSource
+ auth authcli.Flags
+ }
+}
+
+func (cmd) Name() string {
+ return "results"
+}
+
+func (cmd) Desc() string {
+ return "obtains the CTS results from a patchset"
+}
+
+func (c *cmd) RegisterFlags(ctx context.Context, cfg common.Config) ([]string, error) {
+ flag.StringVar(&c.flags.output, "o", "results.txt", "output file. '-' writes to stdout")
+ c.flags.source.RegisterFlags(cfg)
+ c.flags.auth.Register(flag.CommandLine, common.DefaultAuthOptions())
+ return nil, nil
+}
+
+func (c *cmd) Run(ctx context.Context, cfg common.Config) error {
+ // Validate command line arguments
+ auth, err := c.flags.auth.Options()
+ if err != nil {
+ return fmt.Errorf("failed to obtain authentication options: %w", err)
+ }
+
+ // Obtain the results
+ results, err := c.flags.source.GetResults(ctx, cfg, auth)
+ if err != nil {
+ return err
+ }
+
+ // Open output file
+ output := os.Stdout
+ if c.flags.output != "-" {
+ output, err = os.Create(c.flags.output)
+ if err != nil {
+ return fmt.Errorf("failed to open output file '%v': %w", c.flags.output, err)
+ }
+ defer output.Close()
+ }
+
+ return result.Write(output, results)
+}
diff --git a/chromium/third_party/dawn/tools/src/cmd/cts/roll/roll.go b/chromium/third_party/dawn/tools/src/cmd/cts/roll/roll.go
new file mode 100644
index 00000000000..489ef1dd4e8
--- /dev/null
+++ b/chromium/third_party/dawn/tools/src/cmd/cts/roll/roll.go
@@ -0,0 +1,629 @@
+// Copyright 2022 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package roll
+
+import (
+ "context"
+ "flag"
+ "fmt"
+ "log"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "sort"
+ "strconv"
+ "strings"
+ "text/tabwriter"
+ "time"
+
+ "dawn.googlesource.com/dawn/tools/src/buildbucket"
+ "dawn.googlesource.com/dawn/tools/src/cmd/cts/common"
+ "dawn.googlesource.com/dawn/tools/src/container"
+ "dawn.googlesource.com/dawn/tools/src/cts/expectations"
+ "dawn.googlesource.com/dawn/tools/src/cts/result"
+ "dawn.googlesource.com/dawn/tools/src/gerrit"
+ "dawn.googlesource.com/dawn/tools/src/git"
+ "dawn.googlesource.com/dawn/tools/src/gitiles"
+ "dawn.googlesource.com/dawn/tools/src/resultsdb"
+ "go.chromium.org/luci/auth"
+ "go.chromium.org/luci/auth/client/authcli"
+)
+
+func init() {
+ common.Register(&cmd{})
+}
+
+const (
+ depsRelPath = "DEPS"
+ tsSourcesRelPath = "third_party/gn/webgpu-cts/ts_sources.txt"
+ resourceFilesRelPath = "third_party/gn/webgpu-cts/resource_files.txt"
+ refMain = "refs/heads/main"
+ noExpectations = `# Clear all expectations to obtain full list of results`
+)
+
+type rollerFlags struct {
+ gitPath string
+ tscPath string
+ auth authcli.Flags
+ cacheDir string
+ force bool // Create a new roll, even if CTS is up to date
+ rebuild bool // Rebuild the expectations file from scratch
+ preserve bool // If false, abandon past roll changes
+}
+
+type cmd struct {
+ flags rollerFlags
+}
+
+func (cmd) Name() string {
+ return "roll"
+}
+
+func (cmd) Desc() string {
+ return "roll CTS and re-generate expectations"
+}
+
+func (c *cmd) RegisterFlags(ctx context.Context, cfg common.Config) ([]string, error) {
+ gitPath, _ := exec.LookPath("git")
+ tscPath, _ := exec.LookPath("tsc")
+ c.flags.auth.Register(flag.CommandLine, common.DefaultAuthOptions())
+ flag.StringVar(&c.flags.gitPath, "git", gitPath, "path to git")
+ flag.StringVar(&c.flags.tscPath, "tsc", tscPath, "path to tsc")
+ flag.StringVar(&c.flags.cacheDir, "cache", common.DefaultCacheDir, "path to the results cache")
+ flag.BoolVar(&c.flags.force, "force", false, "create a new roll, even if CTS is up to date")
+ flag.BoolVar(&c.flags.rebuild, "rebuild", false, "rebuild the expectation file from scratch")
+ flag.BoolVar(&c.flags.preserve, "preserve", false, "do not abandon existing rolls")
+
+ return nil, nil
+}
+
+func (c *cmd) Run(ctx context.Context, cfg common.Config) error {
+ // Validate command line arguments
+ auth, err := c.flags.auth.Options()
+ if err != nil {
+ return fmt.Errorf("failed to obtain authentication options: %w", err)
+ }
+
+ // Check tools can be found
+ for _, tool := range []struct {
+ name, path, hint string
+ }{
+ {name: "git", path: c.flags.gitPath},
+ {name: "tsc", path: c.flags.tscPath, hint: "Try using '-tsc third_party/webgpu-cts/node_modules/.bin/tsc' after an 'npm ci'."},
+ } {
+ if _, err := os.Stat(tool.path); err != nil {
+ return fmt.Errorf("failed to find path to %v: %v. %v", tool.name, err, tool.hint)
+ }
+ }
+
+ // Create a temporary directory for local checkouts
+ tmpDir, err := os.MkdirTemp("", "dawn-cts-roll")
+ if err != nil {
+ return err
+ }
+ defer os.RemoveAll(tmpDir)
+ ctsDir := filepath.Join(tmpDir, "cts")
+
+ // Create the various service clients
+ git, err := git.New(c.flags.gitPath)
+ if err != nil {
+		return fmt.Errorf("failed to create git client: %w", err)
+ }
+ gerrit, err := gerrit.New(cfg.Gerrit.Host, gerrit.Credentials{})
+ if err != nil {
+ return err
+ }
+ chromium, err := gitiles.New(ctx, cfg.Git.CTS.Host, cfg.Git.CTS.Project)
+ if err != nil {
+ return err
+ }
+ dawn, err := gitiles.New(ctx, cfg.Git.Dawn.Host, cfg.Git.Dawn.Project)
+ if err != nil {
+ return err
+ }
+ bb, err := buildbucket.New(ctx, auth)
+ if err != nil {
+ return err
+ }
+ rdb, err := resultsdb.New(ctx, auth)
+ if err != nil {
+ return err
+ }
+
+ // Construct the roller, and roll
+ r := roller{
+ cfg: cfg,
+ flags: c.flags,
+ auth: auth,
+ bb: bb,
+ rdb: rdb,
+ git: git,
+ gerrit: gerrit,
+ chromium: chromium,
+ dawn: dawn,
+ ctsDir: ctsDir,
+ }
+ return r.roll(ctx)
+}
+
+type roller struct {
+ cfg common.Config
+ flags rollerFlags
+ auth auth.Options
+ bb *buildbucket.Buildbucket
+ rdb *resultsdb.ResultsDB
+ git *git.Git
+ gerrit *gerrit.Gerrit
+ chromium *gitiles.Gitiles
+ dawn *gitiles.Gitiles
+ ctsDir string
+}
+
+func (r *roller) roll(ctx context.Context) error {
+ // Fetch the latest Dawn main revision
+ dawnHash, err := r.dawn.Hash(ctx, refMain)
+ if err != nil {
+ return err
+ }
+
+ // Update the DEPS file
+ updatedDEPS, newCTSHash, oldCTSHash, err := r.updateDEPS(ctx, dawnHash)
+ if err != nil {
+ return err
+ }
+ if newCTSHash == oldCTSHash && !r.flags.force {
+ // Already up to date
+ fmt.Println("CTS is already up to date")
+ return nil
+ }
+
+ log.Printf("starting CTS roll from %v to %v...", oldCTSHash[:8], newCTSHash[:8])
+
+ // Checkout the CTS at the latest revision
+ ctsRepo, err := r.checkout("cts", r.ctsDir, r.cfg.Git.CTS.HttpsURL(), newCTSHash)
+ if err != nil {
+ return err
+ }
+
+ // Fetch the log of changes between last roll and now
+ ctsLog, err := ctsRepo.Log(&git.LogOptions{From: oldCTSHash, To: newCTSHash})
+ if err != nil {
+ return err
+ }
+ ctsLog = ctsLog[:len(ctsLog)-1] // Don't include the oldest change in the log
+
+ // Download and parse the expectations file
+ expectationsFile, err := r.dawn.DownloadFile(ctx, refMain, common.RelativeExpectationsPath)
+ if err != nil {
+ return err
+ }
+ ex, err := expectations.Parse(expectationsFile)
+ if err != nil {
+ return fmt.Errorf("failed to load expectations: %v", err)
+ }
+
+	// If the user requested a full rebuild of the expectations, strip out
+ // everything but comment chunks.
+ if r.flags.rebuild {
+ rebuilt := ex.Clone()
+ rebuilt.Chunks = rebuilt.Chunks[:0]
+ for _, c := range ex.Chunks {
+ switch {
+ case c.IsBlankLine():
+ rebuilt.MaybeAddBlankLine()
+ case c.IsCommentOnly():
+ rebuilt.Chunks = append(rebuilt.Chunks, c)
+ }
+ }
+ ex = rebuilt
+ }
+
+ // Regenerate the typescript dependency list
+ tsSources, err := r.genTSDepList(ctx)
+ if err != nil {
+ return fmt.Errorf("failed to generate ts_sources.txt: %v", err)
+ }
+
+ // Regenerate the resource files list
+ resources, err := r.genResourceFilesList(ctx)
+ if err != nil {
+ return fmt.Errorf("failed to generate resource_files.txt: %v", err)
+ }
+
+ // Look for an existing gerrit change to update
+ existingRolls, err := r.findExistingRolls()
+ if err != nil {
+ return err
+ }
+
+ // Abandon existing rolls, if -preserve is false
+ if !r.flags.preserve && len(existingRolls) > 0 {
+		log.Printf("abandoning %v existing roll(s)...", len(existingRolls))
+ for _, change := range existingRolls {
+ if err := r.gerrit.Abandon(change.ChangeID); err != nil {
+ return err
+ }
+ }
+ existingRolls = nil
+ }
+
+ // Create a new gerrit change, if needed
+ changeID := ""
+ if len(existingRolls) == 0 {
+ msg := r.rollCommitMessage(oldCTSHash, newCTSHash, ctsLog, "")
+ change, err := r.gerrit.CreateChange(r.cfg.Gerrit.Project, "main", msg, true)
+ if err != nil {
+ return err
+ }
+ changeID = change.ID
+ log.Printf("created gerrit change %v...", change.Number)
+ } else {
+ changeID = existingRolls[0].ID
+ log.Printf("reusing existing gerrit change %v...", existingRolls[0].Number)
+ }
+
+	// Update the DEPS and ts_sources files.
+	// Update the expectations with the re-formatted content and updated
+	// timestamp.
+ updateExpectationUpdateTimestamp(&ex)
+ msg := r.rollCommitMessage(oldCTSHash, newCTSHash, ctsLog, changeID)
+ ps, err := r.gerrit.EditFiles(changeID, msg, map[string]string{
+ depsRelPath: updatedDEPS,
+ common.RelativeExpectationsPath: ex.String(),
+ tsSourcesRelPath: tsSources,
+ resourceFilesRelPath: resources,
+ })
+ if err != nil {
+ return fmt.Errorf("failed to update change '%v': %v", changeID, err)
+ }
+
+ // Begin main roll loop
+ const maxAttempts = 3
+ results := result.List{}
+ for attempt := 0; ; attempt++ {
+ // Kick builds
+ log.Printf("building (attempt %v)...\n", attempt)
+ builds, err := common.GetOrStartBuildsAndWait(ctx, r.cfg, ps, r.bb, false)
+ if err != nil {
+ return err
+ }
+
+ // Look to see if any of the builds failed
+ failingBuilds := []string{}
+ for id, build := range builds {
+ if build.Status != buildbucket.StatusSuccess {
+ failingBuilds = append(failingBuilds, id)
+ }
+ }
+ if len(failingBuilds) > 0 {
+ sort.Strings(failingBuilds)
+ log.Println("builds failed: ", failingBuilds)
+ }
+
+ // Gather the build results
+ log.Println("gathering results...")
+ psResults, err := common.CacheResults(ctx, r.cfg, ps, r.flags.cacheDir, r.rdb, builds)
+ if err != nil {
+ return err
+ }
+
+ // Merge the new results into the accumulated results
+ log.Println("merging results...")
+ results = result.Merge(results, psResults)
+
+ // Rebuild the expectations with the accumulated results
+ log.Println("building new expectations...")
+ // Note: The new expectations are not used if the last attempt didn't
+ // fail, but we always want to post the diagnostics
+ newExpectations := ex.Clone()
+ diags, err := newExpectations.Update(results)
+ if err != nil {
+ return err
+ }
+
+ // Post statistics and expectation diagnostics
+ log.Println("posting stats & diagnostics...")
+ if err := r.postComments(ps, diags, results); err != nil {
+ return err
+ }
+
+ // If all the builds attempted, then we're done!
+ if len(failingBuilds) == 0 {
+ break
+ }
+
+ // Otherwise, push the updated expectations, and try again
+ log.Println("updating expectations...")
+ updateExpectationUpdateTimestamp(&newExpectations)
+ ps, err = r.gerrit.EditFiles(changeID, msg, map[string]string{
+ common.RelativeExpectationsPath: newExpectations.String(),
+ })
+ if err != nil {
+ return fmt.Errorf("failed to update change '%v': %v", changeID, err)
+ }
+
+ if attempt >= maxAttempts {
+ err := fmt.Errorf("CTS failed after %v attempts.\nGiving up", attempt)
+ r.gerrit.Comment(ps, err.Error(), nil)
+ return err
+ }
+ }
+
+ if err := r.gerrit.SetReadyForReview(changeID, "CTS roll succeeded"); err != nil {
+ return fmt.Errorf("failed to mark change as ready for review: %v", err)
+ }
+
+ return nil
+}
+
+// Updates the '# Last rolled:' string in the expectations file.
+func updateExpectationUpdateTimestamp(content *expectations.Content) {
+ prefix := "# Last rolled: "
+ comment := prefix + time.Now().UTC().Format("2006-01-02 03:04:05PM")
+ for _, chunk := range content.Chunks {
+ for l, line := range chunk.Comments {
+ if strings.HasPrefix(line, prefix) {
+ chunk.Comments[l] = comment
+ return
+ }
+ }
+ }
+ newChunks := []expectations.Chunk{}
+ if len(content.Chunks) > 0 {
+ newChunks = append(newChunks,
+ content.Chunks[0],
+ expectations.Chunk{},
+ )
+ }
+ newChunks = append(newChunks,
+ expectations.Chunk{Comments: []string{comment}},
+ )
+ if len(content.Chunks) > 0 {
+ newChunks = append(newChunks, content.Chunks[1:]...)
+ }
+
+ content.Chunks = newChunks
+}
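+
+// Example: the comment written (or rewritten) by the function above looks like
+// the following, with an illustrative date:
+//
+//	# Last rolled: 2022-06-01 09:30:00AM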
+
+// rollCommitMessage returns the commit message for the roll
+func (r *roller) rollCommitMessage(
+ oldCTSHash, newCTSHash string,
+ ctsLog []git.CommitInfo,
+ changeID string) string {
+
+ msg := &strings.Builder{}
+ msg.WriteString(common.RollSubjectPrefix)
+ msg.WriteString(oldCTSHash[:9])
+ msg.WriteString("..")
+ msg.WriteString(newCTSHash[:9])
+ msg.WriteString(" (")
+ msg.WriteString(strconv.Itoa(len(ctsLog)))
+ if len(ctsLog) == 1 {
+ msg.WriteString(" commit)")
+ } else {
+ msg.WriteString(" commits)")
+ }
+ msg.WriteString("\n\n")
+ msg.WriteString("Update expectations and ts_sources")
+ msg.WriteString("\n\n")
+ msg.WriteString("https://chromium.googlesource.com/external/github.com/gpuweb/cts/+log/")
+ msg.WriteString(oldCTSHash[:12])
+ msg.WriteString("..")
+ msg.WriteString(newCTSHash[:12])
+ msg.WriteString("\n")
+ for _, change := range ctsLog {
+ msg.WriteString(" - ")
+ msg.WriteString(change.Hash.String()[:6])
+ msg.WriteString(" ")
+ msg.WriteString(change.Subject)
+ msg.WriteString("\n")
+ }
+ msg.WriteString("\n")
+ msg.WriteString("Created with './tools/run cts roll'")
+ msg.WriteString("\n")
+ msg.WriteString("\n")
+ if len(r.cfg.Builders) > 0 {
+ msg.WriteString("Cq-Include-Trybots: ")
+ buildersByBucket := container.NewMap[string, []string]()
+ for _, build := range r.cfg.Builders {
+ key := fmt.Sprintf("luci.%v.%v", build.Project, build.Bucket)
+ buildersByBucket[key] = append(buildersByBucket[key], build.Builder)
+ }
+ first := true
+ for _, bucket := range buildersByBucket.Keys() {
+ // Cq-Include-Trybots: luci.chromium.try:win-dawn-rel;luci.dawn.try:mac-dbg,mac-rel
+ if !first {
+ msg.WriteString(";")
+ }
+ first = false
+ msg.WriteString(bucket)
+ msg.WriteString(":")
+ builders := buildersByBucket[bucket]
+ sort.Strings(builders)
+ msg.WriteString(strings.Join(builders, ","))
+ }
+ msg.WriteString("\n")
+ }
+ if changeID != "" {
+ msg.WriteString("Change-Id: ")
+ msg.WriteString(changeID)
+ msg.WriteString("\n")
+ }
+ return msg.String()
+}
+
+func (r *roller) postComments(ps gerrit.Patchset, diags []expectations.Diagnostic, results result.List) error {
+ fc := make([]gerrit.FileComment, len(diags))
+ for i, d := range diags {
+ var prefix string
+ switch d.Severity {
+ case expectations.Error:
+ prefix = "🟥"
+ case expectations.Warning:
+ prefix = "🟨"
+ case expectations.Note:
+ prefix = "🟦"
+ }
+ fc[i] = gerrit.FileComment{
+ Path: common.RelativeExpectationsPath,
+ Side: gerrit.Left,
+ Line: d.Line,
+ Message: fmt.Sprintf("%v %v: %v", prefix, d.Severity, d.Message),
+ }
+ }
+
+ sb := &strings.Builder{}
+
+ {
+ sb.WriteString("Tests by status:\n")
+ counts := map[result.Status]int{}
+ for _, r := range results {
+ counts[r.Status] = counts[r.Status] + 1
+ }
+ type StatusCount struct {
+ status result.Status
+ count int
+ }
+ statusCounts := []StatusCount{}
+ for s, n := range counts {
+ if n > 0 {
+ statusCounts = append(statusCounts, StatusCount{s, n})
+ }
+ }
+ sort.Slice(statusCounts, func(i, j int) bool { return statusCounts[i].status < statusCounts[j].status })
+ sb.WriteString("```\n")
+ tw := tabwriter.NewWriter(sb, 0, 1, 0, ' ', 0)
+ for _, sc := range statusCounts {
+ fmt.Fprintf(tw, "%v:\t %v\n", sc.status, sc.count)
+ }
+ tw.Flush()
+ sb.WriteString("```\n")
+ }
+ {
+ sb.WriteString("Top 25 slowest tests:\n")
+ sort.Slice(results, func(i, j int) bool {
+ return results[i].Duration > results[j].Duration
+ })
+ const N = 25
+ topN := results
+ if len(topN) > N {
+ topN = topN[:N]
+ }
+ sb.WriteString("```\n")
+ for i, r := range topN {
+ fmt.Fprintf(sb, "%3.1d: %v\n", i, r)
+ }
+ sb.WriteString("```\n")
+ }
+
+ if err := r.gerrit.Comment(ps, sb.String(), fc); err != nil {
+ return fmt.Errorf("failed to post stats on change: %v", err)
+ }
+ return nil
+}
+
+// findExistingRolls looks for all existing open CTS rolls by this user
+func (r *roller) findExistingRolls() ([]gerrit.ChangeInfo, error) {
+ // Look for an existing gerrit change to update
+ changes, _, err := r.gerrit.QueryChanges("owner:me",
+ "is:open",
+ fmt.Sprintf(`repo:"%v"`, r.cfg.Git.Dawn.Project),
+ fmt.Sprintf(`message:"%v"`, common.RollSubjectPrefix))
+ if err != nil {
+ return nil, fmt.Errorf("failed to find existing roll gerrit changes: %v", err)
+ }
+ return changes, nil
+}
+
+// checkout performs a git checkout of the repo at host to dir at the given hash
+func (r *roller) checkout(project, dir, host, hash string) (*git.Repository, error) {
+ log.Printf("cloning %v to '%v'...", project, dir)
+ repo, err := r.git.Clone(dir, host, nil)
+ if err != nil {
+ return nil, fmt.Errorf("failed to clone %v: %v", project, err)
+ }
+ log.Printf("checking out %v @ '%v'...", project, hash)
+ if _, err := repo.Fetch(hash, nil); err != nil {
+ return nil, fmt.Errorf("failed to fetch project %v @ %v: %v",
+ project, hash, err)
+ }
+ if err := repo.Checkout(hash, nil); err != nil {
+ return nil, fmt.Errorf("failed to checkout project %v @ %v: %v",
+ project, hash, err)
+ }
+ return repo, nil
+}
+
+// updateDEPS fetches and updates the Dawn DEPS file at 'dawnRef' so that all
+// CTS hashes are changed to the latest CTS hash.
+func (r *roller) updateDEPS(ctx context.Context, dawnRef string) (newDEPS, newCTSHash, oldCTSHash string, err error) {
+ newCTSHash, err = r.chromium.Hash(ctx, refMain)
+ if err != nil {
+ return "", "", "", err
+ }
+ deps, err := r.dawn.DownloadFile(ctx, dawnRef, depsRelPath)
+ if err != nil {
+ return "", "", "", err
+ }
+ newDEPS, oldCTSHash, err = common.UpdateCTSHashInDeps(deps, newCTSHash)
+ if err != nil {
+ return "", "", "", err
+ }
+
+ return newDEPS, newCTSHash, oldCTSHash, nil
+}
+
+// genTSDepList returns a list of source files for the CTS checkout at r.ctsDir
+// This list can be used to populate the ts_sources.txt file.
+func (r *roller) genTSDepList(ctx context.Context) (string, error) {
+ cmd := exec.CommandContext(ctx, r.flags.tscPath, "--project",
+ filepath.Join(r.ctsDir, "tsconfig.json"),
+ "--listFiles",
+ "--declaration", "false",
+ "--sourceMap", "false")
+ out, _ := cmd.Output()
+
+ prefix := filepath.ToSlash(r.ctsDir) + "/"
+
+ deps := []string{}
+ for _, line := range strings.Split(string(out), "\n") {
+ if strings.HasPrefix(line, prefix) {
+ line = line[len(prefix):]
+ if strings.HasPrefix(line, "src/") {
+ deps = append(deps, line)
+ }
+ }
+ }
+
+ return strings.Join(deps, "\n") + "\n", nil
+}
+
+// genResourceFilesList returns a list of resource files for the CTS checkout at r.ctsDir
+// This list can be used to populate the resource_files.txt file.
+func (r *roller) genResourceFilesList(ctx context.Context) (string, error) {
+ dir := filepath.Join(r.ctsDir, "src", "resources")
+ files, err := filepath.Glob(filepath.Join(dir, "*"))
+ if err != nil {
+ return "", err
+ }
+ for i, file := range files {
+ file, err := filepath.Rel(dir, file)
+ if err != nil {
+ return "", err
+ }
+ files[i] = file
+ }
+ return strings.Join(files, "\n") + "\n", nil
+}
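
The actual hash rewrite in updateDEPS above is delegated to common.UpdateCTSHashInDeps, which is not part of this patch. Purely as an illustrative sketch of the idea — the DEPS excerpt, regular expression, helper name and placeholder hashes below are invented for the example and are not the real implementation — the rewrite amounts to locating the pinned gpuweb/cts revision, remembering it, and substituting the new hash:

package main

import (
	"fmt"
	"regexp"
)

// updateCTSHash is a hypothetical stand-in for common.UpdateCTSHashInDeps:
// it finds the pinned webgpu-cts revision in the DEPS text, reports the hash
// that was pinned, and returns the DEPS text with the new hash in its place.
func updateCTSHash(deps, newHash string) (newDEPS, oldHash string, err error) {
	re := regexp.MustCompile(`(github\.com/gpuweb/cts@)([0-9a-f]{40})`)
	m := re.FindStringSubmatch(deps)
	if m == nil {
		return "", "", fmt.Errorf("no pinned CTS revision found in DEPS")
	}
	oldHash = m[2]
	newDEPS = re.ReplaceAllString(deps, "${1}"+newHash)
	return newDEPS, oldHash, nil
}

func main() {
	// Invented DEPS excerpt with an obviously fake placeholder hash.
	deps := "  'third_party/webgpu-cts/src':\n" +
		"    Var('chromium_git') + '/external/github.com/gpuweb/cts@" +
		"1111111111111111111111111111111111111111',\n"
	newDEPS, oldHash, err := updateCTSHash(deps, "2222222222222222222222222222222222222222")
	fmt.Println(oldHash, err)
	fmt.Print(newDEPS)
}
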
diff --git a/chromium/third_party/dawn/tools/src/cmd/cts/roll/roll_test.go b/chromium/third_party/dawn/tools/src/cmd/cts/roll/roll_test.go
new file mode 100644
index 00000000000..b7ba0540ca7
--- /dev/null
+++ b/chromium/third_party/dawn/tools/src/cmd/cts/roll/roll_test.go
@@ -0,0 +1,75 @@
+// Copyright 2022 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package roll
+
+import (
+ "testing"
+
+ "dawn.googlesource.com/dawn/tools/src/buildbucket"
+ "dawn.googlesource.com/dawn/tools/src/cmd/cts/common"
+ "dawn.googlesource.com/dawn/tools/src/git"
+ "github.com/google/go-cmp/cmp"
+)
+
+func MustParseHash(s string) git.Hash {
+ hash, err := git.ParseHash(s)
+ if err != nil {
+ panic(err)
+ }
+ return hash
+}
+
+func TestRollCommitMessage(t *testing.T) {
+ r := roller{
+ cfg: common.Config{
+ Builders: map[string]buildbucket.Builder{
+ "Win": {Project: "chromium", Bucket: "try", Builder: "win-dawn-rel"},
+ "Mac": {Project: "dawn", Bucket: "try", Builder: "mac-dbg"},
+ "Linux": {Project: "chromium", Bucket: "try", Builder: "linux-dawn-rel"},
+ },
+ },
+ }
+ msg := r.rollCommitMessage(
+ "d5e605a556408eaeeda64fb9d33c3f596fd90b70",
+ "29275672eefe76986bd4baa7c29ed17b66616b1b",
+ []git.CommitInfo{
+ {
+ Hash: MustParseHash("d5e605a556408eaeeda64fb9d33c3f596fd90b70"),
+ Subject: "Added thing A",
+ },
+ {
+ Hash: MustParseHash("29275672eefe76986bd4baa7c29ed17b66616b1b"),
+ Subject: "Tweaked thing B",
+ },
+ },
+ "I4aa059c6c183e622975b74dbdfdfe0b12341ae15",
+ )
+ expect := `Roll third_party/webgpu-cts/ d5e605a55..29275672e (2 commits)
+
+Update expectations and ts_sources
+
+https://chromium.googlesource.com/external/github.com/gpuweb/cts/+log/d5e605a55640..29275672eefe
+ - d5e605 Added thing A
+ - 292756 Tweaked thing B
+
+Created with './tools/run cts roll'
+
+Cq-Include-Trybots: luci.chromium.try:linux-dawn-rel,win-dawn-rel;luci.dawn.try:mac-dbg
+Change-Id: I4aa059c6c183e622975b74dbdfdfe0b12341ae15
+`
+ if diff := cmp.Diff(msg, expect); diff != "" {
+ t.Errorf("rollCommitMessage: %v", diff)
+ }
+}
diff --git a/chromium/third_party/dawn/tools/src/cmd/cts/time/time.go b/chromium/third_party/dawn/tools/src/cmd/cts/time/time.go
new file mode 100644
index 00000000000..7bcb682a065
--- /dev/null
+++ b/chromium/third_party/dawn/tools/src/cmd/cts/time/time.go
@@ -0,0 +1,148 @@
+// Copyright 2022 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package time
+
+import (
+ "context"
+ "flag"
+ "fmt"
+ "math"
+ "sort"
+ "time"
+
+ "dawn.googlesource.com/dawn/tools/src/cmd/cts/common"
+ "dawn.googlesource.com/dawn/tools/src/cts/result"
+ "dawn.googlesource.com/dawn/tools/src/subcmd"
+ "go.chromium.org/luci/auth/client/authcli"
+)
+
+func init() {
+ common.Register(&cmd{})
+}
+
+type cmd struct {
+ flags struct {
+ source common.ResultSource
+ auth authcli.Flags
+ tags string
+ topN int
+ histogram bool
+ }
+}
+
+func (cmd) Name() string {
+ return "time"
+}
+
+func (cmd) Desc() string {
+ return "displays timing information for tests"
+}
+
+func (c *cmd) RegisterFlags(ctx context.Context, cfg common.Config) ([]string, error) {
+ c.flags.source.RegisterFlags(cfg)
+ c.flags.auth.Register(flag.CommandLine, common.DefaultAuthOptions())
+ flag.IntVar(&c.flags.topN, "top", 0, "print the top N slowest tests")
+ flag.BoolVar(&c.flags.histogram, "histogram", false, "print a histogram of test timings")
+ flag.StringVar(&c.flags.tags, "tags", "", "comma-separated list of tags to filter results")
+ return nil, nil
+}
+
+func (c *cmd) Run(ctx context.Context, cfg common.Config) error {
+ // Validate command line arguments
+ auth, err := c.flags.auth.Options()
+ if err != nil {
+ return fmt.Errorf("failed to obtain authentication options: %w", err)
+ }
+
+ // Obtain the results
+ results, err := c.flags.source.GetResults(ctx, cfg, auth)
+ if err != nil {
+ return err
+ }
+
+ if len(results) == 0 {
+ return fmt.Errorf("no results found")
+ }
+
+ // If tags were provided, filter the results to those that contain these tags
+ if c.flags.tags != "" {
+ results = results.FilterByTags(result.StringToTags(c.flags.tags))
+ if len(results) == 0 {
+ return fmt.Errorf("no results after filtering by tags")
+ }
+ }
+
+ // Sort the results with longest duration first
+ sort.Slice(results, func(i, j int) bool {
+ return results[i].Duration > results[j].Duration
+ })
+
+ didSomething := false
+
+ // Did the user request --top N ?
+ if c.flags.topN > 0 {
+ didSomething = true
+ topN := results
+ if c.flags.topN < len(results) {
+ topN = topN[:c.flags.topN]
+ }
+ for i, r := range topN {
+ fmt.Printf("%3.1d: %v\n", i, r)
+ }
+ }
+
+ // Did the user request --histogram ?
+ if c.flags.histogram {
+ maxTime := results[0].Duration
+
+ const (
+ numBins = 25
+ pow = 2.0
+ )
+
+ binToDuration := func(i int) time.Duration {
+ frac := math.Pow(float64(i)/float64(numBins), pow)
+ return time.Duration(float64(maxTime) * frac)
+ }
+ durationToBin := func(d time.Duration) int {
+ frac := math.Pow(float64(d)/float64(maxTime), 1.0/pow)
+ idx := int(frac * numBins)
+ if idx >= numBins-1 {
+ return numBins - 1
+ }
+ return idx
+ }
+
+ didSomething = true
+ bins := make([]int, numBins)
+ for _, r := range results {
+ idx := durationToBin(r.Duration)
+ bins[idx] = bins[idx] + 1
+ }
+ for i, bin := range bins {
+ fmt.Printf("[%.8v, %.8v]: %v\n", binToDuration(i), binToDuration(i+1), bin)
+ }
+ }
+
+ // If the user didn't request anything, show a helpful message
+ if !didSomething {
+ fmt.Fprintln(flag.CommandLine.Output(), "no action flags specified for", c.Name())
+ fmt.Fprintln(flag.CommandLine.Output())
+ flag.Usage()
+ return subcmd.ErrInvalidCLA
+ }
+
+ return nil
+}
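
The --histogram path above buckets durations on a power curve rather than linearly: binToDuration and durationToBin are inverses of each other, and with pow = 2 the early bins are narrow while the late bins are wide. A small standalone reproduction of that math (maxTime here is an arbitrary example value, not taken from real results) shows how the bin edges spread out toward the slow end:

package main

import (
	"fmt"
	"math"
	"time"
)

func main() {
	const (
		numBins = 25
		pow     = 2.0
	)
	maxTime := 10 * time.Second // arbitrary example maximum

	// Same mapping as the time command above: bin i covers the range
	// [binToDuration(i), binToDuration(i+1)].
	binToDuration := func(i int) time.Duration {
		frac := math.Pow(float64(i)/float64(numBins), pow)
		return time.Duration(float64(maxTime) * frac)
	}

	// With a 10s maximum, the first bins are only milliseconds wide while
	// the last bins approach a second, so a handful of very slow tests
	// does not flatten the rest of the histogram.
	for _, i := range []int{0, 1, 2, 12, 24, 25} {
		fmt.Printf("bin edge %2d: %v\n", i, binToDuration(i))
	}
}
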
diff --git a/chromium/third_party/dawn/tools/src/cmd/cts/update/update.go b/chromium/third_party/dawn/tools/src/cmd/cts/update/update.go
new file mode 100644
index 00000000000..43013937c2e
--- /dev/null
+++ b/chromium/third_party/dawn/tools/src/cmd/cts/update/update.go
@@ -0,0 +1,87 @@
+// Copyright 2022 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package update
+
+import (
+ "context"
+ "flag"
+ "fmt"
+
+ "dawn.googlesource.com/dawn/tools/src/cmd/cts/common"
+ "dawn.googlesource.com/dawn/tools/src/cts/expectations"
+ "go.chromium.org/luci/auth/client/authcli"
+)
+
+func init() {
+ common.Register(&cmd{})
+}
+
+type cmd struct {
+ flags struct {
+ results common.ResultSource
+ expectations string
+ auth authcli.Flags
+ }
+}
+
+func (cmd) Name() string {
+ return "update"
+}
+
+func (cmd) Desc() string {
+ return "updates a CTS expectations file"
+}
+
+func (c *cmd) RegisterFlags(ctx context.Context, cfg common.Config) ([]string, error) {
+ defaultExpectations := common.DefaultExpectationsPath()
+ c.flags.results.RegisterFlags(cfg)
+ c.flags.auth.Register(flag.CommandLine, common.DefaultAuthOptions())
+ flag.StringVar(&c.flags.expectations, "expectations", defaultExpectations, "path to CTS expectations file to update")
+ return nil, nil
+}
+
+func (c *cmd) Run(ctx context.Context, cfg common.Config) error {
+ // Validate command line arguments
+ auth, err := c.flags.auth.Options()
+ if err != nil {
+ return fmt.Errorf("failed to obtain authentication options: %w", err)
+ }
+
+ // Fetch the results
+ results, err := c.flags.results.GetResults(ctx, cfg, auth)
+ if err != nil {
+ return err
+ }
+
+ // Load the expectations file
+ ex, err := expectations.Load(c.flags.expectations)
+ if err != nil {
+ return err
+ }
+
+ // Update the expectations file with the results
+ msgs, err := ex.Update(results)
+ if err != nil {
+ return err
+ }
+
+ // Print any diagnostics
+ for _, msg := range msgs {
+ fmt.Printf("%v:%v %v\n", c.flags.expectations, msg.Line, msg.Message)
+ }
+
+ // Save the updated expectations file
+ return ex.Save(c.flags.expectations)
+}
diff --git a/chromium/third_party/dawn/tools/src/cmd/builtin-gen/ast/ast.go b/chromium/third_party/dawn/tools/src/cmd/intrinsic-gen/ast/ast.go
index 228c9f6bc9f..f127dc8bd3e 100644
--- a/chromium/third_party/dawn/tools/src/cmd/builtin-gen/ast/ast.go
+++ b/chromium/third_party/dawn/tools/src/cmd/intrinsic-gen/ast/ast.go
@@ -20,15 +20,18 @@ import (
"fmt"
"strings"
- "dawn.googlesource.com/dawn/tools/src/cmd/builtin-gen/tok"
+ "dawn.googlesource.com/dawn/tools/src/cmd/intrinsic-gen/tok"
)
// AST is the parsed syntax tree of the intrinsic definition file
type AST struct {
- Enums []EnumDecl
- Types []TypeDecl
- Matchers []MatcherDecl
- Functions []FunctionDecl
+ Enums []EnumDecl
+ Types []TypeDecl
+ Matchers []MatcherDecl
+ Builtins []IntrinsicDecl
+ Constructors []IntrinsicDecl
+ Converters []IntrinsicDecl
+ Operators []IntrinsicDecl
}
func (a AST) String() string {
@@ -45,8 +48,20 @@ func (a AST) String() string {
fmt.Fprintf(&sb, "%v", m)
fmt.Fprintln(&sb)
}
- for _, f := range a.Functions {
- fmt.Fprintf(&sb, "%v", f)
+ for _, b := range a.Builtins {
+ fmt.Fprintf(&sb, "%v", b)
+ fmt.Fprintln(&sb)
+ }
+ for _, o := range a.Constructors {
+ fmt.Fprintf(&sb, "%v", o)
+ fmt.Fprintln(&sb)
+ }
+ for _, o := range a.Converters {
+ fmt.Fprintf(&sb, "%v", o)
+ fmt.Fprintln(&sb)
+ }
+ for _, o := range a.Operators {
+ fmt.Fprintf(&sb, "%v", o)
fmt.Fprintln(&sb)
}
return sb.String()
@@ -70,15 +85,15 @@ func (e EnumDecl) Format(w fmt.State, verb rune) {
 // EnumEntry describes an entry in an enumerator
type EnumEntry struct {
- Source tok.Source
- Name string
- Decorations Decorations
+ Source tok.Source
+ Name string
+ Attributes Attributes
}
// Format implements the fmt.Formatter interface
func (e EnumEntry) Format(w fmt.State, verb rune) {
- if len(e.Decorations) > 0 {
- fmt.Fprintf(w, "%v %v", e.Decorations, e.Name)
+ if len(e.Attributes) > 0 {
+ fmt.Fprintf(w, "%v %v", e.Attributes, e.Name)
} else {
fmt.Fprint(w, e.Name)
}
@@ -98,24 +113,53 @@ func (m MatcherDecl) Format(w fmt.State, verb rune) {
m.Options.Format(w, verb)
}
-// FunctionDecl describes a function declaration
-type FunctionDecl struct {
+// IntrinsicKind is either a Builtin, Operator, Constructor or Converter
+type IntrinsicKind string
+
+const (
+ // Builtin is a builtin function (max, fract, etc).
+ // Declared with 'fn'.
+ Builtin IntrinsicKind = "builtin"
+ // Operator is a unary or binary operator.
+ // Declared with 'op'.
+ Operator IntrinsicKind = "operator"
+ // Constructor is a type constructor function.
+ // Declared with 'ctor'.
+ Constructor IntrinsicKind = "constructor"
+ // Converter is a type conversion function.
+ // Declared with 'conv'.
+ Converter IntrinsicKind = "converter"
+)
+
+// IntrinsicDecl describes a builtin or operator declaration
+type IntrinsicDecl struct {
Source tok.Source
+ Kind IntrinsicKind
Name string
- Decorations Decorations
+ Attributes Attributes
TemplateParams TemplateParams
Parameters Parameters
ReturnType *TemplatedName
}
// Format implements the fmt.Formatter interface
-func (f FunctionDecl) Format(w fmt.State, verb rune) {
- fmt.Fprintf(w, "fn %v", f.Name)
- f.TemplateParams.Format(w, verb)
- f.Parameters.Format(w, verb)
- if f.ReturnType != nil {
+func (i IntrinsicDecl) Format(w fmt.State, verb rune) {
+ switch i.Kind {
+ case Builtin:
+ fmt.Fprintf(w, "fn ")
+ case Operator:
+ fmt.Fprintf(w, "op ")
+ case Constructor:
+ fmt.Fprintf(w, "ctor ")
+ case Converter:
+ fmt.Fprintf(w, "conv ")
+ }
+ fmt.Fprintf(w, "%v", i.Name)
+ i.TemplateParams.Format(w, verb)
+ i.Parameters.Format(w, verb)
+ if i.ReturnType != nil {
fmt.Fprintf(w, " -> ")
- f.ReturnType.Format(w, verb)
+ i.ReturnType.Format(w, verb)
}
}
@@ -129,6 +173,7 @@ func (l Parameters) Format(w fmt.State, verb rune) {
if i > 0 {
fmt.Fprintf(w, ", ")
}
+ p.Attributes.Format(w, verb)
p.Format(w, verb)
}
fmt.Fprintf(w, ")")
@@ -136,9 +181,10 @@ func (l Parameters) Format(w fmt.State, verb rune) {
// Parameter describes a single parameter of a function
type Parameter struct {
- Source tok.Source
- Name string // Optional
- Type TemplatedName
+ Source tok.Source
+ Attributes Attributes
+ Name string // Optional
+ Type TemplatedName
}
// Format implements the fmt.Formatter interface
@@ -199,15 +245,15 @@ func (t TemplatedName) Format(w fmt.State, verb rune) {
// TypeDecl describes a type declaration
type TypeDecl struct {
Source tok.Source
- Decorations Decorations
+ Attributes Attributes
Name string
TemplateParams TemplateParams
}
// Format implements the fmt.Formatter interface
func (p TypeDecl) Format(w fmt.State, verb rune) {
- if len(p.Decorations) > 0 {
- p.Decorations.Format(w, verb)
+ if len(p.Attributes) > 0 {
+ p.Attributes.Format(w, verb)
 }
 fmt.Fprintf(w, "type %v", p.Name)
@@ -252,47 +298,44 @@ func (t TemplateParam) Format(w fmt.State, verb rune) {
}
}
-// Decorations is a list of Decoration
+// Attributes is a list of Attribute
// Example:
 // @a(x) @b(y)
-type Decorations []Decoration
+type Attributes []Attribute
// Format implements the fmt.Formatter interface
-func (l Decorations) Format(w fmt.State, verb rune) {
- fmt.Fprint(w, "[[")
- for i, d := range l {
- if i > 0 {
- fmt.Fprintf(w, ", ")
- }
+func (l Attributes) Format(w fmt.State, verb rune) {
+ for _, d := range l {
+ fmt.Fprint(w, "@")
d.Format(w, verb)
+ fmt.Fprint(w, " ")
}
- fmt.Fprint(w, "]]")
}
-// Take looks up the decoration with the given name. If the decoration is found
-// it is removed from the Decorations list and returned, otherwise nil is
-// returned and the Decorations are not altered.
-func (l *Decorations) Take(name string) *Decoration {
- for i, d := range *l {
- if d.Name == name {
+// Take looks up the attribute with the given name. If the attribute is found
+// it is removed from the Attributes list and returned, otherwise nil is
+// returned and the Attributes are not altered.
+func (l *Attributes) Take(name string) *Attribute {
+ for i, a := range *l {
+ if a.Name == name {
*l = append((*l)[:i], (*l)[i+1:]...)
- return &d
+ return &a
}
}
return nil
}
-// Decoration describes a single decoration
+// Attribute describes a single attribute
// Example:
-// a(x)
-type Decoration struct {
+// @a(x)
+type Attribute struct {
Source tok.Source
Name string
Values []string
}
// Format implements the fmt.Formatter interface
-func (d Decoration) Format(w fmt.State, verb rune) {
+func (d Attribute) Format(w fmt.State, verb rune) {
fmt.Fprintf(w, "%v", d.Name)
if len(d.Values) > 0 {
fmt.Fprintf(w, "(")
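
One behavioural detail of the new Attributes type above that is easy to miss is that Take removes the attribute it returns. A small illustrative test of that remove-and-return contract — not part of this patch, the attribute names are arbitrary placeholders — using only the fields and method shown above:

package ast_test

import (
	"testing"

	"dawn.googlesource.com/dawn/tools/src/cmd/intrinsic-gen/ast"
)

func TestAttributesTake(t *testing.T) {
	attrs := ast.Attributes{
		{Name: "alpha"},
		{Name: "beta", Values: []string{"1"}},
	}

	// Taking an attribute returns it and removes it from the list.
	if got := attrs.Take("alpha"); got == nil || got.Name != "alpha" {
		t.Fatalf(`Take("alpha") = %v`, got)
	}
	if len(attrs) != 1 || attrs[0].Name != "beta" {
		t.Fatalf("unexpected remaining attributes: %v", attrs)
	}

	// Taking a missing attribute returns nil and leaves the list alone.
	if got := attrs.Take("missing"); got != nil {
		t.Fatalf(`Take("missing") = %v, want nil`, got)
	}
	if len(attrs) != 1 {
		t.Fatalf("attributes modified by failed Take: %v", attrs)
	}
}
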
diff --git a/chromium/third_party/dawn/tools/src/cmd/builtin-gen/gen/builtin_table.go b/chromium/third_party/dawn/tools/src/cmd/intrinsic-gen/gen/builtin_table.go
index 10cdcb0da9c..7d9ef7ad4b4 100644
--- a/chromium/third_party/dawn/tools/src/cmd/builtin-gen/gen/builtin_table.go
+++ b/chromium/third_party/dawn/tools/src/cmd/intrinsic-gen/gen/builtin_table.go
@@ -17,46 +17,49 @@ package gen
import (
"fmt"
- "dawn.googlesource.com/dawn/tools/src/cmd/builtin-gen/sem"
+ "dawn.googlesource.com/dawn/tools/src/cmd/intrinsic-gen/sem"
"dawn.googlesource.com/dawn/tools/src/list"
"dawn.googlesource.com/dawn/tools/src/lut"
)
-// BuiltinTable holds data specific to the intrinsic_table.inl.tmpl template
-type BuiltinTable struct {
+// IntrinsicTable holds data specific to the intrinsic_table.inl.tmpl template
+type IntrinsicTable struct {
// The semantic info
Sem *sem.Sem
- // TMatchers are all the sem.OpenType, sem.Type and sem.TypeMatchers.
+ // TMatchers are all the sem.TemplateType, sem.Type and sem.TypeMatchers.
// These are all implemented by classes deriving from tint::TypeMatcher
TMatchers []sem.Named
TMatcherIndex map[sem.Named]int // [object -> index] in TMatcher
- // NMatchers are all the sem.OpenNumber and sem.EnumMatchers.
+ // NMatchers are all the sem.TemplateNumber and sem.EnumMatchers.
// These are all implemented by classes deriving from tint::NumberMatcher
NMatchers []sem.Named
NMatcherIndex map[sem.Named]int // [object -> index] in NMatchers
- MatcherIndices []int // kMatcherIndices table content
- OpenTypes []OpenType // kOpenTypes table content
- OpenNumbers []OpenNumber // kOpenNumbers table content
- Parameters []Parameter // kParameters table content
- Overloads []Overload // kOverloads table content
- Functions []Function // kBuiltins table content
+ MatcherIndices []int // kMatcherIndices table content
+ TemplateTypes []TemplateType // kTemplateTypes table content
+ TemplateNumbers []TemplateNumber // kTemplateNumbers table content
+ Parameters []Parameter // kParameters table content
+ Overloads []Overload // kOverloads table content
+ Builtins []Intrinsic // kBuiltins table content
+ UnaryOperators []Intrinsic // kUnaryOperators table content
+ BinaryOperators []Intrinsic // kBinaryOperators table content
+ ConstructorsAndConverters []Intrinsic // kConstructorsAndConverters table content
}
-// OpenType is used to create the C++ OpenTypeInfo structure
-type OpenType struct {
- // Name of the open type (e.g. 'T')
+// TemplateType is used to create the C++ TemplateTypeInfo structure
+type TemplateType struct {
+ // Name of the template type (e.g. 'T')
Name string
// Optional type matcher constraint.
// Either an index in Matchers::type, or -1
MatcherIndex int
}
-// OpenNumber is used to create the C++ OpenNumberInfo structure
-type OpenNumber struct {
- // Name of the open number (e.g. 'N')
+// TemplateNumber is used to create the C++ TemplateNumberInfo structure
+type TemplateNumber struct {
+ // Name of the template number (e.g. 'N')
Name string
// Optional type matcher constraint.
// Either an index in Matchers::type, or -1
@@ -68,9 +71,9 @@ type Parameter struct {
// The parameter usage (parameter name)
Usage string
- // Index into BuiltinTable.MatcherIndices, beginning the list of matchers
+ // Index into IntrinsicTable.MatcherIndices, beginning the list of matchers
// required to match the parameter type. The matcher indices index
- // into BuiltinTable::TMatchers and / or BuiltinTable::NMatchers.
+ // into IntrinsicTable::TMatchers and / or IntrinsicTable::NMatchers.
// These indices are consumed by the matchers themselves.
// The first index is always a TypeMatcher.
MatcherIndicesOffset *int
@@ -80,19 +83,19 @@ type Parameter struct {
type Overload struct {
// Total number of parameters for the overload
NumParameters int
- // Total number of open types for the overload
- NumOpenTypes int
- // Total number of open numbers for the overload
- NumOpenNumbers int
- // Index to the first open type in BuiltinTable.OpenTypes
- OpenTypesOffset *int
- // Index to the first open number in BuiltinTable.OpenNumbers
- OpenNumbersOffset *int
- // Index to the first parameter in BuiltinTable.Parameters
+ // Total number of template types for the overload
+ NumTemplateTypes int
+ // Total number of template numbers for the overload
+ NumTemplateNumbers int
+ // Index to the first template type in IntrinsicTable.TemplateTypes
+ TemplateTypesOffset *int
+ // Index to the first template number in IntrinsicTable.TemplateNumbers
+ TemplateNumbersOffset *int
+ // Index to the first parameter in IntrinsicTable.Parameters
ParametersOffset *int
- // Index into BuiltinTable.MatcherIndices, beginning the list of matchers
+ // Index into IntrinsicTable.MatcherIndices, beginning the list of matchers
// required to match the return type. The matcher indices index
- // into BuiltinTable::TMatchers and / or BuiltinTable::NMatchers.
+ // into IntrinsicTable::TMatchers and / or IntrinsicTable::NMatchers.
// These indices are consumed by the matchers themselves.
// The first index is always a TypeMatcher.
ReturnMatcherIndicesOffset *int
@@ -100,47 +103,52 @@ type Overload struct {
CanBeUsedInStage sem.StageUses
// True if the overload is marked as deprecated
IsDeprecated bool
+ // The kind of overload
+ Kind string
+ // The function name used to evaluate the overload at shader-creation time
+ ConstEvalFunction string
}
-// Function is used to create the C++ IntrinsicInfo structure
-type Function struct {
+// Intrinsic is used to create the C++ IntrinsicInfo structure
+type Intrinsic struct {
+ Name string
OverloadDescriptions []string
NumOverloads int
OverloadsOffset *int
}
-// Helper for building the BuiltinTable
-type BuiltinTableBuilder struct {
+// Helper for building the IntrinsicTable
+type IntrinsicTableBuilder struct {
// The output of the builder
- BuiltinTable
+ IntrinsicTable
// Lookup tables.
// These are packed (compressed) once all the entries have been added.
lut struct {
- matcherIndices lut.LUT
- openTypes lut.LUT
- openNumbers lut.LUT
- parameters lut.LUT
- overloads lut.LUT
+ matcherIndices lut.LUT
+ templateTypes lut.LUT
+ templateNumbers lut.LUT
+ parameters lut.LUT
+ overloads lut.LUT
}
}
// Helper for building a single overload
type overloadBuilder struct {
- *BuiltinTableBuilder
- // Maps TemplateParam to index in openTypes
- openTypeIndex map[sem.TemplateParam]int
- // Maps TemplateParam to index in openNumbers
- openNumberIndex map[sem.TemplateParam]int
- // Open types used by the overload
- openTypes []OpenType
- // Open numbers used by the overload
- openNumbers []OpenNumber
+ *IntrinsicTableBuilder
+ // Maps TemplateParam to index in templateTypes
+ templateTypeIndex map[sem.TemplateParam]int
+ // Maps TemplateParam to index in templateNumbers
+ templateNumberIndex map[sem.TemplateParam]int
+ // Template types used by the overload
+ templateTypes []TemplateType
+ // Template numbers used by the overload
+ templateNumbers []TemplateNumber
// All parameters declared by the overload
parameters []Parameter
- // Index into BuiltinTable.MatcherIndices, beginning the list of matchers
+ // Index into IntrinsicTable.MatcherIndices, beginning the list of matchers
// required to match the return type. The matcher indices index
- // into BuiltinTable::TMatchers and / or BuiltinTable::NMatchers.
+ // into IntrinsicTable::TMatchers and / or IntrinsicTable::NMatchers.
// These indices are consumed by the matchers themselves.
// The first index is always a TypeMatcher.
returnTypeMatcherIndicesOffset *int
@@ -148,9 +156,9 @@ type overloadBuilder struct {
// layoutMatchers assigns each of the TMatchers and NMatchers a unique index
// in the C++ Matchers::type and Matchers::number arrays, respectively.
-func (b *BuiltinTableBuilder) layoutMatchers(s *sem.Sem) {
- // First MaxOpenTypes of TMatchers are open types
- b.TMatchers = make([]sem.Named, s.MaxOpenTypes)
+func (b *IntrinsicTableBuilder) layoutMatchers(s *sem.Sem) {
+ // First MaxTemplateTypes of TMatchers are template types
+ b.TMatchers = make([]sem.Named, s.MaxTemplateTypes)
for _, m := range s.Types {
b.TMatcherIndex[m] = len(b.TMatchers)
b.TMatchers = append(b.TMatchers, m)
@@ -160,8 +168,8 @@ func (b *BuiltinTableBuilder) layoutMatchers(s *sem.Sem) {
b.TMatchers = append(b.TMatchers, m)
}
- // First MaxOpenNumbers of NMatchers are open numbers
- b.NMatchers = make([]sem.Named, s.MaxOpenNumbers)
+ // First MaxTemplateNumbers of NMatchers are template numbers
+ b.NMatchers = make([]sem.Named, s.MaxTemplateNumbers)
for _, m := range s.EnumMatchers {
b.NMatcherIndex[m] = len(b.NMatchers)
b.NMatchers = append(b.NMatchers, m)
@@ -169,17 +177,17 @@ func (b *BuiltinTableBuilder) layoutMatchers(s *sem.Sem) {
}
// buildOverload constructs an Overload for a sem.Overload
-func (b *BuiltinTableBuilder) buildOverload(o *sem.Overload) (Overload, error) {
+func (b *IntrinsicTableBuilder) buildOverload(o *sem.Overload) (Overload, error) {
ob := overloadBuilder{
- BuiltinTableBuilder: b,
- openTypeIndex: map[sem.TemplateParam]int{},
- openNumberIndex: map[sem.TemplateParam]int{},
+ IntrinsicTableBuilder: b,
+ templateTypeIndex: map[sem.TemplateParam]int{},
+ templateNumberIndex: map[sem.TemplateParam]int{},
}
- if err := ob.buildOpenTypes(o); err != nil {
+ if err := ob.buildTemplateTypes(o); err != nil {
return Overload{}, err
}
- if err := ob.buildOpenNumbers(o); err != nil {
+ if err := ob.buildTemplateNumbers(o); err != nil {
return Overload{}, err
}
if err := ob.buildParameters(o); err != nil {
@@ -191,23 +199,25 @@ func (b *BuiltinTableBuilder) buildOverload(o *sem.Overload) (Overload, error) {
return Overload{
NumParameters: len(ob.parameters),
- NumOpenTypes: len(ob.openTypes),
- NumOpenNumbers: len(ob.openNumbers),
- OpenTypesOffset: b.lut.openTypes.Add(ob.openTypes),
- OpenNumbersOffset: b.lut.openNumbers.Add(ob.openNumbers),
+ NumTemplateTypes: len(ob.templateTypes),
+ NumTemplateNumbers: len(ob.templateNumbers),
+ TemplateTypesOffset: b.lut.templateTypes.Add(ob.templateTypes),
+ TemplateNumbersOffset: b.lut.templateNumbers.Add(ob.templateNumbers),
ParametersOffset: b.lut.parameters.Add(ob.parameters),
ReturnMatcherIndicesOffset: ob.returnTypeMatcherIndicesOffset,
CanBeUsedInStage: o.CanBeUsedInStage,
IsDeprecated: o.IsDeprecated,
+ Kind: string(o.Decl.Kind),
+ ConstEvalFunction: o.ConstEvalFunction,
}, nil
}
-// buildOpenTypes constructs the OpenTypes used by the overload, populating
-// b.openTypes
-func (b *overloadBuilder) buildOpenTypes(o *sem.Overload) error {
- b.openTypes = make([]OpenType, len(o.OpenTypes))
- for i, t := range o.OpenTypes {
- b.openTypeIndex[t] = i
+// buildTemplateTypes constructs the TemplateTypes used by the overload, populating
+// b.templateTypes
+func (b *overloadBuilder) buildTemplateTypes(o *sem.Overload) error {
+ b.templateTypes = make([]TemplateType, len(o.TemplateTypes))
+ for i, t := range o.TemplateTypes {
+ b.templateTypeIndex[t] = i
matcherIndex := -1
if t.Type != nil {
var err error
@@ -216,7 +226,7 @@ func (b *overloadBuilder) buildOpenTypes(o *sem.Overload) error {
return err
}
}
- b.openTypes[i] = OpenType{
+ b.templateTypes[i] = TemplateType{
Name: t.Name,
MatcherIndex: matcherIndex,
}
@@ -224,12 +234,12 @@ func (b *overloadBuilder) buildOpenTypes(o *sem.Overload) error {
return nil
}
-// buildOpenNumbers constructs the OpenNumbers used by the overload, populating
-// b.openNumbers
-func (b *overloadBuilder) buildOpenNumbers(o *sem.Overload) error {
- b.openNumbers = make([]OpenNumber, len(o.OpenNumbers))
- for i, t := range o.OpenNumbers {
- b.openNumberIndex[t] = i
+// buildTemplateNumbers constructs the TemplateNumbers used by the overload, populating
+// b.templateNumbers
+func (b *overloadBuilder) buildTemplateNumbers(o *sem.Overload) error {
+ b.templateNumbers = make([]TemplateNumber, len(o.TemplateNumbers))
+ for i, t := range o.TemplateNumbers {
+ b.templateNumberIndex[t] = i
matcherIndex := -1
if e, ok := t.(*sem.TemplateEnumParam); ok && e.Matcher != nil {
var err error
@@ -238,7 +248,7 @@ func (b *overloadBuilder) buildOpenNumbers(o *sem.Overload) error {
return err
}
}
- b.openNumbers[i] = OpenNumber{
+ b.templateNumbers[i] = TemplateNumber{
Name: t.GetName(),
MatcherIndex: matcherIndex,
}
@@ -279,7 +289,7 @@ func (b *overloadBuilder) buildReturnType(o *sem.Overload) error {
}
// matcherIndex returns the index of TMatcher or NMatcher in
-// BuiltinTable.TMatcher or BuiltinTable.NMatcher, respectively.
+// IntrinsicTable.TMatcher or IntrinsicTable.NMatcher, respectively.
func (b *overloadBuilder) matcherIndex(n sem.Named) (int, error) {
switch n := n.(type) {
case *sem.Type, *sem.TypeMatcher:
@@ -288,25 +298,25 @@ func (b *overloadBuilder) matcherIndex(n sem.Named) (int, error) {
}
return 0, fmt.Errorf("matcherIndex missing entry for %v %T", n.GetName(), n)
case *sem.TemplateTypeParam:
- if i, ok := b.openTypeIndex[n]; ok {
+ if i, ok := b.templateTypeIndex[n]; ok {
return i, nil
}
- return 0, fmt.Errorf("openTypeIndex missing entry for %v %T", n.Name, n)
+ return 0, fmt.Errorf("templateTypeIndex missing entry for %v %T", n.Name, n)
case *sem.EnumMatcher:
if i, ok := b.NMatcherIndex[n]; ok {
return i, nil
}
return 0, fmt.Errorf("matcherIndex missing entry for %v %T", n.GetName(), n)
case *sem.TemplateEnumParam:
- if i, ok := b.openNumberIndex[n]; ok {
+ if i, ok := b.templateNumberIndex[n]; ok {
return i, nil
}
- return 0, fmt.Errorf("openNumberIndex missing entry for %v %T", n, n)
+ return 0, fmt.Errorf("templateNumberIndex missing entry for %v %T", n, n)
case *sem.TemplateNumberParam:
- if i, ok := b.openNumberIndex[n]; ok {
+ if i, ok := b.templateNumberIndex[n]; ok {
return i, nil
}
- return 0, fmt.Errorf("openNumberIndex missing entry for %v %T", n, n)
+ return 0, fmt.Errorf("templateNumberIndex missing entry for %v %T", n, n)
default:
return 0, fmt.Errorf("overload.matcherIndex() does not handle %v %T", n, n)
}
@@ -342,46 +352,58 @@ func (b *overloadBuilder) collectMatcherIndices(fqn sem.FullyQualifiedName) ([]i
return out, nil
}
-// buildBuiltinTable builds the BuiltinTable from the semantic info
-func buildBuiltinTable(s *sem.Sem) (*BuiltinTable, error) {
- b := BuiltinTableBuilder{
- BuiltinTable: BuiltinTable{
+// buildIntrinsicTable builds the IntrinsicTable from the semantic info
+func buildIntrinsicTable(s *sem.Sem) (*IntrinsicTable, error) {
+ b := IntrinsicTableBuilder{
+ IntrinsicTable: IntrinsicTable{
Sem: s,
TMatcherIndex: map[sem.Named]int{},
NMatcherIndex: map[sem.Named]int{},
},
}
b.lut.matcherIndices = lut.New(list.Wrap(&b.MatcherIndices))
- b.lut.openTypes = lut.New(list.Wrap(&b.OpenTypes))
- b.lut.openNumbers = lut.New(list.Wrap(&b.OpenNumbers))
+ b.lut.templateTypes = lut.New(list.Wrap(&b.TemplateTypes))
+ b.lut.templateNumbers = lut.New(list.Wrap(&b.TemplateNumbers))
b.lut.parameters = lut.New(list.Wrap(&b.Parameters))
b.lut.overloads = lut.New(list.Wrap(&b.Overloads))
b.layoutMatchers(s)
- for _, f := range s.Functions {
- overloads := make([]Overload, len(f.Overloads))
- overloadDescriptions := make([]string, len(f.Overloads))
- for i, o := range f.Overloads {
- overloadDescriptions[i] = fmt.Sprint(o.Decl)
- var err error
- if overloads[i], err = b.buildOverload(o); err != nil {
- return nil, err
+ for _, intrinsics := range []struct {
+ in []*sem.Intrinsic
+ out *[]Intrinsic
+ }{
+ {s.Builtins, &b.Builtins},
+ {s.UnaryOperators, &b.UnaryOperators},
+ {s.BinaryOperators, &b.BinaryOperators},
+ {s.ConstructorsAndConverters, &b.ConstructorsAndConverters},
+ } {
+ out := make([]Intrinsic, len(intrinsics.in))
+ for i, f := range intrinsics.in {
+ overloads := make([]Overload, len(f.Overloads))
+ overloadDescriptions := make([]string, len(f.Overloads))
+ for i, o := range f.Overloads {
+ overloadDescriptions[i] = fmt.Sprint(o.Decl)
+ var err error
+ if overloads[i], err = b.buildOverload(o); err != nil {
+ return nil, err
+ }
+ }
+ out[i] = Intrinsic{
+ Name: f.Name,
+ OverloadDescriptions: overloadDescriptions,
+ NumOverloads: len(overloads),
+ OverloadsOffset: b.lut.overloads.Add(overloads),
}
}
-
- b.Functions = append(b.Functions, Function{
- OverloadDescriptions: overloadDescriptions,
- NumOverloads: len(overloads),
- OverloadsOffset: b.lut.overloads.Add(overloads),
- })
+ *intrinsics.out = out
}
b.lut.matcherIndices.Compact()
- b.lut.openTypes.Compact()
- b.lut.openNumbers.Compact()
+ b.lut.templateTypes.Compact()
+ b.lut.templateNumbers.Compact()
b.lut.parameters.Compact()
b.lut.overloads.Compact()
- return &b.BuiltinTable, nil
+ return &b.IntrinsicTable, nil
}
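
The table builder above never stores overloads, parameters or template types inline; every run of entries is appended to a shared flat table through the lut helpers, and only an offset plus a count is kept, which is what keeps the generated C++ tables compact. As a rough, hedged sketch of how such offset tables are consumed — the struct and field names below are stand-ins for illustration, not the generated Tint code:

package main

import "fmt"

// Stand-in shapes for the flattened tables described by IntrinsicTable: an
// intrinsic points at a run of overloads, and an overload points at a run of
// parameters, both by (offset, count) into shared slices.
type overloadInfo struct {
	numParameters    int
	parametersOffset int
}

type intrinsicInfo struct {
	numOverloads    int
	overloadsOffset int
}

func main() {
	parameters := []string{"x", "y", "x", "y", "z"}
	overloads := []overloadInfo{
		{numParameters: 2, parametersOffset: 0}, // uses "x", "y"
		{numParameters: 3, parametersOffset: 2}, // uses "x", "y", "z"
	}
	intrinsic := intrinsicInfo{numOverloads: 2, overloadsOffset: 0}

	for _, o := range overloads[intrinsic.overloadsOffset : intrinsic.overloadsOffset+intrinsic.numOverloads] {
		params := parameters[o.parametersOffset : o.parametersOffset+o.numParameters]
		fmt.Println(params)
	}
}
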
diff --git a/chromium/third_party/dawn/tools/src/cmd/builtin-gen/gen/generate.go b/chromium/third_party/dawn/tools/src/cmd/intrinsic-gen/gen/generate.go
index 71456b13415..5d83b192354 100644
--- a/chromium/third_party/dawn/tools/src/cmd/builtin-gen/gen/generate.go
+++ b/chromium/third_party/dawn/tools/src/cmd/intrinsic-gen/gen/generate.go
@@ -22,15 +22,15 @@ import (
"text/template"
"unicode"
- "dawn.googlesource.com/dawn/tools/src/cmd/builtin-gen/sem"
+ "dawn.googlesource.com/dawn/tools/src/cmd/intrinsic-gen/sem"
)
type generator struct {
s *sem.Sem
t *template.Template
cached struct {
- builtinTable *BuiltinTable // lazily built by builtinTable()
- permuter *Permuter // lazily built by permute()
+ intrinsicTable *IntrinsicTable // lazily built by intrinsicTable()
+ permuter *Permuter // lazily built by permute()
}
}
@@ -73,7 +73,7 @@ func (g *generator) generate(tmpl string, w io.Writer, writeFile WriteFile) erro
"IsDeclarable": isDeclarable,
"IsFirstIn": isFirstIn,
"IsLastIn": isLastIn,
- "BuiltinTable": g.builtinTable,
+ "IntrinsicTable": g.intrinsicTable,
"Permute": g.permute,
"Eval": g.eval,
"WriteFile": func(relpath, content string) (string, error) { return "", writeFile(relpath, content) },
@@ -121,17 +121,17 @@ func (g *generator) eval(template string, args ...interface{}) (string, error) {
return sb.String(), nil
}
-// builtinTable lazily calls and returns the result of buildBuiltinTable(),
+// intrinsicTable lazily calls and returns the result of buildIntrinsicTable(),
// caching the result for repeated calls.
-func (g *generator) builtinTable() (*BuiltinTable, error) {
- if g.cached.builtinTable == nil {
+func (g *generator) intrinsicTable() (*IntrinsicTable, error) {
+ if g.cached.intrinsicTable == nil {
var err error
- g.cached.builtinTable, err = buildBuiltinTable(g.s)
+ g.cached.intrinsicTable, err = buildIntrinsicTable(g.s)
if err != nil {
return nil, err
}
}
- return g.cached.builtinTable, nil
+ return g.cached.intrinsicTable, nil
}
// permute lazily calls buildPermuter(), caching the result for repeated
diff --git a/chromium/third_party/dawn/tools/src/cmd/builtin-gen/gen/permutate.go b/chromium/third_party/dawn/tools/src/cmd/intrinsic-gen/gen/permutate.go
index e9a5644f26c..fbc390b3951 100644
--- a/chromium/third_party/dawn/tools/src/cmd/builtin-gen/gen/permutate.go
+++ b/chromium/third_party/dawn/tools/src/cmd/intrinsic-gen/gen/permutate.go
@@ -20,7 +20,7 @@ import (
"fmt"
"strings"
- "dawn.googlesource.com/dawn/tools/src/cmd/builtin-gen/sem"
+ "dawn.googlesource.com/dawn/tools/src/cmd/intrinsic-gen/sem"
"dawn.googlesource.com/dawn/tools/src/fileutils"
)
@@ -32,7 +32,7 @@ type Permuter struct {
// buildPermuter returns a new initialized Permuter
func buildPermuter(s *sem.Sem) (*Permuter, error) {
- // allTypes are the list of FQNs that are used for open, unconstrained types
+ // allTypes are the list of FQNs that are used for unconstrained types
allTypes := []sem.FullyQualifiedName{}
for _, ty := range s.Types {
if len(ty.TemplateParams) > 0 {
@@ -58,10 +58,10 @@ type Permutation struct {
// Permute generates a set of permutations for the given intrinsic overload
func (p *Permuter) Permute(overload *sem.Overload) ([]Permutation, error) {
state := permutationState{
- Permuter: p,
- closedTypes: map[sem.TemplateParam]sem.FullyQualifiedName{},
- closedNumbers: map[sem.TemplateParam]interface{}{},
- parameters: map[int]sem.FullyQualifiedName{},
+ Permuter: p,
+ templateTypes: map[sem.TemplateParam]sem.FullyQualifiedName{},
+ templateNumbers: map[sem.TemplateParam]interface{}{},
+ parameters: map[int]sem.FullyQualifiedName{},
}
out := []Permutation{}
@@ -74,7 +74,7 @@ func (p *Permuter) Permute(overload *sem.Overload) ([]Permutation, error) {
permutate := func() error {
o := sem.Overload{
Decl: overload.Decl,
- Function: overload.Function,
+ Intrinsic: overload.Intrinsic,
CanBeUsedInStage: overload.CanBeUsedInStage,
}
for i, p := range overload.Parameters {
@@ -83,8 +83,9 @@ func (p *Permuter) Permute(overload *sem.Overload) ([]Permutation, error) {
return nil
}
o.Parameters = append(o.Parameters, sem.Parameter{
- Name: p.Name,
- Type: ty,
+ Name: p.Name,
+ Type: ty,
+ IsConst: p.IsConst,
})
}
if overload.ReturnType != nil {
@@ -144,15 +145,15 @@ func (p *Permuter) Permute(overload *sem.Overload) ([]Permutation, error) {
var err error
types, err = state.permutateFQN(sem.FullyQualifiedName{Target: t.Type})
if err != nil {
- return nil, fmt.Errorf("while permutating open types: %w", err)
+ return nil, fmt.Errorf("while permutating template types: %w", err)
}
}
if len(types) == 0 {
- return nil, fmt.Errorf("open type %v has no permutations", t.Name)
+ return nil, fmt.Errorf("template type %v has no permutations", t.Name)
}
permutate = func() error {
for _, ty := range types {
- state.closedTypes[t] = ty
+ state.templateTypes[t] = ty
if err := next(); err != nil {
return err
}
@@ -168,14 +169,14 @@ func (p *Permuter) Permute(overload *sem.Overload) ([]Permutation, error) {
permutations, err = state.permutateFQN(sem.FullyQualifiedName{Target: t.Enum})
}
if err != nil {
- return nil, fmt.Errorf("while permutating open numbers: %w", err)
+ return nil, fmt.Errorf("while permutating template numbers: %w", err)
}
if len(permutations) == 0 {
- return nil, fmt.Errorf("open type %v has no permutations", t.Name)
+ return nil, fmt.Errorf("template type %v has no permutations", t.Name)
}
permutate = func() error {
for _, n := range permutations {
- state.closedNumbers[t] = n
+ state.templateNumbers[t] = n
if err := next(); err != nil {
return err
}
@@ -187,7 +188,7 @@ func (p *Permuter) Permute(overload *sem.Overload) ([]Permutation, error) {
permutations := []int{2, 3, 4}
permutate = func() error {
for _, n := range permutations {
- state.closedNumbers[t] = n
+ state.templateNumbers[t] = n
if err := next(); err != nil {
return err
}
@@ -206,19 +207,19 @@ func (p *Permuter) Permute(overload *sem.Overload) ([]Permutation, error) {
type permutationState struct {
*Permuter
- closedTypes map[sem.TemplateParam]sem.FullyQualifiedName
- closedNumbers map[sem.TemplateParam]interface{}
- parameters map[int]sem.FullyQualifiedName
+ templateTypes map[sem.TemplateParam]sem.FullyQualifiedName
+ templateNumbers map[sem.TemplateParam]interface{}
+ parameters map[int]sem.FullyQualifiedName
}
func (s permutationState) String() string {
sb := &strings.Builder{}
- sb.WriteString("Closed types:\n")
- for ct, ty := range s.closedTypes {
+ sb.WriteString("Template types:\n")
+ for ct, ty := range s.templateTypes {
fmt.Fprintf(sb, " %v: %v\n", ct.GetName(), ty)
}
- sb.WriteString("Closed numbers:\n")
- for cn, v := range s.closedNumbers {
+ sb.WriteString("Template numbers:\n")
+ for cn, v := range s.templateNumbers {
fmt.Fprintf(sb, " %v: %v\n", cn.GetName(), v)
}
return sb.String()
@@ -240,13 +241,13 @@ func (s *permutationState) permutateFQN(in sem.FullyQualifiedName) ([]sem.FullyQ
return nil
}
case sem.TemplateParam:
- if ty, ok := s.closedTypes[target]; ok {
+ if ty, ok := s.templateTypes[target]; ok {
permutate = func() error {
out = append(out, ty)
return nil
}
} else {
- return nil, fmt.Errorf("'%v' was not found in closedTypes", target.GetName())
+ return nil, fmt.Errorf("'%v' was not found in templateTypes", target.GetName())
}
case *sem.TypeMatcher:
permutate = func() error {
@@ -284,12 +285,12 @@ func (s *permutationState) permutateFQN(in sem.FullyQualifiedName) ([]sem.FullyQ
case sem.FullyQualifiedName:
switch target := arg.Target.(type) {
case sem.TemplateParam:
- if ty, ok := s.closedTypes[target]; ok {
+ if ty, ok := s.templateTypes[target]; ok {
args[i] = ty
- } else if num, ok := s.closedNumbers[target]; ok {
+ } else if num, ok := s.templateNumbers[target]; ok {
args[i] = num
} else {
- return nil, fmt.Errorf("'%v' was not found in closedTypes or closedNumbers", target.GetName())
+ return nil, fmt.Errorf("'%v' was not found in templateTypes or templateNumbers", target.GetName())
}
default:
perms, err := s.permutateFQN(arg)
@@ -331,6 +332,8 @@ func validate(fqn sem.FullyQualifiedName, uses *sem.StageUses) bool {
strings.Contains(elTyName, "sampler"),
strings.Contains(elTyName, "texture"):
return false // Not storable
+ case elTyName == "af" || elTyName == "ai":
+ return false // Abstract types are neither typeable nor supported by arrays
}
case "ptr":
// https://gpuweb.github.io/gpuweb/wgsl/#storage-class
diff --git a/chromium/third_party/dawn/tools/src/cmd/builtin-gen/lexer/lexer.go b/chromium/third_party/dawn/tools/src/cmd/intrinsic-gen/lexer/lexer.go
index db8d92e88bb..d8a486a41f2 100644
--- a/chromium/third_party/dawn/tools/src/cmd/builtin-gen/lexer/lexer.go
+++ b/chromium/third_party/dawn/tools/src/cmd/intrinsic-gen/lexer/lexer.go
@@ -20,7 +20,7 @@ import (
"fmt"
"unicode"
- "dawn.googlesource.com/dawn/tools/src/cmd/builtin-gen/tok"
+ "dawn.googlesource.com/dawn/tools/src/cmd/intrinsic-gen/tok"
)
// Lex produces a list of tokens for the given source code
@@ -52,10 +52,8 @@ func (l *lexer) lex() error {
l.next()
case '\n':
l.next()
- case '<':
- l.tok(1, tok.Lt)
- case '>':
- l.tok(1, tok.Gt)
+ case '@':
+ l.tok(1, tok.Attr)
case '(':
l.tok(1, tok.Lparen)
case ')':
@@ -68,8 +66,16 @@ func (l *lexer) lex() error {
l.tok(1, tok.Colon)
case ',':
l.tok(1, tok.Comma)
- case '|':
- l.tok(1, tok.Or)
+ case '*':
+ l.tok(1, tok.Star)
+ case '+':
+ l.tok(1, tok.Plus)
+ case '%':
+ l.tok(1, tok.Modulo)
+ case '^':
+ l.tok(1, tok.Xor)
+ case '~':
+ l.tok(1, tok.Complement)
case '"':
start := l.loc
l.next() // Skip opening quote
@@ -81,20 +87,37 @@ func (l *lexer) lex() error {
l.next() // Skip closing quote
default:
switch {
- case l.peek(1) == '/':
+ case l.peek(0) == '/' && l.peek(1) == '/':
l.skip(l.count(toFirst('\n')))
l.next() // Consume newline
- case l.match("[[", tok.Ldeco):
- case l.match("]]", tok.Rdeco):
+ case l.match("/", tok.Divide):
case l.match("->", tok.Arrow):
+ case l.match("-", tok.Minus):
case l.match("fn", tok.Function):
+ case l.match("op", tok.Operator):
case l.match("enum", tok.Enum):
case l.match("type", tok.Type):
+ case l.match("ctor", tok.Constructor):
+ case l.match("conv", tok.Converter):
case l.match("match", tok.Match):
case unicode.IsLetter(l.peek(0)) || l.peek(0) == '_':
l.tok(l.count(alphaNumericOrUnderscore), tok.Identifier)
case unicode.IsNumber(l.peek(0)):
l.tok(l.count(unicode.IsNumber), tok.Integer)
+ case l.match("&&", tok.AndAnd):
+ case l.match("&", tok.And):
+ case l.match("||", tok.OrOr):
+ case l.match("|", tok.Or):
+ case l.match("!=", tok.NotEqual):
+ case l.match("!", tok.Not):
+ case l.match("==", tok.Equal):
+ case l.match("=", tok.Assign):
+ case l.match("<<", tok.Shl):
+ case l.match("<=", tok.Le):
+ case l.match("<", tok.Lt):
+ case l.match(">=", tok.Ge):
+ case l.match(">>", tok.Shr):
+ case l.match(">", tok.Gt):
default:
return fmt.Errorf("%v: unexpected '%v'", l.loc, string(l.runes[0]))
}
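
A consequence of the case ordering above is that two-rune operators are matched before their one-rune prefixes, so "<<" lexes as a single Shl token rather than two Lt tokens. A small standalone test sketch of that property — not part of this patch, and deliberately loose so it only relies on the Lex signature and token kinds used by lexer_test.go below:

package lexer_test

import (
	"testing"

	"dawn.googlesource.com/dawn/tools/src/cmd/intrinsic-gen/lexer"
	"dawn.googlesource.com/dawn/tools/src/cmd/intrinsic-gen/tok"
)

func TestShlMatchedBeforeLt(t *testing.T) {
	got, err := lexer.Lex([]rune("a<<b"), "test.txt")
	if err != nil {
		t.Fatal(err)
	}
	sawShl := false
	for _, tk := range got {
		if tk.Kind == tok.Lt {
			t.Fatalf("'<<' was lexed as Lt tokens: %v", got)
		}
		if tk.Kind == tok.Shl {
			sawShl = true
		}
	}
	if !sawShl {
		t.Fatalf("no Shl token produced for 'a<<b': %v", got)
	}
}
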
diff --git a/chromium/third_party/dawn/tools/src/cmd/builtin-gen/lexer/lexer_test.go b/chromium/third_party/dawn/tools/src/cmd/intrinsic-gen/lexer/lexer_test.go
index bfd512859dd..21d5787fe7f 100644
--- a/chromium/third_party/dawn/tools/src/cmd/builtin-gen/lexer/lexer_test.go
+++ b/chromium/third_party/dawn/tools/src/cmd/intrinsic-gen/lexer/lexer_test.go
@@ -18,8 +18,8 @@ import (
"fmt"
"testing"
- "dawn.googlesource.com/dawn/tools/src/cmd/builtin-gen/lexer"
- "dawn.googlesource.com/dawn/tools/src/cmd/builtin-gen/tok"
+ "dawn.googlesource.com/dawn/tools/src/cmd/intrinsic-gen/lexer"
+ "dawn.googlesource.com/dawn/tools/src/cmd/intrinsic-gen/tok"
)
func TestLexTokens(t *testing.T) {
@@ -52,9 +52,18 @@ func TestLexTokens(t *testing.T) {
{"fn", tok.Token{Kind: tok.Function, Runes: []rune("fn"), Source: tok.Source{
S: loc(1, 1, 0), E: loc(1, 3, 2),
}}},
+ {"op", tok.Token{Kind: tok.Operator, Runes: []rune("op"), Source: tok.Source{
+ S: loc(1, 1, 0), E: loc(1, 3, 2),
+ }}},
{"type", tok.Token{Kind: tok.Type, Runes: []rune("type"), Source: tok.Source{
S: loc(1, 1, 0), E: loc(1, 5, 4),
}}},
+ {"ctor", tok.Token{Kind: tok.Constructor, Runes: []rune("ctor"), Source: tok.Source{
+ S: loc(1, 1, 0), E: loc(1, 5, 4),
+ }}},
+ {"conv", tok.Token{Kind: tok.Converter, Runes: []rune("conv"), Source: tok.Source{
+ S: loc(1, 1, 0), E: loc(1, 5, 4),
+ }}},
{"enum", tok.Token{Kind: tok.Enum, Runes: []rune("enum"), Source: tok.Source{
S: loc(1, 1, 0), E: loc(1, 5, 4),
}}},
@@ -76,12 +85,51 @@ func TestLexTokens(t *testing.T) {
{"}", tok.Token{Kind: tok.Rbrace, Runes: []rune("}"), Source: tok.Source{
S: loc(1, 1, 0), E: loc(1, 2, 1),
}}},
- {"[[", tok.Token{Kind: tok.Ldeco, Runes: []rune("[["), Source: tok.Source{
+ {"&&", tok.Token{Kind: tok.AndAnd, Runes: []rune("&&"), Source: tok.Source{
+ S: loc(1, 1, 0), E: loc(1, 3, 2),
+ }}},
+ {"&", tok.Token{Kind: tok.And, Runes: []rune("&"), Source: tok.Source{
+ S: loc(1, 1, 0), E: loc(1, 2, 1),
+ }}},
+ {"||", tok.Token{Kind: tok.OrOr, Runes: []rune("||"), Source: tok.Source{
+ S: loc(1, 1, 0), E: loc(1, 3, 2),
+ }}},
+ {"|", tok.Token{Kind: tok.Or, Runes: []rune("|"), Source: tok.Source{
+ S: loc(1, 1, 0), E: loc(1, 2, 1),
+ }}},
+ {"!", tok.Token{Kind: tok.Not, Runes: []rune("!"), Source: tok.Source{
+ S: loc(1, 1, 0), E: loc(1, 2, 1),
+ }}},
+ {"!=", tok.Token{Kind: tok.NotEqual, Runes: []rune("!="), Source: tok.Source{
+ S: loc(1, 1, 0), E: loc(1, 3, 2),
+ }}},
+ {"==", tok.Token{Kind: tok.Equal, Runes: []rune("=="), Source: tok.Source{
+ S: loc(1, 1, 0), E: loc(1, 3, 2),
+ }}},
+ {"=", tok.Token{Kind: tok.Assign, Runes: []rune("="), Source: tok.Source{
+ S: loc(1, 1, 0), E: loc(1, 2, 1),
+ }}},
+ {"<<", tok.Token{Kind: tok.Shl, Runes: []rune("<<"), Source: tok.Source{
+ S: loc(1, 1, 0), E: loc(1, 3, 2),
+ }}},
+ {"<=", tok.Token{Kind: tok.Le, Runes: []rune("<="), Source: tok.Source{
S: loc(1, 1, 0), E: loc(1, 3, 2),
}}},
- {"]]", tok.Token{Kind: tok.Rdeco, Runes: []rune("]]"), Source: tok.Source{
+ {"<", tok.Token{Kind: tok.Lt, Runes: []rune("<"), Source: tok.Source{
+ S: loc(1, 1, 0), E: loc(1, 2, 1),
+ }}},
+ {">=", tok.Token{Kind: tok.Ge, Runes: []rune(">="), Source: tok.Source{
S: loc(1, 1, 0), E: loc(1, 3, 2),
}}},
+ {">>", tok.Token{Kind: tok.Shr, Runes: []rune(">>"), Source: tok.Source{
+ S: loc(1, 1, 0), E: loc(1, 3, 2),
+ }}},
+ {">", tok.Token{Kind: tok.Gt, Runes: []rune(">"), Source: tok.Source{
+ S: loc(1, 1, 0), E: loc(1, 2, 1),
+ }}},
+ {"@", tok.Token{Kind: tok.Attr, Runes: []rune("@"), Source: tok.Source{
+ S: loc(1, 1, 0), E: loc(1, 2, 1),
+ }}},
{"(", tok.Token{Kind: tok.Lparen, Runes: []rune("("), Source: tok.Source{
S: loc(1, 1, 0), E: loc(1, 2, 1),
}}},
@@ -91,6 +139,9 @@ func TestLexTokens(t *testing.T) {
{"|", tok.Token{Kind: tok.Or, Runes: []rune("|"), Source: tok.Source{
S: loc(1, 1, 0), E: loc(1, 2, 1),
}}},
+ {"*", tok.Token{Kind: tok.Star, Runes: []rune("*"), Source: tok.Source{
+ S: loc(1, 1, 0), E: loc(1, 2, 1),
+ }}},
{"->", tok.Token{Kind: tok.Arrow, Runes: []rune("->"), Source: tok.Source{
S: loc(1, 1, 0), E: loc(1, 3, 2),
}}},
@@ -134,10 +185,14 @@ func TestErrors(t *testing.T) {
for _, test := range []test{
{" \"abc", "test.txt:1:2 unterminated string"},
{" \"abc\n", "test.txt:1:2 unterminated string"},
- {"*", "test.txt:1:1: unexpected '*'"},
+ {"£", "test.txt:1:1: unexpected '£'"},
} {
got, err := lexer.Lex([]rune(test.src), "test.txt")
- if gotErr := err.Error(); test.expect != gotErr {
+ gotErr := "<nil>"
+ if err != nil {
+ gotErr = err.Error()
+ }
+ if test.expect != gotErr {
t.Errorf(`Lex() returned error "%+v", expected error "%+v"`, gotErr, test.expect)
}
if got != nil {
diff --git a/chromium/third_party/dawn/tools/src/cmd/builtin-gen/main.go b/chromium/third_party/dawn/tools/src/cmd/intrinsic-gen/main.go
index eba89957361..c9235f455e7 100644
--- a/chromium/third_party/dawn/tools/src/cmd/builtin-gen/main.go
+++ b/chromium/third_party/dawn/tools/src/cmd/intrinsic-gen/main.go
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-// builtin-gen parses the <tint>/src/tint/builtins.def file, then scans the
+// intrinsic-gen parses the <tint>/src/tint/intrinsics.def file, then scans the
// project directory for '<file>.tmpl' files, to produce '<file>' source code
// files.
package main
@@ -25,14 +25,14 @@ import (
"path/filepath"
"strings"
- "dawn.googlesource.com/dawn/tools/src/cmd/builtin-gen/gen"
- "dawn.googlesource.com/dawn/tools/src/cmd/builtin-gen/parser"
- "dawn.googlesource.com/dawn/tools/src/cmd/builtin-gen/resolver"
+ "dawn.googlesource.com/dawn/tools/src/cmd/intrinsic-gen/gen"
+ "dawn.googlesource.com/dawn/tools/src/cmd/intrinsic-gen/parser"
+ "dawn.googlesource.com/dawn/tools/src/cmd/intrinsic-gen/resolver"
"dawn.googlesource.com/dawn/tools/src/fileutils"
"dawn.googlesource.com/dawn/tools/src/glob"
)
-const defProjectRelPath = "src/tint/builtins.def"
+const defProjectRelPath = "src/tint/intrinsics.def"
func main() {
if err := run(); err != nil {
@@ -43,13 +43,13 @@ func main() {
func showUsage() {
fmt.Println(`
-builtin-gen generates the builtin table for the Tint compiler
+intrinsic-gen generates the intrinsic table for the Tint compiler
-builtin-gen parses the <tint>/src/tint/builtins.def file, then scans the project
+intrinsic-gen parses the <tint>/src/tint/intrinsics.def file, then scans the project
directory for '<file>.tmpl' files, to produce '<file>' source code files.
usage:
- builtin-gen
+ intrinsic-gen
optional flags:`)
flag.PrintDefaults()
@@ -161,10 +161,10 @@ const header = `// Copyright 2021 The Tint Authors.
// limitations under the License.
////////////////////////////////////////////////////////////////////////////////
-// File generated by tools/builtin-gen
+// File generated by tools/intrinsic-gen
// using the template:
// %v
-// and the builtin defintion file:
+// and the intrinsic definition file:
// %v
//
// Do not modify this file directly
diff --git a/chromium/third_party/dawn/tools/src/cmd/builtin-gen/parser/parser.go b/chromium/third_party/dawn/tools/src/cmd/intrinsic-gen/parser/parser.go
index d0343a505a7..84f816db164 100644
--- a/chromium/third_party/dawn/tools/src/cmd/builtin-gen/parser/parser.go
+++ b/chromium/third_party/dawn/tools/src/cmd/intrinsic-gen/parser/parser.go
@@ -19,9 +19,9 @@ package parser
import (
"fmt"
- "dawn.googlesource.com/dawn/tools/src/cmd/builtin-gen/ast"
- "dawn.googlesource.com/dawn/tools/src/cmd/builtin-gen/lexer"
- "dawn.googlesource.com/dawn/tools/src/cmd/builtin-gen/tok"
+ "dawn.googlesource.com/dawn/tools/src/cmd/intrinsic-gen/ast"
+ "dawn.googlesource.com/dawn/tools/src/cmd/intrinsic-gen/lexer"
+ "dawn.googlesource.com/dawn/tools/src/cmd/intrinsic-gen/tok"
)
// Parse produces a list of tokens for the given source code
@@ -43,31 +43,40 @@ type parser struct {
func (p *parser) parse() (*ast.AST, error) {
out := ast.AST{}
- var decorations ast.Decorations
+ var attributes ast.Attributes
for p.err == nil {
t := p.peek(0)
if t == nil {
break
}
switch t.Kind {
- case tok.Ldeco:
- decorations = append(decorations, p.decorations()...)
+ case tok.Attr:
+ attributes = append(attributes, p.attributes()...)
case tok.Enum:
- if len(decorations) > 0 {
- p.err = fmt.Errorf("%v unexpected decoration", decorations[0].Source)
+ if len(attributes) > 0 {
+ p.err = fmt.Errorf("%v unexpected attribute", attributes[0].Source)
}
out.Enums = append(out.Enums, p.enumDecl())
case tok.Match:
- if len(decorations) > 0 {
- p.err = fmt.Errorf("%v unexpected decoration", decorations[0].Source)
+ if len(attributes) > 0 {
+ p.err = fmt.Errorf("%v unexpected attribute", attributes[0].Source)
}
out.Matchers = append(out.Matchers, p.matcherDecl())
case tok.Type:
- out.Types = append(out.Types, p.typeDecl(decorations))
- decorations = nil
+ out.Types = append(out.Types, p.typeDecl(attributes))
+ attributes = nil
case tok.Function:
- out.Functions = append(out.Functions, p.functionDecl(decorations))
- decorations = nil
+ out.Builtins = append(out.Builtins, p.builtinDecl(attributes))
+ attributes = nil
+ case tok.Operator:
+ out.Operators = append(out.Operators, p.operatorDecl(attributes))
+ attributes = nil
+ case tok.Constructor:
+ out.Constructors = append(out.Constructors, p.constructorDecl(attributes))
+ attributes = nil
+ case tok.Converter:
+ out.Converters = append(out.Converters, p.converterDecl(attributes))
+ attributes = nil
default:
p.err = fmt.Errorf("%v unexpected token '%v'", t.Source, t.Kind)
}
@@ -90,9 +99,9 @@ func (p *parser) enumDecl() ast.EnumDecl {
}
func (p *parser) enumEntry() ast.EnumEntry {
- decos := p.decorations()
+ decos := p.attributes()
name := p.expect(tok.Identifier, "enum entry")
- return ast.EnumEntry{Source: name.Source, Decorations: decos, Name: string(name.Runes)}
+ return ast.EnumEntry{Source: name.Source, Attributes: decos, Name: string(name.Runes)}
}
func (p *parser) matcherDecl() ast.MatcherDecl {
@@ -109,13 +118,13 @@ func (p *parser) matcherDecl() ast.MatcherDecl {
return m
}
-func (p *parser) typeDecl(decos ast.Decorations) ast.TypeDecl {
+func (p *parser) typeDecl(decos ast.Attributes) ast.TypeDecl {
p.expect(tok.Type, "type declaration")
name := p.expect(tok.Identifier, "type name")
m := ast.TypeDecl{
- Source: name.Source,
- Decorations: decos,
- Name: string(name.Runes),
+ Source: name.Source,
+ Attributes: decos,
+ Name: string(name.Runes),
}
if p.peekIs(0, tok.Lt) {
m.TemplateParams = p.templateParams()
@@ -123,43 +132,97 @@ func (p *parser) typeDecl(decos ast.Decorations) ast.TypeDecl {
return m
}
-func (p *parser) decorations() ast.Decorations {
- if p.match(tok.Ldeco) == nil {
- return nil
- }
- out := ast.Decorations{}
- for p.err == nil {
- name := p.expect(tok.Identifier, "decoration name")
+func (p *parser) attributes() ast.Attributes {
+ var out ast.Attributes
+ for p.match(tok.Attr) != nil && p.err == nil {
+ name := p.expect(tok.Identifier, "attribute name")
values := []string{}
if p.match(tok.Lparen) != nil {
for p.err == nil {
- values = append(values, p.string())
+ values = append(values, string(p.next().Runes))
if p.match(tok.Comma) == nil {
break
}
}
- p.expect(tok.Rparen, "decoration values")
+ p.expect(tok.Rparen, "attribute values")
}
- out = append(out, ast.Decoration{
+ out = append(out, ast.Attribute{
Source: name.Source,
Name: string(name.Runes),
Values: values,
})
- if !p.peekIs(0, tok.Comma) {
- break
- }
}
- p.expect(tok.Rdeco, "decoration list")
return out
}
-func (p *parser) functionDecl(decos ast.Decorations) ast.FunctionDecl {
+func (p *parser) builtinDecl(decos ast.Attributes) ast.IntrinsicDecl {
p.expect(tok.Function, "function declaration")
name := p.expect(tok.Identifier, "function name")
- f := ast.FunctionDecl{
- Source: name.Source,
- Decorations: decos,
- Name: string(name.Runes),
+ f := ast.IntrinsicDecl{
+ Source: name.Source,
+ Kind: ast.Builtin,
+ Attributes: decos,
+ Name: string(name.Runes),
+ }
+ if p.peekIs(0, tok.Lt) {
+ f.TemplateParams = p.templateParams()
+ }
+ f.Parameters = p.parameters()
+ if p.match(tok.Arrow) != nil {
+ ret := p.templatedName()
+ f.ReturnType = &ret
+ }
+ return f
+}
+
+func (p *parser) operatorDecl(decos ast.Attributes) ast.IntrinsicDecl {
+ p.expect(tok.Operator, "operator declaration")
+ name := p.next()
+ f := ast.IntrinsicDecl{
+ Source: name.Source,
+ Kind: ast.Operator,
+ Attributes: decos,
+ Name: string(name.Runes),
+ }
+ if p.peekIs(0, tok.Lt) {
+ f.TemplateParams = p.templateParams()
+ }
+ f.Parameters = p.parameters()
+ if p.match(tok.Arrow) != nil {
+ ret := p.templatedName()
+ f.ReturnType = &ret
+ }
+ return f
+}
+
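+// constructorDecl parses a 'ctor' type constructor declaration; decos holds
+// the attributes parsed immediately before it.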
+func (p *parser) constructorDecl(decos ast.Attributes) ast.IntrinsicDecl {
+ p.expect(tok.Constructor, "constructor declaration")
+ name := p.next()
+ f := ast.IntrinsicDecl{
+ Source: name.Source,
+ Kind: ast.Constructor,
+ Attributes: decos,
+ Name: string(name.Runes),
+ }
+ if p.peekIs(0, tok.Lt) {
+ f.TemplateParams = p.templateParams()
+ }
+ f.Parameters = p.parameters()
+ if p.match(tok.Arrow) != nil {
+ ret := p.templatedName()
+ f.ReturnType = &ret
+ }
+ return f
+}
+
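+// converterDecl parses a 'conv' type converter declaration; decos holds the
+// attributes parsed immediately before it.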
+func (p *parser) converterDecl(decos ast.Attributes) ast.IntrinsicDecl {
+ p.expect(tok.Converter, "converter declaration")
+ name := p.next()
+ f := ast.IntrinsicDecl{
+ Source: name.Source,
+ Kind: ast.Converter,
+ Attributes: decos,
+ Name: string(name.Runes),
}
if p.peekIs(0, tok.Lt) {
f.TemplateParams = p.templateParams()
@@ -188,21 +251,24 @@ func (p *parser) parameters() ast.Parameters {
}
func (p *parser) parameter() ast.Parameter {
+ attributes := p.attributes()
if p.peekIs(1, tok.Colon) {
// name type
name := p.expect(tok.Identifier, "parameter name")
p.expect(tok.Colon, "parameter type")
return ast.Parameter{
- Source: name.Source,
- Name: string(name.Runes),
- Type: p.templatedName(),
+ Source: name.Source,
+ Name: string(name.Runes),
+ Attributes: attributes,
+ Type: p.templatedName(),
}
}
// type
ty := p.templatedName()
return ast.Parameter{
- Source: ty.Source,
- Type: ty,
+ Source: ty.Source,
+ Attributes: attributes,
+ Type: ty,
}
}
@@ -270,20 +336,6 @@ func (p *parser) ident(use string) string {
return string(p.expect(tok.Identifier, use).Runes)
}
-// TODO(bclayton): Currently unused, but will be needed for integer bounds
-// func (p *parser) integer(use string) int {
-// t := p.expect(tok.Integer, use)
-// if t.Kind != tok.Integer {
-// return 0
-// }
-// i, err := strconv.Atoi(string(t.Runes))
-// if err != nil {
-// p.err = err
-// return 0
-// }
-// return i
-// }
-
func (p *parser) match(kind tok.Kind) *tok.Token {
if p.err != nil || len(p.tokens) == 0 {
return nil
@@ -296,6 +348,18 @@ func (p *parser) match(kind tok.Kind) *tok.Token {
return &t
}
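+// next consumes and returns the next token in the stream, or returns nil if an
+// error has been raised or the end of input is reached.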
+func (p *parser) next() *tok.Token {
+ if p.err != nil {
+ return nil
+ }
+ if len(p.tokens) == 0 {
+ p.err = fmt.Errorf("reached end of file")
+ return nil
+ }
+ t := p.tokens[0]
+ p.tokens = p.tokens[1:]
+ return &t
+}
+
func (p *parser) peekIs(i int, kind tok.Kind) bool {
t := p.peek(i)
if t == nil {
diff --git a/chromium/third_party/dawn/tools/src/cmd/intrinsic-gen/parser/parser_test.go b/chromium/third_party/dawn/tools/src/cmd/intrinsic-gen/parser/parser_test.go
new file mode 100644
index 00000000000..43c95615b55
--- /dev/null
+++ b/chromium/third_party/dawn/tools/src/cmd/intrinsic-gen/parser/parser_test.go
@@ -0,0 +1,705 @@
+// Copyright 2021 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package parser_test
+
+import (
+ "testing"
+
+ "dawn.googlesource.com/dawn/tools/src/cmd/intrinsic-gen/ast"
+ "dawn.googlesource.com/dawn/tools/src/cmd/intrinsic-gen/parser"
+ "dawn.googlesource.com/dawn/tools/src/utils"
+ "github.com/google/go-cmp/cmp"
+)
+
+var ignoreSource = cmp.FilterPath(func(p cmp.Path) bool {
+ return p.Last().String() == ".Source"
+}, cmp.Ignore())
+
+func TestParser(t *testing.T) {
+ type test struct {
+ location string
+ src string
+ expect ast.AST
+ }
+
+ for _, test := range []test{
+ {
+ utils.ThisLine(),
+ "enum E {}",
+ ast.AST{
+ Enums: []ast.EnumDecl{{Name: "E"}},
+ },
+ }, { ///////////////////////////////////////////////////////////////////
+ utils.ThisLine(),
+ "enum E { A @attr B C }",
+ ast.AST{
+ Enums: []ast.EnumDecl{{
+ Name: "E",
+ Entries: []ast.EnumEntry{
+ {Name: "A"},
+ {
+ Attributes: ast.Attributes{{
+ Name: "attr",
+ Values: []string{},
+ }},
+ Name: "B",
+ },
+ {Name: "C"},
+ },
+ }},
+ },
+ }, { ///////////////////////////////////////////////////////////////////
+ utils.ThisLine(),
+ "type T",
+ ast.AST{
+ Types: []ast.TypeDecl{{Name: "T"}},
+ },
+ }, { ///////////////////////////////////////////////////////////////////
+ utils.ThisLine(),
+ "type T<A, B, C>",
+ ast.AST{
+ Types: []ast.TypeDecl{{
+ Name: "T",
+ TemplateParams: ast.TemplateParams{
+ {Name: "A"},
+ {Name: "B"},
+ {Name: "C"},
+ },
+ }},
+ },
+ }, { ///////////////////////////////////////////////////////////////////
+ utils.ThisLine(),
+ "@attr type T",
+ ast.AST{
+ Types: []ast.TypeDecl{{
+ Attributes: ast.Attributes{
+ {Name: "attr", Values: []string{}},
+ },
+ Name: "T",
+ }},
+ },
+ }, { ///////////////////////////////////////////////////////////////////
+ utils.ThisLine(),
+ "@attr_a @attr_b type T",
+ ast.AST{
+ Types: []ast.TypeDecl{{
+ Attributes: ast.Attributes{
+ {Name: "attr_a", Values: []string{}},
+ {Name: "attr_b", Values: []string{}},
+ },
+ Name: "T",
+ }},
+ },
+ }, { ///////////////////////////////////////////////////////////////////
+ utils.ThisLine(),
+ `@attr("a", "b") type T`,
+ ast.AST{
+ Types: []ast.TypeDecl{{
+ Attributes: ast.Attributes{
+ {Name: "attr", Values: []string{"a", "b"}},
+ },
+ Name: "T",
+ }},
+ },
+ }, { ///////////////////////////////////////////////////////////////////
+ utils.ThisLine(),
+ `@attr(1, "x") type T`,
+ ast.AST{
+ Types: []ast.TypeDecl{{
+ Attributes: ast.Attributes{
+ {Name: "attr", Values: []string{"1", "x"}},
+ },
+ Name: "T",
+ }},
+ },
+ }, { ///////////////////////////////////////////////////////////////////
+ utils.ThisLine(),
+ "match M : A",
+ ast.AST{
+ Matchers: []ast.MatcherDecl{{
+ Name: "M",
+ Options: ast.MatcherOptions{
+ ast.TemplatedName{Name: "A"},
+ },
+ }},
+ },
+ }, { ///////////////////////////////////////////////////////////////////
+ utils.ThisLine(),
+ "match M : A | B",
+ ast.AST{
+ Matchers: []ast.MatcherDecl{{
+ Name: "M",
+ Options: ast.MatcherOptions{
+ ast.TemplatedName{Name: "A"},
+ ast.TemplatedName{Name: "B"},
+ },
+ }},
+ },
+ }, { ///////////////////////////////////////////////////////////////////
+ utils.ThisLine(),
+ "fn F()",
+ ast.AST{
+ Builtins: []ast.IntrinsicDecl{{
+ Kind: ast.Builtin,
+ Name: "F",
+ Parameters: ast.Parameters{},
+ }},
+ },
+ }, { ///////////////////////////////////////////////////////////////////
+ utils.ThisLine(),
+ "@attr fn F()",
+ ast.AST{
+ Builtins: []ast.IntrinsicDecl{{
+ Kind: ast.Builtin,
+ Name: "F",
+ Attributes: ast.Attributes{
+ {Name: "attr", Values: []string{}},
+ },
+ Parameters: ast.Parameters{},
+ }},
+ },
+ }, { ///////////////////////////////////////////////////////////////////
+ utils.ThisLine(),
+ "fn F(a)",
+ ast.AST{
+ Builtins: []ast.IntrinsicDecl{{
+ Kind: ast.Builtin,
+ Name: "F",
+ Parameters: ast.Parameters{
+ {Type: ast.TemplatedName{Name: "a"}},
+ },
+ }},
+ },
+ }, { ///////////////////////////////////////////////////////////////////
+ utils.ThisLine(),
+ "fn F(a: T)",
+ ast.AST{
+ Builtins: []ast.IntrinsicDecl{{
+ Kind: ast.Builtin,
+ Name: "F",
+ Parameters: ast.Parameters{
+ {Name: "a", Type: ast.TemplatedName{Name: "T"}},
+ },
+ }},
+ },
+ }, { ///////////////////////////////////////////////////////////////////
+ utils.ThisLine(),
+ "fn F(a, b)",
+ ast.AST{
+ Builtins: []ast.IntrinsicDecl{{
+ Kind: ast.Builtin,
+ Name: "F",
+ Parameters: ast.Parameters{
+ {Type: ast.TemplatedName{Name: "a"}},
+ {Type: ast.TemplatedName{Name: "b"}},
+ },
+ }},
+ },
+ }, { ///////////////////////////////////////////////////////////////////
+ utils.ThisLine(),
+ "fn F<A : B<C> >()",
+ ast.AST{
+ Builtins: []ast.IntrinsicDecl{{
+ Kind: ast.Builtin,
+ Name: "F",
+ TemplateParams: ast.TemplateParams{
+ {
+ Name: "A", Type: ast.TemplatedName{
+ Name: "B",
+ TemplateArgs: ast.TemplatedNames{
+ {Name: "C"},
+ },
+ },
+ },
+ },
+ Parameters: ast.Parameters{},
+ }},
+ },
+ }, { ///////////////////////////////////////////////////////////////////
+ utils.ThisLine(),
+ "fn F<T>(a: X, b: Y<T>)",
+ ast.AST{
+ Builtins: []ast.IntrinsicDecl{{
+ Kind: ast.Builtin,
+ Name: "F",
+ TemplateParams: ast.TemplateParams{
+ {Name: "T"},
+ },
+ Parameters: ast.Parameters{
+ {Name: "a", Type: ast.TemplatedName{Name: "X"}},
+ {Name: "b", Type: ast.TemplatedName{
+ Name: "Y",
+ TemplateArgs: []ast.TemplatedName{{Name: "T"}},
+ }},
+ },
+ }},
+ },
+ }, { ///////////////////////////////////////////////////////////////////
+ utils.ThisLine(),
+ "fn F() -> X",
+ ast.AST{
+ Builtins: []ast.IntrinsicDecl{{
+ Kind: ast.Builtin,
+ Name: "F",
+ ReturnType: &ast.TemplatedName{Name: "X"},
+ Parameters: ast.Parameters{},
+ }},
+ },
+ }, { ///////////////////////////////////////////////////////////////////
+ utils.ThisLine(),
+ "fn F() -> X<T>",
+ ast.AST{
+ Builtins: []ast.IntrinsicDecl{{
+ Kind: ast.Builtin,
+ Name: "F",
+ ReturnType: &ast.TemplatedName{
+ Name: "X",
+ TemplateArgs: []ast.TemplatedName{{Name: "T"}},
+ },
+ Parameters: ast.Parameters{},
+ }},
+ },
+ }, { ///////////////////////////////////////////////////////////////////
+ utils.ThisLine(),
+ "op F()",
+ ast.AST{
+ Operators: []ast.IntrinsicDecl{{
+ Kind: ast.Operator,
+ Name: "F",
+ Parameters: ast.Parameters{},
+ }},
+ },
+ }, { ///////////////////////////////////////////////////////////////////
+ utils.ThisLine(),
+ "@attr op F()",
+ ast.AST{
+ Operators: []ast.IntrinsicDecl{{
+ Kind: ast.Operator,
+ Name: "F",
+ Attributes: ast.Attributes{
+ {Name: "attr", Values: []string{}},
+ },
+ Parameters: ast.Parameters{},
+ }},
+ },
+ }, { ///////////////////////////////////////////////////////////////////
+ utils.ThisLine(),
+ "op F(a)",
+ ast.AST{
+ Operators: []ast.IntrinsicDecl{{
+ Kind: ast.Operator,
+ Name: "F",
+ Parameters: ast.Parameters{
+ {Type: ast.TemplatedName{Name: "a"}},
+ },
+ }},
+ },
+ }, { ///////////////////////////////////////////////////////////////////
+ utils.ThisLine(),
+ "op F(@blah a)",
+ ast.AST{
+ Operators: []ast.IntrinsicDecl{{
+ Kind: ast.Operator,
+ Name: "F",
+ Parameters: ast.Parameters{
+ {
+ Attributes: ast.Attributes{{Name: "blah", Values: []string{}}},
+ Type: ast.TemplatedName{Name: "a"}},
+ },
+ }},
+ },
+ }, { ///////////////////////////////////////////////////////////////////
+ utils.ThisLine(),
+ "op F(a: T)",
+ ast.AST{
+ Operators: []ast.IntrinsicDecl{{
+ Kind: ast.Operator,
+ Name: "F",
+ Parameters: ast.Parameters{
+ {Name: "a", Type: ast.TemplatedName{Name: "T"}},
+ },
+ }},
+ },
+ }, { ///////////////////////////////////////////////////////////////////
+ utils.ThisLine(),
+ "op F(a, b)",
+ ast.AST{
+ Operators: []ast.IntrinsicDecl{{
+ Kind: ast.Operator,
+ Name: "F",
+ Parameters: ast.Parameters{
+ {Type: ast.TemplatedName{Name: "a"}},
+ {Type: ast.TemplatedName{Name: "b"}},
+ },
+ }},
+ },
+ }, { ///////////////////////////////////////////////////////////////////
+ utils.ThisLine(),
+ "op F<A : B<C> >()",
+ ast.AST{
+ Operators: []ast.IntrinsicDecl{{
+ Kind: ast.Operator,
+ Name: "F",
+ TemplateParams: ast.TemplateParams{
+ {
+ Name: "A", Type: ast.TemplatedName{
+ Name: "B",
+ TemplateArgs: ast.TemplatedNames{
+ {Name: "C"},
+ },
+ },
+ },
+ },
+ Parameters: ast.Parameters{},
+ }},
+ },
+ }, { ///////////////////////////////////////////////////////////////////
+ utils.ThisLine(),
+ "op F<T>(a: X, b: Y<T>)",
+ ast.AST{
+ Operators: []ast.IntrinsicDecl{{
+ Kind: ast.Operator,
+ Name: "F",
+ TemplateParams: ast.TemplateParams{
+ {Name: "T"},
+ },
+ Parameters: ast.Parameters{
+ {Name: "a", Type: ast.TemplatedName{Name: "X"}},
+ {Name: "b", Type: ast.TemplatedName{
+ Name: "Y",
+ TemplateArgs: []ast.TemplatedName{{Name: "T"}},
+ }},
+ },
+ }},
+ },
+ }, { ///////////////////////////////////////////////////////////////////
+ utils.ThisLine(),
+ "op F() -> X",
+ ast.AST{
+ Operators: []ast.IntrinsicDecl{{
+ Kind: ast.Operator,
+ Name: "F",
+ ReturnType: &ast.TemplatedName{Name: "X"},
+ Parameters: ast.Parameters{},
+ }},
+ },
+ }, { ///////////////////////////////////////////////////////////////////
+ utils.ThisLine(),
+ "op F() -> X<T>",
+ ast.AST{
+ Operators: []ast.IntrinsicDecl{{
+ Kind: ast.Operator,
+ Name: "F",
+ ReturnType: &ast.TemplatedName{
+ Name: "X",
+ TemplateArgs: []ast.TemplatedName{{Name: "T"}},
+ },
+ Parameters: ast.Parameters{},
+ }},
+ },
+ }, { ///////////////////////////////////////////////////////////////////
+ utils.ThisLine(),
+ "ctor F()",
+ ast.AST{
+ Constructors: []ast.IntrinsicDecl{{
+ Kind: ast.Constructor,
+ Name: "F",
+ Parameters: ast.Parameters{},
+ }},
+ },
+ }, { ///////////////////////////////////////////////////////////////////
+ utils.ThisLine(),
+ "@attr ctor F()",
+ ast.AST{
+ Constructors: []ast.IntrinsicDecl{{
+ Kind: ast.Constructor,
+ Name: "F",
+ Attributes: ast.Attributes{
+ {Name: "attr", Values: []string{}},
+ },
+ Parameters: ast.Parameters{},
+ }},
+ },
+ }, { ///////////////////////////////////////////////////////////////////
+ utils.ThisLine(),
+ "ctor F(a)",
+ ast.AST{
+ Constructors: []ast.IntrinsicDecl{{
+ Kind: ast.Constructor,
+ Name: "F",
+ Parameters: ast.Parameters{
+ {Type: ast.TemplatedName{Name: "a"}},
+ },
+ }},
+ },
+ }, { ///////////////////////////////////////////////////////////////////
+ utils.ThisLine(),
+ "ctor F(a: T)",
+ ast.AST{
+ Constructors: []ast.IntrinsicDecl{{
+ Kind: ast.Constructor,
+ Name: "F",
+ Parameters: ast.Parameters{
+ {Name: "a", Type: ast.TemplatedName{Name: "T"}},
+ },
+ }},
+ },
+ }, { ///////////////////////////////////////////////////////////////////
+ utils.ThisLine(),
+ "ctor F(a, b)",
+ ast.AST{
+ Constructors: []ast.IntrinsicDecl{{
+ Kind: ast.Constructor,
+ Name: "F",
+ Parameters: ast.Parameters{
+ {Type: ast.TemplatedName{Name: "a"}},
+ {Type: ast.TemplatedName{Name: "b"}},
+ },
+ }},
+ },
+ }, { ///////////////////////////////////////////////////////////////////
+ utils.ThisLine(),
+ "ctor F<A : B<C> >()",
+ ast.AST{
+ Constructors: []ast.IntrinsicDecl{{
+ Kind: ast.Constructor,
+ Name: "F",
+ TemplateParams: ast.TemplateParams{
+ {
+ Name: "A", Type: ast.TemplatedName{
+ Name: "B",
+ TemplateArgs: ast.TemplatedNames{
+ {Name: "C"},
+ },
+ },
+ },
+ },
+ Parameters: ast.Parameters{},
+ }},
+ },
+ }, { ///////////////////////////////////////////////////////////////////
+ utils.ThisLine(),
+ "ctor F<T>(a: X, b: Y<T>)",
+ ast.AST{
+ Constructors: []ast.IntrinsicDecl{{
+ Kind: ast.Constructor,
+ Name: "F",
+ TemplateParams: ast.TemplateParams{
+ {Name: "T"},
+ },
+ Parameters: ast.Parameters{
+ {Name: "a", Type: ast.TemplatedName{Name: "X"}},
+ {Name: "b", Type: ast.TemplatedName{
+ Name: "Y",
+ TemplateArgs: []ast.TemplatedName{{Name: "T"}},
+ }},
+ },
+ }},
+ },
+ }, { ///////////////////////////////////////////////////////////////////
+ utils.ThisLine(),
+ "ctor F() -> X",
+ ast.AST{
+ Constructors: []ast.IntrinsicDecl{{
+ Kind: ast.Constructor,
+ Name: "F",
+ ReturnType: &ast.TemplatedName{Name: "X"},
+ Parameters: ast.Parameters{},
+ }},
+ },
+ }, { ///////////////////////////////////////////////////////////////////
+ utils.ThisLine(),
+ "ctor F() -> X<T>",
+ ast.AST{
+ Constructors: []ast.IntrinsicDecl{{
+ Kind: ast.Constructor,
+ Name: "F",
+ ReturnType: &ast.TemplatedName{
+ Name: "X",
+ TemplateArgs: []ast.TemplatedName{{Name: "T"}},
+ },
+ Parameters: ast.Parameters{},
+ }},
+ },
+ }, { ///////////////////////////////////////////////////////////////////
+ utils.ThisLine(),
+ "conv F()",
+ ast.AST{
+ Converters: []ast.IntrinsicDecl{{
+ Kind: ast.Converter,
+ Name: "F",
+ Parameters: ast.Parameters{},
+ }},
+ },
+ }, { ///////////////////////////////////////////////////////////////////
+ utils.ThisLine(),
+ "@attr conv F()",
+ ast.AST{
+ Converters: []ast.IntrinsicDecl{{
+ Kind: ast.Converter,
+ Name: "F",
+ Attributes: ast.Attributes{
+ {Name: "attr", Values: []string{}},
+ },
+ Parameters: ast.Parameters{},
+ }},
+ },
+ }, { ///////////////////////////////////////////////////////////////////
+ utils.ThisLine(),
+ "conv F(a)",
+ ast.AST{
+ Converters: []ast.IntrinsicDecl{{
+ Kind: ast.Converter,
+ Name: "F",
+ Parameters: ast.Parameters{
+ {Type: ast.TemplatedName{Name: "a"}},
+ },
+ }},
+ },
+ }, { ///////////////////////////////////////////////////////////////////
+ utils.ThisLine(),
+ "conv F(a: T)",
+ ast.AST{
+ Converters: []ast.IntrinsicDecl{{
+ Kind: ast.Converter,
+ Name: "F",
+ Parameters: ast.Parameters{
+ {Name: "a", Type: ast.TemplatedName{Name: "T"}},
+ },
+ }},
+ },
+ }, { ///////////////////////////////////////////////////////////////////
+ utils.ThisLine(),
+ "conv F(a, b)",
+ ast.AST{
+ Converters: []ast.IntrinsicDecl{{
+ Kind: ast.Converter,
+ Name: "F",
+ Parameters: ast.Parameters{
+ {Type: ast.TemplatedName{Name: "a"}},
+ {Type: ast.TemplatedName{Name: "b"}},
+ },
+ }},
+ },
+ }, { ///////////////////////////////////////////////////////////////////
+ utils.ThisLine(),
+ "conv F<A : B<C> >()",
+ ast.AST{
+ Converters: []ast.IntrinsicDecl{{
+ Kind: ast.Converter,
+ Name: "F",
+ TemplateParams: ast.TemplateParams{
+ {
+ Name: "A", Type: ast.TemplatedName{
+ Name: "B",
+ TemplateArgs: ast.TemplatedNames{
+ {Name: "C"},
+ },
+ },
+ },
+ },
+ Parameters: ast.Parameters{},
+ }},
+ },
+ }, { ///////////////////////////////////////////////////////////////////
+ utils.ThisLine(),
+ "conv F<T>(a: X, b: Y<T>)",
+ ast.AST{
+ Converters: []ast.IntrinsicDecl{{
+ Kind: ast.Converter,
+ Name: "F",
+ TemplateParams: ast.TemplateParams{
+ {Name: "T"},
+ },
+ Parameters: ast.Parameters{
+ {Name: "a", Type: ast.TemplatedName{Name: "X"}},
+ {Name: "b", Type: ast.TemplatedName{
+ Name: "Y",
+ TemplateArgs: []ast.TemplatedName{{Name: "T"}},
+ }},
+ },
+ }},
+ },
+ }, { ///////////////////////////////////////////////////////////////////
+ utils.ThisLine(),
+ "conv F() -> X",
+ ast.AST{
+ Converters: []ast.IntrinsicDecl{{
+ Kind: ast.Converter,
+ Name: "F",
+ ReturnType: &ast.TemplatedName{Name: "X"},
+ Parameters: ast.Parameters{},
+ }},
+ },
+ }, { ///////////////////////////////////////////////////////////////////
+ utils.ThisLine(),
+ "conv F() -> X<T>",
+ ast.AST{
+ Converters: []ast.IntrinsicDecl{{
+ Kind: ast.Converter,
+ Name: "F",
+ ReturnType: &ast.TemplatedName{
+ Name: "X",
+ TemplateArgs: []ast.TemplatedName{{Name: "T"}},
+ },
+ Parameters: ast.Parameters{},
+ }},
+ }},
+ } {
+ got, err := parser.Parse(test.src, "file.txt")
+ if err != nil {
+ t.Errorf("\n%v\nWhile parsing:\n%s\nParse() returned error: %v",
+ test.location, test.src, err)
+ continue
+ }
+
+ if diff := cmp.Diff(got, &test.expect, ignoreSource); diff != "" {
+ t.Errorf("\n%v\nWhile parsing:\n%s\n\n%s",
+ test.location, test.src, diff)
+ }
+ }
+}
+
+func TestErrors(t *testing.T) {
+ type test struct {
+ src string
+ expect string
+ }
+
+ for _, test := range []test{
+ {
+ "£",
+ "test.txt:1:1: unexpected '£'",
+ },
+ {
+ "123",
+ "test.txt:1:1 unexpected token 'integer'",
+ },
+ {
+ "@123",
+ "test.txt:1:2 expected 'ident' for attribute name, got 'integer'",
+ },
+ } {
+ got, err := parser.Parse(test.src, "test.txt")
+ gotErr := ""
+ if err != nil {
+ gotErr = err.Error()
+ }
+ if test.expect != gotErr {
+ t.Errorf(`Parse() returned error "%+v", expected error "%+v"`, gotErr, test.expect)
+ }
+ if got != nil {
+ t.Errorf("Parse() returned a non-nil AST for an error case")
+ }
+ }
+}
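(For orientation: a minimal, hypothetical sketch of driving the parser exercised by the tests above — it assumes the tools/src module layout allows importing the intrinsic-gen parser package, and uses only the parser.Parse signature and the ast.AST fields shown in these tests.)

package main

import (
    "fmt"

    "dawn.googlesource.com/dawn/tools/src/cmd/intrinsic-gen/parser"
)

func main() {
    // The new grammar splits intrinsics into builtins ('fn'), operators ('op'),
    // constructors ('ctor') and converters ('conv'), and replaces the old
    // '[[...]]' decorations with '@' attributes.
    src := `
type f32
type i32
@display("float32") type T<x>
fn clamp(f32, f32, f32) -> f32
op -(f32) -> f32
ctor f32(f32)
conv f32(i32)
`
    a, err := parser.Parse(src, "example.txt")
    if err != nil {
        panic(err)
    }
    // Each declaration kind lands in its own list on the returned ast.AST.
    fmt.Println(len(a.Types), len(a.Builtins), len(a.Operators), len(a.Constructors), len(a.Converters))
}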
diff --git a/chromium/third_party/dawn/tools/src/cmd/builtin-gen/resolver/resolve.go b/chromium/third_party/dawn/tools/src/cmd/intrinsic-gen/resolver/resolve.go
index b9c701479f1..9044d2a590a 100644
--- a/chromium/third_party/dawn/tools/src/cmd/builtin-gen/resolver/resolve.go
+++ b/chromium/third_party/dawn/tools/src/cmd/intrinsic-gen/resolver/resolve.go
@@ -17,29 +17,36 @@ package resolver
import (
"fmt"
"sort"
+ "strconv"
- "dawn.googlesource.com/dawn/tools/src/cmd/builtin-gen/ast"
- "dawn.googlesource.com/dawn/tools/src/cmd/builtin-gen/sem"
- "dawn.googlesource.com/dawn/tools/src/cmd/builtin-gen/tok"
+ "dawn.googlesource.com/dawn/tools/src/cmd/intrinsic-gen/ast"
+ "dawn.googlesource.com/dawn/tools/src/cmd/intrinsic-gen/sem"
+ "dawn.googlesource.com/dawn/tools/src/cmd/intrinsic-gen/tok"
)
type resolver struct {
a *ast.AST
s *sem.Sem
- globals scope
- functions map[string]*sem.Function
- enumEntryMatchers map[*sem.EnumEntry]*sem.EnumMatcher
+ globals scope
+ builtins map[string]*sem.Intrinsic
+ unaryOperators map[string]*sem.Intrinsic
+ binaryOperators map[string]*sem.Intrinsic
+ constructorsAndConverters map[string]*sem.Intrinsic
+ enumEntryMatchers map[*sem.EnumEntry]*sem.EnumMatcher
}
// Resolve processes the AST
func Resolve(a *ast.AST) (*sem.Sem, error) {
r := resolver{
- a: a,
- s: sem.New(),
- globals: newScope(nil),
- functions: map[string]*sem.Function{},
- enumEntryMatchers: map[*sem.EnumEntry]*sem.EnumMatcher{},
+ a: a,
+ s: sem.New(),
+ globals: newScope(nil),
+ builtins: map[string]*sem.Intrinsic{},
+ unaryOperators: map[string]*sem.Intrinsic{},
+ binaryOperators: map[string]*sem.Intrinsic{},
+ constructorsAndConverters: map[string]*sem.Intrinsic{},
+ enumEntryMatchers: map[*sem.EnumEntry]*sem.EnumMatcher{},
}
// Declare and resolve all the enumerators
for _, e := range a.Enums {
@@ -59,9 +66,39 @@ func Resolve(a *ast.AST) (*sem.Sem, error) {
return nil, err
}
}
- // Declare and resolve the functions
- for _, f := range a.Functions {
- if err := r.function(f); err != nil {
+ // Declare and resolve the builtins
+ for _, f := range a.Builtins {
+ if err := r.intrinsic(f, r.builtins, &r.s.Builtins); err != nil {
+ return nil, err
+ }
+ }
+ // Declare and resolve the unary and binary operators
+ for _, o := range a.Operators {
+ switch len(o.Parameters) {
+ case 1:
+ if err := r.intrinsic(o, r.unaryOperators, &r.s.UnaryOperators); err != nil {
+ return nil, err
+ }
+ case 2:
+ if err := r.intrinsic(o, r.binaryOperators, &r.s.BinaryOperators); err != nil {
+ return nil, err
+ }
+ default:
+ return nil, fmt.Errorf("%v operators must have either 1 or 2 parameters", o.Source)
+ }
+ }
+
+ // Declare and resolve type constructors and converters
+ for _, c := range a.Constructors {
+ if err := r.intrinsic(c, r.constructorsAndConverters, &r.s.ConstructorsAndConverters); err != nil {
+ return nil, err
+ }
+ }
+ for _, c := range a.Converters {
+ if len(c.Parameters) != 1 {
+ return nil, fmt.Errorf("%v conversions must have a single parameter", c.Source)
+ }
+ if err := r.intrinsic(c, r.constructorsAndConverters, &r.s.ConstructorsAndConverters); err != nil {
return nil, err
}
}
@@ -93,14 +130,14 @@ func (r *resolver) enum(e ast.EnumDecl) error {
Name: ast.Name,
Enum: s,
}
- if internal := ast.Decorations.Take("internal"); internal != nil {
+ if internal := ast.Attributes.Take("internal"); internal != nil {
entry.IsInternal = true
if len(internal.Values) != 0 {
- return fmt.Errorf("%v unexpected value for internal decoration", ast.Source)
+ return fmt.Errorf("%v unexpected value for internal attribute", ast.Source)
}
}
- if len(ast.Decorations) != 0 {
- return fmt.Errorf("%v unknown decoration", ast.Decorations[0].Source)
+ if len(ast.Attributes) != 0 {
+ return fmt.Errorf("%v unknown attribute", ast.Attributes[0].Source)
}
if err := r.globals.declare(entry, e.Source); err != nil {
return err
@@ -136,15 +173,26 @@ func (r *resolver) ty(a ast.TypeDecl) error {
}
t.TemplateParams = templateParams
- // Scan for decorations
- if d := a.Decorations.Take("display"); d != nil {
+ // Scan for attributes
+ if d := a.Attributes.Take("display"); d != nil {
if len(d.Values) != 1 {
- return fmt.Errorf("%v expected a single value for 'display' decoration", d.Source)
+ return fmt.Errorf("%v expected a single value for 'display' attribute", d.Source)
}
t.DisplayName = d.Values[0]
}
- if len(a.Decorations) != 0 {
- return fmt.Errorf("%v unknown decoration", a.Decorations[0].Source)
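+ // @precedence(N) gives the type an integer precedence, used by
+ // TypeMatcher.PrecedenceSortedTypes() to order a matcher's types.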
+ if d := a.Attributes.Take("precedence"); d != nil {
+ if len(d.Values) != 1 {
+ return fmt.Errorf("%v expected a single integer value for 'precedence' attribute", d.Source)
+ }
+ n, err := strconv.Atoi(d.Values[0])
+ if err != nil {
+ return fmt.Errorf("%v %v", d.Source, err)
+ }
+ t.Precedence = n
+ }
+
+ if len(a.Attributes) != 0 {
+ return fmt.Errorf("%v unknown attribute", a.Attributes[0].Source)
}
return nil
@@ -220,18 +268,21 @@ func (r *resolver) matcher(a ast.MatcherDecl) error {
return fmt.Errorf("'%v' cannot be used for matcher", a.Name)
}
-// function() resolves a function overload declaration.
-// The the first overload for the function creates and appends the sem.Function
-// to Sem.Functions. Subsequent overloads append their resolved overload to the
-// sem.Function.Overloads list.
-func (r *resolver) function(a ast.FunctionDecl) error {
- // If this is the first overload of the function, create and register the
- // semantic function.
- f := r.functions[a.Name]
- if f == nil {
- f = &sem.Function{Name: a.Name}
- r.functions[a.Name] = f
- r.s.Functions = append(r.s.Functions, f)
+// intrinsic() resolves an intrinsic overload declaration.
+// The first overload of an intrinsic creates and appends a new sem.Intrinsic to
+// semIntrinsics. Subsequent overloads append their resolved overload to the
+// existing sem.Intrinsic.Overloads list.
+func (r *resolver) intrinsic(
+ a ast.IntrinsicDecl,
+ intrinsicsByName map[string]*sem.Intrinsic,
+ semIntrinsics *[]*sem.Intrinsic) error {
+ // If this is the first overload of the intrinsic, create and register the
+ // semantic intrinsic.
+ intrinsic := intrinsicsByName[a.Name]
+ if intrinsic == nil {
+ intrinsic = &sem.Intrinsic{Name: a.Name}
+ intrinsicsByName[a.Name] = intrinsic
+ *semIntrinsics = append(*semIntrinsics, intrinsic)
}
// Create a new scope for resolving template parameters
@@ -246,13 +297,13 @@ func (r *resolver) function(a ast.FunctionDecl) error {
// Construct the semantic overload
overload := &sem.Overload{
Decl: a,
- Function: f,
+ Intrinsic: intrinsic,
Parameters: make([]sem.Parameter, len(a.Parameters)),
TemplateParams: templateParams,
}
- // Process overload decorations
- if stageDeco := a.Decorations.Take("stage"); stageDeco != nil {
+ // Process overload attributes
+ if stageDeco := a.Attributes.Take("stage"); stageDeco != nil {
for stageDeco != nil {
for _, stage := range stageDeco.Values {
switch stage {
@@ -266,7 +317,7 @@ func (r *resolver) function(a ast.FunctionDecl) error {
return fmt.Errorf("%v unknown stage '%v'", stageDeco.Source, stage)
}
}
- stageDeco = a.Decorations.Take("stage")
+ stageDeco = a.Attributes.Take("stage")
}
} else {
overload.CanBeUsedInStage = sem.StageUses{
@@ -275,36 +326,46 @@ func (r *resolver) function(a ast.FunctionDecl) error {
Compute: true,
}
}
- if deprecated := a.Decorations.Take("deprecated"); deprecated != nil {
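+ // @const names the function used to evaluate the overload at shader-creation
+ // time; with no value, the intrinsic's own name is used.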
+ if constEvalFn := a.Attributes.Take("const"); constEvalFn != nil {
+ switch len(constEvalFn.Values) {
+ case 0:
+ overload.ConstEvalFunction = overload.Decl.Name
+ case 1:
+ overload.ConstEvalFunction = constEvalFn.Values[0]
+ default:
+ return fmt.Errorf("%v too many values for @const attribute", constEvalFn.Source)
+ }
+ }
+ if deprecated := a.Attributes.Take("deprecated"); deprecated != nil {
overload.IsDeprecated = true
if len(deprecated.Values) != 0 {
- return fmt.Errorf("%v unexpected value for deprecated decoration", deprecated.Source)
+ return fmt.Errorf("%v unexpected value for deprecated attribute", deprecated.Source)
}
}
- if len(a.Decorations) != 0 {
- return fmt.Errorf("%v unknown decoration", a.Decorations[0].Source)
+ if len(a.Attributes) != 0 {
+ return fmt.Errorf("%v unknown attribute", a.Attributes[0].Source)
}
- // Append the overload to the function
- f.Overloads = append(f.Overloads, overload)
+ // Append the overload to the intrinsic
+ intrinsic.Overloads = append(intrinsic.Overloads, overload)
// Sort the template parameters by resolved type. Append these to
- // sem.Overload.OpenTypes or sem.Overload.OpenNumbers based on their kind.
+ // sem.Overload.TemplateTypes or sem.Overload.TemplateNumbers based on their kind.
for _, param := range templateParams {
switch param := param.(type) {
case *sem.TemplateTypeParam:
- overload.OpenTypes = append(overload.OpenTypes, param)
+ overload.TemplateTypes = append(overload.TemplateTypes, param)
case *sem.TemplateEnumParam, *sem.TemplateNumberParam:
- overload.OpenNumbers = append(overload.OpenNumbers, param)
+ overload.TemplateNumbers = append(overload.TemplateNumbers, param)
}
}
- // Update high-water marks of open types / numbers
- if r.s.MaxOpenTypes < len(overload.OpenTypes) {
- r.s.MaxOpenTypes = len(overload.OpenTypes)
+ // Update high-water marks of template types and numbers
+ if r.s.MaxTemplateTypes < len(overload.TemplateTypes) {
+ r.s.MaxTemplateTypes = len(overload.TemplateTypes)
}
- if r.s.MaxOpenNumbers < len(overload.OpenNumbers) {
- r.s.MaxOpenNumbers = len(overload.OpenNumbers)
+ if r.s.MaxTemplateNumbers < len(overload.TemplateNumbers) {
+ r.s.MaxTemplateNumbers = len(overload.TemplateNumbers)
}
// Resolve the parameters
@@ -313,9 +374,17 @@ func (r *resolver) function(a ast.FunctionDecl) error {
if err != nil {
return err
}
+ isConst := false
+ if attribute := p.Attributes.Take("const"); attribute != nil {
+ isConst = true
+ }
+ if len(p.Attributes) != 0 {
+ return fmt.Errorf("%v unknown attribute", p.Attributes[0].Source)
+ }
overload.Parameters[i] = sem.Parameter{
- Name: p.Name,
- Type: usage,
+ Name: p.Name,
+ Type: usage,
+ IsConst: isConst,
}
}
@@ -418,6 +487,8 @@ func (r *resolver) templateParam(a ast.TemplateParam) (sem.TemplateParam, error)
return &sem.TemplateEnumParam{Name: a.Name, Enum: r.Enum, Matcher: r}, nil
case *sem.TypeMatcher:
return &sem.TemplateTypeParam{Name: a.Name, Type: r}, nil
+ case *sem.Type:
+ return &sem.TemplateTypeParam{Name: a.Name, Type: r}, nil
default:
return nil, fmt.Errorf("%v invalid template parameter type '%v'", a.Source, a.Type.Name)
}
@@ -495,16 +566,23 @@ func (r *resolver) lookupNamed(s *scope, a ast.TemplatedName) (sem.Named, error)
}
// calculateUniqueParameterNames() iterates over all the parameters of all
-// overloads, calculating the list of unique parameter names
+// intrinsic overloads, calculating the list of unique parameter names
func (r *resolver) calculateUniqueParameterNames() []string {
set := map[string]struct{}{"": {}}
names := []string{}
- for _, f := range r.s.Functions {
- for _, o := range f.Overloads {
- for _, p := range o.Parameters {
- if _, dup := set[p.Name]; !dup {
- set[p.Name] = struct{}{}
- names = append(names, p.Name)
+ for _, intrinsics := range [][]*sem.Intrinsic{
+ r.s.Builtins,
+ r.s.UnaryOperators,
+ r.s.BinaryOperators,
+ r.s.ConstructorsAndConverters,
+ } {
+ for _, i := range intrinsics {
+ for _, o := range i.Overloads {
+ for _, p := range o.Parameters {
+ if _, dup := set[p.Name]; !dup {
+ set[p.Name] = struct{}{}
+ names = append(names, p.Name)
+ }
}
}
}
diff --git a/chromium/third_party/dawn/tools/src/cmd/builtin-gen/resolver/resolver_test.go b/chromium/third_party/dawn/tools/src/cmd/intrinsic-gen/resolver/resolver_test.go
index d331a12c91b..66794316192 100644
--- a/chromium/third_party/dawn/tools/src/cmd/builtin-gen/resolver/resolver_test.go
+++ b/chromium/third_party/dawn/tools/src/cmd/intrinsic-gen/resolver/resolver_test.go
@@ -19,8 +19,8 @@ import (
"strings"
"testing"
- "dawn.googlesource.com/dawn/tools/src/cmd/builtin-gen/parser"
- "dawn.googlesource.com/dawn/tools/src/cmd/builtin-gen/resolver"
+ "dawn.googlesource.com/dawn/tools/src/cmd/intrinsic-gen/parser"
+ "dawn.googlesource.com/dawn/tools/src/cmd/intrinsic-gen/resolver"
)
func TestResolver(t *testing.T) {
@@ -44,7 +44,7 @@ func TestResolver(t *testing.T) {
`type X`,
success,
}, {
- `[[display("Y")]] type X`,
+ `@display("Y") type X`,
success,
}, {
`
@@ -139,7 +139,41 @@ fn f<E: m>()`,
`
type f32
type T<x>
-fn f(T<T<f32>>)`,
+fn f(T< T<f32> >)`,
+ success,
+ }, {
+ `
+type f32
+op -(f32)`,
+ success,
+ }, {
+ `
+type f32
+type T<x>
+op +(T<f32>, T<f32>)`,
+ success,
+ }, {
+ `
+type f32
+ctor f32(f32)`,
+ success,
+ }, {
+ `
+type f32
+type T<x>
+ctor f32(T<f32>)`,
+ success,
+ }, {
+ `
+type f32
+type i32
+conv f32(i32)`,
+ success,
+ }, {
+ `
+type f32
+type T<x>
+conv f32(T<f32>)`,
success,
}, {
`enum E {A A}`,
@@ -154,14 +188,14 @@ First declared here: file.txt:1:6
file.txt:1:13 'X' already declared
First declared here: file.txt:1:6`,
}, {
- `[[meow]] type X`,
+ `@meow type X`,
`
-file.txt:1:3 unknown decoration
+file.txt:1:2 unknown attribute
`,
}, {
- `[[display("Y", "Z")]] type X`,
+ `@display("Y", "Z") type X`,
`
-file.txt:1:3 expected a single value for 'display' decoration`,
+file.txt:1:2 expected a single value for 'display' attribute`,
}, {
`
enum e { a }
@@ -299,6 +333,189 @@ fn f<M: m>(P<M>)`,
`file.txt:4:14 cannot use template enum 'E' as template number`,
}, {
`
+type i
+enum e { a }
+op << (i) -> e`,
+ `file.txt:3:14 cannot use 'e' as return type. Must be a type or template type`,
+ }, {
+ `
+type T<x>
+op << (T<u>)`,
+ `file.txt:2:10 cannot resolve 'u'`,
+ }, {
+ `
+op << ()`,
+ `file.txt:1:4 operators must have either 1 or 2 parameters`,
+ }, {
+ `
+type i
+op << (i, i, i)`,
+ `file.txt:2:4 operators must have either 1 or 2 parameters`,
+ }, {
+ `
+type x
+op << <T>(T<x>)`,
+ `file.txt:2:11 'T' template parameters do not accept template arguments`,
+ }, {
+ `
+type A<N: num>
+type B
+op << (A<B>)`,
+ `file.txt:3:10 cannot use type 'B' as template number`,
+ }, {
+ `
+type A<N>
+enum E { b }
+op << (A<b>)`,
+ `file.txt:3:10 cannot use enum entry 'E.b' as template type`,
+ }, {
+ `
+type T
+type P<N: num>
+match m: T
+op << (P<m>)`,
+ `file.txt:4:10 cannot use type matcher 'm' as template number`,
+ }, {
+ `
+type P<N: num>
+enum E { b }
+op << (P<E>)`,
+ `file.txt:3:10 cannot use enum 'E' as template number`,
+ }, {
+ `
+type P<N: num>
+enum E { a b }
+match m: a | b
+op << (P<m>)`,
+ `file.txt:4:10 cannot use enum matcher 'm' as template number`,
+ }, {
+ `
+type P<N: num>
+enum E { a b }
+match m: a | b
+op << <M: m>(P<M>)`,
+ `file.txt:4:16 cannot use template enum 'E' as template number`,
+ }, {
+ `
+type i
+enum e { a }
+ctor F(i) -> e`,
+ `file.txt:3:14 cannot use 'e' as return type. Must be a type or template type`,
+ }, {
+ `
+type T<x>
+ctor F(T<u>)`,
+ `file.txt:2:10 cannot resolve 'u'`,
+ }, {
+ `
+type x
+ctor F<T>(T<x>)`,
+ `file.txt:2:11 'T' template parameters do not accept template arguments`,
+ }, {
+ `
+type A<N: num>
+type B
+ctor F(A<B>)`,
+ `file.txt:3:10 cannot use type 'B' as template number`,
+ }, {
+ `
+type A<N>
+enum E { b }
+ctor F(A<b>)`,
+ `file.txt:3:10 cannot use enum entry 'E.b' as template type`,
+ }, {
+ `
+type T
+type P<N: num>
+match m: T
+ctor F(P<m>)`,
+ `file.txt:4:10 cannot use type matcher 'm' as template number`,
+ }, {
+ `
+type P<N: num>
+enum E { b }
+ctor F(P<E>)`,
+ `file.txt:3:10 cannot use enum 'E' as template number`,
+ }, {
+ `
+type P<N: num>
+enum E { a b }
+match m: a | b
+ctor F(P<m>)`,
+ `file.txt:4:10 cannot use enum matcher 'm' as template number`,
+ }, {
+ `
+type P<N: num>
+enum E { a b }
+match m: a | b
+ctor F<M: m>(P<M>)`,
+ `file.txt:4:16 cannot use template enum 'E' as template number`,
+ }, {
+ `
+conv F()`,
+ `file.txt:1:6 conversions must have a single parameter`,
+ }, {
+ `
+type i
+conv F(i, i, i)`,
+ `file.txt:2:6 conversions must have a single parameter`,
+ }, {
+ `
+type i
+enum e { a }
+conv F(i) -> e`,
+ `file.txt:3:14 cannot use 'e' as return type. Must be a type or template type`,
+ }, {
+ `
+type T<x>
+conv F(T<u>)`,
+ `file.txt:2:10 cannot resolve 'u'`,
+ }, {
+ `
+type x
+conv F<T>(T<x>)`,
+ `file.txt:2:11 'T' template parameters do not accept template arguments`,
+ }, {
+ `
+type A<N: num>
+type B
+conv F(A<B>)`,
+ `file.txt:3:10 cannot use type 'B' as template number`,
+ }, {
+ `
+type A<N>
+enum E { b }
+conv F(A<b>)`,
+ `file.txt:3:10 cannot use enum entry 'E.b' as template type`,
+ }, {
+ `
+type T
+type P<N: num>
+match m: T
+conv F(P<m>)`,
+ `file.txt:4:10 cannot use type matcher 'm' as template number`,
+ }, {
+ `
+type P<N: num>
+enum E { b }
+conv F(P<E>)`,
+ `file.txt:3:10 cannot use enum 'E' as template number`,
+ }, {
+ `
+type P<N: num>
+enum E { a b }
+match m: a | b
+conv F(P<m>)`,
+ `file.txt:4:10 cannot use enum matcher 'm' as template number`,
+ }, {
+ `
+type P<N: num>
+enum E { a b }
+match m: a | b
+conv F<M: m>(P<M>)`,
+ `file.txt:4:16 cannot use template enum 'E' as template number`,
+ }, {
+ `
enum E { a }
type T<X: a>`,
`file.txt:2:8 invalid template parameter type 'a'`,
diff --git a/chromium/third_party/dawn/tools/src/cmd/builtin-gen/sem/sem.go b/chromium/third_party/dawn/tools/src/cmd/intrinsic-gen/sem/sem.go
index 8db6fb07fba..82d08b7dc5b 100644
--- a/chromium/third_party/dawn/tools/src/cmd/builtin-gen/sem/sem.go
+++ b/chromium/third_party/dawn/tools/src/cmd/intrinsic-gen/sem/sem.go
@@ -16,21 +16,25 @@ package sem
import (
"fmt"
+ "sort"
- "dawn.googlesource.com/dawn/tools/src/cmd/builtin-gen/ast"
+ "dawn.googlesource.com/dawn/tools/src/cmd/intrinsic-gen/ast"
)
// Sem is the root of the semantic tree
type Sem struct {
- Enums []*Enum
- Types []*Type
- TypeMatchers []*TypeMatcher
- EnumMatchers []*EnumMatcher
- Functions []*Function
- // Maximum number of open-types used across all builtins
- MaxOpenTypes int
- // Maximum number of open-numbers used across all builtins
- MaxOpenNumbers int
+ Enums []*Enum
+ Types []*Type
+ TypeMatchers []*TypeMatcher
+ EnumMatchers []*EnumMatcher
+ Builtins []*Intrinsic
+ UnaryOperators []*Intrinsic
+ BinaryOperators []*Intrinsic
+ ConstructorsAndConverters []*Intrinsic
+ // Maximum number of template types used across all builtins
+ MaxTemplateTypes int
+ // Maximum number of template numbers used across all builtins
+ MaxTemplateNumbers int
// The alphabetically sorted list of unique parameter names
UniqueParameterNames []string
}
@@ -38,11 +42,13 @@ type Sem struct {
// New returns a new Sem
func New() *Sem {
return &Sem{
- Enums: []*Enum{},
- Types: []*Type{},
- TypeMatchers: []*TypeMatcher{},
- EnumMatchers: []*EnumMatcher{},
- Functions: []*Function{},
+ Enums: []*Enum{},
+ Types: []*Type{},
+ TypeMatchers: []*TypeMatcher{},
+ EnumMatchers: []*EnumMatcher{},
+ Builtins: []*Intrinsic{},
+ UnaryOperators: []*Intrinsic{},
+ BinaryOperators: []*Intrinsic{},
}
}
@@ -84,6 +90,7 @@ type Type struct {
Decl ast.TypeDecl
Name string
DisplayName string
+ Precedence int
}
// TypeMatcher declares a type matcher
@@ -94,6 +101,13 @@ type TypeMatcher struct {
Types []*Type
}
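+// PrecedenceSortedTypes returns the matcher's types, sorted by descending
+// precedence.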
+func (t TypeMatcher) PrecedenceSortedTypes() []*Type {
+ out := make([]*Type, len(t.Types))
+ copy(out, t.Types)
+ sort.Slice(out, func(i, j int) bool { return out[i].Precedence > out[j].Precedence })
+ return out
+}
+
// EnumMatcher declares a enum matcher
type EnumMatcher struct {
TemplateParams []TemplateParam
@@ -121,23 +135,24 @@ type TemplateNumberParam struct {
Name string
}
-// Function describes the overloads of a builtin function
-type Function struct {
+// Intrinsic describes the overloads of a builtin or operator
+type Intrinsic struct {
Name string
Overloads []*Overload
}
-// Overload describes a single overload of a function
+// Overload describes a single overload of a builtin or operator
type Overload struct {
- Decl ast.FunctionDecl
- Function *Function
- TemplateParams []TemplateParam
- OpenTypes []*TemplateTypeParam
- OpenNumbers []TemplateParam
- ReturnType *FullyQualifiedName
- Parameters []Parameter
- CanBeUsedInStage StageUses
- IsDeprecated bool // True if this overload is deprecated
+ Decl ast.IntrinsicDecl
+ Intrinsic *Intrinsic
+ TemplateParams []TemplateParam
+ TemplateTypes []*TemplateTypeParam
+ TemplateNumbers []TemplateParam
+ ReturnType *FullyQualifiedName
+ Parameters []Parameter
+ CanBeUsedInStage StageUses
+ IsDeprecated bool // True if this overload is deprecated
+ ConstEvalFunction string // Name of the function used to evaluate the intrinsic at shader creation time
}
// StageUses describes the stages an overload can be used in
@@ -164,7 +179,13 @@ func (u StageUses) List() []string {
// Format implements the fmt.Formatter interface
func (o Overload) Format(w fmt.State, verb rune) {
- fmt.Fprintf(w, "fn %v", o.Function.Name)
+ switch o.Decl.Kind {
+ case ast.Builtin:
+ fmt.Fprintf(w, "fn ")
+ case ast.Operator:
+ fmt.Fprintf(w, "op ")
+ }
+ fmt.Fprintf(w, "%v", o.Intrinsic.Name)
if len(o.TemplateParams) > 0 {
fmt.Fprintf(w, "<")
for i, t := range o.TemplateParams {
@@ -190,12 +211,16 @@ func (o Overload) Format(w fmt.State, verb rune) {
// Parameter describes a single parameter of a function overload
type Parameter struct {
- Name string
- Type FullyQualifiedName
+ Name string
+ Type FullyQualifiedName
+ IsConst bool // Did this parameter have a @const attribute?
}
// Format implements the fmt.Formatter interface
func (p Parameter) Format(w fmt.State, verb rune) {
+ if p.IsConst {
+ fmt.Fprint(w, "@const ")
+ }
if p.Name != "" {
fmt.Fprintf(w, "%v: ", p.Name)
}
diff --git a/chromium/third_party/dawn/tools/src/cmd/builtin-gen/tok/tok.go b/chromium/third_party/dawn/tools/src/cmd/intrinsic-gen/tok/tok.go
index c15a2359b9e..b34f19ec55a 100644
--- a/chromium/third_party/dawn/tools/src/cmd/builtin-gen/tok/tok.go
+++ b/chromium/third_party/dawn/tools/src/cmd/intrinsic-gen/tok/tok.go
@@ -29,20 +29,40 @@ const (
String Kind = "string"
Match Kind = "match"
Function Kind = "fn"
+ Operator Kind = "op"
+ Constructor Kind = "ctor"
+ Converter Kind = "conv"
Type Kind = "type"
Enum Kind = "enum"
+ And Kind = "&"
+ AndAnd Kind = "&&"
+ Arrow Kind = "->"
+ Attr Kind = "@"
+ Assign Kind = "="
Colon Kind = ":"
Comma Kind = ","
- Lt Kind = "<"
+ Complement Kind = "~"
+ Divide Kind = "/"
+ Equal Kind = "=="
+ Ge Kind = ">="
Gt Kind = ">"
Lbrace Kind = "{"
- Rbrace Kind = "}"
- Ldeco Kind = "[["
- Rdeco Kind = "]]"
+ Le Kind = "<="
Lparen Kind = "("
- Rparen Kind = ")"
+ Lt Kind = "<"
+ Minus Kind = "-"
+ Modulo Kind = "%"
+ Not Kind = "!"
+ NotEqual Kind = "!="
Or Kind = "|"
- Arrow Kind = "->"
+ OrOr Kind = "||"
+ Plus Kind = "+"
+ Rbrace Kind = "}"
+ Rparen Kind = ")"
+ Shl Kind = "<<"
+ Shr Kind = ">>"
+ Star Kind = "*"
+ Xor Kind = "^"
)
// Invalid represents an invalid token
diff --git a/chromium/third_party/dawn/tools/src/cmd/perfmon/main.go b/chromium/third_party/dawn/tools/src/cmd/perfmon/main.go
index 672c24eb9dd..5179d67a38b 100644
--- a/chromium/third_party/dawn/tools/src/cmd/perfmon/main.go
+++ b/chromium/third_party/dawn/tools/src/cmd/perfmon/main.go
@@ -24,10 +24,12 @@ import (
"fmt"
"io"
"log"
+ "math"
"os"
"os/exec"
"path/filepath"
"reflect"
+ "regexp"
"sort"
"strings"
"time"
@@ -35,8 +37,6 @@ import (
"dawn.googlesource.com/dawn/tools/src/bench"
"dawn.googlesource.com/dawn/tools/src/git"
"github.com/andygrunwald/go-gerrit"
- "github.com/go-git/go-git/v5/plumbing/transport"
- "github.com/go-git/go-git/v5/plumbing/transport/http"
"github.com/shirou/gopsutil/cpu"
)
@@ -73,11 +73,11 @@ func run(cfgPath string) error {
return err
}
- tintDir, resultsDir, err := makeWorkingDirs(cfg)
+ dawnDir, resultsDir, err := makeWorkingDirs(cfg)
if err != nil {
return err
}
- tintRepo, err := createOrOpenGitRepo(g, tintDir, cfg.Tint)
+ dawnRepo, err := createOrOpenGitRepo(g, dawnDir, cfg.Dawn)
if err != nil {
return err
}
@@ -96,25 +96,32 @@ func run(cfgPath string) error {
return fmt.Errorf("failed to obtain system info:\n %v", err)
}
+ // Some machines report slightly different CPU clock speeds each reboot
+ // To work around this, quantize the reported speed to the nearest 100MHz
+ for i, s := range sysInfo {
+ sysInfo[i].Mhz = math.Round(s.Mhz/100) * 100
+ }
+
e := env{
cfg: cfg,
git: g,
system: sysInfo,
systemID: hash(sysInfo)[:8],
- tintDir: tintDir,
- buildDir: filepath.Join(tintDir, "out"),
+ dawnDir: dawnDir,
+ buildDir: filepath.Join(dawnDir, "out"),
resultsDir: resultsDir,
- tintRepo: tintRepo,
+ dawnRepo: dawnRepo,
resultsRepo: resultsRepo,
gerrit: gerritClient,
benchmarkCache: map[git.Hash]*bench.Run{},
}
- for true {
+ for {
didSomething, err := e.doSomeWork()
if err != nil {
log.Printf("ERROR: %v", err)
+ log.Printf("Pausing...")
time.Sleep(time.Minute * 10)
continue
}
@@ -123,20 +130,20 @@ func run(cfgPath string) error {
time.Sleep(time.Minute * 5)
}
}
-
- return nil
}
// Config holds the root configuration options for the perfmon tool
type Config struct {
WorkingDir string
RootChange git.Hash
- Tint GitConfig
+ Dawn GitConfig
Results GitConfig
Gerrit GerritConfig
Timeouts TimeoutsConfig
ExternalAccounts []string
BenchmarkRepetitions int
+ BenchmarkMaxTemp float32 // celsius
+ CPUTempSensorName string // Name of the sensor to use for CPU temp
}
// GitConfig holds the configuration options for accessing a git repo
@@ -168,7 +175,7 @@ type HistoricResults struct {
Commits []CommitResults
}
-// CommitResults holds the results of a single tint commit
+// CommitResults holds the results of a single dawn commit
type CommitResults struct {
Commit string
CommitTime time.Time
@@ -178,22 +185,30 @@ type CommitResults struct {
// Benchmark holds the benchmark results for a single test
type Benchmark struct {
- Name string
- Mean float64
- Median float64
- Stddev float64
+ Name string
+ Time float64
+ Repeats int `json:",omitempty"`
+}
+
+// AuthConfig holds the authentication options for accessing a git repo
+type AuthConfig struct {
+ Username string
+ Password string
}
// setDefaults assigns default values to unassigned fields of cfg
func (cfg *Config) setDefaults() {
if cfg.RootChange.IsZero() {
- cfg.RootChange, _ = git.ParseHash("be2362b18c792364c6bf5744db6d3837fbc655a0")
+ cfg.RootChange, _ = git.ParseHash("e72e42d9e0c851311512ca6da4d7b59f0bcc60d9")
}
- cfg.Tint.setDefaults()
+ cfg.Dawn.setDefaults()
cfg.Results.setDefaults()
cfg.Timeouts.setDefaults()
- if cfg.BenchmarkRepetitions < 2 {
- cfg.BenchmarkRepetitions = 2
+ if cfg.BenchmarkRepetitions < 1 {
+ cfg.BenchmarkRepetitions = 1
+ }
+ if cfg.BenchmarkMaxTemp == 0 {
+ cfg.BenchmarkMaxTemp = 50
}
}
@@ -217,30 +232,58 @@ func (cfg *TimeoutsConfig) setDefaults() {
}
}
-// AuthConfig holds the authentication options for accessing a git repo
-type AuthConfig struct {
- Username string
- Password string
+// findCommitResults looks for a CommitResult with the given commit id,
+// returning a pointer to the CommitResult if found, otherwise nil
+func (h *HistoricResults) findCommitResults(commit string) *CommitResults {
+ for i, c := range h.Commits {
+ if c.Commit == commit {
+ return &h.Commits[i]
+ }
+ }
+ return nil
}
-// authMethod returns a http.BasicAuth constructed from the AuthConfig
-func (cfg AuthConfig) authMethod() transport.AuthMethod {
- if cfg.Username != "" || cfg.Password != "" {
- return &http.BasicAuth{Username: cfg.Username, Password: cfg.Password}
+// sort sorts the commit results by commit time, then by commit description
+func (h *HistoricResults) sort() {
+ sort.Slice(h.Commits, func(i, j int) bool {
+ if h.Commits[i].CommitTime.Before(h.Commits[j].CommitTime) {
+ return true
+ }
+ if h.Commits[j].CommitTime.Before(h.Commits[i].CommitTime) {
+ return false
+ }
+ return h.Commits[i].CommitDescription < h.Commits[j].CommitDescription
+ })
+}
+
+// findBenchmark looks for a Benchmark with the given name, returning a
+// pointer to the Benchmark if found, otherwise nil
+func (r *CommitResults) findBenchmark(name string) *Benchmark {
+ for i, b := range r.Benchmarks {
+ if b.Name == name {
+ return &r.Benchmarks[i]
+ }
}
return nil
}
+// sort sorts the benchmarks by name
+func (r *CommitResults) sort() {
+ sort.Slice(r.Benchmarks, func(i, j int) bool {
+ return r.Benchmarks[i].Name < r.Benchmarks[j].Name
+ })
+}
+
// env holds the perfmon main environment state
type env struct {
cfg Config
git *git.Git
system []cpu.InfoStat
systemID string
- tintDir string
+ dawnDir string
buildDir string
resultsDir string
- tintRepo *git.Repository
+ dawnRepo *git.Repository
resultsRepo *git.Repository
gerrit *gerrit.Client
@@ -272,12 +315,21 @@ func (e env) doSomeWork() (bool, error) {
}
if len(changesToBenchmark) > 0 {
- log.Printf("benchmarking %v changes...", len(changesToBenchmark))
+ log.Printf("%v submitted changes to benchmark...", len(changesToBenchmark))
+
+ start := time.Now()
for i, c := range changesToBenchmark {
- log.Printf("benchmarking %v/%v....", i+1, len(changesToBenchmark))
+ if time.Since(start) > time.Minute*15 {
+ // It's been a while since we scanned for review changes.
+ // Take a break from benchmarking submitted changes so we
+ // can scan for review changes to benchmark.
+ log.Printf("benchmarked %v changes", i)
+ return true, nil
+ }
benchRes, err := e.benchmarkTintChange(c)
if err != nil {
- return true, err
+ log.Printf("benchmarking failed: %v", err)
+ benchRes = &bench.Run{}
}
commitRes, err := e.benchmarksToCommitResults(c, *benchRes)
if err != nil {
@@ -291,25 +343,51 @@ func (e env) doSomeWork() (bool, error) {
return true, nil
}
}
+
+ {
+ log.Println("scanning for benchmarks to refine...")
+ changeToBenchmark, err := e.changeToRefineBenchmarks()
+ if err != nil {
+ return true, err
+ }
+
+ if changeToBenchmark != nil {
+ log.Printf("re-benchmarking change '%v'", *changeToBenchmark)
+ benchRes, err := e.benchmarkTintChange(*changeToBenchmark)
+ if err != nil {
+ log.Printf("benchmarking failed: %v", err)
+ benchRes = &bench.Run{}
+ }
+ commitRes, err := e.benchmarksToCommitResults(*changeToBenchmark, *benchRes)
+ if err != nil {
+ return true, err
+ }
+ log.Printf("pushing results...")
+ if err := e.pushUpdatedResults(*commitRes); err != nil {
+ return true, err
+ }
+ return true, nil
+ }
+ }
return false, nil
}
// changesToBenchmark fetches the list of changes that do not currently have
// benchmark results, which should be benchmarked.
func (e env) changesToBenchmark() ([]git.Hash, error) {
- log.Println("syncing tint repo...")
- latest, err := e.tintRepo.Fetch(e.cfg.Tint.Branch, &git.FetchOptions{
- Credentials: e.cfg.Tint.Credentials,
+ log.Println("syncing dawn repo...")
+ latest, err := e.dawnRepo.Fetch(e.cfg.Dawn.Branch, &git.FetchOptions{
+ Credentials: e.cfg.Dawn.Credentials,
})
if err != nil {
return nil, err
}
- allChanges, err := e.tintRepo.Log(&git.LogOptions{
+ allChanges, err := e.dawnRepo.Log(&git.LogOptions{
From: e.cfg.RootChange.String(),
To: latest.String(),
})
if err != nil {
- return nil, fmt.Errorf("failed to obtain tint log:\n %w", err)
+ return nil, fmt.Errorf("failed to obtain dawn log:\n %w", err)
}
changesWithBenchmarks, err := e.changesWithBenchmarks()
if err != nil {
@@ -330,28 +408,103 @@ func (e env) changesToBenchmark() ([]git.Hash, error) {
return changesToBenchmark, nil
}
-// benchmarkTintChange checks out the given commit, fetches the tint third party
-// dependencies, builds tint, then runs the benchmarks, returning the results.
-func (e env) benchmarkTintChange(hash git.Hash) (*bench.Run, error) {
+// changeToRefineBenchmarks scans for the most suitable historic commit to
+// re-benchmark and refine the results. Returns nil if there are no suitable
+// changes.
+func (e env) changeToRefineBenchmarks() (*git.Hash, error) {
+ log.Println("syncing results repo...")
+ if err := fetchAndCheckoutLatest(e.resultsRepo, e.cfg.Results); err != nil {
+ return nil, err
+ }
+
+ _, absPath, err := e.resultsFilePaths()
+ if err != nil {
+ return nil, err
+ }
+
+ results, err := e.loadHistoricResults(absPath)
+ if err != nil {
+ log.Println(fmt.Errorf("WARNING: failed to open result file '%v':\n %w", absPath, err))
+ return nil, nil
+ }
+
+ if len(results.Commits) == 0 {
+ return nil, nil
+ }
+
+ type hashDelta struct {
+ hash git.Hash
+ delta float64
+ }
+ hashDeltas := make([]hashDelta, 0, len(results.Commits))
+ for i, c := range results.Commits {
+ hash, err := git.ParseHash(c.Commit)
+ if err != nil {
+ return nil, err
+ }
+
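+ // Score each commit by how far its benchmark times deviate from the average
+ // of the neighbouring commits' times, damped by 2^Repeats so commits that
+ // have already been re-benchmarked are picked less often.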
+ prev := results.Commits[max(0, i-1)]
+ next := results.Commits[min(len(results.Commits)-1, i+1)]
+ delta, count := 0.0, 0
+ for _, b := range c.Benchmarks {
+ if b.Time == 0 {
+ continue
+ }
+ p, n := b.Time, b.Time
+ if pb := prev.findBenchmark(b.Name); pb != nil {
+ p = pb.Time
+ }
+ if nb := next.findBenchmark(b.Name); nb != nil {
+ n = nb.Time
+ }
+ avr := (p + n) / 2
+ confidence := math.Pow(2, float64(b.Repeats))
+ delta += math.Abs(avr-b.Time) / (b.Time * confidence)
+ count++
+ }
+ if count > 0 {
+ delta = delta / float64(count)
+ hashDeltas = append(hashDeltas, hashDelta{hash, delta})
+ }
+ }
+
+ sort.Slice(hashDeltas, func(i, j int) bool { return hashDeltas[i].delta > hashDeltas[j].delta })
+
+ if len(hashDeltas) == 0 {
+ return nil, nil
+ }
+
+ return &hashDeltas[0].hash, nil
+}
+
+// benchmarkTintChangeIfNotCached first checks the results cache for existing
+// benchmark values for the given change, returning those cached values if hit.
+// If the cache does not contain results for the change, then
+// e.benchmarkTintChange() is called.
+func (e env) benchmarkTintChangeIfNotCached(hash git.Hash) (*bench.Run, error) {
if cached, ok := e.benchmarkCache[hash]; ok {
log.Printf("reusing cached benchmark results of '%v'...", hash)
return cached, nil
}
+ return e.benchmarkTintChange(hash)
+}
- log.Printf("checking out tint at '%v'...", hash)
- if err := checkout(hash, e.tintRepo); err != nil {
+// benchmarkTintChange checks out the given commit, fetches the dawn third party
+// dependencies, builds tint, then runs the benchmarks, returning the results.
+func (e env) benchmarkTintChange(hash git.Hash) (*bench.Run, error) {
+ log.Printf("checking out dawn at '%v'...", hash)
+ if err := checkout(hash, e.dawnRepo); err != nil {
return nil, err
}
- log.Println("fetching tint dependencies...")
- if err := e.fetchTintDeps(); err != nil {
+ log.Println("fetching dawn dependencies...")
+ if err := e.fetchDawnDeps(); err != nil {
return nil, err
}
log.Println("building tint...")
if err := e.buildTint(); err != nil {
return nil, err
}
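+ // Wait for CPU temperatures to settle (see cfg.BenchmarkMaxTemp) before
+ // benchmarking.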
+ if err := e.waitForTempsToSettle(); err != nil {
+ return nil, err
+ }
log.Println("benchmarking tint...")
- run, err := e.benchmarkTint()
+ run, err := e.repeatedlyBenchmarkTint()
if err != nil {
return nil, err
}
@@ -363,45 +516,40 @@ func (e env) benchmarkTintChange(hash git.Hash) (*bench.Run, error) {
// benchmarksToCommitResults converts the benchmarks in the provided bench.Run
// to a CommitResults.
func (e env) benchmarksToCommitResults(hash git.Hash, results bench.Run) (*CommitResults, error) {
- commits, err := e.tintRepo.Log(&git.LogOptions{
- From: hash.String(),
- Count: 1,
+ commits, err := e.dawnRepo.Log(&git.LogOptions{
+ From: hash.String(),
})
- if err != nil || len(commits) != 1 {
- return nil, fmt.Errorf("failed to get commit object '%v' of tint repo:\n %w", hash, err)
+ if err != nil || len(commits) == 0 {
+ return nil, fmt.Errorf("failed to get commit object '%v' of dawn repo:\n %w", hash, err)
+ }
+ commit := commits[len(commits)-1]
+ if commit.Hash != hash {
+ panic(fmt.Errorf("git.Repository.Log({From: %v}) returned:\n%+v", hash, commits))
}
- commit := commits[0]
m := map[string]Benchmark{}
for _, b := range results.Benchmarks {
- benchmark := m[b.Name]
- benchmark.Name = b.Name
- switch b.AggregateType {
- case bench.Mean:
- benchmark.Mean = float64(b.Duration) / float64(time.Second)
- case bench.Median:
- benchmark.Median = float64(b.Duration) / float64(time.Second)
- case bench.Stddev:
- benchmark.Stddev = float64(b.Duration) / float64(time.Second)
+ m[b.Name] = Benchmark{
+ Name: b.Name,
+ Time: float64(b.Duration) / float64(time.Second),
}
- m[b.Name] = benchmark
}
- sorted := make([]Benchmark, 0, len(m))
- for _, b := range m {
- sorted = append(sorted, b)
- }
- sort.Slice(sorted, func(i, j int) bool { return sorted[i].Name < sorted[i].Name })
-
- return &CommitResults{
+ out := &CommitResults{
Commit: commit.Hash.String(),
CommitDescription: commit.Subject,
CommitTime: commit.Date,
- Benchmarks: sorted,
- }, nil
+ Benchmarks: make([]Benchmark, 0, len(m)),
+ }
+ for _, b := range m {
+ out.Benchmarks = append(out.Benchmarks, b)
+ }
+ out.sort()
+
+ return out, nil
}
-// changesWithBenchmarks returns a set of tint changes that we already have
+// changesWithBenchmarks returns a set of dawn changes that we already have
// benchmarks for.
func (e env) changesWithBenchmarks() (map[git.Hash]struct{}, error) {
log.Println("syncing results repo...")
@@ -431,6 +579,9 @@ func (e env) changesWithBenchmarks() (map[git.Hash]struct{}, error) {
return m, nil
}
+// pushUpdatedResults fetches and loads the latest benchmark results, adds or
+// merges the new results 'res' to the file, and then pushes the new results to
+// the server.
func (e env) pushUpdatedResults(res CommitResults) error {
log.Println("syncing results repo...")
if err := fetchAndCheckoutLatest(e.resultsRepo, e.cfg.Results); err != nil {
@@ -448,10 +599,27 @@ func (e env) pushUpdatedResults(res CommitResults) error {
h = &HistoricResults{System: e.system}
}
- h.Commits = append(h.Commits, res)
+ // Are there existing benchmark results for this commit?
+ if existing := h.findCommitResults(res.Commit); existing != nil {
+ // Yes: merge in the new results
+ for _, b := range res.Benchmarks {
+ if e := existing.findBenchmark(b.Name); e != nil {
+ // Benchmark found to merge. Add a weighted contribution to the benchmark value.
+ e.Time = (e.Time*float64(e.Repeats+1) + b.Time) / float64(e.Repeats+2)
+ e.Repeats++
+ } else {
+ // New benchmark? Just append.
+ existing.Benchmarks = append(existing.Benchmarks, b)
+ }
+ }
+ existing.sort()
+ } else {
+ // New benchmark results for this commit. Just append.
+ h.Commits = append(h.Commits, res)
+ }
// Sort the commits by timestamp
- sort.Slice(h.Commits, func(i, j int) bool { return h.Commits[i].CommitTime.Before(h.Commits[j].CommitTime) })
+ h.sort()
// Write the new results to the file
f, err := os.Create(absPath)
@@ -527,20 +695,20 @@ System: %+v`, path, res.System, e.system)
return res, nil
}
-// fetchTintDeps fetches the third party tint dependencies using gclient.
-func (e env) fetchTintDeps() error {
- gclientConfig := filepath.Join(e.tintDir, ".gclient")
+// fetchDawnDeps fetches the third party dawn dependencies using gclient.
+func (e env) fetchDawnDeps() error {
+ gclientConfig := filepath.Join(e.dawnDir, ".gclient")
if _, err := os.Stat(gclientConfig); errors.Is(err, os.ErrNotExist) {
- standalone := filepath.Join(e.tintDir, "scripts", "standalone.gclient")
+ standalone := filepath.Join(e.dawnDir, "scripts", "standalone.gclient")
if err := copyFile(gclientConfig, standalone); err != nil {
return fmt.Errorf("failed to copy '%v' to '%v':\n %w", standalone, gclientConfig, err)
}
}
- if _, err := call(tools.gclient, e.tintDir, e.cfg.Timeouts.Sync,
+ if _, err := call(tools.gclient, e.dawnDir, e.cfg.Timeouts.Sync,
"sync",
"--force",
); err != nil {
- return fmt.Errorf("failed to fetch tint dependencies:\n %w", err)
+ return fmt.Errorf("failed to fetch dawn dependencies:\n %w", err)
}
return nil
}
@@ -551,10 +719,15 @@ func (e env) buildTint() error {
return fmt.Errorf("failed to create build directory at '%v':\n %w", e.buildDir, err)
}
if _, err := call(tools.cmake, e.buildDir, e.cfg.Timeouts.Build,
- e.tintDir,
+ e.dawnDir,
"-GNinja",
"-DCMAKE_CXX_COMPILER_LAUNCHER=ccache",
"-DCMAKE_BUILD_TYPE=Release",
+ "-DCMAKE_BUILD_TESTS=0",
+ "-DCMAKE_BUILD_SAMPLES=0",
+ "-DTINT_BUILD_DOCS=0",
+ "-DTINT_BUILD_SAMPLES=0",
+ "-DTINT_BUILD_TESTS=0",
"-DTINT_BUILD_SPV_READER=1",
"-DTINT_BUILD_WGSL_READER=1",
"-DTINT_BUILD_GLSL_WRITER=1",
@@ -563,8 +736,9 @@ func (e env) buildTint() error {
"-DTINT_BUILD_SPV_WRITER=1",
"-DTINT_BUILD_WGSL_WRITER=1",
"-DTINT_BUILD_BENCHMARKS=1",
+ "-DDAWN_BUILD_SAMPLES=0",
); err != nil {
- return errFailedToBuild{fmt.Errorf("failed to generate tint build config:\n %w", err)}
+ return errFailedToBuild{fmt.Errorf("failed to generate dawn build config:\n %w", err)}
}
if _, err := call(tools.ninja, e.buildDir, e.cfg.Timeouts.Build); err != nil {
return errFailedToBuild{err}
@@ -582,20 +756,71 @@ func (e errFailedToBuild) Error() string {
return fmt.Sprintf("failed to build: %v", e.reason)
}
-// benchmarkTint runs the tint benchmarks, returning the results.
+// errFailedToBenchmark is the error returned by benchmarkTint() if the benchmark failed
+type errFailedToBenchmark struct {
+ // The reason
+ reason error
+}
+
+func (e errFailedToBenchmark) Error() string {
+ return fmt.Sprintf("failed to benchmark: %v", e.reason)
+}
+
+// repeatedlyBenchmarkTint runs the tint benchmarks e.cfg.BenchmarkRepetitions times,
+// returning the averaged results.
+func (e env) repeatedlyBenchmarkTint() (*bench.Run, error) {
+ type durationAndCount struct {
+ duration time.Duration
+ count int
+ }
+
+ var ctx *bench.Context
+ acc := map[string]durationAndCount{}
+ for i := 0; i < e.cfg.BenchmarkRepetitions; i++ {
+ if err := e.waitForTempsToSettle(); err != nil {
+ return nil, err
+ }
+ log.Printf("benchmark pass %v/%v...", (i + 1), e.cfg.BenchmarkRepetitions)
+ run, err := e.benchmarkTint()
+ if err != nil {
+ return nil, err
+ }
+ for _, b := range run.Benchmarks {
+ v := acc[b.Name]
+ v.duration += b.Duration
+ v.count++
+ acc[b.Name] = v
+ }
+ if ctx == nil {
+ ctx = run.Context
+ }
+ }
+
+ out := bench.Run{Context: ctx}
+ for name, dc := range acc {
+ out.Benchmarks = append(out.Benchmarks, bench.Benchmark{
+ Name: name,
+ Duration: dc.duration / time.Duration(dc.count),
+ })
+ }
+
+ return &out, nil
+}
+
+// benchmarkTint runs the tint benchmarks once, returning the results.
func (e env) benchmarkTint() (*bench.Run, error) {
exe := filepath.Join(e.buildDir, "tint-benchmark")
out, err := call(exe, e.buildDir, e.cfg.Timeouts.Benchmark,
"--benchmark_format=json",
- fmt.Sprintf("--benchmark_repetitions=%v", e.cfg.BenchmarkRepetitions),
+ "--benchmark_enable_random_interleaving=true",
)
if err != nil {
- return nil, fmt.Errorf("failed to benchmark tint:\n %w", err)
+ return nil, errFailedToBenchmark{err}
}
results, err := bench.Parse(out)
if err != nil {
- return nil, fmt.Errorf("failed to parse benchmark results:\n %w", err)
+ return nil, errFailedToBenchmark{err}
}
return &results, nil
}
@@ -605,7 +830,7 @@ func (e env) findGerritChangeToBenchmark() (*gerrit.ChangeInfo, error) {
log.Println("querying gerrit for changes...")
results, _, err := e.gerrit.Changes.QueryChanges(&gerrit.QueryChangeOptions{
QueryOptions: gerrit.QueryOptions{
- Query: []string{"project:tint status:open+-age:3d"},
+ Query: []string{"project:dawn status:open+-age:3d"},
Limit: 100,
},
ChangeOptions: gerrit.ChangeOptions{
@@ -643,7 +868,7 @@ func (e env) findGerritChangeToBenchmark() (*gerrit.ChangeInfo, error) {
strings.HasSuffix(change.Labels["Presubmit-Ready"].Approved.Email, "@google.com")) {
permitted := false
for _, email := range e.cfg.ExternalAccounts {
- if strings.ToLower(current.Commit.Committer.Email) == strings.ToLower(email) {
+ if strings.EqualFold(current.Commit.Committer.Email, email) {
permitted = true
break
}
@@ -704,8 +929,8 @@ func (e env) findGerritChangeToBenchmark() (*gerrit.ChangeInfo, error) {
func (e env) benchmarkGerritChange(change gerrit.ChangeInfo) error {
current := change.Revisions[change.CurrentRevision]
log.Printf("fetching '%v'...", current.Ref)
- currentHash, err := e.tintRepo.Fetch(current.Ref, &git.FetchOptions{
- Credentials: e.cfg.Tint.Credentials,
+ currentHash, err := e.dawnRepo.Fetch(current.Ref, &git.FetchOptions{
+ Credentials: e.cfg.Dawn.Credentials,
})
if err != nil {
return err
@@ -730,39 +955,30 @@ func (e env) benchmarkGerritChange(change gerrit.ChangeInfo) error {
newRun, err := e.benchmarkTintChange(currentHash)
if err != nil {
- var ftb errFailedToBuild
- if errors.As(err, &ftb) {
+ log.Printf("ERROR: %v", err)
+ buildErr := errFailedToBuild{}
+ if errors.As(err, &buildErr) {
return postMsg("OWNER", fmt.Sprintf("patchset %v failed to build", current.Number))
}
+ benchErr := errFailedToBenchmark{}
+ if errors.As(err, &benchErr) {
+ return postMsg("OWNER", fmt.Sprintf("patchset %v failed to benchmark", current.Number))
+ }
return err
}
- if _, err := e.tintRepo.Fetch(parent, &git.FetchOptions{
- Credentials: e.cfg.Tint.Credentials,
+ if _, err := e.dawnRepo.Fetch(parent, &git.FetchOptions{
+ Credentials: e.cfg.Dawn.Credentials,
}); err != nil {
return err
}
- parentRun, err := e.benchmarkTintChange(parentHash)
+ parentRun, err := e.benchmarkTintChangeIfNotCached(parentHash)
if err != nil {
return err
}
- // filters the benchmark results to only the mean aggregate values
- meanBenchmarkResults := func(in []bench.Benchmark) []bench.Benchmark {
- out := make([]bench.Benchmark, 0, len(in))
- for _, b := range in {
- if b.AggregateType == bench.Mean {
- out = append(out, b)
- }
- }
- return out
- }
-
- newResults := meanBenchmarkResults(newRun.Benchmarks)
- parentResults := meanBenchmarkResults(parentRun.Benchmarks)
-
const minDiff = time.Microsecond * 50 // Ignore time diffs less than this duration
const minRelDiff = 0.01 // Ignore absolute relative diffs between [1, 1+x]
- diff := bench.Compare(parentResults, newResults, minDiff, minRelDiff)
+ diff := bench.Compare(parentRun.Benchmarks, newRun.Benchmarks, minDiff, minRelDiff)
diffFmt := bench.DiffFormat{
TestName: true,
Delta: true,
@@ -772,7 +988,7 @@ func (e env) benchmarkGerritChange(change gerrit.ChangeInfo) error {
}
msg := &strings.Builder{}
- fmt.Fprintf(msg, "Tint perfmon analysis:\n")
+ fmt.Fprintf(msg, "Perfmon analysis:\n")
fmt.Fprintf(msg, " \n")
fmt.Fprintf(msg, " A: parent change (%v) -> B: patchset %v\n", parent[:7], current.Number)
fmt.Fprintf(msg, " \n")
@@ -787,6 +1003,33 @@ func (e env) benchmarkGerritChange(change gerrit.ChangeInfo) error {
return postMsg(notify, msg.String())
}
+// waitForTempsToSettle waits for the maximum temperature of all sensors to drop
+// below the threshold value specified by the config.
+func (e env) waitForTempsToSettle() error {
+ if e.cfg.CPUTempSensorName == "" {
+ time.Sleep(time.Second * 30)
+ return nil
+ }
+ const timeout = 5 * time.Minute
+ start := time.Now()
+ for {
+ temp, err := maxTemp(e.cfg.CPUTempSensorName)
+ if err != nil {
+ return fmt.Errorf("failed to obtain system temeratures: %v", err)
+ }
+ if temp < e.cfg.BenchmarkMaxTemp {
+ log.Printf("temperatures settled. current: %v°C", temp)
+ return nil
+ }
+ if time.Since(start) > timeout {
+ log.Printf("timeout waiting for temperatures to settle. current: %v°C", temp)
+ return nil
+ }
+ log.Printf("waiting for temperatures to settle. current: %v°C, max: %v°C", temp, e.cfg.BenchmarkMaxTemp)
+ time.Sleep(time.Second * 10)
+ }
+}
+
// createOrOpenGitRepo creates a new local repo by cloning cfg.URL into
// filepath, or opens the existing repo at filepath.
func createOrOpenGitRepo(g *git.Git, filepath string, cfg GitConfig) (*git.Repository, error) {
@@ -818,8 +1061,8 @@ func loadConfig(path string) (Config, error) {
return cfg, nil
}
-// makeWorkingDirs builds the tint repo and results repo directories.
-func makeWorkingDirs(cfg Config) (tintDir, resultsDir string, err error) {
+// makeWorkingDirs creates the dawn repo and results repo directories.
+func makeWorkingDirs(cfg Config) (dawnDir, resultsDir string, err error) {
wd, err := expandHomeDir(cfg.WorkingDir)
if err != nil {
return "", "", err
@@ -827,15 +1070,15 @@ func makeWorkingDirs(cfg Config) (tintDir, resultsDir string, err error) {
if err := os.MkdirAll(wd, 0777); err != nil {
return "", "", fmt.Errorf("failed to create working directory '%v':\n %w", wd, err)
}
- tintDir = filepath.Join(wd, "tint")
- if err := os.MkdirAll(tintDir, 0777); err != nil {
- return "", "", fmt.Errorf("failed to create working tint directory '%v':\n %w", tintDir, err)
+ dawnDir = filepath.Join(wd, "dawn")
+ if err := os.MkdirAll(dawnDir, 0777); err != nil {
+ return "", "", fmt.Errorf("failed to create working dawn directory '%v':\n %w", dawnDir, err)
}
resultsDir = filepath.Join(wd, "results")
if err := os.MkdirAll(resultsDir, 0777); err != nil {
return "", "", fmt.Errorf("failed to create working results directory '%v':\n %w", resultsDir, err)
}
- return tintDir, resultsDir, nil
+ return dawnDir, resultsDir, nil
}
// fetchAndCheckoutLatest calls fetch(cfg.Branch) followed by checkoutLatest().
@@ -882,6 +1125,7 @@ var tools struct {
gclient string
git string
ninja string
+ sensors string
}
// findTools looks for the file paths for executables used by this tool,
@@ -896,6 +1140,7 @@ func findTools() error {
{"gclient", &tools.gclient},
{"git", &tools.git},
{"ninja", &tools.ninja},
+ {"sensors", &tools.sensors},
} {
path, err := exec.LookPath(tool.name)
if err != nil {
@@ -922,6 +1167,27 @@ func copyFile(dstPath, srcPath string) error {
return err
}
+// The regular expression to parse a temperature from 'sensors'
+var reTemp = regexp.MustCompile("([0-9]+.[0-9])°C")
+
+// maxTemp returns the maximum sensor temperature, in degrees Celsius, reported by 'sensors'
+func maxTemp(sensorName string) (float32, error) {
+ output, err := call(tools.sensors, "", time.Second*2, sensorName)
+ if err != nil {
+ return 0, err
+ }
+ var maxTemp float32
+ for _, match := range reTemp.FindAllStringSubmatch(output, -1) {
+ var temp float32
+ if _, err := fmt.Sscanf(match[1], "%f", &temp); err == nil {
+ if temp > maxTemp {
+ maxTemp = temp
+ }
+ }
+ }
+ return maxTemp, nil
+}
+
// call invokes the executable exe in the current working directory wd, with
// the provided arguments.
// If the executable does not complete within the timeout duration, then an
@@ -945,3 +1211,17 @@ func hash(o interface{}) string {
hash.Write([]byte(str))
return hex.EncodeToString(hash.Sum(nil))[:8]
}
+
+func min(a, b int) int {
+ if a < b {
+ return a
+ }
+ return b
+}
+
+func max(a, b int) int {
+ if a > b {
+ return a
+ }
+ return b
+}
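
For context on the weighted merge performed in pushUpdatedResults above, here is a minimal, self-contained Go sketch of the same running-average update in isolation. The mergedBenchmark type and the sample values are illustrative only (not part of the change); the point is that folding each new sample in with weight 1/(Repeats+2) keeps Time equal to the mean of all samples seen so far.

package main

import "fmt"

// mergedBenchmark mirrors the Time/Repeats bookkeeping used by
// pushUpdatedResults (names are illustrative, not the real type).
type mergedBenchmark struct {
	Time    float64 // running mean of all samples seen so far, in seconds
	Repeats int     // number of merges performed after the first sample
}

// merge folds a new sample into the running mean, using the same
// weighted update as pushUpdatedResults.
func (m *mergedBenchmark) merge(sample float64) {
	m.Time = (m.Time*float64(m.Repeats+1) + sample) / float64(m.Repeats+2)
	m.Repeats++
}

func main() {
	b := mergedBenchmark{Time: 1.00} // first result: one sample, Repeats == 0
	for _, s := range []float64{1.10, 0.90, 1.06} {
		b.merge(s)
	}
	// (1.00 + 1.10 + 0.90 + 1.06) / 4 = 1.015
	fmt.Printf("mean=%.3f repeats=%d\n", b.Time, b.Repeats) // mean=1.015 repeats=3
}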
diff --git a/chromium/third_party/dawn/tools/src/container/set.go b/chromium/third_party/dawn/tools/src/container/set.go
index c48a649a336..4adde5c97e3 100644
--- a/chromium/third_party/dawn/tools/src/container/set.go
+++ b/chromium/third_party/dawn/tools/src/container/set.go
@@ -14,7 +14,10 @@
package container
-import "sort"
+import (
+ "fmt"
+ "sort"
+)
// Set is a generic unordered set, which wrap's go's builtin 'map'.
// T is the set key, which must match the 'key' constraint.
@@ -68,7 +71,7 @@ func (s Set[T]) Contains(item T) bool {
return found
}
-// Contains returns true if the set contains all the items in o
+// ContainsAll returns true if the set contains all the items in o
func (s Set[T]) ContainsAll(o Set[T]) bool {
for item := range o {
if !s.Contains(item) {
@@ -78,6 +81,16 @@ func (s Set[T]) ContainsAll(o Set[T]) bool {
return true
}
+// ContainsAny returns true if the set contains any of the items in o
+func (s Set[T]) ContainsAny(o Set[T]) bool {
+ for item := range o {
+ if s.Contains(item) {
+ return true
+ }
+ }
+ return false
+}
+
// Intersection returns a new set containing only the items found in both s and o
func (s Set[T]) Intersection(o Set[T]) Set[T] {
out := NewSet[T]()
@@ -98,3 +111,24 @@ func (s Set[T]) List() []T {
sort.Slice(out, func(i, j int) bool { return out[i] < out[j] })
return out
}
+
+// One returns a random item from the set, or an empty item if the set is empty.
+func (s Set[T]) One() T {
+ for item := range s {
+ return item
+ }
+ var zero T
+ return zero
+}
+
+// Format writes the Set to the fmt.State
+func (s Set[T]) Format(f fmt.State, verb rune) {
+ fmt.Fprint(f, "[")
+ for i, item := range s.List() {
+ if i > 0 {
+ fmt.Fprint(f, ", ")
+ }
+ fmt.Fprint(f, item)
+ }
+ fmt.Fprint(f, "]")
+}
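
The set_test.go changes that follow exercise the new Set helpers thoroughly; purely as a quick feel for ContainsAny, One and the fmt.Formatter implementation added above, here is a hedged usage sketch (it assumes the dawn.googlesource.com/dawn module is importable; the element values are made up).

package main

import (
	"fmt"

	"dawn.googlesource.com/dawn/tools/src/container"
)

func main() {
	s := container.NewSet("mac", "win")
	fmt.Println(s) // [mac, win] — Format prints the sorted List()
	fmt.Println(s.ContainsAny(container.NewSet("linux", "win"))) // true
	one := s.One() // an arbitrary element of the set
	fmt.Println(one == "mac" || one == "win") // true
}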
diff --git a/chromium/third_party/dawn/tools/src/container/set_test.go b/chromium/third_party/dawn/tools/src/container/set_test.go
index ff1e28f6ba3..35ebd4f2391 100644
--- a/chromium/third_party/dawn/tools/src/container/set_test.go
+++ b/chromium/third_party/dawn/tools/src/container/set_test.go
@@ -15,6 +15,7 @@
package container_test
import (
+ "fmt"
"testing"
"dawn.googlesource.com/dawn/tools/src/container"
@@ -122,6 +123,39 @@ func TestSetContainsAll(t *testing.T) {
expectEq(t, `s.ContainsAll("c", "a", "b")`, s.ContainsAll(S("c", "a", "b")), true)
}
+func TestSetContainsAny(t *testing.T) {
+ S := container.NewSet[string]
+
+ s := container.NewSet[string]()
+ s.Add("c")
+ expectEq(t, `s.ContainsAny("a")`, s.ContainsAny(S("a")), false)
+ expectEq(t, `s.ContainsAny("b")`, s.ContainsAny(S("b")), false)
+ expectEq(t, `s.ContainsAny("c")`, s.ContainsAny(S("c")), true)
+ expectEq(t, `s.ContainsAny("a", "b")`, s.ContainsAny(S("a", "b")), false)
+ expectEq(t, `s.ContainsAny("b", "c")`, s.ContainsAny(S("b", "c")), true)
+ expectEq(t, `s.ContainsAny("c", "a")`, s.ContainsAny(S("c", "a")), true)
+ expectEq(t, `s.ContainsAny("c", "a", "b")`, s.ContainsAny(S("c", "a", "b")), true)
+
+ s.Add("a")
+ expectEq(t, `s.ContainsAny("a")`, s.ContainsAny(S("a")), true)
+ expectEq(t, `s.ContainsAny("b")`, s.ContainsAny(S("b")), false)
+ expectEq(t, `s.ContainsAny("c")`, s.ContainsAny(S("c")), true)
+ expectEq(t, `s.ContainsAny("a", "b")`, s.ContainsAny(S("a", "b")), true)
+ expectEq(t, `s.ContainsAny("b", "c")`, s.ContainsAny(S("b", "c")), true)
+ expectEq(t, `s.ContainsAny("c", "a")`, s.ContainsAny(S("c", "a")), true)
+ expectEq(t, `s.ContainsAny("c", "a", "b")`, s.ContainsAny(S("c", "a", "b")), true)
+
+ s.Remove("c")
+ s.Add("b")
+ expectEq(t, `s.ContainsAny("a")`, s.ContainsAny(S("a")), true)
+ expectEq(t, `s.ContainsAny("b")`, s.ContainsAny(S("b")), true)
+ expectEq(t, `s.ContainsAny("c")`, s.ContainsAny(S("c")), false)
+ expectEq(t, `s.ContainsAny("a", "b")`, s.ContainsAny(S("a", "b")), true)
+ expectEq(t, `s.ContainsAny("b", "c")`, s.ContainsAny(S("b", "c")), true)
+ expectEq(t, `s.ContainsAny("c", "a")`, s.ContainsAny(S("c", "a")), true)
+ expectEq(t, `s.ContainsAny("c", "a", "b")`, s.ContainsAny(S("c", "a", "b")), true)
+}
+
func TestSetIntersection(t *testing.T) {
a := container.NewSet(1, 3, 4, 6)
b := container.NewSet(2, 3, 4, 5)
@@ -143,3 +177,19 @@ func TestSetRemoveAll(t *testing.T) {
expectEq(t, "len(s)", len(s), 1)
expectEq(t, "s.List()", s.List(), []string{"b"})
}
+
+func TestSetOne(t *testing.T) {
+ expectEq(t, "NewSet[string]().One()", container.NewSet[string]().One(), "")
+ expectEq(t, `NewSet("x").One()`, container.NewSet("x").One(), "x")
+ if got := container.NewSet("x", "y").One(); got != "x" && got != "y" {
+ t.Errorf(`NewSet("x", "y").One() returned "%v"`, got)
+ }
+}
+
+func TestFormat(t *testing.T) {
+ expectEq(t, "NewSet[string]()", fmt.Sprint(container.NewSet[string]()), "[]")
+ expectEq(t, `NewSet("x")`, fmt.Sprint(container.NewSet("x")), `[x]`)
+ expectEq(t, `NewSet(1)`, fmt.Sprint(container.NewSet(1)), `[1]`)
+ expectEq(t, `NewSet("y", "x")`, fmt.Sprint(container.NewSet("y", "x")), `[x, y]`)
+ expectEq(t, `NewSet(3, 1, 2)`, fmt.Sprint(container.NewSet(3, 1, 2)), `[1, 2, 3]`)
+}
diff --git a/chromium/third_party/dawn/tools/src/cts/expectations/diagnostic.go b/chromium/third_party/dawn/tools/src/cts/expectations/diagnostic.go
new file mode 100644
index 00000000000..c7059aa1057
--- /dev/null
+++ b/chromium/third_party/dawn/tools/src/cts/expectations/diagnostic.go
@@ -0,0 +1,56 @@
+// Copyright 2022 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expectations
+
+import (
+ "fmt"
+ "strings"
+)
+
+// Severity is an enumerator of diagnostic severity
+type Severity string
+
+const (
+ Error Severity = "error"
+ Warning Severity = "warning"
+ Note Severity = "note"
+)
+
+// Diagnostic holds a line, column, message and severity.
+// Diagnostic also implements the 'error' interface.
+type Diagnostic struct {
+ Severity Severity
+ Line int // 1-based
+ Column int // 1-based
+ Message string
+}
+
+func (e Diagnostic) String() string {
+ sb := &strings.Builder{}
+ if e.Line > 0 {
+ fmt.Fprintf(sb, "%v", e.Line)
+ if e.Column > 0 {
+ fmt.Fprintf(sb, ":%v", e.Column)
+ }
+ sb.WriteString(" ")
+ }
+ sb.WriteString(string(e.Severity))
+ sb.WriteString(": ")
+ sb.WriteString(e.Message)
+ return sb.String()
+}
+
+// Error implements the 'error' interface.
+func (e Diagnostic) Error() string { return e.String() }
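
A quick illustration of the Diagnostic formatting added above, as a hedged sketch (it assumes the dawn.googlesource.com/dawn module is importable; the line, column and message values are invented): a diagnostic at line 3, column 9 with severity Error prints with the 1-based line:column prefix produced by String().

package main

import (
	"fmt"

	"dawn.googlesource.com/dawn/tools/src/cts/expectations"
)

func main() {
	// Hypothetical parse failure at line 3, column 9.
	d := expectations.Diagnostic{
		Severity: expectations.Error,
		Line:     3,
		Column:   9,
		Message:  "expected status",
	}
	fmt.Println(d) // prints: 3:9 error: expected status

	// Diagnostic also satisfies the error interface, so it can be
	// returned directly from Parse() and friends.
	var _ error = d
}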
diff --git a/chromium/third_party/dawn/tools/src/cts/expectations/expectations.go b/chromium/third_party/dawn/tools/src/cts/expectations/expectations.go
new file mode 100644
index 00000000000..ae6034c568a
--- /dev/null
+++ b/chromium/third_party/dawn/tools/src/cts/expectations/expectations.go
@@ -0,0 +1,230 @@
+// Copyright 2022 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package expectations provides types and helpers for parsing, updating and
+// writing WebGPU expectations files.
+//
+// See <dawn>/webgpu-cts/expectations.txt for more information.
+package expectations
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "strings"
+
+ "dawn.googlesource.com/dawn/tools/src/cts/result"
+)
+
+// Content holds the full content of an expectations file.
+type Content struct {
+ Chunks []Chunk
+ Tags Tags
+}
+
+// Chunk is an optional comment followed by a run of expectations.
+// A chunk ends at the first blank line, or at the transition from an
+// expectation to a line-comment.
+type Chunk struct {
+ Comments []string // Line comments at the top of the chunk
+ Expectations []Expectation // Expectations for the chunk
+}
+
+// Tags holds the tag information parsed in the comments between the
+// 'BEGIN TAG HEADER' and 'END TAG HEADER' markers.
+// Tags are grouped in tag-sets.
+type Tags struct {
+ // The tag-sets, in declaration order
+ Sets []TagSet
+ // Map of tag name to tag-set and priority
+ ByName map[string]TagSetAndPriority
+}
+
+// TagSet is a named collection of tags, parsed from the 'TAG HEADER'
+type TagSet struct {
+ Name string // Name of the tag-set
+ Tags result.Tags // Tags belonging to the tag-set
+}
+
+// TagSetAndPriority is used by the Tags.ByName map to identify which tag-set
+// a tag belongs to.
+type TagSetAndPriority struct {
+ // The tag-set that the tag belongs to.
+ Set string
+ // The declared order of tag in the set.
+ // An expectation may only list a single tag from any set. This priority
+ // is used to decide which tag(s) should be dropped when multiple tags are
+ // found in the same set.
+ Priority int
+}
+
+// Expectation holds a single expectation line
+type Expectation struct {
+ Line int // The 1-based line number of the expectation
+ Bug string // The associated bug URL for this expectation
+ Tags result.Tags // Tags used to filter the expectation
+ Query string // The CTS query
+ Status []string // The expected result status
+ Comment string // Optional comment at end of line
+}
+
+// Load loads the expectation file at 'path', returning a Content.
+func Load(path string) (Content, error) {
+ content, err := ioutil.ReadFile(path)
+ if err != nil {
+ return Content{}, err
+ }
+ ex, err := Parse(string(content))
+ if err != nil {
+ return Content{}, err
+ }
+ return ex, nil
+}
+
+// Save saves the Content file to 'path'.
+func (c Content) Save(path string) error {
+ f, err := os.Create(path)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ return c.Write(f)
+}
+
+// Clone makes a deep-copy of the Content.
+func (c Content) Clone() Content {
+ chunks := make([]Chunk, len(c.Chunks))
+ for i, c := range c.Chunks {
+ chunks[i] = c.Clone()
+ }
+ return Content{chunks, c.Tags.Clone()}
+}
+
+// Empty returns true if the Content has no chunks.
+func (c Content) Empty() bool {
+ return len(c.Chunks) == 0
+}
+
+// EndsInBlankLine returns true if the Content ends with a blank line
+func (c Content) EndsInBlankLine() bool {
+ return !c.Empty() && c.Chunks[len(c.Chunks)-1].IsBlankLine()
+}
+
+// MaybeAddBlankLine appends a new blank line to the content, if the content
+// does not already end in a blank line.
+func (c *Content) MaybeAddBlankLine() {
+ if !c.Empty() && !c.EndsInBlankLine() {
+ c.Chunks = append(c.Chunks, Chunk{})
+ }
+}
+
+// Write writes the Content, in textual form, to the writer w.
+func (c Content) Write(w io.Writer) error {
+ for _, chunk := range c.Chunks {
+ if len(chunk.Comments) == 0 && len(chunk.Expectations) == 0 {
+ if _, err := fmt.Fprintln(w); err != nil {
+ return err
+ }
+ continue
+ }
+ for _, comment := range chunk.Comments {
+ if _, err := fmt.Fprintln(w, comment); err != nil {
+ return err
+ }
+ }
+ for _, expectation := range chunk.Expectations {
+ parts := []string{}
+ if expectation.Bug != "" {
+ parts = append(parts, expectation.Bug)
+ }
+ if len(expectation.Tags) > 0 {
+ parts = append(parts, fmt.Sprintf("[ %v ]", strings.Join(expectation.Tags.List(), " ")))
+ }
+ parts = append(parts, expectation.Query)
+ parts = append(parts, fmt.Sprintf("[ %v ]", strings.Join(expectation.Status, " ")))
+ if expectation.Comment != "" {
+ parts = append(parts, expectation.Comment)
+ }
+ if _, err := fmt.Fprintln(w, strings.Join(parts, " ")); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+// String returns the Content as a string.
+func (c Content) String() string {
+ sb := strings.Builder{}
+ c.Write(&sb)
+ return sb.String()
+}
+
+// IsCommentOnly returns true if the Chunk contains comments and no expectations.
+func (c Chunk) IsCommentOnly() bool {
+ return len(c.Comments) > 0 && len(c.Expectations) == 0
+}
+
+// IsBlankLine returns true if the Chunk has no comments or expectations.
+func (c Chunk) IsBlankLine() bool {
+ return len(c.Comments) == 0 && len(c.Expectations) == 0
+}
+
+// Clone returns a deep-copy of the Chunk
+func (c Chunk) Clone() Chunk {
+ comments := make([]string, len(c.Comments))
+ for i, c := range c.Comments {
+ comments[i] = c
+ }
+ expectations := make([]Expectation, len(c.Expectations))
+ for i, e := range c.Expectations {
+ expectations[i] = e.Clone()
+ }
+ return Chunk{comments, expectations}
+}
+
+// Clone returns a deep-copy of the Tags
+func (t Tags) Clone() Tags {
+ out := Tags{}
+ if t.ByName != nil {
+ out.ByName = make(map[string]TagSetAndPriority, len(t.ByName))
+ for n, t := range t.ByName {
+ out.ByName[n] = t
+ }
+ }
+ if t.Sets != nil {
+ out.Sets = make([]TagSet, len(t.Sets))
+ copy(out.Sets, t.Sets)
+ }
+ return out
+}
+
+// Clone makes a deep-copy of the Expectation.
+func (e Expectation) Clone() Expectation {
+ out := Expectation{
+ Line: e.Line,
+ Bug: e.Bug,
+ Query: e.Query,
+ Comment: e.Comment,
+ }
+ if e.Tags != nil {
+ out.Tags = e.Tags.Clone()
+ }
+ if e.Status != nil {
+ out.Status = append([]string{}, e.Status...)
+ }
+ return out
+}
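
As a sanity check on the Write/String serialization above, here is a hedged sketch (again assuming the dawn module and its result package are importable; the bug, tag and query values are invented) that builds a one-chunk Content and prints it. The expectation line renders as bug, tags, query and status separated by single spaces, preceded by its comment line.

package main

import (
	"fmt"

	"dawn.googlesource.com/dawn/tools/src/cts/expectations"
	"dawn.googlesource.com/dawn/tools/src/cts/result"
)

func main() {
	c := expectations.Content{
		Chunks: []expectations.Chunk{{
			Comments: []string{"# Hypothetical triaged failure"},
			Expectations: []expectations.Expectation{{
				Bug:    "crbug.com/123",
				Tags:   result.NewTags("Win"),
				Query:  "webgpu:a,b:*",
				Status: []string{"FAIL"},
			}},
		}},
	}
	fmt.Print(c.String())
	// Expected output:
	// # Hypothetical triaged failure
	// crbug.com/123 [ Win ] webgpu:a,b:* [ FAIL ]
}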
diff --git a/chromium/third_party/dawn/tools/src/cts/expectations/parse.go b/chromium/third_party/dawn/tools/src/cts/expectations/parse.go
new file mode 100644
index 00000000000..aafb800a4f2
--- /dev/null
+++ b/chromium/third_party/dawn/tools/src/cts/expectations/parse.go
@@ -0,0 +1,298 @@
+// Copyright 2022 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expectations
+
+import (
+ "strings"
+
+ "dawn.googlesource.com/dawn/tools/src/cts/result"
+)
+
+const (
+ tagHeaderStart = `BEGIN TAG HEADER`
+ tagHeaderEnd = `END TAG HEADER`
+)
+
+// Parse parses an expectations file, returning the Content
+func Parse(body string) (Content, error) {
+ // LineType is an enumerator classifying the 'type' of the line.
+ type LineType int
+ const (
+ comment LineType = iota // The line starts with the '#'
+ expectation // The line declares an expectation
+ blank // The line is blank
+ )
+
+ // classifyLine returns the LineType for the given line
+ classifyLine := func(line string) LineType {
+ line = strings.TrimSpace(line)
+ switch {
+ case line == "":
+ return blank
+ case strings.HasPrefix(line, "#"):
+ return comment
+ default:
+ return expectation
+ }
+ }
+
+ content := Content{} // The output content
+
+ var pending Chunk // The current Chunk being parsed
+
+ // flush completes the current chunk, appending it to 'content'
+ flush := func() {
+ parseTags(&content.Tags, pending.Comments)
+ content.Chunks = append(content.Chunks, pending)
+ pending = Chunk{}
+ }
+
+ lastLineType := blank // The type of the last parsed line
+ for i, l := range strings.Split(body, "\n") { // For each line...
+ lineIdx := i + 1 // line index
+ lineType := classifyLine(l)
+
+ // Compare the new line type to the last.
+ // Flush the pending chunk if needed.
+ if i > 0 {
+ switch {
+ case
+ lastLineType == blank && lineType != blank, // blank -> !blank
+ lastLineType != blank && lineType == blank, // !blank -> blank
+ lastLineType == expectation && lineType != expectation: // expectation -> comment
+ flush()
+ }
+ }
+
+ lastLineType = lineType
+
+ // Handle blank lines and comments.
+ switch lineType {
+ case blank:
+ continue
+ case comment:
+ pending.Comments = append(pending.Comments, l)
+ continue
+ }
+
+ // Below this point, we're dealing with an expectation
+
+ // Split the line by whitespace to form a list of tokens
+ type Token struct {
+ str string
+ start, end int // line offsets (0-based)
+ }
+ tokens := []Token{}
+ if len(l) > 0 { // Parse the tokens
+ inToken, s := false, 0
+ for i, c := range l {
+ if c == ' ' {
+ if inToken {
+ tokens = append(tokens, Token{l[s:i], s, i})
+ inToken = false
+ }
+ } else if !inToken {
+ s = i
+ inToken = true
+ }
+ }
+ if inToken {
+ tokens = append(tokens, Token{l[s:], s, len(l)})
+ }
+ }
+
+ // syntaxErr is a helper for returning an error Diagnostic with the current
+ // line and column index.
+ syntaxErr := func(at Token, msg string) error {
+ columnIdx := at.start + 1
+ if columnIdx == 1 {
+ columnIdx = len(l) + 1
+ }
+ return Diagnostic{Error, lineIdx, columnIdx, msg}
+ }
+
+ // peek returns the next token without consuming it.
+ // If there are no more tokens then an empty Token is returned.
+ peek := func() Token {
+ if len(tokens) > 0 {
+ return tokens[0]
+ }
+ return Token{}
+ }
+
+ // next returns the next token, consuming it and incrementing the
+ // column index.
+ // If there are no more tokens then an empty Token is returned.
+ next := func() Token {
+ if len(tokens) > 0 {
+ tok := tokens[0]
+ tokens = tokens[1:]
+ return tok
+ }
+ return Token{}
+ }
+
+ match := func(str string) bool {
+ if peek().str != str {
+ return false
+ }
+ next()
+ return true
+ }
+
+ // tags parses a [ tag ] block.
+ tags := func(use string) (result.Tags, error) {
+ if !match("[") {
+ return result.Tags{}, nil
+ }
+ out := result.NewTags()
+ for {
+ t := next()
+ switch t.str {
+ case "]":
+ return out, nil
+ case "":
+ return result.Tags{}, syntaxErr(t, "expected ']' for "+use)
+ default:
+ out.Add(t.str)
+ }
+ }
+ }
+
+ // Parse the optional bug
+ var bug string
+ if strings.HasPrefix(peek().str, "crbug.com") {
+ bug = next().str
+ }
+
+ // Parse the optional test tags
+ testTags, err := tags("tags")
+ if err != nil {
+ return Content{}, err
+ }
+
+ // Parse the query
+ if t := peek(); t.str == "" || t.str[0] == '#' || t.str[0] == '[' {
+ return Content{}, syntaxErr(t, "expected test query")
+ }
+ query := next().str
+
+ // Parse the expected status
+ if t := peek(); !strings.HasPrefix(t.str, "[") {
+ return Content{}, syntaxErr(t, "expected status")
+ }
+ status, err := tags("status")
+ if err != nil {
+ return Content{}, err
+ }
+
+ // Parse any optional trailing comment
+ comment := ""
+ if t := peek(); strings.HasPrefix(t.str, "#") {
+ comment = l[t.start:]
+ }
+
+ // Append the expectation to the list.
+ pending.Expectations = append(pending.Expectations, Expectation{
+ Line: lineIdx,
+ Bug: bug,
+ Tags: testTags,
+ Query: query,
+ Status: status.List(),
+ Comment: comment,
+ })
+ }
+
+ if lastLineType != blank {
+ flush()
+ }
+
+ return content, nil
+}
+
+// parseTags parses the tag information found between tagHeaderStart and
+// tagHeaderEnd comments.
+func parseTags(tags *Tags, lines []string) {
+ // Flags for whether we're currently parsing a TAG HEADER and whether we're
+ // also within a tag-set.
+ inTagsHeader, inTagSet := false, false
+ tagSet := TagSet{} // The currently parsed tag-set
+ for _, line := range lines {
+ line = strings.TrimSpace(strings.TrimLeft(strings.TrimSpace(line), "#"))
+ if strings.Contains(line, tagHeaderStart) {
+ if tags.ByName == nil {
+ *tags = Tags{
+ ByName: map[string]TagSetAndPriority{},
+ Sets: []TagSet{},
+ }
+ }
+ inTagsHeader = true
+ continue
+ }
+ if strings.Contains(line, tagHeaderEnd) {
+ return // Reached the end of the TAG HEADER
+ }
+ if !inTagsHeader {
+ continue // Still looking for a tagHeaderStart
+ }
+
+ // Below this point, we're in a TAG HEADER.
+ tokens := removeEmpty(strings.Split(line, " "))
+ for len(tokens) > 0 {
+ if inTagSet {
+ // Parsing tags in a tag-set (between the '[' and ']')
+ if tokens[0] == "]" {
+ // End of the tag-set.
+ tags.Sets = append(tags.Sets, tagSet)
+ inTagSet = false
+ break
+ } else {
+ // Still inside the tag-set. Consume the tag.
+ tag := tokens[0]
+ tags.ByName[tag] = TagSetAndPriority{
+ Set: tagSet.Name,
+ Priority: len(tagSet.Tags),
+ }
+ tagSet.Tags.Add(tag)
+ }
+ tokens = tokens[1:]
+ } else {
+ // Outside of tag-set. Scan for 'tags: ['
+ if len(tokens) > 2 && tokens[0] == "tags:" && tokens[1] == "[" {
+ inTagSet = true
+ tagSet.Tags = result.NewTags()
+ tokens = tokens[2:] // Skip 'tags:' and '['
+ } else {
+ // Tag set names are on their own line.
+ // Remember the content of the line, in case the next line
+ // starts a tag-set.
+ tagSet.Name = strings.Join(tokens, " ")
+ break
+ }
+ }
+ }
+ }
+}
+
+// removeEmpty returns the list of strings with all empty strings removed.
+func removeEmpty(in []string) []string {
+ out := make([]string, 0, len(in))
+ for _, s := range in {
+ if s != "" {
+ out = append(out, s)
+ }
+ }
+ return out
+}
diff --git a/chromium/third_party/dawn/tools/src/cts/expectations/parse_test.go b/chromium/third_party/dawn/tools/src/cts/expectations/parse_test.go
new file mode 100644
index 00000000000..041080b5c78
--- /dev/null
+++ b/chromium/third_party/dawn/tools/src/cts/expectations/parse_test.go
@@ -0,0 +1,472 @@
+// Copyright 2022 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expectations_test
+
+import (
+ "testing"
+
+ "dawn.googlesource.com/dawn/tools/src/cts/expectations"
+ "dawn.googlesource.com/dawn/tools/src/cts/result"
+ "github.com/google/go-cmp/cmp"
+)
+
+func TestParse(t *testing.T) {
+ type Test struct {
+ name string
+ in string
+ expect expectations.Content
+ expectErr string
+ }
+ for _, test := range []Test{
+ {
+ name: "empty",
+ in: ``,
+ expect: expectations.Content{},
+ }, /////////////////////////////////////////////////////////////////////
+ {
+ name: "single line comment",
+ in: `# a comment`,
+ expect: expectations.Content{
+ Chunks: []expectations.Chunk{
+ {Comments: []string{`# a comment`}},
+ },
+ },
+ }, /////////////////////////////////////////////////////////////////////
+ {
+ name: "single line comment, followed by newline",
+ in: `# a comment
+`,
+ expect: expectations.Content{
+ Chunks: []expectations.Chunk{
+ {Comments: []string{`# a comment`}},
+ },
+ },
+ }, /////////////////////////////////////////////////////////////////////
+ {
+ name: "newline, followed by single line comment",
+ in: `
+# a comment`,
+ expect: expectations.Content{
+ Chunks: []expectations.Chunk{
+ {},
+ {Comments: []string{`# a comment`}},
+ },
+ },
+ }, /////////////////////////////////////////////////////////////////////
+ {
+ name: "comments separated by single newline",
+ in: `# comment 1
+# comment 2`,
+ expect: expectations.Content{
+ Chunks: []expectations.Chunk{
+ {
+ Comments: []string{
+ `# comment 1`,
+ `# comment 2`,
+ },
+ },
+ },
+ },
+ }, /////////////////////////////////////////////////////////////////////
+ {
+ name: "comments separated by two newlines",
+ in: `# comment 1
+
+# comment 2`,
+ expect: expectations.Content{
+ Chunks: []expectations.Chunk{
+ {Comments: []string{`# comment 1`}},
+ {},
+ {Comments: []string{`# comment 2`}},
+ },
+ },
+ }, /////////////////////////////////////////////////////////////////////
+ {
+ name: "comments separated by multiple newlines",
+ in: `# comment 1
+
+
+
+# comment 2`,
+ expect: expectations.Content{
+ Chunks: []expectations.Chunk{
+ {Comments: []string{`# comment 1`}},
+ {},
+ {Comments: []string{`# comment 2`}},
+ },
+ },
+ }, /////////////////////////////////////////////////////////////////////
+ {
+ name: "expectation, single result",
+ in: `abc,def [ FAIL ]`,
+ expect: expectations.Content{
+ Chunks: []expectations.Chunk{
+ {
+ Expectations: []expectations.Expectation{
+ {
+ Line: 1,
+ Tags: result.NewTags(),
+ Query: "abc,def",
+ Status: []string{"FAIL"},
+ },
+ },
+ },
+ },
+ },
+ }, /////////////////////////////////////////////////////////////////////
+ {
+ name: "expectation, with comment",
+ in: `abc,def [ FAIL ] # this is a comment`,
+ expect: expectations.Content{
+ Chunks: []expectations.Chunk{
+ {
+ Expectations: []expectations.Expectation{
+ {
+ Line: 1,
+ Tags: result.NewTags(),
+ Query: "abc,def",
+ Status: []string{"FAIL"},
+ Comment: "# this is a comment",
+ },
+ },
+ },
+ },
+ },
+ }, /////////////////////////////////////////////////////////////////////
+ {
+ name: "expectation, multiple results",
+ in: `abc,def [ FAIL SLOW ]`,
+ expect: expectations.Content{
+ Chunks: []expectations.Chunk{
+ {
+ Expectations: []expectations.Expectation{
+ {
+ Line: 1,
+ Tags: result.NewTags(),
+ Query: "abc,def",
+ Status: []string{"FAIL", "SLOW"},
+ },
+ },
+ },
+ },
+ },
+ }, /////////////////////////////////////////////////////////////////////
+ {
+ name: "expectation, with single tag",
+ in: `[ Win ] abc,def [ FAIL ]`,
+ expect: expectations.Content{
+ Chunks: []expectations.Chunk{
+ {
+ Expectations: []expectations.Expectation{
+ {
+ Line: 1,
+ Tags: result.NewTags("Win"),
+ Query: "abc,def",
+ Status: []string{"FAIL"},
+ },
+ },
+ },
+ },
+ },
+ }, /////////////////////////////////////////////////////////////////////
+ {
+ name: "expectation, with multiple tags",
+ in: `[ Win Mac ] abc,def [ FAIL ]`,
+ expect: expectations.Content{
+ Chunks: []expectations.Chunk{
+ {
+ Expectations: []expectations.Expectation{
+ {
+ Line: 1,
+ Tags: result.NewTags("Win", "Mac"),
+ Query: "abc,def",
+ Status: []string{"FAIL"},
+ },
+ },
+ },
+ },
+ },
+ }, /////////////////////////////////////////////////////////////////////
+ {
+ name: "expectation, with bug",
+ in: `crbug.com/123 abc,def [ FAIL ]`,
+ expect: expectations.Content{
+ Chunks: []expectations.Chunk{
+ {
+ Expectations: []expectations.Expectation{
+ {
+ Line: 1,
+ Bug: "crbug.com/123",
+ Tags: result.NewTags(),
+ Query: "abc,def",
+ Status: []string{"FAIL"},
+ },
+ },
+ },
+ },
+ },
+ }, /////////////////////////////////////////////////////////////////////
+ {
+ name: "expectation, with bug and tag",
+ in: `crbug.com/123 [ Win ] abc,def [ FAIL ]`,
+ expect: expectations.Content{
+ Chunks: []expectations.Chunk{
+ {
+ Expectations: []expectations.Expectation{
+ {
+ Line: 1,
+ Bug: "crbug.com/123",
+ Tags: result.NewTags("Win"),
+ Query: "abc,def",
+ Status: []string{"FAIL"},
+ },
+ },
+ },
+ },
+ },
+ }, /////////////////////////////////////////////////////////////////////
+ {
+ name: "expectation, with comment",
+ in: `# a comment
+crbug.com/123 [ Win ] abc,def [ FAIL ]`,
+ expect: expectations.Content{
+ Chunks: []expectations.Chunk{
+ {
+ Comments: []string{`# a comment`},
+ Expectations: []expectations.Expectation{
+ {
+ Line: 2,
+ Bug: "crbug.com/123",
+ Tags: result.NewTags("Win"),
+ Query: "abc,def",
+ Status: []string{"FAIL"},
+ },
+ },
+ },
+ },
+ },
+ }, /////////////////////////////////////////////////////////////////////
+ {
+ name: "expectation, with multiple comments",
+ in: `# comment 1
+# comment 2
+crbug.com/123 [ Win ] abc,def [ FAIL ]`,
+ expect: expectations.Content{
+ Chunks: []expectations.Chunk{
+ {
+ Comments: []string{`# comment 1`, `# comment 2`},
+ Expectations: []expectations.Expectation{
+ {
+ Line: 3,
+ Bug: "crbug.com/123",
+ Tags: result.NewTags("Win"),
+ Query: "abc,def",
+ Status: []string{"FAIL"},
+ },
+ },
+ },
+ },
+ },
+ }, /////////////////////////////////////////////////////////////////////
+ {
+ name: "comment, test, newline, comment",
+ in: `# comment 1
+crbug.com/123 abc_def [ Skip ]
+
+### comment 2`,
+ expect: expectations.Content{
+ Chunks: []expectations.Chunk{
+ {
+ Comments: []string{`# comment 1`},
+ Expectations: []expectations.Expectation{
+ {
+ Line: 2,
+ Bug: "crbug.com/123",
+ Tags: result.NewTags(),
+ Query: "abc_def",
+ Status: []string{"Skip"},
+ },
+ },
+ },
+ {},
+ {Comments: []string{`### comment 2`}},
+ },
+ },
+ }, /////////////////////////////////////////////////////////////////////
+ {
+ name: "complex",
+ in: `# comment 1
+
+# comment 2
+# comment 3
+
+crbug.com/123 [ Win ] abc,def [ FAIL ]
+
+# comment 4
+# comment 5
+crbug.com/456 [ Mac ] ghi_jkl [ PASS ]
+# comment 6
+
+# comment 7
+`,
+ expect: expectations.Content{
+ Chunks: []expectations.Chunk{
+ {Comments: []string{`# comment 1`}},
+ {},
+ {Comments: []string{`# comment 2`, `# comment 3`}},
+ {},
+ {
+ Expectations: []expectations.Expectation{
+ {
+ Line: 6,
+ Bug: "crbug.com/123",
+ Tags: result.NewTags("Win"),
+ Query: "abc,def",
+ Status: []string{"FAIL"},
+ },
+ },
+ },
+ {},
+ {
+ Comments: []string{`# comment 4`, `# comment 5`},
+ Expectations: []expectations.Expectation{
+ {
+ Line: 10,
+ Bug: "crbug.com/456",
+ Tags: result.NewTags("Mac"),
+ Query: "ghi_jkl",
+ Status: []string{"PASS"},
+ },
+ },
+ },
+ {Comments: []string{`# comment 6`}},
+ {},
+ {Comments: []string{`# comment 7`}},
+ },
+ },
+ }, /////////////////////////////////////////////////////////////////////
+ {
+ name: "tag header",
+ in: `
+# BEGIN TAG HEADER (autogenerated, see validate_tag_consistency.py)
+# Devices
+# tags: [ duck-fish-5 duck-fish-5x duck-horse-2 duck-horse-4
+# duck-horse-6 duck-shield-duck-tv
+# mouse-snake-frog mouse-snake-ant mouse-snake
+# fly-snake-bat fly-snake-worm fly-snake-snail-rabbit ]
+# Platform
+# tags: [ hamster
+# lion ]
+# Driver
+# tags: [ goat.1 ]
+# END TAG HEADER
+`,
+ expect: expectations.Content{
+ Chunks: []expectations.Chunk{
+ {},
+ {Comments: []string{
+ `# BEGIN TAG HEADER (autogenerated, see validate_tag_consistency.py)`,
+ `# Devices`,
+ `# tags: [ duck-fish-5 duck-fish-5x duck-horse-2 duck-horse-4`,
+ `# duck-horse-6 duck-shield-duck-tv`,
+ `# mouse-snake-frog mouse-snake-ant mouse-snake`,
+ `# fly-snake-bat fly-snake-worm fly-snake-snail-rabbit ]`,
+ `# Platform`,
+ `# tags: [ hamster`,
+ `# lion ]`,
+ `# Driver`,
+ `# tags: [ goat.1 ]`,
+ `# END TAG HEADER`,
+ }},
+ },
+ Tags: expectations.Tags{
+ ByName: map[string]expectations.TagSetAndPriority{
+ "duck-fish-5": {Set: "Devices", Priority: 0},
+ "duck-fish-5x": {Set: "Devices", Priority: 1},
+ "duck-horse-2": {Set: "Devices", Priority: 2},
+ "duck-horse-4": {Set: "Devices", Priority: 3},
+ "duck-horse-6": {Set: "Devices", Priority: 4},
+ "duck-shield-duck-tv": {Set: "Devices", Priority: 5},
+ "mouse-snake-frog": {Set: "Devices", Priority: 6},
+ "mouse-snake-ant": {Set: "Devices", Priority: 7},
+ "mouse-snake": {Set: "Devices", Priority: 8},
+ "fly-snake-bat": {Set: "Devices", Priority: 9},
+ "fly-snake-worm": {Set: "Devices", Priority: 10},
+ "fly-snake-snail-rabbit": {Set: "Devices", Priority: 11},
+ "hamster": {Set: "Platform", Priority: 0},
+ "lion": {Set: "Platform", Priority: 1},
+ "goat.1": {Set: "Driver", Priority: 0},
+ },
+ Sets: []expectations.TagSet{
+ {
+ Name: "Devices",
+ Tags: result.NewTags(
+ "duck-fish-5", "duck-fish-5x", "duck-horse-2",
+ "duck-horse-4", "duck-horse-6", "duck-shield-duck-tv",
+ "mouse-snake-frog", "mouse-snake-ant", "mouse-snake",
+ "fly-snake-bat", "fly-snake-worm", "fly-snake-snail-rabbit",
+ ),
+ }, {
+ Name: "Platform",
+ Tags: result.NewTags("hamster", "lion"),
+ }, {
+ Name: "Driver",
+ Tags: result.NewTags("goat.1"),
+ },
+ },
+ },
+ },
+ }, /////////////////////////////////////////////////////////////////////
+ {
+ name: "err missing tag ']'",
+ in: `[`,
+ expectErr: "1:2 error: expected ']' for tags",
+ }, /////////////////////////////////////////////////////////////////////
+ {
+ name: "err missing test query",
+ in: `[ a ]`,
+ expectErr: "1:6 error: expected test query",
+ }, /////////////////////////////////////////////////////////////////////
+ {
+ name: "err missing status EOL",
+ in: `[ a ] b`,
+ expectErr: "1:8 error: expected status",
+ }, /////////////////////////////////////////////////////////////////////
+ {
+ name: "err missing status comment",
+ in: `[ a ] b # c`,
+ expectErr: "1:9 error: expected status",
+ }, /////////////////////////////////////////////////////////////////////
+ {
+ name: "err missing status ']'",
+ in: `[ a ] b [ c`,
+ expectErr: "1:12 error: expected ']' for status",
+ },
+ } {
+
+ got, err := expectations.Parse(test.in)
+ errMsg := ""
+ if err != nil {
+ errMsg = err.Error()
+ }
+ if diff := cmp.Diff(errMsg, test.expectErr); diff != "" {
+ t.Errorf("'%v': Parse() error %v", test.name, diff)
+ continue
+ }
+ if diff := cmp.Diff(got, test.expect); diff != "" {
+ t.Errorf("'%v': Parse() was not as expected:\n%v", test.name, diff)
+ }
+ }
+}
diff --git a/chromium/third_party/dawn/tools/src/cts/expectations/update.go b/chromium/third_party/dawn/tools/src/cts/expectations/update.go
new file mode 100644
index 00000000000..a49455149cf
--- /dev/null
+++ b/chromium/third_party/dawn/tools/src/cts/expectations/update.go
@@ -0,0 +1,610 @@
+// Copyright 2022 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expectations
+
+import (
+ "errors"
+ "fmt"
+ "sort"
+ "strings"
+ "time"
+
+ "dawn.googlesource.com/dawn/tools/src/container"
+ "dawn.googlesource.com/dawn/tools/src/cts/query"
+ "dawn.googlesource.com/dawn/tools/src/cts/result"
+)
+
+// Update performs an incremental update on the expectations using the provided
+// results.
+//
+// Update will:
+// • Remove any expectation lines that have a query where no results match.
+// • Remove expectations lines that are in a chunk which is not annotated with
+// 'KEEP', and all test results have the status 'Pass'.
+// • Remove chunks that have had all expectation lines removed.
+// • Appends new chunks for flaky and failing tests which are not covered by
+// existing expectation lines.
+//
+// Update returns a list of diagnostics for things that should be addressed.
+func (c *Content) Update(results result.List) ([]Diagnostic, error) {
+ // Make a copy of the results. This code mutates the list.
+ results = append(result.List{}, results...)
+
+ // Replace statuses that the CTS runner doesn't recognize with 'Failure'
+ simplifyStatuses(results)
+
+ // Produce a list of tag sets.
+ // We reverse the declared order, as webgpu-cts/expectations.txt lists the
+ // most important first (OS, GPU, etc), and result.MinimalVariantTags will
+ // prioritize folding away the earlier tag-sets.
+ tagSets := make([]result.Tags, len(c.Tags.Sets))
+ for i, s := range c.Tags.Sets {
+ tagSets[len(tagSets)-i-1] = s.Tags
+ }
+
+ // Update those expectations!
+ u := updater{
+ in: *c,
+ out: Content{},
+ qt: newQueryTree(results),
+ tagSets: tagSets,
+ }
+ if err := u.build(); err != nil {
+ return nil, fmt.Errorf("while updating expectations: %w", err)
+ }
+
+ *c = u.out
+ return u.diags, nil
+}
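
To show how the pieces of this package fit together around Update, here is a hedged end-to-end sketch (the file path, the resultsFromCI helper and the error handling are illustrative only, not part of the change): load the expectations file, fold in a fresh result.List, report any diagnostics with their line numbers, and write the updated file back.

package main

import (
	"fmt"
	"log"

	"dawn.googlesource.com/dawn/tools/src/cts/expectations"
	"dawn.googlesource.com/dawn/tools/src/cts/result"
)

// resultsFromCI is a stand-in for however the caller obtains fresh CTS
// results; it is not part of the expectations package.
func resultsFromCI() result.List {
	return result.List{}
}

func main() {
	// Load the checked-in expectations file (path is illustrative).
	content, err := expectations.Load("webgpu-cts/expectations.txt")
	if err != nil {
		log.Fatal(err)
	}
	// Fold the new results in: stale lines are dropped, and new flakes and
	// failures are appended under their own triage chunks.
	diags, err := content.Update(resultsFromCI())
	if err != nil {
		log.Fatal(err)
	}
	for _, d := range diags {
		fmt.Println(d) // e.g. "42 warning: no results found for '...'"
	}
	if err := content.Save("webgpu-cts/expectations.txt"); err != nil {
		log.Fatal(err)
	}
}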
+
+// updater holds the state used for updating the expectations
+type updater struct {
+ in Content // the original expectations Content
+ out Content // newly built expectations Content
+ qt queryTree // the query tree
+ diags []Diagnostic // diagnostics raised during update
+ tagSets []result.Tags // reverse-ordered tag-sets of 'in'
+}
+
+// simplifyStatuses replaces all result statuses that are not 'Pass',
+// 'RetryOnFailure', 'Slow', 'Skip' with 'Failure'.
+func simplifyStatuses(results result.List) {
+ for i, r := range results {
+ switch r.Status {
+ case result.Pass, result.RetryOnFailure, result.Slow, result.Skip:
+ // keep
+ default:
+ results[i].Status = result.Failure
+ }
+ }
+}
+
+const (
+ // Status used to mark results that have been already handled by an
+ // expectation.
+ consumed result.Status = "<<consumed>>"
+ // Chunk comment for new flakes
+ newFlakesComment = "# New flakes. Please triage:"
+ // Chunk comment for new failures
+ newFailuresComment = "# New failures. Please triage:"
+)
+
+// queryTree holds a tree of queries to all results (no filtering by tag or
+// status). The queryTree is used to glob all the results that match a
+// particular query.
+type queryTree struct {
+ // All the results.
+ results result.List
+ // consumedAt is a list of line numbers for the i'th result in 'results'
+ // Initially all line numbers are 0. When a result is consumed the line
+ // number is set.
+ consumedAt []int
+ // Each tree node holds a list of indices to results.
+ tree query.Tree[[]int]
+}
+
+// newQueryTree builds the queryTree from the list of results.
+func newQueryTree(results result.List) queryTree {
+ // Build a map of query to result indices
+ queryToIndices := map[query.Query][]int{}
+ for i, r := range results {
+ l := queryToIndices[r.Query]
+ l = append(l, i)
+ queryToIndices[r.Query] = l
+ }
+
+ // Construct the query tree to result indices
+ tree := query.Tree[[]int]{}
+ for query, indices := range queryToIndices {
+ if err := tree.Add(query, indices); err != nil {
+ // Unreachable: The only error we could get is duplicate data for
+ // the same query, which should be impossible.
+ panic(err)
+ }
+ }
+
+ consumedAt := make([]int, len(results))
+ return queryTree{results, consumedAt, tree}
+}
+
+// glob returns the list of results under (or with) the given query.
+func (qt *queryTree) glob(q query.Query) (result.List, error) {
+ glob, err := qt.tree.Glob(q)
+ if err != nil {
+ return nil, fmt.Errorf("while gathering results for query '%v': %w", q, err)
+ }
+
+ out := result.List{}
+ for _, indices := range glob {
+ for _, idx := range indices.Data {
+ out = append(out, qt.results[idx])
+ }
+ }
+
+ return out, nil
+}
+
+// globAndCheckForCollisions returns the list of results matching the given tags
+// under (or with) the given query.
+// globAndCheckForCollisions will return an error if any of the results are
+// already consumed by a non-zero line. The non-zero line distinguishes between
+// results consumed by expectations declared in the input (non-zero line), vs
+// those that were introduced by the update (zero line). We only want to error
+// if there's a collision in user declared expectations.
+func (qt *queryTree) globAndCheckForCollisions(q query.Query, t result.Tags) (result.List, error) {
+ glob, err := qt.tree.Glob(q)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.List{}
+ for _, indices := range glob {
+ for _, idx := range indices.Data {
+ if r := qt.results[idx]; r.Tags.ContainsAll(t) {
+ if at := qt.consumedAt[idx]; at > 0 {
+ if len(t) > 0 {
+ return nil, fmt.Errorf("%v %v collides with expectation at line %v", t, q, at)
+ }
+ return nil, fmt.Errorf("%v collides with expectation at line %v", q, at)
+ }
+ out = append(out, r)
+ }
+ }
+ }
+ return out, nil
+}
+
+// markAsConsumed marks all the results matching the given tags
+// under (or with) the given query, as consumed.
+// line is used to record the line at which the results were consumed. If the
+// results were consumed as part of generating new expectations then line should
+// be 0. See queryTree.globAndCheckForCollisions().
+func (qt *queryTree) markAsConsumed(q query.Query, t result.Tags, line int) {
+ if glob, err := qt.tree.Glob(q); err == nil {
+ for _, indices := range glob {
+ for _, idx := range indices.Data {
+ r := &qt.results[idx]
+ if r.Tags.ContainsAll(t) {
+ r.Status = consumed
+ qt.consumedAt[idx] = line
+ }
+ }
+ }
+ }
+}
+
+// build is the updater top-level function.
+// build first appends to u.out all chunks from 'u.in' with expectations updated
+// using the new results, and then appends any new expectations to u.out.
+func (u *updater) build() error {
+ // Update all the existing chunks
+ for _, in := range u.in.Chunks {
+ out := u.chunk(in)
+
+ // If the chunk had expectations but they have all been removed, drop the chunk
+ if len(in.Expectations) > 0 && len(out.Expectations) == 0 {
+ continue
+ }
+ if out.IsBlankLine() {
+ u.out.MaybeAddBlankLine()
+ continue
+ }
+ u.out.Chunks = append(u.out.Chunks, out)
+ }
+
+ // Emit new expectations (flaky, failing)
+ if err := u.addNewExpectations(); err != nil {
+ return fmt.Errorf("failed to add new expectations: %w", err)
+ }
+
+ return nil
+}
+
+// chunk returns a new Chunk, based on 'in', with the expectations updated.
+func (u *updater) chunk(in Chunk) Chunk {
+ if len(in.Expectations) == 0 {
+ return in // Just a comment / blank line
+ }
+
+ // Skip over any untriaged failures / flake chunks.
+ // We'll just rebuild them at the end.
+ if len(in.Comments) > 0 {
+ switch in.Comments[0] {
+ case newFailuresComment, newFlakesComment:
+ return Chunk{}
+ }
+ }
+
+ keep := false // Does the chunk comment contain 'KEEP' ?
+ for _, l := range in.Comments {
+ if strings.Contains(l, "KEEP") {
+ keep = true
+ break
+ }
+ }
+
+ // Begin building the output chunk.
+ // Copy over the chunk's comments.
+ out := Chunk{Comments: in.Comments}
+
+ // Build the new chunk's expectations
+ for _, exIn := range in.Expectations {
+ exOut := u.expectation(exIn, keep)
+ out.Expectations = append(out.Expectations, exOut...)
+ }
+
+ // Sort the expectations to keep things clean and tidy.
+ sort.Slice(out.Expectations, func(i, j int) bool {
+ switch {
+ case out.Expectations[i].Query < out.Expectations[j].Query:
+ return true
+ case out.Expectations[i].Query > out.Expectations[j].Query:
+ return false
+ }
+ a := result.TagsToString(out.Expectations[i].Tags)
+ b := result.TagsToString(out.Expectations[j].Tags)
+ switch {
+ case a < b:
+ return true
+ case a > b:
+ return false
+ }
+ return false
+ })
+
+ return out
+}
+
+// expectation returns a new list of Expectations, based on the Expectation 'in',
+// using the new result data.
+func (u *updater) expectation(in Expectation, keep bool) []Expectation {
+ // noResults is a helper for returning when the expectation has no test
+ // results.
+ // If the expectation has an expected 'Skip' result, then we're likely
+ // to be missing results (as the test was not run). In this situation
+ // the expectation is preserved, and no diagnostics are raised.
+ // If the expectation did not have a 'Skip' result, then a diagnostic will
+ // be raised and the expectation will be removed.
+ noResults := func() []Expectation {
+ if container.NewSet(in.Status...).Contains(string(result.Skip)) {
+ return []Expectation{in}
+ }
+ // Expectation does not have a 'Skip' result.
+ if len(in.Tags) > 0 {
+ u.diag(Warning, in.Line, "no results found for '%v' with tags %v", in.Query, in.Tags)
+ } else {
+ u.diag(Warning, in.Line, "no results found for '%v'", in.Query)
+ }
+ // Remove the no-results expectation
+ return []Expectation{}
+ }
+
+ // Grab all the results that match the expectation's query
+ q := query.Parse(in.Query)
+
+ // Glob the results for the expectation's query + tag combination.
+ // Ensure that none of these are already consumed.
+ results, err := u.qt.globAndCheckForCollisions(q, in.Tags)
+ // If we can't find any results for this query + tag combination, then bail.
+ switch {
+ case errors.As(err, &query.ErrNoDataForQuery{}):
+ return noResults()
+ case err != nil:
+ u.diag(Error, in.Line, "%v", err)
+ return []Expectation{}
+ case len(results) == 0:
+ return noResults()
+ }
+
+ // Before returning, mark all the results as consumed.
+ // Note: this has to happen *after* we've generated the new expectations, as
+ // marking the results as 'consumed' will impact the logic of
+ // expectationsForRoot()
+ defer u.qt.markAsConsumed(q, in.Tags, in.Line)
+
+ if keep { // Expectation chunk was marked with 'KEEP'
+ // Add a diagnostic if all tests of the expectation were 'Pass'
+ if s := results.Statuses(); len(s) == 1 && s.One() == result.Pass {
+ if ex := container.NewSet(in.Status...); len(ex) == 1 && ex.One() == string(result.Slow) {
+ // Expectation was 'Slow'. Give feedback on actual time taken.
+ var longest, average time.Duration
+ for _, r := range results {
+ if r.Duration > longest {
+ longest = r.Duration
+ }
+ average += r.Duration
+ }
+ if c := len(results); c > 1 {
+ average /= time.Duration(c)
+ u.diag(Note, in.Line, "longest test took %v (average %v)", longest, average)
+ } else {
+ u.diag(Note, in.Line, "test took %v", longest)
+ }
+ } else {
+ if c := len(results); c > 1 {
+ u.diag(Note, in.Line, "all %d tests now pass", len(results))
+ } else {
+ u.diag(Note, in.Line, "test now passes")
+ }
+ }
+ }
+ return []Expectation{in}
+ }
+
+ // Rebuild the expectations for this query.
+ return u.expectationsForRoot(q, in.Line, in.Bug, in.Comment)
+}
+
+// addNewExpectations (potentially) appends to 'u.out' chunks for new flaky and
+// failing tests.
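+// The new chunks are prefixed with the newFlakesComment and newFailuresComment
+// markers, so that a subsequent update can identify and rebuild them.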
+func (u *updater) addNewExpectations() error {
+ // Scan the full result list to obtain all the test variants
+ // (unique tag combinations).
+ allVariants := u.qt.results.Variants()
+
+ // For each variant:
+ // • Build a query tree using the results filtered to the variant, and then
+ // reduce the tree.
+ // • Take all the reduced-tree leaf nodes, and add these to 'roots'.
+ // Once we've collected all the roots, we'll use these to build the
+ // expectations across the reduced set of tags.
+ roots := container.NewMap[string, query.Query]()
+ for _, variant := range allVariants {
+ // Build a tree from the results matching the given variant.
+ tree, err := u.qt.results.FilterByVariant(variant).StatusTree()
+ if err != nil {
+ return fmt.Errorf("while building tree for tags '%v': %w", variant, err)
+ }
+ // Reduce the tree.
+ tree.Reduce(treeReducer)
+ // Add all the reduced leaf nodes to 'roots'.
+ for _, qd := range tree.List() {
+ roots.Add(qd.Query.String(), qd.Query)
+ }
+ }
+
+ // Build all the expectations for each of the roots.
+ expectations := []Expectation{}
+ for _, root := range roots.Values() {
+ expectations = append(expectations, u.expectationsForRoot(
+ root, // Root query
+ 0, // Line number
+ "crbug.com/dawn/0000", // Bug
+ "", // Comment
+ )...)
+ }
+
+ // Bin the expectations by failure or flake.
+ flakes, failures := []Expectation{}, []Expectation{}
+ for _, r := range expectations {
+ if container.NewSet(r.Status...).Contains(string(result.RetryOnFailure)) {
+ flakes = append(flakes, r)
+ } else {
+ failures = append(failures, r)
+ }
+ }
+
+ // Create chunks for any flakes and failures, in that order.
+ for _, group := range []struct {
+ results []Expectation
+ comment string
+ }{
+ {flakes, newFlakesComment},
+ {failures, newFailuresComment},
+ } {
+ if len(group.results) > 0 {
+ u.out.MaybeAddBlankLine()
+ u.out.Chunks = append(u.out.Chunks, Chunk{
+ Comments: []string{group.comment},
+ Expectations: group.results,
+ })
+ }
+ }
+
+ return nil
+}
+
+// expectationsForRoot builds a list of expectations that cover the failing
+// tests for the results under root.
+// The returned list of expectations is optimized by reducing queries to the
+// most common root, and reducing tags to the smallest required set.
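+// For example, a failure seen with tags [gpu-b os-b] may be emitted as an
+// expectation tagged only [ os-b ] when that single tag is enough to
+// distinguish it from the passing configurations (see the
+// 'expectation test now passes' test in update_test.go).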
+func (u *updater) expectationsForRoot(
+ root query.Query, // The sub-tree query root
+ line int, // The originating line, when producing diagnostics
+ bug string, // The bug to apply to all returned expectations
+ comment string, // The comment to apply to all returned expectations
+) []Expectation {
+ results, err := u.qt.glob(root)
+ if err != nil {
+ u.diag(Error, line, "%v", err)
+ return nil
+ }
+
+ // Using the full list of unfiltered tests, generate the minimal set of
+ // variants (tags) that uniquely classify the results with differing status.
+ minimalVariants := u.
+ cleanupTags(results).
+ MinimalVariantTags(u.tagSets)
+
+ // For each minimized variant...
+ reduced := result.List{}
+ for _, variant := range minimalVariants {
+ // Build a query tree from this variant...
+ tree := result.StatusTree{}
+ filtered := results.FilterByTags(variant)
+ for _, r := range filtered {
+			// Note: variants may overlap, but overlapping queries will have
+ // identical statuses, so we can just ignore the error for Add().
+ tree.Add(r.Query, r.Status)
+ }
+
+ // ... and reduce the tree by collapsing sub-trees that have common
+ // statuses.
+ tree.ReduceUnder(root, treeReducer)
+
+ // Append the reduced tree nodes to the results list
+ for _, qs := range tree.List() {
+ reduced = append(reduced, result.Result{
+ Query: qs.Query,
+ Tags: variant,
+ Status: qs.Data,
+ })
+ }
+ }
+
+ // Filter out any results that passed or have already been consumed
+ filtered := reduced.Filter(func(r result.Result) bool {
+ return r.Status != result.Pass && r.Status != consumed
+ })
+
+ // Mark all the new expectation results as consumed.
+ for _, r := range filtered {
+ u.qt.markAsConsumed(r.Query, r.Tags, 0)
+ }
+
+ // Transform the results to expectations.
+ return u.resultsToExpectations(filtered, bug, comment)
+}
+
+// resultsToExpectations returns a list of expectations from the given results.
+// Each expectation will have the same query, tags and status as the input
+// result, along with the specified bug and comment.
+//
+// If the result query target is a test without a wildcard, then a trailing
+// target delimiter (':') is automatically appended to the query. This is to
+// satisfy a requirement of the expectation validator.
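+// For example, the result query 'a:b,c:d' produces the expectation query
+// 'a:b,c:d:' (see the 'expectation case now passes' test in update_test.go).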
+func (u *updater) resultsToExpectations(results result.List, bug, comment string) []Expectation {
+ results.Sort()
+
+ out := make([]Expectation, len(results))
+ for i, r := range results {
+ q := r.Query.String()
+ if r.Query.Target() == query.Tests && !r.Query.IsWildcard() {
+ // The expectation validator wants a trailing ':' for test queries
+ q += query.TargetDelimiter
+ }
+ out[i] = Expectation{
+ Bug: bug,
+ Tags: r.Tags,
+ Query: q,
+ Status: []string{string(r.Status)},
+ Comment: comment,
+ }
+ }
+
+ return out
+}
+
+// cleanupTags returns a copy of the provided results with:
+// • All tags not found in the expectations list removed
+// • All but the highest-priority tag of each tag-set removed.
+// The tag sets are defined by the `BEGIN TAG HEADER` / `END TAG HEADER`
+// section at the top of the expectations file.
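+// For example, given the OS tag set [ os-a os-b os-c ], a result tagged
+// {os-a, os-c, gpu-b} is reduced to {os-c, gpu-b}, as os-c takes precedence
+// over os-a within the OS set (see the 'prioritize tag sets' test).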
+func (u *updater) cleanupTags(results result.List) result.List {
+ return results.TransformTags(func(t result.Tags) result.Tags {
+ type HighestPrioritySetTag struct {
+ tag string
+ priority int
+ }
+ // Set name to highest priority tag for that set
+ best := map[string]HighestPrioritySetTag{}
+ for tag := range t {
+ sp, ok := u.in.Tags.ByName[tag]
+ if ok {
+ if set := best[sp.Set]; sp.Priority >= set.priority {
+ best[sp.Set] = HighestPrioritySetTag{tag, sp.Priority}
+ }
+ }
+ }
+ t = result.NewTags()
+ for _, ts := range best {
+ t.Add(ts.tag)
+ }
+ return t
+ })
+}
+
+// treeReducer is a function that can be used by StatusTree.Reduce() to reduce
+// tree nodes with the same status.
+// treeReducer will collapse tree nodes if any of the following are true:
+// • All child nodes have the same status
+// • More than 75% of the child nodes share the same non-pass status, and none
+//   of the children are consumed.
+// • More than 20 child nodes share the same non-pass status, and none of the
+//   children are consumed.
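+// For example, 8 failing children out of 10 collapse to a single Failure node,
+// while 7 out of 10 do not (see the 'merge when over 75% of children fail' and
+// 'don't merge when under 75% of children fail' tests in update_test.go).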
+func treeReducer(statuses []result.Status) *result.Status {
+ counts := map[result.Status]int{}
+ for _, s := range statuses {
+ counts[s] = counts[s] + 1
+ }
+ if len(counts) == 1 {
+ return &statuses[0] // All the same status
+ }
+ if counts[consumed] > 0 {
+ return nil // Partially consumed trees cannot be merged
+ }
+ highestNonPassCount := 0
+ highestNonPassStatus := result.Failure
+ for s, n := range counts {
+ if s != result.Pass {
+ if percent := (100 * n) / len(statuses); percent > 75 {
+ // Over 75% of all the children are of non-pass status s.
+ return &s
+ }
+ if n > highestNonPassCount {
+ highestNonPassCount = n
+ highestNonPassStatus = s
+ }
+ }
+ }
+
+ if highestNonPassCount > 20 {
+		// More than 20 children failed with the same non-pass status.
+ return &highestNonPassStatus
+ }
+
+ return nil
+}
+
+// diag appends a new diagnostic to u.diags with the given severity, line and
+// message.
+func (u *updater) diag(severity Severity, line int, msg string, args ...interface{}) {
+ u.diags = append(u.diags, Diagnostic{
+ Severity: severity,
+ Line: line,
+ Message: fmt.Sprintf(msg, args...),
+ })
+}
diff --git a/chromium/third_party/dawn/tools/src/cts/expectations/update_test.go b/chromium/third_party/dawn/tools/src/cts/expectations/update_test.go
new file mode 100644
index 00000000000..69cc314e6ee
--- /dev/null
+++ b/chromium/third_party/dawn/tools/src/cts/expectations/update_test.go
@@ -0,0 +1,672 @@
+// Copyright 2022 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expectations_test
+
+import (
+ "strings"
+ "testing"
+
+ "dawn.googlesource.com/dawn/tools/src/cts/expectations"
+ "dawn.googlesource.com/dawn/tools/src/cts/query"
+ "dawn.googlesource.com/dawn/tools/src/cts/result"
+ "github.com/google/go-cmp/cmp"
+)
+
+var Q = query.Parse
+
+func TestUpdate(t *testing.T) {
+ header := `# BEGIN TAG HEADER
+# OS
+# tags: [ os-a os-b os-c ]
+# GPU
+# tags: [ gpu-a gpu-b gpu-c ]
+# END TAG HEADER
+`
+ headerLines := strings.Count(header, "\n")
+
+ type Test struct {
+ name string
+ expectations string
+ results result.List
+ updated string
+ diagnostics []expectations.Diagnostic
+ err string
+ }
+ for _, test := range []Test{
+ { //////////////////////////////////////////////////////////////////////
+ name: "empty results",
+ expectations: ``,
+ results: result.List{},
+ },
+ { //////////////////////////////////////////////////////////////////////
+ name: "no results found",
+ expectations: `
+crbug.com/a/123 a:missing,test,result:* [ Failure ]
+crbug.com/a/123 [ tag ] another:missing,test,result:* [ Failure ]
+
+some:other,test:* [ Failure ]
+`,
+ results: result.List{
+ result.Result{
+ Query: Q("some:other,test:*"),
+ Tags: result.NewTags("os-a", "gpu-a"),
+ Status: result.Failure,
+ },
+ result.Result{
+ Query: Q("some:other,test:*"),
+ Tags: result.NewTags("os-b", "gpu-b"),
+ Status: result.Failure,
+ },
+ },
+ updated: `
+some:other,test:* [ Failure ]
+`,
+ diagnostics: []expectations.Diagnostic{
+ {
+ Severity: expectations.Warning,
+ Line: headerLines + 2,
+ Message: "no results found for 'a:missing,test,result:*'",
+ },
+ {
+ Severity: expectations.Warning,
+ Line: headerLines + 3,
+ Message: "no results found for 'another:missing,test,result:*' with tags [tag]",
+ },
+ },
+ },
+ { //////////////////////////////////////////////////////////////////////
+ name: "no results found KEEP",
+ expectations: `
+# KEEP
+crbug.com/a/123 a:missing,test,result:* [ Failure ]
+
+some:other,test:* [ Failure ]
+`,
+ results: result.List{
+ result.Result{
+ Query: Q("some:other,test:*"),
+ Tags: result.NewTags("os-a", "gpu-a"),
+ Status: result.Failure,
+ },
+ result.Result{
+ Query: Q("some:other,test:*"),
+ Tags: result.NewTags("os-b", "gpu-b"),
+ Status: result.Failure,
+ },
+ },
+ updated: `
+some:other,test:* [ Failure ]
+`,
+ diagnostics: []expectations.Diagnostic{
+ {
+ Severity: expectations.Warning,
+ Line: headerLines + 3,
+ Message: "no results found for 'a:missing,test,result:*'",
+ },
+ },
+ },
+ { //////////////////////////////////////////////////////////////////////
+ name: "no results found Skip",
+ expectations: `
+crbug.com/a/123 a:missing,test,result:* [ Skip ]
+
+some:other,test:* [ Failure ]
+`,
+ results: result.List{
+ result.Result{
+ Query: Q("some:other,test:*"),
+ Tags: result.NewTags("os-a", "gpu-a"),
+ Status: result.Failure,
+ },
+ result.Result{
+ Query: Q("some:other,test:*"),
+ Tags: result.NewTags("os-b", "gpu-b"),
+ Status: result.Failure,
+ },
+ },
+ updated: `
+crbug.com/a/123 a:missing,test,result:* [ Skip ]
+
+some:other,test:* [ Failure ]
+`,
+ },
+ { //////////////////////////////////////////////////////////////////////
+ name: "simple expectation collision",
+ expectations: `
+a:b,c:* [ Failure ]
+a:b,c:* [ Failure ]
+`,
+ results: result.List{
+ result.Result{
+ Query: Q("a:b,c:d"),
+ Tags: result.NewTags("os-a", "os-c", "gpu-b"),
+ Status: result.Failure,
+ },
+ },
+ updated: `
+a:b,c:* [ Failure ]
+`,
+ diagnostics: []expectations.Diagnostic{
+ {
+ Severity: expectations.Error,
+ Line: headerLines + 3,
+ Message: "a:b,c:* collides with expectation at line 8",
+ },
+ },
+ },
+ { //////////////////////////////////////////////////////////////////////
+ name: "simple expectation with tags",
+ expectations: `
+[ os-a ] a:b,c:* [ Failure ]
+[ gpu-b ] a:b,c:* [ Failure ]
+`,
+ results: result.List{
+ result.Result{
+ Query: Q("a:b,c:d"),
+ Tags: result.NewTags("os-a", "os-c", "gpu-b"),
+ Status: result.Failure,
+ },
+ },
+ updated: `
+a:b,c:* [ Failure ]
+`,
+ diagnostics: []expectations.Diagnostic{
+ {
+ Severity: expectations.Error,
+ Line: headerLines + 3,
+ Message: "[gpu-b] a:b,c:* collides with expectation at line 8",
+ },
+ },
+ },
+ { //////////////////////////////////////////////////////////////////////
+ name: "simple expectation collision KEEP",
+ expectations: `
+# KEEP
+a:b,c:* [ Failure ]
+a:b,c:* [ Failure ]
+`,
+ results: result.List{
+ result.Result{
+ Query: Q("a:b,c:d"),
+ Tags: result.NewTags("os-a", "os-c", "gpu-b"),
+ Status: result.Failure,
+ },
+ },
+ updated: `
+# KEEP
+a:b,c:* [ Failure ]
+`,
+ diagnostics: []expectations.Diagnostic{
+ {
+ Severity: expectations.Error,
+ Line: headerLines + 4,
+ Message: "a:b,c:* collides with expectation at line 9",
+ },
+ },
+ },
+ { //////////////////////////////////////////////////////////////////////
+ name: "collision with child-expectation",
+ expectations: `
+a:b:x:* [ Failure ]
+a:b:* [ Failure ]
+`,
+ results: result.List{
+ result.Result{
+ Query: Q("a:b:x:*"),
+ Tags: result.NewTags("os-a", "os-c", "gpu-b"),
+ Status: result.Failure,
+ },
+ result.Result{
+ Query: Q("a:b:y:*"),
+ Tags: result.NewTags("os-a", "os-c", "gpu-b"),
+ Status: result.Failure,
+ },
+ },
+ updated: `
+a:b:x:* [ Failure ]
+
+# New failures. Please triage:
+crbug.com/dawn/0000 a:b:y:* [ Failure ]
+`,
+ diagnostics: []expectations.Diagnostic{
+ {
+ Severity: expectations.Error,
+ Line: headerLines + 3,
+ Message: "a:b:* collides with expectation at line 8",
+ },
+ },
+ },
+ { //////////////////////////////////////////////////////////////////////
+ name: "collision with parent-expectation",
+ expectations: `
+a:b:* [ Failure ]
+a:b:x:* [ Failure ]
+`,
+ results: result.List{
+ result.Result{
+ Query: Q("a:b:x:*"),
+ Tags: result.NewTags("os-a", "os-c", "gpu-b"),
+ Status: result.Failure,
+ },
+ result.Result{
+ Query: Q("a:b:y:*"),
+ Tags: result.NewTags("os-a", "os-c", "gpu-b"),
+ Status: result.Failure,
+ },
+ },
+ updated: `
+a:b:* [ Failure ]
+`,
+ diagnostics: []expectations.Diagnostic{
+ {
+ Severity: expectations.Error,
+ Line: headerLines + 3,
+ Message: "a:b:x:* collides with expectation at line 8",
+ },
+ },
+ },
+ { //////////////////////////////////////////////////////////////////////
+ name: "expectation test now passes",
+ expectations: `
+crbug.com/a/123 [ gpu-a os-a ] a:b,c:* [ Failure ]
+crbug.com/a/123 [ gpu-b os-b ] a:b,c:* [ Failure ]
+`,
+ results: result.List{
+ result.Result{
+ Query: Q("a:b,c:*"),
+ Tags: result.NewTags("os-a", "gpu-a"),
+ Status: result.Pass,
+ },
+ result.Result{
+ Query: Q("a:b,c:*"),
+ Tags: result.NewTags("os-b", "gpu-b"),
+ Status: result.Abort,
+ },
+ },
+ updated: `
+crbug.com/a/123 [ os-b ] a:b,c:* [ Failure ]
+`,
+ },
+ { //////////////////////////////////////////////////////////////////////
+ name: "expectation case now passes",
+ expectations: `
+crbug.com/a/123 [ gpu-a os-a ] a:b,c:d [ Failure ]
+crbug.com/a/123 [ gpu-b os-b ] a:b,c:d [ Failure ]
+`,
+ results: result.List{
+ result.Result{
+ Query: Q("a:b,c:d"),
+ Tags: result.NewTags("os-a", "gpu-a"),
+ Status: result.Pass,
+ },
+ result.Result{
+ Query: Q("a:b,c:d"),
+ Tags: result.NewTags("os-b", "gpu-b"),
+ Status: result.Abort,
+ },
+ },
+ updated: `
+crbug.com/a/123 [ os-b ] a:b,c:d: [ Failure ]
+`,
+ },
+ { //////////////////////////////////////////////////////////////////////
+ name: "expectation case now passes KEEP - single",
+ expectations: `
+# KEEP
+crbug.com/a/123 [ gpu-a os-a ] a:b,c:d [ Failure ]
+crbug.com/a/123 [ gpu-b os-b ] a:b,c:d [ Failure ]
+`,
+ results: result.List{
+ result.Result{
+ Query: Q("a:b,c:d"),
+ Tags: result.NewTags("os-a", "gpu-a"),
+ Status: result.Pass,
+ },
+ result.Result{
+ Query: Q("a:b,c:d"),
+ Tags: result.NewTags("os-b", "gpu-b"),
+ Status: result.Abort,
+ },
+ },
+ updated: `
+# KEEP
+crbug.com/a/123 [ gpu-a os-a ] a:b,c:d [ Failure ]
+crbug.com/a/123 [ gpu-b os-b ] a:b,c:d [ Failure ]
+`,
+ diagnostics: []expectations.Diagnostic{
+ {
+ Severity: expectations.Note,
+ Line: headerLines + 3,
+ Message: "test now passes",
+ },
+ },
+ },
+ { //////////////////////////////////////////////////////////////////////
+ name: "expectation case now passes KEEP - multiple",
+ expectations: `
+# KEEP
+crbug.com/a/123 a:b,c:d:* [ Failure ]
+`,
+ results: result.List{
+ result.Result{Query: Q("a:b,c:d:a"), Status: result.Pass},
+ result.Result{Query: Q("a:b,c:d:b"), Status: result.Pass},
+ result.Result{Query: Q("a:b,c:d:c"), Status: result.Pass},
+ result.Result{Query: Q("a:b,c:d:d"), Status: result.Pass},
+ },
+ updated: `
+# KEEP
+crbug.com/a/123 a:b,c:d:* [ Failure ]
+`,
+ diagnostics: []expectations.Diagnostic{
+ {
+ Severity: expectations.Note,
+ Line: headerLines + 3,
+ Message: "all 4 tests now pass",
+ },
+ },
+ },
+ { //////////////////////////////////////////////////////////////////////
+ name: "new test results",
+ expectations: `# A comment`,
+ results: result.List{
+ result.Result{
+ Query: Q("suite:dir_a,dir_b:test_a:*"),
+ Tags: result.NewTags("os-a", "gpu-a"),
+ Status: result.Abort,
+ },
+ result.Result{
+ Query: Q("suite:dir_a,dir_b:test_a:*"),
+ Tags: result.NewTags("os-a", "gpu-b"),
+ Status: result.Abort,
+ },
+ result.Result{
+ Query: Q("suite:dir_a,dir_b:test_c:case=4;*"),
+ Tags: result.NewTags("os-b", "gpu-b"),
+ Status: result.Crash,
+ },
+ result.Result{
+ Query: Q("suite:dir_a,dir_b:test_c:case=5;*"),
+ Tags: result.NewTags("os-b", "gpu-b"),
+ Status: result.RetryOnFailure,
+ },
+ result.Result{
+ Query: Q("suite:dir_a,dir_b:test_b;case=5;*"),
+ Tags: result.NewTags("os-b", "gpu-b"),
+ Status: result.Pass,
+ },
+ result.Result{
+ Query: Q("suite:dir_a,dir_b:test_b:*"),
+ Tags: result.NewTags("os-a", "gpu-a"),
+ Status: result.Skip,
+ },
+ result.Result{
+ Query: Q("suite:dir_a,dir_b:test_b:*"),
+ Tags: result.NewTags("os-b", "gpu-a"),
+ Status: result.Pass,
+ },
+ result.Result{
+ Query: Q("suite:dir_a,dir_b:test_c:*"),
+ Tags: result.NewTags("os-a", "gpu-a"),
+ Status: result.RetryOnFailure,
+ },
+ },
+ updated: `# A comment
+
+# New flakes. Please triage:
+crbug.com/dawn/0000 [ gpu-a os-a ] suite:dir_a,dir_b:test_c:* [ RetryOnFailure ]
+crbug.com/dawn/0000 [ gpu-b os-b ] suite:dir_a,dir_b:test_c:case=5;* [ RetryOnFailure ]
+
+# New failures. Please triage:
+crbug.com/dawn/0000 [ gpu-b os-a ] suite:* [ Failure ]
+crbug.com/dawn/0000 [ gpu-a os-a ] suite:dir_a,dir_b:test_a:* [ Failure ]
+crbug.com/dawn/0000 [ gpu-a os-a ] suite:dir_a,dir_b:test_b:* [ Skip ]
+crbug.com/dawn/0000 [ gpu-b os-b ] suite:dir_a,dir_b:test_c:case=4;* [ Failure ]
+`,
+ },
+ { //////////////////////////////////////////////////////////////////////
+ name: "filter unknown tags",
+ expectations: ``,
+ results: result.List{
+ result.Result{
+ Query: Q("a:b,c:*"),
+ Tags: result.NewTags("os-a", "gpu-x"),
+ Status: result.Failure,
+ },
+ result.Result{
+ Query: Q("a:b,c:*"),
+ Tags: result.NewTags("os-b", "gpu-x"),
+ Status: result.Crash,
+ },
+ result.Result{
+ Query: Q("a:b,c:*"),
+ Tags: result.NewTags("os-x", "gpu-b"),
+ Status: result.Failure,
+ },
+ result.Result{
+ Query: Q("a:b,c:*"),
+ Tags: result.NewTags("os-x", "gpu-a"),
+ Status: result.Crash,
+ },
+ result.Result{
+ Query: Q("a:b,c:*"),
+ Tags: result.NewTags("os-c", "gpu-c"),
+ Status: result.Pass,
+ },
+ },
+ updated: `
+# New failures. Please triage:
+crbug.com/dawn/0000 [ gpu-a ] a:* [ Failure ]
+crbug.com/dawn/0000 [ gpu-b ] a:* [ Failure ]
+crbug.com/dawn/0000 [ os-a ] a:* [ Failure ]
+crbug.com/dawn/0000 [ os-b ] a:* [ Failure ]
+`,
+ },
+ { //////////////////////////////////////////////////////////////////////
+ name: "prioritize tag sets",
+ expectations: ``,
+ results: result.List{
+ result.Result{
+ Query: Q("a:b,c:*"),
+ Tags: result.NewTags("os-a", "os-c", "gpu-b"),
+ Status: result.Failure,
+ },
+ result.Result{
+ Query: Q("a:b,c:*"),
+ Tags: result.NewTags("gpu-a", "os-b", "gpu-c"),
+ Status: result.Failure,
+ },
+ result.Result{
+ Query: Q("a:b,c:*"),
+ Tags: result.NewTags("os-c", "gpu-c"),
+ Status: result.Pass,
+ },
+ },
+ updated: `
+# New failures. Please triage:
+crbug.com/dawn/0000 [ gpu-b os-c ] a:* [ Failure ]
+crbug.com/dawn/0000 [ gpu-c os-b ] a:* [ Failure ]
+`,
+ },
+ { //////////////////////////////////////////////////////////////////////
+ name: "merge when over 75% of children fail",
+ expectations: ``,
+ results: result.List{
+ result.Result{Query: Q("a:b,c:t0:*"), Status: result.Failure},
+ result.Result{Query: Q("a:b,c:t1:*"), Status: result.Pass},
+ result.Result{Query: Q("a:b,c:t2:*"), Status: result.Failure},
+ result.Result{Query: Q("a:b,c:t3:*"), Status: result.Failure},
+ result.Result{Query: Q("a:b,c:t4:*"), Status: result.Failure},
+ result.Result{Query: Q("a:b,c:t5:*"), Status: result.Failure},
+ result.Result{Query: Q("a:b,c:t6:*"), Status: result.Failure},
+ result.Result{Query: Q("a:b,c:t7:*"), Status: result.Pass},
+ result.Result{Query: Q("a:b,c:t8:*"), Status: result.Failure},
+ result.Result{Query: Q("a:b,c:t9:*"), Status: result.Failure},
+ },
+ updated: `
+# New failures. Please triage:
+crbug.com/dawn/0000 a:* [ Failure ]
+`,
+ },
+ { //////////////////////////////////////////////////////////////////////
+ name: "don't merge when under 75% of children fail",
+ expectations: ``,
+ results: result.List{
+ result.Result{Query: Q("a:b,c:t0:*"), Status: result.Failure},
+ result.Result{Query: Q("a:b,c:t1:*"), Status: result.Pass},
+ result.Result{Query: Q("a:b,c:t2:*"), Status: result.Failure},
+ result.Result{Query: Q("a:b,c:t3:*"), Status: result.Failure},
+ result.Result{Query: Q("a:b,c:t4:*"), Status: result.Pass},
+ result.Result{Query: Q("a:b,c:t5:*"), Status: result.Failure},
+ result.Result{Query: Q("a:b,c:t6:*"), Status: result.Failure},
+ result.Result{Query: Q("a:b,c:t7:*"), Status: result.Pass},
+ result.Result{Query: Q("a:b,c:t8:*"), Status: result.Failure},
+ result.Result{Query: Q("a:b,c:t9:*"), Status: result.Failure},
+ },
+ updated: `
+# New failures. Please triage:
+crbug.com/dawn/0000 a:b,c:t0:* [ Failure ]
+crbug.com/dawn/0000 a:b,c:t2:* [ Failure ]
+crbug.com/dawn/0000 a:b,c:t3:* [ Failure ]
+crbug.com/dawn/0000 a:b,c:t5:* [ Failure ]
+crbug.com/dawn/0000 a:b,c:t6:* [ Failure ]
+crbug.com/dawn/0000 a:b,c:t8:* [ Failure ]
+crbug.com/dawn/0000 a:b,c:t9:* [ Failure ]
+`,
+ },
+ { //////////////////////////////////////////////////////////////////////
+ name: "merge when over 20 children fail",
+ expectations: ``,
+ results: result.List{ // 21 failures, 70% fail
+ result.Result{Query: Q("a:b,c:t00:*"), Status: result.Failure},
+ result.Result{Query: Q("a:b,c:t01:*"), Status: result.Pass},
+ result.Result{Query: Q("a:b,c:t02:*"), Status: result.Failure},
+ result.Result{Query: Q("a:b,c:t03:*"), Status: result.Pass},
+ result.Result{Query: Q("a:b,c:t04:*"), Status: result.Failure},
+ result.Result{Query: Q("a:b,c:t05:*"), Status: result.Failure},
+ result.Result{Query: Q("a:b,c:t06:*"), Status: result.Failure},
+ result.Result{Query: Q("a:b,c:t07:*"), Status: result.Pass},
+ result.Result{Query: Q("a:b,c:t08:*"), Status: result.Failure},
+ result.Result{Query: Q("a:b,c:t09:*"), Status: result.Failure},
+ result.Result{Query: Q("a:b,c:t10:*"), Status: result.Failure},
+ result.Result{Query: Q("a:b,c:t11:*"), Status: result.Pass},
+ result.Result{Query: Q("a:b,c:t12:*"), Status: result.Pass},
+ result.Result{Query: Q("a:b,c:t13:*"), Status: result.Failure},
+ result.Result{Query: Q("a:b,c:t14:*"), Status: result.Pass},
+ result.Result{Query: Q("a:b,c:t15:*"), Status: result.Failure},
+ result.Result{Query: Q("a:b,c:t16:*"), Status: result.Failure},
+ result.Result{Query: Q("a:b,c:t17:*"), Status: result.Failure},
+ result.Result{Query: Q("a:b,c:t18:*"), Status: result.Failure},
+ result.Result{Query: Q("a:b,c:t19:*"), Status: result.Failure},
+ result.Result{Query: Q("a:b,c:t20:*"), Status: result.Failure},
+ result.Result{Query: Q("a:b,c:t21:*"), Status: result.Pass},
+ result.Result{Query: Q("a:b,c:t22:*"), Status: result.Failure},
+ result.Result{Query: Q("a:b,c:t23:*"), Status: result.Failure},
+ result.Result{Query: Q("a:b,c:t24:*"), Status: result.Pass},
+ result.Result{Query: Q("a:b,c:t25:*"), Status: result.Failure},
+ result.Result{Query: Q("a:b,c:t26:*"), Status: result.Failure},
+ result.Result{Query: Q("a:b,c:t27:*"), Status: result.Pass},
+ result.Result{Query: Q("a:b,c:t28:*"), Status: result.Failure},
+ result.Result{Query: Q("a:b,c:t29:*"), Status: result.Failure},
+ },
+ updated: `
+# New failures. Please triage:
+crbug.com/dawn/0000 a:* [ Failure ]
+`,
+ },
+ { //////////////////////////////////////////////////////////////////////
+ name: "dont merge when under 21 children fail",
+ expectations: ``,
+ results: result.List{ // 20 failures, 66% fail
+ result.Result{Query: Q("a:b,c:t00:*"), Status: result.Failure},
+ result.Result{Query: Q("a:b,c:t01:*"), Status: result.Pass},
+ result.Result{Query: Q("a:b,c:t02:*"), Status: result.Failure},
+ result.Result{Query: Q("a:b,c:t03:*"), Status: result.Pass},
+ result.Result{Query: Q("a:b,c:t04:*"), Status: result.Failure},
+ result.Result{Query: Q("a:b,c:t05:*"), Status: result.Failure},
+ result.Result{Query: Q("a:b,c:t06:*"), Status: result.Failure},
+ result.Result{Query: Q("a:b,c:t07:*"), Status: result.Pass},
+ result.Result{Query: Q("a:b,c:t08:*"), Status: result.Failure},
+ result.Result{Query: Q("a:b,c:t09:*"), Status: result.Failure},
+ result.Result{Query: Q("a:b,c:t10:*"), Status: result.Failure},
+ result.Result{Query: Q("a:b,c:t11:*"), Status: result.Pass},
+ result.Result{Query: Q("a:b,c:t12:*"), Status: result.Pass},
+ result.Result{Query: Q("a:b,c:t13:*"), Status: result.Failure},
+ result.Result{Query: Q("a:b,c:t14:*"), Status: result.Pass},
+ result.Result{Query: Q("a:b,c:t15:*"), Status: result.Failure},
+ result.Result{Query: Q("a:b,c:t16:*"), Status: result.Failure},
+ result.Result{Query: Q("a:b,c:t17:*"), Status: result.Pass},
+ result.Result{Query: Q("a:b,c:t18:*"), Status: result.Failure},
+ result.Result{Query: Q("a:b,c:t19:*"), Status: result.Failure},
+ result.Result{Query: Q("a:b,c:t20:*"), Status: result.Failure},
+ result.Result{Query: Q("a:b,c:t21:*"), Status: result.Pass},
+ result.Result{Query: Q("a:b,c:t22:*"), Status: result.Failure},
+ result.Result{Query: Q("a:b,c:t23:*"), Status: result.Failure},
+ result.Result{Query: Q("a:b,c:t24:*"), Status: result.Pass},
+ result.Result{Query: Q("a:b,c:t25:*"), Status: result.Failure},
+ result.Result{Query: Q("a:b,c:t26:*"), Status: result.Failure},
+ result.Result{Query: Q("a:b,c:t27:*"), Status: result.Pass},
+ result.Result{Query: Q("a:b,c:t28:*"), Status: result.Failure},
+ result.Result{Query: Q("a:b,c:t29:*"), Status: result.Failure},
+ },
+ updated: `
+# New failures. Please triage:
+crbug.com/dawn/0000 a:b,c:t00:* [ Failure ]
+crbug.com/dawn/0000 a:b,c:t02:* [ Failure ]
+crbug.com/dawn/0000 a:b,c:t04:* [ Failure ]
+crbug.com/dawn/0000 a:b,c:t05:* [ Failure ]
+crbug.com/dawn/0000 a:b,c:t06:* [ Failure ]
+crbug.com/dawn/0000 a:b,c:t08:* [ Failure ]
+crbug.com/dawn/0000 a:b,c:t09:* [ Failure ]
+crbug.com/dawn/0000 a:b,c:t10:* [ Failure ]
+crbug.com/dawn/0000 a:b,c:t13:* [ Failure ]
+crbug.com/dawn/0000 a:b,c:t15:* [ Failure ]
+crbug.com/dawn/0000 a:b,c:t16:* [ Failure ]
+crbug.com/dawn/0000 a:b,c:t18:* [ Failure ]
+crbug.com/dawn/0000 a:b,c:t19:* [ Failure ]
+crbug.com/dawn/0000 a:b,c:t20:* [ Failure ]
+crbug.com/dawn/0000 a:b,c:t22:* [ Failure ]
+crbug.com/dawn/0000 a:b,c:t23:* [ Failure ]
+crbug.com/dawn/0000 a:b,c:t25:* [ Failure ]
+crbug.com/dawn/0000 a:b,c:t26:* [ Failure ]
+crbug.com/dawn/0000 a:b,c:t28:* [ Failure ]
+crbug.com/dawn/0000 a:b,c:t29:* [ Failure ]
+`,
+ },
+ } {
+ ex, err := expectations.Parse(header + test.expectations)
+ if err != nil {
+ t.Fatalf("'%v': expectations.Parse():\n%v", test.name, err)
+ }
+
+ errMsg := ""
+ diagnostics, err := ex.Update(test.results)
+ if err != nil {
+ errMsg = err.Error()
+ }
+ if diff := cmp.Diff(errMsg, test.err); diff != "" {
+ t.Errorf("'%v': expectations.Update() error:\n%v", test.name, diff)
+ }
+
+ if diff := cmp.Diff(diagnostics, test.diagnostics); diff != "" {
+ t.Errorf("'%v': diagnostics were not as expected:\n%v", test.name, diff)
+ }
+
+ if diff := cmp.Diff(
+ strings.Split(ex.String(), "\n"),
+ strings.Split(header+test.updated, "\n")); diff != "" {
+ t.Errorf("'%v': updated was not as expected:\n%v", test.name, diff)
+ }
+ }
+}
diff --git a/chromium/third_party/dawn/tools/src/cts/query/errors.go b/chromium/third_party/dawn/tools/src/cts/query/errors.go
new file mode 100644
index 00000000000..1ea605dd313
--- /dev/null
+++ b/chromium/third_party/dawn/tools/src/cts/query/errors.go
@@ -0,0 +1,33 @@
+// Copyright 2022 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package query
+
+import "fmt"
+
+type ErrNoDataForQuery struct {
+ Query Query
+}
+
+func (e ErrNoDataForQuery) Error() string {
+ return fmt.Sprintf("no data for query '%v'", e.Query)
+}
+
+type ErrDuplicateData struct {
+ Query Query
+}
+
+func (e ErrDuplicateData) Error() string {
+ return fmt.Sprintf("duplicate data '%v'", e.Query)
+}
diff --git a/chromium/third_party/dawn/tools/src/cts/query/query.go b/chromium/third_party/dawn/tools/src/cts/query/query.go
index 46efa710a7c..27b3d6270ac 100644
--- a/chromium/third_party/dawn/tools/src/cts/query/query.go
+++ b/chromium/third_party/dawn/tools/src/cts/query/query.go
@@ -198,6 +198,18 @@ func (q Query) CaseParameters() CaseParameters {
// Append returns the query with the additional strings appended to the target
func (q Query) Append(t Target, n ...string) Query {
switch t {
+ case Suite:
+ switch len(n) {
+ case 0:
+ return q
+ case 1:
+ if q.Suite != "" {
+ panic("cannot append suite when query already contains suite")
+ }
+ return Query{Suite: n[0]}
+ default:
+ panic("cannot append more than one suite")
+ }
case Files:
return q.AppendFiles(n...)
case Tests:
diff --git a/chromium/third_party/dawn/tools/src/cts/query/tree.go b/chromium/third_party/dawn/tools/src/cts/query/tree.go
new file mode 100644
index 00000000000..b52d993e1e6
--- /dev/null
+++ b/chromium/third_party/dawn/tools/src/cts/query/tree.go
@@ -0,0 +1,404 @@
+// Copyright 2022 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package query
+
+import (
+ "fmt"
+ "io"
+ "sort"
+)
+
+// Tree holds a tree structure of Query to generic Data type.
+// Each suite, file and test segment of a query produces a separate tree node.
+// All case parameters of a query are held by a single leaf node.
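+// For example, the query 'suite:a,b:c:d="e";*' produces the node chain
+// suite -> a -> b -> c, with a single Cases leaf 'd="e";*' holding the data.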
+type Tree[Data any] struct {
+ TreeNode[Data]
+}
+
+// TreeNode is a single node in the Tree
+type TreeNode[Data any] struct {
+ // The full query of the node
+ Query Query
+ // The data associated with this node. nil is used to represent no-data.
+ Data *Data
+ // Children of the node. Keyed by query.Target and name.
+ Children TreeNodeChildren[Data]
+}
+
+// TreeNodeChildKey is the key used by TreeNode for the Children map
+type TreeNodeChildKey struct {
+ // The child name. This is the string between `:` and `,` delimiters.
+	// Note that all test cases are held by a single TreeNode.
+ Name string
+ // The target type of the child. Examples:
+ // Query | Target of 'child'
+ // -----------------+--------------------
+ // parent:child | Files
+ // parent:x,child | Files
+ // parent:x:child | Test
+ // parent:x:y,child | Test
+ // parent:x:y:child | Cases
+ //
+	// It's possible for a directory and a '.spec.ts' file to share the same
+	// name, which is why the Target is included as part of the child key.
+ Target Target
+}
+
+// TreeNodeChildren is a map of TreeNodeChildKey to TreeNode pointer.
+// Data is the data type held by a TreeNode.
+type TreeNodeChildren[Data any] map[TreeNodeChildKey]*TreeNode[Data]
+
+// sortedChildKeys returns all the sorted children keys.
+func (n *TreeNode[Data]) sortedChildKeys() []TreeNodeChildKey {
+ keys := make([]TreeNodeChildKey, 0, len(n.Children))
+ for key := range n.Children {
+ keys = append(keys, key)
+ }
+ sort.Slice(keys, func(i, j int) bool {
+ a, b := keys[i], keys[j]
+ switch {
+ case a.Name < b.Name:
+ return true
+ case a.Name > b.Name:
+ return false
+ case a.Target < b.Target:
+ return true
+ case a.Target > b.Target:
+ return false
+ }
+ return false
+ })
+ return keys
+}
+
+// traverse performs a depth-first-search of the tree calling f for each visited
+// node, starting with n, then visiting each of the children in sorted order
+// (pre-order traversal).
+func (n *TreeNode[Data]) traverse(f func(n *TreeNode[Data]) error) error {
+ if err := f(n); err != nil {
+ return err
+ }
+ for _, key := range n.sortedChildKeys() {
+ if err := n.Children[key].traverse(f); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Merger is a function used to merge the children nodes of a tree.
+// Merger is called with the Data of each child node. If the function returns a
+// non-nil Data pointer, then this is used as the merged result. If the function
+// returns nil, then the node will not be merged.
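+// A typical Merger returns a pointer to the common Data value when all the
+// input values are equal, and nil otherwise (see 'reducer' in tree_test.go).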
+type Merger[Data any] func([]Data) *Data
+
+// merge collapses tree nodes based on child node data, using the function f.
+// merge operates on the leaf nodes first, working its way towards the root of
+// the tree.
+// Returns the merged target data for this node, or nil if the node is not a
+// leaf and its children have non-uniform data.
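+// When the Merger succeeds for all the children of a given target, those
+// children are replaced with a single wildcard ('*') child holding the merged
+// data.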
+func (n *TreeNode[Data]) merge(f Merger[Data]) *Data {
+ // If the node is a leaf, then simply return the node's data.
+ if len(n.Children) == 0 {
+ return n.Data
+ }
+
+ // Build a map of child target to merged child data.
+ // A nil for the value indicates that one or more children could not merge.
+ mergedChildren := map[Target][]Data{}
+ for key, child := range n.Children {
+ // Call merge() on the child. Even if we cannot merge this node, we want
+ // to do this for all children so they can merge their sub-graphs.
+ childData := child.merge(f)
+
+ if childData == nil {
+ // If merge() returned nil, then the data could not be merged.
+ // Mark the entire target as unmergeable.
+ mergedChildren[key.Target] = nil
+ continue
+ }
+
+ // Fetch the merge list for this child's target.
+ list, found := mergedChildren[key.Target]
+ if !found {
+ // First child with the given target?
+ mergedChildren[key.Target] = []Data{*childData}
+ continue
+ }
+ if list != nil {
+ mergedChildren[key.Target] = append(list, *childData)
+ }
+ }
+
+ merge := func(in []Data) *Data {
+ switch len(in) {
+ case 0:
+ return nil // nothing to merge.
+ case 1:
+ return &in[0] // merge of a single item results in that item
+ default:
+ return f(in)
+ }
+ }
+
+	// Might it be possible to merge this node?
+ maybeMergeable := true
+
+ // The merged data, per target
+ mergedTargets := map[Target]Data{}
+
+ // Attempt to merge each of the target's data
+ for target, list := range mergedChildren {
+ if list != nil { // nil == unmergeable target
+ if data := merge(list); data != nil {
+ // Merge success!
+ mergedTargets[target] = *data
+ continue
+ }
+ }
+ maybeMergeable = false // Merge of this node is not possible
+ }
+
+ // Remove all children that have been merged
+ for key := range n.Children {
+ if _, merged := mergedTargets[key.Target]; merged {
+ delete(n.Children, key)
+ }
+ }
+
+ // Add wildcards for merged targets
+ for target, data := range mergedTargets {
+ data := data // Don't take address of iterator
+ n.getOrCreateChild(TreeNodeChildKey{"*", target}).Data = &data
+ }
+
+ // If any of the targets are unmergeable, then we cannot merge the node itself.
+ if !maybeMergeable {
+ return nil
+ }
+
+ // All targets were merged. Attempt to merge each of the targets.
+ data := make([]Data, 0, len(mergedTargets))
+ for _, d := range mergedTargets {
+ data = append(data, d)
+ }
+ return merge(data)
+}
+
+// print writes a textual representation of this node and its children to w.
+// prefix is used as the line prefix for each node, which is appended with
+// whitespace for each child node.
+func (n *TreeNode[Data]) print(w io.Writer, prefix string) {
+ fmt.Fprintf(w, "%v{\n", prefix)
+ fmt.Fprintf(w, "%v query: '%v'\n", prefix, n.Query)
+ fmt.Fprintf(w, "%v data: '%v'\n", prefix, n.Data)
+ for _, key := range n.sortedChildKeys() {
+ n.Children[key].print(w, prefix+" ")
+ }
+ fmt.Fprintf(w, "%v}\n", prefix)
+}
+
+// Format implements the io.Formatter interface.
+// See https://pkg.go.dev/fmt#Formatter
+func (n *TreeNode[Data]) Format(f fmt.State, verb rune) {
+ n.print(f, "")
+}
+
+// getOrCreateChild returns the child with the given key if it exists,
+// otherwise the child node is created and added to n and is returned.
+func (n *TreeNode[Data]) getOrCreateChild(key TreeNodeChildKey) *TreeNode[Data] {
+ if n.Children == nil {
+ child := &TreeNode[Data]{Query: n.Query.Append(key.Target, key.Name)}
+ n.Children = TreeNodeChildren[Data]{key: child}
+ return child
+ }
+ if child, ok := n.Children[key]; ok {
+ return child
+ }
+ child := &TreeNode[Data]{Query: n.Query.Append(key.Target, key.Name)}
+ n.Children[key] = child
+ return child
+}
+
+// QueryData is a pair of a Query and a generic Data type.
+// Used by NewTree for constructing a tree with entries.
+type QueryData[Data any] struct {
+ Query Query
+ Data Data
+}
+
+// NewTree returns a new Tree populated with the given entries.
+// If entries contains duplicate queries, then ErrDuplicateData will be returned.
+func NewTree[Data any](entries ...QueryData[Data]) (Tree[Data], error) {
+ out := Tree[Data]{}
+ for _, qd := range entries {
+ if err := out.Add(qd.Query, qd.Data); err != nil {
+ return Tree[Data]{}, err
+ }
+ }
+ return out, nil
+}
+
+// Add adds new data to the tree.
+// Returns ErrDuplicateData if the tree already contains data for the given
+// query.
+func (t *Tree[Data]) Add(q Query, d Data) error {
+ node := &t.TreeNode
+ q.Walk(func(q Query, t Target, n string) error {
+ node = node.getOrCreateChild(TreeNodeChildKey{n, t})
+ return nil
+ })
+ if node.Data != nil {
+ return ErrDuplicateData{node.Query}
+ }
+ node.Data = &d
+ return nil
+}
+
+// Reduce reduces the tree using the Merger function f.
+// If the Merger function returns a non-nil Data value, then this will be used
+// to replace the non-leaf node with a new leaf node holding the returned Data.
+// This process recurses up to the tree root.
+func (t *Tree[Data]) Reduce(f Merger[Data]) {
+ for _, root := range t.TreeNode.Children {
+ root.merge(f)
+ }
+}
+
+// ReduceUnder reduces the sub-tree under the given query using the Merger
+// function f.
+// If the Merger function returns a non-nil Data value, then this will be used
+// to replace the non-leaf node with a new leaf node holding the returned Data.
+// This process recurses up to the node pointed at by the query to.
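+// For example, reducing under 'suite:a,*' merges 'suite:a,b,*' and
+// 'suite:a,c,*' into a single 'suite:a,*' entry when the Merger reports their
+// data as mergeable (see TestReduceUnder in tree_test.go).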
+func (t *Tree[Data]) ReduceUnder(to Query, f Merger[Data]) error {
+ node := &t.TreeNode
+ return to.Walk(func(q Query, t Target, n string) error {
+ if n == "*" {
+ node.merge(f)
+ return nil
+ }
+ child, ok := node.Children[TreeNodeChildKey{n, t}]
+ if !ok {
+ return ErrNoDataForQuery{q}
+ }
+ node = child
+ if q == to {
+ node.merge(f)
+ }
+ return nil
+ })
+}
+
+// glob calls f for every node under the given query.
+func (t *Tree[Data]) glob(fq Query, f func(f *TreeNode[Data]) error) error {
+ node := &t.TreeNode
+ return fq.Walk(func(q Query, t Target, n string) error {
+ if n == "*" {
+ // Wildcard reached.
+ // Glob the parent, but restrict to the wildcard target type.
+ for _, key := range node.sortedChildKeys() {
+ child := node.Children[key]
+ if child.Query.Target() == t {
+ if err := child.traverse(f); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+ }
+ switch t {
+ case Suite, Files, Tests:
+ child, ok := node.Children[TreeNodeChildKey{n, t}]
+ if !ok {
+ return ErrNoDataForQuery{q}
+ }
+ node = child
+ case Cases:
+ for _, key := range node.sortedChildKeys() {
+ child := node.Children[key]
+ if child.Query.Contains(fq) {
+ if err := f(child); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+ }
+ if q == fq {
+ return node.traverse(f)
+ }
+ return nil
+ })
+}
+
+// Replace replaces the sub-tree matching the query 'what' with the Data 'with'
+func (t *Tree[Data]) Replace(what Query, with Data) error {
+ node := &t.TreeNode
+ return what.Walk(func(q Query, t Target, n string) error {
+ childKey := TreeNodeChildKey{n, t}
+ if q == what {
+ for key, child := range node.Children {
+ // Use Query.Contains() to handle matching of Cases
+ // (which are not split into tree nodes)
+ if q.Contains(child.Query) {
+ delete(node.Children, key)
+ }
+ }
+ node = node.getOrCreateChild(childKey)
+ node.Data = &with
+ } else {
+ child, ok := node.Children[childKey]
+ if !ok {
+ return ErrNoDataForQuery{q}
+ }
+ node = child
+ }
+ return nil
+ })
+}
+
+// List returns the tree nodes flattened as a list of QueryData
+func (t *Tree[Data]) List() []QueryData[Data] {
+ out := []QueryData[Data]{}
+ t.traverse(func(n *TreeNode[Data]) error {
+ if n.Data != nil {
+ out = append(out, QueryData[Data]{n.Query, *n.Data})
+ }
+ return nil
+ })
+ return out
+}
+
+// Glob returns a list of QueryData entries for every node under the given
+// query that holds data.
+// Glob handles wildcards as well as non-wildcard queries:
+// * A non-wildcard query will match the node itself, along with every node
+// under the query. For example: 'a:b' will match every File and Test
+// node under 'a:b', including 'a:b' itself.
+// * A wildcard Query will include every node under the parent node with the
+// matching Query target. For example: 'a:b:*' will match every Test
+// node (excluding File nodes) under 'a:b'; 'a:b' itself will not be included.
+func (t *Tree[Data]) Glob(q Query) ([]QueryData[Data], error) {
+ out := []QueryData[Data]{}
+ err := t.glob(q, func(n *TreeNode[Data]) error {
+ if n.Data != nil {
+ out = append(out, QueryData[Data]{n.Query, *n.Data})
+ }
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
diff --git a/chromium/third_party/dawn/tools/src/cts/query/tree_test.go b/chromium/third_party/dawn/tools/src/cts/query/tree_test.go
new file mode 100644
index 00000000000..80af4756a2e
--- /dev/null
+++ b/chromium/third_party/dawn/tools/src/cts/query/tree_test.go
@@ -0,0 +1,934 @@
+package query_test
+
+import (
+ "fmt"
+ "testing"
+
+ "dawn.googlesource.com/dawn/tools/src/container"
+ "dawn.googlesource.com/dawn/tools/src/cts/query"
+ "dawn.googlesource.com/dawn/tools/src/utils"
+ "github.com/google/go-cmp/cmp"
+)
+
+var (
+ abort = "Abort"
+ crash = "Crash"
+ failure = "Failure"
+ pass = "Pass"
+ skip = "Skip"
+)
+
+func NewTree[Data any](t *testing.T, entries ...query.QueryData[Data]) (query.Tree[Data], error) {
+ return query.NewTree(entries...)
+}
+
+func TestNewSingle(t *testing.T) {
+ type Tree = query.Tree[string]
+ type Node = query.TreeNode[string]
+ type QueryData = query.QueryData[string]
+ type Children = query.TreeNodeChildren[string]
+
+ type Test struct {
+ in QueryData
+ expect Tree
+ }
+ for _, test := range []Test{
+ { /////////////////////////////////////////////////////////////////////
+ in: QueryData{
+ Query: Q(`suite:*`),
+ Data: pass,
+ },
+ expect: Tree{
+ TreeNode: Node{
+ Children: Children{
+ query.TreeNodeChildKey{`suite`, query.Suite}: {
+ Query: Q(`suite`),
+ Children: Children{
+ query.TreeNodeChildKey{`*`, query.Files}: {
+ Query: Q(`suite:*`),
+ Data: &pass,
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ { /////////////////////////////////////////////////////////////////////
+ in: QueryData{
+ Query: Q(`suite:a,*`),
+ Data: pass,
+ },
+ expect: Tree{
+ TreeNode: Node{
+ Children: Children{
+ query.TreeNodeChildKey{`suite`, query.Suite}: {
+ Query: Q(`suite`),
+ Children: Children{
+ query.TreeNodeChildKey{`a`, query.Files}: {
+ Query: Q(`suite:a`),
+ Children: Children{
+ query.TreeNodeChildKey{`*`, query.Files}: {
+ Query: Q(`suite:a,*`),
+ Data: &pass,
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ { /////////////////////////////////////////////////////////////////////
+ in: QueryData{
+ Query: Q(`suite:a,b:*`),
+ Data: pass,
+ },
+ expect: Tree{
+ TreeNode: Node{
+ Children: Children{
+ query.TreeNodeChildKey{`suite`, query.Suite}: {
+ Query: Q(`suite`),
+ Children: Children{
+ query.TreeNodeChildKey{`a`, query.Files}: {
+ Query: Q(`suite:a`),
+ Children: Children{
+ query.TreeNodeChildKey{`b`, query.Files}: {
+ Query: Q(`suite:a,b`),
+ Children: Children{
+ query.TreeNodeChildKey{`*`, query.Tests}: {
+ Query: Q(`suite:a,b:*`),
+ Data: &pass,
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ { /////////////////////////////////////////////////////////////////////
+ in: QueryData{
+ Query: Q(`suite:a,b:c:*`),
+ Data: pass,
+ },
+ expect: Tree{
+ TreeNode: Node{
+ Children: Children{
+ query.TreeNodeChildKey{`suite`, query.Suite}: {
+ Query: Q(`suite`),
+ Children: Children{
+ query.TreeNodeChildKey{`a`, query.Files}: {
+ Query: Q(`suite:a`),
+ Children: Children{
+ query.TreeNodeChildKey{`b`, query.Files}: {
+ Query: Q(`suite:a,b`),
+ Children: Children{
+ query.TreeNodeChildKey{`c`, query.Tests}: {
+ Query: Q(`suite:a,b:c`),
+ Children: Children{
+ query.TreeNodeChildKey{`*`, query.Cases}: {
+ Query: Q(`suite:a,b:c:*`),
+ Data: &pass,
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ { /////////////////////////////////////////////////////////////////////
+ in: QueryData{
+ Query: Q(`suite:a,b,c:d,e:f="g";h=[1,2,3];i=4;*`),
+ Data: pass,
+ },
+ expect: Tree{
+ TreeNode: Node{
+ Children: Children{
+ query.TreeNodeChildKey{`suite`, query.Suite}: {
+ Query: Q(`suite`),
+ Children: Children{
+ query.TreeNodeChildKey{`a`, query.Files}: {
+ Query: Q(`suite:a`),
+ Children: Children{
+ query.TreeNodeChildKey{`b`, query.Files}: {
+ Query: Q(`suite:a,b`),
+ Children: Children{
+ query.TreeNodeChildKey{`c`, query.Files}: {
+ Query: Q(`suite:a,b,c`),
+ Children: Children{
+ query.TreeNodeChildKey{`d`, query.Tests}: {
+ Query: Q(`suite:a,b,c:d`),
+ Children: Children{
+ query.TreeNodeChildKey{`e`, query.Tests}: {
+ Query: Q(`suite:a,b,c:d,e`),
+ Children: Children{
+ query.TreeNodeChildKey{`f="g";h=[1,2,3];i=4;*`, query.Cases}: {
+ Query: Q(`suite:a,b,c:d,e:f="g";h=[1,2,3];i=4;*`),
+ Data: &pass,
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ { /////////////////////////////////////////////////////////////////////
+ in: QueryData{
+ Query: Q(`suite:a,b:c:d="e";*`), Data: pass,
+ },
+ expect: Tree{
+ TreeNode: Node{
+ Children: Children{
+ query.TreeNodeChildKey{`suite`, query.Suite}: {
+ Query: Q(`suite`),
+ Children: Children{
+ query.TreeNodeChildKey{`a`, query.Files}: {
+ Query: Q(`suite:a`),
+ Children: Children{
+ query.TreeNodeChildKey{`b`, query.Files}: {
+ Query: Q(`suite:a,b`),
+ Children: Children{
+ query.TreeNodeChildKey{`c`, query.Tests}: {
+ Query: Q(`suite:a,b:c`),
+ Children: Children{
+ query.TreeNodeChildKey{`d="e";*`, query.Cases}: {
+ Query: Q(`suite:a,b:c:d="e";*`),
+ Data: &pass,
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ } {
+ got, err := NewTree(t, test.in)
+ if err != nil {
+ t.Errorf("NewTree(%v): %v", test.in, err)
+ continue
+ }
+ if diff := cmp.Diff(got, test.expect); diff != "" {
+ t.Errorf("NewTree(%v) tree was not as expected:\n%v", test.in, diff)
+ }
+ }
+
+}
+
+func TestNewMultiple(t *testing.T) {
+ type Tree = query.Tree[string]
+ type Node = query.TreeNode[string]
+ type QueryData = query.QueryData[string]
+ type Children = query.TreeNodeChildren[string]
+
+ got, err := NewTree(t,
+ QueryData{Query: Q(`suite:a,b:c:d="e";*`), Data: failure},
+ QueryData{Query: Q(`suite:h,b:c:f="g";*`), Data: abort},
+ QueryData{Query: Q(`suite:a,b:c:f="g";*`), Data: skip},
+ )
+ if err != nil {
+ t.Fatalf("NewTree() returned %v", err)
+ }
+
+ expect := Tree{
+ TreeNode: Node{
+ Children: Children{
+ query.TreeNodeChildKey{`suite`, query.Suite}: {
+ Query: Q(`suite`),
+ Children: Children{
+ query.TreeNodeChildKey{`a`, query.Files}: {
+ Query: Q(`suite:a`),
+ Children: Children{
+ query.TreeNodeChildKey{`b`, query.Files}: {
+ Query: Q(`suite:a,b`),
+ Children: Children{
+ query.TreeNodeChildKey{`c`, query.Tests}: {
+ Query: Q(`suite:a,b:c`),
+ Children: Children{
+ query.TreeNodeChildKey{`d="e";*`, query.Cases}: {
+ Query: Q(`suite:a,b:c:d="e";*`),
+ Data: &failure,
+ },
+ query.TreeNodeChildKey{`f="g";*`, query.Cases}: {
+ Query: Q(`suite:a,b:c:f="g";*`),
+ Data: &skip,
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ query.TreeNodeChildKey{`h`, query.Files}: {
+ Query: query.Query{
+ Suite: `suite`,
+ Files: `h`,
+ },
+ Children: Children{
+ query.TreeNodeChildKey{`b`, query.Files}: {
+ Query: query.Query{
+ Suite: `suite`,
+ Files: `h,b`,
+ },
+ Children: Children{
+ query.TreeNodeChildKey{`c`, query.Tests}: {
+ Query: query.Query{
+ Suite: `suite`,
+ Files: `h,b`,
+ Tests: `c`,
+ },
+ Children: Children{
+ query.TreeNodeChildKey{`f="g";*`, query.Cases}: {
+ Query: query.Query{
+ Suite: `suite`,
+ Files: `h,b`,
+ Tests: `c`,
+ Cases: `f="g";*`,
+ },
+ Data: &abort,
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ }
+ if diff := cmp.Diff(got, expect); diff != "" {
+ t.Errorf("NewTree() was not as expected:\n%v", diff)
+ t.Errorf("got:\n%v", got)
+ t.Errorf("expect:\n%v", expect)
+ }
+}
+
+func TestNewWithCollision(t *testing.T) {
+ type Tree = query.Tree[string]
+ type QueryData = query.QueryData[string]
+
+ got, err := NewTree(t,
+ QueryData{Query: Q(`suite:a,b:c:*`), Data: failure},
+ QueryData{Query: Q(`suite:a,b:c:*`), Data: skip},
+ )
+ expect := Tree{}
+ expectErr := query.ErrDuplicateData{
+ Query: Q(`suite:a,b:c:*`),
+ }
+ if diff := cmp.Diff(err, expectErr); diff != "" {
+ t.Errorf("NewTree() error was not as expected:\n%v", diff)
+ }
+ if diff := cmp.Diff(got, expect); diff != "" {
+ t.Errorf("NewTree() was not as expected:\n%v", diff)
+ }
+}
+
+func TestList(t *testing.T) {
+ type QueryData = query.QueryData[string]
+
+ tree, err := NewTree(t,
+ QueryData{Query: Q(`suite:*`), Data: skip},
+ QueryData{Query: Q(`suite:a,*`), Data: failure},
+ QueryData{Query: Q(`suite:a,b,*`), Data: failure},
+ QueryData{Query: Q(`suite:a,b:c:*`), Data: failure},
+ QueryData{Query: Q(`suite:a,b:c:d;*`), Data: failure},
+ QueryData{Query: Q(`suite:a,b:c:d="e";*`), Data: failure},
+ QueryData{Query: Q(`suite:h,b:c:f="g";*`), Data: abort},
+ QueryData{Query: Q(`suite:a,b:c:f="g";*`), Data: skip},
+ )
+ if err != nil {
+ t.Fatalf("NewTree() returned %v", err)
+ }
+
+ got := tree.List()
+ expect := []QueryData{
+ {Query: Q(`suite:*`), Data: skip},
+ {Query: Q(`suite:a,*`), Data: failure},
+ {Query: Q(`suite:a,b,*`), Data: failure},
+ {Query: Q(`suite:a,b:c:*`), Data: failure},
+ {Query: Q(`suite:a,b:c:d;*`), Data: failure},
+ {Query: Q(`suite:a,b:c:d="e";*`), Data: failure},
+ {Query: Q(`suite:a,b:c:f="g";*`), Data: skip},
+ {Query: Q(`suite:h,b:c:f="g";*`), Data: abort},
+ }
+ if diff := cmp.Diff(got, expect); diff != "" {
+ t.Errorf("List() was not as expected:\n%v", diff)
+ }
+}
+
+// reducer is used by Reduce() and ReduceUnder() tests for reducing the tree.
+// reducer returns a pointer to the common string if all strings in data are
+// equal, otherwise returns nil
+func reducer(data []string) *string {
+ if s := container.NewSet(data...); len(s) == 1 {
+ item := s.One()
+ return &item
+ }
+ return nil
+}
+
+func TestReduce(t *testing.T) {
+ type QueryData = query.QueryData[string]
+
+ type Test struct {
+ name string
+ in []QueryData
+ expect []QueryData
+ }
+ for _, test := range []Test{
+ { //////////////////////////////////////////////////////////////////////
+ name: "Different file results - A",
+ in: []QueryData{
+ {Query: Q(`suite:a,b,*`), Data: failure},
+ {Query: Q(`suite:a,c,*`), Data: pass},
+ },
+ expect: []QueryData{
+ {Query: Q(`suite:a,b,*`), Data: failure},
+ {Query: Q(`suite:a,c,*`), Data: pass},
+ },
+ },
+ { //////////////////////////////////////////////////////////////////////
+ name: "Different file results - B",
+ in: []QueryData{
+ {Query: Q(`suite:a,b,*`), Data: failure},
+ {Query: Q(`suite:a,c,*`), Data: pass},
+ {Query: Q(`suite:a,d,*`), Data: skip},
+ },
+ expect: []QueryData{
+ {Query: Q(`suite:a,b,*`), Data: failure},
+ {Query: Q(`suite:a,c,*`), Data: pass},
+ {Query: Q(`suite:a,d,*`), Data: skip},
+ },
+ },
+ { //////////////////////////////////////////////////////////////////////
+ name: "Different test results",
+ in: []QueryData{
+ {Query: Q(`suite:a,b:*`), Data: failure},
+ {Query: Q(`suite:a,c:*`), Data: pass},
+ },
+ expect: []QueryData{
+ {Query: Q(`suite:a,b:*`), Data: failure},
+ {Query: Q(`suite:a,c:*`), Data: pass},
+ },
+ },
+ { //////////////////////////////////////////////////////////////////////
+ name: "Same file results",
+ in: []QueryData{
+ {Query: Q(`suite:a,b,*`), Data: failure},
+ {Query: Q(`suite:a,c,*`), Data: failure},
+ },
+ expect: []QueryData{
+ {Query: Q(`suite:*`), Data: failure},
+ },
+ },
+ { //////////////////////////////////////////////////////////////////////
+ name: "Same test results",
+ in: []QueryData{
+ {Query: Q(`suite:a,b:*`), Data: failure},
+ {Query: Q(`suite:a,c:*`), Data: failure},
+ },
+ expect: []QueryData{
+ {Query: Q(`suite:*`), Data: failure},
+ },
+ },
+ { //////////////////////////////////////////////////////////////////////
+ name: "File vs test",
+ in: []QueryData{
+ {Query: Q(`suite:a:b,c*`), Data: failure},
+ {Query: Q(`suite:a,b,c*`), Data: pass},
+ },
+ expect: []QueryData{
+ {Query: Q(`suite:a,*`), Data: pass},
+ {Query: Q(`suite:a:*`), Data: failure},
+ },
+ },
+ { //////////////////////////////////////////////////////////////////////
+ name: "Sibling cases, no reduce",
+ in: []QueryData{
+ {Query: Q(`suite:a:b:c;d=e;f=g;*`), Data: failure},
+ {Query: Q(`suite:a:b:c;d=e;f=h;*`), Data: pass},
+ },
+ expect: []QueryData{
+ {Query: Q(`suite:a:b:c;d=e;f=g;*`), Data: failure},
+ {Query: Q(`suite:a:b:c;d=e;f=h;*`), Data: pass},
+ },
+ },
+ { //////////////////////////////////////////////////////////////////////
+ name: "Sibling cases, reduce to test",
+ in: []QueryData{
+ {Query: Q(`suite:a:b:c=1;d="x";*`), Data: failure},
+ {Query: Q(`suite:a:b:c=1;d="y";*`), Data: failure},
+ {Query: Q(`suite:a:z:*`), Data: pass},
+ },
+ expect: []QueryData{
+ {Query: Q(`suite:a:b:*`), Data: failure},
+ {Query: Q(`suite:a:z:*`), Data: pass},
+ },
+ },
+ } {
+ tree, err := NewTree(t, test.in...)
+ if err != nil {
+ t.Errorf("Test '%v':\nNewTree() returned %v", test.name, err)
+ continue
+ }
+ tree.Reduce(reducer)
+ results := tree.List()
+ if diff := cmp.Diff(results, test.expect); diff != "" {
+ t.Errorf("Test '%v':\n%v", test.name, diff)
+ }
+ }
+}
+
+func TestReduceUnder(t *testing.T) {
+ type QueryData = query.QueryData[string]
+
+ type Test struct {
+ location string
+ to query.Query
+ in []QueryData
+ expect []QueryData
+ expectErr error
+ }
+ for _, test := range []Test{
+ { //////////////////////////////////////////////////////////////////////
+ location: utils.ThisLine(),
+ to: Q(`suite:a,b,*`),
+ in: []QueryData{
+ {Query: Q(`suite:a,b,*`), Data: failure},
+ },
+ expect: []QueryData{
+ {Query: Q(`suite:a,b,*`), Data: failure},
+ },
+ },
+ { //////////////////////////////////////////////////////////////////////
+ location: utils.ThisLine(),
+ to: Q(`suite:a,*`),
+ in: []QueryData{
+ {Query: Q(`suite:a,b,*`), Data: failure},
+ },
+ expect: []QueryData{
+ {Query: Q(`suite:a,*`), Data: failure},
+ },
+ },
+ { //////////////////////////////////////////////////////////////////////
+ location: utils.ThisLine(),
+ to: Q(`suite:*`),
+ in: []QueryData{
+ {Query: Q(`suite:a,b:*`), Data: failure},
+ },
+ expect: []QueryData{
+ {Query: Q(`suite:*`), Data: failure},
+ },
+ },
+ { //////////////////////////////////////////////////////////////////////
+ location: utils.ThisLine(),
+ to: Q(`suite:a,*`),
+ in: []QueryData{
+ {Query: Q(`suite:a,b,*`), Data: failure},
+ {Query: Q(`suite:a,c,*`), Data: pass},
+ },
+ expect: []QueryData{
+ {Query: Q(`suite:a,b,*`), Data: failure},
+ {Query: Q(`suite:a,c,*`), Data: pass},
+ },
+ },
+ { //////////////////////////////////////////////////////////////////////
+ location: utils.ThisLine(),
+ to: Q(`suite:a,*`),
+ in: []QueryData{
+ {Query: Q(`suite:a,b,*`), Data: pass},
+ {Query: Q(`suite:a,c,*`), Data: pass},
+ },
+ expect: []QueryData{
+ {Query: Q(`suite:a,*`), Data: pass},
+ },
+ },
+ { //////////////////////////////////////////////////////////////////////
+ location: utils.ThisLine(),
+ to: Q(`suite:a`),
+ in: []QueryData{
+ {Query: Q(`suite:a,b,*`), Data: pass},
+ {Query: Q(`suite:a,c,*`), Data: pass},
+ },
+ expect: []QueryData{
+ {Query: Q(`suite:a,*`), Data: pass},
+ },
+ },
+ { //////////////////////////////////////////////////////////////////////
+ location: utils.ThisLine(),
+ to: Q(`suite:x`),
+ in: []QueryData{
+ {Query: Q(`suite:a,b,*`), Data: pass},
+ {Query: Q(`suite:a,c,*`), Data: pass},
+ },
+ expect: []QueryData{
+ {Query: Q(`suite:a,b,*`), Data: pass},
+ {Query: Q(`suite:a,c,*`), Data: pass},
+ },
+ expectErr: query.ErrNoDataForQuery{
+ Query: Q(`suite:x`),
+ },
+ },
+ { //////////////////////////////////////////////////////////////////////
+ location: utils.ThisLine(),
+ to: Q(`suite:a,b,c,*`),
+ in: []QueryData{
+ {Query: Q(`suite:a,b,*`), Data: pass},
+ },
+ expect: []QueryData{
+ {Query: Q(`suite:a,b,*`), Data: pass},
+ },
+ expectErr: query.ErrNoDataForQuery{
+ Query: Q(`suite:a,b,c`),
+ },
+ },
+ } {
+ tree, err := NewTree(t, test.in...)
+ if err != nil {
+ t.Errorf("\n%v NewTree(): %v", test.location, err)
+ continue
+ }
+ err = tree.ReduceUnder(test.to, reducer)
+ if diff := cmp.Diff(err, test.expectErr); diff != "" {
+ t.Errorf("\n%v ReduceUnder(): %v", test.location, err)
+ }
+ results := tree.List()
+ if diff := cmp.Diff(results, test.expect); diff != "" {
+ t.Errorf("\n%v List(): %v", test.location, diff)
+ }
+ }
+}
+
+func TestReplace(t *testing.T) {
+ type QueryData = query.QueryData[string]
+
+ type Test struct {
+ name string
+ base []QueryData
+ replacement QueryData
+ expect []QueryData
+ expectErr error
+ }
+ for _, test := range []Test{
+ { //////////////////////////////////////////////////////////////////////
+ name: "Replace file. Direct",
+ base: []QueryData{
+ {Query: Q(`suite:a,b,*`), Data: failure},
+ {Query: Q(`suite:a,c,*`), Data: pass},
+ },
+ replacement: QueryData{Q(`suite:a,b,*`), skip},
+ expect: []QueryData{
+ {Query: Q(`suite:a,b,*`), Data: skip},
+ {Query: Q(`suite:a,c,*`), Data: pass},
+ },
+ },
+ { //////////////////////////////////////////////////////////////////////
+ name: "Replace file. Indirect",
+ base: []QueryData{
+ {Query: Q(`suite:a,b,c,*`), Data: failure},
+ {Query: Q(`suite:a,b,d,*`), Data: pass},
+ {Query: Q(`suite:a,c,*`), Data: pass},
+ },
+ replacement: QueryData{Q(`suite:a,b,*`), skip},
+ expect: []QueryData{
+ {Query: Q(`suite:a,b,*`), Data: skip},
+ {Query: Q(`suite:a,c,*`), Data: pass},
+ },
+ },
+ { //////////////////////////////////////////////////////////////////////
+ name: "File vs Test",
+ base: []QueryData{
+ {Query: Q(`suite:a,b:c,*`), Data: crash},
+ {Query: Q(`suite:a,b:d,*`), Data: abort},
+ {Query: Q(`suite:a,b,c,*`), Data: failure},
+ {Query: Q(`suite:a,b,d,*`), Data: pass},
+ },
+ replacement: QueryData{Q(`suite:a,b,*`), skip},
+ expect: []QueryData{
+ {Query: Q(`suite:a,b,*`), Data: skip},
+ },
+ },
+ { //////////////////////////////////////////////////////////////////////
+ name: "Cases. * with *",
+ base: []QueryData{
+ {Query: Q(`suite:file:test:*`), Data: failure},
+ },
+ replacement: QueryData{Q(`suite:file:test:*`), pass},
+ expect: []QueryData{
+ {Query: Q(`suite:file:test:*`), Data: pass},
+ },
+ },
+ { //////////////////////////////////////////////////////////////////////
+ name: "Cases. Mixed with *",
+ base: []QueryData{
+ {Query: Q(`suite:file:test:a=1,*`), Data: failure},
+ {Query: Q(`suite:file:test:a=2,*`), Data: skip},
+ {Query: Q(`suite:file:test:a=3,*`), Data: crash},
+ },
+ replacement: QueryData{Q(`suite:file:test:*`), pass},
+ expect: []QueryData{
+ {Query: Q(`suite:file:test:*`), Data: pass},
+ },
+ },
+ { //////////////////////////////////////////////////////////////////////
+ name: "Cases. Replace partial - (a=1)",
+ base: []QueryData{
+ {Query: Q(`suite:file:test:a=1;b=x;*`), Data: failure},
+ {Query: Q(`suite:file:test:a=1;b=y;*`), Data: failure},
+ {Query: Q(`suite:file:test:a=2;b=y;*`), Data: failure},
+ },
+ replacement: QueryData{Q(`suite:file:test:a=1;*`), pass},
+ expect: []QueryData{
+ {Query: Q(`suite:file:test:a=1;*`), Data: pass},
+ {Query: Q(`suite:file:test:a=2;b=y;*`), Data: failure},
+ },
+ },
+ { //////////////////////////////////////////////////////////////////////
+ name: "Cases. Replace partial - (b=y)",
+ base: []QueryData{
+ {Query: Q(`suite:file:test:a=1;b=x;*`), Data: failure},
+ {Query: Q(`suite:file:test:a=1;b=y;*`), Data: failure},
+ {Query: Q(`suite:file:test:a=2;b=y;*`), Data: failure},
+ },
+ replacement: QueryData{Q(`suite:file:test:b=y;*`), pass},
+ expect: []QueryData{
+ {Query: Q(`suite:file:test:a=1;b=x;*`), Data: failure},
+ {Query: Q(`suite:file:test:b=y;*`), Data: pass},
+ },
+ },
+ { //////////////////////////////////////////////////////////////////////
+ name: "Error. No data for query - short",
+ base: []QueryData{
+ {Query: Q(`suite:file:test:a=1;b=x;*`), Data: failure},
+ },
+ replacement: QueryData{Q(`suite:missing:*`), pass},
+ expect: []QueryData{
+ {Query: Q(`suite:file:test:a=1;b=x;*`), Data: failure},
+ },
+ expectErr: query.ErrNoDataForQuery{Q(`suite:missing`)},
+ },
+ { //////////////////////////////////////////////////////////////////////
+ name: "Error. No data for query - long",
+ base: []QueryData{
+ {Query: Q(`suite:file:test:*`), Data: failure},
+ },
+ replacement: QueryData{Q(`suite:file:test,missing,*`), pass},
+ expect: []QueryData{
+ {Query: Q(`suite:file:test:*`), Data: failure},
+ },
+ expectErr: query.ErrNoDataForQuery{Q(`suite:file:test,missing`)},
+ },
+ } {
+ tree, err := NewTree(t, test.base...)
+ if err != nil {
+ t.Errorf("Test '%v':\nNewTree(): %v", test.name, err)
+ continue
+ }
+ err = tree.Replace(test.replacement.Query, test.replacement.Data)
+ if diff := cmp.Diff(err, test.expectErr); diff != "" {
+ t.Errorf("Test '%v':\nReplace() error: %v", test.name, err)
+ continue
+ }
+ if diff := cmp.Diff(tree.List(), test.expect); diff != "" {
+ t.Errorf("Test '%v':\n%v", test.name, diff)
+ }
+ }
+}
+
+func TestGlob(t *testing.T) {
+ type QueryData = query.QueryData[string]
+
+ tree, err := NewTree(t,
+ QueryData{Query: Q(`suite:*`), Data: skip},
+ QueryData{Query: Q(`suite:a,*`), Data: failure},
+ QueryData{Query: Q(`suite:a,b,*`), Data: failure},
+ QueryData{Query: Q(`suite:a,b:c:d;*`), Data: failure},
+ QueryData{Query: Q(`suite:a,b:c:d="e";*`), Data: failure},
+ QueryData{Query: Q(`suite:h,b:c:f="g";*`), Data: abort},
+ QueryData{Query: Q(`suite:a,b:c:f="g";*`), Data: skip},
+ QueryData{Query: Q(`suite:a,b:d:*`), Data: failure},
+ )
+ if err != nil {
+ t.Fatalf("NewTree() returned %v", err)
+ }
+
+ type Test struct {
+ query query.Query
+ expect []QueryData
+ expectErr error
+ }
+ for _, test := range []Test{
+ { //////////////////////////////////////////////////////////////////////
+ query: Q(`suite`),
+ expect: []QueryData{
+ {Query: Q(`suite:*`), Data: skip},
+ {Query: Q(`suite:a,*`), Data: failure},
+ {Query: Q(`suite:a,b,*`), Data: failure},
+ {Query: Q(`suite:a,b:c:d;*`), Data: failure},
+ {Query: Q(`suite:a,b:c:d="e";*`), Data: failure},
+ {Query: Q(`suite:a,b:c:f="g";*`), Data: skip},
+ {Query: Q(`suite:a,b:d:*`), Data: failure},
+ {Query: Q(`suite:h,b:c:f="g";*`), Data: abort},
+ },
+ },
+ { //////////////////////////////////////////////////////////////////////
+ query: Q(`suite:*`),
+ expect: []QueryData{
+ {Query: Q(`suite:*`), Data: skip},
+ {Query: Q(`suite:a,*`), Data: failure},
+ {Query: Q(`suite:a,b,*`), Data: failure},
+ {Query: Q(`suite:a,b:c:d;*`), Data: failure},
+ {Query: Q(`suite:a,b:c:d="e";*`), Data: failure},
+ {Query: Q(`suite:a,b:c:f="g";*`), Data: skip},
+ {Query: Q(`suite:a,b:d:*`), Data: failure},
+ {Query: Q(`suite:h,b:c:f="g";*`), Data: abort},
+ },
+ },
+ { //////////////////////////////////////////////////////////////////////
+ query: Q(`suite:a`),
+ expect: []QueryData{
+ {Query: Q(`suite:a,*`), Data: failure},
+ {Query: Q(`suite:a,b,*`), Data: failure},
+ {Query: Q(`suite:a,b:c:d;*`), Data: failure},
+ {Query: Q(`suite:a,b:c:d="e";*`), Data: failure},
+ {Query: Q(`suite:a,b:c:f="g";*`), Data: skip},
+ {Query: Q(`suite:a,b:d:*`), Data: failure},
+ },
+ },
+ { //////////////////////////////////////////////////////////////////////
+ query: Q(`suite:a,*`),
+ expect: []QueryData{
+ {Query: Q(`suite:a,*`), Data: failure},
+ {Query: Q(`suite:a,b,*`), Data: failure},
+ {Query: Q(`suite:a,b:c:d;*`), Data: failure},
+ {Query: Q(`suite:a,b:c:d="e";*`), Data: failure},
+ {Query: Q(`suite:a,b:c:f="g";*`), Data: skip},
+ {Query: Q(`suite:a,b:d:*`), Data: failure},
+ },
+ },
+ { //////////////////////////////////////////////////////////////////////
+ query: Q(`suite:a,b`),
+ expect: []QueryData{
+ {Query: Q(`suite:a,b,*`), Data: failure},
+ {Query: Q(`suite:a,b:c:d;*`), Data: failure},
+ {Query: Q(`suite:a,b:c:d="e";*`), Data: failure},
+ {Query: Q(`suite:a,b:c:f="g";*`), Data: skip},
+ {Query: Q(`suite:a,b:d:*`), Data: failure},
+ },
+ },
+ { //////////////////////////////////////////////////////////////////////
+ query: Q(`suite:a,b,*`),
+ expect: []QueryData{
+ {Query: Q(`suite:a,b,*`), Data: failure},
+ },
+ },
+ { //////////////////////////////////////////////////////////////////////
+ query: Q(`suite:a,b:c:*`),
+ expect: []QueryData{
+ {Query: Q(`suite:a,b:c:d;*`), Data: failure},
+ {Query: Q(`suite:a,b:c:d="e";*`), Data: failure},
+ {Query: Q(`suite:a,b:c:f="g";*`), Data: skip},
+ },
+ },
+ { //////////////////////////////////////////////////////////////////////
+ query: Q(`suite:a,b:c`),
+ expect: []QueryData{
+ {Query: Q(`suite:a,b:c:d;*`), Data: failure},
+ {Query: Q(`suite:a,b:c:d="e";*`), Data: failure},
+ {Query: Q(`suite:a,b:c:f="g";*`), Data: skip},
+ },
+ },
+ { //////////////////////////////////////////////////////////////////////
+ query: Q(`suite:a,b:c:d="e";*`),
+ expect: []QueryData{
+ {Query: Q(`suite:a,b:c:d="e";*`), Data: failure},
+ {Query: Q(`suite:a,b:c:f="g";*`), Data: skip},
+ },
+ },
+ { //////////////////////////////////////////////////////////////////////
+ query: Q(`suite:a,b:c:d;*`),
+ expect: []QueryData{
+ {Query: Q(`suite:a,b:c:d;*`), Data: failure},
+ {Query: Q(`suite:a,b:c:f="g";*`), Data: skip},
+ },
+ },
+ { //////////////////////////////////////////////////////////////////////
+ query: Q(`suite:a,b:c:f="g";*`),
+ expect: []QueryData{
+ {Query: Q(`suite:a,b:c:d;*`), Data: failure},
+ {Query: Q(`suite:a,b:c:d="e";*`), Data: failure},
+ {Query: Q(`suite:a,b:c:f="g";*`), Data: skip},
+ },
+ },
+ { //////////////////////////////////////////////////////////////////////
+ query: Q(`suite:x,y`),
+ expectErr: query.ErrNoDataForQuery{Q(`suite:x`)},
+ },
+ { //////////////////////////////////////////////////////////////////////
+ query: Q(`suite:a,b:x`),
+ expectErr: query.ErrNoDataForQuery{Q(`suite:a,b:x`)},
+ },
+ } {
+ got, err := tree.Glob(test.query)
+ if diff := cmp.Diff(err, test.expectErr); diff != "" {
+ t.Errorf("Glob('%v') error: %v", test.query, err)
+ continue
+ }
+ if diff := cmp.Diff(got, test.expect); diff != "" {
+ t.Errorf("Glob('%v'):\n%v", test.query, diff)
+ }
+ }
+}
+
+func TestFormat(t *testing.T) {
+ type QueryData = query.QueryData[string]
+
+ tree, err := NewTree(t,
+ QueryData{Query: Q(`suite:*`), Data: skip},
+ QueryData{Query: Q(`suite:a,*`), Data: failure},
+ QueryData{Query: Q(`suite:a,b,*`), Data: failure},
+ QueryData{Query: Q(`suite:a,b:c:d;*`), Data: failure},
+ QueryData{Query: Q(`suite:a,b:c:d="e";*`), Data: failure},
+ QueryData{Query: Q(`suite:h,b:c:f="g";*`), Data: abort},
+ QueryData{Query: Q(`suite:a,b:c:f="g";*`), Data: skip},
+ QueryData{Query: Q(`suite:a,b:d:*`), Data: failure},
+ )
+ if err != nil {
+ t.Fatalf("NewTree() returned %v", err)
+ }
+
+ callA := fmt.Sprint(tree)
+ callB := fmt.Sprint(tree)
+
+ if diff := cmp.Diff(callA, callB); diff != "" {
+ t.Errorf("Format():\n%v", diff)
+ }
+}
diff --git a/chromium/third_party/dawn/tools/src/cts/result/mvt.go b/chromium/third_party/dawn/tools/src/cts/result/mvt.go
new file mode 100644
index 00000000000..2318821bed4
--- /dev/null
+++ b/chromium/third_party/dawn/tools/src/cts/result/mvt.go
@@ -0,0 +1,146 @@
+// Copyright 2022 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package result
+
+import (
+ "sort"
+
+ "dawn.googlesource.com/dawn/tools/src/cts/query"
+)
+
+// MinimalVariantTags accepts a list of tag-sets (e.g. GPU tags, OS tags, etc.),
+// and returns an optimized list of variants, folding together variants that
+// have identical result query-to-status mappings, and removing redundant tags.
+//
+// MinimalVariantTags will attempt to remove variant tags starting with the
+// first set of tags in tagSets, then the second, and so on. If a tag-set cannot
+// be removed, then the tags of the set are left alone, and the algorithm will
+// progress to the next tag-set.
+//
+// MinimalVariantTags assumes that there are no duplicate results (same query,
+// same tags) in l.
+func (l List) MinimalVariantTags(tagSets []Tags) []Variant {
+ type VariantData struct {
+ // The variant tags
+ tags Variant
+ // The query -> status for all results in l that have this variant's
+ // tags.
+ queryToStatus map[query.Query]Status
+ }
+
+ variants := []VariantData{}
+
+ // Build the initial list of variants from l.
+ // Bin result [query -> status] to the variant.
+ {
+ variantIndices := map[string]int{}
+ for _, r := range l {
+ key := TagsToString(r.Tags)
+ if idx, found := variantIndices[key]; !found {
+ variantIndices[key] = len(variants)
+ variants = append(variants, VariantData{
+ tags: Variant(r.Tags.Clone()),
+ queryToStatus: map[query.Query]Status{
+ r.Query: r.Status,
+ },
+ })
+ } else {
+ variants[idx].queryToStatus[r.Query] = r.Status
+ }
+ }
+ }
+
+ // canReduce checks that the variant would match the same results if the
+ // tags were reduced to 'tags'. Returns true if the variant's tags could
+ // be reduced, otherwise false.
+ canReduce := func(variant VariantData, tags Tags) bool {
+ for _, r := range l.FilterByTags(tags) {
+ existing, found := variant.queryToStatus[r.Query]
+ if !found {
+ // Removing the tag has expanded the set of queries.
+ return false
+ }
+ if existing != r.Status {
+ // Removing the tag has resulted in two queries with different
+ // results.
+ return false
+ }
+ }
+ return true
+ }
+
+ // tryToRemoveTags will remove all the tags in 'tags' from all variants
+ // iff doing so does not affect the set of results filtered by each variant.
+ // If it was possible to remove the tags, then variants that now have the
+ // same tags may be folded together, reducing the total number of variants.
+ tryToRemoveTags := func(tags Tags) {
+ newVariants := make([]VariantData, 0, len(variants))
+
+ for _, v := range variants {
+ // Does the variant even contain these tags?
+ if !v.tags.ContainsAny(tags) {
+ // Nope. Skip the canReduce() call, and keep the variant.
+ newVariants = append(newVariants, v)
+ continue
+ }
+
+ // Build the new set of tags with 'tags' removed.
+ newTags := v.tags.Clone()
+ newTags.RemoveAll(tags)
+
+ // Check whether removal of these tags affected the outcome.
+ if !canReduce(v, newTags) {
+ // Removing these tags resulted in differences.
+ return // Abort
+ }
+ newVariants = append(newVariants, VariantData{newTags, v.queryToStatus})
+ }
+
+ // Remove variants that are now subsets of others.
+ // Start by sorting the variants by number of tags.
+ // This ensures that the variants with fewer tags (fewer constraints)
+ // come first.
+ sort.Slice(newVariants, func(i, j int) bool {
+ return len(newVariants[i].tags) < len(newVariants[j].tags)
+ })
+
+ // Now check each variant's tags against the previous variant tags.
+ // As we've sorted, we know that supersets (fewer-tags) come before
+ // subsets (more-tags).
+ variants = []VariantData{}
+
+ nextVariant:
+ for i, v1 := range newVariants { // for variants 0..N
+ for _, v2 := range newVariants[:i] { // for variants 0..i
+ if v1.tags.ContainsAll(v2.tags) {
+ continue nextVariant // v1 is a subset of v2. Omit.
+ }
+ }
+ variants = append(variants, v1)
+ }
+ }
+
+ // Attempt to remove the tag sets from the variants, one by one.
+ for _, tags := range tagSets {
+ tryToRemoveTags(tags)
+ }
+
+ // Return the final set of unique variants
+ out := make([]Variant, len(variants))
+ for i, v := range variants {
+ out[i] = v.tags
+ }
+ return out
+}
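+
+// A minimal usage sketch for MinimalVariantTags (illustrative only; the tag
+// names, query and statuses below are assumptions, not real CTS data):
+//
+//	results := List{
+//		{Query: query.Parse("suite:a:*"), Tags: NewTags("linux", "nvidia"), Status: Pass},
+//		{Query: query.Parse("suite:a:*"), Tags: NewTags("win", "nvidia"), Status: Pass},
+//	}
+//	// Try to fold away the OS tags first, then the GPU tags.
+//	variants := results.MinimalVariantTags([]Tags{
+//		NewTags("linux", "win"),
+//		NewTags("nvidia", "amd"),
+//	})
+//	// Both results map the same query to the same status, so both tag-sets can
+//	// be dropped and a single (possibly empty) variant remains in 'variants'.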
diff --git a/chromium/third_party/dawn/tools/src/cts/result/mvt_test.go b/chromium/third_party/dawn/tools/src/cts/result/mvt_test.go
new file mode 100644
index 00000000000..bfa67a7694e
--- /dev/null
+++ b/chromium/third_party/dawn/tools/src/cts/result/mvt_test.go
@@ -0,0 +1,117 @@
+// Copyright 2022 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package result_test
+
+import (
+ "fmt"
+ "testing"
+
+ "dawn.googlesource.com/dawn/tools/src/cts/result"
+ "dawn.googlesource.com/dawn/tools/src/utils"
+ "github.com/google/go-cmp/cmp"
+)
+
+func TestMinimalVariantTags(t *testing.T) {
+ type Test struct {
+ location string
+ results result.List
+ expect []result.Variant
+ }
+ for _, test := range []Test{
+ { //////////////////////////////////////////////////////////////////////
+ location: utils.ThisLine(),
+ results: result.List{},
+ expect: []result.Variant{},
+ }, { ///////////////////////////////////////////////////////////////////
+ // Single variant, that can be entirely optimized away
+ location: utils.ThisLine(),
+ results: result.List{
+ {Query: Q("a:b,c:d,*"), Tags: T("a0", "b1", "c2"), Status: result.Pass},
+ },
+ expect: []result.Variant{T()},
+ }, { ///////////////////////////////////////////////////////////////////
+ // Multiple variants on the same query.
+ // Can also be entirely optimized away.
+ location: utils.ThisLine(),
+ results: result.List{
+ {Query: Q("a:b,c:d,*"), Tags: T("a0", "b1", "c2"), Status: result.Pass},
+ {Query: Q("a:b,c:d,*"), Tags: T("a1", "b2", "c0"), Status: result.Pass},
+ {Query: Q("a:b,c:d,*"), Tags: T("a2", "b1", "c0"), Status: result.Pass},
+ },
+ expect: []result.Variant{T()},
+ }, { ///////////////////////////////////////////////////////////////////
+ // Two variants where the 1st and 2nd tag-sets are redundant.
+ location: utils.ThisLine(),
+ results: result.List{
+ {Query: Q("a:b,c:d,*"), Tags: T("a0", "b0", "c0"), Status: result.Pass},
+ {Query: Q("a:b,c:d,*"), Tags: T("a1", "b1", "c1"), Status: result.Failure},
+ },
+ expect: []result.Variant{T("c0"), T("c1")},
+ }, { ///////////////////////////////////////////////////////////////////
+ // Two variants where the 1st and 3rd tag-sets are redundant.
+ location: utils.ThisLine(),
+ results: result.List{
+ {Query: Q("a:b,c:d,*"), Tags: T("a0", "b0", "c0"), Status: result.Pass},
+ {Query: Q("a:b,c:d,*"), Tags: T("a1", "b1", "c1"), Status: result.Failure},
+ {Query: Q("a:b,c:d,*"), Tags: T("a0", "b0", "c1"), Status: result.Pass},
+ {Query: Q("a:b,c:d,*"), Tags: T("a1", "b1", "c0"), Status: result.Failure},
+ },
+ expect: []result.Variant{T("b0"), T("b1")},
+ }, { ///////////////////////////////////////////////////////////////////
+ // Two variants where the 2nd and 3rd tag-sets are redundant.
+ location: utils.ThisLine(),
+ results: result.List{
+ {Query: Q("a:b,c:d,*"), Tags: T("a0", "b0", "c0"), Status: result.Pass},
+ {Query: Q("a:b,c:d,*"), Tags: T("a1", "b1", "c1"), Status: result.Failure},
+ {Query: Q("a:b,c:d,*"), Tags: T("a0", "b1", "c1"), Status: result.Pass},
+ {Query: Q("a:b,c:d,*"), Tags: T("a1", "b0", "c0"), Status: result.Failure},
+ },
+ expect: []result.Variant{T("a0"), T("a1")},
+ }, { ///////////////////////////////////////////////////////////////////
+ // Check that variants aren't optimized to expand the set of results
+ // they target, even if results are uniform
+ location: utils.ThisLine(),
+ results: result.List{
+ {Query: Q("a:b,c:d0,*"), Tags: T("a0", "b0", "c0"), Status: result.Pass},
+ {Query: Q("a:b,c:d1,*"), Tags: T("a1", "b1", "c1"), Status: result.Pass},
+ },
+ expect: []result.Variant{T("c0"), T("c1")},
+ }, { ///////////////////////////////////////////////////////////////////
+ // Exercise the optimizations to skip checks on tag removals that
+ // aren't found in all variants
+ location: utils.ThisLine(),
+ results: result.List{
+ {Query: Q("a:b,c:d0,*"), Tags: T("a0"), Status: result.Pass},
+ {Query: Q("a:b,c:d1,*"), Tags: T("b0"), Status: result.Pass},
+ {Query: Q("a:b,c:d2,*"), Tags: T("c0"), Status: result.Pass},
+ },
+ expect: []result.Variant{T("a0"), T("b0"), T("c0")},
+ },
+ } {
+ preReduce := fmt.Sprint(test.results)
+ got := test.results.MinimalVariantTags([]result.Tags{
+ T("a0", "a1", "a2"),
+ T("b0", "b1", "b2"),
+ T("c0", "c1", "c2"),
+ })
+ postReduce := fmt.Sprint(test.results)
+ if diff := cmp.Diff(got, test.expect); diff != "" {
+ t.Errorf("%v MinimalVariantTags() diff:\n%v", test.location, diff)
+ }
+ if diff := cmp.Diff(preReduce, postReduce); diff != "" {
+ t.Errorf("%v MinimalVariantTags() modified original list:\n%v", test.location, diff)
+ }
+ }
+}
diff --git a/chromium/third_party/dawn/tools/src/cts/result/result.go b/chromium/third_party/dawn/tools/src/cts/result/result.go
index 2d4709458ac..623a90611b0 100644
--- a/chromium/third_party/dawn/tools/src/cts/result/result.go
+++ b/chromium/third_party/dawn/tools/src/cts/result/result.go
@@ -16,9 +16,14 @@
package result
import (
+ "bufio"
"fmt"
+ "io"
+ "os"
+ "path/filepath"
"sort"
"strings"
+ "time"
"dawn.googlesource.com/dawn/tools/src/container"
"dawn.googlesource.com/dawn/tools/src/cts/query"
@@ -26,9 +31,10 @@ import (
// Result holds the result of a CTS test
type Result struct {
- Query query.Query
- Tags Tags
- Status Status
+ Query query.Query
+ Tags Tags
+ Status Status
+ Duration time.Duration
}
// Format writes the Result to the fmt.State
@@ -37,9 +43,9 @@ type Result struct {
// This matches the order in which results are sorted.
func (r Result) Format(f fmt.State, verb rune) {
if len(r.Tags) > 0 {
- fmt.Fprintf(f, "%v %v %v", r.Query, TagsToString(r.Tags), r.Status)
+ fmt.Fprintf(f, "%v %v %v %v", r.Query, TagsToString(r.Tags), r.Status, r.Duration)
} else {
- fmt.Fprintf(f, "%v %v", r.Query, r.Status)
+ fmt.Fprintf(f, "%v %v %v", r.Query, r.Status, r.Duration)
}
}
@@ -50,6 +56,34 @@ func (r Result) String() string {
return sb.String()
}
+// Compare compares the relative order of r and o, returning:
+// -1 if r should come before o
+// 1 if r should come after o
+// 0 if r and o are identical
+// Note: Result.Duration is not considered in comparison.
+func (r Result) Compare(o Result) int {
+ a, b := r, o
+ switch a.Query.Compare(b.Query) {
+ case -1:
+ return -1
+ case 1:
+ return 1
+ }
+ ta := strings.Join(a.Tags.List(), TagDelimiter)
+ tb := strings.Join(b.Tags.List(), TagDelimiter)
+ switch {
+ case ta < tb:
+ return -1
+ case ta > tb:
+ return 1
+ case a.Status < b.Status:
+ return -1
+ case a.Status > b.Status:
+ return 1
+ }
+ return 0
+}
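+
+// For example (illustrative only; assumed queries):
+//
+//	a := Result{Query: query.Parse("suite:a:*"), Status: Pass}
+//	b := Result{Query: query.Parse("suite:b:*"), Status: Failure}
+//	a.Compare(b) // -1: "suite:a:*" orders before "suite:b:*"
+//	b.Compare(a) // 1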
+
// Parse parses the result from a string of the form:
// <query> <tags> <status>
// <tags> may be omitted if there were no tags.
@@ -77,25 +111,41 @@ func Parse(in string) (Result, error) {
a := token()
b := token()
c := token()
- if a == "" || b == "" || token() != "" {
+ d := token()
+ if a == "" || b == "" || c == "" || token() != "" {
return Result{}, fmt.Errorf("unable to parse result '%v'", in)
}
- q := query.Parse(a)
- if c == "" {
+
+ query := query.Parse(a)
+
+ if d == "" {
status := Status(b)
- return Result{q, nil, status}, nil
+ duration, err := time.ParseDuration(c)
+ if err != nil {
+ return Result{}, fmt.Errorf("unable to parse result '%v': %w", in, err)
+ }
+ return Result{query, nil, status, duration}, nil
+ } else {
+ tags := StringToTags(b)
+ status := Status(c)
+ duration, err := time.ParseDuration(d)
+ if err != nil {
+ return Result{}, fmt.Errorf("unable to parse result '%v': %w", in, err)
+ }
+ return Result{query, tags, status, duration}, nil
}
- tags := StringToTags(b)
- status := Status(c)
- return Result{q, tags, status}, nil
}
// List is a list of results
type List []Result
-// Returns the list of unique tags across all results.
-func (l List) UniqueTags() []Tags {
- tags := container.NewMap[string, Tags]()
+// Variant is a collection of tags that uniquely identify a test
+// configuration (e.g. the combination of OS, GPU, validation-modes, etc.).
+type Variant = Tags
+
+// Variants returns the list of unique tags (variants) across all results.
+func (l List) Variants() []Variant {
+ tags := container.NewMap[string, Variant]()
for _, r := range l {
tags.Add(TagsToString(r.Tags), r.Tags)
}
@@ -115,9 +165,10 @@ func (l List) TransformTags(f func(Tags) Tags) List {
cache[key] = tags
}
out = append(out, Result{
- Query: r.Query,
- Tags: tags,
- Status: r.Status,
+ Query: r.Query,
+ Tags: tags,
+ Status: r.Status,
+ Duration: r.Duration,
})
}
return out
@@ -126,29 +177,56 @@ func (l List) TransformTags(f func(Tags) Tags) List {
// ReplaceDuplicates returns a new list with duplicate test results replaced.
// When a duplicate is found, the function f is called with the duplicate
// results. The returned status will be used as the replaced result.
-func (l List) ReplaceDuplicates(f func(List) Status) List {
+// Merged results will use the average (mean) duration of the duplicates.
+func (l List) ReplaceDuplicates(f func(Statuses) Status) List {
type key struct {
query query.Query
tags string
}
- m := map[key]List{}
- for _, r := range l {
+ // Collect all duplicates
+ keyToIndices := map[key][]int{} // key to indices of duplicate results
+ for i, r := range l {
k := key{r.Query, TagsToString(r.Tags)}
- m[k] = append(m[k], r)
+ keyToIndices[k] = append(keyToIndices[k], i)
}
- for key, results := range m {
- if len(results) > 1 {
- result := results[0]
- result.Status = f(results)
- m[key] = List{result}
+ // Resolve duplicates
+ type StatusAndDuration struct {
+ Status Status
+ Duration time.Duration
+ }
+ merged := map[key]StatusAndDuration{}
+ for key, indices := range keyToIndices {
+ statuses := NewStatuses()
+ duration := time.Duration(0)
+ for _, i := range indices {
+ r := l[i]
+ statuses.Add(r.Status)
+ duration += r.Duration
+ }
+ status := func() Status {
+ if len(statuses) > 1 {
+ return f(statuses)
+ }
+ return statuses.One()
+ }()
+ duration = duration / time.Duration(len(indices))
+ merged[key] = StatusAndDuration{
+ Status: status,
+ Duration: duration,
}
}
- out := make(List, 0, len(m))
+ // Rebuild list
+ out := make(List, 0, len(keyToIndices))
for _, r := range l {
k := key{r.Query, TagsToString(r.Tags)}
- if unique, ok := m[k]; ok {
- out = append(out, unique[0])
- delete(m, k)
+ if sd, ok := merged[k]; ok {
+ out = append(out, Result{
+ Query: r.Query,
+ Tags: r.Tags,
+ Status: sd.Status,
+ Duration: sd.Duration,
+ })
+ delete(merged, k) // Remove from map to prevent duplicates
}
}
return out
@@ -156,24 +234,7 @@ func (l List) ReplaceDuplicates(f func(List) Status) List {
// Sort sorts the list
func (l List) Sort() {
- sort.Slice(l, func(i, j int) bool {
- a, b := l[i], l[j]
- switch a.Query.Compare(b.Query) {
- case -1:
- return true
- case 1:
- return false
- }
- ta := strings.Join(a.Tags.List(), TagDelimiter)
- tb := strings.Join(b.Tags.List(), TagDelimiter)
- switch {
- case ta < tb:
- return true
- case ta > tb:
- return false
- }
- return a.Status < b.Status
- })
+ sort.Slice(l, func(i, j int) bool { return l[i].Compare(l[j]) < 0 })
}
// Filter returns the results that match the given predicate
@@ -201,11 +262,133 @@ func (l List) FilterByTags(tags Tags) List {
})
}
+// FilterByVariant returns the results that exactly match the given tags
+func (l List) FilterByVariant(tags Tags) List {
+ str := TagsToString(tags)
+ return l.Filter(func(r Result) bool {
+ return len(r.Tags) == len(tags) && TagsToString(r.Tags) == str
+ })
+}
+
+// Statuses is a set of Status
+type Statuses = container.Set[Status]
+
+// NewStatuses returns a new status set with the provided statuses
+func NewStatuses(s ...Status) Statuses { return container.NewSet(s...) }
+
// Statuses returns a set of all the statuses in the list
-func (l List) Statuses() container.Set[Status] {
- set := container.NewSet[Status]()
+func (l List) Statuses() Statuses {
+ set := NewStatuses()
for _, r := range l {
set.Add(r.Status)
}
return set
}
+
+// StatusTree is a query tree of statuses
+type StatusTree = query.Tree[Status]
+
+// StatusTree returns a query.Tree from the List, with the Status as the tree
+// node data.
+func (l List) StatusTree() (StatusTree, error) {
+ tree := StatusTree{}
+ for _, r := range l {
+ if err := tree.Add(r.Query, r.Status); err != nil {
+ return StatusTree{}, err
+ }
+ }
+ return tree, nil
+}
+
+// Load loads the result list from the file with the given path
+func Load(path string) (List, error) {
+ file, err := os.Open(path)
+ if err != nil {
+ return nil, err
+ }
+ defer file.Close()
+
+ results, err := Read(file)
+ if err != nil {
+ return nil, fmt.Errorf("while reading '%v': %w", path, err)
+ }
+ return results, nil
+}
+
+// Save saves the result list to the file with the given path
+func Save(path string, results List) error {
+ dir := filepath.Dir(path)
+ if err := os.MkdirAll(dir, 0777); err != nil {
+ return err
+ }
+ file, err := os.Create(path)
+ if err != nil {
+ return err
+ }
+ defer file.Close()
+ return Write(file, results)
+}
+
+// Read reads a result list from the given reader
+func Read(r io.Reader) (List, error) {
+ scanner := bufio.NewScanner(r)
+ l := List{}
+ for scanner.Scan() {
+ r, err := Parse(scanner.Text())
+ if err != nil {
+ return nil, err
+ }
+ l = append(l, r)
+ }
+ return l, nil
+}
+
+// Write writes a result list to the given writer
+func Write(w io.Writer, l List) error {
+ for _, r := range l {
+ if _, err := fmt.Fprintln(w, r); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Merge merges and sorts two results lists.
+// Duplicates are removed using the Deduplicate() function.
+func Merge(a, b List) List {
+ merged := make(List, 0, len(a)+len(b))
+ merged = append(merged, a...)
+ merged = append(merged, b...)
+ out := merged.ReplaceDuplicates(Deduplicate)
+ out.Sort()
+ return out
+}
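+
+// For example (illustrative only; runA and runB are assumed result lists from
+// two separate runs):
+//
+//	merged := Merge(runA, runB)
+//	// 'merged' is sorted, and any (query, tags) pair present in both runs is
+//	// collapsed to a single result via Deduplicate().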
+
+// Deduplicate is the standard algorithm used to de-duplicate mixed results.
+// This function is expected to be handed to List.ReplaceDuplicates().
+func Deduplicate(s Statuses) Status {
+ // If all results have the same status, then use that
+ if len(s) == 1 {
+ return s.One()
+ }
+
+ // Mixed statuses. Replace with something appropriate.
+ switch {
+ // Crash + * = Crash
+ case s.Contains(Crash):
+ return Crash
+ // Abort + * = Abort
+ case s.Contains(Abort):
+ return Abort
+ // Unknown + * = Unknown
+ case s.Contains(Unknown):
+ return Unknown
+ // RetryOnFailure + ~(Crash | Abort | Unknown) = RetryOnFailure
+ case s.Contains(RetryOnFailure):
+ return RetryOnFailure
+ // Pass + ~(Crash | Abort | Unknown | RetryOnFailure) = RetryOnFailure
+ case s.Contains(Pass):
+ return RetryOnFailure
+ }
+ return Unknown
+}
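+
+// For example (illustrative only):
+//
+//	Deduplicate(NewStatuses(Pass, Failure)) // RetryOnFailure
+//	Deduplicate(NewStatuses(Pass, Crash))   // Crash
+//	Deduplicate(NewStatuses(Abort, Slow))   // Abort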
diff --git a/chromium/third_party/dawn/tools/src/cts/result/result_test.go b/chromium/third_party/dawn/tools/src/cts/result/result_test.go
index fd76ccc3cc0..946dbbbff9d 100644
--- a/chromium/third_party/dawn/tools/src/cts/result/result_test.go
+++ b/chromium/third_party/dawn/tools/src/cts/result/result_test.go
@@ -15,12 +15,14 @@
package result_test
import (
- "fmt"
+ "bytes"
"testing"
+ "time"
"dawn.googlesource.com/dawn/tools/src/container"
"dawn.googlesource.com/dawn/tools/src/cts/query"
"dawn.googlesource.com/dawn/tools/src/cts/result"
+ "dawn.googlesource.com/dawn/tools/src/utils"
"github.com/google/go-cmp/cmp"
)
@@ -38,25 +40,28 @@ func TestStringAndParse(t *testing.T) {
for _, test := range []Test{
{
result.Result{
- Query: Q(`a`),
- Status: result.Failure,
+ Query: Q(`a`),
+ Status: result.Failure,
+ Duration: time.Second * 42,
},
- `a Failure`,
+ `a Failure 42s`,
}, {
result.Result{
- Query: Q(`a:b,c,*`),
- Tags: T("x"),
- Status: result.Pass,
+ Query: Q(`a:b,c,*`),
+ Tags: T("x"),
+ Status: result.Pass,
+ Duration: time.Second * 42,
},
- `a:b,c,* x Pass`,
+ `a:b,c,* x Pass 42s`,
},
{
result.Result{
- Query: Q(`a:b,c:d,*`),
- Tags: T("zzz", "x", "yy"),
- Status: result.Failure,
+ Query: Q(`a:b,c:d,*`),
+ Tags: T("zzz", "x", "yy"),
+ Status: result.Failure,
+ Duration: time.Second * 42,
},
- `a:b,c:d,* x,yy,zzz Failure`,
+ `a:b,c:d,* x,yy,zzz Failure 42s`,
},
} {
if diff := cmp.Diff(test.result.String(), test.expect); diff != "" {
@@ -75,21 +80,26 @@ func TestStringAndParse(t *testing.T) {
}
func TestParseError(t *testing.T) {
- for _, test := range []string{
- ``,
- `a`,
- `a b c d`,
+ for _, test := range []struct {
+ in, expect string
+ }{
+ {``, `unable to parse result ''`},
+ {`a`, `unable to parse result 'a'`},
+ {`a b c d`, `unable to parse result 'a b c d': time: invalid duration "d"`},
} {
- _, err := result.Parse(test)
- expect := fmt.Sprintf(`unable to parse result '%v'`, test)
- if err == nil || err.Error() != expect {
- t.Errorf("Parse('%v') returned '%v'", test, err)
+ _, err := result.Parse(test.in)
+ got := ""
+ if err != nil {
+ got = err.Error()
+ }
+ if diff := cmp.Diff(got, test.expect); diff != "" {
+ t.Errorf("Parse('%v'): %v", test.in, diff)
continue
}
}
}
-func TestUniqueTags(t *testing.T) {
+func TestVariants(t *testing.T) {
type Test struct {
results result.List
expect []result.Tags
@@ -196,7 +206,7 @@ func TestUniqueTags(t *testing.T) {
},
},
} {
- got := test.results.UniqueTags()
+ got := test.results.Variants()
if diff := cmp.Diff(got, test.expect); diff != "" {
t.Errorf("Results:\n%v\nUniqueTags() was not as expected:\n%v", test.results, diff)
}
@@ -304,40 +314,44 @@ func TestTransformTags(t *testing.T) {
func TestReplaceDuplicates(t *testing.T) {
type Test struct {
- results result.List
- fn func(result.List) result.Status
- expect result.List
+ location string
+ results result.List
+ fn func(result.Statuses) result.Status
+ expect result.List
}
for _, test := range []Test{
{ //////////////////////////////////////////////////////////////////////
+ location: utils.ThisLine(),
results: result.List{
- result.Result{Query: Q(`a`), Status: result.Pass},
+ result.Result{Query: Q(`a`), Status: result.Pass, Duration: 1},
},
- fn: func(l result.List) result.Status {
+ fn: func(result.Statuses) result.Status {
return result.Abort
},
expect: result.List{
- result.Result{Query: Q(`a`), Status: result.Pass},
+ result.Result{Query: Q(`a`), Status: result.Pass, Duration: 1},
},
},
{ //////////////////////////////////////////////////////////////////////
+ location: utils.ThisLine(),
results: result.List{
- result.Result{Query: Q(`a`), Status: result.Pass},
- result.Result{Query: Q(`a`), Status: result.Pass},
+ result.Result{Query: Q(`a`), Status: result.Pass, Duration: 1},
+ result.Result{Query: Q(`a`), Status: result.Pass, Duration: 3},
},
- fn: func(l result.List) result.Status {
+ fn: func(result.Statuses) result.Status {
return result.Abort
},
expect: result.List{
- result.Result{Query: Q(`a`), Status: result.Abort},
+ result.Result{Query: Q(`a`), Status: result.Pass, Duration: 2},
},
},
{ //////////////////////////////////////////////////////////////////////
+ location: utils.ThisLine(),
results: result.List{
result.Result{Query: Q(`a`), Status: result.Pass},
result.Result{Query: Q(`b`), Status: result.Pass},
},
- fn: func(l result.List) result.Status {
+ fn: func(result.Statuses) result.Status {
return result.Abort
},
expect: result.List{
@@ -346,16 +360,14 @@ func TestReplaceDuplicates(t *testing.T) {
},
},
{ //////////////////////////////////////////////////////////////////////
+ location: utils.ThisLine(),
results: result.List{
result.Result{Query: Q(`a`), Status: result.Pass},
result.Result{Query: Q(`b`), Status: result.Pass},
result.Result{Query: Q(`a`), Status: result.Skip},
},
- fn: func(got result.List) result.Status {
- expect := result.List{
- result.Result{Query: Q(`a`), Status: result.Pass},
- result.Result{Query: Q(`a`), Status: result.Skip},
- }
+ fn: func(got result.Statuses) result.Status {
+ expect := result.NewStatuses(result.Pass, result.Skip)
if diff := cmp.Diff(got, expect); diff != "" {
t.Errorf("function's parameter was not as expected:\n%v", diff)
}
@@ -369,7 +381,7 @@ func TestReplaceDuplicates(t *testing.T) {
} {
got := test.results.ReplaceDuplicates(test.fn)
if diff := cmp.Diff(got, test.expect); diff != "" {
- t.Errorf("Results:\n%v\nReplaceDuplicates() was not as expected:\n%v", test.results, diff)
+ t.Errorf("\n%v ReplaceDuplicates() was not as expected:\n%v", test.location, diff)
}
}
}
@@ -778,6 +790,96 @@ func TestFilterByTags(t *testing.T) {
}
}
+func TestFilterByVariant(t *testing.T) {
+ type Test struct {
+ results result.List
+ tags result.Tags
+ expect result.List
+ }
+ for _, test := range []Test{
+ { //////////////////////////////////////////////////////////////////////
+ results: result.List{
+ result.Result{
+ Query: Q(`a`),
+ Status: result.Pass,
+ Tags: result.NewTags("x"),
+ },
+ result.Result{
+ Query: Q(`b`),
+ Status: result.Failure,
+ Tags: result.NewTags("y"),
+ },
+ result.Result{
+ Query: Q(`c`),
+ Status: result.Pass,
+ Tags: result.NewTags("x", "y"),
+ },
+ },
+ tags: result.NewTags("x", "y"),
+ expect: result.List{
+ result.Result{
+ Query: Q(`c`),
+ Status: result.Pass,
+ Tags: result.NewTags("x", "y"),
+ },
+ },
+ },
+ { //////////////////////////////////////////////////////////////////////
+ results: result.List{
+ result.Result{
+ Query: Q(`a`),
+ Status: result.Pass,
+ Tags: result.NewTags("x"),
+ },
+ result.Result{
+ Query: Q(`b`),
+ Status: result.Failure,
+ Tags: result.NewTags("y"),
+ },
+ result.Result{
+ Query: Q(`c`),
+ Status: result.Pass,
+ Tags: result.NewTags("x", "y"),
+ },
+ },
+ tags: result.NewTags("x"),
+ expect: result.List{
+ result.Result{
+ Query: Q(`a`),
+ Status: result.Pass,
+ Tags: result.NewTags("x"),
+ },
+ },
+ },
+ { //////////////////////////////////////////////////////////////////////
+ results: result.List{
+ result.Result{
+ Query: Q(`a`),
+ Status: result.Pass,
+ Tags: result.NewTags("x"),
+ },
+ result.Result{
+ Query: Q(`b`),
+ Status: result.Failure,
+ Tags: result.NewTags("y"),
+ },
+ result.Result{
+ Query: Q(`c`),
+ Status: result.Pass,
+ Tags: result.NewTags("x", "y"),
+ },
+ },
+ tags: result.NewTags("q"),
+ expect: result.List{},
+ },
+ } {
+ got := test.results.FilterByVariant(test.tags)
+ if diff := cmp.Diff(got, test.expect); diff != "" {
+ t.Errorf("Results:\n%v\nFilterByVariant(%v) was not as expected:\n%v", test.results, test.tags, diff)
+ }
+ }
+}
+
func TestStatuses(t *testing.T) {
type Test struct {
results result.List
@@ -847,3 +949,322 @@ func TestStatuses(t *testing.T) {
}
}
}
+
+func TestStatusTree(t *testing.T) {
+ type Node = query.TreeNode[result.Status]
+ type Children = query.TreeNodeChildren[result.Status]
+ type ChildKey = query.TreeNodeChildKey
+
+ pass := result.Pass
+
+ type Test struct {
+ results result.List
+ expectErr error
+ expect result.StatusTree
+ }
+ for _, test := range []Test{
+ { //////////////////////////////////////////////////////////////////////
+ results: result.List{},
+ expect: result.StatusTree{},
+ },
+ { //////////////////////////////////////////////////////////////////////
+ results: result.List{
+ {Query: Q(`suite:a:*`), Status: result.Pass},
+ },
+ expect: result.StatusTree{
+ TreeNode: Node{
+ Children: Children{
+ ChildKey{Name: `suite`, Target: query.Suite}: &Node{
+ Query: Q(`suite`),
+ Children: Children{
+ ChildKey{Name: `a`, Target: query.Files}: &Node{
+ Query: Q(`suite:a`),
+ Children: Children{
+ ChildKey{Name: `*`, Target: query.Tests}: &Node{
+ Query: Q(`suite:a:*`),
+ Data: &pass,
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ { //////////////////////////////////////////////////////////////////////
+ results: result.List{
+ {Query: Q(`suite:a:*`), Status: result.Pass},
+ {Query: Q(`suite:a:*`), Status: result.Failure},
+ },
+ expectErr: query.ErrDuplicateData{Query: Q(`suite:a:*`)},
+ },
+ } {
+ got, err := test.results.StatusTree()
+ if diff := cmp.Diff(err, test.expectErr); diff != "" {
+ t.Errorf("Results:\n%v\nStatusTree() error was not as expected:\n%v", test.results, diff)
+ continue
+ }
+ if diff := cmp.Diff(got, test.expect); diff != "" {
+ t.Errorf("Results:\n%v\nStatusTree() was not as expected:\n%v", test.results, diff)
+ }
+ }
+}
+
+func TestReadWrite(t *testing.T) {
+ in := result.List{
+ {Query: Q(`suite:a:*`), Tags: T(`x`), Status: result.Pass},
+ {Query: Q(`suite:b,*`), Tags: T(`y`), Status: result.Failure},
+ {Query: Q(`suite:a:b:*`), Tags: T(`x`, `y`), Status: result.Skip},
+ {Query: Q(`suite:a:c,*`), Tags: T(`y`, `x`), Status: result.Failure},
+ {Query: Q(`suite:a,b:c,*`), Tags: T(`y`, `x`), Status: result.Crash},
+ {Query: Q(`suite:a,b:c:*`), Status: result.Slow},
+ }
+ buf := &bytes.Buffer{}
+ if err := result.Write(buf, in); err != nil {
+ t.Fatalf("Write(): %v", err)
+ }
+ got, err := result.Read(buf)
+ if err != nil {
+ t.Fatalf("Read(): %v", err)
+ }
+ if diff := cmp.Diff(got, in); diff != "" {
+ t.Errorf("Read() was not as expected:\n%v", diff)
+ }
+}
+
+func TestMerge(t *testing.T) {
+ type Test struct {
+ location string
+ a, b result.List
+ expect result.List
+ }
+ for _, test := range []Test{
+ { //////////////////////////////////////////////////////////////////////
+ location: utils.ThisLine(),
+ a: result.List{},
+ b: result.List{},
+ expect: result.List{},
+ },
+ { //////////////////////////////////////////////////////////////////////
+ location: utils.ThisLine(),
+ a: result.List{
+ {Query: Q(`suite:a:*`), Tags: T(`x`), Status: result.Pass},
+ },
+ b: result.List{},
+ expect: result.List{
+ {Query: Q(`suite:a:*`), Tags: T(`x`), Status: result.Pass},
+ },
+ },
+ { //////////////////////////////////////////////////////////////////////
+ location: utils.ThisLine(),
+ a: result.List{},
+ b: result.List{
+ {Query: Q(`suite:a:*`), Tags: T(`x`), Status: result.Pass},
+ },
+ expect: result.List{
+ {Query: Q(`suite:a:*`), Tags: T(`x`), Status: result.Pass},
+ },
+ },
+ { //////////////////////////////////////////////////////////////////////
+ location: utils.ThisLine(),
+ a: result.List{
+ {Query: Q(`suite:a:*`), Tags: T(`x`), Status: result.Pass},
+ },
+ b: result.List{
+ {Query: Q(`suite:b:*`), Tags: T(`x`), Status: result.Pass},
+ },
+ expect: result.List{
+ {Query: Q(`suite:a:*`), Tags: T(`x`), Status: result.Pass},
+ {Query: Q(`suite:b:*`), Tags: T(`x`), Status: result.Pass},
+ },
+ },
+ { //////////////////////////////////////////////////////////////////////
+ location: utils.ThisLine(),
+ a: result.List{
+ {Query: Q(`suite:b:*`), Tags: T(`x`), Status: result.Pass},
+ },
+ b: result.List{
+ {Query: Q(`suite:a:*`), Tags: T(`x`), Status: result.Pass},
+ },
+ expect: result.List{
+ {Query: Q(`suite:a:*`), Tags: T(`x`), Status: result.Pass},
+ {Query: Q(`suite:b:*`), Tags: T(`x`), Status: result.Pass},
+ },
+ },
+ { //////////////////////////////////////////////////////////////////////
+ location: utils.ThisLine(),
+ a: result.List{
+ {Query: Q(`suite:a:*`), Tags: T(`x`), Status: result.Pass},
+ },
+ b: result.List{
+ {Query: Q(`suite:a:*`), Tags: T(`y`), Status: result.Pass},
+ },
+ expect: result.List{
+ {Query: Q(`suite:a:*`), Tags: T(`x`), Status: result.Pass},
+ {Query: Q(`suite:a:*`), Tags: T(`y`), Status: result.Pass},
+ },
+ },
+ { //////////////////////////////////////////////////////////////////////
+ location: utils.ThisLine(),
+ a: result.List{
+ {Query: Q(`suite:a:*`), Status: result.Pass},
+ },
+ b: result.List{
+ {Query: Q(`suite:a:*`), Status: result.Pass},
+ },
+ expect: result.List{
+ {Query: Q(`suite:a:*`), Status: result.Pass},
+ },
+ },
+ { //////////////////////////////////////////////////////////////////////
+ location: utils.ThisLine(),
+ a: result.List{
+ {Query: Q(`suite:a:*`), Tags: T(`x`), Status: result.Pass},
+ },
+ b: result.List{
+ {Query: Q(`suite:a:*`), Tags: T(`x`), Status: result.Pass},
+ },
+ expect: result.List{
+ {Query: Q(`suite:a:*`), Tags: T(`x`), Status: result.Pass},
+ },
+ },
+ { //////////////////////////////////////////////////////////////////////
+ location: utils.ThisLine(),
+ a: result.List{
+ {Query: Q(`suite:a:*`), Tags: T(`x`), Status: result.Crash},
+ },
+ b: result.List{
+ {Query: Q(`suite:a:*`), Tags: T(`x`), Status: result.Crash},
+ },
+ expect: result.List{
+ {Query: Q(`suite:a:*`), Tags: T(`x`), Status: result.Crash},
+ },
+ },
+ { //////////////////////////////////////////////////////////////////////
+ location: utils.ThisLine(),
+ a: result.List{
+ {Query: Q(`suite:a:*`), Tags: T(`x`), Status: result.Pass},
+ {Query: Q(`suite:b:*`), Tags: T(`x`), Status: result.Pass},
+ {Query: Q(`suite:c:*`), Tags: T(`x`), Status: result.Failure},
+ {Query: Q(`suite:d:*`), Tags: T(`x`), Status: result.Failure},
+ {Query: Q(`suite:e:*`), Tags: T(`x`), Status: result.Crash},
+ },
+ b: result.List{
+ {Query: Q(`suite:a:*`), Tags: T(`x`), Status: result.Failure},
+ {Query: Q(`suite:b:*`), Tags: T(`x`), Status: result.Pass},
+ {Query: Q(`suite:c:*`), Tags: T(`x`), Status: result.Pass},
+ {Query: Q(`suite:d:*`), Tags: T(`y`), Status: result.Pass},
+ {Query: Q(`suite:e:*`), Tags: T(`x`), Status: result.Pass},
+ },
+ expect: result.List{
+ {Query: Q(`suite:a:*`), Tags: T(`x`), Status: result.RetryOnFailure},
+ {Query: Q(`suite:b:*`), Tags: T(`x`), Status: result.Pass},
+ {Query: Q(`suite:c:*`), Tags: T(`x`), Status: result.RetryOnFailure},
+ {Query: Q(`suite:d:*`), Tags: T(`x`), Status: result.Failure},
+ {Query: Q(`suite:d:*`), Tags: T(`y`), Status: result.Pass},
+ {Query: Q(`suite:e:*`), Tags: T(`x`), Status: result.Crash},
+ },
+ },
+ } {
+ got := result.Merge(test.a, test.b)
+ if diff := cmp.Diff(got, test.expect); diff != "" {
+ t.Errorf("%v\nMerge() was not as expected:\n%v", test.location, diff)
+ }
+ }
+}
+
+func TestDeduplicate(t *testing.T) {
+ type Test struct {
+ location string
+ statuses result.Statuses
+ expect result.Status
+ }
+ for _, test := range []Test{
+ { //////////////////////////////////////////////////////////////////////
+ location: utils.ThisLine(),
+ statuses: result.NewStatuses(result.Pass),
+ expect: result.Pass,
+ },
+ { //////////////////////////////////////////////////////////////////////
+ location: utils.ThisLine(),
+ statuses: result.NewStatuses(result.Abort),
+ expect: result.Abort,
+ },
+ { //////////////////////////////////////////////////////////////////////
+ location: utils.ThisLine(),
+ statuses: result.NewStatuses(result.Failure),
+ expect: result.Failure,
+ },
+ { //////////////////////////////////////////////////////////////////////
+ location: utils.ThisLine(),
+ statuses: result.NewStatuses(result.Skip),
+ expect: result.Skip,
+ },
+ { //////////////////////////////////////////////////////////////////////
+ location: utils.ThisLine(),
+ statuses: result.NewStatuses(result.Crash),
+ expect: result.Crash,
+ },
+ { //////////////////////////////////////////////////////////////////////
+ location: utils.ThisLine(),
+ statuses: result.NewStatuses(result.Slow),
+ expect: result.Slow,
+ },
+ { //////////////////////////////////////////////////////////////////////
+ location: utils.ThisLine(),
+ statuses: result.NewStatuses(result.Unknown),
+ expect: result.Unknown,
+ },
+ { //////////////////////////////////////////////////////////////////////
+ location: utils.ThisLine(),
+ statuses: result.NewStatuses(result.RetryOnFailure),
+ expect: result.RetryOnFailure,
+ },
+ { //////////////////////////////////////////////////////////////////////
+ location: utils.ThisLine(),
+ statuses: result.NewStatuses(result.Pass, result.Failure),
+ expect: result.RetryOnFailure,
+ },
+ { //////////////////////////////////////////////////////////////////////
+ location: utils.ThisLine(),
+ statuses: result.NewStatuses(result.Pass, result.Abort),
+ expect: result.Abort,
+ },
+ { //////////////////////////////////////////////////////////////////////
+ location: utils.ThisLine(),
+ statuses: result.NewStatuses(result.Pass, result.Skip),
+ expect: result.RetryOnFailure,
+ },
+ { //////////////////////////////////////////////////////////////////////
+ location: utils.ThisLine(),
+ statuses: result.NewStatuses(result.Pass, result.Crash),
+ expect: result.Crash,
+ },
+ { //////////////////////////////////////////////////////////////////////
+ location: utils.ThisLine(),
+ statuses: result.NewStatuses(result.Pass, result.Slow),
+ expect: result.RetryOnFailure,
+ },
+ { //////////////////////////////////////////////////////////////////////
+ location: utils.ThisLine(),
+ statuses: result.NewStatuses(result.Pass, result.Unknown),
+ expect: result.Unknown,
+ },
+ { //////////////////////////////////////////////////////////////////////
+ location: utils.ThisLine(),
+ statuses: result.NewStatuses(result.Pass, result.RetryOnFailure),
+ expect: result.RetryOnFailure,
+ },
+ { //////////////////////////////////////////////////////////////////////
+ location: utils.ThisLine(),
+ statuses: result.NewStatuses(result.Status("??"), result.Status("?!")),
+ expect: result.Unknown,
+ },
+ } {
+ got := result.Deduplicate(test.statuses)
+ if diff := cmp.Diff(got, test.expect); diff != "" {
+ t.Errorf("\n%v Deduplicate() was not as expected:\n%v", test.location, diff)
+ }
+ }
+}
diff --git a/chromium/third_party/dawn/tools/src/cts/result/status.go b/chromium/third_party/dawn/tools/src/cts/result/status.go
index 5e5fd157be1..50daae435a1 100644
--- a/chromium/third_party/dawn/tools/src/cts/result/status.go
+++ b/chromium/third_party/dawn/tools/src/cts/result/status.go
@@ -14,6 +14,8 @@
package result
+import "dawn.googlesource.com/dawn/tools/src/container"
+
// Status is an enumerator of test results
type Status string
@@ -28,3 +30,12 @@ const (
Slow = Status("Slow")
Unknown = Status("Unknown")
)
+
+// CommonStatus is a function that can be used by StatusTree.Reduce() to reduce
+// tree nodes with the same status
+func CommonStatus(statuses []Status) *Status {
+ if set := container.NewSet(statuses...); len(set) == 1 {
+ return &statuses[0]
+ }
+ return nil
+}
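+
+// Typical usage (illustrative only; 'results' is an assumed result.List):
+//
+//	tree, _ := results.StatusTree()
+//	tree.Reduce(CommonStatus) // collapses subtrees whose statuses all agree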
diff --git a/chromium/third_party/dawn/tools/src/cts/result/status_test.go b/chromium/third_party/dawn/tools/src/cts/result/status_test.go
new file mode 100644
index 00000000000..e6838316185
--- /dev/null
+++ b/chromium/third_party/dawn/tools/src/cts/result/status_test.go
@@ -0,0 +1,54 @@
+// Copyright 2022 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package result_test
+
+import (
+ "testing"
+
+ "dawn.googlesource.com/dawn/tools/src/cts/result"
+ "github.com/google/go-cmp/cmp"
+)
+
+func TestCommonStatus(t *testing.T) {
+ pass := result.Pass
+
+ type Test struct {
+ in []result.Status
+ expect *result.Status
+ }
+ for _, test := range []Test{
+ {
+ in: nil,
+ expect: nil,
+ }, {
+ in: []result.Status{},
+ expect: nil,
+ }, {
+ in: []result.Status{result.Pass},
+ expect: &pass,
+ }, {
+ in: []result.Status{result.Pass, result.Pass, result.Pass},
+ expect: &pass,
+ }, {
+ in: []result.Status{result.Pass, result.Failure, result.Pass},
+ expect: nil,
+ },
+ } {
+ got := result.CommonStatus(test.in)
+ if diff := cmp.Diff(got, test.expect); diff != "" {
+ t.Errorf("%v.CommonStatus('%v') was not as expected:\n%v", test.in, test.expect, diff)
+ }
+ }
+}
diff --git a/chromium/third_party/dawn/tools/src/gerrit/gerrit.go b/chromium/third_party/dawn/tools/src/gerrit/gerrit.go
index 29a56233954..763574f6337 100644
--- a/chromium/third_party/dawn/tools/src/gerrit/gerrit.go
+++ b/chromium/third_party/dawn/tools/src/gerrit/gerrit.go
@@ -75,13 +75,25 @@ func (p *Patchset) RegisterFlags(defaultHost, defaultProject string) {
flag.IntVar(&p.Patchset, "ps", 0, "gerrit patchset id")
}
+// RefsChanges returns the gerrit 'refs/changes/X/Y/Z' string for the patchset
+func (p Patchset) RefsChanges() string {
+ // https://gerrit-review.googlesource.com/Documentation/intro-user.html
+ // A change ref has the format refs/changes/X/Y/Z where X is the last two
+ // digits of the change number, Y is the entire change number, and Z is the
+ // patch set. For example, if the change number is 263270, the ref would be
+ // refs/changes/70/263270/2 for the second patch set.
+ shortChange := fmt.Sprintf("%.2v", p.Change)
+ shortChange = shortChange[len(shortChange)-2:]
+ return fmt.Sprintf("refs/changes/%v/%v/%v", shortChange, p.Change, p.Patchset)
+}
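+
+// For example (illustrative only):
+//
+//	ps := Patchset{Change: 263270, Patchset: 2}
+//	ps.RefsChanges() // "refs/changes/70/263270/2"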
+
// LoadCredentials attempts to load the gerrit credentials for the given gerrit
// URL from the git cookies file. Returns an empty Credentials on failure.
func LoadCredentials(url string) Credentials {
cookiesFile := os.Getenv("HOME") + "/.gitcookies"
if cookies, err := ioutil.ReadFile(cookiesFile); err == nil {
- url := strings.TrimPrefix(url, "https://")
- re := regexp.MustCompile(url + `\s+(?:FALSE|TRUE)[\s/]+(?:FALSE|TRUE)\s+[0-9]+\s+.\s+(.*)=(.*)`)
+ url := strings.TrimSuffix(strings.TrimPrefix(url, "https://"), "/")
+ re := regexp.MustCompile(url + `/?\s+(?:FALSE|TRUE)[\s/]+(?:FALSE|TRUE)\s+[0-9]+\s+.\s+(.*)=(.*)`)
match := re.FindStringSubmatch(string(cookies))
if len(match) == 3 {
return Credentials{match[1], match[2]}
@@ -200,19 +212,67 @@ func (g *Gerrit) LatestPatchest(changeID string) (Patchset, error) {
return ps, nil
}
+// CommentSide is an enumerator for specifying on which side code-comments
+// should be shown.
+type CommentSide int
+
+const (
+ // Left is used to specify that code comments should appear on the parent
+ // change
+ Left CommentSide = iota
+ // Right is used to specify that code comments should appear on the new
+ // change
+ Right
+)
+
+// FileComment describes a single comment on a file
+type FileComment struct {
+ Path string // The file path
+ Side CommentSide // The side on which the comment should appear
+ Line int // The 1-based line number for the comment
+ Message string // The comment message
+}
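+
+// For example (illustrative only; the path and message are assumptions):
+//
+//	comments := []FileComment{
+//		{Path: "src/foo.cc", Side: Right, Line: 42, Message: "consider renaming"},
+//	}
+//	// These can then be passed to Gerrit.Comment() alongside a review message.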
+
// Comment posts a review comment on the given patchset.
-func (g *Gerrit) Comment(ps Patchset, msg string) error {
- _, _, err := g.client.Changes.SetReview(
- strconv.Itoa(ps.Change),
- strconv.Itoa(ps.Patchset),
- &gerrit.ReviewInput{
- Message: msg,
- })
+// comments is an optional list of file-comments to include in the review.
+func (g *Gerrit) Comment(ps Patchset, msg string, comments []FileComment) error {
+ input := &gerrit.ReviewInput{
+ Message: msg,
+ }
+ if len(comments) > 0 {
+ input.Comments = map[string][]gerrit.CommentInput{}
+ for _, c := range comments {
+ ci := gerrit.CommentInput{
+ Line: c.Line,
+ // Updated: &gerrit.Timestamp{Time: time.Now()},
+ Message: c.Message,
+ }
+ if c.Side == Left {
+ ci.Side = "PARENT"
+ } else {
+ ci.Side = "REVISION"
+ }
+ input.Comments[c.Path] = append(input.Comments[c.Path], ci)
+ }
+ }
+ _, _, err := g.client.Changes.SetReview(strconv.Itoa(ps.Change), strconv.Itoa(ps.Patchset), input)
if err != nil {
return g.maybeWrapError(err)
}
return nil
}
+
+// SetReadyForReview marks the change as ready for review.
+func (g *Gerrit) SetReadyForReview(changeID, message string) error {
+ resp, err := g.client.Changes.SetReadyForReview(changeID, &gerrit.ReadyForReviewInput{
+ Message: message,
+ })
+ if err != nil && resp.StatusCode != 409 { // 409: already ready
+ return g.maybeWrapError(err)
+ }
+ return nil
+}
+
func (g *Gerrit) maybeWrapError(err error) error {
if err != nil && !g.authenticated {
return fmt.Errorf(`query failed, possibly because of authentication.
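
A small usage sketch for the additions above, assuming an authenticated *gerrit.Gerrit is constructed elsewhere in the tool; the file path and messages are invented purely for illustration.

// Sketch: compute the fetch ref for a patchset and prepare file comments.
package main

import (
	"fmt"

	"dawn.googlesource.com/dawn/tools/src/gerrit"
)

func main() {
	ps := gerrit.Patchset{Change: 263270, Patchset: 2}

	// Prints "refs/changes/70/263270/2", per the comment on RefsChanges.
	fmt.Println(ps.RefsChanges())

	comments := []gerrit.FileComment{{
		Path:    "src/tint/example.cc", // hypothetical path, illustration only
		Side:    gerrit.Right,          // anchor on the new patchset, not the parent
		Line:    42,
		Message: "example automated note",
	}}

	// With an authenticated client g (construction not shown in this diff),
	// the review would be posted with:
	//   err := g.Comment(ps, "Automated CTS roll results", comments)
	_ = comments
}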
diff --git a/chromium/third_party/dawn/tools/src/gerrit/gerrit_test.go b/chromium/third_party/dawn/tools/src/gerrit/gerrit_test.go
new file mode 100644
index 00000000000..a253620e113
--- /dev/null
+++ b/chromium/third_party/dawn/tools/src/gerrit/gerrit_test.go
@@ -0,0 +1,64 @@
+// Copyright 2022 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package gerrit_test
+
+import (
+ "testing"
+
+ "dawn.googlesource.com/dawn/tools/src/gerrit"
+ "github.com/google/go-cmp/cmp"
+)
+
+func TestPatchsetRefsChanges(t *testing.T) {
+ type Test struct {
+ in gerrit.Patchset
+ expect string
+ }
+ for _, test := range []Test{
+ {
+ in: gerrit.Patchset{
+ Change: 123456,
+ Patchset: 42,
+ },
+ expect: `refs/changes/56/123456/42`,
+ },
+ {
+ in: gerrit.Patchset{
+ Change: 1234,
+ Patchset: 42,
+ },
+ expect: `refs/changes/34/1234/42`,
+ },
+ {
+ in: gerrit.Patchset{
+ Change: 12,
+ Patchset: 42,
+ },
+ expect: `refs/changes/12/12/42`,
+ },
+ {
+ in: gerrit.Patchset{
+ Change: 1,
+ Patchset: 42,
+ },
+ expect: `refs/changes/01/1/42`,
+ },
+ } {
+ got := test.in.RefsChanges()
+ if diff := cmp.Diff(got, test.expect); diff != "" {
+ t.Errorf("%v.RefsChanges() was not as expected:\n%v", test.in, diff)
+ }
+ }
+}
diff --git a/chromium/third_party/dawn/tools/src/git/git.go b/chromium/third_party/dawn/tools/src/git/git.go
index 04ed43a6f46..f08169da6c9 100644
--- a/chromium/third_party/dawn/tools/src/git/git.go
+++ b/chromium/third_party/dawn/tools/src/git/git.go
@@ -133,7 +133,7 @@ func (g Git) Clone(path, url string, opt *CloneOptions) (*Repository, error) {
if opt.Branch != "" {
args = append(args, "--branch", opt.Branch)
}
- if _, err := r.run(opt.Timeout, args...); err != nil {
+ if _, err := r.run(nil, opt.Timeout, args...); err != nil {
return nil, err
}
return r, nil
@@ -166,10 +166,10 @@ func (r Repository) Fetch(ref string, opt *FetchOptions) (Hash, error) {
if opt.Remote == "" {
opt.Remote = "origin"
}
- if _, err := r.run(opt.Timeout, "fetch", opt.Remote, ref); err != nil {
+ if _, err := r.run(nil, opt.Timeout, "fetch", opt.Remote, ref); err != nil {
return Hash{}, err
}
- out, err := r.run(0, "rev-parse", "FETCH_HEAD")
+ out, err := r.run(nil, 0, "rev-parse", "FETCH_HEAD")
if err != nil {
return Hash{}, err
}
@@ -194,7 +194,7 @@ func (r Repository) Push(localRef, remoteRef string, opt *PushOptions) error {
if opt.Remote == "" {
opt.Remote = "origin"
}
- url, err := r.run(opt.Timeout, "remote", "get-url", opt.Remote)
+ url, err := r.run(nil, opt.Timeout, "remote", "get-url", opt.Remote)
if err != nil {
return err
}
@@ -202,7 +202,7 @@ func (r Repository) Push(localRef, remoteRef string, opt *PushOptions) error {
if err != nil {
return err
}
- if _, err := r.run(opt.Timeout, "push", url, localRef+":"+remoteRef); err != nil {
+ if _, err := r.run(nil, opt.Timeout, "push", url, localRef+":"+remoteRef); err != nil {
return err
}
return nil
@@ -221,7 +221,7 @@ func (r Repository) Add(path string, opt *AddOptions) error {
if opt == nil {
opt = &AddOptions{}
}
- if _, err := r.run(opt.Timeout, "add", path); err != nil {
+ if _, err := r.run(nil, opt.Timeout, "add", path); err != nil {
return err
}
return nil
@@ -245,19 +245,27 @@ func (r Repository) Commit(msg string, opt *CommitOptions) (Hash, error) {
if opt == nil {
opt = &CommitOptions{}
}
+
args := []string{"commit"}
if opt.Amend {
args = append(args, "--amend")
} else {
args = append(args, "-m", msg)
}
+
+ var env []string
if opt.AuthorName != "" || opt.AuthorEmail != "" {
- args = append(args, "--author", fmt.Sprintf("%v <%v>", opt.AuthorName, opt.AuthorEmail))
+ env = []string{
+ fmt.Sprintf("GIT_AUTHOR_NAME=%v", opt.AuthorName),
+ fmt.Sprintf("GIT_AUTHOR_EMAIL=%v", opt.AuthorEmail),
+ fmt.Sprintf("GIT_COMMITTER_NAME=%v", opt.AuthorName),
+ fmt.Sprintf("GIT_COMMITTER_EMAIL=%v", opt.AuthorEmail),
+ }
}
- if _, err := r.run(opt.Timeout, args...); err != nil {
+ if _, err := r.run(env, opt.Timeout, "commit", "-m", msg); err != nil {
return Hash{}, err
}
- out, err := r.run(0, "rev-parse", "HEAD")
+ out, err := r.run(nil, 0, "rev-parse", "HEAD")
if err != nil {
return Hash{}, err
}
@@ -275,7 +283,7 @@ func (r Repository) Checkout(ref string, opt *CheckoutOptions) error {
if opt == nil {
opt = &CheckoutOptions{}
}
- if _, err := r.run(opt.Timeout, "checkout", ref); err != nil {
+ if _, err := r.run(nil, opt.Timeout, "checkout", ref); err != nil {
return err
}
return nil
@@ -287,8 +295,6 @@ type LogOptions struct {
From string
// The git reference to the newest commit in the range to query.
To string
- // The maximum number of entries to return.
- Count int
// Timeout for the operation
Timeout time.Duration
}
@@ -317,10 +323,7 @@ func (r Repository) Log(opt *LogOptions) ([]CommitInfo, error) {
rng = opt.From + "^.." + rng
}
args = append(args, rng, "--pretty=format:ǁ%Hǀ%cIǀ%an <%ae>ǀ%sǀ%b")
- if opt.Count != 0 {
- args = append(args, fmt.Sprintf("-%d", opt.Count))
- }
- out, err := r.run(opt.Timeout, args...)
+ out, err := r.run(nil, opt.Timeout, args...)
if err != nil {
return nil, err
}
@@ -338,7 +341,7 @@ func (r Repository) Config(opt *ConfigOptions) (map[string]string, error) {
if opt == nil {
opt = &ConfigOptions{}
}
- text, err := r.run(opt.Timeout, "config", "-l")
+ text, err := r.run(nil, opt.Timeout, "config", "-l")
if err != nil {
return nil, err
}
@@ -354,20 +357,11 @@ func (r Repository) Config(opt *ConfigOptions) (map[string]string, error) {
return out, nil
}
-func (r Repository) run(timeout time.Duration, args ...string) (string, error) {
- return r.Git.run(r.Path, timeout, args...)
-}
-
-func (r Repository) runAll(timeout time.Duration, args ...[]string) error {
- for _, a := range args {
- if _, err := r.run(timeout, a...); err != nil {
- return err
- }
- }
- return nil
+func (r Repository) run(env []string, timeout time.Duration, args ...string) (string, error) {
+ return r.Git.run(r.Path, env, timeout, args...)
}
-func (g Git) run(dir string, timeout time.Duration, args ...string) (string, error) {
+func (g Git) run(dir string, env []string, timeout time.Duration, args ...string) (string, error) {
if timeout == 0 {
timeout = DefaultTimeout
}
@@ -375,6 +369,12 @@ func (g Git) run(dir string, timeout time.Duration, args ...string) (string, err
defer cancel()
cmd := exec.CommandContext(ctx, g.exe, args...)
cmd.Dir = dir
+ if env != nil {
+ // Godocs for exec.Cmd.Env:
+ // "If Env contains duplicate environment keys, only the last value in
+ // the slice for each duplicate key is used."
+ cmd.Env = append(os.Environ(), env...)
+ }
if g.LogAllActions {
fmt.Printf("%v> %v %v\n", dir, g.exe, strings.Join(args, " "))
}
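
A standalone sketch of the environment plumbing introduced in run() above: per-invocation variables are appended to os.Environ(), so the appended entries win over any inherited values of the same key. The runGit helper and the example author identity are illustrative only.

// Sketch of passing per-invocation GIT_AUTHOR_*/GIT_COMMITTER_* variables.
package main

import (
	"context"
	"fmt"
	"os"
	"os/exec"
	"time"
)

func runGit(dir string, env []string, timeout time.Duration, args ...string) (string, error) {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	cmd := exec.CommandContext(ctx, "git", args...)
	cmd.Dir = dir
	if env != nil {
		// Duplicate keys resolve to the last entry, so these override the
		// inherited environment without clobbering anything else.
		cmd.Env = append(os.Environ(), env...)
	}
	out, err := cmd.CombinedOutput()
	return string(out), err
}

func main() {
	env := []string{
		"GIT_AUTHOR_NAME=Dawn Autoroller",
		"GIT_AUTHOR_EMAIL=autoroll@example.com",
		"GIT_COMMITTER_NAME=Dawn Autoroller",
		"GIT_COMMITTER_EMAIL=autoroll@example.com",
	}
	out, err := runGit(".", env, time.Minute, "var", "GIT_AUTHOR_IDENT")
	fmt.Print(out)
	if err != nil {
		fmt.Println("error:", err)
	}
}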
diff --git a/chromium/third_party/dawn/tools/src/resultsdb/resultsdb.go b/chromium/third_party/dawn/tools/src/resultsdb/resultsdb.go
index 3a8f0d6472d..1f727fbe447 100644
--- a/chromium/third_party/dawn/tools/src/resultsdb/resultsdb.go
+++ b/chromium/third_party/dawn/tools/src/resultsdb/resultsdb.go
@@ -72,7 +72,7 @@ func (r *ResultsDB) QueryTestResults(
TestIdRegexp: filterRegex,
},
ReadMask: &fieldmaskpb.FieldMask{Paths: []string{
- "status", "tags", "failure_reason",
+ "test_id", "status", "tags", "duration",
}},
PageSize: 1000, // Maximum page size.
PageToken: pageToken,
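
For context, a minimal sketch of how a read mask like the one above is built with the well-known protobuf FieldMask type; the surrounding ResultDB query plumbing is assumed.

// Sketch: restrict a ResultDB query to a handful of result fields.
package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/fieldmaskpb"
)

func main() {
	// Only these fields come back for each test result, keeping the
	// 1000-entry pages requested above small.
	mask := &fieldmaskpb.FieldMask{
		Paths: []string{"test_id", "status", "tags", "duration"},
	}
	fmt.Println(mask.GetPaths())
}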
diff --git a/chromium/third_party/dawn/tools/src/subcmd/subcmd.go b/chromium/third_party/dawn/tools/src/subcmd/subcmd.go
new file mode 100644
index 00000000000..88f115839bf
--- /dev/null
+++ b/chromium/third_party/dawn/tools/src/subcmd/subcmd.go
@@ -0,0 +1,131 @@
+// Copyright 2022 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package subcmd provides a multi-command interface for command line tools.
+package subcmd
+
+import (
+ "context"
+ "errors"
+ "flag"
+ "fmt"
+ "net/http"
+ "net/http/pprof"
+ "os"
+ "path/filepath"
+ "strings"
+ "text/tabwriter"
+)
+
+// ErrInvalidCLA is the error returned when an invalid command line argument was
+// provided, and the usage was already printed.
+var ErrInvalidCLA = errors.New("invalid command line args")
+
+// InvalidCLA shows the flag usage, and returns ErrInvalidCLA
+func InvalidCLA() error {
+ flag.Usage()
+ return ErrInvalidCLA
+}
+
+// Command is the interface for a command
+// Data is a generic data type passed down to the sub-command when run.
+type Command[Data any] interface {
+ // Name returns the name of the command.
+ Name() string
+ // Desc returns a description of the command.
+ Desc() string
+ // RegisterFlags registers all the command-specific flags
+ // Returns a list of mandatory arguments that must immediately follow the
+ // command name
+ RegisterFlags(context.Context, Data) ([]string, error)
+ // Run invokes the command
+ Run(context.Context, Data) error
+}
+
+// Run parses the command line arguments, possibly invoking one of
+// the provided commands.
+// If the command line arguments are invalid, then an error message is printed
+// and Run returns ErrInvalidCLA.
+func Run[Data any](ctx context.Context, data Data, cmds ...Command[Data]) error {
+ _, exe := filepath.Split(os.Args[0])
+
+ flag.Usage = func() {
+ out := flag.CommandLine.Output()
+ tw := tabwriter.NewWriter(out, 0, 1, 0, ' ', 0)
+ fmt.Fprintln(tw, exe, "[command]")
+ fmt.Fprintln(tw)
+ fmt.Fprintln(tw, "Commands:")
+ for _, cmd := range cmds {
+ fmt.Fprintln(tw, " ", cmd.Name(), "\t-", cmd.Desc())
+ }
+ fmt.Fprintln(tw)
+ fmt.Fprintln(tw, "Common flags:")
+ tw.Flush()
+ flag.PrintDefaults()
+ }
+
+ profile := false
+ flag.BoolVar(&profile, "profile", false, "enable a webserver at localhost:8080/profile that exposes a CPU profiler")
+ mux := http.NewServeMux()
+ mux.HandleFunc("/profile", pprof.Profile)
+
+ if len(os.Args) < 2 {
+ return InvalidCLA()
+ }
+ help := os.Args[1] == "help"
+ if help {
+ copy(os.Args[1:], os.Args[2:])
+ os.Args = os.Args[:len(os.Args)-1]
+ }
+
+ for _, cmd := range cmds {
+ if cmd.Name() == os.Args[1] {
+ out := flag.CommandLine.Output()
+ mandatory, err := cmd.RegisterFlags(ctx, data)
+ if err != nil {
+ return err
+ }
+ flag.Usage = func() {
+ flagsAndArgs := append([]string{"<flags>"}, mandatory...)
+ fmt.Fprintln(out, exe, cmd.Name(), strings.Join(flagsAndArgs, " "))
+ fmt.Fprintln(out)
+ fmt.Fprintln(out, cmd.Desc())
+ fmt.Fprintln(out)
+ fmt.Fprintln(out, "flags:")
+ flag.PrintDefaults()
+ }
+ if help {
+ flag.Usage()
+ return nil
+ }
+ args := os.Args[2:] // all arguments after the exe and command
+ if err := flag.CommandLine.Parse(args); err != nil {
+ return err
+ }
+ if nonFlagArgs := flag.Args(); len(nonFlagArgs) < len(mandatory) {
+ fmt.Fprintln(out, "missing argument", mandatory[len(nonFlagArgs)])
+ fmt.Fprintln(out)
+ return InvalidCLA()
+ }
+ if profile {
+ fmt.Println("download profile at: localhost:8080/profile")
+ fmt.Println("then run: 'go tool pprof <file>")
+ go http.ListenAndServe(":8080", mux)
+ }
+ return cmd.Run(ctx, data)
+ }
+ }
+
+ return InvalidCLA()
+}
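
A minimal sketch of a tool built on this package. The hello command and its config type are invented purely to illustrate the Command[Data] contract; the resulting binary would be run as, e.g., `tool hello -name=dawn` or `tool help hello`.

// Sketch of implementing Command[Data] and dispatching with subcmd.Run.
package main

import (
	"context"
	"errors"
	"flag"
	"fmt"
	"os"

	"dawn.googlesource.com/dawn/tools/src/subcmd"
)

// config is the data passed down to every sub-command.
type config struct {
	verbose bool
}

type helloCmd struct {
	name string
}

func (helloCmd) Name() string { return "hello" }
func (helloCmd) Desc() string { return "prints a greeting" }

// RegisterFlags declares the command-specific flags; no mandatory arguments.
func (c *helloCmd) RegisterFlags(ctx context.Context, cfg config) ([]string, error) {
	flag.StringVar(&c.name, "name", "world", "who to greet")
	return nil, nil
}

func (c *helloCmd) Run(ctx context.Context, cfg config) error {
	if cfg.verbose {
		fmt.Println("about to greet...")
	}
	fmt.Printf("Hello, %v!\n", c.name)
	return nil
}

func main() {
	cfg := config{verbose: true}
	if err := subcmd.Run(context.Background(), cfg, &helloCmd{}); err != nil {
		if !errors.Is(err, subcmd.ErrInvalidCLA) {
			fmt.Fprintln(os.Stderr, err)
		}
		os.Exit(1)
	}
}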
diff --git a/chromium/third_party/dawn/tools/src/utils/paths.go b/chromium/third_party/dawn/tools/src/utils/paths.go
new file mode 100644
index 00000000000..bc7a9099d65
--- /dev/null
+++ b/chromium/third_party/dawn/tools/src/utils/paths.go
@@ -0,0 +1,77 @@
+// Copyright 2022 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package utils
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+)
+
+// ThisLine returns the filepath and line number of the calling function
+func ThisLine() string {
+ _, file, line, ok := runtime.Caller(1)
+ if !ok {
+ return ""
+ }
+ return fmt.Sprintf("%v:%v", file, line)
+}
+
+// ThisDir returns the directory of the caller function
+func ThisDir() string {
+ _, file, _, ok := runtime.Caller(1)
+ if !ok {
+ return ""
+ }
+ return filepath.Dir(file)
+}
+
+// DawnRoot returns the path to the dawn project's root directory or empty
+// string if not found.
+func DawnRoot() string {
+ return pathOfFileInParentDirs(ThisDir(), "DEPS")
+}
+
+// pathOfFileInParentDirs looks for a file with `name` in paths starting from
+// `path`, and up into parent directories, returning the clean path in which the
+// file is found, or empty string if not found.
+func pathOfFileInParentDirs(path string, name string) string {
+ sep := string(filepath.Separator)
+ path, _ = filepath.Abs(path)
+ numDirs := strings.Count(path, sep) + 1
+ for i := 0; i < numDirs; i++ {
+ test := filepath.Join(path, name)
+ if _, err := os.Stat(test); err == nil {
+ return filepath.Clean(path)
+ }
+
+ path = path + sep + ".."
+ }
+ return ""
+}
+
+// ExpandHome returns the string with all occurrences of '~' replaced with the
+// user's home directory. If the user's home directory cannot be found, then
+// the input string is returned.
+func ExpandHome(path string) string {
+ if strings.ContainsRune(path, '~') {
+ if home, err := os.UserHomeDir(); err == nil {
+ return strings.ReplaceAll(path, "~", home)
+ }
+ }
+ return path
+}
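
A short sketch exercising these helpers; the printed values depend on where the binary is built and run, so the comments only indicate the shape of the output.

// Sketch: calling the path helpers from an example main package.
package main

import (
	"fmt"

	"dawn.googlesource.com/dawn/tools/src/utils"
)

func main() {
	// Source location of this call site, e.g. ".../example/main.go:12".
	fmt.Println("here:", utils.ThisLine())

	// Directory containing this source file.
	fmt.Println("dir:", utils.ThisDir())

	// Root of the dawn checkout (first parent directory containing DEPS),
	// or "" when run outside a dawn checkout.
	fmt.Println("dawn root:", utils.DawnRoot())

	// '~' expands to the user's home directory when it can be resolved.
	fmt.Println("cookies:", utils.ExpandHome("~/.gitcookies"))
}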
diff --git a/chromium/third_party/dawn/tools/src/utils/paths_test.go b/chromium/third_party/dawn/tools/src/utils/paths_test.go
new file mode 100644
index 00000000000..998b79e56e8
--- /dev/null
+++ b/chromium/third_party/dawn/tools/src/utils/paths_test.go
@@ -0,0 +1,51 @@
+// Copyright 2022 The Tint Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package utils_test
+
+import (
+ "path/filepath"
+ "strings"
+ "testing"
+
+ "dawn.googlesource.com/dawn/tools/src/utils"
+ "github.com/google/go-cmp/cmp"
+)
+
+func TestThisLine(t *testing.T) {
+ td := utils.ThisLine()
+ if !strings.HasSuffix(td, "paths_test.go:27") {
+ t.Errorf("TestThisLine() returned %v", td)
+ }
+}
+
+func TestThisDir(t *testing.T) {
+ td := utils.ThisDir()
+ if !strings.HasSuffix(td, "utils") {
+ t.Errorf("ThisDir() returned %v", td)
+ }
+}
+
+func TestDawnRoot(t *testing.T) {
+ dr := utils.DawnRoot()
+ rel, err := filepath.Rel(dr, utils.ThisDir())
+ if err != nil {
+ t.Fatalf("%v", err)
+ }
+ got := filepath.ToSlash(rel)
+ expect := `tools/src/utils`
+ if diff := cmp.Diff(got, expect); diff != "" {
+ t.Errorf("DawnRoot() returned %v.\n%v", dr, diff)
+ }
+}
diff --git a/chromium/third_party/dawn/webgpu-cts/expectations.txt b/chromium/third_party/dawn/webgpu-cts/expectations.txt
index 30c4fe84c2f..5dcffe12351 100644
--- a/chromium/third_party/dawn/webgpu-cts/expectations.txt
+++ b/chromium/third_party/dawn/webgpu-cts/expectations.txt
@@ -1,5 +1,3 @@
-# THIS FILE IS AUTOGENERATED. DO NOT MANUALLY EDIT.
-# SEE //content/test/gpu/process_generated_webgpu_expectations.py
# BEGIN TAG HEADER (autogenerated, see validate_tag_consistency.py)
# OS
# tags: [ android android-lollipop android-marshmallow android-nougat
@@ -29,7 +27,7 @@
# arm
# google google-0xffff
# intel intel-0xa2e intel-0xd26 intel-0xa011 intel-0x3e92 intel-0x3e9b
-# intel-0x5912 intel-0x9bc5
+# intel-0x5912
# nvidia nvidia-0xfe9 nvidia-0x1cb3 nvidia-0x2184
# qualcomm ]
# Decoder
@@ -63,322 +61,176 @@
# tags: [ webgpu-adapter-default webgpu-adapter-swiftshader ]
# END TAG HEADER
-################################################################################
-# webgpu:shader,execution,zero_init:compute,zero_init failures on Linux / Intel
-# KEEP
-################################################################################
-crbug.com/dawn/0000 [ linux ] webgpu:shader,execution,zero_init:compute,zero_init:storageClass="workgroup";workgroupSize=[1,1,1];batch__=1 [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:shader,execution,zero_init:compute,zero_init:storageClass="workgroup";workgroupSize=[1,1,48];batch__=1 [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:shader,execution,zero_init:compute,zero_init:storageClass="workgroup";workgroupSize=[1,1,63];batch__=1 [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:shader,execution,zero_init:compute,zero_init:storageClass="workgroup";workgroupSize=[1,32,1];batch__=1 [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:shader,execution,zero_init:compute,zero_init:storageClass="workgroup";workgroupSize=[1,47,1];batch__=1 [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:shader,execution,zero_init:compute,zero_init:storageClass="workgroup";workgroupSize=[33,1,1];batch__=1 [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:shader,execution,zero_init:compute,zero_init:storageClass="workgroup";workgroupSize=[64,1,1];batch__=1 [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:shader,execution,zero_init:compute,zero_init:storageClass="workgroup";workgroupSize=[7,7,3];batch__=1 [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:shader,execution,zero_init:compute,zero_init:storageClass="workgroup";workgroupSize=[8,8,2];batch__=1 [ Failure ]
-
-################################################################################
-# webgpu:shader,execution,zero_init:compute,zero_init failures on mac
-# Mostly failures
-# KEEP
-################################################################################
-crbug.com/dawn/0000 [ mac ] webgpu:shader,execution,zero_init:compute,zero_init:* [ Failure ]
+# Last rolled: 2022-06-07 10:38:33AM
################################################################################
-# webgpu:shader,execution,zero_init:compute,zero_init - flaky, slow
+# webgpu:shader,execution,zero_init:compute,zero_init failures on intel
# KEEP
################################################################################
-crbug.com/dawn/0000 [ win ] webgpu:shader,execution,zero_init:compute,zero_init:* [ RetryOnFailure Slow ]
-
-################################################################################
-# webgpu:api,operation,command_buffer,image_copy:mip_levels
-# Partial failures on all platforms
-# KEEP
-################################################################################
-crbug.com/dawn/0000 webgpu:api,operation,command_buffer,image_copy:mip_levels:* [ Failure ]
+crbug.com/dawn/1111 [ intel ] webgpu:shader,execution,zero_init:compute,zero_init:storageClass="workgroup";workgroupSize=[1,1,1];batch__=1 [ Failure ]
+crbug.com/dawn/1111 [ intel ] webgpu:shader,execution,zero_init:compute,zero_init:storageClass="workgroup";workgroupSize=[1,1,48];batch__=1 [ Failure ]
+crbug.com/dawn/1111 [ intel ] webgpu:shader,execution,zero_init:compute,zero_init:storageClass="workgroup";workgroupSize=[1,1,63];batch__=1 [ Failure ]
+crbug.com/dawn/1111 [ intel ] webgpu:shader,execution,zero_init:compute,zero_init:storageClass="workgroup";workgroupSize=[1,32,1];batch__=1 [ Failure ]
+crbug.com/dawn/1111 [ intel ] webgpu:shader,execution,zero_init:compute,zero_init:storageClass="workgroup";workgroupSize=[1,47,1];batch__=1 [ Failure ]
+crbug.com/dawn/1111 [ intel ] webgpu:shader,execution,zero_init:compute,zero_init:storageClass="workgroup";workgroupSize=[33,1,1];batch__=1 [ Failure ]
+crbug.com/dawn/1111 [ intel ] webgpu:shader,execution,zero_init:compute,zero_init:storageClass="workgroup";workgroupSize=[64,1,1];batch__=1 [ Failure ]
+crbug.com/dawn/1111 [ intel ] webgpu:shader,execution,zero_init:compute,zero_init:storageClass="workgroup";workgroupSize=[7,7,3];batch__=1 [ Failure ]
+crbug.com/dawn/1111 [ intel ] webgpu:shader,execution,zero_init:compute,zero_init:storageClass="workgroup";workgroupSize=[8,8,2];batch__=1 [ Failure ]
################################################################################
# webgpu:api,operation,resource_init,texture_zero:uninitialized_texture_is_zero
-# Partial failures for all platforms
-# KEEP
-################################################################################
-crbug.com/dawn/0000 webgpu:api,operation,resource_init,texture_zero:uninitialized_texture_is_zero:* [ Failure ]
-
-################################################################################
-# webgpu:web_platform,copyToTexture,canvas:copy_contents_from_gl_context_canvas
-# Failures on mac
+# Spec / CTS bugs because depth24unorm-stencil8 depth-specific format is depth24plus
# KEEP
################################################################################
-crbug.com/dawn/0000 [ mac ] webgpu:web_platform,copyToTexture,canvas:copy_contents_from_gl_context_canvas:* [ Failure ]
+crbug.com/dawn/1388 [ linux ] webgpu:api,operation,resource_init,texture_zero:uninitialized_texture_is_zero:dimension="2d";readMethod="CopyToBuffer";format="depth24unorm-stencil8" [ Failure ]
+crbug.com/dawn/1388 [ mac ] webgpu:api,operation,resource_init,texture_zero:uninitialized_texture_is_zero:dimension="2d";readMethod="CopyToBuffer";format="depth24unorm-stencil8" [ Failure ]
+crbug.com/dawn/1388 [ win ] webgpu:api,operation,resource_init,texture_zero:uninitialized_texture_is_zero:dimension="2d";readMethod="CopyToBuffer";format="depth24unorm-stencil8" [ Failure ]
+crbug.com/dawn/1388 [ linux ] webgpu:api,operation,resource_init,texture_zero:uninitialized_texture_is_zero:dimension="2d";readMethod="CopyToTexture";format="depth24unorm-stencil8" [ Failure ]
+crbug.com/dawn/1388 [ mac ] webgpu:api,operation,resource_init,texture_zero:uninitialized_texture_is_zero:dimension="2d";readMethod="CopyToTexture";format="depth24unorm-stencil8" [ Failure ]
+crbug.com/dawn/1388 [ win ] webgpu:api,operation,resource_init,texture_zero:uninitialized_texture_is_zero:dimension="2d";readMethod="CopyToTexture";format="depth24unorm-stencil8" [ Failure ]
################################################################################
-# webgpu:web_platform,copyToTexture,ImageBitmap:from_ImageData
-# Failures on all platforms
-# KEEP
-################################################################################
-crbug.com/dawn/0000 webgpu:web_platform,copyToTexture,ImageBitmap:* [ Failure ]
-
-################################################################################
-# webgpu:api,validation,state,device_lost,destroy
-# Failures on windows
-# KEEP
-################################################################################
-crbug.com/dawn/0000 [ win ] webgpu:api,validation,state,device_lost,destroy:* [ Failure ]
-
-################################################################################
-# webgpu:api,validation,queue,writeBuffer:ranges:
-# KEEP
-################################################################################
-crbug.com/dawn/0000 webgpu:api,validation,queue,writeBuffer:ranges: [ Failure ]
-
-################################################################################
-# webgpu:web_platform,copyToTexture,canvas:copy_contents_from_gpu_context_canvas
-# KEEP
-################################################################################
-crbug.com/dawn/0000 webgpu:web_platform,copyToTexture,canvas:copy_contents_from_gpu_context_canvas:* [ Failure ]
-
-################################################################################
-# webgpu:web_platform,copyToTexture,canvas:color_space_conversion:
+# webgpu:api,operation,resource_init,texture_zero:uninitialized_texture_is_zero
+# Unexpected failures on mac.
# KEEP
################################################################################
-crbug.com/dawn/0000 webgpu:web_platform,copyToTexture,canvas:color_space_conversion:* [ Failure ]
+crbug.com/dawn/1389 [ mac ] webgpu:api,operation,resource_init,texture_zero:uninitialized_texture_is_zero:dimension="2d";readMethod="CopyToBuffer";format="depth16unorm" [ Failure ]
+crbug.com/dawn/1389 [ mac ] webgpu:api,operation,resource_init,texture_zero:uninitialized_texture_is_zero:dimension="2d";readMethod="CopyToBuffer";format="depth32float-stencil8" [ Failure ]
+crbug.com/dawn/1389 [ mac ] webgpu:api,operation,resource_init,texture_zero:uninitialized_texture_is_zero:dimension="2d";readMethod="CopyToBuffer";format="stencil8" [ Failure ]
+crbug.com/dawn/1389 [ mac ] webgpu:api,operation,resource_init,texture_zero:uninitialized_texture_is_zero:dimension="2d";readMethod="CopyToTexture";format="depth16unorm" [ Failure ]
+crbug.com/dawn/1389 [ mac ] webgpu:api,operation,resource_init,texture_zero:uninitialized_texture_is_zero:dimension="2d";readMethod="CopyToTexture";format="stencil8" [ Failure ]
+crbug.com/dawn/1389 [ mac ] webgpu:api,operation,resource_init,texture_zero:uninitialized_texture_is_zero:dimension="2d";readMethod="DepthTest";format="depth16unorm" [ Failure ]
+crbug.com/dawn/1389 [ mac ] webgpu:api,operation,resource_init,texture_zero:uninitialized_texture_is_zero:dimension="2d";readMethod="StencilTest";format="stencil8" [ Failure ]
################################################################################
-# Flaky tests
-# KEEP
-################################################################################
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,image_copy:offsets_and_sizes:* [ RetryOnFailure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,image_copy:origins_and_extents:* [ RetryOnFailure ]
-crbug.com/dawn/0000 [ mac ] webgpu:web_platform,copyToTexture,canvas:copy_contents_from_2d_context_canvas:* [ RetryOnFailure ]
-crbug.com/dawn/0000 [ win ] webgpu:web_platform,copyToTexture,canvas:copy_contents_from_2d_context_canvas:* [ RetryOnFailure ]
-crbug.com/dawn/0000 [ win ] webgpu:web_platform,copyToTexture,canvas:copy_contents_from_gl_context_canvas:* [ RetryOnFailure ]
+# webgpu:api,operation,resource_init,texture_zero:uninitialized_texture_is_zero
+# Unexpected failures on windows.
+# KEEP
+################################################################################
+crbug.com/dawn/0000 [ intel win ] webgpu:api,operation,resource_init,texture_zero:uninitialized_texture_is_zero:dimension="2d";readMethod="CopyToBuffer";format="depth16unorm" [ Failure ]
+crbug.com/dawn/0000 [ win ] webgpu:api,operation,resource_init,texture_zero:uninitialized_texture_is_zero:dimension="2d";readMethod="CopyToBuffer";format="depth32float-stencil8" [ Failure ]
+crbug.com/dawn/0000 [ intel win ] webgpu:api,operation,resource_init,texture_zero:uninitialized_texture_is_zero:dimension="2d";readMethod="CopyToBuffer";format="stencil8" [ Failure ]
+crbug.com/dawn/0000 [ dawn-no-backend-validation intel win ] webgpu:api,operation,resource_init,texture_zero:uninitialized_texture_is_zero:dimension="2d";readMethod="CopyToTexture";format="stencil8" [ Failure ]
+crbug.com/dawn/0000 [ win ] webgpu:api,operation,resource_init,texture_zero:uninitialized_texture_is_zero:dimension="2d";readMethod="StencilTest";* [ Failure ]
+
+################################################################################
+# copyToTexture failures on Linux
+# Skipped instead of just Crash because of the number of failures
+# KEEP
+################################################################################
+crbug.com/1234041 [ linux ] webgpu:api,validation,queue,copyToTexture,CopyExternalImageToTexture:* [ Skip ]
+crbug.com/1236130 [ linux ] webgpu:web_platform,canvas,readbackFromWebGPUCanvas:* [ Skip ]
+crbug.com/1309194 [ linux ] webgpu:web_platform,copyToTexture,ImageBitmap:from_canvas:* [ Skip ]
+crbug.com/1309194 [ linux ] webgpu:web_platform,copyToTexture,canvas:color_space_conversion:* [ Skip ]
+crbug.com/1309194 [ linux ] webgpu:web_platform,copyToTexture,canvas:copy_contents_from_2d_context_canvas:* [ Skip ]
+crbug.com/1309194 [ linux ] webgpu:web_platform,copyToTexture,canvas:copy_contents_from_gl_context_canvas:* [ Skip ]
+crbug.com/1309194 [ linux ] webgpu:web_platform,copyToTexture,canvas:copy_contents_from_gpu_context_canvas:* [ Skip ]
+
+################################################################################
+# copyToTexture failures. Needs investigation
+# KEEP
+################################################################################
+crbug.com/dawn/0000 [ intel mac ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,non_compressed,* [ Failure ]
+crbug.com/dawn/1319 [ intel win ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,non_compressed,array:srcFormat="rg16sint";dstFormat="rg16sint";dimension="2d" [ Failure ]
+crbug.com/dawn/1319 [ intel win ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,non_compressed,array:srcFormat="rg16uint";dstFormat="rg16uint";dimension="2d" [ Failure ]
+crbug.com/dawn/1319 [ intel win ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,non_compressed,array:srcFormat="rg32float";dstFormat="rg32float";dimension="2d" [ Failure ]
+crbug.com/dawn/1319 [ intel win ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,non_compressed,array:srcFormat="rg32sint";dstFormat="rg32sint";dimension="2d" [ Failure ]
+crbug.com/dawn/1319 [ intel win ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,non_compressed,array:srcFormat="rg32uint";dstFormat="rg32uint";dimension="2d" [ Failure ]
+crbug.com/dawn/1319 [ intel win ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,non_compressed,array:srcFormat="rgba16sint";dstFormat="rgba16sint";dimension="2d" [ Failure ]
+crbug.com/dawn/1319 [ intel win ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,non_compressed,array:srcFormat="rgba16uint";dstFormat="rgba16uint";dimension="2d" [ Failure ]
+crbug.com/dawn/1319 [ intel win ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,non_compressed,array:srcFormat="rgba32float";dstFormat="rgba32float";dimension="2d" [ Failure ]
+crbug.com/dawn/1319 [ intel win ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,non_compressed,array:srcFormat="rgba32sint";dstFormat="rgba32sint";dimension="2d" [ Failure ]
+crbug.com/dawn/1319 [ intel win ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,non_compressed,array:srcFormat="rgba32uint";dstFormat="rgba32uint";dimension="2d" [ Failure ]
+crbug.com/dawn/1319 [ intel win ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,non_compressed,array:srcFormat="rgba8sint";dstFormat="rgba8sint";dimension="2d" [ Failure ]
+crbug.com/dawn/1319 [ intel win ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,non_compressed,array:srcFormat="rgba8snorm";dstFormat="rgba8snorm";dimension="2d" [ Failure ]
+crbug.com/dawn/1319 [ intel win ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,non_compressed,array:srcFormat="rgba8uint";dstFormat="rgba8uint";dimension="2d" [ Failure ]
+crbug.com/dawn/1319 [ intel win ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,non_compressed,non_array:srcFormat="rg16sint";dstFormat="rg16sint";dimension="2d" [ Failure ]
+crbug.com/dawn/1319 [ intel win ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,non_compressed,non_array:srcFormat="rg16uint";dstFormat="rg16uint";dimension="2d" [ Failure ]
+crbug.com/dawn/1319 [ intel win ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,non_compressed,non_array:srcFormat="rg32float";dstFormat="rg32float";dimension="2d" [ Failure ]
+crbug.com/dawn/1319 [ intel win ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,non_compressed,non_array:srcFormat="rg32sint";dstFormat="rg32sint";dimension="2d" [ Failure ]
+crbug.com/dawn/1319 [ intel win ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,non_compressed,non_array:srcFormat="rg32uint";dstFormat="rg32uint";dimension="2d" [ Failure ]
+crbug.com/dawn/1319 [ intel win ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,non_compressed,non_array:srcFormat="rgba16sint";dstFormat="rgba16sint";dimension="2d" [ Failure ]
+crbug.com/dawn/1319 [ intel win ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,non_compressed,non_array:srcFormat="rgba16uint";dstFormat="rgba16uint";dimension="2d" [ Failure ]
+crbug.com/dawn/1319 [ intel win ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,non_compressed,non_array:srcFormat="rgba32float";dstFormat="rgba32float";dimension="2d" [ Failure ]
+crbug.com/dawn/1319 [ intel win ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,non_compressed,non_array:srcFormat="rgba32sint";dstFormat="rgba32sint";dimension="2d" [ Failure ]
+crbug.com/dawn/1319 [ intel win ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,non_compressed,non_array:srcFormat="rgba32uint";dstFormat="rgba32uint";dimension="2d" [ Failure ]
+crbug.com/dawn/1319 [ intel win ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,non_compressed,non_array:srcFormat="rgba8sint";dstFormat="rgba8sint";dimension="2d" [ Failure ]
+crbug.com/dawn/1319 [ intel win ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,non_compressed,non_array:srcFormat="rgba8snorm";dstFormat="rgba8snorm";dimension="2d" [ Failure ]
+crbug.com/dawn/1319 [ intel win ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,non_compressed,non_array:srcFormat="rgba8uint";dstFormat="rgba8uint";dimension="2d" [ Failure ]
+
+################################################################################
+# General test slowness because of https://github.com/gpuweb/cts/issues/1162
+# KEEP
+################################################################################
+crbug.com/dawn/1398 [ amd mac ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,non_compressed,* [ Slow ]
+crbug.com/dawn/1398 webgpu:web_platform,copyToTexture,ImageBitmap:* [ Slow ]
+crbug.com/dawn/1398 [ mac ] webgpu:web_platform,copyToTexture,canvas:color_space_conversion:* [ Slow ]
+crbug.com/dawn/1398 [ win ] webgpu:web_platform,copyToTexture,canvas:color_space_conversion:* [ Slow ]
+crbug.com/dawn/1398 [ mac ] webgpu:web_platform,copyToTexture,canvas:copy_contents_from_2d_context_canvas:* [ Slow ]
+crbug.com/dawn/1398 [ win ] webgpu:web_platform,copyToTexture,canvas:copy_contents_from_2d_context_canvas:* [ Slow ]
+crbug.com/dawn/1398 [ mac ] webgpu:web_platform,copyToTexture,canvas:copy_contents_from_gl_context_canvas:* [ Slow ]
+crbug.com/dawn/1398 [ win ] webgpu:web_platform,copyToTexture,canvas:copy_contents_from_gl_context_canvas:* [ Slow ]
+crbug.com/dawn/1398 [ mac ] webgpu:web_platform,copyToTexture,canvas:copy_contents_from_gpu_context_canvas:* [ Slow ]
+crbug.com/dawn/1398 [ win ] webgpu:web_platform,copyToTexture,canvas:copy_contents_from_gpu_context_canvas:* [ Slow ]
################################################################################
# webgpu:shader,execution,robust_access
# Slow tests - win
# KEEP
################################################################################
-crbug.com/dawn/0000 [ win ] webgpu:shader,execution,robust_access:linear_memory:storageClass="storage";storageMode="read_write";access="read";dynamicOffset=false;containerType="matrix";isAtomic=false [ Slow ]
-crbug.com/dawn/0000 [ win ] webgpu:shader,execution,robust_access:linear_memory:storageClass="storage";storageMode="read_write";access="read";dynamicOffset=false;containerType="vector";isAtomic=false [ Slow ]
-crbug.com/dawn/0000 [ win ] webgpu:shader,execution,robust_access:linear_memory:storageClass="storage";storageMode="read_write";access="read";dynamicOffset=true;containerType="matrix";isAtomic=false [ Slow ]
-crbug.com/dawn/0000 [ win ] webgpu:shader,execution,robust_access:linear_memory:storageClass="storage";storageMode="read_write";access="read";dynamicOffset=true;containerType="vector";isAtomic=false [ Slow ]
crbug.com/dawn/0000 [ win ] webgpu:shader,execution,robust_access:linear_memory:storageClass="storage";storageMode="read";access="read";dynamicOffset=false;containerType="matrix";isAtomic=false [ Slow ]
crbug.com/dawn/0000 [ win ] webgpu:shader,execution,robust_access:linear_memory:storageClass="storage";storageMode="read";access="read";dynamicOffset=false;containerType="vector";isAtomic=false [ Slow ]
crbug.com/dawn/0000 [ win ] webgpu:shader,execution,robust_access:linear_memory:storageClass="storage";storageMode="read";access="read";dynamicOffset=true;containerType="matrix";isAtomic=false [ Slow ]
crbug.com/dawn/0000 [ win ] webgpu:shader,execution,robust_access:linear_memory:storageClass="storage";storageMode="read";access="read";dynamicOffset=true;containerType="vector";isAtomic=false [ Slow ]
+crbug.com/dawn/0000 [ win ] webgpu:shader,execution,robust_access:linear_memory:storageClass="storage";storageMode="read_write";access="read";dynamicOffset=false;containerType="matrix";isAtomic=false [ Slow ]
+crbug.com/dawn/0000 [ win ] webgpu:shader,execution,robust_access:linear_memory:storageClass="storage";storageMode="read_write";access="read";dynamicOffset=false;containerType="vector";isAtomic=false [ Slow ]
+crbug.com/dawn/0000 [ win ] webgpu:shader,execution,robust_access:linear_memory:storageClass="storage";storageMode="read_write";access="read";dynamicOffset=true;containerType="matrix";isAtomic=false [ Slow ]
+crbug.com/dawn/0000 [ win ] webgpu:shader,execution,robust_access:linear_memory:storageClass="storage";storageMode="read_write";access="read";dynamicOffset=true;containerType="vector";isAtomic=false [ Slow ]
crbug.com/dawn/0000 [ win ] webgpu:shader,execution,robust_access:linear_memory:storageClass="uniform";access="read";dynamicOffset=false;containerType="matrix";isAtomic=false [ Slow ]
crbug.com/dawn/0000 [ win ] webgpu:shader,execution,robust_access:linear_memory:storageClass="uniform";access="read";dynamicOffset=true;containerType="matrix";isAtomic=false [ Slow ]
################################################################################
# webgpu:shader,execution,robust_access
-# Failures - mac
-# KEEP
-################################################################################
-crbug.com/dawn/0000 [ mac ] webgpu:shader,execution,robust_access:linear_memory:* [ Failure ]
-
-################################################################################
-# windows 3D texture failures
-# KEEP
+# Failures - mac, linux
################################################################################
-crbug.com/dawn/0000 [ win ] webgpu:api,operation,command_buffer,image_copy:rowsPerImage_and_bytesPerRow:* [ Failure ]
+crbug.com/dawn/0000 [ monterey ] webgpu:shader,execution,robust_access:linear_memory:storageClass="function";access="write";containerType="array";isAtomic=false [ Failure ]
+crbug.com/dawn/0000 [ monterey ] webgpu:shader,execution,robust_access:linear_memory:storageClass="function";access="write";containerType="matrix";isAtomic=false [ Failure ]
+crbug.com/dawn/0000 [ monterey ] webgpu:shader,execution,robust_access:linear_memory:storageClass="private";access="write";containerType="array";isAtomic=false [ Failure ]
+crbug.com/dawn/0000 [ monterey ] webgpu:shader,execution,robust_access:linear_memory:storageClass="private";access="write";containerType="matrix";isAtomic=false [ Failure ]
+crbug.com/dawn/0000 [ intel ubuntu ] webgpu:shader,execution,robust_access:linear_memory:storageClass="workgroup";access="read";containerType="vector";isAtomic=false [ Failure ]
+crbug.com/dawn/0000 [ intel ubuntu ] webgpu:shader,execution,robust_access:linear_memory:storageClass="workgroup";access="write";containerType="vector";isAtomic=false [ Failure ]
################################################################################
# stencil8 failures
# KEEP
################################################################################
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,image_copy:offsets_and_sizes_copy_depth_stencil:format="depth24unorm-stencil8";copyMethod="CopyB2T";aspect="stencil-only" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,image_copy:rowsPerImage_and_bytesPerRow_depth_stencil:format="depth32float-stencil8";copyMethod="CopyT2B";aspect="depth-only" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,image_copy:rowsPerImage_and_bytesPerRow_depth_stencil:format="stencil8";* [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,render_pass,storeOp:render_pass_store_op,depth_stencil_attachment_only:depthStencilFormat="stencil8";* [ Failure ]
+crbug.com/dawn/0000 [ win ] webgpu:api,operation,command_buffer,copyTextureToTexture:copy_depth_stencil:format="depth24unorm-stencil8" [ Failure ]
crbug.com/dawn/0000 [ mac ] webgpu:api,operation,command_buffer,copyTextureToTexture:copy_depth_stencil:format="depth32float-stencil8" [ Failure ]
crbug.com/dawn/0000 [ mac ] webgpu:api,operation,command_buffer,copyTextureToTexture:copy_depth_stencil:format="stencil8" [ Failure ]
crbug.com/dawn/0000 [ mac ] webgpu:api,operation,command_buffer,image_copy:offsets_and_sizes_copy_depth_stencil:format="stencil8";* [ Failure ]
-crbug.com/dawn/0000 [ mac ] webgpu:api,operation,command_buffer,image_copy:rowsPerImage_and_bytesPerRow_depth_stencil:format="depth32float-stencil8";copyMethod="CopyT2B";aspect="depth-only" [ Failure ]
crbug.com/dawn/0000 [ mac ] webgpu:api,operation,command_buffer,image_copy:rowsPerImage_and_bytesPerRow_depth_stencil:format="stencil8";* [ Failure ]
crbug.com/dawn/0000 [ mac ] webgpu:api,operation,render_pass,storeOp:render_pass_store_op,depth_stencil_attachment_only:depthStencilFormat="stencil8";* [ Failure ]
-crbug.com/dawn/0000 [ win ] webgpu:api,operation,command_buffer,copyTextureToTexture:copy_depth_stencil:format="depth24unorm-stencil8" [ Failure ]
-crbug.com/dawn/0000 [ win ] webgpu:api,operation,command_buffer,image_copy:rowsPerImage_and_bytesPerRow_depth_stencil:format="depth32float-stencil8";copyMethod="CopyT2B";aspect="depth-only" [ Failure ]
-crbug.com/dawn/0000 [ win ] webgpu:api,operation,render_pass,storeOp:render_pass_store_op,depth_stencil_attachment_only:depthStencilFormat="stencil8";* [ Failure ]
-crbug.com/dawn/0000 webgpu:api,operation,command_buffer,image_copy:offsets_and_sizes_copy_depth_stencil:format="depth32float-stencil8";copyMethod="CopyT2B";aspect="depth-only" [ Failure ]
-crbug.com/dawn/0000 webgpu:api,validation,image_copy,layout_related:required_bytes_in_copy:method="CopyB2T";format="stencil8";* [ Failure ]
-crbug.com/dawn/0000 webgpu:api,validation,image_copy,layout_related:required_bytes_in_copy:method="CopyT2B";format="stencil8";* [ Failure ]
-crbug.com/dawn/0000 webgpu:api,validation,resource_usages,texture,in_pass_encoder:subresources_and_binding_types_combination_for_aspect:compute=false;binding0InBundle=false;binding1InBundle=false;format="depth24unorm-stencil8" [ Failure ]
-crbug.com/dawn/0000 webgpu:api,validation,resource_usages,texture,in_pass_encoder:subresources_and_binding_types_combination_for_aspect:compute=false;binding0InBundle=false;binding1InBundle=false;format="depth32float-stencil8" [ Failure ]
-crbug.com/dawn/0000 webgpu:api,validation,resource_usages,texture,in_pass_encoder:subresources_and_binding_types_combination_for_aspect:compute=false;binding0InBundle=true;binding1InBundle=false;format="depth24plus-stencil8" [ Failure ]
-crbug.com/dawn/0000 webgpu:api,validation,resource_usages,texture,in_pass_encoder:subresources_and_binding_types_combination_for_aspect:compute=false;binding0InBundle=true;binding1InBundle=false;format="depth24unorm-stencil8" [ Failure ]
-crbug.com/dawn/0000 webgpu:api,validation,resource_usages,texture,in_pass_encoder:subresources_and_binding_types_combination_for_aspect:compute=false;binding0InBundle=true;binding1InBundle=false;format="depth32float-stencil8" [ Failure ]
-crbug.com/dawn/0000 worker_webgpu:api,operation,render_pass,storeOp:render_pass_store_op,depth_stencil_attachment_only:depthStencilFormat="stencil8";* [ Failure ]
################################################################################
# depth16unorm failures
-# KEEP
################################################################################
-crbug.com/dawn/0000 webgpu:api,operation,command_buffer,copyTextureToTexture:copy_depth_stencil:format="depth16unorm" [ Failure ]
-crbug.com/dawn/0000 webgpu:api,operation,command_buffer,image_copy:offsets_and_sizes_copy_depth_stencil:format="depth16unorm";* [ Failure ]
-crbug.com/dawn/0000 webgpu:api,operation,command_buffer,image_copy:rowsPerImage_and_bytesPerRow_depth_stencil:format="depth16unorm";* [ Failure ]
-crbug.com/dawn/0000 webgpu:api,validation,encoding,cmds,buffer_texture_copies:depth_stencil_format,copy_usage_and_aspect:format="depth16unorm" [ Failure ]
-crbug.com/dawn/0000 webgpu:api,validation,image_copy,layout_related:required_bytes_in_copy:method="CopyB2T";format="depth16unorm";* [ Failure ]
-crbug.com/dawn/0000 webgpu:api,validation,image_copy,layout_related:required_bytes_in_copy:method="CopyT2B";format="depth16unorm";* [ Failure ]
+[ bigsur ] webgpu:api,operation,command_buffer,copyTextureToTexture:copy_depth_stencil:format="depth16unorm" [ Failure ]
################################################################################
# depth24plus failures
-# KEEP
################################################################################
-crbug.com/dawn/0000 [ mac ] webgpu:api,operation,command_buffer,copyTextureToTexture:copy_depth_stencil:format="depth24plus-stencil8" [ Failure ]
-crbug.com/dawn/0000 [ mac ] webgpu:api,operation,command_buffer,copyTextureToTexture:copy_depth_stencil:format="depth24plus" [ Failure ]
-crbug.com/dawn/0000 webgpu:api,validation,resource_usages,texture,in_pass_encoder:subresources_and_binding_types_combination_for_aspect:compute=false;binding0InBundle=false;binding1InBundle=false;format="depth24plus-stencil8" [ Failure ]
-
-################################################################################
-# webgpu:api,operation,command_buffer,copyTextureToTexture compressed texture failures
-# KEEP
-################################################################################
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,array:srcFormat="astc-12x10-unorm";dstFormat="astc-12x10-unorm";dimension="2d" [ RetryOnFailure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,non_array:srcFormat="astc-10x5-unorm";dstFormat="astc-10x5-unorm";dimension="2d" [ RetryOnFailure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,non_array:srcFormat="astc-10x8-unorm";dstFormat="astc-10x8-unorm";dimension="2d" [ RetryOnFailure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,non_array:srcFormat="astc-5x5-unorm-srgb";dstFormat="astc-5x5-unorm-srgb";dimension="2d" [ RetryOnFailure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,non_array:srcFormat="astc-6x5-unorm";dstFormat="astc-6x5-unorm";dimension="2d" [ RetryOnFailure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,non_array:srcFormat="astc-6x6-unorm";dstFormat="astc-6x6-unorm";dimension="2d" [ RetryOnFailure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,non_array:srcFormat="astc-8x5-unorm-srgb";dstFormat="astc-8x5-unorm-srgb";dimension="2d" [ RetryOnFailure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,array:srcFormat="astc-10x10-unorm";dstFormat="astc-10x10-unorm-srgb";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,array:srcFormat="astc-10x10-unorm-srgb";dstFormat="astc-10x10-unorm";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,array:srcFormat="astc-10x5-unorm";dstFormat="astc-10x5-unorm-srgb";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,array:srcFormat="astc-10x5-unorm-srgb";dstFormat="astc-10x5-unorm";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,array:srcFormat="astc-10x6-unorm";dstFormat="astc-10x6-unorm-srgb";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,array:srcFormat="astc-10x6-unorm-srgb";dstFormat="astc-10x6-unorm";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,array:srcFormat="astc-10x8-unorm";dstFormat="astc-10x8-unorm-srgb";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,array:srcFormat="astc-10x8-unorm-srgb";dstFormat="astc-10x8-unorm";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,array:srcFormat="astc-12x10-unorm";dstFormat="astc-12x10-unorm-srgb";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,array:srcFormat="astc-12x10-unorm-srgb";dstFormat="astc-12x10-unorm";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,array:srcFormat="astc-12x12-unorm";dstFormat="astc-12x12-unorm-srgb";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,array:srcFormat="astc-12x12-unorm-srgb";dstFormat="astc-12x12-unorm";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,array:srcFormat="astc-4x4-unorm";dstFormat="astc-4x4-unorm-srgb";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,array:srcFormat="astc-4x4-unorm-srgb";dstFormat="astc-4x4-unorm";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,array:srcFormat="astc-5x4-unorm";dstFormat="astc-5x4-unorm-srgb";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,array:srcFormat="astc-5x4-unorm-srgb";dstFormat="astc-5x4-unorm";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,array:srcFormat="astc-5x5-unorm";dstFormat="astc-5x5-unorm-srgb";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,array:srcFormat="astc-5x5-unorm-srgb";dstFormat="astc-5x5-unorm";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,array:srcFormat="astc-6x5-unorm";dstFormat="astc-6x5-unorm-srgb";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,array:srcFormat="astc-6x5-unorm-srgb";dstFormat="astc-6x5-unorm";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,array:srcFormat="astc-6x6-unorm";dstFormat="astc-6x6-unorm-srgb";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,array:srcFormat="astc-6x6-unorm-srgb";dstFormat="astc-6x6-unorm";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,array:srcFormat="astc-8x5-unorm";dstFormat="astc-8x5-unorm-srgb";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,array:srcFormat="astc-8x5-unorm-srgb";dstFormat="astc-8x5-unorm";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,array:srcFormat="astc-8x6-unorm";dstFormat="astc-8x6-unorm-srgb";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,array:srcFormat="astc-8x6-unorm-srgb";dstFormat="astc-8x6-unorm";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,array:srcFormat="astc-8x8-unorm";dstFormat="astc-8x8-unorm-srgb";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,array:srcFormat="astc-8x8-unorm-srgb";dstFormat="astc-8x8-unorm";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,non_array:srcFormat="astc-10x10-unorm";dstFormat="astc-10x10-unorm-srgb";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,non_array:srcFormat="astc-10x10-unorm-srgb";dstFormat="astc-10x10-unorm";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,non_array:srcFormat="astc-10x5-unorm";dstFormat="astc-10x5-unorm-srgb";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,non_array:srcFormat="astc-10x5-unorm-srgb";dstFormat="astc-10x5-unorm";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,non_array:srcFormat="astc-10x6-unorm";dstFormat="astc-10x6-unorm-srgb";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,non_array:srcFormat="astc-10x6-unorm-srgb";dstFormat="astc-10x6-unorm";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,non_array:srcFormat="astc-10x8-unorm";dstFormat="astc-10x8-unorm-srgb";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,non_array:srcFormat="astc-10x8-unorm-srgb";dstFormat="astc-10x8-unorm";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,non_array:srcFormat="astc-12x10-unorm";dstFormat="astc-12x10-unorm-srgb";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,non_array:srcFormat="astc-12x10-unorm-srgb";dstFormat="astc-12x10-unorm";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,non_array:srcFormat="astc-12x12-unorm";dstFormat="astc-12x12-unorm-srgb";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,non_array:srcFormat="astc-12x12-unorm-srgb";dstFormat="astc-12x12-unorm";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,non_array:srcFormat="astc-4x4-unorm";dstFormat="astc-4x4-unorm-srgb";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,non_array:srcFormat="astc-4x4-unorm-srgb";dstFormat="astc-4x4-unorm";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,non_array:srcFormat="astc-5x4-unorm";dstFormat="astc-5x4-unorm-srgb";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,non_array:srcFormat="astc-5x4-unorm-srgb";dstFormat="astc-5x4-unorm";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,non_array:srcFormat="astc-5x5-unorm";dstFormat="astc-5x5-unorm-srgb";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,non_array:srcFormat="astc-5x5-unorm-srgb";dstFormat="astc-5x5-unorm";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,non_array:srcFormat="astc-6x5-unorm";dstFormat="astc-6x5-unorm-srgb";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,non_array:srcFormat="astc-6x5-unorm-srgb";dstFormat="astc-6x5-unorm";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,non_array:srcFormat="astc-6x6-unorm";dstFormat="astc-6x6-unorm-srgb";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,non_array:srcFormat="astc-6x6-unorm-srgb";dstFormat="astc-6x6-unorm";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,non_array:srcFormat="astc-8x5-unorm";dstFormat="astc-8x5-unorm-srgb";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,non_array:srcFormat="astc-8x5-unorm-srgb";dstFormat="astc-8x5-unorm";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,non_array:srcFormat="astc-8x6-unorm";dstFormat="astc-8x6-unorm-srgb";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,non_array:srcFormat="astc-8x6-unorm-srgb";dstFormat="astc-8x6-unorm";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,non_array:srcFormat="astc-8x8-unorm";dstFormat="astc-8x8-unorm-srgb";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,non_array:srcFormat="astc-8x8-unorm-srgb";dstFormat="astc-8x8-unorm";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,array:srcFormat="bc1-rgba-unorm";dstFormat="bc1-rgba-unorm-srgb";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,array:srcFormat="bc1-rgba-unorm-srgb";dstFormat="bc1-rgba-unorm";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,array:srcFormat="bc2-rgba-unorm";dstFormat="bc2-rgba-unorm-srgb";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,array:srcFormat="bc2-rgba-unorm-srgb";dstFormat="bc2-rgba-unorm";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,array:srcFormat="bc3-rgba-unorm";dstFormat="bc3-rgba-unorm-srgb";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,array:srcFormat="bc3-rgba-unorm-srgb";dstFormat="bc3-rgba-unorm";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,array:srcFormat="bc7-rgba-unorm";dstFormat="bc7-rgba-unorm-srgb";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,array:srcFormat="bc7-rgba-unorm-srgb";dstFormat="bc7-rgba-unorm";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,array:srcFormat="etc2-rgb8a1unorm";dstFormat="etc2-rgb8a1unorm-srgb";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,array:srcFormat="etc2-rgb8a1unorm-srgb";dstFormat="etc2-rgb8a1unorm";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,array:srcFormat="etc2-rgb8unorm";dstFormat="etc2-rgb8unorm-srgb";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,array:srcFormat="etc2-rgb8unorm-srgb";dstFormat="etc2-rgb8unorm";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,array:srcFormat="etc2-rgba8unorm";dstFormat="etc2-rgba8unorm-srgb";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,array:srcFormat="etc2-rgba8unorm-srgb";dstFormat="etc2-rgba8unorm";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,non_array:srcFormat="bc1-rgba-unorm";dstFormat="bc1-rgba-unorm-srgb";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,non_array:srcFormat="bc1-rgba-unorm-srgb";dstFormat="bc1-rgba-unorm";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,non_array:srcFormat="bc2-rgba-unorm";dstFormat="bc2-rgba-unorm-srgb";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,non_array:srcFormat="bc2-rgba-unorm-srgb";dstFormat="bc2-rgba-unorm";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,non_array:srcFormat="bc3-rgba-unorm";dstFormat="bc3-rgba-unorm-srgb";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,non_array:srcFormat="bc3-rgba-unorm-srgb";dstFormat="bc3-rgba-unorm";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,non_array:srcFormat="bc7-rgba-unorm";dstFormat="bc7-rgba-unorm-srgb";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,non_array:srcFormat="bc7-rgba-unorm-srgb";dstFormat="bc7-rgba-unorm";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,non_array:srcFormat="etc2-rgb8a1unorm";dstFormat="etc2-rgb8a1unorm-srgb";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,non_array:srcFormat="etc2-rgb8a1unorm-srgb";dstFormat="etc2-rgb8a1unorm";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,non_array:srcFormat="etc2-rgb8unorm";dstFormat="etc2-rgb8unorm-srgb";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,non_array:srcFormat="etc2-rgb8unorm-srgb";dstFormat="etc2-rgb8unorm";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,non_array:srcFormat="etc2-rgba8unorm";dstFormat="etc2-rgba8unorm-srgb";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,non_array:srcFormat="etc2-rgba8unorm-srgb";dstFormat="etc2-rgba8unorm";dimension="2d" [ Failure ]
+crbug.com/dawn/0000 [ bigsur ] webgpu:api,operation,command_buffer,copyTextureToTexture:copy_depth_stencil:format="depth24plus" [ Failure ]
+crbug.com/dawn/0000 [ bigsur ] webgpu:api,operation,command_buffer,copyTextureToTexture:copy_depth_stencil:format="depth24plus-stencil8" [ Failure ]
################################################################################
# entry_point_name_must_match failures
-# KEEP
################################################################################
crbug.com/dawn/1345 webgpu:api,validation,createComputePipeline:entry_point_name_must_match:isAsync=false;shaderModuleEntryPoint="main";stageEntryPoint="main%5Cu0000" [ Failure ]
crbug.com/dawn/1345 webgpu:api,validation,createComputePipeline:entry_point_name_must_match:isAsync=false;shaderModuleEntryPoint="main";stageEntryPoint="main%5Cu0000a" [ Failure ]
+crbug.com/dawn/1345 webgpu:api,validation,createComputePipeline:entry_point_name_must_match:isAsync=false;shaderModuleEntryPoint="s%C3%A9quen%C3%A7age";stageEntryPoint="s%C3%A9quen%C3%A7age" [ Failure ]
crbug.com/dawn/1345 webgpu:api,validation,createComputePipeline:entry_point_name_must_match:isAsync=true;shaderModuleEntryPoint="main";stageEntryPoint="main%5Cu0000" [ Failure ]
crbug.com/dawn/1345 webgpu:api,validation,createComputePipeline:entry_point_name_must_match:isAsync=true;shaderModuleEntryPoint="main";stageEntryPoint="main%5Cu0000a" [ Failure ]
-crbug.com/dawn/1345 webgpu:api,validation,createComputePipeline:entry_point_name_must_match:isAsync=false;shaderModuleEntryPoint="s%C3%A9quen%C3%A7age";* [ Failure ]
-crbug.com/dawn/1345 webgpu:api,validation,createComputePipeline:entry_point_name_must_match:isAsync=true;shaderModuleEntryPoint="s%C3%A9quen%C3%A7age";* [ Failure ]
-
-################################################################################
-# windows draw failures
-# KEEP
-################################################################################
-crbug.com/dawn/0000 [ win ] webgpu:api,operation,rendering,draw:arguments:first=0;count=3;first_instance=0;instance_count=1;indexed=true;indirect=true;vertex_buffer_offset=0;index_buffer_offset=0;base_vertex=9 [ Failure ]
-crbug.com/dawn/0000 [ win ] webgpu:api,operation,rendering,draw:arguments:first=0;count=3;first_instance=0;instance_count=1;indexed=true;indirect=true;vertex_buffer_offset=0;index_buffer_offset=16;base_vertex=9 [ Failure ]
-crbug.com/dawn/0000 [ win ] webgpu:api,operation,rendering,draw:arguments:first=0;count=3;first_instance=0;instance_count=1;indexed=true;indirect=true;vertex_buffer_offset=32;index_buffer_offset=0;base_vertex=9 [ Failure ]
-crbug.com/dawn/0000 [ win ] webgpu:api,operation,rendering,draw:arguments:first=0;count=3;first_instance=0;instance_count=1;indexed=true;indirect=true;vertex_buffer_offset=32;index_buffer_offset=16;base_vertex=9 [ Failure ]
-crbug.com/dawn/0000 [ win ] webgpu:api,operation,rendering,draw:arguments:first=0;count=3;first_instance=0;instance_count=4;indexed=true;indirect=true;vertex_buffer_offset=0;index_buffer_offset=0;base_vertex=9 [ Failure ]
-crbug.com/dawn/0000 [ win ] webgpu:api,operation,rendering,draw:arguments:first=0;count=3;first_instance=0;instance_count=4;indexed=true;indirect=true;vertex_buffer_offset=0;index_buffer_offset=16;base_vertex=9 [ Failure ]
-crbug.com/dawn/0000 [ win ] webgpu:api,operation,rendering,draw:arguments:first=0;count=3;first_instance=0;instance_count=4;indexed=true;indirect=true;vertex_buffer_offset=32;index_buffer_offset=0;base_vertex=9 [ Failure ]
-crbug.com/dawn/0000 [ win ] webgpu:api,operation,rendering,draw:arguments:first=0;count=3;first_instance=0;instance_count=4;indexed=true;indirect=true;vertex_buffer_offset=32;index_buffer_offset=16;base_vertex=9 [ Failure ]
-crbug.com/dawn/0000 [ win ] webgpu:api,operation,rendering,draw:arguments:first=0;count=6;first_instance=0;instance_count=1;indexed=true;indirect=true;vertex_buffer_offset=0;index_buffer_offset=0;base_vertex=9 [ Failure ]
-crbug.com/dawn/0000 [ win ] webgpu:api,operation,rendering,draw:arguments:first=0;count=6;first_instance=0;instance_count=1;indexed=true;indirect=true;vertex_buffer_offset=0;index_buffer_offset=16;base_vertex=9 [ Failure ]
-crbug.com/dawn/0000 [ win ] webgpu:api,operation,rendering,draw:arguments:first=0;count=6;first_instance=0;instance_count=1;indexed=true;indirect=true;vertex_buffer_offset=32;index_buffer_offset=0;base_vertex=9 [ Failure ]
-crbug.com/dawn/0000 [ win ] webgpu:api,operation,rendering,draw:arguments:first=0;count=6;first_instance=0;instance_count=1;indexed=true;indirect=true;vertex_buffer_offset=32;index_buffer_offset=16;base_vertex=9 [ Failure ]
-crbug.com/dawn/0000 [ win ] webgpu:api,operation,rendering,draw:arguments:first=0;count=6;first_instance=0;instance_count=4;indexed=true;indirect=true;vertex_buffer_offset=0;index_buffer_offset=0;base_vertex=9 [ Failure ]
-crbug.com/dawn/0000 [ win ] webgpu:api,operation,rendering,draw:arguments:first=0;count=6;first_instance=0;instance_count=4;indexed=true;indirect=true;vertex_buffer_offset=0;index_buffer_offset=16;base_vertex=9 [ Failure ]
-crbug.com/dawn/0000 [ win ] webgpu:api,operation,rendering,draw:arguments:first=0;count=6;first_instance=0;instance_count=4;indexed=true;indirect=true;vertex_buffer_offset=32;index_buffer_offset=0;base_vertex=9 [ Failure ]
-crbug.com/dawn/0000 [ win ] webgpu:api,operation,rendering,draw:arguments:first=0;count=6;first_instance=0;instance_count=4;indexed=true;indirect=true;vertex_buffer_offset=32;index_buffer_offset=16;base_vertex=9 [ Failure ]
-crbug.com/dawn/0000 [ win ] webgpu:api,operation,rendering,draw:arguments:first=3;count=3;first_instance=0;instance_count=1;indexed=false;indirect=true;vertex_buffer_offset=0;index_buffer_offset="_undef_";base_vertex="_undef_" [ Failure ]
-crbug.com/dawn/0000 [ win ] webgpu:api,operation,rendering,draw:arguments:first=3;count=3;first_instance=0;instance_count=1;indexed=false;indirect=true;vertex_buffer_offset=32;index_buffer_offset="_undef_";base_vertex="_undef_" [ Failure ]
-crbug.com/dawn/0000 [ win ] webgpu:api,operation,rendering,draw:arguments:first=3;count=3;first_instance=0;instance_count=1;indexed=true;indirect=true;vertex_buffer_offset=0;index_buffer_offset=0;base_vertex=9 [ Failure ]
-crbug.com/dawn/0000 [ win ] webgpu:api,operation,rendering,draw:arguments:first=3;count=3;first_instance=0;instance_count=1;indexed=true;indirect=true;vertex_buffer_offset=0;index_buffer_offset=16;base_vertex=9 [ Failure ]
-crbug.com/dawn/0000 [ win ] webgpu:api,operation,rendering,draw:arguments:first=3;count=3;first_instance=0;instance_count=1;indexed=true;indirect=true;vertex_buffer_offset=32;index_buffer_offset=0;base_vertex=9 [ Failure ]
-crbug.com/dawn/0000 [ win ] webgpu:api,operation,rendering,draw:arguments:first=3;count=3;first_instance=0;instance_count=1;indexed=true;indirect=true;vertex_buffer_offset=32;index_buffer_offset=16;base_vertex=9 [ Failure ]
-crbug.com/dawn/0000 [ win ] webgpu:api,operation,rendering,draw:arguments:first=3;count=3;first_instance=0;instance_count=4;indexed=false;indirect=true;vertex_buffer_offset=0;index_buffer_offset="_undef_";base_vertex="_undef_" [ Failure ]
-crbug.com/dawn/0000 [ win ] webgpu:api,operation,rendering,draw:arguments:first=3;count=3;first_instance=0;instance_count=4;indexed=false;indirect=true;vertex_buffer_offset=32;index_buffer_offset="_undef_";base_vertex="_undef_" [ Failure ]
-crbug.com/dawn/0000 [ win ] webgpu:api,operation,rendering,draw:arguments:first=3;count=3;first_instance=0;instance_count=4;indexed=true;indirect=true;vertex_buffer_offset=0;index_buffer_offset=0;base_vertex=9 [ Failure ]
-crbug.com/dawn/0000 [ win ] webgpu:api,operation,rendering,draw:arguments:first=3;count=3;first_instance=0;instance_count=4;indexed=true;indirect=true;vertex_buffer_offset=0;index_buffer_offset=16;base_vertex=9 [ Failure ]
-crbug.com/dawn/0000 [ win ] webgpu:api,operation,rendering,draw:arguments:first=3;count=3;first_instance=0;instance_count=4;indexed=true;indirect=true;vertex_buffer_offset=32;index_buffer_offset=0;base_vertex=9 [ Failure ]
-crbug.com/dawn/0000 [ win ] webgpu:api,operation,rendering,draw:arguments:first=3;count=3;first_instance=0;instance_count=4;indexed=true;indirect=true;vertex_buffer_offset=32;index_buffer_offset=16;base_vertex=9 [ Failure ]
-crbug.com/dawn/0000 [ win ] webgpu:api,operation,rendering,draw:arguments:first=3;count=6;first_instance=0;instance_count=1;indexed=false;indirect=true;vertex_buffer_offset=0;index_buffer_offset="_undef_";base_vertex="_undef_" [ Failure ]
-crbug.com/dawn/0000 [ win ] webgpu:api,operation,rendering,draw:arguments:first=3;count=6;first_instance=0;instance_count=1;indexed=false;indirect=true;vertex_buffer_offset=32;index_buffer_offset="_undef_";base_vertex="_undef_" [ Failure ]
-crbug.com/dawn/0000 [ win ] webgpu:api,operation,rendering,draw:arguments:first=3;count=6;first_instance=0;instance_count=1;indexed=true;indirect=true;vertex_buffer_offset=0;index_buffer_offset=0;base_vertex=9 [ Failure ]
-crbug.com/dawn/0000 [ win ] webgpu:api,operation,rendering,draw:arguments:first=3;count=6;first_instance=0;instance_count=1;indexed=true;indirect=true;vertex_buffer_offset=0;index_buffer_offset=16;base_vertex=9 [ Failure ]
-crbug.com/dawn/0000 [ win ] webgpu:api,operation,rendering,draw:arguments:first=3;count=6;first_instance=0;instance_count=1;indexed=true;indirect=true;vertex_buffer_offset=32;index_buffer_offset=0;base_vertex=9 [ Failure ]
-crbug.com/dawn/0000 [ win ] webgpu:api,operation,rendering,draw:arguments:first=3;count=6;first_instance=0;instance_count=1;indexed=true;indirect=true;vertex_buffer_offset=32;index_buffer_offset=16;base_vertex=9 [ Failure ]
-crbug.com/dawn/0000 [ win ] webgpu:api,operation,rendering,draw:arguments:first=3;count=6;first_instance=0;instance_count=4;indexed=false;indirect=true;vertex_buffer_offset=0;index_buffer_offset="_undef_";base_vertex="_undef_" [ Failure ]
-crbug.com/dawn/0000 [ win ] webgpu:api,operation,rendering,draw:arguments:first=3;count=6;first_instance=0;instance_count=4;indexed=false;indirect=true;vertex_buffer_offset=32;index_buffer_offset="_undef_";base_vertex="_undef_" [ Failure ]
-crbug.com/dawn/0000 [ win ] webgpu:api,operation,rendering,draw:arguments:first=3;count=6;first_instance=0;instance_count=4;indexed=true;indirect=true;vertex_buffer_offset=0;index_buffer_offset=0;base_vertex=9 [ Failure ]
-crbug.com/dawn/0000 [ win ] webgpu:api,operation,rendering,draw:arguments:first=3;count=6;first_instance=0;instance_count=4;indexed=true;indirect=true;vertex_buffer_offset=0;index_buffer_offset=16;base_vertex=9 [ Failure ]
-crbug.com/dawn/0000 [ win ] webgpu:api,operation,rendering,draw:arguments:first=3;count=6;first_instance=0;instance_count=4;indexed=true;indirect=true;vertex_buffer_offset=32;index_buffer_offset=0;base_vertex=9 [ Failure ]
-crbug.com/dawn/0000 [ win ] webgpu:api,operation,rendering,draw:arguments:first=3;count=6;first_instance=0;instance_count=4;indexed=true;indirect=true;vertex_buffer_offset=32;index_buffer_offset=16;base_vertex=9 [ Failure ]
+crbug.com/dawn/1345 webgpu:api,validation,createComputePipeline:entry_point_name_must_match:isAsync=true;shaderModuleEntryPoint="s%C3%A9quen%C3%A7age";stageEntryPoint="s%C3%A9quen%C3%A7age" [ Failure ]
################################################################################
# external_texture failures
@@ -388,48 +240,14 @@ crbug.com/dawn/0000 webgpu:web_platform,external_texture,* [ Failure ]
################################################################################
# webgpu:api,operation,buffers failures
-# KEEP
################################################################################
+crbug.com/dawn/0000 webgpu:api,operation,buffers,map_oom:mappedAtCreation,full_getMappedRange:oom=true;size=137438953472 [ Failure ]
+crbug.com/dawn/0000 webgpu:api,operation,buffers,map_oom:mappedAtCreation,full_getMappedRange:oom=true;size=9007199254740984 [ Failure ]
crbug.com/dawn/0000 webgpu:api,operation,buffers,map_oom:mappedAtCreation,smaller_getMappedRange:oom=true;size=137438953472 [ Failure ]
crbug.com/dawn/0000 webgpu:api,operation,buffers,map_oom:mappedAtCreation,smaller_getMappedRange:oom=true;size=9007199254740984 [ Failure ]
-crbug.com/dawn/0000 [ mac ] webgpu:api,operation,buffers,map_oom:mappedAtCreation,full_getMappedRange:oom=true;size=137438953472 [ Failure ]
-crbug.com/dawn/0000 [ win ] webgpu:api,operation,buffers,map_oom:mappedAtCreation,full_getMappedRange:oom=true;size=137438953472 [ Failure ]
-crbug.com/dawn/0000 [ mac ] webgpu:api,operation,buffers,map_oom:mappedAtCreation,full_getMappedRange:oom=true;size=9007199254740984 [ Failure ]
-crbug.com/dawn/0000 [ win ] webgpu:api,operation,buffers,map_oom:mappedAtCreation,full_getMappedRange:oom=true;size=9007199254740984 [ Failure ]
-crbug.com/dawn/0000 [ angle-opengl linux ] webgpu:api,operation,buffers,map_oom:mappedAtCreation,full_getMappedRange:oom=true;size=137438953472 [ Failure ]
-crbug.com/dawn/0000 [ angle-opengl linux ] webgpu:api,operation,buffers,map_oom:mappedAtCreation,full_getMappedRange:oom=true;size=9007199254740984 [ Failure ]
-crbug.com/dawn/0000 [ angle-disabled linux ] webgpu:api,operation,buffers,map_oom:mappedAtCreation,full_getMappedRange:* [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,buffers,map:* [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,buffers,map_ArrayBuffer:* [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,buffers,map_detach:* [ Failure ]
-crbug.com/dawn/0000 [ linux ] worker_webgpu:api,operation,buffers,map:mapAsync,read,typedArrayAccess:mapAsyncRegionLeft="default-expand";mapAsyncRegionRight="minimal" [ Failure ]
-
-################################################################################
-# token validation failures
-# KEEP
-################################################################################
-crbug.com/dawn/0000 webgpu:shader,validation,tokens:identifiers:ident="enable" [ Failure ]
-crbug.com/dawn/0000 webgpu:shader,validation,tokens:null_characters:contains_null=true;placement="comment" [ Failure ]
-crbug.com/dawn/0000 webgpu:shader,validation,tokens:null_characters:contains_null=true;placement="eol" [ Failure ]
-
-################################################################################
-# request device limits failures
-# KEEP
-################################################################################
-crbug.com/dawn/0000 webgpu:api,operation,adapter,requestDevice_limits:worse_than_default:limit="minStorageBufferOffsetAlignment" [ Failure ]
-crbug.com/dawn/0000 webgpu:api,operation,adapter,requestDevice_limits:worse_than_default:limit="minUniformBufferOffsetAlignment" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,adapter,requestDevice_limits:better_than_supported:* [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,adapter,requestDevice_limits:supported_limits:* [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,adapter,requestDevice_limits:worse_than_default:limit="maxComputeInvocationsPerWorkgroup" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,adapter,requestDevice_limits:worse_than_default:limit="maxComputeWorkgroupStorageSize" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,adapter,requestDevice_limits:worse_than_default:limit="maxInterStageShaderComponents" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,adapter,requestDevice_limits:worse_than_default:limit="maxStorageBuffersPerShaderStage" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,adapter,requestDevice_limits:worse_than_default:limit="maxTextureDimension2D" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,adapter,requestDevice_limits:worse_than_default:limit="maxVertexAttributes" [ Failure ]
################################################################################
# texel_data failures
-# KEEP
################################################################################
crbug.com/dawn/0000 webgpu:util,texture,texel_data:ufloat_texel_data_in_shader:* [ Failure ]
crbug.com/dawn/0000 webgpu:util,texture,texel_data:unorm_texel_data_in_shader:format="bgra8unorm-srgb" [ Failure ]
@@ -444,73 +262,61 @@ crbug.com/dawn/0000 webgpu:api,operation,rendering,depth_clip_clamp:depth_test_i
################################################################################
# compilation_info failures
-# KEEP
################################################################################
-crbug.com/dawn/0000 webgpu:api,operation,shader_module,compilation_info:offset_and_length:valid=false;unicode=true [ Failure ]
+crbug.com/dawn/1357 webgpu:api,operation,shader_module,compilation_info:offset_and_length:valid=false;unicode=true [ Failure ]
################################################################################
# createBuffer_invalid_and_oom failures
-# KEEP
################################################################################
crbug.com/dawn/0000 webgpu:api,validation,buffer,create:createBuffer_invalid_and_oom: [ Failure ]
################################################################################
-# CopyExternalImageToTexture failures
-# KEEP
-################################################################################
-crbug.com/dawn/0000 webgpu:api,validation,queue,copyToTexture,CopyExternalImageToTexture:* [ Failure ]
-
-################################################################################
-# createTexture validation failures
-# KEEP
+# atan2 shader execution failures
+# Very slow, with many failures. Skip for now.
################################################################################
-crbug.com/dawn/0000 [ linux ] webgpu:api,validation,createTexture:sampleCount,valid_sampleCount_with_other_parameter_varies:dimension="_undef_" [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,validation,createTexture:sampleCount,valid_sampleCount_with_other_parameter_varies:dimension="2d" [ RetryOnFailure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,validation,createTexture:texture_size,2d_texture,compressed_format:dimension="2d";format="astc-4x4-unorm-srgb";size=[1,8192,1] [ RetryOnFailure ]
-crbug.com/dawn/0000 [ mac ] webgpu:api,validation,createTexture:mipLevelCount,format:dimension="_undef_" [ Failure ]
-crbug.com/dawn/0000 [ mac ] webgpu:api,validation,createTexture:mipLevelCount,format:dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ mac ] webgpu:api,validation,createTexture:mipLevelCount,format:dimension="3d" [ Failure ]
-crbug.com/dawn/0000 [ mac ] webgpu:api,validation,createTexture:sampleCount,valid_sampleCount_with_other_parameter_varies:dimension="_undef_" [ Failure ]
-crbug.com/dawn/0000 [ mac ] webgpu:api,validation,createTexture:sampleCount,valid_sampleCount_with_other_parameter_varies:dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ mac ] webgpu:api,validation,createTexture:sampleCount,valid_sampleCount_with_other_parameter_varies:dimension="3d" [ Failure ]
-crbug.com/dawn/0000 [ win ] webgpu:api,validation,createTexture:mipLevelCount,format:dimension="3d" [ RetryOnFailure ]
+crbug.com/tint/1524 [ win ] webgpu:shader,execution,expression,call,builtin,atan2:* [ Skip ]
################################################################################
# shader execution failures
-# KEEP
-################################################################################
-crbug.com/dawn/0000 [ mac ] webgpu:shader,execution,expression,call,builtin,atan2:* [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:shader,execution,expression,call,builtin,countTrailingZeros:* [ Failure ]
-crbug.com/dawn/0000 [ win ] webgpu:shader,execution,expression,call,builtin,extractBits:* [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:shader,execution,expression,call,builtin,firstTrailingBit:* [ Failure ]
-crbug.com/dawn/0000 [ win ] webgpu:shader,execution,expression,call,builtin,insertBits:* [ Failure ]
-crbug.com/dawn/0000 [ win ] webgpu:shader,execution,expression,call,builtin,ldexp:* [ Failure ]
-crbug.com/tint/1500 [ nvidia release win ] webgpu:shader,execution,shader_io,compute_builtins:inputs:* [ Failure ]
-crbug.com/1342945 [ linux intel-0x9bc5 ] webgpu:shader,execution,expression,call,builtin,float_built_functions:float_builtin_functions* [ Failure ]
-
-################################################################################
-# vertex_state failures
-# KEEP
################################################################################
-crbug.com/dawn/0000 webgpu:api,validation,vertex_state:vertex_shader_input_location_limit: [ Failure ]
-
-################################################################################
-# webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,non_compressed failures
-# KEEP
-################################################################################
-crbug.com/dawn/0000 [ mac ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,non_compressed,* [ Failure ]
-crbug.com/dawn/0000 [ win ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,non_compressed,* [ Failure ]
-
-################################################################################
-# crbug.com/dawn/1364 Add validation that render attachments must be 2D (array) textures
-# KEEP
-################################################################################
-crbug.com/dawn/1364 webgpu:api,validation,createTexture:texture_usage:* [ Failure ]
+crbug.com/dawn/0000 [ intel ubuntu ] webgpu:shader,execution,expression,call,builtin,countTrailingZeros:* [ Failure ]
+crbug.com/dawn/0000 [ win10 ] webgpu:shader,execution,expression,call,builtin,extractBits:* [ Failure ]
+crbug.com/dawn/0000 [ intel ubuntu ] webgpu:shader,execution,expression,call,builtin,firstTrailingBit:* [ Failure ]
+crbug.com/dawn/0000 [ win10 ] webgpu:shader,execution,expression,call,builtin,insertBits:* [ Failure ]
+crbug.com/dawn/0000 [ win10 ] webgpu:shader,execution,expression,call,builtin,ldexp:* [ Failure ]
+crbug.com/tint/1500 [ nvidia-0x2184 win10 ] webgpu:shader,execution,shader_io,compute_builtins:inputs:method="mixed";dispatch="indirect";groupSize={"x":1,"y":1,"z":1};numGroups={"x":1,"y":1,"z":1} [ Failure ]
+crbug.com/tint/1500 [ nvidia-0x2184 win10 ] webgpu:shader,execution,shader_io,compute_builtins:inputs:method="mixed";dispatch="indirect";groupSize={"x":1,"y":1,"z":1};numGroups={"x":8,"y":4,"z":2} [ Failure ]
+crbug.com/tint/1500 [ nvidia-0x2184 win10 ] webgpu:shader,execution,shader_io,compute_builtins:inputs:method="mixed";dispatch="indirect";groupSize={"x":3,"y":7,"z":5};numGroups={"x":13,"y":9,"z":11} [ Failure ]
+crbug.com/tint/1500 [ nvidia-0x2184 win10 ] webgpu:shader,execution,shader_io,compute_builtins:inputs:method="mixed";dispatch="indirect";groupSize={"x":8,"y":4,"z":2};numGroups={"x":1,"y":1,"z":1} [ Failure ]
+crbug.com/tint/1500 [ nvidia-0x2184 win10 ] webgpu:shader,execution,shader_io,compute_builtins:inputs:method="param";dispatch="indirect";groupSize={"x":1,"y":1,"z":1};numGroups={"x":1,"y":1,"z":1} [ Failure ]
+crbug.com/tint/1500 [ nvidia-0x2184 win10 ] webgpu:shader,execution,shader_io,compute_builtins:inputs:method="param";dispatch="indirect";groupSize={"x":1,"y":1,"z":1};numGroups={"x":8,"y":4,"z":2} [ Failure ]
+crbug.com/tint/1500 [ nvidia-0x2184 win10 ] webgpu:shader,execution,shader_io,compute_builtins:inputs:method="param";dispatch="indirect";groupSize={"x":3,"y":7,"z":5};numGroups={"x":13,"y":9,"z":11} [ Failure ]
+crbug.com/tint/1500 [ nvidia-0x2184 win10 ] webgpu:shader,execution,shader_io,compute_builtins:inputs:method="param";dispatch="indirect";groupSize={"x":8,"y":4,"z":2};numGroups={"x":1,"y":1,"z":1} [ Failure ]
+crbug.com/tint/1500 [ nvidia-0x2184 win10 ] webgpu:shader,execution,shader_io,compute_builtins:inputs:method="struct";dispatch="indirect";groupSize={"x":1,"y":1,"z":1};numGroups={"x":1,"y":1,"z":1} [ Failure ]
+crbug.com/tint/1500 [ nvidia-0x2184 win10 ] webgpu:shader,execution,shader_io,compute_builtins:inputs:method="struct";dispatch="indirect";groupSize={"x":1,"y":1,"z":1};numGroups={"x":8,"y":4,"z":2} [ Failure ]
+crbug.com/tint/1500 [ nvidia-0x2184 win10 ] webgpu:shader,execution,shader_io,compute_builtins:inputs:method="struct";dispatch="indirect";groupSize={"x":3,"y":7,"z":5};numGroups={"x":13,"y":9,"z":11} [ Failure ]
+crbug.com/tint/1500 [ nvidia-0x2184 win10 ] webgpu:shader,execution,shader_io,compute_builtins:inputs:method="struct";dispatch="indirect";groupSize={"x":8,"y":4,"z":2};numGroups={"x":1,"y":1,"z":1} [ Failure ]
+crbug.com/tint/0000 webgpu:shader,validation,parse,blankspace:null_characters:contains_null=true;placement="comment" [ Failure ]
+crbug.com/tint/0000 webgpu:shader,validation,parse,blankspace:null_characters:contains_null=true;placement="eol" [ Failure ]
+crbug.com/tint/0000 webgpu:shader,validation,parse,identifiers:identifiers:* [ Failure ]
+crbug.com/tint/1502 webgpu:shader,validation,parse,literal:abstract_float:val=".1h" [ Failure ]
+crbug.com/tint/1502 webgpu:shader,validation,parse,literal:abstract_float:val="0X3h" [ Failure ]
+crbug.com/tint/1502 webgpu:shader,validation,parse,literal:abstract_float:val="0h" [ Failure ]
+crbug.com/tint/1502 webgpu:shader,validation,parse,literal:abstract_float:val="0x3.2p%2B2h" [ Failure ]
+crbug.com/tint/1502 webgpu:shader,validation,parse,literal:abstract_float:val="0x3p%2B2h" [ Failure ]
+crbug.com/tint/1502 webgpu:shader,validation,parse,literal:abstract_float:val="0xEp-2h" [ Failure ]
+crbug.com/tint/1502 webgpu:shader,validation,parse,literal:abstract_float:val="0xep2h" [ Failure ]
+crbug.com/tint/1502 webgpu:shader,validation,parse,literal:abstract_float:val="1.1E%2B2h" [ Failure ]
+crbug.com/tint/1502 webgpu:shader,validation,parse,literal:abstract_float:val="1.1e2h" [ Failure ]
+crbug.com/tint/1502 webgpu:shader,validation,parse,literal:abstract_float:val="1h" [ Failure ]
+crbug.com/tint/1502 webgpu:shader,validation,parse,literal:abstract_float:val="2.4e-2h" [ Failure ]
+crbug.com/tint/1525 webgpu:shader,validation,parse,literal:u32:val="-2147483648" [ Failure ]
################################################################################
# Failures from upgrading Intel Mac Minis to 12.4
# KEEP
################################################################################
+crbug.com/dawn/1461 [ monterey intel ] webgpu:api,operation,command_buffer,copyTextureToTexture:copy_depth_stencil:format="depth16unorm" [ Failure ]
crbug.com/dawn/1461 [ monterey intel ] webgpu:api,operation,command_buffer,copyTextureToTexture:copy_depth_stencil:format="depth24plus" [ Failure ]
crbug.com/dawn/1461 [ monterey intel ] webgpu:api,operation,command_buffer,copyTextureToTexture:copy_depth_stencil:format="depth24plus-stencil8" [ Failure ]
@@ -518,42 +324,19 @@ crbug.com/dawn/1461 [ monterey intel ] webgpu:api,operation,command_buffer,copyT
# untriaged failures
# KEEP
################################################################################
-crbug.com/dawn/0000 webgpu:api,validation,createBindGroupLayout:max_resources_per_stage,in_bind_group_layout:* [ Failure ]
-crbug.com/dawn/0000 webgpu:api,validation,encoding,cmds,copyTextureToTexture:texture_format_compatibility: [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,basic:* [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,copyBufferToBuffer:* [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:shader,execution,robust_access:linear_memory:storageClass="workgroup";access="read";containerType="vector";isAtomic=false [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:shader,execution,robust_access:linear_memory:storageClass="workgroup";access="write";containerType="vector";isAtomic=false [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:web_platform,canvas,readbackFromWebGPUCanvas:* [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:web_platform,copyToTexture,canvas:copy_contents_from_2d_context_canvas:* [ Failure ]
-crbug.com/dawn/0000 [ linux ] webgpu:web_platform,copyToTexture,canvas:copy_contents_from_gl_context_canvas:* [ Failure ]
crbug.com/dawn/0000 [ mac ] webgpu:api,operation,command_buffer,copyTextureToTexture:copy_depth_stencil:format="depth32float" [ Failure ]
-crbug.com/dawn/0000 [ mac ] webgpu:api,operation,render_pass,storeOp:render_pass_store_op,color_attachment_only:colorFormat="r8unorm";* [ Failure ]
-crbug.com/dawn/0000 [ mac ] webgpu:api,operation,render_pass,storeOp:render_pass_store_op,color_attachment_only:colorFormat="rg8unorm";* [ Failure ]
-crbug.com/dawn/0000 [ mac ] webgpu:api,validation,image_copy,texture_related:format:* [ Failure ]
-crbug.com/dawn/0000 [ mac ] webgpu:web_platform,canvas,readbackFromWebGPUCanvas:* [ Failure ]
-crbug.com/dawn/0000 [ mac ] worker_webgpu:api,operation,render_pass,storeOp:render_pass_store_op,color_attachment_only:colorFormat="r8unorm";storeOperation="discard" [ Failure ]
-crbug.com/dawn/0000 [ mac ] worker_webgpu:api,operation,render_pass,storeOp:render_pass_store_op,color_attachment_only:colorFormat="r8unorm";storeOperation="store" [ Failure ]
-crbug.com/dawn/0000 [ mac ] worker_webgpu:api,operation,render_pass,storeOp:render_pass_store_op,color_attachment_only:colorFormat="rg8unorm";storeOperation="discard" [ Failure ]
-crbug.com/dawn/0000 [ mac ] worker_webgpu:api,operation,render_pass,storeOp:render_pass_store_op,color_attachment_only:colorFormat="rg8unorm";storeOperation="store" [ Failure ]
crbug.com/dawn/0000 [ win ] webgpu:api,operation,render_pass,resolve:* [ Failure ]
crbug.com/dawn/0000 [ win ] webgpu:api,operation,render_pipeline,pipeline_output_targets:color,component_count,blend:format="rg8unorm" [ Failure ]
-crbug.com/dawn/0000 [ win ] webgpu:shader,execution,robust_access_vertex:vertex_buffer_access:indexed=true;indirect=true;drawCallTestParameter="baseVertex";* [ Failure ]
-
-################################################################################
-# vertex_state flakes
-# KEEP
-################################################################################
-crbug.com/dawn/0000 webgpu:api,validation,vertex_state:vertex_attribute_contained_in_stride:* [ RetryOnFailure ]
-crbug.com/dawn/0000 webgpu:api,validation,vertex_state:vertex_attribute_shaderLocation_unique: [ RetryOnFailure ]
+crbug.com/dawn/0000 [ mac ] webgpu:web_platform,canvas,readbackFromWebGPUCanvas:* [ Failure ]
+crbug.com/dawn/0000 [ mac ] worker_webgpu:api,operation,render_pass,storeOp:render_pass_store_op,depth_stencil_attachment_only:depthStencilFormat="stencil8";* [ Failure ]
################################################################################
# webgpu:shader,execution,memory_model
# Flakes / slow tests
# KEEP
################################################################################
-crbug.com/dawn/0000 [ linux ] webgpu:shader,execution,memory_model,* [ RetryOnFailure Slow ]
-crbug.com/dawn/0000 [ mac ] webgpu:shader,execution,memory_model,* [ RetryOnFailure Slow ]
+crbug.com/dawn/0000 [ linux ] webgpu:shader,execution,memory_model,* [ Slow ]
+crbug.com/dawn/0000 [ mac ] webgpu:shader,execution,memory_model,* [ Slow ]
crbug.com/dawn/0000 [ win ] webgpu:shader,execution,memory_model,* [ Failure ]
################################################################################
@@ -562,72 +345,71 @@ crbug.com/dawn/0000 [ win ] webgpu:shader,execution,memory_model,* [ Failure ]
################################################################################
crbug.com/dawn/0000 webgpu:api,validation,createRenderPipeline:pipeline_output_targets,blend_min_max:isAsync=true;component="alpha" [ Slow ]
crbug.com/dawn/0000 webgpu:api,validation,createRenderPipeline:pipeline_output_targets,blend_min_max:isAsync=true;component="color" [ Slow ]
-crbug.com/dawn/0000 [ win ] webgpu:api,operation,adapter,requestDevice:* [ Slow ]
crbug.com/dawn/0000 [ win ] webgpu:shader,execution,expression,binary,f32_arithmetic:* [ Slow ]
crbug.com/dawn/0000 [ win ] webgpu:shader,execution,expression,binary,f32_logical:* [ Slow ]
-################################################################################
-# untriaged flakes
-# KEEP
-################################################################################
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,image_copy:rowsPerImage_and_bytesPerRow:initMethod="WriteTexture";checkMethod="FullCopyT2B";format="bgra8unorm-srgb";dimension="3d" [ RetryOnFailure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,command_buffer,image_copy:rowsPerImage_and_bytesPerRow:initMethod="WriteTexture";checkMethod="FullCopyT2B";format="eac-rg11unorm";dimension="2d" [ RetryOnFailure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,render_pass,storeOp:render_pass_store_op,multiple_color_attachments:storeOperation1="store";storeOperation2="discard" [ RetryOnFailure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,rendering,blending:GPUBlendComponent:component="alpha";srcFactor="one-minus-dst-alpha";dstFactor="one-minus-src-alpha";operation="add" [ RetryOnFailure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,rendering,blending:GPUBlendComponent:component="alpha";srcFactor="src-alpha-saturated";dstFactor="zero";operation="reverse-subtract" [ RetryOnFailure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,rendering,blending:GPUBlendComponent:component="alpha";srcFactor="zero";dstFactor="constant";operation="subtract" [ RetryOnFailure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,rendering,draw:arguments:first=0;count=0;first_instance=0;instance_count=0;indexed=false;indirect=false;vertex_buffer_offset=32;index_buffer_offset="_undef_";base_vertex="_undef_" [ RetryOnFailure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,resource_init,buffer:copy_buffer_to_texture: [ RetryOnFailure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,resource_init,buffer:copy_texture_to_partial_buffer: [ RetryOnFailure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,operation,resource_init,buffer:index_buffer: [ RetryOnFailure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,validation,attachment_compatibility:render_pass_and_bundle,color_format: [ RetryOnFailure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,validation,attachment_compatibility:render_pass_or_bundle_and_pipeline,color_format:encoderType="render%20pass" [ RetryOnFailure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,validation,buffer,mapping:getMappedRange,state,mappingPending: [ RetryOnFailure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,validation,encoding,cmds,buffer_texture_copies:depth_stencil_format,copy_usage_and_aspect:format="depth32float" [ RetryOnFailure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,validation,image_copy,layout_related:required_bytes_in_copy:method="CopyT2B";format="astc-10x5-unorm-srgb";dimension="2d" [ RetryOnFailure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,validation,image_copy,layout_related:required_bytes_in_copy:method="WriteTexture";format="rgba32uint";dimension="2d" [ RetryOnFailure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,validation,image_copy,layout_related:required_bytes_in_copy:method="WriteTexture";format="rgba8sint";dimension="3d" [ RetryOnFailure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,validation,image_copy,layout_related:required_bytes_in_copy:method="WriteTexture";format="rgba8uint";dimension="1d" [ RetryOnFailure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,validation,image_copy,layout_related:required_bytes_in_copy:method="WriteTexture";format="rgba8unorm";dimension="2d" [ RetryOnFailure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,validation,image_copy,layout_related:rows_per_image_alignment:method="WriteTexture";format="rgba16sint";dimension="3d" [ RetryOnFailure ]
-crbug.com/dawn/0000 [ linux ] webgpu:api,validation,queue,submit:* [ RetryOnFailure ]
-crbug.com/dawn/0000 [ linux ] webgpu:shader,execution,zero_init:compute,zero_init:storageClass="workgroup";workgroupSize=[1,1,48];batch__=21 [ RetryOnFailure ]
-crbug.com/dawn/0000 [ linux ] webgpu:shader,validation,variable_and_const:initializer_type:variableOrConstant="var" [ RetryOnFailure ]
-crbug.com/dawn/0000 [ linux ] webgpu:util,texture,texture_ok:norm:mode="bytes";format="r8unorm" [ RetryOnFailure ]
-crbug.com/dawn/0000 [ mac ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,non_array:srcFormat="bc7-rgba-unorm";dstFormat="bc7-rgba-unorm";dimension="2d" [ RetryOnFailure ]
-crbug.com/dawn/0000 [ mac ] webgpu:api,operation,render_pass,storeOp:render_pass_store_op,color_attachment_only:colorFormat="r16uint";storeOperation="store" [ RetryOnFailure ]
-crbug.com/dawn/0000 [ mac ] webgpu:api,operation,rendering,blending:GPUBlendComponent:component="alpha";srcFactor="one";dstFactor="src-alpha-saturated";operation="add" [ RetryOnFailure ]
-crbug.com/dawn/0000 [ mac ] webgpu:api,validation,attachment_compatibility:render_pass_and_bundle,color_format: [ RetryOnFailure ]
-crbug.com/dawn/0000 [ mac ] webgpu:api,validation,createBindGroup:binding_must_contain_resource_defined_in_layout: [ RetryOnFailure ]
-crbug.com/dawn/0000 [ mac ] webgpu:api,validation,createRenderPipeline:pipeline_output_targets:isAsync=false;format="rgba32sint" [ RetryOnFailure ]
-crbug.com/dawn/0000 [ mac ] webgpu:api,validation,encoding,programmable,pipeline_bind_group_compat:bgl_resource_type_mismatch:encoderType="compute%20pass";call="dispatch";callWithZero=false [ RetryOnFailure ]
-crbug.com/dawn/0000 [ mac ] webgpu:api,validation,encoding,programmable,pipeline_bind_group_compat:bgl_resource_type_mismatch:encoderType="render%20bundle";call="drawIndexedIndirect";callWithZero=false [ RetryOnFailure ]
-crbug.com/dawn/0000 [ mac ] webgpu:api,validation,image_copy,layout_related:required_bytes_in_copy:method="CopyT2B";format="r8snorm";dimension="3d" [ RetryOnFailure ]
-crbug.com/dawn/0000 [ mac ] webgpu:api,validation,image_copy,layout_related:rows_per_image_alignment:method="CopyB2T";format="bc4-r-snorm";dimension="2d" [ RetryOnFailure ]
-crbug.com/dawn/0000 [ mac ] webgpu:shader,execution,evaluation_order:assignment:name="ToArrayFromArray" [ RetryOnFailure ]
-crbug.com/dawn/0000 [ win ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,array:srcFormat="bc7-rgba-unorm-srgb";dstFormat="bc7-rgba-unorm";dimension="2d" [ RetryOnFailure ]
-crbug.com/dawn/0000 [ win ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,non_array:srcFormat="bc7-rgba-unorm-srgb";dstFormat="bc7-rgba-unorm-srgb";dimension="2d" [ RetryOnFailure ]
-crbug.com/dawn/0000 [ win ] webgpu:api,operation,memory_sync,texture,same_subresource:ww:boundary="dispatch";first={"op":"storage","in":"compute-pass-encoder"};second={"op":"storage","in":"compute-pass-encoder"} [ RetryOnFailure ]
-crbug.com/dawn/0000 [ win ] webgpu:api,operation,memory_sync,texture,same_subresource:ww:boundary="pass";first={"op":"storage","in":"compute-pass-encoder"};second={"op":"storage","in":"compute-pass-encoder"} [ RetryOnFailure ]
-crbug.com/dawn/0000 [ win ] webgpu:api,operation,rendering,indirect_draw:basics:isIndexed=true [ RetryOnFailure ]
-crbug.com/dawn/0000 [ win ] webgpu:api,operation,resource_init,buffer:index_buffer: [ RetryOnFailure ]
-crbug.com/dawn/0000 [ win ] webgpu:api,operation,vertex_state,correctness:setVertexBuffer_offset_and_attribute_offset:format="sint32x4" [ RetryOnFailure ]
-crbug.com/dawn/0000 [ win ] webgpu:api,validation,encoding,cmds,render,draw:index_buffer_OOB:bufferSizeInElements=100;bindingSizeInElements=10;drawIndexCount=10;drawType="drawIndexedIndirect" [ RetryOnFailure ]
-crbug.com/dawn/0000 [ win ] webgpu:api,validation,image_copy,layout_related:rows_per_image_alignment:method="CopyT2B";format="rg11b10ufloat";dimension="3d" [ RetryOnFailure ]
-crbug.com/dawn/0000 [ win ] webgpu:api,validation,image_copy,layout_related:rows_per_image_alignment:method="CopyT2B";format="rg16uint";dimension="2d" [ RetryOnFailure ]
-crbug.com/dawn/0000 [ win ] webgpu:api,validation,image_copy,layout_related:rows_per_image_alignment:method="CopyT2B";format="rg32float";dimension="3d" [ RetryOnFailure ]
-crbug.com/dawn/0000 [ win ] webgpu:api,validation,image_copy,layout_related:rows_per_image_alignment:method="WriteTexture";format="rg32sint";dimension="2d" [ RetryOnFailure ]
-crbug.com/dawn/0000 [ win ] webgpu:api,validation,image_copy,layout_related:rows_per_image_alignment:method="WriteTexture";format="rgba16float";dimension="2d" [ RetryOnFailure ]
-crbug.com/dawn/0000 [ win ] webgpu:api,validation,image_copy,texture_related:copy_rectangle:method="CopyT2B";dimension="2d" [ RetryOnFailure ]
-crbug.com/dawn/0000 [ win ] webgpu:api,validation,image_copy,texture_related:copy_rectangle:method="WriteTexture";dimension="3d" [ RetryOnFailure ]
-crbug.com/dawn/0000 [ win ] webgpu:shader,validation,variable_and_const:io_shareable_type:storageClass="in";type="array%3Cu32,%2012%3E" [ RetryOnFailure ]
-
# New flakes. Please triage:
-crbug.com/dawn/0000 [ angle-opengl dawn-backend-validation no-oop-c nvidia-0x2184 release skia-renderer-vulkan ubuntu ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,non_compressed,non_array:srcFormat="rg16uint";dstFormat="rg16uint";dimension="3d" [ RetryOnFailure ]
-crbug.com/dawn/0000 [ angle-opengl dawn-backend-validation no-oop-c nvidia-0x2184 release skia-renderer-vulkan ubuntu ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,non_compressed,non_array:srcFormat="rg32sint";dstFormat="rg32sint";dimension="2d" [ RetryOnFailure ]
-crbug.com/dawn/0000 [ angle-opengl dawn-backend-validation no-oop-c nvidia-0x2184 release skia-renderer-vulkan ubuntu ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,non_compressed,non_array:srcFormat="rgba16float";dstFormat="rgba16float";dimension="3d" [ RetryOnFailure ]
+crbug.com/dawn/0000 [ intel win10 ] webgpu:api,operation,adapter,requestDevice_limits:better_than_supported:limit="maxComputeWorkgroupSizeY" [ RetryOnFailure ]
+crbug.com/dawn/0000 [ intel win10 ] webgpu:api,operation,adapter,requestDevice_limits:better_than_supported:limit="maxStorageBufferBindingSize" [ RetryOnFailure ]
+crbug.com/dawn/0000 [ intel win10 ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,non_compressed,array:srcFormat="rgb9e5ufloat";dstFormat="rgb9e5ufloat";dimension="2d" [ RetryOnFailure ]
+crbug.com/dawn/0000 [ intel win10 ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,non_compressed,non_array:srcFormat="bgra8unorm";dstFormat="bgra8unorm";dimension="1d" [ RetryOnFailure ]
+crbug.com/dawn/0000 [ intel win10 ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,non_compressed,non_array:srcFormat="rgba8sint";dstFormat="rgba8sint";dimension="1d" [ RetryOnFailure ]
+crbug.com/dawn/0000 [ dawn-backend-validation win10 ] webgpu:api,operation,render_pipeline,pipeline_output_targets:color,attachments:format="r16sint" [ RetryOnFailure ]
+crbug.com/dawn/0000 [ intel win10 ] webgpu:api,operation,resource_init,texture_zero:uninitialized_texture_is_zero:dimension="2d";readMethod="CopyToBuffer";format="rgba32float" [ RetryOnFailure ]
+crbug.com/dawn/0000 [ intel win10 ] webgpu:api,operation,resource_init,texture_zero:uninitialized_texture_is_zero:dimension="2d";readMethod="CopyToTexture";format="r8snorm" [ RetryOnFailure ]
+crbug.com/dawn/0000 [ intel win10 ] webgpu:api,operation,resource_init,texture_zero:uninitialized_texture_is_zero:dimension="2d";readMethod="Sample";format="r32uint" [ RetryOnFailure ]
+crbug.com/dawn/0000 [ intel win10 ] webgpu:api,operation,resource_init,texture_zero:uninitialized_texture_is_zero:dimension="2d";readMethod="Sample";format="r8sint" [ RetryOnFailure ]
+crbug.com/dawn/0000 [ intel win10 ] webgpu:api,operation,resource_init,texture_zero:uninitialized_texture_is_zero:dimension="2d";readMethod="Sample";format="rg8uint" [ RetryOnFailure ]
+crbug.com/dawn/0000 [ intel win10 ] webgpu:api,operation,resource_init,texture_zero:uninitialized_texture_is_zero:dimension="2d";readMethod="Sample";format="rg8unorm" [ RetryOnFailure ]
+crbug.com/dawn/0000 [ intel win10 ] webgpu:api,operation,resource_init,texture_zero:uninitialized_texture_is_zero:dimension="2d";readMethod="Sample";format="rgba32float" [ RetryOnFailure ]
+crbug.com/dawn/0000 [ intel win10 ] webgpu:api,validation,createBindGroup:buffer,* [ RetryOnFailure ]
+crbug.com/dawn/0000 [ dawn-no-backend-validation intel win10 ] webgpu:api,validation,createRenderPipeline:pipeline_output_targets:isAsync=true;writeMask=0;format="r16float" [ RetryOnFailure ]
+crbug.com/dawn/0000 [ dawn-no-backend-validation nvidia-0x2184 win10 ] webgpu:api,validation,createRenderPipeline:pipeline_output_targets:isAsync=true;writeMask=15;format="r32sint" [ RetryOnFailure ]
+crbug.com/dawn/0000 [ dawn-no-backend-validation intel win10 ] webgpu:api,validation,createRenderPipeline:pipeline_output_targets:isAsync=true;writeMask=4;format="rg16float" [ RetryOnFailure ]
+crbug.com/dawn/0000 [ amd-0x6821 dawn-no-backend-validation monterey ] webgpu:api,validation,encoding,programmable,pipeline_bind_group_compat:bgl_resource_type_mismatch:encoderType="render%20pass";call="drawIndexed";callWithZero=false [ RetryOnFailure ]
+crbug.com/dawn/0000 [ bigsur dawn-backend-validation intel-0x3e9b ] webgpu:api,validation,encoding,programmable,pipeline_bind_group_compat:bgl_resource_type_mismatch:encoderType="render%20pass";call="drawIndexed";callWithZero=false [ RetryOnFailure ]
+crbug.com/dawn/0000 [ dawn-backend-validation nvidia-0x2184 ubuntu ] webgpu:api,validation,image_copy,texture_related:format:method="CopyB2T";depthOrArrayLayers=1;dimension="1d";format="rg32sint" [ RetryOnFailure ]
+crbug.com/dawn/0000 [ dawn-no-backend-validation intel win10 ] webgpu:api,validation,image_copy,texture_related:format:method="CopyB2T";depthOrArrayLayers=1;dimension="1d";format="rg32sint" [ RetryOnFailure ]
+crbug.com/dawn/0000 [ dawn-no-backend-validation nvidia-0x2184 ubuntu ] webgpu:api,validation,image_copy,texture_related:format:method="CopyB2T";depthOrArrayLayers=1;dimension="1d";format="rg32sint" [ RetryOnFailure ]
+crbug.com/dawn/0000 [ dawn-backend-validation nvidia-0x2184 ubuntu ] webgpu:api,validation,image_copy,texture_related:format:method="CopyB2T";depthOrArrayLayers=1;dimension="1d";format="rg32uint" [ RetryOnFailure ]
+crbug.com/dawn/0000 [ dawn-no-backend-validation intel win10 ] webgpu:api,validation,image_copy,texture_related:format:method="CopyB2T";depthOrArrayLayers=1;dimension="1d";format="rg32uint" [ RetryOnFailure ]
+crbug.com/dawn/0000 [ bigsur dawn-no-backend-validation intel-0x3e9b ] webgpu:api,validation,image_copy,texture_related:format:method="CopyB2T";depthOrArrayLayers=1;dimension="1d";format="rg8uint" [ RetryOnFailure ]
+crbug.com/dawn/0000 [ dawn-backend-validation nvidia-0x2184 ubuntu ] webgpu:api,validation,image_copy,texture_related:format:method="CopyB2T";depthOrArrayLayers=1;dimension="1d";format="rg8uint" [ RetryOnFailure ]
+crbug.com/dawn/0000 [ dawn-backend-validation nvidia-0x2184 ubuntu ] webgpu:api,validation,image_copy,texture_related:format:method="CopyB2T";depthOrArrayLayers=1;dimension="1d";format="rgba16float" [ RetryOnFailure ]
+crbug.com/dawn/0000 [ dawn-backend-validation nvidia-0x2184 ubuntu ] webgpu:api,validation,image_copy,texture_related:format:method="CopyB2T";depthOrArrayLayers=1;dimension="1d";format="rgba16sint" [ RetryOnFailure ]
+crbug.com/dawn/0000 [ dawn-backend-validation intel-0x3e9b ] webgpu:shader,execution,zero_init:compute,zero_init:storageClass="workgroup";workgroupSize=[1,1,63];batch__=12 [ RetryOnFailure ]
+crbug.com/dawn/0000 [ dawn-no-backend-validation intel-0x3e9b ] webgpu:shader,execution,zero_init:compute,zero_init:storageClass="workgroup";workgroupSize=[1,1,63];batch__=12 [ RetryOnFailure ]
+crbug.com/dawn/0000 [ dawn-no-backend-validation intel-0x3e9b ] webgpu:shader,execution,zero_init:compute,zero_init:storageClass="workgroup";workgroupSize=[1,47,1];batch__=12 [ RetryOnFailure ]
+crbug.com/dawn/0000 [ intel win10 ] webgpu:shader,validation,parse,literal:abstract_float:val=".0" [ RetryOnFailure ]
+crbug.com/dawn/0000 [ intel win10 ] webgpu:shader,validation,parse,literal:abstract_float:val="00012." [ RetryOnFailure ]
+crbug.com/dawn/0000 [ intel win10 ] webgpu:shader,validation,parse,literal:abstract_float:val="1.2E2" [ RetryOnFailure ]
# New failures. Please triage:
-crbug.com/dawn/0000 [ angle-opengl dawn-no-backend-validation intel-0x5912 no-oop-c release skia-renderer-vulkan ubuntu ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,array:srcFormat="astc-8x5-unorm-srgb";dstFormat="astc-8x5-unorm-srgb";dimension="2d" [ Failure ]
-crbug.com/dawn/0000 [ angle-opengl dawn-no-backend-validation no-oop-c nvidia-0x2184 release skia-renderer-vulkan ubuntu ] webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,non_compressed,array:srcFormat="bgra8unorm";dstFormat="bgra8unorm";dimension="3d" [ Failure ]
-crbug.com/dawn/0000 [ angle-d3d11 dawn-no-backend-validation intel-0x5912 oop-c release-x64 skia-renderer-gl win10 ] webgpu:api,operation,memory_sync,texture,same_subresource:rw,* [ Slow ]
+crbug.com/dawn/0000 [ intel win10 ] webgpu:api,operation,resource_init,texture_zero:uninitialized_texture_is_zero:dimension="2d";readMethod="CopyToBuffer";format="rg16sint" [ Failure ]
+crbug.com/dawn/0000 [ intel win10 ] webgpu:api,operation,resource_init,texture_zero:uninitialized_texture_is_zero:dimension="2d";readMethod="CopyToBuffer";format="rg32float" [ Failure ]
+crbug.com/dawn/0000 [ intel win10 ] webgpu:api,operation,resource_init,texture_zero:uninitialized_texture_is_zero:dimension="2d";readMethod="CopyToBuffer";format="rg32uint" [ Failure ]
+crbug.com/dawn/0000 [ intel win10 ] webgpu:api,operation,resource_init,texture_zero:uninitialized_texture_is_zero:dimension="2d";readMethod="CopyToBuffer";format="rgba8uint" [ Failure ]
+crbug.com/dawn/0000 [ intel win10 ] webgpu:api,operation,resource_init,texture_zero:uninitialized_texture_is_zero:dimension="2d";readMethod="CopyToTexture";format="depth16unorm" [ Failure ]
+crbug.com/dawn/0000 [ intel win10 ] webgpu:api,operation,resource_init,texture_zero:uninitialized_texture_is_zero:dimension="2d";readMethod="CopyToTexture";format="rgba32sint" [ Failure ]
+crbug.com/dawn/0000 [ intel win10 ] webgpu:api,operation,resource_init,texture_zero:uninitialized_texture_is_zero:dimension="2d";readMethod="DepthTest";format="depth16unorm" [ Failure ]
+crbug.com/dawn/0000 [ intel win10 ] webgpu:api,operation,resource_init,texture_zero:uninitialized_texture_is_zero:dimension="2d";readMethod="Sample";format="rgba16uint" [ Failure ]
+crbug.com/dawn/0000 [ amd-0x6821 monterey ] webgpu:api,validation,capability_checks,features,texture_formats:texture_descriptor:* [ Failure ]
+crbug.com/dawn/0000 [ bigsur intel-0x3e9b ] webgpu:api,validation,capability_checks,features,texture_formats:texture_descriptor:* [ Failure ]
+crbug.com/dawn/0000 [ intel ubuntu ] webgpu:api,validation,capability_checks,features,texture_formats:texture_descriptor:* [ Failure ]
+crbug.com/dawn/0000 [ intel win10 ] webgpu:api,validation,capability_checks,features,texture_formats:texture_descriptor:* [ Failure ]
+crbug.com/dawn/0000 [ nvidia-0x2184 ubuntu ] webgpu:api,validation,capability_checks,features,texture_formats:texture_descriptor:* [ Failure ]
+crbug.com/dawn/0000 [ nvidia-0x2184 win10 ] webgpu:api,validation,capability_checks,features,texture_formats:texture_descriptor:* [ Failure ]
+crbug.com/dawn/0000 [ dawn-no-backend-validation intel win10 ] webgpu:api,validation,image_copy,texture_related:format:method="CopyB2T";depthOrArrayLayers=1;dimension="1d";format="rg8snorm" [ Failure ]
+crbug.com/dawn/0000 [ dawn-no-backend-validation intel win10 ] webgpu:api,validation,image_copy,texture_related:format:method="CopyB2T";depthOrArrayLayers=1;dimension="1d";format="rgba16float" [ Failure ]
+crbug.com/dawn/0000 webgpu:examples:gpu,with_texture_compression,bc:textureCompressionBC=false [ Failure ]
+crbug.com/dawn/0000 [ amd-0x6821 monterey ] webgpu:examples:gpu,with_texture_compression,etc2:* [ Failure ]
+crbug.com/dawn/0000 [ bigsur intel-0x3e9b ] webgpu:examples:gpu,with_texture_compression,etc2:* [ Failure ]
+crbug.com/dawn/0000 [ intel win10 ] webgpu:examples:gpu,with_texture_compression,etc2:* [ Failure ]
+crbug.com/dawn/0000 [ nvidia-0x2184 ubuntu ] webgpu:examples:gpu,with_texture_compression,etc2:* [ Failure ]
+crbug.com/dawn/0000 [ nvidia-0x2184 win10 ] webgpu:examples:gpu,with_texture_compression,etc2:* [ Failure ]
+crbug.com/dawn/0000 [ intel ubuntu ] webgpu:examples:gpu,with_texture_compression,etc2:textureCompressionETC2=false [ Failure ]
+crbug.com/dawn/0000 [ intel win10 ] webgpu:shader,execution,expression,call,builtin,faceForward:* [ Failure ]
+crbug.com/dawn/0000 [ intel win10 ] webgpu:shader,execution,expression,call,builtin,inversesqrt:abstract_float:* [ Failure ]
+crbug.com/dawn/0000 [ intel win10 ] webgpu:shader,execution,expression,call,builtin,length:* [ Failure ]
+crbug.com/dawn/0000 [ dawn-backend-validation intel-0x3e9b ] webgpu:shader,execution,zero_init:compute,zero_init:storageClass="workgroup";workgroupSize=[1,47,1];batch__=12 [ Failure ]
+crbug.com/dawn/0000 [ mac intel ] webgpu:api,validation,capability_checks,features,texture_formats:texture_descriptor:* [ Failure ]
diff --git a/chromium/third_party/dawn/webgpu-cts/scripts/compile_src.py b/chromium/third_party/dawn/webgpu-cts/scripts/compile_src.py
index 91c0eb43258..83d1de68be9 100755
--- a/chromium/third_party/dawn/webgpu-cts/scripts/compile_src.py
+++ b/chromium/third_party/dawn/webgpu-cts/scripts/compile_src.py
@@ -32,23 +32,23 @@ finally:
def compile_src(out_dir):
# First, clean the output directory so deleted files are pruned from old builds.
- shutil.rmtree(out_dir)
+ shutil.rmtree(out_dir, ignore_errors=True)
run_tsc_ignore_errors([
- '--project',
- os.path.join(webgpu_cts_root_dir, 'tsconfig.json'),
- '--outDir',
+ "--project",
+ os.path.join(webgpu_cts_root_dir, "tsconfig.json"),
+ "--outDir",
out_dir,
- '--noEmit',
- 'false',
- '--noEmitOnError',
- 'false',
- '--declaration',
- 'false',
- '--sourceMap',
- 'false',
- '--target',
- 'ES2017',
+ "--noEmit",
+ "false",
+ "--noEmitOnError",
+ "false",
+ "--declaration",
+ "false",
+ "--sourceMap",
+ "false",
+ "--target",
+ "ES2017",
])
@@ -56,46 +56,46 @@ def compile_src_for_node(out_dir, additional_args=None, clean=True):
additional_args = additional_args or []
if clean:
# First, clean the output directory so deleted files are pruned from old builds.
- shutil.rmtree(out_dir)
+ shutil.rmtree(out_dir, ignore_errors=True)
args = [
- '--project',
- os.path.join(webgpu_cts_root_dir, 'node.tsconfig.json'),
- '--outDir',
+ "--project",
+ os.path.join(webgpu_cts_root_dir, "node.tsconfig.json"),
+ "--outDir",
out_dir,
- '--noEmit',
- 'false',
- '--noEmitOnError',
- 'false',
- '--declaration',
- 'false',
- '--sourceMap',
- 'false',
- '--target',
- 'ES6',
+ "--noEmit",
+ "false",
+ "--noEmitOnError",
+ "false",
+ "--declaration",
+ "false",
+ "--sourceMap",
+ "false",
+ "--target",
+ "ES6",
]
args.extend(additional_args)
run_tsc_ignore_errors(args)
-if __name__ == '__main__':
+if __name__ == "__main__":
if len(sys.argv) != 2:
- print('Usage: compile_src.py GEN_DIR')
+ print("Usage: compile_src.py GEN_DIR")
sys.exit(1)
gen_dir = sys.argv[1]
# Compile the CTS src.
- compile_src(os.path.join(gen_dir, 'src'))
- compile_src_for_node(os.path.join(gen_dir, 'src-node'))
+ compile_src(os.path.join(gen_dir, "src"))
+ compile_src_for_node(os.path.join(gen_dir, "src-node"))
- # Run gen_listings.js to overwrite the dummy src/webgpu/listings.js created
+ # Run gen_listings.js to overwrite the placeholder src/webgpu/listings.js created
# from transpiling src/
RunNode([
- os.path.join(gen_dir, 'src-node', 'common', 'tools',
- 'gen_listings.js'),
- '--no-validate',
- os.path.join(gen_dir, 'src'),
- os.path.join(gen_dir, 'src-node', 'webgpu'),
+ os.path.join(gen_dir, "src-node", "common", "tools",
+ "gen_listings.js"),
+ "--no-validate",
+ os.path.join(gen_dir, "src"),
+ os.path.join(gen_dir, "src-node", "webgpu"),
])
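
For context on the ignore_errors=True change in the hunks above: shutil.rmtree raises an error when the output directory does not yet exist, which is the case on a first or freshly cleaned build, so the unconditional clean step used to fail there. A minimal standalone sketch of that behavior follows; the temporary path and names are illustrative only and are not part of the patch.

    import os
    import shutil
    import tempfile

    # Illustrative path that does not exist yet; stands in for out_dir above.
    out_dir = os.path.join(tempfile.mkdtemp(), "ts_out")

    try:
        shutil.rmtree(out_dir)  # raises FileNotFoundError when out_dir is missing
    except FileNotFoundError:
        print("clean step fails on a fresh checkout")

    # With ignore_errors=True the clean step is a no-op when out_dir is missing,
    # so the compile functions can always clean unconditionally before emitting.
    shutil.rmtree(out_dir, ignore_errors=True)
    print("clean step succeeds either way")
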
diff --git a/chromium/third_party/dawn/webgpu-cts/scripts/gen_ts_dep_lists.py b/chromium/third_party/dawn/webgpu-cts/scripts/gen_ts_dep_lists.py
index 3ef1f2a41fd..93bb692bacd 100755
--- a/chromium/third_party/dawn/webgpu-cts/scripts/gen_ts_dep_lists.py
+++ b/chromium/third_party/dawn/webgpu-cts/scripts/gen_ts_dep_lists.py
@@ -68,12 +68,12 @@ if __name__ == '__main__':
txt = f.readlines()
if (txt != ts_sources):
raise RuntimeError(
- '%s is out of date. Please re-run //third_party/dawn/third_party/webgpu-cts/scripts/gen_ts_dep_lists.py\n'
+ '%s is out of date. Please re-run //third_party/dawn/webgpu-cts/scripts/gen_ts_dep_lists.py\n'
% ts_sources_txt)
with open(resource_files_txt, 'r') as f:
if (f.readlines() != resource_files):
raise RuntimeError(
- '%s is out of date. Please re-run //third_party/dawn/third_party/webgpu-cts/scripts/gen_ts_dep_lists.py\n'
+ '%s is out of date. Please re-run //third_party/dawn/webgpu-cts/scripts/gen_ts_dep_lists.py\n'
% resource_files_txt)
else:
with open(ts_sources_txt, 'w') as f:
diff --git a/chromium/third_party/dawn/webgpu-cts/scripts/list.py b/chromium/third_party/dawn/webgpu-cts/scripts/list.py
index 70d278ac994..fb72d85e3c1 100755
--- a/chromium/third_party/dawn/webgpu-cts/scripts/list.py
+++ b/chromium/third_party/dawn/webgpu-cts/scripts/list.py
@@ -35,11 +35,9 @@ def list_testcases(query, js_out_dir=None):
try:
logging.info('WebGPU CTS: Transpiling tools...')
- compile_src_for_node(js_out_dir, [
- '--incremental', '--tsBuildInfoFile',
- os.path.join(js_out_dir, 'build.tsbuildinfo')
- ],
- clean=False)
+ # TODO(crbug.com/dawn/1395): Bring back usage of an incremental build to
+ # speed up this operation. It was disabled due to flakiness.
+ compile_src_for_node(js_out_dir)
old_sys_path = sys.path
try: